From d188e2462637227368889c38261b251423b4bc61 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 19 Apr 2021 07:30:14 +0200
Subject: [PATCH 001/647] Fix type definition

---
 api/new.d.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/api/new.d.ts b/api/new.d.ts
index dfcbf3efc..475d897f3 100644
--- a/api/new.d.ts
+++ b/api/new.d.ts
@@ -66,7 +66,7 @@ declare type extendsCallback = (options: ClientExtendsCallbackOptions) => any;
 // /Extend API

 declare type callbackFn = (err: ApiError, result: ApiResponse) => void;
-interface Client {
+declare class Client {
   connectionPool: ConnectionPool
   transport: Transport
   serializer: Serializer

From d2f77e994408afe1b9b64df397e7f4e761615821 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 19 Apr 2021 09:32:32 +0200
Subject: [PATCH 002/647] Fix type definition

---
 api/new.d.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/api/new.d.ts b/api/new.d.ts
index 475d897f3..53b8493f6 100644
--- a/api/new.d.ts
+++ b/api/new.d.ts
@@ -67,6 +67,7 @@ declare type extendsCallback = (options: ClientExtendsCallbackOptions) => any;
 declare type callbackFn = (err: ApiError, result: ApiResponse) => void;
 declare class Client {
+  constructor(opts: ClientOptions);
   connectionPool: ConnectionPool
   transport: Transport
   serializer: Serializer

From 04c56fe269aa870d02e47ea2fbe816135819466e Mon Sep 17 00:00:00 2001
From: delvedor
Date: Wed, 21 Apr 2021 13:47:19 +0200
Subject: [PATCH 003/647] Updated ci configuration

---
 ...arch-js+7.11.yml => elastic+elasticsearch-js+7.13.yml} | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
 rename .ci/jobs/{elastic+elasticsearch-js+7.11.yml => elastic+elasticsearch-js+7.13.yml} (60%)

diff --git a/.ci/jobs/elastic+elasticsearch-js+7.11.yml b/.ci/jobs/elastic+elasticsearch-js+7.13.yml
similarity index 60%
rename from .ci/jobs/elastic+elasticsearch-js+7.11.yml
rename to .ci/jobs/elastic+elasticsearch-js+7.13.yml
index d2d3b40e6..6f57a009d 100644
--- a/.ci/jobs/elastic+elasticsearch-js+7.11.yml
+++ b/.ci/jobs/elastic+elasticsearch-js+7.13.yml
@@ -1,13 +1,13 @@
 ---
 - job:
     name: elastic+elasticsearch-js+7.13
     display-name: 'elastic / elasticsearch-js # 7.13'
     description: Testing the elasticsearch-js 7.13 branch.
     junit_results: "*-junit.xml"
   parameters:
     - string:
         name: branch_specifier
         default: refs/heads/7.13
         description: the Git branch specifier to build (<branchName>, <tagName>, <commitId>, etc.)
   triggers:

From d1a5fc7cfa485b74d3e9125d766eaecbdbecea7f Mon Sep 17 00:00:00 2001
From: delvedor
Date: Thu, 22 Apr 2021 08:09:27 +0200
Subject: [PATCH 004/647] Bumped v8.0.0-canary.7

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index c1e635424..a3b610002 100644
--- a/package.json
+++ b/package.json
@@ -12,7 +12,7 @@
   },
   "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html",
   "version": "8.0.0-SNAPSHOT.9f33e3c7",
-  "versionCanary": "8.0.0-canary.6",
+  "versionCanary": "8.0.0-canary.7",
   "keywords": [
     "elasticsearch",
     "elastic",

From 265eb2b22591f5ec054d0521268f535dcce38c86 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?=
Date: Mon, 26 Apr 2021 16:25:40 +0200
Subject: [PATCH 005/647] [DOCS] Adds Breaking changes from old client to Introduction.
 (#1453)

---
 docs/breaking-changes.asciidoc | 8 ++++----
 docs/index.asciidoc | 1 -
 docs/introduction.asciidoc | 6 ++++++
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/docs/breaking-changes.asciidoc b/docs/breaking-changes.asciidoc
index 503e03297..9942eb9a8 100644
--- a/docs/breaking-changes.asciidoc
+++ b/docs/breaking-changes.asciidoc
@@ -1,5 +1,5 @@
 [[breaking-changes]]
-== Breaking changes coming from the old client
+=== Breaking changes coming from the old client

 If you were already using the previous version of this client – the one you used to install with `npm install elasticsearch` – you will encounter some breaking changes.

 [discrete]
-=== Don’t panic!
+==== Don’t panic!

 Every breaking change was carefully weighed, and each is justified. Furthermore, the new codebase has been rewritten with modern JavaScript and has been carefully designed to be easy to maintain.

 [discrete]
-=== Breaking changes
+==== Breaking changes

 * Minimum supported version of Node.js is `v8`.

@@ -212,7 +212,7 @@ client.transport.request({
 ----

 [discrete]
-=== Talk is cheap. Show me the code.
+==== Talk is cheap. Show me the code.

 You can find a code snippet with the old client below followed by the same code logic but with the new client.

diff --git a/docs/index.asciidoc b/docs/index.asciidoc
index aed78305f..11ac4f3ec 100644
--- a/docs/index.asciidoc
+++ b/docs/index.asciidoc
@@ -18,5 +18,4 @@ include::transport.asciidoc[]
 include::typescript.asciidoc[]
 include::reference.asciidoc[]
 include::examples/index.asciidoc[]
-include::breaking-changes.asciidoc[]
 include::helpers.asciidoc[]

diff --git a/docs/introduction.asciidoc b/docs/introduction.asciidoc
index d2f4b61f1..154aaaf6f 100644
--- a/docs/introduction.asciidoc
+++ b/docs/introduction.asciidoc
@@ -4,6 +4,9 @@
 This is the official Node.js client for {es}. This page gives a quick overview about the features of the client.

+Refer to <> for breaking changes coming from the old
+client.
+
 [discrete]
 === Features

@@ -189,3 +192,6 @@ npm install esmaster@github:elastic/elasticsearch-js
 ----

 WARNING: This command installs the master branch of the client which is not considered stable.
+
+
+include::breaking-changes.asciidoc[]
\ No newline at end of file

From 0d1e1c26133d470c450439e9d573b5061f5c4037 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 26 Apr 2021 16:50:53 +0200
Subject: [PATCH 006/647] API generation

---
 api/api/fleet.js | 65 +++++++++
 api/api/monitoring.js | 2 +-
 api/api/searchable_snapshots.js | 31 ++++-
 api/api/security.js | 210 +++++++++++++++++++++++++++++
 api/api/snapshot.js | 4 +-
 api/index.js | 11 ++
 api/requestParams.d.ts | 51 ++++++-
 docs/reference.asciidoc | 227 ++++++++++++++++++++++++++++----
 index.d.ts | 74 ++++++++++-
 9 files changed, 639 insertions(+), 36 deletions(-)
 create mode 100644 api/api/fleet.js

diff --git a/api/api/fleet.js b/api/api/fleet.js
new file mode 100644
index 000000000..50329860d
--- /dev/null
+++ b/api/api/fleet.js
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

'use strict'

/* eslint camelcase: 0 */
/* eslint no-unused-vars: 0 */

const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
const acceptedQuerystring = ['wait_for_advance', 'wait_for_index', 'checkpoints', 'timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
const snakeCase = { waitForAdvance: 'wait_for_advance', waitForIndex: 'wait_for_index', errorTrace: 'error_trace', filterPath: 'filter_path' }

function FleetApi (transport, ConfigurationError) {
  this.transport = transport
  this[kConfigurationError] = ConfigurationError
}

FleetApi.prototype.globalCheckpoints = function fleetGlobalCheckpointsApi (params, options, callback) {
  ;[params, options, callback] = normalizeArguments(params, options, callback)

  // check required parameters
  if (params.index == null) {
    const err = new this[kConfigurationError]('Missing required parameter: index')
    return handleError(err, callback)
  }

  let { method, body, index, ...querystring } = params
  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)

  let path = ''
  if (method == null) method = 'GET'
  path = '/' + encodeURIComponent(index) + '/' + '_fleet' + '/' + 'global_checkpoints'

  // build request object
  const request = {
    method,
    path,
    body: null,
    querystring
  }

  return this.transport.request(request, options, callback)
}

Object.defineProperties(FleetApi.prototype, {
  global_checkpoints: { get () { return this.globalCheckpoints } }
})

module.exports = FleetApi

diff --git a/api/api/monitoring.js b/api/api/monitoring.js
index 531a8b068..5366bd517 100644
--- a/api/api/monitoring.js
+++ b/api/api/monitoring.js
@@ -56,7 +56,7 @@ MonitoringApi.prototype.bulk = function monitoringBulkApi (params, options, call
   const request = {
     method,
     path,
-    body: body || '',
+    bulkBody: body,
     querystring
   }

diff --git a/api/api/searchable_snapshots.js b/api/api/searchable_snapshots.js
index f14772670..3353211ca 100644
--- a/api/api/searchable_snapshots.js
+++ b/api/api/searchable_snapshots.js
@@ -23,14 +23,40 @@
 /* eslint no-unused-vars: 0 */

 const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['ignore_unavailable', 'allow_no_indices', 'expand_wildcards', 'index', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'master_timeout', 'wait_for_completion', 'storage', 'level']
-const snakeCase = { ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', errorTrace: 'error_trace', filterPath: 'filter_path', masterTimeout: 'master_timeout', waitForCompletion: 'wait_for_completion' }
+const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'ignore_unavailable', 'allow_no_indices', 'expand_wildcards', 'index', 'master_timeout', 'wait_for_completion', 'storage', 'level']
+const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', expandWildcards:
'expand_wildcards', masterTimeout: 'master_timeout', waitForCompletion: 'wait_for_completion' }

 function SearchableSnapshotsApi (transport, ConfigurationError) {
   this.transport = transport
   this[kConfigurationError] = ConfigurationError
 }

+SearchableSnapshotsApi.prototype.cacheStats = function searchableSnapshotsCacheStatsApi (params, options, callback) {
+  ;[params, options, callback] = normalizeArguments(params, options, callback)
+
+  let { method, body, nodeId, node_id, ...querystring } = params
+  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
+
+  let path = ''
+  if ((node_id || nodeId) != null) {
+    if (method == null) method = 'GET'
+    path = '/' + '_searchable_snapshots' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'cache' + '/' + 'stats'
+  } else {
+    if (method == null) method = 'GET'
+    path = '/' + '_searchable_snapshots' + '/' + 'cache' + '/' + 'stats'
+  }
+
+  // build request object
+  const request = {
+    method,
+    path,
+    body: null,
+    querystring
+  }
+
+  return this.transport.request(request, options, callback)
+}
+
 SearchableSnapshotsApi.prototype.clearCache = function searchableSnapshotsClearCacheApi (params, options, callback) {
   ;[params, options, callback] = normalizeArguments(params, options, callback)

@@ -125,6 +151,7 @@ SearchableSnapshotsApi.prototype.stats = function searchableSnapshotsStatsApi (p
 }

 Object.defineProperties(SearchableSnapshotsApi.prototype, {
+  cache_stats: { get () { return this.cacheStats } },
   clear_cache: { get () { return this.clearCache } }
 })

diff --git a/api/api/security.js b/api/api/security.js
index 389dd6d4a..96d775dc5 100644
--- a/api/api/security.js
+++ b/api/api/security.js
@@ -192,6 +192,50 @@ SecurityApi.prototype.clearCachedRoles = function securityClearCachedRolesApi (p
   return this.transport.request(request, options, callback)
 }

+SecurityApi.prototype.clearCachedServiceTokens = function securityClearCachedServiceTokensApi (params, options, callback) {
+  ;[params, options, callback] = normalizeArguments(params, options, callback)
+
+  // check required parameters
+  if (params.namespace == null) {
+    const err = new this[kConfigurationError]('Missing required parameter: namespace')
+    return handleError(err, callback)
+  }
+  if (params.service == null) {
+    const err = new this[kConfigurationError]('Missing required parameter: service')
+    return handleError(err, callback)
+  }
+  if (params.name == null) {
+    const err = new this[kConfigurationError]('Missing required parameter: name')
+    return handleError(err, callback)
+  }
+
+  // check required url components
+  if (params.name != null && (params.service == null || params.namespace == null)) {
+    const err = new this[kConfigurationError]('Missing required parameter of the url: service, namespace')
+    return handleError(err, callback)
+  } else if (params.service != null && (params.namespace == null)) {
+    const err = new this[kConfigurationError]('Missing required parameter of the url: namespace')
+    return handleError(err, callback)
+  }
+
+  let { method, body, namespace, service, name, ...querystring } = params
+  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
+
+  let path = ''
+  if (method == null) method = 'POST'
+  path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service) + '/' + 'credential' + '/' + 'token' + '/' + encodeURIComponent(name) + '/' + '_clear_cache'
+
+  // build request object
+  const request = {
+    method,
+    path,
+    body: body || '',
+    querystring
+  }
+
+  return
this.transport.request(request, options, callback)
 }

 SecurityApi.prototype.createApiKey = function securityCreateApiKeyApi (params, options, callback) {
   ;[params, options, callback] = normalizeArguments(params, options, callback)

@@ -219,6 +263,51 @@
   return this.transport.request(request, options, callback)
 }

+SecurityApi.prototype.createServiceToken = function securityCreateServiceTokenApi (params, options, callback) {
+  ;[params, options, callback] = normalizeArguments(params, options, callback)
+
+  // check required parameters
+  if (params.namespace == null) {
+    const err = new this[kConfigurationError]('Missing required parameter: namespace')
+    return handleError(err, callback)
+  }
+  if (params.service == null) {
+    const err = new this[kConfigurationError]('Missing required parameter: service')
+    return handleError(err, callback)
+  }
+
+  // check required url components
+  if (params.name != null && (params.service == null || params.namespace == null)) {
+    const err = new this[kConfigurationError]('Missing required parameter of the url: service, namespace')
+    return handleError(err, callback)
+  } else if (params.service != null && (params.namespace == null)) {
+    const err = new this[kConfigurationError]('Missing required parameter of the url: namespace')
+    return handleError(err, callback)
+  }
+
+  let { method, body, namespace, service, name, ...querystring } = params
+  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
+
+  let path = ''
+  if ((namespace) != null && (service) != null && (name) != null) {
+    if (method == null) method = 'PUT'
+    path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service) + '/' + 'credential' + '/' + 'token' + '/' + encodeURIComponent(name)
+  } else {
+    if (method == null) method = 'POST'
+    path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service) + '/' + 'credential' + '/' + 'token'
+  }
+
+  // build request object
+  const request = {
+    method,
+    path,
+    body: body || '',
+    querystring
+  }
+
+  return this.transport.request(request, options, callback)
+}
+
 SecurityApi.prototype.deletePrivileges = function securityDeletePrivilegesApi (params, options, callback) {
   ;[params, options, callback] = normalizeArguments(params, options, callback)

@@ -310,7 +399,50 @@ SecurityApi.prototype.deleteRoleMapping = function securityDeleteRoleMappingApi
   return this.transport.request(request, options, callback)
 }

+SecurityApi.prototype.deleteServiceToken = function securityDeleteServiceTokenApi (params, options, callback) {
+  ;[params, options, callback] = normalizeArguments(params, options, callback)
+
+  // check required parameters
+  if (params.namespace == null) {
+    const err = new this[kConfigurationError]('Missing required parameter: namespace')
+    return handleError(err, callback)
+  }
+  if (params.service == null) {
+    const err = new this[kConfigurationError]('Missing required parameter: service')
+    return handleError(err, callback)
+  }
+  if (params.name == null) {
+    const err = new this[kConfigurationError]('Missing required parameter: name')
+    return handleError(err, callback)
+  }
+
+  // check required url components
+  if (params.name != null && (params.service == null || params.namespace == null)) {
+    const err = new this[kConfigurationError]('Missing required parameter of the url: service, namespace')
+    return handleError(err, callback)
+  } else if
(params.service != null && (params.namespace == null)) {
+    const err = new this[kConfigurationError]('Missing required parameter of the url: namespace')
+    return handleError(err, callback)
+  }
+
+  let { method, body, namespace, service, name, ...querystring } = params
+  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
+
+  let path = ''
+  if (method == null) method = 'DELETE'
+  path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service) + '/' + 'credential' + '/' + 'token' + '/' + encodeURIComponent(name)
+
+  // build request object
+  const request = {
+    method,
+    path,
+    body: body || '',
+    querystring
+  }
+
+  return this.transport.request(request, options, callback)
+}
+
 SecurityApi.prototype.deleteUser = function securityDeleteUserApi (params, options, callback) {
   ;[params, options, callback] = normalizeArguments(params, options, callback)

@@ -520,6 +653,78 @@ SecurityApi.prototype.getRoleMapping = function securityGetRoleMappingApi (param
   return this.transport.request(request, options, callback)
 }

+SecurityApi.prototype.getServiceAccounts = function securityGetServiceAccountsApi (params, options, callback) {
+  ;[params, options, callback] = normalizeArguments(params, options, callback)
+
+  // check required url components
+  if (params.service != null && (params.namespace == null)) {
+    const err = new this[kConfigurationError]('Missing required parameter of the url: namespace')
+    return handleError(err, callback)
+  }
+
+  let { method, body, namespace, service, ...querystring } = params
+  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
+
+  let path = ''
+  if ((namespace) != null && (service) != null) {
+    if (method == null) method = 'GET'
+    path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service)
+  } else if ((namespace) != null) {
+    if (method == null) method = 'GET'
+    path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace)
+  } else {
+    if (method == null) method = 'GET'
+    path = '/' + '_security' + '/' + 'service'
+  }
+
+  // build request object
+  const request = {
+    method,
+    path,
+    body: null,
+    querystring
+  }
+
+  return this.transport.request(request, options, callback)
+}
+
+SecurityApi.prototype.getServiceCredentials = function securityGetServiceCredentialsApi (params, options, callback) {
+  ;[params, options, callback] = normalizeArguments(params, options, callback)
+
+  // check required parameters
+  if (params.namespace == null) {
+    const err = new this[kConfigurationError]('Missing required parameter: namespace')
+    return handleError(err, callback)
+  }
+  if (params.service == null) {
+    const err = new this[kConfigurationError]('Missing required parameter: service')
+    return handleError(err, callback)
+  }
+
+  // check required url components
+  if (params.service != null && (params.namespace == null)) {
+    const err = new this[kConfigurationError]('Missing required parameter of the url: namespace')
+    return handleError(err, callback)
+  }
+
+  let { method, body, namespace, service, ...querystring } = params
+  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
+
+  let path = ''
+  if (method == null) method = 'GET'
+  path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service) + '/' + 'credential'
+
+  // build request object
+  const request = {
+    method,
+    path,
+    body: null,
+    querystring
+  }
+
+  return
this.transport.request(request, options, callback)
+}
+
 SecurityApi.prototype.getToken = function securityGetTokenApi (params, options, callback) {
   ;[params, options, callback] = normalizeArguments(params, options, callback)

@@ -833,10 +1038,13 @@ Object.defineProperties(SecurityApi.prototype, {
   clear_cached_privileges: { get () { return this.clearCachedPrivileges } },
   clear_cached_realms: { get () { return this.clearCachedRealms } },
   clear_cached_roles: { get () { return this.clearCachedRoles } },
+  clear_cached_service_tokens: { get () { return this.clearCachedServiceTokens } },
   create_api_key: { get () { return this.createApiKey } },
+  create_service_token: { get () { return this.createServiceToken } },
   delete_privileges: { get () { return this.deletePrivileges } },
   delete_role: { get () { return this.deleteRole } },
   delete_role_mapping: { get () { return this.deleteRoleMapping } },
+  delete_service_token: { get () { return this.deleteServiceToken } },
   delete_user: { get () { return this.deleteUser } },
   disable_user: { get () { return this.disableUser } },
   enable_user: { get () { return this.enableUser } },
@@ -845,6 +1053,8 @@ Object.defineProperties(SecurityApi.prototype, {
   get_privileges: { get () { return this.getPrivileges } },
   get_role: { get () { return this.getRole } },
   get_role_mapping: { get () { return this.getRoleMapping } },
+  get_service_accounts: { get () { return this.getServiceAccounts } },
+  get_service_credentials: { get () { return this.getServiceCredentials } },
   get_token: { get () { return this.getToken } },
   get_user: { get () { return this.getUser } },
   get_user_privileges: { get () { return this.getUserPrivileges } },

diff --git a/api/api/snapshot.js b/api/api/snapshot.js
index 35de58733..911d13f2d 100644
--- a/api/api/snapshot.js
+++ b/api/api/snapshot.js
@@ -23,8 +23,8 @@
 /* eslint no-unused-vars: 0 */

 const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['master_timeout', 'timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'wait_for_completion', 'verify', 'ignore_unavailable', 'verbose', 'local']
-const snakeCase = { masterTimeout: 'master_timeout', errorTrace: 'error_trace', filterPath: 'filter_path', waitForCompletion: 'wait_for_completion', ignoreUnavailable: 'ignore_unavailable' }
+const acceptedQuerystring = ['master_timeout', 'timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'wait_for_completion', 'verify', 'ignore_unavailable', 'index_details', 'verbose', 'local']
+const snakeCase = { masterTimeout: 'master_timeout', errorTrace: 'error_trace', filterPath: 'filter_path', waitForCompletion: 'wait_for_completion', ignoreUnavailable: 'ignore_unavailable', indexDetails: 'index_details' }

 function SnapshotApi (transport, ConfigurationError) {
   this.transport = transport

diff --git a/api/index.js b/api/index.js
index e3f2a4cac..ca8bb5d80 100644
--- a/api/index.js
+++ b/api/index.js
@@ -41,6 +41,7 @@ const existsSourceApi = require('./api/exists_source')
 const explainApi = require('./api/explain')
 const FeaturesApi = require('./api/features')
 const fieldCapsApi = require('./api/field_caps')
+const FleetApi = require('./api/fleet')
 const getApi = require('./api/get')
 const getScriptApi = require('./api/get_script')
 const getScriptContextApi = require('./api/get_script_context')
@@ -102,6 +103,7 @@ const kDanglingIndices = Symbol('DanglingIndices')
 const kEnrich = Symbol('Enrich')
 const kEql = Symbol('Eql')
 const kFeatures = Symbol('Features')
+const
kFleet = Symbol('Fleet')
 const kGraph = Symbol('Graph')
 const kIlm = Symbol('Ilm')
 const kIndices = Symbol('Indices')
@@ -137,6 +139,7 @@ function ESAPI (opts) {
   this[kEnrich] = null
   this[kEql] = null
   this[kFeatures] = null
+  this[kFleet] = null
   this[kGraph] = null
   this[kIlm] = null
   this[kIndices] = null
@@ -285,6 +288,14 @@ Object.defineProperties(ESAPI.prototype, {
     }
   },
   field_caps: { get () { return this.fieldCaps } },
+  fleet: {
+    get () {
+      if (this[kFleet] === null) {
+        this[kFleet] = new FleetApi(this.transport, this[kConfigurationError])
+      }
+      return this[kFleet]
+    }
+  },
   get_script: { get () { return this.getScript } },
   get_script_context: { get () { return this.getScriptContext } },
   get_script_languages: { get () { return this.getScriptLanguages } },

diff --git a/api/requestParams.d.ts b/api/requestParams.d.ts
index da48b23d0..e5332916d 100644
--- a/api/requestParams.d.ts
+++ b/api/requestParams.d.ts
@@ -810,6 +810,14 @@ export interface FieldCaps extends Generic {
   body?: T;
 }

+export interface FleetGlobalCheckpoints extends Generic {
+  index: string;
+  wait_for_advance?: boolean;
+  wait_for_index?: boolean;
+  checkpoints?: string | string[];
+  timeout?: string;
+}
+
 export interface Get extends Generic {
   id: string;
   index: string;
@@ -1869,7 +1877,7 @@ export interface MlValidateDetector extends Generic {
   body: T;
 }

-export interface MonitoringBulk extends Generic {
+export interface MonitoringBulk extends Generic {
   type?: string;
   system_id?: string;
   system_api_version?: string;
@@ -1879,7 +1887,7 @@

 export interface Msearch extends Generic {
   index?: string | string[];
-  search_type?: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch';
+  search_type?: 'query_then_fetch' | 'dfs_query_then_fetch';
   max_concurrent_searches?: number;
   typed_keys?: boolean;
   pre_filter_shard_size?: number;
@@ -1891,7 +1899,7 @@

 export interface MsearchTemplate extends Generic {
   index?: string | string[];
-  search_type?: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch';
+  search_type?: 'query_then_fetch' | 'dfs_query_then_fetch';
   typed_keys?: boolean;
   max_concurrent_searches?: number;
   rest_total_hits_as_int?: boolean;
@@ -2136,7 +2144,7 @@ export interface SearchTemplate extends Generic {
   preference?: string;
   routing?: string | string[];
   scroll?: string;
-  search_type?: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch';
+  search_type?: 'query_then_fetch' | 'dfs_query_then_fetch';
   explain?: boolean;
   profile?: boolean;
   typed_keys?: boolean;
@@ -2145,6 +2153,10 @@
   body: T;
 }

+export interface SearchableSnapshotsCacheStats extends Generic {
+  node_id?: string | string[];
+}
+
 export interface SearchableSnapshotsClearCache extends Generic {
   index?: string | string[];
   ignore_unavailable?: boolean;
@@ -2192,11 +2204,24 @@ export interface SecurityClearCachedRoles extends Generic {
   name: string | string[];
 }

+export interface SecurityClearCachedServiceTokens extends Generic {
+  namespace: string;
+  service: string;
+  name: string | string[];
+}
+
 export interface SecurityCreateApiKey extends Generic {
   refresh?: 'wait_for' | boolean;
   body: T;
 }

+export interface SecurityCreateServiceToken extends Generic {
+  namespace: string;
+  service: string;
+  name?: string;
+  refresh?: 'wait_for' | boolean;
+}
+
 export interface SecurityDeletePrivileges extends Generic {
  application: string;
   name: string;
@@ -2213,6 +2238,13 @@ export interface SecurityDeleteRoleMapping extends Generic {
   refresh?: 'wait_for' | boolean;
 }

+export interface SecurityDeleteServiceToken extends Generic {
+  namespace: string;
+  service: string;
+  name: string;
+  refresh?: 'wait_for' | boolean;
+}
+
 export interface SecurityDeleteUser extends Generic {
   username: string;
   refresh?: 'wait_for' | boolean;
@@ -2252,6 +2284,16 @@ export interface SecurityGetRoleMapping extends Generic {
   name?: string | string[];
 }

+export interface SecurityGetServiceAccounts extends Generic {
+  namespace?: string;
+  service?: string;
+}
+
+export interface SecurityGetServiceCredentials extends Generic {
+  namespace: string;
+  service: string;
+}
+
 export interface SecurityGetToken extends Generic {
   body: T;
 }
@@ -2396,6 +2438,7 @@ export interface SnapshotGet extends Generic {
   snapshot: string | string[];
   master_timeout?: string;
   ignore_unavailable?: boolean;
+  index_details?: boolean;
   verbose?: boolean;
 }

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index b02a3837e..d3c57ffc4 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -2058,7 +2058,7 @@ client.cluster.getSettings({
   include_defaults: boolean
 })
 ----
-link:{ref}/cluster-update-settings.html[Documentation] +
+link:{ref}/cluster-get-settings.html[Documentation] +
 [cols=2*]
 |===
 |`flat_settings` or `flatSettings`
@@ -3293,6 +3293,41 @@
 _Default:_ `open`

 |===

+[discrete]
+=== fleet.globalCheckpoints
+*Stability:* experimental
+[source,ts]
+----
+client.fleet.globalCheckpoints({
+  index: string,
+  wait_for_advance: boolean,
+  wait_for_index: boolean,
+  checkpoints: string | string[],
+  timeout: string
+})
+----
+[cols=2*]
+|===
+|`index`
+|`string` - The name of the index.
+
+|`wait_for_advance` or `waitForAdvance`
+|`boolean` - Whether to wait for the global checkpoint to advance past the specified current checkpoints +
+_Default:_ `false`
+
+|`wait_for_index` or `waitForIndex`
+|`boolean` - Whether to wait for the target index to exist and all primary shards be active +
+_Default:_ `false`
+
+|`checkpoints`
+|`string \| string[]` - Comma separated list of checkpoints
+
+|`timeout`
+|`string` - Timeout to wait for global checkpoint to advance +
+_Default:_ `30s`
+
+|===

 [discrete]
 === get
@@ -6010,7 +6045,7 @@ link:{ref}/ml-delete-calendar-job.html[Documentation] +

 [discrete]
 === ml.deleteDataFrameAnalytics
-*Stability:* beta
+
 [source,ts]
@@ -6178,7 +6213,7 @@ link:{ref}/ml-delete-snapshot.html[Documentation] +

 [discrete]
 === ml.deleteTrainedModel
-*Stability:* beta
+
 [source,ts]
----
client.ml.deleteTrainedModel({
  model_id: string
})
----
link:{ref}/delete-trained-models.html[Documentation] +

 [discrete]
 === ml.deleteTrainedModelAlias
-*Stability:* beta
+
 [source,ts]
@@ -6233,7 +6268,7 @@ link:{ref}/ml-apis.html[Documentation] +

 [discrete]
 === ml.evaluateDataFrame
-*Stability:* beta
+
 [source,ts]
----
client.ml.evaluateDataFrame({
  body: object
})
----
link:{ref}/evaluate-dfanalytics.html[Documentation] +

 [discrete]
 === ml.explainDataFrameAnalytics
-*Stability:* beta
+
 [source,ts]
@@ -6505,7 +6540,7 @@ link:{ref}/ml-get-category.html[Documentation] +

 [discrete]
 === ml.getDataFrameAnalytics
-*Stability:* beta
+
 [source,ts]
----
client.ml.getDataFrameAnalytics({
  id: string,
  allow_no_match: boolean,
  from: number,
  size: number,
  exclude_generated: boolean
})
----
_Default:_ `100`

 [discrete]
 === ml.getDataFrameAnalyticsStats
-*Stability:* beta
+
 [source,ts]
@@ -6926,7 +6961,7 @@ link:{ref}/ml-get-record.html[Documentation] +

 [discrete]
 === ml.getTrainedModels
-*Stability:* beta
+
 [source,ts]
----
client.ml.getTrainedModels({ ... })
----
_Default:_ `100`

 [discrete]
 === ml.getTrainedModelsStats
-*Stability:* beta
+
 [source,ts]
@@ -7088,7 +7123,7 @@ link:{ref}/ml-post-data.html[Documentation] +

 [discrete]
 === ml.previewDataFrameAnalytics
-*Stability:* beta
+
 [source,ts]
@@ -7172,7 +7207,7 @@ link:{ref}/ml-put-calendar-job.html[Documentation] +

 [discrete]
 === ml.putDataFrameAnalytics
-*Stability:* beta
+
 [source,ts]
@@ -7272,7 +7307,7 @@ link:{ref}/ml-put-job.html[Documentation] +

 [discrete]
 === ml.putTrainedModel
-*Stability:* beta
+
 [source,ts]
@@ -7293,7 +7328,7 @@ link:{ref}/put-trained-models.html[Documentation] +

 [discrete]
 === ml.putTrainedModelAlias
-*Stability:* beta
+
 [source,ts]
@@ -7368,7 +7403,7 @@ link:{ref}/ml-set-upgrade-mode.html[Documentation] +

 [discrete]
 === ml.startDataFrameAnalytics
-*Stability:* beta
+
 [source,ts]
@@ -7426,7 +7461,7 @@ link:{ref}/ml-start-datafeed.html[Documentation] +

 [discrete]
 === ml.stopDataFrameAnalytics
-*Stability:* beta
+
 [source,ts]
@@ -7498,7 +7533,7 @@

 [discrete]
 === ml.updateDataFrameAnalytics
-*Stability:* beta
+
 [source,ts]
@@ -7726,7 +7761,7 @@
 WARNING: This parameter has been deprecated.
 ----
 client.msearch({
   index: string | string[],
-  search_type: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch',
+  search_type: 'query_then_fetch' | 'dfs_query_then_fetch',
   max_concurrent_searches: number,
   typed_keys: boolean,
   pre_filter_shard_size: number,
@@ -7744,7 +7779,7 @@ link:{ref}/search-multi-search.html[Documentation] +
 |`string \| string[]` - A comma-separated list of index names to use as default

 |`search_type` or `searchType`
-|`'query_then_fetch' \| 'query_and_fetch' \| 'dfs_query_then_fetch' \| 'dfs_query_and_fetch'` - Search operation type
+|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type

 |`max_concurrent_searches` or `maxConcurrentSearches`
 |`number` - Controls the maximum number of concurrent searches the multi search api will execute
@@ -7778,7 +7813,7 @@
_Default:_ `true`

 ----
 client.msearchTemplate({
   index: string | string[],
-  search_type: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch',
+  search_type: 'query_then_fetch' | 'dfs_query_then_fetch',
   typed_keys: boolean,
   max_concurrent_searches: number,
   rest_total_hits_as_int: boolean,
@@ -7793,7 +7828,7 @@ link:{ref}/search-multi-search.html[Documentation] +
 |`string \| string[]` - A comma-separated list of index names to use as default

 |`search_type` or `searchType`
-|`'query_then_fetch' \| 'query_and_fetch' \| 'dfs_query_then_fetch' \| 'dfs_query_and_fetch'` - Search operation type
+|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type

 |`typed_keys` or `typedKeys`
 |`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response
@@ -8780,7 +8815,7 @@
 client.searchTemplate({
   preference: string,
   routing: string | string[],
   scroll: string,
-  search_type: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch',
+  search_type: 'query_then_fetch' | 'dfs_query_then_fetch',
   explain: boolean,
   profile: boolean,
   typed_keys: boolean,
@@ -8818,7 +8853,7 @@
_Default:_ `open`

 |`string` - Specify how long a consistent view of the index should be maintained for scrolled search

 |`search_type` or `searchType`
-|`'query_then_fetch' \| 'query_and_fetch' \| 'dfs_query_then_fetch' \| 'dfs_query_and_fetch'` - Search operation type
+|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type

 |`explain`
 |`boolean` - Specify whether to return detailed information about score computation as part of a hit
@@ -8841,6 +8876,23 @@
_Default:_ `true`

 |===

+[discrete]
+=== searchableSnapshots.cacheStats
+*Stability:* experimental
+[source,ts]
+----
+client.searchableSnapshots.cacheStats({
+  node_id: string | string[]
+})
+----
+link:{ref}/searchable-snapshots-apis.html[Documentation] +
+[cols=2*]
+|===
+|`node_id` or `nodeId`
+|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes
+
+|===

 [discrete]
 === searchableSnapshots.clearCache
 *Stability:* experimental
@@ -9037,6 +9089,31 @@ link:{ref}/security-api-clear-role-cache.html[Documentation] +

 |===

+[discrete]
+=== security.clearCachedServiceTokens
+*Stability:* beta
+[source,ts]
+----
+client.security.clearCachedServiceTokens({
+  namespace: string,
+  service: string,
+  name: string | string[]
+})
+----
+link:{ref}/security-api-clear-service-token-caches.html[Documentation] +
+[cols=2*]
+|===
+|`namespace`
+|`string` - An
identifier for the namespace
+
+|`service`
+|`string` - An identifier for the service name
+
+|`name`
+|`string \| string[]` - A comma-separated list of service token names
+
+|===

 [discrete]
 === security.createApiKey
@@ -9058,6 +9135,35 @@ link:{ref}/security-api-create-api-key.html[Documentation] +

 |===

+[discrete]
+=== security.createServiceToken
+*Stability:* beta
+[source,ts]
+----
+client.security.createServiceToken({
+  namespace: string,
+  service: string,
+  name: string,
+  refresh: 'true' | 'false' | 'wait_for'
+})
+----
+link:{ref}/security-api-create-service-token.html[Documentation] +
+[cols=2*]
+|===
+|`namespace`
+|`string` - An identifier for the namespace
+
+|`service`
+|`string` - An identifier for the service name
+
+|`name`
+|`string` - An identifier for the token name
+
+|`refresh`
+|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+|===

 [discrete]
 === security.deletePrivileges
@@ -9125,6 +9231,35 @@ link:{ref}/security-api-delete-role-mapping.html[Documentation] +

 |===

+[discrete]
+=== security.deleteServiceToken
+*Stability:* beta
+[source,ts]
+----
+client.security.deleteServiceToken({
+  namespace: string,
+  service: string,
+  name: string,
+  refresh: 'true' | 'false' | 'wait_for'
+})
+----
+link:{ref}/security-api-delete-service-token.html[Documentation] +
+[cols=2*]
+|===
+|`namespace`
+|`string` - An identifier for the namespace
+
+|`service`
+|`string` - An identifier for the service name
+
+|`name`
+|`string` - An identifier for the token name
+
+|`refresh`
+|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+|===

 [discrete]
 === security.deleteUser
@@ -9286,6 +9421,48 @@ link:{ref}/security-api-get-role-mapping.html[Documentation] +

 |===

+[discrete]
+=== security.getServiceAccounts
+*Stability:* beta
+[source,ts]
+----
+client.security.getServiceAccounts({
+  namespace: string,
+  service: string
+})
+----
+link:{ref}/security-api-get-service-accounts.html[Documentation] +
+[cols=2*]
+|===
+|`namespace`
+|`string` - An identifier for the namespace
+
+|`service`
+|`string` - An identifier for the service name
+
+|===
+
+[discrete]
+=== security.getServiceCredentials
+*Stability:* beta
+[source,ts]
+----
+client.security.getServiceCredentials({
+  namespace: string,
+  service: string
+})
+----
+link:{ref}/security-api-get-service-credentials.html[Documentation] +
+[cols=2*]
+|===
+|`namespace`
+|`string` - An identifier for the namespace
+
+|`service`
+|`string` - An identifier for the service name
+
+|===

 [discrete]
 === security.getToken
@@ -9863,6 +10040,7 @@
 client.snapshot.get({
   snapshot: string | string[],
   master_timeout: string,
   ignore_unavailable: boolean,
+  index_details: boolean,
   verbose: boolean
 })
 ----
@@ -9881,6 +10059,9 @@ link:{ref}/modules-snapshots.html[Documentation] +
 |`ignore_unavailable` or `ignoreUnavailable`
 |`boolean` - Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown

+|`index_details` or `indexDetails`
+|`boolean` - Whether to include details of each index in the snapshot, if those details are available. Defaults to false.
+
 |`verbose`
 |`boolean` - Whether to show verbose snapshot info or only show the basic info found in the repository index blob

 |===

diff --git a/index.d.ts b/index.d.ts
index 015f64383..45e78212f 100644
--- a/index.d.ts
+++ b/index.d.ts
@@ -747,6 +747,16 @@ declare class Client {
   fieldCaps, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback
   fieldCaps, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.FieldCaps, callback: callbackFn): TransportRequestCallback
   fieldCaps, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.FieldCaps, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  fleet: {
+    global_checkpoints, TContext = Context>(params?: RequestParams.FleetGlobalCheckpoints, options?: TransportRequestOptions): TransportRequestPromise>
+    global_checkpoints, TContext = Context>(callback: callbackFn): TransportRequestCallback
+    global_checkpoints, TContext = Context>(params: RequestParams.FleetGlobalCheckpoints, callback: callbackFn): TransportRequestCallback
+    global_checkpoints, TContext = Context>(params: RequestParams.FleetGlobalCheckpoints, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    globalCheckpoints, TContext = Context>(params?: RequestParams.FleetGlobalCheckpoints, options?: TransportRequestOptions): TransportRequestPromise>
+    globalCheckpoints, TContext = Context>(callback: callbackFn): TransportRequestCallback
+    globalCheckpoints, TContext = Context>(params: RequestParams.FleetGlobalCheckpoints, callback: callbackFn): TransportRequestCallback
+    globalCheckpoints, TContext = Context>(params: RequestParams.FleetGlobalCheckpoints, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  }
   get, TContext = Context>(params?: RequestParams.Get, options?: TransportRequestOptions): TransportRequestPromise>
   get, TContext = Context>(callback: callbackFn): TransportRequestCallback
   get, TContext = Context>(params: RequestParams.Get, callback: callbackFn): TransportRequestCallback
@@ -1832,10 +1842,10 @@ declare class Client {
   validateDetector, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlValidateDetector, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
 }
 monitoring: {
-  bulk, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MonitoringBulk, options?: TransportRequestOptions): TransportRequestPromise>
-  bulk, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback
-  bulk, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MonitoringBulk, callback: callbackFn): TransportRequestCallback
-  bulk, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MonitoringBulk, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  bulk, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params?: RequestParams.MonitoringBulk, options?: TransportRequestOptions): TransportRequestPromise>
+  bulk, TRequestBody extends RequestNDBody = Record[], TContext = Context>(callback: callbackFn): TransportRequestCallback
+  bulk, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.MonitoringBulk, callback: callbackFn): TransportRequestCallback
+  bulk, TRequestBody extends RequestNDBody = Record[],
TContext = Context>(params: RequestParams.MonitoringBulk, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
 }
 msearch, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params?: RequestParams.Msearch, options?: TransportRequestOptions): TransportRequestPromise>
 msearch, TRequestBody extends RequestNDBody = Record[], TContext = Context>(callback: callbackFn): TransportRequestCallback
@@ -2034,6 +2044,14 @@
 searchTemplate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SearchTemplate, callback: callbackFn): TransportRequestCallback
 searchTemplate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SearchTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
 searchable_snapshots: {
+  cache_stats, TContext = Context>(params?: RequestParams.SearchableSnapshotsCacheStats, options?: TransportRequestOptions): TransportRequestPromise>
+  cache_stats, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  cache_stats, TContext = Context>(params: RequestParams.SearchableSnapshotsCacheStats, callback: callbackFn): TransportRequestCallback
+  cache_stats, TContext = Context>(params: RequestParams.SearchableSnapshotsCacheStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  cacheStats, TContext = Context>(params?: RequestParams.SearchableSnapshotsCacheStats, options?: TransportRequestOptions): TransportRequestPromise>
+  cacheStats, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  cacheStats, TContext = Context>(params: RequestParams.SearchableSnapshotsCacheStats, callback: callbackFn): TransportRequestCallback
+  cacheStats, TContext = Context>(params: RequestParams.SearchableSnapshotsCacheStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   clear_cache, TContext = Context>(params?: RequestParams.SearchableSnapshotsClearCache, options?: TransportRequestOptions): TransportRequestPromise>
   clear_cache, TContext = Context>(callback: callbackFn): TransportRequestCallback
   clear_cache, TContext = Context>(params: RequestParams.SearchableSnapshotsClearCache, callback: callbackFn): TransportRequestCallback
@@ -2052,6 +2070,14 @@
   stats, TContext = Context>(params: RequestParams.SearchableSnapshotsStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
 }
 searchableSnapshots: {
+  cache_stats, TContext = Context>(params?: RequestParams.SearchableSnapshotsCacheStats, options?: TransportRequestOptions): TransportRequestPromise>
+  cache_stats, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  cache_stats, TContext = Context>(params: RequestParams.SearchableSnapshotsCacheStats, callback: callbackFn): TransportRequestCallback
+  cache_stats, TContext = Context>(params: RequestParams.SearchableSnapshotsCacheStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  cacheStats, TContext = Context>(params?: RequestParams.SearchableSnapshotsCacheStats, options?: TransportRequestOptions): TransportRequestPromise>
+  cacheStats, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  cacheStats, TContext = Context>(params: RequestParams.SearchableSnapshotsCacheStats, callback: callbackFn): TransportRequestCallback
+  cacheStats, TContext = Context>(params: RequestParams.SearchableSnapshotsCacheStats, options:
TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   clear_cache, TContext = Context>(params?: RequestParams.SearchableSnapshotsClearCache, options?: TransportRequestOptions): TransportRequestPromise>
   clear_cache, TContext = Context>(callback: callbackFn): TransportRequestCallback
   clear_cache, TContext = Context>(params: RequestParams.SearchableSnapshotsClearCache, callback: callbackFn): TransportRequestCallback
@@ -2114,6 +2140,14 @@
   clearCachedRoles, TContext = Context>(callback: callbackFn): TransportRequestCallback
   clearCachedRoles, TContext = Context>(params: RequestParams.SecurityClearCachedRoles, callback: callbackFn): TransportRequestCallback
   clearCachedRoles, TContext = Context>(params: RequestParams.SecurityClearCachedRoles, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  clear_cached_service_tokens, TContext = Context>(params?: RequestParams.SecurityClearCachedServiceTokens, options?: TransportRequestOptions): TransportRequestPromise>
+  clear_cached_service_tokens, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  clear_cached_service_tokens, TContext = Context>(params: RequestParams.SecurityClearCachedServiceTokens, callback: callbackFn): TransportRequestCallback
+  clear_cached_service_tokens, TContext = Context>(params: RequestParams.SecurityClearCachedServiceTokens, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  clearCachedServiceTokens, TContext = Context>(params?: RequestParams.SecurityClearCachedServiceTokens, options?: TransportRequestOptions): TransportRequestPromise>
+  clearCachedServiceTokens, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  clearCachedServiceTokens, TContext = Context>(params: RequestParams.SecurityClearCachedServiceTokens, callback: callbackFn): TransportRequestCallback
+  clearCachedServiceTokens, TContext = Context>(params: RequestParams.SecurityClearCachedServiceTokens, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   create_api_key, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecurityCreateApiKey, options?: TransportRequestOptions): TransportRequestPromise>
   create_api_key, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback
   create_api_key, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityCreateApiKey, callback: callbackFn): TransportRequestCallback
@@ -2122,6 +2156,14 @@
   createApiKey, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback
   createApiKey, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityCreateApiKey, callback: callbackFn): TransportRequestCallback
   createApiKey, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityCreateApiKey, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  create_service_token, TContext = Context>(params?: RequestParams.SecurityCreateServiceToken, options?: TransportRequestOptions): TransportRequestPromise>
+  create_service_token, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  create_service_token, TContext = Context>(params: RequestParams.SecurityCreateServiceToken, callback: callbackFn): TransportRequestCallback
+  create_service_token, TContext = Context>(params:
RequestParams.SecurityCreateServiceToken, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  createServiceToken, TContext = Context>(params?: RequestParams.SecurityCreateServiceToken, options?: TransportRequestOptions): TransportRequestPromise>
+  createServiceToken, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  createServiceToken, TContext = Context>(params: RequestParams.SecurityCreateServiceToken, callback: callbackFn): TransportRequestCallback
+  createServiceToken, TContext = Context>(params: RequestParams.SecurityCreateServiceToken, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   delete_privileges, TContext = Context>(params?: RequestParams.SecurityDeletePrivileges, options?: TransportRequestOptions): TransportRequestPromise>
   delete_privileges, TContext = Context>(callback: callbackFn): TransportRequestCallback
   delete_privileges, TContext = Context>(params: RequestParams.SecurityDeletePrivileges, callback: callbackFn): TransportRequestCallback
@@ -2146,6 +2188,14 @@
   deleteRoleMapping, TContext = Context>(callback: callbackFn): TransportRequestCallback
   deleteRoleMapping, TContext = Context>(params: RequestParams.SecurityDeleteRoleMapping, callback: callbackFn): TransportRequestCallback
   deleteRoleMapping, TContext = Context>(params: RequestParams.SecurityDeleteRoleMapping, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  delete_service_token, TContext = Context>(params?: RequestParams.SecurityDeleteServiceToken, options?: TransportRequestOptions): TransportRequestPromise>
+  delete_service_token, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  delete_service_token, TContext = Context>(params: RequestParams.SecurityDeleteServiceToken, callback: callbackFn): TransportRequestCallback
+  delete_service_token, TContext = Context>(params: RequestParams.SecurityDeleteServiceToken, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  deleteServiceToken, TContext = Context>(params?: RequestParams.SecurityDeleteServiceToken, options?: TransportRequestOptions): TransportRequestPromise>
+  deleteServiceToken, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  deleteServiceToken, TContext = Context>(params: RequestParams.SecurityDeleteServiceToken, callback: callbackFn): TransportRequestCallback
+  deleteServiceToken, TContext = Context>(params: RequestParams.SecurityDeleteServiceToken, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   delete_user, TContext = Context>(params?: RequestParams.SecurityDeleteUser, options?: TransportRequestOptions): TransportRequestPromise>
   delete_user, TContext = Context>(callback: callbackFn): TransportRequestCallback
   delete_user, TContext = Context>(params: RequestParams.SecurityDeleteUser, callback: callbackFn): TransportRequestCallback
@@ -2210,6 +2260,22 @@
   getRoleMapping, TContext = Context>(callback: callbackFn): TransportRequestCallback
   getRoleMapping, TContext = Context>(params: RequestParams.SecurityGetRoleMapping, callback: callbackFn): TransportRequestCallback
   getRoleMapping, TContext = Context>(params: RequestParams.SecurityGetRoleMapping, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  get_service_accounts, TContext = Context>(params?: RequestParams.SecurityGetServiceAccounts, options?: TransportRequestOptions): TransportRequestPromise>
+
get_service_accounts, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  get_service_accounts, TContext = Context>(params: RequestParams.SecurityGetServiceAccounts, callback: callbackFn): TransportRequestCallback
+  get_service_accounts, TContext = Context>(params: RequestParams.SecurityGetServiceAccounts, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  getServiceAccounts, TContext = Context>(params?: RequestParams.SecurityGetServiceAccounts, options?: TransportRequestOptions): TransportRequestPromise>
+  getServiceAccounts, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  getServiceAccounts, TContext = Context>(params: RequestParams.SecurityGetServiceAccounts, callback: callbackFn): TransportRequestCallback
+  getServiceAccounts, TContext = Context>(params: RequestParams.SecurityGetServiceAccounts, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  get_service_credentials, TContext = Context>(params?: RequestParams.SecurityGetServiceCredentials, options?: TransportRequestOptions): TransportRequestPromise>
+  get_service_credentials, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  get_service_credentials, TContext = Context>(params: RequestParams.SecurityGetServiceCredentials, callback: callbackFn): TransportRequestCallback
+  get_service_credentials, TContext = Context>(params: RequestParams.SecurityGetServiceCredentials, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  getServiceCredentials, TContext = Context>(params?: RequestParams.SecurityGetServiceCredentials, options?: TransportRequestOptions): TransportRequestPromise>
+  getServiceCredentials, TContext = Context>(callback: callbackFn): TransportRequestCallback
+  getServiceCredentials, TContext = Context>(params: RequestParams.SecurityGetServiceCredentials, callback: callbackFn): TransportRequestCallback
+  getServiceCredentials, TContext = Context>(params: RequestParams.SecurityGetServiceCredentials, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   get_token, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecurityGetToken, options?: TransportRequestOptions): TransportRequestPromise>
   get_token, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback
   get_token, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityGetToken, callback: callbackFn): TransportRequestCallback

From 147560ba44edde2a227e66e548f1434f991a52fd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?=
Date: Tue, 27 Apr 2021 11:21:15 +0200
Subject: [PATCH 007/647] [DOCS] Reviews Client helpers section in the Node.JS book (#1434)

* [DOCS] Reviews Client helpers section in the Node.JS book.
* [DOCS] Changes link.
* Update docs/index.asciidoc

---
 docs/helpers.asciidoc | 114 +++++++++++++++++++++++++++---------------
 1 file changed, 73 insertions(+), 41 deletions(-)

diff --git a/docs/helpers.asciidoc b/docs/helpers.asciidoc
index 78d91a9d5..71dd9de15 100644
--- a/docs/helpers.asciidoc
+++ b/docs/helpers.asciidoc
@@ -1,22 +1,26 @@
 [[client-helpers]]
-== Client Helpers
+== Client helpers

-The client comes with an handy collection of helpers to give you a more comfortable experience with some APIs.
+The client comes with an handy collection of helpers to give you a more
+comfortable experience with some APIs.
-CAUTION: The client helpers are experimental, and the API may change in the next minor releases. -The helpers will not work in any Node.js version lower than 10. +CAUTION: The client helpers are experimental, and the API may change in the next +minor releases. The helpers will not work in any Node.js version lower than 10. [discrete] -=== Bulk Helper +[[bulk-helper]] +=== Bulk helper ~Added~ ~in~ ~`v7.7.0`~ -Running Bulk requests can be complex due to the shape of the API, this helper aims to provide a nicer developer experience around the Bulk API. +Running bulk requests can be complex due to the shape of the API; this helper +aims to provide a nicer developer experience around the Bulk API. [discrete] ==== Usage + [source,js] ---- const { createReadStream } = require('fs') @@ -45,13 +49,14 @@ console.log(result) // } ---- -To create a new instance of the Bulk helper, you should access it as shown in the example above, the configuration options are: +To create a new instance of the Bulk helper, access it as shown in the example +above; the configuration options are: [cols=2*] |=== |`datasource` a|An array, async generator or a readable stream with the data you need to index/create/update/delete. It can be an array of strings or objects, but also a stream of json strings or JavaScript objects. + -If it is a stream, we recommend to use the https://www.npmjs.com/package/split2[`split2`] package, that will split the stream on new lines delimiters. + +If it is a stream, we recommend using the https://www.npmjs.com/package/split2[`split2`] package, which splits the stream on newline delimiters. + This parameter is mandatory. [source,js] ---- const b = client.helpers.bulk({ ---- |`onDocument` -a|A function that will be called for each document of the datasource. Inside this function you can manipulate the document and you must return the operation you want to execute with the document. Look at the link:{ref}/docs-bulk.html[Bulk API documentation] to see the supported operations. + +a|A function that is called for each document of the datasource. Inside this function you can manipulate the document, and you must return the operation you want to execute with the document. Look at the link:{ref}/docs-bulk.html[Bulk API documentation] to see the supported operations. + This parameter is mandatory. [source,js] ---- const b = client.helpers.bulk({ ---- |`onDrop` -a|A function that will be called for everytime a document can't be indexed and it has reached the maximum amount of retries. +a|A function that is called every time a document can't be indexed and it has reached the maximum amount of retries. [source,js] ---- const b = client.helpers.bulk({ ---- |`flushInterval` -a|How much time (in milliseconds) the helper will wait before flushing the body from the last document read. + +a|How much time (in milliseconds) the helper waits before flushing the body from the last document read. + _Default:_ `30000` [source,js] ---- const b = client.helpers.bulk({ ---- |`concurrency` -a|How many request will be executed at the same time. + +a|How many requests are executed at the same time. + _Default:_ `5` [source,js] ---- const b = client.helpers.bulk({ ---- |`retries` -a|How many times a document will be retried before to call the `onDrop` callback. + +a|How many times a document is retried before calling the `onDrop` callback. + _Default:_ Client max retries.
[source,js] ---- const b = client.helpers.bulk({ ---- |`refreshOnCompletion` -a|If `true`, at the end of the bulk operation it will run a refresh on all indices or on the specified indices. + +a|If `true`, at the end of the bulk operation it runs a refresh on all indices or on the specified indices. + _Default:_ false. [source,js] ---- const b = client.helpers.bulk({ ---- [discrete] ===== Index + [source,js] ---- client.helpers.bulk({ [discrete] ===== Create + [source,js] ---- client.helpers.bulk({ [discrete] ==== Abort a bulk operation -If needed, you can abort a bulk operation at any time. The bulk helper returns a https://promisesaplus.com/[thenable], which has an `abort` method. +If needed, you can abort a bulk operation at any time. The bulk helper returns a +https://promisesaplus.com/[thenable], which has an `abort` method. -NOTE: The abort method will stop the execution of the bulk operation, but if you are using a concurrency higher than one, the operations that are already running will not be stopped. +NOTE: The abort method stops the execution of the bulk operation, but if you +are using a concurrency higher than one, the operations that are already running +will not be stopped. [source,js] ---- console.log(await b) [discrete] ==== Passing custom options to the Bulk API -You can pass any option supported by the link:{ref}/docs-bulk.html#docs-bulk-api-query-params[Bulk API] to the helper, and the helper will use those options in conjuction with the Bulk -API call. +You can pass any option supported by the link:{ref}/docs-bulk.html#docs-bulk-api-query-params[Bulk API] to the helper, and the +helper uses those options in conjunction with the Bulk API call. [source,js] ---- console.log(result) [discrete] -=== Multi Search Helper +[[multi-search-helper]] +=== Multi search helper ~Added~ ~in~ ~`v7.8.0`~ -If you are sending search request at a high rate, this helper might be useful for you. -It will use the mutli search API under the hood to batch the requests and improve the overall performances of your application. + -The `result` exposes a `documents` property as well, which allows you to access directly the hits sources. +If you send search requests at a high rate, this helper might be useful +for you. It uses the multi search API under the hood to batch the requests +and improve the overall performance of your application. The `result` exposes a +`documents` property as well, which allows you to access the hits +sources directly. [discrete] ==== Usage [source,js] ---- m.search( ) ---- -To create a new instance of the Msearch helper, you should access it as shown in the example above, the configuration options are: +To create a new instance of the multi search (msearch) helper, you should access +it as shown in the example above; the configuration options are: [cols=2*] |=== |`operations` [source,js] ---- const m = client.helpers.msearch({ ---- |`flushInterval` -a|How much time (in milliseconds) the helper will wait before flushing the operations from the last operation read. + +a|How much time (in milliseconds) the helper waits before flushing the operations from the last operation read. + _Default:_ `500` [source,js] ---- const m = client.helpers.msearch({ ---- |`concurrency` -a|How many request will be executed at the same time. + +a|How many requests are executed at the same time.
+ _Default:_ `5` [source,js] ---- const m = client.helpers.msearch({ ---- |`retries` -a|How many times an operation will be retried before to resolve the request. An operation will be retried only in case of a 429 error. + +a|How many times an operation is retried before resolving the request. An operation is retried only in case of a 429 error. + _Default:_ Client max retries. [source,js] ---- const m = client.helpers.msearch({ ---- [discrete] -==== Stopping the Msearch Helper +==== Stopping the msearch helper -If needed, you can stop a msearch processor at any time. The msearch helper returns a https://promisesaplus.com/[thenable], which has an `stop` method. +If needed, you can stop an msearch processor at any time. The msearch helper +returns a https://promisesaplus.com/[thenable], which has a `stop` method. -If you are creating multiple msearch helpers instances and using them for a limitied period of time, remember to always use the `stop` method once you have finished using them, otherwise your application will start leaking memory. +If you are creating multiple msearch helper instances and using them for a +limited period of time, remember to always use the `stop` method once you have +finished using them; otherwise your application will start leaking memory. -The `stop` method accepts an optional error, that will be dispatched every subsequent search request. +The `stop` method accepts an optional error that will be dispatched to every +subsequent search request. -NOTE: The stop method will stop the execution of the msearch processor, but if you are using a concurrency higher than one, the operations that are already running will not be stopped. +NOTE: The stop method stops the execution of the msearch processor, but if +you are using a concurrency higher than one, the operations that are already +running will not be stopped. [source,js] ---- setImmediate(() => m.stop()) [discrete] -=== Search Helper +[[search-helper]] +=== Search helper ~Added~ ~in~ ~`v7.7.0`~ -A simple wrapper around the search API. Instead of returning the entire `result` object it will return only the search documents source. -For improving the performances, this helper automatically adds `filter_path=hits.hits._source` to the querystring. +A simple wrapper around the search API. Instead of returning the entire `result` +object, it returns only the search documents source. To improve +performance, this helper automatically adds `filter_path=hits.hits._source` to +the query string. [source,js] ---- for (const doc of documents) { [discrete] -=== Scroll Search Helper +[[scroll-search-helper]] +=== Scroll search helper ~Added~ ~in~ ~`v7.7.0`~ -This helpers offers a simple and intuitive way to use the scroll search API. Once called, it returns an https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of[async iterator] which can be used in conjuction with a for-await...of. + -It handles automatically the `429` error and uses the client's `maxRetries` option. +This helper offers a simple and intuitive way to use the scroll search API. +Once called, it returns an +https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/async_function[async iterator] +which can be used in conjunction with a for-await...of. It automatically handles +the `429` error and uses the `maxRetries` option of the client.
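Because the helper draws its retry budget from the client, a minimal sketch of raising that budget is shown first; the value `10` is an arbitrary assumption, and the full scroll example follows.

[source,js]
----
const { Client } = require('@elastic/elasticsearch')
// the scroll helper retries 429 responses up to maxRetries times;
// 10 is an arbitrary value picked for illustration
const client = new Client({
  node: 'http://localhost:9200/',
  maxRetries: 10
})
----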
[source,js] ---- for await (const result of scrollSearch) { [discrete] ==== Quickly getting the documents -If you only need the documents from the result of a scroll search, you can access them via `result.documents`: +If you only need the documents from the result of a scroll search, you can +access them via `result.documents`: [source,js] ---- for await (const result of scrollSearch) { [discrete] -=== Scroll Documents Helper +[[scroll-documents-helper]] +=== Scroll documents helper ~Added~ ~in~ ~`v7.7.0`~ -It works in the same way as the scroll search helper, but it returns only the documents instead. Note, every loop cycle will return you a single document, and you can't use the `clear` method. -For improving the performances, this helper automatically adds `filter_path=hits.hits._source` to the querystring. +It works in the same way as the scroll search helper, but it returns only the +documents instead. Note that every loop cycle returns a single document, and you +can't use the `clear` method. To improve performance, this helper +automatically adds `filter_path=hits.hits._source` to the query string. [source,js] ---- From dc2de57bd3d2220d869d16ca551a31c0e91dd10b Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Mon, 3 May 2021 16:38:58 +0200 Subject: [PATCH 008/647] Improve response error message (#1457) --- lib/errors.js | 14 +++++++- test/unit/errors.test.js | 72 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 1 deletion(-) diff --git a/lib/errors.js b/lib/errors.js index 5f6d199d4..657419c8e 100644 --- a/lib/errors.js +++ b/lib/errors.js @@ -90,7 +90,15 @@ class ResponseError extends ElasticsearchClientError { super('Response Error') Error.captureStackTrace(this, ResponseError) this.name = 'ResponseError' - this.message = (meta.body && meta.body.error && meta.body.error.type) || 'Response Error' + if (meta.body && meta.body.error && meta.body.status) { + if (Array.isArray(meta.body.error.root_cause)) { + this.message = meta.body.error.root_cause.map(entry => `[${entry.type}] Reason: ${entry.reason}`).join('; ') + } else { + this.message = 'Response Error' + } + } else { + this.message = 'Response Error' + } this.meta = meta } @@ -108,6 +116,10 @@ class ResponseError extends ElasticsearchClientError { get headers () { return this.meta.headers } + + toString () { + return JSON.stringify(this.meta.body) + } } class RequestAbortedError extends ElasticsearchClientError { diff --git a/test/unit/errors.test.js b/test/unit/errors.test.js index 147304308..7c3f706a1 100644 --- a/test/unit/errors.test.js +++ b/test/unit/errors.test.js @@ -103,3 +103,75 @@ test('RequestAbortedError', t => { t.true(err.hasOwnProperty('meta')) t.end() }) + +test('ResponseError with meaningful message / 1', t => { + const meta = { + body: { + error: { + root_cause: [ + { + type: 'index_not_found_exception', + reason: 'no such index [foo]', + 'resource.type': 'index_expression', + 'resource.id': 'foo', + index_uuid: '_na_', + index: 'foo' + } + ], + type: 'index_not_found_exception', + reason: 'no such index [foo]', + 'resource.type': 'index_expression', + 'resource.id': 'foo', + index_uuid: '_na_', + index: 'foo' + }, + status: 404 + }, + statusCode: 404, + headers: {} + } + const err = new errors.ResponseError(meta) + t.strictEqual(err.message, '[index_not_found_exception] Reason: no such index [foo]') + t.strictEqual(err.toString(), JSON.stringify(meta.body)) + t.end() +}) + +test('ResponseError with meaningful message / 2', t => {
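+ // As in the previous test, but with two entries in root_cause: the
+ // constructor joins one `[type] Reason: <reason>` fragment per entry with '; '.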
+ const meta = { + body: { + error: { + root_cause: [ + { + type: 'index_not_found_exception', + reason: 'no such index [foo]', + 'resource.type': 'index_expression', + 'resource.id': 'foo', + index_uuid: '_na_', + index: 'foo' + }, + { + type: 'nested_cause', + reason: 'this is a nested cause', + 'resource.type': 'index_expression', + 'resource.id': 'foo', + index_uuid: '_na_', + index: 'foo' + } + ], + type: 'index_not_found_exception', + reason: 'no such index [foo]', + 'resource.type': 'index_expression', + 'resource.id': 'foo', + index_uuid: '_na_', + index: 'foo' + }, + status: 404 + }, + statusCode: 404, + headers: {} + } + const err = new errors.ResponseError(meta) + t.strictEqual(err.message, '[index_not_found_exception] Reason: no such index [foo]; [nested_cause] Reason: this is a nested cause') + t.strictEqual(err.toString(), JSON.stringify(meta.body)) + t.end() +}) From d5f61c7833444dee9db293482c7e4fe9d510ee1f Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Mon, 3 May 2021 16:40:09 +0200 Subject: [PATCH 009/647] Catch HEAD errors (#1460) --- lib/Transport.js | 4 ++-- test/unit/transport.test.js | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/Transport.js b/lib/Transport.js index c808c51bc..dae838d1b 100644 --- a/lib/Transport.js +++ b/lib/Transport.js @@ -306,8 +306,8 @@ class Transport { return callback(err, result) } } else { - // cast to boolean if the request method was HEAD - result.body = isHead === true ? true : payload + // cast to boolean if the request method was HEAD and there was no error + result.body = isHead === true && result.statusCode < 400 ? true : payload } // we should ignore the statusCode if the user has configured the `ignore` field with diff --git a/test/unit/transport.test.js b/test/unit/transport.test.js index 479f0419b..dbb1f5df0 100644 --- a/test/unit/transport.test.js +++ b/test/unit/transport.test.js @@ -1613,7 +1613,7 @@ test('Should cast to boolean HEAD request', t => { }) t.test('4xx response', t => { - t.plan(2) + t.plan(3) const pool = new ConnectionPool({ Connection: MockConnection }) pool.addConnection('http://localhost:9200/') @@ -1633,12 +1633,13 @@ test('Should cast to boolean HEAD request', t => { path: '/400' }, (err, { body, statusCode }) => { t.ok(err instanceof ResponseError) + t.false(typeof err.body === 'boolean') t.strictEqual(statusCode, 400) }) }) t.test('5xx response', t => { - t.plan(2) + t.plan(3) const pool = new ConnectionPool({ Connection: MockConnection }) pool.addConnection('http://localhost:9200/') @@ -1657,6 +1658,7 @@ test('Should cast to boolean HEAD request', t => { path: '/500' }, (err, { body, statusCode }) => { t.ok(err instanceof ResponseError) + t.false(typeof err.body === 'boolean') t.strictEqual(statusCode, 500) }) }) From ce6c459eabe83d6ce40a9cfa262cd53b5453543f Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 3 May 2021 16:56:33 +0200 Subject: [PATCH 010/647] Bumped v8.0.0-canary.8 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index a3b610002..7db5df52b 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.7", + "versionCanary": "8.0.0-canary.8", "keywords": [ "elasticsearch", "elastic", From 623bd729108e6cbd73207f2f60ee0492665038df Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Wed, 12 
May 2021 15:12:49 +0200 Subject: [PATCH 011/647] Updated integration test helpers (#1466) --- test/integration/helper.js | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/helper.js b/test/integration/helper.js index 39b87a27c..eb2021040 100644 --- a/test/integration/helper.js +++ b/test/integration/helper.js @@ -87,6 +87,7 @@ function isXPackTemplate (name) { case '.deprecation-indexing-template': case '.deprecation-indexing-mappings': case '.deprecation-indexing-settings': + case 'data-streams-mappings': return true } return false From 791f2168a3b99b1fd78b5940ec5cbd8ca0ac4293 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 12 May 2021 17:38:05 +0200 Subject: [PATCH 012/647] API generation --- api/api/termsenum.js | 56 +++++++++++++++++++++++++++++++++++++++++ api/index.js | 2 ++ api/requestParams.d.ts | 5 ++++ docs/reference.asciidoc | 21 ++++++++++++++++ index.d.ts | 4 +++ 5 files changed, 88 insertions(+) create mode 100644 api/api/termsenum.js diff --git a/api/api/termsenum.js b/api/api/termsenum.js new file mode 100644 index 000000000..029c4769d --- /dev/null +++ b/api/api/termsenum.js @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +'use strict' + +/* eslint camelcase: 0 */ +/* eslint no-unused-vars: 0 */ + +const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') +const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path'] +const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } + +function termsenumApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.index == null) { + const err = new this[kConfigurationError]('Missing required parameter: index') + return handleError(err, callback) + } + + let { method, body, index, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = body == null ? 
'GET' : 'POST' + path = '/' + encodeURIComponent(index) + '/' + '_terms_enum' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + +module.exports = termsenumApi diff --git a/api/index.js b/api/index.js index ca8bb5d80..e226a1f55 100644 --- a/api/index.js +++ b/api/index.js @@ -84,6 +84,7 @@ const SnapshotApi = require('./api/snapshot') const SqlApi = require('./api/sql') const SslApi = require('./api/ssl') const TasksApi = require('./api/tasks') +const termsenumApi = require('./api/termsenum') const termvectorsApi = require('./api/termvectors') const TextStructureApi = require('./api/text_structure') const TransformApi = require('./api/transform') @@ -201,6 +202,7 @@ ESAPI.prototype.scroll = scrollApi ESAPI.prototype.search = searchApi ESAPI.prototype.searchShards = searchShardsApi ESAPI.prototype.searchTemplate = searchTemplateApi +ESAPI.prototype.termsenum = termsenumApi ESAPI.prototype.termvectors = termvectorsApi ESAPI.prototype.update = updateApi ESAPI.prototype.updateByQuery = updateByQueryApi diff --git a/api/requestParams.d.ts b/api/requestParams.d.ts index e5332916d..1391ed578 100644 --- a/api/requestParams.d.ts +++ b/api/requestParams.d.ts @@ -2509,6 +2509,11 @@ export interface TasksList extends Generic { timeout?: string; } +export interface Termsenum extends Generic { + index: string | string[]; + body?: T; +} + export interface Termvectors extends Generic { index: string; id?: string; diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index d3c57ffc4..de5c23a2b 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -10345,6 +10345,27 @@ _Default:_ `nodes` |=== +[discrete] +=== termsenum +*Stability:* beta +[source,ts] +---- +client.termsenum({ + index: string | string[], + body: object +}) +---- +link:{ref}/terms-enum.html[Documentation] + +[cols=2*] +|=== +|`index` +|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices + +|`body` +|`object` - field name, string which is the prefix expected in matching terms, timeout and size for max number of results + +|=== + [discrete] === termvectors diff --git a/index.d.ts b/index.d.ts index 45e78212f..1064b3b69 100644 --- a/index.d.ts +++ b/index.d.ts @@ -2561,6 +2561,10 @@ declare class Client { list, TContext = Context>(params: RequestParams.TasksList, callback: callbackFn): TransportRequestCallback list, TContext = Context>(params: RequestParams.TasksList, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } + termsenum, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.Termsenum, options?: TransportRequestOptions): TransportRequestPromise> + termsenum, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + termsenum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Termsenum, callback: callbackFn): TransportRequestCallback + termsenum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Termsenum, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback termvectors, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.Termvectors, options?: TransportRequestOptions): TransportRequestPromise> termvectors, TRequestBody extends RequestBody = Record, TContext = 
Context>(callback: callbackFn): TransportRequestCallback termvectors, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Termvectors, callback: callbackFn): TransportRequestCallback From 52d68a0e83b7a4a489ccf9f4a5b66ad4d6de90a1 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 12 May 2021 17:41:45 +0200 Subject: [PATCH 013/647] Updated type definitions --- api/kibana.d.ts | 513 +- api/new.d.ts | 1782 ++-- api/types.d.ts | 21965 ++++++++++++++++++++++++---------------------- 3 files changed, 12829 insertions(+), 11431 deletions(-) diff --git a/api/kibana.d.ts b/api/kibana.d.ts index 4b1e6fc48..6fbb8d54c 100644 --- a/api/kibana.d.ts +++ b/api/kibana.d.ts @@ -119,19 +119,19 @@ interface KibanaClient { transforms(params?: T.CatTransformsRequest, options?: TransportRequestOptions): TransportRequestPromise> } ccr: { - deleteAutoFollowPattern(params: T.DeleteAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise> - follow(params: T.CreateFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - followInfo(params: T.FollowInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> - followStats(params: T.FollowIndexStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - forgetFollower(params: T.ForgetFollowerIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - getAutoFollowPattern(params?: T.GetAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise> - pauseAutoFollowPattern(params: T.PauseAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise> - pauseFollow(params: T.PauseFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - putAutoFollowPattern(params: T.PutAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise> - resumeAutoFollowPattern(params: T.ResumeAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise> - resumeFollow(params: T.ResumeFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> + deleteAutoFollowPattern(params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise> + follow(params: T.CcrCreateFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> + followInfo(params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> + followStats(params: T.CcrFollowIndexStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> + forgetFollower(params: T.CcrForgetFollowerIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> + getAutoFollowPattern(params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise> + pauseAutoFollowPattern(params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise> + pauseFollow(params: T.CcrPauseFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> + putAutoFollowPattern(params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise> + resumeAutoFollowPattern(params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise> + resumeFollow(params: T.CcrResumeFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> stats(params?: T.CcrStatsRequest, options?: TransportRequestOptions): 
TransportRequestPromise> - unfollow(params: T.UnfollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> + unfollow(params: T.CcrUnfollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> } clearScroll(params?: T.ClearScrollRequest, options?: TransportRequestOptions): TransportRequestPromise> closePointInTime(params?: T.ClosePointInTimeRequest, options?: TransportRequestOptions): TransportRequestPromise> @@ -140,14 +140,14 @@ interface KibanaClient { deleteComponentTemplate(params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> deleteVotingConfigExclusions(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> existsComponentTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getComponentTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + getComponentTemplate(params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> getSettings(params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> health(params?: T.ClusterHealthRequest, options?: TransportRequestOptions): TransportRequestPromise> pendingTasks(params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): TransportRequestPromise> postVotingConfigExclusions(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - putComponentTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + putComponentTemplate(params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> putSettings(params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> - remoteInfo(params?: T.RemoteInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> + remoteInfo(params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> reroute(params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): TransportRequestPromise> state(params?: T.ClusterStateRequest, options?: TransportRequestOptions): TransportRequestPromise> stats(params?: T.ClusterStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> @@ -174,10 +174,10 @@ interface KibanaClient { deleteByQueryRethrottle(params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): TransportRequestPromise> deleteScript(params: T.DeleteScriptRequest, options?: TransportRequestOptions): TransportRequestPromise> enrich: { - deletePolicy(params: T.DeleteEnrichPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> - executePolicy(params: T.ExecuteEnrichPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> - getPolicy(params?: T.GetEnrichPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> - putPolicy(params: T.PutEnrichPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> + deletePolicy(params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> + executePolicy(params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> + getPolicy(params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> + putPolicy(params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> stats(params?: 
T.EnrichStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> } eql: { @@ -186,257 +186,266 @@ interface KibanaClient { getStatus(params: T.EqlGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> search(params: T.EqlSearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> } - exists(params: T.DocumentExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> - existsSource(params: T.SourceExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> + exists(params: T.ExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> + existsSource(params: T.ExistsSourceRequest, options?: TransportRequestOptions): TransportRequestPromise> explain(params: T.ExplainRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> features: { getFeatures(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> resetFeatures(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> } - fieldCaps(params?: T.FieldCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise> + fieldCaps(params?: T.FieldCapsRequest, options?: TransportRequestOptions): TransportRequestPromise> + fleet: { + globalCheckpoints(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + } get(params: T.GetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> getScript(params: T.GetScriptRequest, options?: TransportRequestOptions): TransportRequestPromise> - getScriptContext(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getScriptLanguages(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getSource(params: T.SourceRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> + getScriptContext(params?: T.GetScriptContextRequest, options?: TransportRequestOptions): TransportRequestPromise> + getScriptLanguages(params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): TransportRequestPromise> + getSource(params?: T.GetSourceRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> graph: { explore(params: T.GraphExploreRequest, options?: TransportRequestOptions): TransportRequestPromise> } ilm: { - deleteLifecycle(params: T.DeleteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> - explainLifecycle(params: T.ExplainLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> - getLifecycle(params?: T.GetLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> - getStatus(params?: T.GetIlmStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - moveToStep(params: T.MoveToStepRequest, options?: TransportRequestOptions): TransportRequestPromise> - putLifecycle(params?: T.PutLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> - removePolicy(params: T.RemovePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> - retry(params: T.RetryIlmRequest, options?: TransportRequestOptions): TransportRequestPromise> - start(params?: T.StartIlmRequest, options?: TransportRequestOptions): TransportRequestPromise> - stop(params?: T.StopIlmRequest, options?: TransportRequestOptions): TransportRequestPromise> + deleteLifecycle(params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> + explainLifecycle(params: 
T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> + getLifecycle(params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> + getStatus(params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> + moveToStep(params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): TransportRequestPromise> + putLifecycle(params?: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> + removePolicy(params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> + retry(params: T.IlmRetryRequest, options?: TransportRequestOptions): TransportRequestPromise> + start(params?: T.IlmStartRequest, options?: TransportRequestOptions): TransportRequestPromise> + stop(params?: T.IlmStopRequest, options?: TransportRequestOptions): TransportRequestPromise> } index(params: T.IndexRequest, options?: TransportRequestOptions): TransportRequestPromise> indices: { - addBlock(params: T.IndexAddBlockRequest, options?: TransportRequestOptions): TransportRequestPromise> - analyze(params?: T.AnalyzeRequest, options?: TransportRequestOptions): TransportRequestPromise> - clearCache(params?: T.ClearCacheRequest, options?: TransportRequestOptions): TransportRequestPromise> - clone(params: T.CloneIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - close(params: T.CloseIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - create(params: T.CreateIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> + addBlock(params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): TransportRequestPromise> + analyze(params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): TransportRequestPromise> + clearCache(params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): TransportRequestPromise> + clone(params: T.IndicesCloneRequest, options?: TransportRequestOptions): TransportRequestPromise> + close(params: T.IndicesCloseRequest, options?: TransportRequestOptions): TransportRequestPromise> + create(params: T.IndicesCreateRequest, options?: TransportRequestOptions): TransportRequestPromise> createDataStream(params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise> dataStreamsStats(params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - delete(params: T.DeleteIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteAlias(params: T.DeleteAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> + delete(params: T.IndicesDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise> + deleteAlias(params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> deleteDataStream(params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteIndexTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - deleteTemplate(params: T.DeleteIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - exists(params: T.IndexExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> - existsAlias(params: T.AliasExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> - existsIndexTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - 
existsTemplate(params: T.IndexTemplateExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> - existsType(params: T.TypeExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> - flush(params?: T.FlushRequest, options?: TransportRequestOptions): TransportRequestPromise> - flushSynced(params?: T.SyncedFlushRequest, options?: TransportRequestOptions): TransportRequestPromise> - forcemerge(params?: T.ForceMergeRequest, options?: TransportRequestOptions): TransportRequestPromise> - freeze(params: T.FreezeIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - get(params: T.GetIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - getAlias(params?: T.GetAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> + deleteIndexTemplate(params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> + deleteTemplate(params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> + exists(params: T.IndicesExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> + existsAlias(params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> + existsIndexTemplate(params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> + existsTemplate(params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> + existsType(params: T.IndicesExistsTypeRequest, options?: TransportRequestOptions): TransportRequestPromise> + flush(params?: T.IndicesFlushRequest, options?: TransportRequestOptions): TransportRequestPromise> + flushSynced(params?: T.IndicesFlushSyncedRequest, options?: TransportRequestOptions): TransportRequestPromise> + forcemerge(params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): TransportRequestPromise> + freeze(params: T.IndicesFreezeRequest, options?: TransportRequestOptions): TransportRequestPromise> + get(params: T.IndicesGetRequest, options?: TransportRequestOptions): TransportRequestPromise> + getAlias(params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> getDataStream(params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise> - getFieldMapping(params: T.GetFieldMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> - getIndexTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getMapping(params?: T.GetMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> - getSettings(params?: T.GetIndexSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getTemplate(params?: T.GetIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> + getFieldMapping(params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> + getIndexTemplate(params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> + getMapping(params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> + getSettings(params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getTemplate(params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> getUpgrade(params?: TODO, options?: TransportRequestOptions): 
TransportRequestPromise> migrateToDataStream(params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise> - open(params: T.OpenIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> + open(params: T.IndicesOpenRequest, options?: TransportRequestOptions): TransportRequestPromise> promoteDataStream(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - putAlias(params: T.PutAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> - putIndexTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - putMapping(params?: T.PutMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> - putSettings(params?: T.UpdateIndexSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> - putTemplate(params: T.PutIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - recovery(params?: T.RecoveryStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - refresh(params?: T.RefreshRequest, options?: TransportRequestOptions): TransportRequestPromise> - reloadSearchAnalyzers(params: T.ReloadSearchAnalyzersRequest, options?: TransportRequestOptions): TransportRequestPromise> - resolveIndex(params: T.ResolveIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - rollover(params: T.RolloverIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - segments(params?: T.SegmentsRequest, options?: TransportRequestOptions): TransportRequestPromise> + putAlias(params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> + putIndexTemplate(params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> + putMapping(params?: T.IndicesPutMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> + putSettings(params?: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> + putTemplate(params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> + recovery(params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): TransportRequestPromise> + refresh(params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): TransportRequestPromise> + reloadSearchAnalyzers(params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): TransportRequestPromise> + resolveIndex(params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> + rollover(params: T.IndicesRolloverRequest, options?: TransportRequestOptions): TransportRequestPromise> + segments(params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): TransportRequestPromise> shardStores(params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): TransportRequestPromise> - shrink(params: T.ShrinkIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - simulateIndexTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + shrink(params: T.IndicesShrinkRequest, options?: TransportRequestOptions): TransportRequestPromise> + simulateIndexTemplate(params?: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> simulateTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - split(params: T.SplitIndexRequest, options?: TransportRequestOptions): 
TransportRequestPromise> + split(params: T.IndicesSplitRequest, options?: TransportRequestOptions): TransportRequestPromise> stats(params?: T.IndicesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - unfreeze(params: T.UnfreezeIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateAliases(params?: T.BulkAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> + unfreeze(params: T.IndicesUnfreezeRequest, options?: TransportRequestOptions): TransportRequestPromise> + updateAliases(params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): TransportRequestPromise> upgrade(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - validateQuery(params?: T.ValidateQueryRequest, options?: TransportRequestOptions): TransportRequestPromise> + validateQuery(params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): TransportRequestPromise> } - info(params?: T.RootNodeInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> + info(params?: T.InfoRequest, options?: TransportRequestOptions): TransportRequestPromise> ingest: { - deletePipeline(params: T.DeletePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> - geoIpStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getPipeline(params?: T.GetPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> - processorGrok(params?: T.GrokProcessorPatternsRequest, options?: TransportRequestOptions): TransportRequestPromise> - putPipeline(params: T.PutPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> - simulate(params?: T.SimulatePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> + deletePipeline(params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> + geoIpStats(params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getPipeline(params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> + processorGrok(params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): TransportRequestPromise> + putPipeline(params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> + simulate(params?: T.IngestSimulatePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> } license: { - delete(params?: T.DeleteLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - get(params?: T.GetLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - getBasicStatus(params?: T.GetBasicLicenseStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - getTrialStatus(params?: T.GetTrialLicenseStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - post(params?: T.PostLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - postStartBasic(params?: T.StartBasicLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - postStartTrial(params?: T.StartTrialLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> + delete(params?: T.LicenseDeleteLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> + get(params?: T.LicenseGetLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> + getBasicStatus(params?: T.LicenseGetBasicLicenseStatusRequest, 
options?: TransportRequestOptions): TransportRequestPromise> + getTrialStatus(params?: T.LicenseGetTrialLicenseStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> + post(params?: T.LicensePostLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> + postStartBasic(params?: T.LicenseStartBasicLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> + postStartTrial(params?: T.LicenseStartTrialLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> } logstash: { deletePipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> getPipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> putPipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> } - mget(params?: T.MultiGetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> + mget(params?: T.MgetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> migration: { - deprecations(params?: T.DeprecationInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> + deprecations(params?: T.MigrationDeprecationInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> } ml: { - closeJob(params: T.CloseJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteCalendar(params: T.DeleteCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteCalendarEvent(params: T.DeleteCalendarEventRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteCalendarJob(params: T.DeleteCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteDataFrameAnalytics(params: T.DeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteDatafeed(params: T.DeleteDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteExpiredData(params?: T.DeleteExpiredDataRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteFilter(params: T.DeleteFilterRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteForecast(params: T.DeleteForecastRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteJob(params: T.DeleteJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteModelSnapshot(params: T.DeleteModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteTrainedModel(params: T.DeleteTrainedModelRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteTrainedModelAlias(params: T.DeleteTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> - estimateModelMemory(params?: T.EstimateModelMemoryRequest, options?: TransportRequestOptions): TransportRequestPromise> - evaluateDataFrame(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - explainDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + closeJob(params: T.MlCloseJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + deleteCalendar(params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise> + deleteCalendarEvent(params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): TransportRequestPromise> + deleteCalendarJob(params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): 
TransportRequestPromise>
+    deleteDataFrameAnalytics(params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteDatafeed(params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteExpiredData(params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteFilter(params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteForecast(params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteJob(params: T.MlDeleteJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteModelSnapshot(params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteTrainedModel(params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteTrainedModelAlias(params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    estimateModelMemory(params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    evaluateDataFrame(params?: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    explainDataFrameAnalytics(params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
     findFileStructure(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    flushJob(params: T.FlushJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    forecast(params: T.ForecastJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getBuckets(params: T.GetBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getCalendarEvents(params: T.GetCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getCalendars(params?: T.GetCalendarsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getCategories(params: T.GetCategoriesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getDataFrameAnalyticsStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getDatafeedStats(params?: T.GetDatafeedStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getDatafeeds(params?: T.GetDatafeedsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getFilters(params?: T.GetFiltersRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getInfluencers(params: T.GetInfluencersRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getJobStats(params?: T.GetJobStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getJobs(params?: T.GetJobsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getModelSnapshots(params: T.GetModelSnapshotsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getOverallBuckets(params: T.GetOverallBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getRecords(params: T.GetAnomalyRecordsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getTrainedModels(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getTrainedModelsStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    info(params?: T.MachineLearningInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    openJob(params: T.OpenJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    postCalendarEvents(params: T.PostCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    postData(params: T.PostJobDataRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    previewDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    previewDatafeed(params: T.PreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    putCalendar(params: T.PutCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putCalendarJob(params: T.PutCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    putDatafeed(params: T.PutDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putFilter(params: T.PutFilterRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putJob(params: T.PutJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    flushJob(params: T.MlFlushJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    forecast(params: T.MlForecastJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getBuckets(params: T.MlGetBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getCalendarEvents(params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getCalendars(params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getCategories(params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getDataFrameAnalytics(params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getDataFrameAnalyticsStats(params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getDatafeedStats(params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getDatafeeds(params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getFilters(params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getInfluencers(params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getJobStats(params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getJobs(params?: T.MlGetJobsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getModelSnapshots(params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getOverallBuckets(params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getRecords(params: T.MlGetAnomalyRecordsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getTrainedModels(params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getTrainedModelsStats(params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    info(params?: T.MlInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    openJob(params: T.MlOpenJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    postCalendarEvents(params?: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    postData(params: T.MlPostJobDataRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    previewDataFrameAnalytics(params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    previewDatafeed(params: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
+    putCalendar(params: T.MlPutCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putCalendarJob(params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putDataFrameAnalytics(params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putDatafeed(params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putFilter(params: T.MlPutFilterRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putJob(params: T.MlPutJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
     putTrainedModel(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    putTrainedModelAlias(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    revertModelSnapshot(params: T.RevertModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    setUpgradeMode(params?: T.SetUpgradeModeRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    startDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    startDatafeed(params: T.StartDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stopDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    stopDatafeed(params: T.StopDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    updateDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    updateDatafeed(params: T.UpdateDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    updateFilter(params: T.UpdateFilterRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    updateJob(params: T.UpdateJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    updateModelSnapshot(params: T.UpdateModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    upgradeJobSnapshot(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    validate(params?: T.ValidateJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    validateDetector(params?: T.ValidateDetectorRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    setUpgradeMode(params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    startDataFrameAnalytics(params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    startDatafeed(params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    stopDataFrameAnalytics(params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    stopDatafeed(params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    updateDataFrameAnalytics(params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    updateDatafeed(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
+    updateFilter(params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    updateJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
+    updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    upgradeJobSnapshot(params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    validate(params?: T.MlValidateJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    validateDetector(params?: T.MlValidateDetectorRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
   monitoring: {
     bulk(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
   }
-  msearch(params?: T.MultiSearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-  msearchTemplate(params?: T.MultiSearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  mtermvectors(params?: T.MultiTermVectorsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+  msearch(params?: T.MsearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
+  msearchTemplate(params?: T.MsearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
+  mtermvectors(params?: T.MtermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise>
   nodes: {
-    hotThreads(params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    info(params?: T.NodesInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    reloadSecureSettings(params?: T.ReloadSecureSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stats(params?: T.NodesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    usage(params?: T.NodesUsageRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    hotThreads(params?: T.NodesNodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    info(params?: T.NodesNodesInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    reloadSecureSettings(params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    stats(params?: T.NodesNodesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    usage(params?: T.NodesNodesUsageRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
   openPointInTime(params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): TransportRequestPromise>
   ping(params?: T.PingRequest, options?: TransportRequestOptions): TransportRequestPromise>
   putScript(params: T.PutScriptRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  rankEval(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
+  rankEval(params: T.RankEvalRequest, options?: TransportRequestOptions): TransportRequestPromise>
   reindex(params?: T.ReindexRequest, options?: TransportRequestOptions): TransportRequestPromise>
   reindexRethrottle(params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): TransportRequestPromise>
   renderSearchTemplate(params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
   rollup: {
-    deleteJob(params: T.DeleteRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getJobs(params?: T.GetRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getRollupCaps(params?: T.GetRollupCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getRollupIndexCaps(params: T.GetRollupIndexCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putJob(params: T.CreateRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteJob(params: T.RollupDeleteRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getJobs(params?: T.RollupGetRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getRollupCaps(params?: T.RollupGetRollupCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getRollupIndexCaps(params: T.RollupGetRollupIndexCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putJob(params: T.RollupCreateRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
     rollup(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    rollupSearch(params: T.RollupSearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    startJob(params: T.StartRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stopJob(params: T.StopRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    rollupSearch(params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
+    startJob(params: T.RollupStartRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    stopJob(params: T.RollupStopRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
-  scriptsPainlessExecute(params?: T.ExecutePainlessScriptRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
+  scriptsPainlessExecute(params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
   scroll(params?: T.ScrollRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
   search(params?: T.SearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
   searchShards(params?: T.SearchShardsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  searchTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
+  searchTemplate(params?: T.SearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
   searchableSnapshots: {
+    cacheStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
     clearCache(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
     mount(params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): TransportRequestPromise>
     repositoryStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
     stats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
   }
   security: {
-    authenticate(params?: T.AuthenticateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    changePassword(params?: T.ChangePasswordRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clearApiKeyCache(params?: T.ClearApiKeyCacheRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clearCachedPrivileges(params: T.ClearCachedPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clearCachedRealms(params: T.ClearCachedRealmsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clearCachedRoles(params: T.ClearCachedRolesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    createApiKey(params?: T.CreateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deletePrivileges(params: T.DeletePrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteRole(params: T.DeleteRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteRoleMapping(params: T.DeleteRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteUser(params: T.DeleteUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    disableUser(params: T.DisableUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    enableUser(params: T.EnableUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getApiKey(params?: T.GetApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getBuiltinPrivileges(params?: T.GetBuiltinPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getPrivileges(params?: T.GetPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getRole(params?: T.GetRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getRoleMapping(params?: T.GetRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getToken(params?: T.GetUserAccessTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getUser(params?: T.GetUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getUserPrivileges(params?: T.GetUserPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    grantApiKey(params?: T.GrantApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    hasPrivileges(params?: T.HasPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    invalidateApiKey(params?: T.InvalidateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    invalidateToken(params?: T.InvalidateUserAccessTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putPrivileges(params?: T.PutPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putRole(params: T.PutRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putRoleMapping(params: T.PutRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putUser(params: T.PutUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    authenticate(params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    changePassword(params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearApiKeyCache(params?: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearCachedPrivileges(params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearCachedRealms(params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearCachedRoles(params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearCachedServiceTokens(params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    createApiKey(params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    createServiceToken(params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deletePrivileges(params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteRole(params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteRoleMapping(params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteServiceToken(params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteUser(params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    disableUser(params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    enableUser(params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getApiKey(params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getBuiltinPrivileges(params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getPrivileges(params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getRole(params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getRoleMapping(params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getServiceAccounts(params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getServiceCredentials(params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getToken(params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getUser(params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getUserPrivileges(params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    grantApiKey(params?: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    hasPrivileges(params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    invalidateApiKey(params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    invalidateToken(params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putPrivileges(params?: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putRole(params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putRoleMapping(params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putUser(params: T.SecurityPutUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
   shutdown: {
     deleteNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
@@ -444,75 +453,75 @@ interface KibanaClient {
     putNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
   }
   slm: {
-    deleteLifecycle(params: T.DeleteSnapshotLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    executeLifecycle(params: T.ExecuteSnapshotLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    executeRetention(params?: T.ExecuteRetentionRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getLifecycle(params?: T.GetSnapshotLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getStats(params?: T.GetSnapshotLifecycleStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getStatus(params?: T.GetSnapshotLifecycleManagementStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putLifecycle(params: T.PutSnapshotLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    start(params?: T.StartSnapshotLifecycleManagementRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stop(params?: T.StopSnapshotLifecycleManagementRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteLifecycle(params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    executeLifecycle(params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    executeRetention(params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getLifecycle(params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getStats(params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getStatus(params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putLifecycle(params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    start(params?: T.SlmStartRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    stop(params?: T.SlmStopRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
   snapshot: {
-    cleanupRepository(params: T.CleanupRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clone(params: T.CloneSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    create(params: T.SnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    createRepository(params: T.CreateRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    delete(params: T.DeleteSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteRepository(params: T.DeleteRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    get(params: T.GetSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getRepository(params?: T.GetRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    restore(params: T.RestoreRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    cleanupRepository(params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clone(params: T.SnapshotCloneRequest, options?: TransportRequestOptions): TransportRequestPromise>
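Besides the mechanical rename, the security block above gains first-class service-account endpoints: `createServiceToken`, `getServiceCredentials`, `clearCachedServiceTokens`, and `deleteServiceToken`. A minimal sketch of the new surface, assuming the `client` instance from the earlier example and run inside an async function; the namespace, service, and token names are hypothetical:

    const { body } = await client.security.createServiceToken({
      namespace: 'elastic',     // hypothetical service account namespace
      service: 'fleet-server',  // hypothetical service name
      name: 'token-1'           // hypothetical token name
    })
    console.log(body)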
+    create(params: T.SnapshotCreateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    createRepository(params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    delete(params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteRepository(params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    get(params: T.SnapshotGetRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getRepository(params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    restore(params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): TransportRequestPromise>
     status(params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    verifyRepository(params: T.VerifyRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    verifyRepository(params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
   sql: {
-    clearCursor(params?: T.ClearSqlCursorRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    query(params?: T.QuerySqlRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    translate(params?: T.TranslateSqlRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearCursor(params?: T.SqlClearSqlCursorRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    query(params?: T.SqlQuerySqlRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    translate(params?: T.SqlTranslateSqlRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
   ssl: {
-    certificates(params?: T.GetCertificatesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    certificates(params?: T.SslGetCertificatesRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
   tasks: {
-    cancel(params?: T.CancelTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    get(params: T.GetTaskRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    list(params?: T.ListTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    cancel(params?: T.TaskCancelTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    get(params: T.TaskGetTaskRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    list(params?: T.TaskListTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
-  termvectors(params: T.TermVectorsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+  termvectors(params: T.TermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise>
   textStructure: {
-    findStructure(params: T.FindStructureRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    findStructure(params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
   transform: {
-    deleteTransform(params: T.DeleteTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getTransform(params?: T.GetTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getTransformStats(params: T.GetTransformStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    previewTransform(params?: T.PreviewTransformRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    putTransform(params: T.PutTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    startTransform(params: T.StartTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stopTransform(params: T.StopTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    updateTransform(params: T.UpdateTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteTransform(params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getTransform(params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getTransformStats(params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    previewTransform(params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
+    putTransform(params: T.TransformPutTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    startTransform(params: T.TransformStartTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    stopTransform(params: T.TransformStopTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    updateTransform(params?: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
   update(params: T.UpdateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
   updateByQuery(params: T.UpdateByQueryRequest, options?: TransportRequestOptions): TransportRequestPromise>
   updateByQueryRethrottle(params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): TransportRequestPromise>
   watcher: {
-    ackWatch(params: T.AcknowledgeWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    activateWatch(params: T.ActivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deactivateWatch(params: T.DeactivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteWatch(params: T.DeleteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    executeWatch(params?: T.ExecuteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getWatch(params: T.GetWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putWatch(params: T.PutWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    ackWatch(params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    activateWatch(params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deactivateWatch(params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteWatch(params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    executeWatch(params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getWatch(params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putWatch(params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
     queryWatches(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    start(params?: T.StartWatcherRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    start(params?: T.WatcherStartRequest, options?: TransportRequestOptions): TransportRequestPromise>
     stats(params?: T.WatcherStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stop(params?: T.StopWatcherRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    stop(params?: T.WatcherStopRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
   xpack: {
-    info(params?: T.XPackInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    usage(params?: T.XPackUsageRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    info(params?: T.XpackInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    usage(params?: T.XpackUsageRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
 }

diff --git a/api/new.d.ts b/api/new.d.ts
index 53b8493f6..692d929ad 100644
--- a/api/new.d.ts
+++ b/api/new.d.ts
@@ -67,7 +67,7 @@ declare type extendsCallback = (options: ClientExtendsCallbackOptions) => any;
 declare type callbackFn = (err: ApiError, result: ApiResponse) => void;
 declare class Client {
-  constructor(opts: ClientOptions);
+  constructor(opts: ClientOptions)
   connectionPool: ConnectionPool
   transport: Transport
   serializer: Serializer
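The one-line hunk above is a style fix: the `constructor` declaration drops its trailing semicolon but keeps its typed `ClientOptions` parameter, so instantiation stays checked by the compiler. For illustration, under the same assumptions as the earlier sketches:

    import { Client } from '@elastic/elasticsearch'

    // opts is typed as ClientOptions, so a typo such as { nodee: ... } fails to compile.
    const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node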
@@ -226,47 +226,47 @@ declare class Client {
     transforms(params: T.CatTransformsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   ccr: {
-    deleteAutoFollowPattern(params: T.DeleteAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteAutoFollowPattern(params: T.DeleteAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
-    deleteAutoFollowPattern(params: T.DeleteAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    follow(params: T.CreateFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    follow(params: T.CreateFollowIndexRequest, callback: callbackFn): TransportRequestCallback
-    follow(params: T.CreateFollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    followInfo(params: T.FollowInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    followInfo(params: T.FollowInfoRequest, callback: callbackFn): TransportRequestCallback
-    followInfo(params: T.FollowInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    followStats(params: T.FollowIndexStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    followStats(params: T.FollowIndexStatsRequest, callback: callbackFn): TransportRequestCallback
-    followStats(params: T.FollowIndexStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    forgetFollower(params: T.ForgetFollowerIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    forgetFollower(params: T.ForgetFollowerIndexRequest, callback: callbackFn): TransportRequestCallback
-    forgetFollower(params: T.ForgetFollowerIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getAutoFollowPattern(params?: T.GetAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getAutoFollowPattern(callback: callbackFn): TransportRequestCallback
-    getAutoFollowPattern(params: T.GetAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
-    getAutoFollowPattern(params: T.GetAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    pauseAutoFollowPattern(params: T.PauseAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    pauseAutoFollowPattern(params: T.PauseAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
-    pauseAutoFollowPattern(params: T.PauseAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    pauseFollow(params: T.PauseFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    pauseFollow(params: T.PauseFollowIndexRequest, callback: callbackFn): TransportRequestCallback
-    pauseFollow(params: T.PauseFollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putAutoFollowPattern(params: T.PutAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putAutoFollowPattern(params: T.PutAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
-    putAutoFollowPattern(params: T.PutAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    resumeAutoFollowPattern(params: T.ResumeAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    resumeAutoFollowPattern(params: T.ResumeAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
-    resumeAutoFollowPattern(params: T.ResumeAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    resumeFollow(params: T.ResumeFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    resumeFollow(params: T.ResumeFollowIndexRequest, callback: callbackFn): TransportRequestCallback
-    resumeFollow(params: T.ResumeFollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deleteAutoFollowPattern(params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteAutoFollowPattern(params: T.CcrDeleteAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
+    deleteAutoFollowPattern(params: T.CcrDeleteAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    follow(params: T.CcrCreateFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    follow(params: T.CcrCreateFollowIndexRequest, callback: callbackFn): TransportRequestCallback
+    follow(params: T.CcrCreateFollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    followInfo(params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    followInfo(params: T.CcrFollowInfoRequest, callback: callbackFn): TransportRequestCallback
+    followInfo(params: T.CcrFollowInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    followStats(params: T.CcrFollowIndexStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    followStats(params: T.CcrFollowIndexStatsRequest, callback: callbackFn): TransportRequestCallback
+    followStats(params: T.CcrFollowIndexStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    forgetFollower(params: T.CcrForgetFollowerIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    forgetFollower(params: T.CcrForgetFollowerIndexRequest, callback: callbackFn): TransportRequestCallback
+    forgetFollower(params: T.CcrForgetFollowerIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getAutoFollowPattern(params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getAutoFollowPattern(callback: callbackFn): TransportRequestCallback
+    getAutoFollowPattern(params: T.CcrGetAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
+    getAutoFollowPattern(params: T.CcrGetAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    pauseAutoFollowPattern(params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    pauseAutoFollowPattern(params: T.CcrPauseAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
+    pauseAutoFollowPattern(params: T.CcrPauseAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    pauseFollow(params: T.CcrPauseFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    pauseFollow(params: T.CcrPauseFollowIndexRequest, callback: callbackFn): TransportRequestCallback
+    pauseFollow(params: T.CcrPauseFollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    putAutoFollowPattern(params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putAutoFollowPattern(params: T.CcrPutAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
+    putAutoFollowPattern(params: T.CcrPutAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    resumeAutoFollowPattern(params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    resumeAutoFollowPattern(params: T.CcrResumeAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
+    resumeAutoFollowPattern(params: T.CcrResumeAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    resumeFollow(params: T.CcrResumeFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    resumeFollow(params: T.CcrResumeFollowIndexRequest, callback: callbackFn): TransportRequestCallback
+    resumeFollow(params: T.CcrResumeFollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
     stats(params?: T.CcrStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
     stats(callback: callbackFn): TransportRequestCallback
     stats(params: T.CcrStatsRequest, callback: callbackFn): TransportRequestCallback
     stats(params: T.CcrStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    unfollow(params: T.UnfollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    unfollow(params: T.UnfollowIndexRequest, callback: callbackFn): TransportRequestCallback
-    unfollow(params: T.UnfollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    unfollow(params: T.CcrUnfollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    unfollow(params: T.CcrUnfollowIndexRequest, callback: callbackFn): TransportRequestCallback
+    unfollow(params: T.CcrUnfollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
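Each method in `new.d.ts` keeps three overloads: promise-returning, `(params, callback)`, and `(params, options, callback)`. A minimal sketch of both call styles against the renamed `ccr` types, reusing the earlier `client`; the follower index name is hypothetical:

    const params: T.CcrUnfollowIndexRequest = { index: 'follower-index' } // hypothetical index

    // Promise style
    await client.ccr.unfollow(params)

    // Callback style
    client.ccr.unfollow(params, (err, result) => {
      if (err != null) console.error(err)
      else console.log(result.body)
    })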
   clearScroll(params?: T.ClearScrollRequest, options?: TransportRequestOptions): TransportRequestPromise>
   clearScroll(callback: callbackFn): TransportRequestCallback
@@ -292,10 +292,10 @@ declare class Client {
     existsComponentTemplate(callback: callbackFn): TransportRequestCallback
     existsComponentTemplate(params: TODO, callback: callbackFn): TransportRequestCallback
     existsComponentTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getComponentTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getComponentTemplate(callback: callbackFn): TransportRequestCallback
-    getComponentTemplate(params: TODO, callback: callbackFn): TransportRequestCallback
-    getComponentTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getComponentTemplate(params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getComponentTemplate(callback: callbackFn): TransportRequestCallback
+    getComponentTemplate(params: T.ClusterGetComponentTemplateRequest, callback: callbackFn): TransportRequestCallback
+    getComponentTemplate(params: T.ClusterGetComponentTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
     getSettings(params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
     getSettings(callback: callbackFn): TransportRequestCallback
     getSettings(params: T.ClusterGetSettingsRequest, callback: callbackFn): TransportRequestCallback
@@ -312,18 +312,17 @@ declare class Client {
     postVotingConfigExclusions(callback: callbackFn): TransportRequestCallback
     postVotingConfigExclusions(params: TODO, callback: callbackFn): TransportRequestCallback
     postVotingConfigExclusions(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putComponentTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    putComponentTemplate(callback: callbackFn): TransportRequestCallback
-    putComponentTemplate(params: TODO, callback: callbackFn): TransportRequestCallback
-    putComponentTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    putComponentTemplate(params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putComponentTemplate(params: T.ClusterPutComponentTemplateRequest, callback: callbackFn): TransportRequestCallback
+    putComponentTemplate(params: T.ClusterPutComponentTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
     putSettings(params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
     putSettings(callback: callbackFn): TransportRequestCallback
     putSettings(params: T.ClusterPutSettingsRequest, callback: callbackFn): TransportRequestCallback
     putSettings(params: T.ClusterPutSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    remoteInfo(params?: T.RemoteInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    remoteInfo(callback: callbackFn): TransportRequestCallback
-    remoteInfo(params: T.RemoteInfoRequest, callback: callbackFn): TransportRequestCallback
-    remoteInfo(params: T.RemoteInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    remoteInfo(params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    remoteInfo(callback: callbackFn): TransportRequestCallback
+    remoteInfo(params: T.ClusterRemoteInfoRequest, callback: callbackFn): TransportRequestCallback
+    remoteInfo(params: T.ClusterRemoteInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
     reroute(params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): TransportRequestPromise>
     reroute(callback: callbackFn): TransportRequestCallback
     reroute(params: T.ClusterRerouteRequest, callback: callbackFn): TransportRequestCallback
     reroute(params: T.ClusterRerouteRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
@@ -405,19 +404,19 @@ declare class Client {
   deleteScript(params: T.DeleteScriptRequest, callback: callbackFn): TransportRequestCallback
   deleteScript(params: T.DeleteScriptRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   enrich: {
-    deletePolicy(params: T.DeleteEnrichPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deletePolicy(params: T.DeleteEnrichPolicyRequest, callback: callbackFn): TransportRequestCallback
-    deletePolicy(params: T.DeleteEnrichPolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    executePolicy(params: T.ExecuteEnrichPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    executePolicy(params: T.ExecuteEnrichPolicyRequest, callback: callbackFn): TransportRequestCallback
-    executePolicy(params: T.ExecuteEnrichPolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getPolicy(params?: T.GetEnrichPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getPolicy(callback: callbackFn): TransportRequestCallback
-    getPolicy(params: T.GetEnrichPolicyRequest, callback: callbackFn): TransportRequestCallback
-    getPolicy(params: T.GetEnrichPolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putPolicy(params: T.PutEnrichPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putPolicy(params: T.PutEnrichPolicyRequest, callback: callbackFn): TransportRequestCallback
-    putPolicy(params: T.PutEnrichPolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deletePolicy(params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deletePolicy(params: T.EnrichDeletePolicyRequest, callback: callbackFn): TransportRequestCallback
+    deletePolicy(params: T.EnrichDeletePolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    executePolicy(params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    executePolicy(params: T.EnrichExecutePolicyRequest, callback: callbackFn): TransportRequestCallback
+    executePolicy(params: T.EnrichExecutePolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getPolicy(params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getPolicy(callback: callbackFn): TransportRequestCallback
+    getPolicy(params: T.EnrichGetPolicyRequest, callback: callbackFn): TransportRequestCallback
+    getPolicy(params: T.EnrichGetPolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    putPolicy(params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putPolicy(params: T.EnrichPutPolicyRequest, callback: callbackFn): TransportRequestCallback
+    putPolicy(params: T.EnrichPutPolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
     stats(params?: T.EnrichStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
     stats(callback: callbackFn): TransportRequestCallback
     stats(params: T.EnrichStatsRequest, callback: callbackFn): TransportRequestCallback
@@ -437,12 +436,12 @@ declare class Client {
     search(params: T.EqlSearchRequest, callback: callbackFn, TContext>): TransportRequestCallback
     search(params: T.EqlSearchRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
   }
-  exists(params: T.DocumentExistsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  exists(params: T.DocumentExistsRequest, callback: callbackFn): TransportRequestCallback
-  exists(params: T.DocumentExistsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  existsSource(params: T.SourceExistsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  existsSource(params: T.SourceExistsRequest, callback: callbackFn): TransportRequestCallback
-  existsSource(params: T.SourceExistsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  exists(params: T.ExistsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+  exists(params: T.ExistsRequest, callback: callbackFn): TransportRequestCallback
+  exists(params: T.ExistsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  existsSource(params: T.ExistsSourceRequest, options?: TransportRequestOptions): TransportRequestPromise>
+  existsSource(params: T.ExistsSourceRequest, callback: callbackFn): TransportRequestCallback
+  existsSource(params: T.ExistsSourceRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   explain(params: T.ExplainRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
   explain(params: T.ExplainRequest, callback: callbackFn, TContext>): TransportRequestCallback
   explain(params: T.ExplainRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
@@ -456,93 +455,100 @@ declare class Client {
     resetFeatures(params: TODO, callback: callbackFn): TransportRequestCallback
     resetFeatures(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
-  fieldCaps(params?: T.FieldCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  fieldCaps(callback: callbackFn): TransportRequestCallback
-  fieldCaps(params: T.FieldCapabilitiesRequest, callback: callbackFn): TransportRequestCallback
-  fieldCaps(params: T.FieldCapabilitiesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  fieldCaps(params?: T.FieldCapsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+  fieldCaps(callback: callbackFn): TransportRequestCallback
+  fieldCaps(params: T.FieldCapsRequest, callback: callbackFn): TransportRequestCallback
+  fieldCaps(params: T.FieldCapsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  fleet: {
+    globalCheckpoints(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
+    globalCheckpoints(callback: callbackFn): TransportRequestCallback
+    globalCheckpoints(params: TODO, callback: callbackFn): TransportRequestCallback
+    globalCheckpoints(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  }
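The `fleet` namespace above is entirely new, though its params are still typed as `TODO` in this patch. Purely as an assumption about the eventual call shape (the index name and the response handling are illustrative, not confirmed by this diff):

    // Params are untyped (TODO) at this stage, so this is a sketch only.
    const { body } = await client.fleet.globalCheckpoints({ index: 'my-index' }) // hypothetical index
    console.log(body)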
   get(params: T.GetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
   get(params: T.GetRequest, callback: callbackFn, TContext>): TransportRequestCallback
   get(params: T.GetRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
   getScript(params: T.GetScriptRequest, options?: TransportRequestOptions): TransportRequestPromise>
   getScript(params: T.GetScriptRequest, callback: callbackFn): TransportRequestCallback
   getScript(params: T.GetScriptRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  getScriptContext(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-  getScriptContext(callback: callbackFn): TransportRequestCallback
-  getScriptContext(params: TODO, callback: callbackFn): TransportRequestCallback
-  getScriptContext(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  getScriptLanguages(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-  getScriptLanguages(callback: callbackFn): TransportRequestCallback
-  getScriptLanguages(params: TODO, callback: callbackFn): TransportRequestCallback
-  getScriptLanguages(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  getSource(params: T.SourceRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-  getSource(params: T.SourceRequest, callback: callbackFn, TContext>): TransportRequestCallback
-  getSource(params: T.SourceRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
+  getScriptContext(params?: T.GetScriptContextRequest, options?: TransportRequestOptions): TransportRequestPromise>
+  getScriptContext(callback: callbackFn): TransportRequestCallback
+  getScriptContext(params: T.GetScriptContextRequest, callback: callbackFn): TransportRequestCallback
+  getScriptContext(params: T.GetScriptContextRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  getScriptLanguages(params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+  getScriptLanguages(callback: callbackFn): TransportRequestCallback
+  getScriptLanguages(params: T.GetScriptLanguagesRequest, callback: callbackFn): TransportRequestCallback
+  getScriptLanguages(params: T.GetScriptLanguagesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  getSource(params?: T.GetSourceRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
+  getSource(callback: callbackFn, TContext>): TransportRequestCallback
+  getSource(params: T.GetSourceRequest, callback: callbackFn, TContext>): TransportRequestCallback
+  getSource(params: T.GetSourceRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
   graph: {
     explore(params: T.GraphExploreRequest, options?: TransportRequestOptions): TransportRequestPromise>
     explore(params: T.GraphExploreRequest, callback: callbackFn): TransportRequestCallback
     explore(params: T.GraphExploreRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   ilm: {
-    deleteLifecycle(params: T.DeleteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteLifecycle(params: T.DeleteLifecycleRequest, callback: callbackFn): TransportRequestCallback
-    deleteLifecycle(params: T.DeleteLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    explainLifecycle(params: T.ExplainLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    explainLifecycle(params: T.ExplainLifecycleRequest, callback: callbackFn): TransportRequestCallback
-    explainLifecycle(params: T.ExplainLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getLifecycle(params?: T.GetLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getLifecycle(callback: callbackFn): TransportRequestCallback
-    getLifecycle(params: T.GetLifecycleRequest, callback: callbackFn): TransportRequestCallback
-    getLifecycle(params: T.GetLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getStatus(params?: T.GetIlmStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getStatus(callback: callbackFn): TransportRequestCallback
-    getStatus(params: T.GetIlmStatusRequest, callback: callbackFn): TransportRequestCallback
-    getStatus(params: T.GetIlmStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    moveToStep(params: T.MoveToStepRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    moveToStep(params: T.MoveToStepRequest, callback: callbackFn): TransportRequestCallback
-    moveToStep(params: T.MoveToStepRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putLifecycle(params?: T.PutLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putLifecycle(callback: callbackFn): TransportRequestCallback
-    putLifecycle(params: T.PutLifecycleRequest, callback: callbackFn): TransportRequestCallback
-    putLifecycle(params: T.PutLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    removePolicy(params: T.RemovePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    removePolicy(params: T.RemovePolicyRequest, callback: callbackFn): TransportRequestCallback
-    removePolicy(params: T.RemovePolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    retry(params: T.RetryIlmRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    retry(params: T.RetryIlmRequest, callback: callbackFn): TransportRequestCallback
-    retry(params: T.RetryIlmRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    start(params?: T.StartIlmRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    start(callback: callbackFn): TransportRequestCallback
-    start(params: T.StartIlmRequest, callback: callbackFn): TransportRequestCallback
-    start(params: T.StartIlmRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    stop(params?: T.StopIlmRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stop(callback: callbackFn): TransportRequestCallback
-    stop(params: T.StopIlmRequest, callback: callbackFn): TransportRequestCallback
-    stop(params: T.StopIlmRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deleteLifecycle(params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteLifecycle(params: T.IlmDeleteLifecycleRequest, callback: callbackFn): TransportRequestCallback
+    deleteLifecycle(params: T.IlmDeleteLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    explainLifecycle(params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    explainLifecycle(params: T.IlmExplainLifecycleRequest, callback: callbackFn): TransportRequestCallback
+    explainLifecycle(params: T.IlmExplainLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getLifecycle(params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getLifecycle(callback: callbackFn): TransportRequestCallback
+    getLifecycle(params: T.IlmGetLifecycleRequest, callback: callbackFn): TransportRequestCallback
+    getLifecycle(params: T.IlmGetLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getStatus(params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getStatus(callback: callbackFn): TransportRequestCallback
+    getStatus(params: T.IlmGetStatusRequest, callback: callbackFn): TransportRequestCallback
+    getStatus(params: T.IlmGetStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    moveToStep(params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    moveToStep(params: T.IlmMoveToStepRequest, callback: callbackFn): TransportRequestCallback
+    moveToStep(params: T.IlmMoveToStepRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    putLifecycle(params?: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putLifecycle(callback: callbackFn): TransportRequestCallback
+    putLifecycle(params: T.IlmPutLifecycleRequest, callback: callbackFn): TransportRequestCallback
+    putLifecycle(params: T.IlmPutLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    removePolicy(params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    removePolicy(params: T.IlmRemovePolicyRequest, callback: callbackFn): TransportRequestCallback
+    removePolicy(params: T.IlmRemovePolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    retry(params: T.IlmRetryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    retry(params: T.IlmRetryRequest, callback: callbackFn): TransportRequestCallback
+    retry(params: T.IlmRetryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    start(params?: T.IlmStartRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    start(callback: callbackFn): TransportRequestCallback
+    start(params: T.IlmStartRequest, callback: callbackFn): TransportRequestCallback
+    start(params: T.IlmStartRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    stop(params?: T.IlmStopRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    stop(callback: callbackFn): TransportRequestCallback
+    stop(params: T.IlmStopRequest, callback: callbackFn): TransportRequestCallback
+    stop(params: T.IlmStopRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
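The `ilm` hunk is the same mechanical rename (`T.PutLifecycleRequest` becomes `T.IlmPutLifecycleRequest`, and so on). A minimal callback-style sketch; the policy name is a placeholder and the request shape beyond it is not confirmed by this diff:

    const req: T.IlmPutLifecycleRequest = { policy: 'my-policy' } // hypothetical policy name
    client.ilm.putLifecycle(req, (err, result) => {
      if (err != null) console.error(err)
    })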
TransportRequestCallback indices: { - addBlock(params: T.IndexAddBlockRequest, options?: TransportRequestOptions): TransportRequestPromise> - addBlock(params: T.IndexAddBlockRequest, callback: callbackFn): TransportRequestCallback - addBlock(params: T.IndexAddBlockRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - analyze(params?: T.AnalyzeRequest, options?: TransportRequestOptions): TransportRequestPromise> - analyze(callback: callbackFn): TransportRequestCallback - analyze(params: T.AnalyzeRequest, callback: callbackFn): TransportRequestCallback - analyze(params: T.AnalyzeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clearCache(params?: T.ClearCacheRequest, options?: TransportRequestOptions): TransportRequestPromise> - clearCache(callback: callbackFn): TransportRequestCallback - clearCache(params: T.ClearCacheRequest, callback: callbackFn): TransportRequestCallback - clearCache(params: T.ClearCacheRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clone(params: T.CloneIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - clone(params: T.CloneIndexRequest, callback: callbackFn): TransportRequestCallback - clone(params: T.CloneIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - close(params: T.CloseIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - close(params: T.CloseIndexRequest, callback: callbackFn): TransportRequestCallback - close(params: T.CloseIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - create(params: T.CreateIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - create(params: T.CreateIndexRequest, callback: callbackFn): TransportRequestCallback - create(params: T.CreateIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + addBlock(params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): TransportRequestPromise> + addBlock(params: T.IndicesAddBlockRequest, callback: callbackFn): TransportRequestCallback + addBlock(params: T.IndicesAddBlockRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + analyze(params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): TransportRequestPromise> + analyze(callback: callbackFn): TransportRequestCallback + analyze(params: T.IndicesAnalyzeRequest, callback: callbackFn): TransportRequestCallback + analyze(params: T.IndicesAnalyzeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + clearCache(params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): TransportRequestPromise> + clearCache(callback: callbackFn): TransportRequestCallback + clearCache(params: T.IndicesClearCacheRequest, callback: callbackFn): TransportRequestCallback + clearCache(params: T.IndicesClearCacheRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + clone(params: T.IndicesCloneRequest, options?: TransportRequestOptions): TransportRequestPromise> + clone(params: T.IndicesCloneRequest, callback: callbackFn): TransportRequestCallback + clone(params: T.IndicesCloneRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + close(params: T.IndicesCloseRequest, options?: TransportRequestOptions): TransportRequestPromise> + close(params: 
T.IndicesCloseRequest, callback: callbackFn): TransportRequestCallback + close(params: T.IndicesCloseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + create(params: T.IndicesCreateRequest, options?: TransportRequestOptions): TransportRequestPromise> + create(params: T.IndicesCreateRequest, callback: callbackFn): TransportRequestCallback + create(params: T.IndicesCreateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback createDataStream(params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise> createDataStream(params: T.IndicesCreateDataStreamRequest, callback: callbackFn): TransportRequestCallback createDataStream(params: T.IndicesCreateDataStreamRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback @@ -550,83 +556,81 @@ declare class Client { dataStreamsStats(callback: callbackFn): TransportRequestCallback dataStreamsStats(params: T.IndicesDataStreamsStatsRequest, callback: callbackFn): TransportRequestCallback dataStreamsStats(params: T.IndicesDataStreamsStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete(params: T.DeleteIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - delete(params: T.DeleteIndexRequest, callback: callbackFn): TransportRequestCallback - delete(params: T.DeleteIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteAlias(params: T.DeleteAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteAlias(params: T.DeleteAliasRequest, callback: callbackFn): TransportRequestCallback - deleteAlias(params: T.DeleteAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + delete(params: T.IndicesDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise> + delete(params: T.IndicesDeleteRequest, callback: callbackFn): TransportRequestCallback + delete(params: T.IndicesDeleteRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + deleteAlias(params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> + deleteAlias(params: T.IndicesDeleteAliasRequest, callback: callbackFn): TransportRequestCallback + deleteAlias(params: T.IndicesDeleteAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback deleteDataStream(params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise> deleteDataStream(params: T.IndicesDeleteDataStreamRequest, callback: callbackFn): TransportRequestCallback deleteDataStream(params: T.IndicesDeleteDataStreamRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteIndexTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - deleteIndexTemplate(callback: callbackFn): TransportRequestCallback - deleteIndexTemplate(params: TODO, callback: callbackFn): TransportRequestCallback - deleteIndexTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteTemplate(params: T.DeleteIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteTemplate(params: T.DeleteIndexTemplateRequest, callback: callbackFn): TransportRequestCallback - deleteTemplate(params: T.DeleteIndexTemplateRequest, options: 
TransportRequestOptions, callback: callbackFn): TransportRequestCallback - exists(params: T.IndexExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> - exists(params: T.IndexExistsRequest, callback: callbackFn): TransportRequestCallback - exists(params: T.IndexExistsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsAlias(params: T.AliasExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> - existsAlias(params: T.AliasExistsRequest, callback: callbackFn): TransportRequestCallback - existsAlias(params: T.AliasExistsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsIndexTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - existsIndexTemplate(callback: callbackFn): TransportRequestCallback - existsIndexTemplate(params: TODO, callback: callbackFn): TransportRequestCallback - existsIndexTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsTemplate(params: T.IndexTemplateExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> - existsTemplate(params: T.IndexTemplateExistsRequest, callback: callbackFn): TransportRequestCallback - existsTemplate(params: T.IndexTemplateExistsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsType(params: T.TypeExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> - existsType(params: T.TypeExistsRequest, callback: callbackFn): TransportRequestCallback - existsType(params: T.TypeExistsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - flush(params?: T.FlushRequest, options?: TransportRequestOptions): TransportRequestPromise> - flush(callback: callbackFn): TransportRequestCallback - flush(params: T.FlushRequest, callback: callbackFn): TransportRequestCallback - flush(params: T.FlushRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - flushSynced(params?: T.SyncedFlushRequest, options?: TransportRequestOptions): TransportRequestPromise> - flushSynced(callback: callbackFn): TransportRequestCallback - flushSynced(params: T.SyncedFlushRequest, callback: callbackFn): TransportRequestCallback - flushSynced(params: T.SyncedFlushRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - forcemerge(params?: T.ForceMergeRequest, options?: TransportRequestOptions): TransportRequestPromise> - forcemerge(callback: callbackFn): TransportRequestCallback - forcemerge(params: T.ForceMergeRequest, callback: callbackFn): TransportRequestCallback - forcemerge(params: T.ForceMergeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - freeze(params: T.FreezeIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - freeze(params: T.FreezeIndexRequest, callback: callbackFn): TransportRequestCallback - freeze(params: T.FreezeIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get(params: T.GetIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - get(params: T.GetIndexRequest, callback: callbackFn): TransportRequestCallback - get(params: T.GetIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getAlias(params?: T.GetAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> 
- getAlias(callback: callbackFn): TransportRequestCallback
- getAlias(params: T.GetAliasRequest, callback: callbackFn): TransportRequestCallback
- getAlias(params: T.GetAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteIndexTemplate(params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteIndexTemplate(params: T.IndicesDeleteIndexTemplateRequest, callback: callbackFn): TransportRequestCallback
+ deleteIndexTemplate(params: T.IndicesDeleteIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteTemplate(params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteTemplate(params: T.IndicesDeleteTemplateRequest, callback: callbackFn): TransportRequestCallback
+ deleteTemplate(params: T.IndicesDeleteTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ exists(params: T.IndicesExistsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ exists(params: T.IndicesExistsRequest, callback: callbackFn): TransportRequestCallback
+ exists(params: T.IndicesExistsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ existsAlias(params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ existsAlias(params: T.IndicesExistsAliasRequest, callback: callbackFn): TransportRequestCallback
+ existsAlias(params: T.IndicesExistsAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ existsIndexTemplate(params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ existsIndexTemplate(params: T.IndicesExistsIndexTemplateRequest, callback: callbackFn): TransportRequestCallback
+ existsIndexTemplate(params: T.IndicesExistsIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ existsTemplate(params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ existsTemplate(params: T.IndicesExistsTemplateRequest, callback: callbackFn): TransportRequestCallback
+ existsTemplate(params: T.IndicesExistsTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ existsType(params: T.IndicesExistsTypeRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ existsType(params: T.IndicesExistsTypeRequest, callback: callbackFn): TransportRequestCallback
+ existsType(params: T.IndicesExistsTypeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ flush(params?: T.IndicesFlushRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ flush(callback: callbackFn): TransportRequestCallback
+ flush(params: T.IndicesFlushRequest, callback: callbackFn): TransportRequestCallback
+ flush(params: T.IndicesFlushRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ flushSynced(params?: T.IndicesFlushSyncedRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ flushSynced(callback: callbackFn): TransportRequestCallback
+ flushSynced(params: T.IndicesFlushSyncedRequest, callback: callbackFn): TransportRequestCallback
+ flushSynced(params: T.IndicesFlushSyncedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ forcemerge(params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ forcemerge(callback: callbackFn): TransportRequestCallback
+ forcemerge(params: T.IndicesForcemergeRequest, callback: callbackFn): TransportRequestCallback
+ forcemerge(params: T.IndicesForcemergeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ freeze(params: T.IndicesFreezeRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ freeze(params: T.IndicesFreezeRequest, callback: callbackFn): TransportRequestCallback
+ freeze(params: T.IndicesFreezeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ get(params: T.IndicesGetRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ get(params: T.IndicesGetRequest, callback: callbackFn): TransportRequestCallback
+ get(params: T.IndicesGetRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ getAlias(params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ getAlias(callback: callbackFn): TransportRequestCallback
+ getAlias(params: T.IndicesGetAliasRequest, callback: callbackFn): TransportRequestCallback
+ getAlias(params: T.IndicesGetAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
getDataStream(params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise>
getDataStream(callback: callbackFn): TransportRequestCallback
getDataStream(params: T.IndicesGetDataStreamRequest, callback: callbackFn): TransportRequestCallback
getDataStream(params: T.IndicesGetDataStreamRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getFieldMapping(params: T.GetFieldMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getFieldMapping(params: T.GetFieldMappingRequest, callback: callbackFn): TransportRequestCallback
- getFieldMapping(params: T.GetFieldMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getIndexTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- getIndexTemplate(callback: callbackFn): TransportRequestCallback
- getIndexTemplate(params: TODO, callback: callbackFn): TransportRequestCallback
- getIndexTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getMapping(params?: T.GetMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getMapping(callback: callbackFn): TransportRequestCallback
- getMapping(params: T.GetMappingRequest, callback: callbackFn): TransportRequestCallback
- getMapping(params: T.GetMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getSettings(params?: T.GetIndexSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getSettings(callback: callbackFn): TransportRequestCallback
- getSettings(params: T.GetIndexSettingsRequest, callback: callbackFn): TransportRequestCallback
- getSettings(params: T.GetIndexSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getTemplate(params?: T.GetIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getTemplate(callback: callbackFn): TransportRequestCallback
- getTemplate(params: T.GetIndexTemplateRequest, callback: callbackFn): TransportRequestCallback
- getTemplate(params: T.GetIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ getFieldMapping(params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ getFieldMapping(params: T.IndicesGetFieldMappingRequest, callback: callbackFn): TransportRequestCallback
+ getFieldMapping(params: T.IndicesGetFieldMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ getIndexTemplate(params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ getIndexTemplate(callback: callbackFn): TransportRequestCallback
+ getIndexTemplate(params: T.IndicesGetIndexTemplateRequest, callback: callbackFn): TransportRequestCallback
+ getIndexTemplate(params: T.IndicesGetIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ getMapping(params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ getMapping(callback: callbackFn): TransportRequestCallback
+ getMapping(params: T.IndicesGetMappingRequest, callback: callbackFn): TransportRequestCallback
+ getMapping(params: T.IndicesGetMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ getSettings(params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ getSettings(callback: callbackFn): TransportRequestCallback
+ getSettings(params: T.IndicesGetSettingsRequest, callback: callbackFn): TransportRequestCallback
+ getSettings(params: T.IndicesGetSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ getTemplate(params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ getTemplate(callback: callbackFn): TransportRequestCallback
+ getTemplate(params: T.IndicesGetTemplateRequest, callback: callbackFn): TransportRequestCallback
+ getTemplate(params: T.IndicesGetTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
getUpgrade(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
getUpgrade(callback: callbackFn): TransportRequestCallback
getUpgrade(params: TODO, callback: callbackFn): TransportRequestCallback
@@ -634,147 +638,146 @@ declare class Client {
migrateToDataStream(params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise>
migrateToDataStream(params: T.IndicesMigrateToDataStreamRequest, callback: callbackFn): TransportRequestCallback
migrateToDataStream(params: T.IndicesMigrateToDataStreamRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- open(params: T.OpenIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
- open(params: T.OpenIndexRequest, callback: callbackFn): TransportRequestCallback
- open(params: T.OpenIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ open(params: T.IndicesOpenRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ open(params: T.IndicesOpenRequest, callback: callbackFn): TransportRequestCallback
+ open(params: T.IndicesOpenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
promoteDataStream(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
promoteDataStream(callback: callbackFn): TransportRequestCallback
promoteDataStream(params: TODO, callback: callbackFn): TransportRequestCallback
promoteDataStream(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putAlias(params: T.PutAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putAlias(params: T.PutAliasRequest, callback: callbackFn): TransportRequestCallback
- putAlias(params: T.PutAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putIndexTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- putIndexTemplate(callback: callbackFn): TransportRequestCallback
- putIndexTemplate(params: TODO, callback: callbackFn): TransportRequestCallback
- putIndexTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putMapping(params?: T.PutMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putMapping(callback: callbackFn): TransportRequestCallback
- putMapping(params: T.PutMappingRequest, callback: callbackFn): TransportRequestCallback
- putMapping(params: T.PutMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putSettings(params?: T.UpdateIndexSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putSettings(callback: callbackFn): TransportRequestCallback
- putSettings(params: T.UpdateIndexSettingsRequest, callback: callbackFn): TransportRequestCallback
- putSettings(params: T.UpdateIndexSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putTemplate(params: T.PutIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putTemplate(params: T.PutIndexTemplateRequest, callback: callbackFn): TransportRequestCallback
- putTemplate(params: T.PutIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- recovery(params?: T.RecoveryStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
- recovery(callback: callbackFn): TransportRequestCallback
- recovery(params: T.RecoveryStatusRequest, callback: callbackFn): TransportRequestCallback
- recovery(params: T.RecoveryStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- refresh(params?: T.RefreshRequest, options?: TransportRequestOptions): TransportRequestPromise>
- refresh(callback: callbackFn): TransportRequestCallback
- refresh(params: T.RefreshRequest, callback: callbackFn): TransportRequestCallback
- refresh(params: T.RefreshRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- reloadSearchAnalyzers(params: T.ReloadSearchAnalyzersRequest, options?: TransportRequestOptions): TransportRequestPromise>
- reloadSearchAnalyzers(params: T.ReloadSearchAnalyzersRequest, callback: callbackFn): TransportRequestCallback
- reloadSearchAnalyzers(params: T.ReloadSearchAnalyzersRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- resolveIndex(params: T.ResolveIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
- resolveIndex(params: T.ResolveIndexRequest, callback: callbackFn): TransportRequestCallback
- resolveIndex(params: T.ResolveIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- rollover(params: T.RolloverIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
- rollover(params: T.RolloverIndexRequest, callback: callbackFn): TransportRequestCallback
- rollover(params: T.RolloverIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- segments(params?: T.SegmentsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- segments(callback: callbackFn): TransportRequestCallback
- segments(params: T.SegmentsRequest, callback: callbackFn): TransportRequestCallback
- segments(params: T.SegmentsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ putAlias(params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ putAlias(params: T.IndicesPutAliasRequest, callback: callbackFn): TransportRequestCallback
+ putAlias(params: T.IndicesPutAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ putIndexTemplate(params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ putIndexTemplate(params: T.IndicesPutIndexTemplateRequest, callback: callbackFn): TransportRequestCallback
+ putIndexTemplate(params: T.IndicesPutIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ putMapping(params?: T.IndicesPutMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ putMapping(callback: callbackFn): TransportRequestCallback
+ putMapping(params: T.IndicesPutMappingRequest, callback: callbackFn): TransportRequestCallback
+ putMapping(params: T.IndicesPutMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ putSettings(params?: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ putSettings(callback: callbackFn): TransportRequestCallback
+ putSettings(params: T.IndicesPutSettingsRequest, callback: callbackFn): TransportRequestCallback
+ putSettings(params: T.IndicesPutSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ putTemplate(params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ putTemplate(params: T.IndicesPutTemplateRequest, callback: callbackFn): TransportRequestCallback
+ putTemplate(params: T.IndicesPutTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ recovery(params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ recovery(callback: callbackFn): TransportRequestCallback
+ recovery(params: T.IndicesRecoveryRequest, callback: callbackFn): TransportRequestCallback
+ recovery(params: T.IndicesRecoveryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ refresh(params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ refresh(callback: callbackFn): TransportRequestCallback
+ refresh(params: T.IndicesRefreshRequest, callback: callbackFn): TransportRequestCallback
+ refresh(params: T.IndicesRefreshRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ reloadSearchAnalyzers(params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ reloadSearchAnalyzers(params: T.IndicesReloadSearchAnalyzersRequest, callback: callbackFn): TransportRequestCallback
+ reloadSearchAnalyzers(params: T.IndicesReloadSearchAnalyzersRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ resolveIndex(params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ resolveIndex(params: T.IndicesResolveIndexRequest, callback: callbackFn): TransportRequestCallback
+ resolveIndex(params: T.IndicesResolveIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ rollover(params: T.IndicesRolloverRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ rollover(params: T.IndicesRolloverRequest, callback: callbackFn): TransportRequestCallback
+ rollover(params: T.IndicesRolloverRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ segments(params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ segments(callback: callbackFn): TransportRequestCallback
+ segments(params: T.IndicesSegmentsRequest, callback: callbackFn): TransportRequestCallback
+ segments(params: T.IndicesSegmentsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
shardStores(params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): TransportRequestPromise>
shardStores(callback: callbackFn): TransportRequestCallback
shardStores(params: T.IndicesShardStoresRequest, callback: callbackFn): TransportRequestCallback
shardStores(params: T.IndicesShardStoresRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- shrink(params: T.ShrinkIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
- shrink(params: T.ShrinkIndexRequest, callback: callbackFn): TransportRequestCallback
- shrink(params: T.ShrinkIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- simulateIndexTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- simulateIndexTemplate(callback: callbackFn): TransportRequestCallback
- simulateIndexTemplate(params: TODO, callback: callbackFn): TransportRequestCallback
- simulateIndexTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ shrink(params: T.IndicesShrinkRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ shrink(params: T.IndicesShrinkRequest, callback: callbackFn): TransportRequestCallback
+ shrink(params: T.IndicesShrinkRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ simulateIndexTemplate(params?: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ simulateIndexTemplate(callback: callbackFn): TransportRequestCallback
+ simulateIndexTemplate(params: T.IndicesSimulateIndexTemplateRequest, callback: callbackFn): TransportRequestCallback
+ simulateIndexTemplate(params: T.IndicesSimulateIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
simulateTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
simulateTemplate(callback: callbackFn): TransportRequestCallback
simulateTemplate(params: TODO, callback: callbackFn): TransportRequestCallback
simulateTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- split(params: T.SplitIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
- split(params: T.SplitIndexRequest, callback: callbackFn): TransportRequestCallback
- split(params: T.SplitIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ split(params: T.IndicesSplitRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ split(params: T.IndicesSplitRequest, callback: callbackFn): TransportRequestCallback
+ split(params: T.IndicesSplitRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
stats(params?: T.IndicesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
stats(callback: callbackFn): TransportRequestCallback
stats(params: T.IndicesStatsRequest, callback: callbackFn): TransportRequestCallback
stats(params: T.IndicesStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- unfreeze(params: T.UnfreezeIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
- unfreeze(params: T.UnfreezeIndexRequest, callback: callbackFn): TransportRequestCallback
- unfreeze(params: T.UnfreezeIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- updateAliases(params?: T.BulkAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
- updateAliases(callback: callbackFn): TransportRequestCallback
- updateAliases(params: T.BulkAliasRequest, callback: callbackFn): TransportRequestCallback
- updateAliases(params: T.BulkAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ unfreeze(params: T.IndicesUnfreezeRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ unfreeze(params: T.IndicesUnfreezeRequest, callback: callbackFn): TransportRequestCallback
+ unfreeze(params: T.IndicesUnfreezeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ updateAliases(params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ updateAliases(callback: callbackFn): TransportRequestCallback
+ updateAliases(params: T.IndicesUpdateAliasesRequest, callback: callbackFn): TransportRequestCallback
+ updateAliases(params: T.IndicesUpdateAliasesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
upgrade(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
upgrade(callback: callbackFn): TransportRequestCallback
upgrade(params: TODO, callback: callbackFn): TransportRequestCallback
upgrade(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- validateQuery(params?: T.ValidateQueryRequest, options?: TransportRequestOptions): TransportRequestPromise>
- validateQuery(callback: callbackFn): TransportRequestCallback
- validateQuery(params: T.ValidateQueryRequest, callback: callbackFn): TransportRequestCallback
- validateQuery(params: T.ValidateQueryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ validateQuery(params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ validateQuery(callback: callbackFn): TransportRequestCallback
+ validateQuery(params: T.IndicesValidateQueryRequest, callback: callbackFn): TransportRequestCallback
+ validateQuery(params: T.IndicesValidateQueryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
}
- info(params?: T.RootNodeInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
- info(callback: callbackFn): TransportRequestCallback
- info(params: T.RootNodeInfoRequest, callback: callbackFn): TransportRequestCallback
- info(params: T.RootNodeInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ info(params?: T.InfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ info(callback: callbackFn): TransportRequestCallback
+ info(params: T.InfoRequest, callback: callbackFn): TransportRequestCallback
+ info(params: T.InfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
ingest: {
- deletePipeline(params: T.DeletePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deletePipeline(params: T.DeletePipelineRequest, callback: callbackFn): TransportRequestCallback
- deletePipeline(params: T.DeletePipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- geoIpStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- geoIpStats(callback: callbackFn): TransportRequestCallback
- geoIpStats(params: TODO, callback: callbackFn): TransportRequestCallback
- geoIpStats(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getPipeline(params?: T.GetPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getPipeline(callback: callbackFn): TransportRequestCallback
- getPipeline(params: T.GetPipelineRequest, callback: callbackFn): TransportRequestCallback
- getPipeline(params: T.GetPipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- processorGrok(params?: T.GrokProcessorPatternsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- processorGrok(callback: callbackFn): TransportRequestCallback
- processorGrok(params: T.GrokProcessorPatternsRequest, callback: callbackFn): TransportRequestCallback
- processorGrok(params: T.GrokProcessorPatternsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putPipeline(params: T.PutPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putPipeline(params: T.PutPipelineRequest, callback: callbackFn): TransportRequestCallback
- putPipeline(params: T.PutPipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- simulate(params?: T.SimulatePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
- simulate(callback: callbackFn): TransportRequestCallback
- simulate(params: T.SimulatePipelineRequest, callback: callbackFn): TransportRequestCallback
- simulate(params: T.SimulatePipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deletePipeline(params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deletePipeline(params: T.IngestDeletePipelineRequest, callback: callbackFn): TransportRequestCallback
+ deletePipeline(params: T.IngestDeletePipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ geoIpStats(params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ geoIpStats(callback: callbackFn): TransportRequestCallback
+ geoIpStats(params: T.IngestGeoIpStatsRequest, callback: callbackFn): TransportRequestCallback
+ geoIpStats(params: T.IngestGeoIpStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ getPipeline(params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ getPipeline(callback: callbackFn): TransportRequestCallback
+ getPipeline(params: T.IngestGetPipelineRequest, callback: callbackFn): TransportRequestCallback
+ getPipeline(params: T.IngestGetPipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ processorGrok(params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ processorGrok(callback: callbackFn): TransportRequestCallback
+ processorGrok(params: T.IngestProcessorGrokRequest, callback: callbackFn): TransportRequestCallback
+ processorGrok(params: T.IngestProcessorGrokRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ putPipeline(params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ putPipeline(params: T.IngestPutPipelineRequest, callback: callbackFn): TransportRequestCallback
+ putPipeline(params: T.IngestPutPipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ simulate(params?: T.IngestSimulatePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ simulate(callback: callbackFn): TransportRequestCallback
+ simulate(params: T.IngestSimulatePipelineRequest, callback: callbackFn): TransportRequestCallback
+ simulate(params: T.IngestSimulatePipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
}
license: {
- delete(params?: T.DeleteLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise>
- delete(callback: callbackFn): TransportRequestCallback
- delete(params: T.DeleteLicenseRequest, callback: callbackFn): TransportRequestCallback
- delete(params: T.DeleteLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- get(params?: T.GetLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise>
- get(callback: callbackFn): TransportRequestCallback
- get(params: T.GetLicenseRequest, callback: callbackFn): TransportRequestCallback
- get(params: T.GetLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getBasicStatus(params?: T.GetBasicLicenseStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getBasicStatus(callback: callbackFn): TransportRequestCallback
- getBasicStatus(params: T.GetBasicLicenseStatusRequest, callback: callbackFn): TransportRequestCallback
- getBasicStatus(params: T.GetBasicLicenseStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getTrialStatus(params?: T.GetTrialLicenseStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getTrialStatus(callback: callbackFn): TransportRequestCallback
- getTrialStatus(params: T.GetTrialLicenseStatusRequest, callback: callbackFn): TransportRequestCallback
- getTrialStatus(params: T.GetTrialLicenseStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- post(params?: T.PostLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise>
- post(callback: callbackFn): TransportRequestCallback
- post(params: T.PostLicenseRequest, callback: callbackFn): TransportRequestCallback
- post(params: T.PostLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- postStartBasic(params?: T.StartBasicLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise>
- postStartBasic(callback: callbackFn): TransportRequestCallback
- postStartBasic(params: T.StartBasicLicenseRequest, callback: callbackFn): TransportRequestCallback
- postStartBasic(params: T.StartBasicLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- postStartTrial(params?: T.StartTrialLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise>
- postStartTrial(callback: callbackFn): TransportRequestCallback
- postStartTrial(params: T.StartTrialLicenseRequest, callback: callbackFn): TransportRequestCallback
- postStartTrial(params: T.StartTrialLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ delete(params?: T.LicenseDeleteLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ delete(callback: callbackFn): TransportRequestCallback
+ delete(params: T.LicenseDeleteLicenseRequest, callback: callbackFn): TransportRequestCallback
+ delete(params: T.LicenseDeleteLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ get(params?: T.LicenseGetLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ get(callback: callbackFn): TransportRequestCallback
+ get(params: T.LicenseGetLicenseRequest, callback: callbackFn): TransportRequestCallback
+ get(params: T.LicenseGetLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ getBasicStatus(params?: T.LicenseGetBasicLicenseStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ getBasicStatus(callback: callbackFn): TransportRequestCallback
+ getBasicStatus(params: T.LicenseGetBasicLicenseStatusRequest, callback: callbackFn): TransportRequestCallback
+ getBasicStatus(params: T.LicenseGetBasicLicenseStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ getTrialStatus(params?: T.LicenseGetTrialLicenseStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ getTrialStatus(callback: callbackFn): TransportRequestCallback
+ getTrialStatus(params: T.LicenseGetTrialLicenseStatusRequest, callback: callbackFn): TransportRequestCallback
+ getTrialStatus(params: T.LicenseGetTrialLicenseStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ post(params?: T.LicensePostLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ post(callback: callbackFn): TransportRequestCallback
+ post(params: T.LicensePostLicenseRequest, callback: callbackFn): TransportRequestCallback
+ post(params: T.LicensePostLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ postStartBasic(params?: T.LicenseStartBasicLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ postStartBasic(callback: callbackFn): TransportRequestCallback
+ postStartBasic(params: T.LicenseStartBasicLicenseRequest, callback: callbackFn): TransportRequestCallback
+ postStartBasic(params: T.LicenseStartBasicLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ postStartTrial(params?: T.LicenseStartTrialLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ postStartTrial(callback: callbackFn): TransportRequestCallback
+ postStartTrial(params: T.LicenseStartTrialLicenseRequest, callback: callbackFn): TransportRequestCallback
+ postStartTrial(params: T.LicenseStartTrialLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
}
logstash: {
deletePipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
@@ -790,236 +793,233 @@ declare class Client {
putPipeline(params: TODO, callback: callbackFn): TransportRequestCallback
putPipeline(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
}
- mget(params?: T.MultiGetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
- mget(callback: callbackFn, TContext>): TransportRequestCallback
- mget(params: T.MultiGetRequest, callback: callbackFn, TContext>): TransportRequestCallback
- mget(params: T.MultiGetRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
+ mget(params?: T.MgetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
+ mget(callback: callbackFn, TContext>): TransportRequestCallback
+ mget(params: T.MgetRequest, callback: callbackFn, TContext>): TransportRequestCallback
+ mget(params: T.MgetRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
migration: {
- deprecations(params?: T.DeprecationInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deprecations(callback: callbackFn): TransportRequestCallback
- deprecations(params: T.DeprecationInfoRequest, callback: callbackFn): TransportRequestCallback
- deprecations(params: T.DeprecationInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deprecations(params?: T.MigrationDeprecationInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deprecations(callback: callbackFn): TransportRequestCallback
+ deprecations(params: T.MigrationDeprecationInfoRequest, callback: callbackFn): TransportRequestCallback
+ deprecations(params: T.MigrationDeprecationInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
}
ml: {
- closeJob(params: T.CloseJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- closeJob(params: T.CloseJobRequest, callback: callbackFn): TransportRequestCallback
- closeJob(params: T.CloseJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteCalendar(params: T.DeleteCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteCalendar(params: T.DeleteCalendarRequest, callback: callbackFn): TransportRequestCallback
- deleteCalendar(params: T.DeleteCalendarRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteCalendarEvent(params: T.DeleteCalendarEventRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteCalendarEvent(params: T.DeleteCalendarEventRequest, callback: callbackFn): TransportRequestCallback
- deleteCalendarEvent(params: T.DeleteCalendarEventRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteCalendarJob(params: T.DeleteCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteCalendarJob(params: T.DeleteCalendarJobRequest, callback: callbackFn): TransportRequestCallback
- deleteCalendarJob(params: T.DeleteCalendarJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteDataFrameAnalytics(params: T.DeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteDataFrameAnalytics(params: T.DeleteDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback
- deleteDataFrameAnalytics(params: T.DeleteDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteDatafeed(params: T.DeleteDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteDatafeed(params: T.DeleteDatafeedRequest, callback: callbackFn): TransportRequestCallback
- deleteDatafeed(params: T.DeleteDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteExpiredData(params?: T.DeleteExpiredDataRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteExpiredData(callback: callbackFn): TransportRequestCallback
- deleteExpiredData(params: T.DeleteExpiredDataRequest, callback: callbackFn): TransportRequestCallback
- deleteExpiredData(params: T.DeleteExpiredDataRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteFilter(params: T.DeleteFilterRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteFilter(params: T.DeleteFilterRequest, callback: callbackFn): TransportRequestCallback
- deleteFilter(params: T.DeleteFilterRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteForecast(params: T.DeleteForecastRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteForecast(params: T.DeleteForecastRequest, callback: callbackFn): TransportRequestCallback
- deleteForecast(params: T.DeleteForecastRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteJob(params: T.DeleteJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteJob(params: T.DeleteJobRequest, callback: callbackFn): TransportRequestCallback
- deleteJob(params: T.DeleteJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteModelSnapshot(params: T.DeleteModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteModelSnapshot(params: T.DeleteModelSnapshotRequest, callback: callbackFn): TransportRequestCallback
- deleteModelSnapshot(params: T.DeleteModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteTrainedModel(params: T.DeleteTrainedModelRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteTrainedModel(params: T.DeleteTrainedModelRequest, callback: callbackFn): TransportRequestCallback
- deleteTrainedModel(params: T.DeleteTrainedModelRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteTrainedModelAlias(params: T.DeleteTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteTrainedModelAlias(params: T.DeleteTrainedModelAliasRequest, callback: callbackFn): TransportRequestCallback
- deleteTrainedModelAlias(params: T.DeleteTrainedModelAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- estimateModelMemory(params?: T.EstimateModelMemoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
- estimateModelMemory(callback: callbackFn): TransportRequestCallback
- estimateModelMemory(params: T.EstimateModelMemoryRequest, callback: callbackFn): TransportRequestCallback
- estimateModelMemory(params: T.EstimateModelMemoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- evaluateDataFrame(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- evaluateDataFrame(callback: callbackFn): TransportRequestCallback
- evaluateDataFrame(params: TODO, callback: callbackFn): TransportRequestCallback
- evaluateDataFrame(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- explainDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- explainDataFrameAnalytics(callback: callbackFn): TransportRequestCallback
- explainDataFrameAnalytics(params: TODO, callback: callbackFn): TransportRequestCallback
- explainDataFrameAnalytics(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ closeJob(params: T.MlCloseJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ closeJob(params: T.MlCloseJobRequest, callback: callbackFn): TransportRequestCallback
+ closeJob(params: T.MlCloseJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteCalendar(params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteCalendar(params: T.MlDeleteCalendarRequest, callback: callbackFn): TransportRequestCallback
+ deleteCalendar(params: T.MlDeleteCalendarRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteCalendarEvent(params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteCalendarEvent(params: T.MlDeleteCalendarEventRequest, callback: callbackFn): TransportRequestCallback
+ deleteCalendarEvent(params: T.MlDeleteCalendarEventRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteCalendarJob(params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteCalendarJob(params: T.MlDeleteCalendarJobRequest, callback: callbackFn): TransportRequestCallback
+ deleteCalendarJob(params: T.MlDeleteCalendarJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteDataFrameAnalytics(params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteDataFrameAnalytics(params: T.MlDeleteDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback
+ deleteDataFrameAnalytics(params: T.MlDeleteDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteDatafeed(params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteDatafeed(params: T.MlDeleteDatafeedRequest, callback: callbackFn): TransportRequestCallback
+ deleteDatafeed(params: T.MlDeleteDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteExpiredData(params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteExpiredData(callback: callbackFn): TransportRequestCallback
+ deleteExpiredData(params: T.MlDeleteExpiredDataRequest, callback: callbackFn): TransportRequestCallback
+ deleteExpiredData(params: T.MlDeleteExpiredDataRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteFilter(params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteFilter(params: T.MlDeleteFilterRequest, callback: callbackFn): TransportRequestCallback
+ deleteFilter(params: T.MlDeleteFilterRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteForecast(params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteForecast(params: T.MlDeleteForecastRequest, callback: callbackFn): TransportRequestCallback
+ deleteForecast(params: T.MlDeleteForecastRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteJob(params: T.MlDeleteJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteJob(params: T.MlDeleteJobRequest, callback: callbackFn): TransportRequestCallback
+ deleteJob(params: T.MlDeleteJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteModelSnapshot(params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteModelSnapshot(params: T.MlDeleteModelSnapshotRequest, callback: callbackFn): TransportRequestCallback
+ deleteModelSnapshot(params: T.MlDeleteModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteTrainedModel(params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteTrainedModel(params: T.MlDeleteTrainedModelRequest, callback: callbackFn): TransportRequestCallback
+ deleteTrainedModel(params: T.MlDeleteTrainedModelRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ deleteTrainedModelAlias(params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ deleteTrainedModelAlias(params: T.MlDeleteTrainedModelAliasRequest, callback: callbackFn): TransportRequestCallback
+ deleteTrainedModelAlias(params: T.MlDeleteTrainedModelAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ estimateModelMemory(params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ estimateModelMemory(callback: callbackFn): TransportRequestCallback
+ estimateModelMemory(params: T.MlEstimateModelMemoryRequest, callback: callbackFn): TransportRequestCallback
+ estimateModelMemory(params: T.MlEstimateModelMemoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ evaluateDataFrame(params?: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ evaluateDataFrame(callback: callbackFn): TransportRequestCallback
+ evaluateDataFrame(params: T.MlEvaluateDataFrameRequest, callback: callbackFn): TransportRequestCallback
+ evaluateDataFrame(params: T.MlEvaluateDataFrameRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+ explainDataFrameAnalytics(params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+ explainDataFrameAnalytics(callback: callbackFn): TransportRequestCallback
+ explainDataFrameAnalytics(params: T.MlExplainDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback
+ explainDataFrameAnalytics(params: T.MlExplainDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
findFileStructure(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
findFileStructure(callback: callbackFn): TransportRequestCallback findFileStructure(params: TODO, callback: callbackFn): TransportRequestCallback findFileStructure(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - flushJob(params: T.FlushJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - flushJob(params: T.FlushJobRequest, callback: callbackFn): TransportRequestCallback - flushJob(params: T.FlushJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - forecast(params: T.ForecastJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - forecast(params: T.ForecastJobRequest, callback: callbackFn): TransportRequestCallback - forecast(params: T.ForecastJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getBuckets(params: T.GetBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getBuckets(params: T.GetBucketsRequest, callback: callbackFn): TransportRequestCallback - getBuckets(params: T.GetBucketsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getCalendarEvents(params: T.GetCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getCalendarEvents(params: T.GetCalendarEventsRequest, callback: callbackFn): TransportRequestCallback - getCalendarEvents(params: T.GetCalendarEventsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getCalendars(params?: T.GetCalendarsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getCalendars(callback: callbackFn): TransportRequestCallback - getCalendars(params: T.GetCalendarsRequest, callback: callbackFn): TransportRequestCallback - getCalendars(params: T.GetCalendarsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getCategories(params: T.GetCategoriesRequest, options?: TransportRequestOptions): TransportRequestPromise> - getCategories(params: T.GetCategoriesRequest, callback: callbackFn): TransportRequestCallback - getCategories(params: T.GetCategoriesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getDataFrameAnalytics(callback: callbackFn): TransportRequestCallback - getDataFrameAnalytics(params: TODO, callback: callbackFn): TransportRequestCallback - getDataFrameAnalytics(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDataFrameAnalyticsStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getDataFrameAnalyticsStats(callback: callbackFn): TransportRequestCallback - getDataFrameAnalyticsStats(params: TODO, callback: callbackFn): TransportRequestCallback - getDataFrameAnalyticsStats(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDatafeedStats(params?: T.GetDatafeedStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getDatafeedStats(callback: callbackFn): TransportRequestCallback - getDatafeedStats(params: T.GetDatafeedStatsRequest, callback: callbackFn): TransportRequestCallback - getDatafeedStats(params: T.GetDatafeedStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDatafeeds(params?: T.GetDatafeedsRequest, options?: 
TransportRequestOptions): TransportRequestPromise> - getDatafeeds(callback: callbackFn): TransportRequestCallback - getDatafeeds(params: T.GetDatafeedsRequest, callback: callbackFn): TransportRequestCallback - getDatafeeds(params: T.GetDatafeedsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getFilters(params?: T.GetFiltersRequest, options?: TransportRequestOptions): TransportRequestPromise> - getFilters(callback: callbackFn): TransportRequestCallback - getFilters(params: T.GetFiltersRequest, callback: callbackFn): TransportRequestCallback - getFilters(params: T.GetFiltersRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getInfluencers(params: T.GetInfluencersRequest, options?: TransportRequestOptions): TransportRequestPromise> - getInfluencers(params: T.GetInfluencersRequest, callback: callbackFn): TransportRequestCallback - getInfluencers(params: T.GetInfluencersRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getJobStats(params?: T.GetJobStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getJobStats(callback: callbackFn): TransportRequestCallback - getJobStats(params: T.GetJobStatsRequest, callback: callbackFn): TransportRequestCallback - getJobStats(params: T.GetJobStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getJobs(params?: T.GetJobsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getJobs(callback: callbackFn): TransportRequestCallback - getJobs(params: T.GetJobsRequest, callback: callbackFn): TransportRequestCallback - getJobs(params: T.GetJobsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getModelSnapshots(params: T.GetModelSnapshotsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getModelSnapshots(params: T.GetModelSnapshotsRequest, callback: callbackFn): TransportRequestCallback - getModelSnapshots(params: T.GetModelSnapshotsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getOverallBuckets(params: T.GetOverallBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getOverallBuckets(params: T.GetOverallBucketsRequest, callback: callbackFn): TransportRequestCallback - getOverallBuckets(params: T.GetOverallBucketsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getRecords(params: T.GetAnomalyRecordsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getRecords(params: T.GetAnomalyRecordsRequest, callback: callbackFn): TransportRequestCallback - getRecords(params: T.GetAnomalyRecordsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTrainedModels(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getTrainedModels(callback: callbackFn): TransportRequestCallback - getTrainedModels(params: TODO, callback: callbackFn): TransportRequestCallback - getTrainedModels(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTrainedModelsStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getTrainedModelsStats(callback: callbackFn): TransportRequestCallback - getTrainedModelsStats(params: TODO, callback: callbackFn): TransportRequestCallback - getTrainedModelsStats(params: TODO, options: TransportRequestOptions, callback: 
callbackFn): TransportRequestCallback - info(params?: T.MachineLearningInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> - info(callback: callbackFn): TransportRequestCallback - info(params: T.MachineLearningInfoRequest, callback: callbackFn): TransportRequestCallback - info(params: T.MachineLearningInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - openJob(params: T.OpenJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - openJob(params: T.OpenJobRequest, callback: callbackFn): TransportRequestCallback - openJob(params: T.OpenJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postCalendarEvents(params: T.PostCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise> - postCalendarEvents(params: T.PostCalendarEventsRequest, callback: callbackFn): TransportRequestCallback - postCalendarEvents(params: T.PostCalendarEventsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postData(params: T.PostJobDataRequest, options?: TransportRequestOptions): TransportRequestPromise> - postData(params: T.PostJobDataRequest, callback: callbackFn): TransportRequestCallback - postData(params: T.PostJobDataRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - previewDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - previewDataFrameAnalytics(callback: callbackFn): TransportRequestCallback - previewDataFrameAnalytics(params: TODO, callback: callbackFn): TransportRequestCallback - previewDataFrameAnalytics(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - previewDatafeed(params: T.PreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - previewDatafeed(params: T.PreviewDatafeedRequest, callback: callbackFn, TContext>): TransportRequestCallback - previewDatafeed(params: T.PreviewDatafeedRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback - putCalendar(params: T.PutCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise> - putCalendar(params: T.PutCalendarRequest, callback: callbackFn): TransportRequestCallback - putCalendar(params: T.PutCalendarRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putCalendarJob(params: T.PutCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - putCalendarJob(params: T.PutCalendarJobRequest, callback: callbackFn): TransportRequestCallback - putCalendarJob(params: T.PutCalendarJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - putDataFrameAnalytics(callback: callbackFn): TransportRequestCallback - putDataFrameAnalytics(params: TODO, callback: callbackFn): TransportRequestCallback - putDataFrameAnalytics(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putDatafeed(params: T.PutDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise> - putDatafeed(params: T.PutDatafeedRequest, callback: callbackFn): TransportRequestCallback - putDatafeed(params: T.PutDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - 
putFilter(params: T.PutFilterRequest, options?: TransportRequestOptions): TransportRequestPromise> - putFilter(params: T.PutFilterRequest, callback: callbackFn): TransportRequestCallback - putFilter(params: T.PutFilterRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putJob(params: T.PutJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - putJob(params: T.PutJobRequest, callback: callbackFn): TransportRequestCallback - putJob(params: T.PutJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + flushJob(params: T.MlFlushJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + flushJob(params: T.MlFlushJobRequest, callback: callbackFn): TransportRequestCallback + flushJob(params: T.MlFlushJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + forecast(params: T.MlForecastJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + forecast(params: T.MlForecastJobRequest, callback: callbackFn): TransportRequestCallback + forecast(params: T.MlForecastJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getBuckets(params: T.MlGetBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getBuckets(params: T.MlGetBucketsRequest, callback: callbackFn): TransportRequestCallback + getBuckets(params: T.MlGetBucketsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getCalendarEvents(params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getCalendarEvents(params: T.MlGetCalendarEventsRequest, callback: callbackFn): TransportRequestCallback + getCalendarEvents(params: T.MlGetCalendarEventsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getCalendars(params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getCalendars(callback: callbackFn): TransportRequestCallback + getCalendars(params: T.MlGetCalendarsRequest, callback: callbackFn): TransportRequestCallback + getCalendars(params: T.MlGetCalendarsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getCategories(params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): TransportRequestPromise> + getCategories(params: T.MlGetCategoriesRequest, callback: callbackFn): TransportRequestCallback + getCategories(params: T.MlGetCategoriesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getDataFrameAnalytics(params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getDataFrameAnalytics(callback: callbackFn): TransportRequestCallback + getDataFrameAnalytics(params: T.MlGetDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback + getDataFrameAnalytics(params: T.MlGetDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getDataFrameAnalyticsStats(params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getDataFrameAnalyticsStats(callback: callbackFn): TransportRequestCallback + getDataFrameAnalyticsStats(params: T.MlGetDataFrameAnalyticsStatsRequest, callback: callbackFn): TransportRequestCallback + getDataFrameAnalyticsStats(params: T.MlGetDataFrameAnalyticsStatsRequest, options: 
TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getDatafeedStats(params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getDatafeedStats(callback: callbackFn): TransportRequestCallback + getDatafeedStats(params: T.MlGetDatafeedStatsRequest, callback: callbackFn): TransportRequestCallback + getDatafeedStats(params: T.MlGetDatafeedStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getDatafeeds(params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getDatafeeds(callback: callbackFn): TransportRequestCallback + getDatafeeds(params: T.MlGetDatafeedsRequest, callback: callbackFn): TransportRequestCallback + getDatafeeds(params: T.MlGetDatafeedsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getFilters(params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): TransportRequestPromise> + getFilters(callback: callbackFn): TransportRequestCallback + getFilters(params: T.MlGetFiltersRequest, callback: callbackFn): TransportRequestCallback + getFilters(params: T.MlGetFiltersRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getInfluencers(params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): TransportRequestPromise> + getInfluencers(params: T.MlGetInfluencersRequest, callback: callbackFn): TransportRequestCallback + getInfluencers(params: T.MlGetInfluencersRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getJobStats(params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getJobStats(callback: callbackFn): TransportRequestCallback + getJobStats(params: T.MlGetJobStatsRequest, callback: callbackFn): TransportRequestCallback + getJobStats(params: T.MlGetJobStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getJobs(params?: T.MlGetJobsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getJobs(callback: callbackFn): TransportRequestCallback + getJobs(params: T.MlGetJobsRequest, callback: callbackFn): TransportRequestCallback + getJobs(params: T.MlGetJobsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getModelSnapshots(params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getModelSnapshots(params: T.MlGetModelSnapshotsRequest, callback: callbackFn): TransportRequestCallback + getModelSnapshots(params: T.MlGetModelSnapshotsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getOverallBuckets(params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getOverallBuckets(params: T.MlGetOverallBucketsRequest, callback: callbackFn): TransportRequestCallback + getOverallBuckets(params: T.MlGetOverallBucketsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getRecords(params: T.MlGetAnomalyRecordsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getRecords(params: T.MlGetAnomalyRecordsRequest, callback: callbackFn): TransportRequestCallback + getRecords(params: T.MlGetAnomalyRecordsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getTrainedModels(params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): 
TransportRequestPromise> + getTrainedModels(callback: callbackFn): TransportRequestCallback + getTrainedModels(params: T.MlGetTrainedModelsRequest, callback: callbackFn): TransportRequestCallback + getTrainedModels(params: T.MlGetTrainedModelsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getTrainedModelsStats(params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> + getTrainedModelsStats(callback: callbackFn): TransportRequestCallback + getTrainedModelsStats(params: T.MlGetTrainedModelsStatsRequest, callback: callbackFn): TransportRequestCallback + getTrainedModelsStats(params: T.MlGetTrainedModelsStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + info(params?: T.MlInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> + info(callback: callbackFn): TransportRequestCallback + info(params: T.MlInfoRequest, callback: callbackFn): TransportRequestCallback + info(params: T.MlInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + openJob(params: T.MlOpenJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + openJob(params: T.MlOpenJobRequest, callback: callbackFn): TransportRequestCallback + openJob(params: T.MlOpenJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + postCalendarEvents(params?: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise> + postCalendarEvents(callback: callbackFn): TransportRequestCallback + postCalendarEvents(params: T.MlPostCalendarEventsRequest, callback: callbackFn): TransportRequestCallback + postCalendarEvents(params: T.MlPostCalendarEventsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + postData(params: T.MlPostJobDataRequest, options?: TransportRequestOptions): TransportRequestPromise> + postData(params: T.MlPostJobDataRequest, callback: callbackFn): TransportRequestCallback + postData(params: T.MlPostJobDataRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + previewDataFrameAnalytics(params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> + previewDataFrameAnalytics(callback: callbackFn): TransportRequestCallback + previewDataFrameAnalytics(params: T.MlPreviewDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback + previewDataFrameAnalytics(params: T.MlPreviewDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + previewDatafeed(params: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> + previewDatafeed(params: T.MlPreviewDatafeedRequest, callback: callbackFn, TContext>): TransportRequestCallback + previewDatafeed(params: T.MlPreviewDatafeedRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback + putCalendar(params: T.MlPutCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise> + putCalendar(params: T.MlPutCalendarRequest, callback: callbackFn): TransportRequestCallback + putCalendar(params: T.MlPutCalendarRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + putCalendarJob(params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + putCalendarJob(params: 
T.MlPutCalendarJobRequest, callback: callbackFn): TransportRequestCallback + putCalendarJob(params: T.MlPutCalendarJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + putDataFrameAnalytics(params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> + putDataFrameAnalytics(params: T.MlPutDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback + putDataFrameAnalytics(params: T.MlPutDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + putDatafeed(params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise> + putDatafeed(params: T.MlPutDatafeedRequest, callback: callbackFn): TransportRequestCallback + putDatafeed(params: T.MlPutDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + putFilter(params: T.MlPutFilterRequest, options?: TransportRequestOptions): TransportRequestPromise> + putFilter(params: T.MlPutFilterRequest, callback: callbackFn): TransportRequestCallback + putFilter(params: T.MlPutFilterRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + putJob(params: T.MlPutJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + putJob(params: T.MlPutJobRequest, callback: callbackFn): TransportRequestCallback + putJob(params: T.MlPutJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback putTrainedModel(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> putTrainedModel(callback: callbackFn): TransportRequestCallback putTrainedModel(params: TODO, callback: callbackFn): TransportRequestCallback putTrainedModel(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putTrainedModelAlias(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - putTrainedModelAlias(callback: callbackFn): TransportRequestCallback - putTrainedModelAlias(params: TODO, callback: callbackFn): TransportRequestCallback - putTrainedModelAlias(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - revertModelSnapshot(params: T.RevertModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> - revertModelSnapshot(params: T.RevertModelSnapshotRequest, callback: callbackFn): TransportRequestCallback - revertModelSnapshot(params: T.RevertModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - setUpgradeMode(params?: T.SetUpgradeModeRequest, options?: TransportRequestOptions): TransportRequestPromise> - setUpgradeMode(callback: callbackFn): TransportRequestCallback - setUpgradeMode(params: T.SetUpgradeModeRequest, callback: callbackFn): TransportRequestCallback - setUpgradeMode(params: T.SetUpgradeModeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - startDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - startDataFrameAnalytics(callback: callbackFn): TransportRequestCallback - startDataFrameAnalytics(params: TODO, callback: callbackFn): TransportRequestCallback - startDataFrameAnalytics(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - startDatafeed(params: T.StartDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise> - 
startDatafeed(params: T.StartDatafeedRequest, callback: callbackFn): TransportRequestCallback - startDatafeed(params: T.StartDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stopDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - stopDataFrameAnalytics(callback: callbackFn): TransportRequestCallback - stopDataFrameAnalytics(params: TODO, callback: callbackFn): TransportRequestCallback - stopDataFrameAnalytics(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stopDatafeed(params: T.StopDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise> - stopDatafeed(params: T.StopDatafeedRequest, callback: callbackFn): TransportRequestCallback - stopDatafeed(params: T.StopDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateDataFrameAnalytics(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - updateDataFrameAnalytics(callback: callbackFn): TransportRequestCallback - updateDataFrameAnalytics(params: TODO, callback: callbackFn): TransportRequestCallback - updateDataFrameAnalytics(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateDatafeed(params: T.UpdateDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateDatafeed(params: T.UpdateDatafeedRequest, callback: callbackFn): TransportRequestCallback - updateDatafeed(params: T.UpdateDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateFilter(params: T.UpdateFilterRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateFilter(params: T.UpdateFilterRequest, callback: callbackFn): TransportRequestCallback - updateFilter(params: T.UpdateFilterRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateJob(params: T.UpdateJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateJob(params: T.UpdateJobRequest, callback: callbackFn): TransportRequestCallback - updateJob(params: T.UpdateJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateModelSnapshot(params: T.UpdateModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateModelSnapshot(params: T.UpdateModelSnapshotRequest, callback: callbackFn): TransportRequestCallback - updateModelSnapshot(params: T.UpdateModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - upgradeJobSnapshot(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - upgradeJobSnapshot(callback: callbackFn): TransportRequestCallback - upgradeJobSnapshot(params: TODO, callback: callbackFn): TransportRequestCallback - upgradeJobSnapshot(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - validate(params?: T.ValidateJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - validate(callback: callbackFn): TransportRequestCallback - validate(params: T.ValidateJobRequest, callback: callbackFn): TransportRequestCallback - validate(params: T.ValidateJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - validateDetector(params?: T.ValidateDetectorRequest, options?: TransportRequestOptions): TransportRequestPromise> - 
validateDetector(callback: callbackFn): TransportRequestCallback - validateDetector(params: T.ValidateDetectorRequest, callback: callbackFn): TransportRequestCallback - validateDetector(params: T.ValidateDetectorRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> + putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, callback: callbackFn): TransportRequestCallback + putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> + revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, callback: callbackFn): TransportRequestCallback + revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + setUpgradeMode(params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): TransportRequestPromise> + setUpgradeMode(callback: callbackFn): TransportRequestCallback + setUpgradeMode(params: T.MlSetUpgradeModeRequest, callback: callbackFn): TransportRequestCallback + setUpgradeMode(params: T.MlSetUpgradeModeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + startDataFrameAnalytics(params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> + startDataFrameAnalytics(params: T.MlStartDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback + startDataFrameAnalytics(params: T.MlStartDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + startDatafeed(params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise> + startDatafeed(params: T.MlStartDatafeedRequest, callback: callbackFn): TransportRequestCallback + startDatafeed(params: T.MlStartDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + stopDataFrameAnalytics(params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> + stopDataFrameAnalytics(params: T.MlStopDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback + stopDataFrameAnalytics(params: T.MlStopDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + stopDatafeed(params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise> + stopDatafeed(params: T.MlStopDatafeedRequest, callback: callbackFn): TransportRequestCallback + stopDatafeed(params: T.MlStopDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + updateDataFrameAnalytics(params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> + updateDataFrameAnalytics(params: T.MlUpdateDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback + updateDataFrameAnalytics(params: T.MlUpdateDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + updateDatafeed(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + updateDatafeed(callback: callbackFn): TransportRequestCallback + updateDatafeed(params: TODO, callback: 
callbackFn): TransportRequestCallback + updateDatafeed(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + updateFilter(params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): TransportRequestPromise> + updateFilter(params: T.MlUpdateFilterRequest, callback: callbackFn): TransportRequestCallback + updateFilter(params: T.MlUpdateFilterRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + updateJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + updateJob(callback: callbackFn): TransportRequestCallback + updateJob(params: TODO, callback: callbackFn): TransportRequestCallback + updateJob(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> + updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, callback: callbackFn): TransportRequestCallback + updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + upgradeJobSnapshot(params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> + upgradeJobSnapshot(params: T.MlUpgradeJobSnapshotRequest, callback: callbackFn): TransportRequestCallback + upgradeJobSnapshot(params: T.MlUpgradeJobSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + validate(params?: T.MlValidateJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + validate(callback: callbackFn): TransportRequestCallback + validate(params: T.MlValidateJobRequest, callback: callbackFn): TransportRequestCallback + validate(params: T.MlValidateJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + validateDetector(params?: T.MlValidateDetectorRequest, options?: TransportRequestOptions): TransportRequestPromise> + validateDetector(callback: callbackFn): TransportRequestCallback + validateDetector(params: T.MlValidateDetectorRequest, callback: callbackFn): TransportRequestCallback + validateDetector(params: T.MlValidateDetectorRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } monitoring: { bulk(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> @@ -1027,39 +1027,39 @@ declare class Client { bulk(params: TODO, callback: callbackFn): TransportRequestCallback bulk(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } - msearch(params?: T.MultiSearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - msearch(callback: callbackFn, TContext>): TransportRequestCallback - msearch(params: T.MultiSearchRequest, callback: callbackFn, TContext>): TransportRequestCallback - msearch(params: T.MultiSearchRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback - msearchTemplate(params?: T.MultiSearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - msearchTemplate(callback: callbackFn): TransportRequestCallback - msearchTemplate(params: T.MultiSearchTemplateRequest, callback: callbackFn): TransportRequestCallback - msearchTemplate(params: T.MultiSearchTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - mtermvectors(params?: 
T.MultiTermVectorsRequest, options?: TransportRequestOptions): TransportRequestPromise> - mtermvectors(callback: callbackFn): TransportRequestCallback - mtermvectors(params: T.MultiTermVectorsRequest, callback: callbackFn): TransportRequestCallback - mtermvectors(params: T.MultiTermVectorsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + msearch(params?: T.MsearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> + msearch(callback: callbackFn, TContext>): TransportRequestCallback + msearch(params: T.MsearchRequest, callback: callbackFn, TContext>): TransportRequestCallback + msearch(params: T.MsearchRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback + msearchTemplate(params?: T.MsearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> + msearchTemplate(callback: callbackFn, TContext>): TransportRequestCallback + msearchTemplate(params: T.MsearchTemplateRequest, callback: callbackFn, TContext>): TransportRequestCallback + msearchTemplate(params: T.MsearchTemplateRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback + mtermvectors(params?: T.MtermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise> + mtermvectors(callback: callbackFn): TransportRequestCallback + mtermvectors(params: T.MtermvectorsRequest, callback: callbackFn): TransportRequestCallback + mtermvectors(params: T.MtermvectorsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback nodes: { - hotThreads(params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise> - hotThreads(callback: callbackFn): TransportRequestCallback - hotThreads(params: T.NodesHotThreadsRequest, callback: callbackFn): TransportRequestCallback - hotThreads(params: T.NodesHotThreadsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - info(params?: T.NodesInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> - info(callback: callbackFn): TransportRequestCallback - info(params: T.NodesInfoRequest, callback: callbackFn): TransportRequestCallback - info(params: T.NodesInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - reloadSecureSettings(params?: T.ReloadSecureSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> - reloadSecureSettings(callback: callbackFn): TransportRequestCallback - reloadSecureSettings(params: T.ReloadSecureSettingsRequest, callback: callbackFn): TransportRequestCallback - reloadSecureSettings(params: T.ReloadSecureSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stats(params?: T.NodesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - stats(callback: callbackFn): TransportRequestCallback - stats(params: T.NodesStatsRequest, callback: callbackFn): TransportRequestCallback - stats(params: T.NodesStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - usage(params?: T.NodesUsageRequest, options?: TransportRequestOptions): TransportRequestPromise> - usage(callback: callbackFn): TransportRequestCallback - usage(params: T.NodesUsageRequest, callback: callbackFn): TransportRequestCallback - usage(params: T.NodesUsageRequest, options: TransportRequestOptions, callback: callbackFn): 
TransportRequestCallback + hotThreads(params?: T.NodesNodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise> + hotThreads(callback: callbackFn): TransportRequestCallback + hotThreads(params: T.NodesNodesHotThreadsRequest, callback: callbackFn): TransportRequestCallback + hotThreads(params: T.NodesNodesHotThreadsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + info(params?: T.NodesNodesInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> + info(callback: callbackFn): TransportRequestCallback + info(params: T.NodesNodesInfoRequest, callback: callbackFn): TransportRequestCallback + info(params: T.NodesNodesInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + reloadSecureSettings(params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> + reloadSecureSettings(callback: callbackFn): TransportRequestCallback + reloadSecureSettings(params: T.NodesReloadSecureSettingsRequest, callback: callbackFn): TransportRequestCallback + reloadSecureSettings(params: T.NodesReloadSecureSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + stats(params?: T.NodesNodesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> + stats(callback: callbackFn): TransportRequestCallback + stats(params: T.NodesNodesStatsRequest, callback: callbackFn): TransportRequestCallback + stats(params: T.NodesNodesStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + usage(params?: T.NodesNodesUsageRequest, options?: TransportRequestOptions): TransportRequestPromise> + usage(callback: callbackFn): TransportRequestCallback + usage(params: T.NodesNodesUsageRequest, callback: callbackFn): TransportRequestCallback + usage(params: T.NodesNodesUsageRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } openPointInTime(params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): TransportRequestPromise> openPointInTime(params: T.OpenPointInTimeRequest, callback: callbackFn): TransportRequestCallback @@ -1071,10 +1071,9 @@ declare class Client { putScript(params: T.PutScriptRequest, options?: TransportRequestOptions): TransportRequestPromise> putScript(params: T.PutScriptRequest, callback: callbackFn): TransportRequestCallback putScript(params: T.PutScriptRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - rankEval(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - rankEval(callback: callbackFn): TransportRequestCallback - rankEval(params: TODO, callback: callbackFn): TransportRequestCallback - rankEval(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + rankEval(params: T.RankEvalRequest, options?: TransportRequestOptions): TransportRequestPromise> + rankEval(params: T.RankEvalRequest, callback: callbackFn): TransportRequestCallback + rankEval(params: T.RankEvalRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback reindex(params?: T.ReindexRequest, options?: TransportRequestOptions): TransportRequestPromise> reindex(callback: callbackFn): TransportRequestCallback reindex(params: T.ReindexRequest, callback: callbackFn): TransportRequestCallback @@ -1087,41 +1086,41 @@ declare class Client { renderSearchTemplate(params: T.RenderSearchTemplateRequest, 
callback: callbackFn): TransportRequestCallback renderSearchTemplate(params: T.RenderSearchTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback rollup: { - deleteJob(params: T.DeleteRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteJob(params: T.DeleteRollupJobRequest, callback: callbackFn): TransportRequestCallback - deleteJob(params: T.DeleteRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getJobs(params?: T.GetRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - getJobs(callback: callbackFn): TransportRequestCallback - getJobs(params: T.GetRollupJobRequest, callback: callbackFn): TransportRequestCallback - getJobs(params: T.GetRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getRollupCaps(params?: T.GetRollupCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise> - getRollupCaps(callback: callbackFn): TransportRequestCallback - getRollupCaps(params: T.GetRollupCapabilitiesRequest, callback: callbackFn): TransportRequestCallback - getRollupCaps(params: T.GetRollupCapabilitiesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getRollupIndexCaps(params: T.GetRollupIndexCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise> - getRollupIndexCaps(params: T.GetRollupIndexCapabilitiesRequest, callback: callbackFn): TransportRequestCallback - getRollupIndexCaps(params: T.GetRollupIndexCapabilitiesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putJob(params: T.CreateRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - putJob(params: T.CreateRollupJobRequest, callback: callbackFn): TransportRequestCallback - putJob(params: T.CreateRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + deleteJob(params: T.RollupDeleteRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + deleteJob(params: T.RollupDeleteRollupJobRequest, callback: callbackFn): TransportRequestCallback + deleteJob(params: T.RollupDeleteRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getJobs(params?: T.RollupGetRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + getJobs(callback: callbackFn): TransportRequestCallback + getJobs(params: T.RollupGetRollupJobRequest, callback: callbackFn): TransportRequestCallback + getJobs(params: T.RollupGetRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getRollupCaps(params?: T.RollupGetRollupCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise> + getRollupCaps(callback: callbackFn): TransportRequestCallback + getRollupCaps(params: T.RollupGetRollupCapabilitiesRequest, callback: callbackFn): TransportRequestCallback + getRollupCaps(params: T.RollupGetRollupCapabilitiesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getRollupIndexCaps(params: T.RollupGetRollupIndexCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise> + getRollupIndexCaps(params: T.RollupGetRollupIndexCapabilitiesRequest, callback: callbackFn): TransportRequestCallback + getRollupIndexCaps(params: T.RollupGetRollupIndexCapabilitiesRequest, options: 
TransportRequestOptions, callback: callbackFn): TransportRequestCallback + putJob(params: T.RollupCreateRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + putJob(params: T.RollupCreateRollupJobRequest, callback: callbackFn): TransportRequestCallback + putJob(params: T.RollupCreateRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback rollup(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> rollup(callback: callbackFn): TransportRequestCallback rollup(params: TODO, callback: callbackFn): TransportRequestCallback rollup(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - rollupSearch(params: T.RollupSearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - rollupSearch(params: T.RollupSearchRequest, callback: callbackFn, TContext>): TransportRequestCallback - rollupSearch(params: T.RollupSearchRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback - startJob(params: T.StartRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - startJob(params: T.StartRollupJobRequest, callback: callbackFn): TransportRequestCallback - startJob(params: T.StartRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stopJob(params: T.StopRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - stopJob(params: T.StopRollupJobRequest, callback: callbackFn): TransportRequestCallback - stopJob(params: T.StopRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + rollupSearch(params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> + rollupSearch(params: T.RollupRollupSearchRequest, callback: callbackFn, TContext>): TransportRequestCallback + rollupSearch(params: T.RollupRollupSearchRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback + startJob(params: T.RollupStartRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + startJob(params: T.RollupStartRollupJobRequest, callback: callbackFn): TransportRequestCallback + startJob(params: T.RollupStartRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + stopJob(params: T.RollupStopRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + stopJob(params: T.RollupStopRollupJobRequest, callback: callbackFn): TransportRequestCallback + stopJob(params: T.RollupStopRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } - scriptsPainlessExecute(params?: T.ExecutePainlessScriptRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - scriptsPainlessExecute(callback: callbackFn, TContext>): TransportRequestCallback - scriptsPainlessExecute(params: T.ExecutePainlessScriptRequest, callback: callbackFn, TContext>): TransportRequestCallback - scriptsPainlessExecute(params: T.ExecutePainlessScriptRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback + scriptsPainlessExecute(params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> + scriptsPainlessExecute(callback: callbackFn, TContext>): TransportRequestCallback + scriptsPainlessExecute(params: 
T.ScriptsPainlessExecuteRequest, callback: callbackFn, TContext>): TransportRequestCallback + scriptsPainlessExecute(params: T.ScriptsPainlessExecuteRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback scroll(params?: T.ScrollRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> scroll(callback: callbackFn, TContext>): TransportRequestCallback scroll(params: T.ScrollRequest, callback: callbackFn, TContext>): TransportRequestCallback @@ -1134,11 +1133,15 @@ declare class Client { searchShards(callback: callbackFn): TransportRequestCallback searchShards(params: T.SearchShardsRequest, callback: callbackFn): TransportRequestCallback searchShards(params: T.SearchShardsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - searchTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - searchTemplate(callback: callbackFn): TransportRequestCallback - searchTemplate(params: TODO, callback: callbackFn): TransportRequestCallback - searchTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + searchTemplate(params?: T.SearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> + searchTemplate(callback: callbackFn, TContext>): TransportRequestCallback + searchTemplate(params: T.SearchTemplateRequest, callback: callbackFn, TContext>): TransportRequestCallback + searchTemplate(params: T.SearchTemplateRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback searchableSnapshots: { + cacheStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + cacheStats(callback: callbackFn): TransportRequestCallback + cacheStats(params: TODO, callback: callbackFn): TransportRequestCallback + cacheStats(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback clearCache(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> clearCache(callback: callbackFn): TransportRequestCallback clearCache(params: TODO, callback: callbackFn): TransportRequestCallback @@ -1156,110 +1159,126 @@ declare class Client { stats(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } security: { - authenticate(params?: T.AuthenticateRequest, options?: TransportRequestOptions): TransportRequestPromise> - authenticate(callback: callbackFn): TransportRequestCallback - authenticate(params: T.AuthenticateRequest, callback: callbackFn): TransportRequestCallback - authenticate(params: T.AuthenticateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - changePassword(params?: T.ChangePasswordRequest, options?: TransportRequestOptions): TransportRequestPromise> - changePassword(callback: callbackFn): TransportRequestCallback - changePassword(params: T.ChangePasswordRequest, callback: callbackFn): TransportRequestCallback - changePassword(params: T.ChangePasswordRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clearApiKeyCache(params?: T.ClearApiKeyCacheRequest, options?: TransportRequestOptions): TransportRequestPromise> - clearApiKeyCache(callback: callbackFn): TransportRequestCallback - clearApiKeyCache(params: T.ClearApiKeyCacheRequest, callback: callbackFn): TransportRequestCallback - clearApiKeyCache(params: T.ClearApiKeyCacheRequest, options: TransportRequestOptions, 
callback: callbackFn): TransportRequestCallback
-    clearCachedPrivileges(params: T.ClearCachedPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clearCachedPrivileges(params: T.ClearCachedPrivilegesRequest, callback: callbackFn): TransportRequestCallback
-    clearCachedPrivileges(params: T.ClearCachedPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    clearCachedRealms(params: T.ClearCachedRealmsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clearCachedRealms(params: T.ClearCachedRealmsRequest, callback: callbackFn): TransportRequestCallback
-    clearCachedRealms(params: T.ClearCachedRealmsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    clearCachedRoles(params: T.ClearCachedRolesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clearCachedRoles(params: T.ClearCachedRolesRequest, callback: callbackFn): TransportRequestCallback
-    clearCachedRoles(params: T.ClearCachedRolesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    createApiKey(params?: T.CreateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    createApiKey(callback: callbackFn): TransportRequestCallback
-    createApiKey(params: T.CreateApiKeyRequest, callback: callbackFn): TransportRequestCallback
-    createApiKey(params: T.CreateApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    deletePrivileges(params: T.DeletePrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deletePrivileges(params: T.DeletePrivilegesRequest, callback: callbackFn): TransportRequestCallback
-    deletePrivileges(params: T.DeletePrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    deleteRole(params: T.DeleteRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteRole(params: T.DeleteRoleRequest, callback: callbackFn): TransportRequestCallback
-    deleteRole(params: T.DeleteRoleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    deleteRoleMapping(params: T.DeleteRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteRoleMapping(params: T.DeleteRoleMappingRequest, callback: callbackFn): TransportRequestCallback
-    deleteRoleMapping(params: T.DeleteRoleMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    deleteUser(params: T.DeleteUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteUser(params: T.DeleteUserRequest, callback: callbackFn): TransportRequestCallback
-    deleteUser(params: T.DeleteUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    disableUser(params: T.DisableUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    disableUser(params: T.DisableUserRequest, callback: callbackFn): TransportRequestCallback
-    disableUser(params: T.DisableUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    enableUser(params: T.EnableUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    enableUser(params: T.EnableUserRequest, callback: callbackFn): TransportRequestCallback
-    enableUser(params: T.EnableUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getApiKey(params?: T.GetApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getApiKey(callback: callbackFn): TransportRequestCallback
-    getApiKey(params: T.GetApiKeyRequest, callback: callbackFn): TransportRequestCallback
-    getApiKey(params: T.GetApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getBuiltinPrivileges(params?: T.GetBuiltinPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getBuiltinPrivileges(callback: callbackFn): TransportRequestCallback
-    getBuiltinPrivileges(params: T.GetBuiltinPrivilegesRequest, callback: callbackFn): TransportRequestCallback
-    getBuiltinPrivileges(params: T.GetBuiltinPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getPrivileges(params?: T.GetPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getPrivileges(callback: callbackFn): TransportRequestCallback
-    getPrivileges(params: T.GetPrivilegesRequest, callback: callbackFn): TransportRequestCallback
-    getPrivileges(params: T.GetPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getRole(params?: T.GetRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getRole(callback: callbackFn): TransportRequestCallback
-    getRole(params: T.GetRoleRequest, callback: callbackFn): TransportRequestCallback
-    getRole(params: T.GetRoleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getRoleMapping(params?: T.GetRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getRoleMapping(callback: callbackFn): TransportRequestCallback
-    getRoleMapping(params: T.GetRoleMappingRequest, callback: callbackFn): TransportRequestCallback
-    getRoleMapping(params: T.GetRoleMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getToken(params?: T.GetUserAccessTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getToken(callback: callbackFn): TransportRequestCallback
-    getToken(params: T.GetUserAccessTokenRequest, callback: callbackFn): TransportRequestCallback
-    getToken(params: T.GetUserAccessTokenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getUser(params?: T.GetUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getUser(callback: callbackFn): TransportRequestCallback
-    getUser(params: T.GetUserRequest, callback: callbackFn): TransportRequestCallback
-    getUser(params: T.GetUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getUserPrivileges(params?: T.GetUserPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getUserPrivileges(callback: callbackFn): TransportRequestCallback
-    getUserPrivileges(params: T.GetUserPrivilegesRequest, callback: callbackFn): TransportRequestCallback
-    getUserPrivileges(params: T.GetUserPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    grantApiKey(params?: T.GrantApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    grantApiKey(callback: callbackFn): TransportRequestCallback
-    grantApiKey(params: T.GrantApiKeyRequest, callback: callbackFn): TransportRequestCallback
-    grantApiKey(params: T.GrantApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    hasPrivileges(params?: T.HasPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    hasPrivileges(callback: callbackFn): TransportRequestCallback
-    hasPrivileges(params: T.HasPrivilegesRequest, callback: callbackFn): TransportRequestCallback
-    hasPrivileges(params: T.HasPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    invalidateApiKey(params?: T.InvalidateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    invalidateApiKey(callback: callbackFn): TransportRequestCallback
-    invalidateApiKey(params: T.InvalidateApiKeyRequest, callback: callbackFn): TransportRequestCallback
-    invalidateApiKey(params: T.InvalidateApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    invalidateToken(params?: T.InvalidateUserAccessTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    invalidateToken(callback: callbackFn): TransportRequestCallback
-    invalidateToken(params: T.InvalidateUserAccessTokenRequest, callback: callbackFn): TransportRequestCallback
-    invalidateToken(params: T.InvalidateUserAccessTokenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putPrivileges(params?: T.PutPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putPrivileges(callback: callbackFn): TransportRequestCallback
-    putPrivileges(params: T.PutPrivilegesRequest, callback: callbackFn): TransportRequestCallback
-    putPrivileges(params: T.PutPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putRole(params: T.PutRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putRole(params: T.PutRoleRequest, callback: callbackFn): TransportRequestCallback
-    putRole(params: T.PutRoleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putRoleMapping(params: T.PutRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putRoleMapping(params: T.PutRoleMappingRequest, callback: callbackFn): TransportRequestCallback
-    putRoleMapping(params: T.PutRoleMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putUser(params: T.PutUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putUser(params: T.PutUserRequest, callback: callbackFn): TransportRequestCallback
-    putUser(params: T.PutUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    authenticate(params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    authenticate(callback: callbackFn): TransportRequestCallback
+    authenticate(params: T.SecurityAuthenticateRequest, callback: callbackFn): TransportRequestCallback
+    authenticate(params: T.SecurityAuthenticateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    changePassword(params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    changePassword(callback: callbackFn): TransportRequestCallback
+    changePassword(params: T.SecurityChangePasswordRequest, callback: callbackFn): TransportRequestCallback
+    changePassword(params: T.SecurityChangePasswordRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    clearApiKeyCache(params?: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearApiKeyCache(callback: callbackFn): TransportRequestCallback
+    clearApiKeyCache(params: T.SecurityClearApiKeyCacheRequest, callback: callbackFn): TransportRequestCallback
+    clearApiKeyCache(params: T.SecurityClearApiKeyCacheRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    clearCachedPrivileges(params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearCachedPrivileges(params: T.SecurityClearCachedPrivilegesRequest, callback: callbackFn): TransportRequestCallback
+    clearCachedPrivileges(params: T.SecurityClearCachedPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    clearCachedRealms(params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearCachedRealms(params: T.SecurityClearCachedRealmsRequest, callback: callbackFn): TransportRequestCallback
+    clearCachedRealms(params: T.SecurityClearCachedRealmsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    clearCachedRoles(params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearCachedRoles(params: T.SecurityClearCachedRolesRequest, callback: callbackFn): TransportRequestCallback
+    clearCachedRoles(params: T.SecurityClearCachedRolesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    clearCachedServiceTokens(params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearCachedServiceTokens(params: T.SecurityClearCachedServiceTokensRequest, callback: callbackFn): TransportRequestCallback
+    clearCachedServiceTokens(params: T.SecurityClearCachedServiceTokensRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    createApiKey(params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    createApiKey(callback: callbackFn): TransportRequestCallback
+    createApiKey(params: T.SecurityCreateApiKeyRequest, callback: callbackFn): TransportRequestCallback
+    createApiKey(params: T.SecurityCreateApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    createServiceToken(params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    createServiceToken(params: T.SecurityCreateServiceTokenRequest, callback: callbackFn): TransportRequestCallback
+    createServiceToken(params: T.SecurityCreateServiceTokenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deletePrivileges(params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deletePrivileges(params: T.SecurityDeletePrivilegesRequest, callback: callbackFn): TransportRequestCallback
+    deletePrivileges(params: T.SecurityDeletePrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deleteRole(params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteRole(params: T.SecurityDeleteRoleRequest, callback: callbackFn): TransportRequestCallback
+    deleteRole(params: T.SecurityDeleteRoleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deleteRoleMapping(params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteRoleMapping(params: T.SecurityDeleteRoleMappingRequest, callback: callbackFn): TransportRequestCallback
+    deleteRoleMapping(params: T.SecurityDeleteRoleMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deleteServiceToken(params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteServiceToken(params: T.SecurityDeleteServiceTokenRequest, callback: callbackFn): TransportRequestCallback
+    deleteServiceToken(params: T.SecurityDeleteServiceTokenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deleteUser(params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteUser(params: T.SecurityDeleteUserRequest, callback: callbackFn): TransportRequestCallback
+    deleteUser(params: T.SecurityDeleteUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    disableUser(params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    disableUser(params: T.SecurityDisableUserRequest, callback: callbackFn): TransportRequestCallback
+    disableUser(params: T.SecurityDisableUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    enableUser(params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    enableUser(params: T.SecurityEnableUserRequest, callback: callbackFn): TransportRequestCallback
+    enableUser(params: T.SecurityEnableUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getApiKey(params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getApiKey(callback: callbackFn): TransportRequestCallback
+    getApiKey(params: T.SecurityGetApiKeyRequest, callback: callbackFn): TransportRequestCallback
+    getApiKey(params: T.SecurityGetApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getBuiltinPrivileges(params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getBuiltinPrivileges(callback: callbackFn): TransportRequestCallback
+    getBuiltinPrivileges(params: T.SecurityGetBuiltinPrivilegesRequest, callback: callbackFn): TransportRequestCallback
+    getBuiltinPrivileges(params: T.SecurityGetBuiltinPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getPrivileges(params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getPrivileges(callback: callbackFn): TransportRequestCallback
+    getPrivileges(params: T.SecurityGetPrivilegesRequest, callback: callbackFn): TransportRequestCallback
+    getPrivileges(params: T.SecurityGetPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getRole(params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getRole(callback: callbackFn): TransportRequestCallback
+    getRole(params: T.SecurityGetRoleRequest, callback: callbackFn): TransportRequestCallback
+    getRole(params: T.SecurityGetRoleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getRoleMapping(params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getRoleMapping(callback: callbackFn): TransportRequestCallback
+    getRoleMapping(params: T.SecurityGetRoleMappingRequest, callback: callbackFn): TransportRequestCallback
+    getRoleMapping(params: T.SecurityGetRoleMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getServiceAccounts(params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getServiceAccounts(callback: callbackFn): TransportRequestCallback
+    getServiceAccounts(params: T.SecurityGetServiceAccountsRequest, callback: callbackFn): TransportRequestCallback
+    getServiceAccounts(params: T.SecurityGetServiceAccountsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getServiceCredentials(params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getServiceCredentials(params: T.SecurityGetServiceCredentialsRequest, callback: callbackFn): TransportRequestCallback
+    getServiceCredentials(params: T.SecurityGetServiceCredentialsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getToken(params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getToken(callback: callbackFn): TransportRequestCallback
+    getToken(params: T.SecurityGetTokenRequest, callback: callbackFn): TransportRequestCallback
+    getToken(params: T.SecurityGetTokenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getUser(params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getUser(callback: callbackFn): TransportRequestCallback
+    getUser(params: T.SecurityGetUserRequest, callback: callbackFn): TransportRequestCallback
+    getUser(params: T.SecurityGetUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getUserPrivileges(params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getUserPrivileges(callback: callbackFn): TransportRequestCallback
+    getUserPrivileges(params: T.SecurityGetUserPrivilegesRequest, callback: callbackFn): TransportRequestCallback
+    getUserPrivileges(params: T.SecurityGetUserPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    grantApiKey(params?: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    grantApiKey(callback: callbackFn): TransportRequestCallback
+    grantApiKey(params: T.SecurityGrantApiKeyRequest, callback: callbackFn): TransportRequestCallback
+    grantApiKey(params: T.SecurityGrantApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    hasPrivileges(params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    hasPrivileges(callback: callbackFn): TransportRequestCallback
+    hasPrivileges(params: T.SecurityHasPrivilegesRequest, callback: callbackFn): TransportRequestCallback
+    hasPrivileges(params: T.SecurityHasPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    invalidateApiKey(params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    invalidateApiKey(callback: callbackFn): TransportRequestCallback
+    invalidateApiKey(params: T.SecurityInvalidateApiKeyRequest, callback: callbackFn): TransportRequestCallback
+    invalidateApiKey(params: T.SecurityInvalidateApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    invalidateToken(params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    invalidateToken(callback: callbackFn): TransportRequestCallback
+    invalidateToken(params: T.SecurityInvalidateTokenRequest, callback: callbackFn): TransportRequestCallback
+    invalidateToken(params: T.SecurityInvalidateTokenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    putPrivileges(params?: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putPrivileges(callback: callbackFn): TransportRequestCallback
+    putPrivileges(params: T.SecurityPutPrivilegesRequest, callback: callbackFn): TransportRequestCallback
+    putPrivileges(params: T.SecurityPutPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    putRole(params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putRole(params: T.SecurityPutRoleRequest, callback: callbackFn): TransportRequestCallback
+    putRole(params: T.SecurityPutRoleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    putRoleMapping(params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putRoleMapping(params: T.SecurityPutRoleMappingRequest, callback: callbackFn): TransportRequestCallback
+    putRoleMapping(params: T.SecurityPutRoleMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    putUser(params: T.SecurityPutUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putUser(params: T.SecurityPutUserRequest, callback: callbackFn): TransportRequestCallback
+    putUser(params: T.SecurityPutUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   shutdown: {
     deleteNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
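The hunk above replaces the old flat security type names (`T.CreateApiKeyRequest`, `T.GetUserAccessTokenRequest`, ...) with API-namespaced ones (`T.SecurityCreateApiKeyRequest`, `T.SecurityGetTokenRequest`, ...) while keeping the same four overloads per method: promise with optional params, bare callback, params plus callback, and params plus options plus callback. A minimal sketch of how those overloads are consumed follows; it is not part of the patch, and it assumes a cluster reachable at http://localhost:9200 and an API key name (`my-api-key`) that is free to use:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run (): Promise<void> {
  // Promise-style overload: params typed by the renamed request interface
  const { body } = await client.security.createApiKey({
    body: { name: 'my-api-key', expiration: '7d' }
  })
  console.log(body)
}

// Callback-style overload of the same namespace
client.security.getApiKey({ name: 'my-api-key' }, (err, result) => {
  if (err) console.log(err)
  else console.log(result.body)
})

run().catch(console.log)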
@@ -1276,145 +1295,146 @@
     putNode(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   slm: {
-    deleteLifecycle(params: T.DeleteSnapshotLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteLifecycle(params: T.DeleteSnapshotLifecycleRequest, callback: callbackFn): TransportRequestCallback
-    deleteLifecycle(params: T.DeleteSnapshotLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    executeLifecycle(params: T.ExecuteSnapshotLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    executeLifecycle(params: T.ExecuteSnapshotLifecycleRequest, callback: callbackFn): TransportRequestCallback
-    executeLifecycle(params: T.ExecuteSnapshotLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    executeRetention(params?: T.ExecuteRetentionRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    executeRetention(callback: callbackFn): TransportRequestCallback
-    executeRetention(params: T.ExecuteRetentionRequest, callback: callbackFn): TransportRequestCallback
-    executeRetention(params: T.ExecuteRetentionRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getLifecycle(params?: T.GetSnapshotLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getLifecycle(callback: callbackFn): TransportRequestCallback
-    getLifecycle(params: T.GetSnapshotLifecycleRequest, callback: callbackFn): TransportRequestCallback
-    getLifecycle(params: T.GetSnapshotLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getStats(params?: T.GetSnapshotLifecycleStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getStats(callback: callbackFn): TransportRequestCallback
-    getStats(params: T.GetSnapshotLifecycleStatsRequest, callback: callbackFn): TransportRequestCallback
-    getStats(params: T.GetSnapshotLifecycleStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getStatus(params?: T.GetSnapshotLifecycleManagementStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getStatus(callback: callbackFn): TransportRequestCallback
-    getStatus(params: T.GetSnapshotLifecycleManagementStatusRequest, callback: callbackFn): TransportRequestCallback
-    getStatus(params: T.GetSnapshotLifecycleManagementStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putLifecycle(params: T.PutSnapshotLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putLifecycle(params: T.PutSnapshotLifecycleRequest, callback: callbackFn): TransportRequestCallback
-    putLifecycle(params: T.PutSnapshotLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    start(params?: T.StartSnapshotLifecycleManagementRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    start(callback: callbackFn): TransportRequestCallback
-    start(params: T.StartSnapshotLifecycleManagementRequest, callback: callbackFn): TransportRequestCallback
-    start(params: T.StartSnapshotLifecycleManagementRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    stop(params?: T.StopSnapshotLifecycleManagementRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stop(callback: callbackFn): TransportRequestCallback
-    stop(params: T.StopSnapshotLifecycleManagementRequest, callback: callbackFn): TransportRequestCallback
-    stop(params: T.StopSnapshotLifecycleManagementRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deleteLifecycle(params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteLifecycle(params: T.SlmDeleteLifecycleRequest, callback: callbackFn): TransportRequestCallback
+    deleteLifecycle(params: T.SlmDeleteLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    executeLifecycle(params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    executeLifecycle(params: T.SlmExecuteLifecycleRequest, callback: callbackFn): TransportRequestCallback
+    executeLifecycle(params: T.SlmExecuteLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    executeRetention(params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    executeRetention(callback: callbackFn): TransportRequestCallback
+    executeRetention(params: T.SlmExecuteRetentionRequest, callback: callbackFn): TransportRequestCallback
+    executeRetention(params: T.SlmExecuteRetentionRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getLifecycle(params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getLifecycle(callback: callbackFn): TransportRequestCallback
+    getLifecycle(params: T.SlmGetLifecycleRequest, callback: callbackFn): TransportRequestCallback
+    getLifecycle(params: T.SlmGetLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getStats(params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getStats(callback: callbackFn): TransportRequestCallback
+    getStats(params: T.SlmGetStatsRequest, callback: callbackFn): TransportRequestCallback
+    getStats(params: T.SlmGetStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getStatus(params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getStatus(callback: callbackFn): TransportRequestCallback
+    getStatus(params: T.SlmGetStatusRequest, callback: callbackFn): TransportRequestCallback
+    getStatus(params: T.SlmGetStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    putLifecycle(params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putLifecycle(params: T.SlmPutLifecycleRequest, callback: callbackFn): TransportRequestCallback
+    putLifecycle(params: T.SlmPutLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    start(params?: T.SlmStartRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    start(callback: callbackFn): TransportRequestCallback
+    start(params: T.SlmStartRequest, callback: callbackFn): TransportRequestCallback
+    start(params: T.SlmStartRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    stop(params?: T.SlmStopRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    stop(callback: callbackFn): TransportRequestCallback
+    stop(params: T.SlmStopRequest, callback: callbackFn): TransportRequestCallback
+    stop(params: T.SlmStopRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   snapshot: {
-    cleanupRepository(params: T.CleanupRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    cleanupRepository(params: T.CleanupRepositoryRequest, callback: callbackFn): TransportRequestCallback
-    cleanupRepository(params: T.CleanupRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    clone(params: T.CloneSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clone(params: T.CloneSnapshotRequest, callback: callbackFn): TransportRequestCallback
-    clone(params: T.CloneSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    create(params: T.SnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    create(params: T.SnapshotRequest, callback: callbackFn): TransportRequestCallback
-    create(params: T.SnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    createRepository(params: T.CreateRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    createRepository(params: T.CreateRepositoryRequest, callback: callbackFn): TransportRequestCallback
-    createRepository(params: T.CreateRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    delete(params: T.DeleteSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    delete(params: T.DeleteSnapshotRequest, callback: callbackFn): TransportRequestCallback
-    delete(params: T.DeleteSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    deleteRepository(params: T.DeleteRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteRepository(params: T.DeleteRepositoryRequest, callback: callbackFn): TransportRequestCallback
-    deleteRepository(params: T.DeleteRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    get(params: T.GetSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    get(params: T.GetSnapshotRequest, callback: callbackFn): TransportRequestCallback
-    get(params: T.GetSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getRepository(params?: T.GetRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getRepository(callback: callbackFn): TransportRequestCallback
-    getRepository(params: T.GetRepositoryRequest, callback: callbackFn): TransportRequestCallback
-    getRepository(params: T.GetRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    restore(params: T.RestoreRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    restore(params: T.RestoreRequest, callback: callbackFn): TransportRequestCallback
-    restore(params: T.RestoreRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    cleanupRepository(params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    cleanupRepository(params: T.SnapshotCleanupRepositoryRequest, callback: callbackFn): TransportRequestCallback
+    cleanupRepository(params: T.SnapshotCleanupRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    clone(params: T.SnapshotCloneRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clone(params: T.SnapshotCloneRequest, callback: callbackFn): TransportRequestCallback
+    clone(params: T.SnapshotCloneRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    create(params: T.SnapshotCreateRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    create(params: T.SnapshotCreateRequest, callback: callbackFn): TransportRequestCallback
+    create(params: T.SnapshotCreateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    createRepository(params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    createRepository(params: T.SnapshotCreateRepositoryRequest, callback: callbackFn): TransportRequestCallback
+    createRepository(params: T.SnapshotCreateRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    delete(params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    delete(params: T.SnapshotDeleteRequest, callback: callbackFn): TransportRequestCallback
+    delete(params: T.SnapshotDeleteRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deleteRepository(params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteRepository(params: T.SnapshotDeleteRepositoryRequest, callback: callbackFn): TransportRequestCallback
+    deleteRepository(params: T.SnapshotDeleteRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    get(params: T.SnapshotGetRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    get(params: T.SnapshotGetRequest, callback: callbackFn): TransportRequestCallback
+    get(params: T.SnapshotGetRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getRepository(params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getRepository(callback: callbackFn): TransportRequestCallback
+    getRepository(params: T.SnapshotGetRepositoryRequest, callback: callbackFn): TransportRequestCallback
+    getRepository(params: T.SnapshotGetRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    restore(params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    restore(params: T.SnapshotRestoreRequest, callback: callbackFn): TransportRequestCallback
+    restore(params: T.SnapshotRestoreRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
     status(params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
     status(callback: callbackFn): TransportRequestCallback
     status(params: T.SnapshotStatusRequest, callback: callbackFn): TransportRequestCallback
     status(params: T.SnapshotStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    verifyRepository(params: T.VerifyRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    verifyRepository(params: T.VerifyRepositoryRequest, callback: callbackFn): TransportRequestCallback
-    verifyRepository(params: T.VerifyRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    verifyRepository(params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    verifyRepository(params: T.SnapshotVerifyRepositoryRequest, callback: callbackFn): TransportRequestCallback
+    verifyRepository(params: T.SnapshotVerifyRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   sql: {
-    clearCursor(params?: T.ClearSqlCursorRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clearCursor(callback: callbackFn): TransportRequestCallback
-    clearCursor(params: T.ClearSqlCursorRequest, callback: callbackFn): TransportRequestCallback
-    clearCursor(params: T.ClearSqlCursorRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    query(params?: T.QuerySqlRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    query(callback: callbackFn): TransportRequestCallback
-    query(params: T.QuerySqlRequest, callback: callbackFn): TransportRequestCallback
-    query(params: T.QuerySqlRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    translate(params?: T.TranslateSqlRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    translate(callback: callbackFn): TransportRequestCallback
-    translate(params: T.TranslateSqlRequest, callback: callbackFn): TransportRequestCallback
-    translate(params: T.TranslateSqlRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    clearCursor(params?: T.SqlClearSqlCursorRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    clearCursor(callback: callbackFn): TransportRequestCallback
+    clearCursor(params: T.SqlClearSqlCursorRequest, callback: callbackFn): TransportRequestCallback
+    clearCursor(params: T.SqlClearSqlCursorRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    query(params?: T.SqlQuerySqlRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    query(callback: callbackFn): TransportRequestCallback
+    query(params: T.SqlQuerySqlRequest, callback: callbackFn): TransportRequestCallback
+    query(params: T.SqlQuerySqlRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    translate(params?: T.SqlTranslateSqlRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    translate(callback: callbackFn): TransportRequestCallback
+    translate(params: T.SqlTranslateSqlRequest, callback: callbackFn): TransportRequestCallback
+    translate(params: T.SqlTranslateSqlRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   ssl: {
-    certificates(params?: T.GetCertificatesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    certificates(callback: callbackFn): TransportRequestCallback
-    certificates(params: T.GetCertificatesRequest, callback: callbackFn): TransportRequestCallback
-    certificates(params: T.GetCertificatesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    certificates(params?: T.SslGetCertificatesRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    certificates(callback: callbackFn): TransportRequestCallback
+    certificates(params: T.SslGetCertificatesRequest, callback: callbackFn): TransportRequestCallback
+    certificates(params: T.SslGetCertificatesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   tasks: {
-    cancel(params?: T.CancelTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    cancel(callback: callbackFn): TransportRequestCallback
-    cancel(params: T.CancelTasksRequest, callback: callbackFn): TransportRequestCallback
-    cancel(params: T.CancelTasksRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    get(params: T.GetTaskRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    get(params: T.GetTaskRequest, callback: callbackFn): TransportRequestCallback
-    get(params: T.GetTaskRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    list(params?: T.ListTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    list(callback: callbackFn): TransportRequestCallback
-    list(params: T.ListTasksRequest, callback: callbackFn): TransportRequestCallback
-    list(params: T.ListTasksRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    cancel(params?: T.TaskCancelTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    cancel(callback: callbackFn): TransportRequestCallback
+    cancel(params: T.TaskCancelTasksRequest, callback: callbackFn): TransportRequestCallback
+    cancel(params: T.TaskCancelTasksRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    get(params: T.TaskGetTaskRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    get(params: T.TaskGetTaskRequest, callback: callbackFn): TransportRequestCallback
+    get(params: T.TaskGetTaskRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    list(params?: T.TaskListTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    list(callback: callbackFn): TransportRequestCallback
+    list(params: T.TaskListTasksRequest, callback: callbackFn): TransportRequestCallback
+    list(params: T.TaskListTasksRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
-  termvectors(params: T.TermVectorsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  termvectors(params: T.TermVectorsRequest, callback: callbackFn): TransportRequestCallback
-  termvectors(params: T.TermVectorsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  termvectors(params: T.TermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+  termvectors(params: T.TermvectorsRequest, callback: callbackFn): TransportRequestCallback
+  termvectors(params: T.TermvectorsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   textStructure: {
-    findStructure(params: T.FindStructureRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    findStructure(params: T.FindStructureRequest, callback: callbackFn): TransportRequestCallback
-    findStructure(params: T.FindStructureRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    findStructure(params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    findStructure(params: T.TextStructureFindStructureRequest, callback: callbackFn): TransportRequestCallback
+    findStructure(params: T.TextStructureFindStructureRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   transform: {
-    deleteTransform(params: T.DeleteTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteTransform(params: T.DeleteTransformRequest, callback: callbackFn): TransportRequestCallback
-    deleteTransform(params: T.DeleteTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getTransform(params?: T.GetTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getTransform(callback: callbackFn): TransportRequestCallback
-    getTransform(params: T.GetTransformRequest, callback: callbackFn): TransportRequestCallback
-    getTransform(params: T.GetTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getTransformStats(params: T.GetTransformStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getTransformStats(params: T.GetTransformStatsRequest, callback: callbackFn): TransportRequestCallback
-    getTransformStats(params: T.GetTransformStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    previewTransform(params?: T.PreviewTransformRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    previewTransform(callback: callbackFn, TContext>): TransportRequestCallback
-    previewTransform(params: T.PreviewTransformRequest, callback: callbackFn, TContext>): TransportRequestCallback
-    previewTransform(params: T.PreviewTransformRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
-    putTransform(params: T.PutTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putTransform(params: T.PutTransformRequest, callback: callbackFn): TransportRequestCallback
-    putTransform(params: T.PutTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    startTransform(params: T.StartTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    startTransform(params: T.StartTransformRequest, callback: callbackFn): TransportRequestCallback
-    startTransform(params: T.StartTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    stopTransform(params: T.StopTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stopTransform(params: T.StopTransformRequest, callback: callbackFn): TransportRequestCallback
-    stopTransform(params: T.StopTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    updateTransform(params: T.UpdateTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    updateTransform(params: T.UpdateTransformRequest, callback: callbackFn): TransportRequestCallback
-    updateTransform(params: T.UpdateTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deleteTransform(params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteTransform(params: T.TransformDeleteTransformRequest, callback: callbackFn): TransportRequestCallback
+    deleteTransform(params: T.TransformDeleteTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getTransform(params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getTransform(callback: callbackFn): TransportRequestCallback
+    getTransform(params: T.TransformGetTransformRequest, callback: callbackFn): TransportRequestCallback
+    getTransform(params: T.TransformGetTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getTransformStats(params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getTransformStats(params: T.TransformGetTransformStatsRequest, callback: callbackFn): TransportRequestCallback
+    getTransformStats(params: T.TransformGetTransformStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    previewTransform(params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
+    previewTransform(callback: callbackFn, TContext>): TransportRequestCallback
+    previewTransform(params: T.TransformPreviewTransformRequest, callback: callbackFn, TContext>): TransportRequestCallback
+    previewTransform(params: T.TransformPreviewTransformRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
+    putTransform(params: T.TransformPutTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putTransform(params: T.TransformPutTransformRequest, callback: callbackFn): TransportRequestCallback
+    putTransform(params: T.TransformPutTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    startTransform(params: T.TransformStartTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    startTransform(params: T.TransformStartTransformRequest, callback: callbackFn): TransportRequestCallback
+    startTransform(params: T.TransformStartTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    stopTransform(params: T.TransformStopTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    stopTransform(params: T.TransformStopTransformRequest, callback: callbackFn): TransportRequestCallback
+    stopTransform(params: T.TransformStopTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    updateTransform(params?: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    updateTransform(callback: callbackFn): TransportRequestCallback
+    updateTransform(params: T.TransformUpdateTransformRequest, callback: callbackFn): TransportRequestCallback
+    updateTransform(params: T.TransformUpdateTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   update(params: T.UpdateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
   update(params: T.UpdateRequest, callback: callbackFn, TContext>): TransportRequestCallback
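Every namespace in this file follows the same mechanical rename: the old flat request names gain a prefix derived from the API group (`Slm*`, `Snapshot*`, `Sql*`, `Task*`, `Transform*`), and top-level APIs are normalized too (`T.TermVectorsRequest` becomes `T.TermvectorsRequest`). A sketch of what this looks like from application code follows; it is not part of the patch, and the `api/types` import path is an assumption based on the file layout shown here:

import { Client } from '@elastic/elasticsearch'
// Type-only import; the path mirrors api/types.d.ts in this repository.
import type * as T from '@elastic/elasticsearch/api/types'

const client = new Client({ node: 'http://localhost:9200/' })

// Before this patch the type was T.ListTasksRequest; after it, the same
// shape lives under the task namespace as T.TaskListTasksRequest.
const params: T.TaskListTasksRequest = { detailed: true, group_by: 'parents' }

client.tasks.list(params)
  .then(({ body }) => console.log(body))
  .catch(console.log)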
@@ -1426,54 +1446,54 @@
   updateByQueryRethrottle(params: T.UpdateByQueryRethrottleRequest, callback: callbackFn): TransportRequestCallback
   updateByQueryRethrottle(params: T.UpdateByQueryRethrottleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   watcher: {
-    ackWatch(params: T.AcknowledgeWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    ackWatch(params: T.AcknowledgeWatchRequest, callback: callbackFn): TransportRequestCallback
-    ackWatch(params: T.AcknowledgeWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    activateWatch(params: T.ActivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    activateWatch(params: T.ActivateWatchRequest, callback: callbackFn): TransportRequestCallback
-    activateWatch(params: T.ActivateWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    deactivateWatch(params: T.DeactivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deactivateWatch(params: T.DeactivateWatchRequest, callback: callbackFn): TransportRequestCallback
-    deactivateWatch(params: T.DeactivateWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    deleteWatch(params: T.DeleteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteWatch(params: T.DeleteWatchRequest, callback: callbackFn): TransportRequestCallback
-    deleteWatch(params: T.DeleteWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    executeWatch(params?: T.ExecuteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    executeWatch(callback: callbackFn): TransportRequestCallback
-    executeWatch(params: T.ExecuteWatchRequest, callback: callbackFn): TransportRequestCallback
-    executeWatch(params: T.ExecuteWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getWatch(params: T.GetWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getWatch(params: T.GetWatchRequest, callback: callbackFn): TransportRequestCallback
-    getWatch(params: T.GetWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putWatch(params: T.PutWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putWatch(params: T.PutWatchRequest, callback: callbackFn): TransportRequestCallback
-    putWatch(params: T.PutWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    ackWatch(params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    ackWatch(params: T.WatcherAckWatchRequest, callback: callbackFn): TransportRequestCallback
+    ackWatch(params: T.WatcherAckWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    activateWatch(params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    activateWatch(params: T.WatcherActivateWatchRequest, callback: callbackFn): TransportRequestCallback
+    activateWatch(params: T.WatcherActivateWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deactivateWatch(params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deactivateWatch(params: T.WatcherDeactivateWatchRequest, callback: callbackFn): TransportRequestCallback
+    deactivateWatch(params: T.WatcherDeactivateWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    deleteWatch(params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    deleteWatch(params: T.WatcherDeleteWatchRequest, callback: callbackFn): TransportRequestCallback
+    deleteWatch(params: T.WatcherDeleteWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    executeWatch(params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    executeWatch(callback: callbackFn): TransportRequestCallback
+    executeWatch(params: T.WatcherExecuteWatchRequest, callback: callbackFn): TransportRequestCallback
+    executeWatch(params: T.WatcherExecuteWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    getWatch(params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    getWatch(params: T.WatcherGetWatchRequest, callback: callbackFn): TransportRequestCallback
+    getWatch(params: T.WatcherGetWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    putWatch(params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    putWatch(params: T.WatcherPutWatchRequest, callback: callbackFn): TransportRequestCallback
+    putWatch(params: T.WatcherPutWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
     queryWatches(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
     queryWatches(callback: callbackFn): TransportRequestCallback
     queryWatches(params: TODO, callback: callbackFn): TransportRequestCallback
     queryWatches(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    start(params?: T.StartWatcherRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    start(callback: callbackFn): TransportRequestCallback
-    start(params: T.StartWatcherRequest, callback: callbackFn): TransportRequestCallback
-    start(params: T.StartWatcherRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    start(params?: T.WatcherStartRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    start(callback: callbackFn): TransportRequestCallback
+    start(params: T.WatcherStartRequest, callback: callbackFn): TransportRequestCallback
+    start(params: T.WatcherStartRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
     stats(params?: T.WatcherStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
     stats(callback: callbackFn): TransportRequestCallback
     stats(params: T.WatcherStatsRequest, callback: callbackFn): TransportRequestCallback
     stats(params: T.WatcherStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    stop(params?: T.StopWatcherRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stop(callback: callbackFn): TransportRequestCallback
-    stop(params: T.StopWatcherRequest, callback: callbackFn): TransportRequestCallback
-    stop(params: T.StopWatcherRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    stop(params?: T.WatcherStopRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    stop(callback: callbackFn): TransportRequestCallback
+    stop(params: T.WatcherStopRequest, callback: callbackFn): TransportRequestCallback
+    stop(params: T.WatcherStopRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   xpack: {
-    info(params?: T.XPackInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    info(callback: callbackFn): TransportRequestCallback
-    info(params: T.XPackInfoRequest, callback: callbackFn): TransportRequestCallback
-    info(params: T.XPackInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    usage(params?: T.XPackUsageRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    usage(callback: callbackFn): TransportRequestCallback
-    usage(params: T.XPackUsageRequest, callback: callbackFn): TransportRequestCallback
-    usage(params: T.XPackUsageRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    info(params?: T.XpackInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    info(callback: callbackFn): TransportRequestCallback
+    info(params: T.XpackInfoRequest, callback: callbackFn): TransportRequestCallback
+    info(params: T.XpackInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    usage(params?: T.XpackUsageRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    usage(callback: callbackFn): TransportRequestCallback
+    usage(params: T.XpackUsageRequest, callback: callbackFn): TransportRequestCallback
+    usage(params: T.XpackUsageRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
 }
@@ -1495,4 +1515,4 @@
 export {
   ClientOptions,
   NodeOptions,
   ClientExtendsCallbackOptions
-};
+}
\ No newline at end of file
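The companion diff below rewrites api/types.d.ts along the same lines: the old flat declarations (watcher types such as `AcknowledgeWatchRequest`, the aggregation containers, and so on) are deleted, and every API gets its own namespaced family of shapes (`Bulk*`, `ClearScroll*`, `Count*`, `Create*`, `Delete*`, ...). A sketch of composing the new bulk shapes follows; it is not part of the patch, and both the import path and the `TSource` type parameter of `BulkRequest` (stripped from the listing below) are assumptions:

import type * as T from '@elastic/elasticsearch/api/types'

interface Doc { title: string }

// Action and source lines interleave, exactly as in the bulk API itself.
// Note: in this revision BulkBulkOperation declares all of its fields as
// required, so the action metadata is cast here for brevity.
const body: (T.BulkBulkOperationContainer | Doc)[] = [
  { index: { _index: 'my-index' } as T.BulkBulkIndexOperation },
  { title: 'hello world' }
]

const request: T.BulkRequest = {
  index: 'my-index',
  refresh: 'wait_for',
  body
}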
*/ -export type AccessTokenGrantType = 'password' | 'client_credentials' | '_kerberos' | 'refresh_token' - -export interface AcknowledgeState { - state: AcknowledgementState - timestamp: DateString -} - -export interface AcknowledgeWatchRequest extends RequestBase { - watch_id: Name - action_id?: Names -} - -export interface AcknowledgeWatchResponse extends ResponseBase { - status: WatchStatus -} - -export interface AcknowledgedResponseBase extends ResponseBase { - acknowledged: boolean -} - -export type AcknowledgementState = 'awaits_successful_execution' | 'ackable' | 'acked' - -export interface Action { - action_type?: ActionType - condition?: ConditionContainer - foreach?: string - max_iterations?: integer - name?: string - throttle_period?: Time - throttle_period_in_millis?: EpochMillis - transform?: TransformContainer - index: ActionIndex +export interface BulkBulkCreateOperation extends BulkBulkOperation { } -export type ActionExecutionMode = 'simulate' | 'force_simulate' | 'execute' | 'force_execute' | 'skip' - -export type ActionExecutionState = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued' - -export interface ActionIndex { - index: IndexName +export interface BulkBulkCreateResponseItem extends BulkBulkResponseItemBase { } -export interface ActionStatus { - ack: AcknowledgeState - last_execution?: ExecutionState - last_successful_execution?: ExecutionState - last_throttle?: ThrottleState +export interface BulkBulkDeleteOperation extends BulkBulkOperation { } -export type ActionType = 'email' | 'webhook' | 'index' | 'logging' | 'slack' | 'pagerduty' - -export interface ActivateWatchRequest extends RequestBase { - watch_id: Name +export interface BulkBulkDeleteResponseItem extends BulkBulkResponseItemBase { } -export interface ActivateWatchResponse extends ResponseBase { - status: ActivationStatus +export interface BulkBulkIndexOperation extends BulkBulkOperation { } -export interface ActivationState { - active: boolean - timestamp: Timestamp +export interface BulkBulkIndexResponseItem extends BulkBulkResponseItemBase { } -export interface ActivationStatus { - actions: Record - state: ActivationState +export interface BulkBulkOperation { + _id: Id + _index: IndexName + retry_on_conflict: integer + routing: Routing version: VersionNumber + version_type: VersionType } -export interface AdaptiveSelectionStats { - avg_queue_size: long - avg_response_time: long - avg_response_time_ns: long - avg_service_time: string - avg_service_time_ns: long - outgoing_searches: long - rank: string +export interface BulkBulkOperationContainer { + index?: BulkBulkIndexOperation + create?: BulkBulkCreateOperation + update?: BulkBulkUpdateOperation + delete?: BulkBulkDeleteOperation } -export interface AdjacencyMatrixAggregation extends BucketAggregationBase { - filters?: Record +export interface BulkBulkResponseItemBase { + _id?: string | null + _index: string + status: integer + error?: ErrorCause + _primary_term?: long + result?: string + _seq_no?: SequenceNumber + _shards?: ShardStatistics + _type?: string + _version?: VersionNumber + forced_refresh?: boolean + get?: InlineGet> } -export type Aggregate = SingleBucketAggregate | AutoDateHistogramAggregate | FiltersAggregate | SignificantTermsAggregate | TermsAggregate | BucketAggregate | CompositeBucketAggregate | MultiBucketAggregate | MatrixStatsAggregate | KeyedValueAggregate | MetricAggregate - -export interface AggregateBase { - meta?: Record 
+export interface BulkBulkResponseItemContainer { + index?: BulkBulkIndexResponseItem + create?: BulkBulkCreateResponseItem + update?: BulkBulkUpdateResponseItem + delete?: BulkBulkDeleteResponseItem } -export type AggregateName = string - -export interface Aggregation { - meta?: Record - name?: string +export interface BulkBulkUpdateOperation extends BulkBulkOperation { } -export interface AggregationBreakdown { - build_aggregation: long - build_aggregation_count: long - build_leaf_collector: long - build_leaf_collector_count: long - collect: long - collect_count: long - initialize: long - initialize_count: long - post_collection?: long - post_collection_count?: long - reduce: long - reduce_count: long +export interface BulkBulkUpdateResponseItem extends BulkBulkResponseItemBase { } -export interface AggregationContainer { - adjacency_matrix?: AdjacencyMatrixAggregation - aggs?: Record - aggregations?: Record - auto_date_histogram?: AutoDateHistogramAggregation - avg?: AverageAggregation - avg_bucket?: AverageBucketAggregation - boxplot?: BoxplotAggregation - bucket_script?: BucketScriptAggregation - bucket_selector?: BucketSelectorAggregation - bucket_sort?: BucketSortAggregation - cardinality?: CardinalityAggregation - children?: ChildrenAggregation - composite?: CompositeAggregation - cumulative_cardinality?: CumulativeCardinalityAggregation - cumulative_sum?: CumulativeSumAggregation - date_histogram?: DateHistogramAggregation - date_range?: DateRangeAggregation - derivative?: DerivativeAggregation - diversified_sampler?: DiversifiedSamplerAggregation - extended_stats?: ExtendedStatsAggregation - extended_stats_bucket?: ExtendedStatsBucketAggregation - filter?: QueryContainer - filters?: FiltersAggregation - geo_bounds?: GeoBoundsAggregation - geo_centroid?: GeoCentroidAggregation - geo_distance?: GeoDistanceAggregation - geohash_grid?: GeoHashGridAggregation - geo_line?: GeoLineAggregation - geotile_grid?: GeoTileGridAggregation - global?: GlobalAggregation - histogram?: HistogramAggregation - ip_range?: IpRangeAggregation - inference?: InferenceAggregation - line?: GeoLineAggregation - matrix_stats?: MatrixStatsAggregation - max?: MaxAggregation - max_bucket?: MaxBucketAggregation - median_absolute_deviation?: MedianAbsoluteDeviationAggregation - meta?: Record - min?: MinAggregation - min_bucket?: MinBucketAggregation - missing?: MissingAggregation - moving_avg?: MovingAverageAggregation - moving_percentiles?: MovingPercentilesAggregation - moving_fn?: MovingFunctionAggregation - multi_terms?: MultiTermsAggregation - nested?: NestedAggregation - normalize?: NormalizeAggregation - parent?: ParentAggregation - percentile_ranks?: PercentileRanksAggregation - percentiles?: PercentilesAggregation - percentiles_bucket?: PercentilesBucketAggregation - range?: RangeAggregation - rare_terms?: RareTermsAggregation - rate?: RateAggregation - reverse_nested?: ReverseNestedAggregation - sampler?: SamplerAggregation - scripted_metric?: ScriptedMetricAggregation - serial_diff?: SerialDifferencingAggregation - significant_terms?: SignificantTermsAggregation - significant_text?: SignificantTextAggregation - stats?: StatsAggregation - stats_bucket?: StatsBucketAggregation - string_stats?: StringStatsAggregation - sum?: SumAggregation - sum_bucket?: SumBucketAggregation - terms?: TermsAggregation - top_hits?: TopHitsAggregation - t_test?: TTestAggregation - top_metrics?: TopMetricsAggregation - value_count?: ValueCountAggregation - weighted_avg?: WeightedAverageAggregation - variable_width_histogram?: 
VariableWidthHistogramAggregation -} - -export interface AggregationProfile { - breakdown: AggregationBreakdown - description: string - time_in_nanos: long - type: string - debug: AggregationProfileDebug - children?: Array +export interface BulkRequest extends RequestBase { + index?: IndexName + type?: Type + pipeline?: string + refresh?: Refresh + routing?: Routing + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + require_alias?: boolean + body?: (BulkBulkOperationContainer | TSource)[] } -export interface AggregationProfileDebug { +export interface BulkResponse { + errors: boolean + items: BulkBulkResponseItemContainer[] + took: long + ingest_took?: long } -export interface AggregationRange { - from?: double | string - key?: string - to?: double | string +export interface ClearScrollRequest extends RequestBase { + scroll_id?: Ids + body?: { + scroll_id?: Ids + } } -export interface Alias { - filter?: QueryContainer - index_routing?: Routing - is_hidden?: boolean - is_write_index?: boolean - routing?: Routing - search_routing?: Routing +export interface ClearScrollResponse { + succeeded: boolean + num_freed: integer } -export interface AliasAction { +export interface ClosePointInTimeRequest extends RequestBase { + body?: { + id: Id + } } -export interface AliasDefinition { - filter?: QueryContainer - index_routing?: string - is_write_index?: boolean - routing?: string - search_routing?: string +export interface ClosePointInTimeResponse { + succeeded: boolean + num_freed: integer } -export interface AliasExistsRequest extends RequestBase { - name: Names +export interface CountRequest extends RequestBase { index?: Indices + type?: Types allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + default_operator?: DefaultOperator + df?: string expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean ignore_unavailable?: boolean - local?: boolean + lenient?: boolean + min_score?: double + preference?: string + query_on_query_string?: string + routing?: Routing + terminate_after?: long + q?: string + body?: { + query?: QueryDslQueryContainer + } } -export type AliasExistsResponse = boolean - -export interface AllField { - analyzer: string - enabled: boolean - omit_norms: boolean - search_analyzer: string - similarity: string - store: boolean - store_term_vector_offsets: boolean - store_term_vector_payloads: boolean - store_term_vector_positions: boolean - store_term_vectors: boolean +export interface CountResponse { + count: long + _shards: ShardStatistics } -export interface AllocationDecision { - decider: string - decision: AllocationExplainDecision - explanation: string +export interface CreateRequest extends RequestBase { + id: Id + index: IndexName + type?: Type + pipeline?: string + refresh?: Refresh + routing?: Routing + timeout?: Time + version?: VersionNumber + version_type?: VersionType + wait_for_active_shards?: WaitForActiveShards + body?: TDocument } -export type AllocationExplainDecision = 'NO' | 'YES' | 'THROTTLE' | 'ALWAYS' +export interface CreateResponse extends WriteResponseBase { +} -export interface AllocationStore { - allocation_id: string - found: boolean - in_sync: boolean - matching_size_in_bytes: long - matching_sync_id: boolean - store_exception: string +export interface DeleteRequest extends RequestBase { + id: Id + index: IndexName + type?: Type + if_primary_term?: long + if_seq_no?: SequenceNumber + refresh?: Refresh + routing?: Routing + 
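// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated definitions): the
// `BulkRequest`/`BulkResponse` and `CountRequest`/`CountResponse` shapes above
// are the types behind `client.bulk()` and `client.count()`. A minimal usage
// sketch, assuming a reachable cluster, a hypothetical `my-index`, and the
// promise API; on client versions that wrap results in an ApiResponse, the
// same fields live under `response.body`.
/*
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // The body alternates action and source lines, as modelled by the
  // (BulkBulkOperationContainer | TSource)[] body type above.
  const bulkResponse = await client.bulk({
    refresh: 'wait_for',
    body: [
      { index: { _index: 'my-index', _id: '1' } },
      { title: 'hello' },
      { index: { _index: 'my-index', _id: '2' } },
      { title: 'world' }
    ]
  })
  if (bulkResponse.errors) {
    // Each item is a BulkBulkResponseItemContainer keyed by operation type.
    for (const item of bulkResponse.items) console.log(item.index?.error)
  }

  const countResponse = await client.count({ index: 'my-index' })
  console.log(countResponse.count)
}

run().catch(console.log)
*/
// ---------------------------------------------------------------------------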
timeout?: Time + version?: VersionNumber + version_type?: VersionType + wait_for_active_shards?: WaitForActiveShards } -export interface AlwaysCondition { +export interface DeleteResponse extends WriteResponseBase { } -export interface AnalysisConfig { - bucket_span: TimeSpan - categorization_field_name?: Field - categorization_filters?: Array - detectors: Array - influencers?: Array - latency?: Time - multivariate_by_fields?: boolean - per_partition_categorization?: PerPartitionCategorization - summary_count_field_name?: Field - categorization_analyzer?: CategorizationAnalyzer | string +export interface DeleteByQueryRequest extends RequestBase { + index: Indices + type?: Types + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + conflicts?: Conflicts + default_operator?: DefaultOperator + df?: string + expand_wildcards?: ExpandWildcards + from?: long + ignore_unavailable?: boolean + lenient?: boolean + max_docs?: long + preference?: string + refresh?: boolean + request_cache?: boolean + requests_per_second?: long + routing?: Routing + q?: string + scroll?: Time + scroll_size?: long + search_timeout?: Time + search_type?: SearchType + size?: long + slices?: long + sort?: string[] + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + stats?: string[] + terminate_after?: long + timeout?: Time + version?: boolean + wait_for_active_shards?: WaitForActiveShards + wait_for_completion?: boolean + body?: { + max_docs?: long + query?: QueryDslQueryContainer + slice?: SlicedScroll + } } -export interface AnalysisLimits { - categorization_examples_limit?: long - model_memory_limit: string +export interface DeleteByQueryResponse { + batches?: long + deleted?: long + failures?: BulkIndexByScrollFailure[] + noops?: long + requests_per_second?: float + retries?: Retries + slice_id?: integer + task?: TaskId + throttled_millis?: long + throttled_until_millis?: long + timed_out?: boolean + took?: long + total?: long + version_conflicts?: long } -export interface AnalysisMemoryLimit { - model_memory_limit: string +export interface DeleteByQueryRethrottleRequest extends RequestBase { + task_id: Id + requests_per_second?: long } -export interface AnalyticsStatsUsage { - boxplot_usage: long - cumulative_cardinality_usage: long - string_stats_usage: long - top_metrics_usage: long - t_test_usage: long - moving_percentiles_usage: long - normalize_usage: long - rate_usage: long - multi_terms_usage?: long +export interface DeleteByQueryRethrottleResponse extends TaskListTasksResponse { } -export interface AnalyticsUsage extends XPackUsage { - stats: AnalyticsStatsUsage +export interface DeleteScriptRequest extends RequestBase { + id: Id + master_timeout?: Time + timeout?: Time } -export interface AnalyzeDetail { - analyzer?: AnalyzerDetail - charfilters?: Array - custom_analyzer: boolean - tokenfilters?: Array - tokenizer?: TokenDetail +export interface DeleteScriptResponse extends AcknowledgedResponseBase { } -export interface AnalyzeRequest extends RequestBase { - index?: IndexName - body?: { - analyzer?: string - attributes?: Array - char_filter?: Array - explain?: boolean - field?: Field - filter?: Array - normalizer?: string - text?: TextToAnalyze - tokenizer?: string | Tokenizer - } +export interface ExistsRequest extends RequestBase { + id: Id + index: IndexName + type?: Type + preference?: string + realtime?: boolean + refresh?: boolean + routing?: Routing + source_enabled?: boolean + source_excludes?: Fields + source_includes?: Fields + 
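// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated definitions): the
// `DeleteByQueryRequest`/`DeleteByQueryResponse` pair above types
// `client.deleteByQuery()`. Hypothetical index name; reuses the `client`
// instance from the bulk sketch above. `conflicts: 'proceed'` keeps the
// operation going past version conflicts instead of aborting.
/*
const response = await client.deleteByQuery({
  index: 'my-index',
  conflicts: 'proceed',
  body: {
    query: { match: { title: 'hello' } }
  }
})
// Every counter on DeleteByQueryResponse is optional.
console.log(response.deleted, response.version_conflicts)
*/
// ---------------------------------------------------------------------------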
stored_fields?: Fields + version?: VersionNumber + version_type?: VersionType } -export interface AnalyzeResponse extends ResponseBase { - detail?: AnalyzeDetail - tokens?: Array +export type ExistsResponse = boolean + +export interface ExistsSourceRequest extends RequestBase { + id: Id + index: IndexName + type?: Type + preference?: string + realtime?: boolean + refresh?: boolean + routing?: Routing + source_enabled?: boolean + source_excludes?: Fields + source_includes?: Fields + version?: VersionNumber + version_type?: VersionType } -export interface AnalyzeToken { - end_offset: long - position: long - position_length?: long - start_offset: long - token: string - type: string +export type ExistsSourceResponse = boolean + +export interface ExplainExplanation { + description: string + details: ExplainExplanationDetail[] + value: float } -export interface AnalyzerDetail { - name: string - tokens: Array +export interface ExplainExplanationDetail { + description: string + details?: ExplainExplanationDetail[] + value: float } -export interface AnomalyCause { - actual: Array - by_field_name: string - by_field_value: string - correlated_by_field_value: string - field_name: string - function: string - function_description: string - influencers: Array - over_field_name: string - over_field_value: string - partition_field_name: string - partition_field_value: string - probability: double - typical: Array +export interface ExplainRequest extends RequestBase { + id: Id + index: IndexName + type?: Type + analyzer?: string + analyze_wildcard?: boolean + default_operator?: DefaultOperator + df?: string + lenient?: boolean + preference?: string + query_on_query_string?: string + routing?: Routing + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + stored_fields?: Fields + q?: string + body?: { + query?: QueryDslQueryContainer + } } -export interface AnomalyDetectors { - categorization_analyzer: CategorizationAnalyzer - categorization_examples_limit: integer - model_memory_limit: ByteSize - model_snapshot_retention_days: integer - daily_model_snapshot_retention_after_days: integer +export interface ExplainResponse { + _index: IndexName + _type?: Type + _id: Id + matched: boolean + explanation?: ExplainExplanationDetail + get?: InlineGet } -export interface AnomalyRecord { - actual?: Array - bucket_span: Time - by_field_name?: string - by_field_value?: string - causes?: Array - detector_index: integer - field_name?: string - function?: string - function_description?: string - influencers?: Array - initial_record_score: double - is_interim: boolean - job_id: string - over_field_name?: string - over_field_value?: string - partition_field_name?: string - partition_field_value?: string - probability: double - record_score: double - result_type: string - timestamp: EpochMillis - typical?: Array +export interface FieldCapsFieldCapabilitiesBodyIndexFilter { + range?: FieldCapsFieldCapabilitiesBodyIndexFilterRange + match_none?: EmptyObject + term?: FieldCapsFieldCapabilitiesBodyIndexFilterTerm } -export interface ApiKey { - name: Name - expiration?: Time - role_descriptors?: Array> +export interface FieldCapsFieldCapabilitiesBodyIndexFilterRange { + timestamp: FieldCapsFieldCapabilitiesBodyIndexFilterRangeTimestamp } -export interface ApiKeyApplication { - application: string - privileges: Array - resources: Array +export interface FieldCapsFieldCapabilitiesBodyIndexFilterRangeTimestamp { + gte?: integer + gt?: integer + lte?: integer + lt?: integer } -export type 
ApiKeyGrantType = 'access_token' | 'password' +export interface FieldCapsFieldCapabilitiesBodyIndexFilterTerm { + versionControl: FieldCapsFieldCapabilitiesBodyIndexFilterTermVersionControl +} -export interface ApiKeyPrivileges { - names: Indices - privileges: Array +export interface FieldCapsFieldCapabilitiesBodyIndexFilterTermVersionControl { + value: string +} + +export interface FieldCapsFieldCapability { + aggregatable: boolean + indices?: Indices + meta?: Record + non_aggregatable_indices?: Indices + non_searchable_indices?: Indices + searchable: boolean + type: string } -export interface ApiKeyRole { - cluster: Array - index: Array - applications?: Array +export interface FieldCapsRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + fields?: Fields + ignore_unavailable?: boolean + include_unmapped?: boolean + body?: { + index_filter?: FieldCapsFieldCapabilitiesBodyIndexFilter + } } -export interface ApiKeys { - creation: long - expiration?: long +export interface FieldCapsResponse { + indices: Indices + fields: Record> +} + +export interface GetRequest extends RequestBase { id: Id - invalidated: boolean - name: Name - realm: string - username: Name - metadata?: Record + index: IndexName + type?: Type + preference?: string + realtime?: boolean + refresh?: boolean + routing?: Routing + source_enabled?: boolean + _source_excludes?: Fields + _source_includes?: Fields + stored_fields?: Fields + version?: VersionNumber + version_type?: VersionType + _source?: boolean | Fields } -export interface AppendProcessor extends ProcessorBase { - field: Field - value: Array - allow_duplicates?: boolean +export interface GetResponse { + _index: IndexName + fields?: Record + found: boolean + _id: Id + _primary_term?: long + _routing?: string + _seq_no?: SequenceNumber + _source?: TDocument + _type?: Type + _version?: VersionNumber } -export interface ApplicationGlobalUserPrivileges { - manage: ManageUserPrivileges +export interface GetScriptRequest extends RequestBase { + id: Id + master_timeout?: Time } -export interface ApplicationPrivileges { - application: string - privileges: Array - resources: Array +export interface GetScriptResponse { + _id: Id + found: boolean + script?: StoredScript } -export interface ApplicationPrivilegesCheck { - application: string - privileges: Array - resources: Array +export interface GetScriptContextContext { + methods: GetScriptContextContextMethod[] + name: Name } -export interface ApplicationResourcePrivileges { - application: string - privileges: Array - resources: Array +export interface GetScriptContextContextMethod { + name: Name + return_type: string + params: GetScriptContextContextMethodParam[] } -export type ApplicationsPrivileges = Record +export interface GetScriptContextContextMethodParam { + name: Name + type: string +} -export type AppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time' +export interface GetScriptContextRequest extends RequestBase { +} -export interface ArrayCompareCondition { - array_path: string - comparison: string - path: string - quantifier: Quantifier - value: any +export interface GetScriptContextResponse { + contexts: GetScriptContextContext[] } -export interface AsciiFoldingTokenFilter extends TokenFilterBase { - preserve_original: boolean +export interface GetScriptLanguagesLanguageContext { + contexts: string[] + language: ScriptLanguage } -export interface AsyncSearch { - aggregations?: Record - _clusters?: ClusterStatistics - fields?: Record - hits: 
HitsMetadata - max_score?: double - num_reduce_phases?: long - profile?: Profile - pit_id?: Id - _scroll_id?: Id - _shards: ShardStatistics - suggest?: Record>> - terminated_early?: boolean - timed_out: boolean - took: long +export interface GetScriptLanguagesRequest extends RequestBase { } -export interface AsyncSearchDeleteRequest extends RequestBase { - id: Id +export interface GetScriptLanguagesResponse { + language_contexts: GetScriptLanguagesLanguageContext[] + types_allowed: string[] } -export interface AsyncSearchDeleteResponse extends AcknowledgedResponseBase { +export interface GetSourceRequest extends GetRequest { } -export interface AsyncSearchDocumentResponseBase extends AsyncSearchResponseBase { - response: AsyncSearch +export type GetSourceResponse = TDocument + +export interface IndexRequest extends RequestBase { + id?: Id + index: IndexName + type?: Type + if_primary_term?: long + if_seq_no?: SequenceNumber + op_type?: OpType + pipeline?: string + refresh?: Refresh + routing?: Routing + timeout?: Time + version?: VersionNumber + version_type?: VersionType + wait_for_active_shards?: WaitForActiveShards + require_alias?: boolean + body?: TDocument } -export interface AsyncSearchGetRequest extends RequestBase { - id: Id - typed_keys?: boolean - body?: { - keep_alive?: Time - typed_keys?: boolean - wait_for_completion_timeout?: Time - } +export interface IndexResponse extends WriteResponseBase { } -export interface AsyncSearchGetResponse extends AsyncSearchDocumentResponseBase { +export interface InfoRequest extends RequestBase { } -export interface AsyncSearchResponseBase extends ResponseBase { - id?: Id - is_partial: boolean - is_running: boolean - expiration_time_in_millis: EpochMillis - start_time_in_millis: EpochMillis +export interface InfoResponse { + cluster_name: Name + cluster_uuid: Uuid + name: Name + tagline: string + version: ElasticsearchVersionInfo } -export interface AsyncSearchStatusRequest extends RequestBase { - id: Id +export interface MgetHit { + error?: MainError + fields?: Record + found?: boolean + _id: Id + _index: IndexName + _primary_term?: long + _routing?: Routing + _seq_no?: SequenceNumber + _source?: TDocument + _type?: Type + _version?: VersionNumber } -export interface AsyncSearchStatusResponse extends AsyncSearchResponseBase { - _shards: ShardStatistics - completion_status: integer +export type MgetMultiGetId = string | integer + +export interface MgetOperation { + _id: MgetMultiGetId + _index?: IndexName + routing?: Routing + _source?: boolean | Fields | SearchTypesSourceFilter + stored_fields?: Fields + _type?: Type + version?: VersionNumber + version_type?: VersionType } -export interface AsyncSearchSubmitRequest extends RequestBase { - index?: Indices - batched_reduce_size?: long - wait_for_completion_timeout?: Time - keep_on_completion?: boolean - typed_keys?: boolean +export interface MgetRequest extends RequestBase { + index?: IndexName + type?: Type + preference?: string + realtime?: boolean + refresh?: boolean + routing?: Routing + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + stored_fields?: Fields body?: { - aggs?: Record - allow_no_indices?: boolean - allow_partial_search_results?: boolean - analyzer?: string - analyze_wildcard?: boolean - batched_reduce_size?: long - collapse?: FieldCollapse - default_operator?: DefaultOperator - df?: string - docvalue_fields?: Fields - expand_wildcards?: ExpandWildcards - explain?: boolean - from?: integer - highlight?: Highlight - ignore_throttled?: boolean 
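// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated definitions): `IndexRequest`,
// `GetRequest` and `GetResponse` above type a simple write/read round trip.
// Get is realtime, so the document is visible without an explicit refresh.
// Hypothetical index name; reuses the `client` instance from the bulk sketch
// above.
/*
await client.index({
  index: 'my-index',
  id: '1',
  body: { title: 'hello' }
})

const doc = await client.get({ index: 'my-index', id: '1' })
if (doc.found) {
  // _source carries the stored document (TDocument when a type is supplied).
  console.log(doc._source)
}
*/
// ---------------------------------------------------------------------------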
- ignore_unavailable?: boolean - indices_boost?: Array> - keep_alive?: Time - keep_on_completion?: boolean - lenient?: boolean - max_concurrent_shard_requests?: long - min_score?: double - post_filter?: QueryContainer - preference?: string - profile?: boolean - pit?: PointInTimeReference - query?: QueryContainer - query_on_query_string?: string - request_cache?: boolean - rescore?: Array - routing?: Routing - script_fields?: Record - search_after?: Array - search_type?: SearchType - sequence_number_primary_term?: boolean - size?: integer - sort?: Sort - _source?: boolean | SourceFilter - stats?: Array - stored_fields?: Fields - suggest?: Record - suggest_field?: Field - suggest_mode?: SuggestMode - suggest_size?: long - suggest_text?: string - terminate_after?: long - timeout?: string - track_scores?: boolean - track_total_hits?: boolean - typed_keys?: boolean - version?: boolean - wait_for_completion_timeout?: Time - fields?: Array + docs?: MgetOperation[] + ids?: MgetMultiGetId[] } } -export interface AsyncSearchSubmitResponse extends AsyncSearchDocumentResponseBase { +export interface MgetResponse { + docs: MgetHit[] } -export interface AttachmentProcessor extends ProcessorBase { - field: Field - ignore_missing?: boolean - indexed_chars?: long - indexed_chars_field?: Field - properties?: Array - target_field?: Field - resource_name?: string +export interface MsearchBody { + aggregations?: Record + aggs?: Record + query?: QueryDslQueryContainer + from?: integer + size?: integer + pit?: SearchTypesPointInTimeReference + track_total_hits?: boolean | integer + suggest?: SearchTypesSuggestContainer | Record } -export interface AuditUsage extends SecurityFeatureToggle { - outputs?: Array +export interface MsearchHeader { + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + index?: Indices + preference?: string + request_cache?: boolean + routing?: string + search_type?: SearchType } -export interface AuthenticateRequest extends RequestBase { +export interface MsearchRequest extends RequestBase { + index?: Indices + type?: Types + allow_no_indices?: boolean + ccs_minimize_roundtrips?: boolean + expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean + max_concurrent_searches?: long + max_concurrent_shard_requests?: long + pre_filter_shard_size?: long + search_type?: SearchType + rest_total_hits_as_int?: boolean + typed_keys?: boolean + body?: (MsearchHeader | MsearchBody)[] } -export interface AuthenticateResponse extends ResponseBase { - authentication_realm: RealmInfo - email?: string - full_name?: string - lookup_realm: RealmInfo - metadata: Record - roles: Array - username: string - enabled: boolean - authentication_type: string +export interface MsearchResponse { + took: long + responses: (MsearchSearchResult | ErrorResponseBase)[] } -export interface AuthenticatedUser extends XPackUser { - authentication_realm: UserRealm - lookup_realm: UserRealm - authentication_provider?: AuthenticationProvider - authentication_type: string +export interface MsearchSearchResult extends SearchResponse { + status: integer } -export interface AuthenticationProvider { - type: string - name: string +export interface MsearchTemplateRequest extends RequestBase { + index?: Indices + type?: Types + ccs_minimize_roundtrips?: boolean + max_concurrent_searches?: long + search_type?: SearchType + rest_total_hits_as_int?: boolean + typed_keys?: boolean + body?: MsearchTemplateTemplateItem[] } -export interface 
AutoDateHistogramAggregate extends MultiBucketAggregate> { - interval: DateMathTime +export interface MsearchTemplateResponse { + responses: SearchResponse[] + took: long } -export interface AutoDateHistogramAggregation extends BucketAggregationBase { - buckets?: integer - field?: Field - format?: string - minimum_interval?: MinimumInterval - missing?: DateString - offset?: string +export interface MsearchTemplateTemplateItem { + id?: Id + index?: Indices params?: Record - script?: Script - time_zone?: string -} - -export interface AutoFollowPattern { - active: boolean - remote_cluster: string - follow_index_pattern?: IndexPattern - leader_index_patterns: IndexPatterns - max_outstanding_read_requests: integer -} - -export interface AutoFollowPatternItem { - name: Name - pattern: AutoFollowPattern -} - -export interface AutoFollowedCluster { - cluster_name: Name - last_seen_metadata_version: VersionNumber - time_since_last_check_millis: DateString -} - -export interface AverageAggregation extends FormatMetricAggregationBase { -} - -export interface AverageBucketAggregation extends PipelineAggregationBase { -} - -export interface BaseUrlConfig { - url_name: string - url_value: string -} - -export interface BinaryProperty extends DocValuesPropertyBase { - type: 'binary' + source?: string } -export interface BoolQuery extends QueryBase { - filter?: QueryContainer | Array - minimum_should_match?: MinimumShouldMatch - must?: QueryContainer | Array - must_not?: QueryContainer | Array - should?: QueryContainer | Array +export interface MtermvectorsOperation { + doc: object + fields: Fields + field_statistics: boolean + filter: TermvectorsFilter + _id: Id + _index: IndexName + offsets: boolean + payloads: boolean + positions: boolean + routing: Routing + term_statistics: boolean + version: VersionNumber + version_type: VersionType } -export interface BooleanProperty extends DocValuesPropertyBase { - boost?: double - fielddata?: NumericFielddata - index?: boolean - null_value?: boolean - type: 'boolean' +export interface MtermvectorsRequest extends RequestBase { + index?: IndexName + type?: Type + fields?: Fields + field_statistics?: boolean + offsets?: boolean + payloads?: boolean + positions?: boolean + preference?: string + realtime?: boolean + routing?: Routing + term_statistics?: boolean + version?: VersionNumber + version_type?: VersionType + body?: { + docs?: MtermvectorsOperation[] + ids?: Id[] + } } -export interface BoostingQuery extends QueryBase { - negative_boost?: double - negative?: QueryContainer - positive?: QueryContainer +export interface MtermvectorsResponse { + docs: MtermvectorsTermVectorsResult[] } -export type BoundaryScanner = 'chars' | 'sentence' | 'word' - -export interface BoundingBox { - bottom_right?: GeoLocation - top_left?: GeoLocation - wkt?: string +export interface MtermvectorsTermVectorsResult { + found: boolean + id: Id + index: IndexName + term_vectors: Record + took: long + version: VersionNumber } -export interface BoxPlotAggregate extends AggregateBase { - min: double - max: double - q1: double - q2: double - q3: double +export interface OpenPointInTimeRequest extends RequestBase { + index: Indices + keep_alive?: Time } -export interface BoxplotAggregation extends MetricAggregationBase { - compression?: double +export interface OpenPointInTimeResponse { + id: Id } -export interface BreakerStats { - estimated_size: string - estimated_size_in_bytes: long - limit_size: string - limit_size_in_bytes: long - overhead: float - tripped: float +export interface 
PingRequest extends RequestBase { } -export type Bucket = CompositeBucket | DateHistogramBucket | FiltersBucketItem | IpRangeBucket | RangeBucket | RareTermsBucket | SignificantTermsBucket | KeyedBucket +export type PingResponse = boolean -export interface BucketAggregate extends AggregateBase { - after_key: Record - bg_count: long - doc_count: long - doc_count_error_upper_bound: long - sum_other_doc_count: long - interval: DateMathTime - items: Bucket +export interface PutScriptRequest extends RequestBase { + id: Id + context?: Name + master_timeout?: Time + timeout?: Time + body?: { + script?: StoredScript + } } -export interface BucketAggregationBase extends Aggregation { - aggregations?: Record +export interface PutScriptResponse extends AcknowledgedResponseBase { } -export interface BucketInfluencer { - bucket_span: long - influencer_field_name: string - influencer_field_value: string - influencer_score: double - initial_influencer_score: double - is_interim: boolean - job_id: Id - probability: double - result_type: string - timestamp: DateString +export interface RankEvalDocumentRating { + _id: Id + _index: IndexName + rating: integer } -export interface BucketScriptAggregation extends PipelineAggregationBase { - script?: Script +export interface RankEvalRankEvalHit { + _id: Id + _index: IndexName + _type?: Type + _score: double } -export interface BucketSelectorAggregation extends PipelineAggregationBase { - script?: Script +export interface RankEvalRankEvalHitItem { + hit: RankEvalRankEvalHit + rating?: double } -export interface BucketSortAggregation extends Aggregation { - from?: integer - gap_policy?: GapPolicy - size?: integer - sort?: Sort +export interface RankEvalRankEvalMetric { + precision?: RankEvalRankEvalMetricPrecision + recall?: RankEvalRankEvalMetricRecall + mean_reciprocal_rank?: RankEvalRankEvalMetricMeanReciprocalRank + dcg?: RankEvalRankEvalMetricDiscountedCumulativeGain + expected_reciprocal_rank?: RankEvalRankEvalMetricExpectedReciprocalRank } -export interface BucketsPath { +export interface RankEvalRankEvalMetricBase { + k?: integer } -export interface BulkAliasRequest extends RequestBase { - master_timeout?: Time - timeout?: Time - body: { - actions?: Array - } +export interface RankEvalRankEvalMetricDetail { + metric_score: double + unrated_docs: RankEvalUnratedDocument[] + hits: RankEvalRankEvalHitItem[] + metric_details: Record> } -export interface BulkAliasResponse extends AcknowledgedResponseBase { +export interface RankEvalRankEvalMetricDiscountedCumulativeGain extends RankEvalRankEvalMetricBase { + normalize?: boolean } -export interface BulkCreateOperation extends BulkOperation { +export interface RankEvalRankEvalMetricExpectedReciprocalRank extends RankEvalRankEvalMetricBase { + maximum_relevance: integer } -export interface BulkCreateResponseItem extends BulkResponseItemBase { +export interface RankEvalRankEvalMetricMeanReciprocalRank extends RankEvalRankEvalMetricRatingTreshold { } -export interface BulkDeleteOperation extends BulkOperation { +export interface RankEvalRankEvalMetricPrecision extends RankEvalRankEvalMetricRatingTreshold { + ignore_unlabeled?: boolean } -export interface BulkDeleteResponseItem extends BulkResponseItemBase { +export interface RankEvalRankEvalMetricRatingTreshold extends RankEvalRankEvalMetricBase { + relevant_rating_threshold?: integer } -export interface BulkIndexByScrollFailure { - cause: MainError - id: string - index: string - status: integer - type: string +export interface RankEvalRankEvalMetricRecall extends 
RankEvalRankEvalMetricRatingTreshold { } -export interface BulkIndexOperation extends BulkOperation { +export interface RankEvalRankEvalQuery { + query: QueryDslQueryContainer + size?: integer } -export interface BulkIndexResponseItem extends BulkResponseItemBase { +export interface RankEvalRankEvalRequestItem { + id: Id + request?: RankEvalRankEvalQuery + ratings: RankEvalDocumentRating[] + template_id?: Id + params?: Record } -export interface BulkMonitoringRequest extends RequestBase { - stub_a: string - stub_b: string - body: { - stub_c: string +export interface RankEvalRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + search_type?: string + body?: { + requests: RankEvalRankEvalRequestItem[] + metric?: RankEvalRankEvalMetric } } -export interface BulkMonitoringResponse extends ResponseBase { - stub: integer +export interface RankEvalResponse { + metric_score: double + details: Record + failures: Record } -export interface BulkOperation { +export interface RankEvalUnratedDocument { _id: Id _index: IndexName - retry_on_conflict: integer - routing: Routing - version: VersionNumber - version_type: VersionType } -export interface BulkOperationContainer { - index?: BulkIndexOperation - create?: BulkCreateOperation - update?: BulkUpdateOperation - delete?: BulkDeleteOperation +export interface ReindexDestination { + index: IndexName + op_type?: OpType + pipeline?: string + routing?: Routing + version_type?: VersionType } -export interface BulkRequest extends RequestBase { - index?: IndexName - type?: Type - pipeline?: string - refresh?: Refresh - routing?: Routing - _source?: boolean - _source_excludes?: Fields - _source_includes?: Fields +export interface ReindexRemoteSource { + connect_timeout: Time + host: Host + username: Username + password: Password + socket_timeout: Time +} + +export interface ReindexRequest extends RequestBase { + refresh?: boolean + requests_per_second?: long + scroll?: Time + slices?: long timeout?: Time - type_query_string?: string wait_for_active_shards?: WaitForActiveShards + wait_for_completion?: boolean require_alias?: boolean - body: Array + body?: { + conflicts?: Conflicts + dest?: ReindexDestination + max_docs?: long + script?: Script + size?: long + source?: ReindexSource + } } -export interface BulkResponse extends ResponseBase { - errors: boolean - items: Array - took: long - ingest_took?: long +export interface ReindexResponse { + batches?: long + created?: long + deleted?: long + failures?: BulkIndexByScrollFailure[] + noops?: long + retries?: Retries + requests_per_second?: long + slice_id?: integer + task?: TaskId + throttled_millis?: EpochMillis + throttled_until_millis?: EpochMillis + timed_out?: boolean + took?: Time + total?: long + updated?: long + version_conflicts?: long } -export interface BulkResponseItemBase { - _id?: string | null - _index: string - status: integer - error?: ErrorCause - _primary_term?: long - result?: string - _seq_no?: SequenceNumber - _shards?: ShardStatistics - _type?: string - _version?: VersionNumber - forced_refresh?: boolean - get?: InlineGet> +export interface ReindexSource { + index: Indices + query?: QueryDslQueryContainer + remote?: ReindexRemoteSource + size?: integer + slice?: SlicedScroll + sort?: SearchTypesSort + _source?: Fields } -export interface BulkResponseItemContainer { - index?: BulkIndexResponseItem - create?: BulkCreateResponseItem - update?: BulkUpdateResponseItem - delete?: BulkDeleteResponseItem 
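// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated definitions): the RankEval* and
// Reindex* shapes above type `client.rankEval()` and `client.reindex()`.
// Hypothetical index names and ratings; reuses the `client` instance from the
// bulk sketch above. With `wait_for_completion: false`, reindex returns a
// task id to poll via the tasks API.
/*
const rankEvalResponse = await client.rankEval({
  index: 'my-index',
  body: {
    requests: [{
      id: 'hello_query',
      request: { query: { match: { title: 'hello' } } },
      ratings: [{ _id: '1', _index: 'my-index', rating: 1 }]
    }],
    metric: { precision: { k: 10 } }
  }
})
console.log(rankEvalResponse.metric_score)

const reindexResponse = await client.reindex({
  wait_for_completion: false,
  body: {
    conflicts: 'proceed',
    source: { index: 'my-index' },
    dest: { index: 'my-index-copy' }
  }
})
console.log(reindexResponse.task)
*/
// ---------------------------------------------------------------------------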
+export interface ReindexRethrottleReindexNode extends SpecUtilsBaseNode {
+  tasks: Record<string, ReindexRethrottleReindexTask>
+}

-export interface BulkUpdateOperation extends BulkOperation {
-}

+export interface ReindexRethrottleReindexStatus {
+  batches: long
+  created: long
+  deleted: long
+  noops: long
+  requests_per_second: float
+  retries: Retries
+  throttled_millis: long
+  throttled_until_millis: long
+  total: long
+  updated: long
+  version_conflicts: long
+}

-export interface BulkUpdateResponseItem extends BulkResponseItemBase {
-}

+export interface ReindexRethrottleReindexTask {
+  action: string
+  cancellable: boolean
+  description: string
+  id: long
+  node: Name
+  running_time_in_nanos: long
+  start_time_in_millis: long
+  status: ReindexRethrottleReindexStatus
+  type: string
+  headers: HttpHeaders
+}

-export type ByteSize = long | string

+export interface ReindexRethrottleRequest extends RequestBase {
+  task_id: Id
+  requests_per_second?: long
+}

-export type Bytes = 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb'

+export interface ReindexRethrottleResponse {
+  nodes: Record<string, ReindexRethrottleReindexNode>
+}

-export interface BytesProcessor extends ProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}

+export interface RenderSearchTemplateRequest extends RequestBase {
+  body?: {
+    file?: string
+    params?: Record<string, any>
+    source?: string
+  }
+}

-export interface CPUStats {
-  percent: integer
-  sys: string
-  sys_in_millis: long
-  total: string
-  total_in_millis: long
-  user: string
-  user_in_millis: long
-}

+export interface RenderSearchTemplateResponse {
+  template_output: Record<string, any>
+}

-export interface Calendar {
-  calendar_id: string
-  description: string
-  job_ids: Array<string>
-}

+export interface ScriptsPainlessExecutePainlessContextSetup {
+  document: any
+  index: IndexName
+  query: QueryDslQueryContainer
+}

-export interface CancelTasksRequest extends RequestBase {
-  task_id?: TaskId
-  actions?: string | Array<string>
-  nodes?: Array<string>
-  parent_task_id?: string
-}

+export interface ScriptsPainlessExecutePainlessExecutionPosition {
+  offset: integer
+  start: integer
+  end: integer
+}

-export interface CancelTasksResponse extends ResponseBase {
-  node_failures?: Array<ErrorCause>
-  nodes: Record<string, TaskExecutingNode>
-}

+export interface ScriptsPainlessExecuteRequest extends RequestBase {
+  body?: {
+    context?: string
+    context_setup?: ScriptsPainlessExecutePainlessContextSetup
+    script?: InlineScript
+  }
+}

-export interface CardinalityAggregation extends MetricAggregationBase {
-  precision_threshold?: integer
-  rehash?: boolean
-}

+export interface ScriptsPainlessExecuteResponse<TResult = unknown> {
+  result: TResult
+}

-export interface CatAliasesRecord {
-  alias?: string
-  a?: string
-  index?: IndexName
-  i?: IndexName
-  idx?: IndexName
-  filter?: string
-  f?: string
-  fi?: string
-  'routing.index'?: string
-  ri?: string
-  routingIndex?: string
-  'routing.search'?: string
-  rs?: string
-  routingSearch?: string
-  is_write_index?: string
-  w?: string
-  isWriteIndex?: string
-}

+export interface ScrollRequest extends RequestBase {
+  scroll_id?: Id
+  scroll?: Time
+  rest_total_hits_as_int?: boolean
+  total_hits_as_integer?: boolean
+  body?: {
+    scroll?: Time
+    scroll_id: ScrollId
+    rest_total_hits_as_int?: boolean
+  }
+}

-export interface CatAliasesRequest extends CatRequestBase {
-  name?: Names

+export interface ScrollResponse<TDocument = unknown> extends SearchResponse<TDocument> {
+}

+export interface SearchRequest extends RequestBase {
+  index?: Indices
+  type?: Types
+  allow_no_indices?: boolean
+  allow_partial_search_results?: boolean
+  analyzer?: string
+  analyze_wildcard?: boolean
+  batched_reduce_size?: long
+
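// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated definitions): `ScrollRequest`
// and `ScrollResponse` above pair with the `scroll` parameter of the
// `SearchRequest` whose declaration continues below. A paging sketch; reuses
// the `client` instance from the bulk sketch above.
/*
let response = await client.search({
  index: 'my-index',
  scroll: '30s',
  size: 100,
  body: { query: { match_all: {} } }
})

while (response.hits.hits.length > 0) {
  // ...process the current page of hits here...
  response = await client.scroll({
    body: { scroll: '30s', scroll_id: response._scroll_id! }
  })
}

// Free server-side resources when done (see ClearScrollRequest above).
await client.clearScroll({ body: { scroll_id: response._scroll_id! } })
*/
// ---------------------------------------------------------------------------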
ccs_minimize_roundtrips?: boolean + default_operator?: DefaultOperator + df?: string + docvalue_fields?: Fields expand_wildcards?: ExpandWildcards + explain?: boolean + ignore_throttled?: boolean + ignore_unavailable?: boolean + lenient?: boolean + max_concurrent_shard_requests?: long + min_compatible_shard_node?: VersionString + preference?: string + pre_filter_shard_size?: long + request_cache?: boolean + routing?: Routing + scroll?: Time + search_type?: SearchType + stats?: string[] + stored_fields?: Fields + suggest_field?: Field + suggest_mode?: SuggestMode + suggest_size?: long + suggest_text?: string + terminate_after?: long + timeout?: Time + track_total_hits?: boolean | integer + track_scores?: boolean + typed_keys?: boolean + rest_total_hits_as_int?: boolean + version?: boolean + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + seq_no_primary_term?: boolean + q?: string + size?: integer + from?: integer + sort?: string | string[] + body?: { + aggs?: Record + aggregations?: Record + collapse?: SearchTypesFieldCollapse + explain?: boolean + from?: integer + highlight?: SearchTypesHighlight + track_total_hits?: boolean | integer + indices_boost?: Record[] + docvalue_fields?: SearchTypesDocValueField | (Field | SearchTypesDocValueField)[] + min_score?: double + post_filter?: QueryDslQueryContainer + profile?: boolean + query?: QueryDslQueryContainer + rescore?: SearchTypesRescore | SearchTypesRescore[] + script_fields?: Record + search_after?: (integer | string)[] + size?: integer + slice?: SlicedScroll + sort?: SearchTypesSort + _source?: boolean | Fields | SearchTypesSourceFilter + fields?: (Field | DateField)[] + suggest?: SearchTypesSuggestContainer | Record + terminate_after?: long + timeout?: string + track_scores?: boolean + version?: boolean + seq_no_primary_term?: boolean + stored_fields?: Fields + pit?: SearchTypesPointInTimeReference + runtime_mappings?: MappingRuntimeFields + stats?: string[] + } } -export type CatAliasesResponse = CatAliasesRecord[] +export interface SearchResponse { + took: long + timed_out: boolean + _shards: ShardStatistics + hits: SearchTypesHitsMetadata + aggregations?: Record + _clusters?: ClusterStatistics + documents?: TDocument[] + fields?: Record + max_score?: double + num_reduce_phases?: long + profile?: SearchTypesProfile + pit_id?: Id + _scroll_id?: ScrollId + suggest?: Record[]> + terminated_early?: boolean +} -export interface CatAllocationRecord { - shards?: string - s?: string - 'disk.indices'?: ByteSize - di?: ByteSize - diskIndices?: ByteSize - 'disk.used'?: ByteSize - du?: ByteSize - diskUsed?: ByteSize - 'disk.avail'?: ByteSize - da?: ByteSize - diskAvail?: ByteSize - 'disk.total'?: ByteSize - dt?: ByteSize - diskTotal?: ByteSize - 'disk.percent'?: Percentage - dp?: Percentage - diskPercent?: Percentage - host?: string - h?: string - ip?: string - node?: string - n?: string +export interface SearchTypesAggregationBreakdown { + build_aggregation: long + build_aggregation_count: long + build_leaf_collector: long + build_leaf_collector_count: long + collect: long + collect_count: long + initialize: long + initialize_count: long + post_collection?: long + post_collection_count?: long + reduce: long + reduce_count: long } -export interface CatAllocationRequest extends CatRequestBase { - node_id?: NodeIds - bytes?: Bytes +export interface SearchTypesAggregationProfile { + breakdown: SearchTypesAggregationBreakdown + description: string + time_in_nanos: long + type: string + debug?: 
SearchTypesAggregationProfileDebug + children?: SearchTypesAggregationProfileDebug[] } -export type CatAllocationResponse = CatAllocationRecord[] +export interface SearchTypesAggregationProfileDebug { +} -export interface CatCountRecord { - epoch?: EpochMillis - t?: EpochMillis - time?: EpochMillis - timestamp?: DateString - ts?: DateString - hms?: DateString - hhmmss?: DateString - count?: string - dc?: string - 'docs.count'?: string - docsCount?: string +export type SearchTypesBoundaryScanner = 'chars' | 'sentence' | 'word' + +export interface SearchTypesCollector { + name: string + reason: string + time_in_nanos: long + children?: SearchTypesCollector[] } -export interface CatCountRequest extends CatRequestBase { - index?: Indices +export interface SearchTypesCompletionSuggestOption { + collate_match?: boolean + contexts?: Record + fields?: Record + _id: string + _index: IndexName + _type?: Type + _routing?: Routing + _score: double + _source: TDocument + text: string } -export type CatCountResponse = CatCountRecord[] +export interface SearchTypesCompletionSuggester extends SearchTypesSuggesterBase { + contexts?: Record + fuzzy?: SearchTypesSuggestFuzziness + prefix?: string + regex?: string + skip_duplicates?: boolean +} -export interface CatDataFrameAnalyticsRecord { - id?: Id - type?: Type - t?: Type - create_time?: string - ct?: string - createTime?: string - version?: VersionString - v?: VersionString - source_index?: IndexName - si?: IndexName - sourceIndex?: IndexName - dest_index?: IndexName - di?: IndexName - destIndex?: IndexName - description?: string - d?: string - model_memory_limit?: string - mml?: string - modelMemoryLimit?: string - state?: string - s?: string - failure_reason?: string - fr?: string - failureReason?: string - progress?: string - p?: string - assignment_explanation?: string - ae?: string - assignmentExplanation?: string - 'node.id'?: Id - ni?: Id - nodeId?: Id - 'node.name'?: Name - nn?: Name - nodeName?: Name - 'node.ephemeral_id'?: Id - ne?: Id - nodeEphemeralId?: Id - 'node.address'?: string - na?: string - nodeAddress?: string +export type SearchTypesContext = string | QueryDslGeoLocation + +export interface SearchTypesDirectGenerator { + field: Field + max_edits?: integer + max_inspections?: float + max_term_freq?: float + min_doc_freq?: float + min_word_length?: integer + post_filter?: string + pre_filter?: string + prefix_length?: integer + size?: integer + suggest_mode?: SuggestMode } -export interface CatDataFrameAnalyticsRequest extends CatRequestBase { - id?: Id - allow_no_match?: boolean - bytes?: Bytes +export interface SearchTypesDocValueField { + field: Field + format?: string } -export type CatDataFrameAnalyticsResponse = CatDataFrameAnalyticsRecord[] +export interface SearchTypesFieldCollapse { + field: Field + inner_hits?: SearchTypesInnerHits | SearchTypesInnerHits[] + max_concurrent_group_searches?: integer +} -export interface CatDatafeedsRecord { - id?: string - state?: DatafeedState - s?: DatafeedState - assignment_explanation?: string - ae?: string - 'buckets.count'?: string - bc?: string - bucketsCount?: string - 'search.count'?: string - sc?: string - searchCount?: string - 'search.time'?: string - st?: string - searchTime?: string - 'search.bucket_avg'?: string - sba?: string - searchBucketAvg?: string - 'search.exp_avg_hour'?: string - seah?: string - searchExpAvgHour?: string - 'node.id'?: string - ni?: string - nodeId?: string - 'node.name'?: string - nn?: string - nodeName?: string - 'node.ephemeral_id'?: string - ne?: 
string - nodeEphemeralId?: string - 'node.address'?: string - na?: string - nodeAddress?: string +export interface SearchTypesFieldSort { + missing?: AggregationsMissing + mode?: SearchTypesSortMode + nested?: SearchTypesNestedSortValue + order?: SearchTypesSortOrder + unmapped_type?: MappingFieldType } -export interface CatDatafeedsRequest extends CatRequestBase { - datafeed_id?: Id - allow_no_datafeeds?: boolean +export interface SearchTypesGeoDistanceSortKeys { + mode?: SearchTypesSortMode + distance_type?: GeoDistanceType + order?: SearchTypesSortOrder + unit?: DistanceUnit +} +export type SearchTypesGeoDistanceSort = SearchTypesGeoDistanceSortKeys | + { [property: string]: QueryDslGeoLocation | QueryDslGeoLocation[] } + +export interface SearchTypesHighlight { + fields: Record + type?: SearchTypesHighlighterType + boundary_chars?: string + boundary_max_scan?: integer + boundary_scanner?: SearchTypesBoundaryScanner + boundary_scanner_locale?: string + encoder?: SearchTypesHighlighterEncoder + fragmenter?: SearchTypesHighlighterFragmenter + fragment_offset?: integer + fragment_size?: integer + max_fragment_length?: integer + no_match_size?: integer + number_of_fragments?: integer + order?: SearchTypesHighlighterOrder + post_tags?: string[] + pre_tags?: string[] + require_field_match?: boolean + tags_schema?: SearchTypesHighlighterTagsSchema + highlight_query?: QueryDslQueryContainer + max_analyzed_offset?: string | integer +} + +export interface SearchTypesHighlightField { + boundary_chars?: string + boundary_max_scan?: integer + boundary_scanner?: SearchTypesBoundaryScanner + boundary_scanner_locale?: string + field?: Field + force_source?: boolean + fragmenter?: SearchTypesHighlighterFragmenter + fragment_offset?: integer + fragment_size?: integer + highlight_query?: QueryDslQueryContainer + matched_fields?: Fields + max_fragment_length?: integer + no_match_size?: integer + number_of_fragments?: integer + order?: SearchTypesHighlighterOrder + phrase_limit?: integer + post_tags?: string[] + pre_tags?: string[] + require_field_match?: boolean + tags_schema?: SearchTypesHighlighterTagsSchema + type?: SearchTypesHighlighterType | string } -export type CatDatafeedsResponse = CatDatafeedsRecord[] +export type SearchTypesHighlighterEncoder = 'default' | 'html' -export interface CatFielddataRecord { - id?: string - host?: string - h?: string - ip?: string - node?: string - n?: string - field?: string - f?: string - size?: string +export type SearchTypesHighlighterFragmenter = 'simple' | 'span' + +export type SearchTypesHighlighterOrder = 'score' + +export type SearchTypesHighlighterTagsSchema = 'styled' + +export type SearchTypesHighlighterType = 'plain' | 'fvh' | 'unified' + +export interface SearchTypesHit { + _index: IndexName + _id: Id + _score?: double + _type?: Type + _explanation?: ExplainExplanation + fields?: Record + highlight?: Record + inner_hits?: Record + matched_queries?: string[] + _nested?: SearchTypesNestedIdentity + _ignored?: string[] + _shard?: string + _node?: string + _routing?: string + _source?: TDocument + _seq_no?: SequenceNumber + _primary_term?: long + _version?: VersionNumber + sort?: SearchTypesSortResults +} + +export interface SearchTypesHitsMetadata { + total: SearchTypesTotalHits | long + hits: SearchTypesHit[] + max_score?: double } -export interface CatFielddataRequest extends CatRequestBase { +export interface SearchTypesInnerHits { + name?: Name + size?: integer + from?: integer + collapse?: SearchTypesFieldCollapse + docvalue_fields?: Fields + explain?: 
boolean + highlight?: SearchTypesHighlight + ignore_unmapped?: boolean + script_fields?: Record + seq_no_primary_term?: boolean fields?: Fields - bytes?: Bytes + sort?: SearchTypesSort + _source?: boolean | SearchTypesSourceFilter + version?: boolean } -export type CatFielddataResponse = CatFielddataRecord[] +export interface SearchTypesInnerHitsMetadata { + total: SearchTypesTotalHits | long + hits: SearchTypesHit>[] + max_score?: double +} -export interface CatHealthRecord { - epoch?: EpochMillis - time?: EpochMillis - timestamp?: DateString - ts?: DateString - hms?: DateString - hhmmss?: DateString - cluster?: string - cl?: string - status?: string - st?: string - 'node.total'?: string - nt?: string - nodeTotal?: string - 'node.data'?: string - nd?: string - nodeData?: string - shards?: string - t?: string - sh?: string - 'shards.total'?: string - shardsTotal?: string - pri?: string - p?: string - 'shards.primary'?: string - shardsPrimary?: string - relo?: string - r?: string - 'shards.relocating'?: string - shardsRelocating?: string - init?: string - i?: string - 'shards.initializing'?: string - shardsInitializing?: string - unassign?: string - u?: string - 'shards.unassigned'?: string - shardsUnassigned?: string - pending_tasks?: string - pt?: string - pendingTasks?: string - max_task_wait_time?: string - mtwt?: string - maxTaskWaitTime?: string - active_shards_percent?: string - asp?: string - activeShardsPercent?: string +export interface SearchTypesInnerHitsResult { + hits: SearchTypesInnerHitsMetadata } -export interface CatHealthRequest extends CatRequestBase { - include_timestamp?: boolean - ts?: boolean +export interface SearchTypesLaplaceSmoothingModel { + alpha: double } -export type CatHealthResponse = CatHealthRecord[] +export interface SearchTypesLinearInterpolationSmoothingModel { + bigram_lambda: double + trigram_lambda: double + unigram_lambda: double +} -export interface CatHelpRecord { - endpoint: string +export interface SearchTypesNestedIdentity { + field: Field + offset: integer + _nested?: SearchTypesNestedIdentity } -export interface CatHelpRequest extends CatRequestBase { +export interface SearchTypesNestedSortValue { + filter?: QueryDslQueryContainer + max_children?: integer + path: Field } -export type CatHelpResponse = CatHelpRecord[] +export interface SearchTypesPhraseSuggestCollate { + params?: Record + prune?: boolean + query: SearchTypesPhraseSuggestCollateQuery +} -export interface CatIndicesRecord { - health?: string - h?: string - status?: string - s?: string - index?: string - i?: string - idx?: string - uuid?: string - id?: string - pri?: string - p?: string - 'shards.primary'?: string - shardsPrimary?: string - rep?: string - r?: string - 'shards.replica'?: string - shardsReplica?: string - 'docs.count'?: string - dc?: string - docsCount?: string - 'docs.deleted'?: string - dd?: string - docsDeleted?: string - 'creation.date'?: string - cd?: string - 'creation.date.string'?: string - cds?: string - 'store.size'?: string - ss?: string - storeSize?: string - 'pri.store.size'?: string - 'completion.size'?: string - cs?: string - completionSize?: string - 'pri.completion.size'?: string - 'fielddata.memory_size'?: string - fm?: string - fielddataMemory?: string - 'pri.fielddata.memory_size'?: string - 'fielddata.evictions'?: string - fe?: string - fielddataEvictions?: string - 'pri.fielddata.evictions'?: string - 'query_cache.memory_size'?: string - qcm?: string - queryCacheMemory?: string - 'pri.query_cache.memory_size'?: string - 
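// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated definitions): the
// `SearchTypesHighlight` and `SearchTypesHit` shapes above surface in a plain
// search call: `highlight` goes into the request body, and each hit carries
// an optional `highlight` record next to `_source`. Reuses the `client`
// instance from the bulk sketch above.
/*
const response = await client.search({
  index: 'my-index',
  body: {
    query: { match: { title: 'hello' } },
    highlight: {
      fields: { title: {} }
    }
  }
})

for (const hit of response.hits.hits) {
  // hit.highlight maps field names to highlighted fragments.
  console.log(hit._source, hit.highlight?.title)
}
*/
// ---------------------------------------------------------------------------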
'query_cache.evictions'?: string - qce?: string - queryCacheEvictions?: string - 'pri.query_cache.evictions'?: string - 'request_cache.memory_size'?: string - rcm?: string - requestCacheMemory?: string - 'pri.request_cache.memory_size'?: string - 'request_cache.evictions'?: string - rce?: string - requestCacheEvictions?: string - 'pri.request_cache.evictions'?: string - 'request_cache.hit_count'?: string - rchc?: string - requestCacheHitCount?: string - 'pri.request_cache.hit_count'?: string - 'request_cache.miss_count'?: string - rcmc?: string - requestCacheMissCount?: string - 'pri.request_cache.miss_count'?: string - 'flush.total'?: string - ft?: string - flushTotal?: string - 'pri.flush.total'?: string - 'flush.total_time'?: string - ftt?: string - flushTotalTime?: string - 'pri.flush.total_time'?: string - 'get.current'?: string - gc?: string - getCurrent?: string - 'pri.get.current'?: string - 'get.time'?: string - gti?: string - getTime?: string - 'pri.get.time'?: string - 'get.total'?: string - gto?: string - getTotal?: string - 'pri.get.total'?: string - 'get.exists_time'?: string - geti?: string - getExistsTime?: string - 'pri.get.exists_time'?: string - 'get.exists_total'?: string - geto?: string - getExistsTotal?: string - 'pri.get.exists_total'?: string - 'get.missing_time'?: string - gmti?: string - getMissingTime?: string - 'pri.get.missing_time'?: string - 'get.missing_total'?: string - gmto?: string - getMissingTotal?: string - 'pri.get.missing_total'?: string - 'indexing.delete_current'?: string - idc?: string - indexingDeleteCurrent?: string - 'pri.indexing.delete_current'?: string - 'indexing.delete_time'?: string - idti?: string - indexingDeleteTime?: string - 'pri.indexing.delete_time'?: string - 'indexing.delete_total'?: string - idto?: string - indexingDeleteTotal?: string - 'pri.indexing.delete_total'?: string - 'indexing.index_current'?: string - iic?: string - indexingIndexCurrent?: string - 'pri.indexing.index_current'?: string - 'indexing.index_time'?: string - iiti?: string - indexingIndexTime?: string - 'pri.indexing.index_time'?: string - 'indexing.index_total'?: string - iito?: string - indexingIndexTotal?: string - 'pri.indexing.index_total'?: string - 'indexing.index_failed'?: string - iif?: string - indexingIndexFailed?: string - 'pri.indexing.index_failed'?: string - 'merges.current'?: string - mc?: string - mergesCurrent?: string - 'pri.merges.current'?: string - 'merges.current_docs'?: string - mcd?: string - mergesCurrentDocs?: string - 'pri.merges.current_docs'?: string - 'merges.current_size'?: string - mcs?: string - mergesCurrentSize?: string - 'pri.merges.current_size'?: string - 'merges.total'?: string - mt?: string - mergesTotal?: string - 'pri.merges.total'?: string - 'merges.total_docs'?: string - mtd?: string - mergesTotalDocs?: string - 'pri.merges.total_docs'?: string - 'merges.total_size'?: string - mts?: string - mergesTotalSize?: string - 'pri.merges.total_size'?: string - 'merges.total_time'?: string - mtt?: string - mergesTotalTime?: string - 'pri.merges.total_time'?: string - 'refresh.total'?: string - rto?: string - refreshTotal?: string - 'pri.refresh.total'?: string - 'refresh.time'?: string - rti?: string - refreshTime?: string - 'pri.refresh.time'?: string - 'refresh.external_total'?: string - reto?: string - 'pri.refresh.external_total'?: string - 'refresh.external_time'?: string - reti?: string - 'pri.refresh.external_time'?: string - 'refresh.listeners'?: string - rli?: string - refreshListeners?: string - 
'pri.refresh.listeners'?: string - 'search.fetch_current'?: string - sfc?: string - searchFetchCurrent?: string - 'pri.search.fetch_current'?: string - 'search.fetch_time'?: string - sfti?: string - searchFetchTime?: string - 'pri.search.fetch_time'?: string - 'search.fetch_total'?: string - sfto?: string - searchFetchTotal?: string - 'pri.search.fetch_total'?: string - 'search.open_contexts'?: string - so?: string - searchOpenContexts?: string - 'pri.search.open_contexts'?: string - 'search.query_current'?: string - sqc?: string - searchQueryCurrent?: string - 'pri.search.query_current'?: string - 'search.query_time'?: string - sqti?: string - searchQueryTime?: string - 'pri.search.query_time'?: string - 'search.query_total'?: string - sqto?: string - searchQueryTotal?: string - 'pri.search.query_total'?: string - 'search.scroll_current'?: string - scc?: string - searchScrollCurrent?: string - 'pri.search.scroll_current'?: string - 'search.scroll_time'?: string - scti?: string - searchScrollTime?: string - 'pri.search.scroll_time'?: string - 'search.scroll_total'?: string - scto?: string - searchScrollTotal?: string - 'pri.search.scroll_total'?: string - 'segments.count'?: string - sc?: string - segmentsCount?: string - 'pri.segments.count'?: string - 'segments.memory'?: string - sm?: string - segmentsMemory?: string - 'pri.segments.memory'?: string - 'segments.index_writer_memory'?: string - siwm?: string - segmentsIndexWriterMemory?: string - 'pri.segments.index_writer_memory'?: string - 'segments.version_map_memory'?: string - svmm?: string - segmentsVersionMapMemory?: string - 'pri.segments.version_map_memory'?: string - 'segments.fixed_bitset_memory'?: string - sfbm?: string - fixedBitsetMemory?: string - 'pri.segments.fixed_bitset_memory'?: string - 'warmer.current'?: string - wc?: string - warmerCurrent?: string - 'pri.warmer.current'?: string - 'warmer.total'?: string - wto?: string - warmerTotal?: string - 'pri.warmer.total'?: string - 'warmer.total_time'?: string - wtt?: string - warmerTotalTime?: string - 'pri.warmer.total_time'?: string - 'suggest.current'?: string - suc?: string - suggestCurrent?: string - 'pri.suggest.current'?: string - 'suggest.time'?: string - suti?: string - suggestTime?: string - 'pri.suggest.time'?: string - 'suggest.total'?: string - suto?: string - suggestTotal?: string - 'pri.suggest.total'?: string - 'memory.total'?: string - tm?: string - memoryTotal?: string - 'pri.memory.total'?: string - 'search.throttled'?: string - sth?: string - 'bulk.total_operations'?: string - bto?: string - bulkTotalOperation?: string - 'pri.bulk.total_operations'?: string - 'bulk.total_time'?: string - btti?: string - bulkTotalTime?: string - 'pri.bulk.total_time'?: string - 'bulk.total_size_in_bytes'?: string - btsi?: string - bulkTotalSizeInBytes?: string - 'pri.bulk.total_size_in_bytes'?: string - 'bulk.avg_time'?: string - bati?: string - bulkAvgTime?: string - 'pri.bulk.avg_time'?: string - 'bulk.avg_size_in_bytes'?: string - basi?: string - bulkAvgSizeInBytes?: string - 'pri.bulk.avg_size_in_bytes'?: string -} - -export interface CatIndicesRequest extends CatRequestBase { - index?: Indices - bytes?: Bytes - expand_wildcards?: ExpandWildcards - health?: Health - include_unloaded_segments?: boolean - pri?: boolean -} - -export type CatIndicesResponse = CatIndicesRecord[] - -export interface CatJobsRecord { - id?: Id - state?: JobState - s?: JobState - opened_time?: string - ot?: string - assignment_explanation?: string - ae?: string - 'data.processed_records'?: 
string - dpr?: string - dataProcessedRecords?: string - 'data.processed_fields'?: string - dpf?: string - dataProcessedFields?: string - 'data.input_bytes'?: ByteSize - dib?: ByteSize - dataInputBytes?: ByteSize - 'data.input_records'?: string - dir?: string - dataInputRecords?: string - 'data.input_fields'?: string - dif?: string - dataInputFields?: string - 'data.invalid_dates'?: string - did?: string - dataInvalidDates?: string - 'data.missing_fields'?: string - dmf?: string - dataMissingFields?: string - 'data.out_of_order_timestamps'?: string - doot?: string - dataOutOfOrderTimestamps?: string - 'data.empty_buckets'?: string - deb?: string - dataEmptyBuckets?: string - 'data.sparse_buckets'?: string - dsb?: string - dataSparseBuckets?: string - 'data.buckets'?: string - db?: string - dataBuckets?: string - 'data.earliest_record'?: string - der?: string - dataEarliestRecord?: string - 'data.latest_record'?: string - dlr?: string - dataLatestRecord?: string - 'data.last'?: string - dl?: string - dataLast?: string - 'data.last_empty_bucket'?: string - dleb?: string - dataLastEmptyBucket?: string - 'data.last_sparse_bucket'?: string - dlsb?: string - dataLastSparseBucket?: string - 'model.bytes'?: ByteSize - mb?: ByteSize - modelBytes?: ByteSize - 'model.memory_status'?: ModelMemoryStatus - mms?: ModelMemoryStatus - modelMemoryStatus?: ModelMemoryStatus - 'model.bytes_exceeded'?: ByteSize - mbe?: ByteSize - modelBytesExceeded?: ByteSize - 'model.memory_limit'?: string - mml?: string - modelMemoryLimit?: string - 'model.by_fields'?: string - mbf?: string - modelByFields?: string - 'model.over_fields'?: string - mof?: string - modelOverFields?: string - 'model.partition_fields'?: string - mpf?: string - modelPartitionFields?: string - 'model.bucket_allocation_failures'?: string - mbaf?: string - modelBucketAllocationFailures?: string - 'model.categorization_status'?: ModelCategorizationStatus - mcs?: ModelCategorizationStatus - modelCategorizationStatus?: ModelCategorizationStatus - 'model.categorized_doc_count'?: string - mcdc?: string - modelCategorizedDocCount?: string - 'model.total_category_count'?: string - mtcc?: string - modelTotalCategoryCount?: string - 'model.frequent_category_count'?: string - modelFrequentCategoryCount?: string - 'model.rare_category_count'?: string - mrcc?: string - modelRareCategoryCount?: string - 'model.dead_category_count'?: string - mdcc?: string - modelDeadCategoryCount?: string - 'model.failed_category_count'?: string - mfcc?: string - modelFailedCategoryCount?: string - 'model.log_time'?: string - mlt?: string - modelLogTime?: string - 'model.timestamp'?: string - mt?: string - modelTimestamp?: string - 'forecasts.total'?: string - ft?: string - forecastsTotal?: string - 'forecasts.memory.min'?: string - fmmin?: string - forecastsMemoryMin?: string - 'forecasts.memory.max'?: string - fmmax?: string - forecastsMemoryMax?: string - 'forecasts.memory.avg'?: string - fmavg?: string - forecastsMemoryAvg?: string - 'forecasts.memory.total'?: string - fmt?: string - forecastsMemoryTotal?: string - 'forecasts.records.min'?: string - frmin?: string - forecastsRecordsMin?: string - 'forecasts.records.max'?: string - frmax?: string - forecastsRecordsMax?: string - 'forecasts.records.avg'?: string - fravg?: string - forecastsRecordsAvg?: string - 'forecasts.records.total'?: string - frt?: string - forecastsRecordsTotal?: string - 'forecasts.time.min'?: string - ftmin?: string - forecastsTimeMin?: string - 'forecasts.time.max'?: string - ftmax?: string - 
forecastsTimeMax?: string - 'forecasts.time.avg'?: string - ftavg?: string - forecastsTimeAvg?: string - 'forecasts.time.total'?: string - ftt?: string - forecastsTimeTotal?: string - 'node.id'?: NodeId - ni?: NodeId - nodeId?: NodeId - 'node.name'?: string - nn?: string - nodeName?: string - 'node.ephemeral_id'?: NodeId - ne?: NodeId - nodeEphemeralId?: NodeId - 'node.address'?: string - na?: string - nodeAddress?: string - 'buckets.count'?: string - bc?: string - bucketsCount?: string - 'buckets.time.total'?: string - btt?: string - bucketsTimeTotal?: string - 'buckets.time.min'?: string - btmin?: string - bucketsTimeMin?: string - 'buckets.time.max'?: string - btmax?: string - bucketsTimeMax?: string - 'buckets.time.exp_avg'?: string - btea?: string - bucketsTimeExpAvg?: string - 'buckets.time.exp_avg_hour'?: string - bteah?: string - bucketsTimeExpAvgHour?: string -} - -export interface CatJobsRequest extends CatRequestBase { - job_id?: Id - allow_no_jobs?: boolean - bytes?: Bytes -} - -export type CatJobsResponse = CatJobsRecord[] - -export interface CatMasterRecord { - id?: string - host?: string - h?: string - ip?: string - node?: string - n?: string -} - -export interface CatMasterRequest extends CatRequestBase { -} - -export type CatMasterResponse = CatMasterRecord[] - -export interface CatNodeAttributesRecord { - node?: string - id?: string - pid?: string - host?: string - h?: string - ip?: string - i?: string - port?: string - attr?: string - value?: string -} - -export interface CatNodeAttributesRequest extends CatRequestBase { -} - -export type CatNodeAttributesResponse = CatNodeAttributesRecord[] - -export interface CatNodesRecord { - id?: Id - nodeId?: Id - pid?: string - p?: string - ip?: string - i?: string - port?: string - po?: string - http_address?: string - http?: string - version?: VersionString - v?: VersionString - flavor?: string - f?: string - type?: Type - t?: Type - build?: string - b?: string - jdk?: string - j?: string - 'disk.total'?: ByteSize - dt?: ByteSize - diskTotal?: ByteSize - 'disk.used'?: ByteSize - du?: ByteSize - diskUsed?: ByteSize - 'disk.avail'?: ByteSize - d?: ByteSize - da?: ByteSize - disk?: ByteSize - diskAvail?: ByteSize - 'disk.used_percent'?: Percentage - dup?: Percentage - diskUsedPercent?: Percentage - 'heap.current'?: string - hc?: string - heapCurrent?: string - 'heap.percent'?: Percentage - hp?: Percentage - heapPercent?: Percentage - 'heap.max'?: string - hm?: string - heapMax?: string - 'ram.current'?: string - rc?: string - ramCurrent?: string - 'ram.percent'?: Percentage - rp?: Percentage - ramPercent?: Percentage - 'ram.max'?: string - rn?: string - ramMax?: string - 'file_desc.current'?: string - fdc?: string - fileDescriptorCurrent?: string - 'file_desc.percent'?: Percentage - fdp?: Percentage - fileDescriptorPercent?: Percentage - 'file_desc.max'?: string - fdm?: string - fileDescriptorMax?: string - cpu?: string - load_1m?: string - load_5m?: string - load_15m?: string - l?: string - uptime?: string - u?: string - 'node.role'?: string - r?: string - role?: string - nodeRole?: string - master?: string - m?: string - name?: Name - n?: Name - 'completion.size'?: string - cs?: string - completionSize?: string - 'fielddata.memory_size'?: string - fm?: string - fielddataMemory?: string - 'fielddata.evictions'?: string - fe?: string - fielddataEvictions?: string - 'query_cache.memory_size'?: string - qcm?: string - queryCacheMemory?: string - 'query_cache.evictions'?: string - qce?: string - queryCacheEvictions?: string - 
'query_cache.hit_count'?: string - qchc?: string - queryCacheHitCount?: string - 'query_cache.miss_count'?: string - qcmc?: string - queryCacheMissCount?: string - 'request_cache.memory_size'?: string - rcm?: string - requestCacheMemory?: string - 'request_cache.evictions'?: string - rce?: string - requestCacheEvictions?: string - 'request_cache.hit_count'?: string - rchc?: string - requestCacheHitCount?: string - 'request_cache.miss_count'?: string - rcmc?: string - requestCacheMissCount?: string - 'flush.total'?: string - ft?: string - flushTotal?: string - 'flush.total_time'?: string - ftt?: string - flushTotalTime?: string - 'get.current'?: string - gc?: string - getCurrent?: string - 'get.time'?: string - gti?: string - getTime?: string - 'get.total'?: string - gto?: string - getTotal?: string - 'get.exists_time'?: string - geti?: string - getExistsTime?: string - 'get.exists_total'?: string - geto?: string - getExistsTotal?: string - 'get.missing_time'?: string - gmti?: string - getMissingTime?: string - 'get.missing_total'?: string - gmto?: string - getMissingTotal?: string - 'indexing.delete_current'?: string - idc?: string - indexingDeleteCurrent?: string - 'indexing.delete_time'?: string - idti?: string - indexingDeleteTime?: string - 'indexing.delete_total'?: string - idto?: string - indexingDeleteTotal?: string - 'indexing.index_current'?: string - iic?: string - indexingIndexCurrent?: string - 'indexing.index_time'?: string - iiti?: string - indexingIndexTime?: string - 'indexing.index_total'?: string - iito?: string - indexingIndexTotal?: string - 'indexing.index_failed'?: string - iif?: string - indexingIndexFailed?: string - 'merges.current'?: string - mc?: string - mergesCurrent?: string - 'merges.current_docs'?: string - mcd?: string - mergesCurrentDocs?: string - 'merges.current_size'?: string - mcs?: string - mergesCurrentSize?: string - 'merges.total'?: string - mt?: string - mergesTotal?: string - 'merges.total_docs'?: string - mtd?: string - mergesTotalDocs?: string - 'merges.total_size'?: string - mts?: string - mergesTotalSize?: string - 'merges.total_time'?: string - mtt?: string - mergesTotalTime?: string - 'refresh.total'?: string - 'refresh.time'?: string - 'refresh.external_total'?: string - rto?: string - refreshTotal?: string - 'refresh.external_time'?: string - rti?: string - refreshTime?: string - 'refresh.listeners'?: string - rli?: string - refreshListeners?: string - 'script.compilations'?: string - scrcc?: string - scriptCompilations?: string - 'script.cache_evictions'?: string - scrce?: string - scriptCacheEvictions?: string - 'script.compilation_limit_triggered'?: string - scrclt?: string - scriptCacheCompilationLimitTriggered?: string - 'search.fetch_current'?: string - sfc?: string - searchFetchCurrent?: string - 'search.fetch_time'?: string - sfti?: string - searchFetchTime?: string - 'search.fetch_total'?: string - sfto?: string - searchFetchTotal?: string - 'search.open_contexts'?: string - so?: string - searchOpenContexts?: string - 'search.query_current'?: string - sqc?: string - searchQueryCurrent?: string - 'search.query_time'?: string - sqti?: string - searchQueryTime?: string - 'search.query_total'?: string - sqto?: string - searchQueryTotal?: string - 'search.scroll_current'?: string - scc?: string - searchScrollCurrent?: string - 'search.scroll_time'?: string - scti?: string - searchScrollTime?: string - 'search.scroll_total'?: string - scto?: string - searchScrollTotal?: string - 'segments.count'?: string - sc?: string - segmentsCount?: 
string - 'segments.memory'?: string - sm?: string - segmentsMemory?: string - 'segments.index_writer_memory'?: string - siwm?: string - segmentsIndexWriterMemory?: string - 'segments.version_map_memory'?: string - svmm?: string - segmentsVersionMapMemory?: string - 'segments.fixed_bitset_memory'?: string - sfbm?: string - fixedBitsetMemory?: string - 'suggest.current'?: string - suc?: string - suggestCurrent?: string - 'suggest.time'?: string - suti?: string - suggestTime?: string - 'suggest.total'?: string - suto?: string - suggestTotal?: string - 'bulk.total_operations'?: string - bto?: string - bulkTotalOperations?: string - 'bulk.total_time'?: string - btti?: string - bulkTotalTime?: string - 'bulk.total_size_in_bytes'?: string - btsi?: string - bulkTotalSizeInBytes?: string - 'bulk.avg_time'?: string - bati?: string - bulkAvgTime?: string - 'bulk.avg_size_in_bytes'?: string - basi?: string - bulkAvgSizeInBytes?: string +export interface SearchTypesPhraseSuggestCollateQuery { + id?: Id + source?: string +} + +export interface SearchTypesPhraseSuggestHighlight { + post_tag: string + pre_tag: string +} + +export interface SearchTypesPhraseSuggestOption { + text: string + highlighted: string + score: double +} + +export interface SearchTypesPhraseSuggester extends SearchTypesSuggesterBase { + collate?: SearchTypesPhraseSuggestCollate + confidence?: double + direct_generator?: SearchTypesDirectGenerator[] + force_unigrams?: boolean + gram_size?: integer + highlight?: SearchTypesPhraseSuggestHighlight + max_errors?: double + real_word_error_likelihood?: double + separator?: string + shard_size?: integer + smoothing?: SearchTypesSmoothingModelContainer + text?: string + token_limit?: integer +} + +export interface SearchTypesPointInTimeReference { + id: Id + keep_alive?: Time +} + +export interface SearchTypesProfile { + shards: SearchTypesShardProfile[] +} + +export interface SearchTypesQueryBreakdown { + advance: long + advance_count: long + build_scorer: long + build_scorer_count: long + create_weight: long + create_weight_count: long + match: long + match_count: long + shallow_advance: long + shallow_advance_count: long + next_doc: long + next_doc_count: long + score: long + score_count: long + compute_max_score: long + compute_max_score_count: long + set_min_competitive_score: long + set_min_competitive_score_count: long +} + +export interface SearchTypesQueryProfile { + breakdown: SearchTypesQueryBreakdown + description: string + time_in_nanos: long + type: string + children?: SearchTypesQueryProfile[] +} + +export interface SearchTypesRescore { + query: SearchTypesRescoreQuery + window_size?: integer +} + +export interface SearchTypesRescoreQuery { + rescore_query: QueryDslQueryContainer + query_weight?: double + rescore_query_weight?: double + score_mode?: SearchTypesScoreMode +} + +export type SearchTypesScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total' + +export interface SearchTypesScoreSort { + mode?: SearchTypesSortMode + order?: SearchTypesSortOrder +} + +export interface SearchTypesScriptSort { + order?: SearchTypesSortOrder + script: Script + type?: string +} + +export interface SearchTypesSearchProfile { + collector: SearchTypesCollector[] + query: SearchTypesQueryProfile[] + rewrite_time: long +} + +export interface SearchTypesShardProfile { + aggregations: SearchTypesAggregationProfile[] + id: string + searches: SearchTypesSearchProfile[] +} + +export interface SearchTypesSmoothingModelContainer { + laplace?: SearchTypesLaplaceSmoothingModel + linear_interpolation?: 
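The rescore shapes above attach a secondary query to the top-ranked hits of a search. Below is a minimal sketch of a value satisfying them; `QueryDslQueryContainer` is stubbed as a plain object type because its real definition lives elsewhere in this file, and the field values are illustrative only.

[source,ts]
----
// Local stand-ins for the generated types above (sketch only)
type QueryDslQueryContainer = Record<string, unknown>
type SearchTypesScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total'

interface SearchTypesRescoreQuery {
  rescore_query: QueryDslQueryContainer
  query_weight?: number
  rescore_query_weight?: number
  score_mode?: SearchTypesScoreMode
}

interface SearchTypesRescore {
  query: SearchTypesRescoreQuery
  window_size?: number
}

// Re-score only the top 50 hits per shard with a phrase match
const rescore: SearchTypesRescore = {
  window_size: 50,
  query: {
    rescore_query: { match_phrase: { title: 'quick brown fox' } },
    query_weight: 0.7,
    rescore_query_weight: 1.2,
    score_mode: 'total'
  }
}
----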
SearchTypesLinearInterpolationSmoothingModel + stupid_backoff?: SearchTypesStupidBackoffSmoothingModel +} + +export type SearchTypesSort = SearchTypesSortCombinations | SearchTypesSortCombinations[] + +export type SearchTypesSortCombinations = Field | SearchTypesSortContainer | SearchTypesSortOrder + +export interface SearchTypesSortContainerKeys { + _score?: SearchTypesScoreSort + _doc?: SearchTypesScoreSort + _geo_distance?: SearchTypesGeoDistanceSort + _script?: SearchTypesScriptSort +} +export type SearchTypesSortContainer = SearchTypesSortContainerKeys | + { [property: string]: SearchTypesFieldSort | SearchTypesSortOrder } + +export type SearchTypesSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' + +export type SearchTypesSortOrder = 'asc' | 'desc' | '_doc' + +export type SearchTypesSortResults = (long | double | string | null)[] + +export interface SearchTypesSourceFilter { + excludes?: Fields + includes?: Fields + exclude?: Fields + include?: Fields +} + +export type SearchTypesStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram' + +export interface SearchTypesStupidBackoffSmoothingModel { + discount: double +} + +export interface SearchTypesSuggest { + length: integer + offset: integer + options: SearchTypesSuggestOption[] + text: string +} + +export interface SearchTypesSuggestContainer { + completion?: SearchTypesCompletionSuggester + phrase?: SearchTypesPhraseSuggester + prefix?: string + regex?: string + term?: SearchTypesTermSuggester + text?: string +} + +export interface SearchTypesSuggestContextQuery { + boost?: double + context: SearchTypesContext + neighbours?: Distance[] | integer[] + precision?: Distance | integer + prefix?: boolean +} + +export interface SearchTypesSuggestFuzziness { + fuzziness: Fuzziness + min_length: integer + prefix_length: integer + transpositions: boolean + unicode_aware: boolean +} + +export type SearchTypesSuggestOption = SearchTypesCompletionSuggestOption | SearchTypesPhraseSuggestOption | SearchTypesTermSuggestOption + +export type SearchTypesSuggestSort = 'score' | 'frequency' + +export interface SearchTypesSuggesterBase { + field: Field + analyzer?: string + size?: integer +} + +export interface SearchTypesTermSuggestOption { + text: string + freq?: long + score: double +} + +export interface SearchTypesTermSuggester extends SearchTypesSuggesterBase { + lowercase_terms?: boolean + max_edits?: integer + max_inspections?: integer + max_term_freq?: float + min_doc_freq?: float + min_word_length?: integer + prefix_length?: integer + shard_size?: integer + sort?: SearchTypesSuggestSort + string_distance?: SearchTypesStringDistance + suggest_mode?: SuggestMode + text?: string +} + +export interface SearchTypesTotalHits { + relation: SearchTypesTotalHitsRelation + value: long +} + +export type SearchTypesTotalHitsRelation = 'eq' | 'gte' + +export interface SearchShardsRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + local?: boolean + preference?: string + routing?: Routing +} + +export interface SearchShardsResponse { + nodes: Record + shards: NodeShard[][] + indices: Record +} + +export interface SearchShardsShardStoreIndex { + aliases?: Name[] + filter?: QueryDslQueryContainer +} + +export interface SearchTemplateRequest extends RequestBase { + index?: Indices + type?: Types + allow_no_indices?: boolean + ccs_minimize_roundtrips?: boolean + expand_wildcards?: ExpandWildcards + explain?: boolean + 
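`SearchTypesSort` accepts a bare field name, an order keyword, a per-field container, or an array mixing all of these. A self-contained sketch, with the aliases re-declared locally and the container simplified to order-only values:

[source,ts]
----
type Field = string
type SearchTypesSortOrder = 'asc' | 'desc' | '_doc'
type SearchTypesSortContainer = { [property: string]: SearchTypesSortOrder }
type SearchTypesSortCombinations = Field | SearchTypesSortContainer | SearchTypesSortOrder
type SearchTypesSort = SearchTypesSortCombinations | SearchTypesSortCombinations[]

// Each of these satisfies the union
const byField: SearchTypesSort = 'timestamp'
const byContainer: SearchTypesSort = { timestamp: 'desc' }
const mixed: SearchTypesSort = [{ timestamp: 'desc' }, '_score', '_doc']
----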
ignore_throttled?: boolean + ignore_unavailable?: boolean + preference?: string + profile?: boolean + routing?: Routing + scroll?: Time + search_type?: SearchType + total_hits_as_integer?: boolean + typed_keys?: boolean + body?: { + id?: Id + params?: Record + source?: string + } +} + +export interface SearchTemplateResponse { + _shards: ShardStatistics + timed_out: boolean + took: integer + hits: SearchTypesHitsMetadata +} + +export interface TermvectorsFieldStatistics { + doc_count: integer + sum_doc_freq: long + sum_ttf: long +} + +export interface TermvectorsFilter { + max_doc_freq?: integer + max_num_terms?: integer + max_term_freq?: integer + max_word_length?: integer + min_doc_freq?: integer + min_term_freq?: integer + min_word_length?: integer +} + +export interface TermvectorsRequest extends RequestBase { + index: IndexName + id?: Id + type?: Type + fields?: Fields + field_statistics?: boolean + offsets?: boolean + payloads?: boolean + positions?: boolean + preference?: string + realtime?: boolean + routing?: Routing + term_statistics?: boolean + version?: VersionNumber + version_type?: VersionType + body?: { + doc?: TDocument + filter?: TermvectorsFilter + per_field_analyzer?: Record + } +} + +export interface TermvectorsResponse { + found: boolean + _id: Id + _index: IndexName + term_vectors?: Record + took: long + _type?: Type + _version: VersionNumber +} + +export interface TermvectorsTerm { + doc_freq?: integer + score?: double + term_freq: integer + tokens: TermvectorsToken[] + ttf?: integer +} + +export interface TermvectorsTermVector { + field_statistics: TermvectorsFieldStatistics + terms: Record +} + +export interface TermvectorsToken { + end_offset?: integer + payload?: string + position: integer + start_offset?: integer +} + +export interface UpdateRequest extends RequestBase { + id: Id + index: IndexName + type?: Type + if_primary_term?: long + if_seq_no?: SequenceNumber + lang?: string + refresh?: Refresh + require_alias?: boolean + retry_on_conflict?: long + routing?: Routing + source_enabled?: boolean + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + body?: { + detect_noop?: boolean + doc?: TPartialDocument + doc_as_upsert?: boolean + script?: Script + scripted_upsert?: boolean + _source?: boolean | SearchTypesSourceFilter + upsert?: TDocument + } +} + +export interface UpdateResponse extends WriteResponseBase { + get?: InlineGet +} + +export interface UpdateByQueryRequest extends RequestBase { + index: Indices + type?: Types + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + conflicts?: Conflicts + default_operator?: DefaultOperator + df?: string + expand_wildcards?: ExpandWildcards + from?: long + ignore_unavailable?: boolean + lenient?: boolean + pipeline?: string + preference?: string + query_on_query_string?: string + refresh?: boolean + request_cache?: boolean + requests_per_second?: long + routing?: Routing + scroll?: Time + scroll_size?: long + search_timeout?: Time + search_type?: SearchType + size?: long + slices?: long + sort?: string[] + source_enabled?: boolean + source_excludes?: Fields + source_includes?: Fields + stats?: string[] + terminate_after?: long + timeout?: Time + version?: boolean + version_type?: boolean + wait_for_active_shards?: WaitForActiveShards + wait_for_completion?: boolean + body?: { + max_docs?: long + query?: QueryDslQueryContainer + script?: Script + slice?: SlicedScroll + conflicts?: Conflicts + } 
+}
+
+export interface UpdateByQueryResponse {
+  batches?: long
+  failures?: BulkIndexByScrollFailure[]
+  noops?: long
+  deleted?: long
+  requests_per_second?: float
+  retries?: Retries
+  task?: TaskId
+  timed_out?: boolean
+  took?: long
+  total?: long
+  updated?: long
+  version_conflicts?: long
+  throttled_millis?: ulong
+  throttled_until_millis?: ulong
+}
+
+export interface UpdateByQueryRethrottleRequest extends RequestBase {
+  task_id: Id
+  requests_per_second?: long
+}
+
+export interface UpdateByQueryRethrottleResponse {
+  nodes: Record
+}
+
+export interface UpdateByQueryRethrottleUpdateByQueryRethrottleNode extends SpecUtilsBaseNode {
+  tasks: Record
+}
+
+export interface SpecUtilsBaseNode {
+  attributes: Record
+  host: Host
+  ip: Ip
+  name: Name
+  roles?: NodeRoles
+  transport_address: TransportAddress
+}
+
+export interface AcknowledgedResponseBase {
+  acknowledged: boolean
+}
+
+export type AggregateName = string
+
+export interface BulkIndexByScrollFailure {
+  cause: MainError
+  id: Id
+  index: IndexName
+  status: integer
+  type: string
+}
+
+export interface BulkStats {
+  total_operations: long
+  total_time?: string
+  total_time_in_millis: long
+  total_size?: ByteSize
+  total_size_in_bytes: long
+  avg_time?: string
+  avg_time_in_millis: long
+  avg_size?: ByteSize
+  avg_size_in_bytes: long
+}
+
+export type ByteSize = long | string
+
+export type Bytes = 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb'
+
+export type CategoryId = string
+
+export interface ChainTransform {
+  transforms: TransformContainer[]
+}
+
+export interface ClusterStatistics {
+  skipped: integer
+  successful: integer
+  total: integer
+}
+
+export interface CompletionStats {
+  size_in_bytes: long
+  size?: ByteSize
+  fields?: Record
+}
+
+export type Conflicts = 'abort' | 'proceed'
+
+export type DataStreamName = string
+
+export interface DateField {
+  field: Field
+  format?: string
+  include_unmapped?: boolean
+}
+
+export type DateMath = string
+
+export type DateMathTime = string
+
+export type DateString = string
+
+export type DefaultOperator = 'AND' | 'OR'
+
+export interface DictionaryResponseBase {
+  [key: string]: TValue
+}
+
+export type Distance = string
+
+export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm'
+
+export interface DocStats {
+  count: long
+  deleted: long
+}
+
+export interface ElasticsearchVersionInfo {
+  build_date: DateString
+  build_flavor: string
+  build_hash: string
+  build_snapshot: boolean
+  build_type: string
+  lucene_version: VersionString
+  minimum_index_compatibility_version: VersionString
+  minimum_wire_compatibility_version: VersionString
+  number: string
+}
+
+export interface EmptyObject {
+}
+
+export type EpochMillis = string | long
+
+export interface ErrorCause {
+  type: string
+  reason: string
+  caused_by?: ErrorCause
+  shard?: integer | string
+  stack_trace?: string
+  root_cause?: ErrorCause[]
+  bytes_limit?: long
+  bytes_wanted?: long
+  column?: integer
+  col?: integer
+  failed_shards?: ShardFailure[]
+  grouped?: boolean
+  index?: IndexName
+  index_uuid?: Uuid
+  language?: string
+  licensed_expired_feature?: string
+  line?: integer
+  max_buckets?: integer
+  phase?: string
+  property_name?: string
+  processor_type?: string
+  resource_id?: Ids
+  'resource.id'?: Ids
+  resource_type?: string
+  'resource.type'?: string
+  script?: string
+  script_stack?: string[]
+  header?: HttpHeaders
+  lang?: string
+  position?: ScriptsPainlessExecutePainlessExecutionPosition
+}
+
+export interface ErrorResponseBase {
error: MainError | string + status: integer +} + +export type ExpandWildcardOptions = 'all' | 'open' | 'closed' | 'hidden' | 'none' + +export type ExpandWildcards = ExpandWildcardOptions | ExpandWildcardOptions[] | string + +export type Field = string + +export interface FieldMemoryUsage { + memory_size?: ByteSize + memory_size_in_bytes: long +} + +export interface FieldSizeUsage { + size?: ByteSize + size_in_bytes: long +} + +export interface FielddataStats { + evictions?: long + memory_size?: ByteSize + memory_size_in_bytes: long + fields?: Record +} + +export type Fields = Field | Field[] + +export interface FlushStats { + periodic: long + total: long + total_time?: string + total_time_in_millis: long +} + +export type Fuzziness = string | integer + +export type GeoDistanceType = 'arc' | 'plane' + +export type GeoHashPrecision = number + +export type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains' + +export type GeoTilePrecision = number + +export interface GetStats { + current: long + exists_time?: string + exists_time_in_millis: long + exists_total: long + missing_time?: string + missing_time_in_millis: long + missing_total: long + time?: string + time_in_millis: long + total: long +} + +export type GroupBy = 'nodes' | 'parents' | 'none' + +export type Health = 'green' | 'yellow' | 'red' + +export type Host = string + +export type HttpHeaders = Record + +export type Id = string + +export type Ids = Id | Id[] + +export type IndexAlias = string + +export type IndexName = string + +export type IndexPattern = string + +export type IndexPatterns = IndexPattern[] + +export interface IndexedScript extends ScriptBase { + id: Id +} + +export interface IndexingStats { + index_current: long + delete_current: long + delete_time?: string + delete_time_in_millis: long + delete_total: long + is_throttled: boolean + noop_update_total: long + throttle_time?: string + throttle_time_in_millis: long + index_time?: string + index_time_in_millis: long + index_total: long + index_failed: long + types?: Record +} + +export type Indices = string | string[] + +export interface IndicesResponseBase extends AcknowledgedResponseBase { + _shards?: ShardStatistics +} + +export interface InlineGet { + fields?: Record + found: boolean + _seq_no: SequenceNumber + _primary_term: long + _routing?: Routing + _source: TDocument +} + +export interface InlineScript extends ScriptBase { + source: string +} + +export type Ip = string + +export interface LatLon { + lat: double + lon: double +} + +export type Level = 'cluster' | 'indices' | 'shards' + +export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' + +export interface MainError extends ErrorCause { + headers?: Record + root_cause: ErrorCause[] +} + +export interface MergesStats { + current: long + current_docs: long + current_size?: string + current_size_in_bytes: long + total: long + total_auto_throttle?: string + total_auto_throttle_in_bytes: long + total_docs: long + total_size?: string + total_size_in_bytes: long + total_stopped_time?: string + total_stopped_time_in_millis: long + total_throttled_time?: string + total_throttled_time_in_millis: long + total_time?: string + total_time_in_millis: long +} + +export type Metadata = Record + +export type Metrics = string | string[] + +export type MinimumShouldMatch = integer | string + +export type MultiTermQueryRewrite = string + +export type Name = string + +export type Names = string | string[] + +export type Namespace = string + +export interface NodeAttributes { + attributes: 
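`ErrorCause` is recursive through `caused_by`, and `MainError` adds a mandatory `root_cause` array. A sketch of walking the cause chain, re-declaring only the fields it touches:

[source,ts]
----
// Pared-down local copies of the generated error types
interface ErrorCause {
  type: string
  reason: string
  caused_by?: ErrorCause
}

interface MainError extends ErrorCause {
  root_cause: ErrorCause[]
}

// Collect every "type: reason" pair along the caused_by chain
function reasons (err: MainError): string[] {
  const out: string[] = []
  for (let c: ErrorCause | undefined = err; c != null; c = c.caused_by) {
    out.push(`${c.type}: ${c.reason}`)
  }
  return out
}
----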
Record + ephemeral_id: Id + id?: Id + name: NodeName + transport_address: TransportAddress + roles?: NodeRoles +} + +export type NodeId = string + +export type NodeIds = string + +export type NodeName = string + +export type NodeRole = 'master' | 'data' | 'data_cold' | 'data_content' | 'data_frozen' | 'data_hot' | 'data_warm' | 'client' | 'ingest' | 'ml' | 'voting_only' | 'transform' | 'remote_cluster_client' | 'coordinating_only' + +export type NodeRoles = NodeRole[] + +export interface NodeShard { + state: IndicesStatsShardRoutingState + primary: boolean + node?: NodeName + shard: integer + index: IndexName + allocation_id?: Record + recovery_source?: Record + unassigned_info?: ClusterAllocationExplainUnassignedInformation +} + +export interface NodeStatistics { + failures?: ErrorCause[] + total: integer + successful: integer + failed: integer +} + +export type OpType = 'index' | 'create' + +export type Password = string + +export type Percentage = string | float + +export type PipelineName = string + +export interface PluginStats { + classname: string + description: string + elasticsearch_version: VersionString + extended_plugins: string[] + has_native_controller: boolean + java_version: VersionString + name: Name + version: VersionString + licensed: boolean + type: string +} + +export type PropertyName = string + +export interface QueryCacheStats { + cache_count: integer + cache_size: integer + evictions: integer + hit_count: integer + memory_size?: ByteSize + memory_size_in_bytes: integer + miss_count: integer + total_count: integer +} + +export interface RecoveryStats { + current_as_source: long + current_as_target: long + throttle_time?: string + throttle_time_in_millis: long +} + +export type Refresh = boolean | RefreshOptions + +export type RefreshOptions = 'wait_for' + +export interface RefreshStats { + external_total: long + external_total_time_in_millis: long + listeners: long + total: long + total_time?: string + total_time_in_millis: long +} + +export type RelationName = string + +export interface RequestBase extends SpecUtilsCommonQueryParameters { +} + +export interface RequestCacheStats { + evictions: long + hit_count: long + memory_size?: string + memory_size_in_bytes: long + miss_count: long +} + +export type Result = 'Error' | 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' + +export interface Retries { + bulk: long + search: long +} + +export type Routing = string | number + +export type Script = InlineScript | IndexedScript | string + +export interface ScriptBase { + lang?: ScriptLanguage + params?: Record +} + +export interface ScriptField { + script: Script +} + +export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' + +export interface ScriptTransform { + lang: string + params: Record +} + +export type ScrollId = string + +export interface SearchStats { + fetch_current: long + fetch_time_in_millis: long + fetch_total: long + open_contexts?: long + query_current: long + query_time_in_millis: long + query_total: long + scroll_current: long + scroll_time_in_millis: long + scroll_total: long + suggest_current: long + suggest_time_in_millis: long + suggest_total: long + groups?: Record +} + +export interface SearchTransform { + request: WatcherSearchInputRequestDefinition + timeout: Time +} + +export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' + +export interface SegmentsStats { + count: integer + doc_values_memory?: ByteSize + doc_values_memory_in_bytes: integer + file_sizes: Record + fixed_bit_set?: ByteSize + 
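The `Script` union above admits a bare source string, an inline script object, or a stored-script reference. All three spellings type-check against this local copy of the definitions (`params` is assumed to be `Record<string, unknown>` here):

[source,ts]
----
type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java'
interface ScriptBase { lang?: ScriptLanguage; params?: Record<string, unknown> }
interface InlineScript extends ScriptBase { source: string }
interface IndexedScript extends ScriptBase { id: string }
type Script = InlineScript | IndexedScript | string

// Shorthand: just the source
const shorthand: Script = "doc['views'].value * 2"
// Inline: explicit language and parameters
const inline: Script = { lang: 'painless', source: 'ctx._source.views += params.n', params: { n: 1 } }
// Stored: reference a script previously saved in the cluster
const stored: Script = { id: 'increment-views' }
----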
fixed_bit_set_memory_in_bytes: integer + index_writer_memory?: ByteSize + index_writer_max_memory_in_bytes?: integer + index_writer_memory_in_bytes: integer + max_unsafe_auto_id_timestamp: integer + memory?: ByteSize + memory_in_bytes: integer + norms_memory?: ByteSize + norms_memory_in_bytes: integer + points_memory?: ByteSize + points_memory_in_bytes: integer + stored_memory?: ByteSize + stored_fields_memory_in_bytes: integer + terms_memory_in_bytes: integer + terms_memory?: ByteSize + term_vectory_memory?: ByteSize + term_vectors_memory_in_bytes: integer + version_map_memory?: ByteSize + version_map_memory_in_bytes: integer +} + +export type SequenceNumber = integer + +export type Service = string + +export type ShapeRelation = 'intersects' | 'disjoint' | 'within' + +export interface ShardFailure { + index?: IndexName + node?: string + reason: ErrorCause + shard: integer + status?: string } -export interface CatNodesRequest extends CatRequestBase { - bytes?: Bytes - full_id?: boolean | string +export interface ShardStatistics { + failed: uint + successful: uint + total: uint + failures?: ShardFailure[] + skipped?: uint } -export type CatNodesResponse = CatNodesRecord[] +export interface ShardsOperationResponseBase { + _shards: ShardStatistics +} -export interface CatPendingTasksRecord { - insertOrder?: string - o?: string - timeInQueue?: string - t?: string - priority?: string - p?: string - source?: string - s?: string +export type Size = 'Raw' | 'k' | 'm' | 'g' | 't' | 'p' + +export interface SlicedScroll { + field?: Field + id: integer + max: integer +} + +export interface StoreStats { + size?: ByteSize + size_in_bytes: integer + reserved?: ByteSize + reserved_in_bytes: integer + total_data_set_size?: ByteSize + total_data_set_size_in_bytes?: integer } -export interface CatPendingTasksRequest extends CatRequestBase { +export interface StoredScript { + lang?: ScriptLanguage + source: string } -export type CatPendingTasksResponse = CatPendingTasksRecord[] +export type SuggestMode = 'missing' | 'popular' | 'always' -export interface CatPluginsRecord { - id?: NodeId - name?: Name - n?: Name - component?: string - c?: string - version?: VersionString - v?: VersionString - description?: string - d?: string - type?: Type - t?: Type +export type SuggestionName = string + +export type TaskId = string | integer + +export type ThreadType = 'cpu' | 'wait' | 'block' + +export type Time = string | integer + +export type TimeSpan = string + +export type Timestamp = string + +export interface Transform { } -export interface CatPluginsRequest extends CatRequestBase { +export interface TransformContainer { + chain?: ChainTransform + script?: ScriptTransform + search?: SearchTransform +} + +export interface TranslogStats { + earliest_last_modified_age: long + operations: long + size?: string + size_in_bytes: long + uncommitted_operations: integer + uncommitted_size?: string + uncommitted_size_in_bytes: long } -export type CatPluginsResponse = CatPluginsRecord[] +export type TransportAddress = string -export interface CatRecoveryRecord { - index?: IndexName - i?: IndexName - idx?: IndexName - shard?: string - s?: string - sh?: string - start_time?: string - start?: string - start_time_millis?: string - start_millis?: string - stop_time?: string - stop?: string - stop_time_millis?: string - stop_millis?: string - time?: string - t?: string - ti?: string - type?: Type - ty?: Type - stage?: string - st?: string - source_host?: string - shost?: string - source_node?: string - snode?: string - target_host?: 
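`ShardStatistics` appears on every write and broadcast response. A small helper sketch that treats any failed shard as an error; the function name is illustrative:

[source,ts]
----
type uint = number

interface ShardStatistics {
  failed: uint
  successful: uint
  total: uint
  skipped?: uint
}

// Raise if the operation did not succeed on every targeted shard
function assertNoShardFailures (s: ShardStatistics): void {
  if (s.failed > 0) {
    throw new Error(`${s.failed} of ${s.total} shards failed`)
  }
}
----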
string - thost?: string - target_node?: string - tnode?: string - repository?: string - rep?: string - snapshot?: string - snap?: string - files?: string - f?: string - files_recovered?: string - fr?: string - files_percent?: Percentage - fp?: Percentage - files_total?: string - tf?: string - bytes?: string - b?: string - bytes_recovered?: string - br?: string - bytes_percent?: Percentage - bp?: Percentage - bytes_total?: string - tb?: string - translog_ops?: string - to?: string - translog_ops_recovered?: string - tor?: string - translog_ops_percent?: Percentage - top?: Percentage +export type Type = string + +export type Types = Type | Type[] + +export type Username = string + +export type Uuid = string + +export type VersionNumber = long + +export type VersionString = string + +export type VersionType = 'internal' | 'external' | 'external_gte' | 'force' + +export type WaitForActiveShardOptions = 'all' + +export type WaitForActiveShards = integer | WaitForActiveShardOptions + +export type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid' + +export type WaitForStatus = 'green' | 'yellow' | 'red' + +export interface WarmerStats { + current: long + total: long + total_time?: string + total_time_in_millis: long } -export interface CatRecoveryRequest extends CatRequestBase { - index?: Indices - active_only?: boolean - bytes?: Bytes - detailed?: boolean +export interface WriteResponseBase { + _id: Id + _index: IndexName + _primary_term: long + result: Result + _seq_no: SequenceNumber + _shards: ShardStatistics + _type?: Type + _version: VersionNumber + forced_refresh?: boolean + error?: ErrorCause } -export type CatRecoveryResponse = CatRecoveryRecord[] +export type double = number -export interface CatRepositoriesRecord { - id?: string - repoId?: string - type?: string - t?: string +export type float = number + +export type integer = number + +export type long = number + +export type uint = number + +export type ulong = number + +export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase { + filters?: Record } -export interface CatRepositoriesRequest extends CatRequestBase { +export type AggregationsAggregate = AggregationsSingleBucketAggregate | AggregationsAutoDateHistogramAggregate | AggregationsFiltersAggregate | AggregationsSignificantTermsAggregate | AggregationsTermsAggregate | AggregationsBucketAggregate | AggregationsCompositeBucketAggregate | AggregationsMultiBucketAggregate | AggregationsMatrixStatsAggregate | AggregationsKeyedValueAggregate | AggregationsMetricAggregate + +export interface AggregationsAggregateBase { + meta?: Record } -export type CatRepositoriesResponse = CatRepositoriesRecord[] +export interface AggregationsAggregation { + meta?: Record + name?: string +} -export interface CatRequestBase extends RequestBase, CommonCatQueryParameters { +export interface AggregationsAggregationContainer { + aggs?: Record + meta?: Record + adjacency_matrix?: AggregationsAdjacencyMatrixAggregation + aggregations?: Record + auto_date_histogram?: AggregationsAutoDateHistogramAggregation + avg?: AggregationsAverageAggregation + avg_bucket?: AggregationsAverageBucketAggregation + boxplot?: AggregationsBoxplotAggregation + bucket_script?: AggregationsBucketScriptAggregation + bucket_selector?: AggregationsBucketSelectorAggregation + bucket_sort?: AggregationsBucketSortAggregation + cardinality?: AggregationsCardinalityAggregation + children?: AggregationsChildrenAggregation + composite?: AggregationsCompositeAggregation 
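Because `result` on `WriteResponseBase` is a closed literal union (`Result`), a switch over it can be made exhaustive. A sketch:

[source,ts]
----
type Result = 'Error' | 'created' | 'updated' | 'deleted' | 'not_found' | 'noop'

// Narrow a write result to "did the document actually change?"
function changedSomething (result: Result): boolean {
  switch (result) {
    case 'created':
    case 'updated':
    case 'deleted':
      return true
    case 'noop':
    case 'not_found':
    case 'Error':
      return false
  }
}
----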
+ cumulative_cardinality?: AggregationsCumulativeCardinalityAggregation + cumulative_sum?: AggregationsCumulativeSumAggregation + date_histogram?: AggregationsDateHistogramAggregation + date_range?: AggregationsDateRangeAggregation + derivative?: AggregationsDerivativeAggregation + diversified_sampler?: AggregationsDiversifiedSamplerAggregation + extended_stats?: AggregationsExtendedStatsAggregation + extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation + filter?: QueryDslQueryContainer + filters?: AggregationsFiltersAggregation + geo_bounds?: AggregationsGeoBoundsAggregation + geo_centroid?: AggregationsGeoCentroidAggregation + geo_distance?: AggregationsGeoDistanceAggregation + geohash_grid?: AggregationsGeoHashGridAggregation + geo_line?: AggregationsGeoLineAggregation + geotile_grid?: AggregationsGeoTileGridAggregation + global?: AggregationsGlobalAggregation + histogram?: AggregationsHistogramAggregation + ip_range?: AggregationsIpRangeAggregation + inference?: AggregationsInferenceAggregation + line?: AggregationsGeoLineAggregation + matrix_stats?: AggregationsMatrixStatsAggregation + max?: AggregationsMaxAggregation + max_bucket?: AggregationsMaxBucketAggregation + median_absolute_deviation?: AggregationsMedianAbsoluteDeviationAggregation + min?: AggregationsMinAggregation + min_bucket?: AggregationsMinBucketAggregation + missing?: AggregationsMissingAggregation + moving_avg?: AggregationsMovingAverageAggregation + moving_percentiles?: AggregationsMovingPercentilesAggregation + moving_fn?: AggregationsMovingFunctionAggregation + multi_terms?: AggregationsMultiTermsAggregation + nested?: AggregationsNestedAggregation + normalize?: AggregationsNormalizeAggregation + parent?: AggregationsParentAggregation + percentile_ranks?: AggregationsPercentileRanksAggregation + percentiles?: AggregationsPercentilesAggregation + percentiles_bucket?: AggregationsPercentilesBucketAggregation + range?: AggregationsRangeAggregation + rare_terms?: AggregationsRareTermsAggregation + rate?: AggregationsRateAggregation + reverse_nested?: AggregationsReverseNestedAggregation + sampler?: AggregationsSamplerAggregation + scripted_metric?: AggregationsScriptedMetricAggregation + serial_diff?: AggregationsSerialDifferencingAggregation + significant_terms?: AggregationsSignificantTermsAggregation + significant_text?: AggregationsSignificantTextAggregation + stats?: AggregationsStatsAggregation + stats_bucket?: AggregationsStatsBucketAggregation + string_stats?: AggregationsStringStatsAggregation + sum?: AggregationsSumAggregation + sum_bucket?: AggregationsSumBucketAggregation + terms?: AggregationsTermsAggregation + top_hits?: AggregationsTopHitsAggregation + t_test?: AggregationsTTestAggregation + top_metrics?: AggregationsTopMetricsAggregation + value_count?: AggregationsValueCountAggregation + weighted_avg?: AggregationsWeightedAverageAggregation + variable_width_histogram?: AggregationsVariableWidthHistogramAggregation +} + +export interface AggregationsAggregationRange { + from?: double | string + key?: string + to?: double | string } -export interface CatResponseBase extends ResponseBase { +export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregate> { + interval: DateMathTime } -export interface CatSegmentsRecord { - index?: IndexName - i?: IndexName - idx?: IndexName - shard?: string - s?: string - sh?: string - prirep?: string - p?: string - pr?: string - primaryOrReplica?: string - ip?: string - id?: NodeId - segment?: string - seg?: string - 
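`AggregationsAggregationContainer` nests through `aggs`, one container per named aggregation. A pared-down local sketch showing the recursion with just `terms` and `avg`; the type name is hypothetical:

[source,ts]
----
type QueryDslQueryContainer = Record<string, unknown>

// Simplified stand-in for AggregationsAggregationContainer above
interface AggregationContainerSketch {
  aggs?: Record<string, AggregationContainerSketch>
  filter?: QueryDslQueryContainer
  terms?: { field: string; size?: number }
  avg?: { field: string }
}

// One bucket per tag, each carrying an average sub-aggregation
const aggs: Record<string, AggregationContainerSketch> = {
  per_tag: {
    terms: { field: 'tags' },
    aggs: { avg_views: { avg: { field: 'views' } } }
  }
}
----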
generation?: string - g?: string - gen?: string - 'docs.count'?: string - dc?: string - docsCount?: string - 'docs.deleted'?: string - dd?: string - docsDeleted?: string - size?: ByteSize - si?: ByteSize - 'size.memory'?: ByteSize - sm?: ByteSize - sizeMemory?: ByteSize - committed?: string - ic?: string - isCommitted?: string - searchable?: string - is?: string - isSearchable?: string - version?: VersionString - v?: VersionString - compound?: string - ico?: string - isCompound?: string +export interface AggregationsAutoDateHistogramAggregation extends AggregationsBucketAggregationBase { + buckets?: integer + field?: Field + format?: string + minimum_interval?: AggregationsMinimumInterval + missing?: DateString + offset?: string + params?: Record + script?: Script + time_zone?: string +} + +export interface AggregationsAverageAggregation extends AggregationsFormatMetricAggregationBase { +} + +export interface AggregationsAverageBucketAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase { + min: double + max: double + q1: double + q2: double + q3: double +} + +export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase { + compression?: double +} + +export type AggregationsBucket = AggregationsCompositeBucket | AggregationsDateHistogramBucket | AggregationsFiltersBucketItem | AggregationsIpRangeBucket | AggregationsRangeBucket | AggregationsRareTermsBucket | AggregationsSignificantTermsBucket | AggregationsKeyedBucket + +export interface AggregationsBucketAggregate extends AggregationsAggregateBase { + after_key: Record + bg_count: long + doc_count: long + doc_count_error_upper_bound: long + sum_other_doc_count: long + interval: DateMathTime + items: AggregationsBucket } -export interface CatSegmentsRequest extends CatRequestBase { - index?: Indices - bytes?: Bytes +export interface AggregationsBucketAggregationBase extends AggregationsAggregation { + aggregations?: Record } -export type CatSegmentsResponse = CatSegmentsRecord[] +export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { + script?: Script +} -export interface CatShardsRecord { - index?: string - i?: string - idx?: string - shard?: string - s?: string - sh?: string - prirep?: string - p?: string - pr?: string - primaryOrReplica?: string - state?: string - st?: string - docs?: string - d?: string - dc?: string - store?: string - sto?: string - ip?: string - id?: string - node?: string - n?: string - sync_id?: string - 'unassigned.reason'?: string - ur?: string - 'unassigned.at'?: string - ua?: string - 'unassigned.for'?: string - uf?: string - 'unassigned.details'?: string - ud?: string - 'recoverysource.type'?: string - rs?: string - 'completion.size'?: string - cs?: string - completionSize?: string - 'fielddata.memory_size'?: string - fm?: string - fielddataMemory?: string - 'fielddata.evictions'?: string - fe?: string - fielddataEvictions?: string - 'query_cache.memory_size'?: string - qcm?: string - queryCacheMemory?: string - 'query_cache.evictions'?: string - qce?: string - queryCacheEvictions?: string - 'flush.total'?: string - ft?: string - flushTotal?: string - 'flush.total_time'?: string - ftt?: string - flushTotalTime?: string - 'get.current'?: string - gc?: string - getCurrent?: string - 'get.time'?: string - gti?: string - getTime?: string - 'get.total'?: string - gto?: string - getTotal?: string - 'get.exists_time'?: string - geti?: string - 
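`bucket_script` derives a value from sibling metrics. Only `script` is visible on the interface here; `buckets_path` is inherited from `AggregationsPipelineAggregationBase`, so the sketch below re-declares it as an assumed shape:

[source,ts]
----
type Script = string | { source: string; lang?: string }

// buckets_path comes from the pipeline base type (assumed shape)
interface BucketScriptSketch {
  buckets_path?: Record<string, string>
  script?: Script
}

// Compute an error rate from two sibling counts
const errorRate: BucketScriptSketch = {
  buckets_path: { errors: 'errors>_count', total: '_count' },
  script: 'params.errors / params.total'
}
----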
getExistsTime?: string - 'get.exists_total'?: string - geto?: string - getExistsTotal?: string - 'get.missing_time'?: string - gmti?: string - getMissingTime?: string - 'get.missing_total'?: string - gmto?: string - getMissingTotal?: string - 'indexing.delete_current'?: string - idc?: string - indexingDeleteCurrent?: string - 'indexing.delete_time'?: string - idti?: string - indexingDeleteTime?: string - 'indexing.delete_total'?: string - idto?: string - indexingDeleteTotal?: string - 'indexing.index_current'?: string - iic?: string - indexingIndexCurrent?: string - 'indexing.index_time'?: string - iiti?: string - indexingIndexTime?: string - 'indexing.index_total'?: string - iito?: string - indexingIndexTotal?: string - 'indexing.index_failed'?: string - iif?: string - indexingIndexFailed?: string - 'merges.current'?: string - mc?: string - mergesCurrent?: string - 'merges.current_docs'?: string - mcd?: string - mergesCurrentDocs?: string - 'merges.current_size'?: string - mcs?: string - mergesCurrentSize?: string - 'merges.total'?: string - mt?: string - mergesTotal?: string - 'merges.total_docs'?: string - mtd?: string - mergesTotalDocs?: string - 'merges.total_size'?: string - mts?: string - mergesTotalSize?: string - 'merges.total_time'?: string - mtt?: string - mergesTotalTime?: string - 'refresh.total'?: string - 'refresh.time'?: string - 'refresh.external_total'?: string - rto?: string - refreshTotal?: string - 'refresh.external_time'?: string - rti?: string - refreshTime?: string - 'refresh.listeners'?: string - rli?: string - refreshListeners?: string - 'search.fetch_current'?: string - sfc?: string - searchFetchCurrent?: string - 'search.fetch_time'?: string - sfti?: string - searchFetchTime?: string - 'search.fetch_total'?: string - sfto?: string - searchFetchTotal?: string - 'search.open_contexts'?: string - so?: string - searchOpenContexts?: string - 'search.query_current'?: string - sqc?: string - searchQueryCurrent?: string - 'search.query_time'?: string - sqti?: string - searchQueryTime?: string - 'search.query_total'?: string - sqto?: string - searchQueryTotal?: string - 'search.scroll_current'?: string - scc?: string - searchScrollCurrent?: string - 'search.scroll_time'?: string - scti?: string - searchScrollTime?: string - 'search.scroll_total'?: string - scto?: string - searchScrollTotal?: string - 'segments.count'?: string - sc?: string - segmentsCount?: string - 'segments.memory'?: string - sm?: string - segmentsMemory?: string - 'segments.index_writer_memory'?: string - siwm?: string - segmentsIndexWriterMemory?: string - 'segments.version_map_memory'?: string - svmm?: string - segmentsVersionMapMemory?: string - 'segments.fixed_bitset_memory'?: string - sfbm?: string - fixedBitsetMemory?: string - 'seq_no.max'?: string - sqm?: string - maxSeqNo?: string - 'seq_no.local_checkpoint'?: string - sql?: string - localCheckpoint?: string - 'seq_no.global_checkpoint'?: string - sqg?: string - globalCheckpoint?: string - 'warmer.current'?: string - wc?: string - warmerCurrent?: string - 'warmer.total'?: string - wto?: string - warmerTotal?: string - 'warmer.total_time'?: string - wtt?: string - warmerTotalTime?: string - 'path.data'?: string - pd?: string - dataPath?: string - 'path.state'?: string - ps?: string - statsPath?: string - 'bulk.total_operations'?: string - bto?: string - bulkTotalOperations?: string - 'bulk.total_time'?: string - btti?: string - bulkTotalTime?: string - 'bulk.total_size_in_bytes'?: string - btsi?: string - bulkTotalSizeInBytes?: string - 
'bulk.avg_time'?: string - bati?: string - bulkAvgTime?: string - 'bulk.avg_size_in_bytes'?: string - basi?: string - bulkAvgSizeInBytes?: string +export interface AggregationsBucketSelectorAggregation extends AggregationsPipelineAggregationBase { + script?: Script } -export interface CatShardsRequest extends CatRequestBase { - index?: Indices - bytes?: Bytes +export interface AggregationsBucketSortAggregation extends AggregationsAggregation { + from?: integer + gap_policy?: AggregationsGapPolicy + size?: integer + sort?: SearchTypesSort } -export type CatShardsResponse = CatShardsRecord[] +export interface AggregationsBucketsPath { +} -export interface CatSnapshotsRecord { - id?: string - snapshot?: string - repository?: string - re?: string - repo?: string - status?: string - s?: string - start_epoch?: EpochMillis - ste?: EpochMillis - startEpoch?: EpochMillis - start_time?: DateString - sti?: DateString - startTime?: DateString - end_epoch?: EpochMillis - ete?: EpochMillis - endEpoch?: EpochMillis - end_time?: DateString - eti?: DateString - endTime?: DateString - duration?: Time - dur?: Time - indices?: string - i?: string - successful_shards?: string - ss?: string - failed_shards?: string - fs?: string - total_shards?: string - ts?: string - reason?: string - r?: string +export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { + precision_threshold?: integer + rehash?: boolean } -export interface CatSnapshotsRequest extends CatRequestBase { - repository?: Names - ignore_unavailable?: boolean +export interface AggregationsChiSquareHeuristic { + background_is_superset: boolean + include_negatives: boolean } -export type CatSnapshotsResponse = CatSnapshotsRecord[] +export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase { + type?: RelationName +} -export interface CatTasksRecord { - id?: Id - action?: string - ac?: string - task_id?: Id - ti?: Id - parent_task_id?: string - pti?: string - type?: Type - ty?: Type - start_time?: string - start?: string - timestamp?: string - ts?: string - hms?: string - hhmmss?: string - running_time_ns?: string - running_time?: string - time?: string - node_id?: NodeId - ni?: NodeId - ip?: string - i?: string - port?: string - po?: string - node?: string - n?: string - version?: VersionString - v?: VersionString - x_opaque_id?: string - x?: string - description?: string - desc?: string +export interface AggregationsClassificationInferenceOptions { + num_top_classes?: integer + num_top_feature_importance_values?: integer + prediction_field_type?: string + results_field?: string + top_classes_results_field?: string } -export interface CatTasksRequest extends CatRequestBase { - actions?: Array - detailed?: boolean - node_id?: Array - parent_task?: long +export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase { + after?: Record + size?: integer + sources?: Record[] } -export type CatTasksResponse = CatTasksRecord[] +export interface AggregationsCompositeAggregationSource { + terms?: AggregationsTermsAggregation + histogram?: AggregationsHistogramAggregation + date_histogram?: AggregationsDateHistogramAggregation + geotile_grid?: AggregationsGeoTileGridAggregation +} -export interface CatTemplatesRecord { - name?: Name - n?: Name - index_patterns?: string - t?: string - order?: string - o?: string - p?: string - version?: VersionString - v?: VersionString - composed_of?: string - c?: string +export interface AggregationsCompositeBucketKeys { } +export 
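`composite` pages through buckets deterministically: the response's `after_key` feeds the next request's `after`. A local sketch of that round trip (type name hypothetical):

[source,ts]
----
// Simplified stand-in for AggregationsCompositeAggregation above
interface CompositeAggregationSketch {
  after?: Record<string, string | number>
  size?: number
  sources?: Record<string, { terms?: { field: string } }>[]
}

// First page, then resume from the after_key the response returned
const firstPage: CompositeAggregationSketch = {
  size: 100,
  sources: [{ by_tag: { terms: { field: 'tags' } } }]
}
const nextPage: CompositeAggregationSketch = { ...firstPage, after: { by_tag: 'news' } }
----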
type AggregationsCompositeBucket = AggregationsCompositeBucketKeys | + { [property: string]: AggregationsAggregate } -export interface CatTemplatesRequest extends CatRequestBase { - name?: Name +export interface AggregationsCompositeBucketAggregate extends AggregationsMultiBucketAggregate> { + after_key: Record } -export type CatTemplatesResponse = CatTemplatesRecord[] +export interface AggregationsCumulativeCardinalityAggregation extends AggregationsPipelineAggregationBase { +} -export interface CatThreadPoolRecord { - node_name?: string - nn?: string - node_id?: NodeId - id?: NodeId - ephemeral_node_id?: string - eid?: string - pid?: string - p?: string - host?: string - h?: string - ip?: string - i?: string - port?: string - po?: string - name?: string - n?: string - type?: string - t?: string - active?: string - a?: string - pool_size?: string - psz?: string - queue?: string - q?: string - queue_size?: string - qs?: string - rejected?: string - r?: string - largest?: string - l?: string - completed?: string - c?: string - core?: string - cr?: string - max?: string - mx?: string - size?: string - sz?: string - keep_alive?: string - ka?: string +export interface AggregationsCumulativeSumAggregation extends AggregationsPipelineAggregationBase { } -export interface CatThreadPoolRequest extends CatRequestBase { - thread_pool_patterns?: Names - size?: Size | boolean +export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase { + calendar_interval?: AggregationsDateInterval | Time + extended_bounds?: AggregationsExtendedBounds + hard_bounds?: AggregationsExtendedBounds + field?: Field + fixed_interval?: AggregationsDateInterval | Time + format?: string + interval?: AggregationsDateInterval | Time + min_doc_count?: integer + missing?: DateString + offset?: Time + order?: AggregationsHistogramOrder + params?: Record + script?: Script + time_zone?: string } -export type CatThreadPoolResponse = CatThreadPoolRecord[] +export interface AggregationsDateHistogramBucketKeys { +} +export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys | + { [property: string]: AggregationsAggregate } -export interface CatTrainedModelsRecord { - id?: Id - created_by?: string - c?: string - createdBy?: string - heap_size?: ByteSize - hs?: ByteSize - modelHeapSize?: ByteSize - operations?: string - o?: string - modelOperations?: string - license?: string - l?: string - create_time?: DateString - ct?: DateString - version?: VersionString - v?: VersionString - description?: string - d?: string - 'ingest.pipelines'?: string - ip?: string - ingestPipelines?: string - 'ingest.count'?: string - ic?: string - ingestCount?: string - 'ingest.time'?: string - it?: string - ingestTime?: string - 'ingest.current'?: string - icurr?: string - ingestCurrent?: string - 'ingest.failed'?: string - if?: string - ingestFailed?: string - 'data_frame.id'?: string - dfid?: string - dataFrameAnalytics?: string - 'data_frame.create_time'?: string - dft?: string - dataFrameAnalyticsTime?: string - 'data_frame.source_index'?: string - dfsi?: string - dataFrameAnalyticsSrcIndex?: string - 'data_frame.analysis'?: string - dfa?: string - dataFrameAnalyticsAnalysis?: string +export type AggregationsDateInterval = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'quarter' | 'year' + +export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase { + field?: Field + format?: string + missing?: AggregationsMissing + ranges?: 
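`date_histogram` distinguishes `calendar_interval` (calendar-aware: months, DST) from `fixed_interval` (fixed-length units). A sketch of both spellings against a pared-down local type:

[source,ts]
----
type AggregationsDateInterval = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'quarter' | 'year'

// Simplified stand-in for AggregationsDateHistogramAggregation above
interface DateHistogramSketch {
  field?: string
  calendar_interval?: AggregationsDateInterval | string
  fixed_interval?: AggregationsDateInterval | string
  min_doc_count?: number
  time_zone?: string
}

// Calendar-aware monthly buckets, keeping empty ones
const perMonth: DateHistogramSketch = { field: '@timestamp', calendar_interval: 'month', min_doc_count: 0 }
// Fixed 30-second buckets
const per30s: DateHistogramSketch = { field: '@timestamp', fixed_interval: '30s' }
----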
AggregationsDateRangeExpression[] + time_zone?: string } -export interface CatTrainedModelsRequest extends CatRequestBase { - model_id?: Id - allow_no_match?: boolean - bytes?: Bytes - from?: integer - size?: integer +export interface AggregationsDateRangeExpression { + from?: DateMath | float + from_as_string?: string + to_as_string?: string + key?: string + to?: DateMath | float + doc_count?: long } -export type CatTrainedModelsResponse = CatTrainedModelsRecord[] +export interface AggregationsDerivativeAggregation extends AggregationsPipelineAggregationBase { +} -export interface CatTransformsRecord { - id?: Id - state?: string - s?: string - checkpoint?: string - c?: string - documents_processed?: string - docp?: string - documentsProcessed?: string - checkpoint_progress?: string - cp?: string - checkpointProgress?: string - last_search_time?: string - lst?: string - lastSearchTime?: string - changes_last_detection_time?: string - cldt?: string - create_time?: string - ct?: string - createTime?: string - version?: VersionString - v?: VersionString - source_index?: string - si?: string - sourceIndex?: string - dest_index?: string - di?: string - destIndex?: string - pipeline?: string - p?: string - description?: string - d?: string - transform_type?: string - tt?: string - frequency?: string - f?: string - max_page_search_size?: string - mpsz?: string - docs_per_second?: string - dps?: string - reason?: string - r?: string - search_total?: string - st?: string - search_failure?: string - sf?: string - search_time?: string - stime?: string - index_total?: string - it?: string - index_failure?: string - if?: string - index_time?: string - itime?: string - documents_indexed?: string - doci?: string - delete_time?: string - dtime?: string - documents_deleted?: string - docd?: string - trigger_count?: string - tc?: string - pages_processed?: string - pp?: string - processing_time?: string - pt?: string - checkpoint_duration_time_exp_avg?: string - cdtea?: string - checkpointTimeExpAvg?: string - indexed_documents_exp_avg?: string - idea?: string - processed_documents_exp_avg?: string - pdea?: string +export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase { + execution_hint?: AggregationsSamplerAggregationExecutionHint + max_docs_per_value?: integer + script?: Script + shard_size?: integer + field?: Field +} + +export interface AggregationsEwmaModelSettings { + alpha?: float +} + +export interface AggregationsExtendedBounds { + max: T + min: T } -export interface CatTransformsRequest extends CatRequestBase { - transform_id?: Id - allow_no_match?: boolean - from?: integer - size?: integer +export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAggregate { + std_deviation_bounds: AggregationsStandardDeviationBounds + sum_of_squares?: double + variance?: double + variance_population?: double + variance_sampling?: double + std_deviation?: double + std_deviation_population?: double + std_deviation_sampling?: double } -export type CatTransformsResponse = CatTransformsRecord[] +export interface AggregationsExtendedStatsAggregation extends AggregationsFormatMetricAggregationBase { + sigma?: double +} -export interface CategorizationAnalyzer { - filter?: Array - tokenizer?: string | Tokenizer - char_filter?: Array +export interface AggregationsExtendedStatsBucketAggregation extends AggregationsPipelineAggregationBase { + sigma?: double } -export interface CategoryDefinition { - category_id: long - examples: Array - job_id: string - 
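`extended_stats` adds variance and standard-deviation bounds to the basic stats; `sigma` controls how many standard deviations those bounds span. A minimal request-side sketch:

[source,ts]
----
// Simplified stand-in for AggregationsExtendedStatsAggregation above
interface ExtendedStatsSketch {
  field?: string
  sigma?: number
}

// Ask for std_deviation_bounds at mean ± 3σ
const latencyStats: ExtendedStatsSketch = { field: 'latency_ms', sigma: 3 }
----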
max_matching_length: long - regex: string - terms: string +export interface AggregationsFiltersAggregate extends AggregationsAggregateBase { + buckets: AggregationsFiltersBucketItem[] | Record } -export type CategoryId = string +export interface AggregationsFiltersAggregation extends AggregationsBucketAggregationBase { + filters?: Record | QueryDslQueryContainer[] + other_bucket?: boolean + other_bucket_key?: string +} -export interface CcrAutoFollowStats { - auto_followed_clusters: Array - number_of_failed_follow_indices: long - number_of_failed_remote_cluster_state_requests: long - number_of_successful_follow_indices: long - recent_auto_follow_errors: Array +export interface AggregationsFiltersBucketItemKeys { + doc_count: long } +export type AggregationsFiltersBucketItem = AggregationsFiltersBucketItemKeys | + { [property: string]: AggregationsAggregate } -export interface CcrFollowStats { - indices: Array +export interface AggregationsFormatMetricAggregationBase extends AggregationsMetricAggregationBase { + format?: string } -export interface CcrStatsRequest extends RequestBase { +export interface AggregationsFormattableMetricAggregation extends AggregationsMetricAggregationBase { + format?: string } -export interface CcrStatsResponse extends ResponseBase { - auto_follow_stats: CcrAutoFollowStats - follow_stats: CcrFollowStats +export type AggregationsGapPolicy = 'skip' | 'insert_zeros' + +export interface AggregationsGeoBounds { + bottom_right: LatLon + top_left: LatLon } -export interface CcrUsage extends XPackUsage { - auto_follow_patterns_count: integer - follower_indices_count: integer +export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase { + bounds: AggregationsGeoBounds } -export interface ChainInput { - inputs: Record +export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase { + wrap_longitude?: boolean } -export interface ChainTransform { - transforms: Array +export interface AggregationsGeoCentroidAggregate extends AggregationsAggregateBase { + count: long + location: QueryDslGeoLocation } -export interface ChangePasswordRequest extends RequestBase { - username?: Name - refresh?: Refresh - body: { - password?: string - } +export interface AggregationsGeoCentroidAggregation extends AggregationsMetricAggregationBase { + count?: long + location?: QueryDslGeoLocation } -export interface ChangePasswordResponse extends ResponseBase { +export interface AggregationsGeoDistanceAggregation extends AggregationsBucketAggregationBase { + distance_type?: GeoDistanceType + field?: Field + origin?: QueryDslGeoLocation | string + ranges?: AggregationsAggregationRange[] + unit?: DistanceUnit } -export type CharFilter = HtmlStripCharFilter | MappingCharFilter | PatternReplaceTokenFilter +export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase { + bounds?: QueryDslBoundingBox + field?: Field + precision?: GeoHashPrecision + shard_size?: integer + size?: integer +} -export interface CharFilterBase { +export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase { type: string - version?: VersionString + geometry: AggregationsLineStringGeoShape + properties: AggregationsGeoLineProperties } -export interface CharFilterDetail { - filtered_text: Array - name: string +export interface AggregationsGeoLineAggregation { + point: AggregationsGeoLinePoint + sort: AggregationsGeoLineSort + include_sort?: boolean + sort_order?: SearchTypesSortOrder + size?: integer +} + +export interface 
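A `filters` aggregation buckets documents by named queries, optionally collecting non-matches into an `other_bucket`. A sketch with `QueryDslQueryContainer` stubbed:

[source,ts]
----
type QueryDslQueryContainer = Record<string, unknown>

// Simplified stand-in for AggregationsFiltersAggregation above
interface FiltersAggregationSketch {
  filters?: Record<string, QueryDslQueryContainer> | QueryDslQueryContainer[]
  other_bucket?: boolean
  other_bucket_key?: string
}

// Named buckets plus a catch-all bucket for everything else
const byLevel: FiltersAggregationSketch = {
  filters: {
    errors: { term: { level: 'error' } },
    warnings: { term: { level: 'warn' } }
  },
  other_bucket: true,
  other_bucket_key: 'other'
}
----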
AggregationsGeoLinePoint { + field: Field +} + +export interface AggregationsGeoLineProperties { + complete: boolean + sort_values: double[] +} + +export interface AggregationsGeoLineSort { + field: Field } -export interface CharFilterTypes { - char_filter_types: Array - tokenizer_types: Array - filter_types: Array - analyzer_types: Array - built_in_char_filters: Array - built_in_tokenizers: Array - built_in_filters: Array - built_in_analyzers: Array +export interface AggregationsGeoTileGridAggregation extends AggregationsBucketAggregationBase { + field?: Field + precision?: GeoTilePrecision + shard_size?: integer + size?: integer + bounds?: AggregationsGeoBounds } -export interface CharGroupTokenizer extends TokenizerBase { - tokenize_on_chars: Array +export interface AggregationsGlobalAggregation extends AggregationsBucketAggregationBase { } -export interface ChiSquareHeuristic { +export interface AggregationsGoogleNormalizedDistanceHeuristic { background_is_superset: boolean - include_negatives: boolean } -export type ChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min' +export interface AggregationsHdrMethod { + number_of_significant_value_digits?: integer +} -export interface ChildrenAggregation extends BucketAggregationBase { - type?: RelationName +export interface AggregationsHdrPercentileItem { + key: double + value: double } -export interface ChunkingConfig { - mode: ChunkingMode - time_span?: Time +export interface AggregationsHdrPercentilesAggregate extends AggregationsAggregateBase { + values: AggregationsHdrPercentileItem[] } -export type ChunkingMode = 'auto' | 'manual' | 'off' +export interface AggregationsHistogramAggregation extends AggregationsBucketAggregationBase { + extended_bounds?: AggregationsExtendedBounds + hard_bounds?: AggregationsExtendedBounds + field?: Field + interval?: double + min_doc_count?: integer + missing?: double + offset?: double + order?: AggregationsHistogramOrder + script?: Script + format?: string +} -export interface CircleProcessor extends ProcessorBase { - error_distance: double - field: Field - ignore_missing: boolean - shape_type: ShapeType - target_field: Field +export interface AggregationsHistogramOrder { + _count?: SearchTypesSortOrder + _key?: SearchTypesSortOrder } -export interface ClassificationInferenceOptions { - num_top_classes?: integer - num_top_feature_importance_values?: integer - prediction_field_type?: string +export interface AggregationsHoltLinearModelSettings { + alpha?: float + beta?: float } -export interface CleanupRepositoryRequest extends RequestBase { - repository: Name - master_timeout?: Time - timeout?: Time +export interface AggregationsHoltWintersModelSettings { + alpha?: float + beta?: float + gamma?: float + pad?: boolean + period?: integer + type?: AggregationsHoltWintersType } -export interface CleanupRepositoryResponse extends ResponseBase { - results: CleanupRepositoryResults +export type AggregationsHoltWintersType = 'add' | 'mult' + +export interface AggregationsInferenceAggregation extends AggregationsPipelineAggregationBase { + model_id: Name + inference_config?: AggregationsInferenceConfigContainer } -export interface CleanupRepositoryResults { - deleted_blobs: long - deleted_bytes: long +export interface AggregationsInferenceConfigContainer { + regression?: AggregationsRegressionInferenceOptions + classification?: AggregationsClassificationInferenceOptions } -export interface ClearApiKeyCacheNode { - name: Name +export interface AggregationsIpRangeAggregation extends 
AggregationsBucketAggregationBase { + field?: Field + ranges?: AggregationsIpRangeAggregationRange[] } -export interface ClearApiKeyCacheRequest extends RequestBase { - ids?: Ids +export interface AggregationsIpRangeAggregationRange { + from?: string + mask?: string + to?: string } -export interface ClearApiKeyCacheResponse extends ResponseBase { - _nodes: NodeStatistics - cluster_name: Name - nodes: Record +export interface AggregationsIpRangeBucketKeys { } +export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys | + { [property: string]: AggregationsAggregate } -export interface ClearCacheRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - fielddata?: boolean +export interface AggregationsKeyedBucketKeys { + doc_count: long + key: TKey + key_as_string: string +} +export type AggregationsKeyedBucket = AggregationsKeyedBucketKeys | + { [property: string]: AggregationsAggregate } + +export interface AggregationsKeyedValueAggregate extends AggregationsValueAggregate { + keys: string[] +} + +export interface AggregationsLineStringGeoShape { + coordinates: QueryDslGeoCoordinate[] +} + +export interface AggregationsMatrixAggregation extends AggregationsAggregation { fields?: Fields - ignore_unavailable?: boolean - query?: boolean - request?: boolean + missing?: Record +} + +export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateBase { + correlation: Record + covariance: Record + count: integer + kurtosis: double + mean: double + skewness: double + variance: double + name: string } -export interface ClearCacheResponse extends ShardsOperationResponseBase { +export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation { + mode?: AggregationsMatrixStatsMode } -export interface ClearCachedPrivilegeNode { - name: Name +export type AggregationsMatrixStatsMode = 'avg' | 'min' | 'max' | 'sum' | 'median' + +export interface AggregationsMaxAggregation extends AggregationsFormatMetricAggregationBase { } -export interface ClearCachedPrivilegesRequest extends RequestBase { - application: Name +export interface AggregationsMaxBucketAggregation extends AggregationsPipelineAggregationBase { } -export interface ClearCachedPrivilegesResponse extends ResponseBase { - _nodes: NodeStatistics - cluster_name: Name - nodes: Record +export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase { + compression?: double } -export interface ClearCachedRealmsRequest extends RequestBase { - realms: Names - usernames?: Array +export type AggregationsMetricAggregate = AggregationsValueAggregate | AggregationsBoxPlotAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsGeoLineAggregate | AggregationsPercentilesAggregate | AggregationsScriptedMetricAggregate | AggregationsStatsAggregate | AggregationsStringStatsAggregate | AggregationsTopHitsAggregate | AggregationsTopMetricsAggregate | AggregationsExtendedStatsAggregate | AggregationsTDigestPercentilesAggregate | AggregationsHdrPercentilesAggregate + +export interface AggregationsMetricAggregationBase { + field?: Field + missing?: AggregationsMissing + script?: Script } -export interface ClearCachedRealmsResponse extends ResponseBase { - cluster_name: string - nodes: Record - _nodes: NodeStatistics +export interface AggregationsMinAggregation extends AggregationsFormatMetricAggregationBase { } -export interface ClearCachedRolesRequest extends RequestBase { - 
name: Names +export interface AggregationsMinBucketAggregation extends AggregationsPipelineAggregationBase { } -export interface ClearCachedRolesResponse extends ResponseBase { - cluster_name: string - nodes: Record - _nodes: NodeStatistics +export type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' | 'month' | 'year' + +export type AggregationsMissing = string | integer | double | boolean + +export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase { + field?: Field + missing?: AggregationsMissing } -export interface ClearScrollRequest extends RequestBase { - scroll_id?: Ids - body?: { - scroll_id?: Ids - } +export interface AggregationsMovingAverageAggregation extends AggregationsPipelineAggregationBase { + minimize?: boolean + model?: AggregationsMovingAverageModel + settings: AggregationsMovingAverageSettings + predict?: integer + window?: integer } -export interface ClearScrollResponse extends ResponseBase { +export type AggregationsMovingAverageModel = 'linear' | 'simple' | 'ewma' | 'holt' | 'holt_winters' + +export type AggregationsMovingAverageSettings = AggregationsEwmaModelSettings | AggregationsHoltLinearModelSettings | AggregationsHoltWintersModelSettings + +export interface AggregationsMovingFunctionAggregation extends AggregationsPipelineAggregationBase { + script?: string + shift?: integer + window?: integer } -export interface ClearSqlCursorRequest extends RequestBase { - body: { - cursor?: string - } +export interface AggregationsMovingPercentilesAggregation extends AggregationsPipelineAggregationBase { + window?: integer + shift?: integer } -export interface ClearSqlCursorResponse extends ResponseBase { - succeeded: boolean +export interface AggregationsMultiBucketAggregate extends AggregationsAggregateBase { + buckets: TBucket[] } -export interface CloneIndexRequest extends RequestBase { - index: IndexName - target: Name - master_timeout?: Time - timeout?: Time - wait_for_active_shards?: WaitForActiveShards - body?: { - aliases?: Record - settings?: Record - } +export interface AggregationsMultiTermLookup { + field: Field } -export interface CloneIndexResponse extends AcknowledgedResponseBase { - index: string - shards_acknowledged: boolean +export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase { + terms: AggregationsMultiTermLookup[] } -export interface CloneSnapshotRequest extends RequestBase { - repository: Name - snapshot: Name - target_snapshot: Name - master_timeout?: Time - timeout?: Time - body: { - indices: string - } +export interface AggregationsMutualInformationHeuristic { + background_is_superset: boolean + include_negatives: boolean } -export interface CloneSnapshotResponse extends AcknowledgedResponseBase { +export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase { + path?: Field } -export interface CloseIndexRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time - wait_for_active_shards?: WaitForActiveShards +export interface AggregationsNormalizeAggregation extends AggregationsPipelineAggregationBase { + method?: AggregationsNormalizeMethod } -export interface CloseIndexResponse extends AcknowledgedResponseBase { - indices: Record - shards_acknowledged: boolean +export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'zscore' | 'softmax' + +export interface 
AggregationsParentAggregation extends AggregationsBucketAggregationBase { + type?: RelationName } -export interface CloseIndexResult { - closed: boolean - shards?: Record +export interface AggregationsPercentageScoreHeuristic { } -export interface CloseJobRequest extends RequestBase { - job_id: Id - allow_no_jobs?: boolean - force?: boolean - timeout?: Time +export interface AggregationsPercentileItem { + percentile: double + value: double } -export interface CloseJobResponse extends ResponseBase { - closed: boolean +export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase { + keyed?: boolean + values?: double[] + hdr?: AggregationsHdrMethod + tdigest?: AggregationsTDigest } -export interface ClosePointInTimeRequest extends RequestBase { - body?: { - id: Id - } +export interface AggregationsPercentilesAggregate extends AggregationsAggregateBase { + items: AggregationsPercentileItem[] } -export interface ClosePointInTimeResponse extends ResponseBase { - succeeded: boolean - num_freed: integer +export interface AggregationsPercentilesAggregation extends AggregationsFormatMetricAggregationBase { + keyed?: boolean + percents?: double[] + hdr?: AggregationsHdrMethod + tdigest?: AggregationsTDigest } -export interface CloseShardResult { - failures: Array +export interface AggregationsPercentilesBucketAggregation extends AggregationsPipelineAggregationBase { + percents?: double[] } -export interface ClusterAllocationExplainRequest extends RequestBase { - include_disk_info?: boolean - include_yes_decisions?: boolean - body?: { - index?: IndexName - primary?: boolean - shard?: integer - } +export interface AggregationsPipelineAggregationBase extends AggregationsAggregation { + buckets_path?: AggregationsBucketsPath + format?: string + gap_policy?: AggregationsGapPolicy } -export interface ClusterAllocationExplainResponse extends ResponseBase { - allocate_explanation?: string - allocation_delay?: string - allocation_delay_in_millis?: long - can_allocate?: Decision - can_move_to_other_node?: Decision - can_rebalance_cluster?: Decision - can_rebalance_cluster_decisions?: Array - can_rebalance_to_other_node?: Decision - can_remain_decisions?: Array - can_remain_on_current_node?: Decision - cluster_info?: ClusterInfo - configured_delay?: string - configured_delay_in_millis?: long - current_node?: CurrentNode - current_state: string - index: string - move_explanation?: string - node_allocation_decisions?: Array - primary: boolean - rebalance_explanation?: string - remaining_delay?: string - remaining_delay_in_millis?: long - shard: integer - unassigned_info?: UnassignedInformation +export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase { + field?: Field + ranges?: AggregationsAggregationRange[] + script?: Script +} + +export interface AggregationsRangeBucketKeys { +} +export type AggregationsRangeBucket = AggregationsRangeBucketKeys | + { [property: string]: AggregationsAggregate } + +export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase { + exclude?: string | string[] + field?: Field + include?: string | string[] | AggregationsTermsInclude + max_doc_count?: long + missing?: AggregationsMissing + precision?: double + value_type?: string +} + +export interface AggregationsRareTermsBucketKeys { +} +export type AggregationsRareTermsBucket = AggregationsRareTermsBucketKeys | + { [property: string]: AggregationsAggregate } + +export interface AggregationsRateAggregation extends 
AggregationsFormatMetricAggregationBase { + unit?: AggregationsDateInterval + mode?: AggregationsRateMode } -export interface ClusterCertificateInformation { - alias?: string - expiry: DateString - format: string - has_private_key: boolean - path: string - serial_number: string - subject_dn: string +export type AggregationsRateMode = 'sum' | 'value_count' + +export interface AggregationsRegressionInferenceOptions { + results_field: Field + num_top_feature_importance_values?: integer } -export interface ClusterComponentTemplateExistsRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase { + path?: Field } -export interface ClusterComponentTemplateExistsResponse extends ResponseBase { - stub: integer +export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase { + shard_size?: integer } -export interface ClusterDeleteComponentTemplateRequest extends RequestBase { - name: Name - master_timeout?: Time - timeout?: Time +export type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash' + +export interface AggregationsScriptedHeuristic { + script: Script } -export interface ClusterDeleteComponentTemplateResponse extends AcknowledgedResponseBase { +export interface AggregationsScriptedMetricAggregate extends AggregationsAggregateBase { + value: any } -export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface AggregationsScriptedMetricAggregation extends AggregationsMetricAggregationBase { + combine_script?: Script + init_script?: Script + map_script?: Script + params?: Record + reduce_script?: Script } -export interface ClusterDeleteVotingConfigExclusionsResponse extends ResponseBase { - stub: integer +export interface AggregationsSerialDifferencingAggregation extends AggregationsPipelineAggregationBase { + lag?: integer } -export interface ClusterFileSystem { - available_in_bytes: long - free_in_bytes: long - total_in_bytes: long +export interface AggregationsSignificantTermsAggregate extends AggregationsMultiBucketAggregate { + bg_count: long + doc_count: long } -export interface ClusterGetComponentTemplateRequest extends RequestBase { - name?: Name - flat_settings?: boolean - local?: boolean - master_timeout?: Time +export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase { + background_filter?: QueryDslQueryContainer + chi_square?: AggregationsChiSquareHeuristic + exclude?: string | string[] + execution_hint?: AggregationsTermsAggregationExecutionHint + field?: Field + gnd?: AggregationsGoogleNormalizedDistanceHeuristic + include?: string | string[] + min_doc_count?: long + mutual_information?: AggregationsMutualInformationHeuristic + percentage?: AggregationsPercentageScoreHeuristic + script_heuristic?: AggregationsScriptedHeuristic + shard_min_doc_count?: long + shard_size?: integer + size?: integer } -export interface ClusterGetComponentTemplateResponse extends ResponseBase { - stub: integer +export interface AggregationsSignificantTermsBucketKeys { } +export type AggregationsSignificantTermsBucket = AggregationsSignificantTermsBucketKeys | + { [property: string]: AggregationsAggregate } -export interface ClusterGetSettingsRequest extends RequestBase { - flat_settings?: boolean - include_defaults?: boolean - master_timeout?: Time - timeout?: Time 
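// Illustrative sketch (an annotation, not part of the generated declarations):
// assuming the Aggregations* types added above are in scope and that integer
// and double alias number, a histogram aggregation can be written as a plain
// typed object. The field name 'price' and the values are hypothetical.
const priceHistogram: AggregationsHistogramAggregation = {
  field: 'price',    // numeric field to bucket
  interval: 50,      // bucket width
  min_doc_count: 1,  // drop empty buckets
  order: { _key: 'asc' }
}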
+export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase {
+  background_filter?: QueryDslQueryContainer
+  chi_square?: AggregationsChiSquareHeuristic
+  exclude?: string | string[]
+  execution_hint?: AggregationsTermsAggregationExecutionHint
+  field?: Field
+  filter_duplicate_text?: boolean
+  gnd?: AggregationsGoogleNormalizedDistanceHeuristic
+  include?: string | string[]
+  min_doc_count?: long
+  mutual_information?: AggregationsMutualInformationHeuristic
+  percentage?: AggregationsPercentageScoreHeuristic
+  script_heuristic?: AggregationsScriptedHeuristic
+  shard_min_doc_count?: long
+  shard_size?: integer
+  size?: integer
+  source_fields?: Fields
 }
 
-export interface ClusterGetSettingsResponse extends ResponseBase {
-  persistent: Record
-  transient: Record
-  defaults?: Record
+export interface AggregationsSingleBucketAggregateKeys extends AggregationsAggregateBase {
+  doc_count: double
 }
+export type AggregationsSingleBucketAggregate = AggregationsSingleBucketAggregateKeys |
+  { [property: string]: AggregationsAggregate }
 
-export interface ClusterHealthRequest extends RequestBase {
-  index?: Indices
-  expand_wildcards?: ExpandWildcards
-  level?: Level
-  local?: boolean
-  master_timeout?: Time
-  timeout?: Time
-  wait_for_active_shards?: WaitForActiveShards
-  wait_for_events?: WaitForEvents
-  wait_for_nodes?: string
-  wait_for_no_initializing_shards?: boolean
-  wait_for_no_relocating_shards?: boolean
-  wait_for_status?: WaitForStatus
+export interface AggregationsStandardDeviationBounds {
+  lower?: double
+  upper?: double
+  lower_population?: double
+  upper_population?: double
+  lower_sampling?: double
+  upper_sampling?: double
 }
 
-export interface ClusterHealthResponse extends ResponseBase {
-  active_primary_shards: integer
-  active_shards: integer
-  active_shards_percent_as_number: Percentage
-  cluster_name: string
-  delayed_unassigned_shards: integer
-  indices?: Record
-  initializing_shards: integer
-  number_of_data_nodes: integer
-  number_of_in_flight_fetch: integer
-  number_of_nodes: integer
-  number_of_pending_tasks: integer
-  relocating_shards: integer
-  status: Health
-  task_max_waiting_in_queue_millis: EpochMillis
-  timed_out: boolean
-  unassigned_shards: integer
+export interface AggregationsStatsAggregate extends AggregationsAggregateBase {
+  count: double
+  sum: double
+  avg?: double
+  max?: double
+  min?: double
 }
 
-export interface ClusterIndicesShardsIndexStats {
-  primaries: ClusterShardMetrics
-  replication: ClusterShardMetrics
-  shards: ClusterShardMetrics
+export interface AggregationsStatsAggregation extends AggregationsFormatMetricAggregationBase {
 }
 
-export interface ClusterIndicesShardsStats {
-  index?: ClusterIndicesShardsIndexStats
-  primaries?: double
-  replication?: double
-  total?: double
+export interface AggregationsStatsBucketAggregation extends AggregationsPipelineAggregationBase {
 }
 
-export interface ClusterIndicesStats {
-  completion: CompletionStats
+export interface AggregationsStringStatsAggregate extends AggregationsAggregateBase {
   count: long
-  docs: DocStats
-  fielddata: FielddataStats
-  query_cache: QueryCacheStats
-  segments: SegmentsStats
-  shards: ClusterIndicesShardsStats
-  store: StoreStats
-  mappings: FieldTypesMappings
-  analysis: CharFilterTypes
-  versions?: Array
+  min_length: integer
+  max_length: integer
+  avg_length: double
+  entropy: double
+  distribution?: Record
 }
 
-export interface ClusterInfo {
-  nodes: Record
-  shard_sizes: Record
-  shard_paths: Record
-  reserved_sizes: Array
+export interface AggregationsStringStatsAggregation extends AggregationsMetricAggregationBase {
+  show_distribution?: boolean
 }
 
-export interface ClusterIngestStats {
-  number_of_pipelines: integer
-  processor_stats: Record
+export interface AggregationsSumAggregation extends AggregationsFormatMetricAggregationBase {
 }
 
-export interface ClusterJvm {
-  max_uptime_in_millis: long
-  mem: ClusterJvmMemory
-  threads: long
-  versions: Array
+export interface AggregationsSumBucketAggregation extends AggregationsPipelineAggregationBase {
 }
 
-export interface ClusterJvmMemory {
-  heap_max_in_bytes: long
-  heap_used_in_bytes: long
+export interface AggregationsTDigest {
+  compression?: integer
 }
 
-export interface ClusterJvmVersion {
-  bundled_jdk: boolean
-  count: integer
-  using_bundled_jdk: boolean
-  version: VersionString
-  vm_name: string
-  vm_vendor: string
-  vm_version: VersionString
+export interface AggregationsTDigestPercentilesAggregate extends AggregationsAggregateBase {
+  values: Record
 }
 
-export interface ClusterNetworkTypes {
-  http_types: Record
-  transport_types: Record
+export interface AggregationsTTestAggregation extends AggregationsAggregation {
+  a?: AggregationsTestPopulation
+  b?: AggregationsTestPopulation
+  type?: AggregationsTTestType
 }
 
-export interface ClusterNodeCount {
-  coordinating_only: integer
-  data: integer
-  ingest: integer
-  master: integer
-  total: integer
-  voting_only: integer
-  data_cold: integer
-  data_frozen?: integer
-  data_content: integer
-  data_warm: integer
-  data_hot: integer
-  ml: integer
-  remote_cluster_client: integer
-  transform: integer
+export type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic'
+
+export interface AggregationsTermsAggregate extends AggregationsMultiBucketAggregate {
+  doc_count_error_upper_bound: long
+  sum_other_doc_count: long
 }
 
-export interface ClusterNodesStats {
-  count: ClusterNodeCount
-  discovery_types: Record
-  fs: ClusterFileSystem
-  ingest: ClusterIngestStats
-  jvm: ClusterJvm
-  network_types: ClusterNetworkTypes
-  os: ClusterOperatingSystemStats
-  packaging_types: Array
-  plugins: Array
-  process: ClusterProcess
-  versions: Array
-}
-
-export interface ClusterOperatingSystemArchitecture {
-  count: integer
-  arch: string
+export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase {
+  collect_mode?: AggregationsTermsAggregationCollectMode
+  exclude?: string | string[]
+  execution_hint?: AggregationsTermsAggregationExecutionHint
+  field?: Field
+  include?: string | string[] | AggregationsTermsInclude
+  min_doc_count?: integer
+  missing?: AggregationsMissing
+  missing_bucket?: boolean
+  value_type?: string
+  order?: AggregationsTermsAggregationOrder
+  script?: Script
+  shard_size?: integer
+  show_term_doc_count_error?: boolean
+  size?: integer
 }
 
-export interface ClusterOperatingSystemName {
-  count: integer
-  name: string
+export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_first'
+
+export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality'
+
+export type AggregationsTermsAggregationOrder = SearchTypesSortOrder | Record | Record[]
+
+export interface AggregationsTermsInclude {
+  num_partitions: long
+  partition: long
 }
 
-export interface ClusterOperatingSystemPrettyName {
-  count: integer
-  pretty_name: string
+export interface AggregationsTestPopulation {
+  field: Field
+  script?: Script
+  filter?: QueryDslQueryContainer
 }
 
-export interface ClusterOperatingSystemStats {
-  allocated_processors: integer
-  available_processors: integer
-  mem: OperatingSystemMemoryInfo
-  names: Array
-  pretty_names: Array
-  architectures?: Array
+export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase {
+  hits: SearchTypesHitsMetadata>
 }
 
-export interface ClusterPendingTasksRequest extends RequestBase {
-  local?: boolean
-  master_timeout?: Time
+export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase {
+  docvalue_fields?: Fields
+  explain?: boolean
+  from?: integer
+  highlight?: SearchTypesHighlight
+  script_fields?: Record
+  size?: integer
+  sort?: SearchTypesSort
+  _source?: boolean | SearchTypesSourceFilter | Fields
+  stored_fields?: Fields
+  track_scores?: boolean
+  version?: boolean
+  seq_no_primary_term?: boolean
 }
 
-export interface ClusterPendingTasksResponse extends ResponseBase {
-  tasks: Array
+export interface AggregationsTopMetrics {
+  sort: (long | double | string)[]
+  metrics: Record
 }
 
-export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase {
-  node_names?: Names
-  node_ids?: Ids
-  timeout?: Time
-  wait_for_removal?: boolean
+export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBase {
+  top: AggregationsTopMetrics[]
 }
 
-export interface ClusterPostVotingConfigExclusionsResponse extends ResponseBase {
-  stub: integer
+export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase {
+  metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[]
+  size?: integer
+  sort?: SearchTypesSort
 }
 
-export interface ClusterProcess {
-  cpu: ClusterProcessCpu
-  open_file_descriptors: ClusterProcessOpenFileDescriptors
+export interface AggregationsTopMetricsValue {
+  field: Field
 }
 
-export interface ClusterProcessCpu {
-  percent: integer
+export interface AggregationsValueAggregate extends AggregationsAggregateBase {
+  value: double
+  value_as_string?: string
 }
 
-export interface ClusterProcessOpenFileDescriptors {
-  avg: long
-  max: long
-  min: long
+export interface AggregationsValueCountAggregation extends AggregationsFormattableMetricAggregation {
 }
 
-export interface ClusterProcessorStats {
-  count: long
-  current: long
-  failed: long
-  time_in_millis: long
+export type AggregationsValueType = 'string' | 'long' | 'double' | 'number' | 'date' | 'date_nanos' | 'ip' | 'numeric' | 'geo_point' | 'boolean'
+
+export interface AggregationsVariableWidthHistogramAggregation {
+  field?: Field
+  buckets?: integer
+  shard_size?: integer
+  initial_buffer?: integer
 }
 
-export interface ClusterPutComponentTemplateRequest extends RequestBase {
-  stub_a: string
-  stub_b: string
-  body: {
-    stub_c: string
-  }
+export interface AggregationsWeightedAverageAggregation extends AggregationsAggregation {
+  format?: string
+  value?: AggregationsWeightedAverageValue
+  value_type?: AggregationsValueType
+  weight?: AggregationsWeightedAverageValue
 }
 
-export interface ClusterPutComponentTemplateResponse extends ResponseBase {
-  stub: integer
+export interface AggregationsWeightedAverageValue {
+  field?: Field
+  missing?: double
+  script?: Script
 }
 
-export interface ClusterPutSettingsRequest extends RequestBase {
-  flat_settings?: boolean
-  master_timeout?: Time
-  timeout?: Time
-  body: {
-    persistent?: Record
-    transient?: Record
-  }
+export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase {
+  preserve_original: boolean
 }
 
-export interface ClusterPutSettingsResponse extends ResponseBase {
-  acknowledged: boolean
-  persistent: Record
-  transient: Record
+export type AnalysisCharFilter = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceTokenFilter
+
+export interface AnalysisCharFilterBase {
+  type: string
+  version?: VersionString
+}
+
+export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase {
+  tokenize_on_chars: string[]
+}
+
+export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
+  common_words: string[]
+  common_words_path: string
+  ignore_case: boolean
+  query_mode: boolean
+}
+
+export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase {
+  hyphenation_patterns_path: string
+  max_subword_size: integer
+  min_subword_size: integer
+  min_word_size: integer
+  only_longest_match: boolean
+  word_list: string[]
+  word_list_path: string
 }
 
-export interface ClusterRerouteCommand {
-  cancel: ClusterRerouteCommandAction
+export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase {
+  filter: string[]
+  script: Script
 }
 
-export interface ClusterRerouteCommandAction {
-  index: IndexName
-  shard: integer
-  node: string
+export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity'
+
+export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase {
+  delimiter: string
+  encoding: AnalysisDelimitedPayloadEncoding
}
 
-export interface ClusterRerouteDecision {
-  decider: string
-  decision: string
-  explanation: string
+export type AnalysisEdgeNGramSide = 'front' | 'back'
+
+export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase {
+  max_gram: integer
+  min_gram: integer
+  side: AnalysisEdgeNGramSide
 }
 
-export interface ClusterRerouteExplanation {
-  command: string
-  decisions: Array
-  parameters: ClusterRerouteParameters
+export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
+  custom_token_chars: string
+  max_gram: integer
+  min_gram: integer
+  token_chars: AnalysisTokenChar[]
 }
 
-export interface ClusterRerouteParameters {
-  allow_primary: boolean
-  from_node: string
-  index: string
-  node: string
-  shard: integer
-  to_node: string
+export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
+  articles: string[]
+  articles_case: boolean
 }
 
-export interface ClusterRerouteRequest extends RequestBase {
-  dry_run?: boolean
-  explain?: boolean
-  master_timeout?: Time
-  metric?: Metrics
-  retry_failed?: boolean
-  timeout?: Time
-  body?: {
-    commands?: Array
-  }
+export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase {
+  max_output_size: integer
+  separator: string
 }
 
-export interface ClusterRerouteResponse extends ResponseBase {
-  acknowledged: boolean
-  explanations: Array
-  state: Array
+export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase {
 }
 
-export interface ClusterShardMetrics {
-  avg: double
-  max: double
-  min: double
+export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase {
+  dedup: boolean
+  dictionary: string
+  locale: string
+  longest_only: boolean
 }
 
-export interface ClusterStateBlockIndex {
-  description: string
-  retryable: boolean
-  levels: Array
-  aliases?: Array
-  aliases_version?: VersionNumber
-  version?: VersionNumber
-  mapping_version?: VersionNumber
-  settings_version?: VersionNumber
-  routing_num_shards?: VersionNumber
-  state?: string
+export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
 }
 
-export interface ClusterStateBlocks {
-  indices?: Record>
+export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase {
 }
 
-export interface ClusterStateMetadata {
-  cluster_uuid: Uuid
-  cluster_uuid_committed: boolean
-  templates: ClusterStateMetadataTemplate
-  indices?: Record>
-  'index-graveyard': ClusterStateMetadataIndexGraveyard
-  cluster_coordination: ClusterStateMetadataClusterCoordination
+export type AnalysisKeepTypesMode = 'include' | 'exclude'
+
+export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase {
+  mode: AnalysisKeepTypesMode
+  types: string[]
 }
 
-export interface ClusterStateMetadataClusterCoordination {
-  term: integer
-  last_committed_config: Array
-  last_accepted_config: Array
-  voting_config_exclusions: Array
+export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase {
+  keep_words: string[]
+  keep_words_case: boolean
+  keep_words_path: string
 }
 
-export interface ClusterStateMetadataIndexGraveyard {
-  tombstones: Array
+export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase {
+  ignore_case: boolean
+  keywords: string[]
+  keywords_path: string
+  keywords_pattern: string
 }
 
-export interface ClusterStateMetadataTemplate {
+export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase {
+  buffer_size: integer
 }
 
-export interface ClusterStateRequest extends RequestBase {
-  metric?: Metrics
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  flat_settings?: boolean
-  ignore_unavailable?: boolean
-  local?: boolean
-  master_timeout?: Time
-  wait_for_metadata_version?: VersionNumber
-  wait_for_timeout?: Time
+export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase {
+  max: integer
+  min: integer
 }
 
-export interface ClusterStateResponse extends ResponseBase {
-  cluster_name: string
-  cluster_uuid: Uuid
-  master_node?: string
-  state?: Array
-  state_uuid?: Uuid
-  version?: VersionNumber
-  blocks?: ClusterStateBlocks
-  metadata?: ClusterStateMetadata
+export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase {
 }
 
-export interface ClusterStatistics {
-  skipped: integer
-  successful: integer
-  total: integer
+export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase {
+  consume_all_tokens: boolean
+  max_token_count: integer
 }
 
-export interface ClusterStatsRequest extends RequestBase {
-  node_id?: NodeIds
-  flat_settings?: boolean
-  timeout?: Time
+export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase {
+  language: string
 }
 
-export interface ClusterStatsResponse extends NodesResponseBase {
-  cluster_name: Name
-  cluster_uuid: Uuid
-  indices: ClusterIndicesStats
-  nodes: ClusterNodesStats
-  status: ClusterStatus
-  timestamp: long
-  _nodes: NodeStatistics
+export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase {
 }
 
-export type ClusterStatus = 'green' | 'yellow' | 'red'
+export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase {
+  mappings: string[]
+  mappings_path: string
+}
 
-export interface Collector {
-  name: string
-  reason: string
-  time_in_nanos: long
-  children?: Array
+export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase {
+  filters: string[]
+  preserve_original: boolean
 }
 
-export interface CommonGramsTokenFilter extends TokenFilterBase {
-  common_words: Array
-  common_words_path: string
-  ignore_case: boolean
-  query_mode: boolean
+export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase {
+  max_gram: integer
+  min_gram: integer
 }
 
-export interface CommonTermsQuery extends QueryBase {
-  analyzer?: string
-  cutoff_frequency?: double
-  high_freq_operator?: Operator
-  low_freq_operator?: Operator
-  minimum_should_match?: MinimumShouldMatch
-  query?: string
+export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
+  custom_token_chars: string
+  max_gram: integer
+  min_gram: integer
+  token_chars: AnalysisTokenChar[]
 }
 
-export interface CompactNodeInfo {
-  name: string
+export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed'
+
+export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase {
+  stoptags: string[]
 }
 
-export interface CompareCondition {
-  comparison: string
-  path: string
-  value: any
+export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase {
+  decompound_mode: AnalysisNoriDecompoundMode
+  discard_punctuation: boolean
+  user_dictionary: string
+  user_dictionary_rules: string[]
 }
 
-export interface CompletionProperty extends DocValuesPropertyBase {
-  analyzer?: string
-  contexts?: Array
-  max_input_length?: integer
-  preserve_position_increments?: boolean
-  preserve_separators?: boolean
-  search_analyzer?: string
-  type: 'completion'
+export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase {
+  buffer_size: integer
+  delimiter: string
+  replacement: string
+  reverse: boolean
+  skip: integer
 }
 
-export interface CompletionStats {
-  size_in_bytes: long
-  fields?: Record
+export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase {
+  patterns: string[]
+  preserve_original: boolean
 }
 
-export interface CompletionSuggestOption {
-  collate_match?: boolean
-  contexts?: Record>
-  fields?: Record
-  _id: string
-  _index: IndexName
-  _type?: Type
-  _routing?: Routing
-  _score: double
-  _source: TDocument
-  text: string
+export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase {
+  flags: string
+  pattern: string
+  replacement: string
 }
 
-export interface CompletionSuggester extends SuggesterBase {
-  contexts?: Record | GeoLocation | Array>
-  fuzzy?: SuggestFuzziness
-  prefix?: string
-  regex?: string
-  skip_duplicates?: boolean
+export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase {
 }
 
-export interface CompositeAggregation extends BucketAggregationBase {
-  after?: Record
-  size?: integer
-  sources?: Array>
+export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase {
+  script: Script
 }
 
-export interface CompositeAggregationSource {
-  terms?: TermsAggregation
-  histogram?: HistogramAggregation
-  date_histogram?: DateHistogramAggregation
-  geotile_grid?: GeoTileGridAggregation
+export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase {
 }
 
-export interface CompositeBucketKeys {
+export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase {
 }
-export type CompositeBucket = CompositeBucketKeys |
-  { [property: string]: Aggregate }
 
-export interface CompositeBucketAggregate extends MultiBucketAggregate> {
-  after_key: Record
+export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase {
+  filler_token: string
+  max_shingle_size: integer
+  min_shingle_size: integer
+  output_unigrams: boolean
+  output_unigrams_if_no_shingles: boolean
+  token_separator: string
 }
 
-export interface CompoundWordTokenFilterBase extends TokenFilterBase {
-  hyphenation_patterns_path: string
-  max_subword_size: integer
-  min_subword_size: integer
-  min_word_size: integer
-  only_longest_match: boolean
-  word_list: Array
-  word_list_path: string
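// Illustrative sketch (an annotation, not part of the generated declarations):
// AnalysisShingleTokenFilter above declares all of its own members as
// required, so a complete literal must spell each one out. The concrete
// values here are hypothetical, not library defaults.
const shingleFilter: AnalysisShingleTokenFilter = {
  type: 'shingle',                       // from AnalysisTokenFilterBase
  filler_token: '_',
  max_shingle_size: 3,
  min_shingle_size: 2,
  output_unigrams: true,
  output_unigrams_if_no_shingles: false,
  token_separator: ' '
}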
+export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish'
 
-export interface ConditionContainer {
-  always?: AlwaysCondition
-  array_compare?: ArrayCompareCondition
-  compare?: CompareCondition
-  never?: NeverCondition
-  script?: ScriptCondition
+export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase {
+  language: AnalysisSnowballLanguage
 }
 
-export type ConditionOperator = 'gt' | 'gte' | 'lt' | 'lte'
+export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase {
+  max_token_length: integer
 }
 
-export interface ConditionTokenFilter extends TokenFilterBase {
-  filter: Array
-  script: Script
+export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterBase {
+  rules: string[]
+  rules_path: string
+}
 
-export type ConditionType = 'always' | 'never' | 'script' | 'compare' | 'array_compare'
+export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase {
+  language: string
 }
 
-export type Conflicts = 'abort' | 'proceed'
+export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase {
+  ignore_case?: boolean
+  remove_trailing?: boolean
+  stopwords: AnalysisStopWords
+  stopwords_path?: string
+}
 
-export type ConnectionScheme = 'http' | 'https'
+export type AnalysisStopWords = string | string[]
+
+export type AnalysisSynonymFormat = 'solr' | 'wordnet'
 
-export interface ConstantKeywordProperty extends PropertyBase {
-  value?: any
-  type: 'constant_keyword'
+export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase {
+  expand: boolean
+  format: AnalysisSynonymFormat
+  lenient: boolean
+  synonyms: string[]
+  synonyms_path: string
+  tokenizer: string
+  updateable: boolean
 }
 
-export interface ConstantScoreQuery extends QueryBase {
-  filter?: QueryContainer
-  boost?: float
+export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
+  expand: boolean
+  format: AnalysisSynonymFormat
+  lenient: boolean
+  synonyms: string[]
+  synonyms_path: string
+  tokenizer: string
+  updateable: boolean
 }
 
-export type Context = string | GeoLocation
+export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'
 
-export interface ConvertProcessor extends ProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field: Field
-  type: ConvertProcessorType
+export type AnalysisTokenFilter = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter
+
+export interface AnalysisTokenFilterBase {
+  type: string
+  version?: VersionString
 }
 
-export type ConvertProcessorType = 'integer' | 'long' | 'float' | 'double' | 'string' | 'boolean' | 'auto'
+export type AnalysisTokenizer = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer
 
-export interface CoordinatorStats {
-  executed_searches_total: long
-  node_id: string
-  queue_size: integer
-  remote_requests_current: integer
-  remote_requests_total: long
+export interface AnalysisTokenizerBase {
+  type: string
+  version?: VersionString
 }
 
-export type CoreProperty = ObjectProperty | NestedProperty | SearchAsYouTypeProperty | TextProperty | DocValuesProperty
+export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {
+}
 
-export interface CorePropertyBase extends PropertyBase {
-  copy_to?: Fields
-  similarity?: string
-  store?: boolean
+export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase {
+  length: integer
 }
 
-export interface CountRequest extends RequestBase {
-  index?: Indices
-  type?: Types
-  allow_no_indices?: boolean
-  analyzer?: string
-  analyze_wildcard?: boolean
-  default_operator?: DefaultOperator
-  df?: string
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-  lenient?: boolean
-  min_score?: double
-  preference?: string
-  query_on_query_string?: string
-  routing?: Routing
-  terminate_after?: long
-  q?: string
-  body?: {
-    query?: QueryContainer
-  }
+export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase {
+  max_token_length: integer
 }
 
-export interface CountResponse extends ResponseBase {
-  count: long
-  _shards: ShardStatistics
+export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase {
+  only_on_same_position: boolean
 }
 
-export interface CreateApiKeyRequest extends RequestBase {
-  refresh?: Refresh
-  body: {
-    expiration?: Time
-    name?: string
-    role_descriptors?: Record
-  }
+export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase {
 }
 
-export interface CreateApiKeyResponse extends ResponseBase {
-  api_key: string
-  expiration?: long
-  id: Id
-  name: string
+export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase {
+  max_token_length: integer
 }
 
-export interface CreateFollowIndexRequest extends RequestBase {
-  index: IndexName
-  wait_for_active_shards?: WaitForActiveShards
-  body: {
-    leader_index?: IndexName
-    max_outstanding_read_requests?: long
-    max_outstanding_write_requests?: long
-    max_read_request_operation_count?: long
-    max_read_request_size?: string
-    max_retry_delay?: Time
-    max_write_buffer_count?: long
-    max_write_buffer_size?: string
-    max_write_request_operation_count?: long
-    max_write_request_size?: string
-    read_poll_timeout?: Time
-    remote_cluster?: string
-  }
+export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilterBase {
+  adjust_offsets: boolean
+  catenate_all: boolean
+  catenate_numbers: boolean
+  catenate_words: boolean
+  generate_number_parts: boolean
+  generate_word_parts: boolean
+  preserve_original: boolean
+  protected_words: string[]
+  protected_words_path: string
+  split_on_case_change: boolean
+  split_on_numerics: boolean
+  stem_english_possessive: boolean
+  type_table: string[]
+  type_table_path: string
 }
 
-export interface CreateFollowIndexResponse extends ResponseBase {
-  follow_index_created: boolean
-  follow_index_shards_acked: boolean
-  index_following_started: boolean
+export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBase {
+  catenate_all: boolean
+  catenate_numbers: boolean
+  catenate_words: boolean
+  generate_number_parts: boolean
+  generate_word_parts: boolean
+  preserve_original: boolean
+  protected_words: string[]
+  protected_words_path: string
+  split_on_case_change: boolean
+  split_on_numerics: boolean
+  stem_english_possessive: boolean
+  type_table: string[]
+  type_table_path: string
 }
 
-export interface CreateIndexRequest extends RequestBase {
-  index: IndexName
-  include_type_name?: boolean
-  master_timeout?: Time
-  timeout?: Time
-  wait_for_active_shards?: WaitForActiveShards
-  body?: {
-    aliases?: Record
-    mappings?: Record | TypeMapping
-    settings?: Record
-  }
+export interface MappingAllField {
+  analyzer: string
+  enabled: boolean
+  omit_norms: boolean
+  search_analyzer: string
+  similarity: string
+  store: boolean
+  store_term_vector_offsets: boolean
+  store_term_vector_payloads: boolean
+  store_term_vector_positions: boolean
+  store_term_vectors: boolean
 }
 
-export interface CreateIndexResponse extends AcknowledgedResponseBase {
-  index: string
-  shards_acknowledged: boolean
+export interface MappingBinaryProperty extends MappingDocValuesPropertyBase {
+  type: 'binary'
 }
 
-export interface CreateRepositoryRequest extends RequestBase {
-  repository: Name
-  master_timeout?: Time
-  timeout?: Time
-  verify?: boolean
-  body: {
-    repository?: SnapshotRepository
-    type: string
-    settings: SnapshotRepositorySettings
-  }
+export interface MappingBooleanProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  fielddata?: IndicesNumericFielddata
+  index?: boolean
+  null_value?: boolean
+  type: 'boolean'
 }
 
-export interface CreateRepositoryResponse extends AcknowledgedResponseBase {
+export interface MappingCompletionProperty extends MappingDocValuesPropertyBase {
+  analyzer?: string
+  contexts?: MappingSuggestContext[]
+  max_input_length?: integer
+  preserve_position_increments?: boolean
+  preserve_separators?: boolean
+  search_analyzer?: string
+  type: 'completion'
 }
 
-export interface CreateRequest extends RequestBase {
-  id: Id
-  index: IndexName
-  type?: Type
-  pipeline?: string
-  refresh?: Refresh
-  routing?: Routing
-  timeout?: Time
-  version?: VersionNumber
-  version_type?: VersionType
-  wait_for_active_shards?: WaitForActiveShards
-  body: TDocument
+export interface MappingConstantKeywordProperty extends MappingPropertyBase {
+  value?: any
+  type: 'constant_keyword'
 }
 
-export interface CreateResponse extends WriteResponseBase {
+export type MappingCoreProperty = MappingObjectProperty | MappingNestedProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingDocValuesProperty
+
+export interface MappingCorePropertyBase extends MappingPropertyBase {
+  copy_to?: Fields
+  similarity?: string
+  store?: boolean
 }
 
-export interface CreateRollupJobRequest extends RequestBase {
-  id: Id
-  body: {
-    cron?: string
-    groups?: RollupGroupings
-    index_pattern?: string
-    metrics?: Array
-    page_size?: long
-    rollup_index?: IndexName
-  }
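// Illustrative sketch (an annotation, not part of the generated declarations):
// field mappings become plain typed objects with these Mapping* declarations.
// The doc_values and null_value settings below are hypothetical choices, not
// defaults implied by the types.
const isActiveField: MappingBooleanProperty = {
  type: 'boolean',   // discriminant for the MappingProperty union
  doc_values: true,
  null_value: false
}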
+export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  format?: string
+  ignore_malformed?: boolean
+  index?: boolean
+  null_value?: DateString
+  precision_step?: integer
+  type: 'date_nanos'
 }
 
-export interface CreateRollupJobResponse extends AcknowledgedResponseBase {
+export interface MappingDateProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  fielddata?: IndicesNumericFielddata
+  format?: string
+  ignore_malformed?: boolean
+  index?: boolean
+  null_value?: DateString
+  precision_step?: integer
+  type: 'date'
 }
 
-export interface CronExpression extends ScheduleBase {
+export interface MappingDateRangeProperty extends MappingRangePropertyBase {
+  format?: string
+  type: 'date_range'
 }
 
-export interface CsvProcessor extends ProcessorBase {
-  empty_value: any
-  description?: string
-  field: Field
-  ignore_missing?: boolean
-  quote?: string
-  separator?: string
-  target_fields: Fields
-  trim: boolean
+export type MappingDocValuesProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDateProperty | MappingDateNanosProperty | MappingKeywordProperty | MappingNumberProperty | MappingRangeProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingCompletionProperty | MappingGenericProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingShapeProperty | MappingTokenCountProperty | MappingVersionProperty | MappingWildcardProperty | MappingPointProperty
+
+export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase {
+  doc_values?: boolean
 }
 
-export interface CumulativeCardinalityAggregation extends PipelineAggregationBase {
+export interface MappingDoubleRangeProperty extends MappingRangePropertyBase {
+  type: 'double_range'
 }
 
-export interface CumulativeSumAggregation extends PipelineAggregationBase {
+export type MappingDynamicMapping = 'strict' | 'runtime' | 'true' | 'false'
+
+export interface MappingDynamicTemplate {
+  mapping?: MappingPropertyBase
+  match?: string
+  match_mapping_type?: string
+  match_pattern?: MappingMatchType
+  path_match?: string
+  path_unmatch?: string
+  unmatch?: string
 }
 
-export interface CurrentNode {
-  id: string
-  name: string
-  attributes: Record
-  transport_address: string
-  weight_ranking: integer
+export interface MappingFieldAliasProperty extends MappingPropertyBase {
+  path?: Field
+  type: 'alias'
 }
 
-export interface CustomSettings {
-  custom_urls?: Array
-  created_by?: string
-  job_tags?: Record
+export interface MappingFieldMapping {
 }
 
-export interface DailySchedule {
-  at: Array | TimeOfDay
+export interface MappingFieldNamesField {
+  enabled: boolean
 }
 
-export interface DataCounts {
-  bucket_count: long
-  earliest_record_timestamp?: long
-  empty_bucket_count: long
-  input_bytes: long
-  input_field_count: long
-  input_record_count: long
-  invalid_date_count: long
-  job_id: Id
-  last_data_time?: long
-  latest_empty_bucket_timestamp?: long
-  latest_record_timestamp?: long
-  latest_sparse_bucket_timestamp?: long
-  latest_bucket_timestamp?: long
-  missing_field_count: long
-  out_of_order_timestamp_count: long
-  processed_field_count: long
-  processed_record_count: long
-  sparse_bucket_count: long
+export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword'
+
+export interface MappingFlattenedProperty extends MappingPropertyBase {
+  boost?: double
+  depth_limit?: integer
+  doc_values?: boolean
+  eager_global_ordinals?: boolean
+  index?: boolean
+  index_options?: MappingIndexOptions
+  null_value?: string
+  similarity?: string
+  split_queries_on_whitespace?: boolean
+  type: 'flattened'
 }
 
-export interface DataDescription {
-  format?: string
-  time_field: Field
-  time_format?: string
-  field_delimiter?: string
+export interface MappingFloatRangeProperty extends MappingRangePropertyBase {
+  type: 'float_range'
 }
 
-export interface DataPathStats {
-  available: string
-  available_in_bytes: long
-  disk_queue: string
-  disk_reads: long
-  disk_read_size: string
-  disk_read_size_in_bytes: long
-  disk_writes: long
-  disk_write_size: string
-  disk_write_size_in_bytes: long
-  free: string
-  free_in_bytes: long
-  mount: string
-  path: string
-  total: string
-  total_in_bytes: long
+export interface MappingGenericProperty extends MappingDocValuesPropertyBase {
+  analyzer: string
+  boost: double
+  fielddata: IndicesStringFielddata
+  ignore_malformed: boolean
+  index: boolean
+  index_options: MappingIndexOptions
+  norms: boolean
+  null_value: string
+  position_increment_gap: integer
+  search_analyzer: string
+  term_vector: MappingTermVectorOption
   type: string
 }
 
-export type DataStreamHealthStatus = 'GREEN' | 'green' | 'YELLOW' | 'yellow' | 'RED' | 'red'
-
-export type DataStreamName = string
+export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'COUNTERCLOCKWISE' | 'ccw' | 'CCW' | 'left' | 'LEFT' | 'clockwise' | 'CLOCKWISE' | 'cw' | 'CW'
 
-export interface DataStreamsStatsItem {
-  backing_indices: integer
-  data_stream: Name
-  store_size?: ByteSize
-  store_size_bytes: integer
-  maximum_timestamp: integer
+export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase {
+  ignore_malformed?: boolean
+  ignore_z_value?: boolean
+  null_value?: QueryDslGeoLocation
+  type: 'geo_point'
 }
 
-export interface DataStreamsUsage extends XPackUsage {
-  data_streams: long
-  indices_count: long
+export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase {
+  coerce?: boolean
+  ignore_malformed?: boolean
+  ignore_z_value?: boolean
+  orientation?: MappingGeoOrientation
+  strategy?: MappingGeoStrategy
+  type: 'geo_shape'
 }
 
-export interface DataTierPhaseCountUsage {
-  node_count: long
-  index_count: long
-  total_shard_count: long
-  primary_shard_count: long
-  doc_count: long
-  total_size_bytes: long
-  primary_size_bytes: long
-  primary_shard_size_avg_bytes: long
-  primary_shard_size_median_bytes: long
-  primary_shard_size_mad_bytes: long
-}
+export type MappingGeoStrategy = 'recursive' | 'term'
 
-export interface DataTiersUsage extends XPackUsage {
-  data_warm: DataTierPhaseCountUsage
-  data_frozen?: DataTierPhaseCountUsage
-  data_cold: DataTierPhaseCountUsage
-  data_content: DataTierPhaseCountUsage
-  data_hot: DataTierPhaseCountUsage
+export interface MappingHistogramProperty extends MappingPropertyBase {
+  ignore_malformed?: boolean
+  type: 'histogram'
 }
 
-export interface Datafeed {
-  aggregations?: Record
-  aggs?: Record
-  chunking_config?: ChunkingConfig
-  datafeed_id: Id
-  frequency?: Timestamp
-  indices: Indices
-  indexes?: Array
-  job_id: Id
-  max_empty_searches?: integer
-  query: QueryContainer
-  query_delay?: Timestamp
-  script_fields?: Record
-  scroll_size?: integer
-  delayed_data_check_config: DelayedDataCheckConfig
-  runtime_mappings?: RuntimeFields
-  indices_options?: DatafeedIndicesOptions
+export interface MappingIndexField {
+  enabled: boolean
 }
 
-export interface DatafeedCount {
-  count: long
-}
+export type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets'
 
-export interface DatafeedIndicesOptions {
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  ignore_throttled?: boolean
+export interface MappingIntegerRangeProperty extends MappingRangePropertyBase {
+  type: 'integer_range'
 }
 
-export type DatafeedState = 'started' | 'stopped' | 'starting' | 'stopping'
-
-export interface DatafeedStats {
-  assignment_explanation?: string
-  datafeed_id: Id
-  node?: DiscoveryNode
-  state: DatafeedState
-  timing_stats: DatafeedTimingStats
+export interface MappingIpProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  index?: boolean
+  null_value?: string
+  type: 'ip'
 }
 
-export interface DatafeedTimingStats {
-  bucket_count: long
-  exponential_average_search_time_per_hour_ms: double
-  job_id: string
-  search_count: long
-  total_search_time_ms: double
+export interface MappingIpRangeProperty extends MappingRangePropertyBase {
+  type: 'ip_range'
 }
 
-export interface Datafeeds {
-  scroll_size: integer
+export interface MappingJoinProperty extends MappingPropertyBase {
+  relations?: Record
+  type: 'join'
 }
 
-export interface DateDecayFunctionKeys extends DecayFunctionBase {
+export interface MappingKeywordProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  eager_global_ordinals?: boolean
+  index?: boolean
+  index_options?: MappingIndexOptions
+  normalizer?: string
+  norms?: boolean
+  null_value?: string
+  split_queries_on_whitespace?: boolean
+  type: 'keyword'
 }
-export type DateDecayFunction = DateDecayFunctionKeys |
-  { [property: string]: DecayPlacement }
 
-export interface DateField {
-  field: Field
-  format?: string
-  include_unmapped?: boolean
+export interface MappingLongRangeProperty extends MappingRangePropertyBase {
+  type: 'long_range'
 }
 
-export interface DateHistogramAggregation extends BucketAggregationBase {
-  calendar_interval?: DateInterval | Time
-  extended_bounds?: ExtendedBounds
-  hard_bounds?: ExtendedBounds
-  field?: Field
-  fixed_interval?: DateInterval | Time
-  format?: string
-  interval?: DateInterval | Time
-  min_doc_count?: integer
-  missing?: DateString
-  offset?: Time
-  order?: HistogramOrder
-  params?: Record
-  script?: Script
-  time_zone?: string
-}
+export type MappingMatchType = 'simple' | 'regex'
 
-export interface DateHistogramBucketKeys {
+export interface MappingMurmur3HashProperty extends MappingDocValuesPropertyBase {
+  type: 'murmur3'
 }
-export type DateHistogramBucket = DateHistogramBucketKeys |
-  { [property: string]: Aggregate }
 
-export interface DateHistogramRollupGrouping {
-  delay?: Time
-  field: Field
-  format?: string
-  interval?: Time
-  calendar_interval?: Time
-  fixed_interval?: Time
-  time_zone?: string
+export interface MappingNestedProperty extends MappingCorePropertyBase {
+  dynamic?: boolean | MappingDynamicMapping
+  enabled?: boolean
+  properties?: Record
+  include_in_parent?: boolean
+  include_in_root?: boolean
+  type: 'nested'
 }
 
-export interface DateIndexNameProcessor extends ProcessorBase {
-  date_formats: Array
-  date_rounding: DateRounding
-  field: Field
-  index_name_format: string
-  index_name_prefix: string
-  locale: string
-  timezone: string
+export interface MappingNumberProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  coerce?: boolean
+  fielddata?: IndicesNumericFielddata
+  ignore_malformed?: boolean
+  index?: boolean
+  null_value?: double
+  scaling_factor?: double
+  type: MappingNumberType
 }
 
-export type DateInterval = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'quarter' | 'year'
+export type MappingNumberType = 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer' | 'long' | 'short' | 'byte' | 'unsigned_long'
 
-export type DateMath = string
+export interface MappingObjectProperty extends MappingCorePropertyBase {
+  dynamic?: boolean | MappingDynamicMapping
+  enabled?: boolean
+  properties?: Record
+  type?: 'object'
+}
 
-export type DateMathTime = string
+export interface MappingPercolatorProperty extends MappingPropertyBase {
+  type: 'percolator'
+}
 
-export interface DateNanosProperty extends DocValuesPropertyBase {
-  boost?: double
-  format?: string
+export interface MappingPointProperty extends MappingDocValuesPropertyBase {
   ignore_malformed?: boolean
-  index?: boolean
-  null_value?: DateString
-  precision_step?: integer
-  type: 'date_nanos'
+  ignore_z_value?: boolean
+  null_value?: string
+  type: 'point'
 }
 
-export interface DateProcessor extends ProcessorBase {
-  field: Field
-  formats: Array
-  locale?: string
-  target_field: Field
-  timezone: string
+export type MappingProperty = MappingFlattenedProperty | MappingJoinProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingCoreProperty
+
+export interface MappingPropertyBase {
+  local_metadata?: Metadata
+  meta?: Record
+  name?: PropertyName
+  properties?: Record
+  ignore_above?: integer
+  dynamic?: boolean | MappingDynamicMapping
+  fields?: Record
 }
 
-export interface DateProperty extends DocValuesPropertyBase {
+export type MappingRangeProperty = MappingLongRangeProperty | MappingIpRangeProperty | MappingIntegerRangeProperty | MappingFloatRangeProperty | MappingDoubleRangeProperty | MappingDateRangeProperty
+
+export interface MappingRangePropertyBase extends MappingDocValuesPropertyBase {
   boost?: double
-  fielddata?: NumericFielddata
-  format?: string
-  ignore_malformed?: boolean
+  coerce?: boolean
   index?: boolean
-  null_value?: DateString
-  precision_step?: integer
-  type: 'date'
 }
 
-export interface DateRangeAggregation extends BucketAggregationBase {
-  field?: Field
-  format?: string
-  missing?: Missing
-  ranges?: Array
-  time_zone?: string
+export interface MappingRankFeatureProperty extends MappingPropertyBase {
+  positive_score_impact?: boolean
+  type: 'rank_feature'
 }
 
-export interface DateRangeExpression {
-  from?: DateMath | float
-  from_as_string?: string
-  to_as_string?: string
-  key?: string
-  to?: DateMath | float
-  doc_count?: long
+export interface MappingRankFeaturesProperty extends MappingPropertyBase {
+  type: 'rank_features'
 }
 
-export interface DateRangeProperty extends RangePropertyBase {
-  format?: string
-  type: 'date_range'
+export interface MappingRoutingField {
+  required: boolean
 }
 
-export type DateRounding = 's' | 'm' | 'h' | 'd' | 'w' | 'M' | 'y'
+export interface MappingRuntimeField {
+  format?: string
+  script?: Script
+  type: MappingRuntimeFieldType
+}
 
-export type DateString = string
+export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long'
 
-export type Day = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday'
MappingRuntimeFields = Record -export interface DeactivateWatchRequest extends RequestBase { - watch_id: Name +export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase { + analyzer?: string + index?: boolean + index_options?: MappingIndexOptions + max_shingle_size?: integer + norms?: boolean + search_analyzer?: string + search_quote_analyzer?: string + term_vector?: MappingTermVectorOption + type: 'search_as_you_type' } -export interface DeactivateWatchResponse extends ResponseBase { - status: ActivationStatus -} +export type MappingShapeOrientation = 'right' | 'counterclockwise' | 'ccw' | 'left' | 'clockwise' | 'cw' -export type DecayFunction = DateDecayFunction | NumericDecayFunction | GeoDecayFunction +export interface MappingShapeProperty extends MappingDocValuesPropertyBase { + coerce?: boolean + ignore_malformed?: boolean + ignore_z_value?: boolean + orientation?: MappingShapeOrientation + type: 'shape' +} -export interface DecayFunctionBase extends ScoreFunctionBase { - multi_value_mode?: MultiValueMode +export interface MappingSizeField { + enabled: boolean } -export interface DecayPlacement { - decay?: double - offset?: TScale - scale?: TScale - origin?: TOrigin +export interface MappingSourceField { + compress?: boolean + compress_threshold?: string + enabled: boolean + excludes?: string[] + includes?: string[] } -export type Decision = 'yes' | 'no' | 'worse_balance' | 'throttled' | 'awaiting_info' | 'allocation_delayed' | 'no_valid_shard_copy' | 'no_attempt' +export interface MappingSuggestContext { + name: Name + path?: Field + type: string + precision?: integer +} -export type DefaultOperator = 'AND' | 'OR' +export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' -export interface Defaults { - anomaly_detectors: AnomalyDetectors - datafeeds: Datafeeds +export interface MappingTextIndexPrefixes { + max_chars: integer + min_chars: integer } -export interface DelayedDataCheckConfig { - check_window?: Time - enabled: boolean +export interface MappingTextProperty extends MappingCorePropertyBase { + analyzer?: string + boost?: double + eager_global_ordinals?: boolean + fielddata?: boolean + fielddata_frequency_filter?: IndicesFielddataFrequencyFilter + index?: boolean + index_options?: MappingIndexOptions + index_phrases?: boolean + index_prefixes?: MappingTextIndexPrefixes + norms?: boolean + position_increment_gap?: integer + search_analyzer?: string + search_quote_analyzer?: string + term_vector?: MappingTermVectorOption + type: 'text' } -export interface DeleteAliasRequest extends RequestBase { - index: Indices - name: Names - master_timeout?: Time - timeout?: Time +export interface MappingTokenCountProperty extends MappingDocValuesPropertyBase { + analyzer?: string + boost?: double + index?: boolean + null_value?: double + enable_position_increments?: boolean + type: 'token_count' } -export interface DeleteAliasResponse extends ResponseBase { +export interface MappingTypeMapping { + all_field?: MappingAllField + date_detection?: boolean + dynamic?: boolean | MappingDynamicMapping + dynamic_date_formats?: string[] + dynamic_templates?: Record | Record[] + _field_names?: MappingFieldNamesField + index_field?: MappingIndexField + _meta?: Metadata + numeric_detection?: boolean + properties?: Record + _routing?: MappingRoutingField + _size?: MappingSizeField + _source?: MappingSourceField + runtime?: Record } -export interface DeleteAutoFollowPatternRequest extends 
RequestBase { - name: Name +export interface MappingVersionProperty extends MappingDocValuesPropertyBase { + type: 'version' } -export interface DeleteAutoFollowPatternResponse extends AcknowledgedResponseBase { +export interface MappingWildcardProperty extends MappingDocValuesPropertyBase { + type: 'wildcard' } -export interface DeleteAutoscalingPolicyRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface QueryDslBoolQuery extends QueryDslQueryBase { + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + minimum_should_match?: MinimumShouldMatch + must?: QueryDslQueryContainer | QueryDslQueryContainer[] + must_not?: QueryDslQueryContainer | QueryDslQueryContainer[] + should?: QueryDslQueryContainer | QueryDslQueryContainer[] } -export interface DeleteAutoscalingPolicyResponse extends ResponseBase { - stub: integer +export interface QueryDslBoostingQuery extends QueryDslQueryBase { + negative_boost?: double + negative?: QueryDslQueryContainer + positive?: QueryDslQueryContainer } -export interface DeleteByQueryRequest extends RequestBase { - index: Indices - type?: Types - allow_no_indices?: boolean - analyzer?: string - analyze_wildcard?: boolean - conflicts?: Conflicts - default_operator?: DefaultOperator - df?: string - expand_wildcards?: ExpandWildcards - from?: long - ignore_unavailable?: boolean - lenient?: boolean - preference?: string - query_on_query_string?: string - refresh?: boolean - request_cache?: boolean - requests_per_second?: long - routing?: Routing - q?: string - scroll?: Time - scroll_size?: long - search_timeout?: Time - search_type?: SearchType - size?: long - slices?: long - sort?: Array - source_enabled?: boolean - source_excludes?: Fields - source_includes?: Fields - stats?: Array - terminate_after?: long - timeout?: Time - version?: boolean - wait_for_active_shards?: WaitForActiveShards - wait_for_completion?: boolean - body: { - max_docs?: long - query?: QueryContainer - slice?: SlicedScroll - } +export interface QueryDslBoundingBox { + bottom_right?: QueryDslGeoLocation + top_left?: QueryDslGeoLocation + wkt?: string } -export interface DeleteByQueryResponse extends ResponseBase { - batches?: long - deleted?: long - failures?: Array - noops?: long - requests_per_second?: float - retries?: Retries - slice_id?: integer - task?: TaskId - throttled_millis?: long - throttled_until_millis?: long - timed_out?: boolean - took?: long - total?: long - version_conflicts?: long -} +export type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min' -export interface DeleteByQueryRethrottleRequest extends RequestBase { - task_id: Id - requests_per_second?: long +export interface QueryDslCombinedFieldsQuery { + query: string + fields: Field[] + operator?: string } -export interface DeleteByQueryRethrottleResponse extends ListTasksResponse { +export interface QueryDslCommonTermsQuery extends QueryDslQueryBase { + analyzer?: string + cutoff_frequency?: double + high_freq_operator?: QueryDslOperator + low_freq_operator?: QueryDslOperator + minimum_should_match?: MinimumShouldMatch + query?: string } -export interface DeleteCalendarEventRequest extends RequestBase { - calendar_id: Id - event_id: Id +export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { + filter?: QueryDslQueryContainer + boost?: float } -export interface DeleteCalendarEventResponse extends AcknowledgedResponseBase { +export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase { } +export type 
QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys | + { [property: string]: QueryDslDecayPlacement } -export interface DeleteCalendarJobRequest extends RequestBase { - calendar_id: Id - job_id: Id -} +export type QueryDslDecayFunction = QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction -export interface DeleteCalendarJobResponse extends ResponseBase { - calendar_id: Id - description?: string - job_ids: Ids +export interface QueryDslDecayFunctionBase extends QueryDslScoreFunctionBase { + multi_value_mode?: QueryDslMultiValueMode } -export interface DeleteCalendarRequest extends RequestBase { - calendar_id: Id +export interface QueryDslDecayPlacement { + decay?: double + offset?: TScale + scale?: TScale + origin?: TOrigin } -export interface DeleteCalendarResponse extends AcknowledgedResponseBase { +export interface QueryDslDisMaxQuery extends QueryDslQueryBase { + queries?: QueryDslQueryContainer[] + tie_breaker?: double + boost?: float } -export interface DeleteDanglingIndexRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface QueryDslDistanceFeatureQuery extends QueryDslQueryBase { + origin?: number[] | QueryDslGeoCoordinate | DateMath + pivot?: Distance | Time + field?: Field } -export interface DeleteDanglingIndexResponse extends ResponseBase { - stub: integer +export interface QueryDslExistsQuery extends QueryDslQueryBase { + field?: Field } -export interface DeleteDataFrameAnalyticsRequest extends RequestBase { - id: Id - force?: boolean - timeout?: Time +export interface QueryDslFieldLookup { + id?: Id + index?: IndexName + path?: Field + routing?: Routing } -export interface DeleteDataFrameAnalyticsResponse extends AcknowledgedResponseBase { +export type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal' + +export interface QueryDslFieldValueFactorScoreFunction extends QueryDslScoreFunctionBase { + field: Field + factor?: double + missing?: double + modifier?: QueryDslFieldValueFactorModifier } -export interface DeleteDatafeedRequest extends RequestBase { - datafeed_id: Id - force?: boolean +export type QueryDslFunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min' + +export interface QueryDslFunctionScoreContainer { + exp?: QueryDslDecayFunction + gauss?: QueryDslDecayFunction + linear?: QueryDslDecayFunction + field_value_factor?: QueryDslFieldValueFactorScoreFunction + random_score?: QueryDslRandomScoreFunction + script_score?: QueryDslScriptScoreFunction + filter?: QueryDslQueryContainer + weight?: double } -export interface DeleteDatafeedResponse extends AcknowledgedResponseBase { +export type QueryDslFunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min' + +export interface QueryDslFunctionScoreQuery extends QueryDslQueryBase { + boost_mode?: QueryDslFunctionBoostMode + functions?: QueryDslFunctionScoreContainer[] + max_boost?: double + min_score?: double + query?: QueryDslQueryContainer + score_mode?: QueryDslFunctionScoreMode + boost?: float } -export interface DeleteEnrichPolicyRequest extends RequestBase { - name: Name +export interface QueryDslFuzzyQuery extends QueryDslQueryBase { + max_expansions?: integer + prefix_length?: integer + rewrite?: MultiTermQueryRewrite + transpositions?: boolean + fuzziness?: Fuzziness + value: any } -export interface DeleteEnrichPolicyResponse extends AcknowledgedResponseBase { +export interface QueryDslGeoBoundingBoxQuery 
extends QueryDslQueryBase { + bounding_box?: QueryDslBoundingBox + type?: QueryDslGeoExecution + validation_method?: QueryDslGeoValidationMethod + top_left?: LatLon + bottom_right?: LatLon } -export interface DeleteExpiredDataRequest extends RequestBase { - name?: Name - requests_per_second?: float - timeout?: Time - body?: { - requests_per_second?: float - timeout?: Time - } +export type QueryDslGeoCoordinate = string | double[] | QueryDslThreeDimensionalPoint + +export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { } +export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys | + { [property: string]: QueryDslDecayPlacement } -export interface DeleteExpiredDataResponse extends ResponseBase { - deleted: boolean +export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { + distance?: Distance + distance_type?: GeoDistanceType + validation_method?: QueryDslGeoValidationMethod } +export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys | + { [property: string]: QueryDslGeoLocation } -export interface DeleteFilterRequest extends RequestBase { - filter_id: Id +export type QueryDslGeoExecution = 'memory' | 'indexed' + +export type QueryDslGeoLocation = string | double[] | QueryDslTwoDimensionalPoint + +export interface QueryDslGeoPolygonQuery extends QueryDslQueryBase { + points?: QueryDslGeoLocation[] + validation_method?: QueryDslGeoValidationMethod } -export interface DeleteFilterResponse extends AcknowledgedResponseBase { +export interface QueryDslGeoShape { + type?: string } -export interface DeleteForecastRequest extends RequestBase { - job_id: Id - forecast_id?: Id - allow_no_forecasts?: boolean - timeout?: Time +export interface QueryDslGeoShapeQuery extends QueryDslQueryBase { + ignore_unmapped?: boolean + indexed_shape?: QueryDslFieldLookup + relation?: GeoShapeRelation + shape?: QueryDslGeoShape } -export interface DeleteForecastResponse extends AcknowledgedResponseBase { +export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' + +export interface QueryDslHasChildQuery extends QueryDslQueryBase { + ignore_unmapped?: boolean + inner_hits?: SearchTypesInnerHits + max_children?: integer + min_children?: integer + query?: QueryDslQueryContainer + score_mode?: QueryDslChildScoreMode + type?: RelationName } -export interface DeleteIndexRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time +export interface QueryDslHasParentQuery extends QueryDslQueryBase { + ignore_unmapped?: boolean + inner_hits?: SearchTypesInnerHits + parent_type?: RelationName + query?: QueryDslQueryContainer + score?: boolean } -export interface DeleteIndexResponse extends IndicesResponseBase { +export interface QueryDslIdsQuery extends QueryDslQueryBase { + values?: Id[] | long[] } -export interface DeleteIndexTemplateRequest extends RequestBase { - name: Name - master_timeout?: Time - timeout?: Time +export interface QueryDslIntervalsAllOf { + intervals?: QueryDslIntervalsContainer[] + max_gaps?: integer + ordered?: boolean + filter?: QueryDslIntervalsFilter } -export interface DeleteIndexTemplateResponse extends AcknowledgedResponseBase { +export interface QueryDslIntervalsAnyOf { + intervals?: QueryDslIntervalsContainer[] + filter?: QueryDslIntervalsFilter } -export interface DeleteJobRequest extends RequestBase { - job_id: Id - force?: boolean - wait_for_completion?: boolean +export interface 
QueryDslIntervalsContainer { + all_of?: QueryDslIntervalsAllOf + any_of?: QueryDslIntervalsAnyOf + fuzzy?: QueryDslIntervalsFuzzy + match?: QueryDslIntervalsMatch + prefix?: QueryDslIntervalsPrefix + wildcard?: QueryDslIntervalsWildcard } -export interface DeleteJobResponse extends AcknowledgedResponseBase { +export interface QueryDslIntervalsFilter { + after?: QueryDslIntervalsContainer + before?: QueryDslIntervalsContainer + contained_by?: QueryDslIntervalsContainer + containing?: QueryDslIntervalsContainer + not_contained_by?: QueryDslIntervalsContainer + not_containing?: QueryDslIntervalsContainer + not_overlapping?: QueryDslIntervalsContainer + overlapping?: QueryDslIntervalsContainer + script?: Script } -export interface DeleteLicenseRequest extends RequestBase { +export interface QueryDslIntervalsFuzzy { + analyzer?: string + fuzziness?: Fuzziness + prefix_length?: integer + term?: string + transpositions?: boolean + use_field?: Field } -export interface DeleteLicenseResponse extends AcknowledgedResponseBase { +export interface QueryDslIntervalsMatch { + analyzer?: string + max_gaps?: integer + ordered?: boolean + query?: string + use_field?: Field + filter?: QueryDslIntervalsFilter } -export interface DeleteLifecycleRequest extends RequestBase { - policy?: Name - policy_id: Id +export interface QueryDslIntervalsPrefix { + analyzer?: string + prefix?: string + use_field?: Field } -export interface DeleteLifecycleResponse extends AcknowledgedResponseBase { +export interface QueryDslIntervalsQuery extends QueryDslQueryBase { + all_of?: QueryDslIntervalsAllOf + any_of?: QueryDslIntervalsAnyOf + fuzzy?: QueryDslIntervalsFuzzy + match?: QueryDslIntervalsMatch + prefix?: QueryDslIntervalsPrefix + wildcard?: QueryDslIntervalsWildcard } -export interface DeleteModelSnapshotRequest extends RequestBase { - job_id: Id - snapshot_id: Id +export interface QueryDslIntervalsWildcard { + analyzer?: string + pattern?: string + use_field?: Field } -export interface DeleteModelSnapshotResponse extends AcknowledgedResponseBase { +export type QueryDslLike = string | QueryDslLikeDocument + +export interface QueryDslLikeDocument { + doc?: any + fields?: Fields + _id?: Id | number + _type?: Type + _index?: IndexName + per_field_analyzer?: Record + routing?: Routing } -export interface DeletePipelineRequest extends RequestBase { - id: Id - master_timeout?: Time - timeout?: Time +export interface QueryDslMatchAllQuery extends QueryDslQueryBase { + norm_field?: string } -export interface DeletePipelineResponse extends AcknowledgedResponseBase { +export interface QueryDslMatchBoolPrefixQuery extends QueryDslQueryBase { + analyzer?: string + fuzziness?: Fuzziness + fuzzy_rewrite?: MultiTermQueryRewrite + fuzzy_transpositions?: boolean + max_expansions?: integer + minimum_should_match?: MinimumShouldMatch + operator?: QueryDslOperator + prefix_length?: integer + query?: string } -export interface DeletePrivilegesRequest extends RequestBase { - application: Name - name: Name - refresh?: Refresh +export interface QueryDslMatchNoneQuery extends QueryDslQueryBase { } -export interface DeletePrivilegesResponse extends DictionaryResponseBase> { +export interface QueryDslMatchPhrasePrefixQuery extends QueryDslQueryBase { + analyzer?: string + max_expansions?: integer + query?: string + slop?: integer + zero_terms_query?: QueryDslZeroTermsQuery } -export interface DeleteRepositoryRequest extends RequestBase { - repository: Names - master_timeout?: Time - timeout?: Time +export interface QueryDslMatchPhraseQuery extends 
QueryDslQueryBase {
+  analyzer?: string
+  query?: string
+  slop?: integer
 }

-export interface DeleteRepositoryResponse extends AcknowledgedResponseBase {
+export interface QueryDslMatchQuery extends QueryDslQueryBase {
+  analyzer?: string
+  auto_generate_synonyms_phrase_query?: boolean
+  cutoff_frequency?: double
+  fuzziness?: Fuzziness
+  fuzzy_rewrite?: MultiTermQueryRewrite
+  fuzzy_transpositions?: boolean
+  lenient?: boolean
+  max_expansions?: integer
+  minimum_should_match?: MinimumShouldMatch
+  operator?: QueryDslOperator
+  prefix_length?: integer
+  query?: string | float | boolean
+  zero_terms_query?: QueryDslZeroTermsQuery
 }

-export interface DeleteRequest extends RequestBase {
-  id: Id
-  index: IndexName
-  type?: Type
-  if_primary_term?: long
-  if_seq_no?: SequenceNumber
-  refresh?: Refresh
+export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase {
+  analyzer?: string
+  boost_terms?: double
+  fields?: Fields
+  include?: boolean
+  like?: QueryDslLike | QueryDslLike[]
+  max_doc_freq?: integer
+  max_query_terms?: integer
+  max_word_length?: integer
+  min_doc_freq?: integer
+  minimum_should_match?: MinimumShouldMatch
+  min_term_freq?: integer
+  min_word_length?: integer
+  per_field_analyzer?: Record
   routing?: Routing
-  timeout?: Time
+  stop_words?: AnalysisStopWords
+  unlike?: QueryDslLike | QueryDslLike[]
   version?: VersionNumber
   version_type?: VersionType
-  wait_for_active_shards?: WaitForActiveShards
 }

-export interface DeleteResponse extends WriteResponseBase {
+export interface QueryDslMultiMatchQuery extends QueryDslQueryBase {
+  analyzer?: string
+  auto_generate_synonyms_phrase_query?: boolean
+  cutoff_frequency?: double
+  fields?: Fields
+  fuzziness?: Fuzziness
+  fuzzy_rewrite?: MultiTermQueryRewrite
+  fuzzy_transpositions?: boolean
+  lenient?: boolean
+  max_expansions?: integer
+  minimum_should_match?: MinimumShouldMatch
+  operator?: QueryDslOperator
+  prefix_length?: integer
+  query?: string
+  slop?: integer
+  tie_breaker?: double
+  type?: QueryDslTextQueryType
+  use_dis_max?: boolean
+  zero_terms_query?: QueryDslZeroTermsQuery
 }

-export interface DeleteRoleMappingRequest extends RequestBase {
-  name: Name
-  refresh?: Refresh
-}
+export type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum'

-export interface DeleteRoleMappingResponse extends ResponseBase {
-  found: boolean
+export interface QueryDslNamedQueryKeys {
+  boost?: float
+  _name?: string
+  ignore_unmapped?: boolean
 }
+export type QueryDslNamedQuery<TQuery> = QueryDslNamedQueryKeys |
+  { [property: string]: TQuery }

-export interface DeleteRoleRequest extends RequestBase {
-  name: Name
-  refresh?: Refresh
+export interface QueryDslNestedQuery extends QueryDslQueryBase {
+  ignore_unmapped?: boolean
+  inner_hits?: SearchTypesInnerHits
+  path?: Field
+  query?: QueryDslQueryContainer
+  score_mode?: QueryDslNestedScoreMode
 }

-export interface DeleteRoleResponse extends ResponseBase {
-  found: boolean
-}
+export type QueryDslNestedScoreMode = 'avg' | 'sum' | 'min' | 'max' | 'none'

-export interface DeleteRollupJobRequest extends RequestBase {
-  id: Id
+export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase {
 }
+export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys |
+  { [property: string]: QueryDslDecayPlacement }
+
+export type QueryDslOperator = 'and' | 'or' | 'AND' | 'OR'

-export interface DeleteRollupJobResponse extends AcknowledgedResponseBase {
-  task_failures?: Array
+export interface QueryDslParentIdQuery extends QueryDslQueryBase {
+  id?: Id
+ 
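// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: match and multi_match clauses typed
// with the namespaced DSL above. Field names and query text are hypothetical.
const matchClause: QueryDslMatchQuery = {
  query: 'quick brown fox',
  operator: 'and'
}
const multiMatchClause: QueryDslMultiMatchQuery = {
  query: 'quick brown fox',
  fields: ['title', 'body'],
  type: 'best_fields'
}
// ---------------------------------------------------------------------------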
ignore_unmapped?: boolean + type?: RelationName } -export interface DeleteScriptRequest extends RequestBase { - id: Id - master_timeout?: Time - timeout?: Time +export interface QueryDslPercolateQuery extends QueryDslQueryBase { + document?: any + documents?: any[] + field?: Field + id?: Id + index?: IndexName + preference?: string + routing?: Routing + version?: VersionNumber } -export interface DeleteScriptResponse extends AcknowledgedResponseBase { +export interface QueryDslPinnedQuery extends QueryDslQueryBase { + ids?: Id[] | long[] + organic?: QueryDslQueryContainer } -export interface DeleteSnapshotLifecycleRequest extends RequestBase { - policy_id: Name +export interface QueryDslPrefixQuery extends QueryDslQueryBase { + rewrite?: MultiTermQueryRewrite + value: string } -export interface DeleteSnapshotLifecycleResponse extends AcknowledgedResponseBase { +export interface QueryDslQueryBase { + boost?: float + _name?: string } -export interface DeleteSnapshotRequest extends RequestBase { - repository: Name - snapshot: Name - master_timeout?: Time +export interface QueryDslQueryContainer { + bool?: QueryDslBoolQuery + boosting?: QueryDslBoostingQuery + common?: Record + combined_fields?: QueryDslCombinedFieldsQuery + constant_score?: QueryDslConstantScoreQuery + dis_max?: QueryDslDisMaxQuery + distance_feature?: Record | QueryDslDistanceFeatureQuery + exists?: QueryDslExistsQuery + function_score?: QueryDslFunctionScoreQuery + fuzzy?: Record + geo_bounding_box?: QueryDslNamedQuery + geo_distance?: QueryDslGeoDistanceQuery + geo_polygon?: QueryDslNamedQuery + geo_shape?: QueryDslNamedQuery + has_child?: QueryDslHasChildQuery + has_parent?: QueryDslHasParentQuery + ids?: QueryDslIdsQuery + intervals?: QueryDslNamedQuery + match?: QueryDslNamedQuery + match_all?: QueryDslMatchAllQuery + match_bool_prefix?: QueryDslNamedQuery + match_none?: QueryDslMatchNoneQuery + match_phrase?: QueryDslNamedQuery + match_phrase_prefix?: QueryDslNamedQuery + more_like_this?: QueryDslMoreLikeThisQuery + multi_match?: QueryDslMultiMatchQuery + nested?: QueryDslNestedQuery + parent_id?: QueryDslParentIdQuery + percolate?: QueryDslPercolateQuery + pinned?: QueryDslPinnedQuery + prefix?: QueryDslNamedQuery + query_string?: QueryDslQueryStringQuery + range?: QueryDslNamedQuery + rank_feature?: QueryDslNamedQuery + regexp?: QueryDslNamedQuery + script?: QueryDslScriptQuery + script_score?: QueryDslScriptScoreQuery + shape?: QueryDslNamedQuery + simple_query_string?: QueryDslSimpleQueryStringQuery + span_containing?: QueryDslSpanContainingQuery + field_masking_span?: QueryDslSpanFieldMaskingQuery + span_first?: QueryDslSpanFirstQuery + span_multi?: QueryDslSpanMultiTermQuery + span_near?: QueryDslSpanNearQuery + span_not?: QueryDslSpanNotQuery + span_or?: QueryDslSpanOrQuery + span_term?: QueryDslNamedQuery + span_within?: QueryDslSpanWithinQuery + template?: QueryDslQueryTemplate + term?: QueryDslNamedQuery + terms?: QueryDslNamedQuery + terms_set?: QueryDslNamedQuery + wildcard?: QueryDslNamedQuery + type?: QueryDslTypeQuery +} + +export interface QueryDslQueryStringQuery extends QueryDslQueryBase { + allow_leading_wildcard?: boolean + analyzer?: string + analyze_wildcard?: boolean + auto_generate_synonyms_phrase_query?: boolean + default_field?: Field + default_operator?: QueryDslOperator + enable_position_increments?: boolean + escape?: boolean + fields?: Fields + fuzziness?: Fuzziness + fuzzy_max_expansions?: integer + fuzzy_prefix_length?: integer + fuzzy_rewrite?: MultiTermQueryRewrite + 
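// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: composing the QueryDslQueryContainer
// defined above. The field-keyed shape of the match and range clauses follows
// QueryDslNamedQuery; index and field names are hypothetical.
const searchFilter: QueryDslQueryContainer = {
  bool: {
    must: [{ match: { title: { query: 'node.js client' } } }],
    filter: [{ range: { published: { gte: 'now-1y/d' } } }],
    must_not: { exists: { field: 'retired_at' } }
  }
}
// ---------------------------------------------------------------------------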
fuzzy_transpositions?: boolean + lenient?: boolean + max_determinized_states?: integer + minimum_should_match?: MinimumShouldMatch + phrase_slop?: double + query?: string + quote_analyzer?: string + quote_field_suffix?: string + rewrite?: MultiTermQueryRewrite + tie_breaker?: double + time_zone?: string + type?: QueryDslTextQueryType } -export interface DeleteSnapshotResponse extends AcknowledgedResponseBase { +export interface QueryDslQueryTemplate { + source: string } -export interface DeleteTrainedModelAliasRequest extends RequestBase { - model_alias: Name - model_id: Id +export interface QueryDslRandomScoreFunction extends QueryDslScoreFunctionBase { + field?: Field + seed?: long | string } -export interface DeleteTrainedModelAliasResponse extends AcknowledgedResponseBase { +export interface QueryDslRangeQuery extends QueryDslQueryBase { + gt?: double | DateMath + gte?: double | DateMath + lt?: double | DateMath + lte?: double | DateMath + relation?: QueryDslRangeRelation + time_zone?: string + from?: double | DateMath + to?: double | DateMath } -export interface DeleteTrainedModelRequest extends RequestBase { - model_id: Id +export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' + +export interface QueryDslRankFeatureFunction { } -export interface DeleteTrainedModelResponse extends AcknowledgedResponseBase { +export interface QueryDslRankFeatureQuery extends QueryDslQueryBase { + function?: QueryDslRankFeatureFunction } -export interface DeleteTransformRequest extends RequestBase { - transform_id: Name - force?: boolean +export interface QueryDslRegexpQuery extends QueryDslQueryBase { + flags?: string + max_determinized_states?: integer + value?: string } -export interface DeleteTransformResponse extends AcknowledgedResponseBase { +export interface QueryDslScoreFunctionBase { + filter?: QueryDslQueryContainer + weight?: double } -export interface DeleteUserRequest extends RequestBase { - username: Name - refresh?: Refresh +export interface QueryDslScriptQuery extends QueryDslQueryBase { + script?: Script } -export interface DeleteUserResponse extends ResponseBase { - found: boolean +export interface QueryDslScriptScoreFunction extends QueryDslScoreFunctionBase { + script: Script } -export interface DeleteWatchRequest extends RequestBase { - id: Name +export interface QueryDslScriptScoreQuery extends QueryDslQueryBase { + query?: QueryDslQueryContainer + script?: Script } -export interface DeleteWatchResponse extends ResponseBase { - found: boolean - _id: Id - _version: VersionNumber +export interface QueryDslShapeQuery extends QueryDslQueryBase { + ignore_unmapped?: boolean + indexed_shape?: QueryDslFieldLookup + relation?: ShapeRelation + shape?: QueryDslGeoShape } -export type DelimitedPayloadEncoding = 'int' | 'float' | 'identity' +export type QueryDslSimpleQueryStringFlags = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL' -export interface DelimitedPayloadTokenFilter extends TokenFilterBase { - delimiter: string - encoding: DelimitedPayloadEncoding +export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase { + analyzer?: string + analyze_wildcard?: boolean + auto_generate_synonyms_phrase_query?: boolean + default_operator?: QueryDslOperator + fields?: Fields + flags?: QueryDslSimpleQueryStringFlags | string + fuzzy_max_expansions?: integer + fuzzy_prefix_length?: integer + fuzzy_transpositions?: boolean + lenient?: boolean + minimum_should_match?: MinimumShouldMatch + 
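// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: a script_score query built from the
// types above. Assumes the Script type (declared outside this hunk) accepts an
// inline { source } object; the Painless source is hypothetical.
const boosted: QueryDslQueryContainer = {
  script_score: {
    query: { match_all: {} },
    script: { source: "_score * doc['rank'].value" }
  }
}
// ---------------------------------------------------------------------------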
query?: string + quote_field_suffix?: string } -export interface DeprecationInfo { - details: string - level: DeprecationWarningLevel - message: string - url: string +export interface QueryDslSpanContainingQuery extends QueryDslQueryBase { + big?: QueryDslSpanQuery + little?: QueryDslSpanQuery +} + +export interface QueryDslSpanFieldMaskingQuery extends QueryDslQueryBase { + field?: Field + query?: QueryDslSpanQuery } -export interface DeprecationInfoRequest extends RequestBase { - index?: IndexName +export interface QueryDslSpanFirstQuery extends QueryDslQueryBase { + end?: integer + match?: QueryDslSpanQuery } -export interface DeprecationInfoResponse extends ResponseBase { - cluster_settings: Array - index_settings: Record> - node_settings: Array - ml_settings: Array +export interface QueryDslSpanGapQuery extends QueryDslQueryBase { + field?: Field + width?: integer } -export type DeprecationWarningLevel = 'none' | 'info' | 'warning' | 'critical' +export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase { + match?: QueryDslQueryContainer +} -export interface DerivativeAggregation extends PipelineAggregationBase { +export interface QueryDslSpanNearQuery extends QueryDslQueryBase { + clauses?: QueryDslSpanQuery[] + in_order?: boolean + slop?: integer } -export interface DetectionRule { - actions: Array - conditions: Array - scope?: Record +export interface QueryDslSpanNotQuery extends QueryDslQueryBase { + dist?: integer + exclude?: QueryDslSpanQuery + include?: QueryDslSpanQuery + post?: integer + pre?: integer } -export interface Detector { - by_field_name?: Field - custom_rules?: Array - detector_description?: string - detector_index?: integer - exclude_frequent?: ExcludeFrequent - field_name?: Field - function: string - use_null?: boolean - over_field_name?: Field - partition_field_name?: Field +export interface QueryDslSpanOrQuery extends QueryDslQueryBase { + clauses?: QueryDslSpanQuery[] } -export interface DictionaryResponseBase extends ResponseBase { - [key: string]: TValue +export interface QueryDslSpanQuery extends QueryDslQueryBase { + span_containing?: QueryDslNamedQuery + field_masking_span?: QueryDslNamedQuery + span_first?: QueryDslNamedQuery + span_gap?: QueryDslNamedQuery + span_multi?: QueryDslSpanMultiTermQuery + span_near?: QueryDslNamedQuery + span_not?: QueryDslNamedQuery + span_or?: QueryDslNamedQuery + span_term?: QueryDslNamedQuery + span_within?: QueryDslNamedQuery } -export interface DirectGenerator { - field: Field - max_edits?: integer - max_inspections?: float - max_term_freq?: float - min_doc_freq?: float - min_word_length?: integer - post_filter?: string - pre_filter?: string - prefix_length?: integer - size?: integer - suggest_mode?: SuggestMode +export interface QueryDslSpanTermQuery extends QueryDslQueryBase { + value: string } -export interface DisMaxQuery extends QueryBase { - queries?: Array - tie_breaker?: double - boost?: float +export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { + big?: QueryDslSpanQuery + little?: QueryDslSpanQuery } -export interface DisableUserRequest extends RequestBase { - username: Name - refresh?: Refresh +export interface QueryDslTermQuery extends QueryDslQueryBase { + value?: string | float | boolean } -export interface DisableUserResponse extends ResponseBase { +export interface QueryDslTermsQuery extends QueryDslQueryBase { + terms?: string[] + index?: IndexName + id?: Id + path?: string + routing?: Routing } -export interface DiscoveryNode { - attributes: Record - ephemeral_id: Id - id: Id 
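// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: a span_near query assembled from the
// span query types above; the 'title' field and its terms are hypothetical.
const nearby: QueryDslSpanNearQuery = {
  clauses: [
    { span_term: { title: { value: 'quick' } } },
    { span_term: { title: { value: 'fox' } } }
  ],
  slop: 2,
  in_order: true
}
// ---------------------------------------------------------------------------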
- name: Name - transport_address: string +export interface QueryDslTermsSetQuery extends QueryDslQueryBase { + minimum_should_match_field?: Field + minimum_should_match_script?: Script + terms?: string[] } -export interface DiskUsage { - path: string - total_bytes: long - used_bytes: long - free_bytes: long - free_disk_percent: double - used_disk_percent: double +export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' + +export interface QueryDslThreeDimensionalPoint { + lat: double + lon: double + z?: double } -export interface DissectProcessor extends ProcessorBase { - append_separator: string - field: Field - ignore_missing: boolean - pattern: string +export interface QueryDslTwoDimensionalPoint { + lat: double + lon: double } -export type Distance = string +export interface QueryDslTypeQuery extends QueryDslQueryBase { + value: string +} -export interface DistanceFeatureQuery extends QueryBase { - origin?: Array | GeoCoordinate | DateMath - pivot?: Distance | Time - field?: Field +export interface QueryDslWildcardQuery extends QueryDslQueryBase { + rewrite?: MultiTermQueryRewrite + value: string } -export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm' +export type QueryDslZeroTermsQuery = 'all' | 'none' -export interface DiversifiedSamplerAggregation extends BucketAggregationBase { - execution_hint?: SamplerAggregationExecutionHint - max_docs_per_value?: integer - script?: Script - shard_size?: integer - field?: Field +export interface AsyncSearchAsyncSearch { + aggregations?: Record + _clusters?: ClusterStatistics + fields?: Record + hits: SearchTypesHitsMetadata + max_score?: double + num_reduce_phases?: long + profile?: SearchTypesProfile + pit_id?: Id + _scroll_id?: Id + _shards: ShardStatistics + suggest?: Record[]> + terminated_early?: boolean + timed_out: boolean + took: long } -export interface DocStats { - count: long - deleted: long +export interface AsyncSearchAsyncSearchDocumentResponseBase extends AsyncSearchAsyncSearchResponseBase { + response: AsyncSearchAsyncSearch } -export interface DocValueField { - field: Field - format?: string +export interface AsyncSearchAsyncSearchResponseBase { + id?: Id + is_partial: boolean + is_running: boolean + expiration_time_in_millis: EpochMillis + start_time_in_millis: EpochMillis } -export type DocValuesProperty = BinaryProperty | BooleanProperty | DateProperty | DateNanosProperty | KeywordProperty | NumberProperty | RangeProperty | GeoPointProperty | GeoShapeProperty | CompletionProperty | GenericProperty | IpProperty | Murmur3HashProperty | ShapeProperty | TokenCountProperty | VersionProperty | WildcardProperty | PointProperty +export interface AsyncSearchDeleteRequest extends RequestBase { + id: Id +} -export interface DocValuesPropertyBase extends CorePropertyBase { - doc_values?: boolean +export interface AsyncSearchDeleteResponse extends AcknowledgedResponseBase { } -export interface DocumentExistsRequest extends RequestBase { +export interface AsyncSearchGetRequest extends RequestBase { id: Id - index: IndexName - type?: Type - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - source_enabled?: boolean - source_excludes?: Fields - source_includes?: Fields - stored_fields?: Fields - version?: VersionNumber - version_type?: VersionType + keep_alive?: Time + typed_keys?: boolean + wait_for_completion_timeout?: Time } -export type DocumentExistsResponse = boolean +export interface 
AsyncSearchGetResponse extends AsyncSearchAsyncSearchDocumentResponseBase { +} -export interface DocumentSimulation { - _id: Id - _index: IndexName - _ingest: Ingest - _parent?: string - _routing?: string - _source: Record - _type?: Type +export interface AsyncSearchStatusRequest extends RequestBase { + id: Id } -export interface DotExpanderProcessor extends ProcessorBase { - field: Field - path?: string +export interface AsyncSearchStatusResponse extends AsyncSearchAsyncSearchResponseBase { + _shards: ShardStatistics + completion_status: integer } -export interface DoubleRangeProperty extends RangePropertyBase { - type: 'double_range' +export interface AsyncSearchSubmitRequest extends RequestBase { + index?: Indices + batched_reduce_size?: long + wait_for_completion_timeout?: Time + keep_on_completion?: boolean + typed_keys?: boolean + body?: { + aggs?: Record + allow_no_indices?: boolean + allow_partial_search_results?: boolean + analyzer?: string + analyze_wildcard?: boolean + batched_reduce_size?: long + collapse?: SearchTypesFieldCollapse + default_operator?: DefaultOperator + df?: string + docvalue_fields?: Fields + expand_wildcards?: ExpandWildcards + explain?: boolean + from?: integer + highlight?: SearchTypesHighlight + ignore_throttled?: boolean + ignore_unavailable?: boolean + indices_boost?: Record[] + keep_alive?: Time + keep_on_completion?: boolean + lenient?: boolean + max_concurrent_shard_requests?: long + min_score?: double + post_filter?: QueryDslQueryContainer + preference?: string + profile?: boolean + pit?: SearchTypesPointInTimeReference + query?: QueryDslQueryContainer + query_on_query_string?: string + request_cache?: boolean + rescore?: SearchTypesRescore[] + routing?: Routing + script_fields?: Record + search_after?: any[] + search_type?: SearchType + sequence_number_primary_term?: boolean + size?: integer + sort?: SearchTypesSort + _source?: boolean | SearchTypesSourceFilter + stats?: string[] + stored_fields?: Fields + suggest?: Record + suggest_field?: Field + suggest_mode?: SuggestMode + suggest_size?: long + suggest_text?: string + terminate_after?: long + timeout?: string + track_scores?: boolean + track_total_hits?: boolean + typed_keys?: boolean + version?: boolean + wait_for_completion_timeout?: Time + fields?: (Field | DateField)[] + } } -export interface DropProcessor extends ProcessorBase { +export interface AsyncSearchSubmitResponse extends AsyncSearchAsyncSearchDocumentResponseBase { } -export type DynamicMapping = 'strict' | 'runtime' | 'true' | 'false' +export interface AutoscalingCapacityGetRequest extends RequestBase { + stub_a: string + stub_b: string + body?: { + stub_c: string + } +} -export interface DynamicTemplate { - mapping?: PropertyBase - match?: string - match_mapping_type?: string - match_pattern?: MatchType - path_match?: string - path_unmatch?: string - unmatch?: string +export interface AutoscalingCapacityGetResponse { + stub: integer } -export type EdgeNGramSide = 'front' | 'back' +export interface AutoscalingPolicyDeleteRequest extends RequestBase { + stub_a: string + stub_b: string + body?: { + stub_c: string + } +} -export interface EdgeNGramTokenFilter extends TokenFilterBase { - max_gram: integer - min_gram: integer - side: EdgeNGramSide +export interface AutoscalingPolicyDeleteResponse { + stub: integer } -export interface EdgeNGramTokenizer extends TokenizerBase { - custom_token_chars: string - max_gram: integer - min_gram: integer - token_chars: Array +export interface AutoscalingPolicyGetRequest extends RequestBase { + 
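// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: submitting and then polling an async
// search with the request shapes above. The index name, timeouts, and the
// returned id are hypothetical.
const submit: AsyncSearchSubmitRequest = {
  index: 'logs-*',
  keep_on_completion: true,
  wait_for_completion_timeout: '2s',
  body: {
    size: 0,
    query: { term: { 'event.outcome': { value: 'failure' } } }
  }
}
const poll: AsyncSearchGetRequest = { id: '<id from the submit response>', keep_alive: '5m' }
// ---------------------------------------------------------------------------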
stub_a: string + stub_b: string + body?: { + stub_c: string + } } -export interface ElasticsearchVersionInfo { - build_date: DateString - build_flavor: string - build_hash: string - build_snapshot: boolean - build_type: string - lucene_version: VersionString - minimum_index_compatibility_version: VersionString - minimum_wire_compatibility_version: VersionString - number: string +export interface AutoscalingPolicyGetResponse { + stub: integer } -export interface ElisionTokenFilter extends TokenFilterBase { - articles: Array - articles_case: boolean +export interface AutoscalingPolicyPutRequest extends RequestBase { + stub_a: string + stub_b: string + body?: { + stub_c: string + } } -export interface EmailActionResult { - account?: string - message: EmailResult - reason?: string +export interface AutoscalingPolicyPutResponse { + stub: integer } -export interface EmailBody { - html: string - text: string +export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } -export type EmailPriority = 'lowest' | 'low' | 'normal' | 'high' | 'highest' +export interface CatAliasesAliasesRecord { + alias?: string + a?: string + index?: IndexName + i?: IndexName + idx?: IndexName + filter?: string + f?: string + fi?: string + 'routing.index'?: string + ri?: string + routingIndex?: string + 'routing.search'?: string + rs?: string + routingSearch?: string + is_write_index?: string + w?: string + isWriteIndex?: string +} -export interface EmailResult { - bcc?: Array - body?: EmailBody - cc?: Array - from?: string - id: Id - priority?: EmailPriority - reply_to?: Array - sent_date: DateString - subject: string - to: Array +export interface CatAliasesRequest extends CatCatRequestBase { + name?: Names + expand_wildcards?: ExpandWildcards } -export interface EmptyObject { +export type CatAliasesResponse = CatAliasesAliasesRecord[] + +export interface CatAllocationAllocationRecord { + shards?: string + s?: string + 'disk.indices'?: ByteSize + di?: ByteSize + diskIndices?: ByteSize + 'disk.used'?: ByteSize + du?: ByteSize + diskUsed?: ByteSize + 'disk.avail'?: ByteSize + da?: ByteSize + diskAvail?: ByteSize + 'disk.total'?: ByteSize + dt?: ByteSize + diskTotal?: ByteSize + 'disk.percent'?: Percentage + dp?: Percentage + diskPercent?: Percentage + host?: Host + h?: Host + ip?: Ip + node?: string + n?: string } -export interface EnableUserRequest extends RequestBase { - username: Name - refresh?: Refresh +export interface CatAllocationRequest extends CatCatRequestBase { + node_id?: NodeIds + bytes?: Bytes } -export interface EnableUserResponse extends ResponseBase { -} +export type CatAllocationResponse = CatAllocationAllocationRecord[] -export interface EnrichPolicy { - enrich_fields: Fields - indices: Indices - match_field: Field - query?: string +export interface CatCountCountRecord { + epoch?: EpochMillis + t?: EpochMillis + time?: EpochMillis + timestamp?: DateString + ts?: DateString + hms?: DateString + hhmmss?: DateString + count?: string + dc?: string + 'docs.count'?: string + docsCount?: string } -export type EnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' - -export interface EnrichProcessor extends ProcessorBase { - field: Field - ignore_missing?: boolean - max_matches?: integer - override?: boolean - policy_name: string - shape_relation?: GeoShapeRelation - target_field: Field +export interface CatCountRequest extends CatCatRequestBase { + index?: Indices } -export interface EnrichStatsRequest extends RequestBase { -} +export type CatCountResponse = 
CatCountCountRecord[] -export interface EnrichStatsResponse extends ResponseBase { - coordinator_stats: Array - executing_policies: Array +export interface CatDataFrameAnalyticsDataFrameAnalyticsRecord { + id?: Id + type?: Type + t?: Type + create_time?: string + ct?: string + createTime?: string + version?: VersionString + v?: VersionString + source_index?: IndexName + si?: IndexName + sourceIndex?: IndexName + dest_index?: IndexName + di?: IndexName + destIndex?: IndexName + description?: string + d?: string + model_memory_limit?: string + mml?: string + modelMemoryLimit?: string + state?: string + s?: string + failure_reason?: string + fr?: string + failureReason?: string + progress?: string + p?: string + assignment_explanation?: string + ae?: string + assignmentExplanation?: string + 'node.id'?: Id + ni?: Id + nodeId?: Id + 'node.name'?: Name + nn?: Name + nodeName?: Name + 'node.ephemeral_id'?: Id + ne?: Id + nodeEphemeralId?: Id + 'node.address'?: string + na?: string + nodeAddress?: string } -export type EpochMillis = string | long - -export interface EqlDeleteRequest extends RequestBase { - id: Id +export interface CatDataFrameAnalyticsRequest extends CatCatRequestBase { + id?: Id + allow_no_match?: boolean + bytes?: Bytes } -export interface EqlDeleteResponse extends AcknowledgedResponseBase { -} +export type CatDataFrameAnalyticsResponse = CatDataFrameAnalyticsDataFrameAnalyticsRecord[] -export interface EqlFeaturesJoinUsage { - join_queries_two: uint - join_queries_three: uint - join_until: uint - join_queries_five_or_more: uint - join_queries_four: uint +export interface CatDatafeedsDatafeedsRecord { + id?: string + state?: MlDatafeedState + s?: MlDatafeedState + assignment_explanation?: string + ae?: string + 'buckets.count'?: string + bc?: string + bucketsCount?: string + 'search.count'?: string + sc?: string + searchCount?: string + 'search.time'?: string + st?: string + searchTime?: string + 'search.bucket_avg'?: string + sba?: string + searchBucketAvg?: string + 'search.exp_avg_hour'?: string + seah?: string + searchExpAvgHour?: string + 'node.id'?: string + ni?: string + nodeId?: string + 'node.name'?: string + nn?: string + nodeName?: string + 'node.ephemeral_id'?: string + ne?: string + nodeEphemeralId?: string + 'node.address'?: string + na?: string + nodeAddress?: string } -export interface EqlFeaturesKeysUsage { - join_keys_two: uint - join_keys_one: uint - join_keys_three: uint - join_keys_five_or_more: uint - join_keys_four: uint +export interface CatDatafeedsRequest extends CatCatRequestBase { + datafeed_id?: Id + allow_no_datafeeds?: boolean } -export interface EqlFeaturesPipesUsage { - pipe_tail: uint - pipe_head: uint -} +export type CatDatafeedsResponse = CatDatafeedsDatafeedsRecord[] -export interface EqlFeaturesSequencesUsage { - sequence_queries_three: uint - sequence_queries_four: uint - sequence_queries_two: uint - sequence_until: uint - sequence_queries_five_or_more: uint - sequence_maxspan: uint +export interface CatFielddataFielddataRecord { + id?: string + host?: string + h?: string + ip?: string + node?: string + n?: string + field?: string + f?: string + size?: string } -export interface EqlFeaturesUsage { - join: uint - joins: EqlFeaturesJoinUsage - keys: EqlFeaturesKeysUsage - event: uint - pipes: EqlFeaturesPipesUsage - sequence: uint - sequences: EqlFeaturesSequencesUsage +export interface CatFielddataRequest extends CatCatRequestBase { + fields?: Fields + bytes?: Bytes } -export interface EqlGetRequest extends RequestBase { - id: Id - 
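// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: every Cat*Response above is an array
// of row records, with each column also exposed under its short alias.
declare const datafeeds: CatDatafeedsResponse
for (const row of datafeeds) {
  // 'state' and its alias 's' carry the same MlDatafeedState value
  console.log(row.id, row.state ?? row.s)
}
// ---------------------------------------------------------------------------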
keep_alive?: Time - wait_for_completion_timeout?: Time -} +export type CatFielddataResponse = CatFielddataFielddataRecord[] -export interface EqlGetResponse extends EqlSearchResponseBase { +export interface CatHealthHealthRecord { + epoch?: EpochMillis + time?: EpochMillis + timestamp?: DateString + ts?: DateString + hms?: DateString + hhmmss?: DateString + cluster?: string + cl?: string + status?: string + st?: string + 'node.total'?: string + nt?: string + nodeTotal?: string + 'node.data'?: string + nd?: string + nodeData?: string + shards?: string + t?: string + sh?: string + 'shards.total'?: string + shardsTotal?: string + pri?: string + p?: string + 'shards.primary'?: string + shardsPrimary?: string + relo?: string + r?: string + 'shards.relocating'?: string + shardsRelocating?: string + init?: string + i?: string + 'shards.initializing'?: string + shardsInitializing?: string + unassign?: string + u?: string + 'shards.unassigned'?: string + shardsUnassigned?: string + pending_tasks?: string + pt?: string + pendingTasks?: string + max_task_wait_time?: string + mtwt?: string + maxTaskWaitTime?: string + active_shards_percent?: string + asp?: string + activeShardsPercent?: string } -export interface EqlGetStatusRequest extends RequestBase { - id: Id +export interface CatHealthRequest extends CatCatRequestBase { + include_timestamp?: boolean + ts?: boolean } -export interface EqlGetStatusResponse extends ResponseBase { - id: Id - is_partial: boolean - is_running: boolean - start_time_in_millis?: EpochMillis - expiration_time_in_millis?: EpochMillis - completion_status?: integer -} +export type CatHealthResponse = CatHealthHealthRecord[] -export interface EqlHits { - total?: TotalHits - events?: Array> - sequences?: Array> +export interface CatHelpHelpRecord { + endpoint: string } -export interface EqlHitsEvent { - _index: IndexName - _id: Id - _source: TEvent - fields?: Record> +export interface CatHelpRequest extends CatCatRequestBase { } -export interface EqlHitsSequence { - events: Array> - join_keys: Array -} +export type CatHelpResponse = CatHelpHelpRecord[] -export interface EqlSearchFieldFormatted { - field: Field - format: string +export interface CatIndicesIndicesRecord { + health?: string + h?: string + status?: string + s?: string + index?: string + i?: string + idx?: string + uuid?: string + id?: string + pri?: string + p?: string + 'shards.primary'?: string + shardsPrimary?: string + rep?: string + r?: string + 'shards.replica'?: string + shardsReplica?: string + 'docs.count'?: string + dc?: string + docsCount?: string + 'docs.deleted'?: string + dd?: string + docsDeleted?: string + 'creation.date'?: string + cd?: string + 'creation.date.string'?: string + cds?: string + 'store.size'?: string + ss?: string + storeSize?: string + 'pri.store.size'?: string + 'completion.size'?: string + cs?: string + completionSize?: string + 'pri.completion.size'?: string + 'fielddata.memory_size'?: string + fm?: string + fielddataMemory?: string + 'pri.fielddata.memory_size'?: string + 'fielddata.evictions'?: string + fe?: string + fielddataEvictions?: string + 'pri.fielddata.evictions'?: string + 'query_cache.memory_size'?: string + qcm?: string + queryCacheMemory?: string + 'pri.query_cache.memory_size'?: string + 'query_cache.evictions'?: string + qce?: string + queryCacheEvictions?: string + 'pri.query_cache.evictions'?: string + 'request_cache.memory_size'?: string + rcm?: string + requestCacheMemory?: string + 'pri.request_cache.memory_size'?: string + 'request_cache.evictions'?: string 
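// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: dotted cat column names such as
// 'node.total' require bracket access on the record types above.
declare const health: CatHealthResponse
for (const row of health.filter(r => r.status !== 'green')) {
  console.log(row.cluster, row['node.total'], row['shards.unassigned'])
}
// ---------------------------------------------------------------------------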
+ rce?: string + requestCacheEvictions?: string + 'pri.request_cache.evictions'?: string + 'request_cache.hit_count'?: string + rchc?: string + requestCacheHitCount?: string + 'pri.request_cache.hit_count'?: string + 'request_cache.miss_count'?: string + rcmc?: string + requestCacheMissCount?: string + 'pri.request_cache.miss_count'?: string + 'flush.total'?: string + ft?: string + flushTotal?: string + 'pri.flush.total'?: string + 'flush.total_time'?: string + ftt?: string + flushTotalTime?: string + 'pri.flush.total_time'?: string + 'get.current'?: string + gc?: string + getCurrent?: string + 'pri.get.current'?: string + 'get.time'?: string + gti?: string + getTime?: string + 'pri.get.time'?: string + 'get.total'?: string + gto?: string + getTotal?: string + 'pri.get.total'?: string + 'get.exists_time'?: string + geti?: string + getExistsTime?: string + 'pri.get.exists_time'?: string + 'get.exists_total'?: string + geto?: string + getExistsTotal?: string + 'pri.get.exists_total'?: string + 'get.missing_time'?: string + gmti?: string + getMissingTime?: string + 'pri.get.missing_time'?: string + 'get.missing_total'?: string + gmto?: string + getMissingTotal?: string + 'pri.get.missing_total'?: string + 'indexing.delete_current'?: string + idc?: string + indexingDeleteCurrent?: string + 'pri.indexing.delete_current'?: string + 'indexing.delete_time'?: string + idti?: string + indexingDeleteTime?: string + 'pri.indexing.delete_time'?: string + 'indexing.delete_total'?: string + idto?: string + indexingDeleteTotal?: string + 'pri.indexing.delete_total'?: string + 'indexing.index_current'?: string + iic?: string + indexingIndexCurrent?: string + 'pri.indexing.index_current'?: string + 'indexing.index_time'?: string + iiti?: string + indexingIndexTime?: string + 'pri.indexing.index_time'?: string + 'indexing.index_total'?: string + iito?: string + indexingIndexTotal?: string + 'pri.indexing.index_total'?: string + 'indexing.index_failed'?: string + iif?: string + indexingIndexFailed?: string + 'pri.indexing.index_failed'?: string + 'merges.current'?: string + mc?: string + mergesCurrent?: string + 'pri.merges.current'?: string + 'merges.current_docs'?: string + mcd?: string + mergesCurrentDocs?: string + 'pri.merges.current_docs'?: string + 'merges.current_size'?: string + mcs?: string + mergesCurrentSize?: string + 'pri.merges.current_size'?: string + 'merges.total'?: string + mt?: string + mergesTotal?: string + 'pri.merges.total'?: string + 'merges.total_docs'?: string + mtd?: string + mergesTotalDocs?: string + 'pri.merges.total_docs'?: string + 'merges.total_size'?: string + mts?: string + mergesTotalSize?: string + 'pri.merges.total_size'?: string + 'merges.total_time'?: string + mtt?: string + mergesTotalTime?: string + 'pri.merges.total_time'?: string + 'refresh.total'?: string + rto?: string + refreshTotal?: string + 'pri.refresh.total'?: string + 'refresh.time'?: string + rti?: string + refreshTime?: string + 'pri.refresh.time'?: string + 'refresh.external_total'?: string + reto?: string + 'pri.refresh.external_total'?: string + 'refresh.external_time'?: string + reti?: string + 'pri.refresh.external_time'?: string + 'refresh.listeners'?: string + rli?: string + refreshListeners?: string + 'pri.refresh.listeners'?: string + 'search.fetch_current'?: string + sfc?: string + searchFetchCurrent?: string + 'pri.search.fetch_current'?: string + 'search.fetch_time'?: string + sfti?: string + searchFetchTime?: string + 'pri.search.fetch_time'?: string + 'search.fetch_total'?: string + sfto?: 
string + searchFetchTotal?: string + 'pri.search.fetch_total'?: string + 'search.open_contexts'?: string + so?: string + searchOpenContexts?: string + 'pri.search.open_contexts'?: string + 'search.query_current'?: string + sqc?: string + searchQueryCurrent?: string + 'pri.search.query_current'?: string + 'search.query_time'?: string + sqti?: string + searchQueryTime?: string + 'pri.search.query_time'?: string + 'search.query_total'?: string + sqto?: string + searchQueryTotal?: string + 'pri.search.query_total'?: string + 'search.scroll_current'?: string + scc?: string + searchScrollCurrent?: string + 'pri.search.scroll_current'?: string + 'search.scroll_time'?: string + scti?: string + searchScrollTime?: string + 'pri.search.scroll_time'?: string + 'search.scroll_total'?: string + scto?: string + searchScrollTotal?: string + 'pri.search.scroll_total'?: string + 'segments.count'?: string + sc?: string + segmentsCount?: string + 'pri.segments.count'?: string + 'segments.memory'?: string + sm?: string + segmentsMemory?: string + 'pri.segments.memory'?: string + 'segments.index_writer_memory'?: string + siwm?: string + segmentsIndexWriterMemory?: string + 'pri.segments.index_writer_memory'?: string + 'segments.version_map_memory'?: string + svmm?: string + segmentsVersionMapMemory?: string + 'pri.segments.version_map_memory'?: string + 'segments.fixed_bitset_memory'?: string + sfbm?: string + fixedBitsetMemory?: string + 'pri.segments.fixed_bitset_memory'?: string + 'warmer.current'?: string + wc?: string + warmerCurrent?: string + 'pri.warmer.current'?: string + 'warmer.total'?: string + wto?: string + warmerTotal?: string + 'pri.warmer.total'?: string + 'warmer.total_time'?: string + wtt?: string + warmerTotalTime?: string + 'pri.warmer.total_time'?: string + 'suggest.current'?: string + suc?: string + suggestCurrent?: string + 'pri.suggest.current'?: string + 'suggest.time'?: string + suti?: string + suggestTime?: string + 'pri.suggest.time'?: string + 'suggest.total'?: string + suto?: string + suggestTotal?: string + 'pri.suggest.total'?: string + 'memory.total'?: string + tm?: string + memoryTotal?: string + 'pri.memory.total'?: string + 'search.throttled'?: string + sth?: string + 'bulk.total_operations'?: string + bto?: string + bulkTotalOperation?: string + 'pri.bulk.total_operations'?: string + 'bulk.total_time'?: string + btti?: string + bulkTotalTime?: string + 'pri.bulk.total_time'?: string + 'bulk.total_size_in_bytes'?: string + btsi?: string + bulkTotalSizeInBytes?: string + 'pri.bulk.total_size_in_bytes'?: string + 'bulk.avg_time'?: string + bati?: string + bulkAvgTime?: string + 'pri.bulk.avg_time'?: string + 'bulk.avg_size_in_bytes'?: string + basi?: string + bulkAvgSizeInBytes?: string + 'pri.bulk.avg_size_in_bytes'?: string } -export interface EqlSearchRequest extends RequestBase { - index: IndexName - allow_no_indices?: boolean +export interface CatIndicesRequest extends CatCatRequestBase { + index?: Indices + bytes?: Bytes expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - keep_alive?: Time - keep_on_completion?: boolean - wait_for_completion_timeout?: Time - body: { - query: string - case_sensitive?: boolean - event_category_field?: Field - tiebreaker_field?: Field - timestamp_field?: Field - fetch_size?: uint - filter?: QueryContainer | Array - keep_alive?: Time - keep_on_completion?: boolean - wait_for_completion_timeout?: Time - size?: integer | float - fields?: Array - } + health?: Health + include_unloaded_segments?: boolean + pri?: boolean } -export 
interface EqlSearchResponse extends EqlSearchResponseBase { -} +export type CatIndicesResponse = CatIndicesIndicesRecord[] -export interface EqlSearchResponseBase extends ResponseBase { +export interface CatJobsJobsRecord { id?: Id - is_partial?: boolean - is_running?: boolean - took?: integer - timed_out?: boolean - hits: EqlHits -} - -export interface EqlUsage extends XPackUsage { - features: EqlFeaturesUsage - queries: Record -} - -export interface ErrorCause { - type: string - reason: string - caused_by?: ErrorCause - shard?: integer | string - stack_trace?: string - root_cause?: Array - bytes_limit?: long - bytes_wanted?: long - column?: integer - col?: integer - failed_shards?: Array - grouped?: boolean - index?: IndexName - index_uuid?: Uuid - language?: string - licensed_expired_feature?: string - line?: integer - max_buckets?: integer - phase?: string - property_name?: string - processor_type?: string - resource_id?: Ids - 'resource.id'?: Ids - resource_type?: string - 'resource.type'?: string - script?: string - script_stack?: Array - header?: Record - lang?: string - position?: PainlessExecutionPosition -} - -export interface ErrorResponse { - error: MainError - status: integer -} - -export interface EstimateModelMemoryRequest extends RequestBase { - body: { - analysis_config?: AnalysisConfig - max_bucket_cardinality?: Record - overall_cardinality?: Record - } -} - -export interface EstimateModelMemoryResponse extends ResponseBase { - model_memory_estimate: string -} - -export interface EwmaModelSettings { - alpha?: float -} - -export type ExcludeFrequent = 'all' | 'none' | 'by' | 'over' - -export interface ExecuteEnrichPolicyRequest extends RequestBase { - name: Name - wait_for_completion?: boolean -} - -export interface ExecuteEnrichPolicyResponse extends ResponseBase { - status: ExecuteEnrichPolicyStatus - task_id?: TaskId -} - -export interface ExecuteEnrichPolicyStatus { - phase: EnrichPolicyPhase -} - -export interface ExecutePainlessScriptRequest extends RequestBase { - body?: { - context?: string - context_setup?: PainlessContextSetup - script?: InlineScript - } -} - -export interface ExecutePainlessScriptResponse extends ResponseBase { - result: TResult -} - -export interface ExecuteRetentionRequest extends RequestBase { -} - -export interface ExecuteRetentionResponse extends AcknowledgedResponseBase { + state?: MlJobState + s?: MlJobState + opened_time?: string + ot?: string + assignment_explanation?: string + ae?: string + 'data.processed_records'?: string + dpr?: string + dataProcessedRecords?: string + 'data.processed_fields'?: string + dpf?: string + dataProcessedFields?: string + 'data.input_bytes'?: ByteSize + dib?: ByteSize + dataInputBytes?: ByteSize + 'data.input_records'?: string + dir?: string + dataInputRecords?: string + 'data.input_fields'?: string + dif?: string + dataInputFields?: string + 'data.invalid_dates'?: string + did?: string + dataInvalidDates?: string + 'data.missing_fields'?: string + dmf?: string + dataMissingFields?: string + 'data.out_of_order_timestamps'?: string + doot?: string + dataOutOfOrderTimestamps?: string + 'data.empty_buckets'?: string + deb?: string + dataEmptyBuckets?: string + 'data.sparse_buckets'?: string + dsb?: string + dataSparseBuckets?: string + 'data.buckets'?: string + db?: string + dataBuckets?: string + 'data.earliest_record'?: string + der?: string + dataEarliestRecord?: string + 'data.latest_record'?: string + dlr?: string + dataLatestRecord?: string + 'data.last'?: string + dl?: string + dataLast?: string + 
'data.last_empty_bucket'?: string + dleb?: string + dataLastEmptyBucket?: string + 'data.last_sparse_bucket'?: string + dlsb?: string + dataLastSparseBucket?: string + 'model.bytes'?: ByteSize + mb?: ByteSize + modelBytes?: ByteSize + 'model.memory_status'?: CatJobsModelMemoryStatus + mms?: CatJobsModelMemoryStatus + modelMemoryStatus?: CatJobsModelMemoryStatus + 'model.bytes_exceeded'?: ByteSize + mbe?: ByteSize + modelBytesExceeded?: ByteSize + 'model.memory_limit'?: string + mml?: string + modelMemoryLimit?: string + 'model.by_fields'?: string + mbf?: string + modelByFields?: string + 'model.over_fields'?: string + mof?: string + modelOverFields?: string + 'model.partition_fields'?: string + mpf?: string + modelPartitionFields?: string + 'model.bucket_allocation_failures'?: string + mbaf?: string + modelBucketAllocationFailures?: string + 'model.categorization_status'?: CatJobsModelCategorizationStatus + mcs?: CatJobsModelCategorizationStatus + modelCategorizationStatus?: CatJobsModelCategorizationStatus + 'model.categorized_doc_count'?: string + mcdc?: string + modelCategorizedDocCount?: string + 'model.total_category_count'?: string + mtcc?: string + modelTotalCategoryCount?: string + 'model.frequent_category_count'?: string + modelFrequentCategoryCount?: string + 'model.rare_category_count'?: string + mrcc?: string + modelRareCategoryCount?: string + 'model.dead_category_count'?: string + mdcc?: string + modelDeadCategoryCount?: string + 'model.failed_category_count'?: string + mfcc?: string + modelFailedCategoryCount?: string + 'model.log_time'?: string + mlt?: string + modelLogTime?: string + 'model.timestamp'?: string + mt?: string + modelTimestamp?: string + 'forecasts.total'?: string + ft?: string + forecastsTotal?: string + 'forecasts.memory.min'?: string + fmmin?: string + forecastsMemoryMin?: string + 'forecasts.memory.max'?: string + fmmax?: string + forecastsMemoryMax?: string + 'forecasts.memory.avg'?: string + fmavg?: string + forecastsMemoryAvg?: string + 'forecasts.memory.total'?: string + fmt?: string + forecastsMemoryTotal?: string + 'forecasts.records.min'?: string + frmin?: string + forecastsRecordsMin?: string + 'forecasts.records.max'?: string + frmax?: string + forecastsRecordsMax?: string + 'forecasts.records.avg'?: string + fravg?: string + forecastsRecordsAvg?: string + 'forecasts.records.total'?: string + frt?: string + forecastsRecordsTotal?: string + 'forecasts.time.min'?: string + ftmin?: string + forecastsTimeMin?: string + 'forecasts.time.max'?: string + ftmax?: string + forecastsTimeMax?: string + 'forecasts.time.avg'?: string + ftavg?: string + forecastsTimeAvg?: string + 'forecasts.time.total'?: string + ftt?: string + forecastsTimeTotal?: string + 'node.id'?: NodeId + ni?: NodeId + nodeId?: NodeId + 'node.name'?: string + nn?: string + nodeName?: string + 'node.ephemeral_id'?: NodeId + ne?: NodeId + nodeEphemeralId?: NodeId + 'node.address'?: string + na?: string + nodeAddress?: string + 'buckets.count'?: string + bc?: string + bucketsCount?: string + 'buckets.time.total'?: string + btt?: string + bucketsTimeTotal?: string + 'buckets.time.min'?: string + btmin?: string + bucketsTimeMin?: string + 'buckets.time.max'?: string + btmax?: string + bucketsTimeMax?: string + 'buckets.time.exp_avg'?: string + btea?: string + bucketsTimeExpAvg?: string + 'buckets.time.exp_avg_hour'?: string + bteah?: string + bucketsTimeExpAvgHour?: string } -export interface ExecuteSnapshotLifecycleRequest extends RequestBase { - policy_id: Name -} +export type 
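// Note on the record shapes above: every cat column is declared three
// times -- wire name ('model.bytes'), short alias (mb), and camelCase
// alias (modelBytes) -- because the server echoes back whichever header
// style the caller asked for, so all three stay optional. A sketch of
// reading anomaly-detector records, assuming the method is exposed as
// cat.mlJobs and that format: 'json' yields an array body:
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function printJobMemory () {
  const { body } = await client.cat.mlJobs({
    format: 'json',      // records instead of a text table
    bytes: 'b',          // byte-size columns as plain byte counts
    allow_no_jobs: true  // do not error when no jobs match
  })
  for (const job of body) {
    // All three aliases point at the same column, so fall through them.
    const modelBytes = job['model.bytes'] ?? job.mb ?? job.modelBytes
    console.log(job.id, job.state, modelBytes)
  }
}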
CatJobsModelCategorizationStatus = 'ok' | 'warn' -export interface ExecuteSnapshotLifecycleResponse extends ResponseBase { - snapshot_name: string -} +export type CatJobsModelMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' -export interface ExecuteWatchRequest extends RequestBase { - id?: Name - debug?: boolean - body?: { - action_modes?: Record - alternative_input?: Record - ignore_condition?: boolean - record_execution?: boolean - simulated_actions?: SimulatedActions - trigger_data?: ScheduleTriggerEvent - watch?: Watch - } +export interface CatJobsRequest extends CatCatRequestBase { + job_id?: Id + allow_no_jobs?: boolean + bytes?: Bytes } -export interface ExecuteWatchResponse extends ResponseBase { - _id: Id - watch_record: WatchRecord -} +export type CatJobsResponse = CatJobsJobsRecord[] -export interface ExecutingPolicy { - name: string - task: TaskInfo +export interface CatMasterMasterRecord { + id?: string + host?: string + h?: string + ip?: string + node?: string + n?: string } -export type ExecutionPhase = 'awaits_execution' | 'started' | 'input' | 'condition' | 'actions' | 'watch_transform' | 'aborted' | 'finished' - -export interface ExecutionResult { - actions: Array - condition: ExecutionResultCondition - execution_duration: integer - execution_time: DateString - input: ExecutionResultInput +export interface CatMasterRequest extends CatCatRequestBase { } -export interface ExecutionResultAction { - email?: EmailActionResult - id: Id - index?: IndexActionResult - logging?: LoggingActionResult - pagerduty?: PagerDutyActionResult - reason?: string - slack?: SlackActionResult - status: Status - type: ActionType - webhook?: WebhookActionResult -} +export type CatMasterResponse = CatMasterMasterRecord[] -export interface ExecutionResultCondition { - met: boolean - status: Status - type: ConditionType +export interface CatNodeAttributesNodeAttributesRecord { + node?: string + id?: string + pid?: string + host?: string + h?: string + ip?: string + i?: string + port?: string + attr?: string + value?: string } -export interface ExecutionResultInput { - payload: Record - status: Status - type: InputType +export interface CatNodeAttributesRequest extends CatCatRequestBase { } -export interface ExecutionState { - successful: boolean - timestamp: DateString -} +export type CatNodeAttributesResponse = CatNodeAttributesNodeAttributesRecord[] -export interface ExecutionThreadPool { - max_size: long - queue_size: long +export interface CatNodesNodesRecord { + id?: Id + nodeId?: Id + pid?: string + p?: string + ip?: string + i?: string + port?: string + po?: string + http_address?: string + http?: string + version?: VersionString + v?: VersionString + flavor?: string + f?: string + type?: Type + t?: Type + build?: string + b?: string + jdk?: string + j?: string + 'disk.total'?: ByteSize + dt?: ByteSize + diskTotal?: ByteSize + 'disk.used'?: ByteSize + du?: ByteSize + diskUsed?: ByteSize + 'disk.avail'?: ByteSize + d?: ByteSize + da?: ByteSize + disk?: ByteSize + diskAvail?: ByteSize + 'disk.used_percent'?: Percentage + dup?: Percentage + diskUsedPercent?: Percentage + 'heap.current'?: string + hc?: string + heapCurrent?: string + 'heap.percent'?: Percentage + hp?: Percentage + heapPercent?: Percentage + 'heap.max'?: string + hm?: string + heapMax?: string + 'ram.current'?: string + rc?: string + ramCurrent?: string + 'ram.percent'?: Percentage + rp?: Percentage + ramPercent?: Percentage + 'ram.max'?: string + rn?: string + ramMax?: string + 'file_desc.current'?: string + fdc?: string + 
fileDescriptorCurrent?: string + 'file_desc.percent'?: Percentage + fdp?: Percentage + fileDescriptorPercent?: Percentage + 'file_desc.max'?: string + fdm?: string + fileDescriptorMax?: string + cpu?: string + load_1m?: string + load_5m?: string + load_15m?: string + l?: string + uptime?: string + u?: string + 'node.role'?: string + r?: string + role?: string + nodeRole?: string + master?: string + m?: string + name?: Name + n?: Name + 'completion.size'?: string + cs?: string + completionSize?: string + 'fielddata.memory_size'?: string + fm?: string + fielddataMemory?: string + 'fielddata.evictions'?: string + fe?: string + fielddataEvictions?: string + 'query_cache.memory_size'?: string + qcm?: string + queryCacheMemory?: string + 'query_cache.evictions'?: string + qce?: string + queryCacheEvictions?: string + 'query_cache.hit_count'?: string + qchc?: string + queryCacheHitCount?: string + 'query_cache.miss_count'?: string + qcmc?: string + queryCacheMissCount?: string + 'request_cache.memory_size'?: string + rcm?: string + requestCacheMemory?: string + 'request_cache.evictions'?: string + rce?: string + requestCacheEvictions?: string + 'request_cache.hit_count'?: string + rchc?: string + requestCacheHitCount?: string + 'request_cache.miss_count'?: string + rcmc?: string + requestCacheMissCount?: string + 'flush.total'?: string + ft?: string + flushTotal?: string + 'flush.total_time'?: string + ftt?: string + flushTotalTime?: string + 'get.current'?: string + gc?: string + getCurrent?: string + 'get.time'?: string + gti?: string + getTime?: string + 'get.total'?: string + gto?: string + getTotal?: string + 'get.exists_time'?: string + geti?: string + getExistsTime?: string + 'get.exists_total'?: string + geto?: string + getExistsTotal?: string + 'get.missing_time'?: string + gmti?: string + getMissingTime?: string + 'get.missing_total'?: string + gmto?: string + getMissingTotal?: string + 'indexing.delete_current'?: string + idc?: string + indexingDeleteCurrent?: string + 'indexing.delete_time'?: string + idti?: string + indexingDeleteTime?: string + 'indexing.delete_total'?: string + idto?: string + indexingDeleteTotal?: string + 'indexing.index_current'?: string + iic?: string + indexingIndexCurrent?: string + 'indexing.index_time'?: string + iiti?: string + indexingIndexTime?: string + 'indexing.index_total'?: string + iito?: string + indexingIndexTotal?: string + 'indexing.index_failed'?: string + iif?: string + indexingIndexFailed?: string + 'merges.current'?: string + mc?: string + mergesCurrent?: string + 'merges.current_docs'?: string + mcd?: string + mergesCurrentDocs?: string + 'merges.current_size'?: string + mcs?: string + mergesCurrentSize?: string + 'merges.total'?: string + mt?: string + mergesTotal?: string + 'merges.total_docs'?: string + mtd?: string + mergesTotalDocs?: string + 'merges.total_size'?: string + mts?: string + mergesTotalSize?: string + 'merges.total_time'?: string + mtt?: string + mergesTotalTime?: string + 'refresh.total'?: string + 'refresh.time'?: string + 'refresh.external_total'?: string + rto?: string + refreshTotal?: string + 'refresh.external_time'?: string + rti?: string + refreshTime?: string + 'refresh.listeners'?: string + rli?: string + refreshListeners?: string + 'script.compilations'?: string + scrcc?: string + scriptCompilations?: string + 'script.cache_evictions'?: string + scrce?: string + scriptCacheEvictions?: string + 'script.compilation_limit_triggered'?: string + scrclt?: string + scriptCacheCompilationLimitTriggered?: string + 
'search.fetch_current'?: string + sfc?: string + searchFetchCurrent?: string + 'search.fetch_time'?: string + sfti?: string + searchFetchTime?: string + 'search.fetch_total'?: string + sfto?: string + searchFetchTotal?: string + 'search.open_contexts'?: string + so?: string + searchOpenContexts?: string + 'search.query_current'?: string + sqc?: string + searchQueryCurrent?: string + 'search.query_time'?: string + sqti?: string + searchQueryTime?: string + 'search.query_total'?: string + sqto?: string + searchQueryTotal?: string + 'search.scroll_current'?: string + scc?: string + searchScrollCurrent?: string + 'search.scroll_time'?: string + scti?: string + searchScrollTime?: string + 'search.scroll_total'?: string + scto?: string + searchScrollTotal?: string + 'segments.count'?: string + sc?: string + segmentsCount?: string + 'segments.memory'?: string + sm?: string + segmentsMemory?: string + 'segments.index_writer_memory'?: string + siwm?: string + segmentsIndexWriterMemory?: string + 'segments.version_map_memory'?: string + svmm?: string + segmentsVersionMapMemory?: string + 'segments.fixed_bitset_memory'?: string + sfbm?: string + fixedBitsetMemory?: string + 'suggest.current'?: string + suc?: string + suggestCurrent?: string + 'suggest.time'?: string + suti?: string + suggestTime?: string + 'suggest.total'?: string + suto?: string + suggestTotal?: string + 'bulk.total_operations'?: string + bto?: string + bulkTotalOperations?: string + 'bulk.total_time'?: string + btti?: string + bulkTotalTime?: string + 'bulk.total_size_in_bytes'?: string + btsi?: string + bulkTotalSizeInBytes?: string + 'bulk.avg_time'?: string + bati?: string + bulkAvgTime?: string + 'bulk.avg_size_in_bytes'?: string + basi?: string + bulkAvgSizeInBytes?: string } -export interface ExistsQuery extends QueryBase { - field?: Field +export interface CatNodesRequest extends CatCatRequestBase { + bytes?: Bytes + full_id?: boolean | string } -export type ExpandWildcardOptions = 'open' | 'closed' | 'hidden' | 'none' | 'all' +export type CatNodesResponse = CatNodesNodesRecord[] -export type ExpandWildcards = ExpandWildcardOptions | Array | string - -export interface ExplainAnalyzeToken { - bytes: string - end_offset: long - keyword?: boolean - position: long - positionLength: long - start_offset: long - termFrequency: long - token: string - type: string +export interface CatPendingTasksPendingTasksRecord { + insertOrder?: string + o?: string + timeInQueue?: string + t?: string + priority?: string + p?: string + source?: string + s?: string } -export interface ExplainLifecycleRequest extends RequestBase { - index: IndexName - only_errors?: boolean - only_managed?: boolean +export interface CatPendingTasksRequest extends CatCatRequestBase { } -export interface ExplainLifecycleResponse extends ResponseBase { - indices: Record | LifecycleExplainProject -} +export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] -export interface ExplainRequest extends RequestBase { - id: Id - index: IndexName +export interface CatPluginsPluginsRecord { + id?: NodeId + name?: Name + n?: Name + component?: string + c?: string + version?: VersionString + v?: VersionString + description?: string + d?: string type?: Type - analyzer?: string - analyze_wildcard?: boolean - default_operator?: DefaultOperator - df?: string - lenient?: boolean - preference?: string - query_on_query_string?: string - routing?: Routing - _source?: boolean | Fields | SourceFilter - _source_excludes?: Fields - _source_includes?: Fields - stored_fields?: 
Fields - q?: string - body?: { - query?: QueryContainer - } -} - -export interface ExplainResponse extends ResponseBase { - _index: IndexName - _type?: Type - _id: Id - matched: boolean - explanation?: ExplanationDetail - get?: InlineGet -} - -export interface Explanation { - description: string - details: Array - value: float -} - -export interface ExplanationDetail { - description: string - details?: Array - value: float -} - -export interface ExtendedBounds { - max: T - min: T -} - -export interface ExtendedMemoryStats extends MemoryStats { - free_percent: integer - used_percent: integer -} - -export interface ExtendedStatsAggregate extends StatsAggregate { - std_deviation_bounds: StandardDeviationBounds - sum_of_squares?: double - variance?: double - variance_population?: double - variance_sampling?: double - std_deviation?: double - std_deviation_population?: double - std_deviation_sampling?: double -} - -export interface ExtendedStatsAggregation extends FormatMetricAggregationBase { - sigma?: double -} - -export interface ExtendedStatsBucketAggregation extends PipelineAggregationBase { - sigma?: double -} - -export interface FailProcessor extends ProcessorBase { - message: string -} - -export type Field = string - -export interface FieldAliasProperty extends PropertyBase { - path?: Field - type: 'alias' -} - -export interface FieldCapabilities { - aggregatable: boolean - indices?: Indices - meta?: Record> - non_aggregatable_indices?: Indices - non_searchable_indices?: Indices - searchable: boolean - type: string -} - -export interface FieldCapabilitiesBodyIndexFilter { - range?: FieldCapabilitiesBodyIndexFilterRange - match_none?: EmptyObject - term?: FieldCapabilitiesBodyIndexFilterTerm -} - -export interface FieldCapabilitiesBodyIndexFilterRange { - timestamp: FieldCapabilitiesBodyIndexFilterRangeTimestamp + t?: Type } -export interface FieldCapabilitiesBodyIndexFilterRangeTimestamp { - gte?: integer - gt?: integer - lte?: integer - lt?: integer +export interface CatPluginsRequest extends CatCatRequestBase { } -export interface FieldCapabilitiesBodyIndexFilterTerm { - versionControl: FieldCapabilitiesBodyIndexFilterTermVersionControl -} +export type CatPluginsResponse = CatPluginsPluginsRecord[] -export interface FieldCapabilitiesBodyIndexFilterTermVersionControl { - value: string +export interface CatRecoveryRecoveryRecord { + index?: IndexName + i?: IndexName + idx?: IndexName + shard?: string + s?: string + sh?: string + start_time?: string + start?: string + start_time_millis?: string + start_millis?: string + stop_time?: string + stop?: string + stop_time_millis?: string + stop_millis?: string + time?: string + t?: string + ti?: string + type?: Type + ty?: Type + stage?: string + st?: string + source_host?: string + shost?: string + source_node?: string + snode?: string + target_host?: string + thost?: string + target_node?: string + tnode?: string + repository?: string + rep?: string + snapshot?: string + snap?: string + files?: string + f?: string + files_recovered?: string + fr?: string + files_percent?: Percentage + fp?: Percentage + files_total?: string + tf?: string + bytes?: string + b?: string + bytes_recovered?: string + br?: string + bytes_percent?: Percentage + bp?: Percentage + bytes_total?: string + tb?: string + translog_ops?: string + to?: string + translog_ops_recovered?: string + tor?: string + translog_ops_percent?: Percentage + top?: Percentage } -export interface FieldCapabilitiesRequest extends RequestBase { +export interface CatRecoveryRequest extends 
CatCatRequestBase { index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - fields?: Fields - ignore_unavailable?: boolean - include_unmapped?: boolean - body?: { - index_filter?: FieldCapabilitiesBodyIndexFilter - } -} - -export interface FieldCapabilitiesResponse extends ResponseBase { - indices: Indices - fields: Record> -} - -export interface FieldCollapse { - field: Field - inner_hits?: InnerHits | Array - max_concurrent_group_searches?: integer -} - -export interface FieldLookup { - id?: Id - index?: IndexName - path?: Field - routing?: Routing + active_only?: boolean + bytes?: Bytes + detailed?: boolean } -export interface FieldMapping { -} +export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] -export interface FieldNamesField { - enabled: boolean +export interface CatRepositoriesRepositoriesRecord { + id?: string + repoId?: string + type?: string + t?: string } -export interface FieldSecurity { - except?: Fields - grant: Fields +export interface CatRepositoriesRequest extends CatCatRequestBase { } -export interface FieldSecuritySettings { - except: Array - grant: Array -} +export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] -export interface FieldSort { - missing?: Missing - mode?: SortMode - nested?: NestedSortValue - order?: SortOrder - unmapped_type?: FieldType +export interface CatSegmentsRequest extends CatCatRequestBase { + index?: Indices + bytes?: Bytes } -export interface FieldStat { - count: number - cardinality: number - top_hits: Array - mean_value?: number - median_value?: number - max_value?: number - min_value?: number - earliest?: string - latest?: string -} +export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] -export interface FieldStatistics { - doc_count: integer - sum_doc_freq: long - sum_ttf: long +export interface CatSegmentsSegmentsRecord { + index?: IndexName + i?: IndexName + idx?: IndexName + shard?: string + s?: string + sh?: string + prirep?: string + p?: string + pr?: string + primaryOrReplica?: string + ip?: string + id?: NodeId + segment?: string + seg?: string + generation?: string + g?: string + gen?: string + 'docs.count'?: string + dc?: string + docsCount?: string + 'docs.deleted'?: string + dd?: string + docsDeleted?: string + size?: ByteSize + si?: ByteSize + 'size.memory'?: ByteSize + sm?: ByteSize + sizeMemory?: ByteSize + committed?: string + ic?: string + isCommitted?: string + searchable?: string + is?: string + isSearchable?: string + version?: VersionString + v?: VersionString + compound?: string + ico?: string + isCompound?: string } -export type FieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' - -export interface FieldTypesMappings { - field_types: Array - runtime_field_types?: Array +export interface CatShardsRequest extends CatCatRequestBase { + index?: Indices + bytes?: Bytes } -export interface FieldTypesStats { - name: Name - count: integer - index_count: integer +export type CatShardsResponse = CatShardsShardsRecord[] + +export interface CatShardsShardsRecord { + index?: string + i?: string + 
idx?: string + shard?: string + s?: string + sh?: string + prirep?: string + p?: string + pr?: string + primaryOrReplica?: string + state?: string + st?: string + docs?: string + d?: string + dc?: string + store?: string + sto?: string + ip?: string + id?: string + node?: string + n?: string + sync_id?: string + 'unassigned.reason'?: string + ur?: string + 'unassigned.at'?: string + ua?: string + 'unassigned.for'?: string + uf?: string + 'unassigned.details'?: string + ud?: string + 'recoverysource.type'?: string + rs?: string + 'completion.size'?: string + cs?: string + completionSize?: string + 'fielddata.memory_size'?: string + fm?: string + fielddataMemory?: string + 'fielddata.evictions'?: string + fe?: string + fielddataEvictions?: string + 'query_cache.memory_size'?: string + qcm?: string + queryCacheMemory?: string + 'query_cache.evictions'?: string + qce?: string + queryCacheEvictions?: string + 'flush.total'?: string + ft?: string + flushTotal?: string + 'flush.total_time'?: string + ftt?: string + flushTotalTime?: string + 'get.current'?: string + gc?: string + getCurrent?: string + 'get.time'?: string + gti?: string + getTime?: string + 'get.total'?: string + gto?: string + getTotal?: string + 'get.exists_time'?: string + geti?: string + getExistsTime?: string + 'get.exists_total'?: string + geto?: string + getExistsTotal?: string + 'get.missing_time'?: string + gmti?: string + getMissingTime?: string + 'get.missing_total'?: string + gmto?: string + getMissingTotal?: string + 'indexing.delete_current'?: string + idc?: string + indexingDeleteCurrent?: string + 'indexing.delete_time'?: string + idti?: string + indexingDeleteTime?: string + 'indexing.delete_total'?: string + idto?: string + indexingDeleteTotal?: string + 'indexing.index_current'?: string + iic?: string + indexingIndexCurrent?: string + 'indexing.index_time'?: string + iiti?: string + indexingIndexTime?: string + 'indexing.index_total'?: string + iito?: string + indexingIndexTotal?: string + 'indexing.index_failed'?: string + iif?: string + indexingIndexFailed?: string + 'merges.current'?: string + mc?: string + mergesCurrent?: string + 'merges.current_docs'?: string + mcd?: string + mergesCurrentDocs?: string + 'merges.current_size'?: string + mcs?: string + mergesCurrentSize?: string + 'merges.total'?: string + mt?: string + mergesTotal?: string + 'merges.total_docs'?: string + mtd?: string + mergesTotalDocs?: string + 'merges.total_size'?: string + mts?: string + mergesTotalSize?: string + 'merges.total_time'?: string + mtt?: string + mergesTotalTime?: string + 'refresh.total'?: string + 'refresh.time'?: string + 'refresh.external_total'?: string + rto?: string + refreshTotal?: string + 'refresh.external_time'?: string + rti?: string + refreshTime?: string + 'refresh.listeners'?: string + rli?: string + refreshListeners?: string + 'search.fetch_current'?: string + sfc?: string + searchFetchCurrent?: string + 'search.fetch_time'?: string + sfti?: string + searchFetchTime?: string + 'search.fetch_total'?: string + sfto?: string + searchFetchTotal?: string + 'search.open_contexts'?: string + so?: string + searchOpenContexts?: string + 'search.query_current'?: string + sqc?: string + searchQueryCurrent?: string + 'search.query_time'?: string + sqti?: string + searchQueryTime?: string + 'search.query_total'?: string + sqto?: string + searchQueryTotal?: string + 'search.scroll_current'?: string + scc?: string + searchScrollCurrent?: string + 'search.scroll_time'?: string + scti?: string + searchScrollTime?: string + 
'search.scroll_total'?: string + scto?: string + searchScrollTotal?: string + 'segments.count'?: string + sc?: string + segmentsCount?: string + 'segments.memory'?: string + sm?: string + segmentsMemory?: string + 'segments.index_writer_memory'?: string + siwm?: string + segmentsIndexWriterMemory?: string + 'segments.version_map_memory'?: string + svmm?: string + segmentsVersionMapMemory?: string + 'segments.fixed_bitset_memory'?: string + sfbm?: string + fixedBitsetMemory?: string + 'seq_no.max'?: string + sqm?: string + maxSeqNo?: string + 'seq_no.local_checkpoint'?: string + sql?: string + localCheckpoint?: string + 'seq_no.global_checkpoint'?: string + sqg?: string + globalCheckpoint?: string + 'warmer.current'?: string + wc?: string + warmerCurrent?: string + 'warmer.total'?: string + wto?: string + warmerTotal?: string + 'warmer.total_time'?: string + wtt?: string + warmerTotalTime?: string + 'path.data'?: string + pd?: string + dataPath?: string + 'path.state'?: string + ps?: string + statsPath?: string + 'bulk.total_operations'?: string + bto?: string + bulkTotalOperations?: string + 'bulk.total_time'?: string + btti?: string + bulkTotalTime?: string + 'bulk.total_size_in_bytes'?: string + btsi?: string + bulkTotalSizeInBytes?: string + 'bulk.avg_time'?: string + bati?: string + bulkAvgTime?: string + 'bulk.avg_size_in_bytes'?: string + basi?: string + bulkAvgSizeInBytes?: string } -export type FieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal' - -export interface FieldValueFactorScoreFunction extends ScoreFunctionBase { - field: Field - factor?: double - missing?: double - modifier?: FieldValueFactorModifier +export interface CatSnapshotsRequest extends CatCatRequestBase { + repository?: Names + ignore_unavailable?: boolean } -export interface FielddataFrequencyFilter { - max: double - min: double - min_segment_size: integer -} +export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] -export interface FielddataStats { - evictions?: long - memory_size_in_bytes: long - fields?: Record +export interface CatSnapshotsSnapshotsRecord { + id?: string + snapshot?: string + repository?: string + re?: string + repo?: string + status?: string + s?: string + start_epoch?: EpochMillis + ste?: EpochMillis + startEpoch?: EpochMillis + start_time?: DateString + sti?: DateString + startTime?: DateString + end_epoch?: EpochMillis + ete?: EpochMillis + endEpoch?: EpochMillis + end_time?: DateString + eti?: DateString + endTime?: DateString + duration?: Time + dur?: Time + indices?: string + i?: string + successful_shards?: string + ss?: string + failed_shards?: string + fs?: string + total_shards?: string + ts?: string + reason?: string + r?: string } -export type Fields = Field | Array - -export interface FileCountSnapshotStats { - file_count: integer - size_in_bytes: long +export interface CatTasksRequest extends CatCatRequestBase { + actions?: string[] + detailed?: boolean + node_id?: string[] + parent_task?: long } -export interface FileSystemStats { - data: Array - timestamp: long - total: TotalFileSystemStats -} +export type CatTasksResponse = CatTasksTasksRecord[] -export interface Filter { +export interface CatTasksTasksRecord { + id?: Id + action?: string + ac?: string + task_id?: Id + ti?: Id + parent_task_id?: string + pti?: string + type?: Type + ty?: Type + start_time?: string + start?: string + timestamp?: string + ts?: string + hms?: string + hhmmss?: string + running_time_ns?: string + running_time?: 
string + time?: string + node_id?: NodeId + ni?: NodeId + ip?: string + i?: string + port?: string + po?: string + node?: string + n?: string + version?: VersionString + v?: VersionString + x_opaque_id?: string + x?: string description?: string - filter_id: Id - items: Array -} - -export interface FilterRef { - filter_id: Id - filter_type: RuleFilterType -} - -export interface FiltersAggregate extends AggregateBase { - buckets: Array | Record -} - -export interface FiltersAggregation extends BucketAggregationBase { - filters?: Record | Array - other_bucket?: boolean - other_bucket_key?: string + desc?: string } -export interface FiltersBucketItemKeys { - doc_count: long +export interface CatTemplatesRequest extends CatCatRequestBase { + name?: Name } -export type FiltersBucketItem = FiltersBucketItemKeys | - { [property: string]: Aggregate } -export interface FindStructureRequest { - charset?: string - column_names?: string - delimiter?: string - explain?: boolean - format?: string - grok_pattern?: string - has_header_row?: boolean - lines_to_sample?: uint - quote?: string - should_trim_fields?: boolean - timeout?: Time - timestamp_field?: Field - timestamp_format?: string - body: TBody -} +export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] -export interface FindStructureResponse { - charset: string - has_header_row: boolean - has_byte_order_marker: boolean - format: string - field_stats: Record - sample_start: string - num_messages_analyzed: number - mappings: TypeMapping - quote: string - delimiter: string - need_client_timezone: boolean - num_lines_analyzed: number - column_names?: Array - explanation?: Array - grok_pattern?: string - multiline_start_pattern?: string - exclude_lines_pattern?: string - java_timestamp_formats?: Array - joda_timestamp_formats?: Array - timestamp_field?: string - should_trim_fields?: boolean +export interface CatTemplatesTemplatesRecord { + name?: Name + n?: Name + index_patterns?: string + t?: string + order?: string + o?: string + p?: string + version?: VersionString + v?: VersionString + composed_of?: string + c?: string } -export interface FingerprintTokenFilter extends TokenFilterBase { - max_output_size: integer - separator: string +export interface CatThreadPoolRequest extends CatCatRequestBase { + thread_pool_patterns?: Names + size?: Size | boolean } -export interface FlattenedProperty extends PropertyBase { - boost?: double - depth_limit?: integer - doc_values?: boolean - eager_global_ordinals?: boolean - index?: boolean - index_options?: IndexOptions - null_value?: string - similarity?: string - split_queries_on_whitespace?: boolean - type: 'flattened' -} +export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] -export interface FlattenedUsage extends XPackUsage { - field_count: integer +export interface CatThreadPoolThreadPoolRecord { + node_name?: string + nn?: string + node_id?: NodeId + id?: NodeId + ephemeral_node_id?: string + eid?: string + pid?: string + p?: string + host?: string + h?: string + ip?: string + i?: string + port?: string + po?: string + name?: string + n?: string + type?: string + t?: string + active?: string + a?: string + pool_size?: string + psz?: string + queue?: string + q?: string + queue_size?: string + qs?: string + rejected?: string + r?: string + largest?: string + l?: string + completed?: string + c?: string + core?: string + cr?: string + max?: string + mx?: string + size?: string + sz?: string + keep_alive?: string + ka?: string } -export interface FloatRangeProperty extends 
RangePropertyBase { - type: 'float_range' +export interface CatTrainedModelsRequest extends CatCatRequestBase { + model_id?: Id + allow_no_match?: boolean + bytes?: Bytes + from?: integer + size?: integer } -export interface FlushJobRequest extends RequestBase { - job_id: Id - skip_time?: string - body?: { - advance_time?: DateString - calc_interim?: boolean - end?: DateString - start?: DateString - } -} +export type CatTrainedModelsResponse = CatTrainedModelsTrainedModelsRecord[] -export interface FlushJobResponse extends ResponseBase { - flushed: boolean - last_finalized_bucket_end?: integer +export interface CatTrainedModelsTrainedModelsRecord { + id?: Id + created_by?: string + c?: string + createdBy?: string + heap_size?: ByteSize + hs?: ByteSize + modelHeapSize?: ByteSize + operations?: string + o?: string + modelOperations?: string + license?: string + l?: string + create_time?: DateString + ct?: DateString + version?: VersionString + v?: VersionString + description?: string + d?: string + 'ingest.pipelines'?: string + ip?: string + ingestPipelines?: string + 'ingest.count'?: string + ic?: string + ingestCount?: string + 'ingest.time'?: string + it?: string + ingestTime?: string + 'ingest.current'?: string + icurr?: string + ingestCurrent?: string + 'ingest.failed'?: string + if?: string + ingestFailed?: string + 'data_frame.id'?: string + dfid?: string + dataFrameAnalytics?: string + 'data_frame.create_time'?: string + dft?: string + dataFrameAnalyticsTime?: string + 'data_frame.source_index'?: string + dfsi?: string + dataFrameAnalyticsSrcIndex?: string + 'data_frame.analysis'?: string + dfa?: string + dataFrameAnalyticsAnalysis?: string } -export interface FlushRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - force?: boolean - ignore_unavailable?: boolean - wait_if_ongoing?: boolean +export interface CatTransformsRequest extends CatCatRequestBase { + transform_id?: Id + allow_no_match?: boolean + from?: integer + size?: integer } -export interface FlushResponse extends ShardsOperationResponseBase { -} +export type CatTransformsResponse = CatTransformsTransformsRecord[] -export interface FlushStats { - periodic: long - total: long - total_time?: string - total_time_in_millis: long +export interface CatTransformsTransformsRecord { + id?: Id + state?: string + s?: string + checkpoint?: string + c?: string + documents_processed?: string + docp?: string + documentsProcessed?: string + checkpoint_progress?: string + cp?: string + checkpointProgress?: string + last_search_time?: string + lst?: string + lastSearchTime?: string + changes_last_detection_time?: string + cldt?: string + create_time?: string + ct?: string + createTime?: string + version?: VersionString + v?: VersionString + source_index?: string + si?: string + sourceIndex?: string + dest_index?: string + di?: string + destIndex?: string + pipeline?: string + p?: string + description?: string + d?: string + transform_type?: string + tt?: string + frequency?: string + f?: string + max_page_search_size?: string + mpsz?: string + docs_per_second?: string + dps?: string + reason?: string + r?: string + search_total?: string + st?: string + search_failure?: string + sf?: string + search_time?: string + stime?: string + index_total?: string + it?: string + index_failure?: string + if?: string + index_time?: string + itime?: string + documents_indexed?: string + doci?: string + delete_time?: string + dtime?: string + documents_deleted?: string + docd?: string + 
trigger_count?: string + tc?: string + pages_processed?: string + pp?: string + processing_time?: string + pt?: string + checkpoint_duration_time_exp_avg?: string + cdtea?: string + checkpointTimeExpAvg?: string + indexed_documents_exp_avg?: string + idea?: string + processed_documents_exp_avg?: string + pdea?: string } -export interface FollowConfig { - max_outstanding_read_requests: integer - max_outstanding_write_requests: integer - max_read_request_operation_count: integer - max_read_request_size: string - max_retry_delay: Time - max_write_buffer_count: integer - max_write_buffer_size: string - max_write_request_operation_count: integer - max_write_request_size: string - read_poll_timeout: Time +export interface CcrFollowIndexStats { + index: IndexName + shards: CcrShardStats[] } -export interface FollowIndexReadException { +export interface CcrReadException { exception: ErrorCause from_seq_no: SequenceNumber retries: integer } -export interface FollowIndexShardStats { +export interface CcrShardStats { bytes_read: long failed_read_requests: long failed_write_requests: long @@ -5501,7 +6642,7 @@ export interface FollowIndexShardStats { operations_written: long outstanding_read_requests: integer outstanding_write_requests: integer - read_exceptions: Array + read_exceptions: CcrReadException[] remote_cluster: string shard_id: integer successful_read_requests: long @@ -5514,1450 +6655,1867 @@ export interface FollowIndexShardStats { write_buffer_size_in_bytes: ByteSize } -export interface FollowIndexStats { +export interface CcrCreateFollowIndexRequest extends RequestBase { index: IndexName - shards: Array + wait_for_active_shards?: WaitForActiveShards + body?: { + leader_index?: IndexName + max_outstanding_read_requests?: long + max_outstanding_write_requests?: long + max_read_request_operation_count?: long + max_read_request_size?: string + max_retry_delay?: Time + max_write_buffer_count?: long + max_write_buffer_size?: string + max_write_request_operation_count?: long + max_write_request_size?: string + read_poll_timeout?: Time + remote_cluster?: string + } } -export interface FollowIndexStatsRequest extends RequestBase { - index: Indices +export interface CcrCreateFollowIndexResponse { + follow_index_created: boolean + follow_index_shards_acked: boolean + index_following_started: boolean } -export interface FollowIndexStatsResponse extends ResponseBase { - indices: Array +export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { + name: Name } -export interface FollowInfoRequest extends RequestBase { - index: Indices +export interface CcrDeleteAutoFollowPatternResponse extends AcknowledgedResponseBase { } -export interface FollowInfoResponse extends ResponseBase { - follower_indices: Array +export interface CcrFollowIndexStatsRequest extends RequestBase { + index: Indices } -export type FollowerIndexStatus = 'active' | 'paused' +export interface CcrFollowIndexStatsResponse { + indices: CcrFollowIndexStats[] +} -export interface FollowerInfo { +export interface CcrFollowInfoFollowerIndex { follower_index: IndexName leader_index: IndexName - parameters?: FollowConfig + parameters?: CcrFollowInfoFollowerIndexParameters remote_cluster: Name - status: FollowerIndexStatus + status: CcrFollowInfoFollowerIndexStatus } -export interface ForceMergeRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - flush?: boolean - ignore_unavailable?: boolean - max_num_segments?: long - only_expunge_deletes?: boolean +export 
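// The Ccr* request/response pairs above line up with the client's ccr
// namespace. A sketch of starting to follow a leader index -- the
// remote cluster name and index names are illustrative, and 'leader'
// must already be configured in the cluster settings:
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function startFollowing () {
  const { body } = await client.ccr.follow({
    index: 'follower-index',
    wait_for_active_shards: '1',
    body: {
      remote_cluster: 'leader',
      leader_index: 'leader-index',
      max_read_request_operation_count: 5120,
      read_poll_timeout: '1m'
    }
  })
  // Shape of CcrCreateFollowIndexResponse above:
  console.log(body.follow_index_created, body.index_following_started)
}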
interface CcrFollowInfoFollowerIndexParameters { + max_outstanding_read_requests: integer + max_outstanding_write_requests: integer + max_read_request_operation_count: integer + max_read_request_size: string + max_retry_delay: Time + max_write_buffer_count: integer + max_write_buffer_size: string + max_write_request_operation_count: integer + max_write_request_size: string + read_poll_timeout: Time } -export interface ForceMergeResponse extends ShardsOperationResponseBase { +export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused' + +export interface CcrFollowInfoRequest extends RequestBase { + index: Indices } -export interface ForeachProcessor extends ProcessorBase { - field: Field - ignore_missing?: boolean - processor: ProcessorContainer +export interface CcrFollowInfoResponse { + follower_indices: CcrFollowInfoFollowerIndex[] } -export interface ForecastJobRequest extends RequestBase { - job_id: Id +export interface CcrForgetFollowerIndexRequest extends RequestBase { + index: IndexName body?: { - duration?: Time - expires_in?: Time + follower_cluster?: string + follower_index?: IndexName + follower_index_uuid?: Uuid + leader_remote_cluster?: string } } -export interface ForecastJobResponse extends AcknowledgedResponseBase { - forecast_id: string +export interface CcrForgetFollowerIndexResponse { + _shards: ShardStatistics +} + +export interface CcrGetAutoFollowPatternAutoFollowPattern { + name: Name + pattern: CcrGetAutoFollowPatternAutoFollowPattern +} + +export interface CcrGetAutoFollowPatternRequest extends RequestBase { + name?: Name +} + +export interface CcrGetAutoFollowPatternResponse { + patterns: CcrGetAutoFollowPatternAutoFollowPattern[] +} + +export interface CcrPauseAutoFollowPatternRequest extends RequestBase { + name: Name +} + +export interface CcrPauseAutoFollowPatternResponse extends AcknowledgedResponseBase { } -export interface ForgetFollowerIndexRequest extends RequestBase { +export interface CcrPauseFollowIndexRequest extends RequestBase { index: IndexName - body: { - follower_cluster?: string - follower_index?: IndexName - follower_index_uuid?: string - leader_remote_cluster?: string +} + +export interface CcrPauseFollowIndexResponse extends AcknowledgedResponseBase { +} + +export interface CcrPutAutoFollowPatternRequest extends RequestBase { + name: Name + body?: { + remote_cluster: string + follow_index_pattern?: IndexPattern + leader_index_patterns?: IndexPatterns + max_outstanding_read_requests?: integer + settings?: Record + max_outstanding_write_requests?: integer + read_poll_timeout?: Time + max_read_request_operation_count?: integer + max_read_request_size?: ByteSize + max_retry_delay?: Time + max_write_buffer_count?: integer + max_write_buffer_size?: ByteSize + max_write_request_operation_count?: integer + max_write_request_size?: ByteSize } } -export interface ForgetFollowerIndexResponse extends ResponseBase { - _shards: ShardStatistics +export interface CcrPutAutoFollowPatternResponse extends AcknowledgedResponseBase { } -export interface FormatMetricAggregationBase extends MetricAggregationBase { - format?: string +export interface CcrResumeAutoFollowPatternRequest extends RequestBase { + name: Name } -export interface FormattableMetricAggregation extends MetricAggregationBase { - format?: string +export interface CcrResumeAutoFollowPatternResponse extends AcknowledgedResponseBase { } -export interface FoundUserPrivilege { - found: boolean +export interface CcrResumeFollowIndexRequest extends RequestBase { + index: IndexName + body?: { + 
max_outstanding_read_requests?: long + max_outstanding_write_requests?: long + max_read_request_operation_count?: long + max_read_request_size?: string + max_retry_delay?: Time + max_write_buffer_count?: long + max_write_buffer_size?: string + max_write_request_operation_count?: long + max_write_request_size?: string + read_poll_timeout?: Time + } +} + +export interface CcrResumeFollowIndexResponse extends AcknowledgedResponseBase { +} + +export interface CcrStatsAutoFollowStats { + auto_followed_clusters: CcrStatsAutoFollowedCluster[] + number_of_failed_follow_indices: long + number_of_failed_remote_cluster_state_requests: long + number_of_successful_follow_indices: long + recent_auto_follow_errors: ErrorCause[] +} + +export interface CcrStatsAutoFollowedCluster { + cluster_name: Name + last_seen_metadata_version: VersionNumber + time_since_last_check_millis: DateString +} + +export interface CcrStatsFollowStats { + indices: CcrFollowIndexStats[] +} + +export interface CcrStatsRequest extends RequestBase { +} + +export interface CcrStatsResponse { + auto_follow_stats: CcrStatsAutoFollowStats + follow_stats: CcrStatsFollowStats } -export interface FreezeIndexRequest extends RequestBase { +export interface CcrUnfollowIndexRequest extends RequestBase { index: IndexName - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time - wait_for_active_shards?: WaitForActiveShards } -export interface FreezeIndexResponse extends AcknowledgedResponseBase { - shards_acknowledged: boolean +export interface CcrUnfollowIndexResponse extends AcknowledgedResponseBase { +} + +export interface ClusterClusterStateBlockIndex { + description?: string + retryable?: boolean + levels?: string[] + aliases?: IndexAlias[] + aliases_version?: VersionNumber + version?: VersionNumber + mapping_version?: VersionNumber + settings_version?: VersionNumber + routing_num_shards?: VersionNumber + state?: string + settings?: Record + in_sync_allocations?: Record + primary_terms?: Record + mappings?: Record + rollover_info?: Record + timestamp_range?: Record + system?: boolean +} + +export interface ClusterClusterStateDeletedSnapshots { + snapshot_deletions: string[] +} + +export interface ClusterClusterStateIndexLifecycle { + policies: Record + operation_mode: LifecycleOperationMode } -export interface FrozenIndicesUsage extends XPackUsage { - indices_count: long +export interface ClusterClusterStateIndexLifecyclePolicy { + phases: IlmPhases } -export type FunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min' +export interface ClusterClusterStateIndexLifecycleSummary { + policy: ClusterClusterStateIndexLifecyclePolicy + headers: HttpHeaders + version: VersionNumber + modified_date: long + modified_date_string: DateString +} -export interface FunctionScoreContainer { - exp?: DecayFunction - gauss?: DecayFunction - linear?: DecayFunction - field_value_factor?: FieldValueFactorScoreFunction - random_score?: RandomScoreFunction - script_score?: ScriptScoreFunction - filter?: QueryContainer - weight?: double +export interface ClusterClusterStateIngest { + pipeline: ClusterClusterStateIngestPipeline[] } -export type FunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min' +export interface ClusterClusterStateIngestPipeline { + id: Id + config: ClusterClusterStateIngestPipelineConfig +} -export interface FunctionScoreQuery extends QueryBase { - boost_mode?: FunctionBoostMode - functions?: Array - max_boost?: double - 
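// CcrStatsResponse above combines the auto-follow coordinator stats
// with per-shard follower stats, which makes it the natural polling
// target for replication monitoring. A sketch using only fields that
// appear in the types above:
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function reportCcrHealth () {
  const { body } = await client.ccr.stats()
  console.log('failed follow indices:',
    body.auto_follow_stats.number_of_failed_follow_indices)
  for (const index of body.follow_stats.indices) {
    for (const shard of index.shards) {
      console.log(index.index, shard.shard_id,
        'failed reads:', shard.failed_read_requests,
        'outstanding reads:', shard.outstanding_read_requests)
    }
  }
}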
min_score?: double - query?: QueryContainer - score_mode?: FunctionScoreMode - boost?: float +export interface ClusterClusterStateIngestPipelineConfig { + description?: string + version?: VersionNumber + processors: IngestProcessorContainer[] } -export type Fuzziness = string | integer +export interface ClusterClusterStateMetadata { + cluster_uuid: Uuid + cluster_uuid_committed: boolean + templates: ClusterClusterStateMetadataTemplate + indices?: Record + 'index-graveyard': ClusterClusterStateMetadataIndexGraveyard + cluster_coordination: ClusterClusterStateMetadataClusterCoordination + ingest?: ClusterClusterStateIngest + repositories?: Record + component_template?: Record + index_template?: Record + index_lifecycle?: ClusterClusterStateIndexLifecycle +} + +export interface ClusterClusterStateMetadataClusterCoordination { + term: integer + last_committed_config: string[] + last_accepted_config: string[] + voting_config_exclusions: ClusterVotingConfigExclusionsItem[] +} -export interface FuzzyQuery extends QueryBase { - max_expansions?: integer - prefix_length?: integer - rewrite?: MultiTermQueryRewrite - transpositions?: boolean - fuzziness?: Fuzziness - value: any +export interface ClusterClusterStateMetadataIndexGraveyard { + tombstones: ClusterTombstone[] } -export type GapPolicy = 'skip' | 'insert_zeros' +export interface ClusterClusterStateMetadataTemplate { +} -export interface GarbageCollectionGenerationStats { - collection_count: long - collection_time: string - collection_time_in_millis: long +export interface ClusterClusterStateRoutingNodes { + unassigned: NodeShard[] + nodes: Record } -export interface GarbageCollectionStats { - collectors: Record +export interface ClusterClusterStateSnapshots { + snapshots: SnapshotStatus[] } -export interface GenericProperty extends DocValuesPropertyBase { - analyzer: string - boost: double - fielddata: StringFielddata - ignore_malformed: boolean - index: boolean - index_options: IndexOptions - norms: boolean - null_value: string - position_increment_gap: integer - search_analyzer: string - term_vector: TermVectorOption - type: string +export type ClusterClusterStatus = 'green' | 'yellow' | 'red' + +export interface ClusterComponentTemplate { + name: Name + component_template: ClusterComponentTemplateNode } -export interface GeoBoundingBoxQuery extends QueryBase { - bounding_box?: BoundingBox - type?: GeoExecution - validation_method?: GeoValidationMethod +export interface ClusterComponentTemplateNode { + template: ClusterComponentTemplateSummary + version?: VersionNumber + _meta?: Metadata } -export interface GeoBounds { - bottom_right: LatLon - top_left: LatLon +export interface ClusterComponentTemplateSummary { + _meta?: Metadata + version?: VersionNumber + settings: Record + mappings?: MappingTypeMapping + aliases?: Record } -export interface GeoBoundsAggregate extends AggregateBase { - bounds: GeoBounds +export interface ClusterTombstone { + index: ClusterTombstoneIndex + delete_date?: DateString + delete_date_in_millis: long } -export interface GeoBoundsAggregation extends MetricAggregationBase { - wrap_longitude?: boolean +export interface ClusterTombstoneIndex { + index_name: Name + index_uuid: Uuid } -export interface GeoCentroidAggregate extends AggregateBase { - count: long - location: GeoLocation +export interface ClusterVotingConfigExclusionsItem { + node_id: Id + node_name: Name } -export interface GeoCentroidAggregation extends MetricAggregationBase { - count?: long - location?: GeoLocation +export interface 
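// ClusterClusterStateMetadata and the surrounding types model the
// metadata section of the cluster state. A sketch of fetching just that
// section -- the metric filter mirrors the REST-level
// GET _cluster/state/metadata:
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function inspectMetadata () {
  // Restrict the (potentially very large) state to the metadata section.
  const { body } = await client.cluster.state({ metric: ['metadata'] })
  // body.metadata follows ClusterClusterStateMetadata above:
  console.log('cluster uuid:', body.metadata.cluster_uuid)
  console.log('indices tracked:', Object.keys(body.metadata.indices ?? {}).length)
  console.log('component templates:', Object.keys(body.metadata.component_template ?? {}))
}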
ClusterAllocationExplainAllocationDecision { + decider: string + decision: ClusterAllocationExplainAllocationExplainDecision + explanation: string } -export type GeoCoordinate = string | Array | ThreeDimensionalPoint +export type ClusterAllocationExplainAllocationExplainDecision = 'NO' | 'YES' | 'THROTTLE' | 'ALWAYS' -export interface GeoDecayFunctionKeys extends DecayFunctionBase { +export interface ClusterAllocationExplainAllocationStore { + allocation_id: string + found: boolean + in_sync: boolean + matching_size_in_bytes: long + matching_sync_id: boolean + store_exception: string } -export type GeoDecayFunction = GeoDecayFunctionKeys | - { [property: string]: DecayPlacement } -export interface GeoDistanceAggregation extends BucketAggregationBase { - distance_type?: GeoDistanceType - field?: Field - origin?: GeoLocation | string - ranges?: Array - unit?: DistanceUnit +export interface ClusterAllocationExplainClusterInfo { + nodes: Record + shard_sizes: Record + shard_data_set_sizes?: Record + shard_paths: Record + reserved_sizes: ClusterAllocationExplainReservedSize[] } -export interface GeoDistanceQuery extends QueryBase { - distance?: Distance - distance_type?: GeoDistanceType - location?: GeoLocation - validation_method?: GeoValidationMethod +export interface ClusterAllocationExplainCurrentNode { + id: Id + name: Name + attributes: Record + transport_address: TransportAddress + weight_ranking: integer } -export interface GeoDistanceSortKeys { - mode?: SortMode - distance_type?: GeoDistanceType - order?: SortOrder - unit?: DistanceUnit -} -export type GeoDistanceSort = GeoDistanceSortKeys | - { [property: string]: GeoLocation | Array } +export type ClusterAllocationExplainDecision = 'yes' | 'no' | 'worse_balance' | 'throttled' | 'awaiting_info' | 'allocation_delayed' | 'no_valid_shard_copy' | 'no_attempt' -export type GeoDistanceType = 'arc' | 'plane' +export interface ClusterAllocationExplainDiskUsage { + path: string + total_bytes: long + used_bytes: long + free_bytes: long + free_disk_percent: double + used_disk_percent: double +} -export type GeoExecution = 'memory' | 'indexed' +export interface ClusterAllocationExplainNodeAllocationExplanation { + deciders: ClusterAllocationExplainAllocationDecision[] + node_attributes: Record + node_decision: ClusterAllocationExplainDecision + node_id: Id + node_name: Name + store?: ClusterAllocationExplainAllocationStore + transport_address: TransportAddress + weight_ranking: integer +} -export interface GeoHashGridAggregation extends BucketAggregationBase { - bounds?: BoundingBox - field?: Field - precision?: GeoHashPrecision - shard_size?: integer - size?: integer +export interface ClusterAllocationExplainNodeDiskUsage { + node_name: Name + least_available: ClusterAllocationExplainDiskUsage + most_available: ClusterAllocationExplainDiskUsage } -export type GeoHashPrecision = number +export interface ClusterAllocationExplainRequest extends RequestBase { + include_disk_info?: boolean + include_yes_decisions?: boolean + body?: { + current_node?: string + index?: IndexName + primary?: boolean + shard?: integer + } +} -export interface GeoIpProcessor extends ProcessorBase { - database_file: string - field: Field - first_only: boolean - ignore_missing: boolean - properties: Array - target_field: Field +export interface ClusterAllocationExplainReservedSize { + node_id: Id + path: string + total: long + shards: string[] } -export interface GeoLineAggregate extends AggregateBase { - type: string - geometry: LineStringGeoShape - properties: 
GeoLineProperties +export interface ClusterAllocationExplainResponse { + allocate_explanation?: string + allocation_delay?: string + allocation_delay_in_millis?: long + can_allocate?: ClusterAllocationExplainDecision + can_move_to_other_node?: ClusterAllocationExplainDecision + can_rebalance_cluster?: ClusterAllocationExplainDecision + can_rebalance_cluster_decisions?: ClusterAllocationExplainAllocationDecision[] + can_rebalance_to_other_node?: ClusterAllocationExplainDecision + can_remain_decisions?: ClusterAllocationExplainAllocationDecision[] + can_remain_on_current_node?: ClusterAllocationExplainDecision + cluster_info?: ClusterAllocationExplainClusterInfo + configured_delay?: string + configured_delay_in_millis?: long + current_node?: ClusterAllocationExplainCurrentNode + current_state: string + index: IndexName + move_explanation?: string + node_allocation_decisions?: ClusterAllocationExplainNodeAllocationExplanation[] + primary: boolean + rebalance_explanation?: string + remaining_delay?: string + remaining_delay_in_millis?: long + shard: integer + unassigned_info?: ClusterAllocationExplainUnassignedInformation } -export interface GeoLineAggregation { - point: GeoLinePoint - sort: GeoLineSort - include_sort?: boolean - sort_order?: SortOrder - size?: integer +export interface ClusterAllocationExplainUnassignedInformation { + at: DateString + last_allocation_status?: string + reason: ClusterAllocationExplainUnassignedInformationReason + details?: string + failed_allocation_attempts?: integer + delayed?: boolean + allocation_status?: string } -export interface GeoLinePoint { - field: Field +export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION' + +export interface ClusterDeleteComponentTemplateRequest extends RequestBase { + name: Name + master_timeout?: Time + timeout?: Time } -export interface GeoLineProperties { - complete: boolean - sort_values: Array +export interface ClusterDeleteComponentTemplateResponse extends AcknowledgedResponseBase { } -export interface GeoLineSort { - field: Field +export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { + body?: { + stub: string + } } -export type GeoLocation = string | Array | TwoDimensionalPoint +export interface ClusterDeleteVotingConfigExclusionsResponse { + stub: integer +} -export type GeoOrientation = 'right' | 'counterclockwise' | 'ccw' | 'left' | 'clockwise' | 'cw' +export interface ClusterExistsComponentTemplateRequest extends RequestBase { + stub_a: string + stub_b: string + body?: { + stub_c: string + } +} -export interface GeoPointProperty extends DocValuesPropertyBase { - ignore_malformed?: boolean - ignore_z_value?: boolean - null_value?: GeoLocation - type: 'geo_point' +export interface ClusterExistsComponentTemplateResponse { + stub: integer } -export interface GeoPolygonQuery extends QueryBase { - points?: Array - validation_method?: GeoValidationMethod +export interface ClusterGetComponentTemplateRequest extends RequestBase { + name?: Name + flat_settings?: boolean + local?: boolean + master_timeout?: Time } -export interface GeoShape { - type?: string +export interface ClusterGetComponentTemplateResponse { + component_templates: ClusterComponentTemplate[] } -export 
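// The ClusterAllocationExplain* types above model the allocation-explain
// API, which reports why a given shard copy is or is not assigned. A
// sketch for one primary shard; the index name and shard number are
// placeholders:
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function explainShard () {
  const { body } = await client.cluster.allocationExplain({
    include_disk_info: true,       // adds the cluster_info section
    include_yes_decisions: false,  // keep only the blocking deciders
    body: { index: 'my-index', shard: 0, primary: true }
  })
  // ClusterAllocationExplainResponse above:
  if (body.unassigned_info != null) {
    console.log('unassigned because:', body.unassigned_info.reason)
  }
  for (const node of body.node_allocation_decisions ?? []) {
    console.log(node.node_name, node.node_decision,
      node.deciders.map((d: any) => d.explanation))
  }
}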
interface GeoShapeProperty extends DocValuesPropertyBase { - coerce?: boolean - ignore_malformed?: boolean - ignore_z_value?: boolean - orientation?: GeoOrientation - strategy?: GeoStrategy - type: 'geo_shape' +export interface ClusterGetSettingsRequest extends RequestBase { + flat_settings?: boolean + include_defaults?: boolean + master_timeout?: Time + timeout?: Time } -export interface GeoShapeQuery extends QueryBase { - ignore_unmapped?: boolean - indexed_shape?: FieldLookup - relation?: GeoShapeRelation - shape?: GeoShape +export interface ClusterGetSettingsResponse { + persistent: Record + transient: Record + defaults?: Record } -export type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains' +export interface ClusterHealthIndexHealthStats { + active_primary_shards: integer + active_shards: integer + initializing_shards: integer + number_of_replicas: integer + number_of_shards: integer + relocating_shards: integer + shards?: Record + status: Health + unassigned_shards: integer +} -export type GeoStrategy = 'recursive' | 'term' +export interface ClusterHealthRequest extends RequestBase { + index?: Indices + expand_wildcards?: ExpandWildcards + level?: Level + local?: boolean + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + wait_for_events?: WaitForEvents + wait_for_nodes?: string + wait_for_no_initializing_shards?: boolean + wait_for_no_relocating_shards?: boolean + wait_for_status?: WaitForStatus +} -export interface GeoTileGridAggregation extends BucketAggregationBase { - field?: Field - precision?: GeoTilePrecision - shard_size?: integer - size?: integer +export interface ClusterHealthResponse { + active_primary_shards: integer + active_shards: integer + active_shards_percent_as_number: Percentage + cluster_name: string + delayed_unassigned_shards: integer + indices?: Record + initializing_shards: integer + number_of_data_nodes: integer + number_of_in_flight_fetch: integer + number_of_nodes: integer + number_of_pending_tasks: integer + relocating_shards: integer + status: Health + task_max_waiting_in_queue_millis: EpochMillis + timed_out: boolean + unassigned_shards: integer } -export type GeoTilePrecision = number +export interface ClusterHealthShardHealthStats { + active_shards: integer + initializing_shards: integer + primary_active: boolean + relocating_shards: integer + status: Health + unassigned_shards: integer +} -export type GeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' +export interface ClusterPendingTasksPendingTask { + insert_order: integer + priority: string + source: string + time_in_queue: string + time_in_queue_millis: integer +} -export interface GetAliasRequest extends RequestBase { - name?: Names - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean +export interface ClusterPendingTasksRequest extends RequestBase { local?: boolean + master_timeout?: Time } -export interface GetAliasResponse extends DictionaryResponseBase { +export interface ClusterPendingTasksResponse { + tasks: ClusterPendingTasksPendingTask[] } -export interface GetAnomalyRecordsRequest extends RequestBase { - job_id: Id - exclude_interim?: boolean - from?: integer - size?: integer - start?: DateString - end?: DateString +export interface ClusterPutComponentTemplateRequest extends RequestBase { + name: Name + create?: boolean + master_timeout?: Time body?: { - desc?: boolean - exclude_interim?: boolean - page?: Page - record_score?: double - sort?: Field - 
start?: DateString - end?: DateString + template: IndicesIndexState + aliases?: Record + mappings?: MappingTypeMapping + settings?: IndicesIndexSettings + version?: VersionNumber + _meta?: Metadata } } -export interface GetAnomalyRecordsResponse extends ResponseBase { - count: long - records: Array +export interface ClusterPutComponentTemplateResponse extends AcknowledgedResponseBase { +} + +export interface ClusterPutSettingsRequest extends RequestBase { + flat_settings?: boolean + master_timeout?: Time + timeout?: Time + body?: { + persistent?: Record + transient?: Record + } } -export interface GetApiKeyRequest extends RequestBase { - id?: string - name?: string - owner?: boolean - realm_name?: string - username?: string +export interface ClusterPutSettingsResponse { + acknowledged: boolean + persistent: Record + transient: Record } -export interface GetApiKeyResponse extends ResponseBase { - api_keys: Array +export interface ClusterPutVotingConfigExclusionsRequest extends RequestBase { + node_names?: Names + node_ids?: Ids + timeout?: Time + wait_for_removal?: boolean } -export interface GetAutoFollowPatternRequest extends RequestBase { - name?: Name +export interface ClusterPutVotingConfigExclusionsResponse { + stub: integer } -export interface GetAutoFollowPatternResponse extends ResponseBase { - patterns: Array +export interface ClusterRemoteInfoClusterRemoteInfo { + connected: boolean + initial_connect_timeout: Time + max_connections_per_cluster: integer + num_nodes_connected: long + seeds: string[] + skip_unavailable: boolean } -export interface GetAutoscalingCapacityRequest extends RequestBase { - stub_a: string - stub_b: string +export interface ClusterRemoteInfoRequest extends RequestBase { body?: { - stub_c: string + stub: string } } -export interface GetAutoscalingCapacityResponse extends ResponseBase { - stub: integer +export interface ClusterRemoteInfoResponse extends DictionaryResponseBase { } -export interface GetAutoscalingPolicyRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface ClusterRerouteCommand { + cancel?: ClusterRerouteCommandCancelAction + move?: ClusterRerouteCommandMoveAction + allocate_replica?: ClusterRerouteCommandAllocateReplicaAction + allocate_stale_primary?: ClusterRerouteCommandAllocatePrimaryAction + allocate_empty_primary?: ClusterRerouteCommandAllocatePrimaryAction } -export interface GetAutoscalingPolicyResponse extends ResponseBase { - stub: integer +export interface ClusterRerouteCommandAllocatePrimaryAction { + index: IndexName + shard: integer + node: string + accept_data_loss: boolean } -export interface GetBasicLicenseStatusRequest extends RequestBase { +export interface ClusterRerouteCommandAllocateReplicaAction { + index: IndexName + shard: integer + node: string } -export interface GetBasicLicenseStatusResponse extends ResponseBase { - eligible_to_start_basic: boolean +export interface ClusterRerouteCommandCancelAction { + index: IndexName + shard: integer + node: string + allow_primary?: boolean } -export interface GetBucketsRequest extends RequestBase { - job_id: Id - timestamp?: Timestamp - from?: integer - size?: integer - exclude_interim?: boolean - sort?: Field - desc?: boolean - start?: DateString - end?: DateString +export interface ClusterRerouteCommandMoveAction { + index: IndexName + shard: integer + from_node: string + to_node: string +} + +export interface ClusterRerouteRequest extends RequestBase { + dry_run?: boolean + explain?: boolean + metric?: Metrics + 
retry_failed?: boolean + master_timeout?: Time + timeout?: Time body?: { - anomaly_score?: double - desc?: boolean - exclude_interim?: boolean - expand?: boolean - page?: Page - sort?: Field - start?: DateString - end?: DateString + commands?: ClusterRerouteCommand[] } } -export interface GetBucketsResponse extends ResponseBase { - buckets: Array - count: long +export interface ClusterRerouteRerouteDecision { + decider: string + decision: string + explanation: string } -export interface GetBuiltinPrivilegesRequest extends RequestBase { +export interface ClusterRerouteRerouteExplanation { + command: string + decisions: ClusterRerouteRerouteDecision[] + parameters: ClusterRerouteRerouteParameters } -export interface GetBuiltinPrivilegesResponse extends ResponseBase { - cluster: Array - index: Array +export interface ClusterRerouteRerouteParameters { + allow_primary: boolean + index: IndexName + node: NodeName + shard: integer + from_node?: NodeName + to_node?: NodeName } -export interface GetCalendarEventsRequest extends RequestBase { - calendar_id: Id - job_id?: Id - end?: DateString - from?: integer - start?: string - size?: integer - body?: { - end?: DateString - from?: integer - start?: string - size?: integer - } +export interface ClusterRerouteRerouteState { + cluster_uuid: Uuid + state_uuid?: Uuid + master_node?: string + version?: VersionNumber + blocks?: EmptyObject + nodes?: Record + routing_table?: Record + routing_nodes?: ClusterClusterStateRoutingNodes + security_tokens?: Record + snapshots?: ClusterClusterStateSnapshots + snapshot_deletions?: ClusterClusterStateDeletedSnapshots + metadata?: ClusterClusterStateMetadata } -export interface GetCalendarEventsResponse extends ResponseBase { - count: integer - events: Array +export interface ClusterRerouteResponse extends AcknowledgedResponseBase { + explanations?: ClusterRerouteRerouteExplanation[] + state: ClusterRerouteRerouteState } -export interface GetCalendarsRequest extends RequestBase { - calendar_id?: Id - body?: { - page?: Page - } +export interface ClusterStateClusterStateBlocks { + indices?: Record> } -export interface GetCalendarsResponse extends ResponseBase { - calendars: Array - count: long +export interface ClusterStateRequest extends RequestBase { + metric?: Metrics + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flat_settings?: boolean + ignore_unavailable?: boolean + local?: boolean + master_timeout?: Time + wait_for_metadata_version?: VersionNumber + wait_for_timeout?: Time } -export interface GetCategoriesRequest extends RequestBase { - job_id: Id - category_id?: CategoryId - body?: { - page?: Page - } +export interface ClusterStateResponse { + cluster_name: Name + cluster_uuid: Uuid + master_node?: string + state?: string[] + state_uuid?: Uuid + version?: VersionNumber + blocks?: ClusterStateClusterStateBlocks + metadata?: ClusterClusterStateMetadata + nodes?: Record + routing_table?: Record + routing_nodes?: ClusterClusterStateRoutingNodes + snapshots?: ClusterClusterStateSnapshots + snapshot_deletions?: ClusterClusterStateDeletedSnapshots +} + +export interface ClusterStatsCharFilterTypes { + char_filter_types: ClusterStatsFieldTypes[] + tokenizer_types: ClusterStatsFieldTypes[] + filter_types: ClusterStatsFieldTypes[] + analyzer_types: ClusterStatsFieldTypes[] + built_in_char_filters: ClusterStatsFieldTypes[] + built_in_tokenizers: ClusterStatsFieldTypes[] + built_in_filters: ClusterStatsFieldTypes[] + built_in_analyzers: ClusterStatsFieldTypes[] +} + +export interface 
ClusterStatsClusterFileSystem { + available_in_bytes: long + free_in_bytes: long + total_in_bytes: long } -export interface GetCategoriesResponse extends ResponseBase { - categories: Array +export interface ClusterStatsClusterIndices { + completion: CompletionStats count: long + docs: DocStats + fielddata: FielddataStats + query_cache: QueryCacheStats + segments: SegmentsStats + shards: ClusterStatsClusterIndicesShards + store: StoreStats + mappings: ClusterStatsFieldTypesMappings + analysis: ClusterStatsCharFilterTypes + versions?: ClusterStatsIndicesVersions[] +} + +export interface ClusterStatsClusterIndicesShards { + index?: ClusterStatsClusterIndicesShardsIndex + primaries?: double + replication?: double + total?: double } -export interface GetCertificatesRequest extends RequestBase { +export interface ClusterStatsClusterIndicesShardsIndex { + primaries: ClusterStatsClusterShardMetrics + replication: ClusterStatsClusterShardMetrics + shards: ClusterStatsClusterShardMetrics } -export type GetCertificatesResponse = ClusterCertificateInformation[] +export interface ClusterStatsClusterIngest { + number_of_pipelines: integer + processor_stats: Record +} -export interface GetDatafeedStatsRequest extends RequestBase { - datafeed_id?: Ids - allow_no_datafeeds?: boolean +export interface ClusterStatsClusterJvm { + max_uptime_in_millis: long + mem: ClusterStatsClusterJvmMemory + threads: long + versions: ClusterStatsClusterJvmVersion[] } -export interface GetDatafeedStatsResponse extends ResponseBase { - count: long - datafeeds: Array +export interface ClusterStatsClusterJvmMemory { + heap_max_in_bytes: long + heap_used_in_bytes: long } -export interface GetDatafeedsRequest extends RequestBase { - datafeed_id?: Id - allow_no_datafeeds?: boolean - exclude_generated?: boolean +export interface ClusterStatsClusterJvmVersion { + bundled_jdk: boolean + count: integer + using_bundled_jdk: boolean + version: VersionString + vm_name: string + vm_vendor: string + vm_version: VersionString } -export interface GetDatafeedsResponse extends ResponseBase { - count: long - datafeeds: Array +export interface ClusterStatsClusterNetworkTypes { + http_types: Record + transport_types: Record } -export interface GetEnrichPolicyRequest extends RequestBase { - name?: Names +export interface ClusterStatsClusterNodeCount { + coordinating_only: integer + data: integer + ingest: integer + master: integer + total: integer + voting_only: integer + data_cold: integer + data_frozen?: integer + data_content: integer + data_warm: integer + data_hot: integer + ml: integer + remote_cluster_client: integer + transform: integer } -export interface GetEnrichPolicyResponse extends ResponseBase { - policies: Array +export interface ClusterStatsClusterNodes { + count: ClusterStatsClusterNodeCount + discovery_types: Record + fs: ClusterStatsClusterFileSystem + ingest: ClusterStatsClusterIngest + jvm: ClusterStatsClusterJvm + network_types: ClusterStatsClusterNetworkTypes + os: ClusterStatsClusterOperatingSystem + packaging_types: ClusterStatsNodePackagingType[] + plugins: PluginStats[] + process: ClusterStatsClusterProcess + versions: VersionString[] +} + +export interface ClusterStatsClusterOperatingSystem { + allocated_processors: integer + available_processors: integer + mem: ClusterStatsOperatingSystemMemoryInfo + names: ClusterStatsClusterOperatingSystemName[] + pretty_names: ClusterStatsClusterOperatingSystemName[] + architectures?: ClusterStatsClusterOperatingSystemArchitecture[] } -export interface GetFeaturesRequest extends 
RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface ClusterStatsClusterOperatingSystemArchitecture { + count: integer + arch: string } -export interface GetFeaturesResponse extends ResponseBase { - stub: integer +export interface ClusterStatsClusterOperatingSystemName { + count: integer + name: Name } -export interface GetFieldMappingRequest extends RequestBase { - fields: Fields - index?: Indices - type?: Types - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - include_defaults?: boolean - include_type_name?: boolean - local?: boolean +export interface ClusterStatsClusterProcess { + cpu: ClusterStatsClusterProcessCpu + open_file_descriptors: ClusterStatsClusterProcessOpenFileDescriptors } -export interface GetFieldMappingResponse extends DictionaryResponseBase { +export interface ClusterStatsClusterProcessCpu { + percent: integer } -export interface GetFiltersRequest extends RequestBase { - filter_id?: Id - from?: integer - size?: integer +export interface ClusterStatsClusterProcessOpenFileDescriptors { + avg: long + max: long + min: long } -export interface GetFiltersResponse extends ResponseBase { +export interface ClusterStatsClusterProcessor { count: long - filters: Array + current: long + failed: long + time_in_millis: long } -export interface GetIlmStatusRequest extends RequestBase { +export interface ClusterStatsClusterShardMetrics { + avg: double + max: double + min: double } -export interface GetIlmStatusResponse extends ResponseBase { - operation_mode: LifecycleOperationMode +export interface ClusterStatsFieldTypes { + name: Name + count: integer + index_count: integer + script_count?: integer } -export interface GetIndexRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - flat_settings?: boolean - ignore_unavailable?: boolean - include_defaults?: boolean - include_type_name?: boolean - local?: boolean - master_timeout?: Time +export interface ClusterStatsFieldTypesMappings { + field_types: ClusterStatsFieldTypes[] + runtime_field_types?: ClusterStatsRuntimeFieldTypes[] } -export interface GetIndexResponse extends DictionaryResponseBase { +export interface ClusterStatsIndicesVersions { + index_count: integer + primary_shard_count: integer + total_primary_bytes: long + version: VersionString } -export interface GetIndexSettingsRequest extends RequestBase { - index?: Indices - name?: Names - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards +export interface ClusterStatsNodePackagingType { + count: integer + flavor: string + type: string +} + +export interface ClusterStatsOperatingSystemMemoryInfo { + free_in_bytes: long + free_percent: integer + total_in_bytes: long + used_in_bytes: long + used_percent: integer +} + +export interface ClusterStatsRequest extends RequestBase { + node_id?: NodeIds flat_settings?: boolean - ignore_unavailable?: boolean - include_defaults?: boolean - local?: boolean - master_timeout?: Time + timeout?: Time +} + +export interface ClusterStatsResponse extends NodesNodesResponseBase { + cluster_name: Name + cluster_uuid: Uuid + indices: ClusterStatsClusterIndices + nodes: ClusterStatsClusterNodes + status: ClusterClusterStatus + timestamp: long } -export interface GetIndexSettingsResponse extends DictionaryResponseBase { +export interface ClusterStatsRuntimeFieldTypes { + name: Name + count: integer + index_count: integer + scriptless_count: integer + shadowed_count: integer + lang: 
string[] + lines_max: integer + lines_total: integer + chars_max: integer + chars_total: integer + source_max: integer + source_total: integer + doc_max: integer + doc_total: integer } -export interface GetIndexTemplateRequest extends RequestBase { - name?: Names - flat_settings?: boolean - include_type_name?: boolean - local?: boolean - master_timeout?: Time +export interface DanglingIndicesIndexDeleteRequest extends RequestBase { + stub_a: string + stub_b: string + body?: { + stub_c: string + } } -export interface GetIndexTemplateResponse extends DictionaryResponseBase { +export interface DanglingIndicesIndexDeleteResponse { + stub: integer } -export interface GetInfluencersRequest extends RequestBase { - job_id: Id +export interface DanglingIndicesIndexImportRequest extends RequestBase { + stub_a: string + stub_b: string body?: { - descending?: boolean - end?: DateString - exclude_interim?: boolean - influencer_score?: double - page?: Page - sort?: Field - start?: DateString + stub_c: string } } -export interface GetInfluencersResponse extends ResponseBase { - count: long - influencers: Array +export interface DanglingIndicesIndexImportResponse { + stub: integer } -export interface GetJobStatsRequest extends RequestBase { - job_id?: Id - allow_no_jobs?: boolean +export interface DanglingIndicesIndicesListRequest extends RequestBase { + stub_a: string + stub_b: string + body?: { + stub_c: string + } } -export interface GetJobStatsResponse extends ResponseBase { - count: long - jobs: Array +export interface DanglingIndicesIndicesListResponse { + stub: integer } -export interface GetJobsRequest extends RequestBase { - job_id?: Ids - allow_no_jobs?: boolean - exclude_generated?: boolean +export interface EnrichConfiguration { + geo_match?: EnrichPolicy + match: EnrichPolicy } -export interface GetJobsResponse extends ResponseBase { - count: long - jobs: Array +export interface EnrichPolicy { + enrich_fields: Fields + indices: Indices + match_field: Field + query?: string } -export interface GetLicenseRequest extends RequestBase { - accept_enterprise?: boolean - local?: boolean +export interface EnrichSummary { + config: EnrichConfiguration } -export interface GetLicenseResponse extends ResponseBase { - license: LicenseInformation +export interface EnrichDeletePolicyRequest extends RequestBase { + name: Name } -export interface GetLifecycleRequest extends RequestBase { - policy?: Name - policy_id?: Id +export interface EnrichDeletePolicyResponse extends AcknowledgedResponseBase { } -export interface GetLifecycleResponse extends DictionaryResponseBase { +export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' + +export interface EnrichExecutePolicyExecuteEnrichPolicyStatus { + phase: EnrichExecutePolicyEnrichPolicyPhase } -export interface GetMappingRequest extends RequestBase { - index?: Indices - type?: Types - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - include_type_name?: boolean - local?: boolean - master_timeout?: Time +export interface EnrichExecutePolicyRequest extends RequestBase { + name: Name + wait_for_completion?: boolean } -export interface GetMappingResponse extends DictionaryResponseBase { +export interface EnrichExecutePolicyResponse { + status: EnrichExecutePolicyExecuteEnrichPolicyStatus + task_id?: TaskId } -export interface GetModelSnapshotsRequest extends RequestBase { - job_id: Id - snapshot_id?: Id - body?: { - desc?: boolean - end?: DateString - page?: Page - sort?: Field - 
start?: DateString - } +export interface EnrichGetPolicyRequest extends RequestBase { + name?: Names } -export interface GetModelSnapshotsResponse extends ResponseBase { - count: long - model_snapshots: Array +export interface EnrichGetPolicyResponse { + policies: EnrichSummary[] } -export interface GetOverallBucketsRequest extends RequestBase { - job_id: Id +export interface EnrichPutPolicyRequest extends RequestBase { + name: Name body?: { - allow_no_jobs?: boolean - bucket_span?: Time - end?: DateString - exclude_interim?: boolean - overall_score?: double - start?: DateString - top_n?: integer + geo_match?: EnrichPolicy + match?: EnrichPolicy } } -export interface GetOverallBucketsResponse extends ResponseBase { - count: long - overall_buckets: Array -} - -export interface GetPipelineRequest extends RequestBase { - id?: Id - master_timeout?: Time - summary?: boolean +export interface EnrichPutPolicyResponse extends AcknowledgedResponseBase { } -export interface GetPipelineResponse extends DictionaryResponseBase { +export interface EnrichStatsCoordinatorStats { + executed_searches_total: long + node_id: Id + queue_size: integer + remote_requests_current: integer + remote_requests_total: long } -export interface GetPrivilegesRequest extends RequestBase { - application?: Name - name?: Name +export interface EnrichStatsExecutingPolicy { + name: Name + task: TaskInfo } -export interface GetPrivilegesResponse extends DictionaryResponseBase> { +export interface EnrichStatsRequest extends RequestBase { } -export interface GetRepositoryRequest extends RequestBase { - repository?: Names - local?: boolean - master_timeout?: Time +export interface EnrichStatsResponse { + coordinator_stats: EnrichStatsCoordinatorStats[] + executing_policies: EnrichStatsExecutingPolicy[] } -export interface GetRepositoryResponse extends DictionaryResponseBase { +export interface EqlEqlHits { + total?: SearchTypesTotalHits + events?: EqlHitsEvent[] + sequences?: EqlHitsSequence[] } -export interface GetRequest extends RequestBase { - id: Id - index: IndexName - type?: Type - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - source_enabled?: boolean - _source_excludes?: Fields - _source_includes?: Fields - stored_fields?: Fields - version?: VersionNumber - version_type?: VersionType - _source?: boolean | string | Array +export interface EqlEqlSearchResponseBase { + id?: Id + is_partial?: boolean + is_running?: boolean + took?: integer + timed_out?: boolean + hits: EqlEqlHits } -export interface GetResponse extends ResponseBase { +export interface EqlHitsEvent { _index: IndexName - fields?: Record - found: boolean _id: Id - _primary_term?: long - _routing?: string - _seq_no?: SequenceNumber - _source?: TDocument - _type: Type - _version?: VersionNumber + _source: TEvent + fields?: Record } -export interface GetRoleMappingRequest extends RequestBase { - name?: Name +export interface EqlHitsSequence { + events: EqlHitsEvent[] + join_keys: any[] } -export interface GetRoleMappingResponse extends DictionaryResponseBase { +export interface EqlDeleteRequest extends RequestBase { + id: Id } -export interface GetRoleRequest extends RequestBase { - name?: Name +export interface EqlDeleteResponse extends AcknowledgedResponseBase { } -export interface GetRoleResponse extends DictionaryResponseBase { +export interface EqlGetRequest extends RequestBase { + id: Id + keep_alive?: Time + wait_for_completion_timeout?: Time } -export interface GetRollupCapabilitiesRequest extends RequestBase { - id?: Id 
+export interface EqlGetResponse extends EqlEqlSearchResponseBase {
 }

-export interface GetRollupCapabilitiesResponse extends DictionaryResponseBase {
+export interface EqlGetStatusRequest extends RequestBase {
+  id: Id
 }

-export interface GetRollupIndexCapabilitiesRequest extends RequestBase {
-  index: Id
+export interface EqlGetStatusResponse {
+  id: Id
+  is_partial: boolean
+  is_running: boolean
+  start_time_in_millis?: EpochMillis
+  expiration_time_in_millis?: EpochMillis
+  completion_status?: integer
 }

-export interface GetRollupIndexCapabilitiesResponse extends DictionaryResponseBase {
+export interface EqlSearchRequest extends RequestBase {
+  index: IndexName
+  allow_no_indices?: boolean
+  expand_wildcards?: ExpandWildcards
+  ignore_unavailable?: boolean
+  keep_alive?: Time
+  keep_on_completion?: boolean
+  wait_for_completion_timeout?: Time
+  body?: {
+    query: string
+    case_sensitive?: boolean
+    event_category_field?: Field
+    tiebreaker_field?: Field
+    timestamp_field?: Field
+    fetch_size?: uint
+    filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
+    keep_alive?: Time
+    keep_on_completion?: boolean
+    wait_for_completion_timeout?: Time
+    size?: uint | float
+    fields?: (Field | EqlSearchSearchFieldFormatted)[]
+    result_position?: EqlSearchResultPosition
+  }
 }

-export interface GetRollupJobRequest extends RequestBase {
-  id?: Id
+export interface EqlSearchResponse extends EqlEqlSearchResponseBase {
 }

-export interface GetRollupJobResponse extends ResponseBase {
-  jobs: Array
+export type EqlSearchResultPosition = 'tail' | 'head'
+
+export interface EqlSearchSearchFieldFormatted {
+  field: Field
+  format?: string
 }

-export interface GetScriptContextRequest extends RequestBase {
-  stub_a: integer
-  stub_b: integer
+export interface FeaturesGetFeaturesRequest extends RequestBase {
+  stub_a: string
+  stub_b: string
   body?: {
-    stub_c: integer
+    stub_c: string
   }
 }

-export interface GetScriptContextResponse extends ResponseBase {
+export interface FeaturesGetFeaturesResponse {
   stub: integer
 }

-export interface GetScriptLanguagesRequest extends RequestBase {
-  stub_a: integer
-  stub_b: integer
+export interface FeaturesResetFeaturesRequest extends RequestBase {
+  stub_a: string
+  stub_b: string
   body?: {
-    stub_c: integer
+    stub_c: string
   }
 }

-export interface GetScriptLanguagesResponse extends ResponseBase {
+export interface FeaturesResetFeaturesResponse {
   stub: integer
 }

-export interface GetScriptRequest extends RequestBase {
-  id: Id
-  master_timeout?: Time
-}
-
-export interface GetScriptResponse extends ResponseBase {
-  _id: Id
-  found: boolean
-  script?: StoredScript
+export interface GraphConnection {
+  doc_count: long
+  source: long
+  target: long
+  weight: double
 }

-export interface GetSnapshotLifecycleManagementStatusRequest extends RequestBase {
+export interface GraphExploreControls {
+  sample_diversity?: GraphSampleDiversity
+  sample_size?: integer
+  timeout?: Time
+  use_significance: boolean
 }

-export interface GetSnapshotLifecycleManagementStatusResponse extends ResponseBase {
-  operation_mode: LifecycleOperationMode
+export interface GraphHop {
+  connections?: GraphHop
+  query: QueryDslQueryContainer
+  vertices: GraphVertexDefinition[]
 }

-export interface GetSnapshotLifecycleRequest extends RequestBase {
-  policy_id?: Names
+export interface GraphSampleDiversity {
+  field: Field
+  max_docs_per_value: integer
 }
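// A minimal usage sketch (not part of the generated definitions) for the EqlSearchRequest
// body declared above. Assumes a configured Client instance; the index name and EQL query
// are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function findProcessEvents (client: Client) {
  const { body } = await client.eql.search({
    index: 'my-logs',
    body: { query: 'process where process.name == "cmd.exe"', size: 10 }
  })
  // body.hits.events / body.hits.sequences follow the Eql* hit shapes in this diff
  console.log(body.hits.total)
}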
-export interface GetSnapshotLifecycleResponse extends DictionaryResponseBase {
+export interface GraphVertex {
+  depth: long
+  field: Field
+  term: string
+  weight: double
 }

-export interface GetSnapshotLifecycleStatsRequest extends RequestBase {
+export interface GraphVertexDefinition {
+  exclude?: string[]
+  field: Field
+  include?: GraphVertexInclude[]
+  min_doc_count?: long
+  shard_min_doc_count?: long
+  size?: integer
 }

-export interface GetSnapshotLifecycleStatsResponse extends ResponseBase {
-  retention_deletion_time: string
-  retention_deletion_time_millis: EpochMillis
-  retention_failed: long
-  retention_runs: long
-  retention_timed_out: long
-  total_snapshots_deleted: long
-  total_snapshot_deletion_failures: long
-  total_snapshots_failed: long
-  total_snapshots_taken: long
-  policy_stats: Array
+export interface GraphVertexInclude {
+  boost: double
+  term: string
 }

-export interface GetSnapshotRequest extends RequestBase {
-  repository: Name
-  snapshot: Names
-  ignore_unavailable?: boolean
-  master_timeout?: Time
-  verbose?: boolean
+export interface GraphExploreRequest extends RequestBase {
+  index: Indices
+  type?: Types
+  routing?: Routing
+  timeout?: Time
+  body?: {
+    connections?: GraphHop
+    controls?: GraphExploreControls
+    query?: QueryDslQueryContainer
+    vertices?: GraphVertexDefinition[]
+  }
 }

-export interface GetSnapshotResponse extends ResponseBase {
-  responses?: Array
-  snapshots?: Array
+export interface GraphExploreResponse {
+  connections: GraphConnection[]
+  failures: ShardFailure[]
+  timed_out: boolean
+  took: long
+  vertices: GraphVertex[]
 }

-export interface GetStats {
-  current: long
-  exists_time?: string
-  exists_time_in_millis: long
-  exists_total: long
-  missing_time?: string
-  missing_time_in_millis: long
-  missing_total: long
-  time?: string
-  time_in_millis: long
-  total: long
+export interface IlmAction {
 }

-export interface GetTaskRequest extends RequestBase {
-  task_id: Id
-  timeout?: Time
-  wait_for_completion?: boolean
+export interface IlmPhase {
+  actions: Record<string, IlmAction> | string[]
+  min_age?: Time
 }

-export interface GetTaskResponse extends ResponseBase {
-  completed: boolean
-  task: TaskInfo
-  response?: TaskStatus
-  error?: ErrorCause
+export interface IlmPhases {
+  cold?: IlmPhase
+  delete?: IlmPhase
+  hot?: IlmPhase
+  warm?: IlmPhase
 }

-export interface GetTransformRequest extends RequestBase {
-  transform_id?: Name
-  allow_no_match?: boolean
-  from?: integer
-  size?: integer
-  exclude_generated?: boolean
+export interface IlmPolicy {
+  phases: IlmPhases
+  name?: Name
 }

-export interface GetTransformResponse extends ResponseBase {
-  count: long
-  transforms: Array
+export interface IlmDeleteLifecycleRequest extends RequestBase {
+  policy?: Name
+  policy_id: Id
 }

-export interface GetTransformStatsRequest extends RequestBase {
-  transform_id: Name
-  allow_no_match?: boolean
-  from?: long
-  size?: long
+export interface IlmDeleteLifecycleResponse extends AcknowledgedResponseBase {
 }

-export interface GetTransformStatsResponse extends ResponseBase {
-  count: long
-  transforms: Array
+export interface IlmExplainLifecycleLifecycleExplain {
+  action: Name
+  action_time_millis: EpochMillis
+  age: Time
+  failed_step?: Name
+  failed_step_retry_count?: integer
+  index: IndexName
+  is_auto_retryable_error?: boolean
+  lifecycle_date_millis: EpochMillis
+  managed: boolean
+  phase: Name
+  phase_time_millis: EpochMillis
+  policy: Name
+  step: Name
+  step_info?: Record<string, any>
+  step_time_millis: EpochMillis
+  phase_execution: IlmExplainLifecycleLifecycleExplainPhaseExecution
 }
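// A minimal sketch (not part of the generated definitions) of building an IlmPolicy value
// from the IlmPolicy/IlmPhases/IlmPhase shapes above and storing it with ilm.putLifecycle
// (IlmPutLifecycleRequest is declared a little further below). The policy name, rollover
// size and ages are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function createRetentionPolicy (client: Client) {
  await client.ilm.putLifecycle({
    policy: 'my-retention-policy',
    body: {
      policy: {
        phases: {
          hot: { actions: { rollover: { max_size: '50gb' } } }, // actions: Record<string, IlmAction>
          delete: { min_age: '30d', actions: { delete: {} } }
        }
      }
    }
  })
}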
-export interface GetTrialLicenseStatusRequest extends RequestBase {
+export interface IlmExplainLifecycleLifecycleExplainPhaseExecution {
+  policy: Name
+  version: VersionNumber
+  modified_date_in_millis: EpochMillis
 }

-export interface GetTrialLicenseStatusResponse extends ResponseBase {
-  eligible_to_start_trial: boolean
+export interface IlmExplainLifecycleLifecycleExplainProject {
+  project: IlmExplainLifecycleLifecycleExplainProjectSummary
 }

-export interface GetUserAccessTokenRequest extends RequestBase {
-  body: {
-    grant_type?: AccessTokenGrantType
-    scope?: string
-    password?: string
-    kerberos_ticket?: string
-    refresh_token?: string
-    username?: string
-  }
 }

+export interface IlmExplainLifecycleLifecycleExplainProjectSummary {
+  index: IndexName
+  managed: boolean
 }

-export interface GetUserAccessTokenResponse extends ResponseBase {
-  access_token: string
-  expires_in: long
-  scope?: string
-  type: string
-  refresh_token: string
-  kerberos_authentication_response_token?: string
-  authentication: AuthenticatedUser
+export interface IlmExplainLifecycleRequest extends RequestBase {
+  index: IndexName
+  only_errors?: boolean
+  only_managed?: boolean
 }

-export interface GetUserPrivilegesRequest extends RequestBase {
+export interface IlmExplainLifecycleResponse {
+  indices: Record<IndexName, IlmExplainLifecycleLifecycleExplain> | IlmExplainLifecycleLifecycleExplainProject
 }

-export interface GetUserPrivilegesResponse extends ResponseBase {
-  applications: Array
-  cluster: Array
-  global: Array
-  indices: Array
-  run_as: Array
+export interface IlmGetLifecycleLifecycle {
+  modified_date: DateString
+  policy: IlmPolicy
+  version: VersionNumber
 }

-export interface GetUserRequest extends RequestBase {
-  username?: Names
+export interface IlmGetLifecycleRequest extends RequestBase {
+  policy?: Name
+  policy_id?: Id
 }

-export interface GetUserResponse extends DictionaryResponseBase {
+export interface IlmGetLifecycleResponse extends DictionaryResponseBase {
 }

-export interface GetWatchRequest extends RequestBase {
-  id: Name
+export interface IlmGetStatusRequest extends RequestBase {
 }

-export interface GetWatchResponse extends ResponseBase {
-  found: boolean
-  _id: Id
-  status?: WatchStatus
-  watch?: Watch
-  _primary_term?: integer
-  _seq_no?: SequenceNumber
-  _version?: VersionNumber
+export interface IlmGetStatusResponse {
+  operation_mode: LifecycleOperationMode
 }

-export interface GlobalAggregation extends BucketAggregationBase {
+export interface IlmMoveToStepRequest extends RequestBase {
+  index: IndexName
+  body?: {
+    current_step?: IlmMoveToStepStepKey
+    next_step?: IlmMoveToStepStepKey
+  }
 }

-export interface GlobalPrivileges {
-  application: ApplicationGlobalUserPrivileges
+export interface IlmMoveToStepResponse extends AcknowledgedResponseBase {
 }

-export interface GoogleNormalizedDistanceHeuristic {
-  background_is_superset: boolean
+export interface IlmMoveToStepStepKey {
+  action: string
+  name: string
+  phase: string
 }

-export interface GrantApiKeyRequest extends RequestBase {
-  body: {
-    api_key: ApiKey
-    grant_type: ApiKeyGrantType
-    access_token?: string
-    username?: string
-    password?: string
+export interface IlmPutLifecycleRequest extends RequestBase {
+  policy?: Name
+  policy_id?: Id
+  body?: {
+    policy?: IlmPolicy
   }
 }

-export interface GrantApiKeyResponse extends ResponseBase {
-  api_key: string
-  id: Id
-  name: Name
-  expiration?: EpochMillis
+export interface IlmPutLifecycleResponse extends AcknowledgedResponseBase {
 }

-export interface GraphConnection {
-  doc_count: long
-  source: long
-  target: long
-  weight: double
+export interface IlmRemovePolicyRequest extends RequestBase {
+  index: IndexName
 }

-export interface GraphExploreControls {
-
sample_diversity?: SampleDiversity - sample_size?: integer - timeout?: Time - use_significance: boolean +export interface IlmRemovePolicyResponse { + failed_indexes: IndexName[] + has_failures: boolean } -export interface GraphExploreRequest extends RequestBase { - index: Indices - type?: Types - routing?: Routing - timeout?: Time - body?: { - connections?: Hop - controls?: GraphExploreControls - query?: QueryContainer - vertices?: Array - } +export interface IlmRetryRequest extends RequestBase { + index: IndexName } -export interface GraphExploreResponse extends ResponseBase { - connections: Array - failures: Array - timed_out: boolean - took: long - vertices: Array +export interface IlmRetryResponse extends AcknowledgedResponseBase { } -export interface GraphVertex { - depth: long - field: string - term: string - weight: double +export interface IlmStartRequest extends RequestBase { + body?: { + stub: boolean + } } -export interface GraphVertexDefinition { - exclude?: Array - field: Field - include?: Array - min_doc_count?: long - shard_min_doc_count?: long - size?: integer +export interface IlmStartResponse extends AcknowledgedResponseBase { } -export interface GraphVertexInclude { - boost: double - term: string +export interface IlmStopRequest extends RequestBase { + body?: { + stub: boolean + } } -export interface GrokProcessor extends ProcessorBase { - field: Field - ignore_missing?: boolean - pattern_definitions: Record - patterns: Array - trace_match?: boolean +export interface IlmStopResponse extends AcknowledgedResponseBase { } -export interface GrokProcessorPatternsRequest extends RequestBase { +export interface IndicesAlias { + filter?: QueryDslQueryContainer + index_routing?: Routing + is_hidden?: boolean + is_write_index?: boolean + routing?: Routing + search_routing?: Routing } -export interface GrokProcessorPatternsResponse extends ResponseBase { - patterns: Record +export interface IndicesAliasDefinition { + filter?: QueryDslQueryContainer + index_routing?: string + is_write_index?: boolean + routing?: string + search_routing?: string } -export type GroupBy = 'nodes' | 'parents' | 'none' +export type IndicesDataStreamHealthStatus = 'GREEN' | 'green' | 'YELLOW' | 'yellow' | 'RED' | 'red' -export interface GsubProcessor extends ProcessorBase { - field: Field - ignore_missing?: boolean - pattern: string - replacement: string - target_field?: Field +export interface IndicesFielddataFrequencyFilter { + max: double + min: double + min_segment_size: integer } -export interface HasChildQuery extends QueryBase { - ignore_unmapped?: boolean - inner_hits?: InnerHits - max_children?: integer - min_children?: integer - query?: QueryContainer - score_mode?: ChildScoreMode - type?: RelationName +export type IndicesIndexCheckOnStartup = 'false' | 'checksum' | 'true' + +export interface IndicesIndexRouting { + allocation?: IndicesIndexRoutingAllocation + rebalance?: IndicesIndexRoutingRebalance } -export interface HasParentQuery extends QueryBase { - ignore_unmapped?: boolean - inner_hits?: InnerHits - parent_type?: RelationName - query?: QueryContainer - score?: boolean +export interface IndicesIndexRoutingAllocation { + enable?: IndicesIndexRoutingAllocationOptions + include?: IndicesIndexRoutingAllocationInclude + initial_recovery?: IndicesIndexRoutingAllocationInitialRecovery + disk?: IndicesIndexRoutingAllocationDisk } -export interface HasPrivilegesRequest extends RequestBase { - user?: Name - body: { - application?: Array - cluster?: Array - index?: Array - } +export interface 
IndicesIndexRoutingAllocationDisk { + threshold_enabled: boolean | string } -export interface HasPrivilegesResponse extends ResponseBase { - application: ApplicationsPrivileges - cluster: Record - has_all_requested: boolean - index: Record - username: string +export interface IndicesIndexRoutingAllocationInclude { + _tier_preference?: string + _id?: Id } -export interface HdrMethod { - number_of_significant_value_digits?: integer +export interface IndicesIndexRoutingAllocationInitialRecovery { + _id?: Id } -export interface HdrPercentileItem { - key: double - value: double +export type IndicesIndexRoutingAllocationOptions = 'all' | 'primaries' | 'new_primaries' | 'none' + +export interface IndicesIndexRoutingRebalance { + enable: IndicesIndexRoutingRebalanceOptions +} + +export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replicas' | 'none' + +export interface IndicesIndexSettingBlocks { + read_only?: boolean + 'index.blocks.read_only'?: boolean + read_only_allow_delete?: boolean + 'index.blocks.read_only_allow_delete'?: boolean + read?: boolean + 'index.blocks.read'?: boolean + write?: boolean | string + 'index.blocks.write'?: boolean | string + metadata?: boolean + 'index.blocks.metadata'?: boolean +} + +export interface IndicesIndexSettings { + number_of_shards?: integer | string + 'index.number_of_shards'?: integer | string + number_of_replicas?: integer | string + 'index.number_of_replicas'?: integer | string + number_of_routing_shards?: integer + 'index.number_of_routing_shards'?: integer + check_on_startup?: IndicesIndexCheckOnStartup + 'index.check_on_startup'?: IndicesIndexCheckOnStartup + codec?: string + 'index.codec'?: string + routing_partition_size?: integer | string + 'index.routing_partition_size'?: integer | string + 'soft_deletes.retention_lease.period'?: Time + 'index.soft_deletes.retention_lease.period'?: Time + load_fixed_bitset_filters_eagerly?: boolean + 'index.load_fixed_bitset_filters_eagerly'?: boolean + hidden?: boolean | string + 'index.hidden'?: boolean | string + auto_expand_replicas?: string + 'index.auto_expand_replicas'?: string + 'search.idle.after'?: Time + 'index.search.idle.after'?: Time + refresh_interval?: Time + 'index.refresh_interval'?: Time + max_result_window?: integer + 'index.max_result_window'?: integer + max_inner_result_window?: integer + 'index.max_inner_result_window'?: integer + max_rescore_window?: integer + 'index.max_rescore_window'?: integer + max_docvalue_fields_search?: integer + 'index.max_docvalue_fields_search'?: integer + max_script_fields?: integer + 'index.max_script_fields'?: integer + max_ngram_diff?: integer + 'index.max_ngram_diff'?: integer + max_shingle_diff?: integer + 'index.max_shingle_diff'?: integer + blocks?: IndicesIndexSettingBlocks + 'index.blocks'?: IndicesIndexSettingBlocks + max_refresh_listeners?: integer + 'index.max_refresh_listeners'?: integer + 'analyze.max_token_count'?: integer + 'index.analyze.max_token_count'?: integer + 'highlight.max_analyzed_offset'?: integer + 'index.highlight.max_analyzed_offset'?: integer + max_terms_count?: integer + 'index.max_terms_count'?: integer + max_regex_length?: integer + 'index.max_regex_length'?: integer + routing?: IndicesIndexRouting + 'index.routing'?: IndicesIndexRouting + gc_deletes?: Time + 'index.gc_deletes'?: Time + default_pipeline?: PipelineName + 'index.default_pipeline'?: PipelineName + final_pipeline?: PipelineName + 'index.final_pipeline'?: PipelineName + lifecycle?: IndicesIndexSettingsLifecycle + 'index.lifecycle'?: 
IndicesIndexSettingsLifecycle + provided_name?: Name + 'index.provided_name'?: Name + creation_date?: DateString + 'index.creation_date'?: DateString + uuid?: Uuid + 'index.uuid'?: Uuid + version?: IndicesIndexVersioning + 'index.version'?: IndicesIndexVersioning + verified_before_close?: boolean | string + 'index.verified_before_close'?: boolean | string + format?: string | integer + 'index.format'?: string | integer + max_slices_per_scroll?: integer + 'index.max_slices_per_scroll'?: integer + 'translog.durability'?: string + 'index.translog.durability'?: string + 'query_string.lenient'?: boolean | string + 'index.query_string.lenient'?: boolean | string + priority?: integer | string + 'index.priority'?: integer | string + top_metrics_max_size?: integer + analysis?: IndicesIndexSettingsAnalysis +} + +export interface IndicesIndexSettingsAnalysis { + char_filter?: Record +} + +export interface IndicesIndexSettingsLifecycle { + name: Name } -export interface HdrPercentilesAggregate extends AggregateBase { - values: Array +export interface IndicesIndexState { + aliases?: Record + mappings?: MappingTypeMapping + settings: IndicesIndexSettings | IndicesIndexStatePrefixedSettings } -export type Health = 'green' | 'yellow' | 'red' +export interface IndicesIndexStatePrefixedSettings { + index: IndicesIndexSettings +} -export interface Highlight { - fields: Record - type?: HighlighterType - boundary_chars?: string - boundary_max_scan?: integer - boundary_scanner?: BoundaryScanner - boundary_scanner_locale?: string - encoder?: HighlighterEncoder - fragmenter?: HighlighterFragmenter - fragment_offset?: integer - fragment_size?: integer - max_fragment_length?: integer - no_match_size?: integer - number_of_fragments?: integer - order?: HighlighterOrder - post_tags?: Array - pre_tags?: Array - require_field_match?: boolean - tags_schema?: HighlighterTagsSchema - highlight_query?: QueryContainer - max_analyzed_offset?: string | integer +export interface IndicesIndexVersioning { + created: VersionString } -export interface HighlightField { - boundary_chars?: string - boundary_max_scan?: integer - boundary_scanner?: BoundaryScanner - boundary_scanner_locale?: string - field?: Field - force_source?: boolean - fragmenter?: HighlighterFragmenter - fragment_offset?: integer - fragment_size?: integer - highlight_query?: QueryContainer - matched_fields?: Fields - max_fragment_length?: integer - no_match_size?: integer - number_of_fragments?: integer - order?: HighlighterOrder - phrase_limit?: integer - post_tags?: Array - pre_tags?: Array - require_field_match?: boolean - tags_schema?: HighlighterTagsSchema - type?: HighlighterType | string +export interface IndicesNumericFielddata { + format: IndicesNumericFielddataFormat } -export type HighlighterEncoder = 'default' | 'html' +export type IndicesNumericFielddataFormat = 'array' | 'disabled' -export type HighlighterFragmenter = 'simple' | 'span' +export interface IndicesStringFielddata { + format: IndicesStringFielddataFormat +} -export type HighlighterOrder = 'score' +export type IndicesStringFielddataFormat = 'paged_bytes' | 'disabled' -export type HighlighterTagsSchema = 'styled' +export interface IndicesTemplateMapping { + aliases: Record + index_patterns: Name[] + mappings: MappingTypeMapping + order: integer + settings: Record + version?: VersionNumber +} -export type HighlighterType = 'plain' | 'fvh' | 'unified' +export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' -export interface HistogramAggregation extends 
BucketAggregationBase {
-  extended_bounds?: ExtendedBounds
-  hard_bounds?: ExtendedBounds
-  field?: Field
-  interval?: double
-  min_doc_count?: integer
-  missing?: double
-  offset?: double
-  order?: HistogramOrder
-  script?: Script
-  format?: string
+export interface IndicesAddBlockIndicesBlockStatus {
+  name: IndexName
+  blocked: boolean
 }

-export interface HistogramOrder {
-  _count?: SortOrder
-  _key?: SortOrder
+export interface IndicesAddBlockRequest extends RequestBase {
+  index: IndexName
+  block: IndicesAddBlockIndicesBlockOptions
+  allow_no_indices?: boolean
+  expand_wildcards?: ExpandWildcards
+  ignore_unavailable?: boolean
+  master_timeout?: Time
+  timeout?: Time
 }

-export interface HistogramProperty extends PropertyBase {
-  ignore_malformed?: boolean
-  type: 'histogram'
+export interface IndicesAddBlockResponse extends AcknowledgedResponseBase {
+  shards_acknowledged: boolean
+  indices: IndicesAddBlockIndicesBlockStatus[]
 }

-export interface HistogramRollupGrouping {
-  fields: Fields
-  interval: long
+export interface IndicesAnalyzeAnalyzeDetail {
+  analyzer?: IndicesAnalyzeAnalyzerDetail
+  charfilters?: IndicesAnalyzeCharFilterDetail[]
+  custom_analyzer: boolean
+  tokenfilters?: IndicesAnalyzeTokenDetail[]
+  tokenizer?: IndicesAnalyzeTokenDetail
 }

-export interface Hit<TDocument = unknown> {
-  _index: IndexName
-  _id: Id
-  _score?: double
-  _type?: Type
-  _explanation?: Explanation
-  fields?: Record
-  highlight?: Record<string, Array<string>>
-  inner_hits?: Record<string, InnerHitsResult>
-  matched_queries?: Array<string>
-  _nested?: NestedIdentity
-  _ignored?: Array<string>
-  _shard?: string
-  _node?: string
-  _routing?: string
-  _source?: TDocument
-  _seq_no?: SequenceNumber
-  _primary_term?: long
-  _version?: VersionNumber
-  sort?: SortResults
+export interface IndicesAnalyzeAnalyzeToken {
+  end_offset: long
+  position: long
+  position_length?: long
+  start_offset: long
+  token: string
+  type: string
 }

-export interface HitsMetadata<T = unknown> {
-  total: TotalHits | long
-  hits: Array<Hit<T>>
-  max_score?: double
+export interface IndicesAnalyzeAnalyzerDetail {
+  name: string
+  tokens: IndicesAnalyzeExplainAnalyzeToken[]
 }

-export interface HoltLinearModelSettings {
-  alpha?: float
-  beta?: float
+export interface IndicesAnalyzeCharFilterDetail {
+  filtered_text: string[]
+  name: string
 }

-export interface HoltWintersModelSettings {
-  alpha?: float
-  beta?: float
-  gamma?: float
-  pad?: boolean
-  period?: integer
-  type?: HoltWintersType
+export interface IndicesAnalyzeExplainAnalyzeToken {
+  bytes: string
+  end_offset: long
+  keyword?: boolean
+  position: long
+  positionLength: long
+  start_offset: long
+  termFrequency: long
+  token: string
+  type: string
+}
+
+export interface IndicesAnalyzeRequest extends RequestBase {
+  index?: IndexName
+  body?: {
+    analyzer?: string
+    attributes?: string[]
+    char_filter?: (string | AnalysisCharFilter)[]
+    explain?: boolean
+    field?: Field
+    filter?: (string | AnalysisTokenFilter)[]
+    normalizer?: string
+    text?: IndicesAnalyzeTextToAnalyze
+    tokenizer?: string | AnalysisTokenizer
+  }
+}

-export type HoltWintersType = 'add' | 'mult'

-export interface Hop {
-  connections?: Hop
-  query: QueryContainer
-  vertices: Array<GraphVertexDefinition>
+export interface IndicesAnalyzeResponse {
+  detail?: IndicesAnalyzeAnalyzeDetail
+  tokens?: IndicesAnalyzeAnalyzeToken[]
 }

-export interface HotThreadInformation {
-  hosts: Array<string>
-  node_id: string
-  node_name: string
-  threads: Array<string>
+export type IndicesAnalyzeTextToAnalyze = string | string[]
+
+export interface IndicesAnalyzeTokenDetail {
+  name: string
+  tokens: IndicesAnalyzeExplainAnalyzeToken[]
 }
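// A minimal usage sketch (not part of the generated definitions) for the
// IndicesAnalyzeRequest/IndicesAnalyzeResponse pair above. Assumes a configured Client
// instance; the analyzer choice and sample text are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function inspectTokens (client: Client) {
  const { body } = await client.indices.analyze({
    body: { analyzer: 'standard', text: 'Hello Elasticsearch' }
  })
  // each entry follows IndicesAnalyzeAnalyzeToken
  for (const t of body.tokens ?? []) console.log(t.token, t.start_offset)
}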
+export interface IndicesClearCacheRequest extends RequestBase {
+  index?: Indices
+  allow_no_indices?: boolean
+  expand_wildcards?: ExpandWildcards
+  fielddata?: boolean
+  fields?: Fields
+  ignore_unavailable?: boolean
+  query?: boolean
+  request?: boolean
 }

-export interface HourlySchedule {
-  minute: Array<integer>
+export interface IndicesClearCacheResponse extends ShardsOperationResponseBase {
 }

-export interface HtmlStripCharFilter extends CharFilterBase {
+export interface IndicesCloneRequest extends RequestBase {
+  index: IndexName
+  target: Name
+  master_timeout?: Time
+  timeout?: Time
+  wait_for_active_shards?: WaitForActiveShards
+  body?: {
+    aliases?: Record<IndexName, IndicesAlias>
+    settings?: Record<string, any>
+  }
 }

-export interface HttpInput {
-  extract: Array<string>
-  request: HttpInputRequestDefinition
-  response_content_type: ResponseContentType
+export interface IndicesCloneResponse extends AcknowledgedResponseBase {
+  index: IndexName
+  shards_acknowledged: boolean
 }

-export interface HttpInputAuthentication {
-  basic: HttpInputBasicAuthentication
+export interface IndicesCloseCloseIndexResult {
+  closed: boolean
+  shards?: Record<string, IndicesCloseCloseShardResult>
 }

-export interface HttpInputBasicAuthentication {
-  password: string
-  username: string
+export interface IndicesCloseCloseShardResult {
+  failures: ShardFailure[]
 }

-export type HttpInputMethod = 'head' | 'get' | 'post' | 'put' | 'delete'
-
-export interface HttpInputProxy {
-  host: string
-  port: integer
+export interface IndicesCloseRequest extends RequestBase {
+  index: Indices
+  allow_no_indices?: boolean
+  expand_wildcards?: ExpandWildcards
+  ignore_unavailable?: boolean
+  master_timeout?: Time
+  timeout?: Time
+  wait_for_active_shards?: WaitForActiveShards
 }

-export interface HttpInputRequestDefinition {
-  auth?: HttpInputAuthentication
-  body?: string
-  connection_timeout?: Time
-  headers?: Record<string, string>
-  host?: string
-  method?: HttpInputMethod
-  params?: Record<string, string>
-  path?: string
-  port?: integer
-  proxy?: HttpInputProxy
-  read_timeout?: Time
-  scheme?: ConnectionScheme
-  url?: string
+export interface IndicesCloseResponse extends AcknowledgedResponseBase {
+  indices: Record<IndexName, IndicesCloseCloseIndexResult>
+  shards_acknowledged: boolean
 }

-export interface HttpInputRequestResult extends HttpInputRequestDefinition {
+export interface IndicesCreateRequest extends RequestBase {
+  index: IndexName
+  include_type_name?: boolean
+  master_timeout?: Time
+  timeout?: Time
+  wait_for_active_shards?: WaitForActiveShards
+  body?: {
+    aliases?: Record<IndexName, IndicesAlias>
+    mappings?: Record<string, MappingTypeMapping> | MappingTypeMapping
+    settings?: Record<string, any>
+  }
 }

-export interface HttpInputResponseResult {
-  body: string
-  headers: Record<string, Array<string>>
-  status: integer
+export interface IndicesCreateResponse extends AcknowledgedResponseBase {
+  index: IndexName
+  shards_acknowledged: boolean
 }

-export interface HttpStats {
-  current_open: integer
-  total_opened: long
+export interface IndicesCreateDataStreamRequest extends RequestBase {
+  name: DataStreamName
 }

-export interface HunspellTokenFilter extends TokenFilterBase {
-  dedup: boolean
-  dictionary: string
-  locale: string
-  longest_only: boolean
+export interface IndicesCreateDataStreamResponse extends AcknowledgedResponseBase {
 }

-export interface HyphenationDecompounderTokenFilter extends CompoundWordTokenFilterBase {
+export interface IndicesDataStreamsStatsDataStreamsStatsItem {
+  backing_indices: integer
+  data_stream: Name
+  store_size?: ByteSize
+  store_size_bytes: integer
+  maximum_timestamp: integer
 }

-export type Id = string
-
-export type Ids = Id | Array<Id>
-
-export interface IdsQuery extends QueryBase {
-  values?: Array<Id> | Array<long>
+export
interface IndicesDataStreamsStatsRequest extends RequestBase { + name?: IndexName + expand_wildcards?: ExpandWildcards + human?: boolean } -export interface IlmPolicyStatistics { - indices_managed: integer - phases: Phases +export interface IndicesDataStreamsStatsResponse { + _shards: ShardStatistics + backing_indices: integer + data_stream_count: integer + total_store_sizes?: ByteSize + total_store_size_bytes: integer + data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] } -export interface IlmUsage { - policy_count: integer - policy_stats: Array +export interface IndicesDeleteRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + master_timeout?: Time + timeout?: Time } -export interface ImportDanglingIndexRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface IndicesDeleteResponse extends IndicesResponseBase { } -export interface ImportDanglingIndexResponse extends ResponseBase { - stub: integer +export interface IndicesDeleteAliasRequest extends RequestBase { + index: Indices + name: Names + master_timeout?: Time + timeout?: Time } -export interface IndexActionResult { - response: IndexActionResultIndexResponse +export interface IndicesDeleteAliasResponse extends AcknowledgedResponseBase { } -export interface IndexActionResultIndexResponse { - created: boolean - id: Id - index: IndexName - result: Result - version: VersionNumber - type?: Type +export interface IndicesDeleteDataStreamRequest extends RequestBase { + name: DataStreamName } -export interface IndexAddBlockRequest extends RequestBase { - index: IndexName - block: IndexBlockOptions - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcardOptions - ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time +export interface IndicesDeleteDataStreamResponse extends AcknowledgedResponseBase { } -export interface IndexAddBlockResponse extends AcknowledgedResponseBase { - shards_acknowledged: boolean - indices: Array +export interface IndicesDeleteIndexTemplateRequest extends RequestBase { + name: Name } -export type IndexAlias = string - -export interface IndexAliases { - aliases: Record +export interface IndicesDeleteIndexTemplateResponse extends AcknowledgedResponseBase { } -export type IndexBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' +export interface IndicesDeleteTemplateRequest extends RequestBase { + name: Name + master_timeout?: Time + timeout?: Time +} -export interface IndexBlockStatus { - name: IndexName - blocked: boolean +export interface IndicesDeleteTemplateResponse extends AcknowledgedResponseBase { } -export interface IndexExistsRequest extends RequestBase { +export interface IndicesExistsRequest extends RequestBase { index: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards @@ -6967,4142 +8525,4125 @@ export interface IndexExistsRequest extends RequestBase { local?: boolean } -export type IndexExistsResponse = boolean +export type IndicesExistsResponse = boolean -export interface IndexField { - enabled: boolean +export interface IndicesExistsAliasRequest extends RequestBase { + name: Names + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + local?: boolean } -export interface IndexHealthStats { - active_primary_shards: integer - active_shards: integer - initializing_shards: integer - number_of_replicas: integer - number_of_shards: integer - 
relocating_shards: integer - shards?: Record - status: Health - unassigned_shards: integer -} +export type IndicesExistsAliasResponse = boolean -export interface IndexMappings { - item: TypeMapping - mappings: TypeMapping +export interface IndicesExistsIndexTemplateRequest extends RequestBase { + name: Name + master_timeout?: Time } -export type IndexName = string - -export type IndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets' - -export type IndexPattern = string +export type IndicesExistsIndexTemplateResponse = boolean -export type IndexPatterns = Array - -export interface IndexPrivilegesCheck { - names: Array - privileges: Array +export interface IndicesExistsTemplateRequest extends RequestBase { + name: Names + flat_settings?: boolean + local?: boolean + master_timeout?: Time } -export interface IndexRequest extends RequestBase { - id?: Id - index: IndexName - type?: Type - if_primary_term?: long - if_seq_no?: SequenceNumber - op_type?: OpType - pipeline?: string - refresh?: Refresh - routing?: Routing - timeout?: Time - version?: VersionNumber - version_type?: VersionType - wait_for_active_shards?: WaitForActiveShards - require_alias?: boolean - body: TDocument -} +export type IndicesExistsTemplateResponse = boolean -export interface IndexResponse extends WriteResponseBase { +export interface IndicesExistsTypeRequest extends RequestBase { + index: Indices + type: Types + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + local?: boolean } -export interface IndexSegment { - shards: Record> -} +export type IndicesExistsTypeResponse = boolean -export interface IndexState { - aliases: Record - mappings: TypeMapping - settings: Record +export interface IndicesFlushRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + force?: boolean + ignore_unavailable?: boolean + wait_if_ongoing?: boolean } -export interface IndexStats { - completion?: CompletionStats - docs?: DocStats - fielddata?: FielddataStats - flush?: FlushStats - get?: GetStats - indexing?: IndexingStats - merges?: MergesStats - query_cache?: QueryCacheStats - recovery?: RecoveryStats - refresh?: RefreshStats - request_cache?: RequestCacheStats - search?: SearchStats - segments?: SegmentsStats - store?: StoreStats - translog?: TranslogStats - warmer?: WarmerStats +export interface IndicesFlushResponse extends ShardsOperationResponseBase { } -export interface IndexTemplateExistsRequest extends RequestBase { - name: Names - flat_settings?: boolean - local?: boolean - master_timeout?: Time +export interface IndicesFlushSyncedRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean } -export type IndexTemplateExistsResponse = boolean - -export interface IndexedScript extends ScriptBase { - id: string +export interface IndicesFlushSyncedResponse extends DictionaryResponseBase { + _shards: ShardStatistics } -export type IndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' - -export interface IndexingStats { - index_current: long - delete_current: long - delete_time?: string - delete_time_in_millis: long - delete_total: long - is_throttled: boolean - noop_update_total: long - throttle_time?: string - throttle_time_in_millis: long - index_time?: string - index_time_in_millis: long - index_total: long - index_failed: long - types?: Record +export interface IndicesForcemergeRequest extends RequestBase { + index?: 
Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flush?: boolean + ignore_unavailable?: boolean + max_num_segments?: long + only_expunge_deletes?: boolean } -export type Indices = string | Array +export interface IndicesForcemergeResponse extends ShardsOperationResponseBase { +} -export interface IndicesCreateDataStreamRequest extends RequestBase { - name: DataStreamName +export interface IndicesFreezeRequest extends RequestBase { + index: IndexName + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards } -export interface IndicesCreateDataStreamResponse extends AcknowledgedResponseBase { +export interface IndicesFreezeResponse extends AcknowledgedResponseBase { + shards_acknowledged: boolean } -export interface IndicesDataStreamsStatsRequest extends RequestBase { - name?: IndexName - expand_wildcards?: ExpandWildcardOptions - human?: boolean +export interface IndicesGetRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flat_settings?: boolean + ignore_unavailable?: boolean + include_defaults?: boolean + include_type_name?: boolean + local?: boolean + master_timeout?: Time } -export interface IndicesDataStreamsStatsResponse extends ResponseBase { - _shards: ShardStatistics - backing_indices: integer - data_stream_count: integer - total_store_sizes?: ByteSize - total_store_size_bytes: integer - data_streams: Array +export interface IndicesGetResponse extends DictionaryResponseBase { } -export interface IndicesDeleteDataStreamRequest extends RequestBase { - name: DataStreamName +export interface IndicesGetAliasIndexAliases { + aliases: Record } -export interface IndicesDeleteDataStreamResponse extends AcknowledgedResponseBase { +export interface IndicesGetAliasRequest extends RequestBase { + name?: Names + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + local?: boolean +} + +export interface IndicesGetAliasResponse extends DictionaryResponseBase { } -export interface IndicesGetDataStreamItem { +export interface IndicesGetDataStreamIndicesGetDataStreamItem { name: DataStreamName - timestamp_field: IndicesGetDataStreamItemTimestampField - indices: Array + timestamp_field: IndicesGetDataStreamIndicesGetDataStreamItemTimestampField + indices: IndicesGetDataStreamIndicesGetDataStreamItemIndex[] generation: integer template: Name hidden: boolean - status: DataStreamHealthStatus + system?: boolean + status: IndicesDataStreamHealthStatus ilm_policy?: Name - _meta?: Record + _meta?: Metadata } -export interface IndicesGetDataStreamItemIndex { +export interface IndicesGetDataStreamIndicesGetDataStreamItemIndex { index_name: IndexName index_uuid: Uuid } -export interface IndicesGetDataStreamItemTimestampField { +export interface IndicesGetDataStreamIndicesGetDataStreamItemTimestampField { name: Field } export interface IndicesGetDataStreamRequest extends RequestBase { name?: IndexName - expand_wildcards?: ExpandWildcardOptions + expand_wildcards?: ExpandWildcards } -export interface IndicesGetDataStreamResponse extends ResponseBase { - data_streams: Array +export interface IndicesGetDataStreamResponse { + data_streams: IndicesGetDataStreamIndicesGetDataStreamItem[] } -export interface IndicesMigrateToDataStreamRequest extends RequestBase { - name: IndexName +export interface IndicesGetFieldMappingRequest extends 
RequestBase { + fields: Fields + index?: Indices + type?: Types + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + include_defaults?: boolean + include_type_name?: boolean + local?: boolean } -export interface IndicesMigrateToDataStreamResponse extends AcknowledgedResponseBase { +export interface IndicesGetFieldMappingResponse extends DictionaryResponseBase { } -export interface IndicesOptions { - allow_no_indices: boolean - expand_wildcards: ExpandWildcards - ignore_unavailable: boolean +export interface IndicesGetFieldMappingTypeFieldMappings { + mappings: Record } -export interface IndicesPrivileges { - field_security?: FieldSecurity - names: Indices - privileges: Array - query?: string | QueryContainer - allow_restricted_indices?: boolean +export interface IndicesGetIndexTemplateIndexTemplate { + index_patterns: Name[] + composed_of: Name[] + template: IndicesGetIndexTemplateIndexTemplateSummary + version?: VersionNumber + priority?: long + _meta?: Metadata + allow_auto_create?: boolean + data_stream?: Record } -export interface IndicesPromoteDataStreamRequest extends RequestBase { - name: IndexName +export interface IndicesGetIndexTemplateIndexTemplateItem { + name: Name + index_template: IndicesGetIndexTemplateIndexTemplate } -export interface IndicesPromoteDataStreamResponse extends ResponseBase { - stub: integer +export interface IndicesGetIndexTemplateIndexTemplateSummary { + aliases?: Record + mappings?: MappingTypeMapping + settings?: Record } -export interface IndicesResponseBase extends AcknowledgedResponseBase { - _shards?: ShardStatistics +export interface IndicesGetIndexTemplateRequest extends RequestBase { + name?: Name + local?: boolean + body?: { + flat_settings?: boolean + include_type_name?: boolean + master_timeout?: Time + } } -export interface IndicesShardStores { - shards: Record +export interface IndicesGetIndexTemplateResponse { + index_templates: IndicesGetIndexTemplateIndexTemplateItem[] } -export interface IndicesShardStoresRequest extends RequestBase { +export interface IndicesGetMappingIndexMappingRecord { + item?: MappingTypeMapping + mappings: MappingTypeMapping +} + +export interface IndicesGetMappingRequest extends RequestBase { index?: Indices + type?: Types allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - status?: string | Array -} - -export interface IndicesShardStoresResponse extends ResponseBase { - indices: Record + include_type_name?: boolean + local?: boolean + master_timeout?: Time } -export interface IndicesStats { - primaries: IndexStats - shards?: Record> - total: IndexStats - uuid?: string +export interface IndicesGetMappingResponse extends DictionaryResponseBase { } -export interface IndicesStatsRequest extends RequestBase { - metric?: Metrics +export interface IndicesGetSettingsRequest extends RequestBase { index?: Indices - completion_fields?: Fields + name?: Names + allow_no_indices?: boolean expand_wildcards?: ExpandWildcards - fielddata_fields?: Fields - fields?: Fields - forbid_closed_indices?: boolean - groups?: string | Array - include_segment_file_sizes?: boolean - include_unloaded_segments?: boolean - level?: Level - types?: Types + flat_settings?: boolean + ignore_unavailable?: boolean + include_defaults?: boolean + local?: boolean + master_timeout?: Time } -export interface IndicesStatsResponse extends ResponseBase { - indices?: Record - _shards: ShardStatistics - _all: IndicesStats +export interface IndicesGetSettingsResponse extends 
DictionaryResponseBase { } -export interface IndicesVersionsStats { - index_count: integer - primary_shard_count: integer - total_primary_bytes: long - version: VersionString +export interface IndicesGetTemplateRequest extends RequestBase { + name?: Names + flat_settings?: boolean + include_type_name?: boolean + local?: boolean + master_timeout?: Time } -export interface InferenceAggregation extends PipelineAggregationBase { - model_id: Name - inference_config?: InferenceConfigContainer +export interface IndicesGetTemplateResponse extends DictionaryResponseBase { } -export interface InferenceConfigContainer { - regression?: RegressionInferenceOptions - classification?: ClassificationInferenceOptions +export interface IndicesGetUpgradeRequest extends RequestBase { + stub: string } -export interface InferenceProcessor extends ProcessorBase { - model_id: Id - target_field: Field - field_map?: Record - inference_config?: InferenceProcessorConfig +export interface IndicesGetUpgradeResponse { + overlapping?: IndicesSimulateIndexTemplateOverlappingIndexTemplate[] + template?: IndicesTemplateMapping } -export interface InferenceProcessorConfig { - regression?: InferenceProcessorConfigRegression +export interface IndicesMigrateToDataStreamRequest extends RequestBase { + name: IndexName } -export interface InferenceProcessorConfigRegression { - results_field: string +export interface IndicesMigrateToDataStreamResponse extends AcknowledgedResponseBase { } -export interface Influence { - influencer_field_name: string - influencer_field_values: Array +export interface IndicesOpenRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards } -export interface Ingest { - timestamp: DateString - pipeline?: string +export interface IndicesOpenResponse extends AcknowledgedResponseBase { + shards_acknowledged: boolean } -export interface IngestStats { - count: long - current: long - failed: long - processors: Array - time_in_millis: long +export interface IndicesPromoteDataStreamRequest extends RequestBase { + name: IndexName } -export interface InlineGet { - fields?: Record - found: boolean - _seq_no: SequenceNumber - _primary_term: long - _routing?: Routing - _source: TDocument +export interface IndicesPromoteDataStreamResponse { + stub: integer } -export interface InlineRoleTemplate { - template: InlineRoleTemplateSource - format?: RoleTemplateFormat +export interface IndicesPutAliasRequest extends RequestBase { + index: Indices + name: Name + master_timeout?: Time + timeout?: Time + body?: { + filter?: QueryDslQueryContainer + index_routing?: Routing + is_write_index?: boolean + routing?: Routing + search_routing?: Routing + } } -export interface InlineRoleTemplateSource { - source: string +export interface IndicesPutAliasResponse extends AcknowledgedResponseBase { } -export interface InlineScript extends ScriptBase { - source: string +export interface IndicesPutIndexTemplateIndexTemplateMapping { + aliases?: Record + mappings?: MappingTypeMapping + settings?: IndicesIndexSettings } -export interface InnerHits { - name?: Name - size?: integer - from?: integer - collapse?: FieldCollapse - docvalue_fields?: Fields - explain?: boolean - highlight?: Highlight - ignore_unmapped?: boolean - script_fields?: Record - seq_no_primary_term?: boolean - fields?: Fields - sort?: Sort - _source?: boolean | SourceFilter - version?: boolean +export 
interface IndicesPutIndexTemplateRequest extends RequestBase { + name: Name + body?: { + index_patterns?: Indices + composed_of?: Name[] + template?: IndicesPutIndexTemplateIndexTemplateMapping + data_stream?: EmptyObject + priority?: integer + version?: VersionNumber + _meta?: Metadata + } } -export interface InnerHitsMetadata { - total: TotalHits | long - hits: Array>> - max_score?: double +export interface IndicesPutIndexTemplateResponse extends AcknowledgedResponseBase { } -export interface InnerHitsResult { - hits: InnerHitsMetadata +export interface IndicesPutMappingRequest extends RequestBase { + index?: Indices + type?: Type + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + include_type_name?: boolean + master_timeout?: Time + timeout?: Time + write_index_only?: boolean + body?: { + all_field?: MappingAllField + date_detection?: boolean + dynamic?: boolean | MappingDynamicMapping + dynamic_date_formats?: string[] + dynamic_templates?: Record | Record[] + field_names_field?: MappingFieldNamesField + index_field?: MappingIndexField + meta?: Record + numeric_detection?: boolean + properties?: Record + routing_field?: MappingRoutingField + size_field?: MappingSizeField + source_field?: MappingSourceField + runtime?: MappingRuntimeFields + } } -export interface InputContainer { - chain?: ChainInput - http?: HttpInput - search?: SearchInput - simple?: SimpleInput +export interface IndicesPutMappingResponse extends IndicesResponseBase { } -export type InputType = 'http' | 'search' | 'simple' +export interface IndicesPutSettingsIndexSettingsBody extends IndicesIndexSettings { + settings?: IndicesIndexSettings +} -export interface IntegerRangeProperty extends RangePropertyBase { - type: 'integer_range' +export interface IndicesPutSettingsRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flat_settings?: boolean + ignore_unavailable?: boolean + master_timeout?: Time + preserve_existing?: boolean + timeout?: Time + body?: IndicesPutSettingsIndexSettingsBody } -export interface Interval extends ScheduleBase { - factor: long - unit: IntervalUnit +export interface IndicesPutSettingsResponse extends AcknowledgedResponseBase { } -export type IntervalUnit = 's' | 'm' | 'h' | 'd' | 'w' +export interface IndicesPutTemplateRequest extends RequestBase { + name: Name + create?: boolean + flat_settings?: boolean + include_type_name?: boolean + master_timeout?: Time + timeout?: Time + body?: { + aliases?: Record + index_patterns?: string | string[] + mappings?: MappingTypeMapping + order?: integer + settings?: Record + version?: VersionNumber + } +} -export interface IntervalsAllOf { - intervals?: Array - max_gaps?: integer - ordered?: boolean - filter?: IntervalsFilter +export interface IndicesPutTemplateResponse extends AcknowledgedResponseBase { } -export interface IntervalsAnyOf { - intervals?: Array - filter?: IntervalsFilter +export interface IndicesRecoveryFileDetails { + length: long + name: string + recovered: long } -export interface IntervalsContainer { - all_of?: IntervalsAllOf - any_of?: IntervalsAnyOf - fuzzy?: IntervalsFuzzy - match?: IntervalsMatch - prefix?: IntervalsPrefix - wildcard?: IntervalsWildcard +export interface IndicesRecoveryRecoveryBytes { + percent: Percentage + recovered?: ByteSize + recovered_in_bytes: ByteSize + reused?: ByteSize + reused_in_bytes: ByteSize + total?: ByteSize + total_in_bytes: ByteSize } -export interface IntervalsFilter { - after?: 
IntervalsContainer - before?: IntervalsContainer - contained_by?: IntervalsContainer - containing?: IntervalsContainer - not_contained_by?: IntervalsContainer - not_containing?: IntervalsContainer - not_overlapping?: IntervalsContainer - overlapping?: IntervalsContainer - script?: Script +export interface IndicesRecoveryRecoveryFiles { + details?: IndicesRecoveryFileDetails[] + percent: Percentage + recovered: long + reused: long + total: long } -export interface IntervalsFuzzy { - analyzer?: string - fuzziness?: Fuzziness - prefix_length?: integer - term?: string - transpositions?: boolean - use_field?: Field +export interface IndicesRecoveryRecoveryIndexStatus { + bytes?: IndicesRecoveryRecoveryBytes + files: IndicesRecoveryRecoveryFiles + size: IndicesRecoveryRecoveryBytes + source_throttle_time?: Time + source_throttle_time_in_millis: EpochMillis + target_throttle_time?: Time + target_throttle_time_in_millis: EpochMillis + total_time_in_millis: EpochMillis + total_time?: Time } -export interface IntervalsMatch { - analyzer?: string - max_gaps?: integer - ordered?: boolean - query?: string - use_field?: Field - filter?: IntervalsFilter +export interface IndicesRecoveryRecoveryOrigin { + hostname?: string + host?: Host + transport_address?: TransportAddress + id?: Id + ip?: Ip + name?: Name + bootstrap_new_history_uuid?: boolean + repository?: Name + snapshot?: Name + version?: VersionString + restoreUUID?: Uuid + index?: IndexName } -export interface IntervalsPrefix { - analyzer?: string - prefix?: string - use_field?: Field +export interface IndicesRecoveryRecoveryStartStatus { + check_index_time: long + total_time_in_millis: string } -export interface IntervalsQuery extends QueryBase { - all_of?: IntervalsAllOf - any_of?: IntervalsAnyOf - fuzzy?: IntervalsFuzzy - match?: IntervalsMatch - prefix?: IntervalsPrefix - wildcard?: IntervalsWildcard +export interface IndicesRecoveryRecoveryStatus { + shards: IndicesRecoveryShardRecovery[] } -export interface IntervalsWildcard { - analyzer?: string - pattern?: string - use_field?: Field +export interface IndicesRecoveryRequest extends RequestBase { + index?: Indices + active_only?: boolean + detailed?: boolean } -export interface InvalidRoleTemplate { - template: string - format?: RoleTemplateFormat +export interface IndicesRecoveryResponse extends DictionaryResponseBase { } -export interface InvalidateApiKeyRequest extends RequestBase { - body: { - id?: string - ids?: Array - name?: string - owner?: boolean - realm_name?: string - username?: string - } +export interface IndicesRecoveryShardRecovery { + id: long + index: IndicesRecoveryRecoveryIndexStatus + primary: boolean + source: IndicesRecoveryRecoveryOrigin + stage: string + start?: IndicesRecoveryRecoveryStartStatus + start_time?: DateString + start_time_in_millis: EpochMillis + stop_time?: DateString + stop_time_in_millis: EpochMillis + target: IndicesRecoveryRecoveryOrigin + total_time?: DateString + total_time_in_millis: EpochMillis + translog: IndicesRecoveryTranslogStatus + type: Type + verify_index: IndicesRecoveryVerifyIndex +} + +export interface IndicesRecoveryTranslogStatus { + percent: Percentage + recovered: long + total: long + total_on_start: long + total_time?: string + total_time_in_millis: EpochMillis +} + +export interface IndicesRecoveryVerifyIndex { + check_index_time?: Time + check_index_time_in_millis: EpochMillis + total_time?: Time + total_time_in_millis: EpochMillis } -export interface InvalidateApiKeyResponse extends ResponseBase { - error_count: integer - 
error_details?: Array - invalidated_api_keys: Array - previously_invalidated_api_keys: Array +export interface IndicesRefreshRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean } -export interface InvalidateUserAccessTokenRequest extends RequestBase { - body: { - token?: string - refresh_token?: string - realm_name?: string - username?: string - } +export interface IndicesRefreshResponse extends ShardsOperationResponseBase { } -export interface InvalidateUserAccessTokenResponse extends ResponseBase { - error_count: long - error_details?: Array - invalidated_tokens: long - previously_invalidated_tokens: long +export interface IndicesReloadSearchAnalyzersReloadDetails { + index: string + reloaded_analyzers: string[] + reloaded_node_ids: string[] } -export interface IpFilterUsage { - http: boolean - transport: boolean +export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean } -export interface IpProperty extends DocValuesPropertyBase { - boost?: double - index?: boolean - null_value?: string - type: 'ip' +export interface IndicesReloadSearchAnalyzersResponse { + reload_details: IndicesReloadSearchAnalyzersReloadDetails[] + _shards: ShardStatistics } -export interface IpRangeAggregation extends BucketAggregationBase { - field?: Field - ranges?: Array +export interface IndicesResolveIndexRequest extends RequestBase { + name: Names + expand_wildcards?: ExpandWildcards } -export interface IpRangeAggregationRange { - from?: string - mask?: string - to?: string +export interface IndicesResolveIndexResolveIndexAliasItem { + name: Name + indices: Indices } -export interface IpRangeBucketKeys { +export interface IndicesResolveIndexResolveIndexDataStreamsItem { + name: DataStreamName + timestamp_field: Field + backing_indices: Indices } -export type IpRangeBucket = IpRangeBucketKeys | - { [property: string]: Aggregate } -export interface IpRangeProperty extends RangePropertyBase { - type: 'ip_range' +export interface IndicesResolveIndexResolveIndexItem { + name: Name + aliases?: string[] + attributes: string[] + data_stream?: DataStreamName } -export interface Job { - allow_lazy_open?: boolean - analysis_config?: AnalysisConfig - analysis_limits?: AnalysisLimits - background_persist_interval?: Time - count?: integer - created_by?: EmptyObject - create_time?: integer - detectors?: JobStatistics - data_description?: DataDescription - description?: string - finished_time?: integer - forecasts?: MlJobForecasts - job_id?: Id - job_type?: string - model_plot?: ModelPlotConfig - model_size?: JobStatistics - model_snapshot_id?: Id - model_snapshot_retention_days?: long - renormalization_window_days?: long - results_index_name?: IndexName - results_retention_days?: long - groups?: Array - model_plot_config?: ModelPlotConfig - custom_settings?: CustomSettings - job_version?: VersionString - deleting?: boolean - daily_model_snapshot_retention_after_days?: long +export interface IndicesResolveIndexResponse { + indices: IndicesResolveIndexResolveIndexItem[] + aliases: IndicesResolveIndexResolveIndexAliasItem[] + data_streams: IndicesResolveIndexResolveIndexDataStreamsItem[] } -export interface JobForecastStatistics { - memory_bytes?: JobStatistics - processing_time_ms?: JobStatistics - records?: JobStatistics - status?: Record - total: long - forecasted_jobs: integer +export interface 
IndicesRolloverRequest extends RequestBase { + alias: IndexAlias + new_index?: IndexName + dry_run?: boolean + include_type_name?: boolean + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + body?: { + aliases?: Record + conditions?: IndicesRolloverRolloverConditions + mappings?: Record | MappingTypeMapping + settings?: Record + } } -export type JobState = 'closing' | 'closed' | 'opened' | 'failed' | 'opening' - -export interface JobStatistics { - avg: double - max: double - min: double - total: double +export interface IndicesRolloverResponse extends AcknowledgedResponseBase { + conditions: Record + dry_run: boolean + new_index: string + old_index: string + rolled_over: boolean + shards_acknowledged: boolean } -export interface JobStats { - assignment_explanation?: string - data_counts: DataCounts - forecasts_stats: JobForecastStatistics - job_id: string - model_size_stats: ModelSizeStats - node?: DiscoveryNode - open_time?: DateString - state: JobState - timing_stats: TimingStats - deleting?: boolean +export interface IndicesRolloverRolloverConditions { + max_age?: Time + max_docs?: long + max_size?: string + max_primary_shard_size?: ByteSize } -export interface JoinProcessor extends ProcessorBase { - field: Field - separator: string - target_field?: Field +export interface IndicesSegmentsIndexSegment { + shards: Record } -export interface JoinProperty extends PropertyBase { - relations?: Record> - type: 'join' +export interface IndicesSegmentsRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + verbose?: boolean } -export interface JsonProcessor extends ProcessorBase { - add_to_root: boolean - field: Field - target_field: Field +export interface IndicesSegmentsResponse { + indices: Record + _shards: ShardStatistics } -export interface JvmClassesStats { - current_loaded_count: long - total_loaded_count: long - total_unloaded_count: long +export interface IndicesSegmentsSegment { + attributes: Record + committed: boolean + compound: boolean + deleted_docs: long + generation: integer + memory_in_bytes: double + search: boolean + size_in_bytes: double + num_docs: long + version: VersionString } -export interface KStemTokenFilter extends TokenFilterBase { +export interface IndicesSegmentsShardSegmentRouting { + node: string + primary: boolean + state: string } -export type KeepTypesMode = 'include' | 'exclude' - -export interface KeepTypesTokenFilter extends TokenFilterBase { - mode: KeepTypesMode - types: Array +export interface IndicesSegmentsShardsSegment { + num_committed_segments: integer + routing: IndicesSegmentsShardSegmentRouting + num_search_segments: integer + segments: Record } -export interface KeepWordsTokenFilter extends TokenFilterBase { - keep_words: Array - keep_words_case: boolean - keep_words_path: string +export interface IndicesShardStoresIndicesShardStores { + shards: Record } -export interface KeyValueProcessor extends ProcessorBase { - exclude_keys?: Array - field: Field - field_split: string - ignore_missing?: boolean - include_keys?: Array - prefix?: string - strip_brackets?: boolean - target_field?: Field - trim_key?: string - trim_value?: string - value_split: string +export interface IndicesShardStoresRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + status?: string | string[] } -export interface KeyedBucketKeys { - doc_count: long - 
key: TKey - key_as_string: string +export interface IndicesShardStoresResponse { + indices: Record } -export type KeyedBucket = KeyedBucketKeys | - { [property: string]: Aggregate } -export interface KeyedProcessorStats { - statistics: ProcessStats - type: string +export interface IndicesShardStoresShardStore { + allocation: IndicesShardStoresShardStoreAllocation + allocation_id: Id + attributes: Record + id: Id + legacy_version: VersionNumber + name: Name + store_exception: IndicesShardStoresShardStoreException + transport_address: TransportAddress } -export interface KeyedValueAggregate extends ValueAggregate { - keys: Array -} +export type IndicesShardStoresShardStoreAllocation = 'primary' | 'replica' | 'unused' -export interface KeywordMarkerTokenFilter extends TokenFilterBase { - ignore_case: boolean - keywords: Array - keywords_path: string - keywords_pattern: string +export interface IndicesShardStoresShardStoreException { + reason: string + type: string } -export interface KeywordProperty extends DocValuesPropertyBase { - boost?: double - eager_global_ordinals?: boolean - index?: boolean - index_options?: IndexOptions - normalizer?: string - norms?: boolean - null_value?: string - split_queries_on_whitespace?: boolean - type: 'keyword' +export interface IndicesShardStoresShardStoreWrapper { + stores: IndicesShardStoresShardStore[] } -export interface KeywordTokenizer extends TokenizerBase { - buffer_size: integer +export interface IndicesShrinkRequest extends RequestBase { + index: IndexName + target: IndexName + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + body?: { + aliases?: Record + settings?: Record + } } -export interface KibanaUrlConfig extends BaseUrlConfig { - time_range?: string +export interface IndicesShrinkResponse extends AcknowledgedResponseBase { + shards_acknowledged: boolean + index: IndexName } -export interface LaplaceSmoothingModel { - alpha: double +export interface IndicesSimulateIndexTemplateOverlappingIndexTemplate { + name: Name + index_patterns?: IndexName[] } -export interface LatLon { - lat: double - lon: double +export interface IndicesSimulateIndexTemplateRequest extends RequestBase { + name?: Name + body?: { + index_patterns?: IndexName[] + composed_of?: Name[] + overlapping?: IndicesSimulateIndexTemplateOverlappingIndexTemplate[] + template?: IndicesTemplateMapping + } } -export interface LengthTokenFilter extends TokenFilterBase { - max: integer - min: integer +export interface IndicesSimulateIndexTemplateResponse extends AcknowledgedResponseBase { } -export interface LetterTokenizer extends TokenizerBase { +export interface IndicesSimulateTemplateRequest extends RequestBase { + name?: Name + create?: boolean + master_timeout?: Time + body?: IndicesGetIndexTemplateIndexTemplate } -export type Level = 'cluster' | 'indices' | 'shards' - -export interface License { - expiry_date_in_millis: EpochMillis - issue_date_in_millis: EpochMillis - issued_to: string - issuer: string - max_nodes?: long - max_resource_units?: long - signature: string - start_date_in_millis: EpochMillis - type: LicenseType - uid: string +export interface IndicesSimulateTemplateResponse { + stub: string } -export interface LicenseAcknowledgement { - license: Array - message: string +export interface IndicesSplitRequest extends RequestBase { + index: IndexName + target: IndexName + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + body?: { + aliases?: Record + settings?: Record + } } -export interface 
LicenseInformation { - expiry_date: DateString - expiry_date_in_millis: EpochMillis - issue_date: DateString - issue_date_in_millis: EpochMillis - issued_to: string - issuer: string - max_nodes: long - max_resource_units?: integer - status: LicenseStatus - type: LicenseType - uid: Uuid - start_date_in_millis: EpochMillis +export interface IndicesSplitResponse extends AcknowledgedResponseBase { + shards_acknowledged: boolean + index: IndexName } -export type LicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' - -export type LicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' +export interface IndicesStatsIndexStats { + completion?: CompletionStats + docs?: DocStats + fielddata?: FielddataStats + flush?: FlushStats + get?: GetStats + indexing?: IndexingStats + merges?: MergesStats + query_cache?: QueryCacheStats + recovery?: RecoveryStats + refresh?: RefreshStats + request_cache?: RequestCacheStats + search?: SearchStats + segments?: SegmentsStats + store?: StoreStats + translog?: TranslogStats + warmer?: WarmerStats + bulk?: BulkStats +} -export interface LifecycleAction { +export interface IndicesStatsIndicesStats { + primaries: IndicesStatsIndexStats + shards?: Record + total: IndicesStatsIndexStats + uuid?: Uuid } -export interface LifecycleExplain { - action: Name - action_time_millis: EpochMillis - age: Time - failed_step?: Name - failed_step_retry_count?: integer - index: IndexName - is_auto_retryable_error?: boolean - lifecycle_date_millis: EpochMillis - managed: boolean - phase: Name - phase_time_millis: EpochMillis - policy: Name - step: Name - step_info?: Record - step_time_millis: EpochMillis - phase_execution: LifecycleExplainPhaseExecution +export interface IndicesStatsRequest extends RequestBase { + metric?: Metrics + index?: Indices + completion_fields?: Fields + expand_wildcards?: ExpandWildcards + fielddata_fields?: Fields + fields?: Fields + forbid_closed_indices?: boolean + groups?: string | string[] + include_segment_file_sizes?: boolean + include_unloaded_segments?: boolean + level?: Level + types?: Types } -export interface LifecycleExplainPhaseExecution { - policy: Name - version: VersionNumber - modified_date_in_millis: EpochMillis +export interface IndicesStatsResponse { + indices?: Record + _shards: ShardStatistics + _all: IndicesStatsIndicesStats } -export interface LifecycleExplainProject { - project: LifecycleExplainProjectSummary +export interface IndicesStatsShardCommit { + generation: integer + id: Id + num_docs: long + user_data: Record } -export interface LifecycleExplainProjectSummary { - index: IndexName - managed: boolean +export interface IndicesStatsShardFileSizeInfo { + description: string + size_in_bytes: long } -export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' - -export interface LifecyclePolicy { - modified_date: DateString - policy: Policy - version: VersionNumber +export interface IndicesStatsShardLease { + id: Id + retaining_seq_no: SequenceNumber + timestamp: long + source: string } -export type Like = string | LikeDocument - -export interface LikeDocument { - doc?: any - fields?: Fields - _id?: Id | number - _type?: Type - _index?: IndexName - per_field_analyzer?: Record - routing?: Routing +export interface IndicesStatsShardPath { + data_path: string + is_custom_data_path: boolean + state_path: string } -export interface LimitTokenCountTokenFilter extends TokenFilterBase { - consume_all_tokens: boolean - max_token_count: integer +export interface 
IndicesStatsShardQueryCache { + cache_count: long + cache_size: long + evictions: long + hit_count: long + memory_size_in_bytes: long + miss_count: long + total_count: long } -export interface Limits { - max_model_memory_limit?: ByteSize - effective_max_model_memory_limit: ByteSize - total_ml_memory: ByteSize +export interface IndicesStatsShardRetentionLeases { + primary_term: long + version: VersionNumber + leases: IndicesStatsShardLease[] } -export interface LineStringGeoShape { - coordinates: Array +export interface IndicesStatsShardRouting { + node: string + primary: boolean + relocating_node?: string + state: IndicesStatsShardRoutingState } -export interface LinearInterpolationSmoothingModel { - bigram_lambda: double - trigram_lambda: double - unigram_lambda: double -} +export type IndicesStatsShardRoutingState = 'UNASSIGNED' | 'INITIALIZING' | 'STARTED' | 'RELOCATING' -export interface ListDanglingIndicesRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface IndicesStatsShardSequenceNumber { + global_checkpoint: long + local_checkpoint: long + max_seq_no: SequenceNumber } -export interface ListDanglingIndicesResponse extends ResponseBase { - stub: integer +export interface IndicesStatsShardStats { + commit: IndicesStatsShardCommit + completion: CompletionStats + docs: DocStats + fielddata: FielddataStats + flush: FlushStats + get: GetStats + indexing: IndexingStats + merges: MergesStats + shard_path: IndicesStatsShardPath + query_cache: IndicesStatsShardQueryCache + recovery: RecoveryStats + refresh: RefreshStats + request_cache: RequestCacheStats + retention_leases: IndicesStatsShardRetentionLeases + routing: IndicesStatsShardRouting + search: SearchStats + segments: SegmentsStats + seq_no: IndicesStatsShardSequenceNumber + store: StoreStats + translog: TranslogStats + warmer: WarmerStats + bulk?: BulkStats } -export interface ListTasksRequest extends RequestBase { - actions?: string | Array - detailed?: boolean - group_by?: GroupBy - nodes?: Array - parent_task_id?: Id +export interface IndicesUnfreezeRequest extends RequestBase { + index: IndexName + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + master_timeout?: Time timeout?: Time - wait_for_completion?: boolean + wait_for_active_shards?: string } -export interface ListTasksResponse extends ResponseBase { - node_failures?: Array - nodes?: Record - tasks?: Record | Array +export interface IndicesUnfreezeResponse extends AcknowledgedResponseBase { + shards_acknowledged: boolean } -export interface LoggingActionResult { - logged_text: string +export interface IndicesUpdateAliasesIndicesUpdateAliasBulk { } -export interface LogstashDeletePipelineRequest extends RequestBase { - stub_a: string - stub_b: string +export interface IndicesUpdateAliasesRequest extends RequestBase { + master_timeout?: Time + timeout?: Time body?: { - stub_c: string + actions?: IndicesUpdateAliasesIndicesUpdateAliasBulk[] } } -export interface LogstashDeletePipelineResponse extends ResponseBase { - stub: integer +export interface IndicesUpdateAliasesResponse extends AcknowledgedResponseBase { } -export interface LogstashGetPipelineRequest extends RequestBase { - stub_a: string - stub_b: string +export interface IndicesUpgradeRequest extends RequestBase { + stub_b: integer + stub_a: integer body?: { - stub_c: string + stub_c: integer } } -export interface LogstashGetPipelineResponse extends ResponseBase { +export interface IndicesUpgradeResponse { 
stub: integer } -export interface LogstashPutPipelineRequest extends RequestBase { - stub_a: string - stub_b: string - body: { - stub_c: string +export interface IndicesValidateQueryIndicesValidationExplanation { + error?: string + explanation?: string + index: IndexName + valid: boolean +} + +export interface IndicesValidateQueryRequest extends RequestBase { + index?: Indices + type?: Types + allow_no_indices?: boolean + all_shards?: boolean + analyzer?: string + analyze_wildcard?: boolean + default_operator?: DefaultOperator + df?: string + expand_wildcards?: ExpandWildcards + explain?: boolean + ignore_unavailable?: boolean + lenient?: boolean + query_on_query_string?: string + rewrite?: boolean + q?: string + body?: { + query?: QueryDslQueryContainer } } -export interface LogstashPutPipelineResponse extends ResponseBase { - stub: integer +export interface IndicesValidateQueryResponse { + explanations?: IndicesValidateQueryIndicesValidationExplanation[] + _shards?: ShardStatistics + valid: boolean + error?: string } -export interface LongRangeProperty extends RangePropertyBase { - type: 'long_range' +export interface IngestAppendProcessor extends IngestProcessorBase { + field: Field + value: any[] + allow_duplicates?: boolean } -export interface LowercaseProcessor extends ProcessorBase { +export interface IngestAttachmentProcessor extends IngestProcessorBase { field: Field ignore_missing?: boolean + indexed_chars?: long + indexed_chars_field?: Field + properties?: string[] target_field?: Field + resource_name?: string } -export interface LowercaseTokenFilter extends TokenFilterBase { - language: string +export interface IngestBytesProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field?: Field } -export interface LowercaseTokenizer extends TokenizerBase { +export interface IngestCircleProcessor extends IngestProcessorBase { + error_distance: double + field: Field + ignore_missing: boolean + shape_type: IngestShapeType + target_field: Field } -export interface MachineLearningInfoRequest extends RequestBase { +export interface IngestConvertProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field: Field + type: IngestConvertType } -export interface MachineLearningInfoResponse extends ResponseBase { - defaults: Defaults - limits: Limits - upgrade_mode: boolean - native_code: NativeCode +export type IngestConvertType = 'integer' | 'long' | 'float' | 'double' | 'string' | 'boolean' | 'auto' + +export interface IngestCsvProcessor extends IngestProcessorBase { + empty_value: any + description?: string + field: Field + ignore_missing?: boolean + quote?: string + separator?: string + target_fields: Fields + trim: boolean } -export interface MachineLearningUsage extends XPackUsage { - datafeeds: Record - jobs: Record - node_count: integer - data_frame_analytics_jobs: MlDataFrameAnalyticsJobsUsage - inference: MlInferenceUsage +export interface IngestDateIndexNameProcessor extends IngestProcessorBase { + date_formats: string[] + date_rounding: string | IngestDateRounding + field: Field + index_name_format: string + index_name_prefix: string + locale: string + timezone: string } -export interface MainError extends ErrorCause { - headers?: Record - root_cause: Array +export interface IngestDateProcessor extends IngestProcessorBase { + field: Field + formats: string[] + locale?: string + target_field?: Field + timezone?: string } -export interface ManageUserPrivileges { - applications: Array +export type 
IngestDateRounding = 's' | 'm' | 'h' | 'd' | 'w' | 'M' | 'y' + +export interface IngestDissectProcessor extends IngestProcessorBase { + append_separator: string + field: Field + ignore_missing: boolean + pattern: string } -export interface MappingCharFilter extends CharFilterBase { - mappings: Array - mappings_path: string +export interface IngestDotExpanderProcessor extends IngestProcessorBase { + field: Field + path?: string } -export interface MatchAllQuery extends QueryBase { - norm_field?: string +export interface IngestDropProcessor extends IngestProcessorBase { } -export interface MatchBoolPrefixQuery extends QueryBase { - analyzer?: string - fuzziness?: Fuzziness - fuzzy_rewrite?: MultiTermQueryRewrite - fuzzy_transpositions?: boolean - max_expansions?: integer - minimum_should_match?: MinimumShouldMatch - operator?: Operator - prefix_length?: integer - query?: string +export interface IngestEnrichProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + max_matches?: integer + override?: boolean + policy_name: string + shape_relation?: GeoShapeRelation + target_field: Field } -export interface MatchNoneQuery extends QueryBase { +export interface IngestFailProcessor extends IngestProcessorBase { + message: string } -export interface MatchPhrasePrefixQuery extends QueryBase { - analyzer?: string - max_expansions?: integer - query?: string - slop?: integer - zero_terms_query?: ZeroTermsQuery +export interface IngestForeachProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + processor: IngestProcessorContainer } -export interface MatchPhraseQuery extends QueryBase { - analyzer?: string - query?: string - slop?: integer +export interface IngestGeoIpProcessor extends IngestProcessorBase { + database_file: string + field: Field + first_only: boolean + ignore_missing: boolean + properties: string[] + target_field: Field } -export interface MatchQuery extends QueryBase { - analyzer?: string - auto_generate_synonyms_phrase_query?: boolean - cutoff_frequency?: double - fuzziness?: Fuzziness - fuzzy_rewrite?: MultiTermQueryRewrite - fuzzy_transpositions?: boolean - lenient?: boolean - max_expansions?: integer - minimum_should_match?: MinimumShouldMatch - operator?: Operator - prefix_length?: integer - query?: string | float | boolean - zero_terms_query?: ZeroTermsQuery +export interface IngestGrokProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + pattern_definitions: Record + patterns: string[] + trace_match?: boolean } -export type MatchType = 'simple' | 'regex' +export interface IngestGsubProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + pattern: string + replacement: string + target_field?: Field +} -export interface MatrixAggregation extends Aggregation { - fields?: Fields - missing?: Record +export interface IngestInferenceConfig { + regression?: IngestInferenceConfigRegression } -export interface MatrixStatsAggregate extends AggregateBase { - correlation: Record - covariance: Record - count: integer - kurtosis: double - mean: double - skewness: double - variance: double - name: string +export interface IngestInferenceConfigRegression { + results_field: string } -export interface MatrixStatsAggregation extends MatrixAggregation { - mode?: MatrixStatsMode +export interface IngestInferenceProcessor extends IngestProcessorBase { + model_id: Id + target_field: Field + field_map?: Record + inference_config?: IngestInferenceConfig } -export type MatrixStatsMode = 'avg' | 
'min' | 'max' | 'sum' | 'median' +export interface IngestJoinProcessor extends IngestProcessorBase { + field: Field + separator: string + target_field?: Field +} -export interface MaxAggregation extends FormatMetricAggregationBase { +export interface IngestJsonProcessor extends IngestProcessorBase { + add_to_root: boolean + field: Field + target_field: Field } -export interface MaxBucketAggregation extends PipelineAggregationBase { +export interface IngestKeyValueProcessor extends IngestProcessorBase { + exclude_keys?: string[] + field: Field + field_split: string + ignore_missing?: boolean + include_keys?: string[] + prefix?: string + strip_brackets?: boolean + target_field?: Field + trim_key?: string + trim_value?: string + value_split: string } -export interface MedianAbsoluteDeviationAggregation extends FormatMetricAggregationBase { - compression?: double +export interface IngestLowercaseProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field?: Field } -export interface MemoryStats { - resident: string - resident_in_bytes: long - share: string - share_in_bytes: long - total_virtual: string - total_virtual_in_bytes: long +export interface IngestPipeline { + description?: string + on_failure?: IngestProcessorContainer[] + processors?: IngestProcessorContainer[] + version?: VersionNumber } -export type MemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' +export interface IngestPipelineConfig { + description?: string + version?: VersionNumber + processors: IngestProcessorContainer[] +} -export interface MergesStats { - current: long - current_docs: long - current_size?: string - current_size_in_bytes: long - total: long - total_auto_throttle?: string - total_auto_throttle_in_bytes: long - total_docs: long - total_size?: string - total_size_in_bytes: long - total_stopped_time?: string - total_stopped_time_in_millis: long - total_throttled_time?: string - total_throttled_time_in_millis: long - total_time?: string - total_time_in_millis: long +export interface IngestPipelineProcessor extends IngestProcessorBase { + name: Name } -export type MetricAggregate = ValueAggregate | BoxPlotAggregate | GeoBoundsAggregate | GeoCentroidAggregate | GeoLineAggregate | PercentilesAggregate | ScriptedMetricAggregate | StatsAggregate | StringStatsAggregate | TopHitsAggregate | TopMetricsAggregate | ExtendedStatsAggregate | TDigestPercentilesAggregate | HdrPercentilesAggregate +export interface IngestProcessorBase { + if?: string + ignore_failure?: boolean + on_failure?: IngestProcessorContainer[] + tag?: string +} -export interface MetricAggregationBase { - field?: Field - missing?: Missing +export interface IngestProcessorContainer { + attachment?: IngestAttachmentProcessor + append?: IngestAppendProcessor + csv?: IngestCsvProcessor + convert?: IngestConvertProcessor + date?: IngestDateProcessor + date_index_name?: IngestDateIndexNameProcessor + dot_expander?: IngestDotExpanderProcessor + enrich?: IngestEnrichProcessor + fail?: IngestFailProcessor + foreach?: IngestForeachProcessor + json?: IngestJsonProcessor + user_agent?: IngestUserAgentProcessor + kv?: IngestKeyValueProcessor + geoip?: IngestGeoIpProcessor + grok?: IngestGrokProcessor + gsub?: IngestGsubProcessor + join?: IngestJoinProcessor + lowercase?: IngestLowercaseProcessor + remove?: IngestRemoveProcessor + rename?: IngestRenameProcessor script?: Script + set?: IngestSetProcessor + sort?: IngestSortProcessor + split?: IngestSplitProcessor + trim?: IngestTrimProcessor + uppercase?: IngestUppercaseProcessor + 
urldecode?: IngestUrlDecodeProcessor + bytes?: IngestBytesProcessor + dissect?: IngestDissectProcessor + set_security_user?: IngestSetSecurityUserProcessor + pipeline?: IngestPipelineProcessor + drop?: IngestDropProcessor + circle?: IngestCircleProcessor + inference?: IngestInferenceProcessor +} + +export interface IngestRemoveProcessor extends IngestProcessorBase { + field: Fields + ignore_missing?: boolean } -export type Metrics = string | Array - -export interface MinAggregation extends FormatMetricAggregationBase { +export interface IngestRenameProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field: Field } -export interface MinBucketAggregation extends PipelineAggregationBase { +export interface IngestSetProcessor extends IngestProcessorBase { + field: Field + override?: boolean + value: any } -export interface MinimalLicenseInformation { - expiry_date_in_millis: EpochMillis - mode: LicenseType - status: LicenseStatus - type: LicenseType - uid: string +export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { + field: Field + properties?: string[] } -export type MinimumInterval = 'second' | 'minute' | 'hour' | 'day' | 'month' | 'year' +export type IngestShapeType = 'geo_shape' | 'shape' -export type MinimumShouldMatch = integer | string - -export type Missing = string | integer | double | boolean - -export interface MissingAggregation extends BucketAggregationBase { - field?: Field - missing?: Missing +export interface IngestSortProcessor extends IngestProcessorBase { + field: Field + order: SearchTypesSortOrder + target_field: Field } -export interface MlDataFrameAnalyticsJobsCountUsage { - count: long +export interface IngestSplitProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + preserve_trailing?: boolean + separator: string + target_field?: Field } -export interface MlDataFrameAnalyticsJobsMemoryUsage { - peak_usage_bytes: JobStatistics +export interface IngestTrimProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field?: Field } -export interface MlDataFrameAnalyticsJobsUsage { - memory_usage?: MlDataFrameAnalyticsJobsMemoryUsage - _all: MlDataFrameAnalyticsJobsCountUsage - analysis_counts?: EmptyObject +export interface IngestUppercaseProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field?: Field } -export interface MlInferenceIngestProcessorCountUsage { - max: long - sum: long - min: long +export interface IngestUrlDecodeProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field?: Field } -export interface MlInferenceIngestProcessorUsage { - num_docs_processed: MlInferenceIngestProcessorCountUsage - pipelines: MlUsageCounter - num_failures: MlInferenceIngestProcessorCountUsage - time_ms: MlInferenceIngestProcessorCountUsage +export interface IngestUserAgentProcessor extends IngestProcessorBase { + field: Field + ignore_missing: boolean + options: IngestUserAgentProperty[] + regex_file: string + target_field: Field } -export interface MlInferenceTrainedModelsCountUsage { - total: long - prepackaged: long - other: long - regression: long - classification: long -} +export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD' -export interface MlInferenceTrainedModelsUsage { - estimated_operations?: JobStatistics - estimated_heap_memory_usage_bytes?: JobStatistics - count?: 
MlInferenceTrainedModelsCountUsage - _all: MlUsageCounter +export interface IngestDeletePipelineRequest extends RequestBase { + id: Id + master_timeout?: Time + timeout?: Time } -export interface MlInferenceUsage { - ingest_processors: Record - trained_models: MlInferenceTrainedModelsUsage +export interface IngestDeletePipelineResponse extends AcknowledgedResponseBase { } -export interface MlJobForecasts { - total: long - forecasted_jobs: long +export interface IngestGeoIpStatsGeoIpDownloadStatistics { + successful_downloads: integer + failed_downloads: integer + total_download_time: integer + database_count: integer + skipped_updates: integer } -export interface MlUsageCounter { - count: long +export interface IngestGeoIpStatsGeoIpNodeDatabaseName { + name: Name } -export type ModelCategorizationStatus = 'ok' | 'warn' - -export type ModelMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' - -export interface ModelPlotConfig { - terms?: Field - enabled: boolean - annotations_enabled?: boolean +export interface IngestGeoIpStatsGeoIpNodeDatabases { + databases: IngestGeoIpStatsGeoIpNodeDatabaseName[] + files_in_temp: string[] } -export interface ModelPlotConfigEnabled { - enabled: boolean - terms?: string +export interface IngestGeoIpStatsRequest extends RequestBase { } -export interface ModelSizeStats { - bucket_allocation_failures_count: long - job_id: Id - log_time: Time - memory_status: MemoryStatus - model_bytes: long - model_bytes_exceeded?: long - model_bytes_memory_limit?: long - peak_model_bytes?: long - assignment_memory_basis?: string - result_type: string - total_by_field_count: long - total_over_field_count: long - total_partition_field_count: long - categorization_status: string - categorized_doc_count: integer - dead_category_count: integer - failed_category_count: integer - frequent_category_count: integer - rare_category_count: integer - total_category_count: integer - timestamp?: long +export interface IngestGeoIpStatsResponse { + stats: IngestGeoIpStatsGeoIpDownloadStatistics + nodes: Record } -export interface ModelSnapshot { - description: string - job_id: Id - latest_record_time_stamp: Time - latest_result_time_stamp: Time - model_size_stats: ModelSizeStats - retain: boolean - snapshot_doc_count: long - snapshot_id: Id - timestamp: Time - min_version: VersionString +export interface IngestGetPipelineRequest extends RequestBase { + id?: Id + master_timeout?: Time + summary?: boolean } -export interface MonitoringUsage extends XPackUsage { - collection_enabled: boolean - enabled_exporters: Record +export interface IngestGetPipelineResponse extends DictionaryResponseBase { } -export type Month = 'january' | 'february' | 'march' | 'april' | 'may' | 'june' | 'july' | 'august' | 'september' | 'october' | 'november' | 'december' +export interface IngestProcessorGrokRequest extends RequestBase { +} -export interface MoreLikeThisQuery extends QueryBase { - analyzer?: string - boost_terms?: double - fields?: Fields - include?: boolean - like?: Like | Array - max_doc_freq?: integer - max_query_terms?: integer - max_word_length?: integer - min_doc_freq?: integer - minimum_should_match?: MinimumShouldMatch - min_term_freq?: integer - min_word_length?: integer - per_field_analyzer?: Record - routing?: Routing - stop_words?: StopWords - unlike?: Like | Array - version?: VersionNumber - version_type?: VersionType +export interface IngestProcessorGrokResponse { + patterns: Record } -export interface MoveToStepRequest extends RequestBase { - index: IndexName +export interface 
IngestPutPipelineRequest extends RequestBase { + id: Id + master_timeout?: Time + timeout?: Time body?: { - current_step?: StepKey - next_step?: StepKey + description?: string + on_failure?: IngestProcessorContainer[] + processors?: IngestProcessorContainer[] + version?: VersionNumber } } -export interface MoveToStepResponse extends AcknowledgedResponseBase { +export interface IngestPutPipelineResponse extends AcknowledgedResponseBase { } -export interface MovingAverageAggregation extends PipelineAggregationBase { - minimize?: boolean - model?: MovingAverageModel - settings: MovingAverageSettings - predict?: integer - window?: integer +export interface IngestSimulatePipelineDocumentSimulation { + _id: Id + _index: IndexName + _ingest: IngestSimulatePipelineIngest + _parent?: string + _routing?: string + _source: Record + _type?: Type } -export type MovingAverageModel = 'linear' | 'simple' | 'ewma' | 'holt' | 'holt_winters' - -export type MovingAverageSettings = EwmaModelSettings | HoltLinearModelSettings | HoltWintersModelSettings - -export interface MovingFunctionAggregation extends PipelineAggregationBase { - script?: string - shift?: integer - window?: integer +export interface IngestSimulatePipelineIngest { + timestamp: DateString + pipeline?: Name } -export interface MovingPercentilesAggregation extends PipelineAggregationBase { - window?: integer - shift?: integer +export interface IngestSimulatePipelinePipelineSimulation { + doc?: IngestSimulatePipelineDocumentSimulation + processor_results?: IngestSimulatePipelinePipelineSimulation[] + tag?: string + processor_type?: string + status?: WatcherActionStatusOptions } -export interface MultiBucketAggregate extends AggregateBase { - buckets: Array +export interface IngestSimulatePipelineRequest extends RequestBase { + id?: Id + verbose?: boolean + body?: { + docs?: IngestSimulatePipelineSimulatePipelineDocument[] + pipeline?: IngestPipeline + } } -export interface MultiGetHit { - error?: MainError - fields?: Record - found?: boolean - _id: Id - _index: IndexName - _primary_term?: long - _routing?: Routing - _seq_no?: SequenceNumber - _source?: TDocument - _type?: Type - _version?: VersionNumber +export interface IngestSimulatePipelineResponse { + docs: IngestSimulatePipelinePipelineSimulation[] } -export type MultiGetId = string | integer - -export interface MultiGetOperation { - can_be_flattened?: boolean - _id: MultiGetId +export interface IngestSimulatePipelineSimulatePipelineDocument { + _id?: Id _index?: IndexName - routing?: Routing - _source?: boolean | Fields | SourceFilter - stored_fields?: Fields - _type?: Type - version?: VersionNumber - version_type?: VersionType + _source: any } -export interface MultiGetRequest extends RequestBase { - index?: IndexName - type?: Type - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - source_enabled?: boolean - _source?: boolean | Fields - _source_excludes?: Fields - _source_includes?: Fields - stored_fields?: Fields - body: { - docs?: Array - ids?: Array - } +export interface LicenseLicense { + expiry_date_in_millis: EpochMillis + issue_date_in_millis: EpochMillis + issued_to: string + issuer: string + max_nodes?: long + max_resource_units?: long + signature: string + start_date_in_millis: EpochMillis + type: LicenseLicenseType + uid: string } -export interface MultiGetResponse extends ResponseBase { - docs: Array> -} +export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' -export interface MultiMatchQuery extends QueryBase { - 
analyzer?: string - auto_generate_synonyms_phrase_query?: boolean - cutoff_frequency?: double - fields?: Fields - fuzziness?: Fuzziness - fuzzy_rewrite?: MultiTermQueryRewrite - fuzzy_transpositions?: boolean - lenient?: boolean - max_expansions?: integer - minimum_should_match?: MinimumShouldMatch - operator?: Operator - prefix_length?: integer - query?: string - slop?: integer - tie_breaker?: double - type?: TextQueryType - use_dis_max?: boolean - zero_terms_query?: ZeroTermsQuery +export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' + +export interface LicenseDeleteLicenseRequest extends RequestBase { } -export interface MultiSearchBody { - aggregations?: Record - aggs?: Record - query?: QueryContainer - from?: integer - size?: integer - pit?: PointInTimeReference - track_total_hits?: boolean | integer - suggest?: SuggestContainer | Record +export interface LicenseDeleteLicenseResponse extends AcknowledgedResponseBase { } -export interface MultiSearchHeader { - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - index?: Indices - preference?: string - request_cache?: boolean - routing?: string - search_type?: SearchType +export interface LicenseGetBasicLicenseStatusRequest extends RequestBase { } -export interface MultiSearchRequest extends RequestBase { - index?: Indices - type?: Types - ccs_minimize_roundtrips?: boolean - max_concurrent_searches?: long - max_concurrent_shard_requests?: long - pre_filter_shard_size?: long - search_type?: SearchType - rest_total_hits_as_int?: boolean - typed_keys?: boolean - body: Array +export interface LicenseGetBasicLicenseStatusResponse { + eligible_to_start_basic: boolean } -export interface MultiSearchResponse extends ResponseBase { - took: long - responses: Array | ErrorResponse> +export interface LicenseGetLicenseLicenseInformation { + expiry_date: DateString + expiry_date_in_millis: EpochMillis + issue_date: DateString + issue_date_in_millis: EpochMillis + issued_to: string + issuer: string + max_nodes: long + max_resource_units?: integer + status: LicenseLicenseStatus + type: LicenseLicenseType + uid: Uuid + start_date_in_millis: EpochMillis } -export interface MultiSearchResult extends SearchResponse { - status: integer +export interface LicenseGetLicenseRequest extends RequestBase { + accept_enterprise?: boolean + local?: boolean } -export interface MultiSearchTemplateRequest extends RequestBase { - index?: Indices - type?: Types - ccs_minimize_roundtrips?: boolean - max_concurrent_searches?: long - search_type?: SearchType - total_hits_as_integer?: boolean - typed_keys?: boolean - body: { - operations?: Record - } +export interface LicenseGetLicenseResponse { + license: LicenseGetLicenseLicenseInformation } -export interface MultiSearchTemplateResponse extends ResponseBase { - responses: Array> - took: long +export interface LicenseGetTrialLicenseStatusRequest extends RequestBase { } -export interface MultiTermLookup { - field: Field +export interface LicenseGetTrialLicenseStatusResponse { + eligible_to_start_trial: boolean } -export type MultiTermQueryRewrite = string - -export interface MultiTermVectorOperation { - doc: object - fields: Fields - field_statistics: boolean - filter: TermVectorFilter - _id: Id - _index: IndexName - offsets: boolean - payloads: boolean - positions: boolean - routing: Routing - term_statistics: boolean - version: VersionNumber - version_type: VersionType +export interface 
LicensePostLicenseAcknowledgement { + license: string[] + message: string } -export interface MultiTermVectorsRequest extends RequestBase { - index?: IndexName - type?: Type - fields?: Fields - field_statistics?: boolean - offsets?: boolean - payloads?: boolean - positions?: boolean - preference?: string - realtime?: boolean - routing?: Routing - term_statistics?: boolean - version?: VersionNumber - version_type?: VersionType +export interface LicensePostLicenseRequest extends RequestBase { + acknowledge?: boolean body?: { - docs?: Array - ids?: Array + license?: LicenseLicense + licenses?: LicenseLicense[] } } -export interface MultiTermVectorsResponse extends ResponseBase { - docs: Array +export interface LicensePostLicenseResponse { + acknowledge?: LicensePostLicenseAcknowledgement + acknowledged: boolean + license_status: LicenseLicenseStatus } -export interface MultiTermsAggregation extends BucketAggregationBase { - terms: Array +export interface LicenseStartBasicLicenseRequest extends RequestBase { + acknowledge?: boolean } -export type MultiValueMode = 'min' | 'max' | 'avg' | 'sum' - -export interface MultiplexerTokenFilter extends TokenFilterBase { - filters: Array - preserve_original: boolean +export interface LicenseStartBasicLicenseResponse extends AcknowledgedResponseBase { + acknowledge: Record + basic_was_started: boolean + error_message: string } -export interface Murmur3HashProperty extends DocValuesPropertyBase { - type: 'murmur3' +export interface LicenseStartTrialLicenseRequest extends RequestBase { + acknowledge?: boolean + type_query_string?: string } -export interface MutualInformationHeuristic { - background_is_superset: boolean - include_negatives: boolean +export interface LicenseStartTrialLicenseResponse extends AcknowledgedResponseBase { + error_message?: string + acknowledged: boolean + trial_was_started: boolean + type: LicenseLicenseType } -export interface NGramTokenFilter extends TokenFilterBase { - max_gram: integer - min_gram: integer +export interface LogstashPipelineDeleteRequest extends RequestBase { + stub_a: string + stub_b: string + body?: { + stub_c: string + } } -export interface NGramTokenizer extends TokenizerBase { - custom_token_chars: string - max_gram: integer - min_gram: integer - token_chars: Array +export interface LogstashPipelineDeleteResponse { + stub: integer } -export type Name = string +export interface LogstashPipelineGetRequest extends RequestBase { + stub_a: string + stub_b: string + body?: { + stub_c: string + } +} -export interface NamedPolicy extends EnrichPolicy { - name: string +export interface LogstashPipelineGetResponse { + stub: integer } -export interface NamedPolicyConfig { - geo_match?: NamedPolicy - match: NamedPolicy +export interface LogstashPipelinePutRequest extends RequestBase { + stub_a: string + stub_b: string + body?: { + stub_c: string + } } -export interface NamedPolicyMetadata { - config: NamedPolicyConfig +export interface LogstashPipelinePutResponse { + stub: integer } -export interface NamedQueryKeys { - boost?: float - _name?: string - ignore_unmapped?: boolean +export interface MigrationDeprecationInfoDeprecation { + details: string + level: MigrationDeprecationInfoDeprecationLevel + message: string + url: string } -export type NamedQuery = NamedQueryKeys | - { [property: string]: TQuery } -export type Names = string | Array +export type MigrationDeprecationInfoDeprecationLevel = 'none' | 'info' | 'warning' | 'critical' -export interface NativeCode { - build_hash: string - version: VersionString 
+export interface MigrationDeprecationInfoRequest extends RequestBase { + index?: IndexName } -export interface NativeCodeInformation { - build_hash: string - version: VersionString +export interface MigrationDeprecationInfoResponse { + cluster_settings: MigrationDeprecationInfoDeprecation[] + index_settings: Record + node_settings: MigrationDeprecationInfoDeprecation[] + ml_settings: MigrationDeprecationInfoDeprecation[] } -export interface NestedAggregation extends BucketAggregationBase { - path?: Field +export interface MlAnalysisConfig { + bucket_span: TimeSpan + categorization_field_name?: Field + categorization_filters?: string[] + detectors: MlDetector[] + influencers?: Field[] + latency?: Time + multivariate_by_fields?: boolean + per_partition_categorization?: MlPerPartitionCategorization + summary_count_field_name?: Field + categorization_analyzer?: MlCategorizationAnalyzer | string } -export interface NestedIdentity { - field: Field - offset: integer - _nested?: NestedIdentity +export interface MlAnalysisLimits { + categorization_examples_limit?: long + model_memory_limit: string } -export interface NestedProperty extends CorePropertyBase { - dynamic?: boolean | DynamicMapping - enabled?: boolean - properties?: Record - include_in_parent?: boolean - include_in_root?: boolean - type: 'nested' +export interface MlAnalysisMemoryLimit { + model_memory_limit: string } -export interface NestedQuery extends QueryBase { - ignore_unmapped?: boolean - inner_hits?: InnerHits - path?: Field - query?: QueryContainer - score_mode?: NestedScoreMode +export interface MlAnomaly { + actual?: double[] + bucket_span: Time + by_field_name?: string + by_field_value?: string + causes?: MlAnomalyCause[] + detector_index: integer + field_name?: string + function?: string + function_description?: string + influencers?: MlInfluence[] + initial_record_score: double + is_interim: boolean + job_id: string + over_field_name?: string + over_field_value?: string + partition_field_name?: string + partition_field_value?: string + probability: double + record_score: double + result_type: string + timestamp: EpochMillis + typical?: double[] +} + +export interface MlAnomalyCause { + actual: double[] + by_field_name: Name + by_field_value: string + correlated_by_field_value: string + field_name: Field + function: string + function_description: string + influencers: MlInfluence[] + over_field_name: Name + over_field_value: string + partition_field_name: string + partition_field_value: string + probability: double + typical: double[] } -export type NestedScoreMode = 'avg' | 'sum' | 'min' | 'max' | 'none' +export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time' -export interface NestedSortValue { - filter: QueryContainer - max_children?: integer - path: Field +export interface MlBucketInfluencer { + bucket_span: long + influencer_score: double + influencer_field_name: Field + influencer_field_value: string + initial_influencer_score: double + is_interim: boolean + job_id: Id + probability: double + result_type: string + timestamp: Time + foo?: string } -export interface NeverCondition { +export interface MlBucketSummary { + anomaly_score: double + bucket_influencers: MlBucketInfluencer[] + bucket_span: Time + event_count: long + initial_anomaly_score: double + is_interim: boolean + job_id: Id + partition_scores?: MlPartitionScore[] + processing_time_ms: double + result_type: string + timestamp: Time } -export interface NodeAllocationExplanation { - deciders: Array - node_attributes: Record - 
node_decision: Decision - node_id: string - node_name: string - store?: AllocationStore - transport_address: string - weight_ranking: integer +export interface MlCalendarEvent { + calendar_id?: Id + event_id?: Id + description: string + end_time: EpochMillis + start_time: EpochMillis } -export interface NodeAttributes { - attributes: Record - ephemeral_id: string - id: string - name: string - transport_address: string +export interface MlCategorizationAnalyzer { + filter?: (string | AnalysisTokenFilter)[] + tokenizer?: string | AnalysisTokenizer + char_filter?: (string | AnalysisCharFilter)[] } -export interface NodeBufferPool { - count: long - total_capacity: string - total_capacity_in_bytes: long - used: string - used_in_bytes: long +export interface MlCategory { + category_id: ulong + examples: string[] + grok_pattern?: string + job_id: Id + max_matching_length: ulong + partition_field_name?: string + partition_field_value?: string + regex: string + terms: string + num_matches?: long + preferred_to_categories?: Id[] + p?: string + result_type: string + mlcategory: string } -export interface NodeDiskUsage { - node_name: string - least_available: DiskUsage - most_available: DiskUsage +export interface MlChunkingConfig { + mode: MlChunkingMode + time_span?: Time } -export type NodeId = string +export type MlChunkingMode = 'auto' | 'manual' | 'off' -export type NodeIds = string +export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' -export interface NodeInfo { - attributes: Record - build_flavor: string - build_hash: string - build_type: string - host: string - http: NodeInfoHttp - ip: string - jvm: NodeJvmInfo - name: string - network: NodeInfoNetwork - os: NodeOperatingSystemInfo - plugins: Array - process: NodeProcessInfo - roles: Array - settings: Array - thread_pool: Record - total_indexing_buffer: long - transport: NodeInfoTransport - transport_address: string - version: VersionString +export interface MlDataCounts { + bucket_count: long + earliest_record_timestamp?: long + empty_bucket_count: long + input_bytes: long + input_field_count: long + input_record_count: long + invalid_date_count: long + job_id: Id + last_data_time?: long + latest_empty_bucket_timestamp?: long + latest_record_timestamp?: long + latest_sparse_bucket_timestamp?: long + latest_bucket_timestamp?: long + missing_field_count: long + out_of_order_timestamp_count: long + processed_field_count: long + processed_record_count: long + sparse_bucket_count: long } -export interface NodeInfoHttp { - bound_address: Array - max_content_length: string - max_content_length_in_bytes: long - publish_address: string +export interface MlDataDescription { + format?: string + time_field: Field + time_format?: string + field_delimiter?: string } -export interface NodeInfoJvmMemory { - direct_max: string - direct_max_in_bytes: long - heap_init: string - heap_init_in_bytes: long - heap_max: string - heap_max_in_bytes: long - non_heap_init: string - non_heap_init_in_bytes: long - non_heap_max: string - non_heap_max_in_bytes: long +export interface MlDatafeed { + aggregations?: Record + aggs?: Record + chunking_config?: MlChunkingConfig + datafeed_id: Id + frequency?: Timestamp + indices: Indices + indexes?: string[] + job_id: Id + max_empty_searches?: integer + query: QueryDslQueryContainer + query_delay?: Timestamp + script_fields?: Record + scroll_size?: integer + delayed_data_check_config: MlDelayedDataCheckConfig + runtime_mappings?: MappingRuntimeFields + indices_options?: MlDatafeedIndicesOptions } -export interface 
NodeInfoMemory { - total: string - total_in_bytes: long +export interface MlDatafeedIndicesOptions { + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + ignore_throttled?: boolean } -export interface NodeInfoNetwork { - primary_interface: NodeInfoNetworkInterface - refresh_interval: integer +export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' + +export interface MlDatafeedStats { + assignment_explanation?: string + datafeed_id: Id + node?: MlDiscoveryNode + state: MlDatafeedState + timing_stats: MlDatafeedTimingStats } -export interface NodeInfoNetworkInterface { - address: string - mac_address: string - name: string +export interface MlDatafeedTimingStats { + bucket_count: long + exponential_average_search_time_per_hour_ms: double + job_id: Id + search_count: long + total_search_time_ms: double +} + +export interface MlDataframeAnalysis { + dependent_variable: string + prediction_field_name?: Field + alpha?: double + lambda?: double + gamma?: double + eta?: double + eta_growth_rate_per_tree?: double + feature_bag_fraction?: double + max_trees?: integer + maximum_number_trees?: integer + soft_tree_depth_limit?: integer + soft_tree_depth_tolerance?: double + downsample_factor?: double + max_optimization_rounds_per_hyperparameter?: integer + early_stopping_enabled?: boolean + num_top_feature_importance_values?: integer + feature_processors?: MlDataframeAnalysisFeatureProcessor[] + randomize_seed?: double + training_percent?: Percentage +} + +export type MlDataframeAnalysisAnalyzedFields = string[] | MlDataframeAnalysisAnalyzedFieldsIncludeExclude + +export interface MlDataframeAnalysisAnalyzedFieldsIncludeExclude { + includes: string[] + excludes: string[] +} + +export interface MlDataframeAnalysisClassification extends MlDataframeAnalysis { + class_assignment_objective?: string + num_top_classes?: integer } -export interface NodeInfoOSCPU { - cache_size: string - cache_size_in_bytes: integer - cores_per_socket: integer - mhz: integer - model: string - total_cores: integer - total_sockets: integer - vendor: string +export interface MlDataframeAnalysisContainer { + outlier_detection?: MlDataframeAnalysisOutlierDetection + regression?: MlDataframeAnalysisRegression + classification?: MlDataframeAnalysisClassification } -export interface NodeInfoTransport { - bound_address: Array - publish_address: string +export interface MlDataframeAnalysisFeatureProcessor { + frequency_encoding?: MlDataframeAnalysisFeatureProcessorFrequencyEncoding + multi_encoding?: MlDataframeAnalysisFeatureProcessorMultiEncoding + n_gram_encoding?: MlDataframeAnalysisFeatureProcessorNGramEncoding + one_hot_encoding?: MlDataframeAnalysisFeatureProcessorOneHotEncoding + target_mean_encoding?: MlDataframeAnalysisFeatureProcessorTargetMeanEncoding } -export interface NodeIngestStats { - pipelines: Record - total: IngestStats +export interface MlDataframeAnalysisFeatureProcessorFrequencyEncoding { + feature_name: Name + field: Field + frequency_map: Record } -export interface NodeJvmInfo { - gc_collectors: Array - mem: NodeInfoJvmMemory - memory_pools: Array - pid: integer - start_time_in_millis: long - version: VersionString - vm_name: Name - vm_vendor: string - vm_version: VersionString +export interface MlDataframeAnalysisFeatureProcessorMultiEncoding { + processors: integer[] } -export interface NodeJvmStats { - buffer_pools: Record - classes: JvmClassesStats - gc: GarbageCollectionStats - mem: MemoryStats - threads: ThreadStats - timestamp: long - 
uptime: string - uptime_in_millis: long +export interface MlDataframeAnalysisFeatureProcessorNGramEncoding { + feature_prefix?: string + field: Field + length?: integer + n_grams: integer[] + start?: integer + custom?: boolean } -export interface NodeOperatingSystemInfo { - arch: string - available_processors: integer - cpu: NodeInfoOSCPU - mem: NodeInfoMemory - name: string - pretty_name: Name - refresh_interval_in_millis: integer - swap: NodeInfoMemory - version: VersionString +export interface MlDataframeAnalysisFeatureProcessorOneHotEncoding { + field: Field + hot_map: string } -export interface NodePackagingType { - count: integer - flavor: string - type: string +export interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding { + default_value: integer + feature_name: Name + field: Field + target_map: Record } -export interface NodeProcessInfo { - id: long - mlockall: boolean - refresh_interval_in_millis: long +export interface MlDataframeAnalysisOutlierDetection { + n_neighbors?: integer + method?: string + feature_influence_threshold?: double + compute_feature_influence?: boolean + outlier_fraction?: double + standardization_enabled?: boolean } -export interface NodeReloadException { - name: Name - reload_exception?: NodeReloadExceptionCausedBy +export interface MlDataframeAnalysisRegression extends MlDataframeAnalysis { + loss_function?: string + loss_function_parameter?: double } -export interface NodeReloadExceptionCausedBy { - type: string - reason: string - caused_by?: NodeReloadExceptionCausedBy +export interface MlDataframeAnalytics { + analysis_stats?: MlDataframeAnalyticsStatsContainer + assignment_explanation?: string + data_counts: MlDataframeAnalyticsStatsDataCounts + id: Id + memory_usage: MlDataframeAnalyticsStatsMemoryUsage + node?: NodeAttributes + progress: MlDataframeAnalyticsStatsProgress[] + state: MlDataframeState } -export type NodeRole = 'master' | 'data' | 'client' | 'ingest' | 'ml' | 'voting_only' | 'transform' | 'remote_cluster_client' | 'coordinating_only' - -export interface NodeStatistics { - failed: integer - failures?: Array - successful: integer - total: integer +export interface MlDataframeAnalyticsDestination { + index: IndexName + results_field?: Field } -export interface NodeStats { - adaptive_selection: Record - breakers: Record - fs: FileSystemStats - host: string - http: HttpStats - indices: IndexStats - ingest: NodeIngestStats - ip: Array - jvm: NodeJvmStats - name: string - os: OperatingSystemStats - process: ProcessStats - roles: Array - script: ScriptStats - thread_pool: Record - timestamp: long - transport: TransportStats - transport_address: string +export interface MlDataframeAnalyticsFieldSelection { + is_included: boolean + is_required: boolean + feature_type?: string + mapping_types: string[] + name: Field + reason?: string } -export interface NodeThreadPoolInfo { - core: integer - keep_alive: string - max: integer - queue_size: integer - size: integer - type: string +export interface MlDataframeAnalyticsMemoryEstimation { + expected_memory_with_disk: ByteSize + expected_memory_without_disk: ByteSize } -export interface NodeUsageInformation { - rest_actions: Record - since: EpochMillis - timestamp: EpochMillis - aggregations: Record +export interface MlDataframeAnalyticsSource { + index: Indices + query?: QueryDslQueryContainer + _source?: MlDataframeAnalysisAnalyzedFields + runtime_mappings?: MappingRuntimeFields } -export interface NodesHotThreadsRequest extends RequestBase { - node_id?: NodeIds - ignore_idle_threads?: 
boolean - interval?: Time - snapshots?: long - threads?: long - thread_type?: ThreadType - timeout?: Time +export interface MlDataframeAnalyticsStatsContainer { + classification_stats?: MlDataframeAnalyticsStatsHyperparameters + outlier_detection_stats?: MlDataframeAnalyticsStatsOutlierDetection + regression_stats?: MlDataframeAnalyticsStatsHyperparameters } -export interface NodesHotThreadsResponse extends ResponseBase { - hot_threads: Array +export interface MlDataframeAnalyticsStatsDataCounts { + skipped_docs_count: integer + test_docs_count: integer + training_docs_count: integer } -export interface NodesInfoRequest extends RequestBase { - node_id?: NodeIds - metric?: Metrics - flat_settings?: boolean - timeout?: Time +export interface MlDataframeAnalyticsStatsHyperparameters { + hyperparameters: MlHyperparameters + iteration: integer + timestamp: DateString + timing_stats: MlTimingStats + validation_loss: MlValidationLoss } -export interface NodesInfoResponse extends NodesResponseBase { - cluster_name: string - nodes: Record +export interface MlDataframeAnalyticsStatsMemoryUsage { + memory_reestimate_bytes?: long + peak_usage_bytes: long + status: string + timestamp?: DateString } -export interface NodesResponseBase extends ResponseBase { - _nodes: NodeStatistics +export interface MlDataframeAnalyticsStatsOutlierDetection { + parameters: MlOutlierDetectionParameters + timestamp: DateString + timing_stats: MlTimingStats } -export interface NodesStatsRequest extends RequestBase { - node_id?: NodeIds - metric?: Metrics - index_metric?: Metrics - completion_fields?: Fields - fielddata_fields?: Fields - fields?: Fields - groups?: boolean - include_segment_file_sizes?: boolean - level?: Level - timeout?: Time - types?: Array +export interface MlDataframeAnalyticsStatsProgress { + phase: string + progress_percent: integer } -export interface NodesStatsResponse extends NodesResponseBase { - cluster_name: string - nodes: Record +export interface MlDataframeAnalyticsSummary { + id: Id + source: MlDataframeAnalyticsSource + dest: MlDataframeAnalyticsDestination + analysis: MlDataframeAnalysisContainer + description?: string + model_memory_limit?: ByteSize + max_num_threads?: integer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields + allow_lazy_start?: boolean + create_time?: long + version?: VersionString } -export interface NodesUsageRequest extends RequestBase { - node_id?: NodeIds - metric?: Metrics - timeout?: Time +export interface MlDataframeEvaluationClassification { + actual_field: Field + predicted_field?: Field + top_classes_field?: Field + metrics?: MlDataframeEvaluationClassificationMetrics } -export interface NodesUsageResponse extends NodesResponseBase { - cluster_name: string - nodes: Record +export interface MlDataframeEvaluationClassificationMetrics extends MlDataframeEvaluationMetrics { + accuracy?: Record + multiclass_confusion_matrix?: Record } -export type NoriDecompoundMode = 'discard' | 'none' | 'mixed' +export interface MlDataframeEvaluationClassificationMetricsAucRoc { + class_name?: Name + include_curve?: boolean +} -export interface NoriPartOfSpeechTokenFilter extends TokenFilterBase { - stoptags: Array +export interface MlDataframeEvaluationContainer { + classification?: MlDataframeEvaluationClassification + outlier_detection?: MlDataframeEvaluationOutlierDetection + regression?: MlDataframeEvaluationRegression } -export interface NoriTokenizer extends TokenizerBase { - decompound_mode: NoriDecompoundMode - discard_punctuation: boolean - user_dictionary: string - 
user_dictionary_rules: Array +export interface MlDataframeEvaluationMetrics { + auc_roc?: MlDataframeEvaluationClassificationMetricsAucRoc + precision?: Record + recall?: Record } -export interface NormalizeAggregation extends PipelineAggregationBase { - method?: NormalizeMethod +export interface MlDataframeEvaluationOutlierDetection { + actual_field: Field + predicted_probability_field: Field + metrics?: MlDataframeEvaluationOutlierDetectionMetrics } -export type NormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'zscore' | 'softmax' +export interface MlDataframeEvaluationOutlierDetectionMetrics extends MlDataframeEvaluationMetrics { + confusion_matrix?: Record +} -export interface NumberProperty extends DocValuesPropertyBase { - boost?: double - coerce?: boolean - fielddata?: NumericFielddata - ignore_malformed?: boolean - index?: boolean - null_value?: double - scaling_factor?: double - type: NumberType +export interface MlDataframeEvaluationRegression { + actual_field: Field + predicted_field: Field + metrics?: MlDataframeEvaluationRegressionMetrics } -export type NumberType = 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer' | 'long' | 'short' | 'byte' | 'unsigned_long' +export interface MlDataframeEvaluationRegressionMetrics { + mse?: Record + msle?: MlDataframeEvaluationRegressionMetricsMsle + huber?: MlDataframeEvaluationRegressionMetricsHuber + r_squared?: Record +} -export interface NumericDecayFunctionKeys extends DecayFunctionBase { +export interface MlDataframeEvaluationRegressionMetricsHuber { + delta?: double } -export type NumericDecayFunction = NumericDecayFunctionKeys | - { [property: string]: DecayPlacement } -export interface NumericFielddata { - format: NumericFielddataFormat +export interface MlDataframeEvaluationRegressionMetricsMsle { + offset?: double } -export type NumericFielddataFormat = 'array' | 'disabled' +export type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed' -export interface ObjectProperty extends CorePropertyBase { - dynamic?: boolean | DynamicMapping - enabled?: boolean - properties?: Record - type: 'object' +export interface MlDelayedDataCheckConfig { + check_window?: Time + enabled: boolean } -export type OpType = 'index' | 'create' +export interface MlDetectionRule { + actions?: MlRuleAction[] + conditions: MlRuleCondition[] + scope?: Record +} -export interface OpenIndexRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time - wait_for_active_shards?: WaitForActiveShards +export interface MlDetector { + by_field_name?: Field + custom_rules?: MlDetectionRule[] + detector_description?: string + detector_index?: integer + exclude_frequent?: MlExcludeFrequent + field_name?: Field + function?: string + use_null?: boolean + over_field_name?: Field + partition_field_name?: Field + description?: string } -export interface OpenIndexResponse extends AcknowledgedResponseBase { - shards_acknowledged: boolean +export interface MlDiscoveryNode { + attributes: Record + ephemeral_id: Id + id: Id + name: Name + transport_address: TransportAddress } -export interface OpenJobRequest extends RequestBase { - job_id: Id - body?: { - timeout?: Time - } +export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over' + +export interface MlFilter { + description?: string + filter_id: Id + items: string[] } -export interface OpenJobResponse extends ResponseBase { - opened: 
boolean +export interface MlFilterRef { + filter_id: Id + filter_type: MlFilterType } -export interface OpenPointInTimeRequest extends RequestBase { - index: Indices - keep_alive?: Time +export type MlFilterType = 'include' | 'exclude' + +export interface MlHyperparameter { + absolute_importance?: double + name: Name + relative_importance?: double + supplied: boolean + value: double } -export interface OpenPointInTimeResponse extends ResponseBase { - id: Id +export interface MlHyperparameters { + alpha?: double + lambda?: double + gamma?: double + eta?: double + eta_growth_rate_per_tree?: double + feature_bag_fraction?: double + downsample_factor?: double + max_attempts_to_add_tree?: integer + max_optimization_rounds_per_hyperparameter?: integer + max_trees?: integer + num_folds?: integer + num_splits_per_feature?: integer + soft_tree_depth_limit?: integer + soft_tree_depth_tolerance?: double +} + +export interface MlInfluence { + influencer_field_name: string + influencer_field_values: string[] } -export interface OperatingSystemMemoryInfo { - free_in_bytes: long - free_percent: integer - total_in_bytes: long - used_in_bytes: long - used_percent: integer +export interface MlJob { + allow_lazy_open?: boolean + analysis_config?: MlAnalysisConfig + analysis_limits?: MlAnalysisLimits + background_persist_interval?: Time + count?: integer + created_by?: EmptyObject + create_time?: integer + detectors?: MlJobStatistics + data_description?: MlDataDescription + description?: string + finished_time?: integer + forecasts?: XpackUsageMlJobForecasts + job_id?: Id + job_type?: string + model_plot?: MlModelPlotConfig + model_size?: MlJobStatistics + model_snapshot_id?: Id + model_snapshot_retention_days?: long + renormalization_window_days?: long + results_index_name?: IndexName + results_retention_days?: long + groups?: string[] + model_plot_config?: MlModelPlotConfig + custom_settings?: XpackUsageCustomSettings + job_version?: VersionString + deleting?: boolean + daily_model_snapshot_retention_after_days?: long } -export interface OperatingSystemStats { - cpu: CPUStats - mem: ExtendedMemoryStats - swap: MemoryStats - timestamp: long +export interface MlJobForecastStatistics { + memory_bytes?: MlJobStatistics + processing_time_ms?: MlJobStatistics + records?: MlJobStatistics + status?: Record + total: long + forecasted_jobs: integer } -export type Operator = 'and' | 'or' | 'AND' | 'OR' +export type MlJobState = 'closing' | 'closed' | 'opened' | 'failed' | 'opening' -export interface OverallBucket { - bucket_span: long - is_interim: boolean - jobs: Array - overall_score: double - result_type: string - timestamp: DateString +export interface MlJobStatistics { + avg: double + max: double + min: double + total: double } -export interface OverallBucketJobInfo { +export interface MlJobStats { + assignment_explanation?: string + data_counts: MlDataCounts + forecasts_stats: MlJobForecastStatistics job_id: string - max_anomaly_score: double + model_size_stats: MlModelSizeStats + node?: MlDiscoveryNode + open_time?: DateString + state: MlJobState + timing_stats: MlJobTimingStats + deleting?: boolean } -export interface Page { - from: integer - size: integer +export interface MlJobTimingStats { + average_bucket_processing_time_ms?: double + bucket_count: long + exponential_average_bucket_processing_time_ms?: double + exponential_average_bucket_processing_time_per_hour_ms: double + job_id: Id + total_bucket_processing_time_ms: double + maximum_bucket_processing_time_ms?: double + 
minimum_bucket_processing_time_ms?: double } -export interface PagerDutyActionEventResult { - event: PagerDutyEvent - reason: string - request: HttpInputRequestResult - response: HttpInputResponseResult -} +export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' -export interface PagerDutyActionResult { - sent_event: PagerDutyActionEventResult +export interface MlModelPlotConfig { + terms?: Field + enabled: boolean + annotations_enabled?: boolean } -export interface PagerDutyContext { - href: string - src: string - type: PagerDutyContextType +export interface MlModelPlotConfigEnabled { + enabled: boolean + annotations_enabled?: boolean + terms?: string } -export type PagerDutyContextType = 'link' | 'image' - -export interface PagerDutyEvent { - account: string - attach_payload: boolean - client: string - client_url: string - context: Array - description: string - event_type: PagerDutyEventType - incident_key: string +export interface MlModelSizeStats { + bucket_allocation_failures_count: long + job_id: Id + log_time: Time + memory_status: MlMemoryStatus + model_bytes: long + model_bytes_exceeded?: long + model_bytes_memory_limit?: long + peak_model_bytes?: long + assignment_memory_basis?: string + result_type: string + total_by_field_count: long + total_over_field_count: long + total_partition_field_count: long + categorization_status: string + categorized_doc_count: integer + dead_category_count: integer + failed_category_count: integer + frequent_category_count: integer + rare_category_count: integer + total_category_count: integer + timestamp?: long } -export type PagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge' +export interface MlModelSnapshot { + description?: string + job_id: Id + latest_record_time_stamp?: Time + latest_result_time_stamp?: Time + min_version: VersionString + model_size_stats?: MlModelSizeStats + retain: boolean + snapshot_doc_count: long + snapshot_id: Id + timestamp: Time +} -export interface PainlessContextSetup { - document: any - index: IndexName - query: QueryContainer +export interface MlOutlierDetectionParameters { + compute_feature_influence?: boolean + feature_influence_threshold?: double + method?: string + n_neighbors?: integer + outlier_fraction?: double + standardization_enabled?: boolean } -export interface PainlessExecutionPosition { - offset: integer - start: integer - end: integer +export interface MlOverallBucket { + bucket_span: long + is_interim: boolean + jobs: MlOverallBucketJob[] + overall_score: double + result_type: string + timestamp: Time } -export interface ParentAggregation extends BucketAggregationBase { - type?: RelationName +export interface MlOverallBucketJob { + job_id: Id + max_anomaly_score: double } -export interface ParentIdQuery extends QueryBase { - id?: Id - ignore_unmapped?: boolean - type?: RelationName +export interface MlPage { + from?: integer + size?: integer } -export interface PartitionScore { +export interface MlPartitionScore { initial_record_score: double - partition_field_name: string + partition_field_name: Field partition_field_value: string probability: double record_score: double } -export interface PathHierarchyTokenizer extends TokenizerBase { - buffer_size: integer - delimiter: string - replacement: string - reverse: boolean - skip: integer +export interface MlPerPartitionCategorization { + enabled?: boolean + stop_on_warn?: boolean } -export interface PatternCaptureTokenFilter extends TokenFilterBase { - patterns: Array - preserve_original: boolean -} +export type MlRuleAction = 
'skip_result' | 'skip_model_update' -export interface PatternReplaceTokenFilter extends TokenFilterBase { - flags: string - pattern: string - replacement: string +export interface MlRuleCondition { + applies_to: MlAppliesTo + operator: MlConditionOperator + value: double } -export interface PauseAutoFollowPatternRequest extends RequestBase { - name: Name +export interface MlTimingStats { + elapsed_time: integer + iteration_time?: integer } -export interface PauseAutoFollowPatternResponse extends AcknowledgedResponseBase { +export interface MlTotalFeatureImportance { + feature_name: Name + importance: MlTotalFeatureImportanceStatistics[] + classes: MlTotalFeatureImportanceClass[] } -export interface PauseFollowIndexRequest extends RequestBase { - index: IndexName +export interface MlTotalFeatureImportanceClass { + class_name: Name + importance: MlTotalFeatureImportanceStatistics[] } -export interface PauseFollowIndexResponse extends AcknowledgedResponseBase { +export interface MlTotalFeatureImportanceStatistics { + mean_magnitude: double + max: integer + min: integer } -export interface PendingTask { - insert_order: integer - priority: string - source: string - time_in_queue: string - time_in_queue_millis: integer +export interface MlTrainedModelConfig { + model_id: Id + tags: string[] + version?: VersionString + compressed_definition?: string + created_by?: string + create_time?: Time + default_field_map?: Record + description: string + estimated_heap_memory_usage_bytes?: integer + estimated_operations?: integer + inference_config: AggregationsInferenceConfigContainer + input: MlTrainedModelConfigInput + license_level?: string + metadata?: MlTrainedModelConfigMetadata } -export interface PerPartitionCategorization { - enabled?: boolean - stop_on_warn?: boolean +export interface MlTrainedModelConfigInput { + field_names: Field[] } -export type Percentage = string | float +export interface MlTrainedModelConfigMetadata { + model_aliases?: string[] + feature_importance_baseline?: Record + hyperparameters?: MlHyperparameter[] + total_feature_importance?: MlTotalFeatureImportance[] +} -export interface PercentageScoreHeuristic { +export interface MlTrainedModelInferenceStats { + failure_count: long + inference_count: long + cache_miss_count: long + missing_all_fields_count: long + timestamp: Time } -export interface PercentileItem { - percentile: double - value: double +export interface MlTrainedModelStats { + model_id: Id + pipeline_count: integer + inference_stats?: MlTrainedModelInferenceStats + ingest?: Record } -export interface PercentileRanksAggregation extends FormatMetricAggregationBase { - keyed?: boolean - values?: Array - hdr?: HdrMethod - tdigest?: TDigest +export interface MlValidationLoss { + fold_values: string[] + loss_type: string } -export interface PercentilesAggregate extends AggregateBase { - items: Array +export interface MlCloseJobRequest extends RequestBase { + job_id: Id + allow_no_jobs?: boolean + force?: boolean + timeout?: Time } -export interface PercentilesAggregation extends FormatMetricAggregationBase { - keyed?: boolean - percents?: Array - hdr?: HdrMethod - tdigest?: TDigest +export interface MlCloseJobResponse { + closed: boolean } -export interface PercentilesBucketAggregation extends PipelineAggregationBase { - percents?: Array +export interface MlDeleteCalendarRequest extends RequestBase { + calendar_id: Id } -export interface PercolateQuery extends QueryBase { - document?: any - documents?: Array - field?: Field - id?: Id - index?: IndexName - preference?: 
string - routing?: Routing - version?: VersionNumber +export interface MlDeleteCalendarResponse extends AcknowledgedResponseBase { } -export interface PercolatorProperty extends PropertyBase { - type: 'percolator' +export interface MlDeleteCalendarEventRequest extends RequestBase { + calendar_id: Id + event_id: Id } -export interface Phase { - actions: Record | Array - min_age?: Time +export interface MlDeleteCalendarEventResponse extends AcknowledgedResponseBase { } -export interface Phases { - cold?: Phase - delete?: Phase - hot?: Phase - warm?: Phase +export interface MlDeleteCalendarJobRequest extends RequestBase { + calendar_id: Id + job_id: Id } -export interface PhraseSuggestCollate { - params?: Record - prune?: boolean - query: PhraseSuggestCollateQuery +export interface MlDeleteCalendarJobResponse { + calendar_id: Id + description?: string + job_ids: Ids } -export interface PhraseSuggestCollateQuery { - id?: Id - source?: string +export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { + id: Id + force?: boolean + timeout?: Time } -export interface PhraseSuggestHighlight { - post_tag: string - pre_tag: string +export interface MlDeleteDataFrameAnalyticsResponse extends AcknowledgedResponseBase { } -export interface PhraseSuggestOption { - text: string - highlighted: string - score: double +export interface MlDeleteDatafeedRequest extends RequestBase { + datafeed_id: Id + force?: boolean } -export interface PhraseSuggester extends SuggesterBase { - collate?: PhraseSuggestCollate - confidence?: double - direct_generator?: Array - force_unigrams?: boolean - gram_size?: integer - highlight?: PhraseSuggestHighlight - max_errors?: double - real_word_error_likelihood?: double - separator?: string - shard_size?: integer - smoothing?: SmoothingModelContainer - text?: string - token_limit?: integer +export interface MlDeleteDatafeedResponse extends AcknowledgedResponseBase { } -export interface PingRequest extends RequestBase { +export interface MlDeleteExpiredDataRequest extends RequestBase { + name?: Name + requests_per_second?: float + timeout?: Time + body?: { + requests_per_second?: float + timeout?: Time + } } -export type PingResponse = boolean +export interface MlDeleteExpiredDataResponse { + deleted: boolean +} -export interface PinnedQuery extends QueryBase { - ids?: Array | Array - organic?: QueryContainer +export interface MlDeleteFilterRequest extends RequestBase { + filter_id: Id } -export interface Pipeline { - description?: string - on_failure?: Array - processors?: Array - version?: VersionNumber +export interface MlDeleteFilterResponse extends AcknowledgedResponseBase { } -export interface PipelineAggregationBase extends Aggregation { - buckets_path?: BucketsPath - format?: string - gap_policy?: GapPolicy +export interface MlDeleteForecastRequest extends RequestBase { + job_id: Id + forecast_id?: Id + allow_no_forecasts?: boolean + timeout?: Time } -export interface PipelineProcessor extends ProcessorBase { - name: string +export interface MlDeleteForecastResponse extends AcknowledgedResponseBase { } -export interface PipelineSimulation { - doc?: DocumentSimulation - processor_results?: Array - tag?: string - processor_type?: string - status?: Status +export interface MlDeleteJobRequest extends RequestBase { + job_id: Id + force?: boolean + wait_for_completion?: boolean } -export interface PluginStats { - classname: string - description: string - elasticsearch_version: VersionString - extended_plugins: Array - has_native_controller: boolean - java_version: 
VersionString - name: string - version: VersionString - licensed: boolean - type: string +export interface MlDeleteJobResponse extends AcknowledgedResponseBase { } -export interface PointInTimeReference { - id: Id - keep_alive?: Time +export interface MlDeleteModelSnapshotRequest extends RequestBase { + job_id: Id + snapshot_id: Id } -export interface PointProperty extends DocValuesPropertyBase { - ignore_malformed?: boolean - ignore_z_value?: boolean - null_value?: string - type: 'point' +export interface MlDeleteModelSnapshotResponse extends AcknowledgedResponseBase { } -export interface Policy { - phases: Phases - name?: string +export interface MlDeleteTrainedModelRequest extends RequestBase { + model_id: Id } -export interface PorterStemTokenFilter extends TokenFilterBase { +export interface MlDeleteTrainedModelResponse extends AcknowledgedResponseBase { } -export interface PostCalendarEventsRequest extends RequestBase { - calendar_id: Id - body: { - events?: Array - } +export interface MlDeleteTrainedModelAliasRequest extends RequestBase { + model_alias: Name + model_id: Id } -export interface PostCalendarEventsResponse extends ResponseBase { - events: Array +export interface MlDeleteTrainedModelAliasResponse extends AcknowledgedResponseBase { } -export interface PostJobDataRequest extends RequestBase { - job_id: Id - reset_end?: DateString - reset_start?: DateString - body: { - data?: Array +export interface MlEstimateModelMemoryRequest extends RequestBase { + body?: { + analysis_config?: MlAnalysisConfig + max_bucket_cardinality?: Record + overall_cardinality?: Record } } -export interface PostJobDataResponse extends ResponseBase { - bucket_count: long - earliest_record_timestamp: integer - empty_bucket_count: long - input_bytes: long - input_field_count: long - input_record_count: long - invalid_date_count: long - job_id: string - last_data_time: integer - latest_record_timestamp: integer - missing_field_count: long - out_of_order_timestamp_count: long - processed_field_count: long - processed_record_count: long - sparse_bucket_count: long +export interface MlEstimateModelMemoryResponse { + model_memory_estimate: ByteSize } -export interface PostLicenseRequest extends RequestBase { - acknowledge?: boolean - body?: { - license?: License - licenses?: Array - } +export interface MlEvaluateDataFrameConfusionMatrixItem { + actual_class: Name + actual_class_doc_count: integer + predicted_classes: MlEvaluateDataFrameConfusionMatrixPrediction[] + other_predicted_class_doc_count: integer } -export interface PostLicenseResponse extends ResponseBase { - acknowledge?: LicenseAcknowledgement - acknowledged: boolean - license_status: LicenseStatus +export interface MlEvaluateDataFrameConfusionMatrixPrediction { + predicted_class: Name + count: integer } -export interface PredicateTokenFilter extends TokenFilterBase { - script: Script +export interface MlEvaluateDataFrameConfusionMatrixTreshold { + tp: integer + fp: integer + tn: integer + fn: integer } -export interface PrefixQuery extends QueryBase { - rewrite?: MultiTermQueryRewrite - value: string +export interface MlEvaluateDataFrameDataframeClassificationSummary { + auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc + accuracy?: MlEvaluateDataFrameDataframeClassificationSummaryAccuracy + multiclass_confusion_matrix?: MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix + precision?: MlEvaluateDataFrameDataframeClassificationSummaryPrecision + recall?: MlEvaluateDataFrameDataframeClassificationSummaryRecall 
} -export interface PreviewDatafeedRequest extends RequestBase { - datafeed_id: Id +export interface MlEvaluateDataFrameDataframeClassificationSummaryAccuracy { + classes: MlEvaluateDataFrameDataframeEvaluationClass[] + overall_accuracy: double } -export interface PreviewDatafeedResponse extends ResponseBase { - data: Array +export interface MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix { + confusion_matrix: MlEvaluateDataFrameConfusionMatrixItem[] + other_actual_class_count: integer } -export interface PreviewTransformRequest extends RequestBase { - body: { - description?: string - dest?: TransformDestination - frequency?: Time - pivot?: TransformPivot - source?: TransformSource - sync?: TransformSyncContainer - } +export interface MlEvaluateDataFrameDataframeClassificationSummaryPrecision { + classes: MlEvaluateDataFrameDataframeEvaluationClass[] + avg_precision: double } -export interface PreviewTransformResponse extends ResponseBase { - generated_dest_index: IndexState - preview: Array +export interface MlEvaluateDataFrameDataframeClassificationSummaryRecall { + classes: MlEvaluateDataFrameDataframeEvaluationClass[] + avg_recall: double } -export type Privileges = Record +export interface MlEvaluateDataFrameDataframeEvaluationClass extends MlEvaluateDataFrameDataframeEvaluationValue { + class_name: Name +} -export interface PrivilegesActions { - actions: Array - application?: string - name?: string - metadata?: Record +export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc extends MlEvaluateDataFrameDataframeEvaluationValue { + curve?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem[] } -export interface ProcessStats { - cpu: CPUStats - mem: MemoryStats - open_file_descriptors: integer - timestamp: long +export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem { + tpr: double + fpr: double + threshold: double } -export interface ProcessorBase { - if?: string - ignore_failure?: boolean - on_failure?: Array - tag?: string +export interface MlEvaluateDataFrameDataframeEvaluationValue { + value: double } -export interface ProcessorContainer { - attachment?: AttachmentProcessor - append?: AppendProcessor - csv?: CsvProcessor - convert?: ConvertProcessor - date?: DateProcessor - date_index_name?: DateIndexNameProcessor - dot_expander?: DotExpanderProcessor - enrich?: EnrichProcessor - fail?: FailProcessor - foreach?: ForeachProcessor - json?: JsonProcessor - user_agent?: UserAgentProcessor - kv?: KeyValueProcessor - geoip?: GeoIpProcessor - grok?: GrokProcessor - gsub?: GsubProcessor - join?: JoinProcessor - lowercase?: LowercaseProcessor - remove?: RemoveProcessor - rename?: RenameProcessor - script?: ScriptProcessor - set?: SetProcessor - sort?: SortProcessor - split?: SplitProcessor - trim?: TrimProcessor - uppercase?: UppercaseProcessor - urldecode?: UrlDecodeProcessor - bytes?: BytesProcessor - dissect?: DissectProcessor - set_security_user?: SetSecurityUserProcessor - pipeline?: PipelineProcessor - drop?: DropProcessor - circle?: CircleProcessor - inference?: InferenceProcessor -} - -export interface Profile { - shards: Array -} - -export type Property = FlattenedProperty | JoinProperty | PercolatorProperty | RankFeatureProperty | RankFeaturesProperty | ConstantKeywordProperty | FieldAliasProperty | HistogramProperty | CoreProperty - -export interface PropertyBase { - local_metadata?: Record - meta?: Record - name?: PropertyName - properties?: Record - ignore_above?: integer - dynamic?: boolean | 
DynamicMapping - fields?: Record +export interface MlEvaluateDataFrameDataframeOutlierDetectionSummary { + auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc + precision?: Record + recall?: Record + confusion_matrix?: Record } -export type PropertyName = string +export interface MlEvaluateDataFrameDataframeRegressionSummary { + huber?: MlEvaluateDataFrameDataframeEvaluationValue + mse?: MlEvaluateDataFrameDataframeEvaluationValue + msle?: MlEvaluateDataFrameDataframeEvaluationValue + r_squared?: MlEvaluateDataFrameDataframeEvaluationValue +} -export interface PutAliasRequest extends RequestBase { - index: Indices - name: Name - master_timeout?: Time - timeout?: Time +export interface MlEvaluateDataFrameRequest extends RequestBase { body?: { - filter?: QueryContainer - index_routing?: Routing - is_write_index?: boolean - routing?: Routing - search_routing?: Routing + evaluation: MlDataframeEvaluationContainer + index: IndexName + query?: QueryDslQueryContainer } } -export interface PutAliasResponse extends ResponseBase { +export interface MlEvaluateDataFrameResponse { + classification?: MlEvaluateDataFrameDataframeClassificationSummary + outlier_detection?: MlEvaluateDataFrameDataframeOutlierDetectionSummary + regression?: MlEvaluateDataFrameDataframeRegressionSummary } -export interface PutAutoFollowPatternRequest extends RequestBase { - name: Name - body: { - remote_cluster: string - follow_index_pattern?: IndexPattern - leader_index_patterns?: IndexPatterns - max_outstanding_read_requests?: integer - settings?: Record - max_outstanding_write_requests?: integer - read_poll_timeout?: Time - max_read_request_operation_count?: integer - max_read_request_size?: ByteSize - max_retry_delay?: Time - max_write_buffer_count?: integer - max_write_buffer_size?: ByteSize - max_write_request_operation_count?: integer - max_write_request_size?: ByteSize +export interface MlExplainDataFrameAnalyticsRequest extends RequestBase { + id?: Id + body?: { + source?: MlDataframeAnalyticsSource + dest?: MlDataframeAnalyticsDestination + analysis: MlDataframeAnalysisContainer + description?: string + model_memory_limit?: ByteSize + max_num_threads?: integer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields + allow_lazy_start?: boolean } } -export interface PutAutoFollowPatternResponse extends AcknowledgedResponseBase { +export interface MlExplainDataFrameAnalyticsResponse { + field_selection: MlDataframeAnalyticsFieldSelection[] + memory_estimation: MlDataframeAnalyticsMemoryEstimation } -export interface PutAutoscalingPolicyRequest extends RequestBase { - stub_a: string - stub_b: string - body: { - stub_c: string - } +export interface MlFindFileStructureRequest extends RequestBase { + stub: string } -export interface PutAutoscalingPolicyResponse extends ResponseBase { - stub: integer +export interface MlFindFileStructureResponse { + stub: string } -export interface PutCalendarJobRequest extends RequestBase { - calendar_id: Id +export interface MlFlushJobRequest extends RequestBase { job_id: Id + skip_time?: string + body?: { + advance_time?: DateString + calc_interim?: boolean + end?: DateString + start?: DateString + } } -export interface PutCalendarJobResponse extends ResponseBase { - calendar_id: string - description: string - job_ids: Array +export interface MlFlushJobResponse { + flushed: boolean + last_finalized_bucket_end?: integer } -export interface PutCalendarRequest extends RequestBase { - calendar_id: Id +export interface MlForecastJobRequest extends RequestBase { + job_id: Id body?: { 
- description?: string + duration?: Time + expires_in?: Time } } -export interface PutCalendarResponse extends ResponseBase { - calendar_id: string - description: string - job_ids: Array +export interface MlForecastJobResponse extends AcknowledgedResponseBase { + forecast_id: Id } -export interface PutDatafeedRequest extends RequestBase { - datafeed_id: Id - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - body: { - aggregations?: Record - chunking_config?: ChunkingConfig - frequency?: Time - indices?: Array - indexes?: Array - job_id?: Id - max_empty_searches?: integer - query?: QueryContainer - query_delay?: Time - script_fields?: Record - scroll_size?: integer +export interface MlGetAnomalyRecordsRequest extends RequestBase { + job_id: Id + exclude_interim?: boolean + from?: integer + size?: integer + start?: DateString + end?: DateString + body?: { + desc?: boolean + exclude_interim?: boolean + page?: MlPage + record_score?: double + sort?: Field + start?: DateString + end?: DateString } } -export interface PutDatafeedResponse extends ResponseBase { - aggregations: Record - chunking_config: ChunkingConfig - datafeed_id: string - frequency: Time - indices: Indices - job_id: string - max_empty_searches: integer - query: QueryContainer - query_delay: Time - script_fields: Record - scroll_size: integer +export interface MlGetAnomalyRecordsResponse { + count: long + records: MlAnomaly[] } -export interface PutEnrichPolicyRequest extends RequestBase { - name: Name - body: { - geo_match?: EnrichPolicy - match?: EnrichPolicy +export interface MlGetBucketsRequest extends RequestBase { + job_id: Id + timestamp?: Timestamp + from?: integer + size?: integer + exclude_interim?: boolean + sort?: Field + desc?: boolean + start?: DateString + end?: DateString + body?: { + anomaly_score?: double + desc?: boolean + exclude_interim?: boolean + expand?: boolean + page?: MlPage + sort?: Field + start?: DateString + end?: DateString } } -export interface PutEnrichPolicyResponse extends AcknowledgedResponseBase { +export interface MlGetBucketsResponse { + buckets: MlBucketSummary[] + count: long } -export interface PutFilterRequest extends RequestBase { - filter_id: Id - body: { - description?: string - items?: Array +export interface MlGetCalendarEventsRequest extends RequestBase { + calendar_id: Id + job_id?: Id + end?: DateString + from?: integer + start?: string + size?: integer + body?: { + end?: DateString + from?: integer + start?: string + size?: integer } } -export interface PutFilterResponse extends ResponseBase { - description: string - filter_id: string - items: Array +export interface MlGetCalendarEventsResponse { + count: long + events: MlCalendarEvent[] } -export interface PutIndexTemplateRequest extends RequestBase { - name: Name - create?: boolean - flat_settings?: boolean - include_type_name?: boolean - master_timeout?: Time - timeout?: Time - body: { - aliases?: Record - index_patterns?: string | Array - mappings?: TypeMapping - order?: integer - settings?: Record - version?: VersionNumber +export interface MlGetCalendarsCalendar { + calendar_id: Id + description?: string + job_ids: Id[] +} + +export interface MlGetCalendarsRequest extends RequestBase { + calendar_id?: Id + from?: integer + size?: integer + body?: { + page?: MlPage } } -export interface PutIndexTemplateResponse extends AcknowledgedResponseBase { +export interface MlGetCalendarsResponse { + calendars: MlGetCalendarsCalendar[] + count: long } 
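// ---------------------------------------------------------------------------
// Editorial aside: a hedged usage sketch for the generated request/response
// pair above (MlGetCalendarsRequest / MlGetCalendarsResponse), kept entirely
// in comments because a .d.ts file cannot carry runtime code. The import path
// for the generated types is an assumption -- their final export location was
// still settling at this point in the patch series -- while
// `client.ml.getCalendars` and the `{ body }` transport envelope are the
// regular 7.x client API.
//
// import { Client } from '@elastic/elasticsearch'
// // Assumed path; adjust to wherever the generated types end up exported.
// import type { MlGetCalendarsRequest, MlGetCalendarsResponse } from '@elastic/elasticsearch/api/types'
//
// const client = new Client({ node: 'http://localhost:9200/' })
//
// async function listCalendars (): Promise<void> {
//   // The generated request type checks calendar_id and the optional `page`
//   // body against the spec-derived definitions above.
//   const params: MlGetCalendarsRequest = {
//     calendar_id: 'my-calendar',
//     body: { page: { from: 0, size: 10 } }
//   }
//   const { body } = await client.ml.getCalendars(params)
//   // Narrow the untyped transport body to the generated response type.
//   const result = body as MlGetCalendarsResponse
//   for (const calendar of result.calendars) {
//     console.log(calendar.calendar_id, calendar.job_ids)
//   }
// }
// ---------------------------------------------------------------------------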
-export interface PutJobRequest extends RequestBase { +export interface MlGetCategoriesRequest extends RequestBase { job_id: Id - body: { - allow_lazy_open?: boolean - analysis_config?: AnalysisConfig - analysis_limits?: AnalysisLimits - data_description?: DataDescription - description?: string - model_plot?: ModelPlotConfig - model_snapshot_retention_days?: long - results_index_name?: IndexName + category_id?: CategoryId + from?: integer + size?: integer + partition_field_value?: string + body?: { + page?: MlPage } } -export interface PutJobResponse extends ResponseBase { - allow_lazy_open: boolean - analysis_config: AnalysisConfig - analysis_limits: AnalysisLimits - background_persist_interval: Time - create_time: DateString - data_description: DataDescription - description: string - job_id: string - job_type: string - model_plot: ModelPlotConfig - model_snapshot_id: string - model_snapshot_retention_days: long - renormalization_window_days: long - results_index_name: string - results_retention_days: long +export interface MlGetCategoriesResponse { + categories: MlCategory[] + count: long } -export interface PutLifecycleRequest extends RequestBase { - policy?: Name - policy_id?: Id - body?: { - policy?: Policy - } +export interface MlGetDataFrameAnalyticsRequest extends RequestBase { + id?: Id + allow_no_match?: boolean + from?: integer + size?: integer + exclude_generated?: boolean } -export interface PutLifecycleResponse extends AcknowledgedResponseBase { +export interface MlGetDataFrameAnalyticsResponse { + count: integer + data_frame_analytics: MlDataframeAnalyticsSummary[] } -export interface PutMappingRequest extends RequestBase { - index?: Indices - type?: Type - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - include_type_name?: boolean - master_timeout?: Time - timeout?: Time - write_index_only?: boolean - body: { - all_field?: AllField - date_detection?: boolean - dynamic?: boolean | DynamicMapping - dynamic_date_formats?: Array - dynamic_templates?: Record | Array> - field_names_field?: FieldNamesField - index_field?: IndexField - meta?: Record - numeric_detection?: boolean - properties?: Record - routing_field?: RoutingField - size_field?: SizeField - source_field?: SourceField - runtime?: RuntimeFields - } +export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase { + id?: Id + allow_no_match?: boolean + from?: integer + size?: integer + verbose?: boolean } -export interface PutMappingResponse extends IndicesResponseBase { +export interface MlGetDataFrameAnalyticsStatsResponse { + count: long + data_frame_analytics: MlDataframeAnalytics[] } -export interface PutPipelineRequest extends RequestBase { - id: Id - master_timeout?: Time - timeout?: Time - body: { - description?: string - on_failure?: Array - processors?: Array - version?: VersionNumber - } +export interface MlGetDatafeedStatsRequest extends RequestBase { + datafeed_id?: Ids + allow_no_datafeeds?: boolean } -export interface PutPipelineResponse extends AcknowledgedResponseBase { +export interface MlGetDatafeedStatsResponse { + count: long + datafeeds: MlDatafeedStats[] } -export interface PutPrivilegesRequest extends RequestBase { - refresh?: Refresh - body: Record> +export interface MlGetDatafeedsRequest extends RequestBase { + datafeed_id?: Id + allow_no_datafeeds?: boolean + exclude_generated?: boolean } -export interface PutPrivilegesResponse extends DictionaryResponseBase> { +export interface MlGetDatafeedsResponse { + count: long + datafeeds: 
MlDatafeed[] } -export interface PutPrivilegesStatus { - created: boolean +export interface MlGetFiltersRequest extends RequestBase { + filter_id?: Id + from?: integer + size?: integer } -export interface PutRoleMappingRequest extends RequestBase { - name: Name - refresh?: Refresh - body: { - enabled?: boolean - metadata?: Record - roles?: Array - rules?: RoleMappingRuleBase - run_as?: Array +export interface MlGetFiltersResponse { + count: long + filters: MlFilter[] +} + +export interface MlGetInfluencersRequest extends RequestBase { + job_id: Id + desc?: boolean + end?: DateString + exclude_interim?: boolean + influencer_score?: double + from?: integer + size?: integer + sort?: Field + start?: DateString + body?: { + page?: MlPage } } -export interface PutRoleMappingResponse extends ResponseBase { - created?: boolean - role_mapping: PutRoleMappingStatus +export interface MlGetInfluencersResponse { + count: long + influencers: MlBucketInfluencer[] } -export interface PutRoleMappingStatus { - created: boolean +export interface MlGetJobStatsRequest extends RequestBase { + job_id?: Id + allow_no_jobs?: boolean } -export interface PutRoleRequest extends RequestBase { - name: Name - refresh?: Refresh - body: { - applications?: Array - cluster?: Array - global?: Record - indices?: Array - metadata?: Record - run_as?: Array - transient_metadata?: TransientMetadata - } +export interface MlGetJobStatsResponse { + count: long + jobs: MlJobStats[] } -export interface PutRoleResponse extends ResponseBase { - role: PutRoleStatus +export interface MlGetJobsRequest extends RequestBase { + job_id?: Ids + allow_no_match?: boolean + allow_no_jobs?: boolean + exclude_generated?: boolean } -export interface PutRoleStatus { - created: boolean +export interface MlGetJobsResponse { + count: long + jobs: MlJob[] } -export interface PutScriptRequest extends RequestBase { - id: Id - context?: Name - master_timeout?: Time - timeout?: Time - body: { - script?: StoredScript +export interface MlGetModelSnapshotsRequest extends RequestBase { + job_id: Id + snapshot_id?: Id + desc?: boolean + end?: Time + from?: integer + size?: integer + sort?: Field + start?: Time + body?: { + start?: Time + end?: Time } } -export interface PutScriptResponse extends AcknowledgedResponseBase { +export interface MlGetModelSnapshotsResponse { + count: long + model_snapshots: MlModelSnapshot[] } -export interface PutSnapshotLifecycleRequest extends RequestBase { - policy_id: Name +export interface MlGetOverallBucketsRequest extends RequestBase { + job_id: Id + bucket_span?: Time + overall_score?: double | string + top_n?: integer + end?: Time + start?: Time + exclude_interim?: boolean body?: { - config?: SnapshotLifecycleConfig - name?: string - repository?: string - retention?: SnapshotRetentionConfiguration - schedule?: CronExpression + allow_no_jobs?: boolean } } -export interface PutSnapshotLifecycleResponse extends AcknowledgedResponseBase { +export interface MlGetOverallBucketsResponse { + count: long + overall_buckets: MlOverallBucket[] } -export interface PutTransformRequest extends RequestBase { - transform_id: Name - defer_validation?: boolean - body: { - description?: string - dest?: TransformDestination - frequency?: Time - pivot?: TransformPivot - source?: TransformSource - sync?: TransformSyncContainer - } +export interface MlGetTrainedModelsRequest extends RequestBase { + model_id?: Id + allow_no_match?: boolean + decompress_definition?: boolean + exclude_generated?: boolean + from?: integer + include?: string + size?: 
integer + tags?: string } -export interface PutTransformResponse extends AcknowledgedResponseBase { +export interface MlGetTrainedModelsResponse { + count: integer + trained_model_configs: MlTrainedModelConfig[] } -export interface PutUserRequest extends RequestBase { - username: Name - refresh?: Refresh - body: { - username?: Name - email?: string | null - full_name?: string | null - metadata?: Record - password?: string - password_hash?: string - roles?: Array - enabled?: boolean - } +export interface MlGetTrainedModelsStatsRequest extends RequestBase { + model_id?: Id + allow_no_match?: boolean + from?: integer + size?: integer } -export interface PutUserResponse extends ResponseBase { - created: boolean +export interface MlGetTrainedModelsStatsResponse { + count: integer + trained_model_stats: MlTrainedModelStats[] } -export interface PutWatchRequest extends RequestBase { - id: Name - active?: boolean - if_primary_term?: long - if_sequence_number?: long - version?: VersionNumber - body?: { - actions?: Record - condition?: ConditionContainer - input?: InputContainer - metadata?: Record - throttle_period?: string - transform?: TransformContainer - trigger?: TriggerContainer - } +export interface MlInfoAnomalyDetectors { + categorization_analyzer: MlCategorizationAnalyzer + categorization_examples_limit: integer + model_memory_limit: ByteSize + model_snapshot_retention_days: integer + daily_model_snapshot_retention_after_days: integer } -export interface PutWatchResponse extends ResponseBase { - created: boolean - _id: Id - _primary_term: long - _seq_no: SequenceNumber - _version: VersionNumber +export interface MlInfoDatafeeds { + scroll_size: integer } -export type Quantifier = 'some' | 'all' +export interface MlInfoDefaults { + anomaly_detectors: MlInfoAnomalyDetectors + datafeeds: MlInfoDatafeeds +} -export interface QueryBase { - boost?: float - _name?: string +export interface MlInfoLimits { + max_model_memory_limit?: ByteSize + effective_max_model_memory_limit: ByteSize + total_ml_memory: ByteSize } -export interface QueryBreakdown { - advance: long - advance_count: long - build_scorer: long - build_scorer_count: long - create_weight: long - create_weight_count: long - match: long - match_count: long - shallow_advance: long - shallow_advance_count: long - next_doc: long - next_doc_count: long - score: long - score_count: long - compute_max_score: long - compute_max_score_count: long - set_min_competitive_score: long - set_min_competitive_score_count: long +export interface MlInfoNativeCode { + build_hash: string + version: VersionString } -export interface QueryCacheStats { - cache_count: long - cache_size: long - evictions: long - hit_count: long - memory_size_in_bytes: long - miss_count: long - total_count: long +export interface MlInfoRequest extends RequestBase { } -export interface QueryContainer { - bool?: BoolQuery - boosting?: BoostingQuery - common?: Record - constant_score?: ConstantScoreQuery - dis_max?: DisMaxQuery - distance_feature?: Record | DistanceFeatureQuery - exists?: ExistsQuery - function_score?: FunctionScoreQuery - fuzzy?: Record - geo_bounding_box?: NamedQuery - geo_distance?: NamedQuery - geo_polygon?: NamedQuery - geo_shape?: NamedQuery - has_child?: HasChildQuery - has_parent?: HasParentQuery - ids?: IdsQuery - intervals?: NamedQuery - is_conditionless?: boolean - is_strict?: boolean - is_verbatim?: boolean - is_writable?: boolean - match?: NamedQuery - match_all?: MatchAllQuery - match_bool_prefix?: NamedQuery - match_none?: MatchNoneQuery - 
match_phrase?: NamedQuery - match_phrase_prefix?: NamedQuery - more_like_this?: MoreLikeThisQuery - multi_match?: MultiMatchQuery - nested?: NestedQuery - parent_id?: ParentIdQuery - percolate?: PercolateQuery - pinned?: PinnedQuery - prefix?: NamedQuery - query_string?: QueryStringQuery - range?: NamedQuery - rank_feature?: NamedQuery - regexp?: NamedQuery - script?: ScriptQuery - script_score?: ScriptScoreQuery - shape?: NamedQuery - simple_query_string?: SimpleQueryStringQuery - span_containing?: SpanContainingQuery - field_masking_span?: SpanFieldMaskingQuery - span_first?: SpanFirstQuery - span_multi?: SpanMultiTermQuery - span_near?: SpanNearQuery - span_not?: SpanNotQuery - span_or?: SpanOrQuery - span_term?: NamedQuery - span_within?: SpanWithinQuery - template?: QueryTemplate - term?: NamedQuery - terms?: NamedQuery | Array> - terms_set?: NamedQuery - wildcard?: NamedQuery - type?: TypeQuery -} - -export interface QueryProfile { - breakdown: QueryBreakdown - description: string - time_in_nanos: long - type: string - children?: Array +export interface MlInfoResponse { + defaults: MlInfoDefaults + limits: MlInfoLimits + upgrade_mode: boolean + native_code: MlInfoNativeCode } -export interface QuerySqlRequest extends RequestBase { - format?: string - body: { - columnar?: boolean - cursor?: string - fetch_size?: integer - filter?: QueryContainer - query?: string - time_zone?: string +export interface MlOpenJobRequest extends RequestBase { + job_id: Id + body?: { + timeout?: Time } } -export interface QuerySqlResponse extends ResponseBase { - columns?: Array - cursor?: string - rows: Array +export interface MlOpenJobResponse { + opened: boolean } -export interface QueryStringQuery extends QueryBase { - allow_leading_wildcard?: boolean - analyzer?: string - analyze_wildcard?: boolean - auto_generate_synonyms_phrase_query?: boolean - default_field?: Field - default_operator?: Operator - enable_position_increments?: boolean - escape?: boolean - fields?: Fields - fuzziness?: Fuzziness - fuzzy_max_expansions?: integer - fuzzy_prefix_length?: integer - fuzzy_rewrite?: MultiTermQueryRewrite - fuzzy_transpositions?: boolean - lenient?: boolean - max_determinized_states?: integer - minimum_should_match?: MinimumShouldMatch - phrase_slop?: double - query?: string - quote_analyzer?: string - quote_field_suffix?: string - rewrite?: MultiTermQueryRewrite - tie_breaker?: double - time_zone?: string - type?: TextQueryType +export interface MlPostCalendarEventsRequest extends RequestBase { + calendar_id?: Id + body?: { + events: MlCalendarEvent[] + } } -export interface QueryTemplate { - source: string +export interface MlPostCalendarEventsResponse { + events: MlCalendarEvent[] } -export interface QueryUsage { - count?: integer - failed?: integer - paging?: integer - total?: integer +export interface MlPostJobDataRequest extends RequestBase { + job_id: Id + reset_end?: DateString + reset_start?: DateString + body?: { + data?: any[] + } +} + +export interface MlPostJobDataResponse { + bucket_count: long + earliest_record_timestamp: integer + empty_bucket_count: long + input_bytes: long + input_field_count: long + input_record_count: long + invalid_date_count: long + job_id: Id + last_data_time: integer + latest_record_timestamp: integer + missing_field_count: long + out_of_order_timestamp_count: long + processed_field_count: long + processed_record_count: long + sparse_bucket_count: long } -export interface QueryUserPrivileges { - term: TermUserPrivileges +export interface 
MlPreviewDataFrameAnalyticsDataframePreviewConfig {
+  source: MlDataframeAnalyticsSource
+  analysis: MlDataframeAnalysisContainer
+  model_memory_limit?: ByteSize
+  max_num_threads?: integer
+  analyzed_fields?: MlDataframeAnalysisAnalyzedFields
}

-export interface QueryWatchesRequest extends RequestBase {
-  stub_a: string
-  stub_b: string
+export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase {
+  id?: Id
  body?: {
-    stub_c: string
+    config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig
  }
}

-export interface QueryWatchesResponse extends ResponseBase {
-  stub: integer
+export interface MlPreviewDataFrameAnalyticsResponse {
+  feature_values: Record<Field, string>[]
}

-export interface RandomScoreFunction extends ScoreFunctionBase {
-  field?: Field
-  seed?: long | string
+export interface MlPreviewDatafeedRequest extends RequestBase {
+  datafeed_id: Id
}

-export interface RangeAggregation extends BucketAggregationBase {
-  field?: Field
-  ranges?: Array<AggregationRange>
-  script?: Script
+export interface MlPreviewDatafeedResponse<TDocument> {
+  data: TDocument[]
}

-export interface RangeBucketKeys {
+export interface MlPutCalendarRequest extends RequestBase {
+  calendar_id: Id
+  body?: {
+    description?: string
+  }
}
-export type RangeBucket = RangeBucketKeys |
-  { [property: string]: Aggregate }
-
-export type RangeProperty = LongRangeProperty | IpRangeProperty | IntegerRangeProperty | FloatRangeProperty | DoubleRangeProperty | DateRangeProperty
-export interface RangePropertyBase extends DocValuesPropertyBase {
-  boost?: double
-  coerce?: boolean
-  index?: boolean
+export interface MlPutCalendarResponse {
+  calendar_id: Id
+  description: string
+  job_ids: Ids
}

-export interface RangeQuery extends QueryBase {
-  gt?: double | DateMath
-  gte?: double | DateMath
-  lt?: double | DateMath
-  lte?: double | DateMath
-  relation?: RangeRelation
-  time_zone?: string
-  from?: double | DateMath
-  to?: double | DateMath
+export interface MlPutCalendarJobRequest extends RequestBase {
+  calendar_id: Id
+  job_id: Id
}

-export type RangeRelation = 'within' | 'contains' | 'intersects'
+export interface MlPutCalendarJobResponse {
+  calendar_id: Id
+  description?: string
+  job_ids: Ids
+}

-export interface RankFeatureFunction {
+export interface MlPutDataFrameAnalyticsRequest extends RequestBase {
+  id: Id
+  body?: {
+    source?: MlDataframeAnalyticsSource
+    dest: MlDataframeAnalyticsDestination
+    analysis: MlDataframeAnalysisContainer
+    description?: string
+    model_memory_limit?: ByteSize
+    max_num_threads?: integer
+    analyzed_fields?: MlDataframeAnalysisAnalyzedFields
+    allow_lazy_start?: boolean
+  }
}

-export interface RankFeatureProperty extends PropertyBase {
-  positive_score_impact?: boolean
-  type: 'rank_feature'
+export interface MlPutDataFrameAnalyticsResponse {
+  id: Id
+  create_time: long
+  version: VersionString
+  source: MlDataframeAnalyticsSource
+  description?: string
+  dest: MlDataframeAnalyticsDestination
+  model_memory_limit: ByteSize
+  allow_lazy_start: boolean
+  max_num_threads: integer
+  analysis: MlDataframeAnalysisContainer
+  analyzed_fields?: MlDataframeAnalysisAnalyzedFields
}

-export interface RankFeatureQuery extends QueryBase {
-  function?: RankFeatureFunction
+export interface MlPutDatafeedRequest extends RequestBase {
+  datafeed_id: Id
+  allow_no_indices?: boolean
+  expand_wildcards?: ExpandWildcards
+  ignore_throttled?: boolean
+  ignore_unavailable?: boolean
+  body?: {
+    aggregations?: Record<string, AggregationsAggregationContainer>
+    chunking_config?: MlChunkingConfig
+    frequency?: Time
+    indices?: Indices
+    indexes?: string[]
+    job_id?: Id
+    max_empty_searches?: integer
+    query?: QueryDslQueryContainer
+    query_delay?: Time
+    script_fields?: Record<string, ScriptField>
+    scroll_size?: integer
+  }
}

-export interface RankFeaturesProperty extends PropertyBase {
-  type: 'rank_features'
+export interface MlPutDatafeedResponse {
+  aggregations: Record<string, AggregationsAggregationContainer>
+  chunking_config: MlChunkingConfig
+  datafeed_id: Id
+  frequency: Time
+  indices: Indices
+  job_id: Id
+  max_empty_searches: integer
+  query: QueryDslQueryContainer
+  query_delay: Time
+  script_fields: Record<string, ScriptField>
+  scroll_size: integer
}

-export interface RareTermsAggregation extends BucketAggregationBase {
-  exclude?: string | Array<string>
-  field?: Field
-  include?: string | Array<string> | TermsInclude
-  max_doc_count?: long
-  missing?: Missing
-  precision?: double
-  value_type?: string
+export interface MlPutFilterRequest extends RequestBase {
+  filter_id: Id
+  body?: {
+    description?: string
+    items?: string[]
+  }
}

-export interface RareTermsBucketKeys {
+export interface MlPutFilterResponse {
+  description: string
+  filter_id: Id
+  items: string[]
}
-export type RareTermsBucket = RareTermsBucketKeys |
-  { [property: string]: Aggregate }

-export interface RateAggregation extends FormatMetricAggregationBase {
-  unit?: DateInterval
-  mode?: RateMode
+export interface MlPutJobRequest extends RequestBase {
+  job_id: Id
+  body?: {
+    allow_lazy_open?: boolean
+    analysis_config?: MlAnalysisConfig
+    analysis_limits?: MlAnalysisLimits
+    data_description?: MlDataDescription
+    description?: string
+    model_plot?: MlModelPlotConfig
+    model_snapshot_retention_days?: long
+    results_index_name?: IndexName
+  }
}

-export type RateMode = 'sum' | 'value_count'
+export interface MlPutJobResponse {
+  allow_lazy_open: boolean
+  analysis_config: MlAnalysisConfig
+  analysis_limits: MlAnalysisLimits
+  background_persist_interval: Time
+  create_time: DateString
+  data_description: MlDataDescription
+  description: string
+  job_id: Id
+  job_type: string
+  model_plot: MlModelPlotConfig
+  model_snapshot_id: Id
+  model_snapshot_retention_days: long
+  renormalization_window_days: long
+  results_index_name: string
+  results_retention_days: long
+}

-export interface RealmCacheUsage {
-  size: long
+export interface MlPutTrainedModelRequest extends RequestBase {
+  stub: string
+  body?: {
+    stub?: string
+  }
}

-export interface RealmInfo {
-  name: string
-  type: string
+export interface MlPutTrainedModelResponse {
+  stub: boolean
}

-export interface RealmUsage extends XPackUsage {
-  name?: Array<string>
-  order?: Array<long>
-  size?: Array<long>
-  cache?: Array<RealmCacheUsage>
-  has_authorization_realms?: Array<boolean>
-  has_default_username_pattern?: Array<boolean>
-  has_truststore?: Array<boolean>
-  is_authentication_delegated?: Array<boolean>
+export interface MlPutTrainedModelAliasRequest extends RequestBase {
+  model_alias: string
+  model_id: Id
+  reassign?: boolean
}

-export interface RecoveryBytes {
-  percent: Percentage
-  recovered?: ByteSize
-  recovered_in_bytes: ByteSize
-  reused?: ByteSize
-  reused_in_bytes: ByteSize
-  total?: ByteSize
-  total_in_bytes: ByteSize
+export interface MlPutTrainedModelAliasResponse extends AcknowledgedResponseBase {
}

-export interface RecoveryFileDetails {
-  length: long
-  name: string
-  recovered: long
+export interface MlRevertModelSnapshotRequest extends RequestBase {
+  job_id: Id
+  snapshot_id: Id
+  body?: {
+    delete_intervening_results?: boolean
+  }
}

-export interface RecoveryFiles {
-  details?: Array<RecoveryFileDetails>
-  percent: Percentage
-  recovered: long
-  reused: long
-  total: long
+export interface MlRevertModelSnapshotResponse {
+  model: MlModelSnapshot
}
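
/*
 * Editorial sketch, not part of the generated patch: one way the new
 * namespaced types above might be used from application code. The
 * `client` instance, its `ml.putDatafeed` method shape, and the import
 * path are assumptions; only the MlPutDatafeedRequest body layout comes
 * from the definitions above.
 *
 *   const body: MlPutDatafeedRequest['body'] = {
 *     job_id: 'my-job',
 *     indices: ['my-index'],
 *     query: { match_all: {} },
 *     scroll_size: 1000
 *   }
 *   // await client.ml.putDatafeed({ datafeed_id: 'my-datafeed', body })
 */

-export interface 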
RecoveryIndexStatus { - bytes?: RecoveryBytes - files: RecoveryFiles - size: RecoveryBytes - source_throttle_time?: Time - source_throttle_time_in_millis: EpochMillis - target_throttle_time?: Time - target_throttle_time_in_millis: EpochMillis - total_time_in_millis: EpochMillis - total_time?: Time +export interface MlSetUpgradeModeRequest extends RequestBase { + enabled?: boolean + timeout?: Time } -export interface RecoveryOrigin { - hostname?: string - host?: string - transport_address?: string - id?: Id - ip?: string - name?: Name - bootstrap_new_history_uuid?: boolean - repository?: Name - snapshot?: Name - version?: VersionString - restoreUUID?: Uuid - index?: IndexName +export interface MlSetUpgradeModeResponse extends AcknowledgedResponseBase { } -export interface RecoveryStartStatus { - check_index_time: long - total_time_in_millis: string +export interface MlStartDataFrameAnalyticsRequest extends RequestBase { + id: Id + timeout?: Time } -export interface RecoveryStats { - current_as_source: long - current_as_target: long - throttle_time?: string - throttle_time_in_millis: long +export interface MlStartDataFrameAnalyticsResponse extends AcknowledgedResponseBase { + node: NodeId } -export interface RecoveryStatus { - shards: Array +export interface MlStartDatafeedRequest extends RequestBase { + datafeed_id: Id + start?: Time + body?: { + end?: Time + start?: Time + timeout?: Time + } } -export interface RecoveryStatusRequest extends RequestBase { - index?: Indices - active_only?: boolean - detailed?: boolean +export interface MlStartDatafeedResponse { + node: NodeIds + started: boolean } -export interface RecoveryStatusResponse extends DictionaryResponseBase { +export interface MlStopDataFrameAnalyticsRequest extends RequestBase { + id: Id + allow_no_match?: boolean + force?: boolean + timeout?: Time } -export interface RecoveryTranslogStatus { - percent: Percentage - recovered: long - total: long - total_on_start: long - total_time?: string - total_time_in_millis: EpochMillis +export interface MlStopDataFrameAnalyticsResponse { + stopped: boolean } -export interface RecoveryVerifyIndex { - check_index_time?: Time - check_index_time_in_millis: EpochMillis - total_time?: Time - total_time_in_millis: EpochMillis +export interface MlStopDatafeedRequest extends RequestBase { + datafeed_id: Ids + allow_no_match?: boolean + force?: boolean + body?: { + force?: boolean + timeout?: Time + } } -export type Refresh = boolean | RefreshOptions - -export type RefreshOptions = 'wait_for' - -export interface RefreshRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean +export interface MlStopDatafeedResponse { + stopped: boolean } -export interface RefreshResponse extends ShardsOperationResponseBase { +export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { + id: Id + body?: { + description?: string + model_memory_limit?: ByteSize + max_num_threads?: integer + allow_lazy_start?: boolean + } } -export interface RefreshStats { - external_total: long - external_total_time_in_millis: long - listeners: long - total: long - total_time?: string - total_time_in_millis: long +export interface MlUpdateDataFrameAnalyticsResponse { + id: Id + create_time: long + version: VersionString + source: MlDataframeAnalyticsSource + description?: string + dest: MlDataframeAnalyticsDestination + model_memory_limit: ByteSize + allow_lazy_start: boolean + max_num_threads: integer + analysis: 
MlDataframeAnalysisContainer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields } -export interface RegexpQuery extends QueryBase { - flags?: string - max_determinized_states?: integer - value?: string +export interface MlUpdateFilterRequest extends RequestBase { + filter_id: Id + body?: { + add_items?: string[] + description?: string + remove_items?: string[] + } } -export interface RegressionInferenceOptions { - results_field: Field - num_top_feature_importance_values?: integer +export interface MlUpdateFilterResponse { + description: string + filter_id: Id + items: string[] } -export interface ReindexDestination { - index: IndexName - op_type?: OpType - pipeline?: string - routing?: ReindexRouting - version_type?: VersionType +export interface MlUpdateJobRequest extends RequestBase { + job_id: Id + body?: { + allow_lazy_open?: boolean + analysis_limits?: MlAnalysisMemoryLimit + background_persist_interval?: Time + custom_settings?: Record + categorization_filters?: string[] + description?: string + model_plot_config?: MlModelPlotConfigEnabled + daily_model_snapshot_retention_after_days?: long + model_snapshot_retention_days?: long + renormalization_window_days?: long + results_retention_days?: long + groups?: string[] + detectors?: MlDetector[] + per_partition_categorization?: MlPerPartitionCategorization + } } -export interface ReindexNode { - attributes: Record - host: string - ip: string - name: Name - roles: Array - tasks: Record - transport_address: string +export interface MlUpdateJobResponse { + stub: boolean } -export interface ReindexRequest extends RequestBase { - refresh?: boolean - requests_per_second?: long - scroll?: Time - slices?: long - timeout?: Time - wait_for_active_shards?: WaitForActiveShards - wait_for_completion?: boolean - require_alias?: boolean - body: { - conflicts?: Conflicts - dest?: ReindexDestination - max_docs?: long - script?: Script - size?: long - source?: ReindexSource +export interface MlUpdateModelSnapshotRequest extends RequestBase { + job_id: Id + snapshot_id: Id + body?: { + description?: string + retain?: boolean } } -export interface ReindexResponse extends ResponseBase { - batches?: long - created?: long - deleted?: long - failures?: Array - noops?: long - retries?: Retries - requests_per_second?: long - slice_id?: integer - task?: TaskId - throttled_millis?: EpochMillis - throttled_until_millis?: EpochMillis - timed_out?: boolean - took?: Time - total?: long - updated?: long - version_conflicts?: long +export interface MlUpdateModelSnapshotResponse extends AcknowledgedResponseBase { + model: MlModelSnapshot } -export interface ReindexRethrottleRequest extends RequestBase { - task_id: Id - requests_per_second?: long +export interface MlUpgradeJobSnapshotRequest extends RequestBase { + job_id: Id + snapshot_id: Id + wait_for_completion?: boolean + timeout?: Time } -export interface ReindexRethrottleResponse extends ResponseBase { - nodes: Record +export interface MlUpgradeJobSnapshotResponse { + node: NodeId + completed: boolean } -export interface ReindexRouting { +export interface MlValidateDetectorRequest extends RequestBase { + body?: MlDetector } -export interface ReindexSource { - index: Indices - query?: QueryContainer - remote?: RemoteSource - size?: integer - slice?: SlicedScroll - sort?: Sort - _source?: Fields +export interface MlValidateDetectorResponse extends AcknowledgedResponseBase { } -export interface ReindexStatus { - batches: long - created: long - deleted: long - noops: long - requests_per_second: float - retries: 
Retries - throttled_millis: long - throttled_until_millis: long - total: long - updated: long - version_conflicts: long +export interface MlValidateJobRequest extends RequestBase { + body?: { + job_id?: Id + analysis_config?: MlAnalysisConfig + analysis_limits?: MlAnalysisLimits + data_description?: MlDataDescription + description?: string + model_plot?: MlModelPlotConfig + model_snapshot_retention_days?: long + results_index_name?: IndexName + } } -export interface ReindexTask { - action: string - cancellable: boolean - description: string - id: long - node: Name - running_time_in_nanos: long - start_time_in_millis: long - status: ReindexStatus - type: string - headers: Record +export interface MlValidateJobResponse extends AcknowledgedResponseBase { } -export type RelationName = string +export interface MonitoringBulkRequest extends RequestBase { + stub_a: string + stub_b: string + body?: { + stub_c: string + } +} -export interface ReloadDetails { - index: string - reloaded_analyzers: Array - reloaded_node_ids: Array +export interface MonitoringBulkResponse { + stub: integer } -export interface ReloadSearchAnalyzersRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean +export interface NodesAdaptiveSelection { + avg_queue_size: long + avg_response_time: long + avg_response_time_ns: long + avg_service_time: string + avg_service_time_ns: long + outgoing_searches: long + rank: string } -export interface ReloadSearchAnalyzersResponse extends ResponseBase { - reload_details: Array - _shards: ShardStatistics +export interface NodesBreaker { + estimated_size: string + estimated_size_in_bytes: long + limit_size: string + limit_size_in_bytes: long + overhead: float + tripped: float } -export interface ReloadSecureSettingsRequest extends RequestBase { - node_id?: NodeIds - timeout?: Time - body?: { - secure_settings_password?: string - } +export interface NodesCpu { + percent: integer + sys?: string + sys_in_millis?: long + total?: string + total_in_millis?: long + user?: string + user_in_millis?: long + load_average?: Record } -export interface ReloadSecureSettingsResponse extends NodesResponseBase { - cluster_name: Name - nodes: Record +export interface NodesDataPathStats { + available: string + available_in_bytes: long + disk_queue: string + disk_reads: long + disk_read_size: string + disk_read_size_in_bytes: long + disk_writes: long + disk_write_size: string + disk_write_size_in_bytes: long + free: string + free_in_bytes: long + mount: string + path: string + total: string + total_in_bytes: long + type: string } -export interface RemoteInfo { - connected: boolean - initial_connect_timeout: Time - max_connections_per_cluster: integer - num_nodes_connected: long - seeds: Array - skip_unavailable: boolean +export interface NodesExtendedMemoryStats extends NodesMemoryStats { + free_percent: integer + used_percent: integer + total_in_bytes: integer + free_in_bytes: integer + used_in_bytes: integer } -export interface RemoteInfoRequest extends RequestBase { +export interface NodesFileSystem { + data: NodesDataPathStats[] + timestamp: long + total: NodesFileSystemTotal } -export interface RemoteInfoResponse extends DictionaryResponseBase { +export interface NodesFileSystemTotal { + available: string + available_in_bytes: long + free: string + free_in_bytes: long + total: string + total_in_bytes: long } -export interface RemoteSource { - connect_timeout: Time - host: Uri - password: string - socket_timeout: Time 
-  username: string
+export interface NodesGarbageCollector {
+  collectors: Record<string, NodesGarbageCollectorTotal>
}

-export interface RemoveDuplicatesTokenFilter extends TokenFilterBase {
+export interface NodesGarbageCollectorTotal {
+  collection_count: long
+  collection_time: string
+  collection_time_in_millis: long
}

-export interface RemovePolicyRequest extends RequestBase {
-  index: IndexName
+export interface NodesHttp {
+  current_open: integer
+  total_opened: long
}

-export interface RemovePolicyResponse extends ResponseBase {
-  failed_indexes: Array<IndexName>
-  has_failures: boolean
+export interface NodesIngest {
+  pipelines: Record<string, NodesIngestTotal>
+  total: NodesIngestTotal
}

-export interface RemoveProcessor extends ProcessorBase {
-  field: Fields
-  ignore_missing?: boolean
+export interface NodesIngestTotal {
+  count: long
+  current: long
+  failed: long
+  processors: NodesKeyedProcessor[]
+  time_in_millis: long
}

-export interface RenameProcessor extends ProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field: Field
+export interface NodesJvm {
+  buffer_pools: Record<string, NodesNodeBufferPool>
+  classes: NodesJvmClasses
+  gc: NodesGarbageCollector
+  mem: NodesMemoryStats
+  threads: NodesJvmThreads
+  timestamp: long
+  uptime: string
+  uptime_in_millis: long
}

-export interface RenderSearchTemplateRequest extends RequestBase {
-  body?: {
-    file?: string
-    params?: Record<string, any>
-    source?: string
-  }
+export interface NodesJvmClasses {
+  current_loaded_count: long
+  total_loaded_count: long
+  total_unloaded_count: long
}

-export interface RenderSearchTemplateResponse extends ResponseBase {
-  template_output: Record<string, any>
+export interface NodesJvmThreads {
+  count: long
+  peak_count: long
}

-export interface RequestBase extends CommonQueryParameters {
+export interface NodesKeyedProcessor {
+  statistics: NodesProcess
+  type: string
}

-export interface RequestCacheStats {
-  evictions: long
-  hit_count: long
-  memory_size?: string
-  memory_size_in_bytes: long
-  miss_count: long
+export interface NodesMemoryStats {
+  resident?: string
+  resident_in_bytes?: long
+  share?: string
+  share_in_bytes?: long
+  total_virtual?: string
+  total_virtual_in_bytes?: long
+  total_in_bytes: long
+  free_in_bytes: long
+  used_in_bytes: long
}

-export interface Rescore {
-  query: RescoreQuery
-  window_size?: integer
+export interface NodesNodeBufferPool {
+  count: long
+  total_capacity: string
+  total_capacity_in_bytes: long
+  used: string
+  used_in_bytes: long
}

-export interface RescoreQuery {
-  rescore_query: QueryContainer
-  query_weight?: double
-  rescore_query_weight?: double
-  score_mode?: ScoreMode
+export interface NodesNodesResponseBase {
+  _nodes: NodeStatistics
}

-export interface ReservedSize {
-  node_id: string
-  path: string
-  total: long
-  shards: Array<string>
+export interface NodesOperatingSystem {
+  cpu: NodesCpu
+  mem: NodesExtendedMemoryStats
+  swap: NodesMemoryStats
+  timestamp: long
}

-export interface ResolveIndexAliasItem {
-  name: Name
-  indices: Indices
+export interface NodesProcess {
+  cpu: NodesCpu
+  mem: NodesMemoryStats
+  open_file_descriptors: integer
+  timestamp: long
}

-export interface ResolveIndexDataStreamsItem {
-  name: DataStreamName
-  timestamp_field: Field
-  backing_indices: Indices
+export interface NodesScripting {
+  cache_evictions: long
+  compilations: long
}

-export interface ResolveIndexItem {
+export interface NodesStats {
+  adaptive_selection: Record<string, NodesAdaptiveSelection>
+  breakers: Record<string, NodesBreaker>
+  fs: NodesFileSystem
+  host: Host
+  http: NodesHttp
+  indices: IndicesStatsIndexStats
+  ingest: NodesIngest
+  ip: Ip | Ip[]
+  jvm: NodesJvm
  name: Name
-  aliases?: Array<string>
-  attributes: Array<string>
-  data_stream?: DataStreamName
+  os: NodesOperatingSystem
+  process: NodesProcess
+  roles: NodeRoles
+  script: NodesScripting
+  thread_pool: Record<string, NodesThreadCount>
+  timestamp: long
+  transport: NodesTransport
+  transport_address: TransportAddress
+  attributes: Record<string, string>
}

-export interface ResolveIndexRequest extends RequestBase {
-  name: Names
-  expand_wildcards?: ExpandWildcards
+export interface NodesThreadCount {
+  active: long
+  completed: long
+  largest: long
+  queue: long
+  rejected: long
+  threads: long
}

-export interface ResolveIndexResponse extends ResponseBase {
-  indices: Array<ResolveIndexItem>
-  aliases: Array<ResolveIndexAliasItem>
-  data_streams: Array<ResolveIndexDataStreamsItem>
+export interface NodesTransport {
+  rx_count: long
+  rx_size: string
+  rx_size_in_bytes: long
+  server_open: integer
+  tx_count: long
+  tx_size: string
+  tx_size_in_bytes: long
}

-export type ResourcePrivileges = Record<Name, Record<Name, boolean>>
-
-export interface ResponseBase {
+export interface NodesNodesHotThreadsHotThread {
+  hosts: Host[]
+  node_id: Id
+  node_name: Name
+  threads: string[]
}

-export type ResponseContentType = 'json' | 'yaml' | 'text'
+export interface NodesNodesHotThreadsRequest extends RequestBase {
+  node_id?: NodeIds
+  ignore_idle_threads?: boolean
+  interval?: Time
+  snapshots?: long
+  threads?: long
+  thread_type?: ThreadType
+  timeout?: Time
+}

-export interface RestoreRequest extends RequestBase {
-  repository: Name
-  snapshot: Name
-  master_timeout?: Time
-  wait_for_completion?: boolean
-  body?: {
-    ignore_index_settings?: Array<string>
-    ignore_unavailable?: boolean
-    include_aliases?: boolean
-    include_global_state?: boolean
-    index_settings?: UpdateIndexSettingsRequest
-    indices?: Indices
-    partial?: boolean
-    rename_pattern?: string
-    rename_replacement?: string
-  }
+export interface NodesNodesHotThreadsResponse {
+  hot_threads: NodesNodesHotThreadsHotThread[]
}

-export interface RestoreResponse extends ResponseBase {
-  snapshot: SnapshotRestore
+export interface NodesNodesInfoNodeInfo {
+  attributes: Record<string, string>
+  build_flavor: string
+  build_hash: string
+  build_type: string
+  host: Host
+  http?: NodesNodesInfoNodeInfoHttp
+  ip: Ip
+  jvm?: NodesNodesInfoNodeJvmInfo
+  name: Name
+  network?: NodesNodesInfoNodeInfoNetwork
+  os?: NodesNodesInfoNodeOperatingSystemInfo
+  plugins?: PluginStats[]
+  process?: NodesNodesInfoNodeProcessInfo
+  roles: NodeRoles
+  settings?: NodesNodesInfoNodeInfoSettings
+  thread_pool?: Record<string, NodesNodesInfoNodeThreadPoolInfo>
+  total_indexing_buffer?: long
+  total_indexing_buffer_in_bytes?: ByteSize
+  transport?: NodesNodesInfoNodeInfoTransport
+  transport_address: TransportAddress
+  version: VersionString
+  modules?: PluginStats[]
+  ingest?: NodesNodesInfoNodeInfoIngest
+  aggregations?: Record<string, NodesNodesInfoNodeInfoAggregation>
}

-export type Result = 'Error' | 'created' | 'updated' | 'deleted' | 'not_found' | 'noop'
+export interface NodesNodesInfoNodeInfoAction {
+  destructive_requires_name: string
+}

-export interface ResultBucket {
-  anomaly_score: double
-  bucket_influencers: Array<BucketInfluencer>
-  bucket_span: Time
-  event_count: long
-  initial_anomaly_score: double
-  is_interim: boolean
-  job_id: Id
-  partition_scores?: Array<PartitionScore>
-  processing_time_ms: double
-  result_type: string
-  timestamp: Time
+export interface NodesNodesInfoNodeInfoAggregation {
+  types: string[]
}

-export interface ResumeAutoFollowPatternRequest extends RequestBase {
-  name: Name
+export interface NodesNodesInfoNodeInfoBootstrap {
+  memory_lock: string
}

-export interface ResumeAutoFollowPatternResponse extends AcknowledgedResponseBase {
+export interface NodesNodesInfoNodeInfoClient {
+  type: string
}
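
/*
 * Editorial sketch, not part of the generated patch: the Record value
 * types restored above make per-collector JVM GC stats easy to fold.
 * `stats` is an assumed value of type NodesStats; only the field layout
 * comes from the definitions above.
 *
 *   function totalGcMillis (stats: NodesStats): number {
 *     return Object.values(stats.jvm.gc.collectors)
 *       .reduce((sum, c) => sum + c.collection_time_in_millis, 0)
 *   }
 */

-export interface ResumeFollowIndexRequest 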
extends RequestBase { - index: IndexName - body?: { - max_outstanding_read_requests?: long - max_outstanding_write_requests?: long - max_read_request_operation_count?: long - max_read_request_size?: string - max_retry_delay?: Time - max_write_buffer_count?: long - max_write_buffer_size?: string - max_write_request_operation_count?: long - max_write_request_size?: string - read_poll_timeout?: Time - } +export interface NodesNodesInfoNodeInfoDiscover { + seed_hosts: string } -export interface ResumeFollowIndexResponse extends AcknowledgedResponseBase { +export interface NodesNodesInfoNodeInfoHttp { + bound_address: string[] + max_content_length?: ByteSize + max_content_length_in_bytes: long + publish_address: string } -export interface Retries { - bulk: long - search: long +export interface NodesNodesInfoNodeInfoIngest { + processors: NodesNodesInfoNodeInfoIngestProcessor[] } -export interface RetryIlmRequest extends RequestBase { - index: IndexName +export interface NodesNodesInfoNodeInfoIngestProcessor { + type: string } -export interface RetryIlmResponse extends AcknowledgedResponseBase { +export interface NodesNodesInfoNodeInfoJvmMemory { + direct_max?: ByteSize + direct_max_in_bytes: long + heap_init?: ByteSize + heap_init_in_bytes: long + heap_max?: ByteSize + heap_max_in_bytes: long + non_heap_init?: ByteSize + non_heap_init_in_bytes: long + non_heap_max?: ByteSize + non_heap_max_in_bytes: long } -export interface ReverseNestedAggregation extends BucketAggregationBase { - path?: Field +export interface NodesNodesInfoNodeInfoMemory { + total: string + total_in_bytes: long } -export interface ReverseTokenFilter extends TokenFilterBase { +export interface NodesNodesInfoNodeInfoNetwork { + primary_interface: NodesNodesInfoNodeInfoNetworkInterface + refresh_interval: integer } -export interface RevertModelSnapshotRequest extends RequestBase { - job_id: Id - snapshot_id: Id - body?: { - delete_intervening_results?: boolean - } +export interface NodesNodesInfoNodeInfoNetworkInterface { + address: string + mac_address: string + name: Name } -export interface RevertModelSnapshotResponse extends ResponseBase { - model: ModelSnapshot +export interface NodesNodesInfoNodeInfoOSCPU { + cache_size: string + cache_size_in_bytes: integer + cores_per_socket: integer + mhz: integer + model: string + total_cores: integer + total_sockets: integer + vendor: string } -export interface RoleMappingRuleBase { +export interface NodesNodesInfoNodeInfoPath { + logs: string + home: string + repo: string[] + data?: string[] } -export interface RoleMappingUsage { - enabled: integer - size: integer +export interface NodesNodesInfoNodeInfoRepositories { + url: NodesNodesInfoNodeInfoRepositoriesUrl } -export type RoleTemplate = InlineRoleTemplate | StoredRoleTemplate | InvalidRoleTemplate +export interface NodesNodesInfoNodeInfoRepositoriesUrl { + allowed_urls: string +} -export type RoleTemplateFormat = 'string' | 'json' +export interface NodesNodesInfoNodeInfoScript { + allowed_types: string + disable_max_compilations_rate: string +} -export interface RolloverConditions { - max_age?: Time - max_docs?: long - max_size?: string - max_primary_shard_size?: ByteSize +export interface NodesNodesInfoNodeInfoSearch { + remote: NodesNodesInfoNodeInfoSearchRemote } -export interface RolloverIndexRequest extends RequestBase { - alias: IndexAlias - new_index?: IndexName - dry_run?: boolean - include_type_name?: boolean - master_timeout?: Time - timeout?: Time - wait_for_active_shards?: WaitForActiveShards - body?: { - aliases?: 
Record - conditions?: RolloverConditions - mappings?: Record | TypeMapping - settings?: Record - } +export interface NodesNodesInfoNodeInfoSearchRemote { + connect: string } -export interface RolloverIndexResponse extends AcknowledgedResponseBase { - conditions: Record - dry_run: boolean - new_index: string - old_index: string - rolled_over: boolean - shards_acknowledged: boolean +export interface NodesNodesInfoNodeInfoSettings { + cluster: NodesNodesInfoNodeInfoSettingsCluster + node: NodesNodesInfoNodeInfoSettingsNode + path: NodesNodesInfoNodeInfoPath + repositories?: NodesNodesInfoNodeInfoRepositories + discovery?: NodesNodesInfoNodeInfoDiscover + action?: NodesNodesInfoNodeInfoAction + client: NodesNodesInfoNodeInfoClient + http: NodesNodesInfoNodeInfoSettingsHttp + bootstrap?: NodesNodesInfoNodeInfoBootstrap + transport: NodesNodesInfoNodeInfoSettingsTransport + network?: NodesNodesInfoNodeInfoSettingsNetwork + xpack?: NodesNodesInfoNodeInfoXpack + script?: NodesNodesInfoNodeInfoScript + search?: NodesNodesInfoNodeInfoSearch } -export interface RollupCapabilities { - rollup_jobs: Array +export interface NodesNodesInfoNodeInfoSettingsCluster { + name: Name + routing?: IndicesIndexRouting + election: NodesNodesInfoNodeInfoSettingsClusterElection + initial_master_nodes?: string } -export interface RollupCapabilitiesJob { - fields: Record> - index_pattern: string - job_id: string - rollup_index: string +export interface NodesNodesInfoNodeInfoSettingsClusterElection { + strategy: Name } -export interface RollupFieldMetric { - field: Field - metrics: Array +export interface NodesNodesInfoNodeInfoSettingsHttp { + type: string | NodesNodesInfoNodeInfoSettingsHttpType + 'type.default'?: string + compression?: boolean | string + port?: integer | string } -export interface RollupGroupings { - date_histogram?: DateHistogramRollupGrouping - histogram?: HistogramRollupGrouping - terms?: TermsRollupGrouping +export interface NodesNodesInfoNodeInfoSettingsHttpType { + default: string } -export interface RollupIndexCapabilities { - rollup_jobs: Array +export interface NodesNodesInfoNodeInfoSettingsNetwork { + host: Host } -export interface RollupIndexCapabilitiesJob { - fields: Record> - index_pattern: string - job_id: Id - rollup_index: IndexName +export interface NodesNodesInfoNodeInfoSettingsNode { + name: Name + attr: Record + max_local_storage_nodes?: string } -export interface RollupIndexCapabilitiesJobField { - agg: string - time_zone?: string - calendar_interval?: Time +export interface NodesNodesInfoNodeInfoSettingsTransport { + type: string | NodesNodesInfoNodeInfoSettingsTransportType + 'type.default'?: string + features?: NodesNodesInfoNodeInfoSettingsTransportFeatures } -export interface RollupJobConfiguration { - cron: string - groups: RollupGroupings - id: Id - index_pattern: string - metrics: Array - page_size: long - rollup_index: IndexName - timeout: Time +export interface NodesNodesInfoNodeInfoSettingsTransportFeatures { + 'x-pack': string } -export interface RollupJobInformation { - config: RollupJobConfiguration - stats: RollupJobStats - status: RollupJobStatus +export interface NodesNodesInfoNodeInfoSettingsTransportType { + default: string } -export interface RollupJobStats { - documents_processed: long - index_failures: long - index_time_in_ms: long - index_total: long - pages_processed: long - rollups_indexed: long - search_failures: long - search_time_in_ms: long - search_total: long - trigger_count: long - processing_time_in_ms: long - processing_total: long +export 
interface NodesNodesInfoNodeInfoTransport { + bound_address: string[] + publish_address: string + profiles: Record } -export interface RollupJobStatus { - current_position?: Record - job_state: IndexingJobState - upgraded_doc_id?: boolean +export interface NodesNodesInfoNodeInfoXpack { + license?: NodesNodesInfoNodeInfoXpackLicense + security: NodesNodesInfoNodeInfoXpackSecurity + notification?: Record } -export interface RollupJobTaskFailure { - task_id: TaskId - node_id: Id - status: string - reason: RollupJobTaskFailureReason +export interface NodesNodesInfoNodeInfoXpackLicense { + self_generated: NodesNodesInfoNodeInfoXpackLicenseType } -export interface RollupJobTaskFailureReason { +export interface NodesNodesInfoNodeInfoXpackLicenseType { type: string - reason: string } -export type RollupMetric = 'min' | 'max' | 'sum' | 'avg' | 'value_count' - -export interface RollupRequest extends RequestBase { - stubb: integer - stuba: integer - body: { - stub: integer - } +export interface NodesNodesInfoNodeInfoXpackSecurity { + http: NodesNodesInfoNodeInfoXpackSecuritySsl + enabled: string + transport: NodesNodesInfoNodeInfoXpackSecuritySsl + authc?: NodesNodesInfoNodeInfoXpackSecurityAuthc } -export interface RollupResponse extends ResponseBase { - stub: integer +export interface NodesNodesInfoNodeInfoXpackSecurityAuthc { + realms: NodesNodesInfoNodeInfoXpackSecurityAuthcRealms + token: NodesNodesInfoNodeInfoXpackSecurityAuthcToken } -export interface RollupSearchRequest extends RequestBase { - index: Indices - type?: Type - rest_total_hits_as_int?: boolean - typed_keys?: boolean - body: { - aggs?: Record - query?: QueryContainer - size?: integer - } +export interface NodesNodesInfoNodeInfoXpackSecurityAuthcRealms { + file?: Record + native?: Record + pki?: Record } -export interface RollupSearchResponse extends ResponseBase { +export interface NodesNodesInfoNodeInfoXpackSecurityAuthcRealmsStatus { + enabled?: string + order: string } -export interface RootNodeInfoRequest extends RequestBase { +export interface NodesNodesInfoNodeInfoXpackSecurityAuthcToken { + enabled: string } -export interface RootNodeInfoResponse extends ResponseBase { - cluster_name: string - cluster_uuid: string - name: string - tagline: string - version: ElasticsearchVersionInfo +export interface NodesNodesInfoNodeInfoXpackSecuritySsl { + ssl: Record } -export type Routing = string | number - -export interface RoutingField { - required: boolean +export interface NodesNodesInfoNodeJvmInfo { + gc_collectors: string[] + mem: NodesNodesInfoNodeInfoJvmMemory + memory_pools: string[] + pid: integer + start_time_in_millis: long + version: VersionString + vm_name: Name + vm_vendor: string + vm_version: VersionString + bundled_jdk: boolean + using_bundled_jdk: boolean + using_compressed_ordinary_object_pointers?: boolean | string + input_arguments: string[] } -export type RuleAction = 'skip_result' | 'skip_model_update' - -export interface RuleCondition { - applies_to: AppliesTo - operator: ConditionOperator - value: double +export interface NodesNodesInfoNodeOperatingSystemInfo { + arch: string + available_processors: integer + allocated_processors?: integer + name: Name + pretty_name: Name + refresh_interval_in_millis: integer + version: VersionString + cpu?: NodesNodesInfoNodeInfoOSCPU + mem?: NodesNodesInfoNodeInfoMemory + swap?: NodesNodesInfoNodeInfoMemory } -export type RuleFilterType = 'include' | 'exclude' - -export interface RuntimeField { - format?: string - script?: Script - type: RuntimeFieldType +export interface 
NodesNodesInfoNodeProcessInfo { + id: long + mlockall: boolean + refresh_interval_in_millis: long } -export type RuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' +export interface NodesNodesInfoNodeThreadPoolInfo { + core?: integer + keep_alive?: string + max?: integer + queue_size: integer + size?: integer + type: string +} -export interface RuntimeFieldTypesStats { - name: Name - count: integer - index_count: integer - scriptless_count: integer - shadowed_count: integer - lang: Array - lines_max: integer - lines_total: integer - chars_max: integer - chars_total: integer - source_max: integer - source_total: integer - doc_max: integer - doc_total: integer +export interface NodesNodesInfoRequest extends RequestBase { + node_id?: NodeIds + metric?: Metrics + flat_settings?: boolean + master_timeout?: Time + timeout?: Time } -export type RuntimeFields = Record +export interface NodesNodesInfoResponse extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record +} -export interface RuntimeFieldsTypeUsage { - chars_max: long - chars_total: long - count: long - doc_max: long - doc_total: long - index_count: long - lang: Array - lines_max: long - lines_total: long - name: Field - scriptless_count: long - shadowed_count: long - source_max: long - source_total: long +export interface NodesNodesStatsRequest extends RequestBase { + node_id?: NodeIds + metric?: Metrics + index_metric?: Metrics + completion_fields?: Fields + fielddata_fields?: Fields + fields?: Fields + groups?: boolean + include_segment_file_sizes?: boolean + level?: Level + master_timeout?: Time + timeout?: Time + types?: string[] + include_unloaded_segments?: boolean } -export interface RuntimeFieldsUsage extends XPackUsage { - field_types: Array +export interface NodesNodesStatsResponse extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record } -export interface SampleDiversity { - field: Field - max_docs_per_value: integer +export interface NodesNodesUsageNodeUsage { + rest_actions: Record + since: EpochMillis + timestamp: EpochMillis + aggregations: Record } -export interface SamplerAggregation extends BucketAggregationBase { - shard_size?: integer +export interface NodesNodesUsageRequest extends RequestBase { + node_id?: NodeIds + metric?: Metrics + timeout?: Time } -export type SamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash' - -export interface ScheduleBase { +export interface NodesNodesUsageResponse extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record } -export interface ScheduleContainer { - cron?: CronExpression - daily?: DailySchedule - hourly?: HourlySchedule - interval?: Interval - monthly?: Array - weekly?: Array - yearly?: Array +export interface NodesReloadSecureSettingsNodeReloadException { + name: Name + reload_exception?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy } -export interface ScheduleTriggerEvent { - scheduled_time: DateString | string - triggered_time?: DateString | string +export interface NodesReloadSecureSettingsNodeReloadExceptionCausedBy { + type: string + reason: string + caused_by?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy } -export interface ScheduledEvent { - calendar_id: Id - description: string - end_time: EpochMillis - event_id: Id - start_time: EpochMillis +export interface NodesReloadSecureSettingsRequest extends RequestBase { + node_id?: NodeIds + timeout?: Time + body?: { + secure_settings_password?: Password + } } -export interface ScoreFunctionBase { - filter?: 
QueryContainer - weight?: double +export interface NodesReloadSecureSettingsResponse extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record } -export type ScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total' - -export interface ScoreSort { - mode?: SortMode - order?: SortOrder +export interface RollupDateHistogramGrouping { + delay?: Time + field: Field + format?: string + interval?: Time + calendar_interval?: Time + fixed_interval?: Time + time_zone?: string } -export type Script = InlineScript | IndexedScript | string - -export interface ScriptBase { - lang?: string - params?: Record +export interface RollupFieldMetric { + field: Field + metrics: RollupMetric[] } -export interface ScriptCondition { - lang: string - params?: Record - source: string +export interface RollupGroupings { + date_histogram?: RollupDateHistogramGrouping + histogram?: RollupHistogramGrouping + terms?: RollupTermsGrouping } -export interface ScriptField { - script: Script +export interface RollupHistogramGrouping { + fields: Fields + interval: long } -export interface ScriptProcessor extends ProcessorBase { - id?: Id - lang?: string - params?: Record - source: string -} +export type RollupMetric = 'min' | 'max' | 'sum' | 'avg' | 'value_count' -export interface ScriptQuery extends QueryBase { - script?: Script +export interface RollupTermsGrouping { + fields: Fields } -export interface ScriptScoreFunction extends ScoreFunctionBase { - script: Script +export interface RollupCreateRollupJobRequest extends RequestBase { + id: Id + body?: { + cron?: string + groups?: RollupGroupings + index_pattern?: string + metrics?: RollupFieldMetric[] + page_size?: long + rollup_index?: IndexName + } } -export interface ScriptScoreQuery extends QueryBase { - query?: QueryContainer - script?: Script +export interface RollupCreateRollupJobResponse extends AcknowledgedResponseBase { } -export interface ScriptSort { - order?: SortOrder - script: Script - type?: string +export interface RollupDeleteRollupJobRequest extends RequestBase { + id: Id } -export interface ScriptStats { - cache_evictions: long - compilations: long +export interface RollupDeleteRollupJobResponse extends AcknowledgedResponseBase { + task_failures?: RollupDeleteRollupJobTaskFailure[] } -export interface ScriptTransform { - lang: string - params: Record +export interface RollupDeleteRollupJobTaskFailure { + task_id: TaskId + node_id: Id + status: string + reason: RollupDeleteRollupJobTaskFailureReason } -export interface ScriptedHeuristic { - script: Script +export interface RollupDeleteRollupJobTaskFailureReason { + type: string + reason: string } -export interface ScriptedMetricAggregate extends AggregateBase { - value: any +export interface RollupGetRollupCapabilitiesRequest extends RequestBase { + id?: Id } -export interface ScriptedMetricAggregation extends MetricAggregationBase { - combine_script?: Script - init_script?: Script - map_script?: Script - params?: Record - reduce_script?: Script +export interface RollupGetRollupCapabilitiesResponse extends DictionaryResponseBase { } -export type ScrollId = string - -export interface ScrollRequest extends RequestBase { - scroll_id?: Id - scroll?: Time - rest_total_hits_as_int?: boolean - total_hits_as_integer?: boolean - body?: { - scroll?: Time - scroll_id?: ScrollId - rest_total_hits_as_int?: boolean - } +export interface RollupGetRollupCapabilitiesRollupCapabilities { + rollup_jobs: RollupGetRollupCapabilitiesRollupCapabilitySummary[] } -export interface ScrollResponse extends SearchResponse 
{ - failed_shards?: Array +export interface RollupGetRollupCapabilitiesRollupCapabilitySummary { + fields: Record> + index_pattern: string + job_id: string + rollup_index: string } -export interface ScrollResponseErrorReason { - type: string - reason: string +export interface RollupGetRollupIndexCapabilitiesIndexCapabilities { + rollup_jobs: RollupGetRollupIndexCapabilitiesRollupJobSummary[] } -export interface ScrollResponseFailedShard { - shard: integer - reason: ScrollResponseErrorReason +export interface RollupGetRollupIndexCapabilitiesRequest extends RequestBase { + index: Id } -export interface SearchAsYouTypeProperty extends CorePropertyBase { - analyzer?: string - index?: boolean - index_options?: IndexOptions - max_shingle_size?: integer - norms?: boolean - search_analyzer?: string - search_quote_analyzer?: string - term_vector?: TermVectorOption - type: 'search_as_you_type' +export interface RollupGetRollupIndexCapabilitiesResponse extends DictionaryResponseBase { } -export interface SearchInput { - extract: Array - request: SearchInputRequestDefinition - timeout: Time +export interface RollupGetRollupIndexCapabilitiesRollupJobSummary { + fields: Record + index_pattern: string + job_id: Id + rollup_index: IndexName } -export interface SearchInputRequestDefinition { - body?: SearchRequest - indices?: Array - indices_options?: IndicesOptions - search_type?: SearchType - template?: SearchTemplateRequest +export interface RollupGetRollupIndexCapabilitiesRollupJobSummaryField { + agg: string + time_zone?: string + calendar_interval?: Time } -export interface SearchNode { - name: string - transport_address: string -} +export type RollupGetRollupJobIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' -export interface SearchProfile { - collector: Array - query: Array - rewrite_time: long +export interface RollupGetRollupJobRequest extends RequestBase { + id?: Id } -export interface SearchRequest extends RequestBase { - index?: Indices - type?: Types - allow_no_indices?: boolean - allow_partial_search_results?: boolean - analyzer?: string - analyze_wildcard?: boolean - batched_reduce_size?: long - ccs_minimize_roundtrips?: boolean - default_operator?: DefaultOperator - df?: string - docvalue_fields?: Fields - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - lenient?: boolean - max_concurrent_shard_requests?: long - preference?: string - pre_filter_shard_size?: long - query_on_query_string?: string - request_cache?: boolean - routing?: Routing - scroll?: Time - search_type?: SearchType - sequence_number_primary_term?: boolean - stats?: Array - stored_fields?: Fields - suggest_field?: Field - suggest_mode?: SuggestMode - suggest_size?: long - suggest_text?: string - total_hits_as_integer?: boolean - track_total_hits?: boolean | integer - typed_keys?: boolean - rest_total_hits_as_int?: boolean - _source_excludes?: Fields - _source_includes?: Fields - seq_no_primary_term?: boolean - q?: string - size?: integer - from?: integer - sort?: string | Array - body?: { - aggs?: Record - aggregations?: Record - collapse?: FieldCollapse - explain?: boolean - from?: integer - highlight?: Highlight - track_total_hits?: boolean | integer - indices_boost?: Array> - docvalue_fields?: DocValueField | Array - min_score?: double - post_filter?: QueryContainer - profile?: boolean - query?: QueryContainer - rescore?: Rescore | Array - script_fields?: Record - search_after?: Array - size?: integer - slice?: SlicedScroll - sort?: Sort - 
_source?: boolean | Fields | SourceFilter - fields?: Array - suggest?: SuggestContainer | Record - terminate_after?: long - timeout?: string - track_scores?: boolean - version?: boolean - seq_no_primary_term?: boolean - stored_fields?: Fields - pit?: PointInTimeReference - runtime_mappings?: RuntimeFields - stats?: Array - } +export interface RollupGetRollupJobResponse { + jobs: RollupGetRollupJobRollupJob[] } -export interface SearchResponse extends ResponseBase { - took: long - timed_out: boolean - _shards: ShardStatistics - hits: HitsMetadata - aggregations?: Record - _clusters?: ClusterStatistics - documents?: Array - fields?: Record - max_score?: double - num_reduce_phases?: long - profile?: Profile - pit_id?: Id - _scroll_id?: ScrollId - suggest?: Record>> - terminated_early?: boolean +export interface RollupGetRollupJobRollupJob { + config: RollupGetRollupJobRollupJobConfiguration + stats: RollupGetRollupJobRollupJobStats + status: RollupGetRollupJobRollupJobStatus } -export interface SearchShard { - index: string - node: string - primary: boolean - relocating_node: string - shard: integer - state: string +export interface RollupGetRollupJobRollupJobConfiguration { + cron: string + groups: RollupGroupings + id: Id + index_pattern: string + metrics: RollupFieldMetric[] + page_size: long + rollup_index: IndexName + timeout: Time } -export interface SearchShardsRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - local?: boolean - preference?: string - routing?: Routing +export interface RollupGetRollupJobRollupJobStats { + documents_processed: long + index_failures: long + index_time_in_ms: long + index_total: long + pages_processed: long + rollups_indexed: long + search_failures: long + search_time_in_ms: long + search_total: long + trigger_count: long + processing_time_in_ms: long + processing_total: long } -export interface SearchShardsResponse extends ResponseBase { - nodes: Record - shards: Array> +export interface RollupGetRollupJobRollupJobStatus { + current_position?: Record + job_state: RollupGetRollupJobIndexingJobState + upgraded_doc_id?: boolean } -export interface SearchStats { - fetch_current: long - fetch_time_in_millis: long - fetch_total: long - open_contexts?: long - query_current: long - query_time_in_millis: long - query_total: long - scroll_current: long - scroll_time_in_millis: long - scroll_total: long - suggest_current: long - suggest_time_in_millis: long - suggest_total: long - groups?: Record +export interface RollupRollupRequest extends RequestBase { + stubb: integer + stuba: integer + body?: { + stub: integer + } } -export interface SearchTemplateRequest extends RequestBase { - index?: Indices - type?: Types - allow_no_indices?: boolean - ccs_minimize_roundtrips?: boolean - expand_wildcards?: ExpandWildcards - explain?: boolean - ignore_throttled?: boolean - ignore_unavailable?: boolean - preference?: string - profile?: boolean - routing?: Routing - scroll?: Time - search_type?: SearchType - total_hits_as_integer?: boolean +export interface RollupRollupResponse { + stub: integer +} + +export interface RollupRollupSearchRequest extends RequestBase { + index: Indices + type?: Type + rest_total_hits_as_int?: boolean typed_keys?: boolean - body: { - id?: string - params?: Record - source?: string + body?: { + aggs?: Record + query?: QueryDslQueryContainer + size?: integer } } -export interface SearchTransform { - request: SearchInputRequestDefinition - timeout: Time 
+export type RollupRollupSearchResponse = boolean + +export interface RollupStartRollupJobRequest extends RequestBase { + id: Id } -export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' +export interface RollupStartRollupJobResponse { + started: boolean +} + +export interface RollupStopRollupJobRequest extends RequestBase { + id: Id + timeout?: Time + wait_for_completion?: boolean +} + +export interface RollupStopRollupJobResponse { + stopped: boolean +} export interface SearchableSnapshotsClearCacheRequest extends RequestBase { - stub_a: integer - stub_b: integer - body?: { - stub_c: integer - } + index?: Indices + expand_wildcards?: ExpandWildcards + allow_no_indices?: boolean + ignore_unavailable?: boolean + pretty?: boolean + human?: boolean } -export interface SearchableSnapshotsClearCacheResponse extends ResponseBase { +export interface SearchableSnapshotsClearCacheResponse { stub: integer } +export interface SearchableSnapshotsMountMountedSnapshot { + snapshot: Name + indices: Indices + shards: ShardStatistics +} + export interface SearchableSnapshotsMountRequest extends RequestBase { repository: Name snapshot: Name master_timeout?: Time wait_for_completion?: boolean storage?: string - body: { + body?: { index: IndexName renamed_index?: IndexName index_settings?: Record - ignore_index_settings?: Array + ignore_index_settings?: string[] } } -export interface SearchableSnapshotsMountResponse extends ResponseBase { - snapshot: SearchableSnapshotsMountSnapshot -} - -export interface SearchableSnapshotsMountSnapshot { - snapshot: Name - indices: Indices - shards: ShardStatistics +export interface SearchableSnapshotsMountResponse { + snapshot: SearchableSnapshotsMountMountedSnapshot } export interface SearchableSnapshotsRepositoryStatsRequest extends RequestBase { @@ -11113,7 +12654,7 @@ export interface SearchableSnapshotsRepositoryStatsRequest extends RequestBase { } } -export interface SearchableSnapshotsRepositoryStatsResponse extends ResponseBase { +export interface SearchableSnapshotsRepositoryStatsResponse { stub: integer } @@ -11125,721 +12666,695 @@ export interface SearchableSnapshotsStatsRequest extends RequestBase { } } -export interface SearchableSnapshotsStatsResponse extends ResponseBase { +export interface SearchableSnapshotsStatsResponse { stub: integer } -export interface SearchableSnapshotsUsage extends XPackUsage { - indices_count: integer - full_copy_indices_count?: integer - shared_cache_indices_count?: integer +export interface SecurityApplicationGlobalUserPrivileges { + manage: SecurityManageUserPrivileges +} + +export interface SecurityApplicationPrivileges { + application: string + privileges: string[] + resources: string[] +} + +export interface SecurityClusterNode { + name: Name +} + +export interface SecurityCreatedStatus { + created: boolean +} + +export interface SecurityFieldSecurity { + except?: Fields + grant: Fields +} + +export interface SecurityGlobalPrivileges { + application: SecurityApplicationGlobalUserPrivileges } -export interface SecurityFeatureToggle { +export interface SecurityIndicesPrivileges { + field_security?: SecurityFieldSecurity + names: Indices + privileges: string[] + query?: string | QueryDslQueryContainer + allow_restricted_indices?: boolean +} + +export interface SecurityManageUserPrivileges { + applications: string[] +} + +export interface SecurityRealmInfo { + name: Name + type: string +} + +export interface SecurityRoleMapping { enabled: boolean + metadata: Metadata + roles: string[] + rules: 
SecurityRoleMappingRuleBase } -export interface SecurityNode { - name: string +export interface SecurityRoleMappingRuleBase { } -export interface SecurityRolesDlsBitSetCacheUsage { - count: integer - memory: ByteSize - memory_in_bytes: ulong +export interface SecurityUser { + email?: string + full_name?: Name + metadata: Metadata + roles: string[] + username: Username + enabled: boolean } -export interface SecurityRolesDlsUsage { - bit_set_cache: SecurityRolesDlsBitSetCacheUsage +export interface SecurityAuthenticateRequest extends RequestBase { } -export interface SecurityRolesFileUsage { - dls: boolean - fls: boolean - size: long +export interface SecurityAuthenticateResponse { + authentication_realm: SecurityRealmInfo + email?: string + full_name?: Name + lookup_realm: SecurityRealmInfo + metadata: Metadata + roles: string[] + username: Username + enabled: boolean + authentication_type: string + token?: SecurityAuthenticateToken } -export interface SecurityRolesNativeUsage { - dls: boolean - fls: boolean - size: long +export interface SecurityAuthenticateToken { + name: Name } -export interface SecurityRolesUsage { - native: SecurityRolesNativeUsage - dls: SecurityRolesDlsUsage - file: SecurityRolesFileUsage +export interface SecurityChangePasswordRequest extends RequestBase { + username?: Username + refresh?: Refresh + body?: { + password?: Password + } } -export interface SecurityUsage extends XPackUsage { - api_key_service: SecurityFeatureToggle - anonymous: SecurityFeatureToggle - audit: AuditUsage - fips_140: SecurityFeatureToggle - ipfilter: IpFilterUsage - realms: Record - role_mapping: Record - roles: SecurityRolesUsage - ssl: SslUsage - system_key?: SecurityFeatureToggle - token_service: SecurityFeatureToggle - operator_privileges: XPackUsage +export interface SecurityChangePasswordResponse { } -export interface Segment { - attributes: Record - committed: boolean - compound: boolean - deleted_docs: long - generation: integer - memory_in_bytes: double - search: boolean - size_in_bytes: double - num_docs: long - version: VersionString +export interface SecurityClearApiKeyCacheRequest extends RequestBase { + ids?: Ids } -export interface SegmentsRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - verbose?: boolean +export interface SecurityClearApiKeyCacheResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record } -export interface SegmentsResponse extends ResponseBase { - indices: Record - _shards: ShardStatistics +export interface SecurityClearCachedPrivilegesRequest extends RequestBase { + application: Name } -export interface SegmentsStats { - count: long - doc_values_memory_in_bytes: long - file_sizes: Record - fixed_bit_set_memory_in_bytes: long - index_writer_max_memory_in_bytes?: long - index_writer_memory_in_bytes: long - max_unsafe_auto_id_timestamp: long - memory_in_bytes: long - norms_memory_in_bytes: long - points_memory_in_bytes: long - stored_fields_memory_in_bytes: long - terms_memory_in_bytes: long - term_vectors_memory_in_bytes: long - version_map_memory_in_bytes: long +export interface SecurityClearCachedPrivilegesResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record } -export type SequenceNumber = integer +export interface SecurityClearCachedRealmsRequest extends RequestBase { + realms: Names + usernames?: string[] +} -export interface SerialDifferencingAggregation extends PipelineAggregationBase { - lag?: integer +export interface 
SecurityClearCachedRealmsResponse { + cluster_name: Name + nodes: Record + _nodes: NodeStatistics } -export interface SetProcessor extends ProcessorBase { - field: Field - override?: boolean - value: any +export interface SecurityClearCachedRolesRequest extends RequestBase { + name: Names } -export interface SetSecurityUserProcessor extends ProcessorBase { - field: Field - properties?: Array +export interface SecurityClearCachedRolesResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record } -export interface SetUpgradeModeRequest extends RequestBase { - enabled?: boolean - timeout?: Time +export interface SecurityClearCachedServiceTokensRequest extends RequestBase { + namespace: Namespace + service: Service + name: Names } -export interface SetUpgradeModeResponse extends AcknowledgedResponseBase { +export interface SecurityClearCachedServiceTokensResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record } -export type ShapeOrientation = 'right' | 'counterclockwise' | 'ccw' | 'left' | 'clockwise' | 'cw' +export interface SecurityCreateApiKeyIndexPrivileges { + names: Indices + privileges: string[] +} -export interface ShapeProperty extends DocValuesPropertyBase { - coerce?: boolean - ignore_malformed?: boolean - ignore_z_value?: boolean - orientation?: ShapeOrientation - type: 'shape' +export interface SecurityCreateApiKeyRequest extends RequestBase { + refresh?: Refresh + body?: { + expiration?: Time + name?: Name + role_descriptors?: Record + metadata?: Metadata + } } -export interface ShapeQuery extends QueryBase { - ignore_unmapped?: boolean - indexed_shape?: FieldLookup - relation?: ShapeRelation - shape?: GeoShape +export interface SecurityCreateApiKeyResponse { + api_key: string + expiration?: long + id: Id + name: Name } -export type ShapeRelation = 'intersects' | 'disjoint' | 'within' +export interface SecurityCreateApiKeyRoleDescriptor { + cluster: string[] + index: SecurityCreateApiKeyIndexPrivileges[] + applications?: SecurityApplicationPrivileges[] +} -export type ShapeType = 'geo_shape' | 'shape' +export interface SecurityCreateServiceTokenRequest extends RequestBase { + namespace: Namespace + service: Service + name: Name +} -export interface ShardCommit { - generation: integer - id: string - num_docs: long - user_data: Record +export interface SecurityCreateServiceTokenResponse { + created: boolean + token: SecurityCreateServiceTokenToken } -export interface ShardCompletion { - size_in_bytes: long +export interface SecurityCreateServiceTokenToken { + name: Name + value: string } -export interface ShardDocs { - count: long - deleted: long +export interface SecurityDeletePrivilegesFoundStatus { + found: boolean } -export interface ShardFailure { - index: string - node: string - reason: ErrorCause - shard: integer - status?: string +export interface SecurityDeletePrivilegesRequest extends RequestBase { + application: Name + name: Name + refresh?: Refresh } -export interface ShardFielddata { - evictions: long - memory_size_in_bytes: long +export interface SecurityDeletePrivilegesResponse extends DictionaryResponseBase> { } -export interface ShardFileSizeInfo { - description: string - size_in_bytes: long +export interface SecurityDeleteRoleRequest extends RequestBase { + name: Name + refresh?: Refresh } -export interface ShardFlush { - total: long - periodic: long - total_time_in_millis: long +export interface SecurityDeleteRoleResponse { + found: boolean } -export interface ShardGet { - current: long - exists_time_in_millis: long - exists_total: 
long - missing_time_in_millis: long - missing_total: long - time_in_millis: long - total: long +export interface SecurityDeleteRoleMappingRequest extends RequestBase { + name: Name + refresh?: Refresh +} + +export interface SecurityDeleteRoleMappingResponse { + found: boolean +} + +export interface SecurityDeleteServiceTokenRequest extends RequestBase { + namespace: Namespace + service: Service + name: Name + refresh?: Refresh +} + +export interface SecurityDeleteServiceTokenResponse { + found: boolean +} + +export interface SecurityDeleteUserRequest extends RequestBase { + username: Username + refresh?: Refresh +} + +export interface SecurityDeleteUserResponse { + found: boolean +} + +export interface SecurityDisableUserRequest extends RequestBase { + username: Username + refresh?: Refresh +} + +export interface SecurityDisableUserResponse { +} + +export interface SecurityEnableUserRequest extends RequestBase { + username: Username + refresh?: Refresh +} + +export interface SecurityEnableUserResponse { +} + +export interface SecurityGetApiKeyApiKey { + creation: long + expiration?: long + id: Id + invalidated: boolean + name: Name + realm: string + username: Username + metadata?: Metadata +} + +export interface SecurityGetApiKeyRequest extends RequestBase { + id?: Id + name?: Name + owner?: boolean + realm_name?: Name + username?: Username +} + +export interface SecurityGetApiKeyResponse { + api_keys: SecurityGetApiKeyApiKey[] +} + +export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { +} + +export interface SecurityGetBuiltinPrivilegesResponse { + cluster: string[] + index: Indices +} + +export interface SecurityGetPrivilegesRequest extends RequestBase { + application?: Name + name?: Name } -export interface ShardHealthStats { - active_shards: integer - initializing_shards: integer - primary_active: boolean - relocating_shards: integer - status: Health - unassigned_shards: integer +export interface SecurityGetPrivilegesResponse extends DictionaryResponseBase> { } -export interface ShardIndexing { - delete_current: long - delete_time_in_millis: long - delete_total: long - index_current: long - index_failed: long - index_time_in_millis: long - index_total: long - is_throttled: boolean - noop_update_total: long - throttle_time_in_millis: long +export interface SecurityGetRoleInlineRoleTemplate { + template: SecurityGetRoleInlineRoleTemplateSource + format?: SecurityGetRoleTemplateFormat } -export interface ShardLease { - id: Id - retaining_seq_no: SequenceNumber - timestamp: long +export interface SecurityGetRoleInlineRoleTemplateSource { source: string } -export interface ShardMerges { - current: long - current_docs: long - current_size_in_bytes: long - total: long - total_auto_throttle_in_bytes: long - total_docs: long - total_size_in_bytes: long - total_stopped_time_in_millis: long - total_throttled_time_in_millis: long - total_time_in_millis: long +export interface SecurityGetRoleInvalidRoleTemplate { + template: string + format?: SecurityGetRoleTemplateFormat } -export interface ShardPath { - data_path: string - is_custom_data_path: boolean - state_path: string +export interface SecurityGetRoleRequest extends RequestBase { + name?: Name } -export interface ShardProfile { - aggregations: Array - id: string - searches: Array +export interface SecurityGetRoleResponse extends DictionaryResponseBase { } -export interface ShardQueryCache { - cache_count: long - cache_size: long - evictions: long - hit_count: long - memory_size_in_bytes: long - miss_count: long - 
total_count: long +export interface SecurityGetRoleRole { + cluster: string[] + indices: SecurityIndicesPrivileges[] + metadata: Metadata + run_as: string[] + transient_metadata: SecurityGetRoleTransientMetadata + applications: SecurityApplicationPrivileges[] + role_templates?: SecurityGetRoleRoleTemplate[] } -export interface ShardRecovery { - id: long - index: RecoveryIndexStatus - primary: boolean - source: RecoveryOrigin - stage: string - start?: RecoveryStartStatus - start_time?: DateString - start_time_in_millis: EpochMillis - stop_time?: DateString - stop_time_in_millis: EpochMillis - target: RecoveryOrigin - total_time?: DateString - total_time_in_millis: EpochMillis - translog: RecoveryTranslogStatus - type: Type - verify_index: RecoveryVerifyIndex +export type SecurityGetRoleRoleTemplate = SecurityGetRoleInlineRoleTemplate | SecurityGetRoleStoredRoleTemplate | SecurityGetRoleInvalidRoleTemplate + +export interface SecurityGetRoleStoredRoleTemplate { + template: SecurityGetRoleStoredRoleTemplateId + format?: SecurityGetRoleTemplateFormat } -export interface ShardRefresh { - listeners: long - total: long - total_time_in_millis: long - external_total: long - external_total_time_in_millis: long +export interface SecurityGetRoleStoredRoleTemplateId { + id: string } -export interface ShardRequestCache { - evictions: long - hit_count: long - memory_size_in_bytes: long - miss_count: long +export type SecurityGetRoleTemplateFormat = 'string' | 'json' + +export interface SecurityGetRoleTransientMetadata { + enabled: boolean } -export interface ShardRetentionLeases { - primary_term: long - version: VersionNumber - leases: Array +export interface SecurityGetRoleMappingRequest extends RequestBase { + name?: Name } -export interface ShardRouting { - node: string - primary: boolean - relocating_node?: string - state: ShardRoutingState +export interface SecurityGetRoleMappingResponse extends DictionaryResponseBase { } -export type ShardRoutingState = 'UNASSIGNED' | 'INITIALIZING' | 'STARTED' | 'RELOCATING' +export interface SecurityGetServiceAccountsRequest extends RequestBase { + namespace?: Namespace + service?: Service +} -export interface ShardSearch { - fetch_current: long - fetch_time_in_millis: long - fetch_total: long - open_contexts: long - query_current: long - query_time_in_millis: long - query_total: long - scroll_current: long - scroll_time_in_millis: long - scroll_total: long - suggest_current: long - suggest_time_in_millis: long - suggest_total: long +export interface SecurityGetServiceAccountsResponse extends DictionaryResponseBase { } -export interface ShardSegmentRouting { - node: string - primary: boolean - state: string +export interface SecurityGetServiceAccountsRoleDescriptor { + cluster: string[] + indices: SecurityIndicesPrivileges[] + global?: SecurityGlobalPrivileges[] + applications?: SecurityApplicationPrivileges[] + metadata?: Metadata + run_as?: string[] + transient_metadata?: Record } -export interface ShardSegments { - count: long - doc_values_memory_in_bytes: long - file_sizes: Record - fixed_bit_set_memory_in_bytes: long - index_writer_memory_in_bytes: long - max_unsafe_auto_id_timestamp: long - memory_in_bytes: long - norms_memory_in_bytes: long - points_memory_in_bytes: long - stored_fields_memory_in_bytes: long - terms_memory_in_bytes: long - term_vectors_memory_in_bytes: long - version_map_memory_in_bytes: long -} - -export interface ShardSequenceNumber { - global_checkpoint: long - local_checkpoint: long - max_seq_no: SequenceNumber +export interface 
SecurityGetServiceAccountsRoleDescriptorWrapper { + role_descriptor: SecurityGetServiceAccountsRoleDescriptor } -export interface ShardStatistics { - failed: uint - successful: uint - total: uint - failures?: Array - skipped?: uint +export interface SecurityGetServiceCredentialsRequest extends RequestBase { + namespace: Namespace + service: Service } -export interface ShardStats { - commit: ShardCommit - completion: ShardCompletion - docs: ShardDocs - fielddata: ShardFielddata - flush: ShardFlush - get: ShardGet - indexing: ShardIndexing - merges: ShardMerges - shard_path: ShardPath - query_cache: ShardQueryCache - recovery: ShardStatsRecovery - refresh: ShardRefresh - request_cache: ShardRequestCache - retention_leases: ShardRetentionLeases - routing: ShardRouting - search: ShardSearch - segments: ShardSegments - seq_no: ShardSequenceNumber - store: ShardStatsStore - translog: ShardTransactionLog - warmer: ShardWarmer -} - -export interface ShardStatsRecovery { - current_as_source: long - current_as_target: long - throttle_time_in_millis: long +export interface SecurityGetServiceCredentialsResponse { + service_account: string + node_name: NodeName + count: integer + tokens: Record + file_tokens: Record } -export interface ShardStatsStore { - reserved_in_bytes: long - size_in_bytes: long +export type SecurityGetTokenAccessTokenGrantType = 'password' | 'client_credentials' | '_kerberos' | 'refresh_token' + +export interface SecurityGetTokenAuthenticatedUser extends SecurityUser { + authentication_realm: SecurityGetTokenUserRealm + lookup_realm: SecurityGetTokenUserRealm + authentication_provider?: SecurityGetTokenAuthenticationProvider + authentication_type: string } -export interface ShardStore { - allocation: ShardStoreAllocation - allocation_id: Id - attributes: Record - id: Id - legacy_version: VersionNumber +export interface SecurityGetTokenAuthenticationProvider { + type: string name: Name - store_exception: ShardStoreException - transport_address: string } -export type ShardStoreAllocation = 'primary' | 'replica' | 'unused' +export interface SecurityGetTokenRequest extends RequestBase { + body?: { + grant_type?: SecurityGetTokenAccessTokenGrantType + scope?: string + password?: Password + kerberos_ticket?: string + refresh_token?: string + username?: Username + } +} -export interface ShardStoreException { - reason: string +export interface SecurityGetTokenResponse { + access_token: string + expires_in: long + scope?: string type: string + refresh_token: string + kerberos_authentication_response_token?: string + authentication: SecurityGetTokenAuthenticatedUser } -export interface ShardStoreWrapper { - stores: Array +export interface SecurityGetTokenUserRealm { + name: Name + type: string } -export interface ShardTransactionLog { - earliest_last_modified_age: long - operations: long - size_in_bytes: long - uncommitted_operations: long - uncommitted_size_in_bytes: long +export interface SecurityGetUserRequest extends RequestBase { + username?: Username | Username[] } -export interface ShardWarmer { - current: long - total: long - total_time_in_millis: long +export interface SecurityGetUserResponse extends DictionaryResponseBase { } -export interface ShardsOperationResponseBase extends ResponseBase { - _shards: ShardStatistics +export interface SecurityGetUserPrivilegesRequest extends RequestBase { + application?: Name + priviledge?: Name } -export interface ShardsSegment { - num_committed_segments: integer - routing: ShardSegmentRouting - num_search_segments: integer - segments: Record 
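+// Example (illustrative sketch, not part of the generated definitions): a call
+// matching SecurityGetUserPrivilegesRequest above, returning the response shape
+// defined below. The camelCase method name `client.security.getUserPrivileges`
+// follows the client's naming convention.
+//
+//   const { body } = await client.security.getUserPrivileges()
+//   console.log(body.cluster, body.indices, body.applications)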
+export interface SecurityGetUserPrivilegesResponse { + applications: SecurityApplicationPrivileges[] + cluster: string[] + global: SecurityGlobalPrivileges[] + indices: SecurityIndicesPrivileges[] + run_as: string[] } -export interface ShingleTokenFilter extends TokenFilterBase { - filler_token: string - max_shingle_size: integer - min_shingle_size: integer - output_unigrams: boolean - output_unigrams_if_no_shingles: boolean - token_separator: string +export interface SecurityGrantApiKeyApiKey { + name: Name + expiration?: Time + role_descriptors?: Record[] } -export interface ShrinkIndexRequest extends RequestBase { - index: IndexName - target: IndexName - master_timeout?: Time - timeout?: Time - wait_for_active_shards?: WaitForActiveShards +export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' + +export interface SecurityGrantApiKeyRequest extends RequestBase { body?: { - aliases?: Record - settings?: Record + api_key: SecurityGrantApiKeyApiKey + grant_type: SecurityGrantApiKeyApiKeyGrantType + access_token?: string + username?: Username + password?: Password } } -export interface ShrinkIndexResponse extends AcknowledgedResponseBase { - shards_acknowledged: boolean - index: IndexName +export interface SecurityGrantApiKeyResponse { + api_key: string + id: Id + name: Name + expiration?: EpochMillis } -export interface SignificantTermsAggregate extends MultiBucketAggregate { - bg_count: long - doc_count: long +export interface SecurityHasPrivilegesApplicationPrivilegesCheck { + application: string + privileges: string[] + resources: string[] } -export interface SignificantTermsAggregation extends BucketAggregationBase { - background_filter?: QueryContainer - chi_square?: ChiSquareHeuristic - exclude?: string | Array - execution_hint?: TermsAggregationExecutionHint - field?: Field - gnd?: GoogleNormalizedDistanceHeuristic - include?: string | Array - min_doc_count?: long - mutual_information?: MutualInformationHeuristic - percentage?: PercentageScoreHeuristic - script_heuristic?: ScriptedHeuristic - shard_min_doc_count?: long - shard_size?: integer - size?: integer -} +export type SecurityHasPrivilegesApplicationsPrivileges = Record -export interface SignificantTermsBucketKeys { +export interface SecurityHasPrivilegesIndexPrivilegesCheck { + names: string[] + privileges: string[] } -export type SignificantTermsBucket = SignificantTermsBucketKeys | - { [property: string]: Aggregate } -export interface SignificantTextAggregation extends BucketAggregationBase { - background_filter?: QueryContainer - chi_square?: ChiSquareHeuristic - exclude?: string | Array - execution_hint?: TermsAggregationExecutionHint - field?: Field - filter_duplicate_text?: boolean - gnd?: GoogleNormalizedDistanceHeuristic - include?: string | Array - min_doc_count?: long - mutual_information?: MutualInformationHeuristic - percentage?: PercentageScoreHeuristic - script_heuristic?: ScriptedHeuristic - shard_min_doc_count?: long - shard_size?: integer - size?: integer - source_fields?: Fields -} +export type SecurityHasPrivilegesPrivileges = Record -export interface SimpleInput { - payload: Record +export interface SecurityHasPrivilegesRequest extends RequestBase { + user?: Name + body?: { + application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] + cluster?: string[] + index?: SecurityHasPrivilegesIndexPrivilegesCheck[] + } } -export type SimpleQueryStringFlags = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL' 
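+// Example (illustrative sketch, not part of the generated definitions): a
+// privilege check whose body matches SecurityHasPrivilegesRequest above. The
+// method name `client.security.hasPrivileges` follows the client's camelCase
+// convention; the concrete privilege and index names are placeholders.
+//
+//   const { body } = await client.security.hasPrivileges({
+//     body: {
+//       cluster: ['monitor'],
+//       index: [{ names: ['my-index'], privileges: ['read'] }]
+//     }
+//   })
+//   // body.has_all_requested reports whether every requested check passed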
+export type SecurityHasPrivilegesResourcePrivileges = Record -export interface SimpleQueryStringQuery extends QueryBase { - analyzer?: string - analyze_wildcard?: boolean - auto_generate_synonyms_phrase_query?: boolean - default_operator?: Operator - fields?: Fields - flags?: SimpleQueryStringFlags | string - fuzzy_max_expansions?: integer - fuzzy_prefix_length?: integer - fuzzy_transpositions?: boolean - lenient?: boolean - minimum_should_match?: MinimumShouldMatch - query?: string - quote_field_suffix?: string +export interface SecurityHasPrivilegesResponse { + application: SecurityHasPrivilegesApplicationsPrivileges + cluster: Record + has_all_requested: boolean + index: Record + username: Username } -export interface SimulatePipelineDocument { - _id?: Id - _index?: IndexName - _source: any +export interface SecurityInvalidateApiKeyRequest extends RequestBase { + body?: { + id?: Id + ids?: Id[] + name?: Name + owner?: boolean + realm_name?: string + username?: Username + } } -export interface SimulatePipelineRequest extends RequestBase { - id?: Id - verbose?: boolean - body: { - docs?: Array - pipeline?: Pipeline - } +export interface SecurityInvalidateApiKeyResponse { + error_count: integer + error_details?: ErrorCause[] + invalidated_api_keys: string[] + previously_invalidated_api_keys: string[] } -export interface SimulatePipelineResponse extends ResponseBase { - docs: Array +export interface SecurityInvalidateTokenRequest extends RequestBase { + body?: { + token?: string + refresh_token?: string + realm_name?: Name + username?: Username + } } -export interface SimulatedActions { - actions: Array - all: SimulatedActions - use_all: boolean +export interface SecurityInvalidateTokenResponse { + error_count: long + error_details?: ErrorCause[] + invalidated_tokens: long + previously_invalidated_tokens: long } -export interface SingleBucketAggregateKeys extends AggregateBase { - doc_count: double +export interface SecurityPutPrivilegesActions { + actions: string[] + application?: string + name?: Name + metadata?: Metadata } -export type SingleBucketAggregate = SingleBucketAggregateKeys | - { [property: string]: Aggregate } -export interface SingleGroupSource { - field: Field - script: Script +export interface SecurityPutPrivilegesRequest extends RequestBase { + refresh?: Refresh + body?: Record> } -export type Size = 'Raw' | 'k' | 'm' | 'g' | 't' | 'p' +export interface SecurityPutPrivilegesResponse extends DictionaryResponseBase> { +} -export interface SizeField { - enabled: boolean +export interface SecurityPutRoleRequest extends RequestBase { + name: Name + refresh?: Refresh + body?: { + applications?: SecurityApplicationPrivileges[] + cluster?: string[] + global?: Record + indices?: SecurityIndicesPrivileges[] + metadata?: Metadata + run_as?: string[] + transient_metadata?: SecurityGetRoleTransientMetadata + } } -export interface SlackActionResult { - account?: string - message: SlackMessage +export interface SecurityPutRoleResponse { + role: SecurityCreatedStatus } -export interface SlackAttachment { - author_icon?: string - author_link?: string - author_name: string - color?: string - fallback?: string - fields?: Array - footer?: string - footer_icon?: string - image_url?: string - pretext?: string - text?: string - thumb_url?: string - title: string - title_link?: string - ts?: DateString +export interface SecurityPutRoleMappingRequest extends RequestBase { + name: Name + refresh?: Refresh + body?: { + enabled?: boolean + metadata?: Metadata + roles?: string[] + rules?: 
SecurityRoleMappingRuleBase + run_as?: string[] + } } -export interface SlackAttachmentField { - short: boolean - title: string - value: string +export interface SecurityPutRoleMappingResponse { + created?: boolean + role_mapping: SecurityCreatedStatus } -export interface SlackDynamicAttachment { - attachment_template: SlackAttachment - list_path: string +export interface SecurityPutUserRequest extends RequestBase { + username: Username + refresh?: Refresh + body?: { + username?: Username + email?: string | null + full_name?: string | null + metadata?: Metadata + password?: Password + password_hash?: string + roles?: string[] + enabled?: boolean + } } -export interface SlackMessage { - attachments: Array - dynamic_attachments?: SlackDynamicAttachment - from: string - icon?: string - text: string - to: Array +export interface SecurityPutUserResponse { + created: boolean } -export interface SlicedScroll { - field?: Field - id: integer - max: integer +export interface ShutdownDeleteNodeRequest extends RequestBase { + body?: { + stub: string + } } -export interface SlmUsage extends XPackUsage { - policy_count?: integer - policy_stats?: SnapshotLifecycleStats +export interface ShutdownDeleteNodeResponse { + stub: boolean } -export interface SmoothingModelContainer { - laplace: LaplaceSmoothingModel - linear_interpolation: LinearInterpolationSmoothingModel - stupid_backoff: StupidBackoffSmoothingModel +export interface ShutdownGetNodeRequest extends RequestBase { + body?: { + stub: string + } } -export interface SnapshotIndexStats { - shards: Record - shards_stats: SnapshotShardsStats - stats: SnapshotStats +export interface ShutdownGetNodeResponse { + stub: boolean } -export interface SnapshotInfo { - data_streams: Array - duration_in_millis?: EpochMillis - end_time?: DateString - end_time_in_millis?: EpochMillis - failures?: Array - include_global_state?: boolean - indices: Array - metadata?: Record - reason?: string - snapshot: string - shards?: ShardStatistics - start_time?: DateString - start_time_in_millis?: EpochMillis - state?: string - uuid: Uuid - version?: VersionString - version_id?: VersionNumber - feature_states?: Array +export interface ShutdownPutNodeRequest extends RequestBase { + body?: { + stub: string + } } -export interface SnapshotInfoFeatureState { - feature_name: string - indices: Indices +export interface ShutdownPutNodeResponse { + stub: boolean } -export interface SnapshotLifecycleConfig { +export interface SlmConfiguration { ignore_unavailable?: boolean include_global_state?: boolean indices: Indices } -export interface SnapshotLifecycleInProgress { - name: string +export interface SlmInProgress { + name: Name start_time_millis: DateString state: string - uuid: string + uuid: Uuid } -export interface SnapshotLifecycleInvocationRecord { - snapshot_name: string +export interface SlmInvocation { + snapshot_name: Name time: DateString } -export interface SnapshotLifecyclePolicy { - config: SnapshotLifecycleConfig - name: string +export interface SlmPolicy { + config: SlmConfiguration + name: Name repository: string - retention: SnapshotRetentionConfiguration - schedule: CronExpression + retention: SlmRetention + schedule: WatcherCronExpression +} + +export interface SlmRetention { + expire_after: Time + max_count: integer + min_count: integer } -export interface SnapshotLifecyclePolicyMetadata { - in_progress?: SnapshotLifecycleInProgress - last_failure?: SnapshotLifecycleInvocationRecord - last_success?: SnapshotLifecycleInvocationRecord +export interface 
SlmSnapshotLifecycle { + in_progress?: SlmInProgress + last_failure?: SlmInvocation + last_success?: SlmInvocation modified_date?: DateString modified_date_millis: EpochMillis next_execution?: DateString next_execution_millis: EpochMillis - policy: SnapshotLifecyclePolicy + policy: SlmPolicy version: VersionNumber - stats: SnapshotLifecycleStats + stats: SlmStatistics } -export interface SnapshotLifecycleStats { +export interface SlmStatistics { retention_deletion_time?: DateString retention_deletion_time_millis?: EpochMillis retention_failed?: long @@ -11856,64 +13371,113 @@ export interface SnapshotLifecycleStats { snapshots_taken?: long } -export interface SnapshotRepository { - type: string - uuid?: Uuid - settings: SnapshotRepositorySettings +export interface SlmDeleteLifecycleRequest extends RequestBase { + policy_id: Name } -export interface SnapshotRepositorySettings { - chunk_size?: string - compress?: string | boolean - concurrent_streams?: string | integer - location: string - read_only?: string | boolean - readonly?: string | boolean +export interface SlmDeleteLifecycleResponse extends AcknowledgedResponseBase { } -export interface SnapshotRequest extends RequestBase { - repository: Name - snapshot: Name - master_timeout?: Time - wait_for_completion?: boolean +export interface SlmExecuteLifecycleRequest extends RequestBase { + policy_id: Name +} + +export interface SlmExecuteLifecycleResponse { + snapshot_name: Name +} + +export interface SlmExecuteRetentionRequest extends RequestBase { +} + +export interface SlmExecuteRetentionResponse extends AcknowledgedResponseBase { +} + +export interface SlmGetLifecycleRequest extends RequestBase { + policy_id?: Names +} + +export interface SlmGetLifecycleResponse extends DictionaryResponseBase { +} + +export interface SlmGetStatsRequest extends RequestBase { +} + +export interface SlmGetStatsResponse { + retention_deletion_time: string + retention_deletion_time_millis: EpochMillis + retention_failed: long + retention_runs: long + retention_timed_out: long + total_snapshots_deleted: long + total_snapshot_deletion_failures: long + total_snapshots_failed: long + total_snapshots_taken: long + policy_stats: string[] +} + +export interface SlmGetStatusRequest extends RequestBase { +} + +export interface SlmGetStatusResponse { + operation_mode: LifecycleOperationMode +} + +export interface SlmPutLifecycleRequest extends RequestBase { + policy_id: Name body?: { - ignore_unavailable?: boolean - include_global_state?: boolean - indices?: Indices - metadata?: Record - partial?: boolean + config?: SlmConfiguration + name?: Name + repository?: string + retention?: SlmRetention + schedule?: WatcherCronExpression } } -export interface SnapshotResponse extends ResponseBase { - accepted?: boolean - snapshot?: SnapshotInfo +export interface SlmPutLifecycleResponse extends AcknowledgedResponseBase { } -export interface SnapshotResponseItem { - repository: Name - snapshots?: Array - error?: ErrorCause +export interface SlmStartRequest extends RequestBase { } -export interface SnapshotRestore { - indices: Array - snapshot: string - shards: ShardStatistics +export interface SlmStartResponse extends AcknowledgedResponseBase { } -export interface SnapshotRetentionConfiguration { - expire_after: Time - max_count: integer - min_count: integer +export interface SlmStopRequest extends RequestBase { } -export interface SnapshotShardFailure { - index: string - node_id: string - reason: string - shard_id: string - status: string +export interface SlmStopResponse 
extends AcknowledgedResponseBase { +} + +export interface SnapshotFileCountSnapshotStats { + file_count: integer + size_in_bytes: long +} + +export interface SnapshotIndexDetails { + shard_count: integer + size?: ByteSize + size_in_bytes: long + max_segments_per_shard: long +} + +export interface SnapshotInfoFeatureState { + feature_name: string + indices: Indices +} + +export interface SnapshotRepository { + type: string + uuid?: Uuid + settings: SnapshotRepositorySettings +} + +export interface SnapshotRepositorySettings { + chunk_size?: string + compress?: string | boolean + concurrent_streams?: string | integer + location: string + read_only?: string | boolean + readonly?: string | boolean } export interface SnapshotShardsStats { @@ -11939,1943 +13503,1748 @@ export interface SnapshotShardsStatsSummaryItem { size_in_bytes: long } -export interface SnapshotShardsStatus { +export interface SnapshotSnapshotIndexStats { + shards: Record + shards_stats: SnapshotShardsStats + stats: SnapshotSnapshotStats +} + +export interface SnapshotSnapshotInfo { + data_streams: string[] + duration?: Time + duration_in_millis?: EpochMillis + end_time?: Time + end_time_in_millis?: EpochMillis + failures?: SnapshotSnapshotShardFailure[] + include_global_state?: boolean + indices: IndexName[] + index_details?: Record + metadata?: Metadata + reason?: string + snapshot: Name + shards?: ShardStatistics + start_time?: Time + start_time_in_millis?: EpochMillis + state?: string + uuid: Uuid + version?: VersionString + version_id?: VersionNumber + feature_states?: SnapshotInfoFeatureState[] +} + +export interface SnapshotSnapshotShardFailure { + index: IndexName + node_id: Id + reason: string + shard_id: Id + status: string +} + +export interface SnapshotSnapshotShardsStatus { stage: SnapshotShardsStatsStage stats: SnapshotShardsStatsSummary } -export interface SnapshotStats { - incremental: FileCountSnapshotStats +export interface SnapshotSnapshotStats { + incremental: SnapshotFileCountSnapshotStats start_time_in_millis: long time_in_millis: long - total: FileCountSnapshotStats + total: SnapshotFileCountSnapshotStats } export interface SnapshotStatus { include_global_state: boolean - indices: Record + indices: Record repository: string shards_stats: SnapshotShardsStats snapshot: string state: string - stats: SnapshotStats + stats: SnapshotSnapshotStats uuid: Uuid } -export interface SnapshotStatusRequest extends RequestBase { - repository?: Name - snapshot?: Names - ignore_unavailable?: boolean - master_timeout?: Time -} - -export interface SnapshotStatusResponse extends ResponseBase { - snapshots: Array -} - -export type SnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish' - -export interface SnowballTokenFilter extends TokenFilterBase { - language: SnowballLanguage -} - -export type Sort = SortCombinations | Array - -export type SortCombinations = Field | SortContainer | SortOrder - -export interface SortContainerKeys { - _score?: ScoreSort - _doc?: ScoreSort - _geo_distance?: GeoDistanceSort - _script?: ScriptSort +export interface SnapshotCleanupRepositoryCleanupRepositoryResults { + deleted_blobs: long + deleted_bytes: long } -export type SortContainer = SortContainerKeys | - { [property: string]: FieldSort | SortOrder } - -export type SortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' 
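+// Example (illustrative sketch, not part of the generated definitions): the
+// cleanup-repository call whose request shape follows below and whose response
+// wraps SnapshotCleanupRepositoryCleanupRepositoryResults above. The method
+// name `client.snapshot.cleanupRepository` follows the client's camelCase
+// convention; 'my-repository' is a placeholder.
+//
+//   const { body } = await client.snapshot.cleanupRepository({
+//     repository: 'my-repository'
+//   })
+//   // body.results.deleted_blobs / body.results.deleted_bytes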
-export type SortOrder = 'asc' | 'desc' | '_doc' - -export interface SortProcessor extends ProcessorBase { - field: Field - order: SortOrder - target_field: Field +export interface SnapshotCleanupRepositoryRequest extends RequestBase { + repository: Name + master_timeout?: Time + timeout?: Time } -export type SortResults = Array - -export interface SourceExistsRequest extends RequestBase { - id: Id - index: IndexName - type?: Type - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - source_enabled?: boolean - source_excludes?: Fields - source_includes?: Fields - version?: VersionNumber - version_type?: VersionType +export interface SnapshotCleanupRepositoryResponse { + results: SnapshotCleanupRepositoryCleanupRepositoryResults } -export type SourceExistsResponse = boolean - -export interface SourceField { - compress?: boolean - compress_threshold?: string - enabled: boolean - excludes?: Array - includes?: Array +export interface SnapshotCloneRequest extends RequestBase { + repository: Name + snapshot: Name + target_snapshot: Name + master_timeout?: Time + timeout?: Time + body?: { + indices: string + } } -export interface SourceFilter { - excludes?: Fields - includes?: Fields - exclude?: Fields - include?: Fields +export interface SnapshotCloneResponse extends AcknowledgedResponseBase { } -export interface SourceRequest extends RequestBase { - id: Id - index: IndexName - type?: Type - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - source_enabled?: boolean - _source_excludes?: Fields - _source_includes?: Fields - version?: VersionNumber - version_type?: VersionType +export interface SnapshotCreateRequest extends RequestBase { + repository: Name + snapshot: Name + master_timeout?: Time + wait_for_completion?: boolean + body?: { + ignore_unavailable?: boolean + include_global_state?: boolean + indices?: Indices + metadata?: Metadata + partial?: boolean + } } -export interface SourceResponse extends ResponseBase { - body: TDocument +export interface SnapshotCreateResponse { + accepted?: boolean + snapshot?: SnapshotSnapshotInfo } -export interface SpanContainingQuery extends QueryBase { - big?: SpanQuery - little?: SpanQuery +export interface SnapshotCreateRepositoryRequest extends RequestBase { + repository: Name + master_timeout?: Time + timeout?: Time + verify?: boolean + body?: { + repository?: SnapshotRepository + type: string + settings: SnapshotRepositorySettings + } } -export interface SpanFieldMaskingQuery extends QueryBase { - field?: Field - query?: SpanQuery +export interface SnapshotCreateRepositoryResponse extends AcknowledgedResponseBase { } -export interface SpanFirstQuery extends QueryBase { - end?: integer - match?: SpanQuery +export interface SnapshotDeleteRequest extends RequestBase { + repository: Name + snapshot: Name + master_timeout?: Time } -export interface SpanGapQuery extends QueryBase { - field?: Field - width?: integer +export interface SnapshotDeleteResponse extends AcknowledgedResponseBase { } -export interface SpanMultiTermQuery extends QueryBase { - match?: QueryContainer +export interface SnapshotDeleteRepositoryRequest extends RequestBase { + repository: Names + master_timeout?: Time + timeout?: Time } - -export interface SpanNearQuery extends QueryBase { - clauses?: Array - in_order?: boolean - slop?: integer + +export interface SnapshotDeleteRepositoryResponse extends AcknowledgedResponseBase { } -export interface SpanNotQuery extends QueryBase { - dist?: integer - exclude?: SpanQuery - 
include?: SpanQuery - post?: integer - pre?: integer +export interface SnapshotGetRequest extends RequestBase { + repository: Name + snapshot: Names + ignore_unavailable?: boolean + master_timeout?: Time + verbose?: boolean + index_details?: boolean + human?: boolean } -export interface SpanOrQuery extends QueryBase { - clauses?: Array +export interface SnapshotGetResponse { + responses?: SnapshotGetSnapshotResponseItem[] + snapshots?: SnapshotSnapshotInfo[] } -export interface SpanQuery extends QueryBase { - span_containing?: NamedQuery - field_masking_span?: NamedQuery - span_first?: NamedQuery - span_gap?: NamedQuery - span_multi?: SpanMultiTermQuery - span_near?: NamedQuery - span_not?: NamedQuery - span_or?: NamedQuery - span_term?: NamedQuery - span_within?: NamedQuery +export interface SnapshotGetSnapshotResponseItem { + repository: Name + snapshots?: SnapshotSnapshotInfo[] + error?: ErrorCause } -export interface SpanTermQuery extends QueryBase { - value: string +export interface SnapshotGetRepositoryRequest extends RequestBase { + repository?: Names + local?: boolean + master_timeout?: Time } -export interface SpanWithinQuery extends QueryBase { - big?: SpanQuery - little?: SpanQuery +export interface SnapshotGetRepositoryResponse extends DictionaryResponseBase { } -export interface SplitIndexRequest extends RequestBase { - index: IndexName - target: IndexName +export interface SnapshotRestoreRequest extends RequestBase { + repository: Name + snapshot: Name master_timeout?: Time - timeout?: Time - wait_for_active_shards?: WaitForActiveShards + wait_for_completion?: boolean body?: { - aliases?: Record - settings?: Record + ignore_index_settings?: string[] + ignore_unavailable?: boolean + include_aliases?: boolean + include_global_state?: boolean + index_settings?: IndicesPutSettingsRequest + indices?: Indices + partial?: boolean + rename_pattern?: string + rename_replacement?: string } } -export interface SplitIndexResponse extends AcknowledgedResponseBase { - shards_acknowledged: boolean - index: IndexName +export interface SnapshotRestoreResponse { + snapshot: SnapshotRestoreSnapshotRestore } -export interface SplitProcessor extends ProcessorBase { - field: Field - ignore_missing?: boolean - preserve_trailing?: boolean - separator: string - target_field?: Field +export interface SnapshotRestoreSnapshotRestore { + indices: IndexName[] + snapshot: string + shards: ShardStatistics } -export interface SqlColumn { - name: string - type: string +export interface SnapshotStatusRequest extends RequestBase { + repository?: Name + snapshot?: Names + ignore_unavailable?: boolean + master_timeout?: Time } -export type SqlRow = Array +export interface SnapshotStatusResponse { + snapshots: SnapshotStatus[] +} -export interface SqlUsage extends XPackUsage { - features: Record - queries: Record +export interface SnapshotVerifyRepositoryCompactNodeInfo { + name: Name } -export interface SslUsage { - http: SecurityFeatureToggle - transport: SecurityFeatureToggle +export interface SnapshotVerifyRepositoryRequest extends RequestBase { + repository: Name + master_timeout?: Time + timeout?: Time } -export interface StandardDeviationBounds { - lower?: double - upper?: double - lower_population?: double - upper_population?: double - lower_sampling?: double - upper_sampling?: double +export interface SnapshotVerifyRepositoryResponse { + nodes: Record } -export interface StandardTokenizer extends TokenizerBase { - max_token_length: integer +export interface SqlClearSqlCursorRequest extends RequestBase { + 
body?: { + cursor: string + } } -export interface StartBasicLicenseRequest extends RequestBase { - acknowledge?: boolean +export interface SqlClearSqlCursorResponse { + succeeded: boolean } -export interface StartBasicLicenseResponse extends AcknowledgedResponseBase { - acknowledge: Record> - basic_was_started: boolean - error_message: string +export interface SqlQuerySqlColumn { + name: Name + type: string } -export interface StartDatafeedRequest extends RequestBase { - datafeed_id: Id - start?: Time +export interface SqlQuerySqlRequest extends RequestBase { + format?: string body?: { - end?: Time - start?: Time - timeout?: Time + columnar?: boolean + cursor?: string + fetch_size?: integer + filter?: QueryDslQueryContainer + query?: string + request_timeout?: Time + page_timeout?: Time + time_zone?: string + field_multi_value_leniency?: boolean } } -export interface StartDatafeedResponse extends ResponseBase { - node: NodeIds - started: boolean +export interface SqlQuerySqlResponse { + columns?: SqlQuerySqlColumn[] + cursor?: string + rows: SqlQuerySqlRow[] } -export interface StartIlmRequest extends RequestBase { -} +export type SqlQuerySqlRow = any[] -export interface StartIlmResponse extends AcknowledgedResponseBase { +export interface SqlTranslateSqlRequest extends RequestBase { + body?: { + fetch_size?: integer + filter?: QueryDslQueryContainer + query: string + time_zone?: string + } } -export interface StartRollupJobRequest extends RequestBase { - id: Id +export interface SqlTranslateSqlResponse { + size: long + _source: boolean | Fields | SearchTypesSourceFilter + fields: Record[] + sort: SearchTypesSort } -export interface StartRollupJobResponse extends ResponseBase { - started: boolean +export interface SslGetCertificatesCertificateInformation { + alias?: string + expiry: DateString + format: string + has_private_key: boolean + path: string + serial_number: string + subject_dn: string } -export interface StartSnapshotLifecycleManagementRequest extends RequestBase { +export interface SslGetCertificatesRequest extends RequestBase { } -export interface StartSnapshotLifecycleManagementResponse extends AcknowledgedResponseBase { -} +export type SslGetCertificatesResponse = SslGetCertificatesCertificateInformation[] -export interface StartTransformRequest extends RequestBase { - transform_id: Name - timeout?: Time +export interface TaskInfo { + action: string + cancellable: boolean + children?: TaskInfo[] + description?: string + headers: HttpHeaders + id: long + node: string + running_time_in_nanos: long + start_time_in_millis: long + status?: TaskStatus + type: string + parent_task_id?: Id } -export interface StartTransformResponse extends AcknowledgedResponseBase { +export interface TaskState { + action: string + cancellable: boolean + description?: string + headers: HttpHeaders + id: long + node: string + parent_task_id?: TaskId + running_time_in_nanos: long + start_time_in_millis: long + status?: TaskStatus + type: string } -export interface StartTrialLicenseRequest extends RequestBase { - acknowledge?: boolean - type_query_string?: string +export interface TaskStatus { + batches: long + canceled?: string + created: long + deleted: long + noops: long + failures?: string[] + requests_per_second: float + retries: Retries + throttled?: Time + throttled_millis: long + throttled_until?: Time + throttled_until_millis: long + timed_out?: boolean + took?: long + total: long + updated: long + version_conflicts: long } -export interface StartTrialLicenseResponse extends 
AcknowledgedResponseBase { - error_message?: string - acknowledged: boolean - trial_was_started: boolean - type: LicenseType +export interface TaskTaskExecutingNode extends SpecUtilsBaseNode { + tasks: Record } -export interface StartWatcherRequest extends RequestBase { +export interface TaskCancelTasksRequest extends RequestBase { + task_id?: TaskId + actions?: string | string[] + nodes?: string[] + parent_task_id?: string + wait_for_completion?: boolean } -export interface StartWatcherResponse extends AcknowledgedResponseBase { +export interface TaskCancelTasksResponse { + node_failures?: ErrorCause[] + nodes: Record } -export interface StatsAggregate extends AggregateBase { - count: double - sum: double - avg?: double - max?: double - min?: double +export interface TaskGetTaskRequest extends RequestBase { + task_id: Id + timeout?: Time + wait_for_completion?: boolean } -export interface StatsAggregation extends FormatMetricAggregationBase { +export interface TaskGetTaskResponse { + completed: boolean + task: TaskInfo + response?: TaskStatus + error?: ErrorCause } -export interface StatsBucketAggregation extends PipelineAggregationBase { +export interface TaskListTasksRequest extends RequestBase { + actions?: string | string[] + detailed?: boolean + group_by?: GroupBy + nodes?: string[] + parent_task_id?: Id + timeout?: Time + wait_for_completion?: boolean } -export type Status = 'success' | 'failure' | 'simulated' | 'throttled' +export interface TaskListTasksResponse { + node_failures?: ErrorCause[] + nodes?: Record + tasks?: Record | TaskInfo[] +} -export interface StemmerOverrideTokenFilter extends TokenFilterBase { - rules: Array - rules_path: string +export interface TextStructureFindStructureFieldStat { + count: integer + cardinality: integer + top_hits: TextStructureFindStructureTopHit[] + mean_value?: integer + median_value?: integer + max_value?: integer + min_value?: integer + earliest?: string + latest?: string } -export interface StemmerTokenFilter extends TokenFilterBase { - language: string +export interface TextStructureFindStructureRequest { + charset?: string + column_names?: string + delimiter?: string + explain?: boolean + format?: string + grok_pattern?: string + has_header_row?: boolean + line_merge_size_limit?: uint + lines_to_sample?: uint + quote?: string + should_trim_fields?: boolean + timeout?: Time + timestamp_field?: Field + timestamp_format?: string + body?: TJsonDocument[] } -export interface StepKey { - action: string - name: string - phase: string +export interface TextStructureFindStructureResponse { + charset: string + has_header_row?: boolean + has_byte_order_marker: boolean + format: string + field_stats: Record + sample_start: string + num_messages_analyzed: integer + mappings: MappingTypeMapping + quote?: string + delimiter?: string + need_client_timezone: boolean + num_lines_analyzed: integer + column_names?: string[] + explanation?: string[] + grok_pattern?: string + multiline_start_pattern?: string + exclude_lines_pattern?: string + java_timestamp_formats?: string[] + joda_timestamp_formats?: string[] + timestamp_field?: Field + should_trim_fields?: boolean + ingest_pipeline: IngestPipelineConfig } -export interface StopDatafeedRequest extends RequestBase { - datafeed_id: Ids - allow_no_match?: boolean - force?: boolean - body?: { - force?: boolean - timeout?: Time - } +export interface TextStructureFindStructureTopHit { + count: long + value: any } -export interface StopDatafeedResponse extends ResponseBase { - stopped: boolean +export interface 
TransformLatest { + sort: Field + unique_key: Field[] } -export interface StopIlmRequest extends RequestBase { +export interface TransformPivot { + aggregations?: Record + aggs?: Record + group_by: Record + max_page_search_size?: integer } -export interface StopIlmResponse extends AcknowledgedResponseBase { +export interface TransformPivotGroupByContainer { + date_histogram?: AggregationsDateHistogramAggregation + geotile_grid?: AggregationsGeoTileGridAggregation + histogram?: AggregationsHistogramAggregation + terms?: AggregationsTermsAggregation } -export interface StopRollupJobRequest extends RequestBase { - id: Id - timeout?: Time - wait_for_completion?: boolean +export interface TransformRetentionPolicy { + field: Field + max_age: Time } -export interface StopRollupJobResponse extends ResponseBase { - stopped: boolean +export interface TransformRetentionPolicyContainer { + time: TransformRetentionPolicy } -export interface StopSnapshotLifecycleManagementRequest extends RequestBase { +export interface TransformSettings { + dates_as_epoch_millis?: boolean + docs_per_second?: float + max_page_search_size?: integer } -export interface StopSnapshotLifecycleManagementResponse extends AcknowledgedResponseBase { +export interface TransformSyncContainer { + time: TransformTimeSync } -export interface StopTokenFilter extends TokenFilterBase { - ignore_case?: boolean - remove_trailing?: boolean - stopwords: StopWords - stopwords_path?: string +export interface TransformTimeSync { + delay?: Time + field: Field } -export interface StopTransformRequest extends RequestBase { +export interface TransformDeleteTransformRequest extends RequestBase { transform_id: Name - allow_no_match?: boolean force?: boolean - timeout?: Time - wait_for_checkpoint?: boolean - wait_for_completion?: boolean -} - -export interface StopTransformResponse extends AcknowledgedResponseBase { -} - -export interface StopWatcherRequest extends RequestBase { } -export interface StopWatcherResponse extends AcknowledgedResponseBase { +export interface TransformDeleteTransformResponse extends AcknowledgedResponseBase { } -export type StopWords = string | Array - -export interface StoreStats { - size?: string - size_in_bytes: double - reserved_in_bytes: double +export interface TransformGetTransformRequest extends RequestBase { + transform_id?: Name + allow_no_match?: boolean + from?: integer + size?: integer + exclude_generated?: boolean } -export interface StoredRoleTemplate { - template: StoredRoleTemplateId - format?: RoleTemplateFormat +export interface TransformGetTransformResponse { + count: long + transforms: Transform[] } -export interface StoredRoleTemplateId { - id: string +export interface TransformGetTransformStatsCheckpointStats { + checkpoint: long + checkpoint_progress?: TransformGetTransformStatsTransformProgress + timestamp?: DateString + timestamp_millis: EpochMillis + time_upper_bound?: DateString + time_upper_bound_millis?: EpochMillis } -export interface StoredScript { - lang?: string - source: string +export interface TransformGetTransformStatsCheckpointing { + changes_last_detected_at: long + changes_last_detected_at_date_time?: DateString + last: TransformGetTransformStatsCheckpointStats + next?: TransformGetTransformStatsCheckpointStats + operations_behind?: long } -export type StringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram' - -export interface StringFielddata { - format: StringFielddataFormat +export interface TransformGetTransformStatsRequest extends 
RequestBase { + transform_id: Name + allow_no_match?: boolean + from?: long + size?: long } -export type StringFielddataFormat = 'paged_bytes' | 'disabled' - -export interface StringStatsAggregate extends AggregateBase { +export interface TransformGetTransformStatsResponse { count: long - min_length: integer - max_length: integer - avg_length: double - entropy: double - distribution?: Record + transforms: TransformGetTransformStatsTransformStats[] } -export interface StringStatsAggregation extends MetricAggregationBase { - show_distribution?: boolean +export interface TransformGetTransformStatsTransformIndexerStats { + documents_indexed: long + documents_processed: long + exponential_avg_checkpoint_duration_ms: double + exponential_avg_documents_indexed: double + exponential_avg_documents_processed: double + index_failures: long + index_time_in_ms: long + index_total: long + pages_processed: long + processing_time_in_ms: long + processing_total: long + search_failures: long + search_time_in_ms: long + search_total: long + trigger_count: long } -export interface StupidBackoffSmoothingModel { - discount: double +export interface TransformGetTransformStatsTransformProgress { + docs_indexed: long + docs_processed: long + docs_remaining: long + percent_complete: double + total_docs: long } -export interface Suggest { - length: integer - offset: integer - options: Array> - text: string +export interface TransformGetTransformStatsTransformStats { + checkpointing: TransformGetTransformStatsCheckpointing + id: Id + node?: NodeAttributes + reason?: string + state: string + stats: TransformGetTransformStatsTransformIndexerStats } -export interface SuggestContainer { - completion?: CompletionSuggester - phrase?: PhraseSuggester - prefix?: string - regex?: string - term?: TermSuggester - text?: string +export interface TransformPreviewTransformRequest extends RequestBase { + body?: { + dest?: ReindexDestination + description?: string + frequency?: Time + pivot?: TransformPivot + source?: ReindexSource + settings?: TransformSettings + sync?: TransformSyncContainer + retention_policy?: TransformRetentionPolicyContainer + latest?: TransformLatest + } } -export interface SuggestContext { - name: string - path: Field - type: string +export interface TransformPreviewTransformResponse { + generated_dest_index: IndicesIndexState + preview: TTransform[] } -export interface SuggestContextQuery { - boost?: double - context: Context - neighbours?: Array | Array - precision?: Distance | integer - prefix?: boolean +export interface TransformPutTransformRequest extends TransformPreviewTransformRequest { + transform_id: Id + defer_validation?: boolean } -export interface SuggestFuzziness { - fuzziness: Fuzziness - min_length: integer - prefix_length: integer - transpositions: boolean - unicode_aware: boolean +export interface TransformPutTransformResponse extends AcknowledgedResponseBase { } -export type SuggestMode = 'missing' | 'popular' | 'always' - -export type SuggestOption = CompletionSuggestOption | PhraseSuggestOption | TermSuggestOption - -export type SuggestSort = 'score' | 'frequency' - -export interface SuggesterBase { - field: Field - analyzer?: string - size?: integer +export interface TransformStartTransformRequest extends RequestBase { + transform_id: Name + timeout?: Time } -export type SuggestionName = string - -export interface SumAggregation extends FormatMetricAggregationBase { +export interface TransformStartTransformResponse extends AcknowledgedResponseBase { } -export interface 
SumBucketAggregation extends PipelineAggregationBase { +export interface TransformStopTransformRequest extends RequestBase { + transform_id: Name + allow_no_match?: boolean + force?: boolean + timeout?: Time + wait_for_checkpoint?: boolean + wait_for_completion?: boolean } -export interface SyncedFlushRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean +export interface TransformStopTransformResponse extends AcknowledgedResponseBase { } -export interface SyncedFlushResponse extends DictionaryResponseBase { - _shards: ShardStatistics +export interface TransformUpdateTransformRequest extends TransformPutTransformRequest { } -export type SynonymFormat = 'solr' | 'wordnet' - -export interface SynonymGraphTokenFilter extends TokenFilterBase { - expand: boolean - format: SynonymFormat - lenient: boolean - synonyms: Array - synonyms_path: string - tokenizer: string - updateable: boolean +export interface TransformUpdateTransformResponse { + create_time: long + description: string + dest: ReindexDestination + frequency: Time + id: Id + pivot: TransformPivot + settings: TransformSettings + source: ReindexSource + sync?: TransformSyncContainer + version: VersionString } -export interface SynonymTokenFilter extends TokenFilterBase { - expand: boolean - format: SynonymFormat - lenient: boolean - synonyms: Array - synonyms_path: string - tokenizer: string - updateable: boolean +export interface WatcherAcknowledgeState { + state: WatcherAcknowledgementOptions + timestamp: DateString } -export interface TDigest { - compression?: integer -} +export type WatcherAcknowledgementOptions = 'awaits_successful_execution' | 'ackable' | 'acked' -export interface TDigestPercentilesAggregate extends AggregateBase { - values: Record +export interface WatcherAction { + action_type?: WatcherActionType + condition?: WatcherConditionContainer + foreach?: string + max_iterations?: integer + name?: Name + throttle_period?: Time + throttle_period_in_millis?: EpochMillis + transform?: TransformContainer + index?: WatcherIndex + logging?: WatcherLogging } -export interface TTestAggregation extends Aggregation { - a?: TestPopulation - b?: TestPopulation - type?: TTestType +export type WatcherActionExecutionMode = 'simulate' | 'force_simulate' | 'execute' | 'force_execute' | 'skip' + +export interface WatcherActionStatus { + ack: WatcherAcknowledgeState + last_execution?: WatcherExecutionState + last_successful_execution?: WatcherExecutionState + last_throttle?: WatcherThrottleState } -export type TTestType = 'paired' | 'homoscedastic' | 'heteroscedastic' +export type WatcherActionStatusOptions = 'success' | 'failure' | 'simulated' | 'throttled' -export interface TaskExecutingNode { - attributes: Record - host: string - ip: string - name: string - roles: Array - tasks: Record - transport_address: string -} +export type WatcherActionType = 'email' | 'webhook' | 'index' | 'logging' | 'slack' | 'pagerduty' -export type TaskId = string | integer +export type WatcherActions = Record -export interface TaskInfo { - action: string - cancellable: boolean - children?: Array - description?: string - headers: Record - id: long - node: string - running_time_in_nanos: long - start_time_in_millis: long - status?: TaskStatus - type: string - parent_task_id?: Id +export interface WatcherActivationState { + active: boolean + timestamp: Timestamp } -export interface TaskRetries { - bulk: integer - search: integer +export interface WatcherActivationStatus 
{ + actions: WatcherActions + state: WatcherActivationState + version: VersionNumber } -export interface TaskState { - action: string - cancellable: boolean - description?: string - headers: Record - id: long - node: string - parent_task_id?: TaskId - running_time_in_nanos: long - start_time_in_millis: long - status?: TaskStatus - type: string +export interface WatcherAlwaysCondition { } -export interface TaskStatus { - batches: long - canceled?: string - created: long - deleted: long - noops: long - failures?: Array - requests_per_second: float - retries: TaskRetries - throttled?: Time - throttled_millis: long - throttled_until?: Time - throttled_until_millis: long - timed_out?: boolean - took?: long - total: long - updated: long - version_conflicts: long +export interface WatcherArrayCompareCondition { + array_path: string + comparison: string + path: string + quantifier: WatcherQuantifier + value: any } -export interface TemplateMapping { - aliases: Record - index_patterns: Array - mappings: TypeMapping - order: integer - settings: Record - version?: VersionNumber +export interface WatcherChainInput { + inputs: WatcherInputContainer[] } -export interface TermQuery extends QueryBase { - value?: string | float | boolean +export interface WatcherCompareCondition { + comparison?: string + path?: string + value?: any + 'ctx.payload.match'?: WatcherCompareContextPayloadCondition + 'ctx.payload.value'?: WatcherCompareContextPayloadCondition } -export interface TermSuggestOption { - text: string - freq?: long - score: double +export interface WatcherCompareContextPayloadCondition { + eq?: any + lt?: any + gt?: any + lte?: any + gte?: any } -export interface TermSuggester extends SuggesterBase { - lowercase_terms?: boolean - max_edits?: integer - max_inspections?: integer - max_term_freq?: float - min_doc_freq?: float - min_word_length?: integer - prefix_length?: integer - shard_size?: integer - sort?: SuggestSort - string_distance?: StringDistance - suggest_mode?: SuggestMode - text?: string +export interface WatcherConditionContainer { + always?: WatcherAlwaysCondition + array_compare?: WatcherArrayCompareCondition + compare?: WatcherCompareCondition + never?: WatcherNeverCondition + script?: WatcherScriptCondition } -export interface TermUserPrivileges { - apps: boolean -} +export type WatcherConditionType = 'always' | 'never' | 'script' | 'compare' | 'array_compare' -export interface TermVector { - field_statistics: FieldStatistics - terms: Record +export type WatcherConnectionScheme = 'http' | 'https' + +export interface WatcherCronExpression extends WatcherScheduleBase { } -export interface TermVectorFilter { - max_doc_freq?: integer - max_num_terms?: integer - max_term_freq?: integer - max_word_length?: integer - min_doc_freq?: integer - min_term_freq?: integer - min_word_length?: integer +export interface WatcherDailySchedule { + at: string[] | WatcherTimeOfDay } -export type TermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' +export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday' -export interface TermVectorTerm { - doc_freq?: integer - score?: double - term_freq: integer - tokens: Array - ttf?: integer +export interface WatcherEmailResult { + account?: string + message: WatcherEmailResult + reason?: string } -export interface TermVectorsRequest extends RequestBase { - index: IndexName - id?: Id - type?: Type - fields?: Fields - field_statistics?: boolean 
- offsets?: boolean - payloads?: boolean - positions?: boolean - preference?: string - realtime?: boolean - routing?: Routing - term_statistics?: boolean - version?: VersionNumber - version_type?: VersionType - body?: { - doc?: TDocument - filter?: TermVectorFilter - per_field_analyzer?: Record - } -} +export type WatcherExecutionPhase = 'awaits_execution' | 'started' | 'input' | 'condition' | 'actions' | 'watch_transform' | 'aborted' | 'finished' -export interface TermVectorsResponse extends ResponseBase { - found: boolean - _id: Id - _index: IndexName - term_vectors?: Record - took: long - _type?: Type - _version: VersionNumber +export interface WatcherExecutionResult { + actions: WatcherExecutionResultAction[] + condition: WatcherExecutionResultCondition + execution_duration: integer + execution_time: DateString + input: WatcherExecutionResultInput } -export interface TermVectorsResult { - found: boolean +export interface WatcherExecutionResultAction { + email?: WatcherEmailResult id: Id - index: IndexName - term_vectors: Record - took: long - version: VersionNumber + index?: WatcherIndexResult + logging?: WatcherLoggingResult + pagerduty?: WatcherPagerDutyResult + reason?: string + slack?: WatcherSlackResult + status: WatcherActionStatusOptions + type: WatcherActionType + webhook?: WatcherWebhookResult } -export interface TermsAggregate extends MultiBucketAggregate { - doc_count_error_upper_bound: long - sum_other_doc_count: long +export interface WatcherExecutionResultCondition { + met: boolean + status: WatcherActionStatusOptions + type: WatcherConditionType } -export interface TermsAggregation extends BucketAggregationBase { - collect_mode?: TermsAggregationCollectMode - exclude?: string | Array - execution_hint?: TermsAggregationExecutionHint - field?: Field - include?: string | Array | TermsInclude - min_doc_count?: integer - missing?: Missing - missing_bucket?: boolean - value_type?: string - order?: TermsAggregationOrder - script?: Script - shard_size?: integer - show_term_doc_count_error?: boolean - size?: integer +export interface WatcherExecutionResultInput { + payload: Record + status: WatcherActionStatusOptions + type: WatcherInputType } -export type TermsAggregationCollectMode = 'depth_first' | 'breadth_first' - -export type TermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality' - -export type TermsAggregationOrder = SortOrder | Record | Array> - -export interface TermsInclude { - num_partitions: long - partition: long +export interface WatcherExecutionState { + successful: boolean + timestamp: DateString } -export interface TermsQuery extends QueryBase { - terms?: Array - index?: IndexName - id?: Id - path?: string - routing?: Routing -} +export type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued' -export interface TermsRollupGrouping { - fields: Fields +export interface WatcherExecutionThreadPool { + max_size: long + queue_size: long } -export interface TermsSetQuery extends QueryBase { - minimum_should_match_field?: Field - minimum_should_match_script?: Script - terms?: Array +export interface WatcherHourlySchedule { + minute: integer[] } -export interface TestPopulation { - field: Field - script?: Script - filter?: QueryContainer +export interface WatcherHttpInput { + http?: WatcherHttpInput + extract?: string[] + request?: WatcherHttpInputRequestDefinition + 
response_content_type?: WatcherResponseContentType } -export interface TextIndexPrefixes { - max_chars: integer - min_chars: integer +export interface WatcherHttpInputAuthentication { + basic: WatcherHttpInputBasicAuthentication } -export interface TextProperty extends CorePropertyBase { - analyzer?: string - boost?: double - eager_global_ordinals?: boolean - fielddata?: boolean - fielddata_frequency_filter?: FielddataFrequencyFilter - index?: boolean - index_options?: IndexOptions - index_phrases?: boolean - index_prefixes?: TextIndexPrefixes - norms?: boolean - position_increment_gap?: integer - search_analyzer?: string - search_quote_analyzer?: string - term_vector?: TermVectorOption - type: 'text' +export interface WatcherHttpInputBasicAuthentication { + password: Password + username: Username } -export type TextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' +export type WatcherHttpInputMethod = 'head' | 'get' | 'post' | 'put' | 'delete' -export type TextToAnalyze = string | Array - -export interface ThreadCountStats { - active: long - completed: long - largest: long - queue: long - rejected: long - threads: long +export interface WatcherHttpInputProxy { + host: Host + port: uint } -export interface ThreadStats { - count: long - peak_count: long +export interface WatcherHttpInputRequestDefinition { + auth?: WatcherHttpInputAuthentication + body?: string + connection_timeout?: Time + headers?: Record + host?: Host + method?: WatcherHttpInputMethod + params?: Record + path?: string + port?: uint + proxy?: WatcherHttpInputProxy + read_timeout?: Time + scheme?: WatcherConnectionScheme + url?: string } -export type ThreadType = 'cpu' | 'wait' | 'block' - -export interface ThreeDimensionalPoint { - lat: double - lon: double - z?: double +export interface WatcherHttpInputRequestResult extends WatcherHttpInputRequestDefinition { } -export interface ThrottleState { - reason: string - timestamp: DateString +export interface WatcherHttpInputResponseResult { + body: string + headers: HttpHeaders + status: integer } -export type Time = string | integer - -export interface TimeOfDay { - hour: Array - minute: Array +export interface WatcherIndex { + index: IndexName + doc_id?: Id } -export interface TimeOfMonth { - at: Array - on: Array +export interface WatcherIndexResult { + response: WatcherIndexResultSummary } -export interface TimeOfWeek { - at: Array - on: Array +export interface WatcherIndexResultSummary { + created: boolean + id: Id + index: IndexName + result: Result + version: VersionNumber + type?: Type } -export interface TimeOfYear { - at: Array - int: Array - on: Array +export interface WatcherIndicesOptions { + allow_no_indices: boolean + expand_wildcards: ExpandWildcards + ignore_unavailable: boolean + ignore_throttled?: boolean } -export type TimeSpan = string +export interface WatcherInputContainer { + chain?: WatcherChainInput + http?: WatcherHttpInput + search?: WatcherSearchInput + simple?: Record +} -export type Timestamp = string +export type WatcherInputType = 'http' | 'search' | 'simple' -export interface TimingStats { - average_bucket_processing_time_ms?: double - bucket_count: long - exponential_average_bucket_processing_time_ms?: double - exponential_average_bucket_processing_time_per_hour_ms: double - job_id: Id - total_bucket_processing_time_ms: double - maximum_bucket_processing_time_ms?: double - minimum_bucket_processing_time_ms?: double +export interface WatcherLogging { + level: string + text: string } -export 
interface Token { - end_offset?: integer - payload?: string - position: integer - start_offset?: integer +export interface WatcherLoggingResult { + logged_text: string } -export type TokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' +export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | 'june' | 'july' | 'august' | 'september' | 'october' | 'november' | 'december' -export interface TokenCountProperty extends DocValuesPropertyBase { - analyzer?: string - boost?: double - index?: boolean - null_value?: double - enable_position_increments?: boolean - type: 'token_count' +export interface WatcherNeverCondition { } -export interface TokenDetail { - name: string - tokens: Array +export interface WatcherPagerDutyActionEventResult { + event: WatcherPagerDutyEvent + reason: string + request: WatcherHttpInputRequestResult + response: WatcherHttpInputResponseResult } -export type TokenFilter = AsciiFoldingTokenFilter | CommonGramsTokenFilter | ConditionTokenFilter | DelimitedPayloadTokenFilter | EdgeNGramTokenFilter | ElisionTokenFilter | FingerprintTokenFilter | HunspellTokenFilter | HyphenationDecompounderTokenFilter | KeepTypesTokenFilter | KeepWordsTokenFilter | KeywordMarkerTokenFilter | KStemTokenFilter | LengthTokenFilter | LimitTokenCountTokenFilter | LowercaseTokenFilter | MultiplexerTokenFilter | NGramTokenFilter | NoriPartOfSpeechTokenFilter | PatternCaptureTokenFilter | PatternReplaceTokenFilter | PorterStemTokenFilter | PredicateTokenFilter | RemoveDuplicatesTokenFilter | ReverseTokenFilter | ShingleTokenFilter | SnowballTokenFilter | StemmerOverrideTokenFilter | StemmerTokenFilter | StopTokenFilter | SynonymGraphTokenFilter | SynonymTokenFilter | TrimTokenFilter | TruncateTokenFilter | UniqueTokenFilter | UppercaseTokenFilter | WordDelimiterGraphTokenFilter | WordDelimiterTokenFilter - -export interface TokenFilterBase { - type: string - version?: VersionString +export interface WatcherPagerDutyContext { + href: string + src: string + type: WatcherPagerDutyContextType } -export type Tokenizer = CharGroupTokenizer | EdgeNGramTokenizer | KeywordTokenizer | LetterTokenizer | LowercaseTokenizer | NGramTokenizer | NoriTokenizer | PathHierarchyTokenizer | StandardTokenizer | UaxEmailUrlTokenizer | WhitespaceTokenizer +export type WatcherPagerDutyContextType = 'link' | 'image' -export interface TokenizerBase { - type: string - version?: VersionString +export interface WatcherPagerDutyEvent { + account: string + attach_payload: boolean + client: string + client_url: string + context: WatcherPagerDutyContext[] + description: string + event_type: WatcherPagerDutyEventType + incident_key: string } -export interface TopHit { - count: long - value: any -} +export type WatcherPagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge' -export interface TopHitsAggregate extends AggregateBase { - hits: HitsMetadata> +export interface WatcherPagerDutyResult { + sent_event: WatcherPagerDutyActionEventResult } -export interface TopHitsAggregation extends MetricAggregationBase { - docvalue_fields?: Fields - explain?: boolean - from?: integer - highlight?: Highlight - script_fields?: Record - size?: integer - sort?: Sort - _source?: boolean | SourceFilter | Fields - stored_fields?: Fields - track_scores?: boolean - version?: boolean - seq_no_primary_term?: boolean -} +export type WatcherQuantifier = 'some' | 'all' -export interface TopMetrics { - sort: Array - metrics: Record +export type WatcherResponseContentType = 'json' | 'yaml' | 'text' + 
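As a quick sketch of how the Watcher condition and schedule declarations above fit together — illustrative only: the import path and the field values are assumptions, not part of this patch, and WatcherScheduleContainer / WatcherTriggerContainer are declared further down in this same diff:

    // Hypothetical usage of the generated Watcher types (not part of this patch).
    // The './api/types' path is an assumption about where the declarations land.
    import type { WatcherConditionContainer, WatcherTriggerContainer } from './api/types'

    // Fire when the payload reports more than 10 hits...
    const condition: WatcherConditionContainer = {
      compare: { path: 'ctx.payload.hits.total', comparison: 'gt', value: 10 }
    }

    // ...checking once a day at noon.
    const trigger: WatcherTriggerContainer = {
      schedule: { daily: { at: ['noon'] } }
    }

Both object literals type-check against the interfaces introduced in this diff, since every field of WatcherCompareCondition and WatcherDailySchedule shown here is optional or accepts the plain values used above.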
+export interface WatcherScheduleBase { } -export interface TopMetricsAggregate extends AggregateBase { - top: Array +export interface WatcherScheduleContainer { + cron?: WatcherCronExpression + daily?: WatcherDailySchedule + hourly?: WatcherHourlySchedule + interval?: Time + monthly?: WatcherTimeOfMonth[] + weekly?: WatcherTimeOfWeek[] + yearly?: WatcherTimeOfYear[] } -export interface TopMetricsAggregation extends MetricAggregationBase { - metrics?: TopMetricsValue | Array - size?: integer - sort?: Sort +export interface WatcherScheduleTriggerEvent { + scheduled_time: DateString | string + triggered_time?: DateString | string } -export interface TopMetricsValue { - field: Field +export interface WatcherScriptCondition { + lang: string + params?: Record + source: string } -export interface TotalFileSystemStats { - available: string - available_in_bytes: long - free: string - free_in_bytes: long - total: string - total_in_bytes: long +export interface WatcherSearchInput { + extract?: string[] + request: WatcherSearchInputRequestDefinition + timeout?: Time } -export interface TotalHits { - relation: TotalHitsRelation - value: long +export interface WatcherSearchInputRequestBody { + query: QueryDslQueryContainer } -export type TotalHitsRelation = 'eq' | 'gte' +export interface WatcherSearchInputRequestDefinition { + body?: WatcherSearchInputRequestBody + indices?: IndexName[] + indices_options?: WatcherIndicesOptions + search_type?: SearchType + template?: SearchTemplateRequest + rest_total_hits_as_int?: boolean +} -export interface Transform { +export interface WatcherSimulatedActions { + actions: string[] + all: WatcherSimulatedActions + use_all: boolean } -export interface TransformCheckpointStats { - checkpoint: long - checkpoint_progress?: TransformProgress - timestamp?: DateString - timestamp_millis: EpochMillis - time_upper_bound?: DateString - time_upper_bound_millis?: EpochMillis +export interface WatcherSlackAttachment { + author_icon?: string + author_link?: string + author_name: string + color?: string + fallback?: string + fields?: WatcherSlackAttachmentField[] + footer?: string + footer_icon?: string + image_url?: string + pretext?: string + text?: string + thumb_url?: string + title: string + title_link?: string + ts?: DateString } -export interface TransformCheckpointingInfo { - changes_last_detected_at: long - changes_last_detected_at_date_time?: DateString - last: TransformCheckpointStats - next?: TransformCheckpointStats - operations_behind?: long +export interface WatcherSlackAttachmentField { + short: boolean + title: string + value: string } -export interface TransformContainer { - chain: ChainTransform - script: ScriptTransform - search: SearchTransform +export interface WatcherSlackDynamicAttachment { + attachment_template: WatcherSlackAttachment + list_path: string } -export interface TransformDestination { - index: IndexName - pipeline?: string +export interface WatcherSlackMessage { + attachments: WatcherSlackAttachment[] + dynamic_attachments?: WatcherSlackDynamicAttachment + from: string + icon?: string + text: string + to: string[] } -export interface TransformIndexerStats { - documents_indexed: long - documents_processed: long - exponential_avg_checkpoint_duration_ms: double - exponential_avg_documents_indexed: double - exponential_avg_documents_processed: double - index_failures: long - index_time_in_ms: long - index_total: long - pages_processed: long - processing_time_in_ms: long - processing_total: long - search_failures: long - search_time_in_ms: long - 
search_total: long - trigger_count: long +export interface WatcherSlackResult { + account?: string + message: WatcherSlackMessage } -export interface TransformPivot { - aggregations: Record - group_by: Record - max_page_search_size?: integer +export interface WatcherThrottleState { + reason: string + timestamp: DateString } -export interface TransformProgress { - docs_indexed: long - docs_processed: long - docs_remaining: long - percent_complete: double - total_docs: long +export interface WatcherTimeOfDay { + hour: integer[] + minute: integer[] } -export interface TransformSource { - index: Indices - query: QueryContainer +export interface WatcherTimeOfMonth { + at: string[] + on: integer[] } -export interface TransformStats { - checkpointing: TransformCheckpointingInfo - id: Id - node?: NodeAttributes - reason?: string - state: string - stats: TransformIndexerStats +export interface WatcherTimeOfWeek { + at: string[] + on: WatcherDay[] } -export interface TransformSyncContainer { - time: TransformTimeSync +export interface WatcherTimeOfYear { + at: string[] + int: WatcherMonth[] + on: integer[] } -export interface TransformTimeSync { - delay: Time - field: Field +export interface WatcherTriggerContainer { + schedule: WatcherScheduleContainer } -export interface TransientMetadata { - enabled: boolean +export interface WatcherTriggerEventContainer { + schedule: WatcherScheduleTriggerEvent } -export interface TranslateSqlRequest extends RequestBase { - body: { - fetch_size?: integer - filter?: QueryContainer - query?: string - time_zone?: string - } +export interface WatcherTriggerEventResult { + manual: WatcherTriggerEventContainer + triggered_time: DateString + type: string } -export interface TranslateSqlResponse extends ResponseBase { - size: long - _source: boolean | Fields | SourceFilter - fields: Array> - sort: Sort +export interface WatcherWatch { + actions: Record + condition: WatcherConditionContainer + input: WatcherInputContainer + metadata?: Metadata + status?: WatcherWatchStatus + throttle_period?: string + transform?: TransformContainer + trigger: WatcherTriggerContainer + throttle_period_in_millis?: long } -export interface TranslogStats { - earliest_last_modified_age: long - operations: long - size?: string - size_in_bytes: long - uncommitted_operations: integer - uncommitted_size?: string - uncommitted_size_in_bytes: long +export interface WatcherWatchStatus { + actions: WatcherActions + last_checked?: DateString + last_met_condition?: DateString + state: WatcherActivationState + version: VersionNumber + execution_state?: string } -export interface TransportStats { - rx_count: long - rx_size: string - rx_size_in_bytes: long - server_open: integer - tx_count: long - tx_size: string - tx_size_in_bytes: long +export interface WatcherWebhookResult { + request: WatcherHttpInputRequestResult + response?: WatcherHttpInputResponseResult } -export interface TriggerContainer { - schedule: ScheduleContainer +export interface WatcherAckWatchRequest extends RequestBase { + watch_id: Name + action_id?: Names } -export interface TriggerEventContainer { - schedule: ScheduleTriggerEvent +export interface WatcherAckWatchResponse { + status: WatcherWatchStatus } -export interface TriggerEventResult { - manual: TriggerEventContainer - triggered_time: DateString - type: string +export interface WatcherActivateWatchRequest extends RequestBase { + watch_id: Name } -export interface TrimProcessor extends ProcessorBase { - field: Field - ignore_missing?: boolean - target_field?: Field +export 
interface WatcherActivateWatchResponse { + status: WatcherActivationStatus } -export interface TrimTokenFilter extends TokenFilterBase { +export interface WatcherDeactivateWatchRequest extends RequestBase { + watch_id: Name } -export interface TruncateTokenFilter extends TokenFilterBase { - length: integer +export interface WatcherDeactivateWatchResponse { + status: WatcherActivationStatus } -export interface TwoDimensionalPoint { - lat: double - lon: double +export interface WatcherDeleteWatchRequest extends RequestBase { + id: Name } -export type Type = string +export interface WatcherDeleteWatchResponse { + found: boolean + _id: Id + _version: VersionNumber +} -export interface TypeExistsRequest extends RequestBase { - index: Indices - type: Types - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - local?: boolean +export interface WatcherExecuteWatchRequest extends RequestBase { + id?: Id + debug?: boolean + body?: { + action_modes?: Record + alternative_input?: Record + ignore_condition?: boolean + record_execution?: boolean + simulated_actions?: WatcherSimulatedActions + trigger_data?: WatcherScheduleTriggerEvent + watch?: WatcherWatch + } } -export type TypeExistsResponse = boolean - -export interface TypeFieldMappings { - mappings: Record +export interface WatcherExecuteWatchResponse { + _id: Id + watch_record: WatcherExecuteWatchWatchRecord } -export interface TypeMapping { - all_field?: AllField - date_detection?: boolean - dynamic?: boolean | DynamicMapping - dynamic_date_formats?: Array - dynamic_templates?: Record | Array> - _field_names?: FieldNamesField - index_field?: IndexField - _meta?: Record - numeric_detection?: boolean - properties?: Record - _routing?: RoutingField - _size?: SizeField - _source?: SourceField - runtime?: Record +export interface WatcherExecuteWatchWatchRecord { + condition: WatcherConditionContainer + input: WatcherInputContainer + messages: string[] + metadata: Metadata + node: string + result: WatcherExecutionResult + state: WatcherExecutionStatus + trigger_event: WatcherTriggerEventResult + user: Username + watch_id: Id } -export interface TypeQuery extends QueryBase { - value: string +export interface WatcherGetWatchRequest extends RequestBase { + id: Name } -export type Types = Type | Array - -export interface UaxEmailUrlTokenizer extends TokenizerBase { - max_token_length: integer +export interface WatcherGetWatchResponse { + found: boolean + _id: Id + status?: WatcherWatchStatus + watch?: WatcherWatch + _primary_term?: integer + _seq_no?: SequenceNumber + _version?: VersionNumber } -export interface UnassignedInformation { - at: DateString - last_allocation_status: string - reason: UnassignedInformationReason - details?: string - failed_allocation_attempts?: integer +export interface WatcherPutWatchRequest extends RequestBase { + id: Id + active?: boolean + if_primary_term?: long + if_sequence_number?: long + version?: VersionNumber + body?: { + actions?: Record + condition?: WatcherConditionContainer + input?: WatcherInputContainer + metadata?: Metadata + throttle_period?: string + transform?: TransformContainer + trigger?: WatcherTriggerContainer + } } -export type UnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 
'MANUAL_ALLOCATION' - -export interface UnfollowIndexRequest extends RequestBase { - index: IndexName +export interface WatcherPutWatchResponse { + created: boolean + _id: Id + _primary_term: long + _seq_no: SequenceNumber + _version: VersionNumber } -export interface UnfollowIndexResponse extends AcknowledgedResponseBase { +export interface WatcherQueryWatchesRequest extends RequestBase { + stub_a: string + stub_b: string + body?: { + stub_c: string + } } -export interface UnfreezeIndexRequest extends RequestBase { - index: IndexName - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time - wait_for_active_shards?: string +export interface WatcherQueryWatchesResponse { + stub: integer } -export interface UnfreezeIndexResponse extends AcknowledgedResponseBase { - shards_acknowledged: boolean +export interface WatcherStartRequest extends RequestBase { } -export interface UniqueTokenFilter extends TokenFilterBase { - only_on_same_position: boolean +export interface WatcherStartResponse extends AcknowledgedResponseBase { } -export interface UpdateByQueryRequest extends RequestBase { - index: Indices - type?: Types - allow_no_indices?: boolean - analyzer?: string - analyze_wildcard?: boolean - conflicts?: Conflicts - default_operator?: DefaultOperator - df?: string - expand_wildcards?: ExpandWildcards - from?: long - ignore_unavailable?: boolean - lenient?: boolean - pipeline?: string - preference?: string - query_on_query_string?: string - refresh?: boolean - request_cache?: boolean - requests_per_second?: long - routing?: Routing - scroll?: Time - scroll_size?: long - search_timeout?: Time - search_type?: SearchType - size?: long - slices?: long - sort?: Array - source_enabled?: boolean - source_excludes?: Fields - source_includes?: Fields - stats?: Array - terminate_after?: long - timeout?: Time - version?: boolean - version_type?: boolean - wait_for_active_shards?: WaitForActiveShards - wait_for_completion?: boolean - body?: { - max_docs?: long - query?: QueryContainer - script?: Script - slice?: SlicedScroll - conflicts?: Conflicts - } +export interface WatcherStatsRequest extends RequestBase { + metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[] + emit_stacktraces?: boolean } -export interface UpdateByQueryResponse extends ResponseBase { - batches?: long - failures?: Array - noops?: long - deleted?: long - requests_per_second?: float - retries?: Retries - task?: TaskId - timed_out?: boolean - took?: long - total?: long - updated?: long - version_conflicts?: long - throttled_millis?: ulong - throttled_until_millis?: ulong +export interface WatcherStatsResponse { + cluster_name: Name + manually_stopped: boolean + stats: WatcherStatsWatcherNodeStats[] + _nodes: NodeStatistics } -export interface UpdateByQueryRethrottleNode { - attributes: Record - host: string - transport_address: string - ip: string - name: Name - roles: Array - tasks: Record +export interface WatcherStatsWatchRecordQueuedStats { + execution_time: DateString } -export interface UpdateByQueryRethrottleRequest extends RequestBase { - task_id: Id - requests_per_second?: long +export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQueuedStats { + execution_phase: WatcherExecutionPhase + triggered_time: DateString + executed_actions?: string[] + watch_id: Id + watch_record_id: Id } -export interface UpdateByQueryRethrottleResponse extends ResponseBase { - nodes: Record -} +export type WatcherStatsWatcherMetric = 
'_all' | 'queued_watches' | 'current_watches' | 'pending_watches' -export interface UpdateDatafeedRequest extends RequestBase { - datafeed_id: Id - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - body: { - aggregations?: Record - chunking_config?: ChunkingConfig - delayed_data_check_config?: DelayedDataCheckConfig - frequency?: Time - indexes?: Indices - indices?: Indices - indices_options?: DatafeedIndicesOptions - job_id?: Id - max_empty_searches?: integer - query?: QueryContainer - query_delay?: Time - script_fields?: Record - scroll_size?: integer - } +export interface WatcherStatsWatcherNodeStats { + current_watches?: WatcherStatsWatchRecordStats[] + execution_thread_pool: WatcherExecutionThreadPool + queued_watches?: WatcherStatsWatchRecordQueuedStats[] + watch_count: long + watcher_state: WatcherStatsWatcherState + node_id: Id } -export interface UpdateDatafeedResponse extends ResponseBase { - aggregations?: Record - chunking_config?: ChunkingConfig - datafeed_id: Id - frequency?: Time - indices: Indices - job_id: string - max_empty_searches?: integer - query: QueryContainer - query_delay: Time - script_fields?: Record - scroll_size: integer - indices_options: DatafeedIndicesOptions - delayed_data_check_config: DelayedDataCheckConfig -} +export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping' -export interface UpdateFilterRequest extends RequestBase { - filter_id: Id - body: { - add_items?: Array - description?: string - remove_items?: Array - } +export interface WatcherStopRequest extends RequestBase { } -export interface UpdateFilterResponse extends ResponseBase { - description: string - filter_id: string - items: Array +export interface WatcherStopResponse extends AcknowledgedResponseBase { } -export interface UpdateIndexSettingsRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - flat_settings?: boolean - ignore_unavailable?: boolean - master_timeout?: Time - preserve_existing?: boolean - timeout?: Time - body: { - index?: Record - refresh_interval?: Time - number_of_replicas?: integer - } +export interface XpackInfoBuildInformation { + date: DateString + hash: string } -export interface UpdateIndexSettingsResponse extends AcknowledgedResponseBase { +export interface XpackInfoFeature { + available: boolean + description?: string + enabled: boolean + native_code_info?: XpackInfoNativeCodeInformation +} + +export interface XpackInfoFeatures { + aggregate_metric: XpackInfoFeature + analytics: XpackInfoFeature + ccr: XpackInfoFeature + data_frame?: XpackInfoFeature + data_science?: XpackInfoFeature + data_streams: XpackInfoFeature + data_tiers: XpackInfoFeature + enrich: XpackInfoFeature + eql: XpackInfoFeature + flattened?: XpackInfoFeature + frozen_indices: XpackInfoFeature + graph: XpackInfoFeature + ilm: XpackInfoFeature + logstash: XpackInfoFeature + ml: XpackInfoFeature + monitoring: XpackInfoFeature + rollup: XpackInfoFeature + runtime_fields?: XpackInfoFeature + searchable_snapshots: XpackInfoFeature + security: XpackInfoFeature + slm: XpackInfoFeature + spatial: XpackInfoFeature + sql: XpackInfoFeature + transform: XpackInfoFeature + vectors: XpackInfoFeature + voting_only: XpackInfoFeature + watcher: XpackInfoFeature +} + +export interface XpackInfoMinimalLicenseInformation { + expiry_date_in_millis: EpochMillis + mode: LicenseLicenseType + status: LicenseLicenseStatus + type: LicenseLicenseType + uid: 
string } -export interface UpdateJobRequest extends RequestBase { - job_id: Id - body: { - allow_lazy_open?: boolean - analysis_limits?: AnalysisMemoryLimit - background_persist_interval?: Time - custom_settings?: Record - description?: string - model_plot_config?: ModelPlotConfigEnabled - model_snapshot_retention_days?: long - renormalization_window_days?: long - results_retention_days?: long - groups?: Array - } +export interface XpackInfoNativeCodeInformation { + build_hash: string + version: VersionString } -export interface UpdateJobResponse extends ResponseBase { +export interface XpackInfoRequest extends RequestBase { + categories?: string[] } -export interface UpdateModelSnapshotRequest extends RequestBase { - job_id: Id - snapshot_id: Id - body: { - description?: string - retain?: boolean - } +export interface XpackInfoResponse { + build: XpackInfoBuildInformation + features: XpackInfoFeatures + license: XpackInfoMinimalLicenseInformation + tagline: string } -export interface UpdateModelSnapshotResponse extends AcknowledgedResponseBase { - model: ModelSnapshot +export interface XpackUsageAnalytics extends XpackUsageBase { + stats: XpackUsageAnalyticsStatistics } -export interface UpdateRequest extends RequestBase { - id: Id - index: IndexName - type?: Type - if_primary_term?: long - if_seq_no?: SequenceNumber - lang?: string - refresh?: Refresh - require_alias?: boolean - retry_on_conflict?: long - routing?: Routing - source_enabled?: boolean - timeout?: Time - wait_for_active_shards?: WaitForActiveShards - _source?: boolean | string | Array - _source_excludes?: Fields - _source_includes?: Fields - body: { - detect_noop?: boolean - doc?: TPartialDocument - doc_as_upsert?: boolean - script?: Script - scripted_upsert?: boolean - _source?: boolean | SourceFilter - upsert?: TDocument - } +export interface XpackUsageAnalyticsStatistics { + boxplot_usage: long + cumulative_cardinality_usage: long + string_stats_usage: long + top_metrics_usage: long + t_test_usage: long + moving_percentiles_usage: long + normalize_usage: long + rate_usage: long + multi_terms_usage?: long } -export interface UpdateResponse extends WriteResponseBase { - get?: InlineGet +export interface XpackUsageAudit extends XpackUsageFeatureToggle { + outputs?: string[] } -export interface UpdateTransformRequest extends RequestBase { - transform_id: Name - defer_validation?: boolean - body: { - description?: string - dest?: TransformDestination - frequency?: Time - source?: TransformSource - sync?: TransformSyncContainer - } +export interface XpackUsageBase { + available: boolean + enabled: boolean } -export interface UpdateTransformResponse extends ResponseBase { - create_time: long - create_time_date_time: DateString - description: string - dest: TransformDestination - frequency: Time - id: Id - pivot: TransformPivot - source: TransformSource - sync: TransformSyncContainer - version: VersionString +export interface XpackUsageBaseUrlConfig { + url_name: string + url_value: string } -export interface UppercaseProcessor extends ProcessorBase { - field: Field - ignore_missing?: boolean - target_field?: Field +export interface XpackUsageCcr extends XpackUsageBase { + auto_follow_patterns_count: integer + follower_indices_count: integer } -export interface UppercaseTokenFilter extends TokenFilterBase { +export interface XpackUsageCounter { + active: long + total: long } -export type Uri = string +export interface XpackUsageCustomSettings { + custom_urls?: XpackUsageUrlConfig[] + created_by?: string + job_tags?: Record +} 
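A minimal sketch of reading the XpackInfoResponse shape above at runtime — assuming the client still exposes xpack.info as it did in the 7.x releases, and that the generated declarations are importable from './api/types'; none of this code is part of the patch:

    import { Client } from '@elastic/elasticsearch'
    import type { XpackInfoResponse } from './api/types'

    const client = new Client({ node: 'http://localhost:9200/' })

    async function printLicense (): Promise<void> {
      // The response body is untyped on this client version,
      // so cast it to the generated response shape.
      const { body } = await client.xpack.info()
      const info = body as XpackInfoResponse
      console.log(info.tagline, info.license.mode, info.features.watcher.enabled)
    }

The fields accessed here (tagline, license.mode, features.watcher.enabled) all come straight from the XpackInfoResponse, XpackInfoMinimalLicenseInformation, and XpackInfoFeatures declarations added in this diff.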
-export type UrlConfig = BaseUrlConfig | KibanaUrlConfig +export interface XpackUsageDataStreams extends XpackUsageBase { + data_streams: long + indices_count: long +} -export interface UrlDecodeProcessor extends ProcessorBase { - field: Field - ignore_missing?: boolean - target_field?: Field +export interface XpackUsageDataTierPhaseStatistics { + node_count: long + index_count: long + total_shard_count: long + primary_shard_count: long + doc_count: long + total_size_bytes: long + primary_size_bytes: long + primary_shard_size_avg_bytes: long + primary_shard_size_median_bytes: long + primary_shard_size_mad_bytes: long } -export interface UsageCount { - active: long - total: long +export interface XpackUsageDataTiers extends XpackUsageBase { + data_warm: XpackUsageDataTierPhaseStatistics + data_frozen?: XpackUsageDataTierPhaseStatistics + data_cold: XpackUsageDataTierPhaseStatistics + data_content: XpackUsageDataTierPhaseStatistics + data_hot: XpackUsageDataTierPhaseStatistics } -export interface UserAgentProcessor extends ProcessorBase { - field: Field - ignore_missing: boolean - options: Array - regex_file: string - target_field: Field +export interface XpackUsageDatafeed { + count: long } -export type UserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD' - -export interface UserIndicesPrivileges { - field_security?: FieldSecuritySettings - names: Array - privileges: Array - query?: QueryUserPrivileges - allow_restricted_indices: boolean +export interface XpackUsageEql extends XpackUsageBase { + features: XpackUsageEqlFeatures + queries: Record } -export interface UserRealm { - name: string - type: string +export interface XpackUsageEqlFeatures { + join: uint + joins: XpackUsageEqlFeaturesJoin + keys: XpackUsageEqlFeaturesKeys + event: uint + pipes: XpackUsageEqlFeaturesPipes + sequence: uint + sequences: XpackUsageEqlFeaturesSequences } -export type Uuid = string - -export interface ValidateDetectorRequest extends RequestBase { - body: Detector +export interface XpackUsageEqlFeaturesJoin { + join_queries_two: uint + join_queries_three: uint + join_until: uint + join_queries_five_or_more: uint + join_queries_four: uint } -export interface ValidateDetectorResponse extends AcknowledgedResponseBase { +export interface XpackUsageEqlFeaturesKeys { + join_keys_two: uint + join_keys_one: uint + join_keys_three: uint + join_keys_five_or_more: uint + join_keys_four: uint } -export interface ValidateJobRequest extends RequestBase { - body: { - job_id?: Id - analysis_config?: AnalysisConfig - analysis_limits?: AnalysisLimits - data_description?: DataDescription - description?: string - model_plot?: ModelPlotConfig - model_snapshot_retention_days?: long - results_index_name?: IndexName - } +export interface XpackUsageEqlFeaturesPipes { + pipe_tail: uint + pipe_head: uint } -export interface ValidateJobResponse extends AcknowledgedResponseBase { +export interface XpackUsageEqlFeaturesSequences { + sequence_queries_three: uint + sequence_queries_four: uint + sequence_queries_two: uint + sequence_until: uint + sequence_queries_five_or_more: uint + sequence_maxspan: uint } -export interface ValidateQueryRequest extends RequestBase { - index?: Indices - type?: Types - allow_no_indices?: boolean - all_shards?: boolean - analyzer?: string - analyze_wildcard?: boolean - default_operator?: DefaultOperator - df?: string - expand_wildcards?: ExpandWildcards - explain?: boolean - ignore_unavailable?: boolean - lenient?: boolean - 
query_on_query_string?: string - rewrite?: boolean - q?: string - body?: { - query?: QueryContainer - } +export interface XpackUsageFeatureToggle { + enabled: boolean } -export interface ValidateQueryResponse extends ResponseBase { - explanations?: Array - _shards?: ShardStatistics - valid: boolean - error?: string +export interface XpackUsageFlattened extends XpackUsageBase { + field_count: integer } -export interface ValidationExplanation { - error?: string - explanation?: string - index: IndexName - valid: boolean +export interface XpackUsageFrozenIndices extends XpackUsageBase { + indices_count: long } -export interface ValueAggregate extends AggregateBase { - value: double - value_as_string?: string +export interface XpackUsageIlm { + policy_count: integer + policy_stats: XpackUsageIlmPolicyStatistics[] } -export interface ValueCountAggregation extends FormattableMetricAggregation { +export interface XpackUsageIlmPolicyStatistics { + indices_managed: integer + phases: IlmPhases } -export type ValueType = 'string' | 'long' | 'double' | 'number' | 'date' | 'date_nanos' | 'ip' | 'numeric' | 'geo_point' | 'boolean' - -export interface VariableWidthHistogramAggregation { - field?: Field - buckets?: integer - shard_size?: integer - initial_buffer?: integer +export interface XpackUsageIpFilter { + http: boolean + transport: boolean } -export interface VectorUsage extends XPackUsage { - dense_vector_dims_avg_count: integer - dense_vector_fields_count: integer - sparse_vector_fields_count?: integer +export interface XpackUsageKibanaUrlConfig extends XpackUsageBaseUrlConfig { + time_range?: string } -export interface VerifyRepositoryRequest extends RequestBase { - repository: Name - master_timeout?: Time - timeout?: Time +export interface XpackUsageMachineLearning extends XpackUsageBase { + datafeeds: Record + jobs: Record + node_count: integer + data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs + inference: XpackUsageMlInference } -export interface VerifyRepositoryResponse extends ResponseBase { - nodes: Record +export interface XpackUsageMlCounter { + count: long } -export type VersionNumber = long - -export interface VersionProperty extends DocValuesPropertyBase { - type: 'version' +export interface XpackUsageMlDataFrameAnalyticsJobs { + memory_usage?: XpackUsageMlDataFrameAnalyticsJobsMemory + _all: XpackUsageMlDataFrameAnalyticsJobsCount + analysis_counts?: EmptyObject } -export type VersionString = string - -export type VersionType = 'internal' | 'external' | 'external_gte' | 'force' - -export type WaitForActiveShardOptions = 'all' - -export type WaitForActiveShards = integer | WaitForActiveShardOptions - -export type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid' - -export type WaitForStatus = 'green' | 'yellow' | 'red' - -export interface WarmerStats { - current: long - total: long - total_time?: string - total_time_in_millis: long +export interface XpackUsageMlDataFrameAnalyticsJobsCount { + count: long } -export interface Watch { - actions: Record - condition: ConditionContainer - input: InputContainer - metadata?: Record - status?: WatchStatus - throttle_period?: string - transform?: TransformContainer - trigger: TriggerContainer +export interface XpackUsageMlDataFrameAnalyticsJobsMemory { + peak_usage_bytes: MlJobStatistics } -export interface WatchRecord { - condition: ConditionContainer - input: InputContainer - messages: Array - metadata: Record - node: string - result: ExecutionResult - state: ActionExecutionState - trigger_event: 
TriggerEventResult - user: string - watch_id: Id +export interface XpackUsageMlInference { + ingest_processors: Record + trained_models: XpackUsageMlInferenceTrainedModels } -export interface WatchRecordQueuedStats { - execution_time: DateString +export interface XpackUsageMlInferenceIngestProcessor { + num_docs_processed: XpackUsageMlInferenceIngestProcessorCount + pipelines: XpackUsageMlCounter + num_failures: XpackUsageMlInferenceIngestProcessorCount + time_ms: XpackUsageMlInferenceIngestProcessorCount } -export interface WatchRecordStats extends WatchRecordQueuedStats { - execution_phase: ExecutionPhase - triggered_time: DateString - executed_actions?: Array - watch_id: Id - watch_record_id: Id +export interface XpackUsageMlInferenceIngestProcessorCount { + max: long + sum: long + min: long } -export interface WatchStatus { - actions: Record - last_checked?: DateString - last_met_condition?: DateString - state: ActivationState - version: VersionNumber - execution_state?: string +export interface XpackUsageMlInferenceTrainedModels { + estimated_operations?: MlJobStatistics + estimated_heap_memory_usage_bytes?: MlJobStatistics + count?: XpackUsageMlInferenceTrainedModelsCount + _all: XpackUsageMlCounter } -export interface WatcherActionTotalsUsage { +export interface XpackUsageMlInferenceTrainedModelsCount { total: long - total_time_in_ms: long + prepackaged: long + other: long + regression: long + classification: long } -export interface WatcherActionsUsage { - actions: Record +export interface XpackUsageMlJobForecasts { + total: long + forecasted_jobs: long } -export interface WatcherNodeStats { - current_watches?: Array - execution_thread_pool: ExecutionThreadPool - queued_watches?: Array - watch_count: long - watcher_state: WatcherState - node_id: Id +export interface XpackUsageMonitoring extends XpackUsageBase { + collection_enabled: boolean + enabled_exporters: Record } -export type WatcherState = 'stopped' | 'starting' | 'started' | 'stopping' - -export interface WatcherStatsRequest extends RequestBase { - metric?: Metrics - emit_stacktraces?: boolean +export interface XpackUsageQuery { + count?: integer + failed?: integer + paging?: integer + total?: integer } -export interface WatcherStatsResponse extends ResponseBase { - cluster_name: string - manually_stopped: boolean - stats: Array - _nodes: NodeStatistics +export interface XpackUsageRealm extends XpackUsageBase { + name?: string[] + order?: long[] + size?: long[] + cache?: XpackUsageRealmCache[] + has_authorization_realms?: boolean[] + has_default_username_pattern?: boolean[] + has_truststore?: boolean[] + is_authentication_delegated?: boolean[] } -export interface WatcherUsage extends XPackUsage { - execution: WatcherActionsUsage - watch: WatcherWatchUsage - count: UsageCount +export interface XpackUsageRealmCache { + size: long } -export interface WatcherWatchTriggerScheduleUsage extends UsageCount { - cron: UsageCount - _all: UsageCount +export interface XpackUsageRequest extends RequestBase { + master_timeout?: Time } -export interface WatcherWatchTriggerUsage { - schedule?: WatcherWatchTriggerScheduleUsage - _all: UsageCount +export interface XpackUsageResponse { + aggregate_metric: XpackUsageBase + analytics: XpackUsageAnalytics + watcher: XpackUsageWatcher + ccr: XpackUsageCcr + data_frame?: XpackUsageBase + data_science?: XpackUsageBase + data_streams?: XpackUsageDataStreams + data_tiers: XpackUsageDataTiers + enrich?: XpackUsageBase + eql: XpackUsageEql + flattened?: XpackUsageFlattened + frozen_indices: 
XpackUsageFrozenIndices + graph: XpackUsageBase + ilm: XpackUsageIlm + logstash: XpackUsageBase + ml: XpackUsageMachineLearning + monitoring: XpackUsageMonitoring + rollup: XpackUsageBase + runtime_fields?: XpackUsageRuntimeFieldTypes + spatial: XpackUsageBase + searchable_snapshots: XpackUsageSearchableSnapshots + security: XpackUsageSecurity + slm: XpackUsageSlm + sql: XpackUsageSql + transform: XpackUsageBase + vectors: XpackUsageVector + voting_only: XpackUsageBase +} + +export interface XpackUsageRoleMapping { + enabled: integer + size: integer } -export interface WatcherWatchUsage { - input: Record - condition?: Record - action?: Record - trigger: WatcherWatchTriggerUsage +export interface XpackUsageRuntimeFieldTypes extends XpackUsageBase { + field_types: XpackUsageRuntimeFieldsType[] } -export interface WebhookActionResult { - request: HttpInputRequestResult - response?: HttpInputResponseResult +export interface XpackUsageRuntimeFieldsType { + chars_max: long + chars_total: long + count: long + doc_max: long + doc_total: long + index_count: long + lang: string[] + lines_max: long + lines_total: long + name: Field + scriptless_count: long + shadowed_count: long + source_max: long + source_total: long } -export interface WeightedAverageAggregation extends Aggregation { - format?: string - value?: WeightedAverageValue - value_type?: ValueType - weight?: WeightedAverageValue +export interface XpackUsageSearchableSnapshots extends XpackUsageBase { + indices_count: integer + full_copy_indices_count?: integer + shared_cache_indices_count?: integer } -export interface WeightedAverageValue { - field?: Field - missing?: double - script?: Script +export interface XpackUsageSecurity extends XpackUsageBase { + api_key_service: XpackUsageFeatureToggle + anonymous: XpackUsageFeatureToggle + audit: XpackUsageAudit + fips_140: XpackUsageFeatureToggle + ipfilter: XpackUsageIpFilter + realms: Record + role_mapping: Record + roles: XpackUsageSecurityRoles + ssl: XpackUsageSsl + system_key?: XpackUsageFeatureToggle + token_service: XpackUsageFeatureToggle + operator_privileges: XpackUsageBase } -export interface WhitespaceTokenizer extends TokenizerBase { - max_token_length: integer +export interface XpackUsageSecurityRoles { + native: XpackUsageSecurityRolesNative + dls: XpackUsageSecurityRolesDls + file: XpackUsageSecurityRolesFile } -export interface WildcardProperty extends DocValuesPropertyBase { - type: 'wildcard' +export interface XpackUsageSecurityRolesDls { + bit_set_cache: XpackUsageSecurityRolesDlsBitSetCache } -export interface WildcardQuery extends QueryBase { - rewrite?: MultiTermQueryRewrite - value: string +export interface XpackUsageSecurityRolesDlsBitSetCache { + count: integer + memory?: ByteSize + memory_in_bytes: ulong } -export interface WordDelimiterGraphTokenFilter extends TokenFilterBase { - adjust_offsets: boolean - catenate_all: boolean - catenate_numbers: boolean - catenate_words: boolean - generate_number_parts: boolean - generate_word_parts: boolean - preserve_original: boolean - protected_words: Array - protected_words_path: string - split_on_case_change: boolean - split_on_numerics: boolean - stem_english_possessive: boolean - type_table: Array - type_table_path: string +export interface XpackUsageSecurityRolesFile { + dls: boolean + fls: boolean + size: long } -export interface WordDelimiterTokenFilter extends TokenFilterBase { - catenate_all: boolean - catenate_numbers: boolean - catenate_words: boolean - generate_number_parts: boolean - generate_word_parts: boolean - 
preserve_original: boolean - protected_words: Array - protected_words_path: string - split_on_case_change: boolean - split_on_numerics: boolean - stem_english_possessive: boolean - type_table: Array - type_table_path: string +export interface XpackUsageSecurityRolesNative { + dls: boolean + fls: boolean + size: long } -export interface WriteResponseBase extends ResponseBase { - _id: Id - _index: IndexName - _primary_term: long - result: Result - _seq_no: SequenceNumber - _shards: ShardStatistics - _type?: Type - _version: VersionNumber - forced_refresh?: boolean - error?: ErrorCause +export interface XpackUsageSlm extends XpackUsageBase { + policy_count?: integer + policy_stats?: SlmStatistics } -export interface XPackBuildInformation { - date: DateString - hash: string +export interface XpackUsageSql extends XpackUsageBase { + features: Record + queries: Record } -export interface XPackFeature { - available: boolean - description?: string - enabled: boolean - native_code_info?: NativeCodeInformation -} - -export interface XPackFeatures { - aggregate_metric: XPackFeature - analytics: XPackFeature - ccr: XPackFeature - data_frame?: XPackFeature - data_science?: XPackFeature - data_streams: XPackFeature - data_tiers: XPackFeature - enrich: XPackFeature - eql: XPackFeature - flattened?: XPackFeature - frozen_indices: XPackFeature - graph: XPackFeature - ilm: XPackFeature - logstash: XPackFeature - ml: XPackFeature - monitoring: XPackFeature - rollup: XPackFeature - runtime_fields?: XPackFeature - searchable_snapshots: XPackFeature - security: XPackFeature - slm: XPackFeature - spatial: XPackFeature - sql: XPackFeature - transform: XPackFeature - vectors: XPackFeature - voting_only: XPackFeature - watcher: XPackFeature -} - -export interface XPackInfoRequest extends RequestBase { - categories?: Array -} - -export interface XPackInfoResponse extends ResponseBase { - build: XPackBuildInformation - features: XPackFeatures - license: MinimalLicenseInformation - tagline: string +export interface XpackUsageSsl { + http: XpackUsageFeatureToggle + transport: XpackUsageFeatureToggle } -export interface XPackRole { - cluster: Array - indices: Array - metadata: Record - run_as: Array - transient_metadata: TransientMetadata - applications: Array - role_templates?: Array -} +export type XpackUsageUrlConfig = XpackUsageBaseUrlConfig | XpackUsageKibanaUrlConfig -export interface XPackRoleMapping { - enabled: boolean - metadata: Record - roles: Array - rules: RoleMappingRuleBase +export interface XpackUsageVector extends XpackUsageBase { + dense_vector_dims_avg_count: integer + dense_vector_fields_count: integer + sparse_vector_fields_count?: integer } -export interface XPackUsage { - available: boolean - enabled: boolean +export interface XpackUsageWatcher extends XpackUsageBase { + execution: XpackUsageWatcherActions + watch: XpackUsageWatcherWatch + count: XpackUsageCounter } -export interface XPackUsageRequest extends RequestBase { - master_timeout?: Time +export interface XpackUsageWatcherActionTotals { + total: long + total_time_in_ms: long } -export interface XPackUsageResponse extends ResponseBase { - aggregate_metric: XPackUsage - analytics: AnalyticsUsage - watcher: WatcherUsage - ccr: CcrUsage - data_frame?: XPackUsage - data_science?: XPackUsage - data_streams?: DataStreamsUsage - data_tiers: DataTiersUsage - enrich?: XPackUsage - eql: EqlUsage - flattened?: FlattenedUsage - frozen_indices: FrozenIndicesUsage - graph: XPackUsage - ilm: IlmUsage - logstash: XPackUsage - ml: MachineLearningUsage - 
monitoring: MonitoringUsage - rollup: XPackUsage - runtime_fields?: RuntimeFieldsUsage - spatial: XPackUsage - searchable_snapshots: SearchableSnapshotsUsage - security: SecurityUsage - slm: SlmUsage - sql: SqlUsage - transform: XPackUsage - vectors: VectorUsage - voting_only: XPackUsage -} - -export interface XPackUser { - email?: string - full_name?: string - metadata: Record - roles: Array - username: string - enabled: boolean +export interface XpackUsageWatcherActions { + actions: Record } -export type ZeroTermsQuery = 'all' | 'none' - -export type double = number - -export type float = number +export interface XpackUsageWatcherWatch { + input: Record + condition?: Record + action?: Record + trigger: XpackUsageWatcherWatchTrigger +} -export type integer = number +export interface XpackUsageWatcherWatchTrigger { + schedule?: XpackUsageWatcherWatchTriggerSchedule + _all: XpackUsageCounter +} -export type long = number +export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter { + cron: XpackUsageCounter + _all: XpackUsageCounter +} -export type uint = number +export interface SpecUtilsAdditionalProperties { +} -export type ulong = number +export interface SpecUtilsCommonQueryParameters { + error_trace?: boolean + filter_path?: string | string[] + human?: boolean + pretty?: boolean + source_query_string?: string +} -export interface CommonCatQueryParameters { +export interface SpecUtilsCommonCatQueryParameters { format?: string h?: Names help?: boolean local?: boolean master_timeout?: Time - s?: Array + s?: string[] v?: boolean } -export interface CommonQueryParameters { - error_trace?: boolean - filter_path?: string | Array - human?: boolean - pretty?: boolean - source_query_string?: string -} - From 8d7136241a4a6ff1a985c9f6506aee47ed341cb5 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 12 May 2021 18:19:10 +0200 Subject: [PATCH 014/647] Bumped v8.0.0-canary.9 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 7db5df52b..feaba8ed5 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.8", + "versionCanary": "8.0.0-canary.9", "keywords": [ "elasticsearch", "elastic", From bf02b3d0e51612369abbc407b6c30e7c4b61e01b Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Wed, 19 May 2021 11:09:19 +0200 Subject: [PATCH 015/647] Add top level type error to error message (#1468) --- lib/errors.js | 5 +++-- test/types/kibana.test-d.ts | 12 ++++++------ test/types/new-types.test-d.ts | 12 ++++++------ test/unit/errors.test.js | 26 ++++++++++++++++++++++++-- 4 files changed, 39 insertions(+), 16 deletions(-) diff --git a/lib/errors.js b/lib/errors.js index 657419c8e..cf2f296d8 100644 --- a/lib/errors.js +++ b/lib/errors.js @@ -92,9 +92,10 @@ class ResponseError extends ElasticsearchClientError { this.name = 'ResponseError' if (meta.body && meta.body.error && meta.body.status) { if (Array.isArray(meta.body.error.root_cause)) { - this.message = meta.body.error.root_cause.map(entry => `[${entry.type}] Reason: ${entry.reason}`).join('; ') + this.message = meta.body.error.type + ': ' + this.message += meta.body.error.root_cause.map(entry => `[${entry.type}] Reason: ${entry.reason}`).join('; ') } else { - this.message = 'Response Error' + this.message = meta.body.error.type } } else { this.message = 'Response Error' diff 
--git a/test/types/kibana.test-d.ts b/test/types/kibana.test-d.ts index 139d21035..6385ebd02 100644 --- a/test/types/kibana.test-d.ts +++ b/test/types/kibana.test-d.ts @@ -66,27 +66,27 @@ client.on('resurrect', (err, meta) => { // Check API returned type and optional parameters { const promise = client.info() - expectType>>(promise) + expectType>>(promise) promise - .then(result => expectType>(result)) + .then(result => expectType>(result)) .catch((err: ApiError) => expectType(err)) expectType(promise.abort()) } { const promise = client.info({ pretty: true }) - expectType>>(promise) + expectType>>(promise) promise - .then(result => expectType>(result)) + .then(result => expectType>(result)) .catch((err: ApiError) => expectType(err)) expectType(promise.abort()) } { const promise = client.info({ pretty: true }, { ignore: [404] }) - expectType>>(promise) + expectType>>(promise) promise - .then(result => expectType>(result)) + .then(result => expectType>(result)) .catch((err: ApiError) => expectType(err)) expectType(promise.abort()) } diff --git a/test/types/new-types.test-d.ts b/test/types/new-types.test-d.ts index 9d35666a5..4a237dcf4 100644 --- a/test/types/new-types.test-d.ts +++ b/test/types/new-types.test-d.ts @@ -66,27 +66,27 @@ client.on('resurrect', (err, meta) => { // Check API returned type and optional parameters { const promise = client.info() - expectType>>(promise) + expectType>>(promise) promise - .then(result => expectType>(result)) + .then(result => expectType>(result)) .catch((err: ApiError) => expectType(err)) expectType(promise.abort()) } { const promise = client.info({ pretty: true }) - expectType>>(promise) + expectType>>(promise) promise - .then(result => expectType>(result)) + .then(result => expectType>(result)) .catch((err: ApiError) => expectType(err)) expectType(promise.abort()) } { const promise = client.info({ pretty: true }, { ignore: [404] }) - expectType>>(promise) + expectType>>(promise) promise - .then(result => expectType>(result)) + .then(result => expectType>(result)) .catch((err: ApiError) => expectType(err)) expectType(promise.abort()) } diff --git a/test/unit/errors.test.js b/test/unit/errors.test.js index 7c3f706a1..783f1b2f6 100644 --- a/test/unit/errors.test.js +++ b/test/unit/errors.test.js @@ -131,7 +131,7 @@ test('ResponseError with meaningful message / 1', t => { headers: {} } const err = new errors.ResponseError(meta) - t.strictEqual(err.message, '[index_not_found_exception] Reason: no such index [foo]') + t.strictEqual(err.message, 'index_not_found_exception: [index_not_found_exception] Reason: no such index [foo]') t.strictEqual(err.toString(), JSON.stringify(meta.body)) t.end() }) @@ -171,7 +171,29 @@ test('ResponseError with meaningful message / 2', t => { headers: {} } const err = new errors.ResponseError(meta) - t.strictEqual(err.message, '[index_not_found_exception] Reason: no such index [foo]; [nested_cause] Reason: this is a nested cause') + t.strictEqual(err.message, 'index_not_found_exception: [index_not_found_exception] Reason: no such index [foo]; [nested_cause] Reason: this is a nested cause') + t.strictEqual(err.toString(), JSON.stringify(meta.body)) + t.end() +}) + +test('ResponseError with meaningful message / 3', t => { + const meta = { + body: { + error: { + type: 'index_not_found_exception', + reason: 'no such index [foo]', + 'resource.type': 'index_expression', + 'resource.id': 'foo', + index_uuid: '_na_', + index: 'foo' + }, + status: 404 + }, + statusCode: 404, + headers: {} + } + const err = new 
errors.ResponseError(meta) + t.strictEqual(err.message, 'index_not_found_exception') t.strictEqual(err.toString(), JSON.stringify(meta.body)) t.end() }) From 12ce270239d03520584067ce4d5425c696e6cea5 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 19 May 2021 11:14:31 +0200 Subject: [PATCH 016/647] Updated type definitions --- api/kibana.d.ts | 35 +- api/new.d.ts | 138 ++++---- api/types.d.ts | 845 +++++++++++++++++++++++++----------------------- 3 files changed, 522 insertions(+), 496 deletions(-) diff --git a/api/kibana.d.ts b/api/kibana.d.ts index 6fbb8d54c..af2a8c0e8 100644 --- a/api/kibana.d.ts +++ b/api/kibana.d.ts @@ -285,13 +285,13 @@ interface KibanaClient { simulate(params?: T.IngestSimulatePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> } license: { - delete(params?: T.LicenseDeleteLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - get(params?: T.LicenseGetLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - getBasicStatus(params?: T.LicenseGetBasicLicenseStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - getTrialStatus(params?: T.LicenseGetTrialLicenseStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - post(params?: T.LicensePostLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - postStartBasic(params?: T.LicenseStartBasicLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - postStartTrial(params?: T.LicenseStartTrialLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> + delete(params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise> + get(params?: T.LicenseGetRequest, options?: TransportRequestOptions): TransportRequestPromise> + getBasicStatus(params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> + getTrialStatus(params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> + post(params?: T.LicensePostRequest, options?: TransportRequestOptions): TransportRequestPromise> + postStartBasic(params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): TransportRequestPromise> + postStartTrial(params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): TransportRequestPromise> } logstash: { deletePipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> @@ -375,11 +375,11 @@ interface KibanaClient { msearchTemplate(params?: T.MsearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> mtermvectors(params?: T.MtermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise> nodes: { - hotThreads(params?: T.NodesNodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise> - info(params?: T.NodesNodesInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> + hotThreads(params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise> + info(params?: T.NodesInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> reloadSecureSettings(params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> - stats(params?: T.NodesNodesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - usage(params?: T.NodesNodesUsageRequest, options?: TransportRequestOptions): TransportRequestPromise> + 
stats(params?: T.NodesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> + usage(params?: T.NodesUsageRequest, options?: TransportRequestOptions): TransportRequestPromise> } openPointInTime(params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): TransportRequestPromise> ping(params?: T.PingRequest, options?: TransportRequestOptions): TransportRequestPromise> @@ -477,18 +477,19 @@ interface KibanaClient { verifyRepository(params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise> } sql: { - clearCursor(params?: T.SqlClearSqlCursorRequest, options?: TransportRequestOptions): TransportRequestPromise> - query(params?: T.SqlQuerySqlRequest, options?: TransportRequestOptions): TransportRequestPromise> - translate(params?: T.SqlTranslateSqlRequest, options?: TransportRequestOptions): TransportRequestPromise> + clearCursor(params?: T.SqlClearCursorRequest, options?: TransportRequestOptions): TransportRequestPromise> + query(params?: T.SqlQueryRequest, options?: TransportRequestOptions): TransportRequestPromise> + translate(params?: T.SqlTranslateRequest, options?: TransportRequestOptions): TransportRequestPromise> } ssl: { certificates(params?: T.SslGetCertificatesRequest, options?: TransportRequestOptions): TransportRequestPromise> } tasks: { - cancel(params?: T.TaskCancelTasksRequest, options?: TransportRequestOptions): TransportRequestPromise> - get(params: T.TaskGetTaskRequest, options?: TransportRequestOptions): TransportRequestPromise> - list(params?: T.TaskListTasksRequest, options?: TransportRequestOptions): TransportRequestPromise> + cancel(params?: T.TaskCancelRequest, options?: TransportRequestOptions): TransportRequestPromise> + get(params: T.TaskGetRequest, options?: TransportRequestOptions): TransportRequestPromise> + list(params?: T.TaskListRequest, options?: TransportRequestOptions): TransportRequestPromise> } + termsEnum(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> termvectors(params: T.TermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise> textStructure: { findStructure(params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): TransportRequestPromise> diff --git a/api/new.d.ts b/api/new.d.ts index 692d929ad..6b9dfed5e 100644 --- a/api/new.d.ts +++ b/api/new.d.ts @@ -750,34 +750,34 @@ declare class Client { simulate(params: T.IngestSimulatePipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } license: { - delete(params?: T.LicenseDeleteLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - delete(callback: callbackFn): TransportRequestCallback - delete(params: T.LicenseDeleteLicenseRequest, callback: callbackFn): TransportRequestCallback - delete(params: T.LicenseDeleteLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get(params?: T.LicenseGetLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - get(callback: callbackFn): TransportRequestCallback - get(params: T.LicenseGetLicenseRequest, callback: callbackFn): TransportRequestCallback - get(params: T.LicenseGetLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getBasicStatus(params?: T.LicenseGetBasicLicenseStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - getBasicStatus(callback: callbackFn): TransportRequestCallback - getBasicStatus(params: 
T.LicenseGetBasicLicenseStatusRequest, callback: callbackFn): TransportRequestCallback - getBasicStatus(params: T.LicenseGetBasicLicenseStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTrialStatus(params?: T.LicenseGetTrialLicenseStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - getTrialStatus(callback: callbackFn): TransportRequestCallback - getTrialStatus(params: T.LicenseGetTrialLicenseStatusRequest, callback: callbackFn): TransportRequestCallback - getTrialStatus(params: T.LicenseGetTrialLicenseStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - post(params?: T.LicensePostLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - post(callback: callbackFn): TransportRequestCallback - post(params: T.LicensePostLicenseRequest, callback: callbackFn): TransportRequestCallback - post(params: T.LicensePostLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postStartBasic(params?: T.LicenseStartBasicLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - postStartBasic(callback: callbackFn): TransportRequestCallback - postStartBasic(params: T.LicenseStartBasicLicenseRequest, callback: callbackFn): TransportRequestCallback - postStartBasic(params: T.LicenseStartBasicLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postStartTrial(params?: T.LicenseStartTrialLicenseRequest, options?: TransportRequestOptions): TransportRequestPromise> - postStartTrial(callback: callbackFn): TransportRequestCallback - postStartTrial(params: T.LicenseStartTrialLicenseRequest, callback: callbackFn): TransportRequestCallback - postStartTrial(params: T.LicenseStartTrialLicenseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + delete(params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise> + delete(callback: callbackFn): TransportRequestCallback + delete(params: T.LicenseDeleteRequest, callback: callbackFn): TransportRequestCallback + delete(params: T.LicenseDeleteRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + get(params?: T.LicenseGetRequest, options?: TransportRequestOptions): TransportRequestPromise> + get(callback: callbackFn): TransportRequestCallback + get(params: T.LicenseGetRequest, callback: callbackFn): TransportRequestCallback + get(params: T.LicenseGetRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getBasicStatus(params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> + getBasicStatus(callback: callbackFn): TransportRequestCallback + getBasicStatus(params: T.LicenseGetBasicStatusRequest, callback: callbackFn): TransportRequestCallback + getBasicStatus(params: T.LicenseGetBasicStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getTrialStatus(params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> + getTrialStatus(callback: callbackFn): TransportRequestCallback + getTrialStatus(params: T.LicenseGetTrialStatusRequest, callback: callbackFn): TransportRequestCallback + getTrialStatus(params: T.LicenseGetTrialStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + post(params?: T.LicensePostRequest, 
options?: TransportRequestOptions): TransportRequestPromise> + post(callback: callbackFn): TransportRequestCallback + post(params: T.LicensePostRequest, callback: callbackFn): TransportRequestCallback + post(params: T.LicensePostRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + postStartBasic(params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): TransportRequestPromise> + postStartBasic(callback: callbackFn): TransportRequestCallback + postStartBasic(params: T.LicensePostStartBasicRequest, callback: callbackFn): TransportRequestCallback + postStartBasic(params: T.LicensePostStartBasicRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + postStartTrial(params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): TransportRequestPromise> + postStartTrial(callback: callbackFn): TransportRequestCallback + postStartTrial(params: T.LicensePostStartTrialRequest, callback: callbackFn): TransportRequestCallback + postStartTrial(params: T.LicensePostStartTrialRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } logstash: { deletePipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> @@ -1040,26 +1040,26 @@ declare class Client { mtermvectors(params: T.MtermvectorsRequest, callback: callbackFn): TransportRequestCallback mtermvectors(params: T.MtermvectorsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback nodes: { - hotThreads(params?: T.NodesNodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise> - hotThreads(callback: callbackFn): TransportRequestCallback - hotThreads(params: T.NodesNodesHotThreadsRequest, callback: callbackFn): TransportRequestCallback - hotThreads(params: T.NodesNodesHotThreadsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - info(params?: T.NodesNodesInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> - info(callback: callbackFn): TransportRequestCallback - info(params: T.NodesNodesInfoRequest, callback: callbackFn): TransportRequestCallback - info(params: T.NodesNodesInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + hotThreads(params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise> + hotThreads(callback: callbackFn): TransportRequestCallback + hotThreads(params: T.NodesHotThreadsRequest, callback: callbackFn): TransportRequestCallback + hotThreads(params: T.NodesHotThreadsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + info(params?: T.NodesInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> + info(callback: callbackFn): TransportRequestCallback + info(params: T.NodesInfoRequest, callback: callbackFn): TransportRequestCallback + info(params: T.NodesInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback reloadSecureSettings(params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> reloadSecureSettings(callback: callbackFn): TransportRequestCallback reloadSecureSettings(params: T.NodesReloadSecureSettingsRequest, callback: callbackFn): TransportRequestCallback reloadSecureSettings(params: T.NodesReloadSecureSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stats(params?: 
T.NodesNodesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - stats(callback: callbackFn): TransportRequestCallback - stats(params: T.NodesNodesStatsRequest, callback: callbackFn): TransportRequestCallback - stats(params: T.NodesNodesStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - usage(params?: T.NodesNodesUsageRequest, options?: TransportRequestOptions): TransportRequestPromise> - usage(callback: callbackFn): TransportRequestCallback - usage(params: T.NodesNodesUsageRequest, callback: callbackFn): TransportRequestCallback - usage(params: T.NodesNodesUsageRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + stats(params?: T.NodesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> + stats(callback: callbackFn): TransportRequestCallback + stats(params: T.NodesStatsRequest, callback: callbackFn): TransportRequestCallback + stats(params: T.NodesStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + usage(params?: T.NodesUsageRequest, options?: TransportRequestOptions): TransportRequestPromise> + usage(callback: callbackFn): TransportRequestCallback + usage(params: T.NodesUsageRequest, callback: callbackFn): TransportRequestCallback + usage(params: T.NodesUsageRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } openPointInTime(params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): TransportRequestPromise> openPointInTime(params: T.OpenPointInTimeRequest, callback: callbackFn): TransportRequestCallback @@ -1367,18 +1367,18 @@ declare class Client { verifyRepository(params: T.SnapshotVerifyRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } sql: { - clearCursor(params?: T.SqlClearSqlCursorRequest, options?: TransportRequestOptions): TransportRequestPromise> - clearCursor(callback: callbackFn): TransportRequestCallback - clearCursor(params: T.SqlClearSqlCursorRequest, callback: callbackFn): TransportRequestCallback - clearCursor(params: T.SqlClearSqlCursorRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - query(params?: T.SqlQuerySqlRequest, options?: TransportRequestOptions): TransportRequestPromise> - query(callback: callbackFn): TransportRequestCallback - query(params: T.SqlQuerySqlRequest, callback: callbackFn): TransportRequestCallback - query(params: T.SqlQuerySqlRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - translate(params?: T.SqlTranslateSqlRequest, options?: TransportRequestOptions): TransportRequestPromise> - translate(callback: callbackFn): TransportRequestCallback - translate(params: T.SqlTranslateSqlRequest, callback: callbackFn): TransportRequestCallback - translate(params: T.SqlTranslateSqlRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + clearCursor(params?: T.SqlClearCursorRequest, options?: TransportRequestOptions): TransportRequestPromise> + clearCursor(callback: callbackFn): TransportRequestCallback + clearCursor(params: T.SqlClearCursorRequest, callback: callbackFn): TransportRequestCallback + clearCursor(params: T.SqlClearCursorRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + query(params?: T.SqlQueryRequest, options?: TransportRequestOptions): TransportRequestPromise> + query(callback: callbackFn): 
TransportRequestCallback + query(params: T.SqlQueryRequest, callback: callbackFn): TransportRequestCallback + query(params: T.SqlQueryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + translate(params?: T.SqlTranslateRequest, options?: TransportRequestOptions): TransportRequestPromise> + translate(callback: callbackFn): TransportRequestCallback + translate(params: T.SqlTranslateRequest, callback: callbackFn): TransportRequestCallback + translate(params: T.SqlTranslateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } ssl: { certificates(params?: T.SslGetCertificatesRequest, options?: TransportRequestOptions): TransportRequestPromise> @@ -1387,18 +1387,22 @@ declare class Client { certificates(params: T.SslGetCertificatesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } tasks: { - cancel(params?: T.TaskCancelTasksRequest, options?: TransportRequestOptions): TransportRequestPromise> - cancel(callback: callbackFn): TransportRequestCallback - cancel(params: T.TaskCancelTasksRequest, callback: callbackFn): TransportRequestCallback - cancel(params: T.TaskCancelTasksRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get(params: T.TaskGetTaskRequest, options?: TransportRequestOptions): TransportRequestPromise> - get(params: T.TaskGetTaskRequest, callback: callbackFn): TransportRequestCallback - get(params: T.TaskGetTaskRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - list(params?: T.TaskListTasksRequest, options?: TransportRequestOptions): TransportRequestPromise> - list(callback: callbackFn): TransportRequestCallback - list(params: T.TaskListTasksRequest, callback: callbackFn): TransportRequestCallback - list(params: T.TaskListTasksRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + cancel(params?: T.TaskCancelRequest, options?: TransportRequestOptions): TransportRequestPromise> + cancel(callback: callbackFn): TransportRequestCallback + cancel(params: T.TaskCancelRequest, callback: callbackFn): TransportRequestCallback + cancel(params: T.TaskCancelRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + get(params: T.TaskGetRequest, options?: TransportRequestOptions): TransportRequestPromise> + get(params: T.TaskGetRequest, callback: callbackFn): TransportRequestCallback + get(params: T.TaskGetRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + list(params?: T.TaskListRequest, options?: TransportRequestOptions): TransportRequestPromise> + list(callback: callbackFn): TransportRequestCallback + list(params: T.TaskListRequest, callback: callbackFn): TransportRequestCallback + list(params: T.TaskListRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } + termsEnum(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + termsEnum(callback: callbackFn): TransportRequestCallback + termsEnum(params: TODO, callback: callbackFn): TransportRequestCallback + termsEnum(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback termvectors(params: T.TermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise> termvectors(params: T.TermvectorsRequest, callback: callbackFn): TransportRequestCallback termvectors(params: T.TermvectorsRequest, options: 
TransportRequestOptions, callback: callbackFn): TransportRequestCallback diff --git a/api/types.d.ts b/api/types.d.ts index af4fa95e4..cfead6e0e 100644 --- a/api/types.d.ts +++ b/api/types.d.ts @@ -17,25 +17,25 @@ * under the License. */ -export interface BulkBulkCreateOperation extends BulkBulkOperation { +export interface BulkCreateOperation extends BulkOperation { } -export interface BulkBulkCreateResponseItem extends BulkBulkResponseItemBase { +export interface BulkCreateResponseItem extends BulkResponseItemBase { } -export interface BulkBulkDeleteOperation extends BulkBulkOperation { +export interface BulkDeleteOperation extends BulkOperation { } -export interface BulkBulkDeleteResponseItem extends BulkBulkResponseItemBase { +export interface BulkDeleteResponseItem extends BulkResponseItemBase { } -export interface BulkBulkIndexOperation extends BulkBulkOperation { +export interface BulkIndexOperation extends BulkOperation { } -export interface BulkBulkIndexResponseItem extends BulkBulkResponseItemBase { +export interface BulkIndexResponseItem extends BulkResponseItemBase { } -export interface BulkBulkOperation { +export interface BulkOperation { _id: Id _index: IndexName retry_on_conflict: integer @@ -44,39 +44,11 @@ export interface BulkBulkOperation { version_type: VersionType } -export interface BulkBulkOperationContainer { - index?: BulkBulkIndexOperation - create?: BulkBulkCreateOperation - update?: BulkBulkUpdateOperation - delete?: BulkBulkDeleteOperation -} - -export interface BulkBulkResponseItemBase { - _id?: string | null - _index: string - status: integer - error?: ErrorCause - _primary_term?: long - result?: string - _seq_no?: SequenceNumber - _shards?: ShardStatistics - _type?: string - _version?: VersionNumber - forced_refresh?: boolean - get?: InlineGet> -} - -export interface BulkBulkResponseItemContainer { - index?: BulkBulkIndexResponseItem - create?: BulkBulkCreateResponseItem - update?: BulkBulkUpdateResponseItem - delete?: BulkBulkDeleteResponseItem -} - -export interface BulkBulkUpdateOperation extends BulkBulkOperation { -} - -export interface BulkBulkUpdateResponseItem extends BulkBulkResponseItemBase { +export interface BulkOperationContainer { + index?: BulkIndexOperation + create?: BulkCreateOperation + update?: BulkUpdateOperation + delete?: BulkDeleteOperation } export interface BulkRequest extends RequestBase { @@ -91,16 +63,44 @@ export interface BulkRequest extends RequestBase { timeout?: Time wait_for_active_shards?: WaitForActiveShards require_alias?: boolean - body?: (BulkBulkOperationContainer | TSource)[] + body?: (BulkOperationContainer | TSource)[] } export interface BulkResponse { errors: boolean - items: BulkBulkResponseItemContainer[] + items: BulkResponseItemContainer[] took: long ingest_took?: long } +export interface BulkResponseItemBase { + _id?: string | null + _index: string + status: integer + error?: ErrorCause + _primary_term?: long + result?: string + _seq_no?: SequenceNumber + _shards?: ShardStatistics + _type?: string + _version?: VersionNumber + forced_refresh?: boolean + get?: InlineGet> +} + +export interface BulkResponseItemContainer { + index?: BulkIndexResponseItem + create?: BulkCreateResponseItem + update?: BulkUpdateResponseItem + delete?: BulkDeleteResponseItem +} + +export interface BulkUpdateOperation extends BulkOperation { +} + +export interface BulkUpdateResponseItem extends BulkResponseItemBase { +} + export interface ClearScrollRequest extends RequestBase { scroll_id?: Ids body?: { @@ -251,7 +251,7 @@ export 
interface DeleteByQueryRethrottleRequest extends RequestBase { requests_per_second?: long } -export interface DeleteByQueryRethrottleResponse extends TaskListTasksResponse { +export interface DeleteByQueryRethrottleResponse extends TaskListResponse { } export interface DeleteScriptRequest extends RequestBase { @@ -527,7 +527,7 @@ export interface MgetOperation { _id: MgetMultiGetId _index?: IndexName routing?: Routing - _source?: boolean | Fields | SearchTypesSourceFilter + _source?: boolean | Fields | SearchSourceFilter stored_fields?: Fields _type?: Type version?: VersionNumber @@ -561,9 +561,9 @@ export interface MsearchBody { query?: QueryDslQueryContainer from?: integer size?: integer - pit?: SearchTypesPointInTimeReference + pit?: SearchPointInTimeReference track_total_hits?: boolean | integer - suggest?: SearchTypesSuggestContainer | Record + suggest?: SearchSuggestContainer | Record } export interface MsearchHeader { @@ -857,7 +857,7 @@ export interface ReindexSource { remote?: ReindexRemoteSource size?: integer slice?: SlicedScroll - sort?: SearchTypesSort + sort?: SearchSort _source?: Fields } @@ -1001,33 +1001,33 @@ export interface SearchRequest extends RequestBase { body?: { aggs?: Record aggregations?: Record - collapse?: SearchTypesFieldCollapse + collapse?: SearchFieldCollapse explain?: boolean from?: integer - highlight?: SearchTypesHighlight + highlight?: SearchHighlight track_total_hits?: boolean | integer indices_boost?: Record[] - docvalue_fields?: SearchTypesDocValueField | (Field | SearchTypesDocValueField)[] + docvalue_fields?: SearchDocValueField | (Field | SearchDocValueField)[] min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean query?: QueryDslQueryContainer - rescore?: SearchTypesRescore | SearchTypesRescore[] + rescore?: SearchRescore | SearchRescore[] script_fields?: Record search_after?: (integer | string)[] size?: integer slice?: SlicedScroll - sort?: SearchTypesSort - _source?: boolean | Fields | SearchTypesSourceFilter + sort?: SearchSort + _source?: boolean | Fields | SearchSourceFilter fields?: (Field | DateField)[] - suggest?: SearchTypesSuggestContainer | Record + suggest?: SearchSuggestContainer | Record terminate_after?: long timeout?: string track_scores?: boolean version?: boolean seq_no_primary_term?: boolean stored_fields?: Fields - pit?: SearchTypesPointInTimeReference + pit?: SearchPointInTimeReference runtime_mappings?: MappingRuntimeFields stats?: string[] } @@ -1037,21 +1037,21 @@ export interface SearchResponse { took: long timed_out: boolean _shards: ShardStatistics - hits: SearchTypesHitsMetadata + hits: SearchHitsMetadata aggregations?: Record _clusters?: ClusterStatistics documents?: TDocument[] fields?: Record max_score?: double num_reduce_phases?: long - profile?: SearchTypesProfile + profile?: SearchProfile pit_id?: Id _scroll_id?: ScrollId - suggest?: Record[]> + suggest?: Record[]> terminated_early?: boolean } -export interface SearchTypesAggregationBreakdown { +export interface SearchAggregationBreakdown { build_aggregation: long build_aggregation_count: long build_leaf_collector: long @@ -1066,30 +1066,30 @@ export interface SearchTypesAggregationBreakdown { reduce_count: long } -export interface SearchTypesAggregationProfile { - breakdown: SearchTypesAggregationBreakdown +export interface SearchAggregationProfile { + breakdown: SearchAggregationBreakdown description: string time_in_nanos: long type: string - debug?: SearchTypesAggregationProfileDebug - children?: SearchTypesAggregationProfileDebug[] + 
debug?: SearchAggregationProfileDebug + children?: SearchAggregationProfileDebug[] } -export interface SearchTypesAggregationProfileDebug { +export interface SearchAggregationProfileDebug { } -export type SearchTypesBoundaryScanner = 'chars' | 'sentence' | 'word' +export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' -export interface SearchTypesCollector { +export interface SearchCollector { name: string reason: string time_in_nanos: long - children?: SearchTypesCollector[] + children?: SearchCollector[] } -export interface SearchTypesCompletionSuggestOption { +export interface SearchCompletionSuggestOption { collate_match?: boolean - contexts?: Record + contexts?: Record fields?: Record _id: string _index: IndexName @@ -1100,17 +1100,17 @@ export interface SearchTypesCompletionSuggestOption { text: string } -export interface SearchTypesCompletionSuggester extends SearchTypesSuggesterBase { - contexts?: Record - fuzzy?: SearchTypesSuggestFuzziness +export interface SearchCompletionSuggester extends SearchSuggesterBase { + contexts?: Record + fuzzy?: SearchSuggestFuzziness prefix?: string regex?: string skip_duplicates?: boolean } -export type SearchTypesContext = string | QueryDslGeoLocation +export type SearchContext = string | QueryDslGeoLocation -export interface SearchTypesDirectGenerator { +export interface SearchDirectGenerator { field: Field max_edits?: integer max_inspections?: float @@ -1124,65 +1124,65 @@ export interface SearchTypesDirectGenerator { suggest_mode?: SuggestMode } -export interface SearchTypesDocValueField { +export interface SearchDocValueField { field: Field format?: string } -export interface SearchTypesFieldCollapse { +export interface SearchFieldCollapse { field: Field - inner_hits?: SearchTypesInnerHits | SearchTypesInnerHits[] + inner_hits?: SearchInnerHits | SearchInnerHits[] max_concurrent_group_searches?: integer } -export interface SearchTypesFieldSort { +export interface SearchFieldSort { missing?: AggregationsMissing - mode?: SearchTypesSortMode - nested?: SearchTypesNestedSortValue - order?: SearchTypesSortOrder + mode?: SearchSortMode + nested?: SearchNestedSortValue + order?: SearchSortOrder unmapped_type?: MappingFieldType } -export interface SearchTypesGeoDistanceSortKeys { - mode?: SearchTypesSortMode +export interface SearchGeoDistanceSortKeys { + mode?: SearchSortMode distance_type?: GeoDistanceType - order?: SearchTypesSortOrder + order?: SearchSortOrder unit?: DistanceUnit } -export type SearchTypesGeoDistanceSort = SearchTypesGeoDistanceSortKeys | +export type SearchGeoDistanceSort = SearchGeoDistanceSortKeys | { [property: string]: QueryDslGeoLocation | QueryDslGeoLocation[] } -export interface SearchTypesHighlight { - fields: Record - type?: SearchTypesHighlighterType +export interface SearchHighlight { + fields: Record + type?: SearchHighlighterType boundary_chars?: string boundary_max_scan?: integer - boundary_scanner?: SearchTypesBoundaryScanner + boundary_scanner?: SearchBoundaryScanner boundary_scanner_locale?: string - encoder?: SearchTypesHighlighterEncoder - fragmenter?: SearchTypesHighlighterFragmenter + encoder?: SearchHighlighterEncoder + fragmenter?: SearchHighlighterFragmenter fragment_offset?: integer fragment_size?: integer max_fragment_length?: integer no_match_size?: integer number_of_fragments?: integer - order?: SearchTypesHighlighterOrder + order?: SearchHighlighterOrder post_tags?: string[] pre_tags?: string[] require_field_match?: boolean - tags_schema?: SearchTypesHighlighterTagsSchema + tags_schema?: 
SearchHighlighterTagsSchema highlight_query?: QueryDslQueryContainer max_analyzed_offset?: string | integer } -export interface SearchTypesHighlightField { +export interface SearchHighlightField { boundary_chars?: string boundary_max_scan?: integer - boundary_scanner?: SearchTypesBoundaryScanner + boundary_scanner?: SearchBoundaryScanner boundary_scanner_locale?: string field?: Field force_source?: boolean - fragmenter?: SearchTypesHighlighterFragmenter + fragmenter?: SearchHighlighterFragmenter fragment_offset?: integer fragment_size?: integer highlight_query?: QueryDslQueryContainer @@ -1190,26 +1190,26 @@ export interface SearchTypesHighlightField { max_fragment_length?: integer no_match_size?: integer number_of_fragments?: integer - order?: SearchTypesHighlighterOrder + order?: SearchHighlighterOrder phrase_limit?: integer post_tags?: string[] pre_tags?: string[] require_field_match?: boolean - tags_schema?: SearchTypesHighlighterTagsSchema - type?: SearchTypesHighlighterType | string + tags_schema?: SearchHighlighterTagsSchema + type?: SearchHighlighterType | string } -export type SearchTypesHighlighterEncoder = 'default' | 'html' +export type SearchHighlighterEncoder = 'default' | 'html' -export type SearchTypesHighlighterFragmenter = 'simple' | 'span' +export type SearchHighlighterFragmenter = 'simple' | 'span' -export type SearchTypesHighlighterOrder = 'score' +export type SearchHighlighterOrder = 'score' -export type SearchTypesHighlighterTagsSchema = 'styled' +export type SearchHighlighterTagsSchema = 'styled' -export type SearchTypesHighlighterType = 'plain' | 'fvh' | 'unified' +export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' -export interface SearchTypesHit { +export interface SearchHit { _index: IndexName _id: Id _score?: double @@ -1217,9 +1217,9 @@ export interface SearchTypesHit { _explanation?: ExplainExplanation fields?: Record highlight?: Record - inner_hits?: Record + inner_hits?: Record matched_queries?: string[] - _nested?: SearchTypesNestedIdentity + _nested?: SearchNestedIdentity _ignored?: string[] _shard?: string _node?: string @@ -1228,112 +1228,112 @@ export interface SearchTypesHit { _seq_no?: SequenceNumber _primary_term?: long _version?: VersionNumber - sort?: SearchTypesSortResults + sort?: SearchSortResults } -export interface SearchTypesHitsMetadata { - total: SearchTypesTotalHits | long - hits: SearchTypesHit[] +export interface SearchHitsMetadata { + total: SearchTotalHits | long + hits: SearchHit[] max_score?: double } -export interface SearchTypesInnerHits { +export interface SearchInnerHits { name?: Name size?: integer from?: integer - collapse?: SearchTypesFieldCollapse + collapse?: SearchFieldCollapse docvalue_fields?: Fields explain?: boolean - highlight?: SearchTypesHighlight + highlight?: SearchHighlight ignore_unmapped?: boolean script_fields?: Record seq_no_primary_term?: boolean fields?: Fields - sort?: SearchTypesSort - _source?: boolean | SearchTypesSourceFilter + sort?: SearchSort + _source?: boolean | SearchSourceFilter version?: boolean } -export interface SearchTypesInnerHitsMetadata { - total: SearchTypesTotalHits | long - hits: SearchTypesHit>[] +export interface SearchInnerHitsMetadata { + total: SearchTotalHits | long + hits: SearchHit>[] max_score?: double } -export interface SearchTypesInnerHitsResult { - hits: SearchTypesInnerHitsMetadata +export interface SearchInnerHitsResult { + hits: SearchInnerHitsMetadata } -export interface SearchTypesLaplaceSmoothingModel { +export interface SearchLaplaceSmoothingModel { 
alpha: double } -export interface SearchTypesLinearInterpolationSmoothingModel { +export interface SearchLinearInterpolationSmoothingModel { bigram_lambda: double trigram_lambda: double unigram_lambda: double } -export interface SearchTypesNestedIdentity { +export interface SearchNestedIdentity { field: Field offset: integer - _nested?: SearchTypesNestedIdentity + _nested?: SearchNestedIdentity } -export interface SearchTypesNestedSortValue { +export interface SearchNestedSortValue { filter?: QueryDslQueryContainer max_children?: integer path: Field } -export interface SearchTypesPhraseSuggestCollate { +export interface SearchPhraseSuggestCollate { params?: Record prune?: boolean - query: SearchTypesPhraseSuggestCollateQuery + query: SearchPhraseSuggestCollateQuery } -export interface SearchTypesPhraseSuggestCollateQuery { +export interface SearchPhraseSuggestCollateQuery { id?: Id source?: string } -export interface SearchTypesPhraseSuggestHighlight { +export interface SearchPhraseSuggestHighlight { post_tag: string pre_tag: string } -export interface SearchTypesPhraseSuggestOption { +export interface SearchPhraseSuggestOption { text: string highlighted: string score: double } -export interface SearchTypesPhraseSuggester extends SearchTypesSuggesterBase { - collate?: SearchTypesPhraseSuggestCollate +export interface SearchPhraseSuggester extends SearchSuggesterBase { + collate?: SearchPhraseSuggestCollate confidence?: double - direct_generator?: SearchTypesDirectGenerator[] + direct_generator?: SearchDirectGenerator[] force_unigrams?: boolean gram_size?: integer - highlight?: SearchTypesPhraseSuggestHighlight + highlight?: SearchPhraseSuggestHighlight max_errors?: double real_word_error_likelihood?: double separator?: string shard_size?: integer - smoothing?: SearchTypesSmoothingModelContainer + smoothing?: SearchSmoothingModelContainer text?: string token_limit?: integer } -export interface SearchTypesPointInTimeReference { +export interface SearchPointInTimeReference { id: Id keep_alive?: Time } -export interface SearchTypesProfile { - shards: SearchTypesShardProfile[] +export interface SearchProfile { + shards: SearchShardProfile[] } -export interface SearchTypesQueryBreakdown { +export interface SearchQueryBreakdown { advance: long advance_count: long build_scorer: long @@ -1354,114 +1354,114 @@ export interface SearchTypesQueryBreakdown { set_min_competitive_score_count: long } -export interface SearchTypesQueryProfile { - breakdown: SearchTypesQueryBreakdown +export interface SearchQueryProfile { + breakdown: SearchQueryBreakdown description: string time_in_nanos: long type: string - children?: SearchTypesQueryProfile[] + children?: SearchQueryProfile[] } -export interface SearchTypesRescore { - query: SearchTypesRescoreQuery +export interface SearchRescore { + query: SearchRescoreQuery window_size?: integer } -export interface SearchTypesRescoreQuery { +export interface SearchRescoreQuery { rescore_query: QueryDslQueryContainer query_weight?: double rescore_query_weight?: double - score_mode?: SearchTypesScoreMode + score_mode?: SearchScoreMode } -export type SearchTypesScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total' +export type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total' -export interface SearchTypesScoreSort { - mode?: SearchTypesSortMode - order?: SearchTypesSortOrder +export interface SearchScoreSort { + mode?: SearchSortMode + order?: SearchSortOrder } -export interface SearchTypesScriptSort { - order?: SearchTypesSortOrder +export interface 
SearchScriptSort { + order?: SearchSortOrder script: Script type?: string } -export interface SearchTypesSearchProfile { - collector: SearchTypesCollector[] - query: SearchTypesQueryProfile[] +export interface SearchSearchProfile { + collector: SearchCollector[] + query: SearchQueryProfile[] rewrite_time: long } -export interface SearchTypesShardProfile { - aggregations: SearchTypesAggregationProfile[] +export interface SearchShardProfile { + aggregations: SearchAggregationProfile[] id: string - searches: SearchTypesSearchProfile[] + searches: SearchSearchProfile[] } -export interface SearchTypesSmoothingModelContainer { - laplace?: SearchTypesLaplaceSmoothingModel - linear_interpolation?: SearchTypesLinearInterpolationSmoothingModel - stupid_backoff?: SearchTypesStupidBackoffSmoothingModel +export interface SearchSmoothingModelContainer { + laplace?: SearchLaplaceSmoothingModel + linear_interpolation?: SearchLinearInterpolationSmoothingModel + stupid_backoff?: SearchStupidBackoffSmoothingModel } -export type SearchTypesSort = SearchTypesSortCombinations | SearchTypesSortCombinations[] +export type SearchSort = SearchSortCombinations | SearchSortCombinations[] -export type SearchTypesSortCombinations = Field | SearchTypesSortContainer | SearchTypesSortOrder +export type SearchSortCombinations = Field | SearchSortContainer | SearchSortOrder -export interface SearchTypesSortContainerKeys { - _score?: SearchTypesScoreSort - _doc?: SearchTypesScoreSort - _geo_distance?: SearchTypesGeoDistanceSort - _script?: SearchTypesScriptSort +export interface SearchSortContainerKeys { + _score?: SearchScoreSort + _doc?: SearchScoreSort + _geo_distance?: SearchGeoDistanceSort + _script?: SearchScriptSort } -export type SearchTypesSortContainer = SearchTypesSortContainerKeys | - { [property: string]: SearchTypesFieldSort | SearchTypesSortOrder } +export type SearchSortContainer = SearchSortContainerKeys | + { [property: string]: SearchFieldSort | SearchSortOrder } -export type SearchTypesSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' +export type SearchSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' -export type SearchTypesSortOrder = 'asc' | 'desc' | '_doc' +export type SearchSortOrder = 'asc' | 'desc' | '_doc' -export type SearchTypesSortResults = (long | double | string | null)[] +export type SearchSortResults = (long | double | string | null)[] -export interface SearchTypesSourceFilter { +export interface SearchSourceFilter { excludes?: Fields includes?: Fields exclude?: Fields include?: Fields } -export type SearchTypesStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram' +export type SearchStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram' -export interface SearchTypesStupidBackoffSmoothingModel { +export interface SearchStupidBackoffSmoothingModel { discount: double } -export interface SearchTypesSuggest { +export interface SearchSuggest { length: integer offset: integer - options: SearchTypesSuggestOption[] + options: SearchSuggestOption[] text: string } -export interface SearchTypesSuggestContainer { - completion?: SearchTypesCompletionSuggester - phrase?: SearchTypesPhraseSuggester +export interface SearchSuggestContainer { + completion?: SearchCompletionSuggester + phrase?: SearchPhraseSuggester prefix?: string regex?: string - term?: SearchTypesTermSuggester + term?: SearchTermSuggester text?: string } -export interface SearchTypesSuggestContextQuery { +export interface SearchSuggestContextQuery { 
boost?: double - context: SearchTypesContext + context: SearchContext neighbours?: Distance[] | integer[] precision?: Distance | integer prefix?: boolean } -export interface SearchTypesSuggestFuzziness { +export interface SearchSuggestFuzziness { fuzziness: Fuzziness min_length: integer prefix_length: integer @@ -1469,23 +1469,23 @@ export interface SearchTypesSuggestFuzziness { unicode_aware: boolean } -export type SearchTypesSuggestOption = SearchTypesCompletionSuggestOption | SearchTypesPhraseSuggestOption | SearchTypesTermSuggestOption +export type SearchSuggestOption = SearchCompletionSuggestOption | SearchPhraseSuggestOption | SearchTermSuggestOption -export type SearchTypesSuggestSort = 'score' | 'frequency' +export type SearchSuggestSort = 'score' | 'frequency' -export interface SearchTypesSuggesterBase { +export interface SearchSuggesterBase { field: Field analyzer?: string size?: integer } -export interface SearchTypesTermSuggestOption { +export interface SearchTermSuggestOption { text: string freq?: long score: double } -export interface SearchTypesTermSuggester extends SearchTypesSuggesterBase { +export interface SearchTermSuggester extends SearchSuggesterBase { lowercase_terms?: boolean max_edits?: integer max_inspections?: integer @@ -1494,18 +1494,18 @@ export interface SearchTypesTermSuggester extends SearchTypesSuggesterBase { min_word_length?: integer prefix_length?: integer shard_size?: integer - sort?: SearchTypesSuggestSort - string_distance?: SearchTypesStringDistance + sort?: SearchSuggestSort + string_distance?: SearchStringDistance suggest_mode?: SuggestMode text?: string } -export interface SearchTypesTotalHits { - relation: SearchTypesTotalHitsRelation +export interface SearchTotalHits { + relation: SearchTotalHitsRelation value: long } -export type SearchTypesTotalHitsRelation = 'eq' | 'gte' +export type SearchTotalHitsRelation = 'eq' | 'gte' export interface SearchShardsRequest extends RequestBase { index?: Indices @@ -1555,7 +1555,7 @@ export interface SearchTemplateResponse { _shards: ShardStatistics timed_out: boolean took: integer - hits: SearchTypesHitsMetadata + hits: SearchHitsMetadata } export interface TermvectorsFieldStatistics { @@ -1649,7 +1649,7 @@ export interface UpdateRequest doc_as_upsert?: boolean script?: Script scripted_upsert?: boolean - _source?: boolean | SearchTypesSourceFilter + _source?: boolean | SearchSourceFilter upsert?: TDocument } } @@ -2521,7 +2521,7 @@ export interface AggregationsBucketSortAggregation extends AggregationsAggregati from?: integer gap_policy?: AggregationsGapPolicy size?: integer - sort?: SearchTypesSort + sort?: SearchSort } export interface AggregationsBucketsPath { @@ -2732,7 +2732,7 @@ export interface AggregationsGeoLineAggregation { point: AggregationsGeoLinePoint sort: AggregationsGeoLineSort include_sort?: boolean - sort_order?: SearchTypesSortOrder + sort_order?: SearchSortOrder size?: integer } @@ -2791,8 +2791,8 @@ export interface AggregationsHistogramAggregation extends AggregationsBucketAggr } export interface AggregationsHistogramOrder { - _count?: SearchTypesSortOrder - _key?: SearchTypesSortOrder + _count?: SearchSortOrder + _key?: SearchSortOrder } export interface AggregationsHoltLinearModelSettings { @@ -3202,7 +3202,7 @@ export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_f export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality' -export type AggregationsTermsAggregationOrder = 
SearchTypesSortOrder | Record | Record[] +export type AggregationsTermsAggregationOrder = SearchSortOrder | Record | Record[] export interface AggregationsTermsInclude { num_partitions: long @@ -3216,18 +3216,18 @@ export interface AggregationsTestPopulation { } export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase { - hits: SearchTypesHitsMetadata> + hits: SearchHitsMetadata> } export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase { docvalue_fields?: Fields explain?: boolean from?: integer - highlight?: SearchTypesHighlight + highlight?: SearchHighlight script_fields?: Record size?: integer - sort?: SearchTypesSort - _source?: boolean | SearchTypesSourceFilter | Fields + sort?: SearchSort + _source?: boolean | SearchSourceFilter | Fields stored_fields?: Fields track_scores?: boolean version?: boolean @@ -3246,7 +3246,7 @@ export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBa export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase { metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[] size?: integer - sort?: SearchTypesSort + sort?: SearchSort } export interface AggregationsTopMetricsValue { @@ -4180,7 +4180,7 @@ export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'stric export interface QueryDslHasChildQuery extends QueryDslQueryBase { ignore_unmapped?: boolean - inner_hits?: SearchTypesInnerHits + inner_hits?: SearchInnerHits max_children?: integer min_children?: integer query?: QueryDslQueryContainer @@ -4190,7 +4190,7 @@ export interface QueryDslHasChildQuery extends QueryDslQueryBase { export interface QueryDslHasParentQuery extends QueryDslQueryBase { ignore_unmapped?: boolean - inner_hits?: SearchTypesInnerHits + inner_hits?: SearchInnerHits parent_type?: RelationName query?: QueryDslQueryContainer score?: boolean @@ -4387,7 +4387,7 @@ export type QueryDslNamedQuery = QueryDslNamedQueryKeys { aggregations?: Record _clusters?: ClusterStatistics fields?: Record - hits: SearchTypesHitsMetadata + hits: SearchHitsMetadata max_score?: double num_reduce_phases?: long - profile?: SearchTypesProfile + profile?: SearchProfile pit_id?: Id _scroll_id?: Id _shards: ShardStatistics - suggest?: Record[]> + suggest?: Record[]> terminated_early?: boolean timed_out: boolean took: long @@ -4771,14 +4771,14 @@ export interface AsyncSearchSubmitRequest extends RequestBase { analyzer?: string analyze_wildcard?: boolean batched_reduce_size?: long - collapse?: SearchTypesFieldCollapse + collapse?: SearchFieldCollapse default_operator?: DefaultOperator df?: string docvalue_fields?: Fields expand_wildcards?: ExpandWildcards explain?: boolean from?: integer - highlight?: SearchTypesHighlight + highlight?: SearchHighlight ignore_throttled?: boolean ignore_unavailable?: boolean indices_boost?: Record[] @@ -4790,22 +4790,22 @@ export interface AsyncSearchSubmitRequest extends RequestBase { post_filter?: QueryDslQueryContainer preference?: string profile?: boolean - pit?: SearchTypesPointInTimeReference + pit?: SearchPointInTimeReference query?: QueryDslQueryContainer query_on_query_string?: string request_cache?: boolean - rescore?: SearchTypesRescore[] + rescore?: SearchRescore[] routing?: Routing script_fields?: Record search_after?: any[] search_type?: SearchType sequence_number_primary_term?: boolean size?: integer - sort?: SearchTypesSort - _source?: boolean | SearchTypesSourceFilter + sort?: SearchSort + _source?: boolean | SearchSourceFilter 
stats?: string[] stored_fields?: Fields - suggest?: Record + suggest?: Record suggest_field?: Field suggest_mode?: SuggestMode suggest_size?: long @@ -6742,7 +6742,15 @@ export interface CcrForgetFollowerIndexResponse { export interface CcrGetAutoFollowPatternAutoFollowPattern { name: Name - pattern: CcrGetAutoFollowPatternAutoFollowPattern + pattern: CcrGetAutoFollowPatternAutoFollowPatternSummary +} + +export interface CcrGetAutoFollowPatternAutoFollowPatternSummary { + active: boolean + remote_cluster: string + follow_index_pattern?: IndexPattern + leader_index_patterns: IndexPatterns + max_outstanding_read_requests: integer } export interface CcrGetAutoFollowPatternRequest extends RequestBase { @@ -7672,6 +7680,7 @@ export interface EnrichPolicy { indices: Indices match_field: Field query?: string + name?: Name } export interface EnrichSummary { @@ -7742,7 +7751,7 @@ export interface EnrichStatsResponse { } export interface EqlEqlHits { - total?: SearchTypesTotalHits + total?: SearchTotalHits events?: EqlHitsEvent[] sequences?: EqlHitsSequence[] } @@ -8264,6 +8273,11 @@ export interface IndicesNumericFielddata { export type IndicesNumericFielddataFormat = 'array' | 'disabled' +export interface IndicesOverlappingIndexTemplate { + name: Name + index_patterns?: IndexName[] +} + export interface IndicesStringFielddata { format: IndicesStringFielddataFormat } @@ -8782,7 +8796,7 @@ export interface IndicesGetUpgradeRequest extends RequestBase { } export interface IndicesGetUpgradeResponse { - overlapping?: IndicesSimulateIndexTemplateOverlappingIndexTemplate[] + overlapping?: IndicesOverlappingIndexTemplate[] template?: IndicesTemplateMapping } @@ -9215,22 +9229,17 @@ export interface IndicesShrinkResponse extends AcknowledgedResponseBase { index: IndexName } -export interface IndicesSimulateIndexTemplateOverlappingIndexTemplate { - name: Name - index_patterns?: IndexName[] -} - export interface IndicesSimulateIndexTemplateRequest extends RequestBase { name?: Name body?: { index_patterns?: IndexName[] composed_of?: Name[] - overlapping?: IndicesSimulateIndexTemplateOverlappingIndexTemplate[] + overlapping?: IndicesOverlappingIndexTemplate[] template?: IndicesTemplateMapping } } -export interface IndicesSimulateIndexTemplateResponse extends AcknowledgedResponseBase { +export interface IndicesSimulateIndexTemplateResponse { } export interface IndicesSimulateTemplateRequest extends RequestBase { @@ -9319,6 +9328,10 @@ export interface IndicesStatsShardCommit { export interface IndicesStatsShardFileSizeInfo { description: string size_in_bytes: long + min_size_in_bytes?: long + max_size_in_bytes?: long + average_size_in_bytes?: long + count?: long } export interface IndicesStatsShardLease { @@ -9729,7 +9742,7 @@ export type IngestShapeType = 'geo_shape' | 'shape' export interface IngestSortProcessor extends IngestProcessorBase { field: Field - order: SearchTypesSortOrder + order: SearchSortOrder target_field: Field } @@ -9834,6 +9847,12 @@ export interface IngestPutPipelineRequest extends RequestBase { export interface IngestPutPipelineResponse extends AcknowledgedResponseBase { } +export interface IngestSimulatePipelineDocument { + _id?: Id + _index?: IndexName + _source: any +} + export interface IngestSimulatePipelineDocumentSimulation { _id: Id _index: IndexName @@ -9861,7 +9880,7 @@ export interface IngestSimulatePipelineRequest extends RequestBase { id?: Id verbose?: boolean body?: { - docs?: IngestSimulatePipelineSimulatePipelineDocument[] + docs?: IngestSimulatePipelineDocument[] 
pipeline?: IngestPipeline } } @@ -9870,12 +9889,6 @@ export interface IngestSimulatePipelineResponse { docs: IngestSimulatePipelinePipelineSimulation[] } -export interface IngestSimulatePipelineSimulatePipelineDocument { - _id?: Id - _index?: IndexName - _source: any -} - export interface LicenseLicense { expiry_date_in_millis: EpochMillis issue_date_in_millis: EpochMillis @@ -9893,20 +9906,13 @@ export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' -export interface LicenseDeleteLicenseRequest extends RequestBase { +export interface LicenseDeleteRequest extends RequestBase { } -export interface LicenseDeleteLicenseResponse extends AcknowledgedResponseBase { +export interface LicenseDeleteResponse extends AcknowledgedResponseBase { } -export interface LicenseGetBasicLicenseStatusRequest extends RequestBase { -} - -export interface LicenseGetBasicLicenseStatusResponse { - eligible_to_start_basic: boolean -} - -export interface LicenseGetLicenseLicenseInformation { +export interface LicenseGetLicenseInformation { expiry_date: DateString expiry_date_in_millis: EpochMillis issue_date: DateString @@ -9921,28 +9927,35 @@ export interface LicenseGetLicenseLicenseInformation { start_date_in_millis: EpochMillis } -export interface LicenseGetLicenseRequest extends RequestBase { +export interface LicenseGetRequest extends RequestBase { accept_enterprise?: boolean local?: boolean } -export interface LicenseGetLicenseResponse { - license: LicenseGetLicenseLicenseInformation +export interface LicenseGetResponse { + license: LicenseGetLicenseInformation } -export interface LicenseGetTrialLicenseStatusRequest extends RequestBase { +export interface LicenseGetBasicStatusRequest extends RequestBase { } -export interface LicenseGetTrialLicenseStatusResponse { +export interface LicenseGetBasicStatusResponse { + eligible_to_start_basic: boolean +} + +export interface LicenseGetTrialStatusRequest extends RequestBase { +} + +export interface LicenseGetTrialStatusResponse { eligible_to_start_trial: boolean } -export interface LicensePostLicenseAcknowledgement { +export interface LicensePostAcknowledgement { license: string[] message: string } -export interface LicensePostLicenseRequest extends RequestBase { +export interface LicensePostRequest extends RequestBase { acknowledge?: boolean body?: { license?: LicenseLicense @@ -9950,28 +9963,28 @@ export interface LicensePostLicenseRequest extends RequestBase { } } -export interface LicensePostLicenseResponse { - acknowledge?: LicensePostLicenseAcknowledgement +export interface LicensePostResponse { + acknowledge?: LicensePostAcknowledgement acknowledged: boolean license_status: LicenseLicenseStatus } -export interface LicenseStartBasicLicenseRequest extends RequestBase { +export interface LicensePostStartBasicRequest extends RequestBase { acknowledge?: boolean } -export interface LicenseStartBasicLicenseResponse extends AcknowledgedResponseBase { +export interface LicensePostStartBasicResponse extends AcknowledgedResponseBase { acknowledge: Record basic_was_started: boolean error_message: string } -export interface LicenseStartTrialLicenseRequest extends RequestBase { +export interface LicensePostStartTrialRequest extends RequestBase { acknowledge?: boolean type_query_string?: string } -export interface LicenseStartTrialLicenseResponse extends AcknowledgedResponseBase { +export interface 
LicensePostStartTrialResponse extends AcknowledgedResponseBase { error_message?: string acknowledged: boolean trial_was_started: boolean @@ -11315,6 +11328,7 @@ export interface MlGetOverallBucketsRequest extends RequestBase { end?: Time start?: Time exclude_interim?: boolean + allow_no_match?: boolean body?: { allow_no_jobs?: boolean } @@ -12023,14 +12037,14 @@ export interface NodesTransport { tx_size_in_bytes: long } -export interface NodesNodesHotThreadsHotThread { +export interface NodesHotThreadsHotThread { hosts: Host[] node_id: Id node_name: Name threads: string[] } -export interface NodesNodesHotThreadsRequest extends RequestBase { +export interface NodesHotThreadsRequest extends RequestBase { node_id?: NodeIds ignore_idle_threads?: boolean interval?: Time @@ -12040,73 +12054,73 @@ export interface NodesNodesHotThreadsRequest extends RequestBase { timeout?: Time } -export interface NodesNodesHotThreadsResponse { - hot_threads: NodesNodesHotThreadsHotThread[] +export interface NodesHotThreadsResponse { + hot_threads: NodesHotThreadsHotThread[] } -export interface NodesNodesInfoNodeInfo { +export interface NodesInfoNodeInfo { attributes: Record build_flavor: string build_hash: string build_type: string host: Host - http?: NodesNodesInfoNodeInfoHttp + http?: NodesInfoNodeInfoHttp ip: Ip - jvm?: NodesNodesInfoNodeJvmInfo + jvm?: NodesInfoNodeJvmInfo name: Name - network?: NodesNodesInfoNodeInfoNetwork - os?: NodesNodesInfoNodeOperatingSystemInfo + network?: NodesInfoNodeInfoNetwork + os?: NodesInfoNodeOperatingSystemInfo plugins?: PluginStats[] - process?: NodesNodesInfoNodeProcessInfo + process?: NodesInfoNodeProcessInfo roles: NodeRoles - settings?: NodesNodesInfoNodeInfoSettings - thread_pool?: Record + settings?: NodesInfoNodeInfoSettings + thread_pool?: Record total_indexing_buffer?: long total_indexing_buffer_in_bytes?: ByteSize - transport?: NodesNodesInfoNodeInfoTransport + transport?: NodesInfoNodeInfoTransport transport_address: TransportAddress version: VersionString modules?: PluginStats[] - ingest?: NodesNodesInfoNodeInfoIngest - aggregations?: Record + ingest?: NodesInfoNodeInfoIngest + aggregations?: Record } -export interface NodesNodesInfoNodeInfoAction { +export interface NodesInfoNodeInfoAction { destructive_requires_name: string } -export interface NodesNodesInfoNodeInfoAggregation { +export interface NodesInfoNodeInfoAggregation { types: string[] } -export interface NodesNodesInfoNodeInfoBootstrap { +export interface NodesInfoNodeInfoBootstrap { memory_lock: string } -export interface NodesNodesInfoNodeInfoClient { +export interface NodesInfoNodeInfoClient { type: string } -export interface NodesNodesInfoNodeInfoDiscover { +export interface NodesInfoNodeInfoDiscover { seed_hosts: string } -export interface NodesNodesInfoNodeInfoHttp { +export interface NodesInfoNodeInfoHttp { bound_address: string[] max_content_length?: ByteSize max_content_length_in_bytes: long publish_address: string } -export interface NodesNodesInfoNodeInfoIngest { - processors: NodesNodesInfoNodeInfoIngestProcessor[] +export interface NodesInfoNodeInfoIngest { + processors: NodesInfoNodeInfoIngestProcessor[] } -export interface NodesNodesInfoNodeInfoIngestProcessor { +export interface NodesInfoNodeInfoIngestProcessor { type: string } -export interface NodesNodesInfoNodeInfoJvmMemory { +export interface NodesInfoNodeInfoJvmMemory { direct_max?: ByteSize direct_max_in_bytes: long heap_init?: ByteSize @@ -12119,23 +12133,23 @@ export interface NodesNodesInfoNodeInfoJvmMemory { 
non_heap_max_in_bytes: long } -export interface NodesNodesInfoNodeInfoMemory { +export interface NodesInfoNodeInfoMemory { total: string total_in_bytes: long } -export interface NodesNodesInfoNodeInfoNetwork { - primary_interface: NodesNodesInfoNodeInfoNetworkInterface +export interface NodesInfoNodeInfoNetwork { + primary_interface: NodesInfoNodeInfoNetworkInterface refresh_interval: integer } -export interface NodesNodesInfoNodeInfoNetworkInterface { +export interface NodesInfoNodeInfoNetworkInterface { address: string mac_address: string name: Name } -export interface NodesNodesInfoNodeInfoOSCPU { +export interface NodesInfoNodeInfoOSCPU { cache_size: string cache_size_in_bytes: integer cores_per_socket: integer @@ -12146,151 +12160,151 @@ export interface NodesNodesInfoNodeInfoOSCPU { vendor: string } -export interface NodesNodesInfoNodeInfoPath { +export interface NodesInfoNodeInfoPath { logs: string home: string repo: string[] data?: string[] } -export interface NodesNodesInfoNodeInfoRepositories { - url: NodesNodesInfoNodeInfoRepositoriesUrl +export interface NodesInfoNodeInfoRepositories { + url: NodesInfoNodeInfoRepositoriesUrl } -export interface NodesNodesInfoNodeInfoRepositoriesUrl { +export interface NodesInfoNodeInfoRepositoriesUrl { allowed_urls: string } -export interface NodesNodesInfoNodeInfoScript { +export interface NodesInfoNodeInfoScript { allowed_types: string disable_max_compilations_rate: string } -export interface NodesNodesInfoNodeInfoSearch { - remote: NodesNodesInfoNodeInfoSearchRemote +export interface NodesInfoNodeInfoSearch { + remote: NodesInfoNodeInfoSearchRemote } -export interface NodesNodesInfoNodeInfoSearchRemote { +export interface NodesInfoNodeInfoSearchRemote { connect: string } -export interface NodesNodesInfoNodeInfoSettings { - cluster: NodesNodesInfoNodeInfoSettingsCluster - node: NodesNodesInfoNodeInfoSettingsNode - path: NodesNodesInfoNodeInfoPath - repositories?: NodesNodesInfoNodeInfoRepositories - discovery?: NodesNodesInfoNodeInfoDiscover - action?: NodesNodesInfoNodeInfoAction - client: NodesNodesInfoNodeInfoClient - http: NodesNodesInfoNodeInfoSettingsHttp - bootstrap?: NodesNodesInfoNodeInfoBootstrap - transport: NodesNodesInfoNodeInfoSettingsTransport - network?: NodesNodesInfoNodeInfoSettingsNetwork - xpack?: NodesNodesInfoNodeInfoXpack - script?: NodesNodesInfoNodeInfoScript - search?: NodesNodesInfoNodeInfoSearch -} - -export interface NodesNodesInfoNodeInfoSettingsCluster { +export interface NodesInfoNodeInfoSettings { + cluster: NodesInfoNodeInfoSettingsCluster + node: NodesInfoNodeInfoSettingsNode + path: NodesInfoNodeInfoPath + repositories?: NodesInfoNodeInfoRepositories + discovery?: NodesInfoNodeInfoDiscover + action?: NodesInfoNodeInfoAction + client: NodesInfoNodeInfoClient + http: NodesInfoNodeInfoSettingsHttp + bootstrap?: NodesInfoNodeInfoBootstrap + transport: NodesInfoNodeInfoSettingsTransport + network?: NodesInfoNodeInfoSettingsNetwork + xpack?: NodesInfoNodeInfoXpack + script?: NodesInfoNodeInfoScript + search?: NodesInfoNodeInfoSearch +} + +export interface NodesInfoNodeInfoSettingsCluster { name: Name routing?: IndicesIndexRouting - election: NodesNodesInfoNodeInfoSettingsClusterElection + election: NodesInfoNodeInfoSettingsClusterElection initial_master_nodes?: string } -export interface NodesNodesInfoNodeInfoSettingsClusterElection { +export interface NodesInfoNodeInfoSettingsClusterElection { strategy: Name } -export interface NodesNodesInfoNodeInfoSettingsHttp { - type: string | 
NodesNodesInfoNodeInfoSettingsHttpType +export interface NodesInfoNodeInfoSettingsHttp { + type: string | NodesInfoNodeInfoSettingsHttpType 'type.default'?: string compression?: boolean | string port?: integer | string } -export interface NodesNodesInfoNodeInfoSettingsHttpType { +export interface NodesInfoNodeInfoSettingsHttpType { default: string } -export interface NodesNodesInfoNodeInfoSettingsNetwork { +export interface NodesInfoNodeInfoSettingsNetwork { host: Host } -export interface NodesNodesInfoNodeInfoSettingsNode { +export interface NodesInfoNodeInfoSettingsNode { name: Name attr: Record max_local_storage_nodes?: string } -export interface NodesNodesInfoNodeInfoSettingsTransport { - type: string | NodesNodesInfoNodeInfoSettingsTransportType +export interface NodesInfoNodeInfoSettingsTransport { + type: string | NodesInfoNodeInfoSettingsTransportType 'type.default'?: string - features?: NodesNodesInfoNodeInfoSettingsTransportFeatures + features?: NodesInfoNodeInfoSettingsTransportFeatures } -export interface NodesNodesInfoNodeInfoSettingsTransportFeatures { +export interface NodesInfoNodeInfoSettingsTransportFeatures { 'x-pack': string } -export interface NodesNodesInfoNodeInfoSettingsTransportType { +export interface NodesInfoNodeInfoSettingsTransportType { default: string } -export interface NodesNodesInfoNodeInfoTransport { +export interface NodesInfoNodeInfoTransport { bound_address: string[] publish_address: string profiles: Record } -export interface NodesNodesInfoNodeInfoXpack { - license?: NodesNodesInfoNodeInfoXpackLicense - security: NodesNodesInfoNodeInfoXpackSecurity +export interface NodesInfoNodeInfoXpack { + license?: NodesInfoNodeInfoXpackLicense + security: NodesInfoNodeInfoXpackSecurity notification?: Record } -export interface NodesNodesInfoNodeInfoXpackLicense { - self_generated: NodesNodesInfoNodeInfoXpackLicenseType +export interface NodesInfoNodeInfoXpackLicense { + self_generated: NodesInfoNodeInfoXpackLicenseType } -export interface NodesNodesInfoNodeInfoXpackLicenseType { +export interface NodesInfoNodeInfoXpackLicenseType { type: string } -export interface NodesNodesInfoNodeInfoXpackSecurity { - http: NodesNodesInfoNodeInfoXpackSecuritySsl +export interface NodesInfoNodeInfoXpackSecurity { + http: NodesInfoNodeInfoXpackSecuritySsl enabled: string - transport: NodesNodesInfoNodeInfoXpackSecuritySsl - authc?: NodesNodesInfoNodeInfoXpackSecurityAuthc + transport: NodesInfoNodeInfoXpackSecuritySsl + authc?: NodesInfoNodeInfoXpackSecurityAuthc } -export interface NodesNodesInfoNodeInfoXpackSecurityAuthc { - realms: NodesNodesInfoNodeInfoXpackSecurityAuthcRealms - token: NodesNodesInfoNodeInfoXpackSecurityAuthcToken +export interface NodesInfoNodeInfoXpackSecurityAuthc { + realms: NodesInfoNodeInfoXpackSecurityAuthcRealms + token: NodesInfoNodeInfoXpackSecurityAuthcToken } -export interface NodesNodesInfoNodeInfoXpackSecurityAuthcRealms { - file?: Record - native?: Record - pki?: Record +export interface NodesInfoNodeInfoXpackSecurityAuthcRealms { + file?: Record + native?: Record + pki?: Record } -export interface NodesNodesInfoNodeInfoXpackSecurityAuthcRealmsStatus { +export interface NodesInfoNodeInfoXpackSecurityAuthcRealmsStatus { enabled?: string order: string } -export interface NodesNodesInfoNodeInfoXpackSecurityAuthcToken { +export interface NodesInfoNodeInfoXpackSecurityAuthcToken { enabled: string } -export interface NodesNodesInfoNodeInfoXpackSecuritySsl { +export interface NodesInfoNodeInfoXpackSecuritySsl { ssl: Record } -export interface 
NodesNodesInfoNodeJvmInfo { +export interface NodesInfoNodeJvmInfo { gc_collectors: string[] - mem: NodesNodesInfoNodeInfoJvmMemory + mem: NodesInfoNodeInfoJvmMemory memory_pools: string[] pid: integer start_time_in_millis: long @@ -12304,7 +12318,7 @@ export interface NodesNodesInfoNodeJvmInfo { input_arguments: string[] } -export interface NodesNodesInfoNodeOperatingSystemInfo { +export interface NodesInfoNodeOperatingSystemInfo { arch: string available_processors: integer allocated_processors?: integer @@ -12312,18 +12326,18 @@ export interface NodesNodesInfoNodeOperatingSystemInfo { pretty_name: Name refresh_interval_in_millis: integer version: VersionString - cpu?: NodesNodesInfoNodeInfoOSCPU - mem?: NodesNodesInfoNodeInfoMemory - swap?: NodesNodesInfoNodeInfoMemory + cpu?: NodesInfoNodeInfoOSCPU + mem?: NodesInfoNodeInfoMemory + swap?: NodesInfoNodeInfoMemory } -export interface NodesNodesInfoNodeProcessInfo { +export interface NodesInfoNodeProcessInfo { id: long mlockall: boolean refresh_interval_in_millis: long } -export interface NodesNodesInfoNodeThreadPoolInfo { +export interface NodesInfoNodeThreadPoolInfo { core?: integer keep_alive?: string max?: integer @@ -12332,7 +12346,7 @@ export interface NodesNodesInfoNodeThreadPoolInfo { type: string } -export interface NodesNodesInfoRequest extends RequestBase { +export interface NodesInfoRequest extends RequestBase { node_id?: NodeIds metric?: Metrics flat_settings?: boolean @@ -12340,12 +12354,36 @@ export interface NodesNodesInfoRequest extends RequestBase { timeout?: Time } -export interface NodesNodesInfoResponse extends NodesNodesResponseBase { +export interface NodesInfoResponse extends NodesNodesResponseBase { cluster_name: Name - nodes: Record + nodes: Record +} + +export interface NodesReloadSecureSettingsNodeReloadException { + name: Name + reload_exception?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy } -export interface NodesNodesStatsRequest extends RequestBase { +export interface NodesReloadSecureSettingsNodeReloadExceptionCausedBy { + type: string + reason: string + caused_by?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy +} + +export interface NodesReloadSecureSettingsRequest extends RequestBase { + node_id?: NodeIds + timeout?: Time + body?: { + secure_settings_password?: Password + } +} + +export interface NodesReloadSecureSettingsResponse extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record +} + +export interface NodesStatsRequest extends RequestBase { node_id?: NodeIds metric?: Metrics index_metric?: Metrics @@ -12361,51 +12399,27 @@ export interface NodesNodesStatsRequest extends RequestBase { include_unloaded_segments?: boolean } -export interface NodesNodesStatsResponse extends NodesNodesResponseBase { +export interface NodesStatsResponse extends NodesNodesResponseBase { cluster_name: Name nodes: Record } -export interface NodesNodesUsageNodeUsage { +export interface NodesUsageNodeUsage { rest_actions: Record since: EpochMillis timestamp: EpochMillis aggregations: Record } -export interface NodesNodesUsageRequest extends RequestBase { +export interface NodesUsageRequest extends RequestBase { node_id?: NodeIds metric?: Metrics timeout?: Time } -export interface NodesNodesUsageResponse extends NodesNodesResponseBase { - cluster_name: Name - nodes: Record -} - -export interface NodesReloadSecureSettingsNodeReloadException { - name: Name - reload_exception?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy -} - -export interface 
NodesReloadSecureSettingsNodeReloadExceptionCausedBy { - type: string - reason: string - caused_by?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy -} - -export interface NodesReloadSecureSettingsRequest extends RequestBase { - node_id?: NodeIds - timeout?: Time - body?: { - secure_settings_password?: Password - } -} - -export interface NodesReloadSecureSettingsResponse extends NodesNodesResponseBase { +export interface NodesUsageResponse extends NodesNodesResponseBase { cluster_name: Name - nodes: Record + nodes: Record } export interface RollupDateHistogramGrouping { @@ -12589,7 +12603,14 @@ export interface RollupRollupSearchRequest extends RequestBase { } } -export type RollupRollupSearchResponse = boolean +export interface RollupRollupSearchResponse { + took: long + timed_out: boolean + terminated_early?: boolean + _shards: ShardStatistics + hits: SearchHitsMetadata + aggregations?: Record +} export interface RollupStartRollupJobRequest extends RequestBase { id: Id @@ -13727,22 +13748,22 @@ export interface SnapshotVerifyRepositoryResponse { nodes: Record } -export interface SqlClearSqlCursorRequest extends RequestBase { +export interface SqlClearCursorRequest extends RequestBase { body?: { cursor: string } } -export interface SqlClearSqlCursorResponse { +export interface SqlClearCursorResponse { succeeded: boolean } -export interface SqlQuerySqlColumn { +export interface SqlQueryColumn { name: Name type: string } -export interface SqlQuerySqlRequest extends RequestBase { +export interface SqlQueryRequest extends RequestBase { format?: string body?: { columnar?: boolean @@ -13757,15 +13778,15 @@ export interface SqlQuerySqlRequest extends RequestBase { } } -export interface SqlQuerySqlResponse { - columns?: SqlQuerySqlColumn[] +export interface SqlQueryResponse { + columns?: SqlQueryColumn[] cursor?: string - rows: SqlQuerySqlRow[] + rows: SqlQueryRow[] } -export type SqlQuerySqlRow = any[] +export type SqlQueryRow = any[] -export interface SqlTranslateSqlRequest extends RequestBase { +export interface SqlTranslateRequest extends RequestBase { body?: { fetch_size?: integer filter?: QueryDslQueryContainer @@ -13774,11 +13795,11 @@ export interface SqlTranslateSqlRequest extends RequestBase { } } -export interface SqlTranslateSqlResponse { +export interface SqlTranslateResponse { size: long - _source: boolean | Fields | SearchTypesSourceFilter + _source: boolean | Fields | SearchSourceFilter fields: Record[] - sort: SearchTypesSort + sort: SearchSort } export interface SslGetCertificatesCertificateInformation { @@ -13849,7 +13870,7 @@ export interface TaskTaskExecutingNode extends SpecUtilsBaseNode { tasks: Record } -export interface TaskCancelTasksRequest extends RequestBase { +export interface TaskCancelRequest extends RequestBase { task_id?: TaskId actions?: string | string[] nodes?: string[] @@ -13857,25 +13878,25 @@ export interface TaskCancelTasksRequest extends RequestBase { wait_for_completion?: boolean } -export interface TaskCancelTasksResponse { +export interface TaskCancelResponse { node_failures?: ErrorCause[] nodes: Record } -export interface TaskGetTaskRequest extends RequestBase { +export interface TaskGetRequest extends RequestBase { task_id: Id timeout?: Time wait_for_completion?: boolean } -export interface TaskGetTaskResponse { +export interface TaskGetResponse { completed: boolean task: TaskInfo response?: TaskStatus error?: ErrorCause } -export interface TaskListTasksRequest extends RequestBase { +export interface TaskListRequest extends RequestBase { 
actions?: string | string[] detailed?: boolean group_by?: GroupBy @@ -13885,7 +13906,7 @@ export interface TaskListTasksRequest extends RequestBase { wait_for_completion?: boolean } -export interface TaskListTasksResponse { +export interface TaskListResponse { node_failures?: ErrorCause[] nodes?: Record tasks?: Record | TaskInfo[] From 728215f3c042002886330835af1e4a21cd51c966 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 19 May 2021 11:16:33 +0200 Subject: [PATCH 017/647] API generation --- api/api/security.js | 22 ++++++++++++++++++++++ api/api/{termsenum.js => terms_enum.js} | 4 ++-- api/index.js | 5 +++-- api/requestParams.d.ts | 5 ++++- docs/reference.asciidoc | 16 +++++++++++++--- index.d.ts | 20 ++++++++++++++++---- 6 files changed, 60 insertions(+), 12 deletions(-) rename api/api/{termsenum.js => terms_enum.js} (95%) diff --git a/api/api/security.js b/api/api/security.js index 96d775dc5..1a0f41df4 100644 --- a/api/api/security.js +++ b/api/api/security.js @@ -524,6 +524,27 @@ SecurityApi.prototype.enableUser = function securityEnableUserApi (params, optio return this.transport.request(request, options, callback) } +SecurityApi.prototype.enrollNode = function securityEnrollNodeApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + let { method, body, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'GET' + path = '/' + '_security' + '/' + 'enroll_node' + + // build request object + const request = { + method, + path, + body: null, + querystring + } + + return this.transport.request(request, options, callback) +} + SecurityApi.prototype.getApiKey = function securityGetApiKeyApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -1048,6 +1069,7 @@ Object.defineProperties(SecurityApi.prototype, { delete_user: { get () { return this.deleteUser } }, disable_user: { get () { return this.disableUser } }, enable_user: { get () { return this.enableUser } }, + enroll_node: { get () { return this.enrollNode } }, get_api_key: { get () { return this.getApiKey } }, get_builtin_privileges: { get () { return this.getBuiltinPrivileges } }, get_privileges: { get () { return this.getPrivileges } }, diff --git a/api/api/termsenum.js b/api/api/terms_enum.js similarity index 95% rename from api/api/termsenum.js rename to api/api/terms_enum.js index 029c4769d..c4df48d83 100644 --- a/api/api/termsenum.js +++ b/api/api/terms_enum.js @@ -26,7 +26,7 @@ const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path'] const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } -function termsenumApi (params, options, callback) { +function termsEnumApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) // check required parameters @@ -53,4 +53,4 @@ function termsenumApi (params, options, callback) { return this.transport.request(request, options, callback) } -module.exports = termsenumApi +module.exports = termsEnumApi diff --git a/api/index.js b/api/index.js index e226a1f55..0ea8e1b84 100644 --- a/api/index.js +++ b/api/index.js @@ -84,7 +84,7 @@ const SnapshotApi = require('./api/snapshot') const SqlApi = require('./api/sql') const SslApi = require('./api/ssl') const TasksApi = require('./api/tasks') -const 
termsenumApi = require('./api/termsenum') +const termsEnumApi = require('./api/terms_enum') const termvectorsApi = require('./api/termvectors') const TextStructureApi = require('./api/text_structure') const TransformApi = require('./api/transform') @@ -202,7 +202,7 @@ ESAPI.prototype.scroll = scrollApi ESAPI.prototype.search = searchApi ESAPI.prototype.searchShards = searchShardsApi ESAPI.prototype.searchTemplate = searchTemplateApi -ESAPI.prototype.termsenum = termsenumApi +ESAPI.prototype.termsEnum = termsEnumApi ESAPI.prototype.termvectors = termvectorsApi ESAPI.prototype.update = updateApi ESAPI.prototype.updateByQuery = updateByQueryApi @@ -464,6 +464,7 @@ Object.defineProperties(ESAPI.prototype, { return this[kTasks] } }, + terms_enum: { get () { return this.termsEnum } }, textStructure: { get () { if (this[kTextStructure] === null) { diff --git a/api/requestParams.d.ts b/api/requestParams.d.ts index 1391ed578..a917765fa 100644 --- a/api/requestParams.d.ts +++ b/api/requestParams.d.ts @@ -2260,6 +2260,9 @@ export interface SecurityEnableUser extends Generic { refresh?: 'wait_for' | boolean; } +export interface SecurityEnrollNode extends Generic { +} + export interface SecurityGetApiKey extends Generic { id?: string; name?: string; @@ -2509,7 +2512,7 @@ export interface TasksList extends Generic { timeout?: string; } -export interface Termsenum extends Generic { +export interface TermsEnum extends Generic { index: string | string[]; body?: T; } diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index de5c23a2b..678517a65 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -9323,6 +9323,16 @@ link:{ref}/security-api-enable-user.html[Documentation] + |=== +[discrete] +=== security.enrollNode + +[source,ts] +---- +client.security.enrollNode() +---- +link:{ref}/security-api-enroll-node.html[Documentation] + + + [discrete] === security.getApiKey @@ -10346,16 +10356,16 @@ _Default:_ `nodes` |=== [discrete] -=== termsenum +=== termsEnum *Stability:* beta [source,ts] ---- -client.termsenum({ +client.termsEnum({ index: string | string[], body: object }) ---- -link:{ref}/terms-enum.html[Documentation] + +link:{ref}/search-terms-enum.html[Documentation] + [cols=2*] |=== |`index` diff --git a/index.d.ts b/index.d.ts index 1064b3b69..38f2aca8f 100644 --- a/index.d.ts +++ b/index.d.ts @@ -2220,6 +2220,14 @@ declare class Client { enableUser, TContext = Context>(callback: callbackFn): TransportRequestCallback enableUser, TContext = Context>(params: RequestParams.SecurityEnableUser, callback: callbackFn): TransportRequestCallback enableUser, TContext = Context>(params: RequestParams.SecurityEnableUser, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + enroll_node, TContext = Context>(params?: RequestParams.SecurityEnrollNode, options?: TransportRequestOptions): TransportRequestPromise> + enroll_node, TContext = Context>(callback: callbackFn): TransportRequestCallback + enroll_node, TContext = Context>(params: RequestParams.SecurityEnrollNode, callback: callbackFn): TransportRequestCallback + enroll_node, TContext = Context>(params: RequestParams.SecurityEnrollNode, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + enrollNode, TContext = Context>(params?: RequestParams.SecurityEnrollNode, options?: TransportRequestOptions): TransportRequestPromise> + enrollNode, TContext = Context>(callback: callbackFn): TransportRequestCallback + enrollNode, TContext = Context>(params: RequestParams.SecurityEnrollNode, 
callback: callbackFn): TransportRequestCallback + enrollNode, TContext = Context>(params: RequestParams.SecurityEnrollNode, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback get_api_key, TContext = Context>(params?: RequestParams.SecurityGetApiKey, options?: TransportRequestOptions): TransportRequestPromise> get_api_key, TContext = Context>(callback: callbackFn): TransportRequestCallback get_api_key, TContext = Context>(params: RequestParams.SecurityGetApiKey, callback: callbackFn): TransportRequestCallback @@ -2561,10 +2569,14 @@ declare class Client { list, TContext = Context>(params: RequestParams.TasksList, callback: callbackFn): TransportRequestCallback list, TContext = Context>(params: RequestParams.TasksList, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } - termsenum, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.Termsenum, options?: TransportRequestOptions): TransportRequestPromise> - termsenum, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - termsenum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Termsenum, callback: callbackFn): TransportRequestCallback - termsenum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Termsenum, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + terms_enum, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.TermsEnum, options?: TransportRequestOptions): TransportRequestPromise> + terms_enum, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + terms_enum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TermsEnum, callback: callbackFn): TransportRequestCallback + terms_enum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TermsEnum, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + termsEnum, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.TermsEnum, options?: TransportRequestOptions): TransportRequestPromise> + termsEnum, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + termsEnum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TermsEnum, callback: callbackFn): TransportRequestCallback + termsEnum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TermsEnum, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback termvectors, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.Termvectors, options?: TransportRequestOptions): TransportRequestPromise> termvectors, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback termvectors, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Termvectors, callback: callbackFn): TransportRequestCallback From b4b45459c8705c06231ce353265a7b3dc0e2ecc8 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 19 May 2021 11:18:32 +0200 Subject: [PATCH 018/647] Bumped v8.0.0-canary.10 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index feaba8ed5..e6462ac05 100644 --- 
a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.9", + "versionCanary": "8.0.0-canary.10", "keywords": [ "elasticsearch", "elastic", From c34a6690ba1a25d46592697b90c9c3325aaa8e3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Wed, 19 May 2021 12:55:18 +0200 Subject: [PATCH 019/647] [DOCS] Removes link from API reference that breaks the docs build (#1470) --- docs/reference.asciidoc | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 678517a65..368524de6 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -9330,8 +9330,6 @@ link:{ref}/security-api-enable-user.html[Documentation] + ---- client.security.enrollNode() ---- -link:{ref}/security-api-enroll-node.html[Documentation] + - [discrete] === security.getApiKey From 507ed99e745e1da92ecd39886fa14184ec8c0ebf Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Thu, 20 May 2021 16:14:51 +0200 Subject: [PATCH 020/647] Update body error check (#1472) --- lib/errors.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/errors.js b/lib/errors.js index cf2f296d8..a62d45936 100644 --- a/lib/errors.js +++ b/lib/errors.js @@ -90,7 +90,7 @@ class ResponseError extends ElasticsearchClientError { super('Response Error') Error.captureStackTrace(this, ResponseError) this.name = 'ResponseError' - if (meta.body && meta.body.error && meta.body.status) { + if (meta.body && meta.body.error && meta.body.error.type) { if (Array.isArray(meta.body.error.root_cause)) { this.message = meta.body.error.type + ': ' this.message += meta.body.error.root_cause.map(entry => `[${entry.type}] Reason: ${entry.reason}`).join('; ') From c64152b9b369224d2c62abee8545841ee1f78f1a Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Thu, 20 May 2021 16:16:45 +0200 Subject: [PATCH 021/647] Drop node v10 support (#1471) --- .ci/test-matrix.yml | 2 +- .github/workflows/nodejs.yml | 4 +- index.js | 11 -- lib/Connection.js | 4 +- package.json | 26 ++-- test/acceptance/events-order.test.js | 80 +++++------ test/acceptance/observability.test.js | 62 ++++----- test/acceptance/proxy.test.js | 24 ++-- test/acceptance/resurrect.test.js | 32 ++--- test/acceptance/sniff.test.js | 48 +++---- test/integration/helpers/bulk.test.js | 14 +- test/integration/helpers/msearch.test.js | 10 +- test/integration/helpers/scroll.test.js | 10 +- test/integration/helpers/search.test.js | 4 +- test/integration/test-runner.js | 6 +- test/unit/api-async.js | 2 +- test/unit/api.test.js | 14 +- test/unit/base-connection-pool.test.js | 60 ++++----- test/unit/child.test.js | 10 +- test/unit/client.test.js | 158 +++++++++++----------- test/unit/cloud-connection-pool.test.js | 4 +- test/unit/connection-pool.test.js | 136 +++++++++---------- test/unit/connection.test.js | 82 ++++++------ test/unit/errors.test.js | 66 ++++----- test/unit/esm/index.mjs | 2 +- test/unit/events.test.js | 2 +- test/unit/helpers/bulk.test.js | 126 +++++++++--------- test/unit/helpers/msearch.test.js | 72 +++++----- test/unit/helpers/scroll.test.js | 56 ++++---- test/unit/helpers/search.test.js | 16 +-- test/unit/selectors.test.js | 2 +- test/unit/serializer.test.js | 16 +-- test/unit/transport.test.js | 162 +++++++++++------------ 33 files changed, 655 insertions(+), 668 deletions(-) diff --git a/.ci/test-matrix.yml 
b/.ci/test-matrix.yml index 4c86f5987..26b7dbcd5 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -3,9 +3,9 @@ STACK_VERSION: - 8.0.0-SNAPSHOT NODE_JS_VERSION: + - 16 - 14 - 12 - - 10 TEST_SUITE: - free diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 9f1fe8a63..f65a2278b 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -9,7 +9,7 @@ jobs: strategy: matrix: - node-version: [10.x, 12.x, 14.x, 15.x] + node-version: [12.x, 14.x, 16.x] os: [ubuntu-latest, windows-latest, macOS-latest] steps: @@ -46,7 +46,7 @@ jobs: strategy: matrix: - node-version: [10.x, 12.x, 14.x] + node-version: [12.x, 14.x, 16.x] steps: - uses: actions/checkout@v2 diff --git a/index.js b/index.js index 02851266d..e36e6bee4 100644 --- a/index.js +++ b/index.js @@ -19,8 +19,6 @@ 'use strict' -const nodeMajor = Number(process.versions.node.split('.')[0]) - const { EventEmitter } = require('events') const { URL } = require('url') const debug = require('debug')('elasticsearch') @@ -47,15 +45,6 @@ const kEventEmitter = Symbol('elasticsearchjs-event-emitter') const ESAPI = require('./api') -/* istanbul ignore next */ -if (nodeMajor >= 10 && nodeMajor < 12) { - process.emitWarning('You are using a version of Node.js that will reach EOL in April 2021. ' + - 'The support for this version will be dropped in 7.13. ' + - 'Please refer to https://ela.st/nodejs-support for additional information.', - 'DeprecationWarning' - ) -} - class Client extends ESAPI { constructor (opts = {}) { super({ ConfigurationError }) diff --git a/lib/Connection.js b/lib/Connection.js index cb5c7c7f7..20e08f708 100644 --- a/lib/Connection.js +++ b/lib/Connection.js @@ -25,7 +25,7 @@ const hpagent = require('hpagent') const http = require('http') const https = require('https') const debug = require('debug')('elasticsearch') -const pump = require('pump') +const { pipeline } = require('stream') const INVALID_PATH_REGEX = /[^\u0021-\u00ff]/ const { ConnectionError, @@ -133,7 +133,7 @@ class Connection { // starts the request if (isStream(params.body) === true) { - pump(params.body, request, err => { + pipeline(params.body, request, err => { /* istanbul ignore if */ if (err != null && cleanedListeners === false) { cleanListeners() diff --git a/package.json b/package.json index e6462ac05..dfd45d446 100644 --- a/package.json +++ b/package.json @@ -48,39 +48,38 @@ }, "devDependencies": { "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", - "@types/node": "^14.14.28", - "convert-hrtime": "^3.0.0", + "@types/node": "^15.3.1", + "convert-hrtime": "^5.0.0", "cross-zip": "^4.0.0", "dedent": "^0.7.0", "deepmerge": "^4.2.2", "dezalgo": "^1.0.3", "fast-deep-equal": "^3.1.3", "into-stream": "^6.0.0", - "js-yaml": "^4.0.0", + "js-yaml": "^4.1.0", "license-checker": "^25.0.1", "minimist": "^1.2.5", "node-fetch": "^2.6.1", - "ora": "^5.3.0", + "ora": "^5.4.0", "pretty-hrtime": "^1.0.3", "proxy": "^1.0.2", "rimraf": "^3.0.2", - "semver": "^7.3.4", - "simple-git": "^2.35.0", - "simple-statistics": "^7.4.1", + "semver": "^7.3.5", + "simple-git": "^2.39.0", + "simple-statistics": "^7.7.0", "split2": "^3.2.2", "standard": "^16.0.3", "stoppable": "^1.1.0", - "tap": "^14.11.0", - "tsd": "^0.14.0", + "tap": "^15.0.9", + "tsd": "^0.15.1", "workq": "^3.0.0", - "xmlbuilder2": "^2.4.0" + "xmlbuilder2": "^2.4.1" }, "dependencies": { "debug": "^4.3.1", "hpagent": "^0.1.1", "ms": "^2.1.3", - "pump": "^3.0.0", - "secure-json-parse": "^2.3.1" + "secure-json-parse": "^2.4.0" }, "license": "Apache-2.0", 
"repository": { @@ -91,13 +90,12 @@ "url": "https://github.com/elastic/elasticsearch-js/issues" }, "engines": { - "node": ">=10" + "node": ">=12" }, "tsd": { "directory": "test/types" }, "tap": { - "esm": false, "ts": false, "jsx": false, "flow": false, diff --git a/test/acceptance/events-order.test.js b/test/acceptance/events-order.test.js index b4a9ff6b3..0bbd9a49a 100644 --- a/test/acceptance/events-order.test.js +++ b/test/acceptance/events-order.test.js @@ -57,27 +57,27 @@ test('No errors', t => { client.on(events.SERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.SERIALIZATION) + t.equal(order.shift(), events.SERIALIZATION) }) client.on(events.REQUEST, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.REQUEST) + t.equal(order.shift(), events.REQUEST) }) client.on(events.DESERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.DESERIALIZATION) + t.equal(order.shift(), events.DESERIALIZATION) }) client.on(events.RESPONSE, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.RESPONSE) + t.equal(order.shift(), events.RESPONSE) }) client.info((err, result) => { t.error(err) - t.strictEqual(order.length, 0) + t.equal(order.length, 0) }) }) @@ -99,12 +99,12 @@ test('Connection error', t => { client.on(events.SERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.SERIALIZATION) + t.equal(order.shift(), events.SERIALIZATION) }) client.on(events.REQUEST, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.REQUEST) + t.equal(order.shift(), events.REQUEST) }) client.on(events.DESERIALIZATION, (_err, request) => { @@ -113,12 +113,12 @@ test('Connection error', t => { client.on(events.RESPONSE, (err, request) => { t.ok(err instanceof ConnectionError) - t.strictEqual(order.shift(), events.RESPONSE) + t.equal(order.shift(), events.RESPONSE) }) client.info((err, result) => { t.ok(err instanceof ConnectionError) - t.strictEqual(order.length, 0) + t.equal(order.length, 0) }) }) @@ -140,12 +140,12 @@ test('TimeoutError error', t => { client.on(events.SERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.SERIALIZATION) + t.equal(order.shift(), events.SERIALIZATION) }) client.on(events.REQUEST, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.REQUEST) + t.equal(order.shift(), events.REQUEST) }) client.on(events.DESERIALIZATION, (_err, request) => { @@ -154,12 +154,12 @@ test('TimeoutError error', t => { client.on(events.RESPONSE, (err, request) => { t.ok(err instanceof TimeoutError) - t.strictEqual(order.shift(), events.RESPONSE) + t.equal(order.shift(), events.RESPONSE) }) client.info((err, result) => { t.ok(err instanceof TimeoutError) - t.strictEqual(order.length, 0) + t.equal(order.length, 0) }) }) @@ -180,12 +180,12 @@ test('RequestAbortedError error', t => { client.on(events.SERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.SERIALIZATION) + t.equal(order.shift(), events.SERIALIZATION) }) client.on(events.REQUEST, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.REQUEST) + t.equal(order.shift(), events.REQUEST) }) client.on(events.DESERIALIZATION, (_err, request) => { @@ -194,12 +194,12 @@ test('RequestAbortedError error', t => { client.on(events.RESPONSE, (err, request) => { t.ok(err instanceof RequestAbortedError) - t.strictEqual(order.shift(), events.RESPONSE) + t.equal(order.shift(), events.RESPONSE) }) const request = 
client.info((err, result) => { t.ok(err instanceof RequestAbortedError) - t.strictEqual(order.length, 0) + t.equal(order.length, 0) }) request.abort() @@ -232,27 +232,27 @@ test('ResponseError error (no retry)', t => { client.on(events.SERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.SERIALIZATION) + t.equal(order.shift(), events.SERIALIZATION) }) client.on(events.REQUEST, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.REQUEST) + t.equal(order.shift(), events.REQUEST) }) client.on(events.DESERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.DESERIALIZATION) + t.equal(order.shift(), events.DESERIALIZATION) }) client.on(events.RESPONSE, (err, request) => { t.ok(err instanceof ResponseError) - t.strictEqual(order.shift(), events.RESPONSE) + t.equal(order.shift(), events.RESPONSE) }) client.info((err, result) => { t.ok(err instanceof ResponseError) - t.strictEqual(order.length, 0) + t.equal(order.length, 0) }) }) @@ -285,27 +285,27 @@ test('ResponseError error (with retry)', t => { client.on(events.SERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.SERIALIZATION) + t.equal(order.shift(), events.SERIALIZATION) }) client.on(events.REQUEST, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.REQUEST) + t.equal(order.shift(), events.REQUEST) }) client.on(events.DESERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.DESERIALIZATION) + t.equal(order.shift(), events.DESERIALIZATION) }) client.on(events.RESPONSE, (err, request) => { t.ok(err instanceof ResponseError) - t.strictEqual(order.shift(), events.RESPONSE) + t.equal(order.shift(), events.RESPONSE) }) client.info((err, result) => { t.ok(err instanceof ResponseError) - t.strictEqual(order.length, 0) + t.equal(order.length, 0) }) }) @@ -325,12 +325,12 @@ test('Serialization Error', t => { client.on(events.SERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.SERIALIZATION) + t.equal(order.shift(), events.SERIALIZATION) }) client.on(events.REQUEST, (err, request) => { t.ok(err instanceof SerializationError) - t.strictEqual(order.shift(), events.REQUEST) + t.equal(order.shift(), events.REQUEST) }) client.on(events.DESERIALIZATION, (_err, request) => { @@ -345,7 +345,7 @@ test('Serialization Error', t => { body.o = body client.index({ index: 'test', body }, (err, result) => { t.ok(err instanceof SerializationError) - t.strictEqual(order.length, 0) + t.equal(order.length, 0) }) }) @@ -384,27 +384,27 @@ test('Deserialization Error', t => { client.on(events.SERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.SERIALIZATION) + t.equal(order.shift(), events.SERIALIZATION) }) client.on(events.REQUEST, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.REQUEST) + t.equal(order.shift(), events.REQUEST) }) client.on(events.DESERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.DESERIALIZATION) + t.equal(order.shift(), events.DESERIALIZATION) }) client.on(events.RESPONSE, (err, request) => { t.ok(err instanceof DeserializationError) - t.strictEqual(order.shift(), events.RESPONSE) + t.equal(order.shift(), events.RESPONSE) }) client.info((err, result) => { t.ok(err instanceof DeserializationError) - t.strictEqual(order.length, 0) + t.equal(order.length, 0) }) }) @@ -435,27 +435,27 @@ test('Socket destroyed while reading the body', t => { 
client.on(events.SERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.SERIALIZATION) + t.equal(order.shift(), events.SERIALIZATION) }) client.on(events.REQUEST, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.REQUEST) + t.equal(order.shift(), events.REQUEST) }) client.on(events.DESERIALIZATION, (err, request) => { t.error(err) - t.strictEqual(order.shift(), events.DESERIALIZATION) + t.equal(order.shift(), events.DESERIALIZATION) }) client.on(events.RESPONSE, (err, request) => { t.ok(err instanceof ConnectionError) - t.strictEqual(order.shift(), events.RESPONSE) + t.equal(order.shift(), events.RESPONSE) }) client.info((err, result) => { t.ok(err instanceof ConnectionError) - t.strictEqual(order.length, 0) + t.equal(order.length, 0) server.stop() }) }) diff --git a/test/acceptance/observability.test.js b/test/acceptance/observability.test.js index d7141b923..2d74b4f32 100644 --- a/test/acceptance/observability.test.js +++ b/test/acceptance/observability.test.js @@ -17,7 +17,7 @@ test('Request id', t => { t.type(genReqId, 'function') for (let i = 1; i <= 10; i++) { - t.strictEqual(genReqId(), i) + t.equal(genReqId(), i) } t.end() @@ -40,12 +40,12 @@ test('Request id', t => { client.on('request', (err, { meta }) => { t.error(err) - t.strictEqual(meta.request.id, 'custom-id') + t.equal(meta.request.id, 'custom-id') }) client.on('response', (err, { meta }) => { t.error(err) - t.strictEqual(meta.request.id, 'custom-id') + t.equal(meta.request.id, 'custom-id') }) client.info({}, options, t.error) @@ -61,12 +61,12 @@ test('Request id', t => { client.on('request', (err, { meta }) => { t.error(err) - t.strictEqual(meta.request.id, 'custom-id') + t.equal(meta.request.id, 'custom-id') }) client.on('response', (err, { meta }) => { t.error(err) - t.strictEqual(meta.request.id, 'custom-id') + t.equal(meta.request.id, 'custom-id') }) client.info({}, { id: 'custom-id' }, t.error) @@ -84,7 +84,7 @@ test('Request id', t => { client.on('sniff', (err, { meta }) => { t.error(err) - t.strictEqual(meta.request.id, 1) + t.equal(meta.request.id, 1) }) }) @@ -99,15 +99,15 @@ test('Request id', t => { }) client.on('request', (e, { meta }) => { - t.strictEqual(meta.request.id, 'custom') + t.equal(meta.request.id, 'custom') }) client.on('response', (e, { meta }) => { - t.strictEqual(meta.request.id, 'custom') + t.equal(meta.request.id, 'custom') }) client.on('sniff', (e, { meta }) => { - t.strictEqual(meta.request.id, 'custom') + t.equal(meta.request.id, 'custom') }) client.transport.request({ @@ -139,7 +139,7 @@ test('Request id', t => { client.on('resurrect', (err, meta) => { t.error(err) - t.strictEqual(meta.request.id, 'custom') + t.equal(meta.request.id, 'custom') clock.uninstall() }) @@ -160,12 +160,12 @@ test('Request context', t => { client.on('request', (err, { meta }) => { t.error(err) - t.strictEqual(meta.context, null) + t.equal(meta.context, null) }) client.on('response', (err, { meta }) => { t.error(err) - t.strictEqual(meta.context, null) + t.equal(meta.context, null) }) client.info(t.error) @@ -181,12 +181,12 @@ test('Request context', t => { client.on('request', (err, { meta }) => { t.error(err) - t.deepEqual(meta.context, { winter: 'is coming' }) + t.same(meta.context, { winter: 'is coming' }) }) client.on('response', (err, { meta }) => { t.error(err) - t.deepEqual(meta.context, { winter: 'is coming' }) + t.same(meta.context, { winter: 'is coming' }) }) client.info({}, { context: { winter: 'is coming' } }, t.error) @@ -203,12 +203,12 @@ 
test('Request context', t => { client.on('request', (err, { meta }) => { t.error(err) - t.deepEqual(meta.context, { winter: 'is coming' }) + t.same(meta.context, { winter: 'is coming' }) }) client.on('response', (err, { meta }) => { t.error(err) - t.deepEqual(meta.context, { winter: 'is coming' }) + t.same(meta.context, { winter: 'is coming' }) }) client.info(t.error) @@ -225,12 +225,12 @@ test('Request context', t => { client.on('request', (err, { meta }) => { t.error(err) - t.deepEqual(meta.context, { winter: 'has come' }) + t.same(meta.context, { winter: 'has come' }) }) client.on('response', (err, { meta }) => { t.error(err) - t.deepEqual(meta.context, { winter: 'has come' }) + t.same(meta.context, { winter: 'has come' }) }) client.info({}, { context: { winter: 'has come' } }, t.error) @@ -245,7 +245,7 @@ test('Client name', t => { node: 'http://localhost:9200/', name: 'cluster' }) - t.strictEqual(client.name, 'cluster') + t.equal(client.name, 'cluster') t.end() }) @@ -259,17 +259,17 @@ test('Client name', t => { client.on('request', (err, { meta }) => { t.error(err) - t.strictEqual(meta.name, 'cluster') + t.equal(meta.name, 'cluster') }) client.on('response', (err, { meta }) => { t.error(err) - t.strictEqual(meta.name, 'cluster') + t.equal(meta.name, 'cluster') }) client.info((err, { meta }) => { t.error(err) - t.strictEqual(meta.name, 'cluster') + t.equal(meta.name, 'cluster') }) }) @@ -284,17 +284,17 @@ test('Client name', t => { client.on('request', (err, { meta }) => { t.error(err) - t.strictEqual(meta.name, symbol) + t.equal(meta.name, symbol) }) client.on('response', (err, { meta }) => { t.error(err) - t.strictEqual(meta.name, symbol) + t.equal(meta.name, symbol) }) client.info((err, { meta }) => { t.error(err) - t.strictEqual(meta.name, symbol) + t.equal(meta.name, symbol) }) }) @@ -310,7 +310,7 @@ test('Client name', t => { client.on('sniff', (err, { meta }) => { t.error(err) - t.strictEqual(meta.name, 'elasticsearch-js') + t.equal(meta.name, 'elasticsearch-js') }) }) @@ -325,15 +325,15 @@ test('Client name', t => { }) client.on('request', (e, { meta }) => { - t.strictEqual(meta.name, 'elasticsearch-js') + t.equal(meta.name, 'elasticsearch-js') }) client.on('response', (e, { meta }) => { - t.strictEqual(meta.name, 'elasticsearch-js') + t.equal(meta.name, 'elasticsearch-js') }) client.on('sniff', (e, { meta }) => { - t.strictEqual(meta.name, 'elasticsearch-js') + t.equal(meta.name, 'elasticsearch-js') }) client.transport.request({ @@ -364,7 +364,7 @@ test('Client name', t => { client.on('resurrect', (err, meta) => { t.error(err) - t.strictEqual(meta.name, 'elasticsearch-js') + t.equal(meta.name, 'elasticsearch-js') clock.uninstall() }) @@ -392,7 +392,7 @@ test('Client name', t => { client.on('resurrect', (err, meta) => { t.error(err) - t.strictEqual(meta.name, 'child-client') + t.equal(meta.name, 'child-client') clock.uninstall() }) diff --git a/test/acceptance/proxy.test.js b/test/acceptance/proxy.test.js index 9210b6586..da54084cb 100644 --- a/test/acceptance/proxy.test.js +++ b/test/acceptance/proxy.test.js @@ -18,7 +18,7 @@ test('http-http proxy support', async t => { const server = await createServer() const proxy = await createProxy() server.on('request', (req, res) => { - t.strictEqual(req.url, '/_cluster/health') + t.equal(req.url, '/_cluster/health') res.setHeader('content-type', 'application/json') res.end(JSON.stringify({ hello: 'world' })) }) @@ -29,7 +29,7 @@ test('http-http proxy support', async t => { }) const response = await client.cluster.health() - 
t.deepEqual(response.body, { hello: 'world' }) + t.same(response.body, { hello: 'world' }) server.close() proxy.close() @@ -39,7 +39,7 @@ test('http-https proxy support', async t => { const server = await createSecureServer() const proxy = await createProxy() server.on('request', (req, res) => { - t.strictEqual(req.url, '/_cluster/health') + t.equal(req.url, '/_cluster/health') res.setHeader('content-type', 'application/json') res.end(JSON.stringify({ hello: 'world' })) }) @@ -50,7 +50,7 @@ test('http-https proxy support', async t => { }) const response = await client.cluster.health() - t.deepEqual(response.body, { hello: 'world' }) + t.same(response.body, { hello: 'world' }) server.close() proxy.close() @@ -60,7 +60,7 @@ test('https-http proxy support', async t => { const server = await createServer() const proxy = await createSecureProxy() server.on('request', (req, res) => { - t.strictEqual(req.url, '/_cluster/health') + t.equal(req.url, '/_cluster/health') res.setHeader('content-type', 'application/json') res.end(JSON.stringify({ hello: 'world' })) }) @@ -71,7 +71,7 @@ test('https-http proxy support', async t => { }) const response = await client.cluster.health() - t.deepEqual(response.body, { hello: 'world' }) + t.same(response.body, { hello: 'world' }) server.close() proxy.close() @@ -81,7 +81,7 @@ test('https-https proxy support', async t => { const server = await createSecureServer() const proxy = await createSecureProxy() server.on('request', (req, res) => { - t.strictEqual(req.url, '/_cluster/health') + t.equal(req.url, '/_cluster/health') res.setHeader('content-type', 'application/json') res.end(JSON.stringify({ hello: 'world' })) }) @@ -92,7 +92,7 @@ test('https-https proxy support', async t => { }) const response = await client.cluster.health() - t.deepEqual(response.body, { hello: 'world' }) + t.same(response.body, { hello: 'world' }) server.close() proxy.close() @@ -102,7 +102,7 @@ test('http basic authentication', async t => { const server = await createServer() const proxy = await createProxy() server.on('request', (req, res) => { - t.strictEqual(req.url, '/_cluster/health') + t.equal(req.url, '/_cluster/health') res.setHeader('content-type', 'application/json') res.end(JSON.stringify({ hello: 'world' })) }) @@ -117,7 +117,7 @@ test('http basic authentication', async t => { }) const response = await client.cluster.health() - t.deepEqual(response.body, { hello: 'world' }) + t.same(response.body, { hello: 'world' }) server.close() proxy.close() @@ -127,7 +127,7 @@ test('https basic authentication', async t => { const server = await createSecureServer() const proxy = await createProxy() server.on('request', (req, res) => { - t.strictEqual(req.url, '/_cluster/health') + t.equal(req.url, '/_cluster/health') res.setHeader('content-type', 'application/json') res.end(JSON.stringify({ hello: 'world' })) }) @@ -142,7 +142,7 @@ test('https basic authentication', async t => { }) const response = await client.cluster.health() - t.deepEqual(response.body, { hello: 'world' }) + t.same(response.body, { hello: 'world' }) server.close() proxy.close() diff --git a/test/acceptance/resurrect.test.js b/test/acceptance/resurrect.test.js index d9bfd6112..432929852 100644 --- a/test/acceptance/resurrect.test.js +++ b/test/acceptance/resurrect.test.js @@ -56,11 +56,11 @@ test('Should execute the recurrect API with the ping strategy', t => { client.on(events.RESURRECT, (err, meta) => { t.error(err) - t.strictEqual(meta.strategy, 'ping') - t.false(meta.isAlive) - t.strictEqual(meta.connection.id, 
'node0') - t.strictEqual(meta.name, 'elasticsearch-js') - t.deepEqual(meta.request, { id: 2 }) + t.equal(meta.strategy, 'ping') + t.notOk(meta.isAlive) + t.equal(meta.connection.id, 'node0') + t.equal(meta.name, 'elasticsearch-js') + t.same(meta.request, { id: 2 }) }) q.add((q, done) => { @@ -117,14 +117,14 @@ test('Resurrect a node and handle 502/3/4 status code', t => { let idCount = 2 client.on(events.RESURRECT, (err, meta) => { t.error(err) - t.strictEqual(meta.strategy, 'ping') - t.strictEqual(meta.connection.id, 'node0') - t.strictEqual(meta.name, 'elasticsearch-js') - t.deepEqual(meta.request, { id: idCount++ }) + t.equal(meta.strategy, 'ping') + t.equal(meta.connection.id, 'node0') + t.equal(meta.name, 'elasticsearch-js') + t.same(meta.request, { id: idCount++ }) if (count < 4) { - t.false(meta.isAlive) + t.notOk(meta.isAlive) } else { - t.true(meta.isAlive) + t.ok(meta.isAlive) } }) @@ -179,11 +179,11 @@ test('Should execute the recurrect API with the optimistic strategy', t => { client.on(events.RESURRECT, (err, meta) => { t.error(err) - t.strictEqual(meta.strategy, 'optimistic') - t.true(meta.isAlive) - t.strictEqual(meta.connection.id, 'node0') - t.strictEqual(meta.name, 'elasticsearch-js') - t.deepEqual(meta.request, { id: 2 }) + t.equal(meta.strategy, 'optimistic') + t.ok(meta.isAlive) + t.equal(meta.connection.id, 'node0') + t.equal(meta.name, 'elasticsearch-js') + t.same(meta.request, { id: 2 }) }) q.add((q, done) => { diff --git a/test/acceptance/sniff.test.js b/test/acceptance/sniff.test.js index 89008ce21..ee18c9298 100644 --- a/test/acceptance/sniff.test.js +++ b/test/acceptance/sniff.test.js @@ -43,11 +43,11 @@ test('Should update the connection pool', t => { const client = new Client({ node: nodes[Object.keys(nodes)[0]].url }) - t.strictEqual(client.connectionPool.size, 1) + t.equal(client.connectionPool.size, 1) client.on(events.SNIFF, (err, request) => { t.error(err) - t.strictEqual( + t.equal( request.meta.sniff.reason, Transport.sniffReasons.DEFAULT ) @@ -56,14 +56,14 @@ test('Should update the connection pool', t => { // run the sniffer client.transport.sniff((err, hosts) => { t.error(err) - t.strictEqual(hosts.length, 4) + t.equal(hosts.length, 4) const ids = Object.keys(nodes) for (let i = 0; i < hosts.length; i++) { const id = ids[i] // the first node will be an update of the existing one if (id === 'node0') { - t.deepEqual(hosts[i], { + t.same(hosts[i], { url: new URL(nodes[id].url), id: id, roles: { @@ -74,7 +74,7 @@ test('Should update the connection pool', t => { } }) } else { - t.deepEqual(hosts[i], { + t.same(hosts[i], { url: new URL(nodes[id].url), id: id, roles: { @@ -90,7 +90,7 @@ test('Should update the connection pool', t => { } } - t.strictEqual(client.connectionPool.size, 4) + t.equal(client.connectionPool.size, 4) }) t.teardown(shutdown) }) @@ -103,11 +103,11 @@ test('Should handle hostnames in publish_address', t => { const client = new Client({ node: nodes[Object.keys(nodes)[0]].url }) - t.strictEqual(client.connectionPool.size, 1) + t.equal(client.connectionPool.size, 1) client.on(events.SNIFF, (err, request) => { t.error(err) - t.strictEqual( + t.equal( request.meta.sniff.reason, Transport.sniffReasons.DEFAULT ) @@ -116,14 +116,14 @@ test('Should handle hostnames in publish_address', t => { // run the sniffer client.transport.sniff((err, hosts) => { t.error(err) - t.strictEqual(hosts.length, 4) + t.equal(hosts.length, 4) for (let i = 0; i < hosts.length; i++) { // the first node will be an update of the existing one - 
t.strictEqual(hosts[i].url.hostname, 'localhost') + t.equal(hosts[i].url.hostname, 'localhost') } - t.strictEqual(client.connectionPool.size, 4) + t.equal(client.connectionPool.size, 4) }) t.teardown(shutdown) }) @@ -144,21 +144,21 @@ test('Sniff interval', t => { client.on(events.SNIFF, (err, request) => { t.error(err) const { hosts, reason } = request.meta.sniff - t.strictEqual( + t.equal( client.connectionPool.size, hosts.length ) - t.strictEqual(reason, Transport.sniffReasons.SNIFF_INTERVAL) + t.equal(reason, Transport.sniffReasons.SNIFF_INTERVAL) }) - t.strictEqual(client.connectionPool.size, 1) + t.equal(client.connectionPool.size, 1) q.add((q, done) => { clock.tick(51) client.info(err => { t.error(err) waitSniffEnd(() => { - t.strictEqual(client.connectionPool.size, 4) + t.equal(client.connectionPool.size, 4) done() }) }) @@ -173,7 +173,7 @@ test('Sniff interval', t => { client.info(err => { t.error(err) waitSniffEnd(() => { - t.strictEqual(client.connectionPool.size, 3) + t.equal(client.connectionPool.size, 3) done() }) }) @@ -208,14 +208,14 @@ test('Sniff on start', t => { client.on(events.SNIFF, (err, request) => { t.error(err) const { hosts, reason } = request.meta.sniff - t.strictEqual( + t.equal( client.connectionPool.size, hosts.length ) - t.strictEqual(reason, Transport.sniffReasons.SNIFF_ON_START) + t.equal(reason, Transport.sniffReasons.SNIFF_ON_START) }) - t.strictEqual(client.connectionPool.size, 1) + t.equal(client.connectionPool.size, 1) t.teardown(shutdown) }) }) @@ -238,10 +238,10 @@ test('Should not close living connections', t => { Connection: MyConnection }) - t.strictEqual(client.connectionPool.size, 1) + t.equal(client.connectionPool.size, 1) client.transport.sniff((err, hosts) => { t.error(err) - t.strictEqual( + t.equal( client.connectionPool.size, hosts.length ) @@ -276,16 +276,16 @@ test('Sniff on connection fault', t => { Connection: MyConnection }) - t.strictEqual(client.connectionPool.size, 2) + t.equal(client.connectionPool.size, 2) // this event will be triggered by the connection fault client.on(events.SNIFF, (err, request) => { t.error(err) const { hosts, reason } = request.meta.sniff - t.strictEqual( + t.equal( client.connectionPool.size, hosts.length ) - t.strictEqual(reason, Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT) + t.equal(reason, Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT) }) client.info((err, result) => { diff --git a/test/integration/helpers/bulk.test.js b/test/integration/helpers/bulk.test.js index 1966f39b1..011f524c3 100644 --- a/test/integration/helpers/bulk.test.js +++ b/test/integration/helpers/bulk.test.js @@ -102,9 +102,9 @@ test('bulk index with custom id', async t => { id: '19273860' // id of document n° 4242 }) - t.strictEqual(body._index, INDEX) - t.strictEqual(body._id, '19273860') - t.strictEqual(body._source.id, '19273860') + t.equal(body._index, INDEX) + t.equal(body._id, '19273860') + t.equal(body._source.id, '19273860') }) test('abort the operation on document drop', async t => { @@ -113,9 +113,9 @@ test('abort the operation on document drop', async t => { datasource: stream.pipe(split(JSON.parse)), concurrency: 1, onDrop (doc) { - t.strictEqual(doc.status, 400) - t.strictEqual(doc.error.type, 'mapper_parsing_exception') - t.strictEqual(doc.document.id, '45924372') + t.equal(doc.status, 400) + t.equal(doc.error.type, 'mapper_parsing_exception') + t.equal(doc.document.id, '45924372') b.abort() }, onDocument (doc) { @@ -135,7 +135,7 @@ test('abort the operation on document drop', async t => { const result = await 
b t.type(result.time, 'number') t.type(result.bytes, 'number') - t.strictEqual(result.total - 1, result.successful) + t.equal(result.total - 1, result.successful) t.match(result, { retry: 0, failed: 1, diff --git a/test/integration/helpers/msearch.test.js b/test/integration/helpers/msearch.test.js index e387b4fa9..c9c726ecc 100644 --- a/test/integration/helpers/msearch.test.js +++ b/test/integration/helpers/msearch.test.js @@ -62,7 +62,7 @@ test('Basic', t => { { query: { match: { title: 'javascript' } } }, (err, result) => { t.error(err) - t.strictEqual(result.body.hits.total.value, 106) + t.equal(result.body.hits.total.value, 106) } ) @@ -71,7 +71,7 @@ test('Basic', t => { { query: { match: { title: 'ruby' } } }, (err, result) => { t.error(err) - t.strictEqual(result.body.hits.total.value, 29) + t.equal(result.body.hits.total.value, 29) } ) @@ -87,7 +87,7 @@ test('Bad request', t => { { query: { match: { title: 'javascript' } } }, (err, result) => { t.error(err) - t.strictEqual(result.body.hits.total.value, 106) + t.equal(result.body.hits.total.value, 106) } ) @@ -95,7 +95,7 @@ test('Bad request', t => { { index: INDEX }, { query: { foo: { title: 'ruby' } } }, (err, result) => { - t.true(err instanceof errors.ResponseError) + t.ok(err instanceof errors.ResponseError) } ) @@ -112,7 +112,7 @@ test('Send multiple request concurrently over the concurrency limit', t => { { query: { match: { title: 'javascript' } } }, (err, result) => { t.error(err) - t.strictEqual(result.body.hits.total.value, 106) + t.equal(result.body.hits.total.value, 106) } ) } diff --git a/test/integration/helpers/scroll.test.js b/test/integration/helpers/scroll.test.js index e7777a4b7..e197ce21a 100644 --- a/test/integration/helpers/scroll.test.js +++ b/test/integration/helpers/scroll.test.js @@ -69,10 +69,10 @@ test('search helper', async t => { for await (const search of scrollSearch) { count += 1 for (const doc of search.documents) { - t.true(doc.title.toLowerCase().includes('javascript')) + t.ok(doc.title.toLowerCase().includes('javascript')) } } - t.strictEqual(count, 11) + t.equal(count, 11) }) test('clear a scroll search', async t => { @@ -94,7 +94,7 @@ test('clear a scroll search', async t => { search.clear() } } - t.strictEqual(count, 2) + t.equal(count, 2) }) test('scroll documents', async t => { @@ -112,7 +112,7 @@ test('scroll documents', async t => { let count = 0 for await (const doc of scrollSearch) { count += 1 - t.true(doc.title.toLowerCase().includes('javascript')) + t.ok(doc.title.toLowerCase().includes('javascript')) } - t.strictEqual(count, 106) + t.equal(count, 106) }) diff --git a/test/integration/helpers/search.test.js b/test/integration/helpers/search.test.js index 247fc9ac9..d4aa57c9a 100644 --- a/test/integration/helpers/search.test.js +++ b/test/integration/helpers/search.test.js @@ -64,8 +64,8 @@ test('search helper', async t => { } } }) - t.strictEqual(results.length, 10) + t.equal(results.length, 10) for (const result of results) { - t.true(result.title.toLowerCase().includes('javascript')) + t.ok(result.title.toLowerCase().includes('javascript')) } }) diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js index 7dbd7c158..ef577210a 100644 --- a/test/integration/test-runner.js +++ b/test/integration/test-runner.js @@ -594,7 +594,7 @@ function match (val1, val2, action) { // tap.match(val1, new RegExp(regStr, 'm'), `should match pattern provided: ${val2}, action: ${JSON.stringify(action)}`) // everything else } else { - assert.strictEqual(val1, val2, `should be 
equal: ${val1} - ${val2}, action: ${JSON.stringify(action)}`) + assert.equal(val1, val2, `should be equal: ${val1} - ${val2}, action: ${JSON.stringify(action)}`) } } @@ -654,9 +654,9 @@ function gte (val1, val2) { */ function length (val, len) { if (typeof val === 'string' || Array.isArray(val)) { - assert.strictEqual(val.length, len) + assert.equal(val.length, len) } else if (typeof val === 'object' && val !== null) { - assert.strictEqual(Object.keys(val).length, len) + assert.equal(Object.keys(val).length, len) } else { assert.fail(`length: the given value is invalid: ${val}`) } diff --git a/test/unit/api-async.js b/test/unit/api-async.js index 437c00abf..fb0bb1b80 100644 --- a/test/unit/api-async.js +++ b/test/unit/api-async.js @@ -42,7 +42,7 @@ function runAsyncTest (test) { type: 'doc', q: 'foo:bar' }) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) } catch (err) { t.fail(err) } diff --git a/test/unit/api.test.js b/test/unit/api.test.js index b5a7fbb57..667ec66ab 100644 --- a/test/unit/api.test.js +++ b/test/unit/api.test.js @@ -41,7 +41,7 @@ test('Basic (callback)', t => { q: 'foo:bar' }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -66,7 +66,7 @@ test('Basic (promises)', t => { q: 'foo:bar' }) .then(({ body }) => { - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) .catch(t.fail) @@ -169,7 +169,7 @@ test('Abort method (callback)', t => { q: 'foo:bar' }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) @@ -197,7 +197,7 @@ test('Abort method (promises)', t => { request .then(({ body }) => { - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) .catch(t.fail) @@ -226,7 +226,7 @@ test('Basic (options and callback)', t => { requestTimeout: 10000 }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -253,7 +253,7 @@ test('Basic (options and promises)', t => { requestTimeout: 10000 }) .then(({ body }) => { - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) .catch(t.fail) }) @@ -264,7 +264,7 @@ test('If the API uses the same key for both url and query parameter, the url should win', t => { t.plan(2) function handler (req, res) { - t.strictEqual(req.url, '/index/_bulk') + t.equal(req.url, '/index/_bulk') res.setHeader('Content-Type', 'application/json;utf=8') res.end(JSON.stringify({ hello: 'world' })) } diff --git a/test/unit/base-connection-pool.test.js b/test/unit/base-connection-pool.test.js index 268f68e70..6fe8c7206 100644 --- a/test/unit/base-connection-pool.test.js +++ b/test/unit/base-connection-pool.test.js @@ -30,7 +30,7 @@ test('API', t => { const href = 'http://localhost:9200/' pool.addConnection(href) t.ok(pool.connections.find(c => c.id === href) instanceof Connection) - t.strictEqual(pool.connections.find(c => c.id === href).status, Connection.statuses.ALIVE) + t.equal(pool.connections.find(c => c.id === href).status, Connection.statuses.ALIVE) t.end() }) @@ -42,7 +42,7 @@ test('API', t => { pool.addConnection(href) t.fail('Should throw') } catch (err) { - t.is(err.message, `Connection with id '${href}' is already present`) + t.equal(err.message, `Connection with id '${href}' is already present`) } t.end() }) @@ -52,8 +52,8 @@ test('API', t => { const href =
'http://us"er:p@assword@localhost:9200/' pool.addConnection(href) const conn = pool.connections[0] - t.strictEqual(conn.url.username, 'us%22er') - t.strictEqual(conn.url.password, 'p%40assword') + t.equal(conn.url.username, 'us%22er') + t.equal(conn.url.password, 'p%40assword') t.match(conn.headers, { authorization: 'Basic ' + Buffer.from('us"er:p@assword').toString('base64') }) @@ -66,7 +66,7 @@ test('API', t => { let connection = pool.addConnection(href) t.same(pool.markDead(connection), pool) connection = pool.connections.find(c => c.id === href) - t.strictEqual(connection.status, Connection.statuses.ALIVE) + t.equal(connection.status, Connection.statuses.ALIVE) t.end() }) @@ -76,7 +76,7 @@ test('API', t => { let connection = pool.addConnection(href) t.same(pool.markAlive(connection), pool) connection = pool.connections.find(c => c.id === href) - t.strictEqual(connection.status, Connection.statuses.ALIVE) + t.equal(connection.status, Connection.statuses.ALIVE) t.end() }) @@ -88,7 +88,7 @@ test('API', t => { pool.getConnection() t.fail('Should fail') } catch (err) { - t.is(err.message, 'getConnection must be implemented') + t.equal(err.message, 'getConnection must be implemented') } t.end() }) @@ -98,7 +98,7 @@ test('API', t => { const href = 'http://localhost:9200/' const connection = pool.addConnection(href) pool.removeConnection(connection) - t.strictEqual(pool.size, 0) + t.equal(pool.size, 0) t.end() }) @@ -107,7 +107,7 @@ test('API', t => { pool.addConnection('http://localhost:9200/') pool.addConnection('http://localhost:9201/') pool.empty(() => { - t.strictEqual(pool.size, 0) + t.equal(pool.size, 0) t.end() }) }) @@ -115,7 +115,7 @@ test('API', t => { t.test('urlToHost', t => { const pool = new BaseConnectionPool({ Connection }) const url = 'http://localhost:9200/' - t.deepEqual( + t.same( pool.urlToHost(url), { url: new URL(url) } ) @@ -140,7 +140,7 @@ test('API', t => { } } - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ + t.same(pool.nodesToHost(nodes, 'http:'), [{ url: new URL('http://127.0.0.1:9200/'), id: 'a1', roles: { @@ -160,8 +160,8 @@ test('API', t => { } }]) - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, '127.0.0.1:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, '127.0.0.1:9201') + t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, '127.0.0.1:9200') + t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, '127.0.0.1:9201') t.end() }) @@ -182,7 +182,7 @@ test('API', t => { } } - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ + t.same(pool.nodesToHost(nodes, 'http:'), [{ url: new URL('http://[::1]:9200/'), id: 'a1', roles: { @@ -202,8 +202,8 @@ test('API', t => { } }]) - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, '[::1]:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, '[::1]:9201') + t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, '[::1]:9200') + t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, '[::1]:9201') t.end() }) @@ -224,7 +224,7 @@ test('API', t => { } } - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ + t.same(pool.nodesToHost(nodes, 'http:'), [{ url: new URL('http://example.com:9200/'), id: 'a1', roles: { @@ -244,8 +244,8 @@ test('API', t => { } }]) - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') +
t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') t.end() }) @@ -266,7 +266,7 @@ test('API', t => { } } - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ + t.same(pool.nodesToHost(nodes, 'http:'), [{ url: new URL('http://example.com:9200/'), id: 'a1', roles: { @@ -286,8 +286,8 @@ test('API', t => { } }]) - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') + t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') + t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') t.end() }) @@ -308,8 +308,8 @@ test('API', t => { } } - t.strictEqual(pool.nodesToHost(nodes, 'https:')[0].url.protocol, 'https:') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.protocol, 'http:') + t.equal(pool.nodesToHost(nodes, 'https:')[0].url.protocol, 'https:') + t.equal(pool.nodesToHost(nodes, 'http:')[1].url.protocol, 'http:') t.end() }) @@ -425,13 +425,13 @@ test('API', t => { // roles will never be updated, we only use it to do // a dummy check to see if the connection has been updated - t.deepEqual(pool.connections.find(c => c.id === 'a1').roles, { + t.same(pool.connections.find(c => c.id === 'a1').roles, { master: true, data: true, ingest: true, ml: false }) - t.strictEqual(pool.connections.find(c => c.id === 'http://127.0.0.1:9200/'), undefined) + t.equal(pool.connections.find(c => c.id === 'http://127.0.0.1:9200/'), undefined) }) t.test('Add a new connection', t => { @@ -480,9 +480,9 @@ test('API', t => { roles: null }]) - t.false(pool.connections.find(c => c.id === 'a1')) - t.true(pool.connections.find(c => c.id === 'a2')) - t.true(pool.connections.find(c => c.id === 'a3')) + t.notOk(pool.connections.find(c => c.id === 'a1')) + t.ok(pool.connections.find(c => c.id === 'a2')) + t.ok(pool.connections.find(c => c.id === 'a3')) }) t.end() }) @@ -497,7 +497,7 @@ test('API', t => { pool.createConnection('http://localhost:9200/') t.fail('Should throw') } catch (err) { - t.is(err.message, 'Connection with id \'http://localhost:9200/\' is already present') + t.equal(err.message, 'Connection with id \'http://localhost:9200/\' is already present') } }) diff --git a/test/unit/child.test.js b/test/unit/child.test.js index 1cb3d2b66..626d8941b 100644 --- a/test/unit/child.test.js +++ b/test/unit/child.test.js @@ -76,7 +76,7 @@ test('Should create a child client (timeout check)', t => { client.info((err, res) => { t.error(err) child.info((err, res) => { - t.true(err instanceof errors.TimeoutError) + t.ok(err instanceof errors.TimeoutError) server.stop() }) }) @@ -238,7 +238,7 @@ test('Should create a child client (generateRequestId check)', t => { let count = 0 client.on('request', (err, { meta }) => { t.error(err) - t.strictEqual( + t.equal( meta.request.id, count++ === 0 ? 'trace-1-0' : 'trace-2-0' ) @@ -263,13 +263,13 @@ test('Should create a child client (name check)', t => { name: 'child' }) - t.strictEqual(client.name, 'parent') - t.strictEqual(child.name, 'child') + t.equal(client.name, 'parent') + t.equal(child.name, 'child') let count = 0 client.on('request', (err, { meta }) => { t.error(err) - t.strictEqual( + t.equal( meta.name, count++ === 0 ?
'parent' : 'child' ) diff --git a/test/unit/client.test.js b/test/unit/client.test.js index a0a5c5d72..9c2c551ba 100644 --- a/test/unit/client.test.js +++ b/test/unit/client.test.js @@ -111,7 +111,7 @@ test('Configure host', t => { resurrectTimeout: 0 }) - t.deepEqual(pool.connections.find(c => c.id === 'node').roles, { + t.same(pool.connections.find(c => c.id === 'node').roles, { master: true, data: false, ingest: false, @@ -152,7 +152,7 @@ test('Configure host', t => { resurrectTimeout: 0 }) - t.deepEqual(pool.connections.find(c => c.id === 'node1').roles, { + t.same(pool.connections.find(c => c.id === 'node1').roles, { master: true, data: false, ingest: false, @@ -167,7 +167,7 @@ test('Configure host', t => { resurrectTimeout: 0 }) - t.deepEqual(pool.connections.find(c => c.id === 'node2').roles, { + t.same(pool.connections.find(c => c.id === 'node2').roles, { master: false, data: true, ingest: false, @@ -226,7 +226,7 @@ test('Authentication', t => { client.info((err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -250,7 +250,7 @@ test('Authentication', t => { client.info((err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -278,7 +278,7 @@ test('Authentication', t => { client.info((err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -307,12 +307,12 @@ test('Authentication', t => { } }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) first = false client.info((err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -346,12 +346,12 @@ test('Authentication', t => { } }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) first = false client.info((err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -383,7 +383,7 @@ test('Authentication', t => { client.info((err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -410,7 +410,7 @@ test('Authentication', t => { client.info((err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -439,12 +439,12 @@ test('Authentication', t => { } }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) first = false client.info((err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -477,12 +477,12 @@ test('Authentication', t => { } }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) first = false client.info((err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -510,7 +510,7 @@ test('Authentication', t => { client.info((err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -539,7 +539,7 @@ test('Authentication', t => { client.info((err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -575,7 
+575,7 @@ test('Custom headers per request', t => { } }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -625,8 +625,8 @@ test('Extend client APIs', t => { const client = new Client({ node: 'http://localhost:9200/' }) client.extend('method', ({ makeRequest, result, ConfigurationError }) => { t.type(makeRequest, 'function') - t.true(new ConfigurationError() instanceof Error) - t.deepEqual(result, { + t.ok(new ConfigurationError() instanceof Error) + t.same(result, { body: null, statusCode: null, headers: null, @@ -634,8 +634,8 @@ test('Extend client APIs', t => { }) return (params, options) => { - t.deepEqual(params, { you_know: 'for search' }) - t.deepEqual(options, { winter: 'is coming' }) + t.same(params, { you_know: 'for search' }) + t.same(options, { winter: 'is coming' }) } }) @@ -651,8 +651,8 @@ test('Extend client APIs', t => { const client = new Client({ node: 'http://localhost:9200/' }) client.extend('namespace.method', ({ makeRequest, result, ConfigurationError }) => { t.type(makeRequest, 'function') - t.true(new ConfigurationError() instanceof Error) - t.deepEqual(result, { + t.ok(new ConfigurationError() instanceof Error) + t.same(result, { body: null, statusCode: null, headers: null, @@ -660,8 +660,8 @@ test('Extend client APIs', t => { }) return (params, options) => { - t.deepEqual(params, { you_know: 'for search' }) - t.deepEqual(options, { winter: 'is coming' }) + t.same(params, { you_know: 'for search' }) + t.same(options, { winter: 'is coming' }) } }) @@ -677,8 +677,8 @@ test('Extend client APIs', t => { const client = new Client({ node: 'http://localhost:9200/' }) client.extend('namespace.method1', ({ makeRequest, result, ConfigurationError }) => { t.type(makeRequest, 'function') - t.true(new ConfigurationError() instanceof Error) - t.deepEqual(result, { + t.ok(new ConfigurationError() instanceof Error) + t.same(result, { body: null, statusCode: null, headers: null, @@ -686,15 +686,15 @@ test('Extend client APIs', t => { }) return (params, options) => { - t.deepEqual(params, { you_know: 'for search' }) - t.deepEqual(options, { winter: 'is coming' }) + t.same(params, { you_know: 'for search' }) + t.same(options, { winter: 'is coming' }) } }) client.extend('namespace.method2', ({ makeRequest, result, ConfigurationError }) => { t.type(makeRequest, 'function') - t.true(new ConfigurationError() instanceof Error) - t.deepEqual(result, { + t.ok(new ConfigurationError() instanceof Error) + t.same(result, { body: null, statusCode: null, headers: null, @@ -702,8 +702,8 @@ test('Extend client APIs', t => { }) return (params, options) => { - t.deepEqual(params, { you_know: 'for search' }) - t.deepEqual(options, { winter: 'is coming' }) + t.same(params, { you_know: 'for search' }) + t.same(options, { winter: 'is coming' }) } }) @@ -726,7 +726,7 @@ test('Extend client APIs', t => { client.extend('index', () => {}) t.fail('Should throw') } catch (err) { - t.is(err.message, 'The method "index" already exists') + t.equal(err.message, 'The method "index" already exists') } }) @@ -738,7 +738,7 @@ test('Extend client APIs', t => { client.extend('indices.delete', () => {}) t.fail('Should throw') } catch (err) { - t.is(err.message, 'The method "delete" already exists on namespace "indices"') + t.equal(err.message, 'The method "delete" already exists on namespace "indices"') } }) @@ -769,8 +769,8 @@ test('Extend client APIs', t => { class MyTransport extends Transport { request
(params, options) { - t.deepEqual(params, { you_know: 'for search' }) - t.deepEqual(options, { winter: 'is coming' }) + t.same(params, { you_know: 'for search' }) + t.same(options, { winter: 'is coming' }) } } @@ -803,7 +803,7 @@ test('Extend client APIs', t => { { winter: 'is coming' }, (err, res) => { t.error(err) - t.deepEqual(res, { hello: 'world' }) + t.same(res, { hello: 'world' }) } ) }) @@ -825,7 +825,7 @@ test('Extend client APIs', t => { { you_know: 'for search' }, { winter: 'is coming' } ) - .then(res => t.deepEqual(res, { hello: 'world' })) + .then(res => t.same(res, { hello: 'world' })) .catch(err => t.fail(err)) }) @@ -863,9 +863,9 @@ test('Elastic cloud config', t => { } }) - t.strictEqual(client.transport.compression, 'gzip') - t.strictEqual(client.transport.suggestCompression, true) - t.deepEqual(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) + t.equal(client.transport.compression, 'gzip') + t.equal(client.transport.suggestCompression, true) + t.same(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) }) t.test('Without kibana component', t => { @@ -898,9 +898,9 @@ test('Elastic cloud config', t => { } }) - t.strictEqual(client.transport.compression, 'gzip') - t.strictEqual(client.transport.suggestCompression, true) - t.deepEqual(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) + t.equal(client.transport.compression, 'gzip') + t.equal(client.transport.suggestCompression, true) + t.same(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) }) t.test('Auth as separate option', t => { @@ -935,9 +935,9 @@ test('Elastic cloud config', t => { } }) - t.strictEqual(client.transport.compression, 'gzip') - t.strictEqual(client.transport.suggestCompression, true) - t.deepEqual(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) + t.equal(client.transport.compression, 'gzip') + t.equal(client.transport.suggestCompression, true) + t.same(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) }) t.test('ApiKey should take precedence over basic auth', t => { @@ -973,9 +973,9 @@ test('Elastic cloud config', t => { } }) - t.strictEqual(client.transport.compression, 'gzip') - t.strictEqual(client.transport.suggestCompression, true) - t.deepEqual(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) + t.equal(client.transport.compression, 'gzip') + t.equal(client.transport.suggestCompression, true) + t.same(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) }) t.test('Override default options', t => { @@ -995,9 +995,9 @@ test('Elastic cloud config', t => { }) t.ok(client.connectionPool instanceof CloudConnectionPool) - t.strictEqual(client.transport.compression, false) - t.strictEqual(client.transport.suggestCompression, false) - t.deepEqual(client.connectionPool._ssl, { secureProtocol: 'TLSv1_1_method' }) + t.equal(client.transport.compression, false) + t.equal(client.transport.suggestCompression, false) + t.same(client.connectionPool._ssl, { secureProtocol: 'TLSv1_1_method' }) }) t.end() @@ -1008,7 +1008,7 @@ test('Opaque Id support', t => { t.plan(3) function handler (req, res) { - t.strictEqual(req.headers['x-opaque-id'], undefined) + t.equal(req.headers['x-opaque-id'], undefined) res.setHeader('Content-Type', 'application/json;utf=8') res.end(JSON.stringify({ hello: 'world' })) } @@ -1023,7 +1023,7 @@ test('Opaque Id support', t => { q: 'foo:bar' }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -1033,7 +1033,7 @@ test('Opaque Id support', t => { t.plan(3) function handler (req, res) { - 
t.strictEqual(req.headers['x-opaque-id'], 'bar') + t.equal(req.headers['x-opaque-id'], 'bar') res.setHeader('Content-Type', 'application/json;utf=8') res.end(JSON.stringify({ hello: 'world' })) } @@ -1050,7 +1050,7 @@ test('Opaque Id support', t => { opaqueId: 'bar' }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -1060,7 +1060,7 @@ test('Opaque Id support', t => { t.plan(3) function handler (req, res) { - t.strictEqual(req.headers['x-opaque-id'], 'foo-bar') + t.equal(req.headers['x-opaque-id'], 'foo-bar') res.setHeader('Content-Type', 'application/json;utf=8') res.end(JSON.stringify({ hello: 'world' })) } @@ -1078,7 +1078,7 @@ test('Opaque Id support', t => { opaqueId: 'bar' }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -1091,8 +1091,8 @@ test('Correctly handles the same header cased differently', t => { t.plan(4) function handler (req, res) { - t.strictEqual(req.headers.authorization, 'Basic foobar') - t.strictEqual(req.headers.foo, 'baz') + t.equal(req.headers.authorization, 'Basic foobar') + t.equal(req.headers.foo, 'baz') res.setHeader('Content-Type', 'application/json;utf=8') res.end(JSON.stringify({ hello: 'world' })) } @@ -1119,7 +1119,7 @@ test('Correctly handles the same header cased differently', t => { } }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -1144,7 +1144,7 @@ test('Random selector', t => { q: 'foo:bar' }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -1154,7 +1154,7 @@ test('Disable keep alive agent', t => { t.plan(3) function handler (req, res) { - t.strictEqual(req.headers.connection, 'close') + t.equal(req.headers.connection, 'close') res.setHeader('Content-Type', 'application/json;utf=8') res.end(JSON.stringify({ hello: 'world' })) } @@ -1170,7 +1170,7 @@ test('Disable keep alive agent', t => { q: 'foo:bar' }, (err, { body }) => { t.error(err) - t.deepEqual(body, { hello: 'world' }) + t.same(body, { hello: 'world' }) server.stop() }) }) @@ -1184,7 +1184,7 @@ test('name property as string', t => { name: 'client-name' }) - t.strictEqual(client.name, 'client-name') + t.equal(client.name, 'client-name') }) test('name property as symbol', t => { @@ -1196,7 +1196,7 @@ test('name property as symbol', t => { name: symbol }) - t.strictEqual(client.name, symbol) + t.equal(client.name, symbol) }) // The nodejs http agent will try to wait for the whole @@ -1218,8 +1218,8 @@ test('Bad content length', t => { const client = new Client({ node: `http://localhost:${port}`, maxRetries: 1 }) client.info((err, { body }) => { t.ok(err instanceof errors.ConnectionError) - t.is(err.message, 'Response aborted while reading the body') - t.strictEqual(count, 2) + t.equal(err.message, 'Response aborted while reading the body') + t.equal(count, 2) server.stop() }) }) @@ -1244,8 +1244,8 @@ test('Socket destryed while reading the body', t => { const client = new Client({ node: `http://localhost:${port}`, maxRetries: 1 }) client.info((err, { body }) => { t.ok(err instanceof errors.ConnectionError) - t.is(err.message, 'Response aborted while reading the body') - t.strictEqual(count, 2) + t.equal(err.message, 'Response aborted while reading the body') + t.equal(count, 2) server.stop() }) }) @@ -1274,8 +1274,8 @@ test('Content length too big (buffer)', t 
=> { const client = new Client({ node: 'http://localhost:9200/', Connection: MockConnection }) client.info((err, result) => { t.ok(err instanceof errors.RequestAbortedError) - t.is(err.message, `The content length (${buffer.constants.MAX_LENGTH + 10}) is bigger than the maximum allowed buffer (${buffer.constants.MAX_LENGTH})`) - t.strictEqual(result.meta.attempts, 0) + t.equal(err.message, `The content length (${buffer.constants.MAX_LENGTH + 10}) is bigger than the maximum allowed buffer (${buffer.constants.MAX_LENGTH})`) + t.equal(result.meta.attempts, 0) }) }) @@ -1301,8 +1301,8 @@ test('Content length too big (string)', t => { const client = new Client({ node: 'http://localhost:9200/', Connection: MockConnection }) client.info((err, result) => { t.ok(err instanceof errors.RequestAbortedError) - t.is(err.message, `The content length (${buffer.constants.MAX_STRING_LENGTH + 10}) is bigger than the maximum allowed string (${buffer.constants.MAX_STRING_LENGTH})`) - t.strictEqual(result.meta.attempts, 0) + t.equal(err.message, `The content length (${buffer.constants.MAX_STRING_LENGTH + 10}) is bigger than the maximum allowed string (${buffer.constants.MAX_STRING_LENGTH})`) + t.equal(result.meta.attempts, 0) }) }) @@ -1389,7 +1389,7 @@ test('Prototype poisoning protection enabled by default', t => { }) client.info((err, result) => { - t.true(err instanceof errors.DeserializationError) + t.ok(err instanceof errors.DeserializationError) }) }) diff --git a/test/unit/cloud-connection-pool.test.js b/test/unit/cloud-connection-pool.test.js index ccefcc337..76c135509 100644 --- a/test/unit/cloud-connection-pool.test.js +++ b/test/unit/cloud-connection-pool.test.js @@ -33,7 +33,7 @@ test('Should expose a cloudConnection property', t => { test('Get connection should always return cloudConnection', t => { const pool = new CloudConnectionPool({ Connection }) const conn = pool.addConnection('http://localhost:9200/') - t.deepEqual(pool.getConnection(), conn) + t.same(pool.getConnection(), conn) t.end() }) @@ -42,7 +42,7 @@ test('pool.empty should reset cloudConnection', t => { pool.addConnection('http://localhost:9200/') t.ok(pool.cloudConnection instanceof Connection) pool.empty(() => { - t.strictEqual(pool.cloudConnection, null) + t.equal(pool.cloudConnection, null) t.end() }) }) diff --git a/test/unit/connection-pool.test.js b/test/unit/connection-pool.test.js index 6569f59f2..7a75879dd 100644 --- a/test/unit/connection-pool.test.js +++ b/test/unit/connection-pool.test.js @@ -32,8 +32,8 @@ test('API', t => { const href = 'http://localhost:9200/' pool.addConnection(href) t.ok(pool.connections.find(c => c.id === href) instanceof Connection) - t.strictEqual(pool.connections.find(c => c.id === href).status, Connection.statuses.ALIVE) - t.deepEqual(pool.dead, []) + t.equal(pool.connections.find(c => c.id === href).status, Connection.statuses.ALIVE) + t.same(pool.dead, []) t.end() }) @@ -45,7 +45,7 @@ test('API', t => { pool.addConnection(href) t.fail('Should throw') } catch (err) { - t.is(err.message, `Connection with id '${href}' is already present`) + t.equal(err.message, `Connection with id '${href}' is already present`) } t.end() }) @@ -55,8 +55,8 @@ test('API', t => { const href = 'http://us"er:p@assword@localhost:9200/' pool.addConnection(href) const conn = pool.getConnection() - t.strictEqual(conn.url.username, 'us%22er') - t.strictEqual(conn.url.password,
'p%40assword') + t.equal(conn.url.username, 'us%22er') + t.equal(conn.url.password, 'p%40assword') t.match(conn.headers, { authorization: 'Basic ' + Buffer.from('us"er:p@assword').toString('base64') }) @@ -69,9 +69,9 @@ test('API', t => { let connection = pool.addConnection(href) pool.markDead(connection) connection = pool.connections.find(c => c.id === href) - t.strictEqual(connection.deadCount, 1) - t.true(connection.resurrectTimeout > 0) - t.deepEqual(pool.dead, [href]) + t.equal(connection.deadCount, 1) + t.ok(connection.resurrectTimeout > 0) + t.same(pool.dead, [href]) t.end() }) @@ -84,7 +84,7 @@ test('API', t => { pool.markDead(conn2) setTimeout(() => { pool.markDead(conn1) - t.deepEqual(pool.dead, [href2, href1]) + t.same(pool.dead, [href2, href1]) t.end() }, 10) }) @@ -93,7 +93,7 @@ test('API', t => { const pool = new ConnectionPool({ Connection, sniffEnabled: true }) pool.addConnection('http://localhost:9200/') pool.markDead({ id: 'foo-bar' }) - t.deepEqual(pool.dead, []) + t.same(pool.dead, []) t.end() }) @@ -104,10 +104,10 @@ test('API', t => { pool.markDead(connection) pool.markAlive(connection) connection = pool.connections.find(c => c.id === href) - t.strictEqual(connection.deadCount, 0) - t.strictEqual(connection.resurrectTimeout, 0) - t.strictEqual(connection.status, Connection.statuses.ALIVE) - t.deepEqual(pool.dead, []) + t.equal(connection.deadCount, 0) + t.equal(connection.resurrectTimeout, 0) + t.equal(connection.status, Connection.statuses.ALIVE) + t.same(pool.dead, []) t.end() }) @@ -129,12 +129,12 @@ test('API', t => { name: 'elasticsearch-js' } pool.resurrect(opts, (isAlive, connection) => { - t.true(isAlive) + t.ok(isAlive) connection = pool.connections.find(c => c.id === connection.id) - t.strictEqual(connection.deadCount, 0) - t.strictEqual(connection.resurrectTimeout, 0) - t.strictEqual(connection.status, Connection.statuses.ALIVE) - t.deepEqual(pool.dead, []) + t.equal(connection.deadCount, 0) + t.equal(connection.resurrectTimeout, 0) + t.equal(connection.status, Connection.statuses.ALIVE) + t.same(pool.dead, []) t.end() }) }) @@ -155,12 +155,12 @@ test('API', t => { name: 'elasticsearch-js' } pool.resurrect(opts, (isAlive, connection) => { - t.false(isAlive) + t.notOk(isAlive) connection = pool.connections.find(c => c.id === connection.id) - t.strictEqual(connection.deadCount, 2) - t.true(connection.resurrectTimeout > 0) - t.strictEqual(connection.status, Connection.statuses.DEAD) - t.deepEqual(pool.dead, [href]) + t.equal(connection.deadCount, 2) + t.ok(connection.resurrectTimeout > 0) + t.equal(connection.status, Connection.statuses.DEAD) + t.same(pool.dead, [href]) t.end() }) }) @@ -183,12 +183,12 @@ test('API', t => { name: 'elasticsearch-js' } pool.resurrect(opts, (isAlive, connection) => { - t.true(isAlive) + t.ok(isAlive) connection = pool.connections.find(c => c.id === connection.id) - t.strictEqual(connection.deadCount, 1) - t.true(connection.resurrectTimeout > 0) - t.strictEqual(connection.status, Connection.statuses.ALIVE) - t.deepEqual(pool.dead, []) + t.equal(connection.deadCount, 1) + t.ok(connection.resurrectTimeout > 0) + t.equal(connection.status, Connection.statuses.ALIVE) + t.same(pool.dead, []) t.end() }) }) @@ -211,10 +211,10 @@ test('API', t => { t.ok(isAlive === null) t.ok(connection === null) connection = pool.connections.find(c => c.id === href) - t.strictEqual(connection.deadCount, 1) - t.true(connection.resurrectTimeout > 0) - t.strictEqual(connection.status, Connection.statuses.DEAD) - t.deepEqual(pool.dead, [href])
t.equal(connection.status, Connection.statuses.DEAD) + t.same(pool.dead, [href]) t.end() }) }) @@ -238,7 +238,7 @@ test('API', t => { pool.addConnection([href1, href2]) const filter = node => node.id === href1 - t.strictEqual(pool.getConnection({ filter }).id, href1) + t.equal(pool.getConnection({ filter }).id, href1) t.end() }) @@ -266,7 +266,7 @@ test('API', t => { pool.markDead(conn) const filter = node => { - t.strictEqual(node.status, Connection.statuses.ALIVE) + t.equal(node.status, Connection.statuses.ALIVE) return true } pool.getConnection({ filter }) @@ -282,7 +282,7 @@ test('API', t => { pool.markDead(conn2) const conn = pool.getConnection() t.ok(conn instanceof Connection) - t.is(conn.status, 'dead') + t.equal(conn.status, 'dead') t.end() }) @@ -295,7 +295,7 @@ test('API', t => { const connection = pool.addConnection(href) t.ok(pool.getConnection() instanceof Connection) pool.removeConnection(connection) - t.strictEqual(pool.getConnection(), null) + t.equal(pool.getConnection(), null) t.end() }) @@ -304,8 +304,8 @@ test('API', t => { pool.addConnection('http://localhost:9200/') pool.addConnection('http://localhost:9201/') pool.empty(() => { - t.strictEqual(pool.size, 0) - t.deepEqual(pool.dead, []) + t.equal(pool.size, 0) + t.same(pool.dead, []) t.end() }) }) @@ -313,7 +313,7 @@ test('API', t => { t.test('urlToHost', t => { const pool = new ConnectionPool({ Connection }) const url = 'http://localhost:9200/' - t.deepEqual( + t.same( pool.urlToHost(url), { url: new URL(url) } ) @@ -338,7 +338,7 @@ test('API', t => { } } - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ + t.same(pool.nodesToHost(nodes, 'http:'), [{ url: new URL('http://127.0.0.1:9200/'), id: 'a1', roles: { @@ -358,8 +358,8 @@ test('API', t => { } }]) - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, '127.0.0.1:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, '127.0.0.1:9201') + t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, '127.0.0.1:9200') + t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, '127.0.0.1:9201') t.end() }) @@ -380,7 +380,7 @@ test('API', t => { } } - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ + t.same(pool.nodesToHost(nodes, 'http:'), [{ url: new URL('http://[::1]:9200/'), id: 'a1', roles: { @@ -400,8 +400,8 @@ test('API', t => { } }]) - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, '[::1]:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, '[::1]:9201') + t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, '[::1]:9200') + t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, '[::1]:9201') t.end() }) @@ -422,7 +422,7 @@ test('API', t => { } } - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ + t.same(pool.nodesToHost(nodes, 'http:'), [{ url: new URL('http://example.com:9200/'), id: 'a1', roles: { @@ -442,8 +442,8 @@ test('API', t => { } }]) - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') + t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') + t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') t.end() }) @@ -464,7 +464,7 @@ test('API', t => { } } - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ + t.same(pool.nodesToHost(nodes, 'http:'), [{ url: new URL('http://example.com:9200/'), id: 'a1', roles: { @@ -484,8 +484,8 @@ test('API', t => { } }]) - t.strictEqual(pool.nodesToHost(nodes,
'http:')[0].url.host, 'example.com:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') + t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') + t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') t.end() }) @@ -506,8 +506,8 @@ test('API', t => { } } - t.strictEqual(pool.nodesToHost(nodes, 'https:')[0].url.protocol, 'https:') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.protocol, 'http:') + t.equal(pool.nodesToHost(nodes, 'https:')[0].url.protocol, 'https:') + t.equal(pool.nodesToHost(nodes, 'http:')[1].url.protocol, 'http:') t.end() }) @@ -662,13 +662,13 @@ test('API', t => { // roles will never be updated, we only use it to do // a dummy check to see if the connection has been updated - t.deepEqual(pool.connections.find(c => c.id === 'a1').roles, { + t.same(pool.connections.find(c => c.id === 'a1').roles, { master: true, data: true, ingest: true, ml: false }) - t.strictEqual(pool.connections.find(c => c.id === 'http://127.0.0.1:9200/'), undefined) + t.equal(pool.connections.find(c => c.id === 'http://127.0.0.1:9200/'), undefined) }) t.test('Add a new connection', t => { @@ -717,9 +717,9 @@ test('API', t => { roles: null }]) - t.false(pool.connections.find(c => c.id === 'a1')) - t.true(pool.connections.find(c => c.id === 'a2')) - t.true(pool.connections.find(c => c.id === 'a3')) + t.notOk(pool.connections.find(c => c.id === 'a1')) + t.ok(pool.connections.find(c => c.id === 'a2')) + t.ok(pool.connections.find(c => c.id === 'a3')) }) t.test('Remove old connections (markDead)', t => { @@ -732,7 +732,7 @@ test('API', t => { }) pool.markDead(conn) - t.deepEqual(pool.dead, ['a1']) + t.same(pool.dead, ['a1']) pool.update([{ url: new URL('http://127.0.0.1:9200/'), @@ -744,10 +744,10 @@ test('API', t => { roles: null }]) - t.deepEqual(pool.dead, []) - t.false(pool.connections.find(c => c.id === 'a1')) - t.true(pool.connections.find(c => c.id === 'a2')) - t.true(pool.connections.find(c => c.id === 'a3')) + t.same(pool.dead, []) + t.notOk(pool.connections.find(c => c.id === 'a1')) + t.ok(pool.connections.find(c => c.id === 'a2')) + t.ok(pool.connections.find(c => c.id === 'a3')) }) t.end() }) @@ -761,14 +761,14 @@ test('Node selector', t => { t.plan(1) const pool = new ConnectionPool({ Connection }) pool.addConnection('http://localhost:9200/') - t.true(pool.getConnection({ selector: roundRobinSelector() }) instanceof Connection) + t.ok(pool.getConnection({ selector: roundRobinSelector() }) instanceof Connection) }) t.test('random', t => { t.plan(1) const pool = new ConnectionPool({ Connection }) pool.addConnection('http://localhost:9200/') - t.true(pool.getConnection({ selector: roundRobinSelector() }) instanceof Connection) + t.ok(pool.getConnection({ selector: roundRobinSelector() }) instanceof Connection) }) t.end() }) @@ -779,7 +779,7 @@ test('Node filter', t => { t.plan(1) const pool = new ConnectionPool({ Connection }) pool.addConnection({ url: new URL('http://localhost:9200/') }) - t.true(pool.getConnection({ filter: defaultNodeFilter }) instanceof Connection) + t.ok(pool.getConnection({ filter: defaultNodeFilter }) instanceof Connection) }) t.test('Should filter master only nodes', t => { @@ -794,7 +794,7 @@ test('Node filter', t => { ml: false } }) - t.strictEqual(pool.getConnection({ filter: defaultNodeFilter }), null) + t.equal(pool.getConnection({ filter: defaultNodeFilter }), null) }) t.end() diff --git a/test/unit/connection.test.js
b/test/unit/connection.test.js index 6b1b2e653..a951a9e6b 100644 --- a/test/unit/connection.test.js +++ b/test/unit/connection.test.js @@ -63,7 +63,7 @@ test('Basic (http)', t => { res.on('data', chunk => { payload += chunk }) res.on('error', err => t.fail(err)) res.on('end', () => { - t.strictEqual(payload, 'ok') + t.equal(payload, 'ok') server.stop() }) }) @@ -103,7 +103,7 @@ test('Basic (https)', t => { res.on('data', chunk => { payload += chunk }) res.on('error', err => t.fail(err)) res.on('end', () => { - t.strictEqual(payload, 'ok') + t.equal(payload, 'ok') server.stop() }) }) @@ -144,7 +144,7 @@ test('Basic (https with ssl agent)', t => { res.on('data', chunk => { payload += chunk }) res.on('error', err => t.fail(err)) res.on('end', () => { - t.strictEqual(payload, 'ok') + t.equal(payload, 'ok') server.stop() }) }) @@ -179,7 +179,7 @@ test('Custom http agent', t => { return agent } }) - t.true(connection.agent.custom) + t.ok(connection.agent.custom) connection.request({ path: '/hello', method: 'GET', @@ -198,7 +198,7 @@ test('Custom http agent', t => { res.on('data', chunk => { payload += chunk }) res.on('error', err => t.fail(err)) res.on('end', () => { - t.strictEqual(payload, 'ok') + t.equal(payload, 'ok') server.stop() }) }) @@ -268,7 +268,7 @@ test('querystring', t => { t.plan(2) function handler (req, res) { - t.strictEqual(req.url, '/hello?hello=world&you_know=for%20search') + t.equal(req.url, '/hello?hello=world&you_know=for%20search') res.end('ok') } @@ -291,7 +291,7 @@ test('querystring', t => { t.plan(2) function handler (req, res) { - t.strictEqual(req.url, '/hello') + t.equal(req.url, '/hello') res.end('ok') } @@ -322,7 +322,7 @@ test('Body request', t => { req.on('data', chunk => { payload += chunk }) req.on('error', err => t.fail(err)) req.on('end', () => { - t.strictEqual(payload, 'hello') + t.equal(payload, 'hello') res.end('ok') }) } @@ -351,7 +351,7 @@ test('Send body as buffer', t => { req.on('data', chunk => { payload += chunk }) req.on('error', err => t.fail(err)) req.on('end', () => { - t.strictEqual(payload, 'hello') + t.equal(payload, 'hello') res.end('ok') }) } @@ -380,7 +380,7 @@ test('Send body as stream', t => { req.on('data', chunk => { payload += chunk }) req.on('error', err => t.fail(err)) req.on('end', () => { - t.strictEqual(payload, 'hello') + t.equal(payload, 'hello') res.end('ok') }) } @@ -413,7 +413,7 @@ test('Should not close a connection if there are open requests', t => { }) setTimeout(() => { - t.strictEqual(connection._openRequests, 1) + t.equal(connection._openRequests, 1) connection.close() }, 500) @@ -422,14 +422,14 @@ test('Should not close a connection if there are open requests', t => { method: 'GET' }, (err, res) => { t.error(err) - t.strictEqual(connection._openRequests, 0) + t.equal(connection._openRequests, 0) let payload = '' res.setEncoding('utf8') res.on('data', chunk => { payload += chunk }) res.on('error', err => t.fail(err)) res.on('end', () => { - t.strictEqual(payload, 'ok') + t.equal(payload, 'ok') server.stop() }) }) @@ -450,7 +450,7 @@ test('Should not close a connection if there are open requests (with agent disab }) setTimeout(() => { - t.strictEqual(connection._openRequests, 1) + t.equal(connection._openRequests, 1) connection.close() }, 500) @@ -459,14 +459,14 @@ test('Should not close a connection if there are open requests (with agent disab method: 'GET' }, (err, res) => { t.error(err) - t.strictEqual(connection._openRequests, 0) + t.equal(connection._openRequests, 0) let payload = '' res.setEncoding('utf8') 
res.on('data', chunk => { payload += chunk }) res.on('error', err => t.fail(err)) res.on('end', () => { - t.strictEqual(payload, 'ok') + t.equal(payload, 'ok') server.stop() }) }) @@ -502,7 +502,7 @@ test('Url with querystring', t => { t.plan(2) function handler (req, res) { - t.strictEqual(req.url, '/hello?foo=bar&baz=faz') + t.equal(req.url, '/hello?foo=bar&baz=faz') res.end('ok') } @@ -546,7 +546,7 @@ test('Custom headers for connection', t => { }, (err, res) => { t.error(err) // should not update the default - t.deepEqual(connection.headers, { 'x-foo': 'bar' }) + t.same(connection.headers, { 'x-foo': 'bar' }) server.stop() }) }) @@ -576,7 +576,7 @@ test('asStream set to true', t => { res.on('data', chunk => { payload += chunk }) res.on('error', err => t.fail(err)) res.on('end', () => { - t.strictEqual(payload, 'ok') + t.equal(payload, 'ok') server.stop() }) }) @@ -587,7 +587,7 @@ test('Connection id should not contain credentials', t => { const connection = new Connection({ url: new URL('http://user:password@localhost:9200/') }) - t.strictEqual(connection.id, 'http://localhost:9200/') + t.equal(connection.id, 'http://localhost:9200/') t.end() }) @@ -595,7 +595,7 @@ test('Ipv6 support', t => { const connection = new Connection({ url: new URL('http://[::1]:9200/') }) - t.strictEqual(connection.buildRequestObject({}).hostname, '::1') + t.equal(connection.buildRequestObject({}).hostname, '::1') t.end() }) @@ -607,7 +607,7 @@ test('Should throw if the protocol is not http or https', t => { t.fail('Should throw') } catch (err) { t.ok(err instanceof ConfigurationError) - t.is(err.message, 'Invalid protocol: \'nope:\'') + t.equal(err.message, 'Invalid protocol: \'nope:\'') } t.end() }) @@ -623,7 +623,7 @@ test('Should disallow two-byte characters in URL path', t => { path: '/thisisinvalid\uffe2', method: 'GET' }, (err, res) => { - t.strictEqual( + t.equal( err.message, 'ERR_UNESCAPED_CHARACTERS: /thisisinvalid\uffe2' ) @@ -638,7 +638,7 @@ test('setRole', t => { url: new URL('http://localhost:9200/') }) - t.deepEqual(connection.roles, { + t.same(connection.roles, { master: true, data: true, ingest: true, @@ -647,7 +647,7 @@ test('setRole', t => { connection.setRole('master', false) - t.deepEqual(connection.roles, { + t.same(connection.roles, { master: false, data: true, ingest: true, @@ -666,8 +666,8 @@ test('setRole', t => { connection.setRole('car', true) t.fail('Shoud throw') } catch (err) { - t.true(err instanceof ConfigurationError) - t.is(err.message, 'Unsupported role: \'car\'') + t.ok(err instanceof ConfigurationError) + t.equal(err.message, 'Unsupported role: \'car\'') } }) @@ -682,8 +682,8 @@ test('setRole', t => { connection.setRole('master', 1) t.fail('Shoud throw') } catch (err) { - t.true(err instanceof ConfigurationError) - t.is(err.message, 'enabled should be a boolean') + t.ok(err instanceof ConfigurationError) + t.equal(err.message, 'enabled should be a boolean') } }) @@ -708,7 +708,7 @@ test('Util.inspect Connection class should hide agent, ssl and auth', t => { .replace(/(\r\n|\n|\r)/gm, '') } - t.strictEqual(cleanStr(inspect(connection)), cleanStr(`{ url: 'http://localhost:9200/', + t.equal(cleanStr(inspect(connection)), cleanStr(`{ url: 'http://localhost:9200/', id: 'node-id', headers: { foo: 'bar' }, deadCount: 0, @@ -728,7 +728,7 @@ test('connection.toJSON should hide agent, ssl and auth', t => { headers: { foo: 'bar' } }) - t.deepEqual(connection.toJSON(), { + t.same(connection.toJSON(), { url:
'http://localhost:9200/', id: 'node-id', headers: { @@ -754,7 +754,7 @@ test('Port handling', t => { url: new URL('http://localhost/') }) - t.strictEqual( + t.equal( connection.buildRequestObject({}).port, undefined ) @@ -767,7 +767,7 @@ test('Port handling', t => { url: new URL('https://localhost/') }) - t.strictEqual( + t.equal( connection.buildRequestObject({}).port, undefined ) @@ -784,7 +784,7 @@ test('Authorization header', t => { url: new URL('http://localhost:9200/') }) - t.deepEqual(connection.headers, {}) + t.same(connection.headers, {}) t.end() }) @@ -795,7 +795,7 @@ test('Authorization header', t => { auth: { username: 'foo', password: 'bar' } }) - t.deepEqual(connection.headers, { authorization: 'Basic Zm9vOmJhcg==' }) + t.same(connection.headers, { authorization: 'Basic Zm9vOmJhcg==' }) t.end() }) @@ -806,7 +806,7 @@ test('Authorization header', t => { auth: { apiKey: 'Zm9vOmJhcg==' } }) - t.deepEqual(connection.headers, { authorization: 'ApiKey Zm9vOmJhcg==' }) + t.same(connection.headers, { authorization: 'ApiKey Zm9vOmJhcg==' }) t.end() }) @@ -817,7 +817,7 @@ test('Authorization header', t => { auth: { apiKey: { id: 'foo', api_key: 'bar' } } }) - t.deepEqual(connection.headers, { authorization: 'ApiKey Zm9vOmJhcg==' }) + t.same(connection.headers, { authorization: 'ApiKey Zm9vOmJhcg==' }) t.end() }) @@ -830,7 +830,7 @@ test('Should not add agent and ssl to the serialized connection', t => { url: new URL('http://localhost:9200/') }) - t.strictEqual( + t.equal( JSON.stringify(connection), '{"url":"http://localhost:9200/","id":"http://localhost:9200/","headers":{},"deadCount":0,"resurrectTimeout":0,"_openRequests":0,"status":"alive","roles":{"master":true,"data":true,"ingest":true,"ml":false}}' ) @@ -890,7 +890,7 @@ test('Should correctly resolve request pathname', t => { url: new URL('http://localhost/test') }) - t.strictEqual( + t.equal( connection.buildRequestObject({ path: 'hello' }).pathname, @@ -906,7 +906,7 @@ test('Proxy agent (http)', t => { proxy: 'http://localhost:8080/' }) - t.true(connection.agent instanceof hpagent.HttpProxyAgent) + t.ok(connection.agent instanceof hpagent.HttpProxyAgent) }) test('Proxy agent (https)', t => { @@ -917,7 +917,7 @@ test('Proxy agent (https)', t => { proxy: 'http://localhost:8080/' }) - t.true(connection.agent instanceof hpagent.HttpsProxyAgent) + t.ok(connection.agent instanceof hpagent.HttpsProxyAgent) }) test('Abort with a slow body', t => { diff --git a/test/unit/errors.test.js b/test/unit/errors.test.js index 783f1b2f6..301ca1108 100644 --- a/test/unit/errors.test.js +++ b/test/unit/errors.test.js @@ -26,57 +26,57 @@ const { errors } = require('../../index') test('ElasticsearchClientError', t => { const err = new errors.ElasticsearchClientError() - t.true(err instanceof Error) + t.ok(err instanceof Error) t.end() }) test('TimeoutError', t => { const err = new errors.TimeoutError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.true(err.hasOwnProperty('meta')) + t.ok(err instanceof Error) + t.ok(err instanceof errors.ElasticsearchClientError) + t.ok(err.hasOwnProperty('meta')) t.end() }) test('ConnectionError', t => { const err = new errors.ConnectionError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.true(err.hasOwnProperty('meta')) + t.ok(err instanceof Error) + t.ok(err instanceof errors.ElasticsearchClientError) +
t.ok(err.hasOwnProperty('meta')) t.end() }) test('NoLivingConnectionsError', t => { const err = new errors.NoLivingConnectionsError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.true(err.hasOwnProperty('meta')) + t.ok(err instanceof Error) + t.ok(err instanceof errors.ElasticsearchClientError) + t.ok(err.hasOwnProperty('meta')) t.end() }) test('SerializationError', t => { const err = new errors.SerializationError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.false(err.hasOwnProperty('meta')) - t.true(err.hasOwnProperty('data')) + t.ok(err instanceof Error) + t.ok(err instanceof errors.ElasticsearchClientError) + t.notOk(err.hasOwnProperty('meta')) + t.ok(err.hasOwnProperty('data')) t.end() }) test('DeserializationError', t => { const err = new errors.DeserializationError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.false(err.hasOwnProperty('meta')) - t.true(err.hasOwnProperty('data')) + t.ok(err instanceof Error) + t.ok(err instanceof errors.ElasticsearchClientError) + t.notOk(err.hasOwnProperty('meta')) + t.ok(err.hasOwnProperty('data')) t.end() }) test('ConfigurationError', t => { const err = new errors.ConfigurationError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.false(err.hasOwnProperty('meta')) + t.ok(err instanceof Error) + t.ok(err instanceof errors.ElasticsearchClientError) + t.notOk(err.hasOwnProperty('meta')) t.end() }) @@ -87,9 +87,9 @@ test('ResponseError', t => { headers: 1 } const err = new errors.ResponseError(meta) - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.true(err.hasOwnProperty('meta')) + t.ok(err instanceof Error) + t.ok(err instanceof errors.ElasticsearchClientError) + t.ok(err.hasOwnProperty('meta')) t.ok(err.body) t.ok(err.statusCode) t.ok(err.headers) @@ -98,9 +98,9 @@ test('ResponseError', t => { test('RequestAbortedError', t => { const err = new errors.RequestAbortedError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.true(err.hasOwnProperty('meta')) + t.ok(err instanceof Error) + t.ok(err instanceof errors.ElasticsearchClientError) + t.ok(err.hasOwnProperty('meta')) t.end() }) @@ -131,8 +131,8 @@ test('ResponseError with meaningful message / 1', t => { headers: {} } const err = new errors.ResponseError(meta) - t.strictEqual(err.message, 'index_not_found_exception: [index_not_found_exception] Reason: no such index [foo]') - t.strictEqual(err.toString(), JSON.stringify(meta.body)) + t.equal(err.message, 'index_not_found_exception: [index_not_found_exception] Reason: no such index [foo]') + t.equal(err.toString(), JSON.stringify(meta.body)) t.end() }) @@ -171,8 +171,8 @@ test('ResponseError with meaningful message / 2', t => { headers: {} } const err = new errors.ResponseError(meta) - t.strictEqual(err.message, 'index_not_found_exception: [index_not_found_exception] Reason: no such index [foo]; [nested_cause] Reason: this is a nested cause') - t.strictEqual(err.toString(), JSON.stringify(meta.body)) + t.equal(err.message, 'index_not_found_exception: [index_not_found_exception] Reason: no such index [foo]; [nested_cause] Reason: this is a nested cause') + t.equal(err.toString(), JSON.stringify(meta.body)) t.end() }) @@ -193,7 +193,7 @@ test('ResponseError with meaningful message / 3', t => { headers: {} } const err = new errors.ResponseError(meta) - t.strictEqual(err.message, 
'index_not_found_exception') - t.strictEqual(err.toString(), JSON.stringify(meta.body)) + t.equal(err.message, 'index_not_found_exception') + t.equal(err.toString(), JSON.stringify(meta.body)) t.end() }) diff --git a/test/unit/esm/index.mjs b/test/unit/esm/index.mjs index d78462891..4f5d17f6a 100644 --- a/test/unit/esm/index.mjs +++ b/test/unit/esm/index.mjs @@ -4,5 +4,5 @@ import { Client } from '../../../index.mjs' t.test('esm support', t => { t.plan(1) const client = new Client({ node: 'http://localhost:9200/' }) - t.strictEqual(client.name, 'elasticsearch-js') + t.equal(client.name, 'elasticsearch-js') }) diff --git a/test/unit/events.test.js b/test/unit/events.test.js index cfaf20e29..6612213ca 100644 --- a/test/unit/events.test.js +++ b/test/unit/events.test.js @@ -289,7 +289,7 @@ test('Emit event', t => { client.on(events.REQUEST, (err, request) => { t.error(err) - t.deepEqual(request, { hello: 'world' }) + t.same(request, { hello: 'world' }) }) client.emit(events.REQUEST, null, { hello: 'world' }) diff --git a/test/unit/helpers/bulk.test.js b/test/unit/helpers/bulk.test.js index e99f3fba3..a2059fb73 100644 --- a/test/unit/helpers/bulk.test.js +++ b/test/unit/helpers/bulk.test.js @@ -44,14 +44,14 @@ test('bulk index', t => { let count = 0 const MockConnection = connection.buildMockConnection({ onRequest (params) { - t.strictEqual(params.path, '/_bulk') + t.equal(params.path, '/_bulk') t.match(params.headers, { 'content-type': 'application/x-ndjson', 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion},h=bp` }) const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { index: { _index: 'test' } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), dataset[count++]) return { body: { errors: false, items: [{}] } } } }) @@ -89,14 +89,14 @@ test('bulk index', t => { let count = 0 const MockConnection = connection.buildMockConnection({ onRequest (params) { - t.strictEqual(params.path, '/_bulk') + t.equal(params.path, '/_bulk') t.match(params.headers, { 'content-type': 'application/x-ndjson' }) t.notMatch(params.headers, { 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion},h=bp` }) const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { index: { _index: 'test' } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), dataset[count++]) return { body: { errors: false, items: [{}] } } } }) @@ -134,9 +134,9 @@ test('bulk index', t => { t.test('Should perform a bulk request (high flush size)', async t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { - t.strictEqual(params.path, '/_bulk') + t.equal(params.path, '/_bulk') t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - t.strictEqual(params.body.split('\n').filter(Boolean).length, 6) + t.equal(params.body.split('\n').filter(Boolean).length, 6) return { body: { errors: false, items: new Array(3).fill({}) } } } }) @@ -175,14 +175,14 @@ test('bulk index', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { if (params.method === 'GET') { - t.strictEqual(params.path, '/_all/_refresh') + t.equal(params.path, '/_all/_refresh') return { body: { acknowledged: true } } } else { - t.strictEqual(params.path, '/_bulk') +
         t.equal(params.path, '/_bulk')
         t.match(params.headers, { 'content-type': 'application/x-ndjson' })
         const [action, payload] = params.body.split('\n')
-        t.deepEqual(JSON.parse(action), { index: { _index: 'test' } })
-        t.deepEqual(JSON.parse(payload), dataset[count++])
+        t.same(JSON.parse(action), { index: { _index: 'test' } })
+        t.same(JSON.parse(payload), dataset[count++])
         return { body: { errors: false, items: [{}] } }
       }
     }
@@ -220,14 +220,14 @@ test('bulk index', t => {
     const MockConnection = connection.buildMockConnection({
       onRequest (params) {
         if (params.method === 'GET') {
-          t.strictEqual(params.path, '/test/_refresh')
+          t.equal(params.path, '/test/_refresh')
           return { body: { acknowledged: true } }
         } else {
-          t.strictEqual(params.path, '/_bulk')
+          t.equal(params.path, '/_bulk')
           t.match(params.headers, { 'content-type': 'application/x-ndjson' })
           const [action, payload] = params.body.split('\n')
-          t.deepEqual(JSON.parse(action), { index: { _index: 'test' } })
-          t.deepEqual(JSON.parse(payload), dataset[count++])
+          t.same(JSON.parse(action), { index: { _index: 'test' } })
+          t.same(JSON.parse(payload), dataset[count++])
           return { body: { errors: false, items: [{}] } }
         }
       }
@@ -264,11 +264,11 @@ test('bulk index', t => {
     let count = 0
     const MockConnection = connection.buildMockConnection({
       onRequest (params) {
-        t.strictEqual(params.path, '/_bulk')
+        t.equal(params.path, '/_bulk')
         t.match(params.headers, { 'content-type': 'application/x-ndjson' })
         const [action, payload] = params.body.split('\n')
-        t.deepEqual(JSON.parse(action), { index: { _index: 'test', _id: count } })
-        t.deepEqual(JSON.parse(payload), dataset[count++])
+        t.same(JSON.parse(action), { index: { _index: 'test', _id: count } })
+        t.same(JSON.parse(payload), dataset[count++])
         return { body: { errors: false, items: [{}] } }
       }
     })
@@ -308,7 +308,7 @@ test('bulk index', t => {
   t.test('Should perform a bulk request (retry)', async t => {
     async function handler (req, res) {
-      t.strictEqual(req.url, '/_bulk')
+      t.equal(req.url, '/_bulk')
       t.match(req.headers, { 'content-type': 'application/x-ndjson' })

       let body = ''
@@ -353,7 +353,7 @@ test('bulk index', t => {
        }
       },
       onDrop (doc) {
-        t.deepEqual(doc, {
+        t.same(doc, {
           status: 429,
           error: null,
           operation: { index: { _index: 'test' } },
@@ -402,7 +402,7 @@ test('bulk index', t => {
        }
       },
       onDrop (doc) {
-        t.deepEqual(doc, {
+        t.same(doc, {
           status: 429,
           error: null,
           operation: { index: { _index: 'test' } },
@@ -426,7 +426,7 @@ test('bulk index', t => {
   t.test('Should perform a bulk request (failure)', async t => {
     async function handler (req, res) {
-      t.strictEqual(req.url, '/_bulk')
+      t.equal(req.url, '/_bulk')
       t.match(req.headers, { 'content-type': 'application/x-ndjson' })

       let body = ''
@@ -471,7 +471,7 @@ test('bulk index', t => {
        }
       },
       onDrop (doc) {
-        t.deepEqual(doc, {
+        t.same(doc, {
           status: 400,
           error: { something: 'went wrong' },
           operation: { index: { _index: 'test' } },
@@ -525,7 +525,7 @@ test('bulk index', t => {
       await b
       t.fail('Should throw')
     } catch (err) {
-      t.true(err instanceof errors.ResponseError)
+      t.ok(err instanceof errors.ResponseError)
     }
   })
@@ -561,13 +561,13 @@ test('bulk index', t => {
       await b
       t.fail('Should throw')
     } catch (err) {
-      t.true(err instanceof errors.ResponseError)
+      t.ok(err instanceof errors.ResponseError)
     }
   })

   t.test('Should abort a bulk request', async t => {
     async function handler (req, res) {
-      t.strictEqual(req.url, '/_bulk')
+      t.equal(req.url, '/_bulk')
       t.match(req.headers, { 'content-type': 'application/x-ndjson' })

       let body = ''
@@ -653,8 +653,8 @@ test('bulk index', t => {
        }
      })
       .catch(err => {
-        t.true(err instanceof errors.ConfigurationError)
-        t.is(err.message, 'Bulk helper invalid action: \'foo\'')
+        t.ok(err instanceof errors.ConfigurationError)
+        t.equal(err.message, 'Bulk helper invalid action: \'foo\'')
       })
   })
@@ -666,11 +666,11 @@ test('bulk index', t => {
     let count = 0
     const MockConnection = connection.buildMockConnection({
       onRequest (params) {
-        t.strictEqual(params.path, '/_bulk')
+        t.equal(params.path, '/_bulk')
         t.match(params.headers, { 'content-type': 'application/x-ndjson' })
         const [action, payload] = params.body.split('\n')
-        t.deepEqual(JSON.parse(action), { index: { _index: 'test', _id: count } })
-        t.deepEqual(JSON.parse(payload), dataset[count++])
+        t.same(JSON.parse(action), { index: { _index: 'test', _id: count } })
+        t.same(JSON.parse(payload), dataset[count++])
         return { body: { errors: false, items: [{}] } }
       }
     })
@@ -718,11 +718,11 @@ test('bulk index', t => {
     let count = 0
     const MockConnection = connection.buildMockConnection({
       onRequest (params) {
-        t.strictEqual(params.path, '/_bulk')
+        t.equal(params.path, '/_bulk')
         t.match(params.headers, { 'content-type': 'application/x-ndjson' })
         const [action, payload] = params.body.split('\n')
-        t.deepEqual(JSON.parse(action), { index: { _index: 'test' } })
-        t.deepEqual(JSON.parse(payload), dataset[count++])
+        t.same(JSON.parse(action), { index: { _index: 'test' } })
+        t.same(JSON.parse(payload), dataset[count++])
         return { body: { errors: false, items: [{}] } }
       }
     })
@@ -774,11 +774,11 @@ test('bulk create', t => {
     let count = 0
     const MockConnection = connection.buildMockConnection({
       onRequest (params) {
-        t.strictEqual(params.path, '/_bulk')
+        t.equal(params.path, '/_bulk')
         t.match(params.headers, { 'content-type': 'application/x-ndjson' })
         const [action, payload] = params.body.split('\n')
-        t.deepEqual(JSON.parse(action), { create: { _index: 'test', _id: count } })
-        t.deepEqual(JSON.parse(payload), dataset[count++])
+        t.same(JSON.parse(action), { create: { _index: 'test', _id: count } })
+        t.same(JSON.parse(payload), dataset[count++])
         return { body: { errors: false, items: [{}] } }
       }
     })
@@ -823,11 +823,11 @@ test('bulk update', t => {
     let count = 0
     const MockConnection = connection.buildMockConnection({
       onRequest (params) {
-        t.strictEqual(params.path, '/_bulk')
+        t.equal(params.path, '/_bulk')
         t.match(params.headers, { 'content-type': 'application/x-ndjson' })
         const [action, payload] = params.body.split('\n')
-        t.deepEqual(JSON.parse(action), { update: { _index: 'test', _id: count } })
-        t.deepEqual(JSON.parse(payload), { doc: dataset[count++], doc_as_upsert: true })
+        t.same(JSON.parse(action), { update: { _index: 'test', _id: count } })
+        t.same(JSON.parse(payload), { doc: dataset[count++], doc_as_upsert: true })
         return { body: { errors: false, items: [{}] } }
       }
     })
@@ -871,11 +871,11 @@ test('bulk update', t => {
     let count = 0
     const MockConnection = connection.buildMockConnection({
       onRequest (params) {
-        t.strictEqual(params.path, '/_bulk')
+        t.equal(params.path, '/_bulk')
         t.match(params.headers, { 'content-type': 'application/x-ndjson' })
         const [action, payload] = params.body.split('\n')
-        t.deepEqual(JSON.parse(action), { update: { _index: 'test', _id: count } })
-        t.deepEqual(JSON.parse(payload), { doc: dataset[count++] })
+        t.same(JSON.parse(action), { update: { _index: 'test', _id: count } })
+        t.same(JSON.parse(payload), { doc: dataset[count++] })
        return { body: { errors: false, items: [{}] } }
       }
     })
@@ -921,9 +921,9 @@ test('bulk delete', t => {
     let count = 0
     const MockConnection = connection.buildMockConnection({
       onRequest (params) {
-        t.strictEqual(params.path, '/_bulk')
+        t.equal(params.path, '/_bulk')
         t.match(params.headers, { 'content-type': 'application/x-ndjson' })
-        t.deepEqual(JSON.parse(params.body), { delete: { _index: 'test', _id: count++ } })
+        t.same(JSON.parse(params.body), { delete: { _index: 'test', _id: count++ } })
         return { body: { errors: false, items: [{}] } }
       }
     })
@@ -963,7 +963,7 @@ test('bulk delete', t => {
   t.test('Should perform a bulk request (failure)', async t => {
     async function handler (req, res) {
-      t.strictEqual(req.url, '/_bulk')
+      t.equal(req.url, '/_bulk')
       t.match(req.headers, { 'content-type': 'application/x-ndjson' })

       let body = ''
@@ -1011,7 +1011,7 @@ test('bulk delete', t => {
        }
       },
       onDrop (doc) {
-        t.deepEqual(doc, {
+        t.same(doc, {
           status: 400,
           error: { something: 'went wrong' },
           operation: { delete: { _index: 'test', _id: 1 } },
@@ -1051,7 +1051,7 @@ test('transport options', t => {
         return { body: { errors: false, items: [{}] } }
       }

-      t.strictEqual(params.path, '/_all/_refresh')
+      t.equal(params.path, '/_all/_refresh')
       t.match(params.headers, {
         foo: 'bar'
       })
@@ -1081,7 +1081,7 @@ test('transport options', t => {
       }
     })

-    t.strictEqual(count, 4) // three bulk requests, one refresh
+    t.equal(count, 4) // three bulk requests, one refresh
     t.type(result.time, 'number')
     t.type(result.bytes, 'number')
     t.match(result, {
@@ -1111,8 +1111,8 @@ test('errors', t => {
       }
     })
   } catch (err) {
-    t.true(err instanceof errors.ConfigurationError)
-    t.is(err.message, 'bulk helper: the datasource must be an array or a buffer or a readable stream or an async generator')
+    t.ok(err instanceof errors.ConfigurationError)
+    t.equal(err.message, 'bulk helper: the datasource must be an array or a buffer or a readable stream or an async generator')
   }
 })
@@ -1129,8 +1129,8 @@ test('errors', t => {
       }
     })
   } catch (err) {
-    t.true(err instanceof errors.ConfigurationError)
-    t.is(err.message, 'bulk helper: the datasource is required')
+    t.ok(err instanceof errors.ConfigurationError)
+    t.equal(err.message, 'bulk helper: the datasource is required')
   }
 })
@@ -1143,8 +1143,8 @@ test('errors', t => {
       datasource: dataset.slice()
     })
   } catch (err) {
-    t.true(err instanceof errors.ConfigurationError)
-    t.is(err.message, 'bulk helper: the onDocument callback is required')
+    t.ok(err instanceof errors.ConfigurationError)
+    t.equal(err.message, 'bulk helper: the onDocument callback is required')
   }
 })
@@ -1159,11 +1159,11 @@ test('Flush interval', t => {
     let count = 0
     const MockConnection = connection.buildMockConnection({
       onRequest (params) {
-        t.strictEqual(params.path, '/_bulk')
+        t.equal(params.path, '/_bulk')
         t.match(params.headers, { 'content-type': 'application/x-ndjson' })
         const [action, payload] = params.body.split('\n')
-        t.deepEqual(JSON.parse(action), { index: { _index: 'test' } })
-        t.deepEqual(JSON.parse(payload), dataset[count++])
+        t.same(JSON.parse(action), { index: { _index: 'test' } })
+        t.same(JSON.parse(payload), dataset[count++])
         return { body: { errors: false, items: [{}] } }
       }
     })
@@ -1210,12 +1210,12 @@ test('Flush interval', t => {
     let count = 0
     const MockConnection = connection.buildMockConnection({
       onRequest (params) {
-        t.true(count < 2)
-        t.strictEqual(params.path, '/_bulk')
+        t.ok(count < 2)
+        t.equal(params.path, '/_bulk')
         t.match(params.headers, { 'content-type': 'application/x-ndjson' })
         const [action, payload] = params.body.split('\n')
-        t.deepEqual(JSON.parse(action), { index: { _index: 'test' } })
-        t.deepEqual(JSON.parse(payload), dataset[count++])
+        t.same(JSON.parse(action), { index: { _index: 'test' } })
+        t.same(JSON.parse(payload), dataset[count++])
         return { body: { errors: false, items: [{}] } }
       }
     })
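Note: the options asserted in the bulk tests above map directly onto the bulk helper API. A minimal usage sketch (the node URL and `dataset` are placeholders, and the call assumes an enclosing async function):

    const { Client } = require('@elastic/elasticsearch')
    const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

    const result = await client.helpers.bulk({
      datasource: dataset, // an array, buffer, readable stream or async generator
      onDocument (doc) {
        // return the action to perform for each document
        return { index: { _index: 'test' } }
      },
      onDrop (doc) {
        // called for every document that could not be ingested;
        // doc carries status, error, operation and the document itself
        console.error(doc.status, doc.error)
      },
      refreshOnCompletion: true // triggers the final refresh seen in the 'transport options' test
    })
    // result reports aggregate stats such as total, successful, failed, time and bytes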
diff --git a/test/unit/helpers/msearch.test.js b/test/unit/helpers/msearch.test.js
index b3edf3756..b80792fa5 100644
--- a/test/unit/helpers/msearch.test.js
+++ b/test/unit/helpers/msearch.test.js
@@ -56,7 +56,7 @@ test('Basic', async t => {
     { query: { match: { foo: 'bar' } } }
   )

-  t.deepEqual(result.body, {
+  t.same(result.body, {
     status: 200,
     hits: {
       hits: [
@@ -67,7 +67,7 @@ test('Basic', async t => {
     }
   })

-  t.deepEqual(result.documents, [
+  t.same(result.documents, [
     { one: 'one' },
     { two: 'two' },
     { three: 'three' }
@@ -116,7 +116,7 @@ test('Multiple searches (inside async iterator)', t => {
   m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => {
     t.error(err)
-    t.deepEqual(result.body, {
+    t.same(result.body, {
       status: 200,
       hits: {
         hits: [
@@ -127,7 +127,7 @@ test('Multiple searches (inside async iterator)', t => {
       }
     })

-    t.deepEqual(result.documents, [
+    t.same(result.documents, [
       { one: 'one' },
       { two: 'two' },
       { three: 'three' }
@@ -136,7 +136,7 @@ test('Multiple searches (inside async iterator)', t => {
   m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => {
     t.error(err)
-    t.deepEqual(result.body, {
+    t.same(result.body, {
       status: 200,
       hits: {
         hits: [
@@ -147,7 +147,7 @@ test('Multiple searches (inside async iterator)', t => {
       }
     })

-    t.deepEqual(result.documents, [
+    t.same(result.documents, [
       { four: 'four' },
       { five: 'five' },
       { six: 'six' }
@@ -197,7 +197,7 @@ test('Multiple searches (async iterator exits)', t => {
   m.search({ index: 'test' }, { query: {} }, (err, result) => {
     t.error(err)
-    t.deepEqual(result.body, {
+    t.same(result.body, {
       status: 200,
       hits: {
         hits: [
@@ -208,7 +208,7 @@ test('Multiple searches (async iterator exits)', t => {
       }
     })

-    t.deepEqual(result.documents, [
+    t.same(result.documents, [
       { one: 'one' },
       { two: 'two' },
       { three: 'three' }
@@ -217,7 +217,7 @@ test('Multiple searches (async iterator exits)', t => {
   m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => {
     t.error(err)
-    t.deepEqual(result.body, {
+    t.same(result.body, {
       status: 200,
       hits: {
         hits: [
@@ -228,7 +228,7 @@ test('Multiple searches (async iterator exits)', t => {
       }
     })

-    t.deepEqual(result.documents, [
+    t.same(result.documents, [
       { four: 'four' },
       { five: 'five' },
       { six: 'six' }
@@ -260,7 +260,7 @@ test('Stop a msearch processor (promises)', async t => {
       { query: { match: { foo: 'bar' } } }
     )
   } catch (err) {
-    t.strictEqual(err.message, 'The msearch processor has been stopped')
+    t.equal(err.message, 'The msearch processor has been stopped')
   }

   t.teardown(() => m.stop())
@@ -285,7 +285,7 @@ test('Stop a msearch processor (callbacks)', t => {
   m.stop()

   m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => {
-    t.strictEqual(err.message, 'The msearch processor has been stopped')
+    t.equal(err.message, 'The msearch processor has been stopped')
   })
 })
@@ -306,12 +306,12 @@ test('Bad header', t => {
   const m = client.helpers.msearch()

   m.search(null, { query: { match: { foo: 'bar' } } }, (err, result) => {
-    t.strictEqual(err.message, 'The header should be an object')
+    t.equal(err.message, 'The header should be an object')
   })

   m.search(null, { query: { match: { foo: 'bar' } } })
     .catch(err => {
-      t.strictEqual(err.message, 'The header should be an object')
+      t.equal(err.message, 'The header should be an object')
     })

   t.teardown(() => m.stop())
@@ -334,12 +334,12 @@ test('Bad body', t => {
   const m = client.helpers.msearch()

   m.search({ index: 'test' }, null, (err, result) => {
-    t.strictEqual(err.message, 'The body should be an object')
+    t.equal(err.message, 'The body should be an object')
   })

   m.search({ index: 'test' }, null)
     .catch(err => {
-      t.strictEqual(err.message, 'The body should be an object')
+      t.equal(err.message, 'The body should be an object')
     })

   t.teardown(() => m.stop())
@@ -389,7 +389,7 @@ test('Retry on 429', async t => {
     { query: { match: { foo: 'bar' } } }
   )

-  t.deepEqual(result.body, {
+  t.same(result.body, {
     status: 200,
     hits: {
       hits: [
@@ -400,7 +400,7 @@ test('Retry on 429', async t => {
     }
   })

-  t.deepEqual(result.documents, [
+  t.same(result.documents, [
     { one: 'one' },
     { two: 'two' },
     { three: 'three' }
@@ -436,7 +436,7 @@ test('Single search errors', async t => {
       { query: { match: { foo: 'bar' } } }
     )
   } catch (err) {
-    t.true(err instanceof errors.ResponseError)
+    t.ok(err instanceof errors.ResponseError)
   }

   t.teardown(() => m.stop())
@@ -465,13 +465,13 @@ test('Entire msearch fails', t => {
   const m = client.helpers.msearch({ operations: 1 })

   m.search({ index: 'test' }, { query: {} }, (err, result) => {
-    t.true(err instanceof errors.ResponseError)
-    t.deepEqual(result.documents, [])
+    t.ok(err instanceof errors.ResponseError)
+    t.same(result.documents, [])
   })

   m.search({ index: 'test' }, { query: {} }, (err, result) => {
-    t.true(err instanceof errors.ResponseError)
-    t.deepEqual(result.documents, [])
+    t.ok(err instanceof errors.ResponseError)
+    t.same(result.documents, [])
   })

   t.teardown(() => m.stop())
@@ -523,13 +523,13 @@ test('Stop the msearch helper with an error', t => {
   m.then(
     () => t.fail('Should fail'),
-    err => t.is(err.message, 'kaboom')
+    err => t.equal(err.message, 'kaboom')
   )

-  m.catch(err => t.is(err.message, 'kaboom'))
+  m.catch(err => t.equal(err.message, 'kaboom'))

   m.search({ index: 'test' }, { query: {} }, (err, result) => {
-    t.is(err.message, 'kaboom')
+    t.equal(err.message, 'kaboom')
   })
 })
@@ -564,7 +564,7 @@ test('Multiple searches (concurrency = 1)', t => {
   m.search({ index: 'test' }, { query: {} }, (err, result) => {
     t.error(err)
-    t.deepEqual(result.body, {
+    t.same(result.body, {
       status: 200,
       hits: {
         hits: [
@@ -575,7 +575,7 @@ test('Multiple searches (concurrency = 1)', t => {
       }
     })

-    t.deepEqual(result.documents, [
+    t.same(result.documents, [
       { one: 'one' },
       { two: 'two' },
       { three: 'three' }
@@ -584,7 +584,7 @@ test('Multiple searches (concurrency = 1)', t => {
   m.search({ index: 'test' }, { query: {} }, (err, result) => {
     t.error(err)
-    t.deepEqual(result.body, {
+    t.same(result.body, {
       status: 200,
       hits: {
         hits: [
@@ -595,7 +595,7 @@ test('Multiple searches (concurrency = 1)', t => {
       }
     })

-    t.deepEqual(result.documents, [
+    t.same(result.documents, [
       { one: 'one' },
       { two: 'two' },
       { three: 'three' }
@@ -647,12 +647,12 @@ test('Flush interval', t => {
   m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => {
     t.error(err)
-    t.is(result.documents.length, 3)
+    t.equal(result.documents.length, 3)
   })

   m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => {
     t.error(err)
-    t.is(result.documents.length, 3)
+    t.equal(result.documents.length, 3)
   })

   setImmediate(clock.next)
@@ -691,7 +691,7 @@ test('Flush interval - early stop', t => {
   m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => {
     t.error(err)
-    t.is(result.documents.length, 3)
+    t.equal(result.documents.length, 3)
   })

   setImmediate(() => {
@@ -750,11 +750,11 @@ test('Stop should resolve the helper (error)', t => {
   setImmediate(m.stop, new Error('kaboom'))

   m.then(() => t.fail('Should not fail'))
-    .catch(err => t.is(err.message, 'kaboom'))
+    .catch(err => t.equal(err.message, 'kaboom'))

-  m.catch(err => t.is(err.message, 'kaboom'))
+  m.catch(err => t.equal(err.message, 'kaboom'))

-  m.then(() => t.fail('Should not fail'), err => t.is(err.message, 'kaboom'))
+  m.then(() => t.fail('Should not fail'), err => t.equal(err.message, 'kaboom'))
 })

 test('Should use req options', async t => {
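Note: a minimal sketch of the msearch helper exercised by these tests; both calling styles shown here are asserted above (the `operations` option value is an example):

    const m = client.helpers.msearch({ operations: 5 })

    // promise style
    const result = await m.search(
      { index: 'test' },
      { query: { match: { foo: 'bar' } } }
    )
    console.log(result.documents)

    // callback style
    m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => {
      if (err) throw err
      console.log(result.documents)
    })

    m.stop() // further searches reject with 'The msearch processor has been stopped'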
diff --git a/test/unit/helpers/scroll.test.js b/test/unit/helpers/scroll.test.js
index cfc26d959..4943e703b 100644
--- a/test/unit/helpers/scroll.test.js
+++ b/test/unit/helpers/scroll.test.js
@@ -38,11 +38,11 @@ test('Scroll search', async t => {
       count += 1
       if (params.method === 'POST') {
-        t.strictEqual(params.querystring, 'scroll=1m')
+        t.equal(params.querystring, 'scroll=1m')
       }
       if (count === 4) {
         // final automated clear
-        t.strictEqual(params.method, 'DELETE')
+        t.equal(params.method, 'DELETE')
       }
       return {
         body: {
@@ -73,8 +73,8 @@ test('Scroll search', async t => {
   })

   for await (const result of scrollSearch) {
-    t.strictEqual(result.body.count, count)
-    t.strictEqual(result.body._scroll_id, 'id')
+    t.equal(result.body.count, count)
+    t.equal(result.body._scroll_id, 'id')
   }
 })
@@ -87,7 +87,7 @@ test('Clear a scroll search', async t => {
       })
       if (params.method === 'DELETE') {
         const body = JSON.parse(params.body)
-        t.strictEqual(body.scroll_id, 'id')
+        t.equal(body.scroll_id, 'id')
       }
       return {
         body: {
@@ -120,7 +120,7 @@ test('Clear a scroll search', async t => {
     if (count === 2) {
       t.fail('The scroll search should be cleared')
     }
-    t.strictEqual(result.body.count, count)
+    t.equal(result.body.count, count)
     if (count === 1) {
       await result.clear()
     }
@@ -138,7 +138,7 @@ test('Scroll search (retry)', async t => {
       }
       if (count === 5) {
         // final automated clear
-        t.strictEqual(params.method, 'DELETE')
+        t.equal(params.method, 'DELETE')
       }
       return {
         statusCode: 200,
@@ -172,9 +172,9 @@ test('Scroll search (retry)', async t => {
   })

   for await (const result of scrollSearch) {
-    t.strictEqual(result.body.count, count)
-    t.notStrictEqual(result.body.count, 1)
-    t.strictEqual(result.body._scroll_id, 'id')
+    t.equal(result.body.count, count)
+    t.not(result.body.count, 1)
+    t.equal(result.body._scroll_id, 'id')
   }
 })
@@ -208,9 +208,9 @@ test('Scroll search (retry throws and maxRetries)', async t => {
       t.fail('we should not be here')
     }
   } catch (err) {
-    t.true(err instanceof errors.ResponseError)
-    t.strictEqual(err.statusCode, 429)
-    t.strictEqual(count, expectedAttempts)
+    t.ok(err instanceof errors.ResponseError)
+    t.equal(err.statusCode, 429)
+    t.equal(count, expectedAttempts)
   }
 })
@@ -222,7 +222,7 @@ test('Scroll search (retry throws later)', async t => {
     onRequest (params) {
       count += 1
       // filter_path should not be added if is not already present
-      t.strictEqual(params.querystring, 'scroll=1m')
+      t.equal(params.querystring, 'scroll=1m')
       if (count > 1) {
         return { body: {}, statusCode: 429 }
       }
@@ -258,12 +258,12 @@ test('Scroll search (retry throws later)', async t => {
   try {
     for await (const result of scrollSearch) { // eslint-disable-line
-      t.strictEqual(result.body.count, count)
+      t.equal(result.body.count, count)
     }
   } catch (err) {
-    t.true(err instanceof errors.ResponseError)
-    t.strictEqual(err.statusCode, 429)
-    t.strictEqual(count, expectedAttempts)
+    t.ok(err instanceof errors.ResponseError)
+    t.equal(err.statusCode, 429)
+    t.equal(count, expectedAttempts)
   }
 })
@@ -272,11 +272,11 @@ test('Scroll search documents', async t => {
   const MockConnection = connection.buildMockConnection({
     onRequest (params) {
       if (count === 0) {
-        t.strictEqual(params.querystring, 'filter_path=hits.hits._source%2C_scroll_id&scroll=1m')
+        t.equal(params.querystring, 'filter_path=hits.hits._source%2C_scroll_id&scroll=1m')
       } else {
         if (params.method !== 'DELETE') {
-          t.strictEqual(params.querystring, 'scroll=1m')
-          t.strictEqual(params.body, '{"scroll_id":"id"}')
+          t.equal(params.querystring, 'scroll=1m')
+          t.equal(params.body, '{"scroll_id":"id"}')
         }
       }
       return {
@@ -309,7 +309,7 @@ test('Scroll search documents', async t => {
   let n = 1
   for await (const hit of scrollSearch) {
-    t.deepEqual(hit, { val: n * count })
+    t.same(hit, { val: n * count })
     n += 1
     if (n === 4) {
       count += 1
@@ -348,9 +348,9 @@ test('Should not retry if maxRetries = 0', async t => {
       t.fail('we should not be here')
     }
   } catch (err) {
-    t.true(err instanceof errors.ResponseError)
-    t.strictEqual(err.statusCode, 429)
-    t.strictEqual(count, expectedAttempts)
+    t.ok(err instanceof errors.ResponseError)
+    t.equal(err.statusCode, 429)
+    t.equal(count, expectedAttempts)
   }
 })
@@ -359,10 +359,10 @@ test('Fix querystring for scroll search', async t => {
   const MockConnection = connection.buildMockConnection({
     onRequest (params) {
       if (count === 0) {
-        t.strictEqual(params.querystring, 'size=1&scroll=1m')
+        t.equal(params.querystring, 'size=1&scroll=1m')
       } else {
         if (params.method !== 'DELETE') {
-          t.strictEqual(params.querystring, 'scroll=1m')
+          t.equal(params.querystring, 'scroll=1m')
         }
       }
       return {
@@ -392,7 +392,7 @@ test('Fix querystring for scroll search', async t => {
   })

   for await (const response of scrollSearch) {
-    t.strictEqual(response.body.hits.hits.length, 1)
+    t.equal(response.body.hits.hits.length, 1)
     count += 1
   }
 })
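Note: a minimal sketch of the two scroll helpers covered by the tests above (the index and query values are examples):

    const scrollSearch = client.helpers.scrollSearch({
      index: 'test',
      body: { query: { match_all: {} } }
    })
    for await (const result of scrollSearch) {
      console.log(result.body.hits.hits)
      // await result.clear() // clears the scroll early, as in the 'Clear a scroll search' test
    }

    // or iterate over the documents directly
    for await (const doc of client.helpers.scrollDocuments({ index: 'test' })) {
      console.log(doc) // each hit's _source, thanks to the filter_path asserted above
    }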
diff --git a/test/unit/helpers/search.test.js b/test/unit/helpers/search.test.js
index 06ce5870a..ad01ad35a 100644
--- a/test/unit/helpers/search.test.js
+++ b/test/unit/helpers/search.test.js
@@ -26,7 +26,7 @@ const { connection } = require('../../utils')
 test('Search should have an additional documents property', async t => {
   const MockConnection = connection.buildMockConnection({
     onRequest (params) {
-      t.strictEqual(params.querystring, 'filter_path=hits.hits._source')
+      t.equal(params.querystring, 'filter_path=hits.hits._source')
       return {
         body: {
           hits: {
@@ -50,7 +50,7 @@ test('Search should have an additional documents property', async t => {
     index: 'test',
     body: { foo: 'bar' }
   })
-  t.deepEqual(result, [
+  t.same(result, [
     { one: 'one' },
     { two: 'two' },
     { three: 'three' }
@@ -60,7 +60,7 @@ test('Search should have an additional documents property', async t => {
 test('kGetHits fallback', async t => {
   const MockConnection = connection.buildMockConnection({
     onRequest (params) {
-      t.strictEqual(params.querystring, 'filter_path=hits.hits._source')
+      t.equal(params.querystring, 'filter_path=hits.hits._source')
       return { body: {} }
     }
   })
@@ -74,13 +74,13 @@ test('kGetHits fallback', async t => {
     index: 'test',
     body: { foo: 'bar' }
   })
-  t.deepEqual(result, [])
+  t.same(result, [])
 })

 test('Merge filter paths (snake_case)', async t => {
   const MockConnection = connection.buildMockConnection({
     onRequest (params) {
-      t.strictEqual(params.querystring, 'filter_path=foo%2Chits.hits._source')
+      t.equal(params.querystring, 'filter_path=foo%2Chits.hits._source')
       return {
         body: {
           hits: {
@@ -105,7 +105,7 @@ test('Merge filter paths (snake_case)', async t => {
     filter_path: 'foo',
     body: { foo: 'bar' }
   })
-  t.deepEqual(result, [
+  t.same(result, [
     { one: 'one' },
     { two: 'two' },
     { three: 'three' }
@@ -115,7 +115,7 @@ test('Merge filter paths (snake_case)', async t => {
 test('Merge filter paths (camelCase)', async t => {
   const MockConnection = connection.buildMockConnection({
     onRequest (params) {
-      t.strictEqual(params.querystring, 'filter_path=foo%2Chits.hits._source')
+      t.equal(params.querystring, 'filter_path=foo%2Chits.hits._source')
       return {
         body: {
           hits: {
@@ -140,7 +140,7 @@ test('Merge filter paths (camelCase)', async t => {
     filterPath: 'foo',
     body: { foo: 'bar' }
   })
-  t.deepEqual(result, [
+  t.same(result, [
     { one: 'one' },
     { two: 'two' },
     { three: 'three' }
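Note: the search helper asserted above appends `filter_path=hits.hits._source` to the querystring and resolves directly with the documents. A minimal sketch:

    const documents = await client.helpers.search({
      index: 'test',
      body: { query: { match: { foo: 'bar' } } }
    })
    // documents is the array of _source objects, [] when there are no hits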
diff --git a/test/unit/selectors.test.js b/test/unit/selectors.test.js
index 798a457e0..54f3087c2 100644
--- a/test/unit/selectors.test.js
+++ b/test/unit/selectors.test.js
@@ -28,7 +28,7 @@ test('RoundRobinSelector', t => {
   t.plan(arr.length + 1)

   for (let i = 0; i <= arr.length; i++) {
-    t.strictEqual(
+    t.equal(
       selector(arr),
       i === arr.length ? arr[0] : arr[i]
     )
diff --git a/test/unit/serializer.test.js b/test/unit/serializer.test.js
index d0e847300..edf29c880 100644
--- a/test/unit/serializer.test.js
+++ b/test/unit/serializer.test.js
@@ -29,8 +29,8 @@ test('Basic', t => {
   const s = new Serializer()
   const obj = { hello: 'world' }
   const json = JSON.stringify(obj)
-  t.strictEqual(s.serialize(obj), json)
-  t.deepEqual(s.deserialize(json), obj)
+  t.equal(s.serialize(obj), json)
+  t.same(s.deserialize(json), obj)
 })

 test('ndserialize', t => {
@@ -41,7 +41,7 @@ test('ndserialize', t => {
     { winter: 'is coming' },
     { you_know: 'for search' }
   ]
-  t.strictEqual(
+  t.equal(
     s.ndserialize(obj),
     JSON.stringify(obj[0]) + '\n' +
     JSON.stringify(obj[1]) + '\n' +
@@ -57,7 +57,7 @@ test('ndserialize (strings)', t => {
     JSON.stringify({ winter: 'is coming' }),
     JSON.stringify({ you_know: 'for search' })
   ]
-  t.strictEqual(
+  t.equal(
     s.ndserialize(obj),
     obj[0] + '\n' +
     obj[1] + '\n' +
@@ -73,7 +73,7 @@ test('qserialize', t => {
     you_know: 'for search'
   }

-  t.strictEqual(
+  t.equal(
     s.qserialize(obj),
     stringify(obj)
   )
@@ -87,7 +87,7 @@ test('qserialize (array)', t => {
     arr: ['foo', 'bar']
   }

-  t.strictEqual(
+  t.equal(
     s.qserialize(obj),
     'hello=world&arr=foo%2Cbar'
   )
@@ -101,7 +101,7 @@ test('qserialize (string)', t => {
     you_know: 'for search'
   }

-  t.strictEqual(
+  t.equal(
     s.qserialize(stringify(obj)),
     stringify(obj)
   )
@@ -116,7 +116,7 @@ test('qserialize (key with undefined value)', t => {
     foo: 'bar'
   }

-  t.strictEqual(
+  t.equal(
     s.qserialize(obj),
     'hello=world&foo=bar'
   )
diff --git a/test/unit/transport.test.js b/test/unit/transport.test.js
index dbb1f5df0..410fe8995 100644
--- a/test/unit/transport.test.js
+++ b/test/unit/transport.test.js
@@ -71,7 +71,7 @@ test('Basic', t => {
       path: '/hello'
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -99,7 +99,7 @@ test('Basic (promises support)', t => {
       path: '/hello'
     })
       .then(({ body }) => {
-        t.deepEqual(body, { hello: 'world' })
+        t.same(body, { hello: 'world' })
       })
       .catch(t.fail)
   })
@@ -154,7 +154,7 @@ test('Basic (options + promises support)', t => {
       requestTimeout: 1000
     })
      .then(({ body }) => {
-        t.deepEqual(body, { hello: 'world' })
+        t.same(body, { hello: 'world' })
      })
      .catch(t.fail)
   })
@@ -171,7 +171,7 @@ test('Send POST', t => {
     req.on('data', chunk => { json += chunk })
     req.on('error', err => t.fail(err))
     req.on('end', () => {
-      t.deepEqual(JSON.parse(json), { hello: 'world' })
+      t.same(JSON.parse(json), { hello: 'world' })
       res.setHeader('Content-Type', 'application/json;utf=8')
       res.end(JSON.stringify({ hello: 'world' }))
     })
@@ -197,7 +197,7 @@ test('Send POST', t => {
       body: { hello: 'world' }
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -222,7 +222,7 @@ test('Send POST (ndjson)', t => {
     req.on('data', chunk => { json += chunk })
     req.on('error', err => t.fail(err))
     req.on('end', () => {
-      t.strictEqual(
+      t.equal(
         json,
         JSON.stringify(bulkBody[0]) + '\n' +
         JSON.stringify(bulkBody[1]) + '\n' +
@@ -253,7 +253,7 @@ test('Send POST (ndjson)', t => {
       bulkBody
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -270,7 +270,7 @@ test('Send stream', t => {
     req.on('data', chunk => { json += chunk })
     req.on('error', err => t.fail(err))
     req.on('end', () => {
-      t.deepEqual(JSON.parse(json), { hello: 'world' })
+      t.same(JSON.parse(json), { hello: 'world' })
       res.setHeader('Content-Type', 'application/json;utf=8')
       res.end(JSON.stringify({ hello: 'world' }))
     })
@@ -296,7 +296,7 @@ test('Send stream', t => {
       body: intoStream(JSON.stringify({ hello: 'world' }))
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -313,7 +313,7 @@ test('Send stream (bulkBody)', t => {
     req.on('data', chunk => { json += chunk })
     req.on('error', err => t.fail(err))
     req.on('end', () => {
-      t.deepEqual(JSON.parse(json), { hello: 'world' })
+      t.same(JSON.parse(json), { hello: 'world' })
       res.setHeader('Content-Type', 'application/json;utf=8')
       res.end(JSON.stringify({ hello: 'world' }))
     })
@@ -339,7 +339,7 @@ test('Send stream (bulkBody)', t => {
       bulkBody: intoStream(JSON.stringify({ hello: 'world' }))
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -371,7 +371,7 @@ test('Not JSON payload from server', t => {
       path: '/hello'
     }, (err, { body }) => {
       t.error(err)
-      t.strictEqual(body, 'hello!')
+      t.equal(body, 'hello!')
       server.stop()
     })
   })
@@ -391,8 +391,8 @@ test('NoLivingConnectionsError (null connection)', t => {
     sniffInterval: false,
     sniffOnStart: false,
     nodeSelector (connections) {
-      t.is(connections.length, 1)
-      t.true(connections[0] instanceof Connection)
+      t.equal(connections.length, 1)
+      t.ok(connections[0] instanceof Connection)
       return null
     }
   })
@@ -419,8 +419,8 @@ test('NoLivingConnectionsError (undefined connection)', t => {
     sniffInterval: false,
     sniffOnStart: false,
     nodeSelector (connections) {
-      t.is(connections.length, 1)
-      t.true(connections[0] instanceof Connection)
+      t.equal(connections.length, 1)
+      t.ok(connections[0] instanceof Connection)
       return undefined
     }
   })
@@ -521,7 +521,7 @@ test('TimeoutError (should call markDead on the failing connection)', t => {
   class CustomConnectionPool extends ConnectionPool {
     markDead (connection) {
-      t.strictEqual(connection.id, 'node1')
+      t.equal(connection.id, 'node1')
       super.markDead(connection)
     }
   }
@@ -555,7 +555,7 @@ test('ConnectionError (should call markDead on the failing connection)', t => {
   class CustomConnectionPool extends ConnectionPool {
     markDead (connection) {
-      t.strictEqual(connection.id, 'node1')
+      t.equal(connection.id, 'node1')
       super.markDead(connection)
     }
   }
@@ -626,7 +626,7 @@ test('Retry mechanism', t => {
       path: '/hello'
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -671,7 +671,7 @@ test('Should not retry if the body is a stream', t => {
       body: intoStream(JSON.stringify({ hello: 'world' }))
     }, (err, { body }) => {
       t.ok(err instanceof ResponseError)
-      t.strictEqual(count, 1)
+      t.equal(count, 1)
       server.stop()
     })
   })
@@ -716,7 +716,7 @@ test('Should not retry if the bulkBody is a stream', t => {
       bulkBody: intoStream(JSON.stringify([{ hello: 'world' }]))
     }, (err, { body }) => {
       t.ok(err instanceof ResponseError)
-      t.strictEqual(count, 1)
+      t.equal(count, 1)
       server.stop()
     })
   })
@@ -763,7 +763,7 @@ test('No retry', t => {
       maxRetries: 0
     }, (err, { body }) => {
       t.ok(err instanceof ResponseError)
-      t.strictEqual(count, 1)
+      t.equal(count, 1)
       server.stop()
     })
   })
@@ -813,7 +813,7 @@ test('Custom retry mechanism', t => {
       maxRetries: 1
    }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -824,7 +824,7 @@ test('Should not retry on 429', t => {
   let count = 0
   function handler (req, res) {
-    t.strictEqual(count++, 0)
+    t.equal(count++, 0)
     res.statusCode = 429
     res.setHeader('Content-Type', 'application/json;utf=8')
     res.end(JSON.stringify({ hello: 'world' }))
@@ -858,7 +858,7 @@ test('Should not retry on 429', t => {
       path: '/hello'
     }, (err, result) => {
       t.ok(err)
-      t.strictEqual(err.statusCode, 429)
+      t.equal(err.statusCode, 429)
       server.stop()
     })
   })
@@ -869,7 +869,7 @@ test('Should call markAlive with a successful response', t => {
   class CustomConnectionPool extends ConnectionPool {
     markAlive (connection) {
-      t.strictEqual(connection.id, 'node1')
+      t.equal(connection.id, 'node1')
       super.markAlive(connection)
     }
   }
@@ -895,7 +895,7 @@ test('Should call markAlive with a successful response', t => {
     path: '/hello'
   }, (err, { body }) => {
     t.error(err)
-    t.deepEqual(body, { hello: 'world' })
+    t.same(body, { hello: 'world' })
   })
 })
@@ -932,7 +932,7 @@ test('Should call resurrect on every request', t => {
     path: '/hello'
   }, (err, { body }) => {
     t.error(err)
-    t.deepEqual(body, { hello: 'world' })
+    t.same(body, { hello: 'world' })
   })
 })
@@ -1076,8 +1076,8 @@ test('ResponseError', t => {
       path: '/hello'
     }, (err, { body }) => {
       t.ok(err instanceof ResponseError)
-      t.deepEqual(err.body, { status: 500 })
-      t.strictEqual(err.statusCode, 500)
+      t.same(err.body, { status: 500 })
+      t.equal(err.statusCode, 500)
       server.stop()
     })
   })
@@ -1113,7 +1113,7 @@ test('Override requestTimeout', t => {
       requestTimeout: 2000
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -1125,7 +1125,7 @@ test('sniff', t => {
     class MyTransport extends Transport {
       sniff (opts) {
-        t.strictEqual(opts.reason, Transport.sniffReasons.SNIFF_ON_START)
+        t.equal(opts.reason, Transport.sniffReasons.SNIFF_ON_START)
       }
     }
@@ -1150,7 +1150,7 @@ test('sniff', t => {
     class MyTransport extends Transport {
       sniff (opts) {
-        t.strictEqual(opts.reason, Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT)
+        t.equal(opts.reason, Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT)
       }
     }
@@ -1184,7 +1184,7 @@ test('sniff', t => {
     class MyTransport extends Transport {
       sniff (opts) {
-        t.strictEqual(opts.reason, Transport.sniffReasons.SNIFF_INTERVAL)
+        t.equal(opts.reason, Transport.sniffReasons.SNIFF_INTERVAL)
      }
     }
@@ -1329,7 +1329,7 @@ test('Should retry the request if the statusCode is 502/3/4', t => {
       path: '/hello'
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -1362,7 +1362,7 @@ test('Ignore status code', t => {
       ignore: [404]
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
     })

     transport.request({
@@ -1386,7 +1386,7 @@ test('Should serialize the querystring', t => {
   t.plan(2)

   function handler (req, res) {
-    t.strictEqual(req.url, '/hello?hello=world&you_know=for%20search')
+    t.equal(req.url, '/hello?hello=world&you_know=for%20search')
     res.end('ok')
   }
@@ -1582,8 +1582,8 @@ test('Should cast to boolean HEAD request', t => {
       path: '/200'
     }, (err, { body, statusCode }) => {
       t.error(err)
-      t.strictEqual(statusCode, 200)
-      t.strictEqual(body, true)
+      t.equal(statusCode, 200)
+      t.equal(body, true)
     })
   })
@@ -1607,8 +1607,8 @@ test('Should cast to boolean HEAD request', t => {
       path: '/404'
     }, (err, { body, statusCode }) => {
       t.error(err)
-      t.strictEqual(statusCode, 404)
-      t.strictEqual(body, false)
+      t.equal(statusCode, 404)
+      t.equal(body, false)
     })
   })
@@ -1633,8 +1633,8 @@ test('Should cast to boolean HEAD request', t => {
       path: '/400'
     }, (err, { body, statusCode }) => {
       t.ok(err instanceof ResponseError)
-      t.false(typeof err.body === 'boolean')
-      t.strictEqual(statusCode, 400)
+      t.notOk(typeof err.body === 'boolean')
+      t.equal(statusCode, 400)
     })
   })
@@ -1658,8 +1658,8 @@ test('Should cast to boolean HEAD request', t => {
       path: '/500'
     }, (err, { body, statusCode }) => {
       t.ok(err instanceof ResponseError)
-      t.false(typeof err.body === 'boolean')
-      t.strictEqual(statusCode, 500)
+      t.notOk(typeof err.body === 'boolean')
+      t.equal(statusCode, 500)
     })
   })
@@ -1700,7 +1700,7 @@ test('Suggest compression', t => {
     path: '/hello'
   }, (err, { body }) => {
     t.error(err)
-    t.deepEqual(body, { hello: 'world' })
+    t.same(body, { hello: 'world' })
     server.stop()
   })
 })
@@ -1775,7 +1775,7 @@ test('Warning header', t => {
       path: '/hello'
     }, (err, { warnings }) => {
       t.error(err)
-      t.deepEqual(warnings, [warn])
+      t.same(warnings, [warn])
       warnings.forEach(w => t.type(w, 'string'))
       server.stop()
     })
@@ -1812,7 +1812,7 @@ test('Warning header', t => {
       path: '/hello'
     }, (err, { warnings }) => {
       t.error(err)
-      t.deepEqual(warnings, [warn1, warn2])
+      t.same(warnings, [warn1, warn2])
       warnings.forEach(w => t.type(w, 'string'))
       server.stop()
     })
@@ -1846,7 +1846,7 @@ test('Warning header', t => {
       path: '/hello'
     }, (err, { warnings }) => {
       t.error(err)
-      t.strictEqual(warnings, null)
+      t.equal(warnings, null)
       server.stop()
     })
   })
@@ -1893,7 +1893,7 @@ test('asStream set to true', t => {
       body.on('data', chunk => { payload += chunk })
       body.on('error', err => t.fail(err))
       body.on('end', () => {
-        t.deepEqual(JSON.parse(payload), { hello: 'world' })
+        t.same(JSON.parse(payload), { hello: 'world' })
         server.stop()
       })
     })
@@ -1914,7 +1914,7 @@ test('Compress request', t => {
       .on('data', chunk => { json += chunk })
       .on('error', err => t.fail(err))
       .on('end', () => {
-        t.deepEqual(JSON.parse(json), { you_know: 'for search' })
+        t.same(JSON.parse(json), { you_know: 'for search' })
        res.setHeader('Content-Type', 'application/json;utf=8')
        res.end(JSON.stringify({ you_know: 'for search' }))
       })
@@ -1942,7 +1942,7 @@ test('Compress request', t => {
       compression: 'gzip'
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { you_know: 'for search' })
+      t.same(body, { you_know: 'for search' })
       server.stop()
     })
   })
@@ -1961,7 +1961,7 @@ test('Compress request', t => {
       .on('data', chunk => { json += chunk })
       .on('error', err => t.fail(err))
       .on('end', () => {
-        t.deepEqual(JSON.parse(json), { you_know: 'for search' })
+        t.same(JSON.parse(json), { you_know: 'for search' })
        res.setHeader('Content-Type', 'application/json;utf=8')
        res.end(JSON.stringify({ you_know: 'for search' }))
       })
@@ -1988,7 +1988,7 @@ test('Compress request', t => {
       body: { you_know: 'for search' }
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { you_know: 'for search' })
+      t.same(body, { you_know: 'for search' })
       server.stop()
     })
   })
@@ -2007,7 +2007,7 @@ test('Compress request', t => {
       .on('data', chunk => { json += chunk })
       .on('error', err => t.fail(err))
       .on('end', () => {
-        t.deepEqual(JSON.parse(json), { you_know: 'for search' })
+        t.same(JSON.parse(json), { you_know: 'for search' })
        res.setHeader('Content-Type', 'application/json;utf=8')
        res.end(JSON.stringify({ you_know: 'for search' }))
       })
@@ -2035,7 +2035,7 @@ test('Compress request', t => {
       compression: 'gzip'
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { you_know: 'for search' })
+      t.same(body, { you_know: 'for search' })
       server.stop()
     })
   })
@@ -2057,8 +2057,8 @@ test('Compress request', t => {
       })
       t.fail('Should throw')
     } catch (err) {
-      t.true(err instanceof ConfigurationError)
-      t.is(err.message, 'Invalid compression: \'deflate\'')
+      t.ok(err instanceof ConfigurationError)
+      t.equal(err.message, 'Invalid compression: \'deflate\'')
     }
   })
@@ -2066,8 +2066,8 @@ test('Compress request', t => {
     t.plan(9)

     function handler (req, res) {
-      t.strictEqual(req.headers['content-encoding'], undefined)
-      t.strictEqual(req.headers['content-type'], undefined)
+      t.equal(req.headers['content-encoding'], undefined)
+      t.equal(req.headers['content-type'], undefined)
       res.end()
     }
@@ -2126,7 +2126,7 @@ test('Compress request', t => {
       .on('data', chunk => { json += chunk })
       .on('error', err => t.fail(err))
       .on('end', () => {
-        t.deepEqual(JSON.parse(json), { you_know: 'for search' })
+        t.same(JSON.parse(json), { you_know: 'for search' })
        res.setHeader('Content-Type', 'application/json;utf=8')
        if (count++ > 0) {
          res.end(JSON.stringify({ you_know: 'for search' }))
@@ -2160,8 +2160,8 @@ test('Compress request', t => {
       compression: 'gzip'
     }, (err, { body, meta }) => {
       t.error(err)
-      t.deepEqual(body, { you_know: 'for search' })
-      t.strictEqual(count, 2)
+      t.same(body, { you_know: 'for search' })
+      t.equal(count, 2)
       server.stop()
     })
   })
@@ -2201,7 +2201,7 @@ test('Headers configuration', t => {
       path: '/hello'
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -2242,7 +2242,7 @@ test('Headers configuration', t => {
       headers: { 'x-baz': 'faz' }
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -2280,7 +2280,7 @@ test('Headers configuration', t => {
       headers: { 'x-foo': 'faz' }
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -2318,7 +2318,7 @@ test('nodeFilter and nodeSelector', t => {
     path: '/hello'
   }, (err, { body }) => {
     t.error(err)
-    t.deepEqual(body, { hello: 'world' })
+    t.same(body, { hello: 'world' })
   })
 })
@@ -2327,7 +2327,7 @@ test('Should accept custom querystring in the optons object', t => {
     t.plan(3)

     function handler (req, res) {
-      t.strictEqual(req.url, '/hello?foo=bar')
+      t.equal(req.url, '/hello?foo=bar')
       res.setHeader('Content-Type', 'application/json;utf=8')
       res.end(JSON.stringify({ hello: 'world' }))
     }
@@ -2353,7 +2353,7 @@ test('Should accept custom querystring in the optons object', t => {
       querystring: { foo: 'bar' }
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -2363,7 +2363,7 @@ test('Should accept custom querystring in the optons object', t => {
     t.plan(3)

     function handler (req, res) {
-      t.strictEqual(req.url, '/hello?baz=faz&foo=bar')
+      t.equal(req.url, '/hello?baz=faz&foo=bar')
       res.setHeader('Content-Type', 'application/json;utf=8')
       res.end(JSON.stringify({ hello: 'world' }))
     }
@@ -2390,7 +2390,7 @@ test('Should accept custom querystring in the optons object', t => {
       querystring: { foo: 'bar' }
     }, (err, { body }) => {
       t.error(err)
-      t.deepEqual(body, { hello: 'world' })
+      t.same(body, { hello: 'world' })
       server.stop()
     })
   })
@@ -2454,8 +2454,8 @@ test('Should pass request params and options to generateRequestId', t => {
     sniffInterval: false,
     sniffOnStart: false,
     generateRequestId: function (requestParams, requestOptions) {
-      t.deepEqual(requestParams, params)
-      t.deepEqual(requestOptions, options)
+      t.same(requestParams, params)
+      t.same(requestOptions, options)
       return 'id'
     }
   })
@@ -2489,8 +2489,8 @@ test('Secure json parsing', t => {
       method: 'GET',
       path: '/hello'
     }, (err, { body }) => {
-      t.true(err instanceof DeserializationError)
-      t.is(err.message, 'Object contains forbidden prototype property')
+      t.ok(err instanceof DeserializationError)
+      t.equal(err.message, 'Object contains forbidden prototype property')
       server.stop()
     })
   })
@@ -2521,8 +2521,8 @@ test('Secure json parsing', t => {
       method: 'GET',
       path: '/hello'
     }, (err, { body }) => {
-      t.true(err instanceof DeserializationError)
-      t.is(err.message, 'Object contains forbidden prototype property')
+      t.ok(err instanceof DeserializationError)
+      t.equal(err.message, 'Object contains forbidden prototype property')
       server.stop()
     })
   })
@@ -2535,7 +2535,7 @@ test('Lowercase headers utilty', t => {
   t.plan(4)
   const { lowerCaseHeaders } = Transport.internals

-  t.deepEqual(lowerCaseHeaders({
+  t.same(lowerCaseHeaders({
     Foo: 'bar',
     Faz: 'baz',
     'X-Hello': 'world'
@@ -2545,7 +2545,7 @@ test('Lowercase headers utilty', t => {
     'x-hello': 'world'
   })

-  t.deepEqual(lowerCaseHeaders({
+  t.same(lowerCaseHeaders({
     Foo: 'bar',
     faz: 'baz',
     'X-hello': 'world'
@@ -2555,9 +2555,9 @@ test('Lowercase headers utilty', t => {
     'x-hello': 'world'
   })

-  t.strictEqual(lowerCaseHeaders(null), null)
+  t.equal(lowerCaseHeaders(null), null)

-  t.strictEqual(lowerCaseHeaders(undefined), undefined)
+  t.equal(lowerCaseHeaders(undefined), undefined)
 })

 test('The callback with a sync error should be called in the next tick - json', t => {
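Note: the test changes in this patch are a mechanical migration to the canonical tap v15 assertion names; no test logic changes. The mapping, as it appears throughout the diff:

    // tap v14 alias             ->  tap v15 name
    // t.strictEqual(a, b)       ->  t.equal(a, b)
    // t.is(a, b)                ->  t.equal(a, b)
    // t.deepEqual(a, b)         ->  t.same(a, b)
    // t.true(x)                 ->  t.ok(x)
    // t.false(x)                ->  t.notOk(x)
    // t.notStrictEqual(a, b)    ->  t.not(a, b)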
From 5292fcae0387a76a5ba0727692f3aa02e94961f4 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Thu, 20 May 2021 16:20:45 +0200
Subject: [PATCH 022/647] Bumped v8.0.0-canary.11

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index dfd45d446..9efe4d585 100644
--- a/package.json
+++ b/package.json
@@ -12,7 +12,7 @@
   },
   "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html",
   "version": "8.0.0-SNAPSHOT.9f33e3c7",
-  "versionCanary": "8.0.0-canary.10",
+  "versionCanary": "8.0.0-canary.11",
   "keywords": [
     "elasticsearch",
     "elastic",

From 4e6ae5d2e822f93f5cc79b53ffa95cb905dba62b Mon Sep 17 00:00:00 2001
From: Tomas Della Vedova
Date: Thu, 3 Jun 2021 17:50:16 +0200
Subject: [PATCH 023/647] Fix integration test (#1479)

---
 test/integration/index.js | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/test/integration/index.js b/test/integration/index.js
index 934552101..149d1d274 100644
--- a/test/integration/index.js
+++ b/test/integration/index.js
@@ -82,6 +82,7 @@ const platinumBlackList = {
   ],
   // The cleanup fails with a index not found when retrieving the jobs
   'ml/get_datafeed_stats.yml': ['Test get datafeed stats when total_search_time_ms mapping is missing'],
+  'ml/bucket_correlation_agg.yml': ['Test correlation bucket agg simple'],
   'ml/preview_datafeed.yml': ['*'],
   // Investigate why is failing
   'ml/inference_crud.yml': ['*'],
@@ -94,6 +95,7 @@ const platinumBlackList = {
   // the body is correct, but the regex is failing
   'sql/sql.yml': ['Getting textual representation'],
   'searchable_snapshots/10_usage.yml': ['*'],
+  'service_accounts/10_basic.yml': ['*'],
   // we are setting two certificates in the docker config
   'ssl/10_basic.yml': ['*'],
   // very likely, the index template has not been loaded yet.
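Note: entries in platinumBlackList map a YAML test file to the test names to skip, with '*' skipping the whole file. A sketch of how a map of this shape is typically consulted; shouldSkip is illustrative only, not part of this patch:

    function shouldSkip (blacklist, file, testName) {
      const names = blacklist[file]
      if (!names) return false
      return names.includes('*') || names.includes(testName)
    }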
From 894a33ce867679a29b0472d3970c447227eebb3f Mon Sep 17 00:00:00 2001
From: delvedor
Date: Thu, 3 Jun 2021 18:39:18 +0200
Subject: [PATCH 024/647] Updated type definitions

---
 api/kibana.d.ts |   4 +-
 api/new.d.ts    |  15 +++-
 api/types.d.ts  | 201 +++++++++++++++++++++++++-----------------
 3 files changed, 120 insertions(+), 100 deletions(-)

diff --git a/api/kibana.d.ts b/api/kibana.d.ts
index af2a8c0e8..3dbec026d 100644
--- a/api/kibana.d.ts
+++ b/api/kibana.d.ts
@@ -446,6 +446,7 @@ interface KibanaClient {
     putRole(params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
     putRoleMapping(params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
     putUser(params: T.SecurityPutUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    samlCompleteLogout(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
   }
   shutdown: {
     deleteNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
@@ -472,6 +473,7 @@ interface KibanaClient {
     deleteRepository(params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
     get(params: T.SnapshotGetRequest, options?: TransportRequestOptions): TransportRequestPromise>
     getRepository(params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
+    repositoryAnalyze(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
     restore(params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): TransportRequestPromise>
     status(params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
     verifyRepository(params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
@@ -489,7 +491,7 @@ interface KibanaClient {
     get(params: T.TaskGetRequest, options?: TransportRequestOptions): TransportRequestPromise>
     list(params?: T.TaskListRequest, options?: TransportRequestOptions): TransportRequestPromise>
   }
-  termsEnum(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
+  termsEnum(params: T.TermsEnumRequest, options?: TransportRequestOptions): TransportRequestPromise>
   termvectors(params: T.TermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise>
   textStructure: {
     findStructure(params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): TransportRequestPromise>
diff --git a/api/new.d.ts b/api/new.d.ts
index 6b9dfed5e..8bc6cef17 100644
--- a/api/new.d.ts
+++ b/api/new.d.ts
@@ -1279,6 +1279,10 @@ declare class Client {
     putUser(params: T.SecurityPutUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
     putUser(params: T.SecurityPutUserRequest, callback: callbackFn): TransportRequestCallback
     putUser(params: T.SecurityPutUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    samlCompleteLogout(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
+    samlCompleteLogout(callback: callbackFn): TransportRequestCallback
+    samlCompleteLogout(params: TODO, callback: callbackFn): TransportRequestCallback
+    samlCompleteLogout(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
   shutdown: {
     deleteNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
@@ -1355,6 +1359,10 @@ declare class Client {
     getRepository(callback: callbackFn): TransportRequestCallback
     getRepository(params: T.SnapshotGetRepositoryRequest, callback: callbackFn): TransportRequestCallback
     getRepository(params: T.SnapshotGetRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+    repositoryAnalyze(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
+    repositoryAnalyze(callback: callbackFn): TransportRequestCallback
+    repositoryAnalyze(params: TODO, callback: callbackFn): TransportRequestCallback
+    repositoryAnalyze(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
     restore(params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): TransportRequestPromise>
     restore(params: T.SnapshotRestoreRequest, callback: callbackFn): TransportRequestCallback
     restore(params: T.SnapshotRestoreRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
@@ -1399,10 +1407,9 @@ declare class Client {
     list(params: T.TaskListRequest, callback: callbackFn): TransportRequestCallback
     list(params: T.TaskListRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   }
-  termsEnum(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-  termsEnum(callback: callbackFn): TransportRequestCallback
-  termsEnum(params: TODO, callback: callbackFn): TransportRequestCallback
-  termsEnum(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
+  termsEnum(params: T.TermsEnumRequest, options?: TransportRequestOptions): TransportRequestPromise>
+  termsEnum(params: T.TermsEnumRequest, callback: callbackFn): TransportRequestCallback
+  termsEnum(params: T.TermsEnumRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
   termvectors(params: T.TermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise>
   termvectors(params: T.TermvectorsRequest, callback: callbackFn): TransportRequestCallback
   termvectors(params: T.TermvectorsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
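Note: each API added in new.d.ts above ships with the client's usual overload set, so it can be driven either through promises or callbacks. A minimal consumer-side sketch; the request params are elided because these definitions still use the TODO placeholder type:

    // promise style
    const response = await client.snapshot.repositoryAnalyze(/* params */)

    // callback style
    client.snapshot.repositoryAnalyze(/* params */, (err, response) => {
      if (err) console.error(err)
    })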
diff --git a/api/types.d.ts b/api/types.d.ts
index cfead6e0e..52e48fcfa 100644
--- a/api/types.d.ts
+++ b/api/types.d.ts
@@ -1558,6 +1558,25 @@ export interface SearchTemplateResponse {
   hits: SearchHitsMetadata
 }

+export interface TermsEnumRequest extends RequestBase {
+  index: IndexName
+  body?: {
+    field: Field
+    size?: integer
+    timeout?: Time
+    case_insensitive?: boolean
+    index_filter?: QueryDslQueryContainer
+    string?: string
+    search_after?: string
+  }
+}
+
+export interface TermsEnumResponse {
+  _shards: ShardStatistics
+  terms: string[]
+  complete: boolean
+}
+
 export interface TermvectorsFieldStatistics {
   doc_count: integer
   sum_doc_freq: long
@@ -3825,9 +3844,7 @@ export interface MappingMurmur3HashProperty extends MappingDocValuesPropertyBase {
 }

 export interface MappingNestedProperty extends MappingCorePropertyBase {
-  dynamic?: boolean | MappingDynamicMapping
   enabled?: boolean
-  properties?: Record
   include_in_parent?: boolean
   include_in_root?: boolean
   type: 'nested'
@@ -3847,9 +3864,7 @@ export interface MappingNumberProperty extends MappingDocValuesPropertyBase {
 }

 export type MappingNumberType = 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer' | 'long' | 'short' | 'byte' | 'unsigned_long'

 export interface MappingObjectProperty extends MappingCorePropertyBase {
-  dynamic?: boolean | MappingDynamicMapping
   enabled?: boolean
-  properties?: Record
   type?: 'object'
 }
@@ -4046,7 +4061,6 @@ export interface QueryDslCommonTermsQuery extends QueryDslQueryBase {

 export interface QueryDslConstantScoreQuery extends QueryDslQueryBase {
   filter?: QueryDslQueryContainer
-  boost?: float
 }

 export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase {
@@ -4070,7 +4084,6 @@ export interface QueryDslDecayPlacement {
 export interface QueryDslDisMaxQuery extends QueryDslQueryBase {
   queries?: QueryDslQueryContainer[]
   tie_breaker?: double
-  boost?: float
 }

 export interface QueryDslDistanceFeatureQuery extends QueryDslQueryBase {
@@ -4121,7 +4134,6 @@ export interface QueryDslFunctionScoreQuery extends QueryDslQueryBase {
   min_score?: double
   query?: QueryDslQueryContainer
   score_mode?: QueryDslFunctionScoreMode
-  boost?: float
 }

 export interface QueryDslFuzzyQuery extends QueryDslQueryBase {
@@ -5492,9 +5504,9 @@ export interface CatJobsJobsRecord {
   'model.bytes'?: ByteSize
   mb?: ByteSize
   modelBytes?: ByteSize
-  'model.memory_status'?: CatJobsModelMemoryStatus
-  mms?: CatJobsModelMemoryStatus
-  modelMemoryStatus?: CatJobsModelMemoryStatus
+  'model.memory_status'?: MlMemoryStatus
+  mms?: MlMemoryStatus
+  modelMemoryStatus?: MlMemoryStatus
   'model.bytes_exceeded'?: ByteSize
   mbe?: ByteSize
   modelBytesExceeded?: ByteSize
@@ -5513,9 +5525,9 @@ export interface CatJobsJobsRecord {
   'model.bucket_allocation_failures'?: string
   mbaf?: string
   modelBucketAllocationFailures?: string
-  'model.categorization_status'?: CatJobsModelCategorizationStatus
-  mcs?: CatJobsModelCategorizationStatus
-  modelCategorizationStatus?: CatJobsModelCategorizationStatus
+  'model.categorization_status'?: MlCategorizationStatus
+  mcs?: MlCategorizationStatus
+  modelCategorizationStatus?: MlCategorizationStatus
   'model.categorized_doc_count'?: string
   mcdc?: string
   modelCategorizedDocCount?: string
@@ -5610,10 +5622,6 @@ export interface CatJobsJobsRecord {
   bucketsTimeExpAvgHour?: string
 }

-export type CatJobsModelCategorizationStatus = 'ok' | 'warn'
-
-export type CatJobsModelMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit'
-
 export interface CatJobsRequest extends CatCatRequestBase {
   job_id?: Id
   allow_no_jobs?: boolean
@@ -10052,7 +10060,7 @@ export interface MlAnalysisConfig {
   categorization_field_name?: Field
   categorization_filters?: string[]
   detectors: MlDetector[]
-  influencers?: Field[]
+  influencers: Field[]
   latency?: Time
   multivariate_by_fields?: boolean
   per_partition_categorization?: MlPerPartitionCategorization
@@ -10155,6 +10163,8 @@ export interface MlCategorizationAnalyzer {
   char_filter?: (string | AnalysisCharFilter)[]
 }

+export type MlCategorizationStatus = 'ok' | 'warn'
+
 export interface MlCategory {
   category_id: ulong
   examples: string[]
@@ -10181,20 +10191,26 @@ export type MlChunkingMode = 'auto' | 'manual' | 'off'

 export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte'

+export interface MlCustomSettings {
+  custom_urls?: XpackUsageUrlConfig[]
+  created_by?: string
+  job_tags?: Record
+}
+
 export interface MlDataCounts {
   bucket_count: long
-  earliest_record_timestamp?: long
+  earliest_record_timestamp: long
   empty_bucket_count: long
   input_bytes: long
   input_field_count: long
   input_record_count: long
   invalid_date_count: long
   job_id: Id
-  last_data_time?: long
-  latest_empty_bucket_timestamp?: long
-  latest_record_timestamp?: long
-  latest_sparse_bucket_timestamp?: long
-  latest_bucket_timestamp?: long
+  last_data_time: long
+  latest_empty_bucket_timestamp: long
+  latest_record_timestamp: long
+  latest_sparse_bucket_timestamp: long
+  latest_bucket_timestamp: long
   missing_field_count: long
   out_of_order_timestamp_count: long
   processed_field_count: long
@@ -10238,9 +10254,9 @@ export interface MlDatafeedIndicesOptions {
 export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping'

 export interface MlDatafeedStats {
-  assignment_explanation?: string
+  assignment_explanation: string
   datafeed_id: Id
-  node?: MlDiscoveryNode
+  node: MlDiscoveryNode
   state: MlDatafeedState
   timing_stats: MlDatafeedTimingStats
 }
@@ -10251,6 +10267,7 @@ export interface MlDatafeedTimingStats {
   job_id: Id
   search_count: long
   total_search_time_ms: double
+  average_search_time_per_bucket_ms: number
 }

 export interface MlDataframeAnalysis {
@@ -10372,8 +10389,8 @@ export interface MlDataframeAnalyticsFieldSelection {
 }

 export interface MlDataframeAnalyticsMemoryEstimation {
-  expected_memory_with_disk: ByteSize
-  expected_memory_without_disk: ByteSize
+  expected_memory_with_disk: string
+  expected_memory_without_disk: string
 }

 export interface MlDataframeAnalyticsSource {
@@ -10427,7 +10444,7 @@ export interface MlDataframeAnalyticsSummary {
   dest: MlDataframeAnalyticsDestination
   analysis: MlDataframeAnalysisContainer
   description?: string
-  model_memory_limit?: ByteSize
+  model_memory_limit?: string
   max_num_threads?: integer
   analyzed_fields?: MlDataframeAnalysisAnalyzedFields
   allow_lazy_start?: boolean
@@ -10504,7 +10521,7 @@ export interface MlDelayedDataCheckConfig {

 export interface MlDetectionRule {
   actions?: MlRuleAction[]
-  conditions: MlRuleCondition[]
+  conditions?: MlRuleCondition[]
   scope?: Record
 }
@@ -10515,11 +10532,10 @@ export interface MlDetector {
   detector_index?: integer
   exclude_frequent?: MlExcludeFrequent
   field_name?: Field
-  function?: string
+  function: string
   use_null?: boolean
   over_field_name?: Field
   partition_field_name?: Field
-  description?: string
 }

 export interface MlDiscoveryNode {
@@ -10577,29 +10593,23 @@ export interface MlInfluence {

 export interface MlJob {
   allow_lazy_open?: boolean
-  analysis_config?: MlAnalysisConfig
+  analysis_config: MlAnalysisConfig
   analysis_limits?: MlAnalysisLimits
-  background_persist_interval?: Time
-  count?: integer
-  created_by?: EmptyObject
-  create_time?: integer
-  detectors?: MlJobStatistics
-  data_description?: MlDataDescription
-  description?: string
-  finished_time?: integer
-  forecasts?: XpackUsageMlJobForecasts
-  job_id?: Id
-  job_type?: string
-  model_plot?: MlModelPlotConfig
-  model_size?: MlJobStatistics
-  model_snapshot_id?: Id
-  model_snapshot_retention_days?: long
-  renormalization_window_days?: long
+  background_persist_interval: Time
+  create_time: integer
+  data_description: MlDataDescription
+  description: string
+  finished_time: integer
+  job_id: Id
+  job_type: string
+  model_snapshot_id: Id
+  model_snapshot_retention_days: long
+  renormalization_window_days: long
   results_index_name?: IndexName
   results_retention_days?: long
   groups?: string[]
   model_plot_config?: MlModelPlotConfig
-  custom_settings?: XpackUsageCustomSettings
+  custom_settings?: MlCustomSettings
   job_version?: VersionString
   deleting?: boolean
   daily_model_snapshot_retention_after_days?: long
@@ -10624,12 +10634,12 @@ export interface MlJobStatistics {
 }

 export interface MlJobStats {
-  assignment_explanation?: string
+  assignment_explanation: string
   data_counts: MlDataCounts
   forecasts_stats: MlJobForecastStatistics
   job_id: string
   model_size_stats: MlModelSizeStats
-  node?: MlDiscoveryNode
+  node: MlDiscoveryNode
   open_time?: DateString
   state: MlJobState
   timing_stats: MlJobTimingStats
 }

 export interface MlJobTimingStats {
-  average_bucket_processing_time_ms?: double
+  average_bucket_processing_time_ms: double
   bucket_count: long
-  exponential_average_bucket_processing_time_ms?: double
+  exponential_average_bucket_processing_time_ms: double
   exponential_average_bucket_processing_time_per_hour_ms: double
   job_id: Id
   total_bucket_processing_time_ms: double
-  maximum_bucket_processing_time_ms?: double
-  minimum_bucket_processing_time_ms?: double
+  maximum_bucket_processing_time_ms: double
+  minimum_bucket_processing_time_ms: double
 }

 export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit'
@@ -10655,27 +10665,21 @@ export interface MlModelPlotConfig {
   annotations_enabled?: boolean
 }

-export interface MlModelPlotConfigEnabled {
-  enabled: boolean
-  annotations_enabled?: boolean
-  terms?: string
-}
-
 export interface MlModelSizeStats {
   bucket_allocation_failures_count: long
   job_id: Id
   log_time: Time
   memory_status: MlMemoryStatus
   model_bytes: long
-  model_bytes_exceeded?: long
-  model_bytes_memory_limit?: long
-  peak_model_bytes?: long
+  model_bytes_exceeded: long
+  model_bytes_memory_limit: long
+  peak_model_bytes: long
   assignment_memory_basis?: string
   result_type: string
   total_by_field_count: long
   total_over_field_count: long
   total_partition_field_count: long
-  categorization_status: string
+  categorization_status: MlCategorizationStatus
   categorized_doc_count: integer
   dead_category_count: integer
   failed_category_count: integer
@@ -10688,14 +10692,14 @@ export interface MlModelSnapshot {
   description?: string
   job_id: Id
-  latest_record_time_stamp?: Time
-  latest_result_time_stamp?: Time
+  latest_record_time_stamp?: integer
+  latest_result_time_stamp?: integer
   min_version: VersionString
-  model_size_stats?: MlModelSizeStats
+  model_size_stats: MlModelSizeStats
   retain: boolean
   snapshot_doc_count: long
   snapshot_id: Id
-  timestamp: Time
+  timestamp: integer
 }

 export interface MlOutlierDetectionParameters {
@@ -10943,7 +10947,7 @@ export interface MlEstimateModelMemoryRequest extends RequestBase {
 }

 export interface MlEstimateModelMemoryResponse {
-  model_memory_estimate: ByteSize
+  model_memory_estimate: string
 }

 export interface MlEvaluateDataFrameConfusionMatrixItem {
@@ -11046,7 +11050,7 @@ export interface MlExplainDataFrameAnalyticsRequest extends RequestBase {
     dest?: MlDataframeAnalyticsDestination
     analysis: MlDataframeAnalysisContainer
     description?: string
-    model_memory_limit?: ByteSize
+    model_memory_limit?: string
     max_num_threads?: integer
     analyzed_fields?: MlDataframeAnalysisAnalyzedFields
     allow_lazy_start?: boolean
@@ -11370,7 +11374,7 @@ export interface MlGetTrainedModelsStatsResponse {

 export interface MlInfoAnomalyDetectors {
   categorization_analyzer: MlCategorizationAnalyzer
   categorization_examples_limit: integer
-  model_memory_limit: ByteSize
+  model_memory_limit: string
   model_snapshot_retention_days: integer
   daily_model_snapshot_retention_after_days: integer
 }
@@ -11457,7 +11461,7 @@ export interface MlPostJobDataResponse {

 export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig {
   source: MlDataframeAnalyticsSource
   analysis: MlDataframeAnalysisContainer
-  model_memory_limit?: ByteSize
+  model_memory_limit?: string
   max_num_threads?: integer
   analyzed_fields?: MlDataframeAnalysisAnalyzedFields
 }
@@ -11475,6 +11479,10 @@ export interface MlPreviewDataFrameAnalyticsResponse {

 export interface MlPreviewDatafeedRequest extends RequestBase {
   datafeed_id: Id
+  body?: {
+    job_config?: MlJob
+    datafeed_config?: MlDatafeed
+  }
 }

 export interface MlPreviewDatafeedResponse {
@@ -11542,13 +11550,16 @@ export interface MlPutDatafeedRequest extends RequestBase {
   body?: {
     aggregations?: Record
     chunking_config?: MlChunkingConfig
+    delayed_data_check_config?: MlDelayedDataCheckConfig
     frequency?: Time
     indices?: Indices
     indexes?: string[]
+    indices_options?: MlDatafeedIndicesOptions
     job_id?: Id
     max_empty_searches?: integer
     query?: QueryDslQueryContainer
     query_delay?: Time
+    runtime_mappings?: MappingRuntimeFields
     script_fields?: Record
     scroll_size?: integer
   }
 }

 export interface MlPutDatafeedResponse {
   aggregations: Record
   chunking_config: MlChunkingConfig
+  delayed_data_check_config?: MlDelayedDataCheckConfig
   datafeed_id: Id
   frequency: Time
   indices: Indices
   job_id: Id
+  indices_options?: MlDatafeedIndicesOptions
   max_empty_searches: integer
   query: QueryDslQueryContainer
   query_delay: Time
-  script_fields: Record
+  runtime_mappings?: MappingRuntimeFields
+  script_fields?: Record
   scroll_size: integer
 }
@@ -11586,32 +11600,40 @@ export interface MlPutJobRequest extends RequestBase {
   job_id: Id
   body?: {
     allow_lazy_open?: boolean
-    analysis_config?: MlAnalysisConfig
+    analysis_config: MlAnalysisConfig
     analysis_limits?: MlAnalysisLimits
+    background_persist_interval: Time
+    custom_settings?: MlCustomSettings
     data_description?: MlDataDescription
+    daily_model_snapshot_retention_after_days?: long
+    groups?: string[]
     description?: string
-    model_plot?: MlModelPlotConfig
+    model_plot_config?: MlModelPlotConfig
     model_snapshot_retention_days?: long
     results_index_name?: IndexName
+    results_retention_days?: long
   }
 }

 export interface MlPutJobResponse {
   allow_lazy_open: boolean
   analysis_config: MlAnalysisConfig
-  analysis_limits: MlAnalysisLimits
+  analysis_limits?: MlAnalysisLimits
   background_persist_interval: Time
   create_time: DateString
+  custom_settings?: MlCustomSettings
   data_description: MlDataDescription
+  daily_model_snapshot_retention_after_days?: long
+  groups?: string[]
   description: string
   job_id: Id
   job_type: string
-  model_plot: MlModelPlotConfig
+  model_plot_config: MlModelPlotConfig
   model_snapshot_id: Id
   model_snapshot_retention_days: long
   renormalization_window_days: long
   results_index_name: string
-  results_retention_days: long
+  results_retention_days?: long
 }

 export interface MlPutTrainedModelRequest extends RequestBase {
@@ -11690,7 +11712,7 @@ export interface MlStopDataFrameAnalyticsResponse {
 }

 export interface MlStopDatafeedRequest extends RequestBase {
-  datafeed_id: Ids
+  datafeed_id: Id
   allow_no_match?: boolean
   force?: boolean
   body?: {
@@ -11707,7 +11729,7 @@ export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase {
   id: Id
   body?: {
     description?: string
-    model_memory_limit?: ByteSize
+    model_memory_limit?: string
     max_num_threads?: integer
     allow_lazy_start?: boolean
   }
@@ -11720,7 +11742,7 @@ export interface MlUpdateDataFrameAnalyticsResponse {
   source: MlDataframeAnalyticsSource
   description?: string
   dest: MlDataframeAnalyticsDestination
-  model_memory_limit: ByteSize
+  model_memory_limit: string
   allow_lazy_start: boolean
   max_num_threads: integer
   analysis: MlDataframeAnalysisContainer
@@ -11751,7 +11773,7 @@ export interface MlUpdateJobRequest extends RequestBase {
     custom_settings?: Record
     categorization_filters?: string[]
     description?: string
-    model_plot_config?: MlModelPlotConfigEnabled
+    model_plot_config?: MlModelPlotConfig
     daily_model_snapshot_retention_after_days?: long
     model_snapshot_retention_days?: long
     renormalization_window_days?: long
@@ -14881,12 +14903,6 @@ export interface XpackUsageCounter {
   total: long
 }

-export interface XpackUsageCustomSettings {
-  custom_urls?: XpackUsageUrlConfig[]
-  created_by?: string
-  job_tags?: Record
-}
-
 export interface XpackUsageDataStreams extends XpackUsageBase {
   data_streams: long
   indices_count: long
@@ -15052,11 +15068,6 @@ export interface XpackUsageMlInferenceTrainedModelsCount {
   classification: long
 }

-export interface XpackUsageMlJobForecasts {
-  total: long
-  forecasted_jobs: long
-}
-
 export interface XpackUsageMonitoring extends XpackUsageBase {
   collection_enabled: boolean
   enabled_exporters: Record
-11707,7 +11729,7 @@ export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { id: Id body?: { description?: string - model_memory_limit?: ByteSize + model_memory_limit?: string max_num_threads?: integer allow_lazy_start?: boolean } @@ -11720,7 +11742,7 @@ export interface MlUpdateDataFrameAnalyticsResponse { source: MlDataframeAnalyticsSource description?: string dest: MlDataframeAnalyticsDestination - model_memory_limit: ByteSize + model_memory_limit: string allow_lazy_start: boolean max_num_threads: integer analysis: MlDataframeAnalysisContainer @@ -11751,7 +11773,7 @@ export interface MlUpdateJobRequest extends RequestBase { custom_settings?: Record categorization_filters?: string[] description?: string - model_plot_config?: MlModelPlotConfigEnabled + model_plot_config?: MlModelPlotConfig daily_model_snapshot_retention_after_days?: long model_snapshot_retention_days?: long renormalization_window_days?: long @@ -14881,12 +14903,6 @@ export interface XpackUsageCounter { total: long } -export interface XpackUsageCustomSettings { - custom_urls?: XpackUsageUrlConfig[] - created_by?: string - job_tags?: Record -} - export interface XpackUsageDataStreams extends XpackUsageBase { data_streams: long indices_count: long @@ -15052,11 +15068,6 @@ export interface XpackUsageMlInferenceTrainedModelsCount { classification: long } -export interface XpackUsageMlJobForecasts { - total: long - forecasted_jobs: long -} - export interface XpackUsageMonitoring extends XpackUsageBase { collection_enabled: boolean enabled_exporters: Record From 102520c3046e771cc7be3625c3f68520b0273d9b Mon Sep 17 00:00:00 2001 From: delvedor Date: Thu, 3 Jun 2021 18:41:34 +0200 Subject: [PATCH 025/647] Bumped v8.0.0-canary.12 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 9efe4d585..d8d2268ae 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.11", + "versionCanary": "8.0.0-canary.12", "keywords": [ "elasticsearch", "elastic", From 249118fac675b2c00e7c9998fb2ceeb482bbf170 Mon Sep 17 00:00:00 2001 From: delvedor Date: Fri, 4 Jun 2021 11:36:49 +0200 Subject: [PATCH 026/647] Updated type definitions --- api/kibana.d.ts | 2 +- api/new.d.ts | 3 ++- api/types.d.ts | 26 +++++++++++++------------- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/api/kibana.d.ts b/api/kibana.d.ts index 3dbec026d..cb1f66ba0 100644 --- a/api/kibana.d.ts +++ b/api/kibana.d.ts @@ -344,7 +344,7 @@ interface KibanaClient { postCalendarEvents(params?: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise> postData(params: T.MlPostJobDataRequest, options?: TransportRequestOptions): TransportRequestPromise> previewDataFrameAnalytics(params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> - previewDatafeed(params: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> + previewDatafeed(params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> putCalendar(params: T.MlPutCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise> putCalendarJob(params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise> putDataFrameAnalytics(params: 
T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> diff --git a/api/new.d.ts b/api/new.d.ts index 8bc6cef17..c43909548 100644 --- a/api/new.d.ts +++ b/api/new.d.ts @@ -945,7 +945,8 @@ declare class Client { previewDataFrameAnalytics(callback: callbackFn): TransportRequestCallback previewDataFrameAnalytics(params: T.MlPreviewDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback previewDataFrameAnalytics(params: T.MlPreviewDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - previewDatafeed(params: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> + previewDatafeed(params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> + previewDatafeed(callback: callbackFn, TContext>): TransportRequestCallback previewDatafeed(params: T.MlPreviewDatafeedRequest, callback: callbackFn, TContext>): TransportRequestCallback previewDatafeed(params: T.MlPreviewDatafeedRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback putCalendar(params: T.MlPutCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise> diff --git a/api/types.d.ts b/api/types.d.ts index 52e48fcfa..74d62ffe7 100644 --- a/api/types.d.ts +++ b/api/types.d.ts @@ -10231,7 +10231,7 @@ export interface MlDatafeed { chunking_config?: MlChunkingConfig datafeed_id: Id frequency?: Timestamp - indices: Indices + indices: string[] indexes?: string[] job_id: Id max_empty_searches?: integer @@ -10692,8 +10692,8 @@ export interface MlModelSizeStats { export interface MlModelSnapshot { description?: string job_id: Id - latest_record_time_stamp?: integer - latest_result_time_stamp?: integer + latest_record_time_stamp: integer + latest_result_time_stamp: integer min_version: VersionString model_size_stats: MlModelSizeStats retain: boolean @@ -11241,7 +11241,7 @@ export interface MlGetDatafeedStatsResponse { } export interface MlGetDatafeedsRequest extends RequestBase { - datafeed_id?: Id + datafeed_id?: Ids allow_no_datafeeds?: boolean exclude_generated?: boolean } @@ -11389,9 +11389,9 @@ export interface MlInfoDefaults { } export interface MlInfoLimits { - max_model_memory_limit?: ByteSize - effective_max_model_memory_limit: ByteSize - total_ml_memory: ByteSize + max_model_memory_limit?: string + effective_max_model_memory_limit: string + total_ml_memory: string } export interface MlInfoNativeCode { @@ -11478,7 +11478,7 @@ export interface MlPreviewDataFrameAnalyticsResponse { } export interface MlPreviewDatafeedRequest extends RequestBase { - datafeed_id: Id + datafeed_id?: Id body?: { job_config?: MlJob datafeed_config?: MlDatafeed @@ -11520,7 +11520,7 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase { dest: MlDataframeAnalyticsDestination analysis: MlDataframeAnalysisContainer description?: string - model_memory_limit?: ByteSize + model_memory_limit?: string max_num_threads?: integer analyzed_fields?: MlDataframeAnalysisAnalyzedFields allow_lazy_start?: boolean @@ -11534,7 +11534,7 @@ export interface MlPutDataFrameAnalyticsResponse { source: MlDataframeAnalyticsSource description?: string dest: MlDataframeAnalyticsDestination - model_memory_limit: ByteSize + model_memory_limit: string allow_lazy_start: boolean max_num_threads: integer analysis: MlDataframeAnalysisContainer @@ -11552,7 +11552,7 @@ export interface MlPutDatafeedRequest extends 
RequestBase { chunking_config?: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Time - indices?: Indices + indices?: string[] indexes?: string[] indices_options?: MlDatafeedIndicesOptions job_id?: Id @@ -11571,7 +11571,7 @@ export interface MlPutDatafeedResponse { delayed_data_check_config?: MlDelayedDataCheckConfig datafeed_id: Id frequency: Time - indices: Indices + indices: string[] job_id: Id indices_options?: MlDatafeedIndicesOptions max_empty_searches: integer @@ -11628,7 +11628,7 @@ export interface MlPutJobResponse { description: string job_id: Id job_type: string - model_plot_config: MlModelPlotConfig + model_plot_config?: MlModelPlotConfig model_snapshot_id: Id model_snapshot_retention_days: long renormalization_window_days: long From 4933e3b9bec474511233f5a05819e493060a86e2 Mon Sep 17 00:00:00 2001 From: delvedor Date: Fri, 4 Jun 2021 11:38:21 +0200 Subject: [PATCH 027/647] Bumped v8.0.0-canary.13 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index d8d2268ae..fdd8fc05a 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.12", + "versionCanary": "8.0.0-canary.13", "keywords": [ "elasticsearch", "elastic", From 9bf3fdf8f18b780f1e5b09bf0a52dcf77fe6126a Mon Sep 17 00:00:00 2001 From: Hamza Sharif Date: Mon, 12 Jul 2021 05:50:13 -0400 Subject: [PATCH 028/647] Fix typo (#1486) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f74d258fa..eefcc47ce 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ npm install @elastic/elasticsearch NOTE: The minimum supported version of Node.js is `v10`. -The client versioning follows the Elastc Stack versioning, this means that +The client versioning follows the Elastic Stack versioning, this means that major, minor, and patch releases are done following a precise schedule that often does not coincide with the [Node.js release](https://nodejs.org/en/about/releases/) times. 
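A usage sketch for the type-definition updates above (an illustration for review, not part of any patch in this series): `MlPreviewDatafeedRequest` now marks `datafeed_id` as optional and accepts inline `job_config` and `datafeed_config` objects in the body, so a datafeed can be previewed before either resource is persisted. The index name, bucket span, and time field below are assumptions:

[source,js]
----
// Preview a not-yet-created datafeed by passing both configs inline.
const { body } = await client.ml.previewDatafeed({
  body: {
    datafeed_config: { indices: ['my-metrics'] }, // hypothetical index
    job_config: {
      analysis_config: { bucket_span: '1h', detectors: [{ function: 'count' }] },
      data_description: { time_field: '@timestamp' }
    }
  }
})
console.log(body) // sample documents the datafeed would extract
----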
From f2d8cf7d34ebc149c9119d13c36a79420d454c8d Mon Sep 17 00:00:00 2001 From: Joshua Hull Date: Mon, 12 Jul 2021 07:04:29 -0700 Subject: [PATCH 029/647] Bulk update improvements (#1428) --- lib/Helpers.d.ts | 2 + lib/Helpers.js | 9 ++++ test/unit/helpers/bulk.test.js | 96 ++++++++++++++++++++++++++++++++++ 3 files changed, 107 insertions(+) diff --git a/lib/Helpers.d.ts b/lib/Helpers.d.ts index 66d7ee427..a416842c7 100644 --- a/lib/Helpers.d.ts +++ b/lib/Helpers.d.ts @@ -36,6 +36,7 @@ export interface ScrollSearchResponse extends Promise { abort: () => BulkHelper + readonly stats: BulkStats } export interface BulkStats { @@ -43,6 +44,7 @@ export interface BulkStats { failed: number retry: number successful: number + noop: number time: number bytes: number aborted: boolean diff --git a/lib/Helpers.js b/lib/Helpers.js index 184e256b0..cd78c392d 100644 --- a/lib/Helpers.js +++ b/lib/Helpers.js @@ -456,6 +456,7 @@ class Helpers { failed: 0, retry: 0, successful: 0, + noop: 0, time: 0, bytes: 0, aborted: false @@ -463,6 +464,9 @@ class Helpers { const p = iterate() const helper = { + get stats () { + return stats + }, then (onFulfilled, onRejected) { return p.then(onFulfilled, onRejected) }, @@ -692,6 +696,11 @@ class Helpers { if (err) return callback(err, null) if (body.errors === false) { stats.successful += body.items.length + for (const item of body.items) { + if (item.update && item.update.result === 'noop') { + stats.noop++ + } + } return callback(null, []) } const retry = [] diff --git a/test/unit/helpers/bulk.test.js b/test/unit/helpers/bulk.test.js index a2059fb73..5fda2856f 100644 --- a/test/unit/helpers/bulk.test.js +++ b/test/unit/helpers/bulk.test.js @@ -913,6 +913,55 @@ test('bulk update', t => { }) }) + t.test('Should track the number of noop results', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.strictEqual(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + const [action, payload] = params.body.split('\n') + t.deepEqual(JSON.parse(action), { update: { _index: 'test', _id: count } }) + t.deepEqual(JSON.parse(payload), { doc: dataset[count++], doc_as_upsert: true }) + return { body: { errors: false, items: [{ update: { result: 'noop' } }] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + let id = 0 + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return [{ + update: { + _index: 'test', + _id: id++ + } + }, { + doc_as_upsert: true + }] + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + noop: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + t.end() }) @@ -1263,5 +1312,52 @@ test('Flush interval', t => { }) }) + t.test('Operation stats', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.strictEqual(params.path, '/_bulk') + t.match(params.headers, { + 'content-type': 'application/x-ndjson', + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion},h=bp` + }) + const [action, payload] = params.body.split('\n') + t.deepEqual(JSON.parse(action), { index: { _index: 'test' } }) + t.deepEqual(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: 
[{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const b = client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + const result = await b + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, b.stats) + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + t.end() }) From 101a13f63efa303a2ce1310bcdb98e4372e377b8 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Tue, 13 Jul 2021 09:39:10 +0200 Subject: [PATCH 030/647] Add support for bearer auth (#1488) --- docs/basic-config.asciidoc | 9 ++++++++- docs/connecting.asciidoc | 20 ++++++++++++++++++++ index.d.ts | 5 +++-- lib/Connection.js | 2 ++ lib/pool/index.d.ts | 5 +++++ test/unit/client.test.js | 27 +++++++++++++++++++++++++++ 6 files changed, 65 insertions(+), 3 deletions(-) diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index 38ed7c1d8..04e95ca9e 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -67,6 +67,13 @@ auth: { apiKey: 'base64EncodedKey' } ---- +Bearer authentication, useful for https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html[service account tokens]. Be aware that it does not handle automatic token refresh: +[source,js] +---- +auth: { + bearer: 'token' +} +---- |`maxRetries` @@ -248,4 +255,4 @@ const client = new Client({ |`boolean`, `'proto'`, `'constructor'` - By the default the client will protect you against prototype poisoning attacks. Read https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08[this article] to learn more. If needed you can disable prototype poisoning protection entirely or one of the two checks. Read the `secure-json-parse` https://github.com/fastify/secure-json-parse[documentation] to learn more. + _Default:_ `false` -|=== \ No newline at end of file +|=== diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index 98840159b..a036478d0 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -93,6 +93,26 @@ const client = new Client({ }) ---- +[discrete] +[[auth-bearer]] +==== Bearer authentication + +You can provide your credentials by passing the `bearer` token +parameter via the `auth` option. +Useful for https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html[service account tokens]. +Be aware that it does not handle automatic token refresh.
+ +[source,js] +---- +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://localhost:9200/', + auth: { + bearer: 'token' + } +}) +---- + [discrete] [[auth-basic]] diff --git a/index.d.ts b/index.d.ts index 38f2aca8f..f20ea08c9 100644 --- a/index.d.ts +++ b/index.d.ts @@ -43,7 +43,8 @@ import { CloudConnectionPool, ResurrectEvent, BasicAuth, - ApiKeyAuth + ApiKeyAuth, + BearerAuth } from './lib/pool'; import Serializer from './lib/Serializer'; import Helpers from './lib/Helpers'; @@ -106,7 +107,7 @@ interface ClientOptions { opaqueIdPrefix?: string; generateRequestId?: generateRequestIdFn; name?: string | symbol; - auth?: BasicAuth | ApiKeyAuth; + auth?: BasicAuth | ApiKeyAuth | BearerAuth; context?: Context; proxy?: string | URL; enableMetaHeader?: boolean; diff --git a/lib/Connection.js b/lib/Connection.js index 20e08f708..6eda7c539 100644 --- a/lib/Connection.js +++ b/lib/Connection.js @@ -331,6 +331,8 @@ function prepareHeaders (headers = {}, auth) { } else { headers.authorization = `ApiKey ${auth.apiKey}` } + } else if (auth.bearer) { + headers.authorization = `Bearer ${auth.bearer}` } else if (auth.username && auth.password) { headers.authorization = 'Basic ' + Buffer.from(`${auth.username}:${auth.password}`).toString('base64') } diff --git a/lib/pool/index.d.ts b/lib/pool/index.d.ts index 246f88d2b..c1ebbdad6 100644 --- a/lib/pool/index.d.ts +++ b/lib/pool/index.d.ts @@ -61,6 +61,10 @@ interface BasicAuth { password: string; } +interface BearerAuth { + bearer: string +} + interface resurrectOptions { now?: number; requestId: string; @@ -204,6 +208,7 @@ export { getConnectionOptions, ApiKeyAuth, BasicAuth, + BearerAuth, internals, resurrectOptions, ResurrectEvent, diff --git a/test/unit/client.test.js b/test/unit/client.test.js index 9c2c551ba..95ae7cc12 100644 --- a/test/unit/client.test.js +++ b/test/unit/client.test.js @@ -1421,3 +1421,30 @@ test('Disable prototype poisoning protection', t => { t.error(err) }) }) + +test('Bearer auth', t => { + t.plan(3) + + function handler (req, res) { + t.match(req.headers, { + authorization: 'Bearer Zm9vOmJhcg==' + }) + res.setHeader('Content-Type', 'application/json;utf=8') + res.end(JSON.stringify({ hello: 'world' })) + } + + buildServer(handler, ({ port }, server) => { + const client = new Client({ + node: `http://localhost:${port}`, + auth: { + bearer: 'Zm9vOmJhcg==' + } + }) + + client.info((err, { body }) => { + t.error(err) + t.same(body, { hello: 'world' }) + server.stop() + }) + }) +}) From 4073d60b9759b42338724556dd732fbe6e0bd3d0 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Tue, 13 Jul 2021 09:47:45 +0200 Subject: [PATCH 031/647] Add api compatibility header support (#1478) --- docs/advanced-config.asciidoc | 16 ++++++++++- index.js | 4 +++ lib/Transport.js | 9 ++++--- test/unit/client.test.js | 50 +++++++++++++++++++++++++++++++++++ 4 files changed, 75 insertions(+), 4 deletions(-) diff --git a/docs/advanced-config.asciidoc b/docs/advanced-config.asciidoc index e5a64ea1f..1308b806a 100644 --- a/docs/advanced-config.asciidoc +++ b/docs/advanced-config.asciidoc @@ -83,4 +83,18 @@ class MySerializer extends Serializer { const client = new Client({ Serializer: MySerializer }) ----- \ No newline at end of file +---- + +[discrete] +==== Migrate to v8 + +The Node.js client can be configured to emit an HTTP header +``Accept: application/vnd.elasticsearch+json; compatible-with=7`` +which signals to Elasticsearch that the client is requesting +``7.x`` version of request and 
response bodies. This allows for +upgrading from 7.x to 8.x version of Elasticsearch without upgrading +everything at once. Elasticsearch should be upgraded first after +the compatibility header is configured and clients should be upgraded +second. +To enable the setting, configure the environment variable +``ELASTIC_CLIENT_APIVERSIONING`` to ``true``. diff --git a/index.js b/index.js index e36e6bee4..fc9e62ff3 100644 --- a/index.js +++ b/index.js @@ -116,6 +116,10 @@ class Client extends ESAPI { disablePrototypePoisoningProtection: false }, opts) + if (process.env.ELASTIC_CLIENT_APIVERSIONING === 'true') { + options.headers = Object.assign({ accept: 'application/vnd.elasticsearch+json; compatible-with=7' }, options.headers) + } + this[kInitialOptions] = options this[kExtensions] = [] this.name = options.name diff --git a/lib/Transport.js b/lib/Transport.js index dae838d1b..e99c1ccc9 100644 --- a/lib/Transport.js +++ b/lib/Transport.js @@ -38,6 +38,7 @@ const clientVersion = require('../package.json').version const userAgent = `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${process.version})` const MAX_BUFFER_LENGTH = buffer.constants.MAX_LENGTH const MAX_STRING_LENGTH = buffer.constants.MAX_STRING_LENGTH +const kApiVersioning = Symbol('api versioning') class Transport { constructor (opts) { @@ -64,6 +65,7 @@ class Transport { this.generateRequestId = opts.generateRequestId || generateRequestId() this.name = opts.name this.opaqueIdPrefix = opts.opaqueIdPrefix + this[kApiVersioning] = process.env.ELASTIC_CLIENT_APIVERSIONING === 'true' this.nodeFilter = opts.nodeFilter || defaultNodeFilter if (typeof opts.nodeSelector === 'function') { @@ -295,7 +297,8 @@ class Transport { // - the request is not a HEAD request // - the payload is not an empty string if (result.headers['content-type'] !== undefined && - result.headers['content-type'].indexOf('application/json') > -1 && + (result.headers['content-type'].indexOf('application/json') > -1 || + result.headers['content-type'].indexOf('application/vnd.elasticsearch+json') > -1) && isHead === false && payload !== '' ) { @@ -369,7 +372,7 @@ class Transport { } if (params.body !== '') { - headers['content-type'] = headers['content-type'] || 'application/json' + headers['content-type'] = headers['content-type'] || (this[kApiVersioning] ? 'application/vnd.elasticsearch+json; compatible-with=7' : 'application/json') } // handle ndjson body @@ -386,7 +389,7 @@ class Transport { params.body = params.bulkBody } if (params.body !== '') { - headers['content-type'] = headers['content-type'] || 'application/x-ndjson' + headers['content-type'] = headers['content-type'] || (this[kApiVersioning] ? 
'application/vnd.elasticsearch+x-ndjson; compatible-with=7' : 'application/x-ndjson') } } diff --git a/test/unit/client.test.js b/test/unit/client.test.js index 95ae7cc12..8c65cf21d 100644 --- a/test/unit/client.test.js +++ b/test/unit/client.test.js @@ -1422,6 +1422,56 @@ test('Disable prototype poisoning protection', t => { }) }) +test('API compatibility header (json)', t => { + t.plan(4) + + function handler (req, res) { + t.equal(req.headers.accept, 'application/vnd.elasticsearch+json; compatible-with=7') + t.equal(req.headers['content-type'], 'application/vnd.elasticsearch+json; compatible-with=7') + res.setHeader('Content-Type', 'application/vnd.elasticsearch+json; compatible-with=7') + res.end(JSON.stringify({ hello: 'world' })) + } + + buildServer(handler, ({ port }, server) => { + process.env.ELASTIC_CLIENT_APIVERSIONING = 'true' + const client = new Client({ + node: `http://localhost:${port}` + }) + + client.index({ index: 'foo', body: {} }, (err, { body }) => { + t.error(err) + t.same(body, { hello: 'world' }) + server.stop() + delete process.env.ELASTIC_CLIENT_APIVERSIONING + }) + }) +}) + +test('API compatibility header (x-ndjson)', t => { + t.plan(4) + + function handler (req, res) { + t.equal(req.headers.accept, 'application/vnd.elasticsearch+json; compatible-with=7') + t.equal(req.headers['content-type'], 'application/vnd.elasticsearch+x-ndjson; compatible-with=7') + res.setHeader('Content-Type', 'application/vnd.elasticsearch+json; compatible-with=7') + res.end(JSON.stringify({ hello: 'world' })) + } + + buildServer(handler, ({ port }, server) => { + process.env.ELASTIC_CLIENT_APIVERSIONING = 'true' + const client = new Client({ + node: `http://localhost:${port}` + }) + + client.bulk({ index: 'foo', body: [{}, {}] }, (err, { body }) => { + t.error(err) + t.same(body, { hello: 'world' }) + server.stop() + delete process.env.ELASTIC_CLIENT_APIVERSIONING + }) + }) +}) + test('Bearer auth', t => { t.plan(3) From cab8d8be38ca8c7826c57764fecb77668a3f102a Mon Sep 17 00:00:00 2001 From: delvedor Date: Thu, 15 Jul 2021 18:01:43 +0200 Subject: [PATCH 032/647] API generation --- api/api/ilm.js | 26 ++- api/api/indices.js | 60 +++++- api/api/ml.js | 112 ++++++++++ api/api/security.js | 194 +++++++++++++++++- api/api/snapshot.js | 32 ++- api/api/sql.js | 90 +++++++- api/requestParams.d.ts | 104 ++++++++++ docs/reference.asciidoc | 444 +++++++++++++++++++++++++++++++++++++++- index.d.ts | 144 +++++++++++++ 9 files changed, 1192 insertions(+), 14 deletions(-) diff --git a/api/api/ilm.js b/api/api/ilm.js index 9ded145c4..b108bbf91 100644 --- a/api/api/ilm.js +++ b/api/api/ilm.js @@ -23,8 +23,8 @@ /* eslint no-unused-vars: 0 */ const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'only_managed', 'only_errors'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', onlyManaged: 'only_managed', onlyErrors: 'only_errors' } +const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'only_managed', 'only_errors', 'dry_run'] +const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', onlyManaged: 'only_managed', onlyErrors: 'only_errors', dryRun: 'dry_run' } function IlmApi (transport, ConfigurationError) { this.transport = transport @@ -132,6 +132,27 @@ IlmApi.prototype.getStatus = function ilmGetStatusApi (params, options, callback return this.transport.request(request, options, callback) } 
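// Review note (illustrative, not part of the generated API file): the method
// added below maps to `POST /_ilm/migrate_to_data_tiers` and honours the new
// `dry_run` querystring flag registered above. A hedged usage sketch; the body
// fields shown come from the Elasticsearch docs and should be treated as
// assumptions here:
//
//   const { body } = await client.ilm.migrateToDataTiers({
//     dry_run: true, // simulate the migration only
//     body: {
//       legacy_template_to_delete: 'global-template', // hypothetical template name
//       node_attribute: 'custom_attribute'            // hypothetical node attribute
//     }
//   })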
+IlmApi.prototype.migrateToDataTiers = function ilmMigrateToDataTiersApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + let { method, body, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + '_ilm' + '/' + 'migrate_to_data_tiers' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + IlmApi.prototype.moveToStep = function ilmMoveToStepApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -287,6 +308,7 @@ Object.defineProperties(IlmApi.prototype, { explain_lifecycle: { get () { return this.explainLifecycle } }, get_lifecycle: { get () { return this.getLifecycle } }, get_status: { get () { return this.getStatus } }, + migrate_to_data_tiers: { get () { return this.migrateToDataTiers } }, move_to_step: { get () { return this.moveToStep } }, put_lifecycle: { get () { return this.putLifecycle } }, remove_policy: { get () { return this.removePolicy } } diff --git a/api/api/indices.js b/api/api/indices.js index 76e9ed726..f1fbdbc9a 100644 --- a/api/api/indices.js +++ b/api/api/indices.js @@ -23,8 +23,8 @@ /* eslint no-unused-vars: 0 */ const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['timeout', 'master_timeout', 'ignore_unavailable', 'allow_no_indices', 'expand_wildcards', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'index', 'fielddata', 'fields', 'query', 'request', 'wait_for_active_shards', 'local', 'flat_settings', 'include_defaults', 'force', 'wait_if_ongoing', 'flush', 'max_num_segments', 'only_expunge_deletes', 'create', 'cause', 'write_index_only', 'preserve_existing', 'order', 'detailed', 'active_only', 'dry_run', 'verbose', 'status', 'completion_fields', 'fielddata_fields', 'groups', 'level', 'types', 'include_segment_file_sizes', 'include_unloaded_segments', 'forbid_closed_indices', 'explain', 'q', 'analyzer', 'analyze_wildcard', 'default_operator', 'df', 'lenient', 'rewrite', 'all_shards'] -const snakeCase = { masterTimeout: 'master_timeout', ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', errorTrace: 'error_trace', filterPath: 'filter_path', waitForActiveShards: 'wait_for_active_shards', flatSettings: 'flat_settings', includeDefaults: 'include_defaults', waitIfOngoing: 'wait_if_ongoing', maxNumSegments: 'max_num_segments', onlyExpungeDeletes: 'only_expunge_deletes', writeIndexOnly: 'write_index_only', preserveExisting: 'preserve_existing', activeOnly: 'active_only', dryRun: 'dry_run', completionFields: 'completion_fields', fielddataFields: 'fielddata_fields', includeSegmentFileSizes: 'include_segment_file_sizes', includeUnloadedSegments: 'include_unloaded_segments', forbidClosedIndices: 'forbid_closed_indices', analyzeWildcard: 'analyze_wildcard', defaultOperator: 'default_operator', allShards: 'all_shards' } +const acceptedQuerystring = ['timeout', 'master_timeout', 'ignore_unavailable', 'allow_no_indices', 'expand_wildcards', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'index', 'fielddata', 'fields', 'query', 'request', 'wait_for_active_shards', 'run_expensive_tasks', 'flush', 'local', 'flat_settings', 'include_defaults', 'force', 'wait_if_ongoing', 
'max_num_segments', 'only_expunge_deletes', 'create', 'cause', 'write_index_only', 'preserve_existing', 'order', 'detailed', 'active_only', 'dry_run', 'verbose', 'status', 'completion_fields', 'fielddata_fields', 'groups', 'level', 'types', 'include_segment_file_sizes', 'include_unloaded_segments', 'forbid_closed_indices', 'explain', 'q', 'analyzer', 'analyze_wildcard', 'default_operator', 'df', 'lenient', 'rewrite', 'all_shards'] +const snakeCase = { masterTimeout: 'master_timeout', ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', errorTrace: 'error_trace', filterPath: 'filter_path', waitForActiveShards: 'wait_for_active_shards', runExpensiveTasks: 'run_expensive_tasks', flatSettings: 'flat_settings', includeDefaults: 'include_defaults', waitIfOngoing: 'wait_if_ongoing', maxNumSegments: 'max_num_segments', onlyExpungeDeletes: 'only_expunge_deletes', writeIndexOnly: 'write_index_only', preserveExisting: 'preserve_existing', activeOnly: 'active_only', dryRun: 'dry_run', completionFields: 'completion_fields', fielddataFields: 'fielddata_fields', includeSegmentFileSizes: 'include_segment_file_sizes', includeUnloadedSegments: 'include_unloaded_segments', forbidClosedIndices: 'forbid_closed_indices', analyzeWildcard: 'analyze_wildcard', defaultOperator: 'default_operator', allShards: 'all_shards' } function IndicesApi (transport, ConfigurationError) { this.transport = transport @@ -414,6 +414,33 @@ IndicesApi.prototype.deleteTemplate = function indicesDeleteTemplateApi (params, return this.transport.request(request, options, callback) } +IndicesApi.prototype.diskUsage = function indicesDiskUsageApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.index == null) { + const err = new this[kConfigurationError]('Missing required parameter: index') + return handleError(err, callback) + } + + let { method, body, index, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + encodeURIComponent(index) + '/' + '_disk_usage' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + IndicesApi.prototype.exists = function indicesExistsApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -564,6 +591,33 @@ IndicesApi.prototype.existsType = function indicesExistsTypeApi (params, options return this.transport.request(request, options, callback) } +IndicesApi.prototype.fieldUsageStats = function indicesFieldUsageStatsApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.index == null) { + const err = new this[kConfigurationError]('Missing required parameter: index') + return handleError(err, callback) + } + + let { method, body, index, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'GET' + path = '/' + encodeURIComponent(index) + '/' + '_field_usage_stats' + + // build request object + const request = { + method, + path, + body: null, + querystring + } + + return this.transport.request(request, options, callback) +} + IndicesApi.prototype.flush = function 
indicesFlushApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -1571,10 +1625,12 @@ Object.defineProperties(IndicesApi.prototype, { delete_data_stream: { get () { return this.deleteDataStream } }, delete_index_template: { get () { return this.deleteIndexTemplate } }, delete_template: { get () { return this.deleteTemplate } }, + disk_usage: { get () { return this.diskUsage } }, exists_alias: { get () { return this.existsAlias } }, exists_index_template: { get () { return this.existsIndexTemplate } }, exists_template: { get () { return this.existsTemplate } }, exists_type: { get () { return this.existsType } }, + field_usage_stats: { get () { return this.fieldUsageStats } }, get_alias: { get () { return this.getAlias } }, get_data_stream: { get () { return this.getDataStream } }, get_field_mapping: { get () { return this.getFieldMapping } }, diff --git a/api/api/ml.js b/api/api/ml.js index 3ce838ae7..545403d3b 100644 --- a/api/api/ml.js +++ b/api/api/ml.js @@ -1048,6 +1048,33 @@ MlApi.prototype.getTrainedModelsStats = function mlGetTrainedModelsStatsApi (par return this.transport.request(request, options, callback) } +MlApi.prototype.inferTrainedModelDeployment = function mlInferTrainedModelDeploymentApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.model_id == null && params.modelId == null) { + const err = new this[kConfigurationError]('Missing required parameter: model_id or modelId') + return handleError(err, callback) + } + + let { method, body, modelId, model_id, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) + '/' + 'deployment' + '/' + '_infer' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + MlApi.prototype.info = function mlInfoApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -1466,6 +1493,33 @@ MlApi.prototype.putTrainedModelAlias = function mlPutTrainedModelAliasApi (param return this.transport.request(request, options, callback) } +MlApi.prototype.resetJob = function mlResetJobApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.job_id == null && params.jobId == null) { + const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') + return handleError(err, callback) + } + + let { method, body, jobId, job_id, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_reset' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + MlApi.prototype.revertModelSnapshot = function mlRevertModelSnapshotApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -1578,6 +1632,33 @@ MlApi.prototype.startDatafeed = function mlStartDatafeedApi 
(params, options, ca return this.transport.request(request, options, callback) } +MlApi.prototype.startTrainedModelDeployment = function mlStartTrainedModelDeploymentApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.model_id == null && params.modelId == null) { + const err = new this[kConfigurationError]('Missing required parameter: model_id or modelId') + return handleError(err, callback) + } + + let { method, body, modelId, model_id, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) + '/' + 'deployment' + '/' + '_start' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + MlApi.prototype.stopDataFrameAnalytics = function mlStopDataFrameAnalyticsApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -1632,6 +1713,33 @@ MlApi.prototype.stopDatafeed = function mlStopDatafeedApi (params, options, call return this.transport.request(request, options, callback) } +MlApi.prototype.stopTrainedModelDeployment = function mlStopTrainedModelDeploymentApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.model_id == null && params.modelId == null) { + const err = new this[kConfigurationError]('Missing required parameter: model_id or modelId') + return handleError(err, callback) + } + + let { method, body, modelId, model_id, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) + '/' + 'deployment' + '/' + '_stop' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + MlApi.prototype.updateDataFrameAnalytics = function mlUpdateDataFrameAnalyticsApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -1923,6 +2031,7 @@ Object.defineProperties(MlApi.prototype, { get_records: { get () { return this.getRecords } }, get_trained_models: { get () { return this.getTrainedModels } }, get_trained_models_stats: { get () { return this.getTrainedModelsStats } }, + infer_trained_model_deployment: { get () { return this.inferTrainedModelDeployment } }, open_job: { get () { return this.openJob } }, post_calendar_events: { get () { return this.postCalendarEvents } }, post_data: { get () { return this.postData } }, @@ -1936,12 +2045,15 @@ Object.defineProperties(MlApi.prototype, { put_job: { get () { return this.putJob } }, put_trained_model: { get () { return this.putTrainedModel } }, put_trained_model_alias: { get () { return this.putTrainedModelAlias } }, + reset_job: { get () { return this.resetJob } }, revert_model_snapshot: { get () { return this.revertModelSnapshot } }, set_upgrade_mode: { get () { return this.setUpgradeMode } }, start_data_frame_analytics: { get () { return this.startDataFrameAnalytics } }, start_datafeed: { get () { return this.startDatafeed } }, + 
start_trained_model_deployment: { get () { return this.startTrainedModelDeployment } }, stop_data_frame_analytics: { get () { return this.stopDataFrameAnalytics } }, stop_datafeed: { get () { return this.stopDatafeed } }, + stop_trained_model_deployment: { get () { return this.stopTrainedModelDeployment } }, update_data_frame_analytics: { get () { return this.updateDataFrameAnalytics } }, update_datafeed: { get () { return this.updateDatafeed } }, update_filter: { get () { return this.updateFilter } }, diff --git a/api/api/security.js b/api/api/security.js index 1a0f41df4..f9269b2bd 100644 --- a/api/api/security.js +++ b/api/api/security.js @@ -524,6 +524,27 @@ SecurityApi.prototype.enableUser = function securityEnableUserApi (params, optio return this.transport.request(request, options, callback) } +SecurityApi.prototype.enrollKibana = function securityEnrollKibanaApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + let { method, body, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'GET' + path = '/' + '_security' + '/' + 'enroll' + '/' + 'kibana' + + // build request object + const request = { + method, + path, + body: null, + querystring + } + + return this.transport.request(request, options, callback) +} + SecurityApi.prototype.enrollNode = function securityEnrollNodeApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -532,7 +553,7 @@ SecurityApi.prototype.enrollNode = function securityEnrollNodeApi (params, optio let path = '' if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'enroll_node' + path = '/' + '_security' + '/' + 'enroll' + '/' + 'node' // build request object const request = { @@ -1053,6 +1074,168 @@ SecurityApi.prototype.putUser = function securityPutUserApi (params, options, ca return this.transport.request(request, options, callback) } +SecurityApi.prototype.samlAuthenticate = function securitySamlAuthenticateApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.body == null) { + const err = new this[kConfigurationError]('Missing required parameter: body') + return handleError(err, callback) + } + + let { method, body, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + '_security' + '/' + 'saml' + '/' + 'authenticate' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + +SecurityApi.prototype.samlCompleteLogout = function securitySamlCompleteLogoutApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.body == null) { + const err = new this[kConfigurationError]('Missing required parameter: body') + return handleError(err, callback) + } + + let { method, body, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + '_security' + '/' + 'saml' + '/' + 'complete_logout' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + 
return this.transport.request(request, options, callback) +} + +SecurityApi.prototype.samlInvalidate = function securitySamlInvalidateApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.body == null) { + const err = new this[kConfigurationError]('Missing required parameter: body') + return handleError(err, callback) + } + + let { method, body, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + '_security' + '/' + 'saml' + '/' + 'invalidate' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + +SecurityApi.prototype.samlLogout = function securitySamlLogoutApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.body == null) { + const err = new this[kConfigurationError]('Missing required parameter: body') + return handleError(err, callback) + } + + let { method, body, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + '_security' + '/' + 'saml' + '/' + 'logout' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + +SecurityApi.prototype.samlPrepareAuthentication = function securitySamlPrepareAuthenticationApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.body == null) { + const err = new this[kConfigurationError]('Missing required parameter: body') + return handleError(err, callback) + } + + let { method, body, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + '_security' + '/' + 'saml' + '/' + 'prepare' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + +SecurityApi.prototype.samlServiceProviderMetadata = function securitySamlServiceProviderMetadataApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.realm_name == null && params.realmName == null) { + const err = new this[kConfigurationError]('Missing required parameter: realm_name or realmName') + return handleError(err, callback) + } + + let { method, body, realmName, realm_name, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'GET' + path = '/' + '_security' + '/' + 'saml' + '/' + 'metadata' + '/' + encodeURIComponent(realm_name || realmName) + + // build request object + const request = { + method, + path, + body: null, + querystring + } + + return this.transport.request(request, options, callback) +} + Object.defineProperties(SecurityApi.prototype, { change_password: { get () { return this.changePassword } }, clear_api_key_cache: { get () { return this.clearApiKeyCache } }, @@ -1069,6 +1252,7 @@ 
Object.defineProperties(SecurityApi.prototype, { delete_user: { get () { return this.deleteUser } }, disable_user: { get () { return this.disableUser } }, enable_user: { get () { return this.enableUser } }, + enroll_kibana: { get () { return this.enrollKibana } }, enroll_node: { get () { return this.enrollNode } }, get_api_key: { get () { return this.getApiKey } }, get_builtin_privileges: { get () { return this.getBuiltinPrivileges } }, @@ -1087,7 +1271,13 @@ Object.defineProperties(SecurityApi.prototype, { put_privileges: { get () { return this.putPrivileges } }, put_role: { get () { return this.putRole } }, put_role_mapping: { get () { return this.putRoleMapping } }, - put_user: { get () { return this.putUser } } + put_user: { get () { return this.putUser } }, + saml_authenticate: { get () { return this.samlAuthenticate } }, + saml_complete_logout: { get () { return this.samlCompleteLogout } }, + saml_invalidate: { get () { return this.samlInvalidate } }, + saml_logout: { get () { return this.samlLogout } }, + saml_prepare_authentication: { get () { return this.samlPrepareAuthentication } }, + saml_service_provider_metadata: { get () { return this.samlServiceProviderMetadata } } }) module.exports = SecurityApi diff --git a/api/api/snapshot.js b/api/api/snapshot.js index 911d13f2d..3028779fd 100644 --- a/api/api/snapshot.js +++ b/api/api/snapshot.js @@ -23,8 +23,8 @@ /* eslint no-unused-vars: 0 */ const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['master_timeout', 'timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'wait_for_completion', 'verify', 'ignore_unavailable', 'index_details', 'verbose', 'local'] -const snakeCase = { masterTimeout: 'master_timeout', errorTrace: 'error_trace', filterPath: 'filter_path', waitForCompletion: 'wait_for_completion', ignoreUnavailable: 'ignore_unavailable', indexDetails: 'index_details' } +const acceptedQuerystring = ['master_timeout', 'timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'wait_for_completion', 'verify', 'ignore_unavailable', 'index_details', 'include_repository', 'verbose', 'local', 'blob_count', 'concurrency', 'read_node_count', 'early_read_node_count', 'seed', 'rare_action_probability', 'max_blob_size', 'max_total_data_size', 'detailed', 'rarely_abort_writes'] +const snakeCase = { masterTimeout: 'master_timeout', errorTrace: 'error_trace', filterPath: 'filter_path', waitForCompletion: 'wait_for_completion', ignoreUnavailable: 'ignore_unavailable', indexDetails: 'index_details', includeRepository: 'include_repository', blobCount: 'blob_count', readNodeCount: 'read_node_count', earlyReadNodeCount: 'early_read_node_count', rareActionProbability: 'rare_action_probability', maxBlobSize: 'max_blob_size', maxTotalDataSize: 'max_total_data_size', rarelyAbortWrites: 'rarely_abort_writes' } function SnapshotApi (transport, ConfigurationError) { this.transport = transport @@ -301,6 +301,33 @@ SnapshotApi.prototype.getRepository = function snapshotGetRepositoryApi (params, return this.transport.request(request, options, callback) } +SnapshotApi.prototype.repositoryAnalyze = function snapshotRepositoryAnalyzeApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.repository == null) { + const err = new this[kConfigurationError]('Missing required parameter: repository') + return handleError(err, callback) + } + + let { method, body, 
repository, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'POST' + path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + '_analyze' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + SnapshotApi.prototype.restore = function snapshotRestoreApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -405,6 +432,7 @@ Object.defineProperties(SnapshotApi.prototype, { create_repository: { get () { return this.createRepository } }, delete_repository: { get () { return this.deleteRepository } }, get_repository: { get () { return this.getRepository } }, + repository_analyze: { get () { return this.repositoryAnalyze } }, verify_repository: { get () { return this.verifyRepository } } }) diff --git a/api/api/sql.js b/api/api/sql.js index 543a8011a..be7c56938 100644 --- a/api/api/sql.js +++ b/api/api/sql.js @@ -23,8 +23,8 @@ /* eslint no-unused-vars: 0 */ const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'format'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } +const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'delimiter', 'format', 'keep_alive', 'wait_for_completion_timeout'] +const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', keepAlive: 'keep_alive', waitForCompletionTimeout: 'wait_for_completion_timeout' } function SqlApi (transport, ConfigurationError) { this.transport = transport @@ -58,6 +58,87 @@ SqlApi.prototype.clearCursor = function sqlClearCursorApi (params, options, call return this.transport.request(request, options, callback) } +SqlApi.prototype.deleteAsync = function sqlDeleteAsyncApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.id == null) { + const err = new this[kConfigurationError]('Missing required parameter: id') + return handleError(err, callback) + } + + let { method, body, id, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'DELETE' + path = '/' + '_sql' + '/' + 'async' + '/' + 'delete' + '/' + encodeURIComponent(id) + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + +SqlApi.prototype.getAsync = function sqlGetAsyncApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.id == null) { + const err = new this[kConfigurationError]('Missing required parameter: id') + return handleError(err, callback) + } + + let { method, body, id, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'GET' + path = '/' + '_sql' + '/' + 'async' + '/' + encodeURIComponent(id) + + // build request object + const request = { + method, + path, + body: null, + querystring + } + + return this.transport.request(request, options, callback) +} + 
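// Review note (illustrative, not part of the generated API file): these async
// SQL methods compose with `sql.query`. Assuming a query was submitted with a
// short `wait_for_completion_timeout` and Elasticsearch returned an async `id`,
// a polling sketch could look like this (response field names are assumptions
// based on the Elasticsearch SQL docs):
//
//   const { body: status } = await client.sql.getAsyncStatus({ id })
//   if (status.is_running === false) {
//     const { body: results } = await client.sql.getAsync({ id })
//   }
//   // a search that is no longer needed can be removed early:
//   // await client.sql.deleteAsync({ id })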
+SqlApi.prototype.getAsyncStatus = function sqlGetAsyncStatusApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.id == null) { + const err = new this[kConfigurationError]('Missing required parameter: id') + return handleError(err, callback) + } + + let { method, body, id, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'GET' + path = '/' + '_sql' + '/' + 'async' + '/' + 'status' + '/' + encodeURIComponent(id) + + // build request object + const request = { + method, + path, + body: null, + querystring + } + + return this.transport.request(request, options, callback) +} + SqlApi.prototype.query = function sqlQueryApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -113,7 +194,10 @@ SqlApi.prototype.translate = function sqlTranslateApi (params, options, callback } Object.defineProperties(SqlApi.prototype, { - clear_cursor: { get () { return this.clearCursor } } + clear_cursor: { get () { return this.clearCursor } }, + delete_async: { get () { return this.deleteAsync } }, + get_async: { get () { return this.getAsync } }, + get_async_status: { get () { return this.getAsyncStatus } } }) module.exports = SqlApi diff --git a/api/requestParams.d.ts b/api/requestParams.d.ts index a917765fa..13c4cdca3 100644 --- a/api/requestParams.d.ts +++ b/api/requestParams.d.ts @@ -886,6 +886,11 @@ export interface IlmGetLifecycle extends Generic { export interface IlmGetStatus extends Generic { } +export interface IlmMigrateToDataTiers extends Generic { + dry_run?: boolean; + body?: T; +} + export interface IlmMoveToStep extends Generic { index: string; body?: T; @@ -1021,6 +1026,15 @@ export interface IndicesDeleteTemplate extends Generic { master_timeout?: string; } +export interface IndicesDiskUsage extends Generic { + index: string; + run_expensive_tasks?: boolean; + flush?: boolean; + ignore_unavailable?: boolean; + allow_no_indices?: boolean; + expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; +} + export interface IndicesExists extends Generic { index: string | string[]; local?: boolean; @@ -1063,6 +1077,14 @@ export interface IndicesExistsType extends Generic { local?: boolean; } +export interface IndicesFieldUsageStats extends Generic { + index: string; + fields?: string | string[]; + ignore_unavailable?: boolean; + allow_no_indices?: boolean; + expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; +} + export interface IndicesFlush extends Generic { index?: string | string[]; force?: boolean; @@ -1715,6 +1737,11 @@ export interface MlGetTrainedModelsStats extends Generic { size?: number; } +export interface MlInferTrainedModelDeployment extends Generic { + model_id: string; + timeout?: string; +} + export interface MlInfo extends Generic { } @@ -1775,6 +1802,10 @@ export interface MlPutFilter extends Generic { export interface MlPutJob extends Generic { job_id: string; + ignore_unavailable?: boolean; + allow_no_indices?: boolean; + ignore_throttled?: boolean; + expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; body: T; } @@ -1789,6 +1820,11 @@ export interface MlPutTrainedModelAlias extends Generic { reassign?: boolean; } +export interface MlResetJob extends Generic { + job_id: string; + wait_for_completion?: boolean; +} + export interface MlRevertModelSnapshot extends Generic { job_id: 
string; snapshot_id: string; @@ -1815,6 +1851,11 @@ export interface MlStartDatafeed extends Generic { body?: T; } +export interface MlStartTrainedModelDeployment extends Generic { + model_id: string; + timeout?: string; +} + export interface MlStopDataFrameAnalytics extends Generic { id: string; allow_no_match?: boolean; @@ -1832,6 +1873,10 @@ export interface MlStopDatafeed extends Generic { body?: T; } +export interface MlStopTrainedModelDeployment extends Generic { + model_id: string; +} + export interface MlUpdateDataFrameAnalytics extends Generic { id: string; body: T; @@ -2260,6 +2305,9 @@ export interface SecurityEnableUser extends Generic { refresh?: 'wait_for' | boolean; } +export interface SecurityEnrollKibana extends Generic { +} + export interface SecurityEnrollNode extends Generic { } @@ -2349,6 +2397,30 @@ export interface SecurityPutUser extends Generic { body: T; } +export interface SecuritySamlAuthenticate extends Generic { + body: T; +} + +export interface SecuritySamlCompleteLogout extends Generic { + body: T; +} + +export interface SecuritySamlInvalidate extends Generic { + body: T; +} + +export interface SecuritySamlLogout extends Generic { + body: T; +} + +export interface SecuritySamlPrepareAuthentication extends Generic { + body: T; +} + +export interface SecuritySamlServiceProviderMetadata extends Generic { + realm_name: string; +} + export interface ShutdownDeleteNode extends Generic { node_id: string; } @@ -2442,6 +2514,7 @@ export interface SnapshotGet extends Generic { master_timeout?: string; ignore_unavailable?: boolean; index_details?: boolean; + include_repository?: boolean; verbose?: boolean; } @@ -2451,6 +2524,21 @@ export interface SnapshotGetRepository extends Generic { local?: boolean; } +export interface SnapshotRepositoryAnalyze extends Generic { + repository: string; + blob_count?: number; + concurrency?: number; + read_node_count?: number; + early_read_node_count?: number; + seed?: number; + rare_action_probability?: number; + max_blob_size?: string; + max_total_data_size?: string; + timeout?: string; + detailed?: boolean; + rarely_abort_writes?: boolean; +} + export interface SnapshotRestore extends Generic { repository: string; snapshot: string; @@ -2476,6 +2564,22 @@ export interface SqlClearCursor extends Generic { body: T; } +export interface SqlDeleteAsync extends Generic { + id: string; +} + +export interface SqlGetAsync extends Generic { + id: string; + delimiter?: string; + format?: string; + keep_alive?: string; + wait_for_completion_timeout?: string; +} + +export interface SqlGetAsyncStatus extends Generic { + id: string; +} + export interface SqlQuery extends Generic { format?: string; body: T; diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 368524de6..ef81efd20 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -3586,6 +3586,27 @@ client.ilm.getStatus() link:{ref}/ilm-get-status.html[Documentation] + +[discrete] +=== ilm.migrateToDataTiers + +[source,ts] +---- +client.ilm.migrateToDataTiers({ + dry_run: boolean, + body: object +}) +---- +link:{ref}/ilm-migrate-to-data-tiers.html[Documentation] + +[cols=2*] +|=== +|`dry_run` or `dryRun` +|`boolean` - If set to true it will simulate the migration, providing a way to retrieve the ILM policies and indices that need to be migrated. 
The default is false + +|`body` +|`object` - Optionally specify a legacy index template name to delete and optionally specify a node attribute name used for index shard routing (defaults to "data") + +|=== + [discrete] === ilm.moveToStep @@ -4145,6 +4166,44 @@ link:{ref}/indices-templates.html[Documentation] + |=== +[discrete] +=== indices.diskUsage +*Stability:* experimental +[source,ts] +---- +client.indices.diskUsage({ + index: string, + run_expensive_tasks: boolean, + flush: boolean, + ignore_unavailable: boolean, + allow_no_indices: boolean, + expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' +}) +---- +link:{ref}/indices-disk-usage.html[Documentation] + +[cols=2*] +|=== +|`index` +|`string` - Comma-separated list of indices or data streams to analyze the disk usage + +|`run_expensive_tasks` or `runExpensiveTasks` +|`boolean` - Must be set to [true] in order for the task to be performed. Defaults to false. + +|`flush` +|`boolean` - Whether to flush before analyzing the index disk usage. Defaults to true + +|`ignore_unavailable` or `ignoreUnavailable` +|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) + +|`allow_no_indices` or `allowNoIndices` +|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) + +|`expand_wildcards` or `expandWildcards` +|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + +_Default:_ `open` + +|=== + [discrete] === indices.exists @@ -4321,6 +4380,40 @@ _Default:_ `open` |=== +[discrete] +=== indices.fieldUsageStats +*Stability:* experimental +[source,ts] +---- +client.indices.fieldUsageStats({ + index: string, + fields: string | string[], + ignore_unavailable: boolean, + allow_no_indices: boolean, + expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' +}) +---- +link:{ref}/indices-field-usage-stats.html[Documentation] + +[cols=2*] +|=== +|`index` +|`string` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices + +|`fields` +|`string \| string[]` - A comma-separated list of fields to include in the stats if only a subset of fields should be returned (supports wildcards) + +|`ignore_unavailable` or `ignoreUnavailable` +|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) + +|`allow_no_indices` or `allowNoIndices` +|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) + +|`expand_wildcards` or `expandWildcards` +|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both.
+ +_Default:_ `open` + +|=== + [discrete] === indices.flush @@ -7044,6 +7137,27 @@ _Default:_ `100` |=== +[discrete] +=== ml.inferTrainedModelDeployment +*Stability:* experimental +[source,ts] +---- +client.ml.inferTrainedModelDeployment({ + model_id: string, + timeout: string +}) +---- +link:{ref}/ml-infer-trained-model-deployment.html[Documentation] + +[cols=2*] +|=== +|`model_id` or `modelId` +|`string` - The ID of the model to perform inference on + +|`timeout` +|`string` - Controls the time to wait for the inference result + +|=== + [discrete] === ml.info @@ -7291,6 +7405,10 @@ link:{ref}/ml-put-filter.html[Documentation] + ---- client.ml.putJob({ job_id: string, + ignore_unavailable: boolean, + allow_no_indices: boolean, + ignore_throttled: boolean, + expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', body: object }) ---- @@ -7300,6 +7418,18 @@ link:{ref}/ml-put-job.html[Documentation] + |`job_id` or `jobId` |`string` - The ID of the job to create +|`ignore_unavailable` or `ignoreUnavailable` +|`boolean` - Ignore unavailable indexes (default: false). Only set if datafeed_config is provided. + +|`allow_no_indices` or `allowNoIndices` +|`boolean` - Ignore if the source indices expressions resolve to no concrete indices (default: true). Only set if datafeed_config is provided. + +|`ignore_throttled` or `ignoreThrottled` +|`boolean` - Ignore indices that are marked as throttled (default: true). Only set if datafeed_config is provided. + +|`expand_wildcards` or `expandWildcards` +|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether source index expressions should get expanded to open or closed indices (default: open). Only set if datafeed_config is provided. + |`body` |`object` - The job @@ -7351,6 +7481,28 @@ link:{ref}/put-trained-models-aliases.html[Documentation] + |=== +[discrete] +=== ml.resetJob + +[source,ts] +---- +client.ml.resetJob({ + job_id: string, + wait_for_completion: boolean +}) +---- +link:{ref}/ml-reset-job.html[Documentation] + +[cols=2*] +|=== +|`job_id` or `jobId` +|`string` - The ID of the job to reset + +|`wait_for_completion` or `waitForCompletion` +|`boolean` - Should this request wait until the operation has completed before returning + +_Default:_ `true` + +|=== + [discrete] === ml.revertModelSnapshot @@ -7459,6 +7611,27 @@ link:{ref}/ml-start-datafeed.html[Documentation] + |=== +[discrete] +=== ml.startTrainedModelDeployment +*Stability:* experimental +[source,ts] +---- +client.ml.startTrainedModelDeployment({ + model_id: string, + timeout: string +}) +---- +link:{ref}/ml-start-trained-model-deployment.html[Documentation] + +[cols=2*] +|=== +|`model_id` or `modelId` +|`string` - The ID of the model to deploy + +|`timeout` +|`string` - Controls the time to wait until the model is deployed + +|=== + [discrete] === ml.stopDataFrameAnalytics @@ -7531,6 +7704,23 @@ WARNING: This parameter has been deprecated.
|=== +[discrete] +=== ml.stopTrainedModelDeployment +*Stability:* experimental +[source,ts] +---- +client.ml.stopTrainedModelDeployment({ + model_id: string +}) +---- +link:{ref}/stop-trained-model-deployment.html[Documentation] + +[cols=2*] +|=== +|`model_id` or `modelId` +|`string` - The ID of the model to undeploy + +|=== + [discrete] === ml.updateDataFrameAnalytics @@ -8307,7 +8497,7 @@ client.renderSearchTemplate({ body: object }) ---- -link:{ref}/search-template.html#_validating_templates[Documentation] + +link:{ref}/render-search-template-api.html[Documentation] + [cols=2*] |=== |`id` @@ -8409,7 +8599,7 @@ link:{ref}/rollup-put-job.html[Documentation] + [discrete] === rollup.rollup - +*Stability:* experimental [source,ts] ---- client.rollup.rollup({ @@ -8418,7 +8608,7 @@ client.rollup.rollup({ body: object }) ---- -link:{ref}/rollup-api.html[Documentation] + +link:{ref}/xpack-rollup.html[Documentation] + [cols=2*] |=== |`index` @@ -9323,6 +9513,16 @@ link:{ref}/security-api-enable-user.html[Documentation] + |=== +[discrete] +=== security.enrollKibana + +[source,ts] +---- +client.security.enrollKibana() +---- +link:{ref}/security-api-enroll-kibana.html[Documentation] + + + [discrete] === security.enrollNode @@ -9330,6 +9530,8 @@ link:{ref}/security-api-enable-user.html[Documentation] + ---- client.security.enrollNode() ---- +link:{ref}/security-api-node-enrollment.html[Documentation] + + [discrete] === security.getApiKey @@ -9687,6 +9889,108 @@ link:{ref}/security-api-put-user.html[Documentation] + |=== +[discrete] +=== security.samlAuthenticate + +[source,ts] +---- +client.security.samlAuthenticate({ + body: object +}) +---- +link:{ref}/security-api-saml-authenticate.html[Documentation] + +[cols=2*] +|=== +|`body` +|`object` - The SAML response to authenticate + +|=== + +[discrete] +=== security.samlCompleteLogout + +[source,ts] +---- +client.security.samlCompleteLogout({ + body: object +}) +---- +link:{ref}/security-api-saml-complete-logout.html[Documentation] + +[cols=2*] +|=== +|`body` +|`object` - The logout response to verify + +|=== + +[discrete] +=== security.samlInvalidate + +[source,ts] +---- +client.security.samlInvalidate({ + body: object +}) +---- +link:{ref}/security-api-saml-invalidate.html[Documentation] + +[cols=2*] +|=== +|`body` +|`object` - The LogoutRequest message + +|=== + +[discrete] +=== security.samlLogout + +[source,ts] +---- +client.security.samlLogout({ + body: object +}) +---- +link:{ref}/security-api-saml-logout.html[Documentation] + +[cols=2*] +|=== +|`body` +|`object` - The tokens to invalidate + +|=== + +[discrete] +=== security.samlPrepareAuthentication + +[source,ts] +---- +client.security.samlPrepareAuthentication({ + body: object +}) +---- +link:{ref}/security-api-saml-prepare-authentication.html[Documentation] + +[cols=2*] +|=== +|`body` +|`object` - The realm for which to create the authentication request, identified by either its name or the ACS URL + +|=== + +[discrete] +=== security.samlServiceProviderMetadata + +[source,ts] +---- +client.security.samlServiceProviderMetadata({ + realm_name: string +}) +---- +link:{ref}/security-api-saml-sp-metadata.html[Documentation] + +[cols=2*] +|=== +|`realm_name` or `realmName` +|`string` - The name of the SAML realm to get the metadata for + +|=== + [discrete] === shutdown.deleteNode *Stability:* experimental @@ -10049,6 +10353,7 @@ client.snapshot.get({ master_timeout: string, ignore_unavailable: boolean, index_details: boolean, + include_repository: boolean, verbose: boolean }) ---- @@ -10070,6 
+10375,9 @@ link:{ref}/modules-snapshots.html[Documentation] + |`index_details` or `indexDetails` |`boolean` - Whether to include details of each index in the snapshot, if those details are available. Defaults to false. +|`include_repository` or `includeRepository` +|`boolean` - Whether to include the repository name in the snapshot info. Defaults to true. + |`verbose` |`boolean` - Whether to show verbose snapshot info or only show the basic info found in the repository index blob @@ -10100,6 +10408,67 @@ link:{ref}/modules-snapshots.html[Documentation] + |=== +[discrete] +=== snapshot.repositoryAnalyze + +[source,ts] +---- +client.snapshot.repositoryAnalyze({ + repository: string, + blob_count: number, + concurrency: number, + read_node_count: number, + early_read_node_count: number, + seed: number, + rare_action_probability: number, + max_blob_size: string, + max_total_data_size: string, + timeout: string, + detailed: boolean, + rarely_abort_writes: boolean +}) +---- +link:{ref}/modules-snapshots.html[Documentation] + +[cols=2*] +|=== +|`repository` +|`string` - A repository name + +|`blob_count` or `blobCount` +|`number` - Number of blobs to create during the test. Defaults to 100. + +|`concurrency` +|`number` - Number of operations to run concurrently during the test. Defaults to 10. + +|`read_node_count` or `readNodeCount` +|`number` - Number of nodes on which to read a blob after writing. Defaults to 10. + +|`early_read_node_count` or `earlyReadNodeCount` +|`number` - Number of nodes on which to perform an early read on a blob, i.e. before writing has completed. Early reads are rare actions so the 'rare_action_probability' parameter is also relevant. Defaults to 2. + +|`seed` +|`number` - Seed for the random number generator used to create the test workload. Defaults to a random value. + +|`rare_action_probability` or `rareActionProbability` +|`number` - Probability of taking a rare action such as an early read or an overwrite. Defaults to 0.02. + +|`max_blob_size` or `maxBlobSize` +|`string` - Maximum size of a blob to create during the test, e.g. '1gb' or '100mb'. Defaults to '10mb'. + +|`max_total_data_size` or `maxTotalDataSize` +|`string` - Maximum total size of all blobs to create during the test, e.g. '1tb' or '100gb'. Defaults to '1gb'. + +|`timeout` +|`string` - Explicit operation timeout. Defaults to '30s'. + +|`detailed` +|`boolean` - Whether to return detailed results or a summary. Defaults to 'false' so that only the summary is returned. + +|`rarely_abort_writes` or `rarelyAbortWrites` +|`boolean` - Whether to rarely abort writes before they complete. Defaults to 'true'. + +|=== + [discrete] === snapshot.restore @@ -10204,6 +10573,75 @@ link:{ref}/sql-pagination.html[Documentation] + |=== +[discrete] +=== sql.deleteAsync + +[source,ts] +---- +client.sql.deleteAsync({ + id: string +}) +---- +link:{ref}/delete-async-sql-search-api.html[Documentation] + +[cols=2*] +|=== +|`id` +|`string` - The async search ID + +|=== + +[discrete] +=== sql.getAsync + +[source,ts] +---- +client.sql.getAsync({ + id: string, + delimiter: string, + format: string, + keep_alive: string, + wait_for_completion_timeout: string +}) +---- +link:{ref}/get-async-sql-search-api.html[Documentation] + +[cols=2*] +|=== +|`id` +|`string` - The async search ID + +|`delimiter` +|`string` - Separator for CSV results + +_Default:_ `,` + +|`format` +|`string` - Short version of the Accept header, e.g.
json, yaml + +|`keep_alive` or `keepAlive` +|`string` - Retention period for the search and its results + +_Default:_ `5d` + +|`wait_for_completion_timeout` or `waitForCompletionTimeout` +|`string` - Duration to wait for complete results + +|=== + +[discrete] +=== sql.getAsyncStatus + +[source,ts] +---- +client.sql.getAsyncStatus({ + id: string +}) +---- +link:{ref}/get-async-sql-search-status-api.html[Documentation] + +[cols=2*] +|=== +|`id` +|`string` - The async search ID + +|=== + [discrete] === sql.query diff --git a/index.d.ts b/index.d.ts index f20ea08c9..978e085f8 100644 --- a/index.d.ts +++ b/index.d.ts @@ -833,6 +833,14 @@ declare class Client { getStatus, TContext = Context>(callback: callbackFn): TransportRequestCallback getStatus, TContext = Context>(params: RequestParams.IlmGetStatus, callback: callbackFn): TransportRequestCallback getStatus, TContext = Context>(params: RequestParams.IlmGetStatus, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + migrate_to_data_tiers, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.IlmMigrateToDataTiers, options?: TransportRequestOptions): TransportRequestPromise> + migrate_to_data_tiers, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + migrate_to_data_tiers, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.IlmMigrateToDataTiers, callback: callbackFn): TransportRequestCallback + migrate_to_data_tiers, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.IlmMigrateToDataTiers, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + migrateToDataTiers, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.IlmMigrateToDataTiers, options?: TransportRequestOptions): TransportRequestPromise> + migrateToDataTiers, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + migrateToDataTiers, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.IlmMigrateToDataTiers, callback: callbackFn): TransportRequestCallback + migrateToDataTiers, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.IlmMigrateToDataTiers, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback move_to_step, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.IlmMoveToStep, options?: TransportRequestOptions): TransportRequestPromise> move_to_step, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback move_to_step, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.IlmMoveToStep, callback: callbackFn): TransportRequestCallback @@ -959,6 +967,14 @@ declare class Client { deleteTemplate, TContext = Context>(callback: callbackFn): TransportRequestCallback deleteTemplate, TContext = Context>(params: RequestParams.IndicesDeleteTemplate, callback: callbackFn): TransportRequestCallback deleteTemplate, TContext = Context>(params: RequestParams.IndicesDeleteTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + disk_usage, TContext = Context>(params?: RequestParams.IndicesDiskUsage, options?: TransportRequestOptions): TransportRequestPromise> + disk_usage, TContext = Context>(callback: callbackFn): TransportRequestCallback + 
disk_usage, TContext = Context>(params: RequestParams.IndicesDiskUsage, callback: callbackFn): TransportRequestCallback + disk_usage, TContext = Context>(params: RequestParams.IndicesDiskUsage, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + diskUsage, TContext = Context>(params?: RequestParams.IndicesDiskUsage, options?: TransportRequestOptions): TransportRequestPromise> + diskUsage, TContext = Context>(callback: callbackFn): TransportRequestCallback + diskUsage, TContext = Context>(params: RequestParams.IndicesDiskUsage, callback: callbackFn): TransportRequestCallback + diskUsage, TContext = Context>(params: RequestParams.IndicesDiskUsage, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback exists(params?: RequestParams.IndicesExists, options?: TransportRequestOptions): TransportRequestPromise> exists(callback: callbackFn): TransportRequestCallback exists(params: RequestParams.IndicesExists, callback: callbackFn): TransportRequestCallback @@ -995,6 +1011,14 @@ declare class Client { existsType(callback: callbackFn): TransportRequestCallback existsType(params: RequestParams.IndicesExistsType, callback: callbackFn): TransportRequestCallback existsType(params: RequestParams.IndicesExistsType, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + field_usage_stats, TContext = Context>(params?: RequestParams.IndicesFieldUsageStats, options?: TransportRequestOptions): TransportRequestPromise> + field_usage_stats, TContext = Context>(callback: callbackFn): TransportRequestCallback + field_usage_stats, TContext = Context>(params: RequestParams.IndicesFieldUsageStats, callback: callbackFn): TransportRequestCallback + field_usage_stats, TContext = Context>(params: RequestParams.IndicesFieldUsageStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + fieldUsageStats, TContext = Context>(params?: RequestParams.IndicesFieldUsageStats, options?: TransportRequestOptions): TransportRequestPromise> + fieldUsageStats, TContext = Context>(callback: callbackFn): TransportRequestCallback + fieldUsageStats, TContext = Context>(params: RequestParams.IndicesFieldUsageStats, callback: callbackFn): TransportRequestCallback + fieldUsageStats, TContext = Context>(params: RequestParams.IndicesFieldUsageStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback flush, TContext = Context>(params?: RequestParams.IndicesFlush, options?: TransportRequestOptions): TransportRequestPromise> flush, TContext = Context>(callback: callbackFn): TransportRequestCallback flush, TContext = Context>(params: RequestParams.IndicesFlush, callback: callbackFn): TransportRequestCallback @@ -1625,6 +1649,14 @@ declare class Client { getTrainedModelsStats, TContext = Context>(callback: callbackFn): TransportRequestCallback getTrainedModelsStats, TContext = Context>(params: RequestParams.MlGetTrainedModelsStats, callback: callbackFn): TransportRequestCallback getTrainedModelsStats, TContext = Context>(params: RequestParams.MlGetTrainedModelsStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + infer_trained_model_deployment, TContext = Context>(params?: RequestParams.MlInferTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> + infer_trained_model_deployment, TContext = Context>(callback: callbackFn): TransportRequestCallback + infer_trained_model_deployment, TContext = Context>(params: 
RequestParams.MlInferTrainedModelDeployment, callback: callbackFn): TransportRequestCallback + infer_trained_model_deployment, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + inferTrainedModelDeployment, TContext = Context>(params?: RequestParams.MlInferTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> + inferTrainedModelDeployment, TContext = Context>(callback: callbackFn): TransportRequestCallback + inferTrainedModelDeployment, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, callback: callbackFn): TransportRequestCallback + inferTrainedModelDeployment, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback info, TContext = Context>(params?: RequestParams.MlInfo, options?: TransportRequestOptions): TransportRequestPromise> info, TContext = Context>(callback: callbackFn): TransportRequestCallback info, TContext = Context>(params: RequestParams.MlInfo, callback: callbackFn): TransportRequestCallback @@ -1733,6 +1765,14 @@ declare class Client { putTrainedModelAlias, TContext = Context>(callback: callbackFn): TransportRequestCallback putTrainedModelAlias, TContext = Context>(params: RequestParams.MlPutTrainedModelAlias, callback: callbackFn): TransportRequestCallback putTrainedModelAlias, TContext = Context>(params: RequestParams.MlPutTrainedModelAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + reset_job, TContext = Context>(params?: RequestParams.MlResetJob, options?: TransportRequestOptions): TransportRequestPromise> + reset_job, TContext = Context>(callback: callbackFn): TransportRequestCallback + reset_job, TContext = Context>(params: RequestParams.MlResetJob, callback: callbackFn): TransportRequestCallback + reset_job, TContext = Context>(params: RequestParams.MlResetJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + resetJob, TContext = Context>(params?: RequestParams.MlResetJob, options?: TransportRequestOptions): TransportRequestPromise> + resetJob, TContext = Context>(callback: callbackFn): TransportRequestCallback + resetJob, TContext = Context>(params: RequestParams.MlResetJob, callback: callbackFn): TransportRequestCallback + resetJob, TContext = Context>(params: RequestParams.MlResetJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback revert_model_snapshot, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlRevertModelSnapshot, options?: TransportRequestOptions): TransportRequestPromise> revert_model_snapshot, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback revert_model_snapshot, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlRevertModelSnapshot, callback: callbackFn): TransportRequestCallback @@ -1765,6 +1805,14 @@ declare class Client { startDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback startDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlStartDatafeed, callback: callbackFn): TransportRequestCallback startDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlStartDatafeed, options: 
TransportRequestOptions, callback: callbackFn): TransportRequestCallback + start_trained_model_deployment, TContext = Context>(params?: RequestParams.MlStartTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> + start_trained_model_deployment, TContext = Context>(callback: callbackFn): TransportRequestCallback + start_trained_model_deployment, TContext = Context>(params: RequestParams.MlStartTrainedModelDeployment, callback: callbackFn): TransportRequestCallback + start_trained_model_deployment, TContext = Context>(params: RequestParams.MlStartTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + startTrainedModelDeployment, TContext = Context>(params?: RequestParams.MlStartTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> + startTrainedModelDeployment, TContext = Context>(callback: callbackFn): TransportRequestCallback + startTrainedModelDeployment, TContext = Context>(params: RequestParams.MlStartTrainedModelDeployment, callback: callbackFn): TransportRequestCallback + startTrainedModelDeployment, TContext = Context>(params: RequestParams.MlStartTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback stop_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlStopDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> stop_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback stop_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlStopDataFrameAnalytics, callback: callbackFn): TransportRequestCallback @@ -1781,6 +1829,14 @@ declare class Client { stopDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback stopDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlStopDatafeed, callback: callbackFn): TransportRequestCallback stopDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlStopDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + stop_trained_model_deployment, TContext = Context>(params?: RequestParams.MlStopTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> + stop_trained_model_deployment, TContext = Context>(callback: callbackFn): TransportRequestCallback + stop_trained_model_deployment, TContext = Context>(params: RequestParams.MlStopTrainedModelDeployment, callback: callbackFn): TransportRequestCallback + stop_trained_model_deployment, TContext = Context>(params: RequestParams.MlStopTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + stopTrainedModelDeployment, TContext = Context>(params?: RequestParams.MlStopTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> + stopTrainedModelDeployment, TContext = Context>(callback: callbackFn): TransportRequestCallback + stopTrainedModelDeployment, TContext = Context>(params: RequestParams.MlStopTrainedModelDeployment, callback: callbackFn): TransportRequestCallback + stopTrainedModelDeployment, TContext = Context>(params: RequestParams.MlStopTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback 
update_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlUpdateDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> update_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback update_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlUpdateDataFrameAnalytics, callback: callbackFn): TransportRequestCallback @@ -2221,6 +2277,14 @@ declare class Client { enableUser, TContext = Context>(callback: callbackFn): TransportRequestCallback enableUser, TContext = Context>(params: RequestParams.SecurityEnableUser, callback: callbackFn): TransportRequestCallback enableUser, TContext = Context>(params: RequestParams.SecurityEnableUser, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + enroll_kibana, TContext = Context>(params?: RequestParams.SecurityEnrollKibana, options?: TransportRequestOptions): TransportRequestPromise> + enroll_kibana, TContext = Context>(callback: callbackFn): TransportRequestCallback + enroll_kibana, TContext = Context>(params: RequestParams.SecurityEnrollKibana, callback: callbackFn): TransportRequestCallback + enroll_kibana, TContext = Context>(params: RequestParams.SecurityEnrollKibana, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + enrollKibana, TContext = Context>(params?: RequestParams.SecurityEnrollKibana, options?: TransportRequestOptions): TransportRequestPromise> + enrollKibana, TContext = Context>(callback: callbackFn): TransportRequestCallback + enrollKibana, TContext = Context>(params: RequestParams.SecurityEnrollKibana, callback: callbackFn): TransportRequestCallback + enrollKibana, TContext = Context>(params: RequestParams.SecurityEnrollKibana, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback enroll_node, TContext = Context>(params?: RequestParams.SecurityEnrollNode, options?: TransportRequestOptions): TransportRequestPromise> enroll_node, TContext = Context>(callback: callbackFn): TransportRequestCallback enroll_node, TContext = Context>(params: RequestParams.SecurityEnrollNode, callback: callbackFn): TransportRequestCallback @@ -2373,6 +2437,54 @@ declare class Client { putUser, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback putUser, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityPutUser, callback: callbackFn): TransportRequestCallback putUser, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityPutUser, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + saml_authenticate, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecuritySamlAuthenticate, options?: TransportRequestOptions): TransportRequestPromise> + saml_authenticate, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + saml_authenticate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlAuthenticate, callback: callbackFn): TransportRequestCallback + saml_authenticate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlAuthenticate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + 
samlAuthenticate, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecuritySamlAuthenticate, options?: TransportRequestOptions): TransportRequestPromise> + samlAuthenticate, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + samlAuthenticate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlAuthenticate, callback: callbackFn): TransportRequestCallback + samlAuthenticate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlAuthenticate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + saml_complete_logout, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecuritySamlCompleteLogout, options?: TransportRequestOptions): TransportRequestPromise> + saml_complete_logout, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + saml_complete_logout, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlCompleteLogout, callback: callbackFn): TransportRequestCallback + saml_complete_logout, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlCompleteLogout, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + samlCompleteLogout, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecuritySamlCompleteLogout, options?: TransportRequestOptions): TransportRequestPromise> + samlCompleteLogout, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + samlCompleteLogout, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlCompleteLogout, callback: callbackFn): TransportRequestCallback + samlCompleteLogout, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlCompleteLogout, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + saml_invalidate, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecuritySamlInvalidate, options?: TransportRequestOptions): TransportRequestPromise> + saml_invalidate, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + saml_invalidate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlInvalidate, callback: callbackFn): TransportRequestCallback + saml_invalidate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlInvalidate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + samlInvalidate, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecuritySamlInvalidate, options?: TransportRequestOptions): TransportRequestPromise> + samlInvalidate, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + samlInvalidate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlInvalidate, callback: callbackFn): TransportRequestCallback + samlInvalidate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlInvalidate, options: TransportRequestOptions, callback: 
callbackFn): TransportRequestCallback + saml_logout, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecuritySamlLogout, options?: TransportRequestOptions): TransportRequestPromise> + saml_logout, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + saml_logout, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlLogout, callback: callbackFn): TransportRequestCallback + saml_logout, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlLogout, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + samlLogout, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecuritySamlLogout, options?: TransportRequestOptions): TransportRequestPromise> + samlLogout, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + samlLogout, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlLogout, callback: callbackFn): TransportRequestCallback + samlLogout, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlLogout, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + saml_prepare_authentication, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecuritySamlPrepareAuthentication, options?: TransportRequestOptions): TransportRequestPromise> + saml_prepare_authentication, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + saml_prepare_authentication, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlPrepareAuthentication, callback: callbackFn): TransportRequestCallback + saml_prepare_authentication, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlPrepareAuthentication, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + samlPrepareAuthentication, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecuritySamlPrepareAuthentication, options?: TransportRequestOptions): TransportRequestPromise> + samlPrepareAuthentication, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + samlPrepareAuthentication, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlPrepareAuthentication, callback: callbackFn): TransportRequestCallback + samlPrepareAuthentication, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlPrepareAuthentication, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + saml_service_provider_metadata, TContext = Context>(params?: RequestParams.SecuritySamlServiceProviderMetadata, options?: TransportRequestOptions): TransportRequestPromise> + saml_service_provider_metadata, TContext = Context>(callback: callbackFn): TransportRequestCallback + saml_service_provider_metadata, TContext = Context>(params: RequestParams.SecuritySamlServiceProviderMetadata, callback: callbackFn): TransportRequestCallback + saml_service_provider_metadata, TContext = Context>(params: RequestParams.SecuritySamlServiceProviderMetadata, options: TransportRequestOptions, callback: 
callbackFn): TransportRequestCallback + samlServiceProviderMetadata, TContext = Context>(params?: RequestParams.SecuritySamlServiceProviderMetadata, options?: TransportRequestOptions): TransportRequestPromise> + samlServiceProviderMetadata, TContext = Context>(callback: callbackFn): TransportRequestCallback + samlServiceProviderMetadata, TContext = Context>(params: RequestParams.SecuritySamlServiceProviderMetadata, callback: callbackFn): TransportRequestCallback + samlServiceProviderMetadata, TContext = Context>(params: RequestParams.SecuritySamlServiceProviderMetadata, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } shutdown: { delete_node, TContext = Context>(params?: RequestParams.ShutdownDeleteNode, options?: TransportRequestOptions): TransportRequestPromise> @@ -2515,6 +2627,14 @@ declare class Client { getRepository, TContext = Context>(callback: callbackFn): TransportRequestCallback getRepository, TContext = Context>(params: RequestParams.SnapshotGetRepository, callback: callbackFn): TransportRequestCallback getRepository, TContext = Context>(params: RequestParams.SnapshotGetRepository, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + repository_analyze, TContext = Context>(params?: RequestParams.SnapshotRepositoryAnalyze, options?: TransportRequestOptions): TransportRequestPromise> + repository_analyze, TContext = Context>(callback: callbackFn): TransportRequestCallback + repository_analyze, TContext = Context>(params: RequestParams.SnapshotRepositoryAnalyze, callback: callbackFn): TransportRequestCallback + repository_analyze, TContext = Context>(params: RequestParams.SnapshotRepositoryAnalyze, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + repositoryAnalyze, TContext = Context>(params?: RequestParams.SnapshotRepositoryAnalyze, options?: TransportRequestOptions): TransportRequestPromise> + repositoryAnalyze, TContext = Context>(callback: callbackFn): TransportRequestCallback + repositoryAnalyze, TContext = Context>(params: RequestParams.SnapshotRepositoryAnalyze, callback: callbackFn): TransportRequestCallback + repositoryAnalyze, TContext = Context>(params: RequestParams.SnapshotRepositoryAnalyze, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback restore, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SnapshotRestore, options?: TransportRequestOptions): TransportRequestPromise> restore, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback restore, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SnapshotRestore, callback: callbackFn): TransportRequestCallback @@ -2541,6 +2661,30 @@ declare class Client { clearCursor, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback clearCursor, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SqlClearCursor, callback: callbackFn): TransportRequestCallback clearCursor, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SqlClearCursor, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + delete_async, TContext = Context>(params?: RequestParams.SqlDeleteAsync, options?: TransportRequestOptions): TransportRequestPromise> + delete_async, TContext = Context>(callback: callbackFn): TransportRequestCallback + delete_async, 
TContext = Context>(params: RequestParams.SqlDeleteAsync, callback: callbackFn): TransportRequestCallback + delete_async, TContext = Context>(params: RequestParams.SqlDeleteAsync, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + deleteAsync, TContext = Context>(params?: RequestParams.SqlDeleteAsync, options?: TransportRequestOptions): TransportRequestPromise> + deleteAsync, TContext = Context>(callback: callbackFn): TransportRequestCallback + deleteAsync, TContext = Context>(params: RequestParams.SqlDeleteAsync, callback: callbackFn): TransportRequestCallback + deleteAsync, TContext = Context>(params: RequestParams.SqlDeleteAsync, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + get_async, TContext = Context>(params?: RequestParams.SqlGetAsync, options?: TransportRequestOptions): TransportRequestPromise> + get_async, TContext = Context>(callback: callbackFn): TransportRequestCallback + get_async, TContext = Context>(params: RequestParams.SqlGetAsync, callback: callbackFn): TransportRequestCallback + get_async, TContext = Context>(params: RequestParams.SqlGetAsync, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getAsync, TContext = Context>(params?: RequestParams.SqlGetAsync, options?: TransportRequestOptions): TransportRequestPromise> + getAsync, TContext = Context>(callback: callbackFn): TransportRequestCallback + getAsync, TContext = Context>(params: RequestParams.SqlGetAsync, callback: callbackFn): TransportRequestCallback + getAsync, TContext = Context>(params: RequestParams.SqlGetAsync, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + get_async_status, TContext = Context>(params?: RequestParams.SqlGetAsyncStatus, options?: TransportRequestOptions): TransportRequestPromise> + get_async_status, TContext = Context>(callback: callbackFn): TransportRequestCallback + get_async_status, TContext = Context>(params: RequestParams.SqlGetAsyncStatus, callback: callbackFn): TransportRequestCallback + get_async_status, TContext = Context>(params: RequestParams.SqlGetAsyncStatus, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getAsyncStatus, TContext = Context>(params?: RequestParams.SqlGetAsyncStatus, options?: TransportRequestOptions): TransportRequestPromise> + getAsyncStatus, TContext = Context>(callback: callbackFn): TransportRequestCallback + getAsyncStatus, TContext = Context>(params: RequestParams.SqlGetAsyncStatus, callback: callbackFn): TransportRequestCallback + getAsyncStatus, TContext = Context>(params: RequestParams.SqlGetAsyncStatus, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback query, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SqlQuery, options?: TransportRequestOptions): TransportRequestPromise> query, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback query, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SqlQuery, callback: callbackFn): TransportRequestCallback From d2ce46b8e0d88abb7dd8ed223c36b477c0a753f6 Mon Sep 17 00:00:00 2001 From: Seth Michael Larson Date: Thu, 15 Jul 2021 14:06:17 -0500 Subject: [PATCH 033/647] Fix URLs for trained model deployments (#1494) --- docs/reference.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 
ef81efd20..96633c4bf 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -7147,7 +7147,7 @@ client.ml.inferTrainedModelDeployment({ timeout: string }) ---- -link:{ref}/ml-infer-trained-model-deployment.html[Documentation] + +link:{ref}/ml-df-analytics-apis.html[Documentation] + [cols=2*] |=== |`model_id` or `modelId` @@ -7621,7 +7621,7 @@ client.ml.startTrainedModelDeployment({ timeout: string }) ---- -link:{ref}/ml-start-trained-model-deployment.html[Documentation] + +link:{ref}/ml-df-analytics-apis.html[Documentation] + [cols=2*] |=== |`model_id` or `modelId` @@ -7713,7 +7713,7 @@ client.ml.stopTrainedModelDeployment({ model_id: string }) ---- -link:{ref}/stop-trained-model-deployment.html[Documentation] + +link:{ref}/ml-df-analytics-apis.html[Documentation] + [cols=2*] |=== |`model_id` or `modelId` From 3137780d6b86c77e9d8921f5681965f135eebcb2 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Fri, 16 Jul 2021 10:00:24 +0200 Subject: [PATCH 034/647] Update integration test (#1493) --- .ci/run-elasticsearch.sh | 4 +++- test/integration/index.js | 3 +++ test/integration/test-runner.js | 19 +++++++++++++++++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/.ci/run-elasticsearch.sh b/.ci/run-elasticsearch.sh index 3f3796fb6..0c27e9b75 100755 --- a/.ci/run-elasticsearch.sh +++ b/.ci/run-elasticsearch.sh @@ -7,7 +7,7 @@ # Export the TEST_SUITE variable, eg. 'free' or 'platinum' defaults to 'free'. # Export the NUMBER_OF_NODES variable to start more than 1 node -# Version 1.3.0 +# Version 1.4.0 # - Initial version of the run-elasticsearch.sh script # - Deleting the volume should not dependent on the container still running # - Fixed `ES_JAVA_OPTS` config @@ -17,6 +17,7 @@ # - Added 5 retries on docker pull for fixing transient network errors # - Added flags to make local CCR configurations work # - Added action.destructive_requires_name=false as the default will be true in v8 +# - Added ingest.geoip.downloader.enabled=false as it causes false positives in testing script_path=$(dirname $(realpath -s $0)) source $script_path/functions/imports.sh @@ -40,6 +41,7 @@ environment=($(cat <<-END --env path.repo=/tmp --env repositories.url.allowed_urls=http://snapshot.test* --env action.destructive_requires_name=false + --env ingest.geoip.downloader.enabled=false END )) if [[ "$TEST_SUITE" == "platinum" ]]; then diff --git a/test/integration/index.js b/test/integration/index.js index 149d1d274..833b7b3ea 100644 --- a/test/integration/index.js +++ b/test/integration/index.js @@ -45,6 +45,8 @@ const freeSkips = { // TODO: remove this once 'arbitrary_key' is implemented // https://github.com/elastic/elasticsearch/pull/41492 'indices.split/30_copy_settings.yml': ['*'], + 'indices.stats/50_disk_usage.yml': ['Disk usage stats'], + 'indices.stats/60_field_usage.yml': ['Field usage stats'], // skipping because we are booting ES with `discovery.type=single-node` // and this test will fail because of this configuration 'nodes.stats/30_discovery.yml': ['*'], @@ -56,6 +58,7 @@ const freeSkips = { 'search/320_disallow_queries.yml': ['Test disallow expensive queries'] } const platinumBlackList = { + 'analytics/histogram.yml': ['Histogram requires values in increasing order'], // this two test cases are broken, we should // return on those in the future. 
'analytics/top_metrics.yml': [ diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js index ef577210a..824fb05fb 100644 --- a/test/integration/test-runner.js +++ b/test/integration/test-runner.js @@ -80,6 +80,18 @@ function build (opts = {}) { // remove 'x_pack_rest_user', used in some xpack test await client.security.deleteUser({ username: 'x_pack_rest_user' }, { ignore: [404] }) + + const { body: searchableSnapshotIndices } = await client.cluster.state({ + metric: 'metadata', + filter_path: 'metadata.indices.*.settings.index.store.snapshot' + }) + if (searchableSnapshotIndices.metadata != null && searchableSnapshotIndices.metadata.indices != null) { + await helper.runInParallel( + client, 'indices.delete', + Object.keys(searchableSnapshotIndices.metadata.indices).map(i => ({ index: i })), + { ignore: [404] } + ) + } } // clean snapshots @@ -164,6 +176,13 @@ function build (opts = {}) { ) } + const { body: shutdownNodes } = await client.shutdown.getNode() + if (shutdownNodes._nodes == null && shutdownNodes.cluster_name == null) { + for (const node of shutdownNodes.nodes) { + await client.shutdown.deleteNode({ node_id: node.node_id }) + } + } + // wait for pending task before resolving the promise await sleep(100) while (true) { From 76f5845ac169398f14e9bdddce5a347ca7d5add4 Mon Sep 17 00:00:00 2001 From: delvedor Date: Fri, 16 Jul 2021 11:29:04 +0200 Subject: [PATCH 035/647] Update CI conf --- ...arch-js+7.12.yml => elastic+elasticsearch-js+7.14.yml} | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) rename .ci/jobs/{elastic+elasticsearch-js+7.12.yml => elastic+elasticsearch-js+7.14.yml} (60%) diff --git a/.ci/jobs/elastic+elasticsearch-js+7.12.yml b/.ci/jobs/elastic+elasticsearch-js+7.14.yml similarity index 60% rename from .ci/jobs/elastic+elasticsearch-js+7.12.yml rename to .ci/jobs/elastic+elasticsearch-js+7.14.yml index 0dd3a7cfc..0cab6d3eb 100644 --- a/.ci/jobs/elastic+elasticsearch-js+7.12.yml +++ b/.ci/jobs/elastic+elasticsearch-js+7.14.yml @@ -1,13 +1,13 @@ --- - job: - name: elastic+elasticsearch-js+7.12 - display-name: 'elastic / elasticsearch-js # 7.12' - description: Testing the elasticsearch-js 7.12 branch. + name: elastic+elasticsearch-js+7.14 + display-name: 'elastic / elasticsearch-js # 7.14' + description: Testing the elasticsearch-js 7.14 branch. junit_results: "*-junit.xml" parameters: - string: name: branch_specifier - default: refs/heads/7.12 + default: refs/heads/7.14 description: the Git branch specifier to build (<branchName>, <tagName>, <commitId>, etc.) 
triggers:

From 17c744ed800c5e1a2d30ff7cac75ba6e7805266f Mon Sep 17 00:00:00 2001
From: Tomas Della Vedova
Date: Mon, 19 Jul 2021 16:42:04 +0200
Subject: [PATCH 036/647] Verify connection to Elasticsearch (#1487)

---
 docs/connecting.asciidoc              |   13 +
 index.js                              |    4 +
 lib/Transport.d.ts                    |    3 +-
 lib/Transport.js                      |  235 +++--
 lib/errors.d.ts                       |    7 +
 lib/errors.js                         |   13 +-
 test/acceptance/events-order.test.js  |    3 +-
 test/acceptance/observability.test.js |    3 +-
 test/acceptance/product-check.test.js | 1131 +++++++++++++++++++++++++
 test/acceptance/proxy.test.js         |    2 +-
 test/acceptance/resurrect.test.js     |    4 +-
 test/acceptance/sniff.test.js         |    4 +-
 test/unit/api-async.js                |    4 +-
 test/unit/api.test.js                 |    4 +-
 test/unit/child.test.js               |    3 +-
 test/unit/client.test.js              |    4 +-
 test/unit/events.test.js              |    3 +-
 test/unit/helpers/bulk.test.js        |    4 +-
 test/unit/helpers/msearch.test.js     |    4 +-
 test/unit/helpers/scroll.test.js      |    4 +-
 test/unit/helpers/search.test.js      |    3 +-
 test/unit/transport.test.js           |   68 ++
 test/utils/MockConnection.js          |    5 +-
 test/utils/index.js                   |   18 +-
 24 files changed, 1448 insertions(+), 98 deletions(-)
 create mode 100644 test/acceptance/product-check.test.js

diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc
index a036478d0..529046e36 100644
--- a/docs/connecting.asciidoc
+++ b/docs/connecting.asciidoc
@@ -10,6 +10,7 @@ This page contains the information you need to connect and use the Client with
 * <>
 * <>
 * <>
+* <<product-check>>

 [[auth-reference]]
 [discrete]
@@ -517,3 +518,15 @@ a|* `name` - `string`
 * `statusCode` - `number`, the response status code
 * `headers` - `object`, the response headers
 |===
+
+[discrete]
+[[product-check]]
+=== Automatic product check
+
+Since v7.14.0, the client performs a required product check before the first API call.
+This pre-flight product check allows the client to establish the version of Elasticsearch
+that it is communicating with. The product check requires one additional HTTP request to
+be sent to the server as part of the request pipeline before the main API call is sent.
+In most cases, the check succeeds during the very first API call that the client sends.
+Once the product check completes, no further product check HTTP requests are sent for
+subsequent API calls.
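The pre-flight behaviour documented above can be observed through the client's `request` event, which fires for the product check as well as for the API call that triggered it — the acceptance tests added later in this patch rely on exactly this. A minimal sketch follows; the node URL and the index name are illustrative assumptions, not part of the patch:

[source,js]
----
'use strict'

const { Client } = require('@elastic/elasticsearch')
// assumption: a cluster reachable at this address
const client = new Client({ node: 'http://localhost:9200/' })

// Logs every outgoing HTTP request. On a fresh client the first
// event observed is the product check itself (GET /), the second
// one is the actual search request (POST /my-index/_search).
client.on('request', (err, event) => {
  if (err) {
    console.error(err)
    return
  }
  const { method, path } = event.meta.request.params
  console.log(method, path)
})

client.search({
  index: 'my-index', // hypothetical index
  body: { query: { match_all: {} } }
}, (err, result) => {
  if (err) console.error(err)
})
----

When the check cannot verify the server, the pending call fails with the `ProductNotSupportedError` added to `lib/errors.js` further down in this patch.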
diff --git a/index.js b/index.js
index fc9e62ff3..60c44014a 100644
--- a/index.js
+++ b/index.js
@@ -255,6 +255,10 @@ class Client extends ESAPI {
     }

     const client = new Client(options)
+    // sync the product check state with the parent client
+    const tSymbol = Object.getOwnPropertySymbols(this.transport)
+      .filter(symbol => symbol.description === 'product check')[0]
+    client.transport[tSymbol] = this.transport[tSymbol]
     // Add parent extensions
     if (this[kExtensions].length > 0) {
       this[kExtensions].forEach(({ name, opts, fn }) => {
diff --git a/lib/Transport.d.ts b/lib/Transport.d.ts
index 18b4c37cb..912dd96da 100644
--- a/lib/Transport.d.ts
+++ b/lib/Transport.d.ts
@@ -26,7 +26,8 @@ import * as errors from './errors';
 export type ApiError = errors.ConfigurationError | errors.ConnectionError |
   errors.DeserializationError | errors.SerializationError |
   errors.NoLivingConnectionsError | errors.ResponseError |
-  errors.TimeoutError | errors.RequestAbortedError
+  errors.TimeoutError | errors.RequestAbortedError |
+  errors.ProductNotSupportedError

 export type Context = unknown

diff --git a/lib/Transport.js b/lib/Transport.js
index e99c1ccc9..6abcb723f 100644
--- a/lib/Transport.js
+++ b/lib/Transport.js
@@ -24,20 +24,24 @@ const os = require('os')
 const { gzip, unzip, createGzip } = require('zlib')
 const buffer = require('buffer')
 const ms = require('ms')
+const { EventEmitter } = require('events')
 const {
   ConnectionError,
   RequestAbortedError,
   NoLivingConnectionsError,
   ResponseError,
-  ConfigurationError
+  ConfigurationError,
+  ProductNotSupportedError
 } = require('./errors')

 const noop = () => {}
+const productCheckEmitter = new EventEmitter()

 const clientVersion = require('../package.json').version
 const userAgent = `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${process.version})`
 const MAX_BUFFER_LENGTH = buffer.constants.MAX_LENGTH
 const MAX_STRING_LENGTH = buffer.constants.MAX_STRING_LENGTH
+const kProductCheck = Symbol('product check')
 const kApiVersioning = Symbol('api versioning')

 class Transport {
@@ -65,6 +69,7 @@ class Transport {
     this.generateRequestId = opts.generateRequestId || generateRequestId()
     this.name = opts.name
     this.opaqueIdPrefix = opts.opaqueIdPrefix
+    this[kProductCheck] = 0 // 0 = to be checked, 1 = checking, 2 = checked-ok, 3 = checked-not-ok
     this[kApiVersioning] = process.env.ELASTIC_CLIENT_APIVERSIONING === 'true'

     this.nodeFilter = opts.nodeFilter || defaultNodeFilter
@@ -83,7 +88,11 @@ class Transport {
     this._isSniffing = false

     if (opts.sniffOnStart === true) {
-      this.sniff({ reason: Transport.sniffReasons.SNIFF_ON_START })
+      // a timer is needed here, otherwise the sniff would clash
+      // with the product check testing
+      setTimeout(() => {
+        this.sniff({ reason: Transport.sniffReasons.SNIFF_ON_START })
+      }, 10)
     }
   }

@@ -350,91 +359,124 @@ class Transport {
       }
     }

-    this.emit('serialization', null, result)
-    const headers = Object.assign({}, this.headers, lowerCaseHeaders(options.headers))
+    const prepareRequest = () => {
+      this.emit('serialization', null, result)
+      const headers = Object.assign({}, this.headers, lowerCaseHeaders(options.headers))

-    if (options.opaqueId !== undefined) {
-      headers['x-opaque-id'] = this.opaqueIdPrefix !== null
-        ? this.opaqueIdPrefix + options.opaqueId
-        : options.opaqueId
-    }
+      if (options.opaqueId !== undefined) {
+        headers['x-opaque-id'] = this.opaqueIdPrefix !== null
+          ?
this.opaqueIdPrefix + options.opaqueId + : options.opaqueId + } - // handle json body - if (params.body != null) { - if (shouldSerialize(params.body) === true) { - try { - params.body = this.serializer.serialize(params.body) - } catch (err) { - this.emit('request', err, result) - process.nextTick(callback, err, result) - return transportReturn + // handle json body + if (params.body != null) { + if (shouldSerialize(params.body) === true) { + try { + params.body = this.serializer.serialize(params.body) + } catch (err) { + this.emit('request', err, result) + process.nextTick(callback, err, result) + return transportReturn + } + } + + if (params.body !== '') { + headers['content-type'] = headers['content-type'] || (this[kApiVersioning] ? 'application/vnd.elasticsearch+json; compatible-with=7' : 'application/json') + } + + // handle ndjson body + } else if (params.bulkBody != null) { + if (shouldSerialize(params.bulkBody) === true) { + try { + params.body = this.serializer.ndserialize(params.bulkBody) + } catch (err) { + this.emit('request', err, result) + process.nextTick(callback, err, result) + return transportReturn + } + } else { + params.body = params.bulkBody + } + if (params.body !== '') { + headers['content-type'] = headers['content-type'] || (this[kApiVersioning] ? 'application/vnd.elasticsearch+x-ndjson; compatible-with=7' : 'application/x-ndjson') } } - if (params.body !== '') { - headers['content-type'] = headers['content-type'] || (this[kApiVersioning] ? 'application/vnd.elasticsearch+json; compatible-with=7' : 'application/json') + params.headers = headers + // serializes the querystring + if (options.querystring == null) { + params.querystring = this.serializer.qserialize(params.querystring) + } else { + params.querystring = this.serializer.qserialize( + Object.assign({}, params.querystring, options.querystring) + ) } - // handle ndjson body - } else if (params.bulkBody != null) { - if (shouldSerialize(params.bulkBody) === true) { - try { - params.body = this.serializer.ndserialize(params.bulkBody) - } catch (err) { - this.emit('request', err, result) - process.nextTick(callback, err, result) - return transportReturn + // handles request timeout + params.timeout = toMs(options.requestTimeout || this.requestTimeout) + if (options.asStream === true) params.asStream = true + meta.request.params = params + meta.request.options = options + + // handle compression + if (params.body !== '' && params.body != null) { + if (isStream(params.body) === true) { + if (compression === 'gzip') { + params.headers['content-encoding'] = compression + params.body = params.body.pipe(createGzip()) + } + makeRequest() + } else if (compression === 'gzip') { + gzip(params.body, (err, buffer) => { + /* istanbul ignore next */ + if (err) { + this.emit('request', err, result) + return callback(err, result) + } + params.headers['content-encoding'] = compression + params.headers['content-length'] = '' + Buffer.byteLength(buffer) + params.body = buffer + makeRequest() + }) + } else { + params.headers['content-length'] = '' + Buffer.byteLength(params.body) + makeRequest() } } else { - params.body = params.bulkBody - } - if (params.body !== '') { - headers['content-type'] = headers['content-type'] || (this[kApiVersioning] ? 
'application/vnd.elasticsearch+x-ndjson; compatible-with=7' : 'application/x-ndjson') + makeRequest() } } - params.headers = headers - // serializes the querystring - if (options.querystring == null) { - params.querystring = this.serializer.qserialize(params.querystring) - } else { - params.querystring = this.serializer.qserialize( - Object.assign({}, params.querystring, options.querystring) - ) - } - - // handles request timeout - params.timeout = toMs(options.requestTimeout || this.requestTimeout) - if (options.asStream === true) params.asStream = true - meta.request.params = params - meta.request.options = options - - // handle compression - if (params.body !== '' && params.body != null) { - if (isStream(params.body) === true) { - if (compression === 'gzip') { - params.headers['content-encoding'] = compression - params.body = params.body.pipe(createGzip()) - } - makeRequest() - } else if (compression === 'gzip') { - gzip(params.body, (err, buffer) => { - /* istanbul ignore next */ - if (err) { + // still need to check the product or waiting for the check to finish + if (this[kProductCheck] === 0 || this[kProductCheck] === 1) { + // let pass info requests + if (params.method === 'GET' && params.path === '/') { + prepareRequest() + } else { + // wait for product check to finish + productCheckEmitter.once('product-check', status => { + if (status === false) { + const err = new ProductNotSupportedError(result) this.emit('request', err, result) - return callback(err, result) + process.nextTick(callback, err, result) + } else { + prepareRequest() } - params.headers['content-encoding'] = compression - params.headers['content-length'] = '' + Buffer.byteLength(buffer) - params.body = buffer - makeRequest() }) - } else { - params.headers['content-length'] = '' + Buffer.byteLength(params.body) - makeRequest() + // the very first request triggers the product check + if (this[kProductCheck] === 0) { + this.productCheck() + } } + // the product check is finished and it's not Elasticsearch + } else if (this[kProductCheck] === 3) { + const err = new ProductNotSupportedError(result) + this.emit('request', err, result) + process.nextTick(callback, err, result) + // the product check finished and it's Elasticsearch } else { - makeRequest() + prepareRequest() } return transportReturn @@ -494,6 +536,59 @@ class Transport { callback(null, hosts) }) } + + productCheck () { + debug('Start product check') + this[kProductCheck] = 1 + this.request({ + method: 'GET', + path: '/' + }, (err, result) => { + this[kProductCheck] = 3 + if (err) { + debug('Product check failed', err) + if (err.statusCode === 401 || err.statusCode === 403) { + this[kProductCheck] = 2 + process.emitWarning('The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. 
Some functionality may not be compatible if the server is running an unsupported product.') + productCheckEmitter.emit('product-check', true) + } else { + this[kProductCheck] = 0 + productCheckEmitter.emit('product-check', false) + } + } else { + debug('Checking elasticsearch version', result.body, result.headers) + if (result.body.version == null || typeof result.body.version.number !== 'string') { + debug('Can\'t access Elasticsearch version') + return productCheckEmitter.emit('product-check', false) + } + const tagline = result.body.tagline + const version = result.body.version.number.split('.') + const major = Number(version[0]) + const minor = Number(version[1]) + if (major < 6) { + return productCheckEmitter.emit('product-check', false) + } else if (major >= 6 && major < 7) { + if (tagline !== 'You Know, for Search') { + debug('Bad tagline') + return productCheckEmitter.emit('product-check', false) + } + } else if (major === 7 && minor < 14) { + if (tagline !== 'You Know, for Search' || result.body.version.build_flavor !== 'default') { + debug('Bad tagline or build_flavor') + return productCheckEmitter.emit('product-check', false) + } + } else { + if (result.headers['x-elastic-product'] !== 'Elasticsearch') { + debug('x-elastic-product not recognized') + return productCheckEmitter.emit('product-check', false) + } + } + debug('Valid Elasticsearch distribution') + this[kProductCheck] = 2 + productCheckEmitter.emit('product-check', true) + } + }) + } } Transport.sniffReasons = { diff --git a/lib/errors.d.ts b/lib/errors.d.ts index 12241e486..3ed037fd0 100644 --- a/lib/errors.d.ts +++ b/lib/errors.d.ts @@ -81,3 +81,10 @@ export declare class RequestAbortedError, TConte meta: ApiResponse; constructor(message: string, meta: ApiResponse); } + +export declare class ProductNotSupportedError, TContext = Context> extends ElasticsearchClientError { + name: string; + message: string; + meta: ApiResponse; + constructor(meta: ApiResponse); +} diff --git a/lib/errors.js b/lib/errors.js index a62d45936..2ec9bc715 100644 --- a/lib/errors.js +++ b/lib/errors.js @@ -133,6 +133,16 @@ class RequestAbortedError extends ElasticsearchClientError { } } +class ProductNotSupportedError extends ElasticsearchClientError { + constructor (meta) { + super('Product Not Supported Error') + Error.captureStackTrace(this, ProductNotSupportedError) + this.name = 'ProductNotSupportedError' + this.message = 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.' 
+ this.meta = meta + } +} + module.exports = { ElasticsearchClientError, TimeoutError, @@ -142,5 +152,6 @@ module.exports = { DeserializationError, ConfigurationError, ResponseError, - RequestAbortedError + RequestAbortedError, + ProductNotSupportedError } diff --git a/test/acceptance/events-order.test.js b/test/acceptance/events-order.test.js index 0bbd9a49a..335fd4ba8 100644 --- a/test/acceptance/events-order.test.js +++ b/test/acceptance/events-order.test.js @@ -21,7 +21,7 @@ const { test } = require('tap') const intoStream = require('into-stream') -const { Client, Connection, events } = require('../../index') +const { Connection, events } = require('../../index') const { TimeoutError, ConnectionError, @@ -31,6 +31,7 @@ const { DeserializationError } = require('../../lib/errors') const { + Client, buildServer, connection: { MockConnection, diff --git a/test/acceptance/observability.test.js b/test/acceptance/observability.test.js index 2d74b4f32..df889f22c 100644 --- a/test/acceptance/observability.test.js +++ b/test/acceptance/observability.test.js @@ -2,8 +2,9 @@ const { test } = require('tap') const FakeTimers = require('@sinonjs/fake-timers') -const { Client, Transport } = require('../../index') +const { Transport } = require('../../index') const { + Client, connection: { MockConnection, MockConnectionSniff } } = require('../utils') const noop = () => {} diff --git a/test/acceptance/product-check.test.js b/test/acceptance/product-check.test.js new file mode 100644 index 000000000..4b62833c4 --- /dev/null +++ b/test/acceptance/product-check.test.js @@ -0,0 +1,1131 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +'use strict' + +const { test } = require('tap') +const { Client } = require('../../') +const { + connection: { + MockConnectionTimeout, + buildMockConnection + } +} = require('../utils') + +test('No errors v8', t => { + t.plan(7) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + headers: { + 'x-elastic-product': 'Elasticsearch' + }, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '8.0.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + t.error(err) + const req = requests.shift() + t.equal(event.meta.request.params.method, req.method) + t.equal(event.meta.request.params.path, req.path) + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.error(err) + }) +}) + +test('Errors v8', t => { + t.plan(3) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '8.0.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + const req = requests.shift() + if (req.method === 'GET') { + t.error(err) + } else { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + } + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) +}) + +test('No errors ≤v7.13', t => { + t.plan(7) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '7.13.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 
'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + t.error(err) + const req = requests.shift() + t.equal(event.meta.request.params.method, req.method) + t.equal(event.meta.request.params.path, req.path) + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.error(err) + }) +}) + +test('Errors ≤v7.13', t => { + t.plan(3) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '7.13.0-SNAPSHOT', + build_flavor: 'other', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'Other' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + const req = requests.shift() + if (req.method === 'GET') { + t.error(err) + } else { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + } + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) +}) + +test('No errors v6', t => { + t.plan(7) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '6.8.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + t.error(err) + const req = requests.shift() + t.equal(event.meta.request.params.method, req.method) + t.equal(event.meta.request.params.path, req.path) + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.error(err) + }) +}) + +test('Errors v6', t => { + t.plan(3) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '6.8.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + 
minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'Other' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + const req = requests.shift() + if (req.method === 'GET') { + t.error(err) + } else { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + } + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) +}) + +test('Auth error - 401', t => { + t.plan(8) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 401, + headers: { + 'x-elastic-product': 'Elasticsearch' + }, + body: { + security: 'exception' + } + } + } + }) + + process.on('warning', onWarning) + function onWarning (warning) { + t.equal(warning.message, 'The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. Some functionality may not be compatible if the server is running an unsupported product.') + } + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + t.error(err) + const req = requests.shift() + t.equal(event.meta.request.params.method, req.method) + t.equal(event.meta.request.params.path, req.path) + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.statusCode, 401) + process.removeListener('warning', onWarning) + }) +}) + +test('Auth error - 403', t => { + t.plan(8) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 403, + headers: { + 'x-elastic-product': 'Elasticsearch' + }, + body: { + security: 'exception' + } + } + } + }) + + process.on('warning', onWarning) + function onWarning (warning) { + t.equal(warning.message, 'The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. 
Some functionality may not be compatible if the server is running an unsupported product.') + } + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + t.error(err) + const req = requests.shift() + t.equal(event.meta.request.params.method, req.method) + t.equal(event.meta.request.params.path, req.path) + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.statusCode, 403) + process.removeListener('warning', onWarning) + }) +}) + +test('500 error', t => { + t.plan(8) + + let count = 0 + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const MockConnection = buildMockConnection({ + onRequest (params) { + const req = requests.shift() + t.equal(req.method, params.method) + t.equal(req.path, params.path) + + if (count++ >= 1) { + return { + statusCode: 200, + headers: { + 'x-elastic-product': 'Elasticsearch' + }, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '8.0.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } else { + return { + statusCode: 500, + headers: { + 'x-elastic-product': 'Elasticsearch' + }, + body: { + error: 'kaboom' + } + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.error(err) + }) + }) +}) + +test('TimeoutError', t => { + t.plan(3) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnectionTimeout, + maxRetries: 0 + }) + + client.on('request', (err, event) => { + const req = requests.shift() + if (req.method === 'GET') { + t.error(err) + } else { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + } + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) +}) + +test('Multiple subsequent calls, no errors', t => { + t.plan(15) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + headers: { + 'x-elastic-product': 'Elasticsearch' + }, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '8.0.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: 
'5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }, { + method: 'HEAD', + path: '/' + }, { + method: 'POST', + path: '/foo/_doc' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + t.error(err) + const req = requests.shift() + t.equal(event.meta.request.params.method, req.method) + t.equal(event.meta.request.params.path, req.path) + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.error(err) + }) + + client.ping((err, result) => { + t.error(err) + }) + + client.index({ + index: 'foo', + body: { + foo: 'bar' + } + }, (err, result) => { + t.error(err) + }) +}) + +test('Multiple subsequent calls, with errors', t => { + t.plan(7) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '8.0.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }, { + method: 'HEAD', + path: '/' + }, { + method: 'POST', + path: '/foo/_doc' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + const req = requests.shift() + if (req.method === 'GET') { + t.error(err) + } else { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + } + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) + + client.ping((err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) + + client.index({ + index: 'foo', + body: { + foo: 'bar' + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) +}) + +test('Later successful call', t => { + t.plan(11) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + headers: { + 'x-elastic-product': 'Elasticsearch' + }, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '8.0.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: 
'7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + t.error(err) + const req = requests.shift() + t.equal(event.meta.request.params.method, req.method) + t.equal(event.meta.request.params.path, req.path) + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.error(err) + }) + + setTimeout(() => { + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.error(err) + }) + }, 100) +}) + +test('Later errored call', t => { + t.plan(5) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '8.0.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + const req = requests.shift() + if (req.method === 'GET') { + t.error(err) + } else { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + } + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) + + setTimeout(() => { + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) + }, 100) +}) + +test('Errors ≤v5', t => { + t.plan(3) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '5.0.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + const req = requests.shift() + if (req.method === 'GET') { + t.error(err) + } else { + 
t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + } + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) +}) + +test('Bad info response', t => { + t.plan(3) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + const req = requests.shift() + if (req.method === 'GET') { + t.error(err) + } else { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + } + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) +}) + +test('No multiple checks with child clients', t => { + t.plan(11) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + headers: { + 'x-elastic-product': 'Elasticsearch' + }, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '8.0.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + t.error(err) + const req = requests.shift() + t.equal(event.meta.request.params.method, req.method) + t.equal(event.meta.request.params.path, req.path) + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.error(err) + }) + + setTimeout(() => { + const child = client.child() + child.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.error(err) + }) + }, 100) +}) diff --git a/test/acceptance/proxy.test.js b/test/acceptance/proxy.test.js index da54084cb..29dd6b2a3 100644 --- a/test/acceptance/proxy.test.js +++ b/test/acceptance/proxy.test.js @@ -4,8 +4,8 @@ process.env.NODE_TLS_REJECT_UNAUTHORIZED = 0 const { test } = require('tap') -const { Client } = require('../../index') const { + Client, buildProxy: { createProxy, createSecureProxy, diff --git a/test/acceptance/resurrect.test.js b/test/acceptance/resurrect.test.js index 432929852..c2e43a6c6 100644 --- a/test/acceptance/resurrect.test.js +++ b/test/acceptance/resurrect.test.js @@ -23,8 +23,8 @@ const { test } = require('tap') const { 
URL } = require('url') const FakeTimers = require('@sinonjs/fake-timers') const workq = require('workq') -const { buildCluster } = require('../utils') -const { Client, events } = require('../../index') +const { Client, buildCluster } = require('../utils') +const { events } = require('../../index') /** * The aim of this test is to verify how the resurrect logic behaves diff --git a/test/acceptance/sniff.test.js b/test/acceptance/sniff.test.js index ee18c9298..5dfaa3f76 100644 --- a/test/acceptance/sniff.test.js +++ b/test/acceptance/sniff.test.js @@ -23,8 +23,8 @@ const { test } = require('tap') const { URL } = require('url') const FakeTimers = require('@sinonjs/fake-timers') const workq = require('workq') -const { buildCluster } = require('../utils') -const { Client, Connection, Transport, events, errors } = require('../../index') +const { Client, buildCluster } = require('../utils') +const { Connection, Transport, events, errors } = require('../../index') /** * The aim of this test is to verify how the sniffer behaves diff --git a/test/unit/api-async.js b/test/unit/api-async.js index fb0bb1b80..219df5533 100644 --- a/test/unit/api-async.js +++ b/test/unit/api-async.js @@ -19,8 +19,8 @@ 'use strict' -const { Client, errors } = require('../../index') -const { buildServer } = require('../utils') +const { errors } = require('../../index') +const { Client, buildServer } = require('../utils') function runAsyncTest (test) { test('async await (search)', t => { diff --git a/test/unit/api.test.js b/test/unit/api.test.js index 667ec66ab..de5021b66 100644 --- a/test/unit/api.test.js +++ b/test/unit/api.test.js @@ -20,8 +20,8 @@ 'use strict' const { test } = require('tap') -const { Client, errors } = require('../../index') -const { buildServer } = require('../utils') +const { errors } = require('../../index') +const { Client, buildServer } = require('../utils') test('Basic (callback)', t => { t.plan(2) diff --git a/test/unit/child.test.js b/test/unit/child.test.js index 626d8941b..a07a6d135 100644 --- a/test/unit/child.test.js +++ b/test/unit/child.test.js @@ -20,8 +20,9 @@ 'use strict' const { test } = require('tap') -const { Client, errors } = require('../../index') +const { errors } = require('../../index') const { + Client, buildServer, connection: { MockConnection } } = require('../utils') diff --git a/test/unit/client.test.js b/test/unit/client.test.js index 8c65cf21d..d9a26c110 100644 --- a/test/unit/client.test.js +++ b/test/unit/client.test.js @@ -23,9 +23,9 @@ const { test } = require('tap') const { URL } = require('url') const buffer = require('buffer') const intoStream = require('into-stream') -const { Client, ConnectionPool, Transport, Connection, errors } = require('../../index') +const { ConnectionPool, Transport, Connection, errors } = require('../../index') const { CloudConnectionPool } = require('../../lib/pool') -const { buildServer } = require('../utils') +const { Client, buildServer } = require('../utils') let clientVersion = require('../../package.json').version if (clientVersion.includes('-')) { clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' diff --git a/test/unit/events.test.js b/test/unit/events.test.js index 6612213ca..5286a3f40 100644 --- a/test/unit/events.test.js +++ b/test/unit/events.test.js @@ -20,9 +20,10 @@ 'use strict' const { test } = require('tap') -const { Client, events } = require('../../index') +const { events } = require('../../index') const { TimeoutError } = require('../../lib/errors') const { + Client, connection: { MockConnection, 
MockConnectionTimeout diff --git a/test/unit/helpers/bulk.test.js b/test/unit/helpers/bulk.test.js index 5fda2856f..b8a00a1f0 100644 --- a/test/unit/helpers/bulk.test.js +++ b/test/unit/helpers/bulk.test.js @@ -24,8 +24,8 @@ const { join } = require('path') const split = require('split2') const FakeTimers = require('@sinonjs/fake-timers') const { test } = require('tap') -const { Client, errors } = require('../../../') -const { buildServer, connection } = require('../../utils') +const { errors } = require('../../../') +const { Client, buildServer, connection } = require('../../utils') let clientVersion = require('../../../package.json').version if (clientVersion.includes('-')) { clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' diff --git a/test/unit/helpers/msearch.test.js b/test/unit/helpers/msearch.test.js index b80792fa5..b7a06ac8f 100644 --- a/test/unit/helpers/msearch.test.js +++ b/test/unit/helpers/msearch.test.js @@ -20,8 +20,8 @@ 'use strict' const { test } = require('tap') -const { Client, errors } = require('../../../') -const { connection } = require('../../utils') +const { errors } = require('../../../') +const { Client, connection } = require('../../utils') const FakeTimers = require('@sinonjs/fake-timers') test('Basic', async t => { diff --git a/test/unit/helpers/scroll.test.js b/test/unit/helpers/scroll.test.js index 4943e703b..2a8ea2a4f 100644 --- a/test/unit/helpers/scroll.test.js +++ b/test/unit/helpers/scroll.test.js @@ -20,8 +20,8 @@ 'use strict' const { test } = require('tap') -const { Client, errors } = require('../../../') -const { connection } = require('../../utils') +const { errors } = require('../../../') +const { Client, connection } = require('../../utils') let clientVersion = require('../../../package.json').version if (clientVersion.includes('-')) { clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' diff --git a/test/unit/helpers/search.test.js b/test/unit/helpers/search.test.js index ad01ad35a..bea39bc62 100644 --- a/test/unit/helpers/search.test.js +++ b/test/unit/helpers/search.test.js @@ -20,8 +20,7 @@ 'use strict' const { test } = require('tap') -const { Client } = require('../../../') -const { connection } = require('../../utils') +const { Client, connection } = require('../../utils') test('Search should have an additional documents property', async t => { const MockConnection = connection.buildMockConnection({ diff --git a/test/unit/transport.test.js b/test/unit/transport.test.js index 410fe8995..c617bc3af 100644 --- a/test/unit/transport.test.js +++ b/test/unit/transport.test.js @@ -27,6 +27,7 @@ const os = require('os') const intoStream = require('into-stream') const { buildServer, + skipProductCheck, connection: { MockConnection, MockConnectionTimeout, MockConnectionError } } = require('../utils') const { @@ -65,6 +66,7 @@ test('Basic', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -92,6 +94,7 @@ test('Basic (promises support)', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport .request({ @@ -119,6 +122,7 @@ test('Basic - failing (promises support)', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport .request({ @@ -145,6 +149,7 @@ test('Basic (options + promises support)', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport .request({ @@ -190,6 +195,7 @@ test('Send POST', t => { sniffInterval: false, 
sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'POST', @@ -246,6 +252,7 @@ test('Send POST (ndjson)', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'POST', @@ -289,6 +296,7 @@ test('Send stream', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'POST', @@ -332,6 +340,7 @@ test('Send stream (bulkBody)', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'POST', @@ -365,6 +374,7 @@ test('Not JSON payload from server', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -396,6 +406,7 @@ test('NoLivingConnectionsError (null connection)', t => { return null } }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -424,6 +435,7 @@ test('NoLivingConnectionsError (undefined connection)', t => { return undefined } }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -447,6 +459,7 @@ test('SerializationError', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) const body = { hello: 'world' } body.o = body @@ -473,6 +486,7 @@ test('SerializationError (bulk)', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) const bulkBody = { hello: 'world' } bulkBody.o = bulkBody @@ -505,6 +519,7 @@ test('DeserializationError', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -541,6 +556,7 @@ test('TimeoutError (should call markDead on the failing connection)', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -575,6 +591,7 @@ test('ConnectionError (should call markDead on the failing connection)', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -620,6 +637,7 @@ test('Retry mechanism', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -664,6 +682,7 @@ test('Should not retry if the body is a stream', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'POST', @@ -709,6 +728,7 @@ test('Should not retry if the bulkBody is a stream', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'POST', @@ -754,6 +774,7 @@ test('No retry', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'POST', @@ -805,6 +826,7 @@ test('Custom retry mechanism', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -852,6 +874,7 @@ test('Should not retry on 429', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -889,6 +912,7 @@ test('Should call markAlive with a successful response', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -926,6 +950,7 @@ test('Should call resurrect on every request', t => { sniffOnStart: false, name: 'elasticsearch-js' }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -954,6 +979,7 @@ test('Should return a request aborter utility', t => { sniffInterval: false, 
sniffOnStart: false }) + skipProductCheck(transport) const request = transport.request({ method: 'GET', @@ -1002,6 +1028,7 @@ test('Retry mechanism and abort', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) const request = transport.request({ method: 'GET', @@ -1031,6 +1058,7 @@ test('Abort a request with the promise API', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) const request = transport.request({ method: 'GET', @@ -1070,6 +1098,7 @@ test('ResponseError', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1105,6 +1134,7 @@ test('Override requestTimeout', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1167,6 +1197,7 @@ test('sniff', t => { sniffOnConnectionFault: true, sniffEndpoint: '/sniff' }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1200,6 +1231,7 @@ test('sniff', t => { sniffInterval: 1, sniffEndpoint: '/sniff' }) + skipProductCheck(transport) const params = { method: 'GET', path: '/' } clock.tick(100) @@ -1233,6 +1265,7 @@ test('sniff', t => { sniffInterval: false, sniffEndpoint: '/sniff' }) + skipProductCheck(transport) transport.sniff((err, hosts) => { t.ok(err instanceof ConnectionError) @@ -1269,6 +1302,7 @@ test(`Should mark as dead connections where the statusCode is 502/3/4 sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1323,6 +1357,7 @@ test('Should retry the request if the statusCode is 502/3/4', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1354,6 +1389,7 @@ test('Ignore status code', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1403,6 +1439,7 @@ test('Should serialize the querystring', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1446,6 +1483,7 @@ test('timeout option', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1476,6 +1514,7 @@ test('timeout option', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1512,6 +1551,7 @@ test('timeout option', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1542,6 +1582,7 @@ test('timeout option', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1576,6 +1617,7 @@ test('Should cast to boolean HEAD request', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'HEAD', @@ -1601,6 +1643,7 @@ test('Should cast to boolean HEAD request', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'HEAD', @@ -1627,6 +1670,7 @@ test('Should cast to boolean HEAD request', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'HEAD', @@ -1652,6 +1696,7 @@ test('Should cast to boolean HEAD request', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'HEAD', @@ -1694,6 +1739,7 @@ test('Suggest compression', t => { 
sniffOnStart: false, suggestCompression: true }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1734,6 +1780,7 @@ test('Broken compression', t => { sniffOnStart: false, suggestCompression: true }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1769,6 +1816,7 @@ test('Warning header', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1806,6 +1854,7 @@ test('Warning header', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1840,6 +1889,7 @@ test('Warning header', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1875,6 +1925,7 @@ test('asStream set to true', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -1933,6 +1984,7 @@ test('Compress request', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'POST', @@ -1981,6 +2033,7 @@ test('Compress request', t => { sniffOnStart: false, compression: 'gzip' }) + skipProductCheck(transport) transport.request({ method: 'POST', @@ -2026,6 +2079,7 @@ test('Compress request', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'POST', @@ -2085,6 +2139,7 @@ test('Compress request', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'DELETE', @@ -2151,6 +2206,7 @@ test('Compress request', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'POST', @@ -2195,6 +2251,7 @@ test('Headers configuration', t => { 'x-foo': 'bar' } }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -2234,6 +2291,7 @@ test('Headers configuration', t => { 'x-foo': 'bar' } }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -2272,6 +2330,7 @@ test('Headers configuration', t => { 'x-foo': 'bar' } }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -2312,6 +2371,7 @@ test('nodeFilter and nodeSelector', t => { return conns[0] } }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -2345,6 +2405,7 @@ test('Should accept custom querystring in the optons object', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -2381,6 +2442,7 @@ test('Should accept custom querystring in the optons object', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -2425,6 +2487,7 @@ test('Should add an User-Agent header', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -2459,6 +2522,7 @@ test('Should pass request params and options to generateRequestId', t => { return 'id' } }) + skipProductCheck(transport) transport.request(params, options, t.error) }) @@ -2484,6 +2548,7 @@ test('Secure json parsing', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -2516,6 +2581,7 @@ test('Secure json parsing', t => { sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) transport.request({ method: 'GET', @@ -2574,6 +2640,7 @@ test('The callback with a sync error should be called in the next tick - json', 
sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) const body = { a: true } body.o = body @@ -2605,6 +2672,7 @@ test('The callback with a sync error should be called in the next tick - ndjson' sniffInterval: false, sniffOnStart: false }) + skipProductCheck(transport) const field = { a: true } field.o = field diff --git a/test/utils/MockConnection.js b/test/utils/MockConnection.js index f714fdd30..0719696af 100644 --- a/test/utils/MockConnection.js +++ b/test/utils/MockConnection.js @@ -133,7 +133,7 @@ function buildMockConnection (opts) { class MockConnection extends Connection { request (params, callback) { - let { body, statusCode } = opts.onRequest(params) + let { body, statusCode, headers } = opts.onRequest(params) if (typeof body !== 'string') { body = JSON.stringify(body) } @@ -144,7 +144,8 @@ function buildMockConnection (opts) { 'content-type': 'application/json;utf=8', date: new Date().toISOString(), connection: 'keep-alive', - 'content-length': Buffer.byteLength(body) + 'content-length': Buffer.byteLength(body), + ...headers } process.nextTick(() => { if (!aborted) { diff --git a/test/utils/index.js b/test/utils/index.js index ac513fde9..1adff9f3b 100644 --- a/test/utils/index.js +++ b/test/utils/index.js @@ -25,6 +25,7 @@ const buildServer = require('./buildServer') const buildCluster = require('./buildCluster') const buildProxy = require('./buildProxy') const connection = require('./MockConnection') +const { Client } = require('../../') async function waitCluster (client, waitForStatus = 'green', timeout = '50s', times = 0) { if (!client) { @@ -41,10 +42,25 @@ async function waitCluster (client, waitForStatus = 'green', timeout = '50s', ti } } +function skipProductCheck (client) { + const tSymbol = Object.getOwnPropertySymbols(client.transport || client) + .filter(symbol => symbol.description === 'product check')[0] + ;(client.transport || client)[tSymbol] = 2 +} + +class NoProductCheckClient extends Client { + constructor (opts) { + super(opts) + skipProductCheck(this) + } +} + module.exports = { buildServer, buildCluster, buildProxy, connection, - waitCluster + waitCluster, + skipProductCheck, + Client: NoProductCheckClient } From ff98e6310cc8fc4dd1af659402fc74b4deda97f1 Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 19 Jul 2021 16:44:39 +0200 Subject: [PATCH 037/647] API generation --- api/api/nodes.js | 70 +++++++++++++++++++++++++++++++++++++++-- api/requestParams.d.ts | 9 ++++++ docs/reference.asciidoc | 42 +++++++++++++++++++++++-- index.d.ts | 16 ++++++++++ 4 files changed, 133 insertions(+), 4 deletions(-) diff --git a/api/api/nodes.js b/api/api/nodes.js index 275f47cd7..b06002e66 100644 --- a/api/api/nodes.js +++ b/api/api/nodes.js @@ -23,14 +23,78 @@ /* eslint no-unused-vars: 0 */ const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['interval', 'snapshots', 'threads', 'ignore_idle_threads', 'type', 'timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'flat_settings', 'completion_fields', 'fielddata_fields', 'fields', 'groups', 'level', 'types', 'include_segment_file_sizes', 'include_unloaded_segments'] -const snakeCase = { ignoreIdleThreads: 'ignore_idle_threads', errorTrace: 'error_trace', filterPath: 'filter_path', flatSettings: 'flat_settings', completionFields: 'completion_fields', fielddataFields: 'fielddata_fields', includeSegmentFileSizes: 'include_segment_file_sizes', includeUnloadedSegments: 'include_unloaded_segments' } +const 
acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'interval', 'snapshots', 'threads', 'ignore_idle_threads', 'type', 'timeout', 'flat_settings', 'completion_fields', 'fielddata_fields', 'fields', 'groups', 'level', 'types', 'include_segment_file_sizes', 'include_unloaded_segments'] +const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', ignoreIdleThreads: 'ignore_idle_threads', flatSettings: 'flat_settings', completionFields: 'completion_fields', fielddataFields: 'fielddata_fields', includeSegmentFileSizes: 'include_segment_file_sizes', includeUnloadedSegments: 'include_unloaded_segments' } function NodesApi (transport, ConfigurationError) { this.transport = transport this[kConfigurationError] = ConfigurationError } +NodesApi.prototype.clearMeteringArchive = function nodesClearMeteringArchiveApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.node_id == null && params.nodeId == null) { + const err = new this[kConfigurationError]('Missing required parameter: node_id or nodeId') + return handleError(err, callback) + } + if (params.max_archive_version == null && params.maxArchiveVersion == null) { + const err = new this[kConfigurationError]('Missing required parameter: max_archive_version or maxArchiveVersion') + return handleError(err, callback) + } + + // check required url components + if ((params.max_archive_version != null || params.maxArchiveVersion != null) && ((params.node_id == null && params.nodeId == null))) { + const err = new this[kConfigurationError]('Missing required parameter of the url: node_id') + return handleError(err, callback) + } + + let { method, body, nodeId, node_id, maxArchiveVersion, max_archive_version, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'DELETE' + path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + '_repositories_metering' + '/' + encodeURIComponent(max_archive_version || maxArchiveVersion) + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + +NodesApi.prototype.getMeteringInfo = function nodesGetMeteringInfoApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.node_id == null && params.nodeId == null) { + const err = new this[kConfigurationError]('Missing required parameter: node_id or nodeId') + return handleError(err, callback) + } + + let { method, body, nodeId, node_id, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = 'GET' + path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + '_repositories_metering' + + // build request object + const request = { + method, + path, + body: null, + querystring + } + + return this.transport.request(request, options, callback) +} + NodesApi.prototype.hotThreads = function nodesHotThreadsApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -186,6 +250,8 @@ NodesApi.prototype.usage = function nodesUsageApi (params, options, callback) { } Object.defineProperties(NodesApi.prototype, { + clear_metering_archive: { get () { return 
this.clearMeteringArchive } }, + get_metering_info: { get () { return this.getMeteringInfo } }, hot_threads: { get () { return this.hotThreads } }, reload_secure_settings: { get () { return this.reloadSecureSettings } } }) diff --git a/api/requestParams.d.ts b/api/requestParams.d.ts index 13c4cdca3..73f98a8cf 100644 --- a/api/requestParams.d.ts +++ b/api/requestParams.d.ts @@ -1969,6 +1969,15 @@ export interface Mtermvectors extends Generic { body?: T; } +export interface NodesClearMeteringArchive extends Generic { + node_id: string | string[]; + max_archive_version: number; +} + +export interface NodesGetMeteringInfo extends Generic { + node_id: string | string[]; +} + export interface NodesHotThreads extends Generic { node_id?: string | string[]; interval?: string; diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 96633c4bf..888f72f82 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -4393,7 +4393,7 @@ client.indices.fieldUsageStats({ expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' }) ---- -link:{ref}/indices-field-usage-stats.html[Documentation] + +link:{ref}/field-usage-stats.html[Documentation] + [cols=2*] |=== |`index` @@ -8111,6 +8111,44 @@ _Default:_ `true` |=== +[discrete] +=== nodes.clearMeteringArchive +*Stability:* experimental +[source,ts] +---- +client.nodes.clearMeteringArchive({ + node_id: string | string[], + max_archive_version: number +}) +---- +link:{ref}/clear-repositories-metering-archive-api.html[Documentation] + +[cols=2*] +|=== +|`node_id` or `nodeId` +|`string \| string[]` - Comma-separated list of node IDs or names used to limit returned information. + +|`max_archive_version` or `maxArchiveVersion` +|`number` - Specifies the maximum archive_version to be cleared from the archive. + +|=== + +[discrete] +=== nodes.getMeteringInfo +*Stability:* experimental +[source,ts] +---- +client.nodes.getMeteringInfo({ + node_id: string | string[] +}) +---- +link:{ref}/get-repositories-metering-api.html[Documentation] + +[cols=2*] +|=== +|`node_id` or `nodeId` +|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information. 
+ +|=== + [discrete] === nodes.hotThreads @@ -9520,7 +9558,7 @@ link:{ref}/security-api-enable-user.html[Documentation] + ---- client.security.enrollKibana() ---- -link:{ref}/security-api-enroll-kibana.html[Documentation] + +link:{ref}/security-api-kibana-enrollment.html[Documentation] + [discrete] diff --git a/index.d.ts b/index.d.ts index 978e085f8..e7d00efeb 100644 --- a/index.d.ts +++ b/index.d.ts @@ -1921,6 +1921,22 @@ declare class Client { mtermvectors, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Mtermvectors, callback: callbackFn): TransportRequestCallback mtermvectors, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Mtermvectors, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback nodes: { + clear_metering_archive, TContext = Context>(params?: RequestParams.NodesClearMeteringArchive, options?: TransportRequestOptions): TransportRequestPromise> + clear_metering_archive, TContext = Context>(callback: callbackFn): TransportRequestCallback + clear_metering_archive, TContext = Context>(params: RequestParams.NodesClearMeteringArchive, callback: callbackFn): TransportRequestCallback + clear_metering_archive, TContext = Context>(params: RequestParams.NodesClearMeteringArchive, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + clearMeteringArchive, TContext = Context>(params?: RequestParams.NodesClearMeteringArchive, options?: TransportRequestOptions): TransportRequestPromise> + clearMeteringArchive, TContext = Context>(callback: callbackFn): TransportRequestCallback + clearMeteringArchive, TContext = Context>(params: RequestParams.NodesClearMeteringArchive, callback: callbackFn): TransportRequestCallback + clearMeteringArchive, TContext = Context>(params: RequestParams.NodesClearMeteringArchive, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + get_metering_info, TContext = Context>(params?: RequestParams.NodesGetMeteringInfo, options?: TransportRequestOptions): TransportRequestPromise> + get_metering_info, TContext = Context>(callback: callbackFn): TransportRequestCallback + get_metering_info, TContext = Context>(params: RequestParams.NodesGetMeteringInfo, callback: callbackFn): TransportRequestCallback + get_metering_info, TContext = Context>(params: RequestParams.NodesGetMeteringInfo, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getMeteringInfo, TContext = Context>(params?: RequestParams.NodesGetMeteringInfo, options?: TransportRequestOptions): TransportRequestPromise> + getMeteringInfo, TContext = Context>(callback: callbackFn): TransportRequestCallback + getMeteringInfo, TContext = Context>(params: RequestParams.NodesGetMeteringInfo, callback: callbackFn): TransportRequestCallback + getMeteringInfo, TContext = Context>(params: RequestParams.NodesGetMeteringInfo, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback hot_threads, TContext = Context>(params?: RequestParams.NodesHotThreads, options?: TransportRequestOptions): TransportRequestPromise> hot_threads, TContext = Context>(callback: callbackFn): TransportRequestCallback hot_threads, TContext = Context>(params: RequestParams.NodesHotThreads, callback: callbackFn): TransportRequestCallback From 304d251c3f6ca5cf767228d2c4328dffc553bc81 Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 19 Jul 2021 16:49:51 +0200 Subject: [PATCH 038/647] Updated type definitions --- api/kibana.d.ts | 14 
+++++++++++++ api/new.d.ts | 56 +++++++++++++++++++++++++++++++++++++++++++++++++ api/types.d.ts | 4 ++-- 3 files changed, 72 insertions(+), 2 deletions(-) diff --git a/api/kibana.d.ts b/api/kibana.d.ts index cb1f66ba0..ebae7c894 100644 --- a/api/kibana.d.ts +++ b/api/kibana.d.ts @@ -210,6 +210,7 @@ interface KibanaClient { explainLifecycle(params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> getLifecycle(params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> getStatus(params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> + migrateToDataTiers(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> moveToStep(params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): TransportRequestPromise> putLifecycle(params?: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> removePolicy(params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> @@ -232,11 +233,13 @@ interface KibanaClient { deleteDataStream(params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise> deleteIndexTemplate(params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> deleteTemplate(params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> + diskUsage(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> exists(params: T.IndicesExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> existsAlias(params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> existsIndexTemplate(params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> existsTemplate(params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> existsType(params: T.IndicesExistsTypeRequest, options?: TransportRequestOptions): TransportRequestPromise> + fieldUsageStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> flush(params?: T.IndicesFlushRequest, options?: TransportRequestOptions): TransportRequestPromise> flushSynced(params?: T.IndicesFlushSyncedRequest, options?: TransportRequestOptions): TransportRequestPromise> forcemerge(params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): TransportRequestPromise> @@ -353,6 +356,7 @@ interface KibanaClient { putJob(params: T.MlPutJobRequest, options?: TransportRequestOptions): TransportRequestPromise> putTrainedModel(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> + resetJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> setUpgradeMode(params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): TransportRequestPromise> startDataFrameAnalytics(params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> @@ -375,6 +379,8 @@ interface KibanaClient { msearchTemplate(params?: T.MsearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> mtermvectors(params?: 
T.MtermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise> nodes: { + clearMeteringArchive(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + getMeteringInfo(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> hotThreads(params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise> info(params?: T.NodesInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> reloadSecureSettings(params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> @@ -446,7 +452,12 @@ interface KibanaClient { putRole(params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): TransportRequestPromise> putRoleMapping(params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> putUser(params: T.SecurityPutUserRequest, options?: TransportRequestOptions): TransportRequestPromise> + samlAuthenticate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> samlCompleteLogout(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + samlInvalidate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + samlLogout(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + samlPrepareAuthentication(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + samlServiceProviderMetadata(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> } shutdown: { deleteNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> @@ -480,6 +491,9 @@ interface KibanaClient { } sql: { clearCursor(params?: T.SqlClearCursorRequest, options?: TransportRequestOptions): TransportRequestPromise> + deleteAsync(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + getAsync(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + getAsyncStatus(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> query(params?: T.SqlQueryRequest, options?: TransportRequestOptions): TransportRequestPromise> translate(params?: T.SqlTranslateRequest, options?: TransportRequestOptions): TransportRequestPromise> } diff --git a/api/new.d.ts b/api/new.d.ts index c43909548..5a9e4923a 100644 --- a/api/new.d.ts +++ b/api/new.d.ts @@ -503,6 +503,10 @@ declare class Client { getStatus(callback: callbackFn): TransportRequestCallback getStatus(params: T.IlmGetStatusRequest, callback: callbackFn): TransportRequestCallback getStatus(params: T.IlmGetStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + migrateToDataTiers(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + migrateToDataTiers(callback: callbackFn): TransportRequestCallback + migrateToDataTiers(params: TODO, callback: callbackFn): TransportRequestCallback + migrateToDataTiers(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback moveToStep(params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): TransportRequestPromise> moveToStep(params: T.IlmMoveToStepRequest, callback: callbackFn): TransportRequestCallback moveToStep(params: T.IlmMoveToStepRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback @@ -571,6 +575,10 @@ declare class Client { deleteTemplate(params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): 
TransportRequestPromise> deleteTemplate(params: T.IndicesDeleteTemplateRequest, callback: callbackFn): TransportRequestCallback deleteTemplate(params: T.IndicesDeleteTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + diskUsage(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + diskUsage(callback: callbackFn): TransportRequestCallback + diskUsage(params: TODO, callback: callbackFn): TransportRequestCallback + diskUsage(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback exists(params: T.IndicesExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> exists(params: T.IndicesExistsRequest, callback: callbackFn): TransportRequestCallback exists(params: T.IndicesExistsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback @@ -586,6 +594,10 @@ declare class Client { existsType(params: T.IndicesExistsTypeRequest, options?: TransportRequestOptions): TransportRequestPromise> existsType(params: T.IndicesExistsTypeRequest, callback: callbackFn): TransportRequestCallback existsType(params: T.IndicesExistsTypeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + fieldUsageStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + fieldUsageStats(callback: callbackFn): TransportRequestCallback + fieldUsageStats(params: TODO, callback: callbackFn): TransportRequestCallback + fieldUsageStats(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback flush(params?: T.IndicesFlushRequest, options?: TransportRequestOptions): TransportRequestPromise> flush(callback: callbackFn): TransportRequestCallback flush(params: T.IndicesFlushRequest, callback: callbackFn): TransportRequestCallback @@ -974,6 +986,10 @@ declare class Client { putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, callback: callbackFn): TransportRequestCallback putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + resetJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + resetJob(callback: callbackFn): TransportRequestCallback + resetJob(params: TODO, callback: callbackFn): TransportRequestCallback + resetJob(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, callback: callbackFn): TransportRequestCallback revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback @@ -1041,6 +1057,14 @@ declare class Client { mtermvectors(params: T.MtermvectorsRequest, callback: callbackFn): TransportRequestCallback mtermvectors(params: T.MtermvectorsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback nodes: { + clearMeteringArchive(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + clearMeteringArchive(callback: callbackFn): TransportRequestCallback + clearMeteringArchive(params: TODO, callback: callbackFn): TransportRequestCallback + clearMeteringArchive(params: TODO, 
options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getMeteringInfo(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + getMeteringInfo(callback: callbackFn): TransportRequestCallback + getMeteringInfo(params: TODO, callback: callbackFn): TransportRequestCallback + getMeteringInfo(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback hotThreads(params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise> hotThreads(callback: callbackFn): TransportRequestCallback hotThreads(params: T.NodesHotThreadsRequest, callback: callbackFn): TransportRequestCallback @@ -1280,10 +1304,30 @@ declare class Client { putUser(params: T.SecurityPutUserRequest, options?: TransportRequestOptions): TransportRequestPromise> putUser(params: T.SecurityPutUserRequest, callback: callbackFn): TransportRequestCallback putUser(params: T.SecurityPutUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + samlAuthenticate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + samlAuthenticate(callback: callbackFn): TransportRequestCallback + samlAuthenticate(params: TODO, callback: callbackFn): TransportRequestCallback + samlAuthenticate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback samlCompleteLogout(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> samlCompleteLogout(callback: callbackFn): TransportRequestCallback samlCompleteLogout(params: TODO, callback: callbackFn): TransportRequestCallback samlCompleteLogout(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + samlInvalidate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + samlInvalidate(callback: callbackFn): TransportRequestCallback + samlInvalidate(params: TODO, callback: callbackFn): TransportRequestCallback + samlInvalidate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + samlLogout(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + samlLogout(callback: callbackFn): TransportRequestCallback + samlLogout(params: TODO, callback: callbackFn): TransportRequestCallback + samlLogout(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + samlPrepareAuthentication(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + samlPrepareAuthentication(callback: callbackFn): TransportRequestCallback + samlPrepareAuthentication(params: TODO, callback: callbackFn): TransportRequestCallback + samlPrepareAuthentication(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + samlServiceProviderMetadata(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + samlServiceProviderMetadata(callback: callbackFn): TransportRequestCallback + samlServiceProviderMetadata(params: TODO, callback: callbackFn): TransportRequestCallback + samlServiceProviderMetadata(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback } shutdown: { deleteNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> @@ -1380,6 +1424,18 @@ declare class Client { clearCursor(callback: callbackFn): TransportRequestCallback clearCursor(params: T.SqlClearCursorRequest, callback: callbackFn): TransportRequestCallback 
clearCursor(params: T.SqlClearCursorRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + deleteAsync(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + deleteAsync(callback: callbackFn): TransportRequestCallback + deleteAsync(params: TODO, callback: callbackFn): TransportRequestCallback + deleteAsync(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getAsync(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + getAsync(callback: callbackFn): TransportRequestCallback + getAsync(params: TODO, callback: callbackFn): TransportRequestCallback + getAsync(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + getAsyncStatus(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + getAsyncStatus(callback: callbackFn): TransportRequestCallback + getAsyncStatus(params: TODO, callback: callbackFn): TransportRequestCallback + getAsyncStatus(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback query(params?: T.SqlQueryRequest, options?: TransportRequestOptions): TransportRequestPromise> query(callback: callbackFn): TransportRequestCallback query(params: T.SqlQueryRequest, callback: callbackFn): TransportRequestCallback diff --git a/api/types.d.ts b/api/types.d.ts index 74d62ffe7..b32273e32 100644 --- a/api/types.d.ts +++ b/api/types.d.ts @@ -12833,9 +12833,9 @@ export interface SecurityClearCachedRealmsRequest extends RequestBase { } export interface SecurityClearCachedRealmsResponse { + _nodes: NodeStatistics cluster_name: Name nodes: Record - _nodes: NodeStatistics } export interface SecurityClearCachedRolesRequest extends RequestBase { @@ -14760,10 +14760,10 @@ export interface WatcherStatsRequest extends RequestBase { } export interface WatcherStatsResponse { + _nodes: NodeStatistics cluster_name: Name manually_stopped: boolean stats: WatcherStatsWatcherNodeStats[] - _nodes: NodeStatistics } export interface WatcherStatsWatchRecordQueuedStats { From 34571a94f885f2f2320f86fdbd05ce9c9fe1e884 Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 19 Jul 2021 16:58:03 +0200 Subject: [PATCH 039/647] Bumped v8.0.0-canary.14 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index fdd8fc05a..8d6176c1f 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.13", + "versionCanary": "8.0.0-canary.14", "keywords": [ "elasticsearch", "elastic", From 28e57dd2e414214f220b2226d910e7c2116011c0 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Sat, 24 Jul 2021 14:44:32 +0200 Subject: [PATCH 040/647] Do not check for 100% coverage (#1501) --- .github/workflows/nodejs.yml | 4 ++-- package.json | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index f65a2278b..38550268f 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -125,7 +125,7 @@ jobs: strategy: matrix: - node-version: [12.x] + node-version: [14.x] steps: - uses: actions/checkout@v2 @@ -159,7 +159,7 @@ jobs: strategy: matrix: - node-version: [12.x] + node-version: [14.x] steps: - uses: actions/checkout@v2 diff --git a/package.json b/package.json index 
8d6176c1f..34ec0fc61 100644 --- a/package.json +++ b/package.json @@ -100,6 +100,7 @@ "jsx": false, "flow": false, "coverage": false, - "jobs-auto": true + "jobs-auto": true, + "check-coverage": false } } From 6686316433d08189e3d0a8893e41480113fc8a25 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Sat, 24 Jul 2021 16:54:18 +0200 Subject: [PATCH 041/647] Better message in case of unsupported build flavor (#1498) --- README.md | 4 +- docs/installation.asciidoc | 15 +-- lib/Transport.js | 40 +++++--- test/acceptance/product-check.test.js | 126 +++++++++++++++++++++++++- 4 files changed, 159 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index eefcc47ce..091a2df94 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,9 @@ of `^7.10.0`). ### Compatibility -The library is compatible with all Elasticsearch versions since 5.x, and you should use the same major version of the Elasticsearch instance that you are using. +Elastic language clients are guaranteed to be able to communicate with Elasticsearch or Elastic solutions running on the same major version and greater or equal minor version. + +Language clients are forward compatible; meaning that clients support communicating with greater minor versions of Elasticsearch. Elastic language clients are not guaranteed to be backwards compatible. | Elasticsearch Version | Client Version | | --------------------- |----------------| diff --git a/docs/installation.asciidoc b/docs/installation.asciidoc index 83628a3d6..67aacfdb4 100644 --- a/docs/installation.asciidoc +++ b/docs/installation.asciidoc @@ -17,7 +17,7 @@ To install a specific major version of the client, run the following command: npm install @elastic/elasticsearch@ ---- -To learn more about the supported major versions, please refer to the +To learn more about the supported major versions, please refer to the <>. [discrete] @@ -37,7 +37,7 @@ to support that version for at least another minor release. If you are using the with a version of Node.js that will be unsupported soon, you will see a warning in your logs (the client will start logging the warning with two minors in advance). -Unless you are *always* using a supported version of Node.js, +Unless you are *always* using a supported version of Node.js, we recommend defining the client dependency in your `package.json` with the `~` instead of `^`. In this way, you will lock the dependency on the minor release and not the major. (for example, `~7.10.0` instead @@ -62,9 +62,12 @@ of `^7.10.0`). [[js-compatibility-matrix]] === Compatibility matrix -The library is compatible with all {es} versions since 5.x. We recommend you to -use the same major version of the client as the {es} instance that you are -using. +Elastic language clients are guaranteed to be able to communicate with Elasticsearch +or Elastic solutions running on the same major version and greater or equal minor version. + +Language clients are forward compatible; meaning that clients support communicating +with greater minor versions of Elasticsearch. Elastic language clients are not +guaranteed to be backwards compatible. [%header,cols=2*] |=== @@ -91,4 +94,4 @@ using. WARNING: There is no official support for the browser environment. It exposes your {es} instance to everyone, which could lead to security issues. We recommend you to write a lightweight proxy that uses this client instead, -you can see a proxy example https://github.com/elastic/elasticsearch-js/tree/master/docs/examples/proxy[here]. 
\ No newline at end of file +you can see a proxy example https://github.com/elastic/elasticsearch-js/tree/master/docs/examples/proxy[here]. diff --git a/lib/Transport.js b/lib/Transport.js index 6abcb723f..cd2d7584c 100644 --- a/lib/Transport.js +++ b/lib/Transport.js @@ -69,7 +69,7 @@ class Transport { this.generateRequestId = opts.generateRequestId || generateRequestId() this.name = opts.name this.opaqueIdPrefix = opts.opaqueIdPrefix - this[kProductCheck] = 0 // 0 = to be checked, 1 = checking, 2 = checked-ok, 3 checked-notok + this[kProductCheck] = 0 // 0 = to be checked, 1 = checking, 2 = checked-ok, 3 checked-notok, 4 checked-nodefault this[kApiVersioning] = process.env.ELASTIC_CLIENT_APIVERSIONING === 'true' this.nodeFilter = opts.nodeFilter || defaultNodeFilter @@ -455,9 +455,12 @@ class Transport { prepareRequest() } else { // wait for product check to finish - productCheckEmitter.once('product-check', status => { + productCheckEmitter.once('product-check', (error, status) => { if (status === false) { - const err = new ProductNotSupportedError(result) + const err = error || new ProductNotSupportedError(result) + if (this[kProductCheck] === 4) { + err.message = 'The client noticed that the server is not a supported distribution of Elasticsearch' + } this.emit('request', err, result) process.nextTick(callback, err, result) } else { @@ -470,8 +473,11 @@ class Transport { } } // the product check is finished and it's not Elasticsearch - } else if (this[kProductCheck] === 3) { + } else if (this[kProductCheck] === 3 || this[kProductCheck] === 4) { const err = new ProductNotSupportedError(result) + if (this[kProductCheck] === 4) { + err.message = 'The client noticed that the server is not a supported distribution of Elasticsearch' + } this.emit('request', err, result) process.nextTick(callback, err, result) // the product check finished and it's Elasticsearch @@ -550,42 +556,48 @@ class Transport { if (err.statusCode === 401 || err.statusCode === 403) { this[kProductCheck] = 2 process.emitWarning('The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. 
Some functionality may not be compatible if the server is running an unsupported product.') - productCheckEmitter.emit('product-check', true) + productCheckEmitter.emit('product-check', null, true) } else { this[kProductCheck] = 0 - productCheckEmitter.emit('product-check', false) + productCheckEmitter.emit('product-check', err, false) } } else { debug('Checking elasticsearch version', result.body, result.headers) if (result.body.version == null || typeof result.body.version.number !== 'string') { debug('Can\'t access Elasticsearch version') - return productCheckEmitter.emit('product-check', false) + return productCheckEmitter.emit('product-check', null, false) } const tagline = result.body.tagline const version = result.body.version.number.split('.') const major = Number(version[0]) const minor = Number(version[1]) if (major < 6) { - return productCheckEmitter.emit('product-check', false) + return productCheckEmitter.emit('product-check', null, false) } else if (major >= 6 && major < 7) { if (tagline !== 'You Know, for Search') { debug('Bad tagline') - return productCheckEmitter.emit('product-check', false) + return productCheckEmitter.emit('product-check', null, false) } } else if (major === 7 && minor < 14) { - if (tagline !== 'You Know, for Search' || result.body.version.build_flavor !== 'default') { - debug('Bad tagline or build_flavor') - return productCheckEmitter.emit('product-check', false) + if (tagline !== 'You Know, for Search') { + debug('Bad tagline') + return productCheckEmitter.emit('product-check', null, false) + } + + if (result.body.version.build_flavor !== 'default') { + debug('Bad build_flavor') + this[kProductCheck] = 4 + return productCheckEmitter.emit('product-check', null, false) } } else { if (result.headers['x-elastic-product'] !== 'Elasticsearch') { debug('x-elastic-product not recognized') - return productCheckEmitter.emit('product-check', false) + return productCheckEmitter.emit('product-check', null, false) } } debug('Valid Elasticsearch distribution') this[kProductCheck] = 2 - productCheckEmitter.emit('product-check', true) + productCheckEmitter.emit('product-check', null, true) } }) } diff --git a/test/acceptance/product-check.test.js b/test/acceptance/product-check.test.js index 4b62833c4..e32908aef 100644 --- a/test/acceptance/product-check.test.js +++ b/test/acceptance/product-check.test.js @@ -24,6 +24,7 @@ const { Client } = require('../../') const { connection: { MockConnectionTimeout, + MockConnectionError, buildMockConnection } } = require('../utils') @@ -210,7 +211,7 @@ test('No errors ≤v7.13', t => { }) }) -test('Errors ≤v7.13', t => { +test('Errors ≤v7.13 (tagline)', t => { t.plan(3) const MockConnection = buildMockConnection({ onRequest (params) { @@ -222,7 +223,7 @@ test('Errors ≤v7.13', t => { cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', version: { number: '7.13.0-SNAPSHOT', - build_flavor: 'other', + build_flavor: 'default', build_type: 'docker', build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', build_date: '2021-07-10T01:45:02.136546168Z', @@ -271,6 +272,83 @@ test('Errors ≤v7.13', t => { }) }) +test('Errors ≤v7.13 (build flavor)', t => { + t.plan(5) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '7.13.0-SNAPSHOT', + build_flavor: 'other', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: 
'2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + const req = requests.shift() + if (req.method === 'GET') { + t.error(err) + } else { + t.equal(err.message, 'The client noticed that the server is not a supported distribution of Elasticsearch') + } + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not a supported distribution of Elasticsearch') + }) + + setTimeout(() => { + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not a supported distribution of Elasticsearch') + }) + }, 100) +}) + test('No errors v6', t => { t.plan(7) const MockConnection = buildMockConnection({ @@ -571,7 +649,7 @@ test('500 error', t => { } } }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + t.equal(err.message, 'Response Error') client.search({ index: 'foo', @@ -608,7 +686,7 @@ test('TimeoutError', t => { if (req.method === 'GET') { t.error(err) } else { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + t.equal(err.message, 'Request timed out') } }) @@ -620,7 +698,45 @@ test('TimeoutError', t => { } } }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + t.equal(err.message, 'Request timed out') + }) +}) + +test('ConnectionError', t => { + t.plan(3) + + const requests = [{ + method: 'GET', + path: '/' + }, { + method: 'POST', + path: '/foo/_search' + }] + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnectionError, + maxRetries: 0 + }) + + client.on('request', (err, event) => { + const req = requests.shift() + if (req.method === 'GET') { + t.error(err) + } else { + t.equal(err.message, 'Kaboom') + } + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'Kaboom') }) }) From 728868f3ea7488de0ea23d993e4a79acd14a02ee Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Sat, 24 Jul 2021 17:49:05 +0200 Subject: [PATCH 042/647] Add note about prototype poisoning protection perf (#1503) --- docs/basic-config.asciidoc | 41 ++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index 04e95ca9e..e4e1d3013 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -1,7 +1,7 @@ [[basic-config]] === Basic configuration -This page shows you the possible basic configuration options that the clients +This page shows you the possible basic configuration options that the clients offers. @@ -46,9 +46,9 @@ node: { ---- |`auth` -a|Your authentication data. 
You can use both basic authentication and +a|Your authentication data. You can use both basic authentication and {ref}/security-api-create-api-key.html[ApiKey]. + -See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Authentication] +See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Authentication] for more details. + _Default:_ `null` @@ -141,7 +141,7 @@ const client = new Client({ ---- |`agent` -a|`http.AgentOptions, function` - http agent https://nodejs.org/api/http.html#http_new_agent_options[options], +a|`http.AgentOptions, function` - http agent https://nodejs.org/api/http.html#http_new_agent_options[options], or a function that returns an actual http agent instance. If you want to disable the http agent use entirely (and disable the `keep-alive` feature), set the agent to `false`. + _Default:_ `null` @@ -196,7 +196,7 @@ function nodeSelector (connections) { ---- |`generateRequestId` -a|`function` - function to generate the request id for every request, it takes +a|`function` - function to generate the request id for every request, it takes two parameters, the request parameters and options. + By default it generates an incremental integer for every request. + _Custom function example:_ @@ -233,17 +233,17 @@ such as the client and platform version. + _Default:_ `true` |`cloud` -a|`object` - Custom configuration for connecting to -https://cloud.elastic.co[Elastic Cloud]. See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Authentication] +a|`object` - Custom configuration for connecting to +https://cloud.elastic.co[Elastic Cloud]. See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Authentication] for more details. + -_Default:_ `null` + -_Cloud configuration example:_ +_Default:_ `null` + +_Cloud configuration example:_ [source,js] ---- const client = new Client({ cloud: { id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==' - }, + }, auth: { username: 'elastic', password: 'changeme' } @@ -256,3 +256,24 @@ const client = new Client({ _Default:_ `false` |=== + +[discrete] +==== Performance considerations + +By default, the client will protect you against prototype poisoning attacks. +Read https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08[this article] to learn more. +If needed you can disable prototype poisoning protection entirely or one of the two checks. +Read the `secure-json-parse` https://github.com/fastify/secure-json-parse[documentation] to learn more. + +While it's good to be safe, you should know that security always comes with a cost. +With big enough payloads, this security check could cause a drop in the overall performance, +which might be a problem for your application. +If you know you can trust the data stored in Elasticsearch, you can safely disable this check.
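+Since the option also accepts `'proto'` and `'constructor'`, you can keep one
+check and drop the other. A minimal sketch, assuming `'proto'` maps to the
+`__proto__` check as it does in `secure-json-parse`:
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+
+// Keep the `constructor` check enabled and skip only the `__proto__` check,
+// a middle ground between full protection and none at all.
+const client = new Client({
+  node: 'http://localhost:9200/',
+  disablePrototypePoisoningProtection: 'proto'
+})
+----
+
+The next snippet disables the protection entirely: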
+ +[source,js] +---- +const client = new Client({ + disablePrototypePoisoningProtection: true +}) +---- + From b0a7a21f727ad283a4912095f8db8f6fd6d6e8b2 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Mon, 2 Aug 2021 11:19:28 +0200 Subject: [PATCH 043/647] Support mapbox content type (#1500) --- docs/transport.asciidoc | 7 ++++ lib/Transport.d.ts | 4 +- lib/Transport.js | 14 ++++--- test/unit/transport.test.js | 73 +++++++++++++++++++++++++++++++++++++ 4 files changed, 91 insertions(+), 7 deletions(-) diff --git a/docs/transport.asciidoc b/docs/transport.asciidoc index f4fc722ce..308e7098e 100644 --- a/docs/transport.asciidoc +++ b/docs/transport.asciidoc @@ -32,3 +32,10 @@ class MyTransport extends Transport { } ---- +==== Supported content types + +- `application/json`, in this case the transport will return a plain JavaScript object +- `text/plain`, in this case the transport will return a plain string +- `application/vnd.mapbox-vector-tile`, in this case the transport will return a Buffer +- `application/vnd.elasticsearch+json`, in this case the transport will return a plain JavaScript object + diff --git a/lib/Transport.d.ts b/lib/Transport.d.ts index 912dd96da..2313bf23f 100644 --- a/lib/Transport.d.ts +++ b/lib/Transport.d.ts @@ -155,8 +155,8 @@ export default class Transport { _nextSniff: number; _isSniffing: boolean; constructor(opts: TransportOptions); - request(params: TransportRequestParams, options?: TransportRequestOptions): TransportRequestPromise; - request(params: TransportRequestParams, options?: TransportRequestOptions, callback?: (err: ApiError, result: ApiResponse) => void): TransportRequestCallback; + request, TContext = Context>(params: TransportRequestParams, options?: TransportRequestOptions): TransportRequestPromise>; + request, TContext = Context>(params: TransportRequestParams, options?: TransportRequestOptions, callback?: (err: ApiError, result: ApiResponse) => void): TransportRequestCallback; getConnection(opts: TransportGetConnectionOptions): Connection | null; sniff(opts?: TransportSniffOptions, callback?: (...args: any[]) => void): void; } diff --git a/lib/Transport.js b/lib/Transport.js index cd2d7584c..3e6d9fd3d 100644 --- a/lib/Transport.js +++ b/lib/Transport.js @@ -237,6 +237,7 @@ class Transport { const contentEncoding = (result.headers['content-encoding'] || '').toLowerCase() const isCompressed = contentEncoding.indexOf('gzip') > -1 || contentEncoding.indexOf('deflate') > -1 + const isVectorTile = (result.headers['content-type'] || '').indexOf('application/vnd.mapbox-vector-tile') > -1 /* istanbul ignore else */ if (result.headers['content-length'] !== undefined) { @@ -255,8 +256,9 @@ class Transport { } // if the response is compressed, we must handle it // as buffer for allowing decompression later - let payload = isCompressed ? [] : '' - const onData = isCompressed + // while if it's a vector tile, we should return it as buffer + let payload = isCompressed || isVectorTile ? [] : '' + const onData = isCompressed || isVectorTile ? chunk => { payload.push(chunk) } : chunk => { payload += chunk } const onEnd = err => { @@ -272,7 +274,7 @@ class Transport { if (isCompressed) { unzip(Buffer.concat(payload), onBody) } else { - onBody(null, payload) + onBody(null, isVectorTile ? 
Buffer.concat(payload) : payload) } } @@ -281,7 +283,7 @@ class Transport { onEnd(new Error('Response aborted while reading the body')) } - if (!isCompressed) { + if (!isCompressed && !isVectorTile) { response.setEncoding('utf8') } @@ -297,7 +299,9 @@ class Transport { this.emit('response', err, result) return callback(err, result) } - if (Buffer.isBuffer(payload)) { + + const isVectorTile = (result.headers['content-type'] || '').indexOf('application/vnd.mapbox-vector-tile') > -1 + if (Buffer.isBuffer(payload) && !isVectorTile) { payload = payload.toString() } const isHead = params.method === 'HEAD' diff --git a/test/unit/transport.test.js b/test/unit/transport.test.js index c617bc3af..e1a6aaa9f 100644 --- a/test/unit/transport.test.js +++ b/test/unit/transport.test.js @@ -2689,3 +2689,76 @@ test('The callback with a sync error should be called in the next tick - ndjson' t.type(transportReturn.catch, 'function') t.type(transportReturn.abort, 'function') }) + +test('Support mapbox vector tile', t => { + t.plan(2) + const mvtContent = 'GoMCCgRtZXRhEikSFAAAAQACAQMBBAAFAgYDBwAIBAkAGAMiDwkAgEAagEAAAP8//z8ADxoOX3NoYXJkcy5mYWlsZWQaD19zaGFyZHMuc2tpcHBlZBoSX3NoYXJkcy5zdWNjZXNzZnVsGg1fc2hhcmRzLnRvdGFsGhlhZ2dyZWdhdGlvbnMuX2NvdW50LmNvdW50GhdhZ2dyZWdhdGlvbnMuX2NvdW50LnN1bRoTaGl0cy50b3RhbC5yZWxhdGlvbhoQaGl0cy50b3RhbC52YWx1ZRoJdGltZWRfb3V0GgR0b29rIgIwACICMAIiCRkAAAAAAAAAACIECgJlcSICOAAogCB4Ag==' + + function handler (req, res) { + res.setHeader('Content-Type', 'application/vnd.mapbox-vector-tile') + res.end(Buffer.from(mvtContent, 'base64')) + } + + buildServer(handler, ({ port }, server) => { + const pool = new ConnectionPool({ Connection }) + pool.addConnection(`http://localhost:${port}`) + + const transport = new Transport({ + emit: () => {}, + connectionPool: pool, + serializer: new Serializer(), + maxRetries: 3, + requestTimeout: 30000, + sniffInterval: false, + sniffOnStart: false + }) + skipProductCheck(transport) + + transport.request({ + method: 'GET', + path: '/hello' + }, (err, { body }) => { + t.error(err) + t.same(body.toString('base64'), Buffer.from(mvtContent, 'base64').toString('base64')) + server.stop() + }) + }) +}) + +test('Compressed mapbox vector tile', t => { + t.plan(2) + const mvtContent = 'GoMCCgRtZXRhEikSFAAAAQACAQMBBAAFAgYDBwAIBAkAGAMiDwkAgEAagEAAAP8//z8ADxoOX3NoYXJkcy5mYWlsZWQaD19zaGFyZHMuc2tpcHBlZBoSX3NoYXJkcy5zdWNjZXNzZnVsGg1fc2hhcmRzLnRvdGFsGhlhZ2dyZWdhdGlvbnMuX2NvdW50LmNvdW50GhdhZ2dyZWdhdGlvbnMuX2NvdW50LnN1bRoTaGl0cy50b3RhbC5yZWxhdGlvbhoQaGl0cy50b3RhbC52YWx1ZRoJdGltZWRfb3V0GgR0b29rIgIwACICMAIiCRkAAAAAAAAAACIECgJlcSICOAAogCB4Ag==' + + function handler (req, res) { + const body = gzipSync(Buffer.from(mvtContent, 'base64')) + res.setHeader('Content-Type', 'application/vnd.mapbox-vector-tile') + res.setHeader('Content-Encoding', 'gzip') + res.setHeader('Content-Length', Buffer.byteLength(body)) + res.end(body) + } + + buildServer(handler, ({ port }, server) => { + const pool = new ConnectionPool({ Connection }) + pool.addConnection(`http://localhost:${port}`) + + const transport = new Transport({ + emit: () => {}, + connectionPool: pool, + serializer: new Serializer(), + maxRetries: 3, + requestTimeout: 30000, + sniffInterval: false, + sniffOnStart: false + }) + skipProductCheck(transport) + + transport.request({ + method: 'GET', + path: '/hello' + }, (err, { body }) => { + t.error(err) + t.same(body.toString('base64'), Buffer.from(mvtContent, 'base64').toString('base64')) + server.stop() + }) + }) +}) From 2d1505eb2b7ad05387ca8ba8a6874f580a126ecd Mon Sep 17 00:00:00 2001 
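A usage sketch for the mapbox content-type support above (not part of the commit: the `_mvt` path is only illustrative, and the Buffer return value is the behavior the Transport patch adds for `application/vnd.mapbox-vector-tile` responses):

[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

// When the server answers with
// Content-Type: application/vnd.mapbox-vector-tile, the transport now
// returns the raw protobuf payload as a Buffer instead of a string.
client.transport.request({
  method: 'GET',
  path: '/my-index/_mvt/location/10/12/40' // hypothetical tile endpoint
}, (err, { body }) => {
  if (err) throw err
  console.log(Buffer.isBuffer(body)) // true for vector tile responses
})
----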
From: Tomas Della Vedova Date: Mon, 2 Aug 2021 11:20:31 +0200 Subject: [PATCH 044/647] Support CA fingerprint validation (#1499) Co-authored-by: Aleh Zasypkin Co-authored-by: Ioannis Kakavas --- docs/basic-config.asciidoc | 5 +- docs/connecting.asciidoc | 23 ++++ index.d.ts | 1 + index.js | 14 ++++ lib/Connection.d.ts | 1 + lib/Connection.js | 50 +++++++++++- lib/pool/BaseConnectionPool.js | 3 + lib/pool/index.d.ts | 1 + test/acceptance/sniff.test.js | 1 + test/unit/client.test.js | 86 ++++++++++++++++++++ test/unit/connection.test.js | 139 ++++++++++++++++++++++++++++++++- test/utils/buildServer.js | 17 +++- 12 files changed, 337 insertions(+), 4 deletions(-) diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index e4e1d3013..c800b38c0 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -255,6 +255,10 @@ const client = new Client({ |`boolean`, `'proto'`, `'constructor'` - By default the client will protect you against prototype poisoning attacks. Read https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08[this article] to learn more. If needed you can disable prototype poisoning protection entirely or one of the two checks. Read the `secure-json-parse` https://github.com/fastify/secure-json-parse[documentation] to learn more. + _Default:_ `false` +|`caFingerprint` +|`string` - If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints. + +_Default:_ `null` + |=== [discrete] diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index 529046e36..819e3a64b 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -176,6 +176,29 @@ const client = new Client({ }) ---- +[discrete] +[[auth-ca-fingerprint]] +==== CA fingerprint + +You can configure the client to only trust certificates that are signed by a specific CA certificate (CA certificate pinning) by providing a `caFingerprint` option. This will verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied value. +You must configure a SHA256 digest. + +[source,js] +---- +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://example.com/', + auth: { ... }, + // the fingerprint (SHA256) of the CA certificate that is used to sign the certificate that the Elasticsearch node presents for TLS.
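+  // a hypothetical example value follows; compute the real fingerprint from
+  // your CA certificate, e.g. openssl x509 -in ca.crt -noout -fingerprint -sha256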
+ caFingerprint: '20:0D:CA:FA:76:...', + ssl: { + // might be required if it's a self-signed certificate + rejectUnauthorized: false + } +}) +---- + [discrete] [[client-usage]] diff --git a/index.d.ts b/index.d.ts index e7d00efeb..0dae77481 100644 --- a/index.d.ts +++ b/index.d.ts @@ -118,6 +118,7 @@ interface ClientOptions { password?: string; }; disablePrototypePoisoningProtection?: boolean | 'proto' | 'constructor'; + caFingerprint?: string; } declare class Client { diff --git a/index.js b/index.js index 60c44014a..11b60a56d 100644 --- a/index.js +++ b/index.js @@ -102,6 +102,7 @@ class Client extends ESAPI { suggestCompression: false, compression: false, ssl: null, + caFingerprint: null, agent: null, headers: {}, nodeFilter: null, @@ -116,6 +117,10 @@ class Client extends ESAPI { disablePrototypePoisoningProtection: false }, opts) + if (options.caFingerprint !== null && isHttpConnection(opts.node || opts.nodes)) { + throw new ConfigurationError('You can\'t configure the caFingerprint with a http connection') + } + if (process.env.ELASTIC_CLIENT_APIVERSIONING === 'true') { options.headers = Object.assign({ accept: 'application/vnd.elasticsearch+json; compatible-with=7' }, options.headers) } @@ -146,6 +151,7 @@ class Client extends ESAPI { Connection: options.Connection, auth: options.auth, emit: this[kEventEmitter].emit.bind(this[kEventEmitter]), + caFingerprint: options.caFingerprint, sniffEnabled: options.sniffInterval !== false || options.sniffOnStart !== false || options.sniffOnConnectionFault !== false @@ -315,6 +321,14 @@ function getAuth (node) { } } +function isHttpConnection (node) { + if (Array.isArray(node)) { + return node.some((n) => new URL(n).protocol === 'http:') + } else { + return new URL(node).protocol === 'http:' + } +} + const events = { RESPONSE: 'response', REQUEST: 'request', diff --git a/lib/Connection.d.ts b/lib/Connection.d.ts index 933a6a8eb..6b5c6cb7d 100644 --- a/lib/Connection.d.ts +++ b/lib/Connection.d.ts @@ -40,6 +40,7 @@ export interface ConnectionOptions { roles?: ConnectionRoles; auth?: BasicAuth | ApiKeyAuth; proxy?: string | URL; + caFingerprint?: string; } interface ConnectionRoles { diff --git a/lib/Connection.js b/lib/Connection.js index 6eda7c539..88a154ae6 100644 --- a/lib/Connection.js +++ b/lib/Connection.js @@ -42,6 +42,7 @@ class Connection { this.headers = prepareHeaders(opts.headers, opts.auth) this.deadCount = 0 this.resurrectTimeout = 0 + this.caFingerprint = opts.caFingerprint this._openRequests = 0 this._status = opts.status || Connection.statuses.ALIVE @@ -123,10 +124,36 @@ class Connection { callback(new RequestAbortedError(), null) } + const onSocket = socket => { + /* istanbul ignore else */ + if (!socket.isSessionReused()) { + socket.once('secureConnect', () => { + const issuerCertificate = getIssuerCertificate(socket) + /* istanbul ignore next */ + if (issuerCertificate == null) { + onError(new Error('Invalid or malformed certificate')) + request.once('error', () => {}) // we need to catch the request aborted error + return request.abort() + } + + // Check if fingerprint matches + /* istanbul ignore else */ + if (this.caFingerprint !== issuerCertificate.fingerprint256) { + onError(new Error('Server certificate CA fingerprint does not match the value configured in caFingerprint')) + request.once('error', () => {}) // we need to catch the request aborted error + return request.abort() + } + }) + } + } + request.on('response', onResponse) request.on('timeout', onTimeout) request.on('error', onError) request.on('abort', onAbort) + 
if (this.caFingerprint != null) { + request.on('socket', onSocket) + } // Disables the Nagle algorithm request.setNoDelay(true) @@ -152,6 +179,7 @@ class Connection { request.removeListener('timeout', onTimeout) request.removeListener('error', onError) request.removeListener('abort', onAbort) + request.removeListener('socket', onSocket) cleanedListeners = true } } @@ -340,5 +368,25 @@ function prepareHeaders (headers = {}, auth) { return headers } +function getIssuerCertificate (socket) { + let certificate = socket.getPeerCertificate(true) + while (certificate && Object.keys(certificate).length > 0) { + // invalid certificate + if (certificate.issuerCertificate == null) { + return null + } + + // We have reached the root certificate. + // In case of self-signed certificates, `issuerCertificate` may be a circular reference. + if (certificate.fingerprint256 === certificate.issuerCertificate.fingerprint256) { + break + } + + // continue the loop + certificate = certificate.issuerCertificate + } + return certificate +} + module.exports = Connection -module.exports.internals = { prepareHeaders } +module.exports.internals = { prepareHeaders, getIssuerCertificate } diff --git a/lib/pool/BaseConnectionPool.js b/lib/pool/BaseConnectionPool.js index 2b3081153..80e80a318 100644 --- a/lib/pool/BaseConnectionPool.js +++ b/lib/pool/BaseConnectionPool.js @@ -36,6 +36,7 @@ class BaseConnectionPool { this._ssl = opts.ssl this._agent = opts.agent this._proxy = opts.proxy || null + this._caFingerprint = opts.caFingerprint || null } getConnection () { @@ -72,6 +73,8 @@ class BaseConnectionPool { if (opts.agent == null) opts.agent = this._agent /* istanbul ignore else */ if (opts.proxy == null) opts.proxy = this._proxy + /* istanbul ignore else */ + if (opts.caFingerprint == null) opts.caFingerprint = this._caFingerprint const connection = new this.Connection(opts) diff --git a/lib/pool/index.d.ts b/lib/pool/index.d.ts index c1ebbdad6..7b3f62f94 100644 --- a/lib/pool/index.d.ts +++ b/lib/pool/index.d.ts @@ -31,6 +31,7 @@ interface BaseConnectionPoolOptions { auth?: BasicAuth | ApiKeyAuth; emit: (event: string | symbol, ...args: any[]) => boolean; Connection: typeof Connection; + caFingerprint?: string; } interface ConnectionPoolOptions extends BaseConnectionPoolOptions { diff --git a/test/acceptance/sniff.test.js b/test/acceptance/sniff.test.js index 5dfaa3f76..d18e8a2a9 100644 --- a/test/acceptance/sniff.test.js +++ b/test/acceptance/sniff.test.js @@ -77,6 +77,7 @@ test('Should update the connection pool', t => { t.same(hosts[i], { url: new URL(nodes[id].url), id: id, + caFingerprint: null, roles: { master: true, data: true, diff --git a/test/unit/client.test.js b/test/unit/client.test.js index d9a26c110..fbc45dc82 100644 --- a/test/unit/client.test.js +++ b/test/unit/client.test.js @@ -26,6 +26,7 @@ const intoStream = require('into-stream') const { ConnectionPool, Transport, Connection, errors } = require('../../index') const { CloudConnectionPool } = require('../../lib/pool') const { Client, buildServer } = require('../utils') + let clientVersion = require('../../package.json').version if (clientVersion.includes('-')) { clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' @@ -1498,3 +1499,88 @@ test('Bearer auth', t => { }) }) }) + +test('Check server fingerprint (success)', t => { + t.plan(1) + + function handler (req, res) { + res.end('ok') + } + + buildServer(handler, { secure: true }, ({ port, caFingerprint }, server) => { + const client = new Client({ + node: 
`https://localhost:${port}`,
+      caFingerprint
+    })
+
+    client.info((err, res) => {
+      t.error(err)
+      server.stop()
+    })
+  })
+})
+
+test('Check server fingerprint (failure)', t => {
+  t.plan(2)
+
+  function handler (req, res) {
+    res.end('ok')
+  }
+
+  buildServer(handler, { secure: true }, ({ port }, server) => {
+    const client = new Client({
+      node: `https://localhost:${port}`,
+      caFingerprint: 'FO:OB:AR'
+    })
+
+    client.info((err, res) => {
+      t.ok(err instanceof errors.ConnectionError)
+      t.equal(err.message, 'Server certificate CA fingerprint does not match the value configured in caFingerprint')
+      server.stop()
+    })
+  })
+})
+
+test('caFingerprint can\'t be configured over http / 1', t => {
+  t.plan(2)
+
+  try {
+    new Client({ // eslint-disable-line
+      node: 'http://localhost:9200/',
+      caFingerprint: 'FO:OB:AR'
+    })
+    t.fail('should throw')
+  } catch (err) {
+    t.ok(err instanceof errors.ConfigurationError)
+    t.equal(err.message, 'You can\'t configure the caFingerprint with a http connection')
+  }
+})
+
+test('caFingerprint can\'t be configured over http / 2', t => {
+  t.plan(2)
+
+  try {
+    new Client({ // eslint-disable-line
+      nodes: ['http://localhost:9200/'],
+      caFingerprint: 'FO:OB:AR'
+    })
+    t.fail('should throw')
+  } catch (err) {
+    t.ok(err instanceof errors.ConfigurationError)
+    t.equal(err.message, 'You can\'t configure the caFingerprint with a http connection')
+  }
+})
+
+test('caFingerprint can\'t be configured over http / 3', t => {
+  t.plan(1)
+
+  try {
+    new Client({ // eslint-disable-line
+      nodes: ['https://localhost:9200/'],
+      caFingerprint: 'FO:OB:AR'
+    })
+    t.pass('should not throw')
+  } catch (err) {
+    t.fail('should not throw')
+  }
+})
diff --git a/test/unit/connection.test.js b/test/unit/connection.test.js
index a951a9e6b..5ba1c494a 100644
--- a/test/unit/connection.test.js
+++ b/test/unit/connection.test.js
@@ -28,7 +28,8 @@ const hpagent = require('hpagent')
 const intoStream = require('into-stream')
 const { buildServer } = require('../utils')
 const Connection = require('../../lib/Connection')
-const { TimeoutError, ConfigurationError, RequestAbortedError } = require('../../lib/errors')
+const { TimeoutError, ConfigurationError, RequestAbortedError, ConnectionError } = require('../../lib/errors')
+const { getIssuerCertificate } = Connection.internals
 
 test('Basic (http)', t => {
   t.plan(4)
@@ -947,3 +948,139 @@ test('Abort with a slow body', t => {
 
   setImmediate(() => request.abort())
 })
+
+test('Check server fingerprint (success)', t => {
+  t.plan(2)
+
+  function handler (req, res) {
+    res.end('ok')
+  }
+
+  buildServer(handler, { secure: true }, ({ port, caFingerprint }, server) => {
+    const connection = new Connection({
+      url: new URL(`https://localhost:${port}`),
+      caFingerprint
+    })
+
+    connection.request({
+      path: '/hello',
+      method: 'GET'
+    }, (err, res) => {
+      t.error(err)
+
+      let payload = ''
+      res.setEncoding('utf8')
+      res.on('data', chunk => { payload += chunk })
+      res.on('error', err => t.fail(err))
+      res.on('end', () => {
+        t.equal(payload, 'ok')
+        server.stop()
+      })
+    })
+  })
+})
+
+test('Check server fingerprint (failure)', t => {
+  t.plan(2)
+
+  function handler (req, res) {
+    res.end('ok')
+  }
+
+  buildServer(handler, { secure: true }, ({ port }, server) => {
+    const connection = new Connection({
+      url: new URL(`https://localhost:${port}`),
+      caFingerprint: 'FO:OB:AR'
+    })
+
+    connection.request({
+      path: '/hello',
+      method: 'GET'
+    }, (err, res) => {
+      t.ok(err instanceof ConnectionError)
+      t.equal(err.message, 'Server certificate 
CA fingerprint does not match the value configured in caFingerprint') + server.stop() + }) + }) +}) + +test('getIssuerCertificate returns the root CA', t => { + t.plan(2) + const issuerCertificate = { + fingerprint256: 'BA:ZF:AZ', + subject: { + C: '1', + ST: '1', + L: '1', + O: '1', + OU: '1', + CN: '1' + }, + issuer: { + C: '1', + ST: '1', + L: '1', + O: '1', + OU: '1', + CN: '1' + } + } + issuerCertificate.issuerCertificate = issuerCertificate + + const socket = { + getPeerCertificate (bool) { + t.ok(bool) + return { + fingerprint256: 'FO:OB:AR', + subject: { + C: '1', + ST: '1', + L: '1', + O: '1', + OU: '1', + CN: '1' + }, + issuer: { + C: '2', + ST: '2', + L: '2', + O: '2', + OU: '2', + CN: '2' + }, + issuerCertificate + } + } + } + t.same(getIssuerCertificate(socket), issuerCertificate) +}) + +test('getIssuerCertificate detects invalid/malformed certificates', t => { + t.plan(2) + const socket = { + getPeerCertificate (bool) { + t.ok(bool) + return { + fingerprint256: 'FO:OB:AR', + subject: { + C: '1', + ST: '1', + L: '1', + O: '1', + OU: '1', + CN: '1' + }, + issuer: { + C: '2', + ST: '2', + L: '2', + O: '2', + OU: '2', + CN: '2' + } + // missing issuerCertificate + } + } + } + t.equal(getIssuerCertificate(socket), null) +}) diff --git a/test/utils/buildServer.js b/test/utils/buildServer.js index b47b2fec2..ef907c05f 100644 --- a/test/utils/buildServer.js +++ b/test/utils/buildServer.js @@ -19,6 +19,7 @@ 'use strict' +const crypto = require('crypto') const debug = require('debug')('elasticsearch-test') const stoppable = require('stoppable') @@ -35,6 +36,13 @@ const secureOpts = { cert: readFileSync(join(__dirname, '..', 'fixtures', 'https.cert'), 'utf8') } +const caFingerprint = getFingerprint(secureOpts.cert + .split('\n') + .slice(1, -1) + .map(line => line.trim()) + .join('') +) + let id = 0 function buildServer (handler, opts, cb) { const serverId = id++ @@ -58,7 +66,7 @@ function buildServer (handler, opts, cb) { server.listen(0, () => { const port = server.address().port debug(`Server '${serverId}' booted on port ${port}`) - resolve([Object.assign({}, secureOpts, { port }), server]) + resolve([Object.assign({}, secureOpts, { port, caFingerprint }), server]) }) }) } else { @@ -70,4 +78,11 @@ function buildServer (handler, opts, cb) { } } +function getFingerprint (content, inputEncoding = 'base64', outputEncoding = 'hex') { + const shasum = crypto.createHash('sha256') + shasum.update(content, inputEncoding) + const res = shasum.digest(outputEncoding) + return res.toUpperCase().match(/.{1,2}/g).join(':') +} + module.exports = buildServer From 516f52e303dd2e475d883fad850ba0fb07adadf5 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Wed, 11 Aug 2021 09:40:33 +0200 Subject: [PATCH 045/647] Show the body as string if the response error can't be read as ES error (#1509) * Show the body as string if the response error can't be read as ES error * Updated test --- lib/errors.js | 4 +++- test/acceptance/product-check.test.js | 2 +- test/unit/client.test.js | 31 ++++++++++++++++++++++++++- test/unit/errors.test.js | 26 ++++++++++++++++++++++ 4 files changed, 60 insertions(+), 3 deletions(-) diff --git a/lib/errors.js b/lib/errors.js index 2ec9bc715..bc691c86b 100644 --- a/lib/errors.js +++ b/lib/errors.js @@ -97,8 +97,10 @@ class ResponseError extends ElasticsearchClientError { } else { this.message = meta.body.error.type } + } else if (typeof meta.body === 'object' && meta.body != null) { + this.message = JSON.stringify(meta.body) } else { - this.message = 'Response Error' + 
this.message = meta.body || 'Response Error'
     }
     this.meta = meta
   }
diff --git a/test/acceptance/product-check.test.js b/test/acceptance/product-check.test.js
index e32908aef..30c50eaa2 100644
--- a/test/acceptance/product-check.test.js
+++ b/test/acceptance/product-check.test.js
@@ -649,7 +649,7 @@ test('500 error', t => {
       }
     }
   }, (err, result) => {
-    t.equal(err.message, 'Response Error')
+    t.equal(err.message, '{"error":"kaboom"}')
 
     client.search({
       index: 'foo',
diff --git a/test/unit/client.test.js b/test/unit/client.test.js
index fbc45dc82..cd6484ffe 100644
--- a/test/unit/client.test.js
+++ b/test/unit/client.test.js
@@ -25,7 +25,8 @@ const buffer = require('buffer')
 const intoStream = require('into-stream')
 const { ConnectionPool, Transport, Connection, errors } = require('../../index')
 const { CloudConnectionPool } = require('../../lib/pool')
-const { Client, buildServer } = require('../utils')
+const { Client, buildServer, connection } = require('../utils')
+const { buildMockConnection } = connection
 
 let clientVersion = require('../../package.json').version
 if (clientVersion.includes('-')) {
@@ -1584,3 +1585,31 @@ test('caFingerprint can\'t be configured over http / 3', t => {
     t.fail('should not throw')
   }
 })
+
+test('Error body that is not a json', t => {
+  t.plan(5)
+
+  const MockConnection = buildMockConnection({
+    onRequest (params) {
+      return {
+        statusCode: 400,
+        body: 'error!',
+        headers: { 'content-type': 'text/html' }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: MockConnection,
+    maxRetries: 1
+  })
+
+  client.info((err, result) => {
+    t.ok(err instanceof errors.ResponseError)
+    t.equal(err.name, 'ResponseError')
+    t.equal(err.body, 'error!')
+    t.equal(err.message, 'error!')
+    t.equal(err.statusCode, 400)
+  })
+})
diff --git a/test/unit/errors.test.js b/test/unit/errors.test.js
index 301ca1108..b8db815d6 100644
--- a/test/unit/errors.test.js
+++ b/test/unit/errors.test.js
@@ -197,3 +197,29 @@ test('ResponseError with meaningful message / 3', t => {
   t.equal(err.toString(), JSON.stringify(meta.body))
   t.end()
 })
+
+test('ResponseError with meaningful message when body is not json', t => {
+  const meta = {
+    statusCode: 400,
+    body: 'error!',
+    headers: { 'content-type': 'text/html' }
+  }
+  const err = new errors.ResponseError(meta)
+  t.equal(err.name, 'ResponseError')
+  t.equal(err.message, 'error!')
+  t.equal(err.toString(), JSON.stringify(meta.body))
+  t.end()
+})
+
+test('ResponseError with meaningful message when body is falsy', t => {
+  const meta = {
+    statusCode: 400,
+    body: '',
+    headers: { 'content-type': 'text/plain' }
+  }
+  const err = new errors.ResponseError(meta)
+  t.equal(err.name, 'ResponseError')
+  t.equal(err.message, 'Response Error')
+  t.equal(err.toString(), JSON.stringify(meta.body))
+  t.end()
+})
From 7c59eda3db406f339f5650d4fb3d552f4e857ec7 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Wed, 11 Aug 2021 12:00:42 +0200
Subject: [PATCH 046/647] API generation

---
 api/api/ml.js           |  4 ++++
 api/requestParams.d.ts  |  3 ++-
 docs/reference.asciidoc | 32 +++++++++++++++++++------------
 index.d.ts              | 16 ++++++++--------
 4 files changed, 33 insertions(+), 22 deletions(-)

diff --git a/api/api/ml.js b/api/api/ml.js
index 545403d3b..bcc953e9d 100644
--- a/api/api/ml.js
+++ b/api/api/ml.js
@@ -1056,6 +1056,10 @@ MlApi.prototype.inferTrainedModelDeployment = function mlInferTrainedModelDeploy
     const err = new this[kConfigurationError]('Missing required parameter: model_id or modelId')
     return handleError(err, callback)
   }
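// ---------------------------------------------------------------------------
// NOTE (illustrative, not part of the generated code): with the validation
// added below, a request body becomes mandatory for this API, so a call now
// looks roughly like the following. The model id is a placeholder and, per
// the docs hunk further down, the body carries "the input text to be
// evaluated"; its exact shape depends on the deployed model.
//
//   client.ml.inferTrainedModelDeployment({
//     model_id: 'my-model-id',
//     timeout: '10s',
//     body: { /* the input text to be evaluated */ }
//   }, (err, result) => {
//     if (err) console.log(err)
//   })
// ---------------------------------------------------------------------------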
+ if (params.body == null) { + const err = new this[kConfigurationError]('Missing required parameter: body') + return handleError(err, callback) + } let { method, body, modelId, model_id, ...querystring } = params querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) diff --git a/api/requestParams.d.ts b/api/requestParams.d.ts index 73f98a8cf..151072bc9 100644 --- a/api/requestParams.d.ts +++ b/api/requestParams.d.ts @@ -1737,9 +1737,10 @@ export interface MlGetTrainedModelsStats extends Generic { size?: number; } -export interface MlInferTrainedModelDeployment extends Generic { +export interface MlInferTrainedModelDeployment extends Generic { model_id: string; timeout?: string; + body: T; } export interface MlInfo extends Generic { diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 888f72f82..276398f41 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -1949,7 +1949,7 @@ link:{ref}/cluster-allocation-explain.html[Documentation] + |`boolean` - Return information about disk usage and shard sizes (default: false) |`body` -|`object` - The index, shard, and primary flag to explain. Empty means 'explain the first unassigned shard' +|`object` - The index, shard, and primary flag to explain. Empty means 'explain a randomly-chosen unassigned shard' |=== @@ -7144,17 +7144,22 @@ _Default:_ `100` ---- client.ml.inferTrainedModelDeployment({ model_id: string, - timeout: string + timeout: string, + body: object }) ---- -link:{ref}/ml-df-analytics-apis.html[Documentation] + +link:{ref}/infer-trained-model-deployment.html[Documentation] + [cols=2*] |=== |`model_id` or `modelId` -|`string` - The ID of the model to perform inference on +|`string` - The unique identifier of the trained model. |`timeout` -|`string` - Controls the time to wait for the inference result +|`string` - Controls the amount of time to wait for inference results. + +_Default:_ `10s` + +|`body` +|`object` - The input text to be evaluated. |=== @@ -7621,14 +7626,15 @@ client.ml.startTrainedModelDeployment({ timeout: string }) ---- -link:{ref}/ml-df-analytics-apis.html[Documentation] + +link:{ref}/start-trained-model-deployment.html[Documentation] + [cols=2*] |=== |`model_id` or `modelId` -|`string` - The ID of the model to deploy +|`string` - The unique identifier of the trained model. |`timeout` -|`string` - Controls the time to wait until the model is deployed +|`string` - Controls the amount of time to wait for the model to deploy. + +_Default:_ `20s` |=== @@ -7713,11 +7719,11 @@ client.ml.stopTrainedModelDeployment({ model_id: string }) ---- -link:{ref}/ml-df-analytics-apis.html[Documentation] + +link:{ref}/stop-trained-model-deployment.html[Documentation] + [cols=2*] |=== |`model_id` or `modelId` -|`string` - The ID of the model to undeploy +|`string` - The unique identifier of the trained model. 
|=== @@ -10603,7 +10609,7 @@ client.sql.clearCursor({ body: object }) ---- -link:{ref}/sql-pagination.html[Documentation] + +link:{ref}/clear-sql-cursor-api.html[Documentation] + [cols=2*] |=== |`body` @@ -10690,7 +10696,7 @@ client.sql.query({ body: object }) ---- -link:{ref}/sql-rest-overview.html[Documentation] + +link:{ref}/sql-search-api.html[Documentation] + {jsclient}/sql_query_examples.html[Code Example] + [cols=2*] |=== @@ -10711,7 +10717,7 @@ client.sql.translate({ body: object }) ---- -link:{ref}/sql-translate.html[Documentation] + +link:{ref}/sql-translate-api.html[Documentation] + [cols=2*] |=== |`body` diff --git a/index.d.ts b/index.d.ts index 0dae77481..5de87f4b9 100644 --- a/index.d.ts +++ b/index.d.ts @@ -1650,14 +1650,14 @@ declare class Client { getTrainedModelsStats, TContext = Context>(callback: callbackFn): TransportRequestCallback getTrainedModelsStats, TContext = Context>(params: RequestParams.MlGetTrainedModelsStats, callback: callbackFn): TransportRequestCallback getTrainedModelsStats, TContext = Context>(params: RequestParams.MlGetTrainedModelsStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - infer_trained_model_deployment, TContext = Context>(params?: RequestParams.MlInferTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> - infer_trained_model_deployment, TContext = Context>(callback: callbackFn): TransportRequestCallback - infer_trained_model_deployment, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, callback: callbackFn): TransportRequestCallback - infer_trained_model_deployment, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - inferTrainedModelDeployment, TContext = Context>(params?: RequestParams.MlInferTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> - inferTrainedModelDeployment, TContext = Context>(callback: callbackFn): TransportRequestCallback - inferTrainedModelDeployment, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, callback: callbackFn): TransportRequestCallback - inferTrainedModelDeployment, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + infer_trained_model_deployment, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlInferTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> + infer_trained_model_deployment, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + infer_trained_model_deployment, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, callback: callbackFn): TransportRequestCallback + infer_trained_model_deployment, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + inferTrainedModelDeployment, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlInferTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> + inferTrainedModelDeployment, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + 
inferTrainedModelDeployment, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, callback: callbackFn): TransportRequestCallback + inferTrainedModelDeployment, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback info, TContext = Context>(params?: RequestParams.MlInfo, options?: TransportRequestOptions): TransportRequestPromise> info, TContext = Context>(callback: callbackFn): TransportRequestCallback info, TContext = Context>(params: RequestParams.MlInfo, callback: callbackFn): TransportRequestCallback From c100edcca237f04d0024c57edd7bcb23425f971c Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 11 Aug 2021 12:01:03 +0200 Subject: [PATCH 047/647] Updated types --- api/kibana.d.ts | 6 +- api/new.d.ts | 20 +- api/types.d.ts | 627 ++++++++++++++++++++++++++++++------------------ 3 files changed, 412 insertions(+), 241 deletions(-) diff --git a/api/kibana.d.ts b/api/kibana.d.ts index ebae7c894..7b4c6ea49 100644 --- a/api/kibana.d.ts +++ b/api/kibana.d.ts @@ -345,7 +345,7 @@ interface KibanaClient { info(params?: T.MlInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> openJob(params: T.MlOpenJobRequest, options?: TransportRequestOptions): TransportRequestPromise> postCalendarEvents(params?: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise> - postData(params: T.MlPostJobDataRequest, options?: TransportRequestOptions): TransportRequestPromise> + postData(params: T.MlPostDataRequest, options?: TransportRequestOptions): TransportRequestPromise> previewDataFrameAnalytics(params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> previewDatafeed(params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> putCalendar(params: T.MlPutCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise> @@ -356,7 +356,7 @@ interface KibanaClient { putJob(params: T.MlPutJobRequest, options?: TransportRequestOptions): TransportRequestPromise> putTrainedModel(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> - resetJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + resetJob(params: T.MlResetJobRequest, options?: TransportRequestOptions): TransportRequestPromise> revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> setUpgradeMode(params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): TransportRequestPromise> startDataFrameAnalytics(params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> @@ -366,7 +366,7 @@ interface KibanaClient { updateDataFrameAnalytics(params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> updateDatafeed(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> updateFilter(params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + updateJob(params: T.MlUpdateJobRequest, options?: TransportRequestOptions): 
TransportRequestPromise> updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> upgradeJobSnapshot(params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> validate(params?: T.MlValidateJobRequest, options?: TransportRequestOptions): TransportRequestPromise> diff --git a/api/new.d.ts b/api/new.d.ts index 5a9e4923a..a7d882b02 100644 --- a/api/new.d.ts +++ b/api/new.d.ts @@ -950,9 +950,9 @@ declare class Client { postCalendarEvents(callback: callbackFn): TransportRequestCallback postCalendarEvents(params: T.MlPostCalendarEventsRequest, callback: callbackFn): TransportRequestCallback postCalendarEvents(params: T.MlPostCalendarEventsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postData(params: T.MlPostJobDataRequest, options?: TransportRequestOptions): TransportRequestPromise> - postData(params: T.MlPostJobDataRequest, callback: callbackFn): TransportRequestCallback - postData(params: T.MlPostJobDataRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + postData(params: T.MlPostDataRequest, options?: TransportRequestOptions): TransportRequestPromise> + postData(params: T.MlPostDataRequest, callback: callbackFn): TransportRequestCallback + postData(params: T.MlPostDataRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback previewDataFrameAnalytics(params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> previewDataFrameAnalytics(callback: callbackFn): TransportRequestCallback previewDataFrameAnalytics(params: T.MlPreviewDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback @@ -986,10 +986,9 @@ declare class Client { putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, callback: callbackFn): TransportRequestCallback putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - resetJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - resetJob(callback: callbackFn): TransportRequestCallback - resetJob(params: TODO, callback: callbackFn): TransportRequestCallback - resetJob(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + resetJob(params: T.MlResetJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + resetJob(params: T.MlResetJobRequest, callback: callbackFn): TransportRequestCallback + resetJob(params: T.MlResetJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, callback: callbackFn): TransportRequestCallback revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback @@ -1019,10 +1018,9 @@ declare class Client { updateFilter(params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): TransportRequestPromise> updateFilter(params: T.MlUpdateFilterRequest, callback: callbackFn): TransportRequestCallback updateFilter(params: T.MlUpdateFilterRequest, options: TransportRequestOptions, 
callback: callbackFn): TransportRequestCallback - updateJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - updateJob(callback: callbackFn): TransportRequestCallback - updateJob(params: TODO, callback: callbackFn): TransportRequestCallback - updateJob(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + updateJob(params: T.MlUpdateJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + updateJob(params: T.MlUpdateJobRequest, callback: callbackFn): TransportRequestCallback + updateJob(params: T.MlUpdateJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, callback: callbackFn): TransportRequestCallback updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback diff --git a/api/types.d.ts b/api/types.d.ts index b32273e32..5678de767 100644 --- a/api/types.d.ts +++ b/api/types.d.ts @@ -1014,7 +1014,7 @@ export interface SearchRequest extends RequestBase { query?: QueryDslQueryContainer rescore?: SearchRescore | SearchRescore[] script_fields?: Record - search_after?: (integer | string)[] + search_after?: SearchSortResults size?: integer slice?: SlicedScroll sort?: SearchSort @@ -1129,6 +1129,12 @@ export interface SearchDocValueField { format?: string } +export interface SearchFieldAndFormat { + field: Field + format?: string + include_unmapped?: boolean +} + export interface SearchFieldCollapse { field: Field inner_hits?: SearchInnerHits | SearchInnerHits[] @@ -1242,15 +1248,17 @@ export interface SearchInnerHits { size?: integer from?: integer collapse?: SearchFieldCollapse - docvalue_fields?: Fields + docvalue_fields?: (SearchFieldAndFormat | Field)[] explain?: boolean highlight?: SearchHighlight ignore_unmapped?: boolean - script_fields?: Record + script_fields?: Record seq_no_primary_term?: boolean fields?: Fields sort?: SearchSort _source?: boolean | SearchSourceFilter + stored_field?: Fields + track_scores?: boolean version?: boolean } @@ -1820,6 +1828,8 @@ export interface DateField { include_unmapped?: boolean } +export type DateFormat = string + export type DateMath = string export type DateMathTime = string @@ -1934,6 +1944,8 @@ export type GeoDistanceType = 'arc' | 'plane' export type GeoHashPrecision = number +export type GeoShape = any + export type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains' export type GeoTilePrecision = number @@ -2024,7 +2036,6 @@ export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' export interface MainError extends ErrorCause { headers?: Record - root_cause: ErrorCause[] } export interface MergesStats { @@ -2171,7 +2182,7 @@ export interface Retries { search: long } -export type Routing = string | number +export type Routing = string export type Script = InlineScript | IndexedScript | string @@ -2182,6 +2193,7 @@ export interface ScriptBase { export interface ScriptField { script: Script + ignore_failure?: boolean } export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' @@ -2304,6 +2316,8 @@ export type Time = string | integer export type TimeSpan = string +export type TimeZone = string + export type Timestamp = string export interface Transform { @@ -2366,7 +2380,6 @@ export interface WriteResponseBase 
{ _type?: Type _version: VersionNumber forced_refresh?: boolean - error?: ErrorCause } export type double = number @@ -3533,7 +3546,7 @@ export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { stopwords_path?: string } -export type AnalysisStopWords = string | string[] +export type AnalysisStopWords = string[] export type AnalysisSynonymFormat = 'solr' | 'wordnet' @@ -4031,36 +4044,49 @@ export interface QueryDslBoolQuery extends QueryDslQueryBase { } export interface QueryDslBoostingQuery extends QueryDslQueryBase { - negative_boost?: double - negative?: QueryDslQueryContainer - positive?: QueryDslQueryContainer + negative_boost: double + negative: QueryDslQueryContainer + positive: QueryDslQueryContainer } export interface QueryDslBoundingBox { bottom_right?: QueryDslGeoLocation top_left?: QueryDslGeoLocation + top_right?: QueryDslGeoLocation + bottom_left?: QueryDslGeoLocation + top?: double + left?: double + right?: double + bottom?: double wkt?: string } export type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min' -export interface QueryDslCombinedFieldsQuery { - query: string +export type QueryDslCombinedFieldsOperator = 'or' | 'and' + +export interface QueryDslCombinedFieldsQuery extends QueryDslQueryBase { fields: Field[] - operator?: string + query: string + auto_generate_synonyms_phrase_query?: boolean + operator?: QueryDslCombinedFieldsOperator + mimimum_should_match?: MinimumShouldMatch + zero_terms_query?: QueryDslCombinedFieldsZeroTerms } +export type QueryDslCombinedFieldsZeroTerms = 'none' | 'all' + export interface QueryDslCommonTermsQuery extends QueryDslQueryBase { analyzer?: string cutoff_frequency?: double high_freq_operator?: QueryDslOperator low_freq_operator?: QueryDslOperator minimum_should_match?: MinimumShouldMatch - query?: string + query: string } export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { - filter?: QueryDslQueryContainer + filter: QueryDslQueryContainer } export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase { @@ -4068,6 +4094,18 @@ export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys | { [property: string]: QueryDslDecayPlacement } +export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { +} + +export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { + gt?: DateMath + gte?: DateMath + lt?: DateMath + lte?: DateMath + format?: DateFormat + time_zone?: TimeZone +} + export type QueryDslDecayFunction = QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction export interface QueryDslDecayFunctionBase extends QueryDslScoreFunctionBase { @@ -4082,22 +4120,24 @@ export interface QueryDslDecayPlacement { } export interface QueryDslDisMaxQuery extends QueryDslQueryBase { - queries?: QueryDslQueryContainer[] + queries: QueryDslQueryContainer[] tie_breaker?: double } -export interface QueryDslDistanceFeatureQuery extends QueryDslQueryBase { - origin?: number[] | QueryDslGeoCoordinate | DateMath - pivot?: Distance | Time - field?: Field +export type QueryDslDistanceFeatureQuery = QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery + +export interface QueryDslDistanceFeatureQueryBase extends QueryDslQueryBase { + origin: TOrigin + pivot: TDistance + field: Field } export interface QueryDslExistsQuery extends QueryDslQueryBase { - field?: Field + field: Field } export interface 
QueryDslFieldLookup { - id?: Id + id: Id index?: IndexName path?: Field routing?: Routing @@ -4142,16 +4182,15 @@ export interface QueryDslFuzzyQuery extends QueryDslQueryBase { rewrite?: MultiTermQueryRewrite transpositions?: boolean fuzziness?: Fuzziness - value: any + value: string } -export interface QueryDslGeoBoundingBoxQuery extends QueryDslQueryBase { - bounding_box?: QueryDslBoundingBox +export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { type?: QueryDslGeoExecution validation_method?: QueryDslGeoValidationMethod - top_left?: LatLon - bottom_right?: LatLon } +export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys | + { [property: string]: QueryDslBoundingBox } export type QueryDslGeoCoordinate = string | double[] | QueryDslThreeDimensionalPoint @@ -4160,6 +4199,9 @@ export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys | { [property: string]: QueryDslDecayPlacement } +export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { +} + export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { distance?: Distance distance_type?: GeoDistanceType @@ -4172,22 +4214,28 @@ export type QueryDslGeoExecution = 'memory' | 'indexed' export type QueryDslGeoLocation = string | double[] | QueryDslTwoDimensionalPoint -export interface QueryDslGeoPolygonQuery extends QueryDslQueryBase { - points?: QueryDslGeoLocation[] - validation_method?: QueryDslGeoValidationMethod +export interface QueryDslGeoPolygonPoints { + points: QueryDslGeoLocation[] } -export interface QueryDslGeoShape { - type?: string +export interface QueryDslGeoPolygonQueryKeys extends QueryDslQueryBase { + validation_method?: QueryDslGeoValidationMethod } +export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys | + { [property: string]: QueryDslGeoPolygonPoints } -export interface QueryDslGeoShapeQuery extends QueryDslQueryBase { - ignore_unmapped?: boolean +export interface QueryDslGeoShapeFieldQuery { + shape?: GeoShape indexed_shape?: QueryDslFieldLookup relation?: GeoShapeRelation - shape?: QueryDslGeoShape } +export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase { + ignore_unmapped?: boolean +} +export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys | + { [property: string]: QueryDslGeoShapeFieldQuery } + export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' export interface QueryDslHasChildQuery extends QueryDslQueryBase { @@ -4195,32 +4243,32 @@ export interface QueryDslHasChildQuery extends QueryDslQueryBase { inner_hits?: SearchInnerHits max_children?: integer min_children?: integer - query?: QueryDslQueryContainer + query: QueryDslQueryContainer score_mode?: QueryDslChildScoreMode - type?: RelationName + type: RelationName } export interface QueryDslHasParentQuery extends QueryDslQueryBase { ignore_unmapped?: boolean inner_hits?: SearchInnerHits - parent_type?: RelationName - query?: QueryDslQueryContainer + parent_type: RelationName + query: QueryDslQueryContainer score?: boolean } export interface QueryDslIdsQuery extends QueryDslQueryBase { - values?: Id[] | long[] + values?: Ids } export interface QueryDslIntervalsAllOf { - intervals?: QueryDslIntervalsContainer[] + intervals: QueryDslIntervalsContainer[] max_gaps?: integer ordered?: boolean filter?: QueryDslIntervalsFilter } export interface QueryDslIntervalsAnyOf { - intervals?: QueryDslIntervalsContainer[] + intervals: 
QueryDslIntervalsContainer[] filter?: QueryDslIntervalsFilter } @@ -4249,7 +4297,7 @@ export interface QueryDslIntervalsFuzzy { analyzer?: string fuzziness?: Fuzziness prefix_length?: integer - term?: string + term: string transpositions?: boolean use_field?: Field } @@ -4258,14 +4306,14 @@ export interface QueryDslIntervalsMatch { analyzer?: string max_gaps?: integer ordered?: boolean - query?: string + query: string use_field?: Field filter?: QueryDslIntervalsFilter } export interface QueryDslIntervalsPrefix { analyzer?: string - prefix?: string + prefix: string use_field?: Field } @@ -4280,7 +4328,7 @@ export interface QueryDslIntervalsQuery extends QueryDslQueryBase { export interface QueryDslIntervalsWildcard { analyzer?: string - pattern?: string + pattern: string use_field?: Field } @@ -4288,16 +4336,17 @@ export type QueryDslLike = string | QueryDslLikeDocument export interface QueryDslLikeDocument { doc?: any - fields?: Fields - _id?: Id | number + fields?: Field[] + _id?: Id _type?: Type _index?: IndexName per_field_analyzer?: Record routing?: Routing + version?: VersionNumber + version_type?: VersionType } export interface QueryDslMatchAllQuery extends QueryDslQueryBase { - norm_field?: string } export interface QueryDslMatchBoolPrefixQuery extends QueryDslQueryBase { @@ -4309,7 +4358,7 @@ export interface QueryDslMatchBoolPrefixQuery extends QueryDslQueryBase { minimum_should_match?: MinimumShouldMatch operator?: QueryDslOperator prefix_length?: integer - query?: string + query: string } export interface QueryDslMatchNoneQuery extends QueryDslQueryBase { @@ -4318,15 +4367,16 @@ export interface QueryDslMatchNoneQuery extends QueryDslQueryBase { export interface QueryDslMatchPhrasePrefixQuery extends QueryDslQueryBase { analyzer?: string max_expansions?: integer - query?: string + query: string slop?: integer zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMatchPhraseQuery extends QueryDslQueryBase { analyzer?: string - query?: string + query: string slop?: integer + zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMatchQuery extends QueryDslQueryBase { @@ -4341,16 +4391,17 @@ export interface QueryDslMatchQuery extends QueryDslQueryBase { minimum_should_match?: MinimumShouldMatch operator?: QueryDslOperator prefix_length?: integer - query?: string | float | boolean + query: string | float | boolean zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase { analyzer?: string boost_terms?: double - fields?: Fields + fail_on_unsupported_field?: boolean + fields?: Field[] include?: boolean - like?: QueryDslLike | QueryDslLike[] + like: QueryDslLike | QueryDslLike[] max_doc_freq?: integer max_query_terms?: integer max_word_length?: integer @@ -4379,40 +4430,38 @@ export interface QueryDslMultiMatchQuery extends QueryDslQueryBase { minimum_should_match?: MinimumShouldMatch operator?: QueryDslOperator prefix_length?: integer - query?: string + query: string slop?: integer tie_breaker?: double type?: QueryDslTextQueryType - use_dis_max?: boolean zero_terms_query?: QueryDslZeroTermsQuery } export type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum' -export interface QueryDslNamedQueryKeys { - boost?: float - _name?: string - ignore_unmapped?: boolean -} -export type QueryDslNamedQuery = QueryDslNamedQueryKeys | - { [property: string]: TQuery } - export interface QueryDslNestedQuery extends QueryDslQueryBase { ignore_unmapped?: boolean inner_hits?: SearchInnerHits - path?: Field - 
query?: QueryDslQueryContainer + path: Field + query: QueryDslQueryContainer score_mode?: QueryDslNestedScoreMode } export type QueryDslNestedScoreMode = 'avg' | 'sum' | 'min' | 'max' | 'none' +export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { + gt?: double + gte?: double + lt?: double + lte?: double +} + export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys | { [property: string]: QueryDslDecayPlacement } -export type QueryDslOperator = 'and' | 'or' | 'AND' | 'OR' +export type QueryDslOperator = 'and' | 'or' export interface QueryDslParentIdQuery extends QueryDslQueryBase { id?: Id @@ -4423,22 +4472,24 @@ export interface QueryDslParentIdQuery extends QueryDslQueryBase { export interface QueryDslPercolateQuery extends QueryDslQueryBase { document?: any documents?: any[] - field?: Field + field: Field id?: Id index?: IndexName + name?: string preference?: string routing?: Routing version?: VersionNumber } export interface QueryDslPinnedQuery extends QueryDslQueryBase { - ids?: Id[] | long[] - organic?: QueryDslQueryContainer + ids: Id[] + organic: QueryDslQueryContainer } export interface QueryDslPrefixQuery extends QueryDslQueryBase { rewrite?: MultiTermQueryRewrite value: string + case_insensitive?: boolean } export interface QueryDslQueryBase { @@ -4453,38 +4504,38 @@ export interface QueryDslQueryContainer { combined_fields?: QueryDslCombinedFieldsQuery constant_score?: QueryDslConstantScoreQuery dis_max?: QueryDslDisMaxQuery - distance_feature?: Record | QueryDslDistanceFeatureQuery + distance_feature?: QueryDslDistanceFeatureQuery exists?: QueryDslExistsQuery function_score?: QueryDslFunctionScoreQuery fuzzy?: Record - geo_bounding_box?: QueryDslNamedQuery + geo_bounding_box?: QueryDslGeoBoundingBoxQuery geo_distance?: QueryDslGeoDistanceQuery - geo_polygon?: QueryDslNamedQuery - geo_shape?: QueryDslNamedQuery + geo_polygon?: QueryDslGeoPolygonQuery + geo_shape?: QueryDslGeoShapeQuery has_child?: QueryDslHasChildQuery has_parent?: QueryDslHasParentQuery ids?: QueryDslIdsQuery - intervals?: QueryDslNamedQuery - match?: QueryDslNamedQuery + intervals?: Record + match?: Record match_all?: QueryDslMatchAllQuery - match_bool_prefix?: QueryDslNamedQuery + match_bool_prefix?: Record match_none?: QueryDslMatchNoneQuery - match_phrase?: QueryDslNamedQuery - match_phrase_prefix?: QueryDslNamedQuery + match_phrase?: Record + match_phrase_prefix?: Record more_like_this?: QueryDslMoreLikeThisQuery multi_match?: QueryDslMultiMatchQuery nested?: QueryDslNestedQuery parent_id?: QueryDslParentIdQuery percolate?: QueryDslPercolateQuery pinned?: QueryDslPinnedQuery - prefix?: QueryDslNamedQuery + prefix?: Record query_string?: QueryDslQueryStringQuery - range?: QueryDslNamedQuery - rank_feature?: QueryDslNamedQuery - regexp?: QueryDslNamedQuery + range?: Record + rank_feature?: QueryDslRankFeatureQuery + regexp?: Record script?: QueryDslScriptQuery script_score?: QueryDslScriptScoreQuery - shape?: QueryDslNamedQuery + shape?: QueryDslShapeQuery simple_query_string?: QueryDslSimpleQueryStringQuery span_containing?: QueryDslSpanContainingQuery field_masking_span?: QueryDslSpanFieldMaskingQuery @@ -4493,13 +4544,12 @@ export interface QueryDslQueryContainer { span_near?: QueryDslSpanNearQuery span_not?: QueryDslSpanNotQuery span_or?: QueryDslSpanOrQuery - span_term?: QueryDslNamedQuery + span_term?: Record span_within?: QueryDslSpanWithinQuery - template?: 
QueryDslQueryTemplate - term?: QueryDslNamedQuery - terms?: QueryDslNamedQuery - terms_set?: QueryDslNamedQuery - wildcard?: QueryDslNamedQuery + term?: Record + terms?: QueryDslTermsQuery + terms_set?: Record + wildcard?: Record type?: QueryDslTypeQuery } @@ -4512,7 +4562,7 @@ export interface QueryDslQueryStringQuery extends QueryDslQueryBase { default_operator?: QueryDslOperator enable_position_increments?: boolean escape?: boolean - fields?: Fields + fields?: Field[] fuzziness?: Fuzziness fuzzy_max_expansions?: integer fuzzy_prefix_length?: integer @@ -4522,33 +4572,24 @@ export interface QueryDslQueryStringQuery extends QueryDslQueryBase { max_determinized_states?: integer minimum_should_match?: MinimumShouldMatch phrase_slop?: double - query?: string + query: string quote_analyzer?: string quote_field_suffix?: string rewrite?: MultiTermQueryRewrite tie_breaker?: double - time_zone?: string + time_zone?: TimeZone type?: QueryDslTextQueryType } -export interface QueryDslQueryTemplate { - source: string -} - export interface QueryDslRandomScoreFunction extends QueryDslScoreFunctionBase { field?: Field seed?: long | string } -export interface QueryDslRangeQuery extends QueryDslQueryBase { - gt?: double | DateMath - gte?: double | DateMath - lt?: double | DateMath - lte?: double | DateMath +export type QueryDslRangeQuery = QueryDslDateRangeQuery | QueryDslNumberRangeQuery + +export interface QueryDslRangeQueryBase extends QueryDslQueryBase { relation?: QueryDslRangeRelation - time_zone?: string - from?: double | DateMath - to?: double | DateMath } export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' @@ -4556,14 +4597,36 @@ export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' export interface QueryDslRankFeatureFunction { } +export interface QueryDslRankFeatureFunctionLinear extends QueryDslRankFeatureFunction { +} + +export interface QueryDslRankFeatureFunctionLogarithm extends QueryDslRankFeatureFunction { + scaling_factor: float +} + +export interface QueryDslRankFeatureFunctionSaturation extends QueryDslRankFeatureFunction { + pivot?: float +} + +export interface QueryDslRankFeatureFunctionSigmoid extends QueryDslRankFeatureFunction { + pivot: float + exponent: float +} + export interface QueryDslRankFeatureQuery extends QueryDslQueryBase { - function?: QueryDslRankFeatureFunction + field: Field + saturation?: QueryDslRankFeatureFunctionSaturation + log?: QueryDslRankFeatureFunctionLogarithm + linear?: QueryDslRankFeatureFunctionLinear + sigmoid?: QueryDslRankFeatureFunctionSigmoid } export interface QueryDslRegexpQuery extends QueryDslQueryBase { + case_insensitive?: boolean flags?: string max_determinized_states?: integer - value?: string + rewrite?: MultiTermQueryRewrite + value: string } export interface QueryDslScoreFunctionBase { @@ -4572,7 +4635,7 @@ export interface QueryDslScoreFunctionBase { } export interface QueryDslScriptQuery extends QueryDslQueryBase { - script?: Script + script: Script } export interface QueryDslScriptScoreFunction extends QueryDslScoreFunctionBase { @@ -4580,16 +4643,22 @@ export interface QueryDslScriptScoreFunction extends QueryDslScoreFunctionBase { } export interface QueryDslScriptScoreQuery extends QueryDslQueryBase { - query?: QueryDslQueryContainer - script?: Script + min_score?: float + query: QueryDslQueryContainer + script: Script } -export interface QueryDslShapeQuery extends QueryDslQueryBase { +export interface QueryDslShapeFieldQuery { ignore_unmapped?: boolean indexed_shape?: QueryDslFieldLookup 
relation?: ShapeRelation - shape?: QueryDslGeoShape + shape?: GeoShape +} + +export interface QueryDslShapeQueryKeys extends QueryDslQueryBase { } +export type QueryDslShapeQuery = QueryDslShapeQueryKeys | + { [property: string]: QueryDslShapeFieldQuery } export type QueryDslSimpleQueryStringFlags = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL' @@ -4598,70 +4667,65 @@ export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase { analyze_wildcard?: boolean auto_generate_synonyms_phrase_query?: boolean default_operator?: QueryDslOperator - fields?: Fields + fields?: Field[] flags?: QueryDslSimpleQueryStringFlags | string fuzzy_max_expansions?: integer fuzzy_prefix_length?: integer fuzzy_transpositions?: boolean lenient?: boolean minimum_should_match?: MinimumShouldMatch - query?: string + query: string quote_field_suffix?: string } export interface QueryDslSpanContainingQuery extends QueryDslQueryBase { - big?: QueryDslSpanQuery - little?: QueryDslSpanQuery + big: QueryDslSpanQuery + little: QueryDslSpanQuery } export interface QueryDslSpanFieldMaskingQuery extends QueryDslQueryBase { - field?: Field - query?: QueryDslSpanQuery + field: Field + query: QueryDslSpanQuery } export interface QueryDslSpanFirstQuery extends QueryDslQueryBase { - end?: integer - match?: QueryDslSpanQuery -} - -export interface QueryDslSpanGapQuery extends QueryDslQueryBase { - field?: Field - width?: integer + end: integer + match: QueryDslSpanQuery } export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase { - match?: QueryDslQueryContainer + match: QueryDslQueryContainer } export interface QueryDslSpanNearQuery extends QueryDslQueryBase { - clauses?: QueryDslSpanQuery[] + clauses: QueryDslSpanQuery[] in_order?: boolean slop?: integer } export interface QueryDslSpanNotQuery extends QueryDslQueryBase { dist?: integer - exclude?: QueryDslSpanQuery - include?: QueryDslSpanQuery + exclude: QueryDslSpanQuery + include: QueryDslSpanQuery post?: integer pre?: integer } export interface QueryDslSpanOrQuery extends QueryDslQueryBase { - clauses?: QueryDslSpanQuery[] + clauses: QueryDslSpanQuery[] } -export interface QueryDslSpanQuery extends QueryDslQueryBase { - span_containing?: QueryDslNamedQuery - field_masking_span?: QueryDslNamedQuery - span_first?: QueryDslNamedQuery - span_gap?: QueryDslNamedQuery +export interface QueryDslSpanQuery { + span_containing?: QueryDslSpanContainingQuery + field_masking_span?: QueryDslSpanFieldMaskingQuery + span_first?: QueryDslSpanFirstQuery + span_gap?: Record span_multi?: QueryDslSpanMultiTermQuery - span_near?: QueryDslNamedQuery - span_not?: QueryDslNamedQuery - span_or?: QueryDslNamedQuery - span_term?: QueryDslNamedQuery - span_within?: QueryDslNamedQuery + span_near?: QueryDslSpanNearQuery + span_not?: QueryDslSpanNotQuery + span_or?: QueryDslSpanOrQuery + span_term?: Record + span_within?: QueryDslSpanWithinQuery } export interface QueryDslSpanTermQuery extends QueryDslQueryBase { @@ -4669,27 +4733,37 @@ export interface QueryDslSpanTermQuery extends QueryDslQueryBase { } export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { - big?: QueryDslSpanQuery - little?: QueryDslSpanQuery + big: QueryDslSpanQuery + little: QueryDslSpanQuery } export interface QueryDslTermQuery extends QueryDslQueryBase { - value?: string | float | boolean + value: string | float | boolean + case_insensitive?: boolean } -export interface QueryDslTermsQuery extends QueryDslQueryBase 
{ - terms?: string[] - index?: IndexName - id?: Id - path?: string +export interface QueryDslTermsLookup { + index: IndexName + id: Id + path: Field routing?: Routing } -export interface QueryDslTermsSetQuery extends QueryDslQueryBase { +export interface QueryDslTermsQueryKeys extends QueryDslQueryBase { +} +export type QueryDslTermsQuery = QueryDslTermsQueryKeys | + { [property: string]: string[] | long[] | QueryDslTermsLookup } + +export interface QueryDslTermsSetFieldQuery { minimum_should_match_field?: Field minimum_should_match_script?: Script - terms?: string[] + terms: string[] +} + +export interface QueryDslTermsSetQueryKeys extends QueryDslQueryBase { } +export type QueryDslTermsSetQuery = QueryDslTermsSetQueryKeys | + { [property: string]: QueryDslTermsSetFieldQuery } export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' @@ -4709,6 +4783,7 @@ export interface QueryDslTypeQuery extends QueryDslQueryBase { } export interface QueryDslWildcardQuery extends QueryDslQueryBase { + case_insensitive?: boolean rewrite?: MultiTermQueryRewrite value: string } @@ -4809,7 +4884,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { rescore?: SearchRescore[] routing?: Routing script_fields?: Record - search_after?: any[] + search_after?: SearchSortResults search_type?: SearchType sequence_number_primary_term?: boolean size?: integer @@ -8480,7 +8555,6 @@ export interface IndicesDataStreamsStatsDataStreamsStatsItem { export interface IndicesDataStreamsStatsRequest extends RequestBase { name?: IndexName expand_wildcards?: ExpandWildcards - human?: boolean } export interface IndicesDataStreamsStatsResponse { @@ -10057,20 +10131,21 @@ export interface MigrationDeprecationInfoResponse { export interface MlAnalysisConfig { bucket_span: TimeSpan + categorization_analyzer?: MlCategorizationAnalyzer | string categorization_field_name?: Field categorization_filters?: string[] detectors: MlDetector[] - influencers: Field[] + influencers?: Field[] + model_prune_window?: Time latency?: Time multivariate_by_fields?: boolean per_partition_categorization?: MlPerPartitionCategorization summary_count_field_name?: Field - categorization_analyzer?: MlCategorizationAnalyzer | string } export interface MlAnalysisLimits { categorization_examples_limit?: long - model_memory_limit: string + model_memory_limit?: string } export interface MlAnalysisMemoryLimit { @@ -10158,9 +10233,9 @@ export interface MlCalendarEvent { } export interface MlCategorizationAnalyzer { + char_filter?: (string | AnalysisCharFilter)[] filter?: (string | AnalysisTokenFilter)[] tokenizer?: string | AnalysisTokenizer - char_filter?: (string | AnalysisCharFilter)[] } export type MlCategorizationStatus = 'ok' | 'warn' @@ -10191,26 +10266,28 @@ export type MlChunkingMode = 'auto' | 'manual' | 'off' export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' -export interface MlCustomSettings { +export interface MlCustomSettingsKeys { custom_urls?: XpackUsageUrlConfig[] created_by?: string job_tags?: Record } +export type MlCustomSettings = MlCustomSettingsKeys | + { [property: string]: any } export interface MlDataCounts { bucket_count: long - earliest_record_timestamp: long + earliest_record_timestamp?: long empty_bucket_count: long input_bytes: long input_field_count: long input_record_count: long invalid_date_count: long job_id: Id - last_data_time: long - latest_empty_bucket_timestamp: long - latest_record_timestamp: long - latest_sparse_bucket_timestamp: 
long - latest_bucket_timestamp: long + last_data_time?: long + latest_empty_bucket_timestamp?: long + latest_record_timestamp?: long + latest_sparse_bucket_timestamp?: long + latest_bucket_timestamp?: long missing_field_count: long out_of_order_timestamp_count: long processed_field_count: long @@ -10244,6 +10321,25 @@ export interface MlDatafeed { indices_options?: MlDatafeedIndicesOptions } +export interface MlDatafeedConfig { + aggregations?: Record + aggs?: Record + chunking_config?: MlChunkingConfig + datafeed_id?: Id + delayed_data_check_config?: MlDelayedDataCheckConfig + frequency?: Timestamp + indexes?: Indices + indices?: Indices + indices_options?: MlDatafeedIndicesOptions + job_id?: Id + max_empty_searches?: integer + query?: QueryDslQueryContainer + query_delay?: Timestamp + runtime_mappings?: MappingRuntimeFields + script_fields?: Record + scroll_size?: integer +} + export interface MlDatafeedIndicesOptions { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards @@ -10251,14 +10347,20 @@ export interface MlDatafeedIndicesOptions { ignore_throttled?: boolean } +export interface MlDatafeedRunningState { + real_time_configured: boolean + real_time_running: boolean +} + export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { - assignment_explanation: string + assignment_explanation?: string datafeed_id: Id - node: MlDiscoveryNode + node?: MlDiscoveryNode state: MlDatafeedState timing_stats: MlDatafeedTimingStats + running_state?: MlDatafeedRunningState } export interface MlDatafeedTimingStats { @@ -10267,7 +10369,7 @@ export interface MlDatafeedTimingStats { job_id: Id search_count: long total_search_time_ms: double - average_search_time_per_bucket_ms: number + average_search_time_per_bucket_ms?: number } export interface MlDataframeAnalysis { @@ -10532,10 +10634,11 @@ export interface MlDetector { detector_index?: integer exclude_frequent?: MlExcludeFrequent field_name?: Field - function: string - use_null?: boolean + function?: string over_field_name?: Field partition_field_name?: Field + use_null?: boolean + description?: string } export interface MlDiscoveryNode { @@ -10556,7 +10659,7 @@ export interface MlFilter { export interface MlFilterRef { filter_id: Id - filter_type: MlFilterType + filter_type?: MlFilterType } export type MlFilterType = 'include' | 'exclude' @@ -10592,27 +10695,56 @@ export interface MlInfluence { } export interface MlJob { - allow_lazy_open?: boolean + allow_lazy_open: boolean analysis_config: MlAnalysisConfig analysis_limits?: MlAnalysisLimits - background_persist_interval: Time - create_time: integer + background_persist_interval?: Time + blocked?: MlJobBlocked + create_time?: integer + custom_settings?: MlCustomSettings + daily_model_snapshot_retention_after_days?: long data_description: MlDataDescription - description: string - finished_time: integer + datafeed_config?: MlDatafeed + deleting?: boolean + description?: string + finished_time?: integer + groups?: string[] job_id: Id - job_type: string - model_snapshot_id: Id + job_type?: string + job_version?: VersionString + model_plot_config?: MlModelPlotConfig + model_snapshot_id?: Id model_snapshot_retention_days: long - renormalization_window_days: long - results_index_name?: IndexName + renormalization_window_days?: long + results_index_name: IndexName results_retention_days?: long - groups?: string[] - model_plot_config?: MlModelPlotConfig +} + +export interface MlJobBlocked { + reason: MlJobBlockedReason + task_id?: TaskId +} + 
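// ---------------------------------------------------------------------------
// NOTE (illustrative, not part of the generated diff): the new optional
// `blocked` field on MlJob reports a job held by an in-progress task. Given
// the MlJobBlockedReason union declared just below, a value would look
// roughly like this (the task id is a hypothetical example):
//
//   const blocked: MlJobBlocked = {
//     reason: 'reset',
//     task_id: 'oTUltX4IQMOUUVeiohTt8A:39'
//   }
// ---------------------------------------------------------------------------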
+export type MlJobBlockedReason = 'delete' | 'reset' | 'revert' + +export interface MlJobConfig { + allow_lazy_open?: boolean + analysis_config: MlAnalysisConfig + analysis_limits?: MlAnalysisLimits + background_persist_interval?: Time custom_settings?: MlCustomSettings - job_version?: VersionString - deleting?: boolean daily_model_snapshot_retention_after_days?: long + data_description: MlDataDescription + datafeed_config?: MlDatafeedConfig + description?: string + groups?: string[] + job_id?: Id + job_type?: string + model_plot_config?: MlModelPlotConfig + model_snapshot_retention_days?: long + renormalization_window_days?: long + results_index_name?: IndexName + results_retention_days?: long } export interface MlJobForecastStatistics { @@ -10634,12 +10766,12 @@ export interface MlJobStatistics { } export interface MlJobStats { - assignment_explanation: string + assignment_explanation?: string data_counts: MlDataCounts forecasts_stats: MlJobForecastStatistics job_id: string model_size_stats: MlModelSizeStats - node: MlDiscoveryNode + node?: MlDiscoveryNode open_time?: DateString state: MlJobState timing_stats: MlJobTimingStats @@ -10647,22 +10779,22 @@ export interface MlJobStats { } export interface MlJobTimingStats { - average_bucket_processing_time_ms: double + average_bucket_processing_time_ms?: double bucket_count: long - exponential_average_bucket_processing_time_ms: double + exponential_average_bucket_processing_time_ms?: double exponential_average_bucket_processing_time_per_hour_ms: double job_id: Id total_bucket_processing_time_ms: double - maximum_bucket_processing_time_ms: double - minimum_bucket_processing_time_ms: double + maximum_bucket_processing_time_ms?: double + minimum_bucket_processing_time_ms?: double } export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' export interface MlModelPlotConfig { - terms?: Field - enabled: boolean annotations_enabled?: boolean + enabled?: boolean + terms?: Field } export interface MlModelSizeStats { @@ -10671,9 +10803,9 @@ export interface MlModelSizeStats { log_time: Time memory_status: MlMemoryStatus model_bytes: long - model_bytes_exceeded: long - model_bytes_memory_limit: long - peak_model_bytes: long + model_bytes_exceeded?: long + model_bytes_memory_limit?: long + peak_model_bytes?: long assignment_memory_basis?: string result_type: string total_by_field_count: long @@ -10692,10 +10824,10 @@ export interface MlModelSizeStats { export interface MlModelSnapshot { description?: string job_id: Id - latest_record_time_stamp: integer - latest_result_time_stamp: integer + latest_record_time_stamp?: integer + latest_result_time_stamp?: integer min_version: VersionString - model_size_stats: MlModelSizeStats + model_size_stats?: MlModelSizeStats retain: boolean snapshot_doc_count: long snapshot_id: Id @@ -10962,7 +11094,7 @@ export interface MlEvaluateDataFrameConfusionMatrixPrediction { count: integer } -export interface MlEvaluateDataFrameConfusionMatrixTreshold { +export interface MlEvaluateDataFrameConfusionMatrixThreshold { tp: integer fp: integer tn: integer @@ -11019,7 +11151,7 @@ export interface MlEvaluateDataFrameDataframeOutlierDetectionSummary { auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc precision?: Record recall?: Record - confusion_matrix?: Record + confusion_matrix?: Record } export interface MlEvaluateDataFrameDataframeRegressionSummary { @@ -11418,6 +11550,7 @@ export interface MlOpenJobRequest extends RequestBase { export interface MlOpenJobResponse { opened: boolean + node: Id } export 
interface MlPostCalendarEventsRequest extends RequestBase { @@ -11431,18 +11564,22 @@ export interface MlPostCalendarEventsResponse { events: MlCalendarEvent[] } -export interface MlPostJobDataRequest extends RequestBase { +export type MlPostDataInput = any | MlPostDataMultipleInputs + +export interface MlPostDataMultipleInputs { + data: any[] +} + +export interface MlPostDataRequest extends RequestBase { job_id: Id reset_end?: DateString reset_start?: DateString - body?: { - data?: any[] - } + body?: MlPostDataInput } -export interface MlPostJobDataResponse { +export interface MlPostDataResponse { bucket_count: long - earliest_record_timestamp: integer + earliest_record_timestamp?: integer empty_bucket_count: long input_bytes: long input_field_count: long @@ -11450,7 +11587,7 @@ export interface MlPostJobDataResponse { invalid_date_count: long job_id: Id last_data_time: integer - latest_record_timestamp: integer + latest_record_timestamp?: integer missing_field_count: long out_of_order_timestamp_count: long processed_field_count: long @@ -11480,25 +11617,24 @@ export interface MlPreviewDataFrameAnalyticsResponse { export interface MlPreviewDatafeedRequest extends RequestBase { datafeed_id?: Id body?: { - job_config?: MlJob - datafeed_config?: MlDatafeed + job_config?: MlJobConfig + datafeed_config?: MlDatafeedConfig } } -export interface MlPreviewDatafeedResponse { - data: TDocument[] -} +export type MlPreviewDatafeedResponse = TDocument[] export interface MlPutCalendarRequest extends RequestBase { calendar_id: Id body?: { + job_ids?: Ids description?: string } } export interface MlPutCalendarResponse { calendar_id: Id - description: string + description?: string job_ids: Ids } @@ -11548,12 +11684,13 @@ export interface MlPutDatafeedRequest extends RequestBase { ignore_throttled?: boolean ignore_unavailable?: boolean body?: { + aggs?: Record aggregations?: Record chunking_config?: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Time - indices?: string[] - indexes?: string[] + indices?: Indices + indexes?: Indices indices_options?: MlDatafeedIndicesOptions job_id?: Id max_empty_searches?: integer @@ -11566,15 +11703,15 @@ export interface MlPutDatafeedRequest extends RequestBase { } export interface MlPutDatafeedResponse { - aggregations: Record + aggregations?: Record chunking_config: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig datafeed_id: Id - frequency: Time + frequency?: Time indices: string[] job_id: Id indices_options?: MlDatafeedIndicesOptions - max_empty_searches: integer + max_empty_searches?: integer query: QueryDslQueryContainer query_delay: Time runtime_mappings?: MappingRuntimeFields @@ -11591,7 +11728,7 @@ export interface MlPutFilterRequest extends RequestBase { } export interface MlPutFilterResponse { - description: string + description?: string filter_id: Id items: string[] } @@ -11604,12 +11741,14 @@ export interface MlPutJobRequest extends RequestBase { analysis_limits?: MlAnalysisLimits background_persist_interval: Time custom_settings?: MlCustomSettings - data_description?: MlDataDescription daily_model_snapshot_retention_after_days?: long - groups?: string[] + data_description: MlDataDescription + datafeed_config?: MlDatafeedConfig description?: string + groups?: string[] model_plot_config?: MlModelPlotConfig model_snapshot_retention_days?: long + renormalization_window_days?: long results_index_name?: IndexName results_retention_days?: long } @@ -11618,20 +11757,22 @@ export interface MlPutJobRequest 
extends RequestBase { export interface MlPutJobResponse { allow_lazy_open: boolean analysis_config: MlAnalysisConfig - analysis_limits?: MlAnalysisLimits - background_persist_interval: Time + analysis_limits: MlAnalysisLimits + background_persist_interval?: Time create_time: DateString custom_settings?: MlCustomSettings + daily_model_snapshot_retention_after_days: long data_description: MlDataDescription - daily_model_snapshot_retention_after_days?: long + datafeed_config?: MlDatafeed + description?: string groups?: string[] - description: string job_id: Id job_type: string + job_version: string model_plot_config?: MlModelPlotConfig - model_snapshot_id: Id + model_snapshot_id?: Id model_snapshot_retention_days: long - renormalization_window_days: long + renormalization_window_days?: long results_index_name: string results_retention_days?: long } @@ -11656,9 +11797,18 @@ export interface MlPutTrainedModelAliasRequest extends RequestBase { export interface MlPutTrainedModelAliasResponse extends AcknowledgedResponseBase { } +export interface MlResetJobRequest extends RequestBase { + job_id: Id + wait_for_completion?: boolean +} + +export interface MlResetJobResponse extends AcknowledgedResponseBase { +} + export interface MlRevertModelSnapshotRequest extends RequestBase { job_id: Id snapshot_id: Id + delete_intervening_results?: boolean body?: { delete_intervening_results?: boolean } @@ -11785,7 +11935,27 @@ export interface MlUpdateJobRequest extends RequestBase { } export interface MlUpdateJobResponse { - stub: boolean + allow_lazy_open: boolean + analysis_config: MlAnalysisConfig + analysis_limits: MlAnalysisLimits + background_persist_interval?: Time + create_time: Time + custom_settings?: MlCustomSettings + daily_model_snapshot_retention_after_days: long + data_description: MlDataDescription + datafeed_config?: MlDatafeed + description?: string + groups?: string[] + job_id: Id + job_type: string + job_version: string + finished_time?: Time + model_plot_config?: MlModelPlotConfig + model_snapshot_id?: Id + model_snapshot_retention_days: long + renormalization_window_days?: long + results_index_name: string + results_retention_days?: long } export interface MlUpdateModelSnapshotRequest extends RequestBase { @@ -15270,6 +15440,9 @@ export interface SpecUtilsCommonQueryParameters { source_query_string?: string } +export interface SpecUtilsAdditionalProperty { +} + export interface SpecUtilsCommonCatQueryParameters { format?: string h?: Names From 498199ca60153e3c80996e0312634be871475b54 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 11 Aug 2021 12:11:07 +0200 Subject: [PATCH 048/647] Bumped v8.0.0-canary.15 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 34ec0fc61..0113413ed 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.14", + "versionCanary": "8.0.0-canary.15", "keywords": [ "elasticsearch", "elastic", From def28e33d1ec9879599b925f8b0123039355cd69 Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 16 Aug 2021 11:27:35 +0200 Subject: [PATCH 049/647] API generation --- api/api/search_mvt.js | 87 +++++++++++++++++++++++++++++++++++++++++ api/index.js | 3 ++ api/requestParams.d.ts | 14 +++++++ docs/reference.asciidoc | 61 +++++++++++++++++++++++++++++ index.d.ts | 8 ++++ 5 files changed, 173 insertions(+) create mode 100644 
api/api/search_mvt.js diff --git a/api/api/search_mvt.js b/api/api/search_mvt.js new file mode 100644 index 000000000..242622b0d --- /dev/null +++ b/api/api/search_mvt.js @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +'use strict' + +/* eslint camelcase: 0 */ +/* eslint no-unused-vars: 0 */ + +const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') +const acceptedQuerystring = ['exact_bounds', 'extent', 'grid_precision', 'grid_type', 'size', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] +const snakeCase = { exactBounds: 'exact_bounds', gridPrecision: 'grid_precision', gridType: 'grid_type', errorTrace: 'error_trace', filterPath: 'filter_path' } + +function searchMvtApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + // check required parameters + if (params.index == null) { + const err = new this[kConfigurationError]('Missing required parameter: index') + return handleError(err, callback) + } + if (params.field == null) { + const err = new this[kConfigurationError]('Missing required parameter: field') + return handleError(err, callback) + } + if (params.zoom == null) { + const err = new this[kConfigurationError]('Missing required parameter: zoom') + return handleError(err, callback) + } + if (params.x == null) { + const err = new this[kConfigurationError]('Missing required parameter: x') + return handleError(err, callback) + } + if (params.y == null) { + const err = new this[kConfigurationError]('Missing required parameter: y') + return handleError(err, callback) + } + + // check required url components + if (params.y != null && (params.x == null || params.zoom == null || params.field == null || params.index == null)) { + const err = new this[kConfigurationError]('Missing required parameter of the url: x, zoom, field, index') + return handleError(err, callback) + } else if (params.x != null && (params.zoom == null || params.field == null || params.index == null)) { + const err = new this[kConfigurationError]('Missing required parameter of the url: zoom, field, index') + return handleError(err, callback) + } else if (params.zoom != null && (params.field == null || params.index == null)) { + const err = new this[kConfigurationError]('Missing required parameter of the url: field, index') + return handleError(err, callback) + } else if (params.field != null && (params.index == null)) { + const err = new this[kConfigurationError]('Missing required parameter of the url: index') + return handleError(err, callback) + } + + let { method, body, index, field, zoom, x, y, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) 
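+  // default the HTTP method when the caller did not set one: GET for body-less requests, POST when a body is sent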
method = body == null ? 'GET' : 'POST' + path = '/' + encodeURIComponent(index) + '/' + '_mvt' + '/' + encodeURIComponent(field) + '/' + encodeURIComponent(zoom) + '/' + encodeURIComponent(x) + '/' + encodeURIComponent(y) + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + +module.exports = searchMvtApi diff --git a/api/index.js b/api/index.js index 0ea8e1b84..2b37a5bfb 100644 --- a/api/index.js +++ b/api/index.js @@ -74,6 +74,7 @@ const RollupApi = require('./api/rollup') const scriptsPainlessExecuteApi = require('./api/scripts_painless_execute') const scrollApi = require('./api/scroll') const searchApi = require('./api/search') +const searchMvtApi = require('./api/search_mvt') const searchShardsApi = require('./api/search_shards') const searchTemplateApi = require('./api/search_template') const SearchableSnapshotsApi = require('./api/searchable_snapshots') @@ -200,6 +201,7 @@ ESAPI.prototype.renderSearchTemplate = renderSearchTemplateApi ESAPI.prototype.scriptsPainlessExecute = scriptsPainlessExecuteApi ESAPI.prototype.scroll = scrollApi ESAPI.prototype.search = searchApi +ESAPI.prototype.searchMvt = searchMvtApi ESAPI.prototype.searchShards = searchShardsApi ESAPI.prototype.searchTemplate = searchTemplateApi ESAPI.prototype.termsEnum = termsEnumApi @@ -397,6 +399,7 @@ Object.defineProperties(ESAPI.prototype, { } }, scripts_painless_execute: { get () { return this.scriptsPainlessExecute } }, + search_mvt: { get () { return this.searchMvt } }, search_shards: { get () { return this.searchShards } }, search_template: { get () { return this.searchTemplate } }, searchableSnapshots: { diff --git a/api/requestParams.d.ts b/api/requestParams.d.ts index 151072bc9..682142a2a 100644 --- a/api/requestParams.d.ts +++ b/api/requestParams.d.ts @@ -2180,6 +2180,20 @@ export interface Search extends Generic { body?: T; } +export interface SearchMvt extends Generic { + index: string | string[]; + field: string; + zoom: integer; + x: integer; + y: integer; + exact_bounds?: boolean; + extent?: number; + grid_precision?: number; + grid_type?: 'grid' | 'point'; + size?: number; + body?: T; +} + export interface SearchShards extends Generic { index?: string | string[]; preference?: string; diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 276398f41..4a5e37087 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -8993,6 +8993,67 @@ _Default:_ `5` |=== +[discrete] +=== searchMvt +*Stability:* experimental +[source,ts] +---- +client.searchMvt({ + index: string | string[], + field: string, + zoom: integer, + x: integer, + y: integer, + exact_bounds: boolean, + extent: number, + grid_precision: number, + grid_type: 'grid' | 'point', + size: number, + body: object +}) +---- +link:{ref}/search-vector-tile-api.html[Documentation] + +[cols=2*] +|=== +|`index` +|`string \| string[]` - Comma-separated list of data streams, indices, or aliases to search + +|`field` +|`string` - Field containing geospatial data to return + +|`zoom` +|`integer` - Zoom level for the vector tile to search + +|`x` +|`integer` - X coordinate for the vector tile to search + +|`y` +|`integer` - Y coordinate for the vector tile to search + +|`exact_bounds` or `exactBounds` +|`boolean` - If false, the meta layer's feature is the bounding box of the tile. If true, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. 
+ +|`extent` +|`number` - Size, in pixels, of a side of the vector tile. + +_Default:_ `4096` + +|`grid_precision` or `gridPrecision` +|`number` - Additional zoom levels available through the aggs layer. Accepts 0-8. + +_Default:_ `8` + +|`grid_type` or `gridType` +|`'grid' \| 'point'` - Determines the geometry type for features in the aggs layer. + +_Default:_ `grid` + +|`size` +|`number` - Maximum number of features to return in the hits layer. Accepts 0-10000. + +_Default:_ `10000` + +|`body` +|`object` - Search request body. + +|=== + [discrete] === searchShards diff --git a/index.d.ts b/index.d.ts index 5de87f4b9..365368fdf 100644 --- a/index.d.ts +++ b/index.d.ts @@ -2101,6 +2101,14 @@ declare class Client { search, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback search, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Search, callback: callbackFn): TransportRequestCallback search, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Search, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + search_mvt, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SearchMvt, options?: TransportRequestOptions): TransportRequestPromise> + search_mvt, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + search_mvt, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SearchMvt, callback: callbackFn): TransportRequestCallback + search_mvt, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SearchMvt, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + searchMvt, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SearchMvt, options?: TransportRequestOptions): TransportRequestPromise> + searchMvt, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + searchMvt, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SearchMvt, callback: callbackFn): TransportRequestCallback + searchMvt, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SearchMvt, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback search_shards, TContext = Context>(params?: RequestParams.SearchShards, options?: TransportRequestOptions): TransportRequestPromise> search_shards, TContext = Context>(callback: callbackFn): TransportRequestCallback search_shards, TContext = Context>(params: RequestParams.SearchShards, callback: callbackFn): TransportRequestCallback From 7814527439b34a00e7f5685d6fae70afbf80ecca Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 16 Aug 2021 11:27:58 +0200 Subject: [PATCH 050/647] Updated types --- api/kibana.d.ts | 4 +- api/new.d.ts | 13 ++--- api/types.d.ts | 134 ++++++++++++++++++------------------------------ 3 files changed, 59 insertions(+), 92 deletions(-) diff --git a/api/kibana.d.ts b/api/kibana.d.ts index 7b4c6ea49..a80431ec2 100644 --- a/api/kibana.d.ts +++ b/api/kibana.d.ts @@ -345,7 +345,7 @@ interface KibanaClient { info(params?: T.MlInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> openJob(params: T.MlOpenJobRequest, options?: TransportRequestOptions): TransportRequestPromise> postCalendarEvents(params?: T.MlPostCalendarEventsRequest, 
options?: TransportRequestOptions): TransportRequestPromise> - postData(params: T.MlPostDataRequest, options?: TransportRequestOptions): TransportRequestPromise> + postData(params: T.MlPostJobDataRequest, options?: TransportRequestOptions): TransportRequestPromise> previewDataFrameAnalytics(params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> previewDatafeed(params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> putCalendar(params: T.MlPutCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise> @@ -366,7 +366,7 @@ interface KibanaClient { updateDataFrameAnalytics(params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> updateDatafeed(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> updateFilter(params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateJob(params: T.MlUpdateJobRequest, options?: TransportRequestOptions): TransportRequestPromise> + updateJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> upgradeJobSnapshot(params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> validate(params?: T.MlValidateJobRequest, options?: TransportRequestOptions): TransportRequestPromise> diff --git a/api/new.d.ts b/api/new.d.ts index a7d882b02..c06583c75 100644 --- a/api/new.d.ts +++ b/api/new.d.ts @@ -950,9 +950,9 @@ declare class Client { postCalendarEvents(callback: callbackFn): TransportRequestCallback postCalendarEvents(params: T.MlPostCalendarEventsRequest, callback: callbackFn): TransportRequestCallback postCalendarEvents(params: T.MlPostCalendarEventsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postData(params: T.MlPostDataRequest, options?: TransportRequestOptions): TransportRequestPromise> - postData(params: T.MlPostDataRequest, callback: callbackFn): TransportRequestCallback - postData(params: T.MlPostDataRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + postData(params: T.MlPostJobDataRequest, options?: TransportRequestOptions): TransportRequestPromise> + postData(params: T.MlPostJobDataRequest, callback: callbackFn): TransportRequestCallback + postData(params: T.MlPostJobDataRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback previewDataFrameAnalytics(params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> previewDataFrameAnalytics(callback: callbackFn): TransportRequestCallback previewDataFrameAnalytics(params: T.MlPreviewDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback @@ -1018,9 +1018,10 @@ declare class Client { updateFilter(params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): TransportRequestPromise> updateFilter(params: T.MlUpdateFilterRequest, callback: callbackFn): TransportRequestCallback updateFilter(params: T.MlUpdateFilterRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateJob(params: T.MlUpdateJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateJob(params: T.MlUpdateJobRequest, callback: callbackFn): TransportRequestCallback - updateJob(params: 
T.MlUpdateJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + updateJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> + updateJob(callback: callbackFn): TransportRequestCallback + updateJob(params: TODO, callback: callbackFn): TransportRequestCallback + updateJob(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, callback: callbackFn): TransportRequestCallback updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback diff --git a/api/types.d.ts b/api/types.d.ts index 5678de767..9536d1616 100644 --- a/api/types.d.ts +++ b/api/types.d.ts @@ -10135,7 +10135,7 @@ export interface MlAnalysisConfig { categorization_field_name?: Field categorization_filters?: string[] detectors: MlDetector[] - influencers?: Field[] + influencers: Field[] model_prune_window?: Time latency?: Time multivariate_by_fields?: boolean @@ -10266,28 +10266,26 @@ export type MlChunkingMode = 'auto' | 'manual' | 'off' export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' -export interface MlCustomSettingsKeys { +export interface MlCustomSettings { custom_urls?: XpackUsageUrlConfig[] created_by?: string job_tags?: Record } -export type MlCustomSettings = MlCustomSettingsKeys | - { [property: string]: any } export interface MlDataCounts { bucket_count: long - earliest_record_timestamp?: long + earliest_record_timestamp: long empty_bucket_count: long input_bytes: long input_field_count: long input_record_count: long invalid_date_count: long job_id: Id - last_data_time?: long - latest_empty_bucket_timestamp?: long - latest_record_timestamp?: long - latest_sparse_bucket_timestamp?: long - latest_bucket_timestamp?: long + last_data_time: long + latest_empty_bucket_timestamp: long + latest_record_timestamp: long + latest_sparse_bucket_timestamp: long + latest_bucket_timestamp: long missing_field_count: long out_of_order_timestamp_count: long processed_field_count: long @@ -10328,12 +10326,12 @@ export interface MlDatafeedConfig { datafeed_id?: Id delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Timestamp - indexes?: Indices - indices?: Indices + indexes?: string[] + indices: string[] indices_options?: MlDatafeedIndicesOptions job_id?: Id max_empty_searches?: integer - query?: QueryDslQueryContainer + query: QueryDslQueryContainer query_delay?: Timestamp runtime_mappings?: MappingRuntimeFields script_fields?: Record @@ -10347,20 +10345,14 @@ export interface MlDatafeedIndicesOptions { ignore_throttled?: boolean } -export interface MlDatafeedRunningState { - real_time_configured: boolean - real_time_running: boolean -} - export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { - assignment_explanation?: string + assignment_explanation: string datafeed_id: Id - node?: MlDiscoveryNode + node: MlDiscoveryNode state: MlDatafeedState timing_stats: MlDatafeedTimingStats - running_state?: MlDatafeedRunningState } export interface MlDatafeedTimingStats { @@ -10369,7 +10361,7 @@ export interface MlDatafeedTimingStats { job_id: Id search_count: long total_search_time_ms: double - average_search_time_per_bucket_ms?: number + average_search_time_per_bucket_ms: number } export interface 
MlDataframeAnalysis { @@ -10634,11 +10626,10 @@ export interface MlDetector { detector_index?: integer exclude_frequent?: MlExcludeFrequent field_name?: Field - function?: string + function: string over_field_name?: Field partition_field_name?: Field use_null?: boolean - description?: string } export interface MlDiscoveryNode { @@ -10698,9 +10689,9 @@ export interface MlJob { allow_lazy_open: boolean analysis_config: MlAnalysisConfig analysis_limits?: MlAnalysisLimits - background_persist_interval?: Time + background_persist_interval: Time blocked?: MlJobBlocked - create_time?: integer + create_time: integer custom_settings?: MlCustomSettings daily_model_snapshot_retention_after_days?: long data_description: MlDataDescription @@ -10710,8 +10701,8 @@ export interface MlJob { finished_time?: integer groups?: string[] job_id: Id - job_type?: string - job_version?: VersionString + job_type: string + job_version: VersionString model_plot_config?: MlModelPlotConfig model_snapshot_id?: Id model_snapshot_retention_days: long @@ -10766,12 +10757,12 @@ export interface MlJobStatistics { } export interface MlJobStats { - assignment_explanation?: string + assignment_explanation: string data_counts: MlDataCounts forecasts_stats: MlJobForecastStatistics job_id: string model_size_stats: MlModelSizeStats - node?: MlDiscoveryNode + node: MlDiscoveryNode open_time?: DateString state: MlJobState timing_stats: MlJobTimingStats @@ -10779,14 +10770,14 @@ export interface MlJobStats { } export interface MlJobTimingStats { - average_bucket_processing_time_ms?: double + average_bucket_processing_time_ms: double bucket_count: long - exponential_average_bucket_processing_time_ms?: double + exponential_average_bucket_processing_time_ms: double exponential_average_bucket_processing_time_per_hour_ms: double job_id: Id total_bucket_processing_time_ms: double - maximum_bucket_processing_time_ms?: double - minimum_bucket_processing_time_ms?: double + maximum_bucket_processing_time_ms: double + minimum_bucket_processing_time_ms: double } export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' @@ -10803,9 +10794,9 @@ export interface MlModelSizeStats { log_time: Time memory_status: MlMemoryStatus model_bytes: long - model_bytes_exceeded?: long - model_bytes_memory_limit?: long - peak_model_bytes?: long + model_bytes_exceeded: long + model_bytes_memory_limit: long + peak_model_bytes: long assignment_memory_basis?: string result_type: string total_by_field_count: long @@ -10824,10 +10815,10 @@ export interface MlModelSizeStats { export interface MlModelSnapshot { description?: string job_id: Id - latest_record_time_stamp?: integer - latest_result_time_stamp?: integer + latest_record_time_stamp: integer + latest_result_time_stamp: integer min_version: VersionString - model_size_stats?: MlModelSizeStats + model_size_stats: MlModelSizeStats retain: boolean snapshot_doc_count: long snapshot_id: Id @@ -11550,7 +11541,6 @@ export interface MlOpenJobRequest extends RequestBase { export interface MlOpenJobResponse { opened: boolean - node: Id } export interface MlPostCalendarEventsRequest extends RequestBase { @@ -11564,22 +11554,18 @@ export interface MlPostCalendarEventsResponse { events: MlCalendarEvent[] } -export type MlPostDataInput = any | MlPostDataMultipleInputs - -export interface MlPostDataMultipleInputs { - data: any[] -} - -export interface MlPostDataRequest extends RequestBase { +export interface MlPostJobDataRequest extends RequestBase { job_id: Id reset_end?: DateString reset_start?: DateString - body?: 
MlPostDataInput + body?: { + data?: any[] + } } -export interface MlPostDataResponse { +export interface MlPostJobDataResponse { bucket_count: long - earliest_record_timestamp?: integer + earliest_record_timestamp: integer empty_bucket_count: long input_bytes: long input_field_count: long @@ -11587,7 +11573,7 @@ export interface MlPostDataResponse { invalid_date_count: long job_id: Id last_data_time: integer - latest_record_timestamp?: integer + latest_record_timestamp: integer missing_field_count: long out_of_order_timestamp_count: long processed_field_count: long @@ -11622,19 +11608,20 @@ export interface MlPreviewDatafeedRequest extends RequestBase { } } -export type MlPreviewDatafeedResponse = TDocument[] +export interface MlPreviewDatafeedResponse { + data: TDocument[] +} export interface MlPutCalendarRequest extends RequestBase { calendar_id: Id body?: { - job_ids?: Ids description?: string } } export interface MlPutCalendarResponse { calendar_id: Id - description?: string + description: string job_ids: Ids } @@ -11684,13 +11671,12 @@ export interface MlPutDatafeedRequest extends RequestBase { ignore_throttled?: boolean ignore_unavailable?: boolean body?: { - aggs?: Record aggregations?: Record chunking_config?: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Time - indices?: Indices - indexes?: Indices + indices?: string[] + indexes?: string[] indices_options?: MlDatafeedIndicesOptions job_id?: Id max_empty_searches?: integer @@ -11703,15 +11689,15 @@ export interface MlPutDatafeedRequest extends RequestBase { } export interface MlPutDatafeedResponse { - aggregations?: Record + aggregations: Record chunking_config: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig datafeed_id: Id - frequency?: Time + frequency: Time indices: string[] job_id: Id indices_options?: MlDatafeedIndicesOptions - max_empty_searches?: integer + max_empty_searches: integer query: QueryDslQueryContainer query_delay: Time runtime_mappings?: MappingRuntimeFields @@ -11728,7 +11714,7 @@ export interface MlPutFilterRequest extends RequestBase { } export interface MlPutFilterResponse { - description?: string + description: string filter_id: Id items: string[] } @@ -11808,7 +11794,6 @@ export interface MlResetJobResponse extends AcknowledgedResponseBase { export interface MlRevertModelSnapshotRequest extends RequestBase { job_id: Id snapshot_id: Id - delete_intervening_results?: boolean body?: { delete_intervening_results?: boolean } @@ -11935,27 +11920,7 @@ export interface MlUpdateJobRequest extends RequestBase { } export interface MlUpdateJobResponse { - allow_lazy_open: boolean - analysis_config: MlAnalysisConfig - analysis_limits: MlAnalysisLimits - background_persist_interval?: Time - create_time: Time - custom_settings?: MlCustomSettings - daily_model_snapshot_retention_after_days: long - data_description: MlDataDescription - datafeed_config?: MlDatafeed - description?: string - groups?: string[] - job_id: Id - job_type: string - job_version: string - finished_time?: Time - model_plot_config?: MlModelPlotConfig - model_snapshot_id?: Id - model_snapshot_retention_days: long - renormalization_window_days?: long - results_index_name: string - results_retention_days?: long + stub: boolean } export interface MlUpdateModelSnapshotRequest extends RequestBase { @@ -13814,6 +13779,7 @@ export interface SnapshotCreateRequest extends RequestBase { ignore_unavailable?: boolean include_global_state?: boolean indices?: Indices + feature_states?: string[] metadata?: 
Metadata partial?: boolean } From 2e70f7ba816a4771f2d901e4d5db44e27a4e3c04 Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 16 Aug 2021 11:28:25 +0200 Subject: [PATCH 051/647] Bumped v8.0.0-canary.16 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 0113413ed..89f80ce4d 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.15", + "versionCanary": "8.0.0-canary.16", "keywords": [ "elasticsearch", "elastic", From 5dab5d0abde5ee72102caed9c74f94dd0b2dbb87 Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 16 Aug 2021 11:52:04 +0200 Subject: [PATCH 052/647] Update README.md --- README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 091a2df94..ca9d6d292 100644 --- a/README.md +++ b/README.md @@ -54,9 +54,8 @@ of `^7.10.0`). ### Compatibility -Elastic language clients are guaranteed to be able to communicate with Elasticsearch or Elastic solutions running on the same major version and greater or equal minor version. - -Language clients are forward compatible; meaning that clients support communicating with greater minor versions of Elasticsearch. +Language clients are forward compatible, meaning that clients support communicating with greater minor versions of Elasticsearch. +Elastic language clients are also backwards compatible with older supported minor versions of Elasticsearch. | Elasticsearch Version | Client Version | | --------------------- |----------------| From 08b80844da023fc08376787d2a2e792833b96417 Mon Sep 17 00:00:00 2001 From: delvedor Date: Tue, 17 Aug 2021 09:53:49 +0200 Subject: [PATCH 053/647] API generation --- api/requestParams.d.ts | 6 +++--- docs/reference.asciidoc | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/api/requestParams.d.ts b/api/requestParams.d.ts index 682142a2a..63bdda001 100644 --- a/api/requestParams.d.ts +++ b/api/requestParams.d.ts @@ -2183,9 +2183,9 @@ export interface Search extends Generic { export interface SearchMvt extends Generic { index: string | string[]; field: string; - zoom: integer; - x: integer; - y: integer; + zoom: number; + x: number; + y: number; exact_bounds?: boolean; extent?: number; grid_precision?: number; diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 4a5e37087..805481e4d 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -9001,9 +9001,9 @@ _Default:_ `5` client.searchMvt({ index: string | string[], field: string, - zoom: integer, - x: integer, - y: integer, + zoom: number, + x: number, + y: number, exact_bounds: boolean, extent: number, grid_precision: number, grid_type: 'grid' | 'point', size: number, body: object }) ---- link:{ref}/search-vector-tile-api.html[Documentation] + [cols=2*] |=== |`index` |`string \| string[]` - Comma-separated list of data streams, indices, or aliases to search + |`field` |`string` - Field containing geospatial data to return |`zoom` |`number` - Zoom level for the vector tile to search |`x` |`number` - X coordinate for the vector tile to search |`y` |`number` - Y coordinate for the vector tile to search |`exact_bounds` or `exactBounds` |`boolean` - If false, the meta layer's feature is the bounding box of the tile.
If true, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. From 969decde2840113d1f45eca4624c532afbdb24e7 Mon Sep 17 00:00:00 2001 From: delvedor Date: Tue, 17 Aug 2021 09:56:19 +0200 Subject: [PATCH 054/647] Bumped v8.0.0-canary.17 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 89f80ce4d..6cdf82807 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.16", + "versionCanary": "8.0.0-canary.17", "keywords": [ "elasticsearch", "elastic", From 4ae38ca7f04b1565d31ba77501fc8897e268d624 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Wed, 18 Aug 2021 17:47:23 +0200 Subject: [PATCH 055/647] Update integration test scripts (#1525) * Updated scripts * Update runner --- .ci/functions/imports.sh | 5 +++-- .ci/run-elasticsearch.sh | 5 +++-- .ci/run-repository.sh | 1 + test/integration/index.js | 4 ++-- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.ci/functions/imports.sh b/.ci/functions/imports.sh index 3fb28cc38..c05f36826 100644 --- a/.ci/functions/imports.sh +++ b/.ci/functions/imports.sh @@ -26,10 +26,11 @@ if [[ -z $es_node_name ]]; then export es_node_name=instance export elastic_password=changeme export elasticsearch_image=elasticsearch - export elasticsearch_url=https://elastic:${elastic_password}@${es_node_name}:9200 + export elasticsearch_scheme="https" if [[ $TEST_SUITE != "platinum" ]]; then - export elasticsearch_url=http://${es_node_name}:9200 + export elasticsearch_scheme="http" fi + export elasticsearch_url=${elasticsearch_scheme}://elastic:${elastic_password}@${es_node_name}:9200 export external_elasticsearch_url=${elasticsearch_url/$es_node_name/localhost} export elasticsearch_container="${elasticsearch_image}:${STACK_VERSION}" diff --git a/.ci/run-elasticsearch.sh b/.ci/run-elasticsearch.sh index 0c27e9b75..89bce8cbd 100755 --- a/.ci/run-elasticsearch.sh +++ b/.ci/run-elasticsearch.sh @@ -7,7 +7,7 @@ # Export the TEST_SUITE variable, eg. 'free' or 'platinum' defaults to 'free'. 
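# A typical invocation might look like this (illustrative; the variable names are the ones
# read by this script and by .ci/functions/imports.sh):
#   STACK_VERSION=8.0.0-SNAPSHOT TEST_SUITE=platinum NUMBER_OF_NODES=1 ./.ci/run-elasticsearch.sh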
# Export the NUMBER_OF_NODES variable to start more than 1 node -# Version 1.4.0 +# Version 1.5.0 # - Initial version of the run-elasticsearch.sh script # - Deleting the volume should not dependent on the container still running # - Fixed `ES_JAVA_OPTS` config @@ -18,6 +18,7 @@ # - Added flags to make local CCR configurations work # - Added action.destructive_requires_name=false as the default will be true in v8 # - Added ingest.geoip.downloader.enabled=false as it causes false positives in testing +# - Moved ELASTIC_PASSWORD to the base arguments for "Security On by default" script_path=$(dirname $(realpath -s $0)) source $script_path/functions/imports.sh @@ -31,6 +32,7 @@ cluster_name=${moniker}${suffix} declare -a volumes environment=($(cat <<-END + --env ELASTIC_PASSWORD=$elastic_password --env node.name=$es_node_name --env cluster.name=$cluster_name --env cluster.initial_master_nodes=$master_node_name @@ -46,7 +48,6 @@ END )) if [[ "$TEST_SUITE" == "platinum" ]]; then environment+=($(cat <<-END - --env ELASTIC_PASSWORD=$elastic_password --env xpack.license.self_generated.type=trial --env xpack.security.enabled=true --env xpack.security.http.ssl.enabled=true diff --git a/.ci/run-repository.sh b/.ci/run-repository.sh index b64c38d96..026595fdf 100755 --- a/.ci/run-repository.sh +++ b/.ci/run-repository.sh @@ -38,6 +38,7 @@ fi docker run \ --network=${network_name} \ --env "TEST_ES_SERVER=${ELASTICSEARCH_URL}" \ + --env "TEST_SUITE=${TEST_SUITE}" \ --volume $repo:/usr/src/app \ --volume /usr/src/app/node_modules \ --name elasticsearch-js \ diff --git a/test/integration/index.js b/test/integration/index.js index 833b7b3ea..098b52073 100644 --- a/test/integration/index.js +++ b/test/integration/index.js @@ -337,10 +337,10 @@ function generateJunitXmlReport (junit, suite) { } if (require.main === module) { - const node = process.env.TEST_ES_SERVER || 'http://localhost:9200/' + const node = process.env.TEST_ES_SERVER || 'https://elastic:changeme@localhost:9200/' const opts = { node, - isXPack: node.indexOf('@') > -1 + isXPack: process.env.TEST_SUITE !== 'free' } runner(opts) } From f891fd53a2a770854e38187578ee23bbd4fbe325 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 18 Aug 2021 17:48:38 +0200 Subject: [PATCH 056/647] Updated CI conf --- ...arch-js+7.13.yml => elastic+elasticsearch-js+7.15.yml} | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) rename .ci/jobs/{elastic+elasticsearch-js+7.13.yml => elastic+elasticsearch-js+7.15.yml} (60%) diff --git a/.ci/jobs/elastic+elasticsearch-js+7.13.yml b/.ci/jobs/elastic+elasticsearch-js+7.15.yml similarity index 60% rename from .ci/jobs/elastic+elasticsearch-js+7.13.yml rename to .ci/jobs/elastic+elasticsearch-js+7.15.yml index 6f57a009d..e37c37998 100644 --- a/.ci/jobs/elastic+elasticsearch-js+7.13.yml +++ b/.ci/jobs/elastic+elasticsearch-js+7.15.yml @@ -1,13 +1,13 @@ --- - job: - name: elastic+elasticsearch-js+7.13 - display-name: 'elastic / elasticsearch-js # 7.13' - description: Testing the elasticsearch-js 7.13 branch. + name: elastic+elasticsearch-js+7.15 + display-name: 'elastic / elasticsearch-js # 7.15' + description: Testing the elasticsearch-js 7.15 branch. junit_results: "*-junit.xml" parameters: - string: name: branch_specifier - default: refs/heads/7.13 + default: refs/heads/7.15 description: the Git branch specifier to build (<branchName>, <tagName>, <commitId>, etc.) 
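      # e.g. refs/heads/7.15 (the default above), a tag name, or a commit SHA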
triggers: From 77bf94a5003f584c6b7e542a5ec321df595fd0ff Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 18 Aug 2021 17:52:14 +0200 Subject: [PATCH 057/647] API generation --- api/api/security.js | 22 ++++++++++++++++++++++ api/requestParams.d.ts | 4 ++++ docs/reference.asciidoc | 17 +++++++++++++++++ index.d.ts | 8 ++++++++ 4 files changed, 51 insertions(+) diff --git a/api/api/security.js b/api/api/security.js index f9269b2bd..4926d6cb8 100644 --- a/api/api/security.js +++ b/api/api/security.js @@ -1074,6 +1074,27 @@ SecurityApi.prototype.putUser = function securityPutUserApi (params, options, ca return this.transport.request(request, options, callback) } +SecurityApi.prototype.queryApiKeys = function securityQueryApiKeysApi (params, options, callback) { + ;[params, options, callback] = normalizeArguments(params, options, callback) + + let { method, body, ...querystring } = params + querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) + + let path = '' + if (method == null) method = body == null ? 'GET' : 'POST' + path = '/' + '_security' + '/' + '_query' + '/' + 'api_key' + + // build request object + const request = { + method, + path, + body: body || '', + querystring + } + + return this.transport.request(request, options, callback) +} + SecurityApi.prototype.samlAuthenticate = function securitySamlAuthenticateApi (params, options, callback) { ;[params, options, callback] = normalizeArguments(params, options, callback) @@ -1272,6 +1293,7 @@ Object.defineProperties(SecurityApi.prototype, { put_role: { get () { return this.putRole } }, put_role_mapping: { get () { return this.putRoleMapping } }, put_user: { get () { return this.putUser } }, + query_api_keys: { get () { return this.queryApiKeys } }, saml_authenticate: { get () { return this.samlAuthenticate } }, saml_complete_logout: { get () { return this.samlCompleteLogout } }, saml_invalidate: { get () { return this.samlInvalidate } }, diff --git a/api/requestParams.d.ts b/api/requestParams.d.ts index 63bdda001..8aab88c42 100644 --- a/api/requestParams.d.ts +++ b/api/requestParams.d.ts @@ -2421,6 +2421,10 @@ export interface SecurityPutUser extends Generic { body: T; } +export interface SecurityQueryApiKeys extends Generic { + body?: T; +} + export interface SecuritySamlAuthenticate extends Generic { body: T; } diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 805481e4d..7bbdfbfb7 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -9994,6 +9994,23 @@ link:{ref}/security-api-put-user.html[Documentation] + |=== +[discrete] +=== security.queryApiKeys + +[source,ts] +---- +client.security.queryApiKeys({ + body: object +}) +---- +link:{ref}/security-api-query-api-key.html[Documentation] + +[cols=2*] +|=== +|`body` +|`object` - From, size, query, sort and search_after + +|=== + [discrete] === security.samlAuthenticate diff --git a/index.d.ts b/index.d.ts index 365368fdf..8e923d6c1 100644 --- a/index.d.ts +++ b/index.d.ts @@ -2462,6 +2462,14 @@ declare class Client { putUser, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback putUser, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityPutUser, callback: callbackFn): TransportRequestCallback putUser, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityPutUser, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + query_api_keys, TRequestBody extends RequestBody = 
Record, TContext = Context>(params?: RequestParams.SecurityQueryApiKeys, options?: TransportRequestOptions): TransportRequestPromise> + query_api_keys, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + query_api_keys, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityQueryApiKeys, callback: callbackFn): TransportRequestCallback + query_api_keys, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityQueryApiKeys, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback + queryApiKeys, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecurityQueryApiKeys, options?: TransportRequestOptions): TransportRequestPromise> + queryApiKeys, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback + queryApiKeys, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityQueryApiKeys, callback: callbackFn): TransportRequestCallback + queryApiKeys, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecurityQueryApiKeys, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback saml_authenticate, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SecuritySamlAuthenticate, options?: TransportRequestOptions): TransportRequestPromise> saml_authenticate, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback saml_authenticate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SecuritySamlAuthenticate, callback: callbackFn): TransportRequestCallback From f161946984b1e66466f1362355064d4b57181e4f Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Fri, 20 Aug 2021 10:39:38 +0200 Subject: [PATCH 058/647] Add warning.name to product check security exception (#1515) --- lib/Transport.js | 5 ++++- test/acceptance/product-check.test.js | 6 ++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/Transport.js b/lib/Transport.js index 3e6d9fd3d..4d609387d 100644 --- a/lib/Transport.js +++ b/lib/Transport.js @@ -559,7 +559,10 @@ class Transport { debug('Product check failed', err) if (err.statusCode === 401 || err.statusCode === 403) { this[kProductCheck] = 2 - process.emitWarning('The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. Some functionality may not be compatible if the server is running an unsupported product.') + process.emitWarning( + 'The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. 
Some functionality may not be compatible if the server is running an unsupported product.', + 'ProductNotSupportedSecurityError' + ) productCheckEmitter.emit('product-check', null, true) } else { this[kProductCheck] = 0 diff --git a/test/acceptance/product-check.test.js b/test/acceptance/product-check.test.js index 30c50eaa2..4f357efd7 100644 --- a/test/acceptance/product-check.test.js +++ b/test/acceptance/product-check.test.js @@ -470,7 +470,7 @@ test('Errors v6', t => { }) test('Auth error - 401', t => { - t.plan(8) + t.plan(9) const MockConnection = buildMockConnection({ onRequest (params) { return { @@ -487,6 +487,7 @@ test('Auth error - 401', t => { process.on('warning', onWarning) function onWarning (warning) { + t.equal(warning.name, 'ProductNotSupportedSecurityError') t.equal(warning.message, 'The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. Some functionality may not be compatible if the server is running an unsupported product.') } @@ -524,7 +525,7 @@ test('Auth error - 401', t => { }) test('Auth error - 403', t => { - t.plan(8) + t.plan(9) const MockConnection = buildMockConnection({ onRequest (params) { return { @@ -541,6 +542,7 @@ test('Auth error - 403', t => { process.on('warning', onWarning) function onWarning (warning) { + t.equal(warning.name, 'ProductNotSupportedSecurityError') t.equal(warning.message, 'The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. Some functionality may not be compatible if the server is running an unsupported product.') } From a0dcace7cd2672a945b2333a245e199699a27789 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Fri, 20 Aug 2021 12:40:25 +0200 Subject: [PATCH 059/647] Fix isHttpConnection check (#1526) --- index.js | 4 ++-- test/unit/client.test.js | 30 ++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/index.js b/index.js index 11b60a56d..6c8212d9a 100644 --- a/index.js +++ b/index.js @@ -323,9 +323,9 @@ function getAuth (node) { function isHttpConnection (node) { if (Array.isArray(node)) { - return node.some((n) => new URL(n).protocol === 'http:') + return node.some((n) => (typeof n === 'string' ? new URL(n).protocol : n.url.protocol) === 'http:') } else { - return new URL(node).protocol === 'http:' + return (typeof node === 'string' ? 
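+    // both node shapes accepted by the client are covered here, e.g. (illustrative):
+    //   new Client({ node: 'http://localhost:9200/' })
+    //   new Client({ node: { url: new URL('http://localhost:9200/') } })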
new URL(node).protocol : node.url.protocol) === 'http:' } } diff --git a/test/unit/client.test.js b/test/unit/client.test.js index cd6484ffe..20c1ce568 100644 --- a/test/unit/client.test.js +++ b/test/unit/client.test.js @@ -1586,6 +1586,36 @@ test('caFingerprint can\'t be configured over http / 3', t => { } }) +test('caFingerprint can\'t be configured over http / 4', t => { + t.plan(2) + + try { + new Client({ // eslint-disable-line + node: { url: new URL('http://localhost:9200/') }, + caFingerprint: 'FO:OB:AR' + }) + t.fail('should throw') + } catch (err) { + t.ok(err instanceof errors.ConfigurationError) + t.equal(err.message, 'You can\'t configure the caFingerprint with a http connection') + } +}) + +test('caFingerprint can\'t be configured over http / 5', t => { + t.plan(2) + + try { + new Client({ // eslint-disable-line + nodes: [{ url: new URL('http://localhost:9200/') }], + caFingerprint: 'FO:OB:AR' + }) + t.fail('should throw') + } catch (err) { + t.ok(err instanceof errors.ConfigurationError) + t.equal(err.message, 'You can\'t configure the caFingerprint with a http connection') + } +}) + test('Error body that is not a json', t => { t.plan(5) From 5447acb5348ae59b422dbf750fc1baf2cb966128 Mon Sep 17 00:00:00 2001 From: delvedor Date: Fri, 20 Aug 2021 12:42:53 +0200 Subject: [PATCH 060/647] Bumped v8.0.0-canary.18 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 6cdf82807..8d2d32582 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.17", + "versionCanary": "8.0.0-canary.18", "keywords": [ "elasticsearch", "elastic", From a9b62049cdf39c151614ee04f853f7b6e0b1ea88 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Mon, 23 Aug 2021 15:29:57 +0200 Subject: [PATCH 061/647] Fix parcel build (#1535) --- test/bundlers/parcel-test/package.json | 1 - 1 file changed, 1 deletion(-) diff --git a/test/bundlers/parcel-test/package.json b/test/bundlers/parcel-test/package.json index 68444883b..aae44d09d 100644 --- a/test/bundlers/parcel-test/package.json +++ b/test/bundlers/parcel-test/package.json @@ -2,7 +2,6 @@ { "name": "parcel-test", "version": "1.0.0", "description": "", - "main": "index.js", "scripts": { "start": "node index.js", "build": "parcel build index.js --no-source-maps" From a7658b2a66e646a95101216622c1ffc3a9e3aabf Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Tue, 24 Aug 2021 11:25:48 +0200 Subject: [PATCH 062/647] Always display request params and options in request event (#1531) --- lib/Transport.js | 4 +-- test/acceptance/product-check.test.js | 38 +++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/lib/Transport.js b/lib/Transport.js index 4d609387d..f6588225e 100644 --- a/lib/Transport.js +++ b/lib/Transport.js @@ -420,8 +420,6 @@ class Transport { // handles request timeout params.timeout = toMs(options.requestTimeout || this.requestTimeout) if (options.asStream === true) params.asStream = true - meta.request.params = params - meta.request.options = options // handle compression if (params.body !== '' && params.body != null) { @@ -452,6 +450,8 @@ class Transport { } } + meta.request.params = params + meta.request.options = options // still need to check the product or waiting for the check to finish if (this[kProductCheck] === 0 || 
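// kProductCheck states, as far as this patch series shows them: 0 = not yet verified
// (also the reset value after a failed check), 1 = verification in flight,
// 2 = skipped because security privileges prevented reading the product metadata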
this[kProductCheck] === 1) { // let pass info requests diff --git a/test/acceptance/product-check.test.js b/test/acceptance/product-check.test.js index 4f357efd7..cdda9449f 100644 --- a/test/acceptance/product-check.test.js +++ b/test/acceptance/product-check.test.js @@ -1247,3 +1247,41 @@ test('No multiple checks with child clients', t => { }) }, 100) }) + +test('Observability events should have all the expected properties', t => { + t.plan(5) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + tagline: 'You Know, for Search' + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (e, event) => { + t.ok(event.meta.request.params) + t.ok(event.meta.request.options) + }) + + client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') + }) +}) From ee50a8e770457f72b3f41e13878cb9615bf6d7d8 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Tue, 24 Aug 2021 11:53:14 +0200 Subject: [PATCH 063/647] Always emit request aborted event (#1534) --- lib/Transport.js | 1 + test/acceptance/product-check.test.js | 63 ++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/lib/Transport.js b/lib/Transport.js index f6588225e..d998e6aa1 100644 --- a/lib/Transport.js +++ b/lib/Transport.js @@ -182,6 +182,7 @@ class Transport { const makeRequest = () => { if (meta.aborted === true) { + this.emit('request', new RequestAbortedError(), result) return process.nextTick(callback, new RequestAbortedError(), result) } meta.connection = this.getConnection({ requestId: meta.request.id }) diff --git a/test/acceptance/product-check.test.js b/test/acceptance/product-check.test.js index cdda9449f..2ed7cec88 100644 --- a/test/acceptance/product-check.test.js +++ b/test/acceptance/product-check.test.js @@ -20,7 +20,7 @@ 'use strict' const { test } = require('tap') -const { Client } = require('../../') +const { Client, errors } = require('../../') const { connection: { MockConnectionTimeout, @@ -1285,3 +1285,64 @@ test('Observability events should have all the expected properties', t => { t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') }) }) + +test('Abort a request while running the product check', t => { + t.plan(4) + const MockConnection = buildMockConnection({ + onRequest (params) { + return { + statusCode: 200, + headers: { + 'x-elastic-product': 'Elasticsearch' + }, + body: { + name: '1ef419078577', + cluster_name: 'docker-cluster', + cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', + version: { + number: '8.0.0-SNAPSHOT', + build_flavor: 'default', + build_type: 'docker', + build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', + build_date: '2021-07-10T01:45:02.136546168Z', + build_snapshot: true, + lucene_version: '8.9.0', + minimum_wire_compatibility_version: '7.15.0', + minimum_index_compatibility_version: '7.0.0' + }, + tagline: 'You Know, for Search' + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + client.on('request', (err, event) => { + if (event.meta.request.params.path.includes('search')) { + t.ok(err 
instanceof errors.RequestAbortedError) + } + }) + + // the response event won't be executed for the search + client.on('response', (err, event) => { + t.error(err) + t.equal(event.meta.request.params.path, '/') + }) + + const req = client.search({ + index: 'foo', + body: { + query: { + match_all: {} + } + } + }, (err, result) => { + t.ok(err instanceof errors.RequestAbortedError) + }) + + setImmediate(() => req.abort()) +}) From 8156252598416ae971c139dd87ecc6eb7f6f3c58 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Tue, 24 Aug 2021 14:54:30 +0200 Subject: [PATCH 064/647] Documentation Update for FaaS use cases (#1522) --- docs/connecting.asciidoc | 71 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index 819e3a64b..c76bf340b 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -8,6 +8,7 @@ This page contains the information you need to connect and use the Client with * <> * <> +* <> * <> * <> * <> @@ -419,6 +420,76 @@ _Default:_ `null` _Default:_ `null` |=== +[discrete] +[[client-faas-env]] +=== Using the Client in a Function-as-a-Service Environment + +This section illustrates the best practices for leveraging the {es} client in a Function-as-a-Service (FaaS) environment. +The most influential optimization is to initialize the client outside of the function, in the global scope. +This practice not only improves performance but also enables background functionality such as https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how[sniffing]. +The following examples provide a skeleton for the best practices. + +[discrete] +==== GCP Cloud Functions + +[source,js] +---- +'use strict' + +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + // client initialisation +}) + +exports.testFunction = async function (req, res) { + // use the client +} +---- + +[discrete] +==== AWS Lambda + +[source,js] +---- +'use strict' + +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + // client initialisation +}) + +exports.handler = async function (event, context) { + // use the client +} +---- + +[discrete] +==== Azure Functions + +[source,js] +---- +'use strict' + +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + // client initialisation +}) + +module.exports = async function (context, req) { + // use the client +} +---- + +Resources used to assess these recommendations: + +- https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations[GCP Cloud Functions: Tips & Tricks] +- https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html[Best practices for working with AWS Lambda functions] +- https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=azurecli-linux%2Capplication-level#global-variables[Azure Functions Python developer guide] +- https://docs.aws.amazon.com/lambda/latest/operatorguide/global-scope.html[AWS Lambda: Comparing the effect of global scope] + [discrete] [[client-connect-proxy]] From 470bc84eb78f9425d8565880f08738749d133caf Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Sat, 28 Aug 2021 18:21:32 +0200 Subject: [PATCH 065/647] Do not use a singleton for EE (#1543) --- lib/Transport.js | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/lib/Transport.js b/lib/Transport.js index
d998e6aa1..ca9477ae6 100644 --- a/lib/Transport.js +++ b/lib/Transport.js @@ -36,13 +36,13 @@ const { const noop = () => {} -const productCheckEmitter = new EventEmitter() const clientVersion = require('../package.json').version const userAgent = `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${process.version})` const MAX_BUFFER_LENGTH = buffer.constants.MAX_LENGTH const MAX_STRING_LENGTH = buffer.constants.MAX_STRING_LENGTH const kProductCheck = Symbol('product check') const kApiVersioning = Symbol('api versioning') +const kEventEmitter = Symbol('event emitter') class Transport { constructor (opts) { @@ -71,6 +71,7 @@ class Transport { this.opaqueIdPrefix = opts.opaqueIdPrefix this[kProductCheck] = 0 // 0 = to be checked, 1 = checking, 2 = checked-ok, 3 checked-notok, 4 checked-nodefault this[kApiVersioning] = process.env.ELASTIC_CLIENT_APIVERSIONING === 'true' + this[kEventEmitter] = new EventEmitter() this.nodeFilter = opts.nodeFilter || defaultNodeFilter if (typeof opts.nodeSelector === 'function') { @@ -460,7 +461,7 @@ class Transport { prepareRequest() } else { // wait for product check to finish - productCheckEmitter.once('product-check', (error, status) => { + this[kEventEmitter].once('product-check', (error, status) => { if (status === false) { const err = error || new ProductNotSupportedError(result) if (this[kProductCheck] === 4) { @@ -564,48 +565,48 @@ class Transport { 'The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. Some functionality may not be compatible if the server is running an unsupported product.', 'ProductNotSupportedSecurityError' ) - productCheckEmitter.emit('product-check', null, true) + this[kEventEmitter].emit('product-check', null, true) } else { this[kProductCheck] = 0 - productCheckEmitter.emit('product-check', err, false) + this[kEventEmitter].emit('product-check', err, false) } } else { debug('Checking elasticsearch version', result.body, result.headers) if (result.body.version == null || typeof result.body.version.number !== 'string') { debug('Can\'t access Elasticsearch version') - return productCheckEmitter.emit('product-check', null, false) + return this[kEventEmitter].emit('product-check', null, false) } const tagline = result.body.tagline const version = result.body.version.number.split('.') const major = Number(version[0]) const minor = Number(version[1]) if (major < 6) { - return productCheckEmitter.emit('product-check', null, false) + return this[kEventEmitter].emit('product-check', null, false) } else if (major >= 6 && major < 7) { if (tagline !== 'You Know, for Search') { debug('Bad tagline') - return productCheckEmitter.emit('product-check', null, false) + return this[kEventEmitter].emit('product-check', null, false) } } else if (major === 7 && minor < 14) { if (tagline !== 'You Know, for Search') { debug('Bad tagline') - return productCheckEmitter.emit('product-check', null, false) + return this[kEventEmitter].emit('product-check', null, false) } if (result.body.version.build_flavor !== 'default') { debug('Bad build_flavor') this[kProductCheck] = 4 - return productCheckEmitter.emit('product-check', null, false) + return this[kEventEmitter].emit('product-check', null, false) } } else { if (result.headers['x-elastic-product'] !== 'Elasticsearch') { debug('x-elastic-product not recognized') - return productCheckEmitter.emit('product-check', null, false) + return this[kEventEmitter].emit('product-check', null, false) } } debug('Valid 
Elasticsearch distribution') this[kProductCheck] = 2 - productCheckEmitter.emit('product-check', null, true) + this[kEventEmitter].emit('product-check', null, true) } }) } From acb77fa3d1badc15091bd330e2fcf87246851799 Mon Sep 17 00:00:00 2001 From: delvedor Date: Sat, 28 Aug 2021 18:25:45 +0200 Subject: [PATCH 066/647] Bumped v8.0.0-canary.19 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 8d2d32582..f0f6f52e5 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.18", + "versionCanary": "8.0.0-canary.19", "keywords": [ "elasticsearch", "elastic", From f981c273d642bc8f0aabc352183d7764958bc4e0 Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 30 Aug 2021 10:24:04 +0200 Subject: [PATCH 067/647] CI: Rename master to main --- .ci/jobs/defaults.yml | 2 +- ...ch-js+master.yml => elastic+elasticsearch-js+main.yml} | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) rename .ci/jobs/{elastic+elasticsearch-js+master.yml => elastic+elasticsearch-js+main.yml} (59%) diff --git a/.ci/jobs/defaults.yml b/.ci/jobs/defaults.yml index 069c3dac8..d105838af 100644 --- a/.ci/jobs/defaults.yml +++ b/.ci/jobs/defaults.yml @@ -15,7 +15,7 @@ parameters: - string: name: branch_specifier - default: refs/heads/master + default: refs/heads/main description: the Git branch specifier to build (<branchName>, <tagName>, <commitId>, etc.) properties: diff --git a/.ci/jobs/elastic+elasticsearch-js+master.yml b/.ci/jobs/elastic+elasticsearch-js+main.yml similarity index 59% rename from .ci/jobs/elastic+elasticsearch-js+master.yml rename to .ci/jobs/elastic+elasticsearch-js+main.yml index c044f5853..b41259007 100644 --- a/.ci/jobs/elastic+elasticsearch-js+master.yml +++ b/.ci/jobs/elastic+elasticsearch-js+main.yml @@ -1,13 +1,13 @@ --- - job: - name: elastic+elasticsearch-js+master - display-name: 'elastic / elasticsearch-js # master' - description: Testing the elasticsearch-js master branch. + name: elastic+elasticsearch-js+main + display-name: 'elastic / elasticsearch-js # main' + description: Testing the elasticsearch-js main branch. junit_results: "*-junit.xml" parameters: - string: name: branch_specifier - default: refs/heads/master + default: refs/heads/main description: the Git branch specifier to build (<branchName>, <tagName>, <commitId>, etc.) 
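The change in [PATCH 065/647] above is easiest to see from the outside: before it, every Transport instance in the process shared the single module-level `productCheckEmitter`, so concurrent clients could observe each other's product-check events; after it, each transport owns its emitter. A minimal sketch of the per-client behaviour, assuming two reachable nodes (both URLs are placeholders):

[source,js]
----
'use strict'

const { Client } = require('@elastic/elasticsearch')

// Two independent clients pointed at hypothetical nodes.
const clientA = new Client({ node: 'http://localhost:9200/' })
const clientB = new Client({ node: 'http://localhost:9201/' })

async function run () {
  // The first request on each client triggers that client's own
  // product check; with the per-instance emitter of patch 065 the
  // two checks can no longer wake up each other's waiting requests.
  await clientA.info()
  await clientB.info()
}

run().catch(console.log)
----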
triggers: From b67d42cb5ff7a39a1836d176266ac32af9e72f07 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Mon, 30 Aug 2021 15:36:14 +0200 Subject: [PATCH 068/647] Add test for mocking library (#1545) --- .github/workflows/nodejs.yml | 21 +++++++++++ test/mock/index.js | 71 ++++++++++++++++++++++++++++++++++++ test/mock/package.json | 18 +++++++++ 3 files changed, 110 insertions(+) create mode 100644 test/mock/index.js create mode 100644 test/mock/package.json diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 38550268f..5c3e8529f 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -119,6 +119,27 @@ jobs: npm start --prefix test/bundlers/rollup-test npm start --prefix test/bundlers/webpack-test + mock-support: + name: Mock support + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Use Node.js 14.x + uses: actions/setup-node@v1 + with: + node-version: 14.x + + - name: Install + run: | + npm install + npm install --prefix test/mock + + - name: Run test + run: | + npm test --prefix test/mock + code-coverage: name: Code coverage runs-on: ubuntu-latest diff --git a/test/mock/index.js b/test/mock/index.js new file mode 100644 index 000000000..d9525299d --- /dev/null +++ b/test/mock/index.js @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +'use strict' + +const { test } = require('tap') +const { Client, errors } = require('../../') +const Mock = require('@elastic/elasticsearch-mock') + +test('Mock should work', async t => { + t.plan(1) + + const mock = new Mock() + const client = new Client({ + node: 'http://localhost:9200/', + Connection: mock.getConnection() + }) + + mock.add({ + method: 'GET', + path: '/_cat/indices' + }, () => { + return { status: 'ok' } + }) + + const response = await client.cat.indices() + t.same(response.body, { status: 'ok' }) +}) + +test('Return an error', async t => { + t.plan(1) + + const mock = new Mock() + const client = new Client({ + node: 'http://localhost:9200/', + Connection: mock.getConnection() + }) + + mock.add({ + method: 'GET', + path: '/_cat/indices' + }, () => { + return new errors.ResponseError({ + body: { errors: {}, status: 500 }, + statusCode: 500 + }) + }) + + try { + await client.cat.indices() + t.fail('Should throw') + } catch (err) { + t.ok(err instanceof errors.ResponseError) + } +}) diff --git a/test/mock/package.json b/test/mock/package.json new file mode 100644 index 000000000..0017d01cd --- /dev/null +++ b/test/mock/package.json @@ -0,0 +1,18 @@ +{ + "name": "mock", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "test": "standard && tap index.js" + }, + "keywords": [], + "author": "", + "license": "ISC", + "dependencies": { + "@elastic/elasticsearch": "file:../..", + "@elastic/elasticsearch-mock": "^0.3.1", + "standard": "^16.0.3", + "tap": "^15.0.9" + } +} From f7b653dff6b986e757688ebd594d6ace3a43066f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Tue, 7 Sep 2021 09:59:38 +0200 Subject: [PATCH 069/647] [DOCS] Adds a link to the Quick Start section that points to an EC ingest example (#1546) --- docs/introduction.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/introduction.asciidoc b/docs/introduction.asciidoc index 154aaaf6f..6cb9df2bc 100644 --- a/docs/introduction.asciidoc +++ b/docs/introduction.asciidoc @@ -133,6 +133,9 @@ async function run () { run().catch(console.log) ---- +TIP: For an elaborate example of how to ingest data into Elastic Cloud, +refer to {cloud}/ec-getting-started-node-js.html[this page]. + [discrete] ==== Install multiple versions From 212c3c4ffeec974e1ad8ef35f2ab9552e42b3d86 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Thu, 16 Sep 2021 08:19:57 +0200 Subject: [PATCH 070/647] Add support for maxResponseSize and maxCompressedResponseSize (#1551) --- docs/basic-config.asciidoc | 8 ++ docs/connecting.asciidoc | 9 ++ index.d.ts | 2 + index.js | 17 ++- lib/Transport.d.ts | 4 + lib/Transport.js | 14 ++- test/unit/client.test.js | 217 +++++++++++++++++++++++++++++++++++++ 7 files changed, 265 insertions(+), 6 deletions(-) diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index c800b38c0..c9b4660dd 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -259,6 +259,14 @@ _Default:_ `false` |`string` - If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints. + _Default:_ `null` +|`maxResponseSize` +|`number` - When configured, it verifies that the uncompressed response size is lower than the configured number, if it's higher it will abort the request. 
It cannot be higher than buffer.constants.MAX_STRING_LENGTH + +_Default:_ `null` + +|`maxCompressedResponseSize` |`number` - When configured, it verifies that the compressed response size is lower than the configured number, if it's higher it will abort the request. It cannot be higher than buffer.constants.MAX_LENGTH + +_Default:_ `null` + +|=== [discrete] diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index c76bf340b..acfbe120f 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -418,6 +418,15 @@ _Default:_ `null` |`context` |`any` - Custom object per request. _(you can use it to pass data to the clients events)_ + _Default:_ `null` + +|`maxResponseSize` +|`number` - When configured, it verifies that the uncompressed response size is lower than the configured number, if it's higher it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENGTH + +_Default:_ `null` + +|`maxCompressedResponseSize` +|`number` - When configured, it verifies that the compressed response size is lower than the configured number, if it's higher it will abort the request. It cannot be higher than buffer.constants.MAX_LENGTH + +_Default:_ `null` + +|=== [discrete] diff --git a/index.d.ts b/index.d.ts index 8e923d6c1..1d7ccae41 100644 --- a/index.d.ts +++ b/index.d.ts @@ -119,6 +119,8 @@ interface ClientOptions { }; disablePrototypePoisoningProtection?: boolean | 'proto' | 'constructor'; caFingerprint?: string; + maxResponseSize?: number; + maxCompressedResponseSize?: number; } declare class Client { diff --git a/index.js b/index.js index 6c8212d9a..d90aa1a1b 100644 --- a/index.js +++ b/index.js @@ -21,6 +21,7 @@ const { EventEmitter } = require('events') const { URL } = require('url') +const buffer = require('buffer') const debug = require('debug')('elasticsearch') const Transport = require('./lib/Transport') const Connection = require('./lib/Connection') @@ -114,9 +115,19 @@ class Client extends ESAPI { context: null, proxy: null, enableMetaHeader: true, - disablePrototypePoisoningProtection: false + disablePrototypePoisoningProtection: false, + maxResponseSize: null, + maxCompressedResponseSize: null }, opts) + if (options.maxResponseSize !== null && options.maxResponseSize > buffer.constants.MAX_STRING_LENGTH) { + throw new ConfigurationError(`The maxResponseSize cannot be bigger than ${buffer.constants.MAX_STRING_LENGTH}`) + } + + if (options.maxCompressedResponseSize !== null && options.maxCompressedResponseSize > buffer.constants.MAX_LENGTH) { + throw new ConfigurationError(`The maxCompressedResponseSize cannot be bigger than ${buffer.constants.MAX_LENGTH}`) + } + if (options.caFingerprint !== null && isHttpConnection(opts.node || opts.nodes)) { throw new ConfigurationError('You can\'t configure the caFingerprint with a http connection') } @@ -178,7 +189,9 @@ class Client extends ESAPI { generateRequestId: options.generateRequestId, name: options.name, opaqueIdPrefix: options.opaqueIdPrefix, - context: options.context + context: options.context, + maxResponseSize: options.maxResponseSize, + maxCompressedResponseSize: options.maxCompressedResponseSize }) this.helpers = new Helpers({ diff --git a/lib/Transport.d.ts b/lib/Transport.d.ts index 2313bf23f..25b770fdb 100644 --- a/lib/Transport.d.ts +++ b/lib/Transport.d.ts @@ -61,6 +61,8 @@ interface TransportOptions { generateRequestId?: generateRequestIdFn; name?: string; opaqueIdPrefix?: string; + maxResponseSize?: number; + maxCompressedResponseSize?: number; } export interface RequestEvent, TContext
= Context> { @@ -113,6 +115,8 @@ export interface TransportRequestOptions { context?: Context; warnings?: string[]; opaqueId?: string; + maxResponseSize?: number; + maxCompressedResponseSize?: number; } export interface TransportRequestCallback { diff --git a/lib/Transport.js b/lib/Transport.js index ca9477ae6..83a218a64 100644 --- a/lib/Transport.js +++ b/lib/Transport.js @@ -43,6 +43,8 @@ const MAX_STRING_LENGTH = buffer.constants.MAX_STRING_LENGTH const kProductCheck = Symbol('product check') const kApiVersioning = Symbol('api versioning') const kEventEmitter = Symbol('event emitter') +const kMaxResponseSize = Symbol('max response size') +const kMaxCompressedResponseSize = Symbol('max compressed response size') class Transport { constructor (opts) { @@ -72,6 +74,8 @@ class Transport { this[kProductCheck] = 0 // 0 = to be checked, 1 = checking, 2 = checked-ok, 3 checked-notok, 4 checked-nodefault this[kApiVersioning] = process.env.ELASTIC_CLIENT_APIVERSIONING === 'true' this[kEventEmitter] = new EventEmitter() + this[kMaxResponseSize] = opts.maxResponseSize || MAX_STRING_LENGTH + this[kMaxCompressedResponseSize] = opts.maxCompressedResponseSize || MAX_BUFFER_LENGTH this.nodeFilter = opts.nodeFilter || defaultNodeFilter if (typeof opts.nodeSelector === 'function') { @@ -162,6 +166,8 @@ class Transport { ? 0 : (typeof options.maxRetries === 'number' ? options.maxRetries : this.maxRetries) const compression = options.compression !== undefined ? options.compression : this.compression + const maxResponseSize = options.maxResponseSize || this[kMaxResponseSize] + const maxCompressedResponseSize = options.maxCompressedResponseSize || this[kMaxCompressedResponseSize] let request = { abort: noop } const transportReturn = { then (onFulfilled, onRejected) { @@ -244,15 +250,15 @@ class Transport { /* istanbul ignore else */ if (result.headers['content-length'] !== undefined) { const contentLength = Number(result.headers['content-length']) - if (isCompressed && contentLength > MAX_BUFFER_LENGTH) { + if (isCompressed && contentLength > maxCompressedResponseSize) { response.destroy() return onConnectionError( - new RequestAbortedError(`The content length (${contentLength}) is bigger than the maximum allowed buffer (${MAX_BUFFER_LENGTH})`, result) + new RequestAbortedError(`The content length (${contentLength}) is bigger than the maximum allowed buffer (${maxCompressedResponseSize})`, result) ) - } else if (contentLength > MAX_STRING_LENGTH) { + } else if (contentLength > maxResponseSize) { response.destroy() return onConnectionError( - new RequestAbortedError(`The content length (${contentLength}) is bigger than the maximum allowed string (${MAX_STRING_LENGTH})`, result) + new RequestAbortedError(`The content length (${contentLength}) is bigger than the maximum allowed string (${maxResponseSize})`, result) ) } } diff --git a/test/unit/client.test.js b/test/unit/client.test.js index 20c1ce568..3503e17a7 100644 --- a/test/unit/client.test.js +++ b/test/unit/client.test.js @@ -1308,6 +1308,223 @@ test('Content length too big (string)', t => { }) }) +test('Content length too big custom (buffer)', t => { + t.plan(4) + + class MockConnection extends Connection { + request (params, callback) { + const stream = intoStream(JSON.stringify({ hello: 'world' })) + stream.statusCode = 200 + stream.headers = { + 'content-type': 'application/json;utf=8', + 'content-encoding': 'gzip', + 'content-length': 1100, + connection: 'keep-alive', + date: new Date().toISOString() + } + stream.on('close', () => t.pass('Stream 
destroyed')) + process.nextTick(callback, null, stream) + return { abort () {} } + } + } + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection, + maxCompressedResponseSize: 1000 + }) + client.info((err, result) => { + t.ok(err instanceof errors.RequestAbortedError) + t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed buffer (1000)') + t.equal(result.meta.attempts, 0) + }) +}) + +test('Content length too big custom (string)', t => { + t.plan(4) + + class MockConnection extends Connection { + request (params, callback) { + const stream = intoStream(JSON.stringify({ hello: 'world' })) + stream.statusCode = 200 + stream.headers = { + 'content-type': 'application/json;utf=8', + 'content-length': 1100, + connection: 'keep-alive', + date: new Date().toISOString() + } + stream.on('close', () => t.pass('Stream destroyed')) + process.nextTick(callback, null, stream) + return { abort () {} } + } + } + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection, + maxResponseSize: 1000 + }) + client.info((err, result) => { + t.ok(err instanceof errors.RequestAbortedError) + t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed string (1000)') + t.equal(result.meta.attempts, 0) + }) +}) + +test('Content length too big custom option (buffer)', t => { + t.plan(4) + + class MockConnection extends Connection { + request (params, callback) { + const stream = intoStream(JSON.stringify({ hello: 'world' })) + stream.statusCode = 200 + stream.headers = { + 'content-type': 'application/json;utf=8', + 'content-encoding': 'gzip', + 'content-length': 1100, + connection: 'keep-alive', + date: new Date().toISOString() + } + stream.on('close', () => t.pass('Stream destroyed')) + process.nextTick(callback, null, stream) + return { abort () {} } + } + } + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + client.info({}, { maxCompressedResponseSize: 1000 }, (err, result) => { + t.ok(err instanceof errors.RequestAbortedError) + t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed buffer (1000)') + t.equal(result.meta.attempts, 0) + }) +}) + +test('Content length too big custom option (string)', t => { + t.plan(4) + + class MockConnection extends Connection { + request (params, callback) { + const stream = intoStream(JSON.stringify({ hello: 'world' })) + stream.statusCode = 200 + stream.headers = { + 'content-type': 'application/json;utf=8', + 'content-length': 1100, + connection: 'keep-alive', + date: new Date().toISOString() + } + stream.on('close', () => t.pass('Stream destroyed')) + process.nextTick(callback, null, stream) + return { abort () {} } + } + } + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + client.info({}, { maxResponseSize: 1000 }, (err, result) => { + t.ok(err instanceof errors.RequestAbortedError) + t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed string (1000)') + t.equal(result.meta.attempts, 0) + }) +}) + +test('Content length too big custom option override (buffer)', t => { + t.plan(4) + + class MockConnection extends Connection { + request (params, callback) { + const stream = intoStream(JSON.stringify({ hello: 'world' })) + stream.statusCode = 200 + stream.headers = { + 'content-type': 'application/json;utf=8', + 'content-encoding': 'gzip', + 'content-length': 1100, 
+ connection: 'keep-alive', + date: new Date().toISOString() + } + stream.on('close', () => t.pass('Stream destroyed')) + process.nextTick(callback, null, stream) + return { abort () {} } + } + } + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection, + maxCompressedResponseSize: 2000 + }) + client.info({}, { maxCompressedResponseSize: 1000 }, (err, result) => { + t.ok(err instanceof errors.RequestAbortedError) + t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed buffer (1000)') + t.equal(result.meta.attempts, 0) + }) +}) + +test('Content length too big custom option override (string)', t => { + t.plan(4) + + class MockConnection extends Connection { + request (params, callback) { + const stream = intoStream(JSON.stringify({ hello: 'world' })) + stream.statusCode = 200 + stream.headers = { + 'content-type': 'application/json;utf=8', + 'content-length': 1100, + connection: 'keep-alive', + date: new Date().toISOString() + } + stream.on('close', () => t.pass('Stream destroyed')) + process.nextTick(callback, null, stream) + return { abort () {} } + } + } + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection, + maxResponseSize: 2000 + }) + client.info({}, { maxResponseSize: 1000 }, (err, result) => { + t.ok(err instanceof errors.RequestAbortedError) + t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed string (1000)') + t.equal(result.meta.attempts, 0) + }) +}) + +test('maxResponseSize cannot be bigger than buffer.constants.MAX_STRING_LENGTH', t => { + t.plan(2) + + try { + new Client({ // eslint-disable-line + node: 'http://localhost:9200/', + maxResponseSize: buffer.constants.MAX_STRING_LENGTH + 10 + }) + t.fail('should throw') + } catch (err) { + t.ok(err instanceof errors.ConfigurationError) + t.equal(err.message, `The maxResponseSize cannot be bigger than ${buffer.constants.MAX_STRING_LENGTH}`) + } +}) + +test('maxCompressedResponseSize cannot be bigger than buffer.constants.MAX_LENGTH', t => { + t.plan(2) + + try { + new Client({ // eslint-disable-line + node: 'http://localhost:9200/', + maxCompressedResponseSize: buffer.constants.MAX_LENGTH + 10 + }) + t.fail('should throw') + } catch (err) { + t.ok(err instanceof errors.ConfigurationError) + t.equal(err.message, `The maxCompressedResponseSize cannot be bigger than ${buffer.constants.MAX_LENGTH}`) + } +}) + test('Meta header enabled', t => { t.plan(2) From de99b8081695c4876f726efb146813035a29e3ce Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Thu, 16 Sep 2021 08:20:59 +0200 Subject: [PATCH 071/647] Update compatibility info (#1550) --- README.md | 10 +++++----- docs/installation.asciidoc | 10 +++------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index ca9d6d292..673bbc1f7 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ npm install @elastic/elasticsearch ### Node.js support -NOTE: The minimum supported version of Node.js is `v10`. +NOTE: The minimum supported version of Node.js is `v12`. The client versioning follows the Elastic Stack versioning, this means that major, minor, and patch releases are done following a precise schedule that @@ -49,13 +49,13 @@ of `^7.10.0`). | Node.js Version | Node.js EOL date | End of support | | --------------- |------------------| ---------------------- |
| Node.js Version | Node.js EOL date | End of support | | --------------- |------------------| ---------------------- | -| `8.x` | `December 2019` | `7.11` (early 2021) | -| `10.x` | `Apri 2021` | `7.12` (mid 2021) | +| `8.x` | `December 2019` | `7.11` (early 2021) | +| `10.x` | `April 2021` | `7.12` (mid 2021) | ### Compatibility -Language clients are forward compatible; meaning that clients support communicating with greater minor versions of Elasticsearch. -Elastic language clients are also backwards compatible with lesser supported minor Elasticsearch versions. +Language clients are forward compatible; meaning that clients support communicating with greater or equal minor versions of Elasticsearch. +Elasticsearch language clients are only backwards compatible with default distributions and without guarantees made. | Elasticsearch Version | Client Version | | --------------------- |----------------| diff --git a/docs/installation.asciidoc b/docs/installation.asciidoc index 67aacfdb4..ff9fdd2ee 100644 --- a/docs/installation.asciidoc +++ b/docs/installation.asciidoc @@ -24,7 +24,7 @@ To learn more about the supported major versions, please refer to the [[nodejs-support]] === Node.js support -NOTE: The minimum supported version of Node.js is `v10`. +NOTE: The minimum supported version of Node.js is `v12`. The client versioning follows the {stack} versioning, this means that major, minor, and patch releases are done following a precise schedule that @@ -62,12 +62,8 @@ of `^7.10.0`). [[js-compatibility-matrix]] === Compatibility matrix -Elastic language clients are guaranteed to be able to communicate with Elasticsearch -or Elastic solutions running on the same major version and greater or equal minor version. - -Language clients are forward compatible; meaning that clients support communicating -with greater minor versions of Elasticsearch. Elastic language clients are not -guaranteed to be backwards compatible. +Language clients are forward compatible; meaning that clients support communicating with greater or equal minor versions of Elasticsearch. +Elasticsearch language clients are only backwards compatible with default distributions and without guarantees made. 
[%header,cols=2*] |=== From f28c93fe131e6e9c52adf9eca3ea4521caebc9ac Mon Sep 17 00:00:00 2001 From: delvedor Date: Thu, 16 Sep 2021 08:44:21 +0200 Subject: [PATCH 072/647] Bumped v8.0.0-canary.20 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index f0f6f52e5..0d36e77ca 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.19", + "versionCanary": "8.0.0-canary.20", "keywords": [ "elasticsearch", "elastic", From 3feda5d9f6633daca3c036d415c63388a57b809a Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Wed, 22 Sep 2021 10:05:58 +0200 Subject: [PATCH 073/647] Show socket local/remote address in case of ECONNRESET (#1555) * Show socket local/remote address in case of ECONNRESET * Fallback if socket or properties are not defined --- lib/Connection.js | 9 ++++++++- test/unit/connection.test.js | 22 ++++++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/lib/Connection.js b/lib/Connection.js index 88a154ae6..feaa3ab23 100644 --- a/lib/Connection.js +++ b/lib/Connection.js @@ -113,7 +113,14 @@ class Connection { const onError = err => { cleanListeners() this._openRequests-- - callback(new ConnectionError(err.message), null) + let message = err.message + if (err.code === 'ECONNRESET') { + /* istanbul ignore next */ + const socket = request.socket || {} + /* istanbul ignore next */ + message += ` - Local: ${socket.localAddress || 'unknown'}:${socket.localPort || 'unknown'}, Remote: ${socket.remoteAddress || 'unknown'}:${socket.remotePort || 'unknown'}` + } + callback(new ConnectionError(message), null) } const onAbort = () => { diff --git a/test/unit/connection.test.js b/test/unit/connection.test.js index 5ba1c494a..a556564a0 100644 --- a/test/unit/connection.test.js +++ b/test/unit/connection.test.js @@ -1084,3 +1084,25 @@ test('getIssuerCertificate detects invalid/malformed certificates', t => { } t.equal(getIssuerCertificate(socket), null) }) + +test('Should show local/remote socket address in case of ECONNRESET', t => { + t.plan(2) + + function handler (req, res) { + res.destroy() + } + + buildServer(handler, ({ port }, server) => { + const connection = new Connection({ + url: new URL(`http://localhost:${port}`) + }) + connection.request({ + path: '/hello', + method: 'GET' + }, (err, res) => { + t.ok(err instanceof ConnectionError) + t.match(err.message, /socket\shang\sup\s-\sLocal:\s127.0.0.1:\d+,\sRemote:\s127.0.0.1:\d+/) + server.stop() + }) + }) +}) From 4c72b981cdf7f7b12129b0ef606db8743244cc0f Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 29 Sep 2021 07:26:09 +0200 Subject: [PATCH 074/647] Bumped v8.0.0-canary.21 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 0d36e77ca..abf24519d 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.20", + "versionCanary": "8.0.0-canary.21", "keywords": [ "elasticsearch", "elastic", From 1a227459f096951032b881acce18a01352901096 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Thu, 30 Sep 2021 09:45:04 +0200 Subject: [PATCH 075/647] Elasticsearch v8 (#1558) --- .ci/run-repository.sh | 6 +- 
.github/workflows/nodejs.yml | 278 +- .gitignore | 2 + .npmignore | 4 +- api/api/async_search.js | 141 - api/api/autoscaling.js | 147 - api/api/bulk.js | 70 - api/api/cat.js | 648 - api/api/ccr.js | 403 - api/api/clear_scroll.js | 55 - api/api/close_point_in_time.js | 50 - api/api/cluster.js | 420 - api/api/count.js | 55 - api/api/create.js | 69 - api/api/dangling_indices.js | 115 - api/api/delete.js | 65 - api/api/delete_by_query.js | 60 - api/api/delete_by_query_rethrottle.js | 60 - api/api/delete_script.js | 56 - api/api/enrich.js | 173 - api/api/eql.js | 150 - api/api/exists.js | 60 - api/api/exists_source.js | 74 - api/api/explain.js | 60 - api/api/features.js | 81 - api/api/field_caps.js | 55 - api/api/fleet.js | 65 - api/api/get.js | 60 - api/api/get_script.js | 56 - api/api/get_script_context.js | 50 - api/api/get_script_languages.js | 50 - api/api/get_source.js | 60 - api/api/graph.js | 61 - api/api/ilm.js | 317 - api/api/index.js | 65 - api/api/indices.js | 1657 -- api/api/info.js | 50 - api/api/ingest.js | 200 - api/api/license.js | 188 - api/api/logstash.js | 125 - api/api/mget.js | 61 - api/api/migration.js | 60 - api/api/ml.js | 2070 --- api/api/monitoring.js | 66 - api/api/msearch.js | 61 - api/api/msearch_template.js | 61 - api/api/mtermvectors.js | 55 - api/api/nodes.js | 259 - api/api/open_point_in_time.js | 55 - api/api/ping.js | 50 - api/api/put_script.js | 71 - api/api/rank_eval.js | 61 - api/api/reindex.js | 56 - api/api/reindex_rethrottle.js | 60 - api/api/render_search_template.js | 55 - api/api/rollup.js | 319 - api/api/scripts_painless_execute.js | 50 - api/api/scroll.js | 55 - api/api/search.js | 55 - api/api/search_mvt.js | 87 - api/api/search_shards.js | 55 - api/api/search_template.js | 61 - api/api/searchable_snapshots.js | 158 - api/api/security.js | 1305 -- api/api/shutdown.js | 124 - api/api/slm.js | 256 - api/api/snapshot.js | 439 - api/api/sql.js | 203 - api/api/ssl.js | 55 - api/api/tasks.js | 108 - api/api/terms_enum.js | 56 - api/api/termvectors.js | 61 - api/api/text_structure.js | 65 - api/api/transform.js | 268 - api/api/update.js | 69 - api/api/update_by_query.js | 56 - api/api/update_by_query_rethrottle.js | 60 - api/api/watcher.js | 333 - api/api/xpack.js | 76 - api/index.js | 508 - api/kibana.d.ts | 545 - api/new.d.ts | 1585 -- api/requestParams.d.ts | 2862 --- api/utils.js | 58 - index.d.ts | 2972 +-- index.js | 343 +- index.mjs | 29 - lib/Connection.d.ts | 99 - lib/Connection.js | 399 - lib/Helpers.d.ts | 124 - lib/Serializer.d.ts | 30 - lib/Serializer.js | 94 - lib/Transport.d.ts | 166 - lib/Transport.js | 695 - lib/errors.d.ts | 90 - lib/errors.js | 159 - lib/pool/BaseConnectionPool.js | 262 - lib/pool/CloudConnectionPool.js | 64 - lib/pool/ConnectionPool.js | 246 - lib/pool/index.d.ts | 220 - package.json | 105 +- scripts/release-canary.js | 1 - src/Client.ts | 337 + lib/Helpers.js => src/Helpers.ts | 525 +- src/SniffingTransport.ts | 54 + src/api/api/async_search.ts | 165 + src/api/api/autoscaling.ts | 157 + src/api/api/bulk.ts | 75 + src/api/api/cat.ts | 882 + src/api/api/ccr.ts | 418 + src/api/api/clear_scroll.ts | 67 + src/api/api/close_point_in_time.ts | 66 + src/api/api/cluster.ts | 508 + src/api/api/count.ts | 74 + src/api/api/create.ts | 65 + src/api/api/dangling_indices.ts | 130 + src/api/api/delete.ts | 66 + src/api/api/delete_by_query.ts | 66 + src/api/api/delete_by_query_rethrottle.ts | 66 + src/api/api/delete_script.ts | 66 + src/api/api/enrich.ts | 194 + src/api/api/eql.ts | 157 + src/api/api/exists.ts | 66 + 
src/api/api/exists_source.ts | 66 + src/api/api/explain.ts | 66 + src/api/api/features.ts | 103 + src/api/api/field_caps.ts | 74 + src/api/api/fleet.ts | 71 + src/api/api/get.ts | 66 + src/api/api/get_script.ts | 66 + src/api/api/get_script_context.ts | 67 + src/api/api/get_script_languages.ts | 67 + src/api/api/get_source.ts | 66 + src/api/api/graph.ts | 73 + src/api/api/ilm.ts | 362 + src/api/api/index.ts | 72 + src/api/api/indices.ts | 1701 ++ src/api/api/info.ts | 67 + src/api/api/ingest.ts | 231 + src/api/api/license.ts | 248 + src/api/api/logstash.ts | 128 + src/api/api/mget.ts | 74 + src/api/api/migration.ts | 81 + src/api/api/ml.ts | 2132 +++ src/api/api/monitoring.ts | 72 + src/api/api/msearch.ts | 72 + src/api/api/msearch_template.ts | 72 + src/api/api/mtermvectors.ts | 74 + src/api/api/nodes.ts | 301 + src/api/api/open_point_in_time.ts | 66 + src/api/api/ping.ts | 67 + src/api/api/put_script.ts | 73 + src/api/api/rank_eval.ts | 73 + src/api/api/reindex.ts | 67 + src/api/api/reindex_rethrottle.ts | 66 + src/api/api/render_search_template.ts | 74 + src/api/api/rollup.ts | 312 + src/api/api/scripts_painless_execute.ts | 67 + src/api/api/scroll.ts | 66 + src/api/api/search.ts | 74 + src/api/api/search_mvt.ts | 66 + src/api/api/search_shards.ts | 74 + src/api/api/search_template.ts | 74 + src/api/api/searchable_snapshots.ts | 178 + src/api/api/security.ts | 1307 ++ src/api/api/shutdown.ts | 137 + src/api/api/slm.ts | 310 + src/api/api/snapshot.ts | 398 + src/api/api/sql.ts | 208 + src/api/api/ssl.ts | 74 + src/api/api/tasks.ts | 138 + src/api/api/terms_enum.ts | 66 + src/api/api/termvectors.ts | 73 + src/api/api/text_structure.ts | 72 + src/api/api/transform.ts | 285 + src/api/api/update.ts | 66 + src/api/api/update_by_query.ts | 66 + src/api/api/update_by_query_rethrottle.ts | 66 + src/api/api/watcher.ts | 379 + src/api/api/xpack.ts | 103 + src/api/index.ts | 432 + src/api/kibana.ts | 520 + src/api/types.ts | 15456 ++++++++++++++++ api/types.d.ts => src/api/typesWithBodyKey.ts | 2361 ++- test/acceptance/events-order.test.js | 463 - test/acceptance/observability.test.js | 404 - test/acceptance/product-check.test.js | 1348 -- test/acceptance/proxy.test.js | 149 - test/acceptance/resurrect.test.js | 213 - test/acceptance/sniff.test.js | 298 - test/benchmarks/macro/complex.bench.js | 101 - test/benchmarks/macro/simple.bench.js | 269 - test/benchmarks/micro/basic.bench.js | 98 - test/benchmarks/suite.js | 272 - test/bundlers/parcel-test/index.js | 7 - test/bundlers/parcel-test/package.json | 18 - test/bundlers/rollup-test/index.js | 7 - test/bundlers/rollup-test/package.json | 19 - test/bundlers/rollup-test/rollup.config.js | 13 - test/bundlers/webpack-test/index.js | 7 - test/bundlers/webpack-test/package.json | 17 - test/bundlers/webpack-test/webpack.config.js | 12 - test/integration/index.js | 29 +- test/integration/integration/README.md | 52 + test/integration/integration/helper.js | 96 + .../integration/helpers/bulk.test.js | 204 + .../integration/helpers/msearch.test.js | 121 + .../integration/helpers/scroll.test.js | 118 + .../integration/helpers/search.test.js | 71 + test/integration/integration/index.js | 385 + test/integration/integration/reporter.js | 109 + test/integration/integration/test-runner.js | 909 + test/integration/test-runner.js | 128 +- test/types/api-response-body.test-d.ts | 284 - test/types/api-response.test-d.ts | 80 - test/types/client-options.test-d.ts | 695 - test/types/client.test-d.ts | 138 - test/types/connection-pool.test-d.ts | 110 - 
test/types/connection.test-d.ts | 54 - test/types/errors.test-d.ts | 104 - test/types/helpers.test-d.ts | 476 - test/types/kibana.test-d.ts | 116 - test/types/new-types.test-d.ts | 108 - test/types/serializer.test-d.ts | 28 - test/types/transport.test-d.ts | 182 - test/unit/api-async.js | 95 - test/unit/api.test.js | 335 - test/unit/base-connection-pool.test.js | 505 - test/unit/child.test.js | 321 - test/unit/client.test.js | 1862 -- test/unit/client.test.ts | 434 + test/unit/cloud-connection-pool.test.js | 48 - test/unit/connection-pool.test.js | 801 - test/unit/connection.test.js | 1108 -- test/unit/errors.test.js | 225 - test/unit/esm/index.mjs | 8 - test/unit/esm/index.test.js | 19 - test/unit/events.test.js | 297 - .../helpers/{bulk.test.js => bulk.test.ts} | 145 +- .../{msearch.test.js => msearch.test.ts} | 349 +- .../{scroll.test.js => scroll.test.ts} | 77 +- .../{search.test.js => search.test.ts} | 50 +- test/unit/selectors.test.js | 42 - test/unit/serializer.test.js | 231 - test/unit/transport.test.js | 2764 --- test/utils/MockConnection.js | 180 - test/utils/MockConnection.ts | 139 + test/utils/buildCluster.js | 110 - test/utils/buildCluster.ts | 119 + test/utils/buildProxy.js | 60 - test/utils/buildProxy.ts | 71 + test/utils/{buildServer.js => buildServer.ts} | 63 +- test/utils/index.js | 66 - lib/pool/index.js => test/utils/index.ts | 18 +- tsconfig.json | 37 + 255 files changed, 36930 insertions(+), 42043 deletions(-) delete mode 100644 api/api/async_search.js delete mode 100644 api/api/autoscaling.js delete mode 100644 api/api/bulk.js delete mode 100644 api/api/cat.js delete mode 100644 api/api/ccr.js delete mode 100644 api/api/clear_scroll.js delete mode 100644 api/api/close_point_in_time.js delete mode 100644 api/api/cluster.js delete mode 100644 api/api/count.js delete mode 100644 api/api/create.js delete mode 100644 api/api/dangling_indices.js delete mode 100644 api/api/delete.js delete mode 100644 api/api/delete_by_query.js delete mode 100644 api/api/delete_by_query_rethrottle.js delete mode 100644 api/api/delete_script.js delete mode 100644 api/api/enrich.js delete mode 100644 api/api/eql.js delete mode 100644 api/api/exists.js delete mode 100644 api/api/exists_source.js delete mode 100644 api/api/explain.js delete mode 100644 api/api/features.js delete mode 100644 api/api/field_caps.js delete mode 100644 api/api/fleet.js delete mode 100644 api/api/get.js delete mode 100644 api/api/get_script.js delete mode 100644 api/api/get_script_context.js delete mode 100644 api/api/get_script_languages.js delete mode 100644 api/api/get_source.js delete mode 100644 api/api/graph.js delete mode 100644 api/api/ilm.js delete mode 100644 api/api/index.js delete mode 100644 api/api/indices.js delete mode 100644 api/api/info.js delete mode 100644 api/api/ingest.js delete mode 100644 api/api/license.js delete mode 100644 api/api/logstash.js delete mode 100644 api/api/mget.js delete mode 100644 api/api/migration.js delete mode 100644 api/api/ml.js delete mode 100644 api/api/monitoring.js delete mode 100644 api/api/msearch.js delete mode 100644 api/api/msearch_template.js delete mode 100644 api/api/mtermvectors.js delete mode 100644 api/api/nodes.js delete mode 100644 api/api/open_point_in_time.js delete mode 100644 api/api/ping.js delete mode 100644 api/api/put_script.js delete mode 100644 api/api/rank_eval.js delete mode 100644 api/api/reindex.js delete mode 100644 api/api/reindex_rethrottle.js delete mode 100644 api/api/render_search_template.js delete mode 100644 api/api/rollup.js 
delete mode 100644 api/api/scripts_painless_execute.js delete mode 100644 api/api/scroll.js delete mode 100644 api/api/search.js delete mode 100644 api/api/search_mvt.js delete mode 100644 api/api/search_shards.js delete mode 100644 api/api/search_template.js delete mode 100644 api/api/searchable_snapshots.js delete mode 100644 api/api/security.js delete mode 100644 api/api/shutdown.js delete mode 100644 api/api/slm.js delete mode 100644 api/api/snapshot.js delete mode 100644 api/api/sql.js delete mode 100644 api/api/ssl.js delete mode 100644 api/api/tasks.js delete mode 100644 api/api/terms_enum.js delete mode 100644 api/api/termvectors.js delete mode 100644 api/api/text_structure.js delete mode 100644 api/api/transform.js delete mode 100644 api/api/update.js delete mode 100644 api/api/update_by_query.js delete mode 100644 api/api/update_by_query_rethrottle.js delete mode 100644 api/api/watcher.js delete mode 100644 api/api/xpack.js delete mode 100644 api/index.js delete mode 100644 api/kibana.d.ts delete mode 100644 api/new.d.ts delete mode 100644 api/requestParams.d.ts delete mode 100644 api/utils.js delete mode 100644 index.mjs delete mode 100644 lib/Connection.d.ts delete mode 100644 lib/Connection.js delete mode 100644 lib/Helpers.d.ts delete mode 100644 lib/Serializer.d.ts delete mode 100644 lib/Serializer.js delete mode 100644 lib/Transport.d.ts delete mode 100644 lib/Transport.js delete mode 100644 lib/errors.d.ts delete mode 100644 lib/errors.js delete mode 100644 lib/pool/BaseConnectionPool.js delete mode 100644 lib/pool/CloudConnectionPool.js delete mode 100644 lib/pool/ConnectionPool.js delete mode 100644 lib/pool/index.d.ts create mode 100644 src/Client.ts rename lib/Helpers.js => src/Helpers.ts (57%) create mode 100644 src/SniffingTransport.ts create mode 100644 src/api/api/async_search.ts create mode 100644 src/api/api/autoscaling.ts create mode 100644 src/api/api/bulk.ts create mode 100644 src/api/api/cat.ts create mode 100644 src/api/api/ccr.ts create mode 100644 src/api/api/clear_scroll.ts create mode 100644 src/api/api/close_point_in_time.ts create mode 100644 src/api/api/cluster.ts create mode 100644 src/api/api/count.ts create mode 100644 src/api/api/create.ts create mode 100644 src/api/api/dangling_indices.ts create mode 100644 src/api/api/delete.ts create mode 100644 src/api/api/delete_by_query.ts create mode 100644 src/api/api/delete_by_query_rethrottle.ts create mode 100644 src/api/api/delete_script.ts create mode 100644 src/api/api/enrich.ts create mode 100644 src/api/api/eql.ts create mode 100644 src/api/api/exists.ts create mode 100644 src/api/api/exists_source.ts create mode 100644 src/api/api/explain.ts create mode 100644 src/api/api/features.ts create mode 100644 src/api/api/field_caps.ts create mode 100644 src/api/api/fleet.ts create mode 100644 src/api/api/get.ts create mode 100644 src/api/api/get_script.ts create mode 100644 src/api/api/get_script_context.ts create mode 100644 src/api/api/get_script_languages.ts create mode 100644 src/api/api/get_source.ts create mode 100644 src/api/api/graph.ts create mode 100644 src/api/api/ilm.ts create mode 100644 src/api/api/index.ts create mode 100644 src/api/api/indices.ts create mode 100644 src/api/api/info.ts create mode 100644 src/api/api/ingest.ts create mode 100644 src/api/api/license.ts create mode 100644 src/api/api/logstash.ts create mode 100644 src/api/api/mget.ts create mode 100644 src/api/api/migration.ts create mode 100644 src/api/api/ml.ts create mode 100644 src/api/api/monitoring.ts create mode 100644 
src/api/api/msearch.ts create mode 100644 src/api/api/msearch_template.ts create mode 100644 src/api/api/mtermvectors.ts create mode 100644 src/api/api/nodes.ts create mode 100644 src/api/api/open_point_in_time.ts create mode 100644 src/api/api/ping.ts create mode 100644 src/api/api/put_script.ts create mode 100644 src/api/api/rank_eval.ts create mode 100644 src/api/api/reindex.ts create mode 100644 src/api/api/reindex_rethrottle.ts create mode 100644 src/api/api/render_search_template.ts create mode 100644 src/api/api/rollup.ts create mode 100644 src/api/api/scripts_painless_execute.ts create mode 100644 src/api/api/scroll.ts create mode 100644 src/api/api/search.ts create mode 100644 src/api/api/search_mvt.ts create mode 100644 src/api/api/search_shards.ts create mode 100644 src/api/api/search_template.ts create mode 100644 src/api/api/searchable_snapshots.ts create mode 100644 src/api/api/security.ts create mode 100644 src/api/api/shutdown.ts create mode 100644 src/api/api/slm.ts create mode 100644 src/api/api/snapshot.ts create mode 100644 src/api/api/sql.ts create mode 100644 src/api/api/ssl.ts create mode 100644 src/api/api/tasks.ts create mode 100644 src/api/api/terms_enum.ts create mode 100644 src/api/api/termvectors.ts create mode 100644 src/api/api/text_structure.ts create mode 100644 src/api/api/transform.ts create mode 100644 src/api/api/update.ts create mode 100644 src/api/api/update_by_query.ts create mode 100644 src/api/api/update_by_query_rethrottle.ts create mode 100644 src/api/api/watcher.ts create mode 100644 src/api/api/xpack.ts create mode 100644 src/api/index.ts create mode 100644 src/api/kibana.ts create mode 100644 src/api/types.ts rename api/types.d.ts => src/api/typesWithBodyKey.ts (88%) delete mode 100644 test/acceptance/events-order.test.js delete mode 100644 test/acceptance/observability.test.js delete mode 100644 test/acceptance/product-check.test.js delete mode 100644 test/acceptance/proxy.test.js delete mode 100644 test/acceptance/resurrect.test.js delete mode 100644 test/acceptance/sniff.test.js delete mode 100644 test/benchmarks/macro/complex.bench.js delete mode 100644 test/benchmarks/macro/simple.bench.js delete mode 100644 test/benchmarks/micro/basic.bench.js delete mode 100644 test/benchmarks/suite.js delete mode 100644 test/bundlers/parcel-test/index.js delete mode 100644 test/bundlers/parcel-test/package.json delete mode 100644 test/bundlers/rollup-test/index.js delete mode 100644 test/bundlers/rollup-test/package.json delete mode 100644 test/bundlers/rollup-test/rollup.config.js delete mode 100644 test/bundlers/webpack-test/index.js delete mode 100644 test/bundlers/webpack-test/package.json delete mode 100644 test/bundlers/webpack-test/webpack.config.js create mode 100644 test/integration/integration/README.md create mode 100644 test/integration/integration/helper.js create mode 100644 test/integration/integration/helpers/bulk.test.js create mode 100644 test/integration/integration/helpers/msearch.test.js create mode 100644 test/integration/integration/helpers/scroll.test.js create mode 100644 test/integration/integration/helpers/search.test.js create mode 100644 test/integration/integration/index.js create mode 100644 test/integration/integration/reporter.js create mode 100644 test/integration/integration/test-runner.js delete mode 100644 test/types/api-response-body.test-d.ts delete mode 100644 test/types/api-response.test-d.ts delete mode 100644 test/types/client-options.test-d.ts delete mode 100644 test/types/client.test-d.ts delete mode 100644 
test/types/connection-pool.test-d.ts delete mode 100644 test/types/connection.test-d.ts delete mode 100644 test/types/errors.test-d.ts delete mode 100644 test/types/helpers.test-d.ts delete mode 100644 test/types/kibana.test-d.ts delete mode 100644 test/types/new-types.test-d.ts delete mode 100644 test/types/serializer.test-d.ts delete mode 100644 test/types/transport.test-d.ts delete mode 100644 test/unit/api-async.js delete mode 100644 test/unit/api.test.js delete mode 100644 test/unit/base-connection-pool.test.js delete mode 100644 test/unit/child.test.js delete mode 100644 test/unit/client.test.js create mode 100644 test/unit/client.test.ts delete mode 100644 test/unit/cloud-connection-pool.test.js delete mode 100644 test/unit/connection-pool.test.js delete mode 100644 test/unit/connection.test.js delete mode 100644 test/unit/errors.test.js delete mode 100644 test/unit/esm/index.mjs delete mode 100644 test/unit/esm/index.test.js delete mode 100644 test/unit/events.test.js rename test/unit/helpers/{bulk.test.js => bulk.test.ts} (88%) rename test/unit/helpers/{msearch.test.js => msearch.test.ts} (76%) rename test/unit/helpers/{scroll.test.js => scroll.test.ts} (83%) rename test/unit/helpers/{search.test.js => search.test.ts} (74%) delete mode 100644 test/unit/selectors.test.js delete mode 100644 test/unit/serializer.test.js delete mode 100644 test/unit/transport.test.js delete mode 100644 test/utils/MockConnection.js create mode 100644 test/utils/MockConnection.ts delete mode 100644 test/utils/buildCluster.js create mode 100644 test/utils/buildCluster.ts delete mode 100644 test/utils/buildProxy.js create mode 100644 test/utils/buildProxy.ts rename test/utils/{buildServer.js => buildServer.ts} (58%) delete mode 100644 test/utils/index.js rename lib/pool/index.js => test/utils/index.ts (75%) create mode 100644 tsconfig.json diff --git a/.ci/run-repository.sh b/.ci/run-repository.sh index 026595fdf..846abfc26 100755 --- a/.ci/run-repository.sh +++ b/.ci/run-repository.sh @@ -30,10 +30,6 @@ docker build \ echo -e "\033[1m>>>>> NPM run test:integration >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" repo=$(realpath $(dirname $(realpath -s $0))/../) -run_script_args="" -if [[ "$NODE_JS_VERSION" == "8" ]]; then - run_script_args="--harmony-async-iteration" -fi docker run \ --network=${network_name} \ @@ -44,4 +40,4 @@ docker run \ --name elasticsearch-js \ --rm \ elastic/elasticsearch-js \ - node ${run_script_args} test/integration/index.js + npm run test:integration diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 5c3e8529f..1bb004ad1 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -32,147 +32,143 @@ jobs: run: | npm run test:unit - - name: Acceptance test - run: | - npm run test:acceptance - - - name: Type Definitions - run: | - npm run test:types - - helpers-integration-test: - name: Helpers integration test - runs-on: ubuntu-latest - - strategy: - matrix: - node-version: [12.x, 14.x, 16.x] - - steps: - - uses: actions/checkout@v2 - - - name: Configure sysctl limits - run: | - sudo swapoff -a - sudo sysctl -w vm.swappiness=1 - sudo sysctl -w fs.file-max=262144 - sudo sysctl -w vm.max_map_count=262144 - - - name: Runs Elasticsearch - uses: elastic/elastic-github-actions/elasticsearch@master - with: - stack-version: 8.0.0-SNAPSHOT - - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v1 - with: - node-version: ${{ matrix.node-version }} - - - name: Install - run: | - npm install - - - name: Integration test - run: | - 
npm run test:integration:helpers - - bundler-support: - name: Bundler support - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - name: Configure sysctl limits - run: | - sudo swapoff -a - sudo sysctl -w vm.swappiness=1 - sudo sysctl -w fs.file-max=262144 - sudo sysctl -w vm.max_map_count=262144 - - - name: Runs Elasticsearch - uses: elastic/elastic-github-actions/elasticsearch@master - with: - stack-version: 8.0.0-SNAPSHOT - - - name: Use Node.js 14.x - uses: actions/setup-node@v1 - with: - node-version: 14.x - - - name: Install - run: | - npm install - npm install --prefix test/bundlers/parcel-test - npm install --prefix test/bundlers/rollup-test - npm install --prefix test/bundlers/webpack-test - - - name: Build - run: | - npm run build --prefix test/bundlers/parcel-test - npm run build --prefix test/bundlers/rollup-test - npm run build --prefix test/bundlers/webpack-test - - - name: Run bundle - run: | - npm start --prefix test/bundlers/parcel-test - npm start --prefix test/bundlers/rollup-test - npm start --prefix test/bundlers/webpack-test - - mock-support: - name: Mock support - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - name: Use Node.js 14.x - uses: actions/setup-node@v1 - with: - node-version: 14.x - - - name: Install - run: | - npm install - npm install --prefix test/mock - - - name: Run test - run: | - npm test --prefix test/mock - - code-coverage: - name: Code coverage - runs-on: ubuntu-latest - - strategy: - matrix: - node-version: [14.x] - - steps: - - uses: actions/checkout@v2 - - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v1 - with: - node-version: ${{ matrix.node-version }} - - - name: Install - run: | - npm install - - - name: Code coverage report - run: | - npm run test:coverage-report - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 - with: - file: ./coverage.lcov - fail_ci_if_error: true - - - name: Code coverage 100% - run: | - npm run test:coverage-100 + # - name: Acceptance test + # run: | + # npm run test:acceptance + + # helpers-integration-test: + # name: Helpers integration test + # runs-on: ubuntu-latest + + # strategy: + # matrix: + # node-version: [12.x, 14.x, 16.x] + + # steps: + # - uses: actions/checkout@v2 + + # - name: Configure sysctl limits + # run: | + # sudo swapoff -a + # sudo sysctl -w vm.swappiness=1 + # sudo sysctl -w fs.file-max=262144 + # sudo sysctl -w vm.max_map_count=262144 + + # - name: Runs Elasticsearch + # uses: elastic/elastic-github-actions/elasticsearch@master + # with: + # stack-version: 8.0.0-SNAPSHOT + + # - name: Use Node.js ${{ matrix.node-version }} + # uses: actions/setup-node@v1 + # with: + # node-version: ${{ matrix.node-version }} + + # - name: Install + # run: | + # npm install + + # - name: Integration test + # run: | + # npm run test:integration:helpers + + # bundler-support: + # name: Bundler support + # runs-on: ubuntu-latest + + # steps: + # - uses: actions/checkout@v2 + + # - name: Configure sysctl limits + # run: | + # sudo swapoff -a + # sudo sysctl -w vm.swappiness=1 + # sudo sysctl -w fs.file-max=262144 + # sudo sysctl -w vm.max_map_count=262144 + + # - name: Runs Elasticsearch + # uses: elastic/elastic-github-actions/elasticsearch@master + # with: + # stack-version: 8.0.0-SNAPSHOT + + # - name: Use Node.js 14.x + # uses: actions/setup-node@v1 + # with: + # node-version: 14.x + + # - name: Install + # run: | + # npm install + # npm install --prefix test/bundlers/parcel-test + # npm install --prefix 
test/bundlers/rollup-test + # npm install --prefix test/bundlers/webpack-test + + # - name: Build + # run: | + # npm run build --prefix test/bundlers/parcel-test + # npm run build --prefix test/bundlers/rollup-test + # npm run build --prefix test/bundlers/webpack-test + + # - name: Run bundle + # run: | + # npm start --prefix test/bundlers/parcel-test + # npm start --prefix test/bundlers/rollup-test + # npm start --prefix test/bundlers/webpack-test + + # mock-support: + # name: Mock support + # runs-on: ubuntu-latest + + # steps: + # - uses: actions/checkout@v2 + + # - name: Use Node.js 14.x + # uses: actions/setup-node@v1 + # with: + # node-version: 14.x + + # - name: Install + # run: | + # npm install + # npm install --prefix test/mock + + # - name: Run test + # run: | + # npm test --prefix test/mock + + # code-coverage: + # name: Code coverage + # runs-on: ubuntu-latest + + # strategy: + # matrix: + # node-version: [14.x] + + # steps: + # - uses: actions/checkout@v2 + + # - name: Use Node.js ${{ matrix.node-version }} + # uses: actions/setup-node@v1 + # with: + # node-version: ${{ matrix.node-version }} + + # - name: Install + # run: | + # npm install + + # - name: Code coverage report + # run: | + # npm run test:coverage-report + + # - name: Upload coverage to Codecov + # uses: codecov/codecov-action@v1 + # with: + # file: ./coverage.lcov + # fail_ci_if_error: true + + # - name: Code coverage 100% + # run: | + # npm run test:coverage-100 license: name: License check diff --git a/.gitignore b/.gitignore index 6de1b46bc..5c0af8e20 100644 --- a/.gitignore +++ b/.gitignore @@ -61,3 +61,5 @@ test/benchmarks/macro/fixtures/* test/bundlers/**/bundle.js test/bundlers/parcel-test/.parcel-cache + +lib diff --git a/.npmignore b/.npmignore index 9ef569a66..2a7110656 100644 --- a/.npmignore +++ b/.npmignore @@ -71,6 +71,8 @@ certs CODE_OF_CONDUCT.md CONTRIBUTING.md +src + # CANARY-PACKAGE -api/kibana.d.ts +lib/api/kibana.* # /CANARY-PACKAGE diff --git a/api/api/async_search.js b/api/api/async_search.js deleted file mode 100644 index 1d73931b9..000000000 --- a/api/api/async_search.js +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'wait_for_completion_timeout', 'keep_alive', 'typed_keys', 'keep_on_completion', 'batched_reduce_size', 'request_cache', 'analyzer', 'analyze_wildcard', 'default_operator', 'df', 'explain', 'stored_fields', 'docvalue_fields', 'from', 'ignore_unavailable', 'ignore_throttled', 'allow_no_indices', 'expand_wildcards', 'lenient', 'preference', 'q', 'routing', 'search_type', 'size', 'sort', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'terminate_after', 'stats', 'suggest_field', 'suggest_mode', 'suggest_size', 'suggest_text', 'timeout', 'track_scores', 'track_total_hits', 'allow_partial_search_results', 'version', 'seq_no_primary_term', 'max_concurrent_shard_requests'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', waitForCompletionTimeout: 'wait_for_completion_timeout', keepAlive: 'keep_alive', typedKeys: 'typed_keys', keepOnCompletion: 'keep_on_completion', batchedReduceSize: 'batched_reduce_size', requestCache: 'request_cache', analyzeWildcard: 'analyze_wildcard', defaultOperator: 'default_operator', storedFields: 'stored_fields', docvalueFields: 'docvalue_fields', ignoreUnavailable: 'ignore_unavailable', ignoreThrottled: 'ignore_throttled', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', searchType: 'search_type', _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', terminateAfter: 'terminate_after', suggestField: 'suggest_field', suggestMode: 'suggest_mode', suggestSize: 'suggest_size', suggestText: 'suggest_text', trackScores: 'track_scores', trackTotalHits: 'track_total_hits', allowPartialSearchResults: 'allow_partial_search_results', seqNoPrimaryTerm: 'seq_no_primary_term', maxConcurrentShardRequests: 'max_concurrent_shard_requests' } - -function AsyncSearchApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -AsyncSearchApi.prototype.delete = function asyncSearchDeleteApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_async_search' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -AsyncSearchApi.prototype.get = function asyncSearchGetApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == 
null) method = 'GET' - path = '/' + '_async_search' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -AsyncSearchApi.prototype.status = function asyncSearchStatusApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_async_search' + '/' + 'status' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -AsyncSearchApi.prototype.submit = function asyncSearchSubmitApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_async_search' - } else { - if (method == null) method = 'POST' - path = '/' + '_async_search' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = AsyncSearchApi diff --git a/api/api/autoscaling.js b/api/api/autoscaling.js deleted file mode 100644 index 47785cd5f..000000000 --- a/api/api/autoscaling.js +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
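For reference, the async_search namespace removed above wrapped four endpoints (submit, get, status, delete), each validating its required parameters before handing a request object to the transport. A minimal usage sketch against the old client follows; the node URL and index name are illustrative assumptions:

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // POST /my-index/_async_search
  const { body: submitted } = await client.asyncSearch.submit({
    index: 'my-index',
    body: { query: { match_all: {} } }
  })
  // An id is returned while the search is still running.
  if (submitted.id != null) {
    // GET /_async_search/status/<id>
    const { body: status } = await client.asyncSearch.status({ id: submitted.id })
    console.log(status)
    // DELETE /_async_search/<id>
    await client.asyncSearch.delete({ id: submitted.id })
  }
}

run().catch(console.log)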
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } - -function AutoscalingApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -AutoscalingApi.prototype.deleteAutoscalingPolicy = function autoscalingDeleteAutoscalingPolicyApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_autoscaling' + '/' + 'policy' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -AutoscalingApi.prototype.getAutoscalingCapacity = function autoscalingGetAutoscalingCapacityApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_autoscaling' + '/' + 'capacity' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -AutoscalingApi.prototype.getAutoscalingPolicy = function autoscalingGetAutoscalingPolicyApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_autoscaling' + '/' + 'policy' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -AutoscalingApi.prototype.putAutoscalingPolicy = function autoscalingPutAutoscalingPolicyApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_autoscaling' + '/' + 'policy' + '/' + encodeURIComponent(name) - - // build 
request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(AutoscalingApi.prototype, { - delete_autoscaling_policy: { get () { return this.deleteAutoscalingPolicy } }, - get_autoscaling_capacity: { get () { return this.getAutoscalingCapacity } }, - get_autoscaling_policy: { get () { return this.getAutoscalingPolicy } }, - put_autoscaling_policy: { get () { return this.putAutoscalingPolicy } } -}) - -module.exports = AutoscalingApi diff --git a/api/api/bulk.js b/api/api/bulk.js deleted file mode 100644 index 100130755..000000000 --- a/api/api/bulk.js +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['wait_for_active_shards', 'refresh', 'routing', 'timeout', 'type', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'pipeline', 'require_alias', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { waitForActiveShards: 'wait_for_active_shards', _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', requireAlias: 'require_alias', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function bulkApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - // check required url components - if (params.type != null && (params.index == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: index') - return handleError(err, callback) - } - - let { method, body, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (type) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + '_bulk' - } else if ((index) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_bulk' - } else { - if (method == null) method = 'POST' - path = '/' + '_bulk' - } - - // build request object - const request = { - method, - path, - bulkBody: body, - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = bulkApi diff --git 
a/api/api/cat.js b/api/api/cat.js deleted file mode 100644 index 87d6120cd..000000000 --- a/api/api/cat.js +++ /dev/null @@ -1,648 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['format', 'local', 'h', 'help', 's', 'v', 'expand_wildcards', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'bytes', 'master_timeout', 'fields', 'time', 'ts', 'health', 'pri', 'include_unloaded_segments', 'allow_no_match', 'allow_no_datafeeds', 'allow_no_jobs', 'from', 'size', 'full_id', 'include_bootstrap', 'active_only', 'detailed', 'index', 'ignore_unavailable', 'nodes', 'actions', 'parent_task_id'] -const snakeCase = { expandWildcards: 'expand_wildcards', errorTrace: 'error_trace', filterPath: 'filter_path', masterTimeout: 'master_timeout', includeUnloadedSegments: 'include_unloaded_segments', allowNoMatch: 'allow_no_match', allowNoDatafeeds: 'allow_no_datafeeds', allowNoJobs: 'allow_no_jobs', fullId: 'full_id', includeBootstrap: 'include_bootstrap', activeOnly: 'active_only', ignoreUnavailable: 'ignore_unavailable', parentTaskId: 'parent_task_id' } - -function CatApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -CatApi.prototype.aliases = function catAliasesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'aliases' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'aliases' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.allocation = function catAllocationApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, nodeId, node_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((node_id || nodeId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'allocation' + '/' + encodeURIComponent(node_id || nodeId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'allocation' - } - - // build request object - const request = { - method, - path, - body: null, - querystring 
- } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.count = function catCountApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'count' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'count' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.fielddata = function catFielddataApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, fields, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((fields) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'fielddata' + '/' + encodeURIComponent(fields) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'fielddata' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.health = function catHealthApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'health' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.help = function catHelpApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_cat' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.indices = function catIndicesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'indices' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'indices' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.master = function catMasterApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if 
(method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'master' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.mlDataFrameAnalytics = function catMlDataFrameAnalyticsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((id) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'data_frame' + '/' + 'analytics' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.mlDatafeeds = function catMlDatafeedsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((datafeed_id || datafeedId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'datafeeds' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.mlJobs = function catMlJobsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((job_id || jobId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'anomaly_detectors' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.mlTrainedModels = function catMlTrainedModelsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((model_id || modelId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'trained_models' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.nodeattrs = function catNodeattrsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, 
callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'nodeattrs' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.nodes = function catNodesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'nodes' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.pendingTasks = function catPendingTasksApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'pending_tasks' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.plugins = function catPluginsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'plugins' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.recovery = function catRecoveryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'recovery' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'recovery' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.repositories = function catRepositoriesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'repositories' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.segments = function catSegmentsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = 
snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'segments' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'segments' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.shards = function catShardsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'shards' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'shards' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.snapshots = function catSnapshotsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((repository) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'snapshots' + '/' + encodeURIComponent(repository) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'snapshots' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.tasks = function catTasksApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'tasks' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.templates = function catTemplatesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'templates' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'templates' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.threadPool = function catThreadPoolApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, threadPoolPatterns, thread_pool_patterns, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((thread_pool_patterns || threadPoolPatterns) != null) 
{ - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'thread_pool' + '/' + encodeURIComponent(thread_pool_patterns || threadPoolPatterns) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'thread_pool' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CatApi.prototype.transforms = function catTransformsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((transform_id || transformId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'transforms' + '/' + encodeURIComponent(transform_id || transformId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'transforms' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(CatApi.prototype, { - ml_data_frame_analytics: { get () { return this.mlDataFrameAnalytics } }, - ml_datafeeds: { get () { return this.mlDatafeeds } }, - ml_jobs: { get () { return this.mlJobs } }, - ml_trained_models: { get () { return this.mlTrainedModels } }, - pending_tasks: { get () { return this.pendingTasks } }, - thread_pool: { get () { return this.threadPool } } -}) - -module.exports = CatApi diff --git a/api/api/ccr.js b/api/api/ccr.js deleted file mode 100644 index 8282ee015..000000000 --- a/api/api/ccr.js +++ /dev/null @@ -1,403 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
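The autoscaling, bulk, and cat implementations removed above all share the same shape: normalize the arguments, validate required parameters, convert camelCase options into snake_case query-string keys, and pass a request object to the transport. Two details worth noting are that bulk forwards its body as bulkBody, and that cat exposes snake_case aliases for its camelCase methods (for example ml_data_frame_analytics alongside mlDataFrameAnalytics). A minimal sketch of typical calls; the node URL and index name are illustrative assumptions:

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // bulk requires a body, forwarded to the transport as bulkBody.
  await client.bulk({
    index: 'my-index',
    refresh: 'wait_for', // one of the accepted query-string parameters
    body: [
      { index: { _id: '1' } },
      { title: 'hello world' }
    ]
  })

  // cat methods default to GET and send no body.
  const { body: indices } = await client.cat.indices({ format: 'json' })
  console.log(indices)
}

run().catch(console.log)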
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'wait_for_active_shards'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', waitForActiveShards: 'wait_for_active_shards' } - -function CcrApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -CcrApi.prototype.deleteAutoFollowPattern = function ccrDeleteAutoFollowPatternApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ccr' + '/' + 'auto_follow' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -CcrApi.prototype.follow = function ccrFollowApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'follow' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -CcrApi.prototype.followInfo = function ccrFollowInfoApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'info' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CcrApi.prototype.followStats = function ccrFollowStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - 
let path = '' - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CcrApi.prototype.forgetFollower = function ccrForgetFollowerApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'forget_follower' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -CcrApi.prototype.getAutoFollowPattern = function ccrGetAutoFollowPatternApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_ccr' + '/' + 'auto_follow' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_ccr' + '/' + 'auto_follow' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CcrApi.prototype.pauseAutoFollowPattern = function ccrPauseAutoFollowPatternApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ccr' + '/' + 'auto_follow' + '/' + encodeURIComponent(name) + '/' + 'pause' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -CcrApi.prototype.pauseFollow = function ccrPauseFollowApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'pause_follow' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return 
this.transport.request(request, options, callback) -} - -CcrApi.prototype.putAutoFollowPattern = function ccrPutAutoFollowPatternApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_ccr' + '/' + 'auto_follow' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -CcrApi.prototype.resumeAutoFollowPattern = function ccrResumeAutoFollowPatternApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ccr' + '/' + 'auto_follow' + '/' + encodeURIComponent(name) + '/' + 'resume' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -CcrApi.prototype.resumeFollow = function ccrResumeFollowApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'resume_follow' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -CcrApi.prototype.stats = function ccrStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_ccr' + '/' + 'stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -CcrApi.prototype.unfollow = function ccrUnfollowApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, 
...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'unfollow' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(CcrApi.prototype, { - delete_auto_follow_pattern: { get () { return this.deleteAutoFollowPattern } }, - follow_info: { get () { return this.followInfo } }, - follow_stats: { get () { return this.followStats } }, - forget_follower: { get () { return this.forgetFollower } }, - get_auto_follow_pattern: { get () { return this.getAutoFollowPattern } }, - pause_auto_follow_pattern: { get () { return this.pauseAutoFollowPattern } }, - pause_follow: { get () { return this.pauseFollow } }, - put_auto_follow_pattern: { get () { return this.putAutoFollowPattern } }, - resume_auto_follow_pattern: { get () { return this.resumeAutoFollowPattern } }, - resume_follow: { get () { return this.resumeFollow } } -}) - -module.exports = CcrApi diff --git a/api/api/clear_scroll.js b/api/api/clear_scroll.js deleted file mode 100644 index 3b742abbc..000000000 --- a/api/api/clear_scroll.js +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } - -function clearScrollApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, scrollId, scroll_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((scroll_id || scrollId) != null) { - if (method == null) method = 'DELETE' - path = '/' + '_search' + '/' + 'scroll' + '/' + encodeURIComponent(scroll_id || scrollId) - } else { - if (method == null) method = 'DELETE' - path = '/' + '_search' + '/' + 'scroll' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = clearScrollApi diff --git a/api/api/close_point_in_time.js b/api/api/close_point_in_time.js deleted file mode 100644 index 14c73a446..000000000 --- a/api/api/close_point_in_time.js +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. 
under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } - -function closePointInTimeApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_pit' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = closePointInTimeApi diff --git a/api/api/cluster.js b/api/api/cluster.js deleted file mode 100644 index 5feb744f0..000000000 --- a/api/api/cluster.js +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
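The ccr, clear_scroll, and close_point_in_time implementations removed above follow the same conventions. clearScroll in particular accepts the scroll id as either scrollId or scroll_id and, when neither is given, clears every scroll context via DELETE /_search/scroll. A minimal sketch with placeholder ids; the node URL, index names, and ids are illustrative assumptions:

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // PUT /follower-index/_ccr/follow (index and body are both required)
  await client.ccr.follow({
    index: 'follower-index',
    body: { remote_cluster: 'remote', leader_index: 'leader-index' }
  })

  const scrollId = '<scroll id from a previous search>' // placeholder
  await client.clearScroll({ scrollId }) // DELETE /_search/scroll/<id>

  const pitId = '<id from an open point in time>' // placeholder
  await client.closePointInTime({ body: { id: pitId } }) // DELETE /_pit
}

run().catch(console.log)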
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['include_yes_decisions', 'include_disk_info', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'timeout', 'master_timeout', 'wait_for_removal', 'local', 'flat_settings', 'include_defaults', 'expand_wildcards', 'level', 'wait_for_active_shards', 'wait_for_nodes', 'wait_for_events', 'wait_for_no_relocating_shards', 'wait_for_no_initializing_shards', 'wait_for_status', 'node_ids', 'node_names', 'create', 'dry_run', 'explain', 'retry_failed', 'metric', 'wait_for_metadata_version', 'wait_for_timeout', 'ignore_unavailable', 'allow_no_indices'] -const snakeCase = { includeYesDecisions: 'include_yes_decisions', includeDiskInfo: 'include_disk_info', errorTrace: 'error_trace', filterPath: 'filter_path', masterTimeout: 'master_timeout', waitForRemoval: 'wait_for_removal', flatSettings: 'flat_settings', includeDefaults: 'include_defaults', expandWildcards: 'expand_wildcards', waitForActiveShards: 'wait_for_active_shards', waitForNodes: 'wait_for_nodes', waitForEvents: 'wait_for_events', waitForNoRelocatingShards: 'wait_for_no_relocating_shards', waitForNoInitializingShards: 'wait_for_no_initializing_shards', waitForStatus: 'wait_for_status', nodeIds: 'node_ids', nodeNames: 'node_names', dryRun: 'dry_run', retryFailed: 'retry_failed', waitForMetadataVersion: 'wait_for_metadata_version', waitForTimeout: 'wait_for_timeout', ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices' } - -function ClusterApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -ClusterApi.prototype.allocationExplain = function clusterAllocationExplainApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = body == null ? 
'GET' : 'POST' - path = '/' + '_cluster' + '/' + 'allocation' + '/' + 'explain' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.deleteComponentTemplate = function clusterDeleteComponentTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_component_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.deleteVotingConfigExclusions = function clusterDeleteVotingConfigExclusionsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_cluster' + '/' + 'voting_config_exclusions' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.existsComponentTemplate = function clusterExistsComponentTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'HEAD' - path = '/' + '_component_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.getComponentTemplate = function clusterGetComponentTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_component_template' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_component_template' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.getSettings = function clusterGetSettingsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == 
null) method = 'GET' - path = '/' + '_cluster' + '/' + 'settings' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.health = function clusterHealthApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'health' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'health' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.pendingTasks = function clusterPendingTasksApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'pending_tasks' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.postVotingConfigExclusions = function clusterPostVotingConfigExclusionsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_cluster' + '/' + 'voting_config_exclusions' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.putComponentTemplate = function clusterPutComponentTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_component_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.putSettings = function clusterPutSettingsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, 
snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_cluster' + '/' + 'settings' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.remoteInfo = function clusterRemoteInfoApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_remote' + '/' + 'info' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.reroute = function clusterRerouteApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_cluster' + '/' + 'reroute' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.state = function clusterStateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required url components - if (params.index != null && (params.metric == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: metric') - return handleError(err, callback) - } - - let { method, body, metric, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((metric) != null && (index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'state' + '/' + encodeURIComponent(metric) + '/' + encodeURIComponent(index) - } else if ((metric) != null) { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'state' + '/' + encodeURIComponent(metric) - } else { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'state' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -ClusterApi.prototype.stats = function clusterStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, nodeId, node_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((node_id || nodeId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'stats' + '/' + 'nodes' + '/' + encodeURIComponent(node_id || nodeId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(ClusterApi.prototype, { - allocation_explain: { get () { return this.allocationExplain } }, - delete_component_template: { get () { return this.deleteComponentTemplate } }, - 
delete_voting_config_exclusions: { get () { return this.deleteVotingConfigExclusions } }, - exists_component_template: { get () { return this.existsComponentTemplate } }, - get_component_template: { get () { return this.getComponentTemplate } }, - get_settings: { get () { return this.getSettings } }, - pending_tasks: { get () { return this.pendingTasks } }, - post_voting_config_exclusions: { get () { return this.postVotingConfigExclusions } }, - put_component_template: { get () { return this.putComponentTemplate } }, - put_settings: { get () { return this.putSettings } }, - remote_info: { get () { return this.remoteInfo } } -}) - -module.exports = ClusterApi diff --git a/api/api/count.js b/api/api/count.js deleted file mode 100644 index ff2f52461..000000000 --- a/api/api/count.js +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['ignore_unavailable', 'ignore_throttled', 'allow_no_indices', 'expand_wildcards', 'min_score', 'preference', 'routing', 'q', 'analyzer', 'analyze_wildcard', 'default_operator', 'df', 'lenient', 'terminate_after', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { ignoreUnavailable: 'ignore_unavailable', ignoreThrottled: 'ignore_throttled', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', minScore: 'min_score', analyzeWildcard: 'analyze_wildcard', defaultOperator: 'default_operator', terminateAfter: 'terminate_after', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function countApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_count' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_count' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = countApi diff --git a/api/api/create.js b/api/api/create.js deleted file mode 100644 index 9a47b60d7..000000000 --- a/api/api/create.js +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['wait_for_active_shards', 'refresh', 'routing', 'timeout', 'version', 'version_type', 'pipeline', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { waitForActiveShards: 'wait_for_active_shards', versionType: 'version_type', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function createApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, id, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (type) != null && (id) != null) { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + encodeURIComponent(id) + '/' + '_create' - } else { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_create' + '/' + encodeURIComponent(id) - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = createApi diff --git a/api/api/dangling_indices.js b/api/api/dangling_indices.js deleted file mode 100644 index 0d2a84ba4..000000000 --- a/api/api/dangling_indices.js +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['accept_data_loss', 'timeout', 'master_timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { acceptDataLoss: 'accept_data_loss', masterTimeout: 'master_timeout', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function DanglingIndicesApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -DanglingIndicesApi.prototype.deleteDanglingIndex = function danglingIndicesDeleteDanglingIndexApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index_uuid == null && params.indexUuid == null) { - const err = new this[kConfigurationError]('Missing required parameter: index_uuid or indexUuid') - return handleError(err, callback) - } - - let { method, body, indexUuid, index_uuid, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_dangling' + '/' + encodeURIComponent(index_uuid || indexUuid) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -DanglingIndicesApi.prototype.importDanglingIndex = function danglingIndicesImportDanglingIndexApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index_uuid == null && params.indexUuid == null) { - const err = new this[kConfigurationError]('Missing required parameter: index_uuid or indexUuid') - return handleError(err, callback) - } - - let { method, body, indexUuid, index_uuid, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_dangling' + '/' + encodeURIComponent(index_uuid || indexUuid) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -DanglingIndicesApi.prototype.listDanglingIndices = function danglingIndicesListDanglingIndicesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_dangling' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(DanglingIndicesApi.prototype, { - delete_dangling_index: { get () { return this.deleteDanglingIndex } }, - import_dangling_index: { get () { return this.importDanglingIndex } }, - list_dangling_indices: { get () { return this.listDanglingIndices } } -}) - -module.exports = DanglingIndicesApi diff --git a/api/api/delete.js b/api/api/delete.js deleted file mode 100644 index 10709c2c5..000000000 --- a/api/api/delete.js +++ 
/dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['wait_for_active_shards', 'refresh', 'routing', 'timeout', 'if_seq_no', 'if_primary_term', 'version', 'version_type', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { waitForActiveShards: 'wait_for_active_shards', ifSeqNo: 'if_seq_no', ifPrimaryTerm: 'if_primary_term', versionType: 'version_type', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function deleteApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, id, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (type) != null && (id) != null) { - if (method == null) method = 'DELETE' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + encodeURIComponent(id) - } else { - if (method == null) method = 'DELETE' - path = '/' + encodeURIComponent(index) + '/' + '_doc' + '/' + encodeURIComponent(id) - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = deleteApi diff --git a/api/api/delete_by_query.js b/api/api/delete_by_query.js deleted file mode 100644 index d8adab456..000000000 --- a/api/api/delete_by_query.js +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['analyzer', 'analyze_wildcard', 'default_operator', 'df', 'from', 'ignore_unavailable', 'allow_no_indices', 'conflicts', 'expand_wildcards', 'lenient', 'preference', 'q', 'routing', 'scroll', 'search_type', 'search_timeout', 'max_docs', 'sort', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'terminate_after', 'stats', 'version', 'request_cache', 'refresh', 'timeout', 'wait_for_active_shards', 'scroll_size', 'wait_for_completion', 'requests_per_second', 'slices', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { analyzeWildcard: 'analyze_wildcard', defaultOperator: 'default_operator', ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', searchType: 'search_type', searchTimeout: 'search_timeout', maxDocs: 'max_docs', _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', terminateAfter: 'terminate_after', requestCache: 'request_cache', waitForActiveShards: 'wait_for_active_shards', scrollSize: 'scroll_size', waitForCompletion: 'wait_for_completion', requestsPerSecond: 'requests_per_second', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function deleteByQueryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_delete_by_query' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = deleteByQueryApi diff --git a/api/api/delete_by_query_rethrottle.js b/api/api/delete_by_query_rethrottle.js deleted file mode 100644 index 01e4174fd..000000000 --- a/api/api/delete_by_query_rethrottle.js +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['requests_per_second', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { requestsPerSecond: 'requests_per_second', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function deleteByQueryRethrottleApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.task_id == null && params.taskId == null) { - const err = new this[kConfigurationError]('Missing required parameter: task_id or taskId') - return handleError(err, callback) - } - if (params.requests_per_second == null && params.requestsPerSecond == null) { - const err = new this[kConfigurationError]('Missing required parameter: requests_per_second or requestsPerSecond') - return handleError(err, callback) - } - - let { method, body, taskId, task_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_delete_by_query' + '/' + encodeURIComponent(task_id || taskId) + '/' + '_rethrottle' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = deleteByQueryRethrottleApi diff --git a/api/api/delete_script.js b/api/api/delete_script.js deleted file mode 100644 index 038899354..000000000 --- a/api/api/delete_script.js +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['timeout', 'master_timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { masterTimeout: 'master_timeout', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function deleteScriptApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_scripts' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = deleteScriptApi diff --git a/api/api/enrich.js b/api/api/enrich.js deleted file mode 100644 index 538a035d2..000000000 --- a/api/api/enrich.js +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'wait_for_completion'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', waitForCompletion: 'wait_for_completion' } - -function EnrichApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -EnrichApi.prototype.deletePolicy = function enrichDeletePolicyApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_enrich' + '/' + 'policy' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -EnrichApi.prototype.executePolicy = function enrichExecutePolicyApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_enrich' + '/' + 'policy' + '/' + encodeURIComponent(name) + '/' + '_execute' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -EnrichApi.prototype.getPolicy = function enrichGetPolicyApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_enrich' + '/' + 'policy' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_enrich' + '/' + 'policy' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -EnrichApi.prototype.putPolicy = function enrichPutPolicyApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 
'PUT' - path = '/' + '_enrich' + '/' + 'policy' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -EnrichApi.prototype.stats = function enrichStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_enrich' + '/' + '_stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(EnrichApi.prototype, { - delete_policy: { get () { return this.deletePolicy } }, - execute_policy: { get () { return this.executePolicy } }, - get_policy: { get () { return this.getPolicy } }, - put_policy: { get () { return this.putPolicy } } -}) - -module.exports = EnrichApi diff --git a/api/api/eql.js b/api/api/eql.js deleted file mode 100644 index 46b718994..000000000 --- a/api/api/eql.js +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'wait_for_completion_timeout', 'keep_alive', 'keep_on_completion'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', waitForCompletionTimeout: 'wait_for_completion_timeout', keepAlive: 'keep_alive', keepOnCompletion: 'keep_on_completion' } - -function EqlApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -EqlApi.prototype.delete = function eqlDeleteApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_eql' + '/' + 'search' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -EqlApi.prototype.get = function eqlGetApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_eql' + '/' + 'search' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -EqlApi.prototype.getStatus = function eqlGetStatusApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_eql' + '/' + 'search' + '/' + 'status' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -EqlApi.prototype.search = function eqlSearchApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = 
snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_eql' + '/' + 'search' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(EqlApi.prototype, { - get_status: { get () { return this.getStatus } } -}) - -module.exports = EqlApi diff --git a/api/api/exists.js b/api/api/exists.js deleted file mode 100644 index 6e50929d6..000000000 --- a/api/api/exists.js +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['stored_fields', 'preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'version', 'version_type', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { storedFields: 'stored_fields', _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', versionType: 'version_type', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function existsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, id, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) + '/' + '_doc' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = existsApi diff --git a/api/api/exists_source.js b/api/api/exists_source.js deleted file mode 100644 index 6677f8d01..000000000 --- a/api/api/exists_source.js +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'version', 'version_type', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', versionType: 'version_type', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function existsSourceApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - // check required url components - if (params.id != null && (params.type == null || params.index == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: type, index') - return handleError(err, callback) - } else if (params.type != null && (params.index == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: index') - return handleError(err, callback) - } - - let { method, body, id, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (type) != null && (id) != null) { - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + encodeURIComponent(id) + '/' + '_source' - } else { - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) + '/' + '_source' + '/' + encodeURIComponent(id) - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = existsSourceApi diff --git a/api/api/explain.js b/api/api/explain.js deleted file mode 100644 index 5c64b86b0..000000000 --- a/api/api/explain.js +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['analyze_wildcard', 'analyzer', 'default_operator', 'df', 'stored_fields', 'lenient', 'preference', 'q', 'routing', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { analyzeWildcard: 'analyze_wildcard', defaultOperator: 'default_operator', storedFields: 'stored_fields', _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function explainApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, id, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_explain' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = explainApi diff --git a/api/api/features.js b/api/api/features.js deleted file mode 100644 index 539b43a2b..000000000 --- a/api/api/features.js +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['master_timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { masterTimeout: 'master_timeout', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function FeaturesApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -FeaturesApi.prototype.getFeatures = function featuresGetFeaturesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_features' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -FeaturesApi.prototype.resetFeatures = function featuresResetFeaturesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_features' + '/' + '_reset' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(FeaturesApi.prototype, { - get_features: { get () { return this.getFeatures } }, - reset_features: { get () { return this.resetFeatures } } -}) - -module.exports = FeaturesApi diff --git a/api/api/field_caps.js b/api/api/field_caps.js deleted file mode 100644 index ba3ea8214..000000000 --- a/api/api/field_caps.js +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['fields', 'ignore_unavailable', 'allow_no_indices', 'expand_wildcards', 'include_unmapped', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', includeUnmapped: 'include_unmapped', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function fieldCapsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_field_caps' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_field_caps' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = fieldCapsApi diff --git a/api/api/fleet.js b/api/api/fleet.js deleted file mode 100644 index 50329860d..000000000 --- a/api/api/fleet.js +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
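// `fieldCaps` above accepts an optional index and a comma-separated
// `fields` list; a minimal sketch with illustrative names:
'use strict'
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })
client.fieldCaps({ index: 'my-index', fields: 'title,created_at' })
  .then(({ body }) => console.log(body.fields))
  .catch(console.error)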
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['wait_for_advance', 'wait_for_index', 'checkpoints', 'timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { waitForAdvance: 'wait_for_advance', waitForIndex: 'wait_for_index', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function FleetApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -FleetApi.prototype.globalCheckpoints = function fleetGlobalCheckpointsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_fleet' + '/' + 'global_checkpoints' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(FleetApi.prototype, { - global_checkpoints: { get () { return this.globalCheckpoints } } -}) - -module.exports = FleetApi diff --git a/api/api/get.js b/api/api/get.js deleted file mode 100644 index d72dbfa73..000000000 --- a/api/api/get.js +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
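// A sketch of the Fleet API defined above; `globalCheckpoints` polls
// the global checkpoints of an index. The index name is illustrative,
// and in practice this endpoint is intended for Fleet system indices.
'use strict'
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })
client.fleet.globalCheckpoints({ index: 'my-index' })
  .then(({ body }) => console.log(body))
  .catch(console.error)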
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['stored_fields', 'preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'version', 'version_type', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { storedFields: 'stored_fields', _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', versionType: 'version_type', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function getApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, id, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_doc' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = getApi diff --git a/api/api/get_script.js b/api/api/get_script.js deleted file mode 100644 index c3b0c08ce..000000000 --- a/api/api/get_script.js +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
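// A minimal sketch for the `get` API above; the call resolves with the
// document under `body._source`. Names are illustrative.
'use strict'
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })
client.get({ index: 'my-index', id: '1' })
  .then(({ body }) => console.log(body._source))
  .catch(console.error)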
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['master_timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { masterTimeout: 'master_timeout', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function getScriptApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_scripts' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = getScriptApi diff --git a/api/api/get_script_context.js b/api/api/get_script_context.js deleted file mode 100644 index 7f4d6f4f9..000000000 --- a/api/api/get_script_context.js +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } - -function getScriptContextApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_script_context' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = getScriptContextApi diff --git a/api/api/get_script_languages.js b/api/api/get_script_languages.js deleted file mode 100644 index a5d71c61b..000000000 --- a/api/api/get_script_languages.js +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
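// Sketches for the stored-script read APIs above; `getScript` requires
// an id while `getScriptContext` takes no parameters. The script id is
// illustrative.
'use strict'
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })
client.getScript({ id: 'my-stored-script' })
  .then(({ body }) => console.log(body.script))
  .catch(console.error)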
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } - -function getScriptLanguagesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_script_language' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = getScriptLanguagesApi diff --git a/api/api/get_source.js b/api/api/get_source.js deleted file mode 100644 index 402a4fae7..000000000 --- a/api/api/get_source.js +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
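// `getScriptLanguages` above queries `_script_language` with no
// required parameters; a minimal sketch, assuming the same local node:
'use strict'
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })
client.getScriptLanguages()
  .then(({ body }) => console.log(body))
  .catch(console.error)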
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'version', 'version_type', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', versionType: 'version_type', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function getSourceApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, id, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_source' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = getSourceApi diff --git a/api/api/graph.js b/api/api/graph.js deleted file mode 100644 index ce5ed266f..000000000 --- a/api/api/graph.js +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
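// The `getSource` API above returns the raw `_source` of a document as
// the response body, without the metadata wrapper that `get` adds.
// Names are illustrative.
'use strict'
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })
client.getSource({ index: 'my-index', id: '1' })
  .then(({ body }) => console.log(body))
  .catch(console.error)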
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['routing', 'timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } - -function GraphApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -GraphApi.prototype.explore = function graphExploreApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_graph' + '/' + 'explore' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = GraphApi diff --git a/api/api/ilm.js b/api/api/ilm.js deleted file mode 100644 index b108bbf91..000000000 --- a/api/api/ilm.js +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
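// A sketch for the Graph API above; `explore` requires an index and
// takes a query plus vertex/connection definitions in the body. The
// field names are illustrative and the endpoint requires the Graph
// feature to be available on the cluster.
'use strict'
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })
client.graph.explore({
  index: 'my-index',
  body: {
    query: { match: { 'user.name': 'alice' } },
    vertices: [{ field: 'tags' }],
    connections: { vertices: [{ field: 'tags' }] }
  }
})
  .then(({ body }) => console.log(body.vertices))
  .catch(console.error)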
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'only_managed', 'only_errors', 'dry_run'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', onlyManaged: 'only_managed', onlyErrors: 'only_errors', dryRun: 'dry_run' } - -function IlmApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -IlmApi.prototype.deleteLifecycle = function ilmDeleteLifecycleApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.policy == null) { - const err = new this[kConfigurationError]('Missing required parameter: policy') - return handleError(err, callback) - } - - let { method, body, policy, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ilm' + '/' + 'policy' + '/' + encodeURIComponent(policy) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IlmApi.prototype.explainLifecycle = function ilmExplainLifecycleApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_ilm' + '/' + 'explain' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IlmApi.prototype.getLifecycle = function ilmGetLifecycleApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, policy, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((policy) != null) { - if (method == null) method = 'GET' - path = '/' + '_ilm' + '/' + 'policy' + '/' + encodeURIComponent(policy) - } else { - if (method == null) method = 'GET' - path = '/' + '_ilm' + '/' + 'policy' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IlmApi.prototype.getStatus = function ilmGetStatusApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_ilm' + '/' + 'status' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IlmApi.prototype.migrateToDataTiers = function ilmMigrateToDataTiersApi (params, options, callback) { 
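// Usage sketches for the ILM read APIs defined above (all names are
// illustrative; each call resolves with the details on `body`):
//   client.ilm.getLifecycle({ policy: 'my-policy' })
//   client.ilm.explainLifecycle({ index: 'my-index' })
//   client.ilm.getStatus()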
- ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ilm' + '/' + 'migrate_to_data_tiers' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IlmApi.prototype.moveToStep = function ilmMoveToStepApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ilm' + '/' + 'move' + '/' + encodeURIComponent(index) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IlmApi.prototype.putLifecycle = function ilmPutLifecycleApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.policy == null) { - const err = new this[kConfigurationError]('Missing required parameter: policy') - return handleError(err, callback) - } - - let { method, body, policy, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_ilm' + '/' + 'policy' + '/' + encodeURIComponent(policy) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IlmApi.prototype.removePolicy = function ilmRemovePolicyApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ilm' + '/' + 'remove' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IlmApi.prototype.retry = function ilmRetryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ilm' + '/' + 'retry' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return 
this.transport.request(request, options, callback) -} - -IlmApi.prototype.start = function ilmStartApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ilm' + '/' + 'start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IlmApi.prototype.stop = function ilmStopApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ilm' + '/' + 'stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(IlmApi.prototype, { - delete_lifecycle: { get () { return this.deleteLifecycle } }, - explain_lifecycle: { get () { return this.explainLifecycle } }, - get_lifecycle: { get () { return this.getLifecycle } }, - get_status: { get () { return this.getStatus } }, - migrate_to_data_tiers: { get () { return this.migrateToDataTiers } }, - move_to_step: { get () { return this.moveToStep } }, - put_lifecycle: { get () { return this.putLifecycle } }, - remove_policy: { get () { return this.removePolicy } } -}) - -module.exports = IlmApi diff --git a/api/api/index.js b/api/api/index.js deleted file mode 100644 index 12f4ef553..000000000 --- a/api/api/index.js +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
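// A sketch for the ILM write APIs above; note that `putLifecycle`
// takes the policy name as the `policy` parameter, matching the
// required-parameter check in the implementation. The policy body is
// an illustrative 30-day delete phase.
'use strict'
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })
client.ilm.putLifecycle({
  policy: 'my-policy',
  body: {
    policy: {
      phases: {
        delete: { min_age: '30d', actions: { delete: {} } }
      }
    }
  }
})
  .then(({ body }) => console.log(body.acknowledged))
  .catch(console.error)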
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['wait_for_active_shards', 'op_type', 'refresh', 'routing', 'timeout', 'version', 'version_type', 'if_seq_no', 'if_primary_term', 'pipeline', 'require_alias', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { waitForActiveShards: 'wait_for_active_shards', opType: 'op_type', versionType: 'version_type', ifSeqNo: 'if_seq_no', ifPrimaryTerm: 'if_primary_term', requireAlias: 'require_alias', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function indexApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, id, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (id) != null) { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_doc' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_doc' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = indexApi diff --git a/api/api/indices.js b/api/api/indices.js deleted file mode 100644 index f1fbdbc9a..000000000 --- a/api/api/indices.js +++ /dev/null @@ -1,1657 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
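// The `index` API above issues a PUT when a document id is supplied
// and a POST (auto-generated id) otherwise, mirroring the
// path-building branch in the implementation. Index name, id, and
// document are illustrative.
'use strict'
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })
client.index({
  index: 'my-index',
  id: '1',
  body: { title: 'hello world' },
  refresh: 'wait_for'
})
  .then(({ body }) => console.log(body.result))
  .catch(console.error)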
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['timeout', 'master_timeout', 'ignore_unavailable', 'allow_no_indices', 'expand_wildcards', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'index', 'fielddata', 'fields', 'query', 'request', 'wait_for_active_shards', 'run_expensive_tasks', 'flush', 'local', 'flat_settings', 'include_defaults', 'force', 'wait_if_ongoing', 'max_num_segments', 'only_expunge_deletes', 'create', 'cause', 'write_index_only', 'preserve_existing', 'order', 'detailed', 'active_only', 'dry_run', 'verbose', 'status', 'completion_fields', 'fielddata_fields', 'groups', 'level', 'types', 'include_segment_file_sizes', 'include_unloaded_segments', 'forbid_closed_indices', 'explain', 'q', 'analyzer', 'analyze_wildcard', 'default_operator', 'df', 'lenient', 'rewrite', 'all_shards'] -const snakeCase = { masterTimeout: 'master_timeout', ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', errorTrace: 'error_trace', filterPath: 'filter_path', waitForActiveShards: 'wait_for_active_shards', runExpensiveTasks: 'run_expensive_tasks', flatSettings: 'flat_settings', includeDefaults: 'include_defaults', waitIfOngoing: 'wait_if_ongoing', maxNumSegments: 'max_num_segments', onlyExpungeDeletes: 'only_expunge_deletes', writeIndexOnly: 'write_index_only', preserveExisting: 'preserve_existing', activeOnly: 'active_only', dryRun: 'dry_run', completionFields: 'completion_fields', fielddataFields: 'fielddata_fields', includeSegmentFileSizes: 'include_segment_file_sizes', includeUnloadedSegments: 'include_unloaded_segments', forbidClosedIndices: 'forbid_closed_indices', analyzeWildcard: 'analyze_wildcard', defaultOperator: 'default_operator', allShards: 'all_shards' } - -function IndicesApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -IndicesApi.prototype.addBlock = function indicesAddBlockApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.block == null) { - const err = new this[kConfigurationError]('Missing required parameter: block') - return handleError(err, callback) - } - - // check required url components - if (params.block != null && (params.index == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: index') - return handleError(err, callback) - } - - let { method, body, index, block, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_block' + '/' + encodeURIComponent(block) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.analyze = function indicesAnalyzeApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, 
querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_analyze' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_analyze' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.clearCache = function indicesClearCacheApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_cache' + '/' + 'clear' - } else { - if (method == null) method = 'POST' - path = '/' + '_cache' + '/' + 'clear' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.clone = function indicesCloneApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.target == null) { - const err = new this[kConfigurationError]('Missing required parameter: target') - return handleError(err, callback) - } - - // check required url components - if (params.target != null && (params.index == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: index') - return handleError(err, callback) - } - - let { method, body, index, target, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_clone' + '/' + encodeURIComponent(target) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.close = function indicesCloseApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_close' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.create = function indicesCreateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = 
snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.createDataStream = function indicesCreateDataStreamApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_data_stream' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.dataStreamsStats = function indicesDataStreamsStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_data_stream' + '/' + encodeURIComponent(name) + '/' + '_stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_data_stream' + '/' + '_stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.delete = function indicesDeleteApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + encodeURIComponent(index) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.deleteAlias = function indicesDeleteAliasApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - // check required url components - if (params.name != null && (params.index == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: index') - return handleError(err, callback) - } - - let { method, body, index, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (name) != null) { - if (method == 
null) method = 'DELETE' - path = '/' + encodeURIComponent(index) + '/' + '_alias' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'DELETE' - path = '/' + encodeURIComponent(index) + '/' + '_aliases' + '/' + encodeURIComponent(name) - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.deleteDataStream = function indicesDeleteDataStreamApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_data_stream' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.deleteIndexTemplate = function indicesDeleteIndexTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_index_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.deleteTemplate = function indicesDeleteTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.diskUsage = function indicesDiskUsageApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_disk_usage' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return 
this.transport.request(request, options, callback) -} - -IndicesApi.prototype.exists = function indicesExistsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.existsAlias = function indicesExistsAliasApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (name) != null) { - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) + '/' + '_alias' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'HEAD' - path = '/' + '_alias' + '/' + encodeURIComponent(name) - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.existsIndexTemplate = function indicesExistsIndexTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'HEAD' - path = '/' + '_index_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.existsTemplate = function indicesExistsTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'HEAD' - path = '/' + '_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.existsType = function indicesExistsTypeApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index 
== null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.type == null) { - const err = new this[kConfigurationError]('Missing required parameter: type') - return handleError(err, callback) - } - - // check required url components - if (params.type != null && (params.index == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: index') - return handleError(err, callback) - } - - let { method, body, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) + '/' + '_mapping' + '/' + encodeURIComponent(type) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.fieldUsageStats = function indicesFieldUsageStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_field_usage_stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.flush = function indicesFlushApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_flush' - } else { - if (method == null) method = body == null ? 
'GET' : 'POST' - path = '/' + '_flush' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.forcemerge = function indicesForcemergeApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_forcemerge' - } else { - if (method == null) method = 'POST' - path = '/' + '_forcemerge' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.freeze = function indicesFreezeApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_freeze' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.get = function indicesGetApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.getAlias = function indicesGetAliasApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, name, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (name) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_alias' + '/' + encodeURIComponent(name) - } else if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_alias' + '/' + encodeURIComponent(name) - } else if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_alias' - } else { - if (method == null) method = 'GET' - path = '/' + '_alias' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.getDataStream = function indicesGetDataStreamApi (params, options, callback) { - ;[params, options, callback] = 
normalizeArguments(params, options, callback) - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_data_stream' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_data_stream' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.getFieldMapping = function indicesGetFieldMappingApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.fields == null) { - const err = new this[kConfigurationError]('Missing required parameter: fields') - return handleError(err, callback) - } - - let { method, body, fields, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (fields) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_mapping' + '/' + 'field' + '/' + encodeURIComponent(fields) - } else { - if (method == null) method = 'GET' - path = '/' + '_mapping' + '/' + 'field' + '/' + encodeURIComponent(fields) - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.getIndexTemplate = function indicesGetIndexTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_index_template' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_index_template' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.getMapping = function indicesGetMappingApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_mapping' - } else { - if (method == null) method = 'GET' - path = '/' + '_mapping' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.getSettings = function indicesGetSettingsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (name) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_settings' + '/' + encodeURIComponent(name) - } else if ((index) != null) { - if (method == null) method = 
'GET' - path = '/' + encodeURIComponent(index) + '/' + '_settings' - } else if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_settings' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_settings' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.getTemplate = function indicesGetTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_template' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_template' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.migrateToDataStream = function indicesMigrateToDataStreamApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_data_stream' + '/' + '_migrate' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.open = function indicesOpenApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_open' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.promoteDataStream = function indicesPromoteDataStreamApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_data_stream' + '/' + '_promote' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.putAlias = 
function indicesPutAliasApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - // check required url components - if (params.name != null && (params.index == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: index') - return handleError(err, callback) - } - - let { method, body, index, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (name) != null) { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_alias' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_aliases' + '/' + encodeURIComponent(name) - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.putIndexTemplate = function indicesPutIndexTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_index_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.putMapping = function indicesPutMappingApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_mapping' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.putSettings = function indicesPutSettingsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, 
callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_settings' - } else { - if (method == null) method = 'PUT' - path = '/' + '_settings' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.putTemplate = function indicesPutTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.recovery = function indicesRecoveryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_recovery' - } else { - if (method == null) method = 'GET' - path = '/' + '_recovery' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.refresh = function indicesRefreshApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_refresh' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_refresh' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.reloadSearchAnalyzers = function indicesReloadSearchAnalyzersApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = body == null ? 
'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_reload_search_analyzers' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.resolveIndex = function indicesResolveIndexApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_resolve' + '/' + 'index' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.rollover = function indicesRolloverApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.alias == null) { - const err = new this[kConfigurationError]('Missing required parameter: alias') - return handleError(err, callback) - } - - // check required url components - if ((params.new_index != null || params.newIndex != null) && (params.alias == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: alias') - return handleError(err, callback) - } - - let { method, body, alias, newIndex, new_index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((alias) != null && (new_index || newIndex) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(alias) + '/' + '_rollover' + '/' + encodeURIComponent(new_index || newIndex) - } else { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(alias) + '/' + '_rollover' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.segments = function indicesSegmentsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_segments' - } else { - if (method == null) method = 'GET' - path = '/' + '_segments' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.shardStores = function indicesShardStoresApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_shard_stores' - } else { - if (method == null) method = 'GET' - path = '/' + '_shard_stores' - } - - // 
build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.shrink = function indicesShrinkApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.target == null) { - const err = new this[kConfigurationError]('Missing required parameter: target') - return handleError(err, callback) - } - - // check required url components - if (params.target != null && (params.index == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: index') - return handleError(err, callback) - } - - let { method, body, index, target, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_shrink' + '/' + encodeURIComponent(target) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.simulateIndexTemplate = function indicesSimulateIndexTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.name == null) { - const err = new this[kConfigurationError]('Missing required parameter: name') - return handleError(err, callback) - } - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_index_template' + '/' + '_simulate_index' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.simulateTemplate = function indicesSimulateTemplateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((name) != null) { - if (method == null) method = 'POST' - path = '/' + '_index_template' + '/' + '_simulate' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'POST' - path = '/' + '_index_template' + '/' + '_simulate' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.split = function indicesSplitApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.target == null) { - const err = new this[kConfigurationError]('Missing required parameter: target') - return handleError(err, callback) - } - - // check required url components - if (params.target != null && (params.index == null)) { - const err = new 
this[kConfigurationError]('Missing required parameter of the url: index') - return handleError(err, callback) - } - - let { method, body, index, target, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_split' + '/' + encodeURIComponent(target) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.stats = function indicesStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, metric, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (metric) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_stats' + '/' + encodeURIComponent(metric) - } else if ((metric) != null) { - if (method == null) method = 'GET' - path = '/' + '_stats' + '/' + encodeURIComponent(metric) - } else if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.unfreeze = function indicesUnfreezeApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_unfreeze' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.updateAliases = function indicesUpdateAliasesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_aliases' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -IndicesApi.prototype.validateQuery = function indicesValidateQueryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required url components - if (params.type != null && (params.index == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: index') - return handleError(err, callback) - } - - let { method, body, index, type, ...querystring } = params - 
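// Illustrative usage sketch (not part of the patch): every generated method
// in this file follows the same recipe: camelCase params are snake_cased
// into the querystring, and GET is used when no request body is given.
// Assumes a locally running cluster at localhost:9200.
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

// `expandWildcards` is sent as `expand_wildcards`; the body makes this a POST.
client.indices.validateQuery({
  index: 'my-index',
  expandWildcards: 'all',
  body: { query: { match: { title: 'hello' } } }
}, (err, result) => {
  if (err) console.error(err)
  else console.log(result.body.valid)
})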
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null && (type) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + '_validate' + '/' + 'query'
-  } else if ((index) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_validate' + '/' + 'query'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_validate' + '/' + 'query'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-Object.defineProperties(IndicesApi.prototype, {
-  add_block: { get () { return this.addBlock } },
-  clear_cache: { get () { return this.clearCache } },
-  create_data_stream: { get () { return this.createDataStream } },
-  data_streams_stats: { get () { return this.dataStreamsStats } },
-  delete_alias: { get () { return this.deleteAlias } },
-  delete_data_stream: { get () { return this.deleteDataStream } },
-  delete_index_template: { get () { return this.deleteIndexTemplate } },
-  delete_template: { get () { return this.deleteTemplate } },
-  disk_usage: { get () { return this.diskUsage } },
-  exists_alias: { get () { return this.existsAlias } },
-  exists_index_template: { get () { return this.existsIndexTemplate } },
-  exists_template: { get () { return this.existsTemplate } },
-  exists_type: { get () { return this.existsType } },
-  field_usage_stats: { get () { return this.fieldUsageStats } },
-  get_alias: { get () { return this.getAlias } },
-  get_data_stream: { get () { return this.getDataStream } },
-  get_field_mapping: { get () { return this.getFieldMapping } },
-  get_index_template: { get () { return this.getIndexTemplate } },
-  get_mapping: { get () { return this.getMapping } },
-  get_settings: { get () { return this.getSettings } },
-  get_template: { get () { return this.getTemplate } },
-  migrate_to_data_stream: { get () { return this.migrateToDataStream } },
-  promote_data_stream: { get () { return this.promoteDataStream } },
-  put_alias: { get () { return this.putAlias } },
-  put_index_template: { get () { return this.putIndexTemplate } },
-  put_mapping: { get () { return this.putMapping } },
-  put_settings: { get () { return this.putSettings } },
-  put_template: { get () { return this.putTemplate } },
-  reload_search_analyzers: { get () { return this.reloadSearchAnalyzers } },
-  resolve_index: { get () { return this.resolveIndex } },
-  shard_stores: { get () { return this.shardStores } },
-  simulate_index_template: { get () { return this.simulateIndexTemplate } },
-  simulate_template: { get () { return this.simulateTemplate } },
-  update_aliases: { get () { return this.updateAliases } },
-  validate_query: { get () { return this.validateQuery } }
-})
-
-module.exports = IndicesApi
diff --git a/api/api/info.js b/api/api/info.js
deleted file mode 100644
index 7a1c84709..000000000
--- a/api/api/info.js
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function infoApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = infoApi
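// Illustrative usage sketch for the info API removed above (not part of the
// patch; assumes the `client` instance from the earlier sketch). It issues
// GET / and resolves with the cluster name and version metadata.
client.info()
  .then(({ body }) => console.log(body.version.number))
  .catch(console.error)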
diff --git a/api/api/ingest.js b/api/api/ingest.js
deleted file mode 100644
index e3016377d..000000000
--- a/api/api/ingest.js
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['master_timeout', 'timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'summary', 'verbose']
-const snakeCase = { masterTimeout: 'master_timeout', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function IngestApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-IngestApi.prototype.deletePipeline = function ingestDeletePipelineApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_ingest' + '/' + 'pipeline' + '/' + encodeURIComponent(id)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-IngestApi.prototype.geoIpStats = function ingestGeoIpStatsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_ingest' + '/' + 'geoip' + '/' + 'stats'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-IngestApi.prototype.getPipeline = function ingestGetPipelineApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((id) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_ingest' + '/' + 'pipeline' + '/' + encodeURIComponent(id)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_ingest' + '/' + 'pipeline'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-IngestApi.prototype.processorGrok = function ingestProcessorGrokApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_ingest' + '/' + 'processor' + '/' + 'grok'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-IngestApi.prototype.putPipeline = function ingestPutPipelineApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
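// At exactly this step the generated method short-circuits: a missing `id`
// never reaches the wire. Illustrative check, assuming the `client` instance
// from the earlier sketch:
client.ingest.putPipeline({ body: { processors: [] } }, (err) => {
  console.log(err && err.name) // 'ConfigurationError'
})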
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_ingest' + '/' + 'pipeline' + '/' + encodeURIComponent(id)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-IngestApi.prototype.simulate = function ingestSimulateApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((id) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_ingest' + '/' + 'pipeline' + '/' + encodeURIComponent(id) + '/' + '_simulate'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_ingest' + '/' + 'pipeline' + '/' + '_simulate'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-Object.defineProperties(IngestApi.prototype, {
-  delete_pipeline: { get () { return this.deletePipeline } },
-  geo_ip_stats: { get () { return this.geoIpStats } },
-  get_pipeline: { get () { return this.getPipeline } },
-  processor_grok: { get () { return this.processorGrok } },
-  put_pipeline: { get () { return this.putPipeline } }
-})
-
-module.exports = IngestApi
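// Sketch for the license API whose removal follows (illustrative; note that
// the camelCase `acceptEnterprise` param is sent as `accept_enterprise`):
client.license.get({ acceptEnterprise: true })
  .then(({ body }) => console.log(body.license.status))
  .catch(console.error)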
diff --git a/api/api/license.js b/api/api/license.js
deleted file mode 100644
index 5449f2a96..000000000
--- a/api/api/license.js
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'local', 'accept_enterprise', 'acknowledge', 'type']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', acceptEnterprise: 'accept_enterprise' }
-
-function LicenseApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-LicenseApi.prototype.delete = function licenseDeleteApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_license'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-LicenseApi.prototype.get = function licenseGetApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_license'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-LicenseApi.prototype.getBasicStatus = function licenseGetBasicStatusApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_license' + '/' + 'basic_status'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-LicenseApi.prototype.getTrialStatus = function licenseGetTrialStatusApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_license' + '/' + 'trial_status'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-LicenseApi.prototype.post = function licensePostApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_license'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-LicenseApi.prototype.postStartBasic = function licensePostStartBasicApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_license' + '/' + 'start_basic'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-LicenseApi.prototype.postStartTrial = function licensePostStartTrialApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_license' + '/' + 'start_trial'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-Object.defineProperties(LicenseApi.prototype, {
-  get_basic_status: { get () { return this.getBasicStatus } },
-  get_trial_status: { get () { return this.getTrialStatus } },
-  post_start_basic: { get () { return this.postStartBasic } },
-  post_start_trial: { get () { return this.postStartTrial } }
})
-
-module.exports = LicenseApi
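// Sketch for the centralized Logstash pipeline management API removed next.
// Illustrative only: the body shape below follows the _logstash/pipeline
// document format and is an assumption, not taken from this patch.
client.logstash.putPipeline({
  id: 'my-pipeline',
  body: {
    description: 'ingest apache logs',
    last_modified: new Date().toISOString(),
    pipeline: 'input { stdin {} } output { stdout {} }',
    pipeline_metadata: { type: 'logstash_pipeline', version: '1' },
    pipeline_settings: { 'pipeline.workers': 1 },
    username: 'elastic'
  }
}, (err) => { if (err) console.error(err) })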
diff --git a/api/api/logstash.js b/api/api/logstash.js
deleted file mode 100644
index 22bec441f..000000000
--- a/api/api/logstash.js
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function LogstashApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-LogstashApi.prototype.deletePipeline = function logstashDeletePipelineApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_logstash' + '/' + 'pipeline' + '/' + encodeURIComponent(id)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-LogstashApi.prototype.getPipeline = function logstashGetPipelineApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_logstash' + '/' + 'pipeline' + '/' + encodeURIComponent(id)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-LogstashApi.prototype.putPipeline = function logstashPutPipelineApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_logstash' + '/' + 'pipeline' + '/' + encodeURIComponent(id)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-Object.defineProperties(LogstashApi.prototype, {
-  delete_pipeline: { get () { return this.deletePipeline } },
-  get_pipeline: { get () { return this.getPipeline } },
-  put_pipeline: { get () { return this.putPipeline } }
-})
-
-module.exports = LogstashApi
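// mget, removed next, fans one round trip across many ids; note how the
// camelCase `_sourceIncludes` param is sent as `_source_includes`
// (illustrative, reusing the `client` instance from the first sketch):
client.mget({
  index: 'my-index',
  _sourceIncludes: 'title',
  body: { docs: [{ _id: '1' }, { _id: '2' }] }
}, (err, result) => {
  if (err) throw err
  for (const doc of result.body.docs) console.log(doc._id, doc.found)
})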
diff --git a/api/api/mget.js b/api/api/mget.js
deleted file mode 100644
index 623145c10..000000000
--- a/api/api/mget.js
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['stored_fields', 'preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { storedFields: 'stored_fields', _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function mgetApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_mget'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_mget'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = mgetApi
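// Sketch for the migration API removed next; the deprecation-info call that
// backs upgrade checks (illustrative):
client.migration.deprecations({ index: 'my-index' })
  .then(({ body }) => console.log(body.index_settings))
  .catch(console.error)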
diff --git a/api/api/migration.js b/api/api/migration.js
deleted file mode 100644
index ed480f871..000000000
--- a/api/api/migration.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function MigrationApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-MigrationApi.prototype.deprecations = function migrationDeprecationsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + encodeURIComponent(index) + '/' + '_migration' + '/' + 'deprecations'
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_migration' + '/' + 'deprecations'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = MigrationApi
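// Sketch for the ML API removed next. The generated methods accept either the
// snake_case or the camelCase form of required path params (job_id / jobId),
// as the destructuring in the code below shows (illustrative):
client.ml.closeJob({ jobId: 'my-job', force: true })
  .then(({ body }) => console.log(body.closed))
  .catch(console.error)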
diff --git a/api/api/ml.js b/api/api/ml.js
deleted file mode 100644
index bcc953e9d..000000000
--- a/api/api/ml.js
+++ /dev/null
@@ -1,2070 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['allow_no_match', 'allow_no_jobs', 'force', 'timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'requests_per_second', 'allow_no_forecasts', 'wait_for_completion', 'calc_interim', 'start', 'end', 'advance_time', 'skip_time', 'duration', 'expires_in', 'max_model_memory', 'expand', 'exclude_interim', 'from', 'size', 'anomaly_score', 'sort', 'desc', 'job_id', 'partition_field_value', 'exclude_generated', 'verbose', 'allow_no_datafeeds', 'influencer_score', 'top_n', 'bucket_span', 'overall_score', 'record_score', 'include', 'include_model_definition', 'decompress_definition', 'tags', 'reset_start', 'reset_end', 'ignore_unavailable', 'allow_no_indices', 'ignore_throttled', 'expand_wildcards', 'reassign', 'delete_intervening_results', 'enabled']
-const snakeCase = { allowNoMatch: 'allow_no_match', allowNoJobs: 'allow_no_jobs', errorTrace: 'error_trace', filterPath: 'filter_path', requestsPerSecond: 'requests_per_second', allowNoForecasts: 'allow_no_forecasts', waitForCompletion: 'wait_for_completion', calcInterim: 'calc_interim', advanceTime: 'advance_time', skipTime: 'skip_time', expiresIn: 'expires_in', maxModelMemory: 'max_model_memory', excludeInterim: 'exclude_interim', anomalyScore: 'anomaly_score', jobId: 'job_id', partitionFieldValue: 'partition_field_value', excludeGenerated: 'exclude_generated', allowNoDatafeeds: 'allow_no_datafeeds', influencerScore: 'influencer_score', topN: 'top_n', bucketSpan: 'bucket_span', overallScore: 'overall_score', recordScore: 'record_score', includeModelDefinition: 'include_model_definition', decompressDefinition: 'decompress_definition', resetStart: 'reset_start', resetEnd: 'reset_end', ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', ignoreThrottled: 'ignore_throttled', expandWildcards: 'expand_wildcards', deleteInterveningResults: 'delete_intervening_results' }
-
-function MlApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-MlApi.prototype.closeJob = function mlCloseJobApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.job_id == null && params.jobId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId')
-    return handleError(err, callback)
-  }
-
-  let { method, body, jobId, job_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_close'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-MlApi.prototype.deleteCalendar = function mlDeleteCalendarApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.calendar_id == null && params.calendarId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: calendar_id or calendarId')
-    return handleError(err, callback)
-  }
-
-  let { method, body, calendarId, calendar_id,
...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.deleteCalendarEvent = function mlDeleteCalendarEventApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.calendar_id == null && params.calendarId == null) { - const err = new this[kConfigurationError]('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - if (params.event_id == null && params.eventId == null) { - const err = new this[kConfigurationError]('Missing required parameter: event_id or eventId') - return handleError(err, callback) - } - - // check required url components - if ((params.event_id != null || params.eventId != null) && ((params.calendar_id == null && params.calendarId == null))) { - const err = new this[kConfigurationError]('Missing required parameter of the url: calendar_id') - return handleError(err, callback) - } - - let { method, body, calendarId, calendar_id, eventId, event_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) + '/' + 'events' + '/' + encodeURIComponent(event_id || eventId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.deleteCalendarJob = function mlDeleteCalendarJobApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.calendar_id == null && params.calendarId == null) { - const err = new this[kConfigurationError]('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check required url components - if ((params.job_id != null || params.jobId != null) && ((params.calendar_id == null && params.calendarId == null))) { - const err = new this[kConfigurationError]('Missing required parameter of the url: calendar_id') - return handleError(err, callback) - } - - let { method, body, calendarId, calendar_id, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) + '/' + 'jobs' + '/' + encodeURIComponent(job_id || jobId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.deleteDataFrameAnalytics = function mlDeleteDataFrameAnalyticsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check 
required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.deleteDatafeed = function mlDeleteDatafeedApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.datafeed_id == null && params.datafeedId == null) { - const err = new this[kConfigurationError]('Missing required parameter: datafeed_id or datafeedId') - return handleError(err, callback) - } - - let { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.deleteExpiredData = function mlDeleteExpiredDataApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((job_id || jobId) != null) { - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + '_delete_expired_data' + '/' + encodeURIComponent(job_id || jobId) - } else { - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + '_delete_expired_data' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.deleteFilter = function mlDeleteFilterApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.filter_id == null && params.filterId == null) { - const err = new this[kConfigurationError]('Missing required parameter: filter_id or filterId') - return handleError(err, callback) - } - - let { method, body, filterId, filter_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'filters' + '/' + encodeURIComponent(filter_id || filterId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.deleteForecast = function mlDeleteForecastApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check 
required url components - if ((params.forecast_id != null || params.forecastId != null) && ((params.job_id == null && params.jobId == null))) { - const err = new this[kConfigurationError]('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, forecastId, forecast_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((job_id || jobId) != null && (forecast_id || forecastId) != null) { - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_forecast' + '/' + encodeURIComponent(forecast_id || forecastId) - } else { - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_forecast' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.deleteJob = function mlDeleteJobApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.deleteModelSnapshot = function mlDeleteModelSnapshotApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - if (params.snapshot_id == null && params.snapshotId == null) { - const err = new this[kConfigurationError]('Missing required parameter: snapshot_id or snapshotId') - return handleError(err, callback) - } - - // check required url components - if ((params.snapshot_id != null || params.snapshotId != null) && ((params.job_id == null && params.jobId == null))) { - const err = new this[kConfigurationError]('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, snapshotId, snapshot_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'model_snapshots' + '/' + encodeURIComponent(snapshot_id || snapshotId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.deleteTrainedModel = function mlDeleteTrainedModelApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, 
options, callback) - - // check required parameters - if (params.model_id == null && params.modelId == null) { - const err = new this[kConfigurationError]('Missing required parameter: model_id or modelId') - return handleError(err, callback) - } - - let { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.deleteTrainedModelAlias = function mlDeleteTrainedModelAliasApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.model_alias == null && params.modelAlias == null) { - const err = new this[kConfigurationError]('Missing required parameter: model_alias or modelAlias') - return handleError(err, callback) - } - if (params.model_id == null && params.modelId == null) { - const err = new this[kConfigurationError]('Missing required parameter: model_id or modelId') - return handleError(err, callback) - } - - // check required url components - if ((params.model_alias != null || params.modelAlias != null) && ((params.model_id == null && params.modelId == null))) { - const err = new this[kConfigurationError]('Missing required parameter of the url: model_id') - return handleError(err, callback) - } - - let { method, body, modelAlias, model_alias, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) + '/' + 'model_aliases' + '/' + encodeURIComponent(model_alias || modelAlias) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.estimateModelMemory = function mlEstimateModelMemoryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + '_estimate_model_memory' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.evaluateDataFrame = function mlEvaluateDataFrameApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' 
+ '_ml' + '/' + 'data_frame' + '/' + '_evaluate' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.explainDataFrameAnalytics = function mlExplainDataFrameAnalyticsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((id) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) + '/' + '_explain' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + '_explain' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.flushJob = function mlFlushJobApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_flush' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.forecast = function mlForecastApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_forecast' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getBuckets = function mlGetBucketsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check required url components - if (params.timestamp != null && ((params.job_id == null && params.jobId == null))) { - const err = new this[kConfigurationError]('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, timestamp, ...querystring } = params - querystring = 
snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((job_id || jobId) != null && (timestamp) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'buckets' + '/' + encodeURIComponent(timestamp) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'buckets' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getCalendarEvents = function mlGetCalendarEventsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.calendar_id == null && params.calendarId == null) { - const err = new this[kConfigurationError]('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - - let { method, body, calendarId, calendar_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) + '/' + 'events' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getCalendars = function mlGetCalendarsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, calendarId, calendar_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((calendar_id || calendarId) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'calendars' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getCategories = function mlGetCategoriesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check required url components - if ((params.category_id != null || params.categoryId != null) && ((params.job_id == null && params.jobId == null))) { - const err = new this[kConfigurationError]('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, categoryId, category_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((job_id || jobId) != null && (category_id || categoryId) != null) { - if (method == null) method = body == null ? 
'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'categories' + '/' + encodeURIComponent(category_id || categoryId) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'categories' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getDataFrameAnalytics = function mlGetDataFrameAnalyticsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((id) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getDataFrameAnalyticsStats = function mlGetDataFrameAnalyticsStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((id) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) + '/' + '_stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + '_stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getDatafeedStats = function mlGetDatafeedStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((datafeed_id || datafeedId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) + '/' + '_stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + '_stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getDatafeeds = function mlGetDatafeedsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((datafeed_id || datafeedId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 
'datafeeds' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getFilters = function mlGetFiltersApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, filterId, filter_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((filter_id || filterId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'filters' + '/' + encodeURIComponent(filter_id || filterId) - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'filters' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getInfluencers = function mlGetInfluencersApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'influencers' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getJobStats = function mlGetJobStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((job_id || jobId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + '_stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getJobs = function mlGetJobsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((job_id || jobId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'anomaly_detectors' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getModelSnapshots = function mlGetModelSnapshotsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, 
callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check required url components - if ((params.snapshot_id != null || params.snapshotId != null) && ((params.job_id == null && params.jobId == null))) { - const err = new this[kConfigurationError]('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, snapshotId, snapshot_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((job_id || jobId) != null && (snapshot_id || snapshotId) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'model_snapshots' + '/' + encodeURIComponent(snapshot_id || snapshotId) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'model_snapshots' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getOverallBuckets = function mlGetOverallBucketsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'overall_buckets' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getRecords = function mlGetRecordsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = body == null ? 
'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'records' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getTrainedModels = function mlGetTrainedModelsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((model_id || modelId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'trained_models' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.getTrainedModelsStats = function mlGetTrainedModelsStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((model_id || modelId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) + '/' + '_stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'trained_models' + '/' + '_stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.inferTrainedModelDeployment = function mlInferTrainedModelDeploymentApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.model_id == null && params.modelId == null) { - const err = new this[kConfigurationError]('Missing required parameter: model_id or modelId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) + '/' + 'deployment' + '/' + '_infer' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.info = function mlInfoApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'info' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.openJob = function 
mlOpenJobApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_open' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.postCalendarEvents = function mlPostCalendarEventsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.calendar_id == null && params.calendarId == null) { - const err = new this[kConfigurationError]('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, calendarId, calendar_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) + '/' + 'events' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.postData = function mlPostDataApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_data' - - // build request object - const request = { - method, - path, - bulkBody: body, - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.previewDataFrameAnalytics = function mlPreviewDataFrameAnalyticsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((id) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) + '/' + '_preview' - } else { - if (method == null) method = body == null ? 
'GET' : 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + '_preview' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.previewDatafeed = function mlPreviewDatafeedApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((datafeed_id || datafeedId) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) + '/' + '_preview' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + '_preview' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.putCalendar = function mlPutCalendarApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.calendar_id == null && params.calendarId == null) { - const err = new this[kConfigurationError]('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - - let { method, body, calendarId, calendar_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.putCalendarJob = function mlPutCalendarJobApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.calendar_id == null && params.calendarId == null) { - const err = new this[kConfigurationError]('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check required url components - if ((params.job_id != null || params.jobId != null) && ((params.calendar_id == null && params.calendarId == null))) { - const err = new this[kConfigurationError]('Missing required parameter of the url: calendar_id') - return handleError(err, callback) - } - - let { method, body, calendarId, calendar_id, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) + '/' + 'jobs' + '/' + encodeURIComponent(job_id || jobId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.putDataFrameAnalytics = function mlPutDataFrameAnalyticsApi (params, options, 
callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.putDatafeed = function mlPutDatafeedApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.datafeed_id == null && params.datafeedId == null) { - const err = new this[kConfigurationError]('Missing required parameter: datafeed_id or datafeedId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.putFilter = function mlPutFilterApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.filter_id == null && params.filterId == null) { - const err = new this[kConfigurationError]('Missing required parameter: filter_id or filterId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, filterId, filter_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'filters' + '/' + encodeURIComponent(filter_id || filterId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.putJob = function mlPutJobApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, 
querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.putTrainedModel = function mlPutTrainedModelApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.model_id == null && params.modelId == null) { - const err = new this[kConfigurationError]('Missing required parameter: model_id or modelId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.putTrainedModelAlias = function mlPutTrainedModelAliasApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.model_alias == null && params.modelAlias == null) { - const err = new this[kConfigurationError]('Missing required parameter: model_alias or modelAlias') - return handleError(err, callback) - } - if (params.model_id == null && params.modelId == null) { - const err = new this[kConfigurationError]('Missing required parameter: model_id or modelId') - return handleError(err, callback) - } - - // check required url components - if ((params.model_alias != null || params.modelAlias != null) && ((params.model_id == null && params.modelId == null))) { - const err = new this[kConfigurationError]('Missing required parameter of the url: model_id') - return handleError(err, callback) - } - - let { method, body, modelAlias, model_alias, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) + '/' + 'model_aliases' + '/' + encodeURIComponent(model_alias || modelAlias) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.resetJob = function mlResetJobApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_reset' - - // build request object - const request 
= { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.revertModelSnapshot = function mlRevertModelSnapshotApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.job_id == null && params.jobId == null) { - const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - if (params.snapshot_id == null && params.snapshotId == null) { - const err = new this[kConfigurationError]('Missing required parameter: snapshot_id or snapshotId') - return handleError(err, callback) - } - - // check required url components - if ((params.snapshot_id != null || params.snapshotId != null) && ((params.job_id == null && params.jobId == null))) { - const err = new this[kConfigurationError]('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - let { method, body, jobId, job_id, snapshotId, snapshot_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'model_snapshots' + '/' + encodeURIComponent(snapshot_id || snapshotId) + '/' + '_revert' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.setUpgradeMode = function mlSetUpgradeModeApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'set_upgrade_mode' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.startDataFrameAnalytics = function mlStartDataFrameAnalyticsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) + '/' + '_start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.startDatafeed = function mlStartDatafeedApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.datafeed_id == null && params.datafeedId == null) { - const err = new this[kConfigurationError]('Missing required parameter: datafeed_id or datafeedId') - return handleError(err, callback) - } - - let { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, 
snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) + '/' + '_start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.startTrainedModelDeployment = function mlStartTrainedModelDeploymentApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.model_id == null && params.modelId == null) { - const err = new this[kConfigurationError]('Missing required parameter: model_id or modelId') - return handleError(err, callback) - } - - let { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) + '/' + 'deployment' + '/' + '_start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.stopDataFrameAnalytics = function mlStopDataFrameAnalyticsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) + '/' + '_stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.stopDatafeed = function mlStopDatafeedApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.datafeed_id == null && params.datafeedId == null) { - const err = new this[kConfigurationError]('Missing required parameter: datafeed_id or datafeedId') - return handleError(err, callback) - } - - let { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) + '/' + '_stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.stopTrainedModelDeployment = function mlStopTrainedModelDeploymentApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.model_id == null && params.modelId == null) { - const err = new this[kConfigurationError]('Missing required parameter: model_id or modelId') - return handleError(err, callback) - } - - let { method, body, modelId, model_id, ...querystring } = params - querystring = 
snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) + '/' + 'deployment' + '/' + '_stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.updateDataFrameAnalytics = function mlUpdateDataFrameAnalyticsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) + '/' + '_update' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.updateDatafeed = function mlUpdateDatafeedApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.datafeed_id == null && params.datafeedId == null) { - const err = new this[kConfigurationError]('Missing required parameter: datafeed_id or datafeedId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) + '/' + '_update' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.updateFilter = function mlUpdateFilterApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.filter_id == null && params.filterId == null) { - const err = new this[kConfigurationError]('Missing required parameter: filter_id or filterId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, filterId, filter_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'filters' + '/' + encodeURIComponent(filter_id || filterId) + '/' + '_update' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -MlApi.prototype.updateJob = function mlUpdateJobApi (params, options, 
callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.job_id == null && params.jobId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId')
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, jobId, job_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_update'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-MlApi.prototype.updateModelSnapshot = function mlUpdateModelSnapshotApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.job_id == null && params.jobId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId')
-    return handleError(err, callback)
-  }
-  if (params.snapshot_id == null && params.snapshotId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: snapshot_id or snapshotId')
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if ((params.snapshot_id != null || params.snapshotId != null) && ((params.job_id == null && params.jobId == null))) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: job_id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, jobId, job_id, snapshotId, snapshot_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'model_snapshots' + '/' + encodeURIComponent(snapshot_id || snapshotId) + '/' + '_update'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-MlApi.prototype.upgradeJobSnapshot = function mlUpgradeJobSnapshotApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.job_id == null && params.jobId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: job_id or jobId')
-    return handleError(err, callback)
-  }
-  if (params.snapshot_id == null && params.snapshotId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: snapshot_id or snapshotId')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if ((params.snapshot_id != null || params.snapshotId != null) && ((params.job_id == null && params.jobId == null))) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: job_id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, jobId, job_id, snapshotId, snapshot_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'model_snapshots' + '/' + encodeURIComponent(snapshot_id || snapshotId) + '/' + '_upgrade'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-MlApi.prototype.validate = function mlValidateApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + '_validate'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-MlApi.prototype.validateDetector = function mlValidateDetectorApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + '_validate' + '/' + 'detector'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-Object.defineProperties(MlApi.prototype, {
-  close_job: { get () { return this.closeJob } },
-  delete_calendar: { get () { return this.deleteCalendar } },
-  delete_calendar_event: { get () { return this.deleteCalendarEvent } },
-  delete_calendar_job: { get () { return this.deleteCalendarJob } },
-  delete_data_frame_analytics: { get () { return this.deleteDataFrameAnalytics } },
-  delete_datafeed: { get () { return this.deleteDatafeed } },
-  delete_expired_data: { get () { return this.deleteExpiredData } },
-  delete_filter: { get () { return this.deleteFilter } },
-  delete_forecast: { get () { return this.deleteForecast } },
-  delete_job: { get () { return this.deleteJob } },
-  delete_model_snapshot: { get () { return this.deleteModelSnapshot } },
-  delete_trained_model: { get () { return this.deleteTrainedModel } },
-  delete_trained_model_alias: { get () { return this.deleteTrainedModelAlias } },
-  estimate_model_memory: { get () { return this.estimateModelMemory } },
-  evaluate_data_frame: { get () { return this.evaluateDataFrame } },
-  explain_data_frame_analytics: { get () { return this.explainDataFrameAnalytics } },
-  flush_job: { get () { return this.flushJob } },
-  get_buckets: { get () { return this.getBuckets } },
-  get_calendar_events: { get () { return this.getCalendarEvents } },
-  get_calendars: { get () { return this.getCalendars } },
-  get_categories: { get () { return this.getCategories } },
-  get_data_frame_analytics: { get () { return this.getDataFrameAnalytics } },
-  get_data_frame_analytics_stats: { get () { return this.getDataFrameAnalyticsStats } },
-  get_datafeed_stats: { get () { return this.getDatafeedStats } },
-  get_datafeeds: { get () { return this.getDatafeeds } },
-  get_filters: { get () { return this.getFilters } },
-  get_influencers: { get () { return this.getInfluencers } },
-  get_job_stats: { get () { return this.getJobStats } },
-  get_jobs: { get () { return this.getJobs } },
-  get_model_snapshots: { get () { return this.getModelSnapshots } },
-  get_overall_buckets: { get () { return this.getOverallBuckets } },
-  get_records: { get () { return this.getRecords } },
-  get_trained_models: { get () { return this.getTrainedModels } },
-  get_trained_models_stats: { get () { return this.getTrainedModelsStats } },
-  infer_trained_model_deployment: { get () { return this.inferTrainedModelDeployment } },
-  open_job: { get () { return this.openJob } },
-  post_calendar_events: { get () { return this.postCalendarEvents } },
-  post_data: { get () { return this.postData } },
-  preview_data_frame_analytics: { get () { return this.previewDataFrameAnalytics } },
-  preview_datafeed: { get () { return this.previewDatafeed } },
-  put_calendar: { get () { return this.putCalendar } },
-  put_calendar_job: { get () { return this.putCalendarJob } },
-  put_data_frame_analytics: { get () { return this.putDataFrameAnalytics } },
-  put_datafeed: { get () { return this.putDatafeed } },
-  put_filter: { get () { return this.putFilter } },
-  put_job: { get () { return this.putJob } },
-  put_trained_model: { get () { return this.putTrainedModel } },
-  put_trained_model_alias: { get () { return this.putTrainedModelAlias } },
-  reset_job: { get () { return this.resetJob } },
-  revert_model_snapshot: { get () { return this.revertModelSnapshot } },
-  set_upgrade_mode: { get () { return this.setUpgradeMode } },
-  start_data_frame_analytics: { get () { return this.startDataFrameAnalytics } },
-  start_datafeed: { get () { return this.startDatafeed } },
-  start_trained_model_deployment: { get () { return this.startTrainedModelDeployment } },
-  stop_data_frame_analytics: { get () { return this.stopDataFrameAnalytics } },
-  stop_datafeed: { get () { return this.stopDatafeed } },
-  stop_trained_model_deployment: { get () { return this.stopTrainedModelDeployment } },
-  update_data_frame_analytics: { get () { return this.updateDataFrameAnalytics } },
-  update_datafeed: { get () { return this.updateDatafeed } },
-  update_filter: { get () { return this.updateFilter } },
-  update_job: { get () { return this.updateJob } },
-  update_model_snapshot: { get () { return this.updateModelSnapshot } },
-  upgrade_job_snapshot: { get () { return this.upgradeJobSnapshot } },
-  validate_detector: { get () { return this.validateDetector } }
-})
-
-module.exports = MlApi
diff --git a/api/api/monitoring.js b/api/api/monitoring.js
deleted file mode 100644
index 5366bd517..000000000
--- a/api/api/monitoring.js
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['system_id', 'system_api_version', 'interval', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { systemId: 'system_id', systemApiVersion: 'system_api_version', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function MonitoringApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-MonitoringApi.prototype.bulk = function monitoringBulkApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, type, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((type) != null) {
-    if (method == null) method = 'POST'
-    path = '/' + '_monitoring' + '/' + encodeURIComponent(type) + '/' + 'bulk'
-  } else {
-    if (method == null) method = 'POST'
-    path = '/' + '_monitoring' + '/' + 'bulk'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    bulkBody: body,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = MonitoringApi
diff --git a/api/api/msearch.js b/api/api/msearch.js
deleted file mode 100644
index 1227765bc..000000000
--- a/api/api/msearch.js
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['search_type', 'max_concurrent_searches', 'typed_keys', 'pre_filter_shard_size', 'max_concurrent_shard_requests', 'rest_total_hits_as_int', 'ccs_minimize_roundtrips', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { searchType: 'search_type', maxConcurrentSearches: 'max_concurrent_searches', typedKeys: 'typed_keys', preFilterShardSize: 'pre_filter_shard_size', maxConcurrentShardRequests: 'max_concurrent_shard_requests', restTotalHitsAsInt: 'rest_total_hits_as_int', ccsMinimizeRoundtrips: 'ccs_minimize_roundtrips', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function msearchApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_msearch'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_msearch'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    bulkBody: body,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = msearchApi
diff --git a/api/api/msearch_template.js b/api/api/msearch_template.js
deleted file mode 100644
index cfce7eb0c..000000000
--- a/api/api/msearch_template.js
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['search_type', 'typed_keys', 'max_concurrent_searches', 'rest_total_hits_as_int', 'ccs_minimize_roundtrips', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { searchType: 'search_type', typedKeys: 'typed_keys', maxConcurrentSearches: 'max_concurrent_searches', restTotalHitsAsInt: 'rest_total_hits_as_int', ccsMinimizeRoundtrips: 'ccs_minimize_roundtrips', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function msearchTemplateApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_msearch' + '/' + 'template'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_msearch' + '/' + 'template'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    bulkBody: body,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = msearchTemplateApi
diff --git a/api/api/mtermvectors.js b/api/api/mtermvectors.js
deleted file mode 100644
index 7dc49fe46..000000000
--- a/api/api/mtermvectors.js
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['ids', 'term_statistics', 'field_statistics', 'fields', 'offsets', 'positions', 'payloads', 'preference', 'routing', 'realtime', 'version', 'version_type', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { termStatistics: 'term_statistics', fieldStatistics: 'field_statistics', versionType: 'version_type', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function mtermvectorsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_mtermvectors'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_mtermvectors'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = mtermvectorsApi
diff --git a/api/api/nodes.js b/api/api/nodes.js
deleted file mode 100644
index b06002e66..000000000
--- a/api/api/nodes.js
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'interval', 'snapshots', 'threads', 'ignore_idle_threads', 'type', 'timeout', 'flat_settings', 'completion_fields', 'fielddata_fields', 'fields', 'groups', 'level', 'types', 'include_segment_file_sizes', 'include_unloaded_segments']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', ignoreIdleThreads: 'ignore_idle_threads', flatSettings: 'flat_settings', completionFields: 'completion_fields', fielddataFields: 'fielddata_fields', includeSegmentFileSizes: 'include_segment_file_sizes', includeUnloadedSegments: 'include_unloaded_segments' }
-
-function NodesApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-NodesApi.prototype.clearMeteringArchive = function nodesClearMeteringArchiveApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.node_id == null && params.nodeId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: node_id or nodeId')
-    return handleError(err, callback)
-  }
-  if (params.max_archive_version == null && params.maxArchiveVersion == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: max_archive_version or maxArchiveVersion')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if ((params.max_archive_version != null || params.maxArchiveVersion != null) && ((params.node_id == null && params.nodeId == null))) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: node_id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, nodeId, node_id, maxArchiveVersion, max_archive_version, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + '_repositories_metering' + '/' + encodeURIComponent(max_archive_version || maxArchiveVersion)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-NodesApi.prototype.getMeteringInfo = function nodesGetMeteringInfoApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.node_id == null && params.nodeId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: node_id or nodeId')
-    return handleError(err, callback)
-  }
-
-  let { method, body, nodeId, node_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + '_repositories_metering'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-NodesApi.prototype.hotThreads = function nodesHotThreadsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, nodeId, node_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((node_id || nodeId) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'hot_threads'
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + 'hot_threads'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-NodesApi.prototype.info = function nodesInfoApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, nodeId, node_id, metric, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((node_id || nodeId) != null && (metric) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + encodeURIComponent(metric)
-  } else if ((node_id || nodeId) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId)
-  } else if ((metric) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + encodeURIComponent(metric)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-NodesApi.prototype.reloadSecureSettings = function nodesReloadSecureSettingsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, nodeId, node_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((node_id || nodeId) != null) {
-    if (method == null) method = 'POST'
-    path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'reload_secure_settings'
-  } else {
-    if (method == null) method = 'POST'
-    path = '/' + '_nodes' + '/' + 'reload_secure_settings'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-NodesApi.prototype.stats = function nodesStatsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, nodeId, node_id, metric, indexMetric, index_metric, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((node_id || nodeId) != null && (metric) != null && (index_metric || indexMetric) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'stats' + '/' + encodeURIComponent(metric) + '/' + encodeURIComponent(index_metric || indexMetric)
-  } else if ((node_id || nodeId) != null && (metric) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'stats' + '/' + encodeURIComponent(metric)
-  } else if ((metric) != null && (index_metric || indexMetric) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + 'stats' + '/' + encodeURIComponent(metric) + '/' + encodeURIComponent(index_metric || indexMetric)
-  } else if ((node_id || nodeId) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'stats'
-  } else if ((metric) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + 'stats' + '/' + encodeURIComponent(metric)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + 'stats'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-NodesApi.prototype.usage = function nodesUsageApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, nodeId, node_id, metric, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((node_id || nodeId) != null && (metric) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'usage' + '/' + encodeURIComponent(metric)
-  } else if ((node_id || nodeId) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'usage'
-  } else if ((metric) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + 'usage' + '/' + encodeURIComponent(metric)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + 'usage'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-Object.defineProperties(NodesApi.prototype, {
-  clear_metering_archive: { get () { return this.clearMeteringArchive } },
-  get_metering_info: { get () { return this.getMeteringInfo } },
-  hot_threads: { get () { return this.hotThreads } },
-  reload_secure_settings: { get () { return this.reloadSecureSettings } }
-})
-
-module.exports = NodesApi
diff --git a/api/api/open_point_in_time.js b/api/api/open_point_in_time.js
deleted file mode 100644
index 81953f193..000000000
--- a/api/api/open_point_in_time.js
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['preference', 'routing', 'ignore_unavailable', 'expand_wildcards', 'keep_alive', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { ignoreUnavailable: 'ignore_unavailable', expandWildcards: 'expand_wildcards', keepAlive: 'keep_alive', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function openPointInTimeApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_pit'
-  } else {
-    if (method == null) method = 'POST'
-    path = '/' + '_pit'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = openPointInTimeApi
diff --git a/api/api/ping.js b/api/api/ping.js
deleted file mode 100644
index 48c64f38e..000000000
--- a/api/api/ping.js
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function pingApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'HEAD'
-  path = '/'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = pingApi
diff --git a/api/api/put_script.js b/api/api/put_script.js
deleted file mode 100644
index 0737ffc61..000000000
--- a/api/api/put_script.js
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['timeout', 'master_timeout', 'context', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { masterTimeout: 'master_timeout', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function putScriptApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if (params.context != null && (params.id == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, context, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((id) != null && (context) != null) {
-    if (method == null) method = 'PUT'
-    path = '/' + '_scripts' + '/' + encodeURIComponent(id) + '/' + encodeURIComponent(context)
-  } else {
-    if (method == null) method = 'PUT'
-    path = '/' + '_scripts' + '/' + encodeURIComponent(id)
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = putScriptApi
diff --git a/api/api/rank_eval.js b/api/api/rank_eval.js
deleted file mode 100644
index e8b7993bc..000000000
--- a/api/api/rank_eval.js
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['ignore_unavailable', 'allow_no_indices', 'expand_wildcards', 'search_type', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', searchType: 'search_type', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function rankEvalApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_rank_eval'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_rank_eval'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = rankEvalApi
diff --git a/api/api/reindex.js b/api/api/reindex.js
deleted file mode 100644
index 6f3f0541f..000000000
--- a/api/api/reindex.js
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['refresh', 'timeout', 'wait_for_active_shards', 'wait_for_completion', 'requests_per_second', 'scroll', 'slices', 'max_docs', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { waitForActiveShards: 'wait_for_active_shards', waitForCompletion: 'wait_for_completion', requestsPerSecond: 'requests_per_second', maxDocs: 'max_docs', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function reindexApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_reindex'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = reindexApi
diff --git a/api/api/reindex_rethrottle.js b/api/api/reindex_rethrottle.js
deleted file mode 100644
index 649edfa29..000000000
--- a/api/api/reindex_rethrottle.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['requests_per_second', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { requestsPerSecond: 'requests_per_second', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function reindexRethrottleApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.task_id == null && params.taskId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: task_id or taskId')
-    return handleError(err, callback)
-  }
-  if (params.requests_per_second == null && params.requestsPerSecond == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: requests_per_second or requestsPerSecond')
-    return handleError(err, callback)
-  }
-
-  let { method, body, taskId, task_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_reindex' + '/' + encodeURIComponent(task_id || taskId) + '/' + '_rethrottle'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = reindexRethrottleApi
diff --git a/api/api/render_search_template.js b/api/api/render_search_template.js
deleted file mode 100644
index 795ec8c51..000000000
--- a/api/api/render_search_template.js
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function renderSearchTemplateApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((id) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_render' + '/' + 'template' + '/' + encodeURIComponent(id)
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_render' + '/' + 'template'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = renderSearchTemplateApi
diff --git a/api/api/rollup.js b/api/api/rollup.js
deleted file mode 100644
index 37a3ad946..000000000
--- a/api/api/rollup.js
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'typed_keys', 'rest_total_hits_as_int', 'wait_for_completion', 'timeout']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', typedKeys: 'typed_keys', restTotalHitsAsInt: 'rest_total_hits_as_int', waitForCompletion: 'wait_for_completion' }
-
-function RollupApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-RollupApi.prototype.deleteJob = function rollupDeleteJobApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_rollup' + '/' + 'job' + '/' + encodeURIComponent(id)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-RollupApi.prototype.getJobs = function rollupGetJobsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((id) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_rollup' + '/' + 'job' + '/' + encodeURIComponent(id)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_rollup' + '/' + 'job'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-RollupApi.prototype.getRollupCaps = function rollupGetRollupCapsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((id) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_rollup' + '/' + 'data' + '/' + encodeURIComponent(id)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_rollup' + '/' + 'data'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-RollupApi.prototype.getRollupIndexCaps = function rollupGetRollupIndexCapsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.index == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: index')
-    return handleError(err, callback)
-  }
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + encodeURIComponent(index) + '/' + '_rollup' + '/' + 'data'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-RollupApi.prototype.putJob = function rollupPutJobApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_rollup' + '/' + 'job' + '/' + encodeURIComponent(id)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-RollupApi.prototype.rollup = function rollupRollupApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.index == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: index')
-    return handleError(err, callback)
-  }
-  if (params.rollup_index == null && params.rollupIndex == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: rollup_index or rollupIndex')
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if ((params.rollup_index != null || params.rollupIndex != null) && (params.index == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: index')
-    return handleError(err, callback)
-  }
-
-  let { method, body, index, rollupIndex, rollup_index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + encodeURIComponent(index) + '/' + '_rollup' + '/' + encodeURIComponent(rollup_index || rollupIndex)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-RollupApi.prototype.rollupSearch = function rollupRollupSearchApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.index == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: index')
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if (params.type != null && (params.index == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: index')
-    return handleError(err, callback)
-  }
-
-  let { method, body, index, type, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null && (type) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + '_rollup_search'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_rollup_search'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-RollupApi.prototype.startJob = function rollupStartJobApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_rollup' + '/' + 'job' + '/' + encodeURIComponent(id) + '/' + '_start'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-RollupApi.prototype.stopJob = function rollupStopJobApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_rollup' + '/' + 'job' + '/' + encodeURIComponent(id) + '/' + '_stop'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-Object.defineProperties(RollupApi.prototype, {
-  delete_job: { get () { return this.deleteJob } },
-  get_jobs: { get () { return this.getJobs } },
-  get_rollup_caps: { get () { return this.getRollupCaps } },
-  get_rollup_index_caps: { get () { return this.getRollupIndexCaps } },
-  put_job: { get () { return this.putJob } },
-  rollup_search: { get () { return this.rollupSearch } },
-  start_job: { get () { return this.startJob } },
-  stop_job: { get () { return this.stopJob } }
-})
-
-module.exports = RollupApi
diff --git a/api/api/scripts_painless_execute.js b/api/api/scripts_painless_execute.js
deleted file mode 100644
index fbe2f08be..000000000
--- a/api/api/scripts_painless_execute.js
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function scriptsPainlessExecuteApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = body == null ? 'GET' : 'POST'
-  path = '/' + '_scripts' + '/' + 'painless' + '/' + '_execute'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = scriptsPainlessExecuteApi
diff --git a/api/api/scroll.js b/api/api/scroll.js
deleted file mode 100644
index 27b7eb78e..000000000
--- a/api/api/scroll.js
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['scroll', 'scroll_id', 'rest_total_hits_as_int', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { scrollId: 'scroll_id', restTotalHitsAsInt: 'rest_total_hits_as_int', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function scrollApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, scrollId, scroll_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((scroll_id || scrollId) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_search' + '/' + 'scroll' + '/' + encodeURIComponent(scroll_id || scrollId)
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_search' + '/' + 'scroll'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = scrollApi
diff --git a/api/api/search.js b/api/api/search.js
deleted file mode 100644
index 03c1a88e7..000000000
--- a/api/api/search.js
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['analyzer', 'analyze_wildcard', 'ccs_minimize_roundtrips', 'default_operator', 'df', 'explain', 'stored_fields', 'docvalue_fields', 'from', 'ignore_unavailable', 'ignore_throttled', 'allow_no_indices', 'expand_wildcards', 'lenient', 'preference', 'q', 'routing', 'scroll', 'search_type', 'size', 'sort', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'terminate_after', 'stats', 'suggest_field', 'suggest_mode', 'suggest_size', 'suggest_text', 'timeout', 'track_scores', 'track_total_hits', 'allow_partial_search_results', 'typed_keys', 'version', 'seq_no_primary_term', 'request_cache', 'batched_reduce_size', 'max_concurrent_shard_requests', 'pre_filter_shard_size', 'rest_total_hits_as_int', 'min_compatible_shard_node', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { analyzeWildcard: 'analyze_wildcard', ccsMinimizeRoundtrips: 'ccs_minimize_roundtrips', defaultOperator: 'default_operator', storedFields: 'stored_fields', docvalueFields: 'docvalue_fields', ignoreUnavailable: 'ignore_unavailable', ignoreThrottled: 'ignore_throttled', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', searchType: 'search_type', _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', terminateAfter: 'terminate_after', suggestField: 'suggest_field', suggestMode: 'suggest_mode', suggestSize: 'suggest_size', suggestText: 'suggest_text', trackScores: 'track_scores', trackTotalHits: 'track_total_hits', allowPartialSearchResults: 'allow_partial_search_results', typedKeys: 'typed_keys', seqNoPrimaryTerm: 'seq_no_primary_term', requestCache: 'request_cache', batchedReduceSize: 'batched_reduce_size', maxConcurrentShardRequests: 'max_concurrent_shard_requests', preFilterShardSize: 'pre_filter_shard_size', restTotalHitsAsInt: 'rest_total_hits_as_int', minCompatibleShardNode: 'min_compatible_shard_node', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function searchApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_search'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_search'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = searchApi
diff --git a/api/api/search_mvt.js b/api/api/search_mvt.js
deleted file mode 100644
index 242622b0d..000000000
--- a/api/api/search_mvt.js
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['exact_bounds', 'extent', 'grid_precision', 'grid_type', 'size', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { exactBounds: 'exact_bounds', gridPrecision: 'grid_precision', gridType: 'grid_type', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function searchMvtApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.index == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: index')
-    return handleError(err, callback)
-  }
-  if (params.field == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: field')
-    return handleError(err, callback)
-  }
-  if (params.zoom == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: zoom')
-    return handleError(err, callback)
-  }
-  if (params.x == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: x')
-    return handleError(err, callback)
-  }
-  if (params.y == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: y')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if (params.y != null && (params.x == null || params.zoom == null || params.field == null || params.index == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: x, zoom, field, index')
-    return handleError(err, callback)
-  } else if (params.x != null && (params.zoom == null || params.field == null || params.index == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: zoom, field, index')
-    return handleError(err, callback)
-  } else if (params.zoom != null && (params.field == null || params.index == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: field, index')
-    return handleError(err, callback)
-  } else if (params.field != null && (params.index == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: index')
-    return handleError(err, callback)
-  }
-
-  let { method, body, index, field, zoom, x, y, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = body == null ? 'GET' : 'POST'
-  path = '/' + encodeURIComponent(index) + '/' + '_mvt' + '/' + encodeURIComponent(field) + '/' + encodeURIComponent(zoom) + '/' + encodeURIComponent(x) + '/' + encodeURIComponent(y)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = searchMvtApi
diff --git a/api/api/search_shards.js b/api/api/search_shards.js
deleted file mode 100644
index 760bb03e9..000000000
--- a/api/api/search_shards.js
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['preference', 'routing', 'local', 'ignore_unavailable', 'allow_no_indices', 'expand_wildcards', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function searchShardsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_search_shards'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_search_shards'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = searchShardsApi
diff --git a/api/api/search_template.js b/api/api/search_template.js
deleted file mode 100644
index ac143e507..000000000
--- a/api/api/search_template.js
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['ignore_unavailable', 'ignore_throttled', 'allow_no_indices', 'expand_wildcards', 'preference', 'routing', 'scroll', 'search_type', 'explain', 'profile', 'typed_keys', 'rest_total_hits_as_int', 'ccs_minimize_roundtrips', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { ignoreUnavailable: 'ignore_unavailable', ignoreThrottled: 'ignore_throttled', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', searchType: 'search_type', typedKeys: 'typed_keys', restTotalHitsAsInt: 'rest_total_hits_as_int', ccsMinimizeRoundtrips: 'ccs_minimize_roundtrips', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function searchTemplateApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_search' + '/' + 'template'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_search' + '/' + 'template'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = searchTemplateApi
diff --git a/api/api/searchable_snapshots.js b/api/api/searchable_snapshots.js
deleted file mode 100644
index 3353211ca..000000000
--- a/api/api/searchable_snapshots.js
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'ignore_unavailable', 'allow_no_indices', 'expand_wildcards', 'index', 'master_timeout', 'wait_for_completion', 'storage', 'level']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', masterTimeout: 'master_timeout', waitForCompletion: 'wait_for_completion' }
-
-function SearchableSnapshotsApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-SearchableSnapshotsApi.prototype.cacheStats = function searchableSnapshotsCacheStatsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, nodeId, node_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((node_id || nodeId) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_searchable_snapshots' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'cache' + '/' + 'stats'
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_searchable_snapshots' + '/' + 'cache' + '/' + 'stats'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SearchableSnapshotsApi.prototype.clearCache = function searchableSnapshotsClearCacheApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = 'POST'
-    path = '/' + encodeURIComponent(index) + '/' + '_searchable_snapshots' + '/' + 'cache' + '/' + 'clear'
-  } else {
-    if (method == null) method = 'POST'
-    path = '/' + '_searchable_snapshots' + '/' + 'cache' + '/' + 'clear'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SearchableSnapshotsApi.prototype.mount = function searchableSnapshotsMountApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.repository == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: repository')
-    return handleError(err, callback)
-  }
-  if (params.snapshot == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: snapshot')
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if (params.snapshot != null && (params.repository == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: repository')
-    return handleError(err, callback)
-  }
-
-  let { method, body, repository, snapshot, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) + '/' + '_mount'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SearchableSnapshotsApi.prototype.stats = function searchableSnapshotsStatsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((index) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + encodeURIComponent(index) + '/' + '_searchable_snapshots' + '/' + 'stats'
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_searchable_snapshots' + '/' + 'stats'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-Object.defineProperties(SearchableSnapshotsApi.prototype, {
-  cache_stats: { get () { return this.cacheStats } },
-  clear_cache: { get () { return this.clearCache } }
-})
-
-module.exports = SearchableSnapshotsApi
diff --git a/api/api/security.js b/api/api/security.js
deleted file mode 100644
index 4926d6cb8..000000000
--- a/api/api/security.js
+++ /dev/null
@@ -1,1305 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'refresh', 'usernames', 'id', 'name', 'username', 'realm_name', 'owner']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', realmName: 'realm_name' }
-
-function SecurityApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-SecurityApi.prototype.authenticate = function securityAuthenticateApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_security' + '/' + '_authenticate'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.changePassword = function securityChangePasswordApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, username, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((username) != null) {
-    if (method == null) method = 'PUT'
-    path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username) + '/' + '_password'
-  } else {
-    if (method == null) method = 'PUT'
-    path = '/' + '_security' + '/' + 'user' + '/' + '_password'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.clearApiKeyCache = function securityClearApiKeyCacheApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.ids == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: ids')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ids, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'api_key' + '/' + encodeURIComponent(ids) + '/' + '_clear_cache'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.clearCachedPrivileges = function securityClearCachedPrivilegesApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.application == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: application')
-    return handleError(err, callback)
-  }
-
-  let { method, body, application, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'privilege' + '/' + encodeURIComponent(application) + '/' + '_clear_cache'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.clearCachedRealms = function securityClearCachedRealmsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.realms == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: realms')
-    return handleError(err, callback)
-  }
-
-  let { method, body, realms, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'realm' + '/' + encodeURIComponent(realms) + '/' + '_clear_cache'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.clearCachedRoles = function securityClearCachedRolesApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.name == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: name')
-    return handleError(err, callback)
-  }
-
-  let { method, body, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'role' + '/' + encodeURIComponent(name) + '/' + '_clear_cache'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.clearCachedServiceTokens = function securityClearCachedServiceTokensApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.namespace == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: namespace')
-    return handleError(err, callback)
-  }
-  if (params.service == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: service')
-    return handleError(err, callback)
-  }
-  if (params.name == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: name')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if (params.name != null && (params.service == null || params.namespace == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: service, namespace')
-    return handleError(err, callback)
-  } else if (params.service != null && (params.namespace == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: namespace')
-    return handleError(err, callback)
-  }
-
-  let { method, body, namespace, service, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service) + '/' + 'credential' + '/' + 'token' + '/' + encodeURIComponent(name) + '/' + '_clear_cache'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.createApiKey = function securityCreateApiKeyApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_security' + '/' + 'api_key'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.createServiceToken = function securityCreateServiceTokenApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.namespace == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: namespace')
-    return handleError(err, callback)
-  }
-  if (params.service == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: service')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if (params.name != null && (params.service == null || params.namespace == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: service, namespace')
-    return handleError(err, callback)
-  } else if (params.service != null && (params.namespace == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: namespace')
-    return handleError(err, callback)
-  }
-
-  let { method, body, namespace, service, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((namespace) != null && (service) != null && (name) != null) {
-    if (method == null) method = 'PUT'
-    path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service) + '/' + 'credential' + '/' + 'token' + '/' + encodeURIComponent(name)
-  } else {
-    if (method == null) method = 'POST'
-    path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service) + '/' + 'credential' + '/' + 'token'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.deletePrivileges = function securityDeletePrivilegesApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.application == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: application')
-    return handleError(err, callback)
-  }
-  if (params.name == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: name')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if (params.name != null && (params.application == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: application')
-    return handleError(err, callback)
-  }
-
-  let { method, body, application, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_security' + '/' + 'privilege' + '/' + encodeURIComponent(application) + '/' + encodeURIComponent(name)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.deleteRole = function securityDeleteRoleApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.name == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: name')
-    return handleError(err, callback)
-  }
-
-  let { method, body, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_security' + '/' + 'role' + '/' + encodeURIComponent(name)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.deleteRoleMapping = function securityDeleteRoleMappingApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.name == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: name')
-    return handleError(err, callback)
-  }
-
-  let { method, body, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_security' + '/' + 'role_mapping' + '/' + encodeURIComponent(name)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.deleteServiceToken = function securityDeleteServiceTokenApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.namespace == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: namespace')
-    return handleError(err, callback)
-  }
-  if (params.service == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: service')
-    return handleError(err, callback)
-  }
-  if (params.name == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: name')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if (params.name != null && (params.service == null || params.namespace == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: service, namespace')
-    return handleError(err, callback)
-  } else if (params.service != null && (params.namespace == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: namespace')
-    return handleError(err, callback)
-  }
-
-  let { method, body, namespace, service, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service) + '/' + 'credential' + '/' + 'token' + '/' + encodeURIComponent(name)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.deleteUser = function securityDeleteUserApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.username == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: username')
-    return handleError(err, callback)
-  }
-
-  let { method, body, username, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.disableUser = function securityDisableUserApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.username == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: username')
-    return handleError(err, callback)
-  }
-
-  let { method, body, username, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username) + '/' + '_disable'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.enableUser = function securityEnableUserApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.username == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: username')
-    return handleError(err, callback)
-  }
-
-  let { method, body, username, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username) + '/' + '_enable'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.enrollKibana = function securityEnrollKibanaApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_security' + '/' + 'enroll' + '/' + 'kibana'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.enrollNode = function securityEnrollNodeApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_security' + '/' + 'enroll' + '/' + 'node'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.getApiKey = function securityGetApiKeyApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_security' + '/' + 'api_key'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.getBuiltinPrivileges = function securityGetBuiltinPrivilegesApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_security' + '/' + 'privilege' + '/' + '_builtin'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.getPrivileges = function securityGetPrivilegesApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required url components
-  if (params.name != null && (params.application == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: application')
-    return handleError(err, callback)
-  }
-
-  let { method, body, application, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((application) != null && (name) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'privilege' + '/' + encodeURIComponent(application) + '/' + encodeURIComponent(name)
-  } else if ((application) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'privilege' + '/' + encodeURIComponent(application)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'privilege'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.getRole = function securityGetRoleApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((name) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'role' + '/' + encodeURIComponent(name)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'role'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.getRoleMapping = function securityGetRoleMappingApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((name) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'role_mapping' + '/' + encodeURIComponent(name)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'role_mapping'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.getServiceAccounts = function securityGetServiceAccountsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required url components
-  if (params.service != null && (params.namespace == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: namespace')
-    return handleError(err, callback)
-  }
-
-  let { method, body, namespace, service, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((namespace) != null && (service) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service)
-  } else if ((namespace) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'service'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.getServiceCredentials = function securityGetServiceCredentialsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.namespace == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: namespace')
-    return handleError(err, callback)
-  }
-  if (params.service == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: service')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if (params.service != null && (params.namespace == null)) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: namespace')
-    return handleError(err, callback)
-  }
-
-  let { method, body, namespace, service, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_security' + '/' + 'service' + '/' + encodeURIComponent(namespace) + '/' + encodeURIComponent(service) + '/' + 'credential'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.getToken = function securityGetTokenApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'oauth2' + '/' + 'token'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.getUser = function securityGetUserApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, username, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((username) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_security' + '/' + 'user'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.getUserPrivileges = function securityGetUserPrivilegesApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_security' + '/' + 'user' + '/' + '_privileges'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.grantApiKey = function securityGrantApiKeyApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'api_key' + '/' + 'grant'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.hasPrivileges = function securityHasPrivilegesApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, user, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((user) != null) {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(user) + '/' + '_has_privileges'
-  } else {
-    if (method == null) method = body == null ? 'GET' : 'POST'
-    path = '/' + '_security' + '/' + 'user' + '/' + '_has_privileges'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.invalidateApiKey = function securityInvalidateApiKeyApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_security' + '/' + 'api_key'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.invalidateToken = function securityInvalidateTokenApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_security' + '/' + 'oauth2' + '/' + 'token'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.putPrivileges = function securityPutPrivilegesApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_security' + '/' + 'privilege'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.putRole = function securityPutRoleApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.name == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: name')
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_security' + '/' + 'role' + '/' + encodeURIComponent(name)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.putRoleMapping = function securityPutRoleMappingApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.name == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: name')
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_security' + '/' + 'role_mapping' + '/' + encodeURIComponent(name)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.putUser = function securityPutUserApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.username == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: username')
-    return handleError(err, callback)
-  }
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, username, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.queryApiKeys = function securityQueryApiKeysApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = body == null ? 'GET' : 'POST'
-  path = '/' + '_security' + '/' + '_query' + '/' + 'api_key'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.samlAuthenticate = function securitySamlAuthenticateApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'saml' + '/' + 'authenticate'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.samlCompleteLogout = function securitySamlCompleteLogoutApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'saml' + '/' + 'complete_logout'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.samlInvalidate = function securitySamlInvalidateApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'saml' + '/' + 'invalidate'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.samlLogout = function securitySamlLogoutApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'saml' + '/' + 'logout'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.samlPrepareAuthentication = function securitySamlPrepareAuthenticationApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.body == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: body')
-    return handleError(err, callback)
-  }
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_security' + '/' + 'saml' + '/' + 'prepare'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-SecurityApi.prototype.samlServiceProviderMetadata = function securitySamlServiceProviderMetadataApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.realm_name == null && params.realmName == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: realm_name or realmName')
-    return handleError(err, callback)
-  }
-
-  let { method, body, realmName, realm_name, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_security' + '/' + 'saml' + '/' + 'metadata' + '/' + encodeURIComponent(realm_name || realmName)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-Object.defineProperties(SecurityApi.prototype, {
-  change_password: { get () { return this.changePassword } },
-  clear_api_key_cache: { get () { return this.clearApiKeyCache } },
-  clear_cached_privileges: { get () { return this.clearCachedPrivileges } },
-  clear_cached_realms: { get () { return this.clearCachedRealms } },
-  clear_cached_roles: { get () { return this.clearCachedRoles } },
-  clear_cached_service_tokens: { get () { return this.clearCachedServiceTokens } },
-  create_api_key: { get () { return this.createApiKey } },
-  create_service_token: { get () { return this.createServiceToken } },
-  delete_privileges: { get () { return this.deletePrivileges } },
-  delete_role: { get () { return this.deleteRole } },
-  delete_role_mapping: { get () { return this.deleteRoleMapping } },
-  delete_service_token: { get () { return this.deleteServiceToken } },
-  delete_user: { get () { return this.deleteUser } },
-  disable_user: { get () { return this.disableUser } },
-  enable_user: { get () { return this.enableUser } },
-  enroll_kibana: { get () { return this.enrollKibana } },
-  enroll_node: { get () { return this.enrollNode } },
-  get_api_key: { get () { return this.getApiKey } },
-  get_builtin_privileges: { get () { return this.getBuiltinPrivileges } },
-  get_privileges: { get () { return this.getPrivileges } },
-  get_role: { get () { return this.getRole } },
-  get_role_mapping: { get () { return this.getRoleMapping } },
-  get_service_accounts: { get () { return this.getServiceAccounts } },
-  get_service_credentials: { get () { return this.getServiceCredentials } },
-  get_token: { get () { return this.getToken } },
-  get_user: { get () { return this.getUser } },
-  get_user_privileges: { get () { return this.getUserPrivileges } },
-  grant_api_key: { get () { return this.grantApiKey } },
-  has_privileges: { get () { return this.hasPrivileges } },
-  invalidate_api_key: { get () { return this.invalidateApiKey } },
-  invalidate_token: { get () { return this.invalidateToken } },
-  put_privileges: { get () { return this.putPrivileges } },
-  put_role: { get () { return this.putRole } },
-  put_role_mapping: { get () { return this.putRoleMapping } },
-  put_user: { get () { return this.putUser } },
-  query_api_keys: { get () { return this.queryApiKeys } },
-  saml_authenticate: { get () { return this.samlAuthenticate } },
-  saml_complete_logout: { get () { return this.samlCompleteLogout } },
-  saml_invalidate: { get () { return this.samlInvalidate } },
-  saml_logout: { get () { return this.samlLogout } },
-  saml_prepare_authentication: { get () { return this.samlPrepareAuthentication } },
-  saml_service_provider_metadata: { get () { return this.samlServiceProviderMetadata } }
-})
-
-module.exports = SecurityApi
diff --git a/api/api/shutdown.js b/api/api/shutdown.js
deleted file mode 100644
index 096b1dfdf..000000000
--- a/api/api/shutdown.js
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function ShutdownApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-ShutdownApi.prototype.deleteNode = function shutdownDeleteNodeApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.node_id == null && params.nodeId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: node_id or nodeId')
-    return handleError(err, callback)
-  }
-
-  let { method, body, nodeId, node_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'shutdown'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-ShutdownApi.prototype.getNode = function shutdownGetNodeApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, nodeId, node_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((node_id || nodeId) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'shutdown'
|| nodeId) + '/' + 'shutdown' - } else { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + 'shutdown' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -ShutdownApi.prototype.putNode = function shutdownPutNodeApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.node_id == null && params.nodeId == null) { - const err = new this[kConfigurationError]('Missing required parameter: node_id or nodeId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, nodeId, node_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'shutdown' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(ShutdownApi.prototype, { - delete_node: { get () { return this.deleteNode } }, - get_node: { get () { return this.getNode } }, - put_node: { get () { return this.putNode } } -}) - -module.exports = ShutdownApi diff --git a/api/api/slm.js b/api/api/slm.js deleted file mode 100644 index ba2b94fa5..000000000 --- a/api/api/slm.js +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } - -function SlmApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -SlmApi.prototype.deleteLifecycle = function slmDeleteLifecycleApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.policy_id == null && params.policyId == null) { - const err = new this[kConfigurationError]('Missing required parameter: policy_id or policyId') - return handleError(err, callback) - } - - let { method, body, policyId, policy_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_slm' + '/' + 'policy' + '/' + encodeURIComponent(policy_id || policyId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SlmApi.prototype.executeLifecycle = function slmExecuteLifecycleApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.policy_id == null && params.policyId == null) { - const err = new this[kConfigurationError]('Missing required parameter: policy_id or policyId') - return handleError(err, callback) - } - - let { method, body, policyId, policy_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_slm' + '/' + 'policy' + '/' + encodeURIComponent(policy_id || policyId) + '/' + '_execute' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SlmApi.prototype.executeRetention = function slmExecuteRetentionApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_slm' + '/' + '_execute_retention' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SlmApi.prototype.getLifecycle = function slmGetLifecycleApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, policyId, policy_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((policy_id || policyId) != null) { - if (method == null) method = 'GET' - path = '/' + '_slm' + '/' + 'policy' + '/' + encodeURIComponent(policy_id || policyId) - } else { - if (method == null) method = 'GET' - path = '/' + '_slm' + '/' + 'policy' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return 
this.transport.request(request, options, callback) -} - -SlmApi.prototype.getStats = function slmGetStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_slm' + '/' + 'stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -SlmApi.prototype.getStatus = function slmGetStatusApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_slm' + '/' + 'status' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -SlmApi.prototype.putLifecycle = function slmPutLifecycleApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.policy_id == null && params.policyId == null) { - const err = new this[kConfigurationError]('Missing required parameter: policy_id or policyId') - return handleError(err, callback) - } - - let { method, body, policyId, policy_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_slm' + '/' + 'policy' + '/' + encodeURIComponent(policy_id || policyId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SlmApi.prototype.start = function slmStartApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_slm' + '/' + 'start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SlmApi.prototype.stop = function slmStopApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_slm' + '/' + 'stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(SlmApi.prototype, { - delete_lifecycle: { get () { return this.deleteLifecycle } }, - execute_lifecycle: { get () { return this.executeLifecycle } }, - execute_retention: { get () { return this.executeRetention } }, - get_lifecycle: { get () { return this.getLifecycle } }, - get_stats: { get () { return this.getStats } }, - get_status: { get () { return this.getStatus } }, - put_lifecycle: { get () { return this.putLifecycle 
} } -}) - -module.exports = SlmApi diff --git a/api/api/snapshot.js b/api/api/snapshot.js deleted file mode 100644 index 3028779fd..000000000 --- a/api/api/snapshot.js +++ /dev/null @@ -1,439 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['master_timeout', 'timeout', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'wait_for_completion', 'verify', 'ignore_unavailable', 'index_details', 'include_repository', 'verbose', 'local', 'blob_count', 'concurrency', 'read_node_count', 'early_read_node_count', 'seed', 'rare_action_probability', 'max_blob_size', 'max_total_data_size', 'detailed', 'rarely_abort_writes'] -const snakeCase = { masterTimeout: 'master_timeout', errorTrace: 'error_trace', filterPath: 'filter_path', waitForCompletion: 'wait_for_completion', ignoreUnavailable: 'ignore_unavailable', indexDetails: 'index_details', includeRepository: 'include_repository', blobCount: 'blob_count', readNodeCount: 'read_node_count', earlyReadNodeCount: 'early_read_node_count', rareActionProbability: 'rare_action_probability', maxBlobSize: 'max_blob_size', maxTotalDataSize: 'max_total_data_size', rarelyAbortWrites: 'rarely_abort_writes' } - -function SnapshotApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -SnapshotApi.prototype.cleanupRepository = function snapshotCleanupRepositoryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.repository == null) { - const err = new this[kConfigurationError]('Missing required parameter: repository') - return handleError(err, callback) - } - - let { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + '_cleanup' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SnapshotApi.prototype.clone = function snapshotCloneApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.repository == null) { - const err = new this[kConfigurationError]('Missing required parameter: repository') - return handleError(err, callback) - } - if (params.snapshot == null) { - const err = new 
this[kConfigurationError]('Missing required parameter: snapshot') - return handleError(err, callback) - } - if (params.target_snapshot == null && params.targetSnapshot == null) { - const err = new this[kConfigurationError]('Missing required parameter: target_snapshot or targetSnapshot') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - // check required url components - if ((params.target_snapshot != null || params.targetSnapshot != null) && (params.snapshot == null || params.repository == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: snapshot, repository') - return handleError(err, callback) - } else if (params.snapshot != null && (params.repository == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - let { method, body, repository, snapshot, targetSnapshot, target_snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) + '/' + '_clone' + '/' + encodeURIComponent(target_snapshot || targetSnapshot) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SnapshotApi.prototype.create = function snapshotCreateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.repository == null) { - const err = new this[kConfigurationError]('Missing required parameter: repository') - return handleError(err, callback) - } - if (params.snapshot == null) { - const err = new this[kConfigurationError]('Missing required parameter: snapshot') - return handleError(err, callback) - } - - // check required url components - if (params.snapshot != null && (params.repository == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - let { method, body, repository, snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SnapshotApi.prototype.createRepository = function snapshotCreateRepositoryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.repository == null) { - const err = new this[kConfigurationError]('Missing required parameter: repository') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' 
+ '_snapshot' + '/' + encodeURIComponent(repository) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SnapshotApi.prototype.delete = function snapshotDeleteApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.repository == null) { - const err = new this[kConfigurationError]('Missing required parameter: repository') - return handleError(err, callback) - } - if (params.snapshot == null) { - const err = new this[kConfigurationError]('Missing required parameter: snapshot') - return handleError(err, callback) - } - - // check required url components - if (params.snapshot != null && (params.repository == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - let { method, body, repository, snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SnapshotApi.prototype.deleteRepository = function snapshotDeleteRepositoryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.repository == null) { - const err = new this[kConfigurationError]('Missing required parameter: repository') - return handleError(err, callback) - } - - let { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SnapshotApi.prototype.get = function snapshotGetApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.repository == null) { - const err = new this[kConfigurationError]('Missing required parameter: repository') - return handleError(err, callback) - } - if (params.snapshot == null) { - const err = new this[kConfigurationError]('Missing required parameter: snapshot') - return handleError(err, callback) - } - - // check required url components - if (params.snapshot != null && (params.repository == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - let { method, body, repository, snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -SnapshotApi.prototype.getRepository = function 
snapshotGetRepositoryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((repository) != null) { - if (method == null) method = 'GET' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) - } else { - if (method == null) method = 'GET' - path = '/' + '_snapshot' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -SnapshotApi.prototype.repositoryAnalyze = function snapshotRepositoryAnalyzeApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.repository == null) { - const err = new this[kConfigurationError]('Missing required parameter: repository') - return handleError(err, callback) - } - - let { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + '_analyze' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SnapshotApi.prototype.restore = function snapshotRestoreApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.repository == null) { - const err = new this[kConfigurationError]('Missing required parameter: repository') - return handleError(err, callback) - } - if (params.snapshot == null) { - const err = new this[kConfigurationError]('Missing required parameter: snapshot') - return handleError(err, callback) - } - - // check required url components - if (params.snapshot != null && (params.repository == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - let { method, body, repository, snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) + '/' + '_restore' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SnapshotApi.prototype.status = function snapshotStatusApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required url components - if (params.snapshot != null && (params.repository == null)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - let { method, body, repository, snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((repository) != null && (snapshot) != null) { - if (method == null) method = 'GET' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) + '/' + '_status' - } else if 
((repository) != null) { - if (method == null) method = 'GET' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + '_status' - } else { - if (method == null) method = 'GET' - path = '/' + '_snapshot' + '/' + '_status' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -SnapshotApi.prototype.verifyRepository = function snapshotVerifyRepositoryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.repository == null) { - const err = new this[kConfigurationError]('Missing required parameter: repository') - return handleError(err, callback) - } - - let { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + '_verify' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(SnapshotApi.prototype, { - cleanup_repository: { get () { return this.cleanupRepository } }, - create_repository: { get () { return this.createRepository } }, - delete_repository: { get () { return this.deleteRepository } }, - get_repository: { get () { return this.getRepository } }, - repository_analyze: { get () { return this.repositoryAnalyze } }, - verify_repository: { get () { return this.verifyRepository } } -}) - -module.exports = SnapshotApi diff --git a/api/api/sql.js b/api/api/sql.js deleted file mode 100644 index be7c56938..000000000 --- a/api/api/sql.js +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'delimiter', 'format', 'keep_alive', 'wait_for_completion_timeout'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', keepAlive: 'keep_alive', waitForCompletionTimeout: 'wait_for_completion_timeout' } - -function SqlApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -SqlApi.prototype.clearCursor = function sqlClearCursorApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_sql' + '/' + 'close' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SqlApi.prototype.deleteAsync = function sqlDeleteAsyncApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_sql' + '/' + 'async' + '/' + 'delete' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SqlApi.prototype.getAsync = function sqlGetAsyncApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_sql' + '/' + 'async' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -SqlApi.prototype.getAsyncStatus = function sqlGetAsyncStatusApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - - let { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_sql' + '/' + 'async' + '/' + 'status' + '/' + encodeURIComponent(id) - - // 
build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -SqlApi.prototype.query = function sqlQueryApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_sql' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -SqlApi.prototype.translate = function sqlTranslateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_sql' + '/' + 'translate' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(SqlApi.prototype, { - clear_cursor: { get () { return this.clearCursor } }, - delete_async: { get () { return this.deleteAsync } }, - get_async: { get () { return this.getAsync } }, - get_async_status: { get () { return this.getAsyncStatus } } -}) - -module.exports = SqlApi diff --git a/api/api/ssl.js b/api/api/ssl.js deleted file mode 100644 index 5ff91770c..000000000 --- a/api/api/ssl.js +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } - -function SslApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -SslApi.prototype.certificates = function sslCertificatesApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_ssl' + '/' + 'certificates' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = SslApi diff --git a/api/api/tasks.js b/api/api/tasks.js deleted file mode 100644 index 131279e80..000000000 --- a/api/api/tasks.js +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['nodes', 'actions', 'parent_task_id', 'wait_for_completion', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'timeout', 'detailed', 'group_by'] -const snakeCase = { parentTaskId: 'parent_task_id', waitForCompletion: 'wait_for_completion', errorTrace: 'error_trace', filterPath: 'filter_path', groupBy: 'group_by' } - -function TasksApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -TasksApi.prototype.cancel = function tasksCancelApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, taskId, task_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((task_id || taskId) != null) { - if (method == null) method = 'POST' - path = '/' + '_tasks' + '/' + encodeURIComponent(task_id || taskId) + '/' + '_cancel' - } else { - if (method == null) method = 'POST' - path = '/' + '_tasks' + '/' + '_cancel' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -TasksApi.prototype.get = function tasksGetApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.task_id == null && params.taskId == null) { - const err = new this[kConfigurationError]('Missing required parameter: task_id or taskId') - return handleError(err, callback) - } - - let { method, body, taskId, task_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_tasks' + '/' + encodeURIComponent(task_id || taskId) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -TasksApi.prototype.list = function tasksListApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_tasks' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = TasksApi diff --git a/api/api/terms_enum.js b/api/api/terms_enum.js deleted file mode 100644 index c4df48d83..000000000 --- a/api/api/terms_enum.js +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path' } - -function termsEnumApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_terms_enum' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = termsEnumApi diff --git a/api/api/termvectors.js b/api/api/termvectors.js deleted file mode 100644 index db851d4ee..000000000 --- a/api/api/termvectors.js +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['term_statistics', 'field_statistics', 'fields', 'offsets', 'positions', 'payloads', 'preference', 'routing', 'realtime', 'version', 'version_type', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { termStatistics: 'term_statistics', fieldStatistics: 'field_statistics', versionType: 'version_type', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function termvectorsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - - let { method, body, index, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (id) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_termvectors' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_termvectors' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = termvectorsApi diff --git a/api/api/text_structure.js b/api/api/text_structure.js deleted file mode 100644 index 1d29691c4..000000000 --- a/api/api/text_structure.js +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['lines_to_sample', 'line_merge_size_limit', 'timeout', 'charset', 'format', 'has_header_row', 'column_names', 'delimiter', 'quote', 'should_trim_fields', 'grok_pattern', 'timestamp_field', 'timestamp_format', 'explain', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { linesToSample: 'lines_to_sample', lineMergeSizeLimit: 'line_merge_size_limit', hasHeaderRow: 'has_header_row', columnNames: 'column_names', shouldTrimFields: 'should_trim_fields', grokPattern: 'grok_pattern', timestampField: 'timestamp_field', timestampFormat: 'timestamp_format', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function TextStructureApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -TextStructureApi.prototype.findStructure = function textStructureFindStructureApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_text_structure' + '/' + 'find_structure' - - // build request object - const request = { - method, - path, - bulkBody: body, - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(TextStructureApi.prototype, { - find_structure: { get () { return this.findStructure } } -}) - -module.exports = TextStructureApi diff --git a/api/api/transform.js b/api/api/transform.js deleted file mode 100644 index 613f7a66f..000000000 --- a/api/api/transform.js +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['force', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'from', 'size', 'allow_no_match', 'exclude_generated', 'defer_validation', 'timeout', 'wait_for_completion', 'wait_for_checkpoint'] -const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', allowNoMatch: 'allow_no_match', excludeGenerated: 'exclude_generated', deferValidation: 'defer_validation', waitForCompletion: 'wait_for_completion', waitForCheckpoint: 'wait_for_checkpoint' } - -function TransformApi (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError -} - -TransformApi.prototype.deleteTransform = function transformDeleteTransformApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.transform_id == null && params.transformId == null) { - const err = new this[kConfigurationError]('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - let { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'DELETE' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -TransformApi.prototype.getTransform = function transformGetTransformApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - let { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((transform_id || transformId) != null) { - if (method == null) method = 'GET' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) - } else { - if (method == null) method = 'GET' - path = '/' + '_transform' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -TransformApi.prototype.getTransformStats = function transformGetTransformStatsApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.transform_id == null && params.transformId == null) { - const err = new this[kConfigurationError]('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - let { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'GET' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - return this.transport.request(request, options, callback) -} - -TransformApi.prototype.previewTransform = function transformPreviewTransformApi (params, options, callback) { - ;[params, options, callback] = 
normalizeArguments(params, options, callback) - - // check required parameters - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_transform' + '/' + '_preview' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -TransformApi.prototype.putTransform = function transformPutTransformApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.transform_id == null && params.transformId == null) { - const err = new this[kConfigurationError]('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'PUT' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -TransformApi.prototype.startTransform = function transformStartTransformApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.transform_id == null && params.transformId == null) { - const err = new this[kConfigurationError]('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - let { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -TransformApi.prototype.stopTransform = function transformStopTransformApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.transform_id == null && params.transformId == null) { - const err = new this[kConfigurationError]('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - let { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -TransformApi.prototype.updateTransform = function 
transformUpdateTransformApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.transform_id == null && params.transformId == null) { - const err = new this[kConfigurationError]('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if (method == null) method = 'POST' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_update' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -Object.defineProperties(TransformApi.prototype, { - delete_transform: { get () { return this.deleteTransform } }, - get_transform: { get () { return this.getTransform } }, - get_transform_stats: { get () { return this.getTransformStats } }, - preview_transform: { get () { return this.previewTransform } }, - put_transform: { get () { return this.putTransform } }, - start_transform: { get () { return this.startTransform } }, - stop_transform: { get () { return this.stopTransform } }, - update_transform: { get () { return this.updateTransform } } -}) - -module.exports = TransformApi diff --git a/api/api/update.js b/api/api/update.js deleted file mode 100644 index f6eb8f34d..000000000 --- a/api/api/update.js +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -const acceptedQuerystring = ['wait_for_active_shards', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'lang', 'refresh', 'retry_on_conflict', 'routing', 'timeout', 'if_seq_no', 'if_primary_term', 'require_alias', 'pretty', 'human', 'error_trace', 'source', 'filter_path'] -const snakeCase = { waitForActiveShards: 'wait_for_active_shards', _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', retryOnConflict: 'retry_on_conflict', ifSeqNo: 'if_seq_no', ifPrimaryTerm: 'if_primary_term', requireAlias: 'require_alias', errorTrace: 'error_trace', filterPath: 'filter_path' } - -function updateApi (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - // check required parameters - if (params.id == null) { - const err = new this[kConfigurationError]('Missing required parameter: id') - return handleError(err, callback) - } - if (params.index == null) { - const err = new this[kConfigurationError]('Missing required parameter: index') - return handleError(err, callback) - } - if (params.body == null) { - const err = new this[kConfigurationError]('Missing required parameter: body') - return handleError(err, callback) - } - - let { method, body, id, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - if ((index) != null && (type) != null && (id) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + encodeURIComponent(id) + '/' + '_update' - } else { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_update' + '/' + encodeURIComponent(id) - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - return this.transport.request(request, options, callback) -} - -module.exports = updateApi diff --git a/api/api/update_by_query.js b/api/api/update_by_query.js deleted file mode 100644 index cdfac8b52..000000000 --- a/api/api/update_by_query.js +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['analyzer', 'analyze_wildcard', 'default_operator', 'df', 'from', 'ignore_unavailable', 'allow_no_indices', 'conflicts', 'expand_wildcards', 'lenient', 'pipeline', 'preference', 'q', 'routing', 'scroll', 'search_type', 'search_timeout', 'max_docs', 'sort', '_source', '_source_excludes', '_source_exclude', '_source_includes', '_source_include', 'terminate_after', 'stats', 'version', 'version_type', 'request_cache', 'refresh', 'timeout', 'wait_for_active_shards', 'scroll_size', 'wait_for_completion', 'requests_per_second', 'slices', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { analyzeWildcard: 'analyze_wildcard', defaultOperator: 'default_operator', ignoreUnavailable: 'ignore_unavailable', allowNoIndices: 'allow_no_indices', expandWildcards: 'expand_wildcards', searchType: 'search_type', searchTimeout: 'search_timeout', maxDocs: 'max_docs', _sourceExcludes: '_source_excludes', _sourceExclude: '_source_exclude', _sourceIncludes: '_source_includes', _sourceInclude: '_source_include', terminateAfter: 'terminate_after', versionType: 'version_type', requestCache: 'request_cache', waitForActiveShards: 'wait_for_active_shards', scrollSize: 'scroll_size', waitForCompletion: 'wait_for_completion', requestsPerSecond: 'requests_per_second', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function updateByQueryApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.index == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: index')
-    return handleError(err, callback)
-  }
-
-  let { method, body, index, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + encodeURIComponent(index) + '/' + '_update_by_query'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = updateByQueryApi
diff --git a/api/api/update_by_query_rethrottle.js b/api/api/update_by_query_rethrottle.js
deleted file mode 100644
index 2a9ebf90c..000000000
--- a/api/api/update_by_query_rethrottle.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['requests_per_second', 'pretty', 'human', 'error_trace', 'source', 'filter_path']
-const snakeCase = { requestsPerSecond: 'requests_per_second', errorTrace: 'error_trace', filterPath: 'filter_path' }
-
-function updateByQueryRethrottleApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.task_id == null && params.taskId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: task_id or taskId')
-    return handleError(err, callback)
-  }
-  if (params.requests_per_second == null && params.requestsPerSecond == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: requests_per_second or requestsPerSecond')
-    return handleError(err, callback)
-  }
-
-  let { method, body, taskId, task_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_update_by_query' + '/' + encodeURIComponent(task_id || taskId) + '/' + '_rethrottle'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = updateByQueryRethrottleApi
diff --git a/api/api/watcher.js b/api/api/watcher.js
deleted file mode 100644
index a426faa4a..000000000
--- a/api/api/watcher.js
+++ /dev/null
@@ -1,333 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['pretty', 'human', 'error_trace', 'source', 'filter_path', 'debug', 'active', 'version', 'if_seq_no', 'if_primary_term', 'metric', 'emit_stacktraces']
-const snakeCase = { errorTrace: 'error_trace', filterPath: 'filter_path', ifSeqNo: 'if_seq_no', ifPrimaryTerm: 'if_primary_term', emitStacktraces: 'emit_stacktraces' }
-
-function WatcherApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-WatcherApi.prototype.ackWatch = function watcherAckWatchApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.watch_id == null && params.watchId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: watch_id or watchId')
-    return handleError(err, callback)
-  }
-
-  // check required url components
-  if ((params.action_id != null || params.actionId != null) && ((params.watch_id == null && params.watchId == null))) {
-    const err = new this[kConfigurationError]('Missing required parameter of the url: watch_id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, watchId, watch_id, actionId, action_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((watch_id || watchId) != null && (action_id || actionId) != null) {
-    if (method == null) method = 'PUT'
-    path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(watch_id || watchId) + '/' + '_ack' + '/' + encodeURIComponent(action_id || actionId)
-  } else {
-    if (method == null) method = 'PUT'
-    path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(watch_id || watchId) + '/' + '_ack'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-WatcherApi.prototype.activateWatch = function watcherActivateWatchApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.watch_id == null && params.watchId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: watch_id or watchId')
-    return handleError(err, callback)
-  }
-
-  let { method, body, watchId, watch_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(watch_id || watchId) + '/' + '_activate'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-WatcherApi.prototype.deactivateWatch = function watcherDeactivateWatchApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.watch_id == null && params.watchId == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: watch_id or watchId')
-    return handleError(err, callback)
-  }
-
-  let { method, body, watchId, watch_id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(watch_id || watchId) + '/' + '_deactivate'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-WatcherApi.prototype.deleteWatch = function watcherDeleteWatchApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'DELETE'
-  path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(id)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-WatcherApi.prototype.executeWatch = function watcherExecuteWatchApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((id) != null) {
-    if (method == null) method = 'PUT'
-    path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(id) + '/' + '_execute'
-  } else {
-    if (method == null) method = 'PUT'
-    path = '/' + '_watcher' + '/' + 'watch' + '/' + '_execute'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-WatcherApi.prototype.getWatch = function watcherGetWatchApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(id)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-WatcherApi.prototype.putWatch = function watcherPutWatchApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  // check required parameters
-  if (params.id == null) {
-    const err = new this[kConfigurationError]('Missing required parameter: id')
-    return handleError(err, callback)
-  }
-
-  let { method, body, id, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'PUT'
-  path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(id)
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-WatcherApi.prototype.queryWatches = function watcherQueryWatchesApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = body == null ? 'GET' : 'POST'
-  path = '/' + '_watcher' + '/' + '_query' + '/' + 'watches'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-WatcherApi.prototype.start = function watcherStartApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_watcher' + '/' + '_start'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-WatcherApi.prototype.stats = function watcherStatsApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, metric, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if ((metric) != null) {
-    if (method == null) method = 'GET'
-    path = '/' + '_watcher' + '/' + 'stats' + '/' + encodeURIComponent(metric)
-  } else {
-    if (method == null) method = 'GET'
-    path = '/' + '_watcher' + '/' + 'stats'
-  }
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-WatcherApi.prototype.stop = function watcherStopApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'POST'
-  path = '/' + '_watcher' + '/' + '_stop'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: body || '',
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-Object.defineProperties(WatcherApi.prototype, {
-  ack_watch: { get () { return this.ackWatch } },
-  activate_watch: { get () { return this.activateWatch } },
-  deactivate_watch: { get () { return this.deactivateWatch } },
-  delete_watch: { get () { return this.deleteWatch } },
-  execute_watch: { get () { return this.executeWatch } },
-  get_watch: { get () { return this.getWatch } },
-  put_watch: { get () { return this.putWatch } },
-  query_watches: { get () { return this.queryWatches } }
-})
-
-module.exports = WatcherApi
diff --git a/api/api/xpack.js b/api/api/xpack.js
deleted file mode 100644
index 39a179d84..000000000
--- a/api/api/xpack.js
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils')
-const acceptedQuerystring = ['categories', 'accept_enterprise', 'pretty', 'human', 'error_trace', 'source', 'filter_path', 'master_timeout']
-const snakeCase = { acceptEnterprise: 'accept_enterprise', errorTrace: 'error_trace', filterPath: 'filter_path', masterTimeout: 'master_timeout' }
-
-function XpackApi (transport, ConfigurationError) {
-  this.transport = transport
-  this[kConfigurationError] = ConfigurationError
-}
-
-XpackApi.prototype.info = function xpackInfoApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_xpack'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-XpackApi.prototype.usage = function xpackUsageApi (params, options, callback) {
-  ;[params, options, callback] = normalizeArguments(params, options, callback)
-
-  let { method, body, ...querystring } = params
-  querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring)
-
-  let path = ''
-  if (method == null) method = 'GET'
-  path = '/' + '_xpack' + '/' + 'usage'
-
-  // build request object
-  const request = {
-    method,
-    path,
-    body: null,
-    querystring
-  }
-
-  return this.transport.request(request, options, callback)
-}
-
-module.exports = XpackApi
diff --git a/api/index.js b/api/index.js
deleted file mode 100644
index 2b37a5bfb..000000000
--- a/api/index.js
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-const AsyncSearchApi = require('./api/async_search')
-const AutoscalingApi = require('./api/autoscaling')
-const bulkApi = require('./api/bulk')
-const CatApi = require('./api/cat')
-const CcrApi = require('./api/ccr')
-const clearScrollApi = require('./api/clear_scroll')
-const closePointInTimeApi = require('./api/close_point_in_time')
-const ClusterApi = require('./api/cluster')
-const countApi = require('./api/count')
-const createApi = require('./api/create')
-const DanglingIndicesApi = require('./api/dangling_indices')
-const deleteApi = require('./api/delete')
-const deleteByQueryApi = require('./api/delete_by_query')
-const deleteByQueryRethrottleApi = require('./api/delete_by_query_rethrottle')
-const deleteScriptApi = require('./api/delete_script')
-const EnrichApi = require('./api/enrich')
-const EqlApi = require('./api/eql')
-const existsApi = require('./api/exists')
-const existsSourceApi = require('./api/exists_source')
-const explainApi = require('./api/explain')
-const FeaturesApi = require('./api/features')
-const fieldCapsApi = require('./api/field_caps')
-const FleetApi = require('./api/fleet')
-const getApi = require('./api/get')
-const getScriptApi = require('./api/get_script')
-const getScriptContextApi = require('./api/get_script_context')
-const getScriptLanguagesApi = require('./api/get_script_languages')
-const getSourceApi = require('./api/get_source')
-const GraphApi = require('./api/graph')
-const IlmApi = require('./api/ilm')
-const indexApi = require('./api/index')
-const IndicesApi = require('./api/indices')
-const infoApi = require('./api/info')
-const IngestApi = require('./api/ingest')
-const LicenseApi = require('./api/license')
-const LogstashApi = require('./api/logstash')
-const mgetApi = require('./api/mget')
-const MigrationApi = require('./api/migration')
-const MlApi = require('./api/ml')
-const MonitoringApi = require('./api/monitoring')
-const msearchApi = require('./api/msearch')
-const msearchTemplateApi = require('./api/msearch_template')
-const mtermvectorsApi = require('./api/mtermvectors')
-const NodesApi = require('./api/nodes')
-const openPointInTimeApi = require('./api/open_point_in_time')
-const pingApi = require('./api/ping')
-const putScriptApi = require('./api/put_script')
-const rankEvalApi = require('./api/rank_eval')
-const reindexApi = require('./api/reindex')
-const reindexRethrottleApi = require('./api/reindex_rethrottle')
-const renderSearchTemplateApi = require('./api/render_search_template')
-const RollupApi = require('./api/rollup')
-const scriptsPainlessExecuteApi = require('./api/scripts_painless_execute')
-const scrollApi = require('./api/scroll')
-const searchApi = require('./api/search')
-const searchMvtApi = require('./api/search_mvt')
-const searchShardsApi = require('./api/search_shards')
-const searchTemplateApi = require('./api/search_template')
-const SearchableSnapshotsApi = require('./api/searchable_snapshots')
-const SecurityApi = require('./api/security')
-const ShutdownApi = require('./api/shutdown')
-const SlmApi = require('./api/slm')
-const SnapshotApi = require('./api/snapshot')
-const SqlApi = require('./api/sql')
-const SslApi = require('./api/ssl')
-const TasksApi = require('./api/tasks')
-const termsEnumApi = require('./api/terms_enum')
-const termvectorsApi = require('./api/termvectors')
-const TextStructureApi = require('./api/text_structure')
-const TransformApi = require('./api/transform')
-const updateApi = require('./api/update')
-const updateByQueryApi = require('./api/update_by_query')
-const updateByQueryRethrottleApi = require('./api/update_by_query_rethrottle')
-const WatcherApi = require('./api/watcher')
-const XpackApi = require('./api/xpack')
-
-const { kConfigurationError } = require('./utils')
-const kAsyncSearch = Symbol('AsyncSearch')
-const kAutoscaling = Symbol('Autoscaling')
-const kCat = Symbol('Cat')
-const kCcr = Symbol('Ccr')
-const kCluster = Symbol('Cluster')
-const kDanglingIndices = Symbol('DanglingIndices')
-const kEnrich = Symbol('Enrich')
-const kEql = Symbol('Eql')
-const kFeatures = Symbol('Features')
-const kFleet = Symbol('Fleet')
-const kGraph = Symbol('Graph')
-const kIlm = Symbol('Ilm')
-const kIndices = Symbol('Indices')
-const kIngest = Symbol('Ingest')
-const kLicense = Symbol('License')
-const kLogstash = Symbol('Logstash')
-const kMigration = Symbol('Migration')
-const kMl = Symbol('Ml')
-const kMonitoring = Symbol('Monitoring')
-const kNodes = Symbol('Nodes')
-const kRollup = Symbol('Rollup')
-const kSearchableSnapshots = Symbol('SearchableSnapshots')
-const kSecurity = Symbol('Security')
-const kShutdown = Symbol('Shutdown')
-const kSlm = Symbol('Slm')
-const kSnapshot = Symbol('Snapshot')
-const kSql = Symbol('Sql')
-const kSsl = Symbol('Ssl')
-const kTasks = Symbol('Tasks')
-const kTextStructure = Symbol('TextStructure')
-const kTransform = Symbol('Transform')
-const kWatcher = Symbol('Watcher')
-const kXpack = Symbol('Xpack')
-
-function ESAPI (opts) {
-  this[kConfigurationError] = opts.ConfigurationError
-  this[kAsyncSearch] = null
-  this[kAutoscaling] = null
-  this[kCat] = null
-  this[kCcr] = null
-  this[kCluster] = null
-  this[kDanglingIndices] = null
-  this[kEnrich] = null
-  this[kEql] = null
-  this[kFeatures] = null
-  this[kFleet] = null
-  this[kGraph] = null
-  this[kIlm] = null
-  this[kIndices] = null
-  this[kIngest] = null
-  this[kLicense] = null
-  this[kLogstash] = null
-  this[kMigration] = null
-  this[kMl] = null
-  this[kMonitoring] = null
-  this[kNodes] = null
-  this[kRollup] = null
-  this[kSearchableSnapshots] = null
-  this[kSecurity] = null
-  this[kShutdown] = null
-  this[kSlm] = null
-  this[kSnapshot] = null
-  this[kSql] = null
-  this[kSsl] = null
-  this[kTasks] = null
-  this[kTextStructure] = null
-  this[kTransform] = null
-  this[kWatcher] = null
-  this[kXpack] = null
-}
-
-ESAPI.prototype.bulk = bulkApi
-ESAPI.prototype.clearScroll = clearScrollApi
-ESAPI.prototype.closePointInTime = closePointInTimeApi
-ESAPI.prototype.count = countApi
-ESAPI.prototype.create = createApi
-ESAPI.prototype.delete = deleteApi
-ESAPI.prototype.deleteByQuery = deleteByQueryApi
-ESAPI.prototype.deleteByQueryRethrottle = deleteByQueryRethrottleApi
-ESAPI.prototype.deleteScript = deleteScriptApi
-ESAPI.prototype.exists = existsApi
-ESAPI.prototype.existsSource = existsSourceApi
-ESAPI.prototype.explain = explainApi
-ESAPI.prototype.fieldCaps = fieldCapsApi
-ESAPI.prototype.get = getApi
-ESAPI.prototype.getScript = getScriptApi
-ESAPI.prototype.getScriptContext = getScriptContextApi
-ESAPI.prototype.getScriptLanguages = getScriptLanguagesApi
-ESAPI.prototype.getSource = getSourceApi
-ESAPI.prototype.index = indexApi
-ESAPI.prototype.info = infoApi
-ESAPI.prototype.mget = mgetApi
-ESAPI.prototype.msearch = msearchApi
-ESAPI.prototype.msearchTemplate = msearchTemplateApi
-ESAPI.prototype.mtermvectors = mtermvectorsApi
-ESAPI.prototype.openPointInTime = openPointInTimeApi
-ESAPI.prototype.ping = pingApi
-ESAPI.prototype.putScript = putScriptApi
-ESAPI.prototype.rankEval = rankEvalApi
-ESAPI.prototype.reindex = reindexApi
-ESAPI.prototype.reindexRethrottle = reindexRethrottleApi
-ESAPI.prototype.renderSearchTemplate = renderSearchTemplateApi
-ESAPI.prototype.scriptsPainlessExecute = scriptsPainlessExecuteApi
-ESAPI.prototype.scroll = scrollApi
-ESAPI.prototype.search = searchApi
-ESAPI.prototype.searchMvt = searchMvtApi
-ESAPI.prototype.searchShards = searchShardsApi
-ESAPI.prototype.searchTemplate = searchTemplateApi
-ESAPI.prototype.termsEnum = termsEnumApi
-ESAPI.prototype.termvectors = termvectorsApi
-ESAPI.prototype.update = updateApi
-ESAPI.prototype.updateByQuery = updateByQueryApi
-ESAPI.prototype.updateByQueryRethrottle = updateByQueryRethrottleApi
-
-Object.defineProperties(ESAPI.prototype, {
-  asyncSearch: {
-    get () {
-      if (this[kAsyncSearch] === null) {
-        this[kAsyncSearch] = new AsyncSearchApi(this.transport, this[kConfigurationError])
-      }
-      return this[kAsyncSearch]
-    }
-  },
-  async_search: { get () { return this.asyncSearch } },
-  autoscaling: {
-    get () {
-      if (this[kAutoscaling] === null) {
-        this[kAutoscaling] = new AutoscalingApi(this.transport, this[kConfigurationError])
-      }
-      return this[kAutoscaling]
-    }
-  },
-  cat: {
-    get () {
-      if (this[kCat] === null) {
-        this[kCat] = new CatApi(this.transport, this[kConfigurationError])
-      }
-      return this[kCat]
-    }
-  },
-  ccr: {
-    get () {
-      if (this[kCcr] === null) {
-        this[kCcr] = new CcrApi(this.transport, this[kConfigurationError])
-      }
-      return this[kCcr]
-    }
-  },
-  clear_scroll: { get () { return this.clearScroll } },
-  close_point_in_time: { get () { return this.closePointInTime } },
-  cluster: {
-    get () {
-      if (this[kCluster] === null) {
-        this[kCluster] = new ClusterApi(this.transport, this[kConfigurationError])
-      }
-      return this[kCluster]
-    }
-  },
-  danglingIndices: {
-    get () {
-      if (this[kDanglingIndices] === null) {
-        this[kDanglingIndices] = new DanglingIndicesApi(this.transport, this[kConfigurationError])
-      }
-      return this[kDanglingIndices]
-    }
-  },
-  dangling_indices: { get () { return this.danglingIndices } },
-  delete_by_query: { get () { return this.deleteByQuery } },
-  delete_by_query_rethrottle: { get () { return this.deleteByQueryRethrottle } },
-  delete_script: { get () { return this.deleteScript } },
-  enrich: {
-    get () {
-      if (this[kEnrich] === null) {
-        this[kEnrich] = new EnrichApi(this.transport, this[kConfigurationError])
-      }
-      return this[kEnrich]
-    }
-  },
-  eql: {
-    get () {
-      if (this[kEql] === null) {
-        this[kEql] = new EqlApi(this.transport, this[kConfigurationError])
-      }
-      return this[kEql]
-    }
-  },
-  exists_source: { get () { return this.existsSource } },
-  features: {
-    get () {
-      if (this[kFeatures] === null) {
-        this[kFeatures] = new FeaturesApi(this.transport, this[kConfigurationError])
-      }
-      return this[kFeatures]
-    }
-  },
-  field_caps: { get () { return this.fieldCaps } },
-  fleet: {
-    get () {
-      if (this[kFleet] === null) {
-        this[kFleet] = new FleetApi(this.transport, this[kConfigurationError])
-      }
-      return this[kFleet]
-    }
-  },
-  get_script: { get () { return this.getScript } },
-  get_script_context: { get () { return this.getScriptContext } },
-  get_script_languages: { get () { return this.getScriptLanguages } },
-  get_source: { get () { return this.getSource } },
-  graph: {
-    get () {
-      if (this[kGraph] === null) {
-        this[kGraph] = new GraphApi(this.transport, this[kConfigurationError])
-      }
-      return this[kGraph]
-    }
-  },
-  ilm: {
-    get () {
-      if (this[kIlm] === null) {
-        this[kIlm] = new IlmApi(this.transport, this[kConfigurationError])
-      }
-      return this[kIlm]
-    }
-  },
-  indices: {
-    get () {
-      if (this[kIndices] === null) {
-        this[kIndices] = new IndicesApi(this.transport, this[kConfigurationError])
-      }
-      return this[kIndices]
-    }
-  },
-  ingest: {
-    get () {
-      if (this[kIngest] === null) {
-        this[kIngest] = new IngestApi(this.transport, this[kConfigurationError])
-      }
-      return this[kIngest]
-    }
-  },
-  license: {
-    get () {
-      if (this[kLicense] === null) {
-        this[kLicense] = new LicenseApi(this.transport, this[kConfigurationError])
-      }
-      return this[kLicense]
-    }
-  },
-  logstash: {
-    get () {
-      if (this[kLogstash] === null) {
-        this[kLogstash] = new LogstashApi(this.transport, this[kConfigurationError])
-      }
-      return this[kLogstash]
-    }
-  },
-  migration: {
-    get () {
-      if (this[kMigration] === null) {
-        this[kMigration] = new MigrationApi(this.transport, this[kConfigurationError])
-      }
-      return this[kMigration]
-    }
-  },
-  ml: {
-    get () {
-      if (this[kMl] === null) {
-        this[kMl] = new MlApi(this.transport, this[kConfigurationError])
-      }
-      return this[kMl]
-    }
-  },
-  monitoring: {
-    get () {
-      if (this[kMonitoring] === null) {
-        this[kMonitoring] = new MonitoringApi(this.transport, this[kConfigurationError])
-      }
-      return this[kMonitoring]
-    }
-  },
-  msearch_template: { get () { return this.msearchTemplate } },
-  nodes: {
-    get () {
-      if (this[kNodes] === null) {
-        this[kNodes] = new NodesApi(this.transport, this[kConfigurationError])
-      }
-      return this[kNodes]
-    }
-  },
-  open_point_in_time: { get () { return this.openPointInTime } },
-  put_script: { get () { return this.putScript } },
-  rank_eval: { get () { return this.rankEval } },
-  reindex_rethrottle: { get () { return this.reindexRethrottle } },
-  render_search_template: { get () { return this.renderSearchTemplate } },
-  rollup: {
-    get () {
-      if (this[kRollup] === null) {
-        this[kRollup] = new RollupApi(this.transport, this[kConfigurationError])
-      }
-      return this[kRollup]
-    }
-  },
-  scripts_painless_execute: { get () { return this.scriptsPainlessExecute } },
-  search_mvt: { get () { return this.searchMvt } },
-  search_shards: { get () { return this.searchShards } },
-  search_template: { get () { return this.searchTemplate } },
-  searchableSnapshots: {
-    get () {
-      if (this[kSearchableSnapshots] === null) {
-        this[kSearchableSnapshots] = new SearchableSnapshotsApi(this.transport, this[kConfigurationError])
-      }
-      return this[kSearchableSnapshots]
-    }
-  },
-  searchable_snapshots: { get () { return this.searchableSnapshots } },
-  security: {
-    get () {
-      if (this[kSecurity] === null) {
-        this[kSecurity] = new SecurityApi(this.transport, this[kConfigurationError])
-      }
-      return this[kSecurity]
-    }
-  },
-  shutdown: {
-    get () {
-      if (this[kShutdown] === null) {
-        this[kShutdown] = new ShutdownApi(this.transport, this[kConfigurationError])
-      }
-      return this[kShutdown]
-    }
-  },
-  slm: {
-    get () {
-      if (this[kSlm] === null) {
-        this[kSlm] = new SlmApi(this.transport, this[kConfigurationError])
-      }
-      return this[kSlm]
-    }
-  },
-  snapshot: {
-    get () {
-      if (this[kSnapshot] === null) {
-        this[kSnapshot] = new SnapshotApi(this.transport, this[kConfigurationError])
-      }
-      return this[kSnapshot]
-    }
-  },
-  sql: {
-    get () {
-      if (this[kSql] === null) {
-        this[kSql] = new SqlApi(this.transport, this[kConfigurationError])
-      }
-      return this[kSql]
-    }
-  },
-  ssl: {
-    get () {
-      if (this[kSsl] === null) {
-        this[kSsl] = new SslApi(this.transport, this[kConfigurationError])
-      }
-      return this[kSsl]
-    }
-  },
-  tasks: {
-    get () {
-      if (this[kTasks] === null) {
-        this[kTasks] = new TasksApi(this.transport, this[kConfigurationError])
-      }
-      return this[kTasks]
-    }
-  },
-  terms_enum: { get () { return this.termsEnum } },
-  textStructure: {
-    get () {
-      if (this[kTextStructure] === null) {
-        this[kTextStructure] = new TextStructureApi(this.transport, this[kConfigurationError])
-      }
-      return this[kTextStructure]
-    }
-  },
-  text_structure: { get () { return this.textStructure } },
-  transform: {
-    get () {
-      if (this[kTransform] === null) {
-        this[kTransform] = new TransformApi(this.transport, this[kConfigurationError])
-      }
-      return this[kTransform]
-    }
-  },
-  update_by_query: { get () { return this.updateByQuery } },
-  update_by_query_rethrottle: { get () { return this.updateByQueryRethrottle } },
-  watcher: {
-    get () {
-      if (this[kWatcher] === null) {
-        this[kWatcher] = new WatcherApi(this.transport, this[kConfigurationError])
      }
-      return this[kWatcher]
-    }
-  },
-  xpack: {
-    get () {
-      if (this[kXpack] === null) {
-        this[kXpack] = new XpackApi(this.transport, this[kConfigurationError])
-      }
-      return this[kXpack]
-    }
-  }
-})
-
-module.exports = ESAPI
diff --git a/api/kibana.d.ts b/api/kibana.d.ts
deleted file mode 100644
index a80431ec2..000000000
--- a/api/kibana.d.ts
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/// <reference types="node" />
-
-import {
-  ClientOptions,
-  ConnectionPool,
-  Serializer,
-  Transport,
-  errors,
-  RequestEvent,
-  ResurrectEvent,
-  ApiError
-} from '../index'
-import Helpers from '../lib/Helpers'
-import {
-  ApiResponse,
-  TransportRequestPromise,
-  TransportRequestParams,
-  TransportRequestOptions
-} from '../lib/Transport'
-import * as T from './types'
-
-/**
- * We are still working on this type, it will arrive soon.
- * If it's critical for you, please open an issue.
- * https://github.com/elastic/elasticsearch-js
- */
-type TODO = Record<string, any>
-
-// Extend API
-interface ClientExtendsCallbackOptions {
-  ConfigurationError: errors.ConfigurationError,
-  makeRequest(params: TransportRequestParams, options?: TransportRequestOptions): Promise<ApiResponse> | void;
-  result: {
-    body: null,
-    statusCode: null,
-    headers: null,
-    warnings: null
-  }
-}
-declare type extendsCallback = (options: ClientExtendsCallbackOptions) => any;
-// /Extend API
-
-interface KibanaClient {
-  connectionPool: ConnectionPool
-  transport: Transport
-  serializer: Serializer
-  extend(method: string, fn: extendsCallback): void
-  extend(method: string, opts: { force: boolean }, fn: extendsCallback): void;
-  helpers: Helpers
-  child(opts?: ClientOptions): KibanaClient
-  close(): Promise<void>;
-  emit(event: string | symbol, ...args: any[]): boolean;
-  on(event: 'request', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  on(event: 'response', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  on(event: 'sniff', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  on(event: 'resurrect', listener: (err: null, meta: ResurrectEvent) => void): this;
-  once(event: 'request', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  once(event: 'response', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  once(event: 'sniff', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  once(event: 'resurrect', listener: (err: null, meta: ResurrectEvent) => void): this;
-  off(event: string | symbol, listener: (...args: any[]) => void): this;
-  asyncSearch: {
-    delete(params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    get(params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    status(params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    submit(params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-  }
-  autoscaling: {
-    deleteAutoscalingPolicy(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getAutoscalingCapacity(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getAutoscalingPolicy(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    putAutoscalingPolicy(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  bulk(params: T.BulkRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  cat: {
-    aliases(params?: T.CatAliasesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    allocation(params?: T.CatAllocationRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    count(params?: T.CatCountRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    fielddata(params?: T.CatFielddataRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    health(params?: T.CatHealthRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    help(params?: T.CatHelpRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    indices(params?: T.CatIndicesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    master(params?: T.CatMasterRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    mlDataFrameAnalytics(params?: T.CatDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    mlDatafeeds(params?: T.CatDatafeedsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    mlJobs(params?: T.CatJobsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    mlTrainedModels(params?: T.CatTrainedModelsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    nodeattrs(params?: T.CatNodeAttributesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    nodes(params?: T.CatNodesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    pendingTasks(params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    plugins(params?: T.CatPluginsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    recovery(params?: T.CatRecoveryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    repositories(params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    segments(params?: T.CatSegmentsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    shards(params?: T.CatShardsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    snapshots(params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    tasks(params?: T.CatTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    templates(params?: T.CatTemplatesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    threadPool(params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    transforms(params?: T.CatTransformsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  ccr: {
-    deleteAutoFollowPattern(params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    follow(params: T.CcrCreateFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    followInfo(params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    followStats(params: T.CcrFollowIndexStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    forgetFollower(params: T.CcrForgetFollowerIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getAutoFollowPattern(params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    pauseAutoFollowPattern(params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    pauseFollow(params: T.CcrPauseFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putAutoFollowPattern(params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    resumeAutoFollowPattern(params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    resumeFollow(params: T.CcrResumeFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stats(params?: T.CcrStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    unfollow(params: T.CcrUnfollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  clearScroll(params?: T.ClearScrollRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  closePointInTime(params?: T.ClosePointInTimeRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  cluster: {
-    allocationExplain(params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteComponentTemplate(params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteVotingConfigExclusions(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    existsComponentTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getComponentTemplate(params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getSettings(params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    health(params?: T.ClusterHealthRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    pendingTasks(params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    postVotingConfigExclusions(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    putComponentTemplate(params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putSettings(params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    remoteInfo(params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    reroute(params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    state(params?: T.ClusterStateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stats(params?: T.ClusterStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  count(params?: T.CountRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  create(params: T.CreateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  danglingIndices: {
-    deleteDanglingIndex(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    importDanglingIndex(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    listDanglingIndices(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  dataFrameTransformDeprecated: {
-    deleteTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getTransformStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    previewTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    putTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    startTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    stopTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    updateTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  delete(params: T.DeleteRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  deleteByQuery(params: T.DeleteByQueryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  deleteByQueryRethrottle(params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  deleteScript(params: T.DeleteScriptRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  enrich: {
-    deletePolicy(params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    executePolicy(params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getPolicy(params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putPolicy(params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stats(params?: T.EnrichStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  eql: {
-    delete(params: T.EqlDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    get(params: T.EqlGetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    getStatus(params: T.EqlGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    search(params: T.EqlSearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-  }
-  exists(params: T.ExistsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  existsSource(params: T.ExistsSourceRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  explain(params: T.ExplainRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-  features: {
-    getFeatures(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    resetFeatures(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  fieldCaps(params?: T.FieldCapsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  fleet: {
-    globalCheckpoints(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  get(params: T.GetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-  getScript(params: T.GetScriptRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  getScriptContext(params?: T.GetScriptContextRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  getScriptLanguages(params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  getSource(params?: T.GetSourceRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-  graph: {
-    explore(params: T.GraphExploreRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  ilm: {
-    deleteLifecycle(params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    explainLifecycle(params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getLifecycle(params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getStatus(params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    migrateToDataTiers(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    moveToStep(params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putLifecycle(params?: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    removePolicy(params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    retry(params: T.IlmRetryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    start(params?: T.IlmStartRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stop(params?: T.IlmStopRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  index(params: T.IndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  indices: {
-    addBlock(params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    analyze(params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clearCache(params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    clone(params: T.IndicesCloneRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    close(params: T.IndicesCloseRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    create(params: T.IndicesCreateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    createDataStream(params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    dataStreamsStats(params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    delete(params: T.IndicesDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteAlias(params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteDataStream(params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteIndexTemplate(params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteTemplate(params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    diskUsage(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    exists(params: T.IndicesExistsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    existsAlias(params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    existsIndexTemplate(params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    existsTemplate(params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    existsType(params: T.IndicesExistsTypeRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    fieldUsageStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    flush(params?: T.IndicesFlushRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    flushSynced(params?: T.IndicesFlushSyncedRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    forcemerge(params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    freeze(params: T.IndicesFreezeRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    get(params: T.IndicesGetRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getAlias(params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getDataStream(params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getFieldMapping(params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getIndexTemplate(params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getMapping(params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getSettings(params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getTemplate(params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getUpgrade(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    migrateToDataStream(params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    open(params: T.IndicesOpenRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    promoteDataStream(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    putAlias(params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putIndexTemplate(params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putMapping(params?: T.IndicesPutMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putSettings(params?: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putTemplate(params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    recovery(params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    refresh(params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    reloadSearchAnalyzers(params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    resolveIndex(params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    rollover(params: T.IndicesRolloverRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    segments(params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    shardStores(params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    shrink(params: T.IndicesShrinkRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    simulateIndexTemplate(params?: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    simulateTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    split(params: T.IndicesSplitRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stats(params?: T.IndicesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    unfreeze(params: T.IndicesUnfreezeRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    updateAliases(params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    upgrade(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    validateQuery(params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  info(params?: T.InfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  ingest: {
-    deletePipeline(params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    geoIpStats(params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getPipeline(params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    processorGrok(params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putPipeline(params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    simulate(params?: T.IngestSimulatePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  license: {
-    delete(params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    get(params?: T.LicenseGetRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getBasicStatus(params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getTrialStatus(params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    post(params?: T.LicensePostRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    postStartBasic(params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    postStartTrial(params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  logstash: {
-    deletePipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getPipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    putPipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  mget(params?: T.MgetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-  migration: {
-    deprecations(params?: T.MigrationDeprecationInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  }
-  ml: {
-    closeJob(params: T.MlCloseJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteCalendar(params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteCalendarEvent(params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteCalendarJob(params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteDataFrameAnalytics(params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteDatafeed(params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteExpiredData(params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteFilter(params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteForecast(params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteJob(params: T.MlDeleteJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteModelSnapshot(params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteTrainedModel(params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteTrainedModelAlias(params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    estimateModelMemory(params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    evaluateDataFrame(params?: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    explainDataFrameAnalytics(params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    findFileStructure(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    flushJob(params: T.MlFlushJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    forecast(params: T.MlForecastJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getBuckets(params: T.MlGetBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getCalendarEvents(params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getCalendars(params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getCategories(params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getDataFrameAnalytics(params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getDataFrameAnalyticsStats(params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getDatafeedStats(params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getDatafeeds(params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getFilters(params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getInfluencers(params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getJobStats(params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getJobs(params?: T.MlGetJobsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getModelSnapshots(params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getOverallBuckets(params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getRecords(params: T.MlGetAnomalyRecordsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getTrainedModels(params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getTrainedModelsStats(params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    info(params?: T.MlInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    openJob(params: T.MlOpenJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    postCalendarEvents(params?: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    postData(params: T.MlPostJobDataRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    previewDataFrameAnalytics(params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    previewDatafeed(params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    putCalendar(params: T.MlPutCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putCalendarJob(params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putDataFrameAnalytics(params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putDatafeed(params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putFilter(params: T.MlPutFilterRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putJob(params: T.MlPutJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putTrainedModel(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    resetJob(params: T.MlResetJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): 
TransportRequestPromise> - setUpgradeMode(params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): TransportRequestPromise> - startDataFrameAnalytics(params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> - startDatafeed(params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise> - stopDataFrameAnalytics(params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> - stopDatafeed(params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateDataFrameAnalytics(params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateDatafeed(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - updateFilter(params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> - upgradeJobSnapshot(params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> - validate(params?: T.MlValidateJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - validateDetector(params?: T.MlValidateDetectorRequest, options?: TransportRequestOptions): TransportRequestPromise> - } - monitoring: { - bulk(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - } - msearch(params?: T.MsearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - msearchTemplate(params?: T.MsearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - mtermvectors(params?: T.MtermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise> - nodes: { - clearMeteringArchive(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getMeteringInfo(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - hotThreads(params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise> - info(params?: T.NodesInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> - reloadSecureSettings(params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> - stats(params?: T.NodesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - usage(params?: T.NodesUsageRequest, options?: TransportRequestOptions): TransportRequestPromise> - } - openPointInTime(params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): TransportRequestPromise> - ping(params?: T.PingRequest, options?: TransportRequestOptions): TransportRequestPromise> - putScript(params: T.PutScriptRequest, options?: TransportRequestOptions): TransportRequestPromise> - rankEval(params: T.RankEvalRequest, options?: TransportRequestOptions): TransportRequestPromise> - reindex(params?: T.ReindexRequest, options?: TransportRequestOptions): TransportRequestPromise> - reindexRethrottle(params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): TransportRequestPromise> - renderSearchTemplate(params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - rollup: { - deleteJob(params: T.RollupDeleteRollupJobRequest, options?: TransportRequestOptions): 
TransportRequestPromise> - getJobs(params?: T.RollupGetRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - getRollupCaps(params?: T.RollupGetRollupCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise> - getRollupIndexCaps(params: T.RollupGetRollupIndexCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise> - putJob(params: T.RollupCreateRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - rollup(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - rollupSearch(params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - startJob(params: T.RollupStartRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - stopJob(params: T.RollupStopRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - } - scriptsPainlessExecute(params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - scroll(params?: T.ScrollRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - search(params?: T.SearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - searchShards(params?: T.SearchShardsRequest, options?: TransportRequestOptions): TransportRequestPromise> - searchTemplate(params?: T.SearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - searchableSnapshots: { - cacheStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - clearCache(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - mount(params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): TransportRequestPromise> - repositoryStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - stats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - } - security: { - authenticate(params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): TransportRequestPromise> - changePassword(params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): TransportRequestPromise> - clearApiKeyCache(params?: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): TransportRequestPromise> - clearCachedPrivileges(params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise> - clearCachedRealms(params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): TransportRequestPromise> - clearCachedRoles(params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): TransportRequestPromise> - clearCachedServiceTokens(params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): TransportRequestPromise> - createApiKey(params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise> - createServiceToken(params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): TransportRequestPromise> - deletePrivileges(params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteRole(params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteRoleMapping(params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteServiceToken(params: 
T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteUser(params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): TransportRequestPromise> - disableUser(params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): TransportRequestPromise> - enableUser(params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): TransportRequestPromise> - getApiKey(params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise> - getBuiltinPrivileges(params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise> - getPrivileges(params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise> - getRole(params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): TransportRequestPromise> - getRoleMapping(params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> - getServiceAccounts(params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getServiceCredentials(params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getToken(params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): TransportRequestPromise> - getUser(params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): TransportRequestPromise> - getUserPrivileges(params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise> - grantApiKey(params?: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise> - hasPrivileges(params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise> - invalidateApiKey(params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise> - invalidateToken(params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): TransportRequestPromise> - putPrivileges(params?: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise> - putRole(params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): TransportRequestPromise> - putRoleMapping(params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> - putUser(params: T.SecurityPutUserRequest, options?: TransportRequestOptions): TransportRequestPromise> - samlAuthenticate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - samlCompleteLogout(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - samlInvalidate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - samlLogout(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - samlPrepareAuthentication(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - samlServiceProviderMetadata(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - } - shutdown: { - deleteNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - putNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - } - slm: { - deleteLifecycle(params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> - 
executeLifecycle(params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> - executeRetention(params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): TransportRequestPromise> - getLifecycle(params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> - getStats(params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getStatus(params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - putLifecycle(params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> - start(params?: T.SlmStartRequest, options?: TransportRequestOptions): TransportRequestPromise> - stop(params?: T.SlmStopRequest, options?: TransportRequestOptions): TransportRequestPromise> - } - snapshot: { - cleanupRepository(params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise> - clone(params: T.SnapshotCloneRequest, options?: TransportRequestOptions): TransportRequestPromise> - create(params: T.SnapshotCreateRequest, options?: TransportRequestOptions): TransportRequestPromise> - createRepository(params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise> - delete(params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteRepository(params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise> - get(params: T.SnapshotGetRequest, options?: TransportRequestOptions): TransportRequestPromise> - getRepository(params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise> - repositoryAnalyze(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - restore(params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): TransportRequestPromise> - status(params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - verifyRepository(params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise> - } - sql: { - clearCursor(params?: T.SqlClearCursorRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteAsync(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getAsync(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getAsyncStatus(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - query(params?: T.SqlQueryRequest, options?: TransportRequestOptions): TransportRequestPromise> - translate(params?: T.SqlTranslateRequest, options?: TransportRequestOptions): TransportRequestPromise> - } - ssl: { - certificates(params?: T.SslGetCertificatesRequest, options?: TransportRequestOptions): TransportRequestPromise> - } - tasks: { - cancel(params?: T.TaskCancelRequest, options?: TransportRequestOptions): TransportRequestPromise> - get(params: T.TaskGetRequest, options?: TransportRequestOptions): TransportRequestPromise> - list(params?: T.TaskListRequest, options?: TransportRequestOptions): TransportRequestPromise> - } - termsEnum(params: T.TermsEnumRequest, options?: TransportRequestOptions): TransportRequestPromise> - termvectors(params: T.TermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise> - textStructure: { - findStructure(params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): 
TransportRequestPromise> - } - transform: { - deleteTransform(params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): TransportRequestPromise> - getTransform(params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): TransportRequestPromise> - getTransformStats(params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - previewTransform(params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - putTransform(params: T.TransformPutTransformRequest, options?: TransportRequestOptions): TransportRequestPromise> - startTransform(params: T.TransformStartTransformRequest, options?: TransportRequestOptions): TransportRequestPromise> - stopTransform(params: T.TransformStopTransformRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateTransform(params?: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): TransportRequestPromise> - } - update(params: T.UpdateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - updateByQuery(params: T.UpdateByQueryRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateByQueryRethrottle(params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): TransportRequestPromise> - watcher: { - ackWatch(params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): TransportRequestPromise> - activateWatch(params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise> - deactivateWatch(params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteWatch(params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise> - executeWatch(params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise> - getWatch(params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): TransportRequestPromise> - putWatch(params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): TransportRequestPromise> - queryWatches(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - start(params?: T.WatcherStartRequest, options?: TransportRequestOptions): TransportRequestPromise> - stats(params?: T.WatcherStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - stop(params?: T.WatcherStopRequest, options?: TransportRequestOptions): TransportRequestPromise> - } - xpack: { - info(params?: T.XpackInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> - usage(params?: T.XpackUsageRequest, options?: TransportRequestOptions): TransportRequestPromise> - } -} - -export { KibanaClient } diff --git a/api/new.d.ts b/api/new.d.ts deleted file mode 100644 index c06583c75..000000000 --- a/api/new.d.ts +++ /dev/null @@ -1,1585 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
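The KibanaClient typings removed above are promise-only: every method returns a TransportRequestPromise and no callback overloads exist. A minimal sketch, under the assumption that the typings were consumed via the documented cast (node URL and index name are hypothetical):

    import { Client } from '@elastic/elasticsearch'
    import { KibanaClient } from '@elastic/elasticsearch/api/kibana'

    // The runtime Client instance satisfies the promise-only surface via a cast.
    const client = new Client({ node: 'http://localhost:9200/' }) as unknown as KibanaClient

    async function indexExists (name: string): Promise<boolean> {
      // exists() resolves with a boolean body; no callback variant is declared.
      const { body } = await client.indices.exists({ index: name })
      return body
    }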
diff --git a/api/new.d.ts b/api/new.d.ts
deleted file mode 100644
index c06583c75..000000000
--- a/api/new.d.ts
+++ /dev/null
@@ -1,1585 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-///
-
-import {
-  ClientOptions,
-  ConnectionPool,
-  BaseConnectionPool,
-  CloudConnectionPool,
-  Connection,
-  Serializer,
-  Transport,
-  errors,
-  RequestEvent,
-  ResurrectEvent,
-  ApiError,
-  NodeOptions,
-  events
-} from '../index'
-import Helpers from '../lib/Helpers'
-import {
-  ApiResponse,
-  TransportRequestCallback,
-  TransportRequestPromise,
-  TransportRequestParams,
-  TransportRequestOptions
-} from '../lib/Transport'
-import * as T from './types'
-
-/**
- * We are still working on this type, it will arrive soon.
- * If it's critical for you, please open an issue.
- * https://github.com/elastic/elasticsearch-js
- */
-type TODO = Record
-
-// Extend API
-interface ClientExtendsCallbackOptions {
-  ConfigurationError: errors.ConfigurationError,
-  makeRequest(params: TransportRequestParams, options?: TransportRequestOptions): Promise | void;
-  result: {
-    body: null,
-    statusCode: null,
-    headers: null,
-    warnings: null
-  }
-}
-declare type extendsCallback = (options: ClientExtendsCallbackOptions) => any;
-// /Extend API
-
-declare type callbackFn = (err: ApiError, result: ApiResponse) => void;
-declare class Client {
-  constructor(opts: ClientOptions)
-  connectionPool: ConnectionPool
-  transport: Transport
-  serializer: Serializer
-  extend(method: string, fn: extendsCallback): void
-  extend(method: string, opts: { force: boolean }, fn: extendsCallback): void;
-  helpers: Helpers
-  child(opts?: ClientOptions): Client
-  close(callback: Function): void;
-  close(): Promise;
-  emit(event: string | symbol, ...args: any[]): boolean;
-  on(event: 'request', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  on(event: 'response', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  on(event: 'sniff', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  on(event: 'resurrect', listener: (err: null, meta: ResurrectEvent) => void): this;
-  once(event: 'request', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  once(event: 'response', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  once(event: 'sniff', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  once(event: 'resurrect', listener: (err: null, meta: ResurrectEvent) => void): this;
-  off(event: string | symbol, listener: (...args: any[]) => void): this;
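Every API method on this class is declared several times: one overload without a callback that returns an abortable TransportRequestPromise, plus callback overloads that return a TransportRequestCallback handle, as the asyncSearch declarations below illustrate. A sketch of the two calling styles this encodes (the search id is hypothetical):

    const client = new Client({ node: 'http://localhost:9200/' })

    // Promise style: the overload without a callback is awaitable/thenable.
    client.asyncSearch.delete({ id: 'my-search-id' })
      .then(response => console.log(response.statusCode))
      .catch(err => console.error(err))

    // Callback style: passing a callbackFn selects the overloads that
    // return a TransportRequestCallback handle instead of a promise.
    client.asyncSearch.delete({ id: 'my-search-id' }, (err, response) => {
      if (err) console.error(err)
      else console.log(response.statusCode)
    })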
-  asyncSearch: {
-    delete(params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    delete(params: T.AsyncSearchDeleteRequest, callback: callbackFn): TransportRequestCallback
-    delete(params: T.AsyncSearchDeleteRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    get(params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    get(params: T.AsyncSearchGetRequest, callback: callbackFn, TContext>): TransportRequestCallback
-    get(params: T.AsyncSearchGetRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
-    status(params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    status(params: T.AsyncSearchStatusRequest, callback: callbackFn, TContext>): TransportRequestCallback
-    status(params: T.AsyncSearchStatusRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
-    submit(params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    submit(callback: callbackFn, TContext>): TransportRequestCallback
-    submit(params: T.AsyncSearchSubmitRequest, callback: callbackFn, TContext>): TransportRequestCallback
-    submit(params: T.AsyncSearchSubmitRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
-  }
-  autoscaling: {
-    deleteAutoscalingPolicy(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteAutoscalingPolicy(callback: callbackFn): TransportRequestCallback
-    deleteAutoscalingPolicy(params: TODO, callback: callbackFn): TransportRequestCallback
-    deleteAutoscalingPolicy(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getAutoscalingCapacity(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getAutoscalingCapacity(callback: callbackFn): TransportRequestCallback
-    getAutoscalingCapacity(params: TODO, callback: callbackFn): TransportRequestCallback
-    getAutoscalingCapacity(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getAutoscalingPolicy(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getAutoscalingPolicy(callback: callbackFn): TransportRequestCallback
-    getAutoscalingPolicy(params: TODO, callback: callbackFn): TransportRequestCallback
-    getAutoscalingPolicy(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putAutoscalingPolicy(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    putAutoscalingPolicy(callback: callbackFn): TransportRequestCallback
-    putAutoscalingPolicy(params: TODO, callback: callbackFn): TransportRequestCallback
-    putAutoscalingPolicy(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  }
-  bulk(params: T.BulkRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  bulk(params: T.BulkRequest, callback: callbackFn): TransportRequestCallback
-  bulk(params: T.BulkRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  cat: {
-    aliases(params?: T.CatAliasesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    aliases(callback: callbackFn): TransportRequestCallback
-    aliases(params: T.CatAliasesRequest, callback: callbackFn): TransportRequestCallback
-    aliases(params: T.CatAliasesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    allocation(params?: T.CatAllocationRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    allocation(callback: callbackFn): TransportRequestCallback
-    allocation(params: T.CatAllocationRequest, callback: callbackFn): TransportRequestCallback
-    allocation(params: T.CatAllocationRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    count(params?: T.CatCountRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    count(callback: callbackFn): TransportRequestCallback
-    count(params: T.CatCountRequest, callback: callbackFn): TransportRequestCallback
-    count(params: T.CatCountRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    fielddata(params?: T.CatFielddataRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    fielddata(callback: callbackFn): TransportRequestCallback
-    fielddata(params: T.CatFielddataRequest, callback: callbackFn): TransportRequestCallback
-    fielddata(params: T.CatFielddataRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    health(params?: T.CatHealthRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    health(callback: callbackFn): TransportRequestCallback
-    health(params: T.CatHealthRequest, callback: callbackFn): TransportRequestCallback
-    health(params: T.CatHealthRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    help(params?: T.CatHelpRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    help(callback: callbackFn): TransportRequestCallback
-    help(params: T.CatHelpRequest, callback: callbackFn): TransportRequestCallback
-    help(params: T.CatHelpRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    indices(params?: T.CatIndicesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    indices(callback: callbackFn): TransportRequestCallback
-    indices(params: T.CatIndicesRequest, callback: callbackFn): TransportRequestCallback
-    indices(params: T.CatIndicesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    master(params?: T.CatMasterRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    master(callback: callbackFn): TransportRequestCallback
-    master(params: T.CatMasterRequest, callback: callbackFn): TransportRequestCallback
-    master(params: T.CatMasterRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    mlDataFrameAnalytics(params?: T.CatDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    mlDataFrameAnalytics(callback: callbackFn): TransportRequestCallback
-    mlDataFrameAnalytics(params: T.CatDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback
-    mlDataFrameAnalytics(params: T.CatDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    mlDatafeeds(params?: T.CatDatafeedsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    mlDatafeeds(callback: callbackFn): TransportRequestCallback
-    mlDatafeeds(params: T.CatDatafeedsRequest, callback: callbackFn): TransportRequestCallback
-    mlDatafeeds(params: T.CatDatafeedsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    mlJobs(params?: T.CatJobsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    mlJobs(callback: callbackFn): TransportRequestCallback
-    mlJobs(params: T.CatJobsRequest, callback: callbackFn): TransportRequestCallback
-    mlJobs(params: T.CatJobsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    mlTrainedModels(params?: T.CatTrainedModelsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    mlTrainedModels(callback: callbackFn): TransportRequestCallback
-    mlTrainedModels(params: T.CatTrainedModelsRequest, callback: callbackFn): TransportRequestCallback
-    mlTrainedModels(params: T.CatTrainedModelsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    nodeattrs(params?: T.CatNodeAttributesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    nodeattrs(callback: callbackFn): TransportRequestCallback
-    nodeattrs(params: T.CatNodeAttributesRequest, callback: callbackFn): TransportRequestCallback
-    nodeattrs(params: T.CatNodeAttributesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    nodes(params?: T.CatNodesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    nodes(callback: callbackFn): TransportRequestCallback
-    nodes(params: T.CatNodesRequest, callback: callbackFn): TransportRequestCallback
-    nodes(params: T.CatNodesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    pendingTasks(params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    pendingTasks(callback: callbackFn): TransportRequestCallback
-    pendingTasks(params: T.CatPendingTasksRequest, callback: callbackFn): TransportRequestCallback
-    pendingTasks(params: T.CatPendingTasksRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    plugins(params?: T.CatPluginsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    plugins(callback: callbackFn): TransportRequestCallback
-    plugins(params: T.CatPluginsRequest, callback: callbackFn): TransportRequestCallback
-    plugins(params: T.CatPluginsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    recovery(params?: T.CatRecoveryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    recovery(callback: callbackFn): TransportRequestCallback
-    recovery(params: T.CatRecoveryRequest, callback: callbackFn): TransportRequestCallback
-    recovery(params: T.CatRecoveryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    repositories(params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    repositories(callback: callbackFn): TransportRequestCallback
-    repositories(params: T.CatRepositoriesRequest, callback: callbackFn): TransportRequestCallback
-    repositories(params: T.CatRepositoriesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    segments(params?: T.CatSegmentsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    segments(callback: callbackFn): TransportRequestCallback
-    segments(params: T.CatSegmentsRequest, callback: callbackFn): TransportRequestCallback
-    segments(params: T.CatSegmentsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    shards(params?: T.CatShardsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    shards(callback: callbackFn): TransportRequestCallback
-    shards(params: T.CatShardsRequest, callback: callbackFn): TransportRequestCallback
-    shards(params: T.CatShardsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    snapshots(params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    snapshots(callback: callbackFn): TransportRequestCallback
-    snapshots(params: T.CatSnapshotsRequest, callback: callbackFn): TransportRequestCallback
-    snapshots(params: T.CatSnapshotsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    tasks(params?: T.CatTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    tasks(callback: callbackFn): TransportRequestCallback
-    tasks(params: T.CatTasksRequest, callback: callbackFn): TransportRequestCallback
-    tasks(params: T.CatTasksRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    templates(params?: T.CatTemplatesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    templates(callback: callbackFn): TransportRequestCallback
-    templates(params: T.CatTemplatesRequest, callback: callbackFn): TransportRequestCallback
-    templates(params: T.CatTemplatesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    threadPool(params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    threadPool(callback: callbackFn): TransportRequestCallback
-    threadPool(params: T.CatThreadPoolRequest, callback: callbackFn): TransportRequestCallback
-    threadPool(params: T.CatThreadPoolRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    transforms(params?: T.CatTransformsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    transforms(callback: callbackFn): TransportRequestCallback
-    transforms(params: T.CatTransformsRequest, callback: callbackFn): TransportRequestCallback
-    transforms(params: T.CatTransformsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  }
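The cat namespace above follows the same overload pattern; these endpoints serve compact text tables by default, and switching to structured output is done through the format parameter of the underlying _cat REST APIs (assumed here to be accepted by the cat request types). A sketch:

    async function listIndices (client: Client) {
      // Plain-text table, suitable for printing to a terminal.
      const text = await client.cat.indices()
      console.log(text.body)

      // JSON output instead: one object per index.
      const json = await client.cat.indices({ format: 'json' })
      console.log(json.body.length)
    }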
-  ccr: {
-    deleteAutoFollowPattern(params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteAutoFollowPattern(params: T.CcrDeleteAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
-    deleteAutoFollowPattern(params: T.CcrDeleteAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    follow(params: T.CcrCreateFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    follow(params: T.CcrCreateFollowIndexRequest, callback: callbackFn): TransportRequestCallback
-    follow(params: T.CcrCreateFollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    followInfo(params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    followInfo(params: T.CcrFollowInfoRequest, callback: callbackFn): TransportRequestCallback
-    followInfo(params: T.CcrFollowInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    followStats(params: T.CcrFollowIndexStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    followStats(params: T.CcrFollowIndexStatsRequest, callback: callbackFn): TransportRequestCallback
-    followStats(params: T.CcrFollowIndexStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    forgetFollower(params: T.CcrForgetFollowerIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    forgetFollower(params: T.CcrForgetFollowerIndexRequest, callback: callbackFn): TransportRequestCallback
-    forgetFollower(params: T.CcrForgetFollowerIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getAutoFollowPattern(params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getAutoFollowPattern(callback: callbackFn): TransportRequestCallback
-    getAutoFollowPattern(params: T.CcrGetAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
-    getAutoFollowPattern(params: T.CcrGetAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    pauseAutoFollowPattern(params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    pauseAutoFollowPattern(params: T.CcrPauseAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
-    pauseAutoFollowPattern(params: T.CcrPauseAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    pauseFollow(params: T.CcrPauseFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    pauseFollow(params: T.CcrPauseFollowIndexRequest, callback: callbackFn): TransportRequestCallback
-    pauseFollow(params: T.CcrPauseFollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putAutoFollowPattern(params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putAutoFollowPattern(params: T.CcrPutAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
-    putAutoFollowPattern(params: T.CcrPutAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    resumeAutoFollowPattern(params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    resumeAutoFollowPattern(params: T.CcrResumeAutoFollowPatternRequest, callback: callbackFn): TransportRequestCallback
-    resumeAutoFollowPattern(params: T.CcrResumeAutoFollowPatternRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    resumeFollow(params: T.CcrResumeFollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    resumeFollow(params: T.CcrResumeFollowIndexRequest, callback: callbackFn): TransportRequestCallback
-    resumeFollow(params: T.CcrResumeFollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    stats(params?: T.CcrStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stats(callback: callbackFn): TransportRequestCallback
-    stats(params: T.CcrStatsRequest, callback: callbackFn): TransportRequestCallback
-    stats(params: T.CcrStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    unfollow(params: T.CcrUnfollowIndexRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    unfollow(params: T.CcrUnfollowIndexRequest, callback: callbackFn): TransportRequestCallback
-    unfollow(params: T.CcrUnfollowIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  }
-  clearScroll(params?: T.ClearScrollRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  clearScroll(callback: callbackFn): TransportRequestCallback
-  clearScroll(params: T.ClearScrollRequest, callback: callbackFn): TransportRequestCallback
-  clearScroll(params: T.ClearScrollRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  closePointInTime(params?: T.ClosePointInTimeRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  closePointInTime(callback: callbackFn): TransportRequestCallback
-  closePointInTime(params: T.ClosePointInTimeRequest, callback: callbackFn): TransportRequestCallback
-  closePointInTime(params: T.ClosePointInTimeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  cluster: {
-    allocationExplain(params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    allocationExplain(callback: callbackFn): TransportRequestCallback
-    allocationExplain(params: T.ClusterAllocationExplainRequest, callback: callbackFn): TransportRequestCallback
-    allocationExplain(params: T.ClusterAllocationExplainRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    deleteComponentTemplate(params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteComponentTemplate(params: T.ClusterDeleteComponentTemplateRequest, callback: callbackFn): TransportRequestCallback
-    deleteComponentTemplate(params: T.ClusterDeleteComponentTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    deleteVotingConfigExclusions(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteVotingConfigExclusions(callback: callbackFn): TransportRequestCallback
-    deleteVotingConfigExclusions(params: TODO, callback: callbackFn): TransportRequestCallback
-    deleteVotingConfigExclusions(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    existsComponentTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    existsComponentTemplate(callback: callbackFn): TransportRequestCallback
-    existsComponentTemplate(params: TODO, callback: callbackFn): TransportRequestCallback
-    existsComponentTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getComponentTemplate(params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getComponentTemplate(callback: callbackFn): TransportRequestCallback
-    getComponentTemplate(params: T.ClusterGetComponentTemplateRequest, callback: callbackFn): TransportRequestCallback
-    getComponentTemplate(params: T.ClusterGetComponentTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getSettings(params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getSettings(callback: callbackFn): TransportRequestCallback
-    getSettings(params: T.ClusterGetSettingsRequest, callback: callbackFn): TransportRequestCallback
-    getSettings(params: T.ClusterGetSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    health(params?: T.ClusterHealthRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    health(callback: callbackFn): TransportRequestCallback
-    health(params: T.ClusterHealthRequest, callback: callbackFn): TransportRequestCallback
-    health(params: T.ClusterHealthRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    pendingTasks(params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    pendingTasks(callback: callbackFn): TransportRequestCallback
-    pendingTasks(params: T.ClusterPendingTasksRequest, callback: callbackFn): TransportRequestCallback
-    pendingTasks(params: T.ClusterPendingTasksRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    postVotingConfigExclusions(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    postVotingConfigExclusions(callback: callbackFn): TransportRequestCallback
-    postVotingConfigExclusions(params: TODO, callback: callbackFn): TransportRequestCallback
-    postVotingConfigExclusions(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putComponentTemplate(params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putComponentTemplate(params: T.ClusterPutComponentTemplateRequest, callback: callbackFn): TransportRequestCallback
-    putComponentTemplate(params: T.ClusterPutComponentTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putSettings(params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putSettings(callback: callbackFn): TransportRequestCallback
-    putSettings(params: T.ClusterPutSettingsRequest, callback: callbackFn): TransportRequestCallback
-    putSettings(params: T.ClusterPutSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    remoteInfo(params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    remoteInfo(callback: callbackFn): TransportRequestCallback
-    remoteInfo(params: T.ClusterRemoteInfoRequest, callback: callbackFn): TransportRequestCallback
-    remoteInfo(params: T.ClusterRemoteInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    reroute(params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    reroute(callback: callbackFn): TransportRequestCallback
-    reroute(params: T.ClusterRerouteRequest, callback: callbackFn): TransportRequestCallback
-    reroute(params: T.ClusterRerouteRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    state(params?: T.ClusterStateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    state(callback: callbackFn): TransportRequestCallback
-    state(params: T.ClusterStateRequest, callback: callbackFn): TransportRequestCallback
-    state(params: T.ClusterStateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    stats(params?: T.ClusterStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stats(callback: callbackFn): TransportRequestCallback
-    stats(params: T.ClusterStatsRequest, callback: callbackFn): TransportRequestCallback
-    stats(params: T.ClusterStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  }
-  count(params?: T.CountRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  count(callback: callbackFn): TransportRequestCallback
-  count(params: T.CountRequest, callback: callbackFn): TransportRequestCallback
-  count(params: T.CountRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  create(params: T.CreateRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  create(params: T.CreateRequest, callback: callbackFn): TransportRequestCallback
-  create(params: T.CreateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
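Note that create(), unlike most methods here, has no parameter-less overload: T.CreateRequest always requires the target index and document id, because the underlying _create endpoint refuses to overwrite an existing document. A short sketch (index, id, and body are illustrative):

    async function createOnce (client: Client) {
      // Succeeds only if my-index does not already contain a document '1';
      // a second call with the same id fails with a version conflict.
      await client.create({
        index: 'my-index',
        id: '1',
        body: { title: 'first version' }
      })
    }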
-  danglingIndices: {
-    deleteDanglingIndex(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteDanglingIndex(callback: callbackFn): TransportRequestCallback
-    deleteDanglingIndex(params: TODO, callback: callbackFn): TransportRequestCallback
-    deleteDanglingIndex(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    importDanglingIndex(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    importDanglingIndex(callback: callbackFn): TransportRequestCallback
-    importDanglingIndex(params: TODO, callback: callbackFn): TransportRequestCallback
-    importDanglingIndex(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    listDanglingIndices(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    listDanglingIndices(callback: callbackFn): TransportRequestCallback
-    listDanglingIndices(params: TODO, callback: callbackFn): TransportRequestCallback
-    listDanglingIndices(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  }
-  dataFrameTransformDeprecated: {
-    deleteTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteTransform(callback: callbackFn): TransportRequestCallback
-    deleteTransform(params: TODO, callback: callbackFn): TransportRequestCallback
-    deleteTransform(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getTransform(callback: callbackFn): TransportRequestCallback
-    getTransform(params: TODO, callback: callbackFn): TransportRequestCallback
-    getTransform(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getTransformStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getTransformStats(callback: callbackFn): TransportRequestCallback
-    getTransformStats(params: TODO, callback: callbackFn): TransportRequestCallback
-    getTransformStats(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    previewTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    previewTransform(callback: callbackFn): TransportRequestCallback
-    previewTransform(params: TODO, callback: callbackFn): TransportRequestCallback
-    previewTransform(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    putTransform(callback: callbackFn): TransportRequestCallback
-    putTransform(params: TODO, callback: callbackFn): TransportRequestCallback
-    putTransform(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    startTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    startTransform(callback: callbackFn): TransportRequestCallback
-    startTransform(params: TODO, callback: callbackFn): TransportRequestCallback
-    startTransform(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    stopTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    stopTransform(callback: callbackFn): TransportRequestCallback
-    stopTransform(params: TODO, callback: callbackFn): TransportRequestCallback
-    stopTransform(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    updateTransform(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    updateTransform(callback: callbackFn): TransportRequestCallback
-    updateTransform(params: TODO, callback: callbackFn): TransportRequestCallback
-    updateTransform(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  }
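Namespaces such as danglingIndices and dataFrameTransformDeprecated above still use the TODO placeholder type declared near the top of this file, so their params objects accept any shape and get no compile-time checking. A sketch of what that means in practice (parameter names follow the dangling-indices REST API, not these typings; the UUID is hypothetical):

    async function dropDangling (client: Client) {
      // TODO is an open record type, so the compiler accepts any keys;
      // misspelled parameters surface only as runtime errors.
      const { body } = await client.danglingIndices.listDanglingIndices()
      console.log(body)
      await client.danglingIndices.deleteDanglingIndex({
        index_uuid: 'zmM4e0JtBkeUjiHD-MihPQ',
        accept_data_loss: true
      })
    }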
-  delete(params: T.DeleteRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  delete(params: T.DeleteRequest, callback: callbackFn): TransportRequestCallback
-  delete(params: T.DeleteRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  deleteByQuery(params: T.DeleteByQueryRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  deleteByQuery(params: T.DeleteByQueryRequest, callback: callbackFn): TransportRequestCallback
-  deleteByQuery(params: T.DeleteByQueryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  deleteByQueryRethrottle(params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  deleteByQueryRethrottle(params: T.DeleteByQueryRethrottleRequest, callback: callbackFn): TransportRequestCallback
-  deleteByQueryRethrottle(params: T.DeleteByQueryRethrottleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  deleteScript(params: T.DeleteScriptRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  deleteScript(params: T.DeleteScriptRequest, callback: callbackFn): TransportRequestCallback
-  deleteScript(params: T.DeleteScriptRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  enrich: {
-    deletePolicy(params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deletePolicy(params: T.EnrichDeletePolicyRequest, callback: callbackFn): TransportRequestCallback
-    deletePolicy(params: T.EnrichDeletePolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    executePolicy(params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    executePolicy(params: T.EnrichExecutePolicyRequest, callback: callbackFn): TransportRequestCallback
-    executePolicy(params: T.EnrichExecutePolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getPolicy(params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getPolicy(callback: callbackFn): TransportRequestCallback
-    getPolicy(params: T.EnrichGetPolicyRequest, callback: callbackFn): TransportRequestCallback
-    getPolicy(params: T.EnrichGetPolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    putPolicy(params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    putPolicy(params: T.EnrichPutPolicyRequest, callback: callbackFn): TransportRequestCallback
-    putPolicy(params: T.EnrichPutPolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    stats(params?: T.EnrichStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    stats(callback: callbackFn): TransportRequestCallback
-    stats(params: T.EnrichStatsRequest, callback: callbackFn): TransportRequestCallback
-    stats(params: T.EnrichStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  }
-  eql: {
-    delete(params: T.EqlDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    delete(params: T.EqlDeleteRequest, callback: callbackFn): TransportRequestCallback
-    delete(params: T.EqlDeleteRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    get(params: T.EqlGetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    get(params: T.EqlGetRequest, callback: callbackFn, TContext>): TransportRequestCallback
-    get(params: T.EqlGetRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
-    getStatus(params: T.EqlGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getStatus(params: T.EqlGetStatusRequest, callback: callbackFn): TransportRequestCallback
-    getStatus(params: T.EqlGetStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    search(params: T.EqlSearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-    search(params: T.EqlSearchRequest, callback: callbackFn, TContext>): TransportRequestCallback
-    search(params: T.EqlSearchRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
-  }
-  exists(params: T.ExistsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  exists(params: T.ExistsRequest, callback: callbackFn): TransportRequestCallback
-  exists(params: T.ExistsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  existsSource(params: T.ExistsSourceRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  existsSource(params: T.ExistsSourceRequest, callback: callbackFn): TransportRequestCallback
-  existsSource(params: T.ExistsSourceRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  explain(params: T.ExplainRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-  explain(params: T.ExplainRequest, callback: callbackFn, TContext>): TransportRequestCallback
-  explain(params: T.ExplainRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
-  features: {
-    getFeatures(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    getFeatures(callback: callbackFn): TransportRequestCallback
-    getFeatures(params: TODO, callback: callbackFn): TransportRequestCallback
-    getFeatures(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    resetFeatures(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    resetFeatures(callback: callbackFn): TransportRequestCallback
-    resetFeatures(params: TODO, callback: callbackFn): TransportRequestCallback
-    resetFeatures(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  }
-  fieldCaps(params?: T.FieldCapsRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  fieldCaps(callback: callbackFn): TransportRequestCallback
-  fieldCaps(params: T.FieldCapsRequest, callback: callbackFn): TransportRequestCallback
-  fieldCaps(params: T.FieldCapsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  fleet: {
-    globalCheckpoints(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    globalCheckpoints(callback: callbackFn): TransportRequestCallback
-    globalCheckpoints(params: TODO, callback: callbackFn): TransportRequestCallback
-    globalCheckpoints(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  }
-  get(params: T.GetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-  get(params: T.GetRequest, callback: callbackFn, TContext>): TransportRequestCallback
-  get(params: T.GetRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
-  getScript(params: T.GetScriptRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  getScript(params: T.GetScriptRequest, callback: callbackFn): TransportRequestCallback
-  getScript(params: T.GetScriptRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  getScriptContext(params?: T.GetScriptContextRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  getScriptContext(callback: callbackFn): TransportRequestCallback
-  getScriptContext(params: T.GetScriptContextRequest, callback: callbackFn): TransportRequestCallback
-  getScriptContext(params: T.GetScriptContextRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  getScriptLanguages(params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): TransportRequestPromise>
-  getScriptLanguages(callback: callbackFn): TransportRequestCallback
-  getScriptLanguages(params: T.GetScriptLanguagesRequest, callback: callbackFn): TransportRequestCallback
-  getScriptLanguages(params: T.GetScriptLanguagesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  getSource(params?: T.GetSourceRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
-  getSource(callback: callbackFn, TContext>): TransportRequestCallback
-  getSource(params: T.GetSourceRequest, callback: callbackFn, TContext>): TransportRequestCallback
-  getSource(params: T.GetSourceRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
-  graph: {
-    explore(params: T.GraphExploreRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    explore(params: T.GraphExploreRequest, callback: callbackFn): TransportRequestCallback
-    explore(params: T.GraphExploreRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-  }
-  ilm: {
-    deleteLifecycle(params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    deleteLifecycle(params: T.IlmDeleteLifecycleRequest, callback: callbackFn): TransportRequestCallback
-    deleteLifecycle(params: T.IlmDeleteLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    explainLifecycle(params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    explainLifecycle(params: T.IlmExplainLifecycleRequest, callback: callbackFn): TransportRequestCallback
-    explainLifecycle(params: T.IlmExplainLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getLifecycle(params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getLifecycle(callback: callbackFn): TransportRequestCallback
-    getLifecycle(params: T.IlmGetLifecycleRequest, callback: callbackFn): TransportRequestCallback
-    getLifecycle(params: T.IlmGetLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    getStatus(params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
-    getStatus(callback: callbackFn): TransportRequestCallback
-    getStatus(params: T.IlmGetStatusRequest, callback: callbackFn): TransportRequestCallback
-    getStatus(params: T.IlmGetStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
-    migrateToDataTiers(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
-    migrateToDataTiers(callback: callbackFn): TransportRequestCallback
-    migrateToDataTiers(params: TODO,
callback: callbackFn): TransportRequestCallback - migrateToDataTiers(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - moveToStep(params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): TransportRequestPromise> - moveToStep(params: T.IlmMoveToStepRequest, callback: callbackFn): TransportRequestCallback - moveToStep(params: T.IlmMoveToStepRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putLifecycle(params?: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise> - putLifecycle(callback: callbackFn): TransportRequestCallback - putLifecycle(params: T.IlmPutLifecycleRequest, callback: callbackFn): TransportRequestCallback - putLifecycle(params: T.IlmPutLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - removePolicy(params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): TransportRequestPromise> - removePolicy(params: T.IlmRemovePolicyRequest, callback: callbackFn): TransportRequestCallback - removePolicy(params: T.IlmRemovePolicyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - retry(params: T.IlmRetryRequest, options?: TransportRequestOptions): TransportRequestPromise> - retry(params: T.IlmRetryRequest, callback: callbackFn): TransportRequestCallback - retry(params: T.IlmRetryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - start(params?: T.IlmStartRequest, options?: TransportRequestOptions): TransportRequestPromise> - start(callback: callbackFn): TransportRequestCallback - start(params: T.IlmStartRequest, callback: callbackFn): TransportRequestCallback - start(params: T.IlmStartRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stop(params?: T.IlmStopRequest, options?: TransportRequestOptions): TransportRequestPromise> - stop(callback: callbackFn): TransportRequestCallback - stop(params: T.IlmStopRequest, callback: callbackFn): TransportRequestCallback - stop(params: T.IlmStopRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - index(params: T.IndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - index(params: T.IndexRequest, callback: callbackFn): TransportRequestCallback - index(params: T.IndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - indices: { - addBlock(params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): TransportRequestPromise> - addBlock(params: T.IndicesAddBlockRequest, callback: callbackFn): TransportRequestCallback - addBlock(params: T.IndicesAddBlockRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - analyze(params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): TransportRequestPromise> - analyze(callback: callbackFn): TransportRequestCallback - analyze(params: T.IndicesAnalyzeRequest, callback: callbackFn): TransportRequestCallback - analyze(params: T.IndicesAnalyzeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clearCache(params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): TransportRequestPromise> - clearCache(callback: callbackFn): TransportRequestCallback - clearCache(params: T.IndicesClearCacheRequest, callback: callbackFn): TransportRequestCallback - clearCache(params: T.IndicesClearCacheRequest, 
options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clone(params: T.IndicesCloneRequest, options?: TransportRequestOptions): TransportRequestPromise> - clone(params: T.IndicesCloneRequest, callback: callbackFn): TransportRequestCallback - clone(params: T.IndicesCloneRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - close(params: T.IndicesCloseRequest, options?: TransportRequestOptions): TransportRequestPromise> - close(params: T.IndicesCloseRequest, callback: callbackFn): TransportRequestCallback - close(params: T.IndicesCloseRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - create(params: T.IndicesCreateRequest, options?: TransportRequestOptions): TransportRequestPromise> - create(params: T.IndicesCreateRequest, callback: callbackFn): TransportRequestCallback - create(params: T.IndicesCreateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - createDataStream(params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise> - createDataStream(params: T.IndicesCreateDataStreamRequest, callback: callbackFn): TransportRequestCallback - createDataStream(params: T.IndicesCreateDataStreamRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - dataStreamsStats(params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - dataStreamsStats(callback: callbackFn): TransportRequestCallback - dataStreamsStats(params: T.IndicesDataStreamsStatsRequest, callback: callbackFn): TransportRequestCallback - dataStreamsStats(params: T.IndicesDataStreamsStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete(params: T.IndicesDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise> - delete(params: T.IndicesDeleteRequest, callback: callbackFn): TransportRequestCallback - delete(params: T.IndicesDeleteRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteAlias(params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteAlias(params: T.IndicesDeleteAliasRequest, callback: callbackFn): TransportRequestCallback - deleteAlias(params: T.IndicesDeleteAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteDataStream(params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteDataStream(params: T.IndicesDeleteDataStreamRequest, callback: callbackFn): TransportRequestCallback - deleteDataStream(params: T.IndicesDeleteDataStreamRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteIndexTemplate(params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteIndexTemplate(params: T.IndicesDeleteIndexTemplateRequest, callback: callbackFn): TransportRequestCallback - deleteIndexTemplate(params: T.IndicesDeleteIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteTemplate(params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteTemplate(params: T.IndicesDeleteTemplateRequest, callback: callbackFn): TransportRequestCallback - deleteTemplate(params: T.IndicesDeleteTemplateRequest, options: 
TransportRequestOptions, callback: callbackFn): TransportRequestCallback - diskUsage(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - diskUsage(callback: callbackFn): TransportRequestCallback - diskUsage(params: TODO, callback: callbackFn): TransportRequestCallback - diskUsage(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - exists(params: T.IndicesExistsRequest, options?: TransportRequestOptions): TransportRequestPromise> - exists(params: T.IndicesExistsRequest, callback: callbackFn): TransportRequestCallback - exists(params: T.IndicesExistsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsAlias(params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> - existsAlias(params: T.IndicesExistsAliasRequest, callback: callbackFn): TransportRequestCallback - existsAlias(params: T.IndicesExistsAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsIndexTemplate(params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - existsIndexTemplate(params: T.IndicesExistsIndexTemplateRequest, callback: callbackFn): TransportRequestCallback - existsIndexTemplate(params: T.IndicesExistsIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsTemplate(params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - existsTemplate(params: T.IndicesExistsTemplateRequest, callback: callbackFn): TransportRequestCallback - existsTemplate(params: T.IndicesExistsTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsType(params: T.IndicesExistsTypeRequest, options?: TransportRequestOptions): TransportRequestPromise> - existsType(params: T.IndicesExistsTypeRequest, callback: callbackFn): TransportRequestCallback - existsType(params: T.IndicesExistsTypeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - fieldUsageStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - fieldUsageStats(callback: callbackFn): TransportRequestCallback - fieldUsageStats(params: TODO, callback: callbackFn): TransportRequestCallback - fieldUsageStats(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - flush(params?: T.IndicesFlushRequest, options?: TransportRequestOptions): TransportRequestPromise> - flush(callback: callbackFn): TransportRequestCallback - flush(params: T.IndicesFlushRequest, callback: callbackFn): TransportRequestCallback - flush(params: T.IndicesFlushRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - flushSynced(params?: T.IndicesFlushSyncedRequest, options?: TransportRequestOptions): TransportRequestPromise> - flushSynced(callback: callbackFn): TransportRequestCallback - flushSynced(params: T.IndicesFlushSyncedRequest, callback: callbackFn): TransportRequestCallback - flushSynced(params: T.IndicesFlushSyncedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - forcemerge(params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): TransportRequestPromise> - forcemerge(callback: callbackFn): TransportRequestCallback - forcemerge(params: T.IndicesForcemergeRequest, callback: callbackFn): TransportRequestCallback - 
forcemerge(params: T.IndicesForcemergeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - freeze(params: T.IndicesFreezeRequest, options?: TransportRequestOptions): TransportRequestPromise> - freeze(params: T.IndicesFreezeRequest, callback: callbackFn): TransportRequestCallback - freeze(params: T.IndicesFreezeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get(params: T.IndicesGetRequest, options?: TransportRequestOptions): TransportRequestPromise> - get(params: T.IndicesGetRequest, callback: callbackFn): TransportRequestCallback - get(params: T.IndicesGetRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getAlias(params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> - getAlias(callback: callbackFn): TransportRequestCallback - getAlias(params: T.IndicesGetAliasRequest, callback: callbackFn): TransportRequestCallback - getAlias(params: T.IndicesGetAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDataStream(params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise> - getDataStream(callback: callbackFn): TransportRequestCallback - getDataStream(params: T.IndicesGetDataStreamRequest, callback: callbackFn): TransportRequestCallback - getDataStream(params: T.IndicesGetDataStreamRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getFieldMapping(params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> - getFieldMapping(params: T.IndicesGetFieldMappingRequest, callback: callbackFn): TransportRequestCallback - getFieldMapping(params: T.IndicesGetFieldMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getIndexTemplate(params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - getIndexTemplate(callback: callbackFn): TransportRequestCallback - getIndexTemplate(params: T.IndicesGetIndexTemplateRequest, callback: callbackFn): TransportRequestCallback - getIndexTemplate(params: T.IndicesGetIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getMapping(params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> - getMapping(callback: callbackFn): TransportRequestCallback - getMapping(params: T.IndicesGetMappingRequest, callback: callbackFn): TransportRequestCallback - getMapping(params: T.IndicesGetMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getSettings(params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getSettings(callback: callbackFn): TransportRequestCallback - getSettings(params: T.IndicesGetSettingsRequest, callback: callbackFn): TransportRequestCallback - getSettings(params: T.IndicesGetSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTemplate(params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - getTemplate(callback: callbackFn): TransportRequestCallback - getTemplate(params: T.IndicesGetTemplateRequest, callback: callbackFn): TransportRequestCallback - getTemplate(params: T.IndicesGetTemplateRequest, options: TransportRequestOptions, callback: callbackFn): 
TransportRequestCallback - getUpgrade(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getUpgrade(callback: callbackFn): TransportRequestCallback - getUpgrade(params: TODO, callback: callbackFn): TransportRequestCallback - getUpgrade(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - migrateToDataStream(params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): TransportRequestPromise> - migrateToDataStream(params: T.IndicesMigrateToDataStreamRequest, callback: callbackFn): TransportRequestCallback - migrateToDataStream(params: T.IndicesMigrateToDataStreamRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - open(params: T.IndicesOpenRequest, options?: TransportRequestOptions): TransportRequestPromise> - open(params: T.IndicesOpenRequest, callback: callbackFn): TransportRequestCallback - open(params: T.IndicesOpenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - promoteDataStream(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - promoteDataStream(callback: callbackFn): TransportRequestCallback - promoteDataStream(params: TODO, callback: callbackFn): TransportRequestCallback - promoteDataStream(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putAlias(params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> - putAlias(params: T.IndicesPutAliasRequest, callback: callbackFn): TransportRequestCallback - putAlias(params: T.IndicesPutAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putIndexTemplate(params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - putIndexTemplate(params: T.IndicesPutIndexTemplateRequest, callback: callbackFn): TransportRequestCallback - putIndexTemplate(params: T.IndicesPutIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putMapping(params?: T.IndicesPutMappingRequest, options?: TransportRequestOptions): TransportRequestPromise> - putMapping(callback: callbackFn): TransportRequestCallback - putMapping(params: T.IndicesPutMappingRequest, callback: callbackFn): TransportRequestCallback - putMapping(params: T.IndicesPutMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putSettings(params?: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise> - putSettings(callback: callbackFn): TransportRequestCallback - putSettings(params: T.IndicesPutSettingsRequest, callback: callbackFn): TransportRequestCallback - putSettings(params: T.IndicesPutSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putTemplate(params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - putTemplate(params: T.IndicesPutTemplateRequest, callback: callbackFn): TransportRequestCallback - putTemplate(params: T.IndicesPutTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - recovery(params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): TransportRequestPromise> - recovery(callback: callbackFn): TransportRequestCallback - recovery(params: T.IndicesRecoveryRequest, callback: callbackFn): TransportRequestCallback - recovery(params: 
T.IndicesRecoveryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - refresh(params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): TransportRequestPromise> - refresh(callback: callbackFn): TransportRequestCallback - refresh(params: T.IndicesRefreshRequest, callback: callbackFn): TransportRequestCallback - refresh(params: T.IndicesRefreshRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - reloadSearchAnalyzers(params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): TransportRequestPromise> - reloadSearchAnalyzers(params: T.IndicesReloadSearchAnalyzersRequest, callback: callbackFn): TransportRequestCallback - reloadSearchAnalyzers(params: T.IndicesReloadSearchAnalyzersRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - resolveIndex(params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): TransportRequestPromise> - resolveIndex(params: T.IndicesResolveIndexRequest, callback: callbackFn): TransportRequestCallback - resolveIndex(params: T.IndicesResolveIndexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - rollover(params: T.IndicesRolloverRequest, options?: TransportRequestOptions): TransportRequestPromise> - rollover(params: T.IndicesRolloverRequest, callback: callbackFn): TransportRequestCallback - rollover(params: T.IndicesRolloverRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - segments(params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): TransportRequestPromise> - segments(callback: callbackFn): TransportRequestCallback - segments(params: T.IndicesSegmentsRequest, callback: callbackFn): TransportRequestCallback - segments(params: T.IndicesSegmentsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - shardStores(params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): TransportRequestPromise> - shardStores(callback: callbackFn): TransportRequestCallback - shardStores(params: T.IndicesShardStoresRequest, callback: callbackFn): TransportRequestCallback - shardStores(params: T.IndicesShardStoresRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - shrink(params: T.IndicesShrinkRequest, options?: TransportRequestOptions): TransportRequestPromise> - shrink(params: T.IndicesShrinkRequest, callback: callbackFn): TransportRequestCallback - shrink(params: T.IndicesShrinkRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - simulateIndexTemplate(params?: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise> - simulateIndexTemplate(callback: callbackFn): TransportRequestCallback - simulateIndexTemplate(params: T.IndicesSimulateIndexTemplateRequest, callback: callbackFn): TransportRequestCallback - simulateIndexTemplate(params: T.IndicesSimulateIndexTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - simulateTemplate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - simulateTemplate(callback: callbackFn): TransportRequestCallback - simulateTemplate(params: TODO, callback: callbackFn): TransportRequestCallback - simulateTemplate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - split(params: T.IndicesSplitRequest, 
options?: TransportRequestOptions): TransportRequestPromise> - split(params: T.IndicesSplitRequest, callback: callbackFn): TransportRequestCallback - split(params: T.IndicesSplitRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stats(params?: T.IndicesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - stats(callback: callbackFn): TransportRequestCallback - stats(params: T.IndicesStatsRequest, callback: callbackFn): TransportRequestCallback - stats(params: T.IndicesStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - unfreeze(params: T.IndicesUnfreezeRequest, options?: TransportRequestOptions): TransportRequestPromise> - unfreeze(params: T.IndicesUnfreezeRequest, callback: callbackFn): TransportRequestCallback - unfreeze(params: T.IndicesUnfreezeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateAliases(params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): TransportRequestPromise> - updateAliases(callback: callbackFn): TransportRequestCallback - updateAliases(params: T.IndicesUpdateAliasesRequest, callback: callbackFn): TransportRequestCallback - updateAliases(params: T.IndicesUpdateAliasesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - upgrade(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - upgrade(callback: callbackFn): TransportRequestCallback - upgrade(params: TODO, callback: callbackFn): TransportRequestCallback - upgrade(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - validateQuery(params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): TransportRequestPromise> - validateQuery(callback: callbackFn): TransportRequestCallback - validateQuery(params: T.IndicesValidateQueryRequest, callback: callbackFn): TransportRequestCallback - validateQuery(params: T.IndicesValidateQueryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - info(params?: T.InfoRequest, options?: TransportRequestOptions): TransportRequestPromise> - info(callback: callbackFn): TransportRequestCallback - info(params: T.InfoRequest, callback: callbackFn): TransportRequestCallback - info(params: T.InfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - ingest: { - deletePipeline(params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> - deletePipeline(params: T.IngestDeletePipelineRequest, callback: callbackFn): TransportRequestCallback - deletePipeline(params: T.IngestDeletePipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - geoIpStats(params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - geoIpStats(callback: callbackFn): TransportRequestCallback - geoIpStats(params: T.IngestGeoIpStatsRequest, callback: callbackFn): TransportRequestCallback - geoIpStats(params: T.IngestGeoIpStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getPipeline(params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> - getPipeline(callback: callbackFn): TransportRequestCallback - getPipeline(params: T.IngestGetPipelineRequest, callback: callbackFn): TransportRequestCallback - getPipeline(params: T.IngestGetPipelineRequest, 
options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - processorGrok(params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): TransportRequestPromise> - processorGrok(callback: callbackFn): TransportRequestCallback - processorGrok(params: T.IngestProcessorGrokRequest, callback: callbackFn): TransportRequestCallback - processorGrok(params: T.IngestProcessorGrokRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putPipeline(params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> - putPipeline(params: T.IngestPutPipelineRequest, callback: callbackFn): TransportRequestCallback - putPipeline(params: T.IngestPutPipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - simulate(params?: T.IngestSimulatePipelineRequest, options?: TransportRequestOptions): TransportRequestPromise> - simulate(callback: callbackFn): TransportRequestCallback - simulate(params: T.IngestSimulatePipelineRequest, callback: callbackFn): TransportRequestCallback - simulate(params: T.IngestSimulatePipelineRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - license: { - delete(params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise> - delete(callback: callbackFn): TransportRequestCallback - delete(params: T.LicenseDeleteRequest, callback: callbackFn): TransportRequestCallback - delete(params: T.LicenseDeleteRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get(params?: T.LicenseGetRequest, options?: TransportRequestOptions): TransportRequestPromise> - get(callback: callbackFn): TransportRequestCallback - get(params: T.LicenseGetRequest, callback: callbackFn): TransportRequestCallback - get(params: T.LicenseGetRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getBasicStatus(params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - getBasicStatus(callback: callbackFn): TransportRequestCallback - getBasicStatus(params: T.LicenseGetBasicStatusRequest, callback: callbackFn): TransportRequestCallback - getBasicStatus(params: T.LicenseGetBasicStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTrialStatus(params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): TransportRequestPromise> - getTrialStatus(callback: callbackFn): TransportRequestCallback - getTrialStatus(params: T.LicenseGetTrialStatusRequest, callback: callbackFn): TransportRequestCallback - getTrialStatus(params: T.LicenseGetTrialStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - post(params?: T.LicensePostRequest, options?: TransportRequestOptions): TransportRequestPromise> - post(callback: callbackFn): TransportRequestCallback - post(params: T.LicensePostRequest, callback: callbackFn): TransportRequestCallback - post(params: T.LicensePostRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postStartBasic(params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): TransportRequestPromise> - postStartBasic(callback: callbackFn): TransportRequestCallback - postStartBasic(params: T.LicensePostStartBasicRequest, callback: callbackFn): TransportRequestCallback - postStartBasic(params: T.LicensePostStartBasicRequest, 
options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postStartTrial(params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): TransportRequestPromise> - postStartTrial(callback: callbackFn): TransportRequestCallback - postStartTrial(params: T.LicensePostStartTrialRequest, callback: callbackFn): TransportRequestCallback - postStartTrial(params: T.LicensePostStartTrialRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - logstash: { - deletePipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - deletePipeline(callback: callbackFn): TransportRequestCallback - deletePipeline(params: TODO, callback: callbackFn): TransportRequestCallback - deletePipeline(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getPipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - getPipeline(callback: callbackFn): TransportRequestCallback - getPipeline(params: TODO, callback: callbackFn): TransportRequestCallback - getPipeline(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putPipeline(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - putPipeline(callback: callbackFn): TransportRequestCallback - putPipeline(params: TODO, callback: callbackFn): TransportRequestCallback - putPipeline(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - mget(params?: T.MgetRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>> - mget(callback: callbackFn, TContext>): TransportRequestCallback - mget(params: T.MgetRequest, callback: callbackFn, TContext>): TransportRequestCallback - mget(params: T.MgetRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback - migration: { - deprecations(params?: T.MigrationDeprecationInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> - deprecations(callback: callbackFn): TransportRequestCallback - deprecations(params: T.MigrationDeprecationInfoRequest, callback: callbackFn): TransportRequestCallback - deprecations(params: T.MigrationDeprecationInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - ml: { - closeJob(params: T.MlCloseJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - closeJob(params: T.MlCloseJobRequest, callback: callbackFn): TransportRequestCallback - closeJob(params: T.MlCloseJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteCalendar(params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteCalendar(params: T.MlDeleteCalendarRequest, callback: callbackFn): TransportRequestCallback - deleteCalendar(params: T.MlDeleteCalendarRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteCalendarEvent(params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteCalendarEvent(params: T.MlDeleteCalendarEventRequest, callback: callbackFn): TransportRequestCallback - deleteCalendarEvent(params: T.MlDeleteCalendarEventRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteCalendarJob(params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - 
deleteCalendarJob(params: T.MlDeleteCalendarJobRequest, callback: callbackFn): TransportRequestCallback - deleteCalendarJob(params: T.MlDeleteCalendarJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteDataFrameAnalytics(params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteDataFrameAnalytics(params: T.MlDeleteDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback - deleteDataFrameAnalytics(params: T.MlDeleteDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteDatafeed(params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteDatafeed(params: T.MlDeleteDatafeedRequest, callback: callbackFn): TransportRequestCallback - deleteDatafeed(params: T.MlDeleteDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteExpiredData(params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteExpiredData(callback: callbackFn): TransportRequestCallback - deleteExpiredData(params: T.MlDeleteExpiredDataRequest, callback: callbackFn): TransportRequestCallback - deleteExpiredData(params: T.MlDeleteExpiredDataRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteFilter(params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteFilter(params: T.MlDeleteFilterRequest, callback: callbackFn): TransportRequestCallback - deleteFilter(params: T.MlDeleteFilterRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteForecast(params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteForecast(params: T.MlDeleteForecastRequest, callback: callbackFn): TransportRequestCallback - deleteForecast(params: T.MlDeleteForecastRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteJob(params: T.MlDeleteJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteJob(params: T.MlDeleteJobRequest, callback: callbackFn): TransportRequestCallback - deleteJob(params: T.MlDeleteJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteModelSnapshot(params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteModelSnapshot(params: T.MlDeleteModelSnapshotRequest, callback: callbackFn): TransportRequestCallback - deleteModelSnapshot(params: T.MlDeleteModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteTrainedModel(params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteTrainedModel(params: T.MlDeleteTrainedModelRequest, callback: callbackFn): TransportRequestCallback - deleteTrainedModel(params: T.MlDeleteTrainedModelRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteTrainedModelAlias(params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise> - deleteTrainedModelAlias(params: T.MlDeleteTrainedModelAliasRequest, callback: callbackFn): TransportRequestCallback - deleteTrainedModelAlias(params: T.MlDeleteTrainedModelAliasRequest, options: TransportRequestOptions, callback: callbackFn): 
TransportRequestCallback - estimateModelMemory(params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): TransportRequestPromise> - estimateModelMemory(callback: callbackFn): TransportRequestCallback - estimateModelMemory(params: T.MlEstimateModelMemoryRequest, callback: callbackFn): TransportRequestCallback - estimateModelMemory(params: T.MlEstimateModelMemoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - evaluateDataFrame(params?: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): TransportRequestPromise> - evaluateDataFrame(callback: callbackFn): TransportRequestCallback - evaluateDataFrame(params: T.MlEvaluateDataFrameRequest, callback: callbackFn): TransportRequestCallback - evaluateDataFrame(params: T.MlEvaluateDataFrameRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - explainDataFrameAnalytics(params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> - explainDataFrameAnalytics(callback: callbackFn): TransportRequestCallback - explainDataFrameAnalytics(params: T.MlExplainDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback - explainDataFrameAnalytics(params: T.MlExplainDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - findFileStructure(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise> - findFileStructure(callback: callbackFn): TransportRequestCallback - findFileStructure(params: TODO, callback: callbackFn): TransportRequestCallback - findFileStructure(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - flushJob(params: T.MlFlushJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - flushJob(params: T.MlFlushJobRequest, callback: callbackFn): TransportRequestCallback - flushJob(params: T.MlFlushJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - forecast(params: T.MlForecastJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - forecast(params: T.MlForecastJobRequest, callback: callbackFn): TransportRequestCallback - forecast(params: T.MlForecastJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getBuckets(params: T.MlGetBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getBuckets(params: T.MlGetBucketsRequest, callback: callbackFn): TransportRequestCallback - getBuckets(params: T.MlGetBucketsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getCalendarEvents(params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getCalendarEvents(params: T.MlGetCalendarEventsRequest, callback: callbackFn): TransportRequestCallback - getCalendarEvents(params: T.MlGetCalendarEventsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getCalendars(params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getCalendars(callback: callbackFn): TransportRequestCallback - getCalendars(params: T.MlGetCalendarsRequest, callback: callbackFn): TransportRequestCallback - getCalendars(params: T.MlGetCalendarsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getCategories(params: T.MlGetCategoriesRequest, options?: 
TransportRequestOptions): TransportRequestPromise> - getCategories(params: T.MlGetCategoriesRequest, callback: callbackFn): TransportRequestCallback - getCategories(params: T.MlGetCategoriesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDataFrameAnalytics(params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getDataFrameAnalytics(callback: callbackFn): TransportRequestCallback - getDataFrameAnalytics(params: T.MlGetDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback - getDataFrameAnalytics(params: T.MlGetDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDataFrameAnalyticsStats(params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getDataFrameAnalyticsStats(callback: callbackFn): TransportRequestCallback - getDataFrameAnalyticsStats(params: T.MlGetDataFrameAnalyticsStatsRequest, callback: callbackFn): TransportRequestCallback - getDataFrameAnalyticsStats(params: T.MlGetDataFrameAnalyticsStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDatafeedStats(params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getDatafeedStats(callback: callbackFn): TransportRequestCallback - getDatafeedStats(params: T.MlGetDatafeedStatsRequest, callback: callbackFn): TransportRequestCallback - getDatafeedStats(params: T.MlGetDatafeedStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDatafeeds(params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getDatafeeds(callback: callbackFn): TransportRequestCallback - getDatafeeds(params: T.MlGetDatafeedsRequest, callback: callbackFn): TransportRequestCallback - getDatafeeds(params: T.MlGetDatafeedsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getFilters(params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): TransportRequestPromise> - getFilters(callback: callbackFn): TransportRequestCallback - getFilters(params: T.MlGetFiltersRequest, callback: callbackFn): TransportRequestCallback - getFilters(params: T.MlGetFiltersRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getInfluencers(params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): TransportRequestPromise> - getInfluencers(params: T.MlGetInfluencersRequest, callback: callbackFn): TransportRequestCallback - getInfluencers(params: T.MlGetInfluencersRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getJobStats(params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getJobStats(callback: callbackFn): TransportRequestCallback - getJobStats(params: T.MlGetJobStatsRequest, callback: callbackFn): TransportRequestCallback - getJobStats(params: T.MlGetJobStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getJobs(params?: T.MlGetJobsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getJobs(callback: callbackFn): TransportRequestCallback - getJobs(params: T.MlGetJobsRequest, callback: callbackFn): TransportRequestCallback - getJobs(params: T.MlGetJobsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback 
- getModelSnapshots(params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getModelSnapshots(params: T.MlGetModelSnapshotsRequest, callback: callbackFn): TransportRequestCallback - getModelSnapshots(params: T.MlGetModelSnapshotsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getOverallBuckets(params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getOverallBuckets(params: T.MlGetOverallBucketsRequest, callback: callbackFn): TransportRequestCallback - getOverallBuckets(params: T.MlGetOverallBucketsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getRecords(params: T.MlGetAnomalyRecordsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getRecords(params: T.MlGetAnomalyRecordsRequest, callback: callbackFn): TransportRequestCallback - getRecords(params: T.MlGetAnomalyRecordsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTrainedModels(params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getTrainedModels(callback: callbackFn): TransportRequestCallback - getTrainedModels(params: T.MlGetTrainedModelsRequest, callback: callbackFn): TransportRequestCallback - getTrainedModels(params: T.MlGetTrainedModelsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTrainedModelsStats(params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): TransportRequestPromise> - getTrainedModelsStats(callback: callbackFn): TransportRequestCallback - getTrainedModelsStats(params: T.MlGetTrainedModelsStatsRequest, callback: callbackFn): TransportRequestCallback - getTrainedModelsStats(params: T.MlGetTrainedModelsStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - info(params?: T.MlInfoRequest, options?: TransportRequestOptions): TransportRequestPromise> - info(callback: callbackFn): TransportRequestCallback - info(params: T.MlInfoRequest, callback: callbackFn): TransportRequestCallback - info(params: T.MlInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - openJob(params: T.MlOpenJobRequest, options?: TransportRequestOptions): TransportRequestPromise> - openJob(params: T.MlOpenJobRequest, callback: callbackFn): TransportRequestCallback - openJob(params: T.MlOpenJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postCalendarEvents(params?: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): TransportRequestPromise> - postCalendarEvents(callback: callbackFn): TransportRequestCallback - postCalendarEvents(params: T.MlPostCalendarEventsRequest, callback: callbackFn): TransportRequestCallback - postCalendarEvents(params: T.MlPostCalendarEventsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postData(params: T.MlPostJobDataRequest, options?: TransportRequestOptions): TransportRequestPromise> - postData(params: T.MlPostJobDataRequest, callback: callbackFn): TransportRequestCallback - postData(params: T.MlPostJobDataRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - previewDataFrameAnalytics(params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise> - previewDataFrameAnalytics(callback: 
callbackFn): TransportRequestCallback
- previewDataFrameAnalytics(params: T.MlPreviewDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback
- previewDataFrameAnalytics(params: T.MlPreviewDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- previewDatafeed(params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
- previewDatafeed(callback: callbackFn, TContext>): TransportRequestCallback
- previewDatafeed(params: T.MlPreviewDatafeedRequest, callback: callbackFn, TContext>): TransportRequestCallback
- previewDatafeed(params: T.MlPreviewDatafeedRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
- putCalendar(params: T.MlPutCalendarRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putCalendar(params: T.MlPutCalendarRequest, callback: callbackFn): TransportRequestCallback
- putCalendar(params: T.MlPutCalendarRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putCalendarJob(params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putCalendarJob(params: T.MlPutCalendarJobRequest, callback: callbackFn): TransportRequestCallback
- putCalendarJob(params: T.MlPutCalendarJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putDataFrameAnalytics(params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putDataFrameAnalytics(params: T.MlPutDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback
- putDataFrameAnalytics(params: T.MlPutDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putDatafeed(params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putDatafeed(params: T.MlPutDatafeedRequest, callback: callbackFn): TransportRequestCallback
- putDatafeed(params: T.MlPutDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putFilter(params: T.MlPutFilterRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putFilter(params: T.MlPutFilterRequest, callback: callbackFn): TransportRequestCallback
- putFilter(params: T.MlPutFilterRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putJob(params: T.MlPutJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putJob(params: T.MlPutJobRequest, callback: callbackFn): TransportRequestCallback
- putJob(params: T.MlPutJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putTrainedModel(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- putTrainedModel(callback: callbackFn): TransportRequestCallback
- putTrainedModel(params: TODO, callback: callbackFn): TransportRequestCallback
- putTrainedModel(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, callback: callbackFn): TransportRequestCallback
- putTrainedModelAlias(params: T.MlPutTrainedModelAliasRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- resetJob(params: T.MlResetJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- resetJob(params: T.MlResetJobRequest, callback: callbackFn): TransportRequestCallback
- resetJob(params: T.MlResetJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
- revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, callback: callbackFn): TransportRequestCallback
- revertModelSnapshot(params: T.MlRevertModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- setUpgradeMode(params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): TransportRequestPromise>
- setUpgradeMode(callback: callbackFn): TransportRequestCallback
- setUpgradeMode(params: T.MlSetUpgradeModeRequest, callback: callbackFn): TransportRequestCallback
- setUpgradeMode(params: T.MlSetUpgradeModeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- startDataFrameAnalytics(params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- startDataFrameAnalytics(params: T.MlStartDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback
- startDataFrameAnalytics(params: T.MlStartDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- startDatafeed(params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
- startDatafeed(params: T.MlStartDatafeedRequest, callback: callbackFn): TransportRequestCallback
- startDatafeed(params: T.MlStartDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- stopDataFrameAnalytics(params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- stopDataFrameAnalytics(params: T.MlStopDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback
- stopDataFrameAnalytics(params: T.MlStopDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- stopDatafeed(params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): TransportRequestPromise>
- stopDatafeed(params: T.MlStopDatafeedRequest, callback: callbackFn): TransportRequestCallback
- stopDatafeed(params: T.MlStopDatafeedRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- updateDataFrameAnalytics(params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- updateDataFrameAnalytics(params: T.MlUpdateDataFrameAnalyticsRequest, callback: callbackFn): TransportRequestCallback
- updateDataFrameAnalytics(params: T.MlUpdateDataFrameAnalyticsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- updateDatafeed(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- updateDatafeed(callback: callbackFn): TransportRequestCallback
- updateDatafeed(params: TODO, callback: callbackFn): TransportRequestCallback
- updateDatafeed(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- updateFilter(params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): TransportRequestPromise>
- updateFilter(params: T.MlUpdateFilterRequest, callback: callbackFn): TransportRequestCallback
- updateFilter(params: T.MlUpdateFilterRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- updateJob(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- updateJob(callback: callbackFn): TransportRequestCallback
- updateJob(params: TODO, callback: callbackFn): TransportRequestCallback
- updateJob(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
- updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, callback: callbackFn): TransportRequestCallback
- updateModelSnapshot(params: T.MlUpdateModelSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- upgradeJobSnapshot(params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): TransportRequestPromise>
- upgradeJobSnapshot(params: T.MlUpgradeJobSnapshotRequest, callback: callbackFn): TransportRequestCallback
- upgradeJobSnapshot(params: T.MlUpgradeJobSnapshotRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- validate(params?: T.MlValidateJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- validate(callback: callbackFn): TransportRequestCallback
- validate(params: T.MlValidateJobRequest, callback: callbackFn): TransportRequestCallback
- validate(params: T.MlValidateJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- validateDetector(params?: T.MlValidateDetectorRequest, options?: TransportRequestOptions): TransportRequestPromise>
- validateDetector(callback: callbackFn): TransportRequestCallback
- validateDetector(params: T.MlValidateDetectorRequest, callback: callbackFn): TransportRequestCallback
- validateDetector(params: T.MlValidateDetectorRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- monitoring: {
- bulk(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- bulk(callback: callbackFn): TransportRequestCallback
- bulk(params: TODO, callback: callbackFn): TransportRequestCallback
- bulk(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- msearch(params?: T.MsearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
- msearch(callback: callbackFn, TContext>): TransportRequestCallback
- msearch(params: T.MsearchRequest, callback: callbackFn, TContext>): TransportRequestCallback
- msearch(params: T.MsearchRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
- msearchTemplate(params?: T.MsearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
- msearchTemplate(callback: callbackFn, TContext>): TransportRequestCallback
- msearchTemplate(params: T.MsearchTemplateRequest, callback: callbackFn, TContext>): TransportRequestCallback
- msearchTemplate(params: T.MsearchTemplateRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
- mtermvectors(params?: T.MtermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- mtermvectors(callback: callbackFn): TransportRequestCallback
- mtermvectors(params: T.MtermvectorsRequest, callback: callbackFn): TransportRequestCallback
- mtermvectors(params: T.MtermvectorsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- nodes: {
- clearMeteringArchive(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- clearMeteringArchive(callback: callbackFn): TransportRequestCallback
- clearMeteringArchive(params: TODO, callback: callbackFn): TransportRequestCallback
- clearMeteringArchive(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getMeteringInfo(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- getMeteringInfo(callback: callbackFn): TransportRequestCallback
- getMeteringInfo(params: TODO, callback: callbackFn): TransportRequestCallback
- getMeteringInfo(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- hotThreads(params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- hotThreads(callback: callbackFn): TransportRequestCallback
- hotThreads(params: T.NodesHotThreadsRequest, callback: callbackFn): TransportRequestCallback
- hotThreads(params: T.NodesHotThreadsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- info(params?: T.NodesInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
- info(callback: callbackFn): TransportRequestCallback
- info(params: T.NodesInfoRequest, callback: callbackFn): TransportRequestCallback
- info(params: T.NodesInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- reloadSecureSettings(params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- reloadSecureSettings(callback: callbackFn): TransportRequestCallback
- reloadSecureSettings(params: T.NodesReloadSecureSettingsRequest, callback: callbackFn): TransportRequestCallback
- reloadSecureSettings(params: T.NodesReloadSecureSettingsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- stats(params?: T.NodesStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- stats(callback: callbackFn): TransportRequestCallback
- stats(params: T.NodesStatsRequest, callback: callbackFn): TransportRequestCallback
- stats(params: T.NodesStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- usage(params?: T.NodesUsageRequest, options?: TransportRequestOptions): TransportRequestPromise>
- usage(callback: callbackFn): TransportRequestCallback
- usage(params: T.NodesUsageRequest, callback: callbackFn): TransportRequestCallback
- usage(params: T.NodesUsageRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- openPointInTime(params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): TransportRequestPromise>
- openPointInTime(params: T.OpenPointInTimeRequest, callback: callbackFn): TransportRequestCallback
- openPointInTime(params: T.OpenPointInTimeRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- ping(params?: T.PingRequest, options?: TransportRequestOptions): TransportRequestPromise>
- ping(callback: callbackFn): TransportRequestCallback
- ping(params: T.PingRequest, callback: callbackFn): TransportRequestCallback
- ping(params: T.PingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putScript(params: T.PutScriptRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putScript(params: T.PutScriptRequest, callback: callbackFn): TransportRequestCallback
- putScript(params: T.PutScriptRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- rankEval(params: T.RankEvalRequest, options?: TransportRequestOptions): TransportRequestPromise>
- rankEval(params: T.RankEvalRequest, callback: callbackFn): TransportRequestCallback
- rankEval(params: T.RankEvalRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- reindex(params?: T.ReindexRequest, options?: TransportRequestOptions): TransportRequestPromise>
- reindex(callback: callbackFn): TransportRequestCallback
- reindex(params: T.ReindexRequest, callback: callbackFn): TransportRequestCallback
- reindex(params: T.ReindexRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- reindexRethrottle(params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): TransportRequestPromise>
- reindexRethrottle(params: T.ReindexRethrottleRequest, callback: callbackFn): TransportRequestCallback
- reindexRethrottle(params: T.ReindexRethrottleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- renderSearchTemplate(params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise>
- renderSearchTemplate(callback: callbackFn): TransportRequestCallback
- renderSearchTemplate(params: T.RenderSearchTemplateRequest, callback: callbackFn): TransportRequestCallback
- renderSearchTemplate(params: T.RenderSearchTemplateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- rollup: {
- deleteJob(params: T.RollupDeleteRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteJob(params: T.RollupDeleteRollupJobRequest, callback: callbackFn): TransportRequestCallback
- deleteJob(params: T.RollupDeleteRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getJobs(params?: T.RollupGetRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getJobs(callback: callbackFn): TransportRequestCallback
- getJobs(params: T.RollupGetRollupJobRequest, callback: callbackFn): TransportRequestCallback
- getJobs(params: T.RollupGetRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getRollupCaps(params?: T.RollupGetRollupCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getRollupCaps(callback: callbackFn): TransportRequestCallback
- getRollupCaps(params: T.RollupGetRollupCapabilitiesRequest, callback: callbackFn): TransportRequestCallback
- getRollupCaps(params: T.RollupGetRollupCapabilitiesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getRollupIndexCaps(params: T.RollupGetRollupIndexCapabilitiesRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getRollupIndexCaps(params: T.RollupGetRollupIndexCapabilitiesRequest, callback: callbackFn): TransportRequestCallback
- getRollupIndexCaps(params: T.RollupGetRollupIndexCapabilitiesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putJob(params: T.RollupCreateRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putJob(params: T.RollupCreateRollupJobRequest, callback: callbackFn): TransportRequestCallback
- putJob(params: T.RollupCreateRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- rollup(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- rollup(callback: callbackFn): TransportRequestCallback
- rollup(params: TODO, callback: callbackFn): TransportRequestCallback
- rollup(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- rollupSearch(params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
- rollupSearch(params: T.RollupRollupSearchRequest, callback: callbackFn, TContext>): TransportRequestCallback
- rollupSearch(params: T.RollupRollupSearchRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
- startJob(params: T.RollupStartRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- startJob(params: T.RollupStartRollupJobRequest, callback: callbackFn): TransportRequestCallback
- startJob(params: T.RollupStartRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- stopJob(params: T.RollupStopRollupJobRequest, options?: TransportRequestOptions): TransportRequestPromise>
- stopJob(params: T.RollupStopRollupJobRequest, callback: callbackFn): TransportRequestCallback
- stopJob(params: T.RollupStopRollupJobRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- scriptsPainlessExecute(params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
- scriptsPainlessExecute(callback: callbackFn, TContext>): TransportRequestCallback
- scriptsPainlessExecute(params: T.ScriptsPainlessExecuteRequest, callback: callbackFn, TContext>): TransportRequestCallback
- scriptsPainlessExecute(params: T.ScriptsPainlessExecuteRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
- scroll(params?: T.ScrollRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
- scroll(callback: callbackFn, TContext>): TransportRequestCallback
- scroll(params: T.ScrollRequest, callback: callbackFn, TContext>): TransportRequestCallback
- scroll(params: T.ScrollRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
- search(params?: T.SearchRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
- search(callback: callbackFn, TContext>): TransportRequestCallback
- search(params: T.SearchRequest, callback: callbackFn, TContext>): TransportRequestCallback
- search(params: T.SearchRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
- searchShards(params?: T.SearchShardsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- searchShards(callback: callbackFn): TransportRequestCallback
- searchShards(params: T.SearchShardsRequest, callback: callbackFn): TransportRequestCallback
- searchShards(params: T.SearchShardsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- searchTemplate(params?: T.SearchTemplateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
- searchTemplate(callback: callbackFn, TContext>): TransportRequestCallback
- searchTemplate(params: T.SearchTemplateRequest, callback: callbackFn, TContext>): TransportRequestCallback
- searchTemplate(params: T.SearchTemplateRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
- searchableSnapshots: {
- cacheStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- cacheStats(callback: callbackFn): TransportRequestCallback
- cacheStats(params: TODO, callback: callbackFn): TransportRequestCallback
- cacheStats(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- clearCache(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- clearCache(callback: callbackFn): TransportRequestCallback
- clearCache(params: TODO, callback: callbackFn): TransportRequestCallback
- clearCache(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- mount(params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): TransportRequestPromise>
- mount(params: T.SearchableSnapshotsMountRequest, callback: callbackFn): TransportRequestCallback
- mount(params: T.SearchableSnapshotsMountRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- repositoryStats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- repositoryStats(callback: callbackFn): TransportRequestCallback
- repositoryStats(params: TODO, callback: callbackFn): TransportRequestCallback
- repositoryStats(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- stats(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- stats(callback: callbackFn): TransportRequestCallback
- stats(params: TODO, callback: callbackFn): TransportRequestCallback
- stats(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- security: {
- authenticate(params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): TransportRequestPromise>
- authenticate(callback: callbackFn): TransportRequestCallback
- authenticate(params: T.SecurityAuthenticateRequest, callback: callbackFn): TransportRequestCallback
- authenticate(params: T.SecurityAuthenticateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- changePassword(params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): TransportRequestPromise>
- changePassword(callback: callbackFn): TransportRequestCallback
- changePassword(params: T.SecurityChangePasswordRequest, callback: callbackFn): TransportRequestCallback
- changePassword(params: T.SecurityChangePasswordRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- clearApiKeyCache(params?: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): TransportRequestPromise>
- clearApiKeyCache(callback: callbackFn): TransportRequestCallback
- clearApiKeyCache(params: T.SecurityClearApiKeyCacheRequest, callback: callbackFn): TransportRequestCallback
- clearApiKeyCache(params: T.SecurityClearApiKeyCacheRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- clearCachedPrivileges(params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
- clearCachedPrivileges(params: T.SecurityClearCachedPrivilegesRequest, callback: callbackFn): TransportRequestCallback
- clearCachedPrivileges(params: T.SecurityClearCachedPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- clearCachedRealms(params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- clearCachedRealms(params: T.SecurityClearCachedRealmsRequest, callback: callbackFn): TransportRequestCallback
- clearCachedRealms(params: T.SecurityClearCachedRealmsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- clearCachedRoles(params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): TransportRequestPromise>
- clearCachedRoles(params: T.SecurityClearCachedRolesRequest, callback: callbackFn): TransportRequestCallback
- clearCachedRoles(params: T.SecurityClearCachedRolesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- clearCachedServiceTokens(params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): TransportRequestPromise>
- clearCachedServiceTokens(params: T.SecurityClearCachedServiceTokensRequest, callback: callbackFn): TransportRequestCallback
- clearCachedServiceTokens(params: T.SecurityClearCachedServiceTokensRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- createApiKey(params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
- createApiKey(callback: callbackFn): TransportRequestCallback
- createApiKey(params: T.SecurityCreateApiKeyRequest, callback: callbackFn): TransportRequestCallback
- createApiKey(params: T.SecurityCreateApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- createServiceToken(params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
- createServiceToken(params: T.SecurityCreateServiceTokenRequest, callback: callbackFn): TransportRequestCallback
- createServiceToken(params: T.SecurityCreateServiceTokenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deletePrivileges(params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deletePrivileges(params: T.SecurityDeletePrivilegesRequest, callback: callbackFn): TransportRequestCallback
- deletePrivileges(params: T.SecurityDeletePrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteRole(params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteRole(params: T.SecurityDeleteRoleRequest, callback: callbackFn): TransportRequestCallback
- deleteRole(params: T.SecurityDeleteRoleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteRoleMapping(params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteRoleMapping(params: T.SecurityDeleteRoleMappingRequest, callback: callbackFn): TransportRequestCallback
- deleteRoleMapping(params: T.SecurityDeleteRoleMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteServiceToken(params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteServiceToken(params: T.SecurityDeleteServiceTokenRequest, callback: callbackFn): TransportRequestCallback
- deleteServiceToken(params: T.SecurityDeleteServiceTokenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteUser(params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteUser(params: T.SecurityDeleteUserRequest, callback: callbackFn): TransportRequestCallback
- deleteUser(params: T.SecurityDeleteUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- disableUser(params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
- disableUser(params: T.SecurityDisableUserRequest, callback: callbackFn): TransportRequestCallback
- disableUser(params: T.SecurityDisableUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- enableUser(params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
- enableUser(params: T.SecurityEnableUserRequest, callback: callbackFn): TransportRequestCallback
- enableUser(params: T.SecurityEnableUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getApiKey(params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getApiKey(callback: callbackFn): TransportRequestCallback
- getApiKey(params: T.SecurityGetApiKeyRequest, callback: callbackFn): TransportRequestCallback
- getApiKey(params: T.SecurityGetApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getBuiltinPrivileges(params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getBuiltinPrivileges(callback: callbackFn): TransportRequestCallback
- getBuiltinPrivileges(params: T.SecurityGetBuiltinPrivilegesRequest, callback: callbackFn): TransportRequestCallback
- getBuiltinPrivileges(params: T.SecurityGetBuiltinPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getPrivileges(params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getPrivileges(callback: callbackFn): TransportRequestCallback
- getPrivileges(params: T.SecurityGetPrivilegesRequest, callback: callbackFn): TransportRequestCallback
- getPrivileges(params: T.SecurityGetPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getRole(params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getRole(callback: callbackFn): TransportRequestCallback
- getRole(params: T.SecurityGetRoleRequest, callback: callbackFn): TransportRequestCallback
- getRole(params: T.SecurityGetRoleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getRoleMapping(params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getRoleMapping(callback: callbackFn): TransportRequestCallback
- getRoleMapping(params: T.SecurityGetRoleMappingRequest, callback: callbackFn): TransportRequestCallback
- getRoleMapping(params: T.SecurityGetRoleMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getServiceAccounts(params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getServiceAccounts(callback: callbackFn): TransportRequestCallback
- getServiceAccounts(params: T.SecurityGetServiceAccountsRequest, callback: callbackFn): TransportRequestCallback
- getServiceAccounts(params: T.SecurityGetServiceAccountsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getServiceCredentials(params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getServiceCredentials(params: T.SecurityGetServiceCredentialsRequest, callback: callbackFn): TransportRequestCallback
- getServiceCredentials(params: T.SecurityGetServiceCredentialsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getToken(params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getToken(callback: callbackFn): TransportRequestCallback
- getToken(params: T.SecurityGetTokenRequest, callback: callbackFn): TransportRequestCallback
- getToken(params: T.SecurityGetTokenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getUser(params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getUser(callback: callbackFn): TransportRequestCallback
- getUser(params: T.SecurityGetUserRequest, callback: callbackFn): TransportRequestCallback
- getUser(params: T.SecurityGetUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getUserPrivileges(params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getUserPrivileges(callback: callbackFn): TransportRequestCallback
- getUserPrivileges(params: T.SecurityGetUserPrivilegesRequest, callback: callbackFn): TransportRequestCallback
- getUserPrivileges(params: T.SecurityGetUserPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- grantApiKey(params?: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
- grantApiKey(callback: callbackFn): TransportRequestCallback
- grantApiKey(params: T.SecurityGrantApiKeyRequest, callback: callbackFn): TransportRequestCallback
- grantApiKey(params: T.SecurityGrantApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- hasPrivileges(params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
- hasPrivileges(callback: callbackFn): TransportRequestCallback
- hasPrivileges(params: T.SecurityHasPrivilegesRequest, callback: callbackFn): TransportRequestCallback
- hasPrivileges(params: T.SecurityHasPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- invalidateApiKey(params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): TransportRequestPromise>
- invalidateApiKey(callback: callbackFn): TransportRequestCallback
- invalidateApiKey(params: T.SecurityInvalidateApiKeyRequest, callback: callbackFn): TransportRequestCallback
- invalidateApiKey(params: T.SecurityInvalidateApiKeyRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- invalidateToken(params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): TransportRequestPromise>
- invalidateToken(callback: callbackFn): TransportRequestCallback
- invalidateToken(params: T.SecurityInvalidateTokenRequest, callback: callbackFn): TransportRequestCallback
- invalidateToken(params: T.SecurityInvalidateTokenRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putPrivileges(params?: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putPrivileges(callback: callbackFn): TransportRequestCallback
- putPrivileges(params: T.SecurityPutPrivilegesRequest, callback: callbackFn): TransportRequestCallback
- putPrivileges(params: T.SecurityPutPrivilegesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putRole(params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putRole(params: T.SecurityPutRoleRequest, callback: callbackFn): TransportRequestCallback
- putRole(params: T.SecurityPutRoleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putRoleMapping(params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putRoleMapping(params: T.SecurityPutRoleMappingRequest, callback: callbackFn): TransportRequestCallback
- putRoleMapping(params: T.SecurityPutRoleMappingRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putUser(params: T.SecurityPutUserRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putUser(params: T.SecurityPutUserRequest, callback: callbackFn): TransportRequestCallback
- putUser(params: T.SecurityPutUserRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- samlAuthenticate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- samlAuthenticate(callback: callbackFn): TransportRequestCallback
- samlAuthenticate(params: TODO, callback: callbackFn): TransportRequestCallback
- samlAuthenticate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- samlCompleteLogout(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- samlCompleteLogout(callback: callbackFn): TransportRequestCallback
- samlCompleteLogout(params: TODO, callback: callbackFn): TransportRequestCallback
- samlCompleteLogout(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- samlInvalidate(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- samlInvalidate(callback: callbackFn): TransportRequestCallback
- samlInvalidate(params: TODO, callback: callbackFn): TransportRequestCallback
- samlInvalidate(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- samlLogout(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- samlLogout(callback: callbackFn): TransportRequestCallback
- samlLogout(params: TODO, callback: callbackFn): TransportRequestCallback
- samlLogout(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- samlPrepareAuthentication(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- samlPrepareAuthentication(callback: callbackFn): TransportRequestCallback
- samlPrepareAuthentication(params: TODO, callback: callbackFn): TransportRequestCallback
- samlPrepareAuthentication(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- samlServiceProviderMetadata(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- samlServiceProviderMetadata(callback: callbackFn): TransportRequestCallback
- samlServiceProviderMetadata(params: TODO, callback: callbackFn): TransportRequestCallback
- samlServiceProviderMetadata(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- shutdown: {
- deleteNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- deleteNode(callback: callbackFn): TransportRequestCallback
- deleteNode(params: TODO, callback: callbackFn): TransportRequestCallback
- deleteNode(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- getNode(callback: callbackFn): TransportRequestCallback
- getNode(params: TODO, callback: callbackFn): TransportRequestCallback
- getNode(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putNode(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- putNode(callback: callbackFn): TransportRequestCallback
- putNode(params: TODO, callback: callbackFn): TransportRequestCallback
- putNode(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- slm: {
- deleteLifecycle(params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteLifecycle(params: T.SlmDeleteLifecycleRequest, callback: callbackFn): TransportRequestCallback
- deleteLifecycle(params: T.SlmDeleteLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- executeLifecycle(params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
- executeLifecycle(params: T.SlmExecuteLifecycleRequest, callback: callbackFn): TransportRequestCallback
- executeLifecycle(params: T.SlmExecuteLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- executeRetention(params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): TransportRequestPromise>
- executeRetention(callback: callbackFn): TransportRequestCallback
- executeRetention(params: T.SlmExecuteRetentionRequest, callback: callbackFn): TransportRequestCallback
- executeRetention(params: T.SlmExecuteRetentionRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getLifecycle(params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getLifecycle(callback: callbackFn): TransportRequestCallback
- getLifecycle(params: T.SlmGetLifecycleRequest, callback: callbackFn): TransportRequestCallback
- getLifecycle(params: T.SlmGetLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getStats(params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getStats(callback: callbackFn): TransportRequestCallback
- getStats(params: T.SlmGetStatsRequest, callback: callbackFn): TransportRequestCallback
- getStats(params: T.SlmGetStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getStatus(params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getStatus(callback: callbackFn): TransportRequestCallback
- getStatus(params: T.SlmGetStatusRequest, callback: callbackFn): TransportRequestCallback
- getStatus(params: T.SlmGetStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putLifecycle(params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putLifecycle(params: T.SlmPutLifecycleRequest, callback: callbackFn): TransportRequestCallback
- putLifecycle(params: T.SlmPutLifecycleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- start(params?: T.SlmStartRequest, options?: TransportRequestOptions): TransportRequestPromise>
- start(callback: callbackFn): TransportRequestCallback
- start(params: T.SlmStartRequest, callback: callbackFn): TransportRequestCallback
- start(params: T.SlmStartRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- stop(params?: T.SlmStopRequest, options?: TransportRequestOptions): TransportRequestPromise>
- stop(callback: callbackFn): TransportRequestCallback
- stop(params: T.SlmStopRequest, callback: callbackFn): TransportRequestCallback
- stop(params: T.SlmStopRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- snapshot: {
- cleanupRepository(params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
- cleanupRepository(params: T.SnapshotCleanupRepositoryRequest, callback: callbackFn): TransportRequestCallback
- cleanupRepository(params: T.SnapshotCleanupRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- clone(params: T.SnapshotCloneRequest, options?: TransportRequestOptions): TransportRequestPromise>
- clone(params: T.SnapshotCloneRequest, callback: callbackFn): TransportRequestCallback
- clone(params: T.SnapshotCloneRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- create(params: T.SnapshotCreateRequest, options?: TransportRequestOptions): TransportRequestPromise>
- create(params: T.SnapshotCreateRequest, callback: callbackFn): TransportRequestCallback
- create(params: T.SnapshotCreateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- createRepository(params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
- createRepository(params: T.SnapshotCreateRepositoryRequest, callback: callbackFn): TransportRequestCallback
- createRepository(params: T.SnapshotCreateRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- delete(params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): TransportRequestPromise>
- delete(params: T.SnapshotDeleteRequest, callback: callbackFn): TransportRequestCallback
- delete(params: T.SnapshotDeleteRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteRepository(params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteRepository(params: T.SnapshotDeleteRepositoryRequest, callback: callbackFn): TransportRequestCallback
- deleteRepository(params: T.SnapshotDeleteRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- get(params: T.SnapshotGetRequest, options?: TransportRequestOptions): TransportRequestPromise>
- get(params: T.SnapshotGetRequest, callback: callbackFn): TransportRequestCallback
- get(params: T.SnapshotGetRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getRepository(params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getRepository(callback: callbackFn): TransportRequestCallback
- getRepository(params: T.SnapshotGetRepositoryRequest, callback: callbackFn): TransportRequestCallback
- getRepository(params: T.SnapshotGetRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- repositoryAnalyze(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- repositoryAnalyze(callback: callbackFn): TransportRequestCallback
- repositoryAnalyze(params: TODO, callback: callbackFn): TransportRequestCallback
- repositoryAnalyze(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- restore(params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): TransportRequestPromise>
- restore(params: T.SnapshotRestoreRequest, callback: callbackFn): TransportRequestCallback
- restore(params: T.SnapshotRestoreRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- status(params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): TransportRequestPromise>
- status(callback: callbackFn): TransportRequestCallback
- status(params: T.SnapshotStatusRequest, callback: callbackFn): TransportRequestCallback
- status(params: T.SnapshotStatusRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- verifyRepository(params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): TransportRequestPromise>
- verifyRepository(params: T.SnapshotVerifyRepositoryRequest, callback: callbackFn): TransportRequestCallback
- verifyRepository(params: T.SnapshotVerifyRepositoryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- sql: {
- clearCursor(params?: T.SqlClearCursorRequest, options?: TransportRequestOptions): TransportRequestPromise>
- clearCursor(callback: callbackFn): TransportRequestCallback
- clearCursor(params: T.SqlClearCursorRequest, callback: callbackFn): TransportRequestCallback
- clearCursor(params: T.SqlClearCursorRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteAsync(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- deleteAsync(callback: callbackFn): TransportRequestCallback
- deleteAsync(params: TODO, callback: callbackFn): TransportRequestCallback
- deleteAsync(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getAsync(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- getAsync(callback: callbackFn): TransportRequestCallback
- getAsync(params: TODO, callback: callbackFn): TransportRequestCallback
- getAsync(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getAsyncStatus(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- getAsyncStatus(callback: callbackFn): TransportRequestCallback
- getAsyncStatus(params: TODO, callback: callbackFn): TransportRequestCallback
- getAsyncStatus(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- query(params?: T.SqlQueryRequest, options?: TransportRequestOptions): TransportRequestPromise>
- query(callback: callbackFn): TransportRequestCallback
- query(params: T.SqlQueryRequest, callback: callbackFn): TransportRequestCallback
- query(params: T.SqlQueryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- translate(params?: T.SqlTranslateRequest, options?: TransportRequestOptions): TransportRequestPromise>
- translate(callback: callbackFn): TransportRequestCallback
- translate(params: T.SqlTranslateRequest, callback: callbackFn): TransportRequestCallback
- translate(params: T.SqlTranslateRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- ssl: {
- certificates(params?: T.SslGetCertificatesRequest, options?: TransportRequestOptions): TransportRequestPromise>
- certificates(callback: callbackFn): TransportRequestCallback
- certificates(params: T.SslGetCertificatesRequest, callback: callbackFn): TransportRequestCallback
- certificates(params: T.SslGetCertificatesRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- tasks: {
- cancel(params?: T.TaskCancelRequest, options?: TransportRequestOptions): TransportRequestPromise>
- cancel(callback: callbackFn): TransportRequestCallback
- cancel(params: T.TaskCancelRequest, callback: callbackFn): TransportRequestCallback
- cancel(params: T.TaskCancelRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- get(params: T.TaskGetRequest, options?: TransportRequestOptions): TransportRequestPromise>
- get(params: T.TaskGetRequest, callback: callbackFn): TransportRequestCallback
- get(params: T.TaskGetRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- list(params?: T.TaskListRequest, options?: TransportRequestOptions): TransportRequestPromise>
- list(callback: callbackFn): TransportRequestCallback
- list(params: T.TaskListRequest, callback: callbackFn): TransportRequestCallback
- list(params: T.TaskListRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- termsEnum(params: T.TermsEnumRequest, options?: TransportRequestOptions): TransportRequestPromise>
- termsEnum(params: T.TermsEnumRequest, callback: callbackFn): TransportRequestCallback
- termsEnum(params: T.TermsEnumRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- termvectors(params: T.TermvectorsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- termvectors(params: T.TermvectorsRequest, callback: callbackFn): TransportRequestCallback
- termvectors(params: T.TermvectorsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- textStructure: {
- findStructure(params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): TransportRequestPromise>
- findStructure(params: T.TextStructureFindStructureRequest, callback: callbackFn): TransportRequestCallback
- findStructure(params: T.TextStructureFindStructureRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- transform: {
- deleteTransform(params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteTransform(params: T.TransformDeleteTransformRequest, callback: callbackFn): TransportRequestCallback
- deleteTransform(params: T.TransformDeleteTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getTransform(params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getTransform(callback: callbackFn): TransportRequestCallback
- getTransform(params: T.TransformGetTransformRequest, callback: callbackFn): TransportRequestCallback
- getTransform(params: T.TransformGetTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getTransformStats(params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getTransformStats(params: T.TransformGetTransformStatsRequest, callback: callbackFn): TransportRequestCallback
- getTransformStats(params: T.TransformGetTransformStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- previewTransform(params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
- previewTransform(callback: callbackFn, TContext>): TransportRequestCallback
- previewTransform(params: T.TransformPreviewTransformRequest, callback: callbackFn, TContext>): TransportRequestCallback
- previewTransform(params: T.TransformPreviewTransformRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
- putTransform(params: T.TransformPutTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putTransform(params: T.TransformPutTransformRequest, callback: callbackFn): TransportRequestCallback
- putTransform(params: T.TransformPutTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- startTransform(params: T.TransformStartTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
- startTransform(params: T.TransformStartTransformRequest, callback: callbackFn): TransportRequestCallback
- startTransform(params: T.TransformStartTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- stopTransform(params: T.TransformStopTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
- stopTransform(params: T.TransformStopTransformRequest, callback: callbackFn): TransportRequestCallback
- stopTransform(params: T.TransformStopTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- updateTransform(params?: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): TransportRequestPromise>
- updateTransform(callback: callbackFn): TransportRequestCallback
- updateTransform(params: T.TransformUpdateTransformRequest, callback: callbackFn): TransportRequestCallback
- updateTransform(params: T.TransformUpdateTransformRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- update(params: T.UpdateRequest, options?: TransportRequestOptions): TransportRequestPromise, TContext>>
- update(params: T.UpdateRequest, callback: callbackFn, TContext>): TransportRequestCallback
- update(params: T.UpdateRequest, options: TransportRequestOptions, callback: callbackFn, TContext>): TransportRequestCallback
- updateByQuery(params: T.UpdateByQueryRequest, options?: TransportRequestOptions): TransportRequestPromise>
- updateByQuery(params: T.UpdateByQueryRequest, callback: callbackFn): TransportRequestCallback
- updateByQuery(params: T.UpdateByQueryRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- updateByQueryRethrottle(params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): TransportRequestPromise>
- updateByQueryRethrottle(params: T.UpdateByQueryRethrottleRequest, callback: callbackFn): TransportRequestCallback
- updateByQueryRethrottle(params: T.UpdateByQueryRethrottleRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- watcher: {
- ackWatch(params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
- ackWatch(params: T.WatcherAckWatchRequest, callback: callbackFn): TransportRequestCallback
- ackWatch(params: T.WatcherAckWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- activateWatch(params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
- activateWatch(params: T.WatcherActivateWatchRequest, callback: callbackFn): TransportRequestCallback
- activateWatch(params: T.WatcherActivateWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deactivateWatch(params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deactivateWatch(params: T.WatcherDeactivateWatchRequest, callback: callbackFn): TransportRequestCallback
- deactivateWatch(params: T.WatcherDeactivateWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- deleteWatch(params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
- deleteWatch(params: T.WatcherDeleteWatchRequest, callback: callbackFn): TransportRequestCallback
- deleteWatch(params: T.WatcherDeleteWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- executeWatch(params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
- executeWatch(callback: callbackFn): TransportRequestCallback
- executeWatch(params: T.WatcherExecuteWatchRequest, callback: callbackFn): TransportRequestCallback
- executeWatch(params: T.WatcherExecuteWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- getWatch(params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
- getWatch(params: T.WatcherGetWatchRequest, callback: callbackFn): TransportRequestCallback
- getWatch(params: T.WatcherGetWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- putWatch(params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): TransportRequestPromise>
- putWatch(params: T.WatcherPutWatchRequest, callback: callbackFn): TransportRequestCallback
- putWatch(params: T.WatcherPutWatchRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- queryWatches(params?: TODO, options?: TransportRequestOptions): TransportRequestPromise>
- queryWatches(callback: callbackFn): TransportRequestCallback
- queryWatches(params: TODO, callback: callbackFn): TransportRequestCallback
- queryWatches(params: TODO, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- start(params?: T.WatcherStartRequest, options?: TransportRequestOptions): TransportRequestPromise>
- start(callback: callbackFn): TransportRequestCallback
- start(params: T.WatcherStartRequest, callback: callbackFn): TransportRequestCallback
- start(params: T.WatcherStartRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- stats(params?: T.WatcherStatsRequest, options?: TransportRequestOptions): TransportRequestPromise>
- stats(callback: callbackFn): TransportRequestCallback
- stats(params: T.WatcherStatsRequest, callback: callbackFn): TransportRequestCallback
- stats(params: T.WatcherStatsRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- stop(params?: T.WatcherStopRequest, options?: TransportRequestOptions): TransportRequestPromise>
- stop(callback: callbackFn): TransportRequestCallback
- stop(params: T.WatcherStopRequest, callback: callbackFn): TransportRequestCallback
- stop(params: T.WatcherStopRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
- xpack: {
- info(params?: T.XpackInfoRequest, options?: TransportRequestOptions): TransportRequestPromise>
- info(callback: callbackFn): TransportRequestCallback
- info(params: T.XpackInfoRequest, callback: callbackFn): TransportRequestCallback
- info(params: T.XpackInfoRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- usage(params?: T.XpackUsageRequest, options?: TransportRequestOptions): TransportRequestPromise>
- usage(callback: callbackFn): TransportRequestCallback
- usage(params: T.XpackUsageRequest, callback: callbackFn): TransportRequestCallback
- usage(params: T.XpackUsageRequest, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback
- }
-}
-
-export * as estypes from './types'
-export {
- Client,
- Transport,
- ConnectionPool,
- BaseConnectionPool,
- CloudConnectionPool,
- Connection,
- Serializer,
- events,
- errors,
- ApiError,
- ApiResponse,
- RequestEvent,
- ResurrectEvent,
- ClientOptions,
- NodeOptions,
- ClientExtendsCallbackOptions
-}
\ No newline at end of file
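The declarations deleted above expose every API method in two calling styles: an overload without a callback that returns a TransportRequestPromise, and overloads taking a callbackFn as the last argument that return a TransportRequestCallback. A minimal sketch of how client code consumes the two styles against these typings (the index name, query, and error handling are illustrative, not part of the patch):

    import { Client, ApiError, ApiResponse } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })

    // Promise style: the overload without a callback returns a TransportRequestPromise.
    async function searchAsPromise () {
      const { body } = await client.search({
        index: 'my-index', // illustrative index name
        body: { query: { match_all: {} } }
      })
      console.log(body.hits.hits)
    }

    // Callback style: supplying a callbackFn selects the TransportRequestCallback overloads.
    client.search({
      index: 'my-index',
      body: { query: { match_all: {} } }
    }, (err: ApiError, result: ApiResponse) => {
      if (err) console.error(err)
      else console.log(result.body.hits.hits)
    })

    searchAsPromise().catch(console.error)

Which overload applies is driven purely by the presence of the callback argument, which is why each method in the deleted file is declared three or four times.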
diff --git a/api/requestParams.d.ts b/api/requestParams.d.ts
deleted file mode 100644
index 8aab88c42..000000000
--- a/api/requestParams.d.ts
+++ /dev/null
@@ -1,2862 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import { RequestBody, RequestNDBody } from '../lib/Transport'
-
-export interface Generic {
- method?: string;
- filter_path?: string | string[];
- pretty?: boolean;
- human?: boolean;
- error_trace?: boolean;
- source?: string;
-}
-
-export interface AsyncSearchDelete extends Generic {
- id: string;
-}
-
-export interface AsyncSearchGet extends Generic {
- id: string;
- wait_for_completion_timeout?: string;
- keep_alive?: string;
- typed_keys?: boolean;
-}
-
-export interface AsyncSearchStatus extends Generic {
- id: string;
-}
-
-export interface AsyncSearchSubmit extends Generic {
- index?: string | string[];
- _source_exclude?: string | string[];
- _source_include?: string | string[];
- wait_for_completion_timeout?: string;
- keep_on_completion?: boolean;
- keep_alive?: string;
- batched_reduce_size?: number;
- request_cache?: boolean;
- analyzer?: string;
- analyze_wildcard?: boolean;
- default_operator?: 'AND' | 'OR';
- df?: string;
- explain?: boolean;
- stored_fields?: string | string[];
- docvalue_fields?: string | string[];
- from?: number;
- ignore_unavailable?: boolean;
- ignore_throttled?: boolean;
- allow_no_indices?: boolean;
- expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
- lenient?: boolean;
- preference?: string;
- q?: string;
- routing?: string | string[];
- search_type?: 'query_then_fetch' | 'dfs_query_then_fetch';
- size?: number;
- sort?: string | string[];
- _source?: string | string[];
- _source_excludes?: string | string[];
- _source_includes?: string | string[];
- terminate_after?: number;
- stats?: string | string[];
- suggest_field?: string;
- suggest_mode?: 'missing' | 'popular' | 'always';
- suggest_size?: number;
- suggest_text?: string;
- timeout?: string;
- track_scores?: boolean;
- track_total_hits?: boolean | number;
- allow_partial_search_results?: boolean;
- typed_keys?: boolean;
- version?: boolean;
- seq_no_primary_term?: boolean;
- max_concurrent_shard_requests?: number;
- body?: T;
-}
-
-export interface AutoscalingDeleteAutoscalingPolicy extends Generic {
- name: string;
-}
-
-export interface AutoscalingGetAutoscalingCapacity extends Generic {
-}
-
-export interface AutoscalingGetAutoscalingPolicy extends Generic {
- name: string;
-}
-
-export interface AutoscalingPutAutoscalingPolicy extends Generic {
- name: string;
- body: T;
-}
-
-export interface Bulk extends Generic {
- index?: string;
- type?: string;
- _source_exclude?: string | string[];
- _source_include?: string | string[];
- wait_for_active_shards?: string;
- refresh?: 'wait_for' | boolean;
- routing?: string;
- timeout?: string;
- _source?: string | string[];
- _source_excludes?: string | string[];
- _source_includes?: string | string[];
- pipeline?: string;
- require_alias?: boolean;
- body: T;
-}
-
-export interface CatAliases extends Generic {
- name?: string | string[];
- format?: string;
- local?: boolean;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- v?: boolean;
- expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface CatAllocation extends Generic {
- node_id?: string | string[];
- format?: string;
- bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
- local?: boolean;
- master_timeout?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- v?: boolean;
-}
-
-export interface CatCount extends Generic {
- index?: string | string[];
- format?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- v?: boolean;
-}
-
-export interface CatFielddata extends Generic {
- fields?: string | string[];
- format?: string;
- bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- v?: boolean;
-}
-
-export interface CatHealth extends Generic {
- format?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- ts?: boolean;
- v?: boolean;
-}
-
-export interface CatHelp extends Generic {
- help?: boolean;
- s?: string | string[];
-}
-
-export interface CatIndices extends Generic {
- index?: string | string[];
- format?: string;
- bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
- master_timeout?: string;
- h?: string | string[];
- health?: 'green' | 'yellow' | 'red';
- help?: boolean;
- pri?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
- include_unloaded_segments?: boolean;
- expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface CatMaster extends Generic {
- format?: string;
- local?: boolean;
- master_timeout?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- v?: boolean;
-}
-
-export interface CatMlDataFrameAnalytics extends Generic {
- id?: string;
- allow_no_match?: boolean;
- bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
- format?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
-}
-
-export interface CatMlDatafeeds extends Generic {
- datafeed_id?: string;
- allow_no_match?: boolean;
- allow_no_datafeeds?: boolean;
- format?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
-}
-
-export interface CatMlJobs extends Generic {
- job_id?: string;
- allow_no_match?: boolean;
- allow_no_jobs?: boolean;
- bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
- format?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
-}
-
-export interface CatMlTrainedModels extends Generic {
- model_id?: string;
- allow_no_match?: boolean;
- from?: number;
- size?: number;
- bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
- format?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
-}
-
-export interface CatNodeattrs extends Generic {
- format?: string;
- local?: boolean;
- master_timeout?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- v?: boolean;
-}
-
-export interface CatNodes extends Generic {
- bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
- format?: string;
- full_id?: boolean;
- master_timeout?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
- include_unloaded_segments?: boolean;
-}
-
-export interface CatPendingTasks extends Generic {
- format?: string;
- local?: boolean;
- master_timeout?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
-}
-
-export interface CatPlugins extends Generic {
- format?: string;
- local?: boolean;
- master_timeout?: string;
- h?: string | string[];
- help?: boolean;
- include_bootstrap?: boolean;
- s?: string | string[];
- v?: boolean;
-}
-
-export interface CatRecovery extends Generic {
- index?: string | string[];
- format?: string;
- active_only?: boolean;
- bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
- detailed?: boolean;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
-}
-
-export interface CatRepositories extends Generic {
- format?: string;
- local?: boolean;
- master_timeout?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- v?: boolean;
-}
-
-export interface CatSegments extends Generic {
- index?: string | string[];
- format?: string;
- bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- v?: boolean;
-}
-
-export interface CatShards extends Generic {
- index?: string | string[];
- format?: string;
- bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
- master_timeout?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
-}
-
-export interface CatSnapshots extends Generic {
- repository?: string | string[];
- format?: string;
- ignore_unavailable?: boolean;
- master_timeout?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
-}
-
-export interface CatTasks extends Generic {
- format?: string;
- nodes?: string | string[];
- actions?: string | string[];
- detailed?: boolean;
- parent_task_id?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
-}
-
-export interface CatTemplates extends Generic {
- name?: string;
- format?: string;
- local?: boolean;
- master_timeout?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- v?: boolean;
-}
-
-export interface CatThreadPool extends Generic {
- thread_pool_patterns?: string | string[];
- format?: string;
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- local?: boolean;
- master_timeout?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- v?: boolean;
-}
-
-export interface CatTransforms extends Generic {
- transform_id?: string;
- from?: number;
- size?: number;
- allow_no_match?: boolean;
- format?: string;
- h?: string | string[];
- help?: boolean;
- s?: string | string[];
- time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
- v?: boolean;
-}
-
-export interface CcrDeleteAutoFollowPattern extends Generic {
- name: string;
-}
-
-export interface CcrFollow extends Generic {
- index: string;
- wait_for_active_shards?: string;
- body: T;
-}
-
-export interface CcrFollowInfo extends Generic {
- index: string | string[];
-}
-
-export interface CcrFollowStats extends Generic {
- index: string | string[];
-}
-
-export interface CcrForgetFollower extends Generic {
- index: string;
- body: T;
-}
-
-export interface CcrGetAutoFollowPattern extends Generic {
- name?: string;
-}
-
-export interface CcrPauseAutoFollowPattern extends Generic {
- name: string;
-}
-
-export interface CcrPauseFollow extends Generic {
- index:
string; -} - -export interface CcrPutAutoFollowPattern extends Generic { - name: string; - body: T; -} - -export interface CcrResumeAutoFollowPattern extends Generic { - name: string; -} - -export interface CcrResumeFollow extends Generic { - index: string; - body?: T; -} - -export interface CcrStats extends Generic { -} - -export interface CcrUnfollow extends Generic { - index: string; -} - -export interface ClearScroll extends Generic { - scroll_id?: string | string[]; - body?: T; -} - -export interface ClosePointInTime extends Generic { - body?: T; -} - -export interface ClusterAllocationExplain extends Generic { - include_yes_decisions?: boolean; - include_disk_info?: boolean; - body?: T; -} - -export interface ClusterDeleteComponentTemplate extends Generic { - name: string; - timeout?: string; - master_timeout?: string; -} - -export interface ClusterDeleteVotingConfigExclusions extends Generic { - wait_for_removal?: boolean; -} - -export interface ClusterExistsComponentTemplate extends Generic { - name: string; - master_timeout?: string; - local?: boolean; -} - -export interface ClusterGetComponentTemplate extends Generic { - name?: string | string[]; - master_timeout?: string; - local?: boolean; -} - -export interface ClusterGetSettings extends Generic { - flat_settings?: boolean; - master_timeout?: string; - timeout?: string; - include_defaults?: boolean; -} - -export interface ClusterHealth extends Generic { - index?: string | string[]; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - level?: 'cluster' | 'indices' | 'shards'; - local?: boolean; - master_timeout?: string; - timeout?: string; - wait_for_active_shards?: string; - wait_for_nodes?: string; - wait_for_events?: 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid'; - wait_for_no_relocating_shards?: boolean; - wait_for_no_initializing_shards?: boolean; - wait_for_status?: 'green' | 'yellow' | 'red'; -} - -export interface ClusterPendingTasks extends Generic { - local?: boolean; - master_timeout?: string; -} - -export interface ClusterPostVotingConfigExclusions extends Generic { - node_ids?: string; - node_names?: string; - timeout?: string; -} - -export interface ClusterPutComponentTemplate extends Generic { - name: string; - create?: boolean; - timeout?: string; - master_timeout?: string; - body: T; -} - -export interface ClusterPutSettings extends Generic { - flat_settings?: boolean; - master_timeout?: string; - timeout?: string; - body: T; -} - -export interface ClusterRemoteInfo extends Generic { -} - -export interface ClusterReroute extends Generic { - dry_run?: boolean; - explain?: boolean; - retry_failed?: boolean; - metric?: string | string[]; - master_timeout?: string; - timeout?: string; - body?: T; -} - -export interface ClusterState extends Generic { - index?: string | string[]; - metric?: string | string[]; - local?: boolean; - master_timeout?: string; - flat_settings?: boolean; - wait_for_metadata_version?: number; - wait_for_timeout?: string; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; -} - -export interface ClusterStats extends Generic { - node_id?: string | string[]; - flat_settings?: boolean; - timeout?: string; -} - -export interface Count extends Generic { - index?: string | string[]; - ignore_unavailable?: boolean; - ignore_throttled?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - min_score?: number; - preference?: string; - 
-  routing?: string | string[];
-  q?: string;
-  analyzer?: string;
-  analyze_wildcard?: boolean;
-  default_operator?: 'AND' | 'OR';
-  df?: string;
-  lenient?: boolean;
-  terminate_after?: number;
-  body?: T;
-}
-
-export interface Create<T = RequestBody> extends Generic {
-  id: string;
-  index: string;
-  type?: string;
-  wait_for_active_shards?: string;
-  refresh?: 'wait_for' | boolean;
-  routing?: string;
-  timeout?: string;
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-  pipeline?: string;
-  body: T;
-}
-
-export interface DanglingIndicesDeleteDanglingIndex extends Generic {
-  index_uuid: string;
-  accept_data_loss?: boolean;
-  timeout?: string;
-  master_timeout?: string;
-}
-
-export interface DanglingIndicesImportDanglingIndex extends Generic {
-  index_uuid: string;
-  accept_data_loss?: boolean;
-  timeout?: string;
-  master_timeout?: string;
-}
-
-export interface DanglingIndicesListDanglingIndices extends Generic {
-}
-
-export interface Delete extends Generic {
-  id: string;
-  index: string;
-  type?: string;
-  wait_for_active_shards?: string;
-  refresh?: 'wait_for' | boolean;
-  routing?: string;
-  timeout?: string;
-  if_seq_no?: number;
-  if_primary_term?: number;
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-}
-
-export interface DeleteByQuery<T = RequestBody> extends Generic {
-  index: string | string[];
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  analyzer?: string;
-  analyze_wildcard?: boolean;
-  default_operator?: 'AND' | 'OR';
-  df?: string;
-  from?: number;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  conflicts?: 'abort' | 'proceed';
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  lenient?: boolean;
-  preference?: string;
-  q?: string;
-  routing?: string | string[];
-  scroll?: string;
-  search_type?: 'query_then_fetch' | 'dfs_query_then_fetch';
-  search_timeout?: string;
-  max_docs?: number;
-  sort?: string | string[];
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  terminate_after?: number;
-  stats?: string | string[];
-  version?: boolean;
-  request_cache?: boolean;
-  refresh?: boolean;
-  timeout?: string;
-  wait_for_active_shards?: string;
-  scroll_size?: number;
-  wait_for_completion?: boolean;
-  requests_per_second?: number;
-  slices?: number|string;
-  body: T;
-}
-
-export interface DeleteByQueryRethrottle extends Generic {
-  task_id: string;
-  requests_per_second: number;
-}
-
-export interface DeleteScript extends Generic {
-  id: string;
-  timeout?: string;
-  master_timeout?: string;
-}
-
-export interface EnrichDeletePolicy extends Generic {
-  name: string;
-}
-
-export interface EnrichExecutePolicy extends Generic {
-  name: string;
-  wait_for_completion?: boolean;
-}
-
-export interface EnrichGetPolicy extends Generic {
-  name?: string | string[];
-}
-
-export interface EnrichPutPolicy<T = RequestBody> extends Generic {
-  name: string;
-  body: T;
-}
-
-export interface EnrichStats extends Generic {
-}
-
-export interface EqlDelete extends Generic {
-  id: string;
-}
-
-export interface EqlGet extends Generic {
-  id: string;
-  wait_for_completion_timeout?: string;
-  keep_alive?: string;
-}
-
-export interface EqlGetStatus extends Generic {
-  id: string;
-}
-
-export interface EqlSearch<T = RequestBody> extends Generic {
-  index: string;
-  wait_for_completion_timeout?: string;
-  keep_on_completion?: boolean;
-  keep_alive?: string;
-  body: T;
-}
-
-export interface Exists extends Generic {
-  id: string;
-  index: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  stored_fields?: string | string[];
-  preference?: string;
-  realtime?: boolean;
-  refresh?: boolean;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-}
-
-export interface ExistsSource extends Generic {
-  id: string;
-  index: string;
-  type?: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  preference?: string;
-  realtime?: boolean;
-  refresh?: boolean;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-}
-
-export interface Explain<T = RequestBody> extends Generic {
-  id: string;
-  index: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  analyze_wildcard?: boolean;
-  analyzer?: string;
-  default_operator?: 'AND' | 'OR';
-  df?: string;
-  stored_fields?: string | string[];
-  lenient?: boolean;
-  preference?: string;
-  q?: string;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  body?: T;
-}
-
-export interface FeaturesGetFeatures extends Generic {
-  master_timeout?: string;
-}
-
-export interface FeaturesResetFeatures extends Generic {
-}
-
-export interface FieldCaps<T = RequestBody> extends Generic {
-  index?: string | string[];
-  fields?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  include_unmapped?: boolean;
-  body?: T;
-}
-
-export interface FleetGlobalCheckpoints extends Generic {
-  index: string;
-  wait_for_advance?: boolean;
-  wait_for_index?: boolean;
-  checkpoints?: string | string[];
-  timeout?: string;
-}
-
-export interface Get extends Generic {
-  id: string;
-  index: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  stored_fields?: string | string[];
-  preference?: string;
-  realtime?: boolean;
-  refresh?: boolean;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-}
-
-export interface GetScript extends Generic {
-  id: string;
-  master_timeout?: string;
-}
-
-export interface GetScriptContext extends Generic {
-}
-
-export interface GetScriptLanguages extends Generic {
-}
-
-export interface GetSource extends Generic {
-  id: string;
-  index: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  preference?: string;
-  realtime?: boolean;
-  refresh?: boolean;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-}
-
-export interface GraphExplore<T = RequestBody> extends Generic {
-  index: string | string[];
-  routing?: string;
-  timeout?: string;
-  body?: T;
-}
-
-export interface IlmDeleteLifecycle extends Generic {
-  policy: string;
-}
-
-export interface IlmExplainLifecycle extends Generic {
-  index: string;
-  only_managed?: boolean;
-  only_errors?: boolean;
-}
-
-export interface IlmGetLifecycle extends Generic {
-  policy?: string;
-}
-
-export interface IlmGetStatus extends Generic {
-}
-
-export interface IlmMigrateToDataTiers<T = RequestBody> extends Generic {
-  dry_run?: boolean;
-  body?: T;
-}
-
-export interface IlmMoveToStep<T = RequestBody> extends Generic {
-  index: string;
-  body?: T;
-}
-
-export interface IlmPutLifecycle<T = RequestBody> extends Generic {
-  policy: string;
-  body?: T;
-}
-
-export interface IlmRemovePolicy extends Generic {
-  index: string;
-}
-
-export interface IlmRetry extends Generic {
-  index: string;
-}
-
-export interface IlmStart extends Generic {
-}
-
-export interface IlmStop extends Generic {
-}
-
-export interface Index<T = RequestBody> extends Generic {
-  id?: string;
-  index: string;
-  wait_for_active_shards?: string;
-  op_type?: 'index' | 'create';
-  refresh?: 'wait_for' | boolean;
-  routing?: string;
-  timeout?: string;
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-  if_seq_no?: number;
-  if_primary_term?: number;
-  pipeline?: string;
-  require_alias?: boolean;
-  body: T;
-}
-
-export interface IndicesAddBlock extends Generic {
-  index: string | string[];
-  block: string;
-  timeout?: string;
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesAnalyze<T = RequestBody> extends Generic {
-  index?: string;
-  body?: T;
-}
-
-export interface IndicesClearCache extends Generic {
-  index?: string | string[];
-  fielddata?: boolean;
-  fields?: string | string[];
-  query?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  request?: boolean;
-}
-
-export interface IndicesClone<T = RequestBody> extends Generic {
-  index: string;
-  target: string;
-  timeout?: string;
-  master_timeout?: string;
-  wait_for_active_shards?: string;
-  body?: T;
-}
-
-export interface IndicesClose extends Generic {
-  index: string | string[];
-  timeout?: string;
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  wait_for_active_shards?: string;
-}
-
-export interface IndicesCreate<T = RequestBody> extends Generic {
-  index: string;
-  wait_for_active_shards?: string;
-  timeout?: string;
-  master_timeout?: string;
-  body?: T;
-}
-
-export interface IndicesCreateDataStream extends Generic {
-  name: string;
-}
-
-export interface IndicesDataStreamsStats extends Generic {
-  name?: string | string[];
-}
-
-export interface IndicesDelete extends Generic {
-  index: string | string[];
-  timeout?: string;
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesDeleteAlias extends Generic {
-  index: string | string[];
-  name: string | string[];
-  timeout?: string;
-  master_timeout?: string;
-}
-
-export interface IndicesDeleteDataStream extends Generic {
-  name: string | string[];
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesDeleteIndexTemplate extends Generic {
-  name: string;
-  timeout?: string;
-  master_timeout?: string;
-}
-
-export interface IndicesDeleteTemplate extends Generic {
-  name: string;
-  timeout?: string;
-  master_timeout?: string;
-}
-
-export interface IndicesDiskUsage extends Generic {
-  index: string;
-  run_expensive_tasks?: boolean;
-  flush?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesExists extends Generic {
-  index: string | string[];
-  local?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  flat_settings?: boolean;
-  include_defaults?: boolean;
-}
-
-export interface IndicesExistsAlias extends Generic {
-  name: string | string[];
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  local?: boolean;
-}
-
-export interface IndicesExistsIndexTemplate extends Generic {
-  name: string;
-  flat_settings?: boolean;
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface IndicesExistsTemplate extends Generic {
-  name: string | string[];
-  flat_settings?: boolean;
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface IndicesExistsType extends Generic {
-  index: string | string[];
-  type: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  local?: boolean;
-}
-
-export interface IndicesFieldUsageStats extends Generic {
-  index: string;
-  fields?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesFlush extends Generic {
-  index?: string | string[];
-  force?: boolean;
-  wait_if_ongoing?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesForcemerge extends Generic {
-  index?: string | string[];
-  flush?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  max_num_segments?: number;
-  only_expunge_deletes?: boolean;
-}
-
-export interface IndicesFreeze extends Generic {
-  index: string;
-  timeout?: string;
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  wait_for_active_shards?: string;
-}
-
-export interface IndicesGet extends Generic {
-  index: string | string[];
-  local?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  flat_settings?: boolean;
-  include_defaults?: boolean;
-  master_timeout?: string;
-}
-
-export interface IndicesGetAlias extends Generic {
-  name?: string | string[];
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  local?: boolean;
-}
-
-export interface IndicesGetDataStream extends Generic {
-  name?: string | string[];
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesGetFieldMapping extends Generic {
-  fields: string | string[];
-  index?: string | string[];
-  include_defaults?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  local?: boolean;
-}
-
-export interface IndicesGetIndexTemplate extends Generic {
-  name?: string | string[];
-  flat_settings?: boolean;
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface IndicesGetMapping extends Generic {
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface IndicesGetSettings extends Generic {
-  index?: string | string[];
-  name?: string | string[];
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  flat_settings?: boolean;
-  local?: boolean;
-  include_defaults?: boolean;
-}
-
-export interface IndicesGetTemplate extends Generic {
-  name?: string | string[];
-  flat_settings?: boolean;
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface IndicesMigrateToDataStream extends Generic {
-  name: string;
-}
-
-export interface IndicesOpen extends Generic {
-  index: string | string[];
-  timeout?: string;
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  wait_for_active_shards?: string;
-}
-
-export interface IndicesPromoteDataStream extends Generic {
-  name: string;
-}
-
-export interface IndicesPutAlias<T = RequestBody> extends Generic {
-  index: string | string[];
-  name: string;
-  timeout?: string;
-  master_timeout?: string;
-  body?: T;
-}
-
-export interface IndicesPutIndexTemplate<T = RequestBody> extends Generic {
-  name: string;
-  create?: boolean;
-  cause?: string;
-  master_timeout?: string;
-  body: T;
-}
-
-export interface IndicesPutMapping<T = RequestBody> extends Generic {
-  index: string | string[];
-  timeout?: string;
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  write_index_only?: boolean;
-  body: T;
-}
-
-export interface IndicesPutSettings<T = RequestBody> extends Generic {
-  index?: string | string[];
-  master_timeout?: string;
-  timeout?: string;
-  preserve_existing?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  flat_settings?: boolean;
-  body: T;
-}
-
-export interface IndicesPutTemplate<T = RequestBody> extends Generic {
-  name: string;
-  order?: number;
-  create?: boolean;
-  master_timeout?: string;
-  body: T;
-}
-
-export interface IndicesRecovery extends Generic {
-  index?: string | string[];
-  detailed?: boolean;
-  active_only?: boolean;
-}
-
-export interface IndicesRefresh extends Generic {
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesReloadSearchAnalyzers extends Generic {
-  index: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesResolveIndex extends Generic {
-  name: string | string[];
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesRollover<T = RequestBody> extends Generic {
-  alias: string;
-  new_index?: string;
-  timeout?: string;
-  dry_run?: boolean;
-  master_timeout?: string;
-  wait_for_active_shards?: string;
-  body?: T;
-}
-
-export interface IndicesSegments extends Generic {
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  verbose?: boolean;
-}
-
-export interface IndicesShardStores extends Generic {
-  index?: string | string[];
-  status?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesShrink<T = RequestBody> extends Generic {
-  index: string;
-  target: string;
-  timeout?: string;
-  master_timeout?: string;
-  wait_for_active_shards?: string;
-  body?: T;
-}
-
-export interface IndicesSimulateIndexTemplate<T = RequestBody> extends Generic {
-  name: string;
-  create?: boolean;
-  cause?: string;
-  master_timeout?: string;
-  body?: T;
-}
-
-export interface IndicesSimulateTemplate<T = RequestBody> extends Generic {
-  name?: string;
-  create?: boolean;
-  cause?: string;
-  master_timeout?: string;
-  body?: T;
-}
-
-export interface IndicesSplit<T = RequestBody> extends Generic {
-  index: string;
-  target: string;
-  timeout?: string;
-  master_timeout?: string;
-  wait_for_active_shards?: string;
-  body?: T;
-}
-
-export interface IndicesStats extends Generic {
-  metric?: string | string[];
-  index?: string | string[];
-  completion_fields?: string | string[];
-  fielddata_fields?: string | string[];
-  fields?: string | string[];
-  groups?: string | string[];
-  level?: 'cluster' | 'indices' | 'shards';
-  types?: string | string[];
-  include_segment_file_sizes?: boolean;
-  include_unloaded_segments?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  forbid_closed_indices?: boolean;
-}
-
-export interface IndicesUnfreeze extends Generic {
-  index: string;
-  timeout?: string;
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  wait_for_active_shards?: string;
-}
-
-export interface IndicesUpdateAliases<T = RequestBody> extends Generic {
-  timeout?: string;
-  master_timeout?: string;
-  body: T;
-}
-
-export interface IndicesValidateQuery<T = RequestBody> extends Generic {
-  index?: string | string[];
-  type?: string | string[];
-  explain?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  q?: string;
-  analyzer?: string;
-  analyze_wildcard?: boolean;
-  default_operator?: 'AND' | 'OR';
-  df?: string;
-  lenient?: boolean;
-  rewrite?: boolean;
-  all_shards?: boolean;
-  body?: T;
-}
-
-export interface Info extends Generic {
-}
-
-export interface IngestDeletePipeline extends Generic {
-  id: string;
-  master_timeout?: string;
-  timeout?: string;
-}
-
-export interface IngestGeoIpStats extends Generic {
-}
-
-export interface IngestGetPipeline extends Generic {
-  id?: string;
-  summary?: boolean;
-  master_timeout?: string;
-}
-
-export interface IngestProcessorGrok extends Generic {
-}
-
-export interface IngestPutPipeline<T = RequestBody> extends Generic {
-  id: string;
-  master_timeout?: string;
-  timeout?: string;
-  body: T;
-}
-
-export interface IngestSimulate<T = RequestBody> extends Generic {
-  id?: string;
-  verbose?: boolean;
-  body: T;
-}
-
-export interface LicenseDelete extends Generic {
-}
-
-export interface LicenseGet extends Generic {
-  local?: boolean;
-  accept_enterprise?: boolean;
-}
-
-export interface LicenseGetBasicStatus extends Generic {
-}
-
-export interface LicenseGetTrialStatus extends Generic {
-}
-
-export interface LicensePost<T = RequestBody> extends Generic {
-  acknowledge?: boolean;
-  body?: T;
-}
-
-export interface LicensePostStartBasic extends Generic {
-  acknowledge?: boolean;
-}
-
-export interface LicensePostStartTrial extends Generic {
-  type?: string;
-  acknowledge?: boolean;
-}
-
-export interface LogstashDeletePipeline extends Generic {
-  id: string;
-}
-
-export interface LogstashGetPipeline extends Generic {
-  id: string;
-}
-
-export interface LogstashPutPipeline<T = RequestBody> extends Generic {
-  id: string;
-  body: T;
-}
-
-export interface Mget<T = RequestBody> extends Generic {
-  index?: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  stored_fields?: string | string[];
-  preference?: string;
-  realtime?: boolean;
-  refresh?: boolean;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  body: T;
-}
-
-export interface MigrationDeprecations extends Generic {
-  index?: string;
-}
-
-export interface MlCloseJob<T = RequestBody> extends Generic {
-  job_id: string;
-  allow_no_match?: boolean;
-  allow_no_jobs?: boolean;
-  force?: boolean;
-  timeout?: string;
-  body?: T;
-}
-
-export interface MlDeleteCalendar extends Generic {
-  calendar_id: string;
-}
-
-export interface MlDeleteCalendarEvent extends Generic {
-  calendar_id: string;
-  event_id: string;
-}
-
-export interface MlDeleteCalendarJob extends Generic {
-  calendar_id: string;
-  job_id: string;
-}
-
-export interface MlDeleteDataFrameAnalytics extends Generic {
-  id: string;
-  force?: boolean;
-  timeout?: string;
-}
-
-export interface MlDeleteDatafeed extends Generic {
-  datafeed_id: string;
-  force?: boolean;
-}
-
-export interface MlDeleteExpiredData<T = RequestBody> extends Generic {
-  job_id?: string;
-  requests_per_second?: number;
-  timeout?: string;
-  body?: T;
-}
-
-export interface MlDeleteFilter extends Generic {
-  filter_id: string;
-}
-
-export interface MlDeleteForecast extends Generic {
-  job_id: string;
-  forecast_id?: string;
-  allow_no_forecasts?: boolean;
-  timeout?: string;
-}
-
-export interface MlDeleteJob extends Generic {
-  job_id: string;
-  force?: boolean;
-  wait_for_completion?: boolean;
-}
-
-export interface MlDeleteModelSnapshot extends Generic {
-  job_id: string;
-  snapshot_id: string;
-}
-
-export interface MlDeleteTrainedModel extends Generic {
-  model_id: string;
-}
-
-export interface MlDeleteTrainedModelAlias extends Generic {
-  model_alias: string;
-  model_id: string;
-}
-
-export interface MlEstimateModelMemory<T = RequestBody> extends Generic {
-  body: T;
-}
-
-export interface MlEvaluateDataFrame<T = RequestBody> extends Generic {
-  body: T;
-}
-
-export interface MlExplainDataFrameAnalytics<T = RequestBody> extends Generic {
-  id?: string;
-  body?: T;
-}
-
-export interface MlFlushJob<T = RequestBody> extends Generic {
-  job_id: string;
-  calc_interim?: boolean;
-  start?: string;
-  end?: string;
-  advance_time?: string;
-  skip_time?: string;
-  body?: T;
-}
-
-export interface MlForecast extends Generic {
-  job_id: string;
-  duration?: string;
-  expires_in?: string;
-  max_model_memory?: string;
-}
-
-export interface MlGetBuckets<T = RequestBody> extends Generic {
-  job_id: string;
-  timestamp?: string;
-  expand?: boolean;
-  exclude_interim?: boolean;
-  from?: number;
-  size?: number;
-  start?: string;
-  end?: string;
-  anomaly_score?: number;
-  sort?: string;
-  desc?: boolean;
-  body?: T;
-}
-
-export interface MlGetCalendarEvents extends Generic {
-  calendar_id: string;
-  job_id?: string;
-  start?: string;
-  end?: string;
-  from?: number;
-  size?: number;
-}
-
-export interface MlGetCalendars<T = RequestBody> extends Generic {
-  calendar_id?: string;
-  from?: number;
-  size?: number;
-  body?: T;
-}
-
-export interface MlGetCategories<T = RequestBody> extends Generic {
-  job_id: string;
-  category_id?: number;
-  from?: number;
-  size?: number;
-  partition_field_value?: string;
-  body?: T;
-}
-
-export interface MlGetDataFrameAnalytics extends Generic {
-  id?: string;
-  allow_no_match?: boolean;
-  from?: number;
-  size?: number;
-  exclude_generated?: boolean;
-}
-
-export interface MlGetDataFrameAnalyticsStats extends Generic {
-  id?: string;
-  allow_no_match?: boolean;
-  from?: number;
-  size?: number;
-  verbose?: boolean;
-}
-
-export interface MlGetDatafeedStats extends Generic {
-  datafeed_id?: string;
-  allow_no_match?: boolean;
-  allow_no_datafeeds?: boolean;
-}
-
-export interface MlGetDatafeeds extends Generic {
-  datafeed_id?: string;
-  allow_no_match?: boolean;
-  allow_no_datafeeds?: boolean;
-  exclude_generated?: boolean;
-}
-
-export interface MlGetFilters extends Generic {
-  filter_id?: string;
-  from?: number;
-  size?: number;
-}
-
-export interface MlGetInfluencers<T = RequestBody> extends Generic {
-  job_id: string;
-  exclude_interim?: boolean;
-  from?: number;
-  size?: number;
-  start?: string;
-  end?: string;
-  influencer_score?: number;
-  sort?: string;
-  desc?: boolean;
-  body?: T;
-}
-
-export interface MlGetJobStats extends Generic {
-  job_id?: string;
-  allow_no_match?: boolean;
-  allow_no_jobs?: boolean;
-}
-
-export interface MlGetJobs extends Generic {
-  job_id?: string;
-  allow_no_match?: boolean;
-  allow_no_jobs?: boolean;
-  exclude_generated?: boolean;
-}
-
-export interface MlGetModelSnapshots<T = RequestBody> extends Generic {
-  job_id: string;
-  snapshot_id?: string;
-  from?: number;
-  size?: number;
-  start?: string;
-  end?: string;
-  sort?: string;
-  desc?: boolean;
-  body?: T;
-}
-
-export interface MlGetOverallBuckets<T = RequestBody> extends Generic {
-  job_id: string;
-  top_n?: number;
-  bucket_span?: string;
-  overall_score?: number;
-  exclude_interim?: boolean;
-  start?: string;
-  end?: string;
-  allow_no_match?: boolean;
-  allow_no_jobs?: boolean;
-  body?: T;
-}
-
-export interface MlGetRecords<T = RequestBody> extends Generic {
-  job_id: string;
-  exclude_interim?: boolean;
-  from?: number;
-  size?: number;
-  start?: string;
-  end?: string;
-  record_score?: number;
-  sort?: string;
-  desc?: boolean;
-  body?: T;
-}
-
-export interface MlGetTrainedModels extends Generic {
-  model_id?: string;
-  allow_no_match?: boolean;
-  include?: string;
-  include_model_definition?: boolean;
-  decompress_definition?: boolean;
-  from?: number;
-  size?: number;
-  tags?: string | string[];
-  exclude_generated?: boolean;
-}
-
-export interface MlGetTrainedModelsStats extends Generic {
-  model_id?: string;
-  allow_no_match?: boolean;
-  from?: number;
-  size?: number;
-}
-
-export interface MlInferTrainedModelDeployment<T = RequestBody> extends Generic {
-  model_id: string;
-  timeout?: string;
-  body: T;
-}
-
-export interface MlInfo extends Generic {
-}
-
-export interface MlOpenJob extends Generic {
-  job_id: string;
-}
-
-export interface MlPostCalendarEvents<T = RequestBody> extends Generic {
-  calendar_id: string;
-  body: T;
-}
-
-export interface MlPostData<T = RequestBody> extends Generic {
-  job_id: string;
-  reset_start?: string;
-  reset_end?: string;
-  body: T;
-}
-
-export interface MlPreviewDataFrameAnalytics<T = RequestBody> extends Generic {
-  id?: string;
-  body?: T;
-}
-
-export interface MlPreviewDatafeed<T = RequestBody> extends Generic {
-  datafeed_id?: string;
-  body?: T;
-}
-
-export interface MlPutCalendar<T = RequestBody> extends Generic {
-  calendar_id: string;
-  body?: T;
-}
-
-export interface MlPutCalendarJob extends Generic {
-  calendar_id: string;
-  job_id: string;
-}
-
-export interface MlPutDataFrameAnalytics<T = RequestBody> extends Generic {
-  id: string;
-  body: T;
-}
-
-export interface MlPutDatafeed<T = RequestBody> extends Generic {
-  datafeed_id: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  ignore_throttled?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  body: T;
-}
-
-export interface MlPutFilter<T = RequestBody> extends Generic {
-  filter_id: string;
-  body: T;
-}
-
-export interface MlPutJob<T = RequestBody> extends Generic {
-  job_id: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  ignore_throttled?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  body: T;
-}
-
-export interface MlPutTrainedModel<T = RequestBody> extends Generic {
-  model_id: string;
-  body: T;
-}
-
-export interface MlPutTrainedModelAlias extends Generic {
-  model_alias: string;
-  model_id: string;
-  reassign?: boolean;
-}
-
-export interface MlResetJob extends Generic {
-  job_id: string;
-  wait_for_completion?: boolean;
-}
-
-export interface MlRevertModelSnapshot<T = RequestBody> extends Generic {
-  job_id: string;
-  snapshot_id: string;
-  delete_intervening_results?: boolean;
-  body?: T;
-}
-
-export interface MlSetUpgradeMode extends Generic {
-  enabled?: boolean;
-  timeout?: string;
-}
-
-export interface MlStartDataFrameAnalytics<T = RequestBody> extends Generic {
-  id: string;
-  timeout?: string;
-  body?: T;
-}
-
-export interface MlStartDatafeed<T = RequestBody> extends Generic {
-  datafeed_id: string;
-  start?: string;
-  end?: string;
-  timeout?: string;
-  body?: T;
-}
-
-export interface MlStartTrainedModelDeployment extends Generic {
-  model_id: string;
-  timeout?: string;
-}
-
-export interface MlStopDataFrameAnalytics<T = RequestBody> extends Generic {
-  id: string;
-  allow_no_match?: boolean;
-  force?: boolean;
-  timeout?: string;
-  body?: T;
-}
-
-export interface MlStopDatafeed<T = RequestBody> extends Generic {
-  datafeed_id: string;
-  allow_no_match?: boolean;
-  allow_no_datafeeds?: boolean;
-  force?: boolean;
-  timeout?: string;
-  body?: T;
-}
-
-export interface MlStopTrainedModelDeployment extends Generic {
-  model_id: string;
-}
-
-export interface MlUpdateDataFrameAnalytics<T = RequestBody> extends Generic {
-  id: string;
-  body: T;
-}
-
-export interface MlUpdateDatafeed<T = RequestBody> extends Generic {
-  datafeed_id: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  ignore_throttled?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  body: T;
-}
-
-export interface MlUpdateFilter<T = RequestBody> extends Generic {
-  filter_id: string;
-  body: T;
-}
-
-export interface MlUpdateJob<T = RequestBody> extends Generic {
-  job_id: string;
-  body: T;
-}
-
-export interface MlUpdateModelSnapshot<T = RequestBody> extends Generic {
-  job_id: string;
-  snapshot_id: string;
-  body: T;
-}
-
-export interface MlUpgradeJobSnapshot extends Generic {
-  job_id: string;
-  snapshot_id: string;
-  timeout?: string;
-  wait_for_completion?: boolean;
-}
-
-export interface MlValidate<T = RequestBody> extends Generic {
-  body: T;
-}
-
-export interface MlValidateDetector<T = RequestBody> extends Generic {
-  body: T;
-}
-
-export interface MonitoringBulk<T = RequestNDBody> extends Generic {
-  type?: string;
-  system_id?: string;
-  system_api_version?: string;
-  interval?: string;
-  body: T;
-}
-
-export interface Msearch<T = RequestNDBody> extends Generic {
-  index?: string | string[];
-  search_type?: 'query_then_fetch' | 'dfs_query_then_fetch';
-  max_concurrent_searches?: number;
-  typed_keys?: boolean;
-  pre_filter_shard_size?: number;
-  max_concurrent_shard_requests?: number;
-  rest_total_hits_as_int?: boolean;
-  ccs_minimize_roundtrips?: boolean;
-  body: T;
-}
-
-export interface MsearchTemplate<T = RequestNDBody> extends Generic {
-  index?: string | string[];
-  search_type?: 'query_then_fetch' | 'dfs_query_then_fetch';
-  typed_keys?: boolean;
-  max_concurrent_searches?: number;
-  rest_total_hits_as_int?: boolean;
-  ccs_minimize_roundtrips?: boolean;
-  body: T;
-}
-
-export interface Mtermvectors<T = RequestBody> extends Generic {
-  index?: string;
-  ids?: string | string[];
-  term_statistics?: boolean;
-  field_statistics?: boolean;
-  fields?: string | string[];
-  offsets?: boolean;
-  positions?: boolean;
-  payloads?: boolean;
-  preference?: string;
-  routing?: string;
-  realtime?: boolean;
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-  body?: T;
-}
-
-export interface NodesClearMeteringArchive extends Generic {
-  node_id: string | string[];
-  max_archive_version: number;
-}
-
-export interface NodesGetMeteringInfo extends Generic {
-  node_id: string | string[];
-}
-
-export interface NodesHotThreads extends Generic {
-  node_id?: string | string[];
-  interval?: string;
-  snapshots?: number;
-  threads?: number;
-  ignore_idle_threads?: boolean;
-  type?: 'cpu' | 'wait' | 'block';
-  timeout?: string;
-}
-
-export interface NodesInfo extends Generic {
-  node_id?: string | string[];
-  metric?: string | string[];
-  flat_settings?: boolean;
-  timeout?: string;
-}
-
-export interface NodesReloadSecureSettings<T = RequestBody> extends Generic {
-  node_id?: string | string[];
-  timeout?: string;
-  body?: T;
-}
-
-export interface NodesStats extends Generic {
-  node_id?: string | string[];
-  metric?: string | string[];
-  index_metric?: string | string[];
-  completion_fields?: string | string[];
-  fielddata_fields?: string | string[];
-  fields?: string | string[];
-  groups?: boolean;
-  level?: 'indices' | 'node' | 'shards';
-  types?: string | string[];
-  timeout?: string;
-  include_segment_file_sizes?: boolean;
-  include_unloaded_segments?: boolean;
-}
-
-export interface NodesUsage extends Generic {
-  node_id?: string | string[];
-  metric?: string | string[];
-  timeout?: string;
-}
-
-export interface OpenPointInTime extends Generic {
-  index?: string | string[];
-  preference?: string;
-  routing?: string;
-  ignore_unavailable?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  keep_alive?: string;
-}
-
-export interface Ping extends Generic {
-}
-
-export interface PutScript<T = RequestBody> extends Generic {
-  id: string;
-  context?: string;
-  timeout?: string;
-  master_timeout?: string;
-  body: T;
-}
-
-export interface RankEval<T = RequestBody> extends Generic {
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  search_type?: 'query_then_fetch' | 'dfs_query_then_fetch';
-  body: T;
-}
-
-export interface Reindex<T = RequestBody> extends Generic {
-  refresh?: boolean;
-  timeout?: string;
-  wait_for_active_shards?: string;
-  wait_for_completion?: boolean;
-  requests_per_second?: number;
-  scroll?: string;
-  slices?: number|string;
-  max_docs?: number;
-  body: T;
-}
-
-export interface ReindexRethrottle extends Generic {
-  task_id: string;
-  requests_per_second: number;
-}
-
-export interface RenderSearchTemplate<T = RequestBody> extends Generic {
-  id?: string;
-  body?: T;
-}
-
-export interface RollupDeleteJob extends Generic {
-  id: string;
-}
-
-export interface RollupGetJobs extends Generic {
-  id?: string;
-}
-
-export interface RollupGetRollupCaps extends Generic {
-  id?: string;
-}
-
-export interface RollupGetRollupIndexCaps extends Generic {
-  index: string;
-}
-
-export interface RollupPutJob<T = RequestBody> extends Generic {
-  id: string;
-  body: T;
-}
-
-export interface RollupRollup<T = RequestBody> extends Generic {
-  index: string;
-  rollup_index: string;
-  body: T;
-}
-
-export interface RollupRollupSearch<T = RequestBody> extends Generic {
-  index: string | string[];
-  type?: string;
-  typed_keys?: boolean;
-  rest_total_hits_as_int?: boolean;
-  body: T;
-}
-
-export interface RollupStartJob extends Generic {
-  id: string;
-}
-
-export interface RollupStopJob extends Generic {
-  id: string;
-  wait_for_completion?: boolean;
-  timeout?: string;
-}
-
ScriptsPainlessExecute extends Generic { - body?: T; -} - -export interface Scroll extends Generic { - scroll_id?: string; - scroll?: string; - rest_total_hits_as_int?: boolean; - body?: T; -} - -export interface Search extends Generic { - index?: string | string[]; - _source_exclude?: string | string[]; - _source_include?: string | string[]; - analyzer?: string; - analyze_wildcard?: boolean; - ccs_minimize_roundtrips?: boolean; - default_operator?: 'AND' | 'OR'; - df?: string; - explain?: boolean; - stored_fields?: string | string[]; - docvalue_fields?: string | string[]; - from?: number; - ignore_unavailable?: boolean; - ignore_throttled?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - lenient?: boolean; - preference?: string; - q?: string; - routing?: string | string[]; - scroll?: string; - search_type?: 'query_then_fetch' | 'dfs_query_then_fetch'; - size?: number; - sort?: string | string[]; - _source?: string | string[]; - _source_excludes?: string | string[]; - _source_includes?: string | string[]; - terminate_after?: number; - stats?: string | string[]; - suggest_field?: string; - suggest_mode?: 'missing' | 'popular' | 'always'; - suggest_size?: number; - suggest_text?: string; - timeout?: string; - track_scores?: boolean; - track_total_hits?: boolean | number; - allow_partial_search_results?: boolean; - typed_keys?: boolean; - version?: boolean; - seq_no_primary_term?: boolean; - request_cache?: boolean; - batched_reduce_size?: number; - max_concurrent_shard_requests?: number; - pre_filter_shard_size?: number; - rest_total_hits_as_int?: boolean; - min_compatible_shard_node?: string; - body?: T; -} - -export interface SearchMvt extends Generic { - index: string | string[]; - field: string; - zoom: number; - x: number; - y: number; - exact_bounds?: boolean; - extent?: number; - grid_precision?: number; - grid_type?: 'grid' | 'point'; - size?: number; - body?: T; -} - -export interface SearchShards extends Generic { - index?: string | string[]; - preference?: string; - routing?: string; - local?: boolean; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; -} - -export interface SearchTemplate extends Generic { - index?: string | string[]; - ignore_unavailable?: boolean; - ignore_throttled?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - preference?: string; - routing?: string | string[]; - scroll?: string; - search_type?: 'query_then_fetch' | 'dfs_query_then_fetch'; - explain?: boolean; - profile?: boolean; - typed_keys?: boolean; - rest_total_hits_as_int?: boolean; - ccs_minimize_roundtrips?: boolean; - body: T; -} - -export interface SearchableSnapshotsCacheStats extends Generic { - node_id?: string | string[]; -} - -export interface SearchableSnapshotsClearCache extends Generic { - index?: string | string[]; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'none' | 'all'; -} - -export interface SearchableSnapshotsMount extends Generic { - repository: string; - snapshot: string; - master_timeout?: string; - wait_for_completion?: boolean; - storage?: string; - body: T; -} - -export interface SearchableSnapshotsStats extends Generic { - index?: string | string[]; - level?: 'cluster' | 'indices' | 'shards'; -} - -export interface SecurityAuthenticate extends Generic { -} - -export interface SecurityChangePassword extends Generic { - 
username?: string; - refresh?: 'wait_for' | boolean; - body: T; -} - -export interface SecurityClearApiKeyCache extends Generic { - ids: string | string[]; -} - -export interface SecurityClearCachedPrivileges extends Generic { - application: string | string[]; -} - -export interface SecurityClearCachedRealms extends Generic { - realms: string | string[]; - usernames?: string | string[]; -} - -export interface SecurityClearCachedRoles extends Generic { - name: string | string[]; -} - -export interface SecurityClearCachedServiceTokens extends Generic { - namespace: string; - service: string; - name: string | string[]; -} - -export interface SecurityCreateApiKey extends Generic { - refresh?: 'wait_for' | boolean; - body: T; -} - -export interface SecurityCreateServiceToken extends Generic { - namespace: string; - service: string; - name?: string; - refresh?: 'wait_for' | boolean; -} - -export interface SecurityDeletePrivileges extends Generic { - application: string; - name: string; - refresh?: 'wait_for' | boolean; -} - -export interface SecurityDeleteRole extends Generic { - name: string; - refresh?: 'wait_for' | boolean; -} - -export interface SecurityDeleteRoleMapping extends Generic { - name: string; - refresh?: 'wait_for' | boolean; -} - -export interface SecurityDeleteServiceToken extends Generic { - namespace: string; - service: string; - name: string; - refresh?: 'wait_for' | boolean; -} - -export interface SecurityDeleteUser extends Generic { - username: string; - refresh?: 'wait_for' | boolean; -} - -export interface SecurityDisableUser extends Generic { - username: string; - refresh?: 'wait_for' | boolean; -} - -export interface SecurityEnableUser extends Generic { - username: string; - refresh?: 'wait_for' | boolean; -} - -export interface SecurityEnrollKibana extends Generic { -} - -export interface SecurityEnrollNode extends Generic { -} - -export interface SecurityGetApiKey extends Generic { - id?: string; - name?: string; - username?: string; - realm_name?: string; - owner?: boolean; -} - -export interface SecurityGetBuiltinPrivileges extends Generic { -} - -export interface SecurityGetPrivileges extends Generic { - application?: string; - name?: string; -} - -export interface SecurityGetRole extends Generic { - name?: string | string[]; -} - -export interface SecurityGetRoleMapping extends Generic { - name?: string | string[]; -} - -export interface SecurityGetServiceAccounts extends Generic { - namespace?: string; - service?: string; -} - -export interface SecurityGetServiceCredentials extends Generic { - namespace: string; - service: string; -} - -export interface SecurityGetToken extends Generic { - body: T; -} - -export interface SecurityGetUser extends Generic { - username?: string | string[]; -} - -export interface SecurityGetUserPrivileges extends Generic { -} - -export interface SecurityGrantApiKey extends Generic { - refresh?: 'wait_for' | boolean; - body: T; -} - -export interface SecurityHasPrivileges extends Generic { - user?: string; - body: T; -} - -export interface SecurityInvalidateApiKey extends Generic { - body: T; -} - -export interface SecurityInvalidateToken extends Generic { - body: T; -} - -export interface SecurityPutPrivileges extends Generic { - refresh?: 'wait_for' | boolean; - body: T; -} - -export interface SecurityPutRole extends Generic { - name: string; - refresh?: 'wait_for' | boolean; - body: T; -} - -export interface SecurityPutRoleMapping extends Generic { - name: string; - refresh?: 'wait_for' | boolean; - body: T; -} - -export interface 
SecurityPutUser extends Generic { - username: string; - refresh?: 'wait_for' | boolean; - body: T; -} - -export interface SecurityQueryApiKeys extends Generic { - body?: T; -} - -export interface SecuritySamlAuthenticate extends Generic { - body: T; -} - -export interface SecuritySamlCompleteLogout extends Generic { - body: T; -} - -export interface SecuritySamlInvalidate extends Generic { - body: T; -} - -export interface SecuritySamlLogout extends Generic { - body: T; -} - -export interface SecuritySamlPrepareAuthentication extends Generic { - body: T; -} - -export interface SecuritySamlServiceProviderMetadata extends Generic { - realm_name: string; -} - -export interface ShutdownDeleteNode extends Generic { - node_id: string; -} - -export interface ShutdownGetNode extends Generic { - node_id?: string; -} - -export interface ShutdownPutNode extends Generic { - node_id: string; - body: T; -} - -export interface SlmDeleteLifecycle extends Generic { - policy_id: string; -} - -export interface SlmExecuteLifecycle extends Generic { - policy_id: string; -} - -export interface SlmExecuteRetention extends Generic { -} - -export interface SlmGetLifecycle extends Generic { - policy_id?: string | string[]; -} - -export interface SlmGetStats extends Generic { -} - -export interface SlmGetStatus extends Generic { -} - -export interface SlmPutLifecycle extends Generic { - policy_id: string; - body?: T; -} - -export interface SlmStart extends Generic { -} - -export interface SlmStop extends Generic { -} - -export interface SnapshotCleanupRepository extends Generic { - repository: string; - master_timeout?: string; - timeout?: string; -} - -export interface SnapshotClone extends Generic { - repository: string; - snapshot: string; - target_snapshot: string; - master_timeout?: string; - body: T; -} - -export interface SnapshotCreate extends Generic { - repository: string; - snapshot: string; - master_timeout?: string; - wait_for_completion?: boolean; - body?: T; -} - -export interface SnapshotCreateRepository extends Generic { - repository: string; - master_timeout?: string; - timeout?: string; - verify?: boolean; - body: T; -} - -export interface SnapshotDelete extends Generic { - repository: string; - snapshot: string | string[]; - master_timeout?: string; -} - -export interface SnapshotDeleteRepository extends Generic { - repository: string | string[]; - master_timeout?: string; - timeout?: string; -} - -export interface SnapshotGet extends Generic { - repository: string; - snapshot: string | string[]; - master_timeout?: string; - ignore_unavailable?: boolean; - index_details?: boolean; - include_repository?: boolean; - verbose?: boolean; -} - -export interface SnapshotGetRepository extends Generic { - repository?: string | string[]; - master_timeout?: string; - local?: boolean; -} - -export interface SnapshotRepositoryAnalyze extends Generic { - repository: string; - blob_count?: number; - concurrency?: number; - read_node_count?: number; - early_read_node_count?: number; - seed?: number; - rare_action_probability?: number; - max_blob_size?: string; - max_total_data_size?: string; - timeout?: string; - detailed?: boolean; - rarely_abort_writes?: boolean; -} - -export interface SnapshotRestore extends Generic { - repository: string; - snapshot: string; - master_timeout?: string; - wait_for_completion?: boolean; - body?: T; -} - -export interface SnapshotStatus extends Generic { - repository?: string; - snapshot?: string | string[]; - master_timeout?: string; - ignore_unavailable?: boolean; -} - -export 
interface SnapshotVerifyRepository extends Generic { - repository: string; - master_timeout?: string; - timeout?: string; -} - -export interface SqlClearCursor extends Generic { - body: T; -} - -export interface SqlDeleteAsync extends Generic { - id: string; -} - -export interface SqlGetAsync extends Generic { - id: string; - delimiter?: string; - format?: string; - keep_alive?: string; - wait_for_completion_timeout?: string; -} - -export interface SqlGetAsyncStatus extends Generic { - id: string; -} - -export interface SqlQuery extends Generic { - format?: string; - body: T; -} - -export interface SqlTranslate extends Generic { - body: T; -} - -export interface SslCertificates extends Generic { -} - -export interface TasksCancel extends Generic { - task_id?: string; - nodes?: string | string[]; - actions?: string | string[]; - parent_task_id?: string; - wait_for_completion?: boolean; -} - -export interface TasksGet extends Generic { - task_id: string; - wait_for_completion?: boolean; - timeout?: string; -} - -export interface TasksList extends Generic { - nodes?: string | string[]; - actions?: string | string[]; - detailed?: boolean; - parent_task_id?: string; - wait_for_completion?: boolean; - group_by?: 'nodes' | 'parents' | 'none'; - timeout?: string; -} - -export interface TermsEnum extends Generic { - index: string | string[]; - body?: T; -} - -export interface Termvectors extends Generic { - index: string; - id?: string; - term_statistics?: boolean; - field_statistics?: boolean; - fields?: string | string[]; - offsets?: boolean; - positions?: boolean; - payloads?: boolean; - preference?: string; - routing?: string; - realtime?: boolean; - version?: number; - version_type?: 'internal' | 'external' | 'external_gte'; - body?: T; -} - -export interface TextStructureFindStructure extends Generic { - lines_to_sample?: number; - line_merge_size_limit?: number; - timeout?: string; - charset?: string; - format?: 'ndjson' | 'xml' | 'delimited' | 'semi_structured_text'; - has_header_row?: boolean; - column_names?: string | string[]; - delimiter?: string; - quote?: string; - should_trim_fields?: boolean; - grok_pattern?: string; - timestamp_field?: string; - timestamp_format?: string; - explain?: boolean; - body: T; -} - -export interface TransformDeleteTransform extends Generic { - transform_id: string; - force?: boolean; -} - -export interface TransformGetTransform extends Generic { - transform_id?: string; - from?: number; - size?: number; - allow_no_match?: boolean; - exclude_generated?: boolean; -} - -export interface TransformGetTransformStats extends Generic { - transform_id: string; - from?: number; - size?: number; - allow_no_match?: boolean; -} - -export interface TransformPreviewTransform extends Generic { - body: T; -} - -export interface TransformPutTransform extends Generic { - transform_id: string; - defer_validation?: boolean; - body: T; -} - -export interface TransformStartTransform extends Generic { - transform_id: string; - timeout?: string; -} - -export interface TransformStopTransform extends Generic { - transform_id: string; - force?: boolean; - wait_for_completion?: boolean; - timeout?: string; - allow_no_match?: boolean; - wait_for_checkpoint?: boolean; -} - -export interface TransformUpdateTransform extends Generic { - transform_id: string; - defer_validation?: boolean; - body: T; -} - -export interface Update extends Generic { - id: string; - index: string; - type?: string; - _source_exclude?: string | string[]; - _source_include?: string | string[]; - 
-export interface Update<T = RequestBody> extends Generic {
-  id: string;
-  index: string;
-  type?: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  wait_for_active_shards?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  lang?: string;
-  refresh?: 'wait_for' | boolean;
-  retry_on_conflict?: number;
-  routing?: string;
-  timeout?: string;
-  if_seq_no?: number;
-  if_primary_term?: number;
-  require_alias?: boolean;
-  body: T;
-}
-
-export interface UpdateByQuery<T = RequestBody> extends Generic {
-  index: string | string[];
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  analyzer?: string;
-  analyze_wildcard?: boolean;
-  default_operator?: 'AND' | 'OR';
-  df?: string;
-  from?: number;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  conflicts?: 'abort' | 'proceed';
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  lenient?: boolean;
-  pipeline?: string;
-  preference?: string;
-  q?: string;
-  routing?: string | string[];
-  scroll?: string;
-  search_type?: 'query_then_fetch' | 'dfs_query_then_fetch';
-  search_timeout?: string;
-  max_docs?: number;
-  sort?: string | string[];
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  terminate_after?: number;
-  stats?: string | string[];
-  version?: boolean;
-  version_type?: boolean;
-  request_cache?: boolean;
-  refresh?: boolean;
-  timeout?: string;
-  wait_for_active_shards?: string;
-  scroll_size?: number;
-  wait_for_completion?: boolean;
-  requests_per_second?: number;
-  slices?: number|string;
-  body?: T;
-}
-
-export interface UpdateByQueryRethrottle extends Generic {
-  task_id: string;
-  requests_per_second: number;
-}
-
-export interface WatcherAckWatch extends Generic {
-  watch_id: string;
-  action_id?: string | string[];
-}
-
-export interface WatcherActivateWatch extends Generic {
-  watch_id: string;
-}
-
-export interface WatcherDeactivateWatch extends Generic {
-  watch_id: string;
-}
-
-export interface WatcherDeleteWatch extends Generic {
-  id: string;
-}
-
-export interface WatcherExecuteWatch<T = RequestBody> extends Generic {
-  id?: string;
-  debug?: boolean;
-  body?: T;
-}
-
-export interface WatcherGetWatch extends Generic {
-  id: string;
-}
-
-export interface WatcherPutWatch<T = RequestBody> extends Generic {
-  id: string;
-  active?: boolean;
-  version?: number;
-  if_seq_no?: number;
-  if_primary_term?: number;
-  body?: T;
-}
-
-export interface WatcherQueryWatches<T = RequestBody> extends Generic {
-  body?: T;
-}
-
-export interface WatcherStart extends Generic {
-}
-
-export interface WatcherStats extends Generic {
-  metric?: string | string[];
-  emit_stacktraces?: boolean;
-}
-
-export interface WatcherStop extends Generic {
-}
-
-export interface XpackInfo extends Generic {
-  categories?: string | string[];
-  accept_enterprise?: boolean;
-}
-
-export interface XpackUsage extends Generic {
-  master_timeout?: string;
-}
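The Update params above carried the optimistic-concurrency knobs (retry_on_conflict, if_seq_no, if_primary_term). A minimal sketch of a typed partial update with them (index and id are placeholders):

import { Client, RequestParams } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function incrementCounter () {
  const params: RequestParams.Update = {
    index: 'my-index',        // placeholder index
    id: '1',                  // placeholder document id
    retry_on_conflict: 3,     // re-run the update if the doc changed in between
    refresh: 'wait_for',
    body: { doc: { counter: 2 } }
  }
  await client.update(params)
}
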
diff --git a/api/utils.js b/api/utils.js
deleted file mode 100644
index bf18fc9fe..000000000
--- a/api/utils.js
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-const result = { body: null, statusCode: null, headers: null, warnings: null }
-const kConfigurationError = Symbol('configuration error')
-
-function handleError (err, callback) {
-  if (callback) {
-    process.nextTick(callback, err, result)
-    return { then: noop, catch: noop, abort: noop }
-  }
-  return Promise.reject(err)
-}
-
-function snakeCaseKeys (acceptedQuerystring, snakeCase, querystring) {
-  const target = {}
-  const keys = Object.keys(querystring)
-  for (let i = 0, len = keys.length; i < len; i++) {
-    const key = keys[i]
-    target[snakeCase[key] || key] = querystring[key]
-  }
-  return target
-}
-
-function normalizeArguments (params, options, callback) {
-  if (typeof options === 'function') {
-    callback = options
-    options = {}
-  }
-  if (typeof params === 'function' || params == null) {
-    callback = params
-    params = {}
-    options = {}
-  }
-  return [params, options, callback]
-}
-
-function noop () {}
-
-module.exports = { handleError, snakeCaseKeys, normalizeArguments, noop, kConfigurationError }
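These removed helpers existed to support the old optional-callback calling convention. A rough sketch of how a generated API method used them; fakePing and its error message are hypothetical, only normalizeArguments and handleError come from the file above:

// assuming the removed module were still importable:
const { normalizeArguments, handleError } = require('./api/utils')

function fakePing (params?: any, options?: any, callback?: any) {
  // accepts fakePing(), fakePing(cb), fakePing(params, cb) and
  // fakePing(params, opts, cb): the helper shuffles the arity
  ;[params, options, callback] = normalizeArguments(params, options, callback)

  if (params.body != null) {
    // with a callback, handleError schedules callback(err, result) on the
    // next tick; without one it returns a rejected Promise
    const err = new Error('This API does not accept a body') // the generated code used its ConfigurationError class here
    return handleError(err, callback)
  }
  // ... the real generated method would call transport.request(...) here ...
}
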
diff --git a/index.d.ts b/index.d.ts
index 1d7ccae41..a96aa2b92 100644
--- a/index.d.ts
+++ b/index.d.ts
@@ -17,2973 +17,7 @@
  * under the License.
  */
-/// <reference types="node" />
+import { errors } from '@elastic/transport'
+import Client from './lib/Client'
-import { ConnectionOptions as TlsConnectionOptions } from 'tls';
-import Transport, {
-  ApiError,
-  ApiResponse,
-  RequestEvent,
-  TransportRequestParams,
-  TransportRequestOptions,
-  nodeFilterFn,
-  nodeSelectorFn,
-  generateRequestIdFn,
-  TransportRequestCallback,
-  TransportRequestPromise,
-  RequestBody,
-  RequestNDBody,
-  Context
-} from './lib/Transport';
-import { URL } from 'url';
-import Connection, { AgentOptions, agentFn } from './lib/Connection';
-import {
-  ConnectionPool,
-  BaseConnectionPool,
-  CloudConnectionPool,
-  ResurrectEvent,
-  BasicAuth,
-  ApiKeyAuth,
-  BearerAuth
-} from './lib/pool';
-import Serializer from './lib/Serializer';
-import Helpers from './lib/Helpers';
-import * as errors from './lib/errors';
-import * as estypes from './api/types'
-import * as RequestParams from './api/requestParams'
-
-declare type callbackFn<TResponse, TContext> = (err: ApiError, result: ApiResponse<TResponse, TContext>) => void;
-
-// Extend API
-interface ClientExtendsCallbackOptions {
-  ConfigurationError: errors.ConfigurationError,
-  makeRequest(params: TransportRequestParams, options?: TransportRequestOptions): Promise<void> | void;
-  result: {
-    body: null,
-    statusCode: null,
-    headers: null,
-    warnings: null
-  }
-}
-declare type extendsCallback = (options: ClientExtendsCallbackOptions) => any;
-// /Extend API
-
-interface NodeOptions {
-  url: URL;
-  id?: string;
-  agent?: AgentOptions;
-  ssl?: TlsConnectionOptions;
-  headers?: Record<string, any>;
-  roles?: {
-    master: boolean;
-    data: boolean;
-    ingest: boolean;
-    ml: boolean;
-  }
-}
-
-interface ClientOptions {
-  node?: string | string[] | NodeOptions | NodeOptions[];
-  nodes?: string | string[] | NodeOptions | NodeOptions[];
-  Connection?: typeof Connection;
-  ConnectionPool?: typeof ConnectionPool;
-  Transport?: typeof Transport;
-  Serializer?: typeof Serializer;
-  maxRetries?: number;
-  requestTimeout?: number;
-  pingTimeout?: number;
-  sniffInterval?: number | boolean;
-  sniffOnStart?: boolean;
-  sniffEndpoint?: string;
-  sniffOnConnectionFault?: boolean;
-  resurrectStrategy?: 'ping' | 'optimistic' | 'none';
-  suggestCompression?: boolean;
-  compression?: 'gzip';
-  ssl?: TlsConnectionOptions;
-  agent?: AgentOptions | agentFn | false;
-  nodeFilter?: nodeFilterFn;
-  nodeSelector?: nodeSelectorFn | string;
-  headers?: Record<string, any>;
-  opaqueIdPrefix?: string;
-  generateRequestId?: generateRequestIdFn;
-  name?: string | symbol;
-  auth?: BasicAuth | ApiKeyAuth | BearerAuth;
-  context?: Context;
-  proxy?: string | URL;
-  enableMetaHeader?: boolean;
-  cloud?: {
-    id: string;
-    // TODO: remove username and password here in 8
-    username?: string;
-    password?: string;
-  };
-  disablePrototypePoisoningProtection?: boolean | 'proto' | 'constructor';
-  caFingerprint?: string;
-  maxResponseSize?: number;
-  maxCompressedResponseSize?: number;
-}
-
-declare class Client {
-  constructor(opts: ClientOptions);
-  connectionPool: ConnectionPool;
-  transport: Transport;
-  serializer: Serializer;
-  extend(method: string, fn: extendsCallback): void
-  extend(method: string, opts: { force: boolean }, fn: extendsCallback): void;
-  helpers: Helpers;
-  child(opts?: ClientOptions): Client;
-  close(callback: Function): void;
-  close(): Promise<void>;
-  emit(event: string | symbol, ...args: any[]): boolean;
-  on(event: 'request', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  on(event: 'response', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  on(event: 'sniff', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  on(event: 'resurrect', listener: (err: null, meta: ResurrectEvent) => void): this;
-  once(event: 'request', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  once(event: 'response', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  once(event: 'sniff', listener: (err: ApiError, meta: RequestEvent) => void): this;
-  once(event: 'resurrect', listener: (err: null, meta: ResurrectEvent) => void): this;
-  off(event: string | symbol, listener: (...args: any[]) => void): this;
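The options and diagnostic events deleted here are easiest to see in use. A minimal sketch against the 7.x typings; the node URL and credentials are placeholders:

import { Client } from '@elastic/elasticsearch'

const client = new Client({
  node: 'https://localhost:9200/',                           // placeholder address
  maxRetries: 5,
  requestTimeout: 60000,
  sniffOnStart: true,
  auth: { username: 'elastic', password: 'changeme' }   // placeholder credentials
})

// the events declared on the Client class above
client.on('response', (err, meta) => {
  if (err !== null) console.error('request failed:', err)
})
client.on('resurrect', (_err, meta) => {
  console.log('connection resurrected:', meta)
})
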
-  /* GENERATED */
-  async_search: {
-    delete<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.AsyncSearchDelete, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchDelete, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchDelete, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.AsyncSearchGet, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchGet, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchGet, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    status<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.AsyncSearchStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    status<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    status<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    status<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    submit<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.AsyncSearchSubmit<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    submit<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    submit<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchSubmit<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    submit<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchSubmit<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  asyncSearch: {
-    delete<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.AsyncSearchDelete, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchDelete, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchDelete, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.AsyncSearchGet, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchGet, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchGet, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    status<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.AsyncSearchStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    status<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    status<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    status<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    submit<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.AsyncSearchSubmit<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    submit<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    submit<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchSubmit<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    submit<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.AsyncSearchSubmit<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  autoscaling: {
-    delete_autoscaling_policy<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.AutoscalingDeleteAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_autoscaling_policy<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_autoscaling_policy<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AutoscalingDeleteAutoscalingPolicy, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_autoscaling_policy<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.AutoscalingDeleteAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteAutoscalingPolicy<TResponse = Record<string, any>, TContext = Context>(params?: 
RequestParams.AutoscalingDeleteAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise> - deleteAutoscalingPolicy, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteAutoscalingPolicy, TContext = Context>(params: RequestParams.AutoscalingDeleteAutoscalingPolicy, callback: callbackFn): TransportRequestCallback - deleteAutoscalingPolicy, TContext = Context>(params: RequestParams.AutoscalingDeleteAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_autoscaling_capacity, TContext = Context>(params?: RequestParams.AutoscalingGetAutoscalingCapacity, options?: TransportRequestOptions): TransportRequestPromise> - get_autoscaling_capacity, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_autoscaling_capacity, TContext = Context>(params: RequestParams.AutoscalingGetAutoscalingCapacity, callback: callbackFn): TransportRequestCallback - get_autoscaling_capacity, TContext = Context>(params: RequestParams.AutoscalingGetAutoscalingCapacity, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getAutoscalingCapacity, TContext = Context>(params?: RequestParams.AutoscalingGetAutoscalingCapacity, options?: TransportRequestOptions): TransportRequestPromise> - getAutoscalingCapacity, TContext = Context>(callback: callbackFn): TransportRequestCallback - getAutoscalingCapacity, TContext = Context>(params: RequestParams.AutoscalingGetAutoscalingCapacity, callback: callbackFn): TransportRequestCallback - getAutoscalingCapacity, TContext = Context>(params: RequestParams.AutoscalingGetAutoscalingCapacity, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_autoscaling_policy, TContext = Context>(params?: RequestParams.AutoscalingGetAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise> - get_autoscaling_policy, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_autoscaling_policy, TContext = Context>(params: RequestParams.AutoscalingGetAutoscalingPolicy, callback: callbackFn): TransportRequestCallback - get_autoscaling_policy, TContext = Context>(params: RequestParams.AutoscalingGetAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getAutoscalingPolicy, TContext = Context>(params?: RequestParams.AutoscalingGetAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise> - getAutoscalingPolicy, TContext = Context>(callback: callbackFn): TransportRequestCallback - getAutoscalingPolicy, TContext = Context>(params: RequestParams.AutoscalingGetAutoscalingPolicy, callback: callbackFn): TransportRequestCallback - getAutoscalingPolicy, TContext = Context>(params: RequestParams.AutoscalingGetAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_autoscaling_policy, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.AutoscalingPutAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise> - put_autoscaling_policy, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_autoscaling_policy, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.AutoscalingPutAutoscalingPolicy, callback: callbackFn): TransportRequestCallback - put_autoscaling_policy, TRequestBody extends RequestBody = Record, TContext = Context>(params: 
RequestParams.AutoscalingPutAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putAutoscalingPolicy, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.AutoscalingPutAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise> - putAutoscalingPolicy, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - putAutoscalingPolicy, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.AutoscalingPutAutoscalingPolicy, callback: callbackFn): TransportRequestCallback - putAutoscalingPolicy, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.AutoscalingPutAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - bulk, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params?: RequestParams.Bulk, options?: TransportRequestOptions): TransportRequestPromise> - bulk, TRequestBody extends RequestNDBody = Record[], TContext = Context>(callback: callbackFn): TransportRequestCallback - bulk, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.Bulk, callback: callbackFn): TransportRequestCallback - bulk, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.Bulk, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - cat: { - aliases, TContext = Context>(params?: RequestParams.CatAliases, options?: TransportRequestOptions): TransportRequestPromise> - aliases, TContext = Context>(callback: callbackFn): TransportRequestCallback - aliases, TContext = Context>(params: RequestParams.CatAliases, callback: callbackFn): TransportRequestCallback - aliases, TContext = Context>(params: RequestParams.CatAliases, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - allocation, TContext = Context>(params?: RequestParams.CatAllocation, options?: TransportRequestOptions): TransportRequestPromise> - allocation, TContext = Context>(callback: callbackFn): TransportRequestCallback - allocation, TContext = Context>(params: RequestParams.CatAllocation, callback: callbackFn): TransportRequestCallback - allocation, TContext = Context>(params: RequestParams.CatAllocation, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - count, TContext = Context>(params?: RequestParams.CatCount, options?: TransportRequestOptions): TransportRequestPromise> - count, TContext = Context>(callback: callbackFn): TransportRequestCallback - count, TContext = Context>(params: RequestParams.CatCount, callback: callbackFn): TransportRequestCallback - count, TContext = Context>(params: RequestParams.CatCount, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - fielddata, TContext = Context>(params?: RequestParams.CatFielddata, options?: TransportRequestOptions): TransportRequestPromise> - fielddata, TContext = Context>(callback: callbackFn): TransportRequestCallback - fielddata, TContext = Context>(params: RequestParams.CatFielddata, callback: callbackFn): TransportRequestCallback - fielddata, TContext = Context>(params: RequestParams.CatFielddata, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - health, TContext = Context>(params?: RequestParams.CatHealth, options?: TransportRequestOptions): TransportRequestPromise> - health, TContext = 
Context>(callback: callbackFn): TransportRequestCallback - health, TContext = Context>(params: RequestParams.CatHealth, callback: callbackFn): TransportRequestCallback - health, TContext = Context>(params: RequestParams.CatHealth, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - help, TContext = Context>(params?: RequestParams.CatHelp, options?: TransportRequestOptions): TransportRequestPromise> - help, TContext = Context>(callback: callbackFn): TransportRequestCallback - help, TContext = Context>(params: RequestParams.CatHelp, callback: callbackFn): TransportRequestCallback - help, TContext = Context>(params: RequestParams.CatHelp, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - indices, TContext = Context>(params?: RequestParams.CatIndices, options?: TransportRequestOptions): TransportRequestPromise> - indices, TContext = Context>(callback: callbackFn): TransportRequestCallback - indices, TContext = Context>(params: RequestParams.CatIndices, callback: callbackFn): TransportRequestCallback - indices, TContext = Context>(params: RequestParams.CatIndices, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - master, TContext = Context>(params?: RequestParams.CatMaster, options?: TransportRequestOptions): TransportRequestPromise> - master, TContext = Context>(callback: callbackFn): TransportRequestCallback - master, TContext = Context>(params: RequestParams.CatMaster, callback: callbackFn): TransportRequestCallback - master, TContext = Context>(params: RequestParams.CatMaster, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - ml_data_frame_analytics, TContext = Context>(params?: RequestParams.CatMlDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - ml_data_frame_analytics, TContext = Context>(callback: callbackFn): TransportRequestCallback - ml_data_frame_analytics, TContext = Context>(params: RequestParams.CatMlDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - ml_data_frame_analytics, TContext = Context>(params: RequestParams.CatMlDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - mlDataFrameAnalytics, TContext = Context>(params?: RequestParams.CatMlDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - mlDataFrameAnalytics, TContext = Context>(callback: callbackFn): TransportRequestCallback - mlDataFrameAnalytics, TContext = Context>(params: RequestParams.CatMlDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - mlDataFrameAnalytics, TContext = Context>(params: RequestParams.CatMlDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - ml_datafeeds, TContext = Context>(params?: RequestParams.CatMlDatafeeds, options?: TransportRequestOptions): TransportRequestPromise> - ml_datafeeds, TContext = Context>(callback: callbackFn): TransportRequestCallback - ml_datafeeds, TContext = Context>(params: RequestParams.CatMlDatafeeds, callback: callbackFn): TransportRequestCallback - ml_datafeeds, TContext = Context>(params: RequestParams.CatMlDatafeeds, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - mlDatafeeds, TContext = Context>(params?: RequestParams.CatMlDatafeeds, options?: TransportRequestOptions): TransportRequestPromise> - mlDatafeeds, TContext = Context>(callback: callbackFn): TransportRequestCallback - mlDatafeeds, TContext = 
Context>(params: RequestParams.CatMlDatafeeds, callback: callbackFn): TransportRequestCallback - mlDatafeeds, TContext = Context>(params: RequestParams.CatMlDatafeeds, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - ml_jobs, TContext = Context>(params?: RequestParams.CatMlJobs, options?: TransportRequestOptions): TransportRequestPromise> - ml_jobs, TContext = Context>(callback: callbackFn): TransportRequestCallback - ml_jobs, TContext = Context>(params: RequestParams.CatMlJobs, callback: callbackFn): TransportRequestCallback - ml_jobs, TContext = Context>(params: RequestParams.CatMlJobs, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - mlJobs, TContext = Context>(params?: RequestParams.CatMlJobs, options?: TransportRequestOptions): TransportRequestPromise> - mlJobs, TContext = Context>(callback: callbackFn): TransportRequestCallback - mlJobs, TContext = Context>(params: RequestParams.CatMlJobs, callback: callbackFn): TransportRequestCallback - mlJobs, TContext = Context>(params: RequestParams.CatMlJobs, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - ml_trained_models, TContext = Context>(params?: RequestParams.CatMlTrainedModels, options?: TransportRequestOptions): TransportRequestPromise> - ml_trained_models, TContext = Context>(callback: callbackFn): TransportRequestCallback - ml_trained_models, TContext = Context>(params: RequestParams.CatMlTrainedModels, callback: callbackFn): TransportRequestCallback - ml_trained_models, TContext = Context>(params: RequestParams.CatMlTrainedModels, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - mlTrainedModels, TContext = Context>(params?: RequestParams.CatMlTrainedModels, options?: TransportRequestOptions): TransportRequestPromise> - mlTrainedModels, TContext = Context>(callback: callbackFn): TransportRequestCallback - mlTrainedModels, TContext = Context>(params: RequestParams.CatMlTrainedModels, callback: callbackFn): TransportRequestCallback - mlTrainedModels, TContext = Context>(params: RequestParams.CatMlTrainedModels, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - nodeattrs, TContext = Context>(params?: RequestParams.CatNodeattrs, options?: TransportRequestOptions): TransportRequestPromise> - nodeattrs, TContext = Context>(callback: callbackFn): TransportRequestCallback - nodeattrs, TContext = Context>(params: RequestParams.CatNodeattrs, callback: callbackFn): TransportRequestCallback - nodeattrs, TContext = Context>(params: RequestParams.CatNodeattrs, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - nodes, TContext = Context>(params?: RequestParams.CatNodes, options?: TransportRequestOptions): TransportRequestPromise> - nodes, TContext = Context>(callback: callbackFn): TransportRequestCallback - nodes, TContext = Context>(params: RequestParams.CatNodes, callback: callbackFn): TransportRequestCallback - nodes, TContext = Context>(params: RequestParams.CatNodes, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - pending_tasks, TContext = Context>(params?: RequestParams.CatPendingTasks, options?: TransportRequestOptions): TransportRequestPromise> - pending_tasks, TContext = Context>(callback: callbackFn): TransportRequestCallback - pending_tasks, TContext = Context>(params: RequestParams.CatPendingTasks, callback: callbackFn): TransportRequestCallback - pending_tasks, TContext = Context>(params: 
RequestParams.CatPendingTasks, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - pendingTasks, TContext = Context>(params?: RequestParams.CatPendingTasks, options?: TransportRequestOptions): TransportRequestPromise> - pendingTasks, TContext = Context>(callback: callbackFn): TransportRequestCallback - pendingTasks, TContext = Context>(params: RequestParams.CatPendingTasks, callback: callbackFn): TransportRequestCallback - pendingTasks, TContext = Context>(params: RequestParams.CatPendingTasks, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - plugins, TContext = Context>(params?: RequestParams.CatPlugins, options?: TransportRequestOptions): TransportRequestPromise> - plugins, TContext = Context>(callback: callbackFn): TransportRequestCallback - plugins, TContext = Context>(params: RequestParams.CatPlugins, callback: callbackFn): TransportRequestCallback - plugins, TContext = Context>(params: RequestParams.CatPlugins, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - recovery, TContext = Context>(params?: RequestParams.CatRecovery, options?: TransportRequestOptions): TransportRequestPromise> - recovery, TContext = Context>(callback: callbackFn): TransportRequestCallback - recovery, TContext = Context>(params: RequestParams.CatRecovery, callback: callbackFn): TransportRequestCallback - recovery, TContext = Context>(params: RequestParams.CatRecovery, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - repositories, TContext = Context>(params?: RequestParams.CatRepositories, options?: TransportRequestOptions): TransportRequestPromise> - repositories, TContext = Context>(callback: callbackFn): TransportRequestCallback - repositories, TContext = Context>(params: RequestParams.CatRepositories, callback: callbackFn): TransportRequestCallback - repositories, TContext = Context>(params: RequestParams.CatRepositories, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - segments, TContext = Context>(params?: RequestParams.CatSegments, options?: TransportRequestOptions): TransportRequestPromise> - segments, TContext = Context>(callback: callbackFn): TransportRequestCallback - segments, TContext = Context>(params: RequestParams.CatSegments, callback: callbackFn): TransportRequestCallback - segments, TContext = Context>(params: RequestParams.CatSegments, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - shards, TContext = Context>(params?: RequestParams.CatShards, options?: TransportRequestOptions): TransportRequestPromise> - shards, TContext = Context>(callback: callbackFn): TransportRequestCallback - shards, TContext = Context>(params: RequestParams.CatShards, callback: callbackFn): TransportRequestCallback - shards, TContext = Context>(params: RequestParams.CatShards, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - snapshots, TContext = Context>(params?: RequestParams.CatSnapshots, options?: TransportRequestOptions): TransportRequestPromise> - snapshots, TContext = Context>(callback: callbackFn): TransportRequestCallback - snapshots, TContext = Context>(params: RequestParams.CatSnapshots, callback: callbackFn): TransportRequestCallback - snapshots, TContext = Context>(params: RequestParams.CatSnapshots, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - tasks, TContext = Context>(params?: RequestParams.CatTasks, options?: 
TransportRequestOptions): TransportRequestPromise> - tasks, TContext = Context>(callback: callbackFn): TransportRequestCallback - tasks, TContext = Context>(params: RequestParams.CatTasks, callback: callbackFn): TransportRequestCallback - tasks, TContext = Context>(params: RequestParams.CatTasks, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - templates, TContext = Context>(params?: RequestParams.CatTemplates, options?: TransportRequestOptions): TransportRequestPromise> - templates, TContext = Context>(callback: callbackFn): TransportRequestCallback - templates, TContext = Context>(params: RequestParams.CatTemplates, callback: callbackFn): TransportRequestCallback - templates, TContext = Context>(params: RequestParams.CatTemplates, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - thread_pool, TContext = Context>(params?: RequestParams.CatThreadPool, options?: TransportRequestOptions): TransportRequestPromise> - thread_pool, TContext = Context>(callback: callbackFn): TransportRequestCallback - thread_pool, TContext = Context>(params: RequestParams.CatThreadPool, callback: callbackFn): TransportRequestCallback - thread_pool, TContext = Context>(params: RequestParams.CatThreadPool, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - threadPool, TContext = Context>(params?: RequestParams.CatThreadPool, options?: TransportRequestOptions): TransportRequestPromise> - threadPool, TContext = Context>(callback: callbackFn): TransportRequestCallback - threadPool, TContext = Context>(params: RequestParams.CatThreadPool, callback: callbackFn): TransportRequestCallback - threadPool, TContext = Context>(params: RequestParams.CatThreadPool, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - transforms, TContext = Context>(params?: RequestParams.CatTransforms, options?: TransportRequestOptions): TransportRequestPromise> - transforms, TContext = Context>(callback: callbackFn): TransportRequestCallback - transforms, TContext = Context>(params: RequestParams.CatTransforms, callback: callbackFn): TransportRequestCallback - transforms, TContext = Context>(params: RequestParams.CatTransforms, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - ccr: { - delete_auto_follow_pattern, TContext = Context>(params?: RequestParams.CcrDeleteAutoFollowPattern, options?: TransportRequestOptions): TransportRequestPromise> - delete_auto_follow_pattern, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete_auto_follow_pattern, TContext = Context>(params: RequestParams.CcrDeleteAutoFollowPattern, callback: callbackFn): TransportRequestCallback - delete_auto_follow_pattern, TContext = Context>(params: RequestParams.CcrDeleteAutoFollowPattern, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteAutoFollowPattern, TContext = Context>(params?: RequestParams.CcrDeleteAutoFollowPattern, options?: TransportRequestOptions): TransportRequestPromise> - deleteAutoFollowPattern, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteAutoFollowPattern, TContext = Context>(params: RequestParams.CcrDeleteAutoFollowPattern, callback: callbackFn): TransportRequestCallback - deleteAutoFollowPattern, TContext = Context>(params: RequestParams.CcrDeleteAutoFollowPattern, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - follow, TRequestBody extends RequestBody = 
Record, TContext = Context>(params?: RequestParams.CcrFollow, options?: TransportRequestOptions): TransportRequestPromise> - follow, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - follow, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrFollow, callback: callbackFn): TransportRequestCallback - follow, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrFollow, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - follow_info, TContext = Context>(params?: RequestParams.CcrFollowInfo, options?: TransportRequestOptions): TransportRequestPromise> - follow_info, TContext = Context>(callback: callbackFn): TransportRequestCallback - follow_info, TContext = Context>(params: RequestParams.CcrFollowInfo, callback: callbackFn): TransportRequestCallback - follow_info, TContext = Context>(params: RequestParams.CcrFollowInfo, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - followInfo, TContext = Context>(params?: RequestParams.CcrFollowInfo, options?: TransportRequestOptions): TransportRequestPromise> - followInfo, TContext = Context>(callback: callbackFn): TransportRequestCallback - followInfo, TContext = Context>(params: RequestParams.CcrFollowInfo, callback: callbackFn): TransportRequestCallback - followInfo, TContext = Context>(params: RequestParams.CcrFollowInfo, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - follow_stats, TContext = Context>(params?: RequestParams.CcrFollowStats, options?: TransportRequestOptions): TransportRequestPromise> - follow_stats, TContext = Context>(callback: callbackFn): TransportRequestCallback - follow_stats, TContext = Context>(params: RequestParams.CcrFollowStats, callback: callbackFn): TransportRequestCallback - follow_stats, TContext = Context>(params: RequestParams.CcrFollowStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - followStats, TContext = Context>(params?: RequestParams.CcrFollowStats, options?: TransportRequestOptions): TransportRequestPromise> - followStats, TContext = Context>(callback: callbackFn): TransportRequestCallback - followStats, TContext = Context>(params: RequestParams.CcrFollowStats, callback: callbackFn): TransportRequestCallback - followStats, TContext = Context>(params: RequestParams.CcrFollowStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - forget_follower, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.CcrForgetFollower, options?: TransportRequestOptions): TransportRequestPromise> - forget_follower, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - forget_follower, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrForgetFollower, callback: callbackFn): TransportRequestCallback - forget_follower, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrForgetFollower, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - forgetFollower, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.CcrForgetFollower, options?: TransportRequestOptions): TransportRequestPromise> - forgetFollower, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): 
TransportRequestCallback - forgetFollower, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrForgetFollower, callback: callbackFn): TransportRequestCallback - forgetFollower, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrForgetFollower, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_auto_follow_pattern, TContext = Context>(params?: RequestParams.CcrGetAutoFollowPattern, options?: TransportRequestOptions): TransportRequestPromise> - get_auto_follow_pattern, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_auto_follow_pattern, TContext = Context>(params: RequestParams.CcrGetAutoFollowPattern, callback: callbackFn): TransportRequestCallback - get_auto_follow_pattern, TContext = Context>(params: RequestParams.CcrGetAutoFollowPattern, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getAutoFollowPattern, TContext = Context>(params?: RequestParams.CcrGetAutoFollowPattern, options?: TransportRequestOptions): TransportRequestPromise> - getAutoFollowPattern, TContext = Context>(callback: callbackFn): TransportRequestCallback - getAutoFollowPattern, TContext = Context>(params: RequestParams.CcrGetAutoFollowPattern, callback: callbackFn): TransportRequestCallback - getAutoFollowPattern, TContext = Context>(params: RequestParams.CcrGetAutoFollowPattern, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - pause_auto_follow_pattern, TContext = Context>(params?: RequestParams.CcrPauseAutoFollowPattern, options?: TransportRequestOptions): TransportRequestPromise> - pause_auto_follow_pattern, TContext = Context>(callback: callbackFn): TransportRequestCallback - pause_auto_follow_pattern, TContext = Context>(params: RequestParams.CcrPauseAutoFollowPattern, callback: callbackFn): TransportRequestCallback - pause_auto_follow_pattern, TContext = Context>(params: RequestParams.CcrPauseAutoFollowPattern, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - pauseAutoFollowPattern, TContext = Context>(params?: RequestParams.CcrPauseAutoFollowPattern, options?: TransportRequestOptions): TransportRequestPromise> - pauseAutoFollowPattern, TContext = Context>(callback: callbackFn): TransportRequestCallback - pauseAutoFollowPattern, TContext = Context>(params: RequestParams.CcrPauseAutoFollowPattern, callback: callbackFn): TransportRequestCallback - pauseAutoFollowPattern, TContext = Context>(params: RequestParams.CcrPauseAutoFollowPattern, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - pause_follow, TContext = Context>(params?: RequestParams.CcrPauseFollow, options?: TransportRequestOptions): TransportRequestPromise> - pause_follow, TContext = Context>(callback: callbackFn): TransportRequestCallback - pause_follow, TContext = Context>(params: RequestParams.CcrPauseFollow, callback: callbackFn): TransportRequestCallback - pause_follow, TContext = Context>(params: RequestParams.CcrPauseFollow, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - pauseFollow, TContext = Context>(params?: RequestParams.CcrPauseFollow, options?: TransportRequestOptions): TransportRequestPromise> - pauseFollow, TContext = Context>(callback: callbackFn): TransportRequestCallback - pauseFollow, TContext = Context>(params: RequestParams.CcrPauseFollow, callback: callbackFn): TransportRequestCallback - pauseFollow, TContext = 
Context>(params: RequestParams.CcrPauseFollow, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_auto_follow_pattern, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.CcrPutAutoFollowPattern, options?: TransportRequestOptions): TransportRequestPromise> - put_auto_follow_pattern, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_auto_follow_pattern, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrPutAutoFollowPattern, callback: callbackFn): TransportRequestCallback - put_auto_follow_pattern, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrPutAutoFollowPattern, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putAutoFollowPattern, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.CcrPutAutoFollowPattern, options?: TransportRequestOptions): TransportRequestPromise> - putAutoFollowPattern, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - putAutoFollowPattern, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrPutAutoFollowPattern, callback: callbackFn): TransportRequestCallback - putAutoFollowPattern, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrPutAutoFollowPattern, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - resume_auto_follow_pattern, TContext = Context>(params?: RequestParams.CcrResumeAutoFollowPattern, options?: TransportRequestOptions): TransportRequestPromise> - resume_auto_follow_pattern, TContext = Context>(callback: callbackFn): TransportRequestCallback - resume_auto_follow_pattern, TContext = Context>(params: RequestParams.CcrResumeAutoFollowPattern, callback: callbackFn): TransportRequestCallback - resume_auto_follow_pattern, TContext = Context>(params: RequestParams.CcrResumeAutoFollowPattern, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - resumeAutoFollowPattern, TContext = Context>(params?: RequestParams.CcrResumeAutoFollowPattern, options?: TransportRequestOptions): TransportRequestPromise> - resumeAutoFollowPattern, TContext = Context>(callback: callbackFn): TransportRequestCallback - resumeAutoFollowPattern, TContext = Context>(params: RequestParams.CcrResumeAutoFollowPattern, callback: callbackFn): TransportRequestCallback - resumeAutoFollowPattern, TContext = Context>(params: RequestParams.CcrResumeAutoFollowPattern, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - resume_follow, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.CcrResumeFollow, options?: TransportRequestOptions): TransportRequestPromise> - resume_follow, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - resume_follow, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrResumeFollow, callback: callbackFn): TransportRequestCallback - resume_follow, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrResumeFollow, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - resumeFollow, TRequestBody extends RequestBody = Record, TContext = Context>(params?: 
RequestParams.CcrResumeFollow, options?: TransportRequestOptions): TransportRequestPromise> - resumeFollow, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - resumeFollow, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrResumeFollow, callback: callbackFn): TransportRequestCallback - resumeFollow, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.CcrResumeFollow, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stats, TContext = Context>(params?: RequestParams.CcrStats, options?: TransportRequestOptions): TransportRequestPromise> - stats, TContext = Context>(callback: callbackFn): TransportRequestCallback - stats, TContext = Context>(params: RequestParams.CcrStats, callback: callbackFn): TransportRequestCallback - stats, TContext = Context>(params: RequestParams.CcrStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - unfollow, TContext = Context>(params?: RequestParams.CcrUnfollow, options?: TransportRequestOptions): TransportRequestPromise> - unfollow, TContext = Context>(callback: callbackFn): TransportRequestCallback - unfollow, TContext = Context>(params: RequestParams.CcrUnfollow, callback: callbackFn): TransportRequestCallback - unfollow, TContext = Context>(params: RequestParams.CcrUnfollow, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - clear_scroll, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.ClearScroll, options?: TransportRequestOptions): TransportRequestPromise> - clear_scroll, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - clear_scroll, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClearScroll, callback: callbackFn): TransportRequestCallback - clear_scroll, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClearScroll, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clearScroll, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.ClearScroll, options?: TransportRequestOptions): TransportRequestPromise> - clearScroll, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - clearScroll, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClearScroll, callback: callbackFn): TransportRequestCallback - clearScroll, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClearScroll, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - close_point_in_time, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.ClosePointInTime, options?: TransportRequestOptions): TransportRequestPromise> - close_point_in_time, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - close_point_in_time, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClosePointInTime, callback: callbackFn): TransportRequestCallback - close_point_in_time, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClosePointInTime, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - 
closePointInTime, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.ClosePointInTime, options?: TransportRequestOptions): TransportRequestPromise> - closePointInTime, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - closePointInTime, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClosePointInTime, callback: callbackFn): TransportRequestCallback - closePointInTime, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClosePointInTime, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - cluster: { - allocation_explain, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.ClusterAllocationExplain, options?: TransportRequestOptions): TransportRequestPromise> - allocation_explain, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - allocation_explain, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClusterAllocationExplain, callback: callbackFn): TransportRequestCallback - allocation_explain, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClusterAllocationExplain, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - allocationExplain, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.ClusterAllocationExplain, options?: TransportRequestOptions): TransportRequestPromise> - allocationExplain, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - allocationExplain, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClusterAllocationExplain, callback: callbackFn): TransportRequestCallback - allocationExplain, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.ClusterAllocationExplain, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_component_template, TContext = Context>(params?: RequestParams.ClusterDeleteComponentTemplate, options?: TransportRequestOptions): TransportRequestPromise> - delete_component_template, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete_component_template, TContext = Context>(params: RequestParams.ClusterDeleteComponentTemplate, callback: callbackFn): TransportRequestCallback - delete_component_template, TContext = Context>(params: RequestParams.ClusterDeleteComponentTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteComponentTemplate, TContext = Context>(params?: RequestParams.ClusterDeleteComponentTemplate, options?: TransportRequestOptions): TransportRequestPromise> - deleteComponentTemplate, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteComponentTemplate, TContext = Context>(params: RequestParams.ClusterDeleteComponentTemplate, callback: callbackFn): TransportRequestCallback - deleteComponentTemplate, TContext = Context>(params: RequestParams.ClusterDeleteComponentTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_voting_config_exclusions, TContext = Context>(params?: RequestParams.ClusterDeleteVotingConfigExclusions, options?: TransportRequestOptions): TransportRequestPromise> - delete_voting_config_exclusions, TContext 
= Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
    delete_voting_config_exclusions<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.ClusterDeleteVotingConfigExclusions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
    delete_voting_config_exclusions<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.ClusterDeleteVotingConfigExclusions, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
    // Every method in this listing exposes four overloads: a promise form
    // taking optional params and options and returning
    // TransportRequestPromise<ApiResponse<TResponse, TContext>>, plus the
    // three callback forms shown above (callback only, params + callback,
    // params + options + callback), each returning TransportRequestCallback.
    // Only the promise form is spelled out below; every camelCase alias
    // carries signatures identical to its snake_case twin.
    deleteVotingConfigExclusions<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterDeleteVotingConfigExclusions, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
    exists_component_template<TResponse = boolean, TContext = Context>(params?: RequestParams.ClusterExistsComponentTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also existsComponentTemplate
    get_component_template<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterGetComponentTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also getComponentTemplate
    get_settings<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterGetSettings, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also getSettings
    health<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterHealth, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
    pending_tasks<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterPendingTasks, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also pendingTasks
    post_voting_config_exclusions<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterPostVotingConfigExclusions, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also postVotingConfigExclusions
    put_component_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterPutComponentTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also putComponentTemplate
    put_settings<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterPutSettings<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also putSettings
    remote_info<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterRemoteInfo, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also remoteInfo
    reroute<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterReroute<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
    state<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterState, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
    stats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.ClusterStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
  }
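  // A minimal usage sketch of the promise-style overload (illustrative only,
  // not part of the original definitions), assuming a client constructed as
  // shown:
  //
  //   const client = new Client({ node: 'http://localhost:9200/' })
  //   const { body } = await client.cluster.health()
  //   await client.cluster.putSettings({
  //     body: { transient: { 'cluster.routing.allocation.enable': 'all' } }
  //   })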
  count<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.Count<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
  create<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.Create<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
  dangling_indices: {
    delete_dangling_index<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.DanglingIndicesDeleteDanglingIndex, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also deleteDanglingIndex
    import_dangling_index<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.DanglingIndicesImportDanglingIndex, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also importDanglingIndex
    list_dangling_indices<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.DanglingIndicesListDanglingIndices, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also listDanglingIndices
  }
  danglingIndices: {
    // duplicates the three method pairs of dangling_indices above, with
    // identical signatures
  }
  delete<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.Delete, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
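  // The callback overloads cover the same operations; a sketch (illustrative
  // assumption, mirroring the promise example above):
  //
  //   client.delete({ index: 'my-index', id: '1' }, (err, result) => {
  //     if (err) console.error(err)
  //   })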
  delete_by_query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.DeleteByQuery<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also deleteByQuery
  delete_by_query_rethrottle<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.DeleteByQueryRethrottle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also deleteByQueryRethrottle
  delete_script<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.DeleteScript, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also deleteScript
  enrich: {
    delete_policy<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.EnrichDeletePolicy, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also deletePolicy
    execute_policy<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.EnrichExecutePolicy, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also executePolicy
    get_policy<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.EnrichGetPolicy, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also getPolicy
    put_policy<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.EnrichPutPolicy<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also putPolicy
    stats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.EnrichStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
  }
  eql: {
    delete<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.EqlDelete, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
    get<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.EqlGet, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
    get_status<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.EqlGetStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also getStatus
    search<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.EqlSearch<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
  }
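  // Sketch of a body-accepting call (illustrative assumption): TRequestBody
  // types the request body that eql.search and the other body-bearing
  // methods forward:
  //
  //   const { body } = await client.eql.search({
  //     index: 'my-index',
  //     body: { query: 'process where process.name == "regsvr32.exe"' }
  //   })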
  exists<TResponse = boolean, TContext = Context>(params?: RequestParams.Exists, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
  exists_source<TResponse = boolean, TContext = Context>(params?: RequestParams.ExistsSource, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also existsSource
  explain<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.Explain<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
  features: {
    get_features<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.FeaturesGetFeatures, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also getFeatures
    reset_features<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.FeaturesResetFeatures, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also resetFeatures
  }
  field_caps<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.FieldCaps<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also fieldCaps
  fleet: {
    global_checkpoints<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.FleetGlobalCheckpoints, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also globalCheckpoints
  }
  get<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.Get, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
  get_script<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.GetScript, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also getScript
  get_script_context<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.GetScriptContext, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also getScriptContext
  get_script_languages<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.GetScriptLanguages, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also getScriptLanguages
  get_source<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.GetSource, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also getSource
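  // Sketch of the TResponse generic (illustrative assumption): callers can
  // type the response body instead of relying on the Record<string, any>
  // default:
  //
  //   interface GetResponse { _source: { title: string } }
  //   const { body } = await client.get<GetResponse>({ index: 'my-index', id: '1' })
  //   console.log(body._source.title)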
  graph: {
    explore<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.GraphExplore<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
  }
  ilm: {
    delete_lifecycle<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IlmDeleteLifecycle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also deleteLifecycle
    explain_lifecycle<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IlmExplainLifecycle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also explainLifecycle
    get_lifecycle<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IlmGetLifecycle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also getLifecycle
    get_status<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IlmGetStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also getStatus
    migrate_to_data_tiers<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IlmMigrateToDataTiers<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also migrateToDataTiers
    move_to_step<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IlmMoveToStep<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also moveToStep
    put_lifecycle<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IlmPutLifecycle<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also putLifecycle
    remove_policy<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IlmRemovePolicy, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>> // also removePolicy
    retry<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IlmRetry, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
    start<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IlmStart, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
    stop<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IlmStop, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
  }
TContext = Context>(params?: RequestParams.IlmStart, options?: TransportRequestOptions): TransportRequestPromise> - start, TContext = Context>(callback: callbackFn): TransportRequestCallback - start, TContext = Context>(params: RequestParams.IlmStart, callback: callbackFn): TransportRequestCallback - start, TContext = Context>(params: RequestParams.IlmStart, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stop, TContext = Context>(params?: RequestParams.IlmStop, options?: TransportRequestOptions): TransportRequestPromise> - stop, TContext = Context>(callback: callbackFn): TransportRequestCallback - stop, TContext = Context>(params: RequestParams.IlmStop, callback: callbackFn): TransportRequestCallback - stop, TContext = Context>(params: RequestParams.IlmStop, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - index, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.Index, options?: TransportRequestOptions): TransportRequestPromise> - index, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - index, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Index, callback: callbackFn): TransportRequestCallback - index, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Index, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - indices: { - add_block, TContext = Context>(params?: RequestParams.IndicesAddBlock, options?: TransportRequestOptions): TransportRequestPromise> - add_block, TContext = Context>(callback: callbackFn): TransportRequestCallback - add_block, TContext = Context>(params: RequestParams.IndicesAddBlock, callback: callbackFn): TransportRequestCallback - add_block, TContext = Context>(params: RequestParams.IndicesAddBlock, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - addBlock, TContext = Context>(params?: RequestParams.IndicesAddBlock, options?: TransportRequestOptions): TransportRequestPromise> - addBlock, TContext = Context>(callback: callbackFn): TransportRequestCallback - addBlock, TContext = Context>(params: RequestParams.IndicesAddBlock, callback: callbackFn): TransportRequestCallback - addBlock, TContext = Context>(params: RequestParams.IndicesAddBlock, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - analyze, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.IndicesAnalyze, options?: TransportRequestOptions): TransportRequestPromise> - analyze, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - analyze, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.IndicesAnalyze, callback: callbackFn): TransportRequestCallback - analyze, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.IndicesAnalyze, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clear_cache, TContext = Context>(params?: RequestParams.IndicesClearCache, options?: TransportRequestOptions): TransportRequestPromise> - clear_cache, TContext = Context>(callback: callbackFn): TransportRequestCallback - clear_cache, TContext = Context>(params: RequestParams.IndicesClearCache, callback: callbackFn): TransportRequestCallback - clear_cache, TContext = Context>(params: 
RequestParams.IndicesClearCache, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clearCache, TContext = Context>(params?: RequestParams.IndicesClearCache, options?: TransportRequestOptions): TransportRequestPromise> - clearCache, TContext = Context>(callback: callbackFn): TransportRequestCallback - clearCache, TContext = Context>(params: RequestParams.IndicesClearCache, callback: callbackFn): TransportRequestCallback - clearCache, TContext = Context>(params: RequestParams.IndicesClearCache, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clone, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.IndicesClone, options?: TransportRequestOptions): TransportRequestPromise> - clone, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - clone, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.IndicesClone, callback: callbackFn): TransportRequestCallback - clone, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.IndicesClone, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - close, TContext = Context>(params?: RequestParams.IndicesClose, options?: TransportRequestOptions): TransportRequestPromise> - close, TContext = Context>(callback: callbackFn): TransportRequestCallback - close, TContext = Context>(params: RequestParams.IndicesClose, callback: callbackFn): TransportRequestCallback - close, TContext = Context>(params: RequestParams.IndicesClose, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - create, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.IndicesCreate, options?: TransportRequestOptions): TransportRequestPromise> - create, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - create, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.IndicesCreate, callback: callbackFn): TransportRequestCallback - create, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.IndicesCreate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - create_data_stream, TContext = Context>(params?: RequestParams.IndicesCreateDataStream, options?: TransportRequestOptions): TransportRequestPromise> - create_data_stream, TContext = Context>(callback: callbackFn): TransportRequestCallback - create_data_stream, TContext = Context>(params: RequestParams.IndicesCreateDataStream, callback: callbackFn): TransportRequestCallback - create_data_stream, TContext = Context>(params: RequestParams.IndicesCreateDataStream, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - createDataStream, TContext = Context>(params?: RequestParams.IndicesCreateDataStream, options?: TransportRequestOptions): TransportRequestPromise> - createDataStream, TContext = Context>(callback: callbackFn): TransportRequestCallback - createDataStream, TContext = Context>(params: RequestParams.IndicesCreateDataStream, callback: callbackFn): TransportRequestCallback - createDataStream, TContext = Context>(params: RequestParams.IndicesCreateDataStream, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - data_streams_stats, TContext = Context>(params?: RequestParams.IndicesDataStreamsStats, options?: 
TransportRequestOptions): TransportRequestPromise> - data_streams_stats, TContext = Context>(callback: callbackFn): TransportRequestCallback - data_streams_stats, TContext = Context>(params: RequestParams.IndicesDataStreamsStats, callback: callbackFn): TransportRequestCallback - data_streams_stats, TContext = Context>(params: RequestParams.IndicesDataStreamsStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - dataStreamsStats, TContext = Context>(params?: RequestParams.IndicesDataStreamsStats, options?: TransportRequestOptions): TransportRequestPromise> - dataStreamsStats, TContext = Context>(callback: callbackFn): TransportRequestCallback - dataStreamsStats, TContext = Context>(params: RequestParams.IndicesDataStreamsStats, callback: callbackFn): TransportRequestCallback - dataStreamsStats, TContext = Context>(params: RequestParams.IndicesDataStreamsStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete, TContext = Context>(params?: RequestParams.IndicesDelete, options?: TransportRequestOptions): TransportRequestPromise> - delete, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete, TContext = Context>(params: RequestParams.IndicesDelete, callback: callbackFn): TransportRequestCallback - delete, TContext = Context>(params: RequestParams.IndicesDelete, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_alias, TContext = Context>(params?: RequestParams.IndicesDeleteAlias, options?: TransportRequestOptions): TransportRequestPromise> - delete_alias, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete_alias, TContext = Context>(params: RequestParams.IndicesDeleteAlias, callback: callbackFn): TransportRequestCallback - delete_alias, TContext = Context>(params: RequestParams.IndicesDeleteAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteAlias, TContext = Context>(params?: RequestParams.IndicesDeleteAlias, options?: TransportRequestOptions): TransportRequestPromise> - deleteAlias, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteAlias, TContext = Context>(params: RequestParams.IndicesDeleteAlias, callback: callbackFn): TransportRequestCallback - deleteAlias, TContext = Context>(params: RequestParams.IndicesDeleteAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_data_stream, TContext = Context>(params?: RequestParams.IndicesDeleteDataStream, options?: TransportRequestOptions): TransportRequestPromise> - delete_data_stream, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete_data_stream, TContext = Context>(params: RequestParams.IndicesDeleteDataStream, callback: callbackFn): TransportRequestCallback - delete_data_stream, TContext = Context>(params: RequestParams.IndicesDeleteDataStream, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteDataStream, TContext = Context>(params?: RequestParams.IndicesDeleteDataStream, options?: TransportRequestOptions): TransportRequestPromise> - deleteDataStream, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteDataStream, TContext = Context>(params: RequestParams.IndicesDeleteDataStream, callback: callbackFn): TransportRequestCallback - deleteDataStream, TContext = Context>(params: RequestParams.IndicesDeleteDataStream, options: TransportRequestOptions, callback: callbackFn): 
-    delete_index_template<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesDeleteIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_index_template<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_index_template<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDeleteIndexTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_index_template<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDeleteIndexTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteIndexTemplate<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesDeleteIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteIndexTemplate<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteIndexTemplate<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDeleteIndexTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteIndexTemplate<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDeleteIndexTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_template<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesDeleteTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_template<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_template<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDeleteTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_template<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDeleteTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteTemplate<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesDeleteTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteTemplate<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteTemplate<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDeleteTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteTemplate<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDeleteTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    disk_usage<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesDiskUsage, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    disk_usage<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    disk_usage<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDiskUsage, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    disk_usage<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDiskUsage, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    diskUsage<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesDiskUsage, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    diskUsage<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    diskUsage<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDiskUsage, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    diskUsage<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesDiskUsage, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists<TResponse = boolean, TContext = Context>(params?: RequestParams.IndicesExists, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    exists<TResponse = boolean, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExists, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExists, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_alias<TResponse = boolean, TContext = Context>(params?: RequestParams.IndicesExistsAlias, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    exists_alias<TResponse = boolean, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_alias<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsAlias, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_alias<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsAlias, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsAlias<TResponse = boolean, TContext = Context>(params?: RequestParams.IndicesExistsAlias, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    existsAlias<TResponse = boolean, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsAlias<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsAlias, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsAlias<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsAlias, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_index_template<TResponse = boolean, TContext = Context>(params?: RequestParams.IndicesExistsIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    exists_index_template<TResponse = boolean, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_index_template<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsIndexTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_index_template<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsIndexTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsIndexTemplate<TResponse = boolean, TContext = Context>(params?: RequestParams.IndicesExistsIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    existsIndexTemplate<TResponse = boolean, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsIndexTemplate<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsIndexTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsIndexTemplate<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsIndexTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_template<TResponse = boolean, TContext = Context>(params?: RequestParams.IndicesExistsTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    exists_template<TResponse = boolean, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_template<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_template<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsTemplate<TResponse = boolean, TContext = Context>(params?: RequestParams.IndicesExistsTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    existsTemplate<TResponse = boolean, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsTemplate<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsTemplate<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_type<TResponse = boolean, TContext = Context>(params?: RequestParams.IndicesExistsType, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    exists_type<TResponse = boolean, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_type<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsType, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    exists_type<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsType, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsType<TResponse = boolean, TContext = Context>(params?: RequestParams.IndicesExistsType, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    existsType<TResponse = boolean, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsType<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsType, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    existsType<TResponse = boolean, TContext = Context>(params: RequestParams.IndicesExistsType, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
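// The exists-style APIs above default TResponse to boolean: the underlying HEAD
// request maps to a plain true/false response body. A sketch, assuming a local
// node and a hypothetical index name:
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function indexExists (): Promise<boolean> {
  const { body } = await client.indices.exists({ index: 'my-index' })
  return body // typed as boolean through TResponse = boolean
}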
-    field_usage_stats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesFieldUsageStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    field_usage_stats<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    field_usage_stats<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesFieldUsageStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    field_usage_stats<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesFieldUsageStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    fieldUsageStats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesFieldUsageStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    fieldUsageStats<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    fieldUsageStats<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesFieldUsageStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    fieldUsageStats<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesFieldUsageStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    flush<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesFlush, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    flush<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    flush<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesFlush, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    flush<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesFlush, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    forcemerge<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesForcemerge, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    forcemerge<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    forcemerge<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesForcemerge, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    forcemerge<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesForcemerge, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    freeze<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesFreeze, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    freeze<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    freeze<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesFreeze, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    freeze<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesFreeze, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGet, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGet, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGet, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_alias<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetAlias, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_alias<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_alias<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetAlias, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_alias<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetAlias, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getAlias<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetAlias, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getAlias<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getAlias<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetAlias, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getAlias<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetAlias, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_data_stream<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetDataStream, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_data_stream<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_data_stream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetDataStream, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_data_stream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetDataStream, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDataStream<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetDataStream, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getDataStream<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDataStream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetDataStream, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDataStream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetDataStream, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_field_mapping<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetFieldMapping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_field_mapping<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_field_mapping<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetFieldMapping, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_field_mapping<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetFieldMapping, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getFieldMapping<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetFieldMapping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getFieldMapping<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getFieldMapping<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetFieldMapping, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getFieldMapping<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetFieldMapping, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_index_template<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_index_template<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_index_template<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetIndexTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_index_template<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetIndexTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getIndexTemplate<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getIndexTemplate<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getIndexTemplate<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetIndexTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getIndexTemplate<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetIndexTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
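// The read-only getters above accept a TResponse type argument, so the response
// body can be typed at the call site. A sketch with a hypothetical index name and
// a caller-defined response shape:
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

interface MappingResponse {
  [index: string]: { mappings: Record<string, any> }
}

client.indices.getMapping<MappingResponse>({ index: 'my-index' })
  .then(({ body }) => console.log(body['my-index'].mappings))
  .catch(console.error)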
-    get_mapping<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetMapping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_mapping<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_mapping<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetMapping, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_mapping<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetMapping, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getMapping<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetMapping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getMapping<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getMapping<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetMapping, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getMapping<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetMapping, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_settings<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetSettings, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_settings<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_settings<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetSettings, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_settings<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetSettings, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getSettings<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetSettings, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getSettings<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getSettings<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetSettings, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getSettings<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetSettings, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_template<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_template<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_template<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_template<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTemplate<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesGetTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getTemplate<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTemplate<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetTemplate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTemplate<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesGetTemplate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    migrate_to_data_stream<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesMigrateToDataStream, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    migrate_to_data_stream<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    migrate_to_data_stream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesMigrateToDataStream, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    migrate_to_data_stream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesMigrateToDataStream, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    migrateToDataStream<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesMigrateToDataStream, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    migrateToDataStream<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    migrateToDataStream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesMigrateToDataStream, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    migrateToDataStream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesMigrateToDataStream, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    open<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesOpen, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    open<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    open<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesOpen, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    open<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesOpen, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    promote_data_stream<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPromoteDataStream, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    promote_data_stream<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    promote_data_stream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPromoteDataStream, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    promote_data_stream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPromoteDataStream, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    promoteDataStream<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPromoteDataStream, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    promoteDataStream<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    promoteDataStream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPromoteDataStream, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    promoteDataStream<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPromoteDataStream, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_alias<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPutAlias<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_alias<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_alias<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutAlias<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_alias<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutAlias<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putAlias<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPutAlias<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putAlias<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putAlias<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutAlias<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putAlias<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutAlias<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
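// The put-style methods above take a second TRequestBody type parameter that
// flows into the params' body property. A sketch for putAlias (index, alias and
// routing value are hypothetical):
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.indices.putAlias({
  index: 'my-index',
  name: 'my-alias',
  body: { routing: '1' } // typed through TRequestBody
}).catch(console.error)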
-    put_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPutIndexTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutIndexTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutIndexTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPutIndexTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutIndexTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutIndexTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_mapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPutMapping<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_mapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_mapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutMapping<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_mapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutMapping<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putMapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPutMapping<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putMapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putMapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutMapping<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putMapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutMapping<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_settings<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPutSettings<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_settings<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_settings<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutSettings<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_settings<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutSettings<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putSettings<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPutSettings<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putSettings<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putSettings<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutSettings<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putSettings<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutSettings<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPutTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesPutTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesPutTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    recovery<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesRecovery, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    recovery<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    recovery<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesRecovery, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    recovery<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesRecovery, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    refresh<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesRefresh, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    refresh<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    refresh<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesRefresh, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    refresh<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesRefresh, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    reload_search_analyzers<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesReloadSearchAnalyzers, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    reload_search_analyzers<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    reload_search_analyzers<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesReloadSearchAnalyzers, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    reload_search_analyzers<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesReloadSearchAnalyzers, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
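// putSettings and putTemplate above carry the settings or template definition as
// the typed request body. A sketch for a dynamic settings update (index name and
// replica count are hypothetical):
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.indices.putSettings({
  index: 'my-index',
  body: { index: { number_of_replicas: 2 } }
}).catch(console.error)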
-    reloadSearchAnalyzers<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesReloadSearchAnalyzers, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    reloadSearchAnalyzers<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    reloadSearchAnalyzers<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesReloadSearchAnalyzers, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    reloadSearchAnalyzers<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesReloadSearchAnalyzers, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    resolve_index<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesResolveIndex, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    resolve_index<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    resolve_index<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesResolveIndex, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    resolve_index<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesResolveIndex, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    resolveIndex<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesResolveIndex, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    resolveIndex<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    resolveIndex<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesResolveIndex, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    resolveIndex<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesResolveIndex, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    rollover<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesRollover<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    rollover<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    rollover<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesRollover<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    rollover<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesRollover<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    segments<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesSegments, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    segments<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    segments<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSegments, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    segments<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSegments, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shard_stores<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesShardStores, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    shard_stores<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shard_stores<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesShardStores, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shard_stores<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesShardStores, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shardStores<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesShardStores, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    shardStores<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shardStores<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesShardStores, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shardStores<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesShardStores, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shrink<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesShrink<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    shrink<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shrink<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesShrink<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shrink<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesShrink<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    simulate_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    simulateIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesSimulateTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    simulate_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSimulateTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSimulateTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesSimulateTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    simulateTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSimulateTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSimulateTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    split<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesSplit<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    split<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    split<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSplit<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    split<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesSplit<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    stats<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    unfreeze<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesUnfreeze, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    unfreeze<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    unfreeze<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesUnfreeze, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    unfreeze<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IndicesUnfreeze, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    update_aliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesUpdateAliases<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    update_aliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    update_aliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesUpdateAliases<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    update_aliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesUpdateAliases<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    updateAliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesUpdateAliases<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    updateAliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    updateAliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesUpdateAliases<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    updateAliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesUpdateAliases<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    validate_query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesValidateQuery<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    validate_query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    validate_query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesValidateQuery<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    validate_query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesValidateQuery<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
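// updateAliases applies several alias actions atomically, with the action list in
// the request body. A sketch (index and alias names are hypothetical):
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.indices.updateAliases({
  body: {
    actions: [
      { remove: { index: 'logs-2021.03', alias: 'logs-current' } },
      { add: { index: 'logs-2021.04', alias: 'logs-current' } }
    ]
  }
}).catch(console.error)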
-    validateQuery<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IndicesValidateQuery<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    validateQuery<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    validateQuery<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesValidateQuery<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    validateQuery<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IndicesValidateQuery<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  info<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.Info, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-  info<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  info<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.Info, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  info<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.Info, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  ingest: {
-    delete_pipeline<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IngestDeletePipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_pipeline<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_pipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestDeletePipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_pipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestDeletePipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deletePipeline<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IngestDeletePipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deletePipeline<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deletePipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestDeletePipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deletePipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestDeletePipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    geo_ip_stats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IngestGeoIpStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    geo_ip_stats<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    geo_ip_stats<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestGeoIpStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    geo_ip_stats<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestGeoIpStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    geoIpStats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IngestGeoIpStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    geoIpStats<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    geoIpStats<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestGeoIpStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    geoIpStats<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestGeoIpStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
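// info() and geoIpStats() above need no required params; both resolve with the
// response in body. A sketch, assuming a reachable local node:
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function printClusterInfo (): Promise<void> {
  const { body } = await client.info()
  console.log(body.version.number)
  const stats = await client.ingest.geoIpStats()
  console.log(stats.body)
}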
-    get_pipeline<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IngestGetPipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_pipeline<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_pipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestGetPipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_pipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestGetPipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPipeline<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IngestGetPipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getPipeline<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestGetPipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestGetPipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processor_grok<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IngestProcessorGrok, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    processor_grok<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processor_grok<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestProcessorGrok, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processor_grok<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestProcessorGrok, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processorGrok<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.IngestProcessorGrok, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    processorGrok<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processorGrok<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestProcessorGrok, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processorGrok<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.IngestProcessorGrok, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IngestPutPipeline<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IngestPutPipeline<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IngestPutPipeline<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IngestPutPipeline<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IngestPutPipeline<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IngestPutPipeline<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
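// putPipeline stores an ingest pipeline whose definition travels as the typed
// request body. A sketch (pipeline id and processor are hypothetical):
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.ingest.putPipeline({
  id: 'my-pipeline',
  body: {
    description: 'adds an ingested-at timestamp',
    processors: [
      { set: { field: 'ingested_at', value: '{{_ingest.timestamp}}' } }
    ]
  }
}).catch(console.error)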
-    simulate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.IngestSimulate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    simulate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IngestSimulate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.IngestSimulate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  license: {
-    delete<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LicenseDelete, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseDelete, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseDelete, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LicenseGet, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseGet, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseGet, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_basic_status<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LicenseGetBasicStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_basic_status<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_basic_status<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseGetBasicStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_basic_status<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseGetBasicStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBasicStatus<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LicenseGetBasicStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getBasicStatus<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBasicStatus<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseGetBasicStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBasicStatus<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseGetBasicStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_trial_status<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LicenseGetTrialStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_trial_status<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_trial_status<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseGetTrialStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_trial_status<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseGetTrialStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTrialStatus<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LicenseGetTrialStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getTrialStatus<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTrialStatus<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseGetTrialStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTrialStatus<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicenseGetTrialStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.LicensePost<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    post<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.LicensePost<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.LicensePost<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_basic<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LicensePostStartBasic, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    post_start_basic<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_basic<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicensePostStartBasic, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_basic<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicensePostStartBasic, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartBasic<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LicensePostStartBasic, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    postStartBasic<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartBasic<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicensePostStartBasic, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartBasic<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicensePostStartBasic, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_trial<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LicensePostStartTrial, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    post_start_trial<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_trial<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicensePostStartTrial, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_trial<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicensePostStartTrial, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartTrial<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LicensePostStartTrial, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    postStartTrial<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartTrial<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicensePostStartTrial, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartTrial<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LicensePostStartTrial, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  logstash: {
-    delete_pipeline<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LogstashDeletePipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_pipeline<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_pipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LogstashDeletePipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_pipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LogstashDeletePipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
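// The license calls above are cluster-level; get() needs no params. A sketch,
// assuming a reachable local node:
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.license.get()
  .then(({ body }) => console.log(body.license.status))
  .catch(console.error)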
-    deletePipeline<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LogstashDeletePipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deletePipeline<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deletePipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LogstashDeletePipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deletePipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LogstashDeletePipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_pipeline<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LogstashGetPipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_pipeline<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_pipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LogstashGetPipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_pipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LogstashGetPipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPipeline<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.LogstashGetPipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getPipeline<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LogstashGetPipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPipeline<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.LogstashGetPipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.LogstashPutPipeline<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.LogstashPutPipeline<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.LogstashPutPipeline<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.LogstashPutPipeline<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.LogstashPutPipeline<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.LogstashPutPipeline<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  mget<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.Mget<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-  mget<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  mget<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.Mget<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  mget<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.Mget<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
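// mget fetches several documents in one round trip; the document coordinates go
// in the request body. A sketch (index and ids are hypothetical):
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.mget({
  body: {
    docs: [
      { _index: 'my-index', _id: '1' },
      { _index: 'my-index', _id: '2' }
    ]
  }
}).then(({ body }) => console.log(body.docs))
  .catch(console.error)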
-  migration: {
-    deprecations<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MigrationDeprecations, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deprecations<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deprecations<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MigrationDeprecations, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deprecations<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MigrationDeprecations, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  ml: {
-    close_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlCloseJob<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    close_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    close_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.MlCloseJob<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    close_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.MlCloseJob<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    closeJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlCloseJob<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    closeJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    closeJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.MlCloseJob<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    closeJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.MlCloseJob<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteCalendar, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_calendar<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendar, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendar, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendar<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteCalendar, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteCalendar<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendar<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendar, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendar<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendar, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_event<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteCalendarEvent, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_calendar_event<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_event<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendarEvent, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_event<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendarEvent, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarEvent<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteCalendarEvent, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteCalendarEvent<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarEvent<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendarEvent, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarEvent<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendarEvent, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_job<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteCalendarJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_calendar_job<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_job<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendarJob, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_job<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendarJob, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarJob<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteCalendarJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteCalendarJob<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarJob<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendarJob, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarJob<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteCalendarJob, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_data_frame_analytics<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_data_frame_analytics<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_data_frame_analytics<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteDataFrameAnalytics, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_data_frame_analytics<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDataFrameAnalytics<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteDataFrameAnalytics<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDataFrameAnalytics<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteDataFrameAnalytics, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDataFrameAnalytics<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_datafeed<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteDatafeed, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_datafeed<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_datafeed<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteDatafeed, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_datafeed<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteDatafeed, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDatafeed<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteDatafeed, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteDatafeed<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDatafeed<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteDatafeed, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDatafeed<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteDatafeed, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_expired_data<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteExpiredData<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_expired_data<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_expired_data<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteExpiredData<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_expired_data<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteExpiredData<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteExpiredData<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteExpiredData<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteExpiredData<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteExpiredData<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteExpiredData<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteExpiredData<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteExpiredData<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_filter<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteFilter, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_filter<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_filter<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteFilter, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_filter<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteFilter, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteFilter<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteFilter, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteFilter<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteFilter<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteFilter, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteFilter<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteFilter, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_forecast<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteForecast, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_forecast<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_forecast<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteForecast, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_forecast<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteForecast, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteForecast<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlDeleteForecast, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteForecast<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteForecast<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteForecast, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteForecast<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlDeleteForecast, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
callbackFn): TransportRequestCallback - delete_job, TContext = Context>(params: RequestParams.MlDeleteJob, callback: callbackFn): TransportRequestCallback - delete_job, TContext = Context>(params: RequestParams.MlDeleteJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteJob, TContext = Context>(params?: RequestParams.MlDeleteJob, options?: TransportRequestOptions): TransportRequestPromise> - deleteJob, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteJob, TContext = Context>(params: RequestParams.MlDeleteJob, callback: callbackFn): TransportRequestCallback - deleteJob, TContext = Context>(params: RequestParams.MlDeleteJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_model_snapshot, TContext = Context>(params?: RequestParams.MlDeleteModelSnapshot, options?: TransportRequestOptions): TransportRequestPromise> - delete_model_snapshot, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete_model_snapshot, TContext = Context>(params: RequestParams.MlDeleteModelSnapshot, callback: callbackFn): TransportRequestCallback - delete_model_snapshot, TContext = Context>(params: RequestParams.MlDeleteModelSnapshot, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteModelSnapshot, TContext = Context>(params?: RequestParams.MlDeleteModelSnapshot, options?: TransportRequestOptions): TransportRequestPromise> - deleteModelSnapshot, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteModelSnapshot, TContext = Context>(params: RequestParams.MlDeleteModelSnapshot, callback: callbackFn): TransportRequestCallback - deleteModelSnapshot, TContext = Context>(params: RequestParams.MlDeleteModelSnapshot, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_trained_model, TContext = Context>(params?: RequestParams.MlDeleteTrainedModel, options?: TransportRequestOptions): TransportRequestPromise> - delete_trained_model, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete_trained_model, TContext = Context>(params: RequestParams.MlDeleteTrainedModel, callback: callbackFn): TransportRequestCallback - delete_trained_model, TContext = Context>(params: RequestParams.MlDeleteTrainedModel, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteTrainedModel, TContext = Context>(params?: RequestParams.MlDeleteTrainedModel, options?: TransportRequestOptions): TransportRequestPromise> - deleteTrainedModel, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteTrainedModel, TContext = Context>(params: RequestParams.MlDeleteTrainedModel, callback: callbackFn): TransportRequestCallback - deleteTrainedModel, TContext = Context>(params: RequestParams.MlDeleteTrainedModel, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_trained_model_alias, TContext = Context>(params?: RequestParams.MlDeleteTrainedModelAlias, options?: TransportRequestOptions): TransportRequestPromise> - delete_trained_model_alias, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete_trained_model_alias, TContext = Context>(params: RequestParams.MlDeleteTrainedModelAlias, callback: callbackFn): TransportRequestCallback - delete_trained_model_alias, TContext = Context>(params: RequestParams.MlDeleteTrainedModelAlias, options: TransportRequestOptions, callback: callbackFn): 
TransportRequestCallback - deleteTrainedModelAlias, TContext = Context>(params?: RequestParams.MlDeleteTrainedModelAlias, options?: TransportRequestOptions): TransportRequestPromise> - deleteTrainedModelAlias, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteTrainedModelAlias, TContext = Context>(params: RequestParams.MlDeleteTrainedModelAlias, callback: callbackFn): TransportRequestCallback - deleteTrainedModelAlias, TContext = Context>(params: RequestParams.MlDeleteTrainedModelAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - estimate_model_memory, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlEstimateModelMemory, options?: TransportRequestOptions): TransportRequestPromise> - estimate_model_memory, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - estimate_model_memory, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlEstimateModelMemory, callback: callbackFn): TransportRequestCallback - estimate_model_memory, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlEstimateModelMemory, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - estimateModelMemory, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlEstimateModelMemory, options?: TransportRequestOptions): TransportRequestPromise> - estimateModelMemory, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - estimateModelMemory, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlEstimateModelMemory, callback: callbackFn): TransportRequestCallback - estimateModelMemory, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlEstimateModelMemory, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - evaluate_data_frame, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlEvaluateDataFrame, options?: TransportRequestOptions): TransportRequestPromise> - evaluate_data_frame, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - evaluate_data_frame, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlEvaluateDataFrame, callback: callbackFn): TransportRequestCallback - evaluate_data_frame, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlEvaluateDataFrame, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - evaluateDataFrame, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlEvaluateDataFrame, options?: TransportRequestOptions): TransportRequestPromise> - evaluateDataFrame, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - evaluateDataFrame, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlEvaluateDataFrame, callback: callbackFn): TransportRequestCallback - evaluateDataFrame, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlEvaluateDataFrame, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - explain_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = 
Context>(params?: RequestParams.MlExplainDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - explain_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - explain_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlExplainDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - explain_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlExplainDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - explainDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlExplainDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - explainDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - explainDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlExplainDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - explainDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlExplainDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - flush_job, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlFlushJob, options?: TransportRequestOptions): TransportRequestPromise> - flush_job, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - flush_job, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlFlushJob, callback: callbackFn): TransportRequestCallback - flush_job, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlFlushJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - flushJob, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlFlushJob, options?: TransportRequestOptions): TransportRequestPromise> - flushJob, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - flushJob, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlFlushJob, callback: callbackFn): TransportRequestCallback - flushJob, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlFlushJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - forecast, TContext = Context>(params?: RequestParams.MlForecast, options?: TransportRequestOptions): TransportRequestPromise> - forecast, TContext = Context>(callback: callbackFn): TransportRequestCallback - forecast, TContext = Context>(params: RequestParams.MlForecast, callback: callbackFn): TransportRequestCallback - forecast, TContext = Context>(params: RequestParams.MlForecast, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_buckets, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetBuckets, options?: TransportRequestOptions): TransportRequestPromise> - get_buckets, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_buckets, TRequestBody extends 
RequestBody = Record, TContext = Context>(params: RequestParams.MlGetBuckets, callback: callbackFn): TransportRequestCallback - get_buckets, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetBuckets, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getBuckets, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetBuckets, options?: TransportRequestOptions): TransportRequestPromise> - getBuckets, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - getBuckets, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetBuckets, callback: callbackFn): TransportRequestCallback - getBuckets, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetBuckets, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_calendar_events, TContext = Context>(params?: RequestParams.MlGetCalendarEvents, options?: TransportRequestOptions): TransportRequestPromise> - get_calendar_events, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_calendar_events, TContext = Context>(params: RequestParams.MlGetCalendarEvents, callback: callbackFn): TransportRequestCallback - get_calendar_events, TContext = Context>(params: RequestParams.MlGetCalendarEvents, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getCalendarEvents, TContext = Context>(params?: RequestParams.MlGetCalendarEvents, options?: TransportRequestOptions): TransportRequestPromise> - getCalendarEvents, TContext = Context>(callback: callbackFn): TransportRequestCallback - getCalendarEvents, TContext = Context>(params: RequestParams.MlGetCalendarEvents, callback: callbackFn): TransportRequestCallback - getCalendarEvents, TContext = Context>(params: RequestParams.MlGetCalendarEvents, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_calendars, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetCalendars, options?: TransportRequestOptions): TransportRequestPromise> - get_calendars, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_calendars, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetCalendars, callback: callbackFn): TransportRequestCallback - get_calendars, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetCalendars, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getCalendars, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetCalendars, options?: TransportRequestOptions): TransportRequestPromise> - getCalendars, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - getCalendars, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetCalendars, callback: callbackFn): TransportRequestCallback - getCalendars, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetCalendars, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_categories, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetCategories, options?: TransportRequestOptions): 
TransportRequestPromise> - get_categories, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_categories, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetCategories, callback: callbackFn): TransportRequestCallback - get_categories, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetCategories, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getCategories, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetCategories, options?: TransportRequestOptions): TransportRequestPromise> - getCategories, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - getCategories, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetCategories, callback: callbackFn): TransportRequestCallback - getCategories, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetCategories, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_data_frame_analytics, TContext = Context>(params?: RequestParams.MlGetDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - get_data_frame_analytics, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_data_frame_analytics, TContext = Context>(params: RequestParams.MlGetDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - get_data_frame_analytics, TContext = Context>(params: RequestParams.MlGetDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDataFrameAnalytics, TContext = Context>(params?: RequestParams.MlGetDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - getDataFrameAnalytics, TContext = Context>(callback: callbackFn): TransportRequestCallback - getDataFrameAnalytics, TContext = Context>(params: RequestParams.MlGetDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - getDataFrameAnalytics, TContext = Context>(params: RequestParams.MlGetDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_data_frame_analytics_stats, TContext = Context>(params?: RequestParams.MlGetDataFrameAnalyticsStats, options?: TransportRequestOptions): TransportRequestPromise> - get_data_frame_analytics_stats, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_data_frame_analytics_stats, TContext = Context>(params: RequestParams.MlGetDataFrameAnalyticsStats, callback: callbackFn): TransportRequestCallback - get_data_frame_analytics_stats, TContext = Context>(params: RequestParams.MlGetDataFrameAnalyticsStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDataFrameAnalyticsStats, TContext = Context>(params?: RequestParams.MlGetDataFrameAnalyticsStats, options?: TransportRequestOptions): TransportRequestPromise> - getDataFrameAnalyticsStats, TContext = Context>(callback: callbackFn): TransportRequestCallback - getDataFrameAnalyticsStats, TContext = Context>(params: RequestParams.MlGetDataFrameAnalyticsStats, callback: callbackFn): TransportRequestCallback - getDataFrameAnalyticsStats, TContext = Context>(params: RequestParams.MlGetDataFrameAnalyticsStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - 
get_datafeed_stats, TContext = Context>(params?: RequestParams.MlGetDatafeedStats, options?: TransportRequestOptions): TransportRequestPromise> - get_datafeed_stats, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_datafeed_stats, TContext = Context>(params: RequestParams.MlGetDatafeedStats, callback: callbackFn): TransportRequestCallback - get_datafeed_stats, TContext = Context>(params: RequestParams.MlGetDatafeedStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDatafeedStats, TContext = Context>(params?: RequestParams.MlGetDatafeedStats, options?: TransportRequestOptions): TransportRequestPromise> - getDatafeedStats, TContext = Context>(callback: callbackFn): TransportRequestCallback - getDatafeedStats, TContext = Context>(params: RequestParams.MlGetDatafeedStats, callback: callbackFn): TransportRequestCallback - getDatafeedStats, TContext = Context>(params: RequestParams.MlGetDatafeedStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_datafeeds, TContext = Context>(params?: RequestParams.MlGetDatafeeds, options?: TransportRequestOptions): TransportRequestPromise> - get_datafeeds, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_datafeeds, TContext = Context>(params: RequestParams.MlGetDatafeeds, callback: callbackFn): TransportRequestCallback - get_datafeeds, TContext = Context>(params: RequestParams.MlGetDatafeeds, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDatafeeds, TContext = Context>(params?: RequestParams.MlGetDatafeeds, options?: TransportRequestOptions): TransportRequestPromise> - getDatafeeds, TContext = Context>(callback: callbackFn): TransportRequestCallback - getDatafeeds, TContext = Context>(params: RequestParams.MlGetDatafeeds, callback: callbackFn): TransportRequestCallback - getDatafeeds, TContext = Context>(params: RequestParams.MlGetDatafeeds, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_filters, TContext = Context>(params?: RequestParams.MlGetFilters, options?: TransportRequestOptions): TransportRequestPromise> - get_filters, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_filters, TContext = Context>(params: RequestParams.MlGetFilters, callback: callbackFn): TransportRequestCallback - get_filters, TContext = Context>(params: RequestParams.MlGetFilters, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getFilters, TContext = Context>(params?: RequestParams.MlGetFilters, options?: TransportRequestOptions): TransportRequestPromise> - getFilters, TContext = Context>(callback: callbackFn): TransportRequestCallback - getFilters, TContext = Context>(params: RequestParams.MlGetFilters, callback: callbackFn): TransportRequestCallback - getFilters, TContext = Context>(params: RequestParams.MlGetFilters, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_influencers, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetInfluencers, options?: TransportRequestOptions): TransportRequestPromise> - get_influencers, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_influencers, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetInfluencers, callback: callbackFn): TransportRequestCallback - get_influencers, TRequestBody extends 
RequestBody = Record, TContext = Context>(params: RequestParams.MlGetInfluencers, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getInfluencers, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetInfluencers, options?: TransportRequestOptions): TransportRequestPromise> - getInfluencers, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - getInfluencers, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetInfluencers, callback: callbackFn): TransportRequestCallback - getInfluencers, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetInfluencers, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_job_stats, TContext = Context>(params?: RequestParams.MlGetJobStats, options?: TransportRequestOptions): TransportRequestPromise> - get_job_stats, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_job_stats, TContext = Context>(params: RequestParams.MlGetJobStats, callback: callbackFn): TransportRequestCallback - get_job_stats, TContext = Context>(params: RequestParams.MlGetJobStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getJobStats, TContext = Context>(params?: RequestParams.MlGetJobStats, options?: TransportRequestOptions): TransportRequestPromise> - getJobStats, TContext = Context>(callback: callbackFn): TransportRequestCallback - getJobStats, TContext = Context>(params: RequestParams.MlGetJobStats, callback: callbackFn): TransportRequestCallback - getJobStats, TContext = Context>(params: RequestParams.MlGetJobStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_jobs, TContext = Context>(params?: RequestParams.MlGetJobs, options?: TransportRequestOptions): TransportRequestPromise> - get_jobs, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_jobs, TContext = Context>(params: RequestParams.MlGetJobs, callback: callbackFn): TransportRequestCallback - get_jobs, TContext = Context>(params: RequestParams.MlGetJobs, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getJobs, TContext = Context>(params?: RequestParams.MlGetJobs, options?: TransportRequestOptions): TransportRequestPromise> - getJobs, TContext = Context>(callback: callbackFn): TransportRequestCallback - getJobs, TContext = Context>(params: RequestParams.MlGetJobs, callback: callbackFn): TransportRequestCallback - getJobs, TContext = Context>(params: RequestParams.MlGetJobs, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_model_snapshots, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetModelSnapshots, options?: TransportRequestOptions): TransportRequestPromise> - get_model_snapshots, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_model_snapshots, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetModelSnapshots, callback: callbackFn): TransportRequestCallback - get_model_snapshots, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetModelSnapshots, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getModelSnapshots, TRequestBody extends RequestBody = Record, TContext 
= Context>(params?: RequestParams.MlGetModelSnapshots, options?: TransportRequestOptions): TransportRequestPromise> - getModelSnapshots, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - getModelSnapshots, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetModelSnapshots, callback: callbackFn): TransportRequestCallback - getModelSnapshots, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetModelSnapshots, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_overall_buckets, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetOverallBuckets, options?: TransportRequestOptions): TransportRequestPromise> - get_overall_buckets, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_overall_buckets, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetOverallBuckets, callback: callbackFn): TransportRequestCallback - get_overall_buckets, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetOverallBuckets, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getOverallBuckets, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetOverallBuckets, options?: TransportRequestOptions): TransportRequestPromise> - getOverallBuckets, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - getOverallBuckets, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetOverallBuckets, callback: callbackFn): TransportRequestCallback - getOverallBuckets, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetOverallBuckets, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_records, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetRecords, options?: TransportRequestOptions): TransportRequestPromise> - get_records, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_records, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetRecords, callback: callbackFn): TransportRequestCallback - get_records, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetRecords, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getRecords, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlGetRecords, options?: TransportRequestOptions): TransportRequestPromise> - getRecords, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - getRecords, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetRecords, callback: callbackFn): TransportRequestCallback - getRecords, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlGetRecords, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_trained_models, TContext = Context>(params?: RequestParams.MlGetTrainedModels, options?: TransportRequestOptions): TransportRequestPromise> - get_trained_models, TContext = 
Context>(callback: callbackFn): TransportRequestCallback - get_trained_models, TContext = Context>(params: RequestParams.MlGetTrainedModels, callback: callbackFn): TransportRequestCallback - get_trained_models, TContext = Context>(params: RequestParams.MlGetTrainedModels, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTrainedModels, TContext = Context>(params?: RequestParams.MlGetTrainedModels, options?: TransportRequestOptions): TransportRequestPromise> - getTrainedModels, TContext = Context>(callback: callbackFn): TransportRequestCallback - getTrainedModels, TContext = Context>(params: RequestParams.MlGetTrainedModels, callback: callbackFn): TransportRequestCallback - getTrainedModels, TContext = Context>(params: RequestParams.MlGetTrainedModels, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_trained_models_stats, TContext = Context>(params?: RequestParams.MlGetTrainedModelsStats, options?: TransportRequestOptions): TransportRequestPromise> - get_trained_models_stats, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_trained_models_stats, TContext = Context>(params: RequestParams.MlGetTrainedModelsStats, callback: callbackFn): TransportRequestCallback - get_trained_models_stats, TContext = Context>(params: RequestParams.MlGetTrainedModelsStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTrainedModelsStats, TContext = Context>(params?: RequestParams.MlGetTrainedModelsStats, options?: TransportRequestOptions): TransportRequestPromise> - getTrainedModelsStats, TContext = Context>(callback: callbackFn): TransportRequestCallback - getTrainedModelsStats, TContext = Context>(params: RequestParams.MlGetTrainedModelsStats, callback: callbackFn): TransportRequestCallback - getTrainedModelsStats, TContext = Context>(params: RequestParams.MlGetTrainedModelsStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - infer_trained_model_deployment, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlInferTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> - infer_trained_model_deployment, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - infer_trained_model_deployment, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, callback: callbackFn): TransportRequestCallback - infer_trained_model_deployment, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - inferTrainedModelDeployment, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlInferTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise> - inferTrainedModelDeployment, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - inferTrainedModelDeployment, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, callback: callbackFn): TransportRequestCallback - inferTrainedModelDeployment, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlInferTrainedModelDeployment, options: TransportRequestOptions, callback: callbackFn): 
TransportRequestCallback - info, TContext = Context>(params?: RequestParams.MlInfo, options?: TransportRequestOptions): TransportRequestPromise> - info, TContext = Context>(callback: callbackFn): TransportRequestCallback - info, TContext = Context>(params: RequestParams.MlInfo, callback: callbackFn): TransportRequestCallback - info, TContext = Context>(params: RequestParams.MlInfo, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - open_job, TContext = Context>(params?: RequestParams.MlOpenJob, options?: TransportRequestOptions): TransportRequestPromise> - open_job, TContext = Context>(callback: callbackFn): TransportRequestCallback - open_job, TContext = Context>(params: RequestParams.MlOpenJob, callback: callbackFn): TransportRequestCallback - open_job, TContext = Context>(params: RequestParams.MlOpenJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - openJob, TContext = Context>(params?: RequestParams.MlOpenJob, options?: TransportRequestOptions): TransportRequestPromise> - openJob, TContext = Context>(callback: callbackFn): TransportRequestCallback - openJob, TContext = Context>(params: RequestParams.MlOpenJob, callback: callbackFn): TransportRequestCallback - openJob, TContext = Context>(params: RequestParams.MlOpenJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - post_calendar_events, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPostCalendarEvents, options?: TransportRequestOptions): TransportRequestPromise> - post_calendar_events, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - post_calendar_events, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPostCalendarEvents, callback: callbackFn): TransportRequestCallback - post_calendar_events, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPostCalendarEvents, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postCalendarEvents, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPostCalendarEvents, options?: TransportRequestOptions): TransportRequestPromise> - postCalendarEvents, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - postCalendarEvents, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPostCalendarEvents, callback: callbackFn): TransportRequestCallback - postCalendarEvents, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPostCalendarEvents, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - post_data, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params?: RequestParams.MlPostData, options?: TransportRequestOptions): TransportRequestPromise> - post_data, TRequestBody extends RequestNDBody = Record[], TContext = Context>(callback: callbackFn): TransportRequestCallback - post_data, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.MlPostData, callback: callbackFn): TransportRequestCallback - post_data, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.MlPostData, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postData, TRequestBody extends RequestNDBody = Record[], 
TContext = Context>(params?: RequestParams.MlPostData, options?: TransportRequestOptions): TransportRequestPromise> - postData, TRequestBody extends RequestNDBody = Record[], TContext = Context>(callback: callbackFn): TransportRequestCallback - postData, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.MlPostData, callback: callbackFn): TransportRequestCallback - postData, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.MlPostData, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - preview_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPreviewDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - preview_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - preview_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPreviewDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - preview_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPreviewDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - previewDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPreviewDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - previewDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - previewDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPreviewDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - previewDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPreviewDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - preview_datafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPreviewDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - preview_datafeed, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - preview_datafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPreviewDatafeed, callback: callbackFn): TransportRequestCallback - preview_datafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPreviewDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - previewDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPreviewDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - previewDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - previewDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPreviewDatafeed, callback: callbackFn): TransportRequestCallback - previewDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPreviewDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_calendar, TRequestBody 
extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutCalendar, options?: TransportRequestOptions): TransportRequestPromise> - put_calendar, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_calendar, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutCalendar, callback: callbackFn): TransportRequestCallback - put_calendar, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutCalendar, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putCalendar, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutCalendar, options?: TransportRequestOptions): TransportRequestPromise> - putCalendar, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - putCalendar, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutCalendar, callback: callbackFn): TransportRequestCallback - putCalendar, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutCalendar, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_calendar_job, TContext = Context>(params?: RequestParams.MlPutCalendarJob, options?: TransportRequestOptions): TransportRequestPromise> - put_calendar_job, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_calendar_job, TContext = Context>(params: RequestParams.MlPutCalendarJob, callback: callbackFn): TransportRequestCallback - put_calendar_job, TContext = Context>(params: RequestParams.MlPutCalendarJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putCalendarJob, TContext = Context>(params?: RequestParams.MlPutCalendarJob, options?: TransportRequestOptions): TransportRequestPromise> - putCalendarJob, TContext = Context>(callback: callbackFn): TransportRequestCallback - putCalendarJob, TContext = Context>(params: RequestParams.MlPutCalendarJob, callback: callbackFn): TransportRequestCallback - putCalendarJob, TContext = Context>(params: RequestParams.MlPutCalendarJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - put_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - put_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - putDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - putDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutDataFrameAnalytics, 
callback: callbackFn): TransportRequestCallback - putDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_datafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - put_datafeed, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_datafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutDatafeed, callback: callbackFn): TransportRequestCallback - put_datafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - putDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - putDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutDatafeed, callback: callbackFn): TransportRequestCallback - putDatafeed, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_filter, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutFilter, options?: TransportRequestOptions): TransportRequestPromise> - put_filter, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_filter, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutFilter, callback: callbackFn): TransportRequestCallback - put_filter, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutFilter, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putFilter, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutFilter, options?: TransportRequestOptions): TransportRequestPromise> - putFilter, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - putFilter, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutFilter, callback: callbackFn): TransportRequestCallback - putFilter, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutFilter, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_job, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutJob, options?: TransportRequestOptions): TransportRequestPromise> - put_job, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_job, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutJob, callback: callbackFn): TransportRequestCallback - put_job, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putJob, TRequestBody 
extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutJob, options?: TransportRequestOptions): TransportRequestPromise> - putJob, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - putJob, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutJob, callback: callbackFn): TransportRequestCallback - putJob, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_trained_model, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutTrainedModel, options?: TransportRequestOptions): TransportRequestPromise> - put_trained_model, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_trained_model, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutTrainedModel, callback: callbackFn): TransportRequestCallback - put_trained_model, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutTrainedModel, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putTrainedModel, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.MlPutTrainedModel, options?: TransportRequestOptions): TransportRequestPromise> - putTrainedModel, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - putTrainedModel, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutTrainedModel, callback: callbackFn): TransportRequestCallback - putTrainedModel, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.MlPutTrainedModel, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_trained_model_alias, TContext = Context>(params?: RequestParams.MlPutTrainedModelAlias, options?: TransportRequestOptions): TransportRequestPromise> - put_trained_model_alias, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_trained_model_alias, TContext = Context>(params: RequestParams.MlPutTrainedModelAlias, callback: callbackFn): TransportRequestCallback - put_trained_model_alias, TContext = Context>(params: RequestParams.MlPutTrainedModelAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putTrainedModelAlias, TContext = Context>(params?: RequestParams.MlPutTrainedModelAlias, options?: TransportRequestOptions): TransportRequestPromise> - putTrainedModelAlias, TContext = Context>(callback: callbackFn): TransportRequestCallback - putTrainedModelAlias, TContext = Context>(params: RequestParams.MlPutTrainedModelAlias, callback: callbackFn): TransportRequestCallback - putTrainedModelAlias, TContext = Context>(params: RequestParams.MlPutTrainedModelAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - reset_job, TContext = Context>(params?: RequestParams.MlResetJob, options?: TransportRequestOptions): TransportRequestPromise> - reset_job, TContext = Context>(callback: callbackFn): TransportRequestCallback - reset_job, TContext = Context>(params: RequestParams.MlResetJob, callback: callbackFn): TransportRequestCallback - reset_job, TContext = Context>(params: RequestParams.MlResetJob, options: TransportRequestOptions, callback: 
callbackFn<TResponse, TContext>): TransportRequestCallback
-      reset_job / resetJob<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlResetJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      revert_model_snapshot / revertModelSnapshot<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlRevertModelSnapshot, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      set_upgrade_mode / setUpgradeMode<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlSetUpgradeMode, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      start_data_frame_analytics / startDataFrameAnalytics<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlStartDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      start_datafeed / startDatafeed<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlStartDatafeed, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      start_trained_model_deployment / startTrainedModelDeployment<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlStartTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      stop_data_frame_analytics / stopDataFrameAnalytics<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlStopDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      stop_datafeed / stopDatafeed<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlStopDatafeed, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      stop_trained_model_deployment / stopTrainedModelDeployment<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlStopTrainedModelDeployment, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      update_data_frame_analytics / updateDataFrameAnalytics<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlUpdateDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      update_datafeed / updateDatafeed<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlUpdateDatafeed, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      update_filter / updateFilter<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlUpdateFilter, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      update_job / updateJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlUpdateJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      update_model_snapshot / updateModelSnapshot<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlUpdateModelSnapshot, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      upgrade_job_snapshot / upgradeJobSnapshot<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlUpgradeJobSnapshot, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      validate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlValidate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      validate_detector / validateDetector<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.MlValidateDetector, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    }
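Each snake_case / camelCase pair above is an alias pair sharing one set of overloads, and only the promise-returning form is listed. In the full declarations every method also carries three callback-style overloads; the generic parameter lists were stripped from this hunk, so the complete pattern below is a reconstruction from the surviving fragments (callbackFn, TransportRequestCallback, and the params/options argument order are all present in the original), shown for one representative method:

    reset_job<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.MlResetJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
    reset_job<TResponse = Record<string, any>, TContext = Context>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
    reset_job<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlResetJob, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
    reset_job<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.MlResetJob, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback

Body-accepting APIs add TRequestBody extends RequestBody = Record<string, any> (or RequestNDBody = Record<string, any>[] for newline-delimited bodies such as monitoring.bulk and msearch) between TResponse and TContext.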
-    monitoring: {
-      bulk<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = Context>(params?: RequestParams.MonitoringBulk, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    }
-    msearch<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = Context>(params?: RequestParams.Msearch, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    msearch_template / msearchTemplate<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = Context>(params?: RequestParams.MsearchTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    mtermvectors<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.Mtermvectors, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    nodes: {
-      clear_metering_archive / clearMeteringArchive<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.NodesClearMeteringArchive, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      get_metering_info / getMeteringInfo<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.NodesGetMeteringInfo, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      hot_threads / hotThreads<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.NodesHotThreads, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      info<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.NodesInfo, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      reload_secure_settings / reloadSecureSettings<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.NodesReloadSecureSettings, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      stats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.NodesStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      usage<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.NodesUsage, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    }
-    open_point_in_time / openPointInTime<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.OpenPointInTime, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    ping<TContext = Context>(params?: RequestParams.Ping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<boolean, TContext>>
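A minimal usage sketch of the two calling conventions declared above, targeting the 7.x client; the node address is an assumption:

    import { Client, ApiResponse } from '@elastic/elasticsearch'

    // The node URL is an assumption; point it at your cluster.
    const client = new Client({ node: 'http://localhost:9200/' })

    async function checkCluster () {
      // Promise style: resolves to an ApiResponse, per the TransportRequestPromise overloads.
      const stats: ApiResponse = await client.nodes.stats()
      console.log(stats.body)

      // Callback style: the TransportRequestCallback overloads; result.body is `true` on success.
      client.ping((err, result) => {
        console.log(err ? 'cluster unreachable' : result.body)
      })
    }

    checkCluster().catch(console.error)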
-    put_script / putScript<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.PutScript, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    rank_eval / rankEval<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.RankEval, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    reindex<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.Reindex, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    reindex_rethrottle / reindexRethrottle<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.ReindexRethrottle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    render_search_template / renderSearchTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.RenderSearchTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    rollup: {
-      delete_job / deleteJob<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.RollupDeleteJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      get_jobs / getJobs<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.RollupGetJobs, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      get_rollup_caps / getRollupCaps<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.RollupGetRollupCaps, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      get_rollup_index_caps / getRollupIndexCaps<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.RollupGetRollupIndexCaps, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      put_job / putJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.RollupPutJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      rollup<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.RollupRollup, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      rollup_search / rollupSearch<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.RollupRollupSearch, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      start_job / startJob<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.RollupStartJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      stop_job / stopJob<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.RollupStopJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    }
-    scripts_painless_execute / scriptsPainlessExecute<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.ScriptsPainlessExecute, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    scroll<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.Scroll, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
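Since scroll pairs with search in practice, a short sketch of a scroll loop may help; the index name, page size, and 30s keep-alive are illustrative assumptions:

    import { Client } from '@elastic/elasticsearch'

    // Iterates every document in an index one page at a time.
    async function * scrollDocs (client: Client, index: string) {
      let response = await client.search({
        index,
        scroll: '30s',
        size: 100,
        body: { query: { match_all: {} } }
      })
      while (response.body.hits.hits.length > 0) {
        yield * response.body.hits.hits
        // The _scroll_id returned with the previous page fetches the next one.
        response = await client.scroll({ scroll_id: response.body._scroll_id, scroll: '30s' })
      }
    }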
-    search<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.Search, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    search_mvt / searchMvt<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.SearchMvt, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    search_shards / searchShards<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SearchShards, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    search_template / searchTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.SearchTemplate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    searchable_snapshots: {
-      cache_stats / cacheStats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SearchableSnapshotsCacheStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      clear_cache / clearCache<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SearchableSnapshotsClearCache, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      mount<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.SearchableSnapshotsMount, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      stats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SearchableSnapshotsStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    }
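The generics on search are what let callers type the response body. A hedged sketch, with the LogSearchResponse shape and the logs index as assumptions:

    import { Client } from '@elastic/elasticsearch'

    // Illustrative response/source shapes; adjust to your data.
    interface LogSource { '@timestamp': string; message: string }
    interface LogSearchResponse { hits: { hits: Array<{ _source: LogSource }> } }

    async function latestErrors (client: Client): Promise<string[]> {
      // TResponse types `body`; the second parameter (TRequestBody) could type the query too.
      const { body } = await client.search<LogSearchResponse>({
        index: 'logs',
        body: { query: { match: { message: 'error' } }, size: 10 }
      })
      return body.hits.hits.map(hit => hit._source.message)
    }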
-    searchableSnapshots: {
-      cache_stats / cacheStats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SearchableSnapshotsCacheStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      clear_cache / clearCache<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SearchableSnapshotsClearCache, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      mount<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.SearchableSnapshotsMount, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      stats<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SearchableSnapshotsStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    }
-    security: {
-      authenticate<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityAuthenticate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      change_password / changePassword<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityChangePassword, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      clear_api_key_cache / clearApiKeyCache<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityClearApiKeyCache, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      clear_cached_privileges / clearCachedPrivileges<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityClearCachedPrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      clear_cached_realms / clearCachedRealms<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityClearCachedRealms, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      clear_cached_roles / clearCachedRoles<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityClearCachedRoles, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      clear_cached_service_tokens / clearCachedServiceTokens<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityClearCachedServiceTokens, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      create_api_key / createApiKey<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityCreateApiKey, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      create_service_token / createServiceToken<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityCreateServiceToken, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      delete_privileges / deletePrivileges<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityDeletePrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      delete_role / deleteRole<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityDeleteRole, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      delete_role_mapping / deleteRoleMapping<TResponse = Record<string, any>, TContext = Context>(params?: RequestParams.SecurityDeleteRoleMapping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-      delete_service_token / deleteServiceToken<TResponse = Record<string, any>, TContext = Context>(params: RequestParams.SecurityDeleteServiceToken,
options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_user, TContext = Context>(params?: RequestParams.SecurityDeleteUser, options?: TransportRequestOptions): TransportRequestPromise> - delete_user, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete_user, TContext = Context>(params: RequestParams.SecurityDeleteUser, callback: callbackFn): TransportRequestCallback - delete_user, TContext = Context>(params: RequestParams.SecurityDeleteUser, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteUser, TContext = Context>(params?: RequestParams.SecurityDeleteUser, options?: TransportRequestOptions): TransportRequestPromise> - deleteUser, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteUser, TContext = Context>(params: RequestParams.SecurityDeleteUser, callback: callbackFn): TransportRequestCallback - deleteUser, TContext = Context>(params: RequestParams.SecurityDeleteUser, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - disable_user, TContext = Context>(params?: RequestParams.SecurityDisableUser, options?: TransportRequestOptions): TransportRequestPromise> - disable_user, TContext = Context>(callback: callbackFn): TransportRequestCallback - disable_user, TContext = Context>(params: RequestParams.SecurityDisableUser, callback: callbackFn): TransportRequestCallback - disable_user, TContext = Context>(params: RequestParams.SecurityDisableUser, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - disableUser, TContext = Context>(params?: RequestParams.SecurityDisableUser, options?: TransportRequestOptions): TransportRequestPromise> - disableUser, TContext = Context>(callback: callbackFn): TransportRequestCallback - disableUser, TContext = Context>(params: RequestParams.SecurityDisableUser, callback: callbackFn): TransportRequestCallback - disableUser, TContext = Context>(params: RequestParams.SecurityDisableUser, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - enable_user, TContext = Context>(params?: RequestParams.SecurityEnableUser, options?: TransportRequestOptions): TransportRequestPromise> - enable_user, TContext = Context>(callback: callbackFn): TransportRequestCallback - enable_user, TContext = Context>(params: RequestParams.SecurityEnableUser, callback: callbackFn): TransportRequestCallback - enable_user, TContext = Context>(params: RequestParams.SecurityEnableUser, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - enableUser, TContext = Context>(params?: RequestParams.SecurityEnableUser, options?: TransportRequestOptions): TransportRequestPromise> - enableUser, TContext = Context>(callback: callbackFn): TransportRequestCallback - enableUser, TContext = Context>(params: RequestParams.SecurityEnableUser, callback: callbackFn): TransportRequestCallback - enableUser, TContext = Context>(params: RequestParams.SecurityEnableUser, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - enroll_kibana, TContext = Context>(params?: RequestParams.SecurityEnrollKibana, options?: TransportRequestOptions): TransportRequestPromise> - enroll_kibana, TContext = Context>(callback: callbackFn): TransportRequestCallback - enroll_kibana, TContext = Context>(params: RequestParams.SecurityEnrollKibana, callback: callbackFn): TransportRequestCallback - enroll_kibana, TContext = Context>(params: 
RequestParams.SecurityEnrollKibana, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - enrollKibana, TContext = Context>(params?: RequestParams.SecurityEnrollKibana, options?: TransportRequestOptions): TransportRequestPromise> - enrollKibana, TContext = Context>(callback: callbackFn): TransportRequestCallback - enrollKibana, TContext = Context>(params: RequestParams.SecurityEnrollKibana, callback: callbackFn): TransportRequestCallback - enrollKibana, TContext = Context>(params: RequestParams.SecurityEnrollKibana, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - enroll_node, TContext = Context>(params?: RequestParams.SecurityEnrollNode, options?: TransportRequestOptions): TransportRequestPromise> - enroll_node, TContext = Context>(callback: callbackFn): TransportRequestCallback - enroll_node, TContext = Context>(params: RequestParams.SecurityEnrollNode, callback: callbackFn): TransportRequestCallback - enroll_node, TContext = Context>(params: RequestParams.SecurityEnrollNode, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - enrollNode, TContext = Context>(params?: RequestParams.SecurityEnrollNode, options?: TransportRequestOptions): TransportRequestPromise> - enrollNode, TContext = Context>(callback: callbackFn): TransportRequestCallback - enrollNode, TContext = Context>(params: RequestParams.SecurityEnrollNode, callback: callbackFn): TransportRequestCallback - enrollNode, TContext = Context>(params: RequestParams.SecurityEnrollNode, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_api_key, TContext = Context>(params?: RequestParams.SecurityGetApiKey, options?: TransportRequestOptions): TransportRequestPromise> - get_api_key, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_api_key, TContext = Context>(params: RequestParams.SecurityGetApiKey, callback: callbackFn): TransportRequestCallback - get_api_key, TContext = Context>(params: RequestParams.SecurityGetApiKey, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getApiKey, TContext = Context>(params?: RequestParams.SecurityGetApiKey, options?: TransportRequestOptions): TransportRequestPromise> - getApiKey, TContext = Context>(callback: callbackFn): TransportRequestCallback - getApiKey, TContext = Context>(params: RequestParams.SecurityGetApiKey, callback: callbackFn): TransportRequestCallback - getApiKey, TContext = Context>(params: RequestParams.SecurityGetApiKey, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_builtin_privileges, TContext = Context>(params?: RequestParams.SecurityGetBuiltinPrivileges, options?: TransportRequestOptions): TransportRequestPromise> - get_builtin_privileges, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_builtin_privileges, TContext = Context>(params: RequestParams.SecurityGetBuiltinPrivileges, callback: callbackFn): TransportRequestCallback - get_builtin_privileges, TContext = Context>(params: RequestParams.SecurityGetBuiltinPrivileges, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getBuiltinPrivileges, TContext = Context>(params?: RequestParams.SecurityGetBuiltinPrivileges, options?: TransportRequestOptions): TransportRequestPromise> - getBuiltinPrivileges, TContext = Context>(callback: callbackFn): TransportRequestCallback - getBuiltinPrivileges, TContext = Context>(params: 
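The overloads are consumed as either promises or callbacks. A minimal sketch, assuming a cluster reachable at http://localhost:9200 with security enabled; the node URL and the API key id are placeholders:

----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // promise-returning overload: params and options are both optional
  const { body } = await client.security.getApiKey({ id: 'my-api-key-id' })
  console.log(body.api_keys)

  // callback overload: the same params, with the result delivered to callbackFn
  client.security.getApiKey({ id: 'my-api-key-id' }, (err, result) => {
    if (err) console.error(err)
    else console.log(result.body.api_keys)
  })
}

run().catch(console.log)
----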
  getPrivileges(params?: RequestParams.SecurityGetPrivileges, options?: TransportRequestOptions)
  getRole(params?: RequestParams.SecurityGetRole, options?: TransportRequestOptions)
  getRoleMapping(params?: RequestParams.SecurityGetRoleMapping, options?: TransportRequestOptions)
  getServiceAccounts(params?: RequestParams.SecurityGetServiceAccounts, options?: TransportRequestOptions)
  getServiceCredentials(params?: RequestParams.SecurityGetServiceCredentials, options?: TransportRequestOptions)
  getToken(params?: RequestParams.SecurityGetToken<TRequestBody>, options?: TransportRequestOptions)
  getUser(params?: RequestParams.SecurityGetUser, options?: TransportRequestOptions)
  getUserPrivileges(params?: RequestParams.SecurityGetUserPrivileges, options?: TransportRequestOptions)
  grantApiKey(params?: RequestParams.SecurityGrantApiKey<TRequestBody>, options?: TransportRequestOptions)
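getToken is one of the body-accepting methods, so it takes the TRequestBody parameter and carries its credentials in the request body. A sketch in the same style as above; the username and password are placeholders:

----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // the credentials travel in the request body, hence the TRequestBody parameter
  const { body } = await client.security.getToken({
    body: {
      grant_type: 'password',
      username: 'elastic',
      password: 'changeme'
    }
  })
  console.log(body.access_token)
}

run().catch(console.log)
----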
  hasPrivileges(params?: RequestParams.SecurityHasPrivileges<TRequestBody>, options?: TransportRequestOptions)
  invalidateApiKey(params?: RequestParams.SecurityInvalidateApiKey<TRequestBody>, options?: TransportRequestOptions)
  invalidateToken(params?: RequestParams.SecurityInvalidateToken<TRequestBody>, options?: TransportRequestOptions)
  putPrivileges(params?: RequestParams.SecurityPutPrivileges<TRequestBody>, options?: TransportRequestOptions)
  putRole(params?: RequestParams.SecurityPutRole<TRequestBody>, options?: TransportRequestOptions)
  putRoleMapping(params?: RequestParams.SecurityPutRoleMapping<TRequestBody>, options?: TransportRequestOptions)
  putUser(params?: RequestParams.SecurityPutUser<TRequestBody>, options?: TransportRequestOptions)
  queryApiKeys(params?: RequestParams.SecurityQueryApiKeys<TRequestBody>, options?: TransportRequestOptions)
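The put* methods all carry their definition in the request body. A sketch of creating a read-only role; the role name, index pattern, and privileges are illustrative:

----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // create (or overwrite) a role that can only read indices matching logs-*
  await client.security.putRole({
    name: 'read-only-logs',
    body: {
      cluster: [],
      indices: [
        { names: ['logs-*'], privileges: ['read'] }
      ]
    }
  })
}

run().catch(console.log)
----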
  samlAuthenticate(params?: RequestParams.SecuritySamlAuthenticate<TRequestBody>, options?: TransportRequestOptions)
  samlCompleteLogout(params?: RequestParams.SecuritySamlCompleteLogout<TRequestBody>, options?: TransportRequestOptions)
  samlInvalidate(params?: RequestParams.SecuritySamlInvalidate<TRequestBody>, options?: TransportRequestOptions)
  samlLogout(params?: RequestParams.SecuritySamlLogout<TRequestBody>, options?: TransportRequestOptions)
  samlPrepareAuthentication(params?: RequestParams.SecuritySamlPrepareAuthentication<TRequestBody>, options?: TransportRequestOptions)
  samlServiceProviderMetadata(params?: RequestParams.SecuritySamlServiceProviderMetadata, options?: TransportRequestOptions)
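To round out the namespace, a hedged sketch of API key creation; the key name, expiration, and role descriptor are illustrative:

----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  const { body } = await client.security.createApiKey({
    body: {
      name: 'ingest-key',
      expiration: '7d',
      // API key role descriptors use the `index` key (not `indices`)
      role_descriptors: {
        'ingest-only': {
          index: [{ names: ['logs-*'], privileges: ['create_doc'] }]
        }
      }
    }
  })
  // callers send base64(id:api_key) in the Authorization header
  console.log(body.id, body.api_key)
}

run().catch(console.log)
----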
The shutdown namespace:

  deleteNode(params?: RequestParams.ShutdownDeleteNode, options?: TransportRequestOptions)
  getNode(params?: RequestParams.ShutdownGetNode, options?: TransportRequestOptions)
  putNode(params?: RequestParams.ShutdownPutNode<TRequestBody>, options?: TransportRequestOptions)
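The node shutdown API registers a node for an orderly shutdown and lets orchestrators poll its status. A sketch, assuming the cluster's license permits the API; the node id, type, and reason are placeholders:

----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // register an orderly shutdown for one node...
  await client.shutdown.putNode({
    node_id: 'instance-0000000001',
    body: { type: 'restart', reason: 'scheduled maintenance' }
  })
  // ...then poll shutdown status across the cluster
  const { body } = await client.shutdown.getNode()
  console.log(body.nodes)
}

run().catch(console.log)
----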
The slm namespace:

  deleteLifecycle(params?: RequestParams.SlmDeleteLifecycle, options?: TransportRequestOptions)
  executeLifecycle(params?: RequestParams.SlmExecuteLifecycle, options?: TransportRequestOptions)
  executeRetention(params?: RequestParams.SlmExecuteRetention, options?: TransportRequestOptions)
  getLifecycle(params?: RequestParams.SlmGetLifecycle, options?: TransportRequestOptions)
  getStats(params?: RequestParams.SlmGetStats, options?: TransportRequestOptions)
  getStatus(params?: RequestParams.SlmGetStatus, options?: TransportRequestOptions)
  putLifecycle(params?: RequestParams.SlmPutLifecycle<TRequestBody>, options?: TransportRequestOptions)
  start(params?: RequestParams.SlmStart, options?: TransportRequestOptions)
  stop(params?: RequestParams.SlmStop, options?: TransportRequestOptions)
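A typical snapshot lifecycle round trip registers a policy with putLifecycle and then triggers it ahead of schedule with executeLifecycle. A sketch; the policy id, schedule, and repository name are placeholders, and the repository must already exist:

----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // register a nightly policy against an existing repository
  await client.slm.putLifecycle({
    policy_id: 'nightly-snapshots',
    body: {
      schedule: '0 30 1 * * ?',
      name: '<nightly-snap-{now/d}>',
      repository: 'my_repository',
      retention: { expire_after: '30d', min_count: 5, max_count: 50 }
    }
  })
  // trigger the policy immediately instead of waiting for the schedule
  await client.slm.executeLifecycle({ policy_id: 'nightly-snapshots' })
}

run().catch(console.log)
----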
The snapshot namespace:

  cleanupRepository(params?: RequestParams.SnapshotCleanupRepository, options?: TransportRequestOptions)
  clone(params?: RequestParams.SnapshotClone<TRequestBody>, options?: TransportRequestOptions)
  create(params?: RequestParams.SnapshotCreate<TRequestBody>, options?: TransportRequestOptions)
  createRepository(params?: RequestParams.SnapshotCreateRepository<TRequestBody>, options?: TransportRequestOptions)
  delete(params?: RequestParams.SnapshotDelete, options?: TransportRequestOptions)
  deleteRepository(params?: RequestParams.SnapshotDeleteRepository, options?: TransportRequestOptions)
  get(params?: RequestParams.SnapshotGet, options?: TransportRequestOptions)
  getRepository(params?: RequestParams.SnapshotGetRepository, options?: TransportRequestOptions)
  repositoryAnalyze(params?: RequestParams.SnapshotRepositoryAnalyze, options?: TransportRequestOptions)
  restore(params?: RequestParams.SnapshotRestore<TRequestBody>, options?: TransportRequestOptions)
  status(params?: RequestParams.SnapshotStatus, options?: TransportRequestOptions)
  verifyRepository(params?: RequestParams.SnapshotVerifyRepository, options?: TransportRequestOptions)
TransportRequestCallback - restore, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SnapshotRestore, options?: TransportRequestOptions): TransportRequestPromise> - restore, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - restore, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SnapshotRestore, callback: callbackFn): TransportRequestCallback - restore, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SnapshotRestore, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - status, TContext = Context>(params?: RequestParams.SnapshotStatus, options?: TransportRequestOptions): TransportRequestPromise> - status, TContext = Context>(callback: callbackFn): TransportRequestCallback - status, TContext = Context>(params: RequestParams.SnapshotStatus, callback: callbackFn): TransportRequestCallback - status, TContext = Context>(params: RequestParams.SnapshotStatus, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - verify_repository, TContext = Context>(params?: RequestParams.SnapshotVerifyRepository, options?: TransportRequestOptions): TransportRequestPromise> - verify_repository, TContext = Context>(callback: callbackFn): TransportRequestCallback - verify_repository, TContext = Context>(params: RequestParams.SnapshotVerifyRepository, callback: callbackFn): TransportRequestCallback - verify_repository, TContext = Context>(params: RequestParams.SnapshotVerifyRepository, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - verifyRepository, TContext = Context>(params?: RequestParams.SnapshotVerifyRepository, options?: TransportRequestOptions): TransportRequestPromise> - verifyRepository, TContext = Context>(callback: callbackFn): TransportRequestCallback - verifyRepository, TContext = Context>(params: RequestParams.SnapshotVerifyRepository, callback: callbackFn): TransportRequestCallback - verifyRepository, TContext = Context>(params: RequestParams.SnapshotVerifyRepository, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - sql: { - clear_cursor, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SqlClearCursor, options?: TransportRequestOptions): TransportRequestPromise> - clear_cursor, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - clear_cursor, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SqlClearCursor, callback: callbackFn): TransportRequestCallback - clear_cursor, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SqlClearCursor, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clearCursor, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SqlClearCursor, options?: TransportRequestOptions): TransportRequestPromise> - clearCursor, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - clearCursor, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SqlClearCursor, callback: callbackFn): TransportRequestCallback - clearCursor, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SqlClearCursor, options: TransportRequestOptions, callback: 
callbackFn): TransportRequestCallback - delete_async, TContext = Context>(params?: RequestParams.SqlDeleteAsync, options?: TransportRequestOptions): TransportRequestPromise> - delete_async, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete_async, TContext = Context>(params: RequestParams.SqlDeleteAsync, callback: callbackFn): TransportRequestCallback - delete_async, TContext = Context>(params: RequestParams.SqlDeleteAsync, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteAsync, TContext = Context>(params?: RequestParams.SqlDeleteAsync, options?: TransportRequestOptions): TransportRequestPromise> - deleteAsync, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteAsync, TContext = Context>(params: RequestParams.SqlDeleteAsync, callback: callbackFn): TransportRequestCallback - deleteAsync, TContext = Context>(params: RequestParams.SqlDeleteAsync, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_async, TContext = Context>(params?: RequestParams.SqlGetAsync, options?: TransportRequestOptions): TransportRequestPromise> - get_async, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_async, TContext = Context>(params: RequestParams.SqlGetAsync, callback: callbackFn): TransportRequestCallback - get_async, TContext = Context>(params: RequestParams.SqlGetAsync, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getAsync, TContext = Context>(params?: RequestParams.SqlGetAsync, options?: TransportRequestOptions): TransportRequestPromise> - getAsync, TContext = Context>(callback: callbackFn): TransportRequestCallback - getAsync, TContext = Context>(params: RequestParams.SqlGetAsync, callback: callbackFn): TransportRequestCallback - getAsync, TContext = Context>(params: RequestParams.SqlGetAsync, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_async_status, TContext = Context>(params?: RequestParams.SqlGetAsyncStatus, options?: TransportRequestOptions): TransportRequestPromise> - get_async_status, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_async_status, TContext = Context>(params: RequestParams.SqlGetAsyncStatus, callback: callbackFn): TransportRequestCallback - get_async_status, TContext = Context>(params: RequestParams.SqlGetAsyncStatus, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getAsyncStatus, TContext = Context>(params?: RequestParams.SqlGetAsyncStatus, options?: TransportRequestOptions): TransportRequestPromise> - getAsyncStatus, TContext = Context>(callback: callbackFn): TransportRequestCallback - getAsyncStatus, TContext = Context>(params: RequestParams.SqlGetAsyncStatus, callback: callbackFn): TransportRequestCallback - getAsyncStatus, TContext = Context>(params: RequestParams.SqlGetAsyncStatus, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - query, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SqlQuery, options?: TransportRequestOptions): TransportRequestPromise> - query, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - query, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SqlQuery, callback: callbackFn): TransportRequestCallback - query, TRequestBody extends RequestBody = Record, TContext = Context>(params: 
RequestParams.SqlQuery, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - translate, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.SqlTranslate, options?: TransportRequestOptions): TransportRequestPromise> - translate, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - translate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SqlTranslate, callback: callbackFn): TransportRequestCallback - translate, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.SqlTranslate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - ssl: { - certificates, TContext = Context>(params?: RequestParams.SslCertificates, options?: TransportRequestOptions): TransportRequestPromise> - certificates, TContext = Context>(callback: callbackFn): TransportRequestCallback - certificates, TContext = Context>(params: RequestParams.SslCertificates, callback: callbackFn): TransportRequestCallback - certificates, TContext = Context>(params: RequestParams.SslCertificates, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - tasks: { - cancel, TContext = Context>(params?: RequestParams.TasksCancel, options?: TransportRequestOptions): TransportRequestPromise> - cancel, TContext = Context>(callback: callbackFn): TransportRequestCallback - cancel, TContext = Context>(params: RequestParams.TasksCancel, callback: callbackFn): TransportRequestCallback - cancel, TContext = Context>(params: RequestParams.TasksCancel, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get, TContext = Context>(params?: RequestParams.TasksGet, options?: TransportRequestOptions): TransportRequestPromise> - get, TContext = Context>(callback: callbackFn): TransportRequestCallback - get, TContext = Context>(params: RequestParams.TasksGet, callback: callbackFn): TransportRequestCallback - get, TContext = Context>(params: RequestParams.TasksGet, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - list, TContext = Context>(params?: RequestParams.TasksList, options?: TransportRequestOptions): TransportRequestPromise> - list, TContext = Context>(callback: callbackFn): TransportRequestCallback - list, TContext = Context>(params: RequestParams.TasksList, callback: callbackFn): TransportRequestCallback - list, TContext = Context>(params: RequestParams.TasksList, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - terms_enum, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.TermsEnum, options?: TransportRequestOptions): TransportRequestPromise> - terms_enum, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - terms_enum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TermsEnum, callback: callbackFn): TransportRequestCallback - terms_enum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TermsEnum, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - termsEnum, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.TermsEnum, options?: TransportRequestOptions): TransportRequestPromise> - termsEnum, TRequestBody extends RequestBody = Record, TContext = Context>(callback: 
callbackFn): TransportRequestCallback - termsEnum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TermsEnum, callback: callbackFn): TransportRequestCallback - termsEnum, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TermsEnum, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - termvectors, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.Termvectors, options?: TransportRequestOptions): TransportRequestPromise> - termvectors, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - termvectors, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Termvectors, callback: callbackFn): TransportRequestCallback - termvectors, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Termvectors, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - text_structure: { - find_structure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params?: RequestParams.TextStructureFindStructure, options?: TransportRequestOptions): TransportRequestPromise> - find_structure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(callback: callbackFn): TransportRequestCallback - find_structure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.TextStructureFindStructure, callback: callbackFn): TransportRequestCallback - find_structure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.TextStructureFindStructure, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - findStructure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params?: RequestParams.TextStructureFindStructure, options?: TransportRequestOptions): TransportRequestPromise> - findStructure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(callback: callbackFn): TransportRequestCallback - findStructure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.TextStructureFindStructure, callback: callbackFn): TransportRequestCallback - findStructure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.TextStructureFindStructure, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - textStructure: { - find_structure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params?: RequestParams.TextStructureFindStructure, options?: TransportRequestOptions): TransportRequestPromise> - find_structure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(callback: callbackFn): TransportRequestCallback - find_structure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.TextStructureFindStructure, callback: callbackFn): TransportRequestCallback - find_structure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.TextStructureFindStructure, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - findStructure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params?: RequestParams.TextStructureFindStructure, options?: TransportRequestOptions): TransportRequestPromise> - findStructure, TRequestBody extends RequestNDBody = Record[], TContext = 
Context>(callback: callbackFn): TransportRequestCallback - findStructure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.TextStructureFindStructure, callback: callbackFn): TransportRequestCallback - findStructure, TRequestBody extends RequestNDBody = Record[], TContext = Context>(params: RequestParams.TextStructureFindStructure, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - transform: { - delete_transform, TContext = Context>(params?: RequestParams.TransformDeleteTransform, options?: TransportRequestOptions): TransportRequestPromise> - delete_transform, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete_transform, TContext = Context>(params: RequestParams.TransformDeleteTransform, callback: callbackFn): TransportRequestCallback - delete_transform, TContext = Context>(params: RequestParams.TransformDeleteTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteTransform, TContext = Context>(params?: RequestParams.TransformDeleteTransform, options?: TransportRequestOptions): TransportRequestPromise> - deleteTransform, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteTransform, TContext = Context>(params: RequestParams.TransformDeleteTransform, callback: callbackFn): TransportRequestCallback - deleteTransform, TContext = Context>(params: RequestParams.TransformDeleteTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_transform, TContext = Context>(params?: RequestParams.TransformGetTransform, options?: TransportRequestOptions): TransportRequestPromise> - get_transform, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_transform, TContext = Context>(params: RequestParams.TransformGetTransform, callback: callbackFn): TransportRequestCallback - get_transform, TContext = Context>(params: RequestParams.TransformGetTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTransform, TContext = Context>(params?: RequestParams.TransformGetTransform, options?: TransportRequestOptions): TransportRequestPromise> - getTransform, TContext = Context>(callback: callbackFn): TransportRequestCallback - getTransform, TContext = Context>(params: RequestParams.TransformGetTransform, callback: callbackFn): TransportRequestCallback - getTransform, TContext = Context>(params: RequestParams.TransformGetTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_transform_stats, TContext = Context>(params?: RequestParams.TransformGetTransformStats, options?: TransportRequestOptions): TransportRequestPromise> - get_transform_stats, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_transform_stats, TContext = Context>(params: RequestParams.TransformGetTransformStats, callback: callbackFn): TransportRequestCallback - get_transform_stats, TContext = Context>(params: RequestParams.TransformGetTransformStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTransformStats, TContext = Context>(params?: RequestParams.TransformGetTransformStats, options?: TransportRequestOptions): TransportRequestPromise> - getTransformStats, TContext = Context>(callback: callbackFn): TransportRequestCallback - getTransformStats, TContext = Context>(params: RequestParams.TransformGetTransformStats, callback: callbackFn): TransportRequestCallback - 
getTransformStats, TContext = Context>(params: RequestParams.TransformGetTransformStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - preview_transform, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.TransformPreviewTransform, options?: TransportRequestOptions): TransportRequestPromise> - preview_transform, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - preview_transform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformPreviewTransform, callback: callbackFn): TransportRequestCallback - preview_transform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformPreviewTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - previewTransform, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.TransformPreviewTransform, options?: TransportRequestOptions): TransportRequestPromise> - previewTransform, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - previewTransform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformPreviewTransform, callback: callbackFn): TransportRequestCallback - previewTransform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformPreviewTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_transform, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.TransformPutTransform, options?: TransportRequestOptions): TransportRequestPromise> - put_transform, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_transform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformPutTransform, callback: callbackFn): TransportRequestCallback - put_transform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformPutTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putTransform, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.TransformPutTransform, options?: TransportRequestOptions): TransportRequestPromise> - putTransform, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - putTransform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformPutTransform, callback: callbackFn): TransportRequestCallback - putTransform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformPutTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - start_transform, TContext = Context>(params?: RequestParams.TransformStartTransform, options?: TransportRequestOptions): TransportRequestPromise> - start_transform, TContext = Context>(callback: callbackFn): TransportRequestCallback - start_transform, TContext = Context>(params: RequestParams.TransformStartTransform, callback: callbackFn): TransportRequestCallback - start_transform, TContext = Context>(params: RequestParams.TransformStartTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - startTransform, 
TContext = Context>(params?: RequestParams.TransformStartTransform, options?: TransportRequestOptions): TransportRequestPromise> - startTransform, TContext = Context>(callback: callbackFn): TransportRequestCallback - startTransform, TContext = Context>(params: RequestParams.TransformStartTransform, callback: callbackFn): TransportRequestCallback - startTransform, TContext = Context>(params: RequestParams.TransformStartTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stop_transform, TContext = Context>(params?: RequestParams.TransformStopTransform, options?: TransportRequestOptions): TransportRequestPromise> - stop_transform, TContext = Context>(callback: callbackFn): TransportRequestCallback - stop_transform, TContext = Context>(params: RequestParams.TransformStopTransform, callback: callbackFn): TransportRequestCallback - stop_transform, TContext = Context>(params: RequestParams.TransformStopTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stopTransform, TContext = Context>(params?: RequestParams.TransformStopTransform, options?: TransportRequestOptions): TransportRequestPromise> - stopTransform, TContext = Context>(callback: callbackFn): TransportRequestCallback - stopTransform, TContext = Context>(params: RequestParams.TransformStopTransform, callback: callbackFn): TransportRequestCallback - stopTransform, TContext = Context>(params: RequestParams.TransformStopTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - update_transform, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.TransformUpdateTransform, options?: TransportRequestOptions): TransportRequestPromise> - update_transform, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - update_transform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformUpdateTransform, callback: callbackFn): TransportRequestCallback - update_transform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformUpdateTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateTransform, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.TransformUpdateTransform, options?: TransportRequestOptions): TransportRequestPromise> - updateTransform, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - updateTransform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformUpdateTransform, callback: callbackFn): TransportRequestCallback - updateTransform, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.TransformUpdateTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - update, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.Update, options?: TransportRequestOptions): TransportRequestPromise> - update, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - update, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Update, callback: callbackFn): TransportRequestCallback - update, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.Update, options: 
TransportRequestOptions, callback: callbackFn): TransportRequestCallback - update_by_query, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.UpdateByQuery, options?: TransportRequestOptions): TransportRequestPromise> - update_by_query, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - update_by_query, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.UpdateByQuery, callback: callbackFn): TransportRequestCallback - update_by_query, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.UpdateByQuery, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateByQuery, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.UpdateByQuery, options?: TransportRequestOptions): TransportRequestPromise> - updateByQuery, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - updateByQuery, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.UpdateByQuery, callback: callbackFn): TransportRequestCallback - updateByQuery, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.UpdateByQuery, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - update_by_query_rethrottle, TContext = Context>(params?: RequestParams.UpdateByQueryRethrottle, options?: TransportRequestOptions): TransportRequestPromise> - update_by_query_rethrottle, TContext = Context>(callback: callbackFn): TransportRequestCallback - update_by_query_rethrottle, TContext = Context>(params: RequestParams.UpdateByQueryRethrottle, callback: callbackFn): TransportRequestCallback - update_by_query_rethrottle, TContext = Context>(params: RequestParams.UpdateByQueryRethrottle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateByQueryRethrottle, TContext = Context>(params?: RequestParams.UpdateByQueryRethrottle, options?: TransportRequestOptions): TransportRequestPromise> - updateByQueryRethrottle, TContext = Context>(callback: callbackFn): TransportRequestCallback - updateByQueryRethrottle, TContext = Context>(params: RequestParams.UpdateByQueryRethrottle, callback: callbackFn): TransportRequestCallback - updateByQueryRethrottle, TContext = Context>(params: RequestParams.UpdateByQueryRethrottle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - watcher: { - ack_watch, TContext = Context>(params?: RequestParams.WatcherAckWatch, options?: TransportRequestOptions): TransportRequestPromise> - ack_watch, TContext = Context>(callback: callbackFn): TransportRequestCallback - ack_watch, TContext = Context>(params: RequestParams.WatcherAckWatch, callback: callbackFn): TransportRequestCallback - ack_watch, TContext = Context>(params: RequestParams.WatcherAckWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - ackWatch, TContext = Context>(params?: RequestParams.WatcherAckWatch, options?: TransportRequestOptions): TransportRequestPromise> - ackWatch, TContext = Context>(callback: callbackFn): TransportRequestCallback - ackWatch, TContext = Context>(params: RequestParams.WatcherAckWatch, callback: callbackFn): TransportRequestCallback - ackWatch, TContext = Context>(params: RequestParams.WatcherAckWatch, options: TransportRequestOptions, callback: callbackFn): 
TransportRequestCallback - activate_watch, TContext = Context>(params?: RequestParams.WatcherActivateWatch, options?: TransportRequestOptions): TransportRequestPromise> - activate_watch, TContext = Context>(callback: callbackFn): TransportRequestCallback - activate_watch, TContext = Context>(params: RequestParams.WatcherActivateWatch, callback: callbackFn): TransportRequestCallback - activate_watch, TContext = Context>(params: RequestParams.WatcherActivateWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - activateWatch, TContext = Context>(params?: RequestParams.WatcherActivateWatch, options?: TransportRequestOptions): TransportRequestPromise> - activateWatch, TContext = Context>(callback: callbackFn): TransportRequestCallback - activateWatch, TContext = Context>(params: RequestParams.WatcherActivateWatch, callback: callbackFn): TransportRequestCallback - activateWatch, TContext = Context>(params: RequestParams.WatcherActivateWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deactivate_watch, TContext = Context>(params?: RequestParams.WatcherDeactivateWatch, options?: TransportRequestOptions): TransportRequestPromise> - deactivate_watch, TContext = Context>(callback: callbackFn): TransportRequestCallback - deactivate_watch, TContext = Context>(params: RequestParams.WatcherDeactivateWatch, callback: callbackFn): TransportRequestCallback - deactivate_watch, TContext = Context>(params: RequestParams.WatcherDeactivateWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deactivateWatch, TContext = Context>(params?: RequestParams.WatcherDeactivateWatch, options?: TransportRequestOptions): TransportRequestPromise> - deactivateWatch, TContext = Context>(callback: callbackFn): TransportRequestCallback - deactivateWatch, TContext = Context>(params: RequestParams.WatcherDeactivateWatch, callback: callbackFn): TransportRequestCallback - deactivateWatch, TContext = Context>(params: RequestParams.WatcherDeactivateWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_watch, TContext = Context>(params?: RequestParams.WatcherDeleteWatch, options?: TransportRequestOptions): TransportRequestPromise> - delete_watch, TContext = Context>(callback: callbackFn): TransportRequestCallback - delete_watch, TContext = Context>(params: RequestParams.WatcherDeleteWatch, callback: callbackFn): TransportRequestCallback - delete_watch, TContext = Context>(params: RequestParams.WatcherDeleteWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteWatch, TContext = Context>(params?: RequestParams.WatcherDeleteWatch, options?: TransportRequestOptions): TransportRequestPromise> - deleteWatch, TContext = Context>(callback: callbackFn): TransportRequestCallback - deleteWatch, TContext = Context>(params: RequestParams.WatcherDeleteWatch, callback: callbackFn): TransportRequestCallback - deleteWatch, TContext = Context>(params: RequestParams.WatcherDeleteWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - execute_watch, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.WatcherExecuteWatch, options?: TransportRequestOptions): TransportRequestPromise> - execute_watch, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - execute_watch, TRequestBody extends RequestBody = Record, TContext = 
Context>(params: RequestParams.WatcherExecuteWatch, callback: callbackFn): TransportRequestCallback - execute_watch, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.WatcherExecuteWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - executeWatch, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.WatcherExecuteWatch, options?: TransportRequestOptions): TransportRequestPromise> - executeWatch, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - executeWatch, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.WatcherExecuteWatch, callback: callbackFn): TransportRequestCallback - executeWatch, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.WatcherExecuteWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_watch, TContext = Context>(params?: RequestParams.WatcherGetWatch, options?: TransportRequestOptions): TransportRequestPromise> - get_watch, TContext = Context>(callback: callbackFn): TransportRequestCallback - get_watch, TContext = Context>(params: RequestParams.WatcherGetWatch, callback: callbackFn): TransportRequestCallback - get_watch, TContext = Context>(params: RequestParams.WatcherGetWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getWatch, TContext = Context>(params?: RequestParams.WatcherGetWatch, options?: TransportRequestOptions): TransportRequestPromise> - getWatch, TContext = Context>(callback: callbackFn): TransportRequestCallback - getWatch, TContext = Context>(params: RequestParams.WatcherGetWatch, callback: callbackFn): TransportRequestCallback - getWatch, TContext = Context>(params: RequestParams.WatcherGetWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_watch, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.WatcherPutWatch, options?: TransportRequestOptions): TransportRequestPromise> - put_watch, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - put_watch, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.WatcherPutWatch, callback: callbackFn): TransportRequestCallback - put_watch, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.WatcherPutWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putWatch, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.WatcherPutWatch, options?: TransportRequestOptions): TransportRequestPromise> - putWatch, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - putWatch, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.WatcherPutWatch, callback: callbackFn): TransportRequestCallback - putWatch, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.WatcherPutWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - query_watches, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.WatcherQueryWatches, options?: TransportRequestOptions): TransportRequestPromise> - query_watches, TRequestBody extends RequestBody = Record, TContext = 
Context>(callback: callbackFn): TransportRequestCallback - query_watches, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.WatcherQueryWatches, callback: callbackFn): TransportRequestCallback - query_watches, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.WatcherQueryWatches, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - queryWatches, TRequestBody extends RequestBody = Record, TContext = Context>(params?: RequestParams.WatcherQueryWatches, options?: TransportRequestOptions): TransportRequestPromise> - queryWatches, TRequestBody extends RequestBody = Record, TContext = Context>(callback: callbackFn): TransportRequestCallback - queryWatches, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.WatcherQueryWatches, callback: callbackFn): TransportRequestCallback - queryWatches, TRequestBody extends RequestBody = Record, TContext = Context>(params: RequestParams.WatcherQueryWatches, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - start, TContext = Context>(params?: RequestParams.WatcherStart, options?: TransportRequestOptions): TransportRequestPromise> - start, TContext = Context>(callback: callbackFn): TransportRequestCallback - start, TContext = Context>(params: RequestParams.WatcherStart, callback: callbackFn): TransportRequestCallback - start, TContext = Context>(params: RequestParams.WatcherStart, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stats, TContext = Context>(params?: RequestParams.WatcherStats, options?: TransportRequestOptions): TransportRequestPromise> - stats, TContext = Context>(callback: callbackFn): TransportRequestCallback - stats, TContext = Context>(params: RequestParams.WatcherStats, callback: callbackFn): TransportRequestCallback - stats, TContext = Context>(params: RequestParams.WatcherStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stop, TContext = Context>(params?: RequestParams.WatcherStop, options?: TransportRequestOptions): TransportRequestPromise> - stop, TContext = Context>(callback: callbackFn): TransportRequestCallback - stop, TContext = Context>(params: RequestParams.WatcherStop, callback: callbackFn): TransportRequestCallback - stop, TContext = Context>(params: RequestParams.WatcherStop, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - xpack: { - info, TContext = Context>(params?: RequestParams.XpackInfo, options?: TransportRequestOptions): TransportRequestPromise> - info, TContext = Context>(callback: callbackFn): TransportRequestCallback - info, TContext = Context>(params: RequestParams.XpackInfo, callback: callbackFn): TransportRequestCallback - info, TContext = Context>(params: RequestParams.XpackInfo, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - usage, TContext = Context>(params?: RequestParams.XpackUsage, options?: TransportRequestOptions): TransportRequestPromise> - usage, TContext = Context>(callback: callbackFn): TransportRequestCallback - usage, TContext = Context>(params: RequestParams.XpackUsage, callback: callbackFn): TransportRequestCallback - usage, TContext = Context>(params: RequestParams.XpackUsage, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - /* /GENERATED */ -} - -declare const events: { - SERIALIZATION: string; - REQUEST: string; - DESERIALIZATION: string; - 
-  RESPONSE: string;
-  SNIFF: string;
-  RESURRECT: string;
-};
-
-export {
-  Client,
-  Transport,
-  ConnectionPool,
-  BaseConnectionPool,
-  CloudConnectionPool,
-  Connection,
-  Serializer,
-  events,
-  errors,
-  ApiError,
-  ApiResponse,
-  RequestEvent,
-  ResurrectEvent,
-  estypes,
-  RequestParams,
-  ClientOptions,
-  NodeOptions,
-  ClientExtendsCallbackOptions
-}
+export { Client, errors }
diff --git a/index.js b/index.js
index d90aa1a1b..8a45b40a5 100644
--- a/index.js
+++ b/index.js
@@ -19,344 +19,7 @@
 'use strict'
 
-const { EventEmitter } = require('events')
-const { URL } = require('url')
-const buffer = require('buffer')
-const debug = require('debug')('elasticsearch')
-const Transport = require('./lib/Transport')
-const Connection = require('./lib/Connection')
-const { ConnectionPool, CloudConnectionPool } = require('./lib/pool')
-const Helpers = require('./lib/Helpers')
-const Serializer = require('./lib/Serializer')
-const errors = require('./lib/errors')
-const { ConfigurationError } = errors
-const { prepareHeaders } = Connection.internals
-let clientVersion = require('./package.json').version
-/* istanbul ignore next */
-if (clientVersion.includes('-')) {
-  // clean prerelease
-  clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p'
-}
-const nodeVersion = process.versions.node
+const { errors } = require('@elastic/transport')
+const { default: Client } = require('./lib/Client')
-const kInitialOptions = Symbol('elasticsearchjs-initial-options')
-const kChild = Symbol('elasticsearchjs-child')
-const kExtensions = Symbol('elasticsearchjs-extensions')
-const kEventEmitter = Symbol('elasticsearchjs-event-emitter')
-
-const ESAPI = require('./api')
-
-class Client extends ESAPI {
-  constructor (opts = {}) {
-    super({ ConfigurationError })
-    if (opts.cloud && opts[kChild] === undefined) {
-      const { id, username, password } = opts.cloud
-      // the cloud id is `cluster-name:base64encodedurl`
-      // the url is a string divided by two '$', the first is the cloud url
-      // the second the elasticsearch instance, the third the kibana instance
-      const cloudUrls = Buffer.from(id.split(':')[1], 'base64').toString().split('$')
-
-      // TODO: remove username and password here in 8
-      if (username && password) {
-        opts.auth = Object.assign({}, opts.auth, { username, password })
-      }
-      opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}`
-
-      // Cloud has better performances with compression enabled
-      // see https://github.com/elastic/elasticsearch-py/pull/704.
-      // So unless the user specifies otherwise, we enable compression.
-      if (opts.compression == null) opts.compression = 'gzip'
-      if (opts.suggestCompression == null) opts.suggestCompression = true
-      if (opts.ssl == null ||
-         (opts.ssl && opts.ssl.secureProtocol == null)) {
-        opts.ssl = opts.ssl || {}
-        opts.ssl.secureProtocol = 'TLSv1_2_method'
-      }
-    }
-
-    if (!opts.node && !opts.nodes) {
-      throw new ConfigurationError('Missing node(s) option')
-    }
-
-    if (opts[kChild] === undefined) {
-      const checkAuth = getAuth(opts.node || opts.nodes)
-      if (checkAuth && checkAuth.username && checkAuth.password) {
-        opts.auth = Object.assign({}, opts.auth, { username: checkAuth.username, password: checkAuth.password })
-      }
-    }
-
-    const options = opts[kChild] !== undefined
-      ? opts[kChild].initialOptions
-      : Object.assign({}, {
-        Connection,
-        Transport,
-        Serializer,
-        ConnectionPool: opts.cloud ? CloudConnectionPool : ConnectionPool,
-        maxRetries: 3,
-        requestTimeout: 30000,
-        pingTimeout: 3000,
-        sniffInterval: false,
-        sniffOnStart: false,
-        sniffEndpoint: '_nodes/_all/http',
-        sniffOnConnectionFault: false,
-        resurrectStrategy: 'ping',
-        suggestCompression: false,
-        compression: false,
-        ssl: null,
-        caFingerprint: null,
-        agent: null,
-        headers: {},
-        nodeFilter: null,
-        nodeSelector: 'round-robin',
-        generateRequestId: null,
-        name: 'elasticsearch-js',
-        auth: null,
-        opaqueIdPrefix: null,
-        context: null,
-        proxy: null,
-        enableMetaHeader: true,
-        disablePrototypePoisoningProtection: false,
-        maxResponseSize: null,
-        maxCompressedResponseSize: null
-      }, opts)
-
-    if (options.maxResponseSize !== null && options.maxResponseSize > buffer.constants.MAX_STRING_LENGTH) {
-      throw new ConfigurationError(`The maxResponseSize cannot be bigger than ${buffer.constants.MAX_STRING_LENGTH}`)
-    }
-
-    if (options.maxCompressedResponseSize !== null && options.maxCompressedResponseSize > buffer.constants.MAX_LENGTH) {
-      throw new ConfigurationError(`The maxCompressedResponseSize cannot be bigger than ${buffer.constants.MAX_LENGTH}`)
-    }
-
-    if (options.caFingerprint !== null && isHttpConnection(opts.node || opts.nodes)) {
-      throw new ConfigurationError('You can\'t configure the caFingerprint with a http connection')
-    }
-
-    if (process.env.ELASTIC_CLIENT_APIVERSIONING === 'true') {
-      options.headers = Object.assign({ accept: 'application/vnd.elasticsearch+json; compatible-with=7' }, options.headers)
-    }
-
-    this[kInitialOptions] = options
-    this[kExtensions] = []
-    this.name = options.name
-
-    if (options.enableMetaHeader) {
-      options.headers['x-elastic-client-meta'] = `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion}`
-    }
-
-    if (opts[kChild] !== undefined) {
-      this.serializer = options[kChild].serializer
-      this.connectionPool = options[kChild].connectionPool
-      this[kEventEmitter] = options[kChild].eventEmitter
-    } else {
-      this[kEventEmitter] = new EventEmitter()
-      this.serializer = new options.Serializer({
-        disablePrototypePoisoningProtection: options.disablePrototypePoisoningProtection
-      })
-      this.connectionPool = new options.ConnectionPool({
-        pingTimeout: options.pingTimeout,
-        resurrectStrategy: options.resurrectStrategy,
-        ssl: options.ssl,
-        agent: options.agent,
-        proxy: options.proxy,
-        Connection: options.Connection,
-        auth: options.auth,
-        emit: this[kEventEmitter].emit.bind(this[kEventEmitter]),
-        caFingerprint: options.caFingerprint,
-        sniffEnabled: options.sniffInterval !== false ||
-                      options.sniffOnStart !== false ||
-                      options.sniffOnConnectionFault !== false
-      })
-      // Add the connections before initialize the Transport
-      this.connectionPool.addConnection(options.node || options.nodes)
-    }
-
-    this.transport = new options.Transport({
-      emit: this[kEventEmitter].emit.bind(this[kEventEmitter]),
-      connectionPool: this.connectionPool,
-      serializer: this.serializer,
-      maxRetries: options.maxRetries,
-      requestTimeout: options.requestTimeout,
-      sniffInterval: options.sniffInterval,
-      sniffOnStart: options.sniffOnStart,
-      sniffOnConnectionFault: options.sniffOnConnectionFault,
-      sniffEndpoint: options.sniffEndpoint,
-      suggestCompression: options.suggestCompression,
-      compression: options.compression,
-      headers: options.headers,
-      nodeFilter: options.nodeFilter,
-      nodeSelector: options.nodeSelector,
-      generateRequestId: options.generateRequestId,
-      name: options.name,
-      opaqueIdPrefix: options.opaqueIdPrefix,
-      context: options.context,
-      maxResponseSize: options.maxResponseSize,
-      maxCompressedResponseSize: options.maxCompressedResponseSize
-    })
-
-    this.helpers = new Helpers({
-      client: this,
-      maxRetries: options.maxRetries,
-      metaHeader: options.enableMetaHeader
-        ? `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion}`
-        : null
-    })
-  }
-
-  get emit () {
-    return this[kEventEmitter].emit.bind(this[kEventEmitter])
-  }
-
-  get on () {
-    return this[kEventEmitter].on.bind(this[kEventEmitter])
-  }
-
-  get once () {
-    return this[kEventEmitter].once.bind(this[kEventEmitter])
-  }
-
-  get off () {
-    return this[kEventEmitter].off.bind(this[kEventEmitter])
-  }
-
-  extend (name, opts, fn) {
-    if (typeof opts === 'function') {
-      fn = opts
-      opts = {}
-    }
-
-    let [namespace, method] = name.split('.')
-    if (method == null) {
-      method = namespace
-      namespace = null
-    }
-
-    if (namespace != null) {
-      if (this[namespace] != null && this[namespace][method] != null && opts.force !== true) {
-        throw new Error(`The method "${method}" already exists on namespace "${namespace}"`)
-      }
-
-      if (this[namespace] == null) this[namespace] = {}
-      this[namespace][method] = fn({
-        makeRequest: this.transport.request.bind(this.transport),
-        result: { body: null, statusCode: null, headers: null, warnings: null },
-        ConfigurationError
-      })
-    } else {
-      if (this[method] != null && opts.force !== true) {
-        throw new Error(`The method "${method}" already exists`)
-      }
-
-      this[method] = fn({
-        makeRequest: this.transport.request.bind(this.transport),
-        result: { body: null, statusCode: null, headers: null, warnings: null },
-        ConfigurationError
-      })
-    }
-
-    this[kExtensions].push({ name, opts, fn })
-  }
-
-  child (opts) {
-    // Merge the new options with the initial ones
-    const options = Object.assign({}, this[kInitialOptions], opts)
-    // Pass to the child client the parent instances that cannot be overriden
-    options[kChild] = {
-      connectionPool: this.connectionPool,
-      serializer: this.serializer,
-      eventEmitter: this[kEventEmitter],
-      initialOptions: options
-    }
-
-    /* istanbul ignore else */
-    if (options.auth !== undefined) {
-      options.headers = prepareHeaders(options.headers, options.auth)
-    }
-
-    const client = new Client(options)
-    // sync product check
-    const tSymbol = Object.getOwnPropertySymbols(this.transport)
-      .filter(symbol => symbol.description === 'product check')[0]
-    client.transport[tSymbol] = this.transport[tSymbol]
-    // Add parent extensions
-    if (this[kExtensions].length > 0) {
-      this[kExtensions].forEach(({ name, opts, fn }) => {
-        client.extend(name, opts, fn)
-      })
-    }
-    return client
-  }
-
-  close (callback) {
-    if (callback == null) {
-      return new Promise((resolve, reject) => {
-        this.close(resolve)
-      })
-    }
-    debug('Closing the client')
-    this.connectionPool.empty(callback)
-  }
-}
-
-function getAuth (node) {
-  if (Array.isArray(node)) {
-    for (const url of node) {
-      const auth = getUsernameAndPassword(url)
-      if (auth.username !== '' && auth.password !== '') {
-        return auth
-      }
-    }
-
-    return null
-  }
-
-  const auth = getUsernameAndPassword(node)
-  if (auth.username !== '' && auth.password !== '') {
-    return auth
-  }
-
-  return null
-
-  function getUsernameAndPassword (node) {
-    /* istanbul ignore else */
-    if (typeof node === 'string') {
-      const { username, password } = new URL(node)
-      return {
-        username: decodeURIComponent(username),
-        password: decodeURIComponent(password)
-      }
-    } else if (node.url instanceof URL) {
-      return {
-        username: decodeURIComponent(node.url.username),
-        password: decodeURIComponent(node.url.password)
-      }
-    }
-  }
-}
-
-function isHttpConnection (node) {
-  if (Array.isArray(node)) {
-    return node.some((n) => (typeof n === 'string' ? new URL(n).protocol : n.url.protocol) === 'http:')
-  } else {
-    return (typeof node === 'string' ? new URL(node).protocol : node.url.protocol) === 'http:'
-  }
-}
-
-const events = {
-  RESPONSE: 'response',
-  REQUEST: 'request',
-  SNIFF: 'sniff',
-  RESURRECT: 'resurrect',
-  SERIALIZATION: 'serialization',
-  DESERIALIZATION: 'deserialization'
-}
-
-module.exports = {
-  Client,
-  Transport,
-  ConnectionPool,
-  Connection,
-  Serializer,
-  events,
-  errors
-}
+module.exports = { Client, errors }
diff --git a/index.mjs b/index.mjs
deleted file mode 100644
index c23a3d832..000000000
--- a/index.mjs
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import mod from './index.js'
-
-export default mod
-export const Client = mod.Client
-export const Transport = mod.Transport
-export const ConnectionPool = mod.ConnectionPool
-export const Connection = mod.Connection
-export const Serializer = mod.Serializer
-export const events = mod.events
-export const errors = mod.errors
diff --git a/lib/Connection.d.ts b/lib/Connection.d.ts
deleted file mode 100644
index 6b5c6cb7d..000000000
--- a/lib/Connection.d.ts
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-///
-
-import { URL } from 'url';
-import { inspect, InspectOptions } from 'util'
-import { Readable as ReadableStream } from 'stream';
-import { ApiKeyAuth, BasicAuth } from './pool'
-import * as http from 'http'
-import * as https from 'https'
-import * as hpagent from 'hpagent'
-import { ConnectionOptions as TlsConnectionOptions } from 'tls'
-
-export declare type agentFn = (opts: ConnectionOptions) => any;
-
-export interface ConnectionOptions {
-  url: URL;
-  ssl?: TlsConnectionOptions;
-  id?: string;
-  headers?: Record;
-  agent?: AgentOptions | agentFn;
-  status?: string;
-  roles?: ConnectionRoles;
-  auth?: BasicAuth | ApiKeyAuth;
-  proxy?: string | URL;
-  caFingerprint?: string;
-}
-
-interface ConnectionRoles {
-  master?: boolean
-  data?: boolean
-  ingest?: boolean
-  ml?: boolean
-}
-
-interface RequestOptions extends http.ClientRequestArgs {
-  asStream?: boolean;
-  body?: string | Buffer | ReadableStream | null;
-  querystring?: string;
-}
-
-export interface AgentOptions {
-  keepAlive?: boolean;
-  keepAliveMsecs?: number;
-  maxSockets?: number;
-  maxFreeSockets?: number;
-}
-
-export default class Connection {
-  static statuses: {
-    ALIVE: string;
-    DEAD: string;
-  };
-  static roles: {
-    MASTER: string;
-    DATA: string;
-    INGEST: string;
-    ML: string;
-  };
-  url: URL
-  ssl: TlsConnectionOptions | null
-  id: string
-  headers: Record
-  status: string
-  roles: ConnectionRoles
-  deadCount: number
-  resurrectTimeout: number
-  makeRequest: any
-  _openRequests: number
-  _status: string
-  _agent: http.Agent | https.Agent | hpagent.HttpProxyAgent | hpagent.HttpsProxyAgent
-  constructor(opts?: ConnectionOptions)
-  request(params: RequestOptions, callback: (err: Error | null, response: http.IncomingMessage | null) => void): http.ClientRequest
-  close(): Connection
-  setRole(role: string, enabled: boolean): Connection
-  buildRequestObject(params: any): http.ClientRequestArgs
-  // @ts-ignore
-  [inspect.custom](object: any, options: InspectOptions): string
-  toJSON(): any
-}
-
-export {};
diff --git a/lib/Connection.js b/lib/Connection.js
deleted file mode 100644
index feaa3ab23..000000000
--- a/lib/Connection.js
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
*
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -'use strict' - -const assert = require('assert') -const { inspect } = require('util') -const hpagent = require('hpagent') -const http = require('http') -const https = require('https') -const debug = require('debug')('elasticsearch') -const { pipeline } = require('stream') -const INVALID_PATH_REGEX = /[^\u0021-\u00ff]/ -const { - ConnectionError, - RequestAbortedError, - TimeoutError, - ConfigurationError -} = require('./errors') - -class Connection { - constructor (opts) { - this.url = opts.url - this.ssl = opts.ssl || null - this.id = opts.id || stripAuth(opts.url.href) - this.headers = prepareHeaders(opts.headers, opts.auth) - this.deadCount = 0 - this.resurrectTimeout = 0 - this.caFingerprint = opts.caFingerprint - - this._openRequests = 0 - this._status = opts.status || Connection.statuses.ALIVE - this.roles = Object.assign({}, defaultRoles, opts.roles) - - if (!['http:', 'https:'].includes(this.url.protocol)) { - throw new ConfigurationError(`Invalid protocol: '${this.url.protocol}'`) - } - - if (typeof opts.agent === 'function') { - this.agent = opts.agent(opts) - } else if (opts.agent === false) { - this.agent = undefined - } else { - const agentOptions = Object.assign({}, { - keepAlive: true, - keepAliveMsecs: 1000, - maxSockets: 256, - maxFreeSockets: 256, - scheduling: 'lifo' - }, opts.agent) - if (opts.proxy) { - agentOptions.proxy = opts.proxy - this.agent = this.url.protocol === 'http:' - ? new hpagent.HttpProxyAgent(agentOptions) - : new hpagent.HttpsProxyAgent(Object.assign({}, agentOptions, this.ssl)) - } else { - this.agent = this.url.protocol === 'http:' - ? new http.Agent(agentOptions) - : new https.Agent(Object.assign({}, agentOptions, this.ssl)) - } - } - - this.makeRequest = this.url.protocol === 'http:' - ? http.request - : https.request - } - - request (params, callback) { - this._openRequests++ - let cleanedListeners = false - - const requestParams = this.buildRequestObject(params) - // https://github.com/nodejs/node/commit/b961d9fd83 - if (INVALID_PATH_REGEX.test(requestParams.path) === true) { - callback(new TypeError(`ERR_UNESCAPED_CHARACTERS: ${requestParams.path}`), null) - /* istanbul ignore next */ - return { abort: () => {} } - } - - debug('Starting a new request', params) - const request = this.makeRequest(requestParams) - - const onResponse = response => { - cleanListeners() - this._openRequests-- - callback(null, response) - } - - const onTimeout = () => { - cleanListeners() - this._openRequests-- - request.once('error', () => {}) // we need to catch the request aborted error - request.abort() - callback(new TimeoutError('Request timed out', params), null) - } - - const onError = err => { - cleanListeners() - this._openRequests-- - let message = err.message - if (err.code === 'ECONNRESET') { - /* istanbul ignore next */ - const socket = request.socket || {} - /* istanbul ignore next */ - message += ` - Local: ${socket.localAddress || 'unknown'}:${socket.localPort || 'unknown'}, Remote: ${socket.remoteAddress || 'unknown'}:${socket.remotePort || 'unknown'}` - } - callback(new ConnectionError(message), null) - } - - const onAbort = () => { - cleanListeners() - request.once('error', () => {}) // we need to catch the request aborted error - debug('Request aborted', params) - this._openRequests-- - callback(new RequestAbortedError(), null) - } - - const onSocket = socket => { - /* istanbul ignore else */ - if (!socket.isSessionReused()) { - socket.once('secureConnect', () => { - const issuerCertificate = getIssuerCertificate(socket) - /* istanbul 
ignore next */ - if (issuerCertificate == null) { - onError(new Error('Invalid or malformed certificate')) - request.once('error', () => {}) // we need to catch the request aborted error - return request.abort() - } - - // Check if fingerprint matches - /* istanbul ignore else */ - if (this.caFingerprint !== issuerCertificate.fingerprint256) { - onError(new Error('Server certificate CA fingerprint does not match the value configured in caFingerprint')) - request.once('error', () => {}) // we need to catch the request aborted error - return request.abort() - } - }) - } - } - - request.on('response', onResponse) - request.on('timeout', onTimeout) - request.on('error', onError) - request.on('abort', onAbort) - if (this.caFingerprint != null) { - request.on('socket', onSocket) - } - - // Disables the Nagle algorithm - request.setNoDelay(true) - - // starts the request - if (isStream(params.body) === true) { - pipeline(params.body, request, err => { - /* istanbul ignore if */ - if (err != null && cleanedListeners === false) { - cleanListeners() - this._openRequests-- - callback(err, null) - } - }) - } else { - request.end(params.body) - } - - return request - - function cleanListeners () { - request.removeListener('response', onResponse) - request.removeListener('timeout', onTimeout) - request.removeListener('error', onError) - request.removeListener('abort', onAbort) - request.removeListener('socket', onSocket) - cleanedListeners = true - } - } - - // TODO: write a better closing logic - close (callback = () => {}) { - debug('Closing connection', this.id) - if (this._openRequests > 0) { - setTimeout(() => this.close(callback), 1000) - } else { - if (this.agent !== undefined) { - this.agent.destroy() - } - callback() - } - } - - setRole (role, enabled) { - if (validRoles.indexOf(role) === -1) { - throw new ConfigurationError(`Unsupported role: '${role}'`) - } - if (typeof enabled !== 'boolean') { - throw new ConfigurationError('enabled should be a boolean') - } - - this.roles[role] = enabled - return this - } - - get status () { - return this._status - } - - set status (status) { - assert( - ~validStatuses.indexOf(status), - `Unsupported status: '${status}'` - ) - this._status = status - } - - buildRequestObject (params) { - const url = this.url - const request = { - protocol: url.protocol, - hostname: url.hostname[0] === '[' - ? url.hostname.slice(1, -1) - : url.hostname, - hash: url.hash, - search: url.search, - pathname: url.pathname, - path: '', - href: url.href, - origin: url.origin, - // https://github.com/elastic/elasticsearch-js/issues/843 - port: url.port !== '' ? url.port : undefined, - headers: this.headers, - agent: this.agent - } - - const paramsKeys = Object.keys(params) - for (let i = 0, len = paramsKeys.length; i < len; i++) { - const key = paramsKeys[i] - if (key === 'path') { - request.pathname = resolve(request.pathname, params[key]) - } else if (key === 'querystring' && !!params[key] === true) { - if (request.search === '') { - request.search = '?' + params[key] - } else { - request.search += '&' + params[key] - } - } else if (key === 'headers') { - request.headers = Object.assign({}, request.headers, params.headers) - } else { - request[key] = params[key] - } - } - - request.path = request.pathname + request.search - - return request - } - - // Handles console.log and utils.inspect invocations. - // We want to hide `auth`, `agent` and `ssl` since they made - // the logs very hard to read. The user can still - // access them with `instance.agent` and `instance.ssl`. 
- [inspect.custom] (depth, options) { - const { - authorization, - ...headers - } = this.headers - - return { - url: stripAuth(this.url.toString()), - id: this.id, - headers, - deadCount: this.deadCount, - resurrectTimeout: this.resurrectTimeout, - _openRequests: this._openRequests, - status: this.status, - roles: this.roles - } - } - - toJSON () { - const { - authorization, - ...headers - } = this.headers - - return { - url: stripAuth(this.url.toString()), - id: this.id, - headers, - deadCount: this.deadCount, - resurrectTimeout: this.resurrectTimeout, - _openRequests: this._openRequests, - status: this.status, - roles: this.roles - } - } -} - -Connection.statuses = { - ALIVE: 'alive', - DEAD: 'dead' -} - -Connection.roles = { - MASTER: 'master', - DATA: 'data', - INGEST: 'ingest', - ML: 'ml' -} - -const defaultRoles = { - [Connection.roles.MASTER]: true, - [Connection.roles.DATA]: true, - [Connection.roles.INGEST]: true, - [Connection.roles.ML]: false -} - -const validStatuses = Object.keys(Connection.statuses) - .map(k => Connection.statuses[k]) -const validRoles = Object.keys(Connection.roles) - .map(k => Connection.roles[k]) - -function stripAuth (url) { - if (url.indexOf('@') === -1) return url - return url.slice(0, url.indexOf('//') + 2) + url.slice(url.indexOf('@') + 1) -} - -function isStream (obj) { - return obj != null && typeof obj.pipe === 'function' -} - -function resolve (host, path) { - const hostEndWithSlash = host[host.length - 1] === '/' - const pathStartsWithSlash = path[0] === '/' - - if (hostEndWithSlash === true && pathStartsWithSlash === true) { - return host + path.slice(1) - } else if (hostEndWithSlash !== pathStartsWithSlash) { - return host + path - } else { - return host + '/' + path - } -} - -function prepareHeaders (headers = {}, auth) { - if (auth != null && headers.authorization == null) { - /* istanbul ignore else */ - if (auth.apiKey) { - if (typeof auth.apiKey === 'object') { - headers.authorization = 'ApiKey ' + Buffer.from(`${auth.apiKey.id}:${auth.apiKey.api_key}`).toString('base64') - } else { - headers.authorization = `ApiKey ${auth.apiKey}` - } - } else if (auth.bearer) { - headers.authorization = `Bearer ${auth.bearer}` - } else if (auth.username && auth.password) { - headers.authorization = 'Basic ' + Buffer.from(`${auth.username}:${auth.password}`).toString('base64') - } - } - return headers -} - -function getIssuerCertificate (socket) { - let certificate = socket.getPeerCertificate(true) - while (certificate && Object.keys(certificate).length > 0) { - // invalid certificate - if (certificate.issuerCertificate == null) { - return null - } - - // We have reached the root certificate. - // In case of self-signed certificates, `issuerCertificate` may be a circular reference. - if (certificate.fingerprint256 === certificate.issuerCertificate.fingerprint256) { - break - } - - // continue the loop - certificate = certificate.issuerCertificate - } - return certificate -} - -module.exports = Connection -module.exports.internals = { prepareHeaders, getIssuerCertificate } diff --git a/lib/Helpers.d.ts b/lib/Helpers.d.ts deleted file mode 100644 index a416842c7..000000000 --- a/lib/Helpers.d.ts +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import { Readable as ReadableStream } from 'stream' -import { TransportRequestOptions, ApiError, ApiResponse, RequestBody, Context } from './Transport' -import { Search, Msearch, Bulk } from '../api/requestParams' - -export default class Helpers { - search>(params: Search, options?: TransportRequestOptions): Promise - scrollSearch, TRequestBody extends RequestBody = Record, TContext = Context>(params: Search, options?: TransportRequestOptions): AsyncIterable> - scrollDocuments>(params: Search, options?: TransportRequestOptions): AsyncIterable - msearch(options?: MsearchHelperOptions, reqOptions?: TransportRequestOptions): MsearchHelper - bulk(options: BulkHelperOptions, reqOptions?: TransportRequestOptions): BulkHelper -} - -export interface ScrollSearchResponse, TContext = Context> extends ApiResponse { - clear: () => Promise - documents: TDocument[] -} - -export interface BulkHelper extends Promise { - abort: () => BulkHelper - readonly stats: BulkStats -} - -export interface BulkStats { - total: number - failed: number - retry: number - successful: number - noop: number - time: number - bytes: number - aborted: boolean -} - -interface IndexAction { - index: { - _index: string - [key: string]: any - } -} - -interface CreateAction { - create: { - _index: string - [key: string]: any - } -} - -interface UpdateActionOperation { - update: { - _index: string - [key: string]: any - } -} - -interface DeleteAction { - delete: { - _index: string - [key: string]: any - } -} - -type UpdateAction = [UpdateActionOperation, Record] -type Action = IndexAction | CreateAction | UpdateAction | DeleteAction -type Omit = Pick> - -export interface BulkHelperOptions extends Omit { - datasource: TDocument[] | Buffer | ReadableStream | AsyncIterator - onDocument: (doc: TDocument) => Action - flushBytes?: number - flushInterval?: number - concurrency?: number - retries?: number - wait?: number - onDrop?: (doc: OnDropDocument) => void - refreshOnCompletion?: boolean | string -} - -export interface OnDropDocument { - status: number - error: { - type: string, - reason: string, - caused_by: { - type: string, - reason: string - } - } - document: TDocument - retried: boolean -} - -export interface MsearchHelperOptions extends Omit { - operations?: number - flushInterval?: number - concurrency?: number - retries?: number - wait?: number -} - -declare type callbackFn = (err: ApiError, result: ApiResponse) => void; -export interface MsearchHelper extends Promise { - stop(error?: Error): void - search, TRequestBody extends RequestBody = Record, TContext = Context>(header: Omit, body: TRequestBody): Promise> - search, TRequestBody extends RequestBody = Record, TContext = Context>(header: Omit, body: TRequestBody, callback: callbackFn): void -} diff --git a/lib/Serializer.d.ts b/lib/Serializer.d.ts deleted file mode 100644 index 7aad1d117..000000000 --- a/lib/Serializer.d.ts +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. 
under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -export interface SerializerOptions { - disablePrototypePoisoningProtection: boolean | 'proto' | 'constructor' -} - -export default class Serializer { - constructor (opts?: SerializerOptions) - serialize(object: any): string; - deserialize(json: string): any; - ndserialize(array: any[]): string; - qserialize(object: any): string; -} diff --git a/lib/Serializer.js b/lib/Serializer.js deleted file mode 100644 index 7797f1f00..000000000 --- a/lib/Serializer.js +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { stringify } = require('querystring') -const debug = require('debug')('elasticsearch') -const sjson = require('secure-json-parse') -const { SerializationError, DeserializationError } = require('./errors') -const kJsonOptions = Symbol('secure json parse options') - -class Serializer { - constructor (opts = {}) { - const disable = opts.disablePrototypePoisoningProtection - this[kJsonOptions] = { - protoAction: disable === true || disable === 'proto' ? 'ignore' : 'error', - constructorAction: disable === true || disable === 'constructor' ? 
'ignore' : 'error' - } - } - - serialize (object) { - debug('Serializing', object) - let json - try { - json = JSON.stringify(object) - } catch (err) { - throw new SerializationError(err.message, object) - } - return json - } - - deserialize (json) { - debug('Deserializing', json) - let object - try { - object = sjson.parse(json, this[kJsonOptions]) - } catch (err) { - throw new DeserializationError(err.message, json) - } - return object - } - - ndserialize (array) { - debug('ndserialize', array) - if (Array.isArray(array) === false) { - throw new SerializationError('The argument provided is not an array') - } - let ndjson = '' - for (let i = 0, len = array.length; i < len; i++) { - if (typeof array[i] === 'string') { - ndjson += array[i] + '\n' - } else { - ndjson += this.serialize(array[i]) + '\n' - } - } - return ndjson - } - - qserialize (object) { - debug('qserialize', object) - if (object == null) return '' - if (typeof object === 'string') return object - // arrays should be serialized as comma separated list - const keys = Object.keys(object) - for (let i = 0, len = keys.length; i < len; i++) { - const key = keys[i] - // elasticsearch will complain for keys without a value - if (object[key] === undefined) { - delete object[key] - } else if (Array.isArray(object[key]) === true) { - object[key] = object[key].join(',') - } - } - return stringify(object) - } -} - -module.exports = Serializer diff --git a/lib/Transport.d.ts b/lib/Transport.d.ts deleted file mode 100644 index 25b770fdb..000000000 --- a/lib/Transport.d.ts +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
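The serializer just removed normalizes query strings by dropping `undefined` keys and joining arrays into comma-separated lists before stringifying. A small usage sketch of the deleted class; the require path assumes the pre-8.x package layout:

const Serializer = require('@elastic/elasticsearch/lib/Serializer')

const serializer = new Serializer()

// Arrays become comma-separated lists; undefined keys are dropped entirely.
console.log(serializer.qserialize({ _source: ['foo', 'bar'], pretty: undefined }))
// => '_source=foo%2Cbar' (the comma is percent-encoded by querystring.stringify)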
- */ - -import { Readable as ReadableStream } from 'stream'; -import { ConnectionPool, CloudConnectionPool } from './pool'; -import Connection from './Connection'; -import Serializer from './Serializer'; -import * as errors from './errors'; - -export type ApiError = errors.ConfigurationError | errors.ConnectionError | - errors.DeserializationError | errors.SerializationError | - errors.NoLivingConnectionsError | errors.ResponseError | - errors.TimeoutError | errors.RequestAbortedError | - errors.ProductNotSupportedError - -export type Context = unknown - -export interface nodeSelectorFn { - (connections: Connection[]): Connection; -} - -export interface nodeFilterFn { - (connection: Connection): boolean; -} - -export interface generateRequestIdFn { - (params: TransportRequestParams, options: TransportRequestOptions): any; -} - -interface TransportOptions { - emit: (event: string | symbol, ...args: any[]) => boolean; - connectionPool: ConnectionPool | CloudConnectionPool; - serializer: Serializer; - maxRetries: number; - requestTimeout: number | string; - suggestCompression?: boolean; - compression?: 'gzip'; - sniffInterval?: number; - sniffOnConnectionFault?: boolean; - sniffEndpoint: string; - sniffOnStart?: boolean; - nodeFilter?: nodeFilterFn; - nodeSelector?: string | nodeSelectorFn; - headers?: Record; - generateRequestId?: generateRequestIdFn; - name?: string; - opaqueIdPrefix?: string; - maxResponseSize?: number; - maxCompressedResponseSize?: number; -} - -export interface RequestEvent, TContext = Context> { - body: TResponse; - statusCode: number | null; - headers: Record | null; - warnings: string[] | null; - meta: { - context: TContext; - name: string | symbol; - request: { - params: TransportRequestParams; - options: TransportRequestOptions; - id: any; - }; - connection: Connection; - attempts: number; - aborted: boolean; - sniff?: { - hosts: any[]; - reason: string; - }; - }; -} - -// ApiResponse and RequestEvent are the same thing -// we are doing this to have clearer names -export interface ApiResponse, TContext = Context> extends RequestEvent {} - -export type RequestBody> = T | string | Buffer | ReadableStream -export type RequestNDBody[]> = T | string | string[] | Buffer | ReadableStream - -export interface TransportRequestParams { - method: string; - path: string; - body?: RequestBody; - bulkBody?: RequestNDBody; - querystring?: Record | string; -} - -export interface TransportRequestOptions { - ignore?: number[]; - requestTimeout?: number | string; - maxRetries?: number; - asStream?: boolean; - headers?: Record; - querystring?: Record; - compression?: 'gzip'; - id?: any; - context?: Context; - warnings?: string[]; - opaqueId?: string; - maxResponseSize?: number; - maxCompressedResponseSize?: number; -} - -export interface TransportRequestCallback { - abort: () => void; -} - -export interface TransportRequestPromise extends Promise { - abort: () => void; - finally(onFinally?: (() => void) | undefined | null): Promise; -} - -export interface TransportGetConnectionOptions { - requestId: string; -} - -export interface TransportSniffOptions { - reason: string; - requestId?: string; -} - -export default class Transport { - static sniffReasons: { - SNIFF_ON_START: string; - SNIFF_INTERVAL: string; - SNIFF_ON_CONNECTION_FAULT: string; - DEFAULT: string; - }; - emit: (event: string | symbol, ...args: any[]) => boolean; - connectionPool: ConnectionPool | CloudConnectionPool; - serializer: Serializer; - maxRetries: number; - requestTimeout: number; - suggestCompression:
boolean; - compression: 'gzip' | false; - sniffInterval: number; - sniffOnConnectionFault: boolean; - opaqueIdPrefix: string | null; - sniffEndpoint: string; - _sniffEnabled: boolean; - _nextSniff: number; - _isSniffing: boolean; - constructor(opts: TransportOptions); - request, TContext = Context>(params: TransportRequestParams, options?: TransportRequestOptions): TransportRequestPromise>; - request, TContext = Context>(params: TransportRequestParams, options?: TransportRequestOptions, callback?: (err: ApiError, result: ApiResponse) => void): TransportRequestCallback; - getConnection(opts: TransportGetConnectionOptions): Connection | null; - sniff(opts?: TransportSniffOptions, callback?: (...args: any[]) => void): void; -} diff --git a/lib/Transport.js b/lib/Transport.js deleted file mode 100644 index 83a218a64..000000000 --- a/lib/Transport.js +++ /dev/null @@ -1,695 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const debug = require('debug')('elasticsearch') -const os = require('os') -const { gzip, unzip, createGzip } = require('zlib') -const buffer = require('buffer') -const ms = require('ms') -const { EventEmitter } = require('events') -const { - ConnectionError, - RequestAbortedError, - NoLivingConnectionsError, - ResponseError, - ConfigurationError, - ProductNotSupportedError -} = require('./errors') - -const noop = () => {} - -const clientVersion = require('../package.json').version -const userAgent = `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${process.version})` -const MAX_BUFFER_LENGTH = buffer.constants.MAX_LENGTH -const MAX_STRING_LENGTH = buffer.constants.MAX_STRING_LENGTH -const kProductCheck = Symbol('product check') -const kApiVersioning = Symbol('api versioning') -const kEventEmitter = Symbol('event emitter') -const kMaxResponseSize = Symbol('max response size') -const kMaxCompressedResponseSize = Symbol('max compressed response size') - -class Transport { - constructor (opts) { - if (typeof opts.compression === 'string' && opts.compression !== 'gzip') { - throw new ConfigurationError(`Invalid compression: '${opts.compression}'`) - } - - this.emit = opts.emit - this.connectionPool = opts.connectionPool - this.serializer = opts.serializer - this.maxRetries = opts.maxRetries - this.requestTimeout = toMs(opts.requestTimeout) - this.suggestCompression = opts.suggestCompression === true - this.compression = opts.compression || false - this.context = opts.context || null - this.headers = Object.assign({}, - { 'user-agent': userAgent }, - opts.suggestCompression === true ? 
{ 'accept-encoding': 'gzip,deflate' } : null, - lowerCaseHeaders(opts.headers) - ) - this.sniffInterval = opts.sniffInterval - this.sniffOnConnectionFault = opts.sniffOnConnectionFault - this.sniffEndpoint = opts.sniffEndpoint - this.generateRequestId = opts.generateRequestId || generateRequestId() - this.name = opts.name - this.opaqueIdPrefix = opts.opaqueIdPrefix - this[kProductCheck] = 0 // 0 = to be checked, 1 = checking, 2 = checked-ok, 3 checked-notok, 4 checked-nodefault - this[kApiVersioning] = process.env.ELASTIC_CLIENT_APIVERSIONING === 'true' - this[kEventEmitter] = new EventEmitter() - this[kMaxResponseSize] = opts.maxResponseSize || MAX_STRING_LENGTH - this[kMaxCompressedResponseSize] = opts.maxCompressedResponseSize || MAX_BUFFER_LENGTH - - this.nodeFilter = opts.nodeFilter || defaultNodeFilter - if (typeof opts.nodeSelector === 'function') { - this.nodeSelector = opts.nodeSelector - } else if (opts.nodeSelector === 'round-robin') { - this.nodeSelector = roundRobinSelector() - } else if (opts.nodeSelector === 'random') { - this.nodeSelector = randomSelector - } else { - this.nodeSelector = roundRobinSelector() - } - - this._sniffEnabled = typeof this.sniffInterval === 'number' - this._nextSniff = this._sniffEnabled ? (Date.now() + this.sniffInterval) : 0 - this._isSniffing = false - - if (opts.sniffOnStart === true) { - // timer needed otherwise it will clash - // with the product check testing - setTimeout(() => { - this.sniff({ reason: Transport.sniffReasons.SNIFF_ON_START }) - }, 10) - } - } - - request (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - let p = null - - // promises support - if (callback === undefined) { - let onFulfilled = null - let onRejected = null - p = new Promise((resolve, reject) => { - onFulfilled = resolve - onRejected = reject - }) - callback = function callback (err, result) { - err ? onRejected(err) : onFulfilled(result) - } - } - - const meta = { - context: null, - request: { - params: null, - options: null, - id: options.id || this.generateRequestId(params, options) - }, - name: this.name, - connection: null, - attempts: 0, - aborted: false - } - - if (this.context != null && options.context != null) { - meta.context = Object.assign({}, this.context, options.context) - } else if (this.context != null) { - meta.context = this.context - } else if (options.context != null) { - meta.context = options.context - } - - const result = { - body: null, - statusCode: null, - headers: null, - meta - } - - Object.defineProperty(result, 'warnings', { - get () { - return this.headers && this.headers.warning - ? this.headers.warning.split(/(?!\B"[^"]*),(?![^"]*"\B)/) - : null - } - }) - - // We should not retry if we are sending a stream body, because we would need to keep - // a copy of the stream in memory to be able to send it again, but since we don't know in advance - // the size of the stream, we risk taking too much memory. - // Furthermore, copying the stream every time is a very expensive operation. - const maxRetries = isStream(params.body) || isStream(params.bulkBody) - ? 0 - : (typeof options.maxRetries === 'number' ? options.maxRetries : this.maxRetries) - const compression = options.compression !== undefined ?
options.compression : this.compression - const maxResponseSize = options.maxResponseSize || this[kMaxResponseSize] - const maxCompressedResponseSize = options.maxCompressedResponseSize || this[kMaxCompressedResponseSize] - let request = { abort: noop } - const transportReturn = { - then (onFulfilled, onRejected) { - return p.then(onFulfilled, onRejected) - }, - catch (onRejected) { - return p.catch(onRejected) - }, - abort () { - meta.aborted = true - request.abort() - debug('Aborting request', params) - return this - }, - finally (onFinally) { - return p.finally(onFinally) - } - } - - const makeRequest = () => { - if (meta.aborted === true) { - this.emit('request', new RequestAbortedError(), result) - return process.nextTick(callback, new RequestAbortedError(), result) - } - meta.connection = this.getConnection({ requestId: meta.request.id }) - if (meta.connection == null) { - return process.nextTick(callback, new NoLivingConnectionsError(), result) - } - this.emit('request', null, result) - // perform the actual http request - request = meta.connection.request(params, onResponse) - } - - const onConnectionError = (err) => { - if (err.name !== 'RequestAbortedError') { - // if there is an error in the connection - // let's mark the connection as dead - this.connectionPool.markDead(meta.connection) - - if (this.sniffOnConnectionFault === true) { - this.sniff({ - reason: Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT, - requestId: meta.request.id - }) - } - - // retry logic - if (meta.attempts < maxRetries) { - meta.attempts++ - debug(`Retrying request, there are still ${maxRetries - meta.attempts} attempts`, params) - makeRequest() - return - } - } - - err.meta = result - this.emit('response', err, result) - return callback(err, result) - } - - const onResponse = (err, response) => { - if (err !== null) { - return onConnectionError(err) - } - - result.statusCode = response.statusCode - result.headers = response.headers - - if (options.asStream === true) { - result.body = response - this.emit('response', null, result) - callback(null, result) - return - } - - const contentEncoding = (result.headers['content-encoding'] || '').toLowerCase() - const isCompressed = contentEncoding.indexOf('gzip') > -1 || contentEncoding.indexOf('deflate') > -1 - const isVectorTile = (result.headers['content-type'] || '').indexOf('application/vnd.mapbox-vector-tile') > -1 - - /* istanbul ignore else */ - if (result.headers['content-length'] !== undefined) { - const contentLength = Number(result.headers['content-length']) - if (isCompressed && contentLength > maxCompressedResponseSize) { - response.destroy() - return onConnectionError( - new RequestAbortedError(`The content length (${contentLength}) is bigger than the maximum allowed buffer (${maxCompressedResponseSize})`, result) - ) - } else if (contentLength > maxResponseSize) { - response.destroy() - return onConnectionError( - new RequestAbortedError(`The content length (${contentLength}) is bigger than the maximum allowed string (${maxResponseSize})`, result) - ) - } - } - // if the response is compressed, we must handle it - // as buffer for allowing decompression later - // while if it's a vector tile, we should return it as buffer - let payload = isCompressed || isVectorTile ? [] : '' - const onData = isCompressed || isVectorTile - ? 
chunk => { payload.push(chunk) } - : chunk => { payload += chunk } - const onEnd = err => { - response.removeListener('data', onData) - response.removeListener('end', onEnd) - response.removeListener('error', onEnd) - response.removeListener('aborted', onAbort) - - if (err) { - return onConnectionError(new ConnectionError(err.message)) - } - - if (isCompressed) { - unzip(Buffer.concat(payload), onBody) - } else { - onBody(null, isVectorTile ? Buffer.concat(payload) : payload) - } - } - - const onAbort = () => { - response.destroy() - onEnd(new Error('Response aborted while reading the body')) - } - - if (!isCompressed && !isVectorTile) { - response.setEncoding('utf8') - } - - this.emit('deserialization', null, result) - response.on('data', onData) - response.on('error', onEnd) - response.on('end', onEnd) - response.on('aborted', onAbort) - } - - const onBody = (err, payload) => { - if (err) { - this.emit('response', err, result) - return callback(err, result) - } - - const isVectorTile = (result.headers['content-type'] || '').indexOf('application/vnd.mapbox-vector-tile') > -1 - if (Buffer.isBuffer(payload) && !isVectorTile) { - payload = payload.toString() - } - const isHead = params.method === 'HEAD' - // we should attempt the payload deserialization only if: - // - a `content-type` is defined and is equal to `application/json` - // - the request is not a HEAD request - // - the payload is not an empty string - if (result.headers['content-type'] !== undefined && - (result.headers['content-type'].indexOf('application/json') > -1 || - result.headers['content-type'].indexOf('application/vnd.elasticsearch+json') > -1) && - isHead === false && - payload !== '' - ) { - try { - result.body = this.serializer.deserialize(payload) - } catch (err) { - this.emit('response', err, result) - return callback(err, result) - } - } else { - // cast to boolean if the request method was HEAD and there was no error - result.body = isHead === true && result.statusCode < 400 ? 
true : payload - } - - // we should ignore the statusCode if the user has configured the `ignore` field with - // the statusCode we just got or if the request method is HEAD and the statusCode is 404 - const ignoreStatusCode = (Array.isArray(options.ignore) && options.ignore.indexOf(result.statusCode) > -1) || - (isHead === true && result.statusCode === 404) - - if (ignoreStatusCode === false && - (result.statusCode === 502 || result.statusCode === 503 || result.statusCode === 504)) { - // if the statusCode is 502/3/4 we should run our retry strategy - // and mark the connection as dead - this.connectionPool.markDead(meta.connection) - // retry logic (we should not retry on "429 - Too Many Requests") - if (meta.attempts < maxRetries && result.statusCode !== 429) { - meta.attempts++ - debug(`Retrying request, there are still ${maxRetries - meta.attempts} attempts`, params) - makeRequest() - return - } - } else { - // everything has worked as expected, let's mark - // the connection as alive (or confirm it) - this.connectionPool.markAlive(meta.connection) - } - - if (ignoreStatusCode === false && result.statusCode >= 400) { - const error = new ResponseError(result) - this.emit('response', error, result) - callback(error, result) - } else { - // cast to boolean if the request method was HEAD - if (isHead === true && result.statusCode === 404) { - result.body = false - } - this.emit('response', null, result) - callback(null, result) - } - } - - const prepareRequest = () => { - this.emit('serialization', null, result) - const headers = Object.assign({}, this.headers, lowerCaseHeaders(options.headers)) - - if (options.opaqueId !== undefined) { - headers['x-opaque-id'] = this.opaqueIdPrefix !== null - ? this.opaqueIdPrefix + options.opaqueId - : options.opaqueId - } - - // handle json body - if (params.body != null) { - if (shouldSerialize(params.body) === true) { - try { - params.body = this.serializer.serialize(params.body) - } catch (err) { - this.emit('request', err, result) - process.nextTick(callback, err, result) - return transportReturn - } - } - - if (params.body !== '') { - headers['content-type'] = headers['content-type'] || (this[kApiVersioning] ? 'application/vnd.elasticsearch+json; compatible-with=7' : 'application/json') - } - - // handle ndjson body - } else if (params.bulkBody != null) { - if (shouldSerialize(params.bulkBody) === true) { - try { - params.body = this.serializer.ndserialize(params.bulkBody) - } catch (err) { - this.emit('request', err, result) - process.nextTick(callback, err, result) - return transportReturn - } - } else { - params.body = params.bulkBody - } - if (params.body !== '') { - headers['content-type'] = headers['content-type'] || (this[kApiVersioning] ?
'application/vnd.elasticsearch+x-ndjson; compatible-with=7' : 'application/x-ndjson') - } - } - - params.headers = headers - // serializes the querystring - if (options.querystring == null) { - params.querystring = this.serializer.qserialize(params.querystring) - } else { - params.querystring = this.serializer.qserialize( - Object.assign({}, params.querystring, options.querystring) - ) - } - - // handles request timeout - params.timeout = toMs(options.requestTimeout || this.requestTimeout) - if (options.asStream === true) params.asStream = true - - // handle compression - if (params.body !== '' && params.body != null) { - if (isStream(params.body) === true) { - if (compression === 'gzip') { - params.headers['content-encoding'] = compression - params.body = params.body.pipe(createGzip()) - } - makeRequest() - } else if (compression === 'gzip') { - gzip(params.body, (err, buffer) => { - /* istanbul ignore next */ - if (err) { - this.emit('request', err, result) - return callback(err, result) - } - params.headers['content-encoding'] = compression - params.headers['content-length'] = '' + Buffer.byteLength(buffer) - params.body = buffer - makeRequest() - }) - } else { - params.headers['content-length'] = '' + Buffer.byteLength(params.body) - makeRequest() - } - } else { - makeRequest() - } - } - - meta.request.params = params - meta.request.options = options - // still need to check the product or waiting for the check to finish - if (this[kProductCheck] === 0 || this[kProductCheck] === 1) { - // let pass info requests - if (params.method === 'GET' && params.path === '/') { - prepareRequest() - } else { - // wait for product check to finish - this[kEventEmitter].once('product-check', (error, status) => { - if (status === false) { - const err = error || new ProductNotSupportedError(result) - if (this[kProductCheck] === 4) { - err.message = 'The client noticed that the server is not a supported distribution of Elasticsearch' - } - this.emit('request', err, result) - process.nextTick(callback, err, result) - } else { - prepareRequest() - } - }) - // the very first request triggers the product check - if (this[kProductCheck] === 0) { - this.productCheck() - } - } - // the product check is finished and it's not Elasticsearch - } else if (this[kProductCheck] === 3 || this[kProductCheck] === 4) { - const err = new ProductNotSupportedError(result) - if (this[kProductCheck] === 4) { - err.message = 'The client noticed that the server is not a supported distribution of Elasticsearch' - } - this.emit('request', err, result) - process.nextTick(callback, err, result) - // the product check finished and it's Elasticsearch - } else { - prepareRequest() - } - - return transportReturn - } - - getConnection (opts) { - const now = Date.now() - if (this._sniffEnabled === true && now > this._nextSniff) { - this.sniff({ reason: Transport.sniffReasons.SNIFF_INTERVAL, requestId: opts.requestId }) - } - return this.connectionPool.getConnection({ - filter: this.nodeFilter, - selector: this.nodeSelector, - requestId: opts.requestId, - name: this.name, - now - }) - } - - sniff (opts, callback = noop) { - if (this._isSniffing === true) return - this._isSniffing = true - debug('Started sniffing request') - - if (typeof opts === 'function') { - callback = opts - opts = { reason: Transport.sniffReasons.DEFAULT } - } - - const { reason } = opts - - const request = { - method: 'GET', - path: this.sniffEndpoint - } - - this.request(request, { id: opts.requestId }, (err, result) => { - this._isSniffing = false - if 
(this._sniffEnabled === true) { - this._nextSniff = Date.now() + this.sniffInterval - } - - if (err != null) { - debug('Sniffing errored', err) - result.meta.sniff = { hosts: [], reason } - this.emit('sniff', err, result) - return callback(err) - } - - debug('Sniffing ended successfully', result.body) - const protocol = result.meta.connection.url.protocol || /* istanbul ignore next */ 'http:' - const hosts = this.connectionPool.nodesToHost(result.body.nodes, protocol) - this.connectionPool.update(hosts) - - result.meta.sniff = { hosts, reason } - this.emit('sniff', null, result) - callback(null, hosts) - }) - } - - productCheck () { - debug('Start product check') - this[kProductCheck] = 1 - this.request({ - method: 'GET', - path: '/' - }, (err, result) => { - this[kProductCheck] = 3 - if (err) { - debug('Product check failed', err) - if (err.statusCode === 401 || err.statusCode === 403) { - this[kProductCheck] = 2 - process.emitWarning( - 'The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. Some functionality may not be compatible if the server is running an unsupported product.', - 'ProductNotSupportedSecurityError' - ) - this[kEventEmitter].emit('product-check', null, true) - } else { - this[kProductCheck] = 0 - this[kEventEmitter].emit('product-check', err, false) - } - } else { - debug('Checking elasticsearch version', result.body, result.headers) - if (result.body.version == null || typeof result.body.version.number !== 'string') { - debug('Can\'t access Elasticsearch version') - return this[kEventEmitter].emit('product-check', null, false) - } - const tagline = result.body.tagline - const version = result.body.version.number.split('.') - const major = Number(version[0]) - const minor = Number(version[1]) - if (major < 6) { - return this[kEventEmitter].emit('product-check', null, false) - } else if (major >= 6 && major < 7) { - if (tagline !== 'You Know, for Search') { - debug('Bad tagline') - return this[kEventEmitter].emit('product-check', null, false) - } - } else if (major === 7 && minor < 14) { - if (tagline !== 'You Know, for Search') { - debug('Bad tagline') - return this[kEventEmitter].emit('product-check', null, false) - } - - if (result.body.version.build_flavor !== 'default') { - debug('Bad build_flavor') - this[kProductCheck] = 4 - return this[kEventEmitter].emit('product-check', null, false) - } - } else { - if (result.headers['x-elastic-product'] !== 'Elasticsearch') { - debug('x-elastic-product not recognized') - return this[kEventEmitter].emit('product-check', null, false) - } - } - debug('Valid Elasticsearch distribution') - this[kProductCheck] = 2 - this[kEventEmitter].emit('product-check', null, true) - } - }) - } -} - -Transport.sniffReasons = { - SNIFF_ON_START: 'sniff-on-start', - SNIFF_INTERVAL: 'sniff-interval', - SNIFF_ON_CONNECTION_FAULT: 'sniff-on-connection-fault', - // TODO: find a better name - DEFAULT: 'default' -} - -function toMs (time) { - if (typeof time === 'string') { - return ms(time) - } - return time -} - -function shouldSerialize (obj) { - return typeof obj !== 'string' && - typeof obj.pipe !== 'function' && - Buffer.isBuffer(obj) === false -} - -function isStream (obj) { - return obj != null && typeof obj.pipe === 'function' -} - -function defaultNodeFilter (node) { - // avoid master only nodes - if (node.roles.master === true && - node.roles.data === false && - node.roles.ingest === false) { - return false - } - return true -} - -function roundRobinSelector () { - let current = -1 - 
return function _roundRobinSelector (connections) { - if (++current >= connections.length) { - current = 0 - } - return connections[current] - } -} - -function randomSelector (connections) { - const index = Math.floor(Math.random() * connections.length) - return connections[index] -} - -function generateRequestId () { - const maxInt = 2147483647 - let nextReqId = 0 - return function genReqId (params, options) { - return (nextReqId = (nextReqId + 1) & maxInt) - } -} - -function lowerCaseHeaders (oldHeaders) { - if (oldHeaders == null) return oldHeaders - const newHeaders = {} - for (const header in oldHeaders) { - newHeaders[header.toLowerCase()] = oldHeaders[header] - } - return newHeaders -} - -module.exports = Transport -module.exports.internals = { - defaultNodeFilter, - roundRobinSelector, - randomSelector, - generateRequestId, - lowerCaseHeaders -} diff --git a/lib/errors.d.ts b/lib/errors.d.ts deleted file mode 100644 index 3ed037fd0..000000000 --- a/lib/errors.d.ts +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
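The `roundRobinSelector` and `randomSelector` helpers deleted above backed the `nodeSelector` option declared in the Transport typings; a user-supplied function had the same contract, receiving the list of alive connections and returning one of them. A sketch with the pre-8.x client, the node URLs being illustrative:

const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  node: ['http://localhost:9200', 'http://localhost:9201'],
  // Same shape as the built-in selectors: pick one of the alive connections.
  nodeSelector (connections) {
    return connections[Math.floor(Math.random() * connections.length)]
  }
})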
- */ - -import { ApiResponse, Context } from './Transport' - -export declare class ElasticsearchClientError extends Error { - name: string; - message: string; -} - -export declare class TimeoutError, TContext = Context> extends ElasticsearchClientError { - name: string; - message: string; - meta: ApiResponse; - constructor(message: string, meta: ApiResponse); -} - -export declare class ConnectionError, TContext = Context> extends ElasticsearchClientError { - name: string; - message: string; - meta: ApiResponse; - constructor(message: string, meta: ApiResponse); -} - -export declare class NoLivingConnectionsError, TContext = Context> extends ElasticsearchClientError { - name: string; - message: string; - meta: ApiResponse; - constructor(message: string, meta: ApiResponse); -} - -export declare class SerializationError extends ElasticsearchClientError { - name: string; - message: string; - data: any; - constructor(message: string, data: any); -} - -export declare class DeserializationError extends ElasticsearchClientError { - name: string; - message: string; - data: string; - constructor(message: string, data: string); -} - -export declare class ConfigurationError extends ElasticsearchClientError { - name: string; - message: string; - constructor(message: string); -} - -export declare class ResponseError, TContext = Context> extends ElasticsearchClientError { - name: string; - message: string; - meta: ApiResponse; - body: TResponse; - statusCode: number; - headers: Record; - constructor(meta: ApiResponse); -} - -export declare class RequestAbortedError, TContext = Context> extends ElasticsearchClientError { - name: string; - message: string; - meta: ApiResponse; - constructor(message: string, meta: ApiResponse); -} - -export declare class ProductNotSupportedError, TContext = Context> extends ElasticsearchClientError { - name: string; - message: string; - meta: ApiResponse; - constructor(meta: ApiResponse); -} diff --git a/lib/errors.js b/lib/errors.js deleted file mode 100644 index bc691c86b..000000000 --- a/lib/errors.js +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
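These typings describe the error classes that the client re-exported through its `errors` namespace, letting callers branch on the failure type. A short sketch of typical 7.x-era usage; the index name and node URL are illustrative:

const { Client, errors } = require('@elastic/elasticsearch')

const client = new Client({ node: 'http://localhost:9200' })

client.search({ index: 'an-index-that-does-not-exist' })
  .catch(err => {
    if (err instanceof errors.ResponseError) {
      // statusCode, body and headers are getters over the response meta.
      console.log(err.statusCode, err.body.error.type)
    } else if (err instanceof errors.ConnectionError) {
      console.log('network-level failure:', err.message)
    }
  })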
- */ - -'use strict' - -class ElasticsearchClientError extends Error { - constructor (message) { - super(message) - this.name = 'ElasticsearchClientError' - } -} - -class TimeoutError extends ElasticsearchClientError { - constructor (message, meta) { - super(message) - Error.captureStackTrace(this, TimeoutError) - this.name = 'TimeoutError' - this.message = message || 'Timeout Error' - this.meta = meta - } -} - -class ConnectionError extends ElasticsearchClientError { - constructor (message, meta) { - super(message) - Error.captureStackTrace(this, ConnectionError) - this.name = 'ConnectionError' - this.message = message || 'Connection Error' - this.meta = meta - } -} - -class NoLivingConnectionsError extends ElasticsearchClientError { - constructor (message, meta) { - super(message) - Error.captureStackTrace(this, NoLivingConnectionsError) - this.name = 'NoLivingConnectionsError' - this.message = message || 'Given the configuration, the ConnectionPool was not able to find a usable Connection for this request.' - this.meta = meta - } -} - -class SerializationError extends ElasticsearchClientError { - constructor (message, data) { - super(message, data) - Error.captureStackTrace(this, SerializationError) - this.name = 'SerializationError' - this.message = message || 'Serialization Error' - this.data = data - } -} - -class DeserializationError extends ElasticsearchClientError { - constructor (message, data) { - super(message, data) - Error.captureStackTrace(this, DeserializationError) - this.name = 'DeserializationError' - this.message = message || 'Deserialization Error' - this.data = data - } -} - -class ConfigurationError extends ElasticsearchClientError { - constructor (message) { - super(message) - Error.captureStackTrace(this, ConfigurationError) - this.name = 'ConfigurationError' - this.message = message || 'Configuration Error' - } -} - -class ResponseError extends ElasticsearchClientError { - constructor (meta) { - super('Response Error') - Error.captureStackTrace(this, ResponseError) - this.name = 'ResponseError' - if (meta.body && meta.body.error && meta.body.error.type) { - if (Array.isArray(meta.body.error.root_cause)) { - this.message = meta.body.error.type + ': ' - this.message += meta.body.error.root_cause.map(entry => `[${entry.type}] Reason: ${entry.reason}`).join('; ') - } else { - this.message = meta.body.error.type - } - } else if (typeof meta.body === 'object' && meta.body != null) { - this.message = JSON.stringify(meta.body) - } else { - this.message = meta.body || 'Response Error' - } - this.meta = meta - } - - get body () { - return this.meta.body - } - - get statusCode () { - if (this.meta.body && typeof this.meta.body.status === 'number') { - return this.meta.body.status - } - return this.meta.statusCode - } - - get headers () { - return this.meta.headers - } - - toString () { - return JSON.stringify(this.meta.body) - } -} - -class RequestAbortedError extends ElasticsearchClientError { - constructor (message, meta) { - super(message) - Error.captureStackTrace(this, RequestAbortedError) - this.name = 'RequestAbortedError' - this.message = message || 'Request aborted' - this.meta = meta - } -} - -class ProductNotSupportedError extends ElasticsearchClientError { - constructor (meta) { - super('Product Not Supported Error') - Error.captureStackTrace(this, ProductNotSupportedError) - this.name = 'ProductNotSupportedError' - this.message = 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.' 
- this.meta = meta - } -} - -module.exports = { - ElasticsearchClientError, - TimeoutError, - ConnectionError, - NoLivingConnectionsError, - SerializationError, - DeserializationError, - ConfigurationError, - ResponseError, - RequestAbortedError, - ProductNotSupportedError -} diff --git a/lib/pool/BaseConnectionPool.js b/lib/pool/BaseConnectionPool.js deleted file mode 100644 index 80e80a318..000000000 --- a/lib/pool/BaseConnectionPool.js +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { URL } = require('url') -const debug = require('debug')('elasticsearch') -const Connection = require('../Connection') -const noop = () => {} - -class BaseConnectionPool { - constructor (opts) { - // list of nodes and weights - this.connections = [] - // how many nodes we have in our scheduler - this.size = this.connections.length - this.Connection = opts.Connection - this.emit = opts.emit || noop - this.auth = opts.auth || null - this._ssl = opts.ssl - this._agent = opts.agent - this._proxy = opts.proxy || null - this._caFingerprint = opts.caFingerprint || null - } - - getConnection () { - throw new Error('getConnection must be implemented') - } - - markAlive () { - return this - } - - markDead () { - return this - } - - /** - * Creates a new connection instance. - */ - createConnection (opts) { - if (typeof opts === 'string') { - opts = this.urlToHost(opts) - } - - if (this.auth !== null) { - opts.auth = this.auth - } else if (opts.url.username !== '' && opts.url.password !== '') { - opts.auth = { - username: decodeURIComponent(opts.url.username), - password: decodeURIComponent(opts.url.password) - } - } - - if (opts.ssl == null) opts.ssl = this._ssl - /* istanbul ignore else */ - if (opts.agent == null) opts.agent = this._agent - /* istanbul ignore else */ - if (opts.proxy == null) opts.proxy = this._proxy - /* istanbul ignore else */ - if (opts.caFingerprint == null) opts.caFingerprint = this._caFingerprint - - const connection = new this.Connection(opts) - - for (const conn of this.connections) { - if (conn.id === connection.id) { - throw new Error(`Connection with id '${connection.id}' is already present`) - } - } - - return connection - } - - /** - * Adds a new connection to the pool. 
- * - * @param {object|string} host - * @returns {ConnectionPool} - */ - addConnection (opts) { - if (Array.isArray(opts)) { - return opts.forEach(o => this.addConnection(o)) - } - - if (typeof opts === 'string') { - opts = this.urlToHost(opts) - } - - const connectionById = this.connections.find(c => c.id === opts.id) - const connectionByUrl = this.connections.find(c => c.id === opts.url.href) - - if (connectionById || connectionByUrl) { - throw new Error(`Connection with id '${opts.id || opts.url.href}' is already present`) - } - - this.update([...this.connections, opts]) - return this.connections[this.size - 1] - } - - /** - * Removes a connection from the pool. - * - * @param {object} connection - * @returns {ConnectionPool} - */ - removeConnection (connection) { - debug('Removing connection', connection) - return this.update(this.connections.filter(c => c.id !== connection.id)) - } - - /** - * Empties the connection pool. - * - * @returns {ConnectionPool} - */ - empty (callback) { - debug('Emptying the connection pool') - let openConnections = this.size - this.connections.forEach(connection => { - connection.close(() => { - if (--openConnections === 0) { - this.connections = [] - this.size = this.connections.length - callback() - } - }) - }) - } - - /** - * Update the ConnectionPool with new connections. - * - * @param {array} array of connections - * @returns {ConnectionPool} - */ - update (nodes) { - debug('Updating the connection pool') - const newConnections = [] - const oldConnections = [] - - for (const node of nodes) { - // if we already have a given connection in the pool - // we mark it as alive and we do not close the connection - // to avoid socket issues - const connectionById = this.connections.find(c => c.id === node.id) - const connectionByUrl = this.connections.find(c => c.id === node.url.href) - if (connectionById) { - debug(`The connection with id '${node.id}' is already present`) - this.markAlive(connectionById) - newConnections.push(connectionById) - // in case the user has passed a single url (or an array of urls), - // the connection id will be the full href; to avoid closing valid connections - // because they are not present in the pool, we also check the node url, - // and if it is already present we update its id with the one provided by ES. - } else if (connectionByUrl) { - connectionByUrl.id = node.id - this.markAlive(connectionByUrl) - newConnections.push(connectionByUrl) - } else { - newConnections.push(this.createConnection(node)) - } - } - - const ids = nodes.map(c => c.id) - // remove all the dead connections and old connections - for (const connection of this.connections) { - if (ids.indexOf(connection.id) === -1) { - oldConnections.push(connection) - } - } - - // close old connections - oldConnections.forEach(connection => connection.close()) - - this.connections = newConnections - this.size = this.connections.length - - return this - } - - /** - * Transforms the node objects into host objects.
- * - * @param {object} nodes - * @returns {array} hosts - */ - nodesToHost (nodes, protocol) { - const ids = Object.keys(nodes) - const hosts = [] - - for (let i = 0, len = ids.length; i < len; i++) { - const node = nodes[ids[i]] - // If there is no protocol in - // the `publish_address` new URL will throw - // the publish_address can have two forms: - // - ip:port - // - hostname/ip:port - // if we encounter the second case, we should - // use the hostname instead of the ip - let address = node.http.publish_address - const parts = address.split('/') - // the url is in the form of hostname/ip:port - if (parts.length > 1) { - const hostname = parts[0] - const port = parts[1].match(/((?::))(?:[0-9]+)$/g)[0].slice(1) - address = `${hostname}:${port}` - } - - address = address.slice(0, 4) === 'http' - /* istanbul ignore next */ - ? address - : `${protocol}//${address}` - const roles = node.roles.reduce((acc, role) => { - acc[role] = true - return acc - }, {}) - - hosts.push({ - url: new URL(address), - id: ids[i], - roles: Object.assign({ - [Connection.roles.MASTER]: false, - [Connection.roles.DATA]: false, - [Connection.roles.INGEST]: false, - [Connection.roles.ML]: false - }, roles) - }) - } - - return hosts - } - - /** - * Transforms an url string to a host object - * - * @param {string} url - * @returns {object} host - */ - urlToHost (url) { - return { - url: new URL(url) - } - } -} - -module.exports = BaseConnectionPool diff --git a/lib/pool/CloudConnectionPool.js b/lib/pool/CloudConnectionPool.js deleted file mode 100644 index 6f68f6149..000000000 --- a/lib/pool/CloudConnectionPool.js +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const BaseConnectionPool = require('./BaseConnectionPool') - -class CloudConnectionPool extends BaseConnectionPool { - constructor (opts) { - super(opts) - this.cloudConnection = null - } - - /** - * Returns the only cloud connection. - * - * @returns {object} connection - */ - getConnection () { - return this.cloudConnection - } - - /** - * Empties the connection pool. - * - * @returns {ConnectionPool} - */ - empty (callback) { - super.empty(() => { - this.cloudConnection = null - callback() - }) - } - - /** - * Update the ConnectionPool with new connections. - * - * @param {array} array of connections - * @returns {ConnectionPool} - */ - update (connections) { - super.update(connections) - this.cloudConnection = this.connections[0] - return this - } -} - -module.exports = CloudConnectionPool diff --git a/lib/pool/ConnectionPool.js b/lib/pool/ConnectionPool.js deleted file mode 100644 index 6fd6fc7db..000000000 --- a/lib/pool/ConnectionPool.js +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. 
under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const BaseConnectionPool = require('./BaseConnectionPool') -const assert = require('assert') -const debug = require('debug')('elasticsearch') -const Connection = require('../Connection') -const noop = () => {} - -class ConnectionPool extends BaseConnectionPool { - constructor (opts) { - super(opts) - - this.dead = [] - // the resurrect timeout is 60s - this.resurrectTimeout = 1000 * 60 - // number of consecutive failures after which - // the timeout doesn't increase - this.resurrectTimeoutCutoff = 5 - this.pingTimeout = opts.pingTimeout - this._sniffEnabled = opts.sniffEnabled || false - - const resurrectStrategy = opts.resurrectStrategy || 'ping' - this.resurrectStrategy = ConnectionPool.resurrectStrategies[resurrectStrategy] - assert( - this.resurrectStrategy != null, - `Invalid resurrection strategy: '${resurrectStrategy}'` - ) - } - - /** - * Marks a connection as 'alive'. - * If needed removes the connection from the dead list - * and then resets the `deadCount`. - * - * @param {object} connection - */ - markAlive (connection) { - const { id } = connection - debug(`Marking as 'alive' connection '${id}'`) - const index = this.dead.indexOf(id) - if (index > -1) this.dead.splice(index, 1) - connection.status = Connection.statuses.ALIVE - connection.deadCount = 0 - connection.resurrectTimeout = 0 - return this - } - - /** - * Marks a connection as 'dead'. - * If needed adds the connection to the dead list - * and then increments the `deadCount`. - * - * @param {object} connection - */ - markDead (connection) { - const { id } = connection - debug(`Marking as 'dead' connection '${id}'`) - if (this.dead.indexOf(id) === -1) { - // It might happen that `markDead` is called jsut after - // a pool update, and in such case we will add to the dead - // list a node that no longer exist. The following check verify - // that the connection is still part of the pool before - // marking it as dead. 
- for (let i = 0; i < this.size; i++) { - if (this.connections[i].id === id) { - this.dead.push(id) - break - } - } - } - connection.status = Connection.statuses.DEAD - connection.deadCount++ - // resurrectTimeout formula: - // `resurrectTimeout * 2 ** min(deadCount - 1, resurrectTimeoutCutoff)` - connection.resurrectTimeout = Date.now() + this.resurrectTimeout * Math.pow( - 2, Math.min(connection.deadCount - 1, this.resurrectTimeoutCutoff) - ) - - // sort the dead list in ascending order - // based on the resurrectTimeout - this.dead.sort((a, b) => { - const conn1 = this.connections.find(c => c.id === a) - const conn2 = this.connections.find(c => c.id === b) - return conn1.resurrectTimeout - conn2.resurrectTimeout - }) - - return this - } - - /** - * If enabled, tries to resurrect a connection with the given - * resurrect strategy ('ping', 'optimistic', 'none'). - * - * @param {object} { now, requestId } - * @param {function} callback (isAlive, connection) - */ - resurrect (opts, callback = noop) { - if (this.resurrectStrategy === 0 || this.dead.length === 0) { - debug('Nothing to resurrect') - callback(null, null) - return - } - - // the dead list is sorted in ascending order based on the timeout - // so the first element will always be the one with the smaller timeout - const connection = this.connections.find(c => c.id === this.dead[0]) - if ((opts.now || Date.now()) < connection.resurrectTimeout) { - debug('Nothing to resurrect') - callback(null, null) - return - } - - const { id } = connection - - // ping strategy - if (this.resurrectStrategy === 1) { - connection.request({ - method: 'HEAD', - path: '/', - timeout: this.pingTimeout - }, (err, response) => { - let isAlive = true - const statusCode = response !== null ? response.statusCode : 0 - if (err != null || - (statusCode === 502 || statusCode === 503 || statusCode === 504)) { - debug(`Resurrect: connection '${id}' is still dead`) - this.markDead(connection) - isAlive = false - } else { - debug(`Resurrect: connection '${id}' is now alive`) - this.markAlive(connection) - } - this.emit('resurrect', null, { - strategy: 'ping', - name: opts.name, - request: { id: opts.requestId }, - isAlive, - connection - }) - callback(isAlive, connection) - }) - // optimistic strategy - } else { - debug(`Resurrect: optimistic resurrection for connection '${id}'`) - this.dead.splice(this.dead.indexOf(id), 1) - connection.status = Connection.statuses.ALIVE - this.emit('resurrect', null, { - strategy: 'optimistic', - name: opts.name, - request: { id: opts.requestId }, - isAlive: true, - connection - }) - callback(true, connection) // eslint-disable-line - } - } - - /** - * Returns an alive connection if present, - * otherwise returns a dead connection. - * By default it filters the `master` only nodes. - * It uses the selector to choose which - * connection return. - * - * @param {object} options (filter and selector) - * @returns {object|null} connection - */ - getConnection (opts = {}) { - const filter = opts.filter || (() => true) - const selector = opts.selector || (c => c[0]) - - this.resurrect({ - now: opts.now, - requestId: opts.requestId, - name: opts.name - }) - - const noAliveConnections = this.size === this.dead.length - - // TODO: can we cache this? 
- const connections = [] - for (let i = 0; i < this.size; i++) { - const connection = this.connections[i] - if (noAliveConnections || connection.status === Connection.statuses.ALIVE) { - if (filter(connection) === true) { - connections.push(connection) - } - } - } - - if (connections.length === 0) return null - - return selector(connections) - } - - /** - * Empties the connection pool. - * - * @returns {ConnectionPool} - */ - empty (callback) { - super.empty(() => { - this.dead = [] - callback() - }) - } - - /** - * Update the ConnectionPool with new connections. - * - * @param {array} array of connections - * @returns {ConnectionPool} - */ - update (connections) { - super.update(connections) - this.dead = [] - return this - } -} - -ConnectionPool.resurrectStrategies = { - none: 0, - ping: 1, - optimistic: 2 -} - -module.exports = ConnectionPool diff --git a/lib/pool/index.d.ts b/lib/pool/index.d.ts deleted file mode 100644 index 7b3f62f94..000000000 --- a/lib/pool/index.d.ts +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/// - -import { URL } from 'url' -import { SecureContextOptions } from 'tls'; -import Connection, { AgentOptions } from '../Connection'; -import { nodeFilterFn, nodeSelectorFn } from '../Transport'; - -interface BaseConnectionPoolOptions { - ssl?: SecureContextOptions; - agent?: AgentOptions; - proxy?: string | URL; - auth?: BasicAuth | ApiKeyAuth; - emit: (event: string | symbol, ...args: any[]) => boolean; - Connection: typeof Connection; - caFingerprint?: string; -} - -interface ConnectionPoolOptions extends BaseConnectionPoolOptions { - pingTimeout?: number; - resurrectStrategy?: 'ping' | 'optimistic' | 'none'; - sniffEnabled?: boolean; -} - -interface getConnectionOptions { - filter?: nodeFilterFn; - selector?: nodeSelectorFn; - requestId?: string | number; - name?: string; - now?: number; -} - -interface ApiKeyAuth { - apiKey: - | string - | { - id: string; - api_key: string; - } -} - -interface BasicAuth { - username: string; - password: string; -} - -interface BearerAuth { - bearer: string -} - -interface resurrectOptions { - now?: number; - requestId: string; - name: string; -} - -interface ResurrectEvent { - strategy: string; - isAlive: boolean; - connection: Connection; - name: string; - request: { - id: any; - }; -} - - -declare class BaseConnectionPool { - connections: Connection[]; - size: number; - emit: (event: string | symbol, ...args: any[]) => boolean; - _ssl: SecureContextOptions | null; - _agent: AgentOptions | null; - _proxy: string | URL; - auth: BasicAuth | ApiKeyAuth; - Connection: typeof Connection; - constructor(opts?: BaseConnectionPoolOptions); - /** - * Marks a connection as 'alive'. 
- * If needed removes the connection from the dead list - * and then resets the `deadCount`. - * - * @param {object} connection - */ - markAlive(connection: Connection): this; - /** - * Marks a connection as 'dead'. - * If needed adds the connection to the dead list - * and then increments the `deadCount`. - * - * @param {object} connection - */ - markDead(connection: Connection): this; - /** - * Returns an alive connection if present, - * otherwise returns a dead connection. - * By default it filters the `master` only nodes. - * It uses the selector to choose which - * connection return. - * - * @param {object} options (filter and selector) - * @returns {object|null} connection - */ - getConnection(opts?: getConnectionOptions): Connection | null; - /** - * Adds a new connection to the pool. - * - * @param {object|string} host - * @returns {ConnectionPool} - */ - addConnection(opts: any): Connection; - /** - * Removes a new connection to the pool. - * - * @param {object} connection - * @returns {ConnectionPool} - */ - removeConnection(connection: Connection): this; - /** - * Empties the connection pool. - * - * @returns {ConnectionPool} - */ - empty(): this; - /** - * Update the ConnectionPool with new connections. - * - * @param {array} array of connections - * @returns {ConnectionPool} - */ - update(connections: any[]): this; - /** - * Transforms the nodes objects to a host object. - * - * @param {object} nodes - * @returns {array} hosts - */ - nodesToHost(nodes: any, protocol: string): any[]; - /** - * Transforms an url string to a host object - * - * @param {string} url - * @returns {object} host - */ - urlToHost(url: string): { url: URL }; -} - -declare class ConnectionPool extends BaseConnectionPool { - static resurrectStrategies: { - none: number; - ping: number; - optimistic: number; - }; - dead: string[]; - _sniffEnabled: boolean; - resurrectTimeout: number; - resurrectTimeoutCutoff: number; - pingTimeout: number; - resurrectStrategy: number; - constructor(opts?: ConnectionPoolOptions); - - /** - * If enabled, tries to resurrect a connection with the given - * resurrect strategy ('ping', 'optimistic', 'none'). 
- * - * @param {object} { now, requestId, name } - * @param {function} callback (isAlive, connection) - */ - resurrect(opts: resurrectOptions, callback?: (isAlive: boolean | null, connection: Connection | null) => void): void; -} - -declare class CloudConnectionPool extends BaseConnectionPool { - cloudConnection: Connection | null - constructor(opts?: BaseConnectionPoolOptions); - getConnection(): Connection | null; -} - -declare function defaultNodeFilter(node: Connection): boolean; -declare function roundRobinSelector(): (connections: Connection[]) => Connection; -declare function randomSelector(connections: Connection[]): Connection; - -declare const internals: { - defaultNodeFilter: typeof defaultNodeFilter; - roundRobinSelector: typeof roundRobinSelector; - randomSelector: typeof randomSelector; -}; - -export { - // Interfaces - ConnectionPoolOptions, - getConnectionOptions, - ApiKeyAuth, - BasicAuth, - BearerAuth, - internals, - resurrectOptions, - ResurrectEvent, - // Classes - BaseConnectionPool, - ConnectionPool, - CloudConnectionPool -}; diff --git a/package.json b/package.json index abf24519d..94b29c66e 100644 --- a/package.json +++ b/package.json @@ -1,18 +1,26 @@ { "name": "@elastic/elasticsearch", + "version": "8.0.0", + "versionCanary": "8.0.0-canary.21", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", - "exports": { - ".": { - "require": "./index.js", - "import": "./index.mjs" - }, - "./": "./" + "scripts": { + "test": "npm run build && npm run lint && tap test/{unit,acceptance}/{*,**/*}.test.ts", + "test:unit": "npm run build && tap test/unit/{*,**/*}.test.ts", + "test:acceptance": "npm run build && tap test/acceptance/*.test.ts", + "test:coverage-100": "npm run build && tap test/{unit,acceptance}/{*,**/*}.test.ts --coverage --100", + "test:coverage-report": "npm run build && tap test/{unit,acceptance}/{*,**/*}.test.ts --coverage && nyc report --reporter=text-lcov > coverage.lcov", + "test:coverage-ui": "npm run build && tap test/{unit,acceptance}/{*,**/*}.test.ts --coverage --coverage-report=html", + "test:integration": "tsc && node test/integration/index.js", + "lint": "ts-standard src", + "lint:fix": "ts-standard --fix src", + "license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause;0BSD'", + "prebuild": "npm run clean-build && npm run lint", + "build": "tsc", + "clean-build": "rimraf ./lib && mkdir lib", + "prepublishOnly": "npm run build" }, - "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", - "version": "8.0.0-SNAPSHOT.9f33e3c7", - "versionCanary": "8.0.0-canary.21", "keywords": [ "elasticsearch", "elastic", @@ -23,84 +31,63 @@ "client", "index" ], - "scripts": { - "test": "npm run lint && tap test/{unit,acceptance}/{*,**/*}.test.js && npm run test:types", - "test:unit": "tap test/unit/{*,**/*}.test.js", - "test:acceptance": "tap test/acceptance/*.test.js", - "test:integration": "node test/integration/index.js", - "test:integration:helpers": "tap test/integration/helpers/*.test.js", - "test:types": "tsd", - "test:coverage-100": "tap test/{unit,acceptance}/{*,**/*}.test.js --coverage --100 --nyc-arg=\"--exclude=api\"", - "test:coverage-report": "tap test/{unit,acceptance}/{*,**/*}.test.js --coverage --nyc-arg=\"--exclude=api\" && nyc report --reporter=text-lcov > coverage.lcov", - "test:coverage-ui": "tap test/{unit,acceptance}/{*,**/*}.test.js --coverage --coverage-report=html 
--nyc-arg=\"--exclude=api\"", - "lint": "standard", - "lint:fix": "standard --fix", - "license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause'", - "build-esm": "npx gen-esm-wrapper . index.mjs && standard --fix index.mjs" - }, "author": { "name": "Tomas Della Vedova", "company": "Elastic BV" }, - "original-author": { - "name": "Spencer Alger", - "company": "Elasticsearch BV" + "license": "Apache-2.0", + "repository": { + "type": "git", + "url": "https://github.com/elastic/elasticsearch-js.git" + }, + "bugs": { + "url": "https://github.com/elastic/elasticsearch-js/issues" + }, + "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", + "engines": { + "node": ">=12" }, "devDependencies": { "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", - "@types/node": "^15.3.1", - "convert-hrtime": "^5.0.0", + "@types/debug": "^4.1.6", + "@types/ms": "^0.7.31", + "@types/node": "^16.4.1", + "@types/sinonjs__fake-timers": "^6.0.3", + "@types/split2": "^3.2.1", + "@types/stoppable": "^1.1.1", + "@types/tap": "^15.0.5", "cross-zip": "^4.0.0", - "dedent": "^0.7.0", - "deepmerge": "^4.2.2", - "dezalgo": "^1.0.3", "fast-deep-equal": "^3.1.3", "into-stream": "^6.0.0", "js-yaml": "^4.1.0", "license-checker": "^25.0.1", "minimist": "^1.2.5", - "node-fetch": "^2.6.1", - "ora": "^5.4.0", - "pretty-hrtime": "^1.0.3", + "ms": "^2.1.3", + "node-abort-controller": "^2.0.0", + "node-fetch": "^2.6.2", + "ora": "^5.4.1", "proxy": "^1.0.2", "rimraf": "^3.0.2", "semver": "^7.3.5", - "simple-git": "^2.39.0", - "simple-statistics": "^7.7.0", "split2": "^3.2.2", "standard": "^16.0.3", "stoppable": "^1.1.0", "tap": "^15.0.9", - "tsd": "^0.15.1", + "ts-node": "^10.1.0", + "ts-standard": "^10.0.0", + "typescript": "^4.3.5", "workq": "^3.0.0", - "xmlbuilder2": "^2.4.1" + "xmlbuilder2": "^3.0.2" }, "dependencies": { - "debug": "^4.3.1", - "hpagent": "^0.1.1", - "ms": "^2.1.3", - "secure-json-parse": "^2.4.0" - }, - "license": "Apache-2.0", - "repository": { - "type": "git", - "url": "https://github.com/elastic/elasticsearch-js.git" - }, - "bugs": { - "url": "https://github.com/elastic/elasticsearch-js/issues" - }, - "engines": { - "node": ">=12" - }, - "tsd": { - "directory": "test/types" + "@elastic/transport": "^0.0.6", + "tslib": "^2.3.0" }, "tap": { - "ts": false, + "ts": true, "jsx": false, "flow": false, "coverage": false, - "jobs-auto": true, "check-coverage": false } } diff --git a/scripts/release-canary.js b/scripts/release-canary.js index f9bda6c01..0f3e42c2b 100644 --- a/scripts/release-canary.js +++ b/scripts/release-canary.js @@ -40,7 +40,6 @@ async function release (opts) { packageJson.name = '@elastic/elasticsearch-canary' packageJson.version = newCanaryVersion packageJson.versionCanary = newCanaryVersion - packageJson.types = './api/new.d.ts' packageJson.commitHash = execSync('git log -1 --pretty=format:%h').toString() // update the package.json diff --git a/src/Client.ts b/src/Client.ts new file mode 100644 index 000000000..7c6e7768e --- /dev/null +++ b/src/Client.ts @@ -0,0 +1,337 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License") you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import { ConnectionOptions as TlsConnectionOptions } from 'tls' +import { URL } from 'url' +import buffer from 'buffer' +import { + Transport, + UndiciConnection, + WeightedConnectionPool, + CloudConnectionPool, + Serializer, + Diagnostic, + errors, + BaseConnectionPool +} from '@elastic/transport' +import { + HttpAgentOptions, + UndiciAgentOptions, + agentFn, + nodeFilterFn, + nodeSelectorFn, + generateRequestIdFn, + BasicAuth, + ApiKeyAuth, + BearerAuth, + Context +} from '@elastic/transport/lib/types' +import BaseConnection, { prepareHeaders } from '@elastic/transport/lib/connection/BaseConnection' +import SniffingTransport from './SniffingTransport' +import Helpers from './Helpers' +import API from './api' + +const kChild = Symbol('elasticsearchjs-child') +const kInitialOptions = Symbol('elasticsearchjs-initial-options') +let clientVersion: string = require('../package.json').version // eslint-disable-line +/* istanbul ignore next */ +if (clientVersion.includes('-')) { + // clean prerelease + clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' +} +let transportVersion: string = require('@elastic/transport/package.json').version // eslint-disable-line +/* istanbul ignore next */ +if (transportVersion.includes('-')) { + // clean prerelease + transportVersion = transportVersion.slice(0, transportVersion.indexOf('-')) + 'p' +} +const nodeVersion = process.versions.node + +interface NodeOptions { + url: URL + id?: string + agent?: HttpAgentOptions | UndiciAgentOptions + ssl?: TlsConnectionOptions + headers?: Record + roles?: { + master: boolean + data: boolean + ingest: boolean + ml: boolean + } +} + +export interface ClientOptions { + node?: string | string[] | NodeOptions | NodeOptions[] + nodes?: string | string[] | NodeOptions | NodeOptions[] + Connection?: typeof BaseConnection + ConnectionPool?: typeof BaseConnectionPool + Transport?: typeof Transport + Serializer?: typeof Serializer + maxRetries?: number + requestTimeout?: number + pingTimeout?: number + sniffInterval?: number | boolean + sniffOnStart?: boolean + sniffEndpoint?: string + sniffOnConnectionFault?: boolean + resurrectStrategy?: 'ping' | 'optimistic' | 'none' + compression?: boolean + tls?: TlsConnectionOptions + agent?: HttpAgentOptions | UndiciAgentOptions | agentFn | false + nodeFilter?: nodeFilterFn + nodeSelector?: nodeSelectorFn + headers?: Record + opaqueIdPrefix?: string + generateRequestId?: generateRequestIdFn + name?: string | symbol + auth?: BasicAuth | ApiKeyAuth | BearerAuth + context?: Context + proxy?: string | URL + enableMetaHeader?: boolean + cloud?: { + id: string + } + disablePrototypePoisoningProtection?: boolean | 'proto' | 'constructor' + caFingerprint?: string + maxResponseSize?: number + maxCompressedResponseSize?: number +} + +export default class Client extends API { + diagnostic: Diagnostic + name: string | symbol + connectionPool: BaseConnectionPool + transport: SniffingTransport + serializer: Serializer + helpers: Helpers + constructor (opts: ClientOptions) { + super() + // @ts-expect-error kChild symbol is for internal use only + if 
((opts.cloud != null) && opts[kChild] === undefined) {
+      const { id } = opts.cloud
+      // the cloud id is `cluster-name:base64encodedurl`
+      // the url is a string divided by two '$', the first is the cloud url
+      // the second the elasticsearch instance, the third the kibana instance
+      const cloudUrls = Buffer.from(id.split(':')[1], 'base64').toString().split('$')
+
+      opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}`
+
+      // Cloud has better performance with compression enabled
+      // see https://github.com/elastic/elasticsearch-py/pull/704.
+      // So unless the user specifies otherwise, we enable compression.
+      if (opts.compression == null) opts.compression = true
+      if (opts.tls == null ||
+        (opts.tls != null && opts.tls.secureProtocol == null)) {
+        opts.tls = opts.tls ?? {}
+        opts.tls.secureProtocol = 'TLSv1_2_method'
+      }
+    }
+
+    if (opts.node == null && opts.nodes == null) {
+      throw new errors.ConfigurationError('Missing node(s) option')
+    }
+
+    // @ts-expect-error kChild symbol is for internal use only
+    if (opts[kChild] === undefined) {
+      const checkAuth = getAuth(opts.node ?? opts.nodes)
+      if ((checkAuth != null) && checkAuth.username !== '' && checkAuth.password !== '') {
+        opts.auth = Object.assign({}, opts.auth, { username: checkAuth.username, password: checkAuth.password })
+      }
+    }
+
+    const options: Required<ClientOptions> = Object.assign({}, {
+      Connection: UndiciConnection,
+      Transport: SniffingTransport,
+      Serializer,
+      ConnectionPool: (opts.cloud != null) ? CloudConnectionPool : WeightedConnectionPool,
+      maxRetries: 3,
+      requestTimeout: 30000,
+      pingTimeout: 3000,
+      sniffInterval: false,
+      sniffOnStart: false,
+      sniffEndpoint: '_nodes/_all/http',
+      sniffOnConnectionFault: false,
+      resurrectStrategy: 'ping',
+      compression: false,
+      tls: null,
+      caFingerprint: null,
+      agent: null,
+      headers: {},
+      nodeFilter: null,
+      generateRequestId: null,
+      name: 'elasticsearch-js',
+      auth: null,
+      opaqueIdPrefix: null,
+      context: null,
+      proxy: null,
+      enableMetaHeader: true,
+      maxResponseSize: null,
+      maxCompressedResponseSize: null
+    }, opts)
+
+    if (options.caFingerprint !== null && isHttpConnection(opts.node ?? 
opts.nodes)) { + throw new errors.ConfigurationError('You can\'t configure the caFingerprint with a http connection') + } + + if (options.maxResponseSize !== null && options.maxResponseSize > buffer.constants.MAX_STRING_LENGTH) { + throw new errors.ConfigurationError(`The maxResponseSize cannot be bigger than ${buffer.constants.MAX_STRING_LENGTH}`) + } + + if (options.maxCompressedResponseSize !== null && options.maxCompressedResponseSize > buffer.constants.MAX_LENGTH) { + throw new errors.ConfigurationError(`The maxCompressedResponseSize cannot be bigger than ${buffer.constants.MAX_LENGTH}`) + } + + if (options.enableMetaHeader) { + options.headers['x-elastic-client-meta'] = `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}` + } + + this.name = options.name + // @ts-expect-error kInitialOptions symbol is for internal use only + this[kInitialOptions] = options + + // @ts-expect-error kChild symbol is for internal use only + if (opts[kChild] !== undefined) { + // @ts-expect-error kChild symbol is for internal use only + this.serializer = opts[kChild].serializer + // @ts-expect-error kChild symbol is for internal use only + this.connectionPool = opts[kChild].connectionPool + // @ts-expect-error kChild symbol is for internal use only + this.diagnostic = opts[kChild].diagnostic + } else { + this.diagnostic = new Diagnostic() + this.serializer = new options.Serializer() + this.connectionPool = new options.ConnectionPool({ + pingTimeout: options.pingTimeout, + resurrectStrategy: options.resurrectStrategy, + tls: options.tls, + agent: options.agent, + proxy: options.proxy, + Connection: options.Connection, + auth: options.auth, + diagnostic: this.diagnostic, + caFingerprint: options.caFingerprint + }) + this.connectionPool.addConnection(options.node ?? options.nodes) + } + + this.transport = new options.Transport({ + diagnostic: this.diagnostic, + connectionPool: this.connectionPool, + serializer: this.serializer, + maxRetries: options.maxRetries, + requestTimeout: options.requestTimeout, + sniffInterval: options.sniffInterval, + sniffOnStart: options.sniffOnStart, + sniffOnConnectionFault: options.sniffOnConnectionFault, + sniffEndpoint: options.sniffEndpoint, + compression: options.compression, + headers: options.headers, + nodeFilter: options.nodeFilter, + nodeSelector: options.nodeSelector, + generateRequestId: options.generateRequestId, + name: options.name, + opaqueIdPrefix: options.opaqueIdPrefix, + context: options.context, + productCheck: 'Elasticsearch', + maxResponseSize: options.maxResponseSize, + maxCompressedResponseSize: options.maxCompressedResponseSize + }) + + this.helpers = new Helpers({ + client: this, + metaHeader: options.enableMetaHeader + ? 
`es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}`
+        : null,
+      maxRetries: options.maxRetries
+    })
+  }
+
+  child (opts: ClientOptions): Client {
+    // Merge the new options with the initial ones
+    // @ts-expect-error kChild symbol is for internal use only
+    const options: ClientOptions = Object.assign({}, this[kInitialOptions], opts)
+    // Pass to the child client the parent instances that cannot be overridden
+    // @ts-expect-error kInitialOptions symbol is for internal use only
+    options[kChild] = {
+      connectionPool: this.connectionPool,
+      serializer: this.serializer,
+      diagnostic: this.diagnostic,
+      initialOptions: options
+    }
+
+    /* istanbul ignore else */
+    if (options.auth !== undefined) {
+      options.headers = prepareHeaders(options.headers, options.auth)
+    }
+
+    return new Client(options)
+  }
+
+  async close (): Promise<void> {
+    return await this.connectionPool.empty()
+  }
+}
+
+function isHttpConnection (node?: string | string[] | NodeOptions | NodeOptions[]): boolean {
+  if (Array.isArray(node)) {
+    return node.some((n) => (typeof n === 'string' ? new URL(n).protocol : n.url.protocol) === 'http:')
+  } else {
+    if (node == null) return false
+    return (typeof node === 'string' ? new URL(node).protocol : node.url.protocol) === 'http:'
+  }
+}
+
+function getAuth (node?: string | string[] | NodeOptions | NodeOptions[]): { username: string, password: string } | null {
+  if (Array.isArray(node)) {
+    for (const url of node) {
+      const auth = getUsernameAndPassword(url)
+      if (auth != null && auth.username !== '' && auth.password !== '') {
+        return auth
+      }
+    }
+
+    return null
+  } else {
+    const auth = getUsernameAndPassword(node)
+    if (auth != null && auth.username !== '' && auth.password !== '') {
+      return auth
+    }
+
+    return null
+  }
+
+  function getUsernameAndPassword (node?: string | NodeOptions): { username: string, password: string } | null {
+    /* istanbul ignore else */
+    if (typeof node === 'string') {
+      const { username, password } = new URL(node)
+      return {
+        username: decodeURIComponent(username),
+        password: decodeURIComponent(password)
+      }
+    } else if (node != null && node.url instanceof URL) {
+      return {
+        username: decodeURIComponent(node.url.username),
+        password: decodeURIComponent(node.url.password)
+      }
+    } else {
+      return null
+    }
+  }
+}
diff --git a/lib/Helpers.js b/src/Helpers.ts
similarity index 57%
rename from lib/Helpers.js
rename to src/Helpers.ts
index cd78c392d..cc36b9841 100644
--- a/lib/Helpers.js
+++ b/src/Helpers.ts
@@ -3,7 +3,7 @@
  * license agreements. See the NOTICE file distributed with
  * this work for additional information regarding copyright
  * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
+ * the Apache License, Version 2.0 (the "License") you may
  * not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  *
@@ -17,26 +17,123 @@
  * under the License. 
*/ -'use strict' +/* eslint-disable @typescript-eslint/naming-convention */ +/* eslint-disable @typescript-eslint/promise-function-async */ -/* eslint camelcase: 0 */ +import assert from 'assert' +import { promisify } from 'util' +import { Readable } from 'stream' +import { errors, TransportResult, TransportRequestOptions, TransportRequestOptionsWithMeta } from '@elastic/transport' +import Client from './Client' +import * as T from './api/types' -const { Readable } = require('stream') -const { promisify } = require('util') -const { ResponseError, ConfigurationError } = require('./errors') +export interface HelpersOptions { + client: Client + metaHeader: string | null + maxRetries: number +} -const pImmediate = promisify(setImmediate) +export interface ScrollSearchOptions extends TransportRequestOptions { + wait?: number +} + +export interface ScrollSearchResponse extends TransportResult, unknown> { + clear: () => Promise + documents: TDocument[] +} + +export interface MsearchHelperOptions extends T.MsearchRequest { + operations?: number + flushInterval?: number + concurrency?: number + retries?: number + wait?: number +} + +export interface MsearchHelper extends Promise { + stop: (error?: Error | null) => void + search: (header: T.MsearchHeader, body: T.MsearchBody) => Promise> +} + +export interface MsearchHelperResponse { + body: T.SearchResponse + documents: TDocument[] + status: number + responses: T.MsearchResponse +} + +export interface BulkStats { + total: number + failed: number + retry: number + successful: number + noop: number + time: number + bytes: number + aborted: boolean +} + +interface IndexAction { + index: T.BulkIndexOperation +} + +interface CreateAction { + create: T.BulkCreateOperation +} + +interface UpdateActionOperation { + update: T.BulkUpdateOperation +} + +interface DeleteAction { + delete: T.BulkDeleteOperation +} + +type UpdateAction = [UpdateActionOperation, Record] +type Action = IndexAction | CreateAction | UpdateAction | DeleteAction + +export interface OnDropDocument { + status: number + operation: Action + error: T.ErrorCause | null + document: TDocument + retried: boolean +} + +export interface BulkHelperOptions extends T.BulkRequest { + datasource: TDocument[] | Buffer | Readable | AsyncIterator + onDocument: (doc: TDocument) => Action + flushBytes?: number + flushInterval?: number + concurrency?: number + retries?: number + wait?: number + onDrop?: (doc: OnDropDocument) => void + refreshOnCompletion?: boolean | string +} + +export interface BulkHelper extends Promise { + abort: () => BulkHelper + readonly stats: BulkStats +} + +const { ResponseError, ConfigurationError } = errors const sleep = promisify(setTimeout) +const pImmediate = promisify(setImmediate) +/* istanbul ignore next */ +const noop = (): void => {} const kClient = Symbol('elasticsearch-client') const kMetaHeader = Symbol('meta header') -/* istanbul ignore next */ -const noop = () => {} +const kMaxRetries = Symbol('max retries') -class Helpers { - constructor (opts) { +export default class Helpers { + [kClient]: Client + [kMetaHeader]: string | null + [kMaxRetries]: number + constructor (opts: HelpersOptions) { this[kClient] = opts.client this[kMetaHeader] = opts.metaHeader - this.maxRetries = opts.maxRetries + this[kMaxRetries] = opts.maxRetries } /** @@ -48,11 +145,12 @@ class Helpers { * @param {object} options - The client optional configuration for this request. * @return {array} The documents that matched the request. 
*/ - async search (params, options) { + async search (params: T.SearchRequest, options: TransportRequestOptions = {}): Promise { appendFilterPath('hits.hits._source', params, true) - const { body } = await this[kClient].search(params, options) - if (body.hits && body.hits.hits) { - return body.hits.hits.map(d => d._source) + options.meta = true + const { body: result } = await this[kClient].search(params, options as TransportRequestOptionsWithMeta) + if (result.hits?.hits != null) { + return result.hits.hits.map(d => d._source as TDocument) } return [] } @@ -72,71 +170,77 @@ class Helpers { * @param {object} options - The client optional configuration for this request. * @return {iterator} the async iterator */ - async * scrollSearch (params, options = {}) { + async * scrollSearch (params: T.SearchRequest, options: ScrollSearchOptions = {}): AsyncIterable> { + options.meta = true if (this[kMetaHeader] !== null) { - options.headers = options.headers || {} - options.headers['x-elastic-client-meta'] = this[kMetaHeader] + ',h=s' + options.headers = options.headers ?? {} + options.headers['x-elastic-client-meta'] = `${this[kMetaHeader] as string},h=s` } - // TODO: study scroll search slices - const wait = options.wait || 5000 - const maxRetries = options.maxRetries || this.maxRetries + const wait = options.wait ?? 5000 + const maxRetries = options.maxRetries ?? this[kMaxRetries] if (Array.isArray(options.ignore)) { options.ignore.push(429) } else { options.ignore = [429] } - params.scroll = params.scroll || '1m' + params.scroll = params.scroll ?? '1m' appendFilterPath('_scroll_id', params, false) - const { method, body, index, ...querystring } = params - let response = null + let response: TransportResult, unknown> | undefined for (let i = 0; i <= maxRetries; i++) { - response = await this[kClient].search(params, options) + response = await this[kClient].search(params, options as TransportRequestOptionsWithMeta) if (response.statusCode !== 429) break await sleep(wait) } + assert(response !== undefined, 'The response is undefined, please file a bug report') if (response.statusCode === 429) { + // @ts-expect-error throw new ResponseError(response) } let scroll_id = response.body._scroll_id let stop = false - const clear = async () => { + const clear = async (): Promise => { stop = true await this[kClient].clearScroll( - { body: { scroll_id } }, + { scroll_id }, { ignore: [400], ...options } ) } - while (response.body.hits && response.body.hits.hits.length > 0) { + while (response.body.hits != null && response.body.hits.hits.length > 0) { // scroll id is always present in the response, but it might // change over time based on the number of shards scroll_id = response.body._scroll_id + // @ts-expect-error response.clear = clear - addDocumentsGetter(response) + addDocumentsGetter(response) + // @ts-expect-error yield response - if (stop === true) { + if (stop) { break } for (let i = 0; i <= maxRetries; i++) { - response = await this[kClient].scroll({ - scroll: querystring.scroll, - rest_total_hits_as_int: querystring.rest_total_hits_as_int || querystring.restTotalHitsAsInt, - body: { scroll_id } - }, options) + const r = await this[kClient].scroll({ + scroll: params.scroll, + rest_total_hits_as_int: params.rest_total_hits_as_int, + scroll_id + }, options as TransportRequestOptionsWithMeta) + response = r as TransportResult, unknown> + assert(response !== undefined, 'The response is undefined, please file a bug report') if (response.statusCode !== 429) break await sleep(wait) } if 
(response.statusCode === 429) { + // @ts-expect-error throw new ResponseError(response) } } - if (stop === false) { + if (!stop) { await clear() } } @@ -156,9 +260,9 @@ class Helpers { * @param {object} options - The client optional configuration for this request. * @return {iterator} the async iterator */ - async * scrollDocuments (params, options) { + async * scrollDocuments (params: T.SearchRequest, options: ScrollSearchOptions = {}): AsyncIterable { appendFilterPath('hits.hits._source', params, true) - for await (const { documents } of this.scrollSearch(params, options)) { + for await (const { documents } of this.scrollSearch(params, options)) { for (const document of documents) { yield document } @@ -172,19 +276,20 @@ class Helpers { * @param {object} reqOptions - The client optional configuration for this request. * @return {object} The possible operations to run. */ - msearch (options = {}, reqOptions = {}) { + msearch (options: MsearchHelperOptions = {}, reqOptions: TransportRequestOptions = {}): MsearchHelper { const client = this[kClient] const { operations = 5, concurrency = 5, flushInterval = 500, - retries = this.maxRetries, + retries = this[kMaxRetries], wait = 5000, ...msearchOptions } = options + reqOptions.meta = true let stopReading = false - let stopError = null + let stopError: Error | null = null let timeoutRef = null const operationsStream = new Readable({ objectMode: true, @@ -192,15 +297,19 @@ class Helpers { }) const p = iterate() - const helper = { - then (onFulfilled, onRejected) { + const helper: MsearchHelper = { + [Symbol.toStringTag]: 'Promise', + then (onFulfilled: any, onRejected?: any) { return p.then(onFulfilled, onRejected) }, - catch (onRejected) { + catch (onRejected: any) { return p.catch(onRejected) }, + finally (onFinally: any) { + return p.finally(onFinally) + }, stop (error = null) { - if (stopReading === true) return + if (stopReading) return stopReading = true stopError = error operationsStream.push(null) @@ -208,53 +317,45 @@ class Helpers { // TODO: support abort a single search? // NOTE: the validation checks are synchronous and the callback/promise will // be resolved in the same tick. We might want to fix this in the future. - search (header, body, callback) { - if (stopReading === true) { + search (header: T.MsearchHeader, body: T.MsearchBody): Promise> { + if (stopReading) { const error = stopError === null ? new ConfigurationError('The msearch processor has been stopped') : stopError - return callback ? callback(error, {}) : Promise.reject(error) + return Promise.reject(error) } if (!(typeof header === 'object' && header !== null && !Array.isArray(header))) { - const error = new ConfigurationError('The header should be an object') - return callback ? callback(error, {}) : Promise.reject(error) + return Promise.reject(new ConfigurationError('The header should be an object')) } if (!(typeof body === 'object' && body !== null && !Array.isArray(body))) { - const error = new ConfigurationError('The body should be an object') - return callback ? callback(error, {}) : Promise.reject(error) + return Promise.reject(new ConfigurationError('The body should be an object')) } - let promise = null - if (callback === undefined) { - let onFulfilled = null - let onRejected = null - promise = new Promise((resolve, reject) => { - onFulfilled = resolve - onRejected = reject - }) - callback = function callback (err, result) { - err ? 
onRejected(err) : onFulfilled(result) - } + let onFulfilled: any = null + let onRejected: any = null + const promise = new Promise>((resolve, reject) => { + onFulfilled = resolve + onRejected = reject + }) + const callback = function callback (err: Error | null, result: T.MsearchResponse): void { + err !== null ? onRejected(err) : onFulfilled(result) } operationsStream.push([header, body, callback]) - - if (promise !== null) { - return promise - } + return promise } } return helper - async function iterate () { + async function iterate (): Promise { const { semaphore, finish } = buildSemaphore() - const msearchBody = [] - const callbacks = [] + const msearchBody: Array = [] + const callbacks: any[] = [] let loadedOperations = 0 - timeoutRef = setTimeout(onFlushTimeout, flushInterval) + timeoutRef = setTimeout(onFlushTimeout, flushInterval) // eslint-disable-line for await (const operation of operationsStream) { timeoutRef.refresh() @@ -284,7 +385,7 @@ class Helpers { throw stopError } - async function onFlushTimeout () { + async function onFlushTimeout (): Promise { if (loadedOperations === 0) return const msearchBodyCopy = msearchBody.slice() const callbacksCopy = callbacks.slice() @@ -296,6 +397,7 @@ class Helpers { send(msearchBodyCopy, callbacksCopy) } catch (err) { /* istanbul ignore next */ + // @ts-expect-error helper.stop(err) } } @@ -312,14 +414,14 @@ class Helpers { // to send the actual msearch request. // It also returns a finish function, which returns a promise that is resolved // when there are no longer request running. - function buildSemaphore () { - let resolveSemaphore = null - let resolveFinish = null + function buildSemaphore (): { semaphore: () => Promise, finish: () => Promise } { + let resolveSemaphore: ((value?: any) => void) | null = null + let resolveFinish: ((value?: any) => void) | null = null let running = 0 return { semaphore, finish } - function finish () { + function finish (): Promise { return new Promise((resolve, reject) => { if (running === 0) { resolve() @@ -329,7 +431,7 @@ class Helpers { }) } - function semaphore () { + function semaphore (): Promise { if (running < concurrency) { running += 1 return pImmediate(send) @@ -340,25 +442,25 @@ class Helpers { } } - function send (msearchBody, callbacks) { + function send (msearchBody: Array, callbacks: any[]): void { /* istanbul ignore if */ if (running > concurrency) { throw new Error('Max concurrency reached') } msearchOperation(msearchBody, callbacks, () => { running -= 1 - if (resolveSemaphore) { + if (resolveSemaphore !== null) { running += 1 resolveSemaphore(send) resolveSemaphore = null - } else if (resolveFinish && running === 0) { + } else if (resolveFinish != null && running === 0) { resolveFinish() } }) } } - function msearchOperation (msearchBody, callbacks, done) { + function msearchOperation (msearchBody: Array, callbacks: any[], done: () => void): void { let retryCount = retries // Instead of going full on async-await, which would make the code easier to read, @@ -366,7 +468,7 @@ class Helpers { // This because every time we use async await, V8 will create multiple promises // behind the scenes, making the code slightly slower. 
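+      // A rough sketch of the retry flow implemented below: tryMsearch() fires
+      // one msearch request; searches that come back with a 429 are collected
+      // and handed to retrySearch(), which waits `wait` ms and calls tryMsearch()
+      // again, at most `retries` times, before surfacing the 429 as an error.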
tryMsearch(msearchBody, callbacks, retrySearch) - function retrySearch (msearchBody, callbacks) { + function retrySearch (msearchBody: Array, callbacks: any[]): void { if (msearchBody.length > 0 && retryCount > 0) { retryCount -= 1 setTimeout(tryMsearch, wait, msearchBody, callbacks, retrySearch) @@ -378,36 +480,38 @@ class Helpers { // This function never returns an error, if the msearch operation fails, // the error is dispatched to all search executors. - function tryMsearch (msearchBody, callbacks, done) { - client.msearch(Object.assign({}, msearchOptions, { body: msearchBody }), reqOptions, (err, results) => { - const retryBody = [] - const retryCallbacks = [] - if (err) { - addDocumentsGetter(results) - for (const callback of callbacks) { - callback(err, results) - } - return done(retryBody, retryCallbacks) - } - const { responses } = results.body - for (let i = 0, len = responses.length; i < len; i++) { - const response = responses[i] - if (response.status === 429 && retryCount > 0) { - retryBody.push(msearchBody[i * 2]) - retryBody.push(msearchBody[(i * 2) + 1]) - retryCallbacks.push(callbacks[i]) - continue + function tryMsearch (msearchBody: Array, callbacks: any[], done: (msearchBody: Array, callbacks: any[]) => void): void { + client.msearch(Object.assign({}, msearchOptions, { body: msearchBody }), reqOptions as TransportRequestOptionsWithMeta) + .then(results => { + const retryBody = [] + const retryCallbacks = [] + const { responses } = results.body + for (let i = 0, len = responses.length; i < len; i++) { + const response = responses[i] + if (response.status === 429 && retryCount > 0) { + retryBody.push(msearchBody[i * 2]) + retryBody.push(msearchBody[(i * 2) + 1]) + retryCallbacks.push(callbacks[i]) + continue + } + const result = { ...results, body: response } + // @ts-expect-error + addDocumentsGetter(result) + if (response.status != null && response.status >= 400) { + // @ts-expect-error + callbacks[i](new ResponseError(result), result) + } else { + callbacks[i](null, result) + } } - const result = { ...results, body: response } - addDocumentsGetter(result) - if (response.status >= 400) { - callbacks[i](new ResponseError(result), result) - } else { - callbacks[i](null, result) + done(retryBody, retryCallbacks) + }) + .catch(err => { + for (const callback of callbacks) { + callback(err, null) } - } - done(retryBody, retryCallbacks) - }) + return done([], []) + }) } } } @@ -419,20 +523,21 @@ class Helpers { * @param {object} reqOptions - The client optional configuration for this request. * @return {object} The possible operations to run with the datasource. */ - bulk (options, reqOptions = {}) { + bulk (options: BulkHelperOptions, reqOptions: TransportRequestOptions = {}): BulkHelper { const client = this[kClient] const { serializer } = client if (this[kMetaHeader] !== null) { - reqOptions.headers = reqOptions.headers || {} - reqOptions.headers['x-elastic-client-meta'] = this[kMetaHeader] + ',h=bp' + reqOptions.headers = reqOptions.headers ?? 
{}
+      reqOptions.headers['x-elastic-client-meta'] = `${this[kMetaHeader] as string},h=bp`
     }
+    reqOptions.meta = true
     const {
       datasource,
       onDocument,
       flushBytes = 5000000,
       flushInterval = 30000,
       concurrency = 5,
-      retries = this.maxRetries,
+      retries = this[kMaxRetries],
       wait = 5000,
       onDrop = noop,
       refreshOnCompletion = false,
       ...bulkOptions
     } = options
 
     if (datasource === undefined) {
+      // @ts-expect-error
       return Promise.reject(new ConfigurationError('bulk helper: the datasource is required'))
     }
-    if (!(Array.isArray(datasource) || Buffer.isBuffer(datasource) || typeof datasource.pipe === 'function' || datasource[Symbol.asyncIterator])) {
+    if (!(Array.isArray(datasource) || Buffer.isBuffer(datasource) || isReadableStream(datasource) || isAsyncIterator(datasource))) {
+      // @ts-expect-error
       return Promise.reject(new ConfigurationError('bulk helper: the datasource must be an array or a buffer or a readable stream or an async generator'))
     }
     if (onDocument === undefined) {
+      // @ts-expect-error
      return Promise.reject(new ConfigurationError('bulk helper: the onDocument callback is required'))
     }
 
     let shouldAbort = false
-    let timeoutRef = null
+    let timeoutRef: any = null
     const stats = {
       total: 0,
       failed: 0,
       retry: 0,
       successful: 0,
       noop: 0,
       time: 0,
       bytes: 0,
       aborted: false
     }
 
     const p = iterate()
-    const helper = {
-      get stats () {
-        return stats
-      },
-      then (onFulfilled, onRejected) {
+    const helper: BulkHelper = {
+      [Symbol.toStringTag]: 'Promise',
+      then (onFulfilled: any, onRejected?: any) {
         return p.then(onFulfilled, onRejected)
       },
-      catch (onRejected) {
+      catch (onRejected: any) {
         return p.catch(onRejected)
       },
+      finally (onFinally: any) {
+        return p.finally(onFinally)
+      },
+      get stats () {
+        return stats
+      },
       abort () {
         clearTimeout(timeoutRef)
         shouldAbort = true
@@ -492,17 +604,18 @@ class Helpers {
      * It creates an array of strings instead of a ndjson string because the bulkOperation
      * will navigate the body for matching failed operations with the original document.
      */
-    async function iterate () {
+    async function iterate (): Promise<BulkStats> {
       const { semaphore, finish } = buildSemaphore()
       const startTime = Date.now()
-      const bulkBody = []
+      const bulkBody: string[] = []
       let actionBody = ''
       let payloadBody = ''
       let chunkBytes = 0
-      timeoutRef = setTimeout(onFlushTimeout, flushInterval)
+      timeoutRef = setTimeout(onFlushTimeout, flushInterval) // eslint-disable-line
 
+      // @ts-expect-error datasource is an iterable
       for await (const chunk of datasource) {
-        if (shouldAbort === true) break
+        if (shouldAbort) break
         timeoutRef.refresh()
         const action = onDocument(chunk)
         const operation = Array.isArray(action)
@@ -514,9 +627,11 @@ class Helpers {
           chunkBytes += Buffer.byteLength(actionBody) + Buffer.byteLength(payloadBody)
           bulkBody.push(actionBody, payloadBody)
         } else if (operation === 'update') {
+          // @ts-expect-error in case of update action is an array
           actionBody = serializer.serialize(action[0])
           payloadBody = typeof chunk === 'string'
             ? `{"doc":${chunk}}`
+            // @ts-expect-error in case of update action is an array
             : serializer.serialize({ doc: chunk, ...action[1] })
           chunkBytes += Buffer.byteLength(actionBody) + Buffer.byteLength(payloadBody)
           bulkBody.push(actionBody, payloadBody)
@@ -541,7 +656,7 @@ class Helpers {
       clearTimeout(timeoutRef)
       // In some cases the previous http call has not finished,
       // or we didn't reach the flush bytes threshold, so we force one last operation. 
- if (shouldAbort === false && chunkBytes > 0) { + if (!shouldAbort && chunkBytes > 0) { const send = await semaphore() stats.bytes += chunkBytes send(bulkBody) @@ -549,7 +664,7 @@ class Helpers { await finish() - if (refreshOnCompletion) { + if (refreshOnCompletion !== false) { await client.indices.refresh({ index: typeof refreshOnCompletion === 'string' ? refreshOnCompletion @@ -562,7 +677,7 @@ class Helpers { return stats - async function onFlushTimeout () { + async function onFlushTimeout (): Promise { if (chunkBytes === 0) return stats.bytes += chunkBytes const bulkBodyCopy = bulkBody.slice() @@ -571,9 +686,9 @@ class Helpers { try { const send = await semaphore() send(bulkBodyCopy) - } catch (err) { + } catch (err: any) { /* istanbul ignore next */ - helper.abort() + helper.abort() // eslint-disable-line } } } @@ -590,19 +705,19 @@ class Helpers { // It also returns a finish function, which returns a promise that is resolved // when there are no longer request running. It rejects an error if one // of the request has failed for some reason. - function buildSemaphore () { - let resolveSemaphore = null - let resolveFinish = null - let rejectFinish = null - let error = null + function buildSemaphore (): { semaphore: () => Promise, finish: () => Promise } { + let resolveSemaphore: ((value?: any) => void) | null = null + let resolveFinish: ((value?: any) => void) | null = null + let rejectFinish: ((value?: any) => void) | null = null + let error: Error | null = null let running = 0 return { semaphore, finish } - function finish () { + function finish (): Promise { return new Promise((resolve, reject) => { if (running === 0) { - if (error) { + if (error !== null) { reject(error) } else { resolve() @@ -614,7 +729,7 @@ class Helpers { }) } - function semaphore () { + function semaphore (): Promise { if (running < concurrency) { running += 1 return pImmediate(send) @@ -625,23 +740,23 @@ class Helpers { } } - function send (bulkBody) { + function send (bulkBody: string[]): void { /* istanbul ignore if */ if (running > concurrency) { throw new Error('Max concurrency reached') } bulkOperation(bulkBody, err => { running -= 1 - if (err) { + if (err != null) { shouldAbort = true error = err } - if (resolveSemaphore) { + if (resolveSemaphore !== null) { running += 1 resolveSemaphore(send) resolveSemaphore = null - } else if (resolveFinish && running === 0) { - if (error) { + } else if (resolveFinish != null && rejectFinish != null && running === 0) { + if (error != null) { rejectFinish(error) } else { resolveFinish() @@ -651,7 +766,7 @@ class Helpers { } } - function bulkOperation (bulkBody, callback) { + function bulkOperation (bulkBody: string[], callback: (err?: Error | null) => void): void { let retryCount = retries let isRetrying = false @@ -660,9 +775,9 @@ class Helpers { // This because every time we use async await, V8 will create multiple promises // behind the scenes, making the code slightly slower. 
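+      // A rough sketch of the retry flow implemented below: tryBulk() sends one
+      // bulk request; items rejected with a 429 are re-queued and handed to
+      // retryDocuments(), which waits `wait` ms and retries them, at most
+      // `retries` times, before reporting the leftovers through onDrop().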
       tryBulk(bulkBody, retryDocuments)

-      function retryDocuments (err, bulkBody) {
-        if (err) return callback(err)
-        if (shouldAbort === true) return callback()
+      function retryDocuments (err: Error | null, bulkBody: string[]): void {
+        if (err != null) return callback(err)
+        if (shouldAbort) return callback()

         if (bulkBody.length > 0) {
           if (retryCount > 0) {
@@ -690,55 +805,60 @@
         callback()
       }

-      function tryBulk (bulkBody, callback) {
-        if (shouldAbort === true) return callback(null, [])
-        client.bulk(Object.assign({}, bulkOptions, { body: bulkBody }), reqOptions, (err, { body }) => {
-          if (err) return callback(err, null)
-          if (body.errors === false) {
-            stats.successful += body.items.length
-            for (const item of body.items) {
-              if (item.update && item.update.result === 'noop') {
-                stats.noop++
+      function tryBulk (bulkBody: string[], callback: (err: Error | null, bulkBody: string[]) => void): void {
+        if (shouldAbort) return callback(null, [])
+        client.bulk(Object.assign({}, bulkOptions, { body: bulkBody }), reqOptions as TransportRequestOptionsWithMeta)
+          .then(response => {
+            const result = response.body
+            if (!result.errors) {
+              stats.successful += result.items.length
+              for (const item of result.items) {
+                if (item.update?.result === 'noop') {
+                  stats.noop++
+                }
              }
+              return callback(null, [])
            }
-            return callback(null, [])
-          }
-          const retry = []
-          const { items } = body
-          for (let i = 0, len = items.length; i < len; i++) {
-            const action = items[i]
-            const operation = Object.keys(action)[0]
-            const { status } = action[operation]
-            const indexSlice = operation !== 'delete' ? i * 2 : i
-
-            if (status >= 400) {
-              // 429 is the only staus code where we might want to retry
-              // a document, because it was not an error in the document itself,
-              // but the ES node were handling too many operations.
-              if (status === 429) {
-                retry.push(bulkBody[indexSlice])
-                /* istanbul ignore next */
-                if (operation !== 'delete') {
-                  retry.push(bulkBody[indexSlice + 1])
+            const retry = []
+            const { items } = result
+            for (let i = 0, len = items.length; i < len; i++) {
+              const action = items[i]
+              const operation = Object.keys(action)[0]
+              const responseItem = action[operation as keyof T.BulkResponseItemContainer]
+              assert(responseItem !== undefined, 'The responseItem is undefined, please file a bug report')
+              const indexSlice = operation !== 'delete' ? i * 2 : i
+
+              if (responseItem.status >= 400) {
+                // 429 is the only status code where we might want to retry
+                // a document, because it was not an error in the document itself,
+                // but the ES node was handling too many operations.
+                if (responseItem.status === 429) {
+                  retry.push(bulkBody[indexSlice])
+                  /* istanbul ignore next */
+                  if (operation !== 'delete') {
+                    retry.push(bulkBody[indexSlice + 1])
+                  }
+                } else {
+                  onDrop({
+                    status: responseItem.status,
+                    error: responseItem.error ?? null,
+                    operation: serializer.deserialize(bulkBody[indexSlice]),
+                    document: operation !== 'delete'
+                      ? serializer.deserialize(bulkBody[indexSlice + 1])
+                      : null,
+                    retried: isRetrying
+                  })
+                  stats.failed += 1
+                }
              } else {
-                onDrop({
-                  status: status,
-                  error: action[operation].error,
-                  operation: serializer.deserialize(bulkBody[indexSlice]),
-                  document: operation !== 'delete'
-                    ? serializer.deserialize(bulkBody[indexSlice + 1])
-                    : null,
-                  retried: isRetrying
-                })
-                stats.failed += 1
+                stats.successful += 1
              }
-            } else {
-              stats.successful += 1
            }
-          }
-          callback(null, retry)
-        })
+            callback(null, retry)
+          })
+          .catch(err => {
+            callback(err, [])
+          })
      }
    }
  }

 // Using a getter will improve the overall performance of the code,
 // as we will read the documents only if needed.
-function addDocumentsGetter (result) {
+function addDocumentsGetter<TDocument> (result: TransportResult<T.SearchResponse<TDocument>, unknown>): void {
   Object.defineProperty(result, 'documents', {
     get () {
-      if (this.body.hits && this.body.hits.hits) {
+      if (this.body.hits?.hits != null) {
+        // @ts-expect-error
         return this.body.hits.hits.map(d => d._source)
       }
       return []
@@ -757,14 +878,18 @@
   })
 }

-function appendFilterPath (filter, params, force) {
+function appendFilterPath (filter: string, params: Record<string, any>, force: boolean): void {
   if (params.filter_path !== undefined) {
-    params.filter_path += ',' + filter
-  } else if (params.filterPath !== undefined) {
-    params.filterPath += ',' + filter
-  } else if (force === true) {
+    params.filter_path += ',' + filter // eslint-disable-line
+  } else if (force) {
     params.filter_path = filter
   }
 }

-module.exports = Helpers
+function isReadableStream (obj: any): obj is Readable {
+  return obj != null && typeof obj.pipe === 'function'
+}
+
+function isAsyncIterator (obj: any): obj is AsyncIterator<unknown> {
+  return obj?.[Symbol.asyncIterator] != null
+}
diff --git a/src/SniffingTransport.ts b/src/SniffingTransport.ts
new file mode 100644
index 000000000..4b02038fb
--- /dev/null
+++ b/src/SniffingTransport.ts
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import assert from 'assert'
+import { Transport, SniffOptions } from '@elastic/transport'
+
+export default class SniffingTransport extends Transport {
+  sniff (opts: SniffOptions): void {
+    if (this.isSniffing) return
+    this.isSniffing = true
+
+    const request = {
+      method: 'GET',
+      path: this.sniffEndpoint ?? '/_nodes/_all/http'
+    }
+
+    this.request(request, { id: opts.requestId, meta: true })
+      .then(result => {
+        assert(isObject(result.body), 'The body should be an object')
+        this.isSniffing = false
+        const protocol = result.meta.connection?.url.protocol ??
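+        // Sniffing in a nutshell: ask the cluster for its HTTP-enabled nodes and
+        // swap the connection pool to whatever comes back. A hedged sketch of the
+        // same round-trip done by hand (the default endpoint is /_nodes/_all/http):
+        //
+        //   const result = await client.transport.request(
+        //     { method: 'GET', path: '/_nodes/_all/http' }, { meta: true })
+        //   // result.body.nodes maps node ids to objects carrying the publish_address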
/* istanbul ignore next */ 'http:' + const hosts = this.connectionPool.nodesToHost(result.body.nodes, protocol) + this.connectionPool.update(hosts) + + result.meta.sniff = { hosts, reason: opts.reason } + this.diagnostic.emit('sniff', null, result) + }) + .catch(err => { + this.isSniffing = false + err.meta.sniff = { hosts: [], reason: opts.reason } + this.diagnostic.emit('sniff', err, null) + }) + } +} + +function isObject (obj: any): obj is Record { + return typeof obj === 'object' +} diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts new file mode 100644 index 000000000..d071d3119 --- /dev/null +++ b/src/api/api/async_search.ts @@ -0,0 +1,165 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class AsyncSearch { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
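+    // Dispatch convention used by every generated method in these API files:
+    // keys named in `acceptedPath` become URL segments, keys named in
+    // `acceptedQuery` go to the querystring, and any remaining key is copied
+    // into the request body. So a call such as the sketch below (the id is
+    // hypothetical) turns into `DELETE /_async_search/my-id` with no body:
+    //
+    //   await client.asyncSearch.delete({ id: 'my-id' })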
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_async_search/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async get (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async get (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async get (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> + async get (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['keep_alive', 'typed_keys', 'wait_for_completion_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = `/_async_search/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise> + async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = `/_async_search/status/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> + async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['batched_reduce_size', 'wait_for_completion_timeout', 'keep_on_completion', 'typed_keys', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'POST' + path = `/${encodeURIComponent(params.index.toString())}/_async_search` + } else { + method = 'POST' + path = '/_async_search' + } + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts new file mode 100644 index 000000000..413d9ef75 --- /dev/null +++ b/src/api/api/autoscaling.ts @@ -0,0 +1,157 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
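+// Usage sketch for this namespace (assumes a configured `client`; the policy
+// name and body are hypothetical):
+//
+//   await client.autoscaling.putAutoscalingPolicy({
+//     name: 'my-policy',
+//     policy: { roles: ['data'], deciders: {} }
+//   })
+//   const capacity = await client.autoscaling.getAutoscalingCapacity()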
+ +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Autoscaling { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['name'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_autoscaling/policy/${encodeURIComponent(params.name.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_autoscaling/capacity' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['name'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = `/_autoscaling/policy/${encodeURIComponent(params.name.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['name'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: any = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + body = params[key] + } + } + + const method = 'PUT' + const path = `/_autoscaling/policy/${encodeURIComponent(params.name.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts new file mode 100644 index 000000000..f41f1ca5d --- /dev/null +++ b/src/api/api/bulk.ts @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise +export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index', 'type'] + const acceptedQuery: string[] = ['pipeline', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'timeout', 'type', 'wait_for_active_shards', 'require_alias', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: any = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + body = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null && params.type != null) { + method = 'POST' + path = `/${encodeURIComponent(params.index.toString())}/${encodeURIComponent(params.type.toString())}/_bulk` + } else if (params.index != null) { + method = 'POST' + path = `/${encodeURIComponent(params.index.toString())}/_bulk` + } else { + method = 'POST' + path = '/_bulk' + } + return await this.transport.request({ path, method, querystring, bulkBody: body }, options) +} diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts new file mode 100644 index 000000000..dbadd370e --- /dev/null +++ b/src/api/api/cat.ts @@ -0,0 +1,882 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Cat { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptions): Promise + async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['name'] + const acceptedQuery: string[] = ['expand_wildcards', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
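+    // The cat APIs answer with plain text by default; pass format: 'json' for
+    // structured rows instead. A hedged usage sketch (the alias name is
+    // hypothetical):
+    //
+    //   const table = await client.cat.aliases({ v: true })
+    //   const rows = await client.cat.aliases({ name: 'my-alias', format: 'json' })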
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_cat/aliases/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_cat/aliases' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptions): Promise + async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['node_id'] + const acceptedQuery: string[] = ['bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.node_id != null) { + method = 'GET' + path = `/_cat/allocation/${encodeURIComponent(params.node_id.toString())}` + } else { + method = 'GET' + path = '/_cat/allocation' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise> + async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptions): Promise + async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/_cat/count/${encodeURIComponent(params.index.toString())}` + } else { + method = 'GET' + path = '/_cat/count' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptions): Promise + async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['fields'] + const acceptedQuery: string[] = ['bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.fields != null) { + method = 'GET' + path = `/_cat/fielddata/${encodeURIComponent(params.fields.toString())}` + } else { + method = 'GET' + path = '/_cat/fielddata' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> + async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptions): Promise + async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['include_timestamp', 'ts', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/health' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise> + async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptions): Promise + async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptions): Promise + async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['bytes', 'expand_wildcards', 'health', 'include_unloaded_segments', 'pri', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/_cat/indices/${encodeURIComponent(params.index.toString())}` + } else { + method = 'GET' + path = '/_cat/indices' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptions): Promise + async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/master' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['allow_no_match', 'bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_cat/ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_cat/ml/data_frame/analytics' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['datafeed_id'] + const acceptedQuery: string[] = ['allow_no_datafeeds', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.datafeed_id != null) { + method = 'GET' + path = `/_cat/ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}` + } else { + method = 'GET' + path = '/_cat/ml/datafeeds' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptions): Promise + async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['job_id'] + const acceptedQuery: string[] = ['allow_no_jobs', 'bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.job_id != null) { + method = 'GET' + path = `/_cat/ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}` + } else { + method = 'GET' + path = '/_cat/ml/anomaly_detectors' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['model_id'] + const acceptedQuery: string[] = ['allow_no_match', 'bytes', 'from', 'size', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.model_id != null) { + method = 'GET' + path = `/_cat/ml/trained_models/${encodeURIComponent(params.model_id.toString())}` + } else { + method = 'GET' + path = '/_cat/ml/trained_models' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptions): Promise + async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/nodeattrs' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptions): Promise + async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['bytes', 'full_id', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/nodes' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> + async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptions): Promise + async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/pending_tasks' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptions): Promise + async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/plugins' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptions): Promise + async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['active_only', 'bytes', 'detailed', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/_cat/recovery/${encodeURIComponent(params.index.toString())}` + } else { + method = 'GET' + path = '/_cat/recovery' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptions): Promise + async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/repositories' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptions): Promise + async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/_cat/segments/${encodeURIComponent(params.index.toString())}` + } else { + method = 'GET' + path = '/_cat/segments' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptions): Promise + async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/_cat/shards/${encodeURIComponent(params.index.toString())}` + } else { + method = 'GET' + path = '/_cat/shards' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptions): Promise + async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository'] + const acceptedQuery: string[] = ['ignore_unavailable', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.repository != null) {
+      method = 'GET'
+      path = `/_cat/snapshots/${encodeURIComponent(params.repository.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_cat/snapshots'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatTasksResponse>
+  async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatTasksResponse, unknown>>
+  async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptions): Promise<T.CatTasksResponse>
+  async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['actions', 'detailed', 'node_id', 'parent_task', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_cat/tasks'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatTemplatesResponse>
+  async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatTemplatesResponse, unknown>>
+  async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptions): Promise<T.CatTemplatesResponse>
+  async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_cat/templates/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_cat/templates'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatThreadPoolResponse>
+  async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatThreadPoolResponse, unknown>>
+  async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptions): Promise<T.CatThreadPoolResponse>
+  async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['thread_pool_patterns']
+    const acceptedQuery: string[] = ['size', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.thread_pool_patterns != null) {
+      method = 'GET'
+      path = `/_cat/thread_pool/${encodeURIComponent(params.thread_pool_patterns.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_cat/thread_pool'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatTransformsResponse>
+  async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatTransformsResponse, unknown>>
+  async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptions): Promise<T.CatTransformsResponse>
+  async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['transform_id']
+    const acceptedQuery: string[] = ['allow_no_match', 'from', 'size', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.transform_id != null) {
+      method = 'GET'
+      path = `/_cat/transforms/${encodeURIComponent(params.transform_id.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_cat/transforms'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
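For orientation, the generated `Cat` class above is reached through the client facade as `client.cat.*`, and the flat request keys are routed into path, query string, or body by the loops shown. A minimal usage sketch follows; the `Client` import, `node` URL, and template name are illustrative assumptions, not part of this patch:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })
// 'format' is one of the query-string keys declared in acceptedQuery above
const templates = await client.cat.templates({ name: 'my-template-*', format: 'json' })
console.log(templates)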
diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts
new file mode 100644
index 000000000..644257e36
--- /dev/null
+++ b/src/api/api/ccr.ts
@@ -0,0 +1,418 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Ccr {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrDeleteAutoFollowPatternResponse>
+  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrDeleteAutoFollowPatternResponse, unknown>>
+  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrDeleteAutoFollowPatternResponse>
+  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrFollowResponse>
+  async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrFollowResponse, unknown>>
+  async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptions): Promise<T.CcrFollowResponse>
+  async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/${encodeURIComponent(params.index.toString())}/_ccr/follow`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrFollowInfoResponse>
+  async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrFollowInfoResponse, unknown>>
+  async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise<T.CcrFollowInfoResponse>
+  async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/${encodeURIComponent(params.index.toString())}/_ccr/info`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrFollowStatsResponse>
+  async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrFollowStatsResponse, unknown>>
+  async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise<T.CcrFollowStatsResponse>
+  async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/${encodeURIComponent(params.index.toString())}/_ccr/stats`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrForgetFollowerResponse>
+  async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrForgetFollowerResponse, unknown>>
+  async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise<T.CcrForgetFollowerResponse>
+  async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/${encodeURIComponent(params.index.toString())}/_ccr/forget_follower`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrGetAutoFollowPatternResponse>
+  async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrGetAutoFollowPatternResponse, unknown>>
+  async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrGetAutoFollowPatternResponse>
+  async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_ccr/auto_follow'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrPauseAutoFollowPatternResponse>
+  async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrPauseAutoFollowPatternResponse, unknown>>
+  async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrPauseAutoFollowPatternResponse>
+  async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}/pause`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrPauseFollowResponse>
+  async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrPauseFollowResponse, unknown>>
+  async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise<T.CcrPauseFollowResponse>
+  async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/${encodeURIComponent(params.index.toString())}/_ccr/pause_follow`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrPutAutoFollowPatternResponse>
+  async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrPutAutoFollowPatternResponse, unknown>>
+  async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrPutAutoFollowPatternResponse>
+  async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrResumeAutoFollowPatternResponse>
+  async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrResumeAutoFollowPatternResponse, unknown>>
+  async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrResumeAutoFollowPatternResponse>
+  async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}/resume`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrResumeFollowResponse>
+  async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrResumeFollowResponse, unknown>>
+  async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise<T.CcrResumeFollowResponse>
+  async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/${encodeURIComponent(params.index.toString())}/_ccr/resume_follow`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrStatsResponse>
+  async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrStatsResponse, unknown>>
+  async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptions): Promise<T.CcrStatsResponse>
+  async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_ccr/stats'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrUnfollowResponse>
+  async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrUnfollowResponse, unknown>>
+  async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptions): Promise<T.CcrUnfollowResponse>
+  async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/${encodeURIComponent(params.index.toString())}/_ccr/unfollow`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
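The `Ccr` class maps each cross-cluster replication endpoint onto `client.ccr.*`, with `index` or `name` as the accepted path parameter. A minimal sketch of pausing and resuming a follower index, reusing the sketched `client` from above (the index name is a placeholder):

// `index` is the accepted path parameter of pauseFollow/resumeFollow above
await client.ccr.pauseFollow({ index: 'follower-index' })
await client.ccr.resumeFollow({ index: 'follower-index' })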
diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts
new file mode 100644
index 000000000..1db7bc45c
--- /dev/null
+++ b/src/api/api/clear_scroll.ts
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClearScrollResponse>
+export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClearScrollResponse, unknown>>
+export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptions): Promise<T.ClearScrollResponse>
+export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = []
+  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params?.body ?? undefined
+
+  params = params ?? {}
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'DELETE'
+  const path = '/_search/scroll'
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
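`ClearScrollApi` issues `DELETE /_search/scroll`; any key that is neither a path nor a query-string parameter (such as `scroll_id`) falls through into the request body per the loop above. A sketch, assuming the scroll id value is a placeholder from a prior scrolling search:

await client.clearScroll({ scroll_id: 'FGluY2x1ZGVfY29udGV4dF91dWlk...' })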
diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts
new file mode 100644
index 000000000..677d953b2
--- /dev/null
+++ b/src/api/api/close_point_in_time.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClosePointInTimeResponse>
+export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClosePointInTimeResponse, unknown>>
+export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise<T.ClosePointInTimeResponse>
+export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = []
+  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'DELETE'
+  const path = '/_pit'
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
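`ClosePointInTimeApi` works the same way against `DELETE /_pit`, with the point-in-time `id` carried in the body. A sketch, assuming `pit` is the response of a previously opened point in time:

await client.closePointInTime({ id: pit.id })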
diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts
new file mode 100644
index 000000000..1656788f9
--- /dev/null
+++ b/src/api/api/cluster.ts
@@ -0,0 +1,508 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Cluster {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterAllocationExplainResponse>
+  async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterAllocationExplainResponse, unknown>>
+  async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise<T.ClusterAllocationExplainResponse>
+  async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['include_disk_info', 'include_yes_decisions', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = body != null ? 'POST' : 'GET'
+    const path = '/_cluster/allocation/explain'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterDeleteComponentTemplateResponse>
+  async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterDeleteComponentTemplateResponse, unknown>>
+  async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterDeleteComponentTemplateResponse>
+  async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_component_template/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterDeleteVotingConfigExclusionsResponse>
+  async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterDeleteVotingConfigExclusionsResponse, unknown>>
+  async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<T.ClusterDeleteVotingConfigExclusionsResponse>
+  async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['wait_for_removal', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = '/_cluster/voting_config_exclusions'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterExistsComponentTemplateResponse>
+  async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterExistsComponentTemplateResponse, unknown>>
+  async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterExistsComponentTemplateResponse>
+  async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['master_timeout', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'HEAD'
+    const path = `/_component_template/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterGetComponentTemplateResponse>
+  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterGetComponentTemplateResponse, unknown>>
+  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterGetComponentTemplateResponse>
+  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['flat_settings', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_component_template/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_component_template'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterGetSettingsResponse>
+  async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterGetSettingsResponse, unknown>>
+  async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise<T.ClusterGetSettingsResponse>
+  async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['flat_settings', 'include_defaults', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_cluster/settings'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterHealthResponse>
+  async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterHealthResponse, unknown>>
+  async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptions): Promise<T.ClusterHealthResponse>
+  async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['expand_wildcards', 'level', 'local', 'master_timeout', 'timeout', 'wait_for_active_shards', 'wait_for_events', 'wait_for_nodes', 'wait_for_no_initializing_shards', 'wait_for_no_relocating_shards', 'wait_for_status', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = 'GET'
+      path = `/_cluster/health/${encodeURIComponent(params.index.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_cluster/health'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPendingTasksResponse>
+  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPendingTasksResponse, unknown>>
+  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<T.ClusterPendingTasksResponse>
+  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_cluster/pending_tasks'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPostVotingConfigExclusionsResponse>
+  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPostVotingConfigExclusionsResponse, unknown>>
+  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<T.ClusterPostVotingConfigExclusionsResponse>
+  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['node_names', 'node_ids', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_cluster/voting_config_exclusions'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPutComponentTemplateResponse>
+  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPutComponentTemplateResponse, unknown>>
+  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterPutComponentTemplateResponse>
+  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['create', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_component_template/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPutSettingsResponse>
+  async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPutSettingsResponse, unknown>>
+  async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise<T.ClusterPutSettingsResponse>
+  async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['flat_settings', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = '/_cluster/settings'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterRemoteInfoResponse>
+  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterRemoteInfoResponse, unknown>>
+  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise<T.ClusterRemoteInfoResponse>
+  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_remote/info'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterRerouteResponse>
+  async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterRerouteResponse, unknown>>
+  async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptions): Promise<T.ClusterRerouteResponse>
+  async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['dry_run', 'explain', 'metric', 'retry_failed', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_cluster/reroute'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterStateResponse>
+  async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterStateResponse, unknown>>
+  async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptions): Promise<T.ClusterStateResponse>
+  async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['metric', 'index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'local', 'master_timeout', 'wait_for_metadata_version', 'wait_for_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.metric != null && params.index != null) {
+      method = 'GET'
+      path = `/_cluster/state/${encodeURIComponent(params.metric.toString())}/${encodeURIComponent(params.index.toString())}`
+    } else if (params.metric != null) {
+      method = 'GET'
+      path = `/_cluster/state/${encodeURIComponent(params.metric.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_cluster/state'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterStatsResponse>
+  async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterStatsResponse, unknown>>
+  async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptions): Promise<T.ClusterStatsResponse>
+  async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['node_id']
+    const acceptedQuery: string[] = ['flat_settings', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.node_id != null) {
+      method = 'GET'
+      path = `/_cluster/stats/nodes/${encodeURIComponent(params.node_id.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_cluster/stats'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
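Both `wait_for_status` and `timeout` appear in `health`'s acceptedQuery list above, so they travel as query-string parameters rather than in the body. A sketch, reusing the `client` from earlier:

const health = await client.cluster.health({ wait_for_status: 'yellow', timeout: '30s' })
console.log(health)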
diff --git a/src/api/api/count.ts b/src/api/api/count.ts
new file mode 100644
index 000000000..1b786e867
--- /dev/null
+++ b/src/api/api/count.ts
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CountResponse>
+export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CountResponse, unknown>>
+export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptions): Promise<T.CountResponse>
+export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['index']
+  const acceptedQuery: string[] = ['allow_no_indices', 'analyzer', 'analyze_wildcard', 'default_operator', 'df', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable', 'lenient', 'min_score', 'preference', 'routing', 'terminate_after', 'q', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params?.body ?? undefined
+
+  params = params ?? {}
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  let method = ''
+  let path = ''
+  if (params.index != null) {
+    method = body != null ? 'POST' : 'GET'
+    path = `/${encodeURIComponent(params.index.toString())}/_count`
+  } else {
+    method = body != null ? 'POST' : 'GET'
+    path = '/_count'
+  }
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
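In `CountApi`, a key such as `query` is in neither acceptedPath nor acceptedQuery, so it lands in the body, which in turn flips the HTTP method from GET to POST. A sketch (index name and query are placeholders):

const result = await client.count({ index: 'my-index', query: { match_all: {} } })
console.log(result.count)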
diff --git a/src/api/api/create.ts b/src/api/api/create.ts
new file mode 100644
index 000000000..5f78ec9b9
--- /dev/null
+++ b/src/api/api/create.ts
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CreateResponse>
+export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CreateResponse, unknown>>
+export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptions): Promise<T.CreateResponse>
+export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id', 'index', 'type']
+  const acceptedQuery: string[] = ['pipeline', 'refresh', 'routing', 'timeout', 'version', 'version_type', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: any = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      // @ts-expect-error
+      body = params[key]
+    }
+  }
+
+  const method = 'PUT'
+  const path = `/${encodeURIComponent(params.index.toString())}/_create/${encodeURIComponent(params.id.toString())}`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
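Unlike the other methods, `CreateApi` assigns the remaining non-path, non-query key directly to `body` (`body = params[key]`) instead of merging keys into an object, because the whole document is the payload. With the typed API that key is expected to be the document field (an assumption about the request type here); a sketch with placeholder values:

await client.create({ index: 'my-index', id: '1', document: { title: 'Hello' } })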
diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts
new file mode 100644
index 000000000..585d13504
--- /dev/null
+++ b/src/api/api/dangling_indices.ts
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class DanglingIndices {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DanglingIndicesDeleteDanglingIndexResponse>
+  async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesDeleteDanglingIndexResponse, unknown>>
+  async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesDeleteDanglingIndexResponse>
+  async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index_uuid']
+    const acceptedQuery: string[] = ['accept_data_loss', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_dangling/${encodeURIComponent(params.index_uuid.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DanglingIndicesImportDanglingIndexResponse>
+  async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesImportDanglingIndexResponse, unknown>>
+  async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesImportDanglingIndexResponse>
+  async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index_uuid']
+    const acceptedQuery: string[] = ['accept_data_loss', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_dangling/${encodeURIComponent(params.index_uuid.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DanglingIndicesListDanglingIndicesResponse>
+  async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesListDanglingIndicesResponse, unknown>>
+  async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesListDanglingIndicesResponse>
+  async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_dangling'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
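A sketch of the DanglingIndices namespace above, assuming the list response exposes a `dangling_indices` array carrying each index UUID (all names illustrative):

    const { dangling_indices } = await client.danglingIndices.listDanglingIndices()
    for (const dangling of dangling_indices) {
      // accept_data_loss is a querystring flag and must be acknowledged explicitly.
      await client.danglingIndices.importDanglingIndex({
        index_uuid: dangling.index_uuid,
        accept_data_loss: true
      })
    }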
diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts
new file mode 100644
index 000000000..1a937b06b
--- /dev/null
+++ b/src/api/api/delete.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DeleteResponse>
+export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteResponse, unknown>>
+export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptions): Promise<T.DeleteResponse>
+export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id', 'index', 'type']
+  const acceptedQuery: string[] = ['if_primary_term', 'if_seq_no', 'refresh', 'routing', 'timeout', 'version', 'version_type', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'DELETE'
+  const path = `/${encodeURIComponent(params.index.toString())}/_doc/${encodeURIComponent(params.id.toString())}`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
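A sketch of the delete API; note that `if_seq_no` and `if_primary_term` appear in `acceptedQuery`, so they are serialized onto the querystring rather than into a body (values illustrative):

    // DELETE /my-index/_doc/1 with optimistic concurrency control.
    await client.delete({
      index: 'my-index',
      id: '1',
      if_seq_no: 3,
      if_primary_term: 1
    })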
diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts
new file mode 100644
index 000000000..96fa6681f
--- /dev/null
+++ b/src/api/api/delete_by_query.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DeleteByQueryResponse>
+export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteByQueryResponse, unknown>>
+export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptions): Promise<T.DeleteByQueryResponse>
+export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['index']
+  const acceptedQuery: string[] = ['allow_no_indices', 'analyzer', 'analyze_wildcard', 'conflicts', 'default_operator', 'df', 'expand_wildcards', 'from', 'ignore_unavailable', 'lenient', 'preference', 'refresh', 'request_cache', 'requests_per_second', 'routing', 'q', 'scroll', 'scroll_size', 'search_timeout', 'search_type', 'size', 'slices', 'sort', '_source', '_source_excludes', '_source_includes', 'stats', 'terminate_after', 'timeout', 'version', 'wait_for_active_shards', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'POST'
+  const path = `/${encodeURIComponent(params.index.toString())}/_delete_by_query`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
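A sketch of the delete-by-query API: `conflicts` travels as a querystring parameter while `query` lands in the body (index and query are illustrative):

    const response = await client.deleteByQuery({
      index: 'my-index',
      conflicts: 'proceed',
      query: { range: { age: { gte: 10 } } }
    })
    console.log(response.deleted)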
diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts
new file mode 100644
index 000000000..3ca74fd52
--- /dev/null
+++ b/src/api/api/delete_by_query_rethrottle.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DeleteByQueryRethrottleResponse>
+export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteByQueryRethrottleResponse, unknown>>
+export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<T.DeleteByQueryRethrottleResponse>
+export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['task_id']
+  const acceptedQuery: string[] = ['requests_per_second', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'POST'
+  const path = `/_delete_by_query/${encodeURIComponent(params.task_id.toString())}/_rethrottle`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
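A sketch of the rethrottle API; the task id shown is a made-up example of the `node:id` format that a `wait_for_completion: false` delete-by-query would return:

    await client.deleteByQueryRethrottle({
      task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619',
      requests_per_second: 50
    })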
diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts
new file mode 100644
index 000000000..3f603f40b
--- /dev/null
+++ b/src/api/api/delete_script.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DeleteScriptResponse>
+export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteScriptResponse, unknown>>
+export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptions): Promise<T.DeleteScriptResponse>
+export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id']
+  const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'DELETE'
+  const path = `/_scripts/${encodeURIComponent(params.id.toString())}`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
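A one-line sketch of the delete-script API (the stored-script id is illustrative):

    await client.deleteScript({ id: 'my-stored-script', timeout: '30s' })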
diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts
new file mode 100644
index 000000000..c3c575c35
--- /dev/null
+++ b/src/api/api/enrich.ts
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Enrich {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EnrichDeletePolicyResponse>
+  async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichDeletePolicyResponse, unknown>>
+  async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichDeletePolicyResponse>
+  async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_enrich/policy/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EnrichExecutePolicyResponse>
+  async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichExecutePolicyResponse, unknown>>
+  async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichExecutePolicyResponse>
+  async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_enrich/policy/${encodeURIComponent(params.name.toString())}/_execute`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EnrichGetPolicyResponse>
+  async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichGetPolicyResponse, unknown>>
+  async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichGetPolicyResponse>
+  async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_enrich/policy/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_enrich/policy'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EnrichPutPolicyResponse>
+  async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichPutPolicyResponse, unknown>>
+  async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichPutPolicyResponse>
+  async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_enrich/policy/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EnrichStatsResponse>
+  async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichStatsResponse, unknown>>
+  async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptions): Promise<T.EnrichStatsResponse>
+  async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_enrich/_stats'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
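A sketch of the Enrich namespace above: create a match policy, then execute it to build the enrich index. The policy name, source indices, and field names are illustrative assumptions.

    await client.enrich.putPolicy({
      name: 'users-policy',
      match: {
        indices: 'users',
        match_field: 'email',
        enrich_fields: ['first_name', 'last_name']
      }
    })
    await client.enrich.executePolicy({ name: 'users-policy' })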
diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts
new file mode 100644
index 000000000..8a7d5ae9c
--- /dev/null
+++ b/src/api/api/eql.ts
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Eql {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlDeleteResponse>
+  async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlDeleteResponse, unknown>>
+  async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptions): Promise<T.EqlDeleteResponse>
+  async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_eql/search/${encodeURIComponent(params.id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async get<TEvent = unknown> (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlGetResponse<TEvent>>
+  async get<TEvent = unknown> (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlGetResponse<TEvent>, unknown>>
+  async get<TEvent = unknown> (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptions): Promise<T.EqlGetResponse<TEvent>>
+  async get<TEvent = unknown> (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['keep_alive', 'wait_for_completion_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_eql/search/${encodeURIComponent(params.id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlGetStatusResponse>
+  async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlGetStatusResponse, unknown>>
+  async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<T.EqlGetStatusResponse>
+  async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_eql/search/status/${encodeURIComponent(params.id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlSearchResponse<TEvent>>
+  async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlSearchResponse<TEvent>, unknown>>
+  async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise<T.EqlSearchResponse<TEvent>>
+  async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = body != null ? 'POST' : 'GET'
+    const path = `/${encodeURIComponent(params.index.toString())}/_eql/search`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
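A sketch of the EQL search API, showing how the restored `TEvent` type parameter flows through to the typed response (the index, query, and event shape are illustrative):

    interface ProcessEvent { '@timestamp': string }

    const response = await client.eql.search<ProcessEvent>({
      index: 'my-data-stream',
      query: 'process where process.name == "regsvr32.exe"'
    })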
diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts
new file mode 100644
index 000000000..b1c8d14cd
--- /dev/null
+++ b/src/api/api/exists.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ExistsResponse>
+export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ExistsResponse, unknown>>
+export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptions): Promise<T.ExistsResponse>
+export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id', 'index']
+  const acceptedQuery: string[] = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'stored_fields', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'HEAD'
+  const path = `/${encodeURIComponent(params.index.toString())}/_doc/${encodeURIComponent(params.id.toString())}`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
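A sketch of the exists API, which issues a HEAD request and resolves to a boolean (index and id illustrative):

    const exists = await client.exists({ index: 'my-index', id: '1' })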
diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts
new file mode 100644
index 000000000..375ef4f3a
--- /dev/null
+++ b/src/api/api/exists_source.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ExistsSourceResponse>
+export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ExistsSourceResponse, unknown>>
+export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptions): Promise<T.ExistsSourceResponse>
+export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id', 'index', 'type']
+  const acceptedQuery: string[] = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'HEAD'
+  const path = `/${encodeURIComponent(params.index.toString())}/_source/${encodeURIComponent(params.id.toString())}`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts
new file mode 100644
index 000000000..62e5f0198
--- /dev/null
+++ b/src/api/api/explain.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function ExplainApi<TDocument = unknown> (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ExplainResponse<TDocument>>
+export default async function ExplainApi<TDocument = unknown> (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ExplainResponse<TDocument>, unknown>>
+export default async function ExplainApi<TDocument = unknown> (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptions): Promise<T.ExplainResponse<TDocument>>
+export default async function ExplainApi<TDocument = unknown> (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id', 'index']
+  const acceptedQuery: string[] = ['analyzer', 'analyze_wildcard', 'default_operator', 'df', 'lenient', 'preference', 'routing', '_source', '_source_excludes', '_source_includes', 'stored_fields', 'q', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = body != null ? 'POST' : 'GET'
+  const path = `/${encodeURIComponent(params.index.toString())}/_explain/${encodeURIComponent(params.id.toString())}`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
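A sketch of the explain API; `q`-style parameters go to the querystring while `query` is placed in the body, which also flips the method from GET to POST (names illustrative):

    const { matched, explanation } = await client.explain({
      index: 'my-index',
      id: '1',
      query: { match: { title: 'hello' } }
    })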
diff --git a/src/api/api/features.ts b/src/api/api/features.ts
new file mode 100644
index 000000000..f1b834ed8
--- /dev/null
+++ b/src/api/api/features.ts
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Features {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FeaturesGetFeaturesResponse>
+  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FeaturesGetFeaturesResponse, unknown>>
+  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise<T.FeaturesGetFeaturesResponse>
+  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_features'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FeaturesResetFeaturesResponse>
+  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FeaturesResetFeaturesResponse, unknown>>
+  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise<T.FeaturesResetFeaturesResponse>
+  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_features/_reset'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
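A sketch of the Features namespace; since resetFeatures is destructive and meant for development clusters, the sketch only lists the snapshottable features (the response is assumed to expose a `features` array with `name` entries):

    const { features } = await client.features.getFeatures()
    console.log(features.map(feature => feature.name))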
diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts
new file mode 100644
index 000000000..0fa9ca6bf
--- /dev/null
+++ b/src/api/api/field_caps.ts
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FieldCapsResponse>
+export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FieldCapsResponse, unknown>>
+export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise<T.FieldCapsResponse>
+export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['index']
+  const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'fields', 'ignore_unavailable', 'include_unmapped', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params?.body ?? undefined
+
+  params = params ?? {}
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  let method = ''
+  let path = ''
+  if (params.index != null) {
+    method = body != null ? 'POST' : 'GET'
+    path = `/${encodeURIComponent(params.index.toString())}/_field_caps`
+  } else {
+    method = body != null ? 'POST' : 'GET'
+    path = '/_field_caps'
+  }
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts
new file mode 100644
index 000000000..44f1f2758
--- /dev/null
+++ b/src/api/api/fleet.ts
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Fleet {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async globalCheckpoints (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async globalCheckpoints (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async globalCheckpoints (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async globalCheckpoints (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['wait_for_advance', 'wait_for_index', 'checkpoints', 'timeout']
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/${encodeURIComponent(params.index.toString())}/_fleet/global_checkpoints`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
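A sketch of the field-caps API; like count, it picks GET or POST depending on whether a body accumulated, and `fields` travels on the querystring (field and index names illustrative):

    const response = await client.fieldCaps({
      index: 'my-index',
      fields: 'rating,title'
    })
    console.log(response.fields)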
diff --git a/src/api/api/get.ts b/src/api/api/get.ts
new file mode 100644
index 000000000..9344e573b
--- /dev/null
+++ b/src/api/api/get.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetResponse<TDocument>>
+export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetResponse<TDocument>, unknown>>
+export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptions): Promise<T.GetResponse<TDocument>>
+export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id', 'index']
+  const acceptedQuery: string[] = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'stored_fields', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'GET'
+  const path = `/${encodeURIComponent(params.index.toString())}/_doc/${encodeURIComponent(params.id.toString())}`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
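A sketch of the get API, showing the restored `TDocument` generic typing `_source` (the document shape is illustrative):

    interface Doc { title: string }

    const response = await client.get<Doc>({ index: 'my-index', id: '1' })
    console.log(response._source?.title)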
diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts
new file mode 100644
index 000000000..0e27e86d0
--- /dev/null
+++ b/src/api/api/get_script.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetScriptResponse>
+export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptResponse, unknown>>
+export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptions): Promise<T.GetScriptResponse>
+export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id']
+  const acceptedQuery: string[] = ['master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'GET'
+  const path = `/_scripts/${encodeURIComponent(params.id.toString())}`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
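Here `id` is the only path parameter, so the call shape is minimal. A usage sketch, assuming a client instance and a previously stored script with the hypothetical id 'my-script':

    // GET /_scripts/my-script
    const script = await client.getScript({ id: 'my-script' })
    console.log(script.script?.source)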
diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts
new file mode 100644
index 000000000..0e0bc0f7a
--- /dev/null
+++ b/src/api/api/get_script_context.ts
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetScriptContextResponse>
+export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptContextResponse, unknown>>
+export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptions): Promise<T.GetScriptContextResponse>
+export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = []
+  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params?.body ?? undefined
+
+  params = params ?? {}
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'GET'
+  const path = '/_script_context'
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
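This endpoint takes no path parameters, which is why `acceptedPath` is empty, `params` is optional, and the function defaults it with `params = params ?? {}`. The sibling get_script_languages.ts below follows the same shape. A sketch of calling it with no arguments at all:

    // GET /_script_context — list the script contexts the cluster supports
    const contexts = await client.getScriptContext()
    console.log(contexts)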
diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts
new file mode 100644
index 000000000..6d492458f
--- /dev/null
+++ b/src/api/api/get_script_languages.ts
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetScriptLanguagesResponse>
+export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptLanguagesResponse, unknown>>
+export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<T.GetScriptLanguagesResponse>
+export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = []
+  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params?.body ?? undefined
+
+  params = params ?? {}
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'GET'
+  const path = '/_script_language'
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts
new file mode 100644
index 000000000..60c6e7e55
--- /dev/null
+++ b/src/api/api/get_source.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function GetSourceApi<TDocument = unknown> (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetSourceResponse<TDocument>>
+export default async function GetSourceApi<TDocument = unknown> (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetSourceResponse<TDocument>, unknown>>
+export default async function GetSourceApi<TDocument = unknown> (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptions): Promise<T.GetSourceResponse<TDocument>>
+export default async function GetSourceApi<TDocument = unknown> (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id', 'index']
+  const acceptedQuery: string[] = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'stored_fields', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'GET'
+  const path = `/${encodeURIComponent(params.index.toString())}/_source/${encodeURIComponent(params.id.toString())}`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
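Unlike GetApi, this function hits `/{index}/_source/{id}` and resolves to the raw document source, typed through the `TDocument` generic. A sketch with hypothetical index, id, and field names, projecting fields via the `_source_includes` querystring parameter:

    // GET /articles/_source/1?_source_includes=title,tags
    const source = await client.getSource<{ title: string, tags: string[] }>({
      index: 'articles',
      id: '1',
      _source_includes: ['title', 'tags']
    })
    console.log(source.title)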
diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts
new file mode 100644
index 000000000..38ec4fa0a
--- /dev/null
+++ b/src/api/api/graph.ts
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Graph {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GraphExploreResponse>
+  async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GraphExploreResponse, unknown>>
+  async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptions): Promise<T.GraphExploreResponse>
+  async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['routing', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = body != null ? 'POST' : 'GET'
+    const path = `/${encodeURIComponent(params.index.toString())}/_graph/explore`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
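Note the line `const method = body != null ? 'POST' : 'GET'`: for endpoints that accept an optional body, the HTTP verb is chosen at runtime depending on whether any key ended up in the body. A sketch under that assumption, with a hypothetical index and an assumed request shape (`query` and `vertices` are body-level fields in the explore request):

    // 'query' and 'vertices' are not in acceptedQuery, so they land in the
    // body, which makes the generated code pick POST over GET.
    const graph = await client.graph.explore({
      index: 'articles',
      query: { match: { tags: 'elasticsearch' } },
      vertices: [{ field: 'tags' }]
    })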
diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts
new file mode 100644
index 000000000..eebf27269
--- /dev/null
+++ b/src/api/api/ilm.ts
@@ -0,0 +1,362 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Ilm {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmDeleteLifecycleResponse>
+  async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmDeleteLifecycleResponse, unknown>>
+  async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise<T.IlmDeleteLifecycleResponse>
+  async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['policy']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_ilm/policy/${encodeURIComponent(params.policy.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmExplainLifecycleResponse>
+  async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmExplainLifecycleResponse, unknown>>
+  async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise<T.IlmExplainLifecycleResponse>
+  async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['only_errors', 'only_managed', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/${encodeURIComponent(params.index.toString())}/_ilm/explain`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmGetLifecycleResponse>
+  async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmGetLifecycleResponse, unknown>>
+  async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise<T.IlmGetLifecycleResponse>
+  async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['policy']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.policy != null) {
+      method = 'GET'
+      path = `/_ilm/policy/${encodeURIComponent(params.policy.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_ilm/policy'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmGetStatusResponse>
+  async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmGetStatusResponse, unknown>>
+  async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptions): Promise<T.IlmGetStatusResponse>
+  async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_ilm/status'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async migrateToDataTiers (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async migrateToDataTiers (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async migrateToDataTiers (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async migrateToDataTiers (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['dry_run']
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_ilm/migrate_to_data_tiers'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMoveToStepResponse>
+  async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmMoveToStepResponse, unknown>>
+  async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<T.IlmMoveToStepResponse>
+  async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ilm/move/${encodeURIComponent(params.index.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmPutLifecycleResponse>
+  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmPutLifecycleResponse, unknown>>
+  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<T.IlmPutLifecycleResponse>
+  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_ilm/policy/${encodeURIComponent(params.policy.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRemovePolicyResponse>
+  async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmRemovePolicyResponse, unknown>>
+  async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise<T.IlmRemovePolicyResponse>
+  async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/${encodeURIComponent(params.index.toString())}/_ilm/remove`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRetryResponse>
+  async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmRetryResponse, unknown>>
+  async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptions): Promise<T.IlmRetryResponse>
+  async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/${encodeURIComponent(params.index.toString())}/_ilm/retry`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStartResponse>
+  async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmStartResponse, unknown>>
+  async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptions): Promise<T.IlmStartResponse>
+  async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_ilm/start'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStopResponse>
+  async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmStopResponse, unknown>>
+  async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptions): Promise<T.IlmStopResponse>
+  async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_ilm/stop'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
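The Ilm class above covers the full policy lifecycle: write a policy, read it back, and inspect the step an index is on. A round-trip sketch with a hypothetical policy name and rollover settings (the `body` key is accepted here because the methods also take the `TB.*` request variants from typesWithBodyKey):

    // PUT /_ilm/policy/logs-policy
    await client.ilm.putLifecycle({
      policy: 'logs-policy',
      body: {
        policy: {
          phases: {
            hot: { actions: { rollover: { max_size: '50gb' } } },
            delete: { min_age: '30d', actions: { delete: {} } }
          }
        }
      }
    })
    // GET /_ilm/policy/logs-policy — omit 'policy' to list all policies,
    // which exercises the params.policy != null branch above.
    const policies = await client.ilm.getLifecycle({ policy: 'logs-policy' })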
diff --git a/src/api/api/index.ts b/src/api/api/index.ts
new file mode 100644
index 000000000..2800f42da
--- /dev/null
+++ b/src/api/api/index.ts
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument> | TB.IndexRequest<TDocument>, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndexResponse>
+export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument> | TB.IndexRequest<TDocument>, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndexResponse, unknown>>
+export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument> | TB.IndexRequest<TDocument>, options?: TransportRequestOptions): Promise<T.IndexResponse>
+export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument> | TB.IndexRequest<TDocument>, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id', 'index']
+  const acceptedQuery: string[] = ['if_primary_term', 'if_seq_no', 'op_type', 'pipeline', 'refresh', 'routing', 'timeout', 'version', 'version_type', 'wait_for_active_shards', 'require_alias', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: any = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      // @ts-expect-error
+      body = params[key]
+    }
+  }
+
+  let method = ''
+  let path = ''
+  if (params.index != null && params.id != null) {
+    method = 'PUT'
+    path = `/${encodeURIComponent(params.index.toString())}/_doc/${encodeURIComponent(params.id.toString())}`
+  } else {
+    method = 'POST'
+    path = `/${encodeURIComponent(params.index.toString())}/_doc`
+  }
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
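Two things are worth noting in IndexApi. First, the loop assigns `body = params[key]` rather than `body[key] = params[key]`: any single non-path, non-query key (in practice the document payload, e.g. a `document` property in the typed request) becomes the whole request body. Second, the verb depends on whether an id was supplied. A sketch under those assumptions, with hypothetical index and field names:

    // PUT /articles/_doc/1 — explicit id
    await client.index({ index: 'articles', id: '1', document: { title: 'Hello' } })
    // POST /articles/_doc — Elasticsearch assigns the id
    await client.index({ index: 'articles', document: { title: 'Hello again' } })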
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
new file mode 100644
index 000000000..c51601c59
--- /dev/null
+++ b/src/api/api/indices.ts
@@ -0,0 +1,1701 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Indices {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAddBlockResponse>
+  async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAddBlockResponse, unknown>>
+  async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise<T.IndicesAddBlockResponse>
+  async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index', 'block']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/${encodeURIComponent(params.index.toString())}/_block/${encodeURIComponent(params.block.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAnalyzeResponse>
+  async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAnalyzeResponse, unknown>>
+  async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<T.IndicesAnalyzeResponse>
+  async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = body != null ? 'POST' : 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_analyze`
+    } else {
+      method = body != null ? 'POST' : 'GET'
+      path = '/_analyze'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesClearCacheResponse>
+  async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesClearCacheResponse, unknown>>
+  async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise<T.IndicesClearCacheResponse>
+  async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'fielddata', 'fields', 'ignore_unavailable', 'query', 'request', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = 'POST'
+      path = `/${encodeURIComponent(params.index.toString())}/_cache/clear`
+    } else {
+      method = 'POST'
+      path = '/_cache/clear'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloneResponse>
+  async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloneResponse, unknown>>
+  async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptions): Promise<T.IndicesCloneResponse>
+  async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index', 'target']
+    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/${encodeURIComponent(params.index.toString())}/_clone/${encodeURIComponent(params.target.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloseResponse>
+  async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloseResponse, unknown>>
+  async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptions): Promise<T.IndicesCloseResponse>
+  async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/${encodeURIComponent(params.index.toString())}/_close`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCreateResponse>
+  async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCreateResponse, unknown>>
+  async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptions): Promise<T.IndicesCreateResponse>
+  async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['include_type_name', 'master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/${encodeURIComponent(params.index.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCreateDataStreamResponse>
+  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCreateDataStreamResponse, unknown>>
+  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesCreateDataStreamResponse>
+  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_data_stream/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDataStreamsStatsResponse>
+  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDataStreamsStatsResponse, unknown>>
+  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise<T.IndicesDataStreamsStatsResponse>
+  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['expand_wildcards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_stats`
+    } else {
+      method = 'GET'
+      path = '/_data_stream/_stats'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteResponse>
+  async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteResponse, unknown>>
+  async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteResponse>
+  async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/${encodeURIComponent(params.index.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteAliasResponse>
+  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteAliasResponse, unknown>>
+  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteAliasResponse>
+  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index', 'name']
+    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null && params.name != null) {
+      method = 'DELETE'
+      path = `/${encodeURIComponent(params.index.toString())}/_alias/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'DELETE'
+      path = `/${encodeURIComponent(params.index.toString())}/_aliases/${encodeURIComponent(params.name.toString())}`
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteDataStreamResponse>
+  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteDataStreamResponse, unknown>>
+  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteDataStreamResponse>
+  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['expand_wildcards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_data_stream/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteIndexTemplateResponse>
+  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteIndexTemplateResponse, unknown>>
+  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteIndexTemplateResponse>
+  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_index_template/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteTemplateResponse>
+  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteTemplateResponse, unknown>>
+  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteTemplateResponse>
+  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_template/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDiskUsageResponse>
+  async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDiskUsageResponse, unknown>>
+  async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise<T.IndicesDiskUsageResponse>
+  async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flush', 'ignore_unavailable', 'master_timeout', 'timeout', 'run_expensive_tasks', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/${encodeURIComponent(params.index.toString())}/_disk_usage`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsResponse>
+  async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsResponse, unknown>>
+  async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsResponse>
+  async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'include_defaults', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'HEAD'
+    const path = `/${encodeURIComponent(params.index.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
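+  // exists() and the exists* variants below map to HEAD requests, so there is
+  // no response body: the transport resolves the promise to a boolean derived
+  // from the HTTP status code. A usage sketch (hypothetical index name):
+  //
+  //   const present = await client.indices.exists({ index: 'articles' })
+  //   if (!present) {
+  //     await client.indices.create({ index: 'articles' })
+  //   }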
+
+  async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsAliasResponse>
+  async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsAliasResponse, unknown>>
+  async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsAliasResponse>
+  async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name', 'index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null && params.name != null) {
+      method = 'HEAD'
+      path = `/${encodeURIComponent(params.index.toString())}/_alias/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'HEAD'
+      path = `/_alias/${encodeURIComponent(params.name.toString())}`
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsIndexTemplateResponse>
+  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsIndexTemplateResponse, unknown>>
+  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsIndexTemplateResponse>
+  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'HEAD'
+    const path = `/_index_template/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsTemplateResponse>
+  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsTemplateResponse, unknown>>
+  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsTemplateResponse>
+  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['flat_settings', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'HEAD'
+    const path = `/_template/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async existsType (this: That, params: T.IndicesExistsTypeRequest | TB.IndicesExistsTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsTypeResponse>
+  async existsType (this: That, params: T.IndicesExistsTypeRequest | TB.IndicesExistsTypeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsTypeResponse, unknown>>
+  async existsType (this: That, params: T.IndicesExistsTypeRequest | TB.IndicesExistsTypeRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsTypeResponse>
+  async existsType (this: That, params: T.IndicesExistsTypeRequest | TB.IndicesExistsTypeRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index', 'type']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'HEAD'
+    const path = `/${encodeURIComponent(params.index.toString())}/_mapping/${encodeURIComponent(params.type.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
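+
+  // Note: the exists* methods above issue HEAD requests, and with this client
+  // they resolve to a boolean. A minimal sketch (the index name is hypothetical):
+  //
+  //   if (await client.indices.exists({ index: 'my-index' })) {
+  //     // the index is there
+  //   }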
+
+  async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['fields', 'ignore_unavailable', 'allow_no_indices', 'expand_wildcards']
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/${encodeURIComponent(params.index.toString())}/_field_usage_stats`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesFlushResponse>
+  async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesFlushResponse, unknown>>
+  async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptions): Promise<T.IndicesFlushResponse>
+  async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'force', 'ignore_unavailable', 'wait_if_ongoing', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = body != null ? 'POST' : 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_flush`
+    } else {
+      method = body != null ? 'POST' : 'GET'
+      path = '/_flush'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesForcemergeResponse>
+  async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesForcemergeResponse, unknown>>
+  async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise<T.IndicesForcemergeResponse>
+  async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flush', 'ignore_unavailable', 'max_num_segments', 'only_expunge_deletes', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = 'POST'
+      path = `/${encodeURIComponent(params.index.toString())}/_forcemerge`
+    } else {
+      method = 'POST'
+      path = '/_forcemerge'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetResponse>
+  async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetResponse, unknown>>
+  async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptions): Promise<T.IndicesGetResponse>
+  async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'include_defaults', 'include_type_name', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/${encodeURIComponent(params.index.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetAliasResponse>
+  async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetAliasResponse, unknown>>
+  async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise<T.IndicesGetAliasResponse>
+  async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name', 'index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null && params.name != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_alias/${encodeURIComponent(params.name.toString())}`
+    } else if (params.name != null) {
+      method = 'GET'
+      path = `/_alias/${encodeURIComponent(params.name.toString())}`
+    } else if (params.index != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_alias`
+    } else {
+      method = 'GET'
+      path = '/_alias'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetDataStreamResponse>
+  async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetDataStreamResponse, unknown>>
+  async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesGetDataStreamResponse>
+  async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['expand_wildcards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_data_stream/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_data_stream'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
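+
+  // Usage sketch (names are hypothetical): both methods above can be called
+  // with no arguments to list everything, or with a name/index to narrow the
+  // lookup.
+  //
+  //   const aliases = await client.indices.getAlias({ index: 'my-index' })
+  //   const streams = await client.indices.getDataStream()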
+
+  async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetFieldMappingResponse>
+  async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetFieldMappingResponse, unknown>>
+  async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise<T.IndicesGetFieldMappingResponse>
+  async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['fields', 'index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'include_defaults', 'include_type_name', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null && params.fields != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_mapping/field/${encodeURIComponent(params.fields.toString())}`
+    } else {
+      method = 'GET'
+      path = `/_mapping/field/${encodeURIComponent(params.fields.toString())}`
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetIndexTemplateResponse>
+  async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetIndexTemplateResponse, unknown>>
+  async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesGetIndexTemplateResponse>
+  async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['local', 'flat_settings', 'include_type_name', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_index_template/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_index_template'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetMappingResponse>
+  async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetMappingResponse, unknown>>
+  async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise<T.IndicesGetMappingResponse>
+  async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'include_type_name', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_mapping`
+    } else {
+      method = 'GET'
+      path = '/_mapping'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetSettingsResponse>
+  async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetSettingsResponse, unknown>>
+  async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise<T.IndicesGetSettingsResponse>
+  async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index', 'name']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'include_defaults', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null && params.name != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_settings/${encodeURIComponent(params.name.toString())}`
+    } else if (params.index != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_settings`
+    } else if (params.name != null) {
+      method = 'GET'
+      path = `/_settings/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_settings'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetTemplateResponse>
+  async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetTemplateResponse, unknown>>
+  async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesGetTemplateResponse>
+  async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['flat_settings', 'include_type_name', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_template/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_template'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesMigrateToDataStreamResponse>
+  async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesMigrateToDataStreamResponse, unknown>>
+  async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesMigrateToDataStreamResponse>
+  async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_data_stream/_migrate/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
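+
+  // Usage sketch (the alias name is hypothetical): migrates an index alias
+  // and its backing indices to a data stream.
+  //
+  //   await client.indices.migrateToDataStream({ name: 'logs-myapp' })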
+
+  async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesOpenResponse>
+  async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesOpenResponse, unknown>>
+  async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptions): Promise<T.IndicesOpenResponse>
+  async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/${encodeURIComponent(params.index.toString())}/_open`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPromoteDataStreamResponse>
+  async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPromoteDataStreamResponse, unknown>>
+  async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesPromoteDataStreamResponse>
+  async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_data_stream/_promote/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutAliasResponse>
+  async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutAliasResponse, unknown>>
+  async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise<T.IndicesPutAliasResponse>
+  async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index', 'name']
+    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null && params.name != null) {
+      method = 'PUT'
+      path = `/${encodeURIComponent(params.index.toString())}/_alias/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'PUT'
+      path = `/${encodeURIComponent(params.index.toString())}/_aliases/${encodeURIComponent(params.name.toString())}`
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutIndexTemplateResponse>
+  async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutIndexTemplateResponse, unknown>>
+  async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesPutIndexTemplateResponse>
+  async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_index_template/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutMappingResponse>
+  async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutMappingResponse, unknown>>
+  async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise<T.IndicesPutMappingResponse>
+  async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'include_type_name', 'master_timeout', 'timeout', 'write_index_only', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/${encodeURIComponent(params.index.toString())}/_mapping`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutSettingsResponse>
+  async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutSettingsResponse, unknown>>
+  async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise<T.IndicesPutSettingsResponse>
+  async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'master_timeout', 'preserve_existing', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: any = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        body = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = 'PUT'
+      path = `/${encodeURIComponent(params.index.toString())}/_settings`
+    } else {
+      method = 'PUT'
+      path = '/_settings'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
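+
+  // Note: unlike most methods in this class, putSettings (and simulateTemplate
+  // below) uses the remaining parameter's value as the whole request body
+  // (`body = params[key]`) instead of merging keys one by one. A sketch with
+  // hypothetical names and values:
+  //
+  //   await client.indices.putSettings({
+  //     index: 'my-index',
+  //     settings: { 'index.number_of_replicas': 1 }
+  //   })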
+
+  async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutTemplateResponse>
+  async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutTemplateResponse, unknown>>
+  async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesPutTemplateResponse>
+  async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['create', 'flat_settings', 'include_type_name', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_template/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRecoveryResponse>
+  async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRecoveryResponse, unknown>>
+  async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise<T.IndicesRecoveryResponse>
+  async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['active_only', 'detailed', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_recovery`
+    } else {
+      method = 'GET'
+      path = '/_recovery'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRefreshResponse>
+  async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRefreshResponse, unknown>>
+  async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptions): Promise<T.IndicesRefreshResponse>
+  async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = body != null ? 'POST' : 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_refresh`
+    } else {
+      method = body != null ? 'POST' : 'GET'
+      path = '/_refresh'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesReloadSearchAnalyzersResponse>
+  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesReloadSearchAnalyzersResponse, unknown>>
+  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise<T.IndicesReloadSearchAnalyzersResponse>
+  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = body != null ? 'POST' : 'GET'
+    const path = `/${encodeURIComponent(params.index.toString())}/_reload_search_analyzers`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveIndexResponse>
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveIndexResponse, unknown>>
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveIndexResponse>
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['expand_wildcards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_resolve/index/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRolloverResponse>
+  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRolloverResponse, unknown>>
+  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<T.IndicesRolloverResponse>
+  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['alias', 'new_index']
+    const acceptedQuery: string[] = ['dry_run', 'include_type_name', 'master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.alias != null && params.new_index != null) {
+      method = 'POST'
+      path = `/${encodeURIComponent(params.alias.toString())}/_rollover/${encodeURIComponent(params.new_index.toString())}`
+    } else {
+      method = 'POST'
+      path = `/${encodeURIComponent(params.alias.toString())}/_rollover`
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSegmentsResponse>
+  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSegmentsResponse, unknown>>
+  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<T.IndicesSegmentsResponse>
+  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'verbose', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_segments`
+    } else {
+      method = 'GET'
+      path = '/_segments'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesShardStoresResponse>
+  async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesShardStoresResponse, unknown>>
+  async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise<T.IndicesShardStoresResponse>
+  async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'status', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_shard_stores`
+    } else {
+      method = 'GET'
+      path = '/_shard_stores'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesShrinkResponse>
+  async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesShrinkResponse, unknown>>
+  async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptions): Promise<T.IndicesShrinkResponse>
+  async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index', 'target']
+    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/${encodeURIComponent(params.index.toString())}/_shrink/${encodeURIComponent(params.target.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSimulateIndexTemplateResponse>
+  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSimulateIndexTemplateResponse, unknown>>
+  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesSimulateIndexTemplateResponse>
+  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_index_template/_simulate_index/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
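+
+  // Usage sketch (the index name is hypothetical): previews the composed
+  // template that would be applied to an index with the given name.
+  //
+  //   const sim = await client.indices.simulateIndexTemplate({ name: 'my-index' })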
+
+  async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSimulateTemplateResponse>
+  async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSimulateTemplateResponse, unknown>>
+  async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesSimulateTemplateResponse>
+  async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['create', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: any = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        body = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'POST'
+      path = `/_index_template/_simulate/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'POST'
+      path = '/_index_template/_simulate'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSplitResponse>
+  async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSplitResponse, unknown>>
+  async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptions): Promise<T.IndicesSplitResponse>
+  async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index', 'target']
+    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/${encodeURIComponent(params.index.toString())}/_split/${encodeURIComponent(params.target.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesStatsResponse>
+  async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesStatsResponse, unknown>>
+  async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptions): Promise<T.IndicesStatsResponse>
+  async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['metric', 'index']
+    const acceptedQuery: string[] = ['completion_fields', 'expand_wildcards', 'fielddata_fields', 'fields', 'forbid_closed_indices', 'groups', 'include_segment_file_sizes', 'include_unloaded_segments', 'level', 'types', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null && params.metric != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_stats/${encodeURIComponent(params.metric.toString())}`
+    } else if (params.metric != null) {
+      method = 'GET'
+      path = `/_stats/${encodeURIComponent(params.metric.toString())}`
+    } else if (params.index != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_stats`
+    } else {
+      method = 'GET'
+      path = '/_stats'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesUpdateAliasesResponse>
+  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesUpdateAliasesResponse, unknown>>
+  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise<T.IndicesUpdateAliasesResponse>
+  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_aliases'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesValidateQueryResponse>
+  async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesValidateQueryResponse, unknown>>
+  async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise<T.IndicesValidateQueryResponse>
+  async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index', 'type']
+    const acceptedQuery: string[] = ['allow_no_indices', 'all_shards', 'analyzer', 'analyze_wildcard', 'default_operator', 'df', 'expand_wildcards', 'explain', 'ignore_unavailable', 'lenient', 'rewrite', 'q', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = body != null ? 'POST' : 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_validate/query`
+    } else {
+      method = body != null ? 'POST' : 'GET'
+      path = '/_validate/query'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
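+
+// Usage sketch (illustrative only): the class above is exposed as
+// `client.indices` on a Client instance; the node URL and index name
+// below are hypothetical.
+//
+//   const client = new Client({ node: 'http://localhost:9200/' })
+//   await client.indices.refresh({ index: 'my-index' })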
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Ingest { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ingest/pipeline/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_ingest/geoip/stats' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise + async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['master_timeout', 'summary', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_ingest/pipeline/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_ingest/pipeline' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise> + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_ingest/processor/grok' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise + async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_ingest/pipeline/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise + async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['verbose', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = body != null ? 'POST' : 'GET' + path = `/_ingest/pipeline/${encodeURIComponent(params.id.toString())}/_simulate` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_ingest/pipeline/_simulate' + } + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/license.ts b/src/api/api/license.ts new file mode 100644 index 000000000..1ab3ee8e2 --- /dev/null +++ b/src/api/api/license.ts @@ -0,0 +1,248 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
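+
+// A minimal usage sketch, assuming a configured Client instance named
+// `client` that exposes this class as `client.license`:
+//
+//   const response = await client.license.get()
+//   console.log(response.license.status)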
+ +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class License { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = '/_license' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['accept_enterprise', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_license' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_license/basic_status' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_license/trial_status' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise + async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['acknowledge', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'PUT' + const path = '/_license' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['acknowledge', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = '/_license/start_basic' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['acknowledge', 'type_query_string', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = '/_license/start_trial' + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts new file mode 100644 index 000000000..7080312a4 --- /dev/null +++ b/src/api/api/logstash.ts @@ -0,0 +1,128 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
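+
+// A minimal usage sketch, assuming a configured Client instance named
+// `client` that exposes this class as `client.logstash`; the pipeline id
+// is illustrative:
+//
+//   const pipeline = await client.logstash.getPipeline({ id: 'my-pipeline' })
+//   await client.logstash.deletePipeline({ id: 'my-pipeline' })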
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Logstash {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LogstashDeletePipelineResponse>
+  async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LogstashDeletePipelineResponse, unknown>>
+  async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise<T.LogstashDeletePipelineResponse>
+  async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_logstash/pipeline/${encodeURIComponent(params.id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LogstashGetPipelineResponse>
+  async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LogstashGetPipelineResponse, unknown>>
+  async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise<T.LogstashGetPipelineResponse>
+  async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_logstash/pipeline/${encodeURIComponent(params.id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LogstashPutPipelineResponse>
+  async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LogstashPutPipelineResponse, unknown>>
+  async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise<T.LogstashPutPipelineResponse>
+  async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: any = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        body = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_logstash/pipeline/${encodeURIComponent(params.id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts
new file mode 100644
index 000000000..292c07b01
--- /dev/null
+++ b/src/api/api/mget.ts
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
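+
+// A minimal usage sketch, assuming a configured Client instance named
+// `client`, an existing index, and a hypothetical document type `MyDoc`:
+//
+//   const response = await client.mget<MyDoc>({ index: 'my-index', ids: ['1', '2'] })
+//   for (const doc of response.docs) console.log(doc)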
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MgetResponse<TDocument>>
+export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MgetResponse<TDocument>, unknown>>
+export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptions): Promise<T.MgetResponse<TDocument>>
+export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['index']
+  const acceptedQuery: string[] = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'stored_fields', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params?.body ?? undefined
+
+  params = params ?? {}
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  let method = ''
+  let path = ''
+  if (params.index != null) {
+    method = body != null ? 'POST' : 'GET'
+    path = `/${encodeURIComponent(params.index.toString())}/_mget`
+  } else {
+    method = body != null ? 'POST' : 'GET'
+    path = '/_mget'
+  }
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts
new file mode 100644
index 000000000..de7253afd
--- /dev/null
+++ b/src/api/api/migration.ts
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
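+
+// A minimal usage sketch, assuming a configured Client instance named
+// `client` that exposes this class as `client.migration`:
+//
+//   const report = await client.migration.deprecations({ index: 'my-index' })
+//   console.log(report.index_settings)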
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Migration {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MigrationDeprecationsResponse>
+  async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationDeprecationsResponse, unknown>>
+  async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<T.MigrationDeprecationsResponse>
+  async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['index']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.index != null) {
+      method = 'GET'
+      path = `/${encodeURIComponent(params.index.toString())}/_migration/deprecations`
+    } else {
+      method = 'GET'
+      path = '/_migration/deprecations'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
new file mode 100644
index 000000000..57596a6f2
--- /dev/null
+++ b/src/api/api/ml.ts
@@ -0,0 +1,2132 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
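+
+// A minimal usage sketch, assuming a configured Client instance named
+// `client` that exposes this class as `client.ml`; the job id is
+// illustrative:
+//
+//   await client.ml.closeJob({ job_id: 'my-job', force: true })
+//   await client.ml.deleteJob({ job_id: 'my-job' })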
+ +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Ml { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise + async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['job_id'] + const acceptedQuery: string[] = ['allow_no_match', 'allow_no_jobs', 'force', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_close` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['calendar_id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['calendar_id', 'event_id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/events/${encodeURIComponent(params.event_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['calendar_id', 'job_id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/jobs/${encodeURIComponent(params.job_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['force', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise + async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['datafeed_id'] + const acceptedQuery: string[] = ['force', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise + async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['job_id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.job_id != null) { + method = 'DELETE' + path = `/_ml/_delete_expired_data/${encodeURIComponent(params.job_id.toString())}` + } else { + method = 'DELETE' + path = '/_ml/_delete_expired_data' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise + async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['filter_id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/filters/${encodeURIComponent(params.filter_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise + async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['job_id', 'forecast_id'] + const acceptedQuery: string[] = ['allow_no_forecasts', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.job_id != null && params.forecast_id != null) { + method = 'DELETE' + path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_forecast/${encodeURIComponent(params.forecast_id.toString())}` + } else { + method = 'DELETE' + path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_forecast` + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptions): Promise + async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['job_id'] + const acceptedQuery: string[] = ['force', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise + async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['job_id', 'snapshot_id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise + async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['model_id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise + async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['model_alias', 'model_id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/model_aliases/${encodeURIComponent(params.model_alias.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise + async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = '/_ml/anomaly_detectors/_estimate_model_memory' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise + async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = '/_ml/data_frame/_evaluate' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async explainDataFrameAnalytics (this: That, params: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async explainDataFrameAnalytics (this: That, params: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async explainDataFrameAnalytics (this: That, params: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async explainDataFrameAnalytics (this: That, params: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = body != null ? 'POST' : 'GET' + path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_explain` + } else { + method = body != null ? 
+ async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlFlushJobResponse>
+ async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlFlushJobResponse, unknown>>
+ async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptions): Promise<T.MlFlushJobResponse>
+ async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id']
+ const acceptedQuery: string[] = ['skip_time', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'POST'
+ const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_flush`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlForecastResponse>
+ async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlForecastResponse, unknown>>
+ async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptions): Promise<T.MlForecastResponse>
+ async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'POST'
+ const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_forecast`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetBucketsResponse>
+ async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetBucketsResponse, unknown>>
+ async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<T.MlGetBucketsResponse>
+ async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id', 'timestamp']
+ const acceptedQuery: string[] = ['from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.job_id != null && params.timestamp != null) {
+ method = body != null ? 'POST' : 'GET'
+ path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/buckets/${encodeURIComponent(params.timestamp.toString())}`
+ } else {
+ method = body != null ? 'POST' : 'GET'
+ path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/buckets`
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
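+ // Usage sketch (illustrative comment): because unknown keys are routed into
+ // the request body, passing a body parameter such as `anomaly_score` makes
+ // getBuckets issue a POST instead of a GET. The job id is hypothetical.
+ //
+ //   const resp = await client.ml.getBuckets({
+ //     job_id: 'my-anomaly-job',
+ //     anomaly_score: 75,
+ //     size: 10
+ //   })
+ //   console.log(resp.count, resp.buckets)
+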
+ async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarEventsResponse>
+ async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarEventsResponse, unknown>>
+ async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<T.MlGetCalendarEventsResponse>
+ async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['calendar_id']
+ const acceptedQuery: string[] = ['job_id', 'end', 'from', 'start', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'GET'
+ const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/events`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarsResponse>
+ async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarsResponse, unknown>>
+ async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise<T.MlGetCalendarsResponse>
+ async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['calendar_id']
+ const acceptedQuery: string[] = ['from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.calendar_id != null) {
+ method = body != null ? 'POST' : 'GET'
+ path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}`
+ } else {
+ method = body != null ? 'POST' : 'GET'
+ path = '/_ml/calendars'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCategoriesResponse>
+ async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCategoriesResponse, unknown>>
+ async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise<T.MlGetCategoriesResponse>
+ async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id', 'category_id']
+ const acceptedQuery: string[] = ['from', 'size', 'partition_field_value', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.job_id != null && params.category_id != null) {
+ method = body != null ? 'POST' : 'GET'
+ path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/categories/${encodeURIComponent(params.category_id.toString())}`
+ } else {
+ method = body != null ? 'POST' : 'GET'
+ path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/categories/`
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsResponse>
+ async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsResponse, unknown>>
+ async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsResponse>
+ async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['id']
+ const acceptedQuery: string[] = ['allow_no_match', 'from', 'size', 'exclude_generated', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.id != null) {
+ method = 'GET'
+ path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}`
+ } else {
+ method = 'GET'
+ path = '/_ml/data_frame/analytics'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
+ async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsStatsResponse, unknown>>
+ async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
+ async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['id']
+ const acceptedQuery: string[] = ['allow_no_match', 'from', 'size', 'verbose', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.id != null) {
+ method = 'GET'
+ path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_stats`
+ } else {
+ method = 'GET'
+ path = '/_ml/data_frame/analytics/_stats'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedStatsResponse>
+ async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDatafeedStatsResponse, unknown>>
+ async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDatafeedStatsResponse>
+ async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['datafeed_id']
+ const acceptedQuery: string[] = ['allow_no_datafeeds', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.datafeed_id != null) {
+ method = 'GET'
+ path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_stats`
+ } else {
+ method = 'GET'
+ path = '/_ml/datafeeds/_stats'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedsResponse>
+ async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDatafeedsResponse, unknown>>
+ async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise<T.MlGetDatafeedsResponse>
+ async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['datafeed_id']
+ const acceptedQuery: string[] = ['allow_no_datafeeds', 'exclude_generated', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.datafeed_id != null) {
+ method = 'GET'
+ path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}`
+ } else {
+ method = 'GET'
+ path = '/_ml/datafeeds'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetFiltersResponse>
+ async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetFiltersResponse, unknown>>
+ async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptions): Promise<T.MlGetFiltersResponse>
+ async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['filter_id']
+ const acceptedQuery: string[] = ['from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.filter_id != null) {
+ method = 'GET'
+ path = `/_ml/filters/${encodeURIComponent(params.filter_id.toString())}`
+ } else {
+ method = 'GET'
+ path = '/_ml/filters'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetInfluencersResponse>
+ async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetInfluencersResponse, unknown>>
+ async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise<T.MlGetInfluencersResponse>
+ async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id']
+ const acceptedQuery: string[] = ['desc', 'end', 'exclude_interim', 'influencer_score', 'from', 'size', 'sort', 'start', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = body != null ? 'POST' : 'GET'
+ const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/influencers`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobStatsResponse>
+ async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetJobStatsResponse, unknown>>
+ async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetJobStatsResponse>
+ async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id']
+ const acceptedQuery: string[] = ['allow_no_jobs', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.job_id != null) {
+ method = 'GET'
+ path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_stats`
+ } else {
+ method = 'GET'
+ path = '/_ml/anomaly_detectors/_stats'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobsResponse>
+ async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetJobsResponse, unknown>>
+ async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptions): Promise<T.MlGetJobsResponse>
+ async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id']
+ const acceptedQuery: string[] = ['allow_no_match', 'allow_no_jobs', 'exclude_generated', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.job_id != null) {
+ method = 'GET'
+ path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}`
+ } else {
+ method = 'GET'
+ path = '/_ml/anomaly_detectors'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetModelSnapshotsResponse>
+ async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetModelSnapshotsResponse, unknown>>
+ async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<T.MlGetModelSnapshotsResponse>
+ async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id', 'snapshot_id']
+ const acceptedQuery: string[] = ['desc', 'from', 'size', 'sort', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.job_id != null && params.snapshot_id != null) {
+ method = body != null ? 'POST' : 'GET'
+ path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}`
+ } else {
+ method = body != null ? 'POST' : 'GET'
+ path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots`
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
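+ // Usage sketch (illustrative comment): getJobs with a wildcard expression;
+ // `allow_no_match` travels in the querystring because it is listed in
+ // acceptedQuery. The job pattern below is hypothetical.
+ //
+ //   const resp = await client.ml.getJobs({ job_id: 'my-*', allow_no_match: true })
+ //   console.log(resp.count, resp.jobs)
+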
+ async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetOverallBucketsResponse>
+ async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetOverallBucketsResponse, unknown>>
+ async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise<T.MlGetOverallBucketsResponse>
+ async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id']
+ const acceptedQuery: string[] = ['bucket_span', 'overall_score', 'top_n', 'end', 'start', 'exclude_interim', 'allow_no_match', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = body != null ? 'POST' : 'GET'
+ const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/overall_buckets`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetRecordsResponse>
+ async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetRecordsResponse, unknown>>
+ async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise<T.MlGetRecordsResponse>
+ async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id']
+ const acceptedQuery: string[] = ['from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = body != null ? 'POST' : 'GET'
+ const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/records`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getTrainedModelDeploymentStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+ async getTrainedModelDeploymentStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+ async getTrainedModelDeploymentStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+ async getTrainedModelDeploymentStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['model_id']
+ const acceptedQuery: string[] = []
+ const querystring: Record<string, any> = {}
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'GET'
+ const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_stats`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetTrainedModelsResponse>
+ async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetTrainedModelsResponse, unknown>>
+ async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise<T.MlGetTrainedModelsResponse>
+ async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['model_id']
+ const acceptedQuery: string[] = ['allow_no_match', 'decompress_definition', 'exclude_generated', 'from', 'include', 'size', 'tags', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.model_id != null) {
+ method = 'GET'
+ path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}`
+ } else {
+ method = 'GET'
+ path = '/_ml/trained_models'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetTrainedModelsStatsResponse>
+ async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetTrainedModelsStatsResponse, unknown>>
+ async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetTrainedModelsStatsResponse>
+ async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['model_id']
+ const acceptedQuery: string[] = ['allow_no_match', 'from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.model_id != null) {
+ method = 'GET'
+ path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/_stats`
+ } else {
+ method = 'GET'
+ path = '/_ml/trained_models/_stats'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async inferTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+ async inferTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+ async inferTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+ async inferTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['model_id']
+ const acceptedQuery: string[] = ['timeout']
+ const querystring: Record<string, any> = {}
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'POST'
+ const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_infer`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlInfoResponse>
+ async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlInfoResponse, unknown>>
+ async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptions): Promise<T.MlInfoResponse>
+ async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = []
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'GET'
+ const path = '/_ml/info'
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlOpenJobResponse>
+ async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlOpenJobResponse, unknown>>
+ async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptions): Promise<T.MlOpenJobResponse>
+ async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'POST'
+ const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_open`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
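+ // Usage sketch (illustrative comment): opening a job before posting data to
+ // it; the job id is hypothetical.
+ //
+ //   await client.ml.openJob({ job_id: 'my-anomaly-job' })
+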
+ async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPostCalendarEventsResponse>
+ async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPostCalendarEventsResponse, unknown>>
+ async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise<T.MlPostCalendarEventsResponse>
+ async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['calendar_id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'POST'
+ const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/events`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPostDataResponse>
+ async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPostDataResponse, unknown>>
+ async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptions): Promise<T.MlPostDataResponse>
+ async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id']
+ const acceptedQuery: string[] = ['reset_end', 'reset_start', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: any = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ // @ts-expect-error
+ body = params[key]
+ }
+ }
+
+ const method = 'POST'
+ const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_data`
+ return await this.transport.request({ path, method, querystring, bulkBody: body }, options)
+ }
+
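+ // Usage sketch (illustrative comment): postData is the one method above that
+ // forwards its payload as `bulkBody`, so an array of documents is serialized
+ // as newline-delimited JSON rather than as a single JSON object. The field
+ // names below are assumptions for the example.
+ //
+ //   await client.ml.postData({
+ //     job_id: 'my-anomaly-job',
+ //     body: [
+ //       { timestamp: 1620000000000, responsetime: 12.3 },
+ //       { timestamp: 1620000060000, responsetime: 45.6 }
+ //     ]
+ //   })
+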
+ async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPreviewDataFrameAnalyticsResponse>
+ async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPreviewDataFrameAnalyticsResponse, unknown>>
+ async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlPreviewDataFrameAnalyticsResponse>
+ async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.id != null) {
+ method = body != null ? 'POST' : 'GET'
+ path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_preview`
+ } else {
+ method = body != null ? 'POST' : 'GET'
+ path = '/_ml/data_frame/analytics/_preview'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async previewDatafeed<TDocument = unknown> (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPreviewDatafeedResponse<TDocument>>
+ async previewDatafeed<TDocument = unknown> (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPreviewDatafeedResponse<TDocument>, unknown>>
+ async previewDatafeed<TDocument = unknown> (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlPreviewDatafeedResponse<TDocument>>
+ async previewDatafeed<TDocument = unknown> (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['datafeed_id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ let method = ''
+ let path = ''
+ if (params.datafeed_id != null) {
+ method = body != null ? 'POST' : 'GET'
+ path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_preview`
+ } else {
+ method = body != null ? 'POST' : 'GET'
+ path = '/_ml/datafeeds/_preview'
+ }
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
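+ // Usage sketch (illustrative comment): previewDatafeed is generic over the
+ // shape of the previewed documents, so a caller can type the result. The
+ // document interface and datafeed id below are assumptions.
+ //
+ //   interface Doc { timestamp: number, responsetime: number }
+ //   const docs = await client.ml.previewDatafeed<Doc>({ datafeed_id: 'datafeed-my-job' })
+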
+ async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutCalendarResponse>
+ async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutCalendarResponse, unknown>>
+ async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptions): Promise<T.MlPutCalendarResponse>
+ async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['calendar_id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'PUT'
+ const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutCalendarJobResponse>
+ async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutCalendarJobResponse, unknown>>
+ async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise<T.MlPutCalendarJobResponse>
+ async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['calendar_id', 'job_id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'PUT'
+ const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/jobs/${encodeURIComponent(params.job_id.toString())}`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutDataFrameAnalyticsResponse>
+ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutDataFrameAnalyticsResponse, unknown>>
+ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlPutDataFrameAnalyticsResponse>
+ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'PUT'
+ const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutDatafeedResponse>
+ async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutDatafeedResponse, unknown>>
+ async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlPutDatafeedResponse>
+ async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['datafeed_id']
+ const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'PUT'
+ const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutFilterResponse>
+ async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutFilterResponse, unknown>>
+ async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptions): Promise<T.MlPutFilterResponse>
+ async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['filter_id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'PUT'
+ const path = `/_ml/filters/${encodeURIComponent(params.filter_id.toString())}`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutJobResponse>
+ async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutJobResponse, unknown>>
+ async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise<T.MlPutJobResponse>
+ async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'PUT'
+ const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
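+ // Usage sketch (illustrative comment): creating an anomaly detection job;
+ // every field below lands in the request body, since only the standard
+ // parameters are accepted in the querystring. Names are hypothetical.
+ //
+ //   await client.ml.putJob({
+ //     job_id: 'my-anomaly-job',
+ //     analysis_config: {
+ //       bucket_span: '15m',
+ //       detectors: [{ function: 'mean', field_name: 'responsetime' }]
+ //     },
+ //     data_description: { time_field: 'timestamp' }
+ //   })
+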
+ async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelResponse>
+ async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelResponse, unknown>>
+ async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelResponse>
+ async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['model_id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'PUT'
+ const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelAliasResponse>
+ async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelAliasResponse, unknown>>
+ async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelAliasResponse>
+ async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['model_alias', 'model_id']
+ const acceptedQuery: string[] = ['reassign', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'PUT'
+ const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/model_aliases/${encodeURIComponent(params.model_alias.toString())}`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async putTrainedModelDefinitionPart (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+ async putTrainedModelDefinitionPart (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+ async putTrainedModelDefinitionPart (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+ async putTrainedModelDefinitionPart (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['model_id', 'part']
+ const acceptedQuery: string[] = []
+ const querystring: Record<string, any> = {}
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'PUT'
+ const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/definition/${encodeURIComponent(params.part.toString())}`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async putTrainedModelVocabulary (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+ async putTrainedModelVocabulary (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+ async putTrainedModelVocabulary (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+ async putTrainedModelVocabulary (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['model_id']
+ const acceptedQuery: string[] = []
+ const querystring: Record<string, any> = {}
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'PUT'
+ const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/vocabulary`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlResetJobResponse>
+ async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlResetJobResponse, unknown>>
+ async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptions): Promise<T.MlResetJobResponse>
+ async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id']
+ const acceptedQuery: string[] = ['wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'POST'
+ const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_reset`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlRevertModelSnapshotResponse>
+ async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlRevertModelSnapshotResponse, unknown>>
+ async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlRevertModelSnapshotResponse>
+ async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = ['job_id', 'snapshot_id']
+ const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params.body ?? undefined
+
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'POST'
+ const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_revert`
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+ async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlSetUpgradeModeResponse>
+ async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlSetUpgradeModeResponse, unknown>>
+ async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<T.MlSetUpgradeModeResponse>
+ async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<any> {
+ const acceptedPath: string[] = []
+ const acceptedQuery: string[] = ['enabled', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+ const querystring: Record<string, any> = {}
+ // @ts-expect-error
+ let body: Record<string, any> = params?.body ?? undefined
+
+ params = params ?? {}
+ for (const key in params) {
+ if (acceptedQuery.includes(key)) {
+ // @ts-expect-error
+ querystring[key] = params[key]
+ } else if (acceptedPath.includes(key)) {
+ continue
+ } else if (key !== 'body') {
+ body = body ?? {}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+
+ const method = 'POST'
+ const path = '/_ml/set_upgrade_mode'
+ return await this.transport.request({ path, method, querystring, body }, options)
+ }
+
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_ml/set_upgrade_mode'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDataFrameAnalyticsResponse>
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDataFrameAnalyticsResponse, unknown>>
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlStartDataFrameAnalyticsResponse>
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_start`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDatafeedResponse>
+  async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDatafeedResponse, unknown>>
+  async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlStartDatafeedResponse>
+  async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['datafeed_id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_start`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async startTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async startTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async startTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async startTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['model_id']
+    const acceptedQuery: string[] = ['timeout', 'wait_for']
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_start`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStopDataFrameAnalyticsResponse>
+  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStopDataFrameAnalyticsResponse, unknown>>
+  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlStopDataFrameAnalyticsResponse>
+  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['allow_no_match', 'force', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_stop`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStopDatafeedResponse>
+  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStopDatafeedResponse, unknown>>
+  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlStopDatafeedResponse>
+  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['datafeed_id']
+    const acceptedQuery: string[] = ['allow_no_match', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_stop`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stopTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async stopTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async stopTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async stopTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['model_id']
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_stop`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateDataFrameAnalyticsResponse>
+  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateDataFrameAnalyticsResponse, unknown>>
+  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlUpdateDataFrameAnalyticsResponse>
+  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_update`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async updateDatafeed (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async updateDatafeed (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async updateDatafeed (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async updateDatafeed (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['datafeed_id']
+    const acceptedQuery: string[] = ['ignore_unavailable', 'allow_no_indices', 'ignore_throttled', 'expand_wildcards']
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_update`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateFilterResponse>
+  async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateFilterResponse, unknown>>
+  async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise<T.MlUpdateFilterResponse>
+  async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['filter_id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/filters/${encodeURIComponent(params.filter_id.toString())}/_update`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateJobResponse>
+  async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateJobResponse, unknown>>
+  async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptions): Promise<T.MlUpdateJobResponse>
+  async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['job_id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_update`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateModelSnapshotResponse>
+  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateModelSnapshotResponse, unknown>>
+  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlUpdateModelSnapshotResponse>
+  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['job_id', 'snapshot_id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_update`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpgradeJobSnapshotResponse>
+  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpgradeJobSnapshotResponse, unknown>>
+  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlUpgradeJobSnapshotResponse>
+  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['job_id', 'snapshot_id']
+    const acceptedQuery: string[] = ['wait_for_completion', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_upgrade`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlValidateResponse>
+  async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlValidateResponse, unknown>>
+  async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptions): Promise<T.MlValidateResponse>
+  async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_ml/anomaly_detectors/_validate'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlValidateDetectorResponse>
+  async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlValidateDetectorResponse, unknown>>
+  async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise<T.MlValidateDetectorResponse>
+  async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: any = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        body = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_ml/anomaly_detectors/_validate/detector'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
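A minimal usage sketch for the ml endpoints generated above, assuming a Client from this package pointed at a reachable cluster (the job name is hypothetical). `job_id` is an acceptedPath key, so it is routed into the URL; `wait_for_completion` is an acceptedQuery key, so it becomes a querystring flag:

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })
    // -> POST /_ml/anomaly_detectors/my-job/_reset?wait_for_completion=false
    await client.ml.resetJob({ job_id: 'my-job', wait_for_completion: false })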
diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts
new file mode 100644
index 000000000..2822b7699
--- /dev/null
+++ b/src/api/api/monitoring.ts
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Monitoring {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MonitoringBulkResponse>
+  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MonitoringBulkResponse, unknown>>
+  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise<T.MonitoringBulkResponse>
+  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['type']
+    const acceptedQuery: string[] = ['system_id', 'system_api_version', 'interval', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: any = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        body = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_monitoring/bulk'
+    return await this.transport.request({ path, method, querystring, bulkBody: body }, options)
+  }
+}
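Note that `bulk` above hands its payload to the transport as `bulkBody`, not `body`: the transport serializes each array element as one NDJSON line instead of a single JSON document. A hedged sketch using the body-key request shape from typesWithBodyKey; the system id and stats document are illustrative:

    await client.monitoring.bulk({
      system_id: 'kibana',
      system_api_version: '7',
      interval: '10s',
      body: [
        { index: { _type: 'kibana_stats' } }, // action line
        { kibana_stats: { status: 'green' } } // source line
      ]
    })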
diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts
new file mode 100644
index 000000000..b55b46093
--- /dev/null
+++ b/src/api/api/msearch.ts
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function MsearchApi<TDocument = unknown> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MsearchResponse<TDocument>>
+export default async function MsearchApi<TDocument = unknown> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MsearchResponse<TDocument>, unknown>>
+export default async function MsearchApi<TDocument = unknown> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise<T.MsearchResponse<TDocument>>
+export default async function MsearchApi<TDocument = unknown> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['index']
+  const acceptedQuery: string[] = ['allow_no_indices', 'ccs_minimize_roundtrips', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable', 'max_concurrent_searches', 'max_concurrent_shard_requests', 'pre_filter_shard_size', 'search_type', 'rest_total_hits_as_int', 'typed_keys', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: any = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      // @ts-expect-error
+      body = params[key]
+    }
+  }
+
+  let method = ''
+  let path = ''
+  if (params.index != null) {
+    method = body != null ? 'POST' : 'GET'
+    path = `/${encodeURIComponent(params.index.toString())}/_msearch`
+  } else {
+    method = body != null ? 'POST' : 'GET'
+    path = '/_msearch'
+  }
+  return await this.transport.request({ path, method, querystring, bulkBody: body }, options)
+}
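`msearch` uses the same `bulkBody` mechanism: the payload alternates header and body lines, one pair per search, and the verb falls back to GET when no body was supplied. A sketch with a hypothetical index, again using the body-key request shape:

    const result = await client.msearch<{ title: string }>({
      index: 'my-index',
      body: [
        {}, // header of search 1: my-index with default options
        { query: { match: { title: 'node' } } },
        {}, // header of search 2
        { query: { match_all: {} } }
      ]
    })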
diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts
new file mode 100644
index 000000000..b6298aff1
--- /dev/null
+++ b/src/api/api/msearch_template.ts
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function MsearchTemplateApi<TDocument = unknown> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MsearchTemplateResponse<TDocument>>
+export default async function MsearchTemplateApi<TDocument = unknown> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MsearchTemplateResponse<TDocument>, unknown>>
+export default async function MsearchTemplateApi<TDocument = unknown> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise<T.MsearchTemplateResponse<TDocument>>
+export default async function MsearchTemplateApi<TDocument = unknown> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['index']
+  const acceptedQuery: string[] = ['ccs_minimize_roundtrips', 'max_concurrent_searches', 'search_type', 'rest_total_hits_as_int', 'typed_keys', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: any = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      // @ts-expect-error
+      body = params[key]
+    }
+  }
+
+  let method = ''
+  let path = ''
+  if (params.index != null) {
+    method = body != null ? 'POST' : 'GET'
+    path = `/${encodeURIComponent(params.index.toString())}/_msearch/template`
+  } else {
+    method = body != null ? 'POST' : 'GET'
+    path = '/_msearch/template'
+  }
+  return await this.transport.request({ path, method, querystring, bulkBody: body }, options)
+}
diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts
new file mode 100644
index 000000000..4cdc15eab
--- /dev/null
+++ b/src/api/api/mtermvectors.ts
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MtermvectorsResponse>
+export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MtermvectorsResponse, unknown>>
+export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptions): Promise<T.MtermvectorsResponse>
+export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['index']
+  const acceptedQuery: string[] = ['fields', 'field_statistics', 'offsets', 'payloads', 'positions', 'preference', 'realtime', 'routing', 'term_statistics', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params?.body ?? undefined
+
+  params = params ?? {}
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  let method = ''
+  let path = ''
+  if (params.index != null) {
+    method = body != null ? 'POST' : 'GET'
+    path = `/${encodeURIComponent(params.index.toString())}/_mtermvectors`
+  } else {
+    method = body != null ? 'POST' : 'GET'
+    path = '/_mtermvectors'
+  }
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
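`mtermvectors` shows the pattern used for endpoints whose parameters are all optional: `params` is defaulted to `{}` before the routing loop, and any key that is neither a path nor a query parameter (here the hypothetical `ids`) accumulates into the body, which in turn flips the verb from GET to POST:

    await client.mtermvectors({ index: 'my-index', ids: ['1', '2'], term_statistics: true })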
diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts
new file mode 100644
index 000000000..a07b701e0
--- /dev/null
+++ b/src/api/api/nodes.ts
@@ -0,0 +1,301 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Nodes {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async clearRepositoriesMeteringArchive (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async clearRepositoriesMeteringArchive (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async clearRepositoriesMeteringArchive (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async clearRepositoriesMeteringArchive (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['node_id', 'max_archive_version']
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/_repositories_metering/${encodeURIComponent(params.max_archive_version.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getRepositoriesMeteringInfo (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async getRepositoriesMeteringInfo (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async getRepositoriesMeteringInfo (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async getRepositoriesMeteringInfo (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['node_id']
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/_repositories_metering`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesHotThreadsResponse>
+  async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesHotThreadsResponse, unknown>>
+  async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise<T.NodesHotThreadsResponse>
+  async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['node_id']
+    const acceptedQuery: string[] = ['ignore_idle_threads', 'interval', 'snapshots', 'threads', 'thread_type', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.node_id != null) {
+      method = 'GET'
+      path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/hot_threads`
+    } else {
+      method = 'GET'
+      path = '/_nodes/hot_threads'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesInfoResponse>
+  async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesInfoResponse, unknown>>
+  async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptions): Promise<T.NodesInfoResponse>
+  async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['node_id', 'metric']
+    const acceptedQuery: string[] = ['flat_settings', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.node_id != null && params.metric != null) {
+      method = 'GET'
+      path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/${encodeURIComponent(params.metric.toString())}`
+    } else if (params.node_id != null) {
+      method = 'GET'
+      path = `/_nodes/${encodeURIComponent(params.node_id.toString())}`
+    } else if (params.metric != null) {
+      method = 'GET'
+      path = `/_nodes/${encodeURIComponent(params.metric.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_nodes'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesReloadSecureSettingsResponse>
+  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesReloadSecureSettingsResponse, unknown>>
+  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<T.NodesReloadSecureSettingsResponse>
+  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['node_id']
+    const acceptedQuery: string[] = ['timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.node_id != null) {
+      method = 'POST'
+      path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/reload_secure_settings`
+    } else {
+      method = 'POST'
+      path = '/_nodes/reload_secure_settings'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesStatsResponse>
+  async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesStatsResponse, unknown>>
+  async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptions): Promise<T.NodesStatsResponse>
+  async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['node_id', 'metric', 'index_metric']
+    const acceptedQuery: string[] = ['completion_fields', 'fielddata_fields', 'fields', 'groups', 'include_segment_file_sizes', 'level', 'master_timeout', 'timeout', 'types', 'include_unloaded_segments', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.node_id != null && params.metric != null && params.index_metric != null) {
+      method = 'GET'
+      path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/stats/${encodeURIComponent(params.metric.toString())}/${encodeURIComponent(params.index_metric.toString())}`
+    } else if (params.node_id != null && params.metric != null) {
+      method = 'GET'
+      path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/stats/${encodeURIComponent(params.metric.toString())}`
+    } else if (params.metric != null && params.index_metric != null) {
+      method = 'GET'
+      path = `/_nodes/stats/${encodeURIComponent(params.metric.toString())}/${encodeURIComponent(params.index_metric.toString())}`
+    } else if (params.node_id != null) {
+      method = 'GET'
+      path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/stats`
+    } else if (params.metric != null) {
+      method = 'GET'
+      path = `/_nodes/stats/${encodeURIComponent(params.metric.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_nodes/stats'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesUsageResponse>
+  async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesUsageResponse, unknown>>
+  async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptions): Promise<T.NodesUsageResponse>
+  async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['node_id', 'metric']
+    const acceptedQuery: string[] = ['timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.node_id != null && params.metric != null) {
+      method = 'GET'
+      path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/usage/${encodeURIComponent(params.metric.toString())}`
+    } else if (params.node_id != null) {
+      method = 'GET'
+      path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/usage`
+    } else if (params.metric != null) {
+      method = 'GET'
+      path = `/_nodes/usage/${encodeURIComponent(params.metric.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_nodes/usage'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
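The nodes API above illustrates how the generator enumerates every combination of optional path parts as an explicit branch. The URLs produced by `stats`, with hypothetical node ids:

    await client.nodes.stats()                  // GET /_nodes/stats
    await client.nodes.stats({ metric: 'jvm' }) // GET /_nodes/stats/jvm
    // GET /_nodes/node-1/stats/indices/docs
    await client.nodes.stats({ node_id: 'node-1', metric: 'indices', index_metric: 'docs' })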
diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts
new file mode 100644
index 000000000..94be5c04b
--- /dev/null
+++ b/src/api/api/open_point_in_time.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.OpenPointInTimeResponse>
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.OpenPointInTimeResponse, unknown>>
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<T.OpenPointInTimeResponse>
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['index']
+  const acceptedQuery: string[] = ['keep_alive', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'POST'
+  const path = `/${encodeURIComponent(params.index.toString())}/_pit`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
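A sketch pairing `openPointInTime` with a follow-up search, assuming a hypothetical index: `keep_alive` travels in the querystring, and the returned id is fed back through the search body:

    const { id } = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
    await client.search({ pit: { id, keep_alive: '1m' }, query: { match_all: {} } })
    await client.closePointInTime({ id })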
diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts
new file mode 100644
index 000000000..49ec7c223
--- /dev/null
+++ b/src/api/api/ping.ts
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PingResponse>
+export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PingResponse, unknown>>
+export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptions): Promise<T.PingResponse>
+export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = []
+  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params?.body ?? undefined
+
+  params = params ?? {}
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'HEAD'
+  const path = '/'
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
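`ping` maps to `HEAD /` and resolves to a boolean, which makes it a cheap reachability probe:

    if (!await client.ping()) {
      console.error('cluster is unreachable')
    }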
diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts
new file mode 100644
index 000000000..1f82c1445
--- /dev/null
+++ b/src/api/api/put_script.ts
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PutScriptResponse>
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PutScriptResponse, unknown>>
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptions): Promise<T.PutScriptResponse>
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id', 'context']
+  const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  let method = ''
+  let path = ''
+  if (params.id != null && params.context != null) {
+    method = 'PUT'
+    path = `/_scripts/${encodeURIComponent(params.id.toString())}/${encodeURIComponent(params.context.toString())}`
+  } else {
+    method = 'PUT'
+    path = `/_scripts/${encodeURIComponent(params.id.toString())}`
+  }
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
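`putScript` takes an optional second path segment: with both `id` and `context` the stored script is also compiled against that context. A sketch with a hypothetical script id:

    // PUT /_scripts/my-calc; add context: 'score' to target /_scripts/my-calc/score instead
    await client.putScript({
      id: 'my-calc',
      script: { lang: 'painless', source: 'doc[params.field].value * params.factor' }
    })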
diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts
new file mode 100644
index 000000000..921372d5c
--- /dev/null
+++ b/src/api/api/rank_eval.ts
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RankEvalResponse>
+export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RankEvalResponse, unknown>>
+export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptions): Promise<T.RankEvalResponse>
+export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['index']
+  const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'search_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  let method = ''
+  let path = ''
+  if (params.index != null) {
+    method = body != null ? 'POST' : 'GET'
+    path = `/${encodeURIComponent(params.index.toString())}/_rank_eval`
+  } else {
+    method = body != null ? 'POST' : 'GET'
+    path = '/_rank_eval'
+  }
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
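A hedged `rankEval` sketch: `requests` pairs queries with graded relevance judgements and `metric` picks the evaluation measure (all names below are hypothetical):

    await client.rankEval({
      index: 'my-index',
      requests: [{
        id: 'query-1',
        request: { query: { match: { title: 'node' } } },
        ratings: [{ _index: 'my-index', _id: '1', rating: 3 }]
      }],
      metric: { precision: { k: 10 } }
    })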
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function ReindexApi (this: That, params?: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ReindexResponse>
+export default async function ReindexApi (this: That, params?: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ReindexResponse, unknown>>
+export default async function ReindexApi (this: That, params?: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise<T.ReindexResponse>
+export default async function ReindexApi (this: That, params?: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = []
+  const acceptedQuery: string[] = ['refresh', 'requests_per_second', 'scroll', 'slices', 'timeout', 'wait_for_active_shards', 'wait_for_completion', 'require_alias', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> | string = params?.body ?? undefined
+
+  params = params ?? {}
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'POST'
+  const path = '/_reindex'
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts
new file mode 100644
index 000000000..436f34394
--- /dev/null
+++ b/src/api/api/reindex_rethrottle.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
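The reindex implementation above shows the query/body split clearly: wait_for_completion sits in acceptedQuery, while source and dest fall through to the body. A sketch with placeholder index names:

  await client.reindex({
    source: { index: 'old-index' },  // body
    dest: { index: 'new-index' },    // body
    wait_for_completion: false       // query string
  })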
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ReindexRethrottleResponse>
+export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ReindexRethrottleResponse, unknown>>
+export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise<T.ReindexRethrottleResponse>
+export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['task_id']
+  const acceptedQuery: string[] = ['requests_per_second', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> | string = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'POST'
+  const path = `/_reindex/${encodeURIComponent(params.task_id.toString())}/_rethrottle`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts
new file mode 100644
index 000000000..03285bf5a
--- /dev/null
+++ b/src/api/api/render_search_template.ts
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
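Since task_id is the only path parameter accepted above, a call like the following (the task id is made up) always resolves to POST /_reindex/{task_id}/_rethrottle:

  await client.reindexRethrottle({
    task_id: 'oTUltX4IQMOUUVeiohTt8A:124',  // hypothetical task id
    requests_per_second: 500                // routed to the query string
  })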
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RenderSearchTemplateResponse>
+export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RenderSearchTemplateResponse, unknown>>
+export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<T.RenderSearchTemplateResponse>
+export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id']
+  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> | string = params?.body ?? undefined
+
+  params = params ?? {}
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  let method = ''
+  let path = ''
+  if (params.id != null) {
+    method = body != null ? 'POST' : 'GET'
+    path = `/_render/template/${encodeURIComponent(params.id.toString())}`
+  } else {
+    method = body != null ? 'POST' : 'GET'
+    path = '/_render/template'
+  }
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts
new file mode 100644
index 000000000..0a728192f
--- /dev/null
+++ b/src/api/api/rollup.ts
@@ -0,0 +1,312 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
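A sketch exercising the optional-path branch of render_search_template above: with an id the request hits /_render/template/{id}, without one it falls back to /_render/template (the template id and params are illustrative):

  await client.renderSearchTemplate({
    id: 'my-search-template',
    params: { query_string: 'hello world' }
  })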
+ +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Rollup { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise + async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptions): Promise + async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_rollup/job/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_rollup/job/' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_rollup/data/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_rollup/data/' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_rollup/data` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptions): Promise + async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async rollup (this: That, params: T.RollupRollupRequest | TB.RollupRollupRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async rollup (this: That, params: T.RollupRollupRequest | TB.RollupRollupRequest, options?: TransportRequestOptionsWithMeta): Promise> + async rollup (this: That, params: T.RollupRollupRequest | TB.RollupRollupRequest, options?: TransportRequestOptions): Promise + async rollup (this: That, params: T.RollupRollupRequest | TB.RollupRollupRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index', 'rollup_index'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: any = params.body ?? 
undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + body = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_rollup/${encodeURIComponent(params.rollup_index.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise> + async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index', 'type'] + const acceptedQuery: string[] = ['rest_total_hits_as_int', 'typed_keys', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_rollup_search` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptions): Promise + async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}/_start` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptions): Promise + async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['timeout', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}/_stop` + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts new file mode 100644 index 000000000..968516d40 --- /dev/null +++ b/src/api/api/scripts_painless_execute.ts @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
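Unlike the top-level functions, the rollup APIs above are generated as a class that wraps the shared Transport, so they are reached through a namespace property; assuming the client wires a Rollup instance up as client.rollup (the job id below is illustrative):

  await client.rollup.startJob({ id: 'my-rollup-job' })
  await client.rollup.stopJob({ id: 'my-rollup-job', wait_for_completion: true })
  const jobs = await client.rollup.getJobs()  // no id given, so GET /_rollup/job/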
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ScriptsPainlessExecuteResponse<TResult>>
+export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScriptsPainlessExecuteResponse<TResult>, unknown>>
+export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise<T.ScriptsPainlessExecuteResponse<TResult>>
+export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = []
+  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> | string = params?.body ?? undefined
+
+  params = params ?? {}
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = body != null ? 'POST' : 'GET'
+  const path = '/_scripts/painless/_execute'
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts
new file mode 100644
index 000000000..0592663ba
--- /dev/null
+++ b/src/api/api/scroll.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
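Because scripts_painless_execute above accepts no path or query parameters, every property ends up in the body of POST /_scripts/painless/_execute. A sketch with an illustrative script:

  const result = await client.scriptsPainlessExecute({
    script: {
      source: 'params.count / params.total',
      params: { count: 100.0, total: 1000.0 }
    }
  })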
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function ScrollApi<TDocument = unknown> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ScrollResponse<TDocument>>
+export default async function ScrollApi<TDocument = unknown> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScrollResponse<TDocument>, unknown>>
+export default async function ScrollApi<TDocument = unknown> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise<T.ScrollResponse<TDocument>>
+export default async function ScrollApi<TDocument = unknown> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = []
+  const acceptedQuery: string[] = ['rest_total_hits_as_int', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> | string = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = body != null ? 'POST' : 'GET'
+  const path = '/_search/scroll'
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
diff --git a/src/api/api/search.ts b/src/api/api/search.ts
new file mode 100644
index 000000000..388cc6f33
--- /dev/null
+++ b/src/api/api/search.ts
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
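The scroll function above pairs with search; a sketch of paging through results (the index name and keep-alive values are illustrative):

  const first = await client.search({ index: 'my-index', scroll: '30s', size: 100 })
  // the _scroll_id from the previous page is sent in the body of POST /_search/scroll
  const next = await client.scroll({ scroll_id: first._scroll_id!, scroll: '30s' })
  await client.clearScroll({ scroll_id: next._scroll_id! })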
+ +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default async function SearchApi (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function SearchApi (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function SearchApi (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise> +export default async function SearchApi (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['allow_no_indices', 'allow_partial_search_results', 'analyzer', 'analyze_wildcard', 'batched_reduce_size', 'ccs_minimize_roundtrips', 'default_operator', 'df', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable', 'lenient', 'max_concurrent_shard_requests', 'min_compatible_shard_node', 'preference', 'pre_filter_shard_size', 'request_cache', 'routing', 'scroll', 'search_type', 'suggest_field', 'suggest_mode', 'suggest_size', 'suggest_text', 'typed_keys', 'rest_total_hits_as_int', '_source_excludes', '_source_includes', 'q', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_search` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_search' + } + return await this.transport.request({ path, method, querystring, body }, options) +} diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts new file mode 100644 index 000000000..43a63b254 --- /dev/null +++ b/src/api/api/search_mvt.ts @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_mvt/${encodeURIComponent(params.field.toString())}/${encodeURIComponent(params.zoom.toString())}/${encodeURIComponent(params.x.toString())}/${encodeURIComponent(params.y.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) +} diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts new file mode 100644 index 000000000..24216f115 --- /dev/null +++ b/src/api/api/search_shards.ts @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptions): Promise +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local', 'preference', 'routing', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_search_shards` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_search_shards' + } + return await this.transport.request({ path, method, querystring, body }, options) +} diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts new file mode 100644 index 000000000..f15905827 --- /dev/null +++ b/src/api/api/search_template.ts @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptions): Promise> +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['allow_no_indices', 'ccs_minimize_roundtrips', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable', 'preference', 'routing', 'scroll', 'search_type', 'rest_total_hits_as_int', 'typed_keys', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_search/template` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_search/template' + } + return await this.transport.request({ path, method, querystring, body }, options) +} diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts new file mode 100644 index 000000000..08156add7 --- /dev/null +++ b/src/api/api/searchable_snapshots.ts @@ -0,0 +1,178 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class SearchableSnapshots { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async cacheStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async cacheStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async cacheStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async cacheStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['node_id'] + const acceptedQuery: string[] = [] + const querystring: Record = {} + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.node_id != null) { + method = 'GET' + path = `/_searchable_snapshots/${encodeURIComponent(params.node_id.toString())}/cache/stats` + } else { + method = 'GET' + path = '/_searchable_snapshots/cache/stats' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['expand_wildcards', 'allow_no_indices', 'ignore_unavailable', 'pretty', 'human', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'POST' + path = `/${encodeURIComponent(params.index.toString())}/_searchable_snapshots/cache/clear` + } else { + method = 'POST' + path = '/_searchable_snapshots/cache/clear' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise + async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository', 'snapshot'] + const acceptedQuery: string[] = ['master_timeout', 'wait_for_completion', 'storage', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}/_mount` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['level', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_searchable_snapshots/stats` + } else { + method = 'GET' + path = '/_searchable_snapshots/stats' + } + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/security.ts b/src/api/api/security.ts new file mode 100644 index 000000000..673023465 --- /dev/null +++ b/src/api/api/security.ts @@ -0,0 +1,1307 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Security { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise + async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_security/_authenticate' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise> + async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise + async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['username'] + const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.username != null) { + method = 'PUT' + path = `/_security/user/${encodeURIComponent(params.username.toString())}/_password` + } else { + method = 'PUT' + path = '/_security/user/_password' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['ids'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
+
+  async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearApiKeyCacheResponse>
+  async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearApiKeyCacheResponse, unknown>>
+  async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise<T.SecurityClearApiKeyCacheResponse>
+  async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['ids']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_security/api_key/${encodeURIComponent(params.ids.toString())}/_clear_cache`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedPrivilegesResponse>
+  async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedPrivilegesResponse, unknown>>
+  async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedPrivilegesResponse>
+  async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['application']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_security/privilege/${encodeURIComponent(params.application.toString())}/_clear_cache`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedRealmsResponse>
+  async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedRealmsResponse, unknown>>
+  async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedRealmsResponse>
+  async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['realms']
+    const acceptedQuery: string[] = ['usernames', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_security/realm/${encodeURIComponent(params.realms.toString())}/_clear_cache`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedRolesResponse>
+  async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedRolesResponse, unknown>>
+  async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedRolesResponse>
+  async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_security/role/${encodeURIComponent(params.name.toString())}/_clear_cache`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedServiceTokensResponse>
+  async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedServiceTokensResponse, unknown>>
+  async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedServiceTokensResponse>
+  async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['namespace', 'service', 'name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential/token/${encodeURIComponent(params.name.toString())}/_clear_cache`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateApiKeyResponse>
+  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateApiKeyResponse, unknown>>
+  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateApiKeyResponse>
+  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = '/_security/api_key'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
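
A minimal sketch of calling `createApiKey` through the client, assuming the `client` instance from the earlier sketch; `name` and `expiration` fall through to the request body and the call issues `PUT /_security/api_key`:

    const key = await client.security.createApiKey({ name: 'app-key', expiration: '7d' })
    console.log(key.id, key.api_key)
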
+
+  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateServiceTokenResponse>
+  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateServiceTokenResponse, unknown>>
+  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateServiceTokenResponse>
+  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['namespace', 'service', 'name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.namespace != null && params.service != null && params.name != null) {
+      method = 'PUT'
+      path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential/token/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'POST'
+      path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential/token`
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeletePrivilegesResponse>
+  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeletePrivilegesResponse, unknown>>
+  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityDeletePrivilegesResponse>
+  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['application', 'name']
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_security/privilege/${encodeURIComponent(params.application.toString())}/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleResponse>
+  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteRoleResponse, unknown>>
+  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteRoleResponse>
+  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_security/role/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleMappingResponse>
+  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteRoleMappingResponse, unknown>>
+  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteRoleMappingResponse>
+  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_security/role_mapping/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteServiceTokenResponse>
+  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteServiceTokenResponse, unknown>>
+  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteServiceTokenResponse>
+  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['namespace', 'service', 'name']
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential/token/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteUserResponse>
+  async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteUserResponse, unknown>>
+  async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteUserResponse>
+  async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['username']
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_security/user/${encodeURIComponent(params.username.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserResponse>
+  async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDisableUserResponse, unknown>>
+  async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise<T.SecurityDisableUserResponse>
+  async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['username']
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_security/user/${encodeURIComponent(params.username.toString())}/_disable`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserResponse>
+  async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnableUserResponse, unknown>>
+  async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise<T.SecurityEnableUserResponse>
+  async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['username']
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_security/user/${encodeURIComponent(params.username.toString())}/_enable`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
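
The same parameter-routing loop appears in every generated method above: path parameters are consumed by the URL template, names listed in `acceptedQuery` become the querystring, and everything else is merged into the request body. A standalone sketch of that rule, with hypothetical parameter lists:

    function splitParams (params: Record<string, any>, acceptedPath: string[], acceptedQuery: string[]): { querystring: Record<string, any>, body: Record<string, any> } {
      const querystring: Record<string, any> = {}
      const body: Record<string, any> = {}
      for (const key in params) {
        if (acceptedQuery.includes(key)) querystring[key] = params[key]
        else if (!acceptedPath.includes(key) && key !== 'body') body[key] = params[key]
      }
      return { querystring, body }
    }

    // splitParams({ username: 'u', refresh: true, password: 'x' }, ['username'], ['refresh'])
    // → { querystring: { refresh: true }, body: { password: 'x' } }
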
+
+  async enrollKibana (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async enrollKibana (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async enrollKibana (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async enrollKibana (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_security/enroll/kibana'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async enrollNode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async enrollNode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async enrollNode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async enrollNode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_security/enroll/node'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetApiKeyResponse>
+  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetApiKeyResponse, unknown>>
+  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityGetApiKeyResponse>
+  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['id', 'name', 'owner', 'realm_name', 'username', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_security/api_key'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetBuiltinPrivilegesResponse>
+  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetBuiltinPrivilegesResponse, unknown>>
+  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetBuiltinPrivilegesResponse>
+  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_security/privilege/_builtin'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetPrivilegesResponse>
+  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetPrivilegesResponse, unknown>>
+  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetPrivilegesResponse>
+  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['application', 'name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.application != null && params.name != null) {
+      method = 'GET'
+      path = `/_security/privilege/${encodeURIComponent(params.application.toString())}/${encodeURIComponent(params.name.toString())}`
+    } else if (params.application != null) {
+      method = 'GET'
+      path = `/_security/privilege/${encodeURIComponent(params.application.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_security/privilege'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleResponse>
+  async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetRoleResponse, unknown>>
+  async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityGetRoleResponse>
+  async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_security/role/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_security/role'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleMappingResponse>
+  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetRoleMappingResponse, unknown>>
+  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityGetRoleMappingResponse>
+  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_security/role_mapping/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_security/role_mapping'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceAccountsResponse>
+  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetServiceAccountsResponse, unknown>>
+  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise<T.SecurityGetServiceAccountsResponse>
+  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['namespace', 'service']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.namespace != null && params.service != null) {
+      method = 'GET'
+      path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}`
+    } else if (params.namespace != null) {
+      method = 'GET'
+      path = `/_security/service/${encodeURIComponent(params.namespace.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_security/service'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceCredentialsResponse>
+  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetServiceCredentialsResponse, unknown>>
+  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise<T.SecurityGetServiceCredentialsResponse>
+  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['namespace', 'service']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
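
A usage sketch for the two service-account reads above, assuming the built-in `elastic/fleet-server` service account exists on the cluster and reusing the earlier `client` instance:

    // GET /_security/service/elastic/fleet-server
    const accounts = await client.security.getServiceAccounts({ namespace: 'elastic', service: 'fleet-server' })
    // GET /_security/service/elastic/fleet-server/credential
    const credentials = await client.security.getServiceCredentials({ namespace: 'elastic', service: 'fleet-server' })
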
+
+  async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetTokenResponse>
+  async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetTokenResponse, unknown>>
+  async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityGetTokenResponse>
+  async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_security/oauth2/token'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserResponse>
+  async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserResponse, unknown>>
+  async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptions): Promise<T.SecurityGetUserResponse>
+  async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['username']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.username != null) {
+      method = 'GET'
+      path = `/_security/user/${encodeURIComponent(params.username.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_security/user'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserPrivilegesResponse>
+  async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserPrivilegesResponse, unknown>>
+  async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetUserPrivilegesResponse>
+  async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['application', 'priviledge', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_security/user/_privileges'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGrantApiKeyResponse>
+  async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGrantApiKeyResponse, unknown>>
+  async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityGrantApiKeyResponse>
+  async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_security/api_key/grant'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
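
A sketch of `grantApiKey`, which creates an API key on behalf of another user; the credentials below are placeholders and the call reuses the earlier `client`:

    await client.security.grantApiKey({
      grant_type: 'password',
      username: 'my_user',
      password: 'my_password',
      api_key: { name: 'on-behalf-of-key' }
    })
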
+
+  async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityHasPrivilegesResponse>
+  async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityHasPrivilegesResponse, unknown>>
+  async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityHasPrivilegesResponse>
+  async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['user']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.user != null) {
+      method = body != null ? 'POST' : 'GET'
+      path = `/_security/user/${encodeURIComponent(params.user.toString())}/_has_privileges`
+    } else {
+      method = body != null ? 'POST' : 'GET'
+      path = '/_security/user/_has_privileges'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityInvalidateApiKeyResponse>
+  async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityInvalidateApiKeyResponse, unknown>>
+  async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityInvalidateApiKeyResponse>
+  async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = '/_security/api_key'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityInvalidateTokenResponse>
+  async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityInvalidateTokenResponse, unknown>>
+  async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityInvalidateTokenResponse>
+  async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = '/_security/oauth2/token'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutPrivilegesResponse>
+  async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutPrivilegesResponse, unknown>>
+  async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityPutPrivilegesResponse>
+  async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: any = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        body = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = '/_security/privilege/'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutRoleResponse>
+  async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutRoleResponse, unknown>>
+  async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityPutRoleResponse>
+  async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_security/role/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutRoleMappingResponse>
+  async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutRoleMappingResponse, unknown>>
+  async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityPutRoleMappingResponse>
+  async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_security/role_mapping/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutUserResponse>
+  async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutUserResponse, unknown>>
+  async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptions): Promise<T.SecurityPutUserResponse>
+  async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_security/user/${encodeURIComponent(params.username.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
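
A sketch of the write APIs above, with hypothetical role and user names; everything except the path and `refresh` parameters is serialized into the PUT body:

    await client.security.putRole({
      name: 'read_logs',
      indices: [{ names: ['logs-*'], privileges: ['read'] }]
    })
    await client.security.putUser({
      username: 'log_reader',
      password: 'changeme-please',
      roles: ['read_logs']
    })
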
+
+  async queryApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async queryApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async queryApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async queryApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = body != null ? 'POST' : 'GET'
+    const path = '/_security/_query/api_key'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async samlAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async samlAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async samlAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async samlAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_security/saml/authenticate'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async samlCompleteLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async samlCompleteLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async samlCompleteLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async samlCompleteLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_security/saml/complete_logout'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async samlInvalidate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async samlInvalidate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async samlInvalidate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async samlInvalidate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_security/saml/invalidate'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async samlLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async samlLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async samlLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async samlLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_security/saml/logout'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async samlPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async samlPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async samlPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async samlPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_security/saml/prepare'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async samlServiceProviderMetadata (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async samlServiceProviderMetadata (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async samlServiceProviderMetadata (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async samlServiceProviderMetadata (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['realm_name']
+    const acceptedQuery: string[] = []
+    const querystring: Record<string, any> = {}
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_security/saml/metadata/${encodeURIComponent(params.realm_name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
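
The four overload signatures generated for each method above make response metadata opt-in. A minimal sketch, reusing the hypothetical `client` from the first example:

    const user = await client.security.getUser({ username: 'my_user' })
    // With `meta: true` the same call resolves to a TransportResult carrying statusCode, headers, etc.
    const withMeta = await client.security.getUser({ username: 'my_user' }, { meta: true })
    console.log(withMeta.statusCode, withMeta.body)
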
diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts
new file mode 100644
index 000000000..10f9bb2f8
--- /dev/null
+++ b/src/api/api/shutdown.ts
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Shutdown {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ShutdownDeleteNodeResponse>
+  async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownDeleteNodeResponse, unknown>>
+  async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownDeleteNodeResponse>
+  async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['node_id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/shutdown`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ShutdownGetNodeResponse>
+  async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownGetNodeResponse, unknown>>
+  async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownGetNodeResponse>
+  async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['node_id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.node_id != null) {
+      method = 'GET'
+      path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/shutdown`
+    } else {
+      method = 'GET'
+      path = '/_nodes/shutdown'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ShutdownPutNodeResponse>
+  async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownPutNodeResponse, unknown>>
+  async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownPutNodeResponse>
+  async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['node_id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/shutdown`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
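
A usage sketch for the node-shutdown lifecycle defined above, with a hypothetical node id and the earlier `client` instance; `type` and `reason` travel in the PUT body:

    // PUT /_nodes/node-1/shutdown
    await client.shutdown.putNode({ node_id: 'node-1', type: 'restart', reason: 'Planned maintenance' })
    // GET /_nodes/node-1/shutdown
    const status = await client.shutdown.getNode({ node_id: 'node-1' })
    // DELETE /_nodes/node-1/shutdown
    await client.shutdown.deleteNode({ node_id: 'node-1' })
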
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Slm { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['policy_id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_slm/policy/${encodeURIComponent(params.policy_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['policy_id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? 
undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_slm/policy/${encodeURIComponent(params.policy_id.toString())}/_execute` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = '/_slm/_execute_retention' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['policy_id'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.policy_id != null) { + method = 'GET' + path = `/_slm/policy/${encodeURIComponent(params.policy_id.toString())}` + } else { + method = 'GET' + path = '/_slm/policy' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptions): Promise + async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_slm/stats' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptions): Promise + async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_slm/status' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['policy_id'] + const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_slm/policy/${encodeURIComponent(params.policy_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> + async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptions): Promise + async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = '/_slm/start' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptions): Promise + async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = '/_slm/stop' + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts new file mode 100644 index 000000000..62b0df495 --- /dev/null +++ b/src/api/api/snapshot.ts @@ -0,0 +1,398 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Snapshot { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository'] + const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/_cleanup` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptions): Promise + async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository', 'snapshot', 'target_snapshot'] + const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}/_clone/${encodeURIComponent(params.target_snapshot.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptions): Promise + async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository', 'snapshot'] + const acceptedQuery: string[] = ['master_timeout', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['master_timeout', 'timeout', 'verify', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository', 'snapshot'] + const acceptedQuery: string[] = ['master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository'] + const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository', 'snapshot'] + const acceptedQuery: string[] = ['ignore_unavailable', 'master_timeout', 'verbose', 'index_details', 'human', 'include_repository', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository'] + const acceptedQuery: string[] = ['local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.repository != null) { + method = 'GET' + path = `/_snapshot/${encodeURIComponent(params.repository.toString())}` + } else { + method = 'GET' + path = '/_snapshot' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository'] + const acceptedQuery: string[] = ['blob_count', 'concurrency', 'read_node_count', 'early_read_node_count', 'seed', 'rare_action_probability', 'max_blob_size', 'max_total_data_size', 'timeout', 'detailed', 'rarely_abort_writes'] + const querystring: Record = {} + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + body[key] = params[key] + } + } + + const method = 'POST' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/_analyze` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> + async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise + async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository', 'snapshot'] + const acceptedQuery: string[] = ['master_timeout', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}/_restore` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptions): Promise + async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository', 'snapshot'] + const acceptedQuery: string[] = ['ignore_unavailable', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.repository != null && params.snapshot != null) { + method = 'GET' + path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}/_status` + } else if (params.repository != null) { + method = 'GET' + path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/_status` + } else { + method = 'GET' + path = '/_snapshot/_status' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['repository'] + const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/_verify` + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts new file mode 100644 index 000000000..2e180dd11 --- /dev/null +++ b/src/api/api/sql.ts @@ -0,0 +1,208 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
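With snapshot.ts complete, the client covers repository management and snapshot creation, and the Slm class earlier in this patch adds scheduled lifecycles on top. A sketch of the round trip, assuming the placeholder client from the shutdown example above; the repository name, filesystem location, schedule and retention values are illustrative only:

async function snapshots (): Promise<void> {
  // repository fills the path; type and settings are swept into the body
  // by the key loop in the generated method.
  await client.snapshot.createRepository({
    repository: 'my_repository',            // placeholder name
    type: 'fs',
    settings: { location: '/mnt/backups' }  // assumes path.repo allows it
  })

  // Take a snapshot synchronously: wait_for_completion is listed in
  // acceptedQuery, so it is routed to the query string.
  await client.snapshot.create({
    repository: 'my_repository',
    snapshot: 'snapshot_1',
    wait_for_completion: true
  })

  // Or let SLM drive snapshots on a cron schedule.
  await client.slm.putLifecycle({
    policy_id: 'nightly-snapshots',
    schedule: '0 30 1 * * ?',
    name: '<nightly-{now/d}>',
    repository: 'my_repository',
    config: { indices: ['*'] },
    retention: { expire_after: '30d', min_count: 5, max_count: 50 }
  })
}

snapshots().catch(console.log)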
+ +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Sql { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptions): Promise + async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'POST' + const path = '/_sql/close' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async deleteAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async deleteAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async deleteAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = [] + const querystring: Record = {} + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + body[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_sql/async/delete/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async getAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = ['delimiter', 'format', 'keep_alive', 'wait_for_completion_timeout'] + const querystring: Record = {} + let body: Record = params?.body ?? undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedQuery.includes(key)) { + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + body[key] = params[key] + } + } + + const method = 'GET' + const path = `/_sql/async/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getAsyncStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getAsyncStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getAsyncStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async getAsyncStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedQuery: string[] = [] + const querystring: Record = {} + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + body[key] = params[key] + } + } + + const method = 'GET' + const path = `/_sql/async/status/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise + async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['format', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_sql' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptions): Promise + async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? 
undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_sql/translate' + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts new file mode 100644 index 000000000..416d0ea0c --- /dev/null +++ b/src/api/api/ssl.ts @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Ssl { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptions): Promise + async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_ssl/certificates' + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts new file mode 100644 index 000000000..69d4cf61f --- /dev/null +++ b/src/api/api/tasks.ts @@ -0,0 +1,138 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Tasks { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptions): Promise + async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['task_id'] + const acceptedQuery: string[] = ['actions', 'nodes', 'parent_task_id', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.task_id != null) { + method = 'POST' + path = `/_tasks/${encodeURIComponent(params.task_id.toString())}/_cancel` + } else { + method = 'POST' + path = '/_tasks/_cancel' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['task_id'] + const acceptedQuery: string[] = ['timeout', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = `/_tasks/${encodeURIComponent(params.task_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptions): Promise + async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedQuery: string[] = ['actions', 'detailed', 'group_by', 'nodes', 'parent_task_id', 'timeout', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params?.body ?? undefined + + params = params ?? {} + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = 'GET' + const path = '/_tasks' + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts new file mode 100644 index 000000000..d072cfb8d --- /dev/null +++ b/src/api/api/terms_enum.ts @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
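The Tasks class above, like the Sql class earlier in this patch, is a thin wrapper over the same acceptedPath/acceptedQuery splitting. A sketch, again assuming the placeholder client from the shutdown example; the SQL text and task filters are illustrative:

async function sqlAndTasks (): Promise<void> {
  // query and fetch_size are collected into the body; format would go to
  // the query string, since it appears in acceptedQuery for sql.query.
  const result = await client.sql.query({
    query: 'SELECT author, release_date FROM books ORDER BY release_date DESC',
    fetch_size: 5
  })
  console.log(result.columns, result.rows)

  // List running search tasks with per-task detail.
  const tasks = await client.tasks.list({ detailed: true, actions: '*search*' })
  console.log(tasks.nodes)

  // Cancelling takes the optional-path branch: with a task_id the path is
  // /_tasks/{task_id}/_cancel, without it /_tasks/_cancel.
  // await client.tasks.cancel({ task_id: 'node-0:12345' }) // placeholder id
}

sqlAndTasks().catch(console.log)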
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptions): Promise +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_terms_enum` + return await this.transport.request({ path, method, querystring, body }, options) +} diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts new file mode 100644 index 000000000..8c1994730 --- /dev/null +++ b/src/api/api/termvectors.ts @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
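TermsEnumApi above is one of the namespace-less endpoints: it is exported as a plain async function rather than a class, and is mounted directly on the client as client.termsEnum. A sketch, assuming an existing index with a keyword-like field; the index, field and prefix values are illustrative:

async function complete (): Promise<void> {
  // index fills the path; field, string and size land in the body, which
  // also flips the computed method from GET to POST.
  const result = await client.termsEnum({
    index: 'stackoverflow', // placeholder index
    field: 'tags',          // should be a keyword-style field
    string: 'kib',          // prefix to complete
    size: 10
  })
  console.log(result.terms)
}

complete().catch(console.log)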
+ */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptions): Promise +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index', 'id'] + const acceptedQuery: string[] = ['fields', 'field_statistics', 'offsets', 'payloads', 'positions', 'preference', 'realtime', 'routing', 'term_statistics', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const querystring: Record = {} + // @ts-expect-error + let body: Record = params.body ?? undefined + + for (const key in params) { + if (acceptedQuery.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null && params.id != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_termvectors/${encodeURIComponent(params.id.toString())}` + } else { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_termvectors` + } + return await this.transport.request({ path, method, querystring, body }, options) +} diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts new file mode 100644 index 000000000..ad3ab4fea --- /dev/null +++ b/src/api/api/text_structure.ts @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
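TermvectorsApi follows the same function shape, with the extra twist that the path depends on whether a document id is supplied. A sketch, assuming an already-indexed document; index and id are placeholders:

async function vectors (): Promise<void> {
  // fields and term_statistics are listed in acceptedQuery, so they stay
  // on the query string; with an id the path is /{index}/_termvectors/{id},
  // without one it falls back to /{index}/_termvectors (for an ad-hoc doc).
  const tv = await client.termvectors({
    index: 'my-index', // placeholder
    id: '1',           // placeholder document id
    fields: ['text'],
    term_statistics: true
  })
  console.log(tv.term_vectors)
}

vectors().catch(console.log)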
diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts
new file mode 100644
index 000000000..ad3ab4fea
--- /dev/null
+++ b/src/api/api/text_structure.ts
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class TextStructure {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async findStructure<TJsonDocument = unknown> (this: That, params: T.TextStructureFindStructureRequest<TJsonDocument> | TB.TextStructureFindStructureRequest<TJsonDocument>, options?: TransportRequestOptionsWithOutMeta): Promise<T.TextStructureFindStructureResponse>
+  async findStructure<TJsonDocument = unknown> (this: That, params: T.TextStructureFindStructureRequest<TJsonDocument> | TB.TextStructureFindStructureRequest<TJsonDocument>, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TextStructureFindStructureResponse, unknown>>
+  async findStructure<TJsonDocument = unknown> (this: That, params: T.TextStructureFindStructureRequest<TJsonDocument> | TB.TextStructureFindStructureRequest<TJsonDocument>, options?: TransportRequestOptions): Promise<T.TextStructureFindStructureResponse>
+  async findStructure<TJsonDocument = unknown> (this: That, params: T.TextStructureFindStructureRequest<TJsonDocument> | TB.TextStructureFindStructureRequest<TJsonDocument>, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['charset', 'column_names', 'delimiter', 'explain', 'format', 'grok_pattern', 'has_header_row', 'line_merge_size_limit', 'lines_to_sample', 'quote', 'should_trim_fields', 'timeout', 'timestamp_field', 'timestamp_format']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: any = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        body = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_text_structure/find_structure'
+    return await this.transport.request({ path, method, querystring, bulkBody: body }, options)
+  }
+}
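Not part of the patch: unlike the other endpoints, `findStructure` forwards its payload as `bulkBody`, so the raw newline-delimited sample is sent verbatim. A sketch under the assumption that the sample can be passed through the `body` key:

const ndjson = '{"message":"alpha"}\n{"message":"beta"}\n'
const structure = await client.textStructure.findStructure({
  body: ndjson,            // forwarded as-is via bulkBody
  lines_to_sample: 2       // query-string parameter
})
console.log(structure.mappings)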
diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts
new file mode 100644
index 000000000..de9e0b877
--- /dev/null
+++ b/src/api/api/transform.ts
@@ -0,0 +1,285 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Transform {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformDeleteTransformResponse>
+  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformDeleteTransformResponse, unknown>>
+  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise<T.TransformDeleteTransformResponse>
+  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['transform_id']
+    const acceptedQuery: string[] = ['force', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformGetTransformResponse>
+  async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformGetTransformResponse, unknown>>
+  async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptions): Promise<T.TransformGetTransformResponse>
+  async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['transform_id']
+    const acceptedQuery: string[] = ['allow_no_match', 'from', 'size', 'exclude_generated', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.transform_id != null) {
+      method = 'GET'
+      path = `/_transform/${encodeURIComponent(params.transform_id.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_transform'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformGetTransformStatsResponse>
+  async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformGetTransformStatsResponse, unknown>>
+  async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise<T.TransformGetTransformStatsResponse>
+  async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['transform_id']
+    const acceptedQuery: string[] = ['allow_no_match', 'from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_stats`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async previewTransform<TTransform = unknown> (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformPreviewTransformResponse<TTransform>>
+  async previewTransform<TTransform = unknown> (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformPreviewTransformResponse<TTransform>, unknown>>
+  async previewTransform<TTransform = unknown> (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise<T.TransformPreviewTransformResponse<TTransform>>
+  async previewTransform<TTransform = unknown> (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['transform_id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.transform_id != null) {
+      method = body != null ? 'POST' : 'GET'
+      path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_preview`
+    } else {
+      method = body != null ? 'POST' : 'GET'
+      path = '/_transform/_preview'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformPutTransformResponse>
+  async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformPutTransformResponse, unknown>>
+  async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise<T.TransformPutTransformResponse>
+  async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['transform_id', 'transform_id']
+    const acceptedQuery: string[] = ['defer_validation', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformStartTransformResponse>
+  async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformStartTransformResponse, unknown>>
+  async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptions): Promise<T.TransformStartTransformResponse>
+  async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['transform_id']
+    const acceptedQuery: string[] = ['timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_start`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformStopTransformResponse>
+  async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformStopTransformResponse, unknown>>
+  async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptions): Promise<T.TransformStopTransformResponse>
+  async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['transform_id']
+    const acceptedQuery: string[] = ['allow_no_match', 'force', 'timeout', 'wait_for_checkpoint', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_stop`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformUpdateTransformResponse>
+  async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformUpdateTransformResponse, unknown>>
+  async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise<T.TransformUpdateTransformResponse>
+  async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['transform_id', 'transform_id']
+    const acceptedQuery: string[] = ['defer_validation', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_update`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
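Not part of the patch: a hedged lifecycle sketch for the Transform namespace above (transform id, indices, and aggregation are illustrative, not from the source):

await client.transform.putTransform({
  transform_id: 'ecommerce-daily',   // illustrative id
  source: { index: 'orders' },
  dest: { index: 'orders-daily' },
  pivot: {
    group_by: { day: { date_histogram: { field: '@timestamp', calendar_interval: '1d' } } },
    aggregations: { revenue: { sum: { field: 'total' } } }
  }
})
await client.transform.startTransform({ transform_id: 'ecommerce-daily' })
// ...later, stop and remove it:
await client.transform.stopTransform({ transform_id: 'ecommerce-daily', wait_for_completion: true })
await client.transform.deleteTransform({ transform_id: 'ecommerce-daily' })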
diff --git a/src/api/api/update.ts b/src/api/api/update.ts
new file mode 100644
index 000000000..83f547c92
--- /dev/null
+++ b/src/api/api/update.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function UpdateApi<TDocument = unknown, TPartialDocument = unknown, TDocumentR = unknown> (this: That, params: T.UpdateRequest<TDocument, TPartialDocument> | TB.UpdateRequest<TDocument, TPartialDocument>, options?: TransportRequestOptionsWithOutMeta): Promise<T.UpdateResponse<TDocumentR>>
+export default async function UpdateApi<TDocument = unknown, TPartialDocument = unknown, TDocumentR = unknown> (this: That, params: T.UpdateRequest<TDocument, TPartialDocument> | TB.UpdateRequest<TDocument, TPartialDocument>, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.UpdateResponse<TDocumentR>, unknown>>
+export default async function UpdateApi<TDocument = unknown, TPartialDocument = unknown, TDocumentR = unknown> (this: That, params: T.UpdateRequest<TDocument, TPartialDocument> | TB.UpdateRequest<TDocument, TPartialDocument>, options?: TransportRequestOptions): Promise<T.UpdateResponse<TDocumentR>>
+export default async function UpdateApi<TDocument = unknown, TPartialDocument = unknown, TDocumentR = unknown> (this: That, params: T.UpdateRequest<TDocument, TPartialDocument> | TB.UpdateRequest<TDocument, TPartialDocument>, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['id', 'index', 'type']
+  const acceptedQuery: string[] = ['if_primary_term', 'if_seq_no', 'lang', 'refresh', 'require_alias', 'retry_on_conflict', 'routing', 'timeout', 'wait_for_active_shards', '_source_excludes', '_source_includes', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'POST'
+  const path = `/${encodeURIComponent(params.index.toString())}/_update/${encodeURIComponent(params.id.toString())}`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
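Not part of the patch: a minimal update sketch with invented index and document values, showing how flattened keys split between query string and body in the generated function above:

await client.update({
  index: 'users',           // path parameter
  id: '42',
  retry_on_conflict: 3,     // query-string parameter
  doc: { active: true },    // body parameter
  doc_as_upsert: true       // body parameter
})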
diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts
new file mode 100644
index 000000000..4e964edd6
--- /dev/null
+++ b/src/api/api/update_by_query.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.UpdateByQueryResponse>
+export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.UpdateByQueryResponse, unknown>>
+export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptions): Promise<T.UpdateByQueryResponse>
+export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['index']
+  const acceptedQuery: string[] = ['allow_no_indices', 'analyzer', 'analyze_wildcard', 'default_operator', 'df', 'expand_wildcards', 'from', 'ignore_unavailable', 'lenient', 'pipeline', 'preference', 'refresh', 'request_cache', 'requests_per_second', 'routing', 'scroll', 'scroll_size', 'search_timeout', 'search_type', 'size', 'slices', 'sort', '_source', '_source_excludes', '_source_includes', 'stats', 'terminate_after', 'timeout', 'version', 'version_type', 'wait_for_active_shards', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'POST'
+  const path = `/${encodeURIComponent(params.index.toString())}/_update_by_query`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
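Not part of the patch: a hedged sketch of the update-by-query endpoint above (index, field, and script are invented). `refresh` is in the query-string list, while `script` and `query` fall through into the body:

const result = await client.updateByQuery({
  index: 'users',
  refresh: true,                                       // query-string parameter
  script: { source: 'ctx._source.active = false' },    // body parameter
  query: { range: { last_login: { lt: 'now-1y' } } }   // body parameter
})
console.log(result.updated)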
diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts
new file mode 100644
index 000000000..e5252fae9
--- /dev/null
+++ b/src/api/api/update_by_query_rethrottle.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.UpdateByQueryRethrottleResponse>
+export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.UpdateByQueryRethrottleResponse, unknown>>
+export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<T.UpdateByQueryRethrottleResponse>
+export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<any> {
+  const acceptedPath: string[] = ['task_id']
+  const acceptedQuery: string[] = ['requests_per_second', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const querystring: Record<string, any> = {}
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
+
+  for (const key in params) {
+    if (acceptedQuery.includes(key)) {
+      // @ts-expect-error
+      querystring[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body') {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    }
+  }
+
+  const method = 'POST'
+  const path = `/_update_by_query/${encodeURIComponent(params.task_id.toString())}/_rethrottle`
+  return await this.transport.request({ path, method, querystring, body }, options)
+}
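Not part of the patch: a tiny sketch of rethrottling a running update-by-query task; the task id shown is purely illustrative:

await client.updateByQueryRethrottle({
  task_id: 'oTUltX4IQMOUUVeiohTt8A:12345',  // illustrative task id from a wait_for_completion=false run
  requests_per_second: 500
})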
diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts
new file mode 100644
index 000000000..0fa326977
--- /dev/null
+++ b/src/api/api/watcher.ts
@@ -0,0 +1,379 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Watcher {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherAckWatchResponse>
+  async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherAckWatchResponse, unknown>>
+  async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherAckWatchResponse>
+  async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['watch_id', 'action_id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.watch_id != null && params.action_id != null) {
+      method = 'PUT'
+      path = `/_watcher/watch/${encodeURIComponent(params.watch_id.toString())}/_ack/${encodeURIComponent(params.action_id.toString())}`
+    } else {
+      method = 'PUT'
+      path = `/_watcher/watch/${encodeURIComponent(params.watch_id.toString())}/_ack`
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherActivateWatchResponse>
+  async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherActivateWatchResponse, unknown>>
+  async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherActivateWatchResponse>
+  async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['watch_id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_watcher/watch/${encodeURIComponent(params.watch_id.toString())}/_activate`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherDeactivateWatchResponse>
+  async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherDeactivateWatchResponse, unknown>>
+  async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherDeactivateWatchResponse>
+  async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['watch_id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_watcher/watch/${encodeURIComponent(params.watch_id.toString())}/_deactivate`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherDeleteWatchResponse>
+  async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherDeleteWatchResponse, unknown>>
+  async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherDeleteWatchResponse>
+  async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_watcher/watch/${encodeURIComponent(params.id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherExecuteWatchResponse>
+  async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherExecuteWatchResponse, unknown>>
+  async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherExecuteWatchResponse>
+  async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['debug', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.id != null) {
+      method = 'PUT'
+      path = `/_watcher/watch/${encodeURIComponent(params.id.toString())}/_execute`
+    } else {
+      method = 'PUT'
+      path = '/_watcher/watch/_execute'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherGetWatchResponse>
+  async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherGetWatchResponse, unknown>>
+  async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherGetWatchResponse>
+  async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_watcher/watch/${encodeURIComponent(params.id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherPutWatchResponse>
+  async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherPutWatchResponse, unknown>>
+  async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherPutWatchResponse>
+  async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const acceptedQuery: string[] = ['active', 'if_primary_term', 'if_sequence_number', 'version', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_watcher/watch/${encodeURIComponent(params.id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherQueryWatchesResponse>
+  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherQueryWatchesResponse, unknown>>
+  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise<T.WatcherQueryWatchesResponse>
+  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = body != null ? 'POST' : 'GET'
+    const path = '/_watcher/_query/watches'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStartResponse>
+  async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStartResponse, unknown>>
+  async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptions): Promise<T.WatcherStartResponse>
+  async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_watcher/_start'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStatsResponse>
+  async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStatsResponse, unknown>>
+  async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptions): Promise<T.WatcherStatsResponse>
+  async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['metric']
+    const acceptedQuery: string[] = ['emit_stacktraces', 'metric', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.metric != null) {
+      method = 'GET'
+      path = `/_watcher/stats/${encodeURIComponent(params.metric.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_watcher/stats'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStopResponse>
+  async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStopResponse, unknown>>
+  async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptions): Promise<T.WatcherStopResponse>
+  async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_watcher/_stop'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
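Not part of the patch: a hedged sketch of registering and acknowledging a watch with the class above; the watch id and definition are invented placeholders, not from the source:

await client.watcher.putWatch({
  id: 'cluster-health-watch',                   // illustrative id
  active: true,                                 // query-string parameter
  trigger: { schedule: { interval: '10m' } },   // body parameters
  input: { simple: {} },
  actions: {}
})
await client.watcher.ackWatch({ watch_id: 'cluster-health-watch' })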
diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts
new file mode 100644
index 000000000..190c2a93f
--- /dev/null
+++ b/src/api/api/xpack.ts
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Xpack {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.XpackInfoResponse>
+  async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.XpackInfoResponse, unknown>>
+  async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptions): Promise<T.XpackInfoResponse>
+  async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['categories', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_xpack'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.XpackUsageResponse>
+  async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.XpackUsageResponse, unknown>>
+  async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptions): Promise<T.XpackUsageResponse>
+  async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedQuery: string[] = ['master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedQuery.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_xpack/usage'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
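Not part of the patch: a short sketch of the two X-Pack introspection calls defined above, assuming a configured `client`:

const info = await client.xpack.info()
console.log(Object.keys(info.features))          // licensed feature flags
const usage = await client.xpack.usage({ master_timeout: '30s' })  // query-string parameter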
'./api/open_point_in_time' +import pingApi from './api/ping' +import putScriptApi from './api/put_script' +import rankEvalApi from './api/rank_eval' +import reindexApi from './api/reindex' +import reindexRethrottleApi from './api/reindex_rethrottle' +import renderSearchTemplateApi from './api/render_search_template' +import RollupApi from './api/rollup' +import scriptsPainlessExecuteApi from './api/scripts_painless_execute' +import scrollApi from './api/scroll' +import searchApi from './api/search' +import searchMvtApi from './api/search_mvt' +import searchShardsApi from './api/search_shards' +import searchTemplateApi from './api/search_template' +import SearchableSnapshotsApi from './api/searchable_snapshots' +import SecurityApi from './api/security' +import ShutdownApi from './api/shutdown' +import SlmApi from './api/slm' +import SnapshotApi from './api/snapshot' +import SqlApi from './api/sql' +import SslApi from './api/ssl' +import TasksApi from './api/tasks' +import termsEnumApi from './api/terms_enum' +import termvectorsApi from './api/termvectors' +import TextStructureApi from './api/text_structure' +import TransformApi from './api/transform' +import updateApi from './api/update' +import updateByQueryApi from './api/update_by_query' +import updateByQueryRethrottleApi from './api/update_by_query_rethrottle' +import WatcherApi from './api/watcher' +import XpackApi from './api/xpack' + +export default interface API { + new(): API + asyncSearch: AsyncSearchApi + autoscaling: AutoscalingApi + bulk: typeof bulkApi + cat: CatApi + ccr: CcrApi + clearScroll: typeof clearScrollApi + closePointInTime: typeof closePointInTimeApi + cluster: ClusterApi + count: typeof countApi + create: typeof createApi + danglingIndices: DanglingIndicesApi + delete: typeof deleteApi + deleteByQuery: typeof deleteByQueryApi + deleteByQueryRethrottle: typeof deleteByQueryRethrottleApi + deleteScript: typeof deleteScriptApi + enrich: EnrichApi + eql: EqlApi + exists: typeof existsApi + existsSource: typeof existsSourceApi + explain: typeof explainApi + features: FeaturesApi + fieldCaps: typeof fieldCapsApi + fleet: FleetApi + get: typeof getApi + getScript: typeof getScriptApi + getScriptContext: typeof getScriptContextApi + getScriptLanguages: typeof getScriptLanguagesApi + getSource: typeof getSourceApi + graph: GraphApi + ilm: IlmApi + index: typeof indexApi + indices: IndicesApi + info: typeof infoApi + ingest: IngestApi + license: LicenseApi + logstash: LogstashApi + mget: typeof mgetApi + migration: MigrationApi + ml: MlApi + monitoring: MonitoringApi + msearch: typeof msearchApi + msearchTemplate: typeof msearchTemplateApi + mtermvectors: typeof mtermvectorsApi + nodes: NodesApi + openPointInTime: typeof openPointInTimeApi + ping: typeof pingApi + putScript: typeof putScriptApi + rankEval: typeof rankEvalApi + reindex: typeof reindexApi + reindexRethrottle: typeof reindexRethrottleApi + renderSearchTemplate: typeof renderSearchTemplateApi + rollup: RollupApi + scriptsPainlessExecute: typeof scriptsPainlessExecuteApi + scroll: typeof scrollApi + search: typeof searchApi + searchMvt: typeof searchMvtApi + searchShards: typeof searchShardsApi + searchTemplate: typeof searchTemplateApi + searchableSnapshots: SearchableSnapshotsApi + security: SecurityApi + shutdown: ShutdownApi + slm: SlmApi + snapshot: SnapshotApi + sql: SqlApi + ssl: SslApi + tasks: TasksApi + termsEnum: typeof termsEnumApi + termvectors: typeof termvectorsApi + textStructure: TextStructureApi + transform: TransformApi + update: typeof 
updateApi + updateByQuery: typeof updateByQueryApi + updateByQueryRethrottle: typeof updateByQueryRethrottleApi + watcher: WatcherApi + xpack: XpackApi +} + +const kAsyncSearch = Symbol('AsyncSearch') +const kAutoscaling = Symbol('Autoscaling') +const kCat = Symbol('Cat') +const kCcr = Symbol('Ccr') +const kCluster = Symbol('Cluster') +const kDanglingIndices = Symbol('DanglingIndices') +const kEnrich = Symbol('Enrich') +const kEql = Symbol('Eql') +const kFeatures = Symbol('Features') +const kFleet = Symbol('Fleet') +const kGraph = Symbol('Graph') +const kIlm = Symbol('Ilm') +const kIndices = Symbol('Indices') +const kIngest = Symbol('Ingest') +const kLicense = Symbol('License') +const kLogstash = Symbol('Logstash') +const kMigration = Symbol('Migration') +const kMl = Symbol('Ml') +const kMonitoring = Symbol('Monitoring') +const kNodes = Symbol('Nodes') +const kRollup = Symbol('Rollup') +const kSearchableSnapshots = Symbol('SearchableSnapshots') +const kSecurity = Symbol('Security') +const kShutdown = Symbol('Shutdown') +const kSlm = Symbol('Slm') +const kSnapshot = Symbol('Snapshot') +const kSql = Symbol('Sql') +const kSsl = Symbol('Ssl') +const kTasks = Symbol('Tasks') +const kTextStructure = Symbol('TextStructure') +const kTransform = Symbol('Transform') +const kWatcher = Symbol('Watcher') +const kXpack = Symbol('Xpack') + +export default class API { + [kAsyncSearch]: symbol | null + [kAutoscaling]: symbol | null + [kCat]: symbol | null + [kCcr]: symbol | null + [kCluster]: symbol | null + [kDanglingIndices]: symbol | null + [kEnrich]: symbol | null + [kEql]: symbol | null + [kFeatures]: symbol | null + [kFleet]: symbol | null + [kGraph]: symbol | null + [kIlm]: symbol | null + [kIndices]: symbol | null + [kIngest]: symbol | null + [kLicense]: symbol | null + [kLogstash]: symbol | null + [kMigration]: symbol | null + [kMl]: symbol | null + [kMonitoring]: symbol | null + [kNodes]: symbol | null + [kRollup]: symbol | null + [kSearchableSnapshots]: symbol | null + [kSecurity]: symbol | null + [kShutdown]: symbol | null + [kSlm]: symbol | null + [kSnapshot]: symbol | null + [kSql]: symbol | null + [kSsl]: symbol | null + [kTasks]: symbol | null + [kTextStructure]: symbol | null + [kTransform]: symbol | null + [kWatcher]: symbol | null + [kXpack]: symbol | null + constructor () { + this[kAsyncSearch] = null + this[kAutoscaling] = null + this[kCat] = null + this[kCcr] = null + this[kCluster] = null + this[kDanglingIndices] = null + this[kEnrich] = null + this[kEql] = null + this[kFeatures] = null + this[kFleet] = null + this[kGraph] = null + this[kIlm] = null + this[kIndices] = null + this[kIngest] = null + this[kLicense] = null + this[kLogstash] = null + this[kMigration] = null + this[kMl] = null + this[kMonitoring] = null + this[kNodes] = null + this[kRollup] = null + this[kSearchableSnapshots] = null + this[kSecurity] = null + this[kShutdown] = null + this[kSlm] = null + this[kSnapshot] = null + this[kSql] = null + this[kSsl] = null + this[kTasks] = null + this[kTextStructure] = null + this[kTransform] = null + this[kWatcher] = null + this[kXpack] = null + } +} + +API.prototype.bulk = bulkApi +API.prototype.clearScroll = clearScrollApi +API.prototype.closePointInTime = closePointInTimeApi +API.prototype.count = countApi +API.prototype.create = createApi +API.prototype.delete = deleteApi +API.prototype.deleteByQuery = deleteByQueryApi +API.prototype.deleteByQueryRethrottle = deleteByQueryRethrottleApi +API.prototype.deleteScript = deleteScriptApi +API.prototype.exists = existsApi 
+
+API.prototype.bulk = bulkApi
+API.prototype.clearScroll = clearScrollApi
+API.prototype.closePointInTime = closePointInTimeApi
+API.prototype.count = countApi
+API.prototype.create = createApi
+API.prototype.delete = deleteApi
+API.prototype.deleteByQuery = deleteByQueryApi
+API.prototype.deleteByQueryRethrottle = deleteByQueryRethrottleApi
+API.prototype.deleteScript = deleteScriptApi
+API.prototype.exists = existsApi
+API.prototype.existsSource = existsSourceApi
+API.prototype.explain = explainApi
+API.prototype.fieldCaps = fieldCapsApi
+API.prototype.get = getApi
+API.prototype.getScript = getScriptApi
+API.prototype.getScriptContext = getScriptContextApi
+API.prototype.getScriptLanguages = getScriptLanguagesApi
+API.prototype.getSource = getSourceApi
+API.prototype.index = indexApi
+API.prototype.info = infoApi
+API.prototype.mget = mgetApi
+API.prototype.msearch = msearchApi
+API.prototype.msearchTemplate = msearchTemplateApi
+API.prototype.mtermvectors = mtermvectorsApi
+API.prototype.openPointInTime = openPointInTimeApi
+API.prototype.ping = pingApi
+API.prototype.putScript = putScriptApi
+API.prototype.rankEval = rankEvalApi
+API.prototype.reindex = reindexApi
+API.prototype.reindexRethrottle = reindexRethrottleApi
+API.prototype.renderSearchTemplate = renderSearchTemplateApi
+API.prototype.scriptsPainlessExecute = scriptsPainlessExecuteApi
+API.prototype.scroll = scrollApi
+API.prototype.search = searchApi
+API.prototype.searchMvt = searchMvtApi
+API.prototype.searchShards = searchShardsApi
+API.prototype.searchTemplate = searchTemplateApi
+API.prototype.termsEnum = termsEnumApi
+API.prototype.termvectors = termvectorsApi
+API.prototype.update = updateApi
+API.prototype.updateByQuery = updateByQueryApi
+API.prototype.updateByQueryRethrottle = updateByQueryRethrottleApi
+
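
A hypothetical usage sketch of the assembled client; the import path and node URL are placeholders. Root-level APIs such as `ping` resolve through the prototype assignments above, while namespaced APIs such as `security` are built on demand by the getters defined just below:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.ping()                          // shared prototype method
await client.security.getBuiltinPrivileges() // first access instantiates SecurityApi, then it is cached
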
+Object.defineProperties(API.prototype, {
+  asyncSearch: {
+    get () { return this[kAsyncSearch] === null ? (this[kAsyncSearch] = new AsyncSearchApi(this.transport)) : this[kAsyncSearch] }
+  },
+  autoscaling: {
+    get () { return this[kAutoscaling] === null ? (this[kAutoscaling] = new AutoscalingApi(this.transport)) : this[kAutoscaling] }
+  },
+  cat: {
+    get () { return this[kCat] === null ? (this[kCat] = new CatApi(this.transport)) : this[kCat] }
+  },
+  ccr: {
+    get () { return this[kCcr] === null ? (this[kCcr] = new CcrApi(this.transport)) : this[kCcr] }
+  },
+  cluster: {
+    get () { return this[kCluster] === null ? (this[kCluster] = new ClusterApi(this.transport)) : this[kCluster] }
+  },
+  danglingIndices: {
+    get () { return this[kDanglingIndices] === null ? (this[kDanglingIndices] = new DanglingIndicesApi(this.transport)) : this[kDanglingIndices] }
+  },
+  enrich: {
+    get () { return this[kEnrich] === null ? (this[kEnrich] = new EnrichApi(this.transport)) : this[kEnrich] }
+  },
+  eql: {
+    get () { return this[kEql] === null ? (this[kEql] = new EqlApi(this.transport)) : this[kEql] }
+  },
+  features: {
+    get () { return this[kFeatures] === null ? (this[kFeatures] = new FeaturesApi(this.transport)) : this[kFeatures] }
+  },
+  fleet: {
+    get () { return this[kFleet] === null ? (this[kFleet] = new FleetApi(this.transport)) : this[kFleet] }
+  },
+  graph: {
+    get () { return this[kGraph] === null ? (this[kGraph] = new GraphApi(this.transport)) : this[kGraph] }
+  },
+  ilm: {
+    get () { return this[kIlm] === null ? (this[kIlm] = new IlmApi(this.transport)) : this[kIlm] }
+  },
+  indices: {
+    get () { return this[kIndices] === null ? (this[kIndices] = new IndicesApi(this.transport)) : this[kIndices] }
+  },
+  ingest: {
+    get () { return this[kIngest] === null ? (this[kIngest] = new IngestApi(this.transport)) : this[kIngest] }
+  },
+  license: {
+    get () { return this[kLicense] === null ? (this[kLicense] = new LicenseApi(this.transport)) : this[kLicense] }
+  },
+  logstash: {
+    get () { return this[kLogstash] === null ? (this[kLogstash] = new LogstashApi(this.transport)) : this[kLogstash] }
+  },
+  migration: {
+    get () { return this[kMigration] === null ? (this[kMigration] = new MigrationApi(this.transport)) : this[kMigration] }
+  },
+  ml: {
+    get () { return this[kMl] === null ? (this[kMl] = new MlApi(this.transport)) : this[kMl] }
+  },
+  monitoring: {
+    get () { return this[kMonitoring] === null ? (this[kMonitoring] = new MonitoringApi(this.transport)) : this[kMonitoring] }
+  },
+  nodes: {
+    get () { return this[kNodes] === null ? (this[kNodes] = new NodesApi(this.transport)) : this[kNodes] }
+  },
+  rollup: {
+    get () { return this[kRollup] === null ? (this[kRollup] = new RollupApi(this.transport)) : this[kRollup] }
+  },
+  searchableSnapshots: {
+    get () { return this[kSearchableSnapshots] === null ? (this[kSearchableSnapshots] = new SearchableSnapshotsApi(this.transport)) : this[kSearchableSnapshots] }
+  },
+  security: {
+    get () { return this[kSecurity] === null ? (this[kSecurity] = new SecurityApi(this.transport)) : this[kSecurity] }
+  },
+  shutdown: {
+    get () { return this[kShutdown] === null ? (this[kShutdown] = new ShutdownApi(this.transport)) : this[kShutdown] }
+  },
+  slm: {
+    get () { return this[kSlm] === null ? (this[kSlm] = new SlmApi(this.transport)) : this[kSlm] }
+  },
+  snapshot: {
+    get () { return this[kSnapshot] === null ? (this[kSnapshot] = new SnapshotApi(this.transport)) : this[kSnapshot] }
+  },
+  sql: {
+    get () { return this[kSql] === null ? (this[kSql] = new SqlApi(this.transport)) : this[kSql] }
+  },
+  ssl: {
+    get () { return this[kSsl] === null ? (this[kSsl] = new SslApi(this.transport)) : this[kSsl] }
+  },
+  tasks: {
+    get () { return this[kTasks] === null ? (this[kTasks] = new TasksApi(this.transport)) : this[kTasks] }
+  },
+  textStructure: {
+    get () { return this[kTextStructure] === null ? (this[kTextStructure] = new TextStructureApi(this.transport)) : this[kTextStructure] }
+  },
+  transform: {
+    get () { return this[kTransform] === null ? (this[kTransform] = new TransformApi(this.transport)) : this[kTransform] }
+  },
+  watcher: {
+    get () { return this[kWatcher] === null ? (this[kWatcher] = new WatcherApi(this.transport)) : this[kWatcher] }
+  },
+  xpack: {
+    get () { return this[kXpack] === null ? (this[kXpack] = new XpackApi(this.transport)) : this[kXpack] }
+  }
+})
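
Each getter above follows the same create-on-first-read shape: construct the namespace once, cache it under the symbol key, and return the cached instance afterwards. Reduced to its essence, with illustrative names:

class ExpensiveApi {
  constructor (readonly transport: unknown) {}
}

const kExpensive = Symbol('ExpensiveApi')

class ClientSketch {
  transport: unknown = {};
  [kExpensive]: ExpensiveApi | null = null
  get expensive (): ExpensiveApi {
    return this[kExpensive] === null
      ? (this[kExpensive] = new ExpensiveApi(this.transport))
      : this[kExpensive]
  }
}

const c = new ClientSketch()
console.log(c.expensive === c.expensive) // true: constructed once, cached afterwards
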
diff --git a/src/api/kibana.ts b/src/api/kibana.ts
new file mode 100644
index 000000000..fbd2b61b8
--- /dev/null
+++ b/src/api/kibana.ts
@@ -0,0 +1,520 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Serializer,
+  Diagnostic,
+  BaseConnectionPool,
+  TransportRequestOptions,
+  TransportResult
+} from '@elastic/transport'
+import * as T from './types'
+import * as TB from './typesWithBodyKey'
+import SniffingTransport from '../SniffingTransport'
+import Helpers from '../Helpers'
+import { ClientOptions } from '../Client'
+
+interface KibanaClient {
+  diagnostic: Diagnostic
+  name: string | symbol
+  connectionPool: BaseConnectionPool
+  transport: SniffingTransport
+  serializer: Serializer
+  helpers: Helpers
+  child: (opts?: ClientOptions) => KibanaClient
+  close: () => Promise<void>
+  asyncSearch: {
+    delete: <TContext = unknown>(params: T.AsyncSearchDeleteRequest| TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.AsyncSearchDeleteResponse, TContext>>
+    get: <TDocument = unknown, TContext = unknown>(params: T.AsyncSearchGetRequest| TB.AsyncSearchGetRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.AsyncSearchGetResponse<TDocument>, TContext>>
+    status: <TDocument = unknown, TContext = unknown>(params: T.AsyncSearchStatusRequest| TB.AsyncSearchStatusRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.AsyncSearchStatusResponse<TDocument>, TContext>>
+    submit: <TDocument = unknown, TContext = unknown>(params?: T.AsyncSearchSubmitRequest| TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.AsyncSearchSubmitResponse<TDocument>, TContext>>
+  }
+  autoscaling: {
+    deleteAutoscalingPolicy: <TContext = unknown>(params: T.AutoscalingDeleteAutoscalingPolicyRequest| TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.AutoscalingDeleteAutoscalingPolicyResponse, TContext>>
+    getAutoscalingCapacity: <TContext = unknown>(params?: T.AutoscalingGetAutoscalingCapacityRequest| TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.AutoscalingGetAutoscalingCapacityResponse, TContext>>
+    getAutoscalingPolicy: <TContext = unknown>(params: T.AutoscalingGetAutoscalingPolicyRequest| TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.AutoscalingGetAutoscalingPolicyResponse, TContext>>
+    putAutoscalingPolicy: <TContext = unknown>(params: T.AutoscalingPutAutoscalingPolicyRequest| TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.AutoscalingPutAutoscalingPolicyResponse, TContext>>
+  }
+  bulk: <TDocument = unknown, TContext = unknown>(params: T.BulkRequest<TDocument>| TB.BulkRequest<TDocument>, options?: TransportRequestOptions) => Promise<TransportResult<T.BulkResponse, TContext>>
+  cat: {
+    aliases: <TContext = unknown>(params?: T.CatAliasesRequest| TB.CatAliasesRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.CatAliasesResponse, TContext>>
+    allocation: <TContext = unknown>(params?: T.CatAllocationRequest| TB.CatAllocationRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.CatAllocationResponse, TContext>>
+    count: <TContext = unknown>(params?: T.CatCountRequest| TB.CatCountRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.CatCountResponse, TContext>>
+    fielddata: <TContext = unknown>(params?: T.CatFielddataRequest| TB.CatFielddataRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.CatFielddataResponse, TContext>>
+    health: <TContext = unknown>(params?: T.CatHealthRequest| TB.CatHealthRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.CatHealthResponse, TContext>>
+    help: <TContext = unknown>(params?: T.CatHelpRequest| TB.CatHelpRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.CatHelpResponse, TContext>>
+    indices: <TContext = unknown>(params?: T.CatIndicesRequest| TB.CatIndicesRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.CatIndicesResponse, TContext>>
+    master: <TContext = unknown>(params?: T.CatMasterRequest| TB.CatMasterRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.CatMasterResponse, TContext>>
+    mlDataFrameAnalytics: <TContext = unknown>(params?: T.CatMlDataFrameAnalyticsRequest| TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.CatMlDataFrameAnalyticsResponse, TContext>>
+    mlDatafeeds: <TContext = unknown>(params?: T.CatMlDatafeedsRequest| TB.CatMlDatafeedsRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.CatMlDatafeedsResponse, TContext>>
+    mlJobs: 
(params?: T.CatMlJobsRequest| TB.CatMlJobsRequest, options?: TransportRequestOptions) => Promise> + mlTrainedModels: (params?: T.CatMlTrainedModelsRequest| TB.CatMlTrainedModelsRequest, options?: TransportRequestOptions) => Promise> + nodeattrs: (params?: T.CatNodeattrsRequest| TB.CatNodeattrsRequest, options?: TransportRequestOptions) => Promise> + nodes: (params?: T.CatNodesRequest| TB.CatNodesRequest, options?: TransportRequestOptions) => Promise> + pendingTasks: (params?: T.CatPendingTasksRequest| TB.CatPendingTasksRequest, options?: TransportRequestOptions) => Promise> + plugins: (params?: T.CatPluginsRequest| TB.CatPluginsRequest, options?: TransportRequestOptions) => Promise> + recovery: (params?: T.CatRecoveryRequest| TB.CatRecoveryRequest, options?: TransportRequestOptions) => Promise> + repositories: (params?: T.CatRepositoriesRequest| TB.CatRepositoriesRequest, options?: TransportRequestOptions) => Promise> + segments: (params?: T.CatSegmentsRequest| TB.CatSegmentsRequest, options?: TransportRequestOptions) => Promise> + shards: (params?: T.CatShardsRequest| TB.CatShardsRequest, options?: TransportRequestOptions) => Promise> + snapshots: (params?: T.CatSnapshotsRequest| TB.CatSnapshotsRequest, options?: TransportRequestOptions) => Promise> + tasks: (params?: T.CatTasksRequest| TB.CatTasksRequest, options?: TransportRequestOptions) => Promise> + templates: (params?: T.CatTemplatesRequest| TB.CatTemplatesRequest, options?: TransportRequestOptions) => Promise> + threadPool: (params?: T.CatThreadPoolRequest| TB.CatThreadPoolRequest, options?: TransportRequestOptions) => Promise> + transforms: (params?: T.CatTransformsRequest| TB.CatTransformsRequest, options?: TransportRequestOptions) => Promise> + } + ccr: { + deleteAutoFollowPattern: (params: T.CcrDeleteAutoFollowPatternRequest| TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise> + follow: (params: T.CcrFollowRequest| TB.CcrFollowRequest, options?: TransportRequestOptions) => Promise> + followInfo: (params: T.CcrFollowInfoRequest| TB.CcrFollowInfoRequest, options?: TransportRequestOptions) => Promise> + followStats: (params: T.CcrFollowStatsRequest| TB.CcrFollowStatsRequest, options?: TransportRequestOptions) => Promise> + forgetFollower: (params: T.CcrForgetFollowerRequest| TB.CcrForgetFollowerRequest, options?: TransportRequestOptions) => Promise> + getAutoFollowPattern: (params?: T.CcrGetAutoFollowPatternRequest| TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise> + pauseAutoFollowPattern: (params: T.CcrPauseAutoFollowPatternRequest| TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise> + pauseFollow: (params: T.CcrPauseFollowRequest| TB.CcrPauseFollowRequest, options?: TransportRequestOptions) => Promise> + putAutoFollowPattern: (params: T.CcrPutAutoFollowPatternRequest| TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise> + resumeAutoFollowPattern: (params: T.CcrResumeAutoFollowPatternRequest| TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise> + resumeFollow: (params: T.CcrResumeFollowRequest| TB.CcrResumeFollowRequest, options?: TransportRequestOptions) => Promise> + stats: (params?: T.CcrStatsRequest| TB.CcrStatsRequest, options?: TransportRequestOptions) => Promise> + unfollow: (params: T.CcrUnfollowRequest| TB.CcrUnfollowRequest, options?: TransportRequestOptions) => Promise> + } + clearScroll: (params?: T.ClearScrollRequest| TB.ClearScrollRequest, options?: 
TransportRequestOptions) => Promise> + closePointInTime: (params?: T.ClosePointInTimeRequest| TB.ClosePointInTimeRequest, options?: TransportRequestOptions) => Promise> + cluster: { + allocationExplain: (params?: T.ClusterAllocationExplainRequest| TB.ClusterAllocationExplainRequest, options?: TransportRequestOptions) => Promise> + deleteComponentTemplate: (params: T.ClusterDeleteComponentTemplateRequest| TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions) => Promise> + deleteVotingConfigExclusions: (params?: T.ClusterDeleteVotingConfigExclusionsRequest| TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions) => Promise> + existsComponentTemplate: (params: T.ClusterExistsComponentTemplateRequest| TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions) => Promise> + getComponentTemplate: (params?: T.ClusterGetComponentTemplateRequest| TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions) => Promise> + getSettings: (params?: T.ClusterGetSettingsRequest| TB.ClusterGetSettingsRequest, options?: TransportRequestOptions) => Promise> + health: (params?: T.ClusterHealthRequest| TB.ClusterHealthRequest, options?: TransportRequestOptions) => Promise> + pendingTasks: (params?: T.ClusterPendingTasksRequest| TB.ClusterPendingTasksRequest, options?: TransportRequestOptions) => Promise> + postVotingConfigExclusions: (params?: T.ClusterPostVotingConfigExclusionsRequest| TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions) => Promise> + putComponentTemplate: (params: T.ClusterPutComponentTemplateRequest| TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions) => Promise> + putSettings: (params?: T.ClusterPutSettingsRequest| TB.ClusterPutSettingsRequest, options?: TransportRequestOptions) => Promise> + remoteInfo: (params?: T.ClusterRemoteInfoRequest| TB.ClusterRemoteInfoRequest, options?: TransportRequestOptions) => Promise> + reroute: (params?: T.ClusterRerouteRequest| TB.ClusterRerouteRequest, options?: TransportRequestOptions) => Promise> + state: (params?: T.ClusterStateRequest| TB.ClusterStateRequest, options?: TransportRequestOptions) => Promise> + stats: (params?: T.ClusterStatsRequest| TB.ClusterStatsRequest, options?: TransportRequestOptions) => Promise> + } + count: (params?: T.CountRequest| TB.CountRequest, options?: TransportRequestOptions) => Promise> + create: (params: T.CreateRequest| TB.CreateRequest, options?: TransportRequestOptions) => Promise> + danglingIndices: { + deleteDanglingIndex: (params: T.DanglingIndicesDeleteDanglingIndexRequest| TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions) => Promise> + importDanglingIndex: (params: T.DanglingIndicesImportDanglingIndexRequest| TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions) => Promise> + listDanglingIndices: (params?: T.DanglingIndicesListDanglingIndicesRequest| TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions) => Promise> + } + dataFrameTransformDeprecated: { + deleteTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + getTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + getTransformStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + previewTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + putTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + startTransform: (params?: T.TODO, 
options?: TransportRequestOptions) => Promise> + stopTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + updateTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + } + delete: (params: T.DeleteRequest| TB.DeleteRequest, options?: TransportRequestOptions) => Promise> + deleteByQuery: (params: T.DeleteByQueryRequest| TB.DeleteByQueryRequest, options?: TransportRequestOptions) => Promise> + deleteByQueryRethrottle: (params: T.DeleteByQueryRethrottleRequest| TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions) => Promise> + deleteScript: (params: T.DeleteScriptRequest| TB.DeleteScriptRequest, options?: TransportRequestOptions) => Promise> + enrich: { + deletePolicy: (params: T.EnrichDeletePolicyRequest| TB.EnrichDeletePolicyRequest, options?: TransportRequestOptions) => Promise> + executePolicy: (params: T.EnrichExecutePolicyRequest| TB.EnrichExecutePolicyRequest, options?: TransportRequestOptions) => Promise> + getPolicy: (params?: T.EnrichGetPolicyRequest| TB.EnrichGetPolicyRequest, options?: TransportRequestOptions) => Promise> + putPolicy: (params: T.EnrichPutPolicyRequest| TB.EnrichPutPolicyRequest, options?: TransportRequestOptions) => Promise> + stats: (params?: T.EnrichStatsRequest| TB.EnrichStatsRequest, options?: TransportRequestOptions) => Promise> + } + eql: { + delete: (params: T.EqlDeleteRequest| TB.EqlDeleteRequest, options?: TransportRequestOptions) => Promise> + get: (params: T.EqlGetRequest| TB.EqlGetRequest, options?: TransportRequestOptions) => Promise, TContext>> + getStatus: (params: T.EqlGetStatusRequest| TB.EqlGetStatusRequest, options?: TransportRequestOptions) => Promise> + search: (params: T.EqlSearchRequest| TB.EqlSearchRequest, options?: TransportRequestOptions) => Promise, TContext>> + } + exists: (params: T.ExistsRequest| TB.ExistsRequest, options?: TransportRequestOptions) => Promise> + existsSource: (params: T.ExistsSourceRequest| TB.ExistsSourceRequest, options?: TransportRequestOptions) => Promise> + explain: (params: T.ExplainRequest| TB.ExplainRequest, options?: TransportRequestOptions) => Promise, TContext>> + features: { + getFeatures: (params?: T.FeaturesGetFeaturesRequest| TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptions) => Promise> + resetFeatures: (params?: T.FeaturesResetFeaturesRequest| TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptions) => Promise> + } + fieldCaps: (params?: T.FieldCapsRequest| TB.FieldCapsRequest, options?: TransportRequestOptions) => Promise> + fleet: { + globalCheckpoints: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + } + get: (params: T.GetRequest| TB.GetRequest, options?: TransportRequestOptions) => Promise, TContext>> + getScript: (params: T.GetScriptRequest| TB.GetScriptRequest, options?: TransportRequestOptions) => Promise> + getScriptContext: (params?: T.GetScriptContextRequest| TB.GetScriptContextRequest, options?: TransportRequestOptions) => Promise> + getScriptLanguages: (params?: T.GetScriptLanguagesRequest| TB.GetScriptLanguagesRequest, options?: TransportRequestOptions) => Promise> + getSource: (params?: T.GetSourceRequest| TB.GetSourceRequest, options?: TransportRequestOptions) => Promise, TContext>> + graph: { + explore: (params: T.GraphExploreRequest| TB.GraphExploreRequest, options?: TransportRequestOptions) => Promise> + } + ilm: { + deleteLifecycle: (params: T.IlmDeleteLifecycleRequest| TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions) => Promise> + explainLifecycle: (params: 
T.IlmExplainLifecycleRequest| TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions) => Promise> + getLifecycle: (params?: T.IlmGetLifecycleRequest| TB.IlmGetLifecycleRequest, options?: TransportRequestOptions) => Promise> + getStatus: (params?: T.IlmGetStatusRequest| TB.IlmGetStatusRequest, options?: TransportRequestOptions) => Promise> + migrateToDataTiers: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + moveToStep: (params: T.IlmMoveToStepRequest| TB.IlmMoveToStepRequest, options?: TransportRequestOptions) => Promise> + putLifecycle: (params: T.IlmPutLifecycleRequest| TB.IlmPutLifecycleRequest, options?: TransportRequestOptions) => Promise> + removePolicy: (params: T.IlmRemovePolicyRequest| TB.IlmRemovePolicyRequest, options?: TransportRequestOptions) => Promise> + retry: (params: T.IlmRetryRequest| TB.IlmRetryRequest, options?: TransportRequestOptions) => Promise> + start: (params?: T.IlmStartRequest| TB.IlmStartRequest, options?: TransportRequestOptions) => Promise> + stop: (params?: T.IlmStopRequest| TB.IlmStopRequest, options?: TransportRequestOptions) => Promise> + } + index: (params: T.IndexRequest| TB.IndexRequest, options?: TransportRequestOptions) => Promise> + indices: { + addBlock: (params: T.IndicesAddBlockRequest| TB.IndicesAddBlockRequest, options?: TransportRequestOptions) => Promise> + analyze: (params?: T.IndicesAnalyzeRequest| TB.IndicesAnalyzeRequest, options?: TransportRequestOptions) => Promise> + clearCache: (params?: T.IndicesClearCacheRequest| TB.IndicesClearCacheRequest, options?: TransportRequestOptions) => Promise> + clone: (params: T.IndicesCloneRequest| TB.IndicesCloneRequest, options?: TransportRequestOptions) => Promise> + close: (params: T.IndicesCloseRequest| TB.IndicesCloseRequest, options?: TransportRequestOptions) => Promise> + create: (params: T.IndicesCreateRequest| TB.IndicesCreateRequest, options?: TransportRequestOptions) => Promise> + createDataStream: (params: T.IndicesCreateDataStreamRequest| TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptions) => Promise> + dataStreamsStats: (params?: T.IndicesDataStreamsStatsRequest| TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions) => Promise> + delete: (params: T.IndicesDeleteRequest| TB.IndicesDeleteRequest, options?: TransportRequestOptions) => Promise> + deleteAlias: (params: T.IndicesDeleteAliasRequest| TB.IndicesDeleteAliasRequest, options?: TransportRequestOptions) => Promise> + deleteDataStream: (params: T.IndicesDeleteDataStreamRequest| TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions) => Promise> + deleteIndexTemplate: (params: T.IndicesDeleteIndexTemplateRequest| TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions) => Promise> + deleteTemplate: (params: T.IndicesDeleteTemplateRequest| TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptions) => Promise> + diskUsage: (params: T.IndicesDiskUsageRequest| TB.IndicesDiskUsageRequest, options?: TransportRequestOptions) => Promise> + exists: (params: T.IndicesExistsRequest| TB.IndicesExistsRequest, options?: TransportRequestOptions) => Promise> + existsAlias: (params: T.IndicesExistsAliasRequest| TB.IndicesExistsAliasRequest, options?: TransportRequestOptions) => Promise> + existsIndexTemplate: (params: T.IndicesExistsIndexTemplateRequest| TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions) => Promise> + existsTemplate: (params: T.IndicesExistsTemplateRequest| TB.IndicesExistsTemplateRequest, options?: 
TransportRequestOptions) => Promise> + existsType: (params: T.IndicesExistsTypeRequest| TB.IndicesExistsTypeRequest, options?: TransportRequestOptions) => Promise> + fieldUsageStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + flush: (params?: T.IndicesFlushRequest| TB.IndicesFlushRequest, options?: TransportRequestOptions) => Promise> + forcemerge: (params?: T.IndicesForcemergeRequest| TB.IndicesForcemergeRequest, options?: TransportRequestOptions) => Promise> + freeze: (params: T.IndicesFreezeRequest| TB.IndicesFreezeRequest, options?: TransportRequestOptions) => Promise> + get: (params: T.IndicesGetRequest| TB.IndicesGetRequest, options?: TransportRequestOptions) => Promise> + getAlias: (params?: T.IndicesGetAliasRequest| TB.IndicesGetAliasRequest, options?: TransportRequestOptions) => Promise> + getDataStream: (params?: T.IndicesGetDataStreamRequest| TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions) => Promise> + getFieldMapping: (params: T.IndicesGetFieldMappingRequest| TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptions) => Promise> + getIndexTemplate: (params?: T.IndicesGetIndexTemplateRequest| TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions) => Promise> + getMapping: (params?: T.IndicesGetMappingRequest| TB.IndicesGetMappingRequest, options?: TransportRequestOptions) => Promise> + getSettings: (params?: T.IndicesGetSettingsRequest| TB.IndicesGetSettingsRequest, options?: TransportRequestOptions) => Promise> + getTemplate: (params?: T.IndicesGetTemplateRequest| TB.IndicesGetTemplateRequest, options?: TransportRequestOptions) => Promise> + migrateToDataStream: (params: T.IndicesMigrateToDataStreamRequest| TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions) => Promise> + open: (params: T.IndicesOpenRequest| TB.IndicesOpenRequest, options?: TransportRequestOptions) => Promise> + promoteDataStream: (params: T.IndicesPromoteDataStreamRequest| TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions) => Promise> + putAlias: (params: T.IndicesPutAliasRequest| TB.IndicesPutAliasRequest, options?: TransportRequestOptions) => Promise> + putIndexTemplate: (params: T.IndicesPutIndexTemplateRequest| TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions) => Promise> + putMapping: (params: T.IndicesPutMappingRequest| TB.IndicesPutMappingRequest, options?: TransportRequestOptions) => Promise> + putSettings: (params?: T.IndicesPutSettingsRequest| TB.IndicesPutSettingsRequest, options?: TransportRequestOptions) => Promise> + putTemplate: (params: T.IndicesPutTemplateRequest| TB.IndicesPutTemplateRequest, options?: TransportRequestOptions) => Promise> + recovery: (params?: T.IndicesRecoveryRequest| TB.IndicesRecoveryRequest, options?: TransportRequestOptions) => Promise> + refresh: (params?: T.IndicesRefreshRequest| TB.IndicesRefreshRequest, options?: TransportRequestOptions) => Promise> + reloadSearchAnalyzers: (params: T.IndicesReloadSearchAnalyzersRequest| TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions) => Promise> + resolveIndex: (params: T.IndicesResolveIndexRequest| TB.IndicesResolveIndexRequest, options?: TransportRequestOptions) => Promise> + rollover: (params: T.IndicesRolloverRequest| TB.IndicesRolloverRequest, options?: TransportRequestOptions) => Promise> + segments: (params?: T.IndicesSegmentsRequest| TB.IndicesSegmentsRequest, options?: TransportRequestOptions) => Promise> + shardStores: (params?: T.IndicesShardStoresRequest| 
TB.IndicesShardStoresRequest, options?: TransportRequestOptions) => Promise> + shrink: (params: T.IndicesShrinkRequest| TB.IndicesShrinkRequest, options?: TransportRequestOptions) => Promise> + simulateIndexTemplate: (params: T.IndicesSimulateIndexTemplateRequest| TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions) => Promise> + simulateTemplate: (params?: T.IndicesSimulateTemplateRequest| TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions) => Promise> + split: (params: T.IndicesSplitRequest| TB.IndicesSplitRequest, options?: TransportRequestOptions) => Promise> + stats: (params?: T.IndicesStatsRequest| TB.IndicesStatsRequest, options?: TransportRequestOptions) => Promise> + unfreeze: (params: T.IndicesUnfreezeRequest| TB.IndicesUnfreezeRequest, options?: TransportRequestOptions) => Promise> + updateAliases: (params?: T.IndicesUpdateAliasesRequest| TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions) => Promise> + validateQuery: (params?: T.IndicesValidateQueryRequest| TB.IndicesValidateQueryRequest, options?: TransportRequestOptions) => Promise> + } + info: (params?: T.InfoRequest| TB.InfoRequest, options?: TransportRequestOptions) => Promise> + ingest: { + deletePipeline: (params: T.IngestDeletePipelineRequest| TB.IngestDeletePipelineRequest, options?: TransportRequestOptions) => Promise> + geoIpStats: (params?: T.IngestGeoIpStatsRequest| TB.IngestGeoIpStatsRequest, options?: TransportRequestOptions) => Promise> + getPipeline: (params?: T.IngestGetPipelineRequest| TB.IngestGetPipelineRequest, options?: TransportRequestOptions) => Promise> + processorGrok: (params?: T.IngestProcessorGrokRequest| TB.IngestProcessorGrokRequest, options?: TransportRequestOptions) => Promise> + putPipeline: (params: T.IngestPutPipelineRequest| TB.IngestPutPipelineRequest, options?: TransportRequestOptions) => Promise> + simulate: (params?: T.IngestSimulateRequest| TB.IngestSimulateRequest, options?: TransportRequestOptions) => Promise> + } + license: { + delete: (params?: T.LicenseDeleteRequest| TB.LicenseDeleteRequest, options?: TransportRequestOptions) => Promise> + get: (params?: T.LicenseGetRequest| TB.LicenseGetRequest, options?: TransportRequestOptions) => Promise> + getBasicStatus: (params?: T.LicenseGetBasicStatusRequest| TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptions) => Promise> + getTrialStatus: (params?: T.LicenseGetTrialStatusRequest| TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptions) => Promise> + post: (params?: T.LicensePostRequest| TB.LicensePostRequest, options?: TransportRequestOptions) => Promise> + postStartBasic: (params?: T.LicensePostStartBasicRequest| TB.LicensePostStartBasicRequest, options?: TransportRequestOptions) => Promise> + postStartTrial: (params?: T.LicensePostStartTrialRequest| TB.LicensePostStartTrialRequest, options?: TransportRequestOptions) => Promise> + } + logstash: { + deletePipeline: (params: T.LogstashDeletePipelineRequest| TB.LogstashDeletePipelineRequest, options?: TransportRequestOptions) => Promise> + getPipeline: (params: T.LogstashGetPipelineRequest| TB.LogstashGetPipelineRequest, options?: TransportRequestOptions) => Promise> + putPipeline: (params: T.LogstashPutPipelineRequest| TB.LogstashPutPipelineRequest, options?: TransportRequestOptions) => Promise> + } + mget: (params?: T.MgetRequest| TB.MgetRequest, options?: TransportRequestOptions) => Promise, TContext>> + migration: { + deprecations: (params?: T.MigrationDeprecationsRequest| TB.MigrationDeprecationsRequest, 
options?: TransportRequestOptions) => Promise> + } + ml: { + closeJob: (params: T.MlCloseJobRequest| TB.MlCloseJobRequest, options?: TransportRequestOptions) => Promise> + deleteCalendar: (params: T.MlDeleteCalendarRequest| TB.MlDeleteCalendarRequest, options?: TransportRequestOptions) => Promise> + deleteCalendarEvent: (params: T.MlDeleteCalendarEventRequest| TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptions) => Promise> + deleteCalendarJob: (params: T.MlDeleteCalendarJobRequest| TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptions) => Promise> + deleteDataFrameAnalytics: (params: T.MlDeleteDataFrameAnalyticsRequest| TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> + deleteDatafeed: (params: T.MlDeleteDatafeedRequest| TB.MlDeleteDatafeedRequest, options?: TransportRequestOptions) => Promise> + deleteExpiredData: (params?: T.MlDeleteExpiredDataRequest| TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions) => Promise> + deleteFilter: (params: T.MlDeleteFilterRequest| TB.MlDeleteFilterRequest, options?: TransportRequestOptions) => Promise> + deleteForecast: (params: T.MlDeleteForecastRequest| TB.MlDeleteForecastRequest, options?: TransportRequestOptions) => Promise> + deleteJob: (params: T.MlDeleteJobRequest| TB.MlDeleteJobRequest, options?: TransportRequestOptions) => Promise> + deleteModelSnapshot: (params: T.MlDeleteModelSnapshotRequest| TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions) => Promise> + deleteTrainedModel: (params: T.MlDeleteTrainedModelRequest| TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptions) => Promise> + deleteTrainedModelAlias: (params: T.MlDeleteTrainedModelAliasRequest| TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions) => Promise> + estimateModelMemory: (params?: T.MlEstimateModelMemoryRequest| TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptions) => Promise> + evaluateDataFrame: (params?: T.MlEvaluateDataFrameRequest| TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptions) => Promise> + explainDataFrameAnalytics: (params?: T.MlExplainDataFrameAnalyticsRequest| TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> + flushJob: (params: T.MlFlushJobRequest| TB.MlFlushJobRequest, options?: TransportRequestOptions) => Promise> + forecast: (params: T.MlForecastRequest| TB.MlForecastRequest, options?: TransportRequestOptions) => Promise> + getBuckets: (params: T.MlGetBucketsRequest| TB.MlGetBucketsRequest, options?: TransportRequestOptions) => Promise> + getCalendarEvents: (params: T.MlGetCalendarEventsRequest| TB.MlGetCalendarEventsRequest, options?: TransportRequestOptions) => Promise> + getCalendars: (params?: T.MlGetCalendarsRequest| TB.MlGetCalendarsRequest, options?: TransportRequestOptions) => Promise> + getCategories: (params: T.MlGetCategoriesRequest| TB.MlGetCategoriesRequest, options?: TransportRequestOptions) => Promise> + getDataFrameAnalytics: (params?: T.MlGetDataFrameAnalyticsRequest| TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> + getDataFrameAnalyticsStats: (params?: T.MlGetDataFrameAnalyticsStatsRequest| TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions) => Promise> + getDatafeedStats: (params?: T.MlGetDatafeedStatsRequest| TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptions) => Promise> + getDatafeeds: (params?: T.MlGetDatafeedsRequest| TB.MlGetDatafeedsRequest, options?: TransportRequestOptions) 
=> Promise> + getFilters: (params?: T.MlGetFiltersRequest| TB.MlGetFiltersRequest, options?: TransportRequestOptions) => Promise> + getInfluencers: (params: T.MlGetInfluencersRequest| TB.MlGetInfluencersRequest, options?: TransportRequestOptions) => Promise> + getJobStats: (params?: T.MlGetJobStatsRequest| TB.MlGetJobStatsRequest, options?: TransportRequestOptions) => Promise> + getJobs: (params?: T.MlGetJobsRequest| TB.MlGetJobsRequest, options?: TransportRequestOptions) => Promise> + getModelSnapshots: (params: T.MlGetModelSnapshotsRequest| TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions) => Promise> + getOverallBuckets: (params: T.MlGetOverallBucketsRequest| TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions) => Promise> + getRecords: (params: T.MlGetRecordsRequest| TB.MlGetRecordsRequest, options?: TransportRequestOptions) => Promise> + getTrainedModelDeploymentStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + getTrainedModels: (params?: T.MlGetTrainedModelsRequest| TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions) => Promise> + getTrainedModelsStats: (params?: T.MlGetTrainedModelsStatsRequest| TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions) => Promise> + inferTrainedModelDeployment: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + info: (params?: T.MlInfoRequest| TB.MlInfoRequest, options?: TransportRequestOptions) => Promise> + openJob: (params: T.MlOpenJobRequest| TB.MlOpenJobRequest, options?: TransportRequestOptions) => Promise> + postCalendarEvents: (params: T.MlPostCalendarEventsRequest| TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions) => Promise> + postData: (params: T.MlPostDataRequest| TB.MlPostDataRequest, options?: TransportRequestOptions) => Promise> + previewDataFrameAnalytics: (params?: T.MlPreviewDataFrameAnalyticsRequest| TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> + previewDatafeed: (params?: T.MlPreviewDatafeedRequest| TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions) => Promise, TContext>> + putCalendar: (params: T.MlPutCalendarRequest| TB.MlPutCalendarRequest, options?: TransportRequestOptions) => Promise> + putCalendarJob: (params: T.MlPutCalendarJobRequest| TB.MlPutCalendarJobRequest, options?: TransportRequestOptions) => Promise> + putDataFrameAnalytics: (params: T.MlPutDataFrameAnalyticsRequest| TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> + putDatafeed: (params: T.MlPutDatafeedRequest| TB.MlPutDatafeedRequest, options?: TransportRequestOptions) => Promise> + putFilter: (params: T.MlPutFilterRequest| TB.MlPutFilterRequest, options?: TransportRequestOptions) => Promise> + putJob: (params: T.MlPutJobRequest| TB.MlPutJobRequest, options?: TransportRequestOptions) => Promise> + putTrainedModel: (params: T.MlPutTrainedModelRequest| TB.MlPutTrainedModelRequest, options?: TransportRequestOptions) => Promise> + putTrainedModelAlias: (params: T.MlPutTrainedModelAliasRequest| TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions) => Promise> + putTrainedModelDefinitionPart: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + putTrainedModelVocabulary: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + resetJob: (params: T.MlResetJobRequest| TB.MlResetJobRequest, options?: TransportRequestOptions) => Promise> + revertModelSnapshot: (params: T.MlRevertModelSnapshotRequest| TB.MlRevertModelSnapshotRequest, 
options?: TransportRequestOptions) => Promise> + setUpgradeMode: (params?: T.MlSetUpgradeModeRequest| TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions) => Promise> + startDataFrameAnalytics: (params: T.MlStartDataFrameAnalyticsRequest| TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> + startDatafeed: (params: T.MlStartDatafeedRequest| TB.MlStartDatafeedRequest, options?: TransportRequestOptions) => Promise> + startTrainedModelDeployment: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + stopDataFrameAnalytics: (params: T.MlStopDataFrameAnalyticsRequest| TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> + stopDatafeed: (params: T.MlStopDatafeedRequest| TB.MlStopDatafeedRequest, options?: TransportRequestOptions) => Promise> + stopTrainedModelDeployment: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + updateDataFrameAnalytics: (params: T.MlUpdateDataFrameAnalyticsRequest| TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> + updateDatafeed: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + updateFilter: (params: T.MlUpdateFilterRequest| TB.MlUpdateFilterRequest, options?: TransportRequestOptions) => Promise> + updateJob: (params: T.MlUpdateJobRequest| TB.MlUpdateJobRequest, options?: TransportRequestOptions) => Promise> + updateModelSnapshot: (params: T.MlUpdateModelSnapshotRequest| TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions) => Promise> + upgradeJobSnapshot: (params: T.MlUpgradeJobSnapshotRequest| TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions) => Promise> + validate: (params?: T.MlValidateRequest| TB.MlValidateRequest, options?: TransportRequestOptions) => Promise> + validateDetector: (params?: T.MlValidateDetectorRequest| TB.MlValidateDetectorRequest, options?: TransportRequestOptions) => Promise> + } + monitoring: { + bulk: (params: T.MonitoringBulkRequest| TB.MonitoringBulkRequest, options?: TransportRequestOptions) => Promise> + } + msearch: (params?: T.MsearchRequest| TB.MsearchRequest, options?: TransportRequestOptions) => Promise, TContext>> + msearchTemplate: (params?: T.MsearchTemplateRequest| TB.MsearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>> + mtermvectors: (params?: T.MtermvectorsRequest| TB.MtermvectorsRequest, options?: TransportRequestOptions) => Promise> + nodes: { + clearRepositoriesMeteringArchive: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + getRepositoriesMeteringInfo: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + hotThreads: (params?: T.NodesHotThreadsRequest| TB.NodesHotThreadsRequest, options?: TransportRequestOptions) => Promise> + info: (params?: T.NodesInfoRequest| TB.NodesInfoRequest, options?: TransportRequestOptions) => Promise> + reloadSecureSettings: (params?: T.NodesReloadSecureSettingsRequest| TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions) => Promise> + stats: (params?: T.NodesStatsRequest| TB.NodesStatsRequest, options?: TransportRequestOptions) => Promise> + usage: (params?: T.NodesUsageRequest| TB.NodesUsageRequest, options?: TransportRequestOptions) => Promise> + } + openPointInTime: (params: T.OpenPointInTimeRequest| TB.OpenPointInTimeRequest, options?: TransportRequestOptions) => Promise> + ping: (params?: T.PingRequest| TB.PingRequest, options?: TransportRequestOptions) => Promise> + putScript: (params: T.PutScriptRequest| TB.PutScriptRequest, 
options?: TransportRequestOptions) => Promise> + rankEval: (params: T.RankEvalRequest| TB.RankEvalRequest, options?: TransportRequestOptions) => Promise> + reindex: (params?: T.ReindexRequest| TB.ReindexRequest, options?: TransportRequestOptions) => Promise> + reindexRethrottle: (params: T.ReindexRethrottleRequest| TB.ReindexRethrottleRequest, options?: TransportRequestOptions) => Promise> + renderSearchTemplate: (params?: T.RenderSearchTemplateRequest| TB.RenderSearchTemplateRequest, options?: TransportRequestOptions) => Promise> + rollup: { + deleteJob: (params: T.RollupDeleteJobRequest| TB.RollupDeleteJobRequest, options?: TransportRequestOptions) => Promise> + getJobs: (params?: T.RollupGetJobsRequest| TB.RollupGetJobsRequest, options?: TransportRequestOptions) => Promise> + getRollupCaps: (params?: T.RollupGetRollupCapsRequest| TB.RollupGetRollupCapsRequest, options?: TransportRequestOptions) => Promise> + getRollupIndexCaps: (params: T.RollupGetRollupIndexCapsRequest| TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions) => Promise> + putJob: (params: T.RollupPutJobRequest| TB.RollupPutJobRequest, options?: TransportRequestOptions) => Promise> + rollup: (params: T.RollupRollupRequest| TB.RollupRollupRequest, options?: TransportRequestOptions) => Promise> + rollupSearch: (params: T.RollupRollupSearchRequest| TB.RollupRollupSearchRequest, options?: TransportRequestOptions) => Promise, TContext>> + startJob: (params: T.RollupStartJobRequest| TB.RollupStartJobRequest, options?: TransportRequestOptions) => Promise> + stopJob: (params: T.RollupStopJobRequest| TB.RollupStopJobRequest, options?: TransportRequestOptions) => Promise> + } + scriptsPainlessExecute: (params?: T.ScriptsPainlessExecuteRequest| TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions) => Promise, TContext>> + scroll: (params?: T.ScrollRequest| TB.ScrollRequest, options?: TransportRequestOptions) => Promise, TContext>> + search: (params?: T.SearchRequest| TB.SearchRequest, options?: TransportRequestOptions) => Promise, TContext>> + searchMvt: (params: T.SearchMvtRequest| TB.SearchMvtRequest, options?: TransportRequestOptions) => Promise> + searchShards: (params?: T.SearchShardsRequest| TB.SearchShardsRequest, options?: TransportRequestOptions) => Promise> + searchTemplate: (params?: T.SearchTemplateRequest| TB.SearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>> + searchableSnapshots: { + cacheStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + clearCache: (params?: T.SearchableSnapshotsClearCacheRequest| TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions) => Promise> + mount: (params: T.SearchableSnapshotsMountRequest| TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions) => Promise> + stats: (params?: T.SearchableSnapshotsStatsRequest| TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions) => Promise> + } + security: { + authenticate: (params?: T.SecurityAuthenticateRequest| TB.SecurityAuthenticateRequest, options?: TransportRequestOptions) => Promise> + changePassword: (params?: T.SecurityChangePasswordRequest| TB.SecurityChangePasswordRequest, options?: TransportRequestOptions) => Promise> + clearApiKeyCache: (params: T.SecurityClearApiKeyCacheRequest| TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions) => Promise> + clearCachedPrivileges: (params: T.SecurityClearCachedPrivilegesRequest| TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions) 
=> Promise> + clearCachedRealms: (params: T.SecurityClearCachedRealmsRequest| TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions) => Promise> + clearCachedRoles: (params: T.SecurityClearCachedRolesRequest| TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptions) => Promise> + clearCachedServiceTokens: (params: T.SecurityClearCachedServiceTokensRequest| TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions) => Promise> + createApiKey: (params?: T.SecurityCreateApiKeyRequest| TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptions) => Promise> + createServiceToken: (params: T.SecurityCreateServiceTokenRequest| TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions) => Promise> + deletePrivileges: (params: T.SecurityDeletePrivilegesRequest| TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions) => Promise> + deleteRole: (params: T.SecurityDeleteRoleRequest| TB.SecurityDeleteRoleRequest, options?: TransportRequestOptions) => Promise> + deleteRoleMapping: (params: T.SecurityDeleteRoleMappingRequest| TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions) => Promise> + deleteServiceToken: (params: T.SecurityDeleteServiceTokenRequest| TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions) => Promise> + deleteUser: (params: T.SecurityDeleteUserRequest| TB.SecurityDeleteUserRequest, options?: TransportRequestOptions) => Promise> + disableUser: (params: T.SecurityDisableUserRequest| TB.SecurityDisableUserRequest, options?: TransportRequestOptions) => Promise> + enableUser: (params: T.SecurityEnableUserRequest| TB.SecurityEnableUserRequest, options?: TransportRequestOptions) => Promise> + enrollKibana: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + enrollNode: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + getApiKey: (params?: T.SecurityGetApiKeyRequest| TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions) => Promise> + getBuiltinPrivileges: (params?: T.SecurityGetBuiltinPrivilegesRequest| TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions) => Promise> + getPrivileges: (params?: T.SecurityGetPrivilegesRequest| TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions) => Promise> + getRole: (params?: T.SecurityGetRoleRequest| TB.SecurityGetRoleRequest, options?: TransportRequestOptions) => Promise> + getRoleMapping: (params?: T.SecurityGetRoleMappingRequest| TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptions) => Promise> + getServiceAccounts: (params?: T.SecurityGetServiceAccountsRequest| TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions) => Promise> + getServiceCredentials: (params: T.SecurityGetServiceCredentialsRequest| TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions) => Promise> + getToken: (params?: T.SecurityGetTokenRequest| TB.SecurityGetTokenRequest, options?: TransportRequestOptions) => Promise> + getUser: (params?: T.SecurityGetUserRequest| TB.SecurityGetUserRequest, options?: TransportRequestOptions) => Promise> + getUserPrivileges: (params?: T.SecurityGetUserPrivilegesRequest| TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions) => Promise> + grantApiKey: (params?: T.SecurityGrantApiKeyRequest| TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions) => Promise> + hasPrivileges: (params?: T.SecurityHasPrivilegesRequest| TB.SecurityHasPrivilegesRequest, options?: 
TransportRequestOptions) => Promise> + invalidateApiKey: (params?: T.SecurityInvalidateApiKeyRequest| TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions) => Promise> + invalidateToken: (params?: T.SecurityInvalidateTokenRequest| TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions) => Promise> + putPrivileges: (params?: T.SecurityPutPrivilegesRequest| TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions) => Promise> + putRole: (params: T.SecurityPutRoleRequest| TB.SecurityPutRoleRequest, options?: TransportRequestOptions) => Promise> + putRoleMapping: (params: T.SecurityPutRoleMappingRequest| TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions) => Promise> + putUser: (params: T.SecurityPutUserRequest| TB.SecurityPutUserRequest, options?: TransportRequestOptions) => Promise> + queryApiKeys: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + samlAuthenticate: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + samlCompleteLogout: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + samlInvalidate: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + samlLogout: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + samlPrepareAuthentication: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + samlServiceProviderMetadata: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + } + shutdown: { + deleteNode: (params: T.ShutdownDeleteNodeRequest| TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions) => Promise> + getNode: (params?: T.ShutdownGetNodeRequest| TB.ShutdownGetNodeRequest, options?: TransportRequestOptions) => Promise> + putNode: (params: T.ShutdownPutNodeRequest| TB.ShutdownPutNodeRequest, options?: TransportRequestOptions) => Promise> + } + slm: { + deleteLifecycle: (params: T.SlmDeleteLifecycleRequest| TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptions) => Promise> + executeLifecycle: (params: T.SlmExecuteLifecycleRequest| TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptions) => Promise> + executeRetention: (params?: T.SlmExecuteRetentionRequest| TB.SlmExecuteRetentionRequest, options?: TransportRequestOptions) => Promise> + getLifecycle: (params?: T.SlmGetLifecycleRequest| TB.SlmGetLifecycleRequest, options?: TransportRequestOptions) => Promise> + getStats: (params?: T.SlmGetStatsRequest| TB.SlmGetStatsRequest, options?: TransportRequestOptions) => Promise> + getStatus: (params?: T.SlmGetStatusRequest| TB.SlmGetStatusRequest, options?: TransportRequestOptions) => Promise> + putLifecycle: (params: T.SlmPutLifecycleRequest| TB.SlmPutLifecycleRequest, options?: TransportRequestOptions) => Promise> + start: (params?: T.SlmStartRequest| TB.SlmStartRequest, options?: TransportRequestOptions) => Promise> + stop: (params?: T.SlmStopRequest| TB.SlmStopRequest, options?: TransportRequestOptions) => Promise> + } + snapshot: { + cleanupRepository: (params: T.SnapshotCleanupRepositoryRequest| TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions) => Promise> + clone: (params: T.SnapshotCloneRequest| TB.SnapshotCloneRequest, options?: TransportRequestOptions) => Promise> + create: (params: T.SnapshotCreateRequest| TB.SnapshotCreateRequest, options?: TransportRequestOptions) => Promise> + createRepository: (params: T.SnapshotCreateRepositoryRequest| TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions) => Promise> + delete: (params: T.SnapshotDeleteRequest| 
TB.SnapshotDeleteRequest, options?: TransportRequestOptions) => Promise> + deleteRepository: (params: T.SnapshotDeleteRepositoryRequest| TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions) => Promise> + get: (params: T.SnapshotGetRequest| TB.SnapshotGetRequest, options?: TransportRequestOptions) => Promise> + getRepository: (params?: T.SnapshotGetRepositoryRequest| TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions) => Promise> + repositoryAnalyze: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + restore: (params: T.SnapshotRestoreRequest| TB.SnapshotRestoreRequest, options?: TransportRequestOptions) => Promise> + status: (params?: T.SnapshotStatusRequest| TB.SnapshotStatusRequest, options?: TransportRequestOptions) => Promise> + verifyRepository: (params: T.SnapshotVerifyRepositoryRequest| TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions) => Promise> + } + sql: { + clearCursor: (params?: T.SqlClearCursorRequest| TB.SqlClearCursorRequest, options?: TransportRequestOptions) => Promise> + deleteAsync: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + getAsync: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + getAsyncStatus: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + query: (params?: T.SqlQueryRequest| TB.SqlQueryRequest, options?: TransportRequestOptions) => Promise> + translate: (params?: T.SqlTranslateRequest| TB.SqlTranslateRequest, options?: TransportRequestOptions) => Promise> + } + ssl: { + certificates: (params?: T.SslCertificatesRequest| TB.SslCertificatesRequest, options?: TransportRequestOptions) => Promise> + } + tasks: { + cancel: (params?: T.TasksCancelRequest| TB.TasksCancelRequest, options?: TransportRequestOptions) => Promise> + get: (params: T.TasksGetRequest| TB.TasksGetRequest, options?: TransportRequestOptions) => Promise> + list: (params?: T.TasksListRequest| TB.TasksListRequest, options?: TransportRequestOptions) => Promise> + } + termsEnum: (params: T.TermsEnumRequest| TB.TermsEnumRequest, options?: TransportRequestOptions) => Promise> + termvectors: (params: T.TermvectorsRequest| TB.TermvectorsRequest, options?: TransportRequestOptions) => Promise> + textStructure: { + findStructure: (params: T.TextStructureFindStructureRequest| TB.TextStructureFindStructureRequest, options?: TransportRequestOptions) => Promise> + } + transform: { + deleteTransform: (params: T.TransformDeleteTransformRequest| TB.TransformDeleteTransformRequest, options?: TransportRequestOptions) => Promise> + getTransform: (params?: T.TransformGetTransformRequest| TB.TransformGetTransformRequest, options?: TransportRequestOptions) => Promise> + getTransformStats: (params: T.TransformGetTransformStatsRequest| TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions) => Promise> + previewTransform: (params?: T.TransformPreviewTransformRequest| TB.TransformPreviewTransformRequest, options?: TransportRequestOptions) => Promise, TContext>> + putTransform: (params: T.TransformPutTransformRequest| TB.TransformPutTransformRequest, options?: TransportRequestOptions) => Promise> + startTransform: (params: T.TransformStartTransformRequest| TB.TransformStartTransformRequest, options?: TransportRequestOptions) => Promise> + stopTransform: (params: T.TransformStopTransformRequest| TB.TransformStopTransformRequest, options?: TransportRequestOptions) => Promise> + updateTransform: (params?: T.TransformUpdateTransformRequest| TB.TransformUpdateTransformRequest, options?: 
TransportRequestOptions) => Promise> + } + update: (params: T.UpdateRequest| TB.UpdateRequest, options?: TransportRequestOptions) => Promise, TContext>> + updateByQuery: (params: T.UpdateByQueryRequest| TB.UpdateByQueryRequest, options?: TransportRequestOptions) => Promise> + updateByQueryRethrottle: (params: T.UpdateByQueryRethrottleRequest| TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions) => Promise> + watcher: { + ackWatch: (params: T.WatcherAckWatchRequest| TB.WatcherAckWatchRequest, options?: TransportRequestOptions) => Promise> + activateWatch: (params: T.WatcherActivateWatchRequest| TB.WatcherActivateWatchRequest, options?: TransportRequestOptions) => Promise> + deactivateWatch: (params: T.WatcherDeactivateWatchRequest| TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptions) => Promise> + deleteWatch: (params: T.WatcherDeleteWatchRequest| TB.WatcherDeleteWatchRequest, options?: TransportRequestOptions) => Promise> + executeWatch: (params?: T.WatcherExecuteWatchRequest| TB.WatcherExecuteWatchRequest, options?: TransportRequestOptions) => Promise> + getWatch: (params: T.WatcherGetWatchRequest| TB.WatcherGetWatchRequest, options?: TransportRequestOptions) => Promise> + putWatch: (params: T.WatcherPutWatchRequest| TB.WatcherPutWatchRequest, options?: TransportRequestOptions) => Promise> + queryWatches: (params?: T.WatcherQueryWatchesRequest| TB.WatcherQueryWatchesRequest, options?: TransportRequestOptions) => Promise> + start: (params?: T.WatcherStartRequest| TB.WatcherStartRequest, options?: TransportRequestOptions) => Promise> + stats: (params?: T.WatcherStatsRequest| TB.WatcherStatsRequest, options?: TransportRequestOptions) => Promise> + stop: (params?: T.WatcherStopRequest| TB.WatcherStopRequest, options?: TransportRequestOptions) => Promise> + } + xpack: { + info: (params?: T.XpackInfoRequest| TB.XpackInfoRequest, options?: TransportRequestOptions) => Promise> + usage: (params?: T.XpackUsageRequest| TB.XpackUsageRequest, options?: TransportRequestOptions) => Promise> + } +} + +export type { KibanaClient } diff --git a/src/api/types.ts b/src/api/types.ts new file mode 100644 index 000000000..12ea36bd0 --- /dev/null +++ b/src/api/types.ts @@ -0,0 +1,15456 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable @typescript-eslint/array-type */ +/* eslint-disable @typescript-eslint/no-empty-interface */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +/** + * We are still working on this type, it will arrive soon. + * If it's critical for you, please open an issue. 
+ * https://github.com/elastic/elasticsearch-js
+ */
+export type TODO = Record
+
+export interface BulkCreateOperation extends BulkOperation {
+}
+
+export interface BulkCreateResponseItem extends BulkResponseItemBase {
+}
+
+export interface BulkDeleteOperation extends BulkOperation {
+}
+
+export interface BulkDeleteResponseItem extends BulkResponseItemBase {
+}
+
+export interface BulkIndexOperation extends BulkOperation {
+}
+
+export interface BulkIndexResponseItem extends BulkResponseItemBase {
+}
+
+export interface BulkOperation {
+  _id?: Id
+  _index?: IndexName
+  retry_on_conflict?: integer
+  routing?: Routing
+  version?: VersionNumber
+  version_type?: VersionType
+}
+
+export interface BulkOperationContainer {
+  index?: BulkIndexOperation
+  create?: BulkCreateOperation
+  update?: BulkUpdateOperation
+  delete?: BulkDeleteOperation
+}
+
+export interface BulkRequest extends RequestBase {
+  index?: IndexName
+  type?: Type
+  pipeline?: string
+  refresh?: Refresh
+  routing?: Routing
+  _source?: boolean | Fields
+  _source_excludes?: Fields
+  _source_includes?: Fields
+  timeout?: Time
+  wait_for_active_shards?: WaitForActiveShards
+  require_alias?: boolean
+  operations?: (BulkOperationContainer | TSource)[]
+}
+
+export interface BulkResponse {
+  errors: boolean
+  items: BulkResponseItemContainer[]
+  took: long
+  ingest_took?: long
+}
+
+export interface BulkResponseItemBase {
+  _id?: string | null
+  _index: string
+  status: integer
+  error?: ErrorCause
+  _primary_term?: long
+  result?: string
+  _seq_no?: SequenceNumber
+  _shards?: ShardStatistics
+  _type?: string
+  _version?: VersionNumber
+  forced_refresh?: boolean
+  get?: InlineGet>
+}
+
+export interface BulkResponseItemContainer {
+  index?: BulkIndexResponseItem
+  create?: BulkCreateResponseItem
+  update?: BulkUpdateResponseItem
+  delete?: BulkDeleteResponseItem
+}
+
+export interface BulkUpdateOperation extends BulkOperation {
+}
+
+export interface BulkUpdateResponseItem extends BulkResponseItemBase {
+}
+
+export interface ClearScrollRequest extends RequestBase {
+  scroll_id?: Ids
+}
+
+export interface ClearScrollResponse {
+  succeeded: boolean
+  num_freed: integer
+}
+
+export interface ClosePointInTimeRequest extends RequestBase {
+  id: Id
+}
+
+export interface ClosePointInTimeResponse {
+  succeeded: boolean
+  num_freed: integer
+}
+
+export interface CountRequest extends RequestBase {
+  index?: Indices
+  allow_no_indices?: boolean
+  analyzer?: string
+  analyze_wildcard?: boolean
+  default_operator?: DefaultOperator
+  df?: string
+  expand_wildcards?: ExpandWildcards
+  ignore_throttled?: boolean
+  ignore_unavailable?: boolean
+  lenient?: boolean
+  min_score?: double
+  preference?: string
+  routing?: Routing
+  terminate_after?: long
+  q?: string
+  query?: QueryDslQueryContainer
+}
+
+export interface CountResponse {
+  count: long
+  _shards: ShardStatistics
+}
+
+export interface CreateRequest extends RequestBase {
+  id: Id
+  index: IndexName
+  type?: Type
+  pipeline?: string
+  refresh?: Refresh
+  routing?: Routing
+  timeout?: Time
+  version?: VersionNumber
+  version_type?: VersionType
+  wait_for_active_shards?: WaitForActiveShards
+  document?: TDocument
+}
+
+export interface CreateResponse extends WriteResponseBase {
+}
+
+export interface DeleteRequest extends RequestBase {
+  id: Id
+  index: IndexName
+  type?: Type
+  if_primary_term?: long
+  if_seq_no?: SequenceNumber
+  refresh?: Refresh
+  routing?: Routing
+  timeout?: Time
+  version?: VersionNumber
+  version_type?: VersionType
+  wait_for_active_shards?: WaitForActiveShards
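+  // Usage sketch (illustrative; the index, id, and concurrency values are
+  // hypothetical): `if_seq_no` and `if_primary_term` opt into optimistic
+  // concurrency control, so the delete is rejected with a version conflict
+  // if the document changed since it was last read:
+  //
+  //   const req: DeleteRequest = { index: 'my-index', id: '1', if_seq_no: 5, if_primary_term: 1 }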
+} + +export interface DeleteResponse extends WriteResponseBase { +} + +export interface DeleteByQueryRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + conflicts?: Conflicts + default_operator?: DefaultOperator + df?: string + expand_wildcards?: ExpandWildcards + from?: long + ignore_unavailable?: boolean + lenient?: boolean + max_docs?: long + preference?: string + refresh?: boolean + request_cache?: boolean + requests_per_second?: long + routing?: Routing + q?: string + scroll?: Time + scroll_size?: long + search_timeout?: Time + search_type?: SearchType + size?: long + slices?: long + sort?: string[] + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + stats?: string[] + terminate_after?: long + timeout?: Time + version?: boolean + wait_for_active_shards?: WaitForActiveShards + wait_for_completion?: boolean + query?: QueryDslQueryContainer + slice?: SlicedScroll +} + +export interface DeleteByQueryResponse { + batches?: long + deleted?: long + failures?: BulkIndexByScrollFailure[] + noops?: long + requests_per_second?: float + retries?: Retries + slice_id?: integer + task?: TaskId + throttled_millis?: long + throttled_until_millis?: long + timed_out?: boolean + took?: long + total?: long + version_conflicts?: long +} + +export interface DeleteByQueryRethrottleRequest extends RequestBase { + task_id: Id + requests_per_second?: long +} + +export interface DeleteByQueryRethrottleResponse extends TasksListResponse { +} + +export interface DeleteScriptRequest extends RequestBase { + id: Id + master_timeout?: Time + timeout?: Time +} + +export interface DeleteScriptResponse extends AcknowledgedResponseBase { +} + +export interface ExistsRequest extends RequestBase { + id: Id + index: IndexName + preference?: string + realtime?: boolean + refresh?: boolean + routing?: Routing + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + stored_fields?: Fields + version?: VersionNumber + version_type?: VersionType +} + +export type ExistsResponse = boolean + +export interface ExistsSourceRequest extends RequestBase { + id: Id + index: IndexName + type?: Type + preference?: string + realtime?: boolean + refresh?: boolean + routing?: Routing + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + version?: VersionNumber + version_type?: VersionType +} + +export type ExistsSourceResponse = boolean + +export interface ExplainExplanation { + description: string + details: ExplainExplanationDetail[] + value: float +} + +export interface ExplainExplanationDetail { + description: string + details?: ExplainExplanationDetail[] + value: float +} + +export interface ExplainRequest extends RequestBase { + id: Id + index: IndexName + analyzer?: string + analyze_wildcard?: boolean + default_operator?: DefaultOperator + df?: string + lenient?: boolean + preference?: string + routing?: Routing + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + stored_fields?: Fields + q?: string + query?: QueryDslQueryContainer +} + +export interface ExplainResponse { + _index: IndexName + _type?: Type + _id: Id + matched: boolean + explanation?: ExplainExplanationDetail + get?: InlineGet +} + +export interface FieldCapsFieldCapability { + aggregatable: boolean + indices?: Indices + meta?: Record + non_aggregatable_indices?: Indices + non_searchable_indices?: Indices + searchable: boolean + type: string + metadata_field?: 
boolean +} + +export interface FieldCapsRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + fields?: Fields + ignore_unavailable?: boolean + include_unmapped?: boolean + index_filter?: QueryDslQueryContainer + runtime_mappings?: MappingRuntimeFields +} + +export interface FieldCapsResponse { + indices: Indices + fields: Record> +} + +export interface GetRequest extends RequestBase { + id: Id + index: IndexName + preference?: string + realtime?: boolean + refresh?: boolean + routing?: Routing + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + stored_fields?: Fields + version?: VersionNumber + version_type?: VersionType +} + +export interface GetResponse { + _index: IndexName + fields?: Record + found: boolean + _id: Id + _primary_term?: long + _routing?: string + _seq_no?: SequenceNumber + _source?: TDocument + _type?: Type + _version?: VersionNumber +} + +export interface GetScriptRequest extends RequestBase { + id: Id + master_timeout?: Time +} + +export interface GetScriptResponse { + _id: Id + found: boolean + script?: StoredScript +} + +export interface GetScriptContextContext { + methods: GetScriptContextContextMethod[] + name: Name +} + +export interface GetScriptContextContextMethod { + name: Name + return_type: string + params: GetScriptContextContextMethodParam[] +} + +export interface GetScriptContextContextMethodParam { + name: Name + type: string +} + +export interface GetScriptContextRequest extends RequestBase { +} + +export interface GetScriptContextResponse { + contexts: GetScriptContextContext[] +} + +export interface GetScriptLanguagesLanguageContext { + contexts: string[] + language: ScriptLanguage +} + +export interface GetScriptLanguagesRequest extends RequestBase { +} + +export interface GetScriptLanguagesResponse { + language_contexts: GetScriptLanguagesLanguageContext[] + types_allowed: string[] +} + +export interface GetSourceRequest extends GetRequest { +} + +export type GetSourceResponse = TDocument + +export interface IndexRequest extends RequestBase { + id?: Id + index: IndexName + if_primary_term?: long + if_seq_no?: SequenceNumber + op_type?: OpType + pipeline?: string + refresh?: Refresh + routing?: Routing + timeout?: Time + version?: VersionNumber + version_type?: VersionType + wait_for_active_shards?: WaitForActiveShards + require_alias?: boolean + document?: TDocument +} + +export interface IndexResponse extends WriteResponseBase { +} + +export interface InfoRequest extends RequestBase { +} + +export interface InfoResponse { + cluster_name: Name + cluster_uuid: Uuid + name: Name + tagline: string + version: ElasticsearchVersionInfo +} + +export interface MgetHit { + error?: MainError + fields?: Record + found?: boolean + _id: Id + _index: IndexName + _primary_term?: long + _routing?: Routing + _seq_no?: SequenceNumber + _source?: TDocument + _type?: Type + _version?: VersionNumber +} + +export type MgetMultiGetId = string | integer + +export interface MgetOperation { + _id: MgetMultiGetId + _index?: IndexName + routing?: Routing + _source?: boolean | Fields | SearchSourceFilter + stored_fields?: Fields + _type?: Type + version?: VersionNumber + version_type?: VersionType +} + +export interface MgetRequest extends RequestBase { + index?: IndexName + preference?: string + realtime?: boolean + refresh?: boolean + routing?: Routing + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + stored_fields?: Fields + docs?: 
MgetOperation[] + ids?: MgetMultiGetId[] +} + +export interface MgetResponse { + docs: MgetHit[] +} + +export interface MsearchBody { + aggregations?: Record + aggs?: Record + query?: QueryDslQueryContainer + from?: integer + size?: integer + pit?: SearchPointInTimeReference + track_total_hits?: boolean | integer + suggest?: SearchSuggestContainer | Record +} + +export interface MsearchHeader { + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + index?: Indices + preference?: string + request_cache?: boolean + routing?: string + search_type?: SearchType +} + +export interface MsearchRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + ccs_minimize_roundtrips?: boolean + expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean + max_concurrent_searches?: long + max_concurrent_shard_requests?: long + pre_filter_shard_size?: long + search_type?: SearchType + rest_total_hits_as_int?: boolean + typed_keys?: boolean + searches?: (MsearchHeader | MsearchBody)[] +} + +export interface MsearchResponse { + took: long + responses: (MsearchSearchResult | ErrorResponseBase)[] +} + +export interface MsearchSearchResult extends SearchResponse { + status: integer +} + +export interface MsearchTemplateRequest extends RequestBase { + index?: Indices + ccs_minimize_roundtrips?: boolean + max_concurrent_searches?: long + search_type?: SearchType + rest_total_hits_as_int?: boolean + typed_keys?: boolean + search_templates?: MsearchTemplateTemplateItem[] +} + +export interface MsearchTemplateResponse { + responses: (SearchResponse | ErrorResponseBase)[] + took: long +} + +export interface MsearchTemplateTemplateItem { + id?: Id + index?: Indices + params?: Record + source?: string +} + +export interface MtermvectorsOperation { + _id: Id + _index?: IndexName + doc?: object + fields?: Fields + field_statistics?: boolean + filter?: TermvectorsFilter + offsets?: boolean + payloads?: boolean + positions?: boolean + routing?: Routing + term_statistics?: boolean + version?: VersionNumber + version_type?: VersionType +} + +export interface MtermvectorsRequest extends RequestBase { + index?: IndexName + ids?: Id[] + fields?: Fields + field_statistics?: boolean + offsets?: boolean + payloads?: boolean + positions?: boolean + preference?: string + realtime?: boolean + routing?: Routing + term_statistics?: boolean + version?: VersionNumber + version_type?: VersionType + docs?: MtermvectorsOperation[] +} + +export interface MtermvectorsResponse { + docs: MtermvectorsTermVectorsResult[] +} + +export interface MtermvectorsTermVectorsResult { + _id: Id + _index: IndexName + _version?: VersionNumber + took?: long + found?: boolean + term_vectors?: Record + error?: ErrorCause +} + +export interface OpenPointInTimeRequest extends RequestBase { + index: Indices + keep_alive?: Time +} + +export interface OpenPointInTimeResponse { + id: Id +} + +export interface PingRequest extends RequestBase { +} + +export type PingResponse = boolean + +export interface PutScriptRequest extends RequestBase { + id: Id + context?: Name + master_timeout?: Time + timeout?: Time + script?: StoredScript +} + +export interface PutScriptResponse extends AcknowledgedResponseBase { +} + +export interface RankEvalDocumentRating { + _id: Id + _index: IndexName + rating: integer +} + +export interface RankEvalRankEvalHit { + _id: Id + _index: IndexName + _type?: Type + _score: double +} + +export interface RankEvalRankEvalHitItem { + hit: 
RankEvalRankEvalHit + rating?: double +} + +export interface RankEvalRankEvalMetric { + precision?: RankEvalRankEvalMetricPrecision + recall?: RankEvalRankEvalMetricRecall + mean_reciprocal_rank?: RankEvalRankEvalMetricMeanReciprocalRank + dcg?: RankEvalRankEvalMetricDiscountedCumulativeGain + expected_reciprocal_rank?: RankEvalRankEvalMetricExpectedReciprocalRank +} + +export interface RankEvalRankEvalMetricBase { + k?: integer +} + +export interface RankEvalRankEvalMetricDetail { + metric_score: double + unrated_docs: RankEvalUnratedDocument[] + hits: RankEvalRankEvalHitItem[] + metric_details: Record> +} + +export interface RankEvalRankEvalMetricDiscountedCumulativeGain extends RankEvalRankEvalMetricBase { + normalize?: boolean +} + +export interface RankEvalRankEvalMetricExpectedReciprocalRank extends RankEvalRankEvalMetricBase { + maximum_relevance: integer +} + +export interface RankEvalRankEvalMetricMeanReciprocalRank extends RankEvalRankEvalMetricRatingTreshold { +} + +export interface RankEvalRankEvalMetricPrecision extends RankEvalRankEvalMetricRatingTreshold { + ignore_unlabeled?: boolean +} + +export interface RankEvalRankEvalMetricRatingTreshold extends RankEvalRankEvalMetricBase { + relevant_rating_threshold?: integer +} + +export interface RankEvalRankEvalMetricRecall extends RankEvalRankEvalMetricRatingTreshold { +} + +export interface RankEvalRankEvalQuery { + query: QueryDslQueryContainer + size?: integer +} + +export interface RankEvalRankEvalRequestItem { + id: Id + request?: RankEvalRankEvalQuery + ratings: RankEvalDocumentRating[] + template_id?: Id + params?: Record +} + +export interface RankEvalRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + search_type?: string + requests: RankEvalRankEvalRequestItem[] + metric?: RankEvalRankEvalMetric +} + +export interface RankEvalResponse { + metric_score: double + details: Record + failures: Record +} + +export interface RankEvalUnratedDocument { + _id: Id + _index: IndexName +} + +export interface ReindexDestination { + index: IndexName + op_type?: OpType + pipeline?: string + routing?: Routing + version_type?: VersionType +} + +export interface ReindexRemoteSource { + connect_timeout: Time + host: Host + username: Username + password: Password + socket_timeout: Time +} + +export interface ReindexRequest extends RequestBase { + refresh?: boolean + requests_per_second?: long + scroll?: Time + slices?: long + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + wait_for_completion?: boolean + require_alias?: boolean + conflicts?: Conflicts + dest?: ReindexDestination + max_docs?: long + script?: Script + size?: long + source?: ReindexSource +} + +export interface ReindexResponse { + batches?: long + created?: long + deleted?: long + failures?: BulkIndexByScrollFailure[] + noops?: long + retries?: Retries + requests_per_second?: long + slice_id?: integer + task?: TaskId + throttled_millis?: EpochMillis + throttled_until_millis?: EpochMillis + timed_out?: boolean + took?: Time + total?: long + updated?: long + version_conflicts?: long +} + +export interface ReindexSource { + index: Indices + query?: QueryDslQueryContainer + remote?: ReindexRemoteSource + size?: integer + slice?: SlicedScroll + sort?: SearchSort + _source?: Fields +} + +export interface ReindexRethrottleReindexNode extends SpecUtilsBaseNode { + tasks: Record +} + +export interface ReindexRethrottleReindexStatus { + batches: long + created: long + deleted: 
long + noops: long + requests_per_second: float + retries: Retries + throttled_millis: long + throttled_until_millis: long + total: long + updated: long + version_conflicts: long +} + +export interface ReindexRethrottleReindexTask { + action: string + cancellable: boolean + description: string + id: long + node: Name + running_time_in_nanos: long + start_time_in_millis: long + status: ReindexRethrottleReindexStatus + type: string + headers: HttpHeaders +} + +export interface ReindexRethrottleRequest extends RequestBase { + task_id: Id + requests_per_second?: long +} + +export interface ReindexRethrottleResponse { + nodes: Record +} + +export interface RenderSearchTemplateRequest extends RequestBase { + id?: Id + file?: string + params?: Record + source?: string +} + +export interface RenderSearchTemplateResponse { + template_output: Record +} + +export interface ScriptsPainlessExecutePainlessContextSetup { + document: any + index: IndexName + query: QueryDslQueryContainer +} + +export interface ScriptsPainlessExecutePainlessExecutionPosition { + offset: integer + start: integer + end: integer +} + +export interface ScriptsPainlessExecuteRequest extends RequestBase { + context?: string + context_setup?: ScriptsPainlessExecutePainlessContextSetup + script?: InlineScript +} + +export interface ScriptsPainlessExecuteResponse { + result: TResult +} + +export interface ScrollRequest extends RequestBase { + scroll_id?: Id + scroll?: Time + rest_total_hits_as_int?: boolean +} + +export interface ScrollResponse extends SearchResponse { +} + +export interface SearchRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + allow_partial_search_results?: boolean + analyzer?: string + analyze_wildcard?: boolean + batched_reduce_size?: long + ccs_minimize_roundtrips?: boolean + default_operator?: DefaultOperator + df?: string + docvalue_fields?: Fields + expand_wildcards?: ExpandWildcards + explain?: boolean + ignore_throttled?: boolean + ignore_unavailable?: boolean + lenient?: boolean + max_concurrent_shard_requests?: long + min_compatible_shard_node?: VersionString + preference?: string + pre_filter_shard_size?: long + request_cache?: boolean + routing?: Routing + scroll?: Time + search_type?: SearchType + stats?: string[] + stored_fields?: Fields + suggest_field?: Field + suggest_mode?: SuggestMode + suggest_size?: long + suggest_text?: string + terminate_after?: long + timeout?: Time + track_total_hits?: boolean | integer + track_scores?: boolean + typed_keys?: boolean + rest_total_hits_as_int?: boolean + version?: boolean + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + seq_no_primary_term?: boolean + q?: string + size?: integer + from?: integer + sort?: string | string[] + aggs?: Record + aggregations?: Record + collapse?: SearchFieldCollapse + highlight?: SearchHighlight + indices_boost?: Record[] + min_score?: double + post_filter?: QueryDslQueryContainer + profile?: boolean + query?: QueryDslQueryContainer + rescore?: SearchRescore | SearchRescore[] + script_fields?: Record + search_after?: SearchSortResults + slice?: SlicedScroll + fields?: (Field | DateField)[] + suggest?: SearchSuggestContainer | Record + pit?: SearchPointInTimeReference + runtime_mappings?: MappingRuntimeFields +} + +export interface SearchResponse { + took: long + timed_out: boolean + _shards: ShardStatistics + hits: SearchHitsMetadata + aggregations?: Record + _clusters?: ClusterStatistics + documents?: TDocument[] + fields?: Record + max_score?: double + 
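+  // Usage sketch (illustrative; `MyDoc` and the index name are hypothetical,
+  // and this assumes the document-type generic this interface declares): the
+  // document type flows through to `hits.hits[n]._source`:
+  //
+  //   interface MyDoc { title: string }
+  //   const res = await client.search<MyDoc>({ index: 'my-index', query: { match_all: {} } })
+  //   const titles = res.hits.hits.map(h => h._source?.title)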
num_reduce_phases?: long + profile?: SearchProfile + pit_id?: Id + _scroll_id?: ScrollId + suggest?: Record[]> + terminated_early?: boolean +} + +export interface SearchAggregationBreakdown { + build_aggregation: long + build_aggregation_count: long + build_leaf_collector: long + build_leaf_collector_count: long + collect: long + collect_count: long + initialize: long + initialize_count: long + post_collection?: long + post_collection_count?: long + reduce: long + reduce_count: long +} + +export interface SearchAggregationProfile { + breakdown: SearchAggregationBreakdown + description: string + time_in_nanos: long + type: string + debug?: SearchAggregationProfileDebug + children?: SearchAggregationProfileDebug[] +} + +export interface SearchAggregationProfileDebug { + [key: string]: never +} + +export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' + +export interface SearchCollector { + name: string + reason: string + time_in_nanos: long + children?: SearchCollector[] +} + +export interface SearchCompletionSuggestOption { + collate_match?: boolean + contexts?: Record + fields?: Record + _id: string + _index: IndexName + _type?: Type + _routing?: Routing + _score: double + _source: TDocument + text: string +} + +export interface SearchCompletionSuggester extends SearchSuggesterBase { + contexts?: Record + fuzzy?: SearchSuggestFuzziness + prefix?: string + regex?: string + skip_duplicates?: boolean +} + +export type SearchContext = string | QueryDslGeoLocation + +export interface SearchDirectGenerator { + field: Field + max_edits?: integer + max_inspections?: float + max_term_freq?: float + min_doc_freq?: float + min_word_length?: integer + post_filter?: string + pre_filter?: string + prefix_length?: integer + size?: integer + suggest_mode?: SuggestMode +} + +export interface SearchDocValueField { + field: Field + format?: string +} + +export interface SearchFieldAndFormat { + field: Field + format?: string + include_unmapped?: boolean +} + +export interface SearchFieldCollapse { + field: Field + inner_hits?: SearchInnerHits | SearchInnerHits[] + max_concurrent_group_searches?: integer +} + +export interface SearchFieldSort { + missing?: AggregationsMissing + mode?: SearchSortMode + nested?: SearchNestedSortValue + order?: SearchSortOrder + unmapped_type?: MappingFieldType +} + +export interface SearchGeoDistanceSortKeys { + mode?: SearchSortMode + distance_type?: GeoDistanceType + order?: SearchSortOrder + unit?: DistanceUnit +} +export type SearchGeoDistanceSort = SearchGeoDistanceSortKeys | +{ [property: string]: QueryDslGeoLocation | QueryDslGeoLocation[] } + +export interface SearchHighlight { + fields: Record + type?: SearchHighlighterType + boundary_chars?: string + boundary_max_scan?: integer + boundary_scanner?: SearchBoundaryScanner + boundary_scanner_locale?: string + encoder?: SearchHighlighterEncoder + fragmenter?: SearchHighlighterFragmenter + fragment_offset?: integer + fragment_size?: integer + max_fragment_length?: integer + no_match_size?: integer + number_of_fragments?: integer + order?: SearchHighlighterOrder + post_tags?: string[] + pre_tags?: string[] + require_field_match?: boolean + tags_schema?: SearchHighlighterTagsSchema + highlight_query?: QueryDslQueryContainer + max_analyzed_offset?: string | integer +} + +export interface SearchHighlightField { + boundary_chars?: string + boundary_max_scan?: integer + boundary_scanner?: SearchBoundaryScanner + boundary_scanner_locale?: string + field?: Field + force_source?: boolean + fragmenter?: 
SearchHighlighterFragmenter + fragment_offset?: integer + fragment_size?: integer + highlight_query?: QueryDslQueryContainer + matched_fields?: Fields + max_fragment_length?: integer + no_match_size?: integer + number_of_fragments?: integer + order?: SearchHighlighterOrder + phrase_limit?: integer + post_tags?: string[] + pre_tags?: string[] + require_field_match?: boolean + tags_schema?: SearchHighlighterTagsSchema + type?: SearchHighlighterType | string +} + +export type SearchHighlighterEncoder = 'default' | 'html' + +export type SearchHighlighterFragmenter = 'simple' | 'span' + +export type SearchHighlighterOrder = 'score' + +export type SearchHighlighterTagsSchema = 'styled' + +export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' + +export interface SearchHit { + _index: IndexName + _id: Id + _score?: double + _type?: Type + _explanation?: ExplainExplanation + fields?: Record + highlight?: Record + inner_hits?: Record + matched_queries?: string[] + _nested?: SearchNestedIdentity + _ignored?: string[] + _shard?: string + _node?: string + _routing?: string + _source?: TDocument + _seq_no?: SequenceNumber + _primary_term?: long + _version?: VersionNumber + sort?: SearchSortResults +} + +export interface SearchHitsMetadata { + total: SearchTotalHits | long + hits: SearchHit[] + max_score?: double +} + +export interface SearchInnerHits { + name?: Name + size?: integer + from?: integer + collapse?: SearchFieldCollapse + docvalue_fields?: (SearchFieldAndFormat | Field)[] + explain?: boolean + highlight?: SearchHighlight + ignore_unmapped?: boolean + script_fields?: Record + seq_no_primary_term?: boolean + fields?: Fields + sort?: SearchSort + _source?: boolean | SearchSourceFilter + stored_field?: Fields + track_scores?: boolean + version?: boolean +} + +export interface SearchInnerHitsMetadata { + total: SearchTotalHits | long + hits: SearchHit>[] + max_score?: double +} + +export interface SearchInnerHitsResult { + hits: SearchInnerHitsMetadata +} + +export interface SearchLaplaceSmoothingModel { + alpha: double +} + +export interface SearchLinearInterpolationSmoothingModel { + bigram_lambda: double + trigram_lambda: double + unigram_lambda: double +} + +export interface SearchNestedIdentity { + field: Field + offset: integer + _nested?: SearchNestedIdentity +} + +export interface SearchNestedSortValue { + filter?: QueryDslQueryContainer + max_children?: integer + path: Field +} + +export interface SearchPhraseSuggestCollate { + params?: Record + prune?: boolean + query: SearchPhraseSuggestCollateQuery +} + +export interface SearchPhraseSuggestCollateQuery { + id?: Id + source?: string +} + +export interface SearchPhraseSuggestHighlight { + post_tag: string + pre_tag: string +} + +export interface SearchPhraseSuggestOption { + text: string + highlighted: string + score: double +} + +export interface SearchPhraseSuggester extends SearchSuggesterBase { + collate?: SearchPhraseSuggestCollate + confidence?: double + direct_generator?: SearchDirectGenerator[] + force_unigrams?: boolean + gram_size?: integer + highlight?: SearchPhraseSuggestHighlight + max_errors?: double + real_word_error_likelihood?: double + separator?: string + shard_size?: integer + smoothing?: SearchSmoothingModelContainer + text?: string + token_limit?: integer +} + +export interface SearchPointInTimeReference { + id: Id + keep_alive?: Time +} + +export interface SearchProfile { + shards: SearchShardProfile[] +} + +export interface SearchQueryBreakdown { + advance: long + advance_count: long + build_scorer: 
long + build_scorer_count: long + create_weight: long + create_weight_count: long + match: long + match_count: long + shallow_advance: long + shallow_advance_count: long + next_doc: long + next_doc_count: long + score: long + score_count: long + compute_max_score: long + compute_max_score_count: long + set_min_competitive_score: long + set_min_competitive_score_count: long +} + +export interface SearchQueryProfile { + breakdown: SearchQueryBreakdown + description: string + time_in_nanos: long + type: string + children?: SearchQueryProfile[] +} + +export interface SearchRescore { + query: SearchRescoreQuery + window_size?: integer +} + +export interface SearchRescoreQuery { + rescore_query: QueryDslQueryContainer + query_weight?: double + rescore_query_weight?: double + score_mode?: SearchScoreMode +} + +export type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total' + +export interface SearchScoreSort { + mode?: SearchSortMode + order?: SearchSortOrder +} + +export interface SearchScriptSort { + order?: SearchSortOrder + script: Script + type?: string +} + +export interface SearchSearchProfile { + collector: SearchCollector[] + query: SearchQueryProfile[] + rewrite_time: long +} + +export interface SearchShardProfile { + aggregations: SearchAggregationProfile[] + id: string + searches: SearchSearchProfile[] +} + +export interface SearchSmoothingModelContainer { + laplace?: SearchLaplaceSmoothingModel + linear_interpolation?: SearchLinearInterpolationSmoothingModel + stupid_backoff?: SearchStupidBackoffSmoothingModel +} + +export type SearchSort = SearchSortCombinations | SearchSortCombinations[] + +export type SearchSortCombinations = Field | SearchSortContainer | SearchSortOrder + +export interface SearchSortContainerKeys { + _score?: SearchScoreSort + _doc?: SearchScoreSort + _geo_distance?: SearchGeoDistanceSort + _script?: SearchScriptSort +} +export type SearchSortContainer = SearchSortContainerKeys | +{ [property: string]: SearchFieldSort | SearchSortOrder } + +export type SearchSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' + +export type SearchSortOrder = 'asc' | 'desc' | '_doc' + +export type SearchSortResults = (long | double | string | null)[] + +export interface SearchSourceFilter { + excludes?: Fields + includes?: Fields + exclude?: Fields + include?: Fields +} + +export type SearchStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram' + +export interface SearchStupidBackoffSmoothingModel { + discount: double +} + +export interface SearchSuggest { + length: integer + offset: integer + options: SearchSuggestOption[] + text: string +} + +export interface SearchSuggestContainer { + completion?: SearchCompletionSuggester + phrase?: SearchPhraseSuggester + prefix?: string + regex?: string + term?: SearchTermSuggester + text?: string +} + +export interface SearchSuggestContextQuery { + boost?: double + context: SearchContext + neighbours?: Distance[] | integer[] + precision?: Distance | integer + prefix?: boolean +} + +export interface SearchSuggestFuzziness { + fuzziness: Fuzziness + min_length: integer + prefix_length: integer + transpositions: boolean + unicode_aware: boolean +} + +export type SearchSuggestOption = SearchCompletionSuggestOption | SearchPhraseSuggestOption | SearchTermSuggestOption + +export type SearchSuggestSort = 'score' | 'frequency' + +export interface SearchSuggesterBase { + field: Field + analyzer?: string + size?: integer +} + +export interface SearchTermSuggestOption { + text: string + freq?: long + 
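+  // Usage sketch (illustrative; the field name and misspelled input text are
+  // hypothetical): a term suggester built from the suggest types above:
+  //
+  //   const suggest: Record<string, SearchSuggestContainer> = {
+  //     my_suggestion: { text: 'serach', term: { field: 'title' } },
+  //   }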
score: double +} + +export interface SearchTermSuggester extends SearchSuggesterBase { + lowercase_terms?: boolean + max_edits?: integer + max_inspections?: integer + max_term_freq?: float + min_doc_freq?: float + min_word_length?: integer + prefix_length?: integer + shard_size?: integer + sort?: SearchSuggestSort + string_distance?: SearchStringDistance + suggest_mode?: SuggestMode + text?: string +} + +export interface SearchTotalHits { + relation: SearchTotalHitsRelation + value: long +} + +export type SearchTotalHitsRelation = 'eq' | 'gte' + +export interface SearchMvtRequest extends RequestBase { + index: Indices + field: Field + zoom: SearchMvtZoomLevel + x: SearchMvtCoordinate + y: SearchMvtCoordinate + exact_bounds?: boolean + extent?: integer + grid_precision?: integer + grid_type?: SearchMvtGridType + size?: integer + aggs?: Record + fields?: Fields + query?: QueryDslQueryContainer + runtime_mappings?: MappingRuntimeFields + sort?: SearchSort +} + +export type SearchMvtResponse = MapboxVectorTiles + +export type SearchMvtCoordinate = integer + +export type SearchMvtGridType = 'grid' | 'point' + +export type SearchMvtZoomLevel = integer + +export interface SearchShardsRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + local?: boolean + preference?: string + routing?: Routing +} + +export interface SearchShardsResponse { + nodes: Record + shards: NodeShard[][] + indices: Record +} + +export interface SearchShardsShardStoreIndex { + aliases?: Name[] + filter?: QueryDslQueryContainer +} + +export interface SearchTemplateRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + ccs_minimize_roundtrips?: boolean + expand_wildcards?: ExpandWildcards + explain?: boolean + ignore_throttled?: boolean + ignore_unavailable?: boolean + preference?: string + profile?: boolean + routing?: Routing + scroll?: Time + search_type?: SearchType + rest_total_hits_as_int?: boolean + typed_keys?: boolean + id?: Id + params?: Record + source?: string +} + +export interface SearchTemplateResponse { + _shards: ShardStatistics + timed_out: boolean + took: integer + hits: SearchHitsMetadata +} + +export interface TermsEnumRequest extends RequestBase { + index: IndexName + field: Field + size?: integer + timeout?: Time + case_insensitive?: boolean + index_filter?: QueryDslQueryContainer + string?: string + search_after?: string +} + +export interface TermsEnumResponse { + _shards: ShardStatistics + terms: string[] + complete: boolean +} + +export interface TermvectorsFieldStatistics { + doc_count: integer + sum_doc_freq: long + sum_ttf: long +} + +export interface TermvectorsFilter { + max_doc_freq?: integer + max_num_terms?: integer + max_term_freq?: integer + max_word_length?: integer + min_doc_freq?: integer + min_term_freq?: integer + min_word_length?: integer +} + +export interface TermvectorsRequest extends RequestBase { + index: IndexName + id?: Id + fields?: Fields + field_statistics?: boolean + offsets?: boolean + payloads?: boolean + positions?: boolean + preference?: string + realtime?: boolean + routing?: Routing + term_statistics?: boolean + version?: VersionNumber + version_type?: VersionType + doc?: TDocument + filter?: TermvectorsFilter + per_field_analyzer?: Record +} + +export interface TermvectorsResponse { + found: boolean + _id: Id + _index: IndexName + term_vectors?: Record + took: long + _type?: Type + _version: VersionNumber +} + +export interface TermvectorsTerm { + 
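+  // Usage sketch (illustrative; index, id, and field are hypothetical): term
+  // statistics come back keyed by term inside TermvectorsTermVector:
+  //
+  //   const tv = await client.termvectors({ index: 'my-index', id: '1', fields: ['title'], term_statistics: true })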
doc_freq?: integer + score?: double + term_freq: integer + tokens: TermvectorsToken[] + ttf?: integer +} + +export interface TermvectorsTermVector { + field_statistics: TermvectorsFieldStatistics + terms: Record +} + +export interface TermvectorsToken { + end_offset?: integer + payload?: string + position: integer + start_offset?: integer +} + +export interface UpdateRequest extends RequestBase { + id: Id + index: IndexName + type?: Type + if_primary_term?: long + if_seq_no?: SequenceNumber + lang?: string + refresh?: Refresh + require_alias?: boolean + retry_on_conflict?: long + routing?: Routing + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + detect_noop?: boolean + doc?: TPartialDocument + doc_as_upsert?: boolean + script?: Script + scripted_upsert?: boolean + upsert?: TDocument +} + +export interface UpdateResponse extends WriteResponseBase { + get?: InlineGet +} + +export interface UpdateByQueryRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + conflicts?: Conflicts + default_operator?: DefaultOperator + df?: string + expand_wildcards?: ExpandWildcards + from?: long + ignore_unavailable?: boolean + lenient?: boolean + pipeline?: string + preference?: string + refresh?: boolean + request_cache?: boolean + requests_per_second?: long + routing?: Routing + scroll?: Time + scroll_size?: long + search_timeout?: Time + search_type?: SearchType + size?: long + slices?: long + sort?: string[] + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + stats?: string[] + terminate_after?: long + timeout?: Time + version?: boolean + version_type?: boolean + wait_for_active_shards?: WaitForActiveShards + wait_for_completion?: boolean + max_docs?: long + query?: QueryDslQueryContainer + script?: Script + slice?: SlicedScroll +} + +export interface UpdateByQueryResponse { + batches?: long + failures?: BulkIndexByScrollFailure[] + noops?: long + deleted?: long + requests_per_second?: float + retries?: Retries + task?: TaskId + timed_out?: boolean + took?: long + total?: long + updated?: long + version_conflicts?: long + throttled_millis?: ulong + throttled_until_millis?: ulong +} + +export interface UpdateByQueryRethrottleRequest extends RequestBase { + task_id: Id + requests_per_second?: long +} + +export interface UpdateByQueryRethrottleResponse { + nodes: Record +} + +export interface UpdateByQueryRethrottleUpdateByQueryRethrottleNode extends SpecUtilsBaseNode { + tasks: Record +} + +export interface SpecUtilsBaseNode { + attributes: Record + host: Host + ip: Ip + name: Name + roles?: NodeRoles + transport_address: TransportAddress +} + +export interface AcknowledgedResponseBase { + acknowledged: boolean +} + +export type AggregateName = string + +export interface BulkIndexByScrollFailure { + cause: MainError + id: Id + index: IndexName + status: integer + type: string +} + +export interface BulkStats { + total_operations: long + total_time?: string + total_time_in_millis: long + total_size?: ByteSize + total_size_in_bytes: long + avg_time?: string + avg_time_in_millis: long + avg_size?: ByteSize + avg_size_in_bytes: long +} + +export type ByteSize = long | string + +export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb' + +export type CategoryId = string + +export interface ChainTransform { + transforms: TransformContainer[] +} + +export interface ClusterStatistics { + skipped: integer + 
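+  // Usage sketch for UpdateRequest above (illustrative; index, id, and the
+  // partial document are hypothetical): `doc` combined with
+  // `doc_as_upsert: true` inserts the partial document when the target does
+  // not exist yet:
+  //
+  //   await client.update({ index: 'my-index', id: '1', doc: { views: 1 }, doc_as_upsert: true })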
successful: integer + total: integer +} + +export interface CompletionStats { + size_in_bytes: long + size?: ByteSize + fields?: Record +} + +export type Conflicts = 'abort' | 'proceed' + +export type DataStreamName = string + +export type DataStreamNames = DataStreamName | DataStreamName[] + +export interface DateField { + field: Field + format?: string + include_unmapped?: boolean +} + +export type DateFormat = string + +export type DateMath = string + +export type DateMathTime = string + +export type DateString = string + +export type DefaultOperator = 'AND' | 'OR' + +export interface DictionaryResponseBase { + [key: string]: TValue +} + +export type Distance = string + +export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm' + +export interface DocStats { + count: long + deleted: long +} + +export interface ElasticsearchVersionInfo { + build_date: DateString + build_flavor: string + build_hash: string + build_snapshot: boolean + build_type: string + lucene_version: VersionString + minimum_index_compatibility_version: VersionString + minimum_wire_compatibility_version: VersionString + number: string +} + +export interface EmptyObject { + [key: string]: never +} + +export type EpochMillis = string | long + +export interface ErrorCause { + type: string + reason: string + caused_by?: ErrorCause + shard?: integer | string + stack_trace?: string + root_cause?: ErrorCause[] + bytes_limit?: long + bytes_wanted?: long + column?: integer + col?: integer + failed_shards?: ShardFailure[] + grouped?: boolean + index?: IndexName + index_uuid?: Uuid + language?: string + licensed_expired_feature?: string + line?: integer + max_buckets?: integer + phase?: string + property_name?: string + processor_type?: string + resource_id?: Ids + 'resource.id'?: Ids + resource_type?: string + 'resource.type'?: string + script?: string + script_stack?: string[] + header?: HttpHeaders + lang?: string + position?: ScriptsPainlessExecutePainlessExecutionPosition +} + +export interface ErrorResponseBase { + error: MainError | string + status?: integer +} + +export type ExpandWildcardOptions = 'all' | 'open' | 'closed' | 'hidden' | 'none' + +export type ExpandWildcards = ExpandWildcardOptions | ExpandWildcardOptions[] | string + +export type Field = string + +export interface FieldMemoryUsage { + memory_size?: ByteSize + memory_size_in_bytes: long +} + +export interface FieldSizeUsage { + size?: ByteSize + size_in_bytes: long +} + +export interface FielddataStats { + evictions?: long + memory_size?: ByteSize + memory_size_in_bytes: long + fields?: Record +} + +export type Fields = Field | Field[] + +export interface FlushStats { + periodic: long + total: long + total_time?: string + total_time_in_millis: long +} + +export type Fuzziness = string | integer + +export type GeoDistanceType = 'arc' | 'plane' + +export type GeoHashPrecision = number + +export type GeoShape = any + +export type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains' + +export type GeoTilePrecision = number + +export interface GetStats { + current: long + exists_time?: string + exists_time_in_millis: long + exists_total: long + missing_time?: string + missing_time_in_millis: long + missing_total: long + time?: string + time_in_millis: long + total: long +} + +export type GroupBy = 'nodes' | 'parents' | 'none' + +export type Health = 'green' | 'yellow' | 'red' + +export type Host = string + +export type HttpHeaders = Record + +export type Id = string + +export type Ids = Id | Id[] + +export type 
IndexAlias = string + +export type IndexName = string + +export type IndexPattern = string + +export type IndexPatterns = IndexPattern[] + +export interface IndexedScript extends ScriptBase { + id: Id +} + +export interface IndexingStats { + index_current: long + delete_current: long + delete_time?: string + delete_time_in_millis: long + delete_total: long + is_throttled: boolean + noop_update_total: long + throttle_time?: string + throttle_time_in_millis: long + index_time?: string + index_time_in_millis: long + index_total: long + index_failed: long + types?: Record +} + +export type Indices = IndexName | IndexName[] + +export interface IndicesResponseBase extends AcknowledgedResponseBase { + _shards?: ShardStatistics +} + +export interface InlineGet { + fields?: Record + found: boolean + _seq_no: SequenceNumber + _primary_term: long + _routing?: Routing + _source: TDocument +} + +export interface InlineScript extends ScriptBase { + source: string +} + +export type Ip = string + +export interface LatLon { + lat: double + lon: double +} + +export type Level = 'cluster' | 'indices' | 'shards' + +export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' + +export interface MainError extends ErrorCause { + headers?: Record +} + +export type MapboxVectorTiles = ArrayBuffer + +export interface MergesStats { + current: long + current_docs: long + current_size?: string + current_size_in_bytes: long + total: long + total_auto_throttle?: string + total_auto_throttle_in_bytes: long + total_docs: long + total_size?: string + total_size_in_bytes: long + total_stopped_time?: string + total_stopped_time_in_millis: long + total_throttled_time?: string + total_throttled_time_in_millis: long + total_time?: string + total_time_in_millis: long +} + +export type Metadata = Record + +export type Metrics = string | string[] + +export type MinimumShouldMatch = integer | string + +export type MultiTermQueryRewrite = string + +export type Name = string + +export type Names = Name | Name[] + +export type Namespace = string + +export interface NodeAttributes { + attributes: Record + ephemeral_id: Id + id?: Id + name: NodeName + transport_address: TransportAddress + roles?: NodeRoles +} + +export type NodeId = string + +export type NodeIds = NodeId | NodeId[] + +export type NodeName = string + +export type NodeRole = 'master' | 'data' | 'data_cold' | 'data_content' | 'data_frozen' | 'data_hot' | 'data_warm' | 'client' | 'ingest' | 'ml' | 'voting_only' | 'transform' | 'remote_cluster_client' | 'coordinating_only' + +export type NodeRoles = NodeRole[] + +export interface NodeShard { + state: IndicesStatsShardRoutingState + primary: boolean + node?: NodeName + shard: integer + index: IndexName + allocation_id?: Record + recovery_source?: Record + unassigned_info?: ClusterAllocationExplainUnassignedInformation +} + +export interface NodeStatistics { + failures?: ErrorCause[] + total: integer + successful: integer + failed: integer +} + +export type OpType = 'index' | 'create' + +export type Password = string + +export type Percentage = string | float + +export type PipelineName = string + +export interface PluginStats { + classname: string + description: string + elasticsearch_version: VersionString + extended_plugins: string[] + has_native_controller: boolean + java_version: VersionString + name: Name + version: VersionString + licensed: boolean + type: string +} + +export type PropertyName = string + +export interface QueryCacheStats { + cache_count: integer + cache_size: integer + evictions: integer + 
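+  // Usage sketch (illustrative; the script source and stored-script id are
+  // hypothetical): wherever a Script is accepted, an inline source and a
+  // stored-script id are interchangeable:
+  //
+  //   const inline: InlineScript = { source: 'ctx._source.views += params.n', params: { n: 1 } }
+  //   const stored: IndexedScript = { id: 'increment-views' }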
hit_count: integer + memory_size?: ByteSize + memory_size_in_bytes: integer + miss_count: integer + total_count: integer +} + +export interface RecoveryStats { + current_as_source: long + current_as_target: long + throttle_time?: string + throttle_time_in_millis: long +} + +export type Refresh = boolean | RefreshOptions + +export type RefreshOptions = 'wait_for' + +export interface RefreshStats { + external_total: long + external_total_time_in_millis: long + listeners: long + total: long + total_time?: string + total_time_in_millis: long +} + +export type RelationName = string + +export interface RequestBase extends SpecUtilsCommonQueryParameters { +} + +export interface RequestCacheStats { + evictions: long + hit_count: long + memory_size?: string + memory_size_in_bytes: long + miss_count: long +} + +export type Result = 'Error' | 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' + +export interface Retries { + bulk: long + search: long +} + +export type Routing = string + +export type Script = InlineScript | IndexedScript | string + +export interface ScriptBase { + lang?: ScriptLanguage | string + params?: Record +} + +export interface ScriptField { + script: Script + ignore_failure?: boolean +} + +export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' + +export interface ScriptTransform { + lang: string + params: Record +} + +export type ScrollId = string + +export interface SearchStats { + fetch_current: long + fetch_time_in_millis: long + fetch_total: long + open_contexts?: long + query_current: long + query_time_in_millis: long + query_total: long + scroll_current: long + scroll_time_in_millis: long + scroll_total: long + suggest_current: long + suggest_time_in_millis: long + suggest_total: long + groups?: Record +} + +export interface SearchTransform { + request: WatcherSearchInputRequestDefinition + timeout: Time +} + +export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' + +export interface SegmentsStats { + count: integer + doc_values_memory?: ByteSize + doc_values_memory_in_bytes: integer + file_sizes: Record + fixed_bit_set?: ByteSize + fixed_bit_set_memory_in_bytes: integer + index_writer_memory?: ByteSize + index_writer_max_memory_in_bytes?: integer + index_writer_memory_in_bytes: integer + max_unsafe_auto_id_timestamp: integer + memory?: ByteSize + memory_in_bytes: integer + norms_memory?: ByteSize + norms_memory_in_bytes: integer + points_memory?: ByteSize + points_memory_in_bytes: integer + stored_memory?: ByteSize + stored_fields_memory_in_bytes: integer + terms_memory_in_bytes: integer + terms_memory?: ByteSize + term_vectory_memory?: ByteSize + term_vectors_memory_in_bytes: integer + version_map_memory?: ByteSize + version_map_memory_in_bytes: integer +} + +export type SequenceNumber = integer + +export type Service = string + +export type ShapeRelation = 'intersects' | 'disjoint' | 'within' + +export interface ShardFailure { + index?: IndexName + node?: string + reason: ErrorCause + shard: integer + status?: string +} + +export interface ShardStatistics { + failed: uint + successful: uint + total: uint + failures?: ShardFailure[] + skipped?: uint +} + +export interface ShardsOperationResponseBase { + _shards: ShardStatistics +} + +export type Size = 'Raw' | 'k' | 'm' | 'g' | 't' | 'p' + +export interface SlicedScroll { + field?: Field + id: integer + max: integer +} + +export interface StoreStats { + size?: ByteSize + size_in_bytes: integer + reserved?: ByteSize + reserved_in_bytes: integer + total_data_set_size?: ByteSize + 
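+  // Usage sketch (illustrative; index and document are hypothetical): the
+  // Refresh type above accepts a boolean or the literal 'wait_for', which
+  // blocks the call until the write is visible to search:
+  //
+  //   await client.index({ index: 'my-index', document: { title: 'hello' }, refresh: 'wait_for' })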
total_data_set_size_in_bytes?: integer +} + +export interface StoredScript { + lang?: ScriptLanguage | string + source: string +} + +export type SuggestMode = 'missing' | 'popular' | 'always' + +export type SuggestionName = string + +export type TaskId = string | integer + +export type ThreadType = 'cpu' | 'wait' | 'block' + +export type Time = string | integer + +export type TimeSpan = string + +export type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd' + +export type TimeZone = string + +export type Timestamp = string + +export interface Transform { + [key: string]: never +} + +export interface TransformContainer { + chain?: ChainTransform + script?: ScriptTransform + search?: SearchTransform +} + +export interface TranslogStats { + earliest_last_modified_age: long + operations: long + size?: string + size_in_bytes: long + uncommitted_operations: integer + uncommitted_size?: string + uncommitted_size_in_bytes: long +} + +export type TransportAddress = string + +export type Type = string + +export type Types = Type | Type[] + +export type Username = string + +export type Uuid = string + +export type VersionNumber = long + +export type VersionString = string + +export type VersionType = 'internal' | 'external' | 'external_gte' | 'force' + +export type WaitForActiveShardOptions = 'all' + +export type WaitForActiveShards = integer | WaitForActiveShardOptions + +export type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid' + +export type WaitForStatus = 'green' | 'yellow' | 'red' + +export interface WarmerStats { + current: long + total: long + total_time?: string + total_time_in_millis: long +} + +export interface WriteResponseBase { + _id: Id + _index: IndexName + _primary_term: long + result: Result + _seq_no: SequenceNumber + _shards: ShardStatistics + _type?: Type + _version: VersionNumber + forced_refresh?: boolean +} + +export type double = number + +export type float = number + +export type integer = number + +export type long = number + +export type uint = number + +export type ulong = number + +export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase { + filters?: Record +} + +export type AggregationsAggregate = AggregationsSingleBucketAggregate | AggregationsAutoDateHistogramAggregate | AggregationsFiltersAggregate | AggregationsSignificantTermsAggregate | AggregationsTermsAggregate | AggregationsBucketAggregate | AggregationsCompositeBucketAggregate | AggregationsMultiBucketAggregate | AggregationsMatrixStatsAggregate | AggregationsKeyedValueAggregate | AggregationsMetricAggregate + +export interface AggregationsAggregateBase { + meta?: Record +} + +export interface AggregationsAggregation { + meta?: Record + name?: string +} + +export interface AggregationsAggregationContainer { + aggs?: Record + meta?: Record + adjacency_matrix?: AggregationsAdjacencyMatrixAggregation + aggregations?: Record + auto_date_histogram?: AggregationsAutoDateHistogramAggregation + avg?: AggregationsAverageAggregation + avg_bucket?: AggregationsAverageBucketAggregation + boxplot?: AggregationsBoxplotAggregation + bucket_script?: AggregationsBucketScriptAggregation + bucket_selector?: AggregationsBucketSelectorAggregation + bucket_sort?: AggregationsBucketSortAggregation + cardinality?: AggregationsCardinalityAggregation + children?: AggregationsChildrenAggregation + composite?: AggregationsCompositeAggregation + cumulative_cardinality?: AggregationsCumulativeCardinalityAggregation + cumulative_sum?: 
AggregationsCumulativeSumAggregation + date_histogram?: AggregationsDateHistogramAggregation + date_range?: AggregationsDateRangeAggregation + derivative?: AggregationsDerivativeAggregation + diversified_sampler?: AggregationsDiversifiedSamplerAggregation + extended_stats?: AggregationsExtendedStatsAggregation + extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation + filter?: QueryDslQueryContainer + filters?: AggregationsFiltersAggregation + geo_bounds?: AggregationsGeoBoundsAggregation + geo_centroid?: AggregationsGeoCentroidAggregation + geo_distance?: AggregationsGeoDistanceAggregation + geohash_grid?: AggregationsGeoHashGridAggregation + geo_line?: AggregationsGeoLineAggregation + geotile_grid?: AggregationsGeoTileGridAggregation + global?: AggregationsGlobalAggregation + histogram?: AggregationsHistogramAggregation + ip_range?: AggregationsIpRangeAggregation + inference?: AggregationsInferenceAggregation + line?: AggregationsGeoLineAggregation + matrix_stats?: AggregationsMatrixStatsAggregation + max?: AggregationsMaxAggregation + max_bucket?: AggregationsMaxBucketAggregation + median_absolute_deviation?: AggregationsMedianAbsoluteDeviationAggregation + min?: AggregationsMinAggregation + min_bucket?: AggregationsMinBucketAggregation + missing?: AggregationsMissingAggregation + moving_avg?: AggregationsMovingAverageAggregation + moving_percentiles?: AggregationsMovingPercentilesAggregation + moving_fn?: AggregationsMovingFunctionAggregation + multi_terms?: AggregationsMultiTermsAggregation + nested?: AggregationsNestedAggregation + normalize?: AggregationsNormalizeAggregation + parent?: AggregationsParentAggregation + percentile_ranks?: AggregationsPercentileRanksAggregation + percentiles?: AggregationsPercentilesAggregation + percentiles_bucket?: AggregationsPercentilesBucketAggregation + range?: AggregationsRangeAggregation + rare_terms?: AggregationsRareTermsAggregation + rate?: AggregationsRateAggregation + reverse_nested?: AggregationsReverseNestedAggregation + sampler?: AggregationsSamplerAggregation + scripted_metric?: AggregationsScriptedMetricAggregation + serial_diff?: AggregationsSerialDifferencingAggregation + significant_terms?: AggregationsSignificantTermsAggregation + significant_text?: AggregationsSignificantTextAggregation + stats?: AggregationsStatsAggregation + stats_bucket?: AggregationsStatsBucketAggregation + string_stats?: AggregationsStringStatsAggregation + sum?: AggregationsSumAggregation + sum_bucket?: AggregationsSumBucketAggregation + terms?: AggregationsTermsAggregation + top_hits?: AggregationsTopHitsAggregation + t_test?: AggregationsTTestAggregation + top_metrics?: AggregationsTopMetricsAggregation + value_count?: AggregationsValueCountAggregation + weighted_avg?: AggregationsWeightedAverageAggregation + variable_width_histogram?: AggregationsVariableWidthHistogramAggregation +} + +export interface AggregationsAggregationRange { + from?: double | string + key?: string + to?: double | string +} + +export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregate> { + interval: DateMathTime +} + +export interface AggregationsAutoDateHistogramAggregation extends AggregationsBucketAggregationBase { + buckets?: integer + field?: Field + format?: string + minimum_interval?: AggregationsMinimumInterval + missing?: DateString + offset?: string + params?: Record + script?: Script + time_zone?: string +} + +export interface AggregationsAverageAggregation extends AggregationsFormatMetricAggregationBase { +} + +export 
interface AggregationsAverageBucketAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase { + min: double + max: double + q1: double + q2: double + q3: double +} + +export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase { + compression?: double +} + +export type AggregationsBucket = AggregationsCompositeBucket | AggregationsDateHistogramBucket | AggregationsFiltersBucketItem | AggregationsIpRangeBucket | AggregationsRangeBucket | AggregationsRareTermsBucket | AggregationsSignificantTermsBucket | AggregationsKeyedBucket + +export interface AggregationsBucketAggregate extends AggregationsAggregateBase { + after_key: Record + bg_count: long + doc_count: long + doc_count_error_upper_bound: long + sum_other_doc_count: long + interval: DateMathTime + items: AggregationsBucket +} + +export interface AggregationsBucketAggregationBase extends AggregationsAggregation { + aggregations?: Record +} + +export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { + script?: Script +} + +export interface AggregationsBucketSelectorAggregation extends AggregationsPipelineAggregationBase { + script?: Script +} + +export interface AggregationsBucketSortAggregation extends AggregationsAggregation { + from?: integer + gap_policy?: AggregationsGapPolicy + size?: integer + sort?: SearchSort +} + +export interface AggregationsBucketsPath { + [key: string]: never +} + +export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { + precision_threshold?: integer + rehash?: boolean +} + +export interface AggregationsChiSquareHeuristic { + background_is_superset: boolean + include_negatives: boolean +} + +export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase { + type?: RelationName +} + +export interface AggregationsClassificationInferenceOptions { + num_top_classes?: integer + num_top_feature_importance_values?: integer + prediction_field_type?: string + results_field?: string + top_classes_results_field?: string +} + +export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase { + after?: Record + size?: integer + sources?: Record[] +} + +export interface AggregationsCompositeAggregationSource { + terms?: AggregationsTermsAggregation + histogram?: AggregationsHistogramAggregation + date_histogram?: AggregationsDateHistogramAggregation + geotile_grid?: AggregationsGeoTileGridAggregation +} + +export interface AggregationsCompositeBucketKeys { +} +export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys | +{ [property: string]: AggregationsAggregate } + +export interface AggregationsCompositeBucketAggregate extends AggregationsMultiBucketAggregate> { + after_key: Record +} + +export interface AggregationsCumulativeCardinalityAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsCumulativeSumAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase { + calendar_interval?: AggregationsDateInterval | Time + extended_bounds?: AggregationsExtendedBounds + hard_bounds?: AggregationsExtendedBounds + field?: Field + fixed_interval?: AggregationsDateInterval | Time + format?: string + interval?: AggregationsDateInterval | Time + min_doc_count?: integer + missing?: DateString + offset?: Time + order?: 
+
+export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase {
+  calendar_interval?: AggregationsDateInterval | Time
+  extended_bounds?: AggregationsExtendedBounds
+  hard_bounds?: AggregationsExtendedBounds
+  field?: Field
+  fixed_interval?: AggregationsDateInterval | Time
+  format?: string
+  interval?: AggregationsDateInterval | Time
+  min_doc_count?: integer
+  missing?: DateString
+  offset?: Time
+  order?: AggregationsHistogramOrder
+  params?: Record
+  script?: Script
+  time_zone?: string
+  keyed?: boolean
+}
+
+export interface AggregationsDateHistogramBucketKeys {
+}
+export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys |
+{ [property: string]: AggregationsAggregate }
+
+export type AggregationsDateInterval = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'quarter' | 'year'
+
+export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase {
+  field?: Field
+  format?: string
+  missing?: AggregationsMissing
+  ranges?: AggregationsDateRangeExpression[]
+  time_zone?: string
+  keyed?: boolean
+}
+
+export interface AggregationsDateRangeExpression {
+  from?: DateMath | float
+  from_as_string?: string
+  to_as_string?: string
+  key?: string
+  to?: DateMath | float
+  doc_count?: long
+}
+
+export interface AggregationsDerivativeAggregation extends AggregationsPipelineAggregationBase {
+}
+
+export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase {
+  execution_hint?: AggregationsSamplerAggregationExecutionHint
+  max_docs_per_value?: integer
+  script?: Script
+  shard_size?: integer
+  field?: Field
+}
+
+export interface AggregationsEwmaModelSettings {
+  alpha?: float
+}
+
+export interface AggregationsExtendedBounds<T> {
+  max: T
+  min: T
+}
+
+export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAggregate {
+  std_deviation_bounds: AggregationsStandardDeviationBounds
+  sum_of_squares?: double
+  variance?: double
+  variance_population?: double
+  variance_sampling?: double
+  std_deviation?: double
+  std_deviation_population?: double
+  std_deviation_sampling?: double
+}
+
+export interface AggregationsExtendedStatsAggregation extends AggregationsFormatMetricAggregationBase {
+  sigma?: double
+}
+
+export interface AggregationsExtendedStatsBucketAggregation extends AggregationsPipelineAggregationBase {
+  sigma?: double
+}
+
+export interface AggregationsFiltersAggregate extends AggregationsAggregateBase {
+  buckets: AggregationsFiltersBucketItem[] | Record
+}
+
+export interface AggregationsFiltersAggregation extends AggregationsBucketAggregationBase {
+  filters?: Record | QueryDslQueryContainer[]
+  other_bucket?: boolean
+  other_bucket_key?: string
+  keyed?: boolean
+}
+
+export interface AggregationsFiltersBucketItemKeys {
+  doc_count: long
+}
+export type AggregationsFiltersBucketItem = AggregationsFiltersBucketItemKeys |
+{ [property: string]: AggregationsAggregate }
+
+export interface AggregationsFormatMetricAggregationBase extends AggregationsMetricAggregationBase {
+  format?: string
+}
+
+export interface AggregationsFormattableMetricAggregation extends AggregationsMetricAggregationBase {
+  format?: string
+}
+
+export type AggregationsGapPolicy = 'skip' | 'insert_zeros'
+
+export interface AggregationsGeoBounds {
+  bottom_right: LatLon
+  top_left: LatLon
+}
+
+export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase {
+  bounds: AggregationsGeoBounds
+}
+
+export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase {
+  wrap_longitude?: boolean
+}
+
+export interface AggregationsGeoCentroidAggregate extends AggregationsAggregateBase {
+  count: long
+  location: QueryDslGeoLocation
+}
+
+export interface AggregationsGeoCentroidAggregation extends AggregationsMetricAggregationBase {
+  count?: long
+  location?: QueryDslGeoLocation
+}
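A short sketch of `AggregationsDateHistogramAggregation` as declared above; `timestamp` is a hypothetical date field:

const byDay: AggregationsDateHistogramAggregation = {
  field: 'timestamp',
  calendar_interval: 'day', // or fixed_interval (e.g. '90s') for fixed-size buckets
  min_doc_count: 0,
  time_zone: 'UTC'
}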
+
+export interface AggregationsGeoDistanceAggregation extends AggregationsBucketAggregationBase {
+  distance_type?: GeoDistanceType
+  field?: Field
+  origin?: QueryDslGeoLocation | string
+  ranges?: AggregationsAggregationRange[]
+  unit?: DistanceUnit
+}
+
+export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase {
+  bounds?: QueryDslBoundingBox
+  field?: Field
+  precision?: GeoHashPrecision
+  shard_size?: integer
+  size?: integer
+}
+
+export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase {
+  type: string
+  geometry: AggregationsLineStringGeoShape
+  properties: AggregationsGeoLineProperties
+}
+
+export interface AggregationsGeoLineAggregation {
+  point: AggregationsGeoLinePoint
+  sort: AggregationsGeoLineSort
+  include_sort?: boolean
+  sort_order?: SearchSortOrder
+  size?: integer
+}
+
+export interface AggregationsGeoLinePoint {
+  field: Field
+}
+
+export interface AggregationsGeoLineProperties {
+  complete: boolean
+  sort_values: double[]
+}
+
+export interface AggregationsGeoLineSort {
+  field: Field
+}
+
+export interface AggregationsGeoTileGridAggregation extends AggregationsBucketAggregationBase {
+  field?: Field
+  precision?: GeoTilePrecision
+  shard_size?: integer
+  size?: integer
+  bounds?: AggregationsGeoBounds
+}
+
+export interface AggregationsGlobalAggregation extends AggregationsBucketAggregationBase {
+}
+
+export interface AggregationsGoogleNormalizedDistanceHeuristic {
+  background_is_superset: boolean
+}
+
+export interface AggregationsHdrMethod {
+  number_of_significant_value_digits?: integer
+}
+
+export interface AggregationsHdrPercentileItem {
+  key: double
+  value: double
+}
+
+export interface AggregationsHdrPercentilesAggregate extends AggregationsAggregateBase {
+  values: AggregationsHdrPercentileItem[]
+}
+
+export interface AggregationsHistogramAggregation extends AggregationsBucketAggregationBase {
+  extended_bounds?: AggregationsExtendedBounds
+  hard_bounds?: AggregationsExtendedBounds
+  field?: Field
+  interval?: double
+  min_doc_count?: integer
+  missing?: double
+  offset?: double
+  order?: AggregationsHistogramOrder
+  script?: Script
+  format?: string
+  keyed?: boolean
+}
+
+export interface AggregationsHistogramOrder {
+  _count?: SearchSortOrder
+  _key?: SearchSortOrder
+}
+
+export interface AggregationsHoltLinearModelSettings {
+  alpha?: float
+  beta?: float
+}
+
+export interface AggregationsHoltWintersModelSettings {
+  alpha?: float
+  beta?: float
+  gamma?: float
+  pad?: boolean
+  period?: integer
+  type?: AggregationsHoltWintersType
+}
+
+export type AggregationsHoltWintersType = 'add' | 'mult'
+
+export interface AggregationsInferenceAggregation extends AggregationsPipelineAggregationBase {
+  model_id: Name
+  inference_config?: AggregationsInferenceConfigContainer
+}
+
+export interface AggregationsInferenceConfigContainer {
+  regression?: AggregationsRegressionInferenceOptions
+  classification?: AggregationsClassificationInferenceOptions
+}
+
+export interface AggregationsIpRangeAggregation extends AggregationsBucketAggregationBase {
+  field?: Field
+  ranges?: AggregationsIpRangeAggregationRange[]
+}
+
+export interface AggregationsIpRangeAggregationRange {
+  from?: string
+  mask?: string
+  to?: string
+}
+
+export interface AggregationsIpRangeBucketKeys {
+}
+export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys |
+{ [property: string]: AggregationsAggregate }
+
+export interface AggregationsKeyedBucketKeys<TKey> {
+  doc_count: long
+  key: TKey
+  key_as_string: string
+}
+export type AggregationsKeyedBucket<TKey> = AggregationsKeyedBucketKeys<TKey> |
+{ [property: string]: AggregationsAggregate }
+
+export interface AggregationsKeyedValueAggregate extends AggregationsValueAggregate {
+  keys: string[]
+}
+
+export interface AggregationsLineStringGeoShape {
+  coordinates: QueryDslGeoCoordinate[]
+}
+
+export interface AggregationsMatrixAggregation extends AggregationsAggregation {
+  fields?: Fields
+  missing?: Record
+}
+
+export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateBase {
+  correlation: Record
+  covariance: Record
+  count: integer
+  kurtosis: double
+  mean: double
+  skewness: double
+  variance: double
+  name: string
+}
+
+export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation {
+  mode?: AggregationsMatrixStatsMode
+}
+
+export type AggregationsMatrixStatsMode = 'avg' | 'min' | 'max' | 'sum' | 'median'
+
+export interface AggregationsMaxAggregation extends AggregationsFormatMetricAggregationBase {
+}
+
+export interface AggregationsMaxBucketAggregation extends AggregationsPipelineAggregationBase {
+}
+
+export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase {
+  compression?: double
+}
+
+export type AggregationsMetricAggregate = AggregationsValueAggregate | AggregationsBoxPlotAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsGeoLineAggregate | AggregationsPercentilesAggregate | AggregationsScriptedMetricAggregate | AggregationsStatsAggregate | AggregationsStringStatsAggregate | AggregationsTopHitsAggregate | AggregationsTopMetricsAggregate | AggregationsExtendedStatsAggregate | AggregationsTDigestPercentilesAggregate | AggregationsHdrPercentilesAggregate
+
+export interface AggregationsMetricAggregationBase {
+  field?: Field
+  missing?: AggregationsMissing
+  script?: Script
+}
+
+export interface AggregationsMinAggregation extends AggregationsFormatMetricAggregationBase {
+}
+
+export interface AggregationsMinBucketAggregation extends AggregationsPipelineAggregationBase {
+}
+
+export type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' | 'month' | 'year'
+
+export type AggregationsMissing = string | integer | double | boolean
+
+export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase {
+  field?: Field
+  missing?: AggregationsMissing
+}
+
+export interface AggregationsMovingAverageAggregation extends AggregationsPipelineAggregationBase {
+  minimize?: boolean
+  model?: AggregationsMovingAverageModel
+  settings: AggregationsMovingAverageSettings
+  predict?: integer
+  window?: integer
+}
+
+export type AggregationsMovingAverageModel = 'linear' | 'simple' | 'ewma' | 'holt' | 'holt_winters'
+
+export type AggregationsMovingAverageSettings = AggregationsEwmaModelSettings | AggregationsHoltLinearModelSettings | AggregationsHoltWintersModelSettings
+
+export interface AggregationsMovingFunctionAggregation extends AggregationsPipelineAggregationBase {
+  script?: string
+  shift?: integer
+  window?: integer
+}
+
+export interface AggregationsMovingPercentilesAggregation extends AggregationsPipelineAggregationBase {
+  window?: integer
+  shift?: integer
+  keyed?: boolean
+}
+
+export interface AggregationsMultiBucketAggregate<TBucket> extends AggregationsAggregateBase {
+  buckets: TBucket[]
+}
+
+export interface AggregationsMultiTermLookup {
+  field: Field
+}
+
+export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase {
+  terms: AggregationsMultiTermLookup[]
+}
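`AggregationsMultiTermsAggregation` buckets on a tuple of fields via `AggregationsMultiTermLookup`. A minimal sketch with hypothetical field names:

const byGenreAndProduct: AggregationsMultiTermsAggregation = {
  terms: [{ field: 'genre' }, { field: 'product' }]
}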
+
+export interface AggregationsMutualInformationHeuristic {
+  background_is_superset: boolean
+  include_negatives: boolean
+}
+
+export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase {
+  path?: Field
+}
+
+export interface AggregationsNormalizeAggregation extends AggregationsPipelineAggregationBase {
+  method?: AggregationsNormalizeMethod
+}
+
+export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax'
+
+export interface AggregationsParentAggregation extends AggregationsBucketAggregationBase {
+  type?: RelationName
+}
+
+export interface AggregationsPercentageScoreHeuristic {
+  [key: string]: never
+}
+
+export interface AggregationsPercentileItem {
+  percentile: double
+  value: double
+}
+
+export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase {
+  keyed?: boolean
+  values?: double[]
+  hdr?: AggregationsHdrMethod
+  tdigest?: AggregationsTDigest
+}
+
+export interface AggregationsPercentilesAggregate extends AggregationsAggregateBase {
+  items: AggregationsPercentileItem[]
+}
+
+export interface AggregationsPercentilesAggregation extends AggregationsFormatMetricAggregationBase {
+  keyed?: boolean
+  percents?: double[]
+  hdr?: AggregationsHdrMethod
+  tdigest?: AggregationsTDigest
+}
+
+export interface AggregationsPercentilesBucketAggregation extends AggregationsPipelineAggregationBase {
+  percents?: double[]
+}
+
+export interface AggregationsPipelineAggregationBase extends AggregationsAggregation {
+  buckets_path?: AggregationsBucketsPath
+  format?: string
+  gap_policy?: AggregationsGapPolicy
+}
+
+export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase {
+  field?: Field
+  ranges?: AggregationsAggregationRange[]
+  script?: Script
+  keyed?: boolean
+}
+
+export interface AggregationsRangeBucketKeys {
+}
+export type AggregationsRangeBucket = AggregationsRangeBucketKeys |
+{ [property: string]: AggregationsAggregate }
+
+export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase {
+  exclude?: string | string[]
+  field?: Field
+  include?: string | string[] | AggregationsTermsInclude
+  max_doc_count?: long
+  missing?: AggregationsMissing
+  precision?: double
+  value_type?: string
+}
+
+export interface AggregationsRareTermsBucketKeys {
+}
+export type AggregationsRareTermsBucket = AggregationsRareTermsBucketKeys |
+{ [property: string]: AggregationsAggregate }
+
+export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase {
+  unit?: AggregationsDateInterval
+  mode?: AggregationsRateMode
+}
+
+export type AggregationsRateMode = 'sum' | 'value_count'
+
+export interface AggregationsRegressionInferenceOptions {
+  results_field?: Field
+  num_top_feature_importance_values?: integer
+}
+
+export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase {
+  path?: Field
+}
+
+export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase {
+  shard_size?: integer
+}
+
+export type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash'
+
+export interface AggregationsScriptedHeuristic {
+  script: Script
+}
+
+export interface AggregationsScriptedMetricAggregate extends AggregationsAggregateBase {
+  value: any
+}
+
+export interface AggregationsScriptedMetricAggregation extends AggregationsMetricAggregationBase {
+  combine_script?: Script
+  init_script?: Script
+  map_script?: Script
+  params?: Record
+  reduce_script?: Script
+}
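A sketch of `AggregationsScriptedMetricAggregation`; it assumes the shared `Script` type accepts an inline `{ source }` object, and `amount` is a hypothetical numeric field:

const totalAmount: AggregationsScriptedMetricAggregation = {
  init_script: { source: 'state.total = 0' },
  map_script: { source: "state.total += doc['amount'].value" },
  combine_script: { source: 'return state.total' },
  reduce_script: { source: 'double t = 0; for (s in states) { t += s } return t' }
}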
+
+export interface AggregationsSerialDifferencingAggregation extends AggregationsPipelineAggregationBase {
+  lag?: integer
+}
+
+export interface AggregationsSignificantTermsAggregate extends AggregationsMultiBucketAggregate {
+  bg_count: long
+  doc_count: long
+}
+
+export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase {
+  background_filter?: QueryDslQueryContainer
+  chi_square?: AggregationsChiSquareHeuristic
+  exclude?: string | string[]
+  execution_hint?: AggregationsTermsAggregationExecutionHint
+  field?: Field
+  gnd?: AggregationsGoogleNormalizedDistanceHeuristic
+  include?: string | string[]
+  min_doc_count?: long
+  mutual_information?: AggregationsMutualInformationHeuristic
+  percentage?: AggregationsPercentageScoreHeuristic
+  script_heuristic?: AggregationsScriptedHeuristic
+  shard_min_doc_count?: long
+  shard_size?: integer
+  size?: integer
+}
+
+export interface AggregationsSignificantTermsBucketKeys {
+}
+export type AggregationsSignificantTermsBucket = AggregationsSignificantTermsBucketKeys |
+{ [property: string]: AggregationsAggregate }
+
+export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase {
+  background_filter?: QueryDslQueryContainer
+  chi_square?: AggregationsChiSquareHeuristic
+  exclude?: string | string[]
+  execution_hint?: AggregationsTermsAggregationExecutionHint
+  field?: Field
+  filter_duplicate_text?: boolean
+  gnd?: AggregationsGoogleNormalizedDistanceHeuristic
+  include?: string | string[]
+  min_doc_count?: long
+  mutual_information?: AggregationsMutualInformationHeuristic
+  percentage?: AggregationsPercentageScoreHeuristic
+  script_heuristic?: AggregationsScriptedHeuristic
+  shard_min_doc_count?: long
+  shard_size?: integer
+  size?: integer
+  source_fields?: Fields
+}
+
+export interface AggregationsSingleBucketAggregateKeys extends AggregationsAggregateBase {
+  doc_count: double
+}
+export type AggregationsSingleBucketAggregate = AggregationsSingleBucketAggregateKeys |
+{ [property: string]: AggregationsAggregate }
+
+export interface AggregationsStandardDeviationBounds {
+  lower?: double
+  upper?: double
+  lower_population?: double
+  upper_population?: double
+  lower_sampling?: double
+  upper_sampling?: double
+}
+
+export interface AggregationsStatsAggregate extends AggregationsAggregateBase {
+  count: double
+  sum: double
+  avg?: double
+  max?: double
+  min?: double
+}
+
+export interface AggregationsStatsAggregation extends AggregationsFormatMetricAggregationBase {
+}
+
+export interface AggregationsStatsBucketAggregation extends AggregationsPipelineAggregationBase {
+}
+
+export interface AggregationsStringStatsAggregate extends AggregationsAggregateBase {
+  count: long
+  min_length: integer
+  max_length: integer
+  avg_length: double
+  entropy: double
+  distribution?: Record
+}
+
+export interface AggregationsStringStatsAggregation extends AggregationsMetricAggregationBase {
+  show_distribution?: boolean
+}
+
+export interface AggregationsSumAggregation extends AggregationsFormatMetricAggregationBase {
+}
+
+export interface AggregationsSumBucketAggregation extends AggregationsPipelineAggregationBase {
+}
+
+export interface AggregationsTDigest {
+  compression?: integer
+}
+
+export interface AggregationsTDigestPercentilesAggregate extends AggregationsAggregateBase {
+  values: Record
+}
+
+export interface AggregationsTTestAggregation extends AggregationsAggregation {
+  a?: AggregationsTestPopulation
+  b?: AggregationsTestPopulation
+  type?: AggregationsTTestType
+}
+
+export type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic'
+
+export interface AggregationsTermsAggregate extends AggregationsMultiBucketAggregate {
+  doc_count_error_upper_bound: long
+  sum_other_doc_count: long
+}
+
+export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase {
+  collect_mode?: AggregationsTermsAggregationCollectMode
+  exclude?: string | string[]
+  execution_hint?: AggregationsTermsAggregationExecutionHint
+  field?: Field
+  include?: string | string[] | AggregationsTermsInclude
+  min_doc_count?: integer
+  missing?: AggregationsMissing
+  missing_bucket?: boolean
+  value_type?: string
+  order?: AggregationsTermsAggregationOrder
+  script?: Script
+  shard_size?: integer
+  show_term_doc_count_error?: boolean
+  size?: integer
+}
+
+export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_first'
+
+export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality'
+
+export type AggregationsTermsAggregationOrder = SearchSortOrder | Record | Record[]
+
+export interface AggregationsTermsInclude {
+  num_partitions: long
+  partition: long
+}
+
+export interface AggregationsTestPopulation {
+  field: Field
+  script?: Script
+  filter?: QueryDslQueryContainer
+}
+
+export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase {
+  hits: SearchHitsMetadata
+}
+
+export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase {
+  docvalue_fields?: Fields
+  explain?: boolean
+  from?: integer
+  highlight?: SearchHighlight
+  script_fields?: Record
+  size?: integer
+  sort?: SearchSort
+  _source?: boolean | SearchSourceFilter | Fields
+  stored_fields?: Fields
+  track_scores?: boolean
+  version?: boolean
+  seq_no_primary_term?: boolean
+}
+
+export interface AggregationsTopMetrics {
+  sort: (long | double | string)[]
+  metrics: Record
+}
+
+export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBase {
+  top: AggregationsTopMetrics[]
+}
+
+export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase {
+  metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[]
+  size?: integer
+  sort?: SearchSort
+}
+
+export interface AggregationsTopMetricsValue {
+  field: Field
+}
+
+export interface AggregationsValueAggregate extends AggregationsAggregateBase {
+  value: double
+  value_as_string?: string
+}
+
+export interface AggregationsValueCountAggregation extends AggregationsFormattableMetricAggregation {
+}
+
+export type AggregationsValueType = 'string' | 'long' | 'double' | 'number' | 'date' | 'date_nanos' | 'ip' | 'numeric' | 'geo_point' | 'boolean'
+
+export interface AggregationsVariableWidthHistogramAggregation {
+  field?: Field
+  buckets?: integer
+  shard_size?: integer
+  initial_buffer?: integer
+}
+
+export interface AggregationsWeightedAverageAggregation extends AggregationsAggregation {
+  format?: string
+  value?: AggregationsWeightedAverageValue
+  value_type?: AggregationsValueType
+  weight?: AggregationsWeightedAverageValue
+}
+
+export interface AggregationsWeightedAverageValue {
+  field?: Field
+  missing?: double
+  script?: Script
+}
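`AggregationsWeightedAverageAggregation` takes two `AggregationsWeightedAverageValue` sides, and `missing` substitutes a value for documents that lack one. A minimal sketch with hypothetical fields:

const weightedGrade: AggregationsWeightedAverageAggregation = {
  value: { field: 'grade' },
  weight: { field: 'weight', missing: 1 }
}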
+
+export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer
+
+export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase {
+  type: 'asciifolding'
+  preserve_original: boolean
+}
+
+export type AnalysisCharFilter = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceTokenFilter
+
+export interface AnalysisCharFilterBase {
+  version?: VersionString
+}
+
+export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase {
+  type: 'char_group'
+  tokenize_on_chars: string[]
+}
+
+export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
+  type: 'common_grams'
+  common_words: string[]
+  common_words_path: string
+  ignore_case: boolean
+  query_mode: boolean
+}
+
+export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase {
+  hyphenation_patterns_path: string
+  max_subword_size: integer
+  min_subword_size: integer
+  min_word_size: integer
+  only_longest_match: boolean
+  word_list: string[]
+  word_list_path: string
+}
+
+export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase {
+  type: 'condition'
+  filter: string[]
+  script: Script
+}
+
+export interface AnalysisCustomAnalyzer {
+  type: 'custom'
+  char_filter?: string[]
+  filter?: string[]
+  position_increment_gap?: integer
+  position_offset_gap?: integer
+  tokenizer: string
+}
+
+export interface AnalysisCustomNormalizer {
+  type: 'custom'
+  char_filter?: string[]
+  filter?: string[]
+}
+
+export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity'
+
+export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase {
+  type: 'delimited_payload'
+  delimiter: string
+  encoding: AnalysisDelimitedPayloadEncoding
+}
+
+export type AnalysisEdgeNGramSide = 'front' | 'back'
+
+export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase {
+  type: 'edge_ngram'
+  max_gram: integer
+  min_gram: integer
+  side: AnalysisEdgeNGramSide
+}
+
+export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
+  type: 'edge_ngram'
+  custom_token_chars: string
+  max_gram: integer
+  min_gram: integer
+  token_chars: AnalysisTokenChar[]
+}
+
+export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
+  type: 'elision'
+  articles: string[]
+  articles_case: boolean
+}
+
+export interface AnalysisFingerprintAnalyzer {
+  type: 'fingerprint'
+  version: VersionString
+  max_output_size: integer
+  preserve_original: boolean
+  separator: string
+  stopwords: AnalysisStopWords
+  stopwords_path: string
+}
+
+export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase {
+  type: 'fingerprint'
+  max_output_size: integer
+  separator: string
+}
+
+export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase {
+  type: 'html_strip'
+}
+
+export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase {
+  type: 'hunspell'
+  dedup: boolean
+  dictionary: string
+  locale: string
+  longest_only: boolean
+}
+
+export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
+  type: 'hyphenation_decompounder'
+}
+
+export interface AnalysisIcuAnalyzer {
+  type: 'icu_analyzer'
+  method: AnalysisIcuNormalizationType
+  mode: AnalysisIcuNormalizationMode
+}
+
+export type AnalysisIcuNormalizationMode = 'decompose' | 'compose'
+
+export type AnalysisIcuNormalizationType = 'nfc' | 'nfkc' | 'nfkc_cf'
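`AnalysisCustomAnalyzer` only references its components by name. The sketch below wires built-in ones; the component names are standard Elasticsearch built-ins, not defined in this diff:

const htmlFriendly: AnalysisCustomAnalyzer = {
  type: 'custom',
  tokenizer: 'standard',
  char_filter: ['html_strip'],
  filter: ['lowercase', 'asciifolding']
}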
+
+export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase {
+  type: 'kstem'
+}
+
+export type AnalysisKeepTypesMode = 'include' | 'exclude'
+
+export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase {
+  type: 'keep_types'
+  mode: AnalysisKeepTypesMode
+  types: string[]
+}
+
+export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase {
+  type: 'keep'
+  keep_words: string[]
+  keep_words_case: boolean
+  keep_words_path: string
+}
+
+export interface AnalysisKeywordAnalyzer {
+  type: 'keyword'
+  version: VersionString
+}
+
+export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase {
+  type: 'keyword_marker'
+  ignore_case: boolean
+  keywords: string[]
+  keywords_path: string
+  keywords_pattern: string
+}
+
+export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase {
+  type: 'keyword'
+  buffer_size: integer
+}
+
+export interface AnalysisKuromojiAnalyzer {
+  type: 'kuromoji'
+  mode: AnalysisKuromojiTokenizationMode
+  user_dictionary: string
+}
+
+export interface AnalysisKuromojiPartOfSpeechTokenFilter extends AnalysisTokenFilterBase {
+  type: 'kuromoji_part_of_speech'
+  stoptags: string[]
+}
+
+export interface AnalysisKuromojiReadingFormTokenFilter extends AnalysisTokenFilterBase {
+  type: 'kuromoji_readingform'
+  use_romaji: boolean
+}
+
+export interface AnalysisKuromojiStemmerTokenFilter extends AnalysisTokenFilterBase {
+  type: 'kuromoji_stemmer'
+  minimum_length: integer
+}
+
+export type AnalysisKuromojiTokenizationMode = 'normal' | 'search' | 'extended'
+
+export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase {
+  type: 'kuromoji_tokenizer'
+  discard_punctuation: boolean
+  mode: AnalysisKuromojiTokenizationMode
+  nbest_cost: integer
+  nbest_examples: string
+  user_dictionary: string
+  user_dictionary_rules: string[]
+}
+
+export type AnalysisLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Brazilian' | 'Bulgarian' | 'Catalan' | 'Chinese' | 'Cjk' | 'Czech' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'Galician' | 'German' | 'Greek' | 'Hindi' | 'Hungarian' | 'Indonesian' | 'Irish' | 'Italian' | 'Latvian' | 'Norwegian' | 'Persian' | 'Portuguese' | 'Romanian' | 'Russian' | 'Sorani' | 'Spanish' | 'Swedish' | 'Turkish' | 'Thai'
+
+export interface AnalysisLanguageAnalyzer {
+  type: 'language'
+  version: VersionString
+  language: AnalysisLanguage
+  stem_exclusion: string[]
+  stopwords: AnalysisStopWords
+  stopwords_path: string
+}
+
+export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase {
+  type: 'length'
+  max: integer
+  min: integer
+}
+
+export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase {
+  type: 'letter'
+}
+
+export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase {
+  type: 'limit'
+  consume_all_tokens: boolean
+  max_token_count: integer
+}
+
+export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase {
+  type: 'lowercase'
+  language: string
+}
+
+export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase {
+  type: 'lowercase'
+}
+
+export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase {
+  type: 'mapping'
+  mappings: string[]
+  mappings_path: string
+}
+
+export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase {
+  type: 'multiplexer'
+  filters: string[]
+  preserve_original: boolean
+}
+
+export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase {
+  type: 'ngram'
+  max_gram: integer
+  min_gram: integer
+}
+
+export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
+  type: 'ngram'
+  custom_token_chars: string
+  max_gram: integer
+  min_gram: integer
+  token_chars: AnalysisTokenChar[]
+}
+
+export interface AnalysisNoriAnalyzer {
+  type: 'nori'
+  version: VersionString
+  decompound_mode: AnalysisNoriDecompoundMode
+  stoptags: string[]
+  user_dictionary: string
+}
+
+export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed'
+
+export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase {
+  type: 'nori_part_of_speech'
+  stoptags: string[]
+}
+
+export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase {
+  type: 'nori_tokenizer'
+  decompound_mode: AnalysisNoriDecompoundMode
+  discard_punctuation: boolean
+  user_dictionary: string
+  user_dictionary_rules: string[]
+}
+
+export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase {
+  type: 'path_hierarchy'
+  buffer_size: integer
+  delimiter: string
+  replacement: string
+  reverse: boolean
+  skip: integer
+}
+
+export interface AnalysisPatternAnalyzer {
+  type: 'pattern'
+  version: VersionString
+  flags: string
+  lowercase: boolean
+  pattern: string
+  stopwords: AnalysisStopWords
+}
+
+export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase {
+  type: 'pattern_capture'
+  patterns: string[]
+  preserve_original: boolean
+}
+
+export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase {
+  type: 'pattern_replace'
+  flags: string
+  pattern: string
+  replacement: string
+}
+
+export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase {
+  type: 'porter_stem'
+}
+
+export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase {
+  type: 'predicate_token_filter'
+  script: Script
+}
+
+export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase {
+  type: 'remove_duplicates'
+}
+
+export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase {
+  type: 'reverse'
+}
+
+export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase {
+  type: 'shingle'
+  filler_token: string
+  max_shingle_size: integer
+  min_shingle_size: integer
+  output_unigrams: boolean
+  output_unigrams_if_no_shingles: boolean
+  token_separator: string
+}
+
+export interface AnalysisSimpleAnalyzer {
+  type: 'simple'
+  version: VersionString
+}
+
+export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish'
+
+export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase {
+  type: 'snowball'
+  language: AnalysisSnowballLanguage
+}
+
+export interface AnalysisStandardAnalyzer {
+  type: 'standard'
+  max_token_length: integer
+  stopwords: AnalysisStopWords
+}
+
+export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase {
+  type: 'standard'
+  max_token_length: integer
+}
+
+export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterBase {
+  type: 'stemmer_override'
+  rules: string[]
+  rules_path: string
+}
+
+export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase {
+  type: 'stemmer'
+  language: string
+}
+
+export interface AnalysisStopAnalyzer {
+  type: 'stop'
+  version: VersionString
+  stopwords: AnalysisStopWords
+  stopwords_path: string
+}
+
+export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase {
+  type: 'stop'
+  ignore_case?: boolean
+  remove_trailing?: boolean
+  stopwords: AnalysisStopWords
+  stopwords_path?: string
+}
+
+export type AnalysisStopWords = string | string[]
+
+export type AnalysisSynonymFormat = 'solr' | 'wordnet'
+
+export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase {
+  type: 'synonym_graph'
+  expand: boolean
+  format: AnalysisSynonymFormat
+  lenient: boolean
+  synonyms: string[]
+  synonyms_path: string
+  tokenizer: string
+  updateable: boolean
+}
+
+export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
+  type: 'synonym'
+  expand: boolean
+  format: AnalysisSynonymFormat
+  lenient: boolean
+  synonyms: string[]
+  synonyms_path: string
+  tokenizer: string
+  updateable: boolean
+}
+
+export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'
+
+export type AnalysisTokenFilter = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter
+
+export interface AnalysisTokenFilterBase {
+  version?: VersionString
+}
+
+export type AnalysisTokenizer = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer
+
+export interface AnalysisTokenizerBase {
+  version?: VersionString
+}
+
+export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {
+  type: 'trim'
+}
+
+export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase {
+  type: 'truncate'
+  length: integer
+}
+
+export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase {
+  type: 'uax_url_email'
+  max_token_length: integer
+}
+
+export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase {
+  type: 'unique'
+  only_on_same_position: boolean
+}
+
+export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase {
+  type: 'uppercase'
+}
+
+export interface AnalysisWhitespaceAnalyzer {
+  type: 'whitespace'
+  version: VersionString
+}
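Token filter definitions are discriminated on their literal `type` field, so a value narrows to one member of the `AnalysisTokenFilter` union. For example, with `AnalysisStopTokenFilter`:

const englishArticles: AnalysisStopTokenFilter = {
  type: 'stop',
  stopwords: ['a', 'an', 'the']
}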
+
+export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase {
+  type: 'whitespace'
+  max_token_length: integer
+}
+
+export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilterBase {
+  type: 'word_delimiter_graph'
+  adjust_offsets: boolean
+  catenate_all: boolean
+  catenate_numbers: boolean
+  catenate_words: boolean
+  generate_number_parts: boolean
+  generate_word_parts: boolean
+  preserve_original: boolean
+  protected_words: string[]
+  protected_words_path: string
+  split_on_case_change: boolean
+  split_on_numerics: boolean
+  stem_english_possessive: boolean
+  type_table: string[]
+  type_table_path: string
+}
+
+export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBase {
+  type: 'word_delimiter'
+  catenate_all: boolean
+  catenate_numbers: boolean
+  catenate_words: boolean
+  generate_number_parts: boolean
+  generate_word_parts: boolean
+  preserve_original: boolean
+  protected_words: string[]
+  protected_words_path: string
+  split_on_case_change: boolean
+  split_on_numerics: boolean
+  stem_english_possessive: boolean
+  type_table: string[]
+  type_table_path: string
+}
+
+export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase {
+  type: 'aggregate_metric_double'
+  default_metric: string
+  metrics: string[]
+}
+
+export interface MappingAllField {
+  analyzer: string
+  enabled: boolean
+  omit_norms: boolean
+  search_analyzer: string
+  similarity: string
+  store: boolean
+  store_term_vector_offsets: boolean
+  store_term_vector_payloads: boolean
+  store_term_vector_positions: boolean
+  store_term_vectors: boolean
+}
+
+export interface MappingBinaryProperty extends MappingDocValuesPropertyBase {
+  type: 'binary'
+}
+
+export interface MappingBooleanProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  fielddata?: IndicesNumericFielddata
+  index?: boolean
+  null_value?: boolean
+  type: 'boolean'
+}
+
+export interface MappingCompletionProperty extends MappingDocValuesPropertyBase {
+  analyzer?: string
+  contexts?: MappingSuggestContext[]
+  max_input_length?: integer
+  preserve_position_increments?: boolean
+  preserve_separators?: boolean
+  search_analyzer?: string
+  type: 'completion'
+}
+
+export interface MappingConstantKeywordProperty extends MappingPropertyBase {
+  value?: any
+  type: 'constant_keyword'
+}
+
+export type MappingCoreProperty = MappingObjectProperty | MappingNestedProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingDocValuesProperty
+
+export interface MappingCorePropertyBase extends MappingPropertyBase {
+  copy_to?: Fields
+  similarity?: string
+  store?: boolean
+}
+
+export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  format?: string
+  ignore_malformed?: boolean
+  index?: boolean
+  null_value?: DateString
+  precision_step?: integer
+  type: 'date_nanos'
+}
+
+export interface MappingDateProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  fielddata?: IndicesNumericFielddata
+  format?: string
+  ignore_malformed?: boolean
+  index?: boolean
+  null_value?: DateString
+  precision_step?: integer
+  type: 'date'
+}
+
+export interface MappingDateRangeProperty extends MappingRangePropertyBase {
+  format?: string
+  type: 'date_range'
+}
+
+export interface MappingDenseVectorProperty extends MappingPropertyBase {
+  type: 'dense_vector'
+  dims: integer
+}
+
+export type MappingDocValuesProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDateProperty | MappingDateNanosProperty | MappingKeywordProperty | MappingNumberProperty | MappingRangeProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingCompletionProperty | MappingGenericProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingShapeProperty | MappingTokenCountProperty | MappingVersionProperty | MappingWildcardProperty | MappingPointProperty
+
+export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase {
+  doc_values?: boolean
+}
+
+export interface MappingDoubleRangeProperty extends MappingRangePropertyBase {
+  type: 'double_range'
+}
+
+export type MappingDynamicMapping = 'strict' | 'runtime' | 'true' | 'false'
+
+export interface MappingDynamicTemplate {
+  mapping?: MappingProperty
+  match?: string
+  match_mapping_type?: string
+  match_pattern?: MappingMatchType
+  path_match?: string
+  path_unmatch?: string
+  unmatch?: string
+}
+
+export interface MappingFieldAliasProperty extends MappingPropertyBase {
+  path?: Field
+  type: 'alias'
+}
+
+export interface MappingFieldMapping {
+  [key: string]: never
+}
+
+export interface MappingFieldNamesField {
+  enabled: boolean
+}
+
+export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector'
+
+export interface MappingFlattenedProperty extends MappingPropertyBase {
+  boost?: double
+  depth_limit?: integer
+  doc_values?: boolean
+  eager_global_ordinals?: boolean
+  index?: boolean
+  index_options?: MappingIndexOptions
+  null_value?: string
+  similarity?: string
+  split_queries_on_whitespace?: boolean
+  type: 'flattened'
+}
+
+export interface MappingFloatRangeProperty extends MappingRangePropertyBase {
+  type: 'float_range'
+}
+
+export interface MappingGenericProperty extends MappingDocValuesPropertyBase {
+  analyzer: string
+  boost: double
+  fielddata: IndicesStringFielddata
+  ignore_malformed: boolean
+  index: boolean
+  index_options: MappingIndexOptions
+  norms: boolean
+  null_value: string
+  position_increment_gap: integer
+  search_analyzer: string
+  term_vector: MappingTermVectorOption
+  type: string
+}
+
+export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'COUNTERCLOCKWISE' | 'ccw' | 'CCW' | 'left' | 'LEFT' | 'clockwise' | 'CLOCKWISE' | 'cw' | 'CW'
+
+export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase {
+  ignore_malformed?: boolean
+  ignore_z_value?: boolean
+  null_value?: QueryDslGeoLocation
+  type: 'geo_point'
+}
+
+export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase {
+  coerce?: boolean
+  ignore_malformed?: boolean
+  ignore_z_value?: boolean
+  orientation?: MappingGeoOrientation
+  strategy?: MappingGeoStrategy
+  type: 'geo_shape'
+}
+
+export type MappingGeoStrategy = 'recursive' | 'term'
+
+export interface MappingHistogramProperty extends MappingPropertyBase {
+  ignore_malformed?: boolean
+  type: 'histogram'
+}
+
+export interface MappingIndexField {
+  enabled: boolean
+}
+
+export type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets'
+
+export interface MappingIntegerRangeProperty extends MappingRangePropertyBase {
+  type: 'integer_range'
+}
+
+export interface MappingIpProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  index?: boolean
+  null_value?: string
+  type: 'ip'
+}
+
+export interface MappingIpRangeProperty extends MappingRangePropertyBase {
+  type: 'ip_range'
+}
+
+export interface MappingJoinProperty extends MappingPropertyBase {
+  relations?: Record
+  type: 'join'
+}
+
+export interface MappingKeywordProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  eager_global_ordinals?: boolean
+  index?: boolean
+  index_options?: MappingIndexOptions
+  normalizer?: string
+  norms?: boolean
+  null_value?: string
+  split_queries_on_whitespace?: boolean
+  type: 'keyword'
+}
+
+export interface MappingLongRangeProperty extends MappingRangePropertyBase {
+  type: 'long_range'
+}
+
+export type MappingMatchType = 'simple' | 'regex'
+
+export interface MappingMurmur3HashProperty extends MappingDocValuesPropertyBase {
+  type: 'murmur3'
+}
+
+export interface MappingNestedProperty extends MappingCorePropertyBase {
+  enabled?: boolean
+  include_in_parent?: boolean
+  include_in_root?: boolean
+  type: 'nested'
+}
+
+export interface MappingNumberProperty extends MappingDocValuesPropertyBase {
+  boost?: double
+  coerce?: boolean
+  fielddata?: IndicesNumericFielddata
+  ignore_malformed?: boolean
+  index?: boolean
+  null_value?: double
+  scaling_factor?: double
+  type: MappingNumberType
+}
+
+export type MappingNumberType = 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer' | 'long' | 'short' | 'byte' | 'unsigned_long'
+
+export interface MappingObjectProperty extends MappingCorePropertyBase {
+  enabled?: boolean
+  type?: 'object'
+}
+
+export interface MappingPercolatorProperty extends MappingPropertyBase {
+  type: 'percolator'
+}
+
+export interface MappingPointProperty extends MappingDocValuesPropertyBase {
+  ignore_malformed?: boolean
+  ignore_z_value?: boolean
+  null_value?: string
+  type: 'point'
+}
+
+export type MappingProperty = MappingFlattenedProperty | MappingJoinProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingDenseVectorProperty | MappingAggregateMetricDoubleProperty | MappingCoreProperty
+
+export interface MappingPropertyBase {
+  local_metadata?: Metadata
+  meta?: Record
+  name?: PropertyName
+  properties?: Record
+  ignore_above?: integer
+  dynamic?: boolean | MappingDynamicMapping
+  fields?: Record
+}
+
+export type MappingRangeProperty = MappingLongRangeProperty | MappingIpRangeProperty | MappingIntegerRangeProperty | MappingFloatRangeProperty | MappingDoubleRangeProperty | MappingDateRangeProperty
+
+export interface MappingRangePropertyBase extends MappingDocValuesPropertyBase {
+  boost?: double
+  coerce?: boolean
+  index?: boolean
+}
+
+export interface MappingRankFeatureProperty extends MappingPropertyBase {
+  positive_score_impact?: boolean
+  type: 'rank_feature'
+}
+
+export interface MappingRankFeaturesProperty extends MappingPropertyBase {
+  type: 'rank_features'
+}
+
+export interface MappingRoutingField {
+  required: boolean
+}
+
+export interface MappingRuntimeField {
+  format?: string
+  script?: Script
+  type: MappingRuntimeFieldType
+}
+
+export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long'
+
+export type MappingRuntimeFields = Record
+
+export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase {
+  analyzer?: string
+  index?: boolean
+  index_options?: MappingIndexOptions
+  max_shingle_size?: integer
+  norms?: boolean
+  search_analyzer?: string
+  search_quote_analyzer?: string
+  term_vector?: MappingTermVectorOption
+  type: 'search_as_you_type'
+}
+
+export type MappingShapeOrientation = 'right' | 'counterclockwise' | 'ccw' | 'left' | 'clockwise' | 'cw'
+
+export interface MappingShapeProperty extends MappingDocValuesPropertyBase {
+  coerce?: boolean
+  ignore_malformed?: boolean
+  ignore_z_value?: boolean
+  orientation?: MappingShapeOrientation
+  type: 'shape'
+}
+
+export interface MappingSizeField {
+  enabled: boolean
+}
+
+export interface MappingSourceField {
+  compress?: boolean
+  compress_threshold?: string
+  enabled?: boolean
+  excludes?: string[]
+  includes?: string[]
+}
+
+export interface MappingSuggestContext {
+  name: Name
+  path?: Field
+  type: string
+  precision?: integer | string
+}
+
+export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads'
+
+export interface MappingTextIndexPrefixes {
+  max_chars: integer
+  min_chars: integer
+}
+
+export interface MappingTextProperty extends MappingCorePropertyBase {
+  analyzer?: string
+  boost?: double
+  eager_global_ordinals?: boolean
+  fielddata?: boolean
+  fielddata_frequency_filter?: IndicesFielddataFrequencyFilter
+  index?: boolean
+  index_options?: MappingIndexOptions
+  index_phrases?: boolean
+  index_prefixes?: MappingTextIndexPrefixes
+  norms?: boolean
+  position_increment_gap?: integer
+  search_analyzer?: string
+  search_quote_analyzer?: string
+  term_vector?: MappingTermVectorOption
+  type: 'text'
+}
+
+export interface MappingTokenCountProperty extends MappingDocValuesPropertyBase {
+  analyzer?: string
+  boost?: double
+  index?: boolean
+  null_value?: double
+  enable_position_increments?: boolean
+  type: 'token_count'
+}
+
+export interface MappingTypeMapping {
+  all_field?: MappingAllField
+  date_detection?: boolean
+  dynamic?: boolean | MappingDynamicMapping
+  dynamic_date_formats?: string[]
+  dynamic_templates?: Record | Record[]
+  _field_names?: MappingFieldNamesField
+  index_field?: MappingIndexField
+  _meta?: Metadata
+  numeric_detection?: boolean
+  properties?: Record
+  _routing?: MappingRoutingField
+  _size?: MappingSizeField
+  _source?: MappingSourceField
+  runtime?: Record
+}
+
+export interface MappingVersionProperty extends MappingDocValuesPropertyBase {
+  type: 'version'
+}
+
+export interface MappingWildcardProperty extends MappingDocValuesPropertyBase {
+  type: 'wildcard'
+  null_value?: string
+}
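`MappingTypeMapping.properties` appears above as a bare `Record`; the value type parameter was lost in extraction, and in context it should map field names to `MappingProperty`. A sketch of a typed mapping, with hypothetical fields:

const mapping: MappingTypeMapping = {
  properties: {
    title: { type: 'text', analyzer: 'standard' },
    price: { type: 'double' },
    location: { type: 'geo_point' }
  }
}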
QueryDslCombinedFieldsQuery extends QueryDslQueryBase { + fields: Field[] + query: string + auto_generate_synonyms_phrase_query?: boolean + operator?: QueryDslCombinedFieldsOperator + mimimum_should_match?: MinimumShouldMatch + zero_terms_query?: QueryDslCombinedFieldsZeroTerms +} + +export type QueryDslCombinedFieldsZeroTerms = 'none' | 'all' + +export interface QueryDslCommonTermsQuery extends QueryDslQueryBase { + analyzer?: string + cutoff_frequency?: double + high_freq_operator?: QueryDslOperator + low_freq_operator?: QueryDslOperator + minimum_should_match?: MinimumShouldMatch + query: string +} + +export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { + filter: QueryDslQueryContainer +} + +export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase { +} +export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys | +{ [property: string]: QueryDslDecayPlacement } + +export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { +} + +export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { + gt?: DateMath + gte?: DateMath + lt?: DateMath + lte?: DateMath + format?: DateFormat + time_zone?: TimeZone +} + +export type QueryDslDecayFunction = QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction + +export interface QueryDslDecayFunctionBase extends QueryDslScoreFunctionBase { + multi_value_mode?: QueryDslMultiValueMode +} + +export interface QueryDslDecayPlacement { + decay?: double + offset?: TScale + scale?: TScale + origin?: TOrigin +} + +export interface QueryDslDisMaxQuery extends QueryDslQueryBase { + queries: QueryDslQueryContainer[] + tie_breaker?: double +} + +export type QueryDslDistanceFeatureQuery = QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery + +export interface QueryDslDistanceFeatureQueryBase extends QueryDslQueryBase { + origin: TOrigin + pivot: TDistance + field: Field +} + +export interface QueryDslExistsQuery extends QueryDslQueryBase { + field: Field +} + +export interface QueryDslFieldLookup { + id: Id + index?: IndexName + path?: Field + routing?: Routing +} + +export type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal' + +export interface QueryDslFieldValueFactorScoreFunction extends QueryDslScoreFunctionBase { + field: Field + factor?: double + missing?: double + modifier?: QueryDslFieldValueFactorModifier +} + +export type QueryDslFunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min' + +export interface QueryDslFunctionScoreContainer { + exp?: QueryDslDecayFunction + gauss?: QueryDslDecayFunction + linear?: QueryDslDecayFunction + field_value_factor?: QueryDslFieldValueFactorScoreFunction + random_score?: QueryDslRandomScoreFunction + script_score?: QueryDslScriptScoreFunction + filter?: QueryDslQueryContainer + weight?: double +} + +export type QueryDslFunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min' + +export interface QueryDslFunctionScoreQuery extends QueryDslQueryBase { + boost_mode?: QueryDslFunctionBoostMode + functions?: QueryDslFunctionScoreContainer[] + max_boost?: double + min_score?: double + query?: QueryDslQueryContainer + score_mode?: QueryDslFunctionScoreMode +} + +export interface QueryDslFuzzyQuery extends QueryDslQueryBase { + max_expansions?: integer + prefix_length?: integer + rewrite?: MultiTermQueryRewrite + transpositions?: boolean + fuzziness?: Fuzziness + value: string 
+} + +export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { + type?: QueryDslGeoExecution + validation_method?: QueryDslGeoValidationMethod + ignore_unmapped?: boolean +} +export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys | +{ [property: string]: QueryDslBoundingBox } + +export type QueryDslGeoCoordinate = string | double[] | QueryDslThreeDimensionalPoint + +export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { +} +export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys | +{ [property: string]: QueryDslDecayPlacement } + +export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { +} + +export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { + distance?: Distance + distance_type?: GeoDistanceType + validation_method?: QueryDslGeoValidationMethod +} +export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys | +{ [property: string]: QueryDslGeoLocation } + +export type QueryDslGeoExecution = 'memory' | 'indexed' + +export type QueryDslGeoLocation = string | double[] | QueryDslTwoDimensionalPoint + +export interface QueryDslGeoPolygonPoints { + points: QueryDslGeoLocation[] +} + +export interface QueryDslGeoPolygonQueryKeys extends QueryDslQueryBase { + validation_method?: QueryDslGeoValidationMethod + ignore_unmapped?: boolean +} +export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys | +{ [property: string]: QueryDslGeoPolygonPoints } + +export interface QueryDslGeoShapeFieldQuery { + shape?: GeoShape + indexed_shape?: QueryDslFieldLookup + relation?: GeoShapeRelation +} + +export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase { + ignore_unmapped?: boolean +} +export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys | +{ [property: string]: QueryDslGeoShapeFieldQuery } + +export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' + +export interface QueryDslHasChildQuery extends QueryDslQueryBase { + ignore_unmapped?: boolean + inner_hits?: SearchInnerHits + max_children?: integer + min_children?: integer + query: QueryDslQueryContainer + score_mode?: QueryDslChildScoreMode + type: RelationName +} + +export interface QueryDslHasParentQuery extends QueryDslQueryBase { + ignore_unmapped?: boolean + inner_hits?: SearchInnerHits + parent_type: RelationName + query: QueryDslQueryContainer + score?: boolean +} + +export interface QueryDslIdsQuery extends QueryDslQueryBase { + values?: Ids +} + +export interface QueryDslIntervalsAllOf { + intervals: QueryDslIntervalsContainer[] + max_gaps?: integer + ordered?: boolean + filter?: QueryDslIntervalsFilter +} + +export interface QueryDslIntervalsAnyOf { + intervals: QueryDslIntervalsContainer[] + filter?: QueryDslIntervalsFilter +} + +export interface QueryDslIntervalsContainer { + all_of?: QueryDslIntervalsAllOf + any_of?: QueryDslIntervalsAnyOf + fuzzy?: QueryDslIntervalsFuzzy + match?: QueryDslIntervalsMatch + prefix?: QueryDslIntervalsPrefix + wildcard?: QueryDslIntervalsWildcard +} + +export interface QueryDslIntervalsFilter { + after?: QueryDslIntervalsContainer + before?: QueryDslIntervalsContainer + contained_by?: QueryDslIntervalsContainer + containing?: QueryDslIntervalsContainer + not_contained_by?: QueryDslIntervalsContainer + not_containing?: QueryDslIntervalsContainer + not_overlapping?: QueryDslIntervalsContainer + overlapping?: QueryDslIntervalsContainer + script?: Script +} + +export interface QueryDslIntervalsFuzzy { + analyzer?: 
string
+  fuzziness?: Fuzziness
+  prefix_length?: integer
+  term: string
+  transpositions?: boolean
+  use_field?: Field
+}
+
+export interface QueryDslIntervalsMatch {
+  analyzer?: string
+  max_gaps?: integer
+  ordered?: boolean
+  query: string
+  use_field?: Field
+  filter?: QueryDslIntervalsFilter
+}
+
+export interface QueryDslIntervalsPrefix {
+  analyzer?: string
+  prefix: string
+  use_field?: Field
+}
+
+export interface QueryDslIntervalsQuery extends QueryDslQueryBase {
+  all_of?: QueryDslIntervalsAllOf
+  any_of?: QueryDslIntervalsAnyOf
+  fuzzy?: QueryDslIntervalsFuzzy
+  match?: QueryDslIntervalsMatch
+  prefix?: QueryDslIntervalsPrefix
+  wildcard?: QueryDslIntervalsWildcard
+}
+
+export interface QueryDslIntervalsWildcard {
+  analyzer?: string
+  pattern: string
+  use_field?: Field
+}
+
+export type QueryDslLike = string | QueryDslLikeDocument
+
+export interface QueryDslLikeDocument {
+  doc?: any
+  fields?: Field[]
+  _id?: Id
+  _type?: Type
+  _index?: IndexName
+  per_field_analyzer?: Record
+  routing?: Routing
+  version?: VersionNumber
+  version_type?: VersionType
+}
+
+export interface QueryDslMatchAllQuery extends QueryDslQueryBase {
+}
+
+export interface QueryDslMatchBoolPrefixQuery extends QueryDslQueryBase {
+  analyzer?: string
+  fuzziness?: Fuzziness
+  fuzzy_rewrite?: MultiTermQueryRewrite
+  fuzzy_transpositions?: boolean
+  max_expansions?: integer
+  minimum_should_match?: MinimumShouldMatch
+  operator?: QueryDslOperator
+  prefix_length?: integer
+  query: string
+}
+
+export interface QueryDslMatchNoneQuery extends QueryDslQueryBase {
+}
+
+export interface QueryDslMatchPhrasePrefixQuery extends QueryDslQueryBase {
+  analyzer?: string
+  max_expansions?: integer
+  query: string
+  slop?: integer
+  zero_terms_query?: QueryDslZeroTermsQuery
+}
+
+export interface QueryDslMatchPhraseQuery extends QueryDslQueryBase {
+  analyzer?: string
+  query: string
+  slop?: integer
+  zero_terms_query?: QueryDslZeroTermsQuery
+}
+
+export interface QueryDslMatchQuery extends QueryDslQueryBase {
+  analyzer?: string
+  auto_generate_synonyms_phrase_query?: boolean
+  cutoff_frequency?: double
+  fuzziness?: Fuzziness
+  fuzzy_rewrite?: MultiTermQueryRewrite
+  fuzzy_transpositions?: boolean
+  lenient?: boolean
+  max_expansions?: integer
+  minimum_should_match?: MinimumShouldMatch
+  operator?: QueryDslOperator
+  prefix_length?: integer
+  query: string | float | boolean
+  zero_terms_query?: QueryDslZeroTermsQuery
+}
+
+export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase {
+  analyzer?: string
+  boost_terms?: double
+  fail_on_unsupported_field?: boolean
+  fields?: Field[]
+  include?: boolean
+  like: QueryDslLike | QueryDslLike[]
+  max_doc_freq?: integer
+  max_query_terms?: integer
+  max_word_length?: integer
+  min_doc_freq?: integer
+  minimum_should_match?: MinimumShouldMatch
+  min_term_freq?: integer
+  min_word_length?: integer
+  per_field_analyzer?: Record
+  routing?: Routing
+  stop_words?: AnalysisStopWords
+  unlike?: QueryDslLike | QueryDslLike[]
+  version?: VersionNumber
+  version_type?: VersionType
+}
+
+export interface QueryDslMultiMatchQuery extends QueryDslQueryBase {
+  analyzer?: string
+  auto_generate_synonyms_phrase_query?: boolean
+  cutoff_frequency?: double
+  fields?: Fields
+  fuzziness?: Fuzziness
+  fuzzy_rewrite?: MultiTermQueryRewrite
+  fuzzy_transpositions?: boolean
+  lenient?: boolean
+  max_expansions?: integer
+  minimum_should_match?: MinimumShouldMatch
+  operator?: QueryDslOperator
+  prefix_length?: integer
+  query: string
+  slop?: integer
+  tie_breaker?: double
+  type?: QueryDslTextQueryType
+  zero_terms_query?: QueryDslZeroTermsQuery
+}
+
+export type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum'
+
+export interface QueryDslNestedQuery extends QueryDslQueryBase {
+  ignore_unmapped?: boolean
+  inner_hits?: SearchInnerHits
+  path: Field
+  query: QueryDslQueryContainer
+  score_mode?: QueryDslNestedScoreMode
+}
+
+export type QueryDslNestedScoreMode = 'avg' | 'sum' | 'min' | 'max' | 'none'
+
+export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase {
+  gt?: double
+  gte?: double
+  lt?: double
+  lte?: double
+}
+
+export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase {
+}
+export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys |
+{ [property: string]: QueryDslDecayPlacement }
+
+export type QueryDslOperator = 'and' | 'or'
+
+export interface QueryDslParentIdQuery extends QueryDslQueryBase {
+  id?: Id
+  ignore_unmapped?: boolean
+  type?: RelationName
+}
+
+export interface QueryDslPercolateQuery extends QueryDslQueryBase {
+  document?: any
+  documents?: any[]
+  field: Field
+  id?: Id
+  index?: IndexName
+  name?: string
+  preference?: string
+  routing?: Routing
+  version?: VersionNumber
+}
+
+export interface QueryDslPinnedQuery extends QueryDslQueryBase {
+  ids: Id[]
+  organic: QueryDslQueryContainer
+}
+
+export interface QueryDslPrefixQuery extends QueryDslQueryBase {
+  rewrite?: MultiTermQueryRewrite
+  value: string
+  case_insensitive?: boolean
+}
+
+export interface QueryDslQueryBase {
+  boost?: float
+  _name?: string
+}
+
+export interface QueryDslQueryContainer {
+  bool?: QueryDslBoolQuery
+  boosting?: QueryDslBoostingQuery
+  common?: Record
+  combined_fields?: QueryDslCombinedFieldsQuery
+  constant_score?: QueryDslConstantScoreQuery
+  dis_max?: QueryDslDisMaxQuery
+  distance_feature?: QueryDslDistanceFeatureQuery
+  exists?: QueryDslExistsQuery
+  function_score?: QueryDslFunctionScoreQuery
+  fuzzy?: Record
+  geo_bounding_box?: QueryDslGeoBoundingBoxQuery
+  geo_distance?: QueryDslGeoDistanceQuery
+  geo_polygon?: QueryDslGeoPolygonQuery
+  geo_shape?: QueryDslGeoShapeQuery
+  has_child?: QueryDslHasChildQuery
+  has_parent?: QueryDslHasParentQuery
+  ids?: QueryDslIdsQuery
+  intervals?: Record
+  match?: Record
+  match_all?: QueryDslMatchAllQuery
+  match_bool_prefix?: Record
+  match_none?: QueryDslMatchNoneQuery
+  match_phrase?: Record
+  match_phrase_prefix?: Record
+  more_like_this?: QueryDslMoreLikeThisQuery
+  multi_match?: QueryDslMultiMatchQuery
+  nested?: QueryDslNestedQuery
+  parent_id?: QueryDslParentIdQuery
+  percolate?: QueryDslPercolateQuery
+  pinned?: QueryDslPinnedQuery
+  prefix?: Record
+  query_string?: QueryDslQueryStringQuery
+  range?: Record
+  rank_feature?: QueryDslRankFeatureQuery
+  regexp?: Record
+  script?: QueryDslScriptQuery
+  script_score?: QueryDslScriptScoreQuery
+  shape?: QueryDslShapeQuery
+  simple_query_string?: QueryDslSimpleQueryStringQuery
+  span_containing?: QueryDslSpanContainingQuery
+  field_masking_span?: QueryDslSpanFieldMaskingQuery
+  span_first?: QueryDslSpanFirstQuery
+  span_multi?: QueryDslSpanMultiTermQuery
+  span_near?: QueryDslSpanNearQuery
+  span_not?: QueryDslSpanNotQuery
+  span_or?: QueryDslSpanOrQuery
+  span_term?: Record
+  span_within?: QueryDslSpanWithinQuery
+  term?: Record
+  terms?: QueryDslTermsQuery
+  terms_set?: Record
+  wildcard?: Record
+  type?: QueryDslTypeQuery
+}
+
+export interface QueryDslQueryStringQuery extends QueryDslQueryBase {
+  allow_leading_wildcard?: boolean
+  analyzer?: string
+  analyze_wildcard?: boolean
+
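// ---------------------------------------------------------------------------
// [Editorial example, not part of the patch] A minimal sketch of how the query
// DSL interfaces above compose into a single QueryDslQueryContainer. The
// relative import path and the field names are illustrative only.
// ---------------------------------------------------------------------------
import type { QueryDslQueryContainer } from './types' // illustrative path

const query: QueryDslQueryContainer = {
  bool: {
    must: [
      { match: { title: { query: 'javascript client' } } },
      { match_phrase_prefix: { body: { query: 'type defin' } } }
    ],
    filter: [
      { range: { published_at: { gte: 'now-1y' } } }
    ]
  }
}
// ---------------------------------------------------------------------------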
auto_generate_synonyms_phrase_query?: boolean + default_field?: Field + default_operator?: QueryDslOperator + enable_position_increments?: boolean + escape?: boolean + fields?: Field[] + fuzziness?: Fuzziness + fuzzy_max_expansions?: integer + fuzzy_prefix_length?: integer + fuzzy_rewrite?: MultiTermQueryRewrite + fuzzy_transpositions?: boolean + lenient?: boolean + max_determinized_states?: integer + minimum_should_match?: MinimumShouldMatch + phrase_slop?: double + query: string + quote_analyzer?: string + quote_field_suffix?: string + rewrite?: MultiTermQueryRewrite + tie_breaker?: double + time_zone?: TimeZone + type?: QueryDslTextQueryType +} + +export interface QueryDslRandomScoreFunction extends QueryDslScoreFunctionBase { + field?: Field + seed?: long | string +} + +export type QueryDslRangeQuery = QueryDslDateRangeQuery | QueryDslNumberRangeQuery + +export interface QueryDslRangeQueryBase extends QueryDslQueryBase { + relation?: QueryDslRangeRelation +} + +export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' + +export interface QueryDslRankFeatureFunction { + [key: string]: never +} + +export interface QueryDslRankFeatureFunctionLinear { + [key: string]: never +} + +export interface QueryDslRankFeatureFunctionLogarithm { + scaling_factor: float +} + +export interface QueryDslRankFeatureFunctionSaturation { + pivot?: float +} + +export interface QueryDslRankFeatureFunctionSigmoid { + pivot: float + exponent: float +} + +export interface QueryDslRankFeatureQuery extends QueryDslQueryBase { + field: Field + saturation?: QueryDslRankFeatureFunctionSaturation + log?: QueryDslRankFeatureFunctionLogarithm + linear?: QueryDslRankFeatureFunctionLinear + sigmoid?: QueryDslRankFeatureFunctionSigmoid +} + +export interface QueryDslRegexpQuery extends QueryDslQueryBase { + case_insensitive?: boolean + flags?: string + max_determinized_states?: integer + rewrite?: MultiTermQueryRewrite + value: string +} + +export interface QueryDslScoreFunctionBase { + filter?: QueryDslQueryContainer + weight?: double +} + +export interface QueryDslScriptQuery extends QueryDslQueryBase { + script: Script +} + +export interface QueryDslScriptScoreFunction extends QueryDslScoreFunctionBase { + script: Script +} + +export interface QueryDslScriptScoreQuery extends QueryDslQueryBase { + min_score?: float + query: QueryDslQueryContainer + script: Script +} + +export interface QueryDslShapeFieldQuery { + ignore_unmapped?: boolean + indexed_shape?: QueryDslFieldLookup + relation?: ShapeRelation + shape?: GeoShape +} + +export interface QueryDslShapeQueryKeys extends QueryDslQueryBase { +} +export type QueryDslShapeQuery = QueryDslShapeQueryKeys | +{ [property: string]: QueryDslShapeFieldQuery } + +export type QueryDslSimpleQueryStringFlags = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL' + +export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase { + analyzer?: string + analyze_wildcard?: boolean + auto_generate_synonyms_phrase_query?: boolean + default_operator?: QueryDslOperator + fields?: Field[] + flags?: QueryDslSimpleQueryStringFlags | string + fuzzy_max_expansions?: integer + fuzzy_prefix_length?: integer + fuzzy_transpositions?: boolean + lenient?: boolean + minimum_should_match?: MinimumShouldMatch + query: string + quote_field_suffix?: string +} + +export interface QueryDslSpanContainingQuery extends QueryDslQueryBase { + big: QueryDslSpanQuery + little: QueryDslSpanQuery +} + +export 
interface QueryDslSpanFieldMaskingQuery extends QueryDslQueryBase { + field: Field + query: QueryDslSpanQuery +} + +export interface QueryDslSpanFirstQuery extends QueryDslQueryBase { + end: integer + match: QueryDslSpanQuery +} + +export type QueryDslSpanGapQuery = Record + +export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase { + match: QueryDslQueryContainer +} + +export interface QueryDslSpanNearQuery extends QueryDslQueryBase { + clauses: QueryDslSpanQuery[] + in_order?: boolean + slop?: integer +} + +export interface QueryDslSpanNotQuery extends QueryDslQueryBase { + dist?: integer + exclude: QueryDslSpanQuery + include: QueryDslSpanQuery + post?: integer + pre?: integer +} + +export interface QueryDslSpanOrQuery extends QueryDslQueryBase { + clauses: QueryDslSpanQuery[] +} + +export interface QueryDslSpanQuery { + span_containing?: QueryDslSpanContainingQuery + field_masking_span?: QueryDslSpanFieldMaskingQuery + span_first?: QueryDslSpanFirstQuery + span_gap?: QueryDslSpanGapQuery + span_multi?: QueryDslSpanMultiTermQuery + span_near?: QueryDslSpanNearQuery + span_not?: QueryDslSpanNotQuery + span_or?: QueryDslSpanOrQuery + span_term?: Record + span_within?: QueryDslSpanWithinQuery +} + +export interface QueryDslSpanTermQuery extends QueryDslQueryBase { + value: string +} + +export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { + big: QueryDslSpanQuery + little: QueryDslSpanQuery +} + +export interface QueryDslTermQuery extends QueryDslQueryBase { + value: string | float | boolean + case_insensitive?: boolean +} + +export interface QueryDslTermsLookup { + index: IndexName + id: Id + path: Field + routing?: Routing +} + +export interface QueryDslTermsQueryKeys extends QueryDslQueryBase { +} +export type QueryDslTermsQuery = QueryDslTermsQueryKeys | +{ [property: string]: string[] | long[] | QueryDslTermsLookup } + +export interface QueryDslTermsSetFieldQuery { + minimum_should_match_field?: Field + minimum_should_match_script?: Script + terms: string[] +} + +export interface QueryDslTermsSetQueryKeys extends QueryDslQueryBase { +} +export type QueryDslTermsSetQuery = QueryDslTermsSetQueryKeys | +{ [property: string]: QueryDslTermsSetFieldQuery } + +export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' + +export interface QueryDslThreeDimensionalPoint { + lat: double + lon: double + z?: double +} + +export interface QueryDslTwoDimensionalPoint { + lat: double + lon: double +} + +export interface QueryDslTypeQuery extends QueryDslQueryBase { + value: string +} + +export interface QueryDslWildcardQuery extends QueryDslQueryBase { + case_insensitive?: boolean + rewrite?: MultiTermQueryRewrite + value: string +} + +export type QueryDslZeroTermsQuery = 'all' | 'none' + +export interface AsyncSearchAsyncSearch { + aggregations?: Record + _clusters?: ClusterStatistics + fields?: Record + hits: SearchHitsMetadata + max_score?: double + num_reduce_phases?: long + profile?: SearchProfile + pit_id?: Id + _scroll_id?: Id + _shards: ShardStatistics + suggest?: Record[]> + terminated_early?: boolean + timed_out: boolean + took: long +} + +export interface AsyncSearchAsyncSearchDocumentResponseBase extends AsyncSearchAsyncSearchResponseBase { + response: AsyncSearchAsyncSearch +} + +export interface AsyncSearchAsyncSearchResponseBase { + id?: Id + is_partial: boolean + is_running: boolean + expiration_time_in_millis: EpochMillis + start_time_in_millis: EpochMillis +} + +export interface 
AsyncSearchDeleteRequest extends RequestBase { + id: Id +} + +export interface AsyncSearchDeleteResponse extends AcknowledgedResponseBase { +} + +export interface AsyncSearchGetRequest extends RequestBase { + id: Id + keep_alive?: Time + typed_keys?: boolean + wait_for_completion_timeout?: Time +} + +export interface AsyncSearchGetResponse extends AsyncSearchAsyncSearchDocumentResponseBase { +} + +export interface AsyncSearchStatusRequest extends RequestBase { + id: Id +} + +export interface AsyncSearchStatusResponse extends AsyncSearchAsyncSearchResponseBase { + _shards: ShardStatistics + completion_status: integer +} + +export interface AsyncSearchSubmitRequest extends RequestBase { + index?: Indices + batched_reduce_size?: long + wait_for_completion_timeout?: Time + keep_on_completion?: boolean + typed_keys?: boolean + aggs?: Record + allow_no_indices?: boolean + allow_partial_search_results?: boolean + analyzer?: string + analyze_wildcard?: boolean + collapse?: SearchFieldCollapse + default_operator?: DefaultOperator + df?: string + docvalue_fields?: Fields + expand_wildcards?: ExpandWildcards + explain?: boolean + from?: integer + highlight?: SearchHighlight + ignore_throttled?: boolean + ignore_unavailable?: boolean + indices_boost?: Record[] + keep_alive?: Time + lenient?: boolean + max_concurrent_shard_requests?: long + min_score?: double + post_filter?: QueryDslQueryContainer + preference?: string + profile?: boolean + pit?: SearchPointInTimeReference + query?: QueryDslQueryContainer + request_cache?: boolean + rescore?: SearchRescore[] + routing?: Routing + script_fields?: Record + search_after?: SearchSortResults + search_type?: SearchType + sequence_number_primary_term?: boolean + size?: integer + sort?: SearchSort + _source?: boolean | SearchSourceFilter + stats?: string[] + stored_fields?: Fields + suggest?: Record + suggest_field?: Field + suggest_mode?: SuggestMode + suggest_size?: long + suggest_text?: string + terminate_after?: long + timeout?: string + track_scores?: boolean + track_total_hits?: boolean + version?: boolean + fields?: (Field | DateField)[] + runtime_mappings?: MappingRuntimeFields +} + +export interface AsyncSearchSubmitResponse extends AsyncSearchAsyncSearchDocumentResponseBase { +} + +export interface AutoscalingAutoscalingPolicy { + roles: string[] + deciders: Record +} + +export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase { + name: Name +} + +export interface AutoscalingDeleteAutoscalingPolicyResponse extends AcknowledgedResponseBase { +} + +export interface AutoscalingGetAutoscalingCapacityAutoscalingCapacity { + node: AutoscalingGetAutoscalingCapacityAutoscalingResources + total: AutoscalingGetAutoscalingCapacityAutoscalingResources +} + +export interface AutoscalingGetAutoscalingCapacityAutoscalingDecider { + required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity + reason_summary?: string + reason_details?: any +} + +export interface AutoscalingGetAutoscalingCapacityAutoscalingDeciders { + required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity + current_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity + current_nodes: AutoscalingGetAutoscalingCapacityAutoscalingNode[] + deciders: Record +} + +export interface AutoscalingGetAutoscalingCapacityAutoscalingNode { + name: NodeName +} + +export interface AutoscalingGetAutoscalingCapacityAutoscalingResources { + storage: integer + memory: integer +} + +export interface AutoscalingGetAutoscalingCapacityRequest extends RequestBase { 
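// ---------------------------------------------------------------------------
// [Editorial example, not part of the patch] Submitting an async search and
// polling its status, matching the AsyncSearchSubmit/Status shapes above.
// Whether the Client methods are wired to these generated types at this point
// in the series is an assumption.
// ---------------------------------------------------------------------------
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

async function submitAsyncSearch () {
  const { body } = await client.asyncSearch.submit({
    index: 'logs-*',
    wait_for_completion_timeout: '2s',
    body: { query: { match: { message: 'error' } }, size: 10 }
  })
  // the id is only present while the search is still running or kept alive
  if (body.is_running) {
    const status = await client.asyncSearch.status({ id: body.id })
    console.log(status.body.completion_status)
  }
}
// ---------------------------------------------------------------------------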
+} + +export interface AutoscalingGetAutoscalingCapacityResponse { + policies: Record +} + +export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { + name: Name +} + +export type AutoscalingGetAutoscalingPolicyResponse = AutoscalingAutoscalingPolicy + +export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { + name: Name + policy?: AutoscalingAutoscalingPolicy +} + +export interface AutoscalingPutAutoscalingPolicyResponse extends AcknowledgedResponseBase { +} + +export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { +} + +export interface CatAliasesAliasesRecord { + alias?: string + a?: string + index?: IndexName + i?: IndexName + idx?: IndexName + filter?: string + f?: string + fi?: string + 'routing.index'?: string + ri?: string + routingIndex?: string + 'routing.search'?: string + rs?: string + routingSearch?: string + is_write_index?: string + w?: string + isWriteIndex?: string +} + +export interface CatAliasesRequest extends CatCatRequestBase { + name?: Names + expand_wildcards?: ExpandWildcards +} + +export type CatAliasesResponse = CatAliasesAliasesRecord[] + +export interface CatAllocationAllocationRecord { + shards?: string + s?: string + 'disk.indices'?: ByteSize + di?: ByteSize + diskIndices?: ByteSize + 'disk.used'?: ByteSize + du?: ByteSize + diskUsed?: ByteSize + 'disk.avail'?: ByteSize + da?: ByteSize + diskAvail?: ByteSize + 'disk.total'?: ByteSize + dt?: ByteSize + diskTotal?: ByteSize + 'disk.percent'?: Percentage + dp?: Percentage + diskPercent?: Percentage + host?: Host + h?: Host + ip?: Ip + node?: string + n?: string +} + +export interface CatAllocationRequest extends CatCatRequestBase { + node_id?: NodeIds + bytes?: Bytes +} + +export type CatAllocationResponse = CatAllocationAllocationRecord[] + +export interface CatCountCountRecord { + epoch?: EpochMillis + t?: EpochMillis + time?: EpochMillis + timestamp?: DateString + ts?: DateString + hms?: DateString + hhmmss?: DateString + count?: string + dc?: string + 'docs.count'?: string + docsCount?: string +} + +export interface CatCountRequest extends CatCatRequestBase { + index?: Indices +} + +export type CatCountResponse = CatCountCountRecord[] + +export interface CatFielddataFielddataRecord { + id?: string + host?: string + h?: string + ip?: string + node?: string + n?: string + field?: string + f?: string + size?: string +} + +export interface CatFielddataRequest extends CatCatRequestBase { + fields?: Fields + bytes?: Bytes +} + +export type CatFielddataResponse = CatFielddataFielddataRecord[] + +export interface CatHealthHealthRecord { + epoch?: EpochMillis + time?: EpochMillis + timestamp?: DateString + ts?: DateString + hms?: DateString + hhmmss?: DateString + cluster?: string + cl?: string + status?: string + st?: string + 'node.total'?: string + nt?: string + nodeTotal?: string + 'node.data'?: string + nd?: string + nodeData?: string + shards?: string + t?: string + sh?: string + 'shards.total'?: string + shardsTotal?: string + pri?: string + p?: string + 'shards.primary'?: string + shardsPrimary?: string + relo?: string + r?: string + 'shards.relocating'?: string + shardsRelocating?: string + init?: string + i?: string + 'shards.initializing'?: string + shardsInitializing?: string + unassign?: string + u?: string + 'shards.unassigned'?: string + shardsUnassigned?: string + pending_tasks?: string + pt?: string + pendingTasks?: string + max_task_wait_time?: string + mtwt?: string + maxTaskWaitTime?: string + active_shards_percent?: 
string + asp?: string + activeShardsPercent?: string +} + +export interface CatHealthRequest extends CatCatRequestBase { + include_timestamp?: boolean + ts?: boolean +} + +export type CatHealthResponse = CatHealthHealthRecord[] + +export interface CatHelpHelpRecord { + endpoint: string +} + +export interface CatHelpRequest extends CatCatRequestBase { +} + +export type CatHelpResponse = CatHelpHelpRecord[] + +export interface CatIndicesIndicesRecord { + health?: string + h?: string + status?: string + s?: string + index?: string + i?: string + idx?: string + uuid?: string + id?: string + pri?: string + p?: string + 'shards.primary'?: string + shardsPrimary?: string + rep?: string + r?: string + 'shards.replica'?: string + shardsReplica?: string + 'docs.count'?: string + dc?: string + docsCount?: string + 'docs.deleted'?: string + dd?: string + docsDeleted?: string + 'creation.date'?: string + cd?: string + 'creation.date.string'?: string + cds?: string + 'store.size'?: string + ss?: string + storeSize?: string + 'pri.store.size'?: string + 'completion.size'?: string + cs?: string + completionSize?: string + 'pri.completion.size'?: string + 'fielddata.memory_size'?: string + fm?: string + fielddataMemory?: string + 'pri.fielddata.memory_size'?: string + 'fielddata.evictions'?: string + fe?: string + fielddataEvictions?: string + 'pri.fielddata.evictions'?: string + 'query_cache.memory_size'?: string + qcm?: string + queryCacheMemory?: string + 'pri.query_cache.memory_size'?: string + 'query_cache.evictions'?: string + qce?: string + queryCacheEvictions?: string + 'pri.query_cache.evictions'?: string + 'request_cache.memory_size'?: string + rcm?: string + requestCacheMemory?: string + 'pri.request_cache.memory_size'?: string + 'request_cache.evictions'?: string + rce?: string + requestCacheEvictions?: string + 'pri.request_cache.evictions'?: string + 'request_cache.hit_count'?: string + rchc?: string + requestCacheHitCount?: string + 'pri.request_cache.hit_count'?: string + 'request_cache.miss_count'?: string + rcmc?: string + requestCacheMissCount?: string + 'pri.request_cache.miss_count'?: string + 'flush.total'?: string + ft?: string + flushTotal?: string + 'pri.flush.total'?: string + 'flush.total_time'?: string + ftt?: string + flushTotalTime?: string + 'pri.flush.total_time'?: string + 'get.current'?: string + gc?: string + getCurrent?: string + 'pri.get.current'?: string + 'get.time'?: string + gti?: string + getTime?: string + 'pri.get.time'?: string + 'get.total'?: string + gto?: string + getTotal?: string + 'pri.get.total'?: string + 'get.exists_time'?: string + geti?: string + getExistsTime?: string + 'pri.get.exists_time'?: string + 'get.exists_total'?: string + geto?: string + getExistsTotal?: string + 'pri.get.exists_total'?: string + 'get.missing_time'?: string + gmti?: string + getMissingTime?: string + 'pri.get.missing_time'?: string + 'get.missing_total'?: string + gmto?: string + getMissingTotal?: string + 'pri.get.missing_total'?: string + 'indexing.delete_current'?: string + idc?: string + indexingDeleteCurrent?: string + 'pri.indexing.delete_current'?: string + 'indexing.delete_time'?: string + idti?: string + indexingDeleteTime?: string + 'pri.indexing.delete_time'?: string + 'indexing.delete_total'?: string + idto?: string + indexingDeleteTotal?: string + 'pri.indexing.delete_total'?: string + 'indexing.index_current'?: string + iic?: string + indexingIndexCurrent?: string + 'pri.indexing.index_current'?: string + 'indexing.index_time'?: string + iiti?: string + 
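// ---------------------------------------------------------------------------
// [Editorial example, not part of the patch] Each Cat*Record interface above
// describes one row of `format=json` cat output, and each Cat*Response is an
// array of rows. A sketch; the import path and the cast are assumptions about
// how these generated types are consumed.
// ---------------------------------------------------------------------------
import { Client } from '@elastic/elasticsearch'
import type { CatHealthResponse } from './types' // illustrative path
const client = new Client({ node: 'http://localhost:9200/' })

async function printClusterHealthRows () {
  const { body } = await client.cat.health({ format: 'json' })
  for (const row of body as CatHealthResponse) {
    console.log(row.status, row['node.total'], row['shards.unassigned'])
  }
}
// ---------------------------------------------------------------------------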
indexingIndexTime?: string + 'pri.indexing.index_time'?: string + 'indexing.index_total'?: string + iito?: string + indexingIndexTotal?: string + 'pri.indexing.index_total'?: string + 'indexing.index_failed'?: string + iif?: string + indexingIndexFailed?: string + 'pri.indexing.index_failed'?: string + 'merges.current'?: string + mc?: string + mergesCurrent?: string + 'pri.merges.current'?: string + 'merges.current_docs'?: string + mcd?: string + mergesCurrentDocs?: string + 'pri.merges.current_docs'?: string + 'merges.current_size'?: string + mcs?: string + mergesCurrentSize?: string + 'pri.merges.current_size'?: string + 'merges.total'?: string + mt?: string + mergesTotal?: string + 'pri.merges.total'?: string + 'merges.total_docs'?: string + mtd?: string + mergesTotalDocs?: string + 'pri.merges.total_docs'?: string + 'merges.total_size'?: string + mts?: string + mergesTotalSize?: string + 'pri.merges.total_size'?: string + 'merges.total_time'?: string + mtt?: string + mergesTotalTime?: string + 'pri.merges.total_time'?: string + 'refresh.total'?: string + rto?: string + refreshTotal?: string + 'pri.refresh.total'?: string + 'refresh.time'?: string + rti?: string + refreshTime?: string + 'pri.refresh.time'?: string + 'refresh.external_total'?: string + reto?: string + 'pri.refresh.external_total'?: string + 'refresh.external_time'?: string + reti?: string + 'pri.refresh.external_time'?: string + 'refresh.listeners'?: string + rli?: string + refreshListeners?: string + 'pri.refresh.listeners'?: string + 'search.fetch_current'?: string + sfc?: string + searchFetchCurrent?: string + 'pri.search.fetch_current'?: string + 'search.fetch_time'?: string + sfti?: string + searchFetchTime?: string + 'pri.search.fetch_time'?: string + 'search.fetch_total'?: string + sfto?: string + searchFetchTotal?: string + 'pri.search.fetch_total'?: string + 'search.open_contexts'?: string + so?: string + searchOpenContexts?: string + 'pri.search.open_contexts'?: string + 'search.query_current'?: string + sqc?: string + searchQueryCurrent?: string + 'pri.search.query_current'?: string + 'search.query_time'?: string + sqti?: string + searchQueryTime?: string + 'pri.search.query_time'?: string + 'search.query_total'?: string + sqto?: string + searchQueryTotal?: string + 'pri.search.query_total'?: string + 'search.scroll_current'?: string + scc?: string + searchScrollCurrent?: string + 'pri.search.scroll_current'?: string + 'search.scroll_time'?: string + scti?: string + searchScrollTime?: string + 'pri.search.scroll_time'?: string + 'search.scroll_total'?: string + scto?: string + searchScrollTotal?: string + 'pri.search.scroll_total'?: string + 'segments.count'?: string + sc?: string + segmentsCount?: string + 'pri.segments.count'?: string + 'segments.memory'?: string + sm?: string + segmentsMemory?: string + 'pri.segments.memory'?: string + 'segments.index_writer_memory'?: string + siwm?: string + segmentsIndexWriterMemory?: string + 'pri.segments.index_writer_memory'?: string + 'segments.version_map_memory'?: string + svmm?: string + segmentsVersionMapMemory?: string + 'pri.segments.version_map_memory'?: string + 'segments.fixed_bitset_memory'?: string + sfbm?: string + fixedBitsetMemory?: string + 'pri.segments.fixed_bitset_memory'?: string + 'warmer.current'?: string + wc?: string + warmerCurrent?: string + 'pri.warmer.current'?: string + 'warmer.total'?: string + wto?: string + warmerTotal?: string + 'pri.warmer.total'?: string + 'warmer.total_time'?: string + wtt?: string + warmerTotalTime?: string + 
'pri.warmer.total_time'?: string + 'suggest.current'?: string + suc?: string + suggestCurrent?: string + 'pri.suggest.current'?: string + 'suggest.time'?: string + suti?: string + suggestTime?: string + 'pri.suggest.time'?: string + 'suggest.total'?: string + suto?: string + suggestTotal?: string + 'pri.suggest.total'?: string + 'memory.total'?: string + tm?: string + memoryTotal?: string + 'pri.memory.total'?: string + 'search.throttled'?: string + sth?: string + 'bulk.total_operations'?: string + bto?: string + bulkTotalOperation?: string + 'pri.bulk.total_operations'?: string + 'bulk.total_time'?: string + btti?: string + bulkTotalTime?: string + 'pri.bulk.total_time'?: string + 'bulk.total_size_in_bytes'?: string + btsi?: string + bulkTotalSizeInBytes?: string + 'pri.bulk.total_size_in_bytes'?: string + 'bulk.avg_time'?: string + bati?: string + bulkAvgTime?: string + 'pri.bulk.avg_time'?: string + 'bulk.avg_size_in_bytes'?: string + basi?: string + bulkAvgSizeInBytes?: string + 'pri.bulk.avg_size_in_bytes'?: string +} + +export interface CatIndicesRequest extends CatCatRequestBase { + index?: Indices + bytes?: Bytes + expand_wildcards?: ExpandWildcards + health?: Health + include_unloaded_segments?: boolean + pri?: boolean +} + +export type CatIndicesResponse = CatIndicesIndicesRecord[] + +export interface CatMasterMasterRecord { + id?: string + host?: string + h?: string + ip?: string + node?: string + n?: string +} + +export interface CatMasterRequest extends CatCatRequestBase { +} + +export type CatMasterResponse = CatMasterMasterRecord[] + +export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { + id?: Id + type?: Type + t?: Type + create_time?: string + ct?: string + createTime?: string + version?: VersionString + v?: VersionString + source_index?: IndexName + si?: IndexName + sourceIndex?: IndexName + dest_index?: IndexName + di?: IndexName + destIndex?: IndexName + description?: string + d?: string + model_memory_limit?: string + mml?: string + modelMemoryLimit?: string + state?: string + s?: string + failure_reason?: string + fr?: string + failureReason?: string + progress?: string + p?: string + assignment_explanation?: string + ae?: string + assignmentExplanation?: string + 'node.id'?: Id + ni?: Id + nodeId?: Id + 'node.name'?: Name + nn?: Name + nodeName?: Name + 'node.ephemeral_id'?: Id + ne?: Id + nodeEphemeralId?: Id + 'node.address'?: string + na?: string + nodeAddress?: string +} + +export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { + id?: Id + allow_no_match?: boolean + bytes?: Bytes +} + +export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] + +export interface CatMlDatafeedsDatafeedsRecord { + id?: string + state?: MlDatafeedState + s?: MlDatafeedState + assignment_explanation?: string + ae?: string + 'buckets.count'?: string + bc?: string + bucketsCount?: string + 'search.count'?: string + sc?: string + searchCount?: string + 'search.time'?: string + st?: string + searchTime?: string + 'search.bucket_avg'?: string + sba?: string + searchBucketAvg?: string + 'search.exp_avg_hour'?: string + seah?: string + searchExpAvgHour?: string + 'node.id'?: string + ni?: string + nodeId?: string + 'node.name'?: string + nn?: string + nodeName?: string + 'node.ephemeral_id'?: string + ne?: string + nodeEphemeralId?: string + 'node.address'?: string + na?: string + nodeAddress?: string +} + +export interface CatMlDatafeedsRequest extends CatCatRequestBase { + datafeed_id?: Id + allow_no_datafeeds?: 
boolean +} + +export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] + +export interface CatMlJobsJobsRecord { + id?: Id + state?: MlJobState + s?: MlJobState + opened_time?: string + ot?: string + assignment_explanation?: string + ae?: string + 'data.processed_records'?: string + dpr?: string + dataProcessedRecords?: string + 'data.processed_fields'?: string + dpf?: string + dataProcessedFields?: string + 'data.input_bytes'?: ByteSize + dib?: ByteSize + dataInputBytes?: ByteSize + 'data.input_records'?: string + dir?: string + dataInputRecords?: string + 'data.input_fields'?: string + dif?: string + dataInputFields?: string + 'data.invalid_dates'?: string + did?: string + dataInvalidDates?: string + 'data.missing_fields'?: string + dmf?: string + dataMissingFields?: string + 'data.out_of_order_timestamps'?: string + doot?: string + dataOutOfOrderTimestamps?: string + 'data.empty_buckets'?: string + deb?: string + dataEmptyBuckets?: string + 'data.sparse_buckets'?: string + dsb?: string + dataSparseBuckets?: string + 'data.buckets'?: string + db?: string + dataBuckets?: string + 'data.earliest_record'?: string + der?: string + dataEarliestRecord?: string + 'data.latest_record'?: string + dlr?: string + dataLatestRecord?: string + 'data.last'?: string + dl?: string + dataLast?: string + 'data.last_empty_bucket'?: string + dleb?: string + dataLastEmptyBucket?: string + 'data.last_sparse_bucket'?: string + dlsb?: string + dataLastSparseBucket?: string + 'model.bytes'?: ByteSize + mb?: ByteSize + modelBytes?: ByteSize + 'model.memory_status'?: MlMemoryStatus + mms?: MlMemoryStatus + modelMemoryStatus?: MlMemoryStatus + 'model.bytes_exceeded'?: ByteSize + mbe?: ByteSize + modelBytesExceeded?: ByteSize + 'model.memory_limit'?: string + mml?: string + modelMemoryLimit?: string + 'model.by_fields'?: string + mbf?: string + modelByFields?: string + 'model.over_fields'?: string + mof?: string + modelOverFields?: string + 'model.partition_fields'?: string + mpf?: string + modelPartitionFields?: string + 'model.bucket_allocation_failures'?: string + mbaf?: string + modelBucketAllocationFailures?: string + 'model.categorization_status'?: MlCategorizationStatus + mcs?: MlCategorizationStatus + modelCategorizationStatus?: MlCategorizationStatus + 'model.categorized_doc_count'?: string + mcdc?: string + modelCategorizedDocCount?: string + 'model.total_category_count'?: string + mtcc?: string + modelTotalCategoryCount?: string + 'model.frequent_category_count'?: string + modelFrequentCategoryCount?: string + 'model.rare_category_count'?: string + mrcc?: string + modelRareCategoryCount?: string + 'model.dead_category_count'?: string + mdcc?: string + modelDeadCategoryCount?: string + 'model.failed_category_count'?: string + mfcc?: string + modelFailedCategoryCount?: string + 'model.log_time'?: string + mlt?: string + modelLogTime?: string + 'model.timestamp'?: string + mt?: string + modelTimestamp?: string + 'forecasts.total'?: string + ft?: string + forecastsTotal?: string + 'forecasts.memory.min'?: string + fmmin?: string + forecastsMemoryMin?: string + 'forecasts.memory.max'?: string + fmmax?: string + forecastsMemoryMax?: string + 'forecasts.memory.avg'?: string + fmavg?: string + forecastsMemoryAvg?: string + 'forecasts.memory.total'?: string + fmt?: string + forecastsMemoryTotal?: string + 'forecasts.records.min'?: string + frmin?: string + forecastsRecordsMin?: string + 'forecasts.records.max'?: string + frmax?: string + forecastsRecordsMax?: string + 'forecasts.records.avg'?: string + 
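// ---------------------------------------------------------------------------
// [Editorial example, not part of the patch] Reading the ML anomaly job cat
// output described by CatMlJobsJobsRecord. A sketch under the same import
// assumptions as the earlier examples.
// ---------------------------------------------------------------------------
import { Client } from '@elastic/elasticsearch'
import type { CatMlJobsResponse } from './types' // illustrative path
const client = new Client({ node: 'http://localhost:9200/' })

async function printJobMemoryStatus () {
  const { body } = await client.cat.mlJobs({ format: 'json', bytes: 'mb' })
  for (const job of body as CatMlJobsResponse) {
    console.log(job.id, job.state, job['model.memory_status'])
  }
}
// ---------------------------------------------------------------------------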
fravg?: string + forecastsRecordsAvg?: string + 'forecasts.records.total'?: string + frt?: string + forecastsRecordsTotal?: string + 'forecasts.time.min'?: string + ftmin?: string + forecastsTimeMin?: string + 'forecasts.time.max'?: string + ftmax?: string + forecastsTimeMax?: string + 'forecasts.time.avg'?: string + ftavg?: string + forecastsTimeAvg?: string + 'forecasts.time.total'?: string + ftt?: string + forecastsTimeTotal?: string + 'node.id'?: NodeId + ni?: NodeId + nodeId?: NodeId + 'node.name'?: string + nn?: string + nodeName?: string + 'node.ephemeral_id'?: NodeId + ne?: NodeId + nodeEphemeralId?: NodeId + 'node.address'?: string + na?: string + nodeAddress?: string + 'buckets.count'?: string + bc?: string + bucketsCount?: string + 'buckets.time.total'?: string + btt?: string + bucketsTimeTotal?: string + 'buckets.time.min'?: string + btmin?: string + bucketsTimeMin?: string + 'buckets.time.max'?: string + btmax?: string + bucketsTimeMax?: string + 'buckets.time.exp_avg'?: string + btea?: string + bucketsTimeExpAvg?: string + 'buckets.time.exp_avg_hour'?: string + bteah?: string + bucketsTimeExpAvgHour?: string +} + +export interface CatMlJobsRequest extends CatCatRequestBase { + job_id?: Id + allow_no_jobs?: boolean + bytes?: Bytes +} + +export type CatMlJobsResponse = CatMlJobsJobsRecord[] + +export interface CatMlTrainedModelsRequest extends CatCatRequestBase { + model_id?: Id + allow_no_match?: boolean + bytes?: Bytes + from?: integer + size?: integer +} + +export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[] + +export interface CatMlTrainedModelsTrainedModelsRecord { + id?: Id + created_by?: string + c?: string + createdBy?: string + heap_size?: ByteSize + hs?: ByteSize + modelHeapSize?: ByteSize + operations?: string + o?: string + modelOperations?: string + license?: string + l?: string + create_time?: DateString + ct?: DateString + version?: VersionString + v?: VersionString + description?: string + d?: string + 'ingest.pipelines'?: string + ip?: string + ingestPipelines?: string + 'ingest.count'?: string + ic?: string + ingestCount?: string + 'ingest.time'?: string + it?: string + ingestTime?: string + 'ingest.current'?: string + icurr?: string + ingestCurrent?: string + 'ingest.failed'?: string + if?: string + ingestFailed?: string + 'data_frame.id'?: string + dfid?: string + dataFrameAnalytics?: string + 'data_frame.create_time'?: string + dft?: string + dataFrameAnalyticsTime?: string + 'data_frame.source_index'?: string + dfsi?: string + dataFrameAnalyticsSrcIndex?: string + 'data_frame.analysis'?: string + dfa?: string + dataFrameAnalyticsAnalysis?: string + type?: string +} + +export interface CatNodeattrsNodeAttributesRecord { + node?: string + id?: string + pid?: string + host?: string + h?: string + ip?: string + i?: string + port?: string + attr?: string + value?: string +} + +export interface CatNodeattrsRequest extends CatCatRequestBase { +} + +export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] + +export interface CatNodesNodesRecord { + id?: Id + nodeId?: Id + pid?: string + p?: string + ip?: string + i?: string + port?: string + po?: string + http_address?: string + http?: string + version?: VersionString + v?: VersionString + flavor?: string + f?: string + type?: Type + t?: Type + build?: string + b?: string + jdk?: string + j?: string + 'disk.total'?: ByteSize + dt?: ByteSize + diskTotal?: ByteSize + 'disk.used'?: ByteSize + du?: ByteSize + diskUsed?: ByteSize + 'disk.avail'?: ByteSize + d?: ByteSize + da?: 
ByteSize + disk?: ByteSize + diskAvail?: ByteSize + 'disk.used_percent'?: Percentage + dup?: Percentage + diskUsedPercent?: Percentage + 'heap.current'?: string + hc?: string + heapCurrent?: string + 'heap.percent'?: Percentage + hp?: Percentage + heapPercent?: Percentage + 'heap.max'?: string + hm?: string + heapMax?: string + 'ram.current'?: string + rc?: string + ramCurrent?: string + 'ram.percent'?: Percentage + rp?: Percentage + ramPercent?: Percentage + 'ram.max'?: string + rn?: string + ramMax?: string + 'file_desc.current'?: string + fdc?: string + fileDescriptorCurrent?: string + 'file_desc.percent'?: Percentage + fdp?: Percentage + fileDescriptorPercent?: Percentage + 'file_desc.max'?: string + fdm?: string + fileDescriptorMax?: string + cpu?: string + load_1m?: string + load_5m?: string + load_15m?: string + l?: string + uptime?: string + u?: string + 'node.role'?: string + r?: string + role?: string + nodeRole?: string + master?: string + m?: string + name?: Name + n?: Name + 'completion.size'?: string + cs?: string + completionSize?: string + 'fielddata.memory_size'?: string + fm?: string + fielddataMemory?: string + 'fielddata.evictions'?: string + fe?: string + fielddataEvictions?: string + 'query_cache.memory_size'?: string + qcm?: string + queryCacheMemory?: string + 'query_cache.evictions'?: string + qce?: string + queryCacheEvictions?: string + 'query_cache.hit_count'?: string + qchc?: string + queryCacheHitCount?: string + 'query_cache.miss_count'?: string + qcmc?: string + queryCacheMissCount?: string + 'request_cache.memory_size'?: string + rcm?: string + requestCacheMemory?: string + 'request_cache.evictions'?: string + rce?: string + requestCacheEvictions?: string + 'request_cache.hit_count'?: string + rchc?: string + requestCacheHitCount?: string + 'request_cache.miss_count'?: string + rcmc?: string + requestCacheMissCount?: string + 'flush.total'?: string + ft?: string + flushTotal?: string + 'flush.total_time'?: string + ftt?: string + flushTotalTime?: string + 'get.current'?: string + gc?: string + getCurrent?: string + 'get.time'?: string + gti?: string + getTime?: string + 'get.total'?: string + gto?: string + getTotal?: string + 'get.exists_time'?: string + geti?: string + getExistsTime?: string + 'get.exists_total'?: string + geto?: string + getExistsTotal?: string + 'get.missing_time'?: string + gmti?: string + getMissingTime?: string + 'get.missing_total'?: string + gmto?: string + getMissingTotal?: string + 'indexing.delete_current'?: string + idc?: string + indexingDeleteCurrent?: string + 'indexing.delete_time'?: string + idti?: string + indexingDeleteTime?: string + 'indexing.delete_total'?: string + idto?: string + indexingDeleteTotal?: string + 'indexing.index_current'?: string + iic?: string + indexingIndexCurrent?: string + 'indexing.index_time'?: string + iiti?: string + indexingIndexTime?: string + 'indexing.index_total'?: string + iito?: string + indexingIndexTotal?: string + 'indexing.index_failed'?: string + iif?: string + indexingIndexFailed?: string + 'merges.current'?: string + mc?: string + mergesCurrent?: string + 'merges.current_docs'?: string + mcd?: string + mergesCurrentDocs?: string + 'merges.current_size'?: string + mcs?: string + mergesCurrentSize?: string + 'merges.total'?: string + mt?: string + mergesTotal?: string + 'merges.total_docs'?: string + mtd?: string + mergesTotalDocs?: string + 'merges.total_size'?: string + mts?: string + mergesTotalSize?: string + 'merges.total_time'?: string + mtt?: string + mergesTotalTime?: 
string + 'refresh.total'?: string + 'refresh.time'?: string + 'refresh.external_total'?: string + rto?: string + refreshTotal?: string + 'refresh.external_time'?: string + rti?: string + refreshTime?: string + 'refresh.listeners'?: string + rli?: string + refreshListeners?: string + 'script.compilations'?: string + scrcc?: string + scriptCompilations?: string + 'script.cache_evictions'?: string + scrce?: string + scriptCacheEvictions?: string + 'script.compilation_limit_triggered'?: string + scrclt?: string + scriptCacheCompilationLimitTriggered?: string + 'search.fetch_current'?: string + sfc?: string + searchFetchCurrent?: string + 'search.fetch_time'?: string + sfti?: string + searchFetchTime?: string + 'search.fetch_total'?: string + sfto?: string + searchFetchTotal?: string + 'search.open_contexts'?: string + so?: string + searchOpenContexts?: string + 'search.query_current'?: string + sqc?: string + searchQueryCurrent?: string + 'search.query_time'?: string + sqti?: string + searchQueryTime?: string + 'search.query_total'?: string + sqto?: string + searchQueryTotal?: string + 'search.scroll_current'?: string + scc?: string + searchScrollCurrent?: string + 'search.scroll_time'?: string + scti?: string + searchScrollTime?: string + 'search.scroll_total'?: string + scto?: string + searchScrollTotal?: string + 'segments.count'?: string + sc?: string + segmentsCount?: string + 'segments.memory'?: string + sm?: string + segmentsMemory?: string + 'segments.index_writer_memory'?: string + siwm?: string + segmentsIndexWriterMemory?: string + 'segments.version_map_memory'?: string + svmm?: string + segmentsVersionMapMemory?: string + 'segments.fixed_bitset_memory'?: string + sfbm?: string + fixedBitsetMemory?: string + 'suggest.current'?: string + suc?: string + suggestCurrent?: string + 'suggest.time'?: string + suti?: string + suggestTime?: string + 'suggest.total'?: string + suto?: string + suggestTotal?: string + 'bulk.total_operations'?: string + bto?: string + bulkTotalOperations?: string + 'bulk.total_time'?: string + btti?: string + bulkTotalTime?: string + 'bulk.total_size_in_bytes'?: string + btsi?: string + bulkTotalSizeInBytes?: string + 'bulk.avg_time'?: string + bati?: string + bulkAvgTime?: string + 'bulk.avg_size_in_bytes'?: string + basi?: string + bulkAvgSizeInBytes?: string +} + +export interface CatNodesRequest extends CatCatRequestBase { + bytes?: Bytes + full_id?: boolean | string +} + +export type CatNodesResponse = CatNodesNodesRecord[] + +export interface CatPendingTasksPendingTasksRecord { + insertOrder?: string + o?: string + timeInQueue?: string + t?: string + priority?: string + p?: string + source?: string + s?: string +} + +export interface CatPendingTasksRequest extends CatCatRequestBase { +} + +export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] + +export interface CatPluginsPluginsRecord { + id?: NodeId + name?: Name + n?: Name + component?: string + c?: string + version?: VersionString + v?: VersionString + description?: string + d?: string + type?: Type + t?: Type +} + +export interface CatPluginsRequest extends CatCatRequestBase { +} + +export type CatPluginsResponse = CatPluginsPluginsRecord[] + +export interface CatRecoveryRecoveryRecord { + index?: IndexName + i?: IndexName + idx?: IndexName + shard?: string + s?: string + sh?: string + start_time?: string + start?: string + start_time_millis?: string + start_millis?: string + stop_time?: string + stop?: string + stop_time_millis?: string + stop_millis?: string + time?: string + t?: 
string + ti?: string + type?: Type + ty?: Type + stage?: string + st?: string + source_host?: string + shost?: string + source_node?: string + snode?: string + target_host?: string + thost?: string + target_node?: string + tnode?: string + repository?: string + rep?: string + snapshot?: string + snap?: string + files?: string + f?: string + files_recovered?: string + fr?: string + files_percent?: Percentage + fp?: Percentage + files_total?: string + tf?: string + bytes?: string + b?: string + bytes_recovered?: string + br?: string + bytes_percent?: Percentage + bp?: Percentage + bytes_total?: string + tb?: string + translog_ops?: string + to?: string + translog_ops_recovered?: string + tor?: string + translog_ops_percent?: Percentage + top?: Percentage +} + +export interface CatRecoveryRequest extends CatCatRequestBase { + index?: Indices + active_only?: boolean + bytes?: Bytes + detailed?: boolean +} + +export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] + +export interface CatRepositoriesRepositoriesRecord { + id?: string + repoId?: string + type?: string + t?: string +} + +export interface CatRepositoriesRequest extends CatCatRequestBase { +} + +export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] + +export interface CatSegmentsRequest extends CatCatRequestBase { + index?: Indices + bytes?: Bytes +} + +export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] + +export interface CatSegmentsSegmentsRecord { + index?: IndexName + i?: IndexName + idx?: IndexName + shard?: string + s?: string + sh?: string + prirep?: string + p?: string + pr?: string + primaryOrReplica?: string + ip?: string + id?: NodeId + segment?: string + seg?: string + generation?: string + g?: string + gen?: string + 'docs.count'?: string + dc?: string + docsCount?: string + 'docs.deleted'?: string + dd?: string + docsDeleted?: string + size?: ByteSize + si?: ByteSize + 'size.memory'?: ByteSize + sm?: ByteSize + sizeMemory?: ByteSize + committed?: string + ic?: string + isCommitted?: string + searchable?: string + is?: string + isSearchable?: string + version?: VersionString + v?: VersionString + compound?: string + ico?: string + isCompound?: string +} + +export interface CatShardsRequest extends CatCatRequestBase { + index?: Indices + bytes?: Bytes +} + +export type CatShardsResponse = CatShardsShardsRecord[] + +export interface CatShardsShardsRecord { + index?: string + i?: string + idx?: string + shard?: string + s?: string + sh?: string + prirep?: string + p?: string + pr?: string + primaryOrReplica?: string + state?: string + st?: string + docs?: string + d?: string + dc?: string + store?: string + sto?: string + ip?: string + id?: string + node?: string + n?: string + sync_id?: string + 'unassigned.reason'?: string + ur?: string + 'unassigned.at'?: string + ua?: string + 'unassigned.for'?: string + uf?: string + 'unassigned.details'?: string + ud?: string + 'recoverysource.type'?: string + rs?: string + 'completion.size'?: string + cs?: string + completionSize?: string + 'fielddata.memory_size'?: string + fm?: string + fielddataMemory?: string + 'fielddata.evictions'?: string + fe?: string + fielddataEvictions?: string + 'query_cache.memory_size'?: string + qcm?: string + queryCacheMemory?: string + 'query_cache.evictions'?: string + qce?: string + queryCacheEvictions?: string + 'flush.total'?: string + ft?: string + flushTotal?: string + 'flush.total_time'?: string + ftt?: string + flushTotalTime?: string + 'get.current'?: string + gc?: string + getCurrent?: string + 
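// ---------------------------------------------------------------------------
// [Editorial example, not part of the patch] CatShardsShardsRecord (above)
// models one row per shard copy; unassigned copies carry the
// 'unassigned.reason' column used below. Import path is illustrative.
// ---------------------------------------------------------------------------
import { Client } from '@elastic/elasticsearch'
import type { CatShardsResponse } from './types' // illustrative path
const client = new Client({ node: 'http://localhost:9200/' })

async function findUnassignedShards () {
  const { body } = await client.cat.shards({ format: 'json' })
  for (const shard of body as CatShardsResponse) {
    if (shard.state === 'UNASSIGNED') {
      console.log(shard.index, shard.shard, shard['unassigned.reason'])
    }
  }
}
// ---------------------------------------------------------------------------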
'get.time'?: string + gti?: string + getTime?: string + 'get.total'?: string + gto?: string + getTotal?: string + 'get.exists_time'?: string + geti?: string + getExistsTime?: string + 'get.exists_total'?: string + geto?: string + getExistsTotal?: string + 'get.missing_time'?: string + gmti?: string + getMissingTime?: string + 'get.missing_total'?: string + gmto?: string + getMissingTotal?: string + 'indexing.delete_current'?: string + idc?: string + indexingDeleteCurrent?: string + 'indexing.delete_time'?: string + idti?: string + indexingDeleteTime?: string + 'indexing.delete_total'?: string + idto?: string + indexingDeleteTotal?: string + 'indexing.index_current'?: string + iic?: string + indexingIndexCurrent?: string + 'indexing.index_time'?: string + iiti?: string + indexingIndexTime?: string + 'indexing.index_total'?: string + iito?: string + indexingIndexTotal?: string + 'indexing.index_failed'?: string + iif?: string + indexingIndexFailed?: string + 'merges.current'?: string + mc?: string + mergesCurrent?: string + 'merges.current_docs'?: string + mcd?: string + mergesCurrentDocs?: string + 'merges.current_size'?: string + mcs?: string + mergesCurrentSize?: string + 'merges.total'?: string + mt?: string + mergesTotal?: string + 'merges.total_docs'?: string + mtd?: string + mergesTotalDocs?: string + 'merges.total_size'?: string + mts?: string + mergesTotalSize?: string + 'merges.total_time'?: string + mtt?: string + mergesTotalTime?: string + 'refresh.total'?: string + 'refresh.time'?: string + 'refresh.external_total'?: string + rto?: string + refreshTotal?: string + 'refresh.external_time'?: string + rti?: string + refreshTime?: string + 'refresh.listeners'?: string + rli?: string + refreshListeners?: string + 'search.fetch_current'?: string + sfc?: string + searchFetchCurrent?: string + 'search.fetch_time'?: string + sfti?: string + searchFetchTime?: string + 'search.fetch_total'?: string + sfto?: string + searchFetchTotal?: string + 'search.open_contexts'?: string + so?: string + searchOpenContexts?: string + 'search.query_current'?: string + sqc?: string + searchQueryCurrent?: string + 'search.query_time'?: string + sqti?: string + searchQueryTime?: string + 'search.query_total'?: string + sqto?: string + searchQueryTotal?: string + 'search.scroll_current'?: string + scc?: string + searchScrollCurrent?: string + 'search.scroll_time'?: string + scti?: string + searchScrollTime?: string + 'search.scroll_total'?: string + scto?: string + searchScrollTotal?: string + 'segments.count'?: string + sc?: string + segmentsCount?: string + 'segments.memory'?: string + sm?: string + segmentsMemory?: string + 'segments.index_writer_memory'?: string + siwm?: string + segmentsIndexWriterMemory?: string + 'segments.version_map_memory'?: string + svmm?: string + segmentsVersionMapMemory?: string + 'segments.fixed_bitset_memory'?: string + sfbm?: string + fixedBitsetMemory?: string + 'seq_no.max'?: string + sqm?: string + maxSeqNo?: string + 'seq_no.local_checkpoint'?: string + sql?: string + localCheckpoint?: string + 'seq_no.global_checkpoint'?: string + sqg?: string + globalCheckpoint?: string + 'warmer.current'?: string + wc?: string + warmerCurrent?: string + 'warmer.total'?: string + wto?: string + warmerTotal?: string + 'warmer.total_time'?: string + wtt?: string + warmerTotalTime?: string + 'path.data'?: string + pd?: string + dataPath?: string + 'path.state'?: string + ps?: string + statsPath?: string + 'bulk.total_operations'?: string + bto?: string + bulkTotalOperations?: string + 
'bulk.total_time'?: string + btti?: string + bulkTotalTime?: string + 'bulk.total_size_in_bytes'?: string + btsi?: string + bulkTotalSizeInBytes?: string + 'bulk.avg_time'?: string + bati?: string + bulkAvgTime?: string + 'bulk.avg_size_in_bytes'?: string + basi?: string + bulkAvgSizeInBytes?: string +} + +export interface CatSnapshotsRequest extends CatCatRequestBase { + repository?: Names + ignore_unavailable?: boolean +} + +export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] + +export interface CatSnapshotsSnapshotsRecord { + id?: string + snapshot?: string + repository?: string + re?: string + repo?: string + status?: string + s?: string + start_epoch?: EpochMillis + ste?: EpochMillis + startEpoch?: EpochMillis + start_time?: DateString + sti?: DateString + startTime?: DateString + end_epoch?: EpochMillis + ete?: EpochMillis + endEpoch?: EpochMillis + end_time?: DateString + eti?: DateString + endTime?: DateString + duration?: Time + dur?: Time + indices?: string + i?: string + successful_shards?: string + ss?: string + failed_shards?: string + fs?: string + total_shards?: string + ts?: string + reason?: string + r?: string +} + +export interface CatTasksRequest extends CatCatRequestBase { + actions?: string[] + detailed?: boolean + node_id?: string[] + parent_task?: long +} + +export type CatTasksResponse = CatTasksTasksRecord[] + +export interface CatTasksTasksRecord { + id?: Id + action?: string + ac?: string + task_id?: Id + ti?: Id + parent_task_id?: string + pti?: string + type?: Type + ty?: Type + start_time?: string + start?: string + timestamp?: string + ts?: string + hms?: string + hhmmss?: string + running_time_ns?: string + running_time?: string + time?: string + node_id?: NodeId + ni?: NodeId + ip?: string + i?: string + port?: string + po?: string + node?: string + n?: string + version?: VersionString + v?: VersionString + x_opaque_id?: string + x?: string + description?: string + desc?: string +} + +export interface CatTemplatesRequest extends CatCatRequestBase { + name?: Name +} + +export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] + +export interface CatTemplatesTemplatesRecord { + name?: Name + n?: Name + index_patterns?: string + t?: string + order?: string + o?: string + p?: string + version?: VersionString + v?: VersionString + composed_of?: string + c?: string +} + +export interface CatThreadPoolRequest extends CatCatRequestBase { + thread_pool_patterns?: Names + size?: Size | boolean +} + +export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] + +export interface CatThreadPoolThreadPoolRecord { + node_name?: string + nn?: string + node_id?: NodeId + id?: NodeId + ephemeral_node_id?: string + eid?: string + pid?: string + p?: string + host?: string + h?: string + ip?: string + i?: string + port?: string + po?: string + name?: string + n?: string + type?: string + t?: string + active?: string + a?: string + pool_size?: string + psz?: string + queue?: string + q?: string + queue_size?: string + qs?: string + rejected?: string + r?: string + largest?: string + l?: string + completed?: string + c?: string + core?: string + cr?: string + max?: string + mx?: string + size?: string + sz?: string + keep_alive?: string + ka?: string +} + +export interface CatTransformsRequest extends CatCatRequestBase { + transform_id?: Id + allow_no_match?: boolean + from?: integer + size?: integer +} + +export type CatTransformsResponse = CatTransformsTransformsRecord[] + +export interface CatTransformsTransformsRecord { + id?: Id + state?: string 
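// ---------------------------------------------------------------------------
// [Editorial example, not part of the patch] Paging through transforms with
// the CatTransformsRequest/Response shapes defined here. A sketch under the
// same import assumptions as the earlier examples.
// ---------------------------------------------------------------------------
import { Client } from '@elastic/elasticsearch'
import type { CatTransformsResponse } from './types' // illustrative path
const client = new Client({ node: 'http://localhost:9200/' })

async function printTransformProgress () {
  const { body } = await client.cat.transforms({ format: 'json', from: 0, size: 50 })
  for (const transform of body as CatTransformsResponse) {
    console.log(transform.id, transform.state, transform.checkpoint_progress)
  }
}
// ---------------------------------------------------------------------------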
+ s?: string + checkpoint?: string + c?: string + documents_processed?: string + docp?: string + documentsProcessed?: string + checkpoint_progress?: string + cp?: string + checkpointProgress?: string + last_search_time?: string + lst?: string + lastSearchTime?: string + changes_last_detection_time?: string + cldt?: string + create_time?: string + ct?: string + createTime?: string + version?: VersionString + v?: VersionString + source_index?: string + si?: string + sourceIndex?: string + dest_index?: string + di?: string + destIndex?: string + pipeline?: string + p?: string + description?: string + d?: string + transform_type?: string + tt?: string + frequency?: string + f?: string + max_page_search_size?: string + mpsz?: string + docs_per_second?: string + dps?: string + reason?: string + r?: string + search_total?: string + st?: string + search_failure?: string + sf?: string + search_time?: string + stime?: string + index_total?: string + it?: string + index_failure?: string + if?: string + index_time?: string + itime?: string + documents_indexed?: string + doci?: string + delete_time?: string + dtime?: string + documents_deleted?: string + docd?: string + trigger_count?: string + tc?: string + pages_processed?: string + pp?: string + processing_time?: string + pt?: string + checkpoint_duration_time_exp_avg?: string + cdtea?: string + checkpointTimeExpAvg?: string + indexed_documents_exp_avg?: string + idea?: string + processed_documents_exp_avg?: string + pdea?: string +} + +export interface CcrFollowIndexStats { + index: IndexName + shards: CcrShardStats[] +} + +export interface CcrReadException { + exception: ErrorCause + from_seq_no: SequenceNumber + retries: integer +} + +export interface CcrShardStats { + bytes_read: long + failed_read_requests: long + failed_write_requests: long + fatal_exception?: ErrorCause + follower_aliases_version: VersionNumber + follower_global_checkpoint: long + follower_index: string + follower_mapping_version: VersionNumber + follower_max_seq_no: SequenceNumber + follower_settings_version: VersionNumber + last_requested_seq_no: SequenceNumber + leader_global_checkpoint: long + leader_index: string + leader_max_seq_no: SequenceNumber + operations_read: long + operations_written: long + outstanding_read_requests: integer + outstanding_write_requests: integer + read_exceptions: CcrReadException[] + remote_cluster: string + shard_id: integer + successful_read_requests: long + successful_write_requests: long + time_since_last_read_millis: EpochMillis + total_read_remote_exec_time_millis: EpochMillis + total_read_time_millis: EpochMillis + total_write_time_millis: EpochMillis + write_buffer_operation_count: long + write_buffer_size_in_bytes: ByteSize +} + +export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { + name: Name +} + +export interface CcrDeleteAutoFollowPatternResponse extends AcknowledgedResponseBase { +} + +export interface CcrFollowRequest extends RequestBase { + index: IndexName + wait_for_active_shards?: WaitForActiveShards + leader_index?: IndexName + max_outstanding_read_requests?: long + max_outstanding_write_requests?: long + max_read_request_operation_count?: long + max_read_request_size?: string + max_retry_delay?: Time + max_write_buffer_count?: long + max_write_buffer_size?: string + max_write_request_operation_count?: long + max_write_request_size?: string + read_poll_timeout?: Time + remote_cluster?: string +} + +export interface CcrFollowResponse { + follow_index_created: boolean + follow_index_shards_acked: boolean 
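// ---------------------------------------------------------------------------
// [Editorial example, not part of the patch] Creating a follower index with
// the CcrFollowRequest/Response shapes above. remote_cluster must name a
// remote configured in the cluster settings; all values here are placeholders.
// ---------------------------------------------------------------------------
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

async function followLeaderIndex () {
  const { body } = await client.ccr.follow({
    index: 'follower-logs',
    wait_for_active_shards: '1',
    body: { remote_cluster: 'leader', leader_index: 'leader-logs' }
  })
  console.log(body.follow_index_created, body.follow_index_shards_acked)
}
// ---------------------------------------------------------------------------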
+ index_following_started: boolean +} + +export interface CcrFollowInfoFollowerIndex { + follower_index: IndexName + leader_index: IndexName + parameters?: CcrFollowInfoFollowerIndexParameters + remote_cluster: Name + status: CcrFollowInfoFollowerIndexStatus +} + +export interface CcrFollowInfoFollowerIndexParameters { + max_outstanding_read_requests: integer + max_outstanding_write_requests: integer + max_read_request_operation_count: integer + max_read_request_size: string + max_retry_delay: Time + max_write_buffer_count: integer + max_write_buffer_size: string + max_write_request_operation_count: integer + max_write_request_size: string + read_poll_timeout: Time +} + +export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused' + +export interface CcrFollowInfoRequest extends RequestBase { + index: Indices +} + +export interface CcrFollowInfoResponse { + follower_indices: CcrFollowInfoFollowerIndex[] +} + +export interface CcrFollowStatsRequest extends RequestBase { + index: Indices +} + +export interface CcrFollowStatsResponse { + indices: CcrFollowIndexStats[] +} + +export interface CcrForgetFollowerRequest extends RequestBase { + index: IndexName + follower_cluster?: string + follower_index?: IndexName + follower_index_uuid?: Uuid + leader_remote_cluster?: string +} + +export interface CcrForgetFollowerResponse { + _shards: ShardStatistics +} + +export interface CcrGetAutoFollowPatternAutoFollowPattern { + name: Name + pattern: CcrGetAutoFollowPatternAutoFollowPatternSummary +} + +export interface CcrGetAutoFollowPatternAutoFollowPatternSummary { + active: boolean + remote_cluster: string + follow_index_pattern?: IndexPattern + leader_index_patterns: IndexPatterns + leader_index_exclusion_patterns: IndexPatterns + max_outstanding_read_requests: integer +} + +export interface CcrGetAutoFollowPatternRequest extends RequestBase { + name?: Name +} + +export interface CcrGetAutoFollowPatternResponse { + patterns: CcrGetAutoFollowPatternAutoFollowPattern[] +} + +export interface CcrPauseAutoFollowPatternRequest extends RequestBase { + name: Name +} + +export interface CcrPauseAutoFollowPatternResponse extends AcknowledgedResponseBase { +} + +export interface CcrPauseFollowRequest extends RequestBase { + index: IndexName +} + +export interface CcrPauseFollowResponse extends AcknowledgedResponseBase { +} + +export interface CcrPutAutoFollowPatternRequest extends RequestBase { + name: Name + remote_cluster: string + follow_index_pattern?: IndexPattern + leader_index_patterns?: IndexPatterns + leader_index_exclusion_patterns?: IndexPatterns + max_outstanding_read_requests?: integer + settings?: Record + max_outstanding_write_requests?: integer + read_poll_timeout?: Time + max_read_request_operation_count?: integer + max_read_request_size?: ByteSize + max_retry_delay?: Time + max_write_buffer_count?: integer + max_write_buffer_size?: ByteSize + max_write_request_operation_count?: integer + max_write_request_size?: ByteSize +} + +export interface CcrPutAutoFollowPatternResponse extends AcknowledgedResponseBase { +} + +export interface CcrResumeAutoFollowPatternRequest extends RequestBase { + name: Name +} + +export interface CcrResumeAutoFollowPatternResponse extends AcknowledgedResponseBase { +} + +export interface CcrResumeFollowRequest extends RequestBase { + index: IndexName + max_outstanding_read_requests?: long + max_outstanding_write_requests?: long + max_read_request_operation_count?: long + max_read_request_size?: string + max_retry_delay?: Time + max_write_buffer_count?: long + 
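// ---------------------------------------------------------------------------
// [Editorial example, not part of the patch] Registering an auto-follow
// pattern per CcrPutAutoFollowPatternRequest above; the pattern and cluster
// names are placeholders.
// ---------------------------------------------------------------------------
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

async function autoFollowLogs () {
  await client.ccr.putAutoFollowPattern({
    name: 'logs-pattern',
    body: {
      remote_cluster: 'leader',
      leader_index_patterns: ['logs-*'],
      follow_index_pattern: '{{leader_index}}-copy'
    }
  })
}
// ---------------------------------------------------------------------------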
max_write_buffer_size?: string + max_write_request_operation_count?: long + max_write_request_size?: string + read_poll_timeout?: Time +} + +export interface CcrResumeFollowResponse extends AcknowledgedResponseBase { +} + +export interface CcrStatsAutoFollowStats { + auto_followed_clusters: CcrStatsAutoFollowedCluster[] + number_of_failed_follow_indices: long + number_of_failed_remote_cluster_state_requests: long + number_of_successful_follow_indices: long + recent_auto_follow_errors: ErrorCause[] +} + +export interface CcrStatsAutoFollowedCluster { + cluster_name: Name + last_seen_metadata_version: VersionNumber + time_since_last_check_millis: DateString +} + +export interface CcrStatsFollowStats { + indices: CcrFollowIndexStats[] +} + +export interface CcrStatsRequest extends RequestBase { +} + +export interface CcrStatsResponse { + auto_follow_stats: CcrStatsAutoFollowStats + follow_stats: CcrStatsFollowStats +} + +export interface CcrUnfollowRequest extends RequestBase { + index: IndexName +} + +export interface CcrUnfollowResponse extends AcknowledgedResponseBase { +} + +export type ClusterClusterStatus = 'green' | 'yellow' | 'red' + +export interface ClusterComponentTemplate { + name: Name + component_template: ClusterComponentTemplateNode +} + +export interface ClusterComponentTemplateNode { + template: ClusterComponentTemplateSummary + version?: VersionNumber + _meta?: Metadata +} + +export interface ClusterComponentTemplateSummary { + _meta?: Metadata + version?: VersionNumber + settings: Record + mappings?: MappingTypeMapping + aliases?: Record +} + +export interface ClusterAllocationExplainAllocationDecision { + decider: string + decision: ClusterAllocationExplainAllocationExplainDecision + explanation: string +} + +export type ClusterAllocationExplainAllocationExplainDecision = 'NO' | 'YES' | 'THROTTLE' | 'ALWAYS' + +export interface ClusterAllocationExplainAllocationStore { + allocation_id: string + found: boolean + in_sync: boolean + matching_size_in_bytes: long + matching_sync_id: boolean + store_exception: string +} + +export interface ClusterAllocationExplainClusterInfo { + nodes: Record + shard_sizes: Record + shard_data_set_sizes?: Record + shard_paths: Record + reserved_sizes: ClusterAllocationExplainReservedSize[] +} + +export interface ClusterAllocationExplainCurrentNode { + id: Id + name: Name + attributes: Record + transport_address: TransportAddress + weight_ranking: integer +} + +export type ClusterAllocationExplainDecision = 'yes' | 'no' | 'worse_balance' | 'throttled' | 'awaiting_info' | 'allocation_delayed' | 'no_valid_shard_copy' | 'no_attempt' + +export interface ClusterAllocationExplainDiskUsage { + path: string + total_bytes: long + used_bytes: long + free_bytes: long + free_disk_percent: double + used_disk_percent: double +} + +export interface ClusterAllocationExplainNodeAllocationExplanation { + deciders: ClusterAllocationExplainAllocationDecision[] + node_attributes: Record + node_decision: ClusterAllocationExplainDecision + node_id: Id + node_name: Name + store?: ClusterAllocationExplainAllocationStore + transport_address: TransportAddress + weight_ranking: integer +} + +export interface ClusterAllocationExplainNodeDiskUsage { + node_name: Name + least_available: ClusterAllocationExplainDiskUsage + most_available: ClusterAllocationExplainDiskUsage +} + +export interface ClusterAllocationExplainRequest extends RequestBase { + include_disk_info?: boolean + include_yes_decisions?: boolean + current_node?: string + index?: IndexName + primary?: boolean + 
shard?: integer +} + +export interface ClusterAllocationExplainReservedSize { + node_id: Id + path: string + total: long + shards: string[] +} + +export interface ClusterAllocationExplainResponse { + allocate_explanation?: string + allocation_delay?: string + allocation_delay_in_millis?: long + can_allocate?: ClusterAllocationExplainDecision + can_move_to_other_node?: ClusterAllocationExplainDecision + can_rebalance_cluster?: ClusterAllocationExplainDecision + can_rebalance_cluster_decisions?: ClusterAllocationExplainAllocationDecision[] + can_rebalance_to_other_node?: ClusterAllocationExplainDecision + can_remain_decisions?: ClusterAllocationExplainAllocationDecision[] + can_remain_on_current_node?: ClusterAllocationExplainDecision + cluster_info?: ClusterAllocationExplainClusterInfo + configured_delay?: string + configured_delay_in_millis?: long + current_node?: ClusterAllocationExplainCurrentNode + current_state: string + index: IndexName + move_explanation?: string + node_allocation_decisions?: ClusterAllocationExplainNodeAllocationExplanation[] + primary: boolean + rebalance_explanation?: string + remaining_delay?: string + remaining_delay_in_millis?: long + shard: integer + unassigned_info?: ClusterAllocationExplainUnassignedInformation + note?: string +} + +export interface ClusterAllocationExplainUnassignedInformation { + at: DateString + last_allocation_status?: string + reason: ClusterAllocationExplainUnassignedInformationReason + details?: string + failed_allocation_attempts?: integer + delayed?: boolean + allocation_status?: string +} + +export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION' + +export interface ClusterDeleteComponentTemplateRequest extends RequestBase { + name: Name + master_timeout?: Time + timeout?: Time +} + +export interface ClusterDeleteComponentTemplateResponse extends AcknowledgedResponseBase { +} + +export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { + wait_for_removal?: boolean +} + +export type ClusterDeleteVotingConfigExclusionsResponse = boolean + +export interface ClusterExistsComponentTemplateRequest extends RequestBase { + name: Names + master_timeout?: Time + local?: boolean +} + +export type ClusterExistsComponentTemplateResponse = boolean + +export interface ClusterGetComponentTemplateRequest extends RequestBase { + name?: Name + flat_settings?: boolean + local?: boolean + master_timeout?: Time +} + +export interface ClusterGetComponentTemplateResponse { + component_templates: ClusterComponentTemplate[] +} + +export interface ClusterGetSettingsRequest extends RequestBase { + flat_settings?: boolean + include_defaults?: boolean + master_timeout?: Time + timeout?: Time +} + +export interface ClusterGetSettingsResponse { + persistent: Record + transient: Record + defaults?: Record +} + +export interface ClusterHealthIndexHealthStats { + active_primary_shards: integer + active_shards: integer + initializing_shards: integer + number_of_replicas: integer + number_of_shards: integer + relocating_shards: integer + shards?: Record + status: Health + unassigned_shards: integer +} + +export interface ClusterHealthRequest extends RequestBase { + index?: Indices + expand_wildcards?: ExpandWildcards + 
level?: Level + local?: boolean + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + wait_for_events?: WaitForEvents + wait_for_nodes?: string + wait_for_no_initializing_shards?: boolean + wait_for_no_relocating_shards?: boolean + wait_for_status?: WaitForStatus +} + +export interface ClusterHealthResponse { + active_primary_shards: integer + active_shards: integer + active_shards_percent_as_number: Percentage + cluster_name: string + delayed_unassigned_shards: integer + indices?: Record + initializing_shards: integer + number_of_data_nodes: integer + number_of_in_flight_fetch: integer + number_of_nodes: integer + number_of_pending_tasks: integer + relocating_shards: integer + status: Health + task_max_waiting_in_queue_millis: EpochMillis + timed_out: boolean + unassigned_shards: integer +} + +export interface ClusterHealthShardHealthStats { + active_shards: integer + initializing_shards: integer + primary_active: boolean + relocating_shards: integer + status: Health + unassigned_shards: integer +} + +export interface ClusterPendingTasksPendingTask { + insert_order: integer + priority: string + source: string + time_in_queue: string + time_in_queue_millis: integer +} + +export interface ClusterPendingTasksRequest extends RequestBase { + local?: boolean + master_timeout?: Time +} + +export interface ClusterPendingTasksResponse { + tasks: ClusterPendingTasksPendingTask[] +} + +export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase { + node_names?: Names + node_ids?: Ids + timeout?: Time +} + +export type ClusterPostVotingConfigExclusionsResponse = boolean + +export interface ClusterPutComponentTemplateRequest extends RequestBase { + name: Name + create?: boolean + master_timeout?: Time + template: IndicesIndexState + aliases?: Record + mappings?: MappingTypeMapping + settings?: IndicesIndexSettings + version?: VersionNumber + _meta?: Metadata +} + +export interface ClusterPutComponentTemplateResponse extends AcknowledgedResponseBase { +} + +export interface ClusterPutSettingsRequest extends RequestBase { + flat_settings?: boolean + master_timeout?: Time + timeout?: Time + persistent?: Record + transient?: Record +} + +export interface ClusterPutSettingsResponse { + acknowledged: boolean + persistent: Record + transient: Record +} + +export type ClusterRemoteInfoClusterRemoteInfo = ClusterRemoteInfoClusterRemoteSniffInfo | ClusterRemoteInfoClusterRemoteProxyInfo + +export interface ClusterRemoteInfoClusterRemoteProxyInfo { + mode: 'proxy' + connected: boolean + initial_connect_timeout: Time + skip_unavailable: boolean + proxy_address: string + server_name: string + num_proxy_sockets_connected: integer + max_proxy_socket_connections: integer +} + +export interface ClusterRemoteInfoClusterRemoteSniffInfo { + mode: 'sniff' + connected: boolean + max_connections_per_cluster: integer + num_nodes_connected: long + initial_connect_timeout: Time + skip_unavailable: boolean + seeds: string[] +} + +export interface ClusterRemoteInfoRequest extends RequestBase { +} + +export interface ClusterRemoteInfoResponse extends DictionaryResponseBase { +} + +export interface ClusterRerouteCommand { + cancel?: ClusterRerouteCommandCancelAction + move?: ClusterRerouteCommandMoveAction + allocate_replica?: ClusterRerouteCommandAllocateReplicaAction + allocate_stale_primary?: ClusterRerouteCommandAllocatePrimaryAction + allocate_empty_primary?: ClusterRerouteCommandAllocatePrimaryAction +} + +export interface ClusterRerouteCommandAllocatePrimaryAction { + 
index: IndexName + shard: integer + node: string + accept_data_loss: boolean +} + +export interface ClusterRerouteCommandAllocateReplicaAction { + index: IndexName + shard: integer + node: string +} + +export interface ClusterRerouteCommandCancelAction { + index: IndexName + shard: integer + node: string + allow_primary?: boolean +} + +export interface ClusterRerouteCommandMoveAction { + index: IndexName + shard: integer + from_node: string + to_node: string +} + +export interface ClusterRerouteRequest extends RequestBase { + dry_run?: boolean + explain?: boolean + metric?: Metrics + retry_failed?: boolean + master_timeout?: Time + timeout?: Time + commands?: ClusterRerouteCommand[] +} + +export interface ClusterRerouteRerouteDecision { + decider: string + decision: string + explanation: string +} + +export interface ClusterRerouteRerouteExplanation { + command: string + decisions: ClusterRerouteRerouteDecision[] + parameters: ClusterRerouteRerouteParameters +} + +export interface ClusterRerouteRerouteParameters { + allow_primary: boolean + index: IndexName + node: NodeName + shard: integer + from_node?: NodeName + to_node?: NodeName +} + +export interface ClusterRerouteResponse { + explanations?: ClusterRerouteRerouteExplanation[] + state: any +} + +export interface ClusterStateRequest extends RequestBase { + metric?: Metrics + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flat_settings?: boolean + ignore_unavailable?: boolean + local?: boolean + master_timeout?: Time + wait_for_metadata_version?: VersionNumber + wait_for_timeout?: Time +} + +export type ClusterStateResponse = any + +export interface ClusterStatsCharFilterTypes { + char_filter_types: ClusterStatsFieldTypes[] + tokenizer_types: ClusterStatsFieldTypes[] + filter_types: ClusterStatsFieldTypes[] + analyzer_types: ClusterStatsFieldTypes[] + built_in_char_filters: ClusterStatsFieldTypes[] + built_in_tokenizers: ClusterStatsFieldTypes[] + built_in_filters: ClusterStatsFieldTypes[] + built_in_analyzers: ClusterStatsFieldTypes[] +} + +export interface ClusterStatsClusterFileSystem { + available_in_bytes: long + free_in_bytes: long + total_in_bytes: long +} + +export interface ClusterStatsClusterIndices { + completion: CompletionStats + count: long + docs: DocStats + fielddata: FielddataStats + query_cache: QueryCacheStats + segments: SegmentsStats + shards: ClusterStatsClusterIndicesShards + store: StoreStats + mappings: ClusterStatsFieldTypesMappings + analysis: ClusterStatsCharFilterTypes + versions?: ClusterStatsIndicesVersions[] +} + +export interface ClusterStatsClusterIndicesShards { + index?: ClusterStatsClusterIndicesShardsIndex + primaries?: double + replication?: double + total?: double +} + +export interface ClusterStatsClusterIndicesShardsIndex { + primaries: ClusterStatsClusterShardMetrics + replication: ClusterStatsClusterShardMetrics + shards: ClusterStatsClusterShardMetrics +} + +export interface ClusterStatsClusterIngest { + number_of_pipelines: integer + processor_stats: Record +} + +export interface ClusterStatsClusterJvm { + max_uptime_in_millis: long + mem: ClusterStatsClusterJvmMemory + threads: long + versions: ClusterStatsClusterJvmVersion[] +} + +export interface ClusterStatsClusterJvmMemory { + heap_max_in_bytes: long + heap_used_in_bytes: long +} + +export interface ClusterStatsClusterJvmVersion { + bundled_jdk: boolean + count: integer + using_bundled_jdk: boolean + version: VersionString + vm_name: string + vm_vendor: string + vm_version: VersionString +} + +export 
interface ClusterStatsClusterNetworkTypes { + http_types: Record + transport_types: Record +} + +export interface ClusterStatsClusterNodeCount { + coordinating_only: integer + data: integer + ingest: integer + master: integer + total: integer + voting_only: integer + data_cold: integer + data_frozen?: integer + data_content: integer + data_warm: integer + data_hot: integer + ml: integer + remote_cluster_client: integer + transform: integer +} + +export interface ClusterStatsClusterNodes { + count: ClusterStatsClusterNodeCount + discovery_types: Record + fs: ClusterStatsClusterFileSystem + ingest: ClusterStatsClusterIngest + jvm: ClusterStatsClusterJvm + network_types: ClusterStatsClusterNetworkTypes + os: ClusterStatsClusterOperatingSystem + packaging_types: ClusterStatsNodePackagingType[] + plugins: PluginStats[] + process: ClusterStatsClusterProcess + versions: VersionString[] +} + +export interface ClusterStatsClusterOperatingSystem { + allocated_processors: integer + available_processors: integer + mem: ClusterStatsOperatingSystemMemoryInfo + names: ClusterStatsClusterOperatingSystemName[] + pretty_names: ClusterStatsClusterOperatingSystemPrettyName[] + architectures?: ClusterStatsClusterOperatingSystemArchitecture[] +} + +export interface ClusterStatsClusterOperatingSystemArchitecture { + count: integer + arch: string +} + +export interface ClusterStatsClusterOperatingSystemName { + count: integer + name: Name +} + +export interface ClusterStatsClusterOperatingSystemPrettyName { + count: integer + pretty_name: Name +} + +export interface ClusterStatsClusterProcess { + cpu: ClusterStatsClusterProcessCpu + open_file_descriptors: ClusterStatsClusterProcessOpenFileDescriptors +} + +export interface ClusterStatsClusterProcessCpu { + percent: integer +} + +export interface ClusterStatsClusterProcessOpenFileDescriptors { + avg: long + max: long + min: long +} + +export interface ClusterStatsClusterProcessor { + count: long + current: long + failed: long + time_in_millis: long +} + +export interface ClusterStatsClusterShardMetrics { + avg: double + max: double + min: double +} + +export interface ClusterStatsFieldTypes { + name: Name + count: integer + index_count: integer + script_count?: integer +} + +export interface ClusterStatsFieldTypesMappings { + field_types: ClusterStatsFieldTypes[] + runtime_field_types?: ClusterStatsRuntimeFieldTypes[] +} + +export interface ClusterStatsIndicesVersions { + index_count: integer + primary_shard_count: integer + total_primary_bytes: long + version: VersionString +} + +export interface ClusterStatsNodePackagingType { + count: integer + flavor: string + type: string +} + +export interface ClusterStatsOperatingSystemMemoryInfo { + free_in_bytes: long + free_percent: integer + total_in_bytes: long + used_in_bytes: long + used_percent: integer +} + +export interface ClusterStatsRequest extends RequestBase { + node_id?: NodeIds + flat_settings?: boolean + timeout?: Time +} + +export interface ClusterStatsResponse extends NodesNodesResponseBase { + cluster_name: Name + cluster_uuid: Uuid + indices: ClusterStatsClusterIndices + nodes: ClusterStatsClusterNodes + status: ClusterClusterStatus + timestamp: long +} + +export interface ClusterStatsRuntimeFieldTypes { + name: Name + count: integer + index_count: integer + scriptless_count: integer + shadowed_count: integer + lang: string[] + lines_max: integer + lines_total: integer + chars_max: integer + chars_total: integer + source_max: integer + source_total: integer + doc_max: integer + doc_total: integer +} + 
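The Cluster* request and response definitions above describe the shapes that the client's cluster namespace methods accept and return. Below is a minimal sketch of that mapping, assuming the 7.x-style ApiResponse envelope (a resolved value carrying a body property) that the client still exposes at this point in the series; the waitForGreen helper and the node URL are illustrative only, not part of this patch:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Hypothetical helper: the params object is shaped like ClusterHealthRequest,
// and the resolved body is shaped like ClusterHealthResponse (both defined above).
async function waitForGreen (): Promise<void> {
  const { body } = await client.cluster.health({
    wait_for_status: 'green',
    timeout: '30s'
  })
  console.log(body.status, body.number_of_data_nodes)
}

waitForGreen().catch(console.error)

Note that the generated definitions use snake_case parameter names, matching what Elasticsearch itself accepts on the wire.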
+export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase {
+ index_uuid: Uuid
+ accept_data_loss: boolean
+ master_timeout?: Time
+ timeout?: Time
+}
+
+export interface DanglingIndicesDeleteDanglingIndexResponse extends AcknowledgedResponseBase {
+}
+
+export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase {
+ index_uuid: Uuid
+ accept_data_loss: boolean
+ master_timeout?: Time
+ timeout?: Time
+}
+
+export interface DanglingIndicesImportDanglingIndexResponse extends AcknowledgedResponseBase {
+}
+
+export interface DanglingIndicesListDanglingIndicesDanglingIndex {
+ index_name: string
+ index_uuid: string
+ creation_date_millis: EpochMillis
+ node_ids: Ids
+}
+
+export interface DanglingIndicesListDanglingIndicesRequest extends RequestBase {
+}
+
+export interface DanglingIndicesListDanglingIndicesResponse {
+ dangling_indices: DanglingIndicesListDanglingIndicesDanglingIndex[]
+}
+
+export interface EnrichConfiguration {
+ geo_match?: EnrichPolicy
+ match: EnrichPolicy
+}
+
+export interface EnrichPolicy {
+ enrich_fields: Fields
+ indices: Indices
+ match_field: Field
+ query?: string
+ name?: Name
+}
+
+export interface EnrichSummary {
+ config: EnrichConfiguration
+}
+
+export interface EnrichDeletePolicyRequest extends RequestBase {
+ name: Name
+}
+
+export interface EnrichDeletePolicyResponse extends AcknowledgedResponseBase {
+}
+
+export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED'
+
+export interface EnrichExecutePolicyExecuteEnrichPolicyStatus {
+ phase: EnrichExecutePolicyEnrichPolicyPhase
+}
+
+export interface EnrichExecutePolicyRequest extends RequestBase {
+ name: Name
+ wait_for_completion?: boolean
+}
+
+export interface EnrichExecutePolicyResponse {
+ status: EnrichExecutePolicyExecuteEnrichPolicyStatus
+ task_id?: TaskId
+}
+
+export interface EnrichGetPolicyRequest extends RequestBase {
+ name?: Names
+}
+
+export interface EnrichGetPolicyResponse {
+ policies: EnrichSummary[]
+}
+
+export interface EnrichPutPolicyRequest extends RequestBase {
+ name: Name
+ geo_match?: EnrichPolicy
+ match?: EnrichPolicy
+}
+
+export interface EnrichPutPolicyResponse extends AcknowledgedResponseBase {
+}
+
+export interface EnrichStatsCacheStats {
+ node_id: Id
+ count: integer
+ hits: integer
+ misses: integer
+ evictions: integer
+}
+
+export interface EnrichStatsCoordinatorStats {
+ executed_searches_total: long
+ node_id: Id
+ queue_size: integer
+ remote_requests_current: integer
+ remote_requests_total: long
+}
+
+export interface EnrichStatsExecutingPolicy {
+ name: Name
+ task: TasksInfo
+}
+
+export interface EnrichStatsRequest extends RequestBase {
+}
+
+export interface EnrichStatsResponse {
+ coordinator_stats: EnrichStatsCoordinatorStats[]
+ executing_policies: EnrichStatsExecutingPolicy[]
+ cache_stats?: EnrichStatsCacheStats[]
+}
+
+export interface EqlEqlHits<TEvent = unknown> {
+ total?: SearchTotalHits
+ events?: EqlHitsEvent<TEvent>[]
+ sequences?: EqlHitsSequence<TEvent>[]
+}
+
+export interface EqlEqlSearchResponseBase<TEvent = unknown> {
+ id?: Id
+ is_partial?: boolean
+ is_running?: boolean
+ took?: integer
+ timed_out?: boolean
+ hits: EqlEqlHits<TEvent>
+}
+
+export interface EqlHitsEvent<TEvent = unknown> {
+ _index: IndexName
+ _id: Id
+ _source: TEvent
+ fields?: Record
+}
+
+export interface EqlHitsSequence<TEvent = unknown> {
+ events: EqlHitsEvent<TEvent>[]
+ join_keys: any[]
+}
+
+export interface EqlDeleteRequest extends RequestBase {
+ id: Id
+}
+
+export interface EqlDeleteResponse extends AcknowledgedResponseBase {
+}
+
+export interface EqlGetRequest
extends RequestBase { + id: Id + keep_alive?: Time + wait_for_completion_timeout?: Time +} + +export interface EqlGetResponse extends EqlEqlSearchResponseBase { +} + +export interface EqlGetStatusRequest extends RequestBase { + id: Id +} + +export interface EqlGetStatusResponse { + id: Id + is_partial: boolean + is_running: boolean + start_time_in_millis?: EpochMillis + expiration_time_in_millis?: EpochMillis + completion_status?: integer +} + +export interface EqlSearchRequest extends RequestBase { + index: IndexName + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + keep_alive?: Time + keep_on_completion?: boolean + wait_for_completion_timeout?: Time + query: string + case_sensitive?: boolean + event_category_field?: Field + tiebreaker_field?: Field + timestamp_field?: Field + fetch_size?: uint + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + size?: uint | float + fields?: (Field | EqlSearchSearchFieldFormatted)[] + result_position?: EqlSearchResultPosition +} + +export interface EqlSearchResponse extends EqlEqlSearchResponseBase { +} + +export type EqlSearchResultPosition = 'tail' | 'head' + +export interface EqlSearchSearchFieldFormatted { + field: Field + format?: string +} + +export interface FeaturesFeature { + name: string + description: string +} + +export interface FeaturesGetFeaturesRequest extends RequestBase { +} + +export interface FeaturesGetFeaturesResponse { + features: FeaturesFeature[] +} + +export interface FeaturesResetFeaturesRequest extends RequestBase { +} + +export interface FeaturesResetFeaturesResponse { + features: FeaturesFeature[] +} + +export interface GraphConnection { + doc_count: long + source: long + target: long + weight: double +} + +export interface GraphExploreControls { + sample_diversity?: GraphSampleDiversity + sample_size?: integer + timeout?: Time + use_significance: boolean +} + +export interface GraphHop { + connections?: GraphHop + query: QueryDslQueryContainer + vertices: GraphVertexDefinition[] +} + +export interface GraphSampleDiversity { + field: Field + max_docs_per_value: integer +} + +export interface GraphVertex { + depth: long + field: Field + term: string + weight: double +} + +export interface GraphVertexDefinition { + exclude?: string[] + field: Field + include?: GraphVertexInclude[] + min_doc_count?: long + shard_min_doc_count?: long + size?: integer +} + +export interface GraphVertexInclude { + boost: double + term: string +} + +export interface GraphExploreRequest extends RequestBase { + index: Indices + routing?: Routing + timeout?: Time + connections?: GraphHop + controls?: GraphExploreControls + query?: QueryDslQueryContainer + vertices?: GraphVertexDefinition[] +} + +export interface GraphExploreResponse { + connections: GraphConnection[] + failures: ShardFailure[] + timed_out: boolean + took: long + vertices: GraphVertex[] +} + +export interface IlmAction { + [key: string]: never +} + +export interface IlmPhase { + actions: Record | string[] + min_age?: Time +} + +export interface IlmPhases { + cold?: IlmPhase + delete?: IlmPhase + hot?: IlmPhase + warm?: IlmPhase +} + +export interface IlmPolicy { + phases: IlmPhases + name?: Name +} + +export interface IlmDeleteLifecycleRequest extends RequestBase { + policy: Name +} + +export interface IlmDeleteLifecycleResponse extends AcknowledgedResponseBase { +} + +export type IlmExplainLifecycleLifecycleExplain = IlmExplainLifecycleLifecycleExplainManaged | IlmExplainLifecycleLifecycleExplainUnmanaged + +export interface 
IlmExplainLifecycleLifecycleExplainManaged { + action: Name + action_time_millis: EpochMillis + age: Time + failed_step?: Name + failed_step_retry_count?: integer + index: IndexName + is_auto_retryable_error?: boolean + lifecycle_date_millis: EpochMillis + managed: true + phase: Name + phase_time_millis: EpochMillis + policy: Name + step: Name + step_info?: Record + step_time_millis: EpochMillis + phase_execution: IlmExplainLifecycleLifecycleExplainPhaseExecution +} + +export interface IlmExplainLifecycleLifecycleExplainPhaseExecution { + policy: Name + version: VersionNumber + modified_date_in_millis: EpochMillis +} + +export interface IlmExplainLifecycleLifecycleExplainUnmanaged { + index: IndexName + managed: false +} + +export interface IlmExplainLifecycleRequest extends RequestBase { + index: IndexName + only_errors?: boolean + only_managed?: boolean +} + +export interface IlmExplainLifecycleResponse { + indices: Record +} + +export interface IlmGetLifecycleLifecycle { + modified_date: DateString + policy: IlmPolicy + version: VersionNumber +} + +export interface IlmGetLifecycleRequest extends RequestBase { + policy?: Name +} + +export interface IlmGetLifecycleResponse extends DictionaryResponseBase { +} + +export interface IlmGetStatusRequest extends RequestBase { +} + +export interface IlmGetStatusResponse { + operation_mode: LifecycleOperationMode +} + +export interface IlmMoveToStepRequest extends RequestBase { + index: IndexName + current_step?: IlmMoveToStepStepKey + next_step?: IlmMoveToStepStepKey +} + +export interface IlmMoveToStepResponse extends AcknowledgedResponseBase { +} + +export interface IlmMoveToStepStepKey { + action: string + name: string + phase: string +} + +export interface IlmPutLifecycleRequest extends RequestBase { + policy: Name +} + +export interface IlmPutLifecycleResponse extends AcknowledgedResponseBase { +} + +export interface IlmRemovePolicyRequest extends RequestBase { + index: IndexName +} + +export interface IlmRemovePolicyResponse { + failed_indexes: IndexName[] + has_failures: boolean +} + +export interface IlmRetryRequest extends RequestBase { + index: IndexName +} + +export interface IlmRetryResponse extends AcknowledgedResponseBase { +} + +export interface IlmStartRequest extends RequestBase { + master_timeout?: Time + timeout?: Time +} + +export interface IlmStartResponse extends AcknowledgedResponseBase { +} + +export interface IlmStopRequest extends RequestBase { + master_timeout?: Time + timeout?: Time +} + +export interface IlmStopResponse extends AcknowledgedResponseBase { +} + +export interface IndicesAlias { + filter?: QueryDslQueryContainer + index_routing?: Routing + is_hidden?: boolean + is_write_index?: boolean + routing?: Routing + search_routing?: Routing +} + +export interface IndicesAliasDefinition { + filter?: QueryDslQueryContainer + index_routing?: string + is_write_index?: boolean + routing?: string + search_routing?: string +} + +export type IndicesDataStreamHealthStatus = 'green' | 'yellow' | 'red' + +export interface IndicesFielddataFrequencyFilter { + max: double + min: double + min_segment_size: integer +} + +export type IndicesIndexCheckOnStartup = 'false' | 'checksum' | 'true' + +export interface IndicesIndexRouting { + allocation?: IndicesIndexRoutingAllocation + rebalance?: IndicesIndexRoutingRebalance +} + +export interface IndicesIndexRoutingAllocation { + enable?: IndicesIndexRoutingAllocationOptions + include?: IndicesIndexRoutingAllocationInclude + initial_recovery?: 
IndicesIndexRoutingAllocationInitialRecovery + disk?: IndicesIndexRoutingAllocationDisk +} + +export interface IndicesIndexRoutingAllocationDisk { + threshold_enabled: boolean | string +} + +export interface IndicesIndexRoutingAllocationInclude { + _tier_preference?: string + _id?: Id +} + +export interface IndicesIndexRoutingAllocationInitialRecovery { + _id?: Id +} + +export type IndicesIndexRoutingAllocationOptions = 'all' | 'primaries' | 'new_primaries' | 'none' + +export interface IndicesIndexRoutingRebalance { + enable: IndicesIndexRoutingRebalanceOptions +} + +export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replicas' | 'none' + +export interface IndicesIndexSettingBlocks { + read_only?: boolean + 'index.blocks.read_only'?: boolean + read_only_allow_delete?: boolean + 'index.blocks.read_only_allow_delete'?: boolean + read?: boolean + 'index.blocks.read'?: boolean + write?: boolean | string + 'index.blocks.write'?: boolean | string + metadata?: boolean + 'index.blocks.metadata'?: boolean +} + +export interface IndicesIndexSettings { + number_of_shards?: integer | string + 'index.number_of_shards'?: integer | string + number_of_replicas?: integer | string + 'index.number_of_replicas'?: integer | string + number_of_routing_shards?: integer + 'index.number_of_routing_shards'?: integer + check_on_startup?: IndicesIndexCheckOnStartup + 'index.check_on_startup'?: IndicesIndexCheckOnStartup + codec?: string + 'index.codec'?: string + routing_partition_size?: integer | string + 'index.routing_partition_size'?: integer | string + 'soft_deletes.retention_lease.period'?: Time + 'index.soft_deletes.retention_lease.period'?: Time + load_fixed_bitset_filters_eagerly?: boolean + 'index.load_fixed_bitset_filters_eagerly'?: boolean + hidden?: boolean | string + 'index.hidden'?: boolean | string + auto_expand_replicas?: string + 'index.auto_expand_replicas'?: string + 'search.idle.after'?: Time + 'index.search.idle.after'?: Time + refresh_interval?: Time + 'index.refresh_interval'?: Time + max_result_window?: integer + 'index.max_result_window'?: integer + max_inner_result_window?: integer + 'index.max_inner_result_window'?: integer + max_rescore_window?: integer + 'index.max_rescore_window'?: integer + max_docvalue_fields_search?: integer + 'index.max_docvalue_fields_search'?: integer + max_script_fields?: integer + 'index.max_script_fields'?: integer + max_ngram_diff?: integer + 'index.max_ngram_diff'?: integer + max_shingle_diff?: integer + 'index.max_shingle_diff'?: integer + blocks?: IndicesIndexSettingBlocks + 'index.blocks'?: IndicesIndexSettingBlocks + max_refresh_listeners?: integer + 'index.max_refresh_listeners'?: integer + 'analyze.max_token_count'?: integer + 'index.analyze.max_token_count'?: integer + 'highlight.max_analyzed_offset'?: integer + 'index.highlight.max_analyzed_offset'?: integer + max_terms_count?: integer + 'index.max_terms_count'?: integer + max_regex_length?: integer + 'index.max_regex_length'?: integer + routing?: IndicesIndexRouting + 'index.routing'?: IndicesIndexRouting + gc_deletes?: Time + 'index.gc_deletes'?: Time + default_pipeline?: PipelineName + 'index.default_pipeline'?: PipelineName + final_pipeline?: PipelineName + 'index.final_pipeline'?: PipelineName + lifecycle?: IndicesIndexSettingsLifecycle + 'index.lifecycle'?: IndicesIndexSettingsLifecycle + provided_name?: Name + 'index.provided_name'?: Name + creation_date?: DateString + 'index.creation_date'?: DateString + uuid?: Uuid + 'index.uuid'?: Uuid + version?: IndicesIndexVersioning 
+ 'index.version'?: IndicesIndexVersioning + verified_before_close?: boolean | string + 'index.verified_before_close'?: boolean | string + format?: string | integer + 'index.format'?: string | integer + max_slices_per_scroll?: integer + 'index.max_slices_per_scroll'?: integer + 'translog.durability'?: string + 'index.translog.durability'?: string + 'query_string.lenient'?: boolean | string + 'index.query_string.lenient'?: boolean | string + priority?: integer | string + 'index.priority'?: integer | string + top_metrics_max_size?: integer + analysis?: IndicesIndexSettingsAnalysis +} + +export interface IndicesIndexSettingsAnalysis { + analyzer?: Record + char_filter?: Record + filter?: Record + normalizer?: Record +} + +export interface IndicesIndexSettingsLifecycle { + name: Name +} + +export interface IndicesIndexState { + aliases?: Record + mappings?: MappingTypeMapping + settings?: IndicesIndexSettings | IndicesIndexStatePrefixedSettings + data_stream?: DataStreamName +} + +export interface IndicesIndexStatePrefixedSettings { + index: IndicesIndexSettings +} + +export interface IndicesIndexVersioning { + created: VersionString +} + +export interface IndicesNumericFielddata { + format: IndicesNumericFielddataFormat +} + +export type IndicesNumericFielddataFormat = 'array' | 'disabled' + +export interface IndicesOverlappingIndexTemplate { + name: Name + index_patterns?: IndexName[] +} + +export interface IndicesStringFielddata { + format: IndicesStringFielddataFormat +} + +export type IndicesStringFielddataFormat = 'paged_bytes' | 'disabled' + +export interface IndicesTemplateMapping { + aliases: Record + index_patterns: Name[] + mappings: MappingTypeMapping + order: integer + settings: Record + version?: VersionNumber +} + +export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' + +export interface IndicesAddBlockIndicesBlockStatus { + name: IndexName + blocked: boolean +} + +export interface IndicesAddBlockRequest extends RequestBase { + index: IndexName + block: IndicesAddBlockIndicesBlockOptions + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + master_timeout?: Time + timeout?: Time +} + +export interface IndicesAddBlockResponse extends AcknowledgedResponseBase { + shards_acknowledged: boolean + indices: IndicesAddBlockIndicesBlockStatus[] +} + +export interface IndicesAnalyzeAnalyzeDetail { + analyzer?: IndicesAnalyzeAnalyzerDetail + charfilters?: IndicesAnalyzeCharFilterDetail[] + custom_analyzer: boolean + tokenfilters?: IndicesAnalyzeTokenDetail[] + tokenizer?: IndicesAnalyzeTokenDetail +} + +export interface IndicesAnalyzeAnalyzeToken { + end_offset: long + position: long + position_length?: long + start_offset: long + token: string + type: string +} + +export interface IndicesAnalyzeAnalyzerDetail { + name: string + tokens: IndicesAnalyzeExplainAnalyzeToken[] +} + +export interface IndicesAnalyzeCharFilterDetail { + filtered_text: string[] + name: string +} + +export interface IndicesAnalyzeExplainAnalyzeToken { + bytes: string + end_offset: long + keyword?: boolean + position: long + positionLength: long + start_offset: long + termFrequency: long + token: string + type: string +} + +export interface IndicesAnalyzeRequest extends RequestBase { + index?: IndexName + analyzer?: string + attributes?: string[] + char_filter?: (string | AnalysisCharFilter)[] + explain?: boolean + field?: Field + filter?: (string | AnalysisTokenFilter)[] + normalizer?: string + text?: IndicesAnalyzeTextToAnalyze + 
tokenizer?: string | AnalysisTokenizer +} + +export interface IndicesAnalyzeResponse { + detail?: IndicesAnalyzeAnalyzeDetail + tokens?: IndicesAnalyzeAnalyzeToken[] +} + +export type IndicesAnalyzeTextToAnalyze = string | string[] + +export interface IndicesAnalyzeTokenDetail { + name: string + tokens: IndicesAnalyzeExplainAnalyzeToken[] +} + +export interface IndicesClearCacheRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + fielddata?: boolean + fields?: Fields + ignore_unavailable?: boolean + query?: boolean + request?: boolean +} + +export interface IndicesClearCacheResponse extends ShardsOperationResponseBase { +} + +export interface IndicesCloneRequest extends RequestBase { + index: IndexName + target: Name + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + aliases?: Record + settings?: Record +} + +export interface IndicesCloneResponse extends AcknowledgedResponseBase { + index: IndexName + shards_acknowledged: boolean +} + +export interface IndicesCloseCloseIndexResult { + closed: boolean + shards?: Record +} + +export interface IndicesCloseCloseShardResult { + failures: ShardFailure[] +} + +export interface IndicesCloseRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards +} + +export interface IndicesCloseResponse extends AcknowledgedResponseBase { + indices: Record + shards_acknowledged: boolean +} + +export interface IndicesCreateRequest extends RequestBase { + index: IndexName + include_type_name?: boolean + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + aliases?: Record + mappings?: Record | MappingTypeMapping + settings?: Record +} + +export interface IndicesCreateResponse extends AcknowledgedResponseBase { + index: IndexName + shards_acknowledged: boolean +} + +export interface IndicesCreateDataStreamRequest extends RequestBase { + name: DataStreamName +} + +export interface IndicesCreateDataStreamResponse extends AcknowledgedResponseBase { +} + +export interface IndicesDataStreamsStatsDataStreamsStatsItem { + backing_indices: integer + data_stream: Name + store_size?: ByteSize + store_size_bytes: integer + maximum_timestamp: integer +} + +export interface IndicesDataStreamsStatsRequest extends RequestBase { + name?: IndexName + expand_wildcards?: ExpandWildcards +} + +export interface IndicesDataStreamsStatsResponse { + _shards: ShardStatistics + backing_indices: integer + data_stream_count: integer + total_store_sizes?: ByteSize + total_store_size_bytes: integer + data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] +} + +export interface IndicesDeleteRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + master_timeout?: Time + timeout?: Time +} + +export interface IndicesDeleteResponse extends IndicesResponseBase { +} + +export interface IndicesDeleteAliasRequest extends RequestBase { + index: Indices + name: Names + master_timeout?: Time + timeout?: Time +} + +export interface IndicesDeleteAliasResponse extends AcknowledgedResponseBase { +} + +export interface IndicesDeleteDataStreamRequest extends RequestBase { + name: DataStreamNames + expand_wildcards?: ExpandWildcards +} + +export interface IndicesDeleteDataStreamResponse extends 
AcknowledgedResponseBase { +} + +export interface IndicesDeleteIndexTemplateRequest extends RequestBase { + name: Name +} + +export interface IndicesDeleteIndexTemplateResponse extends AcknowledgedResponseBase { +} + +export interface IndicesDeleteTemplateRequest extends RequestBase { + name: Name + master_timeout?: Time + timeout?: Time +} + +export interface IndicesDeleteTemplateResponse extends AcknowledgedResponseBase { +} + +export interface IndicesDiskUsageRequest extends RequestBase { + index: IndexName + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flush?: boolean + ignore_unavailable?: boolean + master_timeout?: TimeUnit + timeout?: TimeUnit + run_expensive_tasks?: boolean + wait_for_active_shards?: string +} + +export type IndicesDiskUsageResponse = any + +export interface IndicesExistsRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flat_settings?: boolean + ignore_unavailable?: boolean + include_defaults?: boolean + local?: boolean +} + +export type IndicesExistsResponse = boolean + +export interface IndicesExistsAliasRequest extends RequestBase { + name: Names + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + local?: boolean +} + +export type IndicesExistsAliasResponse = boolean + +export interface IndicesExistsIndexTemplateRequest extends RequestBase { + name: Name + master_timeout?: Time +} + +export type IndicesExistsIndexTemplateResponse = boolean + +export interface IndicesExistsTemplateRequest extends RequestBase { + name: Names + flat_settings?: boolean + local?: boolean + master_timeout?: Time +} + +export type IndicesExistsTemplateResponse = boolean + +export interface IndicesExistsTypeRequest extends RequestBase { + index: Indices + type: Types + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + local?: boolean +} + +export type IndicesExistsTypeResponse = boolean + +export interface IndicesFlushRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + force?: boolean + ignore_unavailable?: boolean + wait_if_ongoing?: boolean +} + +export interface IndicesFlushResponse extends ShardsOperationResponseBase { +} + +export interface IndicesForcemergeRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flush?: boolean + ignore_unavailable?: boolean + max_num_segments?: long + only_expunge_deletes?: boolean +} + +export interface IndicesForcemergeResponse extends ShardsOperationResponseBase { +} + +export interface IndicesFreezeRequest extends RequestBase { + index: IndexName + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards +} + +export interface IndicesFreezeResponse extends AcknowledgedResponseBase { + shards_acknowledged: boolean +} + +export interface IndicesGetRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flat_settings?: boolean + ignore_unavailable?: boolean + include_defaults?: boolean + include_type_name?: boolean + local?: boolean + master_timeout?: Time +} + +export interface IndicesGetResponse extends DictionaryResponseBase { +} + +export interface IndicesGetAliasIndexAliases { + aliases: Record +} + +export interface IndicesGetAliasRequest 
extends RequestBase { + name?: Names + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + local?: boolean +} + +export interface IndicesGetAliasResponse extends DictionaryResponseBase { +} + +export interface IndicesGetDataStreamIndicesGetDataStreamItem { + name: DataStreamName + timestamp_field: IndicesGetDataStreamIndicesGetDataStreamItemTimestampField + indices: IndicesGetDataStreamIndicesGetDataStreamItemIndex[] + generation: integer + template: Name + hidden: boolean + system?: boolean + status: IndicesDataStreamHealthStatus + ilm_policy?: Name + _meta?: Metadata +} + +export interface IndicesGetDataStreamIndicesGetDataStreamItemIndex { + index_name: IndexName + index_uuid: Uuid +} + +export interface IndicesGetDataStreamIndicesGetDataStreamItemTimestampField { + name: Field +} + +export interface IndicesGetDataStreamRequest extends RequestBase { + name?: DataStreamNames + expand_wildcards?: ExpandWildcards +} + +export interface IndicesGetDataStreamResponse { + data_streams: IndicesGetDataStreamIndicesGetDataStreamItem[] +} + +export interface IndicesGetFieldMappingRequest extends RequestBase { + fields: Fields + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + include_defaults?: boolean + include_type_name?: boolean + local?: boolean +} + +export interface IndicesGetFieldMappingResponse extends DictionaryResponseBase { +} + +export interface IndicesGetFieldMappingTypeFieldMappings { + mappings: Record +} + +export interface IndicesGetIndexTemplateIndexTemplate { + index_patterns: Name[] + composed_of: Name[] + template: IndicesGetIndexTemplateIndexTemplateSummary + version?: VersionNumber + priority?: long + _meta?: Metadata + allow_auto_create?: boolean + data_stream?: Record +} + +export interface IndicesGetIndexTemplateIndexTemplateItem { + name: Name + index_template: IndicesGetIndexTemplateIndexTemplate +} + +export interface IndicesGetIndexTemplateIndexTemplateSummary { + aliases?: Record + mappings?: MappingTypeMapping + settings?: Record +} + +export interface IndicesGetIndexTemplateRequest extends RequestBase { + name?: Name + local?: boolean + flat_settings?: boolean + include_type_name?: boolean + master_timeout?: Time +} + +export interface IndicesGetIndexTemplateResponse { + index_templates: IndicesGetIndexTemplateIndexTemplateItem[] +} + +export interface IndicesGetMappingIndexMappingRecord { + item?: MappingTypeMapping + mappings: MappingTypeMapping +} + +export interface IndicesGetMappingRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + include_type_name?: boolean + local?: boolean + master_timeout?: Time +} + +export interface IndicesGetMappingResponse extends DictionaryResponseBase { +} + +export interface IndicesGetSettingsRequest extends RequestBase { + index?: Indices + name?: Names + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flat_settings?: boolean + ignore_unavailable?: boolean + include_defaults?: boolean + local?: boolean + master_timeout?: Time +} + +export interface IndicesGetSettingsResponse extends DictionaryResponseBase { +} + +export interface IndicesGetTemplateRequest extends RequestBase { + name?: Names + flat_settings?: boolean + include_type_name?: boolean + local?: boolean + master_timeout?: Time +} + +export interface IndicesGetTemplateResponse extends DictionaryResponseBase { +} + +export interface 
IndicesMigrateToDataStreamRequest extends RequestBase { + name: IndexName +} + +export interface IndicesMigrateToDataStreamResponse extends AcknowledgedResponseBase { +} + +export interface IndicesOpenRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards +} + +export interface IndicesOpenResponse extends AcknowledgedResponseBase { + shards_acknowledged: boolean +} + +export interface IndicesPromoteDataStreamRequest extends RequestBase { + name: IndexName +} + +export type IndicesPromoteDataStreamResponse = any + +export interface IndicesPutAliasRequest extends RequestBase { + index: Indices + name: Name + master_timeout?: Time + timeout?: Time + filter?: QueryDslQueryContainer + index_routing?: Routing + is_write_index?: boolean + routing?: Routing + search_routing?: Routing +} + +export interface IndicesPutAliasResponse extends AcknowledgedResponseBase { +} + +export interface IndicesPutIndexTemplateIndexTemplateMapping { + aliases?: Record + mappings?: MappingTypeMapping + settings?: IndicesIndexSettings +} + +export interface IndicesPutIndexTemplateRequest extends RequestBase { + name: Name + index_patterns?: Indices + composed_of?: Name[] + template?: IndicesPutIndexTemplateIndexTemplateMapping + data_stream?: EmptyObject + priority?: integer + version?: VersionNumber + _meta?: Metadata +} + +export interface IndicesPutIndexTemplateResponse extends AcknowledgedResponseBase { +} + +export interface IndicesPutMappingRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + include_type_name?: boolean + master_timeout?: Time + timeout?: Time + write_index_only?: boolean + date_detection?: boolean + dynamic?: boolean | MappingDynamicMapping + dynamic_date_formats?: string[] + dynamic_templates?: Record | Record[] + _field_names?: MappingFieldNamesField + _meta?: Record + numeric_detection?: boolean + properties?: Record + _routing?: MappingRoutingField + _source?: MappingSourceField + runtime?: MappingRuntimeFields +} + +export interface IndicesPutMappingResponse extends IndicesResponseBase { +} + +export interface IndicesPutSettingsRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flat_settings?: boolean + ignore_unavailable?: boolean + master_timeout?: Time + preserve_existing?: boolean + timeout?: Time + settings?: IndicesIndexSettings +} + +export interface IndicesPutSettingsResponse extends AcknowledgedResponseBase { +} + +export interface IndicesPutTemplateRequest extends RequestBase { + name: Name + create?: boolean + flat_settings?: boolean + include_type_name?: boolean + master_timeout?: Time + timeout?: Time + order?: integer + aliases?: Record + index_patterns?: string | string[] + mappings?: MappingTypeMapping + settings?: Record + version?: VersionNumber +} + +export interface IndicesPutTemplateResponse extends AcknowledgedResponseBase { +} + +export interface IndicesRecoveryFileDetails { + length: long + name: string + recovered: long +} + +export interface IndicesRecoveryRecoveryBytes { + percent: Percentage + recovered?: ByteSize + recovered_in_bytes: ByteSize + recovered_from_snapshot?: ByteSize + recovered_from_snapshot_in_bytes?: ByteSize + reused?: ByteSize + reused_in_bytes: ByteSize + total?: ByteSize + total_in_bytes: ByteSize +} + +export 
interface IndicesRecoveryRecoveryFiles { + details?: IndicesRecoveryFileDetails[] + percent: Percentage + recovered: long + reused: long + total: long +} + +export interface IndicesRecoveryRecoveryIndexStatus { + bytes?: IndicesRecoveryRecoveryBytes + files: IndicesRecoveryRecoveryFiles + size: IndicesRecoveryRecoveryBytes + source_throttle_time?: Time + source_throttle_time_in_millis: EpochMillis + target_throttle_time?: Time + target_throttle_time_in_millis: EpochMillis + total_time_in_millis: EpochMillis + total_time?: Time +} + +export interface IndicesRecoveryRecoveryOrigin { + hostname?: string + host?: Host + transport_address?: TransportAddress + id?: Id + ip?: Ip + name?: Name + bootstrap_new_history_uuid?: boolean + repository?: Name + snapshot?: Name + version?: VersionString + restoreUUID?: Uuid + index?: IndexName +} + +export interface IndicesRecoveryRecoveryStartStatus { + check_index_time: long + total_time_in_millis: string +} + +export interface IndicesRecoveryRecoveryStatus { + shards: IndicesRecoveryShardRecovery[] +} + +export interface IndicesRecoveryRequest extends RequestBase { + index?: Indices + active_only?: boolean + detailed?: boolean +} + +export interface IndicesRecoveryResponse extends DictionaryResponseBase { +} + +export interface IndicesRecoveryShardRecovery { + id: long + index: IndicesRecoveryRecoveryIndexStatus + primary: boolean + source: IndicesRecoveryRecoveryOrigin + stage: string + start?: IndicesRecoveryRecoveryStartStatus + start_time?: DateString + start_time_in_millis: EpochMillis + stop_time?: DateString + stop_time_in_millis: EpochMillis + target: IndicesRecoveryRecoveryOrigin + total_time?: DateString + total_time_in_millis: EpochMillis + translog: IndicesRecoveryTranslogStatus + type: Type + verify_index: IndicesRecoveryVerifyIndex +} + +export interface IndicesRecoveryTranslogStatus { + percent: Percentage + recovered: long + total: long + total_on_start: long + total_time?: string + total_time_in_millis: EpochMillis +} + +export interface IndicesRecoveryVerifyIndex { + check_index_time?: Time + check_index_time_in_millis: EpochMillis + total_time?: Time + total_time_in_millis: EpochMillis +} + +export interface IndicesRefreshRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean +} + +export interface IndicesRefreshResponse extends ShardsOperationResponseBase { +} + +export interface IndicesReloadSearchAnalyzersReloadDetails { + index: string + reloaded_analyzers: string[] + reloaded_node_ids: string[] +} + +export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean +} + +export interface IndicesReloadSearchAnalyzersResponse { + reload_details: IndicesReloadSearchAnalyzersReloadDetails[] + _shards: ShardStatistics +} + +export interface IndicesResolveIndexRequest extends RequestBase { + name: Names + expand_wildcards?: ExpandWildcards +} + +export interface IndicesResolveIndexResolveIndexAliasItem { + name: Name + indices: Indices +} + +export interface IndicesResolveIndexResolveIndexDataStreamsItem { + name: DataStreamName + timestamp_field: Field + backing_indices: Indices +} + +export interface IndicesResolveIndexResolveIndexItem { + name: Name + aliases?: string[] + attributes: string[] + data_stream?: DataStreamName +} + +export interface IndicesResolveIndexResponse { + indices: IndicesResolveIndexResolveIndexItem[] 
+ aliases: IndicesResolveIndexResolveIndexAliasItem[] + data_streams: IndicesResolveIndexResolveIndexDataStreamsItem[] +} + +export interface IndicesRolloverRequest extends RequestBase { + alias: IndexAlias + new_index?: IndexName + dry_run?: boolean + include_type_name?: boolean + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + aliases?: Record + conditions?: IndicesRolloverRolloverConditions + mappings?: Record | MappingTypeMapping + settings?: Record +} + +export interface IndicesRolloverResponse extends AcknowledgedResponseBase { + conditions: Record + dry_run: boolean + new_index: string + old_index: string + rolled_over: boolean + shards_acknowledged: boolean +} + +export interface IndicesRolloverRolloverConditions { + max_age?: Time + max_docs?: long + max_size?: string + max_primary_shard_size?: ByteSize +} + +export interface IndicesSegmentsIndexSegment { + shards: Record +} + +export interface IndicesSegmentsRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + verbose?: boolean +} + +export interface IndicesSegmentsResponse { + indices: Record + _shards: ShardStatistics +} + +export interface IndicesSegmentsSegment { + attributes: Record + committed: boolean + compound: boolean + deleted_docs: long + generation: integer + memory_in_bytes: double + search: boolean + size_in_bytes: double + num_docs: long + version: VersionString +} + +export interface IndicesSegmentsShardSegmentRouting { + node: string + primary: boolean + state: string +} + +export interface IndicesSegmentsShardsSegment { + num_committed_segments: integer + routing: IndicesSegmentsShardSegmentRouting + num_search_segments: integer + segments: Record +} + +export interface IndicesShardStoresIndicesShardStores { + shards: Record +} + +export interface IndicesShardStoresRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + status?: IndicesShardStoresShardStatus | IndicesShardStoresShardStatus[] +} + +export interface IndicesShardStoresResponse { + indices: Record +} + +export type IndicesShardStoresShardStatus = 'green' | 'yellow' | 'red' | 'all' + +export interface IndicesShardStoresShardStore { + allocation: IndicesShardStoresShardStoreAllocation + allocation_id: Id + attributes: Record + id: Id + legacy_version: VersionNumber + name: Name + store_exception: IndicesShardStoresShardStoreException + transport_address: TransportAddress +} + +export type IndicesShardStoresShardStoreAllocation = 'primary' | 'replica' | 'unused' + +export interface IndicesShardStoresShardStoreException { + reason: string + type: string +} + +export interface IndicesShardStoresShardStoreWrapper { + stores: IndicesShardStoresShardStore[] +} + +export interface IndicesShrinkRequest extends RequestBase { + index: IndexName + target: IndexName + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + aliases?: Record + settings?: Record +} + +export interface IndicesShrinkResponse extends AcknowledgedResponseBase { + shards_acknowledged: boolean + index: IndexName +} + +export interface IndicesSimulateIndexTemplateRequest extends RequestBase { + name: Name + index_patterns?: IndexName[] + composed_of?: Name[] + overlapping?: IndicesOverlappingIndexTemplate[] + template?: IndicesTemplateMapping +} + +export interface IndicesSimulateIndexTemplateResponse { +} + +export interface 
IndicesSimulateTemplateOverlapping { + name: Name + index_patterns: string[] +} + +export interface IndicesSimulateTemplateRequest extends RequestBase { + name?: Name + create?: boolean + master_timeout?: Time + template?: IndicesGetIndexTemplateIndexTemplate +} + +export interface IndicesSimulateTemplateResponse { + template: IndicesSimulateTemplateTemplate +} + +export interface IndicesSimulateTemplateTemplate { + aliases: Record + mappings: MappingTypeMapping + settings: Record + overlapping: IndicesSimulateTemplateOverlapping[] +} + +export interface IndicesSplitRequest extends RequestBase { + index: IndexName + target: IndexName + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards + aliases?: Record + settings?: Record +} + +export interface IndicesSplitResponse extends AcknowledgedResponseBase { + shards_acknowledged: boolean + index: IndexName +} + +export interface IndicesStatsIndexStats { + completion?: CompletionStats + docs?: DocStats + fielddata?: FielddataStats + flush?: FlushStats + get?: GetStats + indexing?: IndexingStats + merges?: MergesStats + query_cache?: QueryCacheStats + recovery?: RecoveryStats + refresh?: RefreshStats + request_cache?: RequestCacheStats + search?: SearchStats + segments?: SegmentsStats + store?: StoreStats + translog?: TranslogStats + warmer?: WarmerStats + bulk?: BulkStats + shards?: IndicesStatsShardsTotalStats +} + +export interface IndicesStatsIndicesStats { + primaries: IndicesStatsIndexStats + shards?: Record + total: IndicesStatsIndexStats + uuid?: Uuid +} + +export interface IndicesStatsRequest extends RequestBase { + metric?: Metrics + index?: Indices + completion_fields?: Fields + expand_wildcards?: ExpandWildcards + fielddata_fields?: Fields + fields?: Fields + forbid_closed_indices?: boolean + groups?: string | string[] + include_segment_file_sizes?: boolean + include_unloaded_segments?: boolean + level?: Level + types?: Types +} + +export interface IndicesStatsResponse { + indices?: Record + _shards: ShardStatistics + _all: IndicesStatsIndicesStats +} + +export interface IndicesStatsShardCommit { + generation: integer + id: Id + num_docs: long + user_data: Record +} + +export interface IndicesStatsShardFileSizeInfo { + description: string + size_in_bytes: long + min_size_in_bytes?: long + max_size_in_bytes?: long + average_size_in_bytes?: long + count?: long +} + +export interface IndicesStatsShardLease { + id: Id + retaining_seq_no: SequenceNumber + timestamp: long + source: string +} + +export interface IndicesStatsShardPath { + data_path: string + is_custom_data_path: boolean + state_path: string +} + +export interface IndicesStatsShardQueryCache { + cache_count: long + cache_size: long + evictions: long + hit_count: long + memory_size_in_bytes: long + miss_count: long + total_count: long +} + +export interface IndicesStatsShardRetentionLeases { + primary_term: long + version: VersionNumber + leases: IndicesStatsShardLease[] +} + +export interface IndicesStatsShardRouting { + node: string + primary: boolean + relocating_node?: string + state: IndicesStatsShardRoutingState +} + +export type IndicesStatsShardRoutingState = 'UNASSIGNED' | 'INITIALIZING' | 'STARTED' | 'RELOCATING' + +export interface IndicesStatsShardSequenceNumber { + global_checkpoint: long + local_checkpoint: long + max_seq_no: SequenceNumber +} + +export interface IndicesStatsShardStats { + commit: IndicesStatsShardCommit + completion: CompletionStats + docs: DocStats + fielddata: FielddataStats + flush: FlushStats + get: GetStats 
+ indexing: IndexingStats + merges: MergesStats + shard_path: IndicesStatsShardPath + query_cache: IndicesStatsShardQueryCache + recovery: RecoveryStats + refresh: RefreshStats + request_cache: RequestCacheStats + retention_leases: IndicesStatsShardRetentionLeases + routing: IndicesStatsShardRouting + search: SearchStats + segments: SegmentsStats + seq_no: IndicesStatsShardSequenceNumber + store: StoreStats + translog: TranslogStats + warmer: WarmerStats + bulk?: BulkStats + shards: IndicesStatsShardsTotalStats +} + +export interface IndicesStatsShardsTotalStats { + total_count: long +} + +export interface IndicesUnfreezeRequest extends RequestBase { + index: IndexName + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: string +} + +export interface IndicesUnfreezeResponse extends AcknowledgedResponseBase { + shards_acknowledged: boolean +} + +export interface IndicesUpdateAliasesIndicesUpdateAliasBulk { + [key: string]: never +} + +export interface IndicesUpdateAliasesRequest extends RequestBase { + master_timeout?: Time + timeout?: Time + actions?: IndicesUpdateAliasesIndicesUpdateAliasBulk[] +} + +export interface IndicesUpdateAliasesResponse extends AcknowledgedResponseBase { +} + +export interface IndicesValidateQueryIndicesValidationExplanation { + error?: string + explanation?: string + index: IndexName + valid: boolean +} + +export interface IndicesValidateQueryRequest extends RequestBase { + index?: Indices + type?: Types + allow_no_indices?: boolean + all_shards?: boolean + analyzer?: string + analyze_wildcard?: boolean + default_operator?: DefaultOperator + df?: string + expand_wildcards?: ExpandWildcards + explain?: boolean + ignore_unavailable?: boolean + lenient?: boolean + rewrite?: boolean + q?: string + query?: QueryDslQueryContainer +} + +export interface IndicesValidateQueryResponse { + explanations?: IndicesValidateQueryIndicesValidationExplanation[] + _shards?: ShardStatistics + valid: boolean + error?: string +} + +export interface IngestAppendProcessor extends IngestProcessorBase { + field: Field + value: any[] + allow_duplicates?: boolean +} + +export interface IngestAttachmentProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + indexed_chars?: long + indexed_chars_field?: Field + properties?: string[] + target_field?: Field + resource_name?: string +} + +export interface IngestBytesProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field?: Field +} + +export interface IngestCircleProcessor extends IngestProcessorBase { + error_distance: double + field: Field + ignore_missing: boolean + shape_type: IngestShapeType + target_field: Field +} + +export interface IngestConvertProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field: Field + type: IngestConvertType +} + +export type IngestConvertType = 'integer' | 'long' | 'float' | 'double' | 'string' | 'boolean' | 'auto' + +export interface IngestCsvProcessor extends IngestProcessorBase { + empty_value: any + description?: string + field: Field + ignore_missing?: boolean + quote?: string + separator?: string + target_fields: Fields + trim: boolean +} + +export interface IngestDateIndexNameProcessor extends IngestProcessorBase { + date_formats: string[] + date_rounding: string | IngestDateRounding + field: Field + index_name_format: string + index_name_prefix: string + locale: string + timezone: 
string +} + +export interface IngestDateProcessor extends IngestProcessorBase { + field: Field + formats: string[] + locale?: string + target_field?: Field + timezone?: string +} + +export type IngestDateRounding = 's' | 'm' | 'h' | 'd' | 'w' | 'M' | 'y' + +export interface IngestDissectProcessor extends IngestProcessorBase { + append_separator: string + field: Field + ignore_missing: boolean + pattern: string +} + +export interface IngestDotExpanderProcessor extends IngestProcessorBase { + field: Field + path?: string +} + +export interface IngestDropProcessor extends IngestProcessorBase { +} + +export interface IngestEnrichProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + max_matches?: integer + override?: boolean + policy_name: string + shape_relation?: GeoShapeRelation + target_field: Field +} + +export interface IngestFailProcessor extends IngestProcessorBase { + message: string +} + +export interface IngestForeachProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + processor: IngestProcessorContainer +} + +export interface IngestGeoIpProcessor extends IngestProcessorBase { + database_file: string + field: Field + first_only: boolean + ignore_missing: boolean + properties: string[] + target_field: Field +} + +export interface IngestGrokProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + pattern_definitions: Record + patterns: string[] + trace_match?: boolean +} + +export interface IngestGsubProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + pattern: string + replacement: string + target_field?: Field +} + +export interface IngestInferenceConfig { + regression?: IngestInferenceConfigRegression +} + +export interface IngestInferenceConfigRegression { + results_field: string +} + +export interface IngestInferenceProcessor extends IngestProcessorBase { + model_id: Id + target_field: Field + field_map?: Record + inference_config?: IngestInferenceConfig +} + +export interface IngestJoinProcessor extends IngestProcessorBase { + field: Field + separator: string + target_field?: Field +} + +export interface IngestJsonProcessor extends IngestProcessorBase { + add_to_root: boolean + field: Field + target_field: Field +} + +export interface IngestKeyValueProcessor extends IngestProcessorBase { + exclude_keys?: string[] + field: Field + field_split: string + ignore_missing?: boolean + include_keys?: string[] + prefix?: string + strip_brackets?: boolean + target_field?: Field + trim_key?: string + trim_value?: string + value_split: string +} + +export interface IngestLowercaseProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field?: Field +} + +export interface IngestPipeline { + description?: string + on_failure?: IngestProcessorContainer[] + processors?: IngestProcessorContainer[] + version?: VersionNumber +} + +export interface IngestPipelineConfig { + description?: string + version?: VersionNumber + processors: IngestProcessorContainer[] +} + +export interface IngestPipelineProcessor extends IngestProcessorBase { + name: Name +} + +export interface IngestProcessorBase { + if?: string + ignore_failure?: boolean + on_failure?: IngestProcessorContainer[] + tag?: string +} + +export interface IngestProcessorContainer { + attachment?: IngestAttachmentProcessor + append?: IngestAppendProcessor + csv?: IngestCsvProcessor + convert?: IngestConvertProcessor + date?: IngestDateProcessor + date_index_name?: IngestDateIndexNameProcessor + 
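+ // Sketch (hedged): these processor containers compose into an IngestPipeline (defined below), e.g.
+ //   const pipeline: IngestPipeline = { description: 'demo', processors: [{ lowercase: { field: 'user.name' } }] }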
dot_expander?: IngestDotExpanderProcessor + enrich?: IngestEnrichProcessor + fail?: IngestFailProcessor + foreach?: IngestForeachProcessor + json?: IngestJsonProcessor + user_agent?: IngestUserAgentProcessor + kv?: IngestKeyValueProcessor + geoip?: IngestGeoIpProcessor + grok?: IngestGrokProcessor + gsub?: IngestGsubProcessor + join?: IngestJoinProcessor + lowercase?: IngestLowercaseProcessor + remove?: IngestRemoveProcessor + rename?: IngestRenameProcessor + script?: Script + set?: IngestSetProcessor + sort?: IngestSortProcessor + split?: IngestSplitProcessor + trim?: IngestTrimProcessor + uppercase?: IngestUppercaseProcessor + urldecode?: IngestUrlDecodeProcessor + bytes?: IngestBytesProcessor + dissect?: IngestDissectProcessor + set_security_user?: IngestSetSecurityUserProcessor + pipeline?: IngestPipelineProcessor + drop?: IngestDropProcessor + circle?: IngestCircleProcessor + inference?: IngestInferenceProcessor +} + +export interface IngestRemoveProcessor extends IngestProcessorBase { + field: Fields + ignore_missing?: boolean +} + +export interface IngestRenameProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field: Field +} + +export interface IngestSetProcessor extends IngestProcessorBase { + field: Field + override?: boolean + value: any +} + +export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { + field: Field + properties?: string[] +} + +export type IngestShapeType = 'geo_shape' | 'shape' + +export interface IngestSortProcessor extends IngestProcessorBase { + field: Field + order: SearchSortOrder + target_field: Field +} + +export interface IngestSplitProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + preserve_trailing?: boolean + separator: string + target_field?: Field +} + +export interface IngestTrimProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field?: Field +} + +export interface IngestUppercaseProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field?: Field +} + +export interface IngestUrlDecodeProcessor extends IngestProcessorBase { + field: Field + ignore_missing?: boolean + target_field?: Field +} + +export interface IngestUserAgentProcessor extends IngestProcessorBase { + field: Field + ignore_missing: boolean + options: IngestUserAgentProperty[] + regex_file: string + target_field: Field +} + +export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD' + +export interface IngestDeletePipelineRequest extends RequestBase { + id: Id + master_timeout?: Time + timeout?: Time +} + +export interface IngestDeletePipelineResponse extends AcknowledgedResponseBase { +} + +export interface IngestGeoIpStatsGeoIpDownloadStatistics { + successful_downloads: integer + failed_downloads: integer + total_download_time: integer + database_count: integer + skipped_updates: integer +} + +export interface IngestGeoIpStatsGeoIpNodeDatabaseName { + name: Name +} + +export interface IngestGeoIpStatsGeoIpNodeDatabases { + databases: IngestGeoIpStatsGeoIpNodeDatabaseName[] + files_in_temp: string[] +} + +export interface IngestGeoIpStatsRequest extends RequestBase { +} + +export interface IngestGeoIpStatsResponse { + stats: IngestGeoIpStatsGeoIpDownloadStatistics + nodes: Record +} + +export interface IngestGetPipelineRequest extends RequestBase { + id?: Id + master_timeout?: Time + summary?: boolean +} + +export interface 
IngestGetPipelineResponse extends DictionaryResponseBase<string, IngestPipeline> { +} + +export interface IngestProcessorGrokRequest extends RequestBase { +} + +export interface IngestProcessorGrokResponse { + patterns: Record<string, string> +} + +export interface IngestPutPipelineRequest extends RequestBase { + id: Id + master_timeout?: Time + timeout?: Time + _meta?: Metadata + description?: string + on_failure?: IngestProcessorContainer[] + processors?: IngestProcessorContainer[] + version?: VersionNumber +} + +export interface IngestPutPipelineResponse extends AcknowledgedResponseBase { +} + +export interface IngestSimulateDocument { + _id?: Id + _index?: IndexName + _source: any +} + +export interface IngestSimulateDocumentSimulation { + _id: Id + _index: IndexName + _ingest: IngestSimulateIngest + _parent?: string + _routing?: string + _source: Record<string, any> + _type?: Type +} + +export interface IngestSimulateIngest { + timestamp: DateString + pipeline?: Name +} + +export interface IngestSimulatePipelineSimulation { + doc?: IngestSimulateDocumentSimulation + processor_results?: IngestSimulatePipelineSimulation[] + tag?: string + processor_type?: string + status?: WatcherActionStatusOptions +} + +export interface IngestSimulateRequest extends RequestBase { + id?: Id + verbose?: boolean + docs?: IngestSimulateDocument[] + pipeline?: IngestPipeline +} + +export interface IngestSimulateResponse { + docs: IngestSimulatePipelineSimulation[] +} + +export interface LicenseLicense { + expiry_date_in_millis: EpochMillis + issue_date_in_millis: EpochMillis + issued_to: string + issuer: string + max_nodes?: long + max_resource_units?: long + signature: string + start_date_in_millis: EpochMillis + type: LicenseLicenseType + uid: string +} + +export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' + +export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' + +export interface LicenseDeleteRequest extends RequestBase { +} + +export interface LicenseDeleteResponse extends AcknowledgedResponseBase { +} + +export interface LicenseGetLicenseInformation { + expiry_date: DateString + expiry_date_in_millis: EpochMillis + issue_date: DateString + issue_date_in_millis: EpochMillis + issued_to: string + issuer: string + max_nodes: long + max_resource_units?: integer + status: LicenseLicenseStatus + type: LicenseLicenseType + uid: Uuid + start_date_in_millis: EpochMillis +} + +export interface LicenseGetRequest extends RequestBase { + accept_enterprise?: boolean + local?: boolean +} + +export interface LicenseGetResponse { + license: LicenseGetLicenseInformation +} + +export interface LicenseGetBasicStatusRequest extends RequestBase { +} + +export interface LicenseGetBasicStatusResponse { + eligible_to_start_basic: boolean +} + +export interface LicenseGetTrialStatusRequest extends RequestBase { +} + +export interface LicenseGetTrialStatusResponse { + eligible_to_start_trial: boolean +} + +export interface LicensePostAcknowledgement { + license: string[] + message: string +} + +export interface LicensePostRequest extends RequestBase { + acknowledge?: boolean + license?: LicenseLicense + licenses?: LicenseLicense[] +} + +export interface LicensePostResponse { + acknowledge?: LicensePostAcknowledgement + acknowledged: boolean + license_status: LicenseLicenseStatus +} + +export interface LicensePostStartBasicRequest extends RequestBase { + acknowledge?: boolean +} + +export interface LicensePostStartBasicResponse extends AcknowledgedResponseBase { + acknowledge: 
Record + basic_was_started: boolean + error_message: string +} + +export interface LicensePostStartTrialRequest extends RequestBase { + acknowledge?: boolean + type_query_string?: string +} + +export interface LicensePostStartTrialResponse extends AcknowledgedResponseBase { + error_message?: string + acknowledged: boolean + trial_was_started: boolean + type: LicenseLicenseType +} + +export interface LogstashPipeline { + description: string + last_modified: Timestamp + pipeline_metadata: LogstashPipelineMetadata + username: string + pipeline: string + pipeline_settings: LogstashPipelineSettings +} + +export interface LogstashPipelineMetadata { + type: string + version: string +} + +export interface LogstashPipelineSettings { + 'pipeline.workers': integer + 'pipeline.batch.size': integer + 'pipeline.batch.delay': integer + 'queue.type': string + 'queue.max_bytes.number': integer + 'queue.max_bytes.units': string + 'queue.checkpoint.writes': integer +} + +export interface LogstashDeletePipelineRequest extends RequestBase { + id: Id +} + +export type LogstashDeletePipelineResponse = boolean + +export interface LogstashGetPipelineRequest extends RequestBase { + id: Ids +} + +export type LogstashGetPipelineResponse = Record<string, LogstashPipeline> + +export interface LogstashPutPipelineRequest extends RequestBase { + id: Id + pipeline?: LogstashPipeline +} + +export type LogstashPutPipelineResponse = boolean + +export interface MigrationDeprecationsDeprecation { + details: string + level: MigrationDeprecationsDeprecationLevel + message: string + url: string +} + +export type MigrationDeprecationsDeprecationLevel = 'none' | 'info' | 'warning' | 'critical' + +export interface MigrationDeprecationsRequest extends RequestBase { + index?: IndexName +} + +export interface MigrationDeprecationsResponse { + cluster_settings: MigrationDeprecationsDeprecation[] + index_settings: Record<string, MigrationDeprecationsDeprecation[]> + node_settings: MigrationDeprecationsDeprecation[] + ml_settings: MigrationDeprecationsDeprecation[] +} + +export interface MlAnalysisConfig { + bucket_span: TimeSpan + categorization_analyzer?: MlCategorizationAnalyzer | string + categorization_field_name?: Field + categorization_filters?: string[] + detectors: MlDetector[] + influencers?: Field[] + model_prune_window?: Time + latency?: Time + multivariate_by_fields?: boolean + per_partition_categorization?: MlPerPartitionCategorization + summary_count_field_name?: Field +} + +export interface MlAnalysisConfigRead { + bucket_span: TimeSpan + categorization_analyzer?: MlCategorizationAnalyzer | string + categorization_field_name?: Field + categorization_filters?: string[] + detectors: MlDetector[] + influencers: Field[] + model_prune_window?: Time + latency?: Time + multivariate_by_fields?: boolean + per_partition_categorization?: MlPerPartitionCategorization + summary_count_field_name?: Field +} + +export interface MlAnalysisLimits { + categorization_examples_limit?: long + model_memory_limit?: string +} + +export interface MlAnalysisMemoryLimit { + model_memory_limit: string +} + +export interface MlAnomaly { + actual?: double[] + bucket_span: Time + by_field_name?: string + by_field_value?: string + causes?: MlAnomalyCause[] + detector_index: integer + field_name?: string + function?: string + function_description?: string + influencers?: MlInfluence[] + initial_record_score: double + is_interim: boolean + job_id: string + over_field_name?: string + over_field_value?: string + partition_field_name?: string + partition_field_value?: string + probability: double + record_score: double + 
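+ // Usage sketch (hypothetical job id; `client` assumed): anomaly records of this shape are returned by
+ //   const res = await client.ml.getRecords({ job_id: 'my-job' })  // see MlGetRecordsResponse below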
result_type: string + timestamp: EpochMillis + typical?: double[] +} + +export interface MlAnomalyCause { + actual: double[] + by_field_name: Name + by_field_value: string + correlated_by_field_value: string + field_name: Field + function: string + function_description: string + influencers: MlInfluence[] + over_field_name: Name + over_field_value: string + partition_field_name: string + partition_field_value: string + probability: double + typical: double[] +} + +export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time' + +export interface MlBucketInfluencer { + bucket_span: long + influencer_score: double + influencer_field_name: Field + influencer_field_value: string + initial_influencer_score: double + is_interim: boolean + job_id: Id + probability: double + result_type: string + timestamp: Time + foo?: string +} + +export interface MlBucketSummary { + anomaly_score: double + bucket_influencers: MlBucketInfluencer[] + bucket_span: Time + event_count: long + initial_anomaly_score: double + is_interim: boolean + job_id: Id + partition_scores?: MlPartitionScore[] + processing_time_ms: double + result_type: string + timestamp: Time +} + +export interface MlCalendarEvent { + calendar_id?: Id + event_id?: Id + description: string + end_time: EpochMillis + start_time: EpochMillis +} + +export interface MlCategorizationAnalyzer { + char_filter?: (string | AnalysisCharFilter)[] + filter?: (string | AnalysisTokenFilter)[] + tokenizer?: string | AnalysisTokenizer +} + +export type MlCategorizationStatus = 'ok' | 'warn' + +export interface MlCategory { + category_id: ulong + examples: string[] + grok_pattern?: string + job_id: Id + max_matching_length: ulong + partition_field_name?: string + partition_field_value?: string + regex: string + terms: string + num_matches?: long + preferred_to_categories?: Id[] + p?: string + result_type: string + mlcategory: string +} + +export interface MlChunkingConfig { + mode: MlChunkingMode + time_span?: Time +} + +export type MlChunkingMode = 'auto' | 'manual' | 'off' + +export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' + +export interface MlCustomSettings { + custom_urls?: XpackUsageUrlConfig[] + created_by?: string + job_tags?: Record +} + +export interface MlDataCounts { + bucket_count: long + earliest_record_timestamp: long + empty_bucket_count: long + input_bytes: long + input_field_count: long + input_record_count: long + invalid_date_count: long + job_id: Id + last_data_time: long + latest_empty_bucket_timestamp: long + latest_record_timestamp: long + latest_sparse_bucket_timestamp: long + latest_bucket_timestamp: long + missing_field_count: long + out_of_order_timestamp_count: long + processed_field_count: long + processed_record_count: long + sparse_bucket_count: long +} + +export interface MlDataDescription { + format?: string + time_field: Field + time_format?: string + field_delimiter?: string +} + +export interface MlDatafeed { + aggregations?: Record + aggs?: Record + chunking_config?: MlChunkingConfig + datafeed_id: Id + frequency?: Timestamp + indices: string[] + indexes?: string[] + job_id: Id + max_empty_searches?: integer + query: QueryDslQueryContainer + query_delay?: Timestamp + script_fields?: Record + scroll_size?: integer + delayed_data_check_config: MlDelayedDataCheckConfig + runtime_mappings?: MappingRuntimeFields + indices_options?: MlDatafeedIndicesOptions +} + +export interface MlDatafeedConfig { + aggregations?: Record + aggs?: Record + chunking_config?: MlChunkingConfig + datafeed_id?: Id + 
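+ // Minimal config sketch (hypothetical index pattern); `indices` and `query` are the required fields:
+ //   const df: MlDatafeedConfig = { indices: ['metrics-*'], query: { match_all: {} } }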
delayed_data_check_config?: MlDelayedDataCheckConfig + frequency?: Timestamp + indexes?: string[] + indices: string[] + indices_options?: MlDatafeedIndicesOptions + job_id?: Id + max_empty_searches?: integer + query: QueryDslQueryContainer + query_delay?: Timestamp + runtime_mappings?: MappingRuntimeFields + script_fields?: Record + scroll_size?: integer +} + +export interface MlDatafeedIndicesOptions { + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + ignore_throttled?: boolean +} + +export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' + +export interface MlDatafeedStats { + assignment_explanation: string + datafeed_id: Id + node: MlDiscoveryNode + state: MlDatafeedState + timing_stats: MlDatafeedTimingStats +} + +export interface MlDatafeedTimingStats { + bucket_count: long + exponential_average_search_time_per_hour_ms: double + job_id: Id + search_count: long + total_search_time_ms: double + average_search_time_per_bucket_ms: number +} + +export interface MlDataframeAnalysis { + alpha?: double + dependent_variable: string + downsample_factor?: double + early_stopping_enabled?: boolean + eta?: double + eta_growth_rate_per_tree?: double + feature_bag_fraction?: double + feature_processors?: MlDataframeAnalysisFeatureProcessor[] + gamma?: double + lambda?: double + max_optimization_rounds_per_hyperparameter?: integer + max_trees?: integer + maximum_number_trees?: integer + num_top_feature_importance_values?: integer + prediction_field_name?: Field + randomize_seed?: double + soft_tree_depth_limit?: integer + soft_tree_depth_tolerance?: double + training_percent?: Percentage +} + +export type MlDataframeAnalysisAnalyzedFields = string[] | MlDataframeAnalysisAnalyzedFieldsIncludeExclude + +export interface MlDataframeAnalysisAnalyzedFieldsIncludeExclude { + includes: string[] + excludes: string[] +} + +export interface MlDataframeAnalysisClassification extends MlDataframeAnalysis { + class_assignment_objective?: string + num_top_classes?: integer +} + +export interface MlDataframeAnalysisContainer { + classification?: MlDataframeAnalysisClassification + outlier_detection?: MlDataframeAnalysisOutlierDetection + regression?: MlDataframeAnalysisRegression +} + +export interface MlDataframeAnalysisFeatureProcessor { + frequency_encoding?: MlDataframeAnalysisFeatureProcessorFrequencyEncoding + multi_encoding?: MlDataframeAnalysisFeatureProcessorMultiEncoding + n_gram_encoding?: MlDataframeAnalysisFeatureProcessorNGramEncoding + one_hot_encoding?: MlDataframeAnalysisFeatureProcessorOneHotEncoding + target_mean_encoding?: MlDataframeAnalysisFeatureProcessorTargetMeanEncoding +} + +export interface MlDataframeAnalysisFeatureProcessorFrequencyEncoding { + feature_name: Name + field: Field + frequency_map: Record +} + +export interface MlDataframeAnalysisFeatureProcessorMultiEncoding { + processors: integer[] +} + +export interface MlDataframeAnalysisFeatureProcessorNGramEncoding { + feature_prefix?: string + field: Field + length?: integer + n_grams: integer[] + start?: integer + custom?: boolean +} + +export interface MlDataframeAnalysisFeatureProcessorOneHotEncoding { + field: Field + hot_map: string +} + +export interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding { + default_value: integer + feature_name: Name + field: Field + target_map: Record +} + +export interface MlDataframeAnalysisOutlierDetection { + compute_feature_influence?: boolean + feature_influence_threshold?: double + method?: string + 
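+ // Sketch (hedged): typically supplied through the analysis container, e.g.
+ //   const analysis: MlDataframeAnalysisContainer = { outlier_detection: { n_neighbors: 5 } }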
n_neighbors?: integer + outlier_fraction?: double + standardization_enabled?: boolean +} + +export interface MlDataframeAnalysisRegression extends MlDataframeAnalysis { + loss_function?: string + loss_function_parameter?: double +} + +export interface MlDataframeAnalytics { + analysis_stats?: MlDataframeAnalyticsStatsContainer + assignment_explanation?: string + data_counts: MlDataframeAnalyticsStatsDataCounts + id: Id + memory_usage: MlDataframeAnalyticsStatsMemoryUsage + node?: NodeAttributes + progress: MlDataframeAnalyticsStatsProgress[] + state: MlDataframeState +} + +export interface MlDataframeAnalyticsDestination { + index: IndexName + results_field?: Field +} + +export interface MlDataframeAnalyticsFieldSelection { + is_included: boolean + is_required: boolean + feature_type?: string + mapping_types: string[] + name: Field + reason?: string +} + +export interface MlDataframeAnalyticsMemoryEstimation { + expected_memory_with_disk: string + expected_memory_without_disk: string +} + +export interface MlDataframeAnalyticsSource { + index: Indices + query?: QueryDslQueryContainer + runtime_mappings?: MappingRuntimeFields + _source?: MlDataframeAnalysisAnalyzedFields +} + +export interface MlDataframeAnalyticsStatsContainer { + classification_stats?: MlDataframeAnalyticsStatsHyperparameters + outlier_detection_stats?: MlDataframeAnalyticsStatsOutlierDetection + regression_stats?: MlDataframeAnalyticsStatsHyperparameters +} + +export interface MlDataframeAnalyticsStatsDataCounts { + skipped_docs_count: integer + test_docs_count: integer + training_docs_count: integer +} + +export interface MlDataframeAnalyticsStatsHyperparameters { + hyperparameters: MlHyperparameters + iteration: integer + timestamp: DateString + timing_stats: MlTimingStats + validation_loss: MlValidationLoss +} + +export interface MlDataframeAnalyticsStatsMemoryUsage { + memory_reestimate_bytes?: long + peak_usage_bytes: long + status: string + timestamp?: DateString +} + +export interface MlDataframeAnalyticsStatsOutlierDetection { + parameters: MlOutlierDetectionParameters + timestamp: DateString + timing_stats: MlTimingStats +} + +export interface MlDataframeAnalyticsStatsProgress { + phase: string + progress_percent: integer +} + +export interface MlDataframeAnalyticsSummary { + id: Id + source: MlDataframeAnalyticsSource + dest: MlDataframeAnalyticsDestination + analysis: MlDataframeAnalysisContainer + description?: string + model_memory_limit?: string + max_num_threads?: integer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields + allow_lazy_start?: boolean + create_time?: long + version?: VersionString +} + +export interface MlDataframeEvaluationClassification { + actual_field: Field + predicted_field?: Field + top_classes_field?: Field + metrics?: MlDataframeEvaluationClassificationMetrics +} + +export interface MlDataframeEvaluationClassificationMetrics extends MlDataframeEvaluationMetrics { + accuracy?: Record + multiclass_confusion_matrix?: Record +} + +export interface MlDataframeEvaluationClassificationMetricsAucRoc { + class_name?: Name + include_curve?: boolean +} + +export interface MlDataframeEvaluationContainer { + classification?: MlDataframeEvaluationClassification + outlier_detection?: MlDataframeEvaluationOutlierDetection + regression?: MlDataframeEvaluationRegression +} + +export interface MlDataframeEvaluationMetrics { + auc_roc?: MlDataframeEvaluationClassificationMetricsAucRoc + precision?: Record + recall?: Record +} + +export interface MlDataframeEvaluationOutlierDetection { + 
actual_field: Field + predicted_probability_field: Field + metrics?: MlDataframeEvaluationOutlierDetectionMetrics +} + +export interface MlDataframeEvaluationOutlierDetectionMetrics extends MlDataframeEvaluationMetrics { + confusion_matrix?: Record +} + +export interface MlDataframeEvaluationRegression { + actual_field: Field + predicted_field: Field + metrics?: MlDataframeEvaluationRegressionMetrics +} + +export interface MlDataframeEvaluationRegressionMetrics { + mse?: Record + msle?: MlDataframeEvaluationRegressionMetricsMsle + huber?: MlDataframeEvaluationRegressionMetricsHuber + r_squared?: Record +} + +export interface MlDataframeEvaluationRegressionMetricsHuber { + delta?: double +} + +export interface MlDataframeEvaluationRegressionMetricsMsle { + offset?: double +} + +export type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed' + +export interface MlDelayedDataCheckConfig { + check_window?: Time + enabled: boolean +} + +export interface MlDetectionRule { + actions?: MlRuleAction[] + conditions?: MlRuleCondition[] + scope?: Record +} + +export interface MlDetector { + by_field_name?: Field + custom_rules?: MlDetectionRule[] + detector_description?: string + detector_index?: integer + exclude_frequent?: MlExcludeFrequent + field_name?: Field + function: string + over_field_name?: Field + partition_field_name?: Field + use_null?: boolean +} + +export interface MlDiscoveryNode { + attributes: Record + ephemeral_id: Id + id: Id + name: Name + transport_address: TransportAddress +} + +export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over' + +export interface MlFilter { + description?: string + filter_id: Id + items: string[] +} + +export interface MlFilterRef { + filter_id: Id + filter_type?: MlFilterType +} + +export type MlFilterType = 'include' | 'exclude' + +export interface MlHyperparameter { + absolute_importance?: double + name: Name + relative_importance?: double + supplied: boolean + value: double +} + +export interface MlHyperparameters { + alpha?: double + lambda?: double + gamma?: double + eta?: double + eta_growth_rate_per_tree?: double + feature_bag_fraction?: double + downsample_factor?: double + max_attempts_to_add_tree?: integer + max_optimization_rounds_per_hyperparameter?: integer + max_trees?: integer + num_folds?: integer + num_splits_per_feature?: integer + soft_tree_depth_limit?: integer + soft_tree_depth_tolerance?: double +} + +export interface MlInfluence { + influencer_field_name: string + influencer_field_values: string[] +} + +export interface MlJob { + allow_lazy_open: boolean + analysis_config: MlAnalysisConfig + analysis_limits?: MlAnalysisLimits + background_persist_interval: Time + blocked?: MlJobBlocked + create_time: integer + custom_settings?: MlCustomSettings + daily_model_snapshot_retention_after_days?: long + data_description: MlDataDescription + datafeed_config?: MlDatafeed + deleting?: boolean + description?: string + finished_time?: integer + groups?: string[] + job_id: Id + job_type: string + job_version: VersionString + model_plot_config?: MlModelPlotConfig + model_snapshot_id?: Id + model_snapshot_retention_days: long + renormalization_window_days?: long + results_index_name: IndexName + results_retention_days?: long +} + +export interface MlJobBlocked { + reason: MlJobBlockedReason + task_id?: TaskId +} + +export type MlJobBlockedReason = 'delete' | 'reset' | 'revert' + +export interface MlJobConfig { + allow_lazy_open?: boolean + analysis_config: MlAnalysisConfig + analysis_limits?: MlAnalysisLimits + 
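+ // Minimal job sketch (hypothetical field names); only `analysis_config` and `data_description` are required:
+ //   const job: MlJobConfig = { analysis_config: { bucket_span: '15m', detectors: [{ function: 'count' }] }, data_description: { time_field: '@timestamp' } }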
background_persist_interval?: Time + custom_settings?: MlCustomSettings + daily_model_snapshot_retention_after_days?: long + data_description: MlDataDescription + datafeed_config?: MlDatafeedConfig + description?: string + groups?: string[] + job_id?: Id + job_type?: string + model_plot_config?: MlModelPlotConfig + model_snapshot_retention_days?: long + renormalization_window_days?: long + results_index_name?: IndexName + results_retention_days?: long +} + +export interface MlJobForecastStatistics { + memory_bytes?: MlJobStatistics + processing_time_ms?: MlJobStatistics + records?: MlJobStatistics + status?: Record + total: long + forecasted_jobs: integer +} + +export type MlJobState = 'closing' | 'closed' | 'opened' | 'failed' | 'opening' + +export interface MlJobStatistics { + avg: double + max: double + min: double + total: double +} + +export interface MlJobStats { + assignment_explanation: string + data_counts: MlDataCounts + forecasts_stats: MlJobForecastStatistics + job_id: string + model_size_stats: MlModelSizeStats + node: MlDiscoveryNode + open_time?: DateString + state: MlJobState + timing_stats: MlJobTimingStats + deleting?: boolean +} + +export interface MlJobTimingStats { + average_bucket_processing_time_ms: double + bucket_count: long + exponential_average_bucket_processing_time_ms: double + exponential_average_bucket_processing_time_per_hour_ms: double + job_id: Id + total_bucket_processing_time_ms: double + maximum_bucket_processing_time_ms: double + minimum_bucket_processing_time_ms: double +} + +export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' + +export interface MlModelPlotConfig { + annotations_enabled?: boolean + enabled?: boolean + terms?: Field +} + +export interface MlModelSizeStats { + bucket_allocation_failures_count: long + job_id: Id + log_time: Time + memory_status: MlMemoryStatus + model_bytes: long + model_bytes_exceeded: long + model_bytes_memory_limit: long + peak_model_bytes: long + assignment_memory_basis?: string + result_type: string + total_by_field_count: long + total_over_field_count: long + total_partition_field_count: long + categorization_status: MlCategorizationStatus + categorized_doc_count: integer + dead_category_count: integer + failed_category_count: integer + frequent_category_count: integer + rare_category_count: integer + total_category_count: integer + timestamp?: long +} + +export interface MlModelSnapshot { + description?: string + job_id: Id + latest_record_time_stamp: integer + latest_result_time_stamp: integer + min_version: VersionString + model_size_stats: MlModelSizeStats + retain: boolean + snapshot_doc_count: long + snapshot_id: Id + timestamp: integer +} + +export interface MlOutlierDetectionParameters { + compute_feature_influence?: boolean + feature_influence_threshold?: double + method?: string + n_neighbors?: integer + outlier_fraction?: double + standardization_enabled?: boolean +} + +export interface MlOverallBucket { + bucket_span: long + is_interim: boolean + jobs: MlOverallBucketJob[] + overall_score: double + result_type: string + timestamp: Time +} + +export interface MlOverallBucketJob { + job_id: Id + max_anomaly_score: double +} + +export interface MlPage { + from?: integer + size?: integer +} + +export interface MlPartitionScore { + initial_record_score: double + partition_field_name: Field + partition_field_value: string + probability: double + record_score: double +} + +export interface MlPerPartitionCategorization { + enabled?: boolean + stop_on_warn?: boolean +} + +export type MlRuleAction = 
'skip_result' | 'skip_model_update' + +export interface MlRuleCondition { + applies_to: MlAppliesTo + operator: MlConditionOperator + value: double +} + +export interface MlTimingStats { + elapsed_time: integer + iteration_time?: integer +} + +export interface MlTotalFeatureImportance { + feature_name: Name + importance: MlTotalFeatureImportanceStatistics[] + classes: MlTotalFeatureImportanceClass[] +} + +export interface MlTotalFeatureImportanceClass { + class_name: Name + importance: MlTotalFeatureImportanceStatistics[] +} + +export interface MlTotalFeatureImportanceStatistics { + mean_magnitude: double + max: integer + min: integer +} + +export interface MlTrainedModelConfig { + model_id: Id + tags: string[] + version?: VersionString + compressed_definition?: string + created_by?: string + create_time?: Time + default_field_map?: Record + description?: string + estimated_heap_memory_usage_bytes?: integer + estimated_operations?: integer + inference_config: AggregationsInferenceConfigContainer + input: MlTrainedModelConfigInput + license_level?: string + metadata?: MlTrainedModelConfigMetadata +} + +export interface MlTrainedModelConfigInput { + field_names: Field[] +} + +export interface MlTrainedModelConfigMetadata { + model_aliases?: string[] + feature_importance_baseline?: Record + hyperparameters?: MlHyperparameter[] + total_feature_importance?: MlTotalFeatureImportance[] +} + +export interface MlTrainedModelInferenceStats { + failure_count: long + inference_count: long + cache_miss_count: long + missing_all_fields_count: long + timestamp: Time +} + +export interface MlTrainedModelStats { + model_id: Id + pipeline_count: integer + inference_stats?: MlTrainedModelInferenceStats + ingest?: Record +} + +export interface MlValidationLoss { + fold_values: string[] + loss_type: string +} + +export interface MlCloseJobRequest extends RequestBase { + job_id: Id + allow_no_match?: boolean + allow_no_jobs?: boolean + force?: boolean + timeout?: Time +} + +export interface MlCloseJobResponse { + closed: boolean +} + +export interface MlDeleteCalendarRequest extends RequestBase { + calendar_id: Id +} + +export interface MlDeleteCalendarResponse extends AcknowledgedResponseBase { +} + +export interface MlDeleteCalendarEventRequest extends RequestBase { + calendar_id: Id + event_id: Id +} + +export interface MlDeleteCalendarEventResponse extends AcknowledgedResponseBase { +} + +export interface MlDeleteCalendarJobRequest extends RequestBase { + calendar_id: Id + job_id: Id +} + +export interface MlDeleteCalendarJobResponse { + calendar_id: Id + description?: string + job_ids: Ids +} + +export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { + id: Id + force?: boolean + timeout?: Time +} + +export interface MlDeleteDataFrameAnalyticsResponse extends AcknowledgedResponseBase { +} + +export interface MlDeleteDatafeedRequest extends RequestBase { + datafeed_id: Id + force?: boolean +} + +export interface MlDeleteDatafeedResponse extends AcknowledgedResponseBase { +} + +export interface MlDeleteExpiredDataRequest extends RequestBase { + job_id?: Id + requests_per_second?: float + timeout?: Time +} + +export interface MlDeleteExpiredDataResponse { + deleted: boolean +} + +export interface MlDeleteFilterRequest extends RequestBase { + filter_id: Id +} + +export interface MlDeleteFilterResponse extends AcknowledgedResponseBase { +} + +export interface MlDeleteForecastRequest extends RequestBase { + job_id: Id + forecast_id?: Id + allow_no_forecasts?: boolean + timeout?: Time +} + +export 
interface MlDeleteForecastResponse extends AcknowledgedResponseBase { +} + +export interface MlDeleteJobRequest extends RequestBase { + job_id: Id + force?: boolean + wait_for_completion?: boolean +} + +export interface MlDeleteJobResponse extends AcknowledgedResponseBase { +} + +export interface MlDeleteModelSnapshotRequest extends RequestBase { + job_id: Id + snapshot_id: Id +} + +export interface MlDeleteModelSnapshotResponse extends AcknowledgedResponseBase { +} + +export interface MlDeleteTrainedModelRequest extends RequestBase { + model_id: Id +} + +export interface MlDeleteTrainedModelResponse extends AcknowledgedResponseBase { +} + +export interface MlDeleteTrainedModelAliasRequest extends RequestBase { + model_alias: Name + model_id: Id +} + +export interface MlDeleteTrainedModelAliasResponse extends AcknowledgedResponseBase { +} + +export interface MlEstimateModelMemoryRequest extends RequestBase { + analysis_config?: MlAnalysisConfig + max_bucket_cardinality?: Record + overall_cardinality?: Record +} + +export interface MlEstimateModelMemoryResponse { + model_memory_estimate: string +} + +export interface MlEvaluateDataFrameConfusionMatrixItem { + actual_class: Name + actual_class_doc_count: integer + predicted_classes: MlEvaluateDataFrameConfusionMatrixPrediction[] + other_predicted_class_doc_count: integer +} + +export interface MlEvaluateDataFrameConfusionMatrixPrediction { + predicted_class: Name + count: integer +} + +export interface MlEvaluateDataFrameConfusionMatrixThreshold { + tp: integer + fp: integer + tn: integer + fn: integer +} + +export interface MlEvaluateDataFrameDataframeClassificationSummary { + auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc + accuracy?: MlEvaluateDataFrameDataframeClassificationSummaryAccuracy + multiclass_confusion_matrix?: MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix + precision?: MlEvaluateDataFrameDataframeClassificationSummaryPrecision + recall?: MlEvaluateDataFrameDataframeClassificationSummaryRecall +} + +export interface MlEvaluateDataFrameDataframeClassificationSummaryAccuracy { + classes: MlEvaluateDataFrameDataframeEvaluationClass[] + overall_accuracy: double +} + +export interface MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix { + confusion_matrix: MlEvaluateDataFrameConfusionMatrixItem[] + other_actual_class_count: integer +} + +export interface MlEvaluateDataFrameDataframeClassificationSummaryPrecision { + classes: MlEvaluateDataFrameDataframeEvaluationClass[] + avg_precision: double +} + +export interface MlEvaluateDataFrameDataframeClassificationSummaryRecall { + classes: MlEvaluateDataFrameDataframeEvaluationClass[] + avg_recall: double +} + +export interface MlEvaluateDataFrameDataframeEvaluationClass extends MlEvaluateDataFrameDataframeEvaluationValue { + class_name: Name +} + +export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc extends MlEvaluateDataFrameDataframeEvaluationValue { + curve?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem[] +} + +export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem { + tpr: double + fpr: double + threshold: double +} + +export interface MlEvaluateDataFrameDataframeEvaluationValue { + value: double +} + +export interface MlEvaluateDataFrameDataframeOutlierDetectionSummary { + auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc + precision?: Record + recall?: Record + confusion_matrix?: Record +} + +export interface 
MlEvaluateDataFrameDataframeRegressionSummary { + huber?: MlEvaluateDataFrameDataframeEvaluationValue + mse?: MlEvaluateDataFrameDataframeEvaluationValue + msle?: MlEvaluateDataFrameDataframeEvaluationValue + r_squared?: MlEvaluateDataFrameDataframeEvaluationValue +} + +export interface MlEvaluateDataFrameRequest extends RequestBase { + evaluation: MlDataframeEvaluationContainer + index: IndexName + query?: QueryDslQueryContainer +} + +export interface MlEvaluateDataFrameResponse { + classification?: MlEvaluateDataFrameDataframeClassificationSummary + outlier_detection?: MlEvaluateDataFrameDataframeOutlierDetectionSummary + regression?: MlEvaluateDataFrameDataframeRegressionSummary +} + +export interface MlExplainDataFrameAnalyticsRequest extends RequestBase { + id?: Id + source?: MlDataframeAnalyticsSource + dest?: MlDataframeAnalyticsDestination + analysis: MlDataframeAnalysisContainer + description?: string + model_memory_limit?: string + max_num_threads?: integer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields + allow_lazy_start?: boolean +} + +export interface MlExplainDataFrameAnalyticsResponse { + field_selection: MlDataframeAnalyticsFieldSelection[] + memory_estimation: MlDataframeAnalyticsMemoryEstimation +} + +export interface MlFlushJobRequest extends RequestBase { + job_id: Id + skip_time?: string + advance_time?: DateString + calc_interim?: boolean + end?: DateString + start?: DateString +} + +export interface MlFlushJobResponse { + flushed: boolean + last_finalized_bucket_end?: integer +} + +export interface MlForecastRequest extends RequestBase { + job_id: Id + duration?: Time + expires_in?: Time +} + +export interface MlForecastResponse extends AcknowledgedResponseBase { + forecast_id: Id +} + +export interface MlGetBucketsRequest extends RequestBase { + job_id: Id + timestamp?: Timestamp + from?: integer + size?: integer + exclude_interim?: boolean + sort?: Field + desc?: boolean + start?: DateString + end?: DateString + anomaly_score?: double + expand?: boolean + page?: MlPage +} + +export interface MlGetBucketsResponse { + buckets: MlBucketSummary[] + count: long +} + +export interface MlGetCalendarEventsRequest extends RequestBase { + calendar_id: Id + job_id?: Id + end?: DateString + from?: integer + start?: string + size?: integer +} + +export interface MlGetCalendarEventsResponse { + count: long + events: MlCalendarEvent[] +} + +export interface MlGetCalendarsCalendar { + calendar_id: Id + description?: string + job_ids: Id[] +} + +export interface MlGetCalendarsRequest extends RequestBase { + calendar_id?: Id + from?: integer + size?: integer + page?: MlPage +} + +export interface MlGetCalendarsResponse { + calendars: MlGetCalendarsCalendar[] + count: long +} + +export interface MlGetCategoriesRequest extends RequestBase { + job_id: Id + category_id?: CategoryId + from?: integer + size?: integer + partition_field_value?: string + page?: MlPage +} + +export interface MlGetCategoriesResponse { + categories: MlCategory[] + count: long +} + +export interface MlGetDataFrameAnalyticsRequest extends RequestBase { + id?: Id + allow_no_match?: boolean + from?: integer + size?: integer + exclude_generated?: boolean +} + +export interface MlGetDataFrameAnalyticsResponse { + count: integer + data_frame_analytics: MlDataframeAnalyticsSummary[] +} + +export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase { + id?: Id + allow_no_match?: boolean + from?: integer + size?: integer + verbose?: boolean +} + +export interface MlGetDataFrameAnalyticsStatsResponse { 
+ count: long + data_frame_analytics: MlDataframeAnalytics[] +} + +export interface MlGetDatafeedStatsRequest extends RequestBase { + datafeed_id?: Ids + allow_no_datafeeds?: boolean +} + +export interface MlGetDatafeedStatsResponse { + count: long + datafeeds: MlDatafeedStats[] +} + +export interface MlGetDatafeedsRequest extends RequestBase { + datafeed_id?: Ids + allow_no_datafeeds?: boolean + exclude_generated?: boolean +} + +export interface MlGetDatafeedsResponse { + count: long + datafeeds: MlDatafeed[] +} + +export interface MlGetFiltersRequest extends RequestBase { + filter_id?: Id + from?: integer + size?: integer +} + +export interface MlGetFiltersResponse { + count: long + filters: MlFilter[] +} + +export interface MlGetInfluencersRequest extends RequestBase { + job_id: Id + desc?: boolean + end?: DateString + exclude_interim?: boolean + influencer_score?: double + from?: integer + size?: integer + sort?: Field + start?: DateString + page?: MlPage +} + +export interface MlGetInfluencersResponse { + count: long + influencers: MlBucketInfluencer[] +} + +export interface MlGetJobStatsRequest extends RequestBase { + job_id?: Id + allow_no_jobs?: boolean +} + +export interface MlGetJobStatsResponse { + count: long + jobs: MlJobStats[] +} + +export interface MlGetJobsRequest extends RequestBase { + job_id?: Ids + allow_no_match?: boolean + allow_no_jobs?: boolean + exclude_generated?: boolean +} + +export interface MlGetJobsResponse { + count: long + jobs: MlJob[] +} + +export interface MlGetModelSnapshotsRequest extends RequestBase { + job_id: Id + snapshot_id?: Id + desc?: boolean + end?: Time + from?: integer + size?: integer + sort?: Field + start?: Time +} + +export interface MlGetModelSnapshotsResponse { + count: long + model_snapshots: MlModelSnapshot[] +} + +export interface MlGetOverallBucketsRequest extends RequestBase { + job_id: Id + bucket_span?: Time + overall_score?: double | string + top_n?: integer + end?: Time + start?: Time + exclude_interim?: boolean + allow_no_match?: boolean + allow_no_jobs?: boolean +} + +export interface MlGetOverallBucketsResponse { + count: long + overall_buckets: MlOverallBucket[] +} + +export interface MlGetRecordsRequest extends RequestBase { + job_id: Id + exclude_interim?: boolean + from?: integer + size?: integer + start?: DateString + end?: DateString + desc?: boolean + page?: MlPage + record_score?: double + sort?: Field +} + +export interface MlGetRecordsResponse { + count: long + records: MlAnomaly[] +} + +export interface MlGetTrainedModelsRequest extends RequestBase { + model_id?: Id + allow_no_match?: boolean + decompress_definition?: boolean + exclude_generated?: boolean + from?: integer + include?: string + size?: integer + tags?: string +} + +export interface MlGetTrainedModelsResponse { + count: integer + trained_model_configs: MlTrainedModelConfig[] +} + +export interface MlGetTrainedModelsStatsRequest extends RequestBase { + model_id?: Id + allow_no_match?: boolean + from?: integer + size?: integer +} + +export interface MlGetTrainedModelsStatsResponse { + count: integer + trained_model_stats: MlTrainedModelStats[] +} + +export interface MlInfoAnomalyDetectors { + categorization_analyzer: MlCategorizationAnalyzer + categorization_examples_limit: integer + model_memory_limit: string + model_snapshot_retention_days: integer + daily_model_snapshot_retention_after_days: integer +} + +export interface MlInfoDatafeeds { + scroll_size: integer +} + +export interface MlInfoDefaults { + anomaly_detectors: MlInfoAnomalyDetectors + 
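+ // Usage sketch (`client` assumed): these defaults are part of MlInfoResponse, e.g.
+ //   const res = await client.ml.info()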
datafeeds: MlInfoDatafeeds +} + +export interface MlInfoLimits { + max_model_memory_limit?: string + effective_max_model_memory_limit: string + total_ml_memory: string +} + +export interface MlInfoNativeCode { + build_hash: string + version: VersionString +} + +export interface MlInfoRequest extends RequestBase { +} + +export interface MlInfoResponse { + defaults: MlInfoDefaults + limits: MlInfoLimits + upgrade_mode: boolean + native_code: MlInfoNativeCode +} + +export interface MlOpenJobRequest extends RequestBase { + job_id: Id + timeout?: Time +} + +export interface MlOpenJobResponse { + opened: boolean +} + +export interface MlPostCalendarEventsRequest extends RequestBase { + calendar_id: Id + events: MlCalendarEvent[] +} + +export interface MlPostCalendarEventsResponse { + events: MlCalendarEvent[] +} + +export interface MlPostDataRequest<TData = unknown> extends RequestBase { + job_id: Id + reset_end?: DateString + reset_start?: DateString + data?: TData[] +} + +export interface MlPostDataResponse { + bucket_count: long + earliest_record_timestamp: integer + empty_bucket_count: long + input_bytes: long + input_field_count: long + input_record_count: long + invalid_date_count: long + job_id: Id + last_data_time: integer + latest_record_timestamp: integer + missing_field_count: long + out_of_order_timestamp_count: long + processed_field_count: long + processed_record_count: long + sparse_bucket_count: long +} + +export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig { + source: MlDataframeAnalyticsSource + analysis: MlDataframeAnalysisContainer + model_memory_limit?: string + max_num_threads?: integer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields +} + +export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase { + id?: Id + config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig +} + +export interface MlPreviewDataFrameAnalyticsResponse { + feature_values: Record<Field, string>[] +} + +export interface MlPreviewDatafeedRequest extends RequestBase { + datafeed_id?: Id + job_config?: MlJobConfig + datafeed_config?: MlDatafeedConfig +} + +export interface MlPreviewDatafeedResponse<TDocument = unknown> { + data: TDocument[] +} + +export interface MlPutCalendarRequest extends RequestBase { + calendar_id: Id + description?: string +} + +export interface MlPutCalendarResponse { + calendar_id: Id + description: string + job_ids: Ids +} + +export interface MlPutCalendarJobRequest extends RequestBase { + calendar_id: Id + job_id: Id +} + +export interface MlPutCalendarJobResponse { + calendar_id: Id + description?: string + job_ids: Ids +} + +export interface MlPutDataFrameAnalyticsRequest extends RequestBase { + id: Id + allow_lazy_start?: boolean + analysis: MlDataframeAnalysisContainer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields + description?: string + dest: MlDataframeAnalyticsDestination + max_num_threads?: integer + model_memory_limit?: string + source: MlDataframeAnalyticsSource +} + +export interface MlPutDataFrameAnalyticsResponse { + id: Id + create_time: long + version: VersionString + source: MlDataframeAnalyticsSource + description?: string + dest: MlDataframeAnalyticsDestination + model_memory_limit: string + allow_lazy_start: boolean + max_num_threads: integer + analysis: MlDataframeAnalysisContainer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields +} + +export interface MlPutDatafeedRequest extends RequestBase { + datafeed_id: Id + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean + aggregations?: Record<string, AggregationsAggregationContainer> + 
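+ // Request sketch (hypothetical ids) using the flattened fields of this interface; only `datafeed_id` is required:
+ //   const req: MlPutDatafeedRequest = { datafeed_id: 'datafeed-1', job_id: 'my-job', indices: ['metrics-*'] }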
chunking_config?: MlChunkingConfig + delayed_data_check_config?: MlDelayedDataCheckConfig + frequency?: Time + indices?: string[] + indices_options?: MlDatafeedIndicesOptions + job_id?: Id + max_empty_searches?: integer + query?: QueryDslQueryContainer + query_delay?: Time + runtime_mappings?: MappingRuntimeFields + script_fields?: Record + scroll_size?: integer +} + +export interface MlPutDatafeedResponse { + aggregations: Record + chunking_config: MlChunkingConfig + delayed_data_check_config?: MlDelayedDataCheckConfig + datafeed_id: Id + frequency: Time + indices: string[] + job_id: Id + indices_options?: MlDatafeedIndicesOptions + max_empty_searches: integer + query: QueryDslQueryContainer + query_delay: Time + runtime_mappings?: MappingRuntimeFields + script_fields?: Record + scroll_size: integer +} + +export interface MlPutFilterRequest extends RequestBase { + filter_id: Id + description?: string + items?: string[] +} + +export interface MlPutFilterResponse { + description: string + filter_id: Id + items: string[] +} + +export interface MlPutJobRequest extends RequestBase { + job_id: Id + allow_lazy_open?: boolean + analysis_config: MlAnalysisConfig + analysis_limits?: MlAnalysisLimits + background_persist_interval: Time + custom_settings?: MlCustomSettings + daily_model_snapshot_retention_after_days?: long + data_description: MlDataDescription + datafeed_config?: MlDatafeedConfig + description?: string + groups?: string[] + model_plot_config?: MlModelPlotConfig + model_snapshot_retention_days?: long + renormalization_window_days?: long + results_index_name?: IndexName + results_retention_days?: long +} + +export interface MlPutJobResponse { + allow_lazy_open: boolean + analysis_config: MlAnalysisConfigRead + analysis_limits: MlAnalysisLimits + background_persist_interval?: Time + create_time: DateString + custom_settings?: MlCustomSettings + daily_model_snapshot_retention_after_days: long + data_description: MlDataDescription + datafeed_config?: MlDatafeed + description?: string + groups?: string[] + job_id: Id + job_type: string + job_version: string + model_plot_config?: MlModelPlotConfig + model_snapshot_id?: Id + model_snapshot_retention_days: long + renormalization_window_days?: long + results_index_name: string + results_retention_days?: long +} + +export interface MlPutTrainedModelAggregateOutput { + logistic_regression?: MlPutTrainedModelWeights + weighted_sum?: MlPutTrainedModelWeights + weighted_mode?: MlPutTrainedModelWeights + exponent?: MlPutTrainedModelWeights +} + +export interface MlPutTrainedModelDefinition { + preprocessors?: MlPutTrainedModelPreprocessor[] + trained_model: MlPutTrainedModelTrainedModel +} + +export interface MlPutTrainedModelEnsemble { + aggregate_output?: MlPutTrainedModelAggregateOutput + classification_labels?: string[] + feature_names?: string[] + target_type?: string + trained_models: MlPutTrainedModelTrainedModel[] +} + +export interface MlPutTrainedModelFrequencyEncodingPreprocessor { + field: string + feature_name: string + frequency_map: Record +} + +export interface MlPutTrainedModelInput { + field_names: Names +} + +export interface MlPutTrainedModelOneHotEncodingPreprocessor { + field: string + hot_map: Record +} + +export interface MlPutTrainedModelPreprocessor { + frequency_encoding?: MlPutTrainedModelFrequencyEncodingPreprocessor + one_hot_encoding?: MlPutTrainedModelOneHotEncodingPreprocessor + target_mean_encoding?: MlPutTrainedModelTargetMeanEncodingPreprocessor +} + +export interface MlPutTrainedModelRequest extends RequestBase { 
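+ // Sketch (hedged): a toy single-leaf tree that could feed MlPutTrainedModelDefinition (types below):
+ //   const tree: MlPutTrainedModelTrainedModelTree = { feature_names: ['x'], tree_structure: [{ node_index: 0, leaf_value: 1.0 }] }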
+ model_id: Id + compressed_definition?: string + definition?: MlPutTrainedModelDefinition + description?: string + inference_config: AggregationsInferenceConfigContainer + input: MlPutTrainedModelInput + metadata?: any + tags?: string[] +} + +export type MlPutTrainedModelResponse = MlTrainedModelConfig + +export interface MlPutTrainedModelTargetMeanEncodingPreprocessor { + field: string + feature_name: string + target_map: Record + default_value: double +} + +export interface MlPutTrainedModelTrainedModel { + tree?: MlPutTrainedModelTrainedModelTree + tree_node?: MlPutTrainedModelTrainedModelTreeNode + ensemble?: MlPutTrainedModelEnsemble +} + +export interface MlPutTrainedModelTrainedModelTree { + classification_labels?: string[] + feature_names: string[] + target_type?: string + tree_structure: MlPutTrainedModelTrainedModelTreeNode[] +} + +export interface MlPutTrainedModelTrainedModelTreeNode { + decision_type?: string + default_left?: boolean + leaf_value?: double + left_child?: integer + node_index: integer + right_child?: integer + split_feature?: integer + split_gain?: integer + threshold?: double +} + +export interface MlPutTrainedModelWeights { + weights: double +} + +export interface MlPutTrainedModelAliasRequest extends RequestBase { + model_alias: Name + model_id: Id + reassign?: boolean +} + +export interface MlPutTrainedModelAliasResponse extends AcknowledgedResponseBase { +} + +export interface MlResetJobRequest extends RequestBase { + job_id: Id + wait_for_completion?: boolean +} + +export interface MlResetJobResponse extends AcknowledgedResponseBase { +} + +export interface MlRevertModelSnapshotRequest extends RequestBase { + job_id: Id + snapshot_id: Id + delete_intervening_results?: boolean +} + +export interface MlRevertModelSnapshotResponse { + model: MlModelSnapshot +} + +export interface MlSetUpgradeModeRequest extends RequestBase { + enabled?: boolean + timeout?: Time +} + +export interface MlSetUpgradeModeResponse extends AcknowledgedResponseBase { +} + +export interface MlStartDataFrameAnalyticsRequest extends RequestBase { + id: Id + timeout?: Time +} + +export interface MlStartDataFrameAnalyticsResponse extends AcknowledgedResponseBase { + node: NodeId +} + +export interface MlStartDatafeedRequest extends RequestBase { + datafeed_id: Id + start?: Time + end?: Time + timeout?: Time +} + +export interface MlStartDatafeedResponse { + node: NodeIds + started: boolean +} + +export interface MlStopDataFrameAnalyticsRequest extends RequestBase { + id: Id + allow_no_match?: boolean + force?: boolean + timeout?: Time +} + +export interface MlStopDataFrameAnalyticsResponse { + stopped: boolean +} + +export interface MlStopDatafeedRequest extends RequestBase { + datafeed_id: Id + allow_no_match?: boolean + force?: boolean + timeout?: Time +} + +export interface MlStopDatafeedResponse { + stopped: boolean +} + +export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { + id: Id + description?: string + model_memory_limit?: string + max_num_threads?: integer + allow_lazy_start?: boolean +} + +export interface MlUpdateDataFrameAnalyticsResponse { + id: Id + create_time: long + version: VersionString + source: MlDataframeAnalyticsSource + description?: string + dest: MlDataframeAnalyticsDestination + model_memory_limit: string + allow_lazy_start: boolean + max_num_threads: integer + analysis: MlDataframeAnalysisContainer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields +} + +export interface MlUpdateFilterRequest extends RequestBase { + filter_id: Id + 
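+ // Update sketch (hypothetical filter id):
+ //   const upd: MlUpdateFilterRequest = { filter_id: 'safe-ips', add_items: ['10.0.0.1'] }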
add_items?: string[] + description?: string + remove_items?: string[] +} + +export interface MlUpdateFilterResponse { + description: string + filter_id: Id + items: string[] +} + +export interface MlUpdateJobRequest extends RequestBase { + job_id: Id + allow_lazy_open?: boolean + analysis_limits?: MlAnalysisMemoryLimit + background_persist_interval?: Time + custom_settings?: Record + categorization_filters?: string[] + description?: string + model_plot_config?: MlModelPlotConfig + daily_model_snapshot_retention_after_days?: long + model_snapshot_retention_days?: long + renormalization_window_days?: long + results_retention_days?: long + groups?: string[] + detectors?: MlDetector[] + per_partition_categorization?: MlPerPartitionCategorization +} + +export interface MlUpdateJobResponse { + allow_lazy_open: boolean + analysis_config: MlAnalysisConfigRead + analysis_limits: MlAnalysisLimits + background_persist_interval?: Time + create_time: EpochMillis + finished_time?: EpochMillis + custom_settings?: Record + daily_model_snapshot_retention_after_days: long + data_description: MlDataDescription + datafeed_config?: MlDatafeed + description?: string + groups?: string[] + job_id: Id + job_type: string + job_version: VersionString + model_plot_config?: MlModelPlotConfig + model_snapshot_id?: Id + model_snapshot_retention_days: long + renormalization_window_days?: long + results_index_name: IndexName + results_retention_days?: long +} + +export interface MlUpdateModelSnapshotRequest extends RequestBase { + job_id: Id + snapshot_id: Id + description?: string + retain?: boolean +} + +export interface MlUpdateModelSnapshotResponse extends AcknowledgedResponseBase { + model: MlModelSnapshot +} + +export interface MlUpgradeJobSnapshotRequest extends RequestBase { + job_id: Id + snapshot_id: Id + wait_for_completion?: boolean + timeout?: Time +} + +export interface MlUpgradeJobSnapshotResponse { + node: NodeId + completed: boolean +} + +export interface MlValidateRequest extends RequestBase { + job_id?: Id + analysis_config?: MlAnalysisConfig + analysis_limits?: MlAnalysisLimits + data_description?: MlDataDescription + description?: string + model_plot?: MlModelPlotConfig + model_snapshot_retention_days?: long + results_index_name?: IndexName +} + +export interface MlValidateResponse extends AcknowledgedResponseBase { +} + +export interface MlValidateDetectorRequest extends RequestBase { + detector?: MlDetector +} + +export interface MlValidateDetectorResponse extends AcknowledgedResponseBase { +} + +export interface MonitoringBulkRequest extends RequestBase { + type?: string + system_id: string + system_api_version: string + interval: TimeSpan + operations?: (BulkOperationContainer | TSource)[] +} + +export interface MonitoringBulkResponse { + error?: ErrorCause + errors: boolean + ignored: boolean + took: long +} + +export interface NodesAdaptiveSelection { + avg_queue_size: long + avg_response_time: long + avg_response_time_ns: long + avg_service_time: string + avg_service_time_ns: long + outgoing_searches: long + rank: string +} + +export interface NodesBreaker { + estimated_size: string + estimated_size_in_bytes: long + limit_size: string + limit_size_in_bytes: long + overhead: float + tripped: float +} + +export interface NodesCpu { + percent: integer + sys?: string + sys_in_millis?: long + total?: string + total_in_millis?: long + user?: string + user_in_millis?: long + load_average?: Record +} + +export interface NodesDataPathStats { + available: string + available_in_bytes: long + disk_queue: 
string + disk_reads: long + disk_read_size: string + disk_read_size_in_bytes: long + disk_writes: long + disk_write_size: string + disk_write_size_in_bytes: long + free: string + free_in_bytes: long + mount: string + path: string + total: string + total_in_bytes: long + type: string +} + +export interface NodesExtendedMemoryStats extends NodesMemoryStats { + free_percent: integer + used_percent: integer + total_in_bytes: integer + free_in_bytes: integer + used_in_bytes: integer +} + +export interface NodesFileSystem { + data: NodesDataPathStats[] + timestamp: long + total: NodesFileSystemTotal +} + +export interface NodesFileSystemTotal { + available: string + available_in_bytes: long + free: string + free_in_bytes: long + total: string + total_in_bytes: long +} + +export interface NodesGarbageCollector { + collectors: Record +} + +export interface NodesGarbageCollectorTotal { + collection_count: long + collection_time: string + collection_time_in_millis: long +} + +export interface NodesHttp { + current_open: integer + total_opened: long +} + +export interface NodesIngest { + pipelines: Record + total: NodesIngestTotal +} + +export interface NodesIngestTotal { + count: long + current: long + failed: long + processors: NodesKeyedProcessor[] + time_in_millis: long +} + +export interface NodesJvm { + buffer_pools: Record + classes: NodesJvmClasses + gc: NodesGarbageCollector + mem: NodesMemoryStats + threads: NodesJvmThreads + timestamp: long + uptime: string + uptime_in_millis: long +} + +export interface NodesJvmClasses { + current_loaded_count: long + total_loaded_count: long + total_unloaded_count: long +} + +export interface NodesJvmThreads { + count: long + peak_count: long +} + +export interface NodesKeyedProcessor { + statistics: NodesProcess + type: string +} + +export interface NodesMemoryStats { + resident?: string + resident_in_bytes?: long + share?: string + share_in_bytes?: long + total_virtual?: string + total_virtual_in_bytes?: long + total_in_bytes: long + free_in_bytes: long + used_in_bytes: long +} + +export interface NodesNodeBufferPool { + count: long + total_capacity: string + total_capacity_in_bytes: long + used: string + used_in_bytes: long +} + +export interface NodesNodesResponseBase { + _nodes: NodeStatistics +} + +export interface NodesOperatingSystem { + cpu: NodesCpu + mem: NodesExtendedMemoryStats + swap: NodesMemoryStats + timestamp: long +} + +export interface NodesProcess { + cpu: NodesCpu + mem: NodesMemoryStats + open_file_descriptors: integer + timestamp: long +} + +export interface NodesScripting { + cache_evictions: long + compilations: long +} + +export interface NodesStats { + adaptive_selection: Record + breakers: Record + fs: NodesFileSystem + host: Host + http: NodesHttp + indices: IndicesStatsIndexStats + ingest: NodesIngest + ip: Ip | Ip[] + jvm: NodesJvm + name: Name + os: NodesOperatingSystem + process: NodesProcess + roles: NodeRoles + script: NodesScripting + thread_pool: Record + timestamp: long + transport: NodesTransport + transport_address: TransportAddress + attributes: Record +} + +export interface NodesThreadCount { + active: long + completed: long + largest: long + queue: long + rejected: long + threads: long +} + +export interface NodesTransport { + rx_count: long + rx_size: string + rx_size_in_bytes: long + server_open: integer + tx_count: long + tx_size: string + tx_size_in_bytes: long +} + +export interface NodesHotThreadsHotThread { + hosts: Host[] + node_id: Id + node_name: Name + threads: string[] +} + +export interface 
NodesHotThreadsRequest extends RequestBase { + node_id?: NodeIds + ignore_idle_threads?: boolean + interval?: Time + snapshots?: long + threads?: long + thread_type?: ThreadType + timeout?: Time +} + +export interface NodesHotThreadsResponse { + hot_threads: NodesHotThreadsHotThread[] +} + +export interface NodesInfoNodeInfo { + attributes: Record + build_flavor: string + build_hash: string + build_type: string + host: Host + http?: NodesInfoNodeInfoHttp + ip: Ip + jvm?: NodesInfoNodeJvmInfo + name: Name + network?: NodesInfoNodeInfoNetwork + os?: NodesInfoNodeOperatingSystemInfo + plugins?: PluginStats[] + process?: NodesInfoNodeProcessInfo + roles: NodeRoles + settings?: NodesInfoNodeInfoSettings + thread_pool?: Record + total_indexing_buffer?: long + total_indexing_buffer_in_bytes?: ByteSize + transport?: NodesInfoNodeInfoTransport + transport_address: TransportAddress + version: VersionString + modules?: PluginStats[] + ingest?: NodesInfoNodeInfoIngest + aggregations?: Record +} + +export interface NodesInfoNodeInfoAction { + destructive_requires_name: string +} + +export interface NodesInfoNodeInfoAggregation { + types: string[] +} + +export interface NodesInfoNodeInfoBootstrap { + memory_lock: string +} + +export interface NodesInfoNodeInfoClient { + type: string +} + +export interface NodesInfoNodeInfoDiscover { + seed_hosts: string +} + +export interface NodesInfoNodeInfoHttp { + bound_address: string[] + max_content_length?: ByteSize + max_content_length_in_bytes: long + publish_address: string +} + +export interface NodesInfoNodeInfoIngest { + processors: NodesInfoNodeInfoIngestProcessor[] +} + +export interface NodesInfoNodeInfoIngestDownloader { + enabled: string +} + +export interface NodesInfoNodeInfoIngestInfo { + downloader: NodesInfoNodeInfoIngestDownloader +} + +export interface NodesInfoNodeInfoIngestProcessor { + type: string +} + +export interface NodesInfoNodeInfoJvmMemory { + direct_max?: ByteSize + direct_max_in_bytes: long + heap_init?: ByteSize + heap_init_in_bytes: long + heap_max?: ByteSize + heap_max_in_bytes: long + non_heap_init?: ByteSize + non_heap_init_in_bytes: long + non_heap_max?: ByteSize + non_heap_max_in_bytes: long +} + +export interface NodesInfoNodeInfoMemory { + total: string + total_in_bytes: long +} + +export interface NodesInfoNodeInfoNetwork { + primary_interface: NodesInfoNodeInfoNetworkInterface + refresh_interval: integer +} + +export interface NodesInfoNodeInfoNetworkInterface { + address: string + mac_address: string + name: Name +} + +export interface NodesInfoNodeInfoOSCPU { + cache_size: string + cache_size_in_bytes: integer + cores_per_socket: integer + mhz: integer + model: string + total_cores: integer + total_sockets: integer + vendor: string +} + +export interface NodesInfoNodeInfoPath { + logs: string + home: string + repo: string[] + data?: string[] +} + +export interface NodesInfoNodeInfoRepositories { + url: NodesInfoNodeInfoRepositoriesUrl +} + +export interface NodesInfoNodeInfoRepositoriesUrl { + allowed_urls: string +} + +export interface NodesInfoNodeInfoScript { + allowed_types: string + disable_max_compilations_rate: string +} + +export interface NodesInfoNodeInfoSearch { + remote: NodesInfoNodeInfoSearchRemote +} + +export interface NodesInfoNodeInfoSearchRemote { + connect: string +} + +export interface NodesInfoNodeInfoSettings { + cluster: NodesInfoNodeInfoSettingsCluster + node: NodesInfoNodeInfoSettingsNode + path: NodesInfoNodeInfoPath + repositories?: NodesInfoNodeInfoRepositories + discovery?: 
NodesInfoNodeInfoDiscover + action?: NodesInfoNodeInfoAction + client: NodesInfoNodeInfoClient + http: NodesInfoNodeInfoSettingsHttp + bootstrap?: NodesInfoNodeInfoBootstrap + transport: NodesInfoNodeInfoSettingsTransport + network?: NodesInfoNodeInfoSettingsNetwork + xpack?: NodesInfoNodeInfoXpack + script?: NodesInfoNodeInfoScript + search?: NodesInfoNodeInfoSearch + ingest?: NodesInfoNodeInfoSettingsIngest +} + +export interface NodesInfoNodeInfoSettingsCluster { + name: Name + routing?: IndicesIndexRouting + election: NodesInfoNodeInfoSettingsClusterElection + initial_master_nodes?: string +} + +export interface NodesInfoNodeInfoSettingsClusterElection { + strategy: Name +} + +export interface NodesInfoNodeInfoSettingsHttp { + type: string | NodesInfoNodeInfoSettingsHttpType + 'type.default'?: string + compression?: boolean | string + port?: integer | string +} + +export interface NodesInfoNodeInfoSettingsHttpType { + default: string +} + +export interface NodesInfoNodeInfoSettingsIngest { + attachment?: NodesInfoNodeInfoIngestInfo + append?: NodesInfoNodeInfoIngestInfo + csv?: NodesInfoNodeInfoIngestInfo + convert?: NodesInfoNodeInfoIngestInfo + date?: NodesInfoNodeInfoIngestInfo + date_index_name?: NodesInfoNodeInfoIngestInfo + dot_expander?: NodesInfoNodeInfoIngestInfo + enrich?: NodesInfoNodeInfoIngestInfo + fail?: NodesInfoNodeInfoIngestInfo + foreach?: NodesInfoNodeInfoIngestInfo + json?: NodesInfoNodeInfoIngestInfo + user_agent?: NodesInfoNodeInfoIngestInfo + kv?: NodesInfoNodeInfoIngestInfo + geoip?: NodesInfoNodeInfoIngestInfo + grok?: NodesInfoNodeInfoIngestInfo + gsub?: NodesInfoNodeInfoIngestInfo + join?: NodesInfoNodeInfoIngestInfo + lowercase?: NodesInfoNodeInfoIngestInfo + remove?: NodesInfoNodeInfoIngestInfo + rename?: NodesInfoNodeInfoIngestInfo + script?: NodesInfoNodeInfoIngestInfo + set?: NodesInfoNodeInfoIngestInfo + sort?: NodesInfoNodeInfoIngestInfo + split?: NodesInfoNodeInfoIngestInfo + trim?: NodesInfoNodeInfoIngestInfo + uppercase?: NodesInfoNodeInfoIngestInfo + urldecode?: NodesInfoNodeInfoIngestInfo + bytes?: NodesInfoNodeInfoIngestInfo + dissect?: NodesInfoNodeInfoIngestInfo + set_security_user?: NodesInfoNodeInfoIngestInfo + pipeline?: NodesInfoNodeInfoIngestInfo + drop?: NodesInfoNodeInfoIngestInfo + circle?: NodesInfoNodeInfoIngestInfo + inference?: NodesInfoNodeInfoIngestInfo +} + +export interface NodesInfoNodeInfoSettingsNetwork { + host: Host +} + +export interface NodesInfoNodeInfoSettingsNode { + name: Name + attr: Record + max_local_storage_nodes?: string +} + +export interface NodesInfoNodeInfoSettingsTransport { + type: string | NodesInfoNodeInfoSettingsTransportType + 'type.default'?: string + features?: NodesInfoNodeInfoSettingsTransportFeatures +} + +export interface NodesInfoNodeInfoSettingsTransportFeatures { + 'x-pack': string +} + +export interface NodesInfoNodeInfoSettingsTransportType { + default: string +} + +export interface NodesInfoNodeInfoTransport { + bound_address: string[] + publish_address: string + profiles: Record +} + +export interface NodesInfoNodeInfoXpack { + license?: NodesInfoNodeInfoXpackLicense + security: NodesInfoNodeInfoXpackSecurity + notification?: Record +} + +export interface NodesInfoNodeInfoXpackLicense { + self_generated: NodesInfoNodeInfoXpackLicenseType +} + +export interface NodesInfoNodeInfoXpackLicenseType { + type: string +} + +export interface NodesInfoNodeInfoXpackSecurity { + http: NodesInfoNodeInfoXpackSecuritySsl + enabled: string + transport: NodesInfoNodeInfoXpackSecuritySsl + authc?: 
NodesInfoNodeInfoXpackSecurityAuthc +} + +export interface NodesInfoNodeInfoXpackSecurityAuthc { + realms: NodesInfoNodeInfoXpackSecurityAuthcRealms + token: NodesInfoNodeInfoXpackSecurityAuthcToken +} + +export interface NodesInfoNodeInfoXpackSecurityAuthcRealms { + file?: Record + native?: Record + pki?: Record +} + +export interface NodesInfoNodeInfoXpackSecurityAuthcRealmsStatus { + enabled?: string + order: string +} + +export interface NodesInfoNodeInfoXpackSecurityAuthcToken { + enabled: string +} + +export interface NodesInfoNodeInfoXpackSecuritySsl { + ssl: Record +} + +export interface NodesInfoNodeJvmInfo { + gc_collectors: string[] + mem: NodesInfoNodeInfoJvmMemory + memory_pools: string[] + pid: integer + start_time_in_millis: long + version: VersionString + vm_name: Name + vm_vendor: string + vm_version: VersionString + bundled_jdk: boolean + using_bundled_jdk: boolean + using_compressed_ordinary_object_pointers?: boolean | string + input_arguments: string[] +} + +export interface NodesInfoNodeOperatingSystemInfo { + arch: string + available_processors: integer + allocated_processors?: integer + name: Name + pretty_name: Name + refresh_interval_in_millis: integer + version: VersionString + cpu?: NodesInfoNodeInfoOSCPU + mem?: NodesInfoNodeInfoMemory + swap?: NodesInfoNodeInfoMemory +} + +export interface NodesInfoNodeProcessInfo { + id: long + mlockall: boolean + refresh_interval_in_millis: long +} + +export interface NodesInfoNodeThreadPoolInfo { + core?: integer + keep_alive?: string + max?: integer + queue_size: integer + size?: integer + type: string +} + +export interface NodesInfoRequest extends RequestBase { + node_id?: NodeIds + metric?: Metrics + flat_settings?: boolean + master_timeout?: Time + timeout?: Time +} + +export interface NodesInfoResponse extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record +} + +export interface NodesReloadSecureSettingsNodeReloadException { + name: Name + reload_exception?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy +} + +export interface NodesReloadSecureSettingsNodeReloadExceptionCausedBy { + type: string + reason: string + caused_by?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy +} + +export interface NodesReloadSecureSettingsRequest extends RequestBase { + node_id?: NodeIds + timeout?: Time + secure_settings_password?: Password +} + +export interface NodesReloadSecureSettingsResponse extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record +} + +export interface NodesStatsRequest extends RequestBase { + node_id?: NodeIds + metric?: Metrics + index_metric?: Metrics + completion_fields?: Fields + fielddata_fields?: Fields + fields?: Fields + groups?: boolean + include_segment_file_sizes?: boolean + level?: Level + master_timeout?: Time + timeout?: Time + types?: string[] + include_unloaded_segments?: boolean +} + +export interface NodesStatsResponse extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record +} + +export interface NodesUsageNodeUsage { + rest_actions: Record + since: EpochMillis + timestamp: EpochMillis + aggregations: Record +} + +export interface NodesUsageRequest extends RequestBase { + node_id?: NodeIds + metric?: Metrics + timeout?: Time +} + +export interface NodesUsageResponse extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record +} + +export interface RollupDateHistogramGrouping { + delay?: Time + field: Field + format?: string + interval?: Time + calendar_interval?: Time + fixed_interval?: Time + time_zone?: string +} + +export 
interface RollupFieldMetric { + field: Field + metrics: RollupMetric[] +} + +export interface RollupGroupings { + date_histogram?: RollupDateHistogramGrouping + histogram?: RollupHistogramGrouping + terms?: RollupTermsGrouping +} + +export interface RollupHistogramGrouping { + fields: Fields + interval: long +} + +export type RollupMetric = 'min' | 'max' | 'sum' | 'avg' | 'value_count' + +export interface RollupTermsGrouping { + fields: Fields +} + +export interface RollupDeleteJobRequest extends RequestBase { + id: Id +} + +export interface RollupDeleteJobResponse extends AcknowledgedResponseBase { + task_failures?: RollupDeleteJobTaskFailure[] +} + +export interface RollupDeleteJobTaskFailure { + task_id: TaskId + node_id: Id + status: string + reason: RollupDeleteJobTaskFailureReason +} + +export interface RollupDeleteJobTaskFailureReason { + type: string + reason: string +} + +export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' + +export interface RollupGetJobsRequest extends RequestBase { + id?: Id +} + +export interface RollupGetJobsResponse { + jobs: RollupGetJobsRollupJob[] +} + +export interface RollupGetJobsRollupJob { + config: RollupGetJobsRollupJobConfiguration + stats: RollupGetJobsRollupJobStats + status: RollupGetJobsRollupJobStatus +} + +export interface RollupGetJobsRollupJobConfiguration { + cron: string + groups: RollupGroupings + id: Id + index_pattern: string + metrics: RollupFieldMetric[] + page_size: long + rollup_index: IndexName + timeout: Time +} + +export interface RollupGetJobsRollupJobStats { + documents_processed: long + index_failures: long + index_time_in_ms: long + index_total: long + pages_processed: long + rollups_indexed: long + search_failures: long + search_time_in_ms: long + search_total: long + trigger_count: long + processing_time_in_ms: long + processing_total: long +} + +export interface RollupGetJobsRollupJobStatus { + current_position?: Record + job_state: RollupGetJobsIndexingJobState + upgraded_doc_id?: boolean +} + +export interface RollupGetRollupCapsRequest extends RequestBase { + id?: Id +} + +export interface RollupGetRollupCapsResponse extends DictionaryResponseBase { +} + +export interface RollupGetRollupCapsRollupCapabilities { + rollup_jobs: RollupGetRollupCapsRollupCapabilitySummary[] +} + +export interface RollupGetRollupCapsRollupCapabilitySummary { + fields: Record> + index_pattern: string + job_id: string + rollup_index: string +} + +export interface RollupGetRollupIndexCapsIndexCapabilities { + rollup_jobs: RollupGetRollupIndexCapsRollupJobSummary[] +} + +export interface RollupGetRollupIndexCapsRequest extends RequestBase { + index: Id +} + +export interface RollupGetRollupIndexCapsResponse extends DictionaryResponseBase { +} + +export interface RollupGetRollupIndexCapsRollupJobSummary { + fields: Record + index_pattern: string + job_id: Id + rollup_index: IndexName +} + +export interface RollupGetRollupIndexCapsRollupJobSummaryField { + agg: string + time_zone?: string + calendar_interval?: Time +} + +export interface RollupPutJobRequest extends RequestBase { + id: Id + cron?: string + groups?: RollupGroupings + index_pattern?: string + metrics?: RollupFieldMetric[] + page_size?: long + rollup_index?: IndexName +} + +export interface RollupPutJobResponse extends AcknowledgedResponseBase { +} + +export interface RollupRollupRequest extends RequestBase { + index: IndexName + rollup_index: IndexName + config?: any +} + +export type RollupRollupResponse = any + +export 
interface RollupRollupSearchRequest extends RequestBase { + index: Indices + type?: Type + rest_total_hits_as_int?: boolean + typed_keys?: boolean + aggs?: Record + query?: QueryDslQueryContainer + size?: integer +} + +export interface RollupRollupSearchResponse { + took: long + timed_out: boolean + terminated_early?: boolean + _shards: ShardStatistics + hits: SearchHitsMetadata + aggregations?: Record +} + +export interface RollupStartJobRequest extends RequestBase { + id: Id +} + +export interface RollupStartJobResponse { + started: boolean +} + +export interface RollupStopJobRequest extends RequestBase { + id: Id + timeout?: Time + wait_for_completion?: boolean +} + +export interface RollupStopJobResponse { + stopped: boolean +} + +export type SearchableSnapshotsStatsLevel = 'cluster' | 'indices' | 'shards' + +export interface SearchableSnapshotsClearCacheRequest extends RequestBase { + index?: Indices + expand_wildcards?: ExpandWildcards + allow_no_indices?: boolean + ignore_unavailable?: boolean + pretty?: boolean + human?: boolean +} + +export type SearchableSnapshotsClearCacheResponse = any + +export interface SearchableSnapshotsMountMountedSnapshot { + snapshot: Name + indices: Indices + shards: ShardStatistics +} + +export interface SearchableSnapshotsMountRequest extends RequestBase { + repository: Name + snapshot: Name + master_timeout?: Time + wait_for_completion?: boolean + storage?: string + index: IndexName + renamed_index?: IndexName + index_settings?: Record + ignore_index_settings?: string[] +} + +export interface SearchableSnapshotsMountResponse { + snapshot: SearchableSnapshotsMountMountedSnapshot +} + +export interface SearchableSnapshotsStatsRequest extends RequestBase { + index?: Indices + level?: SearchableSnapshotsStatsLevel +} + +export interface SearchableSnapshotsStatsResponse { + stats: any + total: any +} + +export interface SecurityApplicationGlobalUserPrivileges { + manage: SecurityManageUserPrivileges +} + +export interface SecurityApplicationPrivileges { + application: string + privileges: string[] + resources: string[] +} + +export interface SecurityClusterNode { + name: Name +} + +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client' + +export interface SecurityCreatedStatus { + created: boolean +} + +export interface SecurityFieldRule { + username?: Name + dn?: Names + groups?: Names + metadata?: any + realm?: SecurityRealm +} + +export interface SecurityFieldSecurity { + except?: Fields + grant: Fields +} + +export interface SecurityGlobalPrivilege { + application: SecurityApplicationGlobalUserPrivileges +} + +export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' + +export interface 
SecurityIndicesPrivileges { + field_security?: SecurityFieldSecurity | SecurityFieldSecurity[] + names: Indices + privileges: SecurityIndexPrivilege[] + query?: string | string[] | QueryDslQueryContainer + allow_restricted_indices?: boolean +} + +export interface SecurityManageUserPrivileges { + applications: string[] +} + +export interface SecurityRealm { + name: Name +} + +export interface SecurityRealmInfo { + name: Name + type: string +} + +export interface SecurityRoleMapping { + enabled: boolean + metadata: Metadata + roles: string[] + rules: SecurityRoleMappingRule + role_templates?: SecurityGetRoleRoleTemplate[] +} + +export interface SecurityRoleMappingRule { + any?: SecurityRoleMappingRule[] + all?: SecurityRoleMappingRule[] + field?: SecurityFieldRule + except?: SecurityRoleMappingRule +} + +export interface SecurityUser { + email?: string + full_name?: Name + metadata: Metadata + roles: string[] + username: Username + enabled: boolean +} + +export interface SecurityAuthenticateRequest extends RequestBase { +} + +export interface SecurityAuthenticateResponse { + authentication_realm: SecurityRealmInfo + email?: string + full_name?: Name + lookup_realm: SecurityRealmInfo + metadata: Metadata + roles: string[] + username: Username + enabled: boolean + authentication_type: string + token?: SecurityAuthenticateToken +} + +export interface SecurityAuthenticateToken { + name: Name + type?: string +} + +export interface SecurityChangePasswordRequest extends RequestBase { + username?: Username + refresh?: Refresh + password?: Password +} + +export interface SecurityChangePasswordResponse { +} + +export interface SecurityClearApiKeyCacheRequest extends RequestBase { + ids: Ids +} + +export interface SecurityClearApiKeyCacheResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record +} + +export interface SecurityClearCachedPrivilegesRequest extends RequestBase { + application: Name +} + +export interface SecurityClearCachedPrivilegesResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record +} + +export interface SecurityClearCachedRealmsRequest extends RequestBase { + realms: Names + usernames?: string[] +} + +export interface SecurityClearCachedRealmsResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record +} + +export interface SecurityClearCachedRolesRequest extends RequestBase { + name: Names +} + +export interface SecurityClearCachedRolesResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record +} + +export interface SecurityClearCachedServiceTokensRequest extends RequestBase { + namespace: Namespace + service: Service + name: Names +} + +export interface SecurityClearCachedServiceTokensResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record +} + +export interface SecurityCreateApiKeyIndexPrivileges { + names: Indices + privileges: SecurityIndexPrivilege[] +} + +export interface SecurityCreateApiKeyRequest extends RequestBase { + refresh?: Refresh + expiration?: Time + name?: Name + role_descriptors?: Record + metadata?: Metadata +} + +export interface SecurityCreateApiKeyResponse { + api_key: string + expiration?: long + id: Id + name: Name +} + +export interface SecurityCreateApiKeyRoleDescriptor { + cluster: string[] + index: SecurityCreateApiKeyIndexPrivileges[] + applications?: SecurityApplicationPrivileges[] +} + +export interface SecurityCreateServiceTokenRequest extends RequestBase { + namespace: Namespace + service: Service + name: Name +} + +export interface SecurityCreateServiceTokenResponse { + 
created: boolean + token: SecurityCreateServiceTokenToken +} + +export interface SecurityCreateServiceTokenToken { + name: Name + value: string +} + +export interface SecurityDeletePrivilegesFoundStatus { + found: boolean +} + +export interface SecurityDeletePrivilegesRequest extends RequestBase { + application: Name + name: Name + refresh?: Refresh +} + +export interface SecurityDeletePrivilegesResponse extends DictionaryResponseBase> { +} + +export interface SecurityDeleteRoleRequest extends RequestBase { + name: Name + refresh?: Refresh +} + +export interface SecurityDeleteRoleResponse { + found: boolean +} + +export interface SecurityDeleteRoleMappingRequest extends RequestBase { + name: Name + refresh?: Refresh +} + +export interface SecurityDeleteRoleMappingResponse { + found: boolean +} + +export interface SecurityDeleteServiceTokenRequest extends RequestBase { + namespace: Namespace + service: Service + name: Name + refresh?: Refresh +} + +export interface SecurityDeleteServiceTokenResponse { + found: boolean +} + +export interface SecurityDeleteUserRequest extends RequestBase { + username: Username + refresh?: Refresh +} + +export interface SecurityDeleteUserResponse { + found: boolean +} + +export interface SecurityDisableUserRequest extends RequestBase { + username: Username + refresh?: Refresh +} + +export interface SecurityDisableUserResponse { +} + +export interface SecurityEnableUserRequest extends RequestBase { + username: Username + refresh?: Refresh +} + +export interface SecurityEnableUserResponse { +} + +export interface SecurityGetApiKeyApiKey { + creation: long + expiration?: long + id: Id + invalidated: boolean + name: Name + realm: string + username: Username + metadata?: Metadata +} + +export interface SecurityGetApiKeyRequest extends RequestBase { + id?: Id + name?: Name + owner?: boolean + realm_name?: Name + username?: Username +} + +export interface SecurityGetApiKeyResponse { + api_keys: SecurityGetApiKeyApiKey[] +} + +export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { +} + +export interface SecurityGetBuiltinPrivilegesResponse { + cluster: string[] + index: Indices +} + +export interface SecurityGetPrivilegesRequest extends RequestBase { + application?: Name + name?: Name +} + +export interface SecurityGetPrivilegesResponse extends DictionaryResponseBase> { +} + +export interface SecurityGetRoleInlineRoleTemplate { + template: SecurityGetRoleInlineRoleTemplateSource + format?: SecurityGetRoleTemplateFormat +} + +export interface SecurityGetRoleInlineRoleTemplateSource { + source: string +} + +export interface SecurityGetRoleInvalidRoleTemplate { + template: string + format?: SecurityGetRoleTemplateFormat +} + +export interface SecurityGetRoleRequest extends RequestBase { + name?: Name +} + +export interface SecurityGetRoleResponse extends DictionaryResponseBase { +} + +export interface SecurityGetRoleRole { + cluster: string[] + indices: SecurityIndicesPrivileges[] + metadata: Metadata + run_as: string[] + transient_metadata: SecurityGetRoleTransientMetadata + applications: SecurityApplicationPrivileges[] + role_templates?: SecurityGetRoleRoleTemplate[] +} + +export type SecurityGetRoleRoleTemplate = SecurityGetRoleInlineRoleTemplate | SecurityGetRoleStoredRoleTemplate | SecurityGetRoleInvalidRoleTemplate + +export interface SecurityGetRoleStoredRoleTemplate { + template: SecurityGetRoleStoredRoleTemplateId + format?: SecurityGetRoleTemplateFormat +} + +export interface SecurityGetRoleStoredRoleTemplateId { + id: string +} + +export 
type SecurityGetRoleTemplateFormat = 'string' | 'json' + +export interface SecurityGetRoleTransientMetadata { + enabled: boolean +} + +export interface SecurityGetRoleMappingRequest extends RequestBase { + name?: Name +} + +export interface SecurityGetRoleMappingResponse extends DictionaryResponseBase { +} + +export interface SecurityGetServiceAccountsRequest extends RequestBase { + namespace?: Namespace + service?: Service +} + +export interface SecurityGetServiceAccountsResponse extends DictionaryResponseBase { +} + +export interface SecurityGetServiceAccountsRoleDescriptor { + cluster: string[] + indices: SecurityIndicesPrivileges[] + global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege + applications?: SecurityApplicationPrivileges[] + metadata?: Metadata + run_as?: string[] + transient_metadata?: Record +} + +export interface SecurityGetServiceAccountsRoleDescriptorWrapper { + role_descriptor: SecurityGetServiceAccountsRoleDescriptor +} + +export interface SecurityGetServiceCredentialsNodesCredentials { + _nodes: NodeStatistics + file_tokens: Record +} + +export interface SecurityGetServiceCredentialsNodesCredentialsFileToken { + nodes: string[] +} + +export interface SecurityGetServiceCredentialsRequest extends RequestBase { + namespace: Namespace + service: Name +} + +export interface SecurityGetServiceCredentialsResponse { + service_account: string + count: integer + tokens: Record + nodes_credentials: SecurityGetServiceCredentialsNodesCredentials +} + +export type SecurityGetTokenAccessTokenGrantType = 'password' | 'client_credentials' | '_kerberos' | 'refresh_token' + +export interface SecurityGetTokenAuthenticatedUser extends SecurityUser { + authentication_realm: SecurityGetTokenUserRealm + lookup_realm: SecurityGetTokenUserRealm + authentication_provider?: SecurityGetTokenAuthenticationProvider + authentication_type: string +} + +export interface SecurityGetTokenAuthenticationProvider { + type: string + name: Name +} + +export interface SecurityGetTokenRequest extends RequestBase { + grant_type?: SecurityGetTokenAccessTokenGrantType + scope?: string + password?: Password + kerberos_ticket?: string + refresh_token?: string + username?: Username +} + +export interface SecurityGetTokenResponse { + access_token: string + expires_in: long + scope?: string + type: string + refresh_token: string + kerberos_authentication_response_token?: string + authentication: SecurityGetTokenAuthenticatedUser +} + +export interface SecurityGetTokenUserRealm { + name: Name + type: string +} + +export interface SecurityGetUserRequest extends RequestBase { + username?: Username | Username[] +} + +export interface SecurityGetUserResponse extends DictionaryResponseBase { +} + +export interface SecurityGetUserPrivilegesRequest extends RequestBase { + application?: Name + priviledge?: Name +} + +export interface SecurityGetUserPrivilegesResponse { + applications: SecurityApplicationPrivileges[] + cluster: string[] + global: SecurityGlobalPrivilege[] + indices: SecurityIndicesPrivileges[] + run_as: string[] +} + +export interface SecurityGrantApiKeyApiKey { + name: Name + expiration?: Time + role_descriptors?: Record[] +} + +export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' + +export interface SecurityGrantApiKeyRequest extends RequestBase { + api_key: SecurityGrantApiKeyApiKey + grant_type: SecurityGrantApiKeyApiKeyGrantType + access_token?: string + username?: Username + password?: Password +} + +export interface SecurityGrantApiKeyResponse { + api_key: string + id: Id 
+ name: Name + expiration?: EpochMillis +} + +export interface SecurityHasPrivilegesApplicationPrivilegesCheck { + application: string + privileges: string[] + resources: string[] +} + +export type SecurityHasPrivilegesApplicationsPrivileges = Record + +export interface SecurityHasPrivilegesIndexPrivilegesCheck { + names: Indices + privileges: SecurityIndexPrivilege[] +} + +export type SecurityHasPrivilegesPrivileges = Record + +export interface SecurityHasPrivilegesRequest extends RequestBase { + user?: Name + application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] + cluster?: SecurityClusterPrivilege[] + index?: SecurityHasPrivilegesIndexPrivilegesCheck[] +} + +export type SecurityHasPrivilegesResourcePrivileges = Record + +export interface SecurityHasPrivilegesResponse { + application: SecurityHasPrivilegesApplicationsPrivileges + cluster: Record + has_all_requested: boolean + index: Record + username: Username +} + +export interface SecurityInvalidateApiKeyRequest extends RequestBase { + id?: Id + ids?: Id[] + name?: Name + owner?: boolean + realm_name?: string + username?: Username +} + +export interface SecurityInvalidateApiKeyResponse { + error_count: integer + error_details?: ErrorCause[] + invalidated_api_keys: string[] + previously_invalidated_api_keys: string[] +} + +export interface SecurityInvalidateTokenRequest extends RequestBase { + token?: string + refresh_token?: string + realm_name?: Name + username?: Username +} + +export interface SecurityInvalidateTokenResponse { + error_count: long + error_details?: ErrorCause[] + invalidated_tokens: long + previously_invalidated_tokens: long +} + +export interface SecurityPutPrivilegesActions { + actions: string[] + application?: string + name?: Name + metadata?: Metadata +} + +export interface SecurityPutPrivilegesRequest extends RequestBase { + refresh?: Refresh + privileges?: Record> +} + +export interface SecurityPutPrivilegesResponse extends DictionaryResponseBase> { +} + +export interface SecurityPutRoleRequest extends RequestBase { + name: Name + refresh?: Refresh + applications?: SecurityApplicationPrivileges[] + cluster?: SecurityClusterPrivilege[] + global?: Record + indices?: SecurityIndicesPrivileges[] + metadata?: Metadata + run_as?: string[] + transient_metadata?: SecurityGetRoleTransientMetadata +} + +export interface SecurityPutRoleResponse { + role: SecurityCreatedStatus +} + +export interface SecurityPutRoleMappingRequest extends RequestBase { + name: Name + refresh?: Refresh + enabled?: boolean + metadata?: Metadata + roles?: string[] + rules?: SecurityRoleMappingRule + run_as?: string[] +} + +export interface SecurityPutRoleMappingResponse { + created?: boolean + role_mapping: SecurityCreatedStatus +} + +export interface SecurityPutUserRequest extends RequestBase { + username: Username + refresh?: Refresh + email?: string | null + full_name?: string | null + metadata?: Metadata + password?: Password + password_hash?: string + roles?: string[] + enabled?: boolean +} + +export interface SecurityPutUserResponse { + created: boolean +} + +export interface ShutdownDeleteNodeRequest extends RequestBase { + node_id: NodeId +} + +export interface ShutdownDeleteNodeResponse extends AcknowledgedResponseBase { +} + +export interface ShutdownGetNodeNodeShutdownStatus { + node_id: NodeId + type: ShutdownGetNodeShutdownType + reason: string + shutdown_startedmillis: EpochMillis + status: ShutdownGetNodeShutdownStatus + shard_migration: ShutdownGetNodeShardMigrationStatus + persistent_tasks: 
ShutdownGetNodePersistentTaskStatus + plugins: ShutdownGetNodePluginsStatus +} + +export interface ShutdownGetNodePersistentTaskStatus { + status: ShutdownGetNodeShutdownStatus +} + +export interface ShutdownGetNodePluginsStatus { + status: ShutdownGetNodeShutdownStatus +} + +export interface ShutdownGetNodeRequest extends RequestBase { + node_id?: NodeIds +} + +export interface ShutdownGetNodeResponse { + nodes: ShutdownGetNodeNodeShutdownStatus[] +} + +export interface ShutdownGetNodeShardMigrationStatus { + status: ShutdownGetNodeShutdownStatus +} + +export type ShutdownGetNodeShutdownStatus = 'not_started' | 'in_progress' | 'stalled' | 'complete' + +export type ShutdownGetNodeShutdownType = 'remove' | 'restart' + +export interface ShutdownPutNodeRequest extends RequestBase { + node_id: NodeId +} + +export interface ShutdownPutNodeResponse extends AcknowledgedResponseBase { +} + +export interface SlmConfiguration { + ignore_unavailable?: boolean + indices: Indices + include_global_state?: boolean + feature_states?: string[] + metadata?: Metadata + partial?: boolean +} + +export interface SlmInProgress { + name: Name + start_time_millis: DateString + state: string + uuid: Uuid +} + +export interface SlmInvocation { + snapshot_name: Name + time: DateString +} + +export interface SlmPolicy { + config: SlmConfiguration + name: Name + repository: string + retention: SlmRetention + schedule: WatcherCronExpression +} + +export interface SlmRetention { + expire_after: Time + max_count: integer + min_count: integer +} + +export interface SlmSnapshotLifecycle { + in_progress?: SlmInProgress + last_failure?: SlmInvocation + last_success?: SlmInvocation + modified_date?: DateString + modified_date_millis: EpochMillis + next_execution?: DateString + next_execution_millis: EpochMillis + policy: SlmPolicy + version: VersionNumber + stats: SlmStatistics +} + +export interface SlmStatistics { + retention_deletion_time?: DateString + retention_deletion_time_millis?: EpochMillis + retention_failed?: long + retention_runs?: long + retention_timed_out?: long + policy?: Id + total_snapshots_deleted?: long + snapshots_deleted?: long + total_snapshot_deletion_failures?: long + snapshot_deletion_failures?: long + total_snapshots_failed?: long + snapshots_failed?: long + total_snapshots_taken?: long + snapshots_taken?: long +} + +export interface SlmDeleteLifecycleRequest extends RequestBase { + policy_id: Name +} + +export interface SlmDeleteLifecycleResponse extends AcknowledgedResponseBase { +} + +export interface SlmExecuteLifecycleRequest extends RequestBase { + policy_id: Name +} + +export interface SlmExecuteLifecycleResponse { + snapshot_name: Name +} + +export interface SlmExecuteRetentionRequest extends RequestBase { +} + +export interface SlmExecuteRetentionResponse extends AcknowledgedResponseBase { +} + +export interface SlmGetLifecycleRequest extends RequestBase { + policy_id?: Names +} + +export interface SlmGetLifecycleResponse extends DictionaryResponseBase { +} + +export interface SlmGetStatsRequest extends RequestBase { +} + +export interface SlmGetStatsResponse { + retention_deletion_time: string + retention_deletion_time_millis: EpochMillis + retention_failed: long + retention_runs: long + retention_timed_out: long + total_snapshots_deleted: long + total_snapshot_deletion_failures: long + total_snapshots_failed: long + total_snapshots_taken: long + policy_stats: string[] +} + +export interface SlmGetStatusRequest extends RequestBase { +} + +export interface SlmGetStatusResponse { + 
operation_mode: LifecycleOperationMode +} + +export interface SlmPutLifecycleRequest extends RequestBase { + policy_id: Name + master_timeout?: Time + timeout?: Time + config?: SlmConfiguration + name?: Name + repository?: string + retention?: SlmRetention + schedule?: WatcherCronExpression +} + +export interface SlmPutLifecycleResponse extends AcknowledgedResponseBase { +} + +export interface SlmStartRequest extends RequestBase { +} + +export interface SlmStartResponse extends AcknowledgedResponseBase { +} + +export interface SlmStopRequest extends RequestBase { +} + +export interface SlmStopResponse extends AcknowledgedResponseBase { +} + +export interface SnapshotFileCountSnapshotStats { + file_count: integer + size_in_bytes: long +} + +export interface SnapshotIndexDetails { + shard_count: integer + size?: ByteSize + size_in_bytes: long + max_segments_per_shard: long +} + +export interface SnapshotInfoFeatureState { + feature_name: string + indices: Indices +} + +export interface SnapshotRepository { + type: string + uuid?: Uuid + settings: SnapshotRepositorySettings +} + +export interface SnapshotRepositorySettings { + chunk_size?: string + compress?: string | boolean + concurrent_streams?: string | integer + location: string + read_only?: string | boolean + readonly?: string | boolean +} + +export interface SnapshotShardsStats { + done: long + failed: long + finalizing: long + initializing: long + started: long + total: long +} + +export type SnapshotShardsStatsStage = 'DONE' | 'FAILURE' | 'FINALIZE' | 'INIT' | 'STARTED' + +export interface SnapshotShardsStatsSummary { + incremental: SnapshotShardsStatsSummaryItem + total: SnapshotShardsStatsSummaryItem + start_time_in_millis: long + time_in_millis: long +} + +export interface SnapshotShardsStatsSummaryItem { + file_count: long + size_in_bytes: long +} + +export interface SnapshotSnapshotIndexStats { + shards: Record + shards_stats: SnapshotShardsStats + stats: SnapshotSnapshotStats +} + +export interface SnapshotSnapshotInfo { + data_streams: string[] + duration?: Time + duration_in_millis?: EpochMillis + end_time?: Time + end_time_in_millis?: EpochMillis + failures?: SnapshotSnapshotShardFailure[] + include_global_state?: boolean + indices: IndexName[] + index_details?: Record + metadata?: Metadata + reason?: string + repository?: Name + snapshot: Name + shards?: ShardStatistics + start_time?: Time + start_time_in_millis?: EpochMillis + state?: string + uuid: Uuid + version?: VersionString + version_id?: VersionNumber + feature_states?: SnapshotInfoFeatureState[] +} + +export interface SnapshotSnapshotShardFailure { + index: IndexName + node_id: Id + reason: string + shard_id: Id + status: string +} + +export interface SnapshotSnapshotShardsStatus { + stage: SnapshotShardsStatsStage + stats: SnapshotShardsStatsSummary +} + +export interface SnapshotSnapshotStats { + incremental: SnapshotFileCountSnapshotStats + start_time_in_millis: long + time_in_millis: long + total: SnapshotFileCountSnapshotStats +} + +export interface SnapshotStatus { + include_global_state: boolean + indices: Record + repository: string + shards_stats: SnapshotShardsStats + snapshot: string + state: string + stats: SnapshotSnapshotStats + uuid: Uuid +} + +export interface SnapshotCleanupRepositoryCleanupRepositoryResults { + deleted_blobs: long + deleted_bytes: long +} + +export interface SnapshotCleanupRepositoryRequest extends RequestBase { + repository: Name + master_timeout?: Time + timeout?: Time +} + +export interface SnapshotCleanupRepositoryResponse { 
+ results: SnapshotCleanupRepositoryCleanupRepositoryResults +} + +export interface SnapshotCloneRequest extends RequestBase { + repository: Name + snapshot: Name + target_snapshot: Name + master_timeout?: Time + timeout?: Time + indices: string +} + +export interface SnapshotCloneResponse extends AcknowledgedResponseBase { +} + +export interface SnapshotCreateRequest extends RequestBase { + repository: Name + snapshot: Name + master_timeout?: Time + wait_for_completion?: boolean + ignore_unavailable?: boolean + include_global_state?: boolean + indices?: Indices + feature_states?: string[] + metadata?: Metadata + partial?: boolean +} + +export interface SnapshotCreateResponse { + accepted?: boolean + snapshot: SnapshotSnapshotInfo +} + +export interface SnapshotCreateRepositoryRequest extends RequestBase { + repository: Name + master_timeout?: Time + timeout?: Time + verify?: boolean + type: string + settings: SnapshotRepositorySettings +} + +export interface SnapshotCreateRepositoryResponse extends AcknowledgedResponseBase { +} + +export interface SnapshotDeleteRequest extends RequestBase { + repository: Name + snapshot: Name + master_timeout?: Time +} + +export interface SnapshotDeleteResponse extends AcknowledgedResponseBase { +} + +export interface SnapshotDeleteRepositoryRequest extends RequestBase { + repository: Names + master_timeout?: Time + timeout?: Time +} + +export interface SnapshotDeleteRepositoryResponse extends AcknowledgedResponseBase { +} + +export interface SnapshotGetRequest extends RequestBase { + repository: Name + snapshot: Names + ignore_unavailable?: boolean + master_timeout?: Time + verbose?: boolean + index_details?: boolean + human?: boolean + include_repository?: boolean +} + +export interface SnapshotGetResponse { + responses?: SnapshotGetSnapshotResponseItem[] + snapshots?: SnapshotSnapshotInfo[] + total: integer + remaining: integer +} + +export interface SnapshotGetSnapshotResponseItem { + repository: Name + snapshots?: SnapshotSnapshotInfo[] + error?: ErrorCause +} + +export interface SnapshotGetRepositoryRequest extends RequestBase { + repository?: Names + local?: boolean + master_timeout?: Time +} + +export interface SnapshotGetRepositoryResponse extends DictionaryResponseBase { +} + +export interface SnapshotRestoreRequest extends RequestBase { + repository: Name + snapshot: Name + master_timeout?: Time + wait_for_completion?: boolean + ignore_index_settings?: string[] + ignore_unavailable?: boolean + include_aliases?: boolean + include_global_state?: boolean + index_settings?: IndicesPutSettingsRequest + indices?: Indices + partial?: boolean + rename_pattern?: string + rename_replacement?: string +} + +export interface SnapshotRestoreResponse { + snapshot: SnapshotRestoreSnapshotRestore +} + +export interface SnapshotRestoreSnapshotRestore { + indices: IndexName[] + snapshot: string + shards: ShardStatistics +} + +export interface SnapshotStatusRequest extends RequestBase { + repository?: Name + snapshot?: Names + ignore_unavailable?: boolean + master_timeout?: Time +} + +export interface SnapshotStatusResponse { + snapshots: SnapshotStatus[] +} + +export interface SnapshotVerifyRepositoryCompactNodeInfo { + name: Name +} + +export interface SnapshotVerifyRepositoryRequest extends RequestBase { + repository: Name + master_timeout?: Time + timeout?: Time +} + +export interface SnapshotVerifyRepositoryResponse { + nodes: Record +} + +export interface SqlClearCursorRequest extends RequestBase { + cursor: string +} + +export interface 
SqlClearCursorResponse { + succeeded: boolean +} + +export interface SqlQueryColumn { + name: Name + type: string +} + +export interface SqlQueryRequest extends RequestBase { + format?: string + columnar?: boolean + cursor?: string + fetch_size?: integer + filter?: QueryDslQueryContainer + query?: string + request_timeout?: Time + page_timeout?: Time + time_zone?: string + field_multi_value_leniency?: boolean +} + +export interface SqlQueryResponse { + columns?: SqlQueryColumn[] + cursor?: string + rows: SqlQueryRow[] +} + +export type SqlQueryRow = any[] + +export interface SqlTranslateRequest extends RequestBase { + fetch_size?: integer + filter?: QueryDslQueryContainer + query: string + time_zone?: string +} + +export interface SqlTranslateResponse { + size: long + _source: boolean | Fields | SearchSourceFilter + fields: Record[] + sort: SearchSort +} + +export interface SslCertificatesCertificateInformation { + alias?: string + expiry: DateString + format: string + has_private_key: boolean + path: string + serial_number: string + subject_dn: string +} + +export interface SslCertificatesRequest extends RequestBase { +} + +export type SslCertificatesResponse = SslCertificatesCertificateInformation[] + +export interface TasksInfo { + action: string + cancellable: boolean + children?: TasksInfo[] + description?: string + headers: HttpHeaders + id: long + node: string + running_time_in_nanos: long + start_time_in_millis: long + status?: TasksStatus + type: string + parent_task_id?: Id +} + +export interface TasksState { + action: string + cancellable: boolean + description?: string + headers: HttpHeaders + id: long + node: string + parent_task_id?: TaskId + running_time_in_nanos: long + start_time_in_millis: long + status?: TasksStatus + type: string +} + +export interface TasksStatus { + batches: long + canceled?: string + created: long + deleted: long + noops: long + failures?: string[] + requests_per_second: float + retries: Retries + throttled?: Time + throttled_millis: long + throttled_until?: Time + throttled_until_millis: long + timed_out?: boolean + took?: long + total: long + updated: long + version_conflicts: long +} + +export interface TasksTaskExecutingNode extends SpecUtilsBaseNode { + tasks: Record +} + +export interface TasksCancelRequest extends RequestBase { + task_id?: TaskId + actions?: string | string[] + nodes?: string[] + parent_task_id?: string + wait_for_completion?: boolean +} + +export interface TasksCancelResponse { + node_failures?: ErrorCause[] + nodes: Record +} + +export interface TasksGetRequest extends RequestBase { + task_id: Id + timeout?: Time + wait_for_completion?: boolean +} + +export interface TasksGetResponse { + completed: boolean + task: TasksInfo + response?: TasksStatus + error?: ErrorCause +} + +export interface TasksListRequest extends RequestBase { + actions?: string | string[] + detailed?: boolean + group_by?: GroupBy + nodes?: string[] + parent_task_id?: Id + timeout?: Time + wait_for_completion?: boolean +} + +export interface TasksListResponse { + node_failures?: ErrorCause[] + nodes?: Record + tasks?: Record | TasksInfo[] +} + +export interface TextStructureFindStructureFieldStat { + count: integer + cardinality: integer + top_hits: TextStructureFindStructureTopHit[] + mean_value?: integer + median_value?: integer + max_value?: integer + min_value?: integer + earliest?: string + latest?: string +} + +export interface TextStructureFindStructureRequest { + charset?: string + column_names?: string + delimiter?: string + explain?: boolean + 
format?: string + grok_pattern?: string + has_header_row?: boolean + line_merge_size_limit?: uint + lines_to_sample?: uint + quote?: string + should_trim_fields?: boolean + timeout?: Time + timestamp_field?: Field + timestamp_format?: string + text_files?: TJsonDocument[] +} + +export interface TextStructureFindStructureResponse { + charset: string + has_header_row?: boolean + has_byte_order_marker: boolean + format: string + field_stats: Record + sample_start: string + num_messages_analyzed: integer + mappings: MappingTypeMapping + quote?: string + delimiter?: string + need_client_timezone: boolean + num_lines_analyzed: integer + column_names?: string[] + explanation?: string[] + grok_pattern?: string + multiline_start_pattern?: string + exclude_lines_pattern?: string + java_timestamp_formats?: string[] + joda_timestamp_formats?: string[] + timestamp_field?: Field + should_trim_fields?: boolean + ingest_pipeline: IngestPipelineConfig +} + +export interface TextStructureFindStructureTopHit { + count: long + value: any +} + +export interface TransformLatest { + sort: Field + unique_key: Field[] +} + +export interface TransformPivot { + aggregations?: Record + aggs?: Record + group_by?: Record + max_page_search_size?: integer +} + +export interface TransformPivotGroupByContainer { + date_histogram?: AggregationsDateHistogramAggregation + geotile_grid?: AggregationsGeoTileGridAggregation + histogram?: AggregationsHistogramAggregation + terms?: AggregationsTermsAggregation +} + +export interface TransformRetentionPolicy { + field: Field + max_age: Time +} + +export interface TransformRetentionPolicyContainer { + time: TransformRetentionPolicy +} + +export interface TransformSettings { + dates_as_epoch_millis?: boolean + docs_per_second?: float + max_page_search_size?: integer +} + +export interface TransformSyncContainer { + time: TransformTimeSync +} + +export interface TransformTimeSync { + delay?: Time + field: Field +} + +export interface TransformDeleteTransformRequest extends RequestBase { + transform_id: Name + force?: boolean +} + +export interface TransformDeleteTransformResponse extends AcknowledgedResponseBase { +} + +export interface TransformGetTransformRequest extends RequestBase { + transform_id?: Name + allow_no_match?: boolean + from?: integer + size?: integer + exclude_generated?: boolean +} + +export interface TransformGetTransformResponse { + count: long + transforms: Transform[] +} + +export interface TransformGetTransformStatsCheckpointStats { + checkpoint: long + checkpoint_progress?: TransformGetTransformStatsTransformProgress + timestamp?: DateString + timestamp_millis: EpochMillis + time_upper_bound?: DateString + time_upper_bound_millis?: EpochMillis +} + +export interface TransformGetTransformStatsCheckpointing { + changes_last_detected_at: long + changes_last_detected_at_date_time?: DateString + last: TransformGetTransformStatsCheckpointStats + next?: TransformGetTransformStatsCheckpointStats + operations_behind?: long +} + +export interface TransformGetTransformStatsRequest extends RequestBase { + transform_id: Name + allow_no_match?: boolean + from?: long + size?: long +} + +export interface TransformGetTransformStatsResponse { + count: long + transforms: TransformGetTransformStatsTransformStats[] +} + +export interface TransformGetTransformStatsTransformIndexerStats { + documents_indexed: long + documents_processed: long + exponential_avg_checkpoint_duration_ms: double + exponential_avg_documents_indexed: double + exponential_avg_documents_processed: double + 
index_failures: long + index_time_in_ms: long + index_total: long + pages_processed: long + processing_time_in_ms: long + processing_total: long + search_failures: long + search_time_in_ms: long + search_total: long + trigger_count: long +} + +export interface TransformGetTransformStatsTransformProgress { + docs_indexed: long + docs_processed: long + docs_remaining: long + percent_complete: double + total_docs: long +} + +export interface TransformGetTransformStatsTransformStats { + checkpointing: TransformGetTransformStatsCheckpointing + id: Id + node?: NodeAttributes + reason?: string + state: string + stats: TransformGetTransformStatsTransformIndexerStats +} + +export interface TransformPreviewTransformRequest extends RequestBase { + transform_id?: Id + dest?: ReindexDestination + description?: string + frequency?: Time + pivot?: TransformPivot + source?: ReindexSource + settings?: TransformSettings + sync?: TransformSyncContainer + retention_policy?: TransformRetentionPolicyContainer + latest?: TransformLatest +} + +export interface TransformPreviewTransformResponse { + generated_dest_index: IndicesIndexState + preview: TTransform[] +} + +export interface TransformPutTransformRequest extends TransformPreviewTransformRequest { + transform_id: Id + defer_validation?: boolean +} + +export interface TransformPutTransformResponse extends AcknowledgedResponseBase { +} + +export interface TransformStartTransformRequest extends RequestBase { + transform_id: Name + timeout?: Time +} + +export interface TransformStartTransformResponse extends AcknowledgedResponseBase { +} + +export interface TransformStopTransformRequest extends RequestBase { + transform_id: Name + allow_no_match?: boolean + force?: boolean + timeout?: Time + wait_for_checkpoint?: boolean + wait_for_completion?: boolean +} + +export interface TransformStopTransformResponse extends AcknowledgedResponseBase { +} + +export interface TransformUpdateTransformRequest extends TransformPutTransformRequest { +} + +export interface TransformUpdateTransformResponse { + create_time: long + description: string + dest: ReindexDestination + frequency: Time + id: Id + pivot: TransformPivot + settings: TransformSettings + source: ReindexSource + sync?: TransformSyncContainer + version: VersionString +} + +export interface WatcherAcknowledgeState { + state: WatcherAcknowledgementOptions + timestamp: DateString +} + +export type WatcherAcknowledgementOptions = 'awaits_successful_execution' | 'ackable' | 'acked' + +export interface WatcherAction { + action_type?: WatcherActionType + condition?: WatcherConditionContainer + foreach?: string + max_iterations?: integer + name?: Name + throttle_period?: Time + throttle_period_in_millis?: EpochMillis + transform?: TransformContainer + index?: WatcherIndex + logging?: WatcherLogging + webhook?: WatcherActionWebhook +} + +export type WatcherActionExecutionMode = 'simulate' | 'force_simulate' | 'execute' | 'force_execute' | 'skip' + +export interface WatcherActionStatus { + ack: WatcherAcknowledgeState + last_execution?: WatcherExecutionState + last_successful_execution?: WatcherExecutionState + last_throttle?: WatcherThrottleState +} + +export type WatcherActionStatusOptions = 'success' | 'failure' | 'simulated' | 'throttled' + +export type WatcherActionType = 'email' | 'webhook' | 'index' | 'logging' | 'slack' | 'pagerduty' + +export interface WatcherActionWebhook { + host: Host + port: integer +} + +export type WatcherActions = Record + +export interface WatcherActivationState { + active: boolean + 
timestamp: Timestamp +} + +export interface WatcherActivationStatus { + actions: WatcherActions + state: WatcherActivationState + version: VersionNumber +} + +export interface WatcherAlwaysCondition { + [key: string]: never +} + +export interface WatcherArrayCompareCondition { + array_path: string + comparison: string + path: string + quantifier: WatcherQuantifier + value: any +} + +export interface WatcherChainInput { + inputs: WatcherInputContainer[] +} + +export interface WatcherCompareCondition { + comparison?: string + path?: string + value?: any + 'ctx.payload.match'?: WatcherCompareContextPayloadCondition + 'ctx.payload.value'?: WatcherCompareContextPayloadCondition +} + +export interface WatcherCompareContextPayloadCondition { + eq?: any + lt?: any + gt?: any + lte?: any + gte?: any +} + +export interface WatcherConditionContainer { + always?: WatcherAlwaysCondition + array_compare?: WatcherArrayCompareCondition + compare?: WatcherCompareCondition + never?: WatcherNeverCondition + script?: WatcherScriptCondition +} + +export type WatcherConditionType = 'always' | 'never' | 'script' | 'compare' | 'array_compare' + +export type WatcherConnectionScheme = 'http' | 'https' + +export type WatcherCronExpression = string + +export interface WatcherDailySchedule { + at: string[] | WatcherTimeOfDay +} + +export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday' + +export interface WatcherEmailResult { + account?: string + message: WatcherEmailResult + reason?: string +} + +export type WatcherExecutionPhase = 'awaits_execution' | 'started' | 'input' | 'condition' | 'actions' | 'watch_transform' | 'aborted' | 'finished' + +export interface WatcherExecutionResult { + actions: WatcherExecutionResultAction[] + condition: WatcherExecutionResultCondition + execution_duration: integer + execution_time: DateString + input: WatcherExecutionResultInput +} + +export interface WatcherExecutionResultAction { + email?: WatcherEmailResult + id: Id + index?: WatcherIndexResult + logging?: WatcherLoggingResult + pagerduty?: WatcherPagerDutyResult + reason?: string + slack?: WatcherSlackResult + status: WatcherActionStatusOptions + type: WatcherActionType + webhook?: WatcherWebhookResult +} + +export interface WatcherExecutionResultCondition { + met: boolean + status: WatcherActionStatusOptions + type: WatcherConditionType +} + +export interface WatcherExecutionResultInput { + payload: Record + status: WatcherActionStatusOptions + type: WatcherInputType +} + +export interface WatcherExecutionState { + successful: boolean + timestamp: DateString +} + +export type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued' + +export interface WatcherExecutionThreadPool { + max_size: long + queue_size: long +} + +export interface WatcherHourlySchedule { + minute: integer[] +} + +export interface WatcherHttpInput { + http?: WatcherHttpInput + extract?: string[] + request?: WatcherHttpInputRequestDefinition + response_content_type?: WatcherResponseContentType +} + +export interface WatcherHttpInputAuthentication { + basic: WatcherHttpInputBasicAuthentication +} + +export interface WatcherHttpInputBasicAuthentication { + password: Password + username: Username +} + +export type WatcherHttpInputMethod = 'head' | 'get' | 'post' | 'put' | 'delete' + +export interface WatcherHttpInputProxy { + host: Host + port: uint +} + +export interface 
WatcherHttpInputRequestDefinition { + auth?: WatcherHttpInputAuthentication + body?: string + connection_timeout?: Time + headers?: Record + host?: Host + method?: WatcherHttpInputMethod + params?: Record + path?: string + port?: uint + proxy?: WatcherHttpInputProxy + read_timeout?: Time + scheme?: WatcherConnectionScheme + url?: string +} + +export interface WatcherHttpInputRequestResult extends WatcherHttpInputRequestDefinition { +} + +export interface WatcherHttpInputResponseResult { + body: string + headers: HttpHeaders + status: integer +} + +export interface WatcherIndex { + index: IndexName + doc_id?: Id + refresh?: Refresh +} + +export interface WatcherIndexResult { + response: WatcherIndexResultSummary +} + +export interface WatcherIndexResultSummary { + created: boolean + id: Id + index: IndexName + result: Result + version: VersionNumber + type?: Type +} + +export interface WatcherIndicesOptions { + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + ignore_throttled?: boolean +} + +export interface WatcherInputContainer { + chain?: WatcherChainInput + http?: WatcherHttpInput + search?: WatcherSearchInput + simple?: Record +} + +export type WatcherInputType = 'http' | 'search' | 'simple' + +export interface WatcherLogging { + level?: string + text: string + category?: string +} + +export interface WatcherLoggingResult { + logged_text: string +} + +export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | 'june' | 'july' | 'august' | 'september' | 'october' | 'november' | 'december' + +export interface WatcherNeverCondition { + [key: string]: never +} + +export interface WatcherPagerDutyActionEventResult { + event: WatcherPagerDutyEvent + reason: string + request: WatcherHttpInputRequestResult + response: WatcherHttpInputResponseResult +} + +export interface WatcherPagerDutyContext { + href: string + src: string + type: WatcherPagerDutyContextType +} + +export type WatcherPagerDutyContextType = 'link' | 'image' + +export interface WatcherPagerDutyEvent { + account: string + attach_payload: boolean + client: string + client_url: string + context: WatcherPagerDutyContext[] + description: string + event_type: WatcherPagerDutyEventType + incident_key: string +} + +export type WatcherPagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge' + +export interface WatcherPagerDutyResult { + sent_event: WatcherPagerDutyActionEventResult +} + +export type WatcherQuantifier = 'some' | 'all' + +export interface WatcherQueryWatch { + _id: Id + status?: WatcherWatchStatus + watch?: WatcherWatch + _primary_term?: integer + _seq_no?: SequenceNumber +} + +export type WatcherResponseContentType = 'json' | 'yaml' | 'text' + +export interface WatcherScheduleContainer { + cron?: WatcherCronExpression + daily?: WatcherDailySchedule + hourly?: WatcherHourlySchedule + interval?: Time + monthly?: WatcherTimeOfMonth[] + weekly?: WatcherTimeOfWeek[] + yearly?: WatcherTimeOfYear[] +} + +export interface WatcherScheduleTriggerEvent { + scheduled_time: DateString | string + triggered_time?: DateString | string +} + +export interface WatcherScriptCondition { + lang: string + params?: Record + source: string +} + +export interface WatcherSearchInput { + extract?: string[] + request: WatcherSearchInputRequestDefinition + timeout?: Time +} + +export interface WatcherSearchInputRequestBody { + query: QueryDslQueryContainer +} + +export interface WatcherSearchInputRequestDefinition { + body?: WatcherSearchInputRequestBody + indices?: IndexName[] + 
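+
+  // Sketch of a search input and schedule built from these watcher types
+  // (the index name and query are illustrative):
+  //
+  //   const input: WatcherInputContainer = {
+  //     search: { request: { indices: ['logs-*'], body: { query: { match: { level: 'error' } } } } }
+  //   }
+  //   const schedule: WatcherScheduleContainer = { interval: '5m' }
+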
indices_options?: WatcherIndicesOptions + search_type?: SearchType + template?: SearchTemplateRequest + rest_total_hits_as_int?: boolean +} + +export interface WatcherSimulatedActions { + actions: string[] + all: WatcherSimulatedActions + use_all: boolean +} + +export interface WatcherSlackAttachment { + author_icon?: string + author_link?: string + author_name: string + color?: string + fallback?: string + fields?: WatcherSlackAttachmentField[] + footer?: string + footer_icon?: string + image_url?: string + pretext?: string + text?: string + thumb_url?: string + title: string + title_link?: string + ts?: DateString +} + +export interface WatcherSlackAttachmentField { + short: boolean + title: string + value: string +} + +export interface WatcherSlackDynamicAttachment { + attachment_template: WatcherSlackAttachment + list_path: string +} + +export interface WatcherSlackMessage { + attachments: WatcherSlackAttachment[] + dynamic_attachments?: WatcherSlackDynamicAttachment + from: string + icon?: string + text: string + to: string[] +} + +export interface WatcherSlackResult { + account?: string + message: WatcherSlackMessage +} + +export interface WatcherThrottleState { + reason: string + timestamp: DateString +} + +export interface WatcherTimeOfDay { + hour: integer[] + minute: integer[] +} + +export interface WatcherTimeOfMonth { + at: string[] + on: integer[] +} + +export interface WatcherTimeOfWeek { + at: string[] + on: WatcherDay[] +} + +export interface WatcherTimeOfYear { + at: string[] + int: WatcherMonth[] + on: integer[] +} + +export interface WatcherTriggerContainer { + schedule: WatcherScheduleContainer +} + +export interface WatcherTriggerEventContainer { + schedule: WatcherScheduleTriggerEvent +} + +export interface WatcherTriggerEventResult { + manual: WatcherTriggerEventContainer + triggered_time: DateString + type: string +} + +export interface WatcherWatch { + actions: Record + condition: WatcherConditionContainer + input: WatcherInputContainer + metadata?: Metadata + status?: WatcherWatchStatus + throttle_period?: string + transform?: TransformContainer + trigger: WatcherTriggerContainer + throttle_period_in_millis?: long +} + +export interface WatcherWatchStatus { + actions: WatcherActions + last_checked?: DateString + last_met_condition?: DateString + state: WatcherActivationState + version: VersionNumber + execution_state?: string +} + +export interface WatcherWebhookResult { + request: WatcherHttpInputRequestResult + response?: WatcherHttpInputResponseResult +} + +export interface WatcherAckWatchRequest extends RequestBase { + watch_id: Name + action_id?: Names +} + +export interface WatcherAckWatchResponse { + status: WatcherWatchStatus +} + +export interface WatcherActivateWatchRequest extends RequestBase { + watch_id: Name +} + +export interface WatcherActivateWatchResponse { + status: WatcherActivationStatus +} + +export interface WatcherDeactivateWatchRequest extends RequestBase { + watch_id: Name +} + +export interface WatcherDeactivateWatchResponse { + status: WatcherActivationStatus +} + +export interface WatcherDeleteWatchRequest extends RequestBase { + id: Name +} + +export interface WatcherDeleteWatchResponse { + found: boolean + _id: Id + _version: VersionNumber +} + +export interface WatcherExecuteWatchRequest extends RequestBase { + id?: Id + debug?: boolean + action_modes?: Record + alternative_input?: Record + ignore_condition?: boolean + record_execution?: boolean + simulated_actions?: WatcherSimulatedActions + trigger_data?: 
WatcherScheduleTriggerEvent + watch?: WatcherWatch +} + +export interface WatcherExecuteWatchResponse { + _id: Id + watch_record: WatcherExecuteWatchWatchRecord +} + +export interface WatcherExecuteWatchWatchRecord { + condition: WatcherConditionContainer + input: WatcherInputContainer + messages: string[] + metadata: Metadata + node: string + result: WatcherExecutionResult + state: WatcherExecutionStatus + trigger_event: WatcherTriggerEventResult + user: Username + watch_id: Id +} + +export interface WatcherGetWatchRequest extends RequestBase { + id: Name +} + +export interface WatcherGetWatchResponse { + found: boolean + _id: Id + status?: WatcherWatchStatus + watch?: WatcherWatch + _primary_term?: integer + _seq_no?: SequenceNumber + _version?: VersionNumber +} + +export interface WatcherPutWatchRequest extends RequestBase { + id: Id + active?: boolean + if_primary_term?: long + if_sequence_number?: long + version?: VersionNumber + actions?: Record + condition?: WatcherConditionContainer + input?: WatcherInputContainer + metadata?: Metadata + throttle_period?: string + transform?: TransformContainer + trigger?: WatcherTriggerContainer +} + +export interface WatcherPutWatchResponse { + created: boolean + _id: Id + _primary_term: long + _seq_no: SequenceNumber + _version: VersionNumber +} + +export interface WatcherQueryWatchesRequest extends RequestBase { + from?: integer + size?: integer + query?: QueryDslQueryContainer + sort?: SearchSort + search_after?: SearchSortResults +} + +export interface WatcherQueryWatchesResponse { + count: integer + watches: WatcherQueryWatch[] +} + +export interface WatcherStartRequest extends RequestBase { +} + +export interface WatcherStartResponse extends AcknowledgedResponseBase { +} + +export interface WatcherStatsRequest extends RequestBase { + metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[] + emit_stacktraces?: boolean +} + +export interface WatcherStatsResponse { + _nodes: NodeStatistics + cluster_name: Name + manually_stopped: boolean + stats: WatcherStatsWatcherNodeStats[] +} + +export interface WatcherStatsWatchRecordQueuedStats { + execution_time: DateString +} + +export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQueuedStats { + execution_phase: WatcherExecutionPhase + triggered_time: DateString + executed_actions?: string[] + watch_id: Id + watch_record_id: Id +} + +export type WatcherStatsWatcherMetric = '_all' | 'queued_watches' | 'current_watches' | 'pending_watches' + +export interface WatcherStatsWatcherNodeStats { + current_watches?: WatcherStatsWatchRecordStats[] + execution_thread_pool: WatcherExecutionThreadPool + queued_watches?: WatcherStatsWatchRecordQueuedStats[] + watch_count: long + watcher_state: WatcherStatsWatcherState + node_id: Id +} + +export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping' + +export interface WatcherStopRequest extends RequestBase { +} + +export interface WatcherStopResponse extends AcknowledgedResponseBase { +} + +export interface XpackInfoBuildInformation { + date: DateString + hash: string +} + +export interface XpackInfoFeature { + available: boolean + description?: string + enabled: boolean + native_code_info?: XpackInfoNativeCodeInformation +} + +export interface XpackInfoFeatures { + aggregate_metric: XpackInfoFeature + analytics: XpackInfoFeature + ccr: XpackInfoFeature + data_frame?: XpackInfoFeature + data_science?: XpackInfoFeature + data_streams: XpackInfoFeature + data_tiers: XpackInfoFeature + enrich: XpackInfoFeature 
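+
+  // Sketch of registering a watch with WatcherPutWatchRequest, assuming an
+  // instantiated `client` (the id, trigger and action are illustrative):
+  //
+  //   await client.watcher.putWatch({
+  //     id: 'error-spike',
+  //     trigger: { schedule: { interval: '10m' } },
+  //     input: { simple: { note: 'placeholder payload' } },
+  //     condition: { always: {} },
+  //     actions: { log: { logging: { text: 'watch {{ctx.watch_id}} fired' } } }
+  //   })
+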
+ eql: XpackInfoFeature + flattened?: XpackInfoFeature + frozen_indices: XpackInfoFeature + graph: XpackInfoFeature + ilm: XpackInfoFeature + logstash: XpackInfoFeature + ml: XpackInfoFeature + monitoring: XpackInfoFeature + rollup: XpackInfoFeature + runtime_fields?: XpackInfoFeature + searchable_snapshots: XpackInfoFeature + security: XpackInfoFeature + slm: XpackInfoFeature + spatial: XpackInfoFeature + sql: XpackInfoFeature + transform: XpackInfoFeature + vectors: XpackInfoFeature + voting_only: XpackInfoFeature + watcher: XpackInfoFeature +} + +export interface XpackInfoMinimalLicenseInformation { + expiry_date_in_millis: EpochMillis + mode: LicenseLicenseType + status: LicenseLicenseStatus + type: LicenseLicenseType + uid: string +} + +export interface XpackInfoNativeCodeInformation { + build_hash: string + version: VersionString +} + +export interface XpackInfoRequest extends RequestBase { + categories?: string[] +} + +export interface XpackInfoResponse { + build: XpackInfoBuildInformation + features: XpackInfoFeatures + license: XpackInfoMinimalLicenseInformation + tagline: string +} + +export interface XpackUsageAnalytics extends XpackUsageBase { + stats: XpackUsageAnalyticsStatistics +} + +export interface XpackUsageAnalyticsStatistics { + boxplot_usage: long + cumulative_cardinality_usage: long + string_stats_usage: long + top_metrics_usage: long + t_test_usage: long + moving_percentiles_usage: long + normalize_usage: long + rate_usage: long + multi_terms_usage?: long +} + +export interface XpackUsageAudit extends XpackUsageFeatureToggle { + outputs?: string[] +} + +export interface XpackUsageBase { + available: boolean + enabled: boolean +} + +export interface XpackUsageBaseUrlConfig { + url_name: string + url_value: string +} + +export interface XpackUsageCcr extends XpackUsageBase { + auto_follow_patterns_count: integer + follower_indices_count: integer +} + +export interface XpackUsageCounter { + active: long + total: long +} + +export interface XpackUsageDataStreams extends XpackUsageBase { + data_streams: long + indices_count: long +} + +export interface XpackUsageDataTierPhaseStatistics { + node_count: long + index_count: long + total_shard_count: long + primary_shard_count: long + doc_count: long + total_size_bytes: long + primary_size_bytes: long + primary_shard_size_avg_bytes: long + primary_shard_size_median_bytes: long + primary_shard_size_mad_bytes: long +} + +export interface XpackUsageDataTiers extends XpackUsageBase { + data_warm: XpackUsageDataTierPhaseStatistics + data_frozen?: XpackUsageDataTierPhaseStatistics + data_cold: XpackUsageDataTierPhaseStatistics + data_content: XpackUsageDataTierPhaseStatistics + data_hot: XpackUsageDataTierPhaseStatistics +} + +export interface XpackUsageDatafeed { + count: long +} + +export interface XpackUsageEql extends XpackUsageBase { + features: XpackUsageEqlFeatures + queries: Record +} + +export interface XpackUsageEqlFeatures { + join: uint + joins: XpackUsageEqlFeaturesJoin + keys: XpackUsageEqlFeaturesKeys + event: uint + pipes: XpackUsageEqlFeaturesPipes + sequence: uint + sequences: XpackUsageEqlFeaturesSequences +} + +export interface XpackUsageEqlFeaturesJoin { + join_queries_two: uint + join_queries_three: uint + join_until: uint + join_queries_five_or_more: uint + join_queries_four: uint +} + +export interface XpackUsageEqlFeaturesKeys { + join_keys_two: uint + join_keys_one: uint + join_keys_three: uint + join_keys_five_or_more: uint + join_keys_four: uint +} + +export interface XpackUsageEqlFeaturesPipes { + 
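+
+  // Sketch of checking feature availability through XpackInfoResponse,
+  // assuming an instantiated `client`:
+  //
+  //   const info: XpackInfoResponse = await client.xpack.info()
+  //   if (info.features.watcher.enabled) console.log('license:', info.license.type)
+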
pipe_tail: uint + pipe_head: uint +} + +export interface XpackUsageEqlFeaturesSequences { + sequence_queries_three: uint + sequence_queries_four: uint + sequence_queries_two: uint + sequence_until: uint + sequence_queries_five_or_more: uint + sequence_maxspan: uint +} + +export interface XpackUsageFeatureToggle { + enabled: boolean +} + +export interface XpackUsageFlattened extends XpackUsageBase { + field_count: integer +} + +export interface XpackUsageFrozenIndices extends XpackUsageBase { + indices_count: long +} + +export interface XpackUsageIlm { + policy_count: integer + policy_stats: XpackUsageIlmPolicyStatistics[] +} + +export interface XpackUsageIlmPolicyStatistics { + indices_managed: integer + phases: IlmPhases +} + +export interface XpackUsageIpFilter { + http: boolean + transport: boolean +} + +export interface XpackUsageKibanaUrlConfig extends XpackUsageBaseUrlConfig { + time_range?: string +} + +export interface XpackUsageMachineLearning extends XpackUsageBase { + datafeeds: Record + jobs: Record + node_count: integer + data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs + inference: XpackUsageMlInference +} + +export interface XpackUsageMlCounter { + count: long +} + +export interface XpackUsageMlDataFrameAnalyticsJobs { + memory_usage?: XpackUsageMlDataFrameAnalyticsJobsMemory + _all: XpackUsageMlDataFrameAnalyticsJobsCount + analysis_counts?: EmptyObject +} + +export interface XpackUsageMlDataFrameAnalyticsJobsCount { + count: long +} + +export interface XpackUsageMlDataFrameAnalyticsJobsMemory { + peak_usage_bytes: MlJobStatistics +} + +export interface XpackUsageMlInference { + ingest_processors: Record + trained_models: XpackUsageMlInferenceTrainedModels +} + +export interface XpackUsageMlInferenceIngestProcessor { + num_docs_processed: XpackUsageMlInferenceIngestProcessorCount + pipelines: XpackUsageMlCounter + num_failures: XpackUsageMlInferenceIngestProcessorCount + time_ms: XpackUsageMlInferenceIngestProcessorCount +} + +export interface XpackUsageMlInferenceIngestProcessorCount { + max: long + sum: long + min: long +} + +export interface XpackUsageMlInferenceTrainedModels { + estimated_operations?: MlJobStatistics + estimated_heap_memory_usage_bytes?: MlJobStatistics + count?: XpackUsageMlInferenceTrainedModelsCount + _all: XpackUsageMlCounter +} + +export interface XpackUsageMlInferenceTrainedModelsCount { + total: long + prepackaged: long + other: long + regression: long + classification: long +} + +export interface XpackUsageMonitoring extends XpackUsageBase { + collection_enabled: boolean + enabled_exporters: Record +} + +export interface XpackUsageQuery { + count?: integer + failed?: integer + paging?: integer + total?: integer +} + +export interface XpackUsageRealm extends XpackUsageBase { + name?: string[] + order?: long[] + size?: long[] + cache?: XpackUsageRealmCache[] + has_authorization_realms?: boolean[] + has_default_username_pattern?: boolean[] + has_truststore?: boolean[] + is_authentication_delegated?: boolean[] +} + +export interface XpackUsageRealmCache { + size: long +} + +export interface XpackUsageRequest extends RequestBase { + master_timeout?: Time +} + +export interface XpackUsageResponse { + aggregate_metric: XpackUsageBase + analytics: XpackUsageAnalytics + watcher: XpackUsageWatcher + ccr: XpackUsageCcr + data_frame?: XpackUsageBase + data_science?: XpackUsageBase + data_streams?: XpackUsageDataStreams + data_tiers: XpackUsageDataTiers + enrich?: XpackUsageBase + eql: XpackUsageEql + flattened?: XpackUsageFlattened + 
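+
+  // Sketch of reading the usage report typed by XpackUsageRequest/Response,
+  // assuming an instantiated `client` (the fields read below are defined further down):
+  //
+  //   const usage: XpackUsageResponse = await client.xpack.usage({ master_timeout: '30s' })
+  //   console.log(usage.security.realms, usage.ml.node_count)
+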
frozen_indices: XpackUsageFrozenIndices + graph: XpackUsageBase + ilm: XpackUsageIlm + logstash: XpackUsageBase + ml: XpackUsageMachineLearning + monitoring: XpackUsageMonitoring + rollup: XpackUsageBase + runtime_fields?: XpackUsageRuntimeFieldTypes + spatial: XpackUsageBase + searchable_snapshots: XpackUsageSearchableSnapshots + security: XpackUsageSecurity + slm: XpackUsageSlm + sql: XpackUsageSql + transform: XpackUsageBase + vectors: XpackUsageVector + voting_only: XpackUsageBase +} + +export interface XpackUsageRoleMapping { + enabled: integer + size: integer +} + +export interface XpackUsageRuntimeFieldTypes extends XpackUsageBase { + field_types: XpackUsageRuntimeFieldsType[] +} + +export interface XpackUsageRuntimeFieldsType { + chars_max: long + chars_total: long + count: long + doc_max: long + doc_total: long + index_count: long + lang: string[] + lines_max: long + lines_total: long + name: Field + scriptless_count: long + shadowed_count: long + source_max: long + source_total: long +} + +export interface XpackUsageSearchableSnapshots extends XpackUsageBase { + indices_count: integer + full_copy_indices_count?: integer + shared_cache_indices_count?: integer +} + +export interface XpackUsageSecurity extends XpackUsageBase { + api_key_service: XpackUsageFeatureToggle + anonymous: XpackUsageFeatureToggle + audit: XpackUsageAudit + fips_140: XpackUsageFeatureToggle + ipfilter: XpackUsageIpFilter + realms: Record + role_mapping: Record + roles: XpackUsageSecurityRoles + ssl: XpackUsageSsl + system_key?: XpackUsageFeatureToggle + token_service: XpackUsageFeatureToggle + operator_privileges: XpackUsageBase +} + +export interface XpackUsageSecurityRoles { + native: XpackUsageSecurityRolesNative + dls: XpackUsageSecurityRolesDls + file: XpackUsageSecurityRolesFile +} + +export interface XpackUsageSecurityRolesDls { + bit_set_cache: XpackUsageSecurityRolesDlsBitSetCache +} + +export interface XpackUsageSecurityRolesDlsBitSetCache { + count: integer + memory?: ByteSize + memory_in_bytes: ulong +} + +export interface XpackUsageSecurityRolesFile { + dls: boolean + fls: boolean + size: long +} + +export interface XpackUsageSecurityRolesNative { + dls: boolean + fls: boolean + size: long +} + +export interface XpackUsageSlm extends XpackUsageBase { + policy_count?: integer + policy_stats?: SlmStatistics +} + +export interface XpackUsageSql extends XpackUsageBase { + features: Record + queries: Record +} + +export interface XpackUsageSsl { + http: XpackUsageFeatureToggle + transport: XpackUsageFeatureToggle +} + +export type XpackUsageUrlConfig = XpackUsageBaseUrlConfig | XpackUsageKibanaUrlConfig + +export interface XpackUsageVector extends XpackUsageBase { + dense_vector_dims_avg_count: integer + dense_vector_fields_count: integer + sparse_vector_fields_count?: integer +} + +export interface XpackUsageWatcher extends XpackUsageBase { + execution: XpackUsageWatcherActions + watch: XpackUsageWatcherWatch + count: XpackUsageCounter +} + +export interface XpackUsageWatcherActionTotals { + total: long + total_time_in_ms: long +} + +export interface XpackUsageWatcherActions { + actions: Record +} + +export interface XpackUsageWatcherWatch { + input: Record + condition?: Record + action?: Record + trigger: XpackUsageWatcherWatchTrigger +} + +export interface XpackUsageWatcherWatchTrigger { + schedule?: XpackUsageWatcherWatchTriggerSchedule + _all: XpackUsageCounter +} + +export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter { + cron: XpackUsageCounter + _all: 
XpackUsageCounter
+}
+
+export interface SpecUtilsAdditionalProperties {
+  [key: string]: never
+}
+
+export interface SpecUtilsCommonQueryParameters {
+  error_trace?: boolean
+  filter_path?: string | string[]
+  human?: boolean
+  pretty?: boolean
+  source_query_string?: string
+}
+
+export interface SpecUtilsAdditionalProperty {
+  [key: string]: never
+}
+
+export interface SpecUtilsCommonCatQueryParameters {
+  format?: string
+  h?: Names
+  help?: boolean
+  local?: boolean
+  master_timeout?: Time
+  s?: string[]
+  v?: boolean
+}
+
+export interface SpecUtilsOverloadOf {
+  [key: string]: never
+}
diff --git a/api/types.d.ts b/src/api/typesWithBodyKey.ts
similarity index 88%
rename from api/types.d.ts
rename to src/api/typesWithBodyKey.ts
index 9536d1616..9dbb12f26 100644
--- a/api/types.d.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -17,6 +17,17 @@
  * under the License.
  */
 
+/* eslint-disable @typescript-eslint/array-type */
+/* eslint-disable @typescript-eslint/no-empty-interface */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+/**
+ * We are still working on this type, it will arrive soon.
+ * If it's critical for you, please open an issue.
+ * https://github.com/elastic/elasticsearch-js
+ */
+export type TODO = Record
+
 export interface BulkCreateOperation extends BulkOperation {
 }
 
@@ -36,12 +47,12 @@ export interface BulkIndexResponseItem extends BulkResponseItemBase {
 }
 
 export interface BulkOperation {
-  _id: Id
-  _index: IndexName
-  retry_on_conflict: integer
-  routing: Routing
-  version: VersionNumber
-  version_type: VersionType
+  _id?: Id
+  _index?: IndexName
+  retry_on_conflict?: integer
+  routing?: Routing
+  version?: VersionNumber
+  version_type?: VersionType
 }
 
 export interface BulkOperationContainer {
@@ -63,6 +74,7 @@ export interface BulkRequest extends RequestBase {
   timeout?: Time
   wait_for_active_shards?: WaitForActiveShards
   require_alias?: boolean
+  /** @deprecated The use of the 'body' key has been deprecated, use 'operations' instead. */
   body?: (BulkOperationContainer | TSource)[]
 }
 
@@ -103,6 +115,7 @@ export interface BulkUpdateResponseItem extends BulkResponseItemBase {
 
 export interface ClearScrollRequest extends RequestBase {
   scroll_id?: Ids
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     scroll_id?: Ids
   }
@@ -114,6 +127,7 @@ export interface ClearScrollResponse {
 }
 
 export interface ClosePointInTimeRequest extends RequestBase {
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     id: Id
   }
@@ -126,7 +140,6 @@ export interface ClosePointInTimeResponse {
 
 export interface CountRequest extends RequestBase {
   index?: Indices
-  type?: Types
   allow_no_indices?: boolean
   analyzer?: string
   analyze_wildcard?: boolean
   default_operator?: DefaultOperator
   df?: string
   expand_wildcards?: ExpandWildcards
   ignore_throttled?: boolean
   ignore_unavailable?: boolean
   lenient?: boolean
   min_score?: double
   preference?: string
-  query_on_query_string?: string
   routing?: Routing
   terminate_after?: long
   q?: string
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     query?: QueryDslQueryContainer
   }
@@ -163,6 +176,7 @@ export interface CreateRequest extends RequestBase {
   version?: VersionNumber
   version_type?: VersionType
   wait_for_active_shards?: WaitForActiveShards
+  /** @deprecated The use of the 'body' key has been deprecated, use 'document' instead.
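+   * A minimal migration sketch, assuming an instantiated `client` and a `doc` object:
+   *   // before: await client.create({ index: 'my-index', id: '1', body: doc })
+   *   // after:  await client.create({ index: 'my-index', id: '1', document: doc })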
*/ body?: TDocument } @@ -188,7 +202,6 @@ export interface DeleteResponse extends WriteResponseBase { export interface DeleteByQueryRequest extends RequestBase { index: Indices - type?: Types allow_no_indices?: boolean analyzer?: string analyze_wildcard?: boolean @@ -222,6 +235,7 @@ export interface DeleteByQueryRequest extends RequestBase { version?: boolean wait_for_active_shards?: WaitForActiveShards wait_for_completion?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { max_docs?: long query?: QueryDslQueryContainer @@ -251,7 +265,7 @@ export interface DeleteByQueryRethrottleRequest extends RequestBase { requests_per_second?: long } -export interface DeleteByQueryRethrottleResponse extends TaskListResponse { +export interface DeleteByQueryRethrottleResponse extends TasksListResponse { } export interface DeleteScriptRequest extends RequestBase { @@ -266,14 +280,13 @@ export interface DeleteScriptResponse extends AcknowledgedResponseBase { export interface ExistsRequest extends RequestBase { id: Id index: IndexName - type?: Type preference?: string realtime?: boolean refresh?: boolean routing?: Routing - source_enabled?: boolean - source_excludes?: Fields - source_includes?: Fields + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields stored_fields?: Fields version?: VersionNumber version_type?: VersionType @@ -289,9 +302,9 @@ export interface ExistsSourceRequest extends RequestBase { realtime?: boolean refresh?: boolean routing?: Routing - source_enabled?: boolean - source_excludes?: Fields - source_includes?: Fields + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields version?: VersionNumber version_type?: VersionType } @@ -313,20 +326,19 @@ export interface ExplainExplanationDetail { export interface ExplainRequest extends RequestBase { id: Id index: IndexName - type?: Type analyzer?: string analyze_wildcard?: boolean default_operator?: DefaultOperator df?: string lenient?: boolean preference?: string - query_on_query_string?: string routing?: Routing _source?: boolean | Fields _source_excludes?: Fields _source_includes?: Fields stored_fields?: Fields q?: string + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
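+   * A minimal migration sketch, assuming an instantiated `client` (index, id and query are illustrative):
+   *   // before: await client.explain({ index: 'my-index', id: '1', body: { query: { match_all: {} } } })
+   *   // after:  await client.explain({ index: 'my-index', id: '1', query: { match_all: {} } })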
*/ body?: { query?: QueryDslQueryContainer } @@ -341,31 +353,6 @@ export interface ExplainResponse { get?: InlineGet } -export interface FieldCapsFieldCapabilitiesBodyIndexFilter { - range?: FieldCapsFieldCapabilitiesBodyIndexFilterRange - match_none?: EmptyObject - term?: FieldCapsFieldCapabilitiesBodyIndexFilterTerm -} - -export interface FieldCapsFieldCapabilitiesBodyIndexFilterRange { - timestamp: FieldCapsFieldCapabilitiesBodyIndexFilterRangeTimestamp -} - -export interface FieldCapsFieldCapabilitiesBodyIndexFilterRangeTimestamp { - gte?: integer - gt?: integer - lte?: integer - lt?: integer -} - -export interface FieldCapsFieldCapabilitiesBodyIndexFilterTerm { - versionControl: FieldCapsFieldCapabilitiesBodyIndexFilterTermVersionControl -} - -export interface FieldCapsFieldCapabilitiesBodyIndexFilterTermVersionControl { - value: string -} - export interface FieldCapsFieldCapability { aggregatable: boolean indices?: Indices @@ -374,6 +361,7 @@ export interface FieldCapsFieldCapability { non_searchable_indices?: Indices searchable: boolean type: string + metadata_field?: boolean } export interface FieldCapsRequest extends RequestBase { @@ -383,8 +371,10 @@ export interface FieldCapsRequest extends RequestBase { fields?: Fields ignore_unavailable?: boolean include_unmapped?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - index_filter?: FieldCapsFieldCapabilitiesBodyIndexFilter + index_filter?: QueryDslQueryContainer + runtime_mappings?: MappingRuntimeFields } } @@ -396,18 +386,16 @@ export interface FieldCapsResponse { export interface GetRequest extends RequestBase { id: Id index: IndexName - type?: Type preference?: string realtime?: boolean refresh?: boolean routing?: Routing - source_enabled?: boolean + _source?: boolean | Fields _source_excludes?: Fields _source_includes?: Fields stored_fields?: Fields version?: VersionNumber version_type?: VersionType - _source?: boolean | Fields } export interface GetResponse { @@ -478,7 +466,6 @@ export type GetSourceResponse = TDocument export interface IndexRequest extends RequestBase { id?: Id index: IndexName - type?: Type if_primary_term?: long if_seq_no?: SequenceNumber op_type?: OpType @@ -490,6 +477,7 @@ export interface IndexRequest extends RequestBase { version_type?: VersionType wait_for_active_shards?: WaitForActiveShards require_alias?: boolean + /** @deprecated The use of the 'body' key has been deprecated, use 'document' instead. */ body?: TDocument } @@ -536,7 +524,6 @@ export interface MgetOperation { export interface MgetRequest extends RequestBase { index?: IndexName - type?: Type preference?: string realtime?: boolean refresh?: boolean @@ -545,6 +532,7 @@ export interface MgetRequest extends RequestBase { _source_excludes?: Fields _source_includes?: Fields stored_fields?: Fields + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { docs?: MgetOperation[] ids?: MgetMultiGetId[] @@ -579,7 +567,6 @@ export interface MsearchHeader { export interface MsearchRequest extends RequestBase { index?: Indices - type?: Types allow_no_indices?: boolean ccs_minimize_roundtrips?: boolean expand_wildcards?: ExpandWildcards @@ -591,6 +578,7 @@ export interface MsearchRequest extends RequestBase { search_type?: SearchType rest_total_hits_as_int?: boolean typed_keys?: boolean + /** @deprecated The use of the 'body' key has been deprecated, use 'searches' instead. 
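+   * A minimal migration sketch, assuming an instantiated `client` (the header/body pair is illustrative):
+   *   // before: await client.msearch({ index: 'my-index', body: [{}, { query: { match: { foo: 'bar' } } }] })
+   *   // after:  await client.msearch({ index: 'my-index', searches: [{}, { query: { match: { foo: 'bar' } } }] })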
*/ body?: (MsearchHeader | MsearchBody)[] } @@ -605,17 +593,17 @@ export interface MsearchSearchResult extends SearchResponse export interface MsearchTemplateRequest extends RequestBase { index?: Indices - type?: Types ccs_minimize_roundtrips?: boolean max_concurrent_searches?: long search_type?: SearchType rest_total_hits_as_int?: boolean typed_keys?: boolean + /** @deprecated The use of the 'body' key has been deprecated, use 'search_templates' instead. */ body?: MsearchTemplateTemplateItem[] } export interface MsearchTemplateResponse { - responses: SearchResponse[] + responses: (SearchResponse | ErrorResponseBase)[] took: long } @@ -627,24 +615,24 @@ export interface MsearchTemplateTemplateItem { } export interface MtermvectorsOperation { - doc: object - fields: Fields - field_statistics: boolean - filter: TermvectorsFilter _id: Id - _index: IndexName - offsets: boolean - payloads: boolean - positions: boolean - routing: Routing - term_statistics: boolean - version: VersionNumber - version_type: VersionType + _index?: IndexName + doc?: object + fields?: Fields + field_statistics?: boolean + filter?: TermvectorsFilter + offsets?: boolean + payloads?: boolean + positions?: boolean + routing?: Routing + term_statistics?: boolean + version?: VersionNumber + version_type?: VersionType } export interface MtermvectorsRequest extends RequestBase { index?: IndexName - type?: Type + ids?: Id[] fields?: Fields field_statistics?: boolean offsets?: boolean @@ -656,6 +644,7 @@ export interface MtermvectorsRequest extends RequestBase { term_statistics?: boolean version?: VersionNumber version_type?: VersionType + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { docs?: MtermvectorsOperation[] ids?: Id[] @@ -667,12 +656,13 @@ export interface MtermvectorsResponse { } export interface MtermvectorsTermVectorsResult { - found: boolean - id: Id - index: IndexName - term_vectors: Record - took: long - version: VersionNumber + _id: Id + _index: IndexName + _version?: VersionNumber + took?: long + found?: boolean + term_vectors?: Record + error?: ErrorCause } export interface OpenPointInTimeRequest extends RequestBase { @@ -694,6 +684,7 @@ export interface PutScriptRequest extends RequestBase { context?: Name master_timeout?: Time timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { script?: StoredScript } @@ -780,6 +771,7 @@ export interface RankEvalRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean search_type?: string + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { requests: RankEvalRankEvalRequestItem[] metric?: RankEvalRankEvalMetric @@ -822,6 +814,7 @@ export interface ReindexRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards wait_for_completion?: boolean require_alias?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { conflicts?: Conflicts dest?: ReindexDestination @@ -902,6 +895,8 @@ export interface ReindexRethrottleResponse { } export interface RenderSearchTemplateRequest extends RequestBase { + id?: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
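+   * A minimal migration sketch, assuming an instantiated `client` (template id and params are illustrative):
+   *   // before: await client.renderSearchTemplate({ id: 'my-template', body: { params: { q: 'test' } } })
+   *   // after:  await client.renderSearchTemplate({ id: 'my-template', params: { q: 'test' } })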
*/ body?: { file?: string params?: Record @@ -926,6 +921,7 @@ export interface ScriptsPainlessExecutePainlessExecutionPosition { } export interface ScriptsPainlessExecuteRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { context?: string context_setup?: ScriptsPainlessExecutePainlessContextSetup @@ -941,11 +937,10 @@ export interface ScrollRequest extends RequestBase { scroll_id?: Id scroll?: Time rest_total_hits_as_int?: boolean - total_hits_as_integer?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { scroll?: Time scroll_id: ScrollId - rest_total_hits_as_int?: boolean } } @@ -954,7 +949,6 @@ export interface ScrollResponse extends SearchResponse aggregations?: Record @@ -1076,6 +1071,7 @@ export interface SearchAggregationProfile { } export interface SearchAggregationProfileDebug { + [key: string]: never } export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' @@ -1156,7 +1152,7 @@ export interface SearchGeoDistanceSortKeys { unit?: DistanceUnit } export type SearchGeoDistanceSort = SearchGeoDistanceSortKeys | - { [property: string]: QueryDslGeoLocation | QueryDslGeoLocation[] } +{ [property: string]: QueryDslGeoLocation | QueryDslGeoLocation[] } export interface SearchHighlight { fields: Record @@ -1424,7 +1420,7 @@ export interface SearchSortContainerKeys { _script?: SearchScriptSort } export type SearchSortContainer = SearchSortContainerKeys | - { [property: string]: SearchFieldSort | SearchSortOrder } +{ [property: string]: SearchFieldSort | SearchSortOrder } export type SearchSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' @@ -1515,6 +1511,40 @@ export interface SearchTotalHits { export type SearchTotalHitsRelation = 'eq' | 'gte' +export interface SearchMvtRequest extends RequestBase { + index: Indices + field: Field + zoom: SearchMvtZoomLevel + x: SearchMvtCoordinate + y: SearchMvtCoordinate + exact_bounds?: boolean + extent?: integer + grid_precision?: integer + grid_type?: SearchMvtGridType + size?: integer + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + aggs?: Record + exact_bounds?: boolean + extent?: integer + fields?: Fields + grid_precision?: integer + grid_type?: SearchMvtGridType + query?: QueryDslQueryContainer + runtime_mappings?: MappingRuntimeFields + size?: integer + sort?: SearchSort + } +} + +export type SearchMvtResponse = MapboxVectorTiles + +export type SearchMvtCoordinate = integer + +export type SearchMvtGridType = 'grid' | 'point' + +export type SearchMvtZoomLevel = integer + export interface SearchShardsRequest extends RequestBase { index?: Indices allow_no_indices?: boolean @@ -1538,7 +1568,6 @@ export interface SearchShardsShardStoreIndex { export interface SearchTemplateRequest extends RequestBase { index?: Indices - type?: Types allow_no_indices?: boolean ccs_minimize_roundtrips?: boolean expand_wildcards?: ExpandWildcards @@ -1550,11 +1579,14 @@ export interface SearchTemplateRequest extends RequestBase { routing?: Routing scroll?: Time search_type?: SearchType - total_hits_as_integer?: boolean + rest_total_hits_as_int?: boolean typed_keys?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
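+   * A minimal migration sketch, assuming an instantiated `client` (index, template id and params are illustrative):
+   *   // before: await client.searchTemplate({ index: 'my-index', body: { id: 'my-template', params: { q: 'test' } } })
+   *   // after:  await client.searchTemplate({ index: 'my-index', id: 'my-template', params: { q: 'test' } })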
*/ body?: { + explain?: boolean id?: Id params?: Record + profile?: boolean source?: string } } @@ -1568,6 +1600,7 @@ export interface SearchTemplateResponse { export interface TermsEnumRequest extends RequestBase { index: IndexName + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { field: Field size?: integer @@ -1604,7 +1637,6 @@ export interface TermvectorsFilter { export interface TermvectorsRequest extends RequestBase { index: IndexName id?: Id - type?: Type fields?: Fields field_statistics?: boolean offsets?: boolean @@ -1616,6 +1648,7 @@ export interface TermvectorsRequest extends RequestBase { term_statistics?: boolean version?: VersionNumber version_type?: VersionType + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { doc?: TDocument filter?: TermvectorsFilter @@ -1664,12 +1697,12 @@ export interface UpdateRequest require_alias?: boolean retry_on_conflict?: long routing?: Routing - source_enabled?: boolean timeout?: Time wait_for_active_shards?: WaitForActiveShards _source?: boolean | Fields _source_excludes?: Fields _source_includes?: Fields + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { detect_noop?: boolean doc?: TPartialDocument @@ -1687,7 +1720,6 @@ export interface UpdateResponse extends WriteResponseBase { export interface UpdateByQueryRequest extends RequestBase { index: Indices - type?: Types allow_no_indices?: boolean analyzer?: string analyze_wildcard?: boolean @@ -1700,7 +1732,6 @@ export interface UpdateByQueryRequest extends RequestBase { lenient?: boolean pipeline?: string preference?: string - query_on_query_string?: string refresh?: boolean request_cache?: boolean requests_per_second?: long @@ -1712,9 +1743,9 @@ export interface UpdateByQueryRequest extends RequestBase { size?: long slices?: long sort?: string[] - source_enabled?: boolean - source_excludes?: Fields - source_includes?: Fields + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields stats?: string[] terminate_after?: long timeout?: Time @@ -1722,6 +1753,7 @@ export interface UpdateByQueryRequest extends RequestBase { version_type?: boolean wait_for_active_shards?: WaitForActiveShards wait_for_completion?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { max_docs?: long query?: QueryDslQueryContainer @@ -1758,7 +1790,7 @@ export interface UpdateByQueryRethrottleResponse { } export interface UpdateByQueryRethrottleUpdateByQueryRethrottleNode extends SpecUtilsBaseNode { - tasks: Record + tasks: Record } export interface SpecUtilsBaseNode { @@ -1798,7 +1830,7 @@ export interface BulkStats { export type ByteSize = long | string -export type Bytes = 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb' +export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb' export type CategoryId = string @@ -1822,6 +1854,8 @@ export type Conflicts = 'abort' | 'proceed' export type DataStreamName = string +export type DataStreamNames = DataStreamName | DataStreamName[] + export interface DateField { field: Field format?: string @@ -1864,6 +1898,7 @@ export interface ElasticsearchVersionInfo { } export interface EmptyObject { + [key: string]: never } export type EpochMillis = string | long @@ -1903,7 +1938,7 @@ export interface ErrorCause { export interface ErrorResponseBase { error: MainError | string - status: integer + status?: integer } export type ExpandWildcardOptions = 'all' | 'open' | 'closed' | 'hidden' | 'none' @@ -2004,7 +2039,7 @@ export interface IndexingStats { types?: Record } -export type Indices = string | string[] +export type Indices = IndexName | IndexName[] export interface IndicesResponseBase extends AcknowledgedResponseBase { _shards?: ShardStatistics @@ -2038,6 +2073,8 @@ export interface MainError extends ErrorCause { headers?: Record } +export type MapboxVectorTiles = ArrayBuffer + export interface MergesStats { current: long current_docs: long @@ -2067,7 +2104,7 @@ export type MultiTermQueryRewrite = string export type Name = string -export type Names = string | string[] +export type Names = Name | Name[] export type Namespace = string @@ -2082,7 +2119,7 @@ export interface NodeAttributes { export type NodeId = string -export type NodeIds = string +export type NodeIds = NodeId | NodeId[] export type NodeName = string @@ -2187,7 +2224,7 @@ export type Routing = string export type Script = InlineScript | IndexedScript | string export interface ScriptBase { - lang?: ScriptLanguage + lang?: ScriptLanguage | string params?: Record } @@ -2300,7 +2337,7 @@ export interface StoreStats { } export interface StoredScript { - lang?: ScriptLanguage + lang?: ScriptLanguage | string source: string } @@ -2316,11 +2353,14 @@ export type Time = string | integer export type TimeSpan = string +export type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd' + export type TimeZone = string export type Timestamp = string export interface Transform { + [key: string]: never } export interface TransformContainer { @@ -2557,6 +2597,7 @@ export interface AggregationsBucketSortAggregation extends AggregationsAggregati } export interface AggregationsBucketsPath { + [key: string]: never } export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { @@ -2597,7 +2638,7 @@ export interface AggregationsCompositeAggregationSource { export interface AggregationsCompositeBucketKeys { } export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys | - { [property: string]: AggregationsAggregate } +{ [property: string]: AggregationsAggregate } export interface AggregationsCompositeBucketAggregate extends AggregationsMultiBucketAggregate> { after_key: Record @@ -2624,12 +2665,13 @@ export interface AggregationsDateHistogramAggregation extends AggregationsBucket params?: Record script?: 
Script time_zone?: string + keyed?: boolean } export interface AggregationsDateHistogramBucketKeys { } export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys | - { [property: string]: AggregationsAggregate } +{ [property: string]: AggregationsAggregate } export type AggregationsDateInterval = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'quarter' | 'year' @@ -2639,6 +2681,7 @@ export interface AggregationsDateRangeAggregation extends AggregationsBucketAggr missing?: AggregationsMissing ranges?: AggregationsDateRangeExpression[] time_zone?: string + keyed?: boolean } export interface AggregationsDateRangeExpression { @@ -2697,13 +2740,14 @@ export interface AggregationsFiltersAggregation extends AggregationsBucketAggreg filters?: Record | QueryDslQueryContainer[] other_bucket?: boolean other_bucket_key?: string + keyed?: boolean } export interface AggregationsFiltersBucketItemKeys { doc_count: long } export type AggregationsFiltersBucketItem = AggregationsFiltersBucketItemKeys | - { [property: string]: AggregationsAggregate } +{ [property: string]: AggregationsAggregate } export interface AggregationsFormatMetricAggregationBase extends AggregationsMetricAggregationBase { format?: string @@ -2820,6 +2864,7 @@ export interface AggregationsHistogramAggregation extends AggregationsBucketAggr order?: AggregationsHistogramOrder script?: Script format?: string + keyed?: boolean } export interface AggregationsHistogramOrder { @@ -2867,7 +2912,7 @@ export interface AggregationsIpRangeAggregationRange { export interface AggregationsIpRangeBucketKeys { } export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys | - { [property: string]: AggregationsAggregate } +{ [property: string]: AggregationsAggregate } export interface AggregationsKeyedBucketKeys { doc_count: long @@ -2875,7 +2920,7 @@ export interface AggregationsKeyedBucketKeys { key_as_string: string } export type AggregationsKeyedBucket = AggregationsKeyedBucketKeys | - { [property: string]: AggregationsAggregate } +{ [property: string]: AggregationsAggregate } export interface AggregationsKeyedValueAggregate extends AggregationsValueAggregate { keys: string[] @@ -2961,6 +3006,7 @@ export interface AggregationsMovingFunctionAggregation extends AggregationsPipel export interface AggregationsMovingPercentilesAggregation extends AggregationsPipelineAggregationBase { window?: integer shift?: integer + keyed?: boolean } export interface AggregationsMultiBucketAggregate extends AggregationsAggregateBase { @@ -2988,13 +3034,14 @@ export interface AggregationsNormalizeAggregation extends AggregationsPipelineAg method?: AggregationsNormalizeMethod } -export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'zscore' | 'softmax' +export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax' export interface AggregationsParentAggregation extends AggregationsBucketAggregationBase { type?: RelationName } export interface AggregationsPercentageScoreHeuristic { + [key: string]: never } export interface AggregationsPercentileItem { @@ -3034,12 +3081,13 @@ export interface AggregationsRangeAggregation extends AggregationsBucketAggregat field?: Field ranges?: AggregationsAggregationRange[] script?: Script + keyed?: boolean } export interface AggregationsRangeBucketKeys { } export type AggregationsRangeBucket = AggregationsRangeBucketKeys | - { [property: string]: AggregationsAggregate } +{ [property: 
string]: AggregationsAggregate } export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase { exclude?: string | string[] @@ -3054,7 +3102,7 @@ export interface AggregationsRareTermsAggregation extends AggregationsBucketAggr export interface AggregationsRareTermsBucketKeys { } export type AggregationsRareTermsBucket = AggregationsRareTermsBucketKeys | - { [property: string]: AggregationsAggregate } +{ [property: string]: AggregationsAggregate } export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase { unit?: AggregationsDateInterval @@ -3064,7 +3112,7 @@ export interface AggregationsRateAggregation extends AggregationsFormatMetricAgg export type AggregationsRateMode = 'sum' | 'value_count' export interface AggregationsRegressionInferenceOptions { - results_field: Field + results_field?: Field num_top_feature_importance_values?: integer } @@ -3123,7 +3171,7 @@ export interface AggregationsSignificantTermsAggregation extends AggregationsBuc export interface AggregationsSignificantTermsBucketKeys { } export type AggregationsSignificantTermsBucket = AggregationsSignificantTermsBucketKeys | - { [property: string]: AggregationsAggregate } +{ [property: string]: AggregationsAggregate } export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase { background_filter?: QueryDslQueryContainer @@ -3148,7 +3196,7 @@ export interface AggregationsSingleBucketAggregateKeys extends AggregationsAggre doc_count: double } export type AggregationsSingleBucketAggregate = AggregationsSingleBucketAggregateKeys | - { [property: string]: AggregationsAggregate } +{ [property: string]: AggregationsAggregate } export interface AggregationsStandardDeviationBounds { lower?: double @@ -3315,22 +3363,26 @@ export interface AggregationsWeightedAverageValue { script?: Script } +export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer + export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { + type: 'asciifolding' preserve_original: boolean } export type AnalysisCharFilter = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceTokenFilter export interface AnalysisCharFilterBase { - type: string version?: VersionString } export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase { + type: 'char_group' tokenize_on_chars: string[] } export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase { + type: 'common_grams' common_words: string[] common_words_path: string ignore_case: boolean @@ -3348,13 +3400,30 @@ export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilter } export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { + type: 'condition' filter: string[] script: Script } +export interface AnalysisCustomAnalyzer { + type: 'custom' + char_filter?: string[] + filter?: string[] + position_increment_gap?: integer + position_offset_gap?: integer + tokenizer: string +} + +export interface AnalysisCustomNormalizer { + type: 'custom' + char_filter?: string[] + filter?: string[] +} + export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity' export interface AnalysisDelimitedPayloadTokenFilter extends 
AnalysisTokenFilterBase { + type: 'delimited_payload' delimiter: string encoding: AnalysisDelimitedPayloadEncoding } @@ -3362,12 +3431,14 @@ export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilter export type AnalysisEdgeNGramSide = 'front' | 'back' export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { + type: 'edge_ngram' max_gram: integer min_gram: integer side: AnalysisEdgeNGramSide } export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { + type: 'edge_ngram' custom_token_chars: string max_gram: integer min_gram: integer @@ -3375,19 +3446,33 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { } export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { + type: 'elision' articles: string[] articles_case: boolean } +export interface AnalysisFingerprintAnalyzer { + type: 'fingerprint' + version: VersionString + max_output_size: integer + preserve_original: boolean + separator: string + stopwords: AnalysisStopWords + stopwords_path: string +} + export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase { + type: 'fingerprint' max_output_size: integer separator: string } export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { + type: 'html_strip' } export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { + type: 'hunspell' dedup: boolean dictionary: string locale: string @@ -3395,25 +3480,45 @@ export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { } export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase { + type: 'hyphenation_decompounder' +} + +export interface AnalysisIcuAnalyzer { + type: 'icu_analyzer' + method: AnalysisIcuNormalizationType + mode: AnalysisIcuNormalizationMode } +export type AnalysisIcuNormalizationMode = 'decompose' | 'compose' + +export type AnalysisIcuNormalizationType = 'nfc' | 'nfkc' | 'nfkc_cf' + export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase { + type: 'kstem' } export type AnalysisKeepTypesMode = 'include' | 'exclude' export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase { + type: 'keep_types' mode: AnalysisKeepTypesMode types: string[] } export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase { + type: 'keep' keep_words: string[] keep_words_case: boolean keep_words_path: string } +export interface AnalysisKeywordAnalyzer { + type: 'keyword' + version: VersionString +} + export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase { + type: 'keyword_marker' ignore_case: boolean keywords: string[] keywords_path: string @@ -3421,58 +3526,122 @@ export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBas } export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase { + type: 'keyword' buffer_size: integer } +export interface AnalysisKuromojiAnalyzer { + type: 'kuromoji' + mode: AnalysisKuromojiTokenizationMode + user_dictionary: string +} + +export interface AnalysisKuromojiPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { + type: 'kuromoji_part_of_speech' + stoptags: string[] +} + +export interface AnalysisKuromojiReadingFormTokenFilter extends AnalysisTokenFilterBase { + type: 'kuromoji_readingform' + use_romaji: boolean +} + +export interface AnalysisKuromojiStemmerTokenFilter extends AnalysisTokenFilterBase { + type: 'kuromoji_stemmer' + minimum_length: integer +} + +export type 
AnalysisKuromojiTokenizationMode = 'normal' | 'search' | 'extended' + +export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase { + type: 'kuromoji_tokenizer' + discard_punctuation: boolean + mode: AnalysisKuromojiTokenizationMode + nbest_cost: integer + nbest_examples: string + user_dictionary: string + user_dictionary_rules: string[] +} + +export type AnalysisLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Brazilian' | 'Bulgarian' | 'Catalan' | 'Chinese' | 'Cjk' | 'Czech' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'Galician' | 'German' | 'Greek' | 'Hindi' | 'Hungarian' | 'Indonesian' | 'Irish' | 'Italian' | 'Latvian' | 'Norwegian' | 'Persian' | 'Portuguese' | 'Romanian' | 'Russian' | 'Sorani' | 'Spanish' | 'Swedish' | 'Turkish' | 'Thai' + +export interface AnalysisLanguageAnalyzer { + type: 'language' + version: VersionString + language: AnalysisLanguage + stem_exclusion: string[] + stopwords: AnalysisStopWords + stopwords_path: string +} + export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase { + type: 'length' max: integer min: integer } export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase { + type: 'letter' } export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase { + type: 'limit' consume_all_tokens: boolean max_token_count: integer } export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase { + type: 'lowercase' language: string } export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase { + type: 'lowercase' } export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase { + type: 'mapping' mappings: string[] mappings_path: string } export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase { + type: 'multiplexer' filters: string[] preserve_original: boolean } export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase { + type: 'ngram' max_gram: integer min_gram: integer } export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase { + type: 'ngram' custom_token_chars: string max_gram: integer min_gram: integer token_chars: AnalysisTokenChar[] } +export interface AnalysisNoriAnalyzer { + type: 'nori' + version: VersionString + decompound_mode: AnalysisNoriDecompoundMode + stoptags: string[] + user_dictionary: string +} + export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed' export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { + type: 'nori_part_of_speech' stoptags: string[] } export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { + type: 'nori_tokenizer' decompound_mode: AnalysisNoriDecompoundMode discard_punctuation: boolean user_dictionary: string @@ -3480,6 +3649,7 @@ export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { } export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { + type: 'path_hierarchy' buffer_size: integer delimiter: string replacement: string @@ -3487,31 +3657,47 @@ export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { skip: integer } +export interface AnalysisPatternAnalyzer { + type: 'pattern' + version: VersionString + flags: string + lowercase: boolean + pattern: string + stopwords: AnalysisStopWords +} + export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase { + type: 'pattern_capture' patterns: string[] preserve_original: boolean } export interface AnalysisPatternReplaceTokenFilter extends 
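// A minimal sketch showing the new analyzer interfaces in use; all fields
// below are required as declared above, and the values are examples only.
const korean: AnalysisNoriAnalyzer = {
  type: 'nori',
  version: '8.0.0',
  decompound_mode: 'mixed',
  stoptags: ['E', 'J'],
  user_dictionary: 'userdict_ko.txt'
}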
AnalysisTokenFilterBase { + type: 'pattern_replace' flags: string pattern: string replacement: string } export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase { + type: 'porter_stem' } export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase { + type: 'predicate_token_filter' script: Script } export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase { + type: 'remove_duplicates' } export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase { + type: 'reverse' } export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase { + type: 'shingle' filler_token: string max_shingle_size: integer min_shingle_size: integer @@ -3520,37 +3706,61 @@ export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase { token_separator: string } +export interface AnalysisSimpleAnalyzer { + type: 'simple' + version: VersionString +} + export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish' export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { + type: 'snowball' language: AnalysisSnowballLanguage } +export interface AnalysisStandardAnalyzer { + type: 'standard' + max_token_length: integer + stopwords: AnalysisStopWords +} + export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase { + type: 'standard' max_token_length: integer } export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterBase { + type: 'stemmer_override' rules: string[] rules_path: string } export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase { + type: 'stemmer' language: string } +export interface AnalysisStopAnalyzer { + type: 'stop' + version: VersionString + stopwords: AnalysisStopWords + stopwords_path: string +} + export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { + type: 'stop' ignore_case?: boolean remove_trailing?: boolean stopwords: AnalysisStopWords stopwords_path?: string } -export type AnalysisStopWords = string[] +export type AnalysisStopWords = string | string[] export type AnalysisSynonymFormat = 'solr' | 'wordnet' export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase { + type: 'synonym_graph' expand: boolean format: AnalysisSynonymFormat lenient: boolean @@ -3561,6 +3771,7 @@ export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase } export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { + type: 'synonym' expand: boolean format: AnalysisSynonymFormat lenient: boolean @@ -3572,43 +3783,53 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' -export type AnalysisTokenFilter = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | 
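// A minimal sketch of the widened AnalysisStopWords (string | string[]):
// both a predefined list name and an explicit word array now type-check.
const english: AnalysisStandardAnalyzer = {
  type: 'standard',
  max_token_length: 255,
  stopwords: '_english_' // previously only an array such as ['a', 'the'] compiled
}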
AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter +export type AnalysisTokenFilter = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter export interface AnalysisTokenFilterBase { - type: string version?: VersionString } -export type AnalysisTokenizer = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer +export type AnalysisTokenizer = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer export interface AnalysisTokenizerBase { - type: string version?: VersionString } export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase { + type: 'trim' } export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase { + type: 'truncate' length: integer } export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase { + type: 'uax_url_email' max_token_length: integer } export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase { + type: 'unique' only_on_same_position: boolean } export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase { + type: 
'uppercase' +} + +export interface AnalysisWhitespaceAnalyzer { + type: 'whitespace' + version: VersionString } export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase { + type: 'whitespace' max_token_length: integer } export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilterBase { + type: 'word_delimiter_graph' adjust_offsets: boolean catenate_all: boolean catenate_numbers: boolean @@ -3626,6 +3847,7 @@ export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilt } export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBase { + type: 'word_delimiter' catenate_all: boolean catenate_numbers: boolean catenate_words: boolean @@ -3641,6 +3863,12 @@ export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBas type_table_path: string } +export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase { + type: 'aggregate_metric_double' + default_metric: string + metrics: string[] +} + export interface MappingAllField { analyzer: string enabled: boolean @@ -3715,6 +3943,11 @@ export interface MappingDateRangeProperty extends MappingRangePropertyBase { type: 'date_range' } +export interface MappingDenseVectorProperty extends MappingPropertyBase { + type: 'dense_vector' + dims: integer +} + export type MappingDocValuesProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDateProperty | MappingDateNanosProperty | MappingKeywordProperty | MappingNumberProperty | MappingRangeProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingCompletionProperty | MappingGenericProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingShapeProperty | MappingTokenCountProperty | MappingVersionProperty | MappingWildcardProperty | MappingPointProperty export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase { @@ -3728,7 +3961,7 @@ export interface MappingDoubleRangeProperty extends MappingRangePropertyBase { export type MappingDynamicMapping = 'strict' | 'runtime' | 'true' | 'false' export interface MappingDynamicTemplate { - mapping?: MappingPropertyBase + mapping?: MappingProperty match?: string match_mapping_type?: string match_pattern?: MappingMatchType @@ -3743,13 +3976,14 @@ export interface MappingFieldAliasProperty extends MappingPropertyBase { } export interface MappingFieldMapping { + [key: string]: never } export interface MappingFieldNamesField { enabled: boolean } -export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' +export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 
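// A minimal sketch of the two mapping property types added above; the
// dimension count and metric names are example values.
const vector: MappingDenseVectorProperty = { type: 'dense_vector', dims: 512 }
const downsampled: MappingAggregateMetricDoubleProperty = {
  type: 'aggregate_metric_double',
  metrics: ['min', 'max', 'sum', 'value_count'],
  default_metric: 'max'
}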
'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' export interface MappingFlattenedProperty extends MappingPropertyBase { boost?: double @@ -3892,7 +4126,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { type: 'point' } -export type MappingProperty = MappingFlattenedProperty | MappingJoinProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingCoreProperty +export type MappingProperty = MappingFlattenedProperty | MappingJoinProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingDenseVectorProperty | MappingAggregateMetricDoubleProperty | MappingCoreProperty export interface MappingPropertyBase { local_metadata?: Metadata @@ -3964,7 +4198,7 @@ export interface MappingSizeField { export interface MappingSourceField { compress?: boolean compress_threshold?: string - enabled: boolean + enabled?: boolean excludes?: string[] includes?: string[] } @@ -3973,7 +4207,7 @@ export interface MappingSuggestContext { name: Name path?: Field type: string - precision?: integer + precision?: integer | string } export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' @@ -4033,6 +4267,7 @@ export interface MappingVersionProperty extends MappingDocValuesPropertyBase { export interface MappingWildcardProperty extends MappingDocValuesPropertyBase { type: 'wildcard' + null_value?: string } export interface QueryDslBoolQuery extends QueryDslQueryBase { @@ -4092,7 +4327,7 @@ export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys | - { [property: string]: QueryDslDecayPlacement } +{ [property: string]: QueryDslDecayPlacement } export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } @@ -4188,16 +4423,17 @@ export interface QueryDslFuzzyQuery extends QueryDslQueryBase { export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { type?: QueryDslGeoExecution validation_method?: QueryDslGeoValidationMethod + ignore_unmapped?: boolean } export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys | - { [property: string]: QueryDslBoundingBox } +{ [property: string]: QueryDslBoundingBox } export type QueryDslGeoCoordinate = string | double[] | QueryDslThreeDimensionalPoint export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys | - { [property: string]: QueryDslDecayPlacement } +{ [property: string]: QueryDslDecayPlacement } export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } @@ -4208,7 +4444,7 @@ export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { validation_method?: QueryDslGeoValidationMethod } export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys | - { [property: string]: QueryDslGeoLocation } +{ [property: string]: QueryDslGeoLocation } export type QueryDslGeoExecution = 'memory' | 'indexed' @@ -4220,9 +4456,10 @@ export interface QueryDslGeoPolygonPoints { export interface QueryDslGeoPolygonQueryKeys 
extends QueryDslQueryBase { validation_method?: QueryDslGeoValidationMethod + ignore_unmapped?: boolean } export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys | - { [property: string]: QueryDslGeoPolygonPoints } +{ [property: string]: QueryDslGeoPolygonPoints } export interface QueryDslGeoShapeFieldQuery { shape?: GeoShape @@ -4234,7 +4471,7 @@ export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase { ignore_unmapped?: boolean } export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys | - { [property: string]: QueryDslGeoShapeFieldQuery } +{ [property: string]: QueryDslGeoShapeFieldQuery } export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' @@ -4459,7 +4696,7 @@ export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys | - { [property: string]: QueryDslDecayPlacement } +{ [property: string]: QueryDslDecayPlacement } export type QueryDslOperator = 'and' | 'or' @@ -4595,20 +4832,22 @@ export interface QueryDslRangeQueryBase extends QueryDslQueryBase { export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' export interface QueryDslRankFeatureFunction { + [key: string]: never } -export interface QueryDslRankFeatureFunctionLinear extends QueryDslRankFeatureFunction { +export interface QueryDslRankFeatureFunctionLinear { + [key: string]: never } -export interface QueryDslRankFeatureFunctionLogarithm extends QueryDslRankFeatureFunction { +export interface QueryDslRankFeatureFunctionLogarithm { scaling_factor: float } -export interface QueryDslRankFeatureFunctionSaturation extends QueryDslRankFeatureFunction { +export interface QueryDslRankFeatureFunctionSaturation { pivot?: float } -export interface QueryDslRankFeatureFunctionSigmoid extends QueryDslRankFeatureFunction { +export interface QueryDslRankFeatureFunctionSigmoid { pivot: float exponent: float } @@ -4658,7 +4897,7 @@ export interface QueryDslShapeFieldQuery { export interface QueryDslShapeQueryKeys extends QueryDslQueryBase { } export type QueryDslShapeQuery = QueryDslShapeQueryKeys | - { [property: string]: QueryDslShapeFieldQuery } +{ [property: string]: QueryDslShapeFieldQuery } export type QueryDslSimpleQueryStringFlags = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL' @@ -4693,6 +4932,8 @@ export interface QueryDslSpanFirstQuery extends QueryDslQueryBase { match: QueryDslSpanQuery } +export type QueryDslSpanGapQuery = Record + export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase { match: QueryDslQueryContainer } @@ -4719,7 +4960,7 @@ export interface QueryDslSpanQuery { span_containing?: QueryDslSpanContainingQuery field_masking_span?: QueryDslSpanFieldMaskingQuery span_first?: QueryDslSpanFirstQuery - span_gap?: Record + span_gap?: QueryDslSpanGapQuery span_multi?: QueryDslSpanMultiTermQuery span_near?: QueryDslSpanNearQuery span_not?: QueryDslSpanNotQuery @@ -4752,7 +4993,7 @@ export interface QueryDslTermsLookup { export interface QueryDslTermsQueryKeys extends QueryDslQueryBase { } export type QueryDslTermsQuery = QueryDslTermsQueryKeys | - { [property: string]: string[] | long[] | QueryDslTermsLookup } +{ [property: string]: string[] | long[] | QueryDslTermsLookup } export interface QueryDslTermsSetFieldQuery { minimum_should_match_field?: Field @@ -4763,7 +5004,7 @@ export 
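// A short note on the `[key: string]: never` signatures above: they type a
// strictly empty object, so the linear rank_feature function can only be
// spelled {}, while saturation keeps its optional pivot.
const linear: QueryDslRankFeatureFunctionLinear = {}
const saturation: QueryDslRankFeatureFunctionSaturation = { pivot: 8 }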
interface QueryDslTermsSetFieldQuery { export interface QueryDslTermsSetQueryKeys extends QueryDslQueryBase { } export type QueryDslTermsSetQuery = QueryDslTermsSetQueryKeys | - { [property: string]: QueryDslTermsSetFieldQuery } +{ [property: string]: QueryDslTermsSetFieldQuery } export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' @@ -4851,13 +5092,13 @@ export interface AsyncSearchSubmitRequest extends RequestBase { wait_for_completion_timeout?: Time keep_on_completion?: boolean typed_keys?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { aggs?: Record allow_no_indices?: boolean allow_partial_search_results?: boolean analyzer?: string analyze_wildcard?: boolean - batched_reduce_size?: long collapse?: SearchFieldCollapse default_operator?: DefaultOperator df?: string @@ -4870,7 +5111,6 @@ export interface AsyncSearchSubmitRequest extends RequestBase { ignore_unavailable?: boolean indices_boost?: Record[] keep_alive?: Time - keep_on_completion?: boolean lenient?: boolean max_concurrent_shard_requests?: long min_score?: double @@ -4879,7 +5119,6 @@ export interface AsyncSearchSubmitRequest extends RequestBase { profile?: boolean pit?: SearchPointInTimeReference query?: QueryDslQueryContainer - query_on_query_string?: string request_cache?: boolean rescore?: SearchRescore[] routing?: Routing @@ -4901,62 +5140,74 @@ export interface AsyncSearchSubmitRequest extends RequestBase { timeout?: string track_scores?: boolean track_total_hits?: boolean - typed_keys?: boolean version?: boolean - wait_for_completion_timeout?: Time fields?: (Field | DateField)[] + runtime_mappings?: MappingRuntimeFields } } export interface AsyncSearchSubmitResponse extends AsyncSearchAsyncSearchDocumentResponseBase { } -export interface AutoscalingCapacityGetRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface AutoscalingAutoscalingPolicy { + roles: string[] + deciders: Record } -export interface AutoscalingCapacityGetResponse { - stub: integer +export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase { + name: Name } -export interface AutoscalingPolicyDeleteRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface AutoscalingDeleteAutoscalingPolicyResponse extends AcknowledgedResponseBase { } -export interface AutoscalingPolicyDeleteResponse { - stub: integer +export interface AutoscalingGetAutoscalingCapacityAutoscalingCapacity { + node: AutoscalingGetAutoscalingCapacityAutoscalingResources + total: AutoscalingGetAutoscalingCapacityAutoscalingResources } -export interface AutoscalingPolicyGetRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface AutoscalingGetAutoscalingCapacityAutoscalingDecider { + required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity + reason_summary?: string + reason_details?: any } -export interface AutoscalingPolicyGetResponse { - stub: integer +export interface AutoscalingGetAutoscalingCapacityAutoscalingDeciders { + required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity + current_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity + current_nodes: AutoscalingGetAutoscalingCapacityAutoscalingNode[] + deciders: Record } -export interface AutoscalingPolicyPutRequest extends RequestBase { - stub_a: 
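// A minimal submit sketch, assuming a configured `client` instance: the
// fields that duplicated query-string parameters (keep_on_completion,
// typed_keys, wait_for_completion_timeout, ...) were removed from the body
// above and now live only at the top level of the request.
const submitted = await client.asyncSearch.submit({
  index: 'my-index',
  keep_on_completion: true,
  wait_for_completion_timeout: '2s',
  body: { query: { match_all: {} } }
})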
string - stub_b: string - body?: { - stub_c: string - } +export interface AutoscalingGetAutoscalingCapacityAutoscalingNode { + name: NodeName +} + +export interface AutoscalingGetAutoscalingCapacityAutoscalingResources { + storage: integer + memory: integer +} + +export interface AutoscalingGetAutoscalingCapacityRequest extends RequestBase { +} + +export interface AutoscalingGetAutoscalingCapacityResponse { + policies: Record +} + +export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { + name: Name +} + +export type AutoscalingGetAutoscalingPolicyResponse = AutoscalingAutoscalingPolicy + +export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { + name: Name + /** @deprecated The use of the 'body' key has been deprecated, use 'policy' instead. */ + body?: AutoscalingAutoscalingPolicy } -export interface AutoscalingPolicyPutResponse { - stub: integer +export interface AutoscalingPutAutoscalingPolicyResponse extends AcknowledgedResponseBase { } export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { @@ -5041,100 +5292,6 @@ export interface CatCountRequest extends CatCatRequestBase { export type CatCountResponse = CatCountCountRecord[] -export interface CatDataFrameAnalyticsDataFrameAnalyticsRecord { - id?: Id - type?: Type - t?: Type - create_time?: string - ct?: string - createTime?: string - version?: VersionString - v?: VersionString - source_index?: IndexName - si?: IndexName - sourceIndex?: IndexName - dest_index?: IndexName - di?: IndexName - destIndex?: IndexName - description?: string - d?: string - model_memory_limit?: string - mml?: string - modelMemoryLimit?: string - state?: string - s?: string - failure_reason?: string - fr?: string - failureReason?: string - progress?: string - p?: string - assignment_explanation?: string - ae?: string - assignmentExplanation?: string - 'node.id'?: Id - ni?: Id - nodeId?: Id - 'node.name'?: Name - nn?: Name - nodeName?: Name - 'node.ephemeral_id'?: Id - ne?: Id - nodeEphemeralId?: Id - 'node.address'?: string - na?: string - nodeAddress?: string -} - -export interface CatDataFrameAnalyticsRequest extends CatCatRequestBase { - id?: Id - allow_no_match?: boolean - bytes?: Bytes -} - -export type CatDataFrameAnalyticsResponse = CatDataFrameAnalyticsDataFrameAnalyticsRecord[] - -export interface CatDatafeedsDatafeedsRecord { - id?: string - state?: MlDatafeedState - s?: MlDatafeedState - assignment_explanation?: string - ae?: string - 'buckets.count'?: string - bc?: string - bucketsCount?: string - 'search.count'?: string - sc?: string - searchCount?: string - 'search.time'?: string - st?: string - searchTime?: string - 'search.bucket_avg'?: string - sba?: string - searchBucketAvg?: string - 'search.exp_avg_hour'?: string - seah?: string - searchExpAvgHour?: string - 'node.id'?: string - ni?: string - nodeId?: string - 'node.name'?: string - nn?: string - nodeName?: string - 'node.ephemeral_id'?: string - ne?: string - nodeEphemeralId?: string - 'node.address'?: string - na?: string - nodeAddress?: string -} - -export interface CatDatafeedsRequest extends CatCatRequestBase { - datafeed_id?: Id - allow_no_datafeeds?: boolean -} - -export type CatDatafeedsResponse = CatDatafeedsDatafeedsRecord[] - export interface CatFielddataFielddataRecord { id?: string host?: string @@ -5520,7 +5677,115 @@ export interface CatIndicesRequest extends CatCatRequestBase { export type CatIndicesResponse = CatIndicesIndicesRecord[] -export interface CatJobsJobsRecord { +export interface 
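// A minimal sketch, assuming a configured `client`: the autoscaling stubs
// above are replaced by concrete shapes, so a policy can be typed before it
// is sent (the decider payload is an open record).
const policy: AutoscalingAutoscalingPolicy = {
  roles: ['data_hot'],
  deciders: { fixed: {} }
}
await client.autoscaling.putAutoscalingPolicy({ name: 'hot-tier', body: policy })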
CatMasterMasterRecord { + id?: string + host?: string + h?: string + ip?: string + node?: string + n?: string +} + +export interface CatMasterRequest extends CatCatRequestBase { +} + +export type CatMasterResponse = CatMasterMasterRecord[] + +export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { + id?: Id + type?: Type + t?: Type + create_time?: string + ct?: string + createTime?: string + version?: VersionString + v?: VersionString + source_index?: IndexName + si?: IndexName + sourceIndex?: IndexName + dest_index?: IndexName + di?: IndexName + destIndex?: IndexName + description?: string + d?: string + model_memory_limit?: string + mml?: string + modelMemoryLimit?: string + state?: string + s?: string + failure_reason?: string + fr?: string + failureReason?: string + progress?: string + p?: string + assignment_explanation?: string + ae?: string + assignmentExplanation?: string + 'node.id'?: Id + ni?: Id + nodeId?: Id + 'node.name'?: Name + nn?: Name + nodeName?: Name + 'node.ephemeral_id'?: Id + ne?: Id + nodeEphemeralId?: Id + 'node.address'?: string + na?: string + nodeAddress?: string +} + +export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { + id?: Id + allow_no_match?: boolean + bytes?: Bytes +} + +export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] + +export interface CatMlDatafeedsDatafeedsRecord { + id?: string + state?: MlDatafeedState + s?: MlDatafeedState + assignment_explanation?: string + ae?: string + 'buckets.count'?: string + bc?: string + bucketsCount?: string + 'search.count'?: string + sc?: string + searchCount?: string + 'search.time'?: string + st?: string + searchTime?: string + 'search.bucket_avg'?: string + sba?: string + searchBucketAvg?: string + 'search.exp_avg_hour'?: string + seah?: string + searchExpAvgHour?: string + 'node.id'?: string + ni?: string + nodeId?: string + 'node.name'?: string + nn?: string + nodeName?: string + 'node.ephemeral_id'?: string + ne?: string + nodeEphemeralId?: string + 'node.address'?: string + na?: string + nodeAddress?: string +} + +export interface CatMlDatafeedsRequest extends CatCatRequestBase { + datafeed_id?: Id + allow_no_datafeeds?: boolean +} + +export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] + +export interface CatMlJobsJobsRecord { id?: Id state?: MlJobState s?: MlJobState @@ -5697,29 +5962,74 @@ export interface CatJobsJobsRecord { bucketsTimeExpAvgHour?: string } -export interface CatJobsRequest extends CatCatRequestBase { +export interface CatMlJobsRequest extends CatCatRequestBase { job_id?: Id allow_no_jobs?: boolean bytes?: Bytes } -export type CatJobsResponse = CatJobsJobsRecord[] +export type CatMlJobsResponse = CatMlJobsJobsRecord[] -export interface CatMasterMasterRecord { - id?: string - host?: string - h?: string - ip?: string - node?: string - n?: string +export interface CatMlTrainedModelsRequest extends CatCatRequestBase { + model_id?: Id + allow_no_match?: boolean + bytes?: Bytes + from?: integer + size?: integer } -export interface CatMasterRequest extends CatCatRequestBase { -} +export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[] -export type CatMasterResponse = CatMasterMasterRecord[] +export interface CatMlTrainedModelsTrainedModelsRecord { + id?: Id + created_by?: string + c?: string + createdBy?: string + heap_size?: ByteSize + hs?: ByteSize + modelHeapSize?: ByteSize + operations?: string + o?: string + modelOperations?: string + license?: string + l?: string + 
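// A minimal sketch, assuming a configured `client`: the cat ML record types
// are renamed above (CatJobs* -> CatMlJobs*, and so on) to line up with the
// cat.ml* client methods; each response body is an array of records.
const jobs = await client.cat.mlJobs({ allow_no_jobs: true, format: 'json' })
const models = await client.cat.mlTrainedModels({ from: 0, size: 100, format: 'json' })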
create_time?: DateString + ct?: DateString + version?: VersionString + v?: VersionString + description?: string + d?: string + 'ingest.pipelines'?: string + ip?: string + ingestPipelines?: string + 'ingest.count'?: string + ic?: string + ingestCount?: string + 'ingest.time'?: string + it?: string + ingestTime?: string + 'ingest.current'?: string + icurr?: string + ingestCurrent?: string + 'ingest.failed'?: string + if?: string + ingestFailed?: string + 'data_frame.id'?: string + dfid?: string + dataFrameAnalytics?: string + 'data_frame.create_time'?: string + dft?: string + dataFrameAnalyticsTime?: string + 'data_frame.source_index'?: string + dfsi?: string + dataFrameAnalyticsSrcIndex?: string + 'data_frame.analysis'?: string + dfa?: string + dataFrameAnalyticsAnalysis?: string + type?: string +} -export interface CatNodeAttributesNodeAttributesRecord { +export interface CatNodeattrsNodeAttributesRecord { node?: string id?: string pid?: string @@ -5732,10 +6042,10 @@ export interface CatNodeAttributesNodeAttributesRecord { value?: string } -export interface CatNodeAttributesRequest extends CatCatRequestBase { +export interface CatNodeattrsRequest extends CatCatRequestBase { } -export type CatNodeAttributesResponse = CatNodeAttributesNodeAttributesRecord[] +export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] export interface CatNodesNodesRecord { id?: Id @@ -6553,64 +6863,6 @@ export interface CatThreadPoolThreadPoolRecord { ka?: string } -export interface CatTrainedModelsRequest extends CatCatRequestBase { - model_id?: Id - allow_no_match?: boolean - bytes?: Bytes - from?: integer - size?: integer -} - -export type CatTrainedModelsResponse = CatTrainedModelsTrainedModelsRecord[] - -export interface CatTrainedModelsTrainedModelsRecord { - id?: Id - created_by?: string - c?: string - createdBy?: string - heap_size?: ByteSize - hs?: ByteSize - modelHeapSize?: ByteSize - operations?: string - o?: string - modelOperations?: string - license?: string - l?: string - create_time?: DateString - ct?: DateString - version?: VersionString - v?: VersionString - description?: string - d?: string - 'ingest.pipelines'?: string - ip?: string - ingestPipelines?: string - 'ingest.count'?: string - ic?: string - ingestCount?: string - 'ingest.time'?: string - it?: string - ingestTime?: string - 'ingest.current'?: string - icurr?: string - ingestCurrent?: string - 'ingest.failed'?: string - if?: string - ingestFailed?: string - 'data_frame.id'?: string - dfid?: string - dataFrameAnalytics?: string - 'data_frame.create_time'?: string - dft?: string - dataFrameAnalyticsTime?: string - 'data_frame.source_index'?: string - dfsi?: string - dataFrameAnalyticsSrcIndex?: string - 'data_frame.analysis'?: string - dfa?: string - dataFrameAnalyticsAnalysis?: string -} - export interface CatTransformsRequest extends CatCatRequestBase { transform_id?: Id allow_no_match?: boolean @@ -6738,9 +6990,17 @@ export interface CcrShardStats { write_buffer_size_in_bytes: ByteSize } -export interface CcrCreateFollowIndexRequest extends RequestBase { +export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { + name: Name +} + +export interface CcrDeleteAutoFollowPatternResponse extends AcknowledgedResponseBase { +} + +export interface CcrFollowRequest extends RequestBase { index: IndexName wait_for_active_shards?: WaitForActiveShards + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { leader_index?: IndexName max_outstanding_read_requests?: long @@ -6757,27 +7017,12 @@ export interface CcrCreateFollowIndexRequest extends RequestBase { } } -export interface CcrCreateFollowIndexResponse { +export interface CcrFollowResponse { follow_index_created: boolean follow_index_shards_acked: boolean index_following_started: boolean } -export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { - name: Name -} - -export interface CcrDeleteAutoFollowPatternResponse extends AcknowledgedResponseBase { -} - -export interface CcrFollowIndexStatsRequest extends RequestBase { - index: Indices -} - -export interface CcrFollowIndexStatsResponse { - indices: CcrFollowIndexStats[] -} - export interface CcrFollowInfoFollowerIndex { follower_index: IndexName leader_index: IndexName @@ -6809,8 +7054,17 @@ export interface CcrFollowInfoResponse { follower_indices: CcrFollowInfoFollowerIndex[] } -export interface CcrForgetFollowerIndexRequest extends RequestBase { +export interface CcrFollowStatsRequest extends RequestBase { + index: Indices +} + +export interface CcrFollowStatsResponse { + indices: CcrFollowIndexStats[] +} + +export interface CcrForgetFollowerRequest extends RequestBase { index: IndexName + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { follower_cluster?: string follower_index?: IndexName @@ -6819,7 +7073,7 @@ export interface CcrForgetFollowerIndexRequest extends RequestBase { } } -export interface CcrForgetFollowerIndexResponse { +export interface CcrForgetFollowerResponse { _shards: ShardStatistics } @@ -6833,6 +7087,7 @@ export interface CcrGetAutoFollowPatternAutoFollowPatternSummary { remote_cluster: string follow_index_pattern?: IndexPattern leader_index_patterns: IndexPatterns + leader_index_exclusion_patterns: IndexPatterns max_outstanding_read_requests: integer } @@ -6851,19 +7106,21 @@ export interface CcrPauseAutoFollowPatternRequest extends RequestBase { export interface CcrPauseAutoFollowPatternResponse extends AcknowledgedResponseBase { } -export interface CcrPauseFollowIndexRequest extends RequestBase { +export interface CcrPauseFollowRequest extends RequestBase { index: IndexName } -export interface CcrPauseFollowIndexResponse extends AcknowledgedResponseBase { +export interface CcrPauseFollowResponse extends AcknowledgedResponseBase { } export interface CcrPutAutoFollowPatternRequest extends RequestBase { name: Name + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { remote_cluster: string follow_index_pattern?: IndexPattern leader_index_patterns?: IndexPatterns + leader_index_exclusion_patterns?: IndexPatterns max_outstanding_read_requests?: integer settings?: Record max_outstanding_write_requests?: integer @@ -6888,8 +7145,9 @@ export interface CcrResumeAutoFollowPatternRequest extends RequestBase { export interface CcrResumeAutoFollowPatternResponse extends AcknowledgedResponseBase { } -export interface CcrResumeFollowIndexRequest extends RequestBase { +export interface CcrResumeFollowRequest extends RequestBase { index: IndexName + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
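// A minimal sketch, assuming a configured `client`: the Ccr*Index request
// names above now mirror the API methods (ccr.follow, ccr.followStats,
// ccr.forgetFollower, ccr.pauseFollow, ...); body fields follow the CCR API.
const followed = await client.ccr.follow({
  index: 'follower-index',
  wait_for_active_shards: '1',
  body: { remote_cluster: 'leader', leader_index: 'leader-index' }
})
// followed.body carries follow_index_created / follow_index_shards_acked /
// index_following_started, per CcrFollowResponse above.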
*/ body?: { max_outstanding_read_requests?: long max_outstanding_write_requests?: long @@ -6904,7 +7162,7 @@ export interface CcrResumeFollowIndexRequest extends RequestBase { } } -export interface CcrResumeFollowIndexResponse extends AcknowledgedResponseBase { +export interface CcrResumeFollowResponse extends AcknowledgedResponseBase { } export interface CcrStatsAutoFollowStats { @@ -6933,104 +7191,11 @@ export interface CcrStatsResponse { follow_stats: CcrStatsFollowStats } -export interface CcrUnfollowIndexRequest extends RequestBase { +export interface CcrUnfollowRequest extends RequestBase { index: IndexName } -export interface CcrUnfollowIndexResponse extends AcknowledgedResponseBase { -} - -export interface ClusterClusterStateBlockIndex { - description?: string - retryable?: boolean - levels?: string[] - aliases?: IndexAlias[] - aliases_version?: VersionNumber - version?: VersionNumber - mapping_version?: VersionNumber - settings_version?: VersionNumber - routing_num_shards?: VersionNumber - state?: string - settings?: Record - in_sync_allocations?: Record - primary_terms?: Record - mappings?: Record - rollover_info?: Record - timestamp_range?: Record - system?: boolean -} - -export interface ClusterClusterStateDeletedSnapshots { - snapshot_deletions: string[] -} - -export interface ClusterClusterStateIndexLifecycle { - policies: Record - operation_mode: LifecycleOperationMode -} - -export interface ClusterClusterStateIndexLifecyclePolicy { - phases: IlmPhases -} - -export interface ClusterClusterStateIndexLifecycleSummary { - policy: ClusterClusterStateIndexLifecyclePolicy - headers: HttpHeaders - version: VersionNumber - modified_date: long - modified_date_string: DateString -} - -export interface ClusterClusterStateIngest { - pipeline: ClusterClusterStateIngestPipeline[] -} - -export interface ClusterClusterStateIngestPipeline { - id: Id - config: ClusterClusterStateIngestPipelineConfig -} - -export interface ClusterClusterStateIngestPipelineConfig { - description?: string - version?: VersionNumber - processors: IngestProcessorContainer[] -} - -export interface ClusterClusterStateMetadata { - cluster_uuid: Uuid - cluster_uuid_committed: boolean - templates: ClusterClusterStateMetadataTemplate - indices?: Record - 'index-graveyard': ClusterClusterStateMetadataIndexGraveyard - cluster_coordination: ClusterClusterStateMetadataClusterCoordination - ingest?: ClusterClusterStateIngest - repositories?: Record - component_template?: Record - index_template?: Record - index_lifecycle?: ClusterClusterStateIndexLifecycle -} - -export interface ClusterClusterStateMetadataClusterCoordination { - term: integer - last_committed_config: string[] - last_accepted_config: string[] - voting_config_exclusions: ClusterVotingConfigExclusionsItem[] -} - -export interface ClusterClusterStateMetadataIndexGraveyard { - tombstones: ClusterTombstone[] -} - -export interface ClusterClusterStateMetadataTemplate { -} - -export interface ClusterClusterStateRoutingNodes { - unassigned: NodeShard[] - nodes: Record -} - -export interface ClusterClusterStateSnapshots { - snapshots: SnapshotStatus[] +export interface CcrUnfollowResponse extends AcknowledgedResponseBase { } export type ClusterClusterStatus = 'green' | 'yellow' | 'red' @@ -7054,22 +7219,6 @@ export interface ClusterComponentTemplateSummary { aliases?: Record } -export interface ClusterTombstone { - index: ClusterTombstoneIndex - delete_date?: DateString - delete_date_in_millis: long -} - -export interface ClusterTombstoneIndex { - index_name: Name - 
index_uuid: Uuid -} - -export interface ClusterVotingConfigExclusionsItem { - node_id: Id - node_name: Name -} - export interface ClusterAllocationExplainAllocationDecision { decider: string decision: ClusterAllocationExplainAllocationExplainDecision @@ -7134,6 +7283,7 @@ export interface ClusterAllocationExplainNodeDiskUsage { export interface ClusterAllocationExplainRequest extends RequestBase { include_disk_info?: boolean include_yes_decisions?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { current_node?: string index?: IndexName @@ -7174,6 +7324,7 @@ export interface ClusterAllocationExplainResponse { remaining_delay_in_millis?: long shard: integer unassigned_info?: ClusterAllocationExplainUnassignedInformation + note?: string } export interface ClusterAllocationExplainUnassignedInformation { @@ -7198,26 +7349,18 @@ export interface ClusterDeleteComponentTemplateResponse extends AcknowledgedResp } export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { - body?: { - stub: string - } + wait_for_removal?: boolean } -export interface ClusterDeleteVotingConfigExclusionsResponse { - stub: integer -} +export type ClusterDeleteVotingConfigExclusionsResponse = boolean export interface ClusterExistsComponentTemplateRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } + name: Names + master_timeout?: Time + local?: boolean } -export interface ClusterExistsComponentTemplateResponse { - stub: integer -} +export type ClusterExistsComponentTemplateResponse = boolean export interface ClusterGetComponentTemplateRequest extends RequestBase { name?: Name @@ -7315,10 +7458,19 @@ export interface ClusterPendingTasksResponse { tasks: ClusterPendingTasksPendingTask[] } +export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase { + node_names?: Names + node_ids?: Ids + timeout?: Time +} + +export type ClusterPostVotingConfigExclusionsResponse = boolean + export interface ClusterPutComponentTemplateRequest extends RequestBase { name: Name create?: boolean master_timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { template: IndicesIndexState aliases?: Record @@ -7336,6 +7488,7 @@ export interface ClusterPutSettingsRequest extends RequestBase { flat_settings?: boolean master_timeout?: Time timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
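// A minimal sketch, assuming a configured `client`: voting-config exclusions
// are now modelled above as a POST/DELETE pair, each resolving to a plain
// boolean body.
await client.cluster.postVotingConfigExclusions({ node_names: 'node-1,node-2' })
await client.cluster.deleteVotingConfigExclusions({ wait_for_removal: true })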
*/ body?: { persistent?: Record transient?: Record @@ -7348,30 +7501,30 @@ export interface ClusterPutSettingsResponse { transient: Record } -export interface ClusterPutVotingConfigExclusionsRequest extends RequestBase { - node_names?: Names - node_ids?: Ids - timeout?: Time - wait_for_removal?: boolean -} +export type ClusterRemoteInfoClusterRemoteInfo = ClusterRemoteInfoClusterRemoteSniffInfo | ClusterRemoteInfoClusterRemoteProxyInfo -export interface ClusterPutVotingConfigExclusionsResponse { - stub: integer +export interface ClusterRemoteInfoClusterRemoteProxyInfo { + mode: 'proxy' + connected: boolean + initial_connect_timeout: Time + skip_unavailable: boolean + proxy_address: string + server_name: string + num_proxy_sockets_connected: integer + max_proxy_socket_connections: integer } -export interface ClusterRemoteInfoClusterRemoteInfo { +export interface ClusterRemoteInfoClusterRemoteSniffInfo { + mode: 'sniff' connected: boolean - initial_connect_timeout: Time max_connections_per_cluster: integer num_nodes_connected: long - seeds: string[] + initial_connect_timeout: Time skip_unavailable: boolean + seeds: string[] } export interface ClusterRemoteInfoRequest extends RequestBase { - body?: { - stub: string - } } export interface ClusterRemoteInfoResponse extends DictionaryResponseBase { @@ -7419,6 +7572,7 @@ export interface ClusterRerouteRequest extends RequestBase { retry_failed?: boolean master_timeout?: Time timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { commands?: ClusterRerouteCommand[] } @@ -7445,28 +7599,9 @@ export interface ClusterRerouteRerouteParameters { to_node?: NodeName } -export interface ClusterRerouteRerouteState { - cluster_uuid: Uuid - state_uuid?: Uuid - master_node?: string - version?: VersionNumber - blocks?: EmptyObject - nodes?: Record - routing_table?: Record - routing_nodes?: ClusterClusterStateRoutingNodes - security_tokens?: Record - snapshots?: ClusterClusterStateSnapshots - snapshot_deletions?: ClusterClusterStateDeletedSnapshots - metadata?: ClusterClusterStateMetadata -} - -export interface ClusterRerouteResponse extends AcknowledgedResponseBase { +export interface ClusterRerouteResponse { explanations?: ClusterRerouteRerouteExplanation[] - state: ClusterRerouteRerouteState -} - -export interface ClusterStateClusterStateBlocks { - indices?: Record> + state: any } export interface ClusterStateRequest extends RequestBase { @@ -7482,21 +7617,7 @@ export interface ClusterStateRequest extends RequestBase { wait_for_timeout?: Time } -export interface ClusterStateResponse { - cluster_name: Name - cluster_uuid: Uuid - master_node?: string - state?: string[] - state_uuid?: Uuid - version?: VersionNumber - blocks?: ClusterStateClusterStateBlocks - metadata?: ClusterClusterStateMetadata - nodes?: Record - routing_table?: Record - routing_nodes?: ClusterClusterStateRoutingNodes - snapshots?: ClusterClusterStateSnapshots - snapshot_deletions?: ClusterClusterStateDeletedSnapshots -} +export type ClusterStateResponse = any export interface ClusterStatsCharFilterTypes { char_filter_types: ClusterStatsFieldTypes[] @@ -7610,7 +7731,7 @@ export interface ClusterStatsClusterOperatingSystem { available_processors: integer mem: ClusterStatsOperatingSystemMemoryInfo names: ClusterStatsClusterOperatingSystemName[] - pretty_names: ClusterStatsClusterOperatingSystemName[] + pretty_names: ClusterStatsClusterOperatingSystemPrettyName[] architectures?: 
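// A minimal sketch of the remote-info union above: `mode` discriminates the
// sniff and proxy variants, so the connection targets can be read safely.
function remoteTargets (info: ClusterRemoteInfoClusterRemoteInfo): string[] {
  return info.mode === 'sniff' ? info.seeds : [info.proxy_address]
}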
ClusterStatsClusterOperatingSystemArchitecture[] } @@ -7624,6 +7745,11 @@ export interface ClusterStatsClusterOperatingSystemName { name: Name } +export interface ClusterStatsClusterOperatingSystemPrettyName { + count: integer + pretty_name: Name +} + export interface ClusterStatsClusterProcess { cpu: ClusterStatsClusterProcessCpu open_file_descriptors: ClusterStatsClusterProcessOpenFileDescriptors @@ -7717,40 +7843,38 @@ export interface ClusterStatsRuntimeFieldTypes { doc_total: integer } -export interface DanglingIndicesIndexDeleteRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { + index_uuid: Uuid + accept_data_loss: boolean + master_timeout?: Time + timeout?: Time } -export interface DanglingIndicesIndexDeleteResponse { - stub: integer +export interface DanglingIndicesDeleteDanglingIndexResponse extends AcknowledgedResponseBase { } -export interface DanglingIndicesIndexImportRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { + index_uuid: Uuid + accept_data_loss: boolean + master_timeout?: Time + timeout?: Time } -export interface DanglingIndicesIndexImportResponse { - stub: integer +export interface DanglingIndicesImportDanglingIndexResponse extends AcknowledgedResponseBase { } -export interface DanglingIndicesIndicesListRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface DanglingIndicesListDanglingIndicesDanglingIndex { + index_name: string + index_uuid: string + creation_date_millis: EpochMillis + node_ids: Ids +} + +export interface DanglingIndicesListDanglingIndicesRequest extends RequestBase { } -export interface DanglingIndicesIndicesListResponse { - stub: integer +export interface DanglingIndicesListDanglingIndicesResponse { + dangling_indices: DanglingIndicesListDanglingIndicesDanglingIndex[] } export interface EnrichConfiguration { @@ -7803,6 +7927,7 @@ export interface EnrichGetPolicyResponse { export interface EnrichPutPolicyRequest extends RequestBase { name: Name + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
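// A minimal sketch, assuming a configured `client` (the UUID is a
// placeholder): the dangling-index stubs above become concrete types, and
// both delete and import require an explicit accept_data_loss flag.
const dangling = await client.danglingIndices.listDanglingIndices()
await client.danglingIndices.importDanglingIndex({
  index_uuid: 'zmM4e0JtBkeUjiHD-MihPQ', // placeholder value
  accept_data_loss: true
})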
*/ body?: { geo_match?: EnrichPolicy match?: EnrichPolicy @@ -7812,6 +7937,14 @@ export interface EnrichPutPolicyRequest extends RequestBase { export interface EnrichPutPolicyResponse extends AcknowledgedResponseBase { } +export interface EnrichStatsCacheStats { + node_id: Id + count: integer + hits: integer + misses: integer + evictions: integer +} + export interface EnrichStatsCoordinatorStats { executed_searches_total: long node_id: Id @@ -7822,7 +7955,7 @@ export interface EnrichStatsCoordinatorStats { export interface EnrichStatsExecutingPolicy { name: Name - task: TaskInfo + task: TasksInfo } export interface EnrichStatsRequest extends RequestBase { @@ -7831,6 +7964,7 @@ export interface EnrichStatsRequest extends RequestBase { export interface EnrichStatsResponse { coordinator_stats: EnrichStatsCoordinatorStats[] executing_policies: EnrichStatsExecutingPolicy[] + cache_stats?: EnrichStatsCacheStats[] } export interface EqlEqlHits { @@ -7897,6 +8031,7 @@ export interface EqlSearchRequest extends RequestBase { keep_alive?: Time keep_on_completion?: boolean wait_for_completion_timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { query: string case_sensitive?: boolean @@ -7924,28 +8059,23 @@ export interface EqlSearchSearchFieldFormatted { format?: string } +export interface FeaturesFeature { + name: string + description: string +} + export interface FeaturesGetFeaturesRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } } export interface FeaturesGetFeaturesResponse { - stub: integer + features: FeaturesFeature[] } export interface FeaturesResetFeaturesRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } } export interface FeaturesResetFeaturesResponse { - stub: integer + features: FeaturesFeature[] } export interface GraphConnection { @@ -7996,9 +8126,9 @@ export interface GraphVertexInclude { export interface GraphExploreRequest extends RequestBase { index: Indices - type?: Types routing?: Routing timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
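// A minimal sketch, assuming a configured `client` whose responses expose a
// `body` property: the features stubs above become concrete, and both
// endpoints return an array of { name, description } entries.
const snapshottable = await client.features.getFeatures()
for (const feature of snapshottable.body.features) {
  console.log(`${feature.name}: ${feature.description}`)
}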
*/ body?: { connections?: GraphHop controls?: GraphExploreControls @@ -8016,6 +8146,7 @@ export interface GraphExploreResponse { } export interface IlmAction { + [key: string]: never } export interface IlmPhase { @@ -8036,14 +8167,15 @@ export interface IlmPolicy { } export interface IlmDeleteLifecycleRequest extends RequestBase { - policy?: Name - policy_id: Id + policy: Name } export interface IlmDeleteLifecycleResponse extends AcknowledgedResponseBase { } -export interface IlmExplainLifecycleLifecycleExplain { +export type IlmExplainLifecycleLifecycleExplain = IlmExplainLifecycleLifecycleExplainManaged | IlmExplainLifecycleLifecycleExplainUnmanaged + +export interface IlmExplainLifecycleLifecycleExplainManaged { action: Name action_time_millis: EpochMillis age: Time @@ -8052,7 +8184,7 @@ export interface IlmExplainLifecycleLifecycleExplain { index: IndexName is_auto_retryable_error?: boolean lifecycle_date_millis: EpochMillis - managed: boolean + managed: true phase: Name phase_time_millis: EpochMillis policy: Name @@ -8068,13 +8200,9 @@ export interface IlmExplainLifecycleLifecycleExplainPhaseExecution { modified_date_in_millis: EpochMillis } -export interface IlmExplainLifecycleLifecycleExplainProject { - project: IlmExplainLifecycleLifecycleExplainProjectSummary -} - -export interface IlmExplainLifecycleLifecycleExplainProjectSummary { +export interface IlmExplainLifecycleLifecycleExplainUnmanaged { index: IndexName - managed: boolean + managed: false } export interface IlmExplainLifecycleRequest extends RequestBase { @@ -8084,7 +8212,7 @@ export interface IlmExplainLifecycleRequest extends RequestBase { } export interface IlmExplainLifecycleResponse { - indices: Record | IlmExplainLifecycleLifecycleExplainProject + indices: Record } export interface IlmGetLifecycleLifecycle { @@ -8095,7 +8223,6 @@ export interface IlmGetLifecycleLifecycle { export interface IlmGetLifecycleRequest extends RequestBase { policy?: Name - policy_id?: Id } export interface IlmGetLifecycleResponse extends DictionaryResponseBase { @@ -8110,6 +8237,7 @@ export interface IlmGetStatusResponse { export interface IlmMoveToStepRequest extends RequestBase { index: IndexName + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { current_step?: IlmMoveToStepStepKey next_step?: IlmMoveToStepStepKey @@ -8126,8 +8254,8 @@ export interface IlmMoveToStepStepKey { } export interface IlmPutLifecycleRequest extends RequestBase { - policy?: Name - policy_id?: Id + policy: Name + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
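// A minimal sketch of the explain union above: `managed` now discriminates
// the variants, so phase/policy access is compiler-checked.
function describeLifecycle (explain: IlmExplainLifecycleLifecycleExplain): string {
  return explain.managed
    ? `${explain.index}: ${explain.policy} (${explain.phase})`
    : `${explain.index}: unmanaged`
}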
*/ body?: { policy?: IlmPolicy } @@ -8153,18 +8281,16 @@ export interface IlmRetryResponse extends AcknowledgedResponseBase { } export interface IlmStartRequest extends RequestBase { - body?: { - stub: boolean - } + master_timeout?: Time + timeout?: Time } export interface IlmStartResponse extends AcknowledgedResponseBase { } export interface IlmStopRequest extends RequestBase { - body?: { - stub: boolean - } + master_timeout?: Time + timeout?: Time } export interface IlmStopResponse extends AcknowledgedResponseBase { @@ -8187,7 +8313,7 @@ export interface IndicesAliasDefinition { search_routing?: string } -export type IndicesDataStreamHealthStatus = 'GREEN' | 'green' | 'YELLOW' | 'yellow' | 'RED' | 'red' +export type IndicesDataStreamHealthStatus = 'green' | 'yellow' | 'red' export interface IndicesFielddataFrequencyFilter { max: double @@ -8329,7 +8455,10 @@ export interface IndicesIndexSettings { } export interface IndicesIndexSettingsAnalysis { + analyzer?: Record char_filter?: Record + filter?: Record + normalizer?: Record } export interface IndicesIndexSettingsLifecycle { @@ -8339,7 +8468,8 @@ export interface IndicesIndexSettingsLifecycle { export interface IndicesIndexState { aliases?: Record mappings?: MappingTypeMapping - settings: IndicesIndexSettings | IndicesIndexStatePrefixedSettings + settings?: IndicesIndexSettings | IndicesIndexStatePrefixedSettings + data_stream?: DataStreamName } export interface IndicesIndexStatePrefixedSettings { @@ -8439,6 +8569,7 @@ export interface IndicesAnalyzeExplainAnalyzeToken { export interface IndicesAnalyzeRequest extends RequestBase { index?: IndexName + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { analyzer?: string attributes?: string[] @@ -8484,6 +8615,7 @@ export interface IndicesCloneRequest extends RequestBase { master_timeout?: Time timeout?: Time wait_for_active_shards?: WaitForActiveShards + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { aliases?: Record settings?: Record @@ -8525,6 +8657,7 @@ export interface IndicesCreateRequest extends RequestBase { master_timeout?: Time timeout?: Time wait_for_active_shards?: WaitForActiveShards + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
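// A minimal sketch of the analysis settings added above, assuming the
// analyzer/filter records are keyed by component name: custom analysis
// chains can now be typed end to end.
const analysis: IndicesIndexSettingsAnalysis = {
  analyzer: {
    english_std: { type: 'standard', max_token_length: 255, stopwords: '_english_' }
  },
  filter: {
    bigrams: { type: 'ngram', min_gram: 2, max_gram: 2 }
  }
}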
*/ body?: { aliases?: Record mappings?: Record | MappingTypeMapping @@ -8589,7 +8722,8 @@ export interface IndicesDeleteAliasResponse extends AcknowledgedResponseBase { } export interface IndicesDeleteDataStreamRequest extends RequestBase { - name: DataStreamName + name: DataStreamNames + expand_wildcards?: ExpandWildcards } export interface IndicesDeleteDataStreamResponse extends AcknowledgedResponseBase { @@ -8611,6 +8745,20 @@ export interface IndicesDeleteTemplateRequest extends RequestBase { export interface IndicesDeleteTemplateResponse extends AcknowledgedResponseBase { } +export interface IndicesDiskUsageRequest extends RequestBase { + index: IndexName + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + flush?: boolean + ignore_unavailable?: boolean + master_timeout?: TimeUnit + timeout?: TimeUnit + run_expensive_tasks?: boolean + wait_for_active_shards?: string +} + +export type IndicesDiskUsageResponse = any + export interface IndicesExistsRequest extends RequestBase { index: Indices allow_no_indices?: boolean @@ -8673,17 +8821,6 @@ export interface IndicesFlushRequest extends RequestBase { export interface IndicesFlushResponse extends ShardsOperationResponseBase { } -export interface IndicesFlushSyncedRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean -} - -export interface IndicesFlushSyncedResponse extends DictionaryResponseBase { - _shards: ShardStatistics -} - export interface IndicesForcemergeRequest extends RequestBase { index?: Indices allow_no_indices?: boolean @@ -8765,7 +8902,7 @@ export interface IndicesGetDataStreamIndicesGetDataStreamItemTimestampField { } export interface IndicesGetDataStreamRequest extends RequestBase { - name?: IndexName + name?: DataStreamNames expand_wildcards?: ExpandWildcards } @@ -8776,7 +8913,6 @@ export interface IndicesGetDataStreamResponse { export interface IndicesGetFieldMappingRequest extends RequestBase { fields: Fields index?: Indices - type?: Types allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean @@ -8817,11 +8953,9 @@ export interface IndicesGetIndexTemplateIndexTemplateSummary { export interface IndicesGetIndexTemplateRequest extends RequestBase { name?: Name local?: boolean - body?: { - flat_settings?: boolean - include_type_name?: boolean - master_timeout?: Time - } + flat_settings?: boolean + include_type_name?: boolean + master_timeout?: Time } export interface IndicesGetIndexTemplateResponse { @@ -8835,7 +8969,6 @@ export interface IndicesGetMappingIndexMappingRecord { export interface IndicesGetMappingRequest extends RequestBase { index?: Indices - type?: Types allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean @@ -8873,15 +9006,6 @@ export interface IndicesGetTemplateRequest extends RequestBase { export interface IndicesGetTemplateResponse extends DictionaryResponseBase { } -export interface IndicesGetUpgradeRequest extends RequestBase { - stub: string -} - -export interface IndicesGetUpgradeResponse { - overlapping?: IndicesOverlappingIndexTemplate[] - template?: IndicesTemplateMapping -} - export interface IndicesMigrateToDataStreamRequest extends RequestBase { name: IndexName } @@ -8907,15 +9031,14 @@ export interface IndicesPromoteDataStreamRequest extends RequestBase { name: IndexName } -export interface IndicesPromoteDataStreamResponse { - stub: integer -} +export type IndicesPromoteDataStreamResponse = any export interface 
IndicesPutAliasRequest extends RequestBase { index: Indices name: Name master_timeout?: Time timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { filter?: QueryDslQueryContainer index_routing?: Routing @@ -8936,6 +9059,7 @@ export interface IndicesPutIndexTemplateIndexTemplateMapping { export interface IndicesPutIndexTemplateRequest extends RequestBase { name: Name + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { index_patterns?: Indices composed_of?: Name[] @@ -8951,8 +9075,7 @@ export interface IndicesPutIndexTemplateResponse extends AcknowledgedResponseBas } export interface IndicesPutMappingRequest extends RequestBase { - index?: Indices - type?: Type + index: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean @@ -8960,20 +9083,18 @@ export interface IndicesPutMappingRequest extends RequestBase { master_timeout?: Time timeout?: Time write_index_only?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - all_field?: MappingAllField date_detection?: boolean dynamic?: boolean | MappingDynamicMapping dynamic_date_formats?: string[] dynamic_templates?: Record | Record[] - field_names_field?: MappingFieldNamesField - index_field?: MappingIndexField - meta?: Record + _field_names?: MappingFieldNamesField + _meta?: Record numeric_detection?: boolean properties?: Record - routing_field?: MappingRoutingField - size_field?: MappingSizeField - source_field?: MappingSourceField + _routing?: MappingRoutingField + _source?: MappingSourceField runtime?: MappingRuntimeFields } } @@ -8981,10 +9102,6 @@ export interface IndicesPutMappingRequest extends RequestBase { export interface IndicesPutMappingResponse extends IndicesResponseBase { } -export interface IndicesPutSettingsIndexSettingsBody extends IndicesIndexSettings { - settings?: IndicesIndexSettings -} - export interface IndicesPutSettingsRequest extends RequestBase { index?: Indices allow_no_indices?: boolean @@ -8994,7 +9111,8 @@ export interface IndicesPutSettingsRequest extends RequestBase { master_timeout?: Time preserve_existing?: boolean timeout?: Time - body?: IndicesPutSettingsIndexSettingsBody + /** @deprecated The use of the 'body' key has been deprecated, use 'settings' instead. */ + body?: IndicesIndexSettings } export interface IndicesPutSettingsResponse extends AcknowledgedResponseBase { @@ -9007,6 +9125,8 @@ export interface IndicesPutTemplateRequest extends RequestBase { include_type_name?: boolean master_timeout?: Time timeout?: Time + order?: integer + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { aliases?: Record index_patterns?: string | string[] @@ -9030,6 +9150,8 @@ export interface IndicesRecoveryRecoveryBytes { percent: Percentage recovered?: ByteSize recovered_in_bytes: ByteSize + recovered_from_snapshot?: ByteSize + recovered_from_snapshot_in_bytes?: ByteSize reused?: ByteSize reused_in_bytes: ByteSize total?: ByteSize @@ -9189,6 +9311,7 @@ export interface IndicesRolloverRequest extends RequestBase { master_timeout?: Time timeout?: Time wait_for_active_shards?: WaitForActiveShards + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
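 *
 * A minimal sketch of a rollover request; the `alias` target and the condition
 * names (`max_age`, `max_docs`) are assumptions based on the Elasticsearch
 * rollover API rather than on keys visible in this hunk.
 * @example
 * const rolloverRequest: IndicesRolloverRequest = {
 *   alias: 'logs-write',
 *   body: { conditions: { max_age: '7d', max_docs: 1000 } }
 * }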
*/ body?: { aliases?: Record conditions?: IndicesRolloverRolloverConditions @@ -9265,13 +9388,15 @@ export interface IndicesShardStoresRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - status?: string | string[] + status?: IndicesShardStoresShardStatus | IndicesShardStoresShardStatus[] } export interface IndicesShardStoresResponse { indices: Record } +export type IndicesShardStoresShardStatus = 'green' | 'yellow' | 'red' | 'all' + export interface IndicesShardStoresShardStore { allocation: IndicesShardStoresShardStoreAllocation allocation_id: Id @@ -9300,6 +9425,7 @@ export interface IndicesShrinkRequest extends RequestBase { master_timeout?: Time timeout?: Time wait_for_active_shards?: WaitForActiveShards + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { aliases?: Record settings?: Record @@ -9312,7 +9438,8 @@ export interface IndicesShrinkResponse extends AcknowledgedResponseBase { } export interface IndicesSimulateIndexTemplateRequest extends RequestBase { - name?: Name + name: Name + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { index_patterns?: IndexName[] composed_of?: Name[] @@ -9324,15 +9451,28 @@ export interface IndicesSimulateIndexTemplateRequest extends RequestBase { export interface IndicesSimulateIndexTemplateResponse { } +export interface IndicesSimulateTemplateOverlapping { + name: Name + index_patterns: string[] +} + export interface IndicesSimulateTemplateRequest extends RequestBase { name?: Name create?: boolean master_timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, use 'template' instead. */ body?: IndicesGetIndexTemplateIndexTemplate } export interface IndicesSimulateTemplateResponse { - stub: string + template: IndicesSimulateTemplateTemplate +} + +export interface IndicesSimulateTemplateTemplate { + aliases: Record + mappings: MappingTypeMapping + settings: Record + overlapping: IndicesSimulateTemplateOverlapping[] } export interface IndicesSplitRequest extends RequestBase { @@ -9341,6 +9481,7 @@ export interface IndicesSplitRequest extends RequestBase { master_timeout?: Time timeout?: Time wait_for_active_shards?: WaitForActiveShards + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { aliases?: Record settings?: Record @@ -9370,6 +9511,7 @@ export interface IndicesStatsIndexStats { translog?: TranslogStats warmer?: WarmerStats bulk?: BulkStats + shards?: IndicesStatsShardsTotalStats } export interface IndicesStatsIndicesStats { @@ -9483,6 +9625,11 @@ export interface IndicesStatsShardStats { translog: TranslogStats warmer: WarmerStats bulk?: BulkStats + shards: IndicesStatsShardsTotalStats +} + +export interface IndicesStatsShardsTotalStats { + total_count: long } export interface IndicesUnfreezeRequest extends RequestBase { @@ -9500,11 +9647,13 @@ export interface IndicesUnfreezeResponse extends AcknowledgedResponseBase { } export interface IndicesUpdateAliasesIndicesUpdateAliasBulk { + [key: string]: never } export interface IndicesUpdateAliasesRequest extends RequestBase { master_timeout?: Time timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
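 *
 * Note that IndicesUpdateAliasesIndicesUpdateAliasBulk is generated above as an
 * empty placeholder (`[key: string]: never`), so until it gains `add`/`remove`
 * members only an empty actions list type-checks; a sketch:
 * @example
 * const updateAliases: IndicesUpdateAliasesRequest = { body: { actions: [] } }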
*/ body?: { actions?: IndicesUpdateAliasesIndicesUpdateAliasBulk[] } @@ -9513,18 +9662,6 @@ export interface IndicesUpdateAliasesRequest extends RequestBase { export interface IndicesUpdateAliasesResponse extends AcknowledgedResponseBase { } -export interface IndicesUpgradeRequest extends RequestBase { - stub_b: integer - stub_a: integer - body?: { - stub_c: integer - } -} - -export interface IndicesUpgradeResponse { - stub: integer -} - export interface IndicesValidateQueryIndicesValidationExplanation { error?: string explanation?: string @@ -9545,9 +9682,9 @@ export interface IndicesValidateQueryRequest extends RequestBase { explain?: boolean ignore_unavailable?: boolean lenient?: boolean - query_on_query_string?: string rewrite?: boolean q?: string + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { query?: QueryDslQueryContainer } @@ -9918,7 +10055,9 @@ export interface IngestPutPipelineRequest extends RequestBase { id: Id master_timeout?: Time timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { + _meta?: Metadata description?: string on_failure?: IngestProcessorContainer[] processors?: IngestProcessorContainer[] @@ -9929,46 +10068,47 @@ export interface IngestPutPipelineRequest extends RequestBase { export interface IngestPutPipelineResponse extends AcknowledgedResponseBase { } -export interface IngestSimulatePipelineDocument { +export interface IngestSimulateDocument { _id?: Id _index?: IndexName _source: any } -export interface IngestSimulatePipelineDocumentSimulation { +export interface IngestSimulateDocumentSimulation { _id: Id _index: IndexName - _ingest: IngestSimulatePipelineIngest + _ingest: IngestSimulateIngest _parent?: string _routing?: string _source: Record _type?: Type } -export interface IngestSimulatePipelineIngest { +export interface IngestSimulateIngest { timestamp: DateString pipeline?: Name } -export interface IngestSimulatePipelinePipelineSimulation { - doc?: IngestSimulatePipelineDocumentSimulation - processor_results?: IngestSimulatePipelinePipelineSimulation[] +export interface IngestSimulatePipelineSimulation { + doc?: IngestSimulateDocumentSimulation + processor_results?: IngestSimulatePipelineSimulation[] tag?: string processor_type?: string status?: WatcherActionStatusOptions } -export interface IngestSimulatePipelineRequest extends RequestBase { +export interface IngestSimulateRequest extends RequestBase { id?: Id verbose?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - docs?: IngestSimulatePipelineDocument[] + docs?: IngestSimulateDocument[] pipeline?: IngestPipeline } } -export interface IngestSimulatePipelineResponse { - docs: IngestSimulatePipelinePipelineSimulation[] +export interface IngestSimulateResponse { + docs: IngestSimulatePipelineSimulation[] } export interface LicenseLicense { @@ -10039,6 +10179,7 @@ export interface LicensePostAcknowledgement { export interface LicensePostRequest extends RequestBase { acknowledge?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
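 *
 * A minimal sketch of posting a license with the body keys declared below; the
 * license payload itself is left abstract since its fields are not shown here.
 * @example
 * declare const license: LicenseLicense
 * const licenseRequest: LicensePostRequest = {
 *   acknowledge: true,
 *   body: { license }
 * }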
*/ body?: { license?: LicenseLicense licenses?: LicenseLicense[] @@ -10073,63 +10214,85 @@ export interface LicensePostStartTrialResponse extends AcknowledgedResponseBase type: LicenseLicenseType } -export interface LogstashPipelineDeleteRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface LogstashPipeline { + description: string + last_modified: Timestamp + pipeline_metadata: LogstashPipelineMetadata + username: string + pipeline: string + pipeline_settings: LogstashPipelineSettings } -export interface LogstashPipelineDeleteResponse { - stub: integer +export interface LogstashPipelineMetadata { + type: string + version: string } -export interface LogstashPipelineGetRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface LogstashPipelineSettings { + 'pipeline.workers': integer + 'pipeline.batch.size': integer + 'pipeline.batch.delay': integer + 'queue.type': string + 'queue.max_bytes.number': integer + 'queue.max_bytes.units': string + 'queue.checkpoint.writes': integer } -export interface LogstashPipelineGetResponse { - stub: integer +export interface LogstashDeletePipelineRequest extends RequestBase { + id: Id } -export interface LogstashPipelinePutRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export type LogstashDeletePipelineResponse = boolean + +export interface LogstashGetPipelineRequest extends RequestBase { + id: Ids } -export interface LogstashPipelinePutResponse { - stub: integer +export type LogstashGetPipelineResponse = Record + +export interface LogstashPutPipelineRequest extends RequestBase { + id: Id + /** @deprecated The use of the 'body' key has been deprecated, use 'pipeline' instead. 
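 *
 * A minimal sketch of the pipeline body declared below; every field is required
 * by the generated LogstashPipeline shape, and all values are illustrative.
 * @example
 * const putPipeline: LogstashPutPipelineRequest = {
 *   id: 'main',
 *   body: {
 *     description: 'ingest apache logs',
 *     last_modified: '2021-04-26T12:00:00.000Z',
 *     pipeline: 'input { stdin {} } output { stdout {} }',
 *     pipeline_metadata: { type: 'logstash_pipeline', version: '1' },
 *     pipeline_settings: {
 *       'pipeline.workers': 1,
 *       'pipeline.batch.size': 125,
 *       'pipeline.batch.delay': 50,
 *       'queue.type': 'memory',
 *       'queue.max_bytes.number': 1,
 *       'queue.max_bytes.units': 'gb',
 *       'queue.checkpoint.writes': 1024
 *     },
 *     username: 'elastic'
 *   }
 * }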
*/ + body?: LogstashPipeline } -export interface MigrationDeprecationInfoDeprecation { +export type LogstashPutPipelineResponse = boolean + +export interface MigrationDeprecationsDeprecation { details: string - level: MigrationDeprecationInfoDeprecationLevel + level: MigrationDeprecationsDeprecationLevel message: string url: string } -export type MigrationDeprecationInfoDeprecationLevel = 'none' | 'info' | 'warning' | 'critical' +export type MigrationDeprecationsDeprecationLevel = 'none' | 'info' | 'warning' | 'critical' -export interface MigrationDeprecationInfoRequest extends RequestBase { +export interface MigrationDeprecationsRequest extends RequestBase { index?: IndexName } -export interface MigrationDeprecationInfoResponse { - cluster_settings: MigrationDeprecationInfoDeprecation[] - index_settings: Record - node_settings: MigrationDeprecationInfoDeprecation[] - ml_settings: MigrationDeprecationInfoDeprecation[] +export interface MigrationDeprecationsResponse { + cluster_settings: MigrationDeprecationsDeprecation[] + index_settings: Record + node_settings: MigrationDeprecationsDeprecation[] + ml_settings: MigrationDeprecationsDeprecation[] } export interface MlAnalysisConfig { + bucket_span: TimeSpan + categorization_analyzer?: MlCategorizationAnalyzer | string + categorization_field_name?: Field + categorization_filters?: string[] + detectors: MlDetector[] + influencers?: Field[] + model_prune_window?: Time + latency?: Time + multivariate_by_fields?: boolean + per_partition_categorization?: MlPerPartitionCategorization + summary_count_field_name?: Field +} + +export interface MlAnalysisConfigRead { bucket_span: TimeSpan categorization_analyzer?: MlCategorizationAnalyzer | string categorization_field_name?: Field @@ -10365,24 +10528,24 @@ export interface MlDatafeedTimingStats { } export interface MlDataframeAnalysis { - dependent_variable: string - prediction_field_name?: Field alpha?: double - lambda?: double - gamma?: double + dependent_variable: string + downsample_factor?: double + early_stopping_enabled?: boolean eta?: double eta_growth_rate_per_tree?: double feature_bag_fraction?: double + feature_processors?: MlDataframeAnalysisFeatureProcessor[] + gamma?: double + lambda?: double + max_optimization_rounds_per_hyperparameter?: integer max_trees?: integer maximum_number_trees?: integer - soft_tree_depth_limit?: integer - soft_tree_depth_tolerance?: double - downsample_factor?: double - max_optimization_rounds_per_hyperparameter?: integer - early_stopping_enabled?: boolean num_top_feature_importance_values?: integer - feature_processors?: MlDataframeAnalysisFeatureProcessor[] + prediction_field_name?: Field randomize_seed?: double + soft_tree_depth_limit?: integer + soft_tree_depth_tolerance?: double training_percent?: Percentage } @@ -10399,9 +10562,9 @@ export interface MlDataframeAnalysisClassification extends MlDataframeAnalysis { } export interface MlDataframeAnalysisContainer { + classification?: MlDataframeAnalysisClassification outlier_detection?: MlDataframeAnalysisOutlierDetection regression?: MlDataframeAnalysisRegression - classification?: MlDataframeAnalysisClassification } export interface MlDataframeAnalysisFeatureProcessor { @@ -10444,10 +10607,10 @@ export interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding { } export interface MlDataframeAnalysisOutlierDetection { - n_neighbors?: integer - method?: string - feature_influence_threshold?: double compute_feature_influence?: boolean + feature_influence_threshold?: double + method?: string + 
n_neighbors?: integer outlier_fraction?: double standardization_enabled?: boolean } @@ -10490,8 +10653,8 @@ export interface MlDataframeAnalyticsMemoryEstimation { export interface MlDataframeAnalyticsSource { index: Indices query?: QueryDslQueryContainer - _source?: MlDataframeAnalysisAnalyzedFields runtime_mappings?: MappingRuntimeFields + _source?: MlDataframeAnalysisAnalyzedFields } export interface MlDataframeAnalyticsStatsContainer { @@ -10904,7 +11067,7 @@ export interface MlTrainedModelConfig { created_by?: string create_time?: Time default_field_map?: Record - description: string + description?: string estimated_heap_memory_usage_bytes?: integer estimated_operations?: integer inference_config: AggregationsInferenceConfigContainer @@ -10946,6 +11109,7 @@ export interface MlValidationLoss { export interface MlCloseJobRequest extends RequestBase { job_id: Id + allow_no_match?: boolean allow_no_jobs?: boolean force?: boolean timeout?: Time @@ -10999,9 +11163,10 @@ export interface MlDeleteDatafeedResponse extends AcknowledgedResponseBase { } export interface MlDeleteExpiredDataRequest extends RequestBase { - name?: Name + job_id?: Id requests_per_second?: float timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { requests_per_second?: float timeout?: Time @@ -11062,6 +11227,7 @@ export interface MlDeleteTrainedModelAliasResponse extends AcknowledgedResponseB } export interface MlEstimateModelMemoryRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { analysis_config?: MlAnalysisConfig max_bucket_cardinality?: Record @@ -11153,6 +11319,7 @@ export interface MlEvaluateDataFrameDataframeRegressionSummary { } export interface MlEvaluateDataFrameRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { evaluation: MlDataframeEvaluationContainer index: IndexName @@ -11168,6 +11335,7 @@ export interface MlEvaluateDataFrameResponse { export interface MlExplainDataFrameAnalyticsRequest extends RequestBase { id?: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { source?: MlDataframeAnalyticsSource dest?: MlDataframeAnalyticsDestination @@ -11185,17 +11353,10 @@ export interface MlExplainDataFrameAnalyticsResponse { memory_estimation: MlDataframeAnalyticsMemoryEstimation } -export interface MlFindFileStructureRequest extends RequestBase { - stub: string -} - -export interface MlFindFileStructureResponse { - stub: string -} - export interface MlFlushJobRequest extends RequestBase { job_id: Id skip_time?: string + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { advance_time?: DateString calc_interim?: boolean @@ -11209,41 +11370,19 @@ export interface MlFlushJobResponse { last_finalized_bucket_end?: integer } -export interface MlForecastJobRequest extends RequestBase { +export interface MlForecastRequest extends RequestBase { job_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
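 *
 * A minimal sketch of requesting a forecast with the optional body keys declared
 * below; the job id and durations are illustrative.
 * @example
 * const forecastRequest: MlForecastRequest = {
 *   job_id: 'total-requests',
 *   body: { duration: '1d', expires_in: '7d' }
 * }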
*/ body?: { duration?: Time expires_in?: Time } } -export interface MlForecastJobResponse extends AcknowledgedResponseBase { +export interface MlForecastResponse extends AcknowledgedResponseBase { forecast_id: Id } -export interface MlGetAnomalyRecordsRequest extends RequestBase { - job_id: Id - exclude_interim?: boolean - from?: integer - size?: integer - start?: DateString - end?: DateString - body?: { - desc?: boolean - exclude_interim?: boolean - page?: MlPage - record_score?: double - sort?: Field - start?: DateString - end?: DateString - } -} - -export interface MlGetAnomalyRecordsResponse { - count: long - records: MlAnomaly[] -} - export interface MlGetBucketsRequest extends RequestBase { job_id: Id timestamp?: Timestamp @@ -11254,6 +11393,7 @@ export interface MlGetBucketsRequest extends RequestBase { desc?: boolean start?: DateString end?: DateString + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { anomaly_score?: double desc?: boolean @@ -11278,12 +11418,6 @@ export interface MlGetCalendarEventsRequest extends RequestBase { from?: integer start?: string size?: integer - body?: { - end?: DateString - from?: integer - start?: string - size?: integer - } } export interface MlGetCalendarEventsResponse { @@ -11301,6 +11435,7 @@ export interface MlGetCalendarsRequest extends RequestBase { calendar_id?: Id from?: integer size?: integer + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { page?: MlPage } @@ -11317,6 +11452,7 @@ export interface MlGetCategoriesRequest extends RequestBase { from?: integer size?: integer partition_field_value?: string + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { page?: MlPage } @@ -11395,6 +11531,7 @@ export interface MlGetInfluencersRequest extends RequestBase { size?: integer sort?: Field start?: DateString + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { page?: MlPage } @@ -11436,6 +11573,7 @@ export interface MlGetModelSnapshotsRequest extends RequestBase { size?: integer sort?: Field start?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { start?: Time end?: Time @@ -11456,6 +11594,7 @@ export interface MlGetOverallBucketsRequest extends RequestBase { start?: Time exclude_interim?: boolean allow_no_match?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { allow_no_jobs?: boolean } @@ -11466,6 +11605,30 @@ export interface MlGetOverallBucketsResponse { overall_buckets: MlOverallBucket[] } +export interface MlGetRecordsRequest extends RequestBase { + job_id: Id + exclude_interim?: boolean + from?: integer + size?: integer + start?: DateString + end?: DateString + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
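 *
 * A minimal sketch of an overall-buckets request spanning several jobs, using
 * only the query parameters visible in this declaration; values are illustrative.
 * @example
 * const overallBuckets: MlGetOverallBucketsRequest = {
 *   job_id: 'job-*',
 *   bucket_span: '1h',
 *   overall_score: 50,
 *   body: { allow_no_jobs: true }
 * }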
*/ + body?: { + desc?: boolean + exclude_interim?: boolean + page?: MlPage + record_score?: double + sort?: Field + start?: DateString + end?: DateString + } +} + +export interface MlGetRecordsResponse { + count: long + records: MlAnomaly[] +} + export interface MlGetTrainedModelsRequest extends RequestBase { model_id?: Id allow_no_match?: boolean @@ -11534,6 +11697,7 @@ export interface MlInfoResponse { export interface MlOpenJobRequest extends RequestBase { job_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { timeout?: Time } @@ -11544,7 +11708,8 @@ export interface MlOpenJobResponse { } export interface MlPostCalendarEventsRequest extends RequestBase { - calendar_id?: Id + calendar_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { events: MlCalendarEvent[] } @@ -11554,16 +11719,15 @@ export interface MlPostCalendarEventsResponse { events: MlCalendarEvent[] } -export interface MlPostJobDataRequest extends RequestBase { +export interface MlPostDataRequest extends RequestBase { job_id: Id reset_end?: DateString reset_start?: DateString - body?: { - data?: any[] - } + /** @deprecated The use of the 'body' key has been deprecated, use 'data' instead. */ + body?: TData[] } -export interface MlPostJobDataResponse { +export interface MlPostDataResponse { bucket_count: long earliest_record_timestamp: integer empty_bucket_count: long @@ -11591,6 +11755,7 @@ export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig { export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase { id?: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig } @@ -11602,6 +11767,7 @@ export interface MlPreviewDataFrameAnalyticsResponse { export interface MlPreviewDatafeedRequest extends RequestBase { datafeed_id?: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { job_config?: MlJobConfig datafeed_config?: MlDatafeedConfig @@ -11614,6 +11780,7 @@ export interface MlPreviewDatafeedResponse { export interface MlPutCalendarRequest extends RequestBase { calendar_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { description?: string } @@ -11638,15 +11805,16 @@ export interface MlPutCalendarJobResponse { export interface MlPutDataFrameAnalyticsRequest extends RequestBase { id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - source?: MlDataframeAnalyticsSource - dest: MlDataframeAnalyticsDestination + allow_lazy_start?: boolean analysis: MlDataframeAnalysisContainer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields description?: string - model_memory_limit?: string + dest: MlDataframeAnalyticsDestination max_num_threads?: integer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields - allow_lazy_start?: boolean + model_memory_limit?: string + source: MlDataframeAnalyticsSource } } @@ -11670,13 +11838,13 @@ export interface MlPutDatafeedRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_throttled?: boolean ignore_unavailable?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
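 *
 * A minimal sketch of creating a datafeed, restricted to body keys visible in
 * this declaration; the index and timing values are illustrative.
 * @example
 * const putDatafeed: MlPutDatafeedRequest = {
 *   datafeed_id: 'datafeed-total-requests',
 *   body: {
 *     job_id: 'total-requests',
 *     indices: ['server-metrics'],
 *     frequency: '150s'
 *   }
 * }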
*/ body?: { aggregations?: Record chunking_config?: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Time indices?: string[] - indexes?: string[] indices_options?: MlDatafeedIndicesOptions job_id?: Id max_empty_searches?: integer @@ -11707,6 +11875,7 @@ export interface MlPutDatafeedResponse { export interface MlPutFilterRequest extends RequestBase { filter_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { description?: string items?: string[] @@ -11721,6 +11890,7 @@ export interface MlPutFilterResponse { export interface MlPutJobRequest extends RequestBase { job_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { allow_lazy_open?: boolean analysis_config: MlAnalysisConfig @@ -11742,7 +11912,7 @@ export interface MlPutJobRequest extends RequestBase { export interface MlPutJobResponse { allow_lazy_open: boolean - analysis_config: MlAnalysisConfig + analysis_config: MlAnalysisConfigRead analysis_limits: MlAnalysisLimits background_persist_interval?: Time create_time: DateString @@ -11763,19 +11933,101 @@ export interface MlPutJobResponse { results_retention_days?: long } +export interface MlPutTrainedModelAggregateOutput { + logistic_regression?: MlPutTrainedModelWeights + weighted_sum?: MlPutTrainedModelWeights + weighted_mode?: MlPutTrainedModelWeights + exponent?: MlPutTrainedModelWeights +} + +export interface MlPutTrainedModelDefinition { + preprocessors?: MlPutTrainedModelPreprocessor[] + trained_model: MlPutTrainedModelTrainedModel +} + +export interface MlPutTrainedModelEnsemble { + aggregate_output?: MlPutTrainedModelAggregateOutput + classification_labels?: string[] + feature_names?: string[] + target_type?: string + trained_models: MlPutTrainedModelTrainedModel[] +} + +export interface MlPutTrainedModelFrequencyEncodingPreprocessor { + field: string + feature_name: string + frequency_map: Record +} + +export interface MlPutTrainedModelInput { + field_names: Names +} + +export interface MlPutTrainedModelOneHotEncodingPreprocessor { + field: string + hot_map: Record +} + +export interface MlPutTrainedModelPreprocessor { + frequency_encoding?: MlPutTrainedModelFrequencyEncodingPreprocessor + one_hot_encoding?: MlPutTrainedModelOneHotEncodingPreprocessor + target_mean_encoding?: MlPutTrainedModelTargetMeanEncodingPreprocessor +} + export interface MlPutTrainedModelRequest extends RequestBase { - stub: string + model_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
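 *
 * A minimal sketch of the hand-rolled model shapes declared alongside this
 * request: a one-split decision tree expressed with MlPutTrainedModelDefinition.
 * Feature names, thresholds, and leaf values are illustrative.
 * @example
 * const definition: MlPutTrainedModelDefinition = {
 *   trained_model: {
 *     tree: {
 *       feature_names: ['cost'],
 *       tree_structure: [
 *         { node_index: 0, split_feature: 0, threshold: 0.5, left_child: 1, right_child: 2 },
 *         { node_index: 1, leaf_value: 0 },
 *         { node_index: 2, leaf_value: 1 }
 *       ]
 *     }
 *   }
 * }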
*/ body?: { - stub?: string + compressed_definition?: string + definition?: MlPutTrainedModelDefinition + description?: string + inference_config: AggregationsInferenceConfigContainer + input: MlPutTrainedModelInput + metadata?: any + tags?: string[] } } -export interface MlPutTrainedModelResponse { - stub: boolean +export type MlPutTrainedModelResponse = MlTrainedModelConfig + +export interface MlPutTrainedModelTargetMeanEncodingPreprocessor { + field: string + feature_name: string + target_map: Record + default_value: double +} + +export interface MlPutTrainedModelTrainedModel { + tree?: MlPutTrainedModelTrainedModelTree + tree_node?: MlPutTrainedModelTrainedModelTreeNode + ensemble?: MlPutTrainedModelEnsemble +} + +export interface MlPutTrainedModelTrainedModelTree { + classification_labels?: string[] + feature_names: string[] + target_type?: string + tree_structure: MlPutTrainedModelTrainedModelTreeNode[] +} + +export interface MlPutTrainedModelTrainedModelTreeNode { + decision_type?: string + default_left?: boolean + leaf_value?: double + left_child?: integer + node_index: integer + right_child?: integer + split_feature?: integer + split_gain?: integer + threshold?: double +} + +export interface MlPutTrainedModelWeights { + weights: double } export interface MlPutTrainedModelAliasRequest extends RequestBase { - model_alias: string + model_alias: Name model_id: Id reassign?: boolean } @@ -11794,6 +12046,7 @@ export interface MlResetJobResponse extends AcknowledgedResponseBase { export interface MlRevertModelSnapshotRequest extends RequestBase { job_id: Id snapshot_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { delete_intervening_results?: boolean } @@ -11823,6 +12076,7 @@ export interface MlStartDataFrameAnalyticsResponse extends AcknowledgedResponseB export interface MlStartDatafeedRequest extends RequestBase { datafeed_id: Id start?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { end?: Time start?: Time @@ -11850,6 +12104,7 @@ export interface MlStopDatafeedRequest extends RequestBase { datafeed_id: Id allow_no_match?: boolean force?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { force?: boolean timeout?: Time @@ -11862,6 +12117,7 @@ export interface MlStopDatafeedResponse { export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { description?: string model_memory_limit?: string @@ -11886,6 +12142,7 @@ export interface MlUpdateDataFrameAnalyticsResponse { export interface MlUpdateFilterRequest extends RequestBase { filter_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { add_items?: string[] description?: string @@ -11901,6 +12158,7 @@ export interface MlUpdateFilterResponse { export interface MlUpdateJobRequest extends RequestBase { job_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
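 *
 * A minimal sketch of an update-job request, restricted to body keys visible in
 * this declaration.
 * @example
 * const updateJob: MlUpdateJobRequest = {
 *   job_id: 'total-requests',
 *   body: { allow_lazy_open: true }
 * }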
*/ body?: { allow_lazy_open?: boolean analysis_limits?: MlAnalysisMemoryLimit @@ -11920,12 +12178,33 @@ export interface MlUpdateJobRequest extends RequestBase { } export interface MlUpdateJobResponse { - stub: boolean + allow_lazy_open: boolean + analysis_config: MlAnalysisConfigRead + analysis_limits: MlAnalysisLimits + background_persist_interval?: Time + create_time: EpochMillis + finished_time?: EpochMillis + custom_settings?: Record + daily_model_snapshot_retention_after_days: long + data_description: MlDataDescription + datafeed_config?: MlDatafeed + description?: string + groups?: string[] + job_id: Id + job_type: string + job_version: VersionString + model_plot_config?: MlModelPlotConfig + model_snapshot_id?: Id + model_snapshot_retention_days: long + renormalization_window_days?: long + results_index_name: IndexName + results_retention_days?: long } export interface MlUpdateModelSnapshotRequest extends RequestBase { job_id: Id snapshot_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { description?: string retain?: boolean @@ -11948,14 +12227,8 @@ export interface MlUpgradeJobSnapshotResponse { completed: boolean } -export interface MlValidateDetectorRequest extends RequestBase { - body?: MlDetector -} - -export interface MlValidateDetectorResponse extends AcknowledgedResponseBase { -} - -export interface MlValidateJobRequest extends RequestBase { +export interface MlValidateRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { job_id?: Id analysis_config?: MlAnalysisConfig @@ -11968,19 +12241,31 @@ export interface MlValidateJobRequest extends RequestBase { } } -export interface MlValidateJobResponse extends AcknowledgedResponseBase { +export interface MlValidateResponse extends AcknowledgedResponseBase { } -export interface MonitoringBulkRequest extends RequestBase { - stub_a: string - stub_b: string - body?: { - stub_c: string - } +export interface MlValidateDetectorRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, use 'detector' instead. */ + body?: MlDetector +} + +export interface MlValidateDetectorResponse extends AcknowledgedResponseBase { +} + +export interface MonitoringBulkRequest extends RequestBase { + type?: string + system_id: string + system_api_version: string + interval: TimeSpan + /** @deprecated The use of the 'body' key has been deprecated, use 'operations' instead. 
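 *
 * A minimal sketch of the interleaved action/source body declared below. The
 * TSource type argument and the `index` action shape of BulkOperationContainer
 * are assumptions, as neither is fully visible in this hunk.
 * @example
 * const monitoringBulk: MonitoringBulkRequest<{ load: number }> = {
 *   system_id: 'kibana',
 *   system_api_version: '7',
 *   interval: '10s',
 *   body: [{ index: { _index: 'metrics' } }, { load: 0.42 }]
 * }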
*/ + body?: (BulkOperationContainer | TSource)[] } export interface MonitoringBulkResponse { - stub: integer + error?: ErrorCause + errors: boolean + ignored: boolean + took: long } export interface NodesAdaptiveSelection { @@ -12273,6 +12558,14 @@ export interface NodesInfoNodeInfoIngest { processors: NodesInfoNodeInfoIngestProcessor[] } +export interface NodesInfoNodeInfoIngestDownloader { + enabled: string +} + +export interface NodesInfoNodeInfoIngestInfo { + downloader: NodesInfoNodeInfoIngestDownloader +} + export interface NodesInfoNodeInfoIngestProcessor { type: string } @@ -12360,6 +12653,7 @@ export interface NodesInfoNodeInfoSettings { xpack?: NodesInfoNodeInfoXpack script?: NodesInfoNodeInfoScript search?: NodesInfoNodeInfoSearch + ingest?: NodesInfoNodeInfoSettingsIngest } export interface NodesInfoNodeInfoSettingsCluster { @@ -12384,6 +12678,43 @@ export interface NodesInfoNodeInfoSettingsHttpType { default: string } +export interface NodesInfoNodeInfoSettingsIngest { + attachment?: NodesInfoNodeInfoIngestInfo + append?: NodesInfoNodeInfoIngestInfo + csv?: NodesInfoNodeInfoIngestInfo + convert?: NodesInfoNodeInfoIngestInfo + date?: NodesInfoNodeInfoIngestInfo + date_index_name?: NodesInfoNodeInfoIngestInfo + dot_expander?: NodesInfoNodeInfoIngestInfo + enrich?: NodesInfoNodeInfoIngestInfo + fail?: NodesInfoNodeInfoIngestInfo + foreach?: NodesInfoNodeInfoIngestInfo + json?: NodesInfoNodeInfoIngestInfo + user_agent?: NodesInfoNodeInfoIngestInfo + kv?: NodesInfoNodeInfoIngestInfo + geoip?: NodesInfoNodeInfoIngestInfo + grok?: NodesInfoNodeInfoIngestInfo + gsub?: NodesInfoNodeInfoIngestInfo + join?: NodesInfoNodeInfoIngestInfo + lowercase?: NodesInfoNodeInfoIngestInfo + remove?: NodesInfoNodeInfoIngestInfo + rename?: NodesInfoNodeInfoIngestInfo + script?: NodesInfoNodeInfoIngestInfo + set?: NodesInfoNodeInfoIngestInfo + sort?: NodesInfoNodeInfoIngestInfo + split?: NodesInfoNodeInfoIngestInfo + trim?: NodesInfoNodeInfoIngestInfo + uppercase?: NodesInfoNodeInfoIngestInfo + urldecode?: NodesInfoNodeInfoIngestInfo + bytes?: NodesInfoNodeInfoIngestInfo + dissect?: NodesInfoNodeInfoIngestInfo + set_security_user?: NodesInfoNodeInfoIngestInfo + pipeline?: NodesInfoNodeInfoIngestInfo + drop?: NodesInfoNodeInfoIngestInfo + circle?: NodesInfoNodeInfoIngestInfo + inference?: NodesInfoNodeInfoIngestInfo +} + export interface NodesInfoNodeInfoSettingsNetwork { host: Host } @@ -12530,6 +12861,7 @@ export interface NodesReloadSecureSettingsNodeReloadExceptionCausedBy { export interface NodesReloadSecureSettingsRequest extends RequestBase { node_id?: NodeIds timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
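 *
 * A minimal sketch of reloading secure settings with the password body key
 * declared below; the password value is illustrative.
 * @example
 * const reloadRequest: NodesReloadSecureSettingsRequest = {
 *   timeout: '30s',
 *   body: { secure_settings_password: 'keystore-password' }
 * }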
*/ body?: { secure_settings_password?: Password } @@ -12611,100 +12943,43 @@ export interface RollupTermsGrouping { fields: Fields } -export interface RollupCreateRollupJobRequest extends RequestBase { - id: Id - body?: { - cron?: string - groups?: RollupGroupings - index_pattern?: string - metrics?: RollupFieldMetric[] - page_size?: long - rollup_index?: IndexName - } -} - -export interface RollupCreateRollupJobResponse extends AcknowledgedResponseBase { -} - -export interface RollupDeleteRollupJobRequest extends RequestBase { +export interface RollupDeleteJobRequest extends RequestBase { id: Id } -export interface RollupDeleteRollupJobResponse extends AcknowledgedResponseBase { - task_failures?: RollupDeleteRollupJobTaskFailure[] +export interface RollupDeleteJobResponse extends AcknowledgedResponseBase { + task_failures?: RollupDeleteJobTaskFailure[] } -export interface RollupDeleteRollupJobTaskFailure { +export interface RollupDeleteJobTaskFailure { task_id: TaskId node_id: Id status: string - reason: RollupDeleteRollupJobTaskFailureReason + reason: RollupDeleteJobTaskFailureReason } -export interface RollupDeleteRollupJobTaskFailureReason { +export interface RollupDeleteJobTaskFailureReason { type: string reason: string } -export interface RollupGetRollupCapabilitiesRequest extends RequestBase { - id?: Id -} - -export interface RollupGetRollupCapabilitiesResponse extends DictionaryResponseBase { -} - -export interface RollupGetRollupCapabilitiesRollupCapabilities { - rollup_jobs: RollupGetRollupCapabilitiesRollupCapabilitySummary[] -} - -export interface RollupGetRollupCapabilitiesRollupCapabilitySummary { - fields: Record> - index_pattern: string - job_id: string - rollup_index: string -} - -export interface RollupGetRollupIndexCapabilitiesIndexCapabilities { - rollup_jobs: RollupGetRollupIndexCapabilitiesRollupJobSummary[] -} - -export interface RollupGetRollupIndexCapabilitiesRequest extends RequestBase { - index: Id -} - -export interface RollupGetRollupIndexCapabilitiesResponse extends DictionaryResponseBase { -} - -export interface RollupGetRollupIndexCapabilitiesRollupJobSummary { - fields: Record - index_pattern: string - job_id: Id - rollup_index: IndexName -} - -export interface RollupGetRollupIndexCapabilitiesRollupJobSummaryField { - agg: string - time_zone?: string - calendar_interval?: Time -} - -export type RollupGetRollupJobIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' +export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' -export interface RollupGetRollupJobRequest extends RequestBase { +export interface RollupGetJobsRequest extends RequestBase { id?: Id } -export interface RollupGetRollupJobResponse { - jobs: RollupGetRollupJobRollupJob[] +export interface RollupGetJobsResponse { + jobs: RollupGetJobsRollupJob[] } -export interface RollupGetRollupJobRollupJob { - config: RollupGetRollupJobRollupJobConfiguration - stats: RollupGetRollupJobRollupJobStats - status: RollupGetRollupJobRollupJobStatus +export interface RollupGetJobsRollupJob { + config: RollupGetJobsRollupJobConfiguration + stats: RollupGetJobsRollupJobStats + status: RollupGetJobsRollupJobStatus } -export interface RollupGetRollupJobRollupJobConfiguration { +export interface RollupGetJobsRollupJobConfiguration { cron: string groups: RollupGroupings id: Id @@ -12715,7 +12990,7 @@ export interface RollupGetRollupJobRollupJobConfiguration { timeout: Time } -export interface RollupGetRollupJobRollupJobStats { +export 
interface RollupGetJobsRollupJobStats { documents_processed: long index_failures: long index_time_in_ms: long @@ -12730,29 +13005,85 @@ export interface RollupGetRollupJobRollupJobStats { processing_total: long } -export interface RollupGetRollupJobRollupJobStatus { +export interface RollupGetJobsRollupJobStatus { current_position?: Record - job_state: RollupGetRollupJobIndexingJobState + job_state: RollupGetJobsIndexingJobState upgraded_doc_id?: boolean } -export interface RollupRollupRequest extends RequestBase { - stubb: integer - stuba: integer +export interface RollupGetRollupCapsRequest extends RequestBase { + id?: Id +} + +export interface RollupGetRollupCapsResponse extends DictionaryResponseBase { +} + +export interface RollupGetRollupCapsRollupCapabilities { + rollup_jobs: RollupGetRollupCapsRollupCapabilitySummary[] +} + +export interface RollupGetRollupCapsRollupCapabilitySummary { + fields: Record> + index_pattern: string + job_id: string + rollup_index: string +} + +export interface RollupGetRollupIndexCapsIndexCapabilities { + rollup_jobs: RollupGetRollupIndexCapsRollupJobSummary[] +} + +export interface RollupGetRollupIndexCapsRequest extends RequestBase { + index: Id +} + +export interface RollupGetRollupIndexCapsResponse extends DictionaryResponseBase { +} + +export interface RollupGetRollupIndexCapsRollupJobSummary { + fields: Record + index_pattern: string + job_id: Id + rollup_index: IndexName +} + +export interface RollupGetRollupIndexCapsRollupJobSummaryField { + agg: string + time_zone?: string + calendar_interval?: Time +} + +export interface RollupPutJobRequest extends RequestBase { + id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - stub: integer + cron?: string + groups?: RollupGroupings + index_pattern?: string + metrics?: RollupFieldMetric[] + page_size?: long + rollup_index?: IndexName } } -export interface RollupRollupResponse { - stub: integer +export interface RollupPutJobResponse extends AcknowledgedResponseBase { +} + +export interface RollupRollupRequest extends RequestBase { + index: IndexName + rollup_index: IndexName + /** @deprecated The use of the 'body' key has been deprecated, use 'config' instead. */ + body?: any } +export type RollupRollupResponse = any + export interface RollupRollupSearchRequest extends RequestBase { index: Indices type?: Type rest_total_hits_as_int?: boolean typed_keys?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
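 *
 * A minimal sketch of a rollup search using the body keys declared below; the
 * max-aggregation shape inside the aggs record is an assumption based on the
 * standard aggregation container types.
 * @example
 * const rollupSearch: RollupRollupSearchRequest = {
 *   index: 'sensor_rollup',
 *   body: {
 *     size: 0,
 *     aggs: { max_temperature: { max: { field: 'temperature' } } }
 *   }
 * }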
*/ body?: { aggs?: Record query?: QueryDslQueryContainer @@ -12769,24 +13100,26 @@ export interface RollupRollupSearchResponse { aggregations?: Record } -export interface RollupStartRollupJobRequest extends RequestBase { +export interface RollupStartJobRequest extends RequestBase { id: Id } -export interface RollupStartRollupJobResponse { +export interface RollupStartJobResponse { started: boolean } -export interface RollupStopRollupJobRequest extends RequestBase { +export interface RollupStopJobRequest extends RequestBase { id: Id timeout?: Time wait_for_completion?: boolean } -export interface RollupStopRollupJobResponse { +export interface RollupStopJobResponse { stopped: boolean } +export type SearchableSnapshotsStatsLevel = 'cluster' | 'indices' | 'shards' + export interface SearchableSnapshotsClearCacheRequest extends RequestBase { index?: Indices expand_wildcards?: ExpandWildcards @@ -12796,9 +13129,7 @@ export interface SearchableSnapshotsClearCacheRequest extends RequestBase { human?: boolean } -export interface SearchableSnapshotsClearCacheResponse { - stub: integer -} +export type SearchableSnapshotsClearCacheResponse = any export interface SearchableSnapshotsMountMountedSnapshot { snapshot: Name @@ -12812,6 +13143,7 @@ export interface SearchableSnapshotsMountRequest extends RequestBase { master_timeout?: Time wait_for_completion?: boolean storage?: string + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { index: IndexName renamed_index?: IndexName @@ -12824,28 +13156,14 @@ export interface SearchableSnapshotsMountResponse { snapshot: SearchableSnapshotsMountMountedSnapshot } -export interface SearchableSnapshotsRepositoryStatsRequest extends RequestBase { - stub_a: integer - stub_b: integer - body?: { - stub_c: integer - } -} - -export interface SearchableSnapshotsRepositoryStatsResponse { - stub: integer -} - export interface SearchableSnapshotsStatsRequest extends RequestBase { - stub_a: integer - stub_b: integer - body?: { - stub_c: integer - } + index?: Indices + level?: SearchableSnapshotsStatsLevel } export interface SearchableSnapshotsStatsResponse { - stub: integer + stats: any + total: any } export interface SecurityApplicationGlobalUserPrivileges { @@ -12862,24 +13180,36 @@ export interface SecurityClusterNode { name: Name } +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client' + export interface SecurityCreatedStatus { created: boolean } +export interface SecurityFieldRule { + username?: Name + dn?: Names + groups?: Names + metadata?: any + realm?: SecurityRealm +} + export interface SecurityFieldSecurity { except?: Fields grant: Fields } -export interface SecurityGlobalPrivileges { +export interface SecurityGlobalPrivilege { application: SecurityApplicationGlobalUserPrivileges } +export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 
'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' + export interface SecurityIndicesPrivileges { - field_security?: SecurityFieldSecurity + field_security?: SecurityFieldSecurity | SecurityFieldSecurity[] names: Indices - privileges: string[] - query?: string | QueryDslQueryContainer + privileges: SecurityIndexPrivilege[] + query?: string | string[] | QueryDslQueryContainer allow_restricted_indices?: boolean } @@ -12887,6 +13217,10 @@ export interface SecurityManageUserPrivileges { applications: string[] } +export interface SecurityRealm { + name: Name +} + export interface SecurityRealmInfo { name: Name type: string @@ -12896,10 +13230,15 @@ export interface SecurityRoleMapping { enabled: boolean metadata: Metadata roles: string[] - rules: SecurityRoleMappingRuleBase + rules: SecurityRoleMappingRule + role_templates?: SecurityGetRoleRoleTemplate[] } -export interface SecurityRoleMappingRuleBase { +export interface SecurityRoleMappingRule { + any?: SecurityRoleMappingRule[] + all?: SecurityRoleMappingRule[] + field?: SecurityFieldRule + except?: SecurityRoleMappingRule } export interface SecurityUser { @@ -12929,11 +13268,13 @@ export interface SecurityAuthenticateResponse { export interface SecurityAuthenticateToken { name: Name + type?: string } export interface SecurityChangePasswordRequest extends RequestBase { username?: Username refresh?: Refresh + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { password?: Password } @@ -12943,7 +13284,7 @@ export interface SecurityChangePasswordResponse { } export interface SecurityClearApiKeyCacheRequest extends RequestBase { - ids?: Ids + ids: Ids } export interface SecurityClearApiKeyCacheResponse { @@ -12997,11 +13338,12 @@ export interface SecurityClearCachedServiceTokensResponse { export interface SecurityCreateApiKeyIndexPrivileges { names: Indices - privileges: string[] + privileges: SecurityIndexPrivilege[] } export interface SecurityCreateApiKeyRequest extends RequestBase { refresh?: Refresh + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
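 *
 * A minimal sketch of minting an API key, restricted to the body keys visible
 * in this declaration; the key name and expiration are illustrative.
 * @example
 * const createKey: SecurityCreateApiKeyRequest = {
 *   refresh: 'wait_for',
 *   body: { name: 'ingest-key', expiration: '7d' }
 * }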
*/ body?: { expiration?: Time name?: Name @@ -13211,7 +13553,7 @@ export interface SecurityGetServiceAccountsResponse extends DictionaryResponseBa export interface SecurityGetServiceAccountsRoleDescriptor { cluster: string[] indices: SecurityIndicesPrivileges[] - global?: SecurityGlobalPrivileges[] + global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege applications?: SecurityApplicationPrivileges[] metadata?: Metadata run_as?: string[] @@ -13222,17 +13564,25 @@ export interface SecurityGetServiceAccountsRoleDescriptorWrapper { role_descriptor: SecurityGetServiceAccountsRoleDescriptor } +export interface SecurityGetServiceCredentialsNodesCredentials { + _nodes: NodeStatistics + file_tokens: Record +} + +export interface SecurityGetServiceCredentialsNodesCredentialsFileToken { + nodes: string[] +} + export interface SecurityGetServiceCredentialsRequest extends RequestBase { namespace: Namespace - service: Service + service: Name } export interface SecurityGetServiceCredentialsResponse { service_account: string - node_name: NodeName count: integer - tokens: Record - file_tokens: Record + tokens: Record + nodes_credentials: SecurityGetServiceCredentialsNodesCredentials } export type SecurityGetTokenAccessTokenGrantType = 'password' | 'client_credentials' | '_kerberos' | 'refresh_token' @@ -13250,6 +13600,7 @@ export interface SecurityGetTokenAuthenticationProvider { } export interface SecurityGetTokenRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { grant_type?: SecurityGetTokenAccessTokenGrantType scope?: string @@ -13290,7 +13641,7 @@ export interface SecurityGetUserPrivilegesRequest extends RequestBase { export interface SecurityGetUserPrivilegesResponse { applications: SecurityApplicationPrivileges[] cluster: string[] - global: SecurityGlobalPrivileges[] + global: SecurityGlobalPrivilege[] indices: SecurityIndicesPrivileges[] run_as: string[] } @@ -13304,6 +13655,7 @@ export interface SecurityGrantApiKeyApiKey { export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' export interface SecurityGrantApiKeyRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { api_key: SecurityGrantApiKeyApiKey grant_type: SecurityGrantApiKeyApiKeyGrantType @@ -13329,17 +13681,18 @@ export interface SecurityHasPrivilegesApplicationPrivilegesCheck { export type SecurityHasPrivilegesApplicationsPrivileges = Record export interface SecurityHasPrivilegesIndexPrivilegesCheck { - names: string[] - privileges: string[] + names: Indices + privileges: SecurityIndexPrivilege[] } export type SecurityHasPrivilegesPrivileges = Record export interface SecurityHasPrivilegesRequest extends RequestBase { user?: Name + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] - cluster?: string[] + cluster?: SecurityClusterPrivilege[] index?: SecurityHasPrivilegesIndexPrivilegesCheck[] } } @@ -13355,6 +13708,7 @@ export interface SecurityHasPrivilegesResponse { } export interface SecurityInvalidateApiKeyRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
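 *
 * A minimal sketch of invalidating the caller's own keys by id, using only body
 * keys visible in this declaration; the id value is illustrative.
 * @example
 * const invalidateKeys: SecurityInvalidateApiKeyRequest = {
 *   body: { ids: ['VuaCfGcBCdbkQm-e5aOx'], owner: true }
 * }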
*/ body?: { id?: Id ids?: Id[] @@ -13373,6 +13727,7 @@ export interface SecurityInvalidateApiKeyResponse { } export interface SecurityInvalidateTokenRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { token?: string refresh_token?: string @@ -13397,6 +13752,7 @@ export interface SecurityPutPrivilegesActions { export interface SecurityPutPrivilegesRequest extends RequestBase { refresh?: Refresh + /** @deprecated The use of the 'body' key has been deprecated, use 'privileges' instead. */ body?: Record> } @@ -13406,9 +13762,10 @@ export interface SecurityPutPrivilegesResponse extends DictionaryResponseBase indices?: SecurityIndicesPrivileges[] metadata?: Metadata @@ -13424,11 +13781,12 @@ export interface SecurityPutRoleResponse { export interface SecurityPutRoleMappingRequest extends RequestBase { name: Name refresh?: Refresh + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { enabled?: boolean metadata?: Metadata roles?: string[] - rules?: SecurityRoleMappingRuleBase + rules?: SecurityRoleMappingRule run_as?: string[] } } @@ -13441,6 +13799,7 @@ export interface SecurityPutRoleMappingResponse { export interface SecurityPutUserRequest extends RequestBase { username: Username refresh?: Refresh + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { username?: Username email?: string | null @@ -13458,39 +13817,61 @@ export interface SecurityPutUserResponse { } export interface ShutdownDeleteNodeRequest extends RequestBase { - body?: { - stub: string - } + node_id: NodeId } -export interface ShutdownDeleteNodeResponse { - stub: boolean +export interface ShutdownDeleteNodeResponse extends AcknowledgedResponseBase { +} + +export interface ShutdownGetNodeNodeShutdownStatus { + node_id: NodeId + type: ShutdownGetNodeShutdownType + reason: string + shutdown_startedmillis: EpochMillis + status: ShutdownGetNodeShutdownStatus + shard_migration: ShutdownGetNodeShardMigrationStatus + persistent_tasks: ShutdownGetNodePersistentTaskStatus + plugins: ShutdownGetNodePluginsStatus +} + +export interface ShutdownGetNodePersistentTaskStatus { + status: ShutdownGetNodeShutdownStatus +} + +export interface ShutdownGetNodePluginsStatus { + status: ShutdownGetNodeShutdownStatus } export interface ShutdownGetNodeRequest extends RequestBase { - body?: { - stub: string - } + node_id?: NodeIds } export interface ShutdownGetNodeResponse { - stub: boolean + nodes: ShutdownGetNodeNodeShutdownStatus[] +} + +export interface ShutdownGetNodeShardMigrationStatus { + status: ShutdownGetNodeShutdownStatus } +export type ShutdownGetNodeShutdownStatus = 'not_started' | 'in_progress' | 'stalled' | 'complete' + +export type ShutdownGetNodeShutdownType = 'remove' | 'restart' + export interface ShutdownPutNodeRequest extends RequestBase { - body?: { - stub: string - } + node_id: NodeId } -export interface ShutdownPutNodeResponse { - stub: boolean +export interface ShutdownPutNodeResponse extends AcknowledgedResponseBase { } export interface SlmConfiguration { ignore_unavailable?: boolean - include_global_state?: boolean indices: Indices + include_global_state?: boolean + feature_states?: string[] + metadata?: Metadata + partial?: boolean } export interface SlmInProgress { @@ -13602,6 +13983,9 @@ export interface SlmGetStatusResponse { export interface SlmPutLifecycleRequest extends RequestBase { 
policy_id: Name + master_timeout?: Time + timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { config?: SlmConfiguration name?: Name @@ -13699,6 +14083,7 @@ export interface SnapshotSnapshotInfo { index_details?: Record metadata?: Metadata reason?: string + repository?: Name snapshot: Name shards?: ShardStatistics start_time?: Time @@ -13762,6 +14147,7 @@ export interface SnapshotCloneRequest extends RequestBase { target_snapshot: Name master_timeout?: Time timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { indices: string } @@ -13775,6 +14161,7 @@ export interface SnapshotCreateRequest extends RequestBase { snapshot: Name master_timeout?: Time wait_for_completion?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { ignore_unavailable?: boolean include_global_state?: boolean @@ -13787,7 +14174,7 @@ export interface SnapshotCreateRequest extends RequestBase { export interface SnapshotCreateResponse { accepted?: boolean - snapshot?: SnapshotSnapshotInfo + snapshot: SnapshotSnapshotInfo } export interface SnapshotCreateRepositoryRequest extends RequestBase { @@ -13795,6 +14182,7 @@ export interface SnapshotCreateRepositoryRequest extends RequestBase { master_timeout?: Time timeout?: Time verify?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { repository?: SnapshotRepository type: string @@ -13831,11 +14219,14 @@ export interface SnapshotGetRequest extends RequestBase { verbose?: boolean index_details?: boolean human?: boolean + include_repository?: boolean } export interface SnapshotGetResponse { responses?: SnapshotGetSnapshotResponseItem[] snapshots?: SnapshotSnapshotInfo[] + total: integer + remaining: integer } export interface SnapshotGetSnapshotResponseItem { @@ -13858,6 +14249,7 @@ export interface SnapshotRestoreRequest extends RequestBase { snapshot: Name master_timeout?: Time wait_for_completion?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { ignore_index_settings?: string[] ignore_unavailable?: boolean @@ -13907,6 +14299,7 @@ export interface SnapshotVerifyRepositoryResponse { } export interface SqlClearCursorRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { cursor: string } @@ -13923,6 +14316,7 @@ export interface SqlQueryColumn { export interface SqlQueryRequest extends RequestBase { format?: string + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { columnar?: boolean cursor?: string @@ -13945,6 +14339,7 @@ export interface SqlQueryResponse { export type SqlQueryRow = any[] export interface SqlTranslateRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { fetch_size?: integer filter?: QueryDslQueryContainer @@ -13960,7 +14355,7 @@ export interface SqlTranslateResponse { sort: SearchSort } -export interface SslGetCertificatesCertificateInformation { +export interface SslCertificatesCertificateInformation { alias?: string expiry: DateString format: string @@ -13970,27 +14365,27 @@ export interface SslGetCertificatesCertificateInformation { subject_dn: string } -export interface SslGetCertificatesRequest extends RequestBase { +export interface SslCertificatesRequest extends RequestBase { } -export type SslGetCertificatesResponse = SslGetCertificatesCertificateInformation[] +export type SslCertificatesResponse = SslCertificatesCertificateInformation[] -export interface TaskInfo { +export interface TasksInfo { action: string cancellable: boolean - children?: TaskInfo[] + children?: TasksInfo[] description?: string headers: HttpHeaders id: long node: string running_time_in_nanos: long start_time_in_millis: long - status?: TaskStatus + status?: TasksStatus type: string parent_task_id?: Id } -export interface TaskState { +export interface TasksState { action: string cancellable: boolean description?: string @@ -14000,11 +14395,11 @@ export interface TaskState { parent_task_id?: TaskId running_time_in_nanos: long start_time_in_millis: long - status?: TaskStatus + status?: TasksStatus type: string } -export interface TaskStatus { +export interface TasksStatus { batches: long canceled?: string created: long @@ -14024,11 +14419,11 @@ export interface TaskStatus { version_conflicts: long } -export interface TaskTaskExecutingNode extends SpecUtilsBaseNode { - tasks: Record +export interface TasksTaskExecutingNode extends SpecUtilsBaseNode { + tasks: Record } -export interface TaskCancelRequest extends RequestBase { +export interface TasksCancelRequest extends RequestBase { task_id?: TaskId actions?: string | string[] nodes?: string[] @@ -14036,25 +14431,25 @@ export interface TaskCancelRequest extends RequestBase { wait_for_completion?: boolean } -export interface TaskCancelResponse { +export interface TasksCancelResponse { node_failures?: ErrorCause[] - nodes: Record + nodes: Record } -export interface TaskGetRequest extends RequestBase { +export interface TasksGetRequest extends RequestBase { task_id: Id timeout?: Time wait_for_completion?: boolean } -export interface TaskGetResponse { +export interface TasksGetResponse { completed: boolean - task: TaskInfo - response?: TaskStatus + task: TasksInfo + response?: TasksStatus error?: ErrorCause } -export interface TaskListRequest extends RequestBase { +export interface TasksListRequest extends RequestBase { actions?: string | string[] detailed?: boolean group_by?: GroupBy @@ -14064,10 +14459,10 @@ export interface TaskListRequest extends RequestBase { wait_for_completion?: boolean } -export interface TaskListResponse { +export interface TasksListResponse { node_failures?: ErrorCause[] - nodes?: Record - tasks?: Record | TaskInfo[] + nodes?: Record + tasks?: Record | TasksInfo[] } export interface TextStructureFindStructureFieldStat { @@ -14097,6 +14492,7 @@ export interface TextStructureFindStructureRequest { timeout?: Time timestamp_field?: Field timestamp_format?: string + /** @deprecated The use of the 'body' key has been deprecated, use 'text_files' instead. 
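// Editorial note: the hunks above rename the `Task*` helper types to `Tasks*`
// (TaskInfo → TasksInfo, TaskListResponse → TasksListResponse, and so on),
// matching the `Ssl*` rename nearby. A sketch of the corresponding downstream
// change — the import path is an assumption, it is not shown in this patch:
//
//   import { TasksInfo, TasksListResponse } from '@elastic/elasticsearch/api/types'
//
//   function firstTaskNode (res: TasksListResponse): string | undefined {
//     // `tasks` may be either a record keyed by task id or a flat array
//     const tasks = Array.isArray(res.tasks) ? res.tasks : Object.values(res.tasks ?? {})
//     return (tasks as TasksInfo[])[0]?.node
//   }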
*/ body?: TJsonDocument[] } @@ -14138,7 +14534,7 @@ export interface TransformLatest { export interface TransformPivot { aggregations?: Record aggs?: Record - group_by: Record + group_by?: Record max_page_search_size?: integer } @@ -14259,6 +14655,8 @@ export interface TransformGetTransformStatsTransformStats { } export interface TransformPreviewTransformRequest extends RequestBase { + transform_id?: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { dest?: ReindexDestination description?: string @@ -14339,6 +14737,7 @@ export interface WatcherAction { transform?: TransformContainer index?: WatcherIndex logging?: WatcherLogging + webhook?: WatcherActionWebhook } export type WatcherActionExecutionMode = 'simulate' | 'force_simulate' | 'execute' | 'force_execute' | 'skip' @@ -14354,6 +14753,11 @@ export type WatcherActionStatusOptions = 'success' | 'failure' | 'simulated' | ' export type WatcherActionType = 'email' | 'webhook' | 'index' | 'logging' | 'slack' | 'pagerduty' +export interface WatcherActionWebhook { + host: Host + port: integer +} + export type WatcherActions = Record export interface WatcherActivationState { @@ -14368,6 +14772,7 @@ export interface WatcherActivationStatus { } export interface WatcherAlwaysCondition { + [key: string]: never } export interface WatcherArrayCompareCondition { @@ -14410,8 +14815,7 @@ export type WatcherConditionType = 'always' | 'never' | 'script' | 'compare' | ' export type WatcherConnectionScheme = 'http' | 'https' -export interface WatcherCronExpression extends WatcherScheduleBase { -} +export type WatcherCronExpression = string export interface WatcherDailySchedule { at: string[] | WatcherTimeOfDay @@ -14527,6 +14931,7 @@ export interface WatcherHttpInputResponseResult { export interface WatcherIndex { index: IndexName doc_id?: Id + refresh?: Refresh } export interface WatcherIndexResult { @@ -14543,9 +14948,9 @@ export interface WatcherIndexResultSummary { } export interface WatcherIndicesOptions { - allow_no_indices: boolean - expand_wildcards: ExpandWildcards - ignore_unavailable: boolean + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean ignore_throttled?: boolean } @@ -14559,8 +14964,9 @@ export interface WatcherInputContainer { export type WatcherInputType = 'http' | 'search' | 'simple' export interface WatcherLogging { - level: string + level?: string text: string + category?: string } export interface WatcherLoggingResult { @@ -14570,6 +14976,7 @@ export interface WatcherLoggingResult { export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | 'june' | 'july' | 'august' | 'september' | 'october' | 'november' | 'december' export interface WatcherNeverCondition { + [key: string]: never } export interface WatcherPagerDutyActionEventResult { @@ -14606,11 +15013,16 @@ export interface WatcherPagerDutyResult { export type WatcherQuantifier = 'some' | 'all' -export type WatcherResponseContentType = 'json' | 'yaml' | 'text' - -export interface WatcherScheduleBase { +export interface WatcherQueryWatch { + _id: Id + status?: WatcherWatchStatus + watch?: WatcherWatch + _primary_term?: integer + _seq_no?: SequenceNumber } +export type WatcherResponseContentType = 'json' | 'yaml' | 'text' + export interface WatcherScheduleContainer { cron?: WatcherCronExpression daily?: WatcherDailySchedule @@ -14804,6 +15216,7 @@ export interface WatcherDeleteWatchResponse { export interface WatcherExecuteWatchRequest extends 
RequestBase { id?: Id debug?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { action_modes?: Record alternative_input?: Record @@ -14853,6 +15266,7 @@ export interface WatcherPutWatchRequest extends RequestBase { if_primary_term?: long if_sequence_number?: long version?: VersionNumber + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { actions?: Record condition?: WatcherConditionContainer @@ -14873,15 +15287,19 @@ export interface WatcherPutWatchResponse { } export interface WatcherQueryWatchesRequest extends RequestBase { - stub_a: string - stub_b: string + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - stub_c: string + from?: integer + size?: integer + query?: QueryDslQueryContainer + sort?: SearchSort + search_after?: SearchSortResults } } export interface WatcherQueryWatchesResponse { - stub: integer + count: integer + watches: WatcherQueryWatch[] } export interface WatcherStartRequest extends RequestBase { @@ -15396,6 +15814,7 @@ export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter } export interface SpecUtilsAdditionalProperties { + [key: string]: never } export interface SpecUtilsCommonQueryParameters { @@ -15407,6 +15826,7 @@ export interface SpecUtilsCommonQueryParameters { } export interface SpecUtilsAdditionalProperty { + [key: string]: never } export interface SpecUtilsCommonCatQueryParameters { @@ -15419,3 +15839,6 @@ export interface SpecUtilsCommonCatQueryParameters { v?: boolean } +export interface SpecUtilsOverloadOf { + [key: string]: never +} diff --git a/test/acceptance/events-order.test.js b/test/acceptance/events-order.test.js deleted file mode 100644 index 335fd4ba8..000000000 --- a/test/acceptance/events-order.test.js +++ /dev/null @@ -1,463 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -const { test } = require('tap') -const intoStream = require('into-stream') -const { Connection, events } = require('../../index') -const { - TimeoutError, - ConnectionError, - ResponseError, - RequestAbortedError, - SerializationError, - DeserializationError -} = require('../../lib/errors') -const { - Client, - buildServer, - connection: { - MockConnection, - MockConnectionError, - MockConnectionTimeout, - buildMockConnection - } -} = require('../utils') - -test('No errors', t => { - t.plan(10) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const order = [ - events.SERIALIZATION, - events.REQUEST, - events.DESERIALIZATION, - events.RESPONSE - ] - - client.on(events.SERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.SERIALIZATION) - }) - - client.on(events.REQUEST, (err, request) => { - t.error(err) - t.equal(order.shift(), events.REQUEST) - }) - - client.on(events.DESERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.DESERIALIZATION) - }) - - client.on(events.RESPONSE, (err, request) => { - t.error(err) - t.equal(order.shift(), events.RESPONSE) - }) - - client.info((err, result) => { - t.error(err) - t.equal(order.length, 0) - }) -}) - -test('Connection error', t => { - t.plan(10) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnectionError, - maxRetries: 1 - }) - - const order = [ - events.SERIALIZATION, - events.REQUEST, - events.REQUEST, - events.RESPONSE - ] - - client.on(events.SERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.SERIALIZATION) - }) - - client.on(events.REQUEST, (err, request) => { - t.error(err) - t.equal(order.shift(), events.REQUEST) - }) - - client.on(events.DESERIALIZATION, (_err, request) => { - t.fail('Should not be called') - }) - - client.on(events.RESPONSE, (err, request) => { - t.ok(err instanceof ConnectionError) - t.equal(order.shift(), events.RESPONSE) - }) - - client.info((err, result) => { - t.ok(err instanceof ConnectionError) - t.equal(order.length, 0) - }) -}) - -test('TimeoutError error', t => { - t.plan(10) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnectionTimeout, - maxRetries: 1 - }) - - const order = [ - events.SERIALIZATION, - events.REQUEST, - events.REQUEST, - events.RESPONSE - ] - - client.on(events.SERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.SERIALIZATION) - }) - - client.on(events.REQUEST, (err, request) => { - t.error(err) - t.equal(order.shift(), events.REQUEST) - }) - - client.on(events.DESERIALIZATION, (_err, request) => { - t.fail('Should not be called') - }) - - client.on(events.RESPONSE, (err, request) => { - t.ok(err instanceof TimeoutError) - t.equal(order.shift(), events.RESPONSE) - }) - - client.info((err, result) => { - t.ok(err instanceof TimeoutError) - t.equal(order.length, 0) - }) -}) - -test('RequestAbortedError error', t => { - t.plan(8) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnectionTimeout, - maxRetries: 1 - }) - - const order = [ - events.SERIALIZATION, - events.REQUEST, - events.RESPONSE - ] - - client.on(events.SERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.SERIALIZATION) - }) - - client.on(events.REQUEST, (err, request) => { - t.error(err) - t.equal(order.shift(), events.REQUEST) - }) - - 
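// Editorial note: the deleted tests above and below all assert the same
// invariant — for a single request the client emits its events in a fixed
// order (serialization → request → deserialization → response), with an extra
// request event per retry and no deserialization event when no response body
// is ever received. A minimal standalone sketch of the happy path, assuming a
// reachable cluster at the given URL:

'use strict'
const { Client, events } = require('@elastic/elasticsearch')

const client = new Client({ node: 'http://localhost:9200/' })
const seen = []
for (const name of [events.SERIALIZATION, events.REQUEST, events.DESERIALIZATION, events.RESPONSE]) {
  client.on(name, () => seen.push(name))
}
client.info(() => {
  console.log(seen) // ['serialization', 'request', 'deserialization', 'response']
})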
client.on(events.DESERIALIZATION, (_err, request) => { - t.fail('Should not be called') - }) - - client.on(events.RESPONSE, (err, request) => { - t.ok(err instanceof RequestAbortedError) - t.equal(order.shift(), events.RESPONSE) - }) - - const request = client.info((err, result) => { - t.ok(err instanceof RequestAbortedError) - t.equal(order.length, 0) - }) - - request.abort() -}) - -test('ResponseError error (no retry)', t => { - t.plan(10) - - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 400, - body: { hello: 'world' } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - maxRetries: 1 - }) - - const order = [ - events.SERIALIZATION, - events.REQUEST, - events.DESERIALIZATION, - events.RESPONSE - ] - - client.on(events.SERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.SERIALIZATION) - }) - - client.on(events.REQUEST, (err, request) => { - t.error(err) - t.equal(order.shift(), events.REQUEST) - }) - - client.on(events.DESERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.DESERIALIZATION) - }) - - client.on(events.RESPONSE, (err, request) => { - t.ok(err instanceof ResponseError) - t.equal(order.shift(), events.RESPONSE) - }) - - client.info((err, result) => { - t.ok(err instanceof ResponseError) - t.equal(order.length, 0) - }) -}) - -test('ResponseError error (with retry)', t => { - t.plan(14) - - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 504, - body: { hello: 'world' } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - maxRetries: 1 - }) - - const order = [ - events.SERIALIZATION, - events.REQUEST, - events.DESERIALIZATION, - events.REQUEST, - events.DESERIALIZATION, - events.RESPONSE - ] - - client.on(events.SERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.SERIALIZATION) - }) - - client.on(events.REQUEST, (err, request) => { - t.error(err) - t.equal(order.shift(), events.REQUEST) - }) - - client.on(events.DESERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.DESERIALIZATION) - }) - - client.on(events.RESPONSE, (err, request) => { - t.ok(err instanceof ResponseError) - t.equal(order.shift(), events.RESPONSE) - }) - - client.info((err, result) => { - t.ok(err instanceof ResponseError) - t.equal(order.length, 0) - }) -}) - -test('Serialization Error', t => { - t.plan(6) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - maxRetries: 1 - }) - - const order = [ - events.SERIALIZATION, - events.REQUEST - ] - - client.on(events.SERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.SERIALIZATION) - }) - - client.on(events.REQUEST, (err, request) => { - t.ok(err instanceof SerializationError) - t.equal(order.shift(), events.REQUEST) - }) - - client.on(events.DESERIALIZATION, (_err, request) => { - t.fail('Should not be called') - }) - - client.on(events.RESPONSE, (_err, request) => { - t.fail('Should not be called') - }) - - const body = {} - body.o = body - client.index({ index: 'test', body }, (err, result) => { - t.ok(err instanceof SerializationError) - t.equal(order.length, 0) - }) -}) - -test('Deserialization Error', t => { - t.plan(10) - - class MockConnection extends Connection { - request (params, callback) { - const body = '{"hello":"wor' - const 
stream = intoStream(body) - stream.statusCode = 200 - stream.headers = { - 'content-type': 'application/json;utf=8', - 'content-length': body.length, - connection: 'keep-alive', - date: new Date().toISOString() - } - stream.on('close', () => t.pass('Stream destroyed')) - process.nextTick(callback, null, stream) - return { abort () {} } - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - maxRetries: 1 - }) - - const order = [ - events.SERIALIZATION, - events.REQUEST, - events.DESERIALIZATION, - events.RESPONSE - ] - - client.on(events.SERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.SERIALIZATION) - }) - - client.on(events.REQUEST, (err, request) => { - t.error(err) - t.equal(order.shift(), events.REQUEST) - }) - - client.on(events.DESERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.DESERIALIZATION) - }) - - client.on(events.RESPONSE, (err, request) => { - t.ok(err instanceof DeserializationError) - t.equal(order.shift(), events.RESPONSE) - }) - - client.info((err, result) => { - t.ok(err instanceof DeserializationError) - t.equal(order.length, 0) - }) -}) - -test('Socket destroyed while reading the body', t => { - t.plan(14) - - function handler (req, res) { - const body = JSON.stringify({ hello: 'world' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.setHeader('Content-Length', body.length + '') - res.write(body.slice(0, -5)) - setTimeout(() => { - res.socket.destroy() - }, 500) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ node: `http://localhost:${port}`, maxRetries: 1 }) - - const order = [ - events.SERIALIZATION, - events.REQUEST, - events.DESERIALIZATION, - events.REQUEST, - events.DESERIALIZATION, - events.RESPONSE - ] - - client.on(events.SERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.SERIALIZATION) - }) - - client.on(events.REQUEST, (err, request) => { - t.error(err) - t.equal(order.shift(), events.REQUEST) - }) - - client.on(events.DESERIALIZATION, (err, request) => { - t.error(err) - t.equal(order.shift(), events.DESERIALIZATION) - }) - - client.on(events.RESPONSE, (err, request) => { - t.ok(err instanceof ConnectionError) - t.equal(order.shift(), events.RESPONSE) - }) - - client.info((err, result) => { - t.ok(err instanceof ConnectionError) - t.equal(order.length, 0) - server.stop() - }) - }) -}) diff --git a/test/acceptance/observability.test.js b/test/acceptance/observability.test.js deleted file mode 100644 index df889f22c..000000000 --- a/test/acceptance/observability.test.js +++ /dev/null @@ -1,404 +0,0 @@ -'use strict' - -const { test } = require('tap') -const FakeTimers = require('@sinonjs/fake-timers') -const { Transport } = require('../../index') -const { - Client, - connection: { MockConnection, MockConnectionSniff } -} = require('../utils') -const noop = () => {} - -test('Request id', t => { - t.test('Default generateRequestId', t => { - const { generateRequestId } = Transport.internals - t.type(generateRequestId, 'function') - - const genReqId = generateRequestId() - t.type(genReqId, 'function') - - for (let i = 1; i <= 10; i++) { - t.equal(genReqId(), i) - } - - t.end() - }) - - t.test('Custom generateRequestId', t => { - t.plan(7) - - const options = { context: { winter: 'is coming' } } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - generateRequestId: function (requestParams, 
requestOptions) { - t.match(requestParams, { method: 'GET', path: '/' }) - t.match(requestOptions, options) - return 'custom-id' - } - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.equal(meta.request.id, 'custom-id') - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.equal(meta.request.id, 'custom-id') - }) - - client.info({}, options, t.error) - }) - - t.test('Custom request id in method options', t => { - t.plan(5) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.equal(meta.request.id, 'custom-id') - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.equal(meta.request.id, 'custom-id') - }) - - client.info({}, { id: 'custom-id' }, t.error) - }) - - t.test('Sniff and correlation id', t => { - t.test('sniffOnStart - should autogenerate the id', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnectionSniff, - sniffOnStart: true - }) - - client.on('sniff', (err, { meta }) => { - t.error(err) - t.equal(meta.request.id, 1) - }) - }) - - t.test('sniffOnConnectionFault - should use the request id', t => { - t.plan(5) - - const client = new Client({ - nodes: ['http://localhost:9200/', 'http://localhost:9201/'], - Connection: MockConnectionSniff, - sniffOnConnectionFault: true, - maxRetries: 0 - }) - - client.on('request', (e, { meta }) => { - t.equal(meta.request.id, 'custom') - }) - - client.on('response', (e, { meta }) => { - t.equal(meta.request.id, 'custom') - }) - - client.on('sniff', (e, { meta }) => { - t.equal(meta.request.id, 'custom') - }) - - client.transport.request({ - path: '/500', - method: 'GET' - }, { - id: 'custom', - headers: { timeout: 'true' } - }, noop) - }) - - t.end() - }) - - t.test('Resurrect should use the same request id of the request that starts it', t => { - t.plan(2) - - const clock = FakeTimers.install({ toFake: ['Date'] }) - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - sniffOnConnectionFault: true, - maxRetries: 0 - }) - - const conn = client.connectionPool.getConnection() - client.connectionPool.markDead(conn) - clock.tick(1000 * 61) - - client.on('resurrect', (err, meta) => { - t.error(err) - t.equal(meta.request.id, 'custom') - clock.uninstall() - }) - - client.info({}, { id: 'custom' }, noop) - }) - - t.end() -}) - -test('Request context', t => { - t.test('no value', t => { - t.plan(5) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.equal(meta.context, null) - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.equal(meta.context, null) - }) - - client.info(t.error) - }) - - t.test('custom value', t => { - t.plan(5) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.same(meta.context, { winter: 'is coming' }) - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.same(meta.context, { winter: 'is coming' }) - }) - - client.info({}, { context: { winter: 'is coming' } }, t.error) - }) - - t.test('global value', t => { - t.plan(5) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - context: { winter: 'is coming' 
} - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.same(meta.context, { winter: 'is coming' }) - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.same(meta.context, { winter: 'is coming' }) - }) - - client.info(t.error) - }) - - t.test('override global', t => { - t.plan(5) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - context: { winter: 'is coming' } - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.same(meta.context, { winter: 'has come' }) - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.same(meta.context, { winter: 'has come' }) - }) - - client.info({}, { context: { winter: 'has come' } }, t.error) - }) - - t.end() -}) - -test('Client name', t => { - t.test('Property of the client instance', t => { - const client = new Client({ - node: 'http://localhost:9200/', - name: 'cluster' - }) - t.equal(client.name, 'cluster') - t.end() - }) - - t.test('Is present in the event metadata (as string)', t => { - t.plan(6) - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - name: 'cluster' - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.equal(meta.name, 'cluster') - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.equal(meta.name, 'cluster') - }) - - client.info((err, { meta }) => { - t.error(err) - t.equal(meta.name, 'cluster') - }) - }) - - t.test('Is present in the event metadata (as symbol)', t => { - t.plan(6) - const symbol = Symbol('cluster') - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - name: symbol - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.equal(meta.name, symbol) - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.equal(meta.name, symbol) - }) - - client.info((err, { meta }) => { - t.error(err) - t.equal(meta.name, symbol) - }) - }) - - t.test('Sniff and client name', t => { - t.test('sniffOnStart', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnectionSniff, - sniffOnStart: true - }) - - client.on('sniff', (err, { meta }) => { - t.error(err) - t.equal(meta.name, 'elasticsearch-js') - }) - }) - - t.test('sniffOnConnectionFault', t => { - t.plan(5) - - const client = new Client({ - nodes: ['http://localhost:9200/', 'http://localhost:9201/'], - Connection: MockConnectionSniff, - sniffOnConnectionFault: true, - maxRetries: 0 - }) - - client.on('request', (e, { meta }) => { - t.equal(meta.name, 'elasticsearch-js') - }) - - client.on('response', (e, { meta }) => { - t.equal(meta.name, 'elasticsearch-js') - }) - - client.on('sniff', (e, { meta }) => { - t.equal(meta.name, 'elasticsearch-js') - }) - - client.transport.request({ - path: '/500', - method: 'GET' - }, { - headers: { timeout: 'true' } - }, noop) - }) - - t.end() - }) - - t.test('Resurrect should have the client name configured', t => { - t.plan(2) - - const clock = FakeTimers.install({ toFake: ['Date'] }) - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - sniffOnConnectionFault: true, - maxRetries: 0 - }) - - const conn = client.connectionPool.getConnection() - client.connectionPool.markDead(conn) - clock.tick(1000 * 61) - - client.on('resurrect', (err, meta) => { - t.error(err) - t.equal(meta.name, 'elasticsearch-js') - clock.uninstall() - }) - - 
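// Editorial note: taken together, the deleted observability tests pin down
// the event-metadata contract: `meta.request.id` (autogenerated per client,
// or supplied via the per-request `id` option or a custom `generateRequestId`),
// `meta.context` (per-request value taking precedence over the client-level
// default, `null` when neither is set), and `meta.name` (the client or child
// client name, 'elasticsearch-js' by default). A short sketch with
// hypothetical values:

const client = new Client({
  node: 'http://localhost:9200/',
  name: 'cluster',
  context: { winter: 'is coming' }
})
client.on('response', (err, { meta }) => {
  if (err) return
  console.log(meta.name, meta.request.id, meta.context)
  // => 'cluster' 'custom-id' { winter: 'has come' }
})
client.info({}, { id: 'custom-id', context: { winter: 'has come' } }, () => {})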
client.info({}, { id: 'custom' }, noop) - }) - - t.test('Resurrect should have the client name configured (child client)', t => { - t.plan(2) - - const clock = FakeTimers.install({ toFake: ['Date'] }) - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - sniffOnConnectionFault: true, - maxRetries: 0 - }) - - const child = client.child({ - name: 'child-client' - }) - - const conn = client.connectionPool.getConnection() - client.connectionPool.markDead(conn) - clock.tick(1000 * 61) - - client.on('resurrect', (err, meta) => { - t.error(err) - t.equal(meta.name, 'child-client') - clock.uninstall() - }) - - child.info({}, { id: 'custom' }, noop) - }) - - t.end() -}) diff --git a/test/acceptance/product-check.test.js b/test/acceptance/product-check.test.js deleted file mode 100644 index 2ed7cec88..000000000 --- a/test/acceptance/product-check.test.js +++ /dev/null @@ -1,1348 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { test } = require('tap') -const { Client, errors } = require('../../') -const { - connection: { - MockConnectionTimeout, - MockConnectionError, - buildMockConnection - } -} = require('../utils') - -test('No errors v8', t => { - t.plan(7) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - headers: { - 'x-elastic-product': 'Elasticsearch' - }, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '8.0.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - t.error(err) - const req = requests.shift() - t.equal(event.meta.request.params.method, req.method) - t.equal(event.meta.request.params.path, req.path) - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.error(err) - }) -}) - -test('Errors v8', t => { - t.plan(3) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '8.0.0-SNAPSHOT', - build_flavor: 'default', - 
build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - const req = requests.shift() - if (req.method === 'GET') { - t.error(err) - } else { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - } - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - }) -}) - -test('No errors ≤v7.13', t => { - t.plan(7) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '7.13.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - t.error(err) - const req = requests.shift() - t.equal(event.meta.request.params.method, req.method) - t.equal(event.meta.request.params.path, req.path) - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.error(err) - }) -}) - -test('Errors ≤v7.13 (tagline)', t => { - t.plan(3) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '7.13.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'Other' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - const req = requests.shift() - if (req.method === 'GET') { - t.error(err) - } else { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - } - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not 
Elasticsearch and we do not support this unknown product.') - }) -}) - -test('Errors ≤v7.13 (build flavor)', t => { - t.plan(5) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '7.13.0-SNAPSHOT', - build_flavor: 'other', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - const req = requests.shift() - if (req.method === 'GET') { - t.error(err) - } else { - t.equal(err.message, 'The client noticed that the server is not a supported distribution of Elasticsearch') - } - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not a supported distribution of Elasticsearch') - }) - - setTimeout(() => { - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not a supported distribution of Elasticsearch') - }) - }, 100) -}) - -test('No errors v6', t => { - t.plan(7) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '6.8.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - t.error(err) - const req = requests.shift() - t.equal(event.meta.request.params.method, req.method) - t.equal(event.meta.request.params.path, req.path) - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.error(err) - }) -}) - -test('Errors v6', t => { - t.plan(3) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '6.8.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'Other' - } - } - } - 
}) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - const req = requests.shift() - if (req.method === 'GET') { - t.error(err) - } else { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - } - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - }) -}) - -test('Auth error - 401', t => { - t.plan(9) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 401, - headers: { - 'x-elastic-product': 'Elasticsearch' - }, - body: { - security: 'exception' - } - } - } - }) - - process.on('warning', onWarning) - function onWarning (warning) { - t.equal(warning.name, 'ProductNotSupportedSecurityError') - t.equal(warning.message, 'The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. Some functionality may not be compatible if the server is running an unsupported product.') - } - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - t.error(err) - const req = requests.shift() - t.equal(event.meta.request.params.method, req.method) - t.equal(event.meta.request.params.path, req.path) - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.statusCode, 401) - process.removeListener('warning', onWarning) - }) -}) - -test('Auth error - 403', t => { - t.plan(9) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 403, - headers: { - 'x-elastic-product': 'Elasticsearch' - }, - body: { - security: 'exception' - } - } - } - }) - - process.on('warning', onWarning) - function onWarning (warning) { - t.equal(warning.name, 'ProductNotSupportedSecurityError') - t.equal(warning.message, 'The client is unable to verify that the server is Elasticsearch due to security privileges on the server side. 
Some functionality may not be compatible if the server is running an unsupported product.') - } - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - t.error(err) - const req = requests.shift() - t.equal(event.meta.request.params.method, req.method) - t.equal(event.meta.request.params.path, req.path) - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.statusCode, 403) - process.removeListener('warning', onWarning) - }) -}) - -test('500 error', t => { - t.plan(8) - - let count = 0 - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const MockConnection = buildMockConnection({ - onRequest (params) { - const req = requests.shift() - t.equal(req.method, params.method) - t.equal(req.path, params.path) - - if (count++ >= 1) { - return { - statusCode: 200, - headers: { - 'x-elastic-product': 'Elasticsearch' - }, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '8.0.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } else { - return { - statusCode: 500, - headers: { - 'x-elastic-product': 'Elasticsearch' - }, - body: { - error: 'kaboom' - } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, '{"error":"kaboom"}') - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.error(err) - }) - }) -}) - -test('TimeoutError', t => { - t.plan(3) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnectionTimeout, - maxRetries: 0 - }) - - client.on('request', (err, event) => { - const req = requests.shift() - if (req.method === 'GET') { - t.error(err) - } else { - t.equal(err.message, 'Request timed out') - } - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'Request timed out') - }) -}) - -test('ConnectionError', t => { - t.plan(3) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnectionError, - maxRetries: 0 - }) - - client.on('request', (err, event) => { - const req = requests.shift() - if (req.method === 'GET') { - t.error(err) - } else { - t.equal(err.message, 'Kaboom') - } - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'Kaboom') - }) -}) - -test('Multiple subsequent calls, no errors', t => { - t.plan(15) - const MockConnection = 
buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - headers: { - 'x-elastic-product': 'Elasticsearch' - }, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '8.0.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }, { - method: 'HEAD', - path: '/' - }, { - method: 'POST', - path: '/foo/_doc' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - t.error(err) - const req = requests.shift() - t.equal(event.meta.request.params.method, req.method) - t.equal(event.meta.request.params.path, req.path) - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.error(err) - }) - - client.ping((err, result) => { - t.error(err) - }) - - client.index({ - index: 'foo', - body: { - foo: 'bar' - } - }, (err, result) => { - t.error(err) - }) -}) - -test('Multiple subsequent calls, with errors', t => { - t.plan(7) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '8.0.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }, { - method: 'HEAD', - path: '/' - }, { - method: 'POST', - path: '/foo/_doc' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - const req = requests.shift() - if (req.method === 'GET') { - t.error(err) - } else { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - } - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - }) - - client.ping((err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - }) - - client.index({ - index: 'foo', - body: { - foo: 'bar' - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - }) -}) - -test('Later successful call', t => { - t.plan(11) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - headers: { - 'x-elastic-product': 'Elasticsearch' - }, - body: { - name: '1ef419078577', - cluster_name: 
'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '8.0.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - t.error(err) - const req = requests.shift() - t.equal(event.meta.request.params.method, req.method) - t.equal(event.meta.request.params.path, req.path) - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.error(err) - }) - - setTimeout(() => { - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.error(err) - }) - }, 100) -}) - -test('Later errored call', t => { - t.plan(5) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '8.0.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - const req = requests.shift() - if (req.method === 'GET') { - t.error(err) - } else { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - } - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - }) - - setTimeout(() => { - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - }) - }, 100) -}) - -test('Errors ≤v5', t => { - t.plan(3) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '5.0.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) 
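// Editorial note: the deleted product-check suite documents the verification
// matrix — the first API call lazily triggers a GET / and the request fails
// when the server reports v5/v6, a non-default build flavor, a bad tagline,
// or (for v8+) a missing 'x-elastic-product: Elasticsearch' header, while a
// 401/403 on the check only emits a ProductNotSupportedSecurityError warning.
// A sketch of handling the failure — the error class name is an assumption
// based on the 7.x `lib/errors` exports used in these tests:

const { Client, errors } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })
client.search({ index: 'foo', body: { query: { match_all: {} } } }, (err, result) => {
  if (err instanceof errors.ProductNotSupportedError) {
    console.error('unsupported server:', err.message)
  }
})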
- - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - const req = requests.shift() - if (req.method === 'GET') { - t.error(err) - } else { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - } - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - }) -}) - -test('Bad info response', t => { - t.plan(3) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - tagline: 'You Know, for Search' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - const req = requests.shift() - if (req.method === 'GET') { - t.error(err) - } else { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - } - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - }) -}) - -test('No multiple checks with child clients', t => { - t.plan(11) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - headers: { - 'x-elastic-product': 'Elasticsearch' - }, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '8.0.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) - - const requests = [{ - method: 'GET', - path: '/' - }, { - method: 'POST', - path: '/foo/_search' - }, { - method: 'POST', - path: '/foo/_search' - }] - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - t.error(err) - const req = requests.shift() - t.equal(event.meta.request.params.method, req.method) - t.equal(event.meta.request.params.path, req.path) - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.error(err) - }) - - setTimeout(() => { - const child = client.child() - child.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.error(err) - }) - }, 100) -}) - -test('Observability events should have all the expected properties', t => { - t.plan(5) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 
'cQ5pAMvRRTyEzObH4L5mTA', - tagline: 'You Know, for Search' - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (e, event) => { - t.ok(event.meta.request.params) - t.ok(event.meta.request.options) - }) - - client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.equal(err.message, 'The client noticed that the server is not Elasticsearch and we do not support this unknown product.') - }) -}) - -test('Abort a request while running the product check', t => { - t.plan(4) - const MockConnection = buildMockConnection({ - onRequest (params) { - return { - statusCode: 200, - headers: { - 'x-elastic-product': 'Elasticsearch' - }, - body: { - name: '1ef419078577', - cluster_name: 'docker-cluster', - cluster_uuid: 'cQ5pAMvRRTyEzObH4L5mTA', - version: { - number: '8.0.0-SNAPSHOT', - build_flavor: 'default', - build_type: 'docker', - build_hash: '5fb4c050958a6b0b6a70a6fb3e616d0e390eaac3', - build_date: '2021-07-10T01:45:02.136546168Z', - build_snapshot: true, - lucene_version: '8.9.0', - minimum_wire_compatibility_version: '7.15.0', - minimum_index_compatibility_version: '7.0.0' - }, - tagline: 'You Know, for Search' - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, event) => { - if (event.meta.request.params.path.includes('search')) { - t.ok(err instanceof errors.RequestAbortedError) - } - }) - - // the response event won't be executed for the search - client.on('response', (err, event) => { - t.error(err) - t.equal(event.meta.request.params.path, '/') - }) - - const req = client.search({ - index: 'foo', - body: { - query: { - match_all: {} - } - } - }, (err, result) => { - t.ok(err instanceof errors.RequestAbortedError) - }) - - setImmediate(() => req.abort()) -}) diff --git a/test/acceptance/proxy.test.js b/test/acceptance/proxy.test.js deleted file mode 100644 index 29dd6b2a3..000000000 --- a/test/acceptance/proxy.test.js +++ /dev/null @@ -1,149 +0,0 @@ -'use strict' - -// We are using self-signed certificates -process.env.NODE_TLS_REJECT_UNAUTHORIZED = 0 - -const { test } = require('tap') -const { - Client, - buildProxy: { - createProxy, - createSecureProxy, - createServer, - createSecureServer - } -} = require('../utils') - -test('http-http proxy support', async t => { - const server = await createServer() - const proxy = await createProxy() - server.on('request', (req, res) => { - t.equal(req.url, '/_cluster/health') - res.setHeader('content-type', 'application/json') - res.end(JSON.stringify({ hello: 'world' })) - }) - - const client = new Client({ - node: `http://${server.address().address}:${server.address().port}`, - proxy: `http://${proxy.address().address}:${proxy.address().port}` - }) - - const response = await client.cluster.health() - t.same(response.body, { hello: 'world' }) - - server.close() - proxy.close() -}) - -test('http-https proxy support', async t => { - const server = await createSecureServer() - const proxy = await createProxy() - server.on('request', (req, res) => { - t.equal(req.url, '/_cluster/health') - res.setHeader('content-type', 'application/json') - res.end(JSON.stringify({ hello: 'world' })) - }) - - const client = new Client({ - node: `https://${server.address().address}:${server.address().port}`, - proxy: `http://${proxy.address().address}:${proxy.address().port}` - }) - - const response = await 
client.cluster.health() - t.same(response.body, { hello: 'world' }) - - server.close() - proxy.close() -}) - -test('https-http proxy support', async t => { - const server = await createServer() - const proxy = await createSecureProxy() - server.on('request', (req, res) => { - t.equal(req.url, '/_cluster/health') - res.setHeader('content-type', 'application/json') - res.end(JSON.stringify({ hello: 'world' })) - }) - - const client = new Client({ - node: `http://${server.address().address}:${server.address().port}`, - proxy: `https://${proxy.address().address}:${proxy.address().port}` - }) - - const response = await client.cluster.health() - t.same(response.body, { hello: 'world' }) - - server.close() - proxy.close() -}) - -test('https-https proxy support', async t => { - const server = await createSecureServer() - const proxy = await createSecureProxy() - server.on('request', (req, res) => { - t.equal(req.url, '/_cluster/health') - res.setHeader('content-type', 'application/json') - res.end(JSON.stringify({ hello: 'world' })) - }) - - const client = new Client({ - node: `https://${server.address().address}:${server.address().port}`, - proxy: `https://${proxy.address().address}:${proxy.address().port}` - }) - - const response = await client.cluster.health() - t.same(response.body, { hello: 'world' }) - - server.close() - proxy.close() -}) - -test('http basic authentication', async t => { - const server = await createServer() - const proxy = await createProxy() - server.on('request', (req, res) => { - t.equal(req.url, '/_cluster/health') - res.setHeader('content-type', 'application/json') - res.end(JSON.stringify({ hello: 'world' })) - }) - - proxy.authenticate = function (req, fn) { - fn(null, req.headers['proxy-authorization'] === `Basic ${Buffer.from('hello:world').toString('base64')}`) - } - - const client = new Client({ - node: `http://${server.address().address}:${server.address().port}`, - proxy: `http://hello:world@${proxy.address().address}:${proxy.address().port}` - }) - - const response = await client.cluster.health() - t.same(response.body, { hello: 'world' }) - - server.close() - proxy.close() -}) - -test('https basic authentication', async t => { - const server = await createSecureServer() - const proxy = await createProxy() - server.on('request', (req, res) => { - t.equal(req.url, '/_cluster/health') - res.setHeader('content-type', 'application/json') - res.end(JSON.stringify({ hello: 'world' })) - }) - - proxy.authenticate = function (req, fn) { - fn(null, req.headers['proxy-authorization'] === `Basic ${Buffer.from('hello:world').toString('base64')}`) - } - - const client = new Client({ - node: `https://${server.address().address}:${server.address().port}`, - proxy: `http://hello:world@${proxy.address().address}:${proxy.address().port}` - }) - - const response = await client.cluster.health() - t.same(response.body, { hello: 'world' }) - - server.close() - proxy.close() -}) diff --git a/test/acceptance/resurrect.test.js b/test/acceptance/resurrect.test.js deleted file mode 100644 index c2e43a6c6..000000000 --- a/test/acceptance/resurrect.test.js +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const FakeTimers = require('@sinonjs/fake-timers') -const workq = require('workq') -const { Client, buildCluster } = require('../utils') -const { events } = require('../../index') - -/** - * The aim of this test is to verify how the resurrect logic behaves - * in a multi node situation. - * The `buildCluster` utility can boot an arbitrary number - * of nodes, that you can kill or spawn at your will. - * The resurrect API can be tested with its callback - * or by using the `resurrect` event (to handle automatically - * triggered resurrections). - */ - -test('Should execute the recurrect API with the ping strategy', t => { - t.plan(8) - - const clock = FakeTimers.install({ toFake: ['Date'] }) - const q = workq() - - buildCluster({ numberOfNodes: 2 }, cluster => { - const client = new Client({ - nodes: [{ - url: new URL(cluster.nodes[Object.keys(cluster.nodes)[0]].url), - id: 'node0' - }, { - url: new URL(cluster.nodes[Object.keys(cluster.nodes)[1]].url), - id: 'node1' - }], - maxRetries: 0 - }) - - client.on(events.RESURRECT, (err, meta) => { - t.error(err) - t.equal(meta.strategy, 'ping') - t.notOk(meta.isAlive) - t.equal(meta.connection.id, 'node0') - t.equal(meta.name, 'elasticsearch-js') - t.same(meta.request, { id: 2 }) - }) - - q.add((q, done) => { - cluster.kill('node0', done) - }) - - q.add((q, done) => { - client.info((err, result) => { - t.ok(err) - done() - }) - }) - - q.add((q, done) => { - clock.tick(1000 * 61) - client.info((err, result) => { - t.error(err) - done() - }) - }) - - t.teardown(() => { - clock.uninstall() - cluster.shutdown() - }) - }) -}) - -test('Resurrect a node and handle 502/3/4 status code', t => { - t.plan(15) - - const clock = FakeTimers.install({ toFake: ['Date'] }) - const q = workq() - - let count = 0 - function handler (req, res) { - res.statusCode = count++ < 2 ? 
502 : 200 - res.setHeader('content-type', 'application/json') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildCluster({ handler, numberOfNodes: 2 }, ({ nodes, shutdown }) => { - const client = new Client({ - nodes: [{ - url: new URL(nodes[Object.keys(nodes)[0]].url), - id: 'node0' - }, { - url: new URL(nodes[Object.keys(nodes)[1]].url), - id: 'node1' - }], - maxRetries: 0 - }) - - let idCount = 2 - client.on(events.RESURRECT, (err, meta) => { - t.error(err) - t.equal(meta.strategy, 'ping') - t.equal(meta.connection.id, 'node0') - t.equal(meta.name, 'elasticsearch-js') - t.same(meta.request, { id: idCount++ }) - if (count < 4) { - t.notOk(meta.isAlive) - } else { - t.ok(meta.isAlive) - } - }) - - q.add((q, done) => { - client.info((err, result) => { - t.ok(err) - done() - }) - }) - - q.add((q, done) => { - clock.tick(1000 * 61) - client.info((err, result) => { - t.error(err) - done() - }) - }) - - q.add((q, done) => { - clock.tick(1000 * 10 * 60) - client.info((err, result) => { - t.error(err) - done() - }) - }) - - t.teardown(() => { - clock.uninstall() - shutdown() - }) - }) -}) - -test('Should execute the recurrect API with the optimistic strategy', t => { - t.plan(8) - - const clock = FakeTimers.install({ toFake: ['Date'] }) - const q = workq() - - buildCluster({ numberOfNodes: 2 }, cluster => { - const client = new Client({ - nodes: [{ - url: new URL(cluster.nodes[Object.keys(cluster.nodes)[0]].url), - id: 'node0' - }, { - url: new URL(cluster.nodes[Object.keys(cluster.nodes)[1]].url), - id: 'node1' - }], - maxRetries: 0, - resurrectStrategy: 'optimistic' - }) - - client.on(events.RESURRECT, (err, meta) => { - t.error(err) - t.equal(meta.strategy, 'optimistic') - t.ok(meta.isAlive) - t.equal(meta.connection.id, 'node0') - t.equal(meta.name, 'elasticsearch-js') - t.same(meta.request, { id: 2 }) - }) - - q.add((q, done) => { - cluster.kill('node0', done) - }) - - q.add((q, done) => { - client.info((err, result) => { - t.ok(err) - done() - }) - }) - - q.add((q, done) => { - clock.tick(1000 * 61) - client.info((err, result) => { - t.error(err) - done() - }) - }) - - t.teardown(() => { - clock.uninstall() - cluster.shutdown() - }) - }) -}) diff --git a/test/acceptance/sniff.test.js b/test/acceptance/sniff.test.js deleted file mode 100644 index d18e8a2a9..000000000 --- a/test/acceptance/sniff.test.js +++ /dev/null @@ -1,298 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const FakeTimers = require('@sinonjs/fake-timers') -const workq = require('workq') -const { Client, buildCluster } = require('../utils') -const { Connection, Transport, events, errors } = require('../../index') - -/** - * The aim of this test is to verify how the sniffer behaves - * in a multi node situation. - * The `buildCluster` utility can boot an arbitrary number - * of nodes, that you can kill or spawn at your will. - * The sniffer component can be tested with its callback - * or by using the `sniff` event (to handle automatically - * triggered sniff). - */ - -test('Should update the connection pool', t => { - t.plan(10) - - buildCluster(({ nodes, shutdown }) => { - const client = new Client({ - node: nodes[Object.keys(nodes)[0]].url - }) - t.equal(client.connectionPool.size, 1) - - client.on(events.SNIFF, (err, request) => { - t.error(err) - t.equal( - request.meta.sniff.reason, - Transport.sniffReasons.DEFAULT - ) - }) - - // run the sniffer - client.transport.sniff((err, hosts) => { - t.error(err) - t.equal(hosts.length, 4) - - const ids = Object.keys(nodes) - for (let i = 0; i < hosts.length; i++) { - const id = ids[i] - // the first node will be an update of the existing one - if (id === 'node0') { - t.same(hosts[i], { - url: new URL(nodes[id].url), - id: id, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - } else { - t.same(hosts[i], { - url: new URL(nodes[id].url), - id: id, - caFingerprint: null, - roles: { - master: true, - data: true, - ingest: true, - ml: false - }, - ssl: null, - agent: null, - proxy: null - }) - } - } - - t.equal(client.connectionPool.size, 4) - }) - t.teardown(shutdown) - }) -}) - -test('Should handle hostnames in publish_address', t => { - t.plan(10) - - buildCluster({ hostPublishAddress: true }, ({ nodes, shutdown }) => { - const client = new Client({ - node: nodes[Object.keys(nodes)[0]].url - }) - t.equal(client.connectionPool.size, 1) - - client.on(events.SNIFF, (err, request) => { - t.error(err) - t.equal( - request.meta.sniff.reason, - Transport.sniffReasons.DEFAULT - ) - }) - - // run the sniffer - client.transport.sniff((err, hosts) => { - t.error(err) - t.equal(hosts.length, 4) - - for (let i = 0; i < hosts.length; i++) { - // the first node will be an update of the existing one - t.equal(hosts[i].url.hostname, 'localhost') - } - - t.equal(client.connectionPool.size, 4) - }) - t.teardown(shutdown) - }) -}) - -test('Sniff interval', t => { - t.plan(11) - const clock = FakeTimers.install({ toFake: ['Date'] }) - const q = workq() - - buildCluster(({ nodes, shutdown, kill }) => { - const client = new Client({ - node: nodes[Object.keys(nodes)[0]].url, - sniffInterval: 50 - }) - - // this event will be triggered by api calls - client.on(events.SNIFF, (err, request) => { - t.error(err) - const { hosts, reason } = request.meta.sniff - t.equal( - client.connectionPool.size, - hosts.length - ) - t.equal(reason, Transport.sniffReasons.SNIFF_INTERVAL) - }) - - t.equal(client.connectionPool.size, 1) - - q.add((q, done) => { - clock.tick(51) - client.info(err => { - t.error(err) - waitSniffEnd(() => { - t.equal(client.connectionPool.size, 4) - done() - }) - }) - }) - - q.add((q, done) => { - kill('node1', done) - }) - - q.add((q, done) => { - clock.tick(51) - client.info(err => { - t.error(err) - waitSniffEnd(() => { - t.equal(client.connectionPool.size, 3) - done() - }) - }) - }) - - t.teardown(shutdown) - - // it can happen that the sniff 
operation resolves - // after the API call that trioggered it, so to - // be sure that we are checking the connectionPool size - // at the right moment, we verify that the transport - // is no longer sniffing - function waitSniffEnd (callback) { - if (client.transport._isSniffing) { - setTimeout(waitSniffEnd, 500, callback) - } else { - callback() - } - } - }) -}) - -test('Sniff on start', t => { - t.plan(4) - - buildCluster(({ nodes, shutdown, kill }) => { - const client = new Client({ - node: nodes[Object.keys(nodes)[0]].url, - sniffOnStart: true - }) - - client.on(events.SNIFF, (err, request) => { - t.error(err) - const { hosts, reason } = request.meta.sniff - t.equal( - client.connectionPool.size, - hosts.length - ) - t.equal(reason, Transport.sniffReasons.SNIFF_ON_START) - }) - - t.equal(client.connectionPool.size, 1) - t.teardown(shutdown) - }) -}) - -test('Should not close living connections', t => { - t.plan(3) - - buildCluster(({ nodes, shutdown, kill }) => { - class MyConnection extends Connection { - close () { - t.fail('Should not be called') - } - } - - const client = new Client({ - node: { - url: new URL(nodes[Object.keys(nodes)[0]].url), - id: 'node1' - }, - Connection: MyConnection - }) - - t.equal(client.connectionPool.size, 1) - client.transport.sniff((err, hosts) => { - t.error(err) - t.equal( - client.connectionPool.size, - hosts.length - ) - }) - - t.teardown(shutdown) - }) -}) - -test('Sniff on connection fault', t => { - t.plan(5) - - buildCluster(({ nodes, shutdown, kill }) => { - class MyConnection extends Connection { - request (params, callback) { - if (this.id === 'http://localhost:9200/') { - callback(new errors.ConnectionError('kaboom'), null) - return {} - } else { - return super.request(params, callback) - } - } - } - - const client = new Client({ - nodes: [ - 'http://localhost:9200/', - nodes[Object.keys(nodes)[0]].url - ], - maxRetries: 0, - sniffOnConnectionFault: true, - Connection: MyConnection - }) - - t.equal(client.connectionPool.size, 2) - // this event will be triggered by the connection fault - client.on(events.SNIFF, (err, request) => { - t.error(err) - const { hosts, reason } = request.meta.sniff - t.equal( - client.connectionPool.size, - hosts.length - ) - t.equal(reason, Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT) - }) - - client.info((err, result) => { - t.ok(err instanceof errors.ConnectionError) - }) - - t.teardown(shutdown) - }) -}) diff --git a/test/benchmarks/macro/complex.bench.js b/test/benchmarks/macro/complex.bench.js deleted file mode 100644 index fedc5a1ea..000000000 --- a/test/benchmarks/macro/complex.bench.js +++ /dev/null @@ -1,101 +0,0 @@ -'use strict' - -// This file must be run with --max-old-space-size=8192 -// because we need more than 1Gb of memory -// eg: node --max-old-space-size=8192 complex.bench.js - -const { Client } = require('../../../index') -const { statSync, createReadStream } = require('fs') -const { join } = require('path') -const split = require('split2') -const { bench, beforeEach, afterEach } = require('../suite')({ - report: { - url: process.env.ES_RESULT_CLUSTER_URL, - username: process.env.ES_RESULT_CLUSTER_USERNAME, - password: process.env.ES_RESULT_CLUSTER_PASSWORD - } -}) - -let stackoverflow = [] -const stackoverflowPath = join( - __dirname, - 'fixtures', - 'stackoverflow.json' -) -const stackoverflowInfo = { - name: 'stackoverflow.json', - size: statSync(join(stackoverflowPath)).size, - num_documents: 2000000 -} - -const INDEX = 'stackoverflow' -const node = 
process.env.ELASTICSEARCH_URL || 'http://localhost:9200/' - -const client = new Client({ node }) - -beforeEach(async b => { - if (stackoverflow.length === 0) { - stackoverflow = await readSOfile() - } - b.client = client - await b.client.indices.delete({ index: 'test-*' }) -}) - -afterEach(async b => { - await b.client.indices.delete({ index: 'test-*' }) -}) - -bench('Bulk index documents', { - warmup: 1, - measure: 1, - iterations: 1, - dataset: stackoverflowInfo, - action: 'bulk' -}, async b => { - b.start() - for (let i = 0; i < stackoverflow.length; i++) { - await b.client.bulk({ body: stackoverflow[i] }) - } - b.end() -}) - -bench('Complex search request', { - warmup: 3, - measure: 5, - iterations: 100, - dataset: stackoverflowInfo, - action: 'search' -}, async b => { - b.start() - for (let i = 0; i < b.iterations; i++) { - await b.client.search({ - index: INDEX, - body: { - query: { - match: { title: 'safe' } - } - } - }) - } - b.end() -}) - -function readSOfile () { - let i = 0 - const stackoverflow = [] - return new Promise((resolve, reject) => { - createReadStream(stackoverflowPath) - .pipe(split(JSON.parse)) - .on('data', chunk => { - stackoverflow[i] = stackoverflow[i] || [] - stackoverflow[i].push({ index: { _index: INDEX } }) - stackoverflow[i].push(chunk) - // 10k documents - if (stackoverflow[i].length >= 10000 * 2) { - i++ - } - }) - .on('error', reject) - .on('end', () => resolve(stackoverflow)) - }) -} diff --git a/test/benchmarks/macro/simple.bench.js b/test/benchmarks/macro/simple.bench.js deleted file mode 100644 index f8c735bf7..000000000 --- a/test/benchmarks/macro/simple.bench.js +++ /dev/null @@ -1,269 +0,0 @@ -'use strict' - -const { Client } = require('../../../index') -const { statSync } = require('fs') -const { join } = require('path') -const { bench, beforeEach, afterEach } = require('../suite')({ - report: { - url: process.env.ES_RESULT_CLUSTER_URL, - username: process.env.ES_RESULT_CLUSTER_USERNAME, - password: process.env.ES_RESULT_CLUSTER_PASSWORD - } -}) - -const node = process.env.ELASTICSEARCH_URL || 'http://localhost:9200/' - -const smallDocument = require('./fixtures/small_document.json') -const smallDocumentInfo = { - name: 'small_document.json', - size: statSync(join(__dirname, 'fixtures', 'small_document.json')).size, - num_documents: 1 -} -const largeDocument = require('./fixtures/large_document.json') -const largeDocumentInfo = { - name: 'large_document.json', - size: statSync(join(__dirname, 'fixtures', 'large_document.json')).size, - num_documents: 1 -} - -const client = new Client({ node }) - -beforeEach(async b => { - b.client = client - await b.client.indices.delete({ index: 'test-*' }) -}) - -afterEach(async b => { - await b.client.indices.delete({ index: 'test-*' }) -}) - -bench('Ping', { - warmup: 3, - measure: 5, - iterations: 100, - action: 'ping' -}, async b => { - b.start() - for (let i = 0; i < b.iterations; i++) { - await b.client.ping() - } - b.end() -}) - -bench('Create index', { - warmup: 3, - measure: 5, - iterations: 10, - action: 'indices.create' -}, async b => { - b.start() - for (let i = 0; i < b.iterations; i++) { - await b.client.indices.create({ index: `test-create-${i}` }) - } - b.end() -}) - -bench('Index small document', { - warmup: 3, - measure: 5, - iterations: 100, - dataset: smallDocumentInfo, - action: 'create' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - b.start() - for (let i = 0; i < b.iterations; i++) { - await 
b.client.create({ - index, - type: '_doc', - id: i + now, - body: smallDocument - }) - } - b.end() -}) - -bench('Index large document', { - warmup: 3, - measure: 5, - iterations: 100, - dataset: largeDocumentInfo, - action: 'create' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - b.start() - for (let i = 0; i < b.iterations; i++) { - await b.client.create({ - index, - type: '_doc', - id: i + now, - body: largeDocument - }) - } - b.end() -}) - -bench('Get small document', { - warmup: 3, - measure: 5, - iterations: 1000, - dataset: smallDocumentInfo, - action: 'get' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - await b.client.create({ - index, - type: '_doc', - id: now, - body: smallDocument - }) - - b.start() - for (let i = 0; i < b.iterations; i++) { - await b.client.get({ - index, - type: '_doc', - id: now - }) - } - b.end() -}) - -bench('Get large document', { - warmup: 3, - measure: 5, - iterations: 1000, - dataset: largeDocumentInfo, - action: 'get' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - await b.client.create({ - index, - type: '_doc', - id: now, - body: largeDocument - }) - - b.start() - for (let i = 0; i < b.iterations; i++) { - await b.client.get({ - index, - type: '_doc', - id: now - }) - } - b.end() -}) - -bench('Search small document', { - warmup: 3, - measure: 5, - iterations: 1000, - dataset: smallDocumentInfo, - action: 'search' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - await b.client.create({ - index, - type: '_doc', - id: now, - refresh: true, - body: smallDocument - }) - - b.start() - for (let i = 0; i < b.iterations; i++) { - await b.client.search({ - index, - type: '_doc', - body: { - query: { - match: { cuisine: 'mexican' } - } - } - }) - } - b.end() -}) - -bench('Search large document', { - warmup: 3, - measure: 5, - iterations: 1000, - dataset: largeDocumentInfo, - action: 'search' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - await b.client.create({ - index, - type: '_doc', - id: now, - refresh: true, - body: largeDocument - }) - - b.start() - for (let i = 0; i < b.iterations; i++) { - await b.client.search({ - index, - type: '_doc', - body: { - query: { - match: { 'user.lang': 'en' } - } - } - }) - } - b.end() -}) - -bench('Update small document', { - warmup: 3, - measure: 5, - iterations: 100, - dataset: smallDocumentInfo, - action: 'update' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - await b.client.create({ - index, - type: '_doc', - id: now, - refresh: true, - body: smallDocument - }) - - b.start() - for (let i = 0; i < b.iterations; i++) { - await b.client.update({ - index, - type: '_doc', - id: now, - body: { - doc: { cuisine: 'italian' + i } - } - }) - } - b.end() -}) diff --git a/test/benchmarks/micro/basic.bench.js b/test/benchmarks/micro/basic.bench.js deleted file mode 100644 index 271a02ff4..000000000 --- a/test/benchmarks/micro/basic.bench.js +++ /dev/null @@ -1,98 +0,0 @@ -'use strict' - -const { bench } = require('../suite')({ - report: { - url: process.env.ES_RESULT_CLUSTER_URL, - username: process.env.ES_RESULT_CLUSTER_USERNAME, - password: process.env.ES_RESULT_CLUSTER_PASSWORD - 
} -}) -const { Client } = require('../../../index') -const { connection } = require('../../utils') - -bench('Initialization', { warmup: 5, measure: 10, iterations: 1000 }, async b => { - b.start() - for (let i = 0; i < b.iterations; i++) { - const client = new Client({ // eslint-disable-line - node: 'http://localhost:9200/' - }) - } - b.end() -}) - -bench('Call api with lazy loading', { warmup: 5, measure: 10 }, async b => { - const client = new Client({ - node: 'http://localhost:9200/', - Connection: connection.MockConnection - }) - - b.start() - await client.info() - b.end() -}) - -bench('Call api without lazy loading', { warmup: 5, measure: 10 }, async b => { - const client = new Client({ - node: 'http://localhost:9200/', - Connection: connection.MockConnection - }) - - await client.info() - b.start() - await client.info() - b.end() -}) - -bench('Basic get', { warmup: 5, measure: 10, iterations: 1000 }, async b => { - const client = new Client({ - node: 'http://localhost:9200/', - Connection: connection.MockConnection - }) - - // we run the method twice to skip the lazy loading overhead - await client.search({ - index: 'test', - type: 'doc', - q: 'foo:bar' - }) - b.start() - for (let i = 0; i < b.iterations; i++) { - await client.search({ - index: 'test', - type: 'doc', - q: 'foo:bar' - }) - } - b.end() -}) - -bench('Basic post', { warmup: 5, measure: 10, iterations: 1000 }, async b => { - const client = new Client({ - node: 'http://localhost:9200/', - Connection: connection.MockConnection - }) - - // we run the method twice to skip the lazy loading overhead - await client.search({ - index: 'test', - type: 'doc', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - b.start() - for (let i = 0; i < b.iterations; i++) { - await client.search({ - index: 'test', - type: 'doc', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - } - b.end() -}) diff --git a/test/benchmarks/suite.js b/test/benchmarks/suite.js deleted file mode 100644 index 8fe575556..000000000 --- a/test/benchmarks/suite.js +++ /dev/null @@ -1,272 +0,0 @@ -'use strict' - -const { Client } = require('../../index') -const clientVersion = require('../../package.json').version -const { EventEmitter } = require('events') -const os = require('os') -const dezalgo = require('dezalgo') -const convertHrtime = require('convert-hrtime') -const Git = require('simple-git/promise') -const workq = require('workq') -const dedent = require('dedent') -const ss = require('simple-statistics') - -function buildBenchmark (options = {}) { - const q = workq() - const stats = {} - const reports = [] - let beforeEach = null - let afterEach = null - let setup = null - let teardown = null - - function setBeforeEach (fn) { - beforeEach = fn - } - - function setAfterEach (fn) { - afterEach = fn - } - - function setSetup (fn) { - setup = fn - } - - function setTeardown (fn) { - teardown = fn - } - - function runSetup (q, done) { - if (setup !== null) { - setup(() => { - setup = null - done() - }) - } else { - done() - } - } - - function benchmark (title, opts, fn) { - if (fn == null) { - fn = opts - opts = {} - } - - stats[title] = [] - let { measure, warmup } = opts - const b = new B({ iterations: opts.iterations }) - - q.add(runSetup) - q.add(runBenchmark) - q.add(elaborateStats) - - // Task that runs the benchmark and collects the stats - function runBenchmark (q, done) { - b.comment(`\n# ${title}`) - b.once('fail', err => { - b.comment(err) - if (b.client) { - b.client.close(done) - } else { - 
done() - } - }) - - process.nextTick(run) - async function run () { - if (beforeEach) { - try { - await beforeEach(b) - } catch (err) { - b.comment('Error: beforeEach hook has failed') - return b.fail(err) - } - } - - try { - await fn(b) - } catch (err) { - return b.fail(err) - } - - if (afterEach) { - try { - await afterEach(b) - } catch (err) { - b.comment('Error: afterEach hook has failed') - return b.fail(err) - } - } - - // still need to warmup - if (warmup-- > 0) { - process.nextTick(run) - // save the actual measure - } else if (measure-- > 0) { - stats[title].push(convertHrtime(b.time)) - process.nextTick(run) - // calculate the statistics - } else { - done() - } - } - } - - // task that elaborate the collected stats - async function elaborateStats (q) { - const times = stats[title].map(s => s.milliseconds / b.iterations) - reports.push({ - description: title, - action: opts.action, - category: opts.category || 'simple', - dataset: opts.dataset || null, - stats: { - mean: ss.mean(times), - median: ss.median(times), - min: ss.min(times), - max: ss.max(times), - standard_deviation: ss.standardDeviation(times) - }, - repetitions: { - measured: opts.measure, - warmup: opts.warmup, - iterations: opts.iterations - } - }) - - if (b.client) { - const { body } = await b.client.nodes.stats({ metric: 'http,jvm,os' }) - const esStats = body.nodes[Object.keys(body.nodes)[0]] - b.comment(dedent` - mean: ${ss.mean(times)} ms - median: ${ss.median(times)} ms - min: ${ss.min(times)} ms - max: ${ss.max(times)} ms - standard deviation: ${ss.standardDeviation(times)} - http total connections: ${esStats.http.total_opened} - jvm heap used: ${esStats.jvm.mem.heap_used_percent}% - `) - } else { - b.comment(dedent` - mean: ${ss.mean(times)} ms - median: ${ss.median(times)} ms - min: ${ss.min(times)} ms - max: ${ss.max(times)} ms - standard deviation: ${ss.standardDeviation(times)} - `) - } - } - } - - q.drain(done => { - if (teardown) { - teardown(done) - } else { - done() - } - if (options.report && options.report.url) { - sendReport() - } - }) - - async function sendReport () { - const client = new Client({ - node: { - url: new URL(options.report.url), - username: options.report.username, - password: options.report.password - } - }) - const git = Git(__dirname) - const commit = await git.log(['-1']) - const branch = await git.revparse(['--abbrev-ref', 'HEAD']) - const { body: esInfo } = await client.info() - const { body: esNodes } = await client.nodes.stats({ metric: 'os' }) - - const results = reports.map(report => { - return { - '@timestamp': new Date(), - event: { - description: report.description, - category: report.category, - action: report.action, - duration: 0, - statistics: report.stats, - repetitions: report.repetitions, - dataset: (report.dataset && report.dataset.name) || null, - dataset_details: { - size: (report.dataset && report.dataset.size) || 0, - num_documents: (report.dataset && report.dataset.num_documents) || 0 - } - }, - agent: { - version: clientVersion, - name: '@elastic/elasticsearch-js', - git: { - branch: branch.slice(0, -1), - sha: commit.latest.hash, - commit_message: commit.latest.message, - repository: 'elasticsearch-js' - }, - language: { - version: process.version - }, - os: { - platform: `${os.platform()} ${os.release()}`, - type: os.type(), - architecture: os.arch() - } - }, - server: { - version: esInfo.version.number, - nodes_info: esNodes - } - } - }) - - for (let i = 0; i < results.length; i++) { - await client.index({ - index: 'benchmarking_results', - type: 
'_doc', - body: results[i] - }) - } - } - - return { - bench: dezalgo(benchmark), - beforeEach: setBeforeEach, - afterEach: setAfterEach, - setup: setSetup, - teardown: setTeardown - } -} - -class B extends EventEmitter { - constructor (opts) { - super() - this.begin = 0 - this.time = 0 - this.iterations = opts.iterations || 1 - this.client = null - } - - start () { - this.begin = process.hrtime() - } - - end () { - this.time = process.hrtime(this.begin) - } - - fail (err) { - this.emit('fail', err) - } - - comment (...args) { - console.log(...args) - } -} - -module.exports = buildBenchmark diff --git a/test/bundlers/parcel-test/index.js b/test/bundlers/parcel-test/index.js deleted file mode 100644 index 792b4b873..000000000 --- a/test/bundlers/parcel-test/index.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -const { Client } = require('../../../index') -const client = new Client({ node: 'http://localhost:9200/' }) -client.info((err, result) => { - process.exit(err ? 1 : 0) -}) diff --git a/test/bundlers/parcel-test/package.json b/test/bundlers/parcel-test/package.json deleted file mode 100644 index aae44d09d..000000000 --- a/test/bundlers/parcel-test/package.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "name": "parcel-test", - "version": "1.0.0", - "description": "", - "scripts": { - "start": "node index.js", - "build": "parcel build index.js --no-source-maps" - }, - "keywords": [], - "author": "", - "license": "ISC", - "engines": { - "node": ">=12" - }, - "devDependencies": { - "parcel": "^2.0.0-beta.1" - } -} diff --git a/test/bundlers/rollup-test/index.js b/test/bundlers/rollup-test/index.js deleted file mode 100644 index 792b4b873..000000000 --- a/test/bundlers/rollup-test/index.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -const { Client } = require('../../../index') -const client = new Client({ node: 'http://localhost:9200/' }) -client.info((err, result) => { - process.exit(err ? 1 : 0) -}) diff --git a/test/bundlers/rollup-test/package.json b/test/bundlers/rollup-test/package.json deleted file mode 100644 index 7f74a670d..000000000 --- a/test/bundlers/rollup-test/package.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "rollup-test", - "version": "1.0.0", - "description": "", - "main": "index.js", - "scripts": { - "start": "node bundle.js", - "build": "rollup -c" - }, - "keywords": [], - "author": "", - "license": "ISC", - "devDependencies": { - "@rollup/plugin-commonjs": "^15.1.0", - "@rollup/plugin-json": "^4.1.0", - "@rollup/plugin-node-resolve": "^9.0.0", - "rollup": "^2.28.0" - } -} diff --git a/test/bundlers/rollup-test/rollup.config.js b/test/bundlers/rollup-test/rollup.config.js deleted file mode 100644 index 55a936824..000000000 --- a/test/bundlers/rollup-test/rollup.config.js +++ /dev/null @@ -1,13 +0,0 @@ -import resolve from '@rollup/plugin-node-resolve' -import commonjs from '@rollup/plugin-commonjs' -import json from '@rollup/plugin-json' - -export default { - input: 'index.js', - output: { - file: 'bundle.js', - format: 'iife', - name: 'MyModule' - }, - plugins: [resolve(), commonjs({ include: ['../../../node_modules/**'] }), json()] -} diff --git a/test/bundlers/webpack-test/index.js b/test/bundlers/webpack-test/index.js deleted file mode 100644 index 792b4b873..000000000 --- a/test/bundlers/webpack-test/index.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -const { Client } = require('../../../index') -const client = new Client({ node: 'http://localhost:9200/' }) -client.info((err, result) => { - process.exit(err ? 
1 : 0) -}) diff --git a/test/bundlers/webpack-test/package.json b/test/bundlers/webpack-test/package.json deleted file mode 100644 index 12e6b80f6..000000000 --- a/test/bundlers/webpack-test/package.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "name": "webpack-test", - "version": "1.0.0", - "description": "", - "main": "index.js", - "scripts": { - "start": "node bundle.js", - "build": "webpack" - }, - "keywords": [], - "author": "", - "license": "ISC", - "devDependencies": { - "webpack": "^4.44.2", - "webpack-cli": "^3.3.12" - } -} diff --git a/test/bundlers/webpack-test/webpack.config.js b/test/bundlers/webpack-test/webpack.config.js deleted file mode 100644 index d4da84724..000000000 --- a/test/bundlers/webpack-test/webpack.config.js +++ /dev/null @@ -1,12 +0,0 @@ -'use strict' - -const path = require('path') - -module.exports = { - entry: './index.js', - target: 'node', - output: { - filename: 'bundle.js', - path: path.resolve(__dirname) - } -} diff --git a/test/integration/index.js b/test/integration/index.js index 098b52073..eceb2ea24 100644 --- a/test/integration/index.js +++ b/test/integration/index.js @@ -42,6 +42,13 @@ const MAX_FILE_TIME = 1000 * 30 const MAX_TEST_TIME = 1000 * 3 const freeSkips = { + // the v8 client never sends the scroll_id in querystring, + // the way the test is structured causes a security exception + 'free/scroll/10_basic.yml': ['Body params override query string'], + 'free/scroll/11_clear.yml': [ + 'Body params with array param override query string', + 'Body params with string param scroll id override query string' + ], // TODO: remove this once 'arbitrary_key' is implemented // https://github.com/elastic/elasticsearch/pull/41492 'indices.split/30_copy_settings.yml': ['*'], @@ -124,19 +131,31 @@ const platinumBlackList = { 'unsigned_long/30_multi_fields.yml': ['*'], 'unsigned_long/40_different_numeric.yml': ['*'], 'unsigned_long/50_script_values.yml': ['*'], + // the v8 client flattens the body into the parent object + 'platinum/users/10_basic.yml': ['Test put user with different username in body'], // docker issue?
'watcher/execute_watch/60_http_input.yml': ['*'], // the checks are correct, but for some reason the test is failing on js side // I bet is because the backslashes in the rg 'watcher/execute_watch/70_invalid.yml': ['*'], 'watcher/put_watch/10_basic.yml': ['*'], - 'xpack/15_basic.yml': ['*'] + 'xpack/15_basic.yml': ['*'], + + // tests that are failing that need to be investigated + // the error cause can either be in the yaml test or in the specification + + // start should be a string in the yaml test + 'platinum/ml/delete_job_force.yml': ['Test force delete an open job that is referred by a started datafeed'], + 'platinum/ml/evaluate_data_frame.yml': ['*'], + 'platinum/ml/get_datafeed_stats.yml': ['*'], + // start should be a string in the yaml test + 'platinum/ml/start_stop_datafeed.yml': ['*'] } function runner (opts = {}) { const options = { node: opts.node } if (opts.isXPack) { - options.ssl = { + options.tls = { ca: readFileSync(join(__dirname, '..', '..', '.ci', 'certs', 'ca.crt'), 'utf8'), rejectUnauthorized: false } @@ -157,7 +176,7 @@ function runner (opts = {}) { async function waitCluster (client, times = 0) { try { - await client.cluster.health({ waitForStatus: 'green', timeout: '50s' }) + await client.cluster.health({ wait_for_status: 'green', timeout: '50s' }) } catch (err) { if (++times < 10) { await sleep(5000) @@ -172,7 +191,7 @@ async function start ({ client, isXPack }) { log('Waiting for Elasticsearch') await waitCluster(client) - const { body } = await client.info() + const body = await client.info() const { number: version, build_hash: hash } = body.version log(`Downloading artifacts for hash ${hash}...`) @@ -337,7 +356,7 @@ function generateJunitXmlReport (junit, suite) { } if (require.main === module) { - const node = process.env.TEST_ES_SERVER || 'https://elastic:changeme@localhost:9200/' + const node = process.env.TEST_ES_SERVER || 'http://elastic:changeme@localhost:9200/' const opts = { node, isXPack: process.env.TEST_SUITE !== 'free' diff --git a/test/integration/integration/README.md b/test/integration/integration/README.md new file mode 100644 index 000000000..0861dd8b9 --- /dev/null +++ b/test/integration/integration/README.md @@ -0,0 +1,52 @@ +# `elasticsearch-js` integration test suite + +> What? A README to explain how the integration tests work?? + +Yes. + +## Background +Elasticsearch offers its entire API via HTTP REST endpoints. You can find the whole API specification for every version [here](https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/api).
+To support different languages at the same time, the Elasticsearch team decided to provide a [YAML specification](https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/test) to test every endpoint, body, headers, warning, error and so on.
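+Each file in that suite contains named tests, and every test is a sequence of `do` steps that call an API plus assertion steps such as `match`. As a rough sketch (a made-up test in the spec's format, not one of the real files), a test looks like this:
+
+```yaml
+"Index and retrieve a document":
+  - do:
+      index:
+        index: test-index
+        id: "1"
+        body: { title: "hello world" }
+
+  - do:
+      get:
+        index: test-index
+        id: "1"
+
+  - match: { _source.title: "hello world" }
+```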
+This testing suite uses that specification to generate the tests for the specified version of Elasticsearch on the fly. + +## Run +Running the testing suite is very easy: you just need to run the preconfigured npm script: +```sh +npm run test:integration +``` + +The first time you run this command, the Elasticsearch repository will be cloned inside the integration test folder so the suite can access the YAML specification; this might take some time *(luckily, only the first time)*.
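+The runner reads the address of the target cluster from the `TEST_ES_SERVER` environment variable (see `test/integration/index.js`) and falls back to a local default when it is unset. For example, assuming a cluster with basic auth is listening locally:
+
+```sh
+TEST_ES_SERVER=http://elastic:changeme@localhost:9200 npm run test:integration
+```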
+Once the Elasticsearch repository has been cloned, the testing suite will connect to the provided Elasticsearch instance and then check out the build hash in the repository. Finally, it will start running every test. + +The specification does not allow the tests to be run in parallel, so it might take a while to run the entire testing suite; on my machine, `MacBookPro15,2 core i7 2.7GHz 16GB of RAM` it takes around four minutes. + +### Exit on the first failure +By default the suite will run all the tests, even if one assertion has failed. If you want to stop at the first failure, use the bail option: +```sh +npm run test:integration -- --bail +``` + +### Calculate the code coverage +If you want to calculate the code coverage, run the testing suite with the following parameters; once the tests end, a browser window will open with the results. +```sh +npm run test:integration -- --cov --coverage-report=html +``` + +## How does this thing work? +At first sight, it might seem complicated, but once you understand what the moving parts are, it's quite easy. +1. Connects to the given Elasticsearch instance +1. Gets the ES version and build hash +1. Checks out the given hash (and clones the repository if it is not present) +1. Reads the folder list and, for each folder, the yaml file list +1. Starts running every file, folder by folder + 1. Reads and parses the yaml files + 1. Creates a subtest structure to have a cleaner output + 1. Runs the assertions + 1. Repeat! + +Inside the `index.js` file, you will find the connection, cloning, reading and parsing parts of the test suite, while inside the `test-runner.js` file you will find the functions that handle the assertions. Inside `test-runner.js`, we use a [queue](https://github.com/delvedor/workq) to be sure that everything is run in the correct order. + +Check out the [rest-api-spec readme](https://github.com/elastic/elasticsearch/blob/master/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc) if you want to know more about how the assertions work. + +#### Why are we running the tests with the `--harmony` flag? +Because regex lookbehinds are not supported on Node v6. diff --git a/test/integration/integration/helper.js b/test/integration/integration/helper.js new file mode 100644 index 000000000..eb2021040 --- /dev/null +++ b/test/integration/integration/helper.js @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
*/ + +'use strict' + +function runInParallel (client, operation, options, clientOptions) { + if (options.length === 0) return Promise.resolve() + const operations = options.map(opts => { + const api = delve(client, operation).bind(client) + return api(opts, clientOptions) + }) + + return Promise.all(operations) +} + +// code from https://github.com/developit/dlv +// needed to support an edge case: `a\.b` +// where `a.b` is a single field: { 'a.b': true } +function delve (obj, key, def, p) { + p = 0 + // handle the key with a dot inside that is not a part of the path + // and removes the backslashes from the key + key = key.split + ? key.split(/(?<!\\)\./g).map(k => k.replace(/\\/g, '')) + : key.replace(/\\/g, '') + while (obj && p < key.length) obj = obj[key[p++]] + return (obj === undefined || p < key.length) ? def : obj +} + +function to (promise) { + return promise.then(data => [null, data], err => [err, undefined]) +} + +const sleep = ms => new Promise(resolve => setTimeout(resolve, ms)) + +function isXPackTemplate (name) { + if (name.startsWith('.monitoring-')) { + return true + } + if (name.startsWith('.watch') || name.startsWith('.triggered_watches')) { + return true + } + if (name.startsWith('.data-frame-')) { + return true + } + if (name.startsWith('.ml-')) { + return true + } + if (name.startsWith('.transform-')) { + return true + } + switch (name) { + case '.watches': + case 'logstash-index-template': + case '.logstash-management': + case 'security_audit_log': + case '.slm-history': + case '.async-search': + case 'saml-service-provider': + case 'ilm-history': + case 'logs': + case 'logs-settings': + case 'logs-mappings': + case 'metrics': + case 'metrics-settings': + case 'metrics-mappings': + case 'synthetics': + case 'synthetics-settings': + case 'synthetics-mappings': + case '.snapshot-blob-cache': + case '.deprecation-indexing-template': + case '.deprecation-indexing-mappings': + case '.deprecation-indexing-settings': + case 'data-streams-mappings': + return true + } + return false +} + +module.exports = { runInParallel, delve, to, sleep, isXPackTemplate } diff --git a/test/integration/integration/helpers/bulk.test.js b/test/integration/integration/helpers/bulk.test.js new file mode 100644 index 000000000..011f524c3 --- /dev/null +++ b/test/integration/integration/helpers/bulk.test.js @@ -0,0 +1,204 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +'use strict' + +const { createReadStream } = require('fs') +const { join } = require('path') +const split = require('split2') +const { test, beforeEach, afterEach } = require('tap') +const { waitCluster } = require('../../utils') +const { Client } = require('../../../') + +const datasetPath = join(__dirname, '..', '..', 'fixtures', 'stackoverflow.ndjson') +const INDEX = `test-helpers-${process.pid}` +const client = new Client({ + node: process.env.TEST_ES_SERVER || 'http://localhost:9200/' +}) + +beforeEach(async () => { + await waitCluster(client) + await client.indices.create({ index: INDEX }) +}) + +afterEach(async () => { + await client.indices.delete({ index: INDEX }, { ignore: 404 }) +}) + +test('bulk index', async t => { + const stream = createReadStream(datasetPath) + const result = await client.helpers.bulk({ + datasource: stream.pipe(split()), + refreshOnCompletion: INDEX, + onDrop (doc) { + t.fail('It should not drop any document') + }, + onDocument (doc) { + return { + index: { _index: INDEX } + } + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 5000, + successful: 5000, + retry: 0, + failed: 0, + aborted: false + }) + + const { body } = await client.count({ index: INDEX }) + t.match(body, { count: 5000 }) +}) + +test('bulk index with custom id', async t => { + const stream = createReadStream(datasetPath) + const result = await client.helpers.bulk({ + datasource: stream.pipe(split(JSON.parse)), + onDrop (doc) { + t.fail('It should not drop any document') + }, + onDocument (doc) { + return { + index: { + _index: INDEX, + _id: doc.id + } + } + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 5000, + successful: 5000, + retry: 0, + failed: 0, + aborted: false + }) + + const { body } = await client.get({ + index: INDEX, + id: '19273860' // id of document n° 4242 + }) + + t.equal(body._index, INDEX) + t.equal(body._id, '19273860') + t.equal(body._source.id, '19273860') +}) + +test('abort the operation on document drop', async t => { + const stream = createReadStream(datasetPath) + const b = client.helpers.bulk({ + datasource: stream.pipe(split(JSON.parse)), + concurrency: 1, + onDrop (doc) { + t.equal(doc.status, 400) + t.equal(doc.error.type, 'mapper_parsing_exception') + t.equal(doc.document.id, '45924372') + b.abort() + }, + onDocument (doc) { + if (doc.id === '45924372') { // id of document n° 500 + // this will break the mapping + doc.title = { foo: 'bar' } + } + return { + index: { + _index: INDEX, + _id: doc.id + } + } + } + }) + + const result = await b + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.equal(result.total - 1, result.successful) + t.match(result, { + retry: 0, + failed: 1, + aborted: true + }) +}) + +test('bulk delete', async t => { + const indexResult = await client.helpers.bulk({ + datasource: createReadStream(datasetPath).pipe(split(JSON.parse)), + refreshOnCompletion: true, + onDrop (doc) { + t.fail('It should not drop any document') + }, + onDocument (doc) { + return { + index: { + _index: INDEX, + _id: doc.id + } + } + } + }) + + t.type(indexResult.time, 'number') + t.type(indexResult.bytes, 'number') + t.match(indexResult, { + total: 5000, + successful: 5000, + retry: 0, + failed: 0, + aborted: false + }) + + const { body: afterIndex } = await client.count({ index: INDEX }) + t.match(afterIndex, { count: 5000 }) + + const deleteResult = await client.helpers.bulk({ + datasource: 
createReadStream(datasetPath).pipe(split(JSON.parse)), + refreshOnCompletion: true, + onDrop (doc) { + t.fail('It should not drop any document') + }, + onDocument (doc) { + return { + delete: { + _index: INDEX, + _id: doc.id + } + } + } + }) + + t.type(deleteResult.time, 'number') + t.type(deleteResult.bytes, 'number') + t.match(deleteResult, { + total: 5000, + successful: 5000, + retry: 0, + failed: 0, + aborted: false + }) + + const { body: afterDelete } = await client.count({ index: INDEX }) + t.match(afterDelete, { count: 0 }) +}) diff --git a/test/integration/integration/helpers/msearch.test.js b/test/integration/integration/helpers/msearch.test.js new file mode 100644 index 000000000..c9c726ecc --- /dev/null +++ b/test/integration/integration/helpers/msearch.test.js @@ -0,0 +1,121 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +'use strict' + +const { createReadStream } = require('fs') +const { join } = require('path') +const split = require('split2') +const { test, beforeEach, afterEach } = require('tap') +const { waitCluster } = require('../../utils') +const { Client, errors } = require('../../../') + +const INDEX = `test-helpers-${process.pid}` +const client = new Client({ + node: process.env.TEST_ES_SERVER || 'http://localhost:9200/' +}) + +beforeEach(async () => { + await waitCluster(client) + await client.indices.create({ index: INDEX }) + const stream = createReadStream(join(__dirname, '..', '..', 'fixtures', 'stackoverflow.ndjson')) + const result = await client.helpers.bulk({ + datasource: stream.pipe(split()), + refreshOnCompletion: true, + onDocument (doc) { + return { + index: { _index: INDEX } + } + } + }) + if (result.failed > 0) { + throw new Error('Failed bulk indexing docs') + } +}) + +afterEach(async () => { + await client.indices.delete({ index: INDEX }, { ignore: 404 }) +}) + +test('Basic', t => { + t.plan(4) + const m = client.helpers.msearch({ operations: 1 }) + + m.search( + { index: INDEX }, + { query: { match: { title: 'javascript' } } }, + (err, result) => { + t.error(err) + t.equal(result.body.hits.total.value, 106) + } + ) + + m.search( + { index: INDEX }, + { query: { match: { title: 'ruby' } } }, + (err, result) => { + t.error(err) + t.equal(result.body.hits.total.value, 29) + } + ) + + t.teardown(() => m.stop()) +}) + +test('Bad request', t => { + t.plan(3) + const m = client.helpers.msearch({ operations: 1 }) + + m.search( + { index: INDEX }, + { query: { match: { title: 'javascript' } } }, + (err, result) => { + t.error(err) + t.equal(result.body.hits.total.value, 106) + } + ) + + m.search( + { index: INDEX }, + { query: { foo: { title: 'ruby' } } }, + (err, result) => { + t.ok(err instanceof errors.ResponseError) + } + ) + + t.teardown(() => m.stop()) +}) + +test('Send multiple requests
concurrently over the concurrency limit', t => { + t.plan(20) + const m = client.helpers.msearch({ operations: 1 }) + + for (let i = 0; i < 10; i++) { + m.search( + { index: INDEX }, + { query: { match: { title: 'javascript' } } }, + (err, result) => { + t.error(err) + t.equal(result.body.hits.total.value, 106) + } + ) + } + + t.teardown(() => m.stop()) +}) diff --git a/test/integration/integration/helpers/scroll.test.js b/test/integration/integration/helpers/scroll.test.js new file mode 100644 index 000000000..e197ce21a --- /dev/null +++ b/test/integration/integration/helpers/scroll.test.js @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +'use strict' + +const { createReadStream } = require('fs') +const { join } = require('path') +const split = require('split2') +const { test, beforeEach, afterEach } = require('tap') +const { waitCluster } = require('../../utils') +const { Client } = require('../../../') + +const INDEX = `test-helpers-${process.pid}` +const client = new Client({ + node: process.env.TEST_ES_SERVER || 'http://localhost:9200/' +}) + +beforeEach(async () => { + await waitCluster(client) + await client.indices.create({ index: INDEX }) + const stream = createReadStream(join(__dirname, '..', '..', 'fixtures', 'stackoverflow.ndjson')) + const result = await client.helpers.bulk({ + datasource: stream.pipe(split()), + refreshOnCompletion: true, + onDocument (doc) { + return { + index: { _index: INDEX } + } + } + }) + if (result.failed > 0) { + throw new Error('Failed bulk indexing docs') + } +}) + +afterEach(async () => { + await client.indices.delete({ index: INDEX }, { ignore: 404 }) +}) + +test('search helper', async t => { + const scrollSearch = client.helpers.scrollSearch({ + index: INDEX, + body: { + query: { + match: { + title: 'javascript' + } + } + } + }) + + let count = 0 + for await (const search of scrollSearch) { + count += 1 + for (const doc of search.documents) { + t.ok(doc.title.toLowerCase().includes('javascript')) + } + } + t.equal(count, 11) +}) + +test('clear a scroll search', async t => { + const scrollSearch = client.helpers.scrollSearch({ + index: INDEX, + body: { + query: { + match: { + title: 'javascript' + } + } + } + }) + + let count = 0 + for await (const search of scrollSearch) { + count += 1 + if (count === 2) { + search.clear() + } + } + t.equal(count, 2) +}) + +test('scroll documents', async t => { + const scrollSearch = client.helpers.scrollDocuments({ + index: INDEX, + body: { + query: { + match: { + title: 'javascript' + } + } + } + }) + + let count = 0 + for await (const doc of scrollSearch) { + count += 1 + t.ok(doc.title.toLowerCase().includes('javascript')) + } + t.equal(count, 106) +}) diff --git a/test/integration/integration/helpers/search.test.js 
b/test/integration/integration/helpers/search.test.js new file mode 100644 index 000000000..d4aa57c9a --- /dev/null +++ b/test/integration/integration/helpers/search.test.js @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +'use strict' + +const { createReadStream } = require('fs') +const { join } = require('path') +const split = require('split2') +const { test, beforeEach, afterEach } = require('tap') +const { waitCluster } = require('../../utils') +const { Client } = require('../../../') + +const INDEX = `test-helpers-${process.pid}` +const client = new Client({ + node: process.env.TEST_ES_SERVER || 'http://localhost:9200/' +}) + +beforeEach(async () => { + await waitCluster(client) + await client.indices.create({ index: INDEX }) + const stream = createReadStream(join(__dirname, '..', '..', 'fixtures', 'stackoverflow.ndjson')) + const result = await client.helpers.bulk({ + datasource: stream.pipe(split()), + refreshOnCompletion: true, + onDocument (doc) { + return { + index: { _index: INDEX } + } + } + }) + if (result.failed > 0) { + throw new Error('Failed bulk indexing docs') + } +}) + +afterEach(async () => { + await client.indices.delete({ index: INDEX }, { ignore: 404 }) +}) + +test('search helper', async t => { + const results = await client.helpers.search({ + index: INDEX, + body: { + query: { + match: { + title: 'javascript' + } + } + } + }) + t.equal(results.length, 10) + for (const result of results) { + t.ok(result.title.toLowerCase().includes('javascript')) + } +}) diff --git a/test/integration/integration/index.js b/test/integration/integration/index.js new file mode 100644 index 000000000..098b52073 --- /dev/null +++ b/test/integration/integration/index.js @@ -0,0 +1,385 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +'use strict' + +process.on('unhandledRejection', function (err) { + console.error(err) + process.exit(1) +}) + +const { writeFileSync, readFileSync, readdirSync, statSync } = require('fs') +const { join, sep } = require('path') +const yaml = require('js-yaml') +const ms = require('ms') +const { Client } = require('../../index') +const build = require('./test-runner') +const { sleep } = require('./helper') +const createJunitReporter = require('./reporter') +const downloadArtifacts = require('../../scripts/download-artifacts') + +const yamlFolder = downloadArtifacts.locations.freeTestFolder +const xPackYamlFolder = downloadArtifacts.locations.xPackTestFolder + +const MAX_API_TIME = 1000 * 90 +const MAX_FILE_TIME = 1000 * 30 +const MAX_TEST_TIME = 1000 * 3 + +const freeSkips = { + // TODO: remove this once 'arbitrary_key' is implemented + // https://github.com/elastic/elasticsearch/pull/41492 + 'indices.split/30_copy_settings.yml': ['*'], + 'indices.stats/50_disk_usage.yml': ['Disk usage stats'], + 'indices.stats/60_field_usage.yml': ['Field usage stats'], + // skipping because we are booting ES with `discovery.type=single-node` + // and this test will fail because of this configuration + 'nodes.stats/30_discovery.yml': ['*'], + // the expected error is returning a 503, + // which triggers a retry and marks the node as dead + 'search.aggregation/240_max_buckets.yml': ['*'], + // the yaml runner assumes that null means "does not exist", + // while null is a valid json value, so the check will fail + 'search/320_disallow_queries.yml': ['Test disallow expensive queries'] +} +const platinumBlackList = { + 'analytics/histogram.yml': ['Histogram requires values in increasing order'], + // these two test cases are broken; we should + // return to them in the future.
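+  // (the platinum skip list uses the same format as freeSkips above: each key is a yaml file suffix matched with endsWith, each value lists the test names to skip in that file, and '*' skips every test in the file)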
+ 'analytics/top_metrics.yml': [ + 'sort by keyword field fails', + 'sort by string script fails' + ], + 'cat.aliases/10_basic.yml': ['Empty cluster'], + 'index/10_with_id.yml': ['Index with ID'], + 'indices.get_alias/10_basic.yml': ['Get alias against closed indices'], + 'indices.get_alias/20_empty.yml': ['Check empty aliases when getting all aliases via /_alias'], + 'text_structure/find_structure.yml': ['*'], + // https://github.com/elastic/elasticsearch/pull/39400 + 'ml/jobs_crud.yml': ['Test put job with id that is already taken'], + // object keys must be strings, and `0.0.toString()` is `0` + 'ml/evaluate_data_frame.yml': [ + 'Test binary_soft_classifition precision', + 'Test binary_soft_classifition recall', + 'Test binary_soft_classifition confusion_matrix' + ], + // it gets random failures on CI, must investigate + 'ml/set_upgrade_mode.yml': [ + 'Attempt to open job when upgrade_mode is enabled', + 'Setting upgrade mode to disabled from enabled' + ], + // The cleanup fails with an index not found when retrieving the jobs + 'ml/get_datafeed_stats.yml': ['Test get datafeed stats when total_search_time_ms mapping is missing'], + 'ml/bucket_correlation_agg.yml': ['Test correlation bucket agg simple'], + 'ml/preview_datafeed.yml': ['*'], + // Investigate why it is failing + 'ml/inference_crud.yml': ['*'], + // investigate why this is failing + 'monitoring/bulk/10_basic.yml': ['*'], + 'monitoring/bulk/20_privileges.yml': ['*'], + 'license/20_put_license.yml': ['*'], + 'snapshot/10_basic.yml': ['*'], + 'snapshot/20_operator_privileges_disabled.yml': ['*'], + // the body is correct, but the regex is failing + 'sql/sql.yml': ['Getting textual representation'], + 'searchable_snapshots/10_usage.yml': ['*'], + 'service_accounts/10_basic.yml': ['*'], + // we are setting two certificates in the docker config + 'ssl/10_basic.yml': ['*'], + // very likely, the index template has not been loaded yet. + // we should run an indices.existsTemplate, but the name of the + // template may vary over time. + 'transforms_crud.yml': [ + 'Test basic transform crud', + 'Test transform with query and array of indices in source', + 'Test PUT continuous transform', + 'Test PUT continuous transform without delay set' + ], + 'transforms_force_delete.yml': [ + 'Test force deleting a running transform' + ], + 'transforms_cat_apis.yml': ['*'], + 'transforms_start_stop.yml': ['*'], + 'transforms_stats.yml': ['*'], + 'transforms_stats_continuous.yml': ['*'], + 'transforms_update.yml': ['*'], + // js does not support ulongs + 'unsigned_long/10_basic.yml': ['*'], + 'unsigned_long/20_null_value.yml': ['*'], + 'unsigned_long/30_multi_fields.yml': ['*'], + 'unsigned_long/40_different_numeric.yml': ['*'], + 'unsigned_long/50_script_values.yml': ['*'], + // docker issue?
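+  // (only the http input suite below is affected; it is skipped wholesale until this is verified on the docker-based CI)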
+ 'watcher/execute_watch/60_http_input.yml': ['*'], + // the checks are correct, but for some reason the test is failing on the js side + // I bet it is because of the backslashes in the regex + 'watcher/execute_watch/70_invalid.yml': ['*'], + 'watcher/put_watch/10_basic.yml': ['*'], + 'xpack/15_basic.yml': ['*'] +} + +function runner (opts = {}) { + const options = { node: opts.node } + if (opts.isXPack) { + options.ssl = { + ca: readFileSync(join(__dirname, '..', '..', '.ci', 'certs', 'ca.crt'), 'utf8'), + rejectUnauthorized: false + } + } + const client = new Client(options) + log('Loading yaml suite') + start({ client, isXPack: opts.isXPack }) + .catch(err => { + if (err.name === 'ResponseError') { + console.error(err) + console.log(JSON.stringify(err.meta, null, 2)) + } else { + console.error(err) + } + process.exit(1) + }) +} + +async function waitCluster (client, times = 0) { + try { + await client.cluster.health({ waitForStatus: 'green', timeout: '50s' }) + } catch (err) { + if (++times < 10) { + await sleep(5000) + return waitCluster(client, times) + } + console.error(err) + process.exit(1) + } +} + +async function start ({ client, isXPack }) { + log('Waiting for Elasticsearch') + await waitCluster(client) + + const { body } = await client.info() + const { number: version, build_hash: hash } = body.version + + log(`Downloading artifacts for hash ${hash}...`) + await downloadArtifacts({ hash, version }) + + log(`Testing ${isXPack ? 'Platinum' : 'Free'} api...`) + const junit = createJunitReporter() + const junitTestSuites = junit.testsuites(`Integration test for ${isXPack ? 'Platinum' : 'Free'} api`) + + const stats = { + total: 0, + skip: 0, + pass: 0, + assertions: 0 + } + const folders = getAllFiles(isXPack ? xPackYamlFolder : yamlFolder) + .filter(t => !/(README|TODO)/g.test(t)) + // we cluster the array based on the folder names, + // to provide a better test log output + .reduce((arr, file) => { + const path = file.slice(file.indexOf('/rest-api-spec/test'), file.lastIndexOf('/')) + let inserted = false + for (let i = 0; i < arr.length; i++) { + if (arr[i][0].includes(path)) { + inserted = true + arr[i].push(file) + break + } + } + if (!inserted) arr.push([file]) + return arr + }, []) + + const totalTime = now() + for (const folder of folders) { + // pretty name + const apiName = folder[0].slice( + folder[0].indexOf(`${sep}rest-api-spec${sep}test`) + 19, + folder[0].lastIndexOf(sep) + ) + + log('Testing ' + apiName.slice(1)) + const apiTime = now() + + for (const file of folder) { + const testRunner = build({ + client, + version, + isXPack: file.includes('platinum') + }) + const fileTime = now() + const data = readFileSync(file, 'utf8') + // get the test yaml (as object), some files have multiple yaml documents inside, + // every document is separated by '---', so we split on the separator, + // then we remove the empty strings and finally parse them + const tests = data + .split('\n---\n') + .map(s => s.trim()) + // empty strings + .filter(Boolean) + .map(parse) + // null values + .filter(Boolean) + + // get setup and teardown if present + let setupTest = null + let teardownTest = null + for (const test of tests) { + if (test.setup) setupTest = test.setup + if (test.teardown) teardownTest = test.teardown + } + + const cleanPath = file.slice(file.lastIndexOf(apiName)) + log(' ' + cleanPath) + const junitTestSuite = junitTestSuites.testsuite(apiName.slice(1) + ' - ' + cleanPath) + + for (const test of tests) { + const testTime = now() + const name = Object.keys(test)[0] + if (name ===
'setup' || name === 'teardown') continue + const junitTestCase = junitTestSuite.testcase(name) + + stats.total += 1 + if (shouldSkip(isXPack, file, name)) { + stats.skip += 1 + junitTestCase.skip('This test is in the skip list of the client') + junitTestCase.end() + continue + } + log(' - ' + name) + try { + await testRunner.run(setupTest, test[name], teardownTest, stats, junitTestCase) + stats.pass += 1 + } catch (err) { + junitTestCase.failure(err) + junitTestCase.end() + junitTestSuite.end() + junitTestSuites.end() + generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free') + console.error(err) + process.exit(1) + } + const totalTestTime = now() - testTime + junitTestCase.end() + if (totalTestTime > MAX_TEST_TIME) { + log(' took too long: ' + ms(totalTestTime)) + } else { + log(' took: ' + ms(totalTestTime)) + } + } + junitTestSuite.end() + const totalFileTime = now() - fileTime + if (totalFileTime > MAX_FILE_TIME) { + log(` ${cleanPath} took too long: ` + ms(totalFileTime)) + } else { + log(` ${cleanPath} took: ` + ms(totalFileTime)) + } + } + const totalApiTime = now() - apiTime + if (totalApiTime > MAX_API_TIME) { + log(`${apiName} took too long: ` + ms(totalApiTime)) + } else { + log(`${apiName} took: ` + ms(totalApiTime)) + } + } + junitTestSuites.end() + generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free') + log(`Total testing time: ${ms(now() - totalTime)}`) + log(`Test stats: + - Total: ${stats.total} + - Skip: ${stats.skip} + - Pass: ${stats.pass} + - Assertions: ${stats.assertions} + `) +} + +function log (text) { + process.stdout.write(text + '\n') +} + +function now () { + const ts = process.hrtime() + return (ts[0] * 1e3) + (ts[1] / 1e6) +} + +function parse (data) { + let doc + try { + doc = yaml.load(data, { schema: yaml.CORE_SCHEMA }) + } catch (err) { + console.error(err) + return + } + return doc +} + +function generateJunitXmlReport (junit, suite) { + writeFileSync( + join(__dirname, '..', '..', `${suite}-report-junit.xml`), + junit.prettyPrint() + ) +} + +if (require.main === module) { + const node = process.env.TEST_ES_SERVER || 'https://elastic:changeme@localhost:9200/' + const opts = { + node, + isXPack: process.env.TEST_SUITE !== 'free' + } + runner(opts) +} + +const shouldSkip = (isXPack, file, name) => { + let list = Object.keys(freeSkips) + for (let i = 0; i < list.length; i++) { + const freeTest = freeSkips[list[i]] + for (let j = 0; j < freeTest.length; j++) { + if (file.endsWith(list[i]) && (name === freeTest[j] || freeTest[j] === '*')) { + const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name + log(`Skipping test ${testName} because it is blacklisted in the free tests`) + return true + } + } + } + + if (file.includes('x-pack') || isXPack) { + list = Object.keys(platinumBlackList) + for (let i = 0; i < list.length; i++) { + const platTest = platinumBlackList[list[i]] + for (let j = 0; j < platTest.length; j++) { + if (file.endsWith(list[i]) && (name === platTest[j] || platTest[j] === '*')) { + const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name + log(`Skipping test ${testName} because it is blacklisted in the platinum tests`) + return true + } + } + } + } + + return false +} + +const getAllFiles = dir => + readdirSync(dir).reduce((files, file) => { + const name = join(dir, file) + const isDirectory = statSync(name).isDirectory() + return isDirectory ?
[...files, ...getAllFiles(name)] : [...files, name] + }, []) + +module.exports = runner diff --git a/test/integration/integration/reporter.js b/test/integration/integration/reporter.js new file mode 100644 index 000000000..0d3621de7 --- /dev/null +++ b/test/integration/integration/reporter.js @@ -0,0 +1,109 @@ +'use strict' + +const assert = require('assert') +const { create } = require('xmlbuilder2') + +function createJunitReporter () { + const report = {} + + return { testsuites, prettyPrint } + + function prettyPrint () { + return create(report).end({ prettyPrint: true }) + } + + function testsuites (name) { + assert(name, 'The testsuites name is required') + assert(report.testsuites === undefined, 'Cannot set more than one testsuites block') + const startTime = Date.now() + + report.testsuites = { + '@id': new Date().toISOString(), + '@name': name + } + + const testsuiteList = [] + + return { + testsuite: createTestSuite(testsuiteList), + end () { + report.testsuites['@time'] = Math.round((Date.now() - startTime) / 1000) + report.testsuites['@tests'] = testsuiteList.reduce((acc, val) => { + acc += val['@tests'] + return acc + }, 0) + report.testsuites['@failures'] = testsuiteList.reduce((acc, val) => { + acc += val['@failures'] + return acc + }, 0) + report.testsuites['@skipped'] = testsuiteList.reduce((acc, val) => { + acc += val['@skipped'] + return acc + }, 0) + if (testsuiteList.length) { + report.testsuites.testsuite = testsuiteList + } + } + } + } + + function createTestSuite (testsuiteList) { + return function testsuite (name) { + assert(name, 'The testsuite name is required') + const startTime = Date.now() + const suite = { + '@id': new Date().toISOString(), + '@name': name + } + const testcaseList = [] + testsuiteList.push(suite) + return { + testcase: createTestCase(testcaseList), + end () { + suite['@time'] = Math.round((Date.now() - startTime) / 1000) + suite['@tests'] = testcaseList.length + suite['@failures'] = testcaseList.filter(t => t.failure).length + suite['@skipped'] = testcaseList.filter(t => t.skipped).length + if (testcaseList.length) { + suite.testcase = testcaseList + } + } + } + } + } + + function createTestCase (testcaseList) { + return function testcase (name) { + assert(name, 'The testcase name is required') + const startTime = Date.now() + const tcase = { + '@id': new Date().toISOString(), + '@name': name + } + testcaseList.push(tcase) + return { + failure (error) { + assert(error, 'The failure error object is required') + tcase.failure = { + '#': error.stack, + '@message': error.message, + '@type': error.code + } + }, + skip (reason) { + if (typeof reason !== 'string') { + reason = JSON.stringify(reason, null, 2) + } + tcase.skipped = { + '#': reason + } + }, + end () { + tcase['@time'] = Math.round((Date.now() - startTime) / 1000) + } + } + } + } +} + +module.exports = createJunitReporter diff --git a/test/integration/integration/test-runner.js b/test/integration/integration/test-runner.js new file mode 100644 index 000000000..824fb05fb --- /dev/null +++ b/test/integration/integration/test-runner.js @@ -0,0 +1,909 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +'use strict' + +/* eslint camelcase: 0 */ + +const assert = require('assert') +const semver = require('semver') +const helper = require('./helper') +const deepEqual = require('fast-deep-equal') +const { join } = require('path') +const { locations } = require('../../scripts/download-artifacts') +const { ConfigurationError } = require('../../lib/errors') + +const { delve, to, isXPackTemplate, sleep } = helper + +const supportedFeatures = [ + 'gtelte', + 'regex', + 'benchmark', + 'stash_in_path', + 'groovy_scripting', + 'headers', + 'transform_and_set', + 'catch_unauthorized', + 'arbitrary_key' +] + +function build (opts = {}) { + const client = opts.client + const esVersion = opts.version + const isXPack = opts.isXPack + const stash = new Map() + let response = null + + /** + * Runs a cleanup, removes all indices, aliases, templates, and snapshots + * @returns {Promise} + */ + async function cleanup (isXPack) { + response = null + stash.clear() + + if (isXPack) { + // wipe rollup jobs + const { body: jobsList } = await client.rollup.getJobs({ id: '_all' }) + const jobsIds = jobsList.jobs.map(j => j.config.id) + await helper.runInParallel( + client, 'rollup.stopJob', + jobsIds.map(j => ({ id: j, waitForCompletion: true })) + ) + await helper.runInParallel( + client, 'rollup.deleteJob', + jobsIds.map(j => ({ id: j })) + ) + + // delete slm policies + const { body: policies } = await client.slm.getLifecycle() + await helper.runInParallel( + client, 'slm.deleteLifecycle', + Object.keys(policies).map(p => ({ policy_id: p })) + ) + + // remove 'x_pack_rest_user', used in some xpack test + await client.security.deleteUser({ username: 'x_pack_rest_user' }, { ignore: [404] }) + + const { body: searchableSnapshotIndices } = await client.cluster.state({ + metric: 'metadata', + filter_path: 'metadata.indices.*.settings.index.store.snapshot' + }) + if (searchableSnapshotIndices.metadata != null && searchableSnapshotIndices.metadata.indices != null) { + await helper.runInParallel( + client, 'indices.delete', + Object.keys(searchableSnapshotIndices.metadata.indices).map(i => ({ index: i })), + { ignore: [404] } + ) + } + } + + // clean snapshots + const { body: repositories } = await client.snapshot.getRepository() + for (const repository of Object.keys(repositories)) { + await client.snapshot.delete({ repository, snapshot: '*' }, { ignore: [404] }) + await client.snapshot.deleteRepository({ repository }, { ignore: [404] }) + } + + if (isXPack) { + // clean data streams + await client.indices.deleteDataStream({ name: '*' }) + } + + // clean all indices + await client.indices.delete({ index: '*,-.ds-ilm-history-*', expand_wildcards: 'open,closed,hidden' }, { ignore: [404] }) + + // delete templates + const { body: templates } = await client.cat.templates({ h: 'name' }) + for (const template of templates.split('\n').filter(Boolean)) { + if (isXPackTemplate(template)) continue + const { body } = await client.indices.deleteTemplate({ name: template }, { ignore: [404] }) + if (JSON.stringify(body).includes(`index_template [${template}] missing`)) { + await 
client.indices.deleteIndexTemplate({ name: template }, { ignore: [404] }) + } + } + + // delete component template + const { body } = await client.cluster.getComponentTemplate() + const components = body.component_templates.filter(c => !isXPackTemplate(c.name)).map(c => c.name) + if (components.length > 0) { + await client.cluster.deleteComponentTemplate({ name: components.join(',') }, { ignore: [404] }) + } + + // Remove any cluster setting + const { body: settings } = await client.cluster.getSettings() + const newSettings = {} + for (const setting in settings) { + if (Object.keys(settings[setting]).length === 0) continue + newSettings[setting] = {} + for (const key in settings[setting]) { + newSettings[setting][`${key}.*`] = null + } + } + if (Object.keys(newSettings).length > 0) { + await client.cluster.putSettings({ body: newSettings }) + } + + if (isXPack) { + // delete ilm policies + const preserveIlmPolicies = [ + 'ilm-history-ilm-policy', 'slm-history-ilm-policy', + 'watch-history-ilm-policy', 'ml-size-based-ilm-policy', + 'logs', 'metrics' + ] + const { body: policies } = await client.ilm.getLifecycle() + for (const policy in policies) { + if (preserveIlmPolicies.includes(policy)) continue + await client.ilm.deleteLifecycle({ policy }) + } + + // delete autofollow patterns + const { body: patterns } = await client.ccr.getAutoFollowPattern() + for (const { name } of patterns.patterns) { + await client.ccr.deleteAutoFollowPattern({ name }) + } + + // delete all tasks + const { body: nodesTask } = await client.tasks.list() + const tasks = Object.keys(nodesTask.nodes) + .reduce((acc, node) => { + const { tasks } = nodesTask.nodes[node] + Object.keys(tasks).forEach(id => { + if (tasks[id].cancellable) acc.push(id) + }) + return acc + }, []) + + await helper.runInParallel( + client, 'tasks.cancel', + tasks.map(id => ({ taskId: id })) + ) + } + + const { body: shutdownNodes } = await client.shutdown.getNode() + if (shutdownNodes._nodes == null && shutdownNodes.cluster_name == null) { + for (const node of shutdownNodes.nodes) { + await client.shutdown.deleteNode({ node_id: node.node_id }) + } + } + + // wait for pending task before resolving the promise + await sleep(100) + while (true) { + const { body } = await client.cluster.pendingTasks() + if (body.tasks.length === 0) break + await sleep(500) + } + } + + /** + * Runs the given test. 
+ * It runs the test components in the following order: + * - skip check + * - xpack user + * - setup + * - the actual test + * - teardown + * - xpack cleanup + * - cleanup + * @param {object} setup (null if not needed) + * @param {object} test + * @param {object} teardown (null if not needed) + * @returns {Promise} + */ + async function run (setup, test, teardown, stats, junit) { + // if we should skip a feature in the setup/teardown section + // we should skip the entire test file + const skip = getSkip(setup) || getSkip(teardown) + if (skip && shouldSkip(esVersion, skip)) { + junit.skip(skip) + logSkip(skip) + return + } + + if (isXPack) { + // Some xpack tests require this user + // tap.comment('Creating x-pack user') + try { + await client.security.putUser({ + username: 'x_pack_rest_user', + body: { password: 'x-pack-test-password', roles: ['superuser'] } + }) + } catch (err) { + assert.ifError(err, 'should not error: security.putUser') + } + } + + if (setup) await exec('Setup', setup, stats, junit) + + await exec('Test', test, stats, junit) + + if (teardown) await exec('Teardown', teardown, stats, junit) + + await cleanup(isXPack) + } + + /** + * Fill the stashed values of a command + * let's say that we have stashed the `master` value, + * is_true: nodes.$master.transport.profiles + * becomes + * is_true: nodes.new_value.transport.profiles + * @param {object|string} the action to update + * @returns {object|string} the updated action + */ + function fillStashedValues (obj) { + if (typeof obj === 'string') { + return getStashedValues(obj) + } + // iterate every key of the object + for (const key in obj) { + const val = obj[key] + // if the key value is a string, and the string includes '${' + // then we must update the content of '${...}'. + // eg: 'Basic ${auth}' we search the stashed value 'auth' + // and the resulting value will be 'Basic valueOfAuth' + if (typeof val === 'string' && val.includes('${')) { + while (obj[key].includes('${')) { + const val = obj[key] + const start = val.indexOf('${') + const end = val.indexOf('}', val.indexOf('${')) + const stashedKey = val.slice(start + 2, end) + const stashed = stash.get(stashedKey) + obj[key] = val.slice(0, start) + stashed + val.slice(end + 1) + } + continue + } + // handle json strings, eg: '{"hello":"$world"}' + if (typeof val === 'string' && val.includes('"$')) { + while (obj[key].includes('"$')) { + const val = obj[key] + const start = val.indexOf('"$') + const end = val.indexOf('"', start + 1) + const stashedKey = val.slice(start + 2, end) + const stashed = '"' + stash.get(stashedKey) + '"' + obj[key] = val.slice(0, start) + stashed + val.slice(end + 1) + } + continue + } + // if the key value is a string, and the string includes '$' + // we run the "update value" code + if (typeof val === 'string' && val.includes('$')) { + // update the key value + obj[key] = getStashedValues(val) + continue + } + + // go deep in the object + if (val !== null && typeof val === 'object') { + fillStashedValues(val) + } + } + + return obj + + function getStashedValues (str) { + const arr = str + // we split the string on the dots + // handle the key with a dot inside that is not a part of the path + .split(/(?<!\\)\./g) + .map(part =>
{ + if (part[0] === '$') { + const stashed = stash.get(part.slice(1)) + if (stashed == null) { + throw new Error(`Cannot find stashed value '${part}' for '${JSON.stringify(obj)}'`) + } + return stashed + } + return part + }) + + // recreate the string value only if the array length is higher than one + // otherwise return the first element which in some test this could be a number, + // and call `.join` will coerce it to a string. + return arr.length > 1 ? arr.join('.') : arr[0] + } + } + + /** + * Stashes a value + * @param {string} the key to search in the previous response + * @param {string} the name to identify the stashed value + * @returns {TestRunner} + */ + function set (key, name) { + if (key.includes('_arbitrary_key_')) { + let currentVisit = null + for (const path of key.split('.')) { + if (path === '_arbitrary_key_') { + const keys = Object.keys(currentVisit) + const arbitraryKey = keys[getRandomInt(0, keys.length)] + stash.set(name, arbitraryKey) + } else { + currentVisit = delve(response, path) + } + } + } else { + stash.set(name, delve(response, key)) + } + } + + /** + * Applies a given transformation and stashes the result. + * @param {string} the name to identify the stashed value + * @param {string} the transformation function as string + * @returns {TestRunner} + */ + function transform_and_set (name, transform) { + if (/base64EncodeCredentials/.test(transform)) { + const [user, password] = transform + .slice(transform.indexOf('(') + 1, -1) + .replace(/ /g, '') + .split(',') + const userAndPassword = `${delve(response, user)}:${delve(response, password)}` + stash.set(name, Buffer.from(userAndPassword).toString('base64')) + } else { + throw new Error(`Unknown transform: '${transform}'`) + } + } + + /** + * Runs a client command + * @param {object} the action to perform + * @returns {Promise} + */ + async function doAction (action, stats) { + const cmd = parseDo(action) + let api + try { + api = delve(client, cmd.method).bind(client) + } catch (err) { + console.error(`\nError: Cannot find the method '${cmd.method}' in the client.\n`) + process.exit(1) + } + + const options = { ignore: cmd.params.ignore, headers: action.headers } + if (!Array.isArray(options.ignore)) options.ignore = [options.ignore] + if (cmd.params.ignore) delete cmd.params.ignore + + // ndjson apis should always send the body as an array + if (isNDJson(cmd.api) && !Array.isArray(cmd.params.body)) { + cmd.params.body = [cmd.params.body] + } + + const [err, result] = await to(api(cmd.params, options)) + let warnings = result ? result.warnings : null + const body = result ? 
result.body : null + + if (action.warnings && warnings === null) { + assert.fail('We should get a warning header', action.warnings) + } else if (!action.warnings && warnings !== null) { + // if there is only the 'default shard will change' + // warning we skip the check, because the yaml + // spec may not be updated + let hasDefaultShardsWarning = false + warnings.forEach(h => { + if (/default\snumber\sof\sshards/g.test(h)) { + hasDefaultShardsWarning = true + } + }) + + if (hasDefaultShardsWarning === true && warnings.length > 1) { + assert.fail('We are not expecting warnings', warnings) + } + } else if (action.warnings && warnings !== null) { + // if the yaml warnings do not contain the + // 'default shard will change' warning + // we do not check its presence in the warnings array + // because the yaml spec may not be updated + let hasDefaultShardsWarning = false + action.warnings.forEach(h => { + if (/default\snumber\sof\sshards/g.test(h)) { + hasDefaultShardsWarning = true + } + }) + + if (hasDefaultShardsWarning === false) { + warnings = warnings.filter(h => !h.test(/default\snumber\sof\sshards/g)) + } + + stats.assertions += 1 + assert.ok(deepEqual(warnings, action.warnings)) + } + + if (action.catch) { + stats.assertions += 1 + assert.ok( + parseDoError(err, action.catch), + `the error should be: ${action.catch}` + ) + try { + response = JSON.parse(err.body) + } catch (e) { + response = err.body + } + } else { + stats.assertions += 1 + assert.ifError(err, `should not error: ${cmd.method}`, action) + response = body + } + } + + /** + * Runs an actual test + * @param {string} the name of the test + * @param {object} the actions to perform + * @returns {Promise} + */ + async function exec (name, actions, stats, junit) { + // tap.comment(name) + for (const action of actions) { + if (action.skip) { + if (shouldSkip(esVersion, action.skip)) { + junit.skip(fillStashedValues(action.skip)) + logSkip(fillStashedValues(action.skip)) + break + } + } + + if (action.do) { + await doAction(fillStashedValues(action.do), stats) + } + + if (action.set) { + const key = Object.keys(action.set)[0] + set(fillStashedValues(key), action.set[key]) + } + + if (action.transform_and_set) { + const key = Object.keys(action.transform_and_set)[0] + transform_and_set(key, action.transform_and_set[key]) + } + + if (action.match) { + stats.assertions += 1 + const key = Object.keys(action.match)[0] + match( + // in some cases, the yaml refers to the body with an empty string + key === '$body' || key === '' + ? response + : delve(response, fillStashedValues(key)), + key === '$body' + ? action.match[key] + : fillStashedValues(action.match)[key], + action.match + ) + } + + if (action.lt) { + stats.assertions += 1 + const key = Object.keys(action.lt)[0] + lt( + delve(response, fillStashedValues(key)), + fillStashedValues(action.lt)[key] + ) + } + + if (action.gt) { + stats.assertions += 1 + const key = Object.keys(action.gt)[0] + gt( + delve(response, fillStashedValues(key)), + fillStashedValues(action.gt)[key] + ) + } + + if (action.lte) { + stats.assertions += 1 + const key = Object.keys(action.lte)[0] + lte( + delve(response, fillStashedValues(key)), + fillStashedValues(action.lte)[key] + ) + } + + if (action.gte) { + stats.assertions += 1 + const key = Object.keys(action.gte)[0] + gte( + delve(response, fillStashedValues(key)), + fillStashedValues(action.gte)[key] + ) + } + + if (action.length) { + stats.assertions += 1 + const key = Object.keys(action.length)[0] + length( + key === '$body' || key === '' + ?
response + : delve(response, fillStashedValues(key)), + key === '$body' + ? action.length[key] + : fillStashedValues(action.length)[key] + ) + } + + if (action.is_true) { + stats.assertions += 1 + const isTrue = fillStashedValues(action.is_true) + is_true( + delve(response, isTrue), + isTrue + ) + } + + if (action.is_false) { + stats.assertions += 1 + const isFalse = fillStashedValues(action.is_false) + is_false( + delve(response, isFalse), + isFalse + ) + } + } + } + + return { run } +} + +/** + * Asserts that the given value is truthy + * @param {any} the value to check + * @param {string} an optional message + * @returns {TestRunner} + */ +function is_true (val, msg) { + assert.ok(val, `expect truthy value: ${msg} - value: ${JSON.stringify(val)}`) +} + +/** + * Asserts that the given value is falsey + * @param {any} the value to check + * @param {string} an optional message + * @returns {TestRunner} + */ +function is_false (val, msg) { + assert.ok(!val, `expect falsey value: ${msg} - value: ${JSON.stringify(val)}`) +} + +/** + * Asserts that two values are the same + * @param {any} the first value + * @param {any} the second value + * @returns {TestRunner} + */ +function match (val1, val2, action) { + // both values are objects + if (typeof val1 === 'object' && typeof val2 === 'object') { + assert.ok(deepEqual(val1, val2), action) + // the first value is the body as string and the second a pattern string + } else if ( + typeof val1 === 'string' && typeof val2 === 'string' && + val2.startsWith('/') && (val2.endsWith('/\n') || val2.endsWith('/')) + ) { + const regStr = val2 + // match all comments within a "regexp" match arg + .replace(/([\S\s]?)#[^\n]*\n/g, (match, prevChar) => { + return prevChar === '\\' ? match : `${prevChar}\n` + }) + // remove all whitespace from the expression, all meaningful + // whitespace is represented with \s + .replace(/\s/g, '') + .slice(1, -1) + // 'm' adds the support for multiline regex + assert.ok(new RegExp(regStr, 'm').test(val1), `should match pattern provided: ${val2}, action: ${JSON.stringify(action)}`) + // tap.match(val1, new RegExp(regStr, 'm'), `should match pattern provided: ${val2}, action: ${JSON.stringify(action)}`) + // everything else + } else { + assert.equal(val1, val2, `should be equal: ${val1} - ${val2}, action: ${JSON.stringify(action)}`) + } +} + +/** + * Asserts that the first value is less than the second + * It also verifies that the two values are numbers + * @param {any} the first value + * @param {any} the second value + * @returns {TestRunner} + */ +function lt (val1, val2) { + ;[val1, val2] = getNumbers(val1, val2) + assert.ok(val1 < val2) +} + +/** + * Asserts that the first value is greater than the second + * It also verifies that the two values are numbers + * @param {any} the first value + * @param {any} the second value + * @returns {TestRunner} + */ +function gt (val1, val2) { + ;[val1, val2] = getNumbers(val1, val2) + assert.ok(val1 > val2) +} + +/** + * Asserts that the first value is less than or equal the second + * It also verifies that the two values are numbers + * @param {any} the first value + * @param {any} the second value + * @returns {TestRunner} + */ +function lte (val1, val2) { + ;[val1, val2] = getNumbers(val1, val2) + assert.ok(val1 <= val2) +} + +/** + * Asserts that the first value is greater than or equal the second + * It also verifies that the two values are numbers + * @param {any} the first value + * @param {any} the second value + * @returns {TestRunner} +*/ +function gte (val1, val2) { + 
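+  // getNumbers coerces both operands and throws a TypeError if either one is not numeric, so the comparison below never runs on non-number input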
;[val1, val2] = getNumbers(val1, val2) + assert.ok(val1 >= val2) +} + +/** + * Asserts that the given value has the specified length + * @param {string|object|array} the object to check + * @param {number} the expected length + * @returns {TestRunner} + */ +function length (val, len) { + if (typeof val === 'string' || Array.isArray(val)) { + assert.equal(val.length, len) + } else if (typeof val === 'object' && val !== null) { + assert.equal(Object.keys(val).length, len) + } else { + assert.fail(`length: the given value is invalid: ${val}`) + } +} + +/** + * Gets a `do` action object and returns a structured object, + * where the action is the key and the parameter is the value. + * Eg: + * { + * 'indices.create': { + * 'index': 'test' + * }, + * 'warnings': [ + * '[index] is deprecated' + * ] + * } + * becomes + * { + * method: 'indices.create', + * params: { + * index: 'test' + * }, + * warnings: [ + * '[index] is deprecated' + * ] + * } + * @param {object} + * @returns {object} + */ +function parseDo (action) { + return Object.keys(action).reduce((acc, val) => { + switch (val) { + case 'catch': + acc.catch = action.catch + break + case 'warnings': + acc.warnings = action.warnings + break + case 'node_selector': + acc.node_selector = action.node_selector + break + default: + // converts underscore to camelCase + // eg: put_mapping => putMapping + acc.method = val.replace(/_([a-z])/g, g => g[1].toUpperCase()) + acc.api = val + acc.params = camelify(action[val]) + } + return acc + }, {}) + + function camelify (obj) { + const newObj = {} + + // TODO: add camelCase support for this fields + const doNotCamelify = ['copy_settings'] + + for (const key in obj) { + const val = obj[key] + let newKey = key + if (!~doNotCamelify.indexOf(key)) { + // if the key starts with `_` we should not camelify the first occurence + // eg: _source_include => _sourceInclude + newKey = key[0] === '_' + ? 
'_' + key.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) + : key.replace(/_([a-z])/g, k => k[1].toUpperCase()) + } + + if ( + val !== null && + typeof val === 'object' && + !Array.isArray(val) && + key !== 'body' + ) { + newObj[newKey] = camelify(val) + } else { + newObj[newKey] = val + } + } + + return newObj + } +} + +function parseDoError (err, spec) { + const httpErrors = { + bad_request: 400, + unauthorized: 401, + forbidden: 403, + missing: 404, + request_timeout: 408, + conflict: 409, + unavailable: 503 + } + + if (httpErrors[spec]) { + return err.statusCode === httpErrors[spec] + } + + if (spec === 'request') { + return err.statusCode >= 400 && err.statusCode < 600 + } + + if (spec.startsWith('/') && spec.endsWith('/')) { + return new RegExp(spec.slice(1, -1), 'g').test(JSON.stringify(err.body)) + } + + if (spec === 'param') { + return err instanceof ConfigurationError + } + + return false +} + +function getSkip (arr) { + if (!Array.isArray(arr)) return null + for (let i = 0; i < arr.length; i++) { + if (arr[i].skip) return arr[i].skip + } + return null +} + +// Gets two *maybe* numbers and returns two valid numbers; +// it throws if one or both are not a valid number +// the returned value is an array with the new values +function getNumbers (val1, val2) { + const val1Numeric = Number(val1) + if (isNaN(val1Numeric)) { + throw new TypeError(`val1 is not a valid number: ${val1}`) + } + const val2Numeric = Number(val2) + if (isNaN(val2Numeric)) { + throw new TypeError(`val2 is not a valid number: ${val2}`) + } + return [val1Numeric, val2Numeric] +} + +function getRandomInt (min, max) { + return Math.floor(Math.random() * (max - min)) + min +} + +/** + * Logs a skip + * @param {object} the actions + * @returns {TestRunner} + */ +function logSkip (action) { + if (action.reason && action.version) { + console.log(`Skip: ${action.reason} (${action.version})`) + } else if (action.features) { + console.log(`Skip: ${JSON.stringify(action.features)})`) + } else { + console.log('Skipped') + } +} + +/** + * Decides if a test should be skipped + * @param {object} the actions + * @returns {boolean} + */ +function shouldSkip (esVersion, action) { + let shouldSkip = false + // skip based on the version + if (action.version) { + if (action.version.trim() === 'all') return true + const versions = action.version.split(',').filter(Boolean) + for (const version of versions) { + const [min, max] = version.split('-').map(v => v.trim()) + // if both `min` and `max` are specified + if (min && max) { + shouldSkip = semver.satisfies(esVersion, action.version) + // if only `min` is specified + } else if (min) { + shouldSkip = semver.gte(esVersion, min) + // if only `max` is specified + } else if (max) { + shouldSkip = semver.lte(esVersion, max) + // something went wrong!
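+      // (neither bound was parsed out of the range string, so fail loudly instead of silently skipping nothing)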
+ } else { + throw new Error(`skip: Bad version range: ${action.version}`) + } + } + } + + if (shouldSkip) return true + + if (action.features) { + if (!Array.isArray(action.features)) action.features = [action.features] + // returns true if one of the features is not present in the supportedFeatures + shouldSkip = !!action.features.filter(f => !~supportedFeatures.indexOf(f)).length + } + + if (shouldSkip) return true + + return false +} + +function isNDJson (api) { + const spec = require(join(locations.specFolder, `${api}.json`)) + const { content_type } = spec[Object.keys(spec)[0]].headers + return Boolean(content_type && content_type.includes('application/x-ndjson')) +} + +/** + * Updates the array syntax of keys and values + * eg: 'hits.hits.1.stuff' to 'hits.hits[1].stuff' + * @param {object} the action to update + * @returns {obj} the updated action + */ +// function updateArraySyntax (obj) { +// const newObj = {} + +// for (const key in obj) { +// const newKey = key.replace(/\.\d{1,}\./g, v => `[${v.slice(1, -1)}].`) +// const val = obj[key] + +// if (typeof val === 'string') { +// newObj[newKey] = val.replace(/\.\d{1,}\./g, v => `[${v.slice(1, -1)}].`) +// } else if (val !== null && typeof val === 'object') { +// newObj[newKey] = updateArraySyntax(val) +// } else { +// newObj[newKey] = val +// } +// } + +// return newObj +// } + +module.exports = build diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js index 824fb05fb..c5f2a2fec 100644 --- a/test/integration/test-runner.js +++ b/test/integration/test-runner.js @@ -27,7 +27,7 @@ const helper = require('./helper') const deepEqual = require('fast-deep-equal') const { join } = require('path') const { locations } = require('../../scripts/download-artifacts') -const { ConfigurationError } = require('../../lib/errors') +const packageJson = require('../../package.json') const { delve, to, isXPackTemplate, sleep } = helper @@ -60,11 +60,11 @@ function build (opts = {}) { if (isXPack) { // wipe rollup jobs - const { body: jobsList } = await client.rollup.getJobs({ id: '_all' }) + const jobsList = await client.rollup.getJobs({ id: '_all' }) const jobsIds = jobsList.jobs.map(j => j.config.id) await helper.runInParallel( client, 'rollup.stopJob', - jobsIds.map(j => ({ id: j, waitForCompletion: true })) + jobsIds.map(j => ({ id: j, wait_for_completion: true })) ) await helper.runInParallel( client, 'rollup.deleteJob', @@ -72,7 +72,7 @@ function build (opts = {}) { ) // delete slm policies - const { body: policies } = await client.slm.getLifecycle() + const policies = await client.slm.getLifecycle() await helper.runInParallel( client, 'slm.deleteLifecycle', Object.keys(policies).map(p => ({ policy_id: p })) @@ -81,7 +81,7 @@ function build (opts = {}) { // remove 'x_pack_rest_user', used in some xpack test await client.security.deleteUser({ username: 'x_pack_rest_user' }, { ignore: [404] }) - const { body: searchableSnapshotIndices } = await client.cluster.state({ + const searchableSnapshotIndices = await client.cluster.state({ metric: 'metadata', filter_path: 'metadata.indices.*.settings.index.store.snapshot' }) @@ -95,7 +95,7 @@ function build (opts = {}) { } // clean snapshots - const { body: repositories } = await client.snapshot.getRepository() + const repositories = await client.snapshot.getRepository() for (const repository of Object.keys(repositories)) { await client.snapshot.delete({ repository, snapshot: '*' }, { ignore: [404] }) await client.snapshot.deleteRepository({ repository }, { ignore: [404] }) @@ 
-110,24 +110,24 @@ function build (opts = {}) { await client.indices.delete({ index: '*,-.ds-ilm-history-*', expand_wildcards: 'open,closed,hidden' }, { ignore: [404] }) // delete templates - const { body: templates } = await client.cat.templates({ h: 'name' }) + const templates = await client.cat.templates({ h: 'name' }) for (const template of templates.split('\n').filter(Boolean)) { if (isXPackTemplate(template)) continue - const { body } = await client.indices.deleteTemplate({ name: template }, { ignore: [404] }) + const body = await client.indices.deleteTemplate({ name: template }, { ignore: [404] }) if (JSON.stringify(body).includes(`index_template [${template}] missing`)) { await client.indices.deleteIndexTemplate({ name: template }, { ignore: [404] }) } } // delete component template - const { body } = await client.cluster.getComponentTemplate() + const body = await client.cluster.getComponentTemplate() const components = body.component_templates.filter(c => !isXPackTemplate(c.name)).map(c => c.name) if (components.length > 0) { await client.cluster.deleteComponentTemplate({ name: components.join(',') }, { ignore: [404] }) } // Remove any cluster setting - const { body: settings } = await client.cluster.getSettings() + const settings = await client.cluster.getSettings() const newSettings = {} for (const setting in settings) { if (Object.keys(settings[setting]).length === 0) continue @@ -137,7 +137,7 @@ function build (opts = {}) { } } if (Object.keys(newSettings).length > 0) { - await client.cluster.putSettings({ body: newSettings }) + await client.cluster.putSettings(newSettings) } if (isXPack) { @@ -147,20 +147,20 @@ function build (opts = {}) { 'watch-history-ilm-policy', 'ml-size-based-ilm-policy', 'logs', 'metrics' ] - const { body: policies } = await client.ilm.getLifecycle() + const policies = await client.ilm.getLifecycle() for (const policy in policies) { if (preserveIlmPolicies.includes(policy)) continue await client.ilm.deleteLifecycle({ policy }) } // delete autofollow patterns - const { body: patterns } = await client.ccr.getAutoFollowPattern() + const patterns = await client.ccr.getAutoFollowPattern() for (const { name } of patterns.patterns) { await client.ccr.deleteAutoFollowPattern({ name }) } // delete all tasks - const { body: nodesTask } = await client.tasks.list() + const nodesTask = await client.tasks.list() const tasks = Object.keys(nodesTask.nodes) .reduce((acc, node) => { const { tasks } = nodesTask.nodes[node] @@ -172,11 +172,11 @@ function build (opts = {}) { await helper.runInParallel( client, 'tasks.cancel', - tasks.map(id => ({ taskId: id })) + tasks.map(id => ({ task_id: id })) ) } - const { body: shutdownNodes } = await client.shutdown.getNode() + const shutdownNodes = await client.shutdown.getNode() if (shutdownNodes._nodes == null && shutdownNodes.cluster_name == null) { for (const node of shutdownNodes.nodes) { await client.shutdown.deleteNode({ node_id: node.node_id }) @@ -186,7 +186,7 @@ function build (opts = {}) { // wait for pending task before resolving the promise await sleep(100) while (true) { - const { body } = await client.cluster.pendingTasks() + const body = await client.cluster.pendingTasks() if (body.tasks.length === 0) break await sleep(500) } @@ -223,7 +223,8 @@ function build (opts = {}) { try { await client.security.putUser({ username: 'x_pack_rest_user', - body: { password: 'x-pack-test-password', roles: ['superuser'] } + password: 'x-pack-test-password', + roles: ['superuser'] }) } catch (err) { assert.ifError(err, 'should not 
error: security.putUser') @@ -379,7 +380,22 @@ function build (opts = {}) { process.exit(1) } - const options = { ignore: cmd.params.ignore, headers: action.headers } + if (action.headers) { + switch (action.headers['Content-Type'] || action.headers['content-type']) { + case 'application/json': + delete action.headers['Content-Type'] + delete action.headers['content-type'] + action.headers['Content-Type'] = `application/vnd.elasticsearch+json; compatible-with=${packageJson.version.split('.')[0]}` + break + case 'application/x-ndjson': + delete action.headers['Content-Type'] + delete action.headers['content-type'] + action.headers['Content-Type'] = `application/vnd.elasticsearch+x-ndjson; compatible-with=${packageJson.version.split('.')[0]}` + break + } + } + + const options = { ignore: cmd.params.ignore, headers: action.headers, meta: true } if (!Array.isArray(options.ignore)) options.ignore = [options.ignore] if (cmd.params.ignore) delete cmd.params.ignore @@ -388,6 +404,10 @@ function build (opts = {}) { cmd.params.body = [cmd.params.body] } + if (typeof cmd.params.body === 'string' && !isNDJson(cmd.api)) { + cmd.params.body = JSON.parse(cmd.params.body) + } + const [err, result] = await to(api(cmd.params, options)) let warnings = result ? result.warnings : null const body = result ? result.body : null @@ -707,6 +727,7 @@ function length (val, len) { * @returns {object} */ function parseDo (action) { + action = JSON.parse(JSON.stringify(action)) return Object.keys(action).reduce((acc, val) => { switch (val) { case 'catch': @@ -723,42 +744,42 @@ function parseDo (action) { // eg: put_mapping => putMapping acc.method = val.replace(/_([a-z])/g, g => g[1].toUpperCase()) acc.api = val - acc.params = camelify(action[val]) + acc.params = action[val] // camelify(action[val]) } return acc }, {}) - function camelify (obj) { - const newObj = {} - - // TODO: add camelCase support for this fields - const doNotCamelify = ['copy_settings'] - - for (const key in obj) { - const val = obj[key] - let newKey = key - if (!~doNotCamelify.indexOf(key)) { - // if the key starts with `_` we should not camelify the first occurence - // eg: _source_include => _sourceInclude - newKey = key[0] === '_' - ? '_' + key.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : key.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - if ( - val !== null && - typeof val === 'object' && - !Array.isArray(val) && - key !== 'body' - ) { - newObj[newKey] = camelify(val) - } else { - newObj[newKey] = val - } - } - - return newObj - } + // function camelify (obj) { + // const newObj = {} + + // // TODO: add camelCase support for this fields + // const doNotCamelify = ['copy_settings'] + + // for (const key in obj) { + // const val = obj[key] + // let newKey = key + // if (!~doNotCamelify.indexOf(key)) { + // // if the key starts with `_` we should not camelify the first occurence + // // eg: _source_include => _sourceInclude + // newKey = key[0] === '_' + // ? 
'_' + key.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) + // : key.replace(/_([a-z])/g, k => k[1].toUpperCase()) + // } + + // if ( + // val !== null && + // typeof val === 'object' && + // !Array.isArray(val) && + // key !== 'body' + // ) { + // newObj[newKey] = camelify(val) + // } else { + // newObj[newKey] = val + // } + // } + + // return newObj + // } } function parseDoError (err, spec) { @@ -785,7 +806,10 @@ function parseDoError (err, spec) { } if (spec === 'param') { - return err instanceof ConfigurationError + // the new client does not perform runtime checks, + // but it relies on typescript informing the user + return true + // return err instanceof ConfigurationError } return false diff --git a/test/types/api-response-body.test-d.ts b/test/types/api-response-body.test-d.ts deleted file mode 100644 index ccd236b8d..000000000 --- a/test/types/api-response-body.test-d.ts +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import { expectType, expectError } from 'tsd' -import { Readable as ReadableStream } from 'stream'; -import { TransportRequestCallback, Context } from '../../lib/Transport' -import { Client, ApiError } from '../../' - -const client = new Client({ - node: 'http://localhost:9200/' -}) - -interface SearchBody { - query: { - match: { foo: string } - } -} - -interface ShardsResponse { - total: number; - successful: number; - failed: number; - skipped: number; -} - -interface Explanation { - value: number; - description: string; - details: Explanation[]; -} - -interface SearchResponse<T> { - took: number; - timed_out: boolean; - _scroll_id?: string; - _shards: ShardsResponse; - hits: { - total: number; - max_score: number; - hits: Array<{ - _index: string; - _type: string; - _id: string; - _score: number; - _source: T; - _version?: number; - _explanation?: Explanation; - fields?: any; - highlight?: any; - inner_hits?: any; - matched_queries?: string[]; - sort?: string[]; - }>; - }; - aggregations?: any; -} - -interface Source { - foo: string -} - -// body that does not respect the RequestBody constraint -expectError( - client.search({ - index: 'hello', - body: 42 - }).then(console.log) -) - -// No generics (promise style) -{ - const response = await client.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType<Record<string, any>>(response.body) - expectType<Context>(response.meta.context) -} - -// Define only the response body (promise style) -{ - const response = await client.search<SearchResponse<Source>>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType<SearchResponse<Source>>(response.body) - expectType<Context>(response.meta.context) -} - -// Define response body and request body (promise style) -{ - const response = await client.search<SearchResponse<Source>, SearchBody>({ - index:
'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType<SearchResponse<Source>>(response.body) - expectType<Context>(response.meta.context) -} - -// Define response body, request body and the context (promise style) -{ - const response = await client.search<SearchResponse<Source>, SearchBody, Context>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType<SearchResponse<Source>>(response.body) - expectType<Context>(response.meta.context) -} - -// Send request body as string (promise style) -{ - const response = await client.search({ - index: 'test', - body: 'hello world' - }) - - expectType<Record<string, any>>(response.body) - expectType<Context>(response.meta.context) -} - -// Send request body as buffer (promise style) -{ - const response = await client.search({ - index: 'test', - body: Buffer.from('hello world') - }) - - expectType<Record<string, any>>(response.body) - expectType<Context>(response.meta.context) -} - -// Send request body as readable stream (promise style) -{ - const response = await client.search({ - index: 'test', - body: new ReadableStream() - }) - - expectType<Record<string, any>>(response.body) - expectType<Context>(response.meta.context) -} - -// No generics (callback style) -{ - const result = client.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, (err, response) => { - expectType<ApiError>(err) - expectType<Record<string, any>>(response.body) - expectType<Context>(response.meta.context) - }) - expectType<TransportRequestCallback>(result) -} - -// Define only the response body (callback style) -{ - const result = client.search<SearchResponse<Source>>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, (err, response) => { - expectType<ApiError>(err) - expectType<SearchResponse<Source>>(response.body) - expectType<Context>(response.meta.context) - }) - expectType<TransportRequestCallback>(result) -} - -// Define response body and request body (callback style) -{ - const result = client.search<SearchResponse<Source>, SearchBody>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, (err, response) => { - expectType<ApiError>(err) - expectType<SearchResponse<Source>>(response.body) - expectType<Context>(response.meta.context) - }) - expectType<TransportRequestCallback>(result) -} - -// Define response body, request body and the context (callback style) -{ - const result = client.search<SearchResponse<Source>, SearchBody, Context>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, (err, response) => { - expectType<ApiError>(err) - expectType<SearchResponse<Source>>(response.body) - expectType<Context>(response.meta.context) - }) - expectType<TransportRequestCallback>(result) -} - -// Send request body as string (callback style) -{ - const result = client.search({ - index: 'test', - body: 'hello world' - }, (err, response) => { - expectType<ApiError>(err) - expectType<Record<string, any>>(response.body) - expectType<Context>(response.meta.context) - }) - expectType<TransportRequestCallback>(result) -} - -// Send request body as buffer (callback style) -{ - const result = client.search({ - index: 'test', - body: Buffer.from('hello world') - }, (err, response) => { - expectType<ApiError>(err) - expectType<Record<string, any>>(response.body) - expectType<Context>(response.meta.context) - }) - expectType<TransportRequestCallback>(result) -} - -// Send request body as readable stream (callback style) -{ - const result = client.search({ - index: 'test', - body: new ReadableStream() - }, (err, response) => { - expectType<ApiError>(err) - expectType<Record<string, any>>(response.body) - expectType<Context>(response.meta.context) - }) - expectType<TransportRequestCallback>(result) -} diff --git a/test/types/api-response.test-d.ts b/test/types/api-response.test-d.ts deleted file mode 100644 index 1765c2211..000000000 --- a/test/types/api-response.test-d.ts +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership.
Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import { expectType } from 'tsd' -import { TransportRequestCallback, Context } from '../../lib/Transport' -import { Client, ApiError } from '../../' - -const client = new Client({ - node: 'http://localhost:9200/' -}) - -// No generics (promise style) -{ - const response = await client.cat.count({ index: 'test' }) - - expectType<Record<string, any>>(response.body) - expectType<Context>(response.meta.context) -} - -// Define only the response body (promise style) -{ - const response = await client.cat.count<string>({ index: 'test' }) - - expectType<string>(response.body) - expectType<Context>(response.meta.context) -} - -// Define response body and the context (promise style) -{ - const response = await client.cat.count<string, string>({ index: 'test' }) - - expectType<string>(response.body) - expectType<string>(response.meta.context) -} - -// No generics (callback style) -{ - const result = client.cat.count({ index: 'test' }, (err, response) => { - expectType<ApiError>(err) - expectType<Record<string, any>>(response.body) - expectType<Context>(response.meta.context) - }) - expectType<TransportRequestCallback>(result) -} - -// Define only the response body (callback style) -{ - const result = client.cat.count<string>({ index: 'test' }, (err, response) => { - expectType<ApiError>(err) - expectType<string>(response.body) - expectType<Context>(response.meta.context) - }) - expectType<TransportRequestCallback>(result) -} - -// Define response body and the context (callback style) -{ - const result = client.cat.count<string, string>({ index: 'test' }, (err, response) => { - expectType<ApiError>(err) - expectType<string>(response.body) - expectType<string>(response.meta.context) - }) - expectType<TransportRequestCallback>(result) -} diff --git a/test/types/client-options.test-d.ts b/test/types/client-options.test-d.ts deleted file mode 100644 index 814ba08df..000000000 --- a/test/types/client-options.test-d.ts +++ /dev/null @@ -1,695 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -import { URL } from 'url' -import { expectType, expectError } from 'tsd' -import { TransportGetConnectionOptions } from '../../lib/Transport' -import { - Client, - Serializer, - Connection, - ConnectionPool, - Transport, - errors -} from '../../' - -/** - * `node` option - */ -expectType<Client>( - new Client({ - node: 'http://localhost:9200/' - }) -) - -expectType<Client>( - new Client({ - nodes: ['http://localhost:9200/', 'http://localhost:9200/'] - }) -) - -expectType<Client>( - new Client({ - node: { - url: new URL('http://localhost:9200/'), - id: 'my-node' - } - }) -) - -expectType<Client>( - new Client({ - nodes: [{ - url: new URL('http://localhost:9200/'), - id: 'my-node-1' - }, { - url: new URL('http://localhost:9201/'), - id: 'my-node-2' - }] - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 42 - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: { - url: 'http://localhost:9200/', - id: 'my-node' - } - }) -) - -/** - * `maxRetries` option - */ -expectType<Client>( - new Client({ - node: 'http://localhost:9200/', - maxRetries: 5 - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - maxRetries: 'five' - }) -) - -/** - * `requestTimeout` option - */ -expectType<Client>( - new Client({ - node: 'http://localhost:9200/', - requestTimeout: 5 - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - requestTimeout: 'five' - }) -) - -/** - * `pingTimeout` option - */ -expectType<Client>( - new Client({ - node: 'http://localhost:9200/', - pingTimeout: 5 - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - pingTimeout: 'five' - }) -) - -/** - * `sniffInterval` option - */ -expectType<Client>( - new Client({ - node: 'http://localhost:9200/', - sniffInterval: 5 - }) -) - -expectType<Client>( - new Client({ - node: 'http://localhost:9200/', - sniffInterval: false - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - sniffInterval: 'five' - }) -) - -/** - * `sniffOnStart` option - */ -expectType<Client>( - new Client({ - node: 'http://localhost:9200/', - sniffOnStart: true - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - sniffOnStart: 'no' - }) -) - -/** - * `sniffEndpoint` option - */ -expectType<Client>( - new Client({ - node: 'http://localhost:9200/', - sniffEndpoint: '/custom' - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - sniffEndpoint: false - }) -) - -/** - * `sniffOnConnectionFault` option - */ -expectType<Client>( - new Client({ - node: 'http://localhost:9200/', - sniffOnConnectionFault: true - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - sniffOnConnectionFault: 'yes' - }) -) - -/** - * `resurrectStrategy` option - */ -expectType<Client>( - new Client({ - node: 'http://localhost:9200/', - resurrectStrategy: 'ping' - }) -) - -expectType<Client>( - new Client({ - node: 'http://localhost:9200/', - resurrectStrategy: 'optimistic' - }) -) - -expectType<Client>( - new Client({ - node: 'http://localhost:9200/', - resurrectStrategy: 'none' - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - resurrectStrategy: 'custom' - }) -) - -/** - * `suggestCompression` option - */ -expectType<Client>(
new Client({ - node: 'http://localhost:9200/', - suggestCompression: true - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - suggestCompression: 'no' - }) -) - -/** - * `compression` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - compression: 'gzip' - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - compression: 'deflate' - }) -) - -/** - * `headers` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - headers: { foo: 'bar' } - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - headers: 'foo=bar' - }) -) - -/** - * `opaqueIdPrefix` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - opaqueIdPrefix: 'foo-' - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - opaqueIdPrefix: 42 - }) -) - -/** - * `name` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - name: 'foo' - }) -) - -expectType( - new Client({ - node: 'http://localhost:9200/', - name: Symbol('foo') - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - name: 42 - }) -) - -/** - * `auth` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - auth: { - username: 'username', - password: 'password' - } - }) -) - -expectType( - new Client({ - node: 'http://localhost:9200/', - auth: { - apiKey: 'abcd' - } - }) -) - -expectType( - new Client({ - node: 'http://localhost:9200/', - auth: { - apiKey: { - api_key: 'foo', - id: 'bar' - } - } - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - auth: 'password' - }) -) - -/** - * `cloud` option - */ -expectType( - new Client({ - cloud: { - id: 'localhost:9200' - } - }) -) - -expectError( - // @ts-expect-error - new Client({ - cloud: { - id: 42 - } - }) -) - -/** - * `agent` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - agent: { - keepAlive: true, - keepAliveMsecs: 42, - maxSockets: 42, - maxFreeSockets: 42 - } - }) -) - -expectType( - new Client({ - node: 'http://localhost:9200/', - agent: false - }) -) -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - agent: { - // @ts-expect-error - keepAlive: 'yes', - // @ts-expect-error - keepAliveMsecs: true, - // @ts-expect-error - maxSockets: 'all', - maxFreeSockets: null - } - }) -) - -/** - * `ssl` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - ssl: { - ca: 'cert', - rejectUnauthorized: true - } - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - ssl: { - // @ts-expect-error - ca: 42, - // @ts-expect-error - rejectUnauthorized: 'yes' - } - }) -) - -/** - * `generateRequestId` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - generateRequestId (params, options) { - return 'id' - } - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - generateRequestId: 'id' - }) -) - -/** - * `nodeSelector` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - nodeSelector (connections) { - return connections[0] - } - }) -) - -expectType( - new Client({ - node: 
'http://localhost:9200/', - nodeSelector: 'round-robin' - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - nodeSelector (connections) { - return 'id' - } - }) -) - -/** - * `nodeFilter` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - nodeFilter (connection) { - return true - } - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - nodeFilter (connection) { - return 'id' - } - }) -) - -/** - * `Serializer` option - */ -{ - class CustomSerializer extends Serializer { - deserialize (str: string) { - return super.deserialize(str) - } - } - - expectType( - new Client({ - node: 'http://localhost:9200/', - Serializer: CustomSerializer - }) - ) -} - -/** - * `Connection` option - */ -{ - class CustomConnection extends Connection { - close () { - return super.close() - } - } - - expectType( - new Client({ - node: 'http://localhost:9200/', - Connection: CustomConnection - }) - ) -} - -{ - class CustomConnection { - close () { - return Promise.resolve() - } - } - - expectError( - new Client({ - node: 'http://localhost:9200/', - // @ts-expect-error - Connection: CustomConnection - }) - ) -} - -/** - * `ConnectionPool` option - */ -{ - class CustomConnectionPool extends ConnectionPool { - empty () { - return super.empty() - } - } - - expectType( - new Client({ - node: 'http://localhost:9200/', - ConnectionPool: CustomConnectionPool - }) - ) -} - -{ - class CustomConnectionPool { - empty () { - return this - } - } - - expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - ConnectionPool: CustomConnectionPool - }) - ) -} - -/** - * `Transport` option - */ -{ - class CustomTransport extends Transport { - getConnection (opts: TransportGetConnectionOptions) { - return super.getConnection(opts) - } - } - - expectType( - new Client({ - node: 'http://localhost:9200/', - Transport: CustomTransport - }) - ) -} - -{ - class CustomTransport { - getConnection (opts: TransportGetConnectionOptions) { - return null - } - } - - expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - Transport: CustomTransport - }) - ) -} - -/** - * `context` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - context: { hello: 'world' } - }) -) - -/** - * `proxy` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - proxy: 'http://localhost:8080/' - }) -) - -expectType( - new Client({ - node: 'http://localhost:9200/', - proxy: new URL('http://localhost:8080/') - }) -) - -expectError( - // @ts-expect-error - new Client({ - node: 'http://localhost:9200/', - proxy: 42 - }) -) diff --git a/test/types/client.test-d.ts b/test/types/client.test-d.ts deleted file mode 100644 index 49d7d03d8..000000000 --- a/test/types/client.test-d.ts +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import { expectType } from 'tsd' -import { Client, ApiError, ApiResponse, RequestEvent, ResurrectEvent } from '../../' -import { TransportRequestCallback, TransportRequestPromise } from '../../lib/Transport' - -const client = new Client({ - node: 'http://localhost:9200/' -}) - -client.on('request', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('response', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('sniff', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('resurrect', (err, meta) => { - expectType(err) - expectType(meta) -}) - -// Test all overloads - -// Callbacks style -{ - const result = client.info((err, result) => { - expectType(err) - expectType(result) - }) - expectType(result) - expectType(result.abort()) -} - -{ - const result = client.info({ pretty: true }, (err, result) => { - expectType(err) - expectType(result) - }) - expectType(result) - expectType(result.abort()) -} - -{ - const result = client.info({ pretty: true }, { ignore: [404] }, (err, result) => { - expectType(err) - expectType(result) - }) - expectType(result) - expectType(result.abort()) -} - -// Promise style -{ - const promise = client.info() - expectType>(promise) - promise - .then(result => expectType(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -{ - const promise = client.info({ pretty: true }) - expectType>(promise) - promise - .then(result => expectType(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -{ - const promise = client.info({ pretty: true }, { ignore: [404] }) - expectType>(promise) - promise - .then(result => expectType(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -// Promise style with async await -{ - const promise = client.info() - expectType>(promise) - expectType(promise.abort()) - try { - expectType(await promise) - } catch (err) { - expectType(err) - } -} - -{ - const promise = client.info({ pretty: true }) - expectType>(promise) - expectType(promise.abort()) - try { - expectType(await promise) - } catch (err) { - expectType(err) - } -} - -{ - const promise = client.info({ pretty: true }, { ignore: [404] }) - expectType>(promise) - expectType(promise.abort()) - try { - expectType(await promise) - } catch (err) { - expectType(err) - } -} diff --git a/test/types/connection-pool.test-d.ts b/test/types/connection-pool.test-d.ts deleted file mode 100644 index f46f23e01..000000000 --- a/test/types/connection-pool.test-d.ts +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import { expectType, expectAssignable } from 'tsd' -import { URL } from 'url' -import { - BaseConnectionPool, - ConnectionPool, - CloudConnectionPool, - Connection -} from '../../' - -{ - const pool = new BaseConnectionPool({ - Connection: Connection, - ssl: { ca: 'stirng' }, - emit: (event, ...args) => true, - agent: { keepAlive: true }, - auth: { username: 'username', password: 'password' } - }) - - expectType(pool) - expectType(pool.connections) - expectType(pool.size) - - expectType(pool.markAlive(new Connection())) - expectType(pool.markDead(new Connection())) - expectType(pool.getConnection({ - filter (node) { return true }, - selector (connections) { return connections[0] }, - requestId: 'id', - name: 'name', - now: Date.now() - })) - expectType(pool.addConnection({})) - expectType(pool.removeConnection(new Connection())) - expectType(pool.empty()) - expectType(pool.update([])) - expectType(pool.nodesToHost([], 'https')) - expectType<{ url: URL }>(pool.urlToHost('url')) -} - -{ - const pool = new ConnectionPool({ - Connection: Connection, - ssl: { ca: 'stirng' }, - emit: (event, ...args) => true, - agent: { keepAlive: true }, - auth: { username: 'username', password: 'password' }, - pingTimeout: 1000, - resurrectStrategy: 'ping', - sniffEnabled: true - }) - - expectAssignable(pool) - expectType(pool.connections) - expectType(pool.size) - expectType(pool.dead) - - expectAssignable(pool.markAlive(new Connection())) - expectAssignable(pool.markDead(new Connection())) - expectType(pool.getConnection({ - filter (node) { return true }, - selector (connections) { return connections[0] }, - requestId: 'id', - name: 'name', - now: Date.now() - })) - expectType(pool.addConnection({})) - expectAssignable(pool.removeConnection(new Connection())) - expectAssignable(pool.empty()) - expectAssignable(pool.update([])) - expectType(pool.nodesToHost([], 'https')) - expectType<{ url: URL }>(pool.urlToHost('url')) - expectType(pool.resurrect({ - now: Date.now(), - requestId: 'id', - name: 'name' - })) -} - -{ - const pool = new CloudConnectionPool({ - Connection: Connection, - ssl: { ca: 'stirng' }, - emit: (event, ...args) => true, - agent: { keepAlive: true }, - auth: { username: 'username', password: 'password' } - }) - - expectAssignable(pool) - expectType(pool.cloudConnection) - expectType(pool.getConnection()) -} diff --git a/test/types/connection.test-d.ts b/test/types/connection.test-d.ts deleted file mode 100644 index 66e9d257b..000000000 --- a/test/types/connection.test-d.ts +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import { expectType } from 'tsd' -import { URL } from 'url' -import { Connection } from '../../' -import { ConnectionOptions } from '../../lib/Connection' - -{ - const conn = new Connection({ - url: new URL('http://localhost:9200/'), - ssl: { ca: 'string' }, - id: 'id', - headers: {}, - agent: { keepAlive: false }, - status: 'alive', - roles: { master: true }, - auth: { username: 'username', password: 'password' } - }) - - expectType(conn) - expectType(conn.url) - expectType(conn.id) - expectType>(conn.headers) - expectType(conn.deadCount) - expectType(conn.resurrectTimeout) - expectType(conn.status) -} - -{ - const conn = new Connection({ - url: new URL('http://localhost:9200/'), - agent (opts) { - expectType(opts) - return 'the agent' - } - }) -} diff --git a/test/types/errors.test-d.ts b/test/types/errors.test-d.ts deleted file mode 100644 index e43cea39b..000000000 --- a/test/types/errors.test-d.ts +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import { expectType } from 'tsd' -import { errors, ApiResponse, Connection } from '../../' - -const response = { - body: {}, - statusCode: 200, - headers: {}, - warnings: null, - meta: { - context: {}, - name: 'name', - request: { - params: { method: 'GET', path: '/' }, - options: {}, - id: 42 - }, - connection: new Connection(), - attempts: 0, - aborted: false, - } -} - -{ - const err = new errors.ElasticsearchClientError() - expectType(err.name) - expectType(err.message) -} - -{ - const err = new errors.TimeoutError('message', response) - expectType(err.name) - expectType(err.message) - expectType(err.meta) -} - -{ - const err = new errors.ConnectionError('message', response) - expectType(err.name) - expectType(err.message) - expectType(err.meta) -} - -{ - const err = new errors.NoLivingConnectionsError('message', response) - expectType(err.name) - expectType(err.message) - expectType(err.meta) -} - -{ - const err = new errors.SerializationError('message', {}) - expectType(err.name) - expectType(err.message) - expectType(err.data) -} - -{ - const err = new errors.DeserializationError('message', 'data') - expectType(err.name) - expectType(err.message) - expectType(err.data) -} - -{ - const err = new errors.ConfigurationError('message') - expectType(err.name) - expectType(err.message) -} - -{ - const err = new errors.ResponseError(response) - expectType(err.name) - expectType(err.message) - expectType(err.meta) - expectType>(err.body) - expectType(err.statusCode) - expectType>(err.headers) -} - -{ - const err = new errors.RequestAbortedError('message', response) - expectType(err.name) - expectType(err.message) - expectType(err.meta) -} diff --git a/test/types/helpers.test-d.ts b/test/types/helpers.test-d.ts deleted file mode 100644 index 202d4aa84..000000000 --- a/test/types/helpers.test-d.ts +++ /dev/null @@ -1,476 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import { expectType, expectError, expectAssignable } from 'tsd' -import { Client } from '../../' -import { - BulkHelper, - BulkStats, - BulkHelperOptions, - ScrollSearchResponse, - OnDropDocument, - MsearchHelper -} from '../../lib/Helpers' -import { ApiResponse, ApiError, Context } from '../../lib/Transport' - -const client = new Client({ - node: 'http://localhost:9200/' -}) - -/// .helpers.bulk - -const b = client.helpers.bulk>({ - datasource: [], - onDocument (doc) { - expectType>(doc) - return { index: { _index: 'test' } } - }, - flushBytes: 5000000, - flushInterval: 30000, - concurrency: 5, - retries: 3, - wait: 5000, - onDrop (doc) { - expectType>>(doc) - }, - refreshOnCompletion: true, - pipeline: 'my-pipeline' -}) - -expectType>(b) -expectType>(b.abort()) -b.then(stats => expectType(stats)) - -// body can't be provided -expectError( - client.helpers.bulk({ - datasource: [], - onDocument (doc) { - return { index: { _index: 'test' } } - }, - body: [] - }) -) - -// test onDocument actions -// index -{ - const options = { - datasource: [], - onDocument (doc: Record) { - return { index: { _index: 'test' } } - } - } - expectAssignable>>(options) -} -// create -{ - const options = { - datasource: [], - onDocument (doc: Record) { - return { create: { _index: 'test' } } - } - } - expectAssignable>>(options) -} -// update -{ - // without `:BulkHelperOptions` this test cannot pass - // but if we write these options inline inside - // a `.helper.bulk`, it works as expected - const options: BulkHelperOptions> = { - datasource: [], - onDocument (doc: Record) { - return [{ update: { _index: 'test' } }, doc] - } - } - expectAssignable>>(options) -} -// delete -{ - const options = { - datasource: [], - onDocument (doc: Record) { - return { delete: { _index: 'test' } } - } - } - expectAssignable>>(options) -} - -/// .helpers.scrollSearch - -// just search params -{ - async function test () { - const scrollSearch = client.helpers.scrollSearch({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const response of scrollSearch) { - expectAssignable(response) - } - } -} - -// search params and options -{ - async function test () { - const scrollSearch = client.helpers.scrollSearch({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, { ignore: [404] }) - - for await (const response of scrollSearch) { - expectAssignable(response) - expectType>(response.body) - expectType(response.documents) - expectType(response.meta.context) - } - } -} - -// with type defs -{ - interface ShardsResponse { - total: number; - successful: number; - failed: number; - skipped: number; - } - - interface Explanation { - value: number; - description: string; - details: Explanation[]; - } - - interface SearchResponse { - took: number; - timed_out: boolean; - _scroll_id?: string; - _shards: ShardsResponse; - hits: { - total: number; - max_score: number; - hits: Array<{ - _index: string; - _type: string; - _id: string; - _score: number; - _source: T; - _version?: number; - _explanation?: Explanation; - fields?: any; - highlight?: any; - inner_hits?: any; - matched_queries?: string[]; - sort?: string[]; - }>; - }; - aggregations?: any; - } - - interface Source { - foo: string - } - - async function test () { - const scrollSearch = client.helpers.scrollSearch>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const response of scrollSearch) { - expectAssignable(response) - expectType>(response.body) - 
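      // A minimal usage sketch of the typed scroll helper, assuming the
      // Source shape defined above; `searchParams` stands in for the search
      // request object built earlier in this test:
      //
      //   for await (const { documents } of client.helpers.scrollSearch<SearchResponse<Source>>(searchParams)) {
      //     for (const doc of documents) console.log(doc.foo)
      //   }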
expectType(response.documents) - expectType(response.meta.context) - } - } -} - -{ - interface SearchBody { - query: { - match: { foo: string } - } - } - - interface ShardsResponse { - total: number; - successful: number; - failed: number; - skipped: number; - } - - interface Explanation { - value: number; - description: string; - details: Explanation[]; - } - - interface SearchResponse { - took: number; - timed_out: boolean; - _scroll_id?: string; - _shards: ShardsResponse; - hits: { - total: number; - max_score: number; - hits: Array<{ - _index: string; - _type: string; - _id: string; - _score: number; - _source: T; - _version?: number; - _explanation?: Explanation; - fields?: any; - highlight?: any; - inner_hits?: any; - matched_queries?: string[]; - sort?: string[]; - }>; - }; - aggregations?: any; - } - - interface Source { - foo: string - } - - async function test () { - const scrollSearch = client.helpers.scrollSearch, SearchBody, Record>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const response of scrollSearch) { - expectAssignable(response) - expectType>(response.body) - expectType(response.documents) - expectType>(response.meta.context) - } - } -} - -/// .helpers.scrollDocuments - -// just search params -{ - async function test () { - const scrollDocuments = client.helpers.scrollDocuments({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const document of scrollDocuments) { - expectType(document) - } - } -} - -// search params and options -{ - async function test () { - const scrollDocuments = client.helpers.scrollDocuments({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, { ignore: [404] }) - - for await (const document of scrollDocuments) { - expectType(document) - } - } -} - -// with type defs -{ - interface Source { - foo: string - } - - async function test () { - const scrollDocuments = client.helpers.scrollDocuments({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const document of scrollDocuments) { - expectType(document) - } - } -} - -{ - interface SearchBody { - query: { - match: { foo: string } - } - } - - interface Source { - foo: string - } - - async function test () { - const scrollDocuments = client.helpers.scrollDocuments({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const document of scrollDocuments) { - expectType(document) - } - } -} - -/// .helpers.search - -// just search params -{ - const p = client.helpers.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType>(p) - expectType(await p) -} - -// search params and options -{ - const p = client.helpers.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, { ignore: [404] }) - - expectType>(p) - expectType(await p) -} - -// with type defs -{ - interface Source { - foo: string - } - - const p = client.helpers.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType>(p) - expectType(await p) -} - -{ - interface SearchBody { - query: { - match: { foo: string } - } - } - - interface Source { - foo: string - } - - const p = client.helpers.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType>(p) - expectType(await p) -} - -/// .helpers.msearch - -const s = client.helpers.msearch({ - operations: 5, - flushInterval: 500, - concurrency: 5, - retries: 5, - 
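  // A rough gloss on these knobs: `operations` and `flushInterval` bound how
  // many searches are buffered and for how long before a flush, while
  // `retries` and `wait` control how many times a throttled (429) operation
  // is retried and the delay between attempts.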
wait: 5000 -}) - -expectType(s) -expectType(s.stop()) -expectType(s.stop(new Error('kaboom'))) - -expectType, unknown>>>(s.search({ index: 'foo'}, { query: {} })) -expectType>>(s.search, string>({ index: 'foo'}, { query: {} })) - -expectType(s.search({ index: 'foo'}, { query: {} }, (err, result) => { - expectType(err) - expectType(result) -})) -expectType(s.search, string>({ index: 'foo'}, { query: {} }, (err, result) => { - expectType(err) - expectType>(result) -})) diff --git a/test/types/kibana.test-d.ts b/test/types/kibana.test-d.ts deleted file mode 100644 index 6385ebd02..000000000 --- a/test/types/kibana.test-d.ts +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import { expectType, expectNotType, expectError } from 'tsd' -import { Client, RequestEvent, ResurrectEvent, ApiError, ApiResponse, estypes } from '../../' -import { KibanaClient } from '../../api/kibana' -import { TransportRequestPromise, Context } from '../../lib/Transport' - -// @ts-expect-error -const client: KibanaClient = new Client({ - node: 'http://localhost:9200/' -}) - -client.on('request', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('response', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('sniff', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('resurrect', (err, meta) => { - expectType(err) - expectType(meta) -}) - -// No generics -{ - const response = await client.cat.count({ index: 'test' }) - - expectType(response.body) - expectType(response.meta.context) -} - -// Define only the context -{ - const response = await client.cat.count({ index: 'test' }) - - expectType(response.body) - expectType(response.meta.context) -} - -// Check API returned type and optional parameters -{ - const promise = client.info() - expectType>>(promise) - promise - .then(result => expectType>(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -{ - const promise = client.info({ pretty: true }) - expectType>>(promise) - promise - .then(result => expectType>(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -{ - const promise = client.info({ pretty: true }, { ignore: [404] }) - expectType>>(promise) - promise - .then(result => expectType>(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -// body that does not respect the RequestBody constraint -expectError( - client.search({ - index: 'hello', - body: 42 - }).then(console.log) -) - -// @ts-expect-error -client.async_search.get() - -// callback api is not supported -expectError(client.cat.count({ index: 'test' }, {}, (err: any, result: any) => {})) - -// close api, only promises should be supported -// 
callback api is not supported -expectType>(client.close()) -expectError(client.close(() => {})) - -// the child api should return a KibanaClient instance -const child = client.child() -expectType(child) -expectNotType(child) \ No newline at end of file diff --git a/test/types/new-types.test-d.ts b/test/types/new-types.test-d.ts deleted file mode 100644 index 4a237dcf4..000000000 --- a/test/types/new-types.test-d.ts +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import { expectType, expectNotType, expectError } from 'tsd' -import { Client, RequestEvent, ResurrectEvent, ApiError, ApiResponse, estypes } from '../../' -import type { Client as NewTypes } from '../../api/new' -import { TransportRequestPromise, Context } from '../../lib/Transport' - -// @ts-expect-error -const client: NewTypes = new Client({ - node: 'http://localhost:9200/' -}) - -client.on('request', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('response', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('sniff', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('resurrect', (err, meta) => { - expectType(err) - expectType(meta) -}) - -// No generics -{ - const response = await client.cat.count({ index: 'test' }) - - expectType(response.body) - expectType(response.meta.context) -} - -// Define only the context -{ - const response = await client.cat.count({ index: 'test' }) - - expectType(response.body) - expectType(response.meta.context) -} - -// Check API returned type and optional parameters -{ - const promise = client.info() - expectType>>(promise) - promise - .then(result => expectType>(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -{ - const promise = client.info({ pretty: true }) - expectType>>(promise) - promise - .then(result => expectType>(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -{ - const promise = client.info({ pretty: true }, { ignore: [404] }) - expectType>>(promise) - promise - .then(result => expectType>(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -// body that does not respect the RequestBody constraint -expectError( - client.search({ - index: 'hello', - body: 42 - }).then(console.log) -) - -// @ts-expect-error -client.async_search.get() - -// the child api should return a KibanaClient instance -const child = client.child() -expectType(child) -expectNotType(child) \ No newline at end of file diff --git a/test/types/serializer.test-d.ts b/test/types/serializer.test-d.ts deleted file mode 100644 index 69c41ba46..000000000 --- a/test/types/serializer.test-d.ts +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to 
Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import { expectType } from 'tsd' -import { Serializer } from '../../' - -const serializer = new Serializer() - -expectType(serializer.serialize({})) -expectType(serializer.deserialize('')) -expectType(serializer.ndserialize([])) -expectType(serializer.qserialize({})) diff --git a/test/types/transport.test-d.ts b/test/types/transport.test-d.ts deleted file mode 100644 index 5fa2573e0..000000000 --- a/test/types/transport.test-d.ts +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import { Readable as ReadableStream } from 'stream'; -import { expectType, expectAssignable, expectError } from 'tsd' -import { - Transport, - Connection, - ConnectionPool, - Serializer -} from '../..' 
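// A minimal sketch of driving the transport directly, assuming a transport
// wired up like the instantiation further down this file:
//
//   transport.request({ method: 'GET', path: '/' }, { maxRetries: 0 })
//     .then(({ statusCode, body }) => console.log(statusCode, body))
//     .catch(err => console.error(err))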
-import { - TransportRequestParams, - TransportRequestOptions, - TransportRequestCallback, - TransportRequestPromise, - RequestEvent, - ApiError, - RequestBody, - RequestNDBody, - ApiResponse -} from '../../lib/Transport' - -const params = { - method: 'POST', - path: '/search', - body: { foo: 'bar' }, - querystring: { baz: 'faz' } -} - -const options = { - ignore: [404], - requestTimeout: 5000, - maxRetries: 3, - asStream: false, - headers: {}, - querystring: {}, - id: 'id', - context: {}, - warnings: ['warn'], - opaqueId: 'id' -} - -const response = { - body: {}, - statusCode: 200, - headers: {}, - warnings: null, - meta: { - context: {}, - name: 'name', - request: { - params, - options, - id: 'id' - }, - connection: new Connection(), - attempts: 0, - aborted: false - } -} - -expectAssignable(params) -expectAssignable({ method: 'GET', path: '/' }) -expectAssignable(options) -expectAssignable(response) -expectAssignable(response) - -// verify that RequestBody, RequestNDBody and ResponseBody works as expected -interface TestBody { hello: string } -expectAssignable({ foo: 'bar' }) -expectAssignable>({ hello: 'world' }) -expectError>({ foo: 'bar' }) -expectAssignable('string') -expectAssignable>('string') -expectAssignable(Buffer.from('hello world')) -expectAssignable(new ReadableStream()) - -expectAssignable([{ foo: 'bar' }]) -expectAssignable[]>([{ hello: 'world' }]) -expectError({ foo: 'bar' }) -expectError[]>([{ foo: 'bar' }]) -expectAssignable(['string']) -expectAssignable(Buffer.from('hello world')) -expectAssignable(new ReadableStream()) - -const transport = new Transport({ - emit: (event, ...args) => true, - serializer: new Serializer(), - connectionPool: new ConnectionPool(), - maxRetries: 5, - requestTimeout: 1000, - suggestCompression: true, - compression: 'gzip', - sniffInterval: 1000, - sniffOnConnectionFault: true, - sniffEndpoint: '/sniff', - sniffOnStart: false -}) - -expectType(transport) - -expectType(transport.request(params, options, (err, result) => {})) - -// querystring as string -transport.request({ - method: 'GET', - path: '/search', - querystring: 'baz=faz' -}, options, (err, result) => { - expectType(err) - expectType(result) -}) - -// body as object -transport.request(params, options, (err, result) => { - expectType(err) - expectType(result) -}) - -// body as string -transport.request({ - method: 'POST', - path: '/search', - body: 'hello world', - querystring: { baz: 'faz' } -}, options, (err, result) => { - expectType(err) - expectType(result) -}) - -// body as Buffer -transport.request({ - method: 'POST', - path: '/search', - body: Buffer.from('hello world'), - querystring: { baz: 'faz' } -}, options, (err, result) => { - expectType(err) - expectType(result) -}) - -// body as ReadableStream -transport.request({ - method: 'POST', - path: '/search', - body: new ReadableStream(), - querystring: { baz: 'faz' } -}, options, (err, result) => { - expectType(err) - expectType(result) -}) - -const promise = transport.request(params, options) -expectType>(promise) -promise.then(result => expectType(result)) -expectType(await promise) - -// body that does not respect the RequestBody constraint -expectError( - transport.request({ - method: 'POST', - path: '/', - body: 42 - }) -) diff --git a/test/unit/api-async.js b/test/unit/api-async.js deleted file mode 100644 index 219df5533..000000000 --- a/test/unit/api-async.js +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { errors } = require('../../index') -const { Client, buildServer } = require('../utils') - -function runAsyncTest (test) { - test('async await (search)', t => { - t.plan(1) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, async ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - try { - const { body } = await client.search({ - index: 'test', - type: 'doc', - q: 'foo:bar' - }) - t.same(body, { hello: 'world' }) - } catch (err) { - t.fail(err) - } - server.stop() - }) - }) - - test('async await (index)', t => { - t.plan(1) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, async ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - try { - await client.index({ - index: 'test', - body: { foo: 'bar' } - }) - t.pass('ok') - } catch (err) { - t.fail(err) - } - server.stop() - }) - }) - - test('async await (ConfigurationError)', async t => { - t.plan(1) - - const client = new Client({ - node: 'http://localhost:9200/' - }) - - try { - await client.index({ body: { foo: 'bar' } }) - t.fail('Should throw') - } catch (err) { - t.ok(err instanceof errors.ConfigurationError) - } - }) -} - -module.exports = runAsyncTest diff --git a/test/unit/api.test.js b/test/unit/api.test.js deleted file mode 100644 index de5021b66..000000000 --- a/test/unit/api.test.js +++ /dev/null @@ -1,335 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -const { test } = require('tap') -const { errors } = require('../../index') -const { Client, buildServer } = require('../utils') - -test('Basic (callback)', t => { - t.plan(2) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Basic (promises)', t => { - t.plan(1) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client - .search({ - index: 'test', - q: 'foo:bar' - }) - .then(({ body }) => { - t.same(body, { hello: 'world' }) - server.stop() - }) - .catch(t.fail) - }) -}) - -test('Error (callback)', t => { - t.plan(1) - - function handler (req, res) { - res.statusCode = 500 - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, { body }) => { - t.ok(err) - server.stop() - }) - }) -}) - -test('Error (promises)', t => { - t.plan(1) - - function handler (req, res) { - res.statusCode = 500 - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client - .search({ - index: 'test', - q: 'foo:bar' - }) - .then(t.fail) - .catch(err => { - t.ok(err) - server.stop() - }) - }) -}) - -test('Finally method (promises)', t => { - t.plan(1) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - const request = client.search({ - index: 'test', - q: 'foo:bar' - }) - - t.type(request.finally, 'function') - - request - .finally(() => { - server.stop() - }) - }) -}) - -test('Abort method (callback)', t => { - t.plan(3) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - const request = client.search({ - index: 'test', - q: 'foo:bar' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - - t.type(request.abort, 'function') - }) -}) - -test('Abort method (promises)', t => { - t.plan(2) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - const request = client.search({ - index: 'test', - q: 'foo:bar' - }) - - request - .then(({ body }) => { - t.same(body, { hello: 'world' }) - server.stop() - }) - .catch(t.fail) - - t.type(request.abort, 
'function') - }) -}) - -test('Basic (options and callback)', t => { - t.plan(2) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, { - requestTimeout: 10000 - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Basic (options and promises)', t => { - t.plan(1) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client - .search({ - index: 'test', - q: 'foo:bar' - }, { - requestTimeout: 10000 - }) - .then(({ body }) => { - t.same(body, { hello: 'world' }) - server.stop() - }) - .catch(t.fail) - }) -}) - -test('If the API uses the same key for both url and query parameter, the url should win', t => { - t.plan(2) - - function handler (req, res) { - t.equal(req.url, '/index/_bulk') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - // bulk has two `type` parameters - client.bulk({ - index: 'index', - body: [] - }, (err, { body, warnings }) => { - t.error(err) - server.stop() - }) - }) -}) - -test('ConfigurationError (callback)', t => { - t.plan(1) - - const client = new Client({ - node: 'http://localhost:9200/' - }) - - client.index({ - body: { foo: 'bar' } - }, (err, { body }) => { - t.ok(err instanceof errors.ConfigurationError) - }) -}) - -test('ConfigurationError (promises)', t => { - t.plan(1) - - const client = new Client({ - node: 'http://localhost:9200/' - }) - - client - .index({ body: { foo: 'bar' } }) - .then(t.fail) - .catch(err => { - t.ok(err instanceof errors.ConfigurationError) - }) -}) - -test('The callback with a sync error should be called in the next tick', t => { - t.plan(4) - - const client = new Client({ - node: 'http://localhost:9200/' - }) - - const transportReturn = client.index({ body: { foo: 'bar' } }, (err, result) => { - t.ok(err instanceof errors.ConfigurationError) - }) - - t.type(transportReturn.then, 'function') - t.type(transportReturn.catch, 'function') - t.type(transportReturn.abort, 'function') -}) - -if (Number(process.version.split('.')[0].slice(1)) >= 8) { - require('./api-async')(test) -} diff --git a/test/unit/base-connection-pool.test.js b/test/unit/base-connection-pool.test.js deleted file mode 100644 index 6fe8c7206..000000000 --- a/test/unit/base-connection-pool.test.js +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const BaseConnectionPool = require('../../lib/pool/BaseConnectionPool') -const Connection = require('../../lib/Connection') - -test('API', t => { - t.test('addConnection', t => { - const pool = new BaseConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - t.ok(pool.connections.find(c => c.id === href) instanceof Connection) - t.equal(pool.connections.find(c => c.id === href).status, Connection.statuses.ALIVE) - t.end() - }) - - t.test('addConnection should throw with two connections with the same id', t => { - const pool = new BaseConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - try { - pool.addConnection(href) - t.fail('Should throw') - } catch (err) { - t.equal(err.message, `Connection with id '${href}' is already present`) - } - t.end() - }) - - t.test('addConnection should handle not-friendly url parameters for user and password', t => { - const pool = new BaseConnectionPool({ Connection }) - const href = 'http://us/"er:p@assword@localhost:9200/' - pool.addConnection(href) - const conn = pool.connections[0] - t.equal(conn.url.username, 'us%22er') - t.equal(conn.url.password, 'p%40assword') - t.match(conn.headers, { - authorization: 'Basic ' + Buffer.from('us"er:p@assword').toString('base64') - }) - t.end() - }) - - t.test('markDead', t => { - const pool = new BaseConnectionPool({ Connection, sniffEnabled: true }) - const href = 'http://localhost:9200/' - let connection = pool.addConnection(href) - t.same(pool.markDead(connection), pool) - connection = pool.connections.find(c => c.id === href) - t.equal(connection.status, Connection.statuses.ALIVE) - t.end() - }) - - t.test('markAlive', t => { - const pool = new BaseConnectionPool({ Connection, sniffEnabled: true }) - const href = 'http://localhost:9200/' - let connection = pool.addConnection(href) - t.same(pool.markAlive(connection), pool) - connection = pool.connections.find(c => c.id === href) - t.equal(connection.status, Connection.statuses.ALIVE) - t.end() - }) - - t.test('getConnection should throw', t => { - const pool = new BaseConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - try { - pool.getConnection() - t.fail('Should fail') - } catch (err) { - t.equal(err.message, 'getConnection must be implemented') - } - t.end() - }) - - t.test('removeConnection', t => { - const pool = new BaseConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - const connection = pool.addConnection(href) - pool.removeConnection(connection) - t.equal(pool.size, 0) - t.end() - }) - - t.test('empty', t => { - const pool = new BaseConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - pool.addConnection('http://localhost:9201/') - pool.empty(() => { - t.equal(pool.size, 0) - t.end() - }) - }) - - t.test('urlToHost', t => { - const pool = new BaseConnectionPool({ Connection }) - 
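    // urlToHost normalizes a string address into the { url: URL } host
    // object the rest of the pool APIs expect, e.g.
    // pool.urlToHost('http://localhost:9200/') deep-equals { url: new URL('http://localhost:9200/') }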
const url = 'http://localhost:9200/' - t.same( - pool.urlToHost(url), - { url: new URL(url) } - ) - t.end() - }) - - t.test('nodesToHost', t => { - t.test('publish_address as ip address (IPv4)', t => { - const pool = new BaseConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: '127.0.0.1:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: '127.0.0.1:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.same(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, '127.0.0.1:9200') - t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, '127.0.0.1:9201') - t.end() - }) - - t.test('publish_address as ip address (IPv6)', t => { - const pool = new BaseConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: '[::1]:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: '[::1]:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.same(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://[::1]:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://[::1]:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, '[::1]:9200') - t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, '[::1]:9201') - t.end() - }) - - t.test('publish_address as host/ip (IPv4)', t => { - const pool = new BaseConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com/127.0.0.1:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: 'example.com/127.0.0.1:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.same(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://example.com:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://example.com:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') - t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') - t.end() - }) - - t.test('publish_address as host/ip (IPv6)', t => { - const pool = new BaseConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com/[::1]:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: 'example.com/[::1]:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.same(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://example.com:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://example.com:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') - t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') - t.end() - }) - - t.test('Should use the 
configured protocol', t => {
-      const pool = new BaseConnectionPool({ Connection })
-      const nodes = {
-        a1: {
-          http: {
-            publish_address: 'example.com/127.0.0.1:9200'
-          },
-          roles: ['master', 'data', 'ingest']
-        },
-        a2: {
-          http: {
-            publish_address: 'example.com/127.0.0.1:9201'
-          },
-          roles: ['master', 'data', 'ingest']
-        }
-      }
-
-      t.equal(pool.nodesToHost(nodes, 'https:')[0].url.protocol, 'https:')
-      t.equal(pool.nodesToHost(nodes, 'http:')[1].url.protocol, 'http:')
-      t.end()
-    })
-
-    t.end()
-  })
-
-  t.test('update', t => {
-    t.test('Should not update existing connections', t => {
-      t.plan(2)
-      const pool = new BaseConnectionPool({ Connection })
-      pool.addConnection([{
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: {
-          master: true,
-          data: true,
-          ingest: true
-        }
-      }, {
-        url: new URL('http://127.0.0.1:9201/'),
-        id: 'a2',
-        roles: {
-          master: true,
-          data: true,
-          ingest: true
-        }
-      }])
-
-      pool.update([{
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: null
-      }, {
-        url: new URL('http://127.0.0.1:9201/'),
-        id: 'a2',
-        roles: null
-      }])
-
-      t.ok(pool.connections.find(c => c.id === 'a1').roles !== null)
-      t.ok(pool.connections.find(c => c.id === 'a2').roles !== null)
-    })
-
-    t.test('Should not update existing connections (mark alive)', t => {
-      t.plan(5)
-      class CustomBaseConnectionPool extends BaseConnectionPool {
-        markAlive (connection) {
-          t.ok('called')
-          super.markAlive(connection)
-        }
-      }
-      const pool = new CustomBaseConnectionPool({ Connection })
-      const conn1 = pool.addConnection({
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: {
-          master: true,
-          data: true,
-          ingest: true
-        }
-      })
-
-      const conn2 = pool.addConnection({
-        url: new URL('http://127.0.0.1:9201/'),
-        id: 'a2',
-        roles: {
-          master: true,
-          data: true,
-          ingest: true
-        }
-      })
-
-      pool.markDead(conn1)
-      pool.markDead(conn2)
-
-      pool.update([{
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: null
-      }, {
-        url: new URL('http://127.0.0.1:9201/'),
-        id: 'a2',
-        roles: null
-      }])
-
-      t.ok(pool.connections.find(c => c.id === 'a1').roles !== null)
-      t.ok(pool.connections.find(c => c.id === 'a2').roles !== null)
-    })
-
-    t.test('Should not update existing connections (same url, different id)', t => {
-      t.plan(3)
-      class CustomBaseConnectionPool extends BaseConnectionPool {
-        markAlive (connection) {
-          t.ok('called')
-          super.markAlive(connection)
-        }
-      }
-      const pool = new CustomBaseConnectionPool({ Connection })
-      pool.addConnection([{
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'http://127.0.0.1:9200/',
-        roles: {
-          master: true,
-          data: true,
-          ingest: true
-        }
-      }])
-
-      pool.update([{
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: true
-      }])
-
-      // roles will never be updated, we only use it to do
-      // a dummy check to see if the connection has been updated
-      t.same(pool.connections.find(c => c.id === 'a1').roles, {
-        master: true,
-        data: true,
-        ingest: true,
-        ml: false
-      })
-      t.equal(pool.connections.find(c => c.id === 'http://127.0.0.1:9200/'), undefined)
-    })
-
-    t.test('Add a new connection', t => {
-      t.plan(2)
-      const pool = new BaseConnectionPool({ Connection })
-      pool.addConnection({
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: {
-          master: true,
-          data: true,
-          ingest: true
-        }
-      })
-
-      pool.update([{
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: null
-      }, {
-        url: new URL('http://127.0.0.1:9201/'),
-        id: 'a2',
-        roles: null
-      }])
-
-      t.ok(pool.connections.find(c => c.id === 'a1').roles !== null)
-      t.ok(pool.connections.find(c => c.id === 'a2'))
-    })
-
-    t.test('Remove old connections', t => {
-      t.plan(3)
-      const pool = new BaseConnectionPool({ Connection })
-      pool.addConnection({
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: null
-      })
-
-      pool.update([{
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a2',
-        roles: null
-      }, {
-        url: new URL('http://127.0.0.1:9201/'),
-        id: 'a3',
-        roles: null
-      }])
-
-      t.notOk(pool.connections.find(c => c.id === 'a1'))
-      t.ok(pool.connections.find(c => c.id === 'a2'))
-      t.ok(pool.connections.find(c => c.id === 'a3'))
-    })
-
-    t.end()
-  })
-
-  t.test('CreateConnection', t => {
-    t.plan(1)
-    const pool = new BaseConnectionPool({ Connection })
-    const conn = pool.createConnection('http://localhost:9200/')
-    pool.connections.push(conn)
-    try {
-      pool.createConnection('http://localhost:9200/')
-      t.fail('Should throw')
-    } catch (err) {
-      t.equal(err.message, 'Connection with id \'http://localhost:9200/\' is already present')
-    }
-  })
-
-  t.end()
-})
diff --git a/test/unit/child.test.js b/test/unit/child.test.js
deleted file mode 100644
index a07a6d135..000000000
--- a/test/unit/child.test.js
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- */ - -'use strict' - -const { test } = require('tap') -const { errors } = require('../../index') -const { - Client, - buildServer, - connection: { MockConnection } -} = require('../utils') - -test('Should create a child client (headers check)', t => { - t.plan(4) - - let count = 0 - function handler (req, res) { - if (count++ === 0) { - t.match(req.headers, { 'x-foo': 'bar' }) - } else { - t.match(req.headers, { 'x-baz': 'faz' }) - } - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - headers: { 'x-foo': 'bar' } - }) - const child = client.child({ - headers: { 'x-baz': 'faz' } - }) - - client.info((err, res) => { - t.error(err) - child.info((err, res) => { - t.error(err) - server.stop() - }) - }) - }) -}) - -test('Should create a child client (timeout check)', t => { - t.plan(2) - - function handler (req, res) { - setTimeout(() => { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }, 50) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ node: `http://localhost:${port}` }) - const child = client.child({ requestTimeout: 25, maxRetries: 0 }) - - client.info((err, res) => { - t.error(err) - child.info((err, res) => { - t.ok(err instanceof errors.TimeoutError) - server.stop() - }) - }) - }) -}) - -test('Client extensions', t => { - t.test('One level', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('utility.index', () => { - return () => t.ok('called') - }) - - const child = client.child() - child.utility.index() - }) - - t.test('Two levels', t => { - t.plan(2) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('utility.index', () => { - return () => t.ok('called') - }) - - const child = client.child() - child.extend('utility.search', () => { - return () => t.ok('called') - }) - - const grandchild = child.child() - grandchild.utility.index() - grandchild.utility.search() - }) - - t.test('The child should not extend the parent', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - const child = client.child() - - child.extend('utility.index', () => { - return () => t.fail('Should not be called') - }) - - try { - client.utility.index() - } catch (err) { - t.ok(err) - } - }) - - t.end() -}) - -test('Should share the event emitter', t => { - t.test('One level', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const child = client.child() - - client.on('response', (err, meta) => { - t.error(err) - }) - - child.info((err, res) => { - t.error(err) - }) - }) - - t.test('Two levels', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const child = client.child() - const grandchild = child.child() - - client.on('response', (err, meta) => { - t.error(err) - }) - - grandchild.info((err, res) => { - t.error(err) - }) - }) - - t.test('Child listener - one level', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const child = client.child() - - child.on('response', (err, meta) => { - t.error(err) - }) - - child.info((err, res) => { - t.error(err) - }) - }) - - 
t.test('Child listener - two levels', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const child = client.child() - const grandchild = child.child() - - child.on('response', (err, meta) => { - t.error(err) - }) - - grandchild.info((err, res) => { - t.error(err) - }) - }) - - t.end() -}) - -test('Should create a child client (generateRequestId check)', t => { - t.plan(6) - - function generateRequestId1 () { - let id = 0 - return () => `trace-1-${id++}` - } - - function generateRequestId2 () { - let id = 0 - return () => `trace-2-${id++}` - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - generateRequestId: generateRequestId1() - }) - const child = client.child({ - Connection: MockConnection, - generateRequestId: generateRequestId2() - }) - - let count = 0 - client.on('request', (err, { meta }) => { - t.error(err) - t.equal( - meta.request.id, - count++ === 0 ? 'trace-1-0' : 'trace-2-0' - ) - }) - - client.info(err => { - t.error(err) - child.info(t.error) - }) -}) - -test('Should create a child client (name check)', t => { - t.plan(8) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - name: 'parent' - }) - const child = client.child({ - Connection: MockConnection, - name: 'child' - }) - - t.equal(client.name, 'parent') - t.equal(child.name, 'child') - - let count = 0 - client.on('request', (err, { meta }) => { - t.error(err) - t.equal( - meta.name, - count++ === 0 ? 'parent' : 'child' - ) - }) - - client.info(err => { - t.error(err) - child.info(t.error) - }) -}) - -test('Should create a child client (auth check)', t => { - t.plan(4) - - let count = 0 - function handler (req, res) { - if (count++ === 0) { - t.match(req.headers, { authorization: 'Basic Zm9vOmJhcg==' }) - } else { - t.match(req.headers, { authorization: 'ApiKey foobar' }) - } - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - username: 'foo', - password: 'bar' - } - }) - const child = client.child({ - auth: { - apiKey: 'foobar' - } - }) - - client.info((err, res) => { - t.error(err) - child.info((err, res) => { - t.error(err) - server.stop() - }) - }) - }) -}) diff --git a/test/unit/client.test.js b/test/unit/client.test.js deleted file mode 100644 index 3503e17a7..000000000 --- a/test/unit/client.test.js +++ /dev/null @@ -1,1862 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const buffer = require('buffer') -const intoStream = require('into-stream') -const { ConnectionPool, Transport, Connection, errors } = require('../../index') -const { CloudConnectionPool } = require('../../lib/pool') -const { Client, buildServer, connection } = require('../utils') -const { buildMockConnection } = connection - -let clientVersion = require('../../package.json').version -if (clientVersion.includes('-')) { - clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' -} -const nodeVersion = process.versions.node - -test('Configure host', t => { - t.test('Single string', t => { - const client = new Client({ - node: 'http://localhost:9200/' - }) - const pool = client.connectionPool - t.match(pool.connections.find(c => c.id === 'http://localhost:9200/'), { - url: new URL('http://localhost:9200/'), - id: 'http://localhost:9200/', - ssl: null, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - t.end() - }) - - t.test('Array of strings', t => { - const client = new Client({ - nodes: ['http://localhost:9200/', 'http://localhost:9201/'] - }) - const pool = client.connectionPool - t.match(pool.connections.find(c => c.id === 'http://localhost:9200/'), { - url: new URL('http://localhost:9200/'), - id: 'http://localhost:9200/', - ssl: null, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - t.match(pool.connections.find(c => c.id === 'http://localhost:9201/'), { - url: new URL('http://localhost:9201/'), - id: 'http://localhost:9201/', - ssl: null, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - - t.end() - }) - - t.test('Single object', t => { - const client = new Client({ - node: { - url: new URL('http://localhost:9200/'), - id: 'node', - roles: { - master: true, - data: false, - ingest: false - }, - ssl: 'ssl' - } - }) - const pool = client.connectionPool - t.match(pool.connections.find(c => c.id === 'node'), { - url: new URL('http://localhost:9200/'), - id: 'node', - ssl: 'ssl', - deadCount: 0, - resurrectTimeout: 0 - }) - - t.same(pool.connections.find(c => c.id === 'node').roles, { - master: true, - data: false, - ingest: false, - ml: false - }) - - t.end() - }) - - t.test('Array of objects', t => { - const client = new Client({ - nodes: [{ - url: new URL('http://localhost:9200/'), - id: 'node1', - roles: { - master: true, - data: false, - ingest: false - }, - ssl: 'ssl' - }, { - url: new URL('http://localhost:9200/'), - id: 'node2', - roles: { - master: false, - data: true, - ingest: false - }, - ssl: 'ssl' - }] - }) - const pool = client.connectionPool - t.match(pool.connections.find(c => c.id === 'node1'), { - url: new URL('http://localhost:9200/'), - id: 'node1', - ssl: 'ssl', - deadCount: 0, - resurrectTimeout: 0 - }) - - t.same(pool.connections.find(c => c.id === 'node1').roles, { - master: true, - data: false, - ingest: false, - ml: false - }) - - t.match(pool.connections.find(c => c.id === 'node2'), { - url: new URL('http://localhost:9200/'), - id: 'node2', - ssl: 'ssl', - deadCount: 0, - resurrectTimeout: 0 - }) - - t.same(pool.connections.find(c => c.id === 'node2').roles, { - master: false, - data: true, - 
ingest: false, - ml: false - }) - - t.end() - }) - - t.test('Custom headers', t => { - const client = new Client({ - node: { - url: new URL('http://localhost:9200/'), - headers: { 'x-foo': 'bar' }, - id: 'node' - } - }) - const pool = client.connectionPool - t.match(pool.connections.find(c => c.id === 'node'), { - url: new URL('http://localhost:9200/'), - headers: { 'x-foo': 'bar' } - }) - t.end() - }) - - t.test('Missing node conf', t => { - try { - new Client() // eslint-disable-line - t.fail('Should fail') - } catch (err) { - t.ok(err) - } - t.end() - }) - - t.end() -}) - -test('Authentication', t => { - t.test('Basic', t => { - t.test('Node with basic auth data in the url', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://foo:bar@localhost:${port}` - }) - - client.info((err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Node with basic auth data in the url (array of nodes)', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - nodes: [`http://foo:bar@localhost:${port}`] - }) - - client.info((err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Node with basic auth data in the options', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - username: 'foo', - password: 'bar' - } - }) - - client.info((err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Custom basic authentication per request', t => { - t.plan(6) - - let first = true - function handler (req, res) { - t.match(req.headers, { - authorization: first ? 'hello' : 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://foo:bar@localhost:${port}` - }) - - client.info({}, { - headers: { - authorization: 'hello' - } - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - first = false - - client.info((err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - }) - - t.test('Override default basic authentication per request', t => { - t.plan(6) - - let first = true - function handler (req, res) { - t.match(req.headers, { - authorization: first ? 
'hello' : 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - username: 'foo', - password: 'bar' - } - }) - - client.info({}, { - headers: { - authorization: 'hello' - } - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - first = false - - client.info((err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - }) - - t.end() - }) - - t.test('ApiKey', t => { - t.test('Node with ApiKey auth data in the options as string', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'ApiKey Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - apiKey: 'Zm9vOmJhcg==' - } - }) - - client.info((err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Node with ApiKey auth data in the options as object', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'ApiKey Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - apiKey: { id: 'foo', api_key: 'bar' } - } - }) - - client.info((err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Custom ApiKey authentication per request', t => { - t.plan(6) - - let first = true - function handler (req, res) { - t.match(req.headers, { - authorization: first ? 'ApiKey Zm9vOmJhcg==' : 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://foo:bar@localhost:${port}` - }) - - client.info({}, { - headers: { - authorization: 'ApiKey Zm9vOmJhcg==' - } - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - first = false - - client.info((err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - }) - - t.test('Override default ApiKey authentication per request', t => { - t.plan(6) - - let first = true - function handler (req, res) { - t.match(req.headers, { - authorization: first ? 
'hello' : 'ApiKey Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - apiKey: 'Zm9vOmJhcg==' - } - }) - - client.info({}, { - headers: { - authorization: 'hello' - } - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - first = false - - client.info((err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - }) - - t.test('ApiKey should take precedence over basic auth (in url)', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'ApiKey Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://user:pwd@localhost:${port}`, - auth: { - apiKey: 'Zm9vOmJhcg==' - } - }) - - client.info((err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('ApiKey should take precedence over basic auth (in opts)', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'ApiKey Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - apiKey: 'Zm9vOmJhcg==', - username: 'user', - password: 'pwd' - } - }) - - client.info((err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.end() - }) - - t.end() -}) - -test('Custom headers per request', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - 'x-foo': 'bar', - 'x-baz': 'faz' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://foo:bar@localhost:${port}` - }) - - client.info({}, { - headers: { - 'x-foo': 'bar', - 'x-baz': 'faz' - } - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Client close', t => { - t.plan(2) - - class MyConnectionPool extends ConnectionPool { - empty (callback) { - t.ok('called') - super.empty(callback) - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - ConnectionPool: MyConnectionPool - }) - - client.close(() => t.pass('Closed')) -}) - -test('Client close (promise)', t => { - t.plan(2) - - class MyConnectionPool extends ConnectionPool { - empty (callback) { - t.ok('called') - super.empty(callback) - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - ConnectionPool: MyConnectionPool - }) - - client.close() - .then(() => t.pass('Closed')) -}) - -test('Extend client APIs', t => { - t.test('Extend a single method', t => { - t.plan(5) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('method', ({ makeRequest, result, ConfigurationError }) => { - t.type(makeRequest, 'function') - t.ok(new ConfigurationError() instanceof Error) - t.same(result, { - body: null, - statusCode: null, - headers: null, - warnings: null - }) - - return (params, options) => { - t.same(params, { you_know: 'for search' 
}) - t.same(options, { winter: 'is coming' }) - } - }) - - client.method( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - }) - - t.test('Create a namespace and a method', t => { - t.plan(5) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('namespace.method', ({ makeRequest, result, ConfigurationError }) => { - t.type(makeRequest, 'function') - t.ok(new ConfigurationError() instanceof Error) - t.same(result, { - body: null, - statusCode: null, - headers: null, - warnings: null - }) - - return (params, options) => { - t.same(params, { you_know: 'for search' }) - t.same(options, { winter: 'is coming' }) - } - }) - - client.namespace.method( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - }) - - t.test('Create a namespace and multiple methods', t => { - t.plan(10) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('namespace.method1', ({ makeRequest, result, ConfigurationError }) => { - t.type(makeRequest, 'function') - t.ok(new ConfigurationError() instanceof Error) - t.same(result, { - body: null, - statusCode: null, - headers: null, - warnings: null - }) - - return (params, options) => { - t.same(params, { you_know: 'for search' }) - t.same(options, { winter: 'is coming' }) - } - }) - - client.extend('namespace.method2', ({ makeRequest, result, ConfigurationError }) => { - t.type(makeRequest, 'function') - t.ok(new ConfigurationError() instanceof Error) - t.same(result, { - body: null, - statusCode: null, - headers: null, - warnings: null - }) - - return (params, options) => { - t.same(params, { you_know: 'for search' }) - t.same(options, { winter: 'is coming' }) - } - }) - - client.namespace.method1( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - - client.namespace.method2( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - }) - - t.test('Cannot override an existing method', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - try { - client.extend('index', () => {}) - t.fail('Should throw') - } catch (err) { - t.equal(err.message, 'The method "index" already exists') - } - }) - - t.test('Cannot override an existing namespace and method', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - try { - client.extend('indices.delete', () => {}) - t.fail('Should throw') - } catch (err) { - t.equal(err.message, 'The method "delete" already exists on namespace "indices"') - } - }) - - t.test('Can override an existing method with { force: true }', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - try { - client.extend('index', { force: true }, () => t.pass('Called')) - } catch (err) { - t.fail('Should not throw') - } - }) - - t.test('Can override an existing namespace and method with { force: true }', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - try { - client.extend('indices.delete', { force: true }, () => t.pass('Called')) - } catch (err) { - t.fail('Should not throw') - } - }) - - t.test('Should call the transport.request method', t => { - t.plan(2) - - class MyTransport extends Transport { - request (params, options) { - t.same(params, { you_know: 'for search' }) - t.same(options, { winter: 'is coming' }) - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Transport: MyTransport - }) - client.extend('method', ({ makeRequest, result, 
ConfigurationError }) => { - return (params, options) => makeRequest(params, options) - }) - - client.method( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - }) - - t.test('Should support callbacks', t => { - t.plan(2) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('method', ({ makeRequest, result, ConfigurationError }) => { - return (params, options, callback) => { - callback(null, { hello: 'world' }) - } - }) - - client.method( - { you_know: 'for search' }, - { winter: 'is coming' }, - (err, res) => { - t.error(err) - t.same(res, { hello: 'world' }) - } - ) - }) - - t.test('Should support promises', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('method', ({ makeRequest, result, ConfigurationError }) => { - return (params, options) => { - return new Promise((resolve, reject) => { - resolve({ hello: 'world' }) - }) - } - }) - - client - .method( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - .then(res => t.same(res, { hello: 'world' })) - .catch(err => t.fail(err)) - }) - - t.end() -}) - -test('Elastic cloud config', t => { - t.test('Basic', t => { - t.plan(5) - const client = new Client({ - cloud: { - // 'localhost$abcd$efgh' - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==', - username: 'elastic', - password: 'changeme' - } - }) - - const pool = client.connectionPool - t.ok(pool instanceof CloudConnectionPool) - t.match(pool.connections.find(c => c.id === 'https://abcd.localhost/'), { - url: new URL('https://elastic:changeme@abcd.localhost/'), - id: 'https://abcd.localhost/', - headers: { - authorization: 'Basic ' + Buffer.from('elastic:changeme').toString('base64') - }, - ssl: { secureProtocol: 'TLSv1_2_method' }, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - - t.equal(client.transport.compression, 'gzip') - t.equal(client.transport.suggestCompression, true) - t.same(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) - }) - - t.test('Without kibana component', t => { - t.plan(5) - const client = new Client({ - cloud: { - // 'localhost$abcd$' - id: 'name:bG9jYWxob3N0JGFiY2Qk', - username: 'elastic', - password: 'changeme' - } - }) - - const pool = client.connectionPool - t.ok(pool instanceof CloudConnectionPool) - t.match(pool.connections.find(c => c.id === 'https://abcd.localhost/'), { - url: new URL('https://elastic:changeme@abcd.localhost/'), - id: 'https://abcd.localhost/', - headers: { - authorization: 'Basic ' + Buffer.from('elastic:changeme').toString('base64') - }, - ssl: { secureProtocol: 'TLSv1_2_method' }, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - - t.equal(client.transport.compression, 'gzip') - t.equal(client.transport.suggestCompression, true) - t.same(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) - }) - - t.test('Auth as separate option', t => { - t.plan(5) - const client = new Client({ - cloud: { - // 'localhost$abcd$efgh' - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==' - }, - auth: { - username: 'elastic', - password: 'changeme' - } - }) - - const pool = client.connectionPool - t.ok(pool instanceof CloudConnectionPool) - t.match(pool.connections.find(c => c.id === 'https://abcd.localhost/'), { - url: new URL('https://elastic:changeme@abcd.localhost/'), - id: 'https://abcd.localhost/', - headers: { - authorization: 'Basic ' 
+ Buffer.from('elastic:changeme').toString('base64') - }, - ssl: { secureProtocol: 'TLSv1_2_method' }, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - - t.equal(client.transport.compression, 'gzip') - t.equal(client.transport.suggestCompression, true) - t.same(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) - }) - - t.test('ApiKey should take precedence over basic auth', t => { - t.plan(5) - const client = new Client({ - cloud: { - // 'localhost$abcd$efgh' - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==' - }, - auth: { - username: 'elastic', - password: 'changeme', - apiKey: 'Zm9vOmJhcg==' - } - }) - - const pool = client.connectionPool - t.ok(pool instanceof CloudConnectionPool) - t.match(pool.connections.find(c => c.id === 'https://abcd.localhost/'), { - url: new URL('https://elastic:changeme@abcd.localhost/'), - id: 'https://abcd.localhost/', - headers: { - authorization: 'ApiKey Zm9vOmJhcg==' - }, - ssl: { secureProtocol: 'TLSv1_2_method' }, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - - t.equal(client.transport.compression, 'gzip') - t.equal(client.transport.suggestCompression, true) - t.same(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) - }) - - t.test('Override default options', t => { - t.plan(4) - const client = new Client({ - cloud: { - // 'localhost$abcd$efgh' - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==', - username: 'elastic', - password: 'changeme' - }, - compression: false, - suggestCompression: false, - ssl: { - secureProtocol: 'TLSv1_1_method' - } - }) - - t.ok(client.connectionPool instanceof CloudConnectionPool) - t.equal(client.transport.compression, false) - t.equal(client.transport.suggestCompression, false) - t.same(client.connectionPool._ssl, { secureProtocol: 'TLSv1_1_method' }) - }) - - t.end() -}) - -test('Opaque Id support', t => { - t.test('No opaqueId', t => { - t.plan(3) - - function handler (req, res) { - t.equal(req.headers['x-opaque-id'], undefined) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('No prefix', t => { - t.plan(3) - - function handler (req, res) { - t.equal(req.headers['x-opaque-id'], 'bar') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, { - opaqueId: 'bar' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('With prefix', t => { - t.plan(3) - - function handler (req, res) { - t.equal(req.headers['x-opaque-id'], 'foo-bar') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - opaqueIdPrefix: 'foo-' - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, { - opaqueId: 'bar' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) 
- }) - }) - - t.end() -}) - -test('Correctly handles the same header cased differently', t => { - t.plan(4) - - function handler (req, res) { - t.equal(req.headers.authorization, 'Basic foobar') - t.equal(req.headers.foo, 'baz') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - username: 'hello', - password: 'world' - }, - headers: { - Authorization: 'Basic foobar', - Foo: 'bar' - } - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, { - headers: { - foo: 'baz' - } - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Random selector', t => { - t.plan(2) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - nodeSelector: 'random' - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Disable keep alive agent', t => { - t.plan(3) - - function handler (req, res) { - t.equal(req.headers.connection, 'close') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - agent: false - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('name property as string', t => { - t.plan(1) - - const client = new Client({ - node: 'http://localhost:9200/', - name: 'client-name' - }) - - t.equal(client.name, 'client-name') -}) - -test('name property as symbol', t => { - t.plan(1) - - const symbol = Symbol('client-name') - const client = new Client({ - node: 'http://localhost:9200/', - name: symbol - }) - - t.equal(client.name, symbol) -}) - -// The nodejs http agent will try to wait for the whole -// body to arrive before closing the request, so this -// test might take some time. 
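-// (In the 'Bad content length' test below, the advertised Content-Length is
-// larger than the bytes actually written, so the client fails with a
-// ConnectionError of 'Response aborted while reading the body'; with
-// maxRetries: 1 the request is retried once, hence the handler runs twice.)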
-test('Bad content length', t => {
-  t.plan(3)
-
-  let count = 0
-  function handler (req, res) {
-    count += 1
-    const body = JSON.stringify({ hello: 'world' })
-    res.setHeader('Content-Type', 'application/json;utf=8')
-    res.setHeader('Content-Length', body.length + '')
-    res.end(body.slice(0, -5))
-  }
-
-  buildServer(handler, ({ port }, server) => {
-    const client = new Client({ node: `http://localhost:${port}`, maxRetries: 1 })
-    client.info((err, { body }) => {
-      t.ok(err instanceof errors.ConnectionError)
-      t.equal(err.message, 'Response aborted while reading the body')
-      t.equal(count, 2)
-      server.stop()
-    })
-  })
-})
-
-test('Socket destroyed while reading the body', t => {
-  t.plan(3)
-
-  let count = 0
-  function handler (req, res) {
-    count += 1
-    const body = JSON.stringify({ hello: 'world' })
-    res.setHeader('Content-Type', 'application/json;utf=8')
-    res.setHeader('Content-Length', body.length + '')
-    res.write(body.slice(0, -5))
-    setTimeout(() => {
-      res.socket.destroy()
-    }, 500)
-  }
-
-  buildServer(handler, ({ port }, server) => {
-    const client = new Client({ node: `http://localhost:${port}`, maxRetries: 1 })
-    client.info((err, { body }) => {
-      t.ok(err instanceof errors.ConnectionError)
-      t.equal(err.message, 'Response aborted while reading the body')
-      t.equal(count, 2)
-      server.stop()
-    })
-  })
-})
-
-test('Content length too big (buffer)', t => {
-  t.plan(4)
-
-  class MockConnection extends Connection {
-    request (params, callback) {
-      const stream = intoStream(JSON.stringify({ hello: 'world' }))
-      stream.statusCode = 200
-      stream.headers = {
-        'content-type': 'application/json;utf=8',
-        'content-encoding': 'gzip',
-        'content-length': buffer.constants.MAX_LENGTH + 10,
-        connection: 'keep-alive',
-        date: new Date().toISOString()
-      }
-      stream.on('close', () => t.pass('Stream destroyed'))
-      process.nextTick(callback, null, stream)
-      return { abort () {} }
-    }
-  }
-
-  const client = new Client({ node: 'http://localhost:9200/', Connection: MockConnection })
-  client.info((err, result) => {
-    t.ok(err instanceof errors.RequestAbortedError)
-    t.equal(err.message, `The content length (${buffer.constants.MAX_LENGTH + 10}) is bigger than the maximum allowed buffer (${buffer.constants.MAX_LENGTH})`)
-    t.equal(result.meta.attempts, 0)
-  })
-})
-
-test('Content length too big (string)', t => {
-  t.plan(4)
-
-  class MockConnection extends Connection {
-    request (params, callback) {
-      const stream = intoStream(JSON.stringify({ hello: 'world' }))
-      stream.statusCode = 200
-      stream.headers = {
-        'content-type': 'application/json;utf=8',
-        'content-length': buffer.constants.MAX_STRING_LENGTH + 10,
-        connection: 'keep-alive',
-        date: new Date().toISOString()
-      }
-      stream.on('close', () => t.pass('Stream destroyed'))
-      process.nextTick(callback, null, stream)
-      return { abort () {} }
-    }
-  }
-
-  const client = new Client({ node: 'http://localhost:9200/', Connection: MockConnection })
-  client.info((err, result) => {
-    t.ok(err instanceof errors.RequestAbortedError)
-    t.equal(err.message, `The content length (${buffer.constants.MAX_STRING_LENGTH + 10}) is bigger than the maximum allowed string (${buffer.constants.MAX_STRING_LENGTH})`)
-    t.equal(result.meta.attempts, 0)
-  })
-})
-
-test('Content length too big custom (buffer)', t => {
-  t.plan(4)
-
-  class MockConnection extends Connection {
-    request (params, callback) {
-      const stream = intoStream(JSON.stringify({ hello: 'world' }))
-      stream.statusCode = 200
-      stream.headers = {
-        'content-type': 'application/json;utf=8',
- 'content-encoding': 'gzip', - 'content-length': 1100, - connection: 'keep-alive', - date: new Date().toISOString() - } - stream.on('close', () => t.pass('Stream destroyed')) - process.nextTick(callback, null, stream) - return { abort () {} } - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - maxCompressedResponseSize: 1000 - }) - client.info((err, result) => { - t.ok(err instanceof errors.RequestAbortedError) - t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed buffer (1000)') - t.equal(result.meta.attempts, 0) - }) -}) - -test('Content length too big custom (string)', t => { - t.plan(4) - - class MockConnection extends Connection { - request (params, callback) { - const stream = intoStream(JSON.stringify({ hello: 'world' })) - stream.statusCode = 200 - stream.headers = { - 'content-type': 'application/json;utf=8', - 'content-length': 1100, - connection: 'keep-alive', - date: new Date().toISOString() - } - stream.on('close', () => t.pass('Stream destroyed')) - process.nextTick(callback, null, stream) - return { abort () {} } - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - maxResponseSize: 1000 - }) - client.info((err, result) => { - t.ok(err instanceof errors.RequestAbortedError) - t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed string (1000)') - t.equal(result.meta.attempts, 0) - }) -}) - -test('Content length too big custom option (buffer)', t => { - t.plan(4) - - class MockConnection extends Connection { - request (params, callback) { - const stream = intoStream(JSON.stringify({ hello: 'world' })) - stream.statusCode = 200 - stream.headers = { - 'content-type': 'application/json;utf=8', - 'content-encoding': 'gzip', - 'content-length': 1100, - connection: 'keep-alive', - date: new Date().toISOString() - } - stream.on('close', () => t.pass('Stream destroyed')) - process.nextTick(callback, null, stream) - return { abort () {} } - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - client.info({}, { maxCompressedResponseSize: 1000 }, (err, result) => { - t.ok(err instanceof errors.RequestAbortedError) - t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed buffer (1000)') - t.equal(result.meta.attempts, 0) - }) -}) - -test('Content length too big custom option (string)', t => { - t.plan(4) - - class MockConnection extends Connection { - request (params, callback) { - const stream = intoStream(JSON.stringify({ hello: 'world' })) - stream.statusCode = 200 - stream.headers = { - 'content-type': 'application/json;utf=8', - 'content-length': 1100, - connection: 'keep-alive', - date: new Date().toISOString() - } - stream.on('close', () => t.pass('Stream destroyed')) - process.nextTick(callback, null, stream) - return { abort () {} } - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - client.info({}, { maxResponseSize: 1000 }, (err, result) => { - t.ok(err instanceof errors.RequestAbortedError) - t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed string (1000)') - t.equal(result.meta.attempts, 0) - }) -}) - -test('Content length too big custom option override (buffer)', t => { - t.plan(4) - - class MockConnection extends Connection { - request (params, callback) { - const stream = intoStream(JSON.stringify({ 
hello: 'world' })) - stream.statusCode = 200 - stream.headers = { - 'content-type': 'application/json;utf=8', - 'content-encoding': 'gzip', - 'content-length': 1100, - connection: 'keep-alive', - date: new Date().toISOString() - } - stream.on('close', () => t.pass('Stream destroyed')) - process.nextTick(callback, null, stream) - return { abort () {} } - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - maxCompressedResponseSize: 2000 - }) - client.info({}, { maxCompressedResponseSize: 1000 }, (err, result) => { - t.ok(err instanceof errors.RequestAbortedError) - t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed buffer (1000)') - t.equal(result.meta.attempts, 0) - }) -}) - -test('Content length too big custom option override (string)', t => { - t.plan(4) - - class MockConnection extends Connection { - request (params, callback) { - const stream = intoStream(JSON.stringify({ hello: 'world' })) - stream.statusCode = 200 - stream.headers = { - 'content-type': 'application/json;utf=8', - 'content-length': 1100, - connection: 'keep-alive', - date: new Date().toISOString() - } - stream.on('close', () => t.pass('Stream destroyed')) - process.nextTick(callback, null, stream) - return { abort () {} } - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - maxResponseSize: 2000 - }) - client.info({}, { maxResponseSize: 1000 }, (err, result) => { - t.ok(err instanceof errors.RequestAbortedError) - t.equal(err.message, 'The content length (1100) is bigger than the maximum allowed string (1000)') - t.equal(result.meta.attempts, 0) - }) -}) - -test('maxResponseSize cannot be bigger than buffer.constants.MAX_STRING_LENGTH', t => { - t.plan(2) - - try { - new Client({ // eslint-disable-line - node: 'http://localhost:9200/', - maxResponseSize: buffer.constants.MAX_STRING_LENGTH + 10 - }) - t.fail('should throw') - } catch (err) { - t.ok(err instanceof errors.ConfigurationError) - t.equal(err.message, `The maxResponseSize cannot be bigger than ${buffer.constants.MAX_STRING_LENGTH}`) - } -}) - -test('maxCompressedResponseSize cannot be bigger than buffer.constants.MAX_STRING_LENGTH', t => { - t.plan(2) - - try { - new Client({ // eslint-disable-line - node: 'http://localhost:9200/', - maxCompressedResponseSize: buffer.constants.MAX_LENGTH + 10 - }) - t.fail('should throw') - } catch (err) { - t.ok(err instanceof errors.ConfigurationError) - t.equal(err.message, `The maxCompressedResponseSize cannot be bigger than ${buffer.constants.MAX_LENGTH}`) - } -}) - -test('Meta header enabled', t => { - t.plan(2) - - class MockConnection extends Connection { - request (params, callback) { - t.match(params.headers, { 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion}` }) - const stream = intoStream(JSON.stringify({ hello: 'world' })) - stream.statusCode = 200 - stream.headers = { - 'content-type': 'application/json;utf=8', - 'content-length': '17', - connection: 'keep-alive', - date: new Date().toISOString() - } - process.nextTick(callback, null, stream) - return { abort () {} } - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.info((err, result) => { - t.error(err) - }) -}) - -test('Meta header disabled', t => { - t.plan(2) - - class MockConnection extends Connection { - request (params, callback) { - 
t.notMatch(params.headers, { 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion}` }) - const stream = intoStream(JSON.stringify({ hello: 'world' })) - stream.statusCode = 200 - stream.headers = { - 'content-type': 'application/json;utf=8', - 'content-length': '17', - connection: 'keep-alive', - date: new Date().toISOString() - } - process.nextTick(callback, null, stream) - return { abort () {} } - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - enableMetaHeader: false - }) - - client.info((err, result) => { - t.error(err) - }) -}) - -test('Prototype poisoning protection enabled by default', t => { - t.plan(1) - - class MockConnection extends Connection { - request (params, callback) { - const stream = intoStream('{"__proto__":{"foo":"bar"}}') - stream.statusCode = 200 - stream.headers = { - 'content-type': 'application/json;utf=8', - 'content-length': '27', - connection: 'keep-alive', - date: new Date().toISOString() - } - process.nextTick(callback, null, stream) - return { abort () {} } - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.info((err, result) => { - t.ok(err instanceof errors.DeserializationError) - }) -}) - -test('Disable prototype poisoning protection', t => { - t.plan(1) - - class MockConnection extends Connection { - request (params, callback) { - const stream = intoStream('{"__proto__":{"foo":"bar"}}') - stream.statusCode = 200 - stream.headers = { - 'content-type': 'application/json;utf=8', - 'content-length': '27', - connection: 'keep-alive', - date: new Date().toISOString() - } - process.nextTick(callback, null, stream) - return { abort () {} } - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - disablePrototypePoisoningProtection: true - }) - - client.info((err, result) => { - t.error(err) - }) -}) - -test('API compatibility header (json)', t => { - t.plan(4) - - function handler (req, res) { - t.equal(req.headers.accept, 'application/vnd.elasticsearch+json; compatible-with=7') - t.equal(req.headers['content-type'], 'application/vnd.elasticsearch+json; compatible-with=7') - res.setHeader('Content-Type', 'application/vnd.elasticsearch+json; compatible-with=7') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - process.env.ELASTIC_CLIENT_APIVERSIONING = 'true' - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.index({ index: 'foo', body: {} }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - delete process.env.ELASTIC_CLIENT_APIVERSIONING - }) - }) -}) - -test('API compatibility header (x-ndjson)', t => { - t.plan(4) - - function handler (req, res) { - t.equal(req.headers.accept, 'application/vnd.elasticsearch+json; compatible-with=7') - t.equal(req.headers['content-type'], 'application/vnd.elasticsearch+x-ndjson; compatible-with=7') - res.setHeader('Content-Type', 'application/vnd.elasticsearch+json; compatible-with=7') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - process.env.ELASTIC_CLIENT_APIVERSIONING = 'true' - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.bulk({ index: 'foo', body: [{}, {}] }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - delete 
process.env.ELASTIC_CLIENT_APIVERSIONING
-    })
-  })
-})
-
-test('Bearer auth', t => {
-  t.plan(3)
-
-  function handler (req, res) {
-    t.match(req.headers, {
-      authorization: 'Bearer Zm9vOmJhcg=='
-    })
-    res.setHeader('Content-Type', 'application/json;utf=8')
-    res.end(JSON.stringify({ hello: 'world' }))
-  }
-
-  buildServer(handler, ({ port }, server) => {
-    const client = new Client({
-      node: `http://localhost:${port}`,
-      auth: {
-        bearer: 'Zm9vOmJhcg=='
-      }
-    })
-
-    client.info((err, { body }) => {
-      t.error(err)
-      t.same(body, { hello: 'world' })
-      server.stop()
-    })
-  })
-})
-
-test('Check server fingerprint (success)', t => {
-  t.plan(1)
-
-  function handler (req, res) {
-    res.end('ok')
-  }
-
-  buildServer(handler, { secure: true }, ({ port, caFingerprint }, server) => {
-    const client = new Client({
-      node: `https://localhost:${port}`,
-      caFingerprint
-    })
-
-    client.info((err, res) => {
-      t.error(err)
-      server.stop()
-    })
-  })
-})
-
-test('Check server fingerprint (failure)', t => {
-  t.plan(2)
-
-  function handler (req, res) {
-    res.end('ok')
-  }
-
-  buildServer(handler, { secure: true }, ({ port }, server) => {
-    const client = new Client({
-      node: `https://localhost:${port}`,
-      caFingerprint: 'FO:OB:AR'
-    })
-
-    client.info((err, res) => {
-      t.ok(err instanceof errors.ConnectionError)
-      t.equal(err.message, 'Server certificate CA fingerprint does not match the value configured in caFingerprint')
-      server.stop()
-    })
-  })
-})
-
-test('caFingerprint can\'t be configured over http / 1', t => {
-  t.plan(2)
-
-  try {
-    new Client({ // eslint-disable-line
-      node: 'http://localhost:9200/',
-      caFingerprint: 'FO:OB:AR'
-    })
-    t.fail('should throw')
-  } catch (err) {
-    t.ok(err instanceof errors.ConfigurationError)
-    t.equal(err.message, 'You can\'t configure the caFingerprint with a http connection')
-  }
-})
-
-test('caFingerprint can\'t be configured over http / 2', t => {
-  t.plan(2)
-
-  try {
-    new Client({ // eslint-disable-line
-      nodes: ['http://localhost:9200/'],
-      caFingerprint: 'FO:OB:AR'
-    })
-    t.fail('should throw')
-  } catch (err) {
-    t.ok(err instanceof errors.ConfigurationError)
-    t.equal(err.message, 'You can\'t configure the caFingerprint with a http connection')
-  }
-})
-
-test('caFingerprint can\'t be configured over http / 3', t => {
-  t.plan(1)
-
-  try {
-    new Client({ // eslint-disable-line
-      nodes: ['https://localhost:9200/'],
-      caFingerprint: 'FO:OB:AR'
-    })
-    t.pass('should not throw')
-  } catch (err) {
-    t.fail('should not throw')
-  }
-})
-
-test('caFingerprint can\'t be configured over http / 4', t => {
-  t.plan(2)
-
-  try {
-    new Client({ // eslint-disable-line
-      node: { url: new URL('http://localhost:9200/') },
-      caFingerprint: 'FO:OB:AR'
-    })
-    t.fail('should throw')
-  } catch (err) {
-    t.ok(err instanceof errors.ConfigurationError)
-    t.equal(err.message, 'You can\'t configure the caFingerprint with a http connection')
-  }
-})
-
-test('caFingerprint can\'t be configured over http / 5', t => {
-  t.plan(2)
-
-  try {
-    new Client({ // eslint-disable-line
-      nodes: [{ url: new URL('http://localhost:9200/') }],
-      caFingerprint: 'FO:OB:AR'
-    })
-    t.fail('should throw')
-  } catch (err) {
-    t.ok(err instanceof errors.ConfigurationError)
-    t.equal(err.message, 'You can\'t configure the caFingerprint with a http connection')
-  }
-})
-
-test('Error body that is not a json', t => {
-  t.plan(5)
-
-  const MockConnection = buildMockConnection({
-    onRequest (params) {
-      return {
-        statusCode: 400,
-        body: 'error!',
-        headers: {
'content-type': 'text/html' }
-      }
-    }
-  })
-
-  const client = new Client({
-    node: 'http://localhost:9200/',
-    Connection: MockConnection,
-    maxRetries: 1
-  })
-
-  client.info((err, result) => {
-    t.ok(err instanceof errors.ResponseError)
-    t.equal(err.name, 'ResponseError')
-    t.equal(err.body, 'error!')
-    t.equal(err.message, 'error!')
-    t.equal(err.statusCode, 400)
-  })
-})
diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts
new file mode 100644
index 000000000..3131a8d6f
--- /dev/null
+++ b/test/unit/client.test.ts
@@ -0,0 +1,434 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { test } from 'tap'
+import { URL } from 'url'
+import { connection } from '../utils'
+import { Client, errors } from '../..'
+import * as symbols from '@elastic/transport/lib/symbols'
+import { BaseConnectionPool, CloudConnectionPool, WeightedConnectionPool } from '@elastic/transport'
+
+let clientVersion: string = require('../../package.json').version // eslint-disable-line
+if (clientVersion.includes('-')) {
+  clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p'
+}
+let transportVersion: string = require('@elastic/transport/package.json').version // eslint-disable-line
+if (transportVersion.includes('-')) {
+  transportVersion = transportVersion.slice(0, transportVersion.indexOf('-')) + 'p'
+}
+const nodeVersion = process.versions.node
+
+test('Create a client instance, single node as string', t => {
+  const client = new Client({ node: 'http://localhost:9200/' })
+  t.ok(client.connectionPool instanceof WeightedConnectionPool)
+  t.equal(client.connectionPool.size, 1)
+  t.end()
+})
+
+test('Create a client instance, multi node as strings', t => {
+  const client = new Client({ nodes: ['http://localhost:9200/', 'http://localhost:9201/'] })
+  t.ok(client.connectionPool instanceof WeightedConnectionPool)
+  t.equal(client.connectionPool.size, 2)
+  t.end()
+})
+
+test('Create a client instance, single node as object', t => {
+  const client = new Client({
+    node: {
+      url: new URL('http://localhost:9200/')
+    }
+  })
+  t.equal(client.connectionPool.size, 1)
+  t.end()
+})
+
+test('Create a client instance, multi node as object', t => {
+  const client = new Client({
+    nodes: [{
+      url: new URL('http://localhost:9200/')
+    }, {
+      url: new URL('http://localhost:9201/')
+    }]
+  })
+  t.equal(client.connectionPool.size, 2)
+  t.end()
+})
+
+test('Missing node(s)', t => {
+  t.throws(() => new Client({}), errors.ConfigurationError)
+  t.end()
+})
+
+test('Custom headers', t => {
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    headers: { foo: 'bar' }
+  })
+  t.match(client.transport[symbols.kHeaders], { foo: 'bar' })
+  t.end()
+})
+
+test('Basic auth', async t => {
+  t.plan(1)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      t.match(opts.headers, { authorization: 'Basic aGVsbG86d29ybGQ=' })
+      return {
+        statusCode: 200,
+        body: { hello: 'world' }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection,
+    auth: {
+      username: 'hello',
+      password: 'world'
+    }
+  })
+
+  await client.transport.request({ method: 'GET', path: '/' })
+})
+
+test('Basic auth via url', async t => {
+  t.plan(1)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      t.match(opts.headers, { authorization: 'Basic aGVsbG86d29ybGQ=' })
+      return {
+        statusCode: 200,
+        body: { hello: 'world' }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://hello:world@localhost:9200/',
+    Connection
+  })
+
+  await client.transport.request({ method: 'GET', path: '/' })
+})
+
+test('ApiKey as string', async t => {
+  t.plan(1)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      t.match(opts.headers, { authorization: 'ApiKey foobar' })
+      return {
+        statusCode: 200,
+        body: { hello: 'world' }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection,
+    auth: {
+      apiKey: 'foobar'
+    }
+  })
+
+  await client.transport.request({ method: 'GET', path: '/' })
+})
+
+test('ApiKey as object', async t => {
+  t.plan(1)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      t.match(opts.headers, { authorization: 'ApiKey Zm9vOmJhcg==' })
+      return {
+        statusCode: 200,
+        body: { hello: 'world' }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection,
+    auth: {
+      apiKey: {
+        id: 'foo',
+        api_key: 'bar'
+      }
+    }
+  })
+
+  await client.transport.request({ method: 'GET', path: '/' })
+})
+
+test('Bearer auth', async t => {
+  t.plan(1)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      t.match(opts.headers, { authorization: 'Bearer token' })
+      return {
+        statusCode: 200,
+        body: { hello: 'world' }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection,
+    auth: {
+      bearer: 'token'
+    }
+  })
+
+  await client.transport.request({ method: 'GET', path: '/' })
+})
+
+test('Override authentication per request', async t => {
+  t.plan(1)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      t.match(opts.headers, { authorization: 'Basic foobar' })
+      return {
+        statusCode: 200,
+        body: { hello: 'world' }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection,
+    auth: {
+      username: 'hello',
+      password: 'world'
+    }
+  })
+
+  await client.transport.request(
+    { method: 'GET', path: '/' },
+    { headers: { authorization: 'Basic foobar' } }
+  )
+})
+
+test('Custom headers per request', async t => {
+  t.plan(1)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      t.match(opts.headers, {
+        foo: 'bar',
+        faz: 'bar'
+      })
+      return {
+        statusCode: 200,
+        body: { hello: 'world' }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection,
+    headers: { foo: 'bar' }
+  })
+
+  await client.transport.request(
+    { method: 'GET', path: '/' },
+    { headers: { faz: 'bar' } }
+  )
+})
+
+test('Close the client', async t => {
+  t.plan(1)
+
+  class MyConnectionPool extends BaseConnectionPool {
+    async empty (): Promise<void> {
+      t.pass('called')
+    }
+  }
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    ConnectionPool: MyConnectionPool
+  })
+
+  await client.close()
+})
+
+test('Elastic Cloud config', t => {
+  const client = new Client({
+    cloud: {
+      // 'localhost$abcd$'
+      id: 'name:bG9jYWxob3N0JGFiY2Qk'
+    },
+    auth: {
+      username: 'elastic',
+      password: 'changeme'
+    }
+  })
+
+  t.ok(client.connectionPool instanceof CloudConnectionPool)
+  t.match(client.connectionPool.connections.find(c => c.id === 'https://abcd.localhost/'), {
+    url: new URL('https://elastic:changeme@abcd.localhost/'),
+    id: 'https://abcd.localhost/',
+    headers: {
+      authorization: 'Basic ' + Buffer.from('elastic:changeme').toString('base64')
+    },
+    tls: { secureProtocol: 'TLSv1_2_method' }
+  })
+
+  t.end()
+})
+
+test('Override default Elastic Cloud options', t => {
+  const client = new Client({
+    cloud: {
+      // 'localhost$abcd$efgh'
+      id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==',
+    },
+    auth: {
+      username: 'elastic',
+      password: 'changeme'
+    },
+    compression: false,
+    tls: {
+      secureProtocol: 'TLSv1_1_method'
+    }
+  })
+
+  t.ok(client.connectionPool instanceof CloudConnectionPool)
+  t.equal(client.transport[symbols.kCompression], false)
+  t.same(client.connectionPool._tls, { secureProtocol: 'TLSv1_1_method' })
+
+  t.end()
+})
+
+test('Configure opaqueIdPrefix', t => {
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    opaqueIdPrefix: 'foobar'
+  })
+
+  t.equal(client.transport[symbols.kOpaqueIdPrefix], 'foobar')
+
+  t.end()
+})
+
+test('name as string', t => {
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    name: 'es-client'
+  })
+
+  t.equal(client.name, 'es-client')
+
+  t.end()
+})
+
+test('name as symbol', t => {
+  const s = Symbol()
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    name: s
+  })
+
+  t.equal(client.name, s)
+
+  t.end()
+})
+
+test('Meta header enabled by default', async t => {
+  t.plan(1)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      t.match(opts.headers, { 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}` })
+      return {
+        statusCode: 200,
+        body: { hello: 'world' }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection
+  })
+
+  await client.transport.request({ method: 'GET', path: '/' })
+})
+
+test('Meta header disabled', async t => {
+  t.plan(1)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      t.notOk(opts.headers?.['x-elastic-client-meta'])
+      return {
+        statusCode: 200,
+        body: { hello: 'world' }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection,
+    enableMetaHeader: false
+  })
+
+  await client.transport.request({ method: 'GET', path: '/' })
+})
+
+test('caFingerprint', t => {
+  const client = new Client({
+    node: 'https://localhost:9200/',
+    caFingerprint: 'FO:OB:AR'
+  })
+
+  t.equal(client.connectionPool[symbols.kCaFingerprint], 'FO:OB:AR')
+  t.end()
+})
+
+test('caFingerprint can\'t be configured over http / 1', t => {
+  t.throws(() => new Client({
+    node: 'http://localhost:9200/',
+    caFingerprint: 'FO:OB:AR'
+  }),
+  errors.ConfigurationError
+  )
+  t.end()
+})
+
+test('caFingerprint can\'t be configured over http / 2', t => {
+  t.throws(() => new Client({
+    nodes: ['http://localhost:9200/'],
+    caFingerprint: 'FO:OB:AR'
+  }),
+  errors.ConfigurationError
+  )
+  t.end()
+})
diff --git a/test/unit/cloud-connection-pool.test.js
b/test/unit/cloud-connection-pool.test.js deleted file mode 100644 index 76c135509..000000000 --- a/test/unit/cloud-connection-pool.test.js +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { test } = require('tap') -const { CloudConnectionPool } = require('../../lib/pool') -const Connection = require('../../lib/Connection') - -test('Should expose a cloudConnection property', t => { - const pool = new CloudConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - t.ok(pool.cloudConnection instanceof Connection) - t.end() -}) - -test('Get connection should always return cloudConnection', t => { - const pool = new CloudConnectionPool({ Connection }) - const conn = pool.addConnection('http://localhost:9200/') - t.same(pool.getConnection(), conn) - t.end() -}) - -test('pool.empty should reset cloudConnection', t => { - const pool = new CloudConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - t.ok(pool.cloudConnection instanceof Connection) - pool.empty(() => { - t.equal(pool.cloudConnection, null) - t.end() - }) -}) diff --git a/test/unit/connection-pool.test.js b/test/unit/connection-pool.test.js deleted file mode 100644 index 7a75879dd..000000000 --- a/test/unit/connection-pool.test.js +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const ConnectionPool = require('../../lib/pool/ConnectionPool') -const Connection = require('../../lib/Connection') -const { defaultNodeFilter, roundRobinSelector } = require('../../lib/Transport').internals -const { connection: { MockConnection, MockConnectionTimeout } } = require('../utils') - -test('API', t => { - t.test('addConnection', t => { - const pool = new ConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - t.ok(pool.connections.find(c => c.id === href) instanceof Connection) - t.equal(pool.connections.find(c => c.id === href).status, Connection.statuses.ALIVE) - t.same(pool.dead, []) - t.end() - }) - - t.test('addConnection should throw with two connections with the same id', t => { - const pool = new ConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - try { - pool.addConnection(href) - t.fail('Should throw') - } catch (err) { - t.equal(err.message, `Connection with id '${href}' is already present`) - } - t.end() - }) - - t.test('addConnection should handle not-friendly url parameters for user and password', t => { - const pool = new ConnectionPool({ Connection }) - const href = 'http://us/"er:p@assword@localhost:9200/' - pool.addConnection(href) - const conn = pool.getConnection() - t.equal(conn.url.username, 'us%22er') - t.equal(conn.url.password, 'p%40assword') - t.match(conn.headers, { - authorization: 'Basic ' + Buffer.from('us"er:p@assword').toString('base64') - }) - t.end() - }) - - t.test('markDead', t => { - const pool = new ConnectionPool({ Connection, sniffEnabled: true }) - const href = 'http://localhost:9200/' - let connection = pool.addConnection(href) - pool.markDead(connection) - connection = pool.connections.find(c => c.id === href) - t.equal(connection.deadCount, 1) - t.ok(connection.resurrectTimeout > 0) - t.same(pool.dead, [href]) - t.end() - }) - - t.test('markDead should sort the dead queue by deadTimeout', t => { - const pool = new ConnectionPool({ Connection }) - const href1 = 'http://localhost:9200/1' - const href2 = 'http://localhost:9200/2' - const conn1 = pool.addConnection(href1) - const conn2 = pool.addConnection(href2) - pool.markDead(conn2) - setTimeout(() => { - pool.markDead(conn1) - t.same(pool.dead, [href2, href1]) - t.end() - }, 10) - }) - - t.test('markDead should ignore connections that no longer exists', t => { - const pool = new ConnectionPool({ Connection, sniffEnabled: true }) - pool.addConnection('http://localhost:9200/') - pool.markDead({ id: 'foo-bar' }) - t.same(pool.dead, []) - t.end() - }) - - t.test('markAlive', t => { - const pool = new ConnectionPool({ Connection, sniffEnabled: true }) - const href = 'http://localhost:9200/' - let connection = pool.addConnection(href) - pool.markDead(connection) - pool.markAlive(connection) - connection = pool.connections.find(c => c.id === href) - t.equal(connection.deadCount, 0) - t.equal(connection.resurrectTimeout, 0) - t.equal(connection.status, Connection.statuses.ALIVE) - t.same(pool.dead, []) - t.end() - }) - - t.test('resurrect', t => { - t.test('ping strategy', t => { - t.test('alive', t => { - const pool = new ConnectionPool({ - resurrectStrategy: 'ping', - pingTimeout: 3000, - Connection: MockConnection, - sniffEnabled: true - }) - const href = 'http://localhost:9200/' - const connection = pool.addConnection(href) - 
pool.markDead(connection) - const opts = { - now: Date.now() + 1000 * 60 * 3, - requestId: 1, - name: 'elasticsearch-js' - } - pool.resurrect(opts, (isAlive, connection) => { - t.ok(isAlive) - connection = pool.connections.find(c => c.id === connection.id) - t.equal(connection.deadCount, 0) - t.equal(connection.resurrectTimeout, 0) - t.equal(connection.status, Connection.statuses.ALIVE) - t.same(pool.dead, []) - t.end() - }) - }) - - t.test('dead', t => { - const pool = new ConnectionPool({ - resurrectStrategy: 'ping', - pingTimeout: 3000, - Connection: MockConnectionTimeout, - sniffEnabled: true - }) - const href = 'http://localhost:9200/' - const connection = pool.addConnection(href) - pool.markDead(connection) - const opts = { - now: Date.now() + 1000 * 60 * 3, - requestId: 1, - name: 'elasticsearch-js' - } - pool.resurrect(opts, (isAlive, connection) => { - t.notOk(isAlive) - connection = pool.connections.find(c => c.id === connection.id) - t.equal(connection.deadCount, 2) - t.ok(connection.resurrectTimeout > 0) - t.equal(connection.status, Connection.statuses.DEAD) - t.same(pool.dead, [href]) - t.end() - }) - }) - - t.end() - }) - - t.test('optimistic strategy', t => { - const pool = new ConnectionPool({ - resurrectStrategy: 'optimistic', - Connection, - sniffEnabled: true - }) - const href = 'http://localhost:9200/' - const connection = pool.addConnection(href) - pool.markDead(connection) - const opts = { - now: Date.now() + 1000 * 60 * 3, - requestId: 1, - name: 'elasticsearch-js' - } - pool.resurrect(opts, (isAlive, connection) => { - t.ok(isAlive) - connection = pool.connections.find(c => c.id === connection.id) - t.equal(connection.deadCount, 1) - t.ok(connection.resurrectTimeout > 0) - t.equal(connection.status, Connection.statuses.ALIVE) - t.same(pool.dead, []) - t.end() - }) - }) - - t.test('none strategy', t => { - const pool = new ConnectionPool({ - resurrectStrategy: 'none', - Connection, - sniffEnabled: true - }) - const href = 'http://localhost:9200/' - const connection = pool.addConnection(href) - pool.markDead(connection) - const opts = { - now: Date.now() + 1000 * 60 * 3, - requestId: 1, - name: 'elasticsearch-js' - } - pool.resurrect(opts, (isAlive, connection) => { - t.ok(isAlive === null) - t.ok(connection === null) - connection = pool.connections.find(c => c.id === href) - t.equal(connection.deadCount, 1) - t.ok(connection.resurrectTimeout > 0) - t.equal(connection.status, Connection.statuses.DEAD) - t.same(pool.dead, [href]) - t.end() - }) - }) - - t.end() - }) - - t.test('getConnection', t => { - t.test('Should return a connection', t => { - const pool = new ConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - t.ok(pool.getConnection() instanceof Connection) - t.end() - }) - - t.test('filter option', t => { - const pool = new ConnectionPool({ Connection }) - const href1 = 'http://localhost:9200/' - const href2 = 'http://localhost:9200/other' - pool.addConnection([href1, href2]) - - const filter = node => node.id === href1 - t.equal(pool.getConnection({ filter }).id, href1) - t.end() - }) - - t.test('filter should get Connection objects', t => { - t.plan(2) - const pool = new ConnectionPool({ Connection }) - const href1 = 'http://localhost:9200/' - const href2 = 'http://localhost:9200/other' - pool.addConnection([href1, href2]) - - const filter = node => { - t.ok(node instanceof Connection) - return true - } - pool.getConnection({ filter }) - }) - 
- t.test('filter should get alive connections', t => { - t.plan(2) - const pool = new ConnectionPool({ Connection }) - const href1 = 'http://localhost:9200/' - const href2 = 'http://localhost:9200/other' - const conn = pool.addConnection(href1) - pool.addConnection([href2, `${href2}/stuff`]) - pool.markDead(conn) - - const filter = node => { - t.equal(node.status, Connection.statuses.ALIVE) - return true - } - pool.getConnection({ filter }) - }) - - t.test('If all connections are marked as dead, getConnection should return a dead connection', t => { - const pool = new ConnectionPool({ Connection }) - const href1 = 'http://localhost:9200/' - const href2 = 'http://localhost:9200/other' - const conn1 = pool.addConnection(href1) - const conn2 = pool.addConnection(href2) - pool.markDead(conn1) - pool.markDead(conn2) - const conn = pool.getConnection() - t.ok(conn instanceof Connection) - t.equal(conn.status, 'dead') - t.end() - }) - - t.end() - }) - - t.test('removeConnection', t => { - const pool = new ConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - const connection = pool.addConnection(href) - t.ok(pool.getConnection() instanceof Connection) - pool.removeConnection(connection) - t.equal(pool.getConnection(), null) - t.end() - }) - - t.test('empty', t => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - pool.addConnection('http://localhost:9201/') - pool.empty(() => { - t.equal(pool.size, 0) - t.same(pool.dead, []) - t.end() - }) - }) - - t.test('urlToHost', t => { - const pool = new ConnectionPool({ Connection }) - const url = 'http://localhost:9200/' - t.same( - pool.urlToHost(url), - { url: new URL(url) } - ) - t.end() - }) - - t.test('nodesToHost', t => { - t.test('publish_address as ip address (IPv4)', t => { - const pool = new ConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: '127.0.0.1:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: '127.0.0.1:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.same(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, '127.0.0.1:9200') - t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, '127.0.0.1:9201') - t.end() - }) - - t.test('publish_address as ip address (IPv6)', t => { - const pool = new ConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: '[::1]:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: '[::1]:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.same(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://[::1]:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://[::1]:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, '[::1]:9200') - t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, '[::1]:9201') - t.end() - }) - - t.test('publish_address as host/ip (IPv4)', t => { - const pool = new 
ConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com/127.0.0.1:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: 'example.com/127.0.0.1:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.same(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://example.com:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://example.com:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') - t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') - t.end() - }) - - t.test('publish_address as host/ip (IPv6)', t => { - const pool = new ConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com/[::1]:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: 'example.com/[::1]:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.same(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://example.com:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://example.com:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.equal(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') - t.equal(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') - t.end() - }) - - t.test('Should use the configure protocol', t => { - const pool = new ConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com/127.0.0.1:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: 'example.com/127.0.0.1:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.equal(pool.nodesToHost(nodes, 'https:')[0].url.protocol, 'https:') - t.equal(pool.nodesToHost(nodes, 'http:')[1].url.protocol, 'http:') - t.end() - }) - - t.test('Should map roles', t => { - const pool = new ConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com:9200' - }, - roles: ['master', 'data', 'ingest', 'ml'] - }, - a2: { - http: { - publish_address: 'example.com:9201' - }, - roles: [] - } - } - t.same(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://example.com:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: true - } - }, { - url: new URL('http://example.com:9201/'), - id: 'a2', - roles: { - master: false, - data: false, - ingest: false, - ml: false - } - }]) - - t.end() - }) - - t.end() - }) - - t.test('update', t => { - t.test('Should not update existing connections', t => { - t.plan(2) - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true - } - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true - } - }]) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: null - }]) - - t.ok(pool.connections.find(c => c.id === 'a1').roles !== null) - t.ok(pool.connections.find(c => c.id === 
'a2').roles !== null) - }) - - t.test('Should not update existing connections (mark alive)', t => { - t.plan(5) - class CustomConnectionPool extends ConnectionPool { - markAlive (connection) { - t.ok('called') - super.markAlive(connection) - } - } - const pool = new CustomConnectionPool({ Connection }) - const conn1 = pool.addConnection({ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true - } - }) - - const conn2 = pool.addConnection({ - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true - } - }) - - pool.markDead(conn1) - pool.markDead(conn2) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: null - }]) - - t.ok(pool.connections.find(c => c.id === 'a1').roles !== null) - t.ok(pool.connections.find(c => c.id === 'a2').roles !== null) - }) - - t.test('Should not update existing connections (same url, different id)', t => { - t.plan(3) - class CustomConnectionPool extends ConnectionPool { - markAlive (connection) { - t.ok('called') - super.markAlive(connection) - } - } - const pool = new CustomConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'http://127.0.0.1:9200/', - roles: { - master: true, - data: true, - ingest: true - } - }]) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: true - }]) - - // roles will never be updated, we only use it to do - // a dummy check to see if the connection has been updated - t.same(pool.connections.find(c => c.id === 'a1').roles, { - master: true, - data: true, - ingest: true, - ml: false - }) - t.equal(pool.connections.find(c => c.id === 'http://127.0.0.1:9200/'), undefined) - }) - - t.test('Add a new connection', t => { - t.plan(2) - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true - } - }) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: null - }]) - - t.ok(pool.connections.find(c => c.id === 'a1').roles !== null) - t.ok(pool.connections.find(c => c.id === 'a2')) - }) - - t.test('Remove old connections', t => { - t.plan(3) - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a2', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a3', - roles: null - }]) - - t.notOk(pool.connections.find(c => c.id === 'a1')) - t.ok(pool.connections.find(c => c.id === 'a2')) - t.ok(pool.connections.find(c => c.id === 'a3')) - }) - - t.test('Remove old connections (markDead)', t => { - t.plan(5) - const pool = new ConnectionPool({ Connection, sniffEnabled: true }) - const conn = pool.addConnection({ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }) - - pool.markDead(conn) - t.same(pool.dead, ['a1']) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a2', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a3', - roles: null - }]) - - t.same(pool.dead, 
[]) - t.notOk(pool.connections.find(c => c.id === 'a1')) - t.ok(pool.connections.find(c => c.id === 'a2')) - t.ok(pool.connections.find(c => c.id === 'a3')) - }) - - t.end() - }) - - t.end() -}) - -test('Node selector', t => { - t.test('round-robin', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - t.ok(pool.getConnection({ selector: roundRobinSelector() }) instanceof Connection) - }) - - t.test('random', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - t.ok(pool.getConnection({ selector: roundRobinSelector() }) instanceof Connection) - }) - - t.end() -}) - -test('Node filter', t => { - t.test('default', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ url: new URL('http://localhost:9200/') }) - t.ok(pool.getConnection({ filter: defaultNodeFilter }) instanceof Connection) - }) - - t.test('Should filter master only nodes', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - roles: { - master: true, - data: false, - ingest: false, - ml: false - } - }) - t.equal(pool.getConnection({ filter: defaultNodeFilter }), null) - }) - - t.end() -}) diff --git a/test/unit/connection.test.js b/test/unit/connection.test.js deleted file mode 100644 index a556564a0..000000000 --- a/test/unit/connection.test.js +++ /dev/null @@ -1,1108 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -const { test } = require('tap') -const { inspect } = require('util') -const { URL } = require('url') -const { Agent } = require('http') -const { Readable } = require('stream') -const hpagent = require('hpagent') -const intoStream = require('into-stream') -const { buildServer } = require('../utils') -const Connection = require('../../lib/Connection') -const { TimeoutError, ConfigurationError, RequestAbortedError, ConnectionError } = require('../../lib/errors') -const { getIssuerCertificate } = Connection.internals - -test('Basic (http)', t => { - t.plan(4) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - connection: 'keep-alive' - }) - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - - t.match(res.headers, { - connection: 'keep-alive' - }) - - let payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.equal(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Basic (https)', t => { - t.plan(4) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - connection: 'keep-alive' - }) - res.end('ok') - } - - buildServer(handler, { secure: true }, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`https://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - - t.match(res.headers, { - connection: 'keep-alive' - }) - - let payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.equal(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Basic (https with ssl agent)', t => { - t.plan(4) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - connection: 'keep-alive' - }) - res.end('ok') - } - - buildServer(handler, { secure: true }, ({ port, key, cert }, server) => { - const connection = new Connection({ - url: new URL(`https://localhost:${port}`), - ssl: { key, cert } - }) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - - t.match(res.headers, { - connection: 'keep-alive' - }) - - let payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.equal(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Custom http agent', t => { - t.plan(6) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - connection: 'keep-alive' - }) - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const agent = new Agent({ - keepAlive: true, - keepAliveMsecs: 1000, - maxSockets: 256, - maxFreeSockets: 256 - }) - agent.custom = true - const connection = new Connection({ - url: new URL(`http://localhost:${port}`), - agent: opts => { - t.match(opts, { - url: new URL(`http://localhost:${port}`) - }) - return agent - } - }) - t.ok(connection.agent.custom) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - - 
t.match(res.headers, { - connection: 'keep-alive' - }) - - let payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.equal(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Disable keep alive', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - connection: 'close' - }) - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`), - agent: false - }) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - - t.match(res.headers, { - connection: 'close' - }) - server.stop() - }) - }) -}) - -test('Timeout support', t => { - t.plan(1) - - function handler (req, res) { - setTimeout( - () => res.end('ok'), - 1000 - ) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - timeout: 500 - }, (err, res) => { - t.ok(err instanceof TimeoutError) - server.stop() - }) - }) -}) - -test('querystring', t => { - t.test('Should concatenate the querystring', t => { - t.plan(2) - - function handler (req, res) { - t.equal(req.url, '/hello?hello=world&you_know=for%20search') - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - querystring: 'hello=world&you_know=for%20search' - }, (err, res) => { - t.error(err) - server.stop() - }) - }) - }) - - t.test('If the querystring is null should not do anything', t => { - t.plan(2) - - function handler (req, res) { - t.equal(req.url, '/hello') - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - querystring: null - }, (err, res) => { - t.error(err) - server.stop() - }) - }) - }) - - t.end() -}) - -test('Body request', t => { - t.plan(2) - - function handler (req, res) { - let payload = '' - req.setEncoding('utf8') - req.on('data', chunk => { payload += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.equal(payload, 'hello') - res.end('ok') - }) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'POST', - body: 'hello' - }, (err, res) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Send body as buffer', t => { - t.plan(2) - - function handler (req, res) { - let payload = '' - req.setEncoding('utf8') - req.on('data', chunk => { payload += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.equal(payload, 'hello') - res.end('ok') - }) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'POST', - body: Buffer.from('hello') - }, (err, res) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Send body as stream', t => { - t.plan(2) - - function handler (req, res) { - let payload = '' - req.setEncoding('utf8') - req.on('data', chunk => { payload += chunk 
}) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.equal(payload, 'hello') - res.end('ok') - }) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'POST', - body: intoStream('hello') - }, (err, res) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Should not close a connection if there are open requests', t => { - t.plan(4) - - function handler (req, res) { - setTimeout(() => res.end('ok'), 1000) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - - setTimeout(() => { - t.equal(connection._openRequests, 1) - connection.close() - }, 500) - - connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.error(err) - t.equal(connection._openRequests, 0) - - let payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.equal(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Should not close a connection if there are open requests (with agent disabled)', t => { - t.plan(4) - - function handler (req, res) { - setTimeout(() => res.end('ok'), 1000) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`), - agent: false - }) - - setTimeout(() => { - t.equal(connection._openRequests, 1) - connection.close() - }, 500) - - connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.error(err) - t.equal(connection._openRequests, 0) - - let payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.equal(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Url with auth', t => { - t.plan(2) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'Basic Zm9vOmJhcg==' - }) - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://foo:bar@localhost:${port}`), - auth: { username: 'foo', password: 'bar' } - }) - connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Url with querystring', t => { - t.plan(2) - - function handler (req, res) { - t.equal(req.url, '/hello?foo=bar&baz=faz') - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}?foo=bar`) - }) - connection.request({ - path: '/hello', - method: 'GET', - querystring: 'baz=faz' - }, (err, res) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Custom headers for connection', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - 'x-foo': 'bar' - }) - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`), - headers: { 'x-foo': 'bar' } - }) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - // should not update the default - t.same(connection.headers, { 'x-foo': 'bar' }) - server.stop() - }) - }) -}) - -// TODO: add a check that the response is not decompressed -test('asStream set to true', t => { 
- t.plan(2) - - function handler (req, res) { - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - asStream: true - }, (err, res) => { - t.error(err) - - let payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.equal(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Connection id should not contain credentials', t => { - const connection = new Connection({ - url: new URL('http://user:password@localhost:9200/') - }) - t.equal(connection.id, 'http://localhost:9200/') - t.end() -}) - -test('Ipv6 support', t => { - const connection = new Connection({ - url: new URL('http://[::1]:9200/') - }) - t.equal(connection.buildRequestObject({}).hostname, '::1') - t.end() -}) - -test('Should throw if the protocol is not http or https', t => { - try { - new Connection({ // eslint-disable-line - url: new URL('nope://nope') - }) - t.fail('Should throw') - } catch (err) { - t.ok(err instanceof ConfigurationError) - t.equal(err.message, 'Invalid protocol: \'nope:\'') - } - t.end() -}) - -// https://github.com/nodejs/node/commit/b961d9fd83 -test('Should disallow two-byte characters in URL path', t => { - t.plan(1) - - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - connection.request({ - path: '/thisisinvalid\uffe2', - method: 'GET' - }, (err, res) => { - t.equal( - err.message, - 'ERR_UNESCAPED_CHARACTERS: /thisisinvalid\uffe2' - ) - }) -}) - -test('setRole', t => { - t.test('Update the value of a role', t => { - t.plan(2) - - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - - t.same(connection.roles, { - master: true, - data: true, - ingest: true, - ml: false - }) - - connection.setRole('master', false) - - t.same(connection.roles, { - master: false, - data: true, - ingest: true, - ml: false - }) - }) - - t.test('Invalid role', t => { - t.plan(2) - - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - - try { - connection.setRole('car', true) - t.fail('Shoud throw') - } catch (err) { - t.ok(err instanceof ConfigurationError) - t.equal(err.message, 'Unsupported role: \'car\'') - } - }) - - t.test('Invalid value', t => { - t.plan(2) - - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - - try { - connection.setRole('master', 1) - t.fail('Shoud throw') - } catch (err) { - t.ok(err instanceof ConfigurationError) - t.equal(err.message, 'enabled should be a boolean') - } - }) - - t.end() -}) - -test('Util.inspect Connection class should hide agent, ssl and auth', t => { - t.plan(1) - - const connection = new Connection({ - url: new URL('http://user:password@localhost:9200/'), - id: 'node-id', - headers: { foo: 'bar' } - }) - - // Removes spaces and new lines because - // utils.inspect is handled differently - // between major versions of Node.js - function cleanStr (str) { - return str - .replace(/\s/g, '') - .replace(/(\r\n|\n|\r)/gm, '') - } - - t.equal(cleanStr(inspect(connection)), cleanStr(`{ url: 'http://localhost:9200/', - id: 'node-id', - headers: { foo: 'bar' }, - deadCount: 0, - resurrectTimeout: 0, - _openRequests: 0, - status: 'alive', - roles: { master: true, data: true, ingest: true, ml: false }}`) - ) -}) - 
-test('connection.toJSON should hide agent, ssl and auth', t => { - t.plan(1) - - const connection = new Connection({ - url: new URL('http://user:password@localhost:9200/'), - id: 'node-id', - headers: { foo: 'bar' } - }) - - t.same(connection.toJSON(), { - url: 'http://localhost:9200/', - id: 'node-id', - headers: { - foo: 'bar' - }, - deadCount: 0, - resurrectTimeout: 0, - _openRequests: 0, - status: 'alive', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) -}) - -// https://github.com/elastic/elasticsearch-js/issues/843 -test('Port handling', t => { - t.test('http 80', t => { - const connection = new Connection({ - url: new URL('http://localhost/') - }) - - t.equal( - connection.buildRequestObject({}).port, - undefined - ) - - t.end() - }) - - t.test('https 443', t => { - const connection = new Connection({ - url: new URL('https://localhost/') - }) - - t.equal( - connection.buildRequestObject({}).port, - undefined - ) - - t.end() - }) - - t.end() -}) - -test('Authorization header', t => { - t.test('None', t => { - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - - t.same(connection.headers, {}) - - t.end() - }) - - t.test('Basic', t => { - const connection = new Connection({ - url: new URL('http://localhost:9200/'), - auth: { username: 'foo', password: 'bar' } - }) - - t.same(connection.headers, { authorization: 'Basic Zm9vOmJhcg==' }) - - t.end() - }) - - t.test('ApiKey (string)', t => { - const connection = new Connection({ - url: new URL('http://localhost:9200/'), - auth: { apiKey: 'Zm9vOmJhcg==' } - }) - - t.same(connection.headers, { authorization: 'ApiKey Zm9vOmJhcg==' }) - - t.end() - }) - - t.test('ApiKey (object)', t => { - const connection = new Connection({ - url: new URL('http://localhost:9200/'), - auth: { apiKey: { id: 'foo', api_key: 'bar' } } - }) - - t.same(connection.headers, { authorization: 'ApiKey Zm9vOmJhcg==' }) - - t.end() - }) - - t.end() -}) - -test('Should not add agent and ssl to the serialized connection', t => { - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - - t.equal( - JSON.stringify(connection), - '{"url":"http://localhost:9200/","id":"http://localhost:9200/","headers":{},"deadCount":0,"resurrectTimeout":0,"_openRequests":0,"status":"alive","roles":{"master":true,"data":true,"ingest":true,"ml":false}}' - ) - - t.end() -}) - -test('Abort a request syncronously', t => { - t.plan(1) - - function handler (req, res) { - t.fail('The server should not be contacted') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - const request = connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.ok(err instanceof RequestAbortedError) - server.stop() - }) - request.abort() - }) -}) - -test('Abort a request asyncronously', t => { - t.plan(1) - - function handler (req, res) { - // might be called or not - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - const request = connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.ok(err instanceof RequestAbortedError) - server.stop() - }) - setImmediate(() => request.abort()) - }) -}) - -test('Should correctly resolve request pathname', t => { - t.plan(1) - - const connection = new 
Connection({ - url: new URL('http://localhost/test') - }) - - t.equal( - connection.buildRequestObject({ - path: 'hello' - }).pathname, - '/test/hello' - ) -}) - -test('Proxy agent (http)', t => { - t.plan(1) - - const connection = new Connection({ - url: new URL('http://localhost:9200/'), - proxy: 'http://localhost:8080/' - }) - - t.ok(connection.agent instanceof hpagent.HttpProxyAgent) -}) - -test('Proxy agent (https)', t => { - t.plan(1) - - const connection = new Connection({ - url: new URL('https://localhost:9200/'), - proxy: 'http://localhost:8080/' - }) - - t.ok(connection.agent instanceof hpagent.HttpsProxyAgent) -}) - -test('Abort with a slow body', t => { - t.plan(1) - - const connection = new Connection({ - url: new URL('https://localhost:9200/'), - proxy: 'http://localhost:8080/' - }) - - const slowBody = new Readable({ - read (size) { - setTimeout(() => { - this.push('{"size":1, "query":{"match_all":{}}}') - this.push(null) // EOF - }, 1000).unref() - } - }) - - const request = connection.request({ - method: 'GET', - path: '/', - body: slowBody - }, (err, response) => { - t.ok(err instanceof RequestAbortedError) - }) - - setImmediate(() => request.abort()) -}) - -test('Check server fingerprint (success)', t => { - t.plan(2) - - function handler (req, res) { - res.end('ok') - } - - buildServer(handler, { secure: true }, ({ port, caFingerprint }, server) => { - const connection = new Connection({ - url: new URL(`https://localhost:${port}`), - caFingerprint - }) - - connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.error(err) - - let payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.equal(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Check server fingerprint (failure)', t => { - t.plan(2) - - function handler (req, res) { - res.end('ok') - } - - buildServer(handler, { secure: true }, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`https://localhost:${port}`), - caFingerprint: 'FO:OB:AR' - }) - - connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.ok(err instanceof ConnectionError) - t.equal(err.message, 'Server certificate CA fingerprint does not match the value configured in caFingerprint') - server.stop() - }) - }) -}) - -test('getIssuerCertificate returns the root CA', t => { - t.plan(2) - const issuerCertificate = { - fingerprint256: 'BA:ZF:AZ', - subject: { - C: '1', - ST: '1', - L: '1', - O: '1', - OU: '1', - CN: '1' - }, - issuer: { - C: '1', - ST: '1', - L: '1', - O: '1', - OU: '1', - CN: '1' - } - } - issuerCertificate.issuerCertificate = issuerCertificate - - const socket = { - getPeerCertificate (bool) { - t.ok(bool) - return { - fingerprint256: 'FO:OB:AR', - subject: { - C: '1', - ST: '1', - L: '1', - O: '1', - OU: '1', - CN: '1' - }, - issuer: { - C: '2', - ST: '2', - L: '2', - O: '2', - OU: '2', - CN: '2' - }, - issuerCertificate - } - } - } - t.same(getIssuerCertificate(socket), issuerCertificate) -}) - -test('getIssuerCertificate detects invalid/malformed certificates', t => { - t.plan(2) - const socket = { - getPeerCertificate (bool) { - t.ok(bool) - return { - fingerprint256: 'FO:OB:AR', - subject: { - C: '1', - ST: '1', - L: '1', - O: '1', - OU: '1', - CN: '1' - }, - issuer: { - C: '2', - ST: '2', - L: '2', - O: '2', - OU: '2', - CN: '2' - } - // missing issuerCertificate - } - } - } - 
t.equal(getIssuerCertificate(socket), null) -}) - -test('Should show local/remote socket address in case of ECONNRESET', t => { - t.plan(2) - - function handler (req, res) { - res.destroy() - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.ok(err instanceof ConnectionError) - t.match(err.message, /socket\shang\sup\s-\sLocal:\s127.0.0.1:\d+,\sRemote:\s127.0.0.1:\d+/) - server.stop() - }) - }) -}) diff --git a/test/unit/errors.test.js b/test/unit/errors.test.js deleted file mode 100644 index b8db815d6..000000000 --- a/test/unit/errors.test.js +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint no-prototype-builtins: 0 */ - -const { test } = require('tap') -const { errors } = require('../../index') - -test('ElasticsearchClientError', t => { - const err = new errors.ElasticsearchClientError() - t.ok(err instanceof Error) - t.end() -}) - -test('TimeoutError', t => { - const err = new errors.TimeoutError() - t.ok(err instanceof Error) - t.ok(err instanceof errors.ElasticsearchClientError) - t.ok(err.hasOwnProperty('meta')) - t.end() -}) - -test('ConnectionError', t => { - const err = new errors.ConnectionError() - t.ok(err instanceof Error) - t.ok(err instanceof errors.ElasticsearchClientError) - t.ok(err.hasOwnProperty('meta')) - t.end() -}) - -test('NoLivingConnectionsError', t => { - const err = new errors.NoLivingConnectionsError() - t.ok(err instanceof Error) - t.ok(err instanceof errors.ElasticsearchClientError) - t.ok(err.hasOwnProperty('meta')) - t.end() -}) - -test('SerializationError', t => { - const err = new errors.SerializationError() - t.ok(err instanceof Error) - t.ok(err instanceof errors.ElasticsearchClientError) - t.notOk(err.hasOwnProperty('meta')) - t.ok(err.hasOwnProperty('data')) - t.end() -}) - -test('DeserializationError', t => { - const err = new errors.DeserializationError() - t.ok(err instanceof Error) - t.ok(err instanceof errors.ElasticsearchClientError) - t.notOk(err.hasOwnProperty('meta')) - t.ok(err.hasOwnProperty('data')) - t.end() -}) - -test('ConfigurationError', t => { - const err = new errors.ConfigurationError() - t.ok(err instanceof Error) - t.ok(err instanceof errors.ElasticsearchClientError) - t.notOk(err.hasOwnProperty('meta')) - t.end() -}) - -test('ResponseError', t => { - const meta = { - body: 1, - statusCode: 1, - headers: 1 - } - const err = new errors.ResponseError(meta) - t.ok(err instanceof Error) - t.ok(err instanceof errors.ElasticsearchClientError) - t.ok(err.hasOwnProperty('meta')) - t.ok(err.body) - t.ok(err.statusCode) - t.ok(err.headers) - t.end() -}) - 
-test('RequestAbortedError', t => { - const err = new errors.RequestAbortedError() - t.ok(err instanceof Error) - t.ok(err instanceof errors.ElasticsearchClientError) - t.ok(err.hasOwnProperty('meta')) - t.end() -}) - -test('ResponseError with meaningful message / 1', t => { - const meta = { - body: { - error: { - root_cause: [ - { - type: 'index_not_found_exception', - reason: 'no such index [foo]', - 'resource.type': 'index_expression', - 'resource.id': 'foo', - index_uuid: '_na_', - index: 'foo' - } - ], - type: 'index_not_found_exception', - reason: 'no such index [foo]', - 'resource.type': 'index_expression', - 'resource.id': 'foo', - index_uuid: '_na_', - index: 'foo' - }, - status: 404 - }, - statusCode: 404, - headers: {} - } - const err = new errors.ResponseError(meta) - t.equal(err.message, 'index_not_found_exception: [index_not_found_exception] Reason: no such index [foo]') - t.equal(err.toString(), JSON.stringify(meta.body)) - t.end() -}) - -test('ResponseError with meaningful message / 2', t => { - const meta = { - body: { - error: { - root_cause: [ - { - type: 'index_not_found_exception', - reason: 'no such index [foo]', - 'resource.type': 'index_expression', - 'resource.id': 'foo', - index_uuid: '_na_', - index: 'foo' - }, - { - type: 'nested_cause', - reason: 'this is a nested cause', - 'resource.type': 'index_expression', - 'resource.id': 'foo', - index_uuid: '_na_', - index: 'foo' - } - ], - type: 'index_not_found_exception', - reason: 'no such index [foo]', - 'resource.type': 'index_expression', - 'resource.id': 'foo', - index_uuid: '_na_', - index: 'foo' - }, - status: 404 - }, - statusCode: 404, - headers: {} - } - const err = new errors.ResponseError(meta) - t.equal(err.message, 'index_not_found_exception: [index_not_found_exception] Reason: no such index [foo]; [nested_cause] Reason: this is a nested cause') - t.equal(err.toString(), JSON.stringify(meta.body)) - t.end() -}) - -test('ResponseError with meaningful message / 3', t => { - const meta = { - body: { - error: { - type: 'index_not_found_exception', - reason: 'no such index [foo]', - 'resource.type': 'index_expression', - 'resource.id': 'foo', - index_uuid: '_na_', - index: 'foo' - }, - status: 404 - }, - statusCode: 404, - headers: {} - } - const err = new errors.ResponseError(meta) - t.equal(err.message, 'index_not_found_exception') - t.equal(err.toString(), JSON.stringify(meta.body)) - t.end() -}) - -test('ResponseError with meaningful message when body is not json', t => { - const meta = { - statusCode: 400, - body: 'error!', - headers: { 'content-type': 'text/html' } - } - const err = new errors.ResponseError(meta) - t.equal(err.name, 'ResponseError') - t.equal(err.message, 'error!') - t.equal(err.toString(), JSON.stringify(meta.body)) - t.end() -}) - -test('ResponseError with meaningful message when body is falsy', t => { - const meta = { - statusCode: 400, - body: '', - headers: { 'content-type': 'text/plain' } - } - const err = new errors.ResponseError(meta) - t.equal(err.name, 'ResponseError') - t.equal(err.message, 'Response Error') - t.equal(err.toString(), JSON.stringify(meta.body)) - t.end() -}) diff --git a/test/unit/esm/index.mjs b/test/unit/esm/index.mjs deleted file mode 100644 index 4f5d17f6a..000000000 --- a/test/unit/esm/index.mjs +++ /dev/null @@ -1,8 +0,0 @@ -import t from 'tap' -import { Client } from '../../../index.mjs' - -t.test('esm support', t => { - t.plan(1) - const client = new Client({ node: 'http://localhost:9200/' }) - t.equal(client.name, 'elasticsearch-js') -}) diff 
--git a/test/unit/esm/index.test.js b/test/unit/esm/index.test.js deleted file mode 100644 index 48772ae2f..000000000 --- a/test/unit/esm/index.test.js +++ /dev/null @@ -1,19 +0,0 @@ -'use strict' - -const t = require('tap') -const semver = require('semver') - -if (semver.lt(process.versions.node, '12.17.0')) { - t.skip('Skip because Node version < 12.17.0') - t.end() -} else { - // Node v8 throw a `SyntaxError: Unexpected token import` - // even if this branch is never touch in the code, - // by using `eval` we can avoid this issue. - // eslint-disable-next-line - new Function('module', 'return import(module)')('./index.mjs').catch((err) => { - process.nextTick(() => { - throw err - }) - }) -} diff --git a/test/unit/events.test.js b/test/unit/events.test.js deleted file mode 100644 index 5286a3f40..000000000 --- a/test/unit/events.test.js +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { test } = require('tap') -const { events } = require('../../index') -const { TimeoutError } = require('../../lib/errors') -const { - Client, - connection: { - MockConnection, - MockConnectionTimeout - } -} = require('../utils') - -test('Should emit a request event when a request is performed', t => { - t.plan(3) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on(events.REQUEST, (err, request) => { - t.error(err) - t.match(request, { - body: null, - statusCode: null, - headers: null, - warnings: null, - meta: { - context: null, - name: 'elasticsearch-js', - request: { - params: { - method: 'GET', - path: '/test/_search', - body: '', - querystring: 'q=foo%3Abar' - }, - options: {}, - id: 1 - }, - connection: { - id: 'http://localhost:9200/' - }, - attempts: 0, - aborted: false - } - }) - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, result) => { - t.error(err) - }) -}) - -test('Should emit a request event once when a request is performed', t => { - t.plan(4) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.once(events.REQUEST, (err, request) => { - t.error(err) - t.match(request, { - body: null, - statusCode: null, - headers: null, - warnings: null, - meta: { - context: null, - name: 'elasticsearch-js', - request: { - params: { - method: 'GET', - path: '/test/_search', - body: '', - querystring: 'q=foo%3Abar' - }, - options: {}, - id: 1 - }, - connection: { - id: 'http://localhost:9200/' - }, - attempts: 0, - aborted: false - } - }) - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, result) => { - t.error(err) - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, result) => { - t.error(err) - 
}) -}) - -test('Remove an event', t => { - t.plan(4) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on(events.REQUEST, onRequest) - function onRequest (err, request) { - t.error(err) - t.match(request, { - body: null, - statusCode: null, - headers: null, - warnings: null, - meta: { - context: null, - name: 'elasticsearch-js', - request: { - params: { - method: 'GET', - path: '/test/_search', - body: '', - querystring: 'q=foo%3Abar' - }, - options: {}, - id: 1 - }, - connection: { - id: 'http://localhost:9200/' - }, - attempts: 0, - aborted: false - } - }) - - client.off('request', onRequest) - } - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, result) => { - t.error(err) - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, result) => { - t.error(err) - }) -}) - -test('Should emit a response event in case of a successful response', t => { - t.plan(3) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on(events.RESPONSE, (err, request) => { - t.error(err) - t.match(request, { - body: { hello: 'world' }, - statusCode: 200, - headers: { - 'content-type': 'application/json;utf=8', - connection: 'keep-alive' - }, - warnings: null, - meta: { - context: null, - name: 'elasticsearch-js', - request: { - params: { - method: 'GET', - path: '/test/_search', - body: '', - querystring: 'q=foo%3Abar' - }, - options: {}, - id: 1 - }, - connection: { - id: 'http://localhost:9200/' - }, - attempts: 0, - aborted: false - } - }) - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, result) => { - t.error(err) - }) -}) - -test('Should emit a response event with the error set', t => { - t.plan(3) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnectionTimeout, - maxRetries: 0 - }) - - client.on(events.RESPONSE, (err, request) => { - t.ok(err instanceof TimeoutError) - t.match(request, { - body: null, - statusCode: null, - headers: null, - warnings: null, - meta: { - context: null, - name: 'elasticsearch-js', - request: { - params: { - method: 'GET', - path: '/test/_search', - body: '', - querystring: 'q=foo%3Abar' - }, - options: { - requestTimeout: 500 - }, - id: 1 - }, - connection: { - id: 'http://localhost:9200/' - }, - attempts: 0, - aborted: false - } - }) - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, { - requestTimeout: 500 - }, (err, result) => { - t.ok(err instanceof TimeoutError) - }) -}) - -test('Emit event', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on(events.REQUEST, (err, request) => { - t.error(err) - t.same(request, { hello: 'world' }) - }) - - client.emit(events.REQUEST, null, { hello: 'world' }) -}) diff --git a/test/unit/helpers/bulk.test.js b/test/unit/helpers/bulk.test.ts similarity index 88% rename from test/unit/helpers/bulk.test.js rename to test/unit/helpers/bulk.test.ts index b8a00a1f0..d9bfcda6c 100644 --- a/test/unit/helpers/bulk.test.js +++ b/test/unit/helpers/bulk.test.ts @@ -17,19 +17,23 @@ * under the License. 
*/ -'use strict' - -const { createReadStream } = require('fs') -const { join } = require('path') -const split = require('split2') -const FakeTimers = require('@sinonjs/fake-timers') -const { test } = require('tap') -const { errors } = require('../../../') -const { Client, buildServer, connection } = require('../../utils') -let clientVersion = require('../../../package.json').version +import * as http from 'http' +import { createReadStream } from 'fs' +import { join } from 'path' +import split from 'split2' +import FakeTimers from '@sinonjs/fake-timers' +import { test } from 'tap' +import { Client, errors } from '../../../' +import { buildServer, connection } from '../../utils' + +let clientVersion: string = require('../../../package.json').version // eslint-disable-line if (clientVersion.includes('-')) { clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' } +let transportVersion: string = require('@elastic/transport/package.json').version // eslint-disable-line +if (transportVersion.includes('-')) { + transportVersion = transportVersion.slice(0, transportVersion.indexOf('-')) + 'p' +} const nodeVersion = process.versions.node const dataset = [ @@ -46,9 +50,10 @@ test('bulk index', t => { onRequest (params) { t.equal(params.path, '/_bulk') t.match(params.headers, { - 'content-type': 'application/x-ndjson', - 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion},h=bp` + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8', + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) t.same(JSON.parse(payload), dataset[count++]) @@ -90,10 +95,11 @@ test('bulk index', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) t.notMatch(params.headers, { - 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion},h=bp` + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) t.same(JSON.parse(payload), dataset[count++]) @@ -135,7 +141,8 @@ test('bulk index', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error t.equal(params.body.split('\n').filter(Boolean).length, 6) return { body: { errors: false, items: new Array(3).fill({}) } } } @@ -179,7 +186,8 @@ test('bulk index', t => { return { body: { acknowledged: true } } } else { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) t.same(JSON.parse(payload), dataset[count++]) @@ -224,7 
+232,8 @@ test('bulk index', t => { return { body: { acknowledged: true } } } else { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) t.same(JSON.parse(payload), dataset[count++]) @@ -265,7 +274,8 @@ test('bulk index', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test', _id: count } }) t.same(JSON.parse(payload), dataset[count++]) @@ -286,7 +296,7 @@ test('bulk index', t => { return { index: { _index: 'test', - _id: id++ + _id: String(id++) } } }, @@ -307,9 +317,9 @@ test('bulk index', t => { }) t.test('Should perform a bulk request (retry)', async t => { - async function handler (req, res) { + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { t.equal(req.url, '/_bulk') - t.match(req.headers, { 'content-type': 'application/x-ndjson' }) + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) let body = '' req.setEncoding('utf8') @@ -376,7 +386,7 @@ test('bulk index', t => { }) t.test('Should perform a bulk request (retry a single document from batch)', async t => { - function handler (req, res) { + function handler (req: http.IncomingMessage, res: http.ServerResponse) { res.setHeader('content-type', 'application/json') res.end(JSON.stringify({ took: 0, @@ -425,9 +435,9 @@ test('bulk index', t => { }) t.test('Should perform a bulk request (failure)', async t => { - async function handler (req, res) { + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { t.equal(req.url, '/_bulk') - t.match(req.headers, { 'content-type': 'application/x-ndjson' }) + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) let body = '' req.setEncoding('utf8') @@ -524,7 +534,7 @@ test('bulk index', t => { try { await b t.fail('Should throw') - } catch (err) { + } catch (err: any) { t.ok(err instanceof errors.ResponseError) } }) @@ -560,15 +570,15 @@ test('bulk index', t => { try { await b t.fail('Should throw') - } catch (err) { + } catch (err: any) { t.ok(err instanceof errors.ResponseError) } }) t.test('Should abort a bulk request', async t => { - async function handler (req, res) { + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { t.equal(req.url, '/_bulk') - t.match(req.headers, { 'content-type': 'application/x-ndjson' }) + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) let body = '' req.setEncoding('utf8') @@ -646,6 +656,7 @@ test('bulk index', t => { datasource: dataset.slice(), flushBytes: 1, concurrency: 1, + // @ts-expect-error onDocument (doc) { return { foo: { _index: 'test' } @@ -667,7 +678,8 @@ test('bulk index', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 
'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test', _id: count } }) t.same(JSON.parse(payload), dataset[count++]) @@ -690,7 +702,7 @@ test('bulk index', t => { return { index: { _index: 'test', - _id: id++ + _id: String(id++) } } }, @@ -719,7 +731,8 @@ test('bulk index', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) t.same(JSON.parse(payload), dataset[count++]) @@ -775,7 +788,8 @@ test('bulk create', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { create: { _index: 'test', _id: count } }) t.same(JSON.parse(payload), dataset[count++]) @@ -796,7 +810,7 @@ test('bulk create', t => { return { create: { _index: 'test', - _id: id++ + _id: String(id++) } } }, @@ -824,7 +838,8 @@ test('bulk update', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { update: { _index: 'test', _id: count } }) t.same(JSON.parse(payload), { doc: dataset[count++], doc_as_upsert: true }) @@ -845,7 +860,7 @@ test('bulk update', t => { return [{ update: { _index: 'test', - _id: id++ + _id: String(id++) } }, { doc_as_upsert: true @@ -872,7 +887,8 @@ test('bulk update', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { update: { _index: 'test', _id: count } }) t.same(JSON.parse(payload), { doc: dataset[count++] }) @@ -893,9 +909,9 @@ test('bulk update', t => { return [{ update: { _index: 'test', - _id: id++ + _id: String(id++) } - }] + }, {}] }, onDrop (doc) { t.fail('This should never be called') @@ -917,11 +933,12 @@ test('bulk update', t => { let count = 0 const MockConnection = connection.buildMockConnection({ onRequest (params) { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { update: { _index: 'test', _id: count } }) - t.deepEqual(JSON.parse(payload), { doc: dataset[count++], 
doc_as_upsert: true }) + t.same(JSON.parse(action), { update: { _index: 'test', _id: count } }) + t.same(JSON.parse(payload), { doc: dataset[count++], doc_as_upsert: true }) return { body: { errors: false, items: [{ update: { result: 'noop' } }] } } } }) @@ -939,7 +956,7 @@ test('bulk update', t => { return [{ update: { _index: 'test', - _id: id++ + _id: String(id++) } }, { doc_as_upsert: true @@ -971,7 +988,8 @@ test('bulk delete', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error t.same(JSON.parse(params.body), { delete: { _index: 'test', _id: count++ } }) return { body: { errors: false, items: [{}] } } } @@ -990,7 +1008,7 @@ test('bulk delete', t => { return { delete: { _index: 'test', - _id: id++ + _id: String(id++) } } }, @@ -1011,9 +1029,9 @@ test('bulk delete', t => { }) t.test('Should perform a bulk request (failure)', async t => { - async function handler (req, res) { + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { t.equal(req.url, '/_bulk') - t.match(req.headers, { 'content-type': 'application/x-ndjson' }) + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) let body = '' req.setEncoding('utf8') @@ -1023,7 +1041,7 @@ test('bulk delete', t => { res.setHeader('content-type', 'application/json') - if (JSON.parse(body).delete._id === 1) { + if (JSON.parse(body).delete._id === '1') { res.end(JSON.stringify({ took: 0, errors: true, @@ -1055,7 +1073,7 @@ test('bulk delete', t => { return { delete: { _index: 'test', - _id: id++ + _id: String(id++) } } }, @@ -1094,7 +1112,7 @@ test('transport options', t => { if (params.path === '/_bulk') { t.match(params.headers, { - 'content-type': 'application/x-ndjson', + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8', foo: 'bar' }) return { body: { errors: false, items: [{}] } } @@ -1152,6 +1170,7 @@ test('errors', t => { }) try { await client.helpers.bulk({ + // @ts-expect-error datasource: 'hello', onDocument (doc) { return { @@ -1159,7 +1178,7 @@ test('errors', t => { } } }) - } catch (err) { + } catch (err: any) { t.ok(err instanceof errors.ConfigurationError) t.equal(err.message, 'bulk helper: the datasource must be an array or a buffer or a readable stream or an async generator') } @@ -1170,6 +1189,7 @@ test('errors', t => { node: 'http://localhost:9200/' }) try { + // @ts-expect-error await client.helpers.bulk({ onDocument (doc) { return { @@ -1177,7 +1197,7 @@ test('errors', t => { } } }) - } catch (err) { + } catch (err: any) { t.ok(err instanceof errors.ConfigurationError) t.equal(err.message, 'bulk helper: the datasource is required') } @@ -1188,10 +1208,11 @@ test('errors', t => { node: 'http://localhost:9200/' }) try { + // @ts-expect-error await client.helpers.bulk({ datasource: dataset.slice() }) - } catch (err) { + } catch (err: any) { t.ok(err instanceof errors.ConfigurationError) t.equal(err.message, 'bulk helper: the onDocument callback is required') } @@ -1209,7 +1230,8 @@ test('Flush interval', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 
'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) t.same(JSON.parse(payload), dataset[count++]) @@ -1261,7 +1283,8 @@ test('Flush interval', t => { onRequest (params) { t.ok(count < 2) t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) t.same(JSON.parse(payload), dataset[count++]) @@ -1282,6 +1305,7 @@ test('Flush interval', t => { // Needed otherwise in Node.js 10 // the second request will never be sent await Promise.resolve() + // @ts-ignore b.abort() } yield chunk @@ -1316,14 +1340,15 @@ test('Flush interval', t => { let count = 0 const MockConnection = connection.buildMockConnection({ onRequest (params) { - t.strictEqual(params.path, '/_bulk') + t.equal(params.path, '/_bulk') t.match(params.headers, { - 'content-type': 'application/x-ndjson', - 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion},h=bp` + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8', + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` }) + // @ts-expect-error const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { index: { _index: 'test' } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), dataset[count++]) return { body: { errors: false, items: [{}] } } } }) diff --git a/test/unit/helpers/msearch.test.js b/test/unit/helpers/msearch.test.ts similarity index 76% rename from test/unit/helpers/msearch.test.js rename to test/unit/helpers/msearch.test.ts index b7a06ac8f..f0290d3b3 100644 --- a/test/unit/helpers/msearch.test.js +++ b/test/unit/helpers/msearch.test.ts @@ -17,12 +17,10 @@ * under the License. 
*/ -'use strict' - -const { test } = require('tap') -const { errors } = require('../../../') -const { Client, connection } = require('../../utils') -const FakeTimers = require('@sinonjs/fake-timers') +import { test } from 'tap' +import { Client, errors } from '../../../' +import { connection } from '../../utils' +import FakeTimers from '@sinonjs/fake-timers' test('Basic', async t => { const MockConnection = connection.buildMockConnection({ @@ -77,7 +75,7 @@ test('Basic', async t => { }) test('Multiple searches (inside async iterator)', t => { - t.plan(6) + t.plan(4) const MockConnection = connection.buildMockConnection({ onRequest (params) { @@ -114,51 +112,53 @@ test('Multiple searches (inside async iterator)', t => { const m = client.helpers.msearch({ operations: 2 }) - m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.same(result.body, { - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }) + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }) - t.same(result.documents, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) - }) - - m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.same(result.body, { - status: 200, - hits: { - hits: [ - { _source: { four: 'four' } }, - { _source: { five: 'five' } }, - { _source: { six: 'six' } } - ] - } + t.same(result.documents, [ + { one: 'one' }, + { two: 'two' }, + { three: 'three' } + ]) }) + .catch(t.error) + + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { four: 'four' } }, + { _source: { five: 'five' } }, + { _source: { six: 'six' } } + ] + } + }) - t.same(result.documents, [ - { four: 'four' }, - { five: 'five' }, - { six: 'six' } - ]) - }) + t.same(result.documents, [ + { four: 'four' }, + { five: 'five' }, + { six: 'six' } + ]) + }) + .catch(t.error) t.teardown(() => m.stop()) }) test('Multiple searches (async iterator exits)', t => { - t.plan(6) + t.plan(4) const MockConnection = connection.buildMockConnection({ onRequest (params) { @@ -195,45 +195,47 @@ test('Multiple searches (async iterator exits)', t => { const m = client.helpers.msearch() - m.search({ index: 'test' }, { query: {} }, (err, result) => { - t.error(err) - t.same(result.body, { - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }) + m.search({ index: 'test' }, { query: {} }) + .then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }) - t.same(result.documents, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) - }) - - m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.same(result.body, { - status: 200, - hits: { - hits: [ - { _source: { four: 'four' } }, - { _source: { five: 'five' } }, - { _source: { six: 'six' } } - ] - } + t.same(result.documents, [ + { one: 'one' }, + { two: 'two' }, + { three: 'three' } + ]) }) + .catch(t.error) + + m.search({ index: 'test' }, { query: {} }) + 
.then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { four: 'four' } }, + { _source: { five: 'five' } }, + { _source: { six: 'six' } } + ] + } + }) - t.same(result.documents, [ - { four: 'four' }, - { five: 'five' }, - { six: 'six' } - ]) - }) + t.same(result.documents, [ + { four: 'four' }, + { five: 'five' }, + { six: 'six' } + ]) + }) + .catch(t.error) setImmediate(() => m.stop()) }) @@ -241,7 +243,7 @@ test('Multiple searches (async iterator exits)', t => { test('Stop a msearch processor (promises)', async t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { - return {} + return { body: {} } } }) @@ -259,42 +261,19 @@ test('Stop a msearch processor (promises)', async t => { { index: 'test' }, { query: { match: { foo: 'bar' } } } ) - } catch (err) { + } catch (err: any) { t.equal(err.message, 'The msearch processor has been stopped') } t.teardown(() => m.stop()) }) -test('Stop a msearch processor (callbacks)', t => { - t.plan(1) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return {} - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const m = client.helpers.msearch() - - m.stop() - - m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.equal(err.message, 'The msearch processor has been stopped') - }) -}) - test('Bad header', t => { - t.plan(2) + t.plan(1) const MockConnection = connection.buildMockConnection({ onRequest (params) { - return {} + return { body: {} } } }) @@ -305,10 +284,7 @@ test('Bad header', t => { const m = client.helpers.msearch() - m.search(null, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.equal(err.message, 'The header should be an object') - }) - + // @ts-expect-error m.search(null, { query: { match: { foo: 'bar' } } }) .catch(err => { t.equal(err.message, 'The header should be an object') @@ -318,11 +294,11 @@ test('Bad header', t => { }) test('Bad body', t => { - t.plan(2) + t.plan(1) const MockConnection = connection.buildMockConnection({ onRequest (params) { - return {} + return { body: {} } } }) @@ -333,10 +309,7 @@ test('Bad body', t => { const m = client.helpers.msearch() - m.search({ index: 'test' }, null, (err, result) => { - t.equal(err.message, 'The body should be an object') - }) - + // @ts-expect-error m.search({ index: 'test' }, null) .catch(err => { t.equal(err.message, 'The body should be an object') @@ -435,7 +408,7 @@ test('Single search errors', async t => { { index: 'test' }, { query: { match: { foo: 'bar' } } } ) - } catch (err) { + } catch (err: any) { t.ok(err instanceof errors.ResponseError) } @@ -443,7 +416,7 @@ test('Single search errors', async t => { }) test('Entire msearch fails', t => { - t.plan(4) + t.plan(2) const MockConnection = connection.buildMockConnection({ onRequest (params) { @@ -464,15 +437,15 @@ test('Entire msearch fails', t => { const m = client.helpers.msearch({ operations: 1 }) - m.search({ index: 'test' }, { query: {} }, (err, result) => { - t.ok(err instanceof errors.ResponseError) - t.same(result.documents, []) - }) + m.search({ index: 'test' }, { query: {} }) + .catch(err => { + t.ok(err instanceof errors.ResponseError) + }) - m.search({ index: 'test' }, { query: {} }, (err, result) => { - t.ok(err instanceof errors.ResponseError) - t.same(result.documents, []) - }) + m.search({ index: 'test' }, { query: {} }) + .catch(err => { + t.ok(err instanceof errors.ResponseError) + }) 
t.teardown(() => m.stop()) }) @@ -482,7 +455,7 @@ test('Resolves the msearch helper', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { - return {} + return { body: {} } } }) @@ -508,7 +481,7 @@ test('Stop the msearch helper with an error', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { - return {} + return { body: {} } } }) @@ -528,13 +501,14 @@ test('Stop the msearch helper with an error', t => { m.catch(err => t.equal(err.message, 'kaboom')) - m.search({ index: 'test' }, { query: {} }, (err, result) => { - t.equal(err.message, 'kaboom') - }) + m.search({ index: 'test' }, { query: {} }) + .catch(err => { + t.equal(err.message, 'kaboom') + }) }) test('Multiple searches (concurrency = 1)', t => { - t.plan(6) + t.plan(4) const MockConnection = connection.buildMockConnection({ onRequest (params) { @@ -562,51 +536,53 @@ test('Multiple searches (concurrency = 1)', t => { const m = client.helpers.msearch({ operations: 1, concurrency: 1 }) - m.search({ index: 'test' }, { query: {} }, (err, result) => { - t.error(err) - t.same(result.body, { - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }) + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }) - t.same(result.documents, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) - }) - - m.search({ index: 'test' }, { query: {} }, (err, result) => { - t.error(err) - t.same(result.body, { - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } + t.same(result.documents, [ + { one: 'one' }, + { two: 'two' }, + { three: 'three' } + ]) }) + .catch(t.error) + + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }) - t.same(result.documents, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) - }) + t.same(result.documents, [ + { one: 'one' }, + { two: 'two' }, + { three: 'three' } + ]) + }) + .catch(t.error) t.teardown(() => m.stop()) }) test('Flush interval', t => { - t.plan(4) + t.plan(2) const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'] }) t.teardown(() => clock.uninstall()) @@ -645,15 +621,15 @@ test('Flush interval', t => { const m = client.helpers.msearch() - m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.equal(result.documents.length, 3) - }) + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.equal(result.documents.length, 3) + }) - m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.equal(result.documents.length, 3) - }) + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.equal(result.documents.length, 3) + }) setImmediate(clock.next) @@ -661,7 +637,7 @@ test('Flush interval', t => { }) test('Flush interval - early stop', t => { - t.plan(3) + t.plan(2) const MockConnection = connection.buildMockConnection({ onRequest (params) { @@ -689,15 +665,16 @@ test('Flush interval - early 
stop', t => { const m = client.helpers.msearch() - m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.equal(result.documents.length, 3) - }) + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.equal(result.documents.length, 3) + }) setImmediate(() => { - m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.ok(err instanceof errors.ConfigurationError) - }) + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .catch(err => { + t.ok(err instanceof errors.ConfigurationError) + }) }) m.stop() diff --git a/test/unit/helpers/scroll.test.js b/test/unit/helpers/scroll.test.ts similarity index 83% rename from test/unit/helpers/scroll.test.js rename to test/unit/helpers/scroll.test.ts index 2a8ea2a4f..b7ab9f735 100644 --- a/test/unit/helpers/scroll.test.js +++ b/test/unit/helpers/scroll.test.ts @@ -17,15 +17,18 @@ * under the License. */ -'use strict' +import { test } from 'tap' +import { Client, errors } from '../../../' +import { connection } from '../../utils' -const { test } = require('tap') -const { errors } = require('../../../') -const { Client, connection } = require('../../utils') -let clientVersion = require('../../../package.json').version +let clientVersion: string = require('../../../package.json').version // eslint-disable-line if (clientVersion.includes('-')) { clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' } +let transportVersion: string = require('@elastic/transport/package.json').version // eslint-disable-line +if (transportVersion.includes('-')) { + transportVersion = transportVersion.slice(0, transportVersion.indexOf('-')) + 'p' +} const nodeVersion = process.versions.node test('Scroll search', async t => { @@ -33,12 +36,17 @@ test('Scroll search', async t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.match(params.headers, { - 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion},h=s` + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=s` }) count += 1 if (params.method === 'POST') { - t.equal(params.querystring, 'scroll=1m') + if (params.path === '/test/_search') { + t.equal(params.querystring, 'scroll=1m') + } else { + // @ts-expect-error + t.equal(JSON.parse(params.body).scroll, '1m') + } } if (count === 4) { // final automated clear @@ -69,10 +77,11 @@ test('Scroll search', async t => { const scrollSearch = client.helpers.scrollSearch({ index: 'test', - body: { foo: 'bar' } + query: { match_all: {} } }) for await (const result of scrollSearch) { + // @ts-expect-error t.equal(result.body.count, count) t.equal(result.body._scroll_id, 'id') } @@ -83,9 +92,10 @@ test('Clear a scroll search', async t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.notMatch(params.headers, { - 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${clientVersion},hc=${nodeVersion},h=s` + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=s` }) if (params.method === 'DELETE') { + // @ts-expect-error const body = JSON.parse(params.body) t.equal(body.scroll_id, 'id') } @@ -113,13 +123,14 @@ test('Clear a scroll search', async t => { const scrollSearch = client.helpers.scrollSearch({ index: 'test', - body: { foo: 'bar' } + query: { match_all: {} } }) for await (const result of scrollSearch) { if 
(count === 2) { t.fail('The scroll search should be cleared') } + // @ts-expect-error t.equal(result.body.count, count) if (count === 1) { await result.clear() @@ -166,13 +177,15 @@ test('Scroll search (retry)', async t => { const scrollSearch = client.helpers.scrollSearch({ index: 'test', - body: { foo: 'bar' } + query: { match_all: {} } }, { wait: 10 }) for await (const result of scrollSearch) { + // @ts-expect-error t.equal(result.body.count, count) + // @ts-expect-error t.not(result.body.count, 1) t.equal(result.body._scroll_id, 'id') } @@ -197,17 +210,18 @@ test('Scroll search (retry throws and maxRetries)', async t => { const scrollSearch = client.helpers.scrollSearch({ index: 'test', - body: { foo: 'bar' } + query: { match_all: {} } }, { wait: 10, ignore: [404] }) try { + // @ts-expect-error for await (const result of scrollSearch) { // eslint-disable-line t.fail('we should not be here') } - } catch (err) { + } catch (err: any) { t.ok(err instanceof errors.ResponseError) t.equal(err.statusCode, 429) t.equal(count, expectedAttempts) @@ -222,7 +236,14 @@ test('Scroll search (retry throws later)', async t => { onRequest (params) { count += 1 // filter_path should not be added if is not already present - t.equal(params.querystring, 'scroll=1m') + if (params.method === 'POST') { + if (params.path === '/test/_search') { + t.equal(params.querystring, 'scroll=1m') + } else { + // @ts-expect-error + t.equal(JSON.parse(params.body).scroll, '1m') + } + } if (count > 1) { return { body: {}, statusCode: 429 } } @@ -251,16 +272,17 @@ test('Scroll search (retry throws later)', async t => { const scrollSearch = client.helpers.scrollSearch({ index: 'test', - body: { foo: 'bar' } + query: { match_all: {} } }, { wait: 10 }) try { for await (const result of scrollSearch) { // eslint-disable-line + // @ts-expect-error t.equal(result.body.count, count) } - } catch (err) { + } catch (err: any) { t.ok(err instanceof errors.ResponseError) t.equal(err.statusCode, 429) t.equal(count, expectedAttempts) @@ -275,8 +297,7 @@ test('Scroll search documents', async t => { t.equal(params.querystring, 'filter_path=hits.hits._source%2C_scroll_id&scroll=1m') } else { if (params.method !== 'DELETE') { - t.equal(params.querystring, 'scroll=1m') - t.equal(params.body, '{"scroll_id":"id"}') + t.equal(params.body, '{"scroll":"1m","scroll_id":"id"}') } } return { @@ -304,7 +325,7 @@ test('Scroll search documents', async t => { const scrollSearch = client.helpers.scrollDocuments({ index: 'test', - body: { foo: 'bar' } + query: { match_all: {} } }) let n = 1 @@ -337,17 +358,18 @@ test('Should not retry if maxRetries = 0', async t => { const scrollSearch = client.helpers.scrollSearch({ index: 'test', - body: { foo: 'bar' } + query: { match_all: {} } }, { wait: 10, ignore: [404] }) try { + // @ts-expect-error for await (const result of scrollSearch) { // eslint-disable-line t.fail('we should not be here') } - } catch (err) { + } catch (err: any) { t.ok(err instanceof errors.ResponseError) t.equal(err.statusCode, 429) t.equal(count, expectedAttempts) @@ -359,10 +381,17 @@ test('Fix querystring for scroll search', async t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { if (count === 0) { - t.equal(params.querystring, 'size=1&scroll=1m') + t.equal(params.querystring, 'scroll=1m') } else { if (params.method !== 'DELETE') { - t.equal(params.querystring, 'scroll=1m') + if (params.method === 'POST') { + if (params.path === '/test/_search') { + t.equal(params.querystring, 'scroll=1m') + } else { + // 
@ts-expect-error + t.equal(JSON.parse(params.body).scroll, '1m') + } + } } } return { @@ -388,7 +417,7 @@ test('Fix querystring for scroll search', async t => { const scrollSearch = client.helpers.scrollSearch({ index: 'test', size: 1, - body: { foo: 'bar' } + query: { match_all: {} } }) for await (const response of scrollSearch) { diff --git a/test/unit/helpers/search.test.js b/test/unit/helpers/search.test.ts similarity index 74% rename from test/unit/helpers/search.test.js rename to test/unit/helpers/search.test.ts index bea39bc62..697237ef3 100644 --- a/test/unit/helpers/search.test.js +++ b/test/unit/helpers/search.test.ts @@ -17,10 +17,9 @@ * under the License. */ -'use strict' - -const { test } = require('tap') -const { Client, connection } = require('../../utils') +import { test } from 'tap' +import { Client } from '../../../' +import { connection } from '../../utils' test('Search should have an additional documents property', async t => { const MockConnection = connection.buildMockConnection({ @@ -47,7 +46,7 @@ test('Search should have an additional documents property', async t => { const result = await client.helpers.search({ index: 'test', - body: { foo: 'bar' } + query: { match_all: {} } }) t.same(result, [ { one: 'one' }, @@ -71,7 +70,7 @@ test('kGetHits fallback', async t => { const result = await client.helpers.search({ index: 'test', - body: { foo: 'bar' } + query: { match_all: {} } }) t.same(result, []) }) @@ -102,46 +101,11 @@ test('Merge filter paths (snake_case)', async t => { const result = await client.helpers.search({ index: 'test', filter_path: 'foo', - body: { foo: 'bar' } - }) - t.same(result, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) -}) - -test('Merge filter paths (camelCase)', async t => { - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.equal(params.querystring, 'filter_path=foo%2Chits.hits._source') - return { - body: { - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const result = await client.helpers.search({ - index: 'test', - filterPath: 'foo', - body: { foo: 'bar' } + query: { match_all: {} } }) t.same(result, [ { one: 'one' }, { two: 'two' }, { three: 'three' } ]) -}) +}) \ No newline at end of file diff --git a/test/unit/selectors.test.js b/test/unit/selectors.test.js deleted file mode 100644 index 54f3087c2..000000000 --- a/test/unit/selectors.test.js +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -const { test } = require('tap') -const { roundRobinSelector, randomSelector } = require('../../lib/Transport').internals - -test('RoundRobinSelector', t => { - const selector = roundRobinSelector() - const arr = [0, 1, 2, 3, 4, 5] - - t.plan(arr.length + 1) - for (let i = 0; i <= arr.length; i++) { - t.equal( - selector(arr), - i === arr.length ? arr[0] : arr[i] - ) - } -}) - -test('RandomSelector', t => { - t.plan(1) - const arr = [0, 1, 2, 3, 4, 5] - t.type(randomSelector(arr), 'number') -}) diff --git a/test/unit/serializer.test.js b/test/unit/serializer.test.js deleted file mode 100644 index edf29c880..000000000 --- a/test/unit/serializer.test.js +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { test } = require('tap') -const { stringify } = require('querystring') -const Serializer = require('../../lib/Serializer') -const { SerializationError, DeserializationError } = require('../../lib/errors') - -test('Basic', t => { - t.plan(2) - const s = new Serializer() - const obj = { hello: 'world' } - const json = JSON.stringify(obj) - t.equal(s.serialize(obj), json) - t.same(s.deserialize(json), obj) -}) - -test('ndserialize', t => { - t.plan(1) - const s = new Serializer() - const obj = [ - { hello: 'world' }, - { winter: 'is coming' }, - { you_know: 'for search' } - ] - t.equal( - s.ndserialize(obj), - JSON.stringify(obj[0]) + '\n' + - JSON.stringify(obj[1]) + '\n' + - JSON.stringify(obj[2]) + '\n' - ) -}) - -test('ndserialize (strings)', t => { - t.plan(1) - const s = new Serializer() - const obj = [ - JSON.stringify({ hello: 'world' }), - JSON.stringify({ winter: 'is coming' }), - JSON.stringify({ you_know: 'for search' }) - ] - t.equal( - s.ndserialize(obj), - obj[0] + '\n' + - obj[1] + '\n' + - obj[2] + '\n' - ) -}) - -test('qserialize', t => { - t.plan(1) - const s = new Serializer() - const obj = { - hello: 'world', - you_know: 'for search' - } - - t.equal( - s.qserialize(obj), - stringify(obj) - ) -}) - -test('qserialize (array)', t => { - t.plan(1) - const s = new Serializer() - const obj = { - hello: 'world', - arr: ['foo', 'bar'] - } - - t.equal( - s.qserialize(obj), - 'hello=world&arr=foo%2Cbar' - ) -}) - -test('qserialize (string)', t => { - t.plan(1) - const s = new Serializer() - const obj = { - hello: 'world', - you_know: 'for search' - } - - t.equal( - s.qserialize(stringify(obj)), - stringify(obj) - ) -}) - -test('qserialize (key with undefined value)', t => { - t.plan(1) - const s = new Serializer() - const obj = { - hello: 'world', - key: undefined, - foo: 'bar' - } - - t.equal( - s.qserialize(obj), - 'hello=world&foo=bar' - ) -}) - -test('SerializationError', t => { - t.plan(1) - const s = new Serializer() - const obj = { hello: 'world' } - 
obj.o = obj - try { - s.serialize(obj) - t.fail('Should fail') - } catch (err) { - t.ok(err instanceof SerializationError) - } -}) - -test('SerializationError ndserialize', t => { - t.plan(1) - const s = new Serializer() - try { - s.ndserialize({ hello: 'world' }) - t.fail('Should fail') - } catch (err) { - t.ok(err instanceof SerializationError) - } -}) - -test('DeserializationError', t => { - t.plan(1) - const s = new Serializer() - const json = '{"hello' - try { - s.deserialize(json) - t.fail('Should fail') - } catch (err) { - t.ok(err instanceof DeserializationError) - } -}) - -test('prototype poisoning protection', t => { - t.plan(2) - const s = new Serializer() - try { - s.deserialize('{"__proto__":{"foo":"bar"}}') - t.fail('Should fail') - } catch (err) { - t.ok(err instanceof DeserializationError) - } - - try { - s.deserialize('{"constructor":{"prototype":{"foo":"bar"}}}') - t.fail('Should fail') - } catch (err) { - t.ok(err instanceof DeserializationError) - } -}) - -test('disable prototype poisoning protection', t => { - t.plan(2) - const s = new Serializer({ disablePrototypePoisoningProtection: true }) - try { - s.deserialize('{"__proto__":{"foo":"bar"}}') - t.pass('Should not fail') - } catch (err) { - t.fail(err) - } - - try { - s.deserialize('{"constructor":{"prototype":{"foo":"bar"}}}') - t.pass('Should not fail') - } catch (err) { - t.fail(err) - } -}) - -test('disable prototype poisoning protection only for proto', t => { - t.plan(2) - const s = new Serializer({ disablePrototypePoisoningProtection: 'proto' }) - try { - s.deserialize('{"__proto__":{"foo":"bar"}}') - t.pass('Should not fail') - } catch (err) { - t.fail(err) - } - - try { - s.deserialize('{"constructor":{"prototype":{"foo":"bar"}}}') - t.fail('Should fail') - } catch (err) { - t.ok(err instanceof DeserializationError) - } -}) - -test('disable prototype poisoning protection only for constructor', t => { - t.plan(2) - const s = new Serializer({ disablePrototypePoisoningProtection: 'constructor' }) - try { - s.deserialize('{"__proto__":{"foo":"bar"}}') - t.fail('Should fail') - } catch (err) { - t.ok(err instanceof DeserializationError) - } - - try { - s.deserialize('{"constructor":{"prototype":{"foo":"bar"}}}') - t.pass('Should not fail') - } catch (err) { - t.fail(err) - } -}) diff --git a/test/unit/transport.test.js b/test/unit/transport.test.js deleted file mode 100644 index e1a6aaa9f..000000000 --- a/test/unit/transport.test.js +++ /dev/null @@ -1,2764 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const FakeTimers = require('@sinonjs/fake-timers') -const { createGunzip, gzipSync } = require('zlib') -const os = require('os') -const intoStream = require('into-stream') -const { - buildServer, - skipProductCheck, - connection: { MockConnection, MockConnectionTimeout, MockConnectionError } -} = require('../utils') -const { - NoLivingConnectionsError, - SerializationError, - DeserializationError, - TimeoutError, - ResponseError, - ConnectionError, - ConfigurationError, - RequestAbortedError -} = require('../../lib/errors') - -const ConnectionPool = require('../../lib/pool/ConnectionPool') -const Connection = require('../../lib/Connection') -const Serializer = require('../../lib/Serializer') -const Transport = require('../../lib/Transport') - -test('Basic', t => { - t.plan(2) - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Basic (promises support)', t => { - t.plan(1) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport - .request({ - method: 'GET', - path: '/hello' - }) - .then(({ body }) => { - t.same(body, { hello: 'world' }) - }) - .catch(t.fail) -}) - -test('Basic - failing (promises support)', t => { - t.plan(1) - - const pool = new ConnectionPool({ Connection: MockConnectionTimeout }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport - .request({ - method: 'GET', - path: '/hello' - }) - .catch(err => { - t.ok(err instanceof TimeoutError) - }) -}) - -test('Basic (options + promises support)', t => { - t.plan(1) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport - .request({ - method: 'GET', - path: '/hello' - }, { - requestTimeout: 1000 - }) - .then(({ body }) => { - t.same(body, { hello: 'world' }) - }) - .catch(t.fail) -}) - -test('Send POST', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json', - 'content-length': '17' - }) - let json = '' - req.setEncoding('utf8') - req.on('data', chunk => { json += chunk }) - 
req.on('error', err => t.fail(err)) - req.on('end', () => { - t.same(JSON.parse(json), { hello: 'world' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'POST', - path: '/hello', - body: { hello: 'world' } - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Send POST (ndjson)', t => { - t.plan(4) - - const bulkBody = [ - { hello: 'world' }, - { winter: 'is coming' }, - { you_know: 'for search' } - ] - - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/x-ndjson', - 'content-length': '67' - }) - let json = '' - req.setEncoding('utf8') - req.on('data', chunk => { json += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.equal( - json, - JSON.stringify(bulkBody[0]) + '\n' + - JSON.stringify(bulkBody[1]) + '\n' + - JSON.stringify(bulkBody[2]) + '\n' - ) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'POST', - path: '/hello', - bulkBody - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Send stream', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json' - }) - let json = '' - req.setEncoding('utf8') - req.on('data', chunk => { json += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.same(JSON.parse(json), { hello: 'world' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'POST', - path: '/hello', - body: intoStream(JSON.stringify({ hello: 'world' })) - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Send stream (bulkBody)', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/x-ndjson' - }) - let json = '' - req.setEncoding('utf8') - req.on('data', chunk => { json += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.same(JSON.parse(json), { hello: 'world' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - 
}) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'POST', - path: '/hello', - bulkBody: intoStream(JSON.stringify({ hello: 'world' })) - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Not JSON payload from server', t => { - t.plan(2) - function handler (req, res) { - res.setHeader('Content-Type', 'text/plain') - res.end('hello!') - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.equal(body, 'hello!') - server.stop() - }) - }) -}) - -test('NoLivingConnectionsError (null connection)', t => { - t.plan(3) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - nodeSelector (connections) { - t.equal(connections.length, 1) - t.ok(connections[0] instanceof Connection) - return null - } - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof NoLivingConnectionsError) - }) -}) - -test('NoLivingConnectionsError (undefined connection)', t => { - t.plan(3) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - nodeSelector (connections) { - t.equal(connections.length, 1) - t.ok(connections[0] instanceof Connection) - return undefined - } - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof NoLivingConnectionsError) - }) -}) - -test('SerializationError', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - const body = { hello: 'world' } - body.o = body - transport.request({ - method: 'POST', - path: '/hello', - body - }, (err, { body }) => { - t.ok(err instanceof SerializationError) - }) -}) - -test('SerializationError (bulk)', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - 
maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - const bulkBody = { hello: 'world' } - bulkBody.o = bulkBody - transport.request({ - method: 'POST', - path: '/hello', - bulkBody - }, (err, { body }) => { - t.ok(err instanceof SerializationError) - }) -}) - -test('DeserializationError', t => { - t.plan(1) - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end('{"hello)') - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof DeserializationError) - server.stop() - }) - }) -}) - -test('TimeoutError (should call markDead on the failing connection)', t => { - t.plan(2) - - class CustomConnectionPool extends ConnectionPool { - markDead (connection) { - t.equal(connection.id, 'node1') - super.markDead(connection) - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnectionTimeout }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 500, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - }) -}) - -test('ConnectionError (should call markDead on the failing connection)', t => { - t.plan(2) - - class CustomConnectionPool extends ConnectionPool { - markDead (connection) { - t.equal(connection.id, 'node1') - super.markDead(connection) - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnectionError }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof ConnectionError) - }) -}) - -test('Retry mechanism', t => { - t.plan(2) - - let count = 0 - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - if (count > 0) { - res.end(JSON.stringify({ hello: 'world' })) - } else { - res.statusCode = 504 - res.end(JSON.stringify({ error: true })) - } - count++ - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 1, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - 
t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Should not retry if the body is a stream', t => { - t.plan(2) - - let count = 0 - function handler (req, res) { - count++ - res.setHeader('Content-Type', 'application/json;utf=8') - res.statusCode = 504 - res.end(JSON.stringify({ error: true })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 1, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'POST', - path: '/hello', - body: intoStream(JSON.stringify({ hello: 'world' })) - }, (err, { body }) => { - t.ok(err instanceof ResponseError) - t.equal(count, 1) - server.stop() - }) - }) -}) - -test('Should not retry if the bulkBody is a stream', t => { - t.plan(2) - - let count = 0 - function handler (req, res) { - count++ - res.setHeader('Content-Type', 'application/json;utf=8') - res.statusCode = 504 - res.end(JSON.stringify({ error: true })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 1, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'POST', - path: '/hello', - bulkBody: intoStream(JSON.stringify([{ hello: 'world' }])) - }, (err, { body }) => { - t.ok(err instanceof ResponseError) - t.equal(count, 1) - server.stop() - }) - }) -}) - -test('No retry', t => { - t.plan(2) - - let count = 0 - function handler (req, res) { - count++ - res.setHeader('Content-Type', 'application/json;utf=8') - res.statusCode = 504 - res.end(JSON.stringify({ error: true })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'POST', - path: '/hello', - body: intoStream(JSON.stringify({ hello: 'world' })) - }, { - maxRetries: 0 - }, (err, { body }) => { - t.ok(err instanceof ResponseError) - t.equal(count, 1) - server.stop() - }) - }) -}) - -test('Custom retry mechanism', t => { - t.plan(2) - - let count = 0 - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - if (count > 0) { - res.end(JSON.stringify({ hello: 'world' })) - } else { - res.statusCode = 504 - res.end(JSON.stringify({ error: true })) - } - count++ - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - 
url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - maxRetries: 1 - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Should not retry on 429', t => { - t.plan(3) - - let count = 0 - function handler (req, res) { - t.equal(count++, 0) - res.statusCode = 429 - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 5, - requestTimeout: 250, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, result) => { - t.ok(err) - t.equal(err.statusCode, 429) - server.stop() - }) - }) -}) - -test('Should call markAlive with a successful response', t => { - t.plan(3) - - class CustomConnectionPool extends ConnectionPool { - markAlive (connection) { - t.equal(connection.id, 'node1') - super.markAlive(connection) - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnection }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - }) -}) - -test('Should call resurrect on every request', t => { - t.plan(5) - - class CustomConnectionPool extends ConnectionPool { - resurrect ({ now, requestId, name }) { - t.type(now, 'number') - t.type(requestId, 'number') - t.type(name, 'string') - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnection }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - name: 'elasticsearch-js' - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - }) -}) - -test('Should return a request aborter utility', t => { - t.plan(1) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - 
sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - const request = transport.request({ - method: 'GET', - path: '/hello' - }, (err, result) => { - t.ok(err instanceof RequestAbortedError) - }) - - request.abort() -}) - -test('Retry mechanism and abort', t => { - t.plan(1) - - function handler (req, res) { - setTimeout(() => { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }, 1000) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - let count = 0 - const transport = new Transport({ - emit: event => { - if (event === 'request' && count++ > 0) { - request.abort() - } - }, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 2, - requestTimeout: 100, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - const request = transport.request({ - method: 'GET', - path: '/hello' - }, (err, result) => { - t.ok(err instanceof RequestAbortedError) - server.stop() - }) - }) -}) - -test('Abort a request with the promise API', t => { - t.plan(1) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - const request = transport.request({ - method: 'GET', - path: '/hello' - }) - - request - .then(() => { - t.fail('Should not be called') - }) - .catch(err => { - t.ok(err instanceof RequestAbortedError) - }) - - request.abort() -}) - -test('ResponseError', t => { - t.plan(3) - - function handler (req, res) { - res.statusCode = 500 - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ status: 500 })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof ResponseError) - t.same(err.body, { status: 500 }) - t.equal(err.statusCode, 500) - server.stop() - }) - }) -}) - -test('Override requestTimeout', t => { - t.plan(2) - function handler (req, res) { - setTimeout(() => { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }, 1000) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 500, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - requestTimeout: 2000 - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 
'world' }) - server.stop() - }) - }) -}) - -test('sniff', t => { - t.test('sniffOnStart', t => { - t.plan(1) - - class MyTransport extends Transport { - sniff (opts) { - t.equal(opts.reason, Transport.sniffReasons.SNIFF_ON_START) - } - } - - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - // eslint-disable-next-line - new MyTransport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: true, - sniffEndpoint: '/sniff' - }) - }) - - t.test('sniffOnConnectionFault', t => { - t.plan(2) - - class MyTransport extends Transport { - sniff (opts) { - t.equal(opts.reason, Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT) - } - } - - const pool = new ConnectionPool({ Connection: MockConnectionTimeout }) - pool.addConnection('http://localhost:9200/') - - const transport = new MyTransport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 500, - sniffInterval: false, - sniffOnConnectionFault: true, - sniffEndpoint: '/sniff' - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/' - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - }) - }) - - t.test('sniffInterval', t => { - t.plan(6) - - const clock = FakeTimers.install({ toFake: ['Date'] }) - t.teardown(() => clock.uninstall()) - - class MyTransport extends Transport { - sniff (opts) { - t.equal(opts.reason, Transport.sniffReasons.SNIFF_INTERVAL) - } - } - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new MyTransport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 3000, - sniffInterval: 1, - sniffEndpoint: '/sniff' - }) - skipProductCheck(transport) - - const params = { method: 'GET', path: '/' } - clock.tick(100) - transport.request(params, t.error) - - clock.tick(200) - transport.request(params, t.error) - - clock.tick(300) - transport.request(params, t.error) - }) - - t.test('errored', t => { - t.plan(1) - - class CustomConnectionPool extends ConnectionPool { - nodesToHost () { - t.fail('This should not be called') - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnectionError }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 30000, - sniffInterval: false, - sniffEndpoint: '/sniff' - }) - skipProductCheck(transport) - - transport.sniff((err, hosts) => { - t.ok(err instanceof ConnectionError) - }) - }) - - t.end() -}) - -test(`Should mark as dead connections where the statusCode is 502/3/4 - and return a ResponseError if there are no more attempts`, t => { - ;[502, 503, 504].forEach(runTest) - - function runTest (statusCode) { - t.test(statusCode, t => { - t.plan(3) - - class CustomConnectionPool extends ConnectionPool { - markDead (connection) { - t.ok('called') - super.markDead(connection) - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - 
skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: `/${statusCode}` - }, (err, { body }) => { - t.ok(err instanceof ResponseError) - t.match(err, { - body: { hello: 'world' }, - headers: { 'content-type': 'application/json;utf=8' }, - statusCode: statusCode - }) - }) - }) - } - - t.end() -}) - -test('Should retry the request if the statusCode is 502/3/4', t => { - ;[502, 503, 504].forEach(runTest) - - function runTest (statusCode) { - t.test(statusCode, t => { - t.plan(3) - - let first = true - function handler (req, res) { - if (first) { - first = false - res.statusCode = statusCode - } - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - class CustomConnectionPool extends ConnectionPool { - markDead (connection) { - t.ok('called') - } - } - - buildServer(handler, ({ port }, server) => { - const pool = new CustomConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 1, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - } - - t.end() -}) - -test('Ignore status code', t => { - t.plan(4) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/404' - }, { - ignore: [404] - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - }) - - transport.request({ - method: 'GET', - path: '/404' - }, (err, { body }) => { - t.ok(err instanceof ResponseError) - }) - - transport.request({ - method: 'GET', - path: '/404' - }, { - ignore: [403, 405] - }, (err, { body }) => { - t.ok(err instanceof ResponseError) - }) -}) - -test('Should serialize the querystring', t => { - t.plan(2) - - function handler (req, res) { - t.equal(req.url, '/hello?hello=world&you_know=for%20search') - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello', - querystring: { - hello: 'world', - you_know: 'for search' - } - }, (err, { body }) => { - t.error(err) - server.stop() - }) - }) -}) - -test('timeout option', t => { - function handler (req, res) { - setTimeout(() => { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }, 1000) - } - - t.test('as number', t => { - t.test('global', t => { - t.plan(1) - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, 
- serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 500, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - server.stop() - }) - }) - }) - - t.test('custom', t => { - t.plan(1) - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - requestTimeout: 500 - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - server.stop() - }) - }) - }) - - t.end() - }) - - t.test('as string', t => { - t.test('global', t => { - t.plan(1) - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: '0.5s', - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - server.stop() - }) - }) - }) - - t.test('custom', t => { - t.plan(1) - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: '30s', - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - requestTimeout: '0.5s' - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - server.stop() - }) - }) - }) - - t.end() - }) - - t.end() -}) - -test('Should cast to boolean HEAD request', t => { - t.test('2xx response', t => { - t.plan(3) - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'HEAD', - path: '/200' - }, (err, { body, statusCode }) => { - t.error(err) - t.equal(statusCode, 200) - t.equal(body, true) - }) - }) - - t.test('404 response', t => { - t.plan(3) - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'HEAD', - path: '/404' - }, (err, { body, statusCode }) => { - t.error(err) - t.equal(statusCode, 404) - t.equal(body, false) - }) - }) - - t.test('4xx response', t => { - t.plan(3) - - const pool = new 
ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'HEAD', - path: '/400' - }, (err, { body, statusCode }) => { - t.ok(err instanceof ResponseError) - t.notOk(typeof err.body === 'boolean') - t.equal(statusCode, 400) - }) - }) - - t.test('5xx response', t => { - t.plan(3) - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'HEAD', - path: '/500' - }, (err, { body, statusCode }) => { - t.ok(err instanceof ResponseError) - t.notOk(typeof err.body === 'boolean') - t.equal(statusCode, 500) - }) - }) - - t.end() -}) - -test('Suggest compression', t => { - t.plan(3) - function handler (req, res) { - t.match(req.headers, { - 'accept-encoding': 'gzip,deflate' - }) - - const body = gzipSync(JSON.stringify({ hello: 'world' })) - res.setHeader('Content-Type', 'application/json;utf=8') - res.setHeader('Content-Encoding', 'gzip') - res.setHeader('Content-Length', Buffer.byteLength(body)) - res.end(body) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - suggestCompression: true - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Broken compression', t => { - t.plan(2) - function handler (req, res) { - t.match(req.headers, { - 'accept-encoding': 'gzip,deflate' - }) - - const body = gzipSync(JSON.stringify({ hello: 'world' })) - res.setHeader('Content-Type', 'application/json;utf=8') - res.setHeader('Content-Encoding', 'gzip') - // we are not setting the content length on purpose - res.end(body.slice(0, -5)) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - suggestCompression: true - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err) - server.stop() - }) - }) -}) - -test('Warning header', t => { - t.test('Single warning', t => { - t.plan(3) - - const warn = '112 - "cache down" "Wed, 21 Oct 2015 07:28:00 GMT"' - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.setHeader('Warning', warn) - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - 
const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { warnings }) => { - t.error(err) - t.same(warnings, [warn]) - warnings.forEach(w => t.type(w, 'string')) - server.stop() - }) - }) - }) - - t.test('Multiple warnings', t => { - t.plan(4) - - const warn1 = '112 - "cache down" "Wed, 21 Oct 2015 07:28:00 GMT"' - const warn2 = '199 agent "Error message" "2015-01-01"' - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.setHeader('Warning', warn1 + ',' + warn2) - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { warnings }) => { - t.error(err) - t.same(warnings, [warn1, warn2]) - warnings.forEach(w => t.type(w, 'string')) - server.stop() - }) - }) - }) - - t.test('No warnings', t => { - t.plan(2) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { warnings }) => { - t.error(err) - t.equal(warnings, null) - server.stop() - }) - }) - }) - - t.end() -}) - -test('asStream set to true', t => { - t.plan(3) - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - asStream: true - }, (err, { body, headers }) => { - t.error(err) - t.match(headers, { - connection: 'keep-alive', - 'content-type': 'application/json;utf=8' - }) - - let payload = '' - body.setEncoding('utf8') - body.on('data', chunk => { payload += chunk }) - body.on('error', err => t.fail(err)) - body.on('end', () => { - t.same(JSON.parse(payload), { hello: 'world' }) - server.stop() - }) - }) - }) -}) - -test('Compress request', t => { - t.test('gzip as request option', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json', - 'content-encoding': 'gzip' - }) - let json = '' - req - .pipe(createGunzip()) - .on('data', chunk => { json += chunk }) - .on('error', err => t.fail(err)) - .on('end', () 
=> { - t.same(JSON.parse(json), { you_know: 'for search' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ you_know: 'for search' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'POST', - path: '/hello', - body: { you_know: 'for search' } - }, { - compression: 'gzip' - }, (err, { body }) => { - t.error(err) - t.same(body, { you_know: 'for search' }) - server.stop() - }) - }) - }) - - t.test('gzip as transport option', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json', - 'content-encoding': 'gzip' - }) - let json = '' - req - .pipe(createGunzip()) - .on('data', chunk => { json += chunk }) - .on('error', err => t.fail(err)) - .on('end', () => { - t.same(JSON.parse(json), { you_know: 'for search' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ you_know: 'for search' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - compression: 'gzip' - }) - skipProductCheck(transport) - - transport.request({ - method: 'POST', - path: '/hello', - body: { you_know: 'for search' } - }, (err, { body }) => { - t.error(err) - t.same(body, { you_know: 'for search' }) - server.stop() - }) - }) - }) - - t.test('gzip stream body', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json', - 'content-encoding': 'gzip' - }) - let json = '' - req - .pipe(createGunzip()) - .on('data', chunk => { json += chunk }) - .on('error', err => t.fail(err)) - .on('end', () => { - t.same(JSON.parse(json), { you_know: 'for search' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ you_know: 'for search' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'POST', - path: '/hello', - body: intoStream(JSON.stringify({ you_know: 'for search' })) - }, { - compression: 'gzip' - }, (err, { body }) => { - t.error(err) - t.same(body, { you_know: 'for search' }) - server.stop() - }) - }) - }) - - t.test('Should throw on invalid compression value', t => { - t.plan(2) - - try { - new Transport({ // eslint-disable-line - emit: () => {}, - connectionPool: new ConnectionPool({ Connection }), - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - compression: 'deflate' - }) - t.fail('Should throw') - } catch (err) { - t.ok(err instanceof ConfigurationError) - t.equal(err.message, 'Invalid compression: 
\'deflate\'') - } - }) - - t.test('Should skip the compression for empty strings/null/undefined', t => { - t.plan(9) - - function handler (req, res) { - t.equal(req.headers['content-encoding'], undefined) - t.equal(req.headers['content-type'], undefined) - res.end() - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - compression: 'gzip', - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'DELETE', - path: '/hello', - body: '' - }, (err, { body }) => { - t.error(err) - transport.request({ - method: 'GET', - path: '/hello', - body: null - }, (err, { body }) => { - t.error(err) - transport.request({ - method: 'GET', - path: '/hello', - body: undefined - }, (err, { body }) => { - t.error(err) - server.stop() - }) - }) - }) - }) - }) - - t.test('Retry a gzipped body', t => { - t.plan(7) - - let count = 0 - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json', - 'content-encoding': 'gzip' - }) - let json = '' - req - .pipe(createGunzip()) - .on('data', chunk => { json += chunk }) - .on('error', err => t.fail(err)) - .on('end', () => { - t.same(JSON.parse(json), { you_know: 'for search' }) - res.setHeader('Content-Type', 'application/json;utf=8') - if (count++ > 0) { - res.end(JSON.stringify({ you_know: 'for search' })) - } else { - setTimeout(() => { - res.end(JSON.stringify({ you_know: 'for search' })) - }, 1000) - } - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 250, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'POST', - path: '/hello', - body: { you_know: 'for search' } - }, { - compression: 'gzip' - }, (err, { body, meta }) => { - t.error(err) - t.same(body, { you_know: 'for search' }) - t.equal(count, 2) - server.stop() - }) - }) - }) - - t.end() -}) - -test('Headers configuration', t => { - t.test('Global option', t => { - t.plan(3) - function handler (req, res) { - t.match(req.headers, { 'x-foo': 'bar' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - headers: { - 'x-foo': 'bar' - } - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Global option and custom option', t => { - t.plan(3) - function handler (req, res) { - t.match(req.headers, { - 'x-foo': 'bar', - 'x-baz': 'faz' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new 
ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - headers: { - 'x-foo': 'bar' - } - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - headers: { 'x-baz': 'faz' } - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Custom options should override global option', t => { - t.plan(3) - function handler (req, res) { - t.match(req.headers, { 'x-foo': 'faz' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - headers: { - 'x-foo': 'bar' - } - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - headers: { 'x-foo': 'faz' } - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.end() -}) - -test('nodeFilter and nodeSelector', t => { - t.plan(4) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - nodeFilter: () => { - t.ok('called') - return true - }, - nodeSelector: conns => { - t.ok('called') - return conns[0] - } - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - }) -}) - -test('Should accept custom querystring in the optons object', t => { - t.test('Options object', t => { - t.plan(3) - - function handler (req, res) { - t.equal(req.url, '/hello?foo=bar') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - querystring: { foo: 'bar' } - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Options object and params', t => { - t.plan(3) - - function handler (req, res) { - t.equal(req.url, '/hello?baz=faz&foo=bar') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - 
requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello', - querystring: { baz: 'faz' } - }, { - querystring: { foo: 'bar' } - }, (err, { body }) => { - t.error(err) - t.same(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.end() -}) - -test('Should add an User-Agent header', t => { - t.plan(2) - const clientVersion = require('../../package.json').version - const userAgent = `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${process.version})` - - function handler (req, res) { - t.match(req.headers, { - 'user-agent': userAgent - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Should pass request params and options to generateRequestId', t => { - t.plan(3) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const params = { method: 'GET', path: '/hello' } - const options = { context: { winter: 'is coming' } } - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - generateRequestId: function (requestParams, requestOptions) { - t.same(requestParams, params) - t.same(requestOptions, options) - return 'id' - } - }) - skipProductCheck(transport) - - transport.request(params, options, t.error) -}) - -test('Secure json parsing', t => { - t.test('__proto__ protection', t => { - t.plan(2) - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end('{"__proto__":{"a":1}}') - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof DeserializationError) - t.equal(err.message, 'Object contains forbidden prototype property') - server.stop() - }) - }) - }) - - t.test('constructor protection', t => { - t.plan(2) - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end('{"constructor":{"prototype":{"bar":"baz"}}}') - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: 
'/hello' - }, (err, { body }) => { - t.ok(err instanceof DeserializationError) - t.equal(err.message, 'Object contains forbidden prototype property') - server.stop() - }) - }) - }) - - t.end() -}) - -test('Lowercase headers utilty', t => { - t.plan(4) - const { lowerCaseHeaders } = Transport.internals - - t.same(lowerCaseHeaders({ - Foo: 'bar', - Faz: 'baz', - 'X-Hello': 'world' - }), { - foo: 'bar', - faz: 'baz', - 'x-hello': 'world' - }) - - t.same(lowerCaseHeaders({ - Foo: 'bar', - faz: 'baz', - 'X-hello': 'world' - }), { - foo: 'bar', - faz: 'baz', - 'x-hello': 'world' - }) - - t.equal(lowerCaseHeaders(null), null) - - t.equal(lowerCaseHeaders(undefined), undefined) -}) - -test('The callback with a sync error should be called in the next tick - json', t => { - t.plan(4) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - const body = { a: true } - body.o = body - - const transportReturn = transport.request({ - method: 'POST', - path: '/hello', - body - }, (err, { body }) => { - t.ok(err instanceof SerializationError) - }) - - t.type(transportReturn.then, 'function') - t.type(transportReturn.catch, 'function') - t.type(transportReturn.abort, 'function') -}) - -test('The callback with a sync error should be called in the next tick - ndjson', t => { - t.plan(4) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - const field = { a: true } - field.o = field - - const transportReturn = transport.request({ - method: 'POST', - path: '/hello', - bulkBody: [field] - }, (err, { body }) => { - t.ok(err instanceof SerializationError) - }) - - t.type(transportReturn.then, 'function') - t.type(transportReturn.catch, 'function') - t.type(transportReturn.abort, 'function') -}) - -test('Support mapbox vector tile', t => { - t.plan(2) - const mvtContent = 'GoMCCgRtZXRhEikSFAAAAQACAQMBBAAFAgYDBwAIBAkAGAMiDwkAgEAagEAAAP8//z8ADxoOX3NoYXJkcy5mYWlsZWQaD19zaGFyZHMuc2tpcHBlZBoSX3NoYXJkcy5zdWNjZXNzZnVsGg1fc2hhcmRzLnRvdGFsGhlhZ2dyZWdhdGlvbnMuX2NvdW50LmNvdW50GhdhZ2dyZWdhdGlvbnMuX2NvdW50LnN1bRoTaGl0cy50b3RhbC5yZWxhdGlvbhoQaGl0cy50b3RhbC52YWx1ZRoJdGltZWRfb3V0GgR0b29rIgIwACICMAIiCRkAAAAAAAAAACIECgJlcSICOAAogCB4Ag==' - - function handler (req, res) { - res.setHeader('Content-Type', 'application/vnd.mapbox-vector-tile') - res.end(Buffer.from(mvtContent, 'base64')) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.same(body.toString('base64'), Buffer.from(mvtContent, 'base64').toString('base64')) - server.stop() - }) - }) -}) - -test('Compressed mapbox vector tile', t => { - t.plan(2) - const mvtContent = 
'GoMCCgRtZXRhEikSFAAAAQACAQMBBAAFAgYDBwAIBAkAGAMiDwkAgEAagEAAAP8//z8ADxoOX3NoYXJkcy5mYWlsZWQaD19zaGFyZHMuc2tpcHBlZBoSX3NoYXJkcy5zdWNjZXNzZnVsGg1fc2hhcmRzLnRvdGFsGhlhZ2dyZWdhdGlvbnMuX2NvdW50LmNvdW50GhdhZ2dyZWdhdGlvbnMuX2NvdW50LnN1bRoTaGl0cy50b3RhbC5yZWxhdGlvbhoQaGl0cy50b3RhbC52YWx1ZRoJdGltZWRfb3V0GgR0b29rIgIwACICMAIiCRkAAAAAAAAAACIECgJlcSICOAAogCB4Ag==' - - function handler (req, res) { - const body = gzipSync(Buffer.from(mvtContent, 'base64')) - res.setHeader('Content-Type', 'application/vnd.mapbox-vector-tile') - res.setHeader('Content-Encoding', 'gzip') - res.setHeader('Content-Length', Buffer.byteLength(body)) - res.end(body) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - skipProductCheck(transport) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.same(body.toString('base64'), Buffer.from(mvtContent, 'base64').toString('base64')) - server.stop() - }) - }) -}) diff --git a/test/utils/MockConnection.js b/test/utils/MockConnection.js deleted file mode 100644 index 0719696af..000000000 --- a/test/utils/MockConnection.js +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */
-
-'use strict'
-
-const assert = require('assert')
-const { Connection } = require('../../index')
-const {
-  ConnectionError,
-  RequestAbortedError,
-  TimeoutError
-} = require('../../lib/errors')
-const intoStream = require('into-stream')
-
-class MockConnection extends Connection {
-  request (params, callback) {
-    let aborted = false
-    const stream = intoStream(JSON.stringify({ hello: 'world' }))
-    stream.statusCode = setStatusCode(params.path)
-    stream.headers = {
-      'content-type': 'application/json;utf=8',
-      date: new Date().toISOString(),
-      connection: 'keep-alive',
-      'content-length': '17'
-    }
-    process.nextTick(() => {
-      if (!aborted) {
-        callback(null, stream)
-      } else {
-        callback(new RequestAbortedError(), null)
-      }
-    })
-    return {
-      abort: () => { aborted = true }
-    }
-  }
-}
-
-class MockConnectionTimeout extends Connection {
-  request (params, callback) {
-    let aborted = false
-    process.nextTick(() => {
-      if (!aborted) {
-        callback(new TimeoutError('Request timed out', params), null)
-      } else {
-        callback(new RequestAbortedError(), null)
-      }
-    })
-    return {
-      abort: () => { aborted = true }
-    }
-  }
-}
-
-class MockConnectionError extends Connection {
-  request (params, callback) {
-    let aborted = false
-    process.nextTick(() => {
-      if (!aborted) {
-        callback(new ConnectionError('Kaboom'), null)
-      } else {
-        callback(new RequestAbortedError(), null)
-      }
-    })
-    return {
-      abort: () => { aborted = true }
-    }
-  }
-}
-
-class MockConnectionSniff extends Connection {
-  request (params, callback) {
-    let aborted = false
-    const sniffResult = {
-      nodes: {
-        'node-1': {
-          http: {
-            publish_address: 'localhost:9200'
-          },
-          roles: ['master', 'data', 'ingest']
-        },
-        'node-2': {
-          http: {
-            publish_address: 'localhost:9201'
-          },
-          roles: ['master', 'data', 'ingest']
-        }
-      }
-    }
-    const stream = intoStream(JSON.stringify(sniffResult))
-    stream.statusCode = setStatusCode(params.path)
-    stream.headers = {
-      'content-type': 'application/json;utf=8',
-      date: new Date().toISOString(),
-      connection: 'keep-alive',
-      'content-length': '191'
-    }
-    process.nextTick(() => {
-      if (!aborted) {
-        if (params.headers.timeout) {
-          callback(new TimeoutError('Request timed out', params), null)
-        } else {
-          callback(null, stream)
-        }
-      } else {
-        callback(new RequestAbortedError(), null)
-      }
-    })
-    return {
-      abort: () => { aborted = true }
-    }
-  }
-}
-
-function buildMockConnection (opts) {
-  assert(opts.onRequest, 'Missing required onRequest option')
-
-  class MockConnection extends Connection {
-    request (params, callback) {
-      let { body, statusCode, headers } = opts.onRequest(params)
-      if (typeof body !== 'string') {
-        body = JSON.stringify(body)
-      }
-      let aborted = false
-      const stream = intoStream(body)
-      stream.statusCode = statusCode || 200
-      stream.headers = {
-        'content-type': 'application/json;utf=8',
-        date: new Date().toISOString(),
-        connection: 'keep-alive',
-        'content-length': Buffer.byteLength(body),
-        ...headers
-      }
-      process.nextTick(() => {
-        if (!aborted) {
-          callback(null, stream)
-        } else {
-          callback(new RequestAbortedError(), null)
-        }
-      })
-      return {
-        abort: () => { aborted = true }
-      }
-    }
-  }
-
-  return MockConnection
-}
-
-function setStatusCode (path) {
-  const statusCode = Number(path.slice(1))
-  if (Number.isInteger(statusCode)) {
-    return statusCode
-  }
-  return 200
-}
-
-module.exports = {
-  MockConnection,
-  MockConnectionTimeout,
-  MockConnectionError,
-  MockConnectionSniff,
-  buildMockConnection
-}
diff --git a/test/utils/MockConnection.ts b/test/utils/MockConnection.ts
new file mode 100644
index 000000000..3bb508fed
--- /dev/null
+++ b/test/utils/MockConnection.ts
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import assert from 'assert'
+import * as http from 'http'
+import {
+  BaseConnection,
+  ConnectionRequestParams,
+  ConnectionRequestOptions,
+  ConnectionRequestResponse,
+  errors
+} from '@elastic/transport'
+const {
+  ConnectionError,
+  TimeoutError
+} = errors
+
+export class MockConnection extends BaseConnection {
+  request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise<ConnectionRequestResponse> {
+    return new Promise((resolve, reject) => {
+      const body = JSON.stringify({ hello: 'world' })
+      const statusCode = setStatusCode(params.path)
+      const headers = {
+        'content-type': 'application/json;utf=8',
+        date: new Date().toISOString(),
+        connection: 'keep-alive',
+        'content-length': '17',
+        'x-elastic-product': 'Elasticsearch'
+      }
+      process.nextTick(resolve, { body, statusCode, headers })
+    })
+  }
+}
+
+export class MockConnectionTimeout extends BaseConnection {
+  request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise<ConnectionRequestResponse> {
+    return new Promise((resolve, reject) => {
+      process.nextTick(reject, new TimeoutError('Request timed out'))
+    })
+  }
+}
+
+export class MockConnectionError extends BaseConnection {
+  request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise<ConnectionRequestResponse> {
+    return new Promise((resolve, reject) => {
+      process.nextTick(reject, new ConnectionError('kaboom'))
+    })
+  }
+}
+
+export class MockConnectionSniff extends BaseConnection {
+  request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise<ConnectionRequestResponse> {
+    return new Promise((resolve, reject) => {
+      const sniffResult = {
+        nodes: {
+          'node-1': {
+            http: {
+              publish_address: 'localhost:9200'
+            }
+          },
+          'node-2': {
+            http: {
+              publish_address: 'localhost:9201'
+            }
+          }
+        }
+      }
+      const body = JSON.stringify(sniffResult)
+      const statusCode = setStatusCode(params.path)
+      const headers = {
+        'content-type': 'application/json;utf=8',
+        date: new Date().toISOString(),
+        connection: 'keep-alive',
+        'content-length': '191',
+        'x-elastic-product': 'Elasticsearch'
+      }
+      if (params.headers?.timeout != null) {
+        process.nextTick(reject, new TimeoutError('Request timed out'))
+      } else {
+        process.nextTick(resolve, { body, statusCode, headers })
+      }
+    })
+  }
+}
+
+interface onRequestMock {
+  onRequest(opts: ConnectionRequestParams): { body: any, statusCode?: number, headers?: http.IncomingHttpHeaders }
+}
+export function buildMockConnection (opts: onRequestMock) {
+  assert(opts.onRequest, 'Missing required onRequest option')
+
+  class MockConnection extends BaseConnection {
+    request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise<ConnectionRequestResponse> {
+      return new Promise((resolve, reject) => {
+        params.headers = { ...this.headers, ...params.headers }
+        let { body, statusCode, headers } = opts.onRequest(params)
+        if (typeof body !== 'string' && !(body instanceof Buffer)) {
+          body = JSON.stringify(body)
+        }
+        statusCode = statusCode || 200
+        headers = {
+          'content-type': 'application/json;utf=8',
+          date: new Date().toISOString(),
+          connection: 'keep-alive',
+          'content-length': Buffer.byteLength(body) + '',
+          'x-elastic-product': 'Elasticsearch',
+          ...headers
+        }
+        process.nextTick(resolve, { body, statusCode, headers })
+      })
+    }
+  }
+
+  return MockConnection
+}
+
+function setStatusCode (path: string): number {
+  const statusCode = Number(path.slice(1))
+  if (Number.isInteger(statusCode)) {
+    return statusCode
+  }
+  return 200
+}
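For context, here is a minimal sketch of how a test might consume the buildMockConnection helper above; the import paths, the Client option name and the canned response are illustrative assumptions, not part of this patch:

    import { Client } from '../..'
    import { buildMockConnection } from './MockConnection'

    // Every request the client issues is answered by onRequest, so no real
    // Elasticsearch node is needed. Note that the helper already injects the
    // x-elastic-product header that the client's product check looks for.
    const MockConnection = buildMockConnection({
      onRequest (params) {
        return { statusCode: 200, body: { acknowledged: true } }
      }
    })

    const client = new Client({
      node: 'http://localhost:9200/',
      Connection: MockConnection
    })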
diff --git a/test/utils/buildCluster.js b/test/utils/buildCluster.js
deleted file mode 100644
index 95d20449e..000000000
--- a/test/utils/buildCluster.js
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-const debug = require('debug')('elasticsearch-test')
-const workq = require('workq')
-const buildServer = require('./buildServer')
-
-let id = 0
-function buildCluster (options, callback) {
-  const clusterId = id++
-  debug(`Booting cluster '${clusterId}'`)
-  if (typeof options === 'function') {
-    callback = options
-    options = {}
-  }
-
-  const q = workq()
-  const nodes = {}
-  const sniffResult = { nodes: {} }
-
-  options.numberOfNodes = options.numberOfNodes || 4
-  for (let i = 0; i < options.numberOfNodes; i++) {
-    q.add(bootNode, { id: `node${i}` })
-  }
-
-  function bootNode (q, opts, done) {
-    function handler (req, res) {
-      res.setHeader('content-type', 'application/json')
-      if (req.url === '/_nodes/_all/http') {
-        res.end(JSON.stringify(sniffResult))
-      } else {
-        res.end(JSON.stringify({ hello: 'world' }))
-      }
-    }
-
-    buildServer(options.handler || handler, ({ port }, server) => {
-      nodes[opts.id] = {
-        url: `http://127.0.0.1:${port}`,
-        server
-      }
-      sniffResult.nodes[opts.id] = {
-        http: {
-          publish_address: options.hostPublishAddress
-            ? `localhost/127.0.0.1:${port}`
-            : `127.0.0.1:${port}`
-        },
-        roles: ['master', 'data', 'ingest']
-      }
-      debug(`Booted cluster node '${opts.id}' on port ${port} (cluster id: '${clusterId}')`)
-      done()
-    })
-  }
-
-  function shutdown () {
-    debug(`Shutting down cluster '${clusterId}'`)
-    for (const id in nodes) {
-      kill(id)
-    }
-  }
-
-  function kill (id, callback) {
-    debug(`Shutting down cluster node '${id}' (cluster id: '${clusterId}')`)
-    const node = nodes[id]
-    delete nodes[id]
-    delete sniffResult.nodes[id]
-    node.server.stop(callback)
-  }
-
-  function spawn (id, callback) {
-    debug(`Spawning cluster node '${id}' (cluster id: '${clusterId}')`)
-    q.add(bootNode, { id })
-    q.add((q, done) => {
-      callback()
-      done()
-    })
-  }
-
-  const cluster = {
-    nodes,
-    shutdown,
-    kill,
-    spawn
-  }
-
-  q.drain(done => {
-    debug(`Cluster '${clusterId}' booted with ${options.numberOfNodes} nodes`)
-    callback(cluster)
-    done()
-  })
-}
-
-module.exports = buildCluster
diff --git a/test/utils/buildCluster.ts b/test/utils/buildCluster.ts
new file mode 100644
index 000000000..608fcc268
--- /dev/null
+++ b/test/utils/buildCluster.ts
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import Debug from 'debug'
+import * as http from 'http'
+import buildServer, { ServerHandler } from './buildServer'
+import { StoppableServer } from 'stoppable'
+
+interface BuildClusterOptions {
+  numberOfNodes?: number
+  handler?: ServerHandler
+  hostPublishAddress?: boolean
+}
+
+interface Node {
+  url: string
+  server: StoppableServer
+}
+
+interface Cluster {
+  nodes: Record<string, Node>,
+  shutdown(): Promise<void>,
+  kill(id: string): Promise<void>,
+  spawn(id: string): Promise<void>
+}
+
+interface SniffNode {
+  http: {
+    publish_address: string
+  },
+  roles: string[]
+}
+
+type SniffResult = Record<string, SniffNode>
+
+const debug = Debug('elasticsearch-test')
+let id = 0
+export default async function buildCluster (options: BuildClusterOptions): Promise<Cluster> {
+  const clusterId = id++
+  debug(`Booting cluster '${clusterId}'`)
+
+  const cluster: Cluster = {
+    nodes: {},
+    shutdown,
+    kill,
+    spawn
+  }
+
+  options.numberOfNodes = options.numberOfNodes || 4
+  for (let i = 0; i < options.numberOfNodes; i++) {
+    await bootNode(`node${i}`)
+  }
+
+  async function bootNode (id: string): Promise<void> {
+    const [{ port }, server] = await buildServer(options.handler ?? handler)
+    cluster.nodes[id] = {
+      url: `http://127.0.0.1:${port}`,
+      server
+    }
+  }
+
+  function handler (req: http.IncomingMessage, res: http.ServerResponse): void {
+    res.setHeader('content-type', 'application/json')
+    if (req.url === '/_nodes/_all/http') {
+      const sniffResult: SniffResult = Object.keys(cluster.nodes).reduce((acc: SniffResult, val: string) => {
+        const node = cluster.nodes[val]
+        acc[val] = {
+          http: {
+            publish_address: options.hostPublishAddress
+              ? `localhost/${node.url}`
+              : node.url
+          },
+          roles: ['master', 'data', 'ingest']
+        }
+        return acc
+      }, {})
+      res.end(JSON.stringify(sniffResult))
+    } else {
+      res.end(JSON.stringify({ hello: 'world' }))
+    }
+  }
+
+  async function shutdown (): Promise<void> {
+    debug(`Shutting down cluster '${clusterId}'`)
+    for (const id in cluster.nodes) {
+      await kill(id)
+    }
+  }
+
+  async function kill (id: string): Promise<void> {
+    debug(`Shutting down cluster node '${id}' (cluster id: '${clusterId}')`)
+    const node = cluster.nodes[id]
+    delete cluster.nodes[id]
+    node.server.stop()
+  }
+
+  async function spawn (id: string): Promise<void> {
+    debug(`Spawning cluster node '${id}' (cluster id: '${clusterId}')`)
+    await bootNode(id)
+  }
+
+  return cluster
+}
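A quick sketch of how this promise-based buildCluster is meant to be driven; node ids follow the node0, node1, … pattern generated above, and the four-node default comes from options.numberOfNodes:

    import buildCluster from './buildCluster'

    async function example () {
      const cluster = await buildCluster({ numberOfNodes: 3 })
      console.log(cluster.nodes.node0.url) // e.g. http://127.0.0.1:54321
      await cluster.kill('node1')          // stop a single node
      await cluster.spawn('node3')         // boot a replacement
      await cluster.shutdown()             // tear the whole cluster down
    }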
diff --git a/test/utils/buildProxy.js b/test/utils/buildProxy.js
deleted file mode 100644
index 442df4608..000000000
--- a/test/utils/buildProxy.js
+++ /dev/null
@@ -1,60 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information
-
-'use strict'
-
-const proxy = require('proxy')
-const { readFileSync } = require('fs')
-const { join } = require('path')
-const http = require('http')
-const https = require('https')
-
-const ssl = {
-  key: readFileSync(join(__dirname, '..', 'fixtures', 'https.key')),
-  cert: readFileSync(join(__dirname, '..', 'fixtures', 'https.cert'))
-}
-
-function createProxy () {
-  return new Promise((resolve, reject) => {
-    const server = proxy(http.createServer())
-    server.listen(0, '127.0.0.1', () => {
-      resolve(server)
-    })
-  })
-}
-
-function createSecureProxy () {
-  return new Promise((resolve, reject) => {
-    const server = proxy(https.createServer(ssl))
-    server.listen(0, '127.0.0.1', () => {
-      resolve(server)
-    })
-  })
-}
-
-function createServer (handler, callback) {
-  return new Promise((resolve, reject) => {
-    const server = http.createServer()
-    server.listen(0, '127.0.0.1', () => {
-      resolve(server)
-    })
-  })
-}
-
-function createSecureServer (handler, callback) {
-  return new Promise((resolve, reject) => {
-    const server = https.createServer(ssl)
-    server.listen(0, '127.0.0.1', () => {
-      resolve(server)
-    })
-  })
-}
-
-module.exports = {
-  ssl,
-  createProxy,
-  createSecureProxy,
-  createServer,
-  createSecureServer
-}
diff --git a/test/utils/buildProxy.ts b/test/utils/buildProxy.ts
new file mode 100644
index 000000000..37f58d55c
--- /dev/null
+++ b/test/utils/buildProxy.ts
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+// @ts-ignore
+import proxy from 'proxy'
+import { readFileSync } from 'fs'
+import { join } from 'path'
+import * as http from 'http'
+import * as https from 'https'
+
+export const ssl = {
+  key: readFileSync(join(__dirname, '..', 'fixtures', 'https.key')),
+  cert: readFileSync(join(__dirname, '..', 'fixtures', 'https.cert'))
+}
+
+type AuthenticateFn = (err: Error | null, valid: boolean) => void
+interface ProxyServer extends http.Server {
+  authenticate?(req: http.IncomingMessage, fn: AuthenticateFn): void
+}
+
+export function createProxy (): Promise<ProxyServer> {
+  return new Promise((resolve, reject) => {
+    const server = proxy(http.createServer())
+    server.listen(0, '127.0.0.1', () => {
+      resolve(server)
+    })
+  })
+}
+
+export function createSecureProxy (): Promise<ProxyServer> {
+  return new Promise((resolve, reject) => {
+    const server = proxy(https.createServer(ssl))
+    server.listen(0, '127.0.0.1', () => {
+      resolve(server)
+    })
+  })
+}
+
+export function createServer (): Promise<http.Server> {
+  return new Promise((resolve, reject) => {
+    const server = http.createServer()
+    server.listen(0, '127.0.0.1', () => {
+      resolve(server)
+    })
+  })
+}
+
+export function createSecureServer (): Promise<https.Server> {
+  return new Promise((resolve, reject) => {
+    const server = https.createServer(ssl)
+    server.listen(0, '127.0.0.1', () => {
+      resolve(server)
+    })
+  })
+}
diff --git a/test/utils/buildServer.js b/test/utils/buildServer.ts
similarity index 58%
rename from test/utils/buildServer.js
rename to test/utils/buildServer.ts
index ef907c05f..586f1b68f 100644
--- a/test/utils/buildServer.js
+++ b/test/utils/buildServer.ts
@@ -17,19 +17,18 @@
  * under the License.
  */
 
-'use strict'
+import { readFileSync } from 'fs'
+import crypto from 'crypto'
+import { join } from 'path'
+import https from 'https'
+import http from 'http'
+import Debug from 'debug'
+import stoppable, { StoppableServer } from 'stoppable'
 
-const crypto = require('crypto')
-const debug = require('debug')('elasticsearch-test')
-const stoppable = require('stoppable')
+const debug = Debug('elasticsearch-test')
 
 // allow self signed certificates for testing purposes
-process.env.NODE_TLS_REJECT_UNAUTHORIZED = 0
-
-const { readFileSync } = require('fs')
-const { join } = require('path')
-const https = require('https')
-const http = require('http')
+process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'
 
 const secureOpts = {
   key: readFileSync(join(__dirname, '..', 'fixtures', 'https.key'), 'utf8'),
@@ -43,46 +42,48 @@ const caFingerprint = getFingerprint(secureOpts.cert
     .join('')
 )
 
+export type ServerHandler = (req: http.IncomingMessage, res: http.ServerResponse) => void
+interface Options { secure?: boolean }
+type Server = [{ key: string, cert: string, port: number, caFingerprint: string }, StoppableServer]
+
 let id = 0
-function buildServer (handler, opts, cb) {
+export default function buildServer (handler: ServerHandler, opts: Options = {}): Promise<Server> {
   const serverId = id++
   debug(`Booting server '${serverId}'`)
-  if (cb == null) {
-    cb = opts
-    opts = {}
-  }
 
   const server = opts.secure
     ? stoppable(https.createServer(secureOpts))
     : stoppable(http.createServer())
 
-  server.on('request', handler)
+  server.on('request', (req, res) => {
+    res.setHeader('x-elastic-product', 'Elasticsearch')
+    handler(req, res)
+  })
+
   server.on('error', err => {
     console.log('http server error', err)
     process.exit(1)
   })
-  if (cb === undefined) {
-    return new Promise((resolve, reject) => {
-      server.listen(0, () => {
-        const port = server.address().port
-        debug(`Server '${serverId}' booted on port ${port}`)
-        resolve([Object.assign({}, secureOpts, { port, caFingerprint }), server])
-      })
-    })
-  } else {
+
+  return new Promise((resolve, reject) => {
     server.listen(0, () => {
+      // @ts-expect-error
      const port = server.address().port
      debug(`Server '${serverId}' booted on port ${port}`)
-      cb(Object.assign({}, secureOpts, { port }), server)
+      resolve([Object.assign({}, secureOpts, { port, caFingerprint }), server])
    })
-  }
+  })
 }
 
-function getFingerprint (content, inputEncoding = 'base64', outputEncoding = 'hex') {
+function getFingerprint (content: string, inputEncoding = 'base64', outputEncoding = 'hex'): string {
   const shasum = crypto.createHash('sha256')
+  // @ts-expect-error
   shasum.update(content, inputEncoding)
+  // @ts-expect-error
   const res = shasum.digest(outputEncoding)
-  return res.toUpperCase().match(/.{1,2}/g).join(':')
+  const arr = res.toUpperCase().match(/.{1,2}/g)
+  if (arr == null) {
+    throw new Error('Should produce a match')
+  }
+  return arr.join(':')
 }
-
-module.exports = buildServer
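And a sketch of the reworked buildServer contract, which now always returns a promise; the handler body here is an illustrative assumption:

    import buildServer from './buildServer'

    async function example () {
      const [{ port, caFingerprint }, server] = await buildServer((req, res) => {
        res.end('{"hello":"world"}')
      }, { secure: true })
      // the server answers on https://127.0.0.1:<port> with the bundled
      // self-signed certificate; caFingerprint is handy for pinning tests
      server.stop()
    }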
diff --git a/test/utils/index.js b/test/utils/index.js
deleted file mode 100644
index 1adff9f3b..000000000
--- a/test/utils/index.js
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-'use strict'
-
-const { promisify } = require('util')
-const sleep = promisify(setTimeout)
-const buildServer = require('./buildServer')
-const buildCluster = require('./buildCluster')
-const buildProxy = require('./buildProxy')
-const connection = require('./MockConnection')
-const { Client } = require('../../')
-
-async function waitCluster (client, waitForStatus = 'green', timeout = '50s', times = 0) {
-  if (!client) {
-    throw new Error('waitCluster helper: missing client instance')
-  }
-  try {
-    await client.cluster.health({ waitForStatus, timeout })
-  } catch (err) {
-    if (++times < 10) {
-      await sleep(5000)
-      return waitCluster(client, waitForStatus, timeout, times)
-    }
-    throw err
-  }
-}
-
-function skipProductCheck (client) {
-  const tSymbol = Object.getOwnPropertySymbols(client.transport || client)
-    .filter(symbol => symbol.description === 'product check')[0]
-  ;(client.transport || client)[tSymbol] = 2
-}
-
-class NoProductCheckClient extends Client {
-  constructor (opts) {
-    super(opts)
-    skipProductCheck(this)
-  }
-}
-
-module.exports = {
-  buildServer,
-  buildCluster,
-  buildProxy,
-  connection,
-  waitCluster,
-  skipProductCheck,
-  Client: NoProductCheckClient
-}
diff --git a/lib/pool/index.js b/test/utils/index.ts
similarity index 75%
rename from lib/pool/index.js
rename to test/utils/index.ts
index 59fc74b3e..62d5cc578 100644
--- a/lib/pool/index.js
+++ b/test/utils/index.ts
@@ -17,14 +17,14 @@
  * under the License.
  */
 
-'use strict'
+import buildServer from './buildServer'
+import * as connection from './MockConnection'
+import buildCluster from './buildCluster'
+import * as buildProxy from './buildProxy'
 
-const BaseConnectionPool = require('./BaseConnectionPool')
-const ConnectionPool = require('./ConnectionPool')
-const CloudConnectionPool = require('./CloudConnectionPool')
-
-module.exports = {
-  BaseConnectionPool,
-  ConnectionPool,
-  CloudConnectionPool
+export {
+  buildServer,
+  connection,
+  buildCluster,
+  buildProxy
 }
diff --git a/tsconfig.json b/tsconfig.json
new file mode 100644
index 000000000..26f91feff
--- /dev/null
+++ b/tsconfig.json
@@ -0,0 +1,37 @@
+{
+  "compilerOptions": {
+    "target": "es2019",
+    "module": "commonjs",
+    "moduleResolution": "node",
+    "declaration": true,
+    "pretty": true,
+    "noEmitOnError": true,
+    "strict": true,
+    "resolveJsonModule": true,
+    "removeComments": false,
+    "sourceMap": true,
+    "newLine": "lf",
+    "noUnusedLocals": true,
+    "noFallthroughCasesInSwitch": true,
+    "useDefineForClassFields": true,
+    "forceConsistentCasingInFileNames": true,
+    "skipLibCheck": true,
+    "esModuleInterop": true,
+    "isolatedModules": true,
+    "importHelpers": true,
+    "outDir": "lib",
+    "lib": [
+      "esnext"
+    ]
+  },
+  "formatCodeOptions": {
+    "identSize": 2,
+    "tabSize": 2
+  },
+  "exclude": [
+    "node_modules"
+  ],
+  "include": [
+    "./src/**/*.ts"
+  ]
+}

From a26692c7e40af6f6cebce396f6f22ba182a6f201 Mon Sep 17 00:00:00 2001
From: James Rodewig <40268737+jrodewig@users.noreply.github.com>
Date: Wed, 13 Oct 2021 08:53:30 -0400
Subject: [PATCH 076/647] [DOCS] Retitle Elasticsearch JavaScript Client doc
 book (#1564)

---
 docs/index.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/index.asciidoc b/docs/index.asciidoc
index 11ac4f3ec..959296357 100644
--- a/docs/index.asciidoc
+++ b/docs/index.asciidoc
@@ -1,4 +1,4 @@
-= Elasticsearch Node.js client
+= Elasticsearch JavaScript Client
 
 :branch: master
 include::{asciidoc-dir}/../../shared/attributes.asciidoc[]

From dfddfecfb21e1b808149892d4143efa18b5a7f61 Mon Sep 17 00:00:00 2001
From: Tomas Della Vedova Date: Thu, 14 Oct 2021 15:15:51 +0200 Subject: [PATCH 077/647] Fixes in custom Kibana type def (#1559) --- index.d.ts | 4 +- package.json | 2 +- src/Helpers.ts | 1 + src/api/types.ts | 260 ++++++++++++++++++++++-------------- src/api/typesWithBodyKey.ts | 260 ++++++++++++++++++++++-------------- 5 files changed, 330 insertions(+), 197 deletions(-) diff --git a/index.d.ts b/index.d.ts index a96aa2b92..9faaea9b1 100644 --- a/index.d.ts +++ b/index.d.ts @@ -17,7 +17,7 @@ * under the License. */ -import { errors } from '@elastic/transport' import Client from './lib/Client' -export { Client, errors } +export * from '@elastic/transport' +export { Client } diff --git a/package.json b/package.json index 94b29c66e..13b617da9 100644 --- a/package.json +++ b/package.json @@ -80,7 +80,7 @@ "xmlbuilder2": "^3.0.2" }, "dependencies": { - "@elastic/transport": "^0.0.6", + "@elastic/transport": "^0.0.7", "tslib": "^2.3.0" }, "tap": { diff --git a/src/Helpers.ts b/src/Helpers.ts index cc36b9841..ecb4f75b7 100644 --- a/src/Helpers.ts +++ b/src/Helpers.ts @@ -841,6 +841,7 @@ export default class Helpers { } else { onDrop({ status: responseItem.status, + // @ts-expect-error error: responseItem.error ?? null, operation: serializer.deserialize(bulkBody[indexSlice]), document: operation !== 'delete' diff --git a/src/api/types.ts b/src/api/types.ts index 12ea36bd0..a9eeea661 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -88,7 +88,7 @@ export interface BulkResponseItemBase { _id?: string | null _index: string status: integer - error?: ErrorCause + error?: ErrorCause | string _primary_term?: long result?: string _seq_no?: SequenceNumber @@ -473,7 +473,7 @@ export interface InfoResponse { } export interface MgetHit { - error?: MainError + error?: ErrorCause | string fields?: Record found?: boolean _id: Id @@ -630,7 +630,7 @@ export interface MtermvectorsTermVectorsResult { took?: long found?: boolean term_vectors?: Record - error?: ErrorCause + error?: ErrorCause | string } export interface OpenPointInTimeRequest extends RequestBase { @@ -870,12 +870,6 @@ export interface ScriptsPainlessExecutePainlessContextSetup { query: QueryDslQueryContainer } -export interface ScriptsPainlessExecutePainlessExecutionPosition { - offset: integer - start: integer - end: integer -} - export interface ScriptsPainlessExecuteRequest extends RequestBase { context?: string context_setup?: ScriptsPainlessExecutePainlessContextSetup @@ -998,11 +992,46 @@ export interface SearchAggregationProfile { time_in_nanos: long type: string debug?: SearchAggregationProfileDebug - children?: SearchAggregationProfileDebug[] + children?: SearchAggregationProfile[] } export interface SearchAggregationProfileDebug { - [key: string]: never + segments_with_multi_valued_ords?: integer + collection_strategy?: string + segments_with_single_valued_ords?: integer + total_buckets?: integer + built_buckets?: integer + result_strategy?: string + has_filter?: boolean + delegate?: string + delegate_debug?: SearchAggregationProfileDelegateDebug + chars_fetched?: integer + extract_count?: integer + extract_ns?: integer + values_fetched?: integer + collect_analyzed_ns?: integer + collect_analyzed_count?: integer + surviving_buckets?: integer + ordinals_collectors_used?: integer + ordinals_collectors_overhead_too_high?: integer + string_hashing_collectors_used?: integer + numeric_collectors_used?: integer + empty_collectors_used?: integer + deferred_aggregators?: string[] +} + +export interface SearchAggregationProfileDelegateDebug { + 
segments_with_doc_count_field?: integer + segments_with_deleted_docs?: integer + filters?: SearchAggregationProfileDelegateDebugFilter[] + segments_counted?: integer + segments_collected?: integer +} + +export interface SearchAggregationProfileDelegateDebugFilter { + results_from_metadata?: integer + query?: string + specialized_for?: string } export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' @@ -1056,6 +1085,29 @@ export interface SearchDocValueField { format?: string } +export interface SearchFetchProfile { + type: string + description: string + time_in_nanos: long + breakdown: SearchFetchProfileBreakdown + debug?: SearchFetchProfileDebug + children?: SearchFetchProfile[] +} + +export interface SearchFetchProfileBreakdown { + load_stored_fields?: integer + load_stored_fields_count?: integer + next_reader?: integer + next_reader_count?: integer + process_count?: integer + process?: integer +} + +export interface SearchFetchProfileDebug { + stored_fields?: string[] + fast_path?: integer +} + export interface SearchFieldAndFormat { field: Field format?: string @@ -1332,6 +1384,7 @@ export interface SearchShardProfile { aggregations: SearchAggregationProfile[] id: string searches: SearchSearchProfile[] + fetch?: SearchFetchProfile } export interface SearchSmoothingModelContainer { @@ -1464,7 +1517,7 @@ export type SearchMvtResponse = MapboxVectorTiles export type SearchMvtCoordinate = integer -export type SearchMvtGridType = 'grid' | 'point' +export type SearchMvtGridType = 'grid' | 'point' | 'centroid' export type SearchMvtZoomLevel = integer @@ -1713,7 +1766,7 @@ export interface AcknowledgedResponseBase { export type AggregateName = string export interface BulkIndexByScrollFailure { - cause: MainError + cause: ErrorCause | string id: Id index: IndexName status: integer @@ -1807,42 +1860,20 @@ export interface EmptyObject { export type EpochMillis = string | long -export interface ErrorCause { - type: string +export interface ErrorCauseKeys { + type?: string reason: string - caused_by?: ErrorCause - shard?: integer | string stack_trace?: string - root_cause?: ErrorCause[] - bytes_limit?: long - bytes_wanted?: long - column?: integer - col?: integer - failed_shards?: ShardFailure[] - grouped?: boolean - index?: IndexName - index_uuid?: Uuid - language?: string - licensed_expired_feature?: string - line?: integer - max_buckets?: integer - phase?: string - property_name?: string - processor_type?: string - resource_id?: Ids - 'resource.id'?: Ids - resource_type?: string - 'resource.type'?: string - script?: string - script_stack?: string[] - header?: HttpHeaders - lang?: string - position?: ScriptsPainlessExecutePainlessExecutionPosition + caused_by?: ErrorCause | string + root_cause: (ErrorCause | string)[] + suppressed?: (ErrorCause | string)[] } +export type ErrorCause = ErrorCauseKeys | +{ [property: string]: any } export interface ErrorResponseBase { - error: MainError | string - status?: integer + error: ErrorCause | string + status: integer } export type ExpandWildcardOptions = 'all' | 'open' | 'closed' | 'hidden' | 'none' @@ -1973,10 +2004,6 @@ export type Level = 'cluster' | 'indices' | 'shards' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' -export interface MainError extends ErrorCause { - headers?: Record -} - export type MapboxVectorTiles = ArrayBuffer export interface MergesStats { @@ -2043,7 +2070,7 @@ export interface NodeShard { } export interface NodeStatistics { - failures?: ErrorCause[] + failures?: (ErrorCause | string)[] total: 
integer successful: integer failed: integer @@ -2206,7 +2233,7 @@ export type ShapeRelation = 'intersects' | 'disjoint' | 'within' export interface ShardFailure { index?: IndexName node?: string - reason: ErrorCause + reason: ErrorCause | string shard: integer status?: string } @@ -2500,10 +2527,6 @@ export interface AggregationsBucketSortAggregation extends AggregationsAggregati sort?: SearchSort } -export interface AggregationsBucketsPath { - [key: string]: never -} - export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { precision_threshold?: integer rehash?: boolean @@ -2741,7 +2764,7 @@ export interface AggregationsGlobalAggregation extends AggregationsBucketAggrega } export interface AggregationsGoogleNormalizedDistanceHeuristic { - background_is_superset: boolean + background_is_superset?: boolean } export interface AggregationsHdrMethod { @@ -2889,6 +2912,8 @@ export interface AggregationsMissingAggregation extends AggregationsBucketAggreg missing?: AggregationsMissing } +export type AggregationsMissingOrder = 'first' | 'last' | 'default' + export interface AggregationsMovingAverageAggregation extends AggregationsPipelineAggregationBase { minimize?: boolean model?: AggregationsMovingAverageModel @@ -2926,8 +2951,8 @@ export interface AggregationsMultiTermsAggregation extends AggregationsBucketAgg } export interface AggregationsMutualInformationHeuristic { - background_is_superset: boolean - include_negatives: boolean + background_is_superset?: boolean + include_negatives?: boolean } export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase { @@ -2976,13 +3001,14 @@ export interface AggregationsPercentilesBucketAggregation extends AggregationsPi } export interface AggregationsPipelineAggregationBase extends AggregationsAggregation { - buckets_path?: AggregationsBucketsPath + buckets_path?: string | string[] | Record format?: string gap_policy?: AggregationsGapPolicy } export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase { field?: Field + missing?: integer ranges?: AggregationsAggregationRange[] script?: Script keyed?: boolean @@ -3173,6 +3199,7 @@ export interface AggregationsTermsAggregation extends AggregationsBucketAggregat include?: string | string[] | AggregationsTermsInclude min_doc_count?: integer missing?: AggregationsMissing + missing_order?: AggregationsMissingOrder missing_bucket?: boolean value_type?: string order?: AggregationsTermsAggregationOrder @@ -3437,7 +3464,7 @@ export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase { export interface AnalysisKuromojiAnalyzer { type: 'kuromoji' mode: AnalysisKuromojiTokenizationMode - user_dictionary: string + user_dictionary?: string } export interface AnalysisKuromojiPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { @@ -3494,6 +3521,10 @@ export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterB max_token_count: integer } +export interface AnalysisLowercaseNormalizer { + type: 'lowercase' +} + export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase { type: 'lowercase' language: string @@ -3506,7 +3537,7 @@ export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase { export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase { type: 'mapping' mappings: string[] - mappings_path: string + mappings_path?: string } export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase { @@ -3552,6 +3583,8 @@ export 
interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { user_dictionary_rules: string[] } +export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNormalizer + export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { type: 'path_hierarchy' buffer_size: integer @@ -3676,13 +3709,13 @@ export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { type: 'synonym' - expand: boolean - format: AnalysisSynonymFormat - lenient: boolean + expand?: boolean + format?: AnalysisSynonymFormat + lenient?: boolean synonyms: string[] - synonyms_path: string - tokenizer: string - updateable: boolean + synonyms_path?: string + tokenizer?: string + updateable?: boolean } export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' @@ -3880,7 +3913,8 @@ export interface MappingFieldAliasProperty extends MappingPropertyBase { } export interface MappingFieldMapping { - [key: string]: never + full_name: string + mapping: Record } export interface MappingFieldNamesField { @@ -3921,7 +3955,7 @@ export interface MappingGenericProperty extends MappingDocValuesPropertyBase { type: string } -export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'COUNTERCLOCKWISE' | 'ccw' | 'CCW' | 'left' | 'LEFT' | 'clockwise' | 'CLOCKWISE' | 'cw' | 'CW' +export type MappingGeoOrientation = 'right' | 'left' export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean @@ -3960,6 +3994,7 @@ export interface MappingIpProperty extends MappingDocValuesPropertyBase { boost?: double index?: boolean null_value?: string + ignore_malformed?: boolean type: 'ip' } @@ -4085,13 +4120,11 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase type: 'search_as_you_type' } -export type MappingShapeOrientation = 'right' | 'counterclockwise' | 'ccw' | 'left' | 'clockwise' | 'cw' - export interface MappingShapeProperty extends MappingDocValuesPropertyBase { coerce?: boolean ignore_malformed?: boolean ignore_z_value?: boolean - orientation?: MappingShapeOrientation + orientation?: MappingGeoOrientation type: 'shape' } @@ -4163,6 +4196,7 @@ export interface MappingTypeMapping { _size?: MappingSizeField _source?: MappingSourceField runtime?: Record + enabled?: boolean } export interface MappingVersionProperty extends MappingDocValuesPropertyBase { @@ -4241,6 +4275,8 @@ export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { gte?: DateMath lt?: DateMath lte?: DateMath + from?: DateMath + to?: DateMath format?: DateFormat time_zone?: TimeZone } @@ -4321,7 +4357,7 @@ export interface QueryDslFuzzyQuery extends QueryDslQueryBase { rewrite?: MultiTermQueryRewrite transpositions?: boolean fuzziness?: Fuzziness - value: string + value: string | double | boolean } export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { @@ -4595,6 +4631,8 @@ export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { gte?: double lt?: double lte?: double + from?: double + to?: double } export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { @@ -4622,9 +4660,15 @@ export interface QueryDslPercolateQuery extends QueryDslQueryBase { version?: VersionNumber } +export interface QueryDslPinnedDoc { + _id: Id + _index: IndexName +} + export interface QueryDslPinnedQuery extends QueryDslQueryBase { - ids: Id[] organic: 
QueryDslQueryContainer + ids?: Id[] + docs?: QueryDslPinnedDoc[] } export interface QueryDslPrefixQuery extends QueryDslQueryBase { @@ -4648,7 +4692,7 @@ export interface QueryDslQueryContainer { distance_feature?: QueryDslDistanceFeatureQuery exists?: QueryDslExistsQuery function_score?: QueryDslFunctionScoreQuery - fuzzy?: Record + fuzzy?: Record geo_bounding_box?: QueryDslGeoBoundingBoxQuery geo_distance?: QueryDslGeoDistanceQuery geo_polygon?: QueryDslGeoPolygonQuery @@ -6853,7 +6897,7 @@ export interface CcrFollowIndexStats { } export interface CcrReadException { - exception: ErrorCause + exception: ErrorCause | string from_seq_no: SequenceNumber retries: integer } @@ -6862,7 +6906,7 @@ export interface CcrShardStats { bytes_read: long failed_read_requests: long failed_write_requests: long - fatal_exception?: ErrorCause + fatal_exception?: ErrorCause | string follower_aliases_version: VersionNumber follower_global_checkpoint: long follower_index: string @@ -7058,7 +7102,7 @@ export interface CcrStatsAutoFollowStats { number_of_failed_follow_indices: long number_of_failed_remote_cluster_state_requests: long number_of_successful_follow_indices: long - recent_auto_follow_errors: ErrorCause[] + recent_auto_follow_errors: (ErrorCause | string)[] } export interface CcrStatsAutoFollowedCluster { @@ -8014,8 +8058,9 @@ export interface IlmAction { } export interface IlmPhase { - actions: Record | string[] + actions?: Record | string[] min_age?: Time + configurations?: Record> } export interface IlmPhases { @@ -8170,6 +8215,10 @@ export interface IndicesAliasDefinition { search_routing?: string } +export interface IndicesDataStream { + hidden?: boolean +} + export type IndicesDataStreamHealthStatus = 'green' | 'yellow' | 'red' export interface IndicesFielddataFrequencyFilter { @@ -8215,15 +8264,10 @@ export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replica export interface IndicesIndexSettingBlocks { read_only?: boolean - 'index.blocks.read_only'?: boolean read_only_allow_delete?: boolean - 'index.blocks.read_only_allow_delete'?: boolean read?: boolean - 'index.blocks.read'?: boolean write?: boolean | string - 'index.blocks.write'?: boolean | string metadata?: boolean - 'index.blocks.metadata'?: boolean } export interface IndicesIndexSettings { @@ -8267,6 +8311,16 @@ export interface IndicesIndexSettings { 'index.max_shingle_diff'?: integer blocks?: IndicesIndexSettingBlocks 'index.blocks'?: IndicesIndexSettingBlocks + 'blocks.read_only'?: boolean + 'index.blocks.read_only'?: boolean + 'blocks.read_only_allow_delete'?: boolean + 'index.blocks.read_only_allow_delete'?: boolean + 'blocks.read'?: boolean + 'index.blocks.read'?: boolean + 'blocks.write'?: boolean | string + 'index.blocks.write'?: boolean | string + 'blocks.metadata'?: boolean + 'index.blocks.metadata'?: boolean max_refresh_listeners?: integer 'index.max_refresh_listeners'?: integer 'analyze.max_token_count'?: integer @@ -8287,6 +8341,8 @@ export interface IndicesIndexSettings { 'index.final_pipeline'?: PipelineName lifecycle?: IndicesIndexSettingsLifecycle 'index.lifecycle'?: IndicesIndexSettingsLifecycle + 'lifecycle.name'?: string + 'index.lifecycle.name'?: string provided_name?: Name 'index.provided_name'?: Name creation_date?: DateString @@ -8309,13 +8365,14 @@ export interface IndicesIndexSettings { 'index.priority'?: integer | string top_metrics_max_size?: integer analysis?: IndicesIndexSettingsAnalysis + settings?: IndicesIndexSettings } export interface IndicesIndexSettingsAnalysis { analyzer?: 
Record char_filter?: Record filter?: Record - normalizer?: Record + normalizer?: Record } export interface IndicesIndexSettingsLifecycle { @@ -8513,9 +8570,10 @@ export interface IndicesCreateRequest extends RequestBase { settings?: Record } -export interface IndicesCreateResponse extends AcknowledgedResponseBase { +export interface IndicesCreateResponse { index: IndexName shards_acknowledged: boolean + acknowledged?: boolean } export interface IndicesCreateDataStreamRequest extends RequestBase { @@ -8779,7 +8837,7 @@ export interface IndicesGetFieldMappingTypeFieldMappings { export interface IndicesGetIndexTemplateIndexTemplate { index_patterns: Name[] composed_of: Name[] - template: IndicesGetIndexTemplateIndexTemplateSummary + template?: IndicesGetIndexTemplateIndexTemplateSummary version?: VersionNumber priority?: long _meta?: Metadata @@ -8907,7 +8965,7 @@ export interface IndicesPutIndexTemplateRequest extends RequestBase { index_patterns?: Indices composed_of?: Name[] template?: IndicesPutIndexTemplateIndexTemplateMapping - data_stream?: EmptyObject + data_stream?: IndicesDataStream priority?: integer version?: VersionNumber _meta?: Metadata @@ -11957,7 +12015,7 @@ export interface MonitoringBulkRequest extends RequestBase { } export interface MonitoringBulkResponse { - error?: ErrorCause + error?: ErrorCause | string errors: boolean ignored: boolean took: long @@ -13385,7 +13443,7 @@ export interface SecurityInvalidateApiKeyRequest extends RequestBase { export interface SecurityInvalidateApiKeyResponse { error_count: integer - error_details?: ErrorCause[] + error_details?: (ErrorCause | string)[] invalidated_api_keys: string[] previously_invalidated_api_keys: string[] } @@ -13399,7 +13457,7 @@ export interface SecurityInvalidateTokenRequest extends RequestBase { export interface SecurityInvalidateTokenResponse { error_count: long - error_details?: ErrorCause[] + error_details?: (ErrorCause | string)[] invalidated_tokens: long previously_invalidated_tokens: long } @@ -13869,7 +13927,7 @@ export interface SnapshotGetResponse { export interface SnapshotGetSnapshotResponseItem { repository: Name snapshots?: SnapshotSnapshotInfo[] - error?: ErrorCause + error?: ErrorCause | string } export interface SnapshotGetRepositoryRequest extends RequestBase { @@ -14057,7 +14115,7 @@ export interface TasksCancelRequest extends RequestBase { } export interface TasksCancelResponse { - node_failures?: ErrorCause[] + node_failures?: (ErrorCause | string)[] nodes: Record } @@ -14071,7 +14129,7 @@ export interface TasksGetResponse { completed: boolean task: TasksInfo response?: TasksStatus - error?: ErrorCause + error?: ErrorCause | string } export interface TasksListRequest extends RequestBase { @@ -14085,7 +14143,7 @@ export interface TasksListRequest extends RequestBase { } export interface TasksListResponse { - node_failures?: ErrorCause[] + node_failures?: (ErrorCause | string)[] nodes?: Record tasks?: Record | TasksInfo[] } @@ -15000,7 +15058,7 @@ export interface XpackInfoFeatures { spatial: XpackInfoFeature sql: XpackInfoFeature transform: XpackInfoFeature - vectors: XpackInfoFeature + vectors?: XpackInfoFeature voting_only: XpackInfoFeature watcher: XpackInfoFeature } @@ -15029,6 +15087,14 @@ export interface XpackInfoResponse { tagline: string } +export interface XpackUsageAllJobs { + count: integer + detectors: Record + created_by: Record + model_size: Record + forecasts: Record +} + export interface XpackUsageAnalytics extends XpackUsageBase { stats: XpackUsageAnalyticsStatistics } @@ 
-15177,7 +15243,7 @@ export interface XpackUsageKibanaUrlConfig extends XpackUsageBaseUrlConfig { export interface XpackUsageMachineLearning extends XpackUsageBase { datafeeds: Record - jobs: Record + jobs: Record | Record node_count: integer data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs inference: XpackUsageMlInference @@ -15291,7 +15357,7 @@ export interface XpackUsageResponse { slm: XpackUsageSlm sql: XpackUsageSql transform: XpackUsageBase - vectors: XpackUsageVector + vectors?: XpackUsageVector voting_only: XpackUsageBase } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 9dbb12f26..2671958fd 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -89,7 +89,7 @@ export interface BulkResponseItemBase { _id?: string | null _index: string status: integer - error?: ErrorCause + error?: ErrorCause | string _primary_term?: long result?: string _seq_no?: SequenceNumber @@ -496,7 +496,7 @@ export interface InfoResponse { } export interface MgetHit { - error?: MainError + error?: ErrorCause | string fields?: Record found?: boolean _id: Id @@ -662,7 +662,7 @@ export interface MtermvectorsTermVectorsResult { took?: long found?: boolean term_vectors?: Record - error?: ErrorCause + error?: ErrorCause | string } export interface OpenPointInTimeRequest extends RequestBase { @@ -914,12 +914,6 @@ export interface ScriptsPainlessExecutePainlessContextSetup { query: QueryDslQueryContainer } -export interface ScriptsPainlessExecutePainlessExecutionPosition { - offset: integer - start: integer - end: integer -} - export interface ScriptsPainlessExecuteRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -1067,11 +1061,46 @@ export interface SearchAggregationProfile { time_in_nanos: long type: string debug?: SearchAggregationProfileDebug - children?: SearchAggregationProfileDebug[] + children?: SearchAggregationProfile[] } export interface SearchAggregationProfileDebug { - [key: string]: never + segments_with_multi_valued_ords?: integer + collection_strategy?: string + segments_with_single_valued_ords?: integer + total_buckets?: integer + built_buckets?: integer + result_strategy?: string + has_filter?: boolean + delegate?: string + delegate_debug?: SearchAggregationProfileDelegateDebug + chars_fetched?: integer + extract_count?: integer + extract_ns?: integer + values_fetched?: integer + collect_analyzed_ns?: integer + collect_analyzed_count?: integer + surviving_buckets?: integer + ordinals_collectors_used?: integer + ordinals_collectors_overhead_too_high?: integer + string_hashing_collectors_used?: integer + numeric_collectors_used?: integer + empty_collectors_used?: integer + deferred_aggregators?: string[] +} + +export interface SearchAggregationProfileDelegateDebug { + segments_with_doc_count_field?: integer + segments_with_deleted_docs?: integer + filters?: SearchAggregationProfileDelegateDebugFilter[] + segments_counted?: integer + segments_collected?: integer +} + +export interface SearchAggregationProfileDelegateDebugFilter { + results_from_metadata?: integer + query?: string + specialized_for?: string } export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' @@ -1125,6 +1154,29 @@ export interface SearchDocValueField { format?: string } +export interface SearchFetchProfile { + type: string + description: string + time_in_nanos: long + breakdown: SearchFetchProfileBreakdown + debug?: SearchFetchProfileDebug + children?: 
SearchFetchProfile[] +} + +export interface SearchFetchProfileBreakdown { + load_stored_fields?: integer + load_stored_fields_count?: integer + next_reader?: integer + next_reader_count?: integer + process_count?: integer + process?: integer +} + +export interface SearchFetchProfileDebug { + stored_fields?: string[] + fast_path?: integer +} + export interface SearchFieldAndFormat { field: Field format?: string @@ -1401,6 +1453,7 @@ export interface SearchShardProfile { aggregations: SearchAggregationProfile[] id: string searches: SearchSearchProfile[] + fetch?: SearchFetchProfile } export interface SearchSmoothingModelContainer { @@ -1541,7 +1594,7 @@ export type SearchMvtResponse = MapboxVectorTiles export type SearchMvtCoordinate = integer -export type SearchMvtGridType = 'grid' | 'point' +export type SearchMvtGridType = 'grid' | 'point' | 'centroid' export type SearchMvtZoomLevel = integer @@ -1809,7 +1862,7 @@ export interface AcknowledgedResponseBase { export type AggregateName = string export interface BulkIndexByScrollFailure { - cause: MainError + cause: ErrorCause | string id: Id index: IndexName status: integer @@ -1903,42 +1956,20 @@ export interface EmptyObject { export type EpochMillis = string | long -export interface ErrorCause { - type: string +export interface ErrorCauseKeys { + type?: string reason: string - caused_by?: ErrorCause - shard?: integer | string stack_trace?: string - root_cause?: ErrorCause[] - bytes_limit?: long - bytes_wanted?: long - column?: integer - col?: integer - failed_shards?: ShardFailure[] - grouped?: boolean - index?: IndexName - index_uuid?: Uuid - language?: string - licensed_expired_feature?: string - line?: integer - max_buckets?: integer - phase?: string - property_name?: string - processor_type?: string - resource_id?: Ids - 'resource.id'?: Ids - resource_type?: string - 'resource.type'?: string - script?: string - script_stack?: string[] - header?: HttpHeaders - lang?: string - position?: ScriptsPainlessExecutePainlessExecutionPosition + caused_by?: ErrorCause | string + root_cause: (ErrorCause | string)[] + suppressed?: (ErrorCause | string)[] } +export type ErrorCause = ErrorCauseKeys | +{ [property: string]: any } export interface ErrorResponseBase { - error: MainError | string - status?: integer + error: ErrorCause | string + status: integer } export type ExpandWildcardOptions = 'all' | 'open' | 'closed' | 'hidden' | 'none' @@ -2069,10 +2100,6 @@ export type Level = 'cluster' | 'indices' | 'shards' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' -export interface MainError extends ErrorCause { - headers?: Record -} - export type MapboxVectorTiles = ArrayBuffer export interface MergesStats { @@ -2139,7 +2166,7 @@ export interface NodeShard { } export interface NodeStatistics { - failures?: ErrorCause[] + failures?: (ErrorCause | string)[] total: integer successful: integer failed: integer @@ -2302,7 +2329,7 @@ export type ShapeRelation = 'intersects' | 'disjoint' | 'within' export interface ShardFailure { index?: IndexName node?: string - reason: ErrorCause + reason: ErrorCause | string shard: integer status?: string } @@ -2596,10 +2623,6 @@ export interface AggregationsBucketSortAggregation extends AggregationsAggregati sort?: SearchSort } -export interface AggregationsBucketsPath { - [key: string]: never -} - export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { precision_threshold?: integer rehash?: boolean @@ -2837,7 +2860,7 @@ export interface 
AggregationsGlobalAggregation extends AggregationsBucketAggrega } export interface AggregationsGoogleNormalizedDistanceHeuristic { - background_is_superset: boolean + background_is_superset?: boolean } export interface AggregationsHdrMethod { @@ -2985,6 +3008,8 @@ export interface AggregationsMissingAggregation extends AggregationsBucketAggreg missing?: AggregationsMissing } +export type AggregationsMissingOrder = 'first' | 'last' | 'default' + export interface AggregationsMovingAverageAggregation extends AggregationsPipelineAggregationBase { minimize?: boolean model?: AggregationsMovingAverageModel @@ -3022,8 +3047,8 @@ export interface AggregationsMultiTermsAggregation extends AggregationsBucketAgg } export interface AggregationsMutualInformationHeuristic { - background_is_superset: boolean - include_negatives: boolean + background_is_superset?: boolean + include_negatives?: boolean } export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase { @@ -3072,13 +3097,14 @@ export interface AggregationsPercentilesBucketAggregation extends AggregationsPi } export interface AggregationsPipelineAggregationBase extends AggregationsAggregation { - buckets_path?: AggregationsBucketsPath + buckets_path?: string | string[] | Record format?: string gap_policy?: AggregationsGapPolicy } export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase { field?: Field + missing?: integer ranges?: AggregationsAggregationRange[] script?: Script keyed?: boolean @@ -3269,6 +3295,7 @@ export interface AggregationsTermsAggregation extends AggregationsBucketAggregat include?: string | string[] | AggregationsTermsInclude min_doc_count?: integer missing?: AggregationsMissing + missing_order?: AggregationsMissingOrder missing_bucket?: boolean value_type?: string order?: AggregationsTermsAggregationOrder @@ -3533,7 +3560,7 @@ export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase { export interface AnalysisKuromojiAnalyzer { type: 'kuromoji' mode: AnalysisKuromojiTokenizationMode - user_dictionary: string + user_dictionary?: string } export interface AnalysisKuromojiPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { @@ -3590,6 +3617,10 @@ export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterB max_token_count: integer } +export interface AnalysisLowercaseNormalizer { + type: 'lowercase' +} + export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase { type: 'lowercase' language: string @@ -3602,7 +3633,7 @@ export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase { export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase { type: 'mapping' mappings: string[] - mappings_path: string + mappings_path?: string } export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase { @@ -3648,6 +3679,8 @@ export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { user_dictionary_rules: string[] } +export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNormalizer + export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { type: 'path_hierarchy' buffer_size: integer @@ -3772,13 +3805,13 @@ export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { type: 'synonym' - expand: boolean - format: AnalysisSynonymFormat - lenient: boolean + expand?: boolean + format?: AnalysisSynonymFormat + lenient?: boolean 
synonyms: string[] - synonyms_path: string - tokenizer: string - updateable: boolean + synonyms_path?: string + tokenizer?: string + updateable?: boolean } export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' @@ -3976,7 +4009,8 @@ export interface MappingFieldAliasProperty extends MappingPropertyBase { } export interface MappingFieldMapping { - [key: string]: never + full_name: string + mapping: Record } export interface MappingFieldNamesField { @@ -4017,7 +4051,7 @@ export interface MappingGenericProperty extends MappingDocValuesPropertyBase { type: string } -export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'COUNTERCLOCKWISE' | 'ccw' | 'CCW' | 'left' | 'LEFT' | 'clockwise' | 'CLOCKWISE' | 'cw' | 'CW' +export type MappingGeoOrientation = 'right' | 'left' export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean @@ -4056,6 +4090,7 @@ export interface MappingIpProperty extends MappingDocValuesPropertyBase { boost?: double index?: boolean null_value?: string + ignore_malformed?: boolean type: 'ip' } @@ -4181,13 +4216,11 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase type: 'search_as_you_type' } -export type MappingShapeOrientation = 'right' | 'counterclockwise' | 'ccw' | 'left' | 'clockwise' | 'cw' - export interface MappingShapeProperty extends MappingDocValuesPropertyBase { coerce?: boolean ignore_malformed?: boolean ignore_z_value?: boolean - orientation?: MappingShapeOrientation + orientation?: MappingGeoOrientation type: 'shape' } @@ -4259,6 +4292,7 @@ export interface MappingTypeMapping { _size?: MappingSizeField _source?: MappingSourceField runtime?: Record + enabled?: boolean } export interface MappingVersionProperty extends MappingDocValuesPropertyBase { @@ -4337,6 +4371,8 @@ export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { gte?: DateMath lt?: DateMath lte?: DateMath + from?: DateMath + to?: DateMath format?: DateFormat time_zone?: TimeZone } @@ -4417,7 +4453,7 @@ export interface QueryDslFuzzyQuery extends QueryDslQueryBase { rewrite?: MultiTermQueryRewrite transpositions?: boolean fuzziness?: Fuzziness - value: string + value: string | double | boolean } export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { @@ -4691,6 +4727,8 @@ export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { gte?: double lt?: double lte?: double + from?: double + to?: double } export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { @@ -4718,9 +4756,15 @@ export interface QueryDslPercolateQuery extends QueryDslQueryBase { version?: VersionNumber } +export interface QueryDslPinnedDoc { + _id: Id + _index: IndexName +} + export interface QueryDslPinnedQuery extends QueryDslQueryBase { - ids: Id[] organic: QueryDslQueryContainer + ids?: Id[] + docs?: QueryDslPinnedDoc[] } export interface QueryDslPrefixQuery extends QueryDslQueryBase { @@ -4744,7 +4788,7 @@ export interface QueryDslQueryContainer { distance_feature?: QueryDslDistanceFeatureQuery exists?: QueryDslExistsQuery function_score?: QueryDslFunctionScoreQuery - fuzzy?: Record + fuzzy?: Record geo_bounding_box?: QueryDslGeoBoundingBoxQuery geo_distance?: QueryDslGeoDistanceQuery geo_polygon?: QueryDslGeoPolygonQuery @@ -6953,7 +6997,7 @@ export interface CcrFollowIndexStats { } export interface CcrReadException { - exception: ErrorCause + exception: ErrorCause | string from_seq_no: SequenceNumber 
retries: integer } @@ -6962,7 +7006,7 @@ export interface CcrShardStats { bytes_read: long failed_read_requests: long failed_write_requests: long - fatal_exception?: ErrorCause + fatal_exception?: ErrorCause | string follower_aliases_version: VersionNumber follower_global_checkpoint: long follower_index: string @@ -7170,7 +7214,7 @@ export interface CcrStatsAutoFollowStats { number_of_failed_follow_indices: long number_of_failed_remote_cluster_state_requests: long number_of_successful_follow_indices: long - recent_auto_follow_errors: ErrorCause[] + recent_auto_follow_errors: (ErrorCause | string)[] } export interface CcrStatsAutoFollowedCluster { @@ -8150,8 +8194,9 @@ export interface IlmAction { } export interface IlmPhase { - actions: Record | string[] + actions?: Record | string[] min_age?: Time + configurations?: Record> } export interface IlmPhases { @@ -8313,6 +8358,10 @@ export interface IndicesAliasDefinition { search_routing?: string } +export interface IndicesDataStream { + hidden?: boolean +} + export type IndicesDataStreamHealthStatus = 'green' | 'yellow' | 'red' export interface IndicesFielddataFrequencyFilter { @@ -8358,15 +8407,10 @@ export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replica export interface IndicesIndexSettingBlocks { read_only?: boolean - 'index.blocks.read_only'?: boolean read_only_allow_delete?: boolean - 'index.blocks.read_only_allow_delete'?: boolean read?: boolean - 'index.blocks.read'?: boolean write?: boolean | string - 'index.blocks.write'?: boolean | string metadata?: boolean - 'index.blocks.metadata'?: boolean } export interface IndicesIndexSettings { @@ -8410,6 +8454,16 @@ export interface IndicesIndexSettings { 'index.max_shingle_diff'?: integer blocks?: IndicesIndexSettingBlocks 'index.blocks'?: IndicesIndexSettingBlocks + 'blocks.read_only'?: boolean + 'index.blocks.read_only'?: boolean + 'blocks.read_only_allow_delete'?: boolean + 'index.blocks.read_only_allow_delete'?: boolean + 'blocks.read'?: boolean + 'index.blocks.read'?: boolean + 'blocks.write'?: boolean | string + 'index.blocks.write'?: boolean | string + 'blocks.metadata'?: boolean + 'index.blocks.metadata'?: boolean max_refresh_listeners?: integer 'index.max_refresh_listeners'?: integer 'analyze.max_token_count'?: integer @@ -8430,6 +8484,8 @@ export interface IndicesIndexSettings { 'index.final_pipeline'?: PipelineName lifecycle?: IndicesIndexSettingsLifecycle 'index.lifecycle'?: IndicesIndexSettingsLifecycle + 'lifecycle.name'?: string + 'index.lifecycle.name'?: string provided_name?: Name 'index.provided_name'?: Name creation_date?: DateString @@ -8452,13 +8508,14 @@ export interface IndicesIndexSettings { 'index.priority'?: integer | string top_metrics_max_size?: integer analysis?: IndicesIndexSettingsAnalysis + settings?: IndicesIndexSettings } export interface IndicesIndexSettingsAnalysis { analyzer?: Record char_filter?: Record filter?: Record - normalizer?: Record + normalizer?: Record } export interface IndicesIndexSettingsLifecycle { @@ -8665,9 +8722,10 @@ export interface IndicesCreateRequest extends RequestBase { } } -export interface IndicesCreateResponse extends AcknowledgedResponseBase { +export interface IndicesCreateResponse { index: IndexName shards_acknowledged: boolean + acknowledged?: boolean } export interface IndicesCreateDataStreamRequest extends RequestBase { @@ -8931,7 +8989,7 @@ export interface IndicesGetFieldMappingTypeFieldMappings { export interface IndicesGetIndexTemplateIndexTemplate { index_patterns: Name[] composed_of: Name[] 
- template: IndicesGetIndexTemplateIndexTemplateSummary + template?: IndicesGetIndexTemplateIndexTemplateSummary version?: VersionNumber priority?: long _meta?: Metadata @@ -9064,7 +9122,7 @@ export interface IndicesPutIndexTemplateRequest extends RequestBase { index_patterns?: Indices composed_of?: Name[] template?: IndicesPutIndexTemplateIndexTemplateMapping - data_stream?: EmptyObject + data_stream?: IndicesDataStream priority?: integer version?: VersionNumber _meta?: Metadata @@ -12262,7 +12320,7 @@ export interface MonitoringBulkRequest extends RequestBase { } export interface MonitoringBulkResponse { - error?: ErrorCause + error?: ErrorCause | string errors: boolean ignored: boolean took: long @@ -13721,7 +13779,7 @@ export interface SecurityInvalidateApiKeyRequest extends RequestBase { export interface SecurityInvalidateApiKeyResponse { error_count: integer - error_details?: ErrorCause[] + error_details?: (ErrorCause | string)[] invalidated_api_keys: string[] previously_invalidated_api_keys: string[] } @@ -13738,7 +13796,7 @@ export interface SecurityInvalidateTokenRequest extends RequestBase { export interface SecurityInvalidateTokenResponse { error_count: long - error_details?: ErrorCause[] + error_details?: (ErrorCause | string)[] invalidated_tokens: long previously_invalidated_tokens: long } @@ -14232,7 +14290,7 @@ export interface SnapshotGetResponse { export interface SnapshotGetSnapshotResponseItem { repository: Name snapshots?: SnapshotSnapshotInfo[] - error?: ErrorCause + error?: ErrorCause | string } export interface SnapshotGetRepositoryRequest extends RequestBase { @@ -14432,7 +14490,7 @@ export interface TasksCancelRequest extends RequestBase { } export interface TasksCancelResponse { - node_failures?: ErrorCause[] + node_failures?: (ErrorCause | string)[] nodes: Record } @@ -14446,7 +14504,7 @@ export interface TasksGetResponse { completed: boolean task: TasksInfo response?: TasksStatus - error?: ErrorCause + error?: ErrorCause | string } export interface TasksListRequest extends RequestBase { @@ -14460,7 +14518,7 @@ export interface TasksListRequest extends RequestBase { } export interface TasksListResponse { - node_failures?: ErrorCause[] + node_failures?: (ErrorCause | string)[] nodes?: Record tasks?: Record | TasksInfo[] } @@ -15388,7 +15446,7 @@ export interface XpackInfoFeatures { spatial: XpackInfoFeature sql: XpackInfoFeature transform: XpackInfoFeature - vectors: XpackInfoFeature + vectors?: XpackInfoFeature voting_only: XpackInfoFeature watcher: XpackInfoFeature } @@ -15417,6 +15475,14 @@ export interface XpackInfoResponse { tagline: string } +export interface XpackUsageAllJobs { + count: integer + detectors: Record + created_by: Record + model_size: Record + forecasts: Record +} + export interface XpackUsageAnalytics extends XpackUsageBase { stats: XpackUsageAnalyticsStatistics } @@ -15565,7 +15631,7 @@ export interface XpackUsageKibanaUrlConfig extends XpackUsageBaseUrlConfig { export interface XpackUsageMachineLearning extends XpackUsageBase { datafeeds: Record - jobs: Record + jobs: Record | Record node_count: integer data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs inference: XpackUsageMlInference @@ -15679,7 +15745,7 @@ export interface XpackUsageResponse { slm: XpackUsageSlm sql: XpackUsageSql transform: XpackUsageBase - vectors: XpackUsageVector + vectors?: XpackUsageVector voting_only: XpackUsageBase } From 8fffae2f76f80183ed3fab924e4a0a926536809d Mon Sep 17 00:00:00 2001 From: delvedor Date: Fri, 15 Oct 2021 09:06:41 +0200 Subject: 
[PATCH 078/647] Bumped v8.0.0-canary.23 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 13b617da9..6bcaff965 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", "version": "8.0.0", - "versionCanary": "8.0.0-canary.21", + "versionCanary": "8.0.0-canary.23", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 070b9d75b939c99a552147e80336aff828318617 Mon Sep 17 00:00:00 2001 From: Mikhail Shustov Date: Mon, 18 Oct 2021 13:21:37 +0300 Subject: [PATCH 079/647] pre-8.0 improvements (#1567) --- index.d.ts | 3 +- index.js | 34 +++++++++++++++++-- package.json | 2 +- src/api/kibana.ts | 6 ++-- src/{Client.ts => client.ts} | 4 +-- src/{Helpers.ts => helpers.ts} | 5 +-- ...ffingTransport.ts => sniffingTransport.ts} | 0 7 files changed, 40 insertions(+), 14 deletions(-) rename src/{Client.ts => client.ts} (99%) rename src/{Helpers.ts => helpers.ts} (99%) rename src/{SniffingTransport.ts => sniffingTransport.ts} (100%) diff --git a/index.d.ts b/index.d.ts index 9faaea9b1..05de61f8d 100644 --- a/index.d.ts +++ b/index.d.ts @@ -17,7 +17,8 @@ * under the License. */ -import Client from './lib/Client' +import Client from './lib/client' export * from '@elastic/transport' export { Client } +export type { ClientOptions, NodeOptions } from './lib/client' diff --git a/index.js b/index.js index 8a45b40a5..9b39327fb 100644 --- a/index.js +++ b/index.js @@ -19,7 +19,35 @@ 'use strict' -const { errors } = require('@elastic/transport') -const { default: Client } = require('./lib/Client') +const { + Diagnostic, + Transport, + WeightedConnectionPool, + ClusterConnectionPool, + BaseConnectionPool, + CloudConnectionPool, + BaseConnection, + HttpConnection, + UndiciConnection, + Serializer, + errors, + events +} = require('@elastic/transport') -module.exports = { Client, errors } +const { default: Client } = require('./lib/client') + +module.exports = { + Client, + Diagnostic, + Transport, + WeightedConnectionPool, + ClusterConnectionPool, + BaseConnectionPool, + CloudConnectionPool, + BaseConnection, + HttpConnection, + UndiciConnection, + Serializer, + errors, + events +} diff --git a/package.json b/package.json index 6bcaff965..b4a0c2eab 100644 --- a/package.json +++ b/package.json @@ -80,7 +80,7 @@ "xmlbuilder2": "^3.0.2" }, "dependencies": { - "@elastic/transport": "^0.0.7", + "@elastic/transport": "^0.0.9", "tslib": "^2.3.0" }, "tap": { diff --git a/src/api/kibana.ts b/src/api/kibana.ts index fbd2b61b8..8d8e99013 100644 --- a/src/api/kibana.ts +++ b/src/api/kibana.ts @@ -35,9 +35,9 @@ import { } from '@elastic/transport' import * as T from './types' import * as TB from './typesWithBodyKey' -import SniffingTransport from '../SniffingTransport' -import Helpers from '../Helpers' -import { ClientOptions } from '../Client' +import SniffingTransport from '../sniffingTransport' +import Helpers from '../helpers' +import { ClientOptions } from '../client' interface KibanaClient { diagnostic: Diagnostic diff --git a/src/Client.ts b/src/client.ts similarity index 99% rename from src/Client.ts rename to src/client.ts index 7c6e7768e..a22843c9e 100644 --- a/src/Client.ts +++ b/src/client.ts @@ -43,8 +43,8 @@ import { Context } from '@elastic/transport/lib/types' import BaseConnection, { prepareHeaders } from '@elastic/transport/lib/connection/BaseConnection' -import SniffingTransport from './SniffingTransport' -import Helpers from './Helpers' +import 
SniffingTransport from './sniffingTransport' +import Helpers from './helpers' import API from './api' const kChild = Symbol('elasticsearchjs-child') diff --git a/src/Helpers.ts b/src/helpers.ts similarity index 99% rename from src/Helpers.ts rename to src/helpers.ts index ecb4f75b7..8c3e7c9c4 100644 --- a/src/Helpers.ts +++ b/src/helpers.ts @@ -24,7 +24,7 @@ import assert from 'assert' import { promisify } from 'util' import { Readable } from 'stream' import { errors, TransportResult, TransportRequestOptions, TransportRequestOptionsWithMeta } from '@elastic/transport' -import Client from './Client' +import Client from './client' import * as T from './api/types' export interface HelpersOptions { @@ -194,7 +194,6 @@ export default class Helpers { } assert(response !== undefined, 'The response is undefined, please file a bug report') if (response.statusCode === 429) { - // @ts-expect-error throw new ResponseError(response) } @@ -235,7 +234,6 @@ export default class Helpers { await sleep(wait) } if (response.statusCode === 429) { - // @ts-expect-error throw new ResponseError(response) } } @@ -498,7 +496,6 @@ export default class Helpers { // @ts-expect-error addDocumentsGetter(result) if (response.status != null && response.status >= 400) { - // @ts-expect-error callbacks[i](new ResponseError(result), result) } else { callbacks[i](null, result) diff --git a/src/SniffingTransport.ts b/src/sniffingTransport.ts similarity index 100% rename from src/SniffingTransport.ts rename to src/sniffingTransport.ts From 4492532727055da684f56e5af411af539f20cb80 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Mon, 18 Oct 2021 16:01:55 +0200 Subject: [PATCH 080/647] Update unknown parameter handling. (#1568) --- index.js | 52 +- src/api/api/async_search.ts | 46 +- src/api/api/autoscaling.ts | 44 +- src/api/api/bulk.ts | 8 +- src/api/api/cat.ts | 300 ++------ src/api/api/ccr.ts | 148 ++-- src/api/api/clear_scroll.ts | 10 +- src/api/api/close_point_in_time.ts | 10 +- src/api/api/cluster.ts | 172 ++--- src/api/api/count.ts | 10 +- src/api/api/create.ts | 8 +- src/api/api/dangling_indices.ts | 36 +- src/api/api/delete.ts | 12 +- src/api/api/delete_by_query.ts | 10 +- src/api/api/delete_by_query_rethrottle.ts | 12 +- src/api/api/delete_script.ts | 12 +- src/api/api/enrich.ts | 58 +- src/api/api/eql.ts | 46 +- src/api/api/exists.ts | 12 +- src/api/api/exists_source.ts | 12 +- src/api/api/explain.ts | 10 +- src/api/api/features.ts | 24 +- src/api/api/field_caps.ts | 10 +- src/api/api/fleet.ts | 10 +- src/api/api/get.ts | 12 +- src/api/api/get_script.ts | 12 +- src/api/api/get_script_context.ts | 12 +- src/api/api/get_script_languages.ts | 12 +- src/api/api/get_source.ts | 12 +- src/api/api/graph.ts | 10 +- src/api/api/ilm.ts | 126 +--- src/api/api/index.ts | 8 +- src/api/api/indices.ts | 588 +++++---------- src/api/api/info.ts | 12 +- src/api/api/ingest.ts | 68 +- src/api/api/license.ts | 82 +-- src/api/api/logstash.ts | 32 +- src/api/api/mget.ts | 10 +- src/api/api/migration.ts | 56 +- src/api/api/ml.ts | 760 +++++++------------- src/api/api/monitoring.ts | 8 +- src/api/api/msearch.ts | 8 +- src/api/api/msearch_template.ts | 8 +- src/api/api/mtermvectors.ts | 10 +- src/api/api/nodes.ts | 78 +- src/api/api/open_point_in_time.ts | 12 +- src/api/api/ping.ts | 12 +- src/api/api/put_script.ts | 10 +- src/api/api/rank_eval.ts | 10 +- src/api/api/reindex.ts | 10 +- src/api/api/reindex_rethrottle.ts | 12 +- src/api/api/render_search_template.ts | 10 +- src/api/api/rollup.ts | 100 +-- 
src/api/api/scripts_painless_execute.ts | 10 +- src/api/api/scroll.ts | 12 +- src/api/api/search.ts | 10 +- src/api/api/search_mvt.ts | 10 +- src/api/api/search_shards.ts | 12 +- src/api/api/search_template.ts | 10 +- src/api/api/searchable_snapshots.ts | 44 +- src/api/api/security.ts | 474 ++++-------- src/api/api/shutdown.ts | 36 +- src/api/api/slm.ts | 106 +-- src/api/api/snapshot.ts | 134 ++-- src/api/api/sql.ts | 60 +- src/api/api/ssl.ts | 12 +- src/api/api/tasks.ts | 36 +- src/api/api/terms_enum.ts | 10 +- src/api/api/termvectors.ts | 10 +- src/api/api/text_structure.ts | 8 +- src/api/api/transform.ts | 116 ++- src/api/api/update.ts | 10 +- src/api/api/update_by_query.ts | 10 +- src/api/api/update_by_query_rethrottle.ts | 12 +- src/api/api/watcher.ts | 126 +--- src/api/api/xpack.ts | 24 +- src/api/kibana.ts | 18 +- src/api/types.ts | 202 +++--- src/api/typesWithBodyKey.ts | 232 +++--- src/helpers.ts | 2 +- test/integration/helper.js | 1 + test/integration/integration/test-runner.js | 2 +- test/unit/api.test.ts | 136 ++++ test/unit/helpers/scroll.test.ts | 4 +- 84 files changed, 1884 insertions(+), 3177 deletions(-) create mode 100644 test/unit/api.test.ts diff --git a/index.js b/index.js index 9b39327fb..57068f4b2 100644 --- a/index.js +++ b/index.js @@ -20,34 +20,34 @@ 'use strict' const { - Diagnostic, - Transport, - WeightedConnectionPool, - ClusterConnectionPool, - BaseConnectionPool, - CloudConnectionPool, - BaseConnection, - HttpConnection, - UndiciConnection, - Serializer, - errors, - events + Diagnostic, + Transport, + WeightedConnectionPool, + ClusterConnectionPool, + BaseConnectionPool, + CloudConnectionPool, + BaseConnection, + HttpConnection, + UndiciConnection, + Serializer, + errors, + events } = require('@elastic/transport') const { default: Client } = require('./lib/client') -module.exports = { - Client, - Diagnostic, - Transport, - WeightedConnectionPool, - ClusterConnectionPool, - BaseConnectionPool, - CloudConnectionPool, - BaseConnection, - HttpConnection, - UndiciConnection, - Serializer, - errors, - events +module.exports = { + Client, + Diagnostic, + Transport, + WeightedConnectionPool, + ClusterConnectionPool, + BaseConnectionPool, + CloudConnectionPool, + BaseConnection, + HttpConnection, + UndiciConnection, + Serializer, + errors, + events } diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index d071d3119..264c881b4 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -48,21 +48,15 @@ export default class AsyncSearch { async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -76,21 +70,15 @@ export default class AsyncSearch { async get (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> async get (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['keep_alive', 'typed_keys', 'wait_for_completion_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -104,21 +92,15 @@ export default class AsyncSearch { async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise> async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -132,22 +114,22 @@ export default class AsyncSearch { async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['batched_reduce_size', 'wait_for_completion_timeout', 'keep_on_completion', 'typed_keys', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['aggs', 'aggregations', 'collapse', 'highlight', 'indices_boost', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'slice', 'fields', 'suggest', 'pit', 'runtime_mappings'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index 413d9ef75..af5fc0c66 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -48,21 +48,15 @@ export default class Autoscaling { async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -76,22 +70,16 @@ export default class Autoscaling { async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -105,21 +93,15 @@ export default class Autoscaling { async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -133,20 +115,20 @@ export default class Autoscaling { async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['policy'] const querystring: Record = {} // @ts-expect-error let body: any = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { // @ts-expect-error - querystring[key] = params[key] + body = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error - body = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index f41f1ca5d..3b4e4ae04 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -42,20 +42,20 @@ export default async function BulkApi (this: That, params: T. export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'type'] - const acceptedQuery: string[] = ['pipeline', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'timeout', 'type', 'wait_for_active_shards', 'require_alias', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['operations'] const querystring: Record = {} // @ts-expect-error let body: any = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { // @ts-expect-error - querystring[key] = params[key] + body = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error - body = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index dbadd370e..d052c4528 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -48,22 +48,16 @@ export default class Cat { async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptions): Promise async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['expand_wildcards', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -84,22 +78,16 @@ export default class Cat { async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptions): Promise async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const acceptedQuery: string[] = ['bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -120,22 +108,16 @@ export default class Cat { async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptions): Promise async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -156,22 +138,16 @@ export default class Cat { async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptions): Promise async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['fields'] - const acceptedQuery: string[] = ['bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -192,22 +168,16 @@ export default class Cat { async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptions): Promise async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['include_timestamp', 'ts', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -221,22 +191,16 @@ export default class Cat { async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptions): Promise async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -250,22 +214,16 @@ export default class Cat { async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptions): Promise async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['bytes', 'expand_wildcards', 'health', 'include_unloaded_segments', 'pri', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -286,22 +244,16 @@ export default class Cat { async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptions): Promise async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -315,22 +267,16 @@ export default class Cat { async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['allow_no_match', 'bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -351,22 +297,16 @@ export default class Cat { async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedQuery: string[] = ['allow_no_datafeeds', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -387,22 +327,16 @@ export default class Cat { async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptions): Promise async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['allow_no_jobs', 'bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -423,22 +357,16 @@ export default class Cat { async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedQuery: string[] = ['allow_no_match', 'bytes', 'from', 'size', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -459,22 +387,16 @@ export default class Cat { async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptions): Promise async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -488,22 +410,16 @@ export default class Cat { async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptions): Promise async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['bytes', 'full_id', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -517,22 +433,16 @@ export default class Cat { async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptions): Promise async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -546,22 +456,16 @@ export default class Cat { async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptions): Promise async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -575,22 +479,16 @@ export default class Cat { async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptions): Promise async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['active_only', 'bytes', 'detailed', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -611,22 +509,16 @@ export default class Cat { async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptions): Promise async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -640,22 +532,16 @@ export default class Cat { async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptions): Promise async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -676,22 +562,16 @@ export default class Cat { async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptions): Promise async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -712,22 +592,16 @@ export default class Cat { async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptions): Promise async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository'] - const acceptedQuery: string[] = ['ignore_unavailable', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -748,22 +622,16 @@ export default class Cat { async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptions): Promise async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['actions', 'detailed', 'node_id', 'parent_task', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -777,22 +645,16 @@ export default class Cat {
   async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptions): Promise<T.CatTemplatesResponse>
   async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -813,22 +675,16 @@ export default class Cat {
   async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptions): Promise<T.CatThreadPoolResponse>
   async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['thread_pool_patterns']
-    const acceptedQuery: string[] = ['size', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -849,22 +705,16 @@ export default class Cat {
   async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptions): Promise<T.CatTransformsResponse>
   async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['transform_id']
-    const acceptedQuery: string[] = ['allow_no_match', 'from', 'size', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts
index 644257e36..7dc3e650a 100644
--- a/src/api/api/ccr.ts
+++ b/src/api/api/ccr.ts
@@ -48,21 +48,15 @@ export default class Ccr {
   async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrDeleteAutoFollowPatternResponse>
   async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
    for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -76,21 +70,21 @@ export default class Ccr {
   async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptions): Promise<T.CcrFollowResponse>
   async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -104,21 +98,15 @@ export default class Ccr {
   async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise<T.CcrFollowInfoResponse>
   async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -132,21 +120,15 @@ export default class Ccr {
   async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise<T.CcrFollowStatsResponse>
   async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -160,21 +142,21 @@ export default class Ccr {
   async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise<T.CcrForgetFollowerResponse>
   async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['follower_cluster', 'follower_index', 'follower_index_uuid', 'leader_remote_cluster']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -188,22 +170,16 @@ export default class Ccr {
   async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrGetAutoFollowPatternResponse>
   async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -224,21 +200,15 @@ export default class Ccr {
   async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrPauseAutoFollowPatternResponse>
   async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -252,21 +222,15 @@ export default class Ccr {
   async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise<T.CcrPauseFollowResponse>
   async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -280,21 +244,21 @@ export default class Ccr {
   async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrPutAutoFollowPatternResponse>
   async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['remote_cluster', 'follow_index_pattern', 'leader_index_patterns', 'leader_index_exclusion_patterns', 'max_outstanding_read_requests', 'settings', 'max_outstanding_write_requests', 'read_poll_timeout', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -308,21 +272,15 @@ export default class Ccr {
   async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrResumeAutoFollowPatternResponse>
   async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -336,21 +294,21 @@ export default class Ccr {
   async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise<T.CcrResumeFollowResponse>
   async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -364,22 +322,16 @@ export default class Ccr {
   async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptions): Promise<T.CcrStatsResponse>
   async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -393,21 +345,15 @@ export default class Ccr {
   async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptions): Promise<T.CcrUnfollowResponse>
   async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts
index 1db7bc45c..56512249c 100644
--- a/src/api/api/clear_scroll.ts
+++ b/src/api/api/clear_scroll.ts
@@ -42,22 +42,22 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll
 export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptions): Promise<T.ClearScrollResponse>
 export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
-  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['scroll_id']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params?.body ?? undefined
 
   params = params ?? {}
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts
index 677d953b2..fd930ba00 100644
--- a/src/api/api/close_point_in_time.ts
+++ b/src/api/api/close_point_in_time.ts
@@ -42,21 +42,21 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo
 export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise<T.ClosePointInTimeResponse>
 export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
-  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['id']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params.body ?? undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts
index 1656788f9..ac6f9b16a 100644
--- a/src/api/api/cluster.ts
+++ b/src/api/api/cluster.ts
@@ -48,22 +48,22 @@ export default class Cluster {
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise<T.ClusterAllocationExplainResponse>
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['include_disk_info', 'include_yes_decisions', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['current_node', 'index', 'primary', 'shard']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params?.body ?? undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -77,21 +77,15 @@ export default class Cluster {
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterDeleteComponentTemplateResponse>
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -105,22 +99,16 @@ export default class Cluster {
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<T.ClusterDeleteVotingConfigExclusionsResponse>
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['wait_for_removal', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -134,21 +122,15 @@ export default class Cluster {
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterExistsComponentTemplateResponse>
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['master_timeout', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -162,22 +144,16 @@ export default class Cluster {
   async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterGetComponentTemplateResponse>
   async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['flat_settings', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -198,22 +174,16 @@ export default class Cluster {
   async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise<T.ClusterGetSettingsResponse>
   async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['flat_settings', 'include_defaults', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -227,22 +197,16 @@ export default class Cluster {
   async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptions): Promise<T.ClusterHealthResponse>
   async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['expand_wildcards', 'level', 'local', 'master_timeout', 'timeout', 'wait_for_active_shards', 'wait_for_events', 'wait_for_nodes', 'wait_for_no_initializing_shards', 'wait_for_no_relocating_shards', 'wait_for_status', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -263,22 +227,16 @@ export default class Cluster {
   async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<T.ClusterPendingTasksResponse>
   async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -292,22 +250,16 @@ export default class Cluster {
   async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<T.ClusterPostVotingConfigExclusionsResponse>
   async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['node_names', 'node_ids', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -321,21 +273,21 @@ export default class Cluster {
   async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterPutComponentTemplateResponse>
   async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['create', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['template', 'aliases', 'mappings', 'settings', 'version', '_meta']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -349,22 +301,22 @@ export default class Cluster {
   async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise<T.ClusterPutSettingsResponse>
   async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['flat_settings', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['persistent', 'transient']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params?.body ?? undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -378,22 +330,16 @@ export default class Cluster {
   async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise<T.ClusterRemoteInfoResponse>
   async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -407,22 +353,22 @@ export default class Cluster {
   async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptions): Promise<T.ClusterRerouteResponse>
   async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['dry_run', 'explain', 'metric', 'retry_failed', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['commands']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params?.body ?? undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -436,22 +382,16 @@ export default class Cluster {
   async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptions): Promise<T.ClusterStateResponse>
   async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['metric', 'index']
-    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'local', 'master_timeout', 'wait_for_metadata_version', 'wait_for_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -475,22 +415,16 @@ export default class Cluster {
   async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptions): Promise<T.ClusterStatsResponse>
   async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['node_id']
-    const acceptedQuery: string[] = ['flat_settings', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/count.ts b/src/api/api/count.ts
index 1b786e867..999cbf3bb 100644
--- a/src/api/api/count.ts
+++ b/src/api/api/count.ts
@@ -42,22 +42,22 @@ export default async function CountApi (this: That, params?: T.CountRequest | TB
 export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptions): Promise<T.CountResponse>
 export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedQuery: string[] = ['allow_no_indices', 'analyzer', 'analyze_wildcard', 'default_operator', 'df', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable', 'lenient', 'min_score', 'preference', 'routing', 'terminate_after', 'q', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['query']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params?.body ?? undefined
 
   params = params ?? {}
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/create.ts b/src/api/api/create.ts
index 5f78ec9b9..31e3fca29 100644
--- a/src/api/api/create.ts
+++ b/src/api/api/create.ts
@@ -42,20 +42,20 @@ export default async function CreateApi<TDocument = unknown> (this: That, params
 export default async function CreateApi<TDocument = unknown> (this: That, params: T.CreateRequest<TDocument> | TB.CreateRequest<TDocument>, options?: TransportRequestOptions): Promise<T.CreateResponse>
 export default async function CreateApi<TDocument = unknown> (this: That, params: T.CreateRequest<TDocument> | TB.CreateRequest<TDocument>, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index', 'type']
-  const acceptedQuery: string[] = ['pipeline', 'refresh', 'routing', 'timeout', 'version', 'version_type', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['document']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: any = params.body ?? undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
       // @ts-expect-error
-      querystring[key] = params[key]
+      body = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
       // @ts-expect-error
-      body = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts
index 585d13504..493260a94 100644
--- a/src/api/api/dangling_indices.ts
+++ b/src/api/api/dangling_indices.ts
@@ -48,21 +48,15 @@ export default class DanglingIndices {
   async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesDeleteDanglingIndexResponse>
   async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index_uuid']
-    const acceptedQuery: string[] = ['accept_data_loss', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -76,21 +70,15 @@ export default class DanglingIndices {
   async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesImportDanglingIndexResponse>
   async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index_uuid']
-    const acceptedQuery: string[] = ['accept_data_loss', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -104,22 +92,16 @@ export default class DanglingIndices {
   async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesListDanglingIndicesResponse>
   async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts
index 1a937b06b..ee4dba53b 100644
--- a/src/api/api/delete.ts
+++ b/src/api/api/delete.ts
@@ -42,21 +42,15 @@ export default async function DeleteApi (this: That, params: T.DeleteRequest | T
 export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptions): Promise<T.DeleteResponse>
 export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index', 'type']
-  const acceptedQuery: string[] = ['if_primary_term', 'if_seq_no', 'refresh', 'routing', 'timeout', 'version', 'version_type', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  const body = undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts
index 96fa6681f..d957c4faf 100644
--- a/src/api/api/delete_by_query.ts
+++ b/src/api/api/delete_by_query.ts
@@ -42,21 +42,21 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptions): Promise<T.DeleteByQueryResponse>
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedQuery: string[] = ['allow_no_indices', 'analyzer', 'analyze_wildcard', 'conflicts', 'default_operator', 'df', 'expand_wildcards', 'from', 'ignore_unavailable', 'lenient', 'preference', 'refresh', 'request_cache', 'requests_per_second', 'routing', 'q', 'scroll', 'scroll_size', 'search_timeout', 'search_type', 'size', 'slices', 'sort', '_source', '_source_excludes', '_source_includes', 'stats', 'terminate_after', 'timeout', 'version', 'wait_for_active_shards', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['query', 'slice']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params.body ?? undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts
index 3ca74fd52..60c722e0a 100644
--- a/src/api/api/delete_by_query_rethrottle.ts
+++ b/src/api/api/delete_by_query_rethrottle.ts
@@ -42,21 +42,15 @@ export default async function DeleteByQueryRethrottleApi (this: That, params: T.
 export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<T.DeleteByQueryRethrottleResponse>
 export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['task_id']
-  const acceptedQuery: string[] = ['requests_per_second', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  const body = undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts
index 3f603f40b..5e2e4f50f 100644
--- a/src/api/api/delete_script.ts
+++ b/src/api/api/delete_script.ts
@@ -42,21 +42,15 @@ export default async function DeleteScriptApi (this: That, params: T.DeleteScrip
 export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptions): Promise<T.DeleteScriptResponse>
 export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id']
-  const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  const body = undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts
index c3c575c35..0fb852e05 100644
--- a/src/api/api/enrich.ts
+++ b/src/api/api/enrich.ts
@@ -48,21 +48,15 @@ export default class Enrich {
   async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichDeletePolicyResponse>
   async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -76,21 +70,15 @@ export default class Enrich {
   async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichExecutePolicyResponse>
   async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -104,22 +92,16 @@ export default class Enrich {
   async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichGetPolicyResponse>
   async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -140,21 +122,21 @@ export default class Enrich {
   async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichPutPolicyResponse>
   async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['geo_match', 'match']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -168,22 +150,16 @@ export default class Enrich {
   async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptions): Promise<T.EnrichStatsResponse>
   async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts
index 8a7d5ae9c..49d2bfb9d 100644
--- a/src/api/api/eql.ts
+++ b/src/api/api/eql.ts
@@ -48,21 +48,15 @@ export default class Eql {
   async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptions): Promise<T.EqlDeleteResponse>
   async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -76,21 +70,15 @@ export default class Eql {
   async get<TEvent = unknown> (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptions): Promise<T.EqlGetResponse<TEvent>>
   async get<TEvent = unknown> (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['keep_alive', 'wait_for_completion_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -104,21 +92,15 @@ export default class Eql {
   async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<T.EqlGetStatusResponse>
   async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -132,21 +114,21 @@ export default class Eql {
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise<T.EqlSearchResponse<TEvent>>
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'size', 'fields', 'result_position']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts
index b1c8d14cd..f0cfd6276 100644
--- a/src/api/api/exists.ts
+++ b/src/api/api/exists.ts
@@ -42,21 +42,15 @@ export default async function ExistsApi (this: That, params: T.ExistsRequest | T
 export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptions): Promise<T.ExistsResponse>
 export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index']
-  const acceptedQuery: string[] = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'stored_fields', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  const body = undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts
index 375ef4f3a..eec41b8cd 100644
--- a/src/api/api/exists_source.ts
+++ b/src/api/api/exists_source.ts
@@ -42,21 +42,15 @@ export default async function ExistsSourceApi (this: That, params: T.ExistsSourc
 export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptions): Promise<T.ExistsSourceResponse>
 export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index', 'type']
-  const acceptedQuery: string[] = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  const body = undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts
index 62e5f0198..eea152590 100644
--- a/src/api/api/explain.ts
+++ b/src/api/api/explain.ts
@@ -42,21 +42,21 @@ export default async function ExplainApi<TDocument = unknown> (this: That, param
 export default async function ExplainApi<TDocument = unknown> (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptions): Promise<T.ExplainResponse<TDocument>>
 export default async function ExplainApi<TDocument = unknown> (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index']
-  const acceptedQuery: string[] = ['analyzer', 'analyze_wildcard', 'default_operator', 'df', 'lenient', 'preference', 'routing', '_source', '_source_excludes', '_source_includes', 'stored_fields', 'q', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['query']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params.body ?? undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/features.ts b/src/api/api/features.ts
index f1b834ed8..d05d4831a 100644
--- a/src/api/api/features.ts
+++ b/src/api/api/features.ts
@@ -48,22 +48,16 @@ export default class Features {
   async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise<T.FeaturesGetFeaturesResponse>
   async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -77,22 +71,16 @@ export default class Features {
   async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise<T.FeaturesResetFeaturesResponse>
   async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts
index 0fa9ca6bf..6c5692125 100644
--- a/src/api/api/field_caps.ts
+++ b/src/api/api/field_caps.ts
@@ -42,22 +42,22 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ
 export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise<T.FieldCapsResponse>
 export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'fields', 'ignore_unavailable', 'include_unmapped', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['index_filter', 'runtime_mappings']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params?.body ?? undefined
 
   params = params ?? {}
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts
index 44f1f2758..fbf076cce 100644
--- a/src/api/api/fleet.ts
+++ b/src/api/api/fleet.ts
@@ -48,19 +48,15 @@ export default class Fleet {
   async globalCheckpoints (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async globalCheckpoints (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['wait_for_advance', 'wait_for_index', 'checkpoints', 'timeout']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/get.ts b/src/api/api/get.ts
index 9344e573b..12aa2faf9 100644
--- a/src/api/api/get.ts
+++ b/src/api/api/get.ts
@@ -42,21 +42,15 @@ export default async function GetApi<TDocument = unknown> (this: That, params: T
 export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptions): Promise<T.GetResponse<TDocument>>
 export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index']
-  const acceptedQuery: string[] = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'stored_fields', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  const body = undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
    }
  }
 
diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts
index 0e27e86d0..515926b66 100644
--- a/src/api/api/get_script.ts
+++ b/src/api/api/get_script.ts
@@ -42,21 +42,15 @@ export default async function GetScriptApi (this: That, params: T.GetScriptReque
 export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptions): Promise<T.GetScriptResponse>
 export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id']
-  const acceptedQuery: string[] = ['master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  const body = undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts
index 0e0bc0f7a..9cb53cdb0 100644
--- a/src/api/api/get_script_context.ts
+++ b/src/api/api/get_script_context.ts
@@ -42,22 +42,16 @@ export default async function GetScriptContextApi (this: That, params?: T.GetScr
 export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptions): Promise<T.GetScriptContextResponse>
 export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
-  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params?.body ?? undefined
+  const body = undefined
 
   params = params ?? {}
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts
index 6d492458f..e4ce35ca0 100644
--- a/src/api/api/get_script_languages.ts
+++ b/src/api/api/get_script_languages.ts
@@ -42,22 +42,16 @@ export default async function GetScriptLanguagesApi (this: That, params?: T.GetS
 export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<T.GetScriptLanguagesResponse>
 export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
-  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params?.body ?? undefined
+  const body = undefined
 
   params = params ?? {}
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ??
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index 60c6e7e55..0e0aa6737 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -42,21 +42,15 @@ export default async function GetSourceApi (this: That, par export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptions): Promise> export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] - const acceptedQuery: string[] = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'stored_fields', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index 38ec4fa0a..c92260c0e 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -48,21 +48,21 @@ export default class Graph { async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptions): Promise async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['routing', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['connections', 'controls', 'query', 'vertices'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index eebf27269..7ccac54f0 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -48,21 +48,15 @@ export default class Ilm { async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['policy'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? 
undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -76,21 +70,15 @@ export default class Ilm { async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['only_errors', 'only_managed', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -104,22 +92,16 @@ export default class Ilm { async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['policy'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -140,22 +122,16 @@ export default class Ilm { async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -169,19 +145,15 @@ export default class Ilm { async migrateToDataTiers (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async migrateToDataTiers (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['dry_run'] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -195,21 +167,21 @@ export default class Ilm { async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['current_step', 'next_step'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -223,21 +195,21 @@ export default class Ilm { async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['policy'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -251,21 +223,15 @@ export default class Ilm { async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? 
undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -279,21 +245,15 @@ export default class Ilm { async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptions): Promise async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -307,22 +267,16 @@ export default class Ilm { async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -336,22 +290,16 @@ export default class Ilm { async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/index.ts b/src/api/api/index.ts index 2800f42da..f1aac6a12 100644 --- a/src/api/api/index.ts +++ b/src/api/api/index.ts @@ -42,20 +42,20 @@ export default async function IndexApi (this: That, params: export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptions): Promise export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] - const acceptedQuery: string[] = ['if_primary_term', 'if_seq_no', 'op_type', 'pipeline', 'refresh', 'routing', 'timeout', 'version', 'version_type', 'wait_for_active_shards', 'require_alias', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['document'] const querystring: Record = {} // @ts-expect-error let body: any = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { // @ts-expect-error - querystring[key] = params[key] + body = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error - body = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index c51601c59..52fd341f6 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -48,21 +48,15 @@ export default class Indices { async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'block'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -76,22 +70,22 @@ export default class Indices { async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['analyzer', 'attributes', 'char_filter', 'explain', 'field', 'filter', 'normalizer', 'text', 'tokenizer'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -112,22 +106,16 @@ export default class Indices { async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'fielddata', 'fields', 'ignore_unavailable', 'query', 'request', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -148,21 +136,21 @@ export default class Indices { async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptions): Promise async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'target'] - const acceptedQuery: string[] = ['master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['aliases', 'settings'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -176,21 +164,15 @@ export default class Indices { async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptions): Promise async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -204,21 +186,21 @@ export default class Indices { async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptions): Promise async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['include_type_name', 'master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['aliases', 'mappings', 'settings'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -232,21 +214,15 @@ export default class Indices { async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -260,22 +236,16 @@ export default class Indices { async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['expand_wildcards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -296,21 +266,15 @@ export default class Indices { async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -324,21 +288,15 @@ export default class Indices { async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'name'] - const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -359,21 +317,15 @@ export default class Indices { async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['expand_wildcards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -387,21 +339,15 @@ export default class Indices { async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -415,21 +361,15 @@ export default class Indices { async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -443,21 +383,15 @@ export default class Indices { async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flush', 'ignore_unavailable', 'master_timeout', 'timeout', 'run_expensive_tasks', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -471,21 +405,15 @@ export default class Indices { async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptions): Promise async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'include_defaults', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -499,21 +427,15 @@ export default class Indices { async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name', 'index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -534,21 +456,15 @@ export default class Indices { async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -562,21 +478,15 @@ export default class Indices { async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['flat_settings', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -590,21 +500,15 @@ export default class Indices { async existsType (this: That, params: T.IndicesExistsTypeRequest | TB.IndicesExistsTypeRequest, options?: TransportRequestOptions): Promise async existsType (this: That, params: T.IndicesExistsTypeRequest | TB.IndicesExistsTypeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'type'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -618,19 +522,15 @@ export default class Indices { async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['fields', 'ignore_unavailable', 'allow_no_indices', 'expand_wildcards'] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} - body[key] = params[key] + querystring[key] = params[key] } } @@ -644,22 +544,16 @@ export default class Indices { async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptions): Promise async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'force', 'ignore_unavailable', 'wait_if_ongoing', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -680,22 +574,16 @@ export default class Indices { async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flush', 'ignore_unavailable', 'max_num_segments', 'only_expunge_deletes', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -716,21 +604,15 @@ export default class Indices { async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'include_defaults', 'include_type_name', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -744,22 +626,16 @@ export default class Indices { async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name', 'index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -786,22 +662,16 @@ export default class Indices { async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['expand_wildcards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -822,21 +692,15 @@ export default class Indices { async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['fields', 'index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'include_defaults', 'include_type_name', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -857,22 +721,16 @@ export default class Indices { async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['local', 'flat_settings', 'include_type_name', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -893,22 +751,16 @@ export default class Indices { async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'include_type_name', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -929,22 +781,16 @@ export default class Indices { async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'name'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'include_defaults', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -971,22 +817,16 @@ export default class Indices { async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['flat_settings', 'include_type_name', 'local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1007,21 +847,15 @@ export default class Indices { async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1035,21 +869,15 @@ export default class Indices { async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptions): Promise async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1063,21 +891,15 @@ export default class Indices {
   async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesPromoteDataStreamResponse>
   async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1091,21 +913,21 @@ export default class Indices {
   async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise<T.IndicesPutAliasResponse>
   async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index', 'name']
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1126,21 +948,21 @@ export default class Indices {
   async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesPutIndexTemplateResponse>
   async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1154,21 +976,21 @@ export default class Indices {
   async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise<T.IndicesPutMappingResponse>
   async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'include_type_name', 'master_timeout', 'timeout', 'write_index_only', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['date_detection', 'dynamic', 'dynamic_date_formats', 'dynamic_templates', '_field_names', '_meta', 'numeric_detection', 'properties', '_routing', '_source', 'runtime']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1182,20 +1004,20 @@ export default class Indices {
   async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise<T.IndicesPutSettingsResponse>
   async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'flat_settings', 'ignore_unavailable', 'master_timeout', 'preserve_existing', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['settings']
    const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: any = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
         // @ts-expect-error
-        querystring[key] = params[key]
+        body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
         // @ts-expect-error
-        body = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1216,21 +1038,21 @@ export default class Indices {
   async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesPutTemplateResponse>
   async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['create', 'flat_settings', 'include_type_name', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'settings', 'version']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1244,22 +1066,16 @@ export default class Indices {
   async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise<T.IndicesRecoveryResponse>
   async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['active_only', 'detailed', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1280,22 +1096,16 @@ export default class Indices {
   async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptions): Promise<T.IndicesRefreshResponse>
   async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1316,21 +1126,15 @@ export default class Indices {
   async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise<T.IndicesReloadSearchAnalyzersResponse>
   async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1344,21 +1148,15 @@ export default class Indices {
   async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveIndexResponse>
   async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['expand_wildcards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1372,21 +1170,21 @@ export default class Indices {
   async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<T.IndicesRolloverResponse>
   async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['alias', 'new_index']
-    const acceptedQuery: string[] = ['dry_run', 'include_type_name', 'master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['aliases', 'conditions', 'mappings', 'settings']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1407,22 +1205,16 @@ export default class Indices {
   async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<T.IndicesSegmentsResponse>
   async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'verbose', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1443,22 +1235,16 @@ export default class Indices {
   async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise<T.IndicesShardStoresResponse>
   async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'status', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1479,21 +1265,21 @@ export default class Indices {
   async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptions): Promise<T.IndicesShrinkResponse>
   async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index', 'target']
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['aliases', 'settings']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1507,21 +1293,21 @@ export default class Indices {
   async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesSimulateIndexTemplateResponse>
   async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['index_patterns', 'composed_of', 'overlapping', 'template']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1535,20 +1321,20 @@ export default class Indices {
   async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesSimulateTemplateResponse>
   async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['create', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['template']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: any = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
         // @ts-expect-error
-        querystring[key] = params[key]
+        body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
         // @ts-expect-error
-        body = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1569,21 +1355,21 @@ export default class Indices {
   async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptions): Promise<T.IndicesSplitResponse>
   async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index', 'target']
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'wait_for_active_shards', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['aliases', 'settings']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1597,22 +1383,16 @@ export default class Indices {
   async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptions): Promise<T.IndicesStatsResponse>
   async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['metric', 'index']
-    const acceptedQuery: string[] = ['completion_fields', 'expand_wildcards', 'fielddata_fields', 'fields', 'forbid_closed_indices', 'groups', 'include_segment_file_sizes', 'include_unloaded_segments', 'level', 'types', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1639,22 +1419,22 @@ export default class Indices {
   async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise<T.IndicesUpdateAliasesResponse>
   async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['actions']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params?.body ?? undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -1668,22 +1448,22 @@ export default class Indices {
   async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise<T.IndicesValidateQueryResponse>
   async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index', 'type']
-    const acceptedQuery: string[] = ['allow_no_indices', 'all_shards', 'analyzer', 'analyze_wildcard', 'default_operator', 'df', 'expand_wildcards', 'explain', 'ignore_unavailable', 'lenient', 'rewrite', 'q', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['query']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params?.body ?? undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/info.ts b/src/api/api/info.ts
index 336e08268..b7d58c650 100644
--- a/src/api/api/info.ts
+++ b/src/api/api/info.ts
@@ -42,22 +42,16 @@ export default async function InfoApi (this: That, params?: T.InfoRequest | TB.I
 export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptions): Promise<T.InfoResponse>
 export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
-  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params?.body ?? undefined
+  const body = undefined
 
   params = params ?? {}
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
      continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts
index e9bc410fd..8ab5146a0 100644
--- a/src/api/api/ingest.ts
+++ b/src/api/api/ingest.ts
@@ -48,21 +48,15 @@ export default class Ingest {
   async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise<T.IngestDeletePipelineResponse>
   async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -76,22 +70,16 @@ export default class Ingest {
   async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise<T.IngestGeoIpStatsResponse>
   async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -105,22 +93,16 @@ export default class Ingest {
   async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise<T.IngestGetPipelineResponse>
   async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['master_timeout', 'summary', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -141,22 +123,16 @@ export default class Ingest {
   async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise<T.IngestProcessorGrokResponse>
   async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -170,21 +146,21 @@ export default class Ingest {
   async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise<T.IngestPutPipelineResponse>
   async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -198,22 +174,22 @@ export default class Ingest {
   async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise<T.IngestSimulateResponse>
   async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['verbose', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['docs', 'pipeline']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params?.body ?? undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/license.ts b/src/api/api/license.ts
index 1ab3ee8e2..fafeb3f4a 100644
--- a/src/api/api/license.ts
+++ b/src/api/api/license.ts
@@ -48,22 +48,16 @@ export default class License {
   async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptions): Promise<T.LicenseDeleteResponse>
   async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -77,22 +71,16 @@ export default class License {
   async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptions): Promise<T.LicenseGetResponse>
   async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['accept_enterprise', 'local', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -106,22 +94,16 @@ export default class License {
   async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise<T.LicenseGetBasicStatusResponse>
   async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -135,22 +117,16 @@ export default class License {
   async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise<T.LicenseGetTrialStatusResponse>
   async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -164,22 +140,22 @@ export default class License {
   async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise<T.LicensePostResponse>
   async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['acknowledge', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['license', 'licenses']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params?.body ?? undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -193,22 +169,16 @@ export default class License {
   async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise<T.LicensePostStartBasicResponse>
   async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['acknowledge', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -222,22 +192,16 @@ export default class License {
   async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise<T.LicensePostStartTrialResponse>
   async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['acknowledge', 'type_query_string', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts
index 7080312a4..9fc6d44bb 100644
--- a/src/api/api/logstash.ts
+++ b/src/api/api/logstash.ts
@@ -48,21 +48,15 @@ export default class Logstash {
   async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise<T.LogstashDeletePipelineResponse>
   async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -76,21 +70,15 @@ export default class Logstash {
   async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise<T.LogstashGetPipelineResponse>
   async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -104,20 +92,20 @@ export default class Logstash {
   async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise<T.LogstashPutPipelineResponse>
   async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['pipeline']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: any = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
         // @ts-expect-error
-        querystring[key] = params[key]
+        body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
         // @ts-expect-error
-        body = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts
index 292c07b01..d4a11874f 100644
--- a/src/api/api/mget.ts
+++ b/src/api/api/mget.ts
@@ -42,22 +42,22 @@ export default async function MgetApi<TDocument = unknown> (this: That, params?:
 export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptions): Promise<T.MgetResponse<TDocument>>
 export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedQuery: string[] = ['preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'stored_fields', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['docs', 'ids']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params?.body ?? undefined
 
   params = params ?? {}
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts
index de7253afd..27682a2f5 100644
--- a/src/api/api/migration.ts
+++ b/src/api/api/migration.ts
@@ -48,22 +48,16 @@ export default class Migration {
   async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<T.MigrationDeprecationsResponse>
   async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -78,4 +72,48 @@ export default class Migration {
     }
     return await this.transport.request({ path, method, querystring, body }, options)
   }
+
+  async getFeatureUpgradeStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async getFeatureUpgradeStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async getFeatureUpgradeStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async getFeatureUpgradeStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_migration/system_features'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async postFeatureUpgrade (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async postFeatureUpgrade (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async postFeatureUpgrade (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async postFeatureUpgrade (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_migration/system_features'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
 }
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index 57596a6f2..945dad7a9 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -48,21 +48,15 @@ export default class Ml {
   async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise<T.MlCloseJobResponse>
   async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
-    const acceptedQuery: string[] = ['allow_no_match', 'allow_no_jobs', 'force', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -76,21 +70,15 @@ export default class Ml { async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -104,21 +92,15 @@ export default class Ml { async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id', 'event_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -132,21 +114,15 @@ export default class Ml { async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id', 'job_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -160,21 +136,15 @@ export default class Ml { async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['force', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -188,21 +158,15 @@ export default class Ml { async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedQuery: string[] = ['force', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -216,22 +180,16 @@ export default class Ml { async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -252,21 +210,15 @@ export default class Ml { async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['filter_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -280,21 +232,15 @@ export default class Ml { async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'forecast_id'] - const acceptedQuery: string[] = ['allow_no_forecasts', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -315,21 +261,15 @@ export default class Ml { async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptions): Promise async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['force', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -343,21 +283,15 @@ export default class Ml { async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -371,21 +305,15 @@ export default class Ml { async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -399,21 +327,15 @@ export default class Ml { async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_alias', 'model_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -427,22 +349,22 @@ export default class Ml { async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -456,21 +378,21 @@ export default class Ml { async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['evaluation', 'index', 'query'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -484,21 +406,21 @@ export default class Ml { async explainDataFrameAnalytics (this: That, params: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async explainDataFrameAnalytics (this: That, params: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -519,21 +441,21 @@ export default class Ml { async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptions): Promise async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['skip_time', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'start'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -547,21 +469,21 @@ export default class Ml { async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptions): Promise async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['duration', 'expires_in', 'max_model_memory'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -575,21 +497,21 @@ export default class Ml { async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'timestamp'] - const acceptedQuery: string[] = ['from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['anomaly_score', 'expand', 'page'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -610,21 +532,15 @@ export default class Ml { async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id'] - const acceptedQuery: string[] = ['job_id', 'end', 'from', 'start', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -638,22 +554,22 @@ export default class Ml { async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id'] - const acceptedQuery: string[] = ['from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['page'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -674,21 +590,21 @@ export default class Ml { async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'category_id'] - const acceptedQuery: string[] = ['from', 'size', 'partition_field_value', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['page'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
@@ -709,22 +625,16 @@ export default class Ml {
   async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsResponse>
   async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['allow_no_match', 'from', 'size', 'exclude_generated', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
@@ -745,22 +655,16 @@ export default class Ml {
   async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
   async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['allow_no_match', 'from', 'size', 'verbose', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
@@ -781,22 +685,16 @@ export default class Ml {
   async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDatafeedStatsResponse>
   async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
-    const acceptedQuery: string[] = ['allow_no_datafeeds', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ??
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -817,22 +715,16 @@ export default class Ml { async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedQuery: string[] = ['allow_no_datafeeds', 'exclude_generated', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -853,22 +745,16 @@ export default class Ml { async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptions): Promise async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['filter_id'] - const acceptedQuery: string[] = ['from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -889,21 +775,21 @@ export default class Ml { async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['desc', 'end', 'exclude_interim', 'influencer_score', 'from', 'size', 'sort', 'start', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['page'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -917,22 +803,16 @@ export default class Ml { async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['allow_no_jobs', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -953,22 +833,16 @@ export default class Ml { async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptions): Promise async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['allow_no_match', 'allow_no_jobs', 'exclude_generated', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -989,21 +863,15 @@ export default class Ml { async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedQuery: string[] = ['desc', 'from', 'size', 'sort', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1024,21 +892,21 @@ export default class Ml { async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['bucket_span', 'overall_score', 'top_n', 'end', 'start', 'exclude_interim', 'allow_no_match', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['allow_no_jobs'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1052,21 +920,21 @@ export default class Ml { async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['desc', 'page', 'record_score', 'sort'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1080,19 +948,15 @@ export default class Ml { async getTrainedModelDeploymentStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async getTrainedModelDeploymentStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1106,22 +970,16 @@ export default class Ml { async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedQuery: string[] = ['allow_no_match', 'decompress_definition', 'exclude_generated', 'from', 'include', 'size', 'tags', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1142,22 +1000,16 @@ export default class Ml { async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedQuery: string[] = ['allow_no_match', 'from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1178,19 +1030,15 @@ export default class Ml { async inferTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async inferTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedQuery: string[] = ['timeout'] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1204,22 +1052,16 @@ export default class Ml { async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? 
undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1233,21 +1075,21 @@ export default class Ml { async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptions): Promise async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['timeout'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1261,21 +1103,21 @@ export default class Ml { async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['events'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1289,20 +1131,20 @@ export default class Ml { async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptions): Promise async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['reset_end', 'reset_start', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['data'] const querystring: Record = {} // @ts-expect-error let body: any = params.body ?? 
undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { // @ts-expect-error - querystring[key] = params[key] + body = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error - body = params[key] + querystring[key] = params[key] } } @@ -1316,22 +1158,22 @@ export default class Ml { async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['config'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1352,22 +1194,22 @@ export default class Ml { async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['job_config', 'datafeed_config'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1388,21 +1230,21 @@ export default class Ml { async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptions): Promise async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['description'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1416,21 +1258,15 @@ export default class Ml { async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id', 'job_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1444,21 +1280,21 @@ export default class Ml { async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', 'model_memory_limit', 'source'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1472,21 +1308,21 @@ export default class Ml { async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1500,21 +1336,21 @@ export default class Ml { async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptions): Promise async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['filter_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['description', 'items'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1528,21 +1364,21 @@ export default class Ml { async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1556,21 +1392,21 @@ export default class Ml { async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'tags'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1584,21 +1420,15 @@ export default class Ml { async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_alias', 'model_id'] - const acceptedQuery: string[] = ['reassign', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1612,19 +1442,15 @@ export default class Ml { async putTrainedModelDefinitionPart (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async putTrainedModelDefinitionPart (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id', 'part'] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1638,19 +1464,15 @@ export default class Ml { async putTrainedModelVocabulary (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async putTrainedModelVocabulary (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1664,21 +1486,15 @@ export default class Ml { async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptions): Promise async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1692,21 +1508,21 @@ export default class Ml { async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['delete_intervening_results'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1720,22 +1536,16 @@ export default class Ml { async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['enabled', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1749,21 +1559,15 @@ export default class Ml { async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1777,21 +1581,21 @@ export default class Ml { async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['end', 'timeout'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1805,19 +1609,15 @@ export default class Ml { async startTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async startTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedQuery: string[] = ['timeout', 'wait_for'] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1831,21 +1631,15 @@ export default class Ml { async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['allow_no_match', 'force', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1859,21 +1653,21 @@ export default class Ml { async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedQuery: string[] = ['allow_no_match', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['timeout'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1887,19 +1681,15 @@ export default class Ml { async stopTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async stopTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1913,21 +1703,21 @@ export default class Ml { async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1941,19 +1731,15 @@ export default class Ml { async updateDatafeed (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async updateDatafeed (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedQuery: string[] = ['ignore_unavailable', 'allow_no_indices', 'ignore_throttled', 'expand_wildcards'] const querystring: Record = {} - let body: Record = params?.body ?? 
undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1967,21 +1753,21 @@ export default class Ml { async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['filter_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['add_items', 'description', 'remove_items'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1995,21 +1781,21 @@ export default class Ml { async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptions): Promise async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -2023,21 +1809,21 @@ export default class Ml { async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['description', 'retain'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -2051,21 +1837,15 @@ export default class Ml { async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedQuery: string[] = ['wait_for_completion', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -2079,22 +1859,22 @@ export default class Ml { async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptions): Promise async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_retention_days', 'results_index_name'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -2108,20 +1888,20 @@ export default class Ml { async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['detector'] const querystring: Record = {} // @ts-expect-error let body: any = params.body ?? 
undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { // @ts-expect-error - querystring[key] = params[key] + body = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error - body = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index 2822b7699..6db509050 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -48,20 +48,20 @@ export default class Monitoring { async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['type'] - const acceptedQuery: string[] = ['system_id', 'system_api_version', 'interval', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['operations'] const querystring: Record = {} // @ts-expect-error let body: any = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { // @ts-expect-error - querystring[key] = params[key] + body = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error - body = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts index b55b46093..67139b26e 100644 --- a/src/api/api/msearch.ts +++ b/src/api/api/msearch.ts @@ -42,20 +42,20 @@ export default async function MsearchApi (this: That, param export default async function MsearchApi (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise> export default async function MsearchApi (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'ccs_minimize_roundtrips', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable', 'max_concurrent_searches', 'max_concurrent_shard_requests', 'pre_filter_shard_size', 'search_type', 'rest_total_hits_as_int', 'typed_keys', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['searches'] const querystring: Record = {} // @ts-expect-error let body: any = params.body ?? 
undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { // @ts-expect-error - querystring[key] = params[key] + body = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error - body = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts index b6298aff1..8cd92bcb7 100644 --- a/src/api/api/msearch_template.ts +++ b/src/api/api/msearch_template.ts @@ -42,20 +42,20 @@ export default async function MsearchTemplateApi (this: Tha export default async function MsearchTemplateApi (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise> export default async function MsearchTemplateApi (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['ccs_minimize_roundtrips', 'max_concurrent_searches', 'search_type', 'rest_total_hits_as_int', 'typed_keys', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['search_templates'] const querystring: Record = {} // @ts-expect-error let body: any = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { // @ts-expect-error - querystring[key] = params[key] + body = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error - body = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts index 4cdc15eab..449504437 100644 --- a/src/api/api/mtermvectors.ts +++ b/src/api/api/mtermvectors.ts @@ -42,22 +42,22 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptions): Promise export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['fields', 'field_statistics', 'offsets', 'payloads', 'positions', 'preference', 'realtime', 'routing', 'term_statistics', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['docs'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index a07b701e0..6fc2c0798 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -48,19 +48,15 @@ export default class Nodes { async clearRepositoriesMeteringArchive (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async clearRepositoriesMeteringArchive (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id', 'max_archive_version'] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -74,19 +70,15 @@ export default class Nodes { async getRepositoriesMeteringInfo (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async getRepositoriesMeteringInfo (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -100,22 +92,16 @@ export default class Nodes { async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const acceptedQuery: string[] = ['ignore_idle_threads', 'interval', 'snapshots', 'threads', 'thread_type', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -136,22 +122,16 @@ export default class Nodes { async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id', 'metric'] - const acceptedQuery: string[] = ['flat_settings', 'master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? 
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -178,22 +158,22 @@ export default class Nodes {
   async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<T.NodesReloadSecureSettingsResponse>
   async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['node_id']
-    const acceptedQuery: string[] = ['timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['secure_settings_password']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params?.body ?? undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -214,22 +194,16 @@ export default class Nodes {
   async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptions): Promise<T.NodesStatsResponse>
   async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['node_id', 'metric', 'index_metric']
-    const acceptedQuery: string[] = ['completion_fields', 'fielddata_fields', 'fields', 'groups', 'include_segment_file_sizes', 'level', 'master_timeout', 'timeout', 'types', 'include_unloaded_segments', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -262,22 +236,16 @@ export default class Nodes {
   async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptions): Promise<T.NodesUsageResponse>
   async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['node_id', 'metric']
-    const acceptedQuery: string[] = ['timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts
index 94be5c04b..81507983a 100644
--- a/src/api/api/open_point_in_time.ts
+++ b/src/api/api/open_point_in_time.ts
@@ -42,21 +42,15 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<T.OpenPointInTimeResponse>
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedQuery: string[] = ['keep_alive', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  const body = undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts
index 49ec7c223..4b8a07e8f 100644
--- a/src/api/api/ping.ts
+++ b/src/api/api/ping.ts
@@ -42,22 +42,16 @@ export default async function PingApi (this: That, params?: T.PingRequest | TB.P
 export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptions): Promise<T.PingResponse>
 export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
-  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params?.body ?? undefined
+  const body = undefined
   params = params ?? {}
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
      // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts
index 1f82c1445..1d75aeda1 100644
--- a/src/api/api/put_script.ts
+++ b/src/api/api/put_script.ts
@@ -42,21 +42,21 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque
 export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptions): Promise<T.PutScriptResponse>
 export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'context']
-  const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['script']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params.body ?? undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts
index 921372d5c..c47112fef 100644
--- a/src/api/api/rank_eval.ts
+++ b/src/api/api/rank_eval.ts
@@ -42,21 +42,21 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest
 export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptions): Promise<T.RankEvalResponse>
 export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'search_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['requests', 'metric']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params.body ?? undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts
index 513f1cd60..c182e1ed6 100644
--- a/src/api/api/reindex.ts
+++ b/src/api/api/reindex.ts
@@ -42,22 +42,22 @@ export default async function ReindexApi (this: That, params?: T.ReindexRequest
 export default async function ReindexApi (this: That, params?: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise<T.ReindexResponse>
 export default async function ReindexApi (this: That, params?: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
-  const acceptedQuery: string[] = ['refresh', 'requests_per_second', 'scroll', 'slices', 'timeout', 'wait_for_active_shards', 'wait_for_completion', 'require_alias', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params?.body ?? undefined
   params = params ?? {}
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts
index 436f34394..030ff50a5 100644
--- a/src/api/api/reindex_rethrottle.ts
+++ b/src/api/api/reindex_rethrottle.ts
@@ -42,21 +42,15 @@ export default async function ReindexRethrottleApi (this: That, params: T.Reinde
 export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise<T.ReindexRethrottleResponse>
 export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['task_id']
-  const acceptedQuery: string[] = ['requests_per_second', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  const body = undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
      // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts
index 03285bf5a..2fa44bd57 100644
--- a/src/api/api/render_search_template.ts
+++ b/src/api/api/render_search_template.ts
@@ -42,22 +42,22 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re
 export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<T.RenderSearchTemplateResponse>
 export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id']
-  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['file', 'params', 'source']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params?.body ?? undefined
   params = params ?? {}
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts
index 0a728192f..a3e7592ea 100644
--- a/src/api/api/rollup.ts
+++ b/src/api/api/rollup.ts
@@ -48,21 +48,15 @@ export default class Rollup {
   async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise<T.RollupDeleteJobResponse>
   async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -76,22 +70,16 @@ export default class Rollup {
   async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptions): Promise<T.RollupGetJobsResponse>
   async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -112,22 +100,16 @@ export default class Rollup {
   async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise<T.RollupGetRollupCapsResponse>
   async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -148,21 +130,15 @@ export default class Rollup {
   async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise<T.RollupGetRollupIndexCapsResponse>
   async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -176,21 +152,21 @@ export default class Rollup {
   async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptions): Promise<T.RollupPutJobResponse>
   async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -204,20 +180,20 @@ export default class Rollup {
   async rollup (this: That, params: T.RollupRollupRequest | TB.RollupRollupRequest, options?: TransportRequestOptions): Promise<T.RollupRollupResponse>
   async rollup (this: That, params: T.RollupRollupRequest | TB.RollupRollupRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index', 'rollup_index']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['config']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: any = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
         // @ts-expect-error
-        querystring[key] = params[key]
+        body = params[key]
       } else if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
         // @ts-expect-error
-        body = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -231,21 +207,21 @@ export default class Rollup {
   async rollupSearch<TDocument = unknown> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise<T.RollupRollupSearchResponse<TDocument>>
   async rollupSearch<TDocument = unknown> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index', 'type']
-    const acceptedQuery: string[] = ['rest_total_hits_as_int', 'typed_keys', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['aggs', 'query', 'size']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -259,21 +235,15 @@ export default class Rollup {
   async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptions): Promise<T.RollupStartJobResponse>
   async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -287,21 +257,15 @@ export default class Rollup {
   async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptions): Promise<T.RollupStopJobResponse>
   async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['timeout', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts
index 968516d40..cd04ce880 100644
--- a/src/api/api/scripts_painless_execute.ts
+++ b/src/api/api/scripts_painless_execute.ts
@@ -42,22 +42,22 @@ export default async function ScriptsPainlessExecuteApi (this
 export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise<T.ScriptsPainlessExecuteResponse<TResult>>
 export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
-  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['context', 'context_setup', 'script']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params?.body ?? undefined
   params = params ?? {}
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts
index 0592663ba..e6a16fe9a 100644
--- a/src/api/api/scroll.ts
+++ b/src/api/api/scroll.ts
@@ -42,21 +42,15 @@ export default async function ScrollApi (this: That, params
 export default async function ScrollApi<TDocument = unknown> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise<T.ScrollResponse<TDocument>>
 export default async function ScrollApi<TDocument = unknown> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
-  const acceptedQuery: string[] = ['rest_total_hits_as_int', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  const body = undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
-      // @ts-expect-error
-      querystring[key] = params[key]
-    } else if (acceptedPath.includes(key)) {
+    if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
      // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/search.ts b/src/api/api/search.ts
index 388cc6f33..a888230c5 100644
--- a/src/api/api/search.ts
+++ b/src/api/api/search.ts
@@ -42,22 +42,22 @@ export default async function SearchApi (this: That, params
 export default async function SearchApi<TDocument = unknown> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise<T.SearchResponse<TDocument>>
 export default async function SearchApi<TDocument = unknown> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedQuery: string[] = ['allow_no_indices', 'allow_partial_search_results', 'analyzer', 'analyze_wildcard', 'batched_reduce_size', 'ccs_minimize_roundtrips', 'default_operator', 'df', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable', 'lenient', 'max_concurrent_shard_requests', 'min_compatible_shard_node', 'preference', 'pre_filter_shard_size', 'request_cache', 'routing', 'scroll', 'search_type', 'suggest_field', 'suggest_mode', 'suggest_size', 'suggest_text', 'typed_keys', 'rest_total_hits_as_int', '_source_excludes', '_source_includes', 'q', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['aggs', 'aggregations', 'collapse', 'highlight', 'indices_boost', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'slice', 'fields', 'suggest', 'pit', 'runtime_mappings']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params?.body ?? undefined
   params = params ?? {}
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
      // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts
index 43a63b254..e15c7e2d0 100644
--- a/src/api/api/search_mvt.ts
+++ b/src/api/api/search_mvt.ts
@@ -42,21 +42,21 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise<T.SearchMvtResponse>
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y']
-  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['aggs', 'fields', 'query', 'runtime_mappings', 'sort']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params.body ?? undefined
 
   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts
index 24216f115..999ea6be4 100644
--- a/src/api/api/search_shards.ts
+++ b/src/api/api/search_shards.ts
@@ -42,22 +42,16 @@ export default async function SearchShardsApi (this: That, params?: T.SearchShar
 export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptions): Promise<T.SearchShardsResponse>
 export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedQuery: string[] = ['allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 'local', 'preference', 'routing', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: Record<string, any> = params?.body ?? undefined
+  const body = undefined
   params = params ?? {}
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index f15905827..5c240f484 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -42,22 +42,22 @@ export default async function SearchTemplateApi (this: That export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptions): Promise> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'ccs_minimize_roundtrips', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable', 'preference', 'routing', 'scroll', 'search_type', 'rest_total_hits_as_int', 'typed_keys', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['id', 'params', 'source'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 08156add7..1604d4def 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -48,19 +48,15 @@ export default class SearchableSnapshots { async cacheStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async cacheStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -81,22 +77,16 @@ export default class SearchableSnapshots { async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['expand_wildcards', 'allow_no_indices', 'ignore_unavailable', 'pretty', 'human', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? 
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -117,21 +107,21 @@ export default class SearchableSnapshots {
   async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsMountResponse>
   async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['repository', 'snapshot']
-    const acceptedQuery: string[] = ['master_timeout', 'wait_for_completion', 'storage', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['index', 'renamed_index', 'index_settings', 'ignore_index_settings']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -145,22 +135,16 @@ export default class SearchableSnapshots {
   async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsStatsResponse>
   async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedQuery: string[] = ['level', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/security.ts b/src/api/api/security.ts
index 673023465..55363814a 100644
--- a/src/api/api/security.ts
+++ b/src/api/api/security.ts
@@ -48,22 +48,16 @@ export default class Security {
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise<T.SecurityAuthenticateResponse>
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -77,22 +71,22 @@ export default class Security {
   async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise<T.SecurityChangePasswordResponse>
   async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['username']
-    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['password']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params?.body ?? undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -113,21 +107,15 @@ export default class Security {
   async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise<T.SecurityClearApiKeyCacheResponse>
   async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['ids']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -141,21 +129,15 @@ export default class Security {
   async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedPrivilegesResponse>
   async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['application']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -169,21 +151,15 @@ export default class Security {
   async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedRealmsResponse>
   async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['realms']
-    const acceptedQuery: string[] = ['usernames', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -197,21 +173,15 @@ export default class Security {
   async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedRolesResponse>
   async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -225,21 +195,15 @@ export default class Security {
   async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedServiceTokensResponse>
   async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['namespace', 'service', 'name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -253,22 +217,22 @@ export default class Security {
   async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateApiKeyResponse>
   async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['expiration', 'name', 'role_descriptors', 'metadata']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params?.body ?? undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -282,21 +246,15 @@ export default class Security {
   async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateServiceTokenResponse>
   async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['namespace', 'service', 'name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -317,21 +275,15 @@ export default class Security {
   async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityDeletePrivilegesResponse>
   async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['application', 'name']
-    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -345,21 +297,15 @@ export default class Security {
   async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteRoleResponse>
   async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -373,21 +319,15 @@ export default class Security {
   async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteRoleMappingResponse>
   async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -401,21 +341,15 @@ export default class Security {
   async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteServiceTokenResponse>
   async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['namespace', 'service', 'name']
-    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -429,21 +363,15 @@ export default class Security {
   async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteUserResponse>
   async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['username']
-    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -457,21 +385,15 @@ export default class Security {
   async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise<T.SecurityDisableUserResponse>
   async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['username']
-    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -485,21 +407,15 @@ export default class Security {
   async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise<T.SecurityEnableUserResponse>
   async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['username']
-    const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    const body = undefined
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -513,19 +429,15 @@ export default class Security {
   async enrollKibana (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async enrollKibana (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = []
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -539,19 +451,15 @@ export default class Security {
   async enrollNode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async enrollNode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = []
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -565,22 +473,16 @@ export default class Security {
   async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityGetApiKeyResponse>
   async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['id', 'name', 'owner', 'realm_name', 'username', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -594,22 +496,16 @@ export default class Security {
   async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetBuiltinPrivilegesResponse>
   async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -623,22 +519,16 @@ export default class Security {
   async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetPrivilegesResponse>
   async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['application', 'name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -662,22 +552,16 @@ export default class Security {
   async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityGetRoleResponse>
   async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
        // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
 
@@ -698,22 +582,16 @@ export default class Security {
   async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityGetRoleMappingResponse>
   async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    const body = undefined
     params = params ?? {}
 
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
-        body = body ?? {}
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -734,22 +612,16 @@ export default class Security { async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['namespace', 'service'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -773,21 +645,15 @@ export default class Security { async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['namespace', 'service'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -801,22 +667,22 @@ export default class Security { async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['grant_type', 'scope', 'password', 'kerberos_ticket', 'refresh_token', 'username'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -830,22 +696,16 @@ export default class Security { async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptions): Promise async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['username'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -866,22 +726,16 @@ export default class Security { async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['application', 'priviledge', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -895,21 +749,21 @@ export default class Security { async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -923,22 +777,22 @@ export default class Security { async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['user'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['application', 'cluster', 'index'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -959,22 +813,22 @@ export default class Security { async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['id', 'ids', 'name', 'owner', 'realm_name', 'username'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -988,22 +842,22 @@ export default class Security { async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['token', 'refresh_token', 'realm_name', 'username'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1017,20 +871,20 @@ export default class Security { async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['privileges'] const querystring: Record = {} // @ts-expect-error let body: any = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { // @ts-expect-error - querystring[key] = params[key] + body = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error - body = params[key] + querystring[key] = params[key] } } @@ -1044,21 +898,21 @@ export default class Security { async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'metadata', 'run_as', 'transient_metadata'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1072,21 +926,21 @@ export default class Security { async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'rules', 'run_as'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1100,21 +954,21 @@ export default class Security { async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptions): Promise async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['refresh', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['username', 'email', 'full_name', 'metadata', 'password', 'password_hash', 'roles', 'enabled'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -1128,19 +982,15 @@ export default class Security { async queryApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async queryApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1154,19 +1004,15 @@ export default class Security { async samlAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async samlAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1180,19 +1026,15 @@ export default class Security { async samlCompleteLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async samlCompleteLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1206,19 +1048,15 @@ export default class Security { async samlInvalidate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async samlInvalidate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1232,19 +1070,15 @@ export default class Security { async samlLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async samlLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1258,19 +1092,15 @@ export default class Security { async samlPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async samlPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} - body[key] = params[key] + querystring[key] = params[key] } } @@ -1284,19 +1114,15 @@ export default class Security { async samlServiceProviderMetadata (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise async samlServiceProviderMetadata (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['realm_name'] - const acceptedQuery: string[] = [] const querystring: Record = {} - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index 10f9bb2f8..9ee79c1d1 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -48,21 +48,15 @@ export default class Shutdown { async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -76,22 +70,16 @@ export default class Shutdown { async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -112,21 +100,15 @@ export default class Shutdown { async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
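The security.ts hunks above all apply the same mechanical change: instead of whitelisting query-string parameters (`acceptedQuery`) and spilling every other key into the request body, each method now whitelists body parameters (`acceptedBody`) and spills every other key into the query string, while endpoints without a request body pin `body` to `undefined`. As a reading aid only, here is a minimal standalone sketch of the new routing rule; the `splitParams` helper and its shape are illustrative, not part of the client:

----
// Hypothetical sketch of the parameter routing introduced by this patch.
function splitParams (
  params: Record<string, any>,
  acceptedPath: string[],
  acceptedBody: string[]
): { querystring: Record<string, any>, body: Record<string, any> | undefined } {
  const querystring: Record<string, any> = {}
  let body: Record<string, any> | undefined = params.body ?? undefined
  for (const key in params) {
    if (acceptedBody.includes(key)) {
      body = body ?? {}
      body[key] = params[key] // documented body key: serialized into the body
    } else if (acceptedPath.includes(key)) {
      continue // path keys are interpolated into the URL, never sent twice
    } else if (key !== 'body') {
      querystring[key] = params[key] // everything else becomes a query parameter
    }
  }
  return { querystring, body }
}

// splitParams({ name: 'my-api-key', owner: true }, [], [])
// => { querystring: { name: 'my-api-key', owner: true }, body: undefined }
----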
diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts
index 10f9bb2f8..9ee79c1d1 100644
--- a/src/api/api/shutdown.ts
+++ b/src/api/api/shutdown.ts
@@ -48,21 +48,15 @@ export default class Shutdown {
   async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise
   async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['node_id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params.body ?? undefined
+    const body = undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -76,22 +70,16 @@ export default class Shutdown {
   async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise
   async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['node_id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -112,21 +100,15 @@ export default class Shutdown {
   async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise
   async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['node_id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params.body ?? undefined
+    const body = undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts
index a4033c13c..57d289040 100644
--- a/src/api/api/slm.ts
+++ b/src/api/api/slm.ts
@@ -48,21 +48,15 @@ export default class Slm {
   async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise
   async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['policy_id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params.body ?? undefined
+    const body = undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -76,21 +70,15 @@ export default class Slm {
   async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise
   async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['policy_id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params.body ?? undefined
+    const body = undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -104,22 +92,16 @@ export default class Slm {
   async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise
   async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -133,22 +115,16 @@ export default class Slm {
   async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise
   async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['policy_id']
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -169,22 +145,16 @@ export default class Slm {
   async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptions): Promise
   async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -198,22 +168,16 @@ export default class Slm {
   async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptions): Promise
   async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -227,21 +191,21 @@ export default class Slm {
   async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise
   async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['policy_id']
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['config', 'name', 'repository', 'retention', 'schedule']
     const querystring: Record = {}
     // @ts-expect-error
     let body: Record = params.body ?? undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -255,22 +219,16 @@ export default class Slm {
   async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptions): Promise
   async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -284,22 +242,16 @@ export default class Slm {
   async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptions): Promise
   async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
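For write endpoints such as `slm.putLifecycle` above, the practical effect is that the keys listed in `acceptedBody` (`config`, `name`, `repository`, `retention`, `schedule`) can be passed at the top level of the request and are serialized into the JSON body, while everything else falls through to the query string. A sketch of a call against a client built from this branch (all values are illustrative):

----
await client.slm.putLifecycle({
  policy_id: 'nightly-snapshots',  // path parameter, interpolated into the URL
  schedule: '0 30 1 * * ?',        // in acceptedBody: sent in the request body
  name: '<nightly-snap-{now/d}>',  // in acceptedBody
  repository: 'my_repository',     // in acceptedBody
  master_timeout: '30s'            // not in acceptedBody: sent as a query parameter
})
----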
diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts
index 62b0df495..be6066590 100644
--- a/src/api/api/snapshot.ts
+++ b/src/api/api/snapshot.ts
@@ -48,21 +48,15 @@ export default class Snapshot {
   async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise
   async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['repository']
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params.body ?? undefined
+    const body = undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -76,21 +70,21 @@ export default class Snapshot {
   async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptions): Promise
   async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['repository', 'snapshot', 'target_snapshot']
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['indices']
     const querystring: Record = {}
     // @ts-expect-error
     let body: Record = params.body ?? undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -104,21 +98,21 @@ export default class Snapshot {
   async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptions): Promise
   async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['repository', 'snapshot']
-    const acceptedQuery: string[] = ['master_timeout', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['ignore_unavailable', 'include_global_state', 'indices', 'feature_states', 'metadata', 'partial']
     const querystring: Record = {}
     // @ts-expect-error
     let body: Record = params.body ?? undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -132,21 +126,21 @@ export default class Snapshot {
   async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise
   async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'verify', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['repository', 'type', 'settings']
     const querystring: Record = {}
     // @ts-expect-error
     let body: Record = params.body ?? undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -160,21 +154,15 @@ export default class Snapshot {
   async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise
   async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['repository', 'snapshot']
-    const acceptedQuery: string[] = ['master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params.body ?? undefined
+    const body = undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -188,21 +176,15 @@ export default class Snapshot {
   async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise
   async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['repository']
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params.body ?? undefined
+    const body = undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -216,21 +198,15 @@ export default class Snapshot {
   async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptions): Promise
   async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['repository', 'snapshot']
-    const acceptedQuery: string[] = ['ignore_unavailable', 'master_timeout', 'verbose', 'index_details', 'human', 'include_repository', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params.body ?? undefined
+    const body = undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -244,22 +220,16 @@ export default class Snapshot {
   async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise
   async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['repository']
-    const acceptedQuery: string[] = ['local', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -280,19 +250,15 @@ export default class Snapshot {
   async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
   async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['repository']
-    const acceptedQuery: string[] = ['blob_count', 'concurrency', 'read_node_count', 'early_read_node_count', 'seed', 'rare_action_probability', 'max_blob_size', 'max_total_data_size', 'timeout', 'detailed', 'rarely_abort_writes']
     const querystring: Record = {}
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -306,21 +272,21 @@ export default class Snapshot {
   async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise
   async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['repository', 'snapshot']
-    const acceptedQuery: string[] = ['master_timeout', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement']
     const querystring: Record = {}
     // @ts-expect-error
     let body: Record = params.body ?? undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -334,22 +300,16 @@ export default class Snapshot {
   async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptions): Promise
   async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['repository', 'snapshot']
-    const acceptedQuery: string[] = ['ignore_unavailable', 'master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -373,21 +333,15 @@ export default class Snapshot {
   async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise
   async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['repository']
-    const acceptedQuery: string[] = ['master_timeout', 'timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params.body ?? undefined
+    const body = undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
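snapshot.ts mixes both shapes of the rewrite: calls with a documented payload (`clone`, `create`, `createRepository`, `restore`) gain an `acceptedBody` whitelist, while body-less calls (`cleanupRepository`, `delete`, `get`, `status`, `verifyRepository`) are reduced to `const body = undefined`, so every non-path key becomes a query parameter. A minimal sketch of that body-less branch, with an illustrative helper name:

----
// Hypothetical reduction of the body-less variant used by e.g. snapshot.get.
function toQuerystring (params: Record<string, any>, acceptedPath: string[]): Record<string, any> {
  const querystring: Record<string, any> = {}
  for (const key in params) {
    if (acceptedPath.includes(key)) continue // path params only shape the URL
    if (key !== 'body') querystring[key] = params[key]
  }
  return querystring
}

// toQuerystring({ repository: 'repo', snapshot: 'snap_1', verbose: true }, ['repository', 'snapshot'])
// => { verbose: true }
----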
diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts
index 2e180dd11..6fe2a6191 100644
--- a/src/api/api/sql.ts
+++ b/src/api/api/sql.ts
@@ -48,21 +48,21 @@ export default class Sql {
   async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptions): Promise
   async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['cursor']
     const querystring: Record = {}
     // @ts-expect-error
     let body: Record = params.body ?? undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -76,19 +76,15 @@ export default class Sql {
   async deleteAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
   async deleteAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = []
     const querystring: Record = {}
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -102,19 +98,15 @@ export default class Sql {
   async getAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
   async getAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = ['delimiter', 'format', 'keep_alive', 'wait_for_completion_timeout']
     const querystring: Record = {}
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -128,19 +120,15 @@ export default class Sql {
   async getAsyncStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
   async getAsyncStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['id']
-    const acceptedQuery: string[] = []
     const querystring: Record = {}
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -154,22 +142,22 @@ export default class Sql {
   async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise
   async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['format', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['columnar', 'cursor', 'fetch_size', 'filter', 'query', 'request_timeout', 'page_timeout', 'time_zone', 'field_multi_value_leniency']
     const querystring: Record = {}
     // @ts-expect-error
     let body: Record = params?.body ?? undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -183,21 +171,21 @@ export default class Sql {
   async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptions): Promise
   async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+    const acceptedBody: string[] = ['fetch_size', 'filter', 'query', 'time_zone']
     const querystring: Record = {}
     // @ts-expect-error
     let body: Record = params.body ?? undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
-        querystring[key] = params[key]
+        body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }
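`sql.query` illustrates the split well: `query`, `cursor`, `fetch_size`, and the other keys in its `acceptedBody` list are serialized into the POST body, while `format`, which is deliberately left out of the list, lands in the query string. A sketch of a call against a client built from this branch (values are illustrative):

----
await client.sql.query({
  format: 'txt',                                            // query-string parameter
  query: 'SELECT * FROM library ORDER BY page_count DESC',  // body parameter
  fetch_size: 5                                             // body parameter
})
----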
diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts
index 416d0ea0c..08c360806 100644
--- a/src/api/api/ssl.ts
+++ b/src/api/api/ssl.ts
@@ -48,22 +48,16 @@ export default class Ssl {
   async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptions): Promise
   async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts
index 69d4cf61f..def117eaa 100644
--- a/src/api/api/tasks.ts
+++ b/src/api/api/tasks.ts
@@ -48,22 +48,16 @@ export default class Tasks {
   async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptions): Promise
   async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['task_id']
-    const acceptedQuery: string[] = ['actions', 'nodes', 'parent_task_id', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -84,21 +78,15 @@ export default class Tasks {
   async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptions): Promise
   async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['task_id']
-    const acceptedQuery: string[] = ['timeout', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params.body ?? undefined
+    const body = undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

@@ -112,22 +100,16 @@ export default class Tasks {
   async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptions): Promise
   async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['actions', 'detailed', 'group_by', 'nodes', 'parent_task_id', 'timeout', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
     const querystring: Record = {}
-    // @ts-expect-error
-    let body: Record = params?.body ?? undefined
+    const body = undefined

     params = params ?? {}
     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
+      if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        querystring[key] = params[key]
       }
     }

diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts
index d072cfb8d..5cb9ba471 100644
--- a/src/api/api/terms_enum.ts
+++ b/src/api/api/terms_enum.ts
@@ -42,21 +42,21 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque
 export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptions): Promise
 export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptions): Promise {
   const acceptedPath: string[] = ['index']
-  const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['field', 'size', 'timeout', 'case_insensitive', 'index_filter', 'string', 'search_after']
   const querystring: Record = {}
   // @ts-expect-error
   let body: Record = params.body ?? undefined

   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }

diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts
index 8c1994730..ad3a6ebfc 100644
--- a/src/api/api/termvectors.ts
+++ b/src/api/api/termvectors.ts
@@ -42,21 +42,21 @@ export default async function TermvectorsApi (this: That, p
 export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptions): Promise
 export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptions): Promise {
   const acceptedPath: string[] = ['index', 'id']
-  const acceptedQuery: string[] = ['fields', 'field_statistics', 'offsets', 'payloads', 'positions', 'preference', 'realtime', 'routing', 'term_statistics', 'version', 'version_type', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string']
+  const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer']
   const querystring: Record = {}
   // @ts-expect-error
   let body: Record = params.body ?? undefined

   for (const key in params) {
-    if (acceptedQuery.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
-      querystring[key] = params[key]
+      body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
-      body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      querystring[key] = params[key]
     }
   }

diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts
index ad3ab4fea..6274fca23 100644
--- a/src/api/api/text_structure.ts
+++ b/src/api/api/text_structure.ts
@@ -48,20 +48,20 @@ export default class TextStructure {
   async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise
   async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedQuery: string[] = ['charset', 'column_names', 'delimiter', 'explain', 'format', 'grok_pattern', 'has_header_row', 'line_merge_size_limit', 'lines_to_sample', 'quote', 'should_trim_fields', 'timeout', 'timestamp_field', 'timestamp_format']
+    const acceptedBody: string[] = ['text_files']
     const querystring: Record = {}
     // @ts-expect-error
     let body: any = params.body ?? undefined

     for (const key in params) {
-      if (acceptedQuery.includes(key)) {
+      if (acceptedBody.includes(key)) {
         // @ts-expect-error
-        querystring[key] = params[key]
+        body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
         // @ts-expect-error
-        body = params[key]
+        querystring[key] = params[key]
       }
     }
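Two endpoints above use a third shape: when the whole payload is a single parameter (`privileges` in security.putPrivileges, `text_files` in textStructure.findStructure), the rewrite assigns `body = params[key]` directly instead of building a keyed object, so the parameter's value becomes the entire request body. Assuming a client built from this branch, a `findStructure` call would then look something like this (values are illustrative):

----
await client.textStructure.findStructure({
  lines_to_sample: 2,   // not in acceptedBody: query-string parameter
  text_files: [         // in acceptedBody: its value becomes the whole request body
    JSON.stringify({ name: 'foo', size: 1 }),
    JSON.stringify({ name: 'bar', size: 2 })
  ]
})
----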
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -112,21 +100,15 @@ export default class Transform { async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const acceptedQuery: string[] = ['allow_no_match', 'from', 'size', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -140,22 +122,22 @@ export default class Transform { async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise> async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -176,21 +158,15 @@ export default class Transform { async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id', 'transform_id'] - const acceptedQuery: string[] = ['defer_validation', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -204,21 +180,15 @@ export default class Transform { async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptions): Promise async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const acceptedQuery: string[] = ['timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -232,21 +202,15 @@ export default class Transform { async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptions): Promise async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const acceptedQuery: string[] = ['allow_no_match', 'force', 'timeout', 'wait_for_checkpoint', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -260,21 +224,15 @@ export default class Transform { async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id', 'transform_id'] - const acceptedQuery: string[] = ['defer_validation', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -282,4 +240,26 @@ export default class Transform { const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_update` return await this.transport.request({ path, method, querystring, body }, options) } + + async upgradeTransforms (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async upgradeTransforms (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async upgradeTransforms (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async upgradeTransforms (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_transform/_upgrade' + return await this.transport.request({ path, method, querystring, body }, options) + } } diff --git a/src/api/api/update.ts b/src/api/api/update.ts index 83f547c92..4ab127557 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -42,21 +42,21 @@ export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptions): Promise> export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index', 'type'] - const acceptedQuery: string[] = ['if_primary_term', 'if_seq_no', 'lang', 'refresh', 'require_alias', 'retry_on_conflict', 'routing', 'timeout', 'wait_for_active_shards', '_source_excludes', '_source_includes', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', 'upsert'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts index 4e964edd6..2dc41349e 100644 --- a/src/api/api/update_by_query.ts +++ b/src/api/api/update_by_query.ts @@ -42,21 +42,21 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptions): Promise export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedQuery: string[] = ['allow_no_indices', 'analyzer', 'analyze_wildcard', 'default_operator', 'df', 'expand_wildcards', 'from', 'ignore_unavailable', 'lenient', 'pipeline', 'preference', 'refresh', 'request_cache', 'requests_per_second', 'routing', 'scroll', 'scroll_size', 'search_timeout', 'search_type', 'size', 'slices', 'sort', '_source', '_source_excludes', '_source_includes', 'stats', 'terminate_after', 'timeout', 'version', 'version_type', 'wait_for_active_shards', 'wait_for_completion', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts index e5252fae9..8af59d09b 100644 --- a/src/api/api/update_by_query_rethrottle.ts +++ b/src/api/api/update_by_query_rethrottle.ts @@ -42,21 +42,15 @@ export default async function UpdateByQueryRethrottleApi (this: That, params: T. export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_id'] - const acceptedQuery: string[] = ['requests_per_second', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index 0fa326977..6e0c9fd5a 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -48,21 +48,15 @@ export default class Watcher { async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['watch_id', 'action_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -83,21 +77,15 @@ export default class Watcher { async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['watch_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -111,21 +99,15 @@ export default class Watcher { async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['watch_id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -139,21 +121,15 @@ export default class Watcher { async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -167,22 +143,22 @@ export default class Watcher { async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['debug', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['action_modes', 'alternative_input', 'ignore_condition', 'record_execution', 'simulated_actions', 'trigger_data', 'watch'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -203,21 +179,15 @@ export default class Watcher { async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params.body ?? undefined + const body = undefined for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -231,21 +201,21 @@ export default class Watcher { async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedQuery: string[] = ['active', 'if_primary_term', 'if_sequence_number', 'version', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'transform', 'trigger'] const querystring: Record = {} // @ts-expect-error let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -259,22 +229,22 @@ export default class Watcher { async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] + const acceptedBody: string[] = ['from', 'size', 'query', 'sort', 'search_after'] const querystring: Record = {} // @ts-expect-error let body: Record = params?.body ?? undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -288,22 +258,16 @@ export default class Watcher { async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -317,22 +281,16 @@ export default class Watcher { async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['metric'] - const acceptedQuery: string[] = ['emit_stacktraces', 'metric', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -353,22 +311,16 @@ export default class Watcher { async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts index 190c2a93f..8e67e2635 100644 --- a/src/api/api/xpack.ts +++ b/src/api/api/xpack.ts @@ -48,22 +48,16 @@ export default class Xpack { async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['categories', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } @@ -77,22 +71,16 @@ export default class Xpack { async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptions): Promise async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedQuery: string[] = ['master_timeout', 'error_trace', 'filter_path', 'human', 'pretty', 'source_query_string'] const querystring: Record = {} - // @ts-expect-error - let body: Record = params?.body ?? undefined + const body = undefined params = params ?? {} for (const key in params) { - if (acceptedQuery.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { - body = body ?? {} // @ts-expect-error - body[key] = params[key] + querystring[key] = params[key] } } diff --git a/src/api/kibana.ts b/src/api/kibana.ts index 8d8e99013..b358ceed3 100644 --- a/src/api/kibana.ts +++ b/src/api/kibana.ts @@ -30,6 +30,7 @@ import { Serializer, Diagnostic, BaseConnectionPool, + TransportRequestParams, TransportRequestOptions, TransportResult } from '@elastic/transport' @@ -43,7 +44,9 @@ interface KibanaClient { diagnostic: Diagnostic name: string | symbol connectionPool: BaseConnectionPool - transport: SniffingTransport + transport: Omit & { + request: (params: TransportRequestParams, options?: TransportRequestOptions) => Promise> + } serializer: Serializer helpers: Helpers child: (opts?: ClientOptions) => KibanaClient @@ -129,16 +132,6 @@ interface KibanaClient { importDanglingIndex: (params: T.DanglingIndicesImportDanglingIndexRequest| TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions) => Promise> listDanglingIndices: (params?: T.DanglingIndicesListDanglingIndicesRequest| TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions) => Promise> } - dataFrameTransformDeprecated: { - deleteTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - getTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - getTransformStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - previewTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - putTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - startTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - stopTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - updateTransform: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - } delete: (params: T.DeleteRequest| TB.DeleteRequest, options?: TransportRequestOptions) => Promise> deleteByQuery: (params: T.DeleteByQueryRequest| TB.DeleteByQueryRequest, options?: TransportRequestOptions) => Promise> deleteByQueryRethrottle: (params: T.DeleteByQueryRethrottleRequest| TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions) => Promise> @@ -271,6 +264,8 @@ interface KibanaClient { mget: (params?: T.MgetRequest| TB.MgetRequest, options?: TransportRequestOptions) => Promise, TContext>> migration: { deprecations: (params?: T.MigrationDeprecationsRequest| TB.MigrationDeprecationsRequest, options?: TransportRequestOptions) => Promise> + getFeatureUpgradeStatus: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + postFeatureUpgrade: 
(params?: T.TODO, options?: TransportRequestOptions) => Promise> } ml: { closeJob: (params: T.MlCloseJobRequest| TB.MlCloseJobRequest, options?: TransportRequestOptions) => Promise> @@ -494,6 +489,7 @@ interface KibanaClient { startTransform: (params: T.TransformStartTransformRequest| TB.TransformStartTransformRequest, options?: TransportRequestOptions) => Promise> stopTransform: (params: T.TransformStopTransformRequest| TB.TransformStopTransformRequest, options?: TransportRequestOptions) => Promise> updateTransform: (params?: T.TransformUpdateTransformRequest| TB.TransformUpdateTransformRequest, options?: TransportRequestOptions) => Promise> + upgradeTransforms: (params?: T.TODO, options?: TransportRequestOptions) => Promise> } update: (params: T.UpdateRequest| TB.UpdateRequest, options?: TransportRequestOptions) => Promise, TContext>> updateByQuery: (params: T.UpdateByQueryRequest| TB.UpdateByQueryRequest, options?: TransportRequestOptions) => Promise> diff --git a/src/api/types.ts b/src/api/types.ts index a9eeea661..c7791a450 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -28,29 +28,21 @@ */ export type TODO = Record -export interface BulkCreateOperation extends BulkOperation { +export interface BulkCreateOperation extends BulkWriteOperation { } -export interface BulkCreateResponseItem extends BulkResponseItemBase { +export interface BulkDeleteOperation extends BulkOperationBase { } -export interface BulkDeleteOperation extends BulkOperation { +export interface BulkIndexOperation extends BulkWriteOperation { } -export interface BulkDeleteResponseItem extends BulkResponseItemBase { -} - -export interface BulkIndexOperation extends BulkOperation { -} - -export interface BulkIndexResponseItem extends BulkResponseItemBase { -} - -export interface BulkOperation { +export interface BulkOperationBase { _id?: Id _index?: IndexName - retry_on_conflict?: integer routing?: Routing + if_primary_term?: long + if_seq_no?: SequenceNumber version?: VersionNumber version_type?: VersionType } @@ -62,6 +54,8 @@ export interface BulkOperationContainer { delete?: BulkDeleteOperation } +export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' + export interface BulkRequest extends RequestBase { index?: IndexName type?: Type @@ -79,12 +73,12 @@ export interface BulkRequest extends RequestBase { export interface BulkResponse { errors: boolean - items: BulkResponseItemContainer[] + items: Partial>[] took: long ingest_took?: long } -export interface BulkResponseItemBase { +export interface BulkResponseItem { _id?: string | null _index: string status: integer @@ -99,17 +93,15 @@ export interface BulkResponseItemBase { get?: InlineGet> } -export interface BulkResponseItemContainer { - index?: BulkIndexResponseItem - create?: BulkCreateResponseItem - update?: BulkUpdateResponseItem - delete?: BulkDeleteResponseItem -} - -export interface BulkUpdateOperation extends BulkOperation { +export interface BulkUpdateOperation extends BulkOperationBase { + require_alias?: boolean + retry_on_conflict?: integer } -export interface BulkUpdateResponseItem extends BulkResponseItemBase { +export interface BulkWriteOperation extends BulkOperationBase { + dynamic_templates?: Record + pipeline?: string + require_alias?: boolean } export interface ClearScrollRequest extends RequestBase { @@ -589,7 +581,7 @@ export interface MsearchTemplateTemplateItem { export interface MtermvectorsOperation { _id: Id _index?: IndexName - doc?: object + doc?: any fields?: Fields field_statistics?: boolean filter?: 
TermvectorsFilter @@ -1660,7 +1652,7 @@ export interface UpdateRequest lang?: string refresh?: Refresh require_alias?: boolean - retry_on_conflict?: long + retry_on_conflict?: integer routing?: Routing timeout?: Time wait_for_active_shards?: WaitForActiveShards @@ -1980,14 +1972,16 @@ export interface IndicesResponseBase extends AcknowledgedResponseBase { _shards?: ShardStatistics } -export interface InlineGet { +export interface InlineGetKeys { fields?: Record found: boolean - _seq_no: SequenceNumber - _primary_term: long + _seq_no?: SequenceNumber + _primary_term?: long _routing?: Routing _source: TDocument } +export type InlineGet = InlineGetKeys | +{ [property: string]: any } export interface InlineScript extends ScriptBase { source: string @@ -2224,7 +2218,7 @@ export interface SegmentsStats { version_map_memory_in_bytes: integer } -export type SequenceNumber = integer +export type SequenceNumber = long export type Service = string @@ -3914,7 +3908,7 @@ export interface MappingFieldAliasProperty extends MappingPropertyBase { export interface MappingFieldMapping { full_name: string - mapping: Record + mapping: Partial> } export interface MappingFieldNamesField { @@ -3955,7 +3949,7 @@ export interface MappingGenericProperty extends MappingDocValuesPropertyBase { type: string } -export type MappingGeoOrientation = 'right' | 'left' +export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw' export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean @@ -4685,14 +4679,14 @@ export interface QueryDslQueryBase { export interface QueryDslQueryContainer { bool?: QueryDslBoolQuery boosting?: QueryDslBoostingQuery - common?: Record + common?: Partial> combined_fields?: QueryDslCombinedFieldsQuery constant_score?: QueryDslConstantScoreQuery dis_max?: QueryDslDisMaxQuery distance_feature?: QueryDslDistanceFeatureQuery exists?: QueryDslExistsQuery function_score?: QueryDslFunctionScoreQuery - fuzzy?: Record + fuzzy?: Partial> geo_bounding_box?: QueryDslGeoBoundingBoxQuery geo_distance?: QueryDslGeoDistanceQuery geo_polygon?: QueryDslGeoPolygonQuery @@ -4700,24 +4694,24 @@ export interface QueryDslQueryContainer { has_child?: QueryDslHasChildQuery has_parent?: QueryDslHasParentQuery ids?: QueryDslIdsQuery - intervals?: Record - match?: Record + intervals?: Partial> + match?: Partial> match_all?: QueryDslMatchAllQuery - match_bool_prefix?: Record + match_bool_prefix?: Partial> match_none?: QueryDslMatchNoneQuery - match_phrase?: Record - match_phrase_prefix?: Record + match_phrase?: Partial> + match_phrase_prefix?: Partial> more_like_this?: QueryDslMoreLikeThisQuery multi_match?: QueryDslMultiMatchQuery nested?: QueryDslNestedQuery parent_id?: QueryDslParentIdQuery percolate?: QueryDslPercolateQuery pinned?: QueryDslPinnedQuery - prefix?: Record + prefix?: Partial> query_string?: QueryDslQueryStringQuery - range?: Record + range?: Partial> rank_feature?: QueryDslRankFeatureQuery - regexp?: Record + regexp?: Partial> script?: QueryDslScriptQuery script_score?: QueryDslScriptScoreQuery shape?: QueryDslShapeQuery @@ -4729,12 +4723,12 @@ export interface QueryDslQueryContainer { span_near?: QueryDslSpanNearQuery span_not?: QueryDslSpanNotQuery span_or?: QueryDslSpanOrQuery - span_term?: Record + span_term?: Partial> span_within?: QueryDslSpanWithinQuery - term?: Record + term?: Partial> terms?: QueryDslTermsQuery - terms_set?: Record - wildcard?: Record + terms_set?: Partial> + wildcard?: 
Partial> type?: QueryDslTypeQuery } @@ -4880,7 +4874,7 @@ export interface QueryDslSpanFirstQuery extends QueryDslQueryBase { match: QueryDslSpanQuery } -export type QueryDslSpanGapQuery = Record +export type QueryDslSpanGapQuery = Partial> export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase { match: QueryDslQueryContainer @@ -4913,7 +4907,7 @@ export interface QueryDslSpanQuery { span_near?: QueryDslSpanNearQuery span_not?: QueryDslSpanNotQuery span_or?: QueryDslSpanOrQuery - span_term?: Record + span_term?: Partial> span_within?: QueryDslSpanWithinQuery } @@ -4943,17 +4937,12 @@ export interface QueryDslTermsQueryKeys extends QueryDslQueryBase { export type QueryDslTermsQuery = QueryDslTermsQueryKeys | { [property: string]: string[] | long[] | QueryDslTermsLookup } -export interface QueryDslTermsSetFieldQuery { +export interface QueryDslTermsSetQuery extends QueryDslQueryBase { minimum_should_match_field?: Field minimum_should_match_script?: Script terms: string[] } -export interface QueryDslTermsSetQueryKeys extends QueryDslQueryBase { -} -export type QueryDslTermsSetQuery = QueryDslTermsSetQueryKeys | -{ [property: string]: QueryDslTermsSetFieldQuery } - export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' export interface QueryDslThreeDimensionalPoint { @@ -5036,58 +5025,68 @@ export interface AsyncSearchStatusResponse extends AsyncSea export interface AsyncSearchSubmitRequest extends RequestBase { index?: Indices - batched_reduce_size?: long wait_for_completion_timeout?: Time keep_on_completion?: boolean - typed_keys?: boolean - aggs?: Record + keep_alive?: Time allow_no_indices?: boolean allow_partial_search_results?: boolean analyzer?: string analyze_wildcard?: boolean - collapse?: SearchFieldCollapse + batched_reduce_size?: long + ccs_minimize_roundtrips?: boolean default_operator?: DefaultOperator df?: string docvalue_fields?: Fields expand_wildcards?: ExpandWildcards explain?: boolean - from?: integer - highlight?: SearchHighlight ignore_throttled?: boolean ignore_unavailable?: boolean - indices_boost?: Record[] - keep_alive?: Time lenient?: boolean max_concurrent_shard_requests?: long - min_score?: double - post_filter?: QueryDslQueryContainer + min_compatible_shard_node?: VersionString preference?: string - profile?: boolean - pit?: SearchPointInTimeReference - query?: QueryDslQueryContainer + pre_filter_shard_size?: long request_cache?: boolean - rescore?: SearchRescore[] routing?: Routing - script_fields?: Record - search_after?: SearchSortResults + scroll?: Time search_type?: SearchType - sequence_number_primary_term?: boolean - size?: integer - sort?: SearchSort - _source?: boolean | SearchSourceFilter stats?: string[] stored_fields?: Fields - suggest?: Record suggest_field?: Field suggest_mode?: SuggestMode suggest_size?: long suggest_text?: string terminate_after?: long - timeout?: string + timeout?: Time + track_total_hits?: boolean | integer track_scores?: boolean - track_total_hits?: boolean + typed_keys?: boolean + rest_total_hits_as_int?: boolean version?: boolean + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + seq_no_primary_term?: boolean + q?: string + size?: integer + from?: integer + sort?: string | string[] + aggs?: Record + aggregations?: Record + collapse?: SearchFieldCollapse + highlight?: SearchHighlight + indices_boost?: Record[] + min_score?: double + post_filter?: QueryDslQueryContainer + profile?: boolean + query?: 
QueryDslQueryContainer + rescore?: SearchRescore | SearchRescore[] + script_fields?: Record + search_after?: SearchSortResults + slice?: SlicedScroll fields?: (Field | DateField)[] + suggest?: SearchSuggestContainer | Record + pit?: SearchPointInTimeReference runtime_mappings?: MappingRuntimeFields } @@ -8831,7 +8830,7 @@ export interface IndicesGetFieldMappingResponse extends DictionaryResponseBase + mappings: Partial> } export interface IndicesGetIndexTemplateIndexTemplate { @@ -9525,14 +9524,42 @@ export interface IndicesUnfreezeResponse extends AcknowledgedResponseBase { shards_acknowledged: boolean } -export interface IndicesUpdateAliasesIndicesUpdateAliasBulk { - [key: string]: never +export interface IndicesUpdateAliasesAction { + add?: IndicesUpdateAliasesAddAction + remove?: IndicesUpdateAliasesRemoveAction + remove_index?: IndicesUpdateAliasesRemoveIndexAction +} + +export interface IndicesUpdateAliasesAddAction { + alias?: IndexAlias + aliases?: IndexAlias | IndexAlias[] + filter?: QueryDslQueryContainer + index?: IndexName + indices?: Indices + index_routing?: Routing + is_hidden?: boolean + is_write_index?: boolean + routing?: Routing + search_routing?: Routing +} + +export interface IndicesUpdateAliasesRemoveAction { + alias?: IndexAlias + aliases?: IndexAlias | IndexAlias[] + index?: IndexName + indices?: Indices + must_exist?: boolean +} + +export interface IndicesUpdateAliasesRemoveIndexAction { + index?: IndexName + indices?: Indices } export interface IndicesUpdateAliasesRequest extends RequestBase { master_timeout?: Time timeout?: Time - actions?: IndicesUpdateAliasesIndicesUpdateAliasBulk[] + actions?: IndicesUpdateAliasesAction[] } export interface IndicesUpdateAliasesResponse extends AcknowledgedResponseBase { @@ -11220,6 +11247,7 @@ export interface MlForecastRequest extends RequestBase { job_id: Id duration?: Time expires_in?: Time + max_model_memory?: string } export interface MlForecastResponse extends AcknowledgedResponseBase { @@ -15243,7 +15271,7 @@ export interface XpackUsageKibanaUrlConfig extends XpackUsageBaseUrlConfig { export interface XpackUsageMachineLearning extends XpackUsageBase { datafeeds: Record - jobs: Record | Record + jobs: Record | Partial> node_count: integer data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs inference: XpackUsageMlInference @@ -15491,6 +15519,10 @@ export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter _all: XpackUsageCounter } +export interface SpecUtilsAdditionalProperty { + [key: string]: never +} + export interface SpecUtilsAdditionalProperties { [key: string]: never } @@ -15503,10 +15535,6 @@ export interface SpecUtilsCommonQueryParameters { source_query_string?: string } -export interface SpecUtilsAdditionalProperty { - [key: string]: never -} - export interface SpecUtilsCommonCatQueryParameters { format?: string h?: Names diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 2671958fd..a86224423 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -28,29 +28,21 @@ */ export type TODO = Record -export interface BulkCreateOperation extends BulkOperation { +export interface BulkCreateOperation extends BulkWriteOperation { } -export interface BulkCreateResponseItem extends BulkResponseItemBase { +export interface BulkDeleteOperation extends BulkOperationBase { } -export interface BulkDeleteOperation extends BulkOperation { +export interface BulkIndexOperation extends BulkWriteOperation { } -export interface BulkDeleteResponseItem extends 
BulkResponseItemBase { -} - -export interface BulkIndexOperation extends BulkOperation { -} - -export interface BulkIndexResponseItem extends BulkResponseItemBase { -} - -export interface BulkOperation { +export interface BulkOperationBase { _id?: Id _index?: IndexName - retry_on_conflict?: integer routing?: Routing + if_primary_term?: long + if_seq_no?: SequenceNumber version?: VersionNumber version_type?: VersionType } @@ -62,6 +54,8 @@ export interface BulkOperationContainer { delete?: BulkDeleteOperation } +export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' + export interface BulkRequest extends RequestBase { index?: IndexName type?: Type @@ -80,12 +74,12 @@ export interface BulkRequest extends RequestBase { export interface BulkResponse { errors: boolean - items: BulkResponseItemContainer[] + items: Partial>[] took: long ingest_took?: long } -export interface BulkResponseItemBase { +export interface BulkResponseItem { _id?: string | null _index: string status: integer @@ -100,17 +94,15 @@ export interface BulkResponseItemBase { get?: InlineGet> } -export interface BulkResponseItemContainer { - index?: BulkIndexResponseItem - create?: BulkCreateResponseItem - update?: BulkUpdateResponseItem - delete?: BulkDeleteResponseItem -} - -export interface BulkUpdateOperation extends BulkOperation { +export interface BulkUpdateOperation extends BulkOperationBase { + require_alias?: boolean + retry_on_conflict?: integer } -export interface BulkUpdateResponseItem extends BulkResponseItemBase { +export interface BulkWriteOperation extends BulkOperationBase { + dynamic_templates?: Record + pipeline?: string + require_alias?: boolean } export interface ClearScrollRequest extends RequestBase { @@ -617,7 +609,7 @@ export interface MsearchTemplateTemplateItem { export interface MtermvectorsOperation { _id: Id _index?: IndexName - doc?: object + doc?: any fields?: Fields field_statistics?: boolean filter?: TermvectorsFilter @@ -1748,7 +1740,7 @@ export interface UpdateRequest lang?: string refresh?: Refresh require_alias?: boolean - retry_on_conflict?: long + retry_on_conflict?: integer routing?: Routing timeout?: Time wait_for_active_shards?: WaitForActiveShards @@ -2076,14 +2068,16 @@ export interface IndicesResponseBase extends AcknowledgedResponseBase { _shards?: ShardStatistics } -export interface InlineGet { +export interface InlineGetKeys { fields?: Record found: boolean - _seq_no: SequenceNumber - _primary_term: long + _seq_no?: SequenceNumber + _primary_term?: long _routing?: Routing _source: TDocument } +export type InlineGet = InlineGetKeys | +{ [property: string]: any } export interface InlineScript extends ScriptBase { source: string @@ -2320,7 +2314,7 @@ export interface SegmentsStats { version_map_memory_in_bytes: integer } -export type SequenceNumber = integer +export type SequenceNumber = long export type Service = string @@ -4010,7 +4004,7 @@ export interface MappingFieldAliasProperty extends MappingPropertyBase { export interface MappingFieldMapping { full_name: string - mapping: Record + mapping: Partial> } export interface MappingFieldNamesField { @@ -4051,7 +4045,7 @@ export interface MappingGenericProperty extends MappingDocValuesPropertyBase { type: string } -export type MappingGeoOrientation = 'right' | 'left' +export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw' export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean @@ -4781,14 +4775,14 
@@ export interface QueryDslQueryBase { export interface QueryDslQueryContainer { bool?: QueryDslBoolQuery boosting?: QueryDslBoostingQuery - common?: Record + common?: Partial> combined_fields?: QueryDslCombinedFieldsQuery constant_score?: QueryDslConstantScoreQuery dis_max?: QueryDslDisMaxQuery distance_feature?: QueryDslDistanceFeatureQuery exists?: QueryDslExistsQuery function_score?: QueryDslFunctionScoreQuery - fuzzy?: Record + fuzzy?: Partial> geo_bounding_box?: QueryDslGeoBoundingBoxQuery geo_distance?: QueryDslGeoDistanceQuery geo_polygon?: QueryDslGeoPolygonQuery @@ -4796,24 +4790,24 @@ export interface QueryDslQueryContainer { has_child?: QueryDslHasChildQuery has_parent?: QueryDslHasParentQuery ids?: QueryDslIdsQuery - intervals?: Record - match?: Record + intervals?: Partial> + match?: Partial> match_all?: QueryDslMatchAllQuery - match_bool_prefix?: Record + match_bool_prefix?: Partial> match_none?: QueryDslMatchNoneQuery - match_phrase?: Record - match_phrase_prefix?: Record + match_phrase?: Partial> + match_phrase_prefix?: Partial> more_like_this?: QueryDslMoreLikeThisQuery multi_match?: QueryDslMultiMatchQuery nested?: QueryDslNestedQuery parent_id?: QueryDslParentIdQuery percolate?: QueryDslPercolateQuery pinned?: QueryDslPinnedQuery - prefix?: Record + prefix?: Partial> query_string?: QueryDslQueryStringQuery - range?: Record + range?: Partial> rank_feature?: QueryDslRankFeatureQuery - regexp?: Record + regexp?: Partial> script?: QueryDslScriptQuery script_score?: QueryDslScriptScoreQuery shape?: QueryDslShapeQuery @@ -4825,12 +4819,12 @@ export interface QueryDslQueryContainer { span_near?: QueryDslSpanNearQuery span_not?: QueryDslSpanNotQuery span_or?: QueryDslSpanOrQuery - span_term?: Record + span_term?: Partial> span_within?: QueryDslSpanWithinQuery - term?: Record + term?: Partial> terms?: QueryDslTermsQuery - terms_set?: Record - wildcard?: Record + terms_set?: Partial> + wildcard?: Partial> type?: QueryDslTypeQuery } @@ -4976,7 +4970,7 @@ export interface QueryDslSpanFirstQuery extends QueryDslQueryBase { match: QueryDslSpanQuery } -export type QueryDslSpanGapQuery = Record +export type QueryDslSpanGapQuery = Partial> export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase { match: QueryDslQueryContainer @@ -5009,7 +5003,7 @@ export interface QueryDslSpanQuery { span_near?: QueryDslSpanNearQuery span_not?: QueryDslSpanNotQuery span_or?: QueryDslSpanOrQuery - span_term?: Record + span_term?: Partial> span_within?: QueryDslSpanWithinQuery } @@ -5039,17 +5033,12 @@ export interface QueryDslTermsQueryKeys extends QueryDslQueryBase { export type QueryDslTermsQuery = QueryDslTermsQueryKeys | { [property: string]: string[] | long[] | QueryDslTermsLookup } -export interface QueryDslTermsSetFieldQuery { +export interface QueryDslTermsSetQuery extends QueryDslQueryBase { minimum_should_match_field?: Field minimum_should_match_script?: Script terms: string[] } -export interface QueryDslTermsSetQueryKeys extends QueryDslQueryBase { -} -export type QueryDslTermsSetQuery = QueryDslTermsSetQueryKeys | -{ [property: string]: QueryDslTermsSetFieldQuery } - export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' export interface QueryDslThreeDimensionalPoint { @@ -5132,61 +5121,85 @@ export interface AsyncSearchStatusResponse extends AsyncSea export interface AsyncSearchSubmitRequest extends RequestBase { index?: Indices - batched_reduce_size?: long wait_for_completion_timeout?: Time 
keep_on_completion?: boolean + keep_alive?: Time + allow_no_indices?: boolean + allow_partial_search_results?: boolean + analyzer?: string + analyze_wildcard?: boolean + batched_reduce_size?: long + ccs_minimize_roundtrips?: boolean + default_operator?: DefaultOperator + df?: string + docvalue_fields?: Fields + expand_wildcards?: ExpandWildcards + explain?: boolean + ignore_throttled?: boolean + ignore_unavailable?: boolean + lenient?: boolean + max_concurrent_shard_requests?: long + min_compatible_shard_node?: VersionString + preference?: string + pre_filter_shard_size?: long + request_cache?: boolean + routing?: Routing + scroll?: Time + search_type?: SearchType + stats?: string[] + stored_fields?: Fields + suggest_field?: Field + suggest_mode?: SuggestMode + suggest_size?: long + suggest_text?: string + terminate_after?: long + timeout?: Time + track_total_hits?: boolean | integer + track_scores?: boolean typed_keys?: boolean + rest_total_hits_as_int?: boolean + version?: boolean + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + seq_no_primary_term?: boolean + q?: string + size?: integer + from?: integer + sort?: string | string[] /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { aggs?: Record - allow_no_indices?: boolean - allow_partial_search_results?: boolean - analyzer?: string - analyze_wildcard?: boolean + aggregations?: Record collapse?: SearchFieldCollapse - default_operator?: DefaultOperator - df?: string - docvalue_fields?: Fields - expand_wildcards?: ExpandWildcards explain?: boolean from?: integer highlight?: SearchHighlight - ignore_throttled?: boolean - ignore_unavailable?: boolean + track_total_hits?: boolean | integer indices_boost?: Record[] - keep_alive?: Time - lenient?: boolean - max_concurrent_shard_requests?: long + docvalue_fields?: SearchDocValueField | (Field | SearchDocValueField)[] min_score?: double post_filter?: QueryDslQueryContainer - preference?: string profile?: boolean - pit?: SearchPointInTimeReference query?: QueryDslQueryContainer - request_cache?: boolean - rescore?: SearchRescore[] - routing?: Routing + rescore?: SearchRescore | SearchRescore[] script_fields?: Record search_after?: SearchSortResults - search_type?: SearchType - sequence_number_primary_term?: boolean size?: integer + slice?: SlicedScroll sort?: SearchSort - _source?: boolean | SearchSourceFilter - stats?: string[] - stored_fields?: Fields - suggest?: Record - suggest_field?: Field - suggest_mode?: SuggestMode - suggest_size?: long - suggest_text?: string + _source?: boolean | Fields | SearchSourceFilter + fields?: (Field | DateField)[] + suggest?: SearchSuggestContainer | Record terminate_after?: long timeout?: string track_scores?: boolean - track_total_hits?: boolean version?: boolean - fields?: (Field | DateField)[] + seq_no_primary_term?: boolean + stored_fields?: Fields + pit?: SearchPointInTimeReference runtime_mappings?: MappingRuntimeFields + stats?: string[] } } @@ -8983,7 +8996,7 @@ export interface IndicesGetFieldMappingResponse extends DictionaryResponseBase + mappings: Partial> } export interface IndicesGetIndexTemplateIndexTemplate { @@ -9704,8 +9717,36 @@ export interface IndicesUnfreezeResponse extends AcknowledgedResponseBase { shards_acknowledged: boolean } -export interface IndicesUpdateAliasesIndicesUpdateAliasBulk { - [key: string]: never +export interface IndicesUpdateAliasesAction { + add?: IndicesUpdateAliasesAddAction + remove?: 
IndicesUpdateAliasesRemoveAction + remove_index?: IndicesUpdateAliasesRemoveIndexAction +} + +export interface IndicesUpdateAliasesAddAction { + alias?: IndexAlias + aliases?: IndexAlias | IndexAlias[] + filter?: QueryDslQueryContainer + index?: IndexName + indices?: Indices + index_routing?: Routing + is_hidden?: boolean + is_write_index?: boolean + routing?: Routing + search_routing?: Routing +} + +export interface IndicesUpdateAliasesRemoveAction { + alias?: IndexAlias + aliases?: IndexAlias | IndexAlias[] + index?: IndexName + indices?: Indices + must_exist?: boolean +} + +export interface IndicesUpdateAliasesRemoveIndexAction { + index?: IndexName + indices?: Indices } export interface IndicesUpdateAliasesRequest extends RequestBase { @@ -9713,7 +9754,7 @@ export interface IndicesUpdateAliasesRequest extends RequestBase { timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - actions?: IndicesUpdateAliasesIndicesUpdateAliasBulk[] + actions?: IndicesUpdateAliasesAction[] } } @@ -11434,6 +11475,7 @@ export interface MlForecastRequest extends RequestBase { body?: { duration?: Time expires_in?: Time + max_model_memory?: string } } @@ -15631,7 +15673,7 @@ export interface XpackUsageKibanaUrlConfig extends XpackUsageBaseUrlConfig { export interface XpackUsageMachineLearning extends XpackUsageBase { datafeeds: Record - jobs: Record | Record + jobs: Record | Partial> node_count: integer data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs inference: XpackUsageMlInference @@ -15879,6 +15921,10 @@ export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter _all: XpackUsageCounter } +export interface SpecUtilsAdditionalProperty { + [key: string]: never +} + export interface SpecUtilsAdditionalProperties { [key: string]: never } @@ -15891,10 +15937,6 @@ export interface SpecUtilsCommonQueryParameters { source_query_string?: string } -export interface SpecUtilsAdditionalProperty { - [key: string]: never -} - export interface SpecUtilsCommonCatQueryParameters { format?: string h?: Names diff --git a/src/helpers.ts b/src/helpers.ts index 8c3e7c9c4..b945ac20c 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -821,6 +821,7 @@ export default class Helpers { for (let i = 0, len = items.length; i < len; i++) { const action = items[i] const operation = Object.keys(action)[0] + // @ts-expect-error const responseItem = action[operation as keyof T.BulkResponseItemContainer] assert(responseItem !== undefined, 'The responseItem is undefined, please file a bug report') const indexSlice = operation !== 'delete' ? i * 2 : i @@ -838,7 +839,6 @@ export default class Helpers { } else { onDrop({ status: responseItem.status, - // @ts-expect-error error: responseItem.error ?? 
            null,
            operation: serializer.deserialize(bulkBody[indexSlice]),
            document: operation !== 'delete'
diff --git a/test/integration/helper.js b/test/integration/helper.js
index eb2021040..b8e965b89 100644
--- a/test/integration/helper.js
+++ b/test/integration/helper.js
@@ -88,6 +88,7 @@ function isXPackTemplate (name) {
     case '.deprecation-indexing-mappings':
     case '.deprecation-indexing-settings':
     case 'data-streams-mappings':
+    case '.logs-deprecation.elasticsearch-default':
       return true
   }
   return false
diff --git a/test/integration/integration/test-runner.js b/test/integration/integration/test-runner.js
index 824fb05fb..49807d548 100644
--- a/test/integration/integration/test-runner.js
+++ b/test/integration/integration/test-runner.js
@@ -103,7 +103,7 @@ function build (opts = {}) {
     if (isXPack) {
       // clean data streams
-      await client.indices.deleteDataStream({ name: '*' })
+      await client.indices.deleteDataStream({ name: '*', expand_wildcards: 'all' })
     }
     // clean all indices
diff --git a/test/unit/api.test.ts b/test/unit/api.test.ts
new file mode 100644
index 000000000..95f352035
--- /dev/null
+++ b/test/unit/api.test.ts
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { test } from 'tap'
+import { connection } from '../utils'
+import { Client } from '../..'
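The four tests that follow all pin down the same routing rule implemented by the generated API methods patched earlier in this series: parameters listed in a method's `acceptedBody` whitelist are serialized into the request body, path parameters build the URL, and everything else (except an explicit `body` key) lands in the querystring. A minimal standalone TypeScript sketch of that rule -- `routeParams` is an illustrative name for this note, not an export of the client:

function routeParams (params: Record<string, any>, acceptedPath: string[], acceptedBody: string[]) {
  const querystring: Record<string, any> = {}
  let body: Record<string, any> | undefined
  for (const key in params) {
    if (acceptedBody.includes(key)) {
      body = body ?? {}
      body[key] = params[key] // e.g. `query` for client.search()
    } else if (acceptedPath.includes(key)) {
      continue // e.g. `index` is interpolated into the request path
    } else if (key !== 'body') {
      querystring[key] = params[key] // e.g. `allow_no_indices`
    }
  }
  return { querystring, body }
}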
+ +test('Api without body key and top level body', async t => { + t.plan(2) + + const Connection = connection.buildMockConnection({ + onRequest (opts) { + // @ts-expect-error + t.same(JSON.parse(opts.body), { query: { match_all: {} } }) + return { + statusCode: 200, + body: { took: 42 } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection + }) + + const response = await client.search({ + index: 'test', + allow_no_indices: true, + query: { match_all: {} } + }) + + t.equal(response.took, 42) +}) + +test('Api with body key and top level body', async t => { + t.plan(2) + + const Connection = connection.buildMockConnection({ + onRequest (opts) { + // @ts-expect-error + t.same(JSON.parse(opts.body), { query: { match_all: {} } }) + return { + statusCode: 200, + body: { took: 42 } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection + }) + + const response = await client.search({ + index: 'test', + allow_no_indices: true, + body: { + query: { match_all: {} } + } + }) + + t.equal(response.took, 42) +}) + +test('Api without body key and keyed body', async t => { + t.plan(2) + + const Connection = connection.buildMockConnection({ + onRequest (opts) { + // @ts-expect-error + t.same(JSON.parse(opts.body), { foo: 'bar' }) + return { + statusCode: 200, + body: { result: 'created' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection + }) + + const response = await client.create({ + index: 'test', + id: '1', + document: { foo: 'bar' } + }) + + t.equal(response.result, 'created') +}) + +test('Api with body key and keyed body', async t => { + t.plan(2) + + const Connection = connection.buildMockConnection({ + onRequest (opts) { + // @ts-expect-error + t.same(JSON.parse(opts.body), { foo: 'bar' }) + return { + statusCode: 200, + body: { result: 'created' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection + }) + + const response = await client.create({ + index: 'test', + id: '1', + body: { foo: 'bar' } + }) + + t.equal(response.result, 'created') +}) diff --git a/test/unit/helpers/scroll.test.ts b/test/unit/helpers/scroll.test.ts index b7ab9f735..43885cb29 100644 --- a/test/unit/helpers/scroll.test.ts +++ b/test/unit/helpers/scroll.test.ts @@ -297,7 +297,7 @@ test('Scroll search documents', async t => { t.equal(params.querystring, 'filter_path=hits.hits._source%2C_scroll_id&scroll=1m') } else { if (params.method !== 'DELETE') { - t.equal(params.body, '{"scroll":"1m","scroll_id":"id"}') + t.equal(params.querystring, 'scroll=1m&scroll_id=id') } } return { @@ -381,7 +381,7 @@ test('Fix querystring for scroll search', async t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { if (count === 0) { - t.equal(params.querystring, 'scroll=1m') + t.equal(params.querystring, 'size=1&scroll=1m') } else { if (params.method !== 'DELETE') { if (params.method === 'POST') { From 23633a18021d2638d2a6dd8f71a4cc2dde7aee3e Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 18 Oct 2021 16:03:38 +0200 Subject: [PATCH 081/647] Bumped v8.0.0-canary.24 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index b4a0c2eab..46ee4a390 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", "version": "8.0.0", - "versionCanary": "8.0.0-canary.23", + "versionCanary": "8.0.0-canary.24", "description": "The official 
Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From dc74a804025e4503badded5ddd20fede3b0042db Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Wed, 20 Oct 2021 08:08:07 +0200 Subject: [PATCH 082/647] API generation for v8 (#1571) --- src/api/api/ilm.ts | 14 ++-- src/api/api/snapshot.ts | 22 +++--- src/api/api/transform.ts | 24 ++++-- src/api/kibana.ts | 4 +- src/api/types.ts | 149 ++++++++++++++++++++++++++--------- src/api/typesWithBodyKey.ts | 153 +++++++++++++++++++++++++++--------- 6 files changed, 264 insertions(+), 102 deletions(-) diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index 7ccac54f0..f0918962b 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -47,7 +47,7 @@ export default class Ilm { async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy'] + const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -61,7 +61,7 @@ export default class Ilm { } const method = 'DELETE' - const path = `/_ilm/policy/${encodeURIComponent(params.policy.toString())}` + const path = `/_ilm/policy/${encodeURIComponent(params.name.toString())}` return await this.transport.request({ path, method, querystring, body }, options) } @@ -91,7 +91,7 @@ export default class Ilm { async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy'] + const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -107,9 +107,9 @@ export default class Ilm { let method = '' let path = '' - if (params.policy != null) { + if (params.name != null) { method = 'GET' - path = `/_ilm/policy/${encodeURIComponent(params.policy.toString())}` + path = `/_ilm/policy/${encodeURIComponent(params.name.toString())}` } else { method = 'GET' path = '/_ilm/policy' @@ -194,7 +194,7 @@ export default class Ilm { async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['policy'] const querystring: Record = {} // @ts-expect-error @@ -214,7 +214,7 @@ export default class Ilm { } const method = 'PUT' - const path = `/_ilm/policy/${encodeURIComponent(params.policy.toString())}` + const path = `/_ilm/policy/${encodeURIComponent(params.name.toString())}` return await this.transport.request({ path, method, querystring, body 
}, options) } diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index be6066590..9363932ff 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -47,7 +47,7 @@ export default class Snapshot { async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository'] + const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -61,7 +61,7 @@ export default class Snapshot { } const method = 'POST' - const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/_cleanup` + const path = `/_snapshot/${encodeURIComponent(params.name.toString())}/_cleanup` return await this.transport.request({ path, method, querystring, body }, options) } @@ -125,7 +125,7 @@ export default class Snapshot { async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['repository', 'type', 'settings'] const querystring: Record = {} // @ts-expect-error @@ -145,7 +145,7 @@ export default class Snapshot { } const method = 'PUT' - const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}` + const path = `/_snapshot/${encodeURIComponent(params.name.toString())}` return await this.transport.request({ path, method, querystring, body }, options) } @@ -175,7 +175,7 @@ export default class Snapshot { async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository'] + const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -189,7 +189,7 @@ export default class Snapshot { } const method = 'DELETE' - const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}` + const path = `/_snapshot/${encodeURIComponent(params.name.toString())}` return await this.transport.request({ path, method, querystring, body }, options) } @@ -219,7 +219,7 @@ export default class Snapshot { async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | 
TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository'] + const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -235,9 +235,9 @@ export default class Snapshot { let method = '' let path = '' - if (params.repository != null) { + if (params.name != null) { method = 'GET' - path = `/_snapshot/${encodeURIComponent(params.repository.toString())}` + path = `/_snapshot/${encodeURIComponent(params.name.toString())}` } else { method = 'GET' path = '/_snapshot' @@ -332,7 +332,7 @@ export default class Snapshot { async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository'] + const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -346,7 +346,7 @@ export default class Snapshot { } const method = 'POST' - const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/_verify` + const path = `/_snapshot/${encodeURIComponent(params.name.toString())}/_verify` return await this.transport.request({ path, method, querystring, body }, options) } } diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index 6a804f970..3e9778fea 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -157,12 +157,18 @@ export default class Transform { async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id', 'transform_id'] + const acceptedPath: string[] = ['transform_id'] + const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error @@ -223,12 +229,18 @@ export default class Transform { async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id', 'transform_id'] + const acceptedPath: string[] = ['transform_id'] + const acceptedBody: string[] = ['dest', 'description', 'frequency', 'source', 'settings', 'sync', 'retention_policy'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + let body: Record = params.body ?? undefined for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error diff --git a/src/api/kibana.ts b/src/api/kibana.ts index b358ceed3..745da0d25 100644 --- a/src/api/kibana.ts +++ b/src/api/kibana.ts @@ -164,7 +164,7 @@ interface KibanaClient { getScript: (params: T.GetScriptRequest| TB.GetScriptRequest, options?: TransportRequestOptions) => Promise> getScriptContext: (params?: T.GetScriptContextRequest| TB.GetScriptContextRequest, options?: TransportRequestOptions) => Promise> getScriptLanguages: (params?: T.GetScriptLanguagesRequest| TB.GetScriptLanguagesRequest, options?: TransportRequestOptions) => Promise> - getSource: (params?: T.GetSourceRequest| TB.GetSourceRequest, options?: TransportRequestOptions) => Promise, TContext>> + getSource: (params: T.GetSourceRequest| TB.GetSourceRequest, options?: TransportRequestOptions) => Promise, TContext>> graph: { explore: (params: T.GraphExploreRequest| TB.GraphExploreRequest, options?: TransportRequestOptions) => Promise> } @@ -488,7 +488,7 @@ interface KibanaClient { putTransform: (params: T.TransformPutTransformRequest| TB.TransformPutTransformRequest, options?: TransportRequestOptions) => Promise> startTransform: (params: T.TransformStartTransformRequest| TB.TransformStartTransformRequest, options?: TransportRequestOptions) => Promise> stopTransform: (params: T.TransformStopTransformRequest| TB.TransformStopTransformRequest, options?: TransportRequestOptions) => Promise> - updateTransform: (params?: T.TransformUpdateTransformRequest| TB.TransformUpdateTransformRequest, options?: TransportRequestOptions) => Promise> + updateTransform: (params: T.TransformUpdateTransformRequest| TB.TransformUpdateTransformRequest, options?: TransportRequestOptions) => Promise> upgradeTransforms: (params?: T.TODO, options?: TransportRequestOptions) => Promise> } update: (params: T.UpdateRequest| TB.UpdateRequest, options?: TransportRequestOptions) => Promise, TContext>> diff --git a/src/api/types.ts b/src/api/types.ts index c7791a450..2408dff62 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -428,7 +428,19 @@ export interface GetScriptLanguagesResponse { types_allowed: string[] } -export interface GetSourceRequest extends GetRequest { +export interface GetSourceRequest { + id: Id + index: IndexName + preference?: string 
+ realtime?: boolean + refresh?: boolean + routing?: Routing + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + stored_fields?: Fields + version?: VersionNumber + version_type?: VersionType } export type GetSourceResponse = TDocument @@ -803,6 +815,7 @@ export interface ReindexSource { slice?: SlicedScroll sort?: SearchSort _source?: Fields + runtime_mappings?: MappingRuntimeFields } export interface ReindexRethrottleReindexNode extends SpecUtilsBaseNode { @@ -873,7 +886,7 @@ export interface ScriptsPainlessExecuteResponse { } export interface ScrollRequest extends RequestBase { - scroll_id?: Id + scroll_id?: ScrollId scroll?: Time rest_total_hits_as_int?: boolean } @@ -1847,7 +1860,6 @@ export interface ElasticsearchVersionInfo { } export interface EmptyObject { - [key: string]: never } export type EpochMillis = string | long @@ -1857,7 +1869,7 @@ export interface ErrorCauseKeys { reason: string stack_trace?: string caused_by?: ErrorCause | string - root_cause: (ErrorCause | string)[] + root_cause?: (ErrorCause | string)[] suppressed?: (ErrorCause | string)[] } export type ErrorCause = ErrorCauseKeys | @@ -2285,7 +2297,6 @@ export type TimeZone = string export type Timestamp = string export interface Transform { - [key: string]: never } export interface TransformContainer { @@ -2347,6 +2358,8 @@ export interface WriteResponseBase { forced_refresh?: boolean } +export type byte = number + export type double = number export type float = number @@ -2355,6 +2368,8 @@ export type integer = number export type long = number +export type short = number + export type uint = number export type ulong = number @@ -2964,7 +2979,6 @@ export interface AggregationsParentAggregation extends AggregationsBucketAggrega } export interface AggregationsPercentageScoreHeuristic { - [key: string]: never } export interface AggregationsPercentileItem { @@ -3825,6 +3839,11 @@ export interface MappingBooleanProperty extends MappingDocValuesPropertyBase { type: 'boolean' } +export interface MappingByteNumberProperty extends MappingStandardNumberProperty { + type: 'byte' + null_value?: byte +} + export interface MappingCompletionProperty extends MappingDocValuesPropertyBase { analyzer?: string contexts?: MappingSuggestContext[] @@ -3885,6 +3904,11 @@ export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase { doc_values?: boolean } +export interface MappingDoubleNumberProperty extends MappingStandardNumberProperty { + type: 'double' + null_value?: double +} + export interface MappingDoubleRangeProperty extends MappingRangePropertyBase { type: 'double_range' } @@ -3908,7 +3932,7 @@ export interface MappingFieldAliasProperty extends MappingPropertyBase { export interface MappingFieldMapping { full_name: string - mapping: Partial> + mapping: Partial> } export interface MappingFieldNamesField { @@ -3930,6 +3954,11 @@ export interface MappingFlattenedProperty extends MappingPropertyBase { type: 'flattened' } +export interface MappingFloatNumberProperty extends MappingStandardNumberProperty { + type: 'float' + null_value?: float +} + export interface MappingFloatRangeProperty extends MappingRangePropertyBase { type: 'float_range' } @@ -3969,6 +3998,11 @@ export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { export type MappingGeoStrategy = 'recursive' | 'term' +export interface MappingHalfFloatNumberProperty extends MappingStandardNumberProperty { + type: 'half_float' + null_value?: float +} + export interface MappingHistogramProperty extends 
MappingPropertyBase { ignore_malformed?: boolean type: 'histogram' @@ -3980,6 +4014,11 @@ export interface MappingIndexField { export type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets' +export interface MappingIntegerNumberProperty extends MappingStandardNumberProperty { + type: 'integer' + null_value?: integer +} + export interface MappingIntegerRangeProperty extends MappingRangePropertyBase { type: 'integer_range' } @@ -4013,6 +4052,11 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { type: 'keyword' } +export interface MappingLongNumberProperty extends MappingStandardNumberProperty { + type: 'long' + null_value?: long +} + export interface MappingLongRangeProperty extends MappingRangePropertyBase { type: 'long_range' } @@ -4030,24 +4074,20 @@ export interface MappingNestedProperty extends MappingCorePropertyBase { type: 'nested' } -export interface MappingNumberProperty extends MappingDocValuesPropertyBase { - boost?: double - coerce?: boolean - fielddata?: IndicesNumericFielddata - ignore_malformed?: boolean +export type MappingNumberProperty = MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingDoubleNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingShortNumberProperty | MappingByteNumberProperty | MappingUnsignedLongNumberProperty | MappingScaledFloatNumberProperty + +export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase { index?: boolean - null_value?: double - scaling_factor?: double - type: MappingNumberType + ignore_malformed?: boolean } -export type MappingNumberType = 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer' | 'long' | 'short' | 'byte' | 'unsigned_long' - export interface MappingObjectProperty extends MappingCorePropertyBase { enabled?: boolean type?: 'object' } +export type MappingOnScriptError = 'fail' | 'continue' + export interface MappingPercolatorProperty extends MappingPropertyBase { type: 'percolator' } @@ -4102,6 +4142,13 @@ export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point export type MappingRuntimeFields = Record +export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase { + type: 'scaled_float' + coerce?: boolean + null_value?: double + scaling_factor?: double +} + export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase { analyzer?: string index?: boolean @@ -4122,6 +4169,11 @@ export interface MappingShapeProperty extends MappingDocValuesPropertyBase { type: 'shape' } +export interface MappingShortNumberProperty extends MappingStandardNumberProperty { + type: 'short' + null_value?: short +} + export interface MappingSizeField { enabled: boolean } @@ -4134,6 +4186,12 @@ export interface MappingSourceField { includes?: string[] } +export interface MappingStandardNumberProperty extends MappingNumberPropertyBase { + coerce?: boolean + script?: Script + on_script_error?: MappingOnScriptError +} + export interface MappingSuggestContext { name: Name path?: Field @@ -4193,6 +4251,11 @@ export interface MappingTypeMapping { enabled?: boolean } +export interface MappingUnsignedLongNumberProperty extends MappingNumberPropertyBase { + type: 'unsigned_long' + null_value?: ulong +} + export interface MappingVersionProperty extends MappingDocValuesPropertyBase { type: 'version' } @@ -4774,11 +4837,9 @@ export interface QueryDslRangeQueryBase extends QueryDslQueryBase { export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' export 
interface QueryDslRankFeatureFunction { - [key: string]: never } export interface QueryDslRankFeatureFunctionLinear { - [key: string]: never } export interface QueryDslRankFeatureFunctionLogarithm { @@ -7370,6 +7431,7 @@ export interface ClusterHealthShardHealthStats { } export interface ClusterPendingTasksPendingTask { + executing: boolean insert_order: integer priority: string source: string @@ -8053,7 +8115,6 @@ export interface GraphExploreResponse { } export interface IlmAction { - [key: string]: never } export interface IlmPhase { @@ -8075,7 +8136,7 @@ export interface IlmPolicy { } export interface IlmDeleteLifecycleRequest extends RequestBase { - policy: Name + name: Name } export interface IlmDeleteLifecycleResponse extends AcknowledgedResponseBase { @@ -8130,7 +8191,7 @@ export interface IlmGetLifecycleLifecycle { } export interface IlmGetLifecycleRequest extends RequestBase { - policy?: Name + name?: Name } export interface IlmGetLifecycleResponse extends DictionaryResponseBase { @@ -8159,7 +8220,8 @@ export interface IlmMoveToStepStepKey { } export interface IlmPutLifecycleRequest extends RequestBase { - policy: Name + name: Name + policy?: IlmPolicy } export interface IlmPutLifecycleResponse extends AcknowledgedResponseBase { @@ -12101,9 +12163,6 @@ export interface NodesDataPathStats { export interface NodesExtendedMemoryStats extends NodesMemoryStats { free_percent: integer used_percent: integer - total_in_bytes: integer - free_in_bytes: integer - used_in_bytes: integer } export interface NodesFileSystem { @@ -13865,7 +13924,7 @@ export interface SnapshotCleanupRepositoryCleanupRepositoryResults { } export interface SnapshotCleanupRepositoryRequest extends RequestBase { - repository: Name + name: Name master_timeout?: Time timeout?: Time } @@ -13905,10 +13964,11 @@ export interface SnapshotCreateResponse { } export interface SnapshotCreateRepositoryRequest extends RequestBase { - repository: Name + name: Name master_timeout?: Time timeout?: Time verify?: boolean + repository?: SnapshotRepository type: string settings: SnapshotRepositorySettings } @@ -13926,7 +13986,7 @@ export interface SnapshotDeleteResponse extends AcknowledgedResponseBase { } export interface SnapshotDeleteRepositoryRequest extends RequestBase { - repository: Names + name: Names master_timeout?: Time timeout?: Time } @@ -13959,7 +14019,7 @@ export interface SnapshotGetSnapshotResponseItem { } export interface SnapshotGetRepositoryRequest extends RequestBase { - repository?: Names + name?: Names local?: boolean master_timeout?: Time } @@ -14009,7 +14069,7 @@ export interface SnapshotVerifyRepositoryCompactNodeInfo { } export interface SnapshotVerifyRepositoryRequest extends RequestBase { - repository: Name + name: Name master_timeout?: Time timeout?: Time } @@ -14382,9 +14442,18 @@ export interface TransformPreviewTransformResponse { preview: TTransform[] } -export interface TransformPutTransformRequest extends TransformPreviewTransformRequest { +export interface TransformPutTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean + dest: ReindexDestination + description?: string + frequency?: Time + pivot?: TransformPivot + source: ReindexSource + settings?: TransformSettings + sync?: TransformSyncContainer + retention_policy?: TransformRetentionPolicyContainer + latest?: TransformLatest } export interface TransformPutTransformResponse extends AcknowledgedResponseBase { @@ -14410,7 +14479,16 @@ export interface TransformStopTransformRequest extends RequestBase { export interface 
TransformStopTransformResponse extends AcknowledgedResponseBase { } -export interface TransformUpdateTransformRequest extends TransformPutTransformRequest { +export interface TransformUpdateTransformRequest extends RequestBase { + transform_id: Id + defer_validation?: boolean + dest?: ReindexDestination + description?: string + frequency?: Time + source?: ReindexSource + settings?: TransformSettings + sync?: TransformSyncContainer + retention_policy?: TransformRetentionPolicyContainer } export interface TransformUpdateTransformResponse { @@ -14479,7 +14557,6 @@ export interface WatcherActivationStatus { } export interface WatcherAlwaysCondition { - [key: string]: never } export interface WatcherArrayCompareCondition { @@ -14683,7 +14760,6 @@ export interface WatcherLoggingResult { export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | 'june' | 'july' | 'august' | 'september' | 'october' | 'november' | 'december' export interface WatcherNeverCondition { - [key: string]: never } export interface WatcherPagerDutyActionEventResult { @@ -15520,11 +15596,9 @@ export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter } export interface SpecUtilsAdditionalProperty { - [key: string]: never } export interface SpecUtilsAdditionalProperties { - [key: string]: never } export interface SpecUtilsCommonQueryParameters { @@ -15546,5 +15620,4 @@ export interface SpecUtilsCommonCatQueryParameters { } export interface SpecUtilsOverloadOf { - [key: string]: never } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index a86224423..d429e624f 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -450,7 +450,19 @@ export interface GetScriptLanguagesResponse { types_allowed: string[] } -export interface GetSourceRequest extends GetRequest { +export interface GetSourceRequest { + id: Id + index: IndexName + preference?: string + realtime?: boolean + refresh?: boolean + routing?: Routing + _source?: boolean | Fields + _source_excludes?: Fields + _source_includes?: Fields + stored_fields?: Fields + version?: VersionNumber + version_type?: VersionType } export type GetSourceResponse = TDocument @@ -844,6 +856,7 @@ export interface ReindexSource { slice?: SlicedScroll sort?: SearchSort _source?: Fields + runtime_mappings?: MappingRuntimeFields } export interface ReindexRethrottleReindexNode extends SpecUtilsBaseNode { @@ -920,7 +933,7 @@ export interface ScriptsPainlessExecuteResponse { } export interface ScrollRequest extends RequestBase { - scroll_id?: Id + scroll_id?: ScrollId scroll?: Time rest_total_hits_as_int?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ @@ -1943,7 +1956,6 @@ export interface ElasticsearchVersionInfo { } export interface EmptyObject { - [key: string]: never } export type EpochMillis = string | long @@ -1953,7 +1965,7 @@ export interface ErrorCauseKeys { reason: string stack_trace?: string caused_by?: ErrorCause | string - root_cause: (ErrorCause | string)[] + root_cause?: (ErrorCause | string)[] suppressed?: (ErrorCause | string)[] } export type ErrorCause = ErrorCauseKeys | @@ -2381,7 +2393,6 @@ export type TimeZone = string export type Timestamp = string export interface Transform { - [key: string]: never } export interface TransformContainer { @@ -2443,6 +2454,8 @@ export interface WriteResponseBase { forced_refresh?: boolean } +export type byte = number + export type double = number export type float = number @@ -2451,6 +2464,8 @@ export type integer = number export type long = number +export type short = number + export type uint = number export type ulong = number @@ -3060,7 +3075,6 @@ export interface AggregationsParentAggregation extends AggregationsBucketAggrega } export interface AggregationsPercentageScoreHeuristic { - [key: string]: never } export interface AggregationsPercentileItem { @@ -3921,6 +3935,11 @@ export interface MappingBooleanProperty extends MappingDocValuesPropertyBase { type: 'boolean' } +export interface MappingByteNumberProperty extends MappingStandardNumberProperty { + type: 'byte' + null_value?: byte +} + export interface MappingCompletionProperty extends MappingDocValuesPropertyBase { analyzer?: string contexts?: MappingSuggestContext[] @@ -3981,6 +4000,11 @@ export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase { doc_values?: boolean } +export interface MappingDoubleNumberProperty extends MappingStandardNumberProperty { + type: 'double' + null_value?: double +} + export interface MappingDoubleRangeProperty extends MappingRangePropertyBase { type: 'double_range' } @@ -4004,7 +4028,7 @@ export interface MappingFieldAliasProperty extends MappingPropertyBase { export interface MappingFieldMapping { full_name: string - mapping: Partial> + mapping: Partial> } export interface MappingFieldNamesField { @@ -4026,6 +4050,11 @@ export interface MappingFlattenedProperty extends MappingPropertyBase { type: 'flattened' } +export interface MappingFloatNumberProperty extends MappingStandardNumberProperty { + type: 'float' + null_value?: float +} + export interface MappingFloatRangeProperty extends MappingRangePropertyBase { type: 'float_range' } @@ -4065,6 +4094,11 @@ export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { export type MappingGeoStrategy = 'recursive' | 'term' +export interface MappingHalfFloatNumberProperty extends MappingStandardNumberProperty { + type: 'half_float' + null_value?: float +} + export interface MappingHistogramProperty extends MappingPropertyBase { ignore_malformed?: boolean type: 'histogram' @@ -4076,6 +4110,11 @@ export interface MappingIndexField { export type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets' +export interface MappingIntegerNumberProperty extends MappingStandardNumberProperty { + type: 'integer' + null_value?: integer +} + export interface MappingIntegerRangeProperty extends MappingRangePropertyBase { type: 'integer_range' } @@ -4109,6 +4148,11 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { type: 'keyword' } +export interface MappingLongNumberProperty extends MappingStandardNumberProperty { + type: 'long' + null_value?: long +} + export interface 
MappingLongRangeProperty extends MappingRangePropertyBase { type: 'long_range' } @@ -4126,24 +4170,20 @@ export interface MappingNestedProperty extends MappingCorePropertyBase { type: 'nested' } -export interface MappingNumberProperty extends MappingDocValuesPropertyBase { - boost?: double - coerce?: boolean - fielddata?: IndicesNumericFielddata - ignore_malformed?: boolean +export type MappingNumberProperty = MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingDoubleNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingShortNumberProperty | MappingByteNumberProperty | MappingUnsignedLongNumberProperty | MappingScaledFloatNumberProperty + +export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase { index?: boolean - null_value?: double - scaling_factor?: double - type: MappingNumberType + ignore_malformed?: boolean } -export type MappingNumberType = 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer' | 'long' | 'short' | 'byte' | 'unsigned_long' - export interface MappingObjectProperty extends MappingCorePropertyBase { enabled?: boolean type?: 'object' } +export type MappingOnScriptError = 'fail' | 'continue' + export interface MappingPercolatorProperty extends MappingPropertyBase { type: 'percolator' } @@ -4198,6 +4238,13 @@ export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point export type MappingRuntimeFields = Record +export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase { + type: 'scaled_float' + coerce?: boolean + null_value?: double + scaling_factor?: double +} + export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase { analyzer?: string index?: boolean @@ -4218,6 +4265,11 @@ export interface MappingShapeProperty extends MappingDocValuesPropertyBase { type: 'shape' } +export interface MappingShortNumberProperty extends MappingStandardNumberProperty { + type: 'short' + null_value?: short +} + export interface MappingSizeField { enabled: boolean } @@ -4230,6 +4282,12 @@ export interface MappingSourceField { includes?: string[] } +export interface MappingStandardNumberProperty extends MappingNumberPropertyBase { + coerce?: boolean + script?: Script + on_script_error?: MappingOnScriptError +} + export interface MappingSuggestContext { name: Name path?: Field @@ -4289,6 +4347,11 @@ export interface MappingTypeMapping { enabled?: boolean } +export interface MappingUnsignedLongNumberProperty extends MappingNumberPropertyBase { + type: 'unsigned_long' + null_value?: ulong +} + export interface MappingVersionProperty extends MappingDocValuesPropertyBase { type: 'version' } @@ -4870,11 +4933,9 @@ export interface QueryDslRangeQueryBase extends QueryDslQueryBase { export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' export interface QueryDslRankFeatureFunction { - [key: string]: never } export interface QueryDslRankFeatureFunctionLinear { - [key: string]: never } export interface QueryDslRankFeatureFunctionLogarithm { @@ -7499,6 +7560,7 @@ export interface ClusterHealthShardHealthStats { } export interface ClusterPendingTasksPendingTask { + executing: boolean insert_order: integer priority: string source: string @@ -8203,7 +8265,6 @@ export interface GraphExploreResponse { } export interface IlmAction { - [key: string]: never } export interface IlmPhase { @@ -8225,7 +8286,7 @@ export interface IlmPolicy { } export interface IlmDeleteLifecycleRequest extends RequestBase { - policy: Name + name: Name } export interface 
IlmDeleteLifecycleResponse extends AcknowledgedResponseBase { @@ -8280,7 +8341,7 @@ export interface IlmGetLifecycleLifecycle { } export interface IlmGetLifecycleRequest extends RequestBase { - policy?: Name + name?: Name } export interface IlmGetLifecycleResponse extends DictionaryResponseBase { @@ -8312,7 +8373,7 @@ export interface IlmMoveToStepStepKey { } export interface IlmPutLifecycleRequest extends RequestBase { - policy: Name + name: Name /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { policy?: IlmPolicy @@ -12420,9 +12481,6 @@ export interface NodesDataPathStats { export interface NodesExtendedMemoryStats extends NodesMemoryStats { free_percent: integer used_percent: integer - total_in_bytes: integer - free_in_bytes: integer - used_in_bytes: integer } export interface NodesFileSystem { @@ -14232,7 +14290,7 @@ export interface SnapshotCleanupRepositoryCleanupRepositoryResults { } export interface SnapshotCleanupRepositoryRequest extends RequestBase { - repository: Name + name: Name master_timeout?: Time timeout?: Time } @@ -14278,7 +14336,7 @@ export interface SnapshotCreateResponse { } export interface SnapshotCreateRepositoryRequest extends RequestBase { - repository: Name + name: Name master_timeout?: Time timeout?: Time verify?: boolean @@ -14303,7 +14361,7 @@ export interface SnapshotDeleteResponse extends AcknowledgedResponseBase { } export interface SnapshotDeleteRepositoryRequest extends RequestBase { - repository: Names + name: Names master_timeout?: Time timeout?: Time } @@ -14336,7 +14394,7 @@ export interface SnapshotGetSnapshotResponseItem { } export interface SnapshotGetRepositoryRequest extends RequestBase { - repository?: Names + name?: Names local?: boolean master_timeout?: Time } @@ -14389,7 +14447,7 @@ export interface SnapshotVerifyRepositoryCompactNodeInfo { } export interface SnapshotVerifyRepositoryRequest extends RequestBase { - repository: Name + name: Name master_timeout?: Time timeout?: Time } @@ -14775,9 +14833,21 @@ export interface TransformPreviewTransformResponse { preview: TTransform[] } -export interface TransformPutTransformRequest extends TransformPreviewTransformRequest { +export interface TransformPutTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + dest: ReindexDestination + description?: string + frequency?: Time + pivot?: TransformPivot + source: ReindexSource + settings?: TransformSettings + sync?: TransformSyncContainer + retention_policy?: TransformRetentionPolicyContainer + latest?: TransformLatest + } } export interface TransformPutTransformResponse extends AcknowledgedResponseBase { @@ -14803,7 +14873,19 @@ export interface TransformStopTransformRequest extends RequestBase { export interface TransformStopTransformResponse extends AcknowledgedResponseBase { } -export interface TransformUpdateTransformRequest extends TransformPutTransformRequest { +export interface TransformUpdateTransformRequest extends RequestBase { + transform_id: Id + defer_validation?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + dest?: ReindexDestination + description?: string + frequency?: Time + source?: ReindexSource + settings?: TransformSettings + sync?: TransformSyncContainer + retention_policy?: TransformRetentionPolicyContainer + } } export interface TransformUpdateTransformResponse { @@ -14872,7 +14954,6 @@ export interface WatcherActivationStatus { } export interface WatcherAlwaysCondition { - [key: string]: never } export interface WatcherArrayCompareCondition { @@ -15076,7 +15157,6 @@ export interface WatcherLoggingResult { export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | 'june' | 'july' | 'august' | 'september' | 'october' | 'november' | 'december' export interface WatcherNeverCondition { - [key: string]: never } export interface WatcherPagerDutyActionEventResult { @@ -15922,11 +16002,9 @@ export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter } export interface SpecUtilsAdditionalProperty { - [key: string]: never } export interface SpecUtilsAdditionalProperties { - [key: string]: never } export interface SpecUtilsCommonQueryParameters { @@ -15948,5 +16026,4 @@ export interface SpecUtilsCommonCatQueryParameters { } export interface SpecUtilsOverloadOf { - [key: string]: never } From 0e35840e1becee657b3a13531835e31b705cb6b6 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 20 Oct 2021 08:09:16 +0200 Subject: [PATCH 083/647] Bumped v8.0.0-canary.25 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 46ee4a390..783028dd8 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", "version": "8.0.0", - "versionCanary": "8.0.0-canary.24", + "versionCanary": "8.0.0-canary.25", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 875f969ef84f99a704eac320c8b8213a68211a17 Mon Sep 17 00:00:00 2001 From: Mikhail Shustov Date: Wed, 20 Oct 2021 09:00:30 +0200 Subject: [PATCH 084/647] enable type check for d.ts files (#1569) --- src/client.ts | 2 +- tsconfig.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client.ts b/src/client.ts index a22843c9e..42e78dbc7 100644 --- a/src/client.ts +++ b/src/client.ts @@ -63,7 +63,7 @@ if (transportVersion.includes('-')) { } const nodeVersion = process.versions.node -interface NodeOptions { +export interface NodeOptions { url: URL id?: string agent?: HttpAgentOptions | UndiciAgentOptions diff --git a/tsconfig.json b/tsconfig.json index 26f91feff..e93828bd8 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -15,7 +15,7 @@ "noFallthroughCasesInSwitch": true, "useDefineForClassFields": true, "forceConsistentCasingInFileNames": true, - "skipLibCheck": true, + "skipLibCheck": false, "esModuleInterop": true, "isolatedModules": true, "importHelpers": true, From 898fac2ec1686970361833d577fe9d95ff269bc4 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 20 Oct 2021 09:13:34 +0200 Subject: [PATCH 085/647] Bumped v8.0.0-alpha.1 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 783028dd8..1657a56eb 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@elastic/elasticsearch", - "version": "8.0.0", + "version": "8.0.0-alpha.1", "versionCanary": "8.0.0-canary.25", "description": "The official Elasticsearch client for Node.js", "main": "index.js", From 1e1ac6a203dc5f99fc8e2aa6825396949b702fbd Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 20 Oct 
2021 17:32:28 +0200 Subject: [PATCH 086/647] API generation --- src/api/api/fleet.ts | 51 +++++++++++++++++++++++++++++++ src/api/api/indices.ts | 22 ++++++++++++++ src/api/api/knn_search.ts | 60 +++++++++++++++++++++++++++++++++++++ src/api/index.ts | 3 ++ src/api/kibana.ts | 4 +++ src/api/types.ts | 40 ++++++++++++------------- src/api/typesWithBodyKey.ts | 40 ++++++++++++------------- 7 files changed, 180 insertions(+), 40 deletions(-) create mode 100644 src/api/api/knn_search.ts diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index fbf076cce..c50913d9a 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -64,4 +64,55 @@ export default class Fleet { const path = `/${encodeURIComponent(params.index.toString())}/_fleet/global_checkpoints` return await this.transport.request({ path, method, querystring, body }, options) } + + async msearch (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async msearch (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async msearch (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async msearch (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_fleet/_msearch` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_fleet/_msearch' + } + return await this.transport.request({ path, method, querystring, bulkBody: body }, options) + } + + async search (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async search (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async search (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async search (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = body != null ? 
'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_fleet/_search` + return await this.transport.request({ path, method, querystring, body }, options) + } } diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 52fd341f6..5fefa4501 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -864,6 +864,28 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + async modifyDataStream (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async modifyDataStream (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async modifyDataStream (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async modifyDataStream (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_data_stream/_modify' + return await this.transport.request({ path, method, querystring, body }, options) + } + async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts new file mode 100644 index 000000000..4f6ffff5f --- /dev/null +++ b/src/api/api/knn_search.ts @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
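
// A quick sketch of exercising the new endpoint before its request/response
// types exist (params are still typed as T.TODO at this stage, so going
// through the transport keeps the payload explicit). The index name, field
// mapping and vector values here are assumptions; the body shape follows the
// experimental _knn_search API:
//
//   const res = await client.transport.request({
//     method: 'POST',
//     path: '/my-vectors/_knn_search',
//     body: { knn: { field: 'vector', query_vector: [0.1, 0.2, 0.3], k: 10, num_candidates: 100 } }
//   })
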
+ +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default async function KnnSearchApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function KnnSearchApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> +export default async function KnnSearchApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise +export default async function KnnSearchApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_knn_search` + return await this.transport.request({ path, method, querystring, body }, options) +} diff --git a/src/api/index.ts b/src/api/index.ts index 6bb6b461d..c4f253984 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -60,6 +60,7 @@ import indexApi from './api/index' import IndicesApi from './api/indices' import infoApi from './api/info' import IngestApi from './api/ingest' +import knnSearchApi from './api/knn_search' import LicenseApi from './api/license' import LogstashApi from './api/logstash' import mgetApi from './api/mget' @@ -138,6 +139,7 @@ export default interface API { indices: IndicesApi info: typeof infoApi ingest: IngestApi + knnSearch: typeof knnSearchApi license: LicenseApi logstash: LogstashApi mget: typeof mgetApi @@ -306,6 +308,7 @@ API.prototype.getScriptLanguages = getScriptLanguagesApi API.prototype.getSource = getSourceApi API.prototype.index = indexApi API.prototype.info = infoApi +API.prototype.knnSearch = knnSearchApi API.prototype.mget = mgetApi API.prototype.msearch = msearchApi API.prototype.msearchTemplate = msearchTemplateApi diff --git a/src/api/kibana.ts b/src/api/kibana.ts index 745da0d25..7d00e1f2d 100644 --- a/src/api/kibana.ts +++ b/src/api/kibana.ts @@ -159,6 +159,8 @@ interface KibanaClient { fieldCaps: (params?: T.FieldCapsRequest| TB.FieldCapsRequest, options?: TransportRequestOptions) => Promise> fleet: { globalCheckpoints: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + msearch: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + search: (params?: T.TODO, options?: TransportRequestOptions) => Promise> } get: (params: T.GetRequest| TB.GetRequest, options?: TransportRequestOptions) => Promise, TContext>> getScript: (params: T.GetScriptRequest| TB.GetScriptRequest, options?: TransportRequestOptions) => Promise> @@ -215,6 +217,7 @@ interface KibanaClient { getSettings: (params?: T.IndicesGetSettingsRequest| TB.IndicesGetSettingsRequest, options?: TransportRequestOptions) => Promise> getTemplate: (params?: T.IndicesGetTemplateRequest| TB.IndicesGetTemplateRequest, options?: TransportRequestOptions) => Promise> migrateToDataStream: (params: T.IndicesMigrateToDataStreamRequest| TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions) => Promise> + modifyDataStream: (params?: T.TODO, options?: 
TransportRequestOptions) => Promise> open: (params: T.IndicesOpenRequest| TB.IndicesOpenRequest, options?: TransportRequestOptions) => Promise> promoteDataStream: (params: T.IndicesPromoteDataStreamRequest| TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions) => Promise> putAlias: (params: T.IndicesPutAliasRequest| TB.IndicesPutAliasRequest, options?: TransportRequestOptions) => Promise> @@ -247,6 +250,7 @@ interface KibanaClient { putPipeline: (params: T.IngestPutPipelineRequest| TB.IngestPutPipelineRequest, options?: TransportRequestOptions) => Promise> simulate: (params?: T.IngestSimulateRequest| TB.IngestSimulateRequest, options?: TransportRequestOptions) => Promise> } + knnSearch: (params?: T.TODO, options?: TransportRequestOptions) => Promise> license: { delete: (params?: T.LicenseDeleteRequest| TB.LicenseDeleteRequest, options?: TransportRequestOptions) => Promise> get: (params?: T.LicenseGetRequest| TB.LicenseGetRequest, options?: TransportRequestOptions) => Promise> diff --git a/src/api/types.ts b/src/api/types.ts index 2408dff62..b21a28f37 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -82,7 +82,7 @@ export interface BulkResponseItem { _id?: string | null _index: string status: integer - error?: ErrorCause | string + error?: ErrorCause _primary_term?: long result?: string _seq_no?: SequenceNumber @@ -477,7 +477,7 @@ export interface InfoResponse { } export interface MgetHit { - error?: ErrorCause | string + error?: ErrorCause fields?: Record found?: boolean _id: Id @@ -634,7 +634,7 @@ export interface MtermvectorsTermVectorsResult { took?: long found?: boolean term_vectors?: Record - error?: ErrorCause | string + error?: ErrorCause } export interface OpenPointInTimeRequest extends RequestBase { @@ -1771,7 +1771,7 @@ export interface AcknowledgedResponseBase { export type AggregateName = string export interface BulkIndexByScrollFailure { - cause: ErrorCause | string + cause: ErrorCause id: Id index: IndexName status: integer @@ -1868,15 +1868,15 @@ export interface ErrorCauseKeys { type?: string reason: string stack_trace?: string - caused_by?: ErrorCause | string - root_cause?: (ErrorCause | string)[] - suppressed?: (ErrorCause | string)[] + caused_by?: ErrorCause + root_cause?: ErrorCause[] + suppressed?: ErrorCause[] } export type ErrorCause = ErrorCauseKeys | { [property: string]: any } export interface ErrorResponseBase { - error: ErrorCause | string + error: ErrorCause status: integer } @@ -2076,7 +2076,7 @@ export interface NodeShard { } export interface NodeStatistics { - failures?: (ErrorCause | string)[] + failures?: ErrorCause[] total: integer successful: integer failed: integer @@ -2239,7 +2239,7 @@ export type ShapeRelation = 'intersects' | 'disjoint' | 'within' export interface ShardFailure { index?: IndexName node?: string - reason: ErrorCause | string + reason: ErrorCause shard: integer status?: string } @@ -6957,7 +6957,7 @@ export interface CcrFollowIndexStats { } export interface CcrReadException { - exception: ErrorCause | string + exception: ErrorCause from_seq_no: SequenceNumber retries: integer } @@ -6966,7 +6966,7 @@ export interface CcrShardStats { bytes_read: long failed_read_requests: long failed_write_requests: long - fatal_exception?: ErrorCause | string + fatal_exception?: ErrorCause follower_aliases_version: VersionNumber follower_global_checkpoint: long follower_index: string @@ -7162,7 +7162,7 @@ export interface CcrStatsAutoFollowStats { number_of_failed_follow_indices: long 
   number_of_failed_remote_cluster_state_requests: long
   number_of_successful_follow_indices: long
-  recent_auto_follow_errors: (ErrorCause | string)[]
+  recent_auto_follow_errors: ErrorCause[]
 }
 
 export interface CcrStatsAutoFollowedCluster {
@@ -12105,7 +12105,7 @@ export interface MonitoringBulkRequest extends RequestBase {
 }
 
 export interface MonitoringBulkResponse {
-  error?: ErrorCause | string
+  error?: ErrorCause
   errors: boolean
   ignored: boolean
   took: long
@@ -13530,7 +13530,7 @@ export interface SecurityInvalidateApiKeyRequest extends RequestBase {
 
 export interface SecurityInvalidateApiKeyResponse {
   error_count: integer
-  error_details?: (ErrorCause | string)[]
+  error_details?: ErrorCause[]
   invalidated_api_keys: string[]
   previously_invalidated_api_keys: string[]
 }
@@ -13544,7 +13544,7 @@ export interface SecurityInvalidateTokenRequest extends RequestBase {
 
 export interface SecurityInvalidateTokenResponse {
   error_count: long
-  error_details?: (ErrorCause | string)[]
+  error_details?: ErrorCause[]
   invalidated_tokens: long
   previously_invalidated_tokens: long
 }
@@ -14015,7 +14015,7 @@ export interface SnapshotGetResponse {
 export interface SnapshotGetSnapshotResponseItem {
   repository: Name
   snapshots?: SnapshotSnapshotInfo[]
-  error?: ErrorCause | string
+  error?: ErrorCause
 }
 
 export interface SnapshotGetRepositoryRequest extends RequestBase {
@@ -14203,7 +14203,7 @@ export interface TasksCancelRequest extends RequestBase {
 }
 
 export interface TasksCancelResponse {
-  node_failures?: (ErrorCause | string)[]
+  node_failures?: ErrorCause[]
   nodes: Record<string, TasksTaskExecutingNode>
 }
@@ -14217,7 +14217,7 @@ export interface TasksGetResponse {
   completed: boolean
   task: TasksInfo
   response?: TasksStatus
-  error?: ErrorCause | string
+  error?: ErrorCause
 }
 
 export interface TasksListRequest extends RequestBase {
@@ -14231,7 +14231,7 @@ export interface TasksListRequest extends RequestBase {
 }
 
 export interface TasksListResponse {
-  node_failures?: (ErrorCause | string)[]
+  node_failures?: ErrorCause[]
   nodes?: Record<string, TasksTaskExecutingNode>
   tasks?: Record<string, TasksInfo> | TasksInfo[]
 }
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index d429e624f..519bd8245 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -83,7 +83,7 @@ export interface BulkResponseItem {
   _id?: string | null
   _index: string
   status: integer
-  error?: ErrorCause | string
+  error?: ErrorCause
   _primary_term?: long
   result?: string
   _seq_no?: SequenceNumber
@@ -500,7 +500,7 @@ export interface InfoResponse {
 }
 
 export interface MgetHit<TDocument = unknown> {
-  error?: ErrorCause | string
+  error?: ErrorCause
   fields?: Record<string, any>
   found?: boolean
   _id: Id
@@ -666,7 +666,7 @@ export interface MtermvectorsTermVectorsResult {
   took?: long
   found?: boolean
   term_vectors?: Record<Field, TermVectorsTermVector>
-  error?: ErrorCause | string
+  error?: ErrorCause
 }
 
 export interface OpenPointInTimeRequest extends RequestBase {
@@ -1867,7 +1867,7 @@ export interface AcknowledgedResponseBase {
 export type AggregateName = string
 
 export interface BulkIndexByScrollFailure {
-  cause: ErrorCause | string
+  cause: ErrorCause
   id: Id
   index: IndexName
   status: integer
@@ -1964,15 +1964,15 @@ export interface ErrorCauseKeys {
   type?: string
   reason: string
   stack_trace?: string
-  caused_by?: ErrorCause | string
-  root_cause?: (ErrorCause | string)[]
-  suppressed?: (ErrorCause | string)[]
+  caused_by?: ErrorCause
+  root_cause?: ErrorCause[]
+  suppressed?: ErrorCause[]
 }
 
 export type ErrorCause = ErrorCauseKeys | { [property: string]: any }
 
 export interface ErrorResponseBase {
-  error: ErrorCause | string
+  error: ErrorCause
   status: integer
 }
@@ -2172,7 +2172,7 @@ export interface NodeShard {
 }
 
 export interface NodeStatistics {
-  failures?: (ErrorCause | string)[]
+  failures?: ErrorCause[]
   total: integer
   successful: integer
   failed: integer
@@ -2335,7 +2335,7 @@ export type ShapeRelation = 'intersects' | 'disjoint' | 'within'
 
 export interface ShardFailure {
   index?: IndexName
   node?: string
-  reason: ErrorCause | string
+  reason: ErrorCause
   shard: integer
   status?: string
 }
@@ -7071,7 +7071,7 @@ export interface CcrFollowIndexStats {
 }
 
 export interface CcrReadException {
-  exception: ErrorCause | string
+  exception: ErrorCause
   from_seq_no: SequenceNumber
   retries: integer
 }
@@ -7080,7 +7080,7 @@ export interface CcrShardStats {
   bytes_read: long
   failed_read_requests: long
   failed_write_requests: long
-  fatal_exception?: ErrorCause | string
+  fatal_exception?: ErrorCause
   follower_aliases_version: VersionNumber
   follower_global_checkpoint: long
   follower_index: string
@@ -7288,7 +7288,7 @@ export interface CcrStatsAutoFollowStats {
   number_of_failed_follow_indices: long
   number_of_failed_remote_cluster_state_requests: long
   number_of_successful_follow_indices: long
-  recent_auto_follow_errors: (ErrorCause | string)[]
+  recent_auto_follow_errors: ErrorCause[]
 }
 
 export interface CcrStatsAutoFollowedCluster {
@@ -12423,7 +12423,7 @@ export interface MonitoringBulkRequest extends RequestBase {
 }
 
 export interface MonitoringBulkResponse {
-  error?: ErrorCause | string
+  error?: ErrorCause
   errors: boolean
   ignored: boolean
   took: long
@@ -13879,7 +13879,7 @@ export interface SecurityInvalidateApiKeyRequest extends RequestBase {
 
 export interface SecurityInvalidateApiKeyResponse {
   error_count: integer
-  error_details?: (ErrorCause | string)[]
+  error_details?: ErrorCause[]
   invalidated_api_keys: string[]
   previously_invalidated_api_keys: string[]
 }
@@ -13896,7 +13896,7 @@ export interface SecurityInvalidateTokenRequest extends RequestBase {
 
 export interface SecurityInvalidateTokenResponse {
   error_count: long
-  error_details?: (ErrorCause | string)[]
+  error_details?: ErrorCause[]
   invalidated_tokens: long
   previously_invalidated_tokens: long
 }
@@ -14390,7 +14390,7 @@ export interface SnapshotGetResponse {
 export interface SnapshotGetSnapshotResponseItem {
   repository: Name
   snapshots?: SnapshotSnapshotInfo[]
-  error?: ErrorCause | string
+  error?: ErrorCause
 }
 
 export interface SnapshotGetRepositoryRequest extends RequestBase {
@@ -14590,7 +14590,7 @@ export interface TasksCancelRequest extends RequestBase {
 }
 
 export interface TasksCancelResponse {
-  node_failures?: (ErrorCause | string)[]
+  node_failures?: ErrorCause[]
   nodes: Record<string, TasksTaskExecutingNode>
 }
@@ -14604,7 +14604,7 @@ export interface TasksGetResponse {
   completed: boolean
   task: TasksInfo
   response?: TasksStatus
-  error?: ErrorCause | string
+  error?: ErrorCause
 }
 
 export interface TasksListRequest extends RequestBase {
@@ -14618,7 +14618,7 @@ export interface TasksListRequest extends RequestBase {
 }
 
 export interface TasksListResponse {
-  node_failures?: (ErrorCause | string)[]
+  node_failures?: ErrorCause[]
   nodes?: Record<string, TasksTaskExecutingNode>
   tasks?: Record<string, TasksInfo> | TasksInfo[]
 }

From 4ae687416b9e6b0c531db8c7c2854da68413ce71 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Wed, 20 Oct 2021 17:33:15 +0200
Subject: [PATCH 087/647] Bumped v8.0.0-canary.26

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 1657a56eb..a5e296240 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
   "version": "8.0.0-alpha.1",
-  "versionCanary": "8.0.0-canary.25",
"versionCanary": "8.0.0-canary.26", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 87ce166df1efe88eb6e30dffea12ebb6c839d61a Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 20 Oct 2021 21:31:27 +0200 Subject: [PATCH 088/647] API generation --- src/api/api/indices.ts | 44 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 5fefa4501..59a55d524 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -599,6 +599,28 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + async freeze (this: That, params: T.IndicesFreezeRequest | TB.IndicesFreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async freeze (this: That, params: T.IndicesFreezeRequest | TB.IndicesFreezeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async freeze (this: That, params: T.IndicesFreezeRequest | TB.IndicesFreezeRequest, options?: TransportRequestOptions): Promise + async freeze (this: That, params: T.IndicesFreezeRequest | TB.IndicesFreezeRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_freeze` + return await this.transport.request({ path, method, querystring, body }, options) + } + async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptions): Promise @@ -1436,6 +1458,28 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise + async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_unfreeze` + return await this.transport.request({ path, method, querystring, body }, options) + } + async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: 
TransportRequestOptionsWithMeta): Promise> async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise From 63542d807f77c2f200fae7bdc85facd92a0d17e7 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 20 Oct 2021 21:32:08 +0200 Subject: [PATCH 089/647] Bumped v8.0.0-canary.27 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index a5e296240..91b4104e9 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", "version": "8.0.0-alpha.1", - "versionCanary": "8.0.0-canary.26", + "versionCanary": "8.0.0-canary.27", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 6dfd66ad064daff1d37a9ced420c66f474696aba Mon Sep 17 00:00:00 2001 From: James Rodewig <40268737+jrodewig@users.noreply.github.com> Date: Wed, 20 Oct 2021 19:55:09 -0400 Subject: [PATCH 090/647] [DOCS] Update hardcoded 7.x docs links (#1579) Co-authored-by: Greg Back <1045796+gtback@users.noreply.github.com> --- docs/connecting.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index acfbe120f..03bc4532d 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -61,11 +61,11 @@ const client = new Client({ ==== ApiKey authentication You can use the -https://www.elastic.co/guide/en/elasticsearch/reference/7.x/security-api-create-api-key.html[ApiKey] +{ref-7x}/security-api-create-api-key.html[ApiKey] authentication by passing the `apiKey` parameter via the `auth` option. The `apiKey` parameter can be either a base64 encoded string or an object with the values that you can obtain from the -https://www.elastic.co/guide/en/elasticsearch/reference/7.x/security-api-create-api-key.html[create api key endpoint]. +{ref-7x}/security-api-create-api-key.html[create api key endpoint]. NOTE: If you provide both basic authentication credentials and the ApiKey configuration, the ApiKey takes precedence. 
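(Editor's note: for readers following the docs change above, this is how the `auth.apiKey` option it describes is used in client code. A minimal sketch, not part of the patch: the node URL and key values are placeholders, and the two accepted forms are the ones named on the docs page being edited.)

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

// Base64-encoded string form of the API key (placeholder value).
const client = new Client({
  node: 'https://localhost:9200/',
  auth: { apiKey: 'base64EncodedApiKey' }
})

// Object form, using the id/api_key pair returned by the
// create API key endpoint (placeholder values).
const clientWithObjectKey = new Client({
  node: 'https://localhost:9200/',
  auth: {
    apiKey: {
      id: 'apiKeyId',
      api_key: 'apiKeySecret'
    }
  }
})
----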
From 0b965c8c4dae33d464e5c370612c2e690f95904c Mon Sep 17 00:00:00 2001
From: delvedor
Date: Thu, 21 Oct 2021 09:50:21 +0200
Subject: [PATCH 091/647] API generation

---
 src/api/api/async_search.ts | 2 +-
 src/api/api/delete_by_query.ts | 2 +-
 src/api/api/eql.ts | 2 +-
 src/api/api/indices.ts | 2 +-
 src/api/api/ml.ts | 8 ++++----
 src/api/api/mtermvectors.ts | 2 +-
 src/api/api/search.ts | 2 +-
 src/api/api/search_mvt.ts | 2 +-
 src/api/api/search_template.ts | 2 +-
 src/api/api/update.ts | 2 +-
 src/api/api/update_by_query.ts | 2 +-
 11 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts
index 264c881b4..4ffc92af2 100644
--- a/src/api/api/async_search.ts
+++ b/src/api/api/async_search.ts
@@ -114,7 +114,7 @@ export default class AsyncSearch {
   async submit<TDocument = unknown> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise<T.AsyncSearchSubmitResponse<TDocument>>
   async submit<TDocument = unknown> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedBody: string[] = ['aggs', 'aggregations', 'collapse', 'highlight', 'indices_boost', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'slice', 'fields', 'suggest', 'pit', 'runtime_mappings']
+    const acceptedBody: string[] = ['aggs', 'aggregations', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params?.body ?? undefined
diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts
index d957c4faf..edb765b7c 100644
--- a/src/api/api/delete_by_query.ts
+++ b/src/api/api/delete_by_query.ts
@@ -42,7 +42,7 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptions): Promise<T.DeleteByQueryResponse>
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['query', 'slice']
+  const acceptedBody: string[] = ['max_docs', 'query', 'slice']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params.body ?? undefined
diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts
index 49d2bfb9d..e5679de75 100644
--- a/src/api/api/eql.ts
+++ b/src/api/api/eql.ts
@@ -114,7 +114,7 @@ export default class Eql {
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise<T.EqlSearchResponse<TEvent>>
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'size', 'fields', 'result_position']
+    const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
index 59a55d524..213998e31 100644
--- a/src/api/api/indices.ts
+++ b/src/api/api/indices.ts
@@ -1082,7 +1082,7 @@ export default class Indices {
   async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesPutTemplateResponse>
   async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'settings', 'version']
+    const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'order', 'settings', 'version']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index 945dad7a9..ad376d4a7 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -497,7 +497,7 @@ export default class Ml {
   async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<T.MlGetBucketsResponse>
   async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'timestamp']
-    const acceptedBody: string[] = ['anomaly_score', 'expand', 'page']
+    const acceptedBody: string[] = ['anomaly_score', 'desc', 'exclude_interim', 'expand', 'page', 'sort', 'start', 'end']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
@@ -920,7 +920,7 @@ export default class Ml {
   async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise<T.MlGetRecordsResponse>
   async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
-    const acceptedBody: string[] = ['desc', 'page', 'record_score', 'sort']
+    const acceptedBody: string[] = ['desc', 'exclude_interim', 'page', 'record_score', 'sort', 'start', 'end']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
@@ -1581,7 +1581,7 @@ export default class Ml {
   async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlStartDatafeedResponse>
   async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
-    const acceptedBody: string[] = ['end', 'timeout']
+    const acceptedBody: string[] = ['end', 'start', 'timeout']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
@@ -1653,7 +1653,7 @@ export default class Ml {
   async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlStopDatafeedResponse>
   async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
-    const acceptedBody: string[] = ['timeout']
+    const acceptedBody: string[] = ['force', 'timeout']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     let body: Record<string, any> = params.body ?? undefined
diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts
index 449504437..1aed66f78 100644
--- a/src/api/api/mtermvectors.ts
+++ b/src/api/api/mtermvectors.ts
@@ -42,7 +42,7 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto
 export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptions): Promise<T.MtermvectorsResponse>
 export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['docs']
+  const acceptedBody: string[] = ['docs', 'ids']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params?.body ?? undefined
diff --git a/src/api/api/search.ts b/src/api/api/search.ts
index a888230c5..38715356b 100644
--- a/src/api/api/search.ts
+++ b/src/api/api/search.ts
@@ -42,7 +42,7 @@ export default async function SearchApi<TDocument = unknown> (this: That, params
 export default async function SearchApi<TDocument = unknown> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise<T.SearchResponse<TDocument>>
 export default async function SearchApi<TDocument = unknown> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['aggs', 'aggregations', 'collapse', 'highlight', 'indices_boost', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'slice', 'fields', 'suggest', 'pit', 'runtime_mappings']
+  const acceptedBody: string[] = ['aggs', 'aggregations', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params?.body ?? undefined
diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts
index e15c7e2d0..15980f840 100644
--- a/src/api/api/search_mvt.ts
+++ b/src/api/api/search_mvt.ts
@@ -42,7 +42,7 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise<T.SearchMvtResponse>
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y']
-  const acceptedBody: string[] = ['aggs', 'fields', 'query', 'runtime_mappings', 'sort']
+  const acceptedBody: string[] = ['aggs', 'exact_bounds', 'extent', 'fields', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params.body ?? undefined
diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts
index 5c240f484..97a260fc3 100644
--- a/src/api/api/search_template.ts
+++ b/src/api/api/search_template.ts
@@ -42,7 +42,7 @@ export default async function SearchTemplateApi<TDocument = unknown> (this: That
 export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptions): Promise<T.SearchTemplateResponse<TDocument>>
 export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['id', 'params', 'source']
+  const acceptedBody: string[] = ['explain', 'id', 'params', 'profile', 'source']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params?.body ?? undefined
diff --git a/src/api/api/update.ts b/src/api/api/update.ts
index 4ab127557..c82e94204 100644
--- a/src/api/api/update.ts
+++ b/src/api/api/update.ts
@@ -42,7 +42,7 @@ export default async function UpdateApi<TDocument = unknown, TPartialDocument = unknown> (this: That, params: T.UpdateRequest<TDocument, TPartialDocument> | TB.UpdateRequest<TDocument, TPartialDocument>, options?: TransportRequestOptions): Promise<T.UpdateResponse<TDocument>>
 export default async function UpdateApi<TDocument = unknown, TPartialDocument = unknown> (this: That, params: T.UpdateRequest<TDocument, TPartialDocument> | TB.UpdateRequest<TDocument, TPartialDocument>, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index', 'type']
-  const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', 'upsert']
+  const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', '_source', 'upsert']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params.body ?? undefined
diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts
index 2dc41349e..a49791570 100644
--- a/src/api/api/update_by_query.ts
+++ b/src/api/api/update_by_query.ts
@@ -42,7 +42,7 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu
 export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptions): Promise<T.UpdateByQueryResponse>
 export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice']
+  const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice', 'conflicts']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   let body: Record<string, any> = params.body ?? undefined

From 79b0a2f13cd1a2fa9823b482e37707710f0813c5 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Thu, 21 Oct 2021 09:50:44 +0200
Subject: [PATCH 092/647] Bumped dependencies

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 91b4104e9..896b40334 100644
--- a/package.json
+++ b/package.json
@@ -80,7 +80,7 @@
     "xmlbuilder2": "^3.0.2"
   },
   "dependencies": {
-    "@elastic/transport": "^0.0.9",
+    "@elastic/transport": "^0.0.10",
     "tslib": "^2.3.0"
   },
   "tap": {

From 4cf1fc6cd4afd83fe616aac93d85d98491027e49 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Thu, 21 Oct 2021 09:53:24 +0200
Subject: [PATCH 093/647] Updated test

---
 test/unit/helpers/scroll.test.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/unit/helpers/scroll.test.ts b/test/unit/helpers/scroll.test.ts
index 43885cb29..6ea6bb5f5 100644
--- a/test/unit/helpers/scroll.test.ts
+++ b/test/unit/helpers/scroll.test.ts
@@ -381,7 +381,7 @@ test('Fix querystring for scroll search', async t => {
   const MockConnection = connection.buildMockConnection({
     onRequest (params) {
       if (count === 0) {
-        t.equal(params.querystring, 'size=1&scroll=1m')
+        t.equal(params.querystring, 'scroll=1m')
       } else {
         if (params.method !== 'DELETE') {
           if (params.method === 'POST') {

From 5fc8dd9f59d4617b857ee84a9edd878858c1e23a Mon Sep 17 00:00:00 2001
From: delvedor
Date: Thu, 21 Oct 2021 09:54:50 +0200
Subject: [PATCH 094/647] Bumped v8.0.0-canary.28

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 896b40334..235614757 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
   "version": "8.0.0-alpha.1",
-  "versionCanary": "8.0.0-canary.27",
+  "versionCanary": "8.0.0-canary.28",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",

From 9af0ea8e1b41e6527002782e40c38601695a7d4f Mon Sep 17 00:00:00 2001
From: delvedor
Date: Fri, 22 Oct 2021 11:05:12 +0200
Subject: [PATCH 095/647] Bumped dependencies

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 235614757..4985ff80c 100644
--- a/package.json
+++ b/package.json
@@ -80,7 +80,7 @@
     "xmlbuilder2": "^3.0.2"
   },
   "dependencies": {
-    "@elastic/transport": "^0.0.10",
+    "@elastic/transport": "^0.0.11",
     "tslib": "^2.3.0"
   },
   "tap": {

From 8c1504ff010bb5d3f81764ce97249f8c7dbab6a1 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Fri, 22 Oct 2021 11:06:20 +0200
Subject: [PATCH 096/647] Bumped v8.0.0-canary.29

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/package.json b/package.json
index 4985ff80c..26ade8552 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
   "version": "8.0.0-alpha.1",
-  "versionCanary": "8.0.0-canary.28",
+  "versionCanary": "8.0.0-canary.29",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",

From 5af2d6cd4d4711bac7e9bd268e00d5770a48ebe0 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Fri, 22 Oct 2021 11:06:54 +0200
Subject: [PATCH 097/647] Bumped v8.0.0-alpha.2

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 26ade8552..5c4fa8d46 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@elastic/elasticsearch",
-  "version": "8.0.0-alpha.1",
+  "version": "8.0.0-alpha.2",
   "versionCanary": "8.0.0-canary.29",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",

From a761539ee550d6b6acb4d846ad3eebe51b12e07d Mon Sep 17 00:00:00 2001
From: delvedor
Date: Fri, 22 Oct 2021 13:18:24 +0200
Subject: [PATCH 098/647] Bumped dependencies

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 5c4fa8d46..d272d4d1c 100644
--- a/package.json
+++ b/package.json
@@ -80,7 +80,7 @@
     "xmlbuilder2": "^3.0.2"
   },
   "dependencies": {
-    "@elastic/transport": "^0.0.11",
+    "@elastic/transport": "^0.0.12",
     "tslib": "^2.3.0"
   },
   "tap": {

From 0ad96fb921980f6c68f40e24276d071bc38db18e Mon Sep 17 00:00:00 2001
From: delvedor
Date: Fri, 22 Oct 2021 13:19:36 +0200
Subject: [PATCH 099/647] Bumped v8.0.0-canary.30

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index d272d4d1c..b9cae59e2 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
   "version": "8.0.0-alpha.2",
-  "versionCanary": "8.0.0-canary.29",
+  "versionCanary": "8.0.0-canary.30",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",

From 57608103fbd7c3b199895c77e1947f02dd93a348 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Fri, 22 Oct 2021 15:16:15 +0200
Subject: [PATCH 100/647] Bumped dependencies

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index b9cae59e2..beeda7506 100644
--- a/package.json
+++ b/package.json
@@ -80,7 +80,7 @@
     "xmlbuilder2": "^3.0.2"
   },
   "dependencies": {
-    "@elastic/transport": "^0.0.12",
+    "@elastic/transport": "^0.0.13",
     "tslib": "^2.3.0"
   },
   "tap": {

From 702e71ec1857ecda62e46488b2044a4b790ecb9c Mon Sep 17 00:00:00 2001
From: delvedor
Date: Fri, 22 Oct 2021 15:17:10 +0200
Subject: [PATCH 101/647] Bumped v8.0.0-canary.31

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index beeda7506..ac4667493 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
   "version": "8.0.0-alpha.2",
-  "versionCanary": "8.0.0-canary.30",
+  "versionCanary": "8.0.0-canary.31",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",

From 66222f01110991626d0d65e3c4be7a1633031019 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 25 Oct 2021 09:32:19 +0200
Subject: [PATCH 102/647] API generation

---
 src/api/api/ml.ts | 20 ++++++++++++++++----
 src/api/api/scroll.ts | 10 ++++++++--
 src/api/types.ts | 2 +-
 src/api/typesWithBodyKey.ts | 2 +-
 4 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index ad376d4a7..28bc3d7f9 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -180,12 +180,18 @@ export default class Ml {
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<T.MlDeleteExpiredDataResponse>
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
+    const acceptedBody: string[] = ['requests_per_second', 'timeout']
     const querystring: Record<string, any> = {}
-    const body = undefined
+    // @ts-expect-error
+    let body: Record<string, any> = params?.body ?? undefined
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
         // @ts-expect-error
@@ -863,11 +869,17 @@ export default class Ml {
   async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<T.MlGetModelSnapshotsResponse>
   async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
+    const acceptedBody: string[] = ['start', 'end']
    const querystring: Record<string, any> = {}
-    const body = undefined
+    // @ts-expect-error
+    let body: Record<string, any> = params.body ?? undefined
 
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
         // @ts-expect-error
diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts
index e6a16fe9a..b71ba0523 100644
--- a/src/api/api/scroll.ts
+++ b/src/api/api/scroll.ts
@@ -42,11 +42,17 @@ export default async function ScrollApi<TDocument = unknown> (this: That, params
 export default async function ScrollApi<TDocument = unknown> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise<T.ScrollResponse<TDocument>>
 export default async function ScrollApi<TDocument = unknown> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
+  const acceptedBody: string[] = ['scroll', 'scroll_id']
   const querystring: Record<string, any> = {}
-  const body = undefined
+  // @ts-expect-error
+  let body: Record<string, any> = params.body ?? undefined
 
   for (const key in params) {
-    if (acceptedPath.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
       // @ts-expect-error
diff --git a/src/api/types.ts b/src/api/types.ts
index b21a28f37..b2e433035 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -8280,7 +8280,7 @@ export interface IndicesDataStream {
   hidden?: boolean
 }
 
-export type IndicesDataStreamHealthStatus = 'green' | 'yellow' | 'red'
+export type IndicesDataStreamHealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED'
 
 export interface IndicesFielddataFrequencyFilter {
   max: double
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index 519bd8245..7f023a232 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -8436,7 +8436,7 @@ export interface IndicesDataStream {
   hidden?: boolean
 }
 
-export type IndicesDataStreamHealthStatus = 'green' | 'yellow' | 'red'
+export type IndicesDataStreamHealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED'
 
 export interface IndicesFielddataFrequencyFilter {
   max: double

From 529742ac1face5418fccdd5ab03fece122fd5f72 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 25 Oct 2021 09:38:16 +0200
Subject: [PATCH 103/647] Updated test

---
 test/unit/helpers/scroll.test.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/unit/helpers/scroll.test.ts b/test/unit/helpers/scroll.test.ts
index 6ea6bb5f5..b7ab9f735 100644
--- a/test/unit/helpers/scroll.test.ts
+++ b/test/unit/helpers/scroll.test.ts
@@ -297,7 +297,7 @@ test('Scroll search documents', async t => {
         t.equal(params.querystring, 'filter_path=hits.hits._source%2C_scroll_id&scroll=1m')
       } else {
         if (params.method !== 'DELETE') {
-          t.equal(params.querystring, 'scroll=1m&scroll_id=id')
+          t.equal(params.body, '{"scroll":"1m","scroll_id":"id"}')
         }
       }
       return {

From 8db750d1b01d8195b883032b523373585822285e Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 25 Oct 2021 09:38:21 +0200
Subject: [PATCH 104/647] Bumped dependencies

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index ac4667493..75e1e13e6 100644
--- a/package.json
+++ b/package.json
@@ -80,7 +80,7 @@
     "xmlbuilder2": "^3.0.2"
   },
   "dependencies": {
-    "@elastic/transport": "^0.0.13",
+    "@elastic/transport": "^0.0.14",
     "tslib": "^2.3.0"
   },
   "tap": {

From 2a2fa720e4b8cf0259afbc169c9b0eb0970e754b Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 25 Oct 2021 09:39:10 +0200
Subject: [PATCH 105/647] Bumped v8.0.0-canary.32

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 75e1e13e6..f5f8392db 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
   "version": "8.0.0-alpha.2",
-  "versionCanary": "8.0.0-canary.31",
+  "versionCanary": "8.0.0-canary.32",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",

From a397d708a36ff9af3db1c7b14b1cf8081a8ee1ce Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 25 Oct 2021 15:57:56 +0200
Subject: [PATCH 106/647] API generation

---
 src/api/types.ts | 2 +-
 src/api/typesWithBodyKey.ts | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/api/types.ts b/src/api/types.ts
index b2e433035..140608572 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -1865,7 +1865,7 @@ export interface EmptyObject {
 export type EpochMillis = string | long
 
 export interface ErrorCauseKeys {
-  type?: string
+  type: string
   reason: string
   stack_trace?: string
   caused_by?: ErrorCause
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index 7f023a232..67f407c2f 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -1961,7 +1961,7 @@ export interface EmptyObject {
 export type EpochMillis = string | long
 
 export interface ErrorCauseKeys {
-  type?: string
+  type: string
   reason: string
   stack_trace?: string
   caused_by?: ErrorCause

From 78c038d307f4ddfdac43ee9fe4136966cffc9184 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 25 Oct 2021 15:58:10 +0200
Subject: [PATCH 107/647] Bumped dependencies

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index f5f8392db..eb6aa8fb9 100644
--- a/package.json
+++ b/package.json
@@ -80,7 +80,7 @@
     "xmlbuilder2": "^3.0.2"
   },
   "dependencies": {
-    "@elastic/transport": "^0.0.14",
+    "@elastic/transport": "^0.0.15",
     "tslib": "^2.3.0"
   },
   "tap": {

From 185697b6fe41787999e7b83fb7b537136ff53bce Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 25 Oct 2021 15:59:20 +0200
Subject: [PATCH 108/647] Bumped v8.0.0-canary.33

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index eb6aa8fb9..32ca27736 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
   "version": "8.0.0-alpha.2",
-  "versionCanary": "8.0.0-canary.32",
+  "versionCanary": "8.0.0-canary.33",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",

From a5a0a1c58974de2ae8cc42cc8021668159c0ffea Mon Sep 17 00:00:00 2001
From: Tomas Della Vedova
Date: Mon, 25 Oct 2021 16:56:33 +0200
Subject: [PATCH 109/647] Do a shallow clone copy of the body when the body
 key is defined (#1584)

---
 src/api/api/async_search.ts | 2 +-
 src/api/api/ccr.ts | 8 ++--
 src/api/api/clear_scroll.ts | 2 +-
 src/api/api/close_point_in_time.ts | 2 +-
 src/api/api/cluster.ts | 8 ++--
 src/api/api/count.ts | 2 +-
 src/api/api/delete_by_query.ts | 2 +-
 src/api/api/enrich.ts | 2 +-
 src/api/api/eql.ts | 2 +-
 src/api/api/explain.ts | 2 +-
 src/api/api/field_caps.ts | 2 +-
 src/api/api/graph.ts | 2 +-
 src/api/api/ilm.ts | 4 +-
 src/api/api/indices.ts | 26 +++++------
 src/api/api/ingest.ts | 4 +-
 src/api/api/license.ts | 2 +-
 src/api/api/mget.ts | 2 +-
 src/api/api/ml.ts | 62 ++++++++++++-------------
 src/api/api/mtermvectors.ts | 2 +-
 src/api/api/nodes.ts | 2 +-
 src/api/api/put_script.ts | 2 +-
 src/api/api/rank_eval.ts | 2 +-
 src/api/api/reindex.ts | 2 +-
 src/api/api/render_search_template.ts | 2 +-
 src/api/api/rollup.ts | 4 +-
 src/api/api/scripts_painless_execute.ts | 2 +-
 src/api/api/scroll.ts | 2 +-
 src/api/api/search.ts | 2 +-
 src/api/api/search_mvt.ts | 2 +-
 src/api/api/search_template.ts | 2 +-
 src/api/api/searchable_snapshots.ts | 2 +-
 src/api/api/security.ts | 20 ++++----
 src/api/api/slm.ts | 2 +-
 src/api/api/snapshot.ts | 8 ++--
 src/api/api/sql.ts | 6 +--
 src/api/api/terms_enum.ts | 2 +-
 src/api/api/termvectors.ts | 2 +-
 src/api/api/transform.ts | 6 +--
 src/api/api/update.ts | 2 +-
 src/api/api/update_by_query.ts | 2 +-
 src/api/api/watcher.ts | 6 +--
 test/unit/api.test.ts | 29 ++++++++++++
 42 files changed, 139 insertions(+), 110 deletions(-)

diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts
index 4ffc92af2..dbd0ed79b 100644
--- a/src/api/api/async_search.ts
+++ b/src/api/api/async_search.ts
@@ -117,7 +117,7 @@ export default class AsyncSearch {
     const acceptedBody: string[] = ['aggs', 'aggregations', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
     params = params ?? {}
     for (const key in params) {
diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts
index 7dc3e650a..5d7b54fa2 100644
--- a/src/api/api/ccr.ts
+++ b/src/api/api/ccr.ts
@@ -73,7 +73,7 @@ export default class Ccr {
     const acceptedBody: string[] = ['leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -145,7 +145,7 @@ export default class Ccr {
     const acceptedBody: string[] = ['follower_cluster', 'follower_index', 'follower_index_uuid', 'leader_remote_cluster']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -247,7 +247,7 @@ export default class Ccr {
     const acceptedBody: string[] = ['remote_cluster', 'follow_index_pattern', 'leader_index_patterns', 'leader_index_exclusion_patterns', 'max_outstanding_read_requests', 'settings', 'max_outstanding_write_requests', 'read_poll_timeout', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -297,7 +297,7 @@ export default class Ccr {
     const acceptedBody: string[] = ['max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts
index 56512249c..fe26f5558 100644
--- a/src/api/api/clear_scroll.ts
+++ b/src/api/api/clear_scroll.ts
@@ -45,7 +45,7 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll
   const acceptedBody: string[] = ['scroll_id']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
-  let body: Record<string, any> = params?.body ?? undefined
+  let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
   params = params ?? {}
   for (const key in params) {
diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts
index fd930ba00..47addf4b3 100644
--- a/src/api/api/close_point_in_time.ts
+++ b/src/api/api/close_point_in_time.ts
@@ -45,7 +45,7 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo
   const acceptedBody: string[] = ['id']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
   for (const key in params) {
     if (acceptedBody.includes(key)) {
diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts
index ac6f9b16a..9d5473dde 100644
--- a/src/api/api/cluster.ts
+++ b/src/api/api/cluster.ts
@@ -51,7 +51,7 @@ export default class Cluster {
     const acceptedBody: string[] = ['current_node', 'index', 'primary', 'shard']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
     params = params ?? {}
     for (const key in params) {
@@ -276,7 +276,7 @@ export default class Cluster {
     const acceptedBody: string[] = ['template', 'aliases', 'mappings', 'settings', 'version', '_meta']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -304,7 +304,7 @@ export default class Cluster {
     const acceptedBody: string[] = ['persistent', 'transient']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
     params = params ?? {}
     for (const key in params) {
@@ -356,7 +356,7 @@ export default class Cluster {
     const acceptedBody: string[] = ['commands']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
     params = params ?? {}
     for (const key in params) {
diff --git a/src/api/api/count.ts b/src/api/api/count.ts
index 999cbf3bb..712646428 100644
--- a/src/api/api/count.ts
+++ b/src/api/api/count.ts
@@ -45,7 +45,7 @@ export default async function CountApi (this: That, params?: T.CountRequest | TB
   const acceptedBody: string[] = ['query']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
-  let body: Record<string, any> = params?.body ?? undefined
+  let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
   params = params ?? {}
   for (const key in params) {
diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts
index edb765b7c..1f26f9139 100644
--- a/src/api/api/delete_by_query.ts
+++ b/src/api/api/delete_by_query.ts
@@ -45,7 +45,7 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu
   const acceptedBody: string[] = ['max_docs', 'query', 'slice']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
   for (const key in params) {
     if (acceptedBody.includes(key)) {
diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts
index 0fb852e05..6a0b37eca 100644
--- a/src/api/api/enrich.ts
+++ b/src/api/api/enrich.ts
@@ -125,7 +125,7 @@ export default class Enrich {
     const acceptedBody: string[] = ['geo_match', 'match']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts
index e5679de75..b6392362a 100644
--- a/src/api/api/eql.ts
+++ b/src/api/api/eql.ts
@@ -117,7 +117,7 @@ export default class Eql {
     const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts
index eea152590..f1b93a3d4 100644
--- a/src/api/api/explain.ts
+++ b/src/api/api/explain.ts
@@ -45,7 +45,7 @@ export default async function ExplainApi<TDocument = unknown> (this: That, param
   const acceptedBody: string[] = ['query']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
-  let body: Record<string, any> = params.body ?? undefined
+  let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
   for (const key in params) {
     if (acceptedBody.includes(key)) {
diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts
index 6c5692125..0dd47446d 100644
--- a/src/api/api/field_caps.ts
+++ b/src/api/api/field_caps.ts
@@ -45,7 +45,7 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ
   const acceptedBody: string[] = ['index_filter', 'runtime_mappings']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
-  let body: Record<string, any> = params?.body ?? undefined
+  let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
   params = params ?? {}
   for (const key in params) {
diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts
index c92260c0e..16fb298c1 100644
--- a/src/api/api/graph.ts
+++ b/src/api/api/graph.ts
@@ -51,7 +51,7 @@ export default class Graph {
     const acceptedBody: string[] = ['connections', 'controls', 'query', 'vertices']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts
index f0918962b..8fe6064d7 100644
--- a/src/api/api/ilm.ts
+++ b/src/api/api/ilm.ts
@@ -170,7 +170,7 @@ export default class Ilm {
     const acceptedBody: string[] = ['current_step', 'next_step']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -198,7 +198,7 @@ export default class Ilm {
     const acceptedBody: string[] = ['policy']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
index 213998e31..ecd09aa9d 100644
--- a/src/api/api/indices.ts
+++ b/src/api/api/indices.ts
@@ -73,7 +73,7 @@ export default class Indices {
     const acceptedBody: string[] = ['analyzer', 'attributes', 'char_filter', 'explain', 'field', 'filter', 'normalizer', 'text', 'tokenizer']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
     params = params ?? {}
     for (const key in params) {
@@ -139,7 +139,7 @@ export default class Indices {
     const acceptedBody: string[] = ['aliases', 'settings']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -189,7 +189,7 @@ export default class Indices {
     const acceptedBody: string[] = ['aliases', 'mappings', 'settings']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -960,7 +960,7 @@ export default class Indices {
     const acceptedBody: string[] = ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -995,7 +995,7 @@ export default class Indices {
     const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1023,7 +1023,7 @@ export default class Indices {
     const acceptedBody: string[] = ['date_detection', 'dynamic', 'dynamic_date_formats', 'dynamic_templates', '_field_names', '_meta', 'numeric_detection', 'properties', '_routing', '_source', 'runtime']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1085,7 +1085,7 @@ export default class Indices {
     const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'order', 'settings', 'version']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1217,7 +1217,7 @@ export default class Indices {
     const acceptedBody: string[] = ['aliases', 'conditions', 'mappings', 'settings']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1312,7 +1312,7 @@ export default class Indices {
     const acceptedBody: string[] = ['aliases', 'settings']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1340,7 +1340,7 @@ export default class Indices {
     const acceptedBody: string[] = ['index_patterns', 'composed_of', 'overlapping', 'template']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1402,7 +1402,7 @@ export default class Indices {
     const acceptedBody: string[] = ['aliases', 'settings']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1488,7 +1488,7 @@ export default class Indices {
     const acceptedBody: string[] = ['actions']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
     params = params ?? {}
     for (const key in params) {
@@ -1517,7 +1517,7 @@ export default class Indices {
     const acceptedBody: string[] = ['query']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
     params = params ?? {}
     for (const key in params) {
diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts
index 8ab5146a0..c94f41cf9 100644
--- a/src/api/api/ingest.ts
+++ b/src/api/api/ingest.ts
@@ -149,7 +149,7 @@ export default class Ingest {
     const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params.body ?? undefined
+    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -177,7 +177,7 @@ export default class Ingest {
     const acceptedBody: string[] = ['docs', 'pipeline']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
     params = params ?? {}
     for (const key in params) {
diff --git a/src/api/api/license.ts b/src/api/api/license.ts
index fafeb3f4a..c015ee11f 100644
--- a/src/api/api/license.ts
+++ b/src/api/api/license.ts
@@ -143,7 +143,7 @@ export default class License {
     const acceptedBody: string[] = ['license', 'licenses']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    let body: Record<string, any> = params?.body ?? undefined
+    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
 
     params = params ?? {}
     for (const key in params) {
diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts
index d4a11874f..6e43aec35 100644
--- a/src/api/api/mget.ts
+++ b/src/api/api/mget.ts
@@ -45,7 +45,7 @@ export default async function MgetApi<TDocument = unknown> (this: That, params?:
   const acceptedBody: string[] = ['docs', 'ids']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
-  let body: Record<string, any> = params?.body ?? undefined
+  let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
{ ...params.body } : undefined params = params ?? {} for (const key in params) { diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 28bc3d7f9..7d1165d7c 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -183,7 +183,7 @@ export default class Ml { const acceptedBody: string[] = ['requests_per_second', 'timeout'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -358,7 +358,7 @@ export default class Ml { const acceptedBody: string[] = ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -387,7 +387,7 @@ export default class Ml { const acceptedBody: string[] = ['evaluation', 'index', 'query'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -415,7 +415,7 @@ export default class Ml { const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -450,7 +450,7 @@ export default class Ml { const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'start'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -478,7 +478,7 @@ export default class Ml { const acceptedBody: string[] = ['duration', 'expires_in', 'max_model_memory'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -506,7 +506,7 @@ export default class Ml { const acceptedBody: string[] = ['anomaly_score', 'desc', 'exclude_interim', 'expand', 'page', 'sort', 'start', 'end'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -563,7 +563,7 @@ export default class Ml { const acceptedBody: string[] = ['page'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -599,7 +599,7 @@ export default class Ml { const acceptedBody: string[] = ['page'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? 
{ ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -784,7 +784,7 @@ export default class Ml { const acceptedBody: string[] = ['page'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -872,7 +872,7 @@ export default class Ml { const acceptedBody: string[] = ['start', 'end'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -907,7 +907,7 @@ export default class Ml { const acceptedBody: string[] = ['allow_no_jobs'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -935,7 +935,7 @@ export default class Ml { const acceptedBody: string[] = ['desc', 'exclude_interim', 'page', 'record_score', 'sort', 'start', 'end'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1090,7 +1090,7 @@ export default class Ml { const acceptedBody: string[] = ['timeout'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1118,7 +1118,7 @@ export default class Ml { const acceptedBody: string[] = ['events'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1173,7 +1173,7 @@ export default class Ml { const acceptedBody: string[] = ['config'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -1209,7 +1209,7 @@ export default class Ml { const acceptedBody: string[] = ['job_config', 'datafeed_config'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -1245,7 +1245,7 @@ export default class Ml { const acceptedBody: string[] = ['description'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1295,7 +1295,7 @@ export default class Ml { const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', 'model_memory_limit', 'source'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? 
{ ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1323,7 +1323,7 @@ export default class Ml { const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1351,7 +1351,7 @@ export default class Ml { const acceptedBody: string[] = ['description', 'items'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1379,7 +1379,7 @@ export default class Ml { const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1407,7 +1407,7 @@ export default class Ml { const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'tags'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1523,7 +1523,7 @@ export default class Ml { const acceptedBody: string[] = ['delete_intervening_results'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1596,7 +1596,7 @@ export default class Ml { const acceptedBody: string[] = ['end', 'start', 'timeout'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1668,7 +1668,7 @@ export default class Ml { const acceptedBody: string[] = ['force', 'timeout'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1718,7 +1718,7 @@ export default class Ml { const acceptedBody: string[] = ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? 
{ ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1768,7 +1768,7 @@ export default class Ml { const acceptedBody: string[] = ['add_items', 'description', 'remove_items'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1796,7 +1796,7 @@ export default class Ml { const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1824,7 +1824,7 @@ export default class Ml { const acceptedBody: string[] = ['description', 'retain'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -1874,7 +1874,7 @@ export default class Ml { const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_retention_days', 'results_index_name'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts index 1aed66f78..afef707c0 100644 --- a/src/api/api/mtermvectors.ts +++ b/src/api/api/mtermvectors.ts @@ -45,7 +45,7 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto const acceptedBody: string[] = ['docs', 'ids'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index 6fc2c0798..7bad55c83 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -161,7 +161,7 @@ export default class Nodes { const acceptedBody: string[] = ['secure_settings_password'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts index 1d75aeda1..ca6a3caf6 100644 --- a/src/api/api/put_script.ts +++ b/src/api/api/put_script.ts @@ -45,7 +45,7 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque const acceptedBody: string[] = ['script'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? 
{ ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts index c47112fef..b5c79c7c7 100644 --- a/src/api/api/rank_eval.ts +++ b/src/api/api/rank_eval.ts @@ -45,7 +45,7 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest const acceptedBody: string[] = ['requests', 'metric'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index c182e1ed6..ecdfca986 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -45,7 +45,7 @@ export default async function ReindexApi (this: That, params?: T.ReindexRequest const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index 2fa44bd57..70174a010 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -45,7 +45,7 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re const acceptedBody: string[] = ['file', 'params', 'source'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index a3e7592ea..d38149171 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -155,7 +155,7 @@ export default class Rollup { const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -210,7 +210,7 @@ export default class Rollup { const acceptedBody: string[] = ['aggs', 'query', 'size'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index cd04ce880..35a67af58 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -45,7 +45,7 @@ export default async function ScriptsPainlessExecuteApi (this const acceptedBody: string[] = ['context', 'context_setup', 'script'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? 
{} for (const key in params) { diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts index b71ba0523..e151f6f61 100644 --- a/src/api/api/scroll.ts +++ b/src/api/api/scroll.ts @@ -45,7 +45,7 @@ export default async function ScrollApi (this: That, params const acceptedBody: string[] = ['scroll', 'scroll_id'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/search.ts b/src/api/api/search.ts index 38715356b..698188d58 100644 --- a/src/api/api/search.ts +++ b/src/api/api/search.ts @@ -45,7 +45,7 @@ export default async function SearchApi (this: That, params const acceptedBody: string[] = ['aggs', 'aggregations', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index 15980f840..3291911b2 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -45,7 +45,7 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque const acceptedBody: string[] = ['aggs', 'exact_bounds', 'extent', 'fields', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index 97a260fc3..b57a950ef 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -45,7 +45,7 @@ export default async function SearchTemplateApi (this: That const acceptedBody: string[] = ['explain', 'id', 'params', 'profile', 'source'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 1604d4def..f7e080999 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -110,7 +110,7 @@ export default class SearchableSnapshots { const acceptedBody: string[] = ['index', 'renamed_index', 'index_settings', 'ignore_index_settings'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? 
{ ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 55363814a..e0b3e497f 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -74,7 +74,7 @@ export default class Security { const acceptedBody: string[] = ['password'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -220,7 +220,7 @@ export default class Security { const acceptedBody: string[] = ['expiration', 'name', 'role_descriptors', 'metadata'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -670,7 +670,7 @@ export default class Security { const acceptedBody: string[] = ['grant_type', 'scope', 'password', 'kerberos_ticket', 'refresh_token', 'username'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -752,7 +752,7 @@ export default class Security { const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -780,7 +780,7 @@ export default class Security { const acceptedBody: string[] = ['application', 'cluster', 'index'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -816,7 +816,7 @@ export default class Security { const acceptedBody: string[] = ['id', 'ids', 'name', 'owner', 'realm_name', 'username'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -845,7 +845,7 @@ export default class Security { const acceptedBody: string[] = ['token', 'refresh_token', 'realm_name', 'username'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -901,7 +901,7 @@ export default class Security { const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'metadata', 'run_as', 'transient_metadata'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -929,7 +929,7 @@ export default class Security { const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'rules', 'run_as'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? 
{ ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -957,7 +957,7 @@ export default class Security { const acceptedBody: string[] = ['username', 'email', 'full_name', 'metadata', 'password', 'password_hash', 'roles', 'enabled'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index 57d289040..e5a924508 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -194,7 +194,7 @@ export default class Slm { const acceptedBody: string[] = ['config', 'name', 'repository', 'retention', 'schedule'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index 9363932ff..4d60fb84b 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -73,7 +73,7 @@ export default class Snapshot { const acceptedBody: string[] = ['indices'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -101,7 +101,7 @@ export default class Snapshot { const acceptedBody: string[] = ['ignore_unavailable', 'include_global_state', 'indices', 'feature_states', 'metadata', 'partial'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -129,7 +129,7 @@ export default class Snapshot { const acceptedBody: string[] = ['repository', 'type', 'settings'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -275,7 +275,7 @@ export default class Snapshot { const acceptedBody: string[] = ['ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index 6fe2a6191..d49765dc4 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -51,7 +51,7 @@ export default class Sql { const acceptedBody: string[] = ['cursor'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -145,7 +145,7 @@ export default class Sql { const acceptedBody: string[] = ['columnar', 'cursor', 'fetch_size', 'filter', 'query', 'request_timeout', 'page_timeout', 'time_zone', 'field_multi_value_leniency'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? 
undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -174,7 +174,7 @@ export default class Sql { const acceptedBody: string[] = ['fetch_size', 'filter', 'query', 'time_zone'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index 5cb9ba471..5f5565020 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -45,7 +45,7 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque const acceptedBody: string[] = ['field', 'size', 'timeout', 'case_insensitive', 'index_filter', 'string', 'search_after'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index ad3a6ebfc..0aa6846f3 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -45,7 +45,7 @@ export default async function TermvectorsApi (this: That, p const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index 3e9778fea..85f2d7341 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -125,7 +125,7 @@ export default class Transform { const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -161,7 +161,7 @@ export default class Transform { const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -233,7 +233,7 @@ export default class Transform { const acceptedBody: string[] = ['dest', 'description', 'frequency', 'source', 'settings', 'sync', 'retention_policy'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/update.ts b/src/api/api/update.ts index c82e94204..69f7ff82b 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -45,7 +45,7 @@ export default async function UpdateApi = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? 
{ ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts index a49791570..639e6f1de 100644 --- a/src/api/api/update_by_query.ts +++ b/src/api/api/update_by_query.ts @@ -45,7 +45,7 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice', 'conflicts'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index 6e0c9fd5a..3d4140d67 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -146,7 +146,7 @@ export default class Watcher { const acceptedBody: string[] = ['action_modes', 'alternative_input', 'ignore_condition', 'record_execution', 'simulated_actions', 'trigger_data', 'watch'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? {} for (const key in params) { @@ -204,7 +204,7 @@ export default class Watcher { const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'transform', 'trigger'] const querystring: Record = {} // @ts-expect-error - let body: Record = params.body ?? undefined + let body: Record = params.body != null ? { ...params.body } : undefined for (const key in params) { if (acceptedBody.includes(key)) { @@ -232,7 +232,7 @@ export default class Watcher { const acceptedBody: string[] = ['from', 'size', 'query', 'sort', 'search_after'] const querystring: Record = {} // @ts-expect-error - let body: Record = params?.body ?? undefined + let body: Record = params?.body != null ? { ...params.body } : undefined params = params ?? 
{}
     for (const key in params) {
diff --git a/test/unit/api.test.ts b/test/unit/api.test.ts
index 95f352035..9de738365 100644
--- a/test/unit/api.test.ts
+++ b/test/unit/api.test.ts
@@ -134,3 +134,32 @@ test('Api with body key and keyed body', async t => {
 
   t.equal(response.result, 'created')
 })
+
+test('Using the body key should not mutate the body', async t => {
+  t.plan(2)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      // @ts-expect-error
+      t.same(JSON.parse(opts.body), { query: { match_all: {} }, sort: 'foo' })
+      return {
+        statusCode: 200,
+        body: { took: 42 }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection
+  })
+
+  const body = { query: { match_all: {} } }
+  await client.search({
+    index: 'test',
+    sort: 'foo',
+    body
+  })
+
+  t.same(body, { query: { match_all: {} } })
+})
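The test above pins down the regression fixed by this patch: `search()` merges top-level request keys such as `sort` into `body` before serializing, and the old `params.body ?? undefined` assignment made `body` an alias of the caller's object, so the merge leaked back out. A minimal standalone sketch of the two behaviours (the helper names are illustrative, not part of the client):

// aliasing.ts — a sketch, assuming plain-object bodies; not the generated client code
function buildBodyAliased (params: Record<string, any>): Record<string, any> {
  const body = params.body          // aliases the caller's object
  body.sort = params.sort           // writes through into params.body
  return body
}

function buildBodyCopied (params: Record<string, any>): Record<string, any> {
  const body = params.body != null ? { ...params.body } : {} // shallow copy
  body.sort = params.sort           // stays local to the copy
  return body
}

const p1 = { sort: 'foo', body: { query: { match_all: {} } } }
buildBodyAliased(p1)
console.log('sort' in p1.body)      // true — the caller's body was mutated

const p2 = { sort: 'foo', body: { query: { match_all: {} } } }
buildBodyCopied(p2)
console.log('sort' in p2.body)      // false — the caller's body is untouched

Note that `{ ...params.body }` is only a shallow copy: objects nested inside the body are still shared, which is enough here because the generated methods only ever merge top-level keys into it.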
From f53c74e1a2779cef90ad6d7cda98904de27ba2bf Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 25 Oct 2021 16:57:36 +0200
Subject: [PATCH 110/647] Bumped v8.0.0-canary.34

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 32ca27736..da8da902b 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
   "version": "8.0.0-alpha.2",
-  "versionCanary": "8.0.0-canary.33",
+  "versionCanary": "8.0.0-canary.34",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",

From 853fe27505505761391abf3f9035deeb1a0d782a Mon Sep 17 00:00:00 2001
From: Tomas Della Vedova
Date: Mon, 25 Oct 2021 19:06:44 +0200
Subject: [PATCH 111/647] Support params.body as string. (#1585)

---
 src/api/api/async_search.ts             |   9 +-
 src/api/api/ccr.ts                      |  36 ++-
 src/api/api/clear_scroll.ts             |   9 +-
 src/api/api/close_point_in_time.ts      |   9 +-
 src/api/api/cluster.ts                  |  36 ++-
 src/api/api/count.ts                    |   9 +-
 src/api/api/delete_by_query.ts          |   9 +-
 src/api/api/enrich.ts                   |   9 +-
 src/api/api/eql.ts                      |   9 +-
 src/api/api/explain.ts                  |   9 +-
 src/api/api/field_caps.ts               |   9 +-
 src/api/api/graph.ts                    |   9 +-
 src/api/api/ilm.ts                      |  18 +-
 src/api/api/indices.ts                  | 117 ++++++++--
 src/api/api/ingest.ts                   |  18 +-
 src/api/api/license.ts                  |   9 +-
 src/api/api/mget.ts                     |   9 +-
 src/api/api/ml.ts                       | 279 +++++++++++++++++++++---
 src/api/api/mtermvectors.ts             |   9 +-
 src/api/api/nodes.ts                    |   9 +-
 src/api/api/put_script.ts               |   9 +-
 src/api/api/rank_eval.ts                |   9 +-
 src/api/api/reindex.ts                  |   9 +-
 src/api/api/render_search_template.ts   |   9 +-
 src/api/api/rollup.ts                   |  18 +-
 src/api/api/scripts_painless_execute.ts |   9 +-
 src/api/api/scroll.ts                   |   9 +-
 src/api/api/search.ts                   |   9 +-
 src/api/api/search_mvt.ts               |   9 +-
 src/api/api/search_template.ts          |   9 +-
 src/api/api/searchable_snapshots.ts     |   9 +-
 src/api/api/security.ts                 |  90 +++++++-
 src/api/api/slm.ts                      |   9 +-
 src/api/api/snapshot.ts                 |  36 ++-
 src/api/api/sql.ts                      |  27 ++-
 src/api/api/terms_enum.ts               |   9 +-
 src/api/api/termvectors.ts              |   9 +-
 src/api/api/transform.ts                |  27 ++-
 src/api/api/update.ts                   |   9 +-
 src/api/api/update_by_query.ts          |   9 +-
 src/api/api/watcher.ts                  |  27 ++-
 test/unit/api.test.ts                   |  32 +++
 42 files changed, 912 insertions(+), 110 deletions(-)
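The new pattern stamped into each generated method below widens `body` to `Record<string, any> | string` and branches on the runtime type: a string body is forwarded untouched, because spreading a string would explode it into index-keyed characters instead of preserving an already-serialized payload. A rough standalone sketch of the branch (`prepareBody` is a hypothetical name, not the generated code):

// prepare_body.ts — a sketch of the dispatch, assuming callers may pass a pre-serialized string
function prepareBody (params: { body?: Record<string, any> | string }): Record<string, any> | string | undefined {
  if (typeof params.body === 'string') {
    return params.body                                        // e.g. NDJSON or pre-serialized JSON, as-is
  }
  return params.body != null ? { ...params.body } : undefined // shallow copy, as in the previous patch
}

console.log(prepareBody({ body: '{"query":{"match_all":{}}}' })) // the original string
console.log(prepareBody({ body: { query: { match_all: {} } } })) // a fresh shallow copy
console.log({ ...('ab' as any) })                                // { '0': 'a', '1': 'b' } — what naive spreading would do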
diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts
index dbd0ed79b..8cd6b4175 100644
--- a/src/api/api/async_search.ts
+++ b/src/api/api/async_search.ts
@@ -116,8 +116,15 @@ export default class AsyncSearch {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['aggs', 'aggregations', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
 
     params = params ?? {}
     for (const key in params) {
diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts
index 5d7b54fa2..ae8243b25 100644
--- a/src/api/api/ccr.ts
+++ b/src/api/api/ccr.ts
@@ -72,8 +72,15 @@ export default class Ccr {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -144,8 +151,15 @@ export default class Ccr {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['follower_cluster', 'follower_index', 'follower_index_uuid', 'leader_remote_cluster']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -246,8 +260,15 @@ export default class Ccr {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['remote_cluster', 'follow_index_pattern', 'leader_index_patterns', 'leader_index_exclusion_patterns', 'max_outstanding_read_requests', 'settings', 'max_outstanding_write_requests', 'read_poll_timeout', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? 
{ ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params.body != null ? { ...params.body } : undefined + } for (const key in params) { if (acceptedBody.includes(key)) { @@ -303,8 +317,15 @@ export default class Cluster { const acceptedPath: string[] = [] const acceptedBody: string[] = ['persistent', 'transient'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params?.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params?.body != null ? { ...params.body } : undefined + } params = params ?? {} for (const key in params) { @@ -355,8 +376,15 @@ export default class Cluster { const acceptedPath: string[] = [] const acceptedBody: string[] = ['commands'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params?.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params?.body != null ? { ...params.body } : undefined + } params = params ?? {} for (const key in params) { diff --git a/src/api/api/count.ts b/src/api/api/count.ts index 712646428..4cb8af96a 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -44,8 +44,15 @@ export default async function CountApi (this: That, params?: T.CountRequest | TB const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['query'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params?.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params?.body != null ? { ...params.body } : undefined + } params = params ?? {} for (const key in params) { diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index 1f26f9139..7c4194f78 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -44,8 +44,15 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['max_docs', 'query', 'slice'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params.body != null ? { ...params.body } : undefined + } for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index 6a0b37eca..8ab9e6167 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -124,8 +124,15 @@ export default class Enrich { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['geo_match', 'match'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params.body != null ? 
{ ...params.body } : undefined + } for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index b6392362a..93c815f92 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -116,8 +116,15 @@ export default class Eql { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params.body != null ? { ...params.body } : undefined + } for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index f1b93a3d4..37a29cd5f 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -44,8 +44,15 @@ export default async function ExplainApi (this: That, param const acceptedPath: string[] = ['id', 'index'] const acceptedBody: string[] = ['query'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params.body != null ? { ...params.body } : undefined + } for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index 0dd47446d..f1d1e33a4 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -44,8 +44,15 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['index_filter', 'runtime_mappings'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params?.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params?.body != null ? { ...params.body } : undefined + } params = params ?? {} for (const key in params) { diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index 16fb298c1..9342c73ca 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -50,8 +50,15 @@ export default class Graph { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['connections', 'controls', 'query', 'vertices'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params.body != null ? 
{ ...params.body } : undefined + } for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index 8fe6064d7..e8775bd9f 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -169,8 +169,15 @@ export default class Ilm { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['current_step', 'next_step'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params.body != null ? { ...params.body } : undefined + } for (const key in params) { if (acceptedBody.includes(key)) { @@ -197,8 +204,15 @@ export default class Ilm { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['policy'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params.body != null ? { ...params.body } : undefined + } for (const key in params) { if (acceptedBody.includes(key)) { diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index ecd09aa9d..45c8e3039 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -72,8 +72,15 @@ export default class Indices { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['analyzer', 'attributes', 'char_filter', 'explain', 'field', 'filter', 'normalizer', 'text', 'tokenizer'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params?.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params?.body != null ? { ...params.body } : undefined + } params = params ?? {} for (const key in params) { @@ -138,8 +145,15 @@ export default class Indices { const acceptedPath: string[] = ['index', 'target'] const acceptedBody: string[] = ['aliases', 'settings'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params.body != null ? { ...params.body } : undefined + } for (const key in params) { if (acceptedBody.includes(key)) { @@ -188,8 +202,15 @@ export default class Indices { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['aliases', 'mappings', 'settings'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params.body != null ? { ...params.body } : undefined + if (typeof params?.body === 'string') { + // @ts-expect-error + body = params.body + } else { + // @ts-expect-error + body = params.body != null ? { ...params.body } : undefined + } for (const key in params) { if (acceptedBody.includes(key)) { @@ -959,8 +980,15 @@ export default class Indices { const acceptedPath: string[] = ['index', 'name'] const acceptedBody: string[] = ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing'] const querystring: Record = {} + let body: Record | string // @ts-expect-error - let body: Record = params.body != null ? 
{ ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -994,8 +1022,15 @@ export default class Indices {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1022,8 +1057,15 @@ export default class Indices {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['date_detection', 'dynamic', 'dynamic_date_formats', 'dynamic_templates', '_field_names', '_meta', 'numeric_detection', 'properties', '_routing', '_source', 'runtime']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1084,8 +1126,15 @@ export default class Indices {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'order', 'settings', 'version']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1216,8 +1265,15 @@ export default class Indices {
     const acceptedPath: string[] = ['alias', 'new_index']
     const acceptedBody: string[] = ['aliases', 'conditions', 'mappings', 'settings']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1311,8 +1367,15 @@ export default class Indices {
     const acceptedPath: string[] = ['index', 'target']
     const acceptedBody: string[] = ['aliases', 'settings']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1339,8 +1402,15 @@ export default class Indices {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['index_patterns', 'composed_of', 'overlapping', 'template']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1401,8 +1471,15 @@ export default class Indices {
     const acceptedPath: string[] = ['index', 'target']
     const acceptedBody: string[] = ['aliases', 'settings']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1487,8 +1564,15 @@ export default class Indices {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['actions']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -1516,8 +1600,15 @@ export default class Indices {
     const acceptedPath: string[] = ['index', 'type']
     const acceptedBody: string[] = ['query']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts
index c94f41cf9..58a9fce71 100644
--- a/src/api/api/ingest.ts
+++ b/src/api/api/ingest.ts
@@ -148,8 +148,15 @@ export default class Ingest {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -176,8 +183,15 @@ export default class Ingest {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['docs', 'pipeline']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
diff --git a/src/api/api/license.ts b/src/api/api/license.ts
index c015ee11f..3960c0f44 100644
--- a/src/api/api/license.ts
+++ b/src/api/api/license.ts
@@ -142,8 +142,15 @@ export default class License {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['license', 'licenses']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts
index 6e43aec35..5e9217999 100644
--- a/src/api/api/mget.ts
+++ b/src/api/api/mget.ts
@@ -44,8 +44,15 @@ export default async function MgetApi (this: That, params?:
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['docs', 'ids']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params?.body != null ? { ...params.body } : undefined
+  }
   params = params ?? {}

   for (const key in params) {
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index 7d1165d7c..e01d99111 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -182,8 +182,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['requests_per_second', 'timeout']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -357,8 +364,15 @@ export default class Ml {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -386,8 +400,15 @@ export default class Ml {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['evaluation', 'index', 'query']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -414,8 +435,15 @@ export default class Ml {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -449,8 +477,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'start']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -477,8 +512,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['duration', 'expires_in', 'max_model_memory']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -505,8 +547,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id', 'timestamp']
     const acceptedBody: string[] = ['anomaly_score', 'desc', 'exclude_interim', 'expand', 'page', 'sort', 'start', 'end']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -562,8 +611,15 @@ export default class Ml {
     const acceptedPath: string[] = ['calendar_id']
     const acceptedBody: string[] = ['page']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -598,8 +654,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id', 'category_id']
     const acceptedBody: string[] = ['page']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -783,8 +846,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['page']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -871,8 +941,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
     const acceptedBody: string[] = ['start', 'end']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -906,8 +983,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['allow_no_jobs']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -934,8 +1018,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['desc', 'exclude_interim', 'page', 'record_score', 'sort', 'start', 'end']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1089,8 +1180,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['timeout']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1117,8 +1215,15 @@ export default class Ml {
     const acceptedPath: string[] = ['calendar_id']
     const acceptedBody: string[] = ['events']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1172,8 +1277,15 @@ export default class Ml {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['config']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -1208,8 +1320,15 @@ export default class Ml {
     const acceptedPath: string[] = ['datafeed_id']
     const acceptedBody: string[] = ['job_config', 'datafeed_config']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -1244,8 +1363,15 @@ export default class Ml {
     const acceptedPath: string[] = ['calendar_id']
     const acceptedBody: string[] = ['description']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1294,8 +1420,15 @@ export default class Ml {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', 'model_memory_limit', 'source']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1322,8 +1455,15 @@ export default class Ml {
     const acceptedPath: string[] = ['datafeed_id']
     const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1350,8 +1490,15 @@ export default class Ml {
     const acceptedPath: string[] = ['filter_id']
     const acceptedBody: string[] = ['description', 'items']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1378,8 +1525,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1406,8 +1560,15 @@ export default class Ml {
     const acceptedPath: string[] = ['model_id']
     const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'tags']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1522,8 +1683,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
     const acceptedBody: string[] = ['delete_intervening_results']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1595,8 +1763,15 @@ export default class Ml {
     const acceptedPath: string[] = ['datafeed_id']
     const acceptedBody: string[] = ['end', 'start', 'timeout']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1667,8 +1842,15 @@ export default class Ml {
     const acceptedPath: string[] = ['datafeed_id']
     const acceptedBody: string[] = ['force', 'timeout']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1717,8 +1899,15 @@ export default class Ml {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1767,8 +1956,15 @@ export default class Ml {
     const acceptedPath: string[] = ['filter_id']
     const acceptedBody: string[] = ['add_items', 'description', 'remove_items']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1795,8 +1991,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1823,8 +2026,15 @@ export default class Ml {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
     const acceptedBody: string[] = ['description', 'retain']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -1873,8 +2083,15 @@ export default class Ml {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_retention_days', 'results_index_name']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts
index afef707c0..dd1be8659 100644
--- a/src/api/api/mtermvectors.ts
+++ b/src/api/api/mtermvectors.ts
@@ -44,8 +44,15 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['docs', 'ids']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params?.body != null ? { ...params.body } : undefined
+  }
   params = params ?? {}

   for (const key in params) {
diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts
index 7bad55c83..438e53454 100644
--- a/src/api/api/nodes.ts
+++ b/src/api/api/nodes.ts
@@ -160,8 +160,15 @@ export default class Nodes {
     const acceptedPath: string[] = ['node_id']
     const acceptedBody: string[] = ['secure_settings_password']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts
index ca6a3caf6..da5b93544 100644
--- a/src/api/api/put_script.ts
+++ b/src/api/api/put_script.ts
@@ -44,8 +44,15 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque
   const acceptedPath: string[] = ['id', 'context']
   const acceptedBody: string[] = ['script']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params.body != null ? { ...params.body } : undefined
+  }

   for (const key in params) {
     if (acceptedBody.includes(key)) {
diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts
index b5c79c7c7..016091227 100644
--- a/src/api/api/rank_eval.ts
+++ b/src/api/api/rank_eval.ts
@@ -44,8 +44,15 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['requests', 'metric']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params.body != null ? { ...params.body } : undefined
+  }

   for (const key in params) {
     if (acceptedBody.includes(key)) {
diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts
index ecdfca986..0c121f661 100644
--- a/src/api/api/reindex.ts
+++ b/src/api/api/reindex.ts
@@ -44,8 +44,15 @@ export default async function ReindexApi (this: That, params?: T.ReindexRequest
   const acceptedPath: string[] = []
   const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params?.body != null ? { ...params.body } : undefined
+  }
   params = params ?? {}

   for (const key in params) {
diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts
index 70174a010..cf9547a32 100644
--- a/src/api/api/render_search_template.ts
+++ b/src/api/api/render_search_template.ts
@@ -44,8 +44,15 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re
   const acceptedPath: string[] = ['id']
   const acceptedBody: string[] = ['file', 'params', 'source']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params?.body != null ? { ...params.body } : undefined
+  }
   params = params ?? {}

   for (const key in params) {
diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts
index d38149171..df24e9005 100644
--- a/src/api/api/rollup.ts
+++ b/src/api/api/rollup.ts
@@ -154,8 +154,15 @@ export default class Rollup {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -209,8 +216,15 @@ export default class Rollup {
     const acceptedPath: string[] = ['index', 'type']
     const acceptedBody: string[] = ['aggs', 'query', 'size']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts
index 35a67af58..b15438660 100644
--- a/src/api/api/scripts_painless_execute.ts
+++ b/src/api/api/scripts_painless_execute.ts
@@ -44,8 +44,15 @@ export default async function ScriptsPainlessExecuteApi (this
   const acceptedPath: string[] = []
   const acceptedBody: string[] = ['context', 'context_setup', 'script']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params?.body != null ? { ...params.body } : undefined
+  }
   params = params ?? {}

   for (const key in params) {
diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts
index e151f6f61..26191be83 100644
--- a/src/api/api/scroll.ts
+++ b/src/api/api/scroll.ts
@@ -44,8 +44,15 @@ export default async function ScrollApi (this: That, params
   const acceptedPath: string[] = []
   const acceptedBody: string[] = ['scroll', 'scroll_id']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params.body != null ? { ...params.body } : undefined
+  }

   for (const key in params) {
     if (acceptedBody.includes(key)) {
diff --git a/src/api/api/search.ts b/src/api/api/search.ts
index 698188d58..34125fafd 100644
--- a/src/api/api/search.ts
+++ b/src/api/api/search.ts
@@ -44,8 +44,15 @@ export default async function SearchApi (this: That, params
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['aggs', 'aggregations', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params?.body != null ? { ...params.body } : undefined
+  }
   params = params ?? {}

   for (const key in params) {
diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts
index 3291911b2..fd001aa06 100644
--- a/src/api/api/search_mvt.ts
+++ b/src/api/api/search_mvt.ts
@@ -44,8 +44,15 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque
   const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y']
   const acceptedBody: string[] = ['aggs', 'exact_bounds', 'extent', 'fields', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params.body != null ? { ...params.body } : undefined
+  }

   for (const key in params) {
     if (acceptedBody.includes(key)) {
diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts
index b57a950ef..e716d7c7b 100644
--- a/src/api/api/search_template.ts
+++ b/src/api/api/search_template.ts
@@ -44,8 +44,15 @@ export default async function SearchTemplateApi (this: That
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['explain', 'id', 'params', 'profile', 'source']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params?.body != null ? { ...params.body } : undefined
+  }
   params = params ?? {}

   for (const key in params) {
diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts
index f7e080999..20547eadb 100644
--- a/src/api/api/searchable_snapshots.ts
+++ b/src/api/api/searchable_snapshots.ts
@@ -109,8 +109,15 @@ export default class SearchableSnapshots {
     const acceptedPath: string[] = ['repository', 'snapshot']
     const acceptedBody: string[] = ['index', 'renamed_index', 'index_settings', 'ignore_index_settings']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/security.ts b/src/api/api/security.ts
index e0b3e497f..cf97ed46c 100644
--- a/src/api/api/security.ts
+++ b/src/api/api/security.ts
@@ -73,8 +73,15 @@ export default class Security {
     const acceptedPath: string[] = ['username']
     const acceptedBody: string[] = ['password']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -219,8 +226,15 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['expiration', 'name', 'role_descriptors', 'metadata']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -669,8 +683,15 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['grant_type', 'scope', 'password', 'kerberos_ticket', 'refresh_token', 'username']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -751,8 +772,15 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -779,8 +807,15 @@ export default class Security {
     const acceptedPath: string[] = ['user']
     const acceptedBody: string[] = ['application', 'cluster', 'index']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -815,8 +850,15 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['id', 'ids', 'name', 'owner', 'realm_name', 'username']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -844,8 +886,15 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['token', 'refresh_token', 'realm_name', 'username']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -900,8 +949,15 @@ export default class Security {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'metadata', 'run_as', 'transient_metadata']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -928,8 +984,15 @@ export default class Security {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'rules', 'run_as']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -956,8 +1019,15 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['username', 'email', 'full_name', 'metadata', 'password', 'password_hash', 'roles', 'enabled']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts
index e5a924508..f12afa39e 100644
--- a/src/api/api/slm.ts
+++ b/src/api/api/slm.ts
@@ -193,8 +193,15 @@ export default class Slm {
     const acceptedPath: string[] = ['policy_id']
     const acceptedBody: string[] = ['config', 'name', 'repository', 'retention', 'schedule']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts
index 4d60fb84b..e086a0b60 100644
--- a/src/api/api/snapshot.ts
+++ b/src/api/api/snapshot.ts
@@ -72,8 +72,15 @@ export default class Snapshot {
     const acceptedPath: string[] = ['repository', 'snapshot', 'target_snapshot']
     const acceptedBody: string[] = ['indices']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -100,8 +107,15 @@ export default class Snapshot {
     const acceptedPath: string[] = ['repository', 'snapshot']
     const acceptedBody: string[] = ['ignore_unavailable', 'include_global_state', 'indices', 'feature_states', 'metadata', 'partial']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -128,8 +142,15 @@ export default class Snapshot {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['repository', 'type', 'settings']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -274,8 +295,15 @@ export default class Snapshot {
     const acceptedPath: string[] = ['repository', 'snapshot']
     const acceptedBody: string[] = ['ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts
index d49765dc4..24224218d 100644
--- a/src/api/api/sql.ts
+++ b/src/api/api/sql.ts
@@ -50,8 +50,15 @@ export default class Sql {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['cursor']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -144,8 +151,15 @@ export default class Sql {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['columnar', 'cursor', 'fetch_size', 'filter', 'query', 'request_timeout', 'page_timeout', 'time_zone', 'field_multi_value_leniency']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
    params = params ?? {}

     for (const key in params) {
@@ -173,8 +187,15 @@ export default class Sql {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['fetch_size', 'filter', 'query', 'time_zone']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts
index 5f5565020..ce3895dd1 100644
--- a/src/api/api/terms_enum.ts
+++ b/src/api/api/terms_enum.ts
@@ -44,8 +44,15 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['field', 'size', 'timeout', 'case_insensitive', 'index_filter', 'string', 'search_after']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params.body != null ? { ...params.body } : undefined
+  }

   for (const key in params) {
     if (acceptedBody.includes(key)) {
diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts
index 0aa6846f3..32c3456e5 100644
--- a/src/api/api/termvectors.ts
+++ b/src/api/api/termvectors.ts
@@ -44,8 +44,15 @@ export default async function TermvectorsApi (this: That, p
   const acceptedPath: string[] = ['index', 'id']
   const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params.body != null ? { ...params.body } : undefined
+  }

   for (const key in params) {
     if (acceptedBody.includes(key)) {
diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts
index 85f2d7341..fdc47bf51 100644
--- a/src/api/api/transform.ts
+++ b/src/api/api/transform.ts
@@ -124,8 +124,15 @@ export default class Transform {
     const acceptedPath: string[] = ['transform_id']
     const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
    params = params ?? {}

     for (const key in params) {
@@ -160,8 +167,15 @@ export default class Transform {
     const acceptedPath: string[] = ['transform_id']
     const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -232,8 +246,15 @@ export default class Transform {
     const acceptedPath: string[] = ['transform_id']
     const acceptedBody: string[] = ['dest', 'description', 'frequency', 'source', 'settings', 'sync', 'retention_policy']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
diff --git a/src/api/api/update.ts b/src/api/api/update.ts
index 69f7ff82b..454e6556f 100644
--- a/src/api/api/update.ts
+++ b/src/api/api/update.ts
@@ -44,8 +44,15 @@ export default async function UpdateApi
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params.body != null ? { ...params.body } : undefined
+  }

   for (const key in params) {
     if (acceptedBody.includes(key)) {
diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts
index 639e6f1de..808d3157e 100644
--- a/src/api/api/update_by_query.ts
+++ b/src/api/api/update_by_query.ts
@@ -44,8 +44,15 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice', 'conflicts']
   const querystring: Record<string, any> = {}
+  let body: Record<string, any> | string
   // @ts-expect-error
-  let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+  if (typeof params?.body === 'string') {
+    // @ts-expect-error
+    body = params.body
+  } else {
+    // @ts-expect-error
+    body = params.body != null ? { ...params.body } : undefined
+  }

   for (const key in params) {
     if (acceptedBody.includes(key)) {
diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts
index 3d4140d67..9eeb6ae92 100644
--- a/src/api/api/watcher.ts
+++ b/src/api/api/watcher.ts
@@ -145,8 +145,15 @@ export default class Watcher {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['action_modes', 'alternative_input', 'ignore_condition', 'record_execution', 'simulated_actions', 'trigger_data', 'watch']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
     params = params ?? {}

     for (const key in params) {
@@ -203,8 +210,15 @@ export default class Watcher {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'transform', 'trigger']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params.body != null ? { ...params.body } : undefined
+    }

     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -231,8 +245,15 @@ export default class Watcher {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['from', 'size', 'query', 'sort', 'search_after']
     const querystring: Record<string, any> = {}
+    let body: Record<string, any> | string
     // @ts-expect-error
-    let body: Record<string, any> = params?.body != null ? { ...params.body } : undefined
+    if (typeof params?.body === 'string') {
+      // @ts-expect-error
+      body = params.body
+    } else {
+      // @ts-expect-error
+      body = params?.body != null ? { ...params.body } : undefined
+    }
    params = params ?? {}

     for (const key in params) {
diff --git a/test/unit/api.test.ts b/test/unit/api.test.ts
index 9de738365..7d8f14fcb 100644
--- a/test/unit/api.test.ts
+++ b/test/unit/api.test.ts
@@ -163,3 +163,35 @@ test('Using the body key should not mutate the body', async t => {

   t.same(body, { query: { match_all: {} } })
 })
+
+test('Using the body key with a string value', async t => {
+  t.plan(2)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      // @ts-expect-error
+      t.same(JSON.parse(opts.body), { query: { match_all: {} } })
+      return {
+        statusCode: 200,
+        body: { took: 42 }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection
+  })
+
+  try {
+    const body = { query: { match_all: {} } }
+    await client.search({
+      index: 'test',
+      // @ts-expect-error
+      body: JSON.stringify(body)
+    })
+    t.pass('ok!')
+  } catch (err: any) {
+    t.fail(err)
+  }
+})
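The unit test above pins down the behaviour this patch introduces: a request body that is already a serialized JSON string is forwarded untouched instead of being spread into a fresh object. A minimal usage sketch of that behaviour follows — it is not part of the patch, and it assumes a reachable cluster at http://localhost:9200 and an existing index named test (both illustrative):

  import { Client } from '@elastic/elasticsearch'

  async function run (): Promise<void> {
    const client = new Client({ node: 'http://localhost:9200/' })

    // With this change a pre-serialized string body is sent as-is; the
    // @ts-expect-error mirrors the test above, since the 8.x typings
    // still declare `body` as an object.
    const result = await client.search({
      index: 'test',
      // @ts-expect-error body is typed as an object in the definitions
      body: JSON.stringify({ query: { match_all: {} } })
    })
    console.log(result)
  }

  run().catch(console.error)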

From bf317be2a788d04b8be9e148fe897e6444548284 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 25 Oct 2021 19:08:01 +0200
Subject: [PATCH 112/647] Bumped v8.0.0-canary.35

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index da8da902b..0670d3834 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
   "version": "8.0.0-alpha.2",
-  "versionCanary": "8.0.0-canary.34",
+  "versionCanary": "8.0.0-canary.35",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",

From 88933ad39e4ff7d8d3f74a1280e4d5fd00910055 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Wed, 27 Oct 2021 17:06:03 +0200
Subject: [PATCH 113/647] Updated Jenkins jobs

---
 ...7.14.yml => elastic+elasticsearch-js+7.16.yml} |  8 ++++----
 .ci/jobs/elastic+elasticsearch-js+7.x.yml         | 15 ---------------
 2 files changed, 4 insertions(+), 19 deletions(-)
 rename .ci/jobs/{elastic+elasticsearch-js+7.14.yml => elastic+elasticsearch-js+7.16.yml} (60%)
 delete mode 100644 .ci/jobs/elastic+elasticsearch-js+7.x.yml

diff --git a/.ci/jobs/elastic+elasticsearch-js+7.14.yml b/.ci/jobs/elastic+elasticsearch-js+7.16.yml
similarity index 60%
rename from .ci/jobs/elastic+elasticsearch-js+7.14.yml
rename to .ci/jobs/elastic+elasticsearch-js+7.16.yml
index 0cab6d3eb..c2a4ffe70 100644
--- a/.ci/jobs/elastic+elasticsearch-js+7.14.yml
+++ b/.ci/jobs/elastic+elasticsearch-js+7.16.yml
@@ -1,13 +1,13 @@
 ---
 - job:
-    name: elastic+elasticsearch-js+7.14
-    display-name: 'elastic / elasticsearch-js # 7.14'
-    description: Testing the elasticsearch-js 7.14 branch.
+    name: elastic+elasticsearch-js+7.16
+    display-name: 'elastic / elasticsearch-js # 7.16'
+    description: Testing the elasticsearch-js 7.16 branch.
     junit_results: "*-junit.xml"
     parameters:
       - string:
           name: branch_specifier
-          default: refs/heads/7.14
+          default: refs/heads/7.16
          description: the Git branch specifier to build (<branchName>, <tagName>,
            <commitId>, etc.)
     triggers:

diff --git a/.ci/jobs/elastic+elasticsearch-js+7.x.yml b/.ci/jobs/elastic+elasticsearch-js+7.x.yml
deleted file mode 100644
index ec38aeebe..000000000
--- a/.ci/jobs/elastic+elasticsearch-js+7.x.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- job:
-    name: elastic+elasticsearch-js+7.x
-    display-name: 'elastic / elasticsearch-js # 7.x'
-    description: Testing the elasticsearch-js 7.x branch.
-    junit_results: "*-junit.xml"
-    parameters:
-      - string:
-          name: branch_specifier
-          default: refs/heads/7.x
-          description: the Git branch specifier to build (<branchName>, <tagName>,
-            <commitId>, etc.)
-    triggers:
-      - github
-      - timed: 'H */12 * * *'

From 76659b604f97993da84a5e3d12d9df7c3e0cbd52 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Wed, 3 Nov 2021 14:28:43 +0100
Subject: [PATCH 114/647] API generation

---
 src/api/api/async_search.ts             |  20 +-
 src/api/api/ccr.ts                      |  44 +-
 src/api/api/clear_scroll.ts             |  11 +-
 src/api/api/close_point_in_time.ts      |  11 +-
 src/api/api/cluster.ts                  |  44 +-
 src/api/api/count.ts                    |  11 +-
 src/api/api/delete_by_query.ts          |  11 +-
 src/api/api/enrich.ts                   |  11 +-
 src/api/api/eql.ts                      |  11 +-
 src/api/api/explain.ts                  |  11 +-
 src/api/api/field_caps.ts               |  11 +-
 src/api/api/fleet.ts                    |   6 +-
 src/api/api/graph.ts                    |  11 +-
 src/api/api/ilm.ts                      |  22 +-
 src/api/api/indices.ts                  | 165 +++---
 src/api/api/ingest.ts                   |  22 +-
 src/api/api/knn_search.ts               |  26 +-
 src/api/api/license.ts                  |  11 +-
 src/api/api/mget.ts                     |  11 +-
 src/api/api/ml.ts                       | 351 +++---
 src/api/api/mtermvectors.ts             |  11 +-
 src/api/api/nodes.ts                    |  11 +-
 src/api/api/put_script.ts               |  11 +-
 src/api/api/rank_eval.ts                |  11 +-
 src/api/api/reindex.ts                  |  11 +-
 src/api/api/render_search_template.ts   |  11 +-
 src/api/api/rollup.ts                   |  24 +-
 src/api/api/scripts_painless_execute.ts |  11 +-
 src/api/api/scroll.ts                   |  11 +-
 src/api/api/search.ts                   |  20 +-
 src/api/api/search_mvt.ts               |  11 +-
 src/api/api/search_template.ts          |  11 +-
 src/api/api/searchable_snapshots.ts     |  11 +-
 src/api/api/security.ts                 | 110 +++--
 src/api/api/slm.ts                      |  11 +-
 src/api/api/snapshot.ts                 |  44 +-
 src/api/api/sql.ts                      |  33 +-
 src/api/api/terms_enum.ts               |  11 +-
 src/api/api/termvectors.ts              |  11 +-
 src/api/api/transform.ts                |  33 +-
 src/api/api/update.ts                   |  11 +-
 src/api/api/update_by_query.ts          |  11 +-
 src/api/api/watcher.ts                  |  33 +-
 src/api/kibana.ts                       |   3 +-
 src/api/types.ts                        | 559 +++++++++++++++--------
 src/api/typesWithBodyKey.ts             | 565 ++++++++++++++++--------
 46 files changed, 1322 insertions(+), 1099 deletions(-)
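The per-endpoint hunks that follow all apply the same mechanical rewrite: `params?.body` is read once into a `userBody` local typed as `any`, so a single `@ts-expect-error` replaces the per-line suppressions of the previous version. A condensed sketch of the resulting shape, with names exactly as they appear in the diff below:

  // @ts-expect-error
  const userBody: any = params?.body
  let body: Record<string, any> | string
  if (typeof userBody === 'string') {
    body = userBody
  } else {
    body = userBody != null ? { ...userBody } : undefined
  }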
// @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} @@ -131,7 +130,12 @@ export default class AsyncSearch { if (acceptedBody.includes(key)) { body = body ?? {} // @ts-expect-error - body[key] = params[key] + if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) { + querystring[key] = params[key] + } else { + // @ts-expect-error + body[key] = params[key] + } } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index ae8243b25..63c7ada21 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -72,14 +72,13 @@ export default class Ccr { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster'] const querystring: Record = {} - let body: Record | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -151,14 +150,13 @@ export default class Ccr { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['follower_cluster', 'follower_index', 'follower_index_uuid', 'leader_remote_cluster'] const querystring: Record = {} - let body: Record | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -260,14 +258,13 @@ export default class Ccr { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['remote_cluster', 'follow_index_pattern', 'leader_index_patterns', 'leader_index_exclusion_patterns', 'max_outstanding_read_requests', 'settings', 'max_outstanding_write_requests', 'read_poll_timeout', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size'] const querystring: Record = {} - let body: Record | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? 
{ ...userBody } : undefined } for (const key in params) { @@ -317,14 +314,13 @@ export default class Ccr { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts index d29ecb2d6..a14482cd7 100644 --- a/src/api/api/clear_scroll.ts +++ b/src/api/api/clear_scroll.ts @@ -44,14 +44,13 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll const acceptedPath: string[] = [] const acceptedBody: string[] = ['scroll_id'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts index 6b5234372..4e1a7d4b1 100644 --- a/src/api/api/close_point_in_time.ts +++ b/src/api/api/close_point_in_time.ts @@ -44,14 +44,13 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo const acceptedPath: string[] = [] const acceptedBody: string[] = ['id'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index b812e99f6..1abdbdf8c 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -50,14 +50,13 @@ export default class Cluster { const acceptedPath: string[] = [] const acceptedBody: string[] = ['current_node', 'index', 'primary', 'shard'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ??
{} @@ -282,14 +281,13 @@ export default class Cluster { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['template', 'aliases', 'mappings', 'settings', 'version', '_meta'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -317,14 +315,13 @@ export default class Cluster { const acceptedPath: string[] = [] const acceptedBody: string[] = ['persistent', 'transient'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} @@ -376,14 +373,13 @@ export default class Cluster { const acceptedPath: string[] = [] const acceptedBody: string[] = ['commands'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} diff --git a/src/api/api/count.ts b/src/api/api/count.ts index 4cb8af96a..aec469fd7 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -44,14 +44,13 @@ export default async function CountApi (this: That, params?: T.CountRequest | TB const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['query'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index 7c4194f78..665bfe810 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -44,14 +44,13 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['max_docs', 'query', 'slice'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ?
{ ...userBody } : undefined } for (const key in params) { diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index 8ab9e6167..74909ed40 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -124,14 +124,13 @@ export default class Enrich { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['geo_match', 'match'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index 93c815f92..73aabaa57 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -116,14 +116,13 @@ export default class Eql { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index 37a29cd5f..4235f64c1 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -44,14 +44,13 @@ export default async function ExplainApi (this: That, param const acceptedPath: string[] = ['id', 'index'] const acceptedBody: string[] = ['query'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index f1d1e33a4..d558bd531 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -44,14 +44,13 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['index_filter', 'runtime_mappings'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ??
{} diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index c50913d9a..91a82d88c 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -86,10 +86,10 @@ export default class Fleet { let path = '' if (params.index != null) { method = body != null ? 'POST' : 'GET' - path = `/${encodeURIComponent(params.index.toString())}/_fleet/_msearch` + path = `/${encodeURIComponent(params.index.toString())}/_fleet/_fleet_msearch` } else { method = body != null ? 'POST' : 'GET' - path = '/_fleet/_msearch' + path = '/_fleet/_fleet_msearch' } return await this.transport.request({ path, method, querystring, bulkBody: body }, options) } @@ -112,7 +112,7 @@ export default class Fleet { } const method = body != null ? 'POST' : 'GET' - const path = `/${encodeURIComponent(params.index.toString())}/_fleet/_search` + const path = `/${encodeURIComponent(params.index.toString())}/_fleet/_fleet_search` return await this.transport.request({ path, method, querystring, body }, options) } } diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index 9342c73ca..fdfb8d7f6 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -50,14 +50,13 @@ export default class Graph { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['connections', 'controls', 'query', 'vertices'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index e8775bd9f..a7887e21e 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -169,14 +169,13 @@ export default class Ilm { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['current_step', 'next_step'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -204,14 +203,13 @@ export default class Ilm { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['policy'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ?
{ ...userBody } : undefined } for (const key in params) { diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 45c8e3039..b71fdc405 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -72,14 +72,13 @@ export default class Indices { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['analyzer', 'attributes', 'char_filter', 'explain', 'field', 'filter', 'normalizer', 'text', 'tokenizer'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} @@ -145,14 +144,13 @@ export default class Indices { const acceptedPath: string[] = ['index', 'target'] const acceptedBody: string[] = ['aliases', 'settings'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -202,14 +200,13 @@ export default class Indices { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['aliases', 'mappings', 'settings'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ?
{ ...userBody } : undefined } for (const key in params) { @@ -620,28 +617,6 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } - async freeze (this: That, params: T.IndicesFreezeRequest | TB.IndicesFreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async freeze (this: That, params: T.IndicesFreezeRequest | TB.IndicesFreezeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async freeze (this: That, params: T.IndicesFreezeRequest | TB.IndicesFreezeRequest, options?: TransportRequestOptions): Promise - async freeze (this: That, params: T.IndicesFreezeRequest | TB.IndicesFreezeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record<string, any> = {} - const body = undefined - - for (const key in params) { - if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] - } - } - - const method = 'POST' - const path = `/${encodeURIComponent(params.index.toString())}/_freeze` - return await this.transport.request({ path, method, querystring, body }, options) - } - async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptions): Promise @@ -980,14 +955,13 @@ export default class Indices { const acceptedPath: string[] = ['index', 'name'] const acceptedBody: string[] = ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1022,14 +996,13 @@ export default class Indices { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ?
{ ...userBody } : undefined } for (const key in params) { @@ -1057,14 +1030,13 @@ export default class Indices { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['date_detection', 'dynamic', 'dynamic_date_formats', 'dynamic_templates', '_field_names', '_meta', 'numeric_detection', 'properties', '_routing', '_source', 'runtime'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1126,14 +1098,13 @@ export default class Indices { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'order', 'settings', 'version'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1265,14 +1236,13 @@ export default class Indices { const acceptedPath: string[] = ['alias', 'new_index'] const acceptedBody: string[] = ['aliases', 'conditions', 'mappings', 'settings'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1367,14 +1337,13 @@ export default class Indices { const acceptedPath: string[] = ['index', 'target'] const acceptedBody: string[] = ['aliases', 'settings'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1402,14 +1371,13 @@ export default class Indices { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['index_patterns', 'composed_of', 'overlapping', 'template'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ?
{ ...userBody } : undefined } for (const key in params) { @@ -1471,14 +1439,13 @@ export default class Indices { const acceptedPath: string[] = ['index', 'target'] const acceptedBody: string[] = ['aliases', 'settings'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1564,14 +1531,13 @@ export default class Indices { const acceptedPath: string[] = [] const acceptedBody: string[] = ['actions'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} @@ -1600,14 +1566,13 @@ export default class Indices { const acceptedPath: string[] = ['index', 'type'] const acceptedBody: string[] = ['query'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index 58a9fce71..ac7b77cd2 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -148,14 +148,13 @@ export default class Ingest { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -183,14 +182,13 @@ export default class Ingest { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['docs', 'pipeline'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ??
{} diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index 4f6ffff5f..6431d505e 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -37,19 +37,31 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } -export default async function KnnSearchApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function KnnSearchApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> -export default async function KnnSearchApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise -export default async function KnnSearchApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { +export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptions): Promise> +export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['_source', 'docvalue_fields', 'stored_fields', 'fields', 'knn'] const querystring: Record<string, any> = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/license.ts b/src/api/api/license.ts index 3960c0f44..26c4e59e7 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -142,14 +142,13 @@ export default class License { const acceptedPath: string[] = [] const acceptedBody: string[] = ['license', 'licenses'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ??
{} diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts index 5e9217999..a0d678656 100644 --- a/src/api/api/mget.ts +++ b/src/api/api/mget.ts @@ -44,14 +44,13 @@ export default async function MgetApi (this: That, params?: const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['docs', 'ids'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index e01d99111..4d0476bae 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -182,14 +182,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['requests_per_second', 'timeout'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} @@ -364,14 +363,13 @@ export default class Ml { const acceptedPath: string[] = [] const acceptedBody: string[] = ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} @@ -400,14 +398,13 @@ export default class Ml { const acceptedPath: string[] = [] const acceptedBody: string[] = ['evaluation', 'index', 'query'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -435,14 +432,13 @@ export default class Ml { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ?
{ ...userBody } : undefined } for (const key in params) { @@ -477,14 +473,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'start'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -512,14 +507,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['duration', 'expires_in', 'max_model_memory'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -545,16 +539,15 @@ export default class Ml { async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'timestamp'] - const acceptedBody: string[] = ['anomaly_score', 'desc', 'exclude_interim', 'expand', 'page', 'sort', 'start', 'end'] + const acceptedBody: string[] = ['anomaly_score', 'desc', 'exclude_interim', 'expand', 'sort', 'start', 'end'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -611,14 +604,13 @@ export default class Ml { const acceptedPath: string[] = ['calendar_id'] const acceptedBody: string[] = ['page'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} @@ -654,14 +646,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id', 'category_id'] const acceptedBody: string[] = ['page'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ?
{ ...userBody } : undefined } for (const key in params) { @@ -846,14 +837,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['page'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -941,14 +931,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id', 'snapshot_id'] const acceptedBody: string[] = ['start', 'end'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -981,24 +970,11 @@ export default class Ml { async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_no_jobs'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string - // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body - } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined - } + const body = undefined for (const key in params) { - if (acceptedBody.includes(key)) { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error querystring[key] = params[key] } } @@ -1018,14 +994,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['desc', 'exclude_interim', 'page', 'record_score', 'sort', 'start', 'end'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1180,14 +1155,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['timeout'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ?
{ ...userBody } : undefined } for (const key in params) { @@ -1215,14 +1189,13 @@ export default class Ml { const acceptedPath: string[] = ['calendar_id'] const acceptedBody: string[] = ['events'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1277,14 +1250,13 @@ export default class Ml { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['config'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} @@ -1318,16 +1290,15 @@ export default class Ml { async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['job_config', 'datafeed_config'] + const acceptedBody: string[] = ['datafeed_config', 'job_config'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} @@ -1363,14 +1334,13 @@ export default class Ml { const acceptedPath: string[] = ['calendar_id'] const acceptedBody: string[] = ['description'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1420,14 +1390,13 @@ export default class Ml { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', 'model_memory_limit', 'source'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ?
{ ...userBody } : undefined } for (const key in params) { @@ -1455,14 +1424,13 @@ export default class Ml { const acceptedPath: string[] = ['datafeed_id'] const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1490,14 +1458,13 @@ export default class Ml { const acceptedPath: string[] = ['filter_id'] const acceptedBody: string[] = ['description', 'items'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1525,14 +1492,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1560,14 +1526,13 @@ export default class Ml { const acceptedPath: string[] = ['model_id'] const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'tags'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ?
{ ...userBody } : undefined } for (const key in params) { @@ -1683,14 +1648,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id', 'snapshot_id'] const acceptedBody: string[] = ['delete_intervening_results'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1763,14 +1727,13 @@ export default class Ml { const acceptedPath: string[] = ['datafeed_id'] const acceptedBody: string[] = ['end', 'start', 'timeout'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1842,14 +1805,13 @@ export default class Ml { const acceptedPath: string[] = ['datafeed_id'] const acceptedBody: string[] = ['force', 'timeout'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1899,14 +1861,13 @@ export default class Ml { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -1956,14 +1917,13 @@ export default class Ml { const acceptedPath: string[] = ['filter_id'] const acceptedBody: string[] = ['add_items', 'description', 'remove_items'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ?
{ ...userBody } : undefined } for (const key in params) { @@ -1991,14 +1951,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -2026,14 +1985,13 @@ export default class Ml { const acceptedPath: string[] = ['job_id', 'snapshot_id'] const acceptedBody: string[] = ['description', 'retain'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -2083,14 +2041,13 @@ export default class Ml { const acceptedPath: string[] = [] const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_retention_days', 'results_index_name'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts index dd1be8659..c3dcf2e07 100644 --- a/src/api/api/mtermvectors.ts +++ b/src/api/api/mtermvectors.ts @@ -44,14 +44,13 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['docs', 'ids'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ??
{} diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index 438e53454..214684566 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -160,14 +160,13 @@ export default class Nodes { const acceptedPath: string[] = ['node_id'] const acceptedBody: string[] = ['secure_settings_password'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts index da5b93544..5d6711fd0 100644 --- a/src/api/api/put_script.ts +++ b/src/api/api/put_script.ts @@ -44,14 +44,13 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque const acceptedPath: string[] = ['id', 'context'] const acceptedBody: string[] = ['script'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts index 016091227..056e5bf7f 100644 --- a/src/api/api/rank_eval.ts +++ b/src/api/api/rank_eval.ts @@ -44,14 +44,13 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['requests', 'metric'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 0c121f661..85e20deee 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -44,14 +44,13 @@ export default async function ReindexApi (this: That, params?: T.ReindexRequest const acceptedPath: string[] = [] const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ??
{} diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index cf9547a32..bf4d68841 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -44,14 +44,13 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['file', 'params', 'source'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ?? {} diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index df24e9005..848d0724e 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -154,14 +154,13 @@ export default class Rollup { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { @@ -214,16 +213,15 @@ export default class Rollup { async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise> async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'type'] - const acceptedBody: string[] = ['aggs', 'query', 'size'] + const acceptedBody: string[] = ['aggregations', 'query', 'size'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } for (const key in params) { diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index b15438660..f7757f513 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -44,14 +44,13 @@ export default async function ScriptsPainlessExecuteApi (this const acceptedPath: string[] = [] const acceptedBody: string[] = ['context', 'context_setup', 'script'] const querystring: Record<string, any> = {} - let body: Record<string, any> | string // @ts-expect-error - if (typeof params?.body === 'string') { - // @ts-expect-error - body = params.body + const userBody: any = params?.body + let body: Record<string, any> | string + if (typeof userBody === 'string') { + body = userBody } else { - // @ts-expect-error - body = params?.body != null ? { ...params.body } : undefined + body = userBody != null ? { ...userBody } : undefined } params = params ??
diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts
index 26191be83..c35577d1d 100644
--- a/src/api/api/scroll.ts
+++ b/src/api/api/scroll.ts
@@ -44,14 +44,13 @@ export default async function ScrollApi (this: That, params
   const acceptedPath: string[] = []
   const acceptedBody: string[] = ['scroll', 'scroll_id']
   const querystring: Record<string, any> = {}
-  let body: Record<string, any> | string
   // @ts-expect-error
-  if (typeof params?.body === 'string') {
-    // @ts-expect-error
-    body = params.body
+  const userBody: any = params?.body
+  let body: Record<string, any> | string
+  if (typeof userBody === 'string') {
+    body = userBody
   } else {
-    // @ts-expect-error
-    body = params.body != null ? { ...params.body } : undefined
+    body = userBody != null ? { ...userBody } : undefined
   }
 
   for (const key in params) {
diff --git a/src/api/api/search.ts b/src/api/api/search.ts
index 34125fafd..12d7aa1c3 100644
--- a/src/api/api/search.ts
+++ b/src/api/api/search.ts
@@ -42,16 +42,15 @@ export default async function SearchApi (this: That, params
 export default async function SearchApi<TDocument = unknown, TContext = unknown> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise<TransportResult<T.SearchResponse<TDocument>, TContext>>
 export default async function SearchApi<TDocument = unknown, TContext = unknown> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['aggs', 'aggregations', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
+  const acceptedBody: string[] = ['aggregations', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
   const querystring: Record<string, any> = {}
-  let body: Record<string, any> | string
   // @ts-expect-error
-  if (typeof params?.body === 'string') {
-    // @ts-expect-error
-    body = params.body
+  const userBody: any = params?.body
+  let body: Record<string, any> | string
+  if (typeof userBody === 'string') {
+    body = userBody
   } else {
-    // @ts-expect-error
-    body = params?.body != null ? { ...params.body } : undefined
+    body = userBody != null ? { ...userBody } : undefined
   }
 
   params = params ?? {}
@@ -59,7 +58,12 @@ export default async function SearchApi (this: That, params
     if (acceptedBody.includes(key)) {
       body = body ?? {}
       // @ts-expect-error
-      body[key] = params[key]
+      if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) {
+        querystring[key] = params[key]
+      } else {
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts
index fd001aa06..23ec4c9a3 100644
--- a/src/api/api/search_mvt.ts
+++ b/src/api/api/search_mvt.ts
@@ -44,14 +44,13 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque
   const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y']
   const acceptedBody: string[] = ['aggs', 'exact_bounds', 'extent', 'fields', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort']
   const querystring: Record<string, any> = {}
-  let body: Record<string, any> | string
   // @ts-expect-error
-  if (typeof params?.body === 'string') {
-    // @ts-expect-error
-    body = params.body
+  const userBody: any = params?.body
+  let body: Record<string, any> | string
+  if (typeof userBody === 'string') {
+    body = userBody
   } else {
-    // @ts-expect-error
-    body = params.body != null ? { ...params.body } : undefined
+    body = userBody != null ? { ...userBody } : undefined
   }
 
   for (const key in params) {
diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts
index e716d7c7b..84c0fb9df 100644
--- a/src/api/api/search_template.ts
+++ b/src/api/api/search_template.ts
@@ -44,14 +44,13 @@ export default async function SearchTemplateApi (this: That
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['explain', 'id', 'params', 'profile', 'source']
   const querystring: Record<string, any> = {}
-  let body: Record<string, any> | string
   // @ts-expect-error
-  if (typeof params?.body === 'string') {
-    // @ts-expect-error
-    body = params.body
+  const userBody: any = params?.body
+  let body: Record<string, any> | string
+  if (typeof userBody === 'string') {
+    body = userBody
   } else {
-    // @ts-expect-error
-    body = params?.body != null ? { ...params.body } : undefined
+    body = userBody != null ? { ...userBody } : undefined
   }
 
   params = params ?? {}
diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts
index 20547eadb..642eaa2e7 100644
--- a/src/api/api/searchable_snapshots.ts
+++ b/src/api/api/searchable_snapshots.ts
@@ -109,14 +109,13 @@ export default class SearchableSnapshots {
     const acceptedPath: string[] = ['repository', 'snapshot']
     const acceptedBody: string[] = ['index', 'renamed_index', 'index_settings', 'ignore_index_settings']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
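Note the second search.ts hunk above: a string `sort` containing a colon (for example `timestamp:desc`) is now routed to the querystring instead of the request body. A usage sketch, assuming a configured `client` instance and an existing index (both hypothetical):

    // 'timestamp:desc' is serialized as ?sort=timestamp:desc on the URL;
    // an object or array sort is still sent in the request body.
    const result = await client.search({
      index: 'my-index',
      sort: 'timestamp:desc',
      query: { match_all: {} }
    })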
diff --git a/src/api/api/security.ts b/src/api/api/security.ts
index cf97ed46c..c3430b533 100644
--- a/src/api/api/security.ts
+++ b/src/api/api/security.ts
@@ -73,14 +73,13 @@ export default class Security {
     const acceptedPath: string[] = ['username']
     const acceptedBody: string[] = ['password']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params?.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     params = params ?? {}
@@ -226,14 +225,13 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['expiration', 'name', 'role_descriptors', 'metadata']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params?.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     params = params ?? {}
@@ -683,14 +681,13 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['grant_type', 'scope', 'password', 'kerberos_ticket', 'refresh_token', 'username']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params?.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     params = params ?? {}
@@ -772,14 +769,13 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
@@ -807,14 +803,13 @@ export default class Security {
     const acceptedPath: string[] = ['user']
     const acceptedBody: string[] = ['application', 'cluster', 'index']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params?.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     params = params ?? {}
@@ -850,14 +845,13 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['id', 'ids', 'name', 'owner', 'realm_name', 'username']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params?.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     params = params ?? {}
@@ -886,14 +880,13 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['token', 'refresh_token', 'realm_name', 'username']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params?.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     params = params ?? {}
@@ -949,14 +942,13 @@ export default class Security {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'metadata', 'run_as', 'transient_metadata']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
@@ -984,14 +976,13 @@ export default class Security {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'rules', 'run_as']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
@@ -1019,14 +1010,13 @@ export default class Security {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['username', 'email', 'full_name', 'metadata', 'password', 'password_hash', 'roles', 'enabled']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts
index f12afa39e..a24fa13e5 100644
--- a/src/api/api/slm.ts
+++ b/src/api/api/slm.ts
@@ -193,14 +193,13 @@ export default class Slm {
     const acceptedPath: string[] = ['policy_id']
     const acceptedBody: string[] = ['config', 'name', 'repository', 'retention', 'schedule']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts
index e086a0b60..580e0fc97 100644
--- a/src/api/api/snapshot.ts
+++ b/src/api/api/snapshot.ts
@@ -72,14 +72,13 @@ export default class Snapshot {
     const acceptedPath: string[] = ['repository', 'snapshot', 'target_snapshot']
     const acceptedBody: string[] = ['indices']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
@@ -107,14 +106,13 @@ export default class Snapshot {
     const acceptedPath: string[] = ['repository', 'snapshot']
     const acceptedBody: string[] = ['ignore_unavailable', 'include_global_state', 'indices', 'feature_states', 'metadata', 'partial']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
@@ -142,14 +140,13 @@ export default class Snapshot {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['repository', 'type', 'settings']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
@@ -295,14 +292,13 @@ export default class Snapshot {
     const acceptedPath: string[] = ['repository', 'snapshot']
     const acceptedBody: string[] = ['ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts
index 24224218d..4273da298 100644
--- a/src/api/api/sql.ts
+++ b/src/api/api/sql.ts
@@ -50,14 +50,13 @@ export default class Sql {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['cursor']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
@@ -151,14 +150,13 @@ export default class Sql {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['columnar', 'cursor', 'fetch_size', 'filter', 'query', 'request_timeout', 'page_timeout', 'time_zone', 'field_multi_value_leniency']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params?.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     params = params ?? {}
@@ -187,14 +185,13 @@ export default class Sql {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['fetch_size', 'filter', 'query', 'time_zone']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
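The three sql.ts hunks above cover `clearCursor`, `query`, and `translate`, all of which move their accepted keys into the request body. A usage sketch for `sql.query` under the same assumptions (hypothetical `client` instance and index name):

    const response = await client.sql.query({
      query: 'SELECT "@timestamp", message FROM "my-index" ORDER BY "@timestamp" DESC',
      fetch_size: 5
    })
    // response.columns and response.rows hold the tabular result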
diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts
index ce3895dd1..eb88eb9db 100644
--- a/src/api/api/terms_enum.ts
+++ b/src/api/api/terms_enum.ts
@@ -44,14 +44,13 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['field', 'size', 'timeout', 'case_insensitive', 'index_filter', 'string', 'search_after']
   const querystring: Record<string, any> = {}
-  let body: Record<string, any> | string
   // @ts-expect-error
-  if (typeof params?.body === 'string') {
-    // @ts-expect-error
-    body = params.body
+  const userBody: any = params?.body
+  let body: Record<string, any> | string
+  if (typeof userBody === 'string') {
+    body = userBody
   } else {
-    // @ts-expect-error
-    body = params.body != null ? { ...params.body } : undefined
+    body = userBody != null ? { ...userBody } : undefined
   }
 
   for (const key in params) {
diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts
index 32c3456e5..d2cf887ba 100644
--- a/src/api/api/termvectors.ts
+++ b/src/api/api/termvectors.ts
@@ -44,14 +44,13 @@ export default async function TermvectorsApi (this: That, p
   const acceptedPath: string[] = ['index', 'id']
   const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer']
   const querystring: Record<string, any> = {}
-  let body: Record<string, any> | string
   // @ts-expect-error
-  if (typeof params?.body === 'string') {
-    // @ts-expect-error
-    body = params.body
+  const userBody: any = params?.body
+  let body: Record<string, any> | string
+  if (typeof userBody === 'string') {
+    body = userBody
   } else {
-    // @ts-expect-error
-    body = params.body != null ? { ...params.body } : undefined
+    body = userBody != null ? { ...userBody } : undefined
   }
 
   for (const key in params) {
diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts
index fdc47bf51..1edbc932c 100644
--- a/src/api/api/transform.ts
+++ b/src/api/api/transform.ts
@@ -124,14 +124,13 @@ export default class Transform {
     const acceptedPath: string[] = ['transform_id']
     const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params?.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     params = params ?? {}
@@ -167,14 +166,13 @@ export default class Transform {
     const acceptedPath: string[] = ['transform_id']
     const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
@@ -246,14 +244,13 @@ export default class Transform {
     const acceptedPath: string[] = ['transform_id']
     const acceptedBody: string[] = ['dest', 'description', 'frequency', 'source', 'settings', 'sync', 'retention_policy']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
diff --git a/src/api/api/update.ts b/src/api/api/update.ts
index 454e6556f..a38f31c22 100644
--- a/src/api/api/update.ts
+++ b/src/api/api/update.ts
@@ -44,14 +44,13 @@ export default async function UpdateApi (this: That, params: T.UpdateRequest
   const acceptedPath: string[] = ['id', 'index']
   const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', '_source', 'upsert']
   const querystring: Record<string, any> = {}
-  let body: Record<string, any> | string
   // @ts-expect-error
-  if (typeof params?.body === 'string') {
-    // @ts-expect-error
-    body = params.body
+  const userBody: any = params?.body
+  let body: Record<string, any> | string
+  if (typeof userBody === 'string') {
+    body = userBody
   } else {
-    // @ts-expect-error
-    body = params.body != null ? { ...params.body } : undefined
+    body = userBody != null ? { ...userBody } : undefined
   }
 
   for (const key in params) {
diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts
index 808d3157e..ada1a9595 100644
--- a/src/api/api/update_by_query.ts
+++ b/src/api/api/update_by_query.ts
@@ -44,14 +44,13 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice', 'conflicts']
   const querystring: Record<string, any> = {}
-  let body: Record<string, any> | string
   // @ts-expect-error
-  if (typeof params?.body === 'string') {
-    // @ts-expect-error
-    body = params.body
+  const userBody: any = params?.body
+  let body: Record<string, any> | string
+  if (typeof userBody === 'string') {
+    body = userBody
   } else {
-    // @ts-expect-error
-    body = params.body != null ? { ...params.body } : undefined
+    body = userBody != null ? { ...userBody } : undefined
   }
 
   for (const key in params) {
diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts
index 9eeb6ae92..d01f43cf3 100644
--- a/src/api/api/watcher.ts
+++ b/src/api/api/watcher.ts
@@ -145,14 +145,13 @@ export default class Watcher {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['action_modes', 'alternative_input', 'ignore_condition', 'record_execution', 'simulated_actions', 'trigger_data', 'watch']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params?.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     params = params ?? {}
@@ -210,14 +209,13 @@ export default class Watcher {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'transform', 'trigger']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     for (const key in params) {
@@ -245,14 +243,13 @@ export default class Watcher {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['from', 'size', 'query', 'sort', 'search_after']
     const querystring: Record<string, any> = {}
-    let body: Record<string, any> | string
     // @ts-expect-error
-    if (typeof params?.body === 'string') {
-      // @ts-expect-error
-      body = params.body
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
     } else {
-      // @ts-expect-error
-      body = params?.body != null ? { ...params.body } : undefined
+      body = userBody != null ? { ...userBody } : undefined
     }
 
     params = params ?? {}
diff --git a/src/api/kibana.ts b/src/api/kibana.ts
index 7d00e1f2d..4aab8b1eb 100644
--- a/src/api/kibana.ts
+++ b/src/api/kibana.ts
@@ -207,7 +207,6 @@ interface KibanaClient {
     fieldUsageStats: <TContext = unknown>(params?: T.TODO, options?: TransportRequestOptions) => Promise<TransportResult<T.TODO, TContext>>
     flush: <TContext = unknown>(params?: T.IndicesFlushRequest| TB.IndicesFlushRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.IndicesFlushResponse, TContext>>
     forcemerge: <TContext = unknown>(params?: T.IndicesForcemergeRequest| TB.IndicesForcemergeRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.IndicesForcemergeResponse, TContext>>
-    freeze: <TContext = unknown>(params: T.IndicesFreezeRequest| TB.IndicesFreezeRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.IndicesFreezeResponse, TContext>>
     get: <TContext = unknown>(params: T.IndicesGetRequest| TB.IndicesGetRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.IndicesGetResponse, TContext>>
     getAlias: <TContext = unknown>(params?: T.IndicesGetAliasRequest| TB.IndicesGetAliasRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.IndicesGetAliasResponse, TContext>>
     getDataStream: <TContext = unknown>(params?: T.IndicesGetDataStreamRequest| TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.IndicesGetDataStreamResponse, TContext>>
@@ -250,7 +249,7 @@ interface KibanaClient {
     putPipeline: <TContext = unknown>(params: T.IngestPutPipelineRequest| TB.IngestPutPipelineRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.IngestPutPipelineResponse, TContext>>
     simulate: <TContext = unknown>(params?: T.IngestSimulateRequest| TB.IngestSimulateRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.IngestSimulateResponse, TContext>>
   }
-  knnSearch: <TContext = unknown>(params?: T.TODO, options?: TransportRequestOptions) => Promise<TransportResult<T.TODO, TContext>>
+  knnSearch: <TDocument = unknown, TContext = unknown>(params: T.KnnSearchRequest| TB.KnnSearchRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.KnnSearchResponse<TDocument>, TContext>>
   license: {
     delete: <TContext = unknown>(params?: T.LicenseDeleteRequest| TB.LicenseDeleteRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.LicenseDeleteResponse, TContext>>
     get: <TContext = unknown>(params?: T.LicenseGetRequest| TB.LicenseGetRequest, options?: TransportRequestOptions) => Promise<TransportResult<T.LicenseGetResponse, TContext>>
diff --git a/src/api/types.ts b/src/api/types.ts
index 140608572..10df318d7 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -476,6 +476,34 @@ export interface InfoResponse {
   version: ElasticsearchVersionInfo
 }
 
+export interface KnnSearchRequest extends RequestBase {
+  index: Indices
+  routing?: Routing
+  _source?: boolean | Fields | SearchSourceFilter
+  docvalue_fields?: SearchDocValueField | (Field | SearchDocValueField)[]
+  stored_fields?: Fields
+  fields?: Fields
+  knn: KnnSearchQuery
+}
+
+export interface KnnSearchResponse<TDocument = unknown> {
+  took: long
+  timed_out: boolean
+  _shards: ShardStatistics
+  hits: SearchHitsMetadata<TDocument>
+  fields?: Record<string, any>
+  max_score?: double
+}
+
+export interface KnnSearchQuery {
+  field: Field
+  query_vector: KnnSearchQueryVector
+  k: long
+  num_candidates: long
+}
+
+export type KnnSearchQueryVector = double[]
+
 export interface MgetHit<TDocument = unknown> {
   error?: ErrorCause
   fields?: Record<string, any>
@@ -939,8 +967,8 @@ export interface SearchRequest extends RequestBase {
   size?: integer
   from?: integer
   sort?: string | string[]
-  aggs?: Record<string, AggregationsAggregationContainer>
   aggregations?: Record<string, AggregationsAggregationContainer>
+  aggs?: Record<string, AggregationsAggregationContainer>
   collapse?: SearchFieldCollapse
   highlight?: SearchHighlight
   indices_boost?: Record<IndexName, double>[]
@@ -1139,8 +1167,8 @@ export interface SearchGeoDistanceSortKeys {
   order?: SearchSortOrder
   unit?: DistanceUnit
 }
-export type SearchGeoDistanceSort = SearchGeoDistanceSortKeys |
-{ [property: string]: QueryDslGeoLocation | QueryDslGeoLocation[] }
+export type SearchGeoDistanceSort = SearchGeoDistanceSortKeys
+& { [property: string]: QueryDslGeoLocation | QueryDslGeoLocation[] | SearchSortMode | GeoDistanceType | SearchSortOrder | DistanceUnit }
 
 export interface SearchHighlight {
   fields: Record<Field, SearchHighlightField>
@@ -1408,8 +1436,8 @@ export interface SearchSortContainerKeys {
   _geo_distance?: SearchGeoDistanceSort
   _script?: SearchScriptSort
 }
-export type SearchSortContainer = SearchSortContainerKeys |
-{ [property: string]: SearchFieldSort | SearchSortOrder }
+export type SearchSortContainer = SearchSortContainerKeys
+& { [property: string]: SearchFieldSort | SearchSortOrder | SearchScoreSort | SearchGeoDistanceSort | SearchScriptSort }
 
 export type SearchSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median'
@@ -1872,8 +1900,8 @@ export interface ErrorCauseKeys {
   root_cause?: ErrorCause[]
   suppressed?: ErrorCause[]
 }
-export type ErrorCause = ErrorCauseKeys |
-{ [property: string]: any }
+export type ErrorCause = ErrorCauseKeys
+& { [property: string]: any }
 
 export interface ErrorResponseBase {
   error: ErrorCause
@@ -1992,8 +2020,8 @@ export interface InlineGetKeys<TDocument = unknown> {
   _routing?: Routing
   _source: TDocument
 }
-export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument> |
-{ [property: string]: any }
+export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument>
+& { [property: string]: any }
 
 export interface InlineScript extends ScriptBase {
   source: string
@@ -2573,8 +2601,8 @@ export interface AggregationsCompositeAggregationSource {
 
 export interface AggregationsCompositeBucketKeys {
 }
-export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys |
-{ [property: string]: AggregationsAggregate }
+export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys
+& { [property: string]: AggregationsAggregate }
 
 export interface AggregationsCompositeBucketAggregate extends AggregationsMultiBucketAggregate<Record<string, any>> {
   after_key: Record<string, any>
@@ -2606,8 +2634,8 @@ export interface AggregationsDateHistogramAggregation extends AggregationsBucket
 
 export interface AggregationsDateHistogramBucketKeys {
 }
-export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys |
-{ [property: string]: AggregationsAggregate }
+export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys
+& { [property: string]: AggregationsAggregate }
 
 export type AggregationsDateInterval = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'quarter' | 'year'
@@ -2682,8 +2710,8 @@ export interface AggregationsFiltersAggregation extends AggregationsBucketAggreg
 export interface AggregationsFiltersBucketItemKeys {
   doc_count: long
 }
-export type AggregationsFiltersBucketItem = AggregationsFiltersBucketItemKeys |
-{ [property: string]: AggregationsAggregate }
+export type AggregationsFiltersBucketItem = AggregationsFiltersBucketItemKeys
+& { [property: string]: AggregationsAggregate | long }
 
 export interface AggregationsFormatMetricAggregationBase extends AggregationsMetricAggregationBase {
   format?: string
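Several aliases above switch from a union (`Keys | { [property: string]: ... }`) to an intersection (`Keys & { [property: string]: ... }`). A minimal sketch of the difference, with simplified stand-in types (not repo code):

    interface BucketKeys { doc_count: number; key: string }

    // Union: any object is assignable through the index-signature branch,
    // so the declared keys are effectively never enforced.
    type AsUnion = BucketKeys | { [property: string]: unknown }
    const u: AsUnion = { anything: 1 } // compiles

    // Intersection: declared keys stay required and precisely typed, while
    // extra dynamic keys remain allowed. This is also why the index
    // signatures in the hunks above now enumerate the declared property
    // types (e.g. `AggregationsAggregate | long`).
    type AsIntersection = BucketKeys & { [property: string]: unknown }
    // const i: AsIntersection = { anything: 1 } // error: doc_count, key missing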
@@ -2847,16 +2875,16 @@ export interface AggregationsIpRangeAggregationRange {
 
 export interface AggregationsIpRangeBucketKeys {
 }
-export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys |
-{ [property: string]: AggregationsAggregate }
+export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys
+& { [property: string]: AggregationsAggregate }
 
 export interface AggregationsKeyedBucketKeys<TKey = unknown> {
   doc_count: long
   key: TKey
   key_as_string: string
 }
-export type AggregationsKeyedBucket<TKey = unknown> = AggregationsKeyedBucketKeys<TKey> |
-{ [property: string]: AggregationsAggregate }
+export type AggregationsKeyedBucket<TKey = unknown> = AggregationsKeyedBucketKeys<TKey>
+& { [property: string]: AggregationsAggregate | long | TKey | string }
 
 export interface AggregationsKeyedValueAggregate extends AggregationsValueAggregate {
   keys: string[]
@@ -3024,8 +3052,8 @@ export interface AggregationsRangeAggregation extends AggregationsBucketAggregat
 
 export interface AggregationsRangeBucketKeys {
 }
-export type AggregationsRangeBucket = AggregationsRangeBucketKeys |
-{ [property: string]: AggregationsAggregate }
+export type AggregationsRangeBucket = AggregationsRangeBucketKeys
+& { [property: string]: AggregationsAggregate }
 
 export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase {
   exclude?: string | string[]
@@ -3039,8 +3067,8 @@ export interface AggregationsRareTermsAggregation extends AggregationsBucketAggr
 
 export interface AggregationsRareTermsBucketKeys {
 }
-export type AggregationsRareTermsBucket = AggregationsRareTermsBucketKeys |
-{ [property: string]: AggregationsAggregate }
+export type AggregationsRareTermsBucket = AggregationsRareTermsBucketKeys
+& { [property: string]: AggregationsAggregate }
 
 export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase {
   unit?: AggregationsDateInterval
@@ -3108,8 +3136,8 @@ export interface AggregationsSignificantTermsAggregation extends AggregationsBuc
 
 export interface AggregationsSignificantTermsBucketKeys {
 }
-export type AggregationsSignificantTermsBucket = AggregationsSignificantTermsBucketKeys |
-{ [property: string]: AggregationsAggregate }
+export type AggregationsSignificantTermsBucket = AggregationsSignificantTermsBucketKeys
+& { [property: string]: AggregationsAggregate }
 
 export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase {
   background_filter?: QueryDslQueryContainer
@@ -3133,8 +3161,8 @@ export interface AggregationsSignificantTextAggregation extends AggregationsBuck
 export interface AggregationsSingleBucketAggregateKeys extends AggregationsAggregateBase {
   doc_count: double
 }
-export type AggregationsSingleBucketAggregate = AggregationsSingleBucketAggregateKeys |
-{ [property: string]: AggregationsAggregate }
+export type AggregationsSingleBucketAggregate = AggregationsSingleBucketAggregateKeys
+& { [property: string]: AggregationsAggregate | double | Record<string, any> }
 
 export interface AggregationsStandardDeviationBounds {
   lower?: double
@@ -3302,14 +3330,14 @@ export interface AggregationsWeightedAverageValue {
   script?: Script
 }
 
-export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer
+export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisDutchAnalyzer
 
 export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase {
   type: 'asciifolding'
   preserve_original: boolean
 }
 
-export type AnalysisCharFilter = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceTokenFilter
+export type AnalysisCharFilter = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceCharFilter | AnalysisIcuNormalizationCharFilter | AnalysisKuromojiIterationMarkCharFilter
 
 export interface AnalysisCharFilterBase {
   version?: VersionString
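With the widened `AnalysisAnalyzer` and `AnalysisCharFilter` unions above, configurations using the plugin-provided components type-check. A sketch (the import path matches the published package layout and is assumed here; the values are illustrative):

    import { AnalysisCharFilter } from '@elastic/elasticsearch/lib/api/types'

    // Valid because AnalysisIcuNormalizationCharFilter (added later in this
    // same diff) is now a member of the AnalysisCharFilter union.
    const icuNormalize: AnalysisCharFilter = {
      type: 'icu_normalizer',
      mode: 'compose',
      name: 'nfkc_cf'
    }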
@@ -3318,24 +3346,25 @@ export interface AnalysisCharFilterBase {
 export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase {
   type: 'char_group'
   tokenize_on_chars: string[]
+  max_token_length?: integer
 }
 
 export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
   type: 'common_grams'
-  common_words: string[]
-  common_words_path: string
-  ignore_case: boolean
-  query_mode: boolean
+  common_words?: string[]
+  common_words_path?: string
+  ignore_case?: boolean
+  query_mode?: boolean
 }
 
 export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase {
-  hyphenation_patterns_path: string
-  max_subword_size: integer
-  min_subword_size: integer
-  min_word_size: integer
-  only_longest_match: boolean
-  word_list: string[]
-  word_list_path: string
+  hyphenation_patterns_path?: string
+  max_subword_size?: integer
+  min_subword_size?: integer
+  min_word_size?: integer
+  only_longest_match?: boolean
+  word_list?: string[]
+  word_list_path?: string
 }
 
 export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase {
@@ -3367,18 +3396,28 @@ export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilter
   encoding: AnalysisDelimitedPayloadEncoding
 }
 
+export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
+  type: 'dictionary_decompounder'
+}
+
+export interface AnalysisDutchAnalyzer {
+  type: 'dutch'
+  stopwords?: AnalysisStopWords
+}
+
 export type AnalysisEdgeNGramSide = 'front' | 'back'
 
 export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase {
   type: 'edge_ngram'
   max_gram: integer
   min_gram: integer
-  side: AnalysisEdgeNGramSide
+  side?: AnalysisEdgeNGramSide
+  preserve_original?: boolean
 }
 
 export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
   type: 'edge_ngram'
-  custom_token_chars: string
+  custom_token_chars?: string
   max_gram: integer
   min_gram: integer
   token_chars: AnalysisTokenChar[]
@@ -3392,12 +3431,12 @@ export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
 
 export interface AnalysisFingerprintAnalyzer {
   type: 'fingerprint'
-  version: VersionString
+  version?: VersionString
   max_output_size: integer
   preserve_original: boolean
   separator: string
-  stopwords: AnalysisStopWords
-  stopwords_path: string
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
 }
 
 export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase {
@@ -3428,10 +3467,62 @@ export interface AnalysisIcuAnalyzer {
   mode: AnalysisIcuNormalizationMode
 }
 
+export type AnalysisIcuCollationAlternate = 'shifted' | 'non-ignorable'
+
+export type AnalysisIcuCollationCaseFirst = 'lower' | 'upper'
+
+export type AnalysisIcuCollationDecomposition = 'no' | 'identical'
+
+export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' | 'quaternary' | 'identical'
+
+export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase {
+  type: 'icu_collation'
+  alternate: AnalysisIcuCollationAlternate
+  caseFirst: AnalysisIcuCollationCaseFirst
+  caseLevel: boolean
+  country: string
+  decomposition: AnalysisIcuCollationDecomposition
+  hiraganaQuaternaryMode: boolean
+  language: string
+  numeric: boolean
+  strength: AnalysisIcuCollationStrength
+  variableTop?: string
+  variant: string
+}
+
+export interface AnalysisIcuFoldingTokenFilter extends AnalysisTokenFilterBase {
+  type: 'icu_folding'
+  unicode_set_filter: string
+}
+
+export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBase {
+  type: 'icu_normalizer'
+  mode?: AnalysisIcuNormalizationMode
+  name?: AnalysisIcuNormalizationType
+}
+
 export type AnalysisIcuNormalizationMode = 'decompose' | 'compose'
 
+export interface AnalysisIcuNormalizationTokenFilter extends AnalysisTokenFilterBase {
+  type: 'icu_normalizer'
+  name: AnalysisIcuNormalizationType
+}
+
 export type AnalysisIcuNormalizationType = 'nfc' | 'nfkc' | 'nfkc_cf'
 
+export interface AnalysisIcuTokenizer extends AnalysisTokenizerBase {
+  type: 'icu_tokenizer'
+  rule_files: string
+}
+
+export type AnalysisIcuTransformDirection = 'forward' | 'reverse'
+
+export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase {
+  type: 'icu_transform'
+  dir: AnalysisIcuTransformDirection
+  id: string
+}
+
 export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase {
   type: 'kstem'
 }
@@ -3440,28 +3531,28 @@ export type AnalysisKeepTypesMode = 'include' | 'exclude'
 
 export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase {
   type: 'keep_types'
-  mode: AnalysisKeepTypesMode
-  types: string[]
+  mode?: AnalysisKeepTypesMode
+  types?: string[]
 }
 
 export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase {
   type: 'keep'
-  keep_words: string[]
-  keep_words_case: boolean
-  keep_words_path: string
+  keep_words?: string[]
+  keep_words_case?: boolean
+  keep_words_path?: string
 }
 
 export interface AnalysisKeywordAnalyzer {
   type: 'keyword'
-  version: VersionString
+  version?: VersionString
 }
 
 export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase {
   type: 'keyword_marker'
-  ignore_case: boolean
-  keywords: string[]
-  keywords_path: string
-  keywords_pattern: string
+  ignore_case?: boolean
+  keywords?: string[]
+  keywords_path?: string
+  keywords_pattern?: string
 }
 
 export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase {
@@ -3475,6 +3566,12 @@ export interface AnalysisKuromojiAnalyzer {
   user_dictionary?: string
 }
 
+export interface AnalysisKuromojiIterationMarkCharFilter extends AnalysisCharFilterBase {
+  type: 'kuromoji_iteration_mark'
+  normalize_kana: boolean
+  normalize_kanji: boolean
+}
+
 export interface AnalysisKuromojiPartOfSpeechTokenFilter extends AnalysisTokenFilterBase {
   type: 'kuromoji_part_of_speech'
   stoptags: string[]
@@ -3494,23 +3591,24 @@ export type AnalysisKuromojiTokenizationMode = 'normal' | 'search' | 'extended'
 
 export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase {
   type: 'kuromoji_tokenizer'
-  discard_punctuation: boolean
+  discard_punctuation?: boolean
   mode: AnalysisKuromojiTokenizationMode
-  nbest_cost: integer
-  nbest_examples: string
-  user_dictionary: string
-  user_dictionary_rules: string[]
+  nbest_cost?: integer
+  nbest_examples?: string
+  user_dictionary?: string
+  user_dictionary_rules?: string[]
+  discard_compound_token?: boolean
 }
 
 export type AnalysisLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Brazilian' | 'Bulgarian' | 'Catalan' | 'Chinese' | 'Cjk' | 'Czech' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'Galician' | 'German' | 'Greek' | 'Hindi' | 'Hungarian' | 'Indonesian' | 'Irish' | 'Italian' | 'Latvian' | 'Norwegian' | 'Persian' | 'Portuguese' | 'Romanian' | 'Russian' | 'Sorani' | 'Spanish' | 'Swedish' | 'Turkish' | 'Thai'
 
 export interface AnalysisLanguageAnalyzer {
   type: 'language'
-  version: VersionString
+  version?: VersionString
   language: AnalysisLanguage
   stem_exclusion: string[]
-  stopwords: AnalysisStopWords
-  stopwords_path: string
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
 }
 
 export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase {
@@ -3535,7 +3633,7 @@ export interface AnalysisLowercaseNormalizer {
 
 export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase {
   type: 'lowercase'
-  language: string
+  language?: string
 }
 
 export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase {
@@ -3556,13 +3654,14 @@ export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase
 
 export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase {
   type: 'ngram'
-  max_gram: integer
-  min_gram: integer
+  max_gram?: integer
+  min_gram?: integer
+  preserve_original?: boolean
 }
 
 export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
   type: 'ngram'
-  custom_token_chars: string
+  custom_token_chars?: string
   max_gram: integer
   min_gram: integer
   token_chars: AnalysisTokenChar[]
@@ -3570,10 +3669,10 @@ export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
 
 export interface AnalysisNoriAnalyzer {
   type: 'nori'
-  version: VersionString
-  decompound_mode: AnalysisNoriDecompoundMode
-  stoptags: string[]
-  user_dictionary: string
+  version?: VersionString
+  decompound_mode?: AnalysisNoriDecompoundMode
+  stoptags?: string[]
+  user_dictionary?: string
 }
 
 export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed'
@@ -3585,10 +3684,10 @@ export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilter
 
 export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase {
   type: 'nori_tokenizer'
-  decompound_mode: AnalysisNoriDecompoundMode
-  discard_punctuation: boolean
-  user_dictionary: string
-  user_dictionary_rules: string[]
+  decompound_mode?: AnalysisNoriDecompoundMode
+  discard_punctuation?: boolean
+  user_dictionary?: string
+  user_dictionary_rules?: string[]
 }
 
 export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNormalizer
@@ -3604,11 +3703,11 @@ export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase {
 
 export interface AnalysisPatternAnalyzer {
   type: 'pattern'
-  version: VersionString
-  flags: string
-  lowercase: boolean
+  version?: VersionString
+  flags?: string
+  lowercase?: boolean
   pattern: string
-  stopwords: AnalysisStopWords
+  stopwords?: AnalysisStopWords
 }
 
 export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase {
@@ -3617,6 +3716,13 @@ export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBa
   preserve_original: boolean
 }
 
+export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase {
+  type: 'pattern_replace'
+  flags: string
+  pattern: string
+  replacement: string
+}
+
 export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase {
   type: 'pattern_replace'
   flags: string
@@ -3624,6 +3730,31 @@ export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBa
   replacement: string
 }
 
+export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase {
+  type: 'pattern'
+  flags: string
+  group: integer
+  pattern: string
+}
+
+export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff'
+
+export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish'
+
+export type AnalysisPhoneticNameType = 'generic' | 'ashkenazi' | 'sephardic'
+
+export type AnalysisPhoneticRuleType = 'approx' | 'exact'
+
+export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase {
+  type: 'phonetic'
+  encoder: AnalysisPhoneticEncoder
+  languageset: AnalysisPhoneticLanguage[]
+  max_code_len?: integer
+  name_type: AnalysisPhoneticNameType
+  replace?: boolean
+  rule_type: AnalysisPhoneticRuleType
+}
+
 export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase {
   type: 'porter_stem'
 }
@@ -3643,17 +3774,24 @@ export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase {
 
 export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase {
   type: 'shingle'
-  filler_token: string
-  max_shingle_size: integer
-  min_shingle_size: integer
-  output_unigrams: boolean
-  output_unigrams_if_no_shingles: boolean
-  token_separator: string
+  filler_token?: string
+  max_shingle_size?: integer | string
+  min_shingle_size?: integer | string
+  output_unigrams?: boolean
+  output_unigrams_if_no_shingles?: boolean
+  token_separator?: string
 }
 
 export interface AnalysisSimpleAnalyzer {
   type: 'simple'
-  version: VersionString
+  version?: VersionString
+}
+
+export interface AnalysisSnowballAnalyzer {
+  type: 'snowball'
+  version?: VersionString
+  language: AnalysisSnowballLanguage
+  stopwords?: AnalysisStopWords
 }
 
 export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish'
@@ -3665,19 +3803,19 @@ export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase {
 
 export interface AnalysisStandardAnalyzer {
   type: 'standard'
-  max_token_length: integer
-  stopwords: AnalysisStopWords
+  max_token_length?: integer
+  stopwords?: AnalysisStopWords
 }
 
 export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase {
   type: 'standard'
-  max_token_length: integer
+  max_token_length?: integer
 }
 
 export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterBase {
   type: 'stemmer_override'
-  rules: string[]
-  rules_path: string
+  rules?: string[]
+  rules_path?: string
 }
 
 export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase {
@@ -3687,9 +3825,9 @@ export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase {
 
 export interface AnalysisStopAnalyzer {
   type: 'stop'
-  version: VersionString
-  stopwords: AnalysisStopWords
-  stopwords_path: string
+  version?: VersionString
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
 }
 
 export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase {
@@ -3706,13 +3844,13 @@ export type AnalysisSynonymFormat = 'solr' | 'wordnet'
 
 export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase {
   type: 'synonym_graph'
-  expand: boolean
-  format: AnalysisSynonymFormat
-  lenient: boolean
-  synonyms: string[]
-  synonyms_path: string
-  tokenizer: string
-  updateable: boolean
+  expand?: boolean
+  format?: AnalysisSynonymFormat
+  lenient?: boolean
+  synonyms?: string[]
+  synonyms_path?: string
+  tokenizer?: string
+  updateable?: boolean
 }
 
 export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
@@ -3720,7 +3858,7 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
   expand?: boolean
   format?: AnalysisSynonymFormat
   lenient?: boolean
-  synonyms: string[]
+  synonyms?: string[]
   synonyms_path?: string
   tokenizer?: string
   updateable?: boolean
@@ -3728,13 +3866,13 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
 
 export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'
 
-export type AnalysisTokenFilter = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter
+export type AnalysisTokenFilter = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuTokenizer | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter
 
 export interface AnalysisTokenFilterBase {
   version?: VersionString
 }
 
-export type AnalysisTokenizer = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer
+export type AnalysisTokenizer = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer
 
 export interface AnalysisTokenizerBase {
   version?: VersionString
@@ -3751,12 +3889,12 @@ export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase {
 
 export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase {
   type: 'uax_url_email'
-  max_token_length: integer
+  max_token_length?: integer
 }
 
 export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase {
   type: 'unique'
-  only_on_same_position: boolean
+  only_on_same_position?: boolean
 }
 
 export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase {
@@ -3765,47 +3903,48 @@ export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase {
 
 export interface AnalysisWhitespaceAnalyzer {
   type: 'whitespace'
-  version: VersionString
+  version?: VersionString
 }
 
 export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase {
   type: 'whitespace'
-  max_token_length: integer
+  max_token_length?: integer
 }
 
 export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilterBase {
   type: 'word_delimiter_graph'
-  adjust_offsets: boolean
-  catenate_all: boolean
-  catenate_numbers: boolean
-  catenate_words: boolean
-  generate_number_parts: boolean
-  generate_word_parts: boolean
-  preserve_original: boolean
-  protected_words: string[]
-  protected_words_path: string
-  split_on_case_change: boolean
-  split_on_numerics: boolean
-  stem_english_possessive: boolean
-  type_table: string[]
-  type_table_path: string
+  adjust_offsets?: boolean
+  catenate_all?: boolean
+  catenate_numbers?: boolean
+  catenate_words?: boolean
+  generate_number_parts?: boolean
+  generate_word_parts?: boolean
+  ignore_keywords?: boolean
+  preserve_original?: boolean
+  protected_words?: string[]
+  protected_words_path?: string
+  split_on_case_change?: boolean
+  split_on_numerics?: boolean
+  stem_english_possessive?: boolean
+  type_table?: string[]
+  type_table_path?: string
 }
 
 export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBase {
   type: 'word_delimiter'
-  catenate_all: boolean
-  catenate_numbers: boolean
-  catenate_words: boolean
-  generate_number_parts: boolean
-  generate_word_parts: boolean
-  preserve_original: boolean
-  protected_words: string[]
-  protected_words_path: string
-  split_on_case_change: boolean
-  split_on_numerics: boolean
-  stem_english_possessive: boolean
-  type_table: string[]
-  type_table_path: string
+  catenate_all?: boolean
+  catenate_numbers?: boolean
+  catenate_words?: boolean
+  generate_number_parts?: boolean
+  generate_word_parts?: boolean
+  preserve_original?: boolean
+  protected_words?: string[]
+  protected_words_path?: string
+  split_on_case_change?: boolean
+  split_on_numerics?: boolean
+  stem_english_possessive?: boolean
+  type_table?: string[]
+  type_table_path?: string
 }
 
 export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase {
@@ -3885,6 +4024,7 @@ export interface MappingDateProperty extends MappingDocValuesPropertyBase {
   index?: boolean
   null_value?: DateString
   precision_step?: integer
+  locale?: string
   type: 'date'
 }
@@ -3893,9 +4033,18 @@ export interface MappingDateRangeProperty extends MappingRangePropertyBase {
   type: 'date_range'
 }
 
+export interface MappingDenseVectorIndexOptions {
+  type: string
+  m: integer
+  ef_construction: integer
+}
+
 export interface MappingDenseVectorProperty extends MappingPropertyBase {
   type: 'dense_vector'
   dims: integer
+  similarity?: string
+  index?: boolean
+  index_options?: MappingDenseVectorIndexOptions
 }
 
 export type MappingDocValuesProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDateProperty | MappingDateNanosProperty | MappingKeywordProperty | MappingNumberProperty | MappingRangeProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingCompletionProperty | MappingGenericProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingShapeProperty | MappingTokenCountProperty | MappingVersionProperty | MappingWildcardProperty | MappingPointProperty
@@ -4049,6 +4198,7 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase {
   norms?: boolean
   null_value?: string
   split_queries_on_whitespace?: boolean
+  time_series_dimension?: boolean
   type: 'keyword'
 }
@@ -4079,6 +4229,7 @@ export type MappingNumberProperty = MappingFloatNumberProperty | MappingHalfFloa
 export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase {
   index?: boolean
   ignore_malformed?: boolean
+  time_series_metric?: MappingTimeSeriesMetricType
 }
 
 export interface MappingObjectProperty extends MappingCorePropertyBase {
@@ -4224,6 +4375,8 @@ export interface MappingTextProperty extends MappingCorePropertyBase {
   type: 'text'
 }
 
+export type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram'
+
 export interface MappingTokenCountProperty extends MappingDocValuesPropertyBase {
   analyzer?: string
   boost?: double
@@ -4321,8 +4474,8 @@ export interface QueryDslConstantScoreQuery extends QueryDslQueryBase {
 
 export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase {
 }
-export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys |
-{ [property: string]: QueryDslDecayPlacement }
+export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys
+& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double }
 
 export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase {
 }
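`MappingDenseVectorProperty` above gains `similarity`, `index`, and `index_options` (with HNSW parameters `m` and `ef_construction`). A mapping sketch that type-checks against these interfaces (import path assumed as in the published package; field name and values are illustrative):

    import { MappingDenseVectorProperty } from '@elastic/elasticsearch/lib/api/types'

    const embedding: MappingDenseVectorProperty = {
      type: 'dense_vector',
      dims: 384,
      index: true,
      similarity: 'cosine',
      index_options: { type: 'hnsw', m: 16, ef_construction: 100 }
    }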
QueryDslDistanceFeatureQueryBase { } @@ -4422,15 +4575,15 @@ export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { validation_method?: QueryDslGeoValidationMethod ignore_unmapped?: boolean } -export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys | -{ [property: string]: QueryDslBoundingBox } +export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys +& { [property: string]: QueryDslBoundingBox | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string } export type QueryDslGeoCoordinate = string | double[] | QueryDslThreeDimensionalPoint export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { } -export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys | -{ [property: string]: QueryDslDecayPlacement } +export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double } export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } @@ -4440,8 +4593,8 @@ export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { distance_type?: GeoDistanceType validation_method?: QueryDslGeoValidationMethod } -export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys | -{ [property: string]: QueryDslGeoLocation } +export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys +& { [property: string]: QueryDslGeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | float | string } export type QueryDslGeoExecution = 'memory' | 'indexed' @@ -4455,8 +4608,8 @@ export interface QueryDslGeoPolygonQueryKeys extends QueryDslQueryBase { validation_method?: QueryDslGeoValidationMethod ignore_unmapped?: boolean } -export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys | -{ [property: string]: QueryDslGeoPolygonPoints } +export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys +& { [property: string]: QueryDslGeoPolygonPoints | QueryDslGeoValidationMethod | boolean | float | string } export interface QueryDslGeoShapeFieldQuery { shape?: GeoShape @@ -4467,8 +4620,8 @@ export interface QueryDslGeoShapeFieldQuery { export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase { ignore_unmapped?: boolean } -export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys | -{ [property: string]: QueryDslGeoShapeFieldQuery } +export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys +& { [property: string]: QueryDslGeoShapeFieldQuery | boolean | float | string } export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' @@ -4694,8 +4847,8 @@ export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { } -export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys | -{ [property: string]: QueryDslDecayPlacement } +export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double } export type QueryDslOperator = 'and' | 'or' @@ -4899,8 +5052,8 @@ export interface QueryDslShapeFieldQuery { export interface QueryDslShapeQueryKeys extends QueryDslQueryBase { } -export type QueryDslShapeQuery = QueryDslShapeQueryKeys | -{ [property: string]: QueryDslShapeFieldQuery } +export type QueryDslShapeQuery = QueryDslShapeQueryKeys +& { [property: string]: 
QueryDslShapeFieldQuery | float | string } export type QueryDslSimpleQueryStringFlags = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL' @@ -4995,8 +5148,8 @@ export interface QueryDslTermsLookup { export interface QueryDslTermsQueryKeys extends QueryDslQueryBase { } -export type QueryDslTermsQuery = QueryDslTermsQueryKeys | -{ [property: string]: string[] | long[] | QueryDslTermsLookup } +export type QueryDslTermsQuery = QueryDslTermsQueryKeys +& { [property: string]: string[] | long[] | QueryDslTermsLookup | float | string } export interface QueryDslTermsSetQuery extends QueryDslQueryBase { minimum_should_match_field?: Field @@ -5024,13 +5177,14 @@ export interface QueryDslTypeQuery extends QueryDslQueryBase { export interface QueryDslWildcardQuery extends QueryDslQueryBase { case_insensitive?: boolean rewrite?: MultiTermQueryRewrite - value: string + value?: string + wildcard?: string } export type QueryDslZeroTermsQuery = 'all' | 'none' export interface AsyncSearchAsyncSearch { - aggregations?: Record + aggregations?: Record _clusters?: ClusterStatistics fields?: Record hits: SearchHitsMetadata @@ -5132,8 +5286,8 @@ export interface AsyncSearchSubmitRequest extends RequestBase { size?: integer from?: integer sort?: string | string[] - aggs?: Record aggregations?: Record + aggs?: Record collapse?: SearchFieldCollapse highlight?: SearchHighlight indices_boost?: Record[] @@ -5784,7 +5938,8 @@ export interface CatMlDatafeedsDatafeedsRecord { export interface CatMlDatafeedsRequest extends CatCatRequestBase { datafeed_id?: Id - allow_no_datafeeds?: boolean + allow_no_match?: boolean + time?: TimeUnit } export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] @@ -5968,8 +6123,9 @@ export interface CatMlJobsJobsRecord { export interface CatMlJobsRequest extends CatCatRequestBase { job_id?: Id - allow_no_jobs?: boolean + allow_no_match?: boolean bytes?: Bytes + time?: TimeUnit } export type CatMlJobsResponse = CatMlJobsJobsRecord[] @@ -8323,6 +8479,13 @@ export interface IndicesIndexRoutingRebalance { export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replicas' | 'none' +export interface IndicesIndexSegmentSort { + field: Fields + order: IndicesSegmentSortOrder | IndicesSegmentSortOrder[] + mode?: IndicesSegmentSortMode + missing?: IndicesSegmentSortMissing +} + export interface IndicesIndexSettingBlocks { read_only?: boolean read_only_allow_delete?: boolean @@ -8332,6 +8495,15 @@ export interface IndicesIndexSettingBlocks { } export interface IndicesIndexSettings { + index?: IndicesIndexSettings + mode?: string + 'index.mode'?: string + routing_path?: string[] + 'index.routing_path'?: string[] + soft_deletes?: IndicesSoftDeletes + 'index.soft_deletes'?: IndicesSoftDeletes + sort?: IndicesIndexSegmentSort + 'index.sort'?: IndicesIndexSegmentSort number_of_shards?: integer | string 'index.number_of_shards'?: integer | string number_of_replicas?: integer | string @@ -8342,8 +8514,8 @@ export interface IndicesIndexSettings { 'index.check_on_startup'?: IndicesIndexCheckOnStartup codec?: string 'index.codec'?: string - routing_partition_size?: integer | string - 'index.routing_partition_size'?: integer | string + routing_partition_size?: integer + 'index.routing_partition_size'?: integer 'soft_deletes.retention_lease.period'?: Time 'index.soft_deletes.retention_lease.period'?: Time load_fixed_bitset_filters_eagerly?: boolean @@ -8352,6 +8524,8 @@ export interface IndicesIndexSettings { 
'index.hidden'?: boolean | string auto_expand_replicas?: string 'index.auto_expand_replicas'?: string + 'merge.scheduler.max_thread_count'?: integer + 'index.merge.scheduler.max_thread_count'?: integer 'search.idle.after'?: Time 'index.search.idle.after'?: Time refresh_interval?: Time @@ -8420,12 +8594,15 @@ export interface IndicesIndexSettings { 'index.max_slices_per_scroll'?: integer 'translog.durability'?: string 'index.translog.durability'?: string + 'translog.flush_threshold_size'?: string + 'index.translog.flush_threshold_size'?: string 'query_string.lenient'?: boolean | string 'index.query_string.lenient'?: boolean | string priority?: integer | string 'index.priority'?: integer | string top_metrics_max_size?: integer analysis?: IndicesIndexSettingsAnalysis + 'index.analysis'?: IndicesIndexSettingsAnalysis settings?: IndicesIndexSettings } @@ -8434,6 +8611,7 @@ export interface IndicesIndexSettingsAnalysis { char_filter?: Record filter?: Record normalizer?: Record + tokenizer?: Record } export interface IndicesIndexSettingsLifecycle { @@ -8466,6 +8644,16 @@ export interface IndicesOverlappingIndexTemplate { index_patterns?: IndexName[] } +export type IndicesSegmentSortMissing = '_last' | '_first' + +export type IndicesSegmentSortMode = 'min' | 'max' + +export type IndicesSegmentSortOrder = 'asc' | 'desc' + +export interface IndicesSoftDeletes { + enabled: boolean +} + export interface IndicesStringFielddata { format: IndicesStringFielddataFormat } @@ -8626,9 +8814,9 @@ export interface IndicesCreateRequest extends RequestBase { master_timeout?: Time timeout?: Time wait_for_active_shards?: WaitForActiveShards - aliases?: Record - mappings?: Record | MappingTypeMapping - settings?: Record + aliases?: Record + mappings?: MappingTypeMapping + settings?: IndicesIndexSettings } export interface IndicesCreateResponse { @@ -8801,20 +8989,6 @@ export interface IndicesForcemergeRequest extends RequestBase { export interface IndicesForcemergeResponse extends ShardsOperationResponseBase { } -export interface IndicesFreezeRequest extends RequestBase { - index: IndexName - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time - wait_for_active_shards?: WaitForActiveShards -} - -export interface IndicesFreezeResponse extends AcknowledgedResponseBase { - shards_acknowledged: boolean -} - export interface IndicesGetRequest extends RequestBase { index: Indices allow_no_indices?: boolean @@ -10162,7 +10336,6 @@ export interface LicensePostStartTrialRequest extends RequestBase { export interface LicensePostStartTrialResponse extends AcknowledgedResponseBase { error_message?: string - acknowledged: boolean trial_was_started: boolean type: LicenseLicenseType } @@ -10237,8 +10410,8 @@ export interface MlAnalysisConfig { categorization_filters?: string[] detectors: MlDetector[] influencers?: Field[] - model_prune_window?: Time latency?: Time + model_prune_window?: Time multivariate_by_fields?: boolean per_partition_categorization?: MlPerPartitionCategorization summary_count_field_name?: Field @@ -10410,7 +10583,7 @@ export interface MlDataCounts { export interface MlDataDescription { format?: string - time_field: Field + time_field?: Field time_format?: string field_delimiter?: string } @@ -10795,6 +10968,8 @@ export interface MlHyperparameters { soft_tree_depth_tolerance?: double } +export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' + export interface 
MlInfluence { influencer_field_name: string influencer_field_values: string[] @@ -11062,7 +11237,6 @@ export interface MlValidationLoss { export interface MlCloseJobRequest extends RequestBase { job_id: Id allow_no_match?: boolean - allow_no_jobs?: boolean force?: boolean timeout?: Time } @@ -11328,7 +11502,6 @@ export interface MlGetBucketsRequest extends RequestBase { end?: DateString anomaly_score?: double expand?: boolean - page?: MlPage } export interface MlGetBucketsResponse { @@ -11410,7 +11583,7 @@ export interface MlGetDataFrameAnalyticsStatsResponse { export interface MlGetDatafeedStatsRequest extends RequestBase { datafeed_id?: Ids - allow_no_datafeeds?: boolean + allow_no_match?: boolean } export interface MlGetDatafeedStatsResponse { @@ -11420,7 +11593,7 @@ export interface MlGetDatafeedStatsResponse { export interface MlGetDatafeedsRequest extends RequestBase { datafeed_id?: Ids - allow_no_datafeeds?: boolean + allow_no_match?: boolean exclude_generated?: boolean } @@ -11460,7 +11633,7 @@ export interface MlGetInfluencersResponse { export interface MlGetJobStatsRequest extends RequestBase { job_id?: Id - allow_no_jobs?: boolean + allow_no_match?: boolean } export interface MlGetJobStatsResponse { @@ -11471,7 +11644,6 @@ export interface MlGetJobStatsResponse { export interface MlGetJobsRequest extends RequestBase { job_id?: Ids allow_no_match?: boolean - allow_no_jobs?: boolean exclude_generated?: boolean } @@ -11498,14 +11670,13 @@ export interface MlGetModelSnapshotsResponse { export interface MlGetOverallBucketsRequest extends RequestBase { job_id: Id + allow_no_match?: boolean bucket_span?: Time - overall_score?: double | string - top_n?: integer end?: Time - start?: Time exclude_interim?: boolean - allow_no_match?: boolean - allow_no_jobs?: boolean + overall_score?: double | string + start?: Time + top_n?: integer } export interface MlGetOverallBucketsResponse { @@ -11537,7 +11708,7 @@ export interface MlGetTrainedModelsRequest extends RequestBase { decompress_definition?: boolean exclude_generated?: boolean from?: integer - include?: string + include?: MlInclude size?: integer tags?: string } @@ -11659,8 +11830,8 @@ export interface MlPreviewDataFrameAnalyticsResponse { export interface MlPreviewDatafeedRequest extends RequestBase { datafeed_id?: Id - job_config?: MlJobConfig datafeed_config?: MlDatafeedConfig + job_config?: MlJobConfig } export interface MlPreviewDatafeedResponse { @@ -11726,6 +11897,7 @@ export interface MlPutDatafeedRequest extends RequestBase { delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Time indices?: string[] + indexes?: string[] indices_options?: MlDatafeedIndicesOptions job_id?: Id max_empty_searches?: integer @@ -12916,6 +13088,7 @@ export interface RollupRollupSearchRequest extends RequestBase { type?: Type rest_total_hits_as_int?: boolean typed_keys?: boolean + aggregations?: Record aggs?: Record query?: QueryDslQueryContainer size?: integer diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 67f407c2f..3e5780abb 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -499,6 +499,37 @@ export interface InfoResponse { version: ElasticsearchVersionInfo } +export interface KnnSearchRequest extends RequestBase { + index: Indices + routing?: Routing + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + _source?: boolean | Fields | SearchSourceFilter + docvalue_fields?: SearchDocValueField | (Field | SearchDocValueField)[] + stored_fields?: Fields + fields?: Fields + knn: KnnSearchQuery + } +} + +export interface KnnSearchResponse { + took: long + timed_out: boolean + _shards: ShardStatistics + hits: SearchHitsMetadata + fields?: Record + max_score?: double +} + +export interface KnnSearchQuery { + field: Field + query_vector: KnnSearchQueryVector + k: long + num_candidates: long +} + +export type KnnSearchQueryVector = double[] + export interface MgetHit { error?: ErrorCause fields?: Record @@ -993,8 +1024,8 @@ export interface SearchRequest extends RequestBase { sort?: string | string[] /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - aggs?: Record aggregations?: Record + aggs?: Record collapse?: SearchFieldCollapse explain?: boolean from?: integer @@ -1208,8 +1239,8 @@ export interface SearchGeoDistanceSortKeys { order?: SearchSortOrder unit?: DistanceUnit } -export type SearchGeoDistanceSort = SearchGeoDistanceSortKeys | -{ [property: string]: QueryDslGeoLocation | QueryDslGeoLocation[] } +export type SearchGeoDistanceSort = SearchGeoDistanceSortKeys +& { [property: string]: QueryDslGeoLocation | QueryDslGeoLocation[] | SearchSortMode | GeoDistanceType | SearchSortOrder | DistanceUnit } export interface SearchHighlight { fields: Record @@ -1477,8 +1508,8 @@ export interface SearchSortContainerKeys { _geo_distance?: SearchGeoDistanceSort _script?: SearchScriptSort } -export type SearchSortContainer = SearchSortContainerKeys | -{ [property: string]: SearchFieldSort | SearchSortOrder } +export type SearchSortContainer = SearchSortContainerKeys +& { [property: string]: SearchFieldSort | SearchSortOrder | SearchScoreSort | SearchGeoDistanceSort | SearchScriptSort } export type SearchSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' @@ -1968,8 +1999,8 @@ export interface ErrorCauseKeys { root_cause?: ErrorCause[] suppressed?: ErrorCause[] } -export type ErrorCause = ErrorCauseKeys | -{ [property: string]: any } +export type ErrorCause = ErrorCauseKeys +& { [property: string]: any } export interface ErrorResponseBase { error: ErrorCause @@ -2088,8 +2119,8 @@ export interface InlineGetKeys { _routing?: Routing _source: TDocument } -export type InlineGet = InlineGetKeys | -{ [property: string]: any } +export type InlineGet = InlineGetKeys +& { [property: string]: any } export interface InlineScript extends ScriptBase { source: string @@ -2669,8 +2700,8 @@ export interface AggregationsCompositeAggregationSource { export interface AggregationsCompositeBucketKeys { } -export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys | -{ [property: string]: AggregationsAggregate } +export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys +& { [property: string]: AggregationsAggregate } export interface AggregationsCompositeBucketAggregate extends AggregationsMultiBucketAggregate> { after_key: Record @@ -2702,8 +2733,8 @@ export interface AggregationsDateHistogramAggregation extends AggregationsBucket export interface AggregationsDateHistogramBucketKeys { } -export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys | -{ [property: string]: AggregationsAggregate } +export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys +& { [property: string]: AggregationsAggregate } export type AggregationsDateInterval = 'second' | 'minute' | 'hour' 
| 'day' | 'week' | 'month' | 'quarter' | 'year' @@ -2778,8 +2809,8 @@ export interface AggregationsFiltersAggregation extends AggregationsBucketAggreg export interface AggregationsFiltersBucketItemKeys { doc_count: long } -export type AggregationsFiltersBucketItem = AggregationsFiltersBucketItemKeys | -{ [property: string]: AggregationsAggregate } +export type AggregationsFiltersBucketItem = AggregationsFiltersBucketItemKeys +& { [property: string]: AggregationsAggregate | long } export interface AggregationsFormatMetricAggregationBase extends AggregationsMetricAggregationBase { format?: string @@ -2943,16 +2974,16 @@ export interface AggregationsIpRangeAggregationRange { export interface AggregationsIpRangeBucketKeys { } -export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys | -{ [property: string]: AggregationsAggregate } +export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys +& { [property: string]: AggregationsAggregate } export interface AggregationsKeyedBucketKeys { doc_count: long key: TKey key_as_string: string } -export type AggregationsKeyedBucket = AggregationsKeyedBucketKeys | -{ [property: string]: AggregationsAggregate } +export type AggregationsKeyedBucket = AggregationsKeyedBucketKeys +& { [property: string]: AggregationsAggregate | long | TKey | string } export interface AggregationsKeyedValueAggregate extends AggregationsValueAggregate { keys: string[] @@ -3120,8 +3151,8 @@ export interface AggregationsRangeAggregation extends AggregationsBucketAggregat export interface AggregationsRangeBucketKeys { } -export type AggregationsRangeBucket = AggregationsRangeBucketKeys | -{ [property: string]: AggregationsAggregate } +export type AggregationsRangeBucket = AggregationsRangeBucketKeys +& { [property: string]: AggregationsAggregate } export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase { exclude?: string | string[] @@ -3135,8 +3166,8 @@ export interface AggregationsRareTermsAggregation extends AggregationsBucketAggr export interface AggregationsRareTermsBucketKeys { } -export type AggregationsRareTermsBucket = AggregationsRareTermsBucketKeys | -{ [property: string]: AggregationsAggregate } +export type AggregationsRareTermsBucket = AggregationsRareTermsBucketKeys +& { [property: string]: AggregationsAggregate } export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase { unit?: AggregationsDateInterval @@ -3204,8 +3235,8 @@ export interface AggregationsSignificantTermsAggregation extends AggregationsBuc export interface AggregationsSignificantTermsBucketKeys { } -export type AggregationsSignificantTermsBucket = AggregationsSignificantTermsBucketKeys | -{ [property: string]: AggregationsAggregate } +export type AggregationsSignificantTermsBucket = AggregationsSignificantTermsBucketKeys +& { [property: string]: AggregationsAggregate } export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase { background_filter?: QueryDslQueryContainer @@ -3229,8 +3260,8 @@ export interface AggregationsSignificantTextAggregation extends AggregationsBuck export interface AggregationsSingleBucketAggregateKeys extends AggregationsAggregateBase { doc_count: double } -export type AggregationsSingleBucketAggregate = AggregationsSingleBucketAggregateKeys | -{ [property: string]: AggregationsAggregate } +export type AggregationsSingleBucketAggregate = AggregationsSingleBucketAggregateKeys +& { [property: string]: AggregationsAggregate | double | Record } export 
interface AggregationsStandardDeviationBounds { lower?: double @@ -3398,14 +3429,14 @@ export interface AggregationsWeightedAverageValue { script?: Script } -export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer +export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisDutchAnalyzer export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'asciifolding' preserve_original: boolean } -export type AnalysisCharFilter = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceTokenFilter +export type AnalysisCharFilter = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceCharFilter | AnalysisIcuNormalizationCharFilter | AnalysisKuromojiIterationMarkCharFilter export interface AnalysisCharFilterBase { version?: VersionString @@ -3414,24 +3445,25 @@ export interface AnalysisCharFilterBase { export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase { type: 'char_group' tokenize_on_chars: string[] + max_token_length?: integer } export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase { type: 'common_grams' - common_words: string[] - common_words_path: string - ignore_case: boolean - query_mode: boolean + common_words?: string[] + common_words_path?: string + ignore_case?: boolean + query_mode?: boolean } export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase { - hyphenation_patterns_path: string - max_subword_size: integer - min_subword_size: integer - min_word_size: integer - only_longest_match: boolean - word_list: string[] - word_list_path: string + hyphenation_patterns_path?: string + max_subword_size?: integer + min_subword_size?: integer + min_word_size?: integer + only_longest_match?: boolean + word_list?: string[] + word_list_path?: string } export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { @@ -3463,18 +3495,28 @@ export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilter encoding: AnalysisDelimitedPayloadEncoding } +export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase { + type: 'dictionary_decompounder' +} + +export interface AnalysisDutchAnalyzer { + type: 'dutch' + stopwords?: AnalysisStopWords +} + export type AnalysisEdgeNGramSide = 'front' | 'back' export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { type: 'edge_ngram' max_gram: integer min_gram: integer - side: AnalysisEdgeNGramSide + side?: AnalysisEdgeNGramSide + preserve_original?: boolean } export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { type: 'edge_ngram' - custom_token_chars: string + custom_token_chars?: string max_gram: integer min_gram: integer token_chars: AnalysisTokenChar[] @@ -3488,12 +3530,12 @@ export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisFingerprintAnalyzer { type: 
'fingerprint' - version: VersionString + version?: VersionString max_output_size: integer preserve_original: boolean separator: string - stopwords: AnalysisStopWords - stopwords_path: string + stopwords?: AnalysisStopWords + stopwords_path?: string } export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase { @@ -3524,10 +3566,62 @@ export interface AnalysisIcuAnalyzer { mode: AnalysisIcuNormalizationMode } +export type AnalysisIcuCollationAlternate = 'shifted' | 'non-ignorable' + +export type AnalysisIcuCollationCaseFirst = 'lower' | 'upper' + +export type AnalysisIcuCollationDecomposition = 'no' | 'identical' + +export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' | 'quaternary' | 'identical' + +export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase { + type: 'icu_collation' + alternate: AnalysisIcuCollationAlternate + caseFirst: AnalysisIcuCollationCaseFirst + caseLevel: boolean + country: string + decomposition: AnalysisIcuCollationDecomposition + hiraganaQuaternaryMode: boolean + language: string + numeric: boolean + strength: AnalysisIcuCollationStrength + variableTop?: string + variant: string +} + +export interface AnalysisIcuFoldingTokenFilter extends AnalysisTokenFilterBase { + type: 'icu_folding' + unicode_set_filter: string +} + +export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBase { + type: 'icu_normalizer' + mode?: AnalysisIcuNormalizationMode + name?: AnalysisIcuNormalizationType +} + export type AnalysisIcuNormalizationMode = 'decompose' | 'compose' +export interface AnalysisIcuNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'icu_normalizer' + name: AnalysisIcuNormalizationType +} + export type AnalysisIcuNormalizationType = 'nfc' | 'nfkc' | 'nfkc_cf' +export interface AnalysisIcuTokenizer extends AnalysisTokenizerBase { + type: 'icu_tokenizer' + rule_files: string +} + +export type AnalysisIcuTransformDirection = 'forward' | 'reverse' + +export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase { + type: 'icu_transform' + dir: AnalysisIcuTransformDirection + id: string +} + export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase { type: 'kstem' } @@ -3536,28 +3630,28 @@ export type AnalysisKeepTypesMode = 'include' | 'exclude' export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase { type: 'keep_types' - mode: AnalysisKeepTypesMode - types: string[] + mode?: AnalysisKeepTypesMode + types?: string[] } export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase { type: 'keep' - keep_words: string[] - keep_words_case: boolean - keep_words_path: string + keep_words?: string[] + keep_words_case?: boolean + keep_words_path?: string } export interface AnalysisKeywordAnalyzer { type: 'keyword' - version: VersionString + version?: VersionString } export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase { type: 'keyword_marker' - ignore_case: boolean - keywords: string[] - keywords_path: string - keywords_pattern: string + ignore_case?: boolean + keywords?: string[] + keywords_path?: string + keywords_pattern?: string } export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase { @@ -3571,6 +3665,12 @@ export interface AnalysisKuromojiAnalyzer { user_dictionary?: string } +export interface AnalysisKuromojiIterationMarkCharFilter extends AnalysisCharFilterBase { + type: 'kuromoji_iteration_mark' + normalize_kana: boolean + normalize_kanji: 
boolean +} + export interface AnalysisKuromojiPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { type: 'kuromoji_part_of_speech' stoptags: string[] @@ -3590,23 +3690,24 @@ export type AnalysisKuromojiTokenizationMode = 'normal' | 'search' | 'extended' export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase { type: 'kuromoji_tokenizer' - discard_punctuation: boolean + discard_punctuation?: boolean mode: AnalysisKuromojiTokenizationMode - nbest_cost: integer - nbest_examples: string - user_dictionary: string - user_dictionary_rules: string[] + nbest_cost?: integer + nbest_examples?: string + user_dictionary?: string + user_dictionary_rules?: string[] + discard_compound_token?: boolean } export type AnalysisLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Brazilian' | 'Bulgarian' | 'Catalan' | 'Chinese' | 'Cjk' | 'Czech' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'Galician' | 'German' | 'Greek' | 'Hindi' | 'Hungarian' | 'Indonesian' | 'Irish' | 'Italian' | 'Latvian' | 'Norwegian' | 'Persian' | 'Portuguese' | 'Romanian' | 'Russian' | 'Sorani' | 'Spanish' | 'Swedish' | 'Turkish' | 'Thai' export interface AnalysisLanguageAnalyzer { type: 'language' - version: VersionString + version?: VersionString language: AnalysisLanguage stem_exclusion: string[] - stopwords: AnalysisStopWords - stopwords_path: string + stopwords?: AnalysisStopWords + stopwords_path?: string } export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase { @@ -3631,7 +3732,7 @@ export interface AnalysisLowercaseNormalizer { export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase { type: 'lowercase' - language: string + language?: string } export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase { @@ -3652,13 +3753,14 @@ export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase { type: 'ngram' - max_gram: integer - min_gram: integer + max_gram?: integer + min_gram?: integer + preserve_original?: boolean } export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase { type: 'ngram' - custom_token_chars: string + custom_token_chars?: string max_gram: integer min_gram: integer token_chars: AnalysisTokenChar[] @@ -3666,10 +3768,10 @@ export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase { export interface AnalysisNoriAnalyzer { type: 'nori' - version: VersionString - decompound_mode: AnalysisNoriDecompoundMode - stoptags: string[] - user_dictionary: string + version?: VersionString + decompound_mode?: AnalysisNoriDecompoundMode + stoptags?: string[] + user_dictionary?: string } export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed' @@ -3681,10 +3783,10 @@ export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilter export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { type: 'nori_tokenizer' - decompound_mode: AnalysisNoriDecompoundMode - discard_punctuation: boolean - user_dictionary: string - user_dictionary_rules: string[] + decompound_mode?: AnalysisNoriDecompoundMode + discard_punctuation?: boolean + user_dictionary?: string + user_dictionary_rules?: string[] } export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNormalizer @@ -3700,11 +3802,11 @@ export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { export interface AnalysisPatternAnalyzer { type: 'pattern' - version: VersionString - flags: string - 
lowercase: boolean + version?: VersionString + flags?: string + lowercase?: boolean pattern: string - stopwords: AnalysisStopWords + stopwords?: AnalysisStopWords } export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase { @@ -3713,6 +3815,13 @@ export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBa preserve_original: boolean } +export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase { + type: 'pattern_replace' + flags: string + pattern: string + replacement: string +} + export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase { type: 'pattern_replace' flags: string @@ -3720,6 +3829,31 @@ export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBa replacement: string } +export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase { + type: 'pattern' + flags: string + group: integer + pattern: string +} + +export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' + +export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish' + +export type AnalysisPhoneticNameType = 'generic' | 'ashkenazi' | 'sephardic' + +export type AnalysisPhoneticRuleType = 'approx' | 'exact' + +export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase { + type: 'phonetic' + encoder: AnalysisPhoneticEncoder + languageset: AnalysisPhoneticLanguage[] + max_code_len?: integer + name_type: AnalysisPhoneticNameType + replace?: boolean + rule_type: AnalysisPhoneticRuleType +} + export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase { type: 'porter_stem' } @@ -3739,17 +3873,24 @@ export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase { type: 'shingle' - filler_token: string - max_shingle_size: integer - min_shingle_size: integer - output_unigrams: boolean - output_unigrams_if_no_shingles: boolean - token_separator: string + filler_token?: string + max_shingle_size?: integer | string + min_shingle_size?: integer | string + output_unigrams?: boolean + output_unigrams_if_no_shingles?: boolean + token_separator?: string } export interface AnalysisSimpleAnalyzer { type: 'simple' - version: VersionString + version?: VersionString +} + +export interface AnalysisSnowballAnalyzer { + type: 'snowball' + version?: VersionString + language: AnalysisSnowballLanguage + stopwords?: AnalysisStopWords } export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish' @@ -3761,19 +3902,19 @@ export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisStandardAnalyzer { type: 'standard' - max_token_length: integer - stopwords: AnalysisStopWords + max_token_length?: integer + stopwords?: AnalysisStopWords } export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase { type: 'standard' - max_token_length: integer + max_token_length?: integer } export interface AnalysisStemmerOverrideTokenFilter extends 
AnalysisTokenFilterBase { type: 'stemmer_override' - rules: string[] - rules_path: string + rules?: string[] + rules_path?: string } export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase { @@ -3783,9 +3924,9 @@ export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisStopAnalyzer { type: 'stop' - version: VersionString - stopwords: AnalysisStopWords - stopwords_path: string + version?: VersionString + stopwords?: AnalysisStopWords + stopwords_path?: string } export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { @@ -3802,13 +3943,13 @@ export type AnalysisSynonymFormat = 'solr' | 'wordnet' export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase { type: 'synonym_graph' - expand: boolean - format: AnalysisSynonymFormat - lenient: boolean - synonyms: string[] - synonyms_path: string - tokenizer: string - updateable: boolean + expand?: boolean + format?: AnalysisSynonymFormat + lenient?: boolean + synonyms?: string[] + synonyms_path?: string + tokenizer?: string + updateable?: boolean } export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { @@ -3816,7 +3957,7 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { expand?: boolean format?: AnalysisSynonymFormat lenient?: boolean - synonyms: string[] + synonyms?: string[] synonyms_path?: string tokenizer?: string updateable?: boolean @@ -3824,13 +3965,13 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' -export type AnalysisTokenFilter = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter +export type AnalysisTokenFilter = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | 
AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuTokenizer | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter export interface AnalysisTokenFilterBase { version?: VersionString } -export type AnalysisTokenizer = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer +export type AnalysisTokenizer = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer export interface AnalysisTokenizerBase { version?: VersionString @@ -3847,12 +3988,12 @@ export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase { type: 'uax_url_email' - max_token_length: integer + max_token_length?: integer } export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase { type: 'unique' - only_on_same_position: boolean + only_on_same_position?: boolean } export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase { @@ -3861,47 +4002,48 @@ export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisWhitespaceAnalyzer { type: 'whitespace' - version: VersionString + version?: VersionString } export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase { type: 'whitespace' - max_token_length: integer + max_token_length?: integer } export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilterBase { type: 'word_delimiter_graph' - adjust_offsets: boolean - catenate_all: boolean - catenate_numbers: boolean - catenate_words: boolean - generate_number_parts: boolean - generate_word_parts: boolean - preserve_original: boolean - protected_words: string[] - protected_words_path: string - split_on_case_change: boolean - split_on_numerics: boolean - stem_english_possessive: boolean - type_table: string[] - type_table_path: string + adjust_offsets?: boolean + catenate_all?: boolean + catenate_numbers?: boolean + catenate_words?: 
boolean + generate_number_parts?: boolean + generate_word_parts?: boolean + ignore_keywords?: boolean + preserve_original?: boolean + protected_words?: string[] + protected_words_path?: string + split_on_case_change?: boolean + split_on_numerics?: boolean + stem_english_possessive?: boolean + type_table?: string[] + type_table_path?: string } export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBase { type: 'word_delimiter' - catenate_all: boolean - catenate_numbers: boolean - catenate_words: boolean - generate_number_parts: boolean - generate_word_parts: boolean - preserve_original: boolean - protected_words: string[] - protected_words_path: string - split_on_case_change: boolean - split_on_numerics: boolean - stem_english_possessive: boolean - type_table: string[] - type_table_path: string + catenate_all?: boolean + catenate_numbers?: boolean + catenate_words?: boolean + generate_number_parts?: boolean + generate_word_parts?: boolean + preserve_original?: boolean + protected_words?: string[] + protected_words_path?: string + split_on_case_change?: boolean + split_on_numerics?: boolean + stem_english_possessive?: boolean + type_table?: string[] + type_table_path?: string } export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase { @@ -3981,6 +4123,7 @@ export interface MappingDateProperty extends MappingDocValuesPropertyBase { index?: boolean null_value?: DateString precision_step?: integer + locale?: string type: 'date' } @@ -3989,9 +4132,18 @@ export interface MappingDateRangeProperty extends MappingRangePropertyBase { type: 'date_range' } +export interface MappingDenseVectorIndexOptions { + type: string + m: integer + ef_construction: integer +} + export interface MappingDenseVectorProperty extends MappingPropertyBase { type: 'dense_vector' dims: integer + similarity?: string + index?: boolean + index_options?: MappingDenseVectorIndexOptions } export type MappingDocValuesProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDateProperty | MappingDateNanosProperty | MappingKeywordProperty | MappingNumberProperty | MappingRangeProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingCompletionProperty | MappingGenericProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingShapeProperty | MappingTokenCountProperty | MappingVersionProperty | MappingWildcardProperty | MappingPointProperty @@ -4145,6 +4297,7 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { norms?: boolean null_value?: string split_queries_on_whitespace?: boolean + time_series_dimension?: boolean type: 'keyword' } @@ -4175,6 +4328,7 @@ export type MappingNumberProperty = MappingFloatNumberProperty | MappingHalfFloa export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase { index?: boolean ignore_malformed?: boolean + time_series_metric?: MappingTimeSeriesMetricType } export interface MappingObjectProperty extends MappingCorePropertyBase { @@ -4320,6 +4474,8 @@ export interface MappingTextProperty extends MappingCorePropertyBase { type: 'text' } +export type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram' + export interface MappingTokenCountProperty extends MappingDocValuesPropertyBase { analyzer?: string boost?: double @@ -4417,8 +4573,8 @@ export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase { } -export type QueryDslDateDecayFunction = 
QueryDslDateDecayFunctionKeys | -{ [property: string]: QueryDslDecayPlacement } +export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double } export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } @@ -4518,15 +4674,15 @@ export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { validation_method?: QueryDslGeoValidationMethod ignore_unmapped?: boolean } -export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys | -{ [property: string]: QueryDslBoundingBox } +export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys +& { [property: string]: QueryDslBoundingBox | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string } export type QueryDslGeoCoordinate = string | double[] | QueryDslThreeDimensionalPoint export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { } -export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys | -{ [property: string]: QueryDslDecayPlacement } +export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double } export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } @@ -4536,8 +4692,8 @@ export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { distance_type?: GeoDistanceType validation_method?: QueryDslGeoValidationMethod } -export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys | -{ [property: string]: QueryDslGeoLocation } +export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys +& { [property: string]: QueryDslGeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | float | string } export type QueryDslGeoExecution = 'memory' | 'indexed' @@ -4551,8 +4707,8 @@ export interface QueryDslGeoPolygonQueryKeys extends QueryDslQueryBase { validation_method?: QueryDslGeoValidationMethod ignore_unmapped?: boolean } -export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys | -{ [property: string]: QueryDslGeoPolygonPoints } +export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys +& { [property: string]: QueryDslGeoPolygonPoints | QueryDslGeoValidationMethod | boolean | float | string } export interface QueryDslGeoShapeFieldQuery { shape?: GeoShape @@ -4563,8 +4719,8 @@ export interface QueryDslGeoShapeFieldQuery { export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase { ignore_unmapped?: boolean } -export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys | -{ [property: string]: QueryDslGeoShapeFieldQuery } +export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys +& { [property: string]: QueryDslGeoShapeFieldQuery | boolean | float | string } export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' @@ -4790,8 +4946,8 @@ export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { } -export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys | -{ [property: string]: QueryDslDecayPlacement } +export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double } export type QueryDslOperator = 'and' | 'or' @@ -4995,8 
+5151,8 @@ export interface QueryDslShapeFieldQuery { export interface QueryDslShapeQueryKeys extends QueryDslQueryBase { } -export type QueryDslShapeQuery = QueryDslShapeQueryKeys | -{ [property: string]: QueryDslShapeFieldQuery } +export type QueryDslShapeQuery = QueryDslShapeQueryKeys +& { [property: string]: QueryDslShapeFieldQuery | float | string } export type QueryDslSimpleQueryStringFlags = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL' @@ -5091,8 +5247,8 @@ export interface QueryDslTermsLookup { export interface QueryDslTermsQueryKeys extends QueryDslQueryBase { } -export type QueryDslTermsQuery = QueryDslTermsQueryKeys | -{ [property: string]: string[] | long[] | QueryDslTermsLookup } +export type QueryDslTermsQuery = QueryDslTermsQueryKeys +& { [property: string]: string[] | long[] | QueryDslTermsLookup | float | string } export interface QueryDslTermsSetQuery extends QueryDslQueryBase { minimum_should_match_field?: Field @@ -5120,13 +5276,14 @@ export interface QueryDslTypeQuery extends QueryDslQueryBase { export interface QueryDslWildcardQuery extends QueryDslQueryBase { case_insensitive?: boolean rewrite?: MultiTermQueryRewrite - value: string + value?: string + wildcard?: string } export type QueryDslZeroTermsQuery = 'all' | 'none' export interface AsyncSearchAsyncSearch { - aggregations?: Record + aggregations?: Record _clusters?: ClusterStatistics fields?: Record hits: SearchHitsMetadata @@ -5230,8 +5387,8 @@ export interface AsyncSearchSubmitRequest extends RequestBase { sort?: string | string[] /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - aggs?: Record aggregations?: Record + aggs?: Record collapse?: SearchFieldCollapse explain?: boolean from?: integer @@ -5898,7 +6055,8 @@ export interface CatMlDatafeedsDatafeedsRecord { export interface CatMlDatafeedsRequest extends CatCatRequestBase { datafeed_id?: Id - allow_no_datafeeds?: boolean + allow_no_match?: boolean + time?: TimeUnit } export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] @@ -6082,8 +6240,9 @@ export interface CatMlJobsJobsRecord { export interface CatMlJobsRequest extends CatCatRequestBase { job_id?: Id - allow_no_jobs?: boolean + allow_no_match?: boolean bytes?: Bytes + time?: TimeUnit } export type CatMlJobsResponse = CatMlJobsJobsRecord[] @@ -8479,6 +8638,13 @@ export interface IndicesIndexRoutingRebalance { export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replicas' | 'none' +export interface IndicesIndexSegmentSort { + field: Fields + order: IndicesSegmentSortOrder | IndicesSegmentSortOrder[] + mode?: IndicesSegmentSortMode + missing?: IndicesSegmentSortMissing +} + export interface IndicesIndexSettingBlocks { read_only?: boolean read_only_allow_delete?: boolean @@ -8488,6 +8654,15 @@ export interface IndicesIndexSettingBlocks { } export interface IndicesIndexSettings { + index?: IndicesIndexSettings + mode?: string + 'index.mode'?: string + routing_path?: string[] + 'index.routing_path'?: string[] + soft_deletes?: IndicesSoftDeletes + 'index.soft_deletes'?: IndicesSoftDeletes + sort?: IndicesIndexSegmentSort + 'index.sort'?: IndicesIndexSegmentSort number_of_shards?: integer | string 'index.number_of_shards'?: integer | string number_of_replicas?: integer | string @@ -8498,8 +8673,8 @@ export interface IndicesIndexSettings { 'index.check_on_startup'?: IndicesIndexCheckOnStartup codec?: string 'index.codec'?: 
string - routing_partition_size?: integer | string - 'index.routing_partition_size'?: integer | string + routing_partition_size?: integer + 'index.routing_partition_size'?: integer 'soft_deletes.retention_lease.period'?: Time 'index.soft_deletes.retention_lease.period'?: Time load_fixed_bitset_filters_eagerly?: boolean @@ -8508,6 +8683,8 @@ export interface IndicesIndexSettings { 'index.hidden'?: boolean | string auto_expand_replicas?: string 'index.auto_expand_replicas'?: string + 'merge.scheduler.max_thread_count'?: integer + 'index.merge.scheduler.max_thread_count'?: integer 'search.idle.after'?: Time 'index.search.idle.after'?: Time refresh_interval?: Time @@ -8576,12 +8753,15 @@ export interface IndicesIndexSettings { 'index.max_slices_per_scroll'?: integer 'translog.durability'?: string 'index.translog.durability'?: string + 'translog.flush_threshold_size'?: string + 'index.translog.flush_threshold_size'?: string 'query_string.lenient'?: boolean | string 'index.query_string.lenient'?: boolean | string priority?: integer | string 'index.priority'?: integer | string top_metrics_max_size?: integer analysis?: IndicesIndexSettingsAnalysis + 'index.analysis'?: IndicesIndexSettingsAnalysis settings?: IndicesIndexSettings } @@ -8590,6 +8770,7 @@ export interface IndicesIndexSettingsAnalysis { char_filter?: Record filter?: Record normalizer?: Record + tokenizer?: Record } export interface IndicesIndexSettingsLifecycle { @@ -8622,6 +8803,16 @@ export interface IndicesOverlappingIndexTemplate { index_patterns?: IndexName[] } +export type IndicesSegmentSortMissing = '_last' | '_first' + +export type IndicesSegmentSortMode = 'min' | 'max' + +export type IndicesSegmentSortOrder = 'asc' | 'desc' + +export interface IndicesSoftDeletes { + enabled: boolean +} + export interface IndicesStringFielddata { format: IndicesStringFielddataFormat } @@ -8790,9 +8981,9 @@ export interface IndicesCreateRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { - aliases?: Record - mappings?: Record | MappingTypeMapping - settings?: Record + aliases?: Record + mappings?: MappingTypeMapping + settings?: IndicesIndexSettings } } @@ -8966,20 +9157,6 @@ export interface IndicesForcemergeRequest extends RequestBase { export interface IndicesForcemergeResponse extends ShardsOperationResponseBase { } -export interface IndicesFreezeRequest extends RequestBase { - index: IndexName - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time - wait_for_active_shards?: WaitForActiveShards -} - -export interface IndicesFreezeResponse extends AcknowledgedResponseBase { - shards_acknowledged: boolean -} - export interface IndicesGetRequest extends RequestBase { index: Indices allow_no_indices?: boolean @@ -10369,7 +10546,6 @@ export interface LicensePostStartTrialRequest extends RequestBase { export interface LicensePostStartTrialResponse extends AcknowledgedResponseBase { error_message?: string - acknowledged: boolean trial_was_started: boolean type: LicenseLicenseType } @@ -10445,8 +10621,8 @@ export interface MlAnalysisConfig { categorization_filters?: string[] detectors: MlDetector[] influencers?: Field[] - model_prune_window?: Time latency?: Time + model_prune_window?: Time multivariate_by_fields?: boolean per_partition_categorization?: MlPerPartitionCategorization summary_count_field_name?: Field @@ -10618,7 +10794,7 @@ export interface MlDataCounts { export interface MlDataDescription { format?: string - time_field: Field + time_field?: Field time_format?: string field_delimiter?: string } @@ -11003,6 +11179,8 @@ export interface MlHyperparameters { soft_tree_depth_tolerance?: double } +export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' + export interface MlInfluence { influencer_field_name: string influencer_field_values: string[] @@ -11270,7 +11448,6 @@ export interface MlValidationLoss { export interface MlCloseJobRequest extends RequestBase { job_id: Id allow_no_match?: boolean - allow_no_jobs?: boolean force?: boolean timeout?: Time } @@ -11560,7 +11737,6 @@ export interface MlGetBucketsRequest extends RequestBase { desc?: boolean exclude_interim?: boolean expand?: boolean - page?: MlPage sort?: Field start?: DateString end?: DateString @@ -11652,7 +11828,7 @@ export interface MlGetDataFrameAnalyticsStatsResponse { export interface MlGetDatafeedStatsRequest extends RequestBase { datafeed_id?: Ids - allow_no_datafeeds?: boolean + allow_no_match?: boolean } export interface MlGetDatafeedStatsResponse { @@ -11662,7 +11838,7 @@ export interface MlGetDatafeedStatsResponse { export interface MlGetDatafeedsRequest extends RequestBase { datafeed_id?: Ids - allow_no_datafeeds?: boolean + allow_no_match?: boolean exclude_generated?: boolean } @@ -11705,7 +11881,7 @@ export interface MlGetInfluencersResponse { export interface MlGetJobStatsRequest extends RequestBase { job_id?: Id - allow_no_jobs?: boolean + allow_no_match?: boolean } export interface MlGetJobStatsResponse { @@ -11716,7 +11892,6 @@ export interface MlGetJobStatsResponse { export interface MlGetJobsRequest extends RequestBase { job_id?: Ids allow_no_match?: boolean - allow_no_jobs?: boolean exclude_generated?: boolean } @@ -11748,17 +11923,13 @@ export interface MlGetModelSnapshotsResponse { export interface MlGetOverallBucketsRequest extends RequestBase { job_id: Id + allow_no_match?: boolean bucket_span?: Time - overall_score?: double | 
string - top_n?: integer end?: Time - start?: Time exclude_interim?: boolean - allow_no_match?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - allow_no_jobs?: boolean - } + overall_score?: double | string + start?: Time + top_n?: integer } export interface MlGetOverallBucketsResponse { @@ -11796,7 +11967,7 @@ export interface MlGetTrainedModelsRequest extends RequestBase { decompress_definition?: boolean exclude_generated?: boolean from?: integer - include?: string + include?: MlInclude size?: integer tags?: string } @@ -11930,8 +12101,8 @@ export interface MlPreviewDatafeedRequest extends RequestBase { datafeed_id?: Id /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - job_config?: MlJobConfig datafeed_config?: MlDatafeedConfig + job_config?: MlJobConfig } } @@ -12006,6 +12177,7 @@ export interface MlPutDatafeedRequest extends RequestBase { delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Time indices?: string[] + indexes?: string[] indices_options?: MlDatafeedIndicesOptions job_id?: Id max_empty_searches?: integer @@ -13243,6 +13415,7 @@ export interface RollupRollupSearchRequest extends RequestBase { typed_keys?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { + aggregations?: Record aggs?: Record query?: QueryDslQueryContainer size?: integer From 288f377dae3bfb1371243a91978fdf00266b6b24 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 3 Nov 2021 14:28:53 +0100 Subject: [PATCH 115/647] Bumped transport --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 0670d3834..20803585b 100644 --- a/package.json +++ b/package.json @@ -80,7 +80,7 @@ "xmlbuilder2": "^3.0.2" }, "dependencies": { - "@elastic/transport": "^0.0.15", + "@elastic/transport": "^8.0.0-beta.1", "tslib": "^2.3.0" }, "tap": { From d6fb2ca56905f994b3e98fb17ba074cb30497df6 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 3 Nov 2021 14:29:16 +0100 Subject: [PATCH 116/647] Bumped v8.0.0-beta.1 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 20803585b..3aeebedac 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@elastic/elasticsearch", - "version": "8.0.0-alpha.2", + "version": "8.0.0-beta.1", "versionCanary": "8.0.0-canary.35", "description": "The official Elasticsearch client for Node.js", "main": "index.js", From ae3ea3013f6c522d6188d35d12c97ac0fb08f82b Mon Sep 17 00:00:00 2001 From: delvedor Date: Tue, 23 Nov 2021 12:09:45 +0100 Subject: [PATCH 117/647] Bumped transport --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 3aeebedac..d6a630e41 100644 --- a/package.json +++ b/package.json @@ -80,7 +80,7 @@ "xmlbuilder2": "^3.0.2" }, "dependencies": { - "@elastic/transport": "^8.0.0-beta.1", + "@elastic/transport": "^8.0.0-beta.2", "tslib": "^2.3.0" }, "tap": { From e9b905f04251463c61f3aedaaa010eec79bd327d Mon Sep 17 00:00:00 2001 From: delvedor Date: Tue, 23 Nov 2021 14:01:33 +0100 Subject: [PATCH 118/647] Bumped v8.0.0-canary.36 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index d6a630e41..f5bbc5f2f 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", 
"version": "8.0.0-beta.1", - "versionCanary": "8.0.0-canary.35", + "versionCanary": "8.0.0-canary.36", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From fbaee7f54fb38801113c91e1825c793c7a25ce75 Mon Sep 17 00:00:00 2001 From: delvedor Date: Tue, 23 Nov 2021 14:08:33 +0100 Subject: [PATCH 119/647] API generation --- src/api/api/async_search.ts | 2 +- src/api/api/fleet.ts | 10 +- src/api/api/indices.ts | 2 +- src/api/api/ml.ts | 105 +-- src/api/api/rollup.ts | 2 +- src/api/api/search.ts | 2 +- src/api/api/security.ts | 2 +- src/api/kibana.ts | 5 +- src/api/types.ts | 1396 ++++++++++++++++++++------------- src/api/typesWithBodyKey.ts | 1442 ++++++++++++++++++++++------------- 10 files changed, 1853 insertions(+), 1115 deletions(-) diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index 4385269e7..00a7437dc 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -114,7 +114,7 @@ export default class AsyncSearch { async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index 91a82d88c..84b15f358 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -43,19 +43,19 @@ export default class Fleet { this.transport = transport } - async globalCheckpoints (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async globalCheckpoints (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async globalCheckpoints (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async globalCheckpoints (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise + async globalCheckpoints (this: That, params: 
T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
index b71fdc405..b6d6a5b00 100644
--- a/src/api/api/indices.ts
+++ b/src/api/api/indices.ts
@@ -1369,7 +1369,7 @@ export default class Indices {
   async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesSimulateIndexTemplateResponse>
   async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedBody: string[] = ['index_patterns', 'composed_of', 'overlapping', 'template']
+    const acceptedBody: string[] = ['allow_auto_create', 'index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     const userBody: any = params?.body
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index 4d0476bae..500f38024 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -48,11 +48,23 @@ export default class Ml {
   async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise<T.MlCloseJobResponse>
   async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
+    const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout']
     const querystring: Record<string, any> = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
         // @ts-expect-error
@@ -425,10 +437,10 @@ export default class Ml {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async explainDataFrameAnalytics (this: That, params: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlExplainDataFrameAnalyticsResponse>
-  async explainDataFrameAnalytics (this: That, params: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlExplainDataFrameAnalyticsResponse, unknown>>
-  async explainDataFrameAnalytics (this: That, params: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlExplainDataFrameAnalyticsResponse>
-  async explainDataFrameAnalytics (this: That, params: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlExplainDataFrameAnalyticsResponse>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlExplainDataFrameAnalyticsResponse, unknown>>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlExplainDataFrameAnalyticsResponse>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start']
     const querystring: Record<string, any> = {}
@@ -441,6 +453,7 @@ export default class Ml {
       body = userBody != null ? { ...userBody } : undefined
     }
 
+    params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
        body = body ?? 
{} @@ -471,7 +484,7 @@ export default class Ml { async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptions): Promise async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'start'] + const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'skip_time', 'start'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -539,7 +552,7 @@ export default class Ml { async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'timestamp'] - const acceptedBody: string[] = ['anomaly_score', 'desc', 'exclude_interim', 'expand', 'sort', 'start', 'end'] + const acceptedBody: string[] = ['anomaly_score', 'desc', 'end', 'exclude_interim', 'expand', 'page', 'sort', 'start'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -929,7 +942,7 @@ export default class Ml { async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedBody: string[] = ['start', 'end'] + const acceptedBody: string[] = ['desc', 'end', 'page', 'sort', 'start'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -970,11 +983,23 @@ export default class Ml { async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] + const acceptedBody: string[] = ['allow_no_match', 'bucket_span', 'end', 'exclude_interim', 'overall_score', 'start', 'top_n'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error @@ -992,7 +1017,7 @@ export default class Ml { async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['desc', 'exclude_interim', 'page', 'record_score', 'sort', 'start', 'end'] + const acceptedBody: string[] = ['desc', 'end', 'exclude_interim', 'page', 'record_score', 'sort', 'start'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1021,28 +1046,6 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } - async getTrainedModelDeploymentStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getTrainedModelDeploymentStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getTrainedModelDeploymentStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getTrainedModelDeploymentStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const querystring: Record = {} - const body = undefined - - params = params ?? {} - for (const key in params) { - if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body') { - querystring[key] = params[key] - } - } - - const method = 'GET' - const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_stats` - return await this.transport.request({ path, method, querystring, body }, options) - } - async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise @@ -1332,7 +1335,7 @@ export default class Ml { async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptions): Promise async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id'] - const acceptedBody: string[] = ['description'] + const acceptedBody: string[] = ['job_ids', 'description'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1422,7 +1425,7 @@ export default class Ml { async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 
'runtime_mappings', 'script_fields', 'scroll_size'] + const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1524,7 +1527,7 @@ export default class Ml { async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'tags'] + const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'tags'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1803,7 +1806,7 @@ export default class Ml { async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['force', 'timeout'] + const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1888,19 +1891,31 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } - async updateDatafeed (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async updateDatafeed (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async updateDatafeed (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async updateDatafeed (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] + const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? 
{} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index 848d0724e..cf9ed6ac8 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -213,7 +213,7 @@ export default class Rollup { async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise> async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'type'] - const acceptedBody: string[] = ['aggregations', 'query', 'size'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'size'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/search.ts b/src/api/api/search.ts index 12d7aa1c3..754e7b9f9 100644 --- a/src/api/api/search.ts +++ b/src/api/api/search.ts @@ -42,7 +42,7 @@ export default async function SearchApi (this: That, params export default async function SearchApi (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise> export default async function SearchApi (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/security.ts b/src/api/api/security.ts index c3430b533..78e12b4a1 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -71,7 +71,7 @@ export default class Security { async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['username'] - const acceptedBody: string[] = ['password'] + const acceptedBody: string[] = ['password', 'password_hash'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/kibana.ts b/src/api/kibana.ts index 4aab8b1eb..2e06a7d6d 100644 --- a/src/api/kibana.ts +++ b/src/api/kibana.ts @@ -158,7 +158,7 @@ interface KibanaClient { } fieldCaps: (params?: 
T.FieldCapsRequest| TB.FieldCapsRequest, options?: TransportRequestOptions) => Promise> fleet: { - globalCheckpoints: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + globalCheckpoints: (params: T.FleetGlobalCheckpointsRequest| TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions) => Promise> msearch: (params?: T.TODO, options?: TransportRequestOptions) => Promise> search: (params?: T.TODO, options?: TransportRequestOptions) => Promise> } @@ -304,7 +304,6 @@ interface KibanaClient { getModelSnapshots: (params: T.MlGetModelSnapshotsRequest| TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions) => Promise> getOverallBuckets: (params: T.MlGetOverallBucketsRequest| TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions) => Promise> getRecords: (params: T.MlGetRecordsRequest| TB.MlGetRecordsRequest, options?: TransportRequestOptions) => Promise> - getTrainedModelDeploymentStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise> getTrainedModels: (params?: T.MlGetTrainedModelsRequest| TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions) => Promise> getTrainedModelsStats: (params?: T.MlGetTrainedModelsStatsRequest| TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions) => Promise> inferTrainedModelDeployment: (params?: T.TODO, options?: TransportRequestOptions) => Promise> @@ -334,7 +333,7 @@ interface KibanaClient { stopDatafeed: (params: T.MlStopDatafeedRequest| TB.MlStopDatafeedRequest, options?: TransportRequestOptions) => Promise> stopTrainedModelDeployment: (params?: T.TODO, options?: TransportRequestOptions) => Promise> updateDataFrameAnalytics: (params: T.MlUpdateDataFrameAnalyticsRequest| TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> - updateDatafeed: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + updateDatafeed: (params: T.MlUpdateDatafeedRequest| TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions) => Promise> updateFilter: (params: T.MlUpdateFilterRequest| TB.MlUpdateFilterRequest, options?: TransportRequestOptions) => Promise> updateJob: (params: T.MlUpdateJobRequest| TB.MlUpdateJobRequest, options?: TransportRequestOptions) => Promise> updateModelSnapshot: (params: T.MlUpdateModelSnapshotRequest| TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions) => Promise> diff --git a/src/api/types.ts b/src/api/types.ts index 10df318d7..d4100af9c 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -62,7 +62,7 @@ export interface BulkRequest extends RequestBase { pipeline?: string refresh?: Refresh routing?: Routing - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields timeout?: Time @@ -127,7 +127,7 @@ export interface CountRequest extends RequestBase { allow_no_indices?: boolean analyzer?: string analyze_wildcard?: boolean - default_operator?: DefaultOperator + default_operator?: QueryDslOperator df?: string expand_wildcards?: ExpandWildcards ignore_throttled?: boolean @@ -186,7 +186,7 @@ export interface DeleteByQueryRequest extends RequestBase { analyzer?: string analyze_wildcard?: boolean conflicts?: Conflicts - default_operator?: DefaultOperator + default_operator?: QueryDslOperator df?: string expand_wildcards?: ExpandWildcards from?: long @@ -206,7 +206,7 @@ export interface DeleteByQueryRequest extends RequestBase { size?: long slices?: long sort?: string[] - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: 
Fields _source_includes?: Fields stats?: string[] @@ -260,7 +260,7 @@ export interface ExistsRequest extends RequestBase { realtime?: boolean refresh?: boolean routing?: Routing - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields stored_fields?: Fields @@ -278,7 +278,7 @@ export interface ExistsSourceRequest extends RequestBase { realtime?: boolean refresh?: boolean routing?: Routing - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields version?: VersionNumber @@ -304,12 +304,12 @@ export interface ExplainRequest extends RequestBase { index: IndexName analyzer?: string analyze_wildcard?: boolean - default_operator?: DefaultOperator + default_operator?: QueryDslOperator df?: string lenient?: boolean preference?: string routing?: Routing - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields stored_fields?: Fields @@ -353,6 +353,19 @@ export interface FieldCapsResponse { fields: Record> } +export interface GetGetResult { + _index: IndexName + fields?: Record + found: boolean + _id: Id + _primary_term?: long + _routing?: string + _seq_no?: SequenceNumber + _source?: TDocument + _type?: Type + _version?: VersionNumber +} + export interface GetRequest extends RequestBase { id: Id index: IndexName @@ -360,7 +373,7 @@ export interface GetRequest extends RequestBase { realtime?: boolean refresh?: boolean routing?: Routing - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields stored_fields?: Fields @@ -368,18 +381,7 @@ export interface GetRequest extends RequestBase { version_type?: VersionType } -export interface GetResponse { - _index: IndexName - fields?: Record - found: boolean - _id: Id - _primary_term?: long - _routing?: string - _seq_no?: SequenceNumber - _source?: TDocument - _type?: Type - _version?: VersionNumber -} +export type GetResponse = GetGetResult export interface GetScriptRequest extends RequestBase { id: Id @@ -435,7 +437,7 @@ export interface GetSourceRequest { realtime?: boolean refresh?: boolean routing?: Routing - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields stored_fields?: Fields @@ -479,8 +481,8 @@ export interface InfoResponse { export interface KnnSearchRequest extends RequestBase { index: Indices routing?: Routing - _source?: boolean | Fields | SearchSourceFilter - docvalue_fields?: SearchDocValueField | (Field | SearchDocValueField)[] + _source?: SearchSourceConfig + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] stored_fields?: Fields fields?: Fields knn: KnnSearchQuery @@ -504,27 +506,18 @@ export interface KnnSearchQuery { export type KnnSearchQueryVector = double[] -export interface MgetHit { - error?: ErrorCause - fields?: Record - found?: boolean +export interface MgetMultiGetError { + error: ErrorCause _id: Id _index: IndexName - _primary_term?: long - _routing?: Routing - _seq_no?: SequenceNumber - _source?: TDocument _type?: Type - _version?: VersionNumber } -export type MgetMultiGetId = string | integer - export interface MgetOperation { - _id: MgetMultiGetId + _id: Id _index?: IndexName routing?: Routing - _source?: boolean | Fields | SearchSourceFilter + _source?: SearchSourceConfig stored_fields?: Fields _type?: Type version?: VersionNumber @@ -537,30 +530,41 @@ export interface MgetRequest extends RequestBase { realtime?: boolean 
refresh?: boolean routing?: Routing - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields stored_fields?: Fields docs?: MgetOperation[] - ids?: MgetMultiGetId[] + ids?: Ids } export interface MgetResponse { - docs: MgetHit[] + docs: MgetResponseItem[] } -export interface MsearchBody { +export type MgetResponseItem = GetGetResult | MgetMultiGetError + +export interface MsearchMultiSearchItem extends SearchResponse { + status: integer +} + +export interface MsearchMultiSearchResult { + took: long + responses: MsearchResponseItem[] +} + +export interface MsearchMultisearchBody { aggregations?: Record aggs?: Record query?: QueryDslQueryContainer from?: integer size?: integer pit?: SearchPointInTimeReference - track_total_hits?: boolean | integer - suggest?: SearchSuggestContainer | Record + track_total_hits?: SearchTrackHits + suggest?: SearchSuggester } -export interface MsearchHeader { +export interface MsearchMultisearchHeader { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean @@ -584,17 +588,14 @@ export interface MsearchRequest extends RequestBase { search_type?: SearchType rest_total_hits_as_int?: boolean typed_keys?: boolean - searches?: (MsearchHeader | MsearchBody)[] + searches?: MsearchRequestItem[] } -export interface MsearchResponse { - took: long - responses: (MsearchSearchResult | ErrorResponseBase)[] -} +export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody -export interface MsearchSearchResult extends SearchResponse { - status: integer -} +export type MsearchResponse = MsearchMultiSearchResult + +export type MsearchResponseItem = MsearchMultiSearchItem | ErrorResponseBase export interface MsearchTemplateRequest extends RequestBase { index?: Indices @@ -603,18 +604,18 @@ export interface MsearchTemplateRequest extends RequestBase { search_type?: SearchType rest_total_hits_as_int?: boolean typed_keys?: boolean - search_templates?: MsearchTemplateTemplateItem[] + search_templates?: MsearchTemplateRequestItem[] } -export interface MsearchTemplateResponse { - responses: (SearchResponse | ErrorResponseBase)[] - took: long -} +export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig + +export type MsearchTemplateResponse = MsearchMultiSearchResult -export interface MsearchTemplateTemplateItem { +export interface MsearchTemplateTemplateConfig { + explain?: boolean id?: Id - index?: Indices params?: Record + profile?: boolean source?: string } @@ -667,7 +668,7 @@ export interface MtermvectorsTermVectorsResult { export interface OpenPointInTimeRequest extends RequestBase { index: Indices - keep_alive?: Time + keep_alive: Time } export interface OpenPointInTimeResponse { @@ -684,7 +685,7 @@ export interface PutScriptRequest extends RequestBase { context?: Name master_timeout?: Time timeout?: Time - script?: StoredScript + script: StoredScript } export interface PutScriptResponse extends AcknowledgedResponseBase { @@ -906,7 +907,7 @@ export interface ScriptsPainlessExecutePainlessContextSetup { export interface ScriptsPainlessExecuteRequest extends RequestBase { context?: string context_setup?: ScriptsPainlessExecutePainlessContextSetup - script?: InlineScript + script?: InlineScript | string } export interface ScriptsPainlessExecuteResponse { @@ -930,7 +931,7 @@ export interface SearchRequest extends RequestBase { analyze_wildcard?: boolean batched_reduce_size?: long ccs_minimize_roundtrips?: boolean - 
default_operator?: DefaultOperator + default_operator?: QueryDslOperator df?: string docvalue_fields?: Fields expand_wildcards?: ExpandWildcards @@ -954,12 +955,12 @@ export interface SearchRequest extends RequestBase { suggest_text?: string terminate_after?: long timeout?: Time - track_total_hits?: boolean | integer + track_total_hits?: SearchTrackHits track_scores?: boolean typed_keys?: boolean rest_total_hits_as_int?: boolean version?: boolean - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields seq_no_primary_term?: boolean @@ -980,8 +981,8 @@ export interface SearchRequest extends RequestBase { script_fields?: Record search_after?: SearchSortResults slice?: SlicedScroll - fields?: (Field | DateField)[] - suggest?: SearchSuggestContainer | Record + fields?: (QueryDslFieldAndFormat | Field)[] + suggest?: SearchSuggester pit?: SearchPointInTimeReference runtime_mappings?: MappingRuntimeFields } @@ -1069,6 +1070,8 @@ export interface SearchAggregationProfileDelegateDebugFilter { export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' +export type SearchBuiltinHighlighterType = 'plain' | 'fvh' | 'unified' + export interface SearchCollector { name: string reason: string @@ -1076,6 +1079,14 @@ export interface SearchCollector { children?: SearchCollector[] } +export interface SearchCompletionContext { + boost?: double + context: SearchContext + neighbours?: GeoHashPrecision[] + precision?: GeoHashPrecision + prefix?: boolean +} + export interface SearchCompletionSuggestOption { collate_match?: boolean contexts?: Record @@ -1090,14 +1101,14 @@ export interface SearchCompletionSuggestOption { } export interface SearchCompletionSuggester extends SearchSuggesterBase { - contexts?: Record + contexts?: Record fuzzy?: SearchSuggestFuzziness prefix?: string regex?: string skip_duplicates?: boolean } -export type SearchContext = string | QueryDslGeoLocation +export type SearchContext = string | GeoLocation export interface SearchDirectGenerator { field: Field @@ -1113,11 +1124,6 @@ export interface SearchDirectGenerator { suggest_mode?: SuggestMode } -export interface SearchDocValueField { - field: Field - format?: string -} - export interface SearchFetchProfile { type: string description: string @@ -1141,12 +1147,6 @@ export interface SearchFetchProfileDebug { fast_path?: integer } -export interface SearchFieldAndFormat { - field: Field - format?: string - include_unmapped?: boolean -} - export interface SearchFieldCollapse { field: Field inner_hits?: SearchInnerHits | SearchInnerHits[] @@ -1159,16 +1159,30 @@ export interface SearchFieldSort { nested?: SearchNestedSortValue order?: SearchSortOrder unmapped_type?: MappingFieldType + numeric_type?: SearchFieldSortNumericType + format?: string +} + +export type SearchFieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos' + +export interface SearchFieldSuggester { + completion?: SearchCompletionSuggester + phrase?: SearchPhraseSuggester + prefix?: string + regex?: string + term?: SearchTermSuggester + text?: string } export interface SearchGeoDistanceSortKeys { mode?: SearchSortMode distance_type?: GeoDistanceType + ignore_unmapped?: boolean order?: SearchSortOrder unit?: DistanceUnit } export type SearchGeoDistanceSort = SearchGeoDistanceSortKeys -& { [property: string]: QueryDslGeoLocation | QueryDslGeoLocation[] | SearchSortMode | GeoDistanceType | SearchSortOrder | DistanceUnit } +& { [property: string]: GeoLocation | GeoLocation[] | SearchSortMode | GeoDistanceType 
| boolean | SearchSortOrder | DistanceUnit } export interface SearchHighlight { fields: Record @@ -1214,7 +1228,7 @@ export interface SearchHighlightField { pre_tags?: string[] require_field_match?: boolean tags_schema?: SearchHighlighterTagsSchema - type?: SearchHighlighterType | string + type?: SearchHighlighterType } export type SearchHighlighterEncoder = 'default' | 'html' @@ -1225,7 +1239,7 @@ export type SearchHighlighterOrder = 'score' export type SearchHighlighterTagsSchema = 'styled' -export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' +export type SearchHighlighterType = SearchBuiltinHighlighterType | string export interface SearchHit { _index: IndexName @@ -1260,7 +1274,7 @@ export interface SearchInnerHits { size?: integer from?: integer collapse?: SearchFieldCollapse - docvalue_fields?: (SearchFieldAndFormat | Field)[] + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] explain?: boolean highlight?: SearchHighlight ignore_unmapped?: boolean @@ -1268,20 +1282,14 @@ export interface SearchInnerHits { seq_no_primary_term?: boolean fields?: Fields sort?: SearchSort - _source?: boolean | SearchSourceFilter + _source?: SearchSourceConfig stored_field?: Fields track_scores?: boolean version?: boolean } -export interface SearchInnerHitsMetadata { - total: SearchTotalHits | long - hits: SearchHit>[] - max_score?: double -} - export interface SearchInnerHitsResult { - hits: SearchInnerHitsMetadata + hits: SearchHitsMetadata } export interface SearchLaplaceSmoothingModel { @@ -1303,6 +1311,7 @@ export interface SearchNestedIdentity { export interface SearchNestedSortValue { filter?: QueryDslQueryContainer max_children?: integer + nested?: SearchNestedSortValue path: Field } @@ -1397,16 +1406,19 @@ export interface SearchRescoreQuery { export type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total' export interface SearchScoreSort { - mode?: SearchSortMode order?: SearchSortOrder } export interface SearchScriptSort { order?: SearchSortOrder script: Script - type?: string + type?: SearchScriptSortType + mode?: SearchSortMode + nested?: SearchNestedSortValue } +export type SearchScriptSortType = 'string' | 'number' + export interface SearchSearchProfile { collector: SearchCollector[] query: SearchQueryProfile[] @@ -1428,27 +1440,31 @@ export interface SearchSmoothingModelContainer { export type SearchSort = SearchSortCombinations | SearchSortCombinations[] -export type SearchSortCombinations = Field | SearchSortContainer | SearchSortOrder +export type SearchSortCombinations = Field | SearchSortOptions -export interface SearchSortContainerKeys { +export type SearchSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' + +export interface SearchSortOptionsKeys { _score?: SearchScoreSort _doc?: SearchScoreSort _geo_distance?: SearchGeoDistanceSort _script?: SearchScriptSort } -export type SearchSortContainer = SearchSortContainerKeys +export type SearchSortOptions = SearchSortOptionsKeys & { [property: string]: SearchFieldSort | SearchSortOrder | SearchScoreSort | SearchGeoDistanceSort | SearchScriptSort } -export type SearchSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' - -export type SearchSortOrder = 'asc' | 'desc' | '_doc' +export type SearchSortOrder = 'asc' | 'desc' export type SearchSortResults = (long | double | string | null)[] +export type SearchSourceConfig = boolean | SearchSourceFilter | Fields + +export type SearchSourceConfigParam = boolean | Fields + export interface SearchSourceFilter { excludes?: Fields - includes?: Fields exclude?: Fields + 
includes?: Fields include?: Fields } @@ -1465,23 +1481,6 @@ export interface SearchSuggest { text: string } -export interface SearchSuggestContainer { - completion?: SearchCompletionSuggester - phrase?: SearchPhraseSuggester - prefix?: string - regex?: string - term?: SearchTermSuggester - text?: string -} - -export interface SearchSuggestContextQuery { - boost?: double - context: SearchContext - neighbours?: Distance[] | integer[] - precision?: Distance | integer - prefix?: boolean -} - export interface SearchSuggestFuzziness { fuzziness: Fuzziness min_length: integer @@ -1494,6 +1493,12 @@ export type SearchSuggestOption = SearchCompletionSuggestOp export type SearchSuggestSort = 'score' | 'frequency' +export interface SearchSuggesterKeys { + text?: string +} +export type SearchSuggester = SearchSuggesterKeys +& { [property: string]: SearchFieldSuggester | string } + export interface SearchSuggesterBase { field: Field analyzer?: string @@ -1528,6 +1533,8 @@ export interface SearchTotalHits { export type SearchTotalHitsRelation = 'eq' | 'gte' +export type SearchTrackHits = boolean | integer + export interface SearchMvtRequest extends RequestBase { index: Indices field: Field @@ -1697,7 +1704,7 @@ export interface UpdateRequest routing?: Routing timeout?: Time wait_for_active_shards?: WaitForActiveShards - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields detect_noop?: boolean @@ -1718,7 +1725,7 @@ export interface UpdateByQueryRequest extends RequestBase { analyzer?: string analyze_wildcard?: boolean conflicts?: Conflicts - default_operator?: DefaultOperator + default_operator?: QueryDslOperator df?: string expand_wildcards?: ExpandWildcards from?: long @@ -1737,7 +1744,7 @@ export interface UpdateByQueryRequest extends RequestBase { size?: long slices?: long sort?: string[] - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields stats?: string[] @@ -1798,6 +1805,8 @@ export interface AcknowledgedResponseBase { export type AggregateName = string +export type BuiltinScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' + export interface BulkIndexByScrollFailure { cause: ErrorCause id: Id @@ -1842,16 +1851,17 @@ export interface CompletionStats { export type Conflicts = 'abort' | 'proceed' +export interface CoordsGeoBounds { + top: double + bottom: double + left: double + right: double +} + export type DataStreamName = string export type DataStreamNames = DataStreamName | DataStreamName[] -export interface DateField { - field: Field - format?: string - include_unmapped?: boolean -} - export type DateFormat = string export type DateMath = string @@ -1860,8 +1870,6 @@ export type DateMathTime = string export type DateString = string -export type DefaultOperator = 'AND' | 'OR' - export interface DictionaryResponseBase { [key: string]: TValue } @@ -1908,9 +1916,9 @@ export interface ErrorResponseBase { status: integer } -export type ExpandWildcardOptions = 'all' | 'open' | 'closed' | 'hidden' | 'none' +export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' -export type ExpandWildcards = ExpandWildcardOptions | ExpandWildcardOptions[] | string +export type ExpandWildcards = ExpandWildcard | ExpandWildcard[] export type Field = string @@ -1924,6 +1932,8 @@ export interface FieldSizeUsage { size_in_bytes: long } +export type FieldValue = long | double | string | boolean + export interface FielddataStats { evictions?: long memory_size?: 
ByteSize @@ -1942,14 +1952,31 @@ export interface FlushStats { export type Fuzziness = string | integer +export type GeoBounds = CoordsGeoBounds | TopLeftBottomRightGeoBounds | TopRightBottomLeftGeoBounds | WktGeoBounds + export type GeoDistanceType = 'arc' | 'plane' -export type GeoHashPrecision = number +export type GeoHash = string + +export interface GeoHashLocation { + geohash: GeoHash +} + +export type GeoHashPrecision = number | string + +export interface GeoLine { + type: string + coordinates: double[][] +} + +export type GeoLocation = LatLonGeoLocation | GeoHashLocation | double[] | string export type GeoShape = any export type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains' +export type GeoTile = string + export type GeoTilePrecision = number export interface GetStats { @@ -1965,9 +1992,7 @@ export interface GetStats { total: long } -export type GroupBy = 'nodes' | 'parents' | 'none' - -export type Health = 'green' | 'yellow' | 'red' +export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED' export type Host = string @@ -1985,10 +2010,6 @@ export type IndexPattern = string export type IndexPatterns = IndexPattern[] -export interface IndexedScript extends ScriptBase { - id: Id -} - export interface IndexingStats { index_current: long delete_current: long @@ -2024,12 +2045,14 @@ export type InlineGet = InlineGetKeys & { [property: string]: any } export interface InlineScript extends ScriptBase { + lang?: ScriptLanguage + options?: Record source: string } export type Ip = string -export interface LatLon { +export interface LatLonGeoLocation { lat: double lon: double } @@ -2151,9 +2174,7 @@ export interface RecoveryStats { throttle_time_in_millis: long } -export type Refresh = boolean | RefreshOptions - -export type RefreshOptions = 'wait_for' +export type Refresh = boolean | 'true' | 'false' | 'wait_for' export interface RefreshStats { external_total: long @@ -2186,10 +2207,9 @@ export interface Retries { export type Routing = string -export type Script = InlineScript | IndexedScript | string +export type Script = InlineScript | string | StoredScriptId export interface ScriptBase { - lang?: ScriptLanguage | string params?: Record } @@ -2198,7 +2218,7 @@ export interface ScriptField { ignore_failure?: boolean } -export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' +export type ScriptLanguage = BuiltinScriptLanguage | string export interface ScriptTransform { lang: string @@ -2262,8 +2282,6 @@ export type SequenceNumber = long export type Service = string -export type ShapeRelation = 'intersects' | 'disjoint' | 'within' - export interface ShardFailure { index?: IndexName node?: string @@ -2284,8 +2302,6 @@ export interface ShardsOperationResponseBase { _shards: ShardStatistics } -export type Size = 'Raw' | 'k' | 'm' | 'g' | 't' | 'p' - export interface SlicedScroll { field?: Field id: integer @@ -2302,10 +2318,15 @@ export interface StoreStats { } export interface StoredScript { - lang?: ScriptLanguage | string + lang: ScriptLanguage + options?: Record source: string } +export interface StoredScriptId extends ScriptBase { + id: Id +} + export type SuggestMode = 'missing' | 'popular' | 'always' export type SuggestionName = string @@ -2324,6 +2345,16 @@ export type TimeZone = string export type Timestamp = string +export interface TopLeftBottomRightGeoBounds { + top_left: GeoLocation + bottom_right: GeoLocation +} + +export interface TopRightBottomLeftGeoBounds { + top_right: GeoLocation + bottom_left: GeoLocation +} 
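// A minimal sketch of values satisfying the GeoBounds and GeoLocation unions
// declared above; the coordinates are hypothetical, and any single variant
// type-checks on its own:
//   const corners: TopLeftBottomRightGeoBounds = {
//     top_left: { lat: 40.73, lon: -74.1 },
//     bottom_right: { lat: 40.01, lon: -71.12 }
//   }
//   const asWkt: WktGeoBounds = { wkt: 'BBOX (-74.1, -71.12, 40.73, 40.01)' }
//   const point: GeoLocation = { lat: 40.12, lon: -71.34 }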
+ export interface Transform { } @@ -2365,8 +2396,6 @@ export type WaitForActiveShards = integer | WaitForActiveShardOptions export type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid' -export type WaitForStatus = 'green' | 'yellow' | 'red' - export interface WarmerStats { current: long total: long @@ -2374,6 +2403,10 @@ export interface WarmerStats { total_time_in_millis: long } +export interface WktGeoBounds { + wkt: string +} + export interface WriteResponseBase { _id: Id _index: IndexName @@ -2402,11 +2435,19 @@ export type uint = number export type ulong = number +export interface AggregationsAdjacencyMatrixAggregate extends AggregationsMultiBucketAggregateBase { +} + export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase { filters?: Record } -export type AggregationsAggregate = AggregationsSingleBucketAggregate | AggregationsAutoDateHistogramAggregate | AggregationsFiltersAggregate | AggregationsSignificantTermsAggregate | AggregationsTermsAggregate | AggregationsBucketAggregate | AggregationsCompositeBucketAggregate | AggregationsMultiBucketAggregate | AggregationsMatrixStatsAggregate | AggregationsKeyedValueAggregate | AggregationsMetricAggregate +export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMultiBucketBase { +} +export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys +& { [property: string]: AggregationsAggregate | long } + +export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | 
AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsChildrenAggregate | AggregationsGeoLineAggregate export interface AggregationsAggregateBase { meta?: Record @@ -2418,10 +2459,10 @@ export interface AggregationsAggregation { } export interface AggregationsAggregationContainer { + aggregations?: Record aggs?: Record meta?: Record adjacency_matrix?: AggregationsAdjacencyMatrixAggregation - aggregations?: Record auto_date_histogram?: AggregationsAutoDateHistogramAggregation avg?: AggregationsAverageAggregation avg_bucket?: AggregationsAverageBucketAggregation @@ -2499,7 +2540,13 @@ export interface AggregationsAggregationRange { to?: double | string } -export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregate> { +export interface AggregationsArrayPercentilesItem { + key: string + value: double | null + value_as_string?: string +} + +export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { interval: DateMathTime } @@ -2521,32 +2568,35 @@ export interface AggregationsAverageAggregation extends AggregationsFormatMetric export interface AggregationsAverageBucketAggregation extends AggregationsPipelineAggregationBase { } +export interface AggregationsAvgAggregate extends AggregationsSingleMetricAggregateBase { +} + export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase { min: double max: double q1: double q2: double q3: double + lower: double + upper: double + min_as_string?: string + max_as_string?: string + q1_as_string?: string + q2_as_string?: string + q3_as_string?: string + lower_as_string?: string + upper_as_string?: string } export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase { compression?: double } -export type AggregationsBucket = AggregationsCompositeBucket | AggregationsDateHistogramBucket | AggregationsFiltersBucketItem | AggregationsIpRangeBucket | AggregationsRangeBucket | AggregationsRareTermsBucket | AggregationsSignificantTermsBucket | AggregationsKeyedBucket - -export interface AggregationsBucketAggregate extends AggregationsAggregateBase { - after_key: Record - bg_count: long - doc_count: long - doc_count_error_upper_bound: long - sum_other_doc_count: long - interval: DateMathTime - items: AggregationsBucket +export interface AggregationsBucketAggregationBase extends AggregationsAggregation { } -export interface AggregationsBucketAggregationBase extends AggregationsAggregation { - aggregations?: Record +export interface AggregationsBucketMetricValueAggregate extends AggregationsSingleMetricAggregateBase { + keys: string[] } export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { @@ -2564,6 +2614,16 @@ export interface AggregationsBucketSortAggregation extends AggregationsAggregati sort?: SearchSort } +export type AggregationsBuckets = Record | TBucket[] + +export type AggregationsBucketsPath = string | string[] | Record + +export type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1Y' + +export interface AggregationsCardinalityAggregate extends AggregationsAggregateBase { + value: long +} + export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { precision_threshold?: integer 
rehash?: boolean @@ -2574,6 +2634,14 @@ export interface AggregationsChiSquareHeuristic { include_negatives: boolean } +export interface AggregationsChildrenAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsChildrenAggregateBucketKeys extends AggregationsMultiBucketBase { +} +export type AggregationsChildrenAggregateBucket = AggregationsChildrenAggregateBucketKeys +& { [property: string]: AggregationsAggregate | long } + export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase { type?: RelationName } @@ -2586,6 +2654,10 @@ export interface AggregationsClassificationInferenceOptions { top_classes_results_field?: string } +export interface AggregationsCompositeAggregate extends AggregationsMultiBucketAggregateBase { + after_key?: Record +} + export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase { after?: Record size?: integer @@ -2599,13 +2671,15 @@ export interface AggregationsCompositeAggregationSource { geotile_grid?: AggregationsGeoTileGridAggregation } -export interface AggregationsCompositeBucketKeys { +export interface AggregationsCompositeBucketKeys extends AggregationsMultiBucketBase { + key: Record } export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys -& { [property: string]: AggregationsAggregate } +& { [property: string]: AggregationsAggregate | Record | long } -export interface AggregationsCompositeBucketAggregate extends AggregationsMultiBucketAggregate> { - after_key: Record +export interface AggregationsCumulativeCardinalityAggregate extends AggregationsAggregateBase { + value: long + value_as_string?: string } export interface AggregationsCumulativeCardinalityAggregation extends AggregationsPipelineAggregationBase { @@ -2614,14 +2688,17 @@ export interface AggregationsCumulativeCardinalityAggregation extends Aggregatio export interface AggregationsCumulativeSumAggregation extends AggregationsPipelineAggregationBase { } +export interface AggregationsDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { +} + export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase { - calendar_interval?: AggregationsDateInterval | Time - extended_bounds?: AggregationsExtendedBounds - hard_bounds?: AggregationsExtendedBounds + calendar_interval?: AggregationsCalendarInterval + extended_bounds?: AggregationsExtendedBounds + hard_bounds?: AggregationsExtendedBounds field?: Field - fixed_interval?: AggregationsDateInterval | Time + fixed_interval?: Time format?: string - interval?: AggregationsDateInterval | Time + interval?: Time min_doc_count?: integer missing?: DateString offset?: Time @@ -2632,12 +2709,15 @@ export interface AggregationsDateHistogramAggregation extends AggregationsBucket keyed?: boolean } -export interface AggregationsDateHistogramBucketKeys { +export interface AggregationsDateHistogramBucketKeys extends AggregationsMultiBucketBase { + key_as_string?: string + key: EpochMillis } export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys -& { [property: string]: AggregationsAggregate } +& { [property: string]: AggregationsAggregate | string | EpochMillis | long } -export type AggregationsDateInterval = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'quarter' | 'year' +export interface AggregationsDateRangeAggregate extends AggregationsRangeAggregate { +} export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase { field?: 
export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase {
  field?: Field
@@ -2649,12 +2729,14 @@ export interface AggregationsDateRangeAggregation extends AggregationsBucketAggr
}

export interface AggregationsDateRangeExpression {
-  from?: DateMath | float
-  from_as_string?: string
-  to_as_string?: string
+  from?: AggregationsFieldDateMath
  key?: string
-  to?: DateMath | float
-  doc_count?: long
+  to?: AggregationsFieldDateMath
+}
+
+export interface AggregationsDerivativeAggregate extends AggregationsSingleMetricAggregateBase {
+  normalized_value?: double
+  normalized_value_as_string?: string
}

export interface AggregationsDerivativeAggregation extends AggregationsPipelineAggregationBase {
@@ -2668,49 +2750,74 @@ export interface AggregationsDiversifiedSamplerAggregation extends AggregationsB
  field?: Field
}

+export interface AggregationsDoubleTermsAggregate extends AggregationsTermsAggregateBase {
+}
+
+export interface AggregationsDoubleTermsBucketKeys extends AggregationsTermsBucketBase {
+  key: double
+  key_as_string?: string
+}
+export type AggregationsDoubleTermsBucket = AggregationsDoubleTermsBucketKeys
+& { [property: string]: AggregationsAggregate | double | string | long }
+
export interface AggregationsEwmaModelSettings {
  alpha?: float
}

+export interface AggregationsEwmaMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
+  model: 'ewma'
+  settings: AggregationsEwmaModelSettings
+}
+
export interface AggregationsExtendedBounds {
  max: T
  min: T
}

export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAggregate {
-  std_deviation_bounds: AggregationsStandardDeviationBounds
-  sum_of_squares?: double
-  variance?: double
-  variance_population?: double
-  variance_sampling?: double
-  std_deviation?: double
-  std_deviation_population?: double
-  std_deviation_sampling?: double
+  sum_of_squares: double | null
+  variance: double | null
+  variance_population: double | null
+  variance_sampling: double | null
+  std_deviation: double | null
+  std_deviation_bounds?: AggregationsStandardDeviationBounds
+  sum_of_squares_as_string?: string
+  variance_as_string?: string
+  variance_population_as_string?: string
+  variance_sampling_as_string?: string
+  std_deviation_as_string?: string
+  std_deviation_bounds_as_string?: AggregationsStandardDeviationBoundsAsString
}

export interface AggregationsExtendedStatsAggregation extends AggregationsFormatMetricAggregationBase {
  sigma?: double
}

+export interface AggregationsExtendedStatsBucketAggregate extends AggregationsExtendedStatsAggregate {
+}
+
export interface AggregationsExtendedStatsBucketAggregation extends AggregationsPipelineAggregationBase {
  sigma?: double
}

-export interface AggregationsFiltersAggregate extends AggregationsAggregateBase {
-  buckets: AggregationsFiltersBucketItem[] | Record
+export type AggregationsFieldDateMath = DateMath | double
+
+export interface AggregationsFilterAggregate extends AggregationsSingleBucketAggregateBase {
+}
+
+export interface AggregationsFiltersAggregate extends AggregationsMultiBucketAggregateBase {
}

export interface AggregationsFiltersAggregation extends AggregationsBucketAggregationBase {
-  filters?: Record | QueryDslQueryContainer[]
+  filters?: AggregationsBuckets
  other_bucket?: boolean
  other_bucket_key?: string
  keyed?: boolean
}

-export interface AggregationsFiltersBucketItemKeys {
-  doc_count: long
+export interface AggregationsFiltersBucketKeys extends AggregationsMultiBucketBase {
}
-export type AggregationsFiltersBucketItem = AggregationsFiltersBucketItemKeys
+export type AggregationsFiltersBucket = AggregationsFiltersBucketKeys
& { [property: string]: AggregationsAggregate | long }

export interface AggregationsFormatMetricAggregationBase extends AggregationsMetricAggregationBase {
@@ -2723,13 +2830,8 @@ export interface AggregationsFormattableMetricAggregation extends AggregationsMe
export type AggregationsGapPolicy = 'skip' | 'insert_zeros'

-export interface AggregationsGeoBounds {
-  bottom_right: LatLon
-  top_left: LatLon
-}
-
export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase {
-  bounds: AggregationsGeoBounds
+  bounds: GeoBounds
}

export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase {
@@ -2738,34 +2840,45 @@ export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggr
export interface AggregationsGeoCentroidAggregate extends AggregationsAggregateBase {
  count: long
-  location: QueryDslGeoLocation
+  location?: GeoLocation
}

export interface AggregationsGeoCentroidAggregation extends AggregationsMetricAggregationBase {
  count?: long
-  location?: QueryDslGeoLocation
+  location?: GeoLocation
+}
+
+export interface AggregationsGeoDistanceAggregate extends AggregationsRangeAggregate {
}

export interface AggregationsGeoDistanceAggregation extends AggregationsBucketAggregationBase {
  distance_type?: GeoDistanceType
  field?: Field
-  origin?: QueryDslGeoLocation | string
+  origin?: GeoLocation
  ranges?: AggregationsAggregationRange[]
  unit?: DistanceUnit
}

+export interface AggregationsGeoHashGridAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase {
-  bounds?: QueryDslBoundingBox
+  bounds?: GeoBounds
  field?: Field
  precision?: GeoHashPrecision
  shard_size?: integer
  size?: integer
}

+export interface AggregationsGeoHashGridBucketKeys extends AggregationsMultiBucketBase {
+  key: GeoHash
+}
+export type AggregationsGeoHashGridBucket = AggregationsGeoHashGridBucketKeys
+& { [property: string]: AggregationsAggregate | GeoHash | long }
+
export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase {
  type: string
-  geometry: AggregationsLineStringGeoShape
-  properties: AggregationsGeoLineProperties
+  geometry: GeoLine
}

export interface AggregationsGeoLineAggregation {
@@ -2780,21 +2893,28 @@ export interface AggregationsGeoLinePoint {
  field: Field
}

-export interface AggregationsGeoLineProperties {
-  complete: boolean
-  sort_values: double[]
-}
-
export interface AggregationsGeoLineSort {
  field: Field
}

+export interface AggregationsGeoTileGridAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
export interface AggregationsGeoTileGridAggregation extends AggregationsBucketAggregationBase {
  field?: Field
  precision?: GeoTilePrecision
  shard_size?: integer
  size?: integer
-  bounds?: AggregationsGeoBounds
+  bounds?: GeoBounds
+}
+
+export interface AggregationsGeoTileGridBucketKeys extends AggregationsMultiBucketBase {
+  key: GeoTile
+}
+export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys
+& { [property: string]: AggregationsAggregate | GeoTile | long }
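Geo aggregates now reuse the shared GeoBounds, GeoLocation, GeoHash and GeoTile types instead of the query-DSL specific shapes being removed here. A small sketch of the request objects these types describe; the field name is hypothetical:

const geoAggs = {
  viewport: { geo_bounds: { field: 'location' } },    // response bounds is a GeoBounds
  centroid: { geo_centroid: { field: 'location' } },  // response location is a GeoLocation and may be absent
  grid: { geohash_grid: { field: 'location', precision: 5 } }
}
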
+
+export interface AggregationsGlobalAggregate extends AggregationsSingleBucketAggregateBase {
}

export interface AggregationsGlobalAggregation extends AggregationsBucketAggregationBase {
@@ -2808,13 +2928,13 @@ export interface AggregationsHdrMethod {
  number_of_significant_value_digits?: integer
}

-export interface AggregationsHdrPercentileItem {
-  key: double
-  value: double
+export interface AggregationsHdrPercentileRanksAggregate extends AggregationsPercentilesAggregateBase {
}

-export interface AggregationsHdrPercentilesAggregate extends AggregationsAggregateBase {
-  values: AggregationsHdrPercentileItem[]
+export interface AggregationsHdrPercentilesAggregate extends AggregationsPercentilesAggregateBase {
+}
+
+export interface AggregationsHistogramAggregate extends AggregationsMultiBucketAggregateBase {
}

export interface AggregationsHistogramAggregation extends AggregationsBucketAggregationBase {
@@ -2831,6 +2951,13 @@ export interface AggregationsHistogramAggregation extends AggregationsBucketAggr
  keyed?: boolean
}

+export interface AggregationsHistogramBucketKeys extends AggregationsMultiBucketBase {
+  key_as_string?: string
+  key: double
+}
+export type AggregationsHistogramBucket = AggregationsHistogramBucketKeys
+& { [property: string]: AggregationsAggregate | string | double | long }
+
export interface AggregationsHistogramOrder {
  _count?: SearchSortOrder
  _key?: SearchSortOrder
@@ -2841,6 +2968,11 @@ export interface AggregationsHoltLinearModelSettings {
  beta?: float
}

+export interface AggregationsHoltMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
+  model: 'holt'
+  settings: AggregationsHoltLinearModelSettings
+}
+
export interface AggregationsHoltWintersModelSettings {
  alpha?: float
  beta?: float
@@ -2850,18 +2982,52 @@ export interface AggregationsHoltWintersModelSettings {
  type?: AggregationsHoltWintersType
}

+export interface AggregationsHoltWintersMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
+  model: 'holt_winters'
+  settings: AggregationsHoltWintersModelSettings
+}
+
export type AggregationsHoltWintersType = 'add' | 'mult'

+export interface AggregationsInferenceAggregateKeys extends AggregationsAggregateBase {
+  value?: FieldValue
+  feature_importance?: AggregationsInferenceFeatureImportance[]
+  top_classes?: AggregationsInferenceTopClassEntry[]
+  warning?: string
+}
+export type AggregationsInferenceAggregate = AggregationsInferenceAggregateKeys
+& { [property: string]: any }
+
export interface AggregationsInferenceAggregation extends AggregationsPipelineAggregationBase {
  model_id: Name
  inference_config?: AggregationsInferenceConfigContainer
}

+export interface AggregationsInferenceClassImportance {
+  class_name: string
+  importance: double
+}
+
export interface AggregationsInferenceConfigContainer {
  regression?: AggregationsRegressionInferenceOptions
  classification?: AggregationsClassificationInferenceOptions
}

+export interface AggregationsInferenceFeatureImportance {
+  feature_name: string
+  importance?: double
+  classes?: AggregationsInferenceClassImportance[]
+}
+
+export interface AggregationsInferenceTopClassEntry {
+  class_name: FieldValue
+  class_probability: double
+  class_score: double
+}
+
+export interface AggregationsIpRangeAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
export interface AggregationsIpRangeAggregation extends AggregationsBucketAggregationBase {
  field?: Field
  ranges?: AggregationsIpRangeAggregationRange[]
@@ -2873,26 +3039,39 @@ export interface AggregationsIpRangeAggregationRange {
  to?: string
}

-export interface AggregationsIpRangeBucketKeys {
+export interface AggregationsIpRangeBucketKeys extends AggregationsMultiBucketBase {
+  from?: string
+  to?: string
}
export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys
-& { [property: string]: AggregationsAggregate }
+& { [property: string]: AggregationsAggregate | string | long }

-export interface AggregationsKeyedBucketKeys {
-  doc_count: long
-  key: TKey
-  key_as_string: string
+export type AggregationsKeyedPercentiles = Record
+
+export interface AggregationsLinearMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
+  model: 'linear'
+  settings: EmptyObject
}
-export type AggregationsKeyedBucket = AggregationsKeyedBucketKeys
-& { [property: string]: AggregationsAggregate | long | TKey | string }

-export interface AggregationsKeyedValueAggregate extends AggregationsValueAggregate {
-  keys: string[]
+export interface AggregationsLongRareTermsAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
+export interface AggregationsLongRareTermsBucketKeys extends AggregationsMultiBucketBase {
+  key: long
+  key_as_string?: string
+}
+export type AggregationsLongRareTermsBucket = AggregationsLongRareTermsBucketKeys
+& { [property: string]: AggregationsAggregate | long | string }
+
+export interface AggregationsLongTermsAggregate extends AggregationsTermsAggregateBase {
}

-export interface AggregationsLineStringGeoShape {
-  coordinates: QueryDslGeoCoordinate[]
+export interface AggregationsLongTermsBucketKeys extends AggregationsTermsBucketBase {
+  key: string
+  key_as_string?: string
}
+export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys
+& { [property: string]: AggregationsAggregate | string | long }

export interface AggregationsMatrixAggregation extends AggregationsAggregation {
  fields?: Fields
@@ -2900,40 +3079,52 @@ export interface AggregationsMatrixAggregation extends AggregationsAggregation {
}

export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateBase {
-  correlation: Record
-  covariance: Record
-  count: integer
-  kurtosis: double
-  mean: double
-  skewness: double
-  variance: double
-  name: string
+  doc_count: long
+  fields: AggregationsMatrixStatsFields[]
}

export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation {
  mode?: AggregationsMatrixStatsMode
}

+export interface AggregationsMatrixStatsFields {
+  name: Field
+  count: long
+  mean: double
+  variance: double
+  skewness: double
+  kurtosis: double
+  covariance: Record
+  correlation: Record
+}
+
export type AggregationsMatrixStatsMode = 'avg' | 'min' | 'max' | 'sum' | 'median'

+export interface AggregationsMaxAggregate extends AggregationsSingleMetricAggregateBase {
+}
+
export interface AggregationsMaxAggregation extends AggregationsFormatMetricAggregationBase {
}

export interface AggregationsMaxBucketAggregation extends AggregationsPipelineAggregationBase {
}

+export interface AggregationsMedianAbsoluteDeviationAggregate extends AggregationsSingleMetricAggregateBase {
+}
+
export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase {
  compression?: double
}

-export type AggregationsMetricAggregate = AggregationsValueAggregate | AggregationsBoxPlotAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsGeoLineAggregate | AggregationsPercentilesAggregate | AggregationsScriptedMetricAggregate | AggregationsStatsAggregate | AggregationsStringStatsAggregate | AggregationsTopHitsAggregate | AggregationsTopMetricsAggregate | AggregationsExtendedStatsAggregate | AggregationsTDigestPercentilesAggregate | AggregationsHdrPercentilesAggregate
-
export interface AggregationsMetricAggregationBase {
  field?: Field
  missing?: AggregationsMissing
  script?: Script
}

+export interface AggregationsMinAggregate extends AggregationsSingleMetricAggregateBase {
+}
+
export interface AggregationsMinAggregation extends AggregationsFormatMetricAggregationBase {
}
@@ -2944,6 +3135,9 @@ export type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' |
export type AggregationsMissing = string | integer | double | boolean

+export interface AggregationsMissingAggregate extends AggregationsSingleBucketAggregateBase {
+}
+
export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase {
  field?: Field
  missing?: AggregationsMissing
@@ -2951,18 +3145,14 @@ export interface AggregationsMissingAggregation extends AggregationsBucketAggreg
export type AggregationsMissingOrder = 'first' | 'last' | 'default'

-export interface AggregationsMovingAverageAggregation extends AggregationsPipelineAggregationBase {
+export type AggregationsMovingAverageAggregation = AggregationsLinearMovingAverageAggregation | AggregationsSimpleMovingAverageAggregation | AggregationsEwmaMovingAverageAggregation | AggregationsHoltMovingAverageAggregation | AggregationsHoltWintersMovingAverageAggregation
+
+export interface AggregationsMovingAverageAggregationBase extends AggregationsPipelineAggregationBase {
  minimize?: boolean
-  model?: AggregationsMovingAverageModel
-  settings: AggregationsMovingAverageSettings
  predict?: integer
  window?: integer
}

-export type AggregationsMovingAverageModel = 'linear' | 'simple' | 'ewma' | 'holt' | 'holt_winters'
-
-export type AggregationsMovingAverageSettings = AggregationsEwmaModelSettings | AggregationsHoltLinearModelSettings | AggregationsHoltWintersModelSettings
-
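The moving average aggregation is now a discriminated union: the literal `model` tag selects which `settings` shape is allowed, so a `holt` model paired with `holt_winters` settings no longer compiles. A minimal sketch:

const movingAvg = {
  moving_avg: {
    buckets_path: 'the_sum',
    model: 'holt' as const,               // narrows settings to AggregationsHoltLinearModelSettings
    settings: { alpha: 0.5, beta: 0.5 },
    window: 5
  }
}
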
export interface AggregationsMovingFunctionAggregation extends AggregationsPipelineAggregationBase {
  script?: string
  shift?: integer
@@ -2975,23 +3165,41 @@ export interface AggregationsMovingPercentilesAggregation extends AggregationsPi
  keyed?: boolean
}

-export interface AggregationsMultiBucketAggregate extends AggregationsAggregateBase {
-  buckets: TBucket[]
+export interface AggregationsMultiBucketAggregateBase extends AggregationsAggregateBase {
+  buckets: AggregationsBuckets
+}
+
+export interface AggregationsMultiBucketBase {
+  doc_count: long
}

export interface AggregationsMultiTermLookup {
  field: Field
}

+export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggregateBase {
+}
+
export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase {
  terms: AggregationsMultiTermLookup[]
}

+export interface AggregationsMultiTermsBucketKeys extends AggregationsMultiBucketBase {
+  key: (long | double | string)[]
+  key_as_string?: string
+  doc_count_error_upper_bound?: long
+}
+export type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys
+& { [property: string]: AggregationsAggregate | (long | double | string)[] | string | long }
+
export interface AggregationsMutualInformationHeuristic {
  background_is_superset?: boolean
  include_negatives?: boolean
}

+export interface AggregationsNestedAggregate extends AggregationsSingleBucketAggregateBase {
+}
+
export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase {
  path?: Field
}
@@ -3009,11 +3217,6 @@ export interface AggregationsParentAggregation extends AggregationsBucketAggrega
export interface AggregationsPercentageScoreHeuristic {
}

-export interface AggregationsPercentileItem {
-  percentile: double
-  value: double
-}
-
export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase {
  keyed?: boolean
  values?: double[]
@@ -3021,8 +3224,10 @@ export interface AggregationsPercentileRanksAggregation extends AggregationsForm
  tdigest?: AggregationsTDigest
}

-export interface AggregationsPercentilesAggregate extends AggregationsAggregateBase {
-  items: AggregationsPercentileItem[]
+export type AggregationsPercentiles = AggregationsKeyedPercentiles | AggregationsArrayPercentilesItem[]
+
+export interface AggregationsPercentilesAggregateBase extends AggregationsAggregateBase {
+  values: AggregationsPercentiles
}

export interface AggregationsPercentilesAggregation extends AggregationsFormatMetricAggregationBase {
@@ -3032,16 +3237,22 @@ export interface AggregationsPercentilesAggregation extends AggregationsFormatMe
  tdigest?: AggregationsTDigest
}

+export interface AggregationsPercentilesBucketAggregate extends AggregationsPercentilesAggregateBase {
+}
+
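Percentile-style aggregates now share AggregationsPercentilesAggregateBase, whose `values` is either a keyed record or an array of items. A hedged sketch of normalizing both shapes; the helper and the simplified item type are ours:

interface PercentileItem { key: string, value: number | null, value_as_string?: string }

function percentileEntries (values: Record<string, number | null> | PercentileItem[]): Array<[string, number | null]> {
  // keyed form: { '50.0': 3.2, ... }   array form: [{ key: '50.0', value: 3.2 }, ...]
  return Array.isArray(values)
    ? values.map(item => [item.key, item.value] as [string, number | null])
    : Object.entries(values)
}
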
export interface AggregationsPercentilesBucketAggregation extends AggregationsPipelineAggregationBase {
  percents?: double[]
}

export interface AggregationsPipelineAggregationBase extends AggregationsAggregation {
-  buckets_path?: string | string[] | Record
+  buckets_path?: AggregationsBucketsPath
  format?: string
  gap_policy?: AggregationsGapPolicy
}

+export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase {
  field?: Field
  missing?: integer
@@ -3050,28 +3261,32 @@ export interface AggregationsRangeAggregation extends AggregationsBucketAggregat
  keyed?: boolean
}

-export interface AggregationsRangeBucketKeys {
+export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase {
+  from?: double
+  to?: double
+  from_as_string?: string
+  to_as_string?: string
}
export type AggregationsRangeBucket = AggregationsRangeBucketKeys
-& { [property: string]: AggregationsAggregate }
+& { [property: string]: AggregationsAggregate | double | string | long }

export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase {
-  exclude?: string | string[]
+  exclude?: AggregationsTermsExclude
  field?: Field
-  include?: string | string[] | AggregationsTermsInclude
+  include?: AggregationsTermsInclude
  max_doc_count?: long
  missing?: AggregationsMissing
  precision?: double
  value_type?: string
}

-export interface AggregationsRareTermsBucketKeys {
+export interface AggregationsRateAggregate extends AggregationsAggregateBase {
+  value: double
+  value_as_string?: string
}
-export type AggregationsRareTermsBucket = AggregationsRareTermsBucketKeys
-& { [property: string]: AggregationsAggregate }

export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase {
-  unit?: AggregationsDateInterval
+  unit?: AggregationsCalendarInterval
  mode?: AggregationsRateMode
}
@@ -3082,10 +3297,16 @@ export interface AggregationsRegressionInferenceOptions {
  num_top_feature_importance_values?: integer
}

+export interface AggregationsReverseNestedAggregate extends AggregationsSingleBucketAggregateBase {
+}
+
export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase {
  path?: Field
}

+export interface AggregationsSamplerAggregate extends AggregationsSingleBucketAggregateBase {
+}
+
export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase {
  shard_size?: integer
}
@@ -3112,15 +3333,29 @@ export interface AggregationsSerialDifferencingAggregation extends AggregationsP
  lag?: integer
}

-export interface AggregationsSignificantTermsAggregate extends AggregationsMultiBucketAggregate {
-  bg_count: long
-  doc_count: long
+export interface AggregationsSignificantLongTermsAggregate extends AggregationsMultiBucketAggregateBase {
}

+export interface AggregationsSignificantLongTermsBucketKeys extends AggregationsSignificantTermsBucketBase {
+  key: long
+  key_as_string?: string
+}
+export type AggregationsSignificantLongTermsBucket = AggregationsSignificantLongTermsBucketKeys
+& { [property: string]: AggregationsAggregate | long | string | double }
+
+export interface AggregationsSignificantStringTermsAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
+export interface AggregationsSignificantStringTermsBucketKeys extends AggregationsSignificantTermsBucketBase {
+  key: string
+}
+export type AggregationsSignificantStringTermsBucket = AggregationsSignificantStringTermsBucketKeys
+& { [property: string]: AggregationsAggregate | string | double | long }
+
export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase {
  background_filter?: QueryDslQueryContainer
  chi_square?: AggregationsChiSquareHeuristic
-  exclude?: string | string[]
+  exclude?: AggregationsTermsExclude
  execution_hint?: AggregationsTermsAggregationExecutionHint
  field?: Field
  gnd?: AggregationsGoogleNormalizedDistanceHeuristic
@@ -3134,15 +3369,15 @@ export interface AggregationsSignificantTermsAggregation extends AggregationsBuc
  size?: integer
}

-export interface AggregationsSignificantTermsBucketKeys {
+export interface AggregationsSignificantTermsBucketBase extends AggregationsMultiBucketBase {
+  score: double
+  bg_count: long
}
-export type AggregationsSignificantTermsBucket = AggregationsSignificantTermsBucketKeys
-& { [property: string]: AggregationsAggregate }

export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase {
  background_filter?: QueryDslQueryContainer
  chi_square?: AggregationsChiSquareHeuristic
-  exclude?: string | string[]
+  exclude?: AggregationsTermsExclude
  execution_hint?: AggregationsTermsAggregationExecutionHint
  field?: Field
  filter_duplicate_text?: boolean
@@ -3158,48 +3393,99 @@ export interface AggregationsSignificantTextAggregation extends AggregationsBuck
  source_fields?: Fields
}

-export interface AggregationsSingleBucketAggregateKeys extends AggregationsAggregateBase {
-  doc_count: double
+export interface AggregationsSimpleMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
+  model: 'simple'
+  settings: EmptyObject
+}
+
+export interface AggregationsSimpleValueAggregate extends AggregationsSingleMetricAggregateBase {
+}
+
+export interface AggregationsSingleBucketAggregateBase extends AggregationsAggregateBase {
+  doc_count: long
+}
+
+export interface AggregationsSingleMetricAggregateBase extends AggregationsAggregateBase {
+  value: double | null
+  value_as_string?: string
}
-export type AggregationsSingleBucketAggregate = AggregationsSingleBucketAggregateKeys
-& { [property: string]: AggregationsAggregate | double | Record }
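Single-metric aggregates now type `value` as `double | null`: a min, max or avg over zero matching documents has no value rather than a misleading 0. A small defensive-access sketch:

function metricOrDefault (agg: { value: number | null }, fallback = 0): number {
  // value is null when the aggregation matched no documents
  return agg.value ?? fallback
}
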
export interface AggregationsStandardDeviationBounds {
-  lower?: double
-  upper?: double
-  lower_population?: double
-  upper_population?: double
-  lower_sampling?: double
-  upper_sampling?: double
+  upper: double | null
+  lower: double | null
+  upper_population: double | null
+  lower_population: double | null
+  upper_sampling: double | null
+  lower_sampling: double | null
+}
+
+export interface AggregationsStandardDeviationBoundsAsString {
+  upper: string
+  lower: string
+  upper_population: string
+  lower_population: string
+  upper_sampling: string
+  lower_sampling: string
}

export interface AggregationsStatsAggregate extends AggregationsAggregateBase {
-  count: double
+  count: long
+  min: double | null
+  max: double | null
+  avg: double | null
  sum: double
-  avg?: double
-  max?: double
-  min?: double
+  min_as_string?: string
+  max_as_string?: string
+  avg_as_string?: string
+  sum_as_string?: string
}

export interface AggregationsStatsAggregation extends AggregationsFormatMetricAggregationBase {
}

+export interface AggregationsStatsBucketAggregate extends AggregationsStatsAggregate {
+}
+
export interface AggregationsStatsBucketAggregation extends AggregationsPipelineAggregationBase {
}

+export interface AggregationsStringRareTermsAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
+export interface AggregationsStringRareTermsBucketKeys extends AggregationsMultiBucketBase {
+  key: string
+}
+export type AggregationsStringRareTermsBucket = AggregationsStringRareTermsBucketKeys
+& { [property: string]: AggregationsAggregate | string | long }
+
export interface AggregationsStringStatsAggregate extends AggregationsAggregateBase {
  count: long
-  min_length: integer
-  max_length: integer
-  avg_length: double
-  entropy: double
-  distribution?: Record
+  min_length: integer | null
+  max_length: integer | null
+  avg_length: double | null
+  entropy: double | null
+  distribution?: string | null
+  min_length_as_string?: string
+  max_length_as_string?: string
+  avg_length_as_string?: string
}

export interface AggregationsStringStatsAggregation extends AggregationsMetricAggregationBase {
  show_distribution?: boolean
}

+export interface AggregationsStringTermsAggregate extends AggregationsTermsAggregateBase {
+}
+
+export interface AggregationsStringTermsBucketKeys extends AggregationsTermsBucketBase {
+  key: string
+}
+export type AggregationsStringTermsBucket = AggregationsStringTermsBucketKeys
+& { [property: string]: AggregationsAggregate | string | long }
+
+export interface AggregationsSumAggregate extends AggregationsSingleMetricAggregateBase {
+}
+
export interface AggregationsSumAggregation extends AggregationsFormatMetricAggregationBase {
}
@@ -3210,8 +3496,15 @@ export interface AggregationsTDigest {
  compression?: integer
}

-export interface AggregationsTDigestPercentilesAggregate extends AggregationsAggregateBase {
-  values: Record
+export interface AggregationsTDigestPercentileRanksAggregate extends AggregationsPercentilesAggregateBase {
+}
+
+export interface AggregationsTDigestPercentilesAggregate extends AggregationsPercentilesAggregateBase {
+}
+
+export interface AggregationsTTestAggregate extends AggregationsAggregateBase {
+  value: double | null
+  value_as_string?: string
}

export interface AggregationsTTestAggregation extends AggregationsAggregation {
@@ -3222,17 +3515,17 @@ export interface AggregationsTTestAggregation extends AggregationsAggregation {
export type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic'

-export interface AggregationsTermsAggregate extends AggregationsMultiBucketAggregate {
-  doc_count_error_upper_bound: long
+export interface AggregationsTermsAggregateBase extends AggregationsMultiBucketAggregateBase {
+  doc_count_error_upper_bound?: long
  sum_other_doc_count: long
}

export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase {
  collect_mode?: AggregationsTermsAggregationCollectMode
-  exclude?: string | string[]
+  exclude?: AggregationsTermsExclude
  execution_hint?: AggregationsTermsAggregationExecutionHint
  field?: Field
-  include?: string | string[] | AggregationsTermsInclude
+  include?: AggregationsTermsInclude
  min_doc_count?: integer
  missing?: AggregationsMissing
  missing_order?: AggregationsMissingOrder
@@ -3249,9 +3542,17 @@ export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_f
export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality'

-export type AggregationsTermsAggregationOrder = SearchSortOrder | Record | Record[]
+export type AggregationsTermsAggregationOrder = Record | Record[]
+
+export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase {
+  doc_count_error?: long
+}
+
+export type AggregationsTermsExclude = string | string[]

-export interface AggregationsTermsInclude {
+export type AggregationsTermsInclude = string | string[] | AggregationsTermsPartition
+
+export interface AggregationsTermsPartition {
  num_partitions: long
  partition: long
}
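`include` on a terms aggregation is now a proper union of a pattern string, a list of exact values, or an AggregationsTermsPartition. A sketch of paging through a high-cardinality field by partition; the field name is hypothetical:

const partitionedTerms = {
  terms: {
    field: 'user.id',
    include: { num_partitions: 20, partition: 0 },  // request partition 0 of 20
    size: 1000
  }
}
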
@@ -3263,7 +3564,7 @@ export interface AggregationsTestPopulation {
export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase {
-  hits: SearchHitsMetadata>
+  hits: SearchHitsMetadata
}

export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase {
@@ -3274,7 +3575,7 @@ export interface AggregationsTopHitsAggregation extends AggregationsMetricAggreg
  script_fields?: Record
  size?: integer
  sort?: SearchSort
-  _source?: boolean | SearchSourceFilter | Fields
+  _source?: SearchSourceConfig
  stored_fields?: Fields
  track_scores?: boolean
  version?: boolean
@@ -3282,12 +3583,11 @@ export interface AggregationsTopHitsAggregation extends AggregationsMetricAggreg
}

export interface AggregationsTopMetrics {
-  sort: (long | double | string)[]
-  metrics: Record
+  sort: (FieldValue | null)[]
+  metrics: Record
}

-export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBase {
-  top: AggregationsTopMetrics[]
+export interface AggregationsTopMetricsAggregate extends AggregationsMultiBucketAggregateBase {
}

export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase {
@@ -3296,13 +3596,26 @@ export interface AggregationsTopMetricsAggregation extends AggregationsMetricAgg
  sort?: SearchSort
}

+export interface AggregationsTopMetricsBucketKeys extends AggregationsMultiBucketBase {
+  top: AggregationsTopMetrics[]
+}
+export type AggregationsTopMetricsBucket = AggregationsTopMetricsBucketKeys
+& { [property: string]: AggregationsAggregate | AggregationsTopMetrics[] | long }
+
export interface AggregationsTopMetricsValue {
  field: Field
}

-export interface AggregationsValueAggregate extends AggregationsAggregateBase {
-  value: double
-  value_as_string?: string
+export interface AggregationsUnmappedRareTermsAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
+export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
+export interface AggregationsUnmappedTermsAggregate extends AggregationsTermsAggregateBase {
+}
+
+export interface AggregationsValueCountAggregate extends AggregationsSingleMetricAggregateBase {
}

export interface AggregationsValueCountAggregation extends AggregationsFormattableMetricAggregation {
@@ -3310,6 +3623,9 @@ export interface AggregationsValueCountAggregation extends AggregationsFormattab
export type AggregationsValueType = 'string' | 'long' | 'double' | 'number' | 'date' | 'date_nanos' | 'ip' | 'numeric' | 'geo_point' | 'boolean'

+export interface AggregationsVariableWidthHistogramAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
export interface AggregationsVariableWidthHistogramAggregation {
  field?: Field
  buckets?: integer
@@ -3317,6 +3633,17 @@ export interface AggregationsVariableWidthHistogramAggregation {
  initial_buffer?: integer
}

+export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase {
+  min: double
+  key: double
+  max: double
+  min_as_string?: string
+  key_as_string?: string
+  max_as_string?: string
+}
+export type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidthHistogramBucketKeys
+& { [property: string]: AggregationsAggregate | double | string | long }
+
export interface AggregationsWeightedAverageAggregation extends AggregationsAggregation {
  format?: string
  value?: AggregationsWeightedAverageValue
@@ -3330,6 +3657,9 @@ export interface AggregationsWeightedAverageValue {
  script?: Script
}

+export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase {
+}
+
export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisDutchAnalyzer

export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase {
@@ -3337,12 +3667,14 @@ export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase
  preserve_original: boolean
}

-export type AnalysisCharFilter = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceCharFilter | AnalysisIcuNormalizationCharFilter | AnalysisKuromojiIterationMarkCharFilter
+export type AnalysisCharFilter = string | AnalysisCharFilterDefinition

export interface AnalysisCharFilterBase {
  version?: VersionString
}

+export type AnalysisCharFilterDefinition = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceCharFilter | AnalysisIcuNormalizationCharFilter | AnalysisKuromojiIterationMarkCharFilter
+
export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase {
  type: 'char_group'
  tokenize_on_chars: string[]
@@ -3866,18 +4198,22 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'

-export type AnalysisTokenFilter = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuTokenizer | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter
+export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition

export interface AnalysisTokenFilterBase {
  version?: VersionString
}

-export type AnalysisTokenizer = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer
+export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuTokenizer | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter
+
+export type AnalysisTokenizer = string | AnalysisTokenizerDefinition

export interface AnalysisTokenizerBase {
  version?: VersionString
}

+export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer
+
export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {
  type: 'trim'
}
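Because AnalysisCharFilter, AnalysisTokenFilter and AnalysisTokenizer now admit plain strings, built-in component names and inline definitions can be mixed freely. A hedged sketch of an analyze request body under the new types:

const analyzeBody = {
  tokenizer: 'standard',                               // built-in tokenizer by name
  filter: [
    'lowercase',                                       // built-in filter by name
    { type: 'edge_ngram', min_gram: 2, max_gram: 10 }  // inline AnalysisTokenFilterDefinition
  ],
  text: 'Elasticsearch type definitions'
}
// e.g. await client.indices.analyze({ body: analyzeBody })
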
@@ -4062,7 +4398,7 @@ export interface MappingDoubleRangeProperty extends MappingRangePropertyBase {
  type: 'double_range'
}

-export type MappingDynamicMapping = 'strict' | 'runtime' | 'true' | 'false'
+export type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false'

export interface MappingDynamicTemplate {
  mapping?: MappingProperty
@@ -4132,7 +4468,7 @@ export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'cc
export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase {
  ignore_malformed?: boolean
  ignore_z_value?: boolean
-  null_value?: QueryDslGeoLocation
+  null_value?: GeoLocation
  type: 'geo_point'
}
@@ -4258,7 +4594,7 @@ export interface MappingPropertyBase {
  name?: PropertyName
  properties?: Record
  ignore_above?: integer
-  dynamic?: boolean | MappingDynamicMapping
+  dynamic?: MappingDynamicMapping
  fields?: Record
}
@@ -4350,7 +4686,7 @@ export interface MappingSuggestContext {
  precision?: integer | string
}

-export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads'
+export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' | 'with_positions_payloads'

export interface MappingTextIndexPrefixes {
  max_chars: integer
@@ -4389,7 +4725,7 @@ export interface MappingTokenCountProperty extends MappingDocValuesPropertyBase
export interface MappingTypeMapping {
  all_field?: MappingAllField
  date_detection?: boolean
-  dynamic?: boolean | MappingDynamicMapping
+  dynamic?: MappingDynamicMapping
  dynamic_date_formats?: string[]
  dynamic_templates?: Record | Record[]
  _field_names?: MappingFieldNamesField
@@ -4432,18 +4768,6 @@ export interface QueryDslBoostingQuery extends QueryDslQueryBase {
  positive: QueryDslQueryContainer
}

-export interface QueryDslBoundingBox {
-  bottom_right?: QueryDslGeoLocation
-  top_left?: QueryDslGeoLocation
-  top_right?: QueryDslGeoLocation
-  bottom_left?: QueryDslGeoLocation
-  top?: double
-  left?: double
-  right?: double
-  bottom?: double
-  wkt?: string
-}
-
export type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min'

export type QueryDslCombinedFieldsOperator = 'or' | 'and'
@@ -4521,6 +4845,12 @@ export interface QueryDslExistsQuery extends QueryDslQueryBase {
  field: Field
}

+export interface QueryDslFieldAndFormat {
+  field: Field
+  format?: string
+  include_unmapped?: boolean
+}
+
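The new QueryDslFieldAndFormat shape lets the `fields` option of search-style requests mix bare field names with per-field formatting. A minimal sketch; index and field names are hypothetical:

const fields = [
  'message',                                                     // plain Field
  { field: '@timestamp', format: 'strict_date_optional_time' }   // QueryDslFieldAndFormat
]
// e.g. await client.search({ index: 'my-index', body: { query: { match_all: {} }, fields } })
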
export interface QueryDslFieldLookup {
  id: Id
  index?: IndexName
@@ -4576,16 +4906,14 @@ export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase {
  ignore_unmapped?: boolean
}
export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys
-& { [property: string]: QueryDslBoundingBox | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string }
-
-export type QueryDslGeoCoordinate = string | double[] | QueryDslThreeDimensionalPoint
+& { [property: string]: GeoBounds | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string }

export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase {
}
export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double }
+& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double }

-export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase {
}

export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase {
@@ -4594,14 +4922,12 @@ export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase {
  validation_method?: QueryDslGeoValidationMethod
}
export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys
-& { [property: string]: QueryDslGeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | float | string }
+& { [property: string]: GeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | float | string }

export type QueryDslGeoExecution = 'memory' | 'indexed'

-export type QueryDslGeoLocation = string | double[] | QueryDslTwoDimensionalPoint
-
export interface QueryDslGeoPolygonPoints {
-  points: QueryDslGeoLocation[]
+  points: GeoLocation[]
}

export interface QueryDslGeoPolygonQueryKeys extends QueryDslQueryBase {
@@ -4850,7 +5176,7 @@ export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionB
export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys
& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double }

-export type QueryDslOperator = 'and' | 'or'
+export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR'

export interface QueryDslParentIdQuery extends QueryDslQueryBase {
  id?: Id
@@ -4941,7 +5267,7 @@ export interface QueryDslQueryContainer {
  span_or?: QueryDslSpanOrQuery
  span_term?: Partial>
  span_within?: QueryDslSpanWithinQuery
-  term?: Partial>
+  term?: Partial>
  terms?: QueryDslTermsQuery
  terms_set?: Partial>
  wildcard?: Partial>
@@ -5044,18 +5370,20 @@ export interface QueryDslScriptScoreQuery extends QueryDslQueryBase {
}

export interface QueryDslShapeFieldQuery {
-  ignore_unmapped?: boolean
  indexed_shape?: QueryDslFieldLookup
-  relation?: ShapeRelation
+  relation?: GeoShapeRelation
  shape?: GeoShape
}

export interface QueryDslShapeQueryKeys extends QueryDslQueryBase {
+  ignore_unmapped?: boolean
}
export type QueryDslShapeQuery = QueryDslShapeQueryKeys
-& { [property: string]: QueryDslShapeFieldQuery | float | string }
+& { [property: string]: QueryDslShapeFieldQuery | boolean | float | string }
+
+export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL'

-export type QueryDslSimpleQueryStringFlags = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL'
+export type QueryDslSimpleQueryStringFlags = QueryDslSimpleQueryStringFlag | string

export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase {
  analyzer?: string
@@ -5063,7 +5391,7 @@ export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase {
  auto_generate_synonyms_phrase_query?: boolean
  default_operator?: QueryDslOperator
  fields?: Field[]
-  flags?: QueryDslSimpleQueryStringFlags | string
+  flags?: QueryDslSimpleQueryStringFlags
  fuzzy_max_expansions?: integer
  fuzzy_prefix_length?: integer
  fuzzy_transpositions?: boolean
@@ -5135,7 +5463,7 @@ export interface QueryDslSpanWithinQuery extends QueryDslQueryBase {
}

export interface QueryDslTermQuery extends QueryDslQueryBase {
-  value: string | float | boolean
+  value: FieldValue
  case_insensitive?: boolean
}
@@ -5149,7 +5477,9 @@ export interface QueryDslTermsLookup {
export interface QueryDslTermsQueryKeys extends QueryDslQueryBase {
}
export type QueryDslTermsQuery = QueryDslTermsQueryKeys
-& { [property: string]: string[] | long[] | QueryDslTermsLookup | float | string }
+& { [property: string]: QueryDslTermsQueryField | float | string }
+
+export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup
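`term` now takes a single FieldValue and `terms` an array of FieldValues or a terms lookup, replacing the looser string/float/boolean unions. A sketch of all three forms; field names and the lookup target are hypothetical:

const query = {
  bool: {
    filter: [
      { term: { 'user.id': { value: 'kimchy' } } },                               // one FieldValue
      { terms: { 'http.response.status_code': [200, 201] } },                     // FieldValue[]
      { terms: { 'user.id': { index: 'users', id: '1', path: 'followers' } } }    // QueryDslTermsLookup
    ]
  }
}
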
export interface QueryDslTermsSetQuery extends QueryDslQueryBase {
  minimum_should_match_field?: Field
@@ -5159,17 +5489,6 @@ export interface QueryDslTermsSetQuery extends QueryDslQueryBase {
export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix'

-export interface QueryDslThreeDimensionalPoint {
-  lat: double
-  lon: double
-  z?: double
-}
-
-export interface QueryDslTwoDimensionalPoint {
-  lat: double
-  lon: double
-}
-
export interface QueryDslTypeQuery extends QueryDslQueryBase {
  value: string
}
@@ -5249,7 +5568,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase {
  analyze_wildcard?: boolean
  batched_reduce_size?: long
  ccs_minimize_roundtrips?: boolean
-  default_operator?: DefaultOperator
+  default_operator?: QueryDslOperator
  df?: string
  docvalue_fields?: Fields
  expand_wildcards?: ExpandWildcards
@@ -5273,12 +5592,12 @@ export interface AsyncSearchSubmitRequest extends RequestBase {
  suggest_text?: string
  terminate_after?: long
  timeout?: Time
-  track_total_hits?: boolean | integer
+  track_total_hits?: SearchTrackHits
  track_scores?: boolean
  typed_keys?: boolean
  rest_total_hits_as_int?: boolean
  version?: boolean
-  _source?: boolean | Fields
+  _source?: SearchSourceConfigParam
  _source_excludes?: Fields
  _source_includes?: Fields
  seq_no_primary_term?: boolean
@@ -5299,8 +5618,8 @@ export interface AsyncSearchSubmitRequest extends RequestBase {
  script_fields?: Record
  search_after?: SearchSortResults
  slice?: SlicedScroll
-  fields?: (Field | DateField)[]
-  suggest?: SearchSuggestContainer | Record
+  fields?: (QueryDslFieldAndFormat | Field)[]
+  suggest?: SearchSuggester
  pit?: SearchPointInTimeReference
  runtime_mappings?: MappingRuntimeFields
}
@@ -5519,7 +5838,6 @@ export interface CatHealthHealthRecord {
}

export interface CatHealthRequest extends CatCatRequestBase {
-  include_timestamp?: boolean
  ts?: boolean
}
@@ -5828,7 +6146,7 @@ export interface CatIndicesRequest extends CatCatRequestBase {
  index?: Indices
  bytes?: Bytes
  expand_wildcards?: ExpandWildcards
-  health?: Health
+  health?: HealthStatus
  include_unloaded_segments?: boolean
  pri?: boolean
}
@@ -6975,7 +7293,7 @@ export interface CatTemplatesTemplatesRecord {
export interface CatThreadPoolRequest extends CatCatRequestBase {
  thread_pool_patterns?: Names
-  size?: Size | boolean
+  size?: CatThreadPoolThreadPoolSize
}

export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[]
@@ -7023,6 +7341,8 @@ export interface CatThreadPoolThreadPoolRecord {
  ka?: string
}

+export type CatThreadPoolThreadPoolSize = 'k' | 'm' | 'g' | 't' | 'p'
+
export interface CatTransformsRequest extends CatCatRequestBase {
  transform_id?: Id
  allow_no_match?: boolean
@@ -7346,8 +7666,6 @@ export interface CcrUnfollowRequest extends RequestBase {
export interface CcrUnfollowResponse extends AcknowledgedResponseBase {
}

-export type ClusterClusterStatus = 'green' | 'yellow' | 'red'
-
export interface ClusterComponentTemplate {
  name: Name
  component_template: ClusterComponentTemplateNode
@@ -7539,7 +7857,7 @@ export interface ClusterHealthIndexHealthStats {
  number_of_shards: integer
  relocating_shards: integer
  shards?: Record
-  status: Health
+  status: HealthStatus
  unassigned_shards: integer
}
@@ -7555,7 +7873,7 @@ export interface ClusterHealthRequest extends RequestBase {
  wait_for_nodes?: string
  wait_for_no_initializing_shards?: boolean
  wait_for_no_relocating_shards?: boolean
-  wait_for_status?: WaitForStatus
+  wait_for_status?: HealthStatus
}

export interface ClusterHealthResponse {
@@ -7571,7 +7889,7 @@ export interface ClusterHealthResponse {
  number_of_nodes: integer
  number_of_pending_tasks: integer
  relocating_shards: integer
-  status: Health
+  status: HealthStatus
  task_max_waiting_in_queue_millis: EpochMillis
  timed_out: boolean
  unassigned_shards: integer
@@ -7582,7 +7900,7 @@ export interface ClusterHealthShardHealthStats {
  initializing_shards: integer
  primary_active: boolean
  relocating_shards: integer
-  status: Health
+  status: HealthStatus
  unassigned_shards: integer
}
@@ -7959,7 +8277,7 @@ export interface ClusterStatsResponse extends NodesNodesResponseBase {
  cluster_uuid: Uuid
  indices: ClusterStatsClusterIndices
  nodes: ClusterStatsClusterNodes
-  status: ClusterClusterStatus
+  status: HealthStatus
  timestamp: long
}
@@ -8172,8 +8490,8 @@ export interface EqlSearchRequest extends RequestBase {
  timestamp_field?: Field
  fetch_size?: uint
  filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
-  size?: uint | float
-  fields?: (Field | EqlSearchSearchFieldFormatted)[]
+  size?: uint
+  fields?: QueryDslFieldAndFormat | Field
  result_position?: EqlSearchResultPosition
}
@@ -8182,11 +8500,6 @@ export interface EqlSearchResponse extends EqlEqlSearchRespons
export type EqlSearchResultPosition = 'tail' | 'head'

-export interface EqlSearchSearchFieldFormatted {
-  field: Field
-  format?: string
-}
-
export interface FeaturesFeature {
  name: string
  description: string
@@ -8206,6 +8519,21 @@ export interface FeaturesResetFeaturesResponse {
  features: FeaturesFeature[]
}

+export type FleetCheckpoint = long
+
+export interface FleetGlobalCheckpointsRequest extends RequestBase {
+  index: IndexName | IndexAlias
+  wait_for_advance?: boolean
+  wait_for_index?: boolean
+  checkpoints?: FleetCheckpoint[]
+  timeout?: Time
+}
+
+export interface FleetGlobalCheckpointsResponse {
+  global_checkpoints: FleetCheckpoint[]
+  timed_out: boolean
+}
+
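These Fleet types back the global checkpoints API added to the client earlier in this series. A hedged usage sketch; the target index is hypothetical and the 7.x `{ body }` response shape is assumed:

const { body } = await client.fleet.globalCheckpoints({
  index: 'my-fleet-index',
  wait_for_advance: true,   // block until the checkpoints advance past the given values
  checkpoints: [0],
  timeout: '30s'
})
console.log(body.global_checkpoints, body.timed_out)
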
export interface GraphConnection {
  doc_count: long
  source: long
@@ -8270,13 +8598,11 @@ export interface GraphExploreResponse {
  vertices: GraphVertex[]
}

-export interface IlmAction {
-}
+export type IlmActions = any

export interface IlmPhase {
-  actions?: Record | string[]
+  actions?: IlmActions
  min_age?: Time
-  configurations?: Record>
}

export interface IlmPhases {
@@ -8436,15 +8762,13 @@ export interface IndicesDataStream {
  hidden?: boolean
}

-export type IndicesDataStreamHealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED'
-
export interface IndicesFielddataFrequencyFilter {
  max: double
  min: double
  min_segment_size: integer
}

-export type IndicesIndexCheckOnStartup = 'false' | 'checksum' | 'true'
+export type IndicesIndexCheckOnStartup = boolean | 'false' | 'checksum' | 'true'

export interface IndicesIndexRouting {
  allocation?: IndicesIndexRoutingAllocation
@@ -8621,14 +8945,10 @@ export interface IndicesIndexSettingsLifecycle {
export interface IndicesIndexState {
  aliases?: Record
  mappings?: MappingTypeMapping
-  settings?: IndicesIndexSettings | IndicesIndexStatePrefixedSettings
+  settings?: IndicesIndexSettings
  data_stream?: DataStreamName
}

-export interface IndicesIndexStatePrefixedSettings {
-  index: IndicesIndexSettings
-}
-
export interface IndicesIndexVersioning {
  created: VersionString
}
@@ -8639,11 +8959,6 @@ export interface IndicesNumericFielddata {
export type IndicesNumericFielddataFormat = 'array' | 'disabled'

-export interface IndicesOverlappingIndexTemplate {
-  name: Name
-  index_patterns?: IndexName[]
-}
-
export type IndicesSegmentSortMissing = '_last' | '_first'

export type IndicesSegmentSortMode = 'min' | 'max'
@@ -8734,13 +9049,13 @@ export interface IndicesAnalyzeRequest extends RequestBase {
  index?: IndexName
  analyzer?: string
  attributes?: string[]
-  char_filter?: (string | AnalysisCharFilter)[]
+  char_filter?: AnalysisCharFilter[]
  explain?: boolean
  field?: Field
-  filter?: (string | AnalysisTokenFilter)[]
+  filter?: AnalysisTokenFilter[]
  normalizer?: string
  text?: IndicesAnalyzeTextToAnalyze
-  tokenizer?: string | AnalysisTokenizer
+  tokenizer?: AnalysisTokenizer
}

export interface IndicesAnalyzeResponse {
@@ -9028,7 +9343,7 @@ export interface IndicesGetDataStreamIndicesGetDataStreamItem {
  template: Name
  hidden: boolean
  system?: boolean
-  status: IndicesDataStreamHealthStatus
+  status: HealthStatus
  ilm_policy?: Name
  _meta?: Metadata
}
@@ -9219,7 +9534,7 @@ export interface IndicesPutMappingRequest extends RequestBase {
  timeout?: Time
  write_index_only?: boolean
  date_detection?: boolean
-  dynamic?: boolean | MappingDynamicMapping
+  dynamic?: MappingDynamicMapping
  dynamic_date_formats?: string[]
  dynamic_templates?: Record | Record[]
  _field_names?: MappingFieldNamesField
@@ -9430,6 +9745,8 @@ export interface IndicesResolveIndexResponse {
  data_streams: IndicesResolveIndexResolveIndexDataStreamsItem[]
}

+export type IndicesRolloverIndexRolloverMapping = MappingTypeMapping | Record
+
export interface IndicesRolloverRequest extends RequestBase {
  alias: IndexAlias
  new_index?: IndexName
@@ -9440,7 +9757,7 @@ export interface IndicesRolloverRequest extends RequestBase {
  wait_for_active_shards?: WaitForActiveShards
  aliases?: Record
  conditions?: IndicesRolloverRolloverConditions
-  mappings?: Record | MappingTypeMapping
+  mappings?: IndicesRolloverIndexRolloverMapping
  settings?: Record
}
@@ -9512,15 +9829,13 @@ export interface IndicesShardStoresRequest extends RequestBase {
  allow_no_indices?: boolean
  expand_wildcards?: ExpandWildcards
  ignore_unavailable?: boolean
-  status?: IndicesShardStoresShardStatus | IndicesShardStoresShardStatus[]
+  status?: IndicesShardStoresShardStoreStatus | IndicesShardStoresShardStoreStatus[]
}

export interface IndicesShardStoresResponse {
  indices: Record
}

-export type IndicesShardStoresShardStatus = 'green' | 'yellow' | 'red' | 'all'
-
export interface IndicesShardStoresShardStore {
  allocation: IndicesShardStoresShardStoreAllocation
  allocation_id: Id
@@ -9539,6 +9854,8 @@ export interface IndicesShardStoresShardStoreException {
  type: string
}

+export type IndicesShardStoresShardStoreStatus = 'green' | 'yellow' | 'red' | 'all'
+
export interface IndicesShardStoresShardStoreWrapper {
  stores: IndicesShardStoresShardStore[]
}
@@ -9560,10 +9877,16 @@ export interface IndicesShrinkResponse extends AcknowledgedResponseBase {
export interface IndicesSimulateIndexTemplateRequest extends RequestBase {
  name: Name
-  index_patterns?: IndexName[]
+  create?: boolean
+  master_timeout?: Time
+  allow_auto_create?: boolean
+  index_patterns?: Indices
  composed_of?: Name[]
-  overlapping?: IndicesOverlappingIndexTemplate[]
-  template?: IndicesTemplateMapping
+  template?: IndicesPutIndexTemplateIndexTemplateMapping
+  data_stream?: IndicesDataStream
+  priority?: integer
+  version?: VersionNumber
+  _meta?: Metadata
}

export interface IndicesSimulateIndexTemplateResponse {
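The simulate-index-template request now mirrors the put-index-template body instead of the removed `overlapping` shape. A hedged sketch of the expanded request; the template name and payload are hypothetical:

const { body } = await client.indices.simulateIndexTemplate({
  name: 'my-template',
  body: {
    index_patterns: ['logs-*'],
    priority: 10,
    template: { settings: { number_of_shards: 1 } }
  }
})
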
@@ -9815,7 +10138,7 @@ export interface IndicesValidateQueryRequest extends RequestBase {
  all_shards?: boolean
  analyzer?: string
  analyze_wildcard?: boolean
-  default_operator?: DefaultOperator
+  default_operator?: QueryDslOperator
  df?: string
  expand_wildcards?: ExpandWildcards
  explain?: boolean
@@ -9885,7 +10208,7 @@ export interface IngestCsvProcessor extends IngestProcessorBase {
export interface IngestDateIndexNameProcessor extends IngestProcessorBase {
  date_formats: string[]
-  date_rounding: string | IngestDateRounding
+  date_rounding: string
  field: Field
  index_name_format: string
  index_name_prefix: string
@@ -9901,8 +10224,6 @@ export interface IngestDateProcessor extends IngestProcessorBase {
  timezone?: string
}

-export type IngestDateRounding = 's' | 'm' | 'h' | 'd' | 'w' | 'M' | 'y'
-
export interface IngestDissectProcessor extends IngestProcessorBase {
  append_separator: string
  field: Field
@@ -10405,7 +10726,7 @@ export interface MigrationDeprecationsResponse {
export interface MlAnalysisConfig {
  bucket_span: TimeSpan
-  categorization_analyzer?: MlCategorizationAnalyzer | string
+  categorization_analyzer?: MlCategorizationAnalyzer
  categorization_field_name?: Field
  categorization_filters?: string[]
  detectors: MlDetector[]
@@ -10419,7 +10740,7 @@ export interface MlAnalysisConfig {
export interface MlAnalysisConfigRead {
  bucket_span: TimeSpan
-  categorization_analyzer?: MlCategorizationAnalyzer | string
+  categorization_analyzer?: MlCategorizationAnalyzer
  categorization_field_name?: Field
  categorization_filters?: string[]
  detectors: MlDetector[]
@@ -10485,17 +10806,16 @@ export interface MlAnomalyCause {
export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time'

export interface MlBucketInfluencer {
+  anomaly_score: double
  bucket_span: long
-  influencer_score: double
  influencer_field_name: Field
-  influencer_field_value: string
-  initial_influencer_score: double
+  initial_anomaly_score: double
  is_interim: boolean
  job_id: Id
  probability: double
+  raw_anomaly_score: double
  result_type: string
  timestamp: Time
-  foo?: string
}

export interface MlBucketSummary {
@@ -10506,7 +10826,6 @@ export interface MlBucketSummary {
  initial_anomaly_score: double
  is_interim: boolean
  job_id: Id
-  partition_scores?: MlPartitionScore[]
  processing_time_ms: double
  result_type: string
  timestamp: Time
@@ -10520,10 +10839,12 @@ export interface MlCalendarEvent {
  start_time: EpochMillis
}

-export interface MlCategorizationAnalyzer {
-  char_filter?: (string | AnalysisCharFilter)[]
-  filter?: (string | AnalysisTokenFilter)[]
-  tokenizer?: string | AnalysisTokenizer
+export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition
+
+export interface MlCategorizationAnalyzerDefinition {
+  char_filter?: AnalysisCharFilter[]
+  filter?: AnalysisTokenFilter[]
+  tokenizer?: AnalysisTokenizer
}

export type MlCategorizationStatus = 'ok' | 'warn'
@@ -10554,11 +10875,7 @@ export type MlChunkingMode = 'auto' | 'manual' | 'off'
export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte'

-export interface MlCustomSettings {
-  custom_urls?: XpackUsageUrlConfig[]
-  created_by?: string
-  job_tags?: Record
-}
+export type MlCustomSettings = any

export interface MlDataCounts {
  bucket_count: long
@@ -10674,9 +10991,7 @@ export interface MlDataframeAnalysis {
  training_percent?: Percentage
}

-export type MlDataframeAnalysisAnalyzedFields = string[] | MlDataframeAnalysisAnalyzedFieldsIncludeExclude
-
-export interface MlDataframeAnalysisAnalyzedFieldsIncludeExclude {
+export interface MlDataframeAnalysisAnalyzedFields {
  includes: string[]
  excludes: string[]
}
@@ -10779,7 +11094,7 @@ export interface MlDataframeAnalyticsSource {
  index: Indices
  query?: QueryDslQueryContainer
  runtime_mappings?: MappingRuntimeFields
-  _source?: MlDataframeAnalysisAnalyzedFields
+  _source?: MlDataframeAnalysisAnalyzedFields | string[]
}

export interface MlDataframeAnalyticsStatsContainer {
@@ -10828,7 +11143,7 @@ export interface MlDataframeAnalyticsSummary {
  description?: string
  model_memory_limit?: string
  max_num_threads?: integer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields
+  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
  allow_lazy_start?: boolean
  create_time?: long
  version?: VersionString
@@ -10975,6 +11290,20 @@ export interface MlInfluence {
  influencer_field_values: string[]
}

+export interface MlInfluencer {
+  bucket_span: long
+  influencer_score: double
+  influencer_field_name: Field
+  influencer_field_value: string
+  initial_influencer_score: double
+  is_interim: boolean
+  job_id: Id
+  probability: double
+  result_type: string
+  timestamp: Time
+  foo?: string
+}
+
export interface MlJob {
  allow_lazy_open: boolean
  analysis_config: MlAnalysisConfig
@@ -11083,10 +11412,10 @@ export interface MlModelSizeStats {
  job_id: Id
  log_time: Time
  memory_status: MlMemoryStatus
-  model_bytes: long
-  model_bytes_exceeded: long
-  model_bytes_memory_limit: long
-  peak_model_bytes: long
+  model_bytes: ByteSize
+  model_bytes_exceeded?: ByteSize
+  model_bytes_memory_limit?: ByteSize
+  peak_model_bytes?: ByteSize
  assignment_memory_basis?: string
  result_type: string
  total_by_field_count: long
@@ -11143,14 +11472,6 @@ export interface MlPage {
  size?: integer
}

-export interface MlPartitionScore {
-  initial_record_score: double
-  partition_field_name: Field
-  partition_field_value: string
-  probability: double
-  record_score: double
-}
-
export interface MlPerPartitionCategorization {
  enabled?: boolean
  stop_on_warn?: boolean
@@ -11188,6 +11509,7 @@ export interface MlTotalFeatureImportanceStatistics {
export interface MlTrainedModelConfig {
  model_id: Id
+  model_type: MlTrainedModelType
  tags: string[]
  version?: VersionString
  compressed_definition?: string
@@ -11229,6 +11551,8 @@ export interface MlTrainedModelStats {
  ingest?: Record
}

+export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch'
+
export interface MlValidationLoss {
  fold_values: string[]
  loss_type: string
@@ -11452,11 +11776,11 @@ export interface MlExplainDataFrameAnalyticsRequest extends RequestBase {
  id?: Id
  source?: MlDataframeAnalyticsSource
  dest?: MlDataframeAnalyticsDestination
-  analysis: MlDataframeAnalysisContainer
+  analysis?: MlDataframeAnalysisContainer
  description?: string
  model_memory_limit?: string
  max_num_threads?: integer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields
+  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
  allow_lazy_start?: boolean
}
@@ -11467,10 +11791,10 @@ export interface MlExplainDataFrameAnalyticsResponse {
export interface MlFlushJobRequest extends RequestBase {
  job_id: Id
-  skip_time?: string
  advance_time?: DateString
  calc_interim?: boolean
  end?: DateString
+  skip_time?: string
  start?: DateString
}
@@ -11493,15 +11817,16 @@ export interface MlForecastResponse extends AcknowledgedResponseBase {
export interface MlGetBucketsRequest extends RequestBase {
  job_id: Id
  timestamp?: Timestamp
+  anomaly_score?: double
+  desc?: boolean
+  end?: DateString
+  exclude_interim?: boolean
+  expand?: boolean
  from?: integer
  size?: integer
-  exclude_interim?: boolean
  sort?: Field
-  desc?: boolean
  start?: DateString
-  end?: DateString
-  anomaly_score?: double
-  expand?: boolean
+
page?: MlPage } export interface MlGetBucketsResponse { @@ -11511,11 +11836,11 @@ export interface MlGetBucketsResponse { export interface MlGetCalendarEventsRequest extends RequestBase { calendar_id: Id - job_id?: Id end?: DateString from?: integer - start?: string + job_id?: Id size?: integer + start?: string } export interface MlGetCalendarEventsResponse { @@ -11628,7 +11953,7 @@ export interface MlGetInfluencersRequest extends RequestBase { export interface MlGetInfluencersResponse { count: long - influencers: MlBucketInfluencer[] + influencers: MlInfluencer[] } export interface MlGetJobStatsRequest extends RequestBase { @@ -11661,6 +11986,7 @@ export interface MlGetModelSnapshotsRequest extends RequestBase { size?: integer sort?: Field start?: Time + page?: MlPage } export interface MlGetModelSnapshotsResponse { @@ -11686,15 +12012,15 @@ export interface MlGetOverallBucketsResponse { export interface MlGetRecordsRequest extends RequestBase { job_id: Id + desc?: boolean + end?: DateString exclude_interim?: boolean from?: integer + record_score?: double size?: integer + sort?: Field start?: DateString - end?: DateString - desc?: boolean page?: MlPage - record_score?: double - sort?: Field } export interface MlGetRecordsResponse { @@ -11816,7 +12142,7 @@ export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig { analysis: MlDataframeAnalysisContainer model_memory_limit?: string max_num_threads?: integer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] } export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase { @@ -11840,6 +12166,7 @@ export interface MlPreviewDatafeedResponse { export interface MlPutCalendarRequest extends RequestBase { calendar_id: Id + job_ids?: Id[] description?: string } @@ -11864,7 +12191,7 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase { id: Id allow_lazy_start?: boolean analysis: MlDataframeAnalysisContainer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] description?: string dest: MlDataframeAnalyticsDestination max_num_threads?: integer @@ -11883,7 +12210,7 @@ export interface MlPutDataFrameAnalyticsResponse { allow_lazy_start: boolean max_num_threads: integer analysis: MlDataframeAnalysisContainer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] } export interface MlPutDatafeedRequest extends RequestBase { @@ -12028,6 +12355,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { inference_config: AggregationsInferenceConfigContainer input: MlPutTrainedModelInput metadata?: any + model_type?: MlTrainedModelType tags?: string[] } @@ -12115,8 +12443,8 @@ export interface MlStartDataFrameAnalyticsResponse extends AcknowledgedResponseB export interface MlStartDatafeedRequest extends RequestBase { datafeed_id: Id - start?: Time end?: Time + start?: Time timeout?: Time } @@ -12166,7 +12494,45 @@ export interface MlUpdateDataFrameAnalyticsResponse { allow_lazy_start: boolean max_num_threads: integer analysis: MlDataframeAnalysisContainer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] +} + +export interface MlUpdateDatafeedRequest extends RequestBase { + datafeed_id: Id + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean + aggregations?: Record + 
chunking_config?: MlChunkingConfig + delayed_data_check_config?: MlDelayedDataCheckConfig + frequency?: Time + indices?: string[] + indexes?: string[] + indices_options?: MlDatafeedIndicesOptions + max_empty_searches?: integer + query?: QueryDslQueryContainer + query_delay?: Time + runtime_mappings?: MappingRuntimeFields + script_fields?: Record + scroll_size?: integer +} + +export interface MlUpdateDatafeedResponse { + aggregations: Record + chunking_config: MlChunkingConfig + delayed_data_check_config?: MlDelayedDataCheckConfig + datafeed_id: Id + frequency: Time + indices: string[] + job_id: Id + indices_options?: MlDatafeedIndicesOptions + max_empty_searches: integer + query: QueryDslQueryContainer + query_delay: Time + runtime_mappings?: MappingRuntimeFields + script_fields?: Record + scroll_size: integer } export interface MlUpdateFilterRequest extends RequestBase { @@ -12503,9 +12869,10 @@ export interface NodesHotThreadsRequest extends RequestBase { ignore_idle_threads?: boolean interval?: Time snapshots?: long + master_timeout?: Time threads?: long - thread_type?: ThreadType timeout?: Time + type?: ThreadType } export interface NodesHotThreadsResponse { @@ -12680,7 +13047,7 @@ export interface NodesInfoNodeInfoSettingsClusterElection { } export interface NodesInfoNodeInfoSettingsHttp { - type: string | NodesInfoNodeInfoSettingsHttpType + type: NodesInfoNodeInfoSettingsHttpType | string 'type.default'?: string compression?: boolean | string port?: integer | string @@ -12738,7 +13105,7 @@ export interface NodesInfoNodeInfoSettingsNode { } export interface NodesInfoNodeInfoSettingsTransport { - type: string | NodesInfoNodeInfoSettingsTransportType + type: NodesInfoNodeInfoSettingsTransportType | string 'type.default'?: string features?: NodesInfoNodeInfoSettingsTransportFeatures } @@ -12859,16 +13226,12 @@ export interface NodesInfoResponse extends NodesNodesResponseBase { nodes: Record } -export interface NodesReloadSecureSettingsNodeReloadException { +export interface NodesReloadSecureSettingsNodeReloadError { name: Name - reload_exception?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy + reload_exception?: ErrorCause } -export interface NodesReloadSecureSettingsNodeReloadExceptionCausedBy { - type: string - reason: string - caused_by?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy -} +export type NodesReloadSecureSettingsNodeReloadResult = NodesStats | NodesReloadSecureSettingsNodeReloadError export interface NodesReloadSecureSettingsRequest extends RequestBase { node_id?: NodeIds @@ -12878,7 +13241,7 @@ export interface NodesReloadSecureSettingsRequest extends RequestBase { export interface NodesReloadSecureSettingsResponse extends NodesNodesResponseBase { cluster_name: Name - nodes: Record + nodes: Record } export interface NodesStatsRequest extends RequestBase { @@ -13209,7 +13572,7 @@ export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity | SecurityFieldSecurity[] names: Indices privileges: SecurityIndexPrivilege[] - query?: string | string[] | QueryDslQueryContainer + query?: string | string[] allow_restricted_indices?: boolean } @@ -13275,6 +13638,7 @@ export interface SecurityChangePasswordRequest extends RequestBase { username?: Username refresh?: Refresh password?: Password + password_hash?: string } export interface SecurityChangePasswordResponse { @@ -13481,20 +13845,6 @@ export interface SecurityGetPrivilegesRequest extends RequestBase { export interface SecurityGetPrivilegesResponse extends DictionaryResponseBase> { } -export 
interface SecurityGetRoleInlineRoleTemplate { - template: SecurityGetRoleInlineRoleTemplateSource - format?: SecurityGetRoleTemplateFormat -} - -export interface SecurityGetRoleInlineRoleTemplateSource { - source: string -} - -export interface SecurityGetRoleInvalidRoleTemplate { - template: string - format?: SecurityGetRoleTemplateFormat -} - export interface SecurityGetRoleRequest extends RequestBase { name?: Name } @@ -13512,15 +13862,9 @@ export interface SecurityGetRoleRole { role_templates?: SecurityGetRoleRoleTemplate[] } -export type SecurityGetRoleRoleTemplate = SecurityGetRoleInlineRoleTemplate | SecurityGetRoleStoredRoleTemplate | SecurityGetRoleInvalidRoleTemplate - -export interface SecurityGetRoleStoredRoleTemplate { - template: SecurityGetRoleStoredRoleTemplateId +export interface SecurityGetRoleRoleTemplate { format?: SecurityGetRoleTemplateFormat -} - -export interface SecurityGetRoleStoredRoleTemplateId { - id: string + template: Script } export type SecurityGetRoleTemplateFormat = 'string' | 'json' @@ -14294,7 +14638,7 @@ export interface SqlTranslateRequest extends RequestBase { export interface SqlTranslateResponse { size: long - _source: boolean | Fields | SearchSourceFilter + _source: SearchSourceConfig fields: Record[] sort: SearchSort } @@ -14314,6 +14658,8 @@ export interface SslCertificatesRequest extends RequestBase { export type SslCertificatesResponse = SslCertificatesCertificateInformation[] +export type TasksGroupBy = 'nodes' | 'parents' | 'none' + export interface TasksInfo { action: string cancellable: boolean @@ -14396,7 +14742,7 @@ export interface TasksGetResponse { export interface TasksListRequest extends RequestBase { actions?: string | string[] detailed?: boolean - group_by?: GroupBy + group_by?: TasksGroupBy nodes?: string[] parent_task_id?: Id timeout?: Time @@ -14406,7 +14752,7 @@ export interface TasksListRequest extends RequestBase { export interface TasksListResponse { node_failures?: ErrorCause[] nodes?: Record - tasks?: Record | TasksInfo[] + tasks?: Record } export interface TextStructureFindStructureFieldStat { @@ -14775,7 +15121,7 @@ export type WatcherConnectionScheme = 'http' | 'https' export type WatcherCronExpression = string export interface WatcherDailySchedule { - at: string[] | WatcherTimeOfDay + at: WatcherTimeOfDay[] } export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday' @@ -14833,6 +15179,11 @@ export interface WatcherExecutionThreadPool { queue_size: long } +export interface WatcherHourAndMinute { + hour: integer[] + minute: integer[] +} + export interface WatcherHourlySchedule { minute: integer[] } @@ -14990,8 +15341,8 @@ export interface WatcherScheduleContainer { } export interface WatcherScheduleTriggerEvent { - scheduled_time: DateString | string - triggered_time?: DateString | string + scheduled_time: DateString + triggered_time?: DateString } export interface WatcherScriptCondition { @@ -15073,10 +15424,7 @@ export interface WatcherThrottleState { timestamp: DateString } -export interface WatcherTimeOfDay { - hour: integer[] - minute: integer[] -} +export type WatcherTimeOfDay = string | WatcherHourAndMinute export interface WatcherTimeOfMonth { at: string[] @@ -15217,7 +15565,7 @@ export interface WatcherPutWatchRequest extends RequestBase { id: Id active?: boolean if_primary_term?: long - if_sequence_number?: long + if_seq_no?: SequenceNumber version?: VersionNumber actions?: Record condition?: WatcherConditionContainer @@ -15279,7 +15627,7 @@ export interface 
WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQue watch_record_id: Id } -export type WatcherStatsWatcherMetric = '_all' | 'queued_watches' | 'current_watches' | 'pending_watches' +export type WatcherStatsWatcherMetric = '_all' | 'all' | 'queued_watches' | 'current_watches' | 'pending_watches' export interface WatcherStatsWatcherNodeStats { current_watches?: WatcherStatsWatchRecordStats[] @@ -15397,11 +15745,6 @@ export interface XpackUsageBase { enabled: boolean } -export interface XpackUsageBaseUrlConfig { - url_name: string - url_value: string -} - export interface XpackUsageCcr extends XpackUsageBase { auto_follow_patterns_count: integer follower_indices_count: integer @@ -15514,13 +15857,15 @@ export interface XpackUsageIpFilter { transport: boolean } -export interface XpackUsageKibanaUrlConfig extends XpackUsageBaseUrlConfig { - time_range?: string +export interface XpackUsageJobsKeys { + _all?: XpackUsageAllJobs } +export type XpackUsageJobs = XpackUsageJobsKeys +& { [property: string]: MlJob | XpackUsageAllJobs } export interface XpackUsageMachineLearning extends XpackUsageBase { datafeeds: Record - jobs: Record | Partial> + jobs: XpackUsageJobs node_count: integer data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs inference: XpackUsageMlInference @@ -15728,8 +16073,6 @@ export interface XpackUsageSsl { transport: XpackUsageFeatureToggle } -export type XpackUsageUrlConfig = XpackUsageBaseUrlConfig | XpackUsageKibanaUrlConfig - export interface XpackUsageVector extends XpackUsageBase { dense_vector_dims_avg_count: integer dense_vector_fields_count: integer @@ -15779,7 +16122,6 @@ export interface SpecUtilsCommonQueryParameters { filter_path?: string | string[] human?: boolean pretty?: boolean - source_query_string?: string } export interface SpecUtilsCommonCatQueryParameters { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 3e5780abb..be3b3a16d 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -62,7 +62,7 @@ export interface BulkRequest extends RequestBase { pipeline?: string refresh?: Refresh routing?: Routing - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields timeout?: Time @@ -135,7 +135,7 @@ export interface CountRequest extends RequestBase { allow_no_indices?: boolean analyzer?: string analyze_wildcard?: boolean - default_operator?: DefaultOperator + default_operator?: QueryDslOperator df?: string expand_wildcards?: ExpandWildcards ignore_throttled?: boolean @@ -198,7 +198,7 @@ export interface DeleteByQueryRequest extends RequestBase { analyzer?: string analyze_wildcard?: boolean conflicts?: Conflicts - default_operator?: DefaultOperator + default_operator?: QueryDslOperator df?: string expand_wildcards?: ExpandWildcards from?: long @@ -218,7 +218,7 @@ export interface DeleteByQueryRequest extends RequestBase { size?: long slices?: long sort?: string[] - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields stats?: string[] @@ -276,7 +276,7 @@ export interface ExistsRequest extends RequestBase { realtime?: boolean refresh?: boolean routing?: Routing - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields stored_fields?: Fields @@ -294,7 +294,7 @@ export interface ExistsSourceRequest extends RequestBase { realtime?: boolean refresh?: boolean routing?: Routing - _source?: boolean | Fields + _source?: 
SearchSourceConfigParam
   _source_excludes?: Fields
   _source_includes?: Fields
   version?: VersionNumber
@@ -320,12 +320,12 @@ export interface ExplainRequest extends RequestBase {
   index: IndexName
   analyzer?: string
   analyze_wildcard?: boolean
-  default_operator?: DefaultOperator
+  default_operator?: QueryDslOperator
   df?: string
   lenient?: boolean
   preference?: string
   routing?: Routing
-  _source?: boolean | Fields
+  _source?: SearchSourceConfigParam
   _source_excludes?: Fields
   _source_includes?: Fields
   stored_fields?: Fields
@@ -375,6 +375,19 @@ export interface FieldCapsResponse {
   fields: Record<Field, Record<string, FieldCapsFieldCapability>>
 }
 
+export interface GetGetResult<TDocument = unknown> {
+  _index: IndexName
+  fields?: Record<string, any>
+  found: boolean
+  _id: Id
+  _primary_term?: long
+  _routing?: string
+  _seq_no?: SequenceNumber
+  _source?: TDocument
+  _type?: Type
+  _version?: VersionNumber
+}
+
 export interface GetRequest extends RequestBase {
   id: Id
   index: IndexName
@@ -382,7 +395,7 @@
   realtime?: boolean
   refresh?: boolean
   routing?: Routing
-  _source?: boolean | Fields
+  _source?: SearchSourceConfigParam
   _source_excludes?: Fields
   _source_includes?: Fields
   stored_fields?: Fields
@@ -390,18 +403,7 @@
   version?: VersionNumber
   version_type?: VersionType
 }
 
-export interface GetResponse<TDocument = unknown> {
-  _index: IndexName
-  fields?: Record<string, any>
-  found: boolean
-  _id: Id
-  _primary_term?: long
-  _routing?: string
-  _seq_no?: SequenceNumber
-  _source?: TDocument
-  _type?: Type
-  _version?: VersionNumber
-}
+export type GetResponse<TDocument = unknown> = GetGetResult<TDocument>
 
 export interface GetScriptRequest extends RequestBase {
   id: Id
@@ -457,7 +459,7 @@ export interface GetSourceRequest {
   realtime?: boolean
   refresh?: boolean
   routing?: Routing
-  _source?: boolean | Fields
+  _source?: SearchSourceConfigParam
   _source_excludes?: Fields
   _source_includes?: Fields
   stored_fields?: Fields
@@ -504,8 +506,8 @@ export interface KnnSearchRequest extends RequestBase {
   routing?: Routing
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
-    _source?: boolean | Fields | SearchSourceFilter
-    docvalue_fields?: SearchDocValueField | (Field | SearchDocValueField)[]
+    _source?: SearchSourceConfig
+    docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
     stored_fields?: Fields
     fields?: Fields
     knn: KnnSearchQuery
@@ -530,27 +532,18 @@ export interface KnnSearchQuery {
 
 export type KnnSearchQueryVector = double[]
 
-export interface MgetHit<TDocument = unknown> {
-  error?: ErrorCause
-  fields?: Record<string, any>
-  found?: boolean
+export interface MgetMultiGetError {
+  error: ErrorCause
   _id: Id
   _index: IndexName
-  _primary_term?: long
-  _routing?: Routing
-  _seq_no?: SequenceNumber
-  _source?: TDocument
   _type?: Type
-  _version?: VersionNumber
 }
 
-export type MgetMultiGetId = string | integer
-
 export interface MgetOperation {
-  _id: MgetMultiGetId
+  _id: Id
   _index?: IndexName
   routing?: Routing
-  _source?: boolean | Fields | SearchSourceFilter
+  _source?: SearchSourceConfig
   stored_fields?: Fields
   _type?: Type
   version?: VersionNumber
@@ -563,33 +556,44 @@ export interface MgetRequest extends RequestBase {
   realtime?: boolean
   refresh?: boolean
   routing?: Routing
-  _source?: boolean | Fields
+  _source?: SearchSourceConfigParam
   _source_excludes?: Fields
   _source_includes?: Fields
   stored_fields?: Fields
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { docs?: MgetOperation[] - ids?: MgetMultiGetId[] + ids?: Ids } } export interface MgetResponse { - docs: MgetHit[] + docs: MgetResponseItem[] } -export interface MsearchBody { +export type MgetResponseItem = GetGetResult | MgetMultiGetError + +export interface MsearchMultiSearchItem extends SearchResponse { + status: integer +} + +export interface MsearchMultiSearchResult { + took: long + responses: MsearchResponseItem[] +} + +export interface MsearchMultisearchBody { aggregations?: Record aggs?: Record query?: QueryDslQueryContainer from?: integer size?: integer pit?: SearchPointInTimeReference - track_total_hits?: boolean | integer - suggest?: SearchSuggestContainer | Record + track_total_hits?: SearchTrackHits + suggest?: SearchSuggester } -export interface MsearchHeader { +export interface MsearchMultisearchHeader { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean @@ -614,17 +618,14 @@ export interface MsearchRequest extends RequestBase { rest_total_hits_as_int?: boolean typed_keys?: boolean /** @deprecated The use of the 'body' key has been deprecated, use 'searches' instead. */ - body?: (MsearchHeader | MsearchBody)[] + body?: MsearchRequestItem[] } -export interface MsearchResponse { - took: long - responses: (MsearchSearchResult | ErrorResponseBase)[] -} +export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody -export interface MsearchSearchResult extends SearchResponse { - status: integer -} +export type MsearchResponse = MsearchMultiSearchResult + +export type MsearchResponseItem = MsearchMultiSearchItem | ErrorResponseBase export interface MsearchTemplateRequest extends RequestBase { index?: Indices @@ -634,18 +635,18 @@ export interface MsearchTemplateRequest extends RequestBase { rest_total_hits_as_int?: boolean typed_keys?: boolean /** @deprecated The use of the 'body' key has been deprecated, use 'search_templates' instead. */ - body?: MsearchTemplateTemplateItem[] + body?: MsearchTemplateRequestItem[] } -export interface MsearchTemplateResponse { - responses: (SearchResponse | ErrorResponseBase)[] - took: long -} +export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig -export interface MsearchTemplateTemplateItem { +export type MsearchTemplateResponse = MsearchMultiSearchResult + +export interface MsearchTemplateTemplateConfig { + explain?: boolean id?: Id - index?: Indices params?: Record + profile?: boolean source?: string } @@ -702,7 +703,7 @@ export interface MtermvectorsTermVectorsResult { export interface OpenPointInTimeRequest extends RequestBase { index: Indices - keep_alive?: Time + keep_alive: Time } export interface OpenPointInTimeResponse { @@ -721,7 +722,7 @@ export interface PutScriptRequest extends RequestBase { timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { - script?: StoredScript + script: StoredScript } } @@ -955,7 +956,7 @@ export interface ScriptsPainlessExecuteRequest extends RequestBase { body?: { context?: string context_setup?: ScriptsPainlessExecutePainlessContextSetup - script?: InlineScript + script?: InlineScript | string } } @@ -985,7 +986,7 @@ export interface SearchRequest extends RequestBase { analyze_wildcard?: boolean batched_reduce_size?: long ccs_minimize_roundtrips?: boolean - default_operator?: DefaultOperator + default_operator?: QueryDslOperator df?: string docvalue_fields?: Fields expand_wildcards?: ExpandWildcards @@ -1009,12 +1010,12 @@ export interface SearchRequest extends RequestBase { suggest_text?: string terminate_after?: long timeout?: Time - track_total_hits?: boolean | integer + track_total_hits?: SearchTrackHits track_scores?: boolean typed_keys?: boolean rest_total_hits_as_int?: boolean version?: boolean - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields seq_no_primary_term?: boolean @@ -1030,9 +1031,9 @@ export interface SearchRequest extends RequestBase { explain?: boolean from?: integer highlight?: SearchHighlight - track_total_hits?: boolean | integer + track_total_hits?: SearchTrackHits indices_boost?: Record[] - docvalue_fields?: SearchDocValueField | (Field | SearchDocValueField)[] + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -1043,9 +1044,9 @@ export interface SearchRequest extends RequestBase { size?: integer slice?: SlicedScroll sort?: SearchSort - _source?: boolean | Fields | SearchSourceFilter - fields?: (Field | DateField)[] - suggest?: SearchSuggestContainer | Record + _source?: SearchSourceConfig + fields?: (QueryDslFieldAndFormat | Field)[] + suggest?: SearchSuggester terminate_after?: long timeout?: string track_scores?: boolean @@ -1141,6 +1142,8 @@ export interface SearchAggregationProfileDelegateDebugFilter { export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' +export type SearchBuiltinHighlighterType = 'plain' | 'fvh' | 'unified' + export interface SearchCollector { name: string reason: string @@ -1148,6 +1151,14 @@ export interface SearchCollector { children?: SearchCollector[] } +export interface SearchCompletionContext { + boost?: double + context: SearchContext + neighbours?: GeoHashPrecision[] + precision?: GeoHashPrecision + prefix?: boolean +} + export interface SearchCompletionSuggestOption { collate_match?: boolean contexts?: Record @@ -1162,14 +1173,14 @@ export interface SearchCompletionSuggestOption { } export interface SearchCompletionSuggester extends SearchSuggesterBase { - contexts?: Record + contexts?: Record fuzzy?: SearchSuggestFuzziness prefix?: string regex?: string skip_duplicates?: boolean } -export type SearchContext = string | QueryDslGeoLocation +export type SearchContext = string | GeoLocation export interface SearchDirectGenerator { field: Field @@ -1185,11 +1196,6 @@ export interface SearchDirectGenerator { suggest_mode?: SuggestMode } -export interface SearchDocValueField { - field: Field - format?: string -} - export interface SearchFetchProfile { type: string description: string @@ -1213,12 +1219,6 @@ export interface SearchFetchProfileDebug { fast_path?: integer } -export interface SearchFieldAndFormat { - field: Field - format?: string - include_unmapped?: boolean -} - export interface SearchFieldCollapse { field: Field inner_hits?: SearchInnerHits | SearchInnerHits[] @@ 
-1231,16 +1231,30 @@ export interface SearchFieldSort {
   nested?: SearchNestedSortValue
   order?: SearchSortOrder
   unmapped_type?: MappingFieldType
+  numeric_type?: SearchFieldSortNumericType
+  format?: string
+}
+
+export type SearchFieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos'
+
+export interface SearchFieldSuggester {
+  completion?: SearchCompletionSuggester
+  phrase?: SearchPhraseSuggester
+  prefix?: string
+  regex?: string
+  term?: SearchTermSuggester
+  text?: string
 }
 
 export interface SearchGeoDistanceSortKeys {
   mode?: SearchSortMode
   distance_type?: GeoDistanceType
+  ignore_unmapped?: boolean
   order?: SearchSortOrder
   unit?: DistanceUnit
 }
 export type SearchGeoDistanceSort = SearchGeoDistanceSortKeys
-& { [property: string]: QueryDslGeoLocation | QueryDslGeoLocation[] | SearchSortMode | GeoDistanceType | SearchSortOrder | DistanceUnit }
+& { [property: string]: GeoLocation | GeoLocation[] | SearchSortMode | GeoDistanceType | boolean | SearchSortOrder | DistanceUnit }
 
 export interface SearchHighlight {
   fields: Record<Field, SearchHighlightField>
@@ -1286,7 +1300,7 @@ export interface SearchHighlightField {
   pre_tags?: string[]
   require_field_match?: boolean
   tags_schema?: SearchHighlighterTagsSchema
-  type?: SearchHighlighterType | string
+  type?: SearchHighlighterType
 }
 
 export type SearchHighlighterEncoder = 'default' | 'html'
@@ -1297,7 +1311,7 @@ export type SearchHighlighterOrder = 'score'
 
 export type SearchHighlighterTagsSchema = 'styled'
 
-export type SearchHighlighterType = 'plain' | 'fvh' | 'unified'
+export type SearchHighlighterType = SearchBuiltinHighlighterType | string
 
 export interface SearchHit<TDocument = unknown> {
   _index: IndexName
@@ -1332,7 +1346,7 @@ export interface SearchInnerHits {
   size?: integer
   from?: integer
   collapse?: SearchFieldCollapse
-  docvalue_fields?: (SearchFieldAndFormat | Field)[]
+  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
   explain?: boolean
   highlight?: SearchHighlight
   ignore_unmapped?: boolean
@@ -1340,20 +1354,14 @@
   seq_no_primary_term?: boolean
   fields?: Fields
   sort?: SearchSort
-  _source?: boolean | SearchSourceFilter
+  _source?: SearchSourceConfig
   stored_field?: Fields
   track_scores?: boolean
   version?: boolean
 }
 
-export interface SearchInnerHitsMetadata {
-  total: SearchTotalHits | long
-  hits: SearchHit<Record<string, any>>[]
-  max_score?: double
-}
 
 export interface SearchInnerHitsResult {
-  hits: SearchInnerHitsMetadata
+  hits: SearchHitsMetadata<any>
 }
 
 export interface SearchLaplaceSmoothingModel {
@@ -1375,6 +1383,7 @@ export interface SearchNestedIdentity {
 
 export interface SearchNestedSortValue {
   filter?: QueryDslQueryContainer
   max_children?: integer
+  nested?: SearchNestedSortValue
   path: Field
 }
 
@@ -1469,16 +1478,19 @@ export interface SearchRescoreQuery {
 
 export type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total'
 
 export interface SearchScoreSort {
-  mode?: SearchSortMode
   order?: SearchSortOrder
 }
 
 export interface SearchScriptSort {
   order?: SearchSortOrder
   script: Script
-  type?: string
+  type?: SearchScriptSortType
+  mode?: SearchSortMode
+  nested?: SearchNestedSortValue
 }
 
+export type SearchScriptSortType = 'string' | 'number'
+
 export interface SearchSearchProfile {
   collector: SearchCollector[]
   query: SearchQueryProfile[]
@@ -1500,27 +1512,31 @@ export interface SearchSmoothingModelContainer {
 
 export type SearchSort = SearchSortCombinations | SearchSortCombinations[]
 
-export type SearchSortCombinations = Field | SearchSortContainer | SearchSortOrder
+export type SearchSortCombinations = Field | SearchSortOptions
 
+export type 
SearchSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' -export interface SearchSortContainerKeys { +export interface SearchSortOptionsKeys { _score?: SearchScoreSort _doc?: SearchScoreSort _geo_distance?: SearchGeoDistanceSort _script?: SearchScriptSort } -export type SearchSortContainer = SearchSortContainerKeys +export type SearchSortOptions = SearchSortOptionsKeys & { [property: string]: SearchFieldSort | SearchSortOrder | SearchScoreSort | SearchGeoDistanceSort | SearchScriptSort } -export type SearchSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' - -export type SearchSortOrder = 'asc' | 'desc' | '_doc' +export type SearchSortOrder = 'asc' | 'desc' export type SearchSortResults = (long | double | string | null)[] +export type SearchSourceConfig = boolean | SearchSourceFilter | Fields + +export type SearchSourceConfigParam = boolean | Fields + export interface SearchSourceFilter { excludes?: Fields - includes?: Fields exclude?: Fields + includes?: Fields include?: Fields } @@ -1537,23 +1553,6 @@ export interface SearchSuggest { text: string } -export interface SearchSuggestContainer { - completion?: SearchCompletionSuggester - phrase?: SearchPhraseSuggester - prefix?: string - regex?: string - term?: SearchTermSuggester - text?: string -} - -export interface SearchSuggestContextQuery { - boost?: double - context: SearchContext - neighbours?: Distance[] | integer[] - precision?: Distance | integer - prefix?: boolean -} - export interface SearchSuggestFuzziness { fuzziness: Fuzziness min_length: integer @@ -1566,6 +1565,12 @@ export type SearchSuggestOption = SearchCompletionSuggestOp export type SearchSuggestSort = 'score' | 'frequency' +export interface SearchSuggesterKeys { + text?: string +} +export type SearchSuggester = SearchSuggesterKeys +& { [property: string]: SearchFieldSuggester | string } + export interface SearchSuggesterBase { field: Field analyzer?: string @@ -1600,6 +1605,8 @@ export interface SearchTotalHits { export type SearchTotalHitsRelation = 'eq' | 'gte' +export type SearchTrackHits = boolean | integer + export interface SearchMvtRequest extends RequestBase { index: Indices field: Field @@ -1788,7 +1795,7 @@ export interface UpdateRequest routing?: Routing timeout?: Time wait_for_active_shards?: WaitForActiveShards - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ @@ -1798,7 +1805,7 @@ export interface UpdateRequest doc_as_upsert?: boolean script?: Script scripted_upsert?: boolean - _source?: boolean | SearchSourceFilter + _source?: SearchSourceConfig upsert?: TDocument } } @@ -1813,7 +1820,7 @@ export interface UpdateByQueryRequest extends RequestBase { analyzer?: string analyze_wildcard?: boolean conflicts?: Conflicts - default_operator?: DefaultOperator + default_operator?: QueryDslOperator df?: string expand_wildcards?: ExpandWildcards from?: long @@ -1832,7 +1839,7 @@ export interface UpdateByQueryRequest extends RequestBase { size?: long slices?: long sort?: string[] - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields stats?: string[] @@ -1897,6 +1904,8 @@ export interface AcknowledgedResponseBase { export type AggregateName = string +export type BuiltinScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' + export interface BulkIndexByScrollFailure { cause: ErrorCause id: Id @@ -1941,16 +1950,17 @@ export interface CompletionStats { export type Conflicts = 'abort' | 'proceed' +export interface CoordsGeoBounds { + top: double + bottom: double + left: double + right: double +} + export type DataStreamName = string export type DataStreamNames = DataStreamName | DataStreamName[] -export interface DateField { - field: Field - format?: string - include_unmapped?: boolean -} - export type DateFormat = string export type DateMath = string @@ -1959,8 +1969,6 @@ export type DateMathTime = string export type DateString = string -export type DefaultOperator = 'AND' | 'OR' - export interface DictionaryResponseBase { [key: string]: TValue } @@ -2007,9 +2015,9 @@ export interface ErrorResponseBase { status: integer } -export type ExpandWildcardOptions = 'all' | 'open' | 'closed' | 'hidden' | 'none' +export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' -export type ExpandWildcards = ExpandWildcardOptions | ExpandWildcardOptions[] | string +export type ExpandWildcards = ExpandWildcard | ExpandWildcard[] export type Field = string @@ -2023,6 +2031,8 @@ export interface FieldSizeUsage { size_in_bytes: long } +export type FieldValue = long | double | string | boolean + export interface FielddataStats { evictions?: long memory_size?: ByteSize @@ -2041,14 +2051,31 @@ export interface FlushStats { export type Fuzziness = string | integer +export type GeoBounds = CoordsGeoBounds | TopLeftBottomRightGeoBounds | TopRightBottomLeftGeoBounds | WktGeoBounds + export type GeoDistanceType = 'arc' | 'plane' -export type GeoHashPrecision = number +export type GeoHash = string + +export interface GeoHashLocation { + geohash: GeoHash +} + +export type GeoHashPrecision = number | string + +export interface GeoLine { + type: string + coordinates: double[][] +} + +export type GeoLocation = LatLonGeoLocation | GeoHashLocation | double[] | string export type GeoShape = any export type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains' +export type GeoTile = string + export type GeoTilePrecision = number export interface GetStats { @@ -2064,9 +2091,7 @@ export interface GetStats { total: long } -export type GroupBy = 'nodes' | 'parents' | 'none' - -export type Health = 'green' | 'yellow' | 'red' +export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED' export type Host = string @@ -2084,10 +2109,6 @@ export type IndexPattern = string export type IndexPatterns = IndexPattern[] -export interface IndexedScript extends ScriptBase { - id: Id -} 
- export interface IndexingStats { index_current: long delete_current: long @@ -2123,12 +2144,14 @@ export type InlineGet = InlineGetKeys & { [property: string]: any } export interface InlineScript extends ScriptBase { + lang?: ScriptLanguage + options?: Record source: string } export type Ip = string -export interface LatLon { +export interface LatLonGeoLocation { lat: double lon: double } @@ -2250,9 +2273,7 @@ export interface RecoveryStats { throttle_time_in_millis: long } -export type Refresh = boolean | RefreshOptions - -export type RefreshOptions = 'wait_for' +export type Refresh = boolean | 'true' | 'false' | 'wait_for' export interface RefreshStats { external_total: long @@ -2285,10 +2306,9 @@ export interface Retries { export type Routing = string -export type Script = InlineScript | IndexedScript | string +export type Script = InlineScript | string | StoredScriptId export interface ScriptBase { - lang?: ScriptLanguage | string params?: Record } @@ -2297,7 +2317,7 @@ export interface ScriptField { ignore_failure?: boolean } -export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' +export type ScriptLanguage = BuiltinScriptLanguage | string export interface ScriptTransform { lang: string @@ -2361,8 +2381,6 @@ export type SequenceNumber = long export type Service = string -export type ShapeRelation = 'intersects' | 'disjoint' | 'within' - export interface ShardFailure { index?: IndexName node?: string @@ -2383,8 +2401,6 @@ export interface ShardsOperationResponseBase { _shards: ShardStatistics } -export type Size = 'Raw' | 'k' | 'm' | 'g' | 't' | 'p' - export interface SlicedScroll { field?: Field id: integer @@ -2401,10 +2417,15 @@ export interface StoreStats { } export interface StoredScript { - lang?: ScriptLanguage | string + lang: ScriptLanguage + options?: Record source: string } +export interface StoredScriptId extends ScriptBase { + id: Id +} + export type SuggestMode = 'missing' | 'popular' | 'always' export type SuggestionName = string @@ -2423,6 +2444,16 @@ export type TimeZone = string export type Timestamp = string +export interface TopLeftBottomRightGeoBounds { + top_left: GeoLocation + bottom_right: GeoLocation +} + +export interface TopRightBottomLeftGeoBounds { + top_right: GeoLocation + bottom_left: GeoLocation +} + export interface Transform { } @@ -2464,8 +2495,6 @@ export type WaitForActiveShards = integer | WaitForActiveShardOptions export type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid' -export type WaitForStatus = 'green' | 'yellow' | 'red' - export interface WarmerStats { current: long total: long @@ -2473,6 +2502,10 @@ export interface WarmerStats { total_time_in_millis: long } +export interface WktGeoBounds { + wkt: string +} + export interface WriteResponseBase { _id: Id _index: IndexName @@ -2501,11 +2534,19 @@ export type uint = number export type ulong = number +export interface AggregationsAdjacencyMatrixAggregate extends AggregationsMultiBucketAggregateBase { +} + export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase { filters?: Record } -export type AggregationsAggregate = AggregationsSingleBucketAggregate | AggregationsAutoDateHistogramAggregate | AggregationsFiltersAggregate | AggregationsSignificantTermsAggregate | AggregationsTermsAggregate | AggregationsBucketAggregate | AggregationsCompositeBucketAggregate | AggregationsMultiBucketAggregate | AggregationsMatrixStatsAggregate | AggregationsKeyedValueAggregate | AggregationsMetricAggregate +export 
interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMultiBucketBase { +} +export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys +& { [property: string]: AggregationsAggregate | long } + +export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsChildrenAggregate | AggregationsGeoLineAggregate export interface AggregationsAggregateBase { meta?: Record @@ -2517,10 +2558,10 @@ export interface AggregationsAggregation { } export interface AggregationsAggregationContainer { + aggregations?: Record aggs?: Record meta?: Record adjacency_matrix?: AggregationsAdjacencyMatrixAggregation - aggregations?: Record auto_date_histogram?: AggregationsAutoDateHistogramAggregation avg?: AggregationsAverageAggregation avg_bucket?: AggregationsAverageBucketAggregation @@ -2598,7 +2639,13 @@ export interface AggregationsAggregationRange { to?: double | string } -export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregate> { +export interface AggregationsArrayPercentilesItem { + key: string + value: double | null + value_as_string?: string +} + +export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { interval: DateMathTime } @@ -2620,32 +2667,35 @@ export interface AggregationsAverageAggregation extends AggregationsFormatMetric 
export interface AggregationsAverageBucketAggregation extends AggregationsPipelineAggregationBase { } +export interface AggregationsAvgAggregate extends AggregationsSingleMetricAggregateBase { +} + export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase { min: double max: double q1: double q2: double q3: double + lower: double + upper: double + min_as_string?: string + max_as_string?: string + q1_as_string?: string + q2_as_string?: string + q3_as_string?: string + lower_as_string?: string + upper_as_string?: string } export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase { compression?: double } -export type AggregationsBucket = AggregationsCompositeBucket | AggregationsDateHistogramBucket | AggregationsFiltersBucketItem | AggregationsIpRangeBucket | AggregationsRangeBucket | AggregationsRareTermsBucket | AggregationsSignificantTermsBucket | AggregationsKeyedBucket - -export interface AggregationsBucketAggregate extends AggregationsAggregateBase { - after_key: Record - bg_count: long - doc_count: long - doc_count_error_upper_bound: long - sum_other_doc_count: long - interval: DateMathTime - items: AggregationsBucket +export interface AggregationsBucketAggregationBase extends AggregationsAggregation { } -export interface AggregationsBucketAggregationBase extends AggregationsAggregation { - aggregations?: Record +export interface AggregationsBucketMetricValueAggregate extends AggregationsSingleMetricAggregateBase { + keys: string[] } export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { @@ -2663,6 +2713,16 @@ export interface AggregationsBucketSortAggregation extends AggregationsAggregati sort?: SearchSort } +export type AggregationsBuckets = Record | TBucket[] + +export type AggregationsBucketsPath = string | string[] | Record + +export type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1Y' + +export interface AggregationsCardinalityAggregate extends AggregationsAggregateBase { + value: long +} + export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { precision_threshold?: integer rehash?: boolean @@ -2673,6 +2733,14 @@ export interface AggregationsChiSquareHeuristic { include_negatives: boolean } +export interface AggregationsChildrenAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsChildrenAggregateBucketKeys extends AggregationsMultiBucketBase { +} +export type AggregationsChildrenAggregateBucket = AggregationsChildrenAggregateBucketKeys +& { [property: string]: AggregationsAggregate | long } + export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase { type?: RelationName } @@ -2685,6 +2753,10 @@ export interface AggregationsClassificationInferenceOptions { top_classes_results_field?: string } +export interface AggregationsCompositeAggregate extends AggregationsMultiBucketAggregateBase { + after_key?: Record +} + export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase { after?: Record size?: integer @@ -2698,13 +2770,15 @@ export interface AggregationsCompositeAggregationSource { geotile_grid?: AggregationsGeoTileGridAggregation } -export interface AggregationsCompositeBucketKeys { +export interface AggregationsCompositeBucketKeys extends AggregationsMultiBucketBase { + key: Record } export type AggregationsCompositeBucket = 
AggregationsCompositeBucketKeys -& { [property: string]: AggregationsAggregate } +& { [property: string]: AggregationsAggregate | Record | long } -export interface AggregationsCompositeBucketAggregate extends AggregationsMultiBucketAggregate> { - after_key: Record +export interface AggregationsCumulativeCardinalityAggregate extends AggregationsAggregateBase { + value: long + value_as_string?: string } export interface AggregationsCumulativeCardinalityAggregation extends AggregationsPipelineAggregationBase { @@ -2713,14 +2787,17 @@ export interface AggregationsCumulativeCardinalityAggregation extends Aggregatio export interface AggregationsCumulativeSumAggregation extends AggregationsPipelineAggregationBase { } +export interface AggregationsDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { +} + export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase { - calendar_interval?: AggregationsDateInterval | Time - extended_bounds?: AggregationsExtendedBounds - hard_bounds?: AggregationsExtendedBounds + calendar_interval?: AggregationsCalendarInterval + extended_bounds?: AggregationsExtendedBounds + hard_bounds?: AggregationsExtendedBounds field?: Field - fixed_interval?: AggregationsDateInterval | Time + fixed_interval?: Time format?: string - interval?: AggregationsDateInterval | Time + interval?: Time min_doc_count?: integer missing?: DateString offset?: Time @@ -2731,12 +2808,15 @@ export interface AggregationsDateHistogramAggregation extends AggregationsBucket keyed?: boolean } -export interface AggregationsDateHistogramBucketKeys { +export interface AggregationsDateHistogramBucketKeys extends AggregationsMultiBucketBase { + key_as_string?: string + key: EpochMillis } export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys -& { [property: string]: AggregationsAggregate } +& { [property: string]: AggregationsAggregate | string | EpochMillis | long } -export type AggregationsDateInterval = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'quarter' | 'year' +export interface AggregationsDateRangeAggregate extends AggregationsRangeAggregate { +} export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase { field?: Field @@ -2748,12 +2828,14 @@ export interface AggregationsDateRangeAggregation extends AggregationsBucketAggr } export interface AggregationsDateRangeExpression { - from?: DateMath | float - from_as_string?: string - to_as_string?: string + from?: AggregationsFieldDateMath key?: string - to?: DateMath | float - doc_count?: long + to?: AggregationsFieldDateMath +} + +export interface AggregationsDerivativeAggregate extends AggregationsSingleMetricAggregateBase { + normalized_value?: double + normalized_value_as_string?: string } export interface AggregationsDerivativeAggregation extends AggregationsPipelineAggregationBase { @@ -2767,49 +2849,74 @@ export interface AggregationsDiversifiedSamplerAggregation extends AggregationsB field?: Field } +export interface AggregationsDoubleTermsAggregate extends AggregationsTermsAggregateBase { +} + +export interface AggregationsDoubleTermsBucketKeys extends AggregationsTermsBucketBase { + key: double + key_as_string?: string +} +export type AggregationsDoubleTermsBucket = AggregationsDoubleTermsBucketKeys +& { [property: string]: AggregationsAggregate | double | string | long } + export interface AggregationsEwmaModelSettings { alpha?: float } +export interface AggregationsEwmaMovingAverageAggregation extends 
AggregationsMovingAverageAggregationBase { + model: 'ewma' + settings: AggregationsEwmaModelSettings +} + export interface AggregationsExtendedBounds { max: T min: T } export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAggregate { - std_deviation_bounds: AggregationsStandardDeviationBounds - sum_of_squares?: double - variance?: double - variance_population?: double - variance_sampling?: double - std_deviation?: double - std_deviation_population?: double - std_deviation_sampling?: double + sum_of_squares: double | null + variance: double | null + variance_population: double | null + variance_sampling: double | null + std_deviation: double | null + std_deviation_bounds?: AggregationsStandardDeviationBounds + sum_of_squares_as_string?: string + variance_as_string?: string + variance_population_as_string?: string + variance_sampling_as_string?: string + std_deviation_as_string?: string + std_deviation_bounds_as_string?: AggregationsStandardDeviationBoundsAsString } export interface AggregationsExtendedStatsAggregation extends AggregationsFormatMetricAggregationBase { sigma?: double } +export interface AggregationsExtendedStatsBucketAggregate extends AggregationsExtendedStatsAggregate { +} + export interface AggregationsExtendedStatsBucketAggregation extends AggregationsPipelineAggregationBase { sigma?: double } -export interface AggregationsFiltersAggregate extends AggregationsAggregateBase { - buckets: AggregationsFiltersBucketItem[] | Record +export type AggregationsFieldDateMath = DateMath | double + +export interface AggregationsFilterAggregate extends AggregationsSingleBucketAggregateBase { +} + +export interface AggregationsFiltersAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsFiltersAggregation extends AggregationsBucketAggregationBase { - filters?: Record | QueryDslQueryContainer[] + filters?: AggregationsBuckets other_bucket?: boolean other_bucket_key?: string keyed?: boolean } -export interface AggregationsFiltersBucketItemKeys { - doc_count: long +export interface AggregationsFiltersBucketKeys extends AggregationsMultiBucketBase { } -export type AggregationsFiltersBucketItem = AggregationsFiltersBucketItemKeys +export type AggregationsFiltersBucket = AggregationsFiltersBucketKeys & { [property: string]: AggregationsAggregate | long } export interface AggregationsFormatMetricAggregationBase extends AggregationsMetricAggregationBase { @@ -2822,13 +2929,8 @@ export interface AggregationsFormattableMetricAggregation extends AggregationsMe export type AggregationsGapPolicy = 'skip' | 'insert_zeros' -export interface AggregationsGeoBounds { - bottom_right: LatLon - top_left: LatLon -} - export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase { - bounds: AggregationsGeoBounds + bounds: GeoBounds } export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase { @@ -2837,34 +2939,45 @@ export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggr export interface AggregationsGeoCentroidAggregate extends AggregationsAggregateBase { count: long - location: QueryDslGeoLocation + location?: GeoLocation } export interface AggregationsGeoCentroidAggregation extends AggregationsMetricAggregationBase { count?: long - location?: QueryDslGeoLocation + location?: GeoLocation +} + +export interface AggregationsGeoDistanceAggregate extends AggregationsRangeAggregate { } export interface AggregationsGeoDistanceAggregation extends AggregationsBucketAggregationBase { 
distance_type?: GeoDistanceType field?: Field - origin?: QueryDslGeoLocation | string + origin?: GeoLocation ranges?: AggregationsAggregationRange[] unit?: DistanceUnit } +export interface AggregationsGeoHashGridAggregate extends AggregationsMultiBucketAggregateBase { +} + export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase { - bounds?: QueryDslBoundingBox + bounds?: GeoBounds field?: Field precision?: GeoHashPrecision shard_size?: integer size?: integer } +export interface AggregationsGeoHashGridBucketKeys extends AggregationsMultiBucketBase { + key: GeoHash +} +export type AggregationsGeoHashGridBucket = AggregationsGeoHashGridBucketKeys +& { [property: string]: AggregationsAggregate | GeoHash | long } + export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase { type: string - geometry: AggregationsLineStringGeoShape - properties: AggregationsGeoLineProperties + geometry: GeoLine } export interface AggregationsGeoLineAggregation { @@ -2879,21 +2992,28 @@ export interface AggregationsGeoLinePoint { field: Field } -export interface AggregationsGeoLineProperties { - complete: boolean - sort_values: double[] -} - export interface AggregationsGeoLineSort { field: Field } +export interface AggregationsGeoTileGridAggregate extends AggregationsMultiBucketAggregateBase { +} + export interface AggregationsGeoTileGridAggregation extends AggregationsBucketAggregationBase { field?: Field precision?: GeoTilePrecision shard_size?: integer size?: integer - bounds?: AggregationsGeoBounds + bounds?: GeoBounds +} + +export interface AggregationsGeoTileGridBucketKeys extends AggregationsMultiBucketBase { + key: GeoTile +} +export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys +& { [property: string]: AggregationsAggregate | GeoTile | long } + +export interface AggregationsGlobalAggregate extends AggregationsSingleBucketAggregateBase { } export interface AggregationsGlobalAggregation extends AggregationsBucketAggregationBase { @@ -2907,13 +3027,13 @@ export interface AggregationsHdrMethod { number_of_significant_value_digits?: integer } -export interface AggregationsHdrPercentileItem { - key: double - value: double +export interface AggregationsHdrPercentileRanksAggregate extends AggregationsPercentilesAggregateBase { } -export interface AggregationsHdrPercentilesAggregate extends AggregationsAggregateBase { - values: AggregationsHdrPercentileItem[] +export interface AggregationsHdrPercentilesAggregate extends AggregationsPercentilesAggregateBase { +} + +export interface AggregationsHistogramAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsHistogramAggregation extends AggregationsBucketAggregationBase { @@ -2930,6 +3050,13 @@ export interface AggregationsHistogramAggregation extends AggregationsBucketAggr keyed?: boolean } +export interface AggregationsHistogramBucketKeys extends AggregationsMultiBucketBase { + key_as_string?: string + key: double +} +export type AggregationsHistogramBucket = AggregationsHistogramBucketKeys +& { [property: string]: AggregationsAggregate | string | double | long } + export interface AggregationsHistogramOrder { _count?: SearchSortOrder _key?: SearchSortOrder @@ -2940,6 +3067,11 @@ export interface AggregationsHoltLinearModelSettings { beta?: float } +export interface AggregationsHoltMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { + model: 'holt' + settings: AggregationsHoltLinearModelSettings +} + export interface 
AggregationsHoltWintersModelSettings { alpha?: float beta?: float @@ -2949,18 +3081,52 @@ export interface AggregationsHoltWintersModelSettings { type?: AggregationsHoltWintersType } +export interface AggregationsHoltWintersMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { + model: 'holt_winters' + settings: AggregationsHoltWintersModelSettings +} + export type AggregationsHoltWintersType = 'add' | 'mult' +export interface AggregationsInferenceAggregateKeys extends AggregationsAggregateBase { + value?: FieldValue + feature_importance?: AggregationsInferenceFeatureImportance[] + top_classes?: AggregationsInferenceTopClassEntry[] + warning?: string +} +export type AggregationsInferenceAggregate = AggregationsInferenceAggregateKeys +& { [property: string]: any } + export interface AggregationsInferenceAggregation extends AggregationsPipelineAggregationBase { model_id: Name inference_config?: AggregationsInferenceConfigContainer } +export interface AggregationsInferenceClassImportance { + class_name: string + importance: double +} + export interface AggregationsInferenceConfigContainer { regression?: AggregationsRegressionInferenceOptions classification?: AggregationsClassificationInferenceOptions } +export interface AggregationsInferenceFeatureImportance { + feature_name: string + importance?: double + classes?: AggregationsInferenceClassImportance[] +} + +export interface AggregationsInferenceTopClassEntry { + class_name: FieldValue + class_probability: double + class_score: double +} + +export interface AggregationsIpRangeAggregate extends AggregationsMultiBucketAggregateBase { +} + export interface AggregationsIpRangeAggregation extends AggregationsBucketAggregationBase { field?: Field ranges?: AggregationsIpRangeAggregationRange[] @@ -2972,26 +3138,39 @@ export interface AggregationsIpRangeAggregationRange { to?: string } -export interface AggregationsIpRangeBucketKeys { +export interface AggregationsIpRangeBucketKeys extends AggregationsMultiBucketBase { + from?: string + to?: string } export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys -& { [property: string]: AggregationsAggregate } +& { [property: string]: AggregationsAggregate | string | long } -export interface AggregationsKeyedBucketKeys<TKey = unknown> { - doc_count: long - key: TKey - key_as_string: string +export type AggregationsKeyedPercentiles = Record<string, string | long | null> + +export interface AggregationsLinearMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { + model: 'linear' + settings: EmptyObject } -export type AggregationsKeyedBucket<TKey = unknown> = AggregationsKeyedBucketKeys<TKey> -& { [property: string]: AggregationsAggregate | long | TKey | string } -export interface AggregationsKeyedValueAggregate extends AggregationsValueAggregate { - keys: string[] +export interface AggregationsLongRareTermsAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsLongRareTermsBucketKeys extends AggregationsMultiBucketBase { + key: long + key_as_string?: string +} +export type AggregationsLongRareTermsBucket = AggregationsLongRareTermsBucketKeys +& { [property: string]: AggregationsAggregate | long | string } + +export interface AggregationsLongTermsAggregate extends AggregationsTermsAggregateBase { } -export interface AggregationsLineStringGeoShape { - coordinates: QueryDslGeoCoordinate[] +export interface AggregationsLongTermsBucketKeys extends AggregationsTermsBucketBase { + key: string + key_as_string?: string } +export type AggregationsLongTermsBucket =
AggregationsLongTermsBucketKeys +& { [property: string]: AggregationsAggregate | string | long } export interface AggregationsMatrixAggregation extends AggregationsAggregation { fields?: Fields @@ -2999,40 +3178,52 @@ export interface AggregationsMatrixAggregation extends AggregationsAggregation { } export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateBase { - correlation: Record - covariance: Record - count: integer - kurtosis: double - mean: double - skewness: double - variance: double - name: string + doc_count: long + fields: AggregationsMatrixStatsFields[] } export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation { mode?: AggregationsMatrixStatsMode } +export interface AggregationsMatrixStatsFields { + name: Field + count: long + mean: double + variance: double + skewness: double + kurtosis: double + covariance: Record + correlation: Record +} + export type AggregationsMatrixStatsMode = 'avg' | 'min' | 'max' | 'sum' | 'median' +export interface AggregationsMaxAggregate extends AggregationsSingleMetricAggregateBase { +} + export interface AggregationsMaxAggregation extends AggregationsFormatMetricAggregationBase { } export interface AggregationsMaxBucketAggregation extends AggregationsPipelineAggregationBase { } +export interface AggregationsMedianAbsoluteDeviationAggregate extends AggregationsSingleMetricAggregateBase { +} + export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase { compression?: double } -export type AggregationsMetricAggregate = AggregationsValueAggregate | AggregationsBoxPlotAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsGeoLineAggregate | AggregationsPercentilesAggregate | AggregationsScriptedMetricAggregate | AggregationsStatsAggregate | AggregationsStringStatsAggregate | AggregationsTopHitsAggregate | AggregationsTopMetricsAggregate | AggregationsExtendedStatsAggregate | AggregationsTDigestPercentilesAggregate | AggregationsHdrPercentilesAggregate - export interface AggregationsMetricAggregationBase { field?: Field missing?: AggregationsMissing script?: Script } +export interface AggregationsMinAggregate extends AggregationsSingleMetricAggregateBase { +} + export interface AggregationsMinAggregation extends AggregationsFormatMetricAggregationBase { } @@ -3043,6 +3234,9 @@ export type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' | export type AggregationsMissing = string | integer | double | boolean +export interface AggregationsMissingAggregate extends AggregationsSingleBucketAggregateBase { +} + export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase { field?: Field missing?: AggregationsMissing @@ -3050,18 +3244,14 @@ export interface AggregationsMissingAggregation extends AggregationsBucketAggreg export type AggregationsMissingOrder = 'first' | 'last' | 'default' -export interface AggregationsMovingAverageAggregation extends AggregationsPipelineAggregationBase { +export type AggregationsMovingAverageAggregation = AggregationsLinearMovingAverageAggregation | AggregationsSimpleMovingAverageAggregation | AggregationsEwmaMovingAverageAggregation | AggregationsHoltMovingAverageAggregation | AggregationsHoltWintersMovingAverageAggregation + +export interface AggregationsMovingAverageAggregationBase extends AggregationsPipelineAggregationBase { minimize?: boolean - model?: AggregationsMovingAverageModel - settings: AggregationsMovingAverageSettings 
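// NOTE (editor's sketch, not part of the generated diff): moving_avg is now
// modelled as a tagged union over `model` rather than one interface with
// free-form `settings`, so TypeScript can narrow the settings type from the
// model name. An illustrative narrowing function (the function name is the
// editor's own):
//
//   function describeModel (agg: AggregationsMovingAverageAggregation): string {
//     if (agg.model === 'holt_winters') {
//       // narrowed: `agg.settings` is AggregationsHoltWintersModelSettings here
//       return `holt_winters (alpha=${agg.settings.alpha ?? 'default'})`
//     }
//     return agg.model
//   }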
predict?: integer window?: integer } -export type AggregationsMovingAverageModel = 'linear' | 'simple' | 'ewma' | 'holt' | 'holt_winters' - -export type AggregationsMovingAverageSettings = AggregationsEwmaModelSettings | AggregationsHoltLinearModelSettings | AggregationsHoltWintersModelSettings - export interface AggregationsMovingFunctionAggregation extends AggregationsPipelineAggregationBase { script?: string shift?: integer @@ -3074,23 +3264,41 @@ export interface AggregationsMovingPercentilesAggregation extends AggregationsPi keyed?: boolean } -export interface AggregationsMultiBucketAggregate extends AggregationsAggregateBase { - buckets: TBucket[] +export interface AggregationsMultiBucketAggregateBase extends AggregationsAggregateBase { + buckets: AggregationsBuckets +} + +export interface AggregationsMultiBucketBase { + doc_count: long } export interface AggregationsMultiTermLookup { field: Field } +export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggregateBase { +} + export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase { terms: AggregationsMultiTermLookup[] } +export interface AggregationsMultiTermsBucketKeys extends AggregationsMultiBucketBase { + key: (long | double | string)[] + key_as_string?: string + doc_count_error_upper_bound?: long +} +export type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys +& { [property: string]: AggregationsAggregate | (long | double | string)[] | string | long } + export interface AggregationsMutualInformationHeuristic { background_is_superset?: boolean include_negatives?: boolean } +export interface AggregationsNestedAggregate extends AggregationsSingleBucketAggregateBase { +} + export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase { path?: Field } @@ -3108,11 +3316,6 @@ export interface AggregationsParentAggregation extends AggregationsBucketAggrega export interface AggregationsPercentageScoreHeuristic { } -export interface AggregationsPercentileItem { - percentile: double - value: double -} - export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase { keyed?: boolean values?: double[] @@ -3120,8 +3323,10 @@ export interface AggregationsPercentileRanksAggregation extends AggregationsForm tdigest?: AggregationsTDigest } -export interface AggregationsPercentilesAggregate extends AggregationsAggregateBase { - items: AggregationsPercentileItem[] +export type AggregationsPercentiles = AggregationsKeyedPercentiles | AggregationsArrayPercentilesItem[] + +export interface AggregationsPercentilesAggregateBase extends AggregationsAggregateBase { + values: AggregationsPercentiles } export interface AggregationsPercentilesAggregation extends AggregationsFormatMetricAggregationBase { @@ -3131,16 +3336,22 @@ export interface AggregationsPercentilesAggregation extends AggregationsFormatMe tdigest?: AggregationsTDigest } +export interface AggregationsPercentilesBucketAggregate extends AggregationsPercentilesAggregateBase { +} + export interface AggregationsPercentilesBucketAggregation extends AggregationsPipelineAggregationBase { percents?: double[] } export interface AggregationsPipelineAggregationBase extends AggregationsAggregation { - buckets_path?: string | string[] | Record + buckets_path?: AggregationsBucketsPath format?: string gap_policy?: AggregationsGapPolicy } +export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggregateBase { +} + export interface 
AggregationsRangeAggregation extends AggregationsBucketAggregationBase { field?: Field missing?: integer @@ -3149,28 +3360,32 @@ export interface AggregationsRangeAggregation extends AggregationsBucketAggregat keyed?: boolean } -export interface AggregationsRangeBucketKeys { +export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase { + from?: double + to?: double + from_as_string?: string + to_as_string?: string } export type AggregationsRangeBucket = AggregationsRangeBucketKeys -& { [property: string]: AggregationsAggregate } +& { [property: string]: AggregationsAggregate | double | string | long } export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase { - exclude?: string | string[] + exclude?: AggregationsTermsExclude field?: Field - include?: string | string[] | AggregationsTermsInclude + include?: AggregationsTermsInclude max_doc_count?: long missing?: AggregationsMissing precision?: double value_type?: string } -export interface AggregationsRareTermsBucketKeys { +export interface AggregationsRateAggregate extends AggregationsAggregateBase { + value: double + value_as_string?: string } -export type AggregationsRareTermsBucket = AggregationsRareTermsBucketKeys -& { [property: string]: AggregationsAggregate } export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase { - unit?: AggregationsDateInterval + unit?: AggregationsCalendarInterval mode?: AggregationsRateMode } @@ -3181,10 +3396,16 @@ export interface AggregationsRegressionInferenceOptions { num_top_feature_importance_values?: integer } +export interface AggregationsReverseNestedAggregate extends AggregationsSingleBucketAggregateBase { +} + export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase { path?: Field } +export interface AggregationsSamplerAggregate extends AggregationsSingleBucketAggregateBase { +} + export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase { shard_size?: integer } @@ -3211,15 +3432,29 @@ export interface AggregationsSerialDifferencingAggregation extends AggregationsP lag?: integer } -export interface AggregationsSignificantTermsAggregate extends AggregationsMultiBucketAggregate { - bg_count: long - doc_count: long +export interface AggregationsSignificantLongTermsAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsSignificantLongTermsBucketKeys extends AggregationsSignificantTermsBucketBase { + key: long + key_as_string?: string +} +export type AggregationsSignificantLongTermsBucket = AggregationsSignificantLongTermsBucketKeys +& { [property: string]: AggregationsAggregate | long | string | double } + +export interface AggregationsSignificantStringTermsAggregate extends AggregationsMultiBucketAggregateBase { } +export interface AggregationsSignificantStringTermsBucketKeys extends AggregationsSignificantTermsBucketBase { + key: string +} +export type AggregationsSignificantStringTermsBucket = AggregationsSignificantStringTermsBucketKeys +& { [property: string]: AggregationsAggregate | string | double | long } + export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase { background_filter?: QueryDslQueryContainer chi_square?: AggregationsChiSquareHeuristic - exclude?: string | string[] + exclude?: AggregationsTermsExclude execution_hint?: AggregationsTermsAggregationExecutionHint field?: Field gnd?: AggregationsGoogleNormalizedDistanceHeuristic @@ -3233,15 
+3468,15 @@ export interface AggregationsSignificantTermsAggregation extends AggregationsBuc size?: integer } -export interface AggregationsSignificantTermsBucketKeys { +export interface AggregationsSignificantTermsBucketBase extends AggregationsMultiBucketBase { + score: double + bg_count: long } -export type AggregationsSignificantTermsBucket = AggregationsSignificantTermsBucketKeys -& { [property: string]: AggregationsAggregate } export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase { background_filter?: QueryDslQueryContainer chi_square?: AggregationsChiSquareHeuristic - exclude?: string | string[] + exclude?: AggregationsTermsExclude execution_hint?: AggregationsTermsAggregationExecutionHint field?: Field filter_duplicate_text?: boolean @@ -3257,48 +3492,99 @@ export interface AggregationsSignificantTextAggregation extends AggregationsBuck source_fields?: Fields } -export interface AggregationsSingleBucketAggregateKeys extends AggregationsAggregateBase { - doc_count: double +export interface AggregationsSimpleMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { + model: 'simple' + settings: EmptyObject +} + +export interface AggregationsSimpleValueAggregate extends AggregationsSingleMetricAggregateBase { +} + +export interface AggregationsSingleBucketAggregateBase extends AggregationsAggregateBase { + doc_count: long +} + +export interface AggregationsSingleMetricAggregateBase extends AggregationsAggregateBase { + value: double | null + value_as_string?: string } -export type AggregationsSingleBucketAggregate = AggregationsSingleBucketAggregateKeys -& { [property: string]: AggregationsAggregate | double | Record } export interface AggregationsStandardDeviationBounds { - lower?: double - upper?: double - lower_population?: double - upper_population?: double - lower_sampling?: double - upper_sampling?: double + upper: double | null + lower: double | null + upper_population: double | null + lower_population: double | null + upper_sampling: double | null + lower_sampling: double | null +} + +export interface AggregationsStandardDeviationBoundsAsString { + upper: string + lower: string + upper_population: string + lower_population: string + upper_sampling: string + lower_sampling: string } export interface AggregationsStatsAggregate extends AggregationsAggregateBase { - count: double + count: long + min: double | null + max: double | null + avg: double | null sum: double - avg?: double - max?: double - min?: double + min_as_string?: string + max_as_string?: string + avg_as_string?: string + sum_as_string?: string } export interface AggregationsStatsAggregation extends AggregationsFormatMetricAggregationBase { } +export interface AggregationsStatsBucketAggregate extends AggregationsStatsAggregate { +} + export interface AggregationsStatsBucketAggregation extends AggregationsPipelineAggregationBase { } +export interface AggregationsStringRareTermsAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsStringRareTermsBucketKeys extends AggregationsMultiBucketBase { + key: string +} +export type AggregationsStringRareTermsBucket = AggregationsStringRareTermsBucketKeys +& { [property: string]: AggregationsAggregate | string | long } + export interface AggregationsStringStatsAggregate extends AggregationsAggregateBase { count: long - min_length: integer - max_length: integer - avg_length: double - entropy: double - distribution?: Record + min_length: integer | null + max_length: integer | null 
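// NOTE (editor's sketch, not part of the generated diff): single-metric
// aggregates now report an explicit `value: double | null` (plus an optional
// `value_as_string`) instead of an untyped absent value, so callers unwrap the
// null themselves, e.g. (helper name is the editor's own):
//
//   function metricValue (agg: AggregationsSingleMetricAggregateBase): number | undefined {
//     // `value` comes back as null when the aggregation saw no documents
//     return agg.value ?? undefined
//   }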
+ avg_length: double | null + entropy: double | null + distribution?: string | null + min_length_as_string?: string + max_length_as_string?: string + avg_length_as_string?: string } export interface AggregationsStringStatsAggregation extends AggregationsMetricAggregationBase { show_distribution?: boolean } +export interface AggregationsStringTermsAggregate extends AggregationsTermsAggregateBase { +} + +export interface AggregationsStringTermsBucketKeys extends AggregationsTermsBucketBase { + key: string +} +export type AggregationsStringTermsBucket = AggregationsStringTermsBucketKeys +& { [property: string]: AggregationsAggregate | string | long } + +export interface AggregationsSumAggregate extends AggregationsSingleMetricAggregateBase { +} + export interface AggregationsSumAggregation extends AggregationsFormatMetricAggregationBase { } @@ -3309,8 +3595,15 @@ export interface AggregationsTDigest { compression?: integer } -export interface AggregationsTDigestPercentilesAggregate extends AggregationsAggregateBase { - values: Record +export interface AggregationsTDigestPercentileRanksAggregate extends AggregationsPercentilesAggregateBase { +} + +export interface AggregationsTDigestPercentilesAggregate extends AggregationsPercentilesAggregateBase { +} + +export interface AggregationsTTestAggregate extends AggregationsAggregateBase { + value: double | null + value_as_string?: string } export interface AggregationsTTestAggregation extends AggregationsAggregation { @@ -3321,17 +3614,17 @@ export interface AggregationsTTestAggregation extends AggregationsAggregation { export type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic' -export interface AggregationsTermsAggregate extends AggregationsMultiBucketAggregate { - doc_count_error_upper_bound: long +export interface AggregationsTermsAggregateBase extends AggregationsMultiBucketAggregateBase { + doc_count_error_upper_bound?: long sum_other_doc_count: long } export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase { collect_mode?: AggregationsTermsAggregationCollectMode - exclude?: string | string[] + exclude?: AggregationsTermsExclude execution_hint?: AggregationsTermsAggregationExecutionHint field?: Field - include?: string | string[] | AggregationsTermsInclude + include?: AggregationsTermsInclude min_doc_count?: integer missing?: AggregationsMissing missing_order?: AggregationsMissingOrder @@ -3348,9 +3641,17 @@ export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_f export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality' -export type AggregationsTermsAggregationOrder = SearchSortOrder | Record | Record[] +export type AggregationsTermsAggregationOrder = Record | Record[] + +export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase { + doc_count_error?: long +} + +export type AggregationsTermsExclude = string | string[] + +export type AggregationsTermsInclude = string | string[] | AggregationsTermsPartition -export interface AggregationsTermsInclude { +export interface AggregationsTermsPartition { num_partitions: long partition: long } @@ -3362,7 +3663,7 @@ export interface AggregationsTestPopulation { } export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase { - hits: SearchHitsMetadata> + hits: SearchHitsMetadata } export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase { @@ -3373,7 +3674,7 @@ export 
interface AggregationsTopHitsAggregation extends AggregationsMetricAggreg script_fields?: Record size?: integer sort?: SearchSort - _source?: boolean | SearchSourceFilter | Fields + _source?: SearchSourceConfig stored_fields?: Fields track_scores?: boolean version?: boolean @@ -3381,12 +3682,11 @@ export interface AggregationsTopHitsAggregation extends AggregationsMetricAggreg } export interface AggregationsTopMetrics { - sort: (long | double | string)[] - metrics: Record + sort: (FieldValue | null)[] + metrics: Record } -export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBase { - top: AggregationsTopMetrics[] +export interface AggregationsTopMetricsAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase { @@ -3395,13 +3695,26 @@ export interface AggregationsTopMetricsAggregation extends AggregationsMetricAgg sort?: SearchSort } +export interface AggregationsTopMetricsBucketKeys extends AggregationsMultiBucketBase { + top: AggregationsTopMetrics[] +} +export type AggregationsTopMetricsBucket = AggregationsTopMetricsBucketKeys +& { [property: string]: AggregationsAggregate | AggregationsTopMetrics[] | long } + export interface AggregationsTopMetricsValue { field: Field } -export interface AggregationsValueAggregate extends AggregationsAggregateBase { - value: double - value_as_string?: string +export interface AggregationsUnmappedRareTermsAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsUnmappedTermsAggregate extends AggregationsTermsAggregateBase { +} + +export interface AggregationsValueCountAggregate extends AggregationsSingleMetricAggregateBase { } export interface AggregationsValueCountAggregation extends AggregationsFormattableMetricAggregation { @@ -3409,6 +3722,9 @@ export interface AggregationsValueCountAggregation extends AggregationsFormattab export type AggregationsValueType = 'string' | 'long' | 'double' | 'number' | 'date' | 'date_nanos' | 'ip' | 'numeric' | 'geo_point' | 'boolean' +export interface AggregationsVariableWidthHistogramAggregate extends AggregationsMultiBucketAggregateBase { +} + export interface AggregationsVariableWidthHistogramAggregation { field?: Field buckets?: integer @@ -3416,6 +3732,17 @@ export interface AggregationsVariableWidthHistogramAggregation { initial_buffer?: integer } +export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase { + min: double + key: double + max: double + min_as_string?: string + key_as_string?: string + max_as_string?: string +} +export type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidthHistogramBucketKeys +& { [property: string]: AggregationsAggregate | double | string | long } + export interface AggregationsWeightedAverageAggregation extends AggregationsAggregation { format?: string value?: AggregationsWeightedAverageValue @@ -3429,6 +3756,9 @@ export interface AggregationsWeightedAverageValue { script?: Script } +export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase { +} + export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | 
AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisDutchAnalyzer export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { @@ -3436,12 +3766,14 @@ export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase preserve_original: boolean } -export type AnalysisCharFilter = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceCharFilter | AnalysisIcuNormalizationCharFilter | AnalysisKuromojiIterationMarkCharFilter +export type AnalysisCharFilter = string | AnalysisCharFilterDefinition export interface AnalysisCharFilterBase { version?: VersionString } +export type AnalysisCharFilterDefinition = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceCharFilter | AnalysisIcuNormalizationCharFilter | AnalysisKuromojiIterationMarkCharFilter + export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase { type: 'char_group' tokenize_on_chars: string[] @@ -3965,18 +4297,22 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' -export type AnalysisTokenFilter = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuTokenizer | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter +export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition export interface AnalysisTokenFilterBase { version?: VersionString } -export type AnalysisTokenizer = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer +export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | 
AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuTokenizer | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter + +export type AnalysisTokenizer = string | AnalysisTokenizerDefinition export interface AnalysisTokenizerBase { version?: VersionString } +export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer + export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase { type: 'trim' } @@ -4161,7 +4497,7 @@ export interface MappingDoubleRangeProperty extends MappingRangePropertyBase { type: 'double_range' } -export type MappingDynamicMapping = 'strict' | 'runtime' | 'true' | 'false' +export type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false' export interface MappingDynamicTemplate { mapping?: MappingProperty @@ -4231,7 +4567,7 @@ export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'cc export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean ignore_z_value?: boolean - null_value?: QueryDslGeoLocation + null_value?: GeoLocation type: 'geo_point' } @@ -4357,7 +4693,7 @@ export interface MappingPropertyBase { name?: PropertyName properties?: Record ignore_above?: integer - dynamic?: boolean | MappingDynamicMapping + dynamic?: MappingDynamicMapping fields?: Record } @@ -4449,7 +4785,7 @@ export interface MappingSuggestContext { precision?: integer | string } -export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' +export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' | 'with_positions_payloads' export interface MappingTextIndexPrefixes { max_chars: integer @@ -4488,7 +4824,7 @@ export 
interface MappingTokenCountProperty extends MappingDocValuesPropertyBase export interface MappingTypeMapping { all_field?: MappingAllField date_detection?: boolean - dynamic?: boolean | MappingDynamicMapping + dynamic?: MappingDynamicMapping dynamic_date_formats?: string[] dynamic_templates?: Record | Record[] _field_names?: MappingFieldNamesField @@ -4531,18 +4867,6 @@ export interface QueryDslBoostingQuery extends QueryDslQueryBase { positive: QueryDslQueryContainer } -export interface QueryDslBoundingBox { - bottom_right?: QueryDslGeoLocation - top_left?: QueryDslGeoLocation - top_right?: QueryDslGeoLocation - bottom_left?: QueryDslGeoLocation - top?: double - left?: double - right?: double - bottom?: double - wkt?: string -} - export type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min' export type QueryDslCombinedFieldsOperator = 'or' | 'and' @@ -4620,6 +4944,12 @@ export interface QueryDslExistsQuery extends QueryDslQueryBase { field: Field } +export interface QueryDslFieldAndFormat { + field: Field + format?: string + include_unmapped?: boolean +} + export interface QueryDslFieldLookup { id: Id index?: IndexName @@ -4675,16 +5005,14 @@ export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { ignore_unmapped?: boolean } export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys -& { [property: string]: QueryDslBoundingBox | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string } - -export type QueryDslGeoCoordinate = string | double[] | QueryDslThreeDimensionalPoint +& { [property: string]: GeoBounds | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string } export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys -& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double } +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double } -export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { +export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { @@ -4693,14 +5021,12 @@ export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { validation_method?: QueryDslGeoValidationMethod } export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys -& { [property: string]: QueryDslGeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | float | string } +& { [property: string]: GeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | float | string } export type QueryDslGeoExecution = 'memory' | 'indexed' -export type QueryDslGeoLocation = string | double[] | QueryDslTwoDimensionalPoint - export interface QueryDslGeoPolygonPoints { - points: QueryDslGeoLocation[] + points: GeoLocation[] } export interface QueryDslGeoPolygonQueryKeys extends QueryDslQueryBase { @@ -4949,7 +5275,7 @@ export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionB export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys & { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode | QueryDslQueryContainer | double } -export type QueryDslOperator = 'and' | 'or' +export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR' export interface QueryDslParentIdQuery extends QueryDslQueryBase { id?: 
Id @@ -5040,7 +5366,7 @@ export interface QueryDslQueryContainer { span_or?: QueryDslSpanOrQuery span_term?: Partial> span_within?: QueryDslSpanWithinQuery - term?: Partial> + term?: Partial> terms?: QueryDslTermsQuery terms_set?: Partial> wildcard?: Partial> @@ -5143,18 +5469,20 @@ export interface QueryDslScriptScoreQuery extends QueryDslQueryBase { } export interface QueryDslShapeFieldQuery { - ignore_unmapped?: boolean indexed_shape?: QueryDslFieldLookup - relation?: ShapeRelation + relation?: GeoShapeRelation shape?: GeoShape } export interface QueryDslShapeQueryKeys extends QueryDslQueryBase { + ignore_unmapped?: boolean } export type QueryDslShapeQuery = QueryDslShapeQueryKeys -& { [property: string]: QueryDslShapeFieldQuery | float | string } +& { [property: string]: QueryDslShapeFieldQuery | boolean | float | string } -export type QueryDslSimpleQueryStringFlags = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL' +export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL' + +export type QueryDslSimpleQueryStringFlags = QueryDslSimpleQueryStringFlag | string export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase { analyzer?: string @@ -5162,7 +5490,7 @@ export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase { auto_generate_synonyms_phrase_query?: boolean default_operator?: QueryDslOperator fields?: Field[] - flags?: QueryDslSimpleQueryStringFlags | string + flags?: QueryDslSimpleQueryStringFlags fuzzy_max_expansions?: integer fuzzy_prefix_length?: integer fuzzy_transpositions?: boolean @@ -5234,7 +5562,7 @@ export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { } export interface QueryDslTermQuery extends QueryDslQueryBase { - value: string | float | boolean + value: FieldValue case_insensitive?: boolean } @@ -5248,7 +5576,9 @@ export interface QueryDslTermsLookup { export interface QueryDslTermsQueryKeys extends QueryDslQueryBase { } export type QueryDslTermsQuery = QueryDslTermsQueryKeys -& { [property: string]: string[] | long[] | QueryDslTermsLookup | float | string } +& { [property: string]: QueryDslTermsQueryField | float | string } + +export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup export interface QueryDslTermsSetQuery extends QueryDslQueryBase { minimum_should_match_field?: Field @@ -5258,17 +5588,6 @@ export interface QueryDslTermsSetQuery extends QueryDslQueryBase { export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' -export interface QueryDslThreeDimensionalPoint { - lat: double - lon: double - z?: double -} - -export interface QueryDslTwoDimensionalPoint { - lat: double - lon: double -} - export interface QueryDslTypeQuery extends QueryDslQueryBase { value: string } @@ -5348,7 +5667,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { analyze_wildcard?: boolean batched_reduce_size?: long ccs_minimize_roundtrips?: boolean - default_operator?: DefaultOperator + default_operator?: QueryDslOperator df?: string docvalue_fields?: Fields expand_wildcards?: ExpandWildcards @@ -5372,12 +5691,12 @@ export interface AsyncSearchSubmitRequest extends RequestBase { suggest_text?: string terminate_after?: long timeout?: Time - track_total_hits?: boolean | integer + track_total_hits?: SearchTrackHits track_scores?: boolean 
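// NOTE (editor's sketch, not part of the generated diff): with
// QueryDslTermQuery.value widened to FieldValue and the QueryDslTermsQueryField
// union above, a terms query is typed as either an inline list of field values
// or a terms lookup. Two illustrative values, assuming QueryDslTermsLookup keeps
// its index/id/path shape (index and field names are the editor's own):
//
//   const inline: QueryDslTermsQueryField = ['red', 'blue', 42]
//   const lookup: QueryDslTermsQueryField = { index: 'colors', id: '1', path: 'favorites' }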
typed_keys?: boolean rest_total_hits_as_int?: boolean version?: boolean - _source?: boolean | Fields + _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields seq_no_primary_term?: boolean @@ -5393,9 +5712,9 @@ export interface AsyncSearchSubmitRequest extends RequestBase { explain?: boolean from?: integer highlight?: SearchHighlight - track_total_hits?: boolean | integer + track_total_hits?: SearchTrackHits indices_boost?: Record[] - docvalue_fields?: SearchDocValueField | (Field | SearchDocValueField)[] + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -5406,9 +5725,9 @@ export interface AsyncSearchSubmitRequest extends RequestBase { size?: integer slice?: SlicedScroll sort?: SearchSort - _source?: boolean | Fields | SearchSourceFilter - fields?: (Field | DateField)[] - suggest?: SearchSuggestContainer | Record + _source?: SearchSourceConfig + fields?: (QueryDslFieldAndFormat | Field)[] + suggest?: SearchSuggester terminate_after?: long timeout?: string track_scores?: boolean @@ -5636,7 +5955,6 @@ export interface CatHealthHealthRecord { } export interface CatHealthRequest extends CatCatRequestBase { - include_timestamp?: boolean ts?: boolean } @@ -5945,7 +6263,7 @@ export interface CatIndicesRequest extends CatCatRequestBase { index?: Indices bytes?: Bytes expand_wildcards?: ExpandWildcards - health?: Health + health?: HealthStatus include_unloaded_segments?: boolean pri?: boolean } @@ -7092,7 +7410,7 @@ export interface CatTemplatesTemplatesRecord { export interface CatThreadPoolRequest extends CatCatRequestBase { thread_pool_patterns?: Names - size?: Size | boolean + size?: CatThreadPoolThreadPoolSize } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -7140,6 +7458,8 @@ export interface CatThreadPoolThreadPoolRecord { ka?: string } +export type CatThreadPoolThreadPoolSize = 'k' | 'm' | 'g' | 't' | 'p' + export interface CatTransformsRequest extends CatCatRequestBase { transform_id?: Id allow_no_match?: boolean @@ -7475,8 +7795,6 @@ export interface CcrUnfollowRequest extends RequestBase { export interface CcrUnfollowResponse extends AcknowledgedResponseBase { } -export type ClusterClusterStatus = 'green' | 'yellow' | 'red' - export interface ClusterComponentTemplate { name: Name component_template: ClusterComponentTemplateNode @@ -7671,7 +7989,7 @@ export interface ClusterHealthIndexHealthStats { number_of_shards: integer relocating_shards: integer shards?: Record - status: Health + status: HealthStatus unassigned_shards: integer } @@ -7687,7 +8005,7 @@ export interface ClusterHealthRequest extends RequestBase { wait_for_nodes?: string wait_for_no_initializing_shards?: boolean wait_for_no_relocating_shards?: boolean - wait_for_status?: WaitForStatus + wait_for_status?: HealthStatus } export interface ClusterHealthResponse { @@ -7703,7 +8021,7 @@ export interface ClusterHealthResponse { number_of_nodes: integer number_of_pending_tasks: integer relocating_shards: integer - status: Health + status: HealthStatus task_max_waiting_in_queue_millis: EpochMillis timed_out: boolean unassigned_shards: integer @@ -7714,7 +8032,7 @@ export interface ClusterHealthShardHealthStats { initializing_shards: integer primary_active: boolean relocating_shards: integer - status: Health + status: HealthStatus unassigned_shards: integer } @@ -8100,7 +8418,7 @@ export interface ClusterStatsResponse extends NodesNodesResponseBase { cluster_uuid: Uuid indices: 
ClusterStatsClusterIndices nodes: ClusterStatsClusterNodes - status: ClusterClusterStatus + status: HealthStatus timestamp: long } @@ -8321,8 +8639,8 @@ export interface EqlSearchRequest extends RequestBase { keep_alive?: Time keep_on_completion?: boolean wait_for_completion_timeout?: Time - size?: uint | float - fields?: (Field | EqlSearchSearchFieldFormatted)[] + size?: uint + fields?: QueryDslFieldAndFormat | Field result_position?: EqlSearchResultPosition } } @@ -8332,11 +8650,6 @@ export interface EqlSearchResponse extends EqlEqlSearchRespons export type EqlSearchResultPosition = 'tail' | 'head' -export interface EqlSearchSearchFieldFormatted { - field: Field - format?: string -} - export interface FeaturesFeature { name: string description: string @@ -8356,6 +8669,21 @@ export interface FeaturesResetFeaturesResponse { features: FeaturesFeature[] } +export type FleetCheckpoint = long + +export interface FleetGlobalCheckpointsRequest extends RequestBase { + index: IndexName | IndexAlias + wait_for_advance?: boolean + wait_for_index?: boolean + checkpoints?: FleetCheckpoint[] + timeout?: Time +} + +export interface FleetGlobalCheckpointsResponse { + global_checkpoints: FleetCheckpoint[] + timed_out: boolean +} + export interface GraphConnection { doc_count: long source: long @@ -8423,13 +8751,11 @@ export interface GraphExploreResponse { vertices: GraphVertex[] } -export interface IlmAction { -} +export type IlmActions = any export interface IlmPhase { - actions?: Record | string[] + actions?: IlmActions min_age?: Time - configurations?: Record> } export interface IlmPhases { @@ -8595,15 +8921,13 @@ export interface IndicesDataStream { hidden?: boolean } -export type IndicesDataStreamHealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED' - export interface IndicesFielddataFrequencyFilter { max: double min: double min_segment_size: integer } -export type IndicesIndexCheckOnStartup = 'false' | 'checksum' | 'true' +export type IndicesIndexCheckOnStartup = boolean | 'false' | 'checksum' | 'true' export interface IndicesIndexRouting { allocation?: IndicesIndexRoutingAllocation @@ -8780,14 +9104,10 @@ export interface IndicesIndexSettingsLifecycle { export interface IndicesIndexState { aliases?: Record mappings?: MappingTypeMapping - settings?: IndicesIndexSettings | IndicesIndexStatePrefixedSettings + settings?: IndicesIndexSettings data_stream?: DataStreamName } -export interface IndicesIndexStatePrefixedSettings { - index: IndicesIndexSettings -} - export interface IndicesIndexVersioning { created: VersionString } @@ -8798,11 +9118,6 @@ export interface IndicesNumericFielddata { export type IndicesNumericFielddataFormat = 'array' | 'disabled' -export interface IndicesOverlappingIndexTemplate { - name: Name - index_patterns?: IndexName[] -} - export type IndicesSegmentSortMissing = '_last' | '_first' export type IndicesSegmentSortMode = 'min' | 'max' @@ -8895,13 +9210,13 @@ export interface IndicesAnalyzeRequest extends RequestBase { body?: { analyzer?: string attributes?: string[] - char_filter?: (string | AnalysisCharFilter)[] + char_filter?: AnalysisCharFilter[] explain?: boolean field?: Field - filter?: (string | AnalysisTokenFilter)[] + filter?: AnalysisTokenFilter[] normalizer?: string text?: IndicesAnalyzeTextToAnalyze - tokenizer?: string | AnalysisTokenizer + tokenizer?: AnalysisTokenizer } } @@ -9196,7 +9511,7 @@ export interface IndicesGetDataStreamIndicesGetDataStreamItem { template: Name hidden: boolean system?: boolean - status: IndicesDataStreamHealthStatus + 
status: HealthStatus ilm_policy?: Name _meta?: Metadata } @@ -9395,7 +9710,7 @@ export interface IndicesPutMappingRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { date_detection?: boolean - dynamic?: boolean | MappingDynamicMapping + dynamic?: MappingDynamicMapping dynamic_date_formats?: string[] dynamic_templates?: Record | Record[] _field_names?: MappingFieldNamesField @@ -9612,6 +9927,8 @@ export interface IndicesResolveIndexResponse { data_streams: IndicesResolveIndexResolveIndexDataStreamsItem[] } +export type IndicesRolloverIndexRolloverMapping = MappingTypeMapping | Record + export interface IndicesRolloverRequest extends RequestBase { alias: IndexAlias new_index?: IndexName @@ -9624,7 +9941,7 @@ export interface IndicesRolloverRequest extends RequestBase { body?: { aliases?: Record conditions?: IndicesRolloverRolloverConditions - mappings?: Record | MappingTypeMapping + mappings?: IndicesRolloverIndexRolloverMapping settings?: Record } } @@ -9697,15 +10014,13 @@ export interface IndicesShardStoresRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - status?: IndicesShardStoresShardStatus | IndicesShardStoresShardStatus[] + status?: IndicesShardStoresShardStoreStatus | IndicesShardStoresShardStoreStatus[] } export interface IndicesShardStoresResponse { indices: Record } -export type IndicesShardStoresShardStatus = 'green' | 'yellow' | 'red' | 'all' - export interface IndicesShardStoresShardStore { allocation: IndicesShardStoresShardStoreAllocation allocation_id: Id @@ -9724,6 +10039,8 @@ export interface IndicesShardStoresShardStoreException { type: string } +export type IndicesShardStoresShardStoreStatus = 'green' | 'yellow' | 'red' | 'all' + export interface IndicesShardStoresShardStoreWrapper { stores: IndicesShardStoresShardStore[] } @@ -9748,12 +10065,18 @@ export interface IndicesShrinkResponse extends AcknowledgedResponseBase { export interface IndicesSimulateIndexTemplateRequest extends RequestBase { name: Name + create?: boolean + master_timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { - index_patterns?: IndexName[] + allow_auto_create?: boolean + index_patterns?: Indices composed_of?: Name[] - overlapping?: IndicesOverlappingIndexTemplate[] - template?: IndicesTemplateMapping + template?: IndicesPutIndexTemplateIndexTemplateMapping + data_stream?: IndicesDataStream + priority?: integer + version?: VersionNumber + _meta?: Metadata } } @@ -10013,7 +10336,7 @@ export interface IndicesValidateQueryRequest extends RequestBase { all_shards?: boolean analyzer?: string analyze_wildcard?: boolean - default_operator?: DefaultOperator + default_operator?: QueryDslOperator df?: string expand_wildcards?: ExpandWildcards explain?: boolean @@ -10086,7 +10409,7 @@ export interface IngestCsvProcessor extends IngestProcessorBase { export interface IngestDateIndexNameProcessor extends IngestProcessorBase { date_formats: string[] - date_rounding: string | IngestDateRounding + date_rounding: string field: Field index_name_format: string index_name_prefix: string @@ -10102,8 +10425,6 @@ export interface IngestDateProcessor extends IngestProcessorBase { timezone?: string } -export type IngestDateRounding = 's' | 'm' | 'h' | 'd' | 'w' | 'M' | 'y' - export interface IngestDissectProcessor extends IngestProcessorBase { append_separator: string field: Field @@ -10616,7 +10937,7 @@ export interface MigrationDeprecationsResponse { export interface MlAnalysisConfig { bucket_span: TimeSpan - categorization_analyzer?: MlCategorizationAnalyzer | string + categorization_analyzer?: MlCategorizationAnalyzer categorization_field_name?: Field categorization_filters?: string[] detectors: MlDetector[] @@ -10630,7 +10951,7 @@ export interface MlAnalysisConfig { export interface MlAnalysisConfigRead { bucket_span: TimeSpan - categorization_analyzer?: MlCategorizationAnalyzer | string + categorization_analyzer?: MlCategorizationAnalyzer categorization_field_name?: Field categorization_filters?: string[] detectors: MlDetector[] @@ -10696,17 +11017,16 @@ export interface MlAnomalyCause { export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time' export interface MlBucketInfluencer { + anomaly_score: double bucket_span: long - influencer_score: double influencer_field_name: Field - influencer_field_value: string - initial_influencer_score: double + initial_anomaly_score: double is_interim: boolean job_id: Id probability: double + raw_anomaly_score: double result_type: string timestamp: Time - foo?: string } export interface MlBucketSummary { @@ -10717,7 +11037,6 @@ export interface MlBucketSummary { initial_anomaly_score: double is_interim: boolean job_id: Id - partition_scores?: MlPartitionScore[] processing_time_ms: double result_type: string timestamp: Time @@ -10731,10 +11050,12 @@ export interface MlCalendarEvent { start_time: EpochMillis } -export interface MlCategorizationAnalyzer { - char_filter?: (string | AnalysisCharFilter)[] - filter?: (string | AnalysisTokenFilter)[] - tokenizer?: string | AnalysisTokenizer +export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition + +export interface MlCategorizationAnalyzerDefinition { + char_filter?: AnalysisCharFilter[] + filter?: AnalysisTokenFilter[] + tokenizer?: AnalysisTokenizer } export type MlCategorizationStatus = 'ok' | 'warn' @@ -10765,11 +11086,7 @@ export type MlChunkingMode = 'auto' | 'manual' | 'off' export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' -export interface MlCustomSettings { - custom_urls?: XpackUsageUrlConfig[] - created_by?: string - job_tags?: Record -} +export type 
 MlCustomSettings = any

 export interface MlDataCounts {
   bucket_count: long
@@ -10885,9 +11202,7 @@ export interface MlDataframeAnalysis {
   training_percent?: Percentage
 }

-export type MlDataframeAnalysisAnalyzedFields = string[] | MlDataframeAnalysisAnalyzedFieldsIncludeExclude
-
-export interface MlDataframeAnalysisAnalyzedFieldsIncludeExclude {
+export interface MlDataframeAnalysisAnalyzedFields {
   includes: string[]
   excludes: string[]
 }
@@ -10990,7 +11305,7 @@ export interface MlDataframeAnalyticsSource {
   index: Indices
   query?: QueryDslQueryContainer
   runtime_mappings?: MappingRuntimeFields
-  _source?: MlDataframeAnalysisAnalyzedFields
+  _source?: MlDataframeAnalysisAnalyzedFields | string[]
 }

 export interface MlDataframeAnalyticsStatsContainer {
@@ -11039,7 +11354,7 @@ export interface MlDataframeAnalyticsSummary {
   description?: string
   model_memory_limit?: string
   max_num_threads?: integer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields
+  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
   allow_lazy_start?: boolean
   create_time?: long
   version?: VersionString
@@ -11186,6 +11501,20 @@ export interface MlInfluence {
   influencer_field_values: string[]
 }

+export interface MlInfluencer {
+  bucket_span: long
+  influencer_score: double
+  influencer_field_name: Field
+  influencer_field_value: string
+  initial_influencer_score: double
+  is_interim: boolean
+  job_id: Id
+  probability: double
+  result_type: string
+  timestamp: Time
+  foo?: string
+}
+
 export interface MlJob {
   allow_lazy_open: boolean
   analysis_config: MlAnalysisConfig
@@ -11294,10 +11623,10 @@ export interface MlModelSizeStats {
   job_id: Id
   log_time: Time
   memory_status: MlMemoryStatus
-  model_bytes: long
-  model_bytes_exceeded: long
-  model_bytes_memory_limit: long
-  peak_model_bytes: long
+  model_bytes: ByteSize
+  model_bytes_exceeded?: ByteSize
+  model_bytes_memory_limit?: ByteSize
+  peak_model_bytes?: ByteSize
   assignment_memory_basis?: string
   result_type: string
   total_by_field_count: long
@@ -11354,14 +11683,6 @@ export interface MlPage {
   size?: integer
 }

-export interface MlPartitionScore {
-  initial_record_score: double
-  partition_field_name: Field
-  partition_field_value: string
-  probability: double
-  record_score: double
-}
-
 export interface MlPerPartitionCategorization {
   enabled?: boolean
   stop_on_warn?: boolean
@@ -11399,6 +11720,7 @@ export interface MlTotalFeatureImportanceStatistics {

 export interface MlTrainedModelConfig {
   model_id: Id
+  model_type: MlTrainedModelType
   tags: string[]
   version?: VersionString
   compressed_definition?: string
@@ -11440,6 +11762,8 @@ export interface MlTrainedModelStats {
   ingest?: Record
 }

+export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch'
+
 export interface MlValidationLoss {
   fold_values: string[]
   loss_type: string
@@ -11450,6 +11774,12 @@ export interface MlCloseJobRequest extends RequestBase {
   allow_no_match?: boolean
   force?: boolean
   timeout?: Time
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
+  body?: {
+    allow_no_match?: boolean
+    force?: boolean
+    timeout?: Time
+  }
 }

 export interface MlCloseJobResponse {
@@ -11676,11 +12006,11 @@ export interface MlExplainDataFrameAnalyticsRequest extends RequestBase {
   body?: {
     source?: MlDataframeAnalyticsSource
     dest?: MlDataframeAnalyticsDestination
-    analysis: MlDataframeAnalysisContainer
+    analysis?: MlDataframeAnalysisContainer
     description?: string
     model_memory_limit?: string
     max_num_threads?: integer
-    analyzed_fields?: MlDataframeAnalysisAnalyzedFields
+    analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
     allow_lazy_start?: boolean
   }
 }

 export interface MlExplainDataFrameAnalyticsResponse {
@@ -11692,12 +12022,17 @@ export interface MlFlushJobRequest extends RequestBase {
   job_id: Id
+  advance_time?: DateString
+  calc_interim?: boolean
+  end?: DateString
   skip_time?: string
+  start?: DateString
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     advance_time?: DateString
     calc_interim?: boolean
     end?: DateString
+    skip_time?: string
     start?: DateString
   }
 }

 export interface MlFlushJobResponse {
@@ -11709,6 +12044,9 @@ export interface MlForecastRequest extends RequestBase {
   job_id: Id
+  duration?: Time
+  expires_in?: Time
+  max_model_memory?: string
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     duration?: Time
@@ -11724,22 +12062,25 @@ export interface MlForecastResponse extends AcknowledgedResponseBase {

 export interface MlGetBucketsRequest extends RequestBase {
   job_id: Id
   timestamp?: Timestamp
+  anomaly_score?: double
+  desc?: boolean
+  end?: DateString
+  exclude_interim?: boolean
+  expand?: boolean
   from?: integer
   size?: integer
-  exclude_interim?: boolean
   sort?: Field
-  desc?: boolean
   start?: DateString
-  end?: DateString
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     anomaly_score?: double
     desc?: boolean
+    end?: DateString
     exclude_interim?: boolean
     expand?: boolean
+    page?: MlPage
     sort?: Field
     start?: DateString
-    end?: DateString
   }
 }

@@ -11750,11 +12091,11 @@ export interface MlGetBucketsResponse {

 export interface MlGetCalendarEventsRequest extends RequestBase {
   calendar_id: Id
-  job_id?: Id
   end?: DateString
   from?: integer
-  start?: string
+  job_id?: Id
   size?: integer
+  start?: string
 }

 export interface MlGetCalendarEventsResponse {
@@ -11876,7 +12217,7 @@ export interface MlGetInfluencersRequest extends RequestBase {

 export interface MlGetInfluencersResponse {
   count: long
-  influencers: MlBucketInfluencer[]
+  influencers: MlInfluencer[]
 }

 export interface MlGetJobStatsRequest extends RequestBase {
@@ -11911,8 +12252,11 @@ export interface MlGetModelSnapshotsRequest extends RequestBase {
   start?: Time
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
-    start?: Time
+    desc?: boolean
     end?: Time
+    page?: MlPage
+    sort?: Field
+    start?: Time
   }
 }

@@ -11930,6 +12274,16 @@ export interface MlGetOverallBucketsRequest extends RequestBase {
   overall_score?: double | string
   start?: Time
   top_n?: integer
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
+  body?: {
+    allow_no_match?: boolean
+    bucket_span?: Time
+    end?: Time
+    exclude_interim?: boolean
+    overall_score?: double | string
+    start?: Time
+    top_n?: integer
+  }
 }

 export interface MlGetOverallBucketsResponse {
@@ -11939,20 +12293,23 @@ export interface MlGetRecordsRequest extends RequestBase {
   job_id: Id
+  desc?: boolean
+  end?: DateString
   exclude_interim?: boolean
   from?: integer
+  record_score?: double
   size?: integer
+  sort?: Field
   start?: DateString
-  end?: DateString
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     desc?: boolean
+    end?: DateString
     exclude_interim?: boolean
     page?: MlPage
     record_score?: double
     sort?: Field
     start?: DateString
-    end?: DateString
   }
 }

@@ -12029,6 +12386,7 @@ export interface MlInfoResponse {

 export interface MlOpenJobRequest extends RequestBase {
   job_id: Id
+  timeout?: Time
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     timeout?: Time
@@ -12082,7 +12440,7 @@ export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig {
   analysis: MlDataframeAnalysisContainer
   model_memory_limit?: string
   max_num_threads?: integer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields
+  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
 }

 export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase {
@@ -12114,6 +12472,7 @@ export interface MlPutCalendarRequest extends RequestBase {
   calendar_id: Id
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
+    job_ids?: Id[]
     description?: string
   }
 }
@@ -12141,7 +12500,7 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase {
   body?: {
     allow_lazy_start?: boolean
     analysis: MlDataframeAnalysisContainer
-    analyzed_fields?: MlDataframeAnalysisAnalyzedFields
+    analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
     description?: string
     dest: MlDataframeAnalyticsDestination
     max_num_threads?: integer
@@ -12161,7 +12520,7 @@ export interface MlPutDataFrameAnalyticsResponse {
   allow_lazy_start: boolean
   max_num_threads: integer
   analysis: MlDataframeAnalysisContainer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields
+  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
 }

 export interface MlPutDatafeedRequest extends RequestBase {
@@ -12317,6 +12676,7 @@ export interface MlPutTrainedModelRequest extends RequestBase {
     inference_config: AggregationsInferenceConfigContainer
     input: MlPutTrainedModelInput
     metadata?: any
+    model_type?: MlTrainedModelType
     tags?: string[]
   }
 }
@@ -12379,6 +12739,7 @@ export interface MlResetJobResponse extends AcknowledgedResponseBase {

 export interface MlRevertModelSnapshotRequest extends RequestBase {
   job_id: Id
   snapshot_id: Id
+  delete_intervening_results?: boolean
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     delete_intervening_results?: boolean
@@ -12408,7 +12769,9 @@ export interface MlStartDataFrameAnalyticsResponse extends AcknowledgedResponseB

 export interface MlStartDatafeedRequest extends RequestBase {
   datafeed_id: Id
+  end?: Time
   start?: Time
+  timeout?: Time
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     end?: Time
@@ -12437,8 +12800,10 @@ export interface MlStopDatafeedRequest extends RequestBase {
   datafeed_id: Id
   allow_no_match?: boolean
   force?: boolean
+  timeout?: Time
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
+    allow_no_match?: boolean
     force?: boolean
     timeout?: Time
   }
@@ -12470,7 +12835,48 @@ export interface MlUpdateDataFrameAnalyticsResponse {
   allow_lazy_start: boolean
   max_num_threads: integer
   analysis: MlDataframeAnalysisContainer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields
+  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
+}
+
+export interface MlUpdateDatafeedRequest extends RequestBase {
+  datafeed_id: Id
+  allow_no_indices?: boolean
+  expand_wildcards?: ExpandWildcards
+  ignore_throttled?: boolean
+  ignore_unavailable?: boolean
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
+  body?: {
+    aggregations?: Record
+    chunking_config?: MlChunkingConfig
+    delayed_data_check_config?: MlDelayedDataCheckConfig
+    frequency?: Time
+    indices?: string[]
+    indexes?: string[]
+    indices_options?: MlDatafeedIndicesOptions
+    max_empty_searches?: integer
+    query?: QueryDslQueryContainer
+    query_delay?: Time
+    runtime_mappings?: MappingRuntimeFields
+    script_fields?: Record
+    scroll_size?: integer
+  }
+}
+
+export interface MlUpdateDatafeedResponse {
+  aggregations: Record
+  chunking_config: MlChunkingConfig
+  delayed_data_check_config?: MlDelayedDataCheckConfig
+  datafeed_id: Id
+  frequency: Time
+  indices: string[]
+  job_id: Id
+  indices_options?: MlDatafeedIndicesOptions
+  max_empty_searches: integer
+  query: QueryDslQueryContainer
+  query_delay: Time
+  runtime_mappings?: MappingRuntimeFields
+  script_fields?: Record
+  scroll_size: integer
 }

 export interface MlUpdateFilterRequest extends RequestBase {
@@ -12821,9 +13227,10 @@ export interface NodesHotThreadsRequest extends RequestBase {
   ignore_idle_threads?: boolean
   interval?: Time
   snapshots?: long
+  master_timeout?: Time
   threads?: long
-  thread_type?: ThreadType
   timeout?: Time
+  type?: ThreadType
 }

 export interface NodesHotThreadsResponse {
@@ -12998,7 +13405,7 @@ export interface NodesInfoNodeInfoSettingsClusterElection {
 }

 export interface NodesInfoNodeInfoSettingsHttp {
-  type: string | NodesInfoNodeInfoSettingsHttpType
+  type: NodesInfoNodeInfoSettingsHttpType | string
   'type.default'?: string
   compression?: boolean | string
   port?: integer | string
@@ -13056,7 +13463,7 @@ export interface NodesInfoNodeInfoSettingsNode {
 }

 export interface NodesInfoNodeInfoSettingsTransport {
-  type: string | NodesInfoNodeInfoSettingsTransportType
+  type: NodesInfoNodeInfoSettingsTransportType | string
   'type.default'?: string
   features?: NodesInfoNodeInfoSettingsTransportFeatures
 }
@@ -13177,16 +13584,12 @@ export interface NodesInfoResponse extends NodesNodesResponseBase {
   nodes: Record
 }

-export interface NodesReloadSecureSettingsNodeReloadException {
+export interface NodesReloadSecureSettingsNodeReloadError {
   name: Name
-  reload_exception?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy
+  reload_exception?: ErrorCause
 }

-export interface NodesReloadSecureSettingsNodeReloadExceptionCausedBy {
-  type: string
-  reason: string
-  caused_by?: NodesReloadSecureSettingsNodeReloadExceptionCausedBy
-}
+export type NodesReloadSecureSettingsNodeReloadResult = NodesStats | NodesReloadSecureSettingsNodeReloadError

 export interface NodesReloadSecureSettingsRequest extends RequestBase {
   node_id?: NodeIds
@@ -13199,7 +13602,7 @@ export interface NodesReloadSecureSettingsRequest extends RequestBase {

 export interface NodesReloadSecureSettingsResponse extends NodesNodesResponseBase {
   cluster_name: Name
-  nodes: Record
+  nodes: Record
 }

 export interface NodesStatsRequest extends RequestBase {
@@ -13540,7 +13943,7 @@ export interface SecurityIndicesPrivileges {
   field_security?: SecurityFieldSecurity | SecurityFieldSecurity[]
   names: Indices
   privileges: SecurityIndexPrivilege[]
-  query?: string | string[] | QueryDslQueryContainer
+  query?: string | string[]
   allow_restricted_indices?: boolean
 }

@@ -13608,6 +14011,7 @@ export interface SecurityChangePasswordRequest extends RequestBase {
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     password?: Password
+    password_hash?: string
   }
 }

@@ -13818,20 +14222,6 @@ export interface SecurityGetPrivilegesRequest extends RequestBase {

 export interface SecurityGetPrivilegesResponse extends DictionaryResponseBase> {
 }

-export interface SecurityGetRoleInlineRoleTemplate {
-  template: SecurityGetRoleInlineRoleTemplateSource
-  format?: SecurityGetRoleTemplateFormat
-}
-
-export interface SecurityGetRoleInlineRoleTemplateSource {
-  source: string
-}
-
-export interface SecurityGetRoleInvalidRoleTemplate {
-  template: string
-  format?: SecurityGetRoleTemplateFormat
-}
-
 export interface SecurityGetRoleRequest extends RequestBase {
   name?: Name
 }
@@ -13849,15 +14239,9 @@ export interface SecurityGetRoleRole {
   role_templates?: SecurityGetRoleRoleTemplate[]
 }

-export type SecurityGetRoleRoleTemplate = SecurityGetRoleInlineRoleTemplate | SecurityGetRoleStoredRoleTemplate | SecurityGetRoleInvalidRoleTemplate
-
-export interface SecurityGetRoleStoredRoleTemplate {
-  template: SecurityGetRoleStoredRoleTemplateId
+export interface SecurityGetRoleRoleTemplate {
   format?: SecurityGetRoleTemplateFormat
-}
-
-export interface SecurityGetRoleStoredRoleTemplateId {
-  id: string
+  template: Script
 }

 export type SecurityGetRoleTemplateFormat = 'string' | 'json'
@@ -14681,7 +15065,7 @@ export interface SqlTranslateRequest extends RequestBase {

 export interface SqlTranslateResponse {
   size: long
-  _source: boolean | Fields | SearchSourceFilter
+  _source: SearchSourceConfig
   fields: Record[]
   sort: SearchSort
 }
@@ -14701,6 +15085,8 @@ export interface SslCertificatesRequest extends RequestBase {

 export type SslCertificatesResponse = SslCertificatesCertificateInformation[]

+export type TasksGroupBy = 'nodes' | 'parents' | 'none'
+
 export interface TasksInfo {
   action: string
   cancellable: boolean
@@ -14783,7 +15169,7 @@ export interface TasksGetResponse {

 export interface TasksListRequest extends RequestBase {
   actions?: string | string[]
   detailed?: boolean
-  group_by?: GroupBy
+  group_by?: TasksGroupBy
   nodes?: string[]
   parent_task_id?: Id
   timeout?: Time
@@ -14793,7 +15179,7 @@ export interface TasksListResponse {
   node_failures?: ErrorCause[]
   nodes?: Record
-  tasks?: Record | TasksInfo[]
+  tasks?: Record
 }

 export interface TextStructureFindStructureFieldStat {
@@ -15172,7 +15558,7 @@ export type WatcherConnectionScheme = 'http' | 'https'

 export type WatcherCronExpression = string

 export interface WatcherDailySchedule {
-  at: string[] | WatcherTimeOfDay
+  at: WatcherTimeOfDay[]
 }

 export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday'
@@ -15230,6 +15616,11 @@ export interface WatcherExecutionThreadPool {
   queue_size: long
 }

+export interface WatcherHourAndMinute {
+  hour: integer[]
+  minute: integer[]
+}
+
 export interface WatcherHourlySchedule {
   minute: integer[]
 }
@@ -15387,8 +15778,8 @@ export interface WatcherScheduleContainer {
 }

 export interface WatcherScheduleTriggerEvent {
-  scheduled_time: DateString | string
-  triggered_time?: DateString | string
+  scheduled_time: DateString
+  triggered_time?: DateString
 }

 export interface WatcherScriptCondition {
@@ -15470,10 +15861,7 @@ export interface WatcherThrottleState {
   timestamp: DateString
 }

-export interface WatcherTimeOfDay {
-  hour: integer[]
-  minute: integer[]
-}
+export type WatcherTimeOfDay = string | WatcherHourAndMinute

 export interface WatcherTimeOfMonth {
   at: string[]
@@ -15617,7 +16005,7 @@ export interface WatcherPutWatchRequest extends RequestBase {
   id: Id
   active?: boolean
   if_primary_term?: long
-  if_sequence_number?: long
+  if_seq_no?: SequenceNumber
   version?: VersionNumber
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
@@ -15685,7 +16073,7 @@ export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQue
   watch_record_id: Id
 }

-export type WatcherStatsWatcherMetric = '_all' | 'queued_watches' | 'current_watches' | 'pending_watches'
+export type WatcherStatsWatcherMetric = '_all' | 'all' | 'queued_watches' | 'current_watches' | 'pending_watches'

 export interface WatcherStatsWatcherNodeStats {
   current_watches?: WatcherStatsWatchRecordStats[]
@@ -15803,11 +16191,6 @@ export interface XpackUsageBase {
   enabled: boolean
 }

-export interface XpackUsageBaseUrlConfig {
-  url_name: string
-  url_value: string
-}
-
 export interface XpackUsageCcr extends XpackUsageBase {
   auto_follow_patterns_count: integer
   follower_indices_count: integer
@@ -15920,13 +16303,15 @@ export interface XpackUsageIpFilter {
   transport: boolean
 }

-export interface XpackUsageKibanaUrlConfig extends XpackUsageBaseUrlConfig {
-  time_range?: string
+export interface XpackUsageJobsKeys {
+  _all?: XpackUsageAllJobs
 }
+export type XpackUsageJobs = XpackUsageJobsKeys
+& { [property: string]: MlJob | XpackUsageAllJobs }

 export interface XpackUsageMachineLearning extends XpackUsageBase {
   datafeeds: Record
-  jobs: Record | Partial>
+  jobs: XpackUsageJobs
   node_count: integer
   data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs
   inference: XpackUsageMlInference
@@ -16134,8 +16519,6 @@ export interface XpackUsageSsl {
   transport: XpackUsageFeatureToggle
 }

-export type XpackUsageUrlConfig = XpackUsageBaseUrlConfig | XpackUsageKibanaUrlConfig
-
 export interface XpackUsageVector extends XpackUsageBase {
   dense_vector_dims_avg_count: integer
   dense_vector_fields_count: integer
@@ -16185,7 +16568,6 @@ export interface SpecUtilsCommonQueryParameters {
   filter_path?: string | string[]
   human?: boolean
   pretty?: boolean
-  source_query_string?: string
 }

 export interface SpecUtilsCommonCatQueryParameters {
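The typesWithBodyKey hunks above all follow one pattern: request keys that previously lived only inside `body` are now accepted at the top level, while the old `body` wrapper is kept behind a `@deprecated` marker. A minimal sketch of what this means at a call site, assuming a configured client and an illustrative job id (not taken from this patch):

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })

    // New style: `force` and `timeout` sit directly on the params object.
    await client.ml.closeJob({ job_id: 'my-job', force: true, timeout: '30s' })

    // Old style still type-checks, but the `body` key is now flagged as deprecated.
    await client.ml.closeJob({ job_id: 'my-job', body: { force: true, timeout: '30s' } })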
From 748b45e2ca40b492d82be09c41bc75c505782bbf Mon Sep 17 00:00:00 2001
From: delvedor
Date: Tue, 23 Nov 2021 14:10:05 +0100
Subject: [PATCH 120/647] Updated imports

---
 src/helpers.ts | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/helpers.ts b/src/helpers.ts
index b945ac20c..ceaed7524 100644
--- a/src/helpers.ts
+++ b/src/helpers.ts
@@ -52,7 +52,7 @@ export interface MsearchHelperOptions extends T.MsearchRequest {

 export interface MsearchHelper extends Promise {
   stop: (error?: Error | null) => void
-  search: (header: T.MsearchHeader, body: T.MsearchBody) => Promise>
+  search: (header: T.MsearchMultisearchHeader, body: T.MsearchMultisearchBody) => Promise>
 }

 export interface MsearchHelperResponse {
@@ -315,7 +315,7 @@ export default class Helpers {
     // TODO: support abort a single search?
     // NOTE: the validation checks are synchronous and the callback/promise will
     // be resolved in the same tick. We might want to fix this in the future.
-    search (header: T.MsearchHeader, body: T.MsearchBody): Promise> {
+    search (header: T.MsearchMultisearchHeader, body: T.MsearchMultisearchBody): Promise> {
       if (stopReading) {
         const error = stopError === null
           ? new ConfigurationError('The msearch processor has been stopped')
@@ -350,7 +350,7 @@ export default class Helpers {

   async function iterate (): Promise {
     const { semaphore, finish } = buildSemaphore()
-    const msearchBody: Array = []
+    const msearchBody: Array = []
     const callbacks: any[] = []
     let loadedOperations = 0
     timeoutRef = setTimeout(onFlushTimeout, flushInterval) // eslint-disable-line
@@ -440,7 +440,7 @@
     }
   }

-  function send (msearchBody: Array, callbacks: any[]): void {
+  function send (msearchBody: Array, callbacks: any[]): void {
    /* istanbul ignore if */
     if (running > concurrency) {
       throw new Error('Max concurrency reached')
@@ -458,7 +458,7 @@
     }
   }

-  function msearchOperation (msearchBody: Array, callbacks: any[], done: () => void): void {
+  function msearchOperation (msearchBody: Array, callbacks: any[], done: () => void): void {
     let retryCount = retries

     // Instead of going full on async-await, which would make the code easier to read,
@@ -466,7 +466,7 @@
     // This because every time we use async await, V8 will create multiple promises
     // behind the scenes, making the code slightly slower.
     tryMsearch(msearchBody, callbacks, retrySearch)
-    function retrySearch (msearchBody: Array, callbacks: any[]): void {
+    function retrySearch (msearchBody: Array, callbacks: any[]): void {
       if (msearchBody.length > 0 && retryCount > 0) {
         retryCount -= 1
         setTimeout(tryMsearch, wait, msearchBody, callbacks, retrySearch)
@@ -478,7 +478,7 @@

     // This function never returns an error, if the msearch operation fails,
     // the error is dispatched to all search executors.
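Since `MsearchHelper.search` is now typed against `T.MsearchMultisearchHeader` and `T.MsearchMultisearchBody`, a call site looks like the sketch below (the index name and query are illustrative; the helper batches individual searches into `_msearch` requests internally):

    const m = client.helpers.msearch()

    const result = await m.search(
      { index: 'stackoverflow' },
      { query: { match: { title: 'javascript' } } }
    )
    console.log(result.documents)

    m.stop()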
-    function tryMsearch (msearchBody: Array, callbacks: any[], done: (msearchBody: Array, callbacks: any[]) => void): void {
+    function tryMsearch (msearchBody: Array, callbacks: any[], done: (msearchBody: Array, callbacks: any[]) => void): void {
       client.msearch(Object.assign({}, msearchOptions, { body: msearchBody }), reqOptions as TransportRequestOptionsWithMeta)
         .then(results => {
           const retryBody = []

From 194614564a55a9cc90e9091da219958275af91f8 Mon Sep 17 00:00:00 2001
From: delvedor
Date: Wed, 24 Nov 2021 10:33:14 +0100
Subject: [PATCH 121/647] Bumped v8.0.0-canary.37

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index f5bbc5f2f..9905a3fca 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
   "version": "8.0.0-beta.1",
-  "versionCanary": "8.0.0-canary.36",
+  "versionCanary": "8.0.0-canary.37",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",

From dbfc8fc4d19fce93f3a078ce87f14e5e837c80db Mon Sep 17 00:00:00 2001
From: Tomas Della Vedova
Date: Thu, 16 Dec 2021 16:45:48 +0100
Subject: [PATCH 122/647] Propagate bulk helper document generic (#1606)

---
 src/helpers.ts                 | 4 +++-
 test/unit/helpers/bulk.test.ts | 8 +++++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/src/helpers.ts b/src/helpers.ts
index ceaed7524..525977193 100644
--- a/src/helpers.ts
+++ b/src/helpers.ts
@@ -520,7 +520,7 @@ export default class Helpers {
    * @param {object} reqOptions - The client optional configuration for this request.
    * @return {object} The possible operations to run with the datasource.
    */
-  bulk (options: BulkHelperOptions, reqOptions: TransportRequestOptions = {}): BulkHelper {
+  bulk (options: BulkHelperOptions, reqOptions: TransportRequestOptions = {}): BulkHelper {
     const client = this[kClient]
     const { serializer } = client
     if (this[kMetaHeader] !== null) {
@@ -790,6 +790,7 @@
           status: 429,
           error: null,
           operation: serializer.deserialize(bulkBody[i]),
+          // @ts-expect-error
           document: operation !== 'delete'
             ? serializer.deserialize(bulkBody[i + 1])
             /* istanbul ignore next */
@@ -841,6 +842,7 @@
           status: responseItem.status,
           error: responseItem.error ?? null,
           operation: serializer.deserialize(bulkBody[indexSlice]),
+          // @ts-expect-error
           document: operation !== 'delete'
             ? serializer.deserialize(bulkBody[indexSlice + 1])
             : null,

diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts
index d9bfcda6c..5a182009e 100644
--- a/test/unit/helpers/bulk.test.ts
+++ b/test/unit/helpers/bulk.test.ts
@@ -42,6 +42,11 @@ const dataset = [
   { user: 'tyrion', age: 39 }
 ]

+interface Document {
+  user: string
+  age: number
+}
+
 test('bulk index', t => {
   t.test('datasource as array', t => {
     t.test('Should perform a bulk request', async t => {
@@ -65,11 +70,12 @@
         node: 'http://localhost:9200/',
         Connection: MockConnection
       })
-      const result = await client.helpers.bulk({
+      const result = await client.helpers.bulk({
         datasource: dataset.slice(),
         flushBytes: 1,
         concurrency: 1,
         onDocument (doc) {
+          t.type(doc.user, 'string') // testing that doc is type of Document
           return {
             index: { _index: 'test' }
           }
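The test added in this patch doubles as usage documentation: the document generic passed to `client.helpers.bulk` now flows through to `onDocument`, so `doc` is typed instead of `unknown`. A trimmed sketch based on that test (the `Document` interface and index name come from the test itself):

    interface Document {
      user: string
      age: number
    }

    const result = await client.helpers.bulk<Document>({
      datasource: dataset.slice(),
      onDocument (doc) {
        // doc is typed as Document here, so doc.user is a string
        return { index: { _index: 'test' } }
      }
    })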
From 4ad5daeaf401ce8ebb28b940075e0a67e56ff9ce Mon Sep 17 00:00:00 2001
From: Tomas Della Vedova
Date: Thu, 16 Dec 2021 17:49:05 +0100
Subject: [PATCH 123/647] Aggregations should be a generic in responses (#1596)

---
 src/api/api/bulk.ts             |    7 +-
 src/api/api/create.ts           |    2 +-
 src/api/api/delete.ts           |    2 +-
 src/api/api/exists_source.ts    |    2 +-
 src/api/api/indices.ts          |   24 +-
 src/api/api/ml.ts               |  120 +++-
 src/api/api/msearch.ts          |    8 +-
 src/api/api/msearch_template.ts |    8 +-
 src/api/api/rollup.ts           |   10 +-
 src/api/api/scroll.ts           |    8 +-
 src/api/api/search.ts           |    8 +-
 src/api/api/transform.ts        |   33 +-
 src/api/api/update.ts           |    2 +-
 src/api/kibana.ts               |   25 +-
 src/api/types.ts                |  990 +++++++++++++++++++-----------
 src/api/typesWithBodyKey.ts     | 1002 ++++++++++++++++++++-----------
 src/helpers.ts                  |   15 +-
 test/unit/api.test.ts           |  105 ++++
 18 files changed, 1538 insertions(+), 833 deletions(-)

diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts
index 3b4e4ae04..30dfb4995 100644
--- a/src/api/api/bulk.ts
+++ b/src/api/api/bulk.ts
@@ -41,7 +41,7 @@ export default async function BulkApi (this: That, params: T.
 export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise>
 export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise
 export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise {
-  const acceptedPath: string[] = ['index', 'type']
+  const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['operations']
   const querystring: Record = {}
   // @ts-expect-error
@@ -61,10 +61,7 @@ export default async function BulkApi (this: That, params: T.
   let method = ''
   let path = ''
-  if (params.index != null && params.type != null) {
-    method = 'POST'
-    path = `/${encodeURIComponent(params.index.toString())}/${encodeURIComponent(params.type.toString())}/_bulk`
-  } else if (params.index != null) {
+  if (params.index != null) {
     method = 'POST'
     path = `/${encodeURIComponent(params.index.toString())}/_bulk`
   } else {

diff --git a/src/api/api/create.ts b/src/api/api/create.ts
index 31e3fca29..61e3fbc75 100644
--- a/src/api/api/create.ts
+++ b/src/api/api/create.ts
@@ -41,7 +41,7 @@ export default async function CreateApi (this: That, params
 export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise>
 export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptions): Promise
 export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptions): Promise {
-  const acceptedPath: string[] = ['id', 'index', 'type']
+  const acceptedPath: string[] = ['id', 'index']
   const acceptedBody: string[] = ['document']
   const querystring: Record = {}
   // @ts-expect-error

diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts
index ee4dba53b..1f7c06ddf 100644
--- a/src/api/api/delete.ts
+++ b/src/api/api/delete.ts
@@ -41,7 +41,7 @@ export default async function DeleteApi (this: That, params: T.DeleteRequest | T
 export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise>
 export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptions): Promise
 export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptions): Promise {
-  const acceptedPath: string[] = ['id', 'index', 'type']
+  const acceptedPath: string[] = ['id', 'index']
   const querystring: Record = {}
   const body = undefined

diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts
index eec41b8cd..590c4f7f1 100644
--- a/src/api/api/exists_source.ts
+++ b/src/api/api/exists_source.ts
@@ -41,7 +41,7 @@ export default async function ExistsSourceApi (this: That, params: T.ExistsSourc
 export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise>
 export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptions): Promise
 export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptions): Promise {
-  const acceptedPath: string[] = ['id', 'index', 'type']
+  const acceptedPath: string[] = ['id', 'index']
   const querystring: Record = {}
   const body = undefined

diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
index b6d6a5b00..ad1f8fe3e 100644
--- a/src/api/api/indices.ts
+++ b/src/api/api/indices.ts
@@ -513,28 +513,6 @@ export default class Indices {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

-  async existsType (this: That, params: T.IndicesExistsTypeRequest | TB.IndicesExistsTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise
-  async existsType (this: That, params: T.IndicesExistsTypeRequest | TB.IndicesExistsTypeRequest, options?: TransportRequestOptionsWithMeta): Promise>
-  async existsType (this: That, params: T.IndicesExistsTypeRequest | TB.IndicesExistsTypeRequest, options?: TransportRequestOptions): Promise
-  async existsType (this: That, params: T.IndicesExistsTypeRequest | TB.IndicesExistsTypeRequest, options?: TransportRequestOptions): Promise {
-    const acceptedPath: string[] = ['index', 'type']
-    const querystring: Record = {}
-    const body = undefined
-
-    for (const key in params) {
-      if (acceptedPath.includes(key)) {
-        continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      }
-    }
-
-    const method = 'HEAD'
-    const path = `/${encodeURIComponent(params.index.toString())}/_mapping/${encodeURIComponent(params.type.toString())}`
-    return await this.transport.request({ path, method, querystring, body }, options)
-  }
-
   async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
   async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
   async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
@@ -1563,7 +1541,7 @@ export default class Indices {
   async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithMeta): Promise>
   async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise
   async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise {
-    const acceptedPath: string[] = ['index', 'type']
+    const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['query']
     const querystring: Record = {}
     // @ts-expect-error
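The hunks above drop the legacy `type` path parameter from the document APIs, so request URLs collapse to the typeless form such as `/<index>/_bulk`. A minimal sketch of the resulting call shape (index name and documents are illustrative):

    // The path is now always computed as /my-index/_bulk;
    // the /<index>/<type>/_bulk branch no longer exists.
    await client.bulk({
      index: 'my-index',
      operations: [
        { index: { _id: '1' } },
        { user: 'arya', age: 18 }
      ]
    })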
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index 500f38024..0cbe94600 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -937,6 +937,28 @@ export default class Ml {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  async getModelSnapshotUpgradeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
+  async getModelSnapshotUpgradeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
+  async getModelSnapshotUpgradeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
+  async getModelSnapshotUpgradeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+    const acceptedPath: string[] = ['job_id', 'snapshot_id']
+    const querystring: Record = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_upgrade/_stats`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
   async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise>
   async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise
@@ -1106,19 +1128,31 @@ export default class Ml {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

-  async inferTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async inferTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async inferTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async inferTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise
+  async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['model_id']
+    const acceptedBody: string[] = ['docs']
     const querystring: Record = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }

-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -1578,19 +1612,31 @@ export default class Ml {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

-  async putTrainedModelDefinitionPart (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async putTrainedModelDefinitionPart (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async putTrainedModelDefinitionPart (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async putTrainedModelDefinitionPart (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['model_id', 'part']
+    const acceptedBody: string[] = ['definition', 'total_definition_length', 'total_parts']
     const querystring: Record = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }

-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -1600,19 +1646,31 @@ export default class Ml {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

-  async putTrainedModelVocabulary (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async putTrainedModelVocabulary (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async putTrainedModelVocabulary (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async putTrainedModelVocabulary (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['model_id']
+    const acceptedBody: string[] = ['vocabulary']
     const querystring: Record = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }

-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -1757,19 +1815,19 @@ export default class Ml {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

-  async startTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async startTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async startTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async startTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise
+  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['model_id']
     const querystring: Record = {}
     const body = undefined

-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -1835,19 +1893,19 @@ export default class Ml {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

-  async stopTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async stopTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async stopTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async stopTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise
+  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['model_id']
     const querystring: Record = {}
     const body = undefined

-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
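The ml.ts changes above replace `T.TODO` signatures with concrete request types for the trained model deployment APIs. A hedged sketch of the new surface — the model id and document shape are illustrative, and these APIs were still experimental at this point:

    await client.ml.startTrainedModelDeployment({ model_id: 'my-model' })

    const inference = await client.ml.inferTrainedModelDeployment({
      model_id: 'my-model',
      docs: [{ text_field: 'some text to run inference on' }]
    })

    await client.ml.stopTrainedModelDeployment({ model_id: 'my-model' })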
diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts
index 67139b26e..523885cd3 100644
--- a/src/api/api/msearch.ts
+++ b/src/api/api/msearch.ts
@@ -37,10 +37,10 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

-export default async function MsearchApi (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
-export default async function MsearchApi (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
-export default async function MsearchApi (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise>
-export default async function MsearchApi (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise {
+export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
+export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
+export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise>
+export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['searches']
   const querystring: Record = {}

diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts
index 8cd92bcb7..ea9f3dfd7 100644
--- a/src/api/api/msearch_template.ts
+++ b/src/api/api/msearch_template.ts
@@ -37,10 +37,10 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

-export default async function MsearchTemplateApi (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
-export default async function MsearchTemplateApi (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
-export default async function MsearchTemplateApi (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise>
-export default async function MsearchTemplateApi (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise {
+export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
+export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
+export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise>
+export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['search_templates']
   const querystring: Record = {}
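`msearch` and `msearchTemplate` now take a `TAggregations` generic with a sensible default, mirroring `search`. A sketch of a typed multi-search call — the index name and the `Doc` interface are assumptions, since the response shape is asserted by the caller, not validated by the client:

    interface Doc { title: string }

    const result = await client.msearch<Doc>({
      searches: [
        { index: 'my-index' },
        { query: { match: { title: 'javascript' } } }
      ]
    })

    for (const response of result.responses) {
      if ('hits' in response) {
        // response.hits.hits is typed as SearchHit<Doc>[]
        console.log(response.hits.hits)
      }
    }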
diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts
index cf9ed6ac8..4e19528ed 100644
--- a/src/api/api/rollup.ts
+++ b/src/api/api/rollup.ts
@@ -208,11 +208,11 @@ export default class Rollup {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

-  async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
-  async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
-  async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise>
-  async rollupSearch (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise {
+  async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
+  async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
+  async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise>
+  async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise {
-    const acceptedPath: string[] = ['index', 'type']
+    const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'size']
     const querystring: Record = {}
     // @ts-expect-error

diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts
index c35577d1d..e04fd9aba 100644
--- a/src/api/api/scroll.ts
+++ b/src/api/api/scroll.ts
@@ -37,10 +37,10 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

-export default async function ScrollApi (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
-export default async function ScrollApi (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
-export default async function ScrollApi (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise>
-export default async function ScrollApi (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise {
+export default async function ScrollApi> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
+export default async function ScrollApi> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
+export default async function ScrollApi> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise>
+export default async function ScrollApi> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise {
   const acceptedPath: string[] = []
   const acceptedBody: string[] = ['scroll', 'scroll_id']
   const querystring: Record = {}

diff --git a/src/api/api/search.ts b/src/api/api/search.ts
index 754e7b9f9..0ab920b9e 100644
--- a/src/api/api/search.ts
+++ b/src/api/api/search.ts
@@ -37,10 +37,10 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

-export default async function SearchApi (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
-export default async function SearchApi (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
-export default async function SearchApi (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise>
-export default async function SearchApi (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise {
+export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
+export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
+export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise>
+export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
   const querystring: Record = {}
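With `search`, `scroll`, and `rollupSearch` updated the same way, `response.aggregations` can be given a caller-defined shape instead of the generic aggregate record. A minimal sketch, assuming an illustrative index and a terms aggregation whose shape the caller asserts via the second generic:

    interface Doc { title: string }
    interface TitleAggs {
      titles: { buckets: Array<{ key: string, doc_count: number }> }
    }

    const response = await client.search<Doc, TitleAggs>({
      index: 'my-index',
      query: { match_all: {} },
      aggregations: {
        titles: { terms: { field: 'title.keyword' } }
      }
    })

    // response.aggregations is now TitleAggs | undefined
    console.log(response.aggregations?.titles.buckets)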
diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts
index 1edbc932c..beef02852 100644
--- a/src/api/api/transform.ts
+++ b/src/api/api/transform.ts
@@ -164,7 +164,7 @@ export default class Transform {
   async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise
   async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['transform_id']
-    const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest']
+    const acceptedBody: string[] = ['dest', 'description', 'frequency', 'latest', '_meta', 'pivot', 'retention_policy', 'settings', 'source', 'sync']
     const querystring: Record = {}
     // @ts-expect-error
     const userBody: any = params?.body
@@ -193,6 +193,28 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptions): Promise
+  async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptions): Promise {
+    const acceptedPath: string[] = ['transform_id']
+    const querystring: Record = {}
+    const body = undefined
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_reset`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise>
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptions): Promise
@@ -271,10 +293,10 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

-  async upgradeTransforms (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async upgradeTransforms (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async upgradeTransforms (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async upgradeTransforms (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise
+  async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
     const querystring: Record = {}
     const body = undefined
@@ -284,6 +306,7 @@
     if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
+      // @ts-expect-error
       querystring[key] = params[key]
     }
   }

diff --git a/src/api/api/update.ts b/src/api/api/update.ts
index a38f31c22..52be55709 100644
--- a/src/api/api/update.ts
+++ b/src/api/api/update.ts
@@ -41,7 +41,7 @@ export default async function UpdateApi (this: That, params: T.UpdateRequest |
 export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
 export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptions): Promise>
 export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptions): Promise {
-  const acceptedPath: string[] = ['id', 'index', 'type']
+  const acceptedPath: string[] = ['id', 'index']
   const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', '_source', 'upsert']
   const querystring: Record = {}
   // @ts-expect-error

diff --git a/src/api/kibana.ts b/src/api/kibana.ts
index 2e06a7d6d..226314932 100644
--- a/src/api/kibana.ts
+++ b/src/api/kibana.ts
@@ -203,7 +203,6 @@ interface KibanaClient {
     existsAlias: (params: T.IndicesExistsAliasRequest| TB.IndicesExistsAliasRequest, options?: TransportRequestOptions) => Promise>
     existsIndexTemplate: (params: T.IndicesExistsIndexTemplateRequest| TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions) => Promise>
     existsTemplate: (params: T.IndicesExistsTemplateRequest| TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions) => Promise>
-    existsType: (params: T.IndicesExistsTypeRequest| TB.IndicesExistsTypeRequest, options?: TransportRequestOptions) => Promise>
     fieldUsageStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
     flush: (params?: T.IndicesFlushRequest| TB.IndicesFlushRequest, options?: TransportRequestOptions) => Promise>
     forcemerge: (params?: T.IndicesForcemergeRequest| TB.IndicesForcemergeRequest, options?: TransportRequestOptions) => Promise>
@@ -301,12 +300,13 @@ interface KibanaClient {
     getInfluencers: (params: T.MlGetInfluencersRequest| TB.MlGetInfluencersRequest, options?: TransportRequestOptions) => Promise>
     getJobStats: (params?: T.MlGetJobStatsRequest| TB.MlGetJobStatsRequest, options?: TransportRequestOptions) => Promise>
     getJobs: (params?: T.MlGetJobsRequest| TB.MlGetJobsRequest, options?: TransportRequestOptions) => Promise>
+    getModelSnapshotUpgradeStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
     getModelSnapshots: (params: T.MlGetModelSnapshotsRequest| TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions) => Promise>
     getOverallBuckets: (params: T.MlGetOverallBucketsRequest| TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions) => Promise>
     getRecords: (params: T.MlGetRecordsRequest| TB.MlGetRecordsRequest, options?: TransportRequestOptions) => Promise>
     getTrainedModels: (params?: T.MlGetTrainedModelsRequest| TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions) => Promise>
     getTrainedModelsStats: (params?: T.MlGetTrainedModelsStatsRequest| TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions) => Promise>
-    inferTrainedModelDeployment: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    inferTrainedModelDeployment: (params: T.MlInferTrainedModelDeploymentRequest| TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise>
     info: (params?: T.MlInfoRequest| TB.MlInfoRequest, options?: TransportRequestOptions) => Promise>
     openJob: (params: T.MlOpenJobRequest| TB.MlOpenJobRequest, options?: TransportRequestOptions) => Promise>
     postCalendarEvents: (params: T.MlPostCalendarEventsRequest| TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions) => Promise>
@@ -321,17 +321,17 @@ interface KibanaClient {
     putJob: (params: T.MlPutJobRequest| TB.MlPutJobRequest, options?: TransportRequestOptions) => Promise>
     putTrainedModel: (params: T.MlPutTrainedModelRequest| TB.MlPutTrainedModelRequest, options?: TransportRequestOptions) => Promise>
     putTrainedModelAlias: (params: T.MlPutTrainedModelAliasRequest| TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions) => Promise>
-    putTrainedModelDefinitionPart: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    putTrainedModelVocabulary: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    putTrainedModelDefinitionPart: (params: T.MlPutTrainedModelDefinitionPartRequest| TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions) => Promise>
+    putTrainedModelVocabulary: (params: T.MlPutTrainedModelVocabularyRequest| TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions) => Promise>
     resetJob: (params: T.MlResetJobRequest| TB.MlResetJobRequest, options?: TransportRequestOptions) => Promise>
     revertModelSnapshot: (params: T.MlRevertModelSnapshotRequest| TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptions) => Promise>
     setUpgradeMode: (params?: T.MlSetUpgradeModeRequest| TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions) => Promise>
     startDataFrameAnalytics: (params: T.MlStartDataFrameAnalyticsRequest| TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise>
     startDatafeed: (params: T.MlStartDatafeedRequest| TB.MlStartDatafeedRequest, options?: TransportRequestOptions) => Promise>
-    startTrainedModelDeployment: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    startTrainedModelDeployment: (params: T.MlStartTrainedModelDeploymentRequest| TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise>
     stopDataFrameAnalytics: (params: T.MlStopDataFrameAnalyticsRequest| TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise>
     stopDatafeed: (params: T.MlStopDatafeedRequest| TB.MlStopDatafeedRequest, options?: TransportRequestOptions) => Promise>
-    stopTrainedModelDeployment: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    stopTrainedModelDeployment: (params: T.MlStopTrainedModelDeploymentRequest| TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise>
     updateDataFrameAnalytics: (params: T.MlUpdateDataFrameAnalyticsRequest| TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise>
     updateDatafeed: (params: T.MlUpdateDatafeedRequest| TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions) => Promise>
     updateFilter: (params: T.MlUpdateFilterRequest| TB.MlUpdateFilterRequest, options?: TransportRequestOptions) => Promise>
@@ -344,8 +344,8 @@ interface KibanaClient {
   monitoring: {
     bulk: (params: T.MonitoringBulkRequest| TB.MonitoringBulkRequest, options?: TransportRequestOptions) => Promise>
   }
-  msearch: (params?: T.MsearchRequest| TB.MsearchRequest, options?: TransportRequestOptions) => Promise, TContext>>
-  msearchTemplate: (params?: T.MsearchTemplateRequest| TB.MsearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>>
+  msearch: (params?: T.MsearchRequest| TB.MsearchRequest, options?: TransportRequestOptions) => Promise, TContext>>
+  msearchTemplate: (params?: T.MsearchTemplateRequest| TB.MsearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>>
   mtermvectors: (params?: T.MtermvectorsRequest| TB.MtermvectorsRequest, options?: TransportRequestOptions) => Promise>
   nodes: {
     clearRepositoriesMeteringArchive: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
@@ -370,13 +370,13 @@ interface KibanaClient {
     getRollupIndexCaps: (params: T.RollupGetRollupIndexCapsRequest| TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions) => Promise>
     putJob: (params: T.RollupPutJobRequest| TB.RollupPutJobRequest, options?: TransportRequestOptions) => Promise>
     rollup: (params: T.RollupRollupRequest| TB.RollupRollupRequest, options?: TransportRequestOptions) => Promise>
-    rollupSearch: (params: T.RollupRollupSearchRequest| TB.RollupRollupSearchRequest, options?: TransportRequestOptions) => Promise, TContext>>
+    rollupSearch: (params: T.RollupRollupSearchRequest| TB.RollupRollupSearchRequest, options?: TransportRequestOptions) => Promise, TContext>>
    startJob: (params: T.RollupStartJobRequest| TB.RollupStartJobRequest, options?: TransportRequestOptions) => Promise>
     stopJob: (params: T.RollupStopJobRequest| TB.RollupStopJobRequest, options?: TransportRequestOptions) => Promise>
   }
   scriptsPainlessExecute: (params?: T.ScriptsPainlessExecuteRequest| TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions) => Promise, TContext>>
-  scroll: (params?: T.ScrollRequest| TB.ScrollRequest, options?: TransportRequestOptions) => Promise, TContext>>
-  search: (params?: T.SearchRequest| TB.SearchRequest, options?: TransportRequestOptions) => Promise, TContext>>
+  scroll: (params?: T.ScrollRequest| TB.ScrollRequest, options?: TransportRequestOptions) => Promise, TContext>>
+  search: (params?: T.SearchRequest| TB.SearchRequest, options?: TransportRequestOptions) => Promise, TContext>>
   searchMvt: (params: T.SearchMvtRequest| TB.SearchMvtRequest, options?: TransportRequestOptions) => Promise>
   searchShards: (params?: T.SearchShardsRequest| TB.SearchShardsRequest, options?: TransportRequestOptions) => Promise>
   searchTemplate: (params?: T.SearchTemplateRequest| TB.SearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>>
@@ -488,10 +488,11 @@ interface KibanaClient {
     getTransformStats: (params: T.TransformGetTransformStatsRequest| TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions) => Promise>
     previewTransform: (params?: T.TransformPreviewTransformRequest| TB.TransformPreviewTransformRequest, options?: TransportRequestOptions) => Promise, TContext>>
     putTransform: (params: T.TransformPutTransformRequest| TB.TransformPutTransformRequest, options?: TransportRequestOptions) => Promise>
+    resetTransform: (params: T.TransformResetTransformRequest| TB.TransformResetTransformRequest, options?: TransportRequestOptions) => Promise>
    startTransform: (params: T.TransformStartTransformRequest| TB.TransformStartTransformRequest, options?: TransportRequestOptions) => Promise>
     stopTransform: (params: T.TransformStopTransformRequest| TB.TransformStopTransformRequest, options?: TransportRequestOptions) => Promise>
     updateTransform: (params: T.TransformUpdateTransformRequest| TB.TransformUpdateTransformRequest, options?: TransportRequestOptions) => Promise>
-    upgradeTransforms: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    upgradeTransforms: (params?: T.TransformUpgradeTransformsRequest| TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptions) => Promise>
   }
   update: (params: T.UpdateRequest| TB.UpdateRequest, options?: TransportRequestOptions) => Promise, TContext>>
   updateByQuery: (params: T.UpdateByQueryRequest| TB.UpdateByQueryRequest, options?: TransportRequestOptions) => Promise>

diff --git a/src/api/types.ts b/src/api/types.ts
index d4100af9c..b06fa5ce7 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -58,7 +58,6 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete'

 export interface BulkRequest extends RequestBase {
   index?: IndexName
-  type?: Type
   pipeline?: string
   refresh?: Refresh
   routing?: Routing
@@ -87,7 +86,6 @@ export interface BulkResponseItem {
   result?: string
   _seq_no?: SequenceNumber
   _shards?: ShardStatistics
-  _type?: string
   _version?: VersionNumber
   forced_refresh?: boolean
   get?: InlineGet>
@@ -149,7 +147,6 @@ export interface CountResponse {

 export interface CreateRequest extends RequestBase {
   id: Id
   index: IndexName
-  type?: Type
   pipeline?: string
   refresh?: Refresh
   routing?: Routing
@@ -166,7 +163,6 @@ export interface CreateResponse extends WriteResponseBase {

 export interface DeleteRequest extends RequestBase {
   id: Id
   index: IndexName
-  type?: Type
   if_primary_term?: long
   if_seq_no?: SequenceNumber
   refresh?: Refresh
@@ -203,12 +199,8 @@ export interface DeleteByQueryRequest extends RequestBase {
   scroll_size?: long
   search_timeout?: Time
   search_type?: SearchType
-  size?: long
   slices?: long
   sort?: string[]
-  _source?: SearchSourceConfigParam
-  _source_excludes?: Fields
-  _source_includes?: Fields
   stats?: string[]
   terminate_after?: long
   timeout?: Time
@@ -273,7 +265,6 @@ export type ExistsResponse = boolean

 export interface ExistsSourceRequest extends RequestBase {
   id: Id
   index: IndexName
-  type?: Type
   preference?: string
   realtime?: boolean
   refresh?: boolean
@@ -319,7 +310,6 @@ export interface ExplainRequest extends RequestBase {

 export interface ExplainResponse {
   _index: IndexName
-  _type?: Type
   _id: Id
   matched: boolean
   explanation?: ExplainExplanationDetail
@@ -362,7 +352,6 @@ export interface GetGetResult {
   _routing?: string
   _seq_no?: SequenceNumber
   _source?: TDocument
-  _type?: Type
   _version?: VersionNumber
 }
@@ -510,7 +499,6 @@ export interface MgetMultiGetError {
   error: ErrorCause
   _id: Id
   _index: IndexName
-  _type?: Type
 }

 export interface MgetOperation {
@@ -519,7 +507,6 @@ export interface MgetOperation {
   routing?: Routing
   _source?: SearchSourceConfig
   stored_fields?: Fields
-  _type?: Type
   version?: VersionNumber
   version_type?: VersionType
 }
@@ -593,7 +580,7 @@ export interface MsearchRequest extends RequestBase {

 export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody

-export type MsearchResponse = MsearchMultiSearchResult
+export type MsearchResponse> = MsearchMultiSearchResult

 export type MsearchResponseItem = MsearchMultiSearchItem | ErrorResponseBase
@@ -609,7 +596,7 @@ export interface MsearchTemplateRequest extends RequestBase {

 export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig

-export type MsearchTemplateResponse = MsearchMultiSearchResult
+export type MsearchTemplateResponse> = MsearchMultiSearchResult

 export interface MsearchTemplateTemplateConfig {
   explain?: boolean
@@ -700,7 +687,6 @@ export interface RankEvalDocumentRating {

 export interface RankEvalRankEvalHit {
   _id: Id
   _index: IndexName
-  _type?: Type
   _score: double
 }
@@ -842,7 +828,7 @@ export interface ReindexSource {
   remote?: ReindexRemoteSource
   size?: integer
   slice?: SlicedScroll
-  sort?: SearchSort
+  sort?: Sort
   _source?: Fields
   runtime_mappings?: MappingRuntimeFields
 }
@@ -920,7 +906,7 @@ export interface ScrollRequest extends RequestBase {
   rest_total_hits_as_int?: boolean
 }

-export interface ScrollResponse extends SearchResponse {
+export interface ScrollResponse> extends SearchResponse {
 }

 export interface SearchRequest extends RequestBase {
@@ -979,7 +965,7 @@ export interface SearchRequest extends RequestBase {
   query?: QueryDslQueryContainer
   rescore?: SearchRescore | SearchRescore[]
   script_fields?: Record
-  search_after?: SearchSortResults
+  search_after?: SortResults
   slice?: SlicedScroll
   fields?: (QueryDslFieldAndFormat | Field)[]
   suggest?: SearchSuggester
@@ -987,14 +973,13 @@ export interface SearchRequest extends RequestBase {
   runtime_mappings?: MappingRuntimeFields
 }

-export interface SearchResponse {
+export interface SearchResponse> {
   took: long
   timed_out: boolean
   _shards: ShardStatistics
   hits: SearchHitsMetadata
-  aggregations?: Record
+
aggregations?: TAggregations _clusters?: ClusterStatistics - documents?: TDocument[] fields?: Record max_score?: double num_reduce_phases?: long @@ -1093,7 +1078,6 @@ export interface SearchCompletionSuggestOption { fields?: Record _id: string _index: IndexName - _type?: Type _routing?: Routing _score: double _source: TDocument @@ -1153,18 +1137,6 @@ export interface SearchFieldCollapse { max_concurrent_group_searches?: integer } -export interface SearchFieldSort { - missing?: AggregationsMissing - mode?: SearchSortMode - nested?: SearchNestedSortValue - order?: SearchSortOrder - unmapped_type?: MappingFieldType - numeric_type?: SearchFieldSortNumericType - format?: string -} - -export type SearchFieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos' - export interface SearchFieldSuggester { completion?: SearchCompletionSuggester phrase?: SearchPhraseSuggester @@ -1174,16 +1146,6 @@ export interface SearchFieldSuggester { text?: string } -export interface SearchGeoDistanceSortKeys { - mode?: SearchSortMode - distance_type?: GeoDistanceType - ignore_unmapped?: boolean - order?: SearchSortOrder - unit?: DistanceUnit -} -export type SearchGeoDistanceSort = SearchGeoDistanceSortKeys -& { [property: string]: GeoLocation | GeoLocation[] | SearchSortMode | GeoDistanceType | boolean | SearchSortOrder | DistanceUnit } - export interface SearchHighlight { fields: Record type?: SearchHighlighterType @@ -1244,8 +1206,7 @@ export type SearchHighlighterType = SearchBuiltinHighlighterType | string export interface SearchHit { _index: IndexName _id: Id - _score?: double - _type?: Type + _score?: double | null _explanation?: ExplainExplanation fields?: Record highlight?: Record @@ -1260,13 +1221,13 @@ export interface SearchHit { _seq_no?: SequenceNumber _primary_term?: long _version?: VersionNumber - sort?: SearchSortResults + sort?: SortResults } export interface SearchHitsMetadata { - total: SearchTotalHits | long + total?: SearchTotalHits | long hits: SearchHit[] - max_score?: double + max_score?: double | null } export interface SearchInnerHits { @@ -1281,7 +1242,7 @@ export interface SearchInnerHits { script_fields?: Record seq_no_primary_term?: boolean fields?: Fields - sort?: SearchSort + sort?: Sort _source?: SearchSourceConfig stored_field?: Fields track_scores?: boolean @@ -1308,13 +1269,6 @@ export interface SearchNestedIdentity { _nested?: SearchNestedIdentity } -export interface SearchNestedSortValue { - filter?: QueryDslQueryContainer - max_children?: integer - nested?: SearchNestedSortValue - path: Field -} - export interface SearchPhraseSuggestCollate { params?: Record prune?: boolean @@ -1405,20 +1359,6 @@ export interface SearchRescoreQuery { export type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total' -export interface SearchScoreSort { - order?: SearchSortOrder -} - -export interface SearchScriptSort { - order?: SearchSortOrder - script: Script - type?: SearchScriptSortType - mode?: SearchSortMode - nested?: SearchNestedSortValue -} - -export type SearchScriptSortType = 'string' | 'number' - export interface SearchSearchProfile { collector: SearchCollector[] query: SearchQueryProfile[] @@ -1438,25 +1378,6 @@ export interface SearchSmoothingModelContainer { stupid_backoff?: SearchStupidBackoffSmoothingModel } -export type SearchSort = SearchSortCombinations | SearchSortCombinations[] - -export type SearchSortCombinations = Field | SearchSortOptions - -export type SearchSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' - -export interface SearchSortOptionsKeys { 
- _score?: SearchScoreSort - _doc?: SearchScoreSort - _geo_distance?: SearchGeoDistanceSort - _script?: SearchScriptSort -} -export type SearchSortOptions = SearchSortOptionsKeys -& { [property: string]: SearchFieldSort | SearchSortOrder | SearchScoreSort | SearchGeoDistanceSort | SearchScriptSort } - -export type SearchSortOrder = 'asc' | 'desc' - -export type SearchSortResults = (long | double | string | null)[] - export type SearchSourceConfig = boolean | SearchSourceFilter | Fields export type SearchSourceConfigParam = boolean | Fields @@ -1550,7 +1471,7 @@ export interface SearchMvtRequest extends RequestBase { fields?: Fields query?: QueryDslQueryContainer runtime_mappings?: MappingRuntimeFields - sort?: SearchSort + sort?: Sort } export type SearchMvtResponse = MapboxVectorTiles @@ -1667,7 +1588,6 @@ export interface TermvectorsResponse { _index: IndexName term_vectors?: Record took: long - _type?: Type _version: VersionNumber } @@ -1694,7 +1614,6 @@ export interface TermvectorsToken { export interface UpdateRequest extends RequestBase { id: Id index: IndexName - type?: Type if_primary_term?: long if_seq_no?: SequenceNumber lang?: string @@ -1731,6 +1650,7 @@ export interface UpdateByQueryRequest extends RequestBase { from?: long ignore_unavailable?: boolean lenient?: boolean + max_docs?: long pipeline?: string preference?: string refresh?: boolean @@ -1741,12 +1661,8 @@ export interface UpdateByQueryRequest extends RequestBase { scroll_size?: long search_timeout?: Time search_type?: SearchType - size?: long slices?: long sort?: string[] - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields stats?: string[] terminate_after?: long timeout?: Time @@ -1754,7 +1670,6 @@ export interface UpdateByQueryRequest extends RequestBase { version_type?: boolean wait_for_active_shards?: WaitForActiveShards wait_for_completion?: boolean - max_docs?: long query?: QueryDslQueryContainer script?: Script slice?: SlicedScroll @@ -1880,7 +1795,7 @@ export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' export interface DocStats { count: long - deleted: long + deleted?: long } export interface ElasticsearchVersionInfo { @@ -1932,6 +1847,18 @@ export interface FieldSizeUsage { size_in_bytes: long } +export interface FieldSort { + missing?: AggregationsMissing + mode?: SortMode + nested?: NestedSortValue + order?: SortOrder + unmapped_type?: MappingFieldType + numeric_type?: FieldSortNumericType + format?: string +} + +export type FieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos' + export type FieldValue = long | double | string | boolean export interface FielddataStats { @@ -1954,6 +1881,16 @@ export type Fuzziness = string | integer export type GeoBounds = CoordsGeoBounds | TopLeftBottomRightGeoBounds | TopRightBottomLeftGeoBounds | WktGeoBounds +export interface GeoDistanceSortKeys { + mode?: SortMode + distance_type?: GeoDistanceType + ignore_unmapped?: boolean + order?: SortOrder + unit?: DistanceUnit +} +export type GeoDistanceSort = GeoDistanceSortKeys +& { [property: string]: GeoLocation | GeoLocation[] | SortMode | GeoDistanceType | boolean | SortOrder | DistanceUnit } + export type GeoDistanceType = 'arc' | 'plane' export type GeoHash = string @@ -2029,6 +1966,13 @@ export interface IndexingStats { export type Indices = IndexName | IndexName[] +export interface IndicesOptions { + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + ignore_throttled?: boolean +} + export 
interface IndicesResponseBase extends AcknowledgedResponseBase { _shards?: ShardStatistics } @@ -2096,6 +2040,13 @@ export type Names = Name | Name[] export type Namespace = string +export interface NestedSortValue { + filter?: QueryDslQueryContainer + max_children?: integer + nested?: NestedSortValue + path: Field +} + export interface NodeAttributes { attributes: Record ephemeral_id: Id @@ -2198,7 +2149,7 @@ export interface RequestCacheStats { miss_count: long } -export type Result = 'Error' | 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' +export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' export interface Retries { bulk: long @@ -2207,6 +2158,10 @@ export interface Retries { export type Routing = string +export interface ScoreSort { + order?: SortOrder +} + export type Script = InlineScript | string | StoredScriptId export interface ScriptBase { @@ -2220,6 +2175,16 @@ export interface ScriptField { export type ScriptLanguage = BuiltinScriptLanguage | string +export interface ScriptSort { + order?: SortOrder + script: Script + type?: ScriptSortType + mode?: SortMode + nested?: NestedSortValue +} + +export type ScriptSortType = 'string' | 'number' + export interface ScriptTransform { lang: string params: Record @@ -2308,6 +2273,25 @@ export interface SlicedScroll { max: integer } +export type Sort = SortCombinations | SortCombinations[] + +export type SortCombinations = Field | SortOptions + +export type SortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' + +export interface SortOptionsKeys { + _score?: ScoreSort + _doc?: ScoreSort + _geo_distance?: GeoDistanceSort + _script?: ScriptSort +} +export type SortOptions = SortOptionsKeys +& { [property: string]: FieldSort | SortOrder | ScoreSort | GeoDistanceSort | ScriptSort } + +export type SortOrder = 'asc' | 'desc' + +export type SortResults = (long | double | string | null)[] + export interface StoreStats { size?: ByteSize size_in_bytes: integer @@ -2376,10 +2360,6 @@ export interface TranslogStats { export type TransportAddress = string -export type Type = string - -export type Types = Type | Type[] - export type Username = string export type Uuid = string @@ -2414,7 +2394,6 @@ export interface WriteResponseBase { result: Result _seq_no: SequenceNumber _shards: ShardStatistics - _type?: Type _version: VersionNumber forced_refresh?: boolean } @@ -2611,7 +2590,7 @@ export interface AggregationsBucketSortAggregation extends AggregationsAggregati from?: integer gap_policy?: AggregationsGapPolicy size?: integer - sort?: SearchSort + sort?: Sort } export type AggregationsBuckets = Record | TBucket[] @@ -2885,7 +2864,7 @@ export interface AggregationsGeoLineAggregation { point: AggregationsGeoLinePoint sort: AggregationsGeoLineSort include_sort?: boolean - sort_order?: SearchSortOrder + sort_order?: SortOrder size?: integer } @@ -2959,8 +2938,8 @@ export type AggregationsHistogramBucket = AggregationsHistogramBucketKeys & { [property: string]: AggregationsAggregate | string | double | long } export interface AggregationsHistogramOrder { - _count?: SearchSortOrder - _key?: SearchSortOrder + _count?: SortOrder + _key?: SortOrder } export interface AggregationsHoltLinearModelSettings { @@ -3067,11 +3046,11 @@ export interface AggregationsLongTermsAggregate extends AggregationsTermsAggrega } export interface AggregationsLongTermsBucketKeys extends AggregationsTermsBucketBase { - key: string + key: long key_as_string?: string } export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys -& { 
[property: string]: AggregationsAggregate | string | long } +& { [property: string]: AggregationsAggregate | long | string } export interface AggregationsMatrixAggregation extends AggregationsAggregation { fields?: Fields @@ -3084,7 +3063,7 @@ export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateB } export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation { - mode?: AggregationsMatrixStatsMode + mode?: SortMode } export interface AggregationsMatrixStatsFields { @@ -3098,8 +3077,6 @@ export interface AggregationsMatrixStatsFields { correlation: Record } -export type AggregationsMatrixStatsMode = 'avg' | 'min' | 'max' | 'sum' | 'median' - export interface AggregationsMaxAggregate extends AggregationsSingleMetricAggregateBase { } @@ -3542,7 +3519,7 @@ export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_f export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality' -export type AggregationsTermsAggregationOrder = Record | Record[] +export type AggregationsTermsAggregationOrder = Record | Record[] export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase { doc_count_error?: long @@ -3574,7 +3551,7 @@ export interface AggregationsTopHitsAggregation extends AggregationsMetricAggreg highlight?: SearchHighlight script_fields?: Record size?: integer - sort?: SearchSort + sort?: Sort _source?: SearchSourceConfig stored_fields?: Fields track_scores?: boolean @@ -3593,7 +3570,7 @@ export interface AggregationsTopMetricsAggregate extends AggregationsMultiBucket export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase { metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[] size?: integer - sort?: SearchSort + sort?: Sort } export interface AggregationsTopMetricsBucketKeys extends AggregationsMultiBucketBase { @@ -5051,7 +5028,6 @@ export interface QueryDslLikeDocument { doc?: any fields?: Field[] _id?: Id - _type?: Type _index?: IndexName per_field_analyzer?: Record routing?: Routing @@ -5157,11 +5133,9 @@ export interface QueryDslNestedQuery extends QueryDslQueryBase { inner_hits?: SearchInnerHits path: Field query: QueryDslQueryContainer - score_mode?: QueryDslNestedScoreMode + score_mode?: QueryDslChildScoreMode } -export type QueryDslNestedScoreMode = 'avg' | 'sum' | 'min' | 'max' | 'none' - export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { gt?: double gte?: double @@ -5271,6 +5245,7 @@ export interface QueryDslQueryContainer { terms?: QueryDslTermsQuery terms_set?: Partial> wildcard?: Partial> + wrapper?: QueryDslWrapperQuery type?: QueryDslTypeQuery } @@ -5500,10 +5475,14 @@ export interface QueryDslWildcardQuery extends QueryDslQueryBase { wildcard?: string } +export interface QueryDslWrapperQuery extends QueryDslQueryBase { + query: string +} + export type QueryDslZeroTermsQuery = 'all' | 'none' -export interface AsyncSearchAsyncSearch { - aggregations?: Record +export interface AsyncSearchAsyncSearch> { + aggregations?: TAggregations _clusters?: ClusterStatistics fields?: Record hits: SearchHitsMetadata @@ -5519,8 +5498,8 @@ export interface AsyncSearchAsyncSearch { took: long } -export interface AsyncSearchAsyncSearchDocumentResponseBase extends AsyncSearchAsyncSearchResponseBase { - response: AsyncSearchAsyncSearch +export interface AsyncSearchAsyncSearchDocumentResponseBase> extends AsyncSearchAsyncSearchResponseBase { + 
response: AsyncSearchAsyncSearch } export interface AsyncSearchAsyncSearchResponseBase { @@ -5616,7 +5595,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { query?: QueryDslQueryContainer rescore?: SearchRescore | SearchRescore[] script_fields?: Record - search_after?: SearchSortResults + search_after?: SortResults slice?: SlicedScroll fields?: (QueryDslFieldAndFormat | Field)[] suggest?: SearchSuggester @@ -6169,8 +6148,8 @@ export type CatMasterResponse = CatMasterMasterRecord[] export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { id?: Id - type?: Type - t?: Type + type?: string + t?: string create_time?: string ct?: string createTime?: string @@ -6540,8 +6519,8 @@ export interface CatNodesNodesRecord { v?: VersionString flavor?: string f?: string - type?: Type - t?: Type + type?: string + t?: string build?: string b?: string jdk?: string @@ -6828,8 +6807,8 @@ export interface CatPluginsPluginsRecord { v?: VersionString description?: string d?: string - type?: Type - t?: Type + type?: string + t?: string } export interface CatPluginsRequest extends CatCatRequestBase { @@ -6855,8 +6834,8 @@ export interface CatRecoveryRecoveryRecord { time?: string t?: string ti?: string - type?: Type - ty?: Type + type?: string + ty?: string stage?: string st?: string source_host?: string @@ -7244,8 +7223,8 @@ export interface CatTasksTasksRecord { ti?: Id parent_task_id?: string pti?: string - type?: Type - ty?: Type + type?: string + ty?: string start_time?: string start?: string timestamp?: string @@ -7293,7 +7272,7 @@ export interface CatTemplatesTemplatesRecord { export interface CatThreadPoolRequest extends CatCatRequestBase { thread_pool_patterns?: Names - size?: CatThreadPoolThreadPoolSize + time?: Time } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -7341,8 +7320,6 @@ export interface CatThreadPoolThreadPoolRecord { ka?: string } -export type CatThreadPoolThreadPoolSize = 'k' | 'm' | 'g' | 't' | 'p' - export interface CatTransformsRequest extends CatCatRequestBase { transform_id?: Id allow_no_match?: boolean @@ -8783,7 +8760,7 @@ export interface IndicesIndexRoutingAllocation { } export interface IndicesIndexRoutingAllocationDisk { - threshold_enabled: boolean | string + threshold_enabled?: boolean | string } export interface IndicesIndexRoutingAllocationInclude { @@ -8806,8 +8783,8 @@ export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replica export interface IndicesIndexSegmentSort { field: Fields order: IndicesSegmentSortOrder | IndicesSegmentSortOrder[] - mode?: IndicesSegmentSortMode - missing?: IndicesSegmentSortMissing + mode?: IndicesSegmentSortMode | IndicesSegmentSortMode[] + missing?: IndicesSegmentSortMissing | IndicesSegmentSortMissing[] } export interface IndicesIndexSettingBlocks { @@ -8946,6 +8923,7 @@ export interface IndicesIndexState { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings + defaults?: IndicesIndexSettings data_stream?: DataStreamName } @@ -9125,7 +9103,6 @@ export interface IndicesCloseResponse extends AcknowledgedResponseBase { export interface IndicesCreateRequest extends RequestBase { index: IndexName - include_type_name?: boolean master_timeout?: Time timeout?: Time wait_for_active_shards?: WaitForActiveShards @@ -9268,17 +9245,6 @@ export interface IndicesExistsTemplateRequest extends RequestBase { export type IndicesExistsTemplateResponse = boolean -export interface IndicesExistsTypeRequest extends RequestBase { - index: Indices - type: Types - 
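The index APIs in this hunk shed their last traces of mapping types: IndicesCreateRequest drops include_type_name, and the existsType method with its IndicesExistsTypeRequest/Response pair is removed outright. A minimal sketch of the resulting typeless flow against these definitions, assuming an 8.x Client instance and a hypothetical local node URL:

import { Client } from '@elastic/elasticsearch'

// Assumed endpoint; any reachable 8.x cluster behaves the same way.
const client = new Client({ node: 'http://localhost:9200/' })

async function createTypelessIndex (): Promise<void> {
  // IndicesCreateRequest: mappings sit directly on the request,
  // with no type wrapper and no include_type_name flag.
  await client.indices.create({
    index: 'my-index',
    mappings: {
      properties: {
        title: { type: 'text' },
        published: { type: 'date' }
      }
    }
  })

  // With existsType gone, indices.exists is the remaining existence check.
  const exists = await client.indices.exists({ index: 'my-index' })
  console.log(exists) // true
}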
allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - local?: boolean -} - -export type IndicesExistsTypeResponse = boolean - export interface IndicesFlushRequest extends RequestBase { index?: Indices allow_no_indices?: boolean @@ -9311,7 +9277,6 @@ export interface IndicesGetRequest extends RequestBase { flat_settings?: boolean ignore_unavailable?: boolean include_defaults?: boolean - include_type_name?: boolean local?: boolean master_timeout?: Time } @@ -9373,7 +9338,6 @@ export interface IndicesGetFieldMappingRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean include_defaults?: boolean - include_type_name?: boolean local?: boolean } @@ -9410,7 +9374,6 @@ export interface IndicesGetIndexTemplateRequest extends RequestBase { name?: Name local?: boolean flat_settings?: boolean - include_type_name?: boolean master_timeout?: Time } @@ -9428,7 +9391,6 @@ export interface IndicesGetMappingRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - include_type_name?: boolean local?: boolean master_timeout?: Time } @@ -9454,7 +9416,6 @@ export interface IndicesGetSettingsResponse extends DictionaryResponseBase - export interface IndicesRolloverRequest extends RequestBase { alias: IndexAlias new_index?: IndexName dry_run?: boolean - include_type_name?: boolean master_timeout?: Time timeout?: Time wait_for_active_shards?: WaitForActiveShards aliases?: Record conditions?: IndicesRolloverRolloverConditions - mappings?: IndicesRolloverIndexRolloverMapping + mappings?: MappingTypeMapping settings?: Record } @@ -9937,6 +9893,7 @@ export interface IndicesStatsIndexStats { flush?: FlushStats get?: GetStats indexing?: IndexingStats + indices?: IndicesStatsIndicesStats merges?: MergesStats query_cache?: QueryCacheStats recovery?: RecoveryStats @@ -9948,13 +9905,13 @@ export interface IndicesStatsIndexStats { translog?: TranslogStats warmer?: WarmerStats bulk?: BulkStats - shards?: IndicesStatsShardsTotalStats + shard_stats?: IndicesStatsShardsTotalStats } export interface IndicesStatsIndicesStats { - primaries: IndicesStatsIndexStats + primaries?: IndicesStatsIndexStats shards?: Record - total: IndicesStatsIndexStats + total?: IndicesStatsIndexStats uuid?: Uuid } @@ -9970,7 +9927,6 @@ export interface IndicesStatsRequest extends RequestBase { include_segment_file_sizes?: boolean include_unloaded_segments?: boolean level?: Level - types?: Types } export interface IndicesStatsResponse { @@ -10040,29 +9996,31 @@ export interface IndicesStatsShardSequenceNumber { } export interface IndicesStatsShardStats { - commit: IndicesStatsShardCommit - completion: CompletionStats - docs: DocStats - fielddata: FielddataStats - flush: FlushStats - get: GetStats - indexing: IndexingStats - merges: MergesStats - shard_path: IndicesStatsShardPath - query_cache: IndicesStatsShardQueryCache - recovery: RecoveryStats - refresh: RefreshStats - request_cache: RequestCacheStats - retention_leases: IndicesStatsShardRetentionLeases - routing: IndicesStatsShardRouting - search: SearchStats - segments: SegmentsStats - seq_no: IndicesStatsShardSequenceNumber - store: StoreStats - translog: TranslogStats - warmer: WarmerStats + commit?: IndicesStatsShardCommit + completion?: CompletionStats + docs?: DocStats + fielddata?: FielddataStats + flush?: FlushStats + get?: GetStats + indexing?: IndexingStats + merges?: MergesStats + shard_path?: IndicesStatsShardPath + query_cache?: 
IndicesStatsShardQueryCache + recovery?: RecoveryStats + refresh?: RefreshStats + request_cache?: RequestCacheStats + retention_leases?: IndicesStatsShardRetentionLeases + routing?: IndicesStatsShardRouting + search?: SearchStats + segments?: SegmentsStats + seq_no?: IndicesStatsShardSequenceNumber + store?: StoreStats + translog?: TranslogStats + warmer?: WarmerStats bulk?: BulkStats - shards: IndicesStatsShardsTotalStats + shards?: IndicesStatsShardsTotalStats + shard_stats?: IndicesStatsShardsTotalStats + indices?: IndicesStatsIndicesStats } export interface IndicesStatsShardsTotalStats { @@ -10133,7 +10091,6 @@ export interface IndicesValidateQueryIndicesValidationExplanation { export interface IndicesValidateQueryRequest extends RequestBase { index?: Indices - type?: Types allow_no_indices?: boolean all_shards?: boolean analyzer?: string @@ -10418,7 +10375,7 @@ export type IngestShapeType = 'geo_shape' | 'shape' export interface IngestSortProcessor extends IngestProcessorBase { field: Field - order: SearchSortOrder + order: SortOrder target_field: Field } @@ -10535,7 +10492,6 @@ export interface IngestSimulateDocumentSimulation { _parent?: string _routing?: string _source: Record - _type?: Type } export interface IngestSimulateIngest { @@ -10921,7 +10877,7 @@ export interface MlDatafeed { scroll_size?: integer delayed_data_check_config: MlDelayedDataCheckConfig runtime_mappings?: MappingRuntimeFields - indices_options?: MlDatafeedIndicesOptions + indices_options?: IndicesOptions } export interface MlDatafeedConfig { @@ -10933,7 +10889,7 @@ export interface MlDatafeedConfig { frequency?: Timestamp indexes?: string[] indices: string[] - indices_options?: MlDatafeedIndicesOptions + indices_options?: IndicesOptions job_id?: Id max_empty_searches?: integer query: QueryDslQueryContainer @@ -10943,13 +10899,6 @@ export interface MlDatafeedConfig { scroll_size?: integer } -export interface MlDatafeedIndicesOptions { - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - ignore_throttled?: boolean -} - export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { @@ -11216,6 +11165,8 @@ export interface MlDelayedDataCheckConfig { enabled: boolean } +export type MlDeploymentState = 'started' | 'starting' | 'fully_allocated' + export interface MlDetectionRule { actions?: MlRuleAction[] conditions?: MlRuleCondition[] @@ -11477,6 +11428,10 @@ export interface MlPerPartitionCategorization { stop_on_warn?: boolean } +export type MlPredictedValue = string | double + +export type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'stopping' + export type MlRuleAction = 'skip_result' | 'skip_model_update' export interface MlRuleCondition { @@ -11490,6 +11445,12 @@ export interface MlTimingStats { iteration_time?: integer } +export interface MlTopClassEntry { + class_name: string + class_probability: double + class_score: double +} + export interface MlTotalFeatureImportance { feature_name: Name importance: MlTotalFeatureImportanceStatistics[] @@ -11507,6 +11468,23 @@ export interface MlTotalFeatureImportanceStatistics { min: integer } +export interface MlTrainedModelAllocation { + allocation_state: MlDeploymentState + routing_table: Record + start_time: DateString + task_parameters: MlTrainedModelAllocationTaskParameters +} + +export interface MlTrainedModelAllocationRoutingTable { + reason: string + routing_state: MlRoutingState +} + +export interface MlTrainedModelAllocationTaskParameters { + 
model_bytes: integer + model_id: Id +} + export interface MlTrainedModelConfig { model_id: Id model_type: MlTrainedModelType @@ -11536,6 +11514,14 @@ export interface MlTrainedModelConfigMetadata { total_feature_importance?: MlTotalFeatureImportance[] } +export interface MlTrainedModelEntities { + class_name: string + class_probability: double + entity: string + start_pos: integer + end_pos: integer +} + export interface MlTrainedModelInferenceStats { failure_count: long inference_count: long @@ -11870,8 +11856,8 @@ export interface MlGetCategoriesRequest extends RequestBase { job_id: Id category_id?: CategoryId from?: integer - size?: integer partition_field_value?: string + size?: integer page?: MlPage } @@ -12056,6 +12042,22 @@ export interface MlGetTrainedModelsStatsResponse { trained_model_stats: MlTrainedModelStats[] } +export interface MlInferTrainedModelDeploymentRequest extends RequestBase { + model_id: Id + timeout?: Time + docs: Record[] +} + +export interface MlInferTrainedModelDeploymentResponse { + entities?: MlTrainedModelEntities[] + is_truncated?: boolean + predicted_value?: MlPredictedValue[] + predicted_value_sequence?: string + prediction_probability?: double + top_classes: MlTopClassEntry[] + warning?: string +} + export interface MlInfoAnomalyDetectors { categorization_analyzer: MlCategorizationAnalyzer categorization_examples_limit: integer @@ -12225,7 +12227,7 @@ export interface MlPutDatafeedRequest extends RequestBase { frequency?: Time indices?: string[] indexes?: string[] - indices_options?: MlDatafeedIndicesOptions + indices_options?: IndicesOptions job_id?: Id max_empty_searches?: integer query?: QueryDslQueryContainer @@ -12243,7 +12245,7 @@ export interface MlPutDatafeedResponse { frequency: Time indices: string[] job_id: Id - indices_options?: MlDatafeedIndicesOptions + indices_options?: IndicesOptions max_empty_searches: integer query: QueryDslQueryContainer query_delay: Time @@ -12406,6 +12408,25 @@ export interface MlPutTrainedModelAliasRequest extends RequestBase { export interface MlPutTrainedModelAliasResponse extends AcknowledgedResponseBase { } +export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { + model_id: Id + part: integer + definition: string + total_definition_length: long + total_parts: integer +} + +export interface MlPutTrainedModelDefinitionPartResponse extends AcknowledgedResponseBase { +} + +export interface MlPutTrainedModelVocabularyRequest extends RequestBase { + model_id: Id + vocabulary: string[] +} + +export interface MlPutTrainedModelVocabularyResponse extends AcknowledgedResponseBase { +} + export interface MlResetJobRequest extends RequestBase { job_id: Id wait_for_completion?: boolean @@ -12453,6 +12474,19 @@ export interface MlStartDatafeedResponse { started: boolean } +export interface MlStartTrainedModelDeploymentRequest extends RequestBase { + model_id: Id + inference_threads?: integer + model_threads?: integer + queue_capacity?: integer + timeout?: Time + wait_for?: MlDeploymentState +} + +export interface MlStartTrainedModelDeploymentResponse { + allocation: MlTrainedModelAllocation +} + export interface MlStopDataFrameAnalyticsRequest extends RequestBase { id: Id allow_no_match?: boolean @@ -12475,6 +12509,16 @@ export interface MlStopDatafeedResponse { stopped: boolean } +export interface MlStopTrainedModelDeploymentRequest extends RequestBase { + model_id: Id + allow_no_match?: boolean + force?: boolean +} + +export interface MlStopTrainedModelDeploymentResponse { + stopped: boolean +} + 
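Taken together, the new MlStartTrainedModelDeploymentRequest, MlInferTrainedModelDeploymentRequest/Response and MlStopTrainedModelDeploymentRequest types describe a full deployment lifecycle. A minimal sketch under the assumption of an 8.x Client instance, a hypothetical local node URL and an already-imported NLP model with the made-up id 'my-nlp-model'; the exact shape of the docs payload is likewise an assumption:

import { Client } from '@elastic/elasticsearch'

// Assumed endpoint and model id; both are placeholders.
const client = new Client({ node: 'http://localhost:9200/' })
const modelId = 'my-nlp-model'

async function runInference (): Promise<void> {
  // MlStartTrainedModelDeploymentRequest: block until the allocation reports 'started'.
  await client.ml.startTrainedModelDeployment({ model_id: modelId, wait_for: 'started' })

  // MlInferTrainedModelDeploymentRequest: docs is a list of field/value records.
  const response = await client.ml.inferTrainedModelDeployment({
    model_id: modelId,
    timeout: '30s',
    docs: [{ text_field: 'Elasticsearch is a distributed search engine' }]
  })
  console.log(response.predicted_value, response.top_classes)

  // MlStopTrainedModelDeploymentRequest: tear the deployment down again.
  await client.ml.stopTrainedModelDeployment({ model_id: modelId })
}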
export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { id: Id description?: string @@ -12509,7 +12553,7 @@ export interface MlUpdateDatafeedRequest extends RequestBase { frequency?: Time indices?: string[] indexes?: string[] - indices_options?: MlDatafeedIndicesOptions + indices_options?: IndicesOptions max_empty_searches?: integer query?: QueryDslQueryContainer query_delay?: Time @@ -12526,7 +12570,7 @@ export interface MlUpdateDatafeedResponse { frequency: Time indices: string[] job_id: Id - indices_options?: MlDatafeedIndicesOptions + indices_options?: IndicesOptions max_empty_searches: integer query: QueryDslQueryContainer query_delay: Time @@ -12650,26 +12694,100 @@ export interface MonitoringBulkResponse { } export interface NodesAdaptiveSelection { - avg_queue_size: long - avg_response_time: long - avg_response_time_ns: long - avg_service_time: string - avg_service_time_ns: long - outgoing_searches: long - rank: string + avg_queue_size?: long + avg_response_time?: long + avg_response_time_ns?: long + avg_service_time?: string + avg_service_time_ns?: long + outgoing_searches?: long + rank?: string } export interface NodesBreaker { - estimated_size: string - estimated_size_in_bytes: long - limit_size: string - limit_size_in_bytes: long - overhead: float - tripped: float + estimated_size?: string + estimated_size_in_bytes?: long + limit_size?: string + limit_size_in_bytes?: long + overhead?: float + tripped?: float +} + +export interface NodesCgroup { + cpuacct?: NodesCpuAcct + cpu?: NodesCgroupCpu + memory?: NodesCgroupMemory +} + +export interface NodesCgroupCpu { + control_group?: string + cfs_period_micros?: integer + cfs_quota_micros?: integer + stat?: NodesCgroupCpuStat +} + +export interface NodesCgroupCpuStat { + number_of_elapsed_periods?: long + number_of_times_throttled?: long + time_throttled_nanos?: long +} + +export interface NodesCgroupMemory { + control_group?: string + limit_in_bytes?: string + usage_in_bytes?: string +} + +export interface NodesClient { + id?: long + agent?: string + local_address?: string + remote_address?: string + last_uri?: string + opened_time_millis?: long + closed_time_millis?: long + last_request_time_millis?: long + request_count?: long + request_size_bytes?: long + x_opaque_id?: string +} + +export interface NodesClusterAppliedStats { + recordings?: NodesRecording[] +} + +export interface NodesClusterStateQueue { + total?: long + pending?: long + committed?: long +} + +export interface NodesClusterStateUpdate { + count?: long + computation_time?: string + computation_time_millis?: long + publication_time?: string + publication_time_millis?: long + context_construction_time?: string + context_construction_time_millis?: long + commit_time?: string + commit_time_millis?: long + completion_time?: string + completion_time_millis?: long + master_apply_time?: string + master_apply_time_millis?: long + notification_time?: string + notification_time_millis?: long +} + +export interface NodesContext { + context?: string + compilations?: long + cache_evictions?: long + compilation_limit_triggered?: long } export interface NodesCpu { - percent: integer + percent?: integer sys?: string sys_in_millis?: long total?: string @@ -12679,182 +12797,306 @@ export interface NodesCpu { load_average?: Record } +export interface NodesCpuAcct { + control_group?: string + usage_nanos?: long +} + export interface NodesDataPathStats { - available: string - available_in_bytes: long - disk_queue: string - disk_reads: long - disk_read_size: string - 
disk_read_size_in_bytes: long - disk_writes: long - disk_write_size: string - disk_write_size_in_bytes: long - free: string - free_in_bytes: long - mount: string - path: string - total: string - total_in_bytes: long - type: string + available?: string + available_in_bytes?: long + disk_queue?: string + disk_reads?: long + disk_read_size?: string + disk_read_size_in_bytes?: long + disk_writes?: long + disk_write_size?: string + disk_write_size_in_bytes?: long + free?: string + free_in_bytes?: long + mount?: string + path?: string + total?: string + total_in_bytes?: long + type?: string +} + +export interface NodesDiscovery { + cluster_state_queue?: NodesClusterStateQueue + published_cluster_states?: NodesPublishedClusterStates + cluster_state_update?: Record + serialized_cluster_states?: NodesSerializedClusterState + cluster_applier_stats?: NodesClusterAppliedStats } export interface NodesExtendedMemoryStats extends NodesMemoryStats { - free_percent: integer - used_percent: integer + free_percent?: integer + used_percent?: integer } export interface NodesFileSystem { - data: NodesDataPathStats[] - timestamp: long - total: NodesFileSystemTotal + data?: NodesDataPathStats[] + timestamp?: long + total?: NodesFileSystemTotal + io_stats?: NodesIoStats } export interface NodesFileSystemTotal { - available: string - available_in_bytes: long - free: string - free_in_bytes: long - total: string - total_in_bytes: long + available?: string + available_in_bytes?: long + free?: string + free_in_bytes?: long + total?: string + total_in_bytes?: long } export interface NodesGarbageCollector { - collectors: Record + collectors?: Record } export interface NodesGarbageCollectorTotal { - collection_count: long - collection_time: string - collection_time_in_millis: long + collection_count?: long + collection_time?: string + collection_time_in_millis?: long } export interface NodesHttp { - current_open: integer - total_opened: long + current_open?: integer + total_opened?: long + clients?: NodesClient[] +} + +export interface NodesIndexingPressure { + memory?: NodesIndexingPressureMemory +} + +export interface NodesIndexingPressureMemory { + limit_in_bytes?: long + current?: NodesPressureMemory + total?: NodesPressureMemory } export interface NodesIngest { - pipelines: Record - total: NodesIngestTotal + pipelines?: Record + total?: NodesIngestTotal } export interface NodesIngestTotal { - count: long - current: long - failed: long - processors: NodesKeyedProcessor[] - time_in_millis: long + count?: long + current?: long + failed?: long + processors?: Record[] + time_in_millis?: long +} + +export interface NodesIoStatDevice { + device_name?: string + operations?: long + read_kilobytes?: long + read_operations?: long + write_kilobytes?: long + write_operations?: long +} + +export interface NodesIoStats { + devices?: NodesIoStatDevice[] + total?: NodesIoStatDevice } export interface NodesJvm { - buffer_pools: Record - classes: NodesJvmClasses - gc: NodesGarbageCollector - mem: NodesMemoryStats - threads: NodesJvmThreads - timestamp: long - uptime: string - uptime_in_millis: long + buffer_pools?: Record + classes?: NodesJvmClasses + gc?: NodesGarbageCollector + mem?: NodesJvmMemoryStats + threads?: NodesJvmThreads + timestamp?: long + uptime?: string + uptime_in_millis?: long } export interface NodesJvmClasses { - current_loaded_count: long - total_loaded_count: long - total_unloaded_count: long + current_loaded_count?: long + total_loaded_count?: long + total_unloaded_count?: long +} + +export interface 
NodesJvmMemoryStats { + heap_used_in_bytes?: long + heap_used_percent?: long + heap_committed_in_bytes?: long + heap_max_in_bytes?: long + non_heap_used_in_bytes?: long + non_heap_committed_in_bytes?: long + pools?: Record } export interface NodesJvmThreads { - count: long - peak_count: long + count?: long + peak_count?: long } export interface NodesKeyedProcessor { - statistics: NodesProcess - type: string + stats?: NodesProcessor + type?: string } export interface NodesMemoryStats { + adjusted_total_in_bytes?: long resident?: string resident_in_bytes?: long share?: string share_in_bytes?: long total_virtual?: string total_virtual_in_bytes?: long - total_in_bytes: long - free_in_bytes: long - used_in_bytes: long + total_in_bytes?: long + free_in_bytes?: long + used_in_bytes?: long } export interface NodesNodeBufferPool { - count: long - total_capacity: string - total_capacity_in_bytes: long - used: string - used_in_bytes: long + count?: long + total_capacity?: string + total_capacity_in_bytes?: long + used?: string + used_in_bytes?: long } export interface NodesNodesResponseBase { - _nodes: NodeStatistics + _nodes?: NodeStatistics } export interface NodesOperatingSystem { - cpu: NodesCpu - mem: NodesExtendedMemoryStats - swap: NodesMemoryStats - timestamp: long + cpu?: NodesCpu + mem?: NodesExtendedMemoryStats + swap?: NodesMemoryStats + cgroup?: NodesCgroup + timestamp?: long +} + +export interface NodesPool { + used_in_bytes?: long + max_in_bytes?: long + peak_used_in_bytes?: long + peak_max_in_bytes?: long +} + +export interface NodesPressureMemory { + combined_coordinating_and_primary_in_bytes?: long + coordinating_in_bytes?: long + primary_in_bytes?: long + replica_in_bytes?: long + all_in_bytes?: long + coordinating_rejections?: long + primary_rejections?: long + replica_rejections?: long } export interface NodesProcess { - cpu: NodesCpu - mem: NodesMemoryStats - open_file_descriptors: integer - timestamp: long + cpu?: NodesCpu + mem?: NodesMemoryStats + open_file_descriptors?: integer + max_file_descriptors?: integer + timestamp?: long +} + +export interface NodesProcessor { + count?: long + current?: long + failed?: long + time_in_millis?: long +} + +export interface NodesPublishedClusterStates { + full_states?: long + incompatible_diffs?: long + compatible_diffs?: long +} + +export interface NodesRecording { + name?: string + cumulative_execution_count?: long + cumulative_execution_time?: string + cumulative_execution_time_millis?: long +} + +export interface NodesScriptCache { + cache_evictions?: long + compilation_limit_triggered?: long + compilations?: long + context?: string } export interface NodesScripting { - cache_evictions: long - compilations: long + cache_evictions?: long + compilations?: long + compilation_limit_triggered?: long + contexts?: NodesContext[] +} + +export interface NodesSerializedClusterState { + full_states?: NodesSerializedClusterStateDetail + diffs?: NodesSerializedClusterStateDetail +} + +export interface NodesSerializedClusterStateDetail { + count?: long + uncompressed_size?: string + uncompressed_size_in_bytes?: long + compressed_size?: string + compressed_size_in_bytes?: long } export interface NodesStats { - adaptive_selection: Record - breakers: Record - fs: NodesFileSystem - host: Host - http: NodesHttp - indices: IndicesStatsIndexStats - ingest: NodesIngest - ip: Ip | Ip[] - jvm: NodesJvm - name: Name - os: NodesOperatingSystem - process: NodesProcess - roles: NodeRoles - script: NodesScripting - thread_pool: Record - timestamp: long - 
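Nearly every member of NodesStats, and of the stats records around it such as NodesJvm and the new NodesJvmMemoryStats, turns optional in this hunk, so consumers can no longer dot into the response unconditionally. A minimal sketch of the defensive access this implies, assuming an 8.x Client instance and a hypothetical local node URL:

import { Client } from '@elastic/elasticsearch'

// Assumed endpoint; node ids and names come from whatever the cluster reports.
const client = new Client({ node: 'http://localhost:9200/' })

async function printHeapUsage (): Promise<void> {
  const stats = await client.nodes.stats({ metric: 'jvm' })
  for (const [id, node] of Object.entries(stats.nodes)) {
    // jvm, mem and heap_used_in_bytes are all optional now, hence the chaining.
    const heapUsed = node.jvm?.mem?.heap_used_in_bytes ?? 0
    console.log(`${node.name ?? id}: ${heapUsed} bytes of heap in use`)
  }
}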
transport: NodesTransport - transport_address: TransportAddress - attributes: Record + adaptive_selection?: Record + breakers?: Record + fs?: NodesFileSystem + host?: Host + http?: NodesHttp + ingest?: NodesIngest + ip?: Ip | Ip[] + jvm?: NodesJvm + name?: Name + os?: NodesOperatingSystem + process?: NodesProcess + roles?: NodeRoles + script?: NodesScripting + script_cache?: Record + thread_pool?: Record + timestamp?: long + transport?: NodesTransport + transport_address?: TransportAddress + attributes?: Record + discovery?: NodesDiscovery + indexing_pressure?: NodesIndexingPressure + indices?: IndicesStatsShardStats } export interface NodesThreadCount { - active: long - completed: long - largest: long - queue: long - rejected: long - threads: long + active?: long + completed?: long + largest?: long + queue?: long + rejected?: long + threads?: long } export interface NodesTransport { - rx_count: long - rx_size: string - rx_size_in_bytes: long - server_open: integer - tx_count: long - tx_size: string - tx_size_in_bytes: long + inbound_handling_time_histogram?: NodesTransportHistogram[] + outbound_handling_time_histogram?: NodesTransportHistogram[] + rx_count?: long + rx_size?: string + rx_size_in_bytes?: long + server_open?: integer + tx_count?: long + tx_size?: string + tx_size_in_bytes?: long + total_outbound_connections?: long +} + +export interface NodesTransportHistogram { + count?: long + lt_millis?: long + ge_millis?: long } export interface NodesHotThreadsHotThread { @@ -13261,7 +13503,7 @@ export interface NodesStatsRequest extends RequestBase { } export interface NodesStatsResponse extends NodesNodesResponseBase { - cluster_name: Name + cluster_name?: Name nodes: Record } @@ -13448,7 +13690,6 @@ export type RollupRollupResponse = any export interface RollupRollupSearchRequest extends RequestBase { index: Indices - type?: Type rest_total_hits_as_int?: boolean typed_keys?: boolean aggregations?: Record @@ -13457,13 +13698,13 @@ export interface RollupRollupSearchRequest extends RequestBase { size?: integer } -export interface RollupRollupSearchResponse { +export interface RollupRollupSearchResponse> { took: long timed_out: boolean terminated_early?: boolean _shards: ShardStatistics hits: SearchHitsMetadata - aggregations?: Record + aggregations?: TAggregations } export interface RollupStartJobRequest extends RequestBase { @@ -13613,13 +13854,19 @@ export interface SecurityUser { enabled: boolean } +export interface SecurityAuthenticateApiKey { + id: string + name: Name +} + export interface SecurityAuthenticateRequest extends RequestBase { } export interface SecurityAuthenticateResponse { + api_key?: SecurityAuthenticateApiKey authentication_realm: SecurityRealmInfo - email?: string - full_name?: Name + email?: string | null + full_name?: Name | null lookup_realm: SecurityRealmInfo metadata: Metadata roles: string[] @@ -13715,6 +13962,7 @@ export interface SecurityCreateApiKeyResponse { expiration?: long id: Id name: Name + encoded: string } export interface SecurityCreateApiKeyRoleDescriptor { @@ -14640,11 +14888,11 @@ export interface SqlTranslateResponse { size: long _source: SearchSourceConfig fields: Record[] - sort: SearchSort + sort: Sort } export interface SslCertificatesCertificateInformation { - alias?: string + alias: string | null expiry: DateString format: string has_private_key: boolean @@ -14824,7 +15072,6 @@ export interface TransformPivot { aggregations?: Record aggs?: Record group_by?: Record - max_page_search_size?: integer } export interface 
TransformPivotGroupByContainer { @@ -14840,17 +15087,18 @@ export interface TransformRetentionPolicy { } export interface TransformRetentionPolicyContainer { - time: TransformRetentionPolicy + time?: TransformRetentionPolicy } export interface TransformSettings { + align_checkpoints?: boolean dates_as_epoch_millis?: boolean docs_per_second?: float max_page_search_size?: integer } export interface TransformSyncContainer { - time: TransformTimeSync + time?: TransformTimeSync } export interface TransformTimeSync { @@ -14859,8 +15107,9 @@ export interface TransformTimeSync { } export interface TransformDeleteTransformRequest extends RequestBase { - transform_id: Name + transform_id: Id force?: boolean + timeout?: Time } export interface TransformDeleteTransformResponse extends AcknowledgedResponseBase { @@ -14945,6 +15194,7 @@ export interface TransformGetTransformStatsTransformStats { export interface TransformPreviewTransformRequest extends RequestBase { transform_id?: Id + timeout?: Time dest?: ReindexDestination description?: string frequency?: Time @@ -14964,22 +15214,32 @@ export interface TransformPreviewTransformResponse { export interface TransformPutTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean + timeout?: Time dest: ReindexDestination description?: string frequency?: Time + latest?: TransformLatest + _meta?: Record pivot?: TransformPivot - source: ReindexSource + retention_policy?: TransformRetentionPolicyContainer settings?: TransformSettings + source: ReindexSource sync?: TransformSyncContainer - retention_policy?: TransformRetentionPolicyContainer - latest?: TransformLatest } export interface TransformPutTransformResponse extends AcknowledgedResponseBase { } +export interface TransformResetTransformRequest extends RequestBase { + transform_id: Id + force?: boolean +} + +export interface TransformResetTransformResponse extends AcknowledgedResponseBase { +} + export interface TransformStartTransformRequest extends RequestBase { - transform_id: Name + transform_id: Id timeout?: Time } @@ -15001,6 +15261,7 @@ export interface TransformStopTransformResponse extends AcknowledgedResponseBase export interface TransformUpdateTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean + timeout?: Time dest?: ReindexDestination description?: string frequency?: Time @@ -15016,13 +15277,26 @@ export interface TransformUpdateTransformResponse { dest: ReindexDestination frequency: Time id: Id - pivot: TransformPivot + latest?: TransformLatest + pivot?: TransformPivot + retention_policy?: TransformRetentionPolicyContainer settings: TransformSettings source: ReindexSource sync?: TransformSyncContainer version: VersionString } +export interface TransformUpgradeTransformsRequest extends RequestBase { + dry_run?: boolean + timeout?: Time +} + +export interface TransformUpgradeTransformsResponse { + needs_update: integer + no_action: integer + updated: integer +} + export interface WatcherAcknowledgeState { state: WatcherAcknowledgementOptions timestamp: DateString @@ -15252,14 +15526,6 @@ export interface WatcherIndexResultSummary { index: IndexName result: Result version: VersionNumber - type?: Type -} - -export interface WatcherIndicesOptions { - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - ignore_throttled?: boolean } export interface WatcherInputContainer { @@ -15364,7 +15630,7 @@ export interface WatcherSearchInputRequestBody { export interface WatcherSearchInputRequestDefinition { 
body?: WatcherSearchInputRequestBody indices?: IndexName[] - indices_options?: WatcherIndicesOptions + indices_options?: IndicesOptions search_type?: SearchType template?: SearchTemplateRequest rest_total_hits_as_int?: boolean @@ -15443,11 +15709,11 @@ export interface WatcherTimeOfYear { } export interface WatcherTriggerContainer { - schedule: WatcherScheduleContainer + schedule?: WatcherScheduleContainer } export interface WatcherTriggerEventContainer { - schedule: WatcherScheduleTriggerEvent + schedule?: WatcherScheduleTriggerEvent } export interface WatcherTriggerEventResult { @@ -15588,8 +15854,8 @@ export interface WatcherQueryWatchesRequest extends RequestBase { from?: integer size?: integer query?: QueryDslQueryContainer - sort?: SearchSort - search_after?: SearchSortResults + sort?: Sort + search_after?: SortResults } export interface WatcherQueryWatchesResponse { @@ -16111,10 +16377,10 @@ export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter _all: XpackUsageCounter } -export interface SpecUtilsAdditionalProperty { +export interface SpecUtilsAdditionalProperties { } -export interface SpecUtilsAdditionalProperties { +export interface SpecUtilsAdditionalProperty { } export interface SpecUtilsCommonQueryParameters { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index be3b3a16d..58c965bd9 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -58,7 +58,6 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' export interface BulkRequest extends RequestBase { index?: IndexName - type?: Type pipeline?: string refresh?: Refresh routing?: Routing @@ -88,7 +87,6 @@ export interface BulkResponseItem { result?: string _seq_no?: SequenceNumber _shards?: ShardStatistics - _type?: string _version?: VersionNumber forced_refresh?: boolean get?: InlineGet> @@ -160,7 +158,6 @@ export interface CountResponse { export interface CreateRequest extends RequestBase { id: Id index: IndexName - type?: Type pipeline?: string refresh?: Refresh routing?: Routing @@ -178,7 +175,6 @@ export interface CreateResponse extends WriteResponseBase { export interface DeleteRequest extends RequestBase { id: Id index: IndexName - type?: Type if_primary_term?: long if_seq_no?: SequenceNumber refresh?: Refresh @@ -215,12 +211,8 @@ export interface DeleteByQueryRequest extends RequestBase { scroll_size?: long search_timeout?: Time search_type?: SearchType - size?: long slices?: long sort?: string[] - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields stats?: string[] terminate_after?: long timeout?: Time @@ -289,7 +281,6 @@ export type ExistsResponse = boolean export interface ExistsSourceRequest extends RequestBase { id: Id index: IndexName - type?: Type preference?: string realtime?: boolean refresh?: boolean @@ -338,7 +329,6 @@ export interface ExplainRequest extends RequestBase { export interface ExplainResponse { _index: IndexName - _type?: Type _id: Id matched: boolean explanation?: ExplainExplanationDetail @@ -384,7 +374,6 @@ export interface GetGetResult { _routing?: string _seq_no?: SequenceNumber _source?: TDocument - _type?: Type _version?: VersionNumber } @@ -536,7 +525,6 @@ export interface MgetMultiGetError { error: ErrorCause _id: Id _index: IndexName - _type?: Type } export interface MgetOperation { @@ -545,7 +533,6 @@ export interface MgetOperation { routing?: Routing _source?: SearchSourceConfig stored_fields?: Fields - _type?: Type version?: VersionNumber version_type?: 
VersionType } @@ -623,7 +610,7 @@ export interface MsearchRequest extends RequestBase { export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody -export type MsearchResponse = MsearchMultiSearchResult +export type MsearchResponse> = MsearchMultiSearchResult export type MsearchResponseItem = MsearchMultiSearchItem | ErrorResponseBase @@ -640,7 +627,7 @@ export interface MsearchTemplateRequest extends RequestBase { export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig -export type MsearchTemplateResponse = MsearchMultiSearchResult +export type MsearchTemplateResponse> = MsearchMultiSearchResult export interface MsearchTemplateTemplateConfig { explain?: boolean @@ -738,7 +725,6 @@ export interface RankEvalDocumentRating { export interface RankEvalRankEvalHit { _id: Id _index: IndexName - _type?: Type _score: double } @@ -886,7 +872,7 @@ export interface ReindexSource { remote?: ReindexRemoteSource size?: integer slice?: SlicedScroll - sort?: SearchSort + sort?: Sort _source?: Fields runtime_mappings?: MappingRuntimeFields } @@ -975,7 +961,7 @@ export interface ScrollRequest extends RequestBase { } } -export interface ScrollResponse extends SearchResponse { +export interface ScrollResponse> extends SearchResponse { } export interface SearchRequest extends RequestBase { @@ -1040,10 +1026,10 @@ export interface SearchRequest extends RequestBase { query?: QueryDslQueryContainer rescore?: SearchRescore | SearchRescore[] script_fields?: Record - search_after?: SearchSortResults + search_after?: SortResults size?: integer slice?: SlicedScroll - sort?: SearchSort + sort?: Sort _source?: SearchSourceConfig fields?: (QueryDslFieldAndFormat | Field)[] suggest?: SearchSuggester @@ -1059,14 +1045,13 @@ export interface SearchRequest extends RequestBase { } } -export interface SearchResponse { +export interface SearchResponse> { took: long timed_out: boolean _shards: ShardStatistics hits: SearchHitsMetadata - aggregations?: Record + aggregations?: TAggregations _clusters?: ClusterStatistics - documents?: TDocument[] fields?: Record max_score?: double num_reduce_phases?: long @@ -1165,7 +1150,6 @@ export interface SearchCompletionSuggestOption { fields?: Record _id: string _index: IndexName - _type?: Type _routing?: Routing _score: double _source: TDocument @@ -1225,18 +1209,6 @@ export interface SearchFieldCollapse { max_concurrent_group_searches?: integer } -export interface SearchFieldSort { - missing?: AggregationsMissing - mode?: SearchSortMode - nested?: SearchNestedSortValue - order?: SearchSortOrder - unmapped_type?: MappingFieldType - numeric_type?: SearchFieldSortNumericType - format?: string -} - -export type SearchFieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos' - export interface SearchFieldSuggester { completion?: SearchCompletionSuggester phrase?: SearchPhraseSuggester @@ -1246,16 +1218,6 @@ export interface SearchFieldSuggester { text?: string } -export interface SearchGeoDistanceSortKeys { - mode?: SearchSortMode - distance_type?: GeoDistanceType - ignore_unmapped?: boolean - order?: SearchSortOrder - unit?: DistanceUnit -} -export type SearchGeoDistanceSort = SearchGeoDistanceSortKeys -& { [property: string]: GeoLocation | GeoLocation[] | SearchSortMode | GeoDistanceType | boolean | SearchSortOrder | DistanceUnit } - export interface SearchHighlight { fields: Record type?: SearchHighlighterType @@ -1316,8 +1278,7 @@ export type SearchHighlighterType = SearchBuiltinHighlighterType | string export interface 
SearchHit { _index: IndexName _id: Id - _score?: double - _type?: Type + _score?: double | null _explanation?: ExplainExplanation fields?: Record highlight?: Record @@ -1332,13 +1293,13 @@ export interface SearchHit { _seq_no?: SequenceNumber _primary_term?: long _version?: VersionNumber - sort?: SearchSortResults + sort?: SortResults } export interface SearchHitsMetadata { - total: SearchTotalHits | long + total?: SearchTotalHits | long hits: SearchHit[] - max_score?: double + max_score?: double | null } export interface SearchInnerHits { @@ -1353,7 +1314,7 @@ export interface SearchInnerHits { script_fields?: Record seq_no_primary_term?: boolean fields?: Fields - sort?: SearchSort + sort?: Sort _source?: SearchSourceConfig stored_field?: Fields track_scores?: boolean @@ -1380,13 +1341,6 @@ export interface SearchNestedIdentity { _nested?: SearchNestedIdentity } -export interface SearchNestedSortValue { - filter?: QueryDslQueryContainer - max_children?: integer - nested?: SearchNestedSortValue - path: Field -} - export interface SearchPhraseSuggestCollate { params?: Record prune?: boolean @@ -1477,20 +1431,6 @@ export interface SearchRescoreQuery { export type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total' -export interface SearchScoreSort { - order?: SearchSortOrder -} - -export interface SearchScriptSort { - order?: SearchSortOrder - script: Script - type?: SearchScriptSortType - mode?: SearchSortMode - nested?: SearchNestedSortValue -} - -export type SearchScriptSortType = 'string' | 'number' - export interface SearchSearchProfile { collector: SearchCollector[] query: SearchQueryProfile[] @@ -1510,25 +1450,6 @@ export interface SearchSmoothingModelContainer { stupid_backoff?: SearchStupidBackoffSmoothingModel } -export type SearchSort = SearchSortCombinations | SearchSortCombinations[] - -export type SearchSortCombinations = Field | SearchSortOptions - -export type SearchSortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' - -export interface SearchSortOptionsKeys { - _score?: SearchScoreSort - _doc?: SearchScoreSort - _geo_distance?: SearchGeoDistanceSort - _script?: SearchScriptSort -} -export type SearchSortOptions = SearchSortOptionsKeys -& { [property: string]: SearchFieldSort | SearchSortOrder | SearchScoreSort | SearchGeoDistanceSort | SearchScriptSort } - -export type SearchSortOrder = 'asc' | 'desc' - -export type SearchSortResults = (long | double | string | null)[] - export type SearchSourceConfig = boolean | SearchSourceFilter | Fields export type SearchSourceConfigParam = boolean | Fields @@ -1629,7 +1550,7 @@ export interface SearchMvtRequest extends RequestBase { query?: QueryDslQueryContainer runtime_mappings?: MappingRuntimeFields size?: integer - sort?: SearchSort + sort?: Sort } } @@ -1758,7 +1679,6 @@ export interface TermvectorsResponse { _index: IndexName term_vectors?: Record took: long - _type?: Type _version: VersionNumber } @@ -1785,7 +1705,6 @@ export interface TermvectorsToken { export interface UpdateRequest extends RequestBase { id: Id index: IndexName - type?: Type if_primary_term?: long if_seq_no?: SequenceNumber lang?: string @@ -1826,6 +1745,7 @@ export interface UpdateByQueryRequest extends RequestBase { from?: long ignore_unavailable?: boolean lenient?: boolean + max_docs?: long pipeline?: string preference?: string refresh?: boolean @@ -1836,12 +1756,8 @@ export interface UpdateByQueryRequest extends RequestBase { scroll_size?: long search_timeout?: Time search_type?: SearchType - size?: long slices?: long sort?: string[] - _source?: 
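`hits.total` above is now optional and remains a union of `SearchTotalHits` and a plain number (a number when `rest_total_hits_as_int` is set, absent when total hits tracking is disabled), so consumers need a small normalization step. A sketch, not part of the client itself:

import * as T from '@elastic/elasticsearch/lib/api/types'

// Returns the matched document count, or undefined when total hits
// tracking was disabled for the request.
function totalHits (hits: T.SearchHitsMetadata<unknown>): number | undefined {
  if (hits.total == null) return undefined
  return typeof hits.total === 'number' ? hits.total : hits.total.value
}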
SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields stats?: string[] terminate_after?: long timeout?: Time @@ -1979,7 +1895,7 @@ export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' export interface DocStats { count: long - deleted: long + deleted?: long } export interface ElasticsearchVersionInfo { @@ -2031,6 +1947,18 @@ export interface FieldSizeUsage { size_in_bytes: long } +export interface FieldSort { + missing?: AggregationsMissing + mode?: SortMode + nested?: NestedSortValue + order?: SortOrder + unmapped_type?: MappingFieldType + numeric_type?: FieldSortNumericType + format?: string +} + +export type FieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos' + export type FieldValue = long | double | string | boolean export interface FielddataStats { @@ -2053,6 +1981,16 @@ export type Fuzziness = string | integer export type GeoBounds = CoordsGeoBounds | TopLeftBottomRightGeoBounds | TopRightBottomLeftGeoBounds | WktGeoBounds +export interface GeoDistanceSortKeys { + mode?: SortMode + distance_type?: GeoDistanceType + ignore_unmapped?: boolean + order?: SortOrder + unit?: DistanceUnit +} +export type GeoDistanceSort = GeoDistanceSortKeys +& { [property: string]: GeoLocation | GeoLocation[] | SortMode | GeoDistanceType | boolean | SortOrder | DistanceUnit } + export type GeoDistanceType = 'arc' | 'plane' export type GeoHash = string @@ -2128,6 +2066,13 @@ export interface IndexingStats { export type Indices = IndexName | IndexName[] +export interface IndicesOptions { + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + ignore_throttled?: boolean +} + export interface IndicesResponseBase extends AcknowledgedResponseBase { _shards?: ShardStatistics } @@ -2195,6 +2140,13 @@ export type Names = Name | Name[] export type Namespace = string +export interface NestedSortValue { + filter?: QueryDslQueryContainer + max_children?: integer + nested?: NestedSortValue + path: Field +} + export interface NodeAttributes { attributes: Record ephemeral_id: Id @@ -2297,7 +2249,7 @@ export interface RequestCacheStats { miss_count: long } -export type Result = 'Error' | 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' +export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' export interface Retries { bulk: long @@ -2306,6 +2258,10 @@ export interface Retries { export type Routing = string +export interface ScoreSort { + order?: SortOrder +} + export type Script = InlineScript | string | StoredScriptId export interface ScriptBase { @@ -2319,6 +2275,16 @@ export interface ScriptField { export type ScriptLanguage = BuiltinScriptLanguage | string +export interface ScriptSort { + order?: SortOrder + script: Script + type?: ScriptSortType + mode?: SortMode + nested?: NestedSortValue +} + +export type ScriptSortType = 'string' | 'number' + export interface ScriptTransform { lang: string params: Record @@ -2407,6 +2373,25 @@ export interface SlicedScroll { max: integer } +export type Sort = SortCombinations | SortCombinations[] + +export type SortCombinations = Field | SortOptions + +export type SortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' + +export interface SortOptionsKeys { + _score?: ScoreSort + _doc?: ScoreSort + _geo_distance?: GeoDistanceSort + _script?: ScriptSort +} +export type SortOptions = SortOptionsKeys +& { [property: string]: FieldSort | SortOrder | ScoreSort | GeoDistanceSort | ScriptSort } + +export type SortOrder = 'asc' | 'desc' + +export type 
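With the `Search`-prefixed sort types promoted to the top-level names above, a geo-distance sort is written against the global `GeoDistanceSort` variant. A small illustrative value; `location` is a hypothetical geo_point field:

import * as T from '@elastic/elasticsearch/lib/api/types'

// Sort hits by distance from a point, closest first.
const byDistance: T.SortOptions = {
  _geo_distance: {
    location: { lat: 40.72, lon: -74.0 },
    order: 'asc',
    unit: 'km',
    mode: 'min'
  }
}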
SortResults = (long | double | string | null)[] + export interface StoreStats { size?: ByteSize size_in_bytes: integer @@ -2475,10 +2460,6 @@ export interface TranslogStats { export type TransportAddress = string -export type Type = string - -export type Types = Type | Type[] - export type Username = string export type Uuid = string @@ -2513,7 +2494,6 @@ export interface WriteResponseBase { result: Result _seq_no: SequenceNumber _shards: ShardStatistics - _type?: Type _version: VersionNumber forced_refresh?: boolean } @@ -2710,7 +2690,7 @@ export interface AggregationsBucketSortAggregation extends AggregationsAggregati from?: integer gap_policy?: AggregationsGapPolicy size?: integer - sort?: SearchSort + sort?: Sort } export type AggregationsBuckets = Record | TBucket[] @@ -2984,7 +2964,7 @@ export interface AggregationsGeoLineAggregation { point: AggregationsGeoLinePoint sort: AggregationsGeoLineSort include_sort?: boolean - sort_order?: SearchSortOrder + sort_order?: SortOrder size?: integer } @@ -3058,8 +3038,8 @@ export type AggregationsHistogramBucket = AggregationsHistogramBucketKeys & { [property: string]: AggregationsAggregate | string | double | long } export interface AggregationsHistogramOrder { - _count?: SearchSortOrder - _key?: SearchSortOrder + _count?: SortOrder + _key?: SortOrder } export interface AggregationsHoltLinearModelSettings { @@ -3166,11 +3146,11 @@ export interface AggregationsLongTermsAggregate extends AggregationsTermsAggrega } export interface AggregationsLongTermsBucketKeys extends AggregationsTermsBucketBase { - key: string + key: long key_as_string?: string } export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys -& { [property: string]: AggregationsAggregate | string | long } +& { [property: string]: AggregationsAggregate | long | string } export interface AggregationsMatrixAggregation extends AggregationsAggregation { fields?: Fields @@ -3183,7 +3163,7 @@ export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateB } export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation { - mode?: AggregationsMatrixStatsMode + mode?: SortMode } export interface AggregationsMatrixStatsFields { @@ -3197,8 +3177,6 @@ export interface AggregationsMatrixStatsFields { correlation: Record } -export type AggregationsMatrixStatsMode = 'avg' | 'min' | 'max' | 'sum' | 'median' - export interface AggregationsMaxAggregate extends AggregationsSingleMetricAggregateBase { } @@ -3641,7 +3619,7 @@ export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_f export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality' -export type AggregationsTermsAggregationOrder = Record | Record[] +export type AggregationsTermsAggregationOrder = Record | Record[] export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase { doc_count_error?: long @@ -3673,7 +3651,7 @@ export interface AggregationsTopHitsAggregation extends AggregationsMetricAggreg highlight?: SearchHighlight script_fields?: Record size?: integer - sort?: SearchSort + sort?: Sort _source?: SearchSourceConfig stored_fields?: Fields track_scores?: boolean @@ -3692,7 +3670,7 @@ export interface AggregationsTopMetricsAggregate extends AggregationsMultiBucket export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase { metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[] size?: integer - sort?: 
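`SortResults` ties the two ends of `search_after` pagination together: the `sort` array of the last hit of one page is fed verbatim into the next request. A compact sketch, assuming a `logs` index whose `timestamp` and `id` fields form a unique tiebreaker:

import { Client } from '@elastic/elasticsearch'
import * as T from '@elastic/elasticsearch/lib/api/types'

async function * paginate (client: Client, index: string): AsyncIterable<T.SearchHit<unknown>> {
  let searchAfter: T.SortResults | undefined
  while (true) {
    const page = await client.search({
      index,
      size: 1000,
      sort: [{ timestamp: 'asc' }, { id: 'asc' }],
      search_after: searchAfter
    })
    const hits = page.hits.hits
    if (hits.length === 0) return
    yield * hits
    // SearchHit.sort has exactly the SortResults shape search_after expects.
    searchAfter = hits[hits.length - 1].sort
  }
}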
SearchSort + sort?: Sort } export interface AggregationsTopMetricsBucketKeys extends AggregationsMultiBucketBase { @@ -5150,7 +5128,6 @@ export interface QueryDslLikeDocument { doc?: any fields?: Field[] _id?: Id - _type?: Type _index?: IndexName per_field_analyzer?: Record routing?: Routing @@ -5256,11 +5233,9 @@ export interface QueryDslNestedQuery extends QueryDslQueryBase { inner_hits?: SearchInnerHits path: Field query: QueryDslQueryContainer - score_mode?: QueryDslNestedScoreMode + score_mode?: QueryDslChildScoreMode } -export type QueryDslNestedScoreMode = 'avg' | 'sum' | 'min' | 'max' | 'none' - export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { gt?: double gte?: double @@ -5370,6 +5345,7 @@ export interface QueryDslQueryContainer { terms?: QueryDslTermsQuery terms_set?: Partial> wildcard?: Partial> + wrapper?: QueryDslWrapperQuery type?: QueryDslTypeQuery } @@ -5599,10 +5575,14 @@ export interface QueryDslWildcardQuery extends QueryDslQueryBase { wildcard?: string } +export interface QueryDslWrapperQuery extends QueryDslQueryBase { + query: string +} + export type QueryDslZeroTermsQuery = 'all' | 'none' -export interface AsyncSearchAsyncSearch { - aggregations?: Record +export interface AsyncSearchAsyncSearch> { + aggregations?: TAggregations _clusters?: ClusterStatistics fields?: Record hits: SearchHitsMetadata @@ -5618,8 +5598,8 @@ export interface AsyncSearchAsyncSearch { took: long } -export interface AsyncSearchAsyncSearchDocumentResponseBase extends AsyncSearchAsyncSearchResponseBase { - response: AsyncSearchAsyncSearch +export interface AsyncSearchAsyncSearchDocumentResponseBase> extends AsyncSearchAsyncSearchResponseBase { + response: AsyncSearchAsyncSearch } export interface AsyncSearchAsyncSearchResponseBase { @@ -5721,10 +5701,10 @@ export interface AsyncSearchSubmitRequest extends RequestBase { query?: QueryDslQueryContainer rescore?: SearchRescore | SearchRescore[] script_fields?: Record - search_after?: SearchSortResults + search_after?: SortResults size?: integer slice?: SlicedScroll - sort?: SearchSort + sort?: Sort _source?: SearchSourceConfig fields?: (QueryDslFieldAndFormat | Field)[] suggest?: SearchSuggester @@ -6286,8 +6266,8 @@ export type CatMasterResponse = CatMasterMasterRecord[] export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { id?: Id - type?: Type - t?: Type + type?: string + t?: string create_time?: string ct?: string createTime?: string @@ -6657,8 +6637,8 @@ export interface CatNodesNodesRecord { v?: VersionString flavor?: string f?: string - type?: Type - t?: Type + type?: string + t?: string build?: string b?: string jdk?: string @@ -6945,8 +6925,8 @@ export interface CatPluginsPluginsRecord { v?: VersionString description?: string d?: string - type?: Type - t?: Type + type?: string + t?: string } export interface CatPluginsRequest extends CatCatRequestBase { @@ -6972,8 +6952,8 @@ export interface CatRecoveryRecoveryRecord { time?: string t?: string ti?: string - type?: Type - ty?: Type + type?: string + ty?: string stage?: string st?: string source_host?: string @@ -7361,8 +7341,8 @@ export interface CatTasksTasksRecord { ti?: Id parent_task_id?: string pti?: string - type?: Type - ty?: Type + type?: string + ty?: string start_time?: string start?: string timestamp?: string @@ -7410,7 +7390,7 @@ export interface CatTemplatesTemplatesRecord { export interface CatThreadPoolRequest extends CatCatRequestBase { thread_pool_patterns?: Names - size?: CatThreadPoolThreadPoolSize + time?: Time } export type 
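The `wrapper` query newly mapped above carries its wrapped query as a base64-encoded string. A sketch of producing one from a regular query object; it mostly exists for pre-serialized queries, so whether the wrapper form is preferable to the plain query is situational:

import * as T from '@elastic/elasticsearch/lib/api/types'

// Encode an arbitrary query into the wrapper query's base64 form.
function wrap (query: T.QueryDslQueryContainer): T.QueryDslQueryContainer {
  return {
    wrapper: { query: Buffer.from(JSON.stringify(query)).toString('base64') }
  }
}

const q = wrap({ term: { 'user.id': { value: 'kimchy' } } })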
CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -7458,8 +7438,6 @@ export interface CatThreadPoolThreadPoolRecord { ka?: string } -export type CatThreadPoolThreadPoolSize = 'k' | 'm' | 'g' | 't' | 'p' - export interface CatTransformsRequest extends CatCatRequestBase { transform_id?: Id allow_no_match?: boolean @@ -8942,7 +8920,7 @@ export interface IndicesIndexRoutingAllocation { } export interface IndicesIndexRoutingAllocationDisk { - threshold_enabled: boolean | string + threshold_enabled?: boolean | string } export interface IndicesIndexRoutingAllocationInclude { @@ -8965,8 +8943,8 @@ export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replica export interface IndicesIndexSegmentSort { field: Fields order: IndicesSegmentSortOrder | IndicesSegmentSortOrder[] - mode?: IndicesSegmentSortMode - missing?: IndicesSegmentSortMissing + mode?: IndicesSegmentSortMode | IndicesSegmentSortMode[] + missing?: IndicesSegmentSortMissing | IndicesSegmentSortMissing[] } export interface IndicesIndexSettingBlocks { @@ -9105,6 +9083,7 @@ export interface IndicesIndexState { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings + defaults?: IndicesIndexSettings data_stream?: DataStreamName } @@ -9290,7 +9269,6 @@ export interface IndicesCloseResponse extends AcknowledgedResponseBase { export interface IndicesCreateRequest extends RequestBase { index: IndexName - include_type_name?: boolean master_timeout?: Time timeout?: Time wait_for_active_shards?: WaitForActiveShards @@ -9436,17 +9414,6 @@ export interface IndicesExistsTemplateRequest extends RequestBase { export type IndicesExistsTemplateResponse = boolean -export interface IndicesExistsTypeRequest extends RequestBase { - index: Indices - type: Types - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - local?: boolean -} - -export type IndicesExistsTypeResponse = boolean - export interface IndicesFlushRequest extends RequestBase { index?: Indices allow_no_indices?: boolean @@ -9479,7 +9446,6 @@ export interface IndicesGetRequest extends RequestBase { flat_settings?: boolean ignore_unavailable?: boolean include_defaults?: boolean - include_type_name?: boolean local?: boolean master_timeout?: Time } @@ -9541,7 +9507,6 @@ export interface IndicesGetFieldMappingRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean include_defaults?: boolean - include_type_name?: boolean local?: boolean } @@ -9578,7 +9543,6 @@ export interface IndicesGetIndexTemplateRequest extends RequestBase { name?: Name local?: boolean flat_settings?: boolean - include_type_name?: boolean master_timeout?: Time } @@ -9596,7 +9560,6 @@ export interface IndicesGetMappingRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - include_type_name?: boolean local?: boolean master_timeout?: Time } @@ -9622,7 +9585,6 @@ export interface IndicesGetSettingsResponse extends DictionaryResponseBase - export interface IndicesRolloverRequest extends RequestBase { alias: IndexAlias new_index?: IndexName dry_run?: boolean - include_type_name?: boolean master_timeout?: Time timeout?: Time wait_for_active_shards?: WaitForActiveShards @@ -9941,7 +9898,7 @@ export interface IndicesRolloverRequest extends RequestBase { body?: { aliases?: Record conditions?: IndicesRolloverRolloverConditions - mappings?: IndicesRolloverIndexRolloverMapping + mappings?: MappingTypeMapping settings?: Record } } @@ -10132,6 
+10089,7 @@ export interface IndicesStatsIndexStats { flush?: FlushStats get?: GetStats indexing?: IndexingStats + indices?: IndicesStatsIndicesStats merges?: MergesStats query_cache?: QueryCacheStats recovery?: RecoveryStats @@ -10143,13 +10101,13 @@ export interface IndicesStatsIndexStats { translog?: TranslogStats warmer?: WarmerStats bulk?: BulkStats - shards?: IndicesStatsShardsTotalStats + shard_stats?: IndicesStatsShardsTotalStats } export interface IndicesStatsIndicesStats { - primaries: IndicesStatsIndexStats + primaries?: IndicesStatsIndexStats shards?: Record - total: IndicesStatsIndexStats + total?: IndicesStatsIndexStats uuid?: Uuid } @@ -10165,7 +10123,6 @@ export interface IndicesStatsRequest extends RequestBase { include_segment_file_sizes?: boolean include_unloaded_segments?: boolean level?: Level - types?: Types } export interface IndicesStatsResponse { @@ -10235,29 +10192,31 @@ export interface IndicesStatsShardSequenceNumber { } export interface IndicesStatsShardStats { - commit: IndicesStatsShardCommit - completion: CompletionStats - docs: DocStats - fielddata: FielddataStats - flush: FlushStats - get: GetStats - indexing: IndexingStats - merges: MergesStats - shard_path: IndicesStatsShardPath - query_cache: IndicesStatsShardQueryCache - recovery: RecoveryStats - refresh: RefreshStats - request_cache: RequestCacheStats - retention_leases: IndicesStatsShardRetentionLeases - routing: IndicesStatsShardRouting - search: SearchStats - segments: SegmentsStats - seq_no: IndicesStatsShardSequenceNumber - store: StoreStats - translog: TranslogStats - warmer: WarmerStats + commit?: IndicesStatsShardCommit + completion?: CompletionStats + docs?: DocStats + fielddata?: FielddataStats + flush?: FlushStats + get?: GetStats + indexing?: IndexingStats + merges?: MergesStats + shard_path?: IndicesStatsShardPath + query_cache?: IndicesStatsShardQueryCache + recovery?: RecoveryStats + refresh?: RefreshStats + request_cache?: RequestCacheStats + retention_leases?: IndicesStatsShardRetentionLeases + routing?: IndicesStatsShardRouting + search?: SearchStats + segments?: SegmentsStats + seq_no?: IndicesStatsShardSequenceNumber + store?: StoreStats + translog?: TranslogStats + warmer?: WarmerStats bulk?: BulkStats - shards: IndicesStatsShardsTotalStats + shards?: IndicesStatsShardsTotalStats + shard_stats?: IndicesStatsShardsTotalStats + indices?: IndicesStatsIndicesStats } export interface IndicesStatsShardsTotalStats { @@ -10331,7 +10290,6 @@ export interface IndicesValidateQueryIndicesValidationExplanation { export interface IndicesValidateQueryRequest extends RequestBase { index?: Indices - type?: Types allow_no_indices?: boolean all_shards?: boolean analyzer?: string @@ -10619,7 +10577,7 @@ export type IngestShapeType = 'geo_shape' | 'shape' export interface IngestSortProcessor extends IngestProcessorBase { field: Field - order: SearchSortOrder + order: SortOrder target_field: Field } @@ -10739,7 +10697,6 @@ export interface IngestSimulateDocumentSimulation { _parent?: string _routing?: string _source: Record - _type?: Type } export interface IngestSimulateIngest { @@ -11132,7 +11089,7 @@ export interface MlDatafeed { scroll_size?: integer delayed_data_check_config: MlDelayedDataCheckConfig runtime_mappings?: MappingRuntimeFields - indices_options?: MlDatafeedIndicesOptions + indices_options?: IndicesOptions } export interface MlDatafeedConfig { @@ -11144,7 +11101,7 @@ export interface MlDatafeedConfig { frequency?: Timestamp indexes?: string[] indices: string[] - indices_options?: 
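The ingest `sort` processor above now reuses the shared `SortOrder` type. A sketch of a pipeline using it; the pipeline id and field name are made up:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function createPipeline (): Promise<void> {
  // Sorts the array in the 'tags' field ascending at ingest time.
  await client.ingest.putPipeline({
    id: 'sort-tags',
    processors: [
      { sort: { field: 'tags', order: 'asc', target_field: 'tags' } }
    ]
  })
}

createPipeline().catch(console.log)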
MlDatafeedIndicesOptions + indices_options?: IndicesOptions job_id?: Id max_empty_searches?: integer query: QueryDslQueryContainer @@ -11154,13 +11111,6 @@ export interface MlDatafeedConfig { scroll_size?: integer } -export interface MlDatafeedIndicesOptions { - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - ignore_throttled?: boolean -} - export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { @@ -11427,6 +11377,8 @@ export interface MlDelayedDataCheckConfig { enabled: boolean } +export type MlDeploymentState = 'started' | 'starting' | 'fully_allocated' + export interface MlDetectionRule { actions?: MlRuleAction[] conditions?: MlRuleCondition[] @@ -11688,6 +11640,10 @@ export interface MlPerPartitionCategorization { stop_on_warn?: boolean } +export type MlPredictedValue = string | double + +export type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'stopping' + export type MlRuleAction = 'skip_result' | 'skip_model_update' export interface MlRuleCondition { @@ -11701,6 +11657,12 @@ export interface MlTimingStats { iteration_time?: integer } +export interface MlTopClassEntry { + class_name: string + class_probability: double + class_score: double +} + export interface MlTotalFeatureImportance { feature_name: Name importance: MlTotalFeatureImportanceStatistics[] @@ -11718,6 +11680,23 @@ export interface MlTotalFeatureImportanceStatistics { min: integer } +export interface MlTrainedModelAllocation { + allocation_state: MlDeploymentState + routing_table: Record + start_time: DateString + task_parameters: MlTrainedModelAllocationTaskParameters +} + +export interface MlTrainedModelAllocationRoutingTable { + reason: string + routing_state: MlRoutingState +} + +export interface MlTrainedModelAllocationTaskParameters { + model_bytes: integer + model_id: Id +} + export interface MlTrainedModelConfig { model_id: Id model_type: MlTrainedModelType @@ -11747,6 +11726,14 @@ export interface MlTrainedModelConfigMetadata { total_feature_importance?: MlTotalFeatureImportance[] } +export interface MlTrainedModelEntities { + class_name: string + class_probability: double + entity: string + start_pos: integer + end_pos: integer +} + export interface MlTrainedModelInferenceStats { failure_count: long inference_count: long @@ -12128,8 +12115,8 @@ export interface MlGetCategoriesRequest extends RequestBase { job_id: Id category_id?: CategoryId from?: integer - size?: integer partition_field_value?: string + size?: integer /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { page?: MlPage @@ -12346,6 +12333,25 @@ export interface MlGetTrainedModelsStatsResponse { trained_model_stats: MlTrainedModelStats[] } +export interface MlInferTrainedModelDeploymentRequest extends RequestBase { + model_id: Id + timeout?: Time + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + docs: Record[] + } +} + +export interface MlInferTrainedModelDeploymentResponse { + entities?: MlTrainedModelEntities[] + is_truncated?: boolean + predicted_value?: MlPredictedValue[] + predicted_value_sequence?: string + prediction_probability?: double + top_classes: MlTopClassEntry[] + warning?: string +} + export interface MlInfoAnomalyDetectors { categorization_analyzer: MlCategorizationAnalyzer categorization_examples_limit: integer @@ -12537,7 +12543,7 @@ export interface MlPutDatafeedRequest extends RequestBase { frequency?: Time indices?: string[] indexes?: string[] - indices_options?: MlDatafeedIndicesOptions + indices_options?: IndicesOptions job_id?: Id max_empty_searches?: integer query?: QueryDslQueryContainer @@ -12556,7 +12562,7 @@ export interface MlPutDatafeedResponse { frequency: Time indices: string[] job_id: Id - indices_options?: MlDatafeedIndicesOptions + indices_options?: IndicesOptions max_empty_searches: integer query: QueryDslQueryContainer query_delay: Time @@ -12728,6 +12734,31 @@ export interface MlPutTrainedModelAliasRequest extends RequestBase { export interface MlPutTrainedModelAliasResponse extends AcknowledgedResponseBase { } +export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { + model_id: Id + part: integer + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + definition: string + total_definition_length: long + total_parts: integer + } +} + +export interface MlPutTrainedModelDefinitionPartResponse extends AcknowledgedResponseBase { +} + +export interface MlPutTrainedModelVocabularyRequest extends RequestBase { + model_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + vocabulary: string[] + } +} + +export interface MlPutTrainedModelVocabularyResponse extends AcknowledgedResponseBase { +} + export interface MlResetJobRequest extends RequestBase { job_id: Id wait_for_completion?: boolean @@ -12785,6 +12816,19 @@ export interface MlStartDatafeedResponse { started: boolean } +export interface MlStartTrainedModelDeploymentRequest extends RequestBase { + model_id: Id + inference_threads?: integer + model_threads?: integer + queue_capacity?: integer + timeout?: Time + wait_for?: MlDeploymentState +} + +export interface MlStartTrainedModelDeploymentResponse { + allocation: MlTrainedModelAllocation +} + export interface MlStopDataFrameAnalyticsRequest extends RequestBase { id: Id allow_no_match?: boolean @@ -12813,6 +12857,16 @@ export interface MlStopDatafeedResponse { stopped: boolean } +export interface MlStopTrainedModelDeploymentRequest extends RequestBase { + model_id: Id + allow_no_match?: boolean + force?: boolean +} + +export interface MlStopTrainedModelDeploymentResponse { + stopped: boolean +} + export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { id: Id /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
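The trained-model deployment types above pair up as start, infer, stop. A sketch of the round trip, assuming the generated client exposes the endpoints under the usual camel-cased names (ml.startTrainedModelDeployment, ml.inferTrainedModelDeployment, ml.stopTrainedModelDeployment) and using a hypothetical model id:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run (): Promise<void> {
  // Start serving the model and block until it is fully allocated.
  await client.ml.startTrainedModelDeployment({
    model_id: 'my-ner-model',
    wait_for: 'fully_allocated'
  })

  // Run inference against the running deployment.
  const result = await client.ml.inferTrainedModelDeployment({
    model_id: 'my-ner-model',
    timeout: '30s',
    docs: [{ text_field: 'Elastic is a search company' }]
  })
  console.log(result.entities, result.predicted_value)

  await client.ml.stopTrainedModelDeployment({ model_id: 'my-ner-model' })
}

run().catch(console.log)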
*/ @@ -12852,7 +12906,7 @@ export interface MlUpdateDatafeedRequest extends RequestBase { frequency?: Time indices?: string[] indexes?: string[] - indices_options?: MlDatafeedIndicesOptions + indices_options?: IndicesOptions max_empty_searches?: integer query?: QueryDslQueryContainer query_delay?: Time @@ -12870,7 +12924,7 @@ export interface MlUpdateDatafeedResponse { frequency: Time indices: string[] job_id: Id - indices_options?: MlDatafeedIndicesOptions + indices_options?: IndicesOptions max_empty_searches: integer query: QueryDslQueryContainer query_delay: Time @@ -13008,26 +13062,100 @@ export interface MonitoringBulkResponse { } export interface NodesAdaptiveSelection { - avg_queue_size: long - avg_response_time: long - avg_response_time_ns: long - avg_service_time: string - avg_service_time_ns: long - outgoing_searches: long - rank: string + avg_queue_size?: long + avg_response_time?: long + avg_response_time_ns?: long + avg_service_time?: string + avg_service_time_ns?: long + outgoing_searches?: long + rank?: string } export interface NodesBreaker { - estimated_size: string - estimated_size_in_bytes: long - limit_size: string - limit_size_in_bytes: long - overhead: float - tripped: float + estimated_size?: string + estimated_size_in_bytes?: long + limit_size?: string + limit_size_in_bytes?: long + overhead?: float + tripped?: float +} + +export interface NodesCgroup { + cpuacct?: NodesCpuAcct + cpu?: NodesCgroupCpu + memory?: NodesCgroupMemory +} + +export interface NodesCgroupCpu { + control_group?: string + cfs_period_micros?: integer + cfs_quota_micros?: integer + stat?: NodesCgroupCpuStat +} + +export interface NodesCgroupCpuStat { + number_of_elapsed_periods?: long + number_of_times_throttled?: long + time_throttled_nanos?: long +} + +export interface NodesCgroupMemory { + control_group?: string + limit_in_bytes?: string + usage_in_bytes?: string +} + +export interface NodesClient { + id?: long + agent?: string + local_address?: string + remote_address?: string + last_uri?: string + opened_time_millis?: long + closed_time_millis?: long + last_request_time_millis?: long + request_count?: long + request_size_bytes?: long + x_opaque_id?: string +} + +export interface NodesClusterAppliedStats { + recordings?: NodesRecording[] +} + +export interface NodesClusterStateQueue { + total?: long + pending?: long + committed?: long +} + +export interface NodesClusterStateUpdate { + count?: long + computation_time?: string + computation_time_millis?: long + publication_time?: string + publication_time_millis?: long + context_construction_time?: string + context_construction_time_millis?: long + commit_time?: string + commit_time_millis?: long + completion_time?: string + completion_time_millis?: long + master_apply_time?: string + master_apply_time_millis?: long + notification_time?: string + notification_time_millis?: long +} + +export interface NodesContext { + context?: string + compilations?: long + cache_evictions?: long + compilation_limit_triggered?: long } export interface NodesCpu { - percent: integer + percent?: integer sys?: string sys_in_millis?: long total?: string @@ -13037,182 +13165,306 @@ export interface NodesCpu { load_average?: Record } +export interface NodesCpuAcct { + control_group?: string + usage_nanos?: long +} + export interface NodesDataPathStats { - available: string - available_in_bytes: long - disk_queue: string - disk_reads: long - disk_read_size: string - disk_read_size_in_bytes: long - disk_writes: long - disk_write_size: string - disk_write_size_in_bytes: 
long - free: string - free_in_bytes: long - mount: string - path: string - total: string - total_in_bytes: long - type: string + available?: string + available_in_bytes?: long + disk_queue?: string + disk_reads?: long + disk_read_size?: string + disk_read_size_in_bytes?: long + disk_writes?: long + disk_write_size?: string + disk_write_size_in_bytes?: long + free?: string + free_in_bytes?: long + mount?: string + path?: string + total?: string + total_in_bytes?: long + type?: string +} + +export interface NodesDiscovery { + cluster_state_queue?: NodesClusterStateQueue + published_cluster_states?: NodesPublishedClusterStates + cluster_state_update?: Record + serialized_cluster_states?: NodesSerializedClusterState + cluster_applier_stats?: NodesClusterAppliedStats } export interface NodesExtendedMemoryStats extends NodesMemoryStats { - free_percent: integer - used_percent: integer + free_percent?: integer + used_percent?: integer } export interface NodesFileSystem { - data: NodesDataPathStats[] - timestamp: long - total: NodesFileSystemTotal + data?: NodesDataPathStats[] + timestamp?: long + total?: NodesFileSystemTotal + io_stats?: NodesIoStats } export interface NodesFileSystemTotal { - available: string - available_in_bytes: long - free: string - free_in_bytes: long - total: string - total_in_bytes: long + available?: string + available_in_bytes?: long + free?: string + free_in_bytes?: long + total?: string + total_in_bytes?: long } export interface NodesGarbageCollector { - collectors: Record + collectors?: Record } export interface NodesGarbageCollectorTotal { - collection_count: long - collection_time: string - collection_time_in_millis: long + collection_count?: long + collection_time?: string + collection_time_in_millis?: long } export interface NodesHttp { - current_open: integer - total_opened: long + current_open?: integer + total_opened?: long + clients?: NodesClient[] +} + +export interface NodesIndexingPressure { + memory?: NodesIndexingPressureMemory +} + +export interface NodesIndexingPressureMemory { + limit_in_bytes?: long + current?: NodesPressureMemory + total?: NodesPressureMemory } export interface NodesIngest { - pipelines: Record - total: NodesIngestTotal + pipelines?: Record + total?: NodesIngestTotal } export interface NodesIngestTotal { - count: long - current: long - failed: long - processors: NodesKeyedProcessor[] - time_in_millis: long + count?: long + current?: long + failed?: long + processors?: Record[] + time_in_millis?: long +} + +export interface NodesIoStatDevice { + device_name?: string + operations?: long + read_kilobytes?: long + read_operations?: long + write_kilobytes?: long + write_operations?: long +} + +export interface NodesIoStats { + devices?: NodesIoStatDevice[] + total?: NodesIoStatDevice } export interface NodesJvm { - buffer_pools: Record - classes: NodesJvmClasses - gc: NodesGarbageCollector - mem: NodesMemoryStats - threads: NodesJvmThreads - timestamp: long - uptime: string - uptime_in_millis: long + buffer_pools?: Record + classes?: NodesJvmClasses + gc?: NodesGarbageCollector + mem?: NodesJvmMemoryStats + threads?: NodesJvmThreads + timestamp?: long + uptime?: string + uptime_in_millis?: long } export interface NodesJvmClasses { - current_loaded_count: long - total_loaded_count: long - total_unloaded_count: long + current_loaded_count?: long + total_loaded_count?: long + total_unloaded_count?: long +} + +export interface NodesJvmMemoryStats { + heap_used_in_bytes?: long + heap_used_percent?: long + heap_committed_in_bytes?: long + 
heap_max_in_bytes?: long + non_heap_used_in_bytes?: long + non_heap_committed_in_bytes?: long + pools?: Record } export interface NodesJvmThreads { - count: long - peak_count: long + count?: long + peak_count?: long } export interface NodesKeyedProcessor { - statistics: NodesProcess - type: string + stats?: NodesProcessor + type?: string } export interface NodesMemoryStats { + adjusted_total_in_bytes?: long resident?: string resident_in_bytes?: long share?: string share_in_bytes?: long total_virtual?: string total_virtual_in_bytes?: long - total_in_bytes: long - free_in_bytes: long - used_in_bytes: long + total_in_bytes?: long + free_in_bytes?: long + used_in_bytes?: long } export interface NodesNodeBufferPool { - count: long - total_capacity: string - total_capacity_in_bytes: long - used: string - used_in_bytes: long + count?: long + total_capacity?: string + total_capacity_in_bytes?: long + used?: string + used_in_bytes?: long } export interface NodesNodesResponseBase { - _nodes: NodeStatistics + _nodes?: NodeStatistics } export interface NodesOperatingSystem { - cpu: NodesCpu - mem: NodesExtendedMemoryStats - swap: NodesMemoryStats - timestamp: long + cpu?: NodesCpu + mem?: NodesExtendedMemoryStats + swap?: NodesMemoryStats + cgroup?: NodesCgroup + timestamp?: long +} + +export interface NodesPool { + used_in_bytes?: long + max_in_bytes?: long + peak_used_in_bytes?: long + peak_max_in_bytes?: long +} + +export interface NodesPressureMemory { + combined_coordinating_and_primary_in_bytes?: long + coordinating_in_bytes?: long + primary_in_bytes?: long + replica_in_bytes?: long + all_in_bytes?: long + coordinating_rejections?: long + primary_rejections?: long + replica_rejections?: long } export interface NodesProcess { - cpu: NodesCpu - mem: NodesMemoryStats - open_file_descriptors: integer - timestamp: long + cpu?: NodesCpu + mem?: NodesMemoryStats + open_file_descriptors?: integer + max_file_descriptors?: integer + timestamp?: long +} + +export interface NodesProcessor { + count?: long + current?: long + failed?: long + time_in_millis?: long +} + +export interface NodesPublishedClusterStates { + full_states?: long + incompatible_diffs?: long + compatible_diffs?: long +} + +export interface NodesRecording { + name?: string + cumulative_execution_count?: long + cumulative_execution_time?: string + cumulative_execution_time_millis?: long +} + +export interface NodesScriptCache { + cache_evictions?: long + compilation_limit_triggered?: long + compilations?: long + context?: string } export interface NodesScripting { - cache_evictions: long - compilations: long + cache_evictions?: long + compilations?: long + compilation_limit_triggered?: long + contexts?: NodesContext[] +} + +export interface NodesSerializedClusterState { + full_states?: NodesSerializedClusterStateDetail + diffs?: NodesSerializedClusterStateDetail +} + +export interface NodesSerializedClusterStateDetail { + count?: long + uncompressed_size?: string + uncompressed_size_in_bytes?: long + compressed_size?: string + compressed_size_in_bytes?: long } export interface NodesStats { - adaptive_selection: Record - breakers: Record - fs: NodesFileSystem - host: Host - http: NodesHttp - indices: IndicesStatsIndexStats - ingest: NodesIngest - ip: Ip | Ip[] - jvm: NodesJvm - name: Name - os: NodesOperatingSystem - process: NodesProcess - roles: NodeRoles - script: NodesScripting - thread_pool: Record - timestamp: long - transport: NodesTransport - transport_address: TransportAddress - attributes: Record + adaptive_selection?: Record + 
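Since nearly every field on the node statistics types above became optional, consumers have to read them defensively. A sketch of pulling JVM heap usage out of a nodes stats response with optional chaining:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function heapUsage (): Promise<void> {
  const stats = await client.nodes.stats({ metric: 'jvm' })
  for (const [id, node] of Object.entries(stats.nodes)) {
    // Every level may now be absent, so chain all the way down.
    const used = node.jvm?.mem?.heap_used_in_bytes
    const max = node.jvm?.mem?.heap_max_in_bytes
    if (used != null && max != null) {
      console.log(`${id}: ${(used / max * 100).toFixed(1)}% heap used`)
    }
  }
}

heapUsage().catch(console.log)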
breakers?: Record + fs?: NodesFileSystem + host?: Host + http?: NodesHttp + ingest?: NodesIngest + ip?: Ip | Ip[] + jvm?: NodesJvm + name?: Name + os?: NodesOperatingSystem + process?: NodesProcess + roles?: NodeRoles + script?: NodesScripting + script_cache?: Record + thread_pool?: Record + timestamp?: long + transport?: NodesTransport + transport_address?: TransportAddress + attributes?: Record + discovery?: NodesDiscovery + indexing_pressure?: NodesIndexingPressure + indices?: IndicesStatsShardStats } export interface NodesThreadCount { - active: long - completed: long - largest: long - queue: long - rejected: long - threads: long + active?: long + completed?: long + largest?: long + queue?: long + rejected?: long + threads?: long } export interface NodesTransport { - rx_count: long - rx_size: string - rx_size_in_bytes: long - server_open: integer - tx_count: long - tx_size: string - tx_size_in_bytes: long + inbound_handling_time_histogram?: NodesTransportHistogram[] + outbound_handling_time_histogram?: NodesTransportHistogram[] + rx_count?: long + rx_size?: string + rx_size_in_bytes?: long + server_open?: integer + tx_count?: long + tx_size?: string + tx_size_in_bytes?: long + total_outbound_connections?: long +} + +export interface NodesTransportHistogram { + count?: long + lt_millis?: long + ge_millis?: long } export interface NodesHotThreadsHotThread { @@ -13622,7 +13874,7 @@ export interface NodesStatsRequest extends RequestBase { } export interface NodesStatsResponse extends NodesNodesResponseBase { - cluster_name: Name + cluster_name?: Name nodes: Record } @@ -13813,7 +14065,6 @@ export type RollupRollupResponse = any export interface RollupRollupSearchRequest extends RequestBase { index: Indices - type?: Type rest_total_hits_as_int?: boolean typed_keys?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
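The rollup search request above and its response (just below) gained the same document/aggregations generics as plain search. A sketch of a typed rollup search; the rollup index and aggregation names are hypothetical:

import { Client } from '@elastic/elasticsearch'
import * as T from '@elastic/elasticsearch/lib/api/types'

interface RollupAggs { max_temperature: T.AggregationsMaxAggregate }

const client = new Client({ node: 'http://localhost:9200/' })

async function run (): Promise<void> {
  const result = await client.rollup.rollupSearch<unknown, RollupAggs>({
    index: 'rollup-sensors',
    size: 0,
    aggregations: { max_temperature: { max: { field: 'temperature' } } }
  })
  console.log(result.aggregations?.max_temperature.value)
}

run().catch(console.log)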
*/ @@ -13825,13 +14076,13 @@ export interface RollupRollupSearchRequest extends RequestBase { } } -export interface RollupRollupSearchResponse { +export interface RollupRollupSearchResponse> { took: long timed_out: boolean terminated_early?: boolean _shards: ShardStatistics hits: SearchHitsMetadata - aggregations?: Record + aggregations?: TAggregations } export interface RollupStartJobRequest extends RequestBase { @@ -13984,13 +14235,19 @@ export interface SecurityUser { enabled: boolean } +export interface SecurityAuthenticateApiKey { + id: string + name: Name +} + export interface SecurityAuthenticateRequest extends RequestBase { } export interface SecurityAuthenticateResponse { + api_key?: SecurityAuthenticateApiKey authentication_realm: SecurityRealmInfo - email?: string - full_name?: Name + email?: string | null + full_name?: Name | null lookup_realm: SecurityRealmInfo metadata: Metadata roles: string[] @@ -14092,6 +14349,7 @@ export interface SecurityCreateApiKeyResponse { expiration?: long id: Id name: Name + encoded: string } export interface SecurityCreateApiKeyRoleDescriptor { @@ -15067,11 +15325,11 @@ export interface SqlTranslateResponse { size: long _source: SearchSourceConfig fields: Record[] - sort: SearchSort + sort: Sort } export interface SslCertificatesCertificateInformation { - alias?: string + alias: string | null expiry: DateString format: string has_private_key: boolean @@ -15252,7 +15510,6 @@ export interface TransformPivot { aggregations?: Record aggs?: Record group_by?: Record - max_page_search_size?: integer } export interface TransformPivotGroupByContainer { @@ -15268,17 +15525,18 @@ export interface TransformRetentionPolicy { } export interface TransformRetentionPolicyContainer { - time: TransformRetentionPolicy + time?: TransformRetentionPolicy } export interface TransformSettings { + align_checkpoints?: boolean dates_as_epoch_millis?: boolean docs_per_second?: float max_page_search_size?: integer } export interface TransformSyncContainer { - time: TransformTimeSync + time?: TransformTimeSync } export interface TransformTimeSync { @@ -15287,8 +15545,9 @@ export interface TransformTimeSync { } export interface TransformDeleteTransformRequest extends RequestBase { - transform_id: Name + transform_id: Id force?: boolean + timeout?: Time } export interface TransformDeleteTransformResponse extends AcknowledgedResponseBase { @@ -15373,6 +15632,7 @@ export interface TransformGetTransformStatsTransformStats { export interface TransformPreviewTransformRequest extends RequestBase { transform_id?: Id + timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { dest?: ReindexDestination @@ -15395,25 +15655,35 @@ export interface TransformPreviewTransformResponse { export interface TransformPutTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean + timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { dest: ReindexDestination description?: string frequency?: Time + latest?: TransformLatest + _meta?: Record pivot?: TransformPivot - source: ReindexSource + retention_policy?: TransformRetentionPolicyContainer settings?: TransformSettings + source: ReindexSource sync?: TransformSyncContainer - retention_policy?: TransformRetentionPolicyContainer - latest?: TransformLatest } } export interface TransformPutTransformResponse extends AcknowledgedResponseBase { } +export interface TransformResetTransformRequest extends RequestBase { + transform_id: Id + force?: boolean +} + +export interface TransformResetTransformResponse extends AcknowledgedResponseBase { +} + export interface TransformStartTransformRequest extends RequestBase { - transform_id: Name + transform_id: Id timeout?: Time } @@ -15435,6 +15705,7 @@ export interface TransformStopTransformResponse extends AcknowledgedResponseBase export interface TransformUpdateTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean + timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { dest?: ReindexDestination @@ -15453,13 +15724,26 @@ export interface TransformUpdateTransformResponse { dest: ReindexDestination frequency: Time id: Id - pivot: TransformPivot + latest?: TransformLatest + pivot?: TransformPivot + retention_policy?: TransformRetentionPolicyContainer settings: TransformSettings source: ReindexSource sync?: TransformSyncContainer version: VersionString } +export interface TransformUpgradeTransformsRequest extends RequestBase { + dry_run?: boolean + timeout?: Time +} + +export interface TransformUpgradeTransformsResponse { + needs_update: integer + no_action: integer + updated: integer +} + export interface WatcherAcknowledgeState { state: WatcherAcknowledgementOptions timestamp: DateString @@ -15689,14 +15973,6 @@ export interface WatcherIndexResultSummary { index: IndexName result: Result version: VersionNumber - type?: Type -} - -export interface WatcherIndicesOptions { - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - ignore_throttled?: boolean } export interface WatcherInputContainer { @@ -15801,7 +16077,7 @@ export interface WatcherSearchInputRequestBody { export interface WatcherSearchInputRequestDefinition { body?: WatcherSearchInputRequestBody indices?: IndexName[] - indices_options?: WatcherIndicesOptions + indices_options?: IndicesOptions search_type?: SearchType template?: SearchTemplateRequest rest_total_hits_as_int?: boolean @@ -15880,11 +16156,11 @@ export interface WatcherTimeOfYear { } export interface WatcherTriggerContainer { - schedule: WatcherScheduleContainer + schedule?: WatcherScheduleContainer } export interface WatcherTriggerEventContainer { - schedule: WatcherScheduleTriggerEvent + schedule?: WatcherScheduleTriggerEvent } export interface WatcherTriggerEventResult { @@ -16033,8 +16309,8 @@ export interface WatcherQueryWatchesRequest extends RequestBase { from?: integer size?: integer query?: QueryDslQueryContainer - sort?: SearchSort - search_after?: SearchSortResults + sort?: Sort + search_after?: SortResults } } @@ -16557,10 +16833,10 @@ export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter _all: XpackUsageCounter } -export interface SpecUtilsAdditionalProperty { +export interface SpecUtilsAdditionalProperties { } -export interface SpecUtilsAdditionalProperties { +export interface SpecUtilsAdditionalProperty { } 
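The new upgrade-transforms types above support a dry run before the real upgrade. A sketch, assuming the generated client exposes the endpoint as transform.upgradeTransforms:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function upgrade (): Promise<void> {
  // First see what would change without touching anything.
  const plan = await client.transform.upgradeTransforms({ dry_run: true })
  console.log(`${plan.needs_update} transform(s) need an update`)

  if (plan.needs_update > 0) {
    const result = await client.transform.upgradeTransforms({ dry_run: false })
    console.log(`updated: ${result.updated}, no action: ${result.no_action}`)
  }
}

upgrade().catch(console.log)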
export interface SpecUtilsCommonQueryParameters {

diff --git a/src/helpers.ts b/src/helpers.ts
index 525977193..293c23bbe 100644
--- a/src/helpers.ts
+++ b/src/helpers.ts
@@ -37,7 +37,7 @@ export interface ScrollSearchOptions extends TransportRequestOptions {
   wait?: number
 }
 
-export interface ScrollSearchResponse extends TransportResult, unknown> {
+export interface ScrollSearchResponse extends TransportResult, unknown> {
   clear: () => Promise
   documents: TDocument[]
 }
@@ -170,7 +170,7 @@ export default class Helpers {
    * @param {object} options - The client optional configuration for this request.
    * @return {iterator} the async iterator
    */
-  async * scrollSearch (params: T.SearchRequest, options: ScrollSearchOptions = {}): AsyncIterable> {
+  async * scrollSearch (params: T.SearchRequest, options: ScrollSearchOptions = {}): AsyncIterable> {
     options.meta = true
     if (this[kMetaHeader] !== null) {
       options.headers = options.headers ?? {}
@@ -186,9 +186,9 @@
     params.scroll = params.scroll ?? '1m'
     appendFilterPath('_scroll_id', params, false)
 
-    let response: TransportResult, unknown> | undefined
+    let response: TransportResult, unknown> | undefined
     for (let i = 0; i <= maxRetries; i++) {
-      response = await this[kClient].search(params, options as TransportRequestOptionsWithMeta)
+      response = await this[kClient].search(params, options as TransportRequestOptionsWithMeta)
       if (response.statusCode !== 429) break
       await sleep(wait)
     }
@@ -213,7 +213,7 @@
       scroll_id = response.body._scroll_id
       // @ts-expect-error
       response.clear = clear
-      addDocumentsGetter(response)
+      addDocumentsGetter(response)
       // @ts-expect-error
       yield response
@@ -228,7 +228,8 @@
           rest_total_hits_as_int: params.rest_total_hits_as_int,
           scroll_id
         }, options as TransportRequestOptionsWithMeta)
-        response = r as TransportResult, unknown>
+        // @ts-expect-error
+        response = r as TransportResult, unknown>
         assert(response !== undefined, 'The response is undefined, please file a bug report')
         if (response.statusCode !== 429) break
         await sleep(wait)
@@ -866,7 +867,7 @@
 // Using a getter will improve the overall performance of the code,
 // as we will read the documents only if needed.
-function addDocumentsGetter (result: TransportResult, unknown>): void {
+function addDocumentsGetter (result: TransportResult, unknown>): void {
   Object.defineProperty(result, 'documents', {
     get () {
       if (this.body.hits?.hits != null) {

diff --git a/test/unit/api.test.ts b/test/unit/api.test.ts
index 7d8f14fcb..80b3a1f7f 100644
--- a/test/unit/api.test.ts
+++ b/test/unit/api.test.ts
@@ -20,6 +20,7 @@
 import { test } from 'tap'
 import { connection } from '../utils'
 import { Client } from '../..'
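With scrollSearch now carrying the aggregations generic alongside the document one, a typed scroll loop looks like the sketch below; the index name and document shape are hypothetical:

import { Client } from '@elastic/elasticsearch'

interface Log { message: string }

const client = new Client({ node: 'http://localhost:9200/' })

async function run (): Promise<void> {
  const params = { index: 'logs', query: { match_all: {} } }
  for await (const result of client.helpers.scrollSearch<Log>(params)) {
    // `documents` is the typed convenience view over hits.hits._source,
    // provided by the addDocumentsGetter shown in the diff above.
    for (const doc of result.documents) {
      console.log(doc.message)
    }
  }
}

run().catch(console.log)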
+import * as T from '../../lib/api/types' test('Api without body key and top level body', async t => { t.plan(2) @@ -195,3 +196,107 @@ test('Using the body key with a string value', async t => { t.fail(err) } }) + +test('With generic document', async t => { + t.plan(1) + + interface Doc { + foo: string + } + + const Connection = connection.buildMockConnection({ + onRequest (opts) { + return { + statusCode: 200, + body: { + took: 42, + hits: { + hits: [{ + _source: { foo: 'bar' } + }] + }, + aggregations: { + unique: { + buckets: [{ key: 'bar' }] + } + } + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection + }) + + const response = await client.search({ + index: 'test', + allow_no_indices: true, + query: { match_all: {} }, + aggregations: { + unique: { + terms: { + field: 'foo' + } + } + } + }) + + t.equal(response.hits.hits[0]._source?.foo, 'bar') +}) + +test('With generic document and aggregation', async t => { + t.plan(2) + + interface Doc { + foo: string + } + + interface Aggregations { + unique: T.AggregationsTermsAggregateBase<{ key: string }> + } + + const Connection = connection.buildMockConnection({ + onRequest (opts) { + return { + statusCode: 200, + body: { + took: 42, + hits: { + hits: [{ + _source: { foo: 'bar' } + }] + }, + aggregations: { + unique: { + buckets: [{ key: 'bar' }] + } + } + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection + }) + + const response = await client.search({ + index: 'test', + allow_no_indices: true, + query: { match_all: {} }, + aggregations: { + unique: { + terms: { + field: 'foo' + } + } + } + }) + + t.equal(response.hits.hits[0]._source?.foo, 'bar') + t.ok(Array.isArray(response.aggregations?.unique.buckets)) +}) + From 7e84827593ec4c53b882375ddebce842315b25c4 Mon Sep 17 00:00:00 2001 From: delvedor Date: Thu, 16 Dec 2021 17:50:18 +0100 Subject: [PATCH 124/647] Bumped v8.1.0 --- package.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 9905a3fca..6a15cc1a7 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.0.0-beta.1", - "versionCanary": "8.0.0-canary.37", + "version": "8.1.0-beta.1", + "versionCanary": "8.1.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 85d1968ce5f26f87cace5f67df0c1b8a07398148 Mon Sep 17 00:00:00 2001 From: delvedor Date: Thu, 16 Dec 2021 17:56:01 +0100 Subject: [PATCH 125/647] Updated transport dependency --- package.json | 2 +- test/utils/MockConnection.ts | 24 ++++++++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/package.json b/package.json index 6a15cc1a7..8e74947ae 100644 --- a/package.json +++ b/package.json @@ -80,7 +80,7 @@ "xmlbuilder2": "^3.0.2" }, "dependencies": { - "@elastic/transport": "^8.0.0-beta.2", + "@elastic/transport": "^8.1.0-beta.1", "tslib": "^2.3.0" }, "tap": { diff --git a/test/utils/MockConnection.ts b/test/utils/MockConnection.ts index 3bb508fed..19af3dd54 100644 --- a/test/utils/MockConnection.ts +++ b/test/utils/MockConnection.ts @@ -24,7 +24,9 @@ import { ConnectionRequestParams, ConnectionRequestOptions, ConnectionRequestResponse, - errors + errors, + ConnectionRequestOptionsAsStream, + ConnectionRequestResponseAsStream } from '@elastic/transport' const { ConnectionError, @@ -32,7 +34,9 @@ const { } = errors export class MockConnection extends BaseConnection { - 
request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise { + async request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise + async request (params: ConnectionRequestParams, options: ConnectionRequestOptionsAsStream): Promise + async request (params: ConnectionRequestParams, options: any): Promise { return new Promise((resolve, reject) => { const body = JSON.stringify({ hello: 'world' }) const statusCode = setStatusCode(params.path) @@ -49,7 +53,9 @@ export class MockConnection extends BaseConnection { } export class MockConnectionTimeout extends BaseConnection { - request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise { + async request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise + async request (params: ConnectionRequestParams, options: ConnectionRequestOptionsAsStream): Promise + async request (params: ConnectionRequestParams, options: any): Promise { return new Promise((resolve, reject) => { process.nextTick(reject, new TimeoutError('Request timed out')) }) @@ -57,7 +63,9 @@ export class MockConnectionTimeout extends BaseConnection { } export class MockConnectionError extends BaseConnection { - request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise { + async request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise + async request (params: ConnectionRequestParams, options: ConnectionRequestOptionsAsStream): Promise + async request (params: ConnectionRequestParams, options: any): Promise { return new Promise((resolve, reject) => { process.nextTick(reject, new ConnectionError('kaboom')) }) @@ -65,7 +73,9 @@ export class MockConnectionError extends BaseConnection { } export class MockConnectionSniff extends BaseConnection { - request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise { + async request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise + async request (params: ConnectionRequestParams, options: ConnectionRequestOptionsAsStream): Promise + async request (params: ConnectionRequestParams, options: any): Promise { return new Promise((resolve, reject) => { const sniffResult = { nodes: { @@ -106,7 +116,9 @@ export function buildMockConnection (opts: onRequestMock) { assert(opts.onRequest, 'Missing required onRequest option') class MockConnection extends BaseConnection { - request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise { + async request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise + async request (params: ConnectionRequestParams, options: ConnectionRequestOptionsAsStream): Promise + async request (params: ConnectionRequestParams, options: any): Promise { return new Promise((resolve, reject) => { params.headers = { ...this.headers, ...params.headers } let { body, statusCode, headers } = opts.onRequest(params) From 3cfc31902e9adafadcea3f9eff6dbb2a81349bb5 Mon Sep 17 00:00:00 2001 From: delvedor Date: Thu, 16 Dec 2021 17:57:27 +0100 Subject: [PATCH 126/647] Bumped v8.1.0-canary.1 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 8e74947ae..b725c837b 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", "version": "8.1.0-beta.1", - "versionCanary": "8.1.0-canary.0", + "versionCanary": "8.1.0-canary.1", "description": "The official Elasticsearch client for Node.js", "main": 
"index.js", "types": "index.d.ts", From b3c24659d852a8d60819d2f5ef7f15d28c3f9cbb Mon Sep 17 00:00:00 2001 From: delvedor Date: Fri, 17 Dec 2021 10:08:12 +0100 Subject: [PATCH 127/647] API generation --- src/api/kibana.ts | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/api/kibana.ts b/src/api/kibana.ts index 226314932..fd95f34bd 100644 --- a/src/api/kibana.ts +++ b/src/api/kibana.ts @@ -344,8 +344,8 @@ interface KibanaClient { monitoring: { bulk: (params: T.MonitoringBulkRequest| TB.MonitoringBulkRequest, options?: TransportRequestOptions) => Promise> } - msearch: (params?: T.MsearchRequest| TB.MsearchRequest, options?: TransportRequestOptions) => Promise, TContext>> - msearchTemplate: (params?: T.MsearchTemplateRequest| TB.MsearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>> + msearch: , TContext = unknown>(params?: T.MsearchRequest| TB.MsearchRequest, options?: TransportRequestOptions) => Promise, TContext>> + msearchTemplate: , TContext = unknown>(params?: T.MsearchTemplateRequest| TB.MsearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>> mtermvectors: (params?: T.MtermvectorsRequest| TB.MtermvectorsRequest, options?: TransportRequestOptions) => Promise> nodes: { clearRepositoriesMeteringArchive: (params?: T.TODO, options?: TransportRequestOptions) => Promise> @@ -370,13 +370,13 @@ interface KibanaClient { getRollupIndexCaps: (params: T.RollupGetRollupIndexCapsRequest| TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions) => Promise> putJob: (params: T.RollupPutJobRequest| TB.RollupPutJobRequest, options?: TransportRequestOptions) => Promise> rollup: (params: T.RollupRollupRequest| TB.RollupRollupRequest, options?: TransportRequestOptions) => Promise> - rollupSearch: (params: T.RollupRollupSearchRequest| TB.RollupRollupSearchRequest, options?: TransportRequestOptions) => Promise, TContext>> + rollupSearch: , TContext = unknown>(params: T.RollupRollupSearchRequest| TB.RollupRollupSearchRequest, options?: TransportRequestOptions) => Promise, TContext>> startJob: (params: T.RollupStartJobRequest| TB.RollupStartJobRequest, options?: TransportRequestOptions) => Promise> stopJob: (params: T.RollupStopJobRequest| TB.RollupStopJobRequest, options?: TransportRequestOptions) => Promise> } scriptsPainlessExecute: (params?: T.ScriptsPainlessExecuteRequest| TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions) => Promise, TContext>> - scroll: (params?: T.ScrollRequest| TB.ScrollRequest, options?: TransportRequestOptions) => Promise, TContext>> - search: (params?: T.SearchRequest| TB.SearchRequest, options?: TransportRequestOptions) => Promise, TContext>> + scroll: , TContext = unknown>(params?: T.ScrollRequest| TB.ScrollRequest, options?: TransportRequestOptions) => Promise, TContext>> + search: , TContext = unknown>(params?: T.SearchRequest| TB.SearchRequest, options?: TransportRequestOptions) => Promise, TContext>> searchMvt: (params: T.SearchMvtRequest| TB.SearchMvtRequest, options?: TransportRequestOptions) => Promise> searchShards: (params?: T.SearchShardsRequest| TB.SearchShardsRequest, options?: TransportRequestOptions) => Promise> searchTemplate: (params?: T.SearchTemplateRequest| TB.SearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>> From f718b22e9b4419b84958e5db4b031448b296898c Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 27 Dec 2021 09:27:45 +0100 Subject: [PATCH 128/647] Bumped v8.1.0-canary.2 --- package.json | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index b725c837b..d7f97e538 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", "version": "8.1.0-beta.1", - "versionCanary": "8.1.0-canary.1", + "versionCanary": "8.1.0-canary.2", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 16cefd83832aa502b7cc74f7747ab70375c0261a Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 10 Jan 2022 15:52:18 +0100 Subject: [PATCH 129/647] API generation --- src/api/api/license.ts | 9 +- src/api/api/ml.ts | 8 +- src/api/api/rollup.ts | 2 +- src/api/api/search_mvt.ts | 2 +- src/api/api/security.ts | 25 +- src/api/api/transform.ts | 2 +- src/api/kibana.ts | 2 +- src/api/types.ts | 665 ++++++++++++++++++++++++----------- src/api/typesWithBodyKey.ts | 670 +++++++++++++++++++++++++----------- 9 files changed, 949 insertions(+), 436 deletions(-) diff --git a/src/api/api/license.ts b/src/api/api/license.ts index 26c4e59e7..a4fbf34d6 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -135,10 +135,10 @@ export default class License { return await this.transport.request({ path, method, querystring, body }, options) } - async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> - async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise - async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise { + async post (this: That, params: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async post (this: That, params: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async post (this: That, params: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise + async post (this: That, params: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['license', 'licenses'] const querystring: Record = {} @@ -151,7 +151,6 @@ export default class License { body = userBody != null ? { ...userBody } : undefined } - params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { body = body ?? 
{} diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 0cbe94600..434d20133 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1425,7 +1425,7 @@ export default class Ml { async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', 'model_memory_limit', 'source'] + const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', 'model_memory_limit', 'source', 'headers', 'version'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1459,7 +1459,7 @@ export default class Ml { async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] + const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1561,7 +1561,7 @@ export default class Ml { async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'tags'] + const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'tags'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -2112,7 +2112,7 @@ export default class Ml { async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptions): Promise async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_retention_days', 'results_index_name'] + const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_id', 'model_snapshot_retention_days', 'results_index_name'] const querystring: Record = {} // @ts-expect-error const userBody: any = 
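Editor's sketch, not part of the patch: license.post above loses its parameter-less overloads because LicensePostRequest (later in this same patch) makes the licenses array required. A sketch under that assumption, with the client instance from the first sketch; the license payload is a placeholder:

const issuedLicense = {} as any // placeholder for a license object issued by Elastic
await client.license.post({
  acknowledge: true,
  licenses: [issuedLicense] // now a required field
})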
params?.body diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index 4e19528ed..11ad960f3 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -152,7 +152,7 @@ export default class Rollup { async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptions): Promise async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index'] + const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index', 'timeout', 'headers'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index 23ec4c9a3..b58059daa 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -42,7 +42,7 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y'] - const acceptedBody: string[] = ['aggs', 'exact_bounds', 'extent', 'fields', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort'] + const acceptedBody: string[] = ['aggs', 'exact_bounds', 'extent', 'fields', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort', 'track_total_hits'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 78e12b4a1..553680244 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -1037,19 +1037,32 @@ export default class Security { return await this.transport.request({ path, method, querystring, body }, options) } - async queryApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async queryApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async queryApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async queryApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = 
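Illustrative sketch, not part of the patch: search_mvt.ts above starts routing track_total_hits into the request body. Assumes a hypothetical index with a geo_point field named location, reusing the client set up earlier:

const tile = await client.searchMvt({
  index: 'museums', // hypothetical index
  field: 'location',
  zoom: 13,
  x: 4207,
  y: 2692,
  track_total_hits: false // newly accepted body parameter
})
// `tile` is a binary Mapbox vector tile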
userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index beef02852..27fce8f29 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -264,7 +264,7 @@ export default class Transform { async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const acceptedBody: string[] = ['dest', 'description', 'frequency', 'source', 'settings', 'sync', 'retention_policy'] + const acceptedBody: string[] = ['dest', 'description', 'frequency', '_meta', 'source', 'settings', 'sync', 'retention_policy'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/kibana.ts b/src/api/kibana.ts index fd95f34bd..9e4aff53f 100644 --- a/src/api/kibana.ts +++ b/src/api/kibana.ts @@ -423,7 +423,7 @@ interface KibanaClient { putRole: (params: T.SecurityPutRoleRequest| TB.SecurityPutRoleRequest, options?: TransportRequestOptions) => Promise> putRoleMapping: (params: T.SecurityPutRoleMappingRequest| TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions) => Promise> putUser: (params: T.SecurityPutUserRequest| TB.SecurityPutUserRequest, options?: TransportRequestOptions) => Promise> - queryApiKeys: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + queryApiKeys: (params?: T.SecurityQueryApiKeysRequest| TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions) => Promise> samlAuthenticate: (params?: T.TODO, options?: TransportRequestOptions) => Promise> samlCompleteLogout: (params?: T.TODO, options?: TransportRequestOptions) => Promise> samlInvalidate: (params?: T.TODO, options?: TransportRequestOptions) => Promise> diff --git a/src/api/types.ts b/src/api/types.ts index b06fa5ce7..4ecde761f 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -319,12 +319,16 @@ export interface ExplainResponse { export interface FieldCapsFieldCapability { aggregatable: boolean indices?: Indices - meta?: Record + meta?: Metadata non_aggregatable_indices?: Indices non_searchable_indices?: Indices searchable: boolean type: string metadata_field?: boolean + time_series_dimension?: boolean + time_series_metric?: MappingTimeSeriesMetricType + non_dimension_indices?: IndexName[] + metric_conflicts_indices?: IndexName[] } export interface FieldCapsRequest extends RequestBase { @@ -692,7 +696,7 @@ export interface RankEvalRankEvalHit { export interface RankEvalRankEvalHitItem { hit: RankEvalRankEvalHit - rating?: double + rating?: double | null } export interface RankEvalRankEvalMetric { @@ -1010,7 +1014,7 @@ export interface SearchAggregationProfile { description: string time_in_nanos: long type: string - debug?: SearchAggregationProfileDebug + debug?: SearchAggregationProfileDebug | SearchAggregationProfileDelegateDebug children?: SearchAggregationProfile[] } @@ -1135,6 +1139,7 @@ export interface SearchFieldCollapse { 
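Illustrative usage, not part of the patch: security.queryApiKeys above moves from the T.TODO placeholder to the typed SecurityQueryApiKeysRequest and SecurityQueryApiKeysResponse defined later in this patch. A sketch, with the client instance from the first sketch:

const { total, count, api_keys } = await client.security.queryApiKeys({
  query: { match: { name: 'my-api-key' } }, // hypothetical key name
  size: 10
})
console.log(total, count, api_keys.map(k => k.id))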
field: Field inner_hits?: SearchInnerHits | SearchInnerHits[] max_concurrent_group_searches?: integer + collapse?: SearchFieldCollapse } export interface SearchFieldSuggester { @@ -1214,6 +1219,7 @@ export interface SearchHit { matched_queries?: string[] _nested?: SearchNestedIdentity _ignored?: string[] + ignored_field_values?: Record _shard?: string _node?: string _routing?: string @@ -1472,6 +1478,7 @@ export interface SearchMvtRequest extends RequestBase { query?: QueryDslQueryContainer runtime_mappings?: MappingRuntimeFields sort?: Sort + track_total_hits?: SearchTrackHits } export type SearchMvtResponse = MapboxVectorTiles @@ -2075,6 +2082,7 @@ export interface NodeShard { allocation_id?: Record recovery_source?: Record unassigned_info?: ClusterAllocationExplainUnassignedInformation + relocating_node?: null } export interface NodeStatistics { @@ -2317,7 +2325,7 @@ export type SuggestionName = string export type TaskId = string | integer -export type ThreadType = 'cpu' | 'wait' | 'block' +export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' export type Time = string | integer @@ -2339,9 +2347,6 @@ export interface TopRightBottomLeftGeoBounds { bottom_left: GeoLocation } -export interface Transform { -} - export interface TransformContainer { chain?: ChainTransform script?: ScriptTransform @@ -2449,7 +2454,10 @@ export interface AggregationsAggregationContainer { bucket_script?: AggregationsBucketScriptAggregation bucket_selector?: AggregationsBucketSelectorAggregation bucket_sort?: AggregationsBucketSortAggregation + bucket_count_ks_test?: AggregationsBucketKsAggregation + bucket_correlation?: AggregationsBucketCorrelationAggregation cardinality?: AggregationsCardinalityAggregation + categorize_text?: AggregationsCategorizeTextAggregation children?: AggregationsChildrenAggregation composite?: AggregationsCompositeAggregation cumulative_cardinality?: AggregationsCumulativeCardinalityAggregation @@ -2514,9 +2522,9 @@ export interface AggregationsAggregationContainer { } export interface AggregationsAggregationRange { - from?: double | string + from?: double | string | null key?: string - to?: double | string + to?: double | string | null } export interface AggregationsArrayPercentilesItem { @@ -2574,10 +2582,38 @@ export interface AggregationsBoxplotAggregation extends AggregationsMetricAggreg export interface AggregationsBucketAggregationBase extends AggregationsAggregation { } +export interface AggregationsBucketCorrelationAggregation extends AggregationsBucketPathAggregation { + function: AggregationsBucketCorrelationFunction +} + +export interface AggregationsBucketCorrelationFunction { + count_correlation: AggregationsBucketCorrelationFunctionCountCorrelation +} + +export interface AggregationsBucketCorrelationFunctionCountCorrelation { + indicator: AggregationsBucketCorrelationFunctionCountCorrelationIndicator +} + +export interface AggregationsBucketCorrelationFunctionCountCorrelationIndicator { + doc_count: integer + expectations: double[] + fractions?: double[] +} + +export interface AggregationsBucketKsAggregation extends AggregationsBucketPathAggregation { + alternative?: string[] + fractions?: double[] + sampling_method?: string +} + export interface AggregationsBucketMetricValueAggregate extends AggregationsSingleMetricAggregateBase { keys: string[] } +export interface AggregationsBucketPathAggregation extends AggregationsAggregation { + buckets_path?: AggregationsBucketsPath +} + export interface AggregationsBucketScriptAggregation extends 
AggregationsPipelineAggregationBase { script?: Script } @@ -2608,6 +2644,25 @@ export interface AggregationsCardinalityAggregation extends AggregationsMetricAg rehash?: boolean } +export interface AggregationsCategorizeTextAggregation extends AggregationsAggregation { + field: Field + max_unique_tokens?: integer + max_matched_tokens?: integer + similarity_threshold?: integer + categorization_filters?: string[] + categorization_analyzer?: string | AggregationsCategorizeTextAnalyzer + shard_size?: integer + size?: integer + min_doc_count?: integer + shard_min_doc_count?: integer +} + +export interface AggregationsCategorizeTextAnalyzer { + char_filter?: string[] + tokenizer?: string + filter?: string[] +} + export interface AggregationsChiSquareHeuristic { background_is_superset: boolean include_negatives: boolean @@ -3013,9 +3068,9 @@ export interface AggregationsIpRangeAggregation extends AggregationsBucketAggreg } export interface AggregationsIpRangeAggregationRange { - from?: string + from?: string | null mask?: string - to?: string + to?: string | null } export interface AggregationsIpRangeBucketKeys extends AggregationsMultiBucketBase { @@ -3196,7 +3251,7 @@ export interface AggregationsPercentageScoreHeuristic { export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase { keyed?: boolean - values?: double[] + values?: double[] | null hdr?: AggregationsHdrMethod tdigest?: AggregationsTDigest } @@ -3221,8 +3276,7 @@ export interface AggregationsPercentilesBucketAggregation extends AggregationsPi percents?: double[] } -export interface AggregationsPipelineAggregationBase extends AggregationsAggregation { - buckets_path?: AggregationsBucketsPath +export interface AggregationsPipelineAggregationBase extends AggregationsBucketPathAggregation { format?: string gap_policy?: AggregationsGapPolicy } @@ -3236,6 +3290,7 @@ export interface AggregationsRangeAggregation extends AggregationsBucketAggregat ranges?: AggregationsAggregationRange[] script?: Script keyed?: boolean + format?: string } export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase { @@ -3513,6 +3568,7 @@ export interface AggregationsTermsAggregation extends AggregationsBucketAggregat shard_size?: integer show_term_doc_count_error?: boolean size?: integer + format?: string } export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_first' @@ -4319,6 +4375,10 @@ export interface MappingCorePropertyBase extends MappingPropertyBase { store?: boolean } +export interface MappingDataStreamTimestamp { + enabled: boolean +} + export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase { boost?: double format?: string @@ -4715,6 +4775,7 @@ export interface MappingTypeMapping { _source?: MappingSourceField runtime?: Record enabled?: boolean + _data_stream_timestamp?: MappingDataStreamTimestamp } export interface MappingUnsignedLongNumberProperty extends MappingNumberPropertyBase { @@ -4754,7 +4815,7 @@ export interface QueryDslCombinedFieldsQuery extends QueryDslQueryBase { query: string auto_generate_synonyms_phrase_query?: boolean operator?: QueryDslCombinedFieldsOperator - mimimum_should_match?: MinimumShouldMatch + minimum_should_match?: MinimumShouldMatch zero_terms_query?: QueryDslCombinedFieldsZeroTerms } @@ -4786,8 +4847,8 @@ export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { gte?: DateMath lt?: DateMath lte?: DateMath - from?: DateMath - to?: DateMath + from?: DateMath | null + to?: DateMath | null 
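Editor's sketch, not part of the patch: AggregationsAggregationContainer above gains categorize_text, bucket_count_ks_test and bucket_correlation. A sketch of the new text-categorization aggregation, assuming a hypothetical app-logs index with a message field, reusing the earlier client:

const resp = await client.search({
  index: 'app-logs',
  size: 0,
  aggs: {
    categories: {
      categorize_text: {
        field: 'message',
        similarity_threshold: 70,
        categorization_filters: ['\\[.*?\\]'] // strip bracketed prefixes before categorizing
      }
    }
  }
})
console.log(resp.aggregations)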
format?: DateFormat time_zone?: TimeZone } @@ -5022,6 +5083,12 @@ export interface QueryDslIntervalsWildcard { use_field?: Field } +export interface QueryDslKnnQuery extends QueryDslQueryBase { + field: Field + num_candidates: integer + query_vector: double[] +} + export type QueryDslLike = string | QueryDslLikeDocument export interface QueryDslLikeDocument { @@ -5141,8 +5208,8 @@ export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { gte?: double lt?: double lte?: double - from?: double - to?: double + from?: double | null + to?: double | null } export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { @@ -5211,6 +5278,7 @@ export interface QueryDslQueryContainer { has_parent?: QueryDslHasParentQuery ids?: QueryDslIdsQuery intervals?: Partial> + knn?: QueryDslKnnQuery match?: Partial> match_all?: QueryDslMatchAllQuery match_bool_prefix?: Partial> @@ -5699,24 +5767,24 @@ export type CatAliasesResponse = CatAliasesAliasesRecord[] export interface CatAllocationAllocationRecord { shards?: string s?: string - 'disk.indices'?: ByteSize - di?: ByteSize - diskIndices?: ByteSize - 'disk.used'?: ByteSize - du?: ByteSize - diskUsed?: ByteSize - 'disk.avail'?: ByteSize - da?: ByteSize - diskAvail?: ByteSize - 'disk.total'?: ByteSize - dt?: ByteSize - diskTotal?: ByteSize - 'disk.percent'?: Percentage - dp?: Percentage - diskPercent?: Percentage - host?: Host - h?: Host - ip?: Ip + 'disk.indices'?: ByteSize | null + di?: ByteSize | null + diskIndices?: ByteSize | null + 'disk.used'?: ByteSize | null + du?: ByteSize | null + diskUsed?: ByteSize | null + 'disk.avail'?: ByteSize | null + da?: ByteSize | null + diskAvail?: ByteSize | null + 'disk.total'?: ByteSize | null + dt?: ByteSize | null + diskTotal?: ByteSize | null + 'disk.percent'?: Percentage | null + dp?: Percentage | null + diskPercent?: Percentage | null + host?: Host | null + h?: Host | null + ip?: Ip | null node?: string n?: string } @@ -5849,20 +5917,20 @@ export interface CatIndicesIndicesRecord { r?: string 'shards.replica'?: string shardsReplica?: string - 'docs.count'?: string - dc?: string - docsCount?: string - 'docs.deleted'?: string - dd?: string - docsDeleted?: string + 'docs.count'?: string | null + dc?: string | null + docsCount?: string | null + 'docs.deleted'?: string | null + dd?: string | null + docsDeleted?: string | null 'creation.date'?: string cd?: string 'creation.date.string'?: string cds?: string - 'store.size'?: string - ss?: string - storeSize?: string - 'pri.store.size'?: string + 'store.size'?: string | null + ss?: string | null + storeSize?: string | null + 'pri.store.size'?: string | null 'completion.size'?: string cs?: string completionSize?: string @@ -6964,15 +7032,15 @@ export interface CatShardsShardsRecord { primaryOrReplica?: string state?: string st?: string - docs?: string - d?: string - dc?: string - store?: string - sto?: string - ip?: string + docs?: string | null + d?: string | null + dc?: string | null + store?: string | null + sto?: string | null + ip?: string | null id?: string - node?: string - n?: string + node?: string | null + n?: string | null sync_id?: string 'unassigned.reason'?: string ur?: string @@ -7264,8 +7332,8 @@ export interface CatTemplatesTemplatesRecord { order?: string o?: string p?: string - version?: VersionString - v?: VersionString + version?: VersionString | null + v?: VersionString | null composed_of?: string c?: string } @@ -7310,14 +7378,14 @@ export interface CatThreadPoolThreadPoolRecord { l?: string completed?: string 
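Illustrative note, not part of the patch: QueryDslQueryContainer above gains a knn clause mirroring the new QueryDslKnnQuery shape (field, query_vector, num_candidates). This is a type-level sketch only; whether a given server version accepts the clause is not established by the patch. Assumes a hypothetical dense_vector field and reuses the earlier client:

const resp = await client.search({
  index: 'vectors', // hypothetical index
  query: {
    knn: {
      field: 'embedding',
      query_vector: [0.1, 0.2, 0.3],
      num_candidates: 100
    }
  }
})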
c?: string - core?: string - cr?: string - max?: string - mx?: string - size?: string - sz?: string - keep_alive?: string - ka?: string + core?: string | null + cr?: string | null + max?: string | null + mx?: string | null + size?: string | null + sz?: string | null + keep_alive?: string | null + ka?: string | null } export interface CatTransformsRequest extends CatCatRequestBase { @@ -7338,14 +7406,14 @@ export interface CatTransformsTransformsRecord { documents_processed?: string docp?: string documentsProcessed?: string - checkpoint_progress?: string - cp?: string - checkpointProgress?: string - last_search_time?: string - lst?: string - lastSearchTime?: string - changes_last_detection_time?: string - cldt?: string + checkpoint_progress?: string | null + cp?: string | null + checkpointProgress?: string | null + last_search_time?: string | null + lst?: string | null + lastSearchTime?: string | null + changes_last_detection_time?: string | null + cldt?: string | null create_time?: string ct?: string createTime?: string @@ -7847,7 +7915,7 @@ export interface ClusterHealthRequest extends RequestBase { timeout?: Time wait_for_active_shards?: WaitForActiveShards wait_for_events?: WaitForEvents - wait_for_nodes?: string + wait_for_nodes?: string | integer wait_for_no_initializing_shards?: boolean wait_for_no_relocating_shards?: boolean wait_for_status?: HealthStatus @@ -7857,7 +7925,7 @@ export interface ClusterHealthResponse { active_primary_shards: integer active_shards: integer active_shards_percent_as_number: Percentage - cluster_name: string + cluster_name: Name delayed_unassigned_shards: integer indices?: Record initializing_shards: integer @@ -8032,6 +8100,7 @@ export interface ClusterRerouteRerouteParameters { } export interface ClusterRerouteResponse { + acknowledged: boolean explanations?: ClusterRerouteRerouteExplanation[] state: any } @@ -8156,6 +8225,7 @@ export interface ClusterStatsClusterNodes { plugins: PluginStats[] process: ClusterStatsClusterProcess versions: VersionString[] + indexing_pressure: ClusterStatsIndexingPressure } export interface ClusterStatsClusterOperatingSystem { @@ -8222,6 +8292,27 @@ export interface ClusterStatsFieldTypesMappings { runtime_field_types?: ClusterStatsRuntimeFieldTypes[] } +export interface ClusterStatsIndexingPressure { + memory: ClusterStatsIndexingPressureMemory +} + +export interface ClusterStatsIndexingPressureMemory { + limit_in_bytes: long + current: ClusterStatsIndexingPressureMemorySummary + total: ClusterStatsIndexingPressureMemorySummary +} + +export interface ClusterStatsIndexingPressureMemorySummary { + all_in_bytes: long + combined_coordinating_and_primary_in_bytes: long + coordinating_in_bytes: long + coordinating_rejections?: long + primary_in_bytes: long + primary_rejections?: long + replica_in_bytes: long + replica_rejections?: long +} + export interface ClusterStatsIndicesVersions { index_count: integer primary_shard_count: integer @@ -8241,6 +8332,7 @@ export interface ClusterStatsOperatingSystemMemoryInfo { total_in_bytes: long used_in_bytes: long used_percent: integer + adjusted_total_in_bytes?: long } export interface ClusterStatsRequest extends RequestBase { @@ -8577,9 +8669,20 @@ export interface GraphExploreResponse { export type IlmActions = any +export interface IlmConfigurations { + rollover?: IndicesRolloverRolloverConditions + forcemerge?: IlmForceMergeConfiguration + shrink?: IlmShrinkConfiguration +} + +export interface IlmForceMergeConfiguration { + max_num_segments: integer +} + export interface IlmPhase { 
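Illustrative usage, not part of the patch: ClusterHealthRequest above widens wait_for_nodes from string to string | integer, so a plain node count now type-checks. A sketch, with the client instance from the first sketch:

const health = await client.cluster.health({
  wait_for_nodes: 3, // previously had to be the string '3'
  wait_for_status: 'yellow',
  timeout: '30s'
})
console.log(health.status)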
actions?: IlmActions min_age?: Time + configurations?: IlmConfigurations } export interface IlmPhases { @@ -8594,8 +8697,14 @@ export interface IlmPolicy { name?: Name } +export interface IlmShrinkConfiguration { + number_of_shards: integer +} + export interface IlmDeleteLifecycleRequest extends RequestBase { name: Name + master_timeout?: Time + timeout?: Time } export interface IlmDeleteLifecycleResponse extends AcknowledgedResponseBase { @@ -8610,6 +8719,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged { failed_step?: Name failed_step_retry_count?: integer index: IndexName + index_creation_date_millis?: EpochMillis is_auto_retryable_error?: boolean lifecycle_date_millis: EpochMillis managed: true @@ -8620,6 +8730,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged { step_info?: Record step_time_millis: EpochMillis phase_execution: IlmExplainLifecycleLifecycleExplainPhaseExecution + time_since_index_creation?: Time } export interface IlmExplainLifecycleLifecycleExplainPhaseExecution { @@ -8637,6 +8748,8 @@ export interface IlmExplainLifecycleRequest extends RequestBase { index: IndexName only_errors?: boolean only_managed?: boolean + master_timeout?: Time + timeout?: Time } export interface IlmExplainLifecycleResponse { @@ -8651,6 +8764,8 @@ export interface IlmGetLifecycleLifecycle { export interface IlmGetLifecycleRequest extends RequestBase { name?: Name + master_timeout?: Time + timeout?: Time } export interface IlmGetLifecycleResponse extends DictionaryResponseBase { @@ -8680,6 +8795,8 @@ export interface IlmMoveToStepStepKey { export interface IlmPutLifecycleRequest extends RequestBase { name: Name + master_timeout?: Time + timeout?: Time policy?: IlmPolicy } @@ -8733,9 +8850,34 @@ export interface IndicesAliasDefinition { is_write_index?: boolean routing?: string search_routing?: string + is_hidden?: boolean } export interface IndicesDataStream { + name: DataStreamName + timestamp_field: IndicesDataStreamTimestampField + indices: IndicesDataStreamIndex[] + generation: integer + template: Name + hidden: boolean + replicated?: boolean + system?: boolean + status: HealthStatus + ilm_policy?: Name + _meta?: Metadata + allow_custom_routing?: boolean +} + +export interface IndicesDataStreamIndex { + index_name: IndexName + index_uuid: Uuid +} + +export interface IndicesDataStreamTimestampField { + name: Field +} + +export interface IndicesDataStreamVisibility { hidden?: boolean } @@ -8803,6 +8945,10 @@ export interface IndicesIndexSettings { 'index.routing_path'?: string[] soft_deletes?: IndicesSoftDeletes 'index.soft_deletes'?: IndicesSoftDeletes + 'soft_deletes.enabled'?: boolean + 'index.soft_deletes.enabled'?: boolean + 'soft_deletes.retention_lease.period'?: Time + 'index.soft_deletes.retention_lease.period'?: Time sort?: IndicesIndexSegmentSort 'index.sort'?: IndicesIndexSegmentSort number_of_shards?: integer | string @@ -8817,8 +8963,6 @@ export interface IndicesIndexSettings { 'index.codec'?: string routing_partition_size?: integer 'index.routing_partition_size'?: integer - 'soft_deletes.retention_lease.period'?: Time - 'index.soft_deletes.retention_lease.period'?: Time load_fixed_bitset_filters_eagerly?: boolean 'index.load_fixed_bitset_filters_eagerly'?: boolean hidden?: boolean | string @@ -8827,6 +8971,8 @@ export interface IndicesIndexSettings { 'index.auto_expand_replicas'?: string 'merge.scheduler.max_thread_count'?: integer 'index.merge.scheduler.max_thread_count'?: integer + 'merge.scheduler.max_merge_count'?: integer + 
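Editor's sketch, not part of the patch: the ILM requests above gain master_timeout and timeout parameters, and the explain output gains index-creation fields. A sketch, reusing the earlier client:

const resp = await client.ilm.explainLifecycle({
  index: 'my-index', // hypothetical index
  only_managed: true,
  master_timeout: '30s' // newly accepted parameter
})
console.log(resp)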
'index.merge.scheduler.max_merge_count'?: integer 'search.idle.after'?: Time 'index.search.idle.after'?: Time refresh_interval?: Time @@ -8883,12 +9029,14 @@ export interface IndicesIndexSettings { 'index.provided_name'?: Name creation_date?: DateString 'index.creation_date'?: DateString + creation_date_string?: DateString + 'index.creation_date_string'?: DateString uuid?: Uuid 'index.uuid'?: Uuid version?: IndicesIndexVersioning 'index.version'?: IndicesIndexVersioning - verified_before_close?: boolean | string - 'index.verified_before_close'?: boolean | string + verified_before_close?: boolean + 'index.verified_before_close'?: boolean format?: string | integer 'index.format'?: string | integer max_slices_per_scroll?: integer @@ -8897,14 +9045,15 @@ export interface IndicesIndexSettings { 'index.translog.durability'?: string 'translog.flush_threshold_size'?: string 'index.translog.flush_threshold_size'?: string - 'query_string.lenient'?: boolean | string - 'index.query_string.lenient'?: boolean | string + 'query_string.lenient'?: boolean + 'index.query_string.lenient'?: boolean priority?: integer | string 'index.priority'?: integer | string top_metrics_max_size?: integer analysis?: IndicesIndexSettingsAnalysis 'index.analysis'?: IndicesIndexSettingsAnalysis settings?: IndicesIndexSettings + time_series?: IndicesIndexSettingsTimeSeries } export interface IndicesIndexSettingsAnalysis { @@ -8919,6 +9068,11 @@ export interface IndicesIndexSettingsLifecycle { name: Name } +export interface IndicesIndexSettingsTimeSeries { + end_time: DateString + start_time: DateString +} + export interface IndicesIndexState { aliases?: Record mappings?: MappingTypeMapping @@ -8929,6 +9083,7 @@ export interface IndicesIndexState { export interface IndicesIndexVersioning { created: VersionString + created_string?: VersionString } export interface IndicesNumericFielddata { @@ -8945,6 +9100,7 @@ export type IndicesSegmentSortOrder = 'asc' | 'desc' export interface IndicesSoftDeletes { enabled: boolean + 'retention_lease.period'?: Time } export interface IndicesStringFielddata { @@ -9265,6 +9421,7 @@ export interface IndicesForcemergeRequest extends RequestBase { ignore_unavailable?: boolean max_num_segments?: long only_expunge_deletes?: boolean + wait_for_completion?: boolean } export interface IndicesForcemergeResponse extends ShardsOperationResponseBase { @@ -9300,35 +9457,13 @@ export interface IndicesGetAliasRequest extends RequestBase { export interface IndicesGetAliasResponse extends DictionaryResponseBase { } -export interface IndicesGetDataStreamIndicesGetDataStreamItem { - name: DataStreamName - timestamp_field: IndicesGetDataStreamIndicesGetDataStreamItemTimestampField - indices: IndicesGetDataStreamIndicesGetDataStreamItemIndex[] - generation: integer - template: Name - hidden: boolean - system?: boolean - status: HealthStatus - ilm_policy?: Name - _meta?: Metadata -} - -export interface IndicesGetDataStreamIndicesGetDataStreamItemIndex { - index_name: IndexName - index_uuid: Uuid -} - -export interface IndicesGetDataStreamIndicesGetDataStreamItemTimestampField { - name: Field -} - export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames expand_wildcards?: ExpandWildcards } export interface IndicesGetDataStreamResponse { - data_streams: IndicesGetDataStreamIndicesGetDataStreamItem[] + data_streams: IndicesDataStream[] } export interface IndicesGetFieldMappingRequest extends RequestBase { @@ -9473,10 +9608,11 @@ export interface 
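Illustrative usage, not part of the patch: indices.getDataStream now returns the shared IndicesDataStream shape above (including the new replicated and allow_custom_routing fields) instead of a response-local item type. A sketch, with the client instance from the first sketch:

const { data_streams } = await client.indices.getDataStream({ name: 'logs-*' })
for (const ds of data_streams) {
  console.log(ds.name, ds.generation, ds.status, ds.indices.map(i => i.index_name))
}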
IndicesPutIndexTemplateIndexTemplateMapping { export interface IndicesPutIndexTemplateRequest extends RequestBase { name: Name + create?: boolean index_patterns?: Indices composed_of?: Name[] template?: IndicesPutIndexTemplateIndexTemplateMapping - data_stream?: IndicesDataStream + data_stream?: IndicesDataStreamVisibility priority?: integer version?: VersionNumber _meta?: Metadata @@ -9730,7 +9866,10 @@ export interface IndicesRolloverRolloverConditions { max_age?: Time max_docs?: long max_size?: string + max_size_bytes?: ByteSize max_primary_shard_size?: ByteSize + max_primary_shard_size_bytes?: ByteSize + max_age_millis?: EpochMillis } export interface IndicesSegmentsIndexSegment { @@ -9839,7 +9978,7 @@ export interface IndicesSimulateIndexTemplateRequest extends RequestBase { index_patterns?: Indices composed_of?: Name[] template?: IndicesPutIndexTemplateIndexTemplateMapping - data_stream?: IndicesDataStream + data_stream?: IndicesDataStreamVisibility priority?: integer version?: VersionNumber _meta?: Metadata @@ -9983,7 +10122,7 @@ export interface IndicesStatsShardRetentionLeases { export interface IndicesStatsShardRouting { node: string primary: boolean - relocating_node?: string + relocating_node?: string | null state: IndicesStatsShardRoutingState } @@ -10521,12 +10660,12 @@ export interface IngestSimulateResponse { export interface LicenseLicense { expiry_date_in_millis: EpochMillis issue_date_in_millis: EpochMillis + start_date_in_millis?: EpochMillis issued_to: string issuer: string - max_nodes?: long + max_nodes?: long | null max_resource_units?: long signature: string - start_date_in_millis: EpochMillis type: LicenseLicenseType uid: string } @@ -10542,14 +10681,14 @@ export interface LicenseDeleteResponse extends AcknowledgedResponseBase { } export interface LicenseGetLicenseInformation { - expiry_date: DateString - expiry_date_in_millis: EpochMillis + expiry_date?: DateString + expiry_date_in_millis?: EpochMillis issue_date: DateString issue_date_in_millis: EpochMillis issued_to: string issuer: string - max_nodes: long - max_resource_units?: integer + max_nodes: long | null + max_resource_units?: integer | null status: LicenseLicenseStatus type: LicenseLicenseType uid: Uuid @@ -10587,7 +10726,7 @@ export interface LicensePostAcknowledgement { export interface LicensePostRequest extends RequestBase { acknowledge?: boolean license?: LicenseLicense - licenses?: LicenseLicense[] + licenses: LicenseLicense[] } export interface LicensePostResponse { @@ -10601,9 +10740,10 @@ export interface LicensePostStartBasicRequest extends RequestBase { } export interface LicensePostStartBasicResponse extends AcknowledgedResponseBase { - acknowledge: Record basic_was_started: boolean - error_message: string + error_message?: string + type?: LicenseLicenseType + acknowledge?: Record } export interface LicensePostStartTrialRequest extends RequestBase { @@ -10614,7 +10754,7 @@ export interface LicensePostStartTrialRequest extends RequestBase { export interface LicensePostStartTrialResponse extends AcknowledgedResponseBase { error_message?: string trial_was_started: boolean - type: LicenseLicenseType + type?: LicenseLicenseType } export interface LogstashPipeline { @@ -10835,18 +10975,19 @@ export type MlCustomSettings = any export interface MlDataCounts { bucket_count: long - earliest_record_timestamp: long + earliest_record_timestamp?: long empty_bucket_count: long input_bytes: long input_field_count: long input_record_count: long invalid_date_count: long job_id: Id - last_data_time: long - 
latest_empty_bucket_timestamp: long - latest_record_timestamp: long - latest_sparse_bucket_timestamp: long - latest_bucket_timestamp: long + last_data_time?: long + latest_empty_bucket_timestamp?: long + latest_record_timestamp?: long + latest_sparse_bucket_timestamp?: long + latest_bucket_timestamp?: long + log_time?: long missing_field_count: long out_of_order_timestamp_count: long processed_field_count: long @@ -10899,14 +11040,20 @@ export interface MlDatafeedConfig { scroll_size?: integer } +export interface MlDatafeedRunningState { + real_time_configured: boolean + real_time_running: boolean +} + export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { - assignment_explanation: string + assignment_explanation?: string datafeed_id: Id - node: MlDiscoveryNode + node?: MlDiscoveryNode state: MlDatafeedState timing_stats: MlDatafeedTimingStats + running_state?: MlDatafeedRunningState } export interface MlDatafeedTimingStats { @@ -10915,7 +11062,7 @@ export interface MlDatafeedTimingStats { job_id: Id search_count: long total_search_time_ms: double - average_search_time_per_bucket_ms: number + average_search_time_per_bucket_ms?: number } export interface MlDataframeAnalysis { @@ -11259,9 +11406,9 @@ export interface MlJob { allow_lazy_open: boolean analysis_config: MlAnalysisConfig analysis_limits?: MlAnalysisLimits - background_persist_interval: Time + background_persist_interval?: Time blocked?: MlJobBlocked - create_time: integer + create_time?: integer custom_settings?: MlCustomSettings daily_model_snapshot_retention_after_days?: long data_description: MlDataDescription @@ -11271,8 +11418,8 @@ export interface MlJob { finished_time?: integer groups?: string[] job_id: Id - job_type: string - job_version: VersionString + job_type?: string + job_version?: VersionString model_plot_config?: MlModelPlotConfig model_snapshot_id?: Id model_snapshot_retention_days: long @@ -11327,12 +11474,12 @@ export interface MlJobStatistics { } export interface MlJobStats { - assignment_explanation: string + assignment_explanation?: string data_counts: MlDataCounts forecasts_stats: MlJobForecastStatistics job_id: string model_size_stats: MlModelSizeStats - node: MlDiscoveryNode + node?: MlDiscoveryNode open_time?: DateString state: MlJobState timing_stats: MlJobTimingStats @@ -11340,14 +11487,14 @@ export interface MlJobStats { } export interface MlJobTimingStats { - average_bucket_processing_time_ms: double + average_bucket_processing_time_ms?: double bucket_count: long - exponential_average_bucket_processing_time_ms: double + exponential_average_bucket_processing_time_ms?: double exponential_average_bucket_processing_time_per_hour_ms: double job_id: Id total_bucket_processing_time_ms: double - maximum_bucket_processing_time_ms: double - minimum_bucket_processing_time_ms: double + maximum_bucket_processing_time_ms?: double + minimum_bucket_processing_time_ms?: double } export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' @@ -11487,7 +11634,7 @@ export interface MlTrainedModelAllocationTaskParameters { export interface MlTrainedModelConfig { model_id: Id - model_type: MlTrainedModelType + model_type?: MlTrainedModelType tags: string[] version?: VersionString compressed_definition?: string @@ -11501,6 +11648,8 @@ export interface MlTrainedModelConfig { input: MlTrainedModelConfigInput license_level?: string metadata?: MlTrainedModelConfigMetadata + model_size_bytes?: ByteSize + location?: MlTrainedModelLocation } export interface 
MlTrainedModelConfigInput { @@ -11530,6 +11679,14 @@ export interface MlTrainedModelInferenceStats { timestamp: Time } +export interface MlTrainedModelLocation { + index: MlTrainedModelLocationIndex +} + +export interface MlTrainedModelLocationIndex { + name: IndexName +} + export interface MlTrainedModelStats { model_id: Id pipeline_count: integer @@ -11644,6 +11801,7 @@ export interface MlDeleteModelSnapshotResponse extends AcknowledgedResponseBase export interface MlDeleteTrainedModelRequest extends RequestBase { model_id: Id + force?: boolean } export interface MlDeleteTrainedModelResponse extends AcknowledgedResponseBase { @@ -11780,7 +11938,7 @@ export interface MlFlushJobRequest extends RequestBase { advance_time?: DateString calc_interim?: boolean end?: DateString - skip_time?: string + skip_time?: EpochMillis start?: DateString } @@ -12174,7 +12332,7 @@ export interface MlPutCalendarRequest extends RequestBase { export interface MlPutCalendarResponse { calendar_id: Id - description: string + description?: string job_ids: Ids } @@ -12199,6 +12357,8 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase { max_num_threads?: integer model_memory_limit?: string source: MlDataframeAnalyticsSource + headers?: HttpHeaders + version?: VersionString } export interface MlPutDataFrameAnalyticsResponse { @@ -12225,8 +12385,8 @@ export interface MlPutDatafeedRequest extends RequestBase { chunking_config?: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Time - indices?: string[] - indexes?: string[] + indices?: Indices + indexes?: Indices indices_options?: IndicesOptions job_id?: Id max_empty_searches?: integer @@ -12235,6 +12395,7 @@ export interface MlPutDatafeedRequest extends RequestBase { runtime_mappings?: MappingRuntimeFields script_fields?: Record scroll_size?: integer + headers?: HttpHeaders } export interface MlPutDatafeedResponse { @@ -12351,6 +12512,7 @@ export interface MlPutTrainedModelPreprocessor { export interface MlPutTrainedModelRequest extends RequestBase { model_id: Id + defer_definition_decompression?: boolean compressed_definition?: string definition?: MlPutTrainedModelDefinition description?: string @@ -12358,6 +12520,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { input: MlPutTrainedModelInput metadata?: any model_type?: MlTrainedModelType + model_size_bytes?: long tags?: string[] } @@ -12664,6 +12827,7 @@ export interface MlValidateRequest extends RequestBase { data_description?: MlDataDescription description?: string model_plot?: MlModelPlotConfig + model_snapshot_id?: Id model_snapshot_retention_days?: long results_index_name?: IndexName } @@ -13115,12 +13279,17 @@ export interface NodesHotThreadsRequest extends RequestBase { threads?: long timeout?: Time type?: ThreadType + sort?: ThreadType } export interface NodesHotThreadsResponse { hot_threads: NodesHotThreadsHotThread[] } +export interface NodesInfoDeprecationIndexing { + enabled: boolean | string +} + export interface NodesInfoNodeInfo { attributes: Record build_flavor: string @@ -13282,6 +13451,7 @@ export interface NodesInfoNodeInfoSettingsCluster { routing?: IndicesIndexRouting election: NodesInfoNodeInfoSettingsClusterElection initial_master_nodes?: string + deprecation_indexing?: NodesInfoDeprecationIndexing } export interface NodesInfoNodeInfoSettingsClusterElection { @@ -13383,7 +13553,7 @@ export interface NodesInfoNodeInfoXpackLicenseType { export interface NodesInfoNodeInfoXpackSecurity { http: NodesInfoNodeInfoXpackSecuritySsl 
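Illustrative sketch, not part of the patch: MlDeleteTrainedModelRequest above gains a force flag, and put_trained_model gains defer_definition_decompression and model_size_bytes. A sketch of the former, reusing the earlier client:

await client.ml.deleteTrainedModel({
  model_id: 'my-model', // hypothetical model id
  force: true // newly accepted parameter
})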
enabled: string - transport: NodesInfoNodeInfoXpackSecuritySsl + transport?: NodesInfoNodeInfoXpackSecuritySsl authc?: NodesInfoNodeInfoXpackSecurityAuthc } @@ -13669,12 +13839,14 @@ export interface RollupGetRollupIndexCapsRollupJobSummaryField { export interface RollupPutJobRequest extends RequestBase { id: Id - cron?: string - groups?: RollupGroupings - index_pattern?: string + cron: string + groups: RollupGroupings + index_pattern: string metrics?: RollupFieldMetric[] - page_size?: long - rollup_index?: IndexName + page_size: integer + rollup_index: IndexName + timeout?: Time + headers?: HttpHeaders } export interface RollupPutJobResponse extends AcknowledgedResponseBase { @@ -13770,6 +13942,17 @@ export interface SearchableSnapshotsStatsResponse { total: any } +export interface SecurityApiKey { + creation?: long + expiration?: long + id: Id + invalidated?: boolean + name: Name + realm?: string + username?: Username + metadata?: Metadata +} + export interface SecurityApplicationGlobalUserPrivileges { manage: SecurityManageUserPrivileges } @@ -13807,13 +13990,13 @@ export interface SecurityGlobalPrivilege { application: SecurityApplicationGlobalUserPrivileges } -export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' +export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity | SecurityFieldSecurity[] names: Indices privileges: SecurityIndexPrivilege[] - query?: string | string[] + query?: QueryDslQueryContainer allow_restricted_indices?: boolean } @@ -13846,24 +14029,19 @@ export interface SecurityRoleMappingRule { } export interface SecurityUser { - email?: string - full_name?: Name + email?: string | null + full_name?: Name | null metadata: Metadata roles: string[] username: Username enabled: boolean } -export interface SecurityAuthenticateApiKey { - id: string - name: Name -} - export interface SecurityAuthenticateRequest extends RequestBase { } export interface SecurityAuthenticateResponse { - api_key?: SecurityAuthenticateApiKey + api_key?: SecurityApiKey authentication_realm: SecurityRealmInfo email?: string | null full_name?: Name | null @@ -14054,17 +14232,6 @@ export interface SecurityEnableUserRequest extends RequestBase { export interface SecurityEnableUserResponse { } -export interface SecurityGetApiKeyApiKey { - creation: long - expiration?: long - id: Id - invalidated: boolean - name: Name - realm: string - username: Username - metadata?: Metadata -} - export interface SecurityGetApiKeyRequest extends RequestBase { id?: Id name?: Name @@ -14074,7 +14241,7 @@ export interface SecurityGetApiKeyRequest extends RequestBase { } export interface SecurityGetApiKeyResponse { - api_keys: SecurityGetApiKeyApiKey[] + api_keys: SecurityApiKey[] } export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { @@ -14094,7 +14261,7 @@ export interface SecurityGetPrivilegesResponse extends DictionaryResponseBase { @@ -14108,6 +14275,7 @@ export interface SecurityGetRoleRole { 
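Editor's sketch, not part of the patch: RollupPutJobRequest above makes cron, groups, index_pattern, page_size and rollup_index required, and adds optional timeout and headers. A sketch with placeholder values, reusing the earlier client:

await client.rollup.putJob({
  id: 'hourly-metrics', // hypothetical job id
  index_pattern: 'metrics-*',
  rollup_index: 'metrics-rollup',
  cron: '0 0 * * * ?',
  page_size: 1000,
  groups: { date_histogram: { field: '@timestamp', calendar_interval: '1h' } }
})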
transient_metadata: SecurityGetRoleTransientMetadata applications: SecurityApplicationPrivileges[] role_templates?: SecurityGetRoleRoleTemplate[] + global?: Record>> } export interface SecurityGetRoleRoleTemplate { @@ -14122,7 +14290,7 @@ export interface SecurityGetRoleTransientMetadata { } export interface SecurityGetRoleMappingRequest extends RequestBase { - name?: Name + name?: Names } export interface SecurityGetRoleMappingResponse extends DictionaryResponseBase { @@ -14219,6 +14387,7 @@ export interface SecurityGetUserResponse extends DictionaryResponseBase[] } -export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' - export interface SecurityGrantApiKeyRequest extends RequestBase { - api_key: SecurityGrantApiKeyApiKey + api_key: SecurityGrantApiKeyGrantApiKey grant_type: SecurityGrantApiKeyApiKeyGrantType access_token?: string username?: Username @@ -14263,12 +14432,13 @@ export type SecurityHasPrivilegesApplicationsPrivileges = Record export interface SecurityHasPrivilegesRequest extends RequestBase { - user?: Name + user?: Name | null application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] cluster?: SecurityClusterPrivilege[] index?: SecurityHasPrivilegesIndexPrivilegesCheck[] @@ -14376,6 +14546,20 @@ export interface SecurityPutUserResponse { created: boolean } +export interface SecurityQueryApiKeysRequest extends RequestBase { + query?: QueryDslQueryContainer + from?: integer + sort?: Sort + size?: integer + search_after?: SortResults +} + +export interface SecurityQueryApiKeysResponse { + total: integer + count: integer + api_keys: SecurityApiKey[] +} + export interface ShutdownDeleteNodeRequest extends RequestBase { node_id: NodeId } @@ -14991,8 +15175,9 @@ export interface TasksListRequest extends RequestBase { actions?: string | string[] detailed?: boolean group_by?: TasksGroupBy - nodes?: string[] + node_id?: string[] parent_task_id?: Id + master_timeout?: Time timeout?: Time wait_for_completion?: boolean } @@ -15000,7 +15185,7 @@ export interface TasksListRequest extends RequestBase { export interface TasksListResponse { node_failures?: ErrorCause[] nodes?: Record - tasks?: Record + tasks?: TasksInfo[] | Record } export interface TextStructureFindStructureFieldStat { @@ -15063,6 +15248,11 @@ export interface TextStructureFindStructureTopHit { value: any } +export interface TransformDestination { + index?: IndexName + pipeline?: string +} + export interface TransformLatest { sort: Field unique_key: Field[] @@ -15097,6 +15287,12 @@ export interface TransformSettings { max_page_search_size?: integer } +export interface TransformSource { + index: Indices + query?: QueryDslQueryContainer + runtime_mappings?: MappingRuntimeFields +} + export interface TransformSyncContainer { time?: TransformTimeSync } @@ -15125,24 +15321,40 @@ export interface TransformGetTransformRequest extends RequestBase { export interface TransformGetTransformResponse { count: long - transforms: Transform[] + transforms: TransformGetTransformTransformSummary[] +} + +export interface TransformGetTransformTransformSummary { + dest: ReindexDestination + description?: string + frequency?: Time + id: Id + pivot?: TransformPivot + settings?: TransformSettings + source: TransformSource + sync?: TransformSyncContainer + create_time?: EpochMillis + version?: VersionString + latest?: TransformLatest + _meta?: Metadata } export interface TransformGetTransformStatsCheckpointStats { checkpoint: long checkpoint_progress?: TransformGetTransformStatsTransformProgress timestamp?: DateString - 
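Illustrative usage, not part of the patch: TasksListRequest above renames nodes to node_id and adds master_timeout. A sketch, with the client instance from the first sketch:

const resp = await client.tasks.list({
  node_id: ['node-1'], // renamed from `nodes` in this patch; hypothetical node id
  detailed: true,
  group_by: 'parents'
})
console.log(resp.nodes)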
timestamp_millis: EpochMillis + timestamp_millis?: EpochMillis time_upper_bound?: DateString time_upper_bound_millis?: EpochMillis } export interface TransformGetTransformStatsCheckpointing { - changes_last_detected_at: long + changes_last_detected_at?: long changes_last_detected_at_date_time?: DateString last: TransformGetTransformStatsCheckpointStats next?: TransformGetTransformStatsCheckpointStats operations_behind?: long + last_search_time?: long } export interface TransformGetTransformStatsRequest extends RequestBase { @@ -15158,7 +15370,9 @@ export interface TransformGetTransformStatsResponse { } export interface TransformGetTransformStatsTransformIndexerStats { + delete_time_in_ms?: EpochMillis documents_indexed: long + documents_deleted?: long documents_processed: long exponential_avg_checkpoint_duration_ms: double exponential_avg_documents_indexed: double @@ -15195,11 +15409,11 @@ export interface TransformGetTransformStatsTransformStats { export interface TransformPreviewTransformRequest extends RequestBase { transform_id?: Id timeout?: Time - dest?: ReindexDestination + dest?: TransformDestination description?: string frequency?: Time pivot?: TransformPivot - source?: ReindexSource + source?: TransformSource settings?: TransformSettings sync?: TransformSyncContainer retention_policy?: TransformRetentionPolicyContainer @@ -15215,15 +15429,15 @@ export interface TransformPutTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean timeout?: Time - dest: ReindexDestination + dest: TransformDestination description?: string frequency?: Time latest?: TransformLatest - _meta?: Record + _meta?: Metadata pivot?: TransformPivot retention_policy?: TransformRetentionPolicyContainer settings?: TransformSettings - source: ReindexSource + source: TransformSource sync?: TransformSyncContainer } @@ -15262,10 +15476,11 @@ export interface TransformUpdateTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean timeout?: Time - dest?: ReindexDestination + dest?: TransformDestination description?: string frequency?: Time - source?: ReindexSource + _meta?: Metadata + source?: TransformSource settings?: TransformSettings sync?: TransformSyncContainer retention_policy?: TransformRetentionPolicyContainer @@ -15275,7 +15490,7 @@ export interface TransformUpdateTransformResponse { create_time: long description: string dest: ReindexDestination - frequency: Time + frequency?: Time id: Id latest?: TransformLatest pivot?: TransformPivot @@ -15284,6 +15499,7 @@ export interface TransformUpdateTransformResponse { source: ReindexSource sync?: TransformSyncContainer version: VersionString + _meta?: Metadata } export interface TransformUpgradeTransformsRequest extends RequestBase { @@ -15400,9 +15616,29 @@ export interface WatcherDailySchedule { export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday' +export interface WatcherEmail { + bcc?: string[] + body?: WatcherEmailBody + cc?: string[] + from?: string + id: Id + priority?: WatcherEmailPriority + reply_to?: string[] + sent_date: DateString + subject: string + to: string[] +} + +export interface WatcherEmailBody { + html: string + text: string +} + +export type WatcherEmailPriority = 'lowest' | 'low' | 'normal' | 'high' | 'highest' + export interface WatcherEmailResult { account?: string - message: WatcherEmailResult + message: WatcherEmail reason?: string } @@ -15427,6 +15663,7 @@ export interface WatcherExecutionResultAction { status: 
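Illustrative sketch, not part of the patch: the transform requests above switch from ReindexSource/ReindexDestination to the transform-specific TransformSource and TransformDestination types. A sketch with hypothetical index and field names, reusing the earlier client:

await client.transform.putTransform({
  transform_id: 'daily-revenue',
  source: { index: 'orders' }, // TransformSource
  dest: { index: 'orders-by-day' }, // TransformDestination
  pivot: {
    group_by: { day: { date_histogram: { field: '@timestamp', calendar_interval: '1d' } } },
    aggregations: { revenue: { sum: { field: 'price' } } }
  },
  frequency: '1m'
})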
WatcherActionStatusOptions type: WatcherActionType webhook?: WatcherWebhookResult + error?: ErrorCause } export interface WatcherExecutionResultCondition { @@ -15444,6 +15681,7 @@ export interface WatcherExecutionResultInput { export interface WatcherExecutionState { successful: boolean timestamp: DateString + reason?: string } export type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued' @@ -15552,16 +15790,9 @@ export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | export interface WatcherNeverCondition { } -export interface WatcherPagerDutyActionEventResult { - event: WatcherPagerDutyEvent - reason: string - request: WatcherHttpInputRequestResult - response: WatcherHttpInputResponseResult -} - export interface WatcherPagerDutyContext { - href: string - src: string + href?: string + src?: string type: WatcherPagerDutyContextType } @@ -15570,18 +15801,21 @@ export type WatcherPagerDutyContextType = 'link' | 'image' export interface WatcherPagerDutyEvent { account: string attach_payload: boolean - client: string - client_url: string - context: WatcherPagerDutyContext[] - description: string - event_type: WatcherPagerDutyEventType + client?: string + client_url?: string + contexts: WatcherPagerDutyContext[] + description?: string + event_type?: WatcherPagerDutyEventType incident_key: string } export type WatcherPagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge' export interface WatcherPagerDutyResult { - sent_event: WatcherPagerDutyActionEventResult + event: WatcherPagerDutyEvent + reason?: string + request?: WatcherHttpInputRequestResult + response?: WatcherHttpInputResponseResult } export type WatcherQuantifier = 'some' | 'all' @@ -15804,13 +16038,14 @@ export interface WatcherExecuteWatchWatchRecord { condition: WatcherConditionContainer input: WatcherInputContainer messages: string[] - metadata: Metadata + metadata?: Metadata node: string result: WatcherExecutionResult state: WatcherExecutionStatus trigger_event: WatcherTriggerEventResult user: Username watch_id: Id + status?: WatcherWatchStatus } export interface WatcherGetWatchRequest extends RequestBase { @@ -15969,6 +16204,7 @@ export interface XpackInfoNativeCodeInformation { export interface XpackInfoRequest extends RequestBase { categories?: string[] + accept_enterprise?: boolean } export interface XpackInfoResponse { @@ -16158,6 +16394,18 @@ export interface XpackUsageMlDataFrameAnalyticsJobsMemory { export interface XpackUsageMlInference { ingest_processors: Record trained_models: XpackUsageMlInferenceTrainedModels + deployments?: XpackUsageMlInferenceDeployments +} + +export interface XpackUsageMlInferenceDeployments { + count: integer + inference_counts: MlJobStatistics + model_sizes_bytes: MlJobStatistics + time_ms: XpackUsageMlInferenceDeploymentsTimeMs +} + +export interface XpackUsageMlInferenceDeploymentsTimeMs { + avg: double } export interface XpackUsageMlInferenceIngestProcessor { @@ -16178,14 +16426,15 @@ export interface XpackUsageMlInferenceTrainedModels { estimated_heap_memory_usage_bytes?: MlJobStatistics count?: XpackUsageMlInferenceTrainedModelsCount _all: XpackUsageMlCounter + model_size_bytes?: MlJobStatistics } export interface XpackUsageMlInferenceTrainedModelsCount { total: long prepackaged: long other: long - regression: long - classification: long + regression?: long + classification?: long } export interface XpackUsageMonitoring extends 
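Editor's note, not part of the patch: XpackInfoRequest above gains an accept_enterprise flag. A sketch, with the client instance from the first sketch; that the xpack namespace is exposed on the client as in 7.x is an assumption here:

const info = await client.xpack.info({ accept_enterprise: true })
console.log(info.license, Object.keys(info.features))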
XpackUsageBase { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 58c965bd9..f63131965 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -338,12 +338,16 @@ export interface ExplainResponse { export interface FieldCapsFieldCapability { aggregatable: boolean indices?: Indices - meta?: Record + meta?: Metadata non_aggregatable_indices?: Indices non_searchable_indices?: Indices searchable: boolean type: string metadata_field?: boolean + time_series_dimension?: boolean + time_series_metric?: MappingTimeSeriesMetricType + non_dimension_indices?: IndexName[] + metric_conflicts_indices?: IndexName[] } export interface FieldCapsRequest extends RequestBase { @@ -730,7 +734,7 @@ export interface RankEvalRankEvalHit { export interface RankEvalRankEvalHitItem { hit: RankEvalRankEvalHit - rating?: double + rating?: double | null } export interface RankEvalRankEvalMetric { @@ -1082,7 +1086,7 @@ export interface SearchAggregationProfile { description: string time_in_nanos: long type: string - debug?: SearchAggregationProfileDebug + debug?: SearchAggregationProfileDebug | SearchAggregationProfileDelegateDebug children?: SearchAggregationProfile[] } @@ -1207,6 +1211,7 @@ export interface SearchFieldCollapse { field: Field inner_hits?: SearchInnerHits | SearchInnerHits[] max_concurrent_group_searches?: integer + collapse?: SearchFieldCollapse } export interface SearchFieldSuggester { @@ -1286,6 +1291,7 @@ export interface SearchHit { matched_queries?: string[] _nested?: SearchNestedIdentity _ignored?: string[] + ignored_field_values?: Record _shard?: string _node?: string _routing?: string @@ -1551,6 +1557,7 @@ export interface SearchMvtRequest extends RequestBase { runtime_mappings?: MappingRuntimeFields size?: integer sort?: Sort + track_total_hits?: SearchTrackHits } } @@ -2175,6 +2182,7 @@ export interface NodeShard { allocation_id?: Record recovery_source?: Record unassigned_info?: ClusterAllocationExplainUnassignedInformation + relocating_node?: null } export interface NodeStatistics { @@ -2417,7 +2425,7 @@ export type SuggestionName = string export type TaskId = string | integer -export type ThreadType = 'cpu' | 'wait' | 'block' +export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' export type Time = string | integer @@ -2439,9 +2447,6 @@ export interface TopRightBottomLeftGeoBounds { bottom_left: GeoLocation } -export interface Transform { -} - export interface TransformContainer { chain?: ChainTransform script?: ScriptTransform @@ -2549,7 +2554,10 @@ export interface AggregationsAggregationContainer { bucket_script?: AggregationsBucketScriptAggregation bucket_selector?: AggregationsBucketSelectorAggregation bucket_sort?: AggregationsBucketSortAggregation + bucket_count_ks_test?: AggregationsBucketKsAggregation + bucket_correlation?: AggregationsBucketCorrelationAggregation cardinality?: AggregationsCardinalityAggregation + categorize_text?: AggregationsCategorizeTextAggregation children?: AggregationsChildrenAggregation composite?: AggregationsCompositeAggregation cumulative_cardinality?: AggregationsCumulativeCardinalityAggregation @@ -2614,9 +2622,9 @@ export interface AggregationsAggregationContainer { } export interface AggregationsAggregationRange { - from?: double | string + from?: double | string | null key?: string - to?: double | string + to?: double | string | null } export interface AggregationsArrayPercentilesItem { @@ -2674,10 +2682,38 @@ export interface AggregationsBoxplotAggregation extends AggregationsMetricAggreg 
export interface AggregationsBucketAggregationBase extends AggregationsAggregation { } +export interface AggregationsBucketCorrelationAggregation extends AggregationsBucketPathAggregation { + function: AggregationsBucketCorrelationFunction +} + +export interface AggregationsBucketCorrelationFunction { + count_correlation: AggregationsBucketCorrelationFunctionCountCorrelation +} + +export interface AggregationsBucketCorrelationFunctionCountCorrelation { + indicator: AggregationsBucketCorrelationFunctionCountCorrelationIndicator +} + +export interface AggregationsBucketCorrelationFunctionCountCorrelationIndicator { + doc_count: integer + expectations: double[] + fractions?: double[] +} + +export interface AggregationsBucketKsAggregation extends AggregationsBucketPathAggregation { + alternative?: string[] + fractions?: double[] + sampling_method?: string +} + export interface AggregationsBucketMetricValueAggregate extends AggregationsSingleMetricAggregateBase { keys: string[] } +export interface AggregationsBucketPathAggregation extends AggregationsAggregation { + buckets_path?: AggregationsBucketsPath +} + export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { script?: Script } @@ -2708,6 +2744,25 @@ export interface AggregationsCardinalityAggregation extends AggregationsMetricAg rehash?: boolean } +export interface AggregationsCategorizeTextAggregation extends AggregationsAggregation { + field: Field + max_unique_tokens?: integer + max_matched_tokens?: integer + similarity_threshold?: integer + categorization_filters?: string[] + categorization_analyzer?: string | AggregationsCategorizeTextAnalyzer + shard_size?: integer + size?: integer + min_doc_count?: integer + shard_min_doc_count?: integer +} + +export interface AggregationsCategorizeTextAnalyzer { + char_filter?: string[] + tokenizer?: string + filter?: string[] +} + export interface AggregationsChiSquareHeuristic { background_is_superset: boolean include_negatives: boolean @@ -3113,9 +3168,9 @@ export interface AggregationsIpRangeAggregation extends AggregationsBucketAggreg } export interface AggregationsIpRangeAggregationRange { - from?: string + from?: string | null mask?: string - to?: string + to?: string | null } export interface AggregationsIpRangeBucketKeys extends AggregationsMultiBucketBase { @@ -3296,7 +3351,7 @@ export interface AggregationsPercentageScoreHeuristic { export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase { keyed?: boolean - values?: double[] + values?: double[] | null hdr?: AggregationsHdrMethod tdigest?: AggregationsTDigest } @@ -3321,8 +3376,7 @@ export interface AggregationsPercentilesBucketAggregation extends AggregationsPi percents?: double[] } -export interface AggregationsPipelineAggregationBase extends AggregationsAggregation { - buckets_path?: AggregationsBucketsPath +export interface AggregationsPipelineAggregationBase extends AggregationsBucketPathAggregation { format?: string gap_policy?: AggregationsGapPolicy } @@ -3336,6 +3390,7 @@ export interface AggregationsRangeAggregation extends AggregationsBucketAggregat ranges?: AggregationsAggregationRange[] script?: Script keyed?: boolean + format?: string } export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase { @@ -3613,6 +3668,7 @@ export interface AggregationsTermsAggregation extends AggregationsBucketAggregat shard_size?: integer show_term_doc_count_error?: boolean size?: integer + format?: string } export type 
AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_first' @@ -4419,6 +4475,10 @@ export interface MappingCorePropertyBase extends MappingPropertyBase { store?: boolean } +export interface MappingDataStreamTimestamp { + enabled: boolean +} + export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase { boost?: double format?: string @@ -4815,6 +4875,7 @@ export interface MappingTypeMapping { _source?: MappingSourceField runtime?: Record enabled?: boolean + _data_stream_timestamp?: MappingDataStreamTimestamp } export interface MappingUnsignedLongNumberProperty extends MappingNumberPropertyBase { @@ -4854,7 +4915,7 @@ export interface QueryDslCombinedFieldsQuery extends QueryDslQueryBase { query: string auto_generate_synonyms_phrase_query?: boolean operator?: QueryDslCombinedFieldsOperator - mimimum_should_match?: MinimumShouldMatch + minimum_should_match?: MinimumShouldMatch zero_terms_query?: QueryDslCombinedFieldsZeroTerms } @@ -4886,8 +4947,8 @@ export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { gte?: DateMath lt?: DateMath lte?: DateMath - from?: DateMath - to?: DateMath + from?: DateMath | null + to?: DateMath | null format?: DateFormat time_zone?: TimeZone } @@ -5122,6 +5183,12 @@ export interface QueryDslIntervalsWildcard { use_field?: Field } +export interface QueryDslKnnQuery extends QueryDslQueryBase { + field: Field + num_candidates: integer + query_vector: double[] +} + export type QueryDslLike = string | QueryDslLikeDocument export interface QueryDslLikeDocument { @@ -5241,8 +5308,8 @@ export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { gte?: double lt?: double lte?: double - from?: double - to?: double + from?: double | null + to?: double | null } export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { @@ -5311,6 +5378,7 @@ export interface QueryDslQueryContainer { has_parent?: QueryDslHasParentQuery ids?: QueryDslIdsQuery intervals?: Partial> + knn?: QueryDslKnnQuery match?: Partial> match_all?: QueryDslMatchAllQuery match_bool_prefix?: Partial> @@ -5817,24 +5885,24 @@ export type CatAliasesResponse = CatAliasesAliasesRecord[] export interface CatAllocationAllocationRecord { shards?: string s?: string - 'disk.indices'?: ByteSize - di?: ByteSize - diskIndices?: ByteSize - 'disk.used'?: ByteSize - du?: ByteSize - diskUsed?: ByteSize - 'disk.avail'?: ByteSize - da?: ByteSize - diskAvail?: ByteSize - 'disk.total'?: ByteSize - dt?: ByteSize - diskTotal?: ByteSize - 'disk.percent'?: Percentage - dp?: Percentage - diskPercent?: Percentage - host?: Host - h?: Host - ip?: Ip + 'disk.indices'?: ByteSize | null + di?: ByteSize | null + diskIndices?: ByteSize | null + 'disk.used'?: ByteSize | null + du?: ByteSize | null + diskUsed?: ByteSize | null + 'disk.avail'?: ByteSize | null + da?: ByteSize | null + diskAvail?: ByteSize | null + 'disk.total'?: ByteSize | null + dt?: ByteSize | null + diskTotal?: ByteSize | null + 'disk.percent'?: Percentage | null + dp?: Percentage | null + diskPercent?: Percentage | null + host?: Host | null + h?: Host | null + ip?: Ip | null node?: string n?: string } @@ -5967,20 +6035,20 @@ export interface CatIndicesIndicesRecord { r?: string 'shards.replica'?: string shardsReplica?: string - 'docs.count'?: string - dc?: string - docsCount?: string - 'docs.deleted'?: string - dd?: string - docsDeleted?: string + 'docs.count'?: string | null + dc?: string | null + docsCount?: string | null + 'docs.deleted'?: string | null + dd?: string | null + 
docsDeleted?: string | null 'creation.date'?: string cd?: string 'creation.date.string'?: string cds?: string - 'store.size'?: string - ss?: string - storeSize?: string - 'pri.store.size'?: string + 'store.size'?: string | null + ss?: string | null + storeSize?: string | null + 'pri.store.size'?: string | null 'completion.size'?: string cs?: string completionSize?: string @@ -7082,15 +7150,15 @@ export interface CatShardsShardsRecord { primaryOrReplica?: string state?: string st?: string - docs?: string - d?: string - dc?: string - store?: string - sto?: string - ip?: string + docs?: string | null + d?: string | null + dc?: string | null + store?: string | null + sto?: string | null + ip?: string | null id?: string - node?: string - n?: string + node?: string | null + n?: string | null sync_id?: string 'unassigned.reason'?: string ur?: string @@ -7382,8 +7450,8 @@ export interface CatTemplatesTemplatesRecord { order?: string o?: string p?: string - version?: VersionString - v?: VersionString + version?: VersionString | null + v?: VersionString | null composed_of?: string c?: string } @@ -7428,14 +7496,14 @@ export interface CatThreadPoolThreadPoolRecord { l?: string completed?: string c?: string - core?: string - cr?: string - max?: string - mx?: string - size?: string - sz?: string - keep_alive?: string - ka?: string + core?: string | null + cr?: string | null + max?: string | null + mx?: string | null + size?: string | null + sz?: string | null + keep_alive?: string | null + ka?: string | null } export interface CatTransformsRequest extends CatCatRequestBase { @@ -7456,14 +7524,14 @@ export interface CatTransformsTransformsRecord { documents_processed?: string docp?: string documentsProcessed?: string - checkpoint_progress?: string - cp?: string - checkpointProgress?: string - last_search_time?: string - lst?: string - lastSearchTime?: string - changes_last_detection_time?: string - cldt?: string + checkpoint_progress?: string | null + cp?: string | null + checkpointProgress?: string | null + last_search_time?: string | null + lst?: string | null + lastSearchTime?: string | null + changes_last_detection_time?: string | null + cldt?: string | null create_time?: string ct?: string createTime?: string @@ -7980,7 +8048,7 @@ export interface ClusterHealthRequest extends RequestBase { timeout?: Time wait_for_active_shards?: WaitForActiveShards wait_for_events?: WaitForEvents - wait_for_nodes?: string + wait_for_nodes?: string | integer wait_for_no_initializing_shards?: boolean wait_for_no_relocating_shards?: boolean wait_for_status?: HealthStatus @@ -7990,7 +8058,7 @@ export interface ClusterHealthResponse { active_primary_shards: integer active_shards: integer active_shards_percent_as_number: Percentage - cluster_name: string + cluster_name: Name delayed_unassigned_shards: integer indices?: Record initializing_shards: integer @@ -8174,6 +8242,7 @@ export interface ClusterRerouteRerouteParameters { } export interface ClusterRerouteResponse { + acknowledged: boolean explanations?: ClusterRerouteRerouteExplanation[] state: any } @@ -8298,6 +8367,7 @@ export interface ClusterStatsClusterNodes { plugins: PluginStats[] process: ClusterStatsClusterProcess versions: VersionString[] + indexing_pressure: ClusterStatsIndexingPressure } export interface ClusterStatsClusterOperatingSystem { @@ -8364,6 +8434,27 @@ export interface ClusterStatsFieldTypesMappings { runtime_field_types?: ClusterStatsRuntimeFieldTypes[] } +export interface ClusterStatsIndexingPressure { + memory: 
ClusterStatsIndexingPressureMemory +} + +export interface ClusterStatsIndexingPressureMemory { + limit_in_bytes: long + current: ClusterStatsIndexingPressureMemorySummary + total: ClusterStatsIndexingPressureMemorySummary +} + +export interface ClusterStatsIndexingPressureMemorySummary { + all_in_bytes: long + combined_coordinating_and_primary_in_bytes: long + coordinating_in_bytes: long + coordinating_rejections?: long + primary_in_bytes: long + primary_rejections?: long + replica_in_bytes: long + replica_rejections?: long +} + export interface ClusterStatsIndicesVersions { index_count: integer primary_shard_count: integer @@ -8383,6 +8474,7 @@ export interface ClusterStatsOperatingSystemMemoryInfo { total_in_bytes: long used_in_bytes: long used_percent: integer + adjusted_total_in_bytes?: long } export interface ClusterStatsRequest extends RequestBase { @@ -8731,9 +8823,20 @@ export interface GraphExploreResponse { export type IlmActions = any +export interface IlmConfigurations { + rollover?: IndicesRolloverRolloverConditions + forcemerge?: IlmForceMergeConfiguration + shrink?: IlmShrinkConfiguration +} + +export interface IlmForceMergeConfiguration { + max_num_segments: integer +} + export interface IlmPhase { actions?: IlmActions min_age?: Time + configurations?: IlmConfigurations } export interface IlmPhases { @@ -8748,8 +8851,14 @@ export interface IlmPolicy { name?: Name } +export interface IlmShrinkConfiguration { + number_of_shards: integer +} + export interface IlmDeleteLifecycleRequest extends RequestBase { name: Name + master_timeout?: Time + timeout?: Time } export interface IlmDeleteLifecycleResponse extends AcknowledgedResponseBase { @@ -8764,6 +8873,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged { failed_step?: Name failed_step_retry_count?: integer index: IndexName + index_creation_date_millis?: EpochMillis is_auto_retryable_error?: boolean lifecycle_date_millis: EpochMillis managed: true @@ -8774,6 +8884,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged { step_info?: Record step_time_millis: EpochMillis phase_execution: IlmExplainLifecycleLifecycleExplainPhaseExecution + time_since_index_creation?: Time } export interface IlmExplainLifecycleLifecycleExplainPhaseExecution { @@ -8791,6 +8902,8 @@ export interface IlmExplainLifecycleRequest extends RequestBase { index: IndexName only_errors?: boolean only_managed?: boolean + master_timeout?: Time + timeout?: Time } export interface IlmExplainLifecycleResponse { @@ -8805,6 +8918,8 @@ export interface IlmGetLifecycleLifecycle { export interface IlmGetLifecycleRequest extends RequestBase { name?: Name + master_timeout?: Time + timeout?: Time } export interface IlmGetLifecycleResponse extends DictionaryResponseBase { @@ -8837,6 +8952,8 @@ export interface IlmMoveToStepStepKey { export interface IlmPutLifecycleRequest extends RequestBase { name: Name + master_timeout?: Time + timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { policy?: IlmPolicy @@ -8893,9 +9010,34 @@ export interface IndicesAliasDefinition { is_write_index?: boolean routing?: string search_routing?: string + is_hidden?: boolean } export interface IndicesDataStream { + name: DataStreamName + timestamp_field: IndicesDataStreamTimestampField + indices: IndicesDataStreamIndex[] + generation: integer + template: Name + hidden: boolean + replicated?: boolean + system?: boolean + status: HealthStatus + ilm_policy?: Name + _meta?: Metadata + allow_custom_routing?: boolean +} + +export interface IndicesDataStreamIndex { + index_name: IndexName + index_uuid: Uuid +} + +export interface IndicesDataStreamTimestampField { + name: Field +} + +export interface IndicesDataStreamVisibility { hidden?: boolean } @@ -8963,6 +9105,10 @@ export interface IndicesIndexSettings { 'index.routing_path'?: string[] soft_deletes?: IndicesSoftDeletes 'index.soft_deletes'?: IndicesSoftDeletes + 'soft_deletes.enabled'?: boolean + 'index.soft_deletes.enabled'?: boolean + 'soft_deletes.retention_lease.period'?: Time + 'index.soft_deletes.retention_lease.period'?: Time sort?: IndicesIndexSegmentSort 'index.sort'?: IndicesIndexSegmentSort number_of_shards?: integer | string @@ -8977,8 +9123,6 @@ export interface IndicesIndexSettings { 'index.codec'?: string routing_partition_size?: integer 'index.routing_partition_size'?: integer - 'soft_deletes.retention_lease.period'?: Time - 'index.soft_deletes.retention_lease.period'?: Time load_fixed_bitset_filters_eagerly?: boolean 'index.load_fixed_bitset_filters_eagerly'?: boolean hidden?: boolean | string @@ -8987,6 +9131,8 @@ export interface IndicesIndexSettings { 'index.auto_expand_replicas'?: string 'merge.scheduler.max_thread_count'?: integer 'index.merge.scheduler.max_thread_count'?: integer + 'merge.scheduler.max_merge_count'?: integer + 'index.merge.scheduler.max_merge_count'?: integer 'search.idle.after'?: Time 'index.search.idle.after'?: Time refresh_interval?: Time @@ -9043,12 +9189,14 @@ export interface IndicesIndexSettings { 'index.provided_name'?: Name creation_date?: DateString 'index.creation_date'?: DateString + creation_date_string?: DateString + 'index.creation_date_string'?: DateString uuid?: Uuid 'index.uuid'?: Uuid version?: IndicesIndexVersioning 'index.version'?: IndicesIndexVersioning - verified_before_close?: boolean | string - 'index.verified_before_close'?: boolean | string + verified_before_close?: boolean + 'index.verified_before_close'?: boolean format?: string | integer 'index.format'?: string | integer max_slices_per_scroll?: integer @@ -9057,14 +9205,15 @@ export interface IndicesIndexSettings { 'index.translog.durability'?: string 'translog.flush_threshold_size'?: string 'index.translog.flush_threshold_size'?: string - 'query_string.lenient'?: boolean | string - 'index.query_string.lenient'?: boolean | string + 'query_string.lenient'?: boolean + 'index.query_string.lenient'?: boolean priority?: integer | string 'index.priority'?: integer | string top_metrics_max_size?: integer analysis?: IndicesIndexSettingsAnalysis 'index.analysis'?: IndicesIndexSettingsAnalysis settings?: IndicesIndexSettings + time_series?: IndicesIndexSettingsTimeSeries } export interface IndicesIndexSettingsAnalysis { @@ -9079,6 +9228,11 @@ export interface IndicesIndexSettingsLifecycle { name: Name } +export interface IndicesIndexSettingsTimeSeries { + end_time: DateString + start_time: DateString +} + export interface IndicesIndexState { aliases?: Record mappings?: MappingTypeMapping @@ -9089,6 +9243,7 @@ export 
interface IndicesIndexState { export interface IndicesIndexVersioning { created: VersionString + created_string?: VersionString } export interface IndicesNumericFielddata { @@ -9105,6 +9260,7 @@ export type IndicesSegmentSortOrder = 'asc' | 'desc' export interface IndicesSoftDeletes { enabled: boolean + 'retention_lease.period'?: Time } export interface IndicesStringFielddata { @@ -9434,6 +9590,7 @@ export interface IndicesForcemergeRequest extends RequestBase { ignore_unavailable?: boolean max_num_segments?: long only_expunge_deletes?: boolean + wait_for_completion?: boolean } export interface IndicesForcemergeResponse extends ShardsOperationResponseBase { @@ -9469,35 +9626,13 @@ export interface IndicesGetAliasRequest extends RequestBase { export interface IndicesGetAliasResponse extends DictionaryResponseBase { } -export interface IndicesGetDataStreamIndicesGetDataStreamItem { - name: DataStreamName - timestamp_field: IndicesGetDataStreamIndicesGetDataStreamItemTimestampField - indices: IndicesGetDataStreamIndicesGetDataStreamItemIndex[] - generation: integer - template: Name - hidden: boolean - system?: boolean - status: HealthStatus - ilm_policy?: Name - _meta?: Metadata -} - -export interface IndicesGetDataStreamIndicesGetDataStreamItemIndex { - index_name: IndexName - index_uuid: Uuid -} - -export interface IndicesGetDataStreamIndicesGetDataStreamItemTimestampField { - name: Field -} - export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames expand_wildcards?: ExpandWildcards } export interface IndicesGetDataStreamResponse { - data_streams: IndicesGetDataStreamIndicesGetDataStreamItem[] + data_streams: IndicesDataStream[] } export interface IndicesGetFieldMappingRequest extends RequestBase { @@ -9645,12 +9780,13 @@ export interface IndicesPutIndexTemplateIndexTemplateMapping { export interface IndicesPutIndexTemplateRequest extends RequestBase { name: Name + create?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { index_patterns?: Indices composed_of?: Name[] template?: IndicesPutIndexTemplateIndexTemplateMapping - data_stream?: IndicesDataStream + data_stream?: IndicesDataStreamVisibility priority?: integer version?: VersionNumber _meta?: Metadata @@ -9916,7 +10052,10 @@ export interface IndicesRolloverRolloverConditions { max_age?: Time max_docs?: long max_size?: string + max_size_bytes?: ByteSize max_primary_shard_size?: ByteSize + max_primary_shard_size_bytes?: ByteSize + max_age_millis?: EpochMillis } export interface IndicesSegmentsIndexSegment { @@ -10030,7 +10169,7 @@ export interface IndicesSimulateIndexTemplateRequest extends RequestBase { index_patterns?: Indices composed_of?: Name[] template?: IndicesPutIndexTemplateIndexTemplateMapping - data_stream?: IndicesDataStream + data_stream?: IndicesDataStreamVisibility priority?: integer version?: VersionNumber _meta?: Metadata @@ -10179,7 +10318,7 @@ export interface IndicesStatsShardRetentionLeases { export interface IndicesStatsShardRouting { node: string primary: boolean - relocating_node?: string + relocating_node?: string | null state: IndicesStatsShardRoutingState } @@ -10729,12 +10868,12 @@ export interface IngestSimulateResponse { export interface LicenseLicense { expiry_date_in_millis: EpochMillis issue_date_in_millis: EpochMillis + start_date_in_millis?: EpochMillis issued_to: string issuer: string - max_nodes?: long + max_nodes?: long | null max_resource_units?: long signature: string - start_date_in_millis: EpochMillis type: LicenseLicenseType uid: string } @@ -10750,14 +10889,14 @@ export interface LicenseDeleteResponse extends AcknowledgedResponseBase { } export interface LicenseGetLicenseInformation { - expiry_date: DateString - expiry_date_in_millis: EpochMillis + expiry_date?: DateString + expiry_date_in_millis?: EpochMillis issue_date: DateString issue_date_in_millis: EpochMillis issued_to: string issuer: string - max_nodes: long - max_resource_units?: integer + max_nodes: long | null + max_resource_units?: integer | null status: LicenseLicenseStatus type: LicenseLicenseType uid: Uuid @@ -10797,7 +10936,7 @@ export interface LicensePostRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { license?: LicenseLicense - licenses?: LicenseLicense[] + licenses: LicenseLicense[] } } @@ -10812,9 +10951,10 @@ export interface LicensePostStartBasicRequest extends RequestBase { } export interface LicensePostStartBasicResponse extends AcknowledgedResponseBase { - acknowledge: Record basic_was_started: boolean - error_message: string + error_message?: string + type?: LicenseLicenseType + acknowledge?: Record } export interface LicensePostStartTrialRequest extends RequestBase { @@ -10825,7 +10965,7 @@ export interface LicensePostStartTrialRequest extends RequestBase { export interface LicensePostStartTrialResponse extends AcknowledgedResponseBase { error_message?: string trial_was_started: boolean - type: LicenseLicenseType + type?: LicenseLicenseType } export interface LogstashPipeline { @@ -11047,18 +11187,19 @@ export type MlCustomSettings = any export interface MlDataCounts { bucket_count: long - earliest_record_timestamp: long + earliest_record_timestamp?: long empty_bucket_count: long input_bytes: long input_field_count: long input_record_count: long invalid_date_count: long job_id: Id - last_data_time: long - latest_empty_bucket_timestamp: long - latest_record_timestamp: long - latest_sparse_bucket_timestamp: long - latest_bucket_timestamp: long + last_data_time?: long + latest_empty_bucket_timestamp?: long + latest_record_timestamp?: long + latest_sparse_bucket_timestamp?: long + latest_bucket_timestamp?: long + log_time?: long missing_field_count: long out_of_order_timestamp_count: long processed_field_count: long @@ -11111,14 +11252,20 @@ export interface MlDatafeedConfig { scroll_size?: integer } +export interface MlDatafeedRunningState { + real_time_configured: boolean + real_time_running: boolean +} + export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { - assignment_explanation: string + assignment_explanation?: string datafeed_id: Id - node: MlDiscoveryNode + node?: MlDiscoveryNode state: MlDatafeedState timing_stats: MlDatafeedTimingStats + running_state?: MlDatafeedRunningState } export interface MlDatafeedTimingStats { @@ -11127,7 +11274,7 @@ export interface MlDatafeedTimingStats { job_id: Id search_count: long total_search_time_ms: double - average_search_time_per_bucket_ms: number + average_search_time_per_bucket_ms?: number } export interface MlDataframeAnalysis { @@ -11471,9 +11618,9 @@ export interface MlJob { allow_lazy_open: boolean analysis_config: MlAnalysisConfig analysis_limits?: MlAnalysisLimits - background_persist_interval: Time + background_persist_interval?: Time blocked?: MlJobBlocked - create_time: integer + create_time?: integer custom_settings?: MlCustomSettings daily_model_snapshot_retention_after_days?: long data_description: MlDataDescription @@ -11483,8 +11630,8 @@ export interface MlJob { finished_time?: integer groups?: string[] job_id: Id - job_type: string - job_version: VersionString + job_type?: string + job_version?: VersionString model_plot_config?: MlModelPlotConfig model_snapshot_id?: Id model_snapshot_retention_days: long @@ -11539,12 +11686,12 @@ export interface MlJobStatistics { } export interface MlJobStats { - assignment_explanation: string + assignment_explanation?: string data_counts: MlDataCounts forecasts_stats: MlJobForecastStatistics job_id: string model_size_stats: MlModelSizeStats - node: MlDiscoveryNode + node?: MlDiscoveryNode open_time?: DateString state: MlJobState timing_stats: MlJobTimingStats @@ -11552,14 +11699,14 @@ export interface MlJobStats 
{ } export interface MlJobTimingStats { - average_bucket_processing_time_ms: double + average_bucket_processing_time_ms?: double bucket_count: long - exponential_average_bucket_processing_time_ms: double + exponential_average_bucket_processing_time_ms?: double exponential_average_bucket_processing_time_per_hour_ms: double job_id: Id total_bucket_processing_time_ms: double - maximum_bucket_processing_time_ms: double - minimum_bucket_processing_time_ms: double + maximum_bucket_processing_time_ms?: double + minimum_bucket_processing_time_ms?: double } export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' @@ -11699,7 +11846,7 @@ export interface MlTrainedModelAllocationTaskParameters { export interface MlTrainedModelConfig { model_id: Id - model_type: MlTrainedModelType + model_type?: MlTrainedModelType tags: string[] version?: VersionString compressed_definition?: string @@ -11713,6 +11860,8 @@ export interface MlTrainedModelConfig { input: MlTrainedModelConfigInput license_level?: string metadata?: MlTrainedModelConfigMetadata + model_size_bytes?: ByteSize + location?: MlTrainedModelLocation } export interface MlTrainedModelConfigInput { @@ -11742,6 +11891,14 @@ export interface MlTrainedModelInferenceStats { timestamp: Time } +export interface MlTrainedModelLocation { + index: MlTrainedModelLocationIndex +} + +export interface MlTrainedModelLocationIndex { + name: IndexName +} + export interface MlTrainedModelStats { model_id: Id pipeline_count: integer @@ -11867,6 +12024,7 @@ export interface MlDeleteModelSnapshotResponse extends AcknowledgedResponseBase export interface MlDeleteTrainedModelRequest extends RequestBase { model_id: Id + force?: boolean } export interface MlDeleteTrainedModelResponse extends AcknowledgedResponseBase { @@ -12012,14 +12170,14 @@ export interface MlFlushJobRequest extends RequestBase { advance_time?: DateString calc_interim?: boolean end?: DateString - skip_time?: string + skip_time?: EpochMillis start?: DateString /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { advance_time?: DateString calc_interim?: boolean end?: DateString - skip_time?: string + skip_time?: EpochMillis start?: DateString } } @@ -12485,7 +12643,7 @@ export interface MlPutCalendarRequest extends RequestBase { export interface MlPutCalendarResponse { calendar_id: Id - description: string + description?: string job_ids: Ids } @@ -12512,6 +12670,8 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase { max_num_threads?: integer model_memory_limit?: string source: MlDataframeAnalyticsSource + headers?: HttpHeaders + version?: VersionString } } @@ -12541,8 +12701,8 @@ export interface MlPutDatafeedRequest extends RequestBase { chunking_config?: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Time - indices?: string[] - indexes?: string[] + indices?: Indices + indexes?: Indices indices_options?: IndicesOptions job_id?: Id max_empty_searches?: integer @@ -12551,6 +12711,7 @@ export interface MlPutDatafeedRequest extends RequestBase { runtime_mappings?: MappingRuntimeFields script_fields?: Record scroll_size?: integer + headers?: HttpHeaders } } @@ -12674,6 +12835,7 @@ export interface MlPutTrainedModelPreprocessor { export interface MlPutTrainedModelRequest extends RequestBase { model_id: Id + defer_definition_decompression?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { compressed_definition?: string @@ -12683,6 +12845,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { input: MlPutTrainedModelInput metadata?: any model_type?: MlTrainedModelType + model_size_bytes?: long tags?: string[] } } @@ -13029,6 +13192,7 @@ export interface MlValidateRequest extends RequestBase { data_description?: MlDataDescription description?: string model_plot?: MlModelPlotConfig + model_snapshot_id?: Id model_snapshot_retention_days?: long results_index_name?: IndexName } @@ -13483,12 +13647,17 @@ export interface NodesHotThreadsRequest extends RequestBase { threads?: long timeout?: Time type?: ThreadType + sort?: ThreadType } export interface NodesHotThreadsResponse { hot_threads: NodesHotThreadsHotThread[] } +export interface NodesInfoDeprecationIndexing { + enabled: boolean | string +} + export interface NodesInfoNodeInfo { attributes: Record build_flavor: string @@ -13650,6 +13819,7 @@ export interface NodesInfoNodeInfoSettingsCluster { routing?: IndicesIndexRouting election: NodesInfoNodeInfoSettingsClusterElection initial_master_nodes?: string + deprecation_indexing?: NodesInfoDeprecationIndexing } export interface NodesInfoNodeInfoSettingsClusterElection { @@ -13751,7 +13921,7 @@ export interface NodesInfoNodeInfoXpackLicenseType { export interface NodesInfoNodeInfoXpackSecurity { http: NodesInfoNodeInfoXpackSecuritySsl enabled: string - transport: NodesInfoNodeInfoXpackSecuritySsl + transport?: NodesInfoNodeInfoXpackSecuritySsl authc?: NodesInfoNodeInfoXpackSecurityAuthc } @@ -14042,12 +14212,14 @@ export interface RollupPutJobRequest extends RequestBase { id: Id /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - cron?: string - groups?: RollupGroupings - index_pattern?: string + cron: string + groups: RollupGroupings + index_pattern: string metrics?: RollupFieldMetric[] - page_size?: long - rollup_index?: IndexName + page_size: integer + rollup_index: IndexName + timeout?: Time + headers?: HttpHeaders } } @@ -14151,6 +14323,17 @@ export interface SearchableSnapshotsStatsResponse { total: any } +export interface SecurityApiKey { + creation?: long + expiration?: long + id: Id + invalidated?: boolean + name: Name + realm?: string + username?: Username + metadata?: Metadata +} + export interface SecurityApplicationGlobalUserPrivileges { manage: SecurityManageUserPrivileges } @@ -14188,13 +14371,13 @@ export interface SecurityGlobalPrivilege { application: SecurityApplicationGlobalUserPrivileges } -export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' +export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity | SecurityFieldSecurity[] names: Indices privileges: SecurityIndexPrivilege[] - query?: string | string[] + query?: QueryDslQueryContainer allow_restricted_indices?: boolean } @@ -14227,24 +14410,19 @@ export interface SecurityRoleMappingRule { } export interface SecurityUser { 
- email?: string - full_name?: Name + email?: string | null + full_name?: Name | null metadata: Metadata roles: string[] username: Username enabled: boolean } -export interface SecurityAuthenticateApiKey { - id: string - name: Name -} - export interface SecurityAuthenticateRequest extends RequestBase { } export interface SecurityAuthenticateResponse { - api_key?: SecurityAuthenticateApiKey + api_key?: SecurityApiKey authentication_realm: SecurityRealmInfo email?: string | null full_name?: Name | null @@ -14441,17 +14619,6 @@ export interface SecurityEnableUserRequest extends RequestBase { export interface SecurityEnableUserResponse { } -export interface SecurityGetApiKeyApiKey { - creation: long - expiration?: long - id: Id - invalidated: boolean - name: Name - realm: string - username: Username - metadata?: Metadata -} - export interface SecurityGetApiKeyRequest extends RequestBase { id?: Id name?: Name @@ -14461,7 +14628,7 @@ export interface SecurityGetApiKeyRequest extends RequestBase { } export interface SecurityGetApiKeyResponse { - api_keys: SecurityGetApiKeyApiKey[] + api_keys: SecurityApiKey[] } export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { @@ -14481,7 +14648,7 @@ export interface SecurityGetPrivilegesResponse extends DictionaryResponseBase { @@ -14495,6 +14662,7 @@ export interface SecurityGetRoleRole { transient_metadata: SecurityGetRoleTransientMetadata applications: SecurityApplicationPrivileges[] role_templates?: SecurityGetRoleRoleTemplate[] + global?: Record>> } export interface SecurityGetRoleRoleTemplate { @@ -14509,7 +14677,7 @@ export interface SecurityGetRoleTransientMetadata { } export interface SecurityGetRoleMappingRequest extends RequestBase { - name?: Name + name?: Names } export interface SecurityGetRoleMappingResponse extends DictionaryResponseBase { @@ -14609,6 +14777,7 @@ export interface SecurityGetUserResponse extends DictionaryResponseBase[] } -export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' - export interface SecurityGrantApiKeyRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - api_key: SecurityGrantApiKeyApiKey + api_key: SecurityGrantApiKeyGrantApiKey grant_type: SecurityGrantApiKeyApiKeyGrantType access_token?: string username?: Username @@ -14656,12 +14825,13 @@ export type SecurityHasPrivilegesApplicationsPrivileges = Record export interface SecurityHasPrivilegesRequest extends RequestBase { - user?: Name + user?: Name | null /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] @@ -14789,6 +14959,23 @@ export interface SecurityPutUserResponse { created: boolean } +export interface SecurityQueryApiKeysRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + query?: QueryDslQueryContainer + from?: integer + sort?: Sort + size?: integer + search_after?: SortResults + } +} + +export interface SecurityQueryApiKeysResponse { + total: integer + count: integer + api_keys: SecurityApiKey[] +} + export interface ShutdownDeleteNodeRequest extends RequestBase { node_id: NodeId } @@ -15428,8 +15615,9 @@ export interface TasksListRequest extends RequestBase { actions?: string | string[] detailed?: boolean group_by?: TasksGroupBy - nodes?: string[] + node_id?: string[] parent_task_id?: Id + master_timeout?: Time timeout?: Time wait_for_completion?: boolean } @@ -15437,7 +15625,7 @@ export interface TasksListRequest extends RequestBase { export interface TasksListResponse { node_failures?: ErrorCause[] nodes?: Record - tasks?: Record + tasks?: TasksInfo[] | Record } export interface TextStructureFindStructureFieldStat { @@ -15501,6 +15689,11 @@ export interface TextStructureFindStructureTopHit { value: any } +export interface TransformDestination { + index?: IndexName + pipeline?: string +} + export interface TransformLatest { sort: Field unique_key: Field[] @@ -15535,6 +15728,12 @@ export interface TransformSettings { max_page_search_size?: integer } +export interface TransformSource { + index: Indices + query?: QueryDslQueryContainer + runtime_mappings?: MappingRuntimeFields +} + export interface TransformSyncContainer { time?: TransformTimeSync } @@ -15563,24 +15762,40 @@ export interface TransformGetTransformRequest extends RequestBase { export interface TransformGetTransformResponse { count: long - transforms: Transform[] + transforms: TransformGetTransformTransformSummary[] +} + +export interface TransformGetTransformTransformSummary { + dest: ReindexDestination + description?: string + frequency?: Time + id: Id + pivot?: TransformPivot + settings?: TransformSettings + source: TransformSource + sync?: TransformSyncContainer + create_time?: EpochMillis + version?: VersionString + latest?: TransformLatest + _meta?: Metadata } export interface TransformGetTransformStatsCheckpointStats { checkpoint: long checkpoint_progress?: TransformGetTransformStatsTransformProgress timestamp?: DateString - timestamp_millis: EpochMillis + timestamp_millis?: EpochMillis time_upper_bound?: DateString time_upper_bound_millis?: EpochMillis } export interface TransformGetTransformStatsCheckpointing { - changes_last_detected_at: long + changes_last_detected_at?: long changes_last_detected_at_date_time?: DateString last: TransformGetTransformStatsCheckpointStats next?: TransformGetTransformStatsCheckpointStats operations_behind?: long + last_search_time?: long } export interface TransformGetTransformStatsRequest extends RequestBase { @@ -15596,7 +15811,9 @@ export interface TransformGetTransformStatsResponse { } export interface TransformGetTransformStatsTransformIndexerStats { + delete_time_in_ms?: EpochMillis documents_indexed: long + documents_deleted?: long documents_processed: long exponential_avg_checkpoint_duration_ms: double exponential_avg_documents_indexed: double @@ -15635,11 +15852,11 @@ export interface TransformPreviewTransformRequest extends RequestBase { timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { - dest?: ReindexDestination + dest?: TransformDestination description?: string frequency?: Time pivot?: TransformPivot - source?: ReindexSource + source?: TransformSource settings?: TransformSettings sync?: TransformSyncContainer retention_policy?: TransformRetentionPolicyContainer @@ -15658,15 +15875,15 @@ export interface TransformPutTransformRequest extends RequestBase { timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - dest: ReindexDestination + dest: TransformDestination description?: string frequency?: Time latest?: TransformLatest - _meta?: Record + _meta?: Metadata pivot?: TransformPivot retention_policy?: TransformRetentionPolicyContainer settings?: TransformSettings - source: ReindexSource + source: TransformSource sync?: TransformSyncContainer } } @@ -15708,10 +15925,11 @@ export interface TransformUpdateTransformRequest extends RequestBase { timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - dest?: ReindexDestination + dest?: TransformDestination description?: string frequency?: Time - source?: ReindexSource + _meta?: Metadata + source?: TransformSource settings?: TransformSettings sync?: TransformSyncContainer retention_policy?: TransformRetentionPolicyContainer @@ -15722,7 +15940,7 @@ export interface TransformUpdateTransformResponse { create_time: long description: string dest: ReindexDestination - frequency: Time + frequency?: Time id: Id latest?: TransformLatest pivot?: TransformPivot @@ -15731,6 +15949,7 @@ export interface TransformUpdateTransformResponse { source: ReindexSource sync?: TransformSyncContainer version: VersionString + _meta?: Metadata } export interface TransformUpgradeTransformsRequest extends RequestBase { @@ -15847,9 +16066,29 @@ export interface WatcherDailySchedule { export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday' +export interface WatcherEmail { + bcc?: string[] + body?: WatcherEmailBody + cc?: string[] + from?: string + id: Id + priority?: WatcherEmailPriority + reply_to?: string[] + sent_date: DateString + subject: string + to: string[] +} + +export interface WatcherEmailBody { + html: string + text: string +} + +export type WatcherEmailPriority = 'lowest' | 'low' | 'normal' | 'high' | 'highest' + export interface WatcherEmailResult { account?: string - message: WatcherEmailResult + message: WatcherEmail reason?: string } @@ -15874,6 +16113,7 @@ export interface WatcherExecutionResultAction { status: WatcherActionStatusOptions type: WatcherActionType webhook?: WatcherWebhookResult + error?: ErrorCause } export interface WatcherExecutionResultCondition { @@ -15891,6 +16131,7 @@ export interface WatcherExecutionResultInput { export interface WatcherExecutionState { successful: boolean timestamp: DateString + reason?: string } export type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued' @@ -15999,16 +16240,9 @@ export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | export interface WatcherNeverCondition { } -export interface WatcherPagerDutyActionEventResult { - event: WatcherPagerDutyEvent - reason: string - request: WatcherHttpInputRequestResult - response: WatcherHttpInputResponseResult -} - export interface WatcherPagerDutyContext { - href: string - src: 
string + href?: string + src?: string type: WatcherPagerDutyContextType } @@ -16017,18 +16251,21 @@ export type WatcherPagerDutyContextType = 'link' | 'image' export interface WatcherPagerDutyEvent { account: string attach_payload: boolean - client: string - client_url: string - context: WatcherPagerDutyContext[] - description: string - event_type: WatcherPagerDutyEventType + client?: string + client_url?: string + contexts: WatcherPagerDutyContext[] + description?: string + event_type?: WatcherPagerDutyEventType incident_key: string } export type WatcherPagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge' export interface WatcherPagerDutyResult { - sent_event: WatcherPagerDutyActionEventResult + event: WatcherPagerDutyEvent + reason?: string + request?: WatcherHttpInputRequestResult + response?: WatcherHttpInputResponseResult } export type WatcherQuantifier = 'some' | 'all' @@ -16254,13 +16491,14 @@ export interface WatcherExecuteWatchWatchRecord { condition: WatcherConditionContainer input: WatcherInputContainer messages: string[] - metadata: Metadata + metadata?: Metadata node: string result: WatcherExecutionResult state: WatcherExecutionStatus trigger_event: WatcherTriggerEventResult user: Username watch_id: Id + status?: WatcherWatchStatus } export interface WatcherGetWatchRequest extends RequestBase { @@ -16425,6 +16663,7 @@ export interface XpackInfoNativeCodeInformation { export interface XpackInfoRequest extends RequestBase { categories?: string[] + accept_enterprise?: boolean } export interface XpackInfoResponse { @@ -16614,6 +16853,18 @@ export interface XpackUsageMlDataFrameAnalyticsJobsMemory { export interface XpackUsageMlInference { ingest_processors: Record trained_models: XpackUsageMlInferenceTrainedModels + deployments?: XpackUsageMlInferenceDeployments +} + +export interface XpackUsageMlInferenceDeployments { + count: integer + inference_counts: MlJobStatistics + model_sizes_bytes: MlJobStatistics + time_ms: XpackUsageMlInferenceDeploymentsTimeMs +} + +export interface XpackUsageMlInferenceDeploymentsTimeMs { + avg: double } export interface XpackUsageMlInferenceIngestProcessor { @@ -16634,14 +16885,15 @@ export interface XpackUsageMlInferenceTrainedModels { estimated_heap_memory_usage_bytes?: MlJobStatistics count?: XpackUsageMlInferenceTrainedModelsCount _all: XpackUsageMlCounter + model_size_bytes?: MlJobStatistics } export interface XpackUsageMlInferenceTrainedModelsCount { total: long prepackaged: long other: long - regression: long - classification: long + regression?: long + classification?: long } export interface XpackUsageMonitoring extends XpackUsageBase { From 0fbdf10a6840ec19e6faf92ab1591e95a0608e43 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Wed, 12 Jan 2022 08:14:19 +0100 Subject: [PATCH 130/647] Fix integration test (#1581) --- .ci/packer_cache.sh | 5 +- .ci/run-elasticsearch.sh | 15 +- test/integration/helper.js | 51 +- test/integration/index.js | 15 +- test/integration/integration/README.md | 52 - test/integration/integration/helper.js | 96 -- .../integration/helpers/bulk.test.js | 204 ---- .../integration/helpers/msearch.test.js | 121 --- .../integration/helpers/scroll.test.js | 118 --- .../integration/helpers/search.test.js | 71 -- test/integration/integration/index.js | 385 -------- test/integration/integration/reporter.js | 109 --- test/integration/integration/test-runner.js | 909 ------------------ test/integration/test-runner.js | 43 +- 14 files changed, 103 insertions(+), 2091 deletions(-) delete mode 100644 
test/integration/integration/README.md delete mode 100644 test/integration/integration/helper.js delete mode 100644 test/integration/integration/helpers/bulk.test.js delete mode 100644 test/integration/integration/helpers/msearch.test.js delete mode 100644 test/integration/integration/helpers/scroll.test.js delete mode 100644 test/integration/integration/helpers/search.test.js delete mode 100644 test/integration/integration/index.js delete mode 100644 test/integration/integration/reporter.js delete mode 100644 test/integration/integration/test-runner.js diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh index 41f3c12db..97903810d 100644 --- a/.ci/packer_cache.sh +++ b/.ci/packer_cache.sh @@ -2,10 +2,9 @@ source /usr/local/bin/bash_standard_lib.sh -DOCKER_IMAGES="node:14-alpine +DOCKER_IMAGES="node:16-alpine +node:14-alpine node:12-alpine -node:10-alpine -node:8-alpine " for di in ${DOCKER_IMAGES} diff --git a/.ci/run-elasticsearch.sh b/.ci/run-elasticsearch.sh index 89bce8cbd..3f4e2f1da 100755 --- a/.ci/run-elasticsearch.sh +++ b/.ci/run-elasticsearch.sh @@ -7,7 +7,7 @@ # Export the TEST_SUITE variable, eg. 'free' or 'platinum' defaults to 'free'. # Export the NUMBER_OF_NODES variable to start more than 1 node -# Version 1.5.0 +# Version 1.6.1 # - Initial version of the run-elasticsearch.sh script # - Deleting the volume should not dependent on the container still running # - Fixed `ES_JAVA_OPTS` config @@ -18,7 +18,9 @@ # - Added flags to make local CCR configurations work # - Added action.destructive_requires_name=false as the default will be true in v8 # - Added ingest.geoip.downloader.enabled=false as it causes false positives in testing -# - Moved ELASTIC_PASSWORD to the base arguments for "Security On by default" +# - Moved ELASTIC_PASSWORD and xpack.security.enabled to the base arguments for "Security On by default" +# - Use https only when TEST_SUITE is "platinum", when "free" use http +# - Set xpack.security.enabled=false for "free" and xpack.security.enabled=true for "platinum" script_path=$(dirname $(realpath -s $0)) source $script_path/functions/imports.sh @@ -44,12 +46,13 @@ environment=($(cat <<-END --env repositories.url.allowed_urls=http://snapshot.test* --env action.destructive_requires_name=false --env ingest.geoip.downloader.enabled=false + --env cluster.deprecation_indexing.enabled=false END )) if [[ "$TEST_SUITE" == "platinum" ]]; then environment+=($(cat <<-END - --env xpack.license.self_generated.type=trial --env xpack.security.enabled=true + --env xpack.license.self_generated.type=trial --env xpack.security.http.ssl.enabled=true --env xpack.security.http.ssl.verification_mode=certificate --env xpack.security.http.ssl.key=certs/testnode.key @@ -68,6 +71,12 @@ END --volume $ssl_ca:/usr/share/elasticsearch/config/certs/ca.crt END )) +else + environment+=($(cat <<-END + --env xpack.security.enabled=false + --env xpack.security.http.ssl.enabled=false +END +)) fi cert_validation_flags="" diff --git a/test/integration/helper.js b/test/integration/helper.js index b8e965b89..d58252580 100644 --- a/test/integration/helper.js +++ b/test/integration/helper.js @@ -19,6 +19,9 @@ 'use strict' +const assert = require('assert') +const fetch = require('node-fetch') + function runInParallel (client, operation, options, clientOptions) { if (options.length === 0) return Promise.resolve() const operations = options.map(opts => { @@ -65,6 +68,9 @@ function isXPackTemplate (name) { if (name.startsWith('.transform-')) { return true } + if (name.startsWith('.deprecation-')) { + return 
true
+  }
   switch (name) {
     case '.watches':
     case 'logstash-index-template':
@@ -84,14 +90,49 @@ function isXPackTemplate (name) {
     case 'synthetics-settings':
     case 'synthetics-mappings':
     case '.snapshot-blob-cache':
-    case '.deprecation-indexing-template':
-    case '.deprecation-indexing-mappings':
-    case '.deprecation-indexing-settings':
     case 'data-streams-mappings':
-    case '.logs-deprecation.elasticsearch-default':
       return true
   }
   return false
 }
 
-module.exports = { runInParallel, delve, to, sleep, isXPackTemplate }
+async function getSpec () {
+  const response = await fetch('http://github.com/service/https://raw.githubusercontent.com/elastic/elasticsearch-specification/main/output/schema/schema.json')
+  return await response.json()
+}
+
+let spec = null
+
+// some keys for the path used in the yaml tests are not supported in the client,
+// for example: snapshot.createRepository({ repository }) will not work.
+// This code changes the params to the appropriate name; in the example above,
+// "repository" will be renamed to "name"
+async function updateParams (cmd) {
+  if (spec == null) {
+    spec = await getSpec()
+  }
+  const endpoint = spec.endpoints.find(endpoint => endpoint.name === cmd.api)
+  assert(endpoint != null)
+  if (endpoint.request == null) return cmd
+
+  const type = spec.types.find(type => type.name.name === endpoint.request.name && type.name.namespace === endpoint.request.namespace)
+  assert(type != null)
+
+  const pathParams = type.path.reduce((acc, val) => {
+    if (val.codegenName != null) {
+      acc[val.name] = val.codegenName
+    }
+    return acc
+  }, {})
+
+  for (const key in cmd.params) {
+    if (pathParams[key] != null) {
+      cmd.params[pathParams[key]] = cmd.params[key]
+      delete cmd.params[key]
+    }
+  }
+
+  return cmd
+}
+
+module.exports = { runInParallel, delve, to, sleep, isXPackTemplate, updateParams }
diff --git a/test/integration/index.js b/test/integration/index.js
index eceb2ea24..02073cbb3 100644
--- a/test/integration/index.js
+++ b/test/integration/index.js
@@ -29,6 +29,7 @@ const { join, sep } = require('path')
 const yaml = require('js-yaml')
 const ms = require('ms')
 const { Client } = require('../../index')
+const { kProductCheck } = require('@elastic/transport/lib/symbols')
 const build = require('./test-runner')
 const { sleep } = require('./helper')
 const createJunitReporter = require('./reporter')
@@ -49,6 +50,8 @@ const freeSkips = {
     'Body params with array param override query string',
     'Body params with string param scroll id override query string'
   ],
+  'free/cat.allocation/10_basic.yml': ['*'],
+  'free/cat.snapshots/10_basic.yml': ['Test cat snapshots output'],
   // TODO: remove this once 'arbitrary_key' is implemented
   // https://github.com/elastic/elasticsearch/pull/41492
   'indices.split/30_copy_settings.yml': ['*'],
@@ -62,9 +65,11 @@ const freeSkips = {
   'search.aggregation/240_max_buckets.yml': ['*'],
   // the yaml runner assumes that null means "does not exist",
   // while null is a valid json value, so the check will fail
-  'search/320_disallow_queries.yml': ['Test disallow expensive queries']
+  'search/320_disallow_queries.yml': ['Test disallow expensive queries'],
+  'free/tsdb/90_unsupported_operations.yml': ['noop update']
 }
 const platinumBlackList = {
+  'api_key/20_query.yml': ['*'],
   'analytics/histogram.yml': ['Histogram requires values in increasing order'],
   // these two test cases are broken, we should
   // return on those in the future.
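To make the param renaming performed by `updateParams` concrete, here is a minimal usage sketch. Only `updateParams` comes from the patch itself; the `callApi` wrapper and the command shape around it are illustrative assumptions about how a runner could dispatch a YAML step:

```js
// Hypothetical usage of the updateParams helper above (sketch only).
// A YAML step such as `snapshot.create_repository` could reach the runner as
// { api: 'snapshot.createRepository', params: { repository: 'repo1' } }.
const { updateParams } = require('./helper')

async function callApi (client, cmd) {
  // Rewrites spec path-param names to the client's codegen names,
  // e.g. `repository` becomes `name` for snapshot.createRepository.
  const { api, params } = await updateParams(cmd)
  const [namespace, method] = api.split('.')
  return method != null
    ? client[namespace][method](params) // namespaced API, e.g. client.snapshot.createRepository
    : client[api](params)               // top-level API, e.g. client.search
}
```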
@@ -93,9 +98,15 @@ const platinumBlackList = {
   // The cleanup fails with an index not found when retrieving the jobs
   'ml/get_datafeed_stats.yml': ['Test get datafeed stats when total_search_time_ms mapping is missing'],
   'ml/bucket_correlation_agg.yml': ['Test correlation bucket agg simple'],
+  // start should be a string
+  'ml/jobs_get_result_overall_buckets.yml': ['Test overall buckets given epoch start and end params'],
+  // this can't happen with the client
+  'ml/start_data_frame_analytics.yml': ['Test start with inconsistent body/param ids'],
+  'ml/stop_data_frame_analytics.yml': ['Test stop with inconsistent body/param ids'],
   'ml/preview_datafeed.yml': ['*'],
   // Investigate why this is failing
   'ml/inference_crud.yml': ['*'],
+  'ml/categorization_agg.yml': ['Test categorization aggregation with poor settings'],
   // investigate why this is failing
   'monitoring/bulk/10_basic.yml': ['*'],
   'monitoring/bulk/20_privileges.yml': ['*'],
@@ -161,6 +172,8 @@ function runner (opts = {}) {
     }
   }
   const client = new Client(options)
+  // TODO: remove the following line once https://github.com/elastic/elasticsearch/issues/82358 is fixed
+  client.transport[kProductCheck] = null
   log('Loading yaml suite')
   start({ client, isXPack: opts.isXPack })
     .catch(err => {
diff --git a/test/integration/integration/README.md b/test/integration/integration/README.md
deleted file mode 100644
index 0861dd8b9..000000000
--- a/test/integration/integration/README.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# `elasticsearch-js` integration test suite
-
-> What? A README to explain how the integration tests work??
-
-Yes.
-
-## Background
-Elasticsearch offers its entire API via HTTP REST endpoints. You can find the whole API specification for every version [here](https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/api).
-To support different languages at the same time, the Elasticsearch team decided to provide a [YAML specification](https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/test) to test every endpoint, body, header, warning, error, and so on.
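To make that concrete, here is a hedged sketch of what one of those YAML `do` steps looks like once the runner has parsed it into a plain object; the shape matches the `parseDo` helper shown near the end of this patch, while the `indices.create` step itself is illustrative rather than taken from a real spec file.

```js
// Illustrative only: a parsed yaml `do` step and the command that the
// runner's parseDo helper derives from it (underscore params become camelCase)
const action = {
  'indices.create': { index: 'test', wait_for_active_shards: 1 },
  warnings: ['[index] is deprecated']
}

// parseDo(action) would yield roughly:
const cmd = {
  api: 'indices.create',
  method: 'indices.create',
  params: { index: 'test', waitForActiveShards: 1 },
  warnings: ['[index] is deprecated']
}

console.log(action, cmd)
```

The real implementation also handles `catch` and `node_selector` keys; see `parseDo` further down.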
-This testing suite uses that specification to generate the tests for the specified version of Elasticsearch on the fly. - -## Run -Running the testing suite is very easy: you just need to run the preconfigured npm script: -```sh -npm run test:integration -``` - -The first time you run this command, the Elasticsearch repository will be cloned inside the integration test folder so that the suite can access the YAML specification; this might take some time *(luckily, only the first time)*.
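By default the suite targets a local cluster; if you need to point it elsewhere, the runner reads its connection settings from the environment. A minimal sketch, assuming you invoke the exported runner directly (the defaults mirror the `index.js` entry point shown later in this patch):

```js
// Hedged sketch: running the suite against an explicit node, using the same
// environment variables (TEST_ES_SERVER, TEST_SUITE) that index.js reads
const runner = require('./index')

runner({
  node: process.env.TEST_ES_SERVER || 'https://elastic:changeme@localhost:9200/',
  isXPack: process.env.TEST_SUITE !== 'free'
})
```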
-Once the Elasticsearch repository has been cloned, the testing suite will connect to the provided Elasticsearch instance and then check out the build hash in the repository. Finally, it will start running every test. - -The specification does not allow the tests to be run in parallel, so it might take a while to run the entire testing suite; on my machine (`MacBookPro15,2`, Core i7 2.7GHz, 16GB of RAM) it takes around four minutes. - -### Exit on the first failure -By default the suite will run all the tests, even if one assertion has failed. If you want to stop the test run at the first failure, use the bailout option: -```sh -npm run test:integration -- --bail -``` - -### Calculate the code coverage -If you want to calculate the code coverage, run the testing suite with the following parameters; once the test ends, it will open a browser window with the results. -```sh -npm run test:integration -- --cov --coverage-report=html -``` - -## How does this thing work? -At first sight, it might seem complicated, but once you understand what the moving parts are, it's quite easy. -1. Connects to the given Elasticsearch instance -1. Gets the ES version and build hash -1. Checks out the given hash (and clones the repository if it is not present) -1. Reads the folder list and, for each folder, the yaml file list -1. Starts running every file, folder by folder - 1. Reads and parses the yaml files - 1. Creates a subtest structure to have a cleaner output - 1. Runs the assertions - 1. Repeat! - -Inside the `index.js` file, you will find the connection, cloning, reading, and parsing parts of the test, while inside the `test-runner.js` file you will find the functions that handle the assertions. Inside `test-runner.js`, we use a [queue](https://github.com/delvedor/workq) to be sure that everything is run in the correct order. - -Check out the [rest-api-spec readme](https://github.com/elastic/elasticsearch/blob/master/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc) if you want to know more about how the assertions work. - -#### Why are we running the tests with the `--harmony` flag? -Because regex lookbehinds are not supported on Node v6. diff --git a/test/integration/integration/helper.js b/test/integration/integration/helper.js deleted file mode 100644 index eb2021040..000000000 --- a/test/integration/integration/helper.js +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License.
- */ - -'use strict' - -function runInParallel (client, operation, options, clientOptions) { - if (options.length === 0) return Promise.resolve() - const operations = options.map(opts => { - const api = delve(client, operation).bind(client) - return api(opts, clientOptions) - }) - - return Promise.all(operations) -} - -// code from https://github.com/developit/dlv -// needed to support an edge case: `a\.b` -// where `a.b` is a single field: { 'a.b': true } -function delve (obj, key, def, p) { - p = 0 - // handle the key with a dot inside that is not a part of the path - // and removes the backslashes from the key - key = key.split - ? key.split(/(?<!\\)\./g).map(k => k.replace(/\\/g, '')) - : key.replace(/\\/g, '') - while (obj && p < key.length) obj = obj[key[p++]] - return (obj === undefined || p < key.length) ? def : obj -} - -function to (promise) { - return promise.then(data => [null, data], err => [err, undefined]) -} - -const sleep = ms => new Promise(resolve => setTimeout(resolve, ms)) - -function isXPackTemplate (name) { - if (name.startsWith('.monitoring-')) { - return true - } - if (name.startsWith('.watch') || name.startsWith('.triggered_watches')) { - return true - } - if (name.startsWith('.data-frame-')) { - return true - } - if (name.startsWith('.ml-')) { - return true - } - if (name.startsWith('.transform-')) { - return true - } - switch (name) { - case '.watches': - case 'logstash-index-template': - case '.logstash-management': - case 'security_audit_log': - case '.slm-history': - case '.async-search': - case 'saml-service-provider': - case 'ilm-history': - case 'logs': - case 'logs-settings': - case 'logs-mappings': - case 'metrics': - case 'metrics-settings': - case 'metrics-mappings': - case 'synthetics': - case 'synthetics-settings': - case 'synthetics-mappings': - case '.snapshot-blob-cache': - case '.deprecation-indexing-template': - case '.deprecation-indexing-mappings': - case '.deprecation-indexing-settings': - case 'data-streams-mappings': - return true - } - return false -} - -module.exports = { runInParallel, delve, to, sleep, isXPackTemplate } diff --git a/test/integration/integration/helpers/bulk.test.js b/test/integration/integration/helpers/bulk.test.js deleted file mode 100644 index 011f524c3..000000000 --- a/test/integration/integration/helpers/bulk.test.js +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License.
- */ - -'use strict' - -const { createReadStream } = require('fs') -const { join } = require('path') -const split = require('split2') -const { test, beforeEach, afterEach } = require('tap') -const { waitCluster } = require('../../utils') -const { Client } = require('../../../') - -const datasetPath = join(__dirname, '..', '..', 'fixtures', 'stackoverflow.ndjson') -const INDEX = `test-helpers-${process.pid}` -const client = new Client({ - node: process.env.TEST_ES_SERVER || 'http://localhost:9200/' -}) - -beforeEach(async () => { - await waitCluster(client) - await client.indices.create({ index: INDEX }) -}) - -afterEach(async () => { - await client.indices.delete({ index: INDEX }, { ignore: 404 }) -}) - -test('bulk index', async t => { - const stream = createReadStream(datasetPath) - const result = await client.helpers.bulk({ - datasource: stream.pipe(split()), - refreshOnCompletion: INDEX, - onDrop (doc) { - t.fail('It should not drop any document') - }, - onDocument (doc) { - return { - index: { _index: INDEX } - } - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 5000, - successful: 5000, - retry: 0, - failed: 0, - aborted: false - }) - - const { body } = await client.count({ index: INDEX }) - t.match(body, { count: 5000 }) -}) - -test('bulk index with custom id', async t => { - const stream = createReadStream(datasetPath) - const result = await client.helpers.bulk({ - datasource: stream.pipe(split(JSON.parse)), - onDrop (doc) { - t.fail('It should not drop any document') - }, - onDocument (doc) { - return { - index: { - _index: INDEX, - _id: doc.id - } - } - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 5000, - successful: 5000, - retry: 0, - failed: 0, - aborted: false - }) - - const { body } = await client.get({ - index: INDEX, - id: '19273860' // id of document n° 4242 - }) - - t.equal(body._index, INDEX) - t.equal(body._id, '19273860') - t.equal(body._source.id, '19273860') -}) - -test('abort the operation on document drop', async t => { - const stream = createReadStream(datasetPath) - const b = client.helpers.bulk({ - datasource: stream.pipe(split(JSON.parse)), - concurrency: 1, - onDrop (doc) { - t.equal(doc.status, 400) - t.equal(doc.error.type, 'mapper_parsing_exception') - t.equal(doc.document.id, '45924372') - b.abort() - }, - onDocument (doc) { - if (doc.id === '45924372') { // id of document n° 500 - // this will break the mapping - doc.title = { foo: 'bar' } - } - return { - index: { - _index: INDEX, - _id: doc.id - } - } - } - }) - - const result = await b - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.equal(result.total - 1, result.successful) - t.match(result, { - retry: 0, - failed: 1, - aborted: true - }) -}) - -test('bulk delete', async t => { - const indexResult = await client.helpers.bulk({ - datasource: createReadStream(datasetPath).pipe(split(JSON.parse)), - refreshOnCompletion: true, - onDrop (doc) { - t.fail('It should not drop any document') - }, - onDocument (doc) { - return { - index: { - _index: INDEX, - _id: doc.id - } - } - } - }) - - t.type(indexResult.time, 'number') - t.type(indexResult.bytes, 'number') - t.match(indexResult, { - total: 5000, - successful: 5000, - retry: 0, - failed: 0, - aborted: false - }) - - const { body: afterIndex } = await client.count({ index: INDEX }) - t.match(afterIndex, { count: 5000 }) - - const deleteResult = await client.helpers.bulk({ - datasource: 
createReadStream(datasetPath).pipe(split(JSON.parse)), - refreshOnCompletion: true, - onDrop (doc) { - t.fail('It should not drop any document') - }, - onDocument (doc) { - return { - delete: { - _index: INDEX, - _id: doc.id - } - } - } - }) - - t.type(deleteResult.time, 'number') - t.type(deleteResult.bytes, 'number') - t.match(deleteResult, { - total: 5000, - successful: 5000, - retry: 0, - failed: 0, - aborted: false - }) - - const { body: afterDelete } = await client.count({ index: INDEX }) - t.match(afterDelete, { count: 0 }) -}) diff --git a/test/integration/integration/helpers/msearch.test.js b/test/integration/integration/helpers/msearch.test.js deleted file mode 100644 index c9c726ecc..000000000 --- a/test/integration/integration/helpers/msearch.test.js +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { createReadStream } = require('fs') -const { join } = require('path') -const split = require('split2') -const { test, beforeEach, afterEach } = require('tap') -const { waitCluster } = require('../../utils') -const { Client, errors } = require('../../../') - -const INDEX = `test-helpers-${process.pid}` -const client = new Client({ - node: process.env.TEST_ES_SERVER || 'http://localhost:9200/' -}) - -beforeEach(async () => { - await waitCluster(client) - await client.indices.create({ index: INDEX }) - const stream = createReadStream(join(__dirname, '..', '..', 'fixtures', 'stackoverflow.ndjson')) - const result = await client.helpers.bulk({ - datasource: stream.pipe(split()), - refreshOnCompletion: true, - onDocument (doc) { - return { - index: { _index: INDEX } - } - } - }) - if (result.failed > 0) { - throw new Error('Failed bulk indexing docs') - } -}) - -afterEach(async () => { - await client.indices.delete({ index: INDEX }, { ignore: 404 }) -}) - -test('Basic', t => { - t.plan(4) - const m = client.helpers.msearch({ operations: 1 }) - - m.search( - { index: INDEX }, - { query: { match: { title: 'javascript' } } }, - (err, result) => { - t.error(err) - t.equal(result.body.hits.total.value, 106) - } - ) - - m.search( - { index: INDEX }, - { query: { match: { title: 'ruby' } } }, - (err, result) => { - t.error(err) - t.equal(result.body.hits.total.value, 29) - } - ) - - t.teardown(() => m.stop()) -}) - -test('Bad request', t => { - t.plan(3) - const m = client.helpers.msearch({ operations: 1 }) - - m.search( - { index: INDEX }, - { query: { match: { title: 'javascript' } } }, - (err, result) => { - t.error(err) - t.equal(result.body.hits.total.value, 106) - } - ) - - m.search( - { index: INDEX }, - { query: { foo: { title: 'ruby' } } }, - (err, result) => { - t.ok(err instanceof errors.ResponseError) - } - ) - - t.teardown(() => m.stop()) -}) - -test('Send multiple 
request concurrently over the concurrency limit', t => { - t.plan(20) - const m = client.helpers.msearch({ operations: 1 }) - - for (let i = 0; i < 10; i++) { - m.search( - { index: INDEX }, - { query: { match: { title: 'javascript' } } }, - (err, result) => { - t.error(err) - t.equal(result.body.hits.total.value, 106) - } - ) - } - - t.teardown(() => m.stop()) -}) diff --git a/test/integration/integration/helpers/scroll.test.js b/test/integration/integration/helpers/scroll.test.js deleted file mode 100644 index e197ce21a..000000000 --- a/test/integration/integration/helpers/scroll.test.js +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { createReadStream } = require('fs') -const { join } = require('path') -const split = require('split2') -const { test, beforeEach, afterEach } = require('tap') -const { waitCluster } = require('../../utils') -const { Client } = require('../../../') - -const INDEX = `test-helpers-${process.pid}` -const client = new Client({ - node: process.env.TEST_ES_SERVER || 'http://localhost:9200/' -}) - -beforeEach(async () => { - await waitCluster(client) - await client.indices.create({ index: INDEX }) - const stream = createReadStream(join(__dirname, '..', '..', 'fixtures', 'stackoverflow.ndjson')) - const result = await client.helpers.bulk({ - datasource: stream.pipe(split()), - refreshOnCompletion: true, - onDocument (doc) { - return { - index: { _index: INDEX } - } - } - }) - if (result.failed > 0) { - throw new Error('Failed bulk indexing docs') - } -}) - -afterEach(async () => { - await client.indices.delete({ index: INDEX }, { ignore: 404 }) -}) - -test('search helper', async t => { - const scrollSearch = client.helpers.scrollSearch({ - index: INDEX, - body: { - query: { - match: { - title: 'javascript' - } - } - } - }) - - let count = 0 - for await (const search of scrollSearch) { - count += 1 - for (const doc of search.documents) { - t.ok(doc.title.toLowerCase().includes('javascript')) - } - } - t.equal(count, 11) -}) - -test('clear a scroll search', async t => { - const scrollSearch = client.helpers.scrollSearch({ - index: INDEX, - body: { - query: { - match: { - title: 'javascript' - } - } - } - }) - - let count = 0 - for await (const search of scrollSearch) { - count += 1 - if (count === 2) { - search.clear() - } - } - t.equal(count, 2) -}) - -test('scroll documents', async t => { - const scrollSearch = client.helpers.scrollDocuments({ - index: INDEX, - body: { - query: { - match: { - title: 'javascript' - } - } - } - }) - - let count = 0 - for await (const doc of scrollSearch) { - count += 1 - t.ok(doc.title.toLowerCase().includes('javascript')) - } - t.equal(count, 106) -}) diff --git a/test/integration/integration/helpers/search.test.js 
b/test/integration/integration/helpers/search.test.js deleted file mode 100644 index d4aa57c9a..000000000 --- a/test/integration/integration/helpers/search.test.js +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { createReadStream } = require('fs') -const { join } = require('path') -const split = require('split2') -const { test, beforeEach, afterEach } = require('tap') -const { waitCluster } = require('../../utils') -const { Client } = require('../../../') - -const INDEX = `test-helpers-${process.pid}` -const client = new Client({ - node: process.env.TEST_ES_SERVER || 'http://localhost:9200/' -}) - -beforeEach(async () => { - await waitCluster(client) - await client.indices.create({ index: INDEX }) - const stream = createReadStream(join(__dirname, '..', '..', 'fixtures', 'stackoverflow.ndjson')) - const result = await client.helpers.bulk({ - datasource: stream.pipe(split()), - refreshOnCompletion: true, - onDocument (doc) { - return { - index: { _index: INDEX } - } - } - }) - if (result.failed > 0) { - throw new Error('Failed bulk indexing docs') - } -}) - -afterEach(async () => { - await client.indices.delete({ index: INDEX }, { ignore: 404 }) -}) - -test('search helper', async t => { - const results = await client.helpers.search({ - index: INDEX, - body: { - query: { - match: { - title: 'javascript' - } - } - } - }) - t.equal(results.length, 10) - for (const result of results) { - t.ok(result.title.toLowerCase().includes('javascript')) - } -}) diff --git a/test/integration/integration/index.js b/test/integration/integration/index.js deleted file mode 100644 index 098b52073..000000000 --- a/test/integration/integration/index.js +++ /dev/null @@ -1,385 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -process.on('unhandledRejection', function (err) { - console.error(err) - process.exit(1) -}) - -const { writeFileSync, readFileSync, readdirSync, statSync } = require('fs') -const { join, sep } = require('path') -const yaml = require('js-yaml') -const ms = require('ms') -const { Client } = require('../../index') -const build = require('./test-runner') -const { sleep } = require('./helper') -const createJunitReporter = require('./reporter') -const downloadArtifacts = require('../../scripts/download-artifacts') - -const yamlFolder = downloadArtifacts.locations.freeTestFolder -const xPackYamlFolder = downloadArtifacts.locations.xPackTestFolder - -const MAX_API_TIME = 1000 * 90 -const MAX_FILE_TIME = 1000 * 30 -const MAX_TEST_TIME = 1000 * 3 - -const freeSkips = { - // TODO: remove this once 'arbitrary_key' is implemented - // https://github.com/elastic/elasticsearch/pull/41492 - 'indices.split/30_copy_settings.yml': ['*'], - 'indices.stats/50_disk_usage.yml': ['Disk usage stats'], - 'indices.stats/60_field_usage.yml': ['Field usage stats'], - // skipping because we are booting ES with `discovery.type=single-node` - // and this test will fail because of this configuration - 'nodes.stats/30_discovery.yml': ['*'], - // the expected error is returning a 503, - // which triggers a retry and the node to be marked as dead - 'search.aggregation/240_max_buckets.yml': ['*'], - // the yaml runner assumes that null means "does not exists", - // while null is a valid json value, so the check will fail - 'search/320_disallow_queries.yml': ['Test disallow expensive queries'] -} -const platinumBlackList = { - 'analytics/histogram.yml': ['Histogram requires values in increasing order'], - // this two test cases are broken, we should - // return on those in the future. 
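A note on how these skip lists are consumed (see `shouldSkip` further down in this file): a value of `'*'` skips every test in the yaml file, while a specific string skips only the matching test name. A minimal sketch of that matching, with a hypothetical file path:

```js
// Hedged condensation of the blacklist matching performed by shouldSkip below
const platinumBlackList = { 'ml/inference_crud.yml': ['*'] }

function isBlacklisted (file, name) {
  for (const [yamlFile, tests] of Object.entries(platinumBlackList)) {
    if (file.endsWith(yamlFile) && (tests.includes(name) || tests.includes('*'))) {
      return true
    }
  }
  return false
}

// hypothetical path and test name, for illustration only
console.log(isBlacklisted('/tmp/x-pack/ml/inference_crud.yml', 'Test put model')) // true
```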
- 'analytics/top_metrics.yml': [ - 'sort by keyword field fails', - 'sort by string script fails' - ], - 'cat.aliases/10_basic.yml': ['Empty cluster'], - 'index/10_with_id.yml': ['Index with ID'], - 'indices.get_alias/10_basic.yml': ['Get alias against closed indices'], - 'indices.get_alias/20_empty.yml': ['Check empty aliases when getting all aliases via /_alias'], - 'text_structure/find_structure.yml': ['*'], - // https://github.com/elastic/elasticsearch/pull/39400 - 'ml/jobs_crud.yml': ['Test put job with id that is already taken'], - // object keys must me strings, and `0.0.toString()` is `0` - 'ml/evaluate_data_frame.yml': [ - 'Test binary_soft_classifition precision', - 'Test binary_soft_classifition recall', - 'Test binary_soft_classifition confusion_matrix' - ], - // it gets random failures on CI, must investigate - 'ml/set_upgrade_mode.yml': [ - 'Attempt to open job when upgrade_mode is enabled', - 'Setting upgrade mode to disabled from enabled' - ], - // The cleanup fails with a index not found when retrieving the jobs - 'ml/get_datafeed_stats.yml': ['Test get datafeed stats when total_search_time_ms mapping is missing'], - 'ml/bucket_correlation_agg.yml': ['Test correlation bucket agg simple'], - 'ml/preview_datafeed.yml': ['*'], - // Investigate why is failing - 'ml/inference_crud.yml': ['*'], - // investigate why this is failing - 'monitoring/bulk/10_basic.yml': ['*'], - 'monitoring/bulk/20_privileges.yml': ['*'], - 'license/20_put_license.yml': ['*'], - 'snapshot/10_basic.yml': ['*'], - 'snapshot/20_operator_privileges_disabled.yml': ['*'], - // the body is correct, but the regex is failing - 'sql/sql.yml': ['Getting textual representation'], - 'searchable_snapshots/10_usage.yml': ['*'], - 'service_accounts/10_basic.yml': ['*'], - // we are setting two certificates in the docker config - 'ssl/10_basic.yml': ['*'], - // very likely, the index template has not been loaded yet. - // we should run a indices.existsTemplate, but the name of the - // template may vary during time. - 'transforms_crud.yml': [ - 'Test basic transform crud', - 'Test transform with query and array of indices in source', - 'Test PUT continuous transform', - 'Test PUT continuous transform without delay set' - ], - 'transforms_force_delete.yml': [ - 'Test force deleting a running transform' - ], - 'transforms_cat_apis.yml': ['*'], - 'transforms_start_stop.yml': ['*'], - 'transforms_stats.yml': ['*'], - 'transforms_stats_continuous.yml': ['*'], - 'transforms_update.yml': ['*'], - // js does not support ulongs - 'unsigned_long/10_basic.yml': ['*'], - 'unsigned_long/20_null_value.yml': ['*'], - 'unsigned_long/30_multi_fields.yml': ['*'], - 'unsigned_long/40_different_numeric.yml': ['*'], - 'unsigned_long/50_script_values.yml': ['*'], - // docker issue? 
- 'watcher/execute_watch/60_http_input.yml': ['*'], - // the checks are correct, but for some reason the test is failing on js side - // I bet is because the backslashes in the rg - 'watcher/execute_watch/70_invalid.yml': ['*'], - 'watcher/put_watch/10_basic.yml': ['*'], - 'xpack/15_basic.yml': ['*'] -} - -function runner (opts = {}) { - const options = { node: opts.node } - if (opts.isXPack) { - options.ssl = { - ca: readFileSync(join(__dirname, '..', '..', '.ci', 'certs', 'ca.crt'), 'utf8'), - rejectUnauthorized: false - } - } - const client = new Client(options) - log('Loading yaml suite') - start({ client, isXPack: opts.isXPack }) - .catch(err => { - if (err.name === 'ResponseError') { - console.error(err) - console.log(JSON.stringify(err.meta, null, 2)) - } else { - console.error(err) - } - process.exit(1) - }) -} - -async function waitCluster (client, times = 0) { - try { - await client.cluster.health({ waitForStatus: 'green', timeout: '50s' }) - } catch (err) { - if (++times < 10) { - await sleep(5000) - return waitCluster(client, times) - } - console.error(err) - process.exit(1) - } -} - -async function start ({ client, isXPack }) { - log('Waiting for Elasticsearch') - await waitCluster(client) - - const { body } = await client.info() - const { number: version, build_hash: hash } = body.version - - log(`Downloading artifacts for hash ${hash}...`) - await downloadArtifacts({ hash, version }) - - log(`Testing ${isXPack ? 'Platinum' : 'Free'} api...`) - const junit = createJunitReporter() - const junitTestSuites = junit.testsuites(`Integration test for ${isXPack ? 'Platinum' : 'Free'} api`) - - const stats = { - total: 0, - skip: 0, - pass: 0, - assertions: 0 - } - const folders = getAllFiles(isXPack ? xPackYamlFolder : yamlFolder) - .filter(t => !/(README|TODO)/g.test(t)) - // we cluster the array based on the folder names, - // to provide a better test log output - .reduce((arr, file) => { - const path = file.slice(file.indexOf('/rest-api-spec/test'), file.lastIndexOf('/')) - let inserted = false - for (let i = 0; i < arr.length; i++) { - if (arr[i][0].includes(path)) { - inserted = true - arr[i].push(file) - break - } - } - if (!inserted) arr.push([file]) - return arr - }, []) - - const totalTime = now() - for (const folder of folders) { - // pretty name - const apiName = folder[0].slice( - folder[0].indexOf(`${sep}rest-api-spec${sep}test`) + 19, - folder[0].lastIndexOf(sep) - ) - - log('Testing ' + apiName.slice(1)) - const apiTime = now() - - for (const file of folder) { - const testRunner = build({ - client, - version, - isXPack: file.includes('platinum') - }) - const fileTime = now() - const data = readFileSync(file, 'utf8') - // get the test yaml (as object), some file has multiple yaml documents inside, - // every document is separated by '---', so we split on the separator - // and then we remove the empty strings, finally we parse them - const tests = data - .split('\n---\n') - .map(s => s.trim()) - // empty strings - .filter(Boolean) - .map(parse) - // null values - .filter(Boolean) - - // get setup and teardown if present - let setupTest = null - let teardownTest = null - for (const test of tests) { - if (test.setup) setupTest = test.setup - if (test.teardown) teardownTest = test.teardown - } - - const cleanPath = file.slice(file.lastIndexOf(apiName)) - log(' ' + cleanPath) - const junitTestSuite = junitTestSuites.testsuite(apiName.slice(1) + ' - ' + cleanPath) - - for (const test of tests) { - const testTime = now() - const name = Object.keys(test)[0] - if (name === 
'setup' || name === 'teardown') continue - const junitTestCase = junitTestSuite.testcase(name) - - stats.total += 1 - if (shouldSkip(isXPack, file, name)) { - stats.skip += 1 - junitTestCase.skip('This test is in the skip list of the client') - junitTestCase.end() - continue - } - log(' - ' + name) - try { - await testRunner.run(setupTest, test[name], teardownTest, stats, junitTestCase) - stats.pass += 1 - } catch (err) { - junitTestCase.failure(err) - junitTestCase.end() - junitTestSuite.end() - junitTestSuites.end() - generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free') - console.error(err) - process.exit(1) - } - const totalTestTime = now() - testTime - junitTestCase.end() - if (totalTestTime > MAX_TEST_TIME) { - log(' took too long: ' + ms(totalTestTime)) - } else { - log(' took: ' + ms(totalTestTime)) - } - } - junitTestSuite.end() - const totalFileTime = now() - fileTime - if (totalFileTime > MAX_FILE_TIME) { - log(` ${cleanPath} took too long: ` + ms(totalFileTime)) - } else { - log(` ${cleanPath} took: ` + ms(totalFileTime)) - } - } - const totalApiTime = now() - apiTime - if (totalApiTime > MAX_API_TIME) { - log(`${apiName} took too long: ` + ms(totalApiTime)) - } else { - log(`${apiName} took: ` + ms(totalApiTime)) - } - } - junitTestSuites.end() - generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free') - log(`Total testing time: ${ms(now() - totalTime)}`) - log(`Test stats: - - Total: ${stats.total} - - Skip: ${stats.skip} - - Pass: ${stats.pass} - - Assertions: ${stats.assertions} - `) -} - -function log (text) { - process.stdout.write(text + '\n') -} - -function now () { - const ts = process.hrtime() - return (ts[0] * 1e3) + (ts[1] / 1e6) -} - -function parse (data) { - let doc - try { - doc = yaml.load(data, { schema: yaml.CORE_SCHEMA }) - } catch (err) { - console.error(err) - return - } - return doc -} - -function generateJunitXmlReport (junit, suite) { - writeFileSync( - join(__dirname, '..', '..', `${suite}-report-junit.xml`), - junit.prettyPrint() - ) -} - -if (require.main === module) { - const node = process.env.TEST_ES_SERVER || 'https://elastic:changeme@localhost:9200/' - const opts = { - node, - isXPack: process.env.TEST_SUITE !== 'free' - } - runner(opts) -} - -const shouldSkip = (isXPack, file, name) => { - let list = Object.keys(freeSkips) - for (let i = 0; i < list.length; i++) { - const freeTest = freeSkips[list[i]] - for (let j = 0; j < freeTest.length; j++) { - if (file.endsWith(list[i]) && (name === freeTest[j] || freeTest[j] === '*')) { - const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name - log(`Skipping test ${testName} because is blacklisted in the free test`) - return true - } - } - } - - if (file.includes('x-pack') || isXPack) { - list = Object.keys(platinumBlackList) - for (let i = 0; i < list.length; i++) { - const platTest = platinumBlackList[list[i]] - for (let j = 0; j < platTest.length; j++) { - if (file.endsWith(list[i]) && (name === platTest[j] || platTest[j] === '*')) { - const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name - log(`Skipping test ${testName} because is blacklisted in the platinum test`) - return true - } - } - } - } - - return false -} - -const getAllFiles = dir => - readdirSync(dir).reduce((files, file) => { - const name = join(dir, file) - const isDirectory = statSync(name).isDirectory() - return isDirectory ? 
[...files, ...getAllFiles(name)] : [...files, name] - }, []) - -module.exports = runner diff --git a/test/integration/integration/reporter.js b/test/integration/integration/reporter.js deleted file mode 100644 index 0d3621de7..000000000 --- a/test/integration/integration/reporter.js +++ /dev/null @@ -1,109 +0,0 @@ -'use strict' - -const assert = require('assert') -const { create } = require('xmlbuilder2') - -function createJunitReporter () { - const report = {} - - return { testsuites, prettyPrint } - - function prettyPrint () { - return create(report).end({ prettyPrint: true }) - } - - function testsuites (name) { - assert(name, 'The testsuites name is required') - assert(report.testsuites === undefined, 'Cannot set more than one testsuites block') - const startTime = Date.now() - - report.testsuites = { - '@id': new Date().toISOString(), - '@name': name - } - - const testsuiteList = [] - - return { - testsuite: createTestSuite(testsuiteList), - end () { - report.testsuites['@time'] = Math.round((Date.now() - startTime) / 1000) - report.testsuites['@tests'] = testsuiteList.reduce((acc, val) => { - acc += val['@tests'] - return acc - }, 0) - report.testsuites['@failures'] = testsuiteList.reduce((acc, val) => { - acc += val['@failures'] - return acc - }, 0) - report.testsuites['@skipped'] = testsuiteList.reduce((acc, val) => { - acc += val['@skipped'] - return acc - }, 0) - if (testsuiteList.length) { - report.testsuites.testsuite = testsuiteList - } - } - } - } - - function createTestSuite (testsuiteList) { - return function testsuite (name) { - assert(name, 'The testsuite name is required') - const startTime = Date.now() - const suite = { - '@id': new Date().toISOString(), - '@name': name - } - const testcaseList = [] - testsuiteList.push(suite) - return { - testcase: createTestCase(testcaseList), - end () { - suite['@time'] = Math.round((Date.now() - startTime) / 1000) - suite['@tests'] = testcaseList.length - suite['@failures'] = testcaseList.filter(t => t.failure).length - suite['@skipped'] = testcaseList.filter(t => t.skipped).length - if (testcaseList.length) { - suite.testcase = testcaseList - } - } - } - } - } - - function createTestCase (testcaseList) { - return function testcase (name) { - assert(name, 'The testcase name is required') - const startTime = Date.now() - const tcase = { - '@id': new Date().toISOString(), - '@name': name - } - testcaseList.push(tcase) - return { - failure (error) { - assert(error, 'The failure error object is required') - tcase.failure = { - '#': error.stack, - '@message': error.message, - '@type': error.code - } - }, - skip (reason) { - if (typeof reason !== 'string') { - reason = JSON.stringify(reason, null, 2) - } - tcase.skipped = { - '#': reason - } - }, - end () { - tcase['@time'] = Math.round((Date.now() - startTime) / 1000) - } - } - } - } -} - -module.exports = createJunitReporter diff --git a/test/integration/integration/test-runner.js b/test/integration/integration/test-runner.js deleted file mode 100644 index 49807d548..000000000 --- a/test/integration/integration/test-runner.js +++ /dev/null @@ -1,909 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ - -const assert = require('assert') -const semver = require('semver') -const helper = require('./helper') -const deepEqual = require('fast-deep-equal') -const { join } = require('path') -const { locations } = require('../../scripts/download-artifacts') -const { ConfigurationError } = require('../../lib/errors') - -const { delve, to, isXPackTemplate, sleep } = helper - -const supportedFeatures = [ - 'gtelte', - 'regex', - 'benchmark', - 'stash_in_path', - 'groovy_scripting', - 'headers', - 'transform_and_set', - 'catch_unauthorized', - 'arbitrary_key' -] - -function build (opts = {}) { - const client = opts.client - const esVersion = opts.version - const isXPack = opts.isXPack - const stash = new Map() - let response = null - - /** - * Runs a cleanup, removes all indices, aliases, templates, and snapshots - * @returns {Promise} - */ - async function cleanup (isXPack) { - response = null - stash.clear() - - if (isXPack) { - // wipe rollup jobs - const { body: jobsList } = await client.rollup.getJobs({ id: '_all' }) - const jobsIds = jobsList.jobs.map(j => j.config.id) - await helper.runInParallel( - client, 'rollup.stopJob', - jobsIds.map(j => ({ id: j, waitForCompletion: true })) - ) - await helper.runInParallel( - client, 'rollup.deleteJob', - jobsIds.map(j => ({ id: j })) - ) - - // delete slm policies - const { body: policies } = await client.slm.getLifecycle() - await helper.runInParallel( - client, 'slm.deleteLifecycle', - Object.keys(policies).map(p => ({ policy_id: p })) - ) - - // remove 'x_pack_rest_user', used in some xpack test - await client.security.deleteUser({ username: 'x_pack_rest_user' }, { ignore: [404] }) - - const { body: searchableSnapshotIndices } = await client.cluster.state({ - metric: 'metadata', - filter_path: 'metadata.indices.*.settings.index.store.snapshot' - }) - if (searchableSnapshotIndices.metadata != null && searchableSnapshotIndices.metadata.indices != null) { - await helper.runInParallel( - client, 'indices.delete', - Object.keys(searchableSnapshotIndices.metadata.indices).map(i => ({ index: i })), - { ignore: [404] } - ) - } - } - - // clean snapshots - const { body: repositories } = await client.snapshot.getRepository() - for (const repository of Object.keys(repositories)) { - await client.snapshot.delete({ repository, snapshot: '*' }, { ignore: [404] }) - await client.snapshot.deleteRepository({ repository }, { ignore: [404] }) - } - - if (isXPack) { - // clean data streams - await client.indices.deleteDataStream({ name: '*', expand_wildcards: 'all' }) - } - - // clean all indices - await client.indices.delete({ index: '*,-.ds-ilm-history-*', expand_wildcards: 'open,closed,hidden' }, { ignore: [404] }) - - // delete templates - const { body: templates } = await client.cat.templates({ h: 'name' }) - for (const template of templates.split('\n').filter(Boolean)) { - if (isXPackTemplate(template)) continue - const { body } = await client.indices.deleteTemplate({ name: template }, { ignore: [404] }) - if (JSON.stringify(body).includes(`index_template [${template}] missing`)) { - await 
client.indices.deleteIndexTemplate({ name: template }, { ignore: [404] }) - } - } - - // delete component template - const { body } = await client.cluster.getComponentTemplate() - const components = body.component_templates.filter(c => !isXPackTemplate(c.name)).map(c => c.name) - if (components.length > 0) { - await client.cluster.deleteComponentTemplate({ name: components.join(',') }, { ignore: [404] }) - } - - // Remove any cluster setting - const { body: settings } = await client.cluster.getSettings() - const newSettings = {} - for (const setting in settings) { - if (Object.keys(settings[setting]).length === 0) continue - newSettings[setting] = {} - for (const key in settings[setting]) { - newSettings[setting][`${key}.*`] = null - } - } - if (Object.keys(newSettings).length > 0) { - await client.cluster.putSettings({ body: newSettings }) - } - - if (isXPack) { - // delete ilm policies - const preserveIlmPolicies = [ - 'ilm-history-ilm-policy', 'slm-history-ilm-policy', - 'watch-history-ilm-policy', 'ml-size-based-ilm-policy', - 'logs', 'metrics' - ] - const { body: policies } = await client.ilm.getLifecycle() - for (const policy in policies) { - if (preserveIlmPolicies.includes(policy)) continue - await client.ilm.deleteLifecycle({ policy }) - } - - // delete autofollow patterns - const { body: patterns } = await client.ccr.getAutoFollowPattern() - for (const { name } of patterns.patterns) { - await client.ccr.deleteAutoFollowPattern({ name }) - } - - // delete all tasks - const { body: nodesTask } = await client.tasks.list() - const tasks = Object.keys(nodesTask.nodes) - .reduce((acc, node) => { - const { tasks } = nodesTask.nodes[node] - Object.keys(tasks).forEach(id => { - if (tasks[id].cancellable) acc.push(id) - }) - return acc - }, []) - - await helper.runInParallel( - client, 'tasks.cancel', - tasks.map(id => ({ taskId: id })) - ) - } - - const { body: shutdownNodes } = await client.shutdown.getNode() - if (shutdownNodes._nodes == null && shutdownNodes.cluster_name == null) { - for (const node of shutdownNodes.nodes) { - await client.shutdown.deleteNode({ node_id: node.node_id }) - } - } - - // wait for pending task before resolving the promise - await sleep(100) - while (true) { - const { body } = await client.cluster.pendingTasks() - if (body.tasks.length === 0) break - await sleep(500) - } - } - - /** - * Runs the given test. 
- * It runs the test components in the following order: - * - skip check - * - xpack user - * - setup - * - the actual test - * - teardown - * - xpack cleanup - * - cleanup - * @param {object} setup (null if not needed) - * @param {object} test - * @oaram {object} teardown (null if not needed) - * @returns {Promise} - */ - async function run (setup, test, teardown, stats, junit) { - // if we should skip a feature in the setup/teardown section - // we should skip the entire test file - const skip = getSkip(setup) || getSkip(teardown) - if (skip && shouldSkip(esVersion, skip)) { - junit.skip(skip) - logSkip(skip) - return - } - - if (isXPack) { - // Some xpack test requires this user - // tap.comment('Creating x-pack user') - try { - await client.security.putUser({ - username: 'x_pack_rest_user', - body: { password: 'x-pack-test-password', roles: ['superuser'] } - }) - } catch (err) { - assert.ifError(err, 'should not error: security.putUser') - } - } - - if (setup) await exec('Setup', setup, stats, junit) - - await exec('Test', test, stats, junit) - - if (teardown) await exec('Teardown', teardown, stats, junit) - - await cleanup(isXPack) - } - - /** - * Fill the stashed values of a command - * let's say the we have stashed the `master` value, - * is_true: nodes.$master.transport.profiles - * becomes - * is_true: nodes.new_value.transport.profiles - * @param {object|string} the action to update - * @returns {object|string} the updated action - */ - function fillStashedValues (obj) { - if (typeof obj === 'string') { - return getStashedValues(obj) - } - // iterate every key of the object - for (const key in obj) { - const val = obj[key] - // if the key value is a string, and the string includes '${' - // that we must update the content of '${...}'. - // eg: 'Basic ${auth}' we search the stahed value 'auth' - // and the resulting value will be 'Basic valueOfAuth' - if (typeof val === 'string' && val.includes('${')) { - while (obj[key].includes('${')) { - const val = obj[key] - const start = val.indexOf('${') - const end = val.indexOf('}', val.indexOf('${')) - const stashedKey = val.slice(start + 2, end) - const stashed = stash.get(stashedKey) - obj[key] = val.slice(0, start) + stashed + val.slice(end + 1) - } - continue - } - // handle json strings, eg: '{"hello":"$world"}' - if (typeof val === 'string' && val.includes('"$')) { - while (obj[key].includes('"$')) { - const val = obj[key] - const start = val.indexOf('"$') - const end = val.indexOf('"', start + 1) - const stashedKey = val.slice(start + 2, end) - const stashed = '"' + stash.get(stashedKey) + '"' - obj[key] = val.slice(0, start) + stashed + val.slice(end + 1) - } - continue - } - // if the key value is a string, and the string includes '$' - // we run the "update value" code - if (typeof val === 'string' && val.includes('$')) { - // update the key value - obj[key] = getStashedValues(val) - continue - } - - // go deep in the object - if (val !== null && typeof val === 'object') { - fillStashedValues(val) - } - } - - return obj - - function getStashedValues (str) { - const arr = str - // we split the string on the dots - // handle the key with a dot inside that is not a part of the path - .split(/(? 
{ - if (part[0] === '$') { - const stashed = stash.get(part.slice(1)) - if (stashed == null) { - throw new Error(`Cannot find stashed value '${part}' for '${JSON.stringify(obj)}'`) - } - return stashed - } - return part - }) - - // recreate the string value only if the array length is higher than one - // otherwise return the first element which in some test this could be a number, - // and call `.join` will coerce it to a string. - return arr.length > 1 ? arr.join('.') : arr[0] - } - } - - /** - * Stashes a value - * @param {string} the key to search in the previous response - * @param {string} the name to identify the stashed value - * @returns {TestRunner} - */ - function set (key, name) { - if (key.includes('_arbitrary_key_')) { - let currentVisit = null - for (const path of key.split('.')) { - if (path === '_arbitrary_key_') { - const keys = Object.keys(currentVisit) - const arbitraryKey = keys[getRandomInt(0, keys.length)] - stash.set(name, arbitraryKey) - } else { - currentVisit = delve(response, path) - } - } - } else { - stash.set(name, delve(response, key)) - } - } - - /** - * Applies a given transformation and stashes the result. - * @param {string} the name to identify the stashed value - * @param {string} the transformation function as string - * @returns {TestRunner} - */ - function transform_and_set (name, transform) { - if (/base64EncodeCredentials/.test(transform)) { - const [user, password] = transform - .slice(transform.indexOf('(') + 1, -1) - .replace(/ /g, '') - .split(',') - const userAndPassword = `${delve(response, user)}:${delve(response, password)}` - stash.set(name, Buffer.from(userAndPassword).toString('base64')) - } else { - throw new Error(`Unknown transform: '${transform}'`) - } - } - - /** - * Runs a client command - * @param {object} the action to perform - * @returns {Promise} - */ - async function doAction (action, stats) { - const cmd = parseDo(action) - let api - try { - api = delve(client, cmd.method).bind(client) - } catch (err) { - console.error(`\nError: Cannot find the method '${cmd.method}' in the client.\n`) - process.exit(1) - } - - const options = { ignore: cmd.params.ignore, headers: action.headers } - if (!Array.isArray(options.ignore)) options.ignore = [options.ignore] - if (cmd.params.ignore) delete cmd.params.ignore - - // ndjson apis should always send the body as an array - if (isNDJson(cmd.api) && !Array.isArray(cmd.params.body)) { - cmd.params.body = [cmd.params.body] - } - - const [err, result] = await to(api(cmd.params, options)) - let warnings = result ? result.warnings : null - const body = result ? 
result.body : null - - if (action.warnings && warnings === null) { - assert.fail('We should get a warning header', action.warnings) - } else if (!action.warnings && warnings !== null) { - // if there is only the 'default shard will change' - // warning we skip the check, because the yaml - // spec may not be updated - let hasDefaultShardsWarning = false - warnings.forEach(h => { - if (/default\snumber\sof\sshards/g.test(h)) { - hasDefaultShardsWarning = true - } - }) - - if (hasDefaultShardsWarning === true && warnings.length > 1) { - assert.fail('We are not expecting warnings', warnings) - } - } else if (action.warnings && warnings !== null) { - // if the yaml warnings do not contain the - // 'default shard will change' warning - // we do not check it presence in the warnings array - // because the yaml spec may not be updated - let hasDefaultShardsWarning = false - action.warnings.forEach(h => { - if (/default\snumber\sof\sshards/g.test(h)) { - hasDefaultShardsWarning = true - } - }) - - if (hasDefaultShardsWarning === false) { - warnings = warnings.filter(h => !h.test(/default\snumber\sof\sshards/g)) - } - - stats.assertions += 1 - assert.ok(deepEqual(warnings, action.warnings)) - } - - if (action.catch) { - stats.assertions += 1 - assert.ok( - parseDoError(err, action.catch), - `the error should be: ${action.catch}` - ) - try { - response = JSON.parse(err.body) - } catch (e) { - response = err.body - } - } else { - stats.assertions += 1 - assert.ifError(err, `should not error: ${cmd.method}`, action) - response = body - } - } - - /** - * Runs an actual test - * @param {string} the name of the test - * @param {object} the actions to perform - * @returns {Promise} - */ - async function exec (name, actions, stats, junit) { - // tap.comment(name) - for (const action of actions) { - if (action.skip) { - if (shouldSkip(esVersion, action.skip)) { - junit.skip(fillStashedValues(action.skip)) - logSkip(fillStashedValues(action.skip)) - break - } - } - - if (action.do) { - await doAction(fillStashedValues(action.do), stats) - } - - if (action.set) { - const key = Object.keys(action.set)[0] - set(fillStashedValues(key), action.set[key]) - } - - if (action.transform_and_set) { - const key = Object.keys(action.transform_and_set)[0] - transform_and_set(key, action.transform_and_set[key]) - } - - if (action.match) { - stats.assertions += 1 - const key = Object.keys(action.match)[0] - match( - // in some cases, the yaml refers to the body with an empty string - key === '$body' || key === '' - ? response - : delve(response, fillStashedValues(key)), - key === '$body' - ? action.match[key] - : fillStashedValues(action.match)[key], - action.match - ) - } - - if (action.lt) { - stats.assertions += 1 - const key = Object.keys(action.lt)[0] - lt( - delve(response, fillStashedValues(key)), - fillStashedValues(action.lt)[key] - ) - } - - if (action.gt) { - stats.assertions += 1 - const key = Object.keys(action.gt)[0] - gt( - delve(response, fillStashedValues(key)), - fillStashedValues(action.gt)[key] - ) - } - - if (action.lte) { - stats.assertions += 1 - const key = Object.keys(action.lte)[0] - lte( - delve(response, fillStashedValues(key)), - fillStashedValues(action.lte)[key] - ) - } - - if (action.gte) { - stats.assertions += 1 - const key = Object.keys(action.gte)[0] - gte( - delve(response, fillStashedValues(key)), - fillStashedValues(action.gte)[key] - ) - } - - if (action.length) { - stats.assertions += 1 - const key = Object.keys(action.length)[0] - length( - key === '$body' || key === '' - ? 
response - : delve(response, fillStashedValues(key)), - key === '$body' - ? action.length[key] - : fillStashedValues(action.length)[key] - ) - } - - if (action.is_true) { - stats.assertions += 1 - const isTrue = fillStashedValues(action.is_true) - is_true( - delve(response, isTrue), - isTrue - ) - } - - if (action.is_false) { - stats.assertions += 1 - const isFalse = fillStashedValues(action.is_false) - is_false( - delve(response, isFalse), - isFalse - ) - } - } - } - - return { run } -} - -/** - * Asserts that the given value is truthy - * @param {any} the value to check - * @param {string} an optional message - * @returns {TestRunner} - */ -function is_true (val, msg) { - assert.ok(val, `expect truthy value: ${msg} - value: ${JSON.stringify(val)}`) -} - -/** - * Asserts that the given value is falsey - * @param {any} the value to check - * @param {string} an optional message - * @returns {TestRunner} - */ -function is_false (val, msg) { - assert.ok(!val, `expect falsey value: ${msg} - value: ${JSON.stringify(val)}`) -} - -/** - * Asserts that two values are the same - * @param {any} the first value - * @param {any} the second value - * @returns {TestRunner} - */ -function match (val1, val2, action) { - // both values are objects - if (typeof val1 === 'object' && typeof val2 === 'object') { - assert.ok(deepEqual(val1, val2), action) - // the first value is the body as string and the second a pattern string - } else if ( - typeof val1 === 'string' && typeof val2 === 'string' && - val2.startsWith('/') && (val2.endsWith('/\n') || val2.endsWith('/')) - ) { - const regStr = val2 - // match all comments within a "regexp" match arg - .replace(/([\S\s]?)#[^\n]*\n/g, (match, prevChar) => { - return prevChar === '\\' ? match : `${prevChar}\n` - }) - // remove all whitespace from the expression, all meaningful - // whitespace is represented with \s - .replace(/\s/g, '') - .slice(1, -1) - // 'm' adds the support for multiline regex - assert.ok(new RegExp(regStr, 'm').test(val1), `should match pattern provided: ${val2}, action: ${JSON.stringify(action)}`) - // tap.match(val1, new RegExp(regStr, 'm'), `should match pattern provided: ${val2}, action: ${JSON.stringify(action)}`) - // everything else - } else { - assert.equal(val1, val2, `should be equal: ${val1} - ${val2}, action: ${JSON.stringify(action)}`) - } -} - -/** - * Asserts that the first value is less than the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @returns {TestRunner} - */ -function lt (val1, val2) { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 < val2) -} - -/** - * Asserts that the first value is greater than the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @returns {TestRunner} - */ -function gt (val1, val2) { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 > val2) -} - -/** - * Asserts that the first value is less than or equal the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @returns {TestRunner} - */ -function lte (val1, val2) { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 <= val2) -} - -/** - * Asserts that the first value is greater than or equal the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @returns {TestRunner} -*/ -function gte (val1, val2) { - 
;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 >= val2) -} - -/** - * Asserts that the given value has the specified length - * @param {string|object|array} the object to check - * @param {number} the expected length - * @returns {TestRunner} - */ -function length (val, len) { - if (typeof val === 'string' || Array.isArray(val)) { - assert.equal(val.length, len) - } else if (typeof val === 'object' && val !== null) { - assert.equal(Object.keys(val).length, len) - } else { - assert.fail(`length: the given value is invalid: ${val}`) - } -} - -/** - * Gets a `do` action object and returns a structured object, - * where the action is the key and the parameter is the value. - * Eg: - * { - * 'indices.create': { - * 'index': 'test' - * }, - * 'warnings': [ - * '[index] is deprecated' - * ] - * } - * becomes - * { - * method: 'indices.create', - * params: { - * index: 'test' - * }, - * warnings: [ - * '[index] is deprecated' - * ] - * } - * @param {object} - * @returns {object} - */ -function parseDo (action) { - return Object.keys(action).reduce((acc, val) => { - switch (val) { - case 'catch': - acc.catch = action.catch - break - case 'warnings': - acc.warnings = action.warnings - break - case 'node_selector': - acc.node_selector = action.node_selector - break - default: - // converts underscore to camelCase - // eg: put_mapping => putMapping - acc.method = val.replace(/_([a-z])/g, g => g[1].toUpperCase()) - acc.api = val - acc.params = camelify(action[val]) - } - return acc - }, {}) - - function camelify (obj) { - const newObj = {} - - // TODO: add camelCase support for this fields - const doNotCamelify = ['copy_settings'] - - for (const key in obj) { - const val = obj[key] - let newKey = key - if (!~doNotCamelify.indexOf(key)) { - // if the key starts with `_` we should not camelify the first occurence - // eg: _source_include => _sourceInclude - newKey = key[0] === '_' - ? 
'_' + key.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : key.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - if ( - val !== null && - typeof val === 'object' && - !Array.isArray(val) && - key !== 'body' - ) { - newObj[newKey] = camelify(val) - } else { - newObj[newKey] = val - } - } - - return newObj - } -} - -function parseDoError (err, spec) { - const httpErrors = { - bad_request: 400, - unauthorized: 401, - forbidden: 403, - missing: 404, - request_timeout: 408, - conflict: 409, - unavailable: 503 - } - - if (httpErrors[spec]) { - return err.statusCode === httpErrors[spec] - } - - if (spec === 'request') { - return err.statusCode >= 400 && err.statusCode < 600 - } - - if (spec.startsWith('/') && spec.endsWith('/')) { - return new RegExp(spec.slice(1, -1), 'g').test(JSON.stringify(err.body)) - } - - if (spec === 'param') { - return err instanceof ConfigurationError - } - - return false -} - -function getSkip (arr) { - if (!Array.isArray(arr)) return null - for (let i = 0; i < arr.length; i++) { - if (arr[i].skip) return arr[i].skip - } - return null -} - -// Gets two *maybe* numbers and returns two valida numbers -// it throws if one or both are not a valid number -// the returned value is an array with the new values -function getNumbers (val1, val2) { - const val1Numeric = Number(val1) - if (isNaN(val1Numeric)) { - throw new TypeError(`val1 is not a valid number: ${val1}`) - } - const val2Numeric = Number(val2) - if (isNaN(val2Numeric)) { - throw new TypeError(`val2 is not a valid number: ${val2}`) - } - return [val1Numeric, val2Numeric] -} - -function getRandomInt (min, max) { - return Math.floor(Math.random() * (max - min)) + min -} - -/** - * Logs a skip - * @param {object} the actions - * @returns {TestRunner} - */ -function logSkip (action) { - if (action.reason && action.version) { - console.log(`Skip: ${action.reason} (${action.version})`) - } else if (action.features) { - console.log(`Skip: ${JSON.stringify(action.features)})`) - } else { - console.log('Skipped') - } -} - -/** - * Decides if a test should be skipped - * @param {object} the actions - * @returns {boolean} - */ -function shouldSkip (esVersion, action) { - let shouldSkip = false - // skip based on the version - if (action.version) { - if (action.version.trim() === 'all') return true - const versions = action.version.split(',').filter(Boolean) - for (const version of versions) { - const [min, max] = version.split('-').map(v => v.trim()) - // if both `min` and `max` are specified - if (min && max) { - shouldSkip = semver.satisfies(esVersion, action.version) - // if only `min` is specified - } else if (min) { - shouldSkip = semver.gte(esVersion, min) - // if only `max` is specified - } else if (max) { - shouldSkip = semver.lte(esVersion, max) - // something went wrong! 
- } else { - throw new Error(`skip: Bad version range: ${action.version}`) - } - } - } - - if (shouldSkip) return true - - if (action.features) { - if (!Array.isArray(action.features)) action.features = [action.features] - // returns true if one of the features is not present in the supportedFeatures - shouldSkip = !!action.features.filter(f => !~supportedFeatures.indexOf(f)).length - } - - if (shouldSkip) return true - - return false -} - -function isNDJson (api) { - const spec = require(join(locations.specFolder, `${api}.json`)) - const { content_type } = spec[Object.keys(spec)[0]].headers - return Boolean(content_type && content_type.includes('application/x-ndjson')) -} - -/** - * Updates the array syntax of keys and values - * eg: 'hits.hits.1.stuff' to 'hits.hits[1].stuff' - * @param {object} the action to update - * @returns {obj} the updated action - */ -// function updateArraySyntax (obj) { -// const newObj = {} - -// for (const key in obj) { -// const newKey = key.replace(/\.\d{1,}\./g, v => `[${v.slice(1, -1)}].`) -// const val = obj[key] - -// if (typeof val === 'string') { -// newObj[newKey] = val.replace(/\.\d{1,}\./g, v => `[${v.slice(1, -1)}].`) -// } else if (val !== null && typeof val === 'object') { -// newObj[newKey] = updateArraySyntax(val) -// } else { -// newObj[newKey] = val -// } -// } - -// return newObj -// } - -module.exports = build diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js index c5f2a2fec..a1a92a981 100644 --- a/test/integration/test-runner.js +++ b/test/integration/test-runner.js @@ -29,7 +29,7 @@ const { join } = require('path') const { locations } = require('../../scripts/download-artifacts') const packageJson = require('../../package.json') -const { delve, to, isXPackTemplate, sleep } = helper +const { delve, to, isXPackTemplate, sleep, updateParams } = helper const supportedFeatures = [ 'gtelte', @@ -58,6 +58,12 @@ function build (opts = {}) { response = null stash.clear() + await client.cluster.health({ + wait_for_no_initializing_shards: true, + timeout: '70s', + level: 'shards' + }) + if (isXPack) { // wipe rollup jobs const jobsList = await client.rollup.getJobs({ id: '_all' }) @@ -98,16 +104,24 @@ function build (opts = {}) { const repositories = await client.snapshot.getRepository() for (const repository of Object.keys(repositories)) { await client.snapshot.delete({ repository, snapshot: '*' }, { ignore: [404] }) - await client.snapshot.deleteRepository({ repository }, { ignore: [404] }) + await client.snapshot.deleteRepository({ name: repository }, { ignore: [404] }) } if (isXPack) { // clean data streams - await client.indices.deleteDataStream({ name: '*' }) + await client.indices.deleteDataStream({ name: '*', expand_wildcards: 'all' }) } // clean all indices - await client.indices.delete({ index: '*,-.ds-ilm-history-*', expand_wildcards: 'open,closed,hidden' }, { ignore: [404] }) + await client.indices.delete({ + index: [ + '*', + '-.ds-ilm-history-*' + ], + expand_wildcards: 'open,closed,hidden' + }, { + ignore: [404] + }) // delete templates const templates = await client.cat.templates({ h: 'name' }) @@ -150,7 +164,7 @@ function build (opts = {}) { const policies = await client.ilm.getLifecycle() for (const policy in policies) { if (preserveIlmPolicies.includes(policy)) continue - await client.ilm.deleteLifecycle({ policy }) + await client.ilm.deleteLifecycle({ name: policy }) } // delete autofollow patterns @@ -371,7 +385,7 @@ function build (opts = {}) { * @returns {Promise} */ async function doAction 
(action, stats) { - const cmd = parseDo(action) + const cmd = await updateParams(parseDo(action)) let api try { api = delve(client, cmd.method).bind(client) @@ -450,6 +464,7 @@ function build (opts = {}) { if (action.catch) { stats.assertions += 1 + assert.ok(err, `Expecting an error, but instead got ${JSON.stringify(err)}, the response was ${JSON.stringify(result)}`) assert.ok( parseDoError(err, action.catch), `the error should be: ${action.catch}` @@ -620,16 +635,11 @@ function match (val1, val2, action) { val2.startsWith('/') && (val2.endsWith('/\n') || val2.endsWith('/')) ) { const regStr = val2 - // match all comments within a "regexp" match arg - .replace(/([\S\s]?)#[^\n]*\n/g, (match, prevChar) => { - return prevChar === '\\' ? match : `${prevChar}\n` - }) - // remove all whitespace from the expression, all meaningful - // whitespace is represented with \s - .replace(/\s/g, '') + .replace(/(^|[^\\])#.*/g, '$1') + .replace(/(^|[^\\])\s+/g, '$1') .slice(1, -1) // 'm' adds the support for multiline regex - assert.ok(new RegExp(regStr, 'm').test(val1), `should match pattern provided: ${val2}, action: ${JSON.stringify(action)}`) + assert.ok(new RegExp(regStr, 'm').test(val1), `should match pattern provided: ${val2}, but got: ${val1}`) // tap.match(val1, new RegExp(regStr, 'm'), `should match pattern provided: ${val2}, action: ${JSON.stringify(action)}`) // everything else } else { @@ -745,6 +755,11 @@ function parseDo (action) { acc.method = val.replace(/_([a-z])/g, g => g[1].toUpperCase()) acc.api = val acc.params = action[val] // camelify(action[val]) + if (typeof acc.params.body === 'string') { + try { + acc.params.body = JSON.parse(acc.params.body) + } catch (err) {} + } } return acc }, {}) From 048ad273b2e1327640e1baee1d978d30d5b3062c Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 12 Jan 2022 11:24:26 +0100 Subject: [PATCH 131/647] API generation --- src/api/types.ts | 8 ++------ src/api/typesWithBodyKey.ts | 8 ++------ 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/src/api/types.ts b/src/api/types.ts index 4ecde761f..6cc2643ca 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -1059,8 +1059,6 @@ export interface SearchAggregationProfileDelegateDebugFilter { export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' -export type SearchBuiltinHighlighterType = 'plain' | 'fvh' | 'unified' - export interface SearchCollector { name: string reason: string @@ -1206,7 +1204,7 @@ export type SearchHighlighterOrder = 'score' export type SearchHighlighterTagsSchema = 'styled' -export type SearchHighlighterType = SearchBuiltinHighlighterType | string +export type SearchHighlighterType = 'plain' | 'fvh' | 'unified'| string export interface SearchHit { _index: IndexName @@ -1727,8 +1725,6 @@ export interface AcknowledgedResponseBase { export type AggregateName = string -export type BuiltinScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' - export interface BulkIndexByScrollFailure { cause: ErrorCause id: Id @@ -2181,7 +2177,7 @@ export interface ScriptField { ignore_failure?: boolean } -export type ScriptLanguage = BuiltinScriptLanguage | string +export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java'| string export interface ScriptSort { order?: SortOrder diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index f63131965..a16e076bc 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -1131,8 +1131,6 @@ export interface SearchAggregationProfileDelegateDebugFilter { export type 
SearchBoundaryScanner = 'chars' | 'sentence' | 'word' -export type SearchBuiltinHighlighterType = 'plain' | 'fvh' | 'unified' - export interface SearchCollector { name: string reason: string @@ -1278,7 +1276,7 @@ export type SearchHighlighterOrder = 'score' export type SearchHighlighterTagsSchema = 'styled' -export type SearchHighlighterType = SearchBuiltinHighlighterType | string +export type SearchHighlighterType = 'plain' | 'fvh' | 'unified'| string export interface SearchHit { _index: IndexName @@ -1827,8 +1825,6 @@ export interface AcknowledgedResponseBase { export type AggregateName = string -export type BuiltinScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' - export interface BulkIndexByScrollFailure { cause: ErrorCause id: Id @@ -2281,7 +2277,7 @@ export interface ScriptField { ignore_failure?: boolean } -export type ScriptLanguage = BuiltinScriptLanguage | string +export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java'| string export interface ScriptSort { order?: SortOrder From e915e4b9af026409d5779609c055bab80bcbbc0e Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 12 Jan 2022 11:29:22 +0100 Subject: [PATCH 132/647] Updated docs --- README.md | 10 ++-------- docs/installation.asciidoc | 4 ++-- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 673bbc1f7..5d06668ac 100644 --- a/README.md +++ b/README.md @@ -2,16 +2,10 @@ # Elasticsearch Node.js client -[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://clients-ci.elastic.co/buildStatus/icon?job=elastic%2Belasticsearch-js%2Bmaster)](https://clients-ci.elastic.co/view/Javascript/job/elastic+elasticsearch-js+master/) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) +[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://clients-ci.elastic.co/buildStatus/icon?job=elastic%2Belasticsearch-js%2Bmain)](https://clients-ci.elastic.co/view/Javascript/job/elastic+elasticsearch-js+main/) [![Node CI](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml/badge.svg)](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) The official Node.js client for Elasticsearch. ---- - -**Note:** In the past months we have worked on the new Elasticsearch Node.js client and you can use it by following the instructions below. If you're going to use the legacy one or report an issue, however, please check out [elastic/elasticsearch-js-legacy](https://github.com/elastic/elasticsearch-js-legacy). - ---- - ## Features - One-to-one mapping with REST API. - Generalized, pluggable architecture. 
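A note on the `'plain' | 'fvh' | 'unified' | string`-style aliases produced by the API generation above: inlining `| string` keeps the generated unions open, so values the spec does not enumerate still type-check. A minimal TypeScript sketch of the trade-off — the `OpenUnion` helper is an illustration only, not part of the generated code:

```ts
// As generated: any string is accepted, but the union widens to plain
// `string`, so editors stop suggesting the known literals.
type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string

// A common refinement (assumption, not in the diff above): intersecting
// with `{}` keeps arbitrary strings assignable while preserving literal
// autocompletion for the enumerated values.
type OpenUnion<T extends string> = T | (string & {})
type HighlighterTypeHinted = OpenUnion<'plain' | 'fvh' | 'unified'>

const known: SearchHighlighterType = 'fvh'        // enumerated literal
const custom: SearchHighlighterType = 'my-custom' // still allowed
```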
@@ -59,7 +53,7 @@ Elasticsearch language clients are only backwards compatible with default distri | Elasticsearch Version | Client Version | | --------------------- |----------------| -| `master` | `master` | +| `main` | `main` | | `7.x` | `7.x` | | `6.x` | `6.x` | | `5.x` | `5.x` | diff --git a/docs/installation.asciidoc b/docs/installation.asciidoc index ff9fdd2ee..0d44a1518 100644 --- a/docs/installation.asciidoc +++ b/docs/installation.asciidoc @@ -70,8 +70,8 @@ Elasticsearch language clients are only backwards compatible with default distri |{es} Version |Client Version -|`master` -|`master` +|`main` +|`main` |`7.x` |`7.x` From 4643a359fe56cc03d9345a24bb1da65a5ff6e694 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Mon, 24 Jan 2022 10:19:30 +0100 Subject: [PATCH 133/647] Add missing files to repo (#1613) --- CHANGELOG.md | 3 +++ NOTICE.txt | 2 ++ 2 files changed, 5 insertions(+) create mode 100644 CHANGELOG.md create mode 100644 NOTICE.txt diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..56baaa2b7 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,3 @@ +303 See Other + +Location: https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/changelog-client.html diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 000000000..72d057cab --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,2 @@ +Elasticsearch JavaScript Client +Copyright 2022 Elasticsearch B.V. From 7985aab8605be89c1ade802dcc63b4b66a8b7e46 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 2 Feb 2022 10:36:09 +0100 Subject: [PATCH 134/647] Updated CI configuration --- ...7.15.yml => elastic+elasticsearch-js+7.17.yml} | 8 ++++---- .ci/jobs/elastic+elasticsearch-js+8.0.yml | 15 +++++++++++++++ .ci/test-matrix.yml | 2 +- 3 files changed, 20 insertions(+), 5 deletions(-) rename .ci/jobs/{elastic+elasticsearch-js+7.15.yml => elastic+elasticsearch-js+7.17.yml} (60%) create mode 100644 .ci/jobs/elastic+elasticsearch-js+8.0.yml diff --git a/.ci/jobs/elastic+elasticsearch-js+7.15.yml b/.ci/jobs/elastic+elasticsearch-js+7.17.yml similarity index 60% rename from .ci/jobs/elastic+elasticsearch-js+7.15.yml rename to .ci/jobs/elastic+elasticsearch-js+7.17.yml index e37c37998..3e167ef70 100644 --- a/.ci/jobs/elastic+elasticsearch-js+7.15.yml +++ b/.ci/jobs/elastic+elasticsearch-js+7.17.yml @@ -1,13 +1,13 @@ --- - job: - name: elastic+elasticsearch-js+7.15 - display-name: 'elastic / elasticsearch-js # 7.15' - description: Testing the elasticsearch-js 7.15 branch. + name: elastic+elasticsearch-js+7.17 + display-name: 'elastic / elasticsearch-js # 7.17' + description: Testing the elasticsearch-js 7.17 branch. junit_results: "*-junit.xml" parameters: - string: name: branch_specifier - default: refs/heads/7.15 + default: refs/heads/7.17 description: the Git branch specifier to build (<branchName>, <tagName>, <commitId>, etc.) triggers: diff --git a/.ci/jobs/elastic+elasticsearch-js+8.0.yml b/.ci/jobs/elastic+elasticsearch-js+8.0.yml new file mode 100644 index 000000000..d70b83a61 --- /dev/null +++ b/.ci/jobs/elastic+elasticsearch-js+8.0.yml @@ -0,0 +1,15 @@ +--- +- job: + name: elastic+elasticsearch-js+8.0 + display-name: 'elastic / elasticsearch-js # 8.0' + description: Testing the elasticsearch-js 8.0 branch. + junit_results: "*-junit.xml" + parameters: + - string: + name: branch_specifier + default: refs/heads8.0 + description: the Git branch specifier to build (<branchName>, <tagName>, + <commitId>, etc.) 
+ triggers: + - github + - timed: 'H */12 * * *' diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml index 26b7dbcd5..e94350277 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -1,6 +1,6 @@ --- STACK_VERSION: - - 8.0.0-SNAPSHOT + - 8.1.0-SNAPSHOT NODE_JS_VERSION: - 16 From 071a6ba4bbfdbec28385b3ae335cb25ed9d6023d Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 2 Feb 2022 17:35:58 +0100 Subject: [PATCH 135/647] Updated CI configuration --- .ci/jobs/elastic+elasticsearch-js+8.0.yml | 2 +- .ci/jobs/elastic+elasticsearch-js+8.1.yml | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 .ci/jobs/elastic+elasticsearch-js+8.1.yml diff --git a/.ci/jobs/elastic+elasticsearch-js+8.0.yml b/.ci/jobs/elastic+elasticsearch-js+8.0.yml index d70b83a61..7c8f25244 100644 --- a/.ci/jobs/elastic+elasticsearch-js+8.0.yml +++ b/.ci/jobs/elastic+elasticsearch-js+8.0.yml @@ -7,7 +7,7 @@ parameters: - string: name: branch_specifier - default: refs/heads8.0 + default: refs/heads/8.0 description: the Git branch specifier to build (<branchName>, <tagName>, <commitId>, etc.) triggers: diff --git a/.ci/jobs/elastic+elasticsearch-js+8.1.yml b/.ci/jobs/elastic+elasticsearch-js+8.1.yml new file mode 100644 index 000000000..61e86ad5a --- /dev/null +++ b/.ci/jobs/elastic+elasticsearch-js+8.1.yml @@ -0,0 +1,15 @@ +--- +- job: + name: elastic+elasticsearch-js+8.1 + display-name: 'elastic / elasticsearch-js # 8.1' + description: Testing the elasticsearch-js 8.1 branch. + junit_results: "*-junit.xml" + parameters: + - string: + name: branch_specifier + default: refs/heads/8.1 + description: the Git branch specifier to build (<branchName>, <tagName>, + <commitId>, etc.) + triggers: + - github + - timed: 'H */12 * * *' From 451a805ecd06762a20d3a963ed8664dc389eeed4 Mon Sep 17 00:00:00 2001 From: delvedor Date: Thu, 3 Feb 2022 10:52:42 +0100 Subject: [PATCH 136/647] Bumped v8.2.0 --- .ci/test-matrix.yml | 2 +- package.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml index e94350277..94bfd779a 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -1,6 +1,6 @@ --- STACK_VERSION: - - 8.1.0-SNAPSHOT + - 8.2.0-SNAPSHOT NODE_JS_VERSION: - 16 diff --git a/package.json b/package.json index d7f97e538..d7125c402 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.1.0-beta.1", - "versionCanary": "8.1.0-canary.2", + "version": "8.2.0", + "versionCanary": "8.2.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From a0c5c98a996709ba23e306dfd609aec6d38e27dc Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Fri, 11 Feb 2022 09:19:34 +0100 Subject: [PATCH 137/647] Upgrade transport (#1618) --- package.json | 2 +- src/client.ts | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index d7125c402..db79de583 100644 --- a/package.json +++ b/package.json @@ -80,7 +80,7 @@ "xmlbuilder2": "^3.0.2" }, "dependencies": { - "@elastic/transport": "^8.1.0-beta.1", + "@elastic/transport": "^8.0.1", "tslib": "^2.3.0" }, "tap": { diff --git a/src/client.ts b/src/client.ts index 42e78dbc7..92650608a 100644 --- a/src/client.ts +++ b/src/client.ts @@ -251,7 +251,12 @@ export default class Client extends API { context: options.context, productCheck: 'Elasticsearch', maxResponseSize: options.maxResponseSize, - maxCompressedResponseSize: 
options.maxCompressedResponseSize + maxCompressedResponseSize: options.maxCompressedResponseSize, + vendoredHeaders: { + jsonContentType: 'application/vnd.elasticsearch+json; compatible-with=8', + ndjsonContentType: 'application/vnd.elasticsearch+x-ndjson; compatible-with=8', + accept: 'application/vnd.elasticsearch+json; compatible-with=8,text/plain' + } }) this.helpers = new Helpers({ From 759138c3751baa939f74b620274f2d46d60120fc Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Fri, 11 Feb 2022 10:23:07 +0100 Subject: [PATCH 138/647] Update docs for v8 (#1572) --- README.md | 75 +--- docs/advanced-config.asciidoc | 4 +- docs/basic-config.asciidoc | 28 +- docs/breaking-changes.asciidoc | 334 ------------------ docs/child.asciidoc | 4 +- docs/configuration.asciidoc | 1 - docs/connecting.asciidoc | 151 ++------ docs/examples/asStream.asciidoc | 29 +- docs/examples/bulk.asciidoc | 8 +- docs/examples/exists.asciidoc | 6 +- docs/examples/get.asciidoc | 6 +- docs/examples/ignore.asciidoc | 8 +- docs/examples/index.asciidoc | 2 - docs/examples/msearch.asciidoc | 10 +- docs/examples/proxy/api/autocomplete.js | 22 +- docs/examples/proxy/api/delete.js | 2 +- docs/examples/proxy/api/index.js | 5 +- docs/examples/proxy/api/search.js | 8 +- docs/examples/proxy/package.json | 2 +- .../proxy/utils/prepare-elasticsearch.js | 20 +- docs/examples/reindex.asciidoc | 40 +-- docs/examples/scroll.asciidoc | 26 +- docs/examples/search.asciidoc | 18 +- docs/examples/sql.query.asciidoc | 18 +- docs/examples/suggest.asciidoc | 24 +- docs/examples/transport.request.asciidoc | 8 +- docs/examples/typescript.asciidoc | 72 ---- docs/examples/update.asciidoc | 32 +- docs/examples/update_by_query.asciidoc | 28 +- docs/extend.asciidoc | 72 ---- docs/helpers.asciidoc | 35 +- docs/index.asciidoc | 1 - docs/installation.asciidoc | 8 +- docs/introduction.asciidoc | 93 +---- docs/observability.asciidoc | 76 ++-- docs/testing.asciidoc | 6 +- docs/typescript.asciidoc | 289 +++------------ 37 files changed, 311 insertions(+), 1260 deletions(-) delete mode 100644 docs/breaking-changes.asciidoc delete mode 100644 docs/examples/typescript.asciidoc delete mode 100644 docs/extend.asciidoc diff --git a/README.md b/README.md index 5d06668ac..66cd87671 100644 --- a/README.md +++ b/README.md @@ -45,6 +45,7 @@ of `^7.10.0`). 
| --------------- |------------------| ---------------------- | | `8.x` | `December 2019` | `7.11` (early 2021) | | `10.x` | `April 2021` | `7.12` (mid 2021) | +| `12.x` | `April 2022` | `8.2` (early 2022) | ### Compatibility @@ -53,7 +54,7 @@ Elasticsearch language clients are only backwards compatible with default distri | Elasticsearch Version | Client Version | | --------------------- |----------------| -| `main` | `main` | +| `8.x` | `8.x` | | `7.x` | `7.x` | | `6.x` | `6.x` | | `5.x` | `5.x` | @@ -74,11 +75,9 @@ We recommend that you write a lightweight proxy that uses this client instead, y - [Usage](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#client-usage) - [Client configuration](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-configuration.html) - [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html) -- [Breaking changes coming from the old client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/breaking-changes.html) - [Authentication](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#authentication) - [Observability](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html) - [Creating a child client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html) -- [Extend the client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/extend.html) - [Client helpers](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html) - [Typescript support](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/typescript.html) - [Testing](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-testing.html) @@ -86,48 +85,6 @@ We recommend that you write a lightweight proxy that uses this client instead, y ## Quick start -First of all, require the client and initialize it: -```js -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) -``` - -You can use both the callback-style API and the promise-style API, both behave the same way. -```js -// promise API -const result = await client.search({ - index: 'my-index', - body: { - query: { - match: { hello: 'world' } - } - } -}) - -// callback API -client.search({ - index: 'my-index', - body: { - query: { - match: { hello: 'world' } - } - } -}, (err, result) => { - if (err) console.log(err) -}) -``` -The returned value of **every** API call is formed as follows: -```ts -{ - body: object | boolean - statusCode: number - headers: object - warnings: [string] - meta: object -} -``` - -Let's see a complete example! ```js 'use strict' @@ -138,8 +95,7 @@ async function run () { // Let's start by indexing some data await client.index({ index: 'game-of-thrones', - // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6 - body: { + document: { character: 'Ned Stark', quote: 'Winter is coming.' } @@ -147,8 +103,7 @@ async function run () { await client.index({ index: 'game-of-thrones', - // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6 - body: { + document: { character: 'Daenerys Targaryen', quote: 'I am the blood of the dragon.' 
} @@ -156,8 +111,7 @@ async function run () { await client.index({ index: 'game-of-thrones', - // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6 - body: { + document: { character: 'Tyrion Lannister', quote: 'A mind needs books like a sword needs a whetstone.' } @@ -168,17 +122,14 @@ async function run () { await client.indices.refresh({ index: 'game-of-thrones' }) // Let's search! - const { body } = await client.search({ + const result= await client.search({ index: 'game-of-thrones', - // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6 - body: { - query: { - match: { quote: 'winter' } - } + query: { + match: { quote: 'winter' } } }) - console.log(body.hits.hits) + console.log(result.hits.hits) } run().catch(console.log) @@ -211,13 +162,13 @@ const { Client: Client7 } = require('es7') const client6 = new Client6({ node: 'http://localhost:9200/' }) const client7 = new Client7({ node: 'http://localhost:9201/' }) -client6.info(console.log) -client7.info(console.log) +client6.info().then(console.log, console.log) +client7.info().then(console.log, console.log) ``` -Finally, if you want to install the client for the next version of Elasticsearch *(the one that lives in Elasticsearch’s master branch)*, you can use the following command: +Finally, if you want to install the client for the next version of Elasticsearch *(the one that lives in Elasticsearch’s main branch)*, you can use the following command: ```sh -npm install esmaster@github:elastic/elasticsearch-js +npm install esmain@github:elastic/elasticsearch-js ``` ## License diff --git a/docs/advanced-config.asciidoc b/docs/advanced-config.asciidoc index 1308b806a..34eb3d750 100644 --- a/docs/advanced-config.asciidoc +++ b/docs/advanced-config.asciidoc @@ -45,9 +45,9 @@ is performed here, this means that if you want to swap the default HTTP client [source,js] ---- -const { Client, Connection } = require('@elastic/elasticsearch') +const { Client, BaseConnection } = require('@elastic/elasticsearch') -class MyConnection extends Connection { +class MyConnection extends BaseConnection { request (params, callback) { // your code } diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index c9b4660dd..363326f20 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -32,7 +32,7 @@ Or it can be an object (or an array of objects) that represents the node: ---- node: { url: new URL('http://localhost:9200/'), - ssl: 'ssl options', + tls: 'tls options', agent: 'http agent options', id: 'custom node id', headers: { 'custom': 'headers' } @@ -118,8 +118,8 @@ _Default:_ `false` _Options:_ `'gzip'`, `false` + _Default:_ `false` -|`ssl` -|`http.SecureContextOptions` - ssl https://nodejs.org/api/tls.html[configuraton]. + +|`tls` +|`http.SecureContextOptions` - tls https://nodejs.org/api/tls.html[configuraton]. + _Default:_ `null` |`proxy` @@ -267,24 +267,4 @@ _Default:_ `null` |`number` - When configured, it verifies that the compressed response size is lower than the configured number, if it's higher it will abort the request. It cannot be higher than buffer.constants.MAX_LENTGH + _Default:_ `null` -|=== - -[discrete] -==== Performances considerations - -By default, the client will protection you against prototype poisoning attacks. -Read https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08[this article] to learn more. 
-If needed you can disable prototype poisoning protection entirely or one of the two checks. -Read the `secure-json-parse` https://github.com/fastify/secure-json-parse[documentation] to learn more. - -While it's good to be safe, you should know that security always comes with a cost. -With big enough payloads, this security check could causea drop in the overall performances, -which might be a problem for your application. -If you know you can trust the data stored in Elasticsearch, you can safely disable this check. - -[source,js] ----- -const client = new Client({ - disablePrototypePoisoningProtection: true -}) ----- +|=== \ No newline at end of file diff --git a/docs/breaking-changes.asciidoc b/docs/breaking-changes.asciidoc deleted file mode 100644 index 9942eb9a8..000000000 --- a/docs/breaking-changes.asciidoc +++ /dev/null @@ -1,334 +0,0 @@ -[[breaking-changes]] -=== Breaking changes coming from the old client - -If you were already using the previous version of this client – the one you used -to install with `npm install elasticsearch` – you will encounter some breaking -changes. - - -[discrete] -==== Don’t panic! - -Every breaking change was carefully weighed, and each is justified. Furthermore, -the new codebase has been rewritten with modern JavaScript and has been -carefully designed to be easy to maintain. - - -[discrete] -==== Breaking changes - -* Minimum supported version of Node.js is `v8`. - -* Everything has been rewritten using ES6 classes to help users extend the -defaults more easily. - -* There is no longer an integrated logger. The client now is an event emitter -that emits the following events: `request`, `response`, and `error`. - -* The code is no longer shipped with all the versions of the API, but only that -of the package’s major version. This means that if you are using {es} `v6`, you -are required to install `@elastic/elasticsearch@6`, and so on. - -* The internals are completely different, so if you used to tweak them a lot, -you will need to refactor your code. The public API should be almost the same. - -* There is no longer browser support, for that will be distributed via another -module: `@elastic/elasticsearch-browser`. This module is intended for Node.js -only. - -* The returned value of an API call will no longer be the `body`, `statusCode`, -and `headers` for callbacks, and only the `body` for promises. The new returned -value will be a unique object containing the `body`, `statusCode`, `headers`, -`warnings`, and `meta`, for both callback and promises. - - -[source,js] ----- -// before -const body = await client.search({ - index: 'my-index', - body: { foo: 'bar' } -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, (err, body, statusCode, headers) => { - if (err) console.log(err) -}) - -// after -const { body, statusCode, headers, warnings } = await client.search({ - index: 'my-index', - body: { foo: 'bar' } -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, (err, { body, statusCode, headers, warnings }) => { - if (err) console.log(err) -}) ----- - - -* Errors: there is no longer a custom error class for every HTTP status code -(such as `BadRequest` or `NotFound`). There is instead a single `ResponseError`. -Every error class has been renamed, and now each is suffixed with `Error` at the -end. - -* Removed errors: `RequestTypeError`, `Generic`, and all the status code -specific errors (such as `BadRequest` or `NotFound`). 
- -* Added errors: `ConfigurationError` (in case of bad configurations) and -`ResponseError` that contains all the data you may need to handle the specific -error, such as `statusCode`, `headers`, `body`, and `message`. - - -* Renamed errors: - -** `RequestTimeout` (408 statusCode) => `TimeoutError` -** `ConnectionFault` => `ConnectionError` -** `NoConnections` => `NoLivingConnectionsError` -** `Serialization` => `SerializationError` -** `Serialization` => `DeserializationError` - -* You must specify the port number in the configuration. In the previous -version, you can specify the host and port in a variety of ways. With the new -client, there is only one way to do it, via the `node` parameter. - -* Certificates are verified by default, if you want to disable certificates verification, you should set the `rejectUnauthorized` option to `false` inside the `ssl` configuration: - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - ssl: { rejectUnauthorized: false } -}) ----- - -* The `plugins` option has been removed. If you want to extend the client now, -you should use the `client.extend` API. - -[source,js] ----- -// before -const { Client } = require('elasticsearch') -const client = new Client({ plugins: [...] }) - -// after -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ ... }) -client.extend(...) ----- - -* There is a clear distinction between the API related parameters and the client -related configurations. The parameters `ignore`, `headers`, `requestTimeout` and -`maxRetries` are no longer part of the API object and you need to specify them -in a second option object. - -[source,js] ----- -// before -const body = await client.search({ - index: 'my-index', - body: { foo: 'bar' }, - ignore: [404] -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' }, - ignore: [404] -}, (err, body, statusCode, headers) => { - if (err) console.log(err) -}) - -// after -const { body, statusCode, headers, warnings } = await client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - ignore: [404] -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - ignore: [404] -}, (err, { body, statusCode, headers, warnings }) => { - if (err) console.log(err) -}) ----- - -* The `transport.request` method no longer accepts the `query` key. Use the -`querystring` key instead (which can be a string or an object). You also -need to send a bulk-like request instead of the `body` key, use the `bulkBody` -key. In this method, the client specific parameters should be passed as a second -object. - -[source,js] ----- -// before -const body = await client.transport.request({ - method: 'GET', - path: '/my-index/_search', - body: { foo: 'bar' }, - query: { bar: 'baz' } - ignore: [404] -}) - -client.transport.request({ - method: 'GET', - path: '/my-index/_search', - body: { foo: 'bar' }, - query: { bar: 'baz' } - ignore: [404] -}, (err, body, statusCode, headers) => { - if (err) console.log(err) -}) - -// after -const { body, statusCode, headers, warnings } = await client.transport.request({ - method: 'GET', - path: '/my-index/_search', - body: { foo: 'bar' }, - querystring: { bar: 'baz' } -}, { - ignore: [404] -}) - -client.transport.request({ - method: 'GET', - path: '/my-index/_search', - body: { foo: 'bar' }, - querystring: { bar: 'baz' } -}, { - ignore: [404] -}, (err, { body, statusCode, headers, warnings }) => { - if (err) console.log(err) -}) ----- - -[discrete] -==== Talk is cheap. 
Show me the code. - -You can find a code snippet with the old client below followed by the same code -logic but with the new client. - -[source,js] ----- -const { Client, errors } = require('elasticsearch') -const client = new Client({ - host: 'http://localhost:9200/', - plugins: [utility] -}) - -async function run () { - try { - const body = await client.search({ - index: 'game-of-thrones', - body: { - query: { - match: { quote: 'winter' } - } - } - ignore: [404] - }) - console.log(body) - } catch (err) { - if (err instanceof errors.BadRequest) { - console.log('Bad request') - } else { - console.log(err) - } - } -} - -function utility (Client, config, components) { - const ca = components.clientAction.factory - Client.prototype.utility = components.clientAction.namespaceFactory() - const utility = Client.prototype.utility.prototype - - utility.index = ca({ - params: { - refresh: { - type: 'enum', - options: [ - 'true', - 'false', - 'wait_for', - '' - ] - }, - }, - urls: [ - { - fmt: '/<%=index%>/_doc', - req: { - index: { - type: 'string', - required: true - } - } - } - ], - needBody: true, - method: 'POST' - }) -}) ----- - -And now with the new client. - -[source,js] ----- -const { Client, errors } = require('@elastic/elasticsearch') -// NOTE: `host` has been renamed to `node`, -// and `plugins` is no longer supported -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - try { - // NOTE: we are using the destructuring assignment - const { body } = await client.search({ - index: 'game-of-thrones', - body: { - query: { - match: { quote: 'winter' } - } - } - // NOTE: `ignore` now is in a separated object - }, { - ignore: [404] - }) - console.log(body) - } catch (err) { - // NOTE: we are checking the `statusCode` property - if (err.statusCode === 400) { - console.log('Bad request') - } else { - console.log(err) - } - } -} - -// NOTE: we can still extend the client, but with a different API. -// This new API is a little bit more verbose, since you must write -// your own validations, but it's way more flexible. -client.extend('utility.index', ({ makeRequest, ConfigurationError }) => { - return function utilityIndex (params, options) { - const { body, index, ...querystring } = params - if (body == null) throw new ConfigurationError('Missing body') - if (index == null) throw new ConfigurationError('Missing index') - const requestParams = { - method: 'POST', - path: `/${index}/_doc`, - body: body, - querystring - } - return makeRequest(requestParams, options) - } -}) ----- diff --git a/docs/child.asciidoc b/docs/child.asciidoc index 9c06c9f25..5e1abdee7 100644 --- a/docs/child.asciidoc +++ b/docs/child.asciidoc @@ -28,6 +28,6 @@ const child = client.child({ requestTimeout: 1000 }) -client.info(console.log) -child.info(console.log) +client.info().then(console.log, console.log) +child.info().then(console.log, console.log) ---- \ No newline at end of file diff --git a/docs/configuration.asciidoc b/docs/configuration.asciidoc index 2cd2114a8..e5c4f32f8 100644 --- a/docs/configuration.asciidoc +++ b/docs/configuration.asciidoc @@ -8,5 +8,4 @@ section, you can see the possible options that you can use to configure it. * <> * <> * <> -* <> * <> diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index 03bc4532d..57510ae12 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -32,7 +32,7 @@ the `auth` option. 
NOTE: When connecting to Elastic Cloud, the client will automatically enable
both request and response compression by default, since it yields significant
-throughput improvements. Moreover, the client will also set the ssl option
+throughput improvements. Moreover, the client will also set the tls option
`secureProtocol` to `TLSv1_2_method` unless specified otherwise. You can still
override this option by configuring them.

@@ -151,13 +151,13 @@ const client = new Client({
 [discrete]
-[[auth-ssl]]
-==== SSL configuration
+[[auth-tls]]
+==== TLS configuration

Without any additional configuration you can specify `https://` node urls, and
the certificates used to sign these requests will be verified. To turn off
-certificate verification, you must specify an `ssl` object in the top level
-config and set `rejectUnauthorized: false`. The default `ssl` values are the
+certificate verification, you must specify a `tls` object in the top level
+config and set `rejectUnauthorized: false`. The default `tls` values are the
same that Node.js's
https://nodejs.org/api/tls.html#tls_tls_connect_options_callback[`tls.connect()`]
uses.

@@ -170,7 +170,7 @@ const client = new Client({
    username: 'elastic',
    password: 'changeme'
  },
-  ssl: {
+  tls: {
    ca: fs.readFileSync('./cacert.pem'),
    rejectUnauthorized: false
  }

@@ -193,7 +193,7 @@ const client = new Client({
  auth: { ... },
  // the fingerprint (SHA256) of the CA certificate that is used to sign the certificate that the Elasticsearch node presents for TLS.
  caFingerprint: '20:0D:CA:FA:76:...',
-  ssl: {
+  tls: {
    // might be required if it's a self-signed certificate
    rejectUnauthorized: false
  }

@@ -214,31 +214,32 @@ and every method exposes the same signature.
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

-// promise API
const result = await client.search({
  index: 'my-index',
-  body: {
-    query: {
-      match: { hello: 'world' }
-    }
+  query: {
+    match: { hello: 'world' }
  }
})
+----
+
+The returned value of every API call is the response body from {es}.
+If you need to access additional metadata, such as the status code or headers,
+you must specify `meta: true` in the request options:
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200/' })

-// callback API
-client.search({
+
+const result = await client.search({
  index: 'my-index',
-  body: {
-    query: {
-      match: { hello: 'world' }
-    }
+  query: {
+    match: { hello: 'world' }
  }
-}, (err, result) => {
-  if (err) console.log(err)
-})
+}, { meta: true })
----

-The returned value of every API call is designed as follows:
-
+In this case, the result will be:
[source,ts]
----
{
  body: object | boolean
  statusCode: number
  headers: object
  warnings: [string]
  meta: object
}
----

NOTE: The body is a boolean value when you use `HEAD` APIs.

-The above value is returned even if there is an error during the execution of
-the request, this means that you can safely use the
-https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Destructuring_assignment[destructuring assignment].
-
-The `meta` key contains all the information about the request, such as attempt,
-options, and the connection that has been used.
- -[source,js] ----- -// promise API -const { body } = await client.search({ - index: 'my-index', - body: { - query: { - match: { hello: 'world' } - } - } -}) - -// callback API -client.search({ - index: 'my-index', - body: { - query: { - match: { hello: 'world' } - } - } -}, (err, { body }) => { - if (err) console.log(err) -}) ----- - - [discrete] ==== Aborting a request -If needed, you can abort a running request by calling the `request.abort()` -method returned by the API. +If needed, you can abort a running request by using the `AbortController` standard. CAUTION: If you abort a request, the request will fail with a `RequestAbortedError`. @@ -297,51 +264,21 @@ CAUTION: If you abort a request, the request will fail with a [source,js] ---- -const request = client.search({ - index: 'my-index', - body: { - query: { - match: { hello: 'world' } - } - } -}, { - ignore: [404], - maxRetries: 3 -}, (err, result) => { - if (err) { - console.log(err) // RequestAbortedError - } else { - console.log(result) - } -}) - -request.abort() ----- +const AbortController = require('node-abort-controller') +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ node: 'http://localhost:9200/' }) -The same behavior is valid for the promise style API as well. +const abortController = new AbortController() +setImmediate(() => abortController.abort()) -[source,js] ----- -const request = client.search({ +const result = await client.search({ index: 'my-index', - body: { - query: { - match: { hello: 'world' } - } + query: { + match: { hello: 'world' } } -}, { - ignore: [404], - maxRetries: 3 -}) - -request - .then(result => console.log(result)) - .catch(err => console.log(err)) // RequestAbortedError - -request.abort() +}, { signal: abortController.signal }) ---- - [discrete] ==== Request specific options @@ -349,7 +286,6 @@ If needed you can pass request specific options in a second object: [source,js] ---- -// promise API const result = await client.search({ index: 'my-index', body: { @@ -361,21 +297,6 @@ const result = await client.search({ ignore: [404], maxRetries: 3 }) - -// callback API -client.search({ - index: 'my-index', - body: { - query: { - match: { hello: 'world' } - } - } -}, { - ignore: [404], - maxRetries: 3 -}, (err, { body }) => { - if (err) console.log(err) -}) ---- @@ -427,6 +348,10 @@ _Default:_ `null` |`number` - When configured, it verifies that the compressed response size is lower than the configured number, if it's higher it will abort the request. It cannot be higher than buffer.constants.MAX_LENTGH + _Default:_ `null` +|`signal` +|`AbortSignal` - The AbortSignal instance to allow request abortion. + +_Default:_ `null` + |=== [discrete] diff --git a/docs/examples/asStream.asciidoc b/docs/examples/asStream.asciidoc index a80fd549b..dd7f9f21a 100644 --- a/docs/examples/asStream.asciidoc +++ b/docs/examples/asStream.asciidoc @@ -12,9 +12,9 @@ const { Client } = require('@elastic/elasticsearch') const client = new Client({ node: 'http://localhost:9200/' }) async function run () { - const { body: bulkResponse } = await client.bulk({ + const bulkResponse = await client.bulk({ refresh: true, - body: [ + operations: [ // operation to perform { index: { _index: 'game-of-thrones' } }, // the document to index @@ -43,13 +43,11 @@ async function run () { } // Let's search! 
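  // Note: with `asStream: true` the client resolves with the raw response
  // stream instead of a parsed body, so both variants below consume the
  // stream themselves and JSON.parse the collected payload.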
-  const { body } = await client.search({
+  const result = await client.search({
    index: 'game-of-thrones',
-    body: {
-      query: {
-        match: {
-          quote: 'winter'
-        }
+    query: {
+      match: {
+        quote: 'winter'
      }
    }
  }, {
    asStream: true
  })

  // stream async iteration, available in Node.js ≥ 10
  let payload = ''
-  body.setEncoding('utf8')
-  for await (const chunk of body) {
+  result.setEncoding('utf8')
+  for await (const chunk of result) {
    payload += chunk
  }
  console.log(JSON.parse(payload))

  // classic stream callback style
  let payload = ''
-  body.setEncoding('utf8')
-  body.on('data', chunk => { payload += chunk })
-  body.on('error', console.log)
-  body.on('end', () => {
+  result.setEncoding('utf8')
+  result.on('data', chunk => { payload += chunk })
+  result.on('error', console.log)
+  result.on('end', () => {
    console.log(JSON.parse(payload))
  })
}

@@ -91,9 +89,10 @@ const fastify = require('fastify')()

fastify.post('/search/:index', async (req, reply) => {
  const { body, statusCode, headers } = await client.search({
    index: req.params.index,
-    body: req.body
+    ...req.body
  }, {
-    asStream: true
+    asStream: true,
+    meta: true
  })

  reply.code(statusCode).headers(headers)

diff --git a/docs/examples/bulk.asciidoc b/docs/examples/bulk.asciidoc
index 2f05b4cf1..e7d9261be 100644
--- a/docs/examples/bulk.asciidoc
+++ b/docs/examples/bulk.asciidoc
@@ -19,7 +19,7 @@ const client = new Client({
async function run () {
  await client.indices.create({
    index: 'tweets',
-    body: {
+    operations: {
      mappings: {
        properties: {
          id: { type: 'integer' },
          text: { type: 'text' },
          user: { type: 'keyword' },
          time: { type: 'date' }
        }
      }

@@ -58,9 +58,9 @@ async function run () {
    date: new Date()
  }]

-  const body = dataset.flatMap(doc => [{ index: { _index: 'tweets' } }, doc])
+  const operations = dataset.flatMap(doc => [{ index: { _index: 'tweets' } }, doc])

-  const { body: bulkResponse } = await client.bulk({ refresh: true, body })
+  const bulkResponse = await client.bulk({ refresh: true, operations })

  if (bulkResponse.errors) {
    const erroredDocuments = []

@@ -84,7 +84,7 @@ async function run () {
    console.log(erroredDocuments)
  }

-  const { body: count } = await client.count({ index: 'tweets' })
+  const count = await client.count({ index: 'tweets' })
  console.log(count)
}

diff --git a/docs/examples/exists.asciidoc b/docs/examples/exists.asciidoc
index 851f8c471..368f4ae2b 100644
--- a/docs/examples/exists.asciidoc
+++ b/docs/examples/exists.asciidoc
@@ -16,18 +16,18 @@ async function run () {
  await client.index({
    index: 'game-of-thrones',
    id: '1',
-    body: {
+    document: {
      character: 'Ned Stark',
      quote: 'Winter is coming.'
    }
  })

-  const { body } = await client.exists({
+  const exists = await client.exists({
    index: 'game-of-thrones',
    id: 1
  })

-  console.log(body) // true
+  console.log(exists) // true
}

run().catch(console.log)

diff --git a/docs/examples/get.asciidoc b/docs/examples/get.asciidoc
index 18caf1f4d..9302c7607 100644
--- a/docs/examples/get.asciidoc
+++ b/docs/examples/get.asciidoc
@@ -16,18 +16,18 @@ async function run () {
  await client.index({
    index: 'game-of-thrones',
    id: '1',
-    body: {
+    document: {
      character: 'Ned Stark',
      quote: 'Winter is coming.'
} }) - const { body } = await client.get({ + const document = await client.get({ index: 'game-of-thrones', id: '1' }) - console.log(body) + console.log(document) } run().catch(console.log) diff --git a/docs/examples/ignore.asciidoc b/docs/examples/ignore.asciidoc index a46f3e708..40b570726 100644 --- a/docs/examples/ignore.asciidoc +++ b/docs/examples/ignore.asciidoc @@ -11,9 +11,9 @@ const { Client } = require('@elastic/elasticsearch') const client = new Client({ node: 'http://localhost:9200/' }) async function run () { - const { body: bulkResponse } = await client.bulk({ + const bulkResponse = await client.bulk({ refresh: true, - body: [ + operations: [ // operation to perform { index: { _index: 'game-of-thrones' } }, // the document to index @@ -42,7 +42,7 @@ async function run () { } // Let's search! - const { body } = await client.search({ + const result = await client.search({ index: 'game-of-thrones', body: { query: { @@ -55,7 +55,7 @@ async function run () { ignore: [404] }) - console.log(body) // ResponseError + console.log(result) // ResponseError } run().catch(console.log) diff --git a/docs/examples/index.asciidoc b/docs/examples/index.asciidoc index 7aaf38f56..e786675ec 100644 --- a/docs/examples/index.asciidoc +++ b/docs/examples/index.asciidoc @@ -17,7 +17,6 @@ Following you can find some examples on how to use the client. * Executing a <> request; * I need <>; * How to use the <> method; -* How to use <>; include::asStream.asciidoc[] include::bulk.asciidoc[] @@ -29,7 +28,6 @@ include::scroll.asciidoc[] include::search.asciidoc[] include::suggest.asciidoc[] include::transport.request.asciidoc[] -include::typescript.asciidoc[] include::sql.query.asciidoc[] include::update.asciidoc[] include::update_by_query.asciidoc[] diff --git a/docs/examples/msearch.asciidoc b/docs/examples/msearch.asciidoc index 3773318f4..445bf866c 100644 --- a/docs/examples/msearch.asciidoc +++ b/docs/examples/msearch.asciidoc @@ -12,9 +12,9 @@ const { Client } = require('@elastic/elasticsearch') const client = new Client({ node: 'http://localhost:9200/' }) async function run () { - const { body: bulkResponse } = await client.bulk({ + const bulkResponse = await client.bulk({ refresh: true, - body: [ + operations: [ { index: { _index: 'game-of-thrones' } }, { character: 'Ned Stark', @@ -40,8 +40,8 @@ async function run () { process.exit(1) } - const { body } = await client.msearch({ - body: [ + const result = await client.msearch({ + searches: [ { index: 'game-of-thrones' }, { query: { match: { character: 'Daenerys' } } }, @@ -50,7 +50,7 @@ async function run () { ] }) - console.log(body.responses) + console.log(result.responses) } run().catch(console.log) diff --git a/docs/examples/proxy/api/autocomplete.js b/docs/examples/proxy/api/autocomplete.js index e103a6eaf..fb18298cf 100644 --- a/docs/examples/proxy/api/autocomplete.js +++ b/docs/examples/proxy/api/autocomplete.js @@ -70,17 +70,15 @@ module.exports = async (req, res) => { // expose you to the risk that a malicious user // could overload your cluster by crafting // expensive queries. 
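    // Setup reminder (illustrative sketch, assumed to run once at index
    // creation time rather than in this handler): the completion suggester
    // used below requires the target field to be mapped as `completion`.
    //
    //   await client.indices.create({
    //     index: 'my-index', // hypothetical name; the real index is configured elsewhere
    //     mappings: {
    //       properties: {
    //         suggest: { type: 'completion' }
    //       }
    //     }
    //   })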
- body: { - _source: ['id', 'url', 'name'], // the fields you want to show in the autocompletion - size: 0, - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-suggesters-completion.html - suggest: { - suggestions: { - prefix: req.query.q, - completion: { - field: 'suggest', - size: 5 - } + _source: ['id', 'url', 'name'], // the fields you want to show in the autocompletion + size: 0, + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-suggesters-completion.html + suggest: { + suggestions: { + prefix: req.query.q, + completion: { + field: 'suggest', + size: 5 } } } @@ -93,7 +91,7 @@ module.exports = async (req, res) => { // It might be useful to configure http control caching headers // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control // res.setHeader('stale-while-revalidate', '30') - res.json(response.body) + res.json(response) } catch (err) { res.status(err.statusCode || 500) res.json({ diff --git a/docs/examples/proxy/api/delete.js b/docs/examples/proxy/api/delete.js index 75d4c9a00..b76108428 100644 --- a/docs/examples/proxy/api/delete.js +++ b/docs/examples/proxy/api/delete.js @@ -62,7 +62,7 @@ module.exports = async (req, res) => { } }) - res.json(response.body) + res.json(response) } catch (err) { res.status(err.statusCode || 500) res.json({ diff --git a/docs/examples/proxy/api/index.js b/docs/examples/proxy/api/index.js index a75affe5e..901139713 100644 --- a/docs/examples/proxy/api/index.js +++ b/docs/examples/proxy/api/index.js @@ -56,11 +56,12 @@ module.exports = async (req, res) => { const response = await client.index({ index: INDEX, id: req.query.id, - body: req.body + document: req.body }, { headers: { Authorization: `ApiKey ${token}` - } + }, + meta: true }) res.status(response.statusCode) diff --git a/docs/examples/proxy/api/search.js b/docs/examples/proxy/api/search.js index da8896f58..8659e08f4 100644 --- a/docs/examples/proxy/api/search.js +++ b/docs/examples/proxy/api/search.js @@ -60,10 +60,8 @@ module.exports = async (req, res) => { // expose you to the risk that a malicious user // could overload your cluster by crafting // expensive queries. 
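    // An extra guard worth considering here (illustrative sketch, not part
    // of the handler as written): reject non-string or oversized input
    // before it reaches the cluster.
    //
    //   if (typeof req.body.text !== 'string' || req.body.text.length > 200) {
    //     res.status(400)
    //     return res.json({ error: 'invalid search text' })
    //   }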
- body: { - query: { - match: { field: req.body.text } - } + query: { + match: { field: req.body.text } } }, { headers: { @@ -74,7 +72,7 @@ module.exports = async (req, res) => { // It might be useful to configure http control caching headers // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control // res.setHeader('stale-while-revalidate', '30') - res.json(response.body) + res.json(response) } catch (err) { res.status(err.statusCode || 500) res.json({ diff --git a/docs/examples/proxy/package.json b/docs/examples/proxy/package.json index 34af9b020..bfe19ae84 100644 --- a/docs/examples/proxy/package.json +++ b/docs/examples/proxy/package.json @@ -11,7 +11,7 @@ "author": "Tomas Della Vedova", "license": "Apache-2.0", "dependencies": { - "@elastic/elasticsearch": "^7.10.0" + "@elastic/elasticsearch": "^8.0.0" }, "devDependencies": { "standard": "^16.0.3" diff --git a/docs/examples/proxy/utils/prepare-elasticsearch.js b/docs/examples/proxy/utils/prepare-elasticsearch.js index 257837f51..bf833f0c2 100644 --- a/docs/examples/proxy/utils/prepare-elasticsearch.js +++ b/docs/examples/proxy/utils/prepare-elasticsearch.js @@ -43,21 +43,19 @@ async function generateApiKeys (opts) { } }) - const { body } = await client.security.createApiKey({ - body: { - name: 'elasticsearch-proxy', - role_descriptors: { - 'elasticsearch-proxy-users': { - index: [{ - names: indexNames, - privileges - }] - } + const result = await client.security.createApiKey({ + name: 'elasticsearch-proxy', + role_descriptors: { + 'elasticsearch-proxy-users': { + index: [{ + names: indexNames, + privileges + }] } } }) - return Buffer.from(`${body.id}:${body.api_key}`).toString('base64') + return Buffer.from(`${result.id}:${result.api_key}`).toString('base64') } generateApiKeys() diff --git a/docs/examples/reindex.asciidoc b/docs/examples/reindex.asciidoc index aa722707f..984e21c99 100644 --- a/docs/examples/reindex.asciidoc +++ b/docs/examples/reindex.asciidoc @@ -20,7 +20,7 @@ const client = new Client({ node: 'http://localhost:9200/' }) async function run () { await client.index({ index: 'game-of-thrones', - body: { + document: { character: 'Ned Stark', quote: 'Winter is coming.', house: 'stark' @@ -29,7 +29,7 @@ async function run () { await client.index({ index: 'game-of-thrones', - body: { + document: { character: 'Arya Stark', quote: 'A girl is Arya Stark of Winterfell. 
And I\'m going home.', house: 'stark' @@ -39,7 +39,7 @@ async function run () { await client.index({ index: 'game-of-thrones', refresh: true, - body: { + document: { character: 'Tyrion Lannister', quote: 'A Lannister always pays his debts.', house: 'lannister' @@ -47,33 +47,29 @@ async function run () { }) await client.reindex({ - waitForCompletion: true, + wait_for_completion: true, refresh: true, - body: { - source: { - index: 'game-of-thrones', - query: { - match: { character: 'stark' } - } - }, - dest: { - index: 'stark-index' - }, - script: { - lang: 'painless', - source: 'ctx._source.remove("house")' + source: { + index: 'game-of-thrones', + query: { + match: { character: 'stark' } } + }, + dest: { + index: 'stark-index' + }, + script: { + lang: 'painless', + source: 'ctx._source.remove("house")' } }) - const { body } = await client.search({ + const result = await client.search({ index: 'stark-index', - body: { - query: { match_all: {} } - } + query: { match_all: {} } }) - console.log(body.hits.hits) + console.log(result.hits.hits) } run().catch(console.log) diff --git a/docs/examples/scroll.asciidoc b/docs/examples/scroll.asciidoc index c46493e73..90e6e6524 100644 --- a/docs/examples/scroll.asciidoc +++ b/docs/examples/scroll.asciidoc @@ -33,12 +33,12 @@ async function run () { const responseQueue = [] // Let's index some data! - const { body: bulkResponse } = await client.bulk({ + const bulkResponse = await client.bulk({ // here we are forcing an index refresh, // otherwise we will not get any result // in the consequent search refresh: true, - body: [ + operations: [ // operation to perform { index: { _index: 'game-of-thrones' } }, // the document to index @@ -76,17 +76,15 @@ async function run () { size: 1, // filter the source to only include the quote field _source: ['quote'], - body: { - query: { - match_all: {} - } + query: { + match_all: {} } }) responseQueue.push(response) while (responseQueue.length) { - const { body } = responseQueue.shift() + const body = responseQueue.shift() // collect the titles from this response body.hits.hits.forEach(function (hit) { @@ -127,7 +125,7 @@ async function * scrollSearch (params) { let response = await client.search(params) while (true) { - const sourceHits = response.body.hits.hits + const sourceHits = response.hits.hits if (sourceHits.length === 0) { break @@ -137,12 +135,12 @@ async function * scrollSearch (params) { yield hit } - if (!response.body._scroll_id) { + if (!response._scroll_id) { break } response = await client.scroll({ - scrollId: response.body._scroll_id, + scrollId: response._scroll_id, scroll: params.scroll }) } @@ -151,7 +149,7 @@ async function * scrollSearch (params) { async function run () { await client.bulk({ refresh: true, - body: [ + operations: [ { index: { _index: 'game-of-thrones' } }, { character: 'Ned Stark', @@ -177,10 +175,8 @@ async function run () { scroll: '30s', size: 1, _source: ['quote'], - body: { - query: { - match_all: {} - } + query: { + match_all: {} } } diff --git a/docs/examples/search.asciidoc b/docs/examples/search.asciidoc index fb4fe4d2b..2cf5b3c50 100644 --- a/docs/examples/search.asciidoc +++ b/docs/examples/search.asciidoc @@ -18,7 +18,7 @@ async function run () { // Let's start by indexing some data await client.index({ index: 'game-of-thrones', - body: { + document: { character: 'Ned Stark', quote: 'Winter is coming.' 
} @@ -26,7 +26,7 @@ async function run () { await client.index({ index: 'game-of-thrones', - body: { + document: { character: 'Daenerys Targaryen', quote: 'I am the blood of the dragon.' } @@ -38,25 +38,23 @@ async function run () { // otherwise we will not get any result // in the consequent search refresh: true, - body: { + document: { character: 'Tyrion Lannister', quote: 'A mind needs books like a sword needs a whetstone.' } }) // Let's search! - const { body } = await client.search({ + const result = await client.search({ index: 'game-of-thrones', - body: { - query: { - match: { - quote: 'winter' - } + query: { + match: { + quote: 'winter' } } }) - console.log(body.hits.hits) + console.log(result.hits.hits) } run().catch(console.log) diff --git a/docs/examples/sql.query.asciidoc b/docs/examples/sql.query.asciidoc index da5ff8b12..00505d2fd 100644 --- a/docs/examples/sql.query.asciidoc +++ b/docs/examples/sql.query.asciidoc @@ -22,7 +22,7 @@ const client = new Client({ node: 'http://localhost:9200/' }) async function run () { await client.index({ index: 'game-of-thrones', - body: { + document: { character: 'Ned Stark', quote: 'Winter is coming.', house: 'stark' @@ -31,7 +31,7 @@ async function run () { await client.index({ index: 'game-of-thrones', - body: { + document: { character: 'Arya Stark', quote: 'A girl is Arya Stark of Winterfell. And I\'m going home.', house: 'stark' @@ -41,25 +41,23 @@ async function run () { await client.index({ index: 'game-of-thrones', refresh: true, - body: { + document: { character: 'Tyrion Lannister', quote: 'A Lannister always pays his debts.', house: 'lannister' } }) - const { body } = await client.sql.query({ - body: { - query: "SELECT * FROM \"game-of-thrones\" WHERE house='stark'" - } + const result = await client.sql.query({ + query: "SELECT * FROM \"game-of-thrones\" WHERE house='stark'" }) - console.log(body) + console.log(result) - const data = body.rows.map(row => { + const data = result.rows.map(row => { const obj = {} for (let i = 0; i < row.length; i++) { - obj[body.columns[i].name] = row[i] + obj[result.columns[i].name] = row[i] } return obj }) diff --git a/docs/examples/suggest.asciidoc b/docs/examples/suggest.asciidoc index b03ece4e1..d4448a1a4 100644 --- a/docs/examples/suggest.asciidoc +++ b/docs/examples/suggest.asciidoc @@ -15,9 +15,9 @@ const { Client } = require('@elastic/elasticsearch') const client = new Client({ node: 'http://localhost:9200/' }) async function run () { - const { body: bulkResponse } = await client.bulk({ + const bulkResponse = await client.bulk({ refresh: true, - body: [ + operations: [ { index: { _index: 'game-of-thrones' } }, { character: 'Ned Stark', @@ -43,22 +43,20 @@ async function run () { process.exit(1) } - const { body } = await client.search({ + const result = await client.search({ index: 'game-of-thrones', - body: { - query: { - match: { quote: 'witner' } - }, - suggest: { - gotsuggest: { - text: 'witner', - term: { field: 'quote' } - } + query: { + match: { quote: 'winter' } + }, + suggest: { + gotsuggest: { + text: 'winter', + term: { field: 'quote' } } } }) - console.log(body) + console.log(result) } run().catch(console.log) diff --git a/docs/examples/transport.request.asciidoc b/docs/examples/transport.request.asciidoc index d74c6c68d..86482046f 100644 --- a/docs/examples/transport.request.asciidoc +++ b/docs/examples/transport.request.asciidoc @@ -23,9 +23,9 @@ const { Client } = require('@elastic/elasticsearch') const client = new Client({ node: 
'http://localhost:9200/' }) async function run () { - const { body: bulkResponse } = await client.bulk({ + const bulkResponse = await client.bulk({ refresh: true, - body: [ + operations: [ { index: { _index: 'game-of-thrones' } }, { character: 'Ned Stark', @@ -51,7 +51,7 @@ async function run () { process.exit(1) } - const { body } = await client.transport.request({ + const response = await client.transport.request({ method: 'POST', path: '/game-of-thrones/_search', body: { @@ -64,7 +64,7 @@ async function run () { querystring: {} }) - console.log(body) + console.log(response) } run().catch(console.log) diff --git a/docs/examples/typescript.asciidoc b/docs/examples/typescript.asciidoc deleted file mode 100644 index 2d39ed2ac..000000000 --- a/docs/examples/typescript.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -[[typescript_examples]] -=== Typescript - -The client offers a first-class support for TypeScript, since it ships the type -definitions for every exposed API. - -NOTE: If you are using TypeScript you will be required to use _snake_case_ style -to define the API parameters instead of _camelCase_. - -[source,ts] ----- -'use strict' - -import { Client, ApiResponse, RequestParams } from '@elastic/elasticsearch' -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run (): void { - // Let's start by indexing some data - const doc1: RequestParams.Index = { - index: 'game-of-thrones', - body: { - character: 'Ned Stark', - quote: 'Winter is coming.' - } - } - await client.index(doc1) - - const doc2: RequestParams.Index = { - index: 'game-of-thrones', - body: { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - } - } - await client.index(doc2) - - const doc3: RequestParams.Index = { - index: 'game-of-thrones', - // here we are forcing an index refresh, - // otherwise we will not get any result - // in the consequent search - refresh: true, - body: { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' - } - } - await client.index(doc3) - - // Let's search! 
- const params: RequestParams.Search = { - index: 'game-of-thrones', - body: { - query: { - match: { - quote: 'winter' - } - } - } - } - client - .search(params) - .then((result: ApiResponse) => { - console.log(result.body.hits.hits) - }) - .catch((err: Error) => { - console.log(err) - }) -} - -run() ----- \ No newline at end of file diff --git a/docs/examples/update.asciidoc b/docs/examples/update.asciidoc index 342bf2348..784a7d6d8 100644 --- a/docs/examples/update.asciidoc +++ b/docs/examples/update.asciidoc @@ -16,7 +16,7 @@ async function run () { await client.index({ index: 'game-of-thrones', id: '1', - body: { + document: { character: 'Ned Stark', quote: 'Winter is coming.', times: 0 @@ -26,23 +26,21 @@ async function run () { await client.update({ index: 'game-of-thrones', id: '1', - body: { - script: { - lang: 'painless', - source: 'ctx._source.times++' - // you can also use parameters - // source: 'ctx._source.times += params.count', - // params: { count: 1 } - } + script: { + lang: 'painless', + source: 'ctx._source.times++' + // you can also use parameters + // source: 'ctx._source.times += params.count', + // params: { count: 1 } } }) - const { body } = await client.get({ + const document = await client.get({ index: 'game-of-thrones', id: '1' }) - console.log(body) + console.log(document) } run().catch(console.log) @@ -62,7 +60,7 @@ async function run () { await client.index({ index: 'game-of-thrones', id: '1', - body: { + document: { character: 'Ned Stark', quote: 'Winter is coming.', isAlive: true @@ -72,19 +70,17 @@ async function run () { await client.update({ index: 'game-of-thrones', id: '1', - body: { - doc: { - isAlive: false - } + doc: { + isAlive: false } }) - const { body } = await client.get({ + const document = await client.get({ index: 'game-of-thrones', id: '1' }) - console.log(body) + console.log(document) } run().catch(console.log) diff --git a/docs/examples/update_by_query.asciidoc b/docs/examples/update_by_query.asciidoc index 85e4f3ff9..fdb198aec 100644 --- a/docs/examples/update_by_query.asciidoc +++ b/docs/examples/update_by_query.asciidoc @@ -15,7 +15,7 @@ const client = new Client({ node: 'http://localhost:9200/' }) async function run () { await client.index({ index: 'game-of-thrones', - body: { + document: { character: 'Ned Stark', quote: 'Winter is coming.' } @@ -24,7 +24,7 @@ async function run () { await client.index({ index: 'game-of-thrones', refresh: true, - body: { + document: { character: 'Arya Stark', quote: 'A girl is Arya Stark of Winterfell. And I\'m going home.' } @@ -33,27 +33,23 @@ async function run () { await client.updateByQuery({ index: 'game-of-thrones', refresh: true, - body: { - script: { - lang: 'painless', - source: 'ctx._source["house"] = "stark"' - }, - query: { - match: { - character: 'stark' - } + script: { + lang: 'painless', + source: 'ctx._source["house"] = "stark"' + }, + query: { + match: { + character: 'stark' } } }) - const { body } = await client.search({ + const result = await client.search({ index: 'game-of-thrones', - body: { - query: { match_all: {} } - } + query: { match_all: {} } }) - console.log(body.hits.hits) + console.log(result.hits.hits) } run().catch(console.log) diff --git a/docs/extend.asciidoc b/docs/extend.asciidoc deleted file mode 100644 index 069cacd86..000000000 --- a/docs/extend.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -[[extend]] -=== Extend the client - -Sometimes you need to reuse the same logic, or you want to build a custom API to -allow you simplify your code. 
The easiest way to achieve that is by extending -the client. - -NOTE: If you want to override existing methods, you should specify the -`{ force: true }` option. - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -client.extend('supersearch', ({ makeRequest, ConfigurationError }) => { - return function supersearch (params, options) { - const { - body, - index, - method, - ...querystring - } = params - - // params validation - if (body == null) { - throw new ConfigurationError('Missing required parameter: body') - } - - // build request object - const request = { - method: method || 'POST', - path: `/${encodeURIComponent(index)}/_search_`, - body, - querystring - } - - // build request options object - const requestOptions = { - ignore: options.ignore || null, - requestTimeout: options.requestTimeout || null, - maxRetries: options.maxRetries || null, - asStream: options.asStream || false, - headers: options.headers || null - } - - return makeRequest(request, requestOptions) - } -}) - -client.extend('utility.index', ({ makeRequest }) => { - return function _index (params, options) { - // your code - } -}) - -client.extend('utility.delete', ({ makeRequest }) => { - return function _delete (params, options) { - // your code - } -}) - -client.extend('indices.delete', { force: true }, ({ makeRequest }) => { - return function _delete (params, options) { - // your code - } -}) - -client.supersearch(...) -client.utility.index(...) -client.utility.delete(...) ----- \ No newline at end of file diff --git a/docs/helpers.asciidoc b/docs/helpers.asciidoc index 71dd9de15..f83d29144 100644 --- a/docs/helpers.asciidoc +++ b/docs/helpers.asciidoc @@ -341,23 +341,12 @@ const { Client } = require('@elastic/elasticsearch') const client = new Client({ node: 'http://localhost:9200/' }) const m = client.helpers.msearch() -// promise style API m.search( { index: 'stackoverflow' }, { query: { match: { title: 'javascript' } } } ) .then(result => console.log(result.body)) // or result.documents .catch(err => console.error(err)) - -// callback style API -m.search( - { index: 'stackoverflow' }, - { query: { match: { title: 'ruby' } } }, - (err, result) => { - if (err) console.error(err) - console.log(result.body)) // or result.documents - } -) ---- To create a new instance of the multi search (msearch) helper, you should access @@ -474,11 +463,9 @@ the query string. ---- const documents = await client.helpers.search({ index: 'stackoverflow', - body: { - query: { - match: { - title: 'javascript' - } + query: { + match: { + title: 'javascript' } } }) @@ -505,11 +492,9 @@ the `429` error and uses the `maxRetries` option of the client. ---- const scrollSearch = client.helpers.scrollSearch({ index: 'stackoverflow', - body: { - query: { - match: { - title: 'javascript' - } + query: { + match: { + title: 'javascript' } } }) @@ -564,11 +549,9 @@ automatically adds `filter_path=hits.hits._source` to the query string. 
---- const scrollSearch = client.helpers.scrollDocuments({ index: 'stackoverflow', - body: { - query: { - match: { - title: 'javascript' - } + query: { + match: { + title: 'javascript' } } }) diff --git a/docs/index.asciidoc b/docs/index.asciidoc index 959296357..fd8ab0484 100644 --- a/docs/index.asciidoc +++ b/docs/index.asciidoc @@ -10,7 +10,6 @@ include::configuration.asciidoc[] include::basic-config.asciidoc[] include::advanced-config.asciidoc[] include::child.asciidoc[] -include::extend.asciidoc[] include::testing.asciidoc[] include::integrations.asciidoc[] include::observability.asciidoc[] diff --git a/docs/installation.asciidoc b/docs/installation.asciidoc index 0d44a1518..bd8684aba 100644 --- a/docs/installation.asciidoc +++ b/docs/installation.asciidoc @@ -56,6 +56,10 @@ of `^7.10.0`). |`10.x` |April 2021 |`7.12` (mid 2021) + +|`12.x` +|April 2022 +|`8.2` (early 2022) |=== [discrete] @@ -70,8 +74,8 @@ Elasticsearch language clients are only backwards compatible with default distri |{es} Version |Client Version -|`main` -|`main` +|`8.x` +|`8.x` |`7.x` |`7.x` diff --git a/docs/introduction.asciidoc b/docs/introduction.asciidoc index 6cb9df2bc..5d8ba3cbe 100644 --- a/docs/introduction.asciidoc +++ b/docs/introduction.asciidoc @@ -4,9 +4,6 @@ This is the official Node.js client for {es}. This page gives a quick overview about the features of the client. -Refer to <> for breaking changes coming from the old -client. - [discrete] === Features @@ -23,59 +20,6 @@ client. [discrete] === Quick start -First of all, require, then initialize the client: - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) ----- - - -You can use both the callback API and the promise API, both behave the same way. - -[source,js] ----- -// promise API -const result = await client.search({ - index: 'my-index', - body: { - query: { - match: { hello: 'world' } - } - } -}) - -// callback API -client.search({ - index: 'my-index', - body: { - query: { - match: { hello: 'world' } - } - } -}, (err, result) => { - if (err) console.log(err) -}) ----- - - -The returned value of **every** API call is formed as follows: - -[source,ts] ----- -{ - body: object | boolean - statusCode: number - headers: object - warnings: [string] - meta: object -} ----- - - -Let's see a complete example! - [source,js] ---- 'use strict' @@ -87,8 +31,7 @@ async function run () { // Let's start by indexing some data await client.index({ index: 'game-of-thrones', - // type: '_doc', // uncomment this line if you are using {es} ≤ 6 - body: { + document: { character: 'Ned Stark', quote: 'Winter is coming.' } @@ -96,8 +39,7 @@ async function run () { await client.index({ index: 'game-of-thrones', - // type: '_doc', // uncomment this line if you are using {es} ≤ 6 - body: { + document: { character: 'Daenerys Targaryen', quote: 'I am the blood of the dragon.' } @@ -105,29 +47,25 @@ async function run () { await client.index({ index: 'game-of-thrones', - // type: '_doc', // uncomment this line if you are using {es} ≤ 6 - body: { + document: { character: 'Tyrion Lannister', quote: 'A mind needs books like a sword needs a whetstone.' } }) - // We need to force an index refresh at this point, otherwise we will not + // here we are forcing an index refresh, otherwise we will not // get any result in the consequent search await client.indices.refresh({ index: 'game-of-thrones' }) // Let's search! 
-  const { body } = await client.search({
+  const result = await client.search({
     index: 'game-of-thrones',
-    // type: '_doc', // uncomment this line if you are using {es} ≤ 6
-    body: {
-      query: {
-        match: { quote: 'winter' }
-      }
+    query: {
+      match: { quote: 'winter' }
     }
   })
 
-  console.log(body.hits.hits)
+  console.log(result.hits.hits)
 }
 
 run().catch(console.log)
@@ -181,20 +119,17 @@ const { Client: Client7 } = require('es7')
 const client6 = new Client6({ node: 'http://localhost:9200/' })
 const client7 = new Client7({ node: 'http://localhost:9201/' })
 
-client6.info(console.log)
-client7.info(console.log)
+client6.info().then(console.log, console.log)
+client7.info().then(console.log, console.log)
 ----
 
 Finally, if you want to install the client for the next version of {es} (the one
-that lives in the {es} master branch), use the following command:
+that lives in the {es} main branch), use the following command:
 
 [source,sh]
 ----
-npm install esmaster@github:elastic/elasticsearch-js
+npm install esmain@github:elastic/elasticsearch-js
 ----
 
-WARNING: This command installs the master branch of the client which is not
-considered stable.
-
-
-include::breaking-changes.asciidoc[]
\ No newline at end of file
+WARNING: This command installs the main branch of the client which is not
+considered stable.
\ No newline at end of file
diff --git a/docs/observability.asciidoc b/docs/observability.asciidoc
index 66c45fb42..44307f68b 100644
--- a/docs/observability.asciidoc
+++ b/docs/observability.asciidoc
@@ -2,7 +2,7 @@
 === Observability
 
 The client does not provide a default logger, but instead it offers an event
-emitter interfaces to hook into internal events, such as `request` and
+emitter interface to hook into internal events, such as `request` and
 `response`.
 
 Correlating those events can be hard, especially if your applications have a
@@ -36,7 +36,7 @@ const logger = require('my-logger')()
 const { Client } = require('@elastic/elasticsearch')
 const client = new Client({ node: 'http://localhost:9200/' })
 
-client.on('response', (err, result) => {
+client.diagnostic.on('response', (err, result) => {
   if (err) {
     logger.error(err)
   } else {
@@ -53,7 +53,7 @@ The client emits the following events:
 a|Emitted before starting serialization and compression. If you want to measure this phase duration, you should measure the time elapsed between this event and `request`.
 [source,js]
 ----
-client.on('serialization', (err, result) => {
+client.diagnostic.on('serialization', (err, result) => {
   console.log(err, result)
 })
 ----
@@ -62,7 +62,7 @@ client.on('serialization', (err, result) => {
 a|Emitted before sending the actual request to {es} _(emitted multiple times in case of retries)_.
 [source,js]
 ----
-client.on('request', (err, result) => {
+client.diagnostic.on('request', (err, result) => {
   console.log(err, result)
 })
 ----
@@ -71,7 +71,7 @@ client.on('request', (err, result) => {
 a|Emitted before starting deserialization and decompression. If you want to measure this phase duration, you should measure the time elapsed between this event and `response`. _(This event might not be emitted in certain situations)_.
 [source,js]
 ----
-client.on('deserialization', (err, result) => {
+client.diagnostic.on('deserialization', (err, result) => {
   console.log(err, result)
 })
 ----
@@ -80,7 +80,7 @@ client.on('deserialization', (err, result) => {
 a|Emitted once {es} response has been received and parsed.
[source,js] ---- -client.on('response', (err, result) => { +client.diagnostic.on('response', (err, result) => { console.log(err, result) }) ---- @@ -89,7 +89,7 @@ client.on('response', (err, result) => { a|Emitted when the client ends a sniffing request. [source,js] ---- -client.on('sniff', (err, result) => { +client.diagnostic.on('sniff', (err, result) => { console.log(err, result) }) ---- @@ -98,7 +98,7 @@ client.on('sniff', (err, result) => { a|Emitted if the client is able to resurrect a dead node. [source,js] ---- -client.on('resurrect', (err, result) => { +client.diagnostic.on('resurrect', (err, result) => { console.log(err, result) }) ---- @@ -185,14 +185,14 @@ handle this problem. const { Client } = require('@elastic/elasticsearch') const client = new Client({ node: 'http://localhost:9200/' }) -client.on('request', (err, result) => { +client.diagnostic.on('request', (err, result) => { const { id } = result.meta.request if (err) { console.log({ error: err, reqId: id }) } }) -client.on('response', (err, result) => { +client.diagnostic.on('response', (err, result) => { const { id } = result.meta.request if (err) { console.log({ error: err, reqId: id }) @@ -201,10 +201,8 @@ client.on('response', (err, result) => { client.search({ index: 'my-index', - body: { foo: 'bar' } -}, (err, result) => { - if (err) console.log(err) -}) + query: { match_all: {} } +}).then(console.log, console.log) ---- @@ -232,12 +230,10 @@ You can also specify a custom id per request: ---- client.search({ index: 'my-index', - body: { foo: 'bar' } + query: { match_all: {} } }, { id: 'custom-id' -}, (err, result) => { - if (err) console.log(err) -}) +}).then(console.log, console.log) ---- @@ -252,7 +248,7 @@ can do that via the `context` option of a request: const { Client } = require('@elastic/elasticsearch') const client = new Client({ node: 'http://localhost:9200/' }) -client.on('request', (err, result) => { +client.diagnostic.on('request', (err, result) => { const { id } = result.meta.request const { context } = result.meta if (err) { @@ -260,7 +256,7 @@ client.on('request', (err, result) => { } }) -client.on('response', (err, result) => { +client.diagnostic.on('response', (err, result) => { const { id } = result.meta.request const { winter } = result.meta.context if (err) { @@ -270,12 +266,10 @@ client.on('response', (err, result) => { client.search({ index: 'my-index', - body: { foo: 'bar' } + query: { match_all: {} } }, { context: { winter: 'is coming' } -}, (err, result) => { - if (err) console.log(err) -}) +}).then(console.log, console.log) ---- The context object can also be configured as a global option in the client @@ -290,7 +284,7 @@ const client = new Client({ context: { winter: 'is coming' } }) -client.on('request', (err, result) => { +client.diagnostic.on('request', (err, result) => { const { id } = result.meta.request const { context } = result.meta if (err) { @@ -298,7 +292,7 @@ client.on('request', (err, result) => { } }) -client.on('response', (err, result) => { +client.diagnostic.on('response', (err, result) => { const { id } = result.meta.request const { winter } = result.meta.context if (err) { @@ -308,12 +302,10 @@ client.on('response', (err, result) => { client.search({ index: 'my-index', - body: { foo: 'bar' } + query: { match_all: {} } }, { context: { winter: 'has come' } -}, (err, result) => { - if (err) console.log(err) -}) +}).then(console.log, console.log) ---- @@ -339,7 +331,7 @@ const child = client.child({ console.log(client.name, child.name) 
-client.on('request', (err, result) => { +client.diagnostic.on('request', (err, result) => { const { id } = result.meta.request const { name } = result.meta if (err) { @@ -347,7 +339,7 @@ client.on('request', (err, result) => { } }) -client.on('response', (err, result) => { +client.diagnostic.on('response', (err, result) => { const { id } = result.meta.request const { name } = result.meta if (err) { @@ -357,17 +349,13 @@ client.on('response', (err, result) => { client.search({ index: 'my-index', - body: { foo: 'bar' } -}, (err, result) => { - if (err) console.log(err) -}) + query: { match_all: {} } +}).then(console.log, console.log) child.search({ index: 'my-index', - body: { foo: 'bar' } -}, (err, result) => { - if (err) console.log(err) -}) + query: { match_all: {} } +}).then(console.log, console.log) ---- @@ -397,9 +385,7 @@ client.search({ body: { foo: 'bar' } }, { opaqueId: 'my-search' -}, (err, result) => { - if (err) console.log(err) -}) +}).then(console.log, console.log) ---- Sometimes it may be useful to prefix all the `X-Opaque-Id` headers with a @@ -421,8 +407,6 @@ client.search({ body: { foo: 'bar' } }, { opaqueId: 'my-search' -}, (err, result) => { - if (err) console.log(err) -}) +}).then(console.log, console.log) ---- diff --git a/docs/testing.asciidoc b/docs/testing.asciidoc index af9fcf25f..34778ba06 100644 --- a/docs/testing.asciidoc +++ b/docs/testing.asciidoc @@ -72,7 +72,7 @@ mock.add({ return { status: 'ok' } }) -client.info(console.log) +client.info().then(console.log, console.log) ---- As you can see it works closely with the client itself, once you have created a @@ -129,8 +129,8 @@ mock.add({ return { count: 42 } }) -client.count({ index: 'foo' }, console.log) // => { count: 42 } -client.count({ index: 'bar' }, console.log) // => { count: 42 } +client.count({ index: 'foo' }).then(console.log, console.log) // => { count: 42 } +client.count({ index: 'bar' }).then(console.log, console.log) // => { count: 42 } ---- And wildcards are supported as well. diff --git a/docs/typescript.asciidoc b/docs/typescript.asciidoc index 13da1b10f..eace26e63 100644 --- a/docs/typescript.asciidoc +++ b/docs/typescript.asciidoc @@ -4,274 +4,73 @@ The client offers a first-class support for TypeScript, shipping a complete set of type definitions of Elasticsearch's API surface. - -NOTE: If you are using TypeScript you need to use _snake_case_ style to define -the API parameters instead of _camelCase_. - -Currently the client exposes two type definitions, the legacy one, which is the default -and the new one, which will be the default in the next major. -We strongly recommend to migrate to the new one as soon as possible, as the new types -are offering a vastly improved developer experience and guarantee you that your code -will always be in sync with the latest Elasticsearch features. - -[discrete] -==== New type definitions - -The new type definition is more advanced compared to the legacy one. In the legacy -type definitions you were expected to configure via generics both request and response -bodies. The new type definitions comes with a complete type definition for every -Elasticsearch endpoint. - -For example: - -[source,ts] ----- -// legacy definitions -const response = await client.search, SearchBody>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } -}) - -// new definitions -const response = await client.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } -}) ----- - The types are not 100% complete yet. 
Some APIs are missing (the newest ones, e.g. EQL), and others may contain some errors, but we are continuously pushing fixes & improvements. [discrete] -==== Request & Response types - -Once you migrate to the new types, those are automatically integrated into the Elasticsearch client, you will get them out of the box. -If everything works, meaning that you won’t get compiler errors, you are good to go! -The types are already correct, and there is nothing more to do. - -If a type is incorrect, you should add a comment `// @ts-expect-error @elastic/elasticsearch` -telling TypeScript that you are aware of the warning and you would like to temporarily suppress it. -In this way, your code will compile until the type is fixed, and when it happens, you’ll only need to remove the -`// @ts-expect-error @elastic/elasticsearch` comment (TypeScript will let you know when it is time). -Finally, if the type you need is missing, you’ll see that the client method returns (or defines as a parameter) -a `TODO` type, which accepts any object. - -Open an issue in the client repository letting us know if you encounter any problem! - -If needed you can import the request and response types. - -[source,ts] ----- -import { Client, estypes } from '@elastic/elasticsearch' -import type { Client as NewTypes } from '@elastic/elasticsearch/api/new' - -// @ts-expect-error @elastic/elasticsearch -const client: NewTypes = new Client({ - node: 'http://localhost:9200/' -}) - -interface Source { - foo: string -} - -const request: estypes.IndexRequest = { - index: 'test', - body: { foo: 'bar' } -} - -await client.index(request) ----- - -[discrete] -===== How to migrate to the new type definitions - -Since the new type definitions can be considered a breaking change we couldn't add the directly to the client. -Following you will find a snippet that shows you how to override the default types with the new ones. +==== Example [source,ts] ---- import { Client } from '@elastic/elasticsearch' -import type { Client as NewTypes } from '@elastic/elasticsearch/api/new' - -// @ts-expect-error @elastic/elasticsearch -const client: NewTypes = new Client({ - node: 'http://localhost:9200/' -}) - -interface Source { - foo: string -} - -// try the new code completion when building a query! -const response = await client.search({ - index: 'test', - body: { - query: { - match_all: {} - } - } -}) - -// try the new code completion when traversing a response! -const results = response.body.hits.hits.map(hit => hit._source) -// results type will be `Source[]` -console.log(results) ----- - -[discrete] -==== Legacy type definitions - -By default event API uses -https://www.typescriptlang.org/docs/handbook/generics.html[generics] to specify -the requests and response bodies and the `meta.context`. Currently, we can't -provide those definitions, but we are working to improve this situation. - -You can find a partial definition of the request types by importing -`RequestParams`, which is used by default in the client and accepts a body (when -needed) as a generic to provide a better specification. 
- -The body defaults to `RequestBody` and `RequestNDBody`, which are defined as -follows: - -[source,ts] ----- -type RequestBody> = T | string | Buffer | ReadableStream -type RequestNDBody[]> = T | string | string[] | Buffer | ReadableStream ----- - -You can specify the response and request body in each API as follows: - -[source,ts] ----- -const response = await client.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } -}) - -console.log(response.body) ----- - -You don't have to specify all the generics, but the order must be respected. - - -[discrete] -===== A complete example - -[source,ts] ----- -import { - Client, - // Object that contains the type definitions of every API method - RequestParams, - // Interface of the generic API response - ApiResponse, -} from '@elastic/elasticsearch' const client = new Client({ node: 'http://localhost:9200/' }) -// Define the type of the body for the Search request -interface SearchBody { - query: { - match: { foo: string } - } -} - -// Complete definition of the Search response -interface ShardsResponse { - total: number; - successful: number; - failed: number; - skipped: number; -} - -interface Explanation { - value: number; - description: string; - details: Explanation[]; -} - -interface SearchResponse { - took: number; - timed_out: boolean; - _scroll_id?: string; - _shards: ShardsResponse; - hits: { - total: number; - max_score: number; - hits: Array<{ - _index: string; - _type: string; - _id: string; - _score: number; - _source: T; - _version?: number; - _explanation?: Explanation; - fields?: any; - highlight?: any; - inner_hits?: any; - matched_queries?: string[]; - sort?: string[]; - }>; - }; - aggregations?: any; -} - -// Define the interface of the source object -interface Source { - foo: string +interface Document { + character: string + quote: string } async function run () { - // All of the examples below are valid code, by default, - // the request body will be `RequestBody` and response will be `Record`. - let response = await client.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } + // Let's start by indexing some data + await client.index({ + index: 'game-of-thrones', + document: { + character: 'Ned Stark', + quote: 'Winter is coming.' } }) - // body here is `ResponseBody` - console.log(response.body) - // The first generic is the response body - response = await client.search>({ - index: 'test', - // Here the body must follow the `RequestBody` interface - body: { - query: { - match: { foo: 'bar' } - } + await client.index({ + index: 'game-of-thrones', + document: { + character: 'Daenerys Targaryen', + quote: 'I am the blood of the dragon.' } }) - // body here is `SearchResponse` - console.log(response.body) - response = await client.search, SearchBody>({ - index: 'test', - // Here the body must follow the `SearchBody` interface - body: { - query: { - match: { foo: 'bar' } - } + await client.index({ + index: 'game-of-thrones', + document: { + character: 'Tyrion Lannister', + quote: 'A mind needs books like a sword needs a whetstone.' } }) - // body here is `SearchResponse` - console.log(response.body) + + // here we are forcing an index refresh, otherwise we will not + // get any result in the consequent search + await client.indices.refresh({ index: 'game-of-thrones' }) + + // Let's search! 
+  const result = await client.search<Document>({
+    index: 'game-of-thrones',
+    query: {
+      match: { quote: 'winter' }
+    }
+  })
+
+  console.log(result.hits.hits)
+}
+
+run().catch(console.log)
+----
+
+[discrete]
+==== Request & Response types
+
+You can import the full TypeScript request and response definitions as follows:
+
+[source,ts]
+----
+import { estypes } from '@elastic/elasticsearch'
+----
\ No newline at end of file

From 82cf15097dc0f65575b15a96fbd64f97d88a2a4b Mon Sep 17 00:00:00 2001
From: Tomas Della Vedova
Date: Fri, 11 Feb 2022 12:02:40 +0100
Subject: [PATCH 139/647] Changelog for 8.0 (#1621)

---
 docs/changelog.asciidoc | 307 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 307 insertions(+)
 create mode 100644 docs/changelog.asciidoc

diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc
new file mode 100644
index 000000000..5dd160a2c
--- /dev/null
+++ b/docs/changelog.asciidoc
@@ -0,0 +1,307 @@
+[[changelog-client]]
+== Release notes

+[discrete]
+=== 8.0.0
+
+[discrete]
+==== Features
+
+[discrete]
+===== Support for Elasticsearch `v8.0`
+
+You can find all the API changes
+https://www.elastic.co/guide/en/elasticsearch/reference/8.0/release-notes-8.0.0.html[here].
+
+[discrete]
+===== Drop old typescript definitions
+
+*Breaking: Yes* | *Migration effort: Medium*
+
+The current TypeScript definitions will be removed from the client, and the new definitions, which contain request and response definitions as well, will be shipped by default.
+
+[discrete]
+===== Drop callback-style API
+
+*Breaking: Yes* | *Migration effort: Large*
+
+Maintaining both API styles is not a problem per se, but it makes error handling more convoluted due to async stack traces.
+Moving to a full-promise API will solve this issue.
+
+[source,js]
+----
+// callback-style api
+client.search({ params }, { options }, (err, result) => {
+  console.log(err || result)
+})
+
+// promise-style api
+client.search({ params }, { options })
+  .then(console.log)
+  .catch(console.log)
+
+// async-style (sugar syntax on top of promises)
+const response = await client.search({ params }, { options })
+console.log(response)
+----
+
+If you are already using the promise-style API, this won't be a breaking change for you.
+
+[discrete]
+===== Remove the current abort API and use the new AbortController standard
+
+*Breaking: Yes* | *Migration effort: Small*
+
+The old abort API makes sense for callbacks, but it's annoying to use with promises.
+
+[source,js]
+----
+// callback-style api
+const request = client.search({ params }, { options }, (err, result) => {
+  console.log(err) // RequestAbortedError
+})
+
+request.abort()
+
+// promise-style api
+const promise = client.search({ params }, { options })
+
+promise
+  .then(console.log)
+  .catch(console.log) // RequestAbortedError
+
+promise.abort()
+----
+
+Node v12 has added the standard https://nodejs.org/api/globals.html#globals_class_abortcontroller[`AbortController`] API, which is designed to work well with both callbacks and promises.
+[source,js]
+----
+const ac = new AbortController()
+client.search({ params }, { signal: ac.signal })
+  .then(console.log)
+  .catch(console.log) // RequestAbortedError
+
+ac.abort()
+----
+
+[discrete]
+===== Remove the body key from the request
+
+*Breaking: Yes* | *Migration effort: Small*
+
+Thanks to the new types we are developing, we now know exactly where a parameter should go.
+The client API leaks HTTP-related notions in many places, and removing them would definitely improve the DX.
+
+This could be a rather big breaking change, so a double solution could be used during the 8.x lifecycle (accepting body keys without them being wrapped in the body, as well as the current solution).
+
+[source,js]
+----
+// from
+const response = await client.search({
+  index: 'test',
+  body: {
+    query: {
+      match_all: {}
+    }
+  }
+})
+
+// to
+const response = await client.search({
+  index: 'test',
+  query: {
+    match_all: {}
+  }
+})
+----
+
+[discrete]
+===== Migrate to new separate transport
+
+*Breaking: Yes* | *Migration effort: Small to none*
+
+The separated transport has been rewritten in TypeScript and has already dropped the callback-style API.
+Given that it is now separated, most of the Elasticsearch-specific concepts have been removed, and the client will likely need to extend parts of it to reintroduce them.
+If you weren't extending the internals of the client, this won't be a breaking change for you.
+
+[discrete]
+===== The returned value of API calls is the body and not the HTTP-related keys
+
+*Breaking: Yes* | *Migration effort: Small*
+
+The client API leaks HTTP-related notions in many places, and removing them would definitely improve the DX.
+The client will expose a new request-specific option to still get the full response details.
+
+[source,js]
+----
+// from
+const response = await client.search({
+  index: 'test',
+  body: {
+    query: {
+      match_all: {}
+    }
+  }
+})
+console.log(response) // { body: SearchResponse, statusCode: number, headers: object, warnings: array }
+
+// to
+const response = await client.search({
+  index: 'test',
+  query: {
+    match_all: {}
+  }
+})
+console.log(response) // SearchResponse
+
+// with a bit of TypeScript and JavaScript magic...
+const response = await client.search({
+  index: 'test',
+  query: {
+    match_all: {}
+  }
+}, {
+  meta: true
+})
+console.log(response) // { body: SearchResponse, statusCode: number, headers: object, warnings: array }
+----
+
+[discrete]
+===== Use a weighted connection pool
+
+*Breaking: Yes* | *Migration effort: Small to none*
+
+Move from the current cluster connection pool to a weight-based implementation.
+This new implementation offers better performance and runs less code in the background; the old connection pool can still be used.
+If you weren't extending the internals of the client, this won't be a breaking change for you.
+
+[discrete]
+===== Migrate to the "undici" http client
+
+*Breaking: Yes* | *Migration effort: Small to none*
+
+By default, the HTTP client will no longer be the Node.js HTTP client, but https://github.com/nodejs/undici[undici] instead.
+Undici is a brand new HTTP client written from scratch; it offers vastly improved performance and has better support for promises.
+Furthermore, it offers comprehensive and predictable error handling. The old HTTP client can still be used.
+If you weren't extending the internals of the client, this won't be a breaking change for you.
+
+[discrete]
+===== Drop support for old camelCased keys
+
+*Breaking: Yes* | *Migration effort: Medium*
+
+Currently, every path or query parameter can be expressed in both `snake_case` and `camelCase`. Internally the client will convert everything to `snake_case`.
+This was done in an effort to reduce the friction of migrating from the legacy to the new client, but now it no longer makes sense; see the sketch below.
+If you are already using `snake_case` keys, this won't be a breaking change for you.
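+
+A minimal sketch of the change (the index name is illustrative; `track_total_hits` is a real search parameter that previously also accepted the `trackTotalHits` spelling):
+
+[source,js]
+----
+// before: camelCase keys were silently converted to snake_case
+await client.search({ index: 'test', trackTotalHits: true })
+
+// after: only snake_case keys are accepted
+await client.search({ index: 'test', track_total_hits: true })
+----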
+
+[discrete]
+===== Rename `ssl` option to `tls`
+
+*Breaking: Yes* | *Migration effort: Small*
+
+People usually refer to this as `tls`; furthermore, internally we use the tls API and Node.js refers to it as tls everywhere.
+[source,js]
+----
+// before
+const client = new Client({
+  node: 'https://localhost:9200/',
+  ssl: {
+    rejectUnauthorized: false
+  }
+})
+
+// after
+const client = new Client({
+  node: 'https://localhost:9200/',
+  tls: {
+    rejectUnauthorized: false
+  }
+})
+----
+
+[discrete]
+===== Remove prototype poisoning protection
+
+*Breaking: Yes* | *Migration effort: Small*
+
+Prototype poisoning protection is very useful, but it can cause performance issues with big payloads.
+In v8 it will be removed, and the documentation will show how to add it back with a custom serializer.
+
+[discrete]
+===== Remove client extensions API
+
+*Breaking: Yes* | *Migration effort: Large*
+
+Nowadays the client supports the entire Elasticsearch API, and the `transport.request` method can be used if necessary. The client extensions API has no reason to exist.
+[source,js]
+----
+client.extend('utility.index', ({ makeRequest }) => {
+  return function _index (params, options) {
+    // your code
+  }
+})
+
+client.utility.index(...)
+----
+
+If you weren't using client extensions, this won't be a breaking change for you.
+
+[discrete]
+===== Move to TypeScript
+
+*Breaking: No* | *Migration effort: None*
+
+The new separated transport is already written in TypeScript, and it makes sense that the client v8 will be fully written in TypeScript as well.
+
+[discrete]
+===== Move from emitter-like interface to a diagnostic method
+
+*Breaking: Yes* | *Migration effort: Small*
+
+Currently, the client offers a subset of methods of the `EventEmitter` class; v8 will ship with a `diagnostic` property which will be a proper event emitter.
+[source,js]
+----
+// from
+client.on('request', console.log)
+
+// to
+client.diagnostic.on('request', console.log)
+----
+
+[discrete]
+===== Remove username & password properties from Cloud configuration
+
+*Breaking: Yes* | *Migration effort: Small*
+
+The Cloud configuration does not support ApiKey and Bearer auth, while the `auth` option does.
+There is no need to keep the legacy basic auth support in the cloud configuration.
+[source,js]
+----
+// before
+const client = new Client({
+  cloud: {
+    id: '',
+    username: 'elastic',
+    password: 'changeme'
+  }
+})
+
+// after
+const client = new Client({
+  cloud: {
+    id: ''
+  },
+  auth: {
+    username: 'elastic',
+    password: 'changeme'
+  }
+})
+----
+
+If you are already passing the basic auth options in the `auth` configuration, this won't be a breaking change for you.
+
+[discrete]
+===== Calling `client.close` will reject new requests
+
+Once you call `client.close`, every new request after that will be rejected with a `NoLivingConnectionsError`. In-flight requests will be executed normally unless an in-flight request requires a retry, in which case it will be rejected.
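+
+A minimal sketch of the new behavior (the index name and query are illustrative):
+
+[source,js]
+----
+await client.close()
+
+// any request sent after closing is rejected
+client.search({ index: 'test', query: { match_all: {} } })
+  .catch(err => console.log(err.name)) // NoLivingConnectionsError
+----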
\ No newline at end of file From ca6948fb827be7e6cd87ccd2e5b9e434bccfb87f Mon Sep 17 00:00:00 2001 From: delvedor Date: Fri, 11 Feb 2022 12:26:05 +0100 Subject: [PATCH 140/647] Update docs index --- docs/index.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/index.asciidoc b/docs/index.asciidoc index fd8ab0484..9997b4ab9 100644 --- a/docs/index.asciidoc +++ b/docs/index.asciidoc @@ -4,6 +4,7 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[] include::introduction.asciidoc[] +include::changelog.asciidoc[] include::installation.asciidoc[] include::connecting.asciidoc[] include::configuration.asciidoc[] From 03ecf707c90f1f3783b7ee67a631997cf59bf606 Mon Sep 17 00:00:00 2001 From: delvedor Date: Fri, 11 Feb 2022 14:30:43 +0100 Subject: [PATCH 141/647] API generation --- src/api/api/_internal.ts | 133 +++ src/api/api/async_search.ts | 8 +- src/api/api/bulk.ts | 8 +- src/api/api/eql.ts | 2 +- src/api/api/fleet.ts | 45 +- src/api/api/ilm.ts | 25 +- src/api/api/indices.ts | 10 +- src/api/api/migration.ts | 18 +- src/api/api/monitoring.ts | 8 +- src/api/api/nodes.ts | 20 +- src/api/api/searchable_snapshots.ts | 9 +- src/api/api/security.ts | 223 ++++- src/api/api/sql.ts | 32 +- src/api/index.ts | 8 + src/api/kibana.ts | 55 +- src/api/types.ts | 1242 ++++++++++++++++++++------ src/api/typesWithBodyKey.ts | 1256 +++++++++++++++++++++------ 17 files changed, 2419 insertions(+), 683 deletions(-) create mode 100644 src/api/api/_internal.ts diff --git a/src/api/api/_internal.ts b/src/api/api/_internal.ts new file mode 100644 index 000000000..cea5da529 --- /dev/null +++ b/src/api/api/_internal.ts @@ -0,0 +1,133 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
+ +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Internal { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + async deleteDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async deleteDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = '/_internal/desired_nodes' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async getDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async getDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_internal/desired_nodes/_latest' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async health (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async health (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async health (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async health (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_internal/_health'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async updateDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
+  async updateDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
+  async updateDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
+  async updateDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+    const acceptedPath: string[] = ['history_id', 'version']
+    const querystring: Record = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_internal/desired_nodes/${encodeURIComponent(params.history_id.toString())}/${encodeURIComponent(params.version.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts
index 00a7437dc..3560f488d 100644
--- a/src/api/api/async_search.ts
+++ b/src/api/api/async_search.ts
@@ -87,10 +87,10 @@ export default class AsyncSearch {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
-  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
-  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise>
-  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise {
+  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise
+  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['id']
     const querystring: Record = {}
     const body = undefined
diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts
index 30dfb4995..b3fc16666 100644
--- a/src/api/api/bulk.ts
+++ b/src/api/api/bulk.ts
@@ -37,10 +37,10 @@
 import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
-export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise
-export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise>
-export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise
-export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise {
+export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise>
+export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise
+export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['operations']
   const querystring: Record = {}
diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts
index 73aabaa57..415fbc470 100644
--- a/src/api/api/eql.ts
+++ b/src/api/api/eql.ts
@@ -114,7 +114,7 @@ export default class Eql {
   async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise>
   async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['index']
-    const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position']
+    const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position', 'runtime_mappings']
     const querystring: Record = {}
     // @ts-expect-error
     const userBody: any = params?.body
diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts
index 84b15f358..bdd46c811 100644
--- a/src/api/api/fleet.ts
+++ b/src/api/api/fleet.ts
@@ -65,19 +65,24 @@ export default class Fleet {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async msearch (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async msearch (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async msearch (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async msearch (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
+  async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
+  async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptions): Promise>
+  async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['index']
+    const acceptedBody: string[] = ['searches']
     const querystring: Record = {}
-    const body = undefined
+    // @ts-expect-error
+    let body: any = params.body ?? undefined
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        // @ts-expect-error
+        body = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -94,19 +99,31 @@ export default class Fleet {
     return await this.transport.request({ path, method, querystring, bulkBody: body }, options)
   }
 
-  async search (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async search (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async search (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async search (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async search (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
+  async search (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
+  async search (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptions): Promise>
+  async search (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['index']
+    const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
     const querystring: Record = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts
index a7887e21e..81c620b2e 100644
--- a/src/api/api/ilm.ts
+++ b/src/api/api/ilm.ts
@@ -140,19 +140,32 @@ export default class Ilm {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async migrateToDataTiers (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async migrateToDataTiers (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async migrateToDataTiers (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async migrateToDataTiers (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise
+  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['legacy_template_to_delete', 'node_attribute']
     const querystring: Record = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
index ad1f8fe3e..1fd863f40 100644
--- a/src/api/api/indices.ts
+++ b/src/api/api/indices.ts
@@ -513,19 +513,19 @@ export default class Indices {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async fieldUsageStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise
+  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['index']
     const querystring: Record = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts
index 27682a2f5..069ed2d66 100644
--- a/src/api/api/migration.ts
+++ b/src/api/api/migration.ts
@@ -73,10 +73,10 @@ export default class Migration {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async getFeatureUpgradeStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async getFeatureUpgradeStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async getFeatureUpgradeStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async getFeatureUpgradeStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise
+  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
     const querystring: Record = {}
     const body = undefined
@@ -86,6 +86,7 @@ export default class Migration {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -95,10 +96,10 @@ export default class Migration {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async postFeatureUpgrade (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async postFeatureUpgrade (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async postFeatureUpgrade (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async postFeatureUpgrade (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise
+  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
     const querystring: Record = {}
     const body = undefined
@@ -108,6 +109,7 @@ export default class Migration {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts
index 6db509050..8cf13c461 100644
--- a/src/api/api/monitoring.ts
+++ b/src/api/api/monitoring.ts
@@ -43,10 +43,10 @@ export default class Monitoring {
     this.transport = transport
   }
 
-  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise
-  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise>
-  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise
-  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise {
+  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise
+  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['type']
     const acceptedBody: string[] = ['operations']
     const querystring: Record = {}
diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts
index 214684566..6dadf200b 100644
--- a/src/api/api/nodes.ts
+++ b/src/api/api/nodes.ts
@@ -43,19 +43,19 @@ export default class Nodes {
     this.transport = transport
   }
 
-  async clearRepositoriesMeteringArchive (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async clearRepositoriesMeteringArchive (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async clearRepositoriesMeteringArchive (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async clearRepositoriesMeteringArchive (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['node_id', 'max_archive_version']
     const querystring: Record = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -65,19 +65,19 @@ export default class Nodes {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async getRepositoriesMeteringInfo (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async getRepositoriesMeteringInfo (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async getRepositoriesMeteringInfo (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async getRepositoriesMeteringInfo (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise
+  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['node_id']
     const querystring: Record = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts
index 642eaa2e7..725d72d22 100644
--- a/src/api/api/searchable_snapshots.ts
+++ b/src/api/api/searchable_snapshots.ts
@@ -43,10 +43,10 @@ export default class SearchableSnapshots {
     this.transport = transport
   }
 
-  async cacheStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async cacheStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async cacheStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async cacheStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise
+  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['node_id']
     const querystring: Record = {}
     const body = undefined
@@ -56,6 +56,7 @@ export default class SearchableSnapshots {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
diff --git a/src/api/api/security.ts b/src/api/api/security.ts
index 553680244..7cd54d048 100644
--- a/src/api/api/security.ts
+++ b/src/api/api/security.ts
@@ -43,6 +43,28 @@ export default class Security {
     this.transport = transport
   }
 
+  async activateUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
+  async activateUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
+  async activateUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
+  async activateUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+    const acceptedPath: string[] = []
+    const querystring: Record = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_security/profile/_activate'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise>
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise
@@ -436,10 +458,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async enrollKibana (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async enrollKibana (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async enrollKibana (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async enrollKibana (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise
+  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
     const querystring: Record = {}
     const body = undefined
@@ -449,6 +471,7 @@ export default class Security {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -458,10 +481,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async enrollNode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async enrollNode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async enrollNode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async enrollNode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise
+  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
     const querystring: Record = {}
     const body = undefined
@@ -471,6 +494,7 @@ export default class Security {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -762,6 +786,28 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  async getUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
+  async getUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
+  async getUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
+  async getUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+    const acceptedPath: string[] = ['uid']
+    const querystring: Record = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
   async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise>
   async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise
@@ -1072,19 +1118,31 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async samlAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async samlAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async samlAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async samlAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise
+  async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['content', 'ids', 'realm']
     const querystring: Record = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -1094,19 +1152,31 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async samlCompleteLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async samlCompleteLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async samlCompleteLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async samlCompleteLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise
+  async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['realm', 'ids', 'query_string', 'content']
     const querystring: Record = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -1116,19 +1186,31 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async samlInvalidate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async samlInvalidate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async samlInvalidate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async samlInvalidate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise
+  async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['acs', 'query_string', 'realm']
    const querystring: Record = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -1138,19 +1220,31 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async samlLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async samlLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async samlLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async samlLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise
+  async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['token', 'refresh_token']
     const querystring: Record = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -1160,19 +1254,32 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async samlPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async samlPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async samlPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async samlPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise
+  async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['acs', 'realm', 'relay_state']
     const querystring: Record = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -1182,19 +1289,19 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async samlServiceProviderMetadata (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async samlServiceProviderMetadata (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async samlServiceProviderMetadata (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async samlServiceProviderMetadata (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise
+  async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['realm_name']
     const querystring: Record = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -1203,4 +1310,26 @@ export default class Security {
     const path = `/_security/saml/metadata/${encodeURIComponent(params.realm_name.toString())}`
     return await this.transport.request({ path, method, querystring, body }, options)
   }
+
+  async updateUserProfileData (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
+  async updateUserProfileData (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
+  async updateUserProfileData (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
+  async updateUserProfileData (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+    const acceptedPath: string[] = ['uid']
+    const querystring: Record = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_security/profile/_data/${encodeURIComponent(params.uid.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
 }
diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts
index 4273da298..63e808ff6 100644
--- a/src/api/api/sql.ts
+++ b/src/api/api/sql.ts
@@ -77,19 +77,19 @@ export default class Sql {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async deleteAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async deleteAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async deleteAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async deleteAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise
+  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['id']
     const querystring: Record = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -99,19 +99,19 @@ export default class Sql {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async getAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async getAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async getAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async getAsync (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise
+  async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['id']
     const querystring: Record = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -121,19 +121,19 @@ export default class Sql {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async getAsyncStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async getAsyncStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async getAsyncStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise
-  async getAsyncStatus (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise {
+  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise
+  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise {
    const acceptedPath: string[] = ['id']
     const querystring: Record = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -148,7 +148,7 @@ export default class Sql {
   async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise
   async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['columnar', 'cursor', 'fetch_size', 'filter', 'query', 'request_timeout', 'page_timeout', 'time_zone', 'field_multi_value_leniency']
+    const acceptedBody: string[] = ['catalog', 'columnar', 'cursor', 'fetch_size', 'filter', 'query', 'request_timeout', 'page_timeout', 'time_zone', 'field_multi_value_leniency', 'runtime_mappings', 'wait_for_completion_timeout', 'params', 'keep_alive', 'keep_on_completion', 'index_using_frozen']
     const querystring: Record = {}
     // @ts-expect-error
     const userBody: any = params?.body
diff --git a/src/api/index.ts b/src/api/index.ts
index c4f253984..62e786c69 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -26,6 +26,7 @@
 // DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
 // and elastic/elastic-client-generator-js to regenerate this file again.
 
+import InternalApi from './api/_internal'
 import AsyncSearchApi from './api/async_search'
 import AutoscalingApi from './api/autoscaling'
 import bulkApi from './api/bulk'
@@ -105,6 +106,7 @@ import XpackApi from './api/xpack'
 
 export default interface API {
   new(): API
+  Internal: InternalApi
   asyncSearch: AsyncSearchApi
   autoscaling: AutoscalingApi
   bulk: typeof bulkApi
@@ -183,6 +185,7 @@ export default interface API {
   xpack: XpackApi
 }
 
+const kInternal = Symbol('Internal')
 const kAsyncSearch = Symbol('AsyncSearch')
 const kAutoscaling = Symbol('Autoscaling')
 const kCat = Symbol('Cat')
@@ -218,6 +221,7 @@ const kWatcher = Symbol('Watcher')
 const kXpack = Symbol('Xpack')
 
 export default class API {
+  [kInternal]: symbol | null
   [kAsyncSearch]: symbol | null
   [kAutoscaling]: symbol | null
   [kCat]: symbol | null
@@ -252,6 +256,7 @@ export default class API {
   [kWatcher]: symbol | null
   [kXpack]: symbol | null
   constructor () {
+    this[kInternal] = null
    this[kAsyncSearch] = null
    this[kAutoscaling] = null
    this[kCat] = null
@@ -333,6 +338,9 @@ API.prototype.updateByQuery = updateByQueryApi
 API.prototype.updateByQueryRethrottle = updateByQueryRethrottleApi
 
 Object.defineProperties(API.prototype, {
+  Internal: {
+    get () { return this[kInternal] === null ? (this[kInternal] = new InternalApi(this.transport)) : this[kInternal] }
+  },
   asyncSearch: {
     get () { return this[kAsyncSearch] === null ? (this[kAsyncSearch] = new AsyncSearchApi(this.transport)) : this[kAsyncSearch] }
   },
diff --git a/src/api/kibana.ts b/src/api/kibana.ts
index 9e4aff53f..ce1fb15a5 100644
--- a/src/api/kibana.ts
+++ b/src/api/kibana.ts
@@ -51,10 +51,16 @@ interface KibanaClient {
   helpers: Helpers
   child: (opts?: ClientOptions) => KibanaClient
   close: () => Promise
+  Internal: {
+    deleteDesiredNodes: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    getDesiredNodes: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    health: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    updateDesiredNodes: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+  }
   asyncSearch: {
     delete: (params: T.AsyncSearchDeleteRequest| TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions) => Promise>
     get: (params: T.AsyncSearchGetRequest| TB.AsyncSearchGetRequest, options?: TransportRequestOptions) => Promise, TContext>>
-    status: (params: T.AsyncSearchStatusRequest| TB.AsyncSearchStatusRequest, options?: TransportRequestOptions) => Promise, TContext>>
+    status: (params: T.AsyncSearchStatusRequest| TB.AsyncSearchStatusRequest, options?: TransportRequestOptions) => Promise>
     submit: (params?: T.AsyncSearchSubmitRequest| TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions) => Promise, TContext>>
   }
   autoscaling: {
@@ -63,7 +69,7 @@ interface KibanaClient {
     getAutoscalingPolicy: (params: T.AutoscalingGetAutoscalingPolicyRequest| TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions) => Promise>
     putAutoscalingPolicy: (params: T.AutoscalingPutAutoscalingPolicyRequest| TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions) => Promise>
   }
-  bulk: (params: T.BulkRequest| TB.BulkRequest, options?: TransportRequestOptions) => Promise>
+  bulk: (params: T.BulkRequest| TB.BulkRequest, options?: TransportRequestOptions) => Promise>
   cat: {
     aliases: (params?: T.CatAliasesRequest| TB.CatAliasesRequest, options?: TransportRequestOptions) => Promise>
     allocation: (params?: T.CatAllocationRequest| TB.CatAllocationRequest, options?: TransportRequestOptions) => Promise>
@@ -159,8 +165,8 @@ interface KibanaClient {
   fieldCaps: (params?: T.FieldCapsRequest| TB.FieldCapsRequest, options?: TransportRequestOptions) => Promise>
   fleet: {
     globalCheckpoints: (params: T.FleetGlobalCheckpointsRequest| TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions) => Promise>
-    msearch: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    search: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    msearch: (params: T.FleetMsearchRequest| TB.FleetMsearchRequest, options?: TransportRequestOptions) => Promise, TContext>>
+    search: (params: T.FleetSearchRequest| TB.FleetSearchRequest, options?: TransportRequestOptions) => Promise, TContext>>
   }
   get: (params: T.GetRequest| TB.GetRequest, options?: TransportRequestOptions) => Promise, TContext>>
   getScript: (params: T.GetScriptRequest| TB.GetScriptRequest, options?: TransportRequestOptions) => Promise>
@@ -175,7 +181,7 @@ interface KibanaClient {
     explainLifecycle: (params: T.IlmExplainLifecycleRequest| TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions) => Promise>
     getLifecycle: (params?: T.IlmGetLifecycleRequest| TB.IlmGetLifecycleRequest, options?: TransportRequestOptions) => Promise>
     getStatus: (params?: T.IlmGetStatusRequest| TB.IlmGetStatusRequest, options?: TransportRequestOptions) => Promise>
-    migrateToDataTiers: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    migrateToDataTiers: (params?: T.IlmMigrateToDataTiersRequest| TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions) => Promise>
     moveToStep: (params: T.IlmMoveToStepRequest| TB.IlmMoveToStepRequest, options?: TransportRequestOptions) => Promise>
     putLifecycle: (params: T.IlmPutLifecycleRequest| TB.IlmPutLifecycleRequest, options?: TransportRequestOptions) => Promise>
     removePolicy: (params: T.IlmRemovePolicyRequest| TB.IlmRemovePolicyRequest, options?: TransportRequestOptions) => Promise>
@@ -203,7 +209,7 @@ interface KibanaClient {
     existsAlias: (params: T.IndicesExistsAliasRequest| TB.IndicesExistsAliasRequest, options?: TransportRequestOptions) => Promise>
     existsIndexTemplate: (params: T.IndicesExistsIndexTemplateRequest| TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions) => Promise>
     existsTemplate: (params: T.IndicesExistsTemplateRequest| TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions) => Promise>
-    fieldUsageStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    fieldUsageStats: (params: T.IndicesFieldUsageStatsRequest| TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions) => Promise>
     flush: (params?: T.IndicesFlushRequest| TB.IndicesFlushRequest, options?: TransportRequestOptions) => Promise>
     forcemerge: (params?: T.IndicesForcemergeRequest| TB.IndicesForcemergeRequest, options?: TransportRequestOptions) => Promise>
     get: (params: T.IndicesGetRequest| TB.IndicesGetRequest, options?: TransportRequestOptions) => Promise>
@@ -266,8 +272,8 @@ interface KibanaClient {
   mget: (params?: T.MgetRequest| TB.MgetRequest, options?: TransportRequestOptions) => Promise, TContext>>
   migration: {
     deprecations: (params?: T.MigrationDeprecationsRequest| TB.MigrationDeprecationsRequest, options?: TransportRequestOptions) => Promise>
-    getFeatureUpgradeStatus: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    postFeatureUpgrade: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    getFeatureUpgradeStatus: (params?: T.MigrationGetFeatureUpgradeStatusRequest| TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions) => Promise>
+    postFeatureUpgrade: (params?: T.MigrationPostFeatureUpgradeRequest| TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions) => Promise>
   }
   ml: {
     closeJob: (params: T.MlCloseJobRequest| TB.MlCloseJobRequest, options?: TransportRequestOptions) => Promise>
@@ -342,14 +348,14 @@ interface KibanaClient {
     validateDetector: (params?: T.MlValidateDetectorRequest| TB.MlValidateDetectorRequest, options?: TransportRequestOptions) => Promise>
   }
   monitoring: {
-    bulk: (params: T.MonitoringBulkRequest| TB.MonitoringBulkRequest, options?: TransportRequestOptions) => Promise>
+    bulk: (params: T.MonitoringBulkRequest| TB.MonitoringBulkRequest, options?: TransportRequestOptions) => Promise>
   }
   msearch: , TContext = unknown>(params?: T.MsearchRequest| TB.MsearchRequest, options?: TransportRequestOptions) => Promise, TContext>>
   msearchTemplate: , TContext = unknown>(params?: T.MsearchTemplateRequest| TB.MsearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>>
   mtermvectors: (params?: T.MtermvectorsRequest| TB.MtermvectorsRequest, options?: TransportRequestOptions) => Promise>
   nodes: {
-    clearRepositoriesMeteringArchive: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    getRepositoriesMeteringInfo: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    clearRepositoriesMeteringArchive: (params: T.NodesClearRepositoriesMeteringArchiveRequest| TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions) => Promise>
+    getRepositoriesMeteringInfo: (params: T.NodesGetRepositoriesMeteringInfoRequest| TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions) => Promise>
     hotThreads: (params?: T.NodesHotThreadsRequest| TB.NodesHotThreadsRequest, options?: TransportRequestOptions) => Promise>
     info: (params?: T.NodesInfoRequest| TB.NodesInfoRequest, options?: TransportRequestOptions) => Promise>
     reloadSecureSettings: (params?: T.NodesReloadSecureSettingsRequest| TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions) => Promise>
@@ -381,12 +387,13 @@ interface KibanaClient {
   searchShards: (params?: T.SearchShardsRequest| TB.SearchShardsRequest, options?: TransportRequestOptions) => Promise>
   searchTemplate: (params?: T.SearchTemplateRequest| TB.SearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>>
   searchableSnapshots: {
-    cacheStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    cacheStats: (params?: T.SearchableSnapshotsCacheStatsRequest| TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions) => Promise>
     clearCache: (params?: T.SearchableSnapshotsClearCacheRequest| TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions) => Promise>
     mount: (params: T.SearchableSnapshotsMountRequest| TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions) => Promise>
     stats: (params?: T.SearchableSnapshotsStatsRequest| TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions) => Promise>
   }
   security: {
+    activateUserProfile: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
     authenticate: (params?: T.SecurityAuthenticateRequest| TB.SecurityAuthenticateRequest, options?: TransportRequestOptions) => Promise>
     changePassword: (params?: T.SecurityChangePasswordRequest| TB.SecurityChangePasswordRequest, options?: TransportRequestOptions) => Promise>
     clearApiKeyCache: (params: T.SecurityClearApiKeyCacheRequest| TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions) => Promise>
@@ -403,8 +410,8 @@ interface KibanaClient {
     deleteUser: (params: T.SecurityDeleteUserRequest| TB.SecurityDeleteUserRequest, options?: TransportRequestOptions) => Promise>
     disableUser: (params: T.SecurityDisableUserRequest| TB.SecurityDisableUserRequest, options?: TransportRequestOptions) => Promise>
     enableUser: (params: T.SecurityEnableUserRequest| TB.SecurityEnableUserRequest, options?: TransportRequestOptions) => Promise>
-    enrollKibana: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    enrollNode: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    enrollKibana: (params?: T.SecurityEnrollKibanaRequest| TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptions) => Promise>
+    enrollNode: (params?: T.SecurityEnrollNodeRequest| TB.SecurityEnrollNodeRequest, options?: TransportRequestOptions) => Promise>
     getApiKey: (params?: T.SecurityGetApiKeyRequest| TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions) => Promise>
     getBuiltinPrivileges: (params?: T.SecurityGetBuiltinPrivilegesRequest| TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions) => Promise>
     getPrivileges: (params?: T.SecurityGetPrivilegesRequest| TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions) => Promise>
@@ -415,6 +422,7 @@ interface KibanaClient {
     getToken: (params?: T.SecurityGetTokenRequest| TB.SecurityGetTokenRequest, options?: TransportRequestOptions) => Promise>
     getUser: (params?: T.SecurityGetUserRequest| TB.SecurityGetUserRequest, options?: TransportRequestOptions) => Promise>
     getUserPrivileges: (params?: T.SecurityGetUserPrivilegesRequest| TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions) => Promise>
+    getUserProfile: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
     grantApiKey: (params?: T.SecurityGrantApiKeyRequest| TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions) => Promise>
     hasPrivileges: (params?: T.SecurityHasPrivilegesRequest| TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions) => Promise>
     invalidateApiKey: (params?: T.SecurityInvalidateApiKeyRequest| TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions) => Promise>
@@ -424,12 +432,13 @@ interface KibanaClient {
     putRoleMapping: (params: T.SecurityPutRoleMappingRequest| TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions) => Promise>
     putUser: (params: T.SecurityPutUserRequest| TB.SecurityPutUserRequest, options?: TransportRequestOptions) => Promise>
     queryApiKeys: (params?: T.SecurityQueryApiKeysRequest| TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions) => Promise>
-    samlAuthenticate: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    samlCompleteLogout: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    samlInvalidate: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    samlLogout: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    samlPrepareAuthentication: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    samlServiceProviderMetadata: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    samlAuthenticate: (params?: T.SecuritySamlAuthenticateRequest| TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions) => Promise>
+    samlCompleteLogout: (params?: T.SecuritySamlCompleteLogoutRequest| TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions) => Promise>
+    samlInvalidate: (params?: T.SecuritySamlInvalidateRequest| TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptions) => Promise>
+    samlLogout: (params?: T.SecuritySamlLogoutRequest| TB.SecuritySamlLogoutRequest, options?: TransportRequestOptions) => Promise>
+    samlPrepareAuthentication: (params?: T.SecuritySamlPrepareAuthenticationRequest| TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions) => Promise>
+    samlServiceProviderMetadata: (params: T.SecuritySamlServiceProviderMetadataRequest| TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions) => Promise>
+    updateUserProfileData: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
   }
   shutdown: {
     deleteNode: (params: T.ShutdownDeleteNodeRequest| TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions) => Promise>
@@ -463,9 +472,9 @@ interface KibanaClient {
   }
   sql: {
     clearCursor: (params?: T.SqlClearCursorRequest| TB.SqlClearCursorRequest, options?: TransportRequestOptions) => Promise>
-    deleteAsync: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    getAsync: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
-    getAsyncStatus: (params?: T.TODO, options?: TransportRequestOptions) => Promise>
+    deleteAsync: (params: T.SqlDeleteAsyncRequest| TB.SqlDeleteAsyncRequest, options?: TransportRequestOptions) => Promise>
+    getAsync: (params: T.SqlGetAsyncRequest| TB.SqlGetAsyncRequest, options?: TransportRequestOptions) => Promise>
+    getAsyncStatus: (params: T.SqlGetAsyncStatusRequest| TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptions) => Promise>
     query: (params?: T.SqlQueryRequest| TB.SqlQueryRequest, options?: TransportRequestOptions) => Promise>
     translate: (params?: T.SqlTranslateRequest| TB.SqlTranslateRequest, options?: TransportRequestOptions) => Promise>
   }
diff --git a/src/api/types.ts b/src/api/types.ts
index 6cc2643ca..f81a5de01 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -56,7 +56,7 @@ export interface BulkOperationContainer {
 
 export type BulkOperationType = 'index' | 'create' | 'update' | 'delete'
 
-export interface BulkRequest extends RequestBase {
+export interface BulkRequest extends RequestBase {
   index?: IndexName
   pipeline?: string
   refresh?: Refresh
@@ -67,7 +67,7 @@ export interface BulkRequest extends RequestBase {
   timeout?: Time
   wait_for_active_shards?: WaitForActiveShards
   require_alias?: boolean
-  operations?: (BulkOperationContainer | TSource)[]
+  operations?: (BulkOperationContainer | BulkUpdateAction | TDocument)[]
 }
 
 export interface BulkResponse {
@@ -91,6 +91,16 @@ export interface BulkResponseItem {
   get?: InlineGet>
 }
 
+export interface BulkUpdateAction {
+  detect_noop?: boolean
+  doc?: TPartialDocument
+  doc_as_upsert?: boolean
+  script?: Script
+  scripted_upsert?: boolean
+  _source?: SearchSourceConfig
+  upsert?: TDocument
+}
+
 export interface BulkUpdateOperation extends BulkOperationBase {
   require_alias?: boolean
   retry_on_conflict?: integer
@@ -536,7 +546,7 @@ export interface MgetResponse {
 export type MgetResponseItem = GetGetResult | MgetMultiGetError
 
 export interface MsearchMultiSearchItem extends SearchResponse {
-  status: integer
+  status?: integer
 }
 
 export interface MsearchMultiSearchResult {
@@ -1014,7 +1024,7 @@ export interface SearchAggregationProfile {
   description: string
   time_in_nanos: long
   type: string
-  debug?: SearchAggregationProfileDebug | SearchAggregationProfileDelegateDebug
+  debug?: SearchAggregationProfileDebug
   children?: SearchAggregationProfile[]
 }
 
@@ -1027,7 +1037,7 @@ export interface SearchAggregationProfileDebug {
   result_strategy?: string
   has_filter?: boolean
   delegate?: string
-  delegate_debug?: SearchAggregationProfileDelegateDebug
+  delegate_debug?: SearchAggregationProfileDebug
   chars_fetched?: integer
   extract_count?: integer
   extract_ns?: integer
@@ -1041,9 +1051,6 @@ export interface SearchAggregationProfileDebug {
   numeric_collectors_used?: integer
   empty_collectors_used?: integer
   deferred_aggregators?: string[]
-}
-
-export interface SearchAggregationProfileDelegateDebug {
   segments_with_doc_count_field?: integer
   segments_with_deleted_docs?: integer
   filters?: SearchAggregationProfileDelegateDebugFilter[]
@@ -1074,6 +1081,10 @@ export interface SearchCompletionContext {
   prefix?: boolean
 }
 
+export interface SearchCompletionSuggest extends SearchSuggestBase {
+  options: SearchCompletionSuggestOption[]
+}
+
 export interface SearchCompletionSuggestOption {
   collate_match?: boolean
   contexts?: Record
@@ -1081,8 +1092,8 @@ export interface SearchCompletionSuggestOption {
   _id: string
   _index: IndexName
   _routing?: Routing
-  _score: double
-  _source: TDocument
+  _score?: double
+  _source?: TDocument
   text: string
 }
 
@@ -1273,6 +1284,10 @@ export interface SearchNestedIdentity {
   _nested?: SearchNestedIdentity
 }
 
+export interface SearchPhraseSuggest extends SearchSuggestBase {
+  options: SearchPhraseSuggestOption
+}
+
 export interface SearchPhraseSuggestCollate {
   params?: Record
   prune?: boolean
@@ -1399,10 +1414,11 @@ export interface SearchStupidBackoffSmoothingModel {
   discount: double
 }
 
-export interface SearchSuggest {
+export type SearchSuggest = SearchCompletionSuggest | SearchPhraseSuggest | SearchTermSuggest
+
+export interface SearchSuggestBase {
   length: integer
   offset: integer
-  options: SearchSuggestOption[]
   text: string
 }
 
@@ -1414,8 +1430,6 @@ export interface SearchSuggestFuzziness {
   unicode_aware: boolean
 }
 
-export type SearchSuggestOption = SearchCompletionSuggestOption | SearchPhraseSuggestOption | SearchTermSuggestOption
-
 export type SearchSuggestSort = 'score' | 'frequency'
 
 export interface SearchSuggesterKeys {
@@ -1430,9 +1444,13 @@ export interface SearchSuggesterBase {
   size?: integer
 }
 
+export interface SearchTermSuggest extends SearchSuggestBase {
+  options: SearchTermSuggestOption[]
+}
+
 export interface SearchTermSuggestOption {
   text: string
-  freq?: long
+  freq: long
   score: double
 }
 
@@ -1529,10 +1547,20 @@ export interface SearchTemplateRequest extends RequestBase {
 }
 
 export interface SearchTemplateResponse {
-  _shards: ShardStatistics
+  took: long
   timed_out: boolean
-  took: integer
+  _shards: ShardStatistics
   hits: SearchHitsMetadata
+  aggregations?: Record
+  _clusters?: ClusterStatistics
+  fields?: Record
+  max_score?: double
+  num_reduce_phases?: long
+  profile?: SearchProfile
+  pit_id?: Id
+  _scroll_id?: ScrollId
+  suggest?: Record[]>
+  terminated_early?: boolean
 }
 
 export interface TermsEnumRequest extends RequestBase {
@@ -1707,7 +1735,7 @@ export interface UpdateByQueryRethrottleResponse {
 }
 
 export interface UpdateByQueryRethrottleUpdateByQueryRethrottleNode extends SpecUtilsBaseNode {
-  tasks: Record
+  tasks: Record
 }
 
 export interface SpecUtilsBaseNode {
@@ -1751,10 +1779,6 @@ export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb'
 
 export type CategoryId = string
 
-export interface ChainTransform {
-  transforms: TransformContainer[]
-}
-
 export interface ClusterStatistics {
   skipped: integer
   successful: integer
@@ -1776,6 +1800,12 @@ export interface CoordsGeoBounds {
   right: double
 }
 
+export type DFIIndependenceMeasure = 'standardized' | 'saturated' |
'chisquared' + +export type DFRAfterEffect = 'no' | 'b' | 'l' + +export type DFRBasicModel = 'be' | 'd' | 'g' | 'if' | 'in' | 'ine' | 'p' + export type DataStreamName = string export type DataStreamNames = DataStreamName | DataStreamName[] @@ -1786,6 +1816,8 @@ export type DateMath = string export type DateMathTime = string +export type DateOrEpochMillis = DateString | EpochMillis + export type DateString = string export interface DictionaryResponseBase { @@ -1938,6 +1970,10 @@ export type Host = string export type HttpHeaders = Record +export type IBDistribution = 'll' | 'spl' + +export type IBLambda = 'df' | 'ttf' + export type Id = string export type Ids = Id | Id[] @@ -2078,7 +2114,7 @@ export interface NodeShard { allocation_id?: Record recovery_source?: Record unassigned_info?: ClusterAllocationExplainUnassignedInformation - relocating_node?: null + relocating_node?: NodeId | null } export interface NodeStatistics { @@ -2088,6 +2124,8 @@ export interface NodeStatistics { failed: integer } +export type Normalization = 'no' | 'h1' | 'h2' | 'h3' | 'z' + export type OpType = 'index' | 'create' export type Password = string @@ -2190,8 +2228,10 @@ export interface ScriptSort { export type ScriptSortType = 'string' | 'number' export interface ScriptTransform { - lang: string - params: Record + lang?: string + params?: Record + source?: string + id?: string } export type ScrollId = string @@ -2230,7 +2270,7 @@ export interface SegmentsStats { index_writer_memory?: ByteSize index_writer_max_memory_in_bytes?: integer index_writer_memory_in_bytes: integer - max_unsafe_auto_id_timestamp: integer + max_unsafe_auto_id_timestamp: long memory?: ByteSize memory_in_bytes: integer norms_memory?: ByteSize @@ -2319,6 +2359,13 @@ export type SuggestMode = 'missing' | 'popular' | 'always' export type SuggestionName = string +export interface TaskFailure { + task_id: long + node_id: NodeId + status: string + reason: ErrorCause +} + export type TaskId = string | integer export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' @@ -2344,7 +2391,7 @@ export interface TopRightBottomLeftGeoBounds { } export interface TransformContainer { - chain?: ChainTransform + chain?: TransformContainer[] script?: ScriptTransform search?: SearchTransform } @@ -2371,7 +2418,7 @@ export type VersionString = string export type VersionType = 'internal' | 'external' | 'external_gte' | 'force' -export type WaitForActiveShardOptions = 'all' +export type WaitForActiveShardOptions = 'all' | 'index-setting' export type WaitForActiveShards = integer | WaitForActiveShardOptions @@ -2427,21 +2474,21 @@ export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMulti export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys & { [property: string]: AggregationsAggregate | long } -export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | 
AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsChildrenAggregate | AggregationsGeoLineAggregate +export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | 
AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate export interface AggregationsAggregateBase { - meta?: Record + meta?: Metadata } export interface AggregationsAggregation { - meta?: Record + meta?: Metadata name?: string } export interface AggregationsAggregationContainer { aggregations?: Record aggs?: Record - meta?: Record + meta?: Metadata adjacency_matrix?: AggregationsAdjacencyMatrixAggregation auto_date_histogram?: AggregationsAutoDateHistogramAggregation avg?: AggregationsAverageAggregation @@ -2646,31 +2693,24 @@ export interface AggregationsCategorizeTextAggregation extends AggregationsAggre max_matched_tokens?: integer similarity_threshold?: integer categorization_filters?: string[] - categorization_analyzer?: string | AggregationsCategorizeTextAnalyzer + categorization_analyzer?: AggregationsCategorizeTextAnalyzer shard_size?: integer size?: integer min_doc_count?: integer shard_min_doc_count?: integer } -export interface AggregationsCategorizeTextAnalyzer { - char_filter?: string[] - tokenizer?: string - filter?: string[] -} +export type AggregationsCategorizeTextAnalyzer = string | AggregationsCustomCategorizeTextAnalyzer export interface AggregationsChiSquareHeuristic { background_is_superset: boolean include_negatives: boolean } -export interface AggregationsChildrenAggregate extends AggregationsMultiBucketAggregateBase { +export interface AggregationsChildrenAggregateKeys extends AggregationsSingleBucketAggregateBase { } - -export interface AggregationsChildrenAggregateBucketKeys extends AggregationsMultiBucketBase { -} -export type AggregationsChildrenAggregateBucket = AggregationsChildrenAggregateBucketKeys -& { [property: string]: AggregationsAggregate | long } +export type AggregationsChildrenAggregate = AggregationsChildrenAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase { type?: RelationName @@ -2718,6 +2758,12 @@ export interface AggregationsCumulativeCardinalityAggregation extends Aggregatio export interface AggregationsCumulativeSumAggregation extends AggregationsPipelineAggregationBase { } +export interface AggregationsCustomCategorizeTextAnalyzer { + char_filter?: string[] + tokenizer?: string + filter?: string[] +} + export interface AggregationsDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { } @@ -2832,8 +2878,10 @@ export interface AggregationsExtendedStatsBucketAggregation extends Aggregations export type AggregationsFieldDateMath = DateMath | double -export interface AggregationsFilterAggregate extends AggregationsSingleBucketAggregateBase { +export interface AggregationsFilterAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsFilterAggregate = AggregationsFilterAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsFiltersAggregate extends AggregationsMultiBucketAggregateBase { } @@ -2944,8 +2992,10 @@ export interface AggregationsGeoTileGridBucketKeys extends AggregationsMultiBuck export type AggregationsGeoTileGridBucket = 
AggregationsGeoTileGridBucketKeys & { [property: string]: AggregationsAggregate | GeoTile | long } -export interface AggregationsGlobalAggregate extends AggregationsSingleBucketAggregateBase { +export interface AggregationsGlobalAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsGlobalAggregate = AggregationsGlobalAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsGlobalAggregation extends AggregationsBucketAggregationBase { } @@ -3163,8 +3213,10 @@ export type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' | export type AggregationsMissing = string | integer | double | boolean -export interface AggregationsMissingAggregate extends AggregationsSingleBucketAggregateBase { +export interface AggregationsMissingAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsMissingAggregate = AggregationsMissingAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase { field?: Field @@ -3225,8 +3277,10 @@ export interface AggregationsMutualInformationHeuristic { include_negatives?: boolean } -export interface AggregationsNestedAggregate extends AggregationsSingleBucketAggregateBase { +export interface AggregationsNestedAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsNestedAggregate = AggregationsNestedAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase { path?: Field @@ -3238,6 +3292,11 @@ export interface AggregationsNormalizeAggregation extends AggregationsPipelineAg export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax' +export interface AggregationsParentAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsParentAggregate = AggregationsParentAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + export interface AggregationsParentAggregation extends AggregationsBucketAggregationBase { type?: RelationName } @@ -3294,6 +3353,7 @@ export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase to?: double from_as_string?: string to_as_string?: string + key?: string } export type AggregationsRangeBucket = AggregationsRangeBucketKeys & { [property: string]: AggregationsAggregate | double | string | long } @@ -3325,15 +3385,19 @@ export interface AggregationsRegressionInferenceOptions { num_top_feature_importance_values?: integer } -export interface AggregationsReverseNestedAggregate extends AggregationsSingleBucketAggregateBase { +export interface AggregationsReverseNestedAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase { path?: Field } -export interface AggregationsSamplerAggregate extends AggregationsSingleBucketAggregateBase { +export interface AggregationsSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsSamplerAggregate = AggregationsSamplerAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } 
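// Hedged sketch of how the reworked keyed single-bucket aggregates above are
// consumed. The index ("orders") and aggregation names ("recent", "total") are
// hypothetical; the type names come from this diff, and the import path assumes
// the generated types are published under lib/api/types in the built package.
import { Client } from '@elastic/elasticsearch'
import type {
  AggregationsFilterAggregate,
  AggregationsSumAggregate
} from '@elastic/elasticsearch/lib/api/types'

const client = new Client({ node: 'http://localhost:9200/' })

async function recentOrderTotal (): Promise<number | null> {
  const response = await client.search({
    index: 'orders',
    size: 0,
    aggregations: {
      recent: {
        filter: { range: { created_at: { gte: 'now-1d' } } },
        aggregations: { total: { sum: { field: 'amount' } } }
      }
    }
  })
  // After this change a FilterAggregate is a keyed type: `doc_count` plus one
  // extra property per sub-aggregation, so `total` sits directly on the bucket
  // and is narrowed with a cast rather than read from a nested wrapper object.
  const recent = response.aggregations?.recent as AggregationsFilterAggregate
  const total = recent.total as AggregationsSumAggregate
  return total.value
}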
export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase { shard_size?: integer @@ -3492,7 +3556,7 @@ export interface AggregationsStringStatsAggregate extends AggregationsAggregateB max_length: integer | null avg_length: double | null entropy: double | null - distribution?: string | null + distribution?: Record | null min_length_as_string?: string max_length_as_string?: string avg_length_as_string?: string @@ -3616,7 +3680,8 @@ export interface AggregationsTopMetrics { metrics: Record } -export interface AggregationsTopMetricsAggregate extends AggregationsMultiBucketAggregateBase { +export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBase { + top: AggregationsTopMetrics[] } export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase { @@ -3625,12 +3690,6 @@ export interface AggregationsTopMetricsAggregation extends AggregationsMetricAgg sort?: Sort } -export interface AggregationsTopMetricsBucketKeys extends AggregationsMultiBucketBase { - top: AggregationsTopMetrics[] -} -export type AggregationsTopMetricsBucket = AggregationsTopMetricsBucketKeys -& { [property: string]: AggregationsAggregate | AggregationsTopMetrics[] | long } - export interface AggregationsTopMetricsValue { field: Field } @@ -3638,6 +3697,11 @@ export interface AggregationsTopMetricsValue { export interface AggregationsUnmappedRareTermsAggregate extends AggregationsMultiBucketAggregateBase { } +export interface AggregationsUnmappedSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsUnmappedSamplerAggregate = AggregationsUnmappedSamplerAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsMultiBucketAggregateBase { } @@ -4079,9 +4143,9 @@ export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBa export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase { type: 'pattern_replace' - flags: string + flags?: string pattern: string - replacement: string + replacement?: string } export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase { @@ -4660,7 +4724,7 @@ export interface MappingRuntimeField { export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' -export type MappingRuntimeFields = Record +export type MappingRuntimeFields = Record export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase { type: 'scaled_float' @@ -5595,9 +5659,9 @@ export interface AsyncSearchStatusRequest extends RequestBase { id: Id } -export interface AsyncSearchStatusResponse extends AsyncSearchAsyncSearchResponseBase { +export interface AsyncSearchStatusResponse extends AsyncSearchAsyncSearchResponseBase { _shards: ShardStatistics - completion_status: integer + completion_status?: integer } export interface AsyncSearchSubmitRequest extends RequestBase { @@ -5730,9 +5794,25 @@ export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { export interface AutoscalingPutAutoscalingPolicyResponse extends AcknowledgedResponseBase { } +export type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 
'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's' + +export type CatCatAnonalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[] + +export type CatCatDatafeedColumn = 'ae' | 'assignment_explanation' | 'bc' | 'buckets.count' | 'bucketsCount' | 'id' | 'na' | 'node.address' | 'nodeAddress' | 'ne' | 'node.ephemeral_id' | 'nodeEphemeralId' | 'ni' | 'node.id' | 'nodeId' | 'nn' | 'node.name' | 'nodeName' | 'sba' | 'search.bucket_avg' | 'searchBucketAvg' | 'sc' | 'search.count' | 'searchCount' | 'seah' | 'search.exp_avg_hour' | 'searchExpAvgHour' | 'st' | 'search.time' | 'searchTime' | 's' | 'state' + +export type CatCatDatafeedColumns = CatCatDatafeedColumn | CatCatDatafeedColumn[] + +export 
type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | 'ct' | 'createTime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'failure_reason' | 'fr' | 'failureReason' | 'id' | 'model_memory_limit' | 'mml' | 'modelMemoryLimit' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'progress' | 'p' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'type' | 't' | 'version' | 'v' + +export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] + export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } +export type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'checkpoint' | 'cp' | 'checkpoint_duration_time_exp_avg' | 'cdtea' | 'checkpointTimeExpAvg' | 'checkpoint_progress' | 'c' | 'checkpointProgress' | 'create_time' | 'ct' | 'createTime' | 'delete_time' | 'dtime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'documents_deleted' | 'docd' | 'documents_indexed' | 'doci' | 'docs_per_second' | 'dps' | 'documents_processed' | 'docp' | 'frequency' | 'f' | 'id' | 'index_failure' | 'if' | 'index_time' | 'itime' | 'index_total' | 'it' | 'indexed_documents_exp_avg' | 'idea' | 'last_search_time' | 'lst' | 'lastSearchTime' | 'max_page_search_size' | 'mpsz' | 'pages_processed' | 'pp' | 'pipeline' | 'p' | 'processed_documents_exp_avg' | 'pdea' | 'processing_time' | 'pt' | 'reason' | 'r' | 'search_failure' | 'sf' | 'search_time' | 'stime' | 'search_total' | 'st' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'transform_type' | 'tt' | 'trigger_count' | 'tc' | 'version' | 'v' + +export type CatCatTransformColumns = CatCatTransformColumn | CatCatTransformColumn[] + export interface CatAliasesAliasesRecord { alias?: string a?: string @@ -6258,6 +6338,9 @@ export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { id?: Id allow_no_match?: boolean bytes?: Bytes + h?: CatCatDfaColumns + s?: CatCatDfaColumns + time?: Time } export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] @@ -6300,6 +6383,8 @@ export interface CatMlDatafeedsDatafeedsRecord { export interface CatMlDatafeedsRequest extends CatCatRequestBase { datafeed_id?: Id allow_no_match?: boolean + h?: CatCatDatafeedColumns + s?: CatCatDatafeedColumns time?: TimeUnit } @@ -6486,6 +6571,8 @@ export interface CatMlJobsRequest extends CatCatRequestBase { job_id?: Id allow_no_match?: boolean bytes?: Bytes + h?: CatCatAnonalyDetectorColumns + s?: CatCatAnonalyDetectorColumns time?: TimeUnit } @@ -7388,6 +7475,9 @@ export interface CatTransformsRequest extends CatCatRequestBase { transform_id?: Id allow_no_match?: boolean from?: integer + h?: CatCatTransformColumns + s?: CatCatTransformColumns + time?: Time size?: integer } @@ -8472,7 +8562,7 @@ export interface EnrichStatsCoordinatorStats { export interface EnrichStatsExecutingPolicy { name: Name - task: TasksInfo + task: TasksTaskInfo } export interface EnrichStatsRequest extends RequestBase { @@ -8541,7 +8631,7 @@ export interface EqlGetStatusResponse { } export interface EqlSearchRequest extends RequestBase { - index: IndexName + index: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean @@ -8556,8 +8646,9 @@ export interface EqlSearchRequest extends RequestBase { fetch_size?: uint filter?: QueryDslQueryContainer | QueryDslQueryContainer[] size?: uint - fields?: QueryDslFieldAndFormat | 
Field + fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[] result_position?: EqlSearchResultPosition + runtime_mappings?: MappingRuntimeFields } export interface EqlSearchResponse extends EqlEqlSearchResponseBase { @@ -8599,6 +8690,110 @@ export interface FleetGlobalCheckpointsResponse { timed_out: boolean } +export interface FleetMsearchRequest extends RequestBase { + index: IndexName | IndexAlias + allow_no_indices?: boolean + ccs_minimize_roundtrips?: boolean + expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean + max_concurrent_searches?: long + max_concurrent_shard_requests?: long + pre_filter_shard_size?: long + search_type?: SearchType + rest_total_hits_as_int?: boolean + typed_keys?: boolean + wait_for_checkpoints?: FleetCheckpoint[] + allow_partial_search_results?: boolean + searches?: MsearchRequestItem[] +} + +export interface FleetMsearchResponse { + docs: MsearchResponseItem[] +} + +export interface FleetSearchRequest extends RequestBase { + index: IndexName | IndexAlias + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + batched_reduce_size?: long + ccs_minimize_roundtrips?: boolean + default_operator?: QueryDslOperator + df?: string + docvalue_fields?: Fields + expand_wildcards?: ExpandWildcards + explain?: boolean + ignore_throttled?: boolean + ignore_unavailable?: boolean + lenient?: boolean + max_concurrent_shard_requests?: long + min_compatible_shard_node?: VersionString + preference?: string + pre_filter_shard_size?: long + request_cache?: boolean + routing?: Routing + scroll?: Time + search_type?: SearchType + stats?: string[] + stored_fields?: Fields + suggest_field?: Field + suggest_mode?: SuggestMode + suggest_size?: long + suggest_text?: string + terminate_after?: long + timeout?: Time + track_total_hits?: SearchTrackHits + track_scores?: boolean + typed_keys?: boolean + rest_total_hits_as_int?: boolean + version?: boolean + _source?: SearchSourceConfigParam + _source_excludes?: Fields + _source_includes?: Fields + seq_no_primary_term?: boolean + q?: string + size?: integer + from?: integer + sort?: string | string[] + wait_for_checkpoints?: FleetCheckpoint[] + allow_partial_search_results?: boolean + aggregations?: Record + aggs?: Record + collapse?: SearchFieldCollapse + highlight?: SearchHighlight + indices_boost?: Record[] + min_score?: double + post_filter?: QueryDslQueryContainer + profile?: boolean + query?: QueryDslQueryContainer + rescore?: SearchRescore | SearchRescore[] + script_fields?: Record + search_after?: SortResults + slice?: SlicedScroll + fields?: (QueryDslFieldAndFormat | Field)[] + suggest?: SearchSuggester + pit?: SearchPointInTimeReference + runtime_mappings?: MappingRuntimeFields +} + +export interface FleetSearchResponse { + took: long + timed_out: boolean + _shards: ShardStatistics + hits: SearchHitsMetadata + aggregations?: Record + _clusters?: ClusterStatistics + fields?: Record + max_score?: double + num_reduce_phases?: long + profile?: SearchProfile + pit_id?: Id + _scroll_id?: ScrollId + suggest?: Record[]> + terminated_early?: boolean +} + export interface GraphConnection { doc_count: long source: long @@ -8774,6 +8969,22 @@ export interface IlmGetStatusResponse { operation_mode: LifecycleOperationMode } +export interface IlmMigrateToDataTiersRequest extends RequestBase { + dry_run?: boolean + legacy_template_to_delete?: string + node_attribute?: string +} + +export interface IlmMigrateToDataTiersResponse { + dry_run: boolean + 
removed_legacy_template: string + migrated_ilm_policies: string[] + migrated_indices: Indices + migrated_legacy_templates: string[] + migrated_composable_templates: string[] + migrated_component_templates: string[] +} + export interface IlmMoveToStepRequest extends RequestBase { index: IndexName current_step?: IlmMoveToStepStepKey @@ -8849,6 +9060,10 @@ export interface IndicesAliasDefinition { is_hidden?: boolean } +export interface IndicesCacheQueries { + enabled: boolean +} + export interface IndicesDataStream { name: DataStreamName timestamp_field: IndicesDataStreamTimestampField @@ -8919,8 +9134,8 @@ export interface IndicesIndexRoutingRebalance { export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replicas' | 'none' export interface IndicesIndexSegmentSort { - field: Fields - order: IndicesSegmentSortOrder | IndicesSegmentSortOrder[] + field?: Fields + order?: IndicesSegmentSortOrder | IndicesSegmentSortOrder[] mode?: IndicesSegmentSortMode | IndicesSegmentSortMode[] missing?: IndicesSegmentSortMissing | IndicesSegmentSortMissing[] } @@ -8936,120 +9151,57 @@ export interface IndicesIndexSettingBlocks { export interface IndicesIndexSettings { index?: IndicesIndexSettings mode?: string - 'index.mode'?: string - routing_path?: string[] - 'index.routing_path'?: string[] + routing_path?: string | string[] soft_deletes?: IndicesSoftDeletes - 'index.soft_deletes'?: IndicesSoftDeletes - 'soft_deletes.enabled'?: boolean - 'index.soft_deletes.enabled'?: boolean - 'soft_deletes.retention_lease.period'?: Time - 'index.soft_deletes.retention_lease.period'?: Time sort?: IndicesIndexSegmentSort - 'index.sort'?: IndicesIndexSegmentSort number_of_shards?: integer | string - 'index.number_of_shards'?: integer | string number_of_replicas?: integer | string - 'index.number_of_replicas'?: integer | string number_of_routing_shards?: integer - 'index.number_of_routing_shards'?: integer check_on_startup?: IndicesIndexCheckOnStartup - 'index.check_on_startup'?: IndicesIndexCheckOnStartup codec?: string - 'index.codec'?: string routing_partition_size?: integer - 'index.routing_partition_size'?: integer load_fixed_bitset_filters_eagerly?: boolean - 'index.load_fixed_bitset_filters_eagerly'?: boolean hidden?: boolean | string - 'index.hidden'?: boolean | string auto_expand_replicas?: string - 'index.auto_expand_replicas'?: string - 'merge.scheduler.max_thread_count'?: integer - 'index.merge.scheduler.max_thread_count'?: integer - 'merge.scheduler.max_merge_count'?: integer - 'index.merge.scheduler.max_merge_count'?: integer - 'search.idle.after'?: Time - 'index.search.idle.after'?: Time + merge?: IndicesMerge + search?: IndicesSettingsSearch refresh_interval?: Time - 'index.refresh_interval'?: Time max_result_window?: integer - 'index.max_result_window'?: integer max_inner_result_window?: integer - 'index.max_inner_result_window'?: integer max_rescore_window?: integer - 'index.max_rescore_window'?: integer max_docvalue_fields_search?: integer - 'index.max_docvalue_fields_search'?: integer max_script_fields?: integer - 'index.max_script_fields'?: integer max_ngram_diff?: integer - 'index.max_ngram_diff'?: integer max_shingle_diff?: integer - 'index.max_shingle_diff'?: integer blocks?: IndicesIndexSettingBlocks - 'index.blocks'?: IndicesIndexSettingBlocks - 'blocks.read_only'?: boolean - 'index.blocks.read_only'?: boolean - 'blocks.read_only_allow_delete'?: boolean - 'index.blocks.read_only_allow_delete'?: boolean - 'blocks.read'?: boolean - 'index.blocks.read'?: boolean - 'blocks.write'?: 
boolean | string - 'index.blocks.write'?: boolean | string - 'blocks.metadata'?: boolean - 'index.blocks.metadata'?: boolean max_refresh_listeners?: integer - 'index.max_refresh_listeners'?: integer - 'analyze.max_token_count'?: integer - 'index.analyze.max_token_count'?: integer - 'highlight.max_analyzed_offset'?: integer - 'index.highlight.max_analyzed_offset'?: integer + analyze?: IndicesSettingsAnalyze + highlight?: IndicesSettingsHighlight max_terms_count?: integer - 'index.max_terms_count'?: integer max_regex_length?: integer - 'index.max_regex_length'?: integer routing?: IndicesIndexRouting - 'index.routing'?: IndicesIndexRouting gc_deletes?: Time - 'index.gc_deletes'?: Time default_pipeline?: PipelineName - 'index.default_pipeline'?: PipelineName final_pipeline?: PipelineName - 'index.final_pipeline'?: PipelineName lifecycle?: IndicesIndexSettingsLifecycle - 'index.lifecycle'?: IndicesIndexSettingsLifecycle - 'lifecycle.name'?: string - 'index.lifecycle.name'?: string provided_name?: Name - 'index.provided_name'?: Name creation_date?: DateString - 'index.creation_date'?: DateString creation_date_string?: DateString - 'index.creation_date_string'?: DateString uuid?: Uuid - 'index.uuid'?: Uuid version?: IndicesIndexVersioning - 'index.version'?: IndicesIndexVersioning - verified_before_close?: boolean - 'index.verified_before_close'?: boolean + verified_before_close?: boolean | string format?: string | integer - 'index.format'?: string | integer max_slices_per_scroll?: integer - 'index.max_slices_per_scroll'?: integer - 'translog.durability'?: string - 'index.translog.durability'?: string - 'translog.flush_threshold_size'?: string - 'index.translog.flush_threshold_size'?: string - 'query_string.lenient'?: boolean - 'index.query_string.lenient'?: boolean + translog?: IndicesTranslog + query_string?: IndicesSettingsQueryString priority?: integer | string - 'index.priority'?: integer | string top_metrics_max_size?: integer analysis?: IndicesIndexSettingsAnalysis - 'index.analysis'?: IndicesIndexSettingsAnalysis settings?: IndicesIndexSettings time_series?: IndicesIndexSettingsTimeSeries + shards?: integer + queries?: IndicesQueries + similarity?: IndicesSettingsSimilarity } export interface IndicesIndexSettingsAnalysis { @@ -9065,8 +9217,8 @@ export interface IndicesIndexSettingsLifecycle { } export interface IndicesIndexSettingsTimeSeries { - end_time: DateString - start_time: DateString + end_time?: DateOrEpochMillis + start_time?: DateOrEpochMillis } export interface IndicesIndexState { @@ -9082,21 +9234,109 @@ export interface IndicesIndexVersioning { created_string?: VersionString } +export interface IndicesMerge { + scheduler?: IndicesMergeScheduler +} + +export interface IndicesMergeScheduler { + max_thread_count?: integer + max_merge_count?: integer +} + export interface IndicesNumericFielddata { format: IndicesNumericFielddataFormat } export type IndicesNumericFielddataFormat = 'array' | 'disabled' +export interface IndicesQueries { + cache?: IndicesCacheQueries +} + +export interface IndicesRetentionLease { + period: Time +} + +export interface IndicesSearchIdle { + after?: Time +} + export type IndicesSegmentSortMissing = '_last' | '_first' -export type IndicesSegmentSortMode = 'min' | 'max' +export type IndicesSegmentSortMode = 'min' | 'MIN' | 'max' | 'MAX' + +export type IndicesSegmentSortOrder = 'asc' | 'ASC' | 'desc' | 'DESC' + +export interface IndicesSettingsAnalyze { + max_token_count?: integer +} + +export interface IndicesSettingsHighlight { + max_analyzed_offset?: 
integer +} + +export interface IndicesSettingsQueryString { + lenient: boolean +} + +export interface IndicesSettingsSearch { + idle: IndicesSearchIdle +} + +export interface IndicesSettingsSimilarity { + bm25?: IndicesSettingsSimilarityBm25 + dfi?: IndicesSettingsSimilarityDfi + dfr?: IndicesSettingsSimilarityDfr + ib?: IndicesSettingsSimilarityIb + lmd?: IndicesSettingsSimilarityLmd + lmj?: IndicesSettingsSimilarityLmj + scripted_tfidf?: IndicesSettingsSimilarityScriptedTfidf +} + +export interface IndicesSettingsSimilarityBm25 { + b: integer + discount_overlaps: boolean + k1: double + type: 'BM25' +} + +export interface IndicesSettingsSimilarityDfi { + independence_measure: DFIIndependenceMeasure + type: 'DFI' +} + +export interface IndicesSettingsSimilarityDfr { + after_effect: DFRAfterEffect + basic_model: DFRBasicModel + normalization: Normalization + type: 'DFR' +} + +export interface IndicesSettingsSimilarityIb { + distribution: IBDistribution + lambda: IBLambda + normalization: Normalization + type: 'IB' +} + +export interface IndicesSettingsSimilarityLmd { + mu: integer + type: 'LMDirichlet' +} + +export interface IndicesSettingsSimilarityLmj { + lambda: double + type: 'LMJelinekMercer' +} -export type IndicesSegmentSortOrder = 'asc' | 'desc' +export interface IndicesSettingsSimilarityScriptedTfidf { + script: Script + type: 'scripted' +} export interface IndicesSoftDeletes { enabled: boolean - 'retention_lease.period'?: Time + retention_lease?: IndicesRetentionLease } export interface IndicesStringFielddata { @@ -9114,6 +9354,16 @@ export interface IndicesTemplateMapping { version?: VersionNumber } +export interface IndicesTranslog { + durability?: string + flush_threshold_size?: string + retention?: IndicesTranslogRetention +} + +export interface IndicesTranslogRetention { + size: ByteSize +} + export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' export interface IndicesAddBlockIndicesBlockStatus { @@ -9281,7 +9531,7 @@ export interface IndicesDataStreamsStatsDataStreamsStatsItem { data_stream: Name store_size?: ByteSize store_size_bytes: integer - maximum_timestamp: integer + maximum_timestamp: long } export interface IndicesDataStreamsStatsRequest extends RequestBase { @@ -9329,7 +9579,9 @@ export interface IndicesDeleteDataStreamResponse extends AcknowledgedResponseBas } export interface IndicesDeleteIndexTemplateRequest extends RequestBase { - name: Name + name: Names + master_timeout?: Time + timeout?: Time } export interface IndicesDeleteIndexTemplateResponse extends AcknowledgedResponseBase { @@ -9397,21 +9649,77 @@ export interface IndicesExistsTemplateRequest extends RequestBase { export type IndicesExistsTemplateResponse = boolean -export interface IndicesFlushRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - force?: boolean - ignore_unavailable?: boolean - wait_if_ongoing?: boolean +export interface IndicesFieldUsageStatsFieldSummary { + any: uint + stored_fields: uint + doc_values: uint + points: uint + norms: uint + term_vectors: uint + knn_vectors: uint + inverted_index: IndicesFieldUsageStatsInvertedIndex } -export interface IndicesFlushResponse extends ShardsOperationResponseBase { +export interface IndicesFieldUsageStatsFieldsUsageBodyKeys { + _shards: ShardStatistics } +export type IndicesFieldUsageStatsFieldsUsageBody = IndicesFieldUsageStatsFieldsUsageBodyKeys +& { [property: string]: IndicesFieldUsageStatsUsageStatsIndex | ShardStatistics } -export 
interface IndicesForcemergeRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean +export interface IndicesFieldUsageStatsInvertedIndex { + terms: uint + postings: uint + proximity: uint + positions: uint + term_frequencies: uint + offsets: uint + payloads: uint +} + +export interface IndicesFieldUsageStatsRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + fields?: Fields + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards +} + +export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody + +export interface IndicesFieldUsageStatsShardsStats { + all_fields: IndicesFieldUsageStatsFieldSummary + fields: Record +} + +export interface IndicesFieldUsageStatsUsageStatsIndex { + shards: IndicesFieldUsageStatsUsageStatsShards[] +} + +export interface IndicesFieldUsageStatsUsageStatsShards { + routing: IndicesStatsShardRouting + stats: IndicesFieldUsageStatsShardsStats + tracking_id: string + tracking_started_at_millis: EpochMillis +} + +export interface IndicesFlushRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + force?: boolean + ignore_unavailable?: boolean + wait_if_ongoing?: boolean +} + +export interface IndicesFlushResponse extends ShardsOperationResponseBase { +} + +export interface IndicesForcemergeRequest extends RequestBase { + index?: Indices + allow_no_indices?: boolean expand_wildcards?: ExpandWildcards flush?: boolean ignore_unavailable?: boolean @@ -9630,7 +9938,7 @@ export interface IndicesPutMappingRequest extends RequestBase { dynamic_date_formats?: string[] dynamic_templates?: Record | Record[] _field_names?: MappingFieldNamesField - _meta?: Record + _meta?: Metadata numeric_detection?: boolean properties?: Record _routing?: MappingRoutingField @@ -10021,6 +10329,8 @@ export interface IndicesSplitResponse extends AcknowledgedResponseBase { index: IndexName } +export type IndicesStatsIndexMetadataState = 'open' | 'close' + export interface IndicesStatsIndexStats { completion?: CompletionStats docs?: DocStats @@ -10048,6 +10358,8 @@ export interface IndicesStatsIndicesStats { shards?: Record total?: IndicesStatsIndexStats uuid?: Uuid + health?: HealthStatus + status?: IndicesStatsIndexMetadataState } export interface IndicesStatsRequest extends RequestBase { @@ -10193,6 +10505,7 @@ export interface IndicesUpdateAliasesAddAction { is_write_index?: boolean routing?: Routing search_routing?: Routing + must_exist?: boolean } export interface IndicesUpdateAliasesRemoveAction { @@ -10206,6 +10519,7 @@ export interface IndicesUpdateAliasesRemoveAction { export interface IndicesUpdateAliasesRemoveIndexAction { index?: IndexName indices?: Indices + must_exist?: boolean } export interface IndicesUpdateAliasesRequest extends RequestBase { @@ -10816,6 +11130,41 @@ export interface MigrationDeprecationsResponse { ml_settings: MigrationDeprecationsDeprecation[] } +export interface MigrationGetFeatureUpgradeStatusMigrationFeature { + feature_name: string + minimum_index_version: VersionString + migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus + indices: MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo[] +} + +export interface MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo { + index: IndexName + version: VersionString + failure_cause?: ErrorCause +} + +export type MigrationGetFeatureUpgradeStatusMigrationStatus = 
'NO_MIGRATION_NEEDED' | 'MIGRATION_NEEDED' | 'IN_PROGRESS' | 'ERROR' + +export interface MigrationGetFeatureUpgradeStatusRequest extends RequestBase { +} + +export interface MigrationGetFeatureUpgradeStatusResponse { + features: MigrationGetFeatureUpgradeStatusMigrationFeature[] + migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus +} + +export interface MigrationPostFeatureUpgradeMigrationFeature { + feature_name: string +} + +export interface MigrationPostFeatureUpgradeRequest extends RequestBase { +} + +export interface MigrationPostFeatureUpgradeResponse { + accepted: boolean + features: MigrationPostFeatureUpgradeMigrationFeature[] +} + export interface MlAnalysisConfig { bucket_span: TimeSpan categorization_analyzer?: MlCategorizationAnalyzer @@ -10835,7 +11184,7 @@ export interface MlAnalysisConfigRead { categorization_analyzer?: MlCategorizationAnalyzer categorization_field_name?: Field categorization_filters?: string[] - detectors: MlDetector[] + detectors: MlDetectorRead[] influencers: Field[] model_prune_window?: Time latency?: Time @@ -11039,6 +11388,7 @@ export interface MlDatafeedConfig { export interface MlDatafeedRunningState { real_time_configured: boolean real_time_running: boolean + search_interval?: MlRunningStateSearchInterval } export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' @@ -11308,7 +11658,9 @@ export interface MlDelayedDataCheckConfig { enabled: boolean } -export type MlDeploymentState = 'started' | 'starting' | 'fully_allocated' +export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated' + +export type MlDeploymentState = 'started' | 'starting' | 'stopping' export interface MlDetectionRule { actions?: MlRuleAction[] @@ -11329,6 +11681,19 @@ export interface MlDetector { use_null?: boolean } +export interface MlDetectorRead { + by_field_name?: Field + custom_rules?: MlDetectionRule[] + detector_description?: string + detector_index?: integer + exclude_frequent?: MlExcludeFrequent + field_name?: Field + function: string + over_field_name?: Field + partition_field_name?: Field + use_null?: boolean +} + export interface MlDiscoveryNode { attributes: Record ephemeral_id: Id @@ -11535,7 +11900,7 @@ export interface MlModelSnapshot { retain: boolean snapshot_doc_count: long snapshot_id: Id - timestamp: integer + timestamp: long } export interface MlOutlierDetectionParameters { @@ -11583,6 +11948,11 @@ export interface MlRuleCondition { value: double } +export interface MlRunningStateSearchInterval { + end_ms: long + start_ms: long +} + export interface MlTimingStats { elapsed_time: integer iteration_time?: integer @@ -11612,7 +11982,7 @@ export interface MlTotalFeatureImportanceStatistics { } export interface MlTrainedModelAllocation { - allocation_state: MlDeploymentState + allocation_state: MlDeploymentAllocationState routing_table: Record start_time: DateString task_parameters: MlTrainedModelAllocationTaskParameters @@ -11659,6 +12029,43 @@ export interface MlTrainedModelConfigMetadata { total_feature_importance?: MlTotalFeatureImportance[] } +export interface MlTrainedModelDeploymentAllocationStatus { + allocation_count: integer + state: MlDeploymentAllocationState + target_allocation_count: integer +} + +export interface MlTrainedModelDeploymentNodesStats { + average_inference_time_ms: double + error_count: integer + inference_count: integer + inference_threads: integer + last_access: long + model_threads: integer + node: MlDiscoveryNode + number_of_pending_requests: integer + 
rejection_execution_count: integer + routing_state: MlTrainedModelAllocationRoutingTable + start_time: long + timeout_count: integer +} + +export interface MlTrainedModelDeploymentStats { + allocation_status: MlTrainedModelDeploymentAllocationStatus + error_count: integer + inference_count: integer + inference_threads: integer + model_id: Id + model_threads: integer + nodes: MlTrainedModelDeploymentNodesStats + queue_capacity: integer + rejected_execution_count: integer + reason: string + start_time: long + state: MlDeploymentState + timeout_count: integer +} + export interface MlTrainedModelEntities { class_name: string class_probability: double @@ -11668,10 +12075,10 @@ export interface MlTrainedModelEntities { } export interface MlTrainedModelInferenceStats { - failure_count: long - inference_count: long - cache_miss_count: long - missing_all_fields_count: long + cache_miss_count: integer + failure_count: integer + inference_count: integer + missing_all_fields_count: integer timestamp: Time } @@ -11683,11 +12090,18 @@ export interface MlTrainedModelLocationIndex { name: IndexName } +export interface MlTrainedModelSizeStats { + model_size_bytes: ByteSize + required_native_memory_bytes: integer +} + export interface MlTrainedModelStats { - model_id: Id - pipeline_count: integer + deployment_stats?: MlTrainedModelDeploymentStats inference_stats?: MlTrainedModelInferenceStats ingest?: Record + model_id: Id + model_size_stats: MlTrainedModelSizeStats + pipeline_count: integer } export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch' @@ -12277,7 +12691,7 @@ export interface MlPostDataRequest extends RequestBase { export interface MlPostDataResponse { bucket_count: long - earliest_record_timestamp: integer + earliest_record_timestamp: long empty_bucket_count: long input_bytes: long input_field_count: long @@ -12285,7 +12699,7 @@ export interface MlPostDataResponse { invalid_date_count: long job_id: Id last_data_time: integer - latest_record_timestamp: integer + latest_record_timestamp: long missing_field_count: long out_of_order_timestamp_count: long processed_field_count: long @@ -12639,7 +13053,7 @@ export interface MlStartTrainedModelDeploymentRequest extends RequestBase { model_threads?: integer queue_capacity?: integer timeout?: Time - wait_for?: MlDeploymentState + wait_for?: MlDeploymentAllocationState } export interface MlStartTrainedModelDeploymentResponse { @@ -12838,12 +13252,12 @@ export interface MlValidateDetectorRequest extends RequestBase { export interface MlValidateDetectorResponse extends AcknowledgedResponseBase { } -export interface MonitoringBulkRequest extends RequestBase { +export interface MonitoringBulkRequest extends RequestBase { type?: string system_id: string system_api_version: string interval: TimeSpan - operations?: (BulkOperationContainer | TSource)[] + operations?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] } export interface MonitoringBulkResponse { @@ -13121,6 +13535,13 @@ export interface NodesNodeBufferPool { used_in_bytes?: long } +export interface NodesNodeReloadError { + name: Name + reload_exception?: ErrorCause +} + +export type NodesNodeReloadResult = NodesStats | NodesNodeReloadError + export interface NodesNodesResponseBase { _nodes?: NodeStatistics } @@ -13179,6 +13600,38 @@ export interface NodesRecording { cumulative_execution_time_millis?: long } +export interface NodesRepositoryLocation { + base_path: string + container?: string + bucket?: string +} + +export interface NodesRepositoryMeteringInformation { + 
repository_name: Name + repository_type: string + repository_location: NodesRepositoryLocation + repository_ephemeral_id: Id + repository_started_at: EpochMillis + repository_stopped_at?: EpochMillis + archived: boolean + cluster_version?: VersionNumber + request_counts: NodesRequestCounts +} + +export interface NodesRequestCounts { + GetBlobProperties?: long + GetBlob?: long + ListBlobs?: long + PutBlob?: long + PutBlock?: long + PutBlockList?: long + GetObject?: long + ListObjects?: long + InsertObject?: long + PutObject?: long + PutMultipartObject?: long +} + export interface NodesScriptCache { cache_evictions?: long compilation_limit_triggered?: long @@ -13259,6 +13712,27 @@ export interface NodesTransportHistogram { ge_millis?: long } +export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBase { + node_id: NodeIds + max_archive_version: long +} + +export interface NodesClearRepositoriesMeteringArchiveResponse extends NodesNodesResponseBase { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record +} + +export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { + node_id: NodeIds +} + +export interface NodesGetRepositoriesMeteringInfoResponse extends NodesNodesResponseBase { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record +} + export interface NodesHotThreadsHotThread { hosts: Host[] node_id: Id @@ -13634,13 +14108,6 @@ export interface NodesInfoResponse extends NodesNodesResponseBase { nodes: Record } -export interface NodesReloadSecureSettingsNodeReloadError { - name: Name - reload_exception?: ErrorCause -} - -export type NodesReloadSecureSettingsNodeReloadResult = NodesStats | NodesReloadSecureSettingsNodeReloadError - export interface NodesReloadSecureSettingsRequest extends RequestBase { node_id?: NodeIds timeout?: Time @@ -13649,7 +14116,7 @@ export interface NodesReloadSecureSettingsRequest extends RequestBase { export interface NodesReloadSecureSettingsResponse extends NodesNodesResponseBase { cluster_name: Name - nodes: Record + nodes: Record } export interface NodesStatsRequest extends RequestBase { @@ -13728,19 +14195,7 @@ export interface RollupDeleteJobRequest extends RequestBase { } export interface RollupDeleteJobResponse extends AcknowledgedResponseBase { - task_failures?: RollupDeleteJobTaskFailure[] -} - -export interface RollupDeleteJobTaskFailure { - task_id: TaskId - node_id: Id - status: string - reason: RollupDeleteJobTaskFailureReason -} - -export interface RollupDeleteJobTaskFailureReason { - type: string - reason: string + task_failures?: TaskFailure[] } export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' @@ -13814,7 +14269,7 @@ export interface RollupGetRollupIndexCapsIndexCapabilities { } export interface RollupGetRollupIndexCapsRequest extends RequestBase { - index: Id + index: Ids } export interface RollupGetRollupIndexCapsResponse extends DictionaryResponseBase { @@ -13895,6 +14350,30 @@ export interface RollupStopJobResponse { export type SearchableSnapshotsStatsLevel = 'cluster' | 'indices' | 'shards' +export interface SearchableSnapshotsCacheStatsNode { + shared_cache: SearchableSnapshotsCacheStatsShared +} + +export interface SearchableSnapshotsCacheStatsRequest extends RequestBase { + node_id?: NodeIds + master_timeout?: Time +} + +export interface SearchableSnapshotsCacheStatsResponse { + nodes: Record +} + +export interface SearchableSnapshotsCacheStatsShared { + reads: long + bytes_read_in_bytes: ByteSize + writes: long + 
bytes_written_in_bytes: ByteSize + evictions: long + num_regions: integer + size_in_bytes: ByteSize + region_size_in_bytes: ByteSize +} + export interface SearchableSnapshotsClearCacheRequest extends RequestBase { index?: Indices expand_wildcards?: ExpandWildcards @@ -13992,7 +14471,7 @@ export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity | SecurityFieldSecurity[] names: Indices privileges: SecurityIndexPrivilege[] - query?: QueryDslQueryContainer + query?: string[] | QueryDslQueryContainer | SecurityRoleTemplateQueryContainer allow_restricted_indices?: boolean } @@ -14024,6 +14503,22 @@ export interface SecurityRoleMappingRule { except?: SecurityRoleMappingRule } +export interface SecurityRoleTemplateInlineScript extends ScriptBase { + lang?: ScriptLanguage + options?: Record + source: string | QueryDslQueryContainer +} + +export interface SecurityRoleTemplateQueryContainer { + template?: SecurityRoleTemplateScript +} + +export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | string | QueryDslQueryContainer | StoredScriptId + +export interface SecurityTransientMetadataConfig { + enabled: boolean +} + export interface SecurityUser { email?: string | null full_name?: Name | null @@ -14167,7 +14662,7 @@ export interface SecurityDeletePrivilegesFoundStatus { export interface SecurityDeletePrivilegesRequest extends RequestBase { application: Name - name: Name + name: Names refresh?: Refresh } @@ -14228,6 +14723,31 @@ export interface SecurityEnableUserRequest extends RequestBase { export interface SecurityEnableUserResponse { } +export interface SecurityEnrollKibanaRequest extends RequestBase { +} + +export interface SecurityEnrollKibanaResponse { + token: SecurityEnrollKibanaToken + http_ca: string +} + +export interface SecurityEnrollKibanaToken { + name: string + value: string +} + +export interface SecurityEnrollNodeRequest extends RequestBase { +} + +export interface SecurityEnrollNodeResponse { + http_ca_key: string + http_ca_cert: string + transport_ca_cert: string + transport_key: string + transport_cert: string + nodes_addresses: string[] +} + export interface SecurityGetApiKeyRequest extends RequestBase { id?: Id name?: Name @@ -14250,7 +14770,7 @@ export interface SecurityGetBuiltinPrivilegesResponse { export interface SecurityGetPrivilegesRequest extends RequestBase { application?: Name - name?: Name + name?: Names } export interface SecurityGetPrivilegesResponse extends DictionaryResponseBase> { @@ -14268,7 +14788,7 @@ export interface SecurityGetRoleRole { indices: SecurityIndicesPrivileges[] metadata: Metadata run_as: string[] - transient_metadata: SecurityGetRoleTransientMetadata + transient_metadata: SecurityTransientMetadataConfig applications: SecurityApplicationPrivileges[] role_templates?: SecurityGetRoleRoleTemplate[] global?: Record>> @@ -14281,10 +14801,6 @@ export interface SecurityGetRoleRoleTemplate { export type SecurityGetRoleTemplateFormat = 'string' | 'json' -export interface SecurityGetRoleTransientMetadata { - enabled: boolean -} - export interface SecurityGetRoleMappingRequest extends RequestBase { name?: Names } @@ -14307,7 +14823,7 @@ export interface SecurityGetServiceAccountsRoleDescriptor { applications?: SecurityApplicationPrivileges[] metadata?: Metadata run_as?: string[] - transient_metadata?: Record + transient_metadata?: SecurityTransientMetadataConfig } export interface SecurityGetServiceAccountsRoleDescriptorWrapper { @@ -14504,7 +15020,7 @@ export interface SecurityPutRoleRequest extends 
RequestBase { indices?: SecurityIndicesPrivileges[] metadata?: Metadata run_as?: string[] - transient_metadata?: SecurityGetRoleTransientMetadata + transient_metadata?: SecurityTransientMetadataConfig } export interface SecurityPutRoleResponse { @@ -14556,6 +15072,70 @@ export interface SecurityQueryApiKeysResponse { api_keys: SecurityApiKey[] } +export interface SecuritySamlAuthenticateRequest extends RequestBase { + content: string + ids: Ids + realm?: string +} + +export interface SecuritySamlAuthenticateResponse { + access_token: string + username: string + expires_in: integer + refresh_token: string + realm: string +} + +export interface SecuritySamlCompleteLogoutRequest extends RequestBase { + realm: string + ids: Ids + query_string?: string + content?: string +} + +export type SecuritySamlCompleteLogoutResponse = boolean + +export interface SecuritySamlInvalidateRequest extends RequestBase { + acs?: string + query_string: string + realm?: string +} + +export interface SecuritySamlInvalidateResponse { + invalidated: integer + realm: string + redirect: string +} + +export interface SecuritySamlLogoutRequest extends RequestBase { + token: string + refresh_token?: string +} + +export interface SecuritySamlLogoutResponse { + redirect: string +} + +export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase { + acs?: string + realm?: string + relay_state?: string +} + +export interface SecuritySamlPrepareAuthenticationResponse { + id: Id + realm: string + redirect: string +} + +export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase { + realm_name: Name +} + +export interface SecuritySamlServiceProviderMetadataResponse { + metadata: string +} + export interface ShutdownDeleteNodeRequest extends RequestBase { node_id: NodeId } @@ -14845,6 +15425,8 @@ export interface SnapshotSnapshotShardsStatus { stats: SnapshotShardsStatsSummary } +export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_count' | 'repository' | 'shard_count' | 'failed_shard_count' + export interface SnapshotSnapshotStats { incremental: SnapshotFileCountSnapshotStats start_time_in_millis: long @@ -14905,7 +15487,7 @@ export interface SnapshotCreateRequest extends RequestBase { export interface SnapshotCreateResponse { accepted?: boolean - snapshot: SnapshotSnapshotInfo + snapshot?: SnapshotSnapshotInfo } export interface SnapshotCreateRepositoryRequest extends RequestBase { @@ -14948,6 +15530,13 @@ export interface SnapshotGetRequest extends RequestBase { index_details?: boolean human?: boolean include_repository?: boolean + sort?: SnapshotSnapshotSort + size?: integer + order?: SortOrder + after?: string + offset?: integer + from_sort_value?: string + slm_policy_filter?: Name } export interface SnapshotGetResponse { @@ -15023,6 +15612,13 @@ export interface SnapshotVerifyRepositoryResponse { nodes: Record } +export interface SqlColumn { + name: Name + type: string +} + +export type SqlRow = any[] + export interface SqlClearCursorRequest extends RequestBase { cursor: string } @@ -15031,13 +15627,46 @@ export interface SqlClearCursorResponse { succeeded: boolean } -export interface SqlQueryColumn { - name: Name - type: string +export interface SqlDeleteAsyncRequest extends RequestBase { + id: Id +} + +export interface SqlDeleteAsyncResponse extends AcknowledgedResponseBase { +} + +export interface SqlGetAsyncRequest extends RequestBase { + id: Id + delimiter?: string + format?: string + keep_alive?: Time + wait_for_completion_timeout?: Time +} + +export 
interface SqlGetAsyncResponse { + id: Id + is_running: boolean + is_partial: boolean + columns?: SqlColumn[] + cursor?: string + rows: SqlRow[] +} + +export interface SqlGetAsyncStatusRequest extends RequestBase { + id: Id +} + +export interface SqlGetAsyncStatusResponse { + id: string + is_running: boolean + is_partial: boolean + start_time_in_millis: ulong + expiration_time_in_millis: ulong + completion_status?: uint } export interface SqlQueryRequest extends RequestBase { format?: string + catalog?: string columnar?: boolean cursor?: string fetch_size?: integer @@ -15047,16 +15676,23 @@ export interface SqlQueryRequest extends RequestBase { page_timeout?: Time time_zone?: string field_multi_value_leniency?: boolean + runtime_mappings?: MappingRuntimeFields + wait_for_completion_timeout?: Time + params?: Record + keep_alive?: Time + keep_on_completion?: boolean + index_using_frozen?: boolean } export interface SqlQueryResponse { - columns?: SqlQueryColumn[] + id?: Id + is_running?: boolean + is_partial?: boolean + columns?: SqlColumn[] cursor?: string - rows: SqlQueryRow[] + rows: SqlRow[] } -export type SqlQueryRow = any[] - export interface SqlTranslateRequest extends RequestBase { fetch_size?: integer filter?: QueryDslQueryContainer @@ -15088,36 +15724,45 @@ export type SslCertificatesResponse = SslCertificatesCertificateInformation[] export type TasksGroupBy = 'nodes' | 'parents' | 'none' -export interface TasksInfo { - action: string - cancellable: boolean - children?: TasksInfo[] - description?: string - headers: HttpHeaders - id: long - node: string - running_time_in_nanos: long - start_time_in_millis: long - status?: TasksStatus - type: string - parent_task_id?: Id +export interface TasksNodeTasks { + name?: NodeId + transport_address?: TransportAddress + host?: Host + ip?: Ip + roles?: string[] + attributes?: Record + tasks: Record +} + +export interface TasksParentTaskInfo extends TasksTaskInfo { + children?: TasksTaskInfo[] } -export interface TasksState { +export interface TasksTaskInfo { action: string cancellable: boolean description?: string - headers: HttpHeaders + headers: Record id: long - node: string - parent_task_id?: TaskId + node: NodeId + running_time?: string running_time_in_nanos: long start_time_in_millis: long - status?: TasksStatus + status?: TasksTaskStatus type: string + parent_task_id?: TaskId +} + +export type TasksTaskInfos = TasksTaskInfo[] | Record + +export interface TasksTaskListResponseBase { + node_failures?: ErrorCause[] + task_failures?: TaskFailure[] + nodes?: Record + tasks?: TasksTaskInfos } -export interface TasksStatus { +export interface TasksTaskStatus { batches: long canceled?: string created: long @@ -15137,10 +15782,6 @@ export interface TasksStatus { version_conflicts: long } -export interface TasksTaskExecutingNode extends SpecUtilsBaseNode { - tasks: Record -} - export interface TasksCancelRequest extends RequestBase { task_id?: TaskId actions?: string | string[] @@ -15149,9 +15790,7 @@ export interface TasksCancelRequest extends RequestBase { wait_for_completion?: boolean } -export interface TasksCancelResponse { - node_failures?: ErrorCause[] - nodes: Record +export interface TasksCancelResponse extends TasksTaskListResponseBase { } export interface TasksGetRequest extends RequestBase { @@ -15162,8 +15801,8 @@ export interface TasksGetRequest extends RequestBase { export interface TasksGetResponse { completed: boolean - task: TasksInfo - response?: TasksStatus + task: TasksTaskInfo + response?: TasksTaskStatus error?: ErrorCause } 
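// The SQL hunks above add asynchronous execution to the query types
// (wait_for_completion_timeout, keep_on_completion, keep_alive) and introduce
// the SqlGetAsync* request/response pair. A minimal sketch of how a caller
// might drive them — the node URL and the 'library' index are hypothetical,
// not part of this change:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

async function run (): Promise<void> {
  // Start a query, but return early if it is still running after 2 seconds.
  const response = await client.sql.query({
    query: 'SELECT author, COUNT(*) FROM library GROUP BY author',
    wait_for_completion_timeout: '2s',
    keep_on_completion: true, // store the result so it can be fetched later
    keep_alive: '5m'
  })

  if (response.is_running === true && response.id != null) {
    // SqlGetAsyncResponse: poll the stored result by its async id.
    const async = await client.sql.getAsync({ id: response.id })
    console.log(async.rows)
  } else {
    console.log(response.rows)
  }
}

run().catch(console.log)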
@@ -15178,10 +15817,7 @@ export interface TasksListRequest extends RequestBase { wait_for_completion?: boolean } -export interface TasksListResponse { - node_failures?: ErrorCause[] - nodes?: Record - tasks?: TasksInfo[] | Record +export interface TasksListResponse extends TasksTaskListResponseBase { } export interface TextStructureFindStructureFieldStat { @@ -15279,6 +15915,7 @@ export interface TransformRetentionPolicyContainer { export interface TransformSettings { align_checkpoints?: boolean dates_as_epoch_millis?: boolean + deduce_mappings?: boolean docs_per_second?: float max_page_search_size?: integer } @@ -15354,7 +15991,7 @@ export interface TransformGetTransformStatsCheckpointing { } export interface TransformGetTransformStatsRequest extends RequestBase { - transform_id: Name + transform_id: Names allow_no_match?: boolean from?: long size?: long @@ -15525,9 +16162,12 @@ export interface WatcherAction { throttle_period?: Time throttle_period_in_millis?: EpochMillis transform?: TransformContainer - index?: WatcherIndex - logging?: WatcherLogging - webhook?: WatcherActionWebhook + index?: WatcherIndexAction + logging?: WatcherLoggingAction + email?: WatcherEmailAction + pagerduty?: WatcherPagerDutyAction + slack?: WatcherSlackAction + webhook?: WatcherWebhookAction } export type WatcherActionExecutionMode = 'simulate' | 'force_simulate' | 'execute' | 'force_execute' | 'skip' @@ -15543,11 +16183,6 @@ export type WatcherActionStatusOptions = 'success' | 'failure' | 'simulated' | ' export type WatcherActionType = 'email' | 'webhook' | 'index' | 'logging' | 'slack' | 'pagerduty' -export interface WatcherActionWebhook { - host: Host - port: integer -} - export type WatcherActions = Record export interface WatcherActivationState { @@ -15610,6 +16245,12 @@ export interface WatcherDailySchedule { at: WatcherTimeOfDay[] } +export type WatcherDataAttachmentFormat = 'json' | 'yaml' + +export interface WatcherDataEmailAttachment { + format?: WatcherDataAttachmentFormat +} + export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday' export interface WatcherEmail { @@ -15617,17 +16258,26 @@ export interface WatcherEmail { body?: WatcherEmailBody cc?: string[] from?: string - id: Id priority?: WatcherEmailPriority reply_to?: string[] - sent_date: DateString + sent_date?: DateString subject: string to: string[] + attachments?: Record +} + +export interface WatcherEmailAction extends WatcherEmail { +} + +export interface WatcherEmailAttachmentContainer { + http?: WatcherHttpEmailAttachment + reporting?: WatcherReportingEmailAttachment + data?: WatcherDataEmailAttachment } export interface WatcherEmailBody { - html: string - text: string + html?: string + text?: string } export type WatcherEmailPriority = 'lowest' | 'low' | 'normal' | 'high' | 'highest' @@ -15696,6 +16346,12 @@ export interface WatcherHourlySchedule { minute: integer[] } +export interface WatcherHttpEmailAttachment { + content_type?: string + inline?: boolean + request?: WatcherHttpInputRequestDefinition +} + export interface WatcherHttpInput { http?: WatcherHttpInput extract?: string[] @@ -15744,10 +16400,13 @@ export interface WatcherHttpInputResponseResult { status: integer } -export interface WatcherIndex { +export interface WatcherIndexAction { index: IndexName doc_id?: Id refresh?: Refresh + op_type?: OpType + timeout?: Time + execution_time_field?: Field } export interface WatcherIndexResult { @@ -15771,7 +16430,7 @@ export interface WatcherInputContainer { export type 
WatcherInputType = 'http' | 'search' | 'simple' -export interface WatcherLogging { +export interface WatcherLoggingAction { level?: string text: string category?: string @@ -15786,6 +16445,9 @@ export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | export interface WatcherNeverCondition { } +export interface WatcherPagerDutyAction extends WatcherPagerDutyEvent { +} + export interface WatcherPagerDutyContext { href?: string src?: string @@ -15795,14 +16457,21 @@ export interface WatcherPagerDutyContext { export type WatcherPagerDutyContextType = 'link' | 'image' export interface WatcherPagerDutyEvent { - account: string + account?: string attach_payload: boolean client?: string client_url?: string - contexts: WatcherPagerDutyContext[] - description?: string + contexts?: WatcherPagerDutyContext[] + context?: WatcherPagerDutyContext[] + description: string event_type?: WatcherPagerDutyEventType incident_key: string + proxy?: WatcherPagerDutyEventProxy +} + +export interface WatcherPagerDutyEventProxy { + host?: Host + port?: integer } export type WatcherPagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge' @@ -15824,6 +16493,14 @@ export interface WatcherQueryWatch { _seq_no?: SequenceNumber } +export interface WatcherReportingEmailAttachment { + url: string + inline?: boolean + retries?: integer + interval?: Time + request?: WatcherHttpInputRequestDefinition +} + export type WatcherResponseContentType = 'json' | 'yaml' | 'text' export interface WatcherScheduleContainer { @@ -15831,9 +16508,9 @@ export interface WatcherScheduleContainer { daily?: WatcherDailySchedule hourly?: WatcherHourlySchedule interval?: Time - monthly?: WatcherTimeOfMonth[] - weekly?: WatcherTimeOfWeek[] - yearly?: WatcherTimeOfYear[] + monthly?: WatcherTimeOfMonth | WatcherTimeOfMonth[] + weekly?: WatcherTimeOfWeek | WatcherTimeOfWeek[] + yearly?: WatcherTimeOfYear | WatcherTimeOfYear[] } export interface WatcherScheduleTriggerEvent { @@ -15842,9 +16519,10 @@ export interface WatcherScheduleTriggerEvent { } export interface WatcherScriptCondition { - lang: string + lang?: string params?: Record - source: string + source?: string + id?: string } export interface WatcherSearchInput { @@ -15872,6 +16550,11 @@ export interface WatcherSimulatedActions { use_all: boolean } +export interface WatcherSlackAction { + account?: string + message: WatcherSlackMessage +} + export interface WatcherSlackAttachment { author_icon?: string author_link?: string @@ -15973,6 +16656,9 @@ export interface WatcherWatchStatus { execution_state?: string } +export interface WatcherWebhookAction extends WatcherHttpInputRequestDefinition { +} + export interface WatcherWebhookResult { request: WatcherHttpInputRequestResult response?: WatcherHttpInputResponseResult @@ -16213,7 +16899,7 @@ export interface XpackInfoResponse { export interface XpackUsageAllJobs { count: integer detectors: Record - created_by: Record + created_by: Record model_size: Record forecasts: Record } @@ -16376,7 +17062,14 @@ export interface XpackUsageMlCounter { export interface XpackUsageMlDataFrameAnalyticsJobs { memory_usage?: XpackUsageMlDataFrameAnalyticsJobsMemory _all: XpackUsageMlDataFrameAnalyticsJobsCount - analysis_counts?: EmptyObject + analysis_counts?: XpackUsageMlDataFrameAnalyticsJobsAnalysis + stopped?: XpackUsageMlDataFrameAnalyticsJobsCount +} + +export interface XpackUsageMlDataFrameAnalyticsJobsAnalysis { + classification?: integer + outlier_detection?: integer + regression?: integer } export interface 
XpackUsageMlDataFrameAnalyticsJobsCount { @@ -16431,6 +17124,7 @@ export interface XpackUsageMlInferenceTrainedModelsCount { other: long regression?: long classification?: long + ner?: long } export interface XpackUsageMonitoring extends XpackUsageBase { @@ -16641,7 +17335,7 @@ export interface SpecUtilsCommonCatQueryParameters { help?: boolean local?: boolean master_timeout?: Time - s?: string[] + s?: Names v?: boolean } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index a16e076bc..1406e71b1 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -56,7 +56,7 @@ export interface BulkOperationContainer { export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' -export interface BulkRequest extends RequestBase { +export interface BulkRequest extends RequestBase { index?: IndexName pipeline?: string refresh?: Refresh @@ -68,7 +68,7 @@ export interface BulkRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards require_alias?: boolean /** @deprecated The use of the 'body' key has been deprecated, use 'operations' instead. */ - body?: (BulkOperationContainer | TSource)[] + body?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] } export interface BulkResponse { @@ -92,6 +92,16 @@ export interface BulkResponseItem { get?: InlineGet> } +export interface BulkUpdateAction { + detect_noop?: boolean + doc?: TPartialDocument + doc_as_upsert?: boolean + script?: Script + scripted_upsert?: boolean + _source?: SearchSourceConfig + upsert?: TDocument +} + export interface BulkUpdateOperation extends BulkOperationBase { require_alias?: boolean retry_on_conflict?: integer @@ -565,7 +575,7 @@ export interface MgetResponse { export type MgetResponseItem = GetGetResult | MgetMultiGetError export interface MsearchMultiSearchItem extends SearchResponse { - status: integer + status?: integer } export interface MsearchMultiSearchResult { @@ -1086,7 +1096,7 @@ export interface SearchAggregationProfile { description: string time_in_nanos: long type: string - debug?: SearchAggregationProfileDebug | SearchAggregationProfileDelegateDebug + debug?: SearchAggregationProfileDebug children?: SearchAggregationProfile[] } @@ -1099,7 +1109,7 @@ export interface SearchAggregationProfileDebug { result_strategy?: string has_filter?: boolean delegate?: string - delegate_debug?: SearchAggregationProfileDelegateDebug + delegate_debug?: SearchAggregationProfileDebug chars_fetched?: integer extract_count?: integer extract_ns?: integer @@ -1113,9 +1123,6 @@ export interface SearchAggregationProfileDebug { numeric_collectors_used?: integer empty_collectors_used?: integer deferred_aggregators?: string[] -} - -export interface SearchAggregationProfileDelegateDebug { segments_with_doc_count_field?: integer segments_with_deleted_docs?: integer filters?: SearchAggregationProfileDelegateDebugFilter[] @@ -1146,6 +1153,10 @@ export interface SearchCompletionContext { prefix?: boolean } +export interface SearchCompletionSuggest extends SearchSuggestBase { + options: SearchCompletionSuggestOption[] +} + export interface SearchCompletionSuggestOption { collate_match?: boolean contexts?: Record @@ -1153,8 +1164,8 @@ export interface SearchCompletionSuggestOption { _id: string _index: IndexName _routing?: Routing - _score: double - _source: TDocument + _score?: double + _source?: TDocument text: string } @@ -1345,6 +1356,10 @@ export interface SearchNestedIdentity { _nested?: SearchNestedIdentity } +export interface SearchPhraseSuggest extends 
SearchSuggestBase { + options: SearchPhraseSuggestOption +} + export interface SearchPhraseSuggestCollate { params?: Record prune?: boolean @@ -1471,10 +1486,11 @@ export interface SearchStupidBackoffSmoothingModel { discount: double } -export interface SearchSuggest { +export type SearchSuggest = SearchCompletionSuggest | SearchPhraseSuggest | SearchTermSuggest + +export interface SearchSuggestBase { length: integer offset: integer - options: SearchSuggestOption[] text: string } @@ -1486,8 +1502,6 @@ export interface SearchSuggestFuzziness { unicode_aware: boolean } -export type SearchSuggestOption = SearchCompletionSuggestOption | SearchPhraseSuggestOption | SearchTermSuggestOption - export type SearchSuggestSort = 'score' | 'frequency' export interface SearchSuggesterKeys { @@ -1502,9 +1516,13 @@ export interface SearchSuggesterBase { size?: integer } +export interface SearchTermSuggest extends SearchSuggestBase { + options: SearchTermSuggestOption +} + export interface SearchTermSuggestOption { text: string - freq?: long + freq: long score: double } @@ -1614,10 +1632,20 @@ export interface SearchTemplateRequest extends RequestBase { } export interface SearchTemplateResponse { - _shards: ShardStatistics + took: long timed_out: boolean - took: integer + _shards: ShardStatistics hits: SearchHitsMetadata + aggregations?: Record + _clusters?: ClusterStatistics + fields?: Record + max_score?: double + num_reduce_phases?: long + profile?: SearchProfile + pit_id?: Id + _scroll_id?: ScrollId + suggest?: Record[]> + terminated_early?: boolean } export interface TermsEnumRequest extends RequestBase { @@ -1807,7 +1835,7 @@ export interface UpdateByQueryRethrottleResponse { } export interface UpdateByQueryRethrottleUpdateByQueryRethrottleNode extends SpecUtilsBaseNode { - tasks: Record + tasks: Record } export interface SpecUtilsBaseNode { @@ -1851,10 +1879,6 @@ export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb' export type CategoryId = string -export interface ChainTransform { - transforms: TransformContainer[] -} - export interface ClusterStatistics { skipped: integer successful: integer @@ -1876,6 +1900,12 @@ export interface CoordsGeoBounds { right: double } +export type DFIIndependenceMeasure = 'standardized' | 'saturated' | 'chisquared' + +export type DFRAfterEffect = 'no' | 'b' | 'l' + +export type DFRBasicModel = 'be' | 'd' | 'g' | 'if' | 'in' | 'ine' | 'p' + export type DataStreamName = string export type DataStreamNames = DataStreamName | DataStreamName[] @@ -1886,6 +1916,8 @@ export type DateMath = string export type DateMathTime = string +export type DateOrEpochMillis = DateString | EpochMillis + export type DateString = string export interface DictionaryResponseBase { @@ -2038,6 +2070,10 @@ export type Host = string export type HttpHeaders = Record +export type IBDistribution = 'll' | 'spl' + +export type IBLambda = 'df' | 'ttf' + export type Id = string export type Ids = Id | Id[] @@ -2178,7 +2214,7 @@ export interface NodeShard { allocation_id?: Record recovery_source?: Record unassigned_info?: ClusterAllocationExplainUnassignedInformation - relocating_node?: null + relocating_node?: NodeId | null } export interface NodeStatistics { @@ -2188,6 +2224,8 @@ export interface NodeStatistics { failed: integer } +export type Normalization = 'no' | 'h1' | 'h2' | 'h3' | 'z' + export type OpType = 'index' | 'create' export type Password = string @@ -2290,8 +2328,10 @@ export interface ScriptSort { export type ScriptSortType = 'string' | 'number' export interface ScriptTransform { - 
lang: string - params: Record + lang?: string + params?: Record + source?: string + id?: string } export type ScrollId = string @@ -2330,7 +2370,7 @@ export interface SegmentsStats { index_writer_memory?: ByteSize index_writer_max_memory_in_bytes?: integer index_writer_memory_in_bytes: integer - max_unsafe_auto_id_timestamp: integer + max_unsafe_auto_id_timestamp: long memory?: ByteSize memory_in_bytes: integer norms_memory?: ByteSize @@ -2419,6 +2459,13 @@ export type SuggestMode = 'missing' | 'popular' | 'always' export type SuggestionName = string +export interface TaskFailure { + task_id: long + node_id: NodeId + status: string + reason: ErrorCause +} + export type TaskId = string | integer export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' @@ -2444,7 +2491,7 @@ export interface TopRightBottomLeftGeoBounds { } export interface TransformContainer { - chain?: ChainTransform + chain?: TransformContainer[] script?: ScriptTransform search?: SearchTransform } @@ -2471,7 +2518,7 @@ export type VersionString = string export type VersionType = 'internal' | 'external' | 'external_gte' | 'force' -export type WaitForActiveShardOptions = 'all' +export type WaitForActiveShardOptions = 'all' | 'index-setting' export type WaitForActiveShards = integer | WaitForActiveShardOptions @@ -2527,21 +2574,21 @@ export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMulti export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys & { [property: string]: AggregationsAggregate | long } -export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | 
AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsChildrenAggregate | AggregationsGeoLineAggregate +export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate export interface AggregationsAggregateBase { - meta?: Record + meta?: Metadata } export interface AggregationsAggregation { - meta?: Record + meta?: Metadata name?: string } export interface AggregationsAggregationContainer { aggregations?: Record aggs?: Record - meta?: Record + meta?: Metadata adjacency_matrix?: AggregationsAdjacencyMatrixAggregation auto_date_histogram?: AggregationsAutoDateHistogramAggregation avg?: AggregationsAverageAggregation @@ -2746,31 +2793,24 @@ export interface AggregationsCategorizeTextAggregation extends AggregationsAggre max_matched_tokens?: integer similarity_threshold?: integer categorization_filters?: string[] - categorization_analyzer?: string | AggregationsCategorizeTextAnalyzer + categorization_analyzer?: AggregationsCategorizeTextAnalyzer shard_size?: integer size?: integer min_doc_count?: integer shard_min_doc_count?: integer } 
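// categorization_analyzer above now accepts either the name of an analyzer or
// a custom definition (AggregationsCustomCategorizeTextAnalyzer, added below).
// A hedged sketch of the custom form — the 'logs' index and 'message' field
// are placeholders, not part of this change:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

async function categorize (): Promise<void> {
  const response = await client.search({
    index: 'logs',
    size: 0,
    aggregations: {
      message_categories: {
        categorize_text: {
          field: 'message',
          // Custom analyzer definition; a plain string such as 'standard'
          // would select a named analyzer instead.
          categorization_analyzer: {
            tokenizer: 'ml_standard',
            filter: ['lowercase']
          }
        }
      }
    }
  })
  console.log(response.aggregations)
}

categorize().catch(console.log)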
-export interface AggregationsCategorizeTextAnalyzer { - char_filter?: string[] - tokenizer?: string - filter?: string[] -} +export type AggregationsCategorizeTextAnalyzer = string | AggregationsCustomCategorizeTextAnalyzer export interface AggregationsChiSquareHeuristic { background_is_superset: boolean include_negatives: boolean } -export interface AggregationsChildrenAggregate extends AggregationsMultiBucketAggregateBase { -} - -export interface AggregationsChildrenAggregateBucketKeys extends AggregationsMultiBucketBase { +export interface AggregationsChildrenAggregateKeys extends AggregationsSingleBucketAggregateBase { } -export type AggregationsChildrenAggregateBucket = AggregationsChildrenAggregateBucketKeys -& { [property: string]: AggregationsAggregate | long } +export type AggregationsChildrenAggregate = AggregationsChildrenAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase { type?: RelationName @@ -2818,6 +2858,12 @@ export interface AggregationsCumulativeCardinalityAggregation extends Aggregatio export interface AggregationsCumulativeSumAggregation extends AggregationsPipelineAggregationBase { } +export interface AggregationsCustomCategorizeTextAnalyzer { + char_filter?: string[] + tokenizer?: string + filter?: string[] +} + export interface AggregationsDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { } @@ -2932,8 +2978,10 @@ export interface AggregationsExtendedStatsBucketAggregation extends Aggregations export type AggregationsFieldDateMath = DateMath | double -export interface AggregationsFilterAggregate extends AggregationsSingleBucketAggregateBase { +export interface AggregationsFilterAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsFilterAggregate = AggregationsFilterAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsFiltersAggregate extends AggregationsMultiBucketAggregateBase { } @@ -3044,8 +3092,10 @@ export interface AggregationsGeoTileGridBucketKeys extends AggregationsMultiBuck export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys & { [property: string]: AggregationsAggregate | GeoTile | long } -export interface AggregationsGlobalAggregate extends AggregationsSingleBucketAggregateBase { +export interface AggregationsGlobalAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsGlobalAggregate = AggregationsGlobalAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsGlobalAggregation extends AggregationsBucketAggregationBase { } @@ -3263,8 +3313,10 @@ export type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' | export type AggregationsMissing = string | integer | double | boolean -export interface AggregationsMissingAggregate extends AggregationsSingleBucketAggregateBase { +export interface AggregationsMissingAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsMissingAggregate = AggregationsMissingAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase { field?: Field @@ -3325,8 +3377,10 @@ export interface AggregationsMutualInformationHeuristic { include_negatives?: boolean } -export interface AggregationsNestedAggregate extends 
AggregationsSingleBucketAggregateBase { +export interface AggregationsNestedAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsNestedAggregate = AggregationsNestedAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase { path?: Field @@ -3338,6 +3392,11 @@ export interface AggregationsNormalizeAggregation extends AggregationsPipelineAg export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax' +export interface AggregationsParentAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsParentAggregate = AggregationsParentAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + export interface AggregationsParentAggregation extends AggregationsBucketAggregationBase { type?: RelationName } @@ -3394,6 +3453,7 @@ export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase to?: double from_as_string?: string to_as_string?: string + key?: string } export type AggregationsRangeBucket = AggregationsRangeBucketKeys & { [property: string]: AggregationsAggregate | double | string | long } @@ -3425,15 +3485,19 @@ export interface AggregationsRegressionInferenceOptions { num_top_feature_importance_values?: integer } -export interface AggregationsReverseNestedAggregate extends AggregationsSingleBucketAggregateBase { +export interface AggregationsReverseNestedAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase { path?: Field } -export interface AggregationsSamplerAggregate extends AggregationsSingleBucketAggregateBase { +export interface AggregationsSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase { } +export type AggregationsSamplerAggregate = AggregationsSamplerAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase { shard_size?: integer @@ -3592,7 +3656,7 @@ export interface AggregationsStringStatsAggregate extends AggregationsAggregateB max_length: integer | null avg_length: double | null entropy: double | null - distribution?: string | null + distribution?: Record | null min_length_as_string?: string max_length_as_string?: string avg_length_as_string?: string @@ -3716,7 +3780,8 @@ export interface AggregationsTopMetrics { metrics: Record } -export interface AggregationsTopMetricsAggregate extends AggregationsMultiBucketAggregateBase { +export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBase { + top: AggregationsTopMetrics[] } export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase { @@ -3725,12 +3790,6 @@ export interface AggregationsTopMetricsAggregation extends AggregationsMetricAgg sort?: Sort } -export interface AggregationsTopMetricsBucketKeys extends AggregationsMultiBucketBase { - top: AggregationsTopMetrics[] -} -export type AggregationsTopMetricsBucket = AggregationsTopMetricsBucketKeys -& { [property: string]: AggregationsAggregate | AggregationsTopMetrics[] | long } - export interface AggregationsTopMetricsValue { field: Field } @@ -3738,6 
+3797,11 @@ export interface AggregationsTopMetricsValue { export interface AggregationsUnmappedRareTermsAggregate extends AggregationsMultiBucketAggregateBase { } +export interface AggregationsUnmappedSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsUnmappedSamplerAggregate = AggregationsUnmappedSamplerAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsMultiBucketAggregateBase { } @@ -4179,9 +4243,9 @@ export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBa export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase { type: 'pattern_replace' - flags: string + flags?: string pattern: string - replacement: string + replacement?: string } export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase { @@ -4760,7 +4824,7 @@ export interface MappingRuntimeField { export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' -export type MappingRuntimeFields = Record +export type MappingRuntimeFields = Record export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase { type: 'scaled_float' @@ -5695,9 +5759,9 @@ export interface AsyncSearchStatusRequest extends RequestBase { id: Id } -export interface AsyncSearchStatusResponse extends AsyncSearchAsyncSearchResponseBase { +export interface AsyncSearchStatusResponse extends AsyncSearchAsyncSearchResponseBase { _shards: ShardStatistics - completion_status: integer + completion_status?: integer } export interface AsyncSearchSubmitRequest extends RequestBase { @@ -5848,9 +5912,25 @@ export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { export interface AutoscalingPutAutoscalingPolicyResponse extends AcknowledgedResponseBase { } +export type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 
'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's' + +export type CatCatAnonalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[] + +export type CatCatDatafeedColumn = 'ae' | 'assignment_explanation' | 'bc' | 'buckets.count' | 'bucketsCount' | 'id' | 'na' | 'node.address' | 'nodeAddress' | 'ne' | 'node.ephemeral_id' | 'nodeEphemeralId' | 'ni' | 'node.id' | 'nodeId' | 'nn' | 'node.name' | 'nodeName' | 'sba' | 'search.bucket_avg' | 'searchBucketAvg' | 'sc' | 'search.count' | 'searchCount' | 'seah' | 'search.exp_avg_hour' | 'searchExpAvgHour' | 'st' | 'search.time' | 'searchTime' | 's' | 'state' + +export type CatCatDatafeedColumns = CatCatDatafeedColumn | CatCatDatafeedColumn[] + +export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | 'ct' | 'createTime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'failure_reason' | 'fr' | 'failureReason' | 'id' | 'model_memory_limit' | 'mml' | 'modelMemoryLimit' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'progress' | 'p' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'type' | 't' | 'version' | 'v' + +export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] + export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } +export type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'checkpoint' | 'cp' | 'checkpoint_duration_time_exp_avg' | 'cdtea' | 'checkpointTimeExpAvg' | 'checkpoint_progress' | 'c' | 'checkpointProgress' | 'create_time' | 'ct' | 'createTime' | 'delete_time' | 'dtime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'documents_deleted' | 'docd' | 'documents_indexed' | 'doci' | 'docs_per_second' | 'dps' | 'documents_processed' | 'docp' | 'frequency' | 'f' | 'id' | 'index_failure' | 'if' | 'index_time' | 'itime' | 'index_total' | 'it' | 'indexed_documents_exp_avg' | 'idea' | 'last_search_time' | 'lst' 
| 'lastSearchTime' | 'max_page_search_size' | 'mpsz' | 'pages_processed' | 'pp' | 'pipeline' | 'p' | 'processed_documents_exp_avg' | 'pdea' | 'processing_time' | 'pt' | 'reason' | 'r' | 'search_failure' | 'sf' | 'search_time' | 'stime' | 'search_total' | 'st' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'transform_type' | 'tt' | 'trigger_count' | 'tc' | 'version' | 'v' + +export type CatCatTransformColumns = CatCatTransformColumn | CatCatTransformColumn[] + export interface CatAliasesAliasesRecord { alias?: string a?: string @@ -6376,6 +6456,9 @@ export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { id?: Id allow_no_match?: boolean bytes?: Bytes + h?: CatCatDfaColumns + s?: CatCatDfaColumns + time?: Time } export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] @@ -6418,6 +6501,8 @@ export interface CatMlDatafeedsDatafeedsRecord { export interface CatMlDatafeedsRequest extends CatCatRequestBase { datafeed_id?: Id allow_no_match?: boolean + h?: CatCatDatafeedColumns + s?: CatCatDatafeedColumns time?: TimeUnit } @@ -6604,6 +6689,8 @@ export interface CatMlJobsRequest extends CatCatRequestBase { job_id?: Id allow_no_match?: boolean bytes?: Bytes + h?: CatCatAnonalyDetectorColumns + s?: CatCatAnonalyDetectorColumns time?: TimeUnit } @@ -7506,6 +7593,9 @@ export interface CatTransformsRequest extends CatCatRequestBase { transform_id?: Id allow_no_match?: boolean from?: integer + h?: CatCatTransformColumns + s?: CatCatTransformColumns + time?: Time size?: integer } @@ -8617,7 +8707,7 @@ export interface EnrichStatsCoordinatorStats { export interface EnrichStatsExecutingPolicy { name: Name - task: TasksInfo + task: TasksTaskInfo } export interface EnrichStatsRequest extends RequestBase { @@ -8686,7 +8776,7 @@ export interface EqlGetStatusResponse { } export interface EqlSearchRequest extends RequestBase { - index: IndexName + index: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean @@ -8706,8 +8796,9 @@ export interface EqlSearchRequest extends RequestBase { keep_on_completion?: boolean wait_for_completion_timeout?: Time size?: uint - fields?: QueryDslFieldAndFormat | Field + fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[] result_position?: EqlSearchResultPosition + runtime_mappings?: MappingRuntimeFields } } @@ -8750,6 +8841,128 @@ export interface FleetGlobalCheckpointsResponse { timed_out: boolean } +export interface FleetMsearchRequest extends RequestBase { + index: IndexName | IndexAlias + allow_no_indices?: boolean + ccs_minimize_roundtrips?: boolean + expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean + max_concurrent_searches?: long + max_concurrent_shard_requests?: long + pre_filter_shard_size?: long + search_type?: SearchType + rest_total_hits_as_int?: boolean + typed_keys?: boolean + wait_for_checkpoints?: FleetCheckpoint[] + allow_partial_search_results?: boolean + /** @deprecated The use of the 'body' key has been deprecated, use 'searches' instead. 
*/ + body?: MsearchRequestItem[] +} + +export interface FleetMsearchResponse { + docs: MsearchResponseItem[] +} + +export interface FleetSearchRequest extends RequestBase { + index: IndexName | IndexAlias + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + batched_reduce_size?: long + ccs_minimize_roundtrips?: boolean + default_operator?: QueryDslOperator + df?: string + docvalue_fields?: Fields + expand_wildcards?: ExpandWildcards + explain?: boolean + ignore_throttled?: boolean + ignore_unavailable?: boolean + lenient?: boolean + max_concurrent_shard_requests?: long + min_compatible_shard_node?: VersionString + preference?: string + pre_filter_shard_size?: long + request_cache?: boolean + routing?: Routing + scroll?: Time + search_type?: SearchType + stats?: string[] + stored_fields?: Fields + suggest_field?: Field + suggest_mode?: SuggestMode + suggest_size?: long + suggest_text?: string + terminate_after?: long + timeout?: Time + track_total_hits?: SearchTrackHits + track_scores?: boolean + typed_keys?: boolean + rest_total_hits_as_int?: boolean + version?: boolean + _source?: SearchSourceConfigParam + _source_excludes?: Fields + _source_includes?: Fields + seq_no_primary_term?: boolean + q?: string + size?: integer + from?: integer + sort?: string | string[] + wait_for_checkpoints?: FleetCheckpoint[] + allow_partial_search_results?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + aggregations?: Record + aggs?: Record + collapse?: SearchFieldCollapse + explain?: boolean + from?: integer + highlight?: SearchHighlight + track_total_hits?: SearchTrackHits + indices_boost?: Record[] + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + min_score?: double + post_filter?: QueryDslQueryContainer + profile?: boolean + query?: QueryDslQueryContainer + rescore?: SearchRescore | SearchRescore[] + script_fields?: Record + search_after?: SortResults + size?: integer + slice?: SlicedScroll + sort?: Sort + _source?: SearchSourceConfig + fields?: (QueryDslFieldAndFormat | Field)[] + suggest?: SearchSuggester + terminate_after?: long + timeout?: string + track_scores?: boolean + version?: boolean + seq_no_primary_term?: boolean + stored_fields?: Fields + pit?: SearchPointInTimeReference + runtime_mappings?: MappingRuntimeFields + stats?: string[] + } +} + +export interface FleetSearchResponse { + took: long + timed_out: boolean + _shards: ShardStatistics + hits: SearchHitsMetadata + aggregations?: Record + _clusters?: ClusterStatistics + fields?: Record + max_score?: double + num_reduce_phases?: long + profile?: SearchProfile + pit_id?: Id + _scroll_id?: ScrollId + suggest?: Record[]> + terminated_early?: boolean +} + export interface GraphConnection { doc_count: long source: long @@ -8928,6 +9141,25 @@ export interface IlmGetStatusResponse { operation_mode: LifecycleOperationMode } +export interface IlmMigrateToDataTiersRequest extends RequestBase { + dry_run?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + legacy_template_to_delete?: string + node_attribute?: string + } +} + +export interface IlmMigrateToDataTiersResponse { + dry_run: boolean + removed_legacy_template: string + migrated_ilm_policies: string[] + migrated_indices: Indices + migrated_legacy_templates: string[] + migrated_composable_templates: string[] + migrated_component_templates: string[] +} + export interface IlmMoveToStepRequest extends RequestBase { index: IndexName /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ @@ -9009,6 +9241,10 @@ export interface IndicesAliasDefinition { is_hidden?: boolean } +export interface IndicesCacheQueries { + enabled: boolean +} + export interface IndicesDataStream { name: DataStreamName timestamp_field: IndicesDataStreamTimestampField @@ -9079,8 +9315,8 @@ export interface IndicesIndexRoutingRebalance { export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replicas' | 'none' export interface IndicesIndexSegmentSort { - field: Fields - order: IndicesSegmentSortOrder | IndicesSegmentSortOrder[] + field?: Fields + order?: IndicesSegmentSortOrder | IndicesSegmentSortOrder[] mode?: IndicesSegmentSortMode | IndicesSegmentSortMode[] missing?: IndicesSegmentSortMissing | IndicesSegmentSortMissing[] } @@ -9096,120 +9332,57 @@ export interface IndicesIndexSettingBlocks { export interface IndicesIndexSettings { index?: IndicesIndexSettings mode?: string - 'index.mode'?: string - routing_path?: string[] - 'index.routing_path'?: string[] + routing_path?: string | string[] soft_deletes?: IndicesSoftDeletes - 'index.soft_deletes'?: IndicesSoftDeletes - 'soft_deletes.enabled'?: boolean - 'index.soft_deletes.enabled'?: boolean - 'soft_deletes.retention_lease.period'?: Time - 'index.soft_deletes.retention_lease.period'?: Time sort?: IndicesIndexSegmentSort - 'index.sort'?: IndicesIndexSegmentSort number_of_shards?: integer | string - 'index.number_of_shards'?: integer | string number_of_replicas?: integer | string - 'index.number_of_replicas'?: integer | string number_of_routing_shards?: integer - 'index.number_of_routing_shards'?: integer check_on_startup?: IndicesIndexCheckOnStartup - 'index.check_on_startup'?: IndicesIndexCheckOnStartup codec?: string - 'index.codec'?: string routing_partition_size?: integer - 'index.routing_partition_size'?: integer load_fixed_bitset_filters_eagerly?: boolean - 'index.load_fixed_bitset_filters_eagerly'?: boolean hidden?: boolean | string - 'index.hidden'?: boolean | string auto_expand_replicas?: string - 'index.auto_expand_replicas'?: string - 'merge.scheduler.max_thread_count'?: integer - 'index.merge.scheduler.max_thread_count'?: integer - 'merge.scheduler.max_merge_count'?: integer - 'index.merge.scheduler.max_merge_count'?: integer - 'search.idle.after'?: Time - 'index.search.idle.after'?: Time + merge?: IndicesMerge + search?: IndicesSettingsSearch refresh_interval?: Time - 'index.refresh_interval'?: Time max_result_window?: integer - 'index.max_result_window'?: integer max_inner_result_window?: integer - 'index.max_inner_result_window'?: integer max_rescore_window?: integer - 'index.max_rescore_window'?: integer max_docvalue_fields_search?: integer - 'index.max_docvalue_fields_search'?: integer max_script_fields?: integer - 'index.max_script_fields'?: integer max_ngram_diff?: integer - 'index.max_ngram_diff'?: integer max_shingle_diff?: integer - 'index.max_shingle_diff'?: integer blocks?: IndicesIndexSettingBlocks - 'index.blocks'?: IndicesIndexSettingBlocks - 
'blocks.read_only'?: boolean - 'index.blocks.read_only'?: boolean - 'blocks.read_only_allow_delete'?: boolean - 'index.blocks.read_only_allow_delete'?: boolean - 'blocks.read'?: boolean - 'index.blocks.read'?: boolean - 'blocks.write'?: boolean | string - 'index.blocks.write'?: boolean | string - 'blocks.metadata'?: boolean - 'index.blocks.metadata'?: boolean max_refresh_listeners?: integer - 'index.max_refresh_listeners'?: integer - 'analyze.max_token_count'?: integer - 'index.analyze.max_token_count'?: integer - 'highlight.max_analyzed_offset'?: integer - 'index.highlight.max_analyzed_offset'?: integer + analyze?: IndicesSettingsAnalyze + highlight?: IndicesSettingsHighlight max_terms_count?: integer - 'index.max_terms_count'?: integer max_regex_length?: integer - 'index.max_regex_length'?: integer routing?: IndicesIndexRouting - 'index.routing'?: IndicesIndexRouting gc_deletes?: Time - 'index.gc_deletes'?: Time default_pipeline?: PipelineName - 'index.default_pipeline'?: PipelineName final_pipeline?: PipelineName - 'index.final_pipeline'?: PipelineName lifecycle?: IndicesIndexSettingsLifecycle - 'index.lifecycle'?: IndicesIndexSettingsLifecycle - 'lifecycle.name'?: string - 'index.lifecycle.name'?: string provided_name?: Name - 'index.provided_name'?: Name creation_date?: DateString - 'index.creation_date'?: DateString creation_date_string?: DateString - 'index.creation_date_string'?: DateString uuid?: Uuid - 'index.uuid'?: Uuid version?: IndicesIndexVersioning - 'index.version'?: IndicesIndexVersioning - verified_before_close?: boolean - 'index.verified_before_close'?: boolean + verified_before_close?: boolean | string format?: string | integer - 'index.format'?: string | integer max_slices_per_scroll?: integer - 'index.max_slices_per_scroll'?: integer - 'translog.durability'?: string - 'index.translog.durability'?: string - 'translog.flush_threshold_size'?: string - 'index.translog.flush_threshold_size'?: string - 'query_string.lenient'?: boolean - 'index.query_string.lenient'?: boolean + translog?: IndicesTranslog + query_string?: IndicesSettingsQueryString priority?: integer | string - 'index.priority'?: integer | string top_metrics_max_size?: integer analysis?: IndicesIndexSettingsAnalysis - 'index.analysis'?: IndicesIndexSettingsAnalysis settings?: IndicesIndexSettings time_series?: IndicesIndexSettingsTimeSeries + shards?: integer + queries?: IndicesQueries + similarity?: IndicesSettingsSimilarity } export interface IndicesIndexSettingsAnalysis { @@ -9225,8 +9398,8 @@ export interface IndicesIndexSettingsLifecycle { } export interface IndicesIndexSettingsTimeSeries { - end_time: DateString - start_time: DateString + end_time?: DateOrEpochMillis + start_time?: DateOrEpochMillis } export interface IndicesIndexState { @@ -9242,21 +9415,109 @@ export interface IndicesIndexVersioning { created_string?: VersionString } +export interface IndicesMerge { + scheduler?: IndicesMergeScheduler +} + +export interface IndicesMergeScheduler { + max_thread_count?: integer + max_merge_count?: integer +} + export interface IndicesNumericFielddata { format: IndicesNumericFielddataFormat } export type IndicesNumericFielddataFormat = 'array' | 'disabled' +export interface IndicesQueries { + cache?: IndicesCacheQueries +} + +export interface IndicesRetentionLease { + period: Time +} + +export interface IndicesSearchIdle { + after?: Time +} + export type IndicesSegmentSortMissing = '_last' | '_first' -export type IndicesSegmentSortMode = 'min' | 'max' +export type IndicesSegmentSortMode = 'min' | 'MIN' 
| 'max' | 'MAX' + +export type IndicesSegmentSortOrder = 'asc' | 'ASC' | 'desc' | 'DESC' + +export interface IndicesSettingsAnalyze { + max_token_count?: integer +} + +export interface IndicesSettingsHighlight { + max_analyzed_offset?: integer +} + +export interface IndicesSettingsQueryString { + lenient: boolean +} + +export interface IndicesSettingsSearch { + idle: IndicesSearchIdle +} + +export interface IndicesSettingsSimilarity { + bm25?: IndicesSettingsSimilarityBm25 + dfi?: IndicesSettingsSimilarityDfi + dfr?: IndicesSettingsSimilarityDfr + ib?: IndicesSettingsSimilarityIb + lmd?: IndicesSettingsSimilarityLmd + lmj?: IndicesSettingsSimilarityLmj + scripted_tfidf?: IndicesSettingsSimilarityScriptedTfidf +} + +export interface IndicesSettingsSimilarityBm25 { + b: integer + discount_overlaps: boolean + k1: double + type: 'BM25' +} + +export interface IndicesSettingsSimilarityDfi { + independence_measure: DFIIndependenceMeasure + type: 'DFI' +} + +export interface IndicesSettingsSimilarityDfr { + after_effect: DFRAfterEffect + basic_model: DFRBasicModel + normalization: Normalization + type: 'DFR' +} + +export interface IndicesSettingsSimilarityIb { + distribution: IBDistribution + lambda: IBLambda + normalization: Normalization + type: 'IB' +} + +export interface IndicesSettingsSimilarityLmd { + mu: integer + type: 'LMDirichlet' +} + +export interface IndicesSettingsSimilarityLmj { + lambda: double + type: 'LMJelinekMercer' +} -export type IndicesSegmentSortOrder = 'asc' | 'desc' +export interface IndicesSettingsSimilarityScriptedTfidf { + script: Script + type: 'scripted' +} export interface IndicesSoftDeletes { enabled: boolean - 'retention_lease.period'?: Time + retention_lease?: IndicesRetentionLease } export interface IndicesStringFielddata { @@ -9274,6 +9535,16 @@ export interface IndicesTemplateMapping { version?: VersionNumber } +export interface IndicesTranslog { + durability?: string + flush_threshold_size?: string + retention?: IndicesTranslogRetention +} + +export interface IndicesTranslogRetention { + size: ByteSize +} + export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' export interface IndicesAddBlockIndicesBlockStatus { @@ -9450,7 +9721,7 @@ export interface IndicesDataStreamsStatsDataStreamsStatsItem { data_stream: Name store_size?: ByteSize store_size_bytes: integer - maximum_timestamp: integer + maximum_timestamp: long } export interface IndicesDataStreamsStatsRequest extends RequestBase { @@ -9498,7 +9769,9 @@ export interface IndicesDeleteDataStreamResponse extends AcknowledgedResponseBas } export interface IndicesDeleteIndexTemplateRequest extends RequestBase { - name: Name + name: Names + master_timeout?: Time + timeout?: Time } export interface IndicesDeleteIndexTemplateResponse extends AcknowledgedResponseBase { @@ -9566,6 +9839,62 @@ export interface IndicesExistsTemplateRequest extends RequestBase { export type IndicesExistsTemplateResponse = boolean +export interface IndicesFieldUsageStatsFieldSummary { + any: uint + stored_fields: uint + doc_values: uint + points: uint + norms: uint + term_vectors: uint + knn_vectors: uint + inverted_index: IndicesFieldUsageStatsInvertedIndex +} + +export interface IndicesFieldUsageStatsFieldsUsageBodyKeys { + _shards: ShardStatistics +} +export type IndicesFieldUsageStatsFieldsUsageBody = IndicesFieldUsageStatsFieldsUsageBodyKeys +& { [property: string]: IndicesFieldUsageStatsUsageStatsIndex | ShardStatistics } + +export interface IndicesFieldUsageStatsInvertedIndex { + terms: uint 
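/*
 * The IndicesFieldUsageStats* types added in this hunk model the response of
 * the field usage stats API. A hypothetical call from the client — the method
 * name and index are assumptions for illustration, not part of this change:
 *
 *   const usage = await client.indices.fieldUsageStats({ index: 'my-index' })
 *   console.log(usage._shards)
 */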
+ postings: uint + proximity: uint + positions: uint + term_frequencies: uint + offsets: uint + payloads: uint +} + +export interface IndicesFieldUsageStatsRequest extends RequestBase { + index: Indices + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_unavailable?: boolean + fields?: Fields + master_timeout?: Time + timeout?: Time + wait_for_active_shards?: WaitForActiveShards +} + +export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody + +export interface IndicesFieldUsageStatsShardsStats { + all_fields: IndicesFieldUsageStatsFieldSummary + fields: Record +} + +export interface IndicesFieldUsageStatsUsageStatsIndex { + shards: IndicesFieldUsageStatsUsageStatsShards[] +} + +export interface IndicesFieldUsageStatsUsageStatsShards { + routing: IndicesStatsShardRouting + stats: IndicesFieldUsageStatsShardsStats + tracking_id: string + tracking_started_at_millis: EpochMillis +} + export interface IndicesFlushRequest extends RequestBase { index?: Indices allow_no_indices?: boolean @@ -9807,7 +10136,7 @@ export interface IndicesPutMappingRequest extends RequestBase { dynamic_date_formats?: string[] dynamic_templates?: Record | Record[] _field_names?: MappingFieldNamesField - _meta?: Record + _meta?: Metadata numeric_detection?: boolean properties?: Record _routing?: MappingRoutingField @@ -10217,6 +10546,8 @@ export interface IndicesSplitResponse extends AcknowledgedResponseBase { index: IndexName } +export type IndicesStatsIndexMetadataState = 'open' | 'close' + export interface IndicesStatsIndexStats { completion?: CompletionStats docs?: DocStats @@ -10244,6 +10575,8 @@ export interface IndicesStatsIndicesStats { shards?: Record total?: IndicesStatsIndexStats uuid?: Uuid + health?: HealthStatus + status?: IndicesStatsIndexMetadataState } export interface IndicesStatsRequest extends RequestBase { @@ -10389,6 +10722,7 @@ export interface IndicesUpdateAliasesAddAction { is_write_index?: boolean routing?: Routing search_routing?: Routing + must_exist?: boolean } export interface IndicesUpdateAliasesRemoveAction { @@ -10402,6 +10736,7 @@ export interface IndicesUpdateAliasesRemoveAction { export interface IndicesUpdateAliasesRemoveIndexAction { index?: IndexName indices?: Indices + must_exist?: boolean } export interface IndicesUpdateAliasesRequest extends RequestBase { @@ -11028,6 +11363,41 @@ export interface MigrationDeprecationsResponse { ml_settings: MigrationDeprecationsDeprecation[] } +export interface MigrationGetFeatureUpgradeStatusMigrationFeature { + feature_name: string + minimum_index_version: VersionString + migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus + indices: MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo[] +} + +export interface MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo { + index: IndexName + version: VersionString + failure_cause?: ErrorCause +} + +export type MigrationGetFeatureUpgradeStatusMigrationStatus = 'NO_MIGRATION_NEEDED' | 'MIGRATION_NEEDED' | 'IN_PROGRESS' | 'ERROR' + +export interface MigrationGetFeatureUpgradeStatusRequest extends RequestBase { +} + +export interface MigrationGetFeatureUpgradeStatusResponse { + features: MigrationGetFeatureUpgradeStatusMigrationFeature[] + migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus +} + +export interface MigrationPostFeatureUpgradeMigrationFeature { + feature_name: string +} + +export interface MigrationPostFeatureUpgradeRequest extends RequestBase { +} + +export interface MigrationPostFeatureUpgradeResponse { 
+ accepted: boolean + features: MigrationPostFeatureUpgradeMigrationFeature[] +} + export interface MlAnalysisConfig { bucket_span: TimeSpan categorization_analyzer?: MlCategorizationAnalyzer @@ -11047,7 +11417,7 @@ export interface MlAnalysisConfigRead { categorization_analyzer?: MlCategorizationAnalyzer categorization_field_name?: Field categorization_filters?: string[] - detectors: MlDetector[] + detectors: MlDetectorRead[] influencers: Field[] model_prune_window?: Time latency?: Time @@ -11251,6 +11621,7 @@ export interface MlDatafeedConfig { export interface MlDatafeedRunningState { real_time_configured: boolean real_time_running: boolean + search_interval?: MlRunningStateSearchInterval } export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' @@ -11520,7 +11891,9 @@ export interface MlDelayedDataCheckConfig { enabled: boolean } -export type MlDeploymentState = 'started' | 'starting' | 'fully_allocated' +export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated' + +export type MlDeploymentState = 'started' | 'starting' | 'stopping' export interface MlDetectionRule { actions?: MlRuleAction[] @@ -11541,6 +11914,19 @@ export interface MlDetector { use_null?: boolean } +export interface MlDetectorRead { + by_field_name?: Field + custom_rules?: MlDetectionRule[] + detector_description?: string + detector_index?: integer + exclude_frequent?: MlExcludeFrequent + field_name?: Field + function: string + over_field_name?: Field + partition_field_name?: Field + use_null?: boolean +} + export interface MlDiscoveryNode { attributes: Record ephemeral_id: Id @@ -11747,7 +12133,7 @@ export interface MlModelSnapshot { retain: boolean snapshot_doc_count: long snapshot_id: Id - timestamp: integer + timestamp: long } export interface MlOutlierDetectionParameters { @@ -11795,6 +12181,11 @@ export interface MlRuleCondition { value: double } +export interface MlRunningStateSearchInterval { + end_ms: long + start_ms: long +} + export interface MlTimingStats { elapsed_time: integer iteration_time?: integer @@ -11824,7 +12215,7 @@ export interface MlTotalFeatureImportanceStatistics { } export interface MlTrainedModelAllocation { - allocation_state: MlDeploymentState + allocation_state: MlDeploymentAllocationState routing_table: Record start_time: DateString task_parameters: MlTrainedModelAllocationTaskParameters @@ -11871,6 +12262,43 @@ export interface MlTrainedModelConfigMetadata { total_feature_importance?: MlTotalFeatureImportance[] } +export interface MlTrainedModelDeploymentAllocationStatus { + allocation_count: integer + state: MlDeploymentAllocationState + target_allocation_count: integer +} + +export interface MlTrainedModelDeploymentNodesStats { + average_inference_time_ms: double + error_count: integer + inference_count: integer + inference_threads: integer + last_access: long + model_threads: integer + node: MlDiscoveryNode + number_of_pending_requests: integer + rejection_execution_count: integer + routing_state: MlTrainedModelAllocationRoutingTable + start_time: long + timeout_count: integer +} + +export interface MlTrainedModelDeploymentStats { + allocation_status: MlTrainedModelDeploymentAllocationStatus + error_count: integer + inference_count: integer + inference_threads: integer + model_id: Id + model_threads: integer + nodes: MlTrainedModelDeploymentNodesStats + queue_capacity: integer + rejected_execution_count: integer + reason: string + start_time: long + state: MlDeploymentState + timeout_count: integer +} + export interface 
MlTrainedModelEntities { class_name: string class_probability: double @@ -11880,10 +12308,10 @@ export interface MlTrainedModelEntities { } export interface MlTrainedModelInferenceStats { - failure_count: long - inference_count: long - cache_miss_count: long - missing_all_fields_count: long + cache_miss_count: integer + failure_count: integer + inference_count: integer + missing_all_fields_count: integer timestamp: Time } @@ -11895,11 +12323,18 @@ export interface MlTrainedModelLocationIndex { name: IndexName } +export interface MlTrainedModelSizeStats { + model_size_bytes: ByteSize + required_native_memory_bytes: integer +} + export interface MlTrainedModelStats { - model_id: Id - pipeline_count: integer + deployment_stats?: MlTrainedModelDeploymentStats inference_stats?: MlTrainedModelInferenceStats ingest?: Record + model_id: Id + model_size_stats: MlTrainedModelSizeStats + pipeline_count: integer } export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch' @@ -12579,7 +13014,7 @@ export interface MlPostDataRequest extends RequestBase { export interface MlPostDataResponse { bucket_count: long - earliest_record_timestamp: integer + earliest_record_timestamp: long empty_bucket_count: long input_bytes: long input_field_count: long @@ -12587,7 +13022,7 @@ export interface MlPostDataResponse { invalid_date_count: long job_id: Id last_data_time: integer - latest_record_timestamp: integer + latest_record_timestamp: long missing_field_count: long out_of_order_timestamp_count: long processed_field_count: long @@ -12981,7 +13416,7 @@ export interface MlStartTrainedModelDeploymentRequest extends RequestBase { model_threads?: integer queue_capacity?: integer timeout?: Time - wait_for?: MlDeploymentState + wait_for?: MlDeploymentAllocationState } export interface MlStartTrainedModelDeploymentResponse { @@ -13205,13 +13640,13 @@ export interface MlValidateDetectorRequest extends RequestBase { export interface MlValidateDetectorResponse extends AcknowledgedResponseBase { } -export interface MonitoringBulkRequest extends RequestBase { +export interface MonitoringBulkRequest extends RequestBase { type?: string system_id: string system_api_version: string interval: TimeSpan /** @deprecated The use of the 'body' key has been deprecated, use 'operations' instead. 
*/ - body?: (BulkOperationContainer | TSource)[] + body?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] } export interface MonitoringBulkResponse { @@ -13489,6 +13924,13 @@ export interface NodesNodeBufferPool { used_in_bytes?: long } +export interface NodesNodeReloadError { + name: Name + reload_exception?: ErrorCause +} + +export type NodesNodeReloadResult = NodesStats | NodesNodeReloadError + export interface NodesNodesResponseBase { _nodes?: NodeStatistics } @@ -13547,6 +13989,38 @@ export interface NodesRecording { cumulative_execution_time_millis?: long } +export interface NodesRepositoryLocation { + base_path: string + container?: string + bucket?: string +} + +export interface NodesRepositoryMeteringInformation { + repository_name: Name + repository_type: string + repository_location: NodesRepositoryLocation + repository_ephemeral_id: Id + repository_started_at: EpochMillis + repository_stopped_at?: EpochMillis + archived: boolean + cluster_version?: VersionNumber + request_counts: NodesRequestCounts +} + +export interface NodesRequestCounts { + GetBlobProperties?: long + GetBlob?: long + ListBlobs?: long + PutBlob?: long + PutBlock?: long + PutBlockList?: long + GetObject?: long + ListObjects?: long + InsertObject?: long + PutObject?: long + PutMultipartObject?: long +} + export interface NodesScriptCache { cache_evictions?: long compilation_limit_triggered?: long @@ -13627,6 +14101,27 @@ export interface NodesTransportHistogram { ge_millis?: long } +export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBase { + node_id: NodeIds + max_archive_version: long +} + +export interface NodesClearRepositoriesMeteringArchiveResponse extends NodesNodesResponseBase { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record +} + +export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { + node_id: NodeIds +} + +export interface NodesGetRepositoriesMeteringInfoResponse extends NodesNodesResponseBase { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record +} + export interface NodesHotThreadsHotThread { hosts: Host[] node_id: Id @@ -14002,13 +14497,6 @@ export interface NodesInfoResponse extends NodesNodesResponseBase { nodes: Record } -export interface NodesReloadSecureSettingsNodeReloadError { - name: Name - reload_exception?: ErrorCause -} - -export type NodesReloadSecureSettingsNodeReloadResult = NodesStats | NodesReloadSecureSettingsNodeReloadError - export interface NodesReloadSecureSettingsRequest extends RequestBase { node_id?: NodeIds timeout?: Time @@ -14020,7 +14508,7 @@ export interface NodesReloadSecureSettingsRequest extends RequestBase { export interface NodesReloadSecureSettingsResponse extends NodesNodesResponseBase { cluster_name: Name - nodes: Record + nodes: Record } export interface NodesStatsRequest extends RequestBase { @@ -14099,19 +14587,7 @@ export interface RollupDeleteJobRequest extends RequestBase { } export interface RollupDeleteJobResponse extends AcknowledgedResponseBase { - task_failures?: RollupDeleteJobTaskFailure[] -} - -export interface RollupDeleteJobTaskFailure { - task_id: TaskId - node_id: Id - status: string - reason: RollupDeleteJobTaskFailureReason -} - -export interface RollupDeleteJobTaskFailureReason { - type: string - reason: string + task_failures?: TaskFailure[] } export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' @@ -14185,7 +14661,7 @@ export interface RollupGetRollupIndexCapsIndexCapabilities { } export interface 
RollupGetRollupIndexCapsRequest extends RequestBase { - index: Id + index: Ids } export interface RollupGetRollupIndexCapsResponse extends DictionaryResponseBase { @@ -14273,6 +14749,30 @@ export interface RollupStopJobResponse { export type SearchableSnapshotsStatsLevel = 'cluster' | 'indices' | 'shards' +export interface SearchableSnapshotsCacheStatsNode { + shared_cache: SearchableSnapshotsCacheStatsShared +} + +export interface SearchableSnapshotsCacheStatsRequest extends RequestBase { + node_id?: NodeIds + master_timeout?: Time +} + +export interface SearchableSnapshotsCacheStatsResponse { + nodes: Record +} + +export interface SearchableSnapshotsCacheStatsShared { + reads: long + bytes_read_in_bytes: ByteSize + writes: long + bytes_written_in_bytes: ByteSize + evictions: long + num_regions: integer + size_in_bytes: ByteSize + region_size_in_bytes: ByteSize +} + export interface SearchableSnapshotsClearCacheRequest extends RequestBase { index?: Indices expand_wildcards?: ExpandWildcards @@ -14373,7 +14873,7 @@ export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity | SecurityFieldSecurity[] names: Indices privileges: SecurityIndexPrivilege[] - query?: QueryDslQueryContainer + query?: string[] | QueryDslQueryContainer | SecurityRoleTemplateQueryContainer allow_restricted_indices?: boolean } @@ -14405,6 +14905,22 @@ export interface SecurityRoleMappingRule { except?: SecurityRoleMappingRule } +export interface SecurityRoleTemplateInlineScript extends ScriptBase { + lang?: ScriptLanguage + options?: Record + source: string | QueryDslQueryContainer +} + +export interface SecurityRoleTemplateQueryContainer { + template?: SecurityRoleTemplateScript +} + +export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | string | QueryDslQueryContainer | StoredScriptId + +export interface SecurityTransientMetadataConfig { + enabled: boolean +} + export interface SecurityUser { email?: string | null full_name?: Name | null @@ -14554,7 +15070,7 @@ export interface SecurityDeletePrivilegesFoundStatus { export interface SecurityDeletePrivilegesRequest extends RequestBase { application: Name - name: Name + name: Names refresh?: Refresh } @@ -14615,6 +15131,31 @@ export interface SecurityEnableUserRequest extends RequestBase { export interface SecurityEnableUserResponse { } +export interface SecurityEnrollKibanaRequest extends RequestBase { +} + +export interface SecurityEnrollKibanaResponse { + token: SecurityEnrollKibanaToken + http_ca: string +} + +export interface SecurityEnrollKibanaToken { + name: string + value: string +} + +export interface SecurityEnrollNodeRequest extends RequestBase { +} + +export interface SecurityEnrollNodeResponse { + http_ca_key: string + http_ca_cert: string + transport_ca_cert: string + transport_key: string + transport_cert: string + nodes_addresses: string[] +} + export interface SecurityGetApiKeyRequest extends RequestBase { id?: Id name?: Name @@ -14637,7 +15178,7 @@ export interface SecurityGetBuiltinPrivilegesResponse { export interface SecurityGetPrivilegesRequest extends RequestBase { application?: Name - name?: Name + name?: Names } export interface SecurityGetPrivilegesResponse extends DictionaryResponseBase> { @@ -14655,7 +15196,7 @@ export interface SecurityGetRoleRole { indices: SecurityIndicesPrivileges[] metadata: Metadata run_as: string[] - transient_metadata: SecurityGetRoleTransientMetadata + transient_metadata: SecurityTransientMetadataConfig applications: SecurityApplicationPrivileges[] role_templates?: 
SecurityGetRoleRoleTemplate[] global?: Record>> @@ -14668,10 +15209,6 @@ export interface SecurityGetRoleRoleTemplate { export type SecurityGetRoleTemplateFormat = 'string' | 'json' -export interface SecurityGetRoleTransientMetadata { - enabled: boolean -} - export interface SecurityGetRoleMappingRequest extends RequestBase { name?: Names } @@ -14694,7 +15231,7 @@ export interface SecurityGetServiceAccountsRoleDescriptor { applications?: SecurityApplicationPrivileges[] metadata?: Metadata run_as?: string[] - transient_metadata?: Record + transient_metadata?: SecurityTransientMetadataConfig } export interface SecurityGetServiceAccountsRoleDescriptorWrapper { @@ -14909,7 +15446,7 @@ export interface SecurityPutRoleRequest extends RequestBase { indices?: SecurityIndicesPrivileges[] metadata?: Metadata run_as?: string[] - transient_metadata?: SecurityGetRoleTransientMetadata + transient_metadata?: SecurityTransientMetadataConfig } } @@ -14972,6 +15509,85 @@ export interface SecurityQueryApiKeysResponse { api_keys: SecurityApiKey[] } +export interface SecuritySamlAuthenticateRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + content: string + ids: Ids + realm?: string + } +} + +export interface SecuritySamlAuthenticateResponse { + access_token: string + username: string + expires_in: integer + refresh_token: string + realm: string +} + +export interface SecuritySamlCompleteLogoutRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + realm: string + ids: Ids + query_string?: string + content?: string + } +} + +export type SecuritySamlCompleteLogoutResponse = boolean + +export interface SecuritySamlInvalidateRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + acs?: string + query_string: string + realm?: string + } +} + +export interface SecuritySamlInvalidateResponse { + invalidated: integer + realm: string + redirect: string +} + +export interface SecuritySamlLogoutRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + token: string + refresh_token?: string + } +} + +export interface SecuritySamlLogoutResponse { + redirect: string +} + +export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + acs?: string + realm?: string + relay_state?: string + } +} + +export interface SecuritySamlPrepareAuthenticationResponse { + id: Id + realm: string + redirect: string +} + +export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase { + realm_name: Name +} + +export interface SecuritySamlServiceProviderMetadataResponse { + metadata: string +} + export interface ShutdownDeleteNodeRequest extends RequestBase { node_id: NodeId } @@ -15264,6 +15880,8 @@ export interface SnapshotSnapshotShardsStatus { stats: SnapshotShardsStatsSummary } +export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_count' | 'repository' | 'shard_count' | 'failed_shard_count' + export interface SnapshotSnapshotStats { incremental: SnapshotFileCountSnapshotStats start_time_in_millis: long @@ -15330,7 +15948,7 @@ export interface SnapshotCreateRequest extends RequestBase { export interface SnapshotCreateResponse { accepted?: boolean - snapshot: SnapshotSnapshotInfo + snapshot?: SnapshotSnapshotInfo } export interface SnapshotCreateRepositoryRequest extends RequestBase { @@ -15376,6 +15994,13 @@ export interface SnapshotGetRequest extends RequestBase { index_details?: boolean human?: boolean include_repository?: boolean + sort?: SnapshotSnapshotSort + size?: integer + order?: SortOrder + after?: string + offset?: integer + from_sort_value?: string + slm_policy_filter?: Name } export interface SnapshotGetResponse { @@ -15454,6 +16079,13 @@ export interface SnapshotVerifyRepositoryResponse { nodes: Record } +export interface SqlColumn { + name: Name + type: string +} + +export type SqlRow = any[] + export interface SqlClearCursorRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -15465,15 +16097,48 @@ export interface SqlClearCursorResponse { succeeded: boolean } -export interface SqlQueryColumn { - name: Name - type: string +export interface SqlDeleteAsyncRequest extends RequestBase { + id: Id +} + +export interface SqlDeleteAsyncResponse extends AcknowledgedResponseBase { +} + +export interface SqlGetAsyncRequest extends RequestBase { + id: Id + delimiter?: string + format?: string + keep_alive?: Time + wait_for_completion_timeout?: Time +} + +export interface SqlGetAsyncResponse { + id: Id + is_running: boolean + is_partial: boolean + columns?: SqlColumn[] + cursor?: string + rows: SqlRow[] +} + +export interface SqlGetAsyncStatusRequest extends RequestBase { + id: Id +} + +export interface SqlGetAsyncStatusResponse { + id: string + is_running: boolean + is_partial: boolean + start_time_in_millis: ulong + expiration_time_in_millis: ulong + completion_status?: uint } export interface SqlQueryRequest extends RequestBase { format?: string /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { + catalog?: string columnar?: boolean cursor?: string fetch_size?: integer @@ -15483,17 +16148,24 @@ export interface SqlQueryRequest extends RequestBase { page_timeout?: Time time_zone?: string field_multi_value_leniency?: boolean + runtime_mappings?: MappingRuntimeFields + wait_for_completion_timeout?: Time + params?: Record + keep_alive?: Time + keep_on_completion?: boolean + index_using_frozen?: boolean } } export interface SqlQueryResponse { - columns?: SqlQueryColumn[] + id?: Id + is_running?: boolean + is_partial?: boolean + columns?: SqlColumn[] cursor?: string - rows: SqlQueryRow[] + rows: SqlRow[] } -export type SqlQueryRow = any[] - export interface SqlTranslateRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -15528,36 +16200,45 @@ export type SslCertificatesResponse = SslCertificatesCertificateInformation[] export type TasksGroupBy = 'nodes' | 'parents' | 'none' -export interface TasksInfo { - action: string - cancellable: boolean - children?: TasksInfo[] - description?: string - headers: HttpHeaders - id: long - node: string - running_time_in_nanos: long - start_time_in_millis: long - status?: TasksStatus - type: string - parent_task_id?: Id +export interface TasksNodeTasks { + name?: NodeId + transport_address?: TransportAddress + host?: Host + ip?: Ip + roles?: string[] + attributes?: Record + tasks: Record +} + +export interface TasksParentTaskInfo extends TasksTaskInfo { + children?: TasksTaskInfo[] } -export interface TasksState { +export interface TasksTaskInfo { action: string cancellable: boolean description?: string - headers: HttpHeaders + headers: Record id: long - node: string - parent_task_id?: TaskId + node: NodeId + running_time?: string running_time_in_nanos: long start_time_in_millis: long - status?: TasksStatus + status?: TasksTaskStatus type: string + parent_task_id?: TaskId +} + +export type TasksTaskInfos = TasksTaskInfo[] | Record + +export interface TasksTaskListResponseBase { + node_failures?: ErrorCause[] + task_failures?: TaskFailure[] + nodes?: Record + tasks?: TasksTaskInfos } -export interface TasksStatus { +export interface TasksTaskStatus { batches: long canceled?: string created: long @@ -15577,10 +16258,6 @@ export interface TasksStatus { version_conflicts: long } -export interface TasksTaskExecutingNode extends SpecUtilsBaseNode { - tasks: Record -} - export interface TasksCancelRequest extends RequestBase { task_id?: TaskId actions?: string | string[] @@ -15589,9 +16266,7 @@ export interface TasksCancelRequest extends RequestBase { wait_for_completion?: boolean } -export interface TasksCancelResponse { - node_failures?: ErrorCause[] - nodes: Record +export interface TasksCancelResponse extends TasksTaskListResponseBase { } export interface TasksGetRequest extends RequestBase { @@ -15602,8 +16277,8 @@ export interface TasksGetRequest extends RequestBase { export interface TasksGetResponse { completed: boolean - task: TasksInfo - response?: TasksStatus + task: TasksTaskInfo + response?: TasksTaskStatus error?: ErrorCause } @@ -15618,10 +16293,7 @@ export interface TasksListRequest extends RequestBase { wait_for_completion?: boolean } -export interface TasksListResponse { - node_failures?: ErrorCause[] - nodes?: Record - tasks?: TasksInfo[] | Record +export interface TasksListResponse extends TasksTaskListResponseBase { } export interface TextStructureFindStructureFieldStat { @@ -15720,6 +16392,7 @@ export interface 
TransformRetentionPolicyContainer { export interface TransformSettings { align_checkpoints?: boolean dates_as_epoch_millis?: boolean + deduce_mappings?: boolean docs_per_second?: float max_page_search_size?: integer } @@ -15795,7 +16468,7 @@ export interface TransformGetTransformStatsCheckpointing { } export interface TransformGetTransformStatsRequest extends RequestBase { - transform_id: Name + transform_id: Names allow_no_match?: boolean from?: long size?: long @@ -15975,9 +16648,12 @@ export interface WatcherAction { throttle_period?: Time throttle_period_in_millis?: EpochMillis transform?: TransformContainer - index?: WatcherIndex - logging?: WatcherLogging - webhook?: WatcherActionWebhook + index?: WatcherIndexAction + logging?: WatcherLoggingAction + email?: WatcherEmailAction + pagerduty?: WatcherPagerDutyAction + slack?: WatcherSlackAction + webhook?: WatcherWebhookAction } export type WatcherActionExecutionMode = 'simulate' | 'force_simulate' | 'execute' | 'force_execute' | 'skip' @@ -15993,11 +16669,6 @@ export type WatcherActionStatusOptions = 'success' | 'failure' | 'simulated' | ' export type WatcherActionType = 'email' | 'webhook' | 'index' | 'logging' | 'slack' | 'pagerduty' -export interface WatcherActionWebhook { - host: Host - port: integer -} - export type WatcherActions = Record export interface WatcherActivationState { @@ -16060,6 +16731,12 @@ export interface WatcherDailySchedule { at: WatcherTimeOfDay[] } +export type WatcherDataAttachmentFormat = 'json' | 'yaml' + +export interface WatcherDataEmailAttachment { + format?: WatcherDataAttachmentFormat +} + export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday' export interface WatcherEmail { @@ -16067,17 +16744,26 @@ export interface WatcherEmail { body?: WatcherEmailBody cc?: string[] from?: string - id: Id priority?: WatcherEmailPriority reply_to?: string[] - sent_date: DateString + sent_date?: DateString subject: string to: string[] + attachments?: Record +} + +export interface WatcherEmailAction extends WatcherEmail { +} + +export interface WatcherEmailAttachmentContainer { + http?: WatcherHttpEmailAttachment + reporting?: WatcherReportingEmailAttachment + data?: WatcherDataEmailAttachment } export interface WatcherEmailBody { - html: string - text: string + html?: string + text?: string } export type WatcherEmailPriority = 'lowest' | 'low' | 'normal' | 'high' | 'highest' @@ -16146,6 +16832,12 @@ export interface WatcherHourlySchedule { minute: integer[] } +export interface WatcherHttpEmailAttachment { + content_type?: string + inline?: boolean + request?: WatcherHttpInputRequestDefinition +} + export interface WatcherHttpInput { http?: WatcherHttpInput extract?: string[] @@ -16194,10 +16886,13 @@ export interface WatcherHttpInputResponseResult { status: integer } -export interface WatcherIndex { +export interface WatcherIndexAction { index: IndexName doc_id?: Id refresh?: Refresh + op_type?: OpType + timeout?: Time + execution_time_field?: Field } export interface WatcherIndexResult { @@ -16221,7 +16916,7 @@ export interface WatcherInputContainer { export type WatcherInputType = 'http' | 'search' | 'simple' -export interface WatcherLogging { +export interface WatcherLoggingAction { level?: string text: string category?: string @@ -16236,6 +16931,9 @@ export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | export interface WatcherNeverCondition { } +export interface WatcherPagerDutyAction extends WatcherPagerDutyEvent { +} + export 
interface WatcherPagerDutyContext { href?: string src?: string @@ -16245,14 +16943,21 @@ export interface WatcherPagerDutyContext { export type WatcherPagerDutyContextType = 'link' | 'image' export interface WatcherPagerDutyEvent { - account: string + account?: string attach_payload: boolean client?: string client_url?: string - contexts: WatcherPagerDutyContext[] - description?: string + contexts?: WatcherPagerDutyContext[] + context?: WatcherPagerDutyContext[] + description: string event_type?: WatcherPagerDutyEventType incident_key: string + proxy?: WatcherPagerDutyEventProxy +} + +export interface WatcherPagerDutyEventProxy { + host?: Host + port?: integer } export type WatcherPagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge' @@ -16274,6 +16979,14 @@ export interface WatcherQueryWatch { _seq_no?: SequenceNumber } +export interface WatcherReportingEmailAttachment { + url: string + inline?: boolean + retries?: integer + interval?: Time + request?: WatcherHttpInputRequestDefinition +} + export type WatcherResponseContentType = 'json' | 'yaml' | 'text' export interface WatcherScheduleContainer { @@ -16281,9 +16994,9 @@ export interface WatcherScheduleContainer { daily?: WatcherDailySchedule hourly?: WatcherHourlySchedule interval?: Time - monthly?: WatcherTimeOfMonth[] - weekly?: WatcherTimeOfWeek[] - yearly?: WatcherTimeOfYear[] + monthly?: WatcherTimeOfMonth | WatcherTimeOfMonth[] + weekly?: WatcherTimeOfWeek | WatcherTimeOfWeek[] + yearly?: WatcherTimeOfYear | WatcherTimeOfYear[] } export interface WatcherScheduleTriggerEvent { @@ -16292,9 +17005,10 @@ export interface WatcherScheduleTriggerEvent { } export interface WatcherScriptCondition { - lang: string + lang?: string params?: Record - source: string + source?: string + id?: string } export interface WatcherSearchInput { @@ -16322,6 +17036,11 @@ export interface WatcherSimulatedActions { use_all: boolean } +export interface WatcherSlackAction { + account?: string + message: WatcherSlackMessage +} + export interface WatcherSlackAttachment { author_icon?: string author_link?: string @@ -16423,6 +17142,9 @@ export interface WatcherWatchStatus { execution_state?: string } +export interface WatcherWebhookAction extends WatcherHttpInputRequestDefinition { +} + export interface WatcherWebhookResult { request: WatcherHttpInputRequestResult response?: WatcherHttpInputResponseResult @@ -16672,7 +17394,7 @@ export interface XpackInfoResponse { export interface XpackUsageAllJobs { count: integer detectors: Record - created_by: Record + created_by: Record model_size: Record forecasts: Record } @@ -16835,7 +17557,14 @@ export interface XpackUsageMlCounter { export interface XpackUsageMlDataFrameAnalyticsJobs { memory_usage?: XpackUsageMlDataFrameAnalyticsJobsMemory _all: XpackUsageMlDataFrameAnalyticsJobsCount - analysis_counts?: EmptyObject + analysis_counts?: XpackUsageMlDataFrameAnalyticsJobsAnalysis + stopped?: XpackUsageMlDataFrameAnalyticsJobsCount +} + +export interface XpackUsageMlDataFrameAnalyticsJobsAnalysis { + classification?: integer + outlier_detection?: integer + regression?: integer } export interface XpackUsageMlDataFrameAnalyticsJobsCount { @@ -16890,6 +17619,7 @@ export interface XpackUsageMlInferenceTrainedModelsCount { other: long regression?: long classification?: long + ner?: long } export interface XpackUsageMonitoring extends XpackUsageBase { @@ -17100,7 +17830,7 @@ export interface SpecUtilsCommonCatQueryParameters { help?: boolean local?: boolean master_timeout?: Time - s?: string[] + s?: Names v?: boolean } 
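A note on the deprecations in the type diff above: the JSDoc "The use of the 'body' key has been deprecated, move the nested keys to the top level object." recurs on request types such as SqlQueryRequest and the SecuritySaml* requests, and it describes one consistent 8.x migration: request parameters sit at the top level of the request object instead of under a nested 'body' key. The following is a minimal sketch of what that means for calling code; it assumes an 8.x client against a hypothetical local node and index name, uses the SQL API only as a representative example, and shows the deprecated shape purely for contrast (the client still accepts it, but the typings flag it).

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run (): Promise<void> {
  // Legacy 7.x shape: parameters nested under a 'body' key.
  // Still type-checks via the body-keyed request variants, but is deprecated.
  await client.sql.query({
    body: { query: 'SELECT * FROM "my-index" LIMIT 5' }
  })

  // 8.x shape: the nested keys move to the top level of the request object.
  await client.sql.query({
    query: 'SELECT * FROM "my-index" LIMIT 5',
    fetch_size: 5
  })
}

run().catch(console.log)

Bulk-style endpoints deviate slightly: as the diff above shows, MonitoringBulkRequest deprecates 'body' in favour of a dedicated 'operations' key, mirroring the plain bulk API, rather than flattening the payload to the top level.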
From 10277e217cc83f096e3e5db4c0298f293922f853 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Mon, 21 Feb 2022 19:17:17 +0100 Subject: [PATCH 142/647] Bump transport version (#1629) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index db79de583..838925c00 100644 --- a/package.json +++ b/package.json @@ -80,7 +80,7 @@ "xmlbuilder2": "^3.0.2" }, "dependencies": { - "@elastic/transport": "^8.0.1", + "@elastic/transport": "^8.0.2", "tslib": "^2.3.0" }, "tap": { From e7c5b3dafa9e23d3466bfc31d262b77fdf62a872 Mon Sep 17 00:00:00 2001 From: delvedor Date: Thu, 24 Feb 2022 11:11:52 +0100 Subject: [PATCH 143/647] API generation --- src/api/api/ml.ts | 29 +++++++++++++ src/api/api/security.ts | 66 ++++++++++++++++++++++++++++++ src/api/kibana.ts | 4 ++ src/api/types.ts | 81 +++++++++++++++++++++---------------- src/api/typesWithBodyKey.ts | 81 +++++++++++++++++++++---------------- src/helpers.ts | 1 - 6 files changed, 191 insertions(+), 71 deletions(-) diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 434d20133..64f10f486 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -937,6 +937,35 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + async getMemoryStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getMemoryStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getMemoryStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async getMemoryStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['node_id'] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.node_id != null) { + method = 'GET' + path = `/_ml/memory/${encodeURIComponent(params.node_id.toString())}/_stats` + } else { + method = 'GET' + path = '/_ml/memory/_stats' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + async getModelSnapshotUpgradeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async getModelSnapshotUpgradeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> async getModelSnapshotUpgradeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 7cd54d048..082db4b76 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -954,6 +954,72 @@ export default class Security { return await this.transport.request({ path, method, querystring, body }, options) } + async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_security/oidc/authenticate' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_security/oidc/logout' + return await this.transport.request({ path, method, querystring, body }, options) + } + + async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_security/oidc/prepare' + return await this.transport.request({ path, method, querystring, body }, options) + } + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/kibana.ts b/src/api/kibana.ts index ce1fb15a5..b2a60034c 100644 --- a/src/api/kibana.ts +++ b/src/api/kibana.ts @@ -306,6 +306,7 @@ interface KibanaClient { getInfluencers: (params: T.MlGetInfluencersRequest| TB.MlGetInfluencersRequest, options?: TransportRequestOptions) => Promise> getJobStats: (params?: T.MlGetJobStatsRequest| TB.MlGetJobStatsRequest, options?: TransportRequestOptions) => Promise> getJobs: (params?: T.MlGetJobsRequest| TB.MlGetJobsRequest, options?: TransportRequestOptions) => Promise> + getMemoryStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise> getModelSnapshotUpgradeStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise> getModelSnapshots: (params: T.MlGetModelSnapshotsRequest| TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions) => Promise> getOverallBuckets: (params: T.MlGetOverallBucketsRequest| TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions) => Promise> @@ -427,6 +428,9 @@ interface KibanaClient { hasPrivileges: (params?: T.SecurityHasPrivilegesRequest| TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions) => Promise> invalidateApiKey: (params?: T.SecurityInvalidateApiKeyRequest| TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions) => Promise> invalidateToken: (params?: T.SecurityInvalidateTokenRequest| TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions) => Promise> + oidcAuthenticate: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + oidcLogout: (params?: T.TODO, options?: TransportRequestOptions) => Promise> + oidcPrepareAuthentication: (params?: T.TODO, options?: TransportRequestOptions) => Promise> putPrivileges: (params?: T.SecurityPutPrivilegesRequest| TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions) => Promise> putRole: (params: T.SecurityPutRoleRequest| TB.SecurityPutRoleRequest, options?: TransportRequestOptions) => Promise> putRoleMapping: (params: T.SecurityPutRoleMappingRequest| TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions) => Promise> diff --git a/src/api/types.ts b/src/api/types.ts index f81a5de01..2156290ce 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -545,7 +545,7 @@ export interface MgetResponse { export type MgetResponseItem = GetGetResult | MgetMultiGetError -export interface MsearchMultiSearchItem extends SearchResponse { +export interface MsearchMultiSearchItem extends SearchResponseBody { status?: integer } @@ -920,8 +920,7 @@ export interface ScrollRequest extends RequestBase { rest_total_hits_as_int?: boolean } -export interface ScrollResponse> extends SearchResponse { -} +export type ScrollResponse> = SearchResponseBody export interface SearchRequest extends 
RequestBase { index?: Indices @@ -987,12 +986,14 @@ export interface SearchRequest extends RequestBase { runtime_mappings?: MappingRuntimeFields } -export interface SearchResponse> { +export type SearchResponse> = SearchResponseBody + +export interface SearchResponseBody { took: long timed_out: boolean _shards: ShardStatistics hits: SearchHitsMetadata - aggregations?: TAggregations + aggregations?: Record _clusters?: ClusterStatistics fields?: Record max_score?: double @@ -5809,6 +5810,10 @@ export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } +export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v' + +export type CatCatTrainedModelsColumns = CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[] + export type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'checkpoint' | 'cp' | 'checkpoint_duration_time_exp_avg' | 'cdtea' | 'checkpointTimeExpAvg' | 'checkpoint_progress' | 'c' | 'checkpointProgress' | 'create_time' | 'ct' | 'createTime' | 'delete_time' | 'dtime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'documents_deleted' | 'docd' | 'documents_indexed' | 'doci' | 'docs_per_second' | 'dps' | 'documents_processed' | 'docp' | 'frequency' | 'f' | 'id' | 'index_failure' | 'if' | 'index_time' | 'itime' | 'index_total' | 'it' | 'indexed_documents_exp_avg' | 'idea' | 'last_search_time' | 'lst' | 'lastSearchTime' | 'max_page_search_size' | 'mpsz' | 'pages_processed' | 'pp' | 'pipeline' | 'p' | 'processed_documents_exp_avg' | 'pdea' | 'processing_time' | 'pt' | 'reason' | 'r' | 'search_failure' | 'sf' | 'search_time' | 'stime' | 'search_total' | 'st' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'transform_type' | 'tt' | 'trigger_count' | 'tc' | 'version' | 'v' export type CatCatTransformColumns = CatCatTransformColumn | CatCatTransformColumn[] @@ -6582,6 +6587,8 @@ export interface CatMlTrainedModelsRequest extends CatCatRequestBase { model_id?: Id allow_no_match?: boolean bytes?: Bytes + h?: CatCatTrainedModelsColumns + s?: CatCatTrainedModelsColumns from?: integer size?: integer } @@ -9229,6 +9236,28 @@ export interface IndicesIndexState { data_stream?: DataStreamName } +export interface IndicesIndexTemplate { + index_patterns: Names + composed_of: Name[] + template?: IndicesIndexTemplateSummary + version?: VersionNumber + priority?: long + _meta?: Metadata + allow_auto_create?: boolean + data_stream?: IndicesIndexTemplateDataStreamConfiguration +} + +export interface IndicesIndexTemplateDataStreamConfiguration { + hidden?: boolean + allow_custom_routing?: boolean +} + +export interface IndicesIndexTemplateSummary { + aliases?: Record + mappings?: MappingTypeMapping + settings?: IndicesIndexSettings +} + export interface IndicesIndexVersioning { created: VersionString created_string?: VersionString @@ -9787,26 +9816,9 @@ export interface IndicesGetFieldMappingTypeFieldMappings { mappings: Partial> } -export interface IndicesGetIndexTemplateIndexTemplate { - index_patterns: Name[] - 
composed_of: Name[] - template?: IndicesGetIndexTemplateIndexTemplateSummary - version?: VersionNumber - priority?: long - _meta?: Metadata - allow_auto_create?: boolean - data_stream?: Record -} - export interface IndicesGetIndexTemplateIndexTemplateItem { name: Name - index_template: IndicesGetIndexTemplateIndexTemplate -} - -export interface IndicesGetIndexTemplateIndexTemplateSummary { - aliases?: Record - mappings?: MappingTypeMapping - settings?: Record + index_template: IndicesIndexTemplate } export interface IndicesGetIndexTemplateRequest extends RequestBase { @@ -10300,18 +10312,18 @@ export interface IndicesSimulateTemplateRequest extends RequestBase { name?: Name create?: boolean master_timeout?: Time - template?: IndicesGetIndexTemplateIndexTemplate + template?: IndicesIndexTemplate } export interface IndicesSimulateTemplateResponse { + overlapping?: IndicesSimulateTemplateOverlapping[] template: IndicesSimulateTemplateTemplate } export interface IndicesSimulateTemplateTemplate { aliases: Record mappings: MappingTypeMapping - settings: Record - overlapping: IndicesSimulateTemplateOverlapping[] + settings: IndicesIndexSettings } export interface IndicesSplitRequest extends RequestBase { @@ -11893,10 +11905,10 @@ export interface MlModelSizeStats { export interface MlModelSnapshot { description?: string job_id: Id - latest_record_time_stamp: integer - latest_result_time_stamp: integer + latest_record_time_stamp?: integer + latest_result_time_stamp?: integer min_version: VersionString - model_size_stats: MlModelSizeStats + model_size_stats?: MlModelSizeStats retain: boolean snapshot_doc_count: long snapshot_id: Id @@ -12139,7 +12151,7 @@ export interface MlDeleteCalendarEventResponse extends AcknowledgedResponseBase export interface MlDeleteCalendarJobRequest extends RequestBase { calendar_id: Id - job_id: Id + job_id: Ids } export interface MlDeleteCalendarJobResponse { @@ -12482,7 +12494,7 @@ export interface MlGetDatafeedsResponse { } export interface MlGetFiltersRequest extends RequestBase { - filter_id?: Id + filter_id?: Ids from?: integer size?: integer } @@ -12599,7 +12611,7 @@ export interface MlGetTrainedModelsResponse { } export interface MlGetTrainedModelsStatsRequest extends RequestBase { - model_id?: Id + model_id?: Ids allow_no_match?: boolean from?: integer size?: integer @@ -13718,7 +13730,6 @@ export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBas } export interface NodesClearRepositoriesMeteringArchiveResponse extends NodesNodesResponseBase { - _nodes: NodeStatistics cluster_name: Name nodes: Record } @@ -13728,7 +13739,6 @@ export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { } export interface NodesGetRepositoriesMeteringInfoResponse extends NodesNodesResponseBase { - _nodes: NodeStatistics cluster_name: Name nodes: Record } @@ -15945,7 +15955,7 @@ export interface TransformDeleteTransformResponse extends AcknowledgedResponseBa } export interface TransformGetTransformRequest extends RequestBase { - transform_id?: Name + transform_id?: Names allow_no_match?: boolean from?: integer size?: integer @@ -16254,6 +16264,7 @@ export interface WatcherDataEmailAttachment { export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday' export interface WatcherEmail { + id?: Id bcc?: string[] body?: WatcherEmailBody cc?: string[] diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 1406e71b1..87f1ac3d1 100644 --- a/src/api/typesWithBodyKey.ts +++ 
b/src/api/typesWithBodyKey.ts @@ -574,7 +574,7 @@ export interface MgetResponse { export type MgetResponseItem = GetGetResult | MgetMultiGetError -export interface MsearchMultiSearchItem extends SearchResponse { +export interface MsearchMultiSearchItem extends SearchResponseBody { status?: integer } @@ -975,8 +975,7 @@ export interface ScrollRequest extends RequestBase { } } -export interface ScrollResponse> extends SearchResponse { -} +export type ScrollResponse> = SearchResponseBody export interface SearchRequest extends RequestBase { index?: Indices @@ -1059,12 +1058,14 @@ export interface SearchRequest extends RequestBase { } } -export interface SearchResponse> { +export type SearchResponse> = SearchResponseBody + +export interface SearchResponseBody { took: long timed_out: boolean _shards: ShardStatistics hits: SearchHitsMetadata - aggregations?: TAggregations + aggregations?: Record _clusters?: ClusterStatistics fields?: Record max_score?: double @@ -5927,6 +5928,10 @@ export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } +export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v' + +export type CatCatTrainedModelsColumns = CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[] + export type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'checkpoint' | 'cp' | 'checkpoint_duration_time_exp_avg' | 'cdtea' | 'checkpointTimeExpAvg' | 'checkpoint_progress' | 'c' | 'checkpointProgress' | 'create_time' | 'ct' | 'createTime' | 'delete_time' | 'dtime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'documents_deleted' | 'docd' | 'documents_indexed' | 'doci' | 'docs_per_second' | 'dps' | 'documents_processed' | 'docp' | 'frequency' | 'f' | 'id' | 'index_failure' | 'if' | 'index_time' | 'itime' | 'index_total' | 'it' | 'indexed_documents_exp_avg' | 'idea' | 'last_search_time' | 'lst' | 'lastSearchTime' | 'max_page_search_size' | 'mpsz' | 'pages_processed' | 'pp' | 'pipeline' | 'p' | 'processed_documents_exp_avg' | 'pdea' | 'processing_time' | 'pt' | 'reason' | 'r' | 'search_failure' | 'sf' | 'search_time' | 'stime' | 'search_total' | 'st' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'transform_type' | 'tt' | 'trigger_count' | 'tc' | 'version' | 'v' export type CatCatTransformColumns = CatCatTransformColumn | CatCatTransformColumn[] @@ -6700,6 +6705,8 @@ export interface CatMlTrainedModelsRequest extends CatCatRequestBase { model_id?: Id allow_no_match?: boolean bytes?: Bytes + h?: CatCatTrainedModelsColumns + s?: CatCatTrainedModelsColumns from?: integer size?: integer } @@ -9410,6 +9417,28 @@ export interface IndicesIndexState { data_stream?: DataStreamName } +export interface IndicesIndexTemplate { + index_patterns: Names + composed_of: Name[] + template?: IndicesIndexTemplateSummary + version?: VersionNumber + priority?: long + _meta?: Metadata + allow_auto_create?: boolean + data_stream?: IndicesIndexTemplateDataStreamConfiguration +} + +export interface 
IndicesIndexTemplateDataStreamConfiguration { + hidden?: boolean + allow_custom_routing?: boolean +} + +export interface IndicesIndexTemplateSummary { + aliases?: Record + mappings?: MappingTypeMapping + settings?: IndicesIndexSettings +} + export interface IndicesIndexVersioning { created: VersionString created_string?: VersionString @@ -9977,26 +10006,9 @@ export interface IndicesGetFieldMappingTypeFieldMappings { mappings: Partial> } -export interface IndicesGetIndexTemplateIndexTemplate { - index_patterns: Name[] - composed_of: Name[] - template?: IndicesGetIndexTemplateIndexTemplateSummary - version?: VersionNumber - priority?: long - _meta?: Metadata - allow_auto_create?: boolean - data_stream?: Record -} - export interface IndicesGetIndexTemplateIndexTemplateItem { name: Name - index_template: IndicesGetIndexTemplateIndexTemplate -} - -export interface IndicesGetIndexTemplateIndexTemplateSummary { - aliases?: Record - mappings?: MappingTypeMapping - settings?: Record + index_template: IndicesIndexTemplate } export interface IndicesGetIndexTemplateRequest extends RequestBase { @@ -10514,18 +10526,18 @@ export interface IndicesSimulateTemplateRequest extends RequestBase { create?: boolean master_timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, use 'template' instead. */ - body?: IndicesGetIndexTemplateIndexTemplate + body?: IndicesIndexTemplate } export interface IndicesSimulateTemplateResponse { + overlapping?: IndicesSimulateTemplateOverlapping[] template: IndicesSimulateTemplateTemplate } export interface IndicesSimulateTemplateTemplate { aliases: Record mappings: MappingTypeMapping - settings: Record - overlapping: IndicesSimulateTemplateOverlapping[] + settings: IndicesIndexSettings } export interface IndicesSplitRequest extends RequestBase { @@ -12126,10 +12138,10 @@ export interface MlModelSizeStats { export interface MlModelSnapshot { description?: string job_id: Id - latest_record_time_stamp: integer - latest_result_time_stamp: integer + latest_record_time_stamp?: integer + latest_result_time_stamp?: integer min_version: VersionString - model_size_stats: MlModelSizeStats + model_size_stats?: MlModelSizeStats retain: boolean snapshot_doc_count: long snapshot_id: Id @@ -12378,7 +12390,7 @@ export interface MlDeleteCalendarEventResponse extends AcknowledgedResponseBase export interface MlDeleteCalendarJobRequest extends RequestBase { calendar_id: Id - job_id: Id + job_id: Ids } export interface MlDeleteCalendarJobResponse { @@ -12765,7 +12777,7 @@ export interface MlGetDatafeedsResponse { } export interface MlGetFiltersRequest extends RequestBase { - filter_id?: Id + filter_id?: Ids from?: integer size?: integer } @@ -12911,7 +12923,7 @@ export interface MlGetTrainedModelsResponse { } export interface MlGetTrainedModelsStatsRequest extends RequestBase { - model_id?: Id + model_id?: Ids allow_no_match?: boolean from?: integer size?: integer @@ -14107,7 +14119,6 @@ export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBas } export interface NodesClearRepositoriesMeteringArchiveResponse extends NodesNodesResponseBase { - _nodes: NodeStatistics cluster_name: Name nodes: Record } @@ -14117,7 +14128,6 @@ export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { } export interface NodesGetRepositoriesMeteringInfoResponse extends NodesNodesResponseBase { - _nodes: NodeStatistics cluster_name: Name nodes: Record } @@ -16422,7 +16432,7 @@ export interface TransformDeleteTransformResponse extends 
AcknowledgedResponseBa } export interface TransformGetTransformRequest extends RequestBase { - transform_id?: Name + transform_id?: Names allow_no_match?: boolean from?: integer size?: integer @@ -16740,6 +16750,7 @@ export interface WatcherDataEmailAttachment { export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday' export interface WatcherEmail { + id?: Id bcc?: string[] body?: WatcherEmailBody cc?: string[] diff --git a/src/helpers.ts b/src/helpers.ts index 293c23bbe..59b11cc4a 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -228,7 +228,6 @@ export default class Helpers { rest_total_hits_as_int: params.rest_total_hits_as_int, scroll_id }, options as TransportRequestOptionsWithMeta) - // @ts-expect-error response = r as TransportResult, unknown> assert(response !== undefined, 'The response is undefined, please file a bug report') if (response.statusCode !== 429) break From d01582803c69823c587276a31a641bfbd32c91ee Mon Sep 17 00:00:00 2001 From: Rostislav Provodenko <60982217+rprovodenko@users.noreply.github.com> Date: Thu, 24 Feb 2022 13:29:20 +0300 Subject: [PATCH 144/647] Fix onFlushTimeout timer not being cleared when upstream errors (#1616) --- src/helpers.ts | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/src/helpers.ts b/src/helpers.ts index 59b11cc4a..e6a8cf84c 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -355,21 +355,24 @@ export default class Helpers { let loadedOperations = 0 timeoutRef = setTimeout(onFlushTimeout, flushInterval) // eslint-disable-line - for await (const operation of operationsStream) { - timeoutRef.refresh() - loadedOperations += 1 - msearchBody.push(operation[0], operation[1]) - callbacks.push(operation[2]) - if (loadedOperations >= operations) { - const send = await semaphore() - send(msearchBody.slice(), callbacks.slice()) - msearchBody.length = 0 - callbacks.length = 0 - loadedOperations = 0 + try { + for await (const operation of operationsStream) { + timeoutRef.refresh() + loadedOperations += 1 + msearchBody.push(operation[0], operation[1]) + callbacks.push(operation[2]) + if (loadedOperations >= operations) { + const send = await semaphore() + send(msearchBody.slice(), callbacks.slice()) + msearchBody.length = 0 + callbacks.length = 0 + loadedOperations = 0 + } } + } finally { + clearTimeout(timeoutRef) } - clearTimeout(timeoutRef) // In some cases the previos http call does not have finished, // or we didn't reach the flush bytes threshold, so we force one last operation. 
if (loadedOperations > 0) { From 3e79c8e8257e27ef6abbf63837c769051b597884 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Thu, 24 Feb 2022 11:37:33 +0100 Subject: [PATCH 145/647] Remove custom kibana interface (#1633) --- .npmignore | 4 - scripts/release-canary.js | 16 -- src/api/kibana.ts | 532 ------------------------------------ src/api/types.ts | 8 +- src/api/typesWithBodyKey.ts | 8 +- src/helpers.ts | 1 + 6 files changed, 9 insertions(+), 560 deletions(-) delete mode 100644 src/api/kibana.ts diff --git a/.npmignore b/.npmignore index 2a7110656..ddfa7b5b6 100644 --- a/.npmignore +++ b/.npmignore @@ -72,7 +72,3 @@ CODE_OF_CONDUCT.md CONTRIBUTING.md src - -# CANARY-PACKAGE -lib/api/kibana.* -# /CANARY-PACKAGE diff --git a/scripts/release-canary.js b/scripts/release-canary.js index 0f3e42c2b..bac865393 100644 --- a/scripts/release-canary.js +++ b/scripts/release-canary.js @@ -31,7 +31,6 @@ async function release (opts) { const originalVersion = packageJson.version const currentCanaryVersion = packageJson.versionCanary const originalTypes = packageJson.types - const originalNpmIgnore = await readFile(join(__dirname, '..', '.npmignore'), 'utf8') const newCanaryInteger = opts.reset ? 1 : (Number(currentCanaryVersion.split('-')[1].split('.')[1]) + 1) const newCanaryVersion = `${originalVersion.split('-')[0]}-canary.${newCanaryInteger}` @@ -49,15 +48,6 @@ async function release (opts) { 'utf8' ) - // update the npmignore to publish the kibana types as well - const newNpmIgnore = originalNpmIgnore.slice(0, originalNpmIgnore.indexOf('# CANARY-PACKAGE')) + - originalNpmIgnore.slice(originalNpmIgnore.indexOf('# /CANARY-PACKAGE') + 17) - await writeFile( - join(__dirname, '..', '.npmignore'), - newNpmIgnore, - 'utf8' - ) - // confirm the package.json changes with the user const diff = execSync('git diff').toString().split('\n').map(colorDiff).join('\n') console.log(diff) @@ -81,12 +71,6 @@ async function release (opts) { JSON.stringify(packageJson, null, 2) + '\n', 'utf8' ) - - await writeFile( - join(__dirname, '..', '.npmignore'), - originalNpmIgnore, - 'utf8' - ) } function confirm (question) { diff --git a/src/api/kibana.ts b/src/api/kibana.ts deleted file mode 100644 index b2a60034c..000000000 --- a/src/api/kibana.ts +++ /dev/null @@ -1,532 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* eslint-disable import/export */ -/* eslint-disable @typescript-eslint/no-misused-new */ -/* eslint-disable @typescript-eslint/no-extraneous-class */ -/* eslint-disable @typescript-eslint/no-unused-vars */ - -// This file was automatically generated by elastic/elastic-client-generator-js -// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, -// and elastic/elastic-client-generator-js to regenerate this file again. 
- -import { - Serializer, - Diagnostic, - BaseConnectionPool, - TransportRequestParams, - TransportRequestOptions, - TransportResult -} from '@elastic/transport' -import * as T from './types' -import * as TB from './typesWithBodyKey' -import SniffingTransport from '../sniffingTransport' -import Helpers from '../helpers' -import { ClientOptions } from '../client' - -interface KibanaClient { - diagnostic: Diagnostic - name: string | symbol - connectionPool: BaseConnectionPool - transport: Omit & { - request: (params: TransportRequestParams, options?: TransportRequestOptions) => Promise> - } - serializer: Serializer - helpers: Helpers - child: (opts?: ClientOptions) => KibanaClient - close: () => Promise - Internal: { - deleteDesiredNodes: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - getDesiredNodes: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - health: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - updateDesiredNodes: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - } - asyncSearch: { - delete: (params: T.AsyncSearchDeleteRequest| TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions) => Promise> - get: (params: T.AsyncSearchGetRequest| TB.AsyncSearchGetRequest, options?: TransportRequestOptions) => Promise, TContext>> - status: (params: T.AsyncSearchStatusRequest| TB.AsyncSearchStatusRequest, options?: TransportRequestOptions) => Promise> - submit: (params?: T.AsyncSearchSubmitRequest| TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions) => Promise, TContext>> - } - autoscaling: { - deleteAutoscalingPolicy: (params: T.AutoscalingDeleteAutoscalingPolicyRequest| TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions) => Promise> - getAutoscalingCapacity: (params?: T.AutoscalingGetAutoscalingCapacityRequest| TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions) => Promise> - getAutoscalingPolicy: (params: T.AutoscalingGetAutoscalingPolicyRequest| TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions) => Promise> - putAutoscalingPolicy: (params: T.AutoscalingPutAutoscalingPolicyRequest| TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions) => Promise> - } - bulk: (params: T.BulkRequest| TB.BulkRequest, options?: TransportRequestOptions) => Promise> - cat: { - aliases: (params?: T.CatAliasesRequest| TB.CatAliasesRequest, options?: TransportRequestOptions) => Promise> - allocation: (params?: T.CatAllocationRequest| TB.CatAllocationRequest, options?: TransportRequestOptions) => Promise> - count: (params?: T.CatCountRequest| TB.CatCountRequest, options?: TransportRequestOptions) => Promise> - fielddata: (params?: T.CatFielddataRequest| TB.CatFielddataRequest, options?: TransportRequestOptions) => Promise> - health: (params?: T.CatHealthRequest| TB.CatHealthRequest, options?: TransportRequestOptions) => Promise> - help: (params?: T.CatHelpRequest| TB.CatHelpRequest, options?: TransportRequestOptions) => Promise> - indices: (params?: T.CatIndicesRequest| TB.CatIndicesRequest, options?: TransportRequestOptions) => Promise> - master: (params?: T.CatMasterRequest| TB.CatMasterRequest, options?: TransportRequestOptions) => Promise> - mlDataFrameAnalytics: (params?: T.CatMlDataFrameAnalyticsRequest| TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> - mlDatafeeds: (params?: T.CatMlDatafeedsRequest| TB.CatMlDatafeedsRequest, options?: TransportRequestOptions) => Promise> - mlJobs: 
(params?: T.CatMlJobsRequest| TB.CatMlJobsRequest, options?: TransportRequestOptions) => Promise> - mlTrainedModels: (params?: T.CatMlTrainedModelsRequest| TB.CatMlTrainedModelsRequest, options?: TransportRequestOptions) => Promise> - nodeattrs: (params?: T.CatNodeattrsRequest| TB.CatNodeattrsRequest, options?: TransportRequestOptions) => Promise> - nodes: (params?: T.CatNodesRequest| TB.CatNodesRequest, options?: TransportRequestOptions) => Promise> - pendingTasks: (params?: T.CatPendingTasksRequest| TB.CatPendingTasksRequest, options?: TransportRequestOptions) => Promise> - plugins: (params?: T.CatPluginsRequest| TB.CatPluginsRequest, options?: TransportRequestOptions) => Promise> - recovery: (params?: T.CatRecoveryRequest| TB.CatRecoveryRequest, options?: TransportRequestOptions) => Promise> - repositories: (params?: T.CatRepositoriesRequest| TB.CatRepositoriesRequest, options?: TransportRequestOptions) => Promise> - segments: (params?: T.CatSegmentsRequest| TB.CatSegmentsRequest, options?: TransportRequestOptions) => Promise> - shards: (params?: T.CatShardsRequest| TB.CatShardsRequest, options?: TransportRequestOptions) => Promise> - snapshots: (params?: T.CatSnapshotsRequest| TB.CatSnapshotsRequest, options?: TransportRequestOptions) => Promise> - tasks: (params?: T.CatTasksRequest| TB.CatTasksRequest, options?: TransportRequestOptions) => Promise> - templates: (params?: T.CatTemplatesRequest| TB.CatTemplatesRequest, options?: TransportRequestOptions) => Promise> - threadPool: (params?: T.CatThreadPoolRequest| TB.CatThreadPoolRequest, options?: TransportRequestOptions) => Promise> - transforms: (params?: T.CatTransformsRequest| TB.CatTransformsRequest, options?: TransportRequestOptions) => Promise> - } - ccr: { - deleteAutoFollowPattern: (params: T.CcrDeleteAutoFollowPatternRequest| TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise> - follow: (params: T.CcrFollowRequest| TB.CcrFollowRequest, options?: TransportRequestOptions) => Promise> - followInfo: (params: T.CcrFollowInfoRequest| TB.CcrFollowInfoRequest, options?: TransportRequestOptions) => Promise> - followStats: (params: T.CcrFollowStatsRequest| TB.CcrFollowStatsRequest, options?: TransportRequestOptions) => Promise> - forgetFollower: (params: T.CcrForgetFollowerRequest| TB.CcrForgetFollowerRequest, options?: TransportRequestOptions) => Promise> - getAutoFollowPattern: (params?: T.CcrGetAutoFollowPatternRequest| TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise> - pauseAutoFollowPattern: (params: T.CcrPauseAutoFollowPatternRequest| TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise> - pauseFollow: (params: T.CcrPauseFollowRequest| TB.CcrPauseFollowRequest, options?: TransportRequestOptions) => Promise> - putAutoFollowPattern: (params: T.CcrPutAutoFollowPatternRequest| TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise> - resumeAutoFollowPattern: (params: T.CcrResumeAutoFollowPatternRequest| TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise> - resumeFollow: (params: T.CcrResumeFollowRequest| TB.CcrResumeFollowRequest, options?: TransportRequestOptions) => Promise> - stats: (params?: T.CcrStatsRequest| TB.CcrStatsRequest, options?: TransportRequestOptions) => Promise> - unfollow: (params: T.CcrUnfollowRequest| TB.CcrUnfollowRequest, options?: TransportRequestOptions) => Promise> - } - clearScroll: (params?: T.ClearScrollRequest| TB.ClearScrollRequest, options?: 
TransportRequestOptions) => Promise> - closePointInTime: (params?: T.ClosePointInTimeRequest| TB.ClosePointInTimeRequest, options?: TransportRequestOptions) => Promise> - cluster: { - allocationExplain: (params?: T.ClusterAllocationExplainRequest| TB.ClusterAllocationExplainRequest, options?: TransportRequestOptions) => Promise> - deleteComponentTemplate: (params: T.ClusterDeleteComponentTemplateRequest| TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions) => Promise> - deleteVotingConfigExclusions: (params?: T.ClusterDeleteVotingConfigExclusionsRequest| TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions) => Promise> - existsComponentTemplate: (params: T.ClusterExistsComponentTemplateRequest| TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions) => Promise> - getComponentTemplate: (params?: T.ClusterGetComponentTemplateRequest| TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions) => Promise> - getSettings: (params?: T.ClusterGetSettingsRequest| TB.ClusterGetSettingsRequest, options?: TransportRequestOptions) => Promise> - health: (params?: T.ClusterHealthRequest| TB.ClusterHealthRequest, options?: TransportRequestOptions) => Promise> - pendingTasks: (params?: T.ClusterPendingTasksRequest| TB.ClusterPendingTasksRequest, options?: TransportRequestOptions) => Promise> - postVotingConfigExclusions: (params?: T.ClusterPostVotingConfigExclusionsRequest| TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions) => Promise> - putComponentTemplate: (params: T.ClusterPutComponentTemplateRequest| TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions) => Promise> - putSettings: (params?: T.ClusterPutSettingsRequest| TB.ClusterPutSettingsRequest, options?: TransportRequestOptions) => Promise> - remoteInfo: (params?: T.ClusterRemoteInfoRequest| TB.ClusterRemoteInfoRequest, options?: TransportRequestOptions) => Promise> - reroute: (params?: T.ClusterRerouteRequest| TB.ClusterRerouteRequest, options?: TransportRequestOptions) => Promise> - state: (params?: T.ClusterStateRequest| TB.ClusterStateRequest, options?: TransportRequestOptions) => Promise> - stats: (params?: T.ClusterStatsRequest| TB.ClusterStatsRequest, options?: TransportRequestOptions) => Promise> - } - count: (params?: T.CountRequest| TB.CountRequest, options?: TransportRequestOptions) => Promise> - create: (params: T.CreateRequest| TB.CreateRequest, options?: TransportRequestOptions) => Promise> - danglingIndices: { - deleteDanglingIndex: (params: T.DanglingIndicesDeleteDanglingIndexRequest| TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions) => Promise> - importDanglingIndex: (params: T.DanglingIndicesImportDanglingIndexRequest| TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions) => Promise> - listDanglingIndices: (params?: T.DanglingIndicesListDanglingIndicesRequest| TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions) => Promise> - } - delete: (params: T.DeleteRequest| TB.DeleteRequest, options?: TransportRequestOptions) => Promise> - deleteByQuery: (params: T.DeleteByQueryRequest| TB.DeleteByQueryRequest, options?: TransportRequestOptions) => Promise> - deleteByQueryRethrottle: (params: T.DeleteByQueryRethrottleRequest| TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions) => Promise> - deleteScript: (params: T.DeleteScriptRequest| TB.DeleteScriptRequest, options?: TransportRequestOptions) => Promise> 
- enrich: { - deletePolicy: (params: T.EnrichDeletePolicyRequest| TB.EnrichDeletePolicyRequest, options?: TransportRequestOptions) => Promise> - executePolicy: (params: T.EnrichExecutePolicyRequest| TB.EnrichExecutePolicyRequest, options?: TransportRequestOptions) => Promise> - getPolicy: (params?: T.EnrichGetPolicyRequest| TB.EnrichGetPolicyRequest, options?: TransportRequestOptions) => Promise> - putPolicy: (params: T.EnrichPutPolicyRequest| TB.EnrichPutPolicyRequest, options?: TransportRequestOptions) => Promise> - stats: (params?: T.EnrichStatsRequest| TB.EnrichStatsRequest, options?: TransportRequestOptions) => Promise> - } - eql: { - delete: (params: T.EqlDeleteRequest| TB.EqlDeleteRequest, options?: TransportRequestOptions) => Promise> - get: (params: T.EqlGetRequest| TB.EqlGetRequest, options?: TransportRequestOptions) => Promise, TContext>> - getStatus: (params: T.EqlGetStatusRequest| TB.EqlGetStatusRequest, options?: TransportRequestOptions) => Promise> - search: (params: T.EqlSearchRequest| TB.EqlSearchRequest, options?: TransportRequestOptions) => Promise, TContext>> - } - exists: (params: T.ExistsRequest| TB.ExistsRequest, options?: TransportRequestOptions) => Promise> - existsSource: (params: T.ExistsSourceRequest| TB.ExistsSourceRequest, options?: TransportRequestOptions) => Promise> - explain: (params: T.ExplainRequest| TB.ExplainRequest, options?: TransportRequestOptions) => Promise, TContext>> - features: { - getFeatures: (params?: T.FeaturesGetFeaturesRequest| TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptions) => Promise> - resetFeatures: (params?: T.FeaturesResetFeaturesRequest| TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptions) => Promise> - } - fieldCaps: (params?: T.FieldCapsRequest| TB.FieldCapsRequest, options?: TransportRequestOptions) => Promise> - fleet: { - globalCheckpoints: (params: T.FleetGlobalCheckpointsRequest| TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions) => Promise> - msearch: (params: T.FleetMsearchRequest| TB.FleetMsearchRequest, options?: TransportRequestOptions) => Promise, TContext>> - search: (params: T.FleetSearchRequest| TB.FleetSearchRequest, options?: TransportRequestOptions) => Promise, TContext>> - } - get: (params: T.GetRequest| TB.GetRequest, options?: TransportRequestOptions) => Promise, TContext>> - getScript: (params: T.GetScriptRequest| TB.GetScriptRequest, options?: TransportRequestOptions) => Promise> - getScriptContext: (params?: T.GetScriptContextRequest| TB.GetScriptContextRequest, options?: TransportRequestOptions) => Promise> - getScriptLanguages: (params?: T.GetScriptLanguagesRequest| TB.GetScriptLanguagesRequest, options?: TransportRequestOptions) => Promise> - getSource: (params: T.GetSourceRequest| TB.GetSourceRequest, options?: TransportRequestOptions) => Promise, TContext>> - graph: { - explore: (params: T.GraphExploreRequest| TB.GraphExploreRequest, options?: TransportRequestOptions) => Promise> - } - ilm: { - deleteLifecycle: (params: T.IlmDeleteLifecycleRequest| TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions) => Promise> - explainLifecycle: (params: T.IlmExplainLifecycleRequest| TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions) => Promise> - getLifecycle: (params?: T.IlmGetLifecycleRequest| TB.IlmGetLifecycleRequest, options?: TransportRequestOptions) => Promise> - getStatus: (params?: T.IlmGetStatusRequest| TB.IlmGetStatusRequest, options?: TransportRequestOptions) => Promise> - migrateToDataTiers: (params?: 
T.IlmMigrateToDataTiersRequest| TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions) => Promise> - moveToStep: (params: T.IlmMoveToStepRequest| TB.IlmMoveToStepRequest, options?: TransportRequestOptions) => Promise> - putLifecycle: (params: T.IlmPutLifecycleRequest| TB.IlmPutLifecycleRequest, options?: TransportRequestOptions) => Promise> - removePolicy: (params: T.IlmRemovePolicyRequest| TB.IlmRemovePolicyRequest, options?: TransportRequestOptions) => Promise> - retry: (params: T.IlmRetryRequest| TB.IlmRetryRequest, options?: TransportRequestOptions) => Promise> - start: (params?: T.IlmStartRequest| TB.IlmStartRequest, options?: TransportRequestOptions) => Promise> - stop: (params?: T.IlmStopRequest| TB.IlmStopRequest, options?: TransportRequestOptions) => Promise> - } - index: (params: T.IndexRequest| TB.IndexRequest, options?: TransportRequestOptions) => Promise> - indices: { - addBlock: (params: T.IndicesAddBlockRequest| TB.IndicesAddBlockRequest, options?: TransportRequestOptions) => Promise> - analyze: (params?: T.IndicesAnalyzeRequest| TB.IndicesAnalyzeRequest, options?: TransportRequestOptions) => Promise> - clearCache: (params?: T.IndicesClearCacheRequest| TB.IndicesClearCacheRequest, options?: TransportRequestOptions) => Promise> - clone: (params: T.IndicesCloneRequest| TB.IndicesCloneRequest, options?: TransportRequestOptions) => Promise> - close: (params: T.IndicesCloseRequest| TB.IndicesCloseRequest, options?: TransportRequestOptions) => Promise> - create: (params: T.IndicesCreateRequest| TB.IndicesCreateRequest, options?: TransportRequestOptions) => Promise> - createDataStream: (params: T.IndicesCreateDataStreamRequest| TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptions) => Promise> - dataStreamsStats: (params?: T.IndicesDataStreamsStatsRequest| TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions) => Promise> - delete: (params: T.IndicesDeleteRequest| TB.IndicesDeleteRequest, options?: TransportRequestOptions) => Promise> - deleteAlias: (params: T.IndicesDeleteAliasRequest| TB.IndicesDeleteAliasRequest, options?: TransportRequestOptions) => Promise> - deleteDataStream: (params: T.IndicesDeleteDataStreamRequest| TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions) => Promise> - deleteIndexTemplate: (params: T.IndicesDeleteIndexTemplateRequest| TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions) => Promise> - deleteTemplate: (params: T.IndicesDeleteTemplateRequest| TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptions) => Promise> - diskUsage: (params: T.IndicesDiskUsageRequest| TB.IndicesDiskUsageRequest, options?: TransportRequestOptions) => Promise> - exists: (params: T.IndicesExistsRequest| TB.IndicesExistsRequest, options?: TransportRequestOptions) => Promise> - existsAlias: (params: T.IndicesExistsAliasRequest| TB.IndicesExistsAliasRequest, options?: TransportRequestOptions) => Promise> - existsIndexTemplate: (params: T.IndicesExistsIndexTemplateRequest| TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions) => Promise> - existsTemplate: (params: T.IndicesExistsTemplateRequest| TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions) => Promise> - fieldUsageStats: (params: T.IndicesFieldUsageStatsRequest| TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions) => Promise> - flush: (params?: T.IndicesFlushRequest| TB.IndicesFlushRequest, options?: TransportRequestOptions) => Promise> - forcemerge: (params?: 
T.IndicesForcemergeRequest| TB.IndicesForcemergeRequest, options?: TransportRequestOptions) => Promise> - get: (params: T.IndicesGetRequest| TB.IndicesGetRequest, options?: TransportRequestOptions) => Promise> - getAlias: (params?: T.IndicesGetAliasRequest| TB.IndicesGetAliasRequest, options?: TransportRequestOptions) => Promise> - getDataStream: (params?: T.IndicesGetDataStreamRequest| TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions) => Promise> - getFieldMapping: (params: T.IndicesGetFieldMappingRequest| TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptions) => Promise> - getIndexTemplate: (params?: T.IndicesGetIndexTemplateRequest| TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions) => Promise> - getMapping: (params?: T.IndicesGetMappingRequest| TB.IndicesGetMappingRequest, options?: TransportRequestOptions) => Promise> - getSettings: (params?: T.IndicesGetSettingsRequest| TB.IndicesGetSettingsRequest, options?: TransportRequestOptions) => Promise> - getTemplate: (params?: T.IndicesGetTemplateRequest| TB.IndicesGetTemplateRequest, options?: TransportRequestOptions) => Promise> - migrateToDataStream: (params: T.IndicesMigrateToDataStreamRequest| TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions) => Promise> - modifyDataStream: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - open: (params: T.IndicesOpenRequest| TB.IndicesOpenRequest, options?: TransportRequestOptions) => Promise> - promoteDataStream: (params: T.IndicesPromoteDataStreamRequest| TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions) => Promise> - putAlias: (params: T.IndicesPutAliasRequest| TB.IndicesPutAliasRequest, options?: TransportRequestOptions) => Promise> - putIndexTemplate: (params: T.IndicesPutIndexTemplateRequest| TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions) => Promise> - putMapping: (params: T.IndicesPutMappingRequest| TB.IndicesPutMappingRequest, options?: TransportRequestOptions) => Promise> - putSettings: (params?: T.IndicesPutSettingsRequest| TB.IndicesPutSettingsRequest, options?: TransportRequestOptions) => Promise> - putTemplate: (params: T.IndicesPutTemplateRequest| TB.IndicesPutTemplateRequest, options?: TransportRequestOptions) => Promise> - recovery: (params?: T.IndicesRecoveryRequest| TB.IndicesRecoveryRequest, options?: TransportRequestOptions) => Promise> - refresh: (params?: T.IndicesRefreshRequest| TB.IndicesRefreshRequest, options?: TransportRequestOptions) => Promise> - reloadSearchAnalyzers: (params: T.IndicesReloadSearchAnalyzersRequest| TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions) => Promise> - resolveIndex: (params: T.IndicesResolveIndexRequest| TB.IndicesResolveIndexRequest, options?: TransportRequestOptions) => Promise> - rollover: (params: T.IndicesRolloverRequest| TB.IndicesRolloverRequest, options?: TransportRequestOptions) => Promise> - segments: (params?: T.IndicesSegmentsRequest| TB.IndicesSegmentsRequest, options?: TransportRequestOptions) => Promise> - shardStores: (params?: T.IndicesShardStoresRequest| TB.IndicesShardStoresRequest, options?: TransportRequestOptions) => Promise> - shrink: (params: T.IndicesShrinkRequest| TB.IndicesShrinkRequest, options?: TransportRequestOptions) => Promise> - simulateIndexTemplate: (params: T.IndicesSimulateIndexTemplateRequest| TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions) => Promise> - simulateTemplate: (params?: T.IndicesSimulateTemplateRequest| 
TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions) => Promise> - split: (params: T.IndicesSplitRequest| TB.IndicesSplitRequest, options?: TransportRequestOptions) => Promise> - stats: (params?: T.IndicesStatsRequest| TB.IndicesStatsRequest, options?: TransportRequestOptions) => Promise> - unfreeze: (params: T.IndicesUnfreezeRequest| TB.IndicesUnfreezeRequest, options?: TransportRequestOptions) => Promise> - updateAliases: (params?: T.IndicesUpdateAliasesRequest| TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions) => Promise> - validateQuery: (params?: T.IndicesValidateQueryRequest| TB.IndicesValidateQueryRequest, options?: TransportRequestOptions) => Promise> - } - info: (params?: T.InfoRequest| TB.InfoRequest, options?: TransportRequestOptions) => Promise> - ingest: { - deletePipeline: (params: T.IngestDeletePipelineRequest| TB.IngestDeletePipelineRequest, options?: TransportRequestOptions) => Promise> - geoIpStats: (params?: T.IngestGeoIpStatsRequest| TB.IngestGeoIpStatsRequest, options?: TransportRequestOptions) => Promise> - getPipeline: (params?: T.IngestGetPipelineRequest| TB.IngestGetPipelineRequest, options?: TransportRequestOptions) => Promise> - processorGrok: (params?: T.IngestProcessorGrokRequest| TB.IngestProcessorGrokRequest, options?: TransportRequestOptions) => Promise> - putPipeline: (params: T.IngestPutPipelineRequest| TB.IngestPutPipelineRequest, options?: TransportRequestOptions) => Promise> - simulate: (params?: T.IngestSimulateRequest| TB.IngestSimulateRequest, options?: TransportRequestOptions) => Promise> - } - knnSearch: (params: T.KnnSearchRequest| TB.KnnSearchRequest, options?: TransportRequestOptions) => Promise, TContext>> - license: { - delete: (params?: T.LicenseDeleteRequest| TB.LicenseDeleteRequest, options?: TransportRequestOptions) => Promise> - get: (params?: T.LicenseGetRequest| TB.LicenseGetRequest, options?: TransportRequestOptions) => Promise> - getBasicStatus: (params?: T.LicenseGetBasicStatusRequest| TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptions) => Promise> - getTrialStatus: (params?: T.LicenseGetTrialStatusRequest| TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptions) => Promise> - post: (params?: T.LicensePostRequest| TB.LicensePostRequest, options?: TransportRequestOptions) => Promise> - postStartBasic: (params?: T.LicensePostStartBasicRequest| TB.LicensePostStartBasicRequest, options?: TransportRequestOptions) => Promise> - postStartTrial: (params?: T.LicensePostStartTrialRequest| TB.LicensePostStartTrialRequest, options?: TransportRequestOptions) => Promise> - } - logstash: { - deletePipeline: (params: T.LogstashDeletePipelineRequest| TB.LogstashDeletePipelineRequest, options?: TransportRequestOptions) => Promise> - getPipeline: (params: T.LogstashGetPipelineRequest| TB.LogstashGetPipelineRequest, options?: TransportRequestOptions) => Promise> - putPipeline: (params: T.LogstashPutPipelineRequest| TB.LogstashPutPipelineRequest, options?: TransportRequestOptions) => Promise> - } - mget: (params?: T.MgetRequest| TB.MgetRequest, options?: TransportRequestOptions) => Promise, TContext>> - migration: { - deprecations: (params?: T.MigrationDeprecationsRequest| TB.MigrationDeprecationsRequest, options?: TransportRequestOptions) => Promise> - getFeatureUpgradeStatus: (params?: T.MigrationGetFeatureUpgradeStatusRequest| TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions) => Promise> - postFeatureUpgrade: (params?: T.MigrationPostFeatureUpgradeRequest| 
TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions) => Promise> - } - ml: { - closeJob: (params: T.MlCloseJobRequest| TB.MlCloseJobRequest, options?: TransportRequestOptions) => Promise> - deleteCalendar: (params: T.MlDeleteCalendarRequest| TB.MlDeleteCalendarRequest, options?: TransportRequestOptions) => Promise> - deleteCalendarEvent: (params: T.MlDeleteCalendarEventRequest| TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptions) => Promise> - deleteCalendarJob: (params: T.MlDeleteCalendarJobRequest| TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptions) => Promise> - deleteDataFrameAnalytics: (params: T.MlDeleteDataFrameAnalyticsRequest| TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> - deleteDatafeed: (params: T.MlDeleteDatafeedRequest| TB.MlDeleteDatafeedRequest, options?: TransportRequestOptions) => Promise> - deleteExpiredData: (params?: T.MlDeleteExpiredDataRequest| TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions) => Promise> - deleteFilter: (params: T.MlDeleteFilterRequest| TB.MlDeleteFilterRequest, options?: TransportRequestOptions) => Promise> - deleteForecast: (params: T.MlDeleteForecastRequest| TB.MlDeleteForecastRequest, options?: TransportRequestOptions) => Promise> - deleteJob: (params: T.MlDeleteJobRequest| TB.MlDeleteJobRequest, options?: TransportRequestOptions) => Promise> - deleteModelSnapshot: (params: T.MlDeleteModelSnapshotRequest| TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions) => Promise> - deleteTrainedModel: (params: T.MlDeleteTrainedModelRequest| TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptions) => Promise> - deleteTrainedModelAlias: (params: T.MlDeleteTrainedModelAliasRequest| TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions) => Promise> - estimateModelMemory: (params?: T.MlEstimateModelMemoryRequest| TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptions) => Promise> - evaluateDataFrame: (params?: T.MlEvaluateDataFrameRequest| TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptions) => Promise> - explainDataFrameAnalytics: (params?: T.MlExplainDataFrameAnalyticsRequest| TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> - flushJob: (params: T.MlFlushJobRequest| TB.MlFlushJobRequest, options?: TransportRequestOptions) => Promise> - forecast: (params: T.MlForecastRequest| TB.MlForecastRequest, options?: TransportRequestOptions) => Promise> - getBuckets: (params: T.MlGetBucketsRequest| TB.MlGetBucketsRequest, options?: TransportRequestOptions) => Promise> - getCalendarEvents: (params: T.MlGetCalendarEventsRequest| TB.MlGetCalendarEventsRequest, options?: TransportRequestOptions) => Promise> - getCalendars: (params?: T.MlGetCalendarsRequest| TB.MlGetCalendarsRequest, options?: TransportRequestOptions) => Promise> - getCategories: (params: T.MlGetCategoriesRequest| TB.MlGetCategoriesRequest, options?: TransportRequestOptions) => Promise> - getDataFrameAnalytics: (params?: T.MlGetDataFrameAnalyticsRequest| TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> - getDataFrameAnalyticsStats: (params?: T.MlGetDataFrameAnalyticsStatsRequest| TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions) => Promise> - getDatafeedStats: (params?: T.MlGetDatafeedStatsRequest| TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptions) => Promise> - getDatafeeds: (params?: T.MlGetDatafeedsRequest| 
TB.MlGetDatafeedsRequest, options?: TransportRequestOptions) => Promise> - getFilters: (params?: T.MlGetFiltersRequest| TB.MlGetFiltersRequest, options?: TransportRequestOptions) => Promise> - getInfluencers: (params: T.MlGetInfluencersRequest| TB.MlGetInfluencersRequest, options?: TransportRequestOptions) => Promise> - getJobStats: (params?: T.MlGetJobStatsRequest| TB.MlGetJobStatsRequest, options?: TransportRequestOptions) => Promise> - getJobs: (params?: T.MlGetJobsRequest| TB.MlGetJobsRequest, options?: TransportRequestOptions) => Promise> - getMemoryStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - getModelSnapshotUpgradeStats: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - getModelSnapshots: (params: T.MlGetModelSnapshotsRequest| TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions) => Promise> - getOverallBuckets: (params: T.MlGetOverallBucketsRequest| TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions) => Promise> - getRecords: (params: T.MlGetRecordsRequest| TB.MlGetRecordsRequest, options?: TransportRequestOptions) => Promise> - getTrainedModels: (params?: T.MlGetTrainedModelsRequest| TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions) => Promise> - getTrainedModelsStats: (params?: T.MlGetTrainedModelsStatsRequest| TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions) => Promise> - inferTrainedModelDeployment: (params: T.MlInferTrainedModelDeploymentRequest| TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise> - info: (params?: T.MlInfoRequest| TB.MlInfoRequest, options?: TransportRequestOptions) => Promise> - openJob: (params: T.MlOpenJobRequest| TB.MlOpenJobRequest, options?: TransportRequestOptions) => Promise> - postCalendarEvents: (params: T.MlPostCalendarEventsRequest| TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions) => Promise> - postData: (params: T.MlPostDataRequest| TB.MlPostDataRequest, options?: TransportRequestOptions) => Promise> - previewDataFrameAnalytics: (params?: T.MlPreviewDataFrameAnalyticsRequest| TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> - previewDatafeed: (params?: T.MlPreviewDatafeedRequest| TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions) => Promise, TContext>> - putCalendar: (params: T.MlPutCalendarRequest| TB.MlPutCalendarRequest, options?: TransportRequestOptions) => Promise> - putCalendarJob: (params: T.MlPutCalendarJobRequest| TB.MlPutCalendarJobRequest, options?: TransportRequestOptions) => Promise> - putDataFrameAnalytics: (params: T.MlPutDataFrameAnalyticsRequest| TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> - putDatafeed: (params: T.MlPutDatafeedRequest| TB.MlPutDatafeedRequest, options?: TransportRequestOptions) => Promise> - putFilter: (params: T.MlPutFilterRequest| TB.MlPutFilterRequest, options?: TransportRequestOptions) => Promise> - putJob: (params: T.MlPutJobRequest| TB.MlPutJobRequest, options?: TransportRequestOptions) => Promise> - putTrainedModel: (params: T.MlPutTrainedModelRequest| TB.MlPutTrainedModelRequest, options?: TransportRequestOptions) => Promise> - putTrainedModelAlias: (params: T.MlPutTrainedModelAliasRequest| TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions) => Promise> - putTrainedModelDefinitionPart: (params: T.MlPutTrainedModelDefinitionPartRequest| TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions) => Promise> - 
putTrainedModelVocabulary: (params: T.MlPutTrainedModelVocabularyRequest| TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions) => Promise> - resetJob: (params: T.MlResetJobRequest| TB.MlResetJobRequest, options?: TransportRequestOptions) => Promise> - revertModelSnapshot: (params: T.MlRevertModelSnapshotRequest| TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptions) => Promise> - setUpgradeMode: (params?: T.MlSetUpgradeModeRequest| TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions) => Promise> - startDataFrameAnalytics: (params: T.MlStartDataFrameAnalyticsRequest| TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> - startDatafeed: (params: T.MlStartDatafeedRequest| TB.MlStartDatafeedRequest, options?: TransportRequestOptions) => Promise> - startTrainedModelDeployment: (params: T.MlStartTrainedModelDeploymentRequest| TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise> - stopDataFrameAnalytics: (params: T.MlStopDataFrameAnalyticsRequest| TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> - stopDatafeed: (params: T.MlStopDatafeedRequest| TB.MlStopDatafeedRequest, options?: TransportRequestOptions) => Promise> - stopTrainedModelDeployment: (params: T.MlStopTrainedModelDeploymentRequest| TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise> - updateDataFrameAnalytics: (params: T.MlUpdateDataFrameAnalyticsRequest| TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise> - updateDatafeed: (params: T.MlUpdateDatafeedRequest| TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions) => Promise> - updateFilter: (params: T.MlUpdateFilterRequest| TB.MlUpdateFilterRequest, options?: TransportRequestOptions) => Promise> - updateJob: (params: T.MlUpdateJobRequest| TB.MlUpdateJobRequest, options?: TransportRequestOptions) => Promise> - updateModelSnapshot: (params: T.MlUpdateModelSnapshotRequest| TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions) => Promise> - upgradeJobSnapshot: (params: T.MlUpgradeJobSnapshotRequest| TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions) => Promise> - validate: (params?: T.MlValidateRequest| TB.MlValidateRequest, options?: TransportRequestOptions) => Promise> - validateDetector: (params?: T.MlValidateDetectorRequest| TB.MlValidateDetectorRequest, options?: TransportRequestOptions) => Promise> - } - monitoring: { - bulk: (params: T.MonitoringBulkRequest| TB.MonitoringBulkRequest, options?: TransportRequestOptions) => Promise> - } - msearch: , TContext = unknown>(params?: T.MsearchRequest| TB.MsearchRequest, options?: TransportRequestOptions) => Promise, TContext>> - msearchTemplate: , TContext = unknown>(params?: T.MsearchTemplateRequest| TB.MsearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>> - mtermvectors: (params?: T.MtermvectorsRequest| TB.MtermvectorsRequest, options?: TransportRequestOptions) => Promise> - nodes: { - clearRepositoriesMeteringArchive: (params: T.NodesClearRepositoriesMeteringArchiveRequest| TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions) => Promise> - getRepositoriesMeteringInfo: (params: T.NodesGetRepositoriesMeteringInfoRequest| TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions) => Promise> - hotThreads: (params?: T.NodesHotThreadsRequest| TB.NodesHotThreadsRequest, options?: TransportRequestOptions) => Promise> - 
info: (params?: T.NodesInfoRequest| TB.NodesInfoRequest, options?: TransportRequestOptions) => Promise> - reloadSecureSettings: (params?: T.NodesReloadSecureSettingsRequest| TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions) => Promise> - stats: (params?: T.NodesStatsRequest| TB.NodesStatsRequest, options?: TransportRequestOptions) => Promise> - usage: (params?: T.NodesUsageRequest| TB.NodesUsageRequest, options?: TransportRequestOptions) => Promise> - } - openPointInTime: (params: T.OpenPointInTimeRequest| TB.OpenPointInTimeRequest, options?: TransportRequestOptions) => Promise> - ping: (params?: T.PingRequest| TB.PingRequest, options?: TransportRequestOptions) => Promise> - putScript: (params: T.PutScriptRequest| TB.PutScriptRequest, options?: TransportRequestOptions) => Promise> - rankEval: (params: T.RankEvalRequest| TB.RankEvalRequest, options?: TransportRequestOptions) => Promise> - reindex: (params?: T.ReindexRequest| TB.ReindexRequest, options?: TransportRequestOptions) => Promise> - reindexRethrottle: (params: T.ReindexRethrottleRequest| TB.ReindexRethrottleRequest, options?: TransportRequestOptions) => Promise> - renderSearchTemplate: (params?: T.RenderSearchTemplateRequest| TB.RenderSearchTemplateRequest, options?: TransportRequestOptions) => Promise> - rollup: { - deleteJob: (params: T.RollupDeleteJobRequest| TB.RollupDeleteJobRequest, options?: TransportRequestOptions) => Promise> - getJobs: (params?: T.RollupGetJobsRequest| TB.RollupGetJobsRequest, options?: TransportRequestOptions) => Promise> - getRollupCaps: (params?: T.RollupGetRollupCapsRequest| TB.RollupGetRollupCapsRequest, options?: TransportRequestOptions) => Promise> - getRollupIndexCaps: (params: T.RollupGetRollupIndexCapsRequest| TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions) => Promise> - putJob: (params: T.RollupPutJobRequest| TB.RollupPutJobRequest, options?: TransportRequestOptions) => Promise> - rollup: (params: T.RollupRollupRequest| TB.RollupRollupRequest, options?: TransportRequestOptions) => Promise> - rollupSearch: , TContext = unknown>(params: T.RollupRollupSearchRequest| TB.RollupRollupSearchRequest, options?: TransportRequestOptions) => Promise, TContext>> - startJob: (params: T.RollupStartJobRequest| TB.RollupStartJobRequest, options?: TransportRequestOptions) => Promise> - stopJob: (params: T.RollupStopJobRequest| TB.RollupStopJobRequest, options?: TransportRequestOptions) => Promise> - } - scriptsPainlessExecute: (params?: T.ScriptsPainlessExecuteRequest| TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions) => Promise, TContext>> - scroll: , TContext = unknown>(params?: T.ScrollRequest| TB.ScrollRequest, options?: TransportRequestOptions) => Promise, TContext>> - search: , TContext = unknown>(params?: T.SearchRequest| TB.SearchRequest, options?: TransportRequestOptions) => Promise, TContext>> - searchMvt: (params: T.SearchMvtRequest| TB.SearchMvtRequest, options?: TransportRequestOptions) => Promise> - searchShards: (params?: T.SearchShardsRequest| TB.SearchShardsRequest, options?: TransportRequestOptions) => Promise> - searchTemplate: (params?: T.SearchTemplateRequest| TB.SearchTemplateRequest, options?: TransportRequestOptions) => Promise, TContext>> - searchableSnapshots: { - cacheStats: (params?: T.SearchableSnapshotsCacheStatsRequest| TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions) => Promise> - clearCache: (params?: T.SearchableSnapshotsClearCacheRequest| TB.SearchableSnapshotsClearCacheRequest, options?: 
TransportRequestOptions) => Promise> - mount: (params: T.SearchableSnapshotsMountRequest| TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions) => Promise> - stats: (params?: T.SearchableSnapshotsStatsRequest| TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions) => Promise> - } - security: { - activateUserProfile: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - authenticate: (params?: T.SecurityAuthenticateRequest| TB.SecurityAuthenticateRequest, options?: TransportRequestOptions) => Promise> - changePassword: (params?: T.SecurityChangePasswordRequest| TB.SecurityChangePasswordRequest, options?: TransportRequestOptions) => Promise> - clearApiKeyCache: (params: T.SecurityClearApiKeyCacheRequest| TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions) => Promise> - clearCachedPrivileges: (params: T.SecurityClearCachedPrivilegesRequest| TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions) => Promise> - clearCachedRealms: (params: T.SecurityClearCachedRealmsRequest| TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions) => Promise> - clearCachedRoles: (params: T.SecurityClearCachedRolesRequest| TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptions) => Promise> - clearCachedServiceTokens: (params: T.SecurityClearCachedServiceTokensRequest| TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions) => Promise> - createApiKey: (params?: T.SecurityCreateApiKeyRequest| TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptions) => Promise> - createServiceToken: (params: T.SecurityCreateServiceTokenRequest| TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions) => Promise> - deletePrivileges: (params: T.SecurityDeletePrivilegesRequest| TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions) => Promise> - deleteRole: (params: T.SecurityDeleteRoleRequest| TB.SecurityDeleteRoleRequest, options?: TransportRequestOptions) => Promise> - deleteRoleMapping: (params: T.SecurityDeleteRoleMappingRequest| TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions) => Promise> - deleteServiceToken: (params: T.SecurityDeleteServiceTokenRequest| TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions) => Promise> - deleteUser: (params: T.SecurityDeleteUserRequest| TB.SecurityDeleteUserRequest, options?: TransportRequestOptions) => Promise> - disableUser: (params: T.SecurityDisableUserRequest| TB.SecurityDisableUserRequest, options?: TransportRequestOptions) => Promise> - enableUser: (params: T.SecurityEnableUserRequest| TB.SecurityEnableUserRequest, options?: TransportRequestOptions) => Promise> - enrollKibana: (params?: T.SecurityEnrollKibanaRequest| TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptions) => Promise> - enrollNode: (params?: T.SecurityEnrollNodeRequest| TB.SecurityEnrollNodeRequest, options?: TransportRequestOptions) => Promise> - getApiKey: (params?: T.SecurityGetApiKeyRequest| TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions) => Promise> - getBuiltinPrivileges: (params?: T.SecurityGetBuiltinPrivilegesRequest| TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions) => Promise> - getPrivileges: (params?: T.SecurityGetPrivilegesRequest| TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions) => Promise> - getRole: (params?: T.SecurityGetRoleRequest| TB.SecurityGetRoleRequest, options?: TransportRequestOptions) => Promise> - 
getRoleMapping: (params?: T.SecurityGetRoleMappingRequest| TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptions) => Promise> - getServiceAccounts: (params?: T.SecurityGetServiceAccountsRequest| TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions) => Promise> - getServiceCredentials: (params: T.SecurityGetServiceCredentialsRequest| TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions) => Promise> - getToken: (params?: T.SecurityGetTokenRequest| TB.SecurityGetTokenRequest, options?: TransportRequestOptions) => Promise> - getUser: (params?: T.SecurityGetUserRequest| TB.SecurityGetUserRequest, options?: TransportRequestOptions) => Promise> - getUserPrivileges: (params?: T.SecurityGetUserPrivilegesRequest| TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions) => Promise> - getUserProfile: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - grantApiKey: (params?: T.SecurityGrantApiKeyRequest| TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions) => Promise> - hasPrivileges: (params?: T.SecurityHasPrivilegesRequest| TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions) => Promise> - invalidateApiKey: (params?: T.SecurityInvalidateApiKeyRequest| TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions) => Promise> - invalidateToken: (params?: T.SecurityInvalidateTokenRequest| TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions) => Promise> - oidcAuthenticate: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - oidcLogout: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - oidcPrepareAuthentication: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - putPrivileges: (params?: T.SecurityPutPrivilegesRequest| TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions) => Promise> - putRole: (params: T.SecurityPutRoleRequest| TB.SecurityPutRoleRequest, options?: TransportRequestOptions) => Promise> - putRoleMapping: (params: T.SecurityPutRoleMappingRequest| TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions) => Promise> - putUser: (params: T.SecurityPutUserRequest| TB.SecurityPutUserRequest, options?: TransportRequestOptions) => Promise> - queryApiKeys: (params?: T.SecurityQueryApiKeysRequest| TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions) => Promise> - samlAuthenticate: (params?: T.SecuritySamlAuthenticateRequest| TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions) => Promise> - samlCompleteLogout: (params?: T.SecuritySamlCompleteLogoutRequest| TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions) => Promise> - samlInvalidate: (params?: T.SecuritySamlInvalidateRequest| TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptions) => Promise> - samlLogout: (params?: T.SecuritySamlLogoutRequest| TB.SecuritySamlLogoutRequest, options?: TransportRequestOptions) => Promise> - samlPrepareAuthentication: (params?: T.SecuritySamlPrepareAuthenticationRequest| TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions) => Promise> - samlServiceProviderMetadata: (params: T.SecuritySamlServiceProviderMetadataRequest| TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions) => Promise> - updateUserProfileData: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - } - shutdown: { - deleteNode: (params: T.ShutdownDeleteNodeRequest| TB.ShutdownDeleteNodeRequest, options?: 
TransportRequestOptions) => Promise> - getNode: (params?: T.ShutdownGetNodeRequest| TB.ShutdownGetNodeRequest, options?: TransportRequestOptions) => Promise> - putNode: (params: T.ShutdownPutNodeRequest| TB.ShutdownPutNodeRequest, options?: TransportRequestOptions) => Promise> - } - slm: { - deleteLifecycle: (params: T.SlmDeleteLifecycleRequest| TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptions) => Promise> - executeLifecycle: (params: T.SlmExecuteLifecycleRequest| TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptions) => Promise> - executeRetention: (params?: T.SlmExecuteRetentionRequest| TB.SlmExecuteRetentionRequest, options?: TransportRequestOptions) => Promise> - getLifecycle: (params?: T.SlmGetLifecycleRequest| TB.SlmGetLifecycleRequest, options?: TransportRequestOptions) => Promise> - getStats: (params?: T.SlmGetStatsRequest| TB.SlmGetStatsRequest, options?: TransportRequestOptions) => Promise> - getStatus: (params?: T.SlmGetStatusRequest| TB.SlmGetStatusRequest, options?: TransportRequestOptions) => Promise> - putLifecycle: (params: T.SlmPutLifecycleRequest| TB.SlmPutLifecycleRequest, options?: TransportRequestOptions) => Promise> - start: (params?: T.SlmStartRequest| TB.SlmStartRequest, options?: TransportRequestOptions) => Promise> - stop: (params?: T.SlmStopRequest| TB.SlmStopRequest, options?: TransportRequestOptions) => Promise> - } - snapshot: { - cleanupRepository: (params: T.SnapshotCleanupRepositoryRequest| TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions) => Promise> - clone: (params: T.SnapshotCloneRequest| TB.SnapshotCloneRequest, options?: TransportRequestOptions) => Promise> - create: (params: T.SnapshotCreateRequest| TB.SnapshotCreateRequest, options?: TransportRequestOptions) => Promise> - createRepository: (params: T.SnapshotCreateRepositoryRequest| TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions) => Promise> - delete: (params: T.SnapshotDeleteRequest| TB.SnapshotDeleteRequest, options?: TransportRequestOptions) => Promise> - deleteRepository: (params: T.SnapshotDeleteRepositoryRequest| TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions) => Promise> - get: (params: T.SnapshotGetRequest| TB.SnapshotGetRequest, options?: TransportRequestOptions) => Promise> - getRepository: (params?: T.SnapshotGetRepositoryRequest| TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions) => Promise> - repositoryAnalyze: (params?: T.TODO, options?: TransportRequestOptions) => Promise> - restore: (params: T.SnapshotRestoreRequest| TB.SnapshotRestoreRequest, options?: TransportRequestOptions) => Promise> - status: (params?: T.SnapshotStatusRequest| TB.SnapshotStatusRequest, options?: TransportRequestOptions) => Promise> - verifyRepository: (params: T.SnapshotVerifyRepositoryRequest| TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions) => Promise> - } - sql: { - clearCursor: (params?: T.SqlClearCursorRequest| TB.SqlClearCursorRequest, options?: TransportRequestOptions) => Promise> - deleteAsync: (params: T.SqlDeleteAsyncRequest| TB.SqlDeleteAsyncRequest, options?: TransportRequestOptions) => Promise> - getAsync: (params: T.SqlGetAsyncRequest| TB.SqlGetAsyncRequest, options?: TransportRequestOptions) => Promise> - getAsyncStatus: (params: T.SqlGetAsyncStatusRequest| TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptions) => Promise> - query: (params?: T.SqlQueryRequest| TB.SqlQueryRequest, options?: TransportRequestOptions) => Promise> - translate: (params?: 
T.SqlTranslateRequest| TB.SqlTranslateRequest, options?: TransportRequestOptions) => Promise> - } - ssl: { - certificates: (params?: T.SslCertificatesRequest| TB.SslCertificatesRequest, options?: TransportRequestOptions) => Promise> - } - tasks: { - cancel: (params?: T.TasksCancelRequest| TB.TasksCancelRequest, options?: TransportRequestOptions) => Promise> - get: (params: T.TasksGetRequest| TB.TasksGetRequest, options?: TransportRequestOptions) => Promise> - list: (params?: T.TasksListRequest| TB.TasksListRequest, options?: TransportRequestOptions) => Promise> - } - termsEnum: (params: T.TermsEnumRequest| TB.TermsEnumRequest, options?: TransportRequestOptions) => Promise> - termvectors: (params: T.TermvectorsRequest| TB.TermvectorsRequest, options?: TransportRequestOptions) => Promise> - textStructure: { - findStructure: (params: T.TextStructureFindStructureRequest| TB.TextStructureFindStructureRequest, options?: TransportRequestOptions) => Promise> - } - transform: { - deleteTransform: (params: T.TransformDeleteTransformRequest| TB.TransformDeleteTransformRequest, options?: TransportRequestOptions) => Promise> - getTransform: (params?: T.TransformGetTransformRequest| TB.TransformGetTransformRequest, options?: TransportRequestOptions) => Promise> - getTransformStats: (params: T.TransformGetTransformStatsRequest| TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions) => Promise> - previewTransform: (params?: T.TransformPreviewTransformRequest| TB.TransformPreviewTransformRequest, options?: TransportRequestOptions) => Promise, TContext>> - putTransform: (params: T.TransformPutTransformRequest| TB.TransformPutTransformRequest, options?: TransportRequestOptions) => Promise> - resetTransform: (params: T.TransformResetTransformRequest| TB.TransformResetTransformRequest, options?: TransportRequestOptions) => Promise> - startTransform: (params: T.TransformStartTransformRequest| TB.TransformStartTransformRequest, options?: TransportRequestOptions) => Promise> - stopTransform: (params: T.TransformStopTransformRequest| TB.TransformStopTransformRequest, options?: TransportRequestOptions) => Promise> - updateTransform: (params: T.TransformUpdateTransformRequest| TB.TransformUpdateTransformRequest, options?: TransportRequestOptions) => Promise> - upgradeTransforms: (params?: T.TransformUpgradeTransformsRequest| TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptions) => Promise> - } - update: (params: T.UpdateRequest| TB.UpdateRequest, options?: TransportRequestOptions) => Promise, TContext>> - updateByQuery: (params: T.UpdateByQueryRequest| TB.UpdateByQueryRequest, options?: TransportRequestOptions) => Promise> - updateByQueryRethrottle: (params: T.UpdateByQueryRethrottleRequest| TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions) => Promise> - watcher: { - ackWatch: (params: T.WatcherAckWatchRequest| TB.WatcherAckWatchRequest, options?: TransportRequestOptions) => Promise> - activateWatch: (params: T.WatcherActivateWatchRequest| TB.WatcherActivateWatchRequest, options?: TransportRequestOptions) => Promise> - deactivateWatch: (params: T.WatcherDeactivateWatchRequest| TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptions) => Promise> - deleteWatch: (params: T.WatcherDeleteWatchRequest| TB.WatcherDeleteWatchRequest, options?: TransportRequestOptions) => Promise> - executeWatch: (params?: T.WatcherExecuteWatchRequest| TB.WatcherExecuteWatchRequest, options?: TransportRequestOptions) => Promise> - getWatch: (params: 
T.WatcherGetWatchRequest| TB.WatcherGetWatchRequest, options?: TransportRequestOptions) => Promise> - putWatch: (params: T.WatcherPutWatchRequest| TB.WatcherPutWatchRequest, options?: TransportRequestOptions) => Promise> - queryWatches: (params?: T.WatcherQueryWatchesRequest| TB.WatcherQueryWatchesRequest, options?: TransportRequestOptions) => Promise> - start: (params?: T.WatcherStartRequest| TB.WatcherStartRequest, options?: TransportRequestOptions) => Promise> - stats: (params?: T.WatcherStatsRequest| TB.WatcherStatsRequest, options?: TransportRequestOptions) => Promise> - stop: (params?: T.WatcherStopRequest| TB.WatcherStopRequest, options?: TransportRequestOptions) => Promise> - } - xpack: { - info: (params?: T.XpackInfoRequest| TB.XpackInfoRequest, options?: TransportRequestOptions) => Promise> - usage: (params?: T.XpackUsageRequest| TB.XpackUsageRequest, options?: TransportRequestOptions) => Promise> - } -} - -export type { KibanaClient } diff --git a/src/api/types.ts b/src/api/types.ts index 2156290ce..bb8768138 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -920,7 +920,7 @@ export interface ScrollRequest extends RequestBase { rest_total_hits_as_int?: boolean } -export type ScrollResponse> = SearchResponseBody +export type ScrollResponse> = SearchResponseBody export interface SearchRequest extends RequestBase { index?: Indices @@ -986,14 +986,14 @@ export interface SearchRequest extends RequestBase { runtime_mappings?: MappingRuntimeFields } -export type SearchResponse> = SearchResponseBody +export type SearchResponse> = SearchResponseBody -export interface SearchResponseBody { +export interface SearchResponseBody> { took: long timed_out: boolean _shards: ShardStatistics hits: SearchHitsMetadata - aggregations?: Record + aggregations?: TAggregations _clusters?: ClusterStatistics fields?: Record max_score?: double diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 87f1ac3d1..c2a1ef00d 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -975,7 +975,7 @@ export interface ScrollRequest extends RequestBase { } } -export type ScrollResponse> = SearchResponseBody +export type ScrollResponse> = SearchResponseBody export interface SearchRequest extends RequestBase { index?: Indices @@ -1058,14 +1058,14 @@ export interface SearchRequest extends RequestBase { } } -export type SearchResponse> = SearchResponseBody +export type SearchResponse> = SearchResponseBody -export interface SearchResponseBody { +export interface SearchResponseBody> { took: long timed_out: boolean _shards: ShardStatistics hits: SearchHitsMetadata - aggregations?: Record + aggregations?: TAggregations _clusters?: ClusterStatistics fields?: Record max_score?: double diff --git a/src/helpers.ts b/src/helpers.ts index e6a8cf84c..a96de9c7f 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -228,6 +228,7 @@ export default class Helpers { rest_total_hits_as_int: params.rest_total_hits_as_int, scroll_id }, options as TransportRequestOptionsWithMeta) + // @ts-expect-error response = r as TransportResult, unknown> assert(response !== undefined, 'The response is undefined, please file a bug report') if (response.statusCode !== 429) break From 33c4630a346e625b3d94fbed36f97b081da710d8 Mon Sep 17 00:00:00 2001 From: Yaniv Davidi Date: Thu, 24 Feb 2022 12:38:48 +0200 Subject: [PATCH 146/647] docs: fix typo (#1628) --- docs/typescript.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/typescript.asciidoc b/docs/typescript.asciidoc index 
eace26e63..85e61d780 100644 --- a/docs/typescript.asciidoc +++ b/docs/typescript.asciidoc @@ -68,9 +68,9 @@ run().catch(console.log) [discrete] ==== Request & Response types -You can import the full TypeScript requests & responses defintions as it follows: +You can import the full TypeScript requests & responses definitions as follows: [source,ts] ---- import { estypes } from '@elastic/elasticsearch' ----- \ No newline at end of file +---- From 904c3bb28c1e9f718481481a7086116db742ac93 Mon Sep 17 00:00:00 2001 From: Bo Andersen Date: Thu, 24 Feb 2022 11:39:49 +0100 Subject: [PATCH 147/647] [DOCS] Fix double backticks for inline code snippets (#1540) --- docs/advanced-config.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/advanced-config.asciidoc b/docs/advanced-config.asciidoc index 34eb3d750..1f9d2bc07 100644 --- a/docs/advanced-config.asciidoc +++ b/docs/advanced-config.asciidoc @@ -89,12 +89,12 @@ ==== Migrate to v8 The Node.js client can be configured to emit an HTTP header -``Accept: application/vnd.elasticsearch+json; compatible-with=7`` +`Accept: application/vnd.elasticsearch+json; compatible-with=7` which signals to Elasticsearch that the client is requesting -``7.x`` version of request and response bodies. This allows for +`7.x` version of request and response bodies. This allows for upgrading from 7.x to 8.x version of Elasticsearch without upgrading everything at once. Elasticsearch should be upgraded first after the compatibility header is configured and clients should be upgraded second. To enable this setting, configure the environment variable -``ELASTIC_CLIENT_APIVERSIONING`` to ``true``. +`ELASTIC_CLIENT_APIVERSIONING` to `true`. From d7e5ff51914a5705bfef8407ba510e7cd20e39f3 Mon Sep 17 00:00:00 2001 From: delvedor Date: Thu, 24 Feb 2022 11:43:14 +0100 Subject: [PATCH 148/647] Bumped v8.2.0-canary.1 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 838925c00..19b917a5e 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", "version": "8.2.0", - "versionCanary": "8.2.0-canary.0", + "versionCanary": "8.2.0-canary.1", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 25d9afbc0b9b69f6e433eeb34e529fea96f6ba41 Mon Sep 17 00:00:00 2001 From: delvedor Date: Thu, 24 Feb 2022 16:06:02 +0100 Subject: [PATCH 149/647] Remove old jobs --- .ci/jobs/elastic+elasticsearch-js+5.x.yml | 15 --------------- .ci/jobs/elastic+elasticsearch-js+6.x.yml | 15 --------------- .ci/jobs/elastic+elasticsearch-js+7.16.yml | 15 --------------- 3 files changed, 45 deletions(-) delete mode 100644 .ci/jobs/elastic+elasticsearch-js+5.x.yml delete mode 100644 .ci/jobs/elastic+elasticsearch-js+6.x.yml delete mode 100644 .ci/jobs/elastic+elasticsearch-js+7.16.yml diff --git a/.ci/jobs/elastic+elasticsearch-js+5.x.yml b/.ci/jobs/elastic+elasticsearch-js+5.x.yml deleted file mode 100644 index a2f6e51f9..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+5.x.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+5.x - display-name: 'elastic / elasticsearch-js # 5.x' - description: Testing the elasticsearch-js 5.x branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/5.x - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.)
- triggers: - - github - - timed: '@weekly' diff --git a/.ci/jobs/elastic+elasticsearch-js+6.x.yml b/.ci/jobs/elastic+elasticsearch-js+6.x.yml deleted file mode 100644 index 014018042..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+6.x.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+6.x - display-name: 'elastic / elasticsearch-js # 6.x' - description: Testing the elasticsearch-js 6.x branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/6.x - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+7.16.yml b/.ci/jobs/elastic+elasticsearch-js+7.16.yml deleted file mode 100644 index c2a4ffe70..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+7.16.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+7.16 - display-name: 'elastic / elasticsearch-js # 7.16' - description: Testing the elasticsearch-js 7.16 branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/7.16 - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: 'H */12 * * *' From 6d2774d2a055f361254b966ff6da39d56340e410 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 28 Feb 2022 09:56:10 -0500 Subject: [PATCH 150/647] [DOCS] Add redirects for authentication and migration guide (#1634) * [DOCS] Add redirects for authentication and migration guide * Update auth refs --- docs/basic-config.asciidoc | 3 +-- docs/connecting.asciidoc | 4 ++-- docs/index.asciidoc | 1 + docs/redirects.asciidoc | 17 +++++++++++++++++ 4 files changed, 21 insertions(+), 4 deletions(-) create mode 100644 docs/redirects.asciidoc diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index 363326f20..536dfead4 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -48,8 +48,7 @@ node: { |`auth` a|Your authentication data. You can use both basic authentication and {ref}/security-api-create-api-key.html[ApiKey]. + -See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Authentication] -for more details. + +See <> for more details. + _Default:_ `null` Basic authentication: diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index 57510ae12..282e12512 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -6,14 +6,14 @@ This page contains the information you need to connect and use the Client with **On this page** -* <> +* <> * <> * <> * <> * <> * <> -[[auth-reference]] +[[authentication]] [discrete] === Authentication diff --git a/docs/index.asciidoc b/docs/index.asciidoc index 9997b4ab9..f57b2f8d5 100644 --- a/docs/index.asciidoc +++ b/docs/index.asciidoc @@ -19,3 +19,4 @@ include::typescript.asciidoc[] include::reference.asciidoc[] include::examples/index.asciidoc[] include::helpers.asciidoc[] +include::redirects.asciidoc[] diff --git a/docs/redirects.asciidoc b/docs/redirects.asciidoc new file mode 100644 index 000000000..f2d0aecbb --- /dev/null +++ b/docs/redirects.asciidoc @@ -0,0 +1,17 @@ +["appendix",role="exclude",id="redirects"] += Deleted pages + +The following pages have moved or been deleted. + +[role="exclude",id="auth-reference"] +== Authentication + +This page has moved. See <>. 
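For context on the `auth` option referenced in the basic-config hunk above, here is a minimal sketch of the two shapes it accepts. The node URL and credentials are placeholders, not values taken from this patch.

[source,js]
----
const { Client } = require('@elastic/elasticsearch')

// Basic authentication (placeholder credentials)
const client = new Client({
  node: 'https://localhost:9200/',
  auth: { username: 'elastic', password: 'changeme' }
})

// The same option also accepts an API key
const apiKeyClient = new Client({
  node: 'https://localhost:9200/',
  auth: { apiKey: 'base64EncodedApiKey' }
})
----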
+ +[role="exclude",id="breaking-changes"] +== Breaking changes + +For information about migrating from the legacy elasticsearch.js client to the +new Elasticsearch JavaScript client, refer to the +https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/7.17/breaking-changes.html[7.17 +JavaScript client migration guide]. From e0f54c789ba106fdbffba5a37c8da97f233db1ad Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Thu, 3 Mar 2022 16:07:41 +0100 Subject: [PATCH 151/647] Update v8 changelog (#1647) --- docs/changelog.asciidoc | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 5dd160a2c..1e01389b1 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -304,4 +304,23 @@ If you are already passing the basic auth options in the `auth` configuration, t [discrete] ===== Calling `client.close` will reject new requests -Once you call `client.close` every new request after that will be rejected with a `NoLivingConnectionsError`. In-flight requests will be executed normally unless an in-flight request requires a retry, in which case it will be rejected. \ No newline at end of file +Once you call `client.close` every new request after that will be rejected with a `NoLivingConnectionsError`. In-flight requests will be executed normally unless an in-flight request requires a retry, in which case it will be rejected. + +[discrete] +===== Parameters rename + +- `ilm.delete_lifecycle`: `policy` parameter has been renamed to `name` +- `ilm.get_lifecycle`: `policy` parameter has been renamed to `name` +- `ilm.put_lifecycle`: `policy` parameter has been renamed to `name` +- `snapshot.cleanup_repository`: `repository` parameter has been renamed to `name` +- `snapshot.create_repository`: `repository` parameter has been renamed to `name` +- `snapshot.delete_repository`: `repository` parameter has been renamed to `name` +- `snapshot.get_repository`: `repository` parameter has been renamed to `name` +- `snapshot.verify_repository`: `repository` parameter has been renamed to `name` + +[discrete] +===== Removal of snake_cased methods + +The v7 client provided snake_cased methods, such as `client.delete_by_query`. This is no longer supported, now only camelCased method are present. +So `client.delete_by_query` can be accessed with `client.deleteByQuery` + From 46b2c99b7c5c0ea16c6ee7f39bf3576d9c56adf9 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Thu, 3 Mar 2022 16:08:45 +0100 Subject: [PATCH 152/647] Update API reference for v8 (#1646) --- docs/reference.asciidoc | 12420 +++++++------------------------------- 1 file changed, 2105 insertions(+), 10315 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 7bbdfbfb7..4c82b6382 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -1,9 +1,5 @@ [[api-reference]] - //////// - - - =========================================================================================================================== || || || || @@ -11,11797 +7,3591 @@ || ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || || ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || || ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || -|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || -|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || -|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || -|| || -|| || -|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| -|| You should update the script that does the generation, which can be found in '/scripts/utils/generateDocs.js'. || -|| || -|| You can run the script with the following command: || -|| node scripts/generate --branch || -|| or || -|| node scripts/generate --tag || -|| || -|| || -|| || -=========================================================================================================================== - - - -//////// - -== API Reference - -This document contains the entire list of the Elasticsearch API supported by the client, both OSS and commercial. The client is entirely licensed under Apache 2.0. - -Elasticsearch exposes an HTTP layer to communicate with, and the client is a library that will help you do this. Because of this reason, you will see HTTP related parameters, such as `body` or `headers`. - -Every API can accept two objects, the first contains all the parameters that will be sent to Elasticsearch, while the second includes the request specific parameters, such as timeouts, headers, and so on. -In the first object, every parameter but the body will be sent via querystring or url parameter, depending on the API, and every unrecognized parameter will be sent as querystring. - -[source,js] ----- -// promise API -const result = await client.search({ - index: 'my-index', - from: 20, - size: 10, - body: { foo: 'bar' } -}, { - ignore: [404], - maxRetries: 3 -}) - -// callback API -client.search({ - index: 'my-index', - from: 20, - size: 10, - body: { foo: 'bar' } -}, { - ignore: [404], - maxRetries: 3 -}, (err, result) => { - if (err) console.log(err) -}) ----- - -In this document, you will find the reference of every parameter accepted by the querystring or the url. If you also need to send the body, you can find the documentation of its format in the reference link that is present along with every endpoint. - - - -[discrete] -=== Common parameters -Parameters that are accepted by all API endpoints. - -link:{ref}/common-options.html[Documentation] -[cols=2*] -|=== -|`pretty` -|`boolean` - Pretty format the returned JSON response. - -|`human` -|`boolean` - Return human readable values for statistics. + - _Default:_ `true` - -|`error_trace` or `errorTrace` -|`boolean` - Include the stack trace of returned errors. - -|`source` -|`string` - The URL-encoded request definition. Useful for libraries that do not accept a request body for non-POST requests. - -|`filter_path` or `filterPath` -|`list` - A comma-separated list of filters used to reduce the response. 
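Rounding out the common parameters, a minimal sketch of `filter_path` in action, using the v7-style signature documented in this reference; the index name and filter are illustrative:

[source,js]
----
// Trim the response down to just the matched document sources
const result = await client.search({
  index: 'my-index',
  filter_path: 'hits.hits._source',
  body: { query: { match_all: {} } }
})
----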
- -|=== -[discrete] -=== asyncSearch.delete - -[source,ts] ----- -client.asyncSearch.delete({ - id: string -}) ----- -link:{ref}/async-search.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The async search ID - -|=== - -[discrete] -=== asyncSearch.get - -[source,ts] ----- -client.asyncSearch.get({ - id: string, - wait_for_completion_timeout: string, - keep_alive: string, - typed_keys: boolean -}) ----- -link:{ref}/async-search.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The async search ID - -|`wait_for_completion_timeout` or `waitForCompletionTimeout` -|`string` - Specify the time that the request should block waiting for the final response - -|`keep_alive` or `keepAlive` -|`string` - Specify the time interval in which the results (partial or final) for this search will be available - -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -|=== - -[discrete] -=== asyncSearch.status - -[source,ts] ----- -client.asyncSearch.status({ - id: string -}) ----- -link:{ref}/async-search.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The async search ID - -|=== - -[discrete] -=== asyncSearch.submit - -[source,ts] ----- -client.asyncSearch.submit({ - index: string | string[], - wait_for_completion_timeout: string, - keep_on_completion: boolean, - keep_alive: string, - batched_reduce_size: number, - request_cache: boolean, - analyzer: string, - analyze_wildcard: boolean, - default_operator: 'AND' | 'OR', - df: string, - explain: boolean, - stored_fields: string | string[], - docvalue_fields: string | string[], - from: number, - ignore_unavailable: boolean, - ignore_throttled: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - lenient: boolean, - preference: string, - q: string, - routing: string | string[], - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - size: number, - sort: string | string[], - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - terminate_after: number, - stats: string | string[], - suggest_field: string, - suggest_mode: 'missing' | 'popular' | 'always', - suggest_size: number, - suggest_text: string, - timeout: string, - track_scores: boolean, - track_total_hits: boolean|long, - allow_partial_search_results: boolean, - typed_keys: boolean, - version: boolean, - seq_no_primary_term: boolean, - max_concurrent_shard_requests: number, - body: object -}) ----- -link:{ref}/async-search.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`wait_for_completion_timeout` or `waitForCompletionTimeout` -|`string` - Specify the time that the request should block waiting for the final response + -_Default:_ `1s` - -|`keep_on_completion` or `keepOnCompletion` -|`boolean` - Control whether the response should be stored in the cluster if it completed within the provided [wait_for_completion] time (default: false) - -|`keep_alive` or `keepAlive` -|`string` - Update the time interval in which the results (partial or final) for this search will be available + -_Default:_ `5d` - -|`batched_reduce_size` or `batchedReduceSize` -|`number` - The number of shard results that should be reduced at once on the coordinating node. 
This value should be used as the granularity at which progress results will be made available. + -_Default:_ `5` - -|`request_cache` or `requestCache` -|`boolean` - Specify if request cache should be used for this request or not, defaults to true - -|`analyzer` -|`string` - The analyzer to use for the query string - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false) - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The field to use as default where no field prefix is given in the query string - -|`explain` -|`boolean` - Specify whether to return detailed information about score computation as part of a hit - -|`stored_fields` or `storedFields` -|`string \| string[]` - A comma-separated list of stored fields to return as part of a hit - -|`docvalue_fields` or `docvalueFields` -|`string \| string[]` - A comma-separated list of fields to return as the docvalue representation of a field for each hit - -|`from` -|`number` - Starting offset (default: 0) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Whether specified concrete, expanded or aliased indices should be ignored when throttled - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`q` -|`string` - Query in the Lucene query string syntax - -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type - -|`size` -|`number` - Number of hits to return (default: 10) - -|`sort` -|`string \| string[]` - A comma-separated list of : pairs - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`terminate_after` or `terminateAfter` -|`number` - The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. 
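To make the submit-then-poll flow these parameters configure concrete, a minimal sketch using the v7-style signatures documented here; the index name and timings are illustrative:

[source,js]
----
// Kick off a search that may outlive the initial request...
const { body: submitted } = await client.asyncSearch.submit({
  index: 'my-index',
  wait_for_completion_timeout: '1s',
  keep_on_completion: true,
  body: { query: { match_all: {} } }
})

// ...then retrieve the (partial or final) results by id
const { body: results } = await client.asyncSearch.get({ id: submitted.id })
----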
- -|`stats` -|`string \| string[]` - Specific 'tag' of the request for logging and statistical purposes - -|`suggest_field` or `suggestField` -|`string` - Specify which field to use for suggestions - -|`suggest_mode` or `suggestMode` -|`'missing' \| 'popular' \| 'always'` - Specify suggest mode + -_Default:_ `missing` - -|`suggest_size` or `suggestSize` -|`number` - How many suggestions to return in response - -|`suggest_text` or `suggestText` -|`string` - The source text for which the suggestions should be returned - -|`timeout` -|`string` - Explicit operation timeout - -|`track_scores` or `trackScores` -|`boolean` - Whether to calculate and return scores even if they are not used for sorting - -|`track_total_hits` or `trackTotalHits` -|`boolean\|long` - Indicate if the number of documents that match the query should be tracked. A number can also be specified, to accurately track the total hit count up to the number. - -|`allow_partial_search_results` or `allowPartialSearchResults` -|`boolean` - Indicate if an error should be returned if there is a partial search failure or timeout + -_Default:_ `true` - -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -|`version` -|`boolean` - Specify whether to return document version as part of a hit - -|`seq_no_primary_term` or `seqNoPrimaryTerm` -|`boolean` - Specify whether to return sequence number and primary term of the last modification of each hit - -|`max_concurrent_shard_requests` or `maxConcurrentShardRequests` -|`number` - The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests + -_Default:_ `5` - -|`body` -|`object` - The search definition using the Query DSL - -|=== - -[discrete] -=== autoscaling.deleteAutoscalingPolicy - -[source,ts] ----- -client.autoscaling.deleteAutoscalingPolicy({ - name: string -}) ----- -link:{ref}/autoscaling-delete-autoscaling-policy.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - the name of the autoscaling policy - -|=== - -[discrete] -=== autoscaling.getAutoscalingCapacity - -[source,ts] ----- -client.autoscaling.getAutoscalingCapacity() ----- -link:{ref}/autoscaling-get-autoscaling-capacity.html[Documentation] + - - -[discrete] -=== autoscaling.getAutoscalingPolicy - -[source,ts] ----- -client.autoscaling.getAutoscalingPolicy({ - name: string -}) ----- -link:{ref}/autoscaling-get-autoscaling-policy.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - the name of the autoscaling policy - -|=== - -[discrete] -=== autoscaling.putAutoscalingPolicy - -[source,ts] ----- -client.autoscaling.putAutoscalingPolicy({ - name: string, - body: object -}) ----- -link:{ref}/autoscaling-put-autoscaling-policy.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - the name of the autoscaling policy - -|`body` -|`object` - the specification of the autoscaling policy - -|=== - -[discrete] -=== bulk - -[source,ts] ----- -client.bulk({ - index: string, - type: string, - wait_for_active_shards: string, - refresh: 'true' | 'false' | 'wait_for', - routing: string, - timeout: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - pipeline: string, - require_alias: boolean, - body: object -}) ----- -link:{ref}/docs-bulk.html[Documentation] + -{jsclient}/bulk_examples.html[Code Example] 
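Because the bulk body format (alternating action and document lines) is easy to get wrong, a minimal sketch in the v7 style used throughout this reference; the index name and documents are placeholders:

[source,js]
----
const { body: bulkResponse } = await client.bulk({
  refresh: 'wait_for',
  body: [
    // each action object is followed by its source document
    { index: { _index: 'my-index' } },
    { title: 'one' },
    { index: { _index: 'my-index' } },
    { title: 'two' }
  ]
})

// the request can succeed while individual operations fail,
// so always check the errors flag
if (bulkResponse.errors) console.log('some bulk operations failed')
----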
+ -[cols=2*] -|=== -|`index` -|`string` - Default index for items which don't provide one - -|`type` -|`string` - Default document type for items which don't provide one - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the bulk operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. - -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or default list of fields to return, can be overridden on each sub-request - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - Default list of fields to exclude from the returned _source field, can be overridden on each sub-request - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - Default list of fields to extract and return from the _source field, can be overridden on each sub-request - -|`pipeline` -|`string` - The pipeline id to preprocess incoming documents with - -|`require_alias` or `requireAlias` -|`boolean` - Sets require_alias for all incoming documents. Defaults to unset (false) - -|`body` -|`object` - The operation definition and data (action-data pairs), separated by newlines - -|=== - -[discrete] -=== cat.aliases - -[source,ts] ----- -client.cat.aliases({ - name: string | string[], - format: string, - local: boolean, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/cat-alias.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of alias names to return - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `all` - -|=== - -[discrete] -=== cat.allocation - -[source,ts] ----- -client.cat.allocation({ - node_id: string | string[], - format: string, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-allocation.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. 
json, yaml - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.count - -[source,ts] ----- -client.cat.count({ - index: string | string[], - format: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-count.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.fielddata - -[source,ts] ----- -client.cat.fielddata({ - fields: string | string[], - format: string, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-fielddata.html[Documentation] + -[cols=2*] -|=== -|`fields` -|`string \| string[]` - A comma-separated list of fields to return the fielddata size - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.health - -[source,ts] ----- -client.cat.health({ - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - ts: boolean, - v: boolean -}) ----- -link:{ref}/cat-health.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`ts` -|`boolean` - Set to false to disable timestamping + -_Default:_ `true` - -|`v` -|`boolean` - Verbose mode. 
Display column headers - -|=== - -[discrete] -=== cat.help - -[source,ts] ----- -client.cat.help({ - help: boolean, - s: string | string[] -}) ----- -link:{ref}/cat.html[Documentation] + -[cols=2*] -|=== -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|=== - -[discrete] -=== cat.indices - -[source,ts] ----- -client.cat.indices({ - index: string | string[], - format: string, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - master_timeout: string, - h: string | string[], - health: 'green' | 'yellow' | 'red', - help: boolean, - pri: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean, - include_unloaded_segments: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/cat-indices.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`health` -|`'green' \| 'yellow' \| 'red'` - A health status ("green", "yellow", or "red" to filter only indices matching the specified health status - -|`help` -|`boolean` - Return help information - -|`pri` -|`boolean` - Set to true to return stats only for primary shards - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|`include_unloaded_segments` or `includeUnloadedSegments` -|`boolean` - If set to true segment stats will include stats for segments that are not currently loaded into memory - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `all` - -|=== - -[discrete] -=== cat.master - -[source,ts] ----- -client.cat.master({ - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-master.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. 
Display column headers - -|=== - -[discrete] -=== cat.mlDataFrameAnalytics - -[source,ts] ----- -client.cat.mlDataFrameAnalytics({ - id: string, - allow_no_match: boolean, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.mlDatafeeds - -[source,ts] ----- -client.cat.mlDatafeeds({ - datafeed_id: string, - allow_no_match: boolean, - allow_no_datafeeds: boolean, - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-datafeeds.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeeds stats to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) - -|`allow_no_datafeeds` or `allowNoDatafeeds` -|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) + - -WARNING: This parameter has been deprecated. - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.mlJobs - -[source,ts] ----- -client.cat.mlJobs({ - job_id: string, - allow_no_match: boolean, - allow_no_jobs: boolean, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-anomaly-detectors.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the jobs stats to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no jobs. 
(This includes `_all` string or when no jobs have been specified) - -|`allow_no_jobs` or `allowNoJobs` -|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified) + - -WARNING: This parameter has been deprecated. - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.mlTrainedModels - -[source,ts] ----- -client.cat.mlTrainedModels({ - model_id: string, - allow_no_match: boolean, - from: number, - size: number, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-trained-model.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The ID of the trained models stats to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no trained models. (This includes `_all` string or when no trained models have been specified) + -_Default:_ `true` - -|`from` -|`number` - skips a number of trained models - -|`size` -|`number` - specifies a max number of trained models to get + -_Default:_ `100` - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.nodeattrs - -[source,ts] ----- -client.cat.nodeattrs({ - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-nodeattrs.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. 
Display column headers - -|=== - -[discrete] -=== cat.nodes - -[source,ts] ----- -client.cat.nodes({ - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - format: string, - full_id: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean, - include_unloaded_segments: boolean -}) ----- -link:{ref}/cat-nodes.html[Documentation] + -[cols=2*] -|=== -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`full_id` or `fullId` -|`boolean` - Return the full node ID instead of the shortened version (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|`include_unloaded_segments` or `includeUnloadedSegments` -|`boolean` - If set to true segment stats will include stats for segments that are not currently loaded into memory - -|=== - -[discrete] -=== cat.pendingTasks - -[source,ts] ----- -client.cat.pendingTasks({ - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-pending-tasks.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.plugins - -[source,ts] ----- -client.cat.plugins({ - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - include_bootstrap: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-plugins.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. 
json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`include_bootstrap` or `includeBootstrap` -|`boolean` - Include bootstrap plugins in the response - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.recovery - -[source,ts] ----- -client.cat.recovery({ - index: string | string[], - format: string, - active_only: boolean, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - detailed: boolean, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-recovery.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - Comma-separated list or wildcard expression of index names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`active_only` or `activeOnly` -|`boolean` - If `true`, the response only includes ongoing shard recoveries - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`detailed` -|`boolean` - If `true`, the response includes detailed information about shard recoveries - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.repositories - -[source,ts] ----- -client.cat.repositories({ - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-repositories.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.segments - -[source,ts] ----- -client.cat.segments({ - index: string | string[], - format: string, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-segments.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. 
json, yaml - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.shards - -[source,ts] ----- -client.cat.shards({ - index: string | string[], - format: string, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-shards.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.snapshots - -[source,ts] ----- -client.cat.snapshots({ - repository: string | string[], - format: string, - ignore_unavailable: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string \| string[]` - Name of repository from which to fetch the snapshot information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Set to true to ignore unavailable snapshots - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.tasks -*Stability:* experimental -[source,ts] ----- -client.cat.tasks({ - format: string, - nodes: string | string[], - actions: string | string[], - detailed: boolean, - parent_task_id: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/tasks.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. 
json, yaml - -|`nodes` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`actions` -|`string \| string[]` - A comma-separated list of actions that should be returned. Leave empty to return all. - -|`detailed` -|`boolean` - Return detailed task information (default: false) - -|`parent_task_id` or `parentTaskId` -|`string` - Return tasks with specified parent task id (node_id:task_number). Set to -1 to return all. - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.templates - -[source,ts] ----- -client.cat.templates({ - name: string, - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - A pattern that returned template names must match - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== cat.threadPool - -[source,ts] ----- -client.cat.threadPool({ - thread_pool_patterns: string | string[], - format: string, - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-thread-pool.html[Documentation] + -[cols=2*] -|=== -|`thread_pool_patterns` or `threadPoolPatterns` -|`string \| string[]` - A comma-separated list of regular-expressions to filter the thread pools in the output - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. 
Display column headers - -|=== - -[discrete] -=== cat.transforms - -[source,ts] ----- -client.cat.transforms({ - transform_id: string, - from: number, - size: number, - allow_no_match: boolean, - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-transforms.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform for which to get stats. '_all' or '*' implies all transforms - -|`from` -|`number` - skips a number of transform configs, defaults to 0 - -|`size` -|`number` - specifies a max number of transforms to get, defaults to 100 - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified) - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -[discrete] -=== ccr.deleteAutoFollowPattern - -[source,ts] ----- -client.ccr.deleteAutoFollowPattern({ - name: string -}) ----- -link:{ref}/ccr-delete-auto-follow-pattern.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the auto follow pattern. - -|=== - -[discrete] -=== ccr.follow - -[source,ts] ----- -client.ccr.follow({ - index: string, - wait_for_active_shards: string, - body: object -}) ----- -link:{ref}/ccr-put-follow.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the follower index - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before returning. Defaults to 0. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) + -_Default:_ `0` - -|`body` -|`object` - The name of the leader index and other optional ccr related parameters - -|=== - -[discrete] -=== ccr.followInfo - -[source,ts] ----- -client.ccr.followInfo({ - index: string | string[] -}) ----- -link:{ref}/ccr-get-follow-info.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index patterns; use `_all` to perform the operation on all indices - -|=== - -[discrete] -=== ccr.followStats - -[source,ts] ----- -client.ccr.followStats({ - index: string | string[] -}) ----- -link:{ref}/ccr-get-follow-stats.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index patterns; use `_all` to perform the operation on all indices - -|=== - -[discrete] -=== ccr.forgetFollower - -[source,ts] ----- -client.ccr.forgetFollower({ - index: string, - body: object -}) ----- -link:{ref}/ccr-post-forget-follower.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - the name of the leader index for which specified follower retention leases should be removed - -|`body` -|`object` - the name and UUID of the follower index, the name of the cluster containing the follower index, and the alias from the perspective of that cluster for the remote cluster containing the leader index - -|=== - -[discrete] -=== ccr.getAutoFollowPattern - -[source,ts] ----- -client.ccr.getAutoFollowPattern({ - name: string -}) ----- -link:{ref}/ccr-get-auto-follow-pattern.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the auto follow pattern. - -|=== - -[discrete] -=== ccr.pauseAutoFollowPattern - -[source,ts] ----- -client.ccr.pauseAutoFollowPattern({ - name: string -}) ----- -link:{ref}/ccr-pause-auto-follow-pattern.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the auto follow pattern that should pause discovering new indices to follow. - -|=== - -[discrete] -=== ccr.pauseFollow - -[source,ts] ----- -client.ccr.pauseFollow({ - index: string -}) ----- -link:{ref}/ccr-post-pause-follow.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the follower index that should pause following its leader index. - -|=== - -[discrete] -=== ccr.putAutoFollowPattern - -[source,ts] ----- -client.ccr.putAutoFollowPattern({ - name: string, - body: object -}) ----- -link:{ref}/ccr-put-auto-follow-pattern.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the auto follow pattern. - -|`body` -|`object` - The specification of the auto follow pattern - -|=== - -[discrete] -=== ccr.resumeAutoFollowPattern - -[source,ts] ----- -client.ccr.resumeAutoFollowPattern({ - name: string -}) ----- -link:{ref}/ccr-resume-auto-follow-pattern.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the auto follow pattern to resume discovering new indices to follow. - -|=== - -[discrete] -=== ccr.resumeFollow - -[source,ts] ----- -client.ccr.resumeFollow({ - index: string, - body: object -}) ----- -link:{ref}/ccr-post-resume-follow.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the follow index to resume following. 
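A minimal sketch of the pause/resume cycle that `ccr.pauseFollow` and `ccr.resumeFollow` implement; the follower index name is illustrative:

[source,js]
----
// Temporarily halt replication on a follower index, then resume it
await client.ccr.pauseFollow({ index: 'follower-index' })
await client.ccr.resumeFollow({ index: 'follower-index' })
----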
- -|`body` -|`object` - The name of the leader index and other optional ccr related parameters - -|=== - -[discrete] -=== ccr.stats - -[source,ts] ----- -client.ccr.stats() ----- -link:{ref}/ccr-get-stats.html[Documentation] + - - -[discrete] -=== ccr.unfollow - -[source,ts] ----- -client.ccr.unfollow({ - index: string -}) ----- -link:{ref}/ccr-post-unfollow.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the follower index that should be turned into a regular index. - -|=== - -[discrete] -=== clearScroll - -[source,ts] ----- -client.clearScroll({ - scroll_id: string | string[], - body: object -}) ----- -link:{ref}/clear-scroll-api.html[Documentation] + -[cols=2*] -|=== -|`scroll_id` or `scrollId` -|`string \| string[]` - A comma-separated list of scroll IDs to clear + - -WARNING: This parameter has been deprecated. - -|`body` -|`object` - A comma-separated list of scroll IDs to clear if none was specified via the scroll_id parameter - -|=== - -[discrete] -=== closePointInTime - -[source,ts] ----- -client.closePointInTime({ - body: object -}) ----- -link:{ref}/point-in-time-api.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - a point-in-time id to close - -|=== - -[discrete] -=== cluster.allocationExplain - -[source,ts] ----- -client.cluster.allocationExplain({ - include_yes_decisions: boolean, - include_disk_info: boolean, - body: object -}) ----- -link:{ref}/cluster-allocation-explain.html[Documentation] + -[cols=2*] -|=== -|`include_yes_decisions` or `includeYesDecisions` -|`boolean` - Return 'YES' decisions in explanation (default: false) - -|`include_disk_info` or `includeDiskInfo` -|`boolean` - Return information about disk usage and shard sizes (default: false) - -|`body` -|`object` - The index, shard, and primary flag to explain. Empty means 'explain a randomly-chosen unassigned shard' - -|=== - -[discrete] -=== cluster.deleteComponentTemplate - -[source,ts] ----- -client.cluster.deleteComponentTemplate({ - name: string, - timeout: string, - master_timeout: string -}) ----- -link:{ref}/indices-component-template.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -[discrete] -=== cluster.deleteVotingConfigExclusions - -[source,ts] ----- -client.cluster.deleteVotingConfigExclusions({ - wait_for_removal: boolean -}) ----- -link:{ref}/voting-config-exclusions.html[Documentation] + -[cols=2*] -|=== -|`wait_for_removal` or `waitForRemoval` -|`boolean` - Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. 
+ -_Default:_ `true` - -|=== - -[discrete] -=== cluster.existsComponentTemplate - -[source,ts] ----- -client.cluster.existsComponentTemplate({ - name: string, - master_timeout: string, - local: boolean -}) ----- -link:{ref}/indices-component-template.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -[discrete] -=== cluster.getComponentTemplate - -[source,ts] ----- -client.cluster.getComponentTemplate({ - name: string | string[], - master_timeout: string, - local: boolean -}) ----- -link:{ref}/indices-component-template.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - The comma separated names of the component templates - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -[discrete] -=== cluster.getSettings - -[source,ts] ----- -client.cluster.getSettings({ - flat_settings: boolean, - master_timeout: string, - timeout: string, - include_defaults: boolean -}) ----- -link:{ref}/cluster-get-settings.html[Documentation] + -[cols=2*] -|=== -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|`include_defaults` or `includeDefaults` -|`boolean` - Whether to return all default clusters setting. - -|=== - -[discrete] -=== cluster.health - -[source,ts] ----- -client.cluster.health({ - index: string | string[], - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - level: 'cluster' | 'indices' | 'shards', - local: boolean, - master_timeout: string, - timeout: string, - wait_for_active_shards: string, - wait_for_nodes: string, - wait_for_events: 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid', - wait_for_no_relocating_shards: boolean, - wait_for_no_initializing_shards: boolean, - wait_for_status: 'green' | 'yellow' | 'red' -}) ----- -link:{ref}/cluster-health.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - Limit the information returned to a specific index - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `all` - -|`level` -|`'cluster' \| 'indices' \| 'shards'` - Specify the level of detail for returned information + -_Default:_ `cluster` - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Wait until the specified number of shards is active - -|`wait_for_nodes` or `waitForNodes` -|`string` - Wait until the specified number of nodes is available - -|`wait_for_events` or `waitForEvents` -|`'immediate' \| 'urgent' \| 'high' \| 'normal' \| 'low' \| 'languid'` - Wait until all currently queued events with the given priority are processed - -|`wait_for_no_relocating_shards` or `waitForNoRelocatingShards` -|`boolean` - Whether to wait until there are no relocating shards in the cluster - -|`wait_for_no_initializing_shards` or `waitForNoInitializingShards` -|`boolean` - Whether to wait until there are no initializing shards in the cluster - -|`wait_for_status` or `waitForStatus` -|`'green' \| 'yellow' \| 'red'` - Wait until cluster is in a specific state - -|=== - -[discrete] -=== cluster.pendingTasks - -[source,ts] ----- -client.cluster.pendingTasks({ - local: boolean, - master_timeout: string -}) ----- -link:{ref}/cluster-pending.html[Documentation] + -[cols=2*] -|=== -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -[discrete] -=== cluster.postVotingConfigExclusions - -[source,ts] ----- -client.cluster.postVotingConfigExclusions({ - node_ids: string, - node_names: string, - timeout: string -}) ----- -link:{ref}/voting-config-exclusions.html[Documentation] + -[cols=2*] -|=== -|`node_ids` or `nodeIds` -|`string` - A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify ?node_names. - -|`node_names` or `nodeNames` -|`string` - A comma-separated list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify ?node_ids. 
- -|`timeout` -|`string` - Explicit operation timeout + -_Default:_ `30s` - -|=== - -[discrete] -=== cluster.putComponentTemplate - -[source,ts] ----- -client.cluster.putComponentTemplate({ - name: string, - create: boolean, - timeout: string, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-component-template.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`create` -|`boolean` - Whether the index template should only be added if new or can also replace an existing one - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The template definition - -|=== - -[discrete] -=== cluster.putSettings - -[source,ts] ----- -client.cluster.putSettings({ - flat_settings: boolean, - master_timeout: string, - timeout: string, - body: object -}) ----- -link:{ref}/cluster-update-settings.html[Documentation] + -[cols=2*] -|=== -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|`body` -|`object` - The settings to be updated. Can be either `transient` or `persistent` (survives cluster restart). - -|=== - -[discrete] -=== cluster.remoteInfo - -[source,ts] ----- -client.cluster.remoteInfo() ----- -link:{ref}/cluster-remote-info.html[Documentation] + - - -[discrete] -=== cluster.reroute - -[source,ts] ----- -client.cluster.reroute({ - dry_run: boolean, - explain: boolean, - retry_failed: boolean, - metric: string | string[], - master_timeout: string, - timeout: string, - body: object -}) ----- -link:{ref}/cluster-reroute.html[Documentation] + -[cols=2*] -|=== -|`dry_run` or `dryRun` -|`boolean` - Simulate the operation only and return the resulting state - -|`explain` -|`boolean` - Return an explanation of why the commands can or cannot be executed - -|`retry_failed` or `retryFailed` -|`boolean` - Retries allocation of shards that are blocked due to too many subsequent allocation failures - -|`metric` -|`string \| string[]` - Limit the information returned to the specified metrics. 
Defaults to all but metadata - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|`body` -|`object` - The definition of `commands` to perform (`move`, `cancel`, `allocate`) - -|=== - -[discrete] -=== cluster.state - -[source,ts] ----- -client.cluster.state({ - index: string | string[], - metric: string | string[], - local: boolean, - master_timeout: string, - flat_settings: boolean, - wait_for_metadata_version: number, - wait_for_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/cluster-state.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`metric` -|`string \| string[]` - Limit the information returned to the specified metrics - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`wait_for_metadata_version` or `waitForMetadataVersion` -|`number` - Wait for the metadata version to be equal or greater than the specified metadata version - -|`wait_for_timeout` or `waitForTimeout` -|`string` - The maximum time to wait for wait_for_metadata_version before timing out - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `open` - -|=== - -[discrete] -=== cluster.stats - -[source,ts] ----- -client.cluster.stats({ - node_id: string | string[], - flat_settings: boolean, - timeout: string -}) ----- -link:{ref}/cluster-stats.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -[discrete] -=== count - -[source,ts] ----- -client.count({ - index: string | string[], - ignore_unavailable: boolean, - ignore_throttled: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - min_score: number, - preference: string, - routing: string | string[], - q: string, - analyzer: string, - analyze_wildcard: boolean, - default_operator: 'AND' | 'OR', - df: string, - lenient: boolean, - terminate_after: number, - body: object -}) ----- -link:{ref}/search-count.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of indices to restrict the results - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Whether specified concrete, expanded or aliased indices should be ignored when throttled - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `open` - -|`min_score` or `minScore` -|`number` - Include only documents with a specific `_score` value in the result - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values - -|`q` -|`string` - Query in the Lucene query string syntax - -|`analyzer` -|`string` - The analyzer to use for the query string - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false) - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The field to use as default where no field prefix is given in the query string - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`terminate_after` or `terminateAfter` -|`number` - The maximum count for each shard, upon reaching which the query execution will terminate early - -|`body` -|`object` - A query to restrict the results specified with the Query DSL (optional) - -|=== - -[discrete] -=== create - -[source,ts] ----- -client.create({ - id: string, - index: string, - type: string, - wait_for_active_shards: string, - refresh: 'true' | 'false' | 'wait_for', - routing: string, - timeout: string, - version: number, - version_type: 'internal' | 'external' | 'external_gte', - pipeline: string, - body: object -}) ----- -link:{ref}/docs-index_.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Document ID - -|`index` -|`string` - The name of the index - -|`type` -|`string` - The type of the document + - -WARNING: This parameter has been deprecated. - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. 
- -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|`pipeline` -|`string` - The pipeline id to preprocess incoming documents with - -|`body` -|`object` - The document - -|=== - -[discrete] -=== danglingIndices.deleteDanglingIndex - -[source,ts] ----- -client.danglingIndices.deleteDanglingIndex({ - index_uuid: string, - accept_data_loss: boolean, - timeout: string, - master_timeout: string -}) ----- -link:{ref}/modules-gateway-dangling-indices.html[Documentation] + -[cols=2*] -|=== -|`index_uuid` or `indexUuid` -|`string` - The UUID of the dangling index - -|`accept_data_loss` or `acceptDataLoss` -|`boolean` - Must be set to true in order to delete the dangling index - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -[discrete] -=== danglingIndices.importDanglingIndex - -[source,ts] ----- -client.danglingIndices.importDanglingIndex({ - index_uuid: string, - accept_data_loss: boolean, - timeout: string, - master_timeout: string -}) ----- -link:{ref}/modules-gateway-dangling-indices.html[Documentation] + -[cols=2*] -|=== -|`index_uuid` or `indexUuid` -|`string` - The UUID of the dangling index - -|`accept_data_loss` or `acceptDataLoss` -|`boolean` - Must be set to true in order to import the dangling index - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -[discrete] -=== danglingIndices.listDanglingIndices - -[source,ts] ----- -client.danglingIndices.listDanglingIndices() ----- -link:{ref}/modules-gateway-dangling-indices.html[Documentation] + - - -[discrete] -=== delete - -[source,ts] ----- -client.delete({ - id: string, - index: string, - type: string, - wait_for_active_shards: string, - refresh: 'true' | 'false' | 'wait_for', - routing: string, - timeout: string, - if_seq_no: number, - if_primary_term: number, - version: number, - version_type: 'internal' | 'external' | 'external_gte' -}) ----- -link:{ref}/docs-delete.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`type` -|`string` - The type of the document + - -WARNING: This parameter has been deprecated. - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the delete operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. 
- -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`if_seq_no` or `ifSeqNo` -|`number` - only perform the delete operation if the last operation that has changed the document has the specified sequence number - -|`if_primary_term` or `ifPrimaryTerm` -|`number` - only perform the delete operation if the last operation that has changed the document has the specified primary term - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|=== - -[discrete] -=== deleteByQuery - -[source,ts] ----- -client.deleteByQuery({ - index: string | string[], - analyzer: string, - analyze_wildcard: boolean, - default_operator: 'AND' | 'OR', - df: string, - from: number, - ignore_unavailable: boolean, - allow_no_indices: boolean, - conflicts: 'abort' | 'proceed', - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - lenient: boolean, - preference: string, - q: string, - routing: string | string[], - scroll: string, - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - search_timeout: string, - max_docs: number, - sort: string | string[], - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - terminate_after: number, - stats: string | string[], - version: boolean, - request_cache: boolean, - refresh: boolean, - timeout: string, - wait_for_active_shards: string, - scroll_size: number, - wait_for_completion: boolean, - requests_per_second: number, - slices: number|string, - body: object -}) ----- -link:{ref}/docs-delete-by-query.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`analyzer` -|`string` - The analyzer to use for the query string - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false) - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The field to use as default where no field prefix is given in the query string - -|`from` -|`number` - Starting offset (default: 0) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`conflicts` -|`'abort' \| 'proceed'` - What to do when the delete by query hits version conflicts? + -_Default:_ `abort` - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `open` - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`q` -|`string` - Query in the Lucene query string syntax - -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values - -|`scroll` -|`string` - Specify how long a consistent view of the index should be maintained for scrolled search - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type - -|`search_timeout` or `searchTimeout` -|`string` - Explicit timeout for each search request. Defaults to no timeout. - -|`max_docs` or `maxDocs` -|`number` - Maximum number of documents to process (default: all documents) - -|`sort` -|`string \| string[]` - A comma-separated list of : pairs - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`terminate_after` or `terminateAfter` -|`number` - The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. - -|`stats` -|`string \| string[]` - Specific 'tag' of the request for logging and statistical purposes - -|`version` -|`boolean` - Specify whether to return document version as part of a hit - -|`request_cache` or `requestCache` -|`boolean` - Specify if request cache should be used for this request or not, defaults to index level setting - -|`refresh` -|`boolean` - Should the affected indices be refreshed? - -|`timeout` -|`string` - Time each individual bulk request should wait for shards that are unavailable. + -_Default:_ `1m` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the delete by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`scroll_size` or `scrollSize` -|`number` - Size on the scroll request powering the delete by query + -_Default:_ `100` - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Whether the request should block until the delete by query is complete. + -_Default:_ `true` - -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle for this request in sub-requests per second. -1 means no throttle. - -|`slices` -|`number\|string` - The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`. + -_Default:_ `1` - -|`body` -|`object` - The search definition using the Query DSL - -|=== - -[discrete] -=== deleteByQueryRethrottle - -[source,ts] ----- -client.deleteByQueryRethrottle({ - task_id: string, - requests_per_second: number -}) ----- -link:{ref}/docs-delete-by-query.html[Documentation] + -[cols=2*] -|=== -|`task_id` or `taskId` -|`string` - The task id to rethrottle - -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle to set on this request in floating sub-requests per second. -1 means set no throttle. - -|===
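For orientation, here is a minimal usage sketch for the two APIs above. It assumes a locally reachable cluster and a placeholder index named `my-index`, starts a throttled delete by query as a background task, and then lifts the throttle:

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // Run the delete as a background task so it can be rethrottled later.
  const { body } = await client.deleteByQuery({
    index: 'my-index',
    wait_for_completion: false,
    requests_per_second: 100,
    body: { query: { match: { status: 'stale' } } }
  })

  // With wait_for_completion set to false, the response body carries
  // the id of the task performing the deletion.
  await client.deleteByQueryRethrottle({
    task_id: body.task,
    requests_per_second: -1 // -1 removes the throttle entirely
  })
}

run().catch(console.log)
----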
- -[discrete] -=== deleteScript - -[source,ts] ----- -client.deleteScript({ - id: string, - timeout: string, - master_timeout: string -}) ----- -link:{ref}/modules-scripting.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Script ID - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -[discrete] -=== enrich.deletePolicy - -[source,ts] ----- -client.enrich.deletePolicy({ - name: string -}) ----- -link:{ref}/delete-enrich-policy-api.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the enrich policy - -|=== - -[discrete] -=== enrich.executePolicy - -[source,ts] ----- -client.enrich.executePolicy({ - name: string, - wait_for_completion: boolean -}) ----- -link:{ref}/execute-enrich-policy-api.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the enrich policy - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Whether the request should block until the execution is complete. + -_Default:_ `true` - -|=== - -[discrete] -=== enrich.getPolicy - -[source,ts] ----- -client.enrich.getPolicy({ - name: string | string[] -}) ----- -link:{ref}/get-enrich-policy-api.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of enrich policy names - -|=== - -[discrete] -=== enrich.putPolicy - -[source,ts] ----- -client.enrich.putPolicy({ - name: string, - body: object -}) ----- -link:{ref}/put-enrich-policy-api.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the enrich policy - -|`body` -|`object` - The enrich policy to register - -|=== - -[discrete] -=== enrich.stats - -[source,ts] ----- -client.enrich.stats() ----- -link:{ref}/enrich-stats-api.html[Documentation] + - - -[discrete] -=== eql.delete - -[source,ts] ----- -client.eql.delete({ - id: string -}) ----- -link:{ref}/eql-search-api.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The async search ID - -|=== - -[discrete] -=== eql.get - -[source,ts] ----- -client.eql.get({ - id: string, - wait_for_completion_timeout: string, - keep_alive: string -}) ----- -link:{ref}/eql-search-api.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The async search ID - -|`wait_for_completion_timeout` or `waitForCompletionTimeout` -|`string` - Specify the time that the request should block waiting for the final response - -|`keep_alive` or `keepAlive` -|`string` - Update the time interval in which the results (partial or final) for this search will be available + -_Default:_ `5d` - -|=== - -[discrete] -=== eql.getStatus - -[source,ts] ----- -client.eql.getStatus({ - id: string -}) ----- -link:{ref}/eql-search-api.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The async search ID - -|=== - -[discrete] -=== eql.search - -[source,ts] ----- -client.eql.search({ - index: string, - wait_for_completion_timeout: string, - keep_on_completion: boolean, - keep_alive: string, - body: object -}) ----- -link:{ref}/eql-search-api.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index to scope the operation - -|`wait_for_completion_timeout` or `waitForCompletionTimeout` -|`string` - Specify the time that the request should block waiting for the final response - -|`keep_on_completion` or `keepOnCompletion` -|`boolean` - Control whether the response should be stored in the cluster if it completed within the provided [wait_for_completion] time (default: false) - -|`keep_alive` or
`keepAlive` -|`string` - Update the time interval in which the results (partial or final) for this search will be available + -_Default:_ `5d` - -|`body` -|`object` - Eql request body. Use the `query` to limit the query scope. - -|=== - -[discrete] -=== exists - -[source,ts] ----- -client.exists({ - id: string, - index: string, - stored_fields: string | string[], - preference: string, - realtime: boolean, - refresh: boolean, - routing: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - version: number, - version_type: 'internal' | 'external' | 'external_gte' -}) ----- -link:{ref}/docs-get.html[Documentation] + -{jsclient}/exists_examples.html[Code Example] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`stored_fields` or `storedFields` -|`string \| string[]` - A comma-separated list of stored fields to return in the response - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`realtime` -|`boolean` - Specify whether to perform the operation in realtime or search mode - -|`refresh` -|`boolean` - Refresh the shard containing the document before performing the operation - -|`routing` -|`string` - Specific routing value - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|=== - -[discrete] -=== existsSource - -[source,ts] ----- -client.existsSource({ - id: string, - index: string, - type: string, - preference: string, - realtime: boolean, - refresh: boolean, - routing: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - version: number, - version_type: 'internal' | 'external' | 'external_gte' -}) ----- -link:{ref}/docs-get.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`type` -|`string` - The type of the document; deprecated and optional starting with 7.0 + - -WARNING: This parameter has been deprecated. 
- -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`realtime` -|`boolean` - Specify whether to perform the operation in realtime or search mode - -|`refresh` -|`boolean` - Refresh the shard containing the document before performing the operation - -|`routing` -|`string` - Specific routing value - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|=== - -[discrete] -=== explain - -[source,ts] ----- -client.explain({ - id: string, - index: string, - analyze_wildcard: boolean, - analyzer: string, - default_operator: 'AND' | 'OR', - df: string, - stored_fields: string | string[], - lenient: boolean, - preference: string, - q: string, - routing: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - body: object -}) ----- -link:{ref}/search-explain.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcards and prefix queries in the query string query should be analyzed (default: false) - -|`analyzer` -|`string` - The analyzer for the query string query - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The default field for query string query (default: _all) - -|`stored_fields` or `storedFields` -|`string \| string[]` - A comma-separated list of stored fields to return in the response - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`q` -|`string` - Query in the Lucene query string syntax - -|`routing` -|`string` - Specific routing value - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`body` -|`object` - The query definition using the Query DSL - -|=== - -[discrete] -=== features.getFeatures - -[source,ts] ----- -client.features.getFeatures({ - master_timeout: string -}) ----- -link:{ref}/get-features-api.html[Documentation] + -[cols=2*] -|=== -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|=== - -[discrete] -=== features.resetFeatures -*Stability:* experimental -[source,ts] ----- -client.features.resetFeatures() ----- -link:{ref}/modules-snapshots.html[Documentation] + - - -[discrete] -=== fieldCaps - -[source,ts] ----- -client.fieldCaps({ - index: string | string[], - fields: string | string[], - ignore_unavailable: boolean, - 
allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - include_unmapped: boolean, - body: object -}) ----- -link:{ref}/search-field-caps.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`fields` -|`string \| string[]` - A comma-separated list of field names - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`include_unmapped` or `includeUnmapped` -|`boolean` - Indicates whether unmapped fields should be included in the response. - -|`body` -|`object` - An index filter specified with the Query DSL - -|=== - -[discrete] -=== fleet.globalCheckpoints -*Stability:* experimental -[source,ts] ----- -client.fleet.globalCheckpoints({ - index: string, - wait_for_advance: boolean, - wait_for_index: boolean, - checkpoints: string | string[], - timeout: string -}) ----- -[cols=2*] -|=== -|`index` -|`string` - The name of the index. - -|`wait_for_advance` or `waitForAdvance` -|`boolean` - Whether to wait for the global checkpoint to advance past the specified current checkpoints + -_Default:_ `false` - -|`wait_for_index` or `waitForIndex` -|`boolean` - Whether to wait for the target index to exist and all primary shards be active + -_Default:_ `false` - -|`checkpoints` -|`string \| string[]` - Comma separated list of checkpoints - -|`timeout` -|`string` - Timeout to wait for global checkpoint to advance + -_Default:_ `30s` - -|=== - -[discrete] -=== get - -[source,ts] ----- -client.get({ - id: string, - index: string, - stored_fields: string | string[], - preference: string, - realtime: boolean, - refresh: boolean, - routing: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - version: number, - version_type: 'internal' | 'external' | 'external_gte' -}) ----- -link:{ref}/docs-get.html[Documentation] + -{jsclient}/get_examples.html[Code Example] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`stored_fields` or `storedFields` -|`string \| string[]` - A comma-separated list of stored fields to return in the response - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`realtime` -|`boolean` - Specify whether to perform the operation in realtime or search mode - -|`refresh` -|`boolean` - Refresh the shard containing the document before performing the operation - -|`routing` -|`string` - Specific routing value - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`version` -|`number` - Explicit version number for 
concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|=== - -[discrete] -=== getScript - -[source,ts] ----- -client.getScript({ - id: string, - master_timeout: string -}) ----- -link:{ref}/modules-scripting.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Script ID - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -[discrete] -=== getScriptContext -*Stability:* experimental -[source,ts] ----- -client.getScriptContext() ----- -link:https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html[Documentation] + - - -[discrete] -=== getScriptLanguages -*Stability:* experimental -[source,ts] ----- -client.getScriptLanguages() ----- -link:{ref}/modules-scripting.html[Documentation] + - - -[discrete] -=== getSource - -[source,ts] ----- -client.getSource({ - id: string, - index: string, - preference: string, - realtime: boolean, - refresh: boolean, - routing: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - version: number, - version_type: 'internal' | 'external' | 'external_gte' -}) ----- -link:{ref}/docs-get.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`realtime` -|`boolean` - Specify whether to perform the operation in realtime or search mode - -|`refresh` -|`boolean` - Refresh the shard containing the document before performing the operation - -|`routing` -|`string` - Specific routing value - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|=== - -[discrete] -=== graph.explore - -[source,ts] ----- -client.graph.explore({ - index: string | string[], - routing: string, - timeout: string, - body: object -}) ----- -link:{ref}/graph-explore-api.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`body` -|`object` - Graph Query DSL - -|=== - -[discrete] -=== ilm.deleteLifecycle - -[source,ts] ----- -client.ilm.deleteLifecycle({ - policy: string -}) ----- -link:{ref}/ilm-delete-lifecycle.html[Documentation] + -[cols=2*] -|=== -|`policy` -|`string` - The name of the index lifecycle policy - -|=== - -[discrete] -=== ilm.explainLifecycle - -[source,ts] ----- -client.ilm.explainLifecycle({ - index: string, - only_managed: boolean, - only_errors: boolean -}) ----- -link:{ref}/ilm-explain-lifecycle.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index to explain - -|`only_managed` or `onlyManaged` -|`boolean` - filters the indices included in the response to ones managed by ILM - -|`only_errors` or `onlyErrors` 
-|`boolean` - filters the indices included in the response to ones in an ILM error state, implies only_managed - -|=== - -[discrete] -=== ilm.getLifecycle - -[source,ts] ----- -client.ilm.getLifecycle({ - policy: string -}) ----- -link:{ref}/ilm-get-lifecycle.html[Documentation] + -[cols=2*] -|=== -|`policy` -|`string` - The name of the index lifecycle policy - -|=== - -[discrete] -=== ilm.getStatus - -[source,ts] ----- -client.ilm.getStatus() ----- -link:{ref}/ilm-get-status.html[Documentation] + - - -[discrete] -=== ilm.migrateToDataTiers - -[source,ts] ----- -client.ilm.migrateToDataTiers({ - dry_run: boolean, - body: object -}) ----- -link:{ref}/ilm-migrate-to-data-tiers.html[Documentation] + -[cols=2*] -|=== -|`dry_run` or `dryRun` -|`boolean` - If set to true it will simulate the migration, providing a way to retrieve the ILM policies and indices that need to be migrated. The default is false - -|`body` -|`object` - Optionally specify a legacy index template name to delete and optionally specify a node attribute name used for index shard routing (defaults to "data") - -|=== - -[discrete] -=== ilm.moveToStep - -[source,ts] ----- -client.ilm.moveToStep({ - index: string, - body: object -}) ----- -link:{ref}/ilm-move-to-step.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index whose lifecycle step is to change - -|`body` -|`object` - The new lifecycle step to move to - -|=== - -[discrete] -=== ilm.putLifecycle - -[source,ts] ----- -client.ilm.putLifecycle({ - policy: string, - body: object -}) ----- -link:{ref}/ilm-put-lifecycle.html[Documentation] + -[cols=2*] -|=== -|`policy` -|`string` - The name of the index lifecycle policy - -|`body` -|`object` - The lifecycle policy definition to register - -|=== - -[discrete] -=== ilm.removePolicy - -[source,ts] ----- -client.ilm.removePolicy({ - index: string -}) ----- -link:{ref}/ilm-remove-policy.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index to remove policy on - -|=== - -[discrete] -=== ilm.retry - -[source,ts] ----- -client.ilm.retry({ - index: string -}) ----- -link:{ref}/ilm-retry-policy.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the indices (comma-separated) whose failed lifecycle step is to be retried - -|=== - -[discrete] -=== ilm.start - -[source,ts] ----- -client.ilm.start() ----- -link:{ref}/ilm-start.html[Documentation] + - - -[discrete] -=== ilm.stop - -[source,ts] ----- -client.ilm.stop() ----- -link:{ref}/ilm-stop.html[Documentation] +
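To make the ILM surface above more concrete, here is a minimal sketch that registers a policy and retries a failed lifecycle step. The policy name, index name, and the 30-day delete phase are illustrative placeholders, not values mandated by the API:

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // Register (or overwrite) a lifecycle policy.
  await client.ilm.putLifecycle({
    policy: 'cleanup-policy',
    body: {
      policy: {
        phases: {
          delete: { min_age: '30d', actions: { delete: {} } }
        }
      }
    }
  })

  // If a managed index gets stuck in an error step,
  // retry the failed lifecycle step.
  await client.ilm.retry({ index: 'my-index' })
}

run().catch(console.log)
----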
- - -[discrete] -=== index - -[source,ts] ----- -client.index({ - id: string, - index: string, - wait_for_active_shards: string, - op_type: 'index' | 'create', - refresh: 'true' | 'false' | 'wait_for', - routing: string, - timeout: string, - version: number, - version_type: 'internal' | 'external' | 'external_gte', - if_seq_no: number, - if_primary_term: number, - pipeline: string, - require_alias: boolean, - body: object -}) ----- -link:{ref}/docs-index_.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Document ID - -|`index` -|`string` - The name of the index - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`op_type` or `opType` -|`'index' \| 'create'` - Explicit operation type. Defaults to `index` for requests with an explicit document ID, and to `create` for requests without an explicit document ID - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. - -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|`if_seq_no` or `ifSeqNo` -|`number` - only perform the index operation if the last operation that has changed the document has the specified sequence number - -|`if_primary_term` or `ifPrimaryTerm` -|`number` - only perform the index operation if the last operation that has changed the document has the specified primary term - -|`pipeline` -|`string` - The pipeline id to preprocess incoming documents with - -|`require_alias` or `requireAlias` -|`boolean` - When true, requires destination to be an alias. Default is false - -|`body` -|`object` - The document - -|=== - -[discrete] -=== indices.addBlock - -[source,ts] ----- -client.indices.addBlock({ - index: string | string[], - block: string, - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/index-modules-blocks.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of indices to add a block to - -|`block` -|`string` - The block to add (one of read, write, read_only or metadata) - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|===
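A short sketch combining the `index` and `indices.addBlock` APIs above: it indexes one document, waiting for a refresh so it is visible to search, and then write-blocks the index. The index name and document are placeholders:

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // Index a document and wait for a refresh so it is searchable.
  await client.index({
    index: 'my-index',
    refresh: 'wait_for',
    body: { title: 'hello world' }
  })

  // Block further write operations on the index.
  await client.indices.addBlock({ index: 'my-index', block: 'write' })
}

run().catch(console.log)
----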
- -[discrete] -=== indices.analyze - -[source,ts] ----- -client.indices.analyze({ - index: string, - body: object -}) ----- -link:{ref}/indices-analyze.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index to scope the operation - -|`body` -|`object` - Define analyzer/tokenizer parameters and the text on which the analysis should be performed - -|=== - -[discrete] -=== indices.clearCache - -[source,ts] ----- -client.indices.clearCache({ - index: string | string[], - fielddata: boolean, - fields: string | string[], - query: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - request: boolean -}) ----- -link:{ref}/indices-clearcache.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to limit the operation - -|`fielddata` -|`boolean` - Clear field data - -|`fields` -|`string \| string[]` - A comma-separated list of fields to clear when using the `fielddata` parameter (default: all) - -|`query` -|`boolean` - Clear query caches - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`request` -|`boolean` - Clear request cache - -|=== - -[discrete] -=== indices.clone - -[source,ts] ----- -client.indices.clone({ - index: string, - target: string, - timeout: string, - master_timeout: string, - wait_for_active_shards: string, - body: object -}) ----- -link:{ref}/indices-clone-index.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the source index to clone - -|`target` -|`string` - The name of the target index to clone into - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Set the number of active shards to wait for on the cloned index before the operation returns. - -|`body` -|`object` - The configuration for the target index (`settings` and `aliases`) - -|===
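A minimal sketch of `indices.clone`. Note that a source index has to be write-blocked before it can be cloned, so the sketch reuses `indices.addBlock`; the index names are placeholders:

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // The source index must be write-blocked before cloning.
  await client.indices.addBlock({ index: 'my-index', block: 'write' })

  await client.indices.clone({
    index: 'my-index',
    target: 'my-index-clone'
  })
}

run().catch(console.log)
----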
- -[discrete] -=== indices.close - -[source,ts] ----- -client.indices.close({ - index: string | string[], - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - wait_for_active_shards: string -}) ----- -link:{ref}/indices-open-close.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of indices to close - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of active shards to wait for before the operation returns. - -|=== - -[discrete] -=== indices.create - -[source,ts] ----- -client.indices.create({ - index: string, - wait_for_active_shards: string, - timeout: string, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-create-index.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Set the number of active shards to wait for before the operation returns. - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The configuration for the index (`settings` and `mappings`) - -|=== - -[discrete] -=== indices.createDataStream - -[source,ts] ----- -client.indices.createDataStream({ - name: string -}) ----- -link:{ref}/data-streams.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the data stream - -|=== - -[discrete] -=== indices.dataStreamsStats - -[source,ts] ----- -client.indices.dataStreamsStats({ - name: string | string[] -}) ----- -link:{ref}/data-streams.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of data stream names; use `_all` or empty string to perform the operation on all data streams - -|=== - -[discrete] -=== indices.delete - -[source,ts] ----- -client.indices.delete({ - index: string | string[], - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/indices-delete-index.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of indices to delete; use `_all` or `*` string to delete all indices - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Ignore unavailable indices (default: false) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Ignore if a wildcard expression resolves to no concrete indices (default: false) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether wildcard expressions should get expanded to open or closed indices (default: open) + -_Default:_ `open` - -|===
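Tying together `indices.create` and `indices.delete` from above, a minimal sketch with placeholder index name, settings, and mappings:

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // Create an index with one shard and a single mapped field.
  await client.indices.create({
    index: 'my-index',
    body: {
      settings: { number_of_shards: 1 },
      mappings: { properties: { title: { type: 'text' } } }
    }
  })

  // ...and tear it down again.
  await client.indices.delete({ index: 'my-index' })
}

run().catch(console.log)
----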
- -[discrete] -=== indices.deleteAlias - -[source,ts] ----- -client.indices.deleteAlias({ - index: string | string[], - name: string | string[], - timeout: string, - master_timeout: string -}) ----- -link:{ref}/indices-aliases.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names (supports wildcards); use `_all` for all indices - -|`name` -|`string \| string[]` - A comma-separated list of aliases to delete (supports wildcards); use `_all` to delete all aliases for the specified indices. - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -[discrete] -=== indices.deleteDataStream - -[source,ts] ----- -client.indices.deleteDataStream({ - name: string | string[], - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/data-streams.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of data streams to delete; use `*` to delete all data streams - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether wildcard expressions should get expanded to open or closed indices (default: open) + -_Default:_ `open` - -|=== - -[discrete] -=== indices.deleteIndexTemplate - -[source,ts] ----- -client.indices.deleteIndexTemplate({ - name: string, - timeout: string, - master_timeout: string -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -[discrete] -=== indices.deleteTemplate - -[source,ts] ----- -client.indices.deleteTemplate({ - name: string, - timeout: string, - master_timeout: string -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -[discrete] -=== indices.diskUsage -*Stability:* experimental -[source,ts] ----- -client.indices.diskUsage({ - index: string, - run_expensive_tasks: boolean, - flush: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/indices-disk-usage.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - Comma-separated list of indices or data streams to analyze the disk usage - -|`run_expensive_tasks` or `runExpensiveTasks` -|`boolean` - Must be set to [true] in order for the task to be performed. Defaults to false. - -|`flush` -|`boolean` - Whether to flush before analyzing the index disk usage. Defaults to true - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|===
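Since `indices.diskUsage` is experimental and refuses to run unless explicitly allowed, a sketch has to opt in through `run_expensive_tasks`; the index name is a placeholder:

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // The analysis is expensive, so it must be enabled explicitly.
  const { body } = await client.indices.diskUsage({
    index: 'my-index',
    run_expensive_tasks: true
  })
  console.log(body)
}

run().catch(console.log)
----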
+ -_Default:_ `open` - -|=== - -[discrete] -=== indices.exists - -[source,ts] ----- -client.indices.exists({ - index: string | string[], - local: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - flat_settings: boolean, - include_defaults: boolean -}) ----- -link:{ref}/indices-exists.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Ignore unavailable indexes (default: false) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Ignore if a wildcard expression resolves to no concrete indices (default: false) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether wildcard expressions should get expanded to open or closed indices (default: open) + -_Default:_ `open` - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`include_defaults` or `includeDefaults` -|`boolean` - Whether to return all default setting for each of the indices. - -|=== - -[discrete] -=== indices.existsAlias - -[source,ts] ----- -client.indices.existsAlias({ - name: string | string[], - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - local: boolean -}) ----- -link:{ref}/indices-aliases.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of alias names to return - -|`index` -|`string \| string[]` - A comma-separated list of index names to filter aliases - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `all` - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -[discrete] -=== indices.existsIndexTemplate - -[source,ts] ----- -client.indices.existsIndexTemplate({ - name: string, - flat_settings: boolean, - master_timeout: string, - local: boolean -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -[discrete] -=== indices.existsTemplate - -[source,ts] ----- -client.indices.existsTemplate({ - name: string | string[], - flat_settings: boolean, - master_timeout: string, - local: boolean -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - The comma separated names of the index templates - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -[discrete] -=== indices.existsType - -[source,ts] ----- -client.indices.existsType({ - index: string | string[], - type: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - local: boolean -}) ----- -link:{ref}/indices-types-exists.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` to check the types across all indices - -|`type` -|`string \| string[]` - A comma-separated list of document types to check - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `open` - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -[discrete] -=== indices.fieldUsageStats -*Stability:* experimental -[source,ts] ----- -client.indices.fieldUsageStats({ - index: string, - fields: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/field-usage-stats.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`fields` -|`string \| string[]` - A comma-separated list of fields to include in the stats if only a subset of fields should be returned (supports wildcards) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|=== - -[discrete] -=== indices.flush - -[source,ts] ----- -client.indices.flush({ - index: string | string[], - force: boolean, - wait_if_ongoing: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/indices-flush.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string for all indices - -|`force` -|`boolean` - Whether a flush should be forced even if it is not necessarily needed ie. if no changes will be committed to the index. This is useful if transaction log IDs should be incremented even if no uncommitted changes are present. (This setting can be considered as internal) - -|`wait_if_ongoing` or `waitIfOngoing` -|`boolean` - If set to true the flush operation will block until the flush can be executed if another flush operation is already executing. The default is true. If set to false the flush will be skipped iff if another flush operation is already running. - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `open` - -|=== - -[discrete] -=== indices.forcemerge - -[source,ts] ----- -client.indices.forcemerge({ - index: string | string[], - flush: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - max_num_segments: number, - only_expunge_deletes: boolean -}) ----- -link:{ref}/indices-forcemerge.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`flush` -|`boolean` - Specify whether the index should be flushed after performing the operation (default: true) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`max_num_segments` or `maxNumSegments` -|`number` - The number of segments the index should be merged into (default: dynamic) - -|`only_expunge_deletes` or `onlyExpungeDeletes` -|`boolean` - Specify whether the operation should only expunge deleted documents - -|=== - -[discrete] -=== indices.freeze - -[source,ts] ----- -client.indices.freeze({ - index: string, - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - wait_for_active_shards: string -}) ----- -link:{ref}/freeze-index-api.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index to freeze - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `closed` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of active shards to wait for before the operation returns. 
- -|=== - -[discrete] -=== indices.get - -[source,ts] ----- -client.indices.get({ - index: string | string[], - local: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - flat_settings: boolean, - include_defaults: boolean, - master_timeout: string -}) ----- -link:{ref}/indices-get-index.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Ignore unavailable indexes (default: false) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Ignore if a wildcard expression resolves to no concrete indices (default: false) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether wildcard expressions should get expanded to open or closed indices (default: open) + -_Default:_ `open` - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`include_defaults` or `includeDefaults` -|`boolean` - Whether to return all default setting for each of the indices. - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -[discrete] -=== indices.getAlias - -[source,ts] ----- -client.indices.getAlias({ - name: string | string[], - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - local: boolean -}) ----- -link:{ref}/indices-aliases.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of alias names to return - -|`index` -|`string \| string[]` - A comma-separated list of index names to filter aliases - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `all` - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -[discrete] -=== indices.getDataStream - -[source,ts] ----- -client.indices.getDataStream({ - name: string | string[], - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/data-streams.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of data streams to get; use `*` to get all data streams - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether wildcard expressions should get expanded to open or closed indices (default: open) + -_Default:_ `open` - -|=== - -[discrete] -=== indices.getFieldMapping - -[source,ts] ----- -client.indices.getFieldMapping({ - fields: string | string[], - index: string | string[], - include_defaults: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - local: boolean -}) ----- -link:{ref}/indices-get-field-mapping.html[Documentation] + -[cols=2*] -|=== -|`fields` -|`string \| string[]` - A comma-separated list of fields - -|`index` -|`string \| string[]` - A comma-separated list of index names - -|`include_defaults` or `includeDefaults` -|`boolean` - Whether the default mapping values should be returned as well - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -[discrete] -=== indices.getIndexTemplate - -[source,ts] ----- -client.indices.getIndexTemplate({ - name: string | string[], - flat_settings: boolean, - master_timeout: string, - local: boolean -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - The comma separated names of the index templates - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -[discrete] -=== indices.getMapping - -[source,ts] ----- -client.indices.getMapping({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - master_timeout: string, - local: boolean -}) ----- -link:{ref}/indices-get-mapping.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) + - -WARNING: This parameter has been deprecated. - -|=== - -[discrete] -=== indices.getSettings - -[source,ts] ----- -client.indices.getSettings({ - index: string | string[], - name: string | string[], - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - flat_settings: boolean, - local: boolean, - include_defaults: boolean -}) ----- -link:{ref}/indices-get-settings.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`name` -|`string \| string[]` - The name of the settings that should be included - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `all` - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`include_defaults` or `includeDefaults` -|`boolean` - Whether to return all default setting for each of the indices. 
- -|=== - -[discrete] -=== indices.getTemplate - -[source,ts] ----- -client.indices.getTemplate({ - name: string | string[], - flat_settings: boolean, - master_timeout: string, - local: boolean -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - The comma separated names of the index templates - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -[discrete] -=== indices.migrateToDataStream - -[source,ts] ----- -client.indices.migrateToDataStream({ - name: string -}) ----- -link:{ref}/data-streams.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the alias to migrate - -|=== - -[discrete] -=== indices.open - -[source,ts] ----- -client.indices.open({ - index: string | string[], - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - wait_for_active_shards: string -}) ----- -link:{ref}/indices-open-close.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma separated list of indices to open - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `closed` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of active shards to wait for before the operation returns. - -|=== - -[discrete] -=== indices.promoteDataStream - -[source,ts] ----- -client.indices.promoteDataStream({ - name: string -}) ----- -link:{ref}/data-streams.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the data stream - -|=== - -[discrete] -=== indices.putAlias - -[source,ts] ----- -client.indices.putAlias({ - index: string | string[], - name: string, - timeout: string, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-aliases.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names the alias should point to (supports wildcards); use `_all` to perform the operation on all indices. 
- -|`name` -|`string` - The name of the alias to be created or updated - -|`timeout` -|`string` - Explicit timestamp for the document - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The settings for the alias, such as `routing` or `filter` - -|=== - -[discrete] -=== indices.putIndexTemplate - -[source,ts] ----- -client.indices.putIndexTemplate({ - name: string, - create: boolean, - cause: string, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`create` -|`boolean` - Whether the index template should only be added if new or can also replace an existing one - -|`cause` -|`string` - User defined reason for creating/updating the index template - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The template definition - -|=== - -[discrete] -=== indices.putMapping - -[source,ts] ----- -client.indices.putMapping({ - index: string | string[], - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - write_index_only: boolean, - body: object -}) ----- -link:{ref}/indices-put-mapping.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`write_index_only` or `writeIndexOnly` -|`boolean` - When true, applies mappings only to the write index of an alias or data stream - -|`body` -|`object` - The mapping definition - -|=== - -[discrete] -=== indices.putSettings - -[source,ts] ----- -client.indices.putSettings({ - index: string | string[], - master_timeout: string, - timeout: string, - preserve_existing: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - flat_settings: boolean, - body: object -}) ----- -link:{ref}/indices-update-settings.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`timeout` -|`string` - Explicit operation timeout - -|`preserve_existing` or `preserveExisting` -|`boolean` - Whether to update existing settings. 
If set to `true` existing settings on an index remain unchanged, the default is `false` - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`body` -|`object` - The index settings to be updated - -|=== - -[discrete] -=== indices.putTemplate - -[source,ts] ----- -client.indices.putTemplate({ - name: string, - order: number, - create: boolean, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`order` -|`number` - The order for this template when merging multiple matching ones (higher numbers are merged later, overriding the lower numbers) - -|`create` -|`boolean` - Whether the index template should only be added if new or can also replace an existing one - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The template definition - -|=== - -[discrete] -=== indices.recovery - -[source,ts] ----- -client.indices.recovery({ - index: string | string[], - detailed: boolean, - active_only: boolean -}) ----- -link:{ref}/indices-recovery.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`detailed` -|`boolean` - Whether to display detailed information about shard recovery - -|`active_only` or `activeOnly` -|`boolean` - Display only those recoveries that are currently on-going - -|=== - -[discrete] -=== indices.refresh - -[source,ts] ----- -client.indices.refresh({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/indices-refresh.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `open` - -|=== - -[discrete] -=== indices.reloadSearchAnalyzers - -[source,ts] ----- -client.indices.reloadSearchAnalyzers({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/indices-reload-analyzers.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to reload analyzers for - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|=== - -[discrete] -=== indices.resolveIndex -*Stability:* experimental -[source,ts] ----- -client.indices.resolveIndex({ - name: string | string[], - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/indices-resolve-index-api.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of names or wildcard expressions - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether wildcard expressions should get expanded to open or closed indices (default: open) + -_Default:_ `open` - -|=== - -[discrete] -=== indices.rollover - -[source,ts] ----- -client.indices.rollover({ - alias: string, - new_index: string, - timeout: string, - dry_run: boolean, - master_timeout: string, - wait_for_active_shards: string, - body: object -}) ----- -link:{ref}/indices-rollover-index.html[Documentation] + -[cols=2*] -|=== -|`alias` -|`string` - The name of the alias to rollover - -|`new_index` or `newIndex` -|`string` - The name of the rollover index - -|`timeout` -|`string` - Explicit operation timeout - -|`dry_run` or `dryRun` -|`boolean` - If set to true the rollover action will only be validated but not actually performed even if a condition matches. The default is false - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Set the number of active shards to wait for on the newly created rollover index before the operation returns. - -|`body` -|`object` - The conditions that needs to be met for executing rollover - -|=== - -[discrete] -=== indices.segments - -[source,ts] ----- -client.indices.segments({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - verbose: boolean -}) ----- -link:{ref}/indices-segments.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
-(This includes `_all` string or when no indices have been specified)
-
-|`expand_wildcards` or `expandWildcards`
-|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. +
-_Default:_ `open`
-
-|`verbose`
-|`boolean` - Includes detailed memory usage by Lucene.
-
-|===
-
-[discrete]
-=== indices.shardStores
-
-[source,ts]
-----
-client.indices.shardStores({
-  index: string | string[],
-  status: string | string[],
-  ignore_unavailable: boolean,
-  allow_no_indices: boolean,
-  expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all'
-})
-----
-link:{ref}/indices-shards-stores.html[Documentation] +
-[cols=2*]
-|===
-|`index`
-|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices
-
-|`status`
-|`string \| string[]` - A comma-separated list of statuses used to filter on shards to get store information for
-
-|`ignore_unavailable` or `ignoreUnavailable`
-|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed)
-
-|`allow_no_indices` or `allowNoIndices`
-|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
-
-|`expand_wildcards` or `expandWildcards`
-|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. +
-_Default:_ `open`
-
-|===
+|| ██╔══██╗██╔══╝  ██╔══██║██║  ██║██║╚██╔╝██║██╔══╝   ||
+|| ██║  ██║███████╗██║  ██║██████╔╝██║ ╚═╝ ██║███████╗ ||
+|| ╚═╝  ╚═╝╚══════╝╚═╝  ╚═╝╚═════╝ ╚═╝     ╚═╝╚══════╝ ||
+||                                                     ||
+||                                                     ||
+|| This file is autogenerated, DO NOT send pull requests that changes this file directly. ||
+|| You should update the script that does the generation, which can be found in: ||
+|| https://github.com/elastic/elastic-client-generator-js ||
+||                                                     ||
+|| You can run the script with the following command:  ||
+|| npm run elasticsearch -- --version                  ||
+||                                                     ||
+||                                                     ||
+||                                                     ||
+===========================================================================================================================
+////////
+== API Reference
+
-[discrete]
-=== indices.shrink
+=== bulk
+Allows to perform multiple index/update/delete operations in a single request.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html[Endpoint documentation]
 [source,ts]
 ----
-client.indices.shrink({
-  index: string,
-  target: string,
-  timeout: string,
-  master_timeout: string,
-  wait_for_active_shards: string,
-  body: object
-})
+client.bulk(...)
 ----
-link:{ref}/indices-shrink-index.html[Documentation] +
-[cols=2*]
-|===
-|`index`
-|`string` - The name of the source index to shrink
-
-|`target`
-|`string` - The name of the target index to shrink into
-
-|`timeout`
-|`string` - Explicit operation timeout
-
-|`master_timeout` or `masterTimeout`
-|`string` - Specify timeout for connection to master
-
-|`wait_for_active_shards` or `waitForActiveShards`
-|`string` - Set the number of active shards to wait for on the shrunken index before the operation returns.
-
-|`body`
-|`object` - The configuration for the target index (`settings` and `aliases`)
-
-|===
-
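A minimal usage sketch for the `bulk` stub above (not part of the generated file): the node URL, index name, and documents are placeholders, and a canary-era client that resolves with a `{ body }` envelope and accepts the flattened `body` array is assumed.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // Each operation is an action line followed by its source document.
  const { body } = await client.bulk({
    refresh: true,
    body: [
      { index: { _index: 'my-index' } },
      { title: 'Document one' },
      { index: { _index: 'my-index' } },
      { title: 'Document two' }
    ]
  })
  // A bulk request can partially fail; inspect the per-item results.
  if (body.errors) console.log(body.items)
}

run().catch(console.log)
----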
-[discrete]
-=== indices.simulateIndexTemplate
+=== clear_scroll
+Explicitly clears the search context for a scroll.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-scroll-api.html[Endpoint documentation]
 [source,ts]
 ----
-client.indices.simulateIndexTemplate({
-  name: string,
-  create: boolean,
-  cause: string,
-  master_timeout: string,
-  body: object
-})
+client.clearScroll(...)
 ----
-link:{ref}/indices-templates.html[Documentation] +
-[cols=2*]
-|===
-|`name`
-|`string` - The name of the index (it must be a concrete index name)
-
-|`create`
-|`boolean` - Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one
-
-|`cause`
-|`string` - User defined reason for dry-run creating the new template for simulation purposes
-
-|`master_timeout` or `masterTimeout`
-|`string` - Specify timeout for connection to master
-
-|`body`
-|`object` - New index template definition, which will be included in the simulation, as if it already exists in the system
-
-|===
-
-[discrete]
-=== indices.simulateTemplate
+=== close_point_in_time
+Close a point in time
+https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html[Endpoint documentation]
 [source,ts]
 ----
-client.indices.simulateTemplate({
-  name: string,
-  create: boolean,
-  cause: string,
-  master_timeout: string,
-  body: object
-})
+client.closePointInTime(...)
 ----
-link:{ref}/indices-templates.html[Documentation] +
-[cols=2*]
-|===
-|`name`
-|`string` - The name of the index template
-
-|`create`
-|`boolean` - Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one
-
-|`cause`
-|`string` - User defined reason for dry-run creating the new template for simulation purposes
-
-|`master_timeout` or `masterTimeout`
-|`string` - Specify timeout for connection to master
-
-|`body`
-|`object` - New index template definition to be simulated, if no index template name is specified
-
-|===
-
-[discrete]
-=== indices.split
+=== count
+Returns number of documents matching a query.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html[Endpoint documentation]
 [source,ts]
 ----
-client.indices.split({
-  index: string,
-  target: string,
-  timeout: string,
-  master_timeout: string,
-  wait_for_active_shards: string,
-  body: object
-})
+client.count(...)
 ----
-link:{ref}/indices-split-index.html[Documentation] +
-[cols=2*]
-|===
-|`index`
-|`string` - The name of the source index to split
-
-|`target`
-|`string` - The name of the target index to split into
-
-|`timeout`
-|`string` - Explicit operation timeout
-
-|`master_timeout` or `masterTimeout`
-|`string` - Specify timeout for connection to master
-
-|`wait_for_active_shards` or `waitForActiveShards`
-|`string` - Set the number of active shards to wait for on the shrunken index before the operation returns.
-
-|`body`
-|`object` - The configuration for the target index (`settings` and `aliases`)
-
-|===
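A minimal `count` sketch (illustrative only; index name and query are placeholders, and a `{ body }` envelope response is assumed):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // Count only the documents that match the query.
  const { body } = await client.count({
    index: 'my-index',
    body: { query: { match: { title: 'one' } } }
  })
  console.log(body.count)
}

run().catch(console.log)
----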
+=== create
+Creates a new document in the index.
-[discrete]
-=== indices.stats
+
+Returns a 409 response when a document with a same ID already exists in the index.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html[Endpoint documentation]
 [source,ts]
 ----
-client.indices.stats({
-  metric: string | string[],
-  index: string | string[],
-  completion_fields: string | string[],
-  fielddata_fields: string | string[],
-  fields: string | string[],
-  groups: string | string[],
-  level: 'cluster' | 'indices' | 'shards',
-  types: string | string[],
-  include_segment_file_sizes: boolean,
-  include_unloaded_segments: boolean,
-  expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all',
-  forbid_closed_indices: boolean
-})
+client.create(...)
 ----
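A minimal `create` sketch (illustrative; index, ID, and document are placeholders, `{ body }` envelope assumed):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // Unlike index(), create() fails with a 409 if the ID already exists.
  await client.create({
    index: 'my-index',
    id: '1',
    body: { title: 'Document one' }
  })
}

run().catch(console.log)
----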
-link:{ref}/indices-stats.html[Documentation] +
-[cols=2*]
-|===
-|`metric`
-|`string \| string[]` - Limit the information returned the specific metrics.
-
-|`index`
-|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices
-
-|`completion_fields` or `completionFields`
-|`string \| string[]` - A comma-separated list of fields for the `completion` index metric (supports wildcards)
-
-|`fielddata_fields` or `fielddataFields`
-|`string \| string[]` - A comma-separated list of fields for the `fielddata` index metric (supports wildcards)
-
-|`fields`
-|`string \| string[]` - A comma-separated list of fields for `fielddata` and `completion` index metric (supports wildcards)
-
-|`groups`
-|`string \| string[]` - A comma-separated list of search groups for `search` index metric
-
-|`level`
-|`'cluster' \| 'indices' \| 'shards'` - Return stats aggregated at cluster, index or shard level +
-_Default:_ `indices`
-
-|`types`
-|`string \| string[]` - A comma-separated list of document types for the `indexing` index metric
-
-|`include_segment_file_sizes` or `includeSegmentFileSizes`
-|`boolean` - Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)
-
-|`include_unloaded_segments` or `includeUnloadedSegments`
-|`boolean` - If set to true segment stats will include stats for segments that are not currently loaded into memory
-
-|`expand_wildcards` or `expandWildcards`
-|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. +
-_Default:_ `open`
-
-|`forbid_closed_indices` or `forbidClosedIndices`
-|`boolean` - If set to false stats will also collected from closed indices if explicitly specified or if expand_wildcards expands to closed indices +
-_Default:_ `true`
-
-|===
-
-[discrete]
-=== indices.unfreeze
+=== delete
+Removes a document from the index.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html[Endpoint documentation]
 [source,ts]
 ----
-client.indices.unfreeze({
-  index: string,
-  timeout: string,
-  master_timeout: string,
-  ignore_unavailable: boolean,
-  allow_no_indices: boolean,
-  expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all',
-  wait_for_active_shards: string
-})
+client.delete(...)
 ----
-link:{ref}/unfreeze-index-api.html[Documentation] +
-[cols=2*]
-|===
-|`index`
-|`string` - The name of the index to unfreeze
-
-|`timeout`
-|`string` - Explicit operation timeout
-
-|`master_timeout` or `masterTimeout`
-|`string` - Specify timeout for connection to master
-
-|`ignore_unavailable` or `ignoreUnavailable`
-|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed)
-
-|`allow_no_indices` or `allowNoIndices`
-|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
-
-|`expand_wildcards` or `expandWildcards`
-|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. +
-_Default:_ `closed`
-
-|`wait_for_active_shards` or `waitForActiveShards`
-|`string` - Sets the number of active shards to wait for before the operation returns.
-
-|===
-
-[discrete]
-=== indices.updateAliases
+=== delete_by_query
+Deletes documents matching the provided query.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html[Endpoint documentation]
 [source,ts]
 ----
-client.indices.updateAliases({
-  timeout: string,
-  master_timeout: string,
-  body: object
-})
+client.deleteByQuery(...)
 ----
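A minimal `deleteByQuery` sketch (illustrative; the index and query are placeholders, `{ body }` envelope assumed):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // Delete every document that matches the query.
  const { body } = await client.deleteByQuery({
    index: 'my-index',
    body: { query: { match: { status: 'stale' } } }
  })
  console.log(body.deleted)
}

run().catch(console.log)
----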
-link:{ref}/indices-aliases.html[Documentation] +
-[cols=2*]
-|===
-|`timeout`
-|`string` - Request timeout
-
-|`master_timeout` or `masterTimeout`
-|`string` - Specify timeout for connection to master
-
-|`body`
-|`object` - The definition of `actions` to perform
-
-|===
-
-[discrete]
-=== indices.validateQuery
+=== delete_by_query_rethrottle
+Changes the number of requests per second for a particular Delete By Query operation.
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html[Endpoint documentation]
 [source,ts]
 ----
-client.indices.validateQuery({
-  index: string | string[],
-  type: string | string[],
-  explain: boolean,
-  ignore_unavailable: boolean,
-  allow_no_indices: boolean,
-  expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all',
-  q: string,
-  analyzer: string,
-  analyze_wildcard: boolean,
-  default_operator: 'AND' | 'OR',
-  df: string,
-  lenient: boolean,
-  rewrite: boolean,
-  all_shards: boolean,
-  body: object
-})
+client.deleteByQueryRethrottle(...)
 ----
-link:{ref}/search-validate.html[Documentation] +
-[cols=2*]
-|===
-|`index`
-|`string \| string[]` - A comma-separated list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices
-
-|`type`
-|`string \| string[]` - A comma-separated list of document types to restrict the operation; leave empty to perform the operation on all types +
-
-WARNING: This parameter has been deprecated.
-
-|`explain`
-|`boolean` - Return detailed information about the error
-
-|`ignore_unavailable` or `ignoreUnavailable`
-|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed)
-
-|`allow_no_indices` or `allowNoIndices`
-|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
-
-|`expand_wildcards` or `expandWildcards`
-|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. +
-_Default:_ `open`
-
-|`q`
-|`string` - Query in the Lucene query string syntax
-
-|`analyzer`
-|`string` - The analyzer to use for the query string
-
-|`analyze_wildcard` or `analyzeWildcard`
-|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false)
-
-|`default_operator` or `defaultOperator`
-|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) +
-_Default:_ `OR`
-
-|`df`
-|`string` - The field to use as default where no field prefix is given in the query string
-
-|`lenient`
-|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored
-
-|`rewrite`
-|`boolean` - Provide a more detailed explanation showing the actual Lucene query that will be executed.
-
-|`all_shards` or `allShards`
-|`boolean` - Execute validation on all shards instead of one random shard per index
-
-|`body`
-|`object` - The query definition specified with the Query DSL
-
-|===
-
-[discrete]
-=== info
+=== delete_script
+Deletes a script.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html[Endpoint documentation]
 [source,ts]
 ----
-client.info()
+client.deleteScript(...)
 ----
-link:{ref}/index.html[Documentation] +
+
+=== exists
+Returns information about whether a document exists in an index.
-[discrete]
-=== ingest.deletePipeline
-
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[Endpoint documentation]
 [source,ts]
 ----
-client.ingest.deletePipeline({
-  id: string,
-  master_timeout: string,
-  timeout: string
-})
+client.exists(...)
 ----
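A minimal `exists` sketch (illustrative; index and ID are placeholders, `{ body }` envelope assumed, where the body is a plain boolean):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // HEAD request under the hood: the body resolves to true or false.
  const { body: exists } = await client.exists({ index: 'my-index', id: '1' })
  console.log(exists)
}

run().catch(console.log)
----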
-link:{ref}/delete-pipeline-api.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - Pipeline ID
-
-|`master_timeout` or `masterTimeout`
-|`string` - Explicit operation timeout for connection to master node
-
-|`timeout`
-|`string` - Explicit operation timeout
-
-|===
-
-[discrete]
-=== ingest.geoIpStats
+=== exists_source
+Returns information about whether a document source exists in an index.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[Endpoint documentation]
 [source,ts]
 ----
-client.ingest.geoIpStats()
+client.existsSource(...)
 ----
-link:{ref}/geoip-stats-api.html[Documentation] +
-
-[discrete]
-=== ingest.getPipeline
+=== explain
+Returns information about why a specific document matches (or doesn't match) a query.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html[Endpoint documentation]
 [source,ts]
 ----
-client.ingest.getPipeline({
-  id: string,
-  summary: boolean,
-  master_timeout: string
-})
+client.explain(...)
 ----
-link:{ref}/get-pipeline-api.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - Comma separated list of pipeline ids. Wildcards supported
-
-|`summary`
-|`boolean` - Return pipelines without their definitions (default: false)
-
-|`master_timeout` or `masterTimeout`
-|`string` - Explicit operation timeout for connection to master node
-
-|===
-
-[discrete]
-=== ingest.processorGrok
+=== field_caps
+Returns the information about the capabilities of fields among multiple indices.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html[Endpoint documentation]
 [source,ts]
 ----
-client.ingest.processorGrok()
+client.fieldCaps(...)
 ----
-link:{ref}/grok-processor.html#grok-processor-rest-get[Documentation] +
+
+=== get
+Returns a document.
-[discrete]
-=== ingest.putPipeline
-
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[Endpoint documentation]
 [source,ts]
 ----
-client.ingest.putPipeline({
-  id: string,
-  master_timeout: string,
-  timeout: string,
-  body: object
-})
+client.get(...)
 ----
-link:{ref}/put-pipeline-api.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - Pipeline ID
-
-|`master_timeout` or `masterTimeout`
-|`string` - Explicit operation timeout for connection to master node
-
-|`timeout`
-|`string` - Explicit operation timeout
-
-|`body`
-|`object` - The ingest definition
-
-|===
-
-[discrete]
-=== ingest.simulate
+=== get_script
+Returns a script.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html[Endpoint documentation]
 [source,ts]
 ----
-client.ingest.simulate({
-  id: string,
-  verbose: boolean,
-  body: object
-})
+client.getScript(...)
 ----
-link:{ref}/simulate-pipeline-api.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - Pipeline ID
-
-|`verbose`
-|`boolean` - Verbose mode. Display data output for each processor in executed pipeline
-
-|`body`
-|`object` - The simulate definition
-
-|===
-
-[discrete]
-=== license.delete
+=== get_script_context
+Returns all script contexts.
+https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html[Endpoint documentation]
 [source,ts]
 ----
-client.license.delete()
+client.getScriptContext(...)
 ----
-link:{ref}/delete-license.html[Documentation] +
-
-[discrete]
-=== license.get
+=== get_script_languages
+Returns available script types, languages and contexts
+https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html[Endpoint documentation]
 [source,ts]
 ----
-client.license.get({
-  local: boolean,
-  accept_enterprise: boolean
-})
+client.getScriptLanguages(...)
 ----
-link:{ref}/get-license.html[Documentation] +
-[cols=2*]
-|===
-|`local`
-|`boolean` - Return local information, do not retrieve the state from master node (default: false)
-
-|`accept_enterprise` or `acceptEnterprise`
-|`boolean` - Supported for backwards compatibility with 7.x. If this param is used it must be set to true +
-
-WARNING: This parameter has been deprecated.
-
-|===
-
-[discrete]
-=== license.getBasicStatus
+=== get_source
+Returns the source of a document.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[Endpoint documentation]
 [source,ts]
 ----
-client.license.getBasicStatus()
+client.getSource(...)
 ----
-link:{ref}/get-basic-status.html[Documentation] +
-
-[discrete]
-=== license.getTrialStatus
+=== index
+Creates or updates a document in an index.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html[Endpoint documentation]
 [source,ts]
 ----
-client.license.getTrialStatus()
+client.index(...)
 ----
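A minimal `index` sketch (illustrative; index, ID, and document are placeholders, `{ body }` envelope assumed):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  const { body } = await client.index({
    index: 'my-index',
    id: '1', // omit the ID to have one auto-generated
    body: { title: 'Document one', published: true }
  })
  console.log(body.result) // 'created' or 'updated'
}

run().catch(console.log)
----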
-link:{ref}/get-trial-status.html[Documentation] +
-
-[discrete]
-=== license.post
+=== info
+Returns basic information about the cluster.
+https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html[Endpoint documentation]
 [source,ts]
 ----
-client.license.post({
-  acknowledge: boolean,
-  body: object
-})
+client.info(...)
 ----
-link:{ref}/update-license.html[Documentation] +
-[cols=2*]
-|===
-|`acknowledge`
-|`boolean` - whether the user has acknowledged acknowledge messages (default: false)
-
-|`body`
-|`object` - licenses to be installed
-
-|===
-
-[discrete]
-=== license.postStartBasic
+=== knn_search
+Performs a kNN search.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html[Endpoint documentation]
 [source,ts]
 ----
-client.license.postStartTrial({
-  type: string,
-  acknowledge: boolean
-})
+client.knnSearch(...)
 ----
-link:{ref}/start-trial.html[Documentation] +
-[cols=2*]
-|===
-|`type`
-|`string` - The type of trial license to generate (default: "trial")
-
-|`acknowledge`
-|`boolean` - whether the user has acknowledged acknowledge messages (default: false)
-
-|===
-
-[discrete]
-=== logstash.deletePipeline
+=== mget
+Allows to get multiple documents in one request.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.html[Endpoint documentation]
 [source,ts]
 ----
-client.logstash.deletePipeline({
-  id: string
-})
+client.mget(...)
 ----
-link:{ref}/logstash-api-delete-pipeline.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - The ID of the Pipeline
-
-|===
-
-[discrete]
-=== logstash.getPipeline
+=== msearch
+Allows to execute several search operations in one request.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html[Endpoint documentation]
 [source,ts]
 ----
-client.logstash.getPipeline({
-  id: string
-})
+client.msearch(...)
 ----
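A minimal `msearch` sketch (illustrative; index names and queries are placeholders, `{ body }` envelope assumed). Like `bulk`, the request body is a flattened array of header/body pairs:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  const { body } = await client.msearch({
    body: [
      { index: 'my-index' },
      { query: { match: { title: 'one' } } },
      { index: 'my-index' },
      { query: { match: { title: 'two' } } }
    ]
  })
  // One response object per search, in request order.
  console.log(body.responses.length)
}

run().catch(console.log)
----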
-link:{ref}/logstash-api-get-pipeline.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - A comma-separated list of Pipeline IDs
-
-|===
-
-[discrete]
-=== logstash.putPipeline
+=== msearch_template
+Allows to execute several search template operations in one request.
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html[Endpoint documentation]
 [source,ts]
 ----
-client.logstash.putPipeline({
-  id: string,
-  body: object
-})
+client.msearchTemplate(...)
 ----
-link:{ref}/logstash-api-put-pipeline.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - The ID of the Pipeline
-
-|`body`
-|`object` - The Pipeline to add or update
-
-|===
-
-[discrete]
-=== mget
+=== mtermvectors
+Returns multiple termvectors in one request.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-termvectors.html[Endpoint documentation]
 [source,ts]
 ----
+client.mtermvectors(...)
 ----
+
+=== open_point_in_time
+Open a point in time that can be used in subsequent searches
+https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html[Endpoint documentation]
 [source,ts]
 ----
-client.mget({
-  index: string,
-  stored_fields: string | string[],
-  preference: string,
-  realtime: boolean,
-  refresh: boolean,
-  routing: string,
-  _source: string | string[],
-  _source_excludes: string | string[],
-  _source_includes: string | string[],
-  body: object
-})
+client.openPointInTime(...)
 ----
-link:{ref}/docs-multi-get.html[Documentation] +
-[cols=2*]
-|===
-|`index`
-|`string` - The name of the index
-
-|`stored_fields` or `storedFields`
-|`string \| string[]` - A comma-separated list of stored fields to return in the response
-
-|`preference`
-|`string` - Specify the node or shard the operation should be performed on (default: random)
-
-|`realtime`
-|`boolean` - Specify whether to perform the operation in realtime or search mode
-
-|`refresh`
-|`boolean` - Refresh the shard containing the document before performing the operation
-
-|`routing`
-|`string` - Specific routing value
-
-|`_source`
-|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return
-
-|`_source_excludes` or `_sourceExcludes`
-|`string \| string[]` - A list of fields to exclude from the returned _source field
-
-|`_source_includes` or `_sourceIncludes`
-|`string \| string[]` - A list of fields to extract and return from the _source field
-
-|`body`
-|`object` - Document identifiers; can be either `docs` (containing full document information) or `ids` (when index is provided in the URL.
-
-|===
-
-[discrete]
-=== migration.deprecations
+=== ping
+Returns whether the cluster is running.
+https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html[Endpoint documentation]
 [source,ts]
 ----
-client.migration.deprecations({
-  index: string
-})
+client.ping(...)
 ----
-link:{ref}/migration-api-deprecation.html[Documentation] +
-[cols=2*]
-|===
-|`index`
-|`string` - Index pattern
-
-|===
-
-[discrete]
-=== ml.closeJob
+=== put_script
+Creates or updates a script.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html[Endpoint documentation]
 [source,ts]
 ----
-client.ml.closeJob({
-  job_id: string,
-  allow_no_match: boolean,
-  allow_no_jobs: boolean,
-  force: boolean,
-  timeout: string,
-  body: object
-})
+client.putScript(...)
 ----
-link:{ref}/ml-close-job.html[Documentation] +
-[cols=2*]
-|===
-|`job_id` or `jobId`
-|`string` - The name of the job to close
-
-|`allow_no_match` or `allowNoMatch`
-|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified)
-
-|`allow_no_jobs` or `allowNoJobs`
-|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified) +
-
-WARNING: This parameter has been deprecated.
-
-|`force`
-|`boolean` - True if the job should be forcefully closed
-
-|`timeout`
-|`string` - Controls the time to wait until a job has closed. Default to 30 minutes
-
-|`body`
-|`object` - The URL params optionally sent in the body
-
-|===
-
-[discrete]
-=== ml.deleteCalendar
+=== rank_eval
+Allows to evaluate the quality of ranked search results over a set of typical search queries
+https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html[Endpoint documentation]
 [source,ts]
 ----
-client.ml.deleteCalendar({
-  calendar_id: string
-})
+client.rankEval(...)
 ----
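A minimal `rankEval` sketch (illustrative; the index, query, ratings, and metric are placeholders, `{ body }` envelope assumed):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  const { body } = await client.rankEval({
    index: 'my-index',
    body: {
      // A typical query together with the documents rated as relevant for it.
      requests: [{
        id: 'query-1',
        request: { query: { match: { title: 'one' } } },
        ratings: [{ _index: 'my-index', _id: '1', rating: 1 }]
      }],
      metric: { precision: { k: 5 } }
    }
  })
  console.log(body.metric_score)
}

run().catch(console.log)
----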
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html[Endpoint documentation]
[source,ts]
----
-client.ml.deleteCalendarEvent({
-  calendar_id: string,
-  event_id: string
-})
+client.reindex(...)
----
-link:{ref}/ml-delete-calendar-event.html[Documentation] +
-[cols=2*]
-|===
-|`calendar_id` or `calendarId`
-|`string` - The ID of the calendar to modify
-
-|`event_id` or `eventId`
-|`string` - The ID of the event to remove from the calendar
-|===
-
-[discrete]
-=== ml.deleteCalendarJob
+=== reindex_rethrottle
+Changes the number of requests per second for a particular Reindex operation.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html[Endpoint documentation]
[source,ts]
----
-client.ml.deleteCalendarJob({
-  calendar_id: string,
-  job_id: string
-})
+client.reindexRethrottle(...)
----
-link:{ref}/ml-delete-calendar-job.html[Documentation] +
-[cols=2*]
-|===
-|`calendar_id` or `calendarId`
-|`string` - The ID of the calendar to modify
-
-|`job_id` or `jobId`
-|`string` - The ID of the job to remove from the calendar
-|===
-
-[discrete]
-=== ml.deleteDataFrameAnalytics
+=== render_search_template
+Allows to use the Mustache language to pre-render a search definition.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html[Endpoint documentation]
[source,ts]
----
-client.ml.deleteDataFrameAnalytics({
-  id: string,
-  force: boolean,
-  timeout: string
-})
+client.renderSearchTemplate(...)
----
-link:{ref}/delete-dfanalytics.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - The ID of the data frame analytics to delete
-
-|`force`
-|`boolean` - True if the job should be forcefully deleted

-|`timeout`
-|`string` - Controls the time to wait until a job is deleted. Defaults to 1 minute
-
-|===
-
-[discrete]
-=== ml.deleteDatafeed
+=== scripts_painless_execute
+Allows an arbitrary script to be executed and a result to be returned.

+https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html[Endpoint documentation]
[source,ts]
----
-client.ml.deleteDatafeed({
-  datafeed_id: string,
-  force: boolean
-})
+client.scriptsPainlessExecute(...)
----
-link:{ref}/ml-delete-datafeed.html[Documentation] +
-[cols=2*]
-|===
-|`datafeed_id` or `datafeedId`
-|`string` - The ID of the datafeed to delete
-
-|`force`
-|`boolean` - True if the datafeed should be forcefully deleted
-
-|===

-[discrete]
-=== ml.deleteExpiredData
+=== scroll
+Allows to retrieve a large number of results from a single search request.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll[Endpoint documentation]
[source,ts]
----
-client.ml.deleteExpiredData({
-  job_id: string,
-  requests_per_second: number,
-  timeout: string,
-  body: object
-})
+client.scroll(...)
----
-link:{ref}/ml-delete-expired-data.html[Documentation] +
-[cols=2*]
-|===
-|`job_id` or `jobId`
-|`string` - The ID of the job(s) to perform expired data hygiene for
-
-|`requests_per_second` or `requestsPerSecond`
-|`number` - The desired requests per second for the deletion processes.
-
-|`timeout`
-|`string` - How long can the underlying delete processes run until they are canceled
-
-|`body`
-|`object` - deleting expired data parameters
-|===
-
-[discrete]
-=== ml.deleteFilter
+=== search
+Returns results matching a query.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteFilter({ - filter_id: string -}) +client.search(...) ---- -link:{ref}/ml-delete-filter.html[Documentation] + -[cols=2*] -|=== -|`filter_id` or `filterId` -|`string` - The ID of the filter to delete -|=== - -[discrete] -=== ml.deleteForecast +=== search_mvt +Searches a vector tile for geospatial values. Returns results as a binary Mapbox vector tile. +https://www.elastic.co/guide/en/elasticsearch/reference/master/search-vector-tile-api.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteForecast({ - job_id: string, - forecast_id: string, - allow_no_forecasts: boolean, - timeout: string -}) +client.searchMvt(...) ---- -link:{ref}/ml-delete-forecast.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job from which to delete forecasts - -|`forecast_id` or `forecastId` -|`string` - The ID of the forecast to delete, can be comma delimited list. Leaving blank implies `_all` -|`allow_no_forecasts` or `allowNoForecasts` -|`boolean` - Whether to ignore if `_all` matches no forecasts - -|`timeout` -|`string` - Controls the time to wait until the forecast(s) are deleted. Default to 30 seconds - -|=== - -[discrete] -=== ml.deleteJob +=== search_shards +Returns information about the indices and shards that a search request would be executed against. +https://www.elastic.co/guide/en/elasticsearch/reference/master/search-shards.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteJob({ - job_id: string, - force: boolean, - wait_for_completion: boolean -}) +client.searchShards(...) ---- -link:{ref}/ml-delete-job.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to delete - -|`force` -|`boolean` - True if the job should be forcefully deleted -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should this request wait until the operation has completed before returning + -_Default:_ `true` - -|=== - -[discrete] -=== ml.deleteModelSnapshot +=== search_template +Allows to use the Mustache language to pre-render a search definition. +https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteModelSnapshot({ - job_id: string, - snapshot_id: string -}) +client.searchTemplate(...) ---- -link:{ref}/ml-delete-snapshot.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to fetch - -|`snapshot_id` or `snapshotId` -|`string` - The ID of the snapshot to delete - -|=== -[discrete] -=== ml.deleteTrainedModel +=== terms_enum +The terms enum API can be used to discover terms in the index that begin with the provided string. It is designed for low-latency look-ups used in auto-complete scenarios. +https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteTrainedModel({ - model_id: string -}) +client.termsEnum(...) ---- -link:{ref}/delete-trained-models.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The ID of the trained model to delete -|=== - -[discrete] -=== ml.deleteTrainedModelAlias +=== termvectors +Returns information and statistics about terms in the fields of a particular document. 
+https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteTrainedModelAlias({ - model_alias: string, - model_id: string -}) +client.termvectors(...) ---- -link:{ref}/delete-trained-models-aliases.html[Documentation] + -[cols=2*] -|=== -|`model_alias` or `modelAlias` -|`string` - The trained model alias to delete - -|`model_id` or `modelId` -|`string` - The trained model where the model alias is assigned - -|=== -[discrete] -=== ml.estimateModelMemory +=== update +Updates a document with a script or partial document. +https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html[Endpoint documentation] [source,ts] ---- -client.ml.estimateModelMemory({ - body: object -}) +client.update(...) ---- -link:{ref}/ml-apis.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The analysis config, plus cardinality estimates for fields it references - -|=== -[discrete] -=== ml.evaluateDataFrame +=== update_by_query +Performs an update on every document in the index without changing the source, +for example to pick up a mapping change. +https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html[Endpoint documentation] [source,ts] ---- -client.ml.evaluateDataFrame({ - body: object -}) +client.updateByQuery(...) ---- -link:{ref}/evaluate-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The evaluation definition -|=== - -[discrete] -=== ml.explainDataFrameAnalytics +=== update_by_query_rethrottle +Changes the number of requests per second for a particular Update By Query operation. +https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html[Endpoint documentation] [source,ts] ---- -client.ml.explainDataFrameAnalytics({ - id: string, - body: object -}) +client.updateByQueryRethrottle(...) ---- -link:{ref}/explain-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to explain - -|`body` -|`object` - The data frame analytics config to explain -|=== - -[discrete] -=== ml.flushJob +=== async_search +==== delete +Deletes an async search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. +https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html[Endpoint documentation] [source,ts] ---- -client.ml.flushJob({ - job_id: string, - calc_interim: boolean, - start: string, - end: string, - advance_time: string, - skip_time: string, - body: object -}) +client.asyncSearch.delete(...) 
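+
+// A hedged usage sketch: the ID below is a made-up placeholder for the ID
+// returned by a previous asyncSearch.submit call.
+await client.asyncSearch.delete({ id: 'FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=' })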
---- -link:{ref}/ml-flush-job.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The name of the job to flush - -|`calc_interim` or `calcInterim` -|`boolean` - Calculates interim results for the most recent bucket or all buckets within the latency period - -|`start` -|`string` - When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results - -|`end` -|`string` - When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results -|`advance_time` or `advanceTime` -|`string` - Advances time to the given value generating results and updating the model for the advanced interval - -|`skip_time` or `skipTime` -|`string` - Skips time to the given value without generating results or updating the model for the skipped interval - -|`body` -|`object` - Flush parameters - -|=== - -[discrete] -=== ml.forecast +==== get +Retrieves the results of a previously submitted async search request given its ID. +https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html[Endpoint documentation] [source,ts] ---- -client.ml.forecast({ - job_id: string, - duration: string, - expires_in: string, - max_model_memory: string -}) +client.asyncSearch.get(...) ---- -link:{ref}/ml-forecast.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to forecast for - -|`duration` -|`string` - The duration of the forecast - -|`expires_in` or `expiresIn` -|`string` - The time interval after which the forecast expires. Expired forecasts will be deleted at the first opportunity. - -|`max_model_memory` or `maxModelMemory` -|`string` - The max memory able to be used by the forecast. Default is 20mb. - -|=== -[discrete] -=== ml.getBuckets +==== status +Retrieves the status of a previously submitted async search request given its ID. +https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html[Endpoint documentation] [source,ts] ---- -client.ml.getBuckets({ - job_id: string, - timestamp: string, - expand: boolean, - exclude_interim: boolean, - from: number, - size: number, - start: string, - end: string, - anomaly_score: number, - sort: string, - desc: boolean, - body: object -}) +client.asyncSearch.status(...) ---- -link:{ref}/ml-get-bucket.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - ID of the job to get bucket results from - -|`timestamp` -|`string` - The timestamp of the desired single bucket result - -|`expand` -|`boolean` - Include anomaly records - -|`exclude_interim` or `excludeInterim` -|`boolean` - Exclude interim results - -|`from` -|`number` - skips a number of buckets - -|`size` -|`number` - specifies a max number of buckets to get - -|`start` -|`string` - Start time filter for buckets - -|`end` -|`string` - End time filter for buckets - -|`anomaly_score` or `anomalyScore` -|`number` - Filter for the most anomalous buckets -|`sort` -|`string` - Sort buckets by a particular field - -|`desc` -|`boolean` - Set the sort direction - -|`body` -|`object` - Bucket selection details if not provided in URI - -|=== - -[discrete] -=== ml.getCalendarEvents +==== submit +Executes a search request asynchronously. +https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html[Endpoint documentation] [source,ts] ---- -client.ml.getCalendarEvents({ - calendar_id: string, - job_id: string, - start: string, - end: string, - from: number, - size: number -}) +client.asyncSearch.submit(...) 
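+
+// A hedged usage sketch: the index name and query are hypothetical; if the
+// search does not finish within the timeout, an ID for later retrieval is returned.
+await client.asyncSearch.submit({
+  index: 'my-index',
+  wait_for_completion_timeout: '2s',
+  body: { query: { match_all: {} } }
+})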
---- -link:{ref}/ml-get-calendar-event.html[Documentation] + -[cols=2*] -|=== -|`calendar_id` or `calendarId` -|`string` - The ID of the calendar containing the events - -|`job_id` or `jobId` -|`string` - Get events for the job. When this option is used calendar_id must be '_all' - -|`start` -|`string` - Get events after this time - -|`end` -|`string` - Get events before this time - -|`from` -|`number` - Skips a number of events -|`size` -|`number` - Specifies a max number of events to get - -|=== - -[discrete] -=== ml.getCalendars +=== cat +==== aliases +Shows information about currently configured aliases to indices including filter and routing infos. +https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html[Endpoint documentation] [source,ts] ---- -client.ml.getCalendars({ - calendar_id: string, - from: number, - size: number, - body: object -}) +client.cat.aliases(...) ---- -link:{ref}/ml-get-calendar.html[Documentation] + -[cols=2*] -|=== -|`calendar_id` or `calendarId` -|`string` - The ID of the calendar to fetch - -|`from` -|`number` - skips a number of calendars - -|`size` -|`number` - specifies a max number of calendars to get -|`body` -|`object` - The from and size parameters optionally sent in the body - -|=== - -[discrete] -=== ml.getCategories +==== allocation +Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using. +https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html[Endpoint documentation] [source,ts] ---- -client.ml.getCategories({ - job_id: string, - category_id: number, - from: number, - size: number, - partition_field_value: string, - body: object -}) +client.cat.allocation(...) ---- -link:{ref}/ml-get-category.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The name of the job - -|`category_id` or `categoryId` -|`number` - The identifier of the category definition of interest - -|`from` -|`number` - skips a number of categories -|`size` -|`number` - specifies a max number of categories to get - -|`partition_field_value` or `partitionFieldValue` -|`string` - Specifies the partition to retrieve categories for. This is optional, and should never be used for jobs where per-partition categorization is disabled. - -|`body` -|`object` - Category selection details if not provided in URI - -|=== - -[discrete] -=== ml.getDataFrameAnalytics +==== count +Provides quick access to the document count of the entire cluster, or individual indices. +https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html[Endpoint documentation] [source,ts] ---- -client.ml.getDataFrameAnalytics({ - id: string, - allow_no_match: boolean, - from: number, - size: number, - exclude_generated: boolean -}) +client.cat.count(...) ---- -link:{ref}/get-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no data frame analytics. 
(This includes `_all` string or when no data frame analytics have been specified) + -_Default:_ `true` - -|`from` -|`number` - skips a number of analytics - -|`size` -|`number` - specifies a max number of analytics to get + -_Default:_ `100` - -|`exclude_generated` or `excludeGenerated` -|`boolean` - Omits fields that are illegal to set on data frame analytics PUT -|=== - -[discrete] -=== ml.getDataFrameAnalyticsStats +==== fielddata +Shows how much heap memory is currently being used by fielddata on every data node in the cluster. +https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html[Endpoint documentation] [source,ts] ---- -client.ml.getDataFrameAnalyticsStats({ - id: string, - allow_no_match: boolean, - from: number, - size: number, - verbose: boolean -}) +client.cat.fielddata(...) ---- -link:{ref}/get-dfanalytics-stats.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics stats to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no data frame analytics. (This includes `_all` string or when no data frame analytics have been specified) + -_Default:_ `true` - -|`from` -|`number` - skips a number of analytics - -|`size` -|`number` - specifies a max number of analytics to get + -_Default:_ `100` -|`verbose` -|`boolean` - whether the stats response should be verbose - -|=== - -[discrete] -=== ml.getDatafeedStats +==== health +Returns a concise representation of the cluster health. +https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html[Endpoint documentation] [source,ts] ---- -client.ml.getDatafeedStats({ - datafeed_id: string, - allow_no_match: boolean, - allow_no_datafeeds: boolean -}) +client.cat.health(...) ---- -link:{ref}/ml-get-datafeed-stats.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeeds stats to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) - -|`allow_no_datafeeds` or `allowNoDatafeeds` -|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) + -WARNING: This parameter has been deprecated. - -|=== - -[discrete] -=== ml.getDatafeeds +==== help +Returns help for the Cat APIs. +https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html[Endpoint documentation] [source,ts] ---- -client.ml.getDatafeeds({ - datafeed_id: string, - allow_no_match: boolean, - allow_no_datafeeds: boolean, - exclude_generated: boolean -}) +client.cat.help(...) ---- -link:{ref}/ml-get-datafeed.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeeds to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) - -|`allow_no_datafeeds` or `allowNoDatafeeds` -|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) + -WARNING: This parameter has been deprecated. 
- -|`exclude_generated` or `excludeGenerated` -|`boolean` - Omits fields that are illegal to set on datafeed PUT - -|=== - -[discrete] -=== ml.getFilters +==== indices +Returns information about indices: number of primaries and replicas, document counts, disk size, ... +https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html[Endpoint documentation] [source,ts] ---- -client.ml.getFilters({ - filter_id: string, - from: number, - size: number -}) +client.cat.indices(...) ---- -link:{ref}/ml-get-filter.html[Documentation] + -[cols=2*] -|=== -|`filter_id` or `filterId` -|`string` - The ID of the filter to fetch - -|`from` -|`number` - skips a number of filters -|`size` -|`number` - specifies a max number of filters to get - -|=== - -[discrete] -=== ml.getInfluencers +==== master +Returns information about the master node. +https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-master.html[Endpoint documentation] [source,ts] ---- -client.ml.getInfluencers({ - job_id: string, - exclude_interim: boolean, - from: number, - size: number, - start: string, - end: string, - influencer_score: number, - sort: string, - desc: boolean, - body: object -}) +client.cat.master(...) ---- -link:{ref}/ml-get-influencer.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - Identifier for the anomaly detection job - -|`exclude_interim` or `excludeInterim` -|`boolean` - Exclude interim results - -|`from` -|`number` - skips a number of influencers - -|`size` -|`number` - specifies a max number of influencers to get - -|`start` -|`string` - start timestamp for the requested influencers - -|`end` -|`string` - end timestamp for the requested influencers - -|`influencer_score` or `influencerScore` -|`number` - influencer score threshold for the requested influencers -|`sort` -|`string` - sort field for the requested influencers - -|`desc` -|`boolean` - whether the results should be sorted in decending order - -|`body` -|`object` - Influencer selection criteria - -|=== - -[discrete] -=== ml.getJobStats +==== ml_data_frame_analytics +Gets configuration and usage information about data frame analytics jobs. +https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.ml.getJobStats({ - job_id: string, - allow_no_match: boolean, - allow_no_jobs: boolean -}) +client.cat.mlDataFrameAnalytics(...) ---- -link:{ref}/ml-get-job-stats.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the jobs stats to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified) - -|`allow_no_jobs` or `allowNoJobs` -|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified) + - -WARNING: This parameter has been deprecated. - -|=== -[discrete] -=== ml.getJobs +==== ml_datafeeds +Gets configuration and usage information about datafeeds. +https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-datafeeds.html[Endpoint documentation] [source,ts] ---- -client.ml.getJobs({ - job_id: string, - allow_no_match: boolean, - allow_no_jobs: boolean, - exclude_generated: boolean -}) +client.cat.mlDatafeeds(...) 
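+
+// A hedged usage sketch: `format: 'json'` is a standard cat API parameter
+// that switches the response from a text table to JSON.
+await client.cat.mlDatafeeds({ format: 'json' })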
----
-link:{ref}/ml-get-job.html[Documentation] +
-[cols=2*]
-|===
-|`job_id` or `jobId`
-|`string` - The ID of the jobs to fetch

-|`allow_no_match` or `allowNoMatch`
-|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified)
-
-|`allow_no_jobs` or `allowNoJobs`
-|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified) +
-
-WARNING: This parameter has been deprecated.
-
-|`exclude_generated` or `excludeGenerated`
-|`boolean` - Omits fields that are illegal to set on job PUT
-
-|===
-
-[discrete]
-=== ml.getModelSnapshots
+==== ml_jobs
+Gets configuration and usage information about anomaly detection jobs.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-anomaly-detectors.html[Endpoint documentation]
[source,ts]
----
-client.ml.getModelSnapshots({
-  job_id: string,
-  snapshot_id: string,
-  from: number,
-  size: number,
-  start: string,
-  end: string,
-  sort: string,
-  desc: boolean,
-  body: object
-})
+client.cat.mlJobs(...)
----
-link:{ref}/ml-get-snapshot.html[Documentation] +
-[cols=2*]
-|===
-|`job_id` or `jobId`
-|`string` - The ID of the job to fetch
-
-|`snapshot_id` or `snapshotId`
-|`string` - The ID of the snapshot to fetch
-
-|`from`
-|`number` - Skips a number of documents
-
-|`size`
-|`number` - The default number of documents returned in queries as a string.

-|`start`
-|`string` - The filter 'start' query parameter
+==== ml_trained_models
+Gets configuration and usage information about inference trained models.

-|`end`
-|`string` - The filter 'end' query parameter
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-trained-model.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.mlTrainedModels(...)
+----

-|`sort`
-|`string` - Name of the field to sort on
+==== nodeattrs
+Returns information about custom node attributes.

-|`desc`
-|`boolean` - True if the results should be sorted in descending order
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.nodeattrs(...)
+----

-|`body`
-|`object` - Model snapshot selection criteria
+==== nodes
+Returns basic statistics about performance of cluster nodes.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.nodes(...)
+----

-[discrete]
-=== ml.getOverallBuckets
+==== pending_tasks
+Returns a concise representation of the cluster pending tasks.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html[Endpoint documentation]
[source,ts]
----
-client.ml.getOverallBuckets({
-  job_id: string,
-  top_n: number,
-  bucket_span: string,
-  overall_score: number,
-  exclude_interim: boolean,
-  start: string,
-  end: string,
-  allow_no_match: boolean,
-  allow_no_jobs: boolean,
-  body: object
-})
+client.cat.pendingTasks(...)
----
-link:{ref}/ml-get-overall-buckets.html[Documentation] +
-[cols=2*]
-|===
-|`job_id` or `jobId`
-|`string` - The job IDs for which to calculate overall bucket results
-
-|`top_n` or `topN`
-|`number` - The number of top job bucket scores to be used in the overall_score calculation

-|`bucket_span` or `bucketSpan`
-|`string` - The span of the overall buckets. Defaults to the longest job bucket_span
+==== plugins
+Returns information about installed plugins across nodes.
-
-|`overall_score` or `overallScore`
-|`number` - Returns overall buckets with overall scores higher than this value
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.plugins(...)
+----

-|`exclude_interim` or `excludeInterim`
-|`boolean` - If true overall buckets that include interim buckets will be excluded
+==== recovery
+Returns information about index shard recoveries, both on-going and completed.

-|`start`
-|`string` - Returns overall buckets with timestamps after this time
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.recovery(...)
+----

-|`end`
-|`string` - Returns overall buckets with timestamps earlier than this time
+==== repositories
+Returns information about snapshot repositories registered in the cluster.

-|`allow_no_match` or `allowNoMatch`
-|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified)
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.repositories(...)
+----

-|`allow_no_jobs` or `allowNoJobs`
-|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified) +
+==== segments
+Provides low-level information about the segments in the shards of an index.

-WARNING: This parameter has been deprecated.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.segments(...)
+----

-|`body`
-|`object` - Overall bucket selection details if not provided in URI
+==== shards
+Provides a detailed view of shard allocation on nodes.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.shards(...)
+----

-[discrete]
-=== ml.getRecords
+==== snapshots
+Returns all snapshots in a specific repository.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html[Endpoint documentation]
[source,ts]
----
-client.ml.getRecords({
-  job_id: string,
-  exclude_interim: boolean,
-  from: number,
-  size: number,
-  start: string,
-  end: string,
-  record_score: number,
-  sort: string,
-  desc: boolean,
-  body: object
-})
+client.cat.snapshots(...)
----
-link:{ref}/ml-get-record.html[Documentation] +
-[cols=2*]
-|===
-|`job_id` or `jobId`
-|`string` - The ID of the job

-|`exclude_interim` or `excludeInterim`
-|`boolean` - Exclude interim results
+==== tasks
+Returns information about the tasks currently executing on one or more nodes in the cluster.

-|`from`
-|`number` - skips a number of records
+https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.tasks(...)
+----

-|`size`
-|`number` - specifies a max number of records to get
+==== templates
+Returns information about existing templates.

-|`start`
-|`string` - Start time filter for records
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.templates(...)
+----

-|`end`
-|`string` - End time filter for records
+==== thread_pool
+Returns cluster-wide thread pool statistics per node.
+By default the active, queue and rejected statistics are returned for all thread pools.
-
-|`record_score` or `recordScore`
-|`number` - Returns records with anomaly scores greater or equal than this value
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.threadPool(...)
+----

-|`sort`
-|`string` - Sort records by a particular field
+==== transforms
+Gets configuration and usage information about transforms.

-|`desc`
-|`boolean` - Set the sort direction
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-transforms.html[Endpoint documentation]
+[source,ts]
+----
+client.cat.transforms(...)
+----

-|`body`
-|`object` - Record selection criteria
+=== ccr
+==== delete_auto_follow_pattern
+Deletes auto-follow patterns.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-delete-auto-follow-pattern.html[Endpoint documentation]
+[source,ts]
+----
+client.ccr.deleteAutoFollowPattern(...)
+----

-[discrete]
-=== ml.getTrainedModels
+==== follow
+Creates a new follower index configured to follow the referenced leader index.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-follow.html[Endpoint documentation]
[source,ts]
----
-client.ml.getTrainedModels({
-  model_id: string,
-  allow_no_match: boolean,
-  include: string,
-  include_model_definition: boolean,
-  decompress_definition: boolean,
-  from: number,
-  size: number,
-  tags: string | string[],
-  exclude_generated: boolean
-})
+client.ccr.follow(...)
----
-link:{ref}/get-trained-models.html[Documentation] +
-[cols=2*]
-|===
-|`model_id` or `modelId`
-|`string` - The ID of the trained models to fetch

-|`allow_no_match` or `allowNoMatch`
-|`boolean` - Whether to ignore if a wildcard expression matches no trained models. (This includes `_all` string or when no trained models have been specified) +
-_Default:_ `true`
+==== follow_info
+Retrieves information about all follower indices, including parameters and status for each follower index.

-|`include`
-|`string` - A comma-separate list of fields to optionally include. Valid options are 'definition' and 'total_feature_importance'. Default is none.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-info.html[Endpoint documentation]
+[source,ts]
+----
+client.ccr.followInfo(...)
+----

-|`include_model_definition` or `includeModelDefinition`
-|`boolean` - Should the full model definition be included in the results. These definitions can be large. So be cautious when including them. Defaults to false. +
+==== follow_stats
+Retrieves follower stats. Returns shard-level stats about the following tasks associated with each shard for the specified indices.

-WARNING: This parameter has been deprecated.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-stats.html[Endpoint documentation]
+[source,ts]
+----
+client.ccr.followStats(...)
+----

-|`decompress_definition` or `decompressDefinition`
-|`boolean` - Should the model definition be decompressed into valid JSON or returned in a custom compressed format. Defaults to true. +
-_Default:_ `true`
+==== forget_follower
+Removes the follower retention leases from the leader.

-|`from`
-|`number` - skips a number of trained models
+https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-forget-follower.html[Endpoint documentation]
+[source,ts]
+----
+client.ccr.forgetFollower(...)
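+
+// A hedged usage sketch: every name below is a hypothetical placeholder for
+// the follower metadata whose retention leases the leader should drop.
+await client.ccr.forgetFollower({
+  index: 'leader-index',
+  body: {
+    follower_cluster: 'follower-cluster',
+    follower_index: 'follower-index',
+    follower_index_uuid: 'vYpnaWPRQB6mNspmoCeYyA',
+    leader_remote_cluster: 'remote-cluster'
+  }
+})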
+---- -|`size` -|`number` - specifies a max number of trained models to get + -_Default:_ `100` +==== get_auto_follow_pattern +Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. -|`tags` -|`string \| string[]` - A comma-separated list of tags that the model must have. +https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-auto-follow-pattern.html[Endpoint documentation] +[source,ts] +---- +client.ccr.getAutoFollowPattern(...) +---- -|`exclude_generated` or `excludeGenerated` -|`boolean` - Omits fields that are illegal to set on model PUT +==== pause_auto_follow_pattern +Pauses an auto-follow pattern -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-pause-auto-follow-pattern.html[Endpoint documentation] +[source,ts] +---- +client.ccr.pauseAutoFollowPattern(...) +---- -[discrete] -=== ml.getTrainedModelsStats +==== pause_follow +Pauses a follower index. The follower index will not fetch any additional operations from the leader index. +https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-pause-follow.html[Endpoint documentation] [source,ts] ---- -client.ml.getTrainedModelsStats({ - model_id: string, - allow_no_match: boolean, - from: number, - size: number -}) +client.ccr.pauseFollow(...) ---- -link:{ref}/get-trained-models-stats.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The ID of the trained models stats to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no trained models. (This includes `_all` string or when no trained models have been specified) + -_Default:_ `true` -|`from` -|`number` - skips a number of trained models +==== put_auto_follow_pattern +Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. -|`size` -|`number` - specifies a max number of trained models to get + -_Default:_ `100` +https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-auto-follow-pattern.html[Endpoint documentation] +[source,ts] +---- +client.ccr.putAutoFollowPattern(...) +---- -|=== +==== resume_auto_follow_pattern +Resumes an auto-follow pattern that has been paused -[discrete] -=== ml.inferTrainedModelDeployment -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-resume-auto-follow-pattern.html[Endpoint documentation] [source,ts] ---- -client.ml.inferTrainedModelDeployment({ - model_id: string, - timeout: string, - body: object -}) +client.ccr.resumeAutoFollowPattern(...) ---- -link:{ref}/infer-trained-model-deployment.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The unique identifier of the trained model. - -|`timeout` -|`string` - Controls the amount of time to wait for inference results. + -_Default:_ `10s` -|`body` -|`object` - The input text to be evaluated. +==== resume_follow +Resumes a follower index that has been paused -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-resume-follow.html[Endpoint documentation] +[source,ts] +---- +client.ccr.resumeFollow(...) +---- -[discrete] -=== ml.info +==== stats +Gets all stats related to cross-cluster replication. 
+https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-stats.html[Endpoint documentation]
[source,ts]
----
-client.ml.info()
+client.ccr.stats(...)
----
-link:{ref}/get-ml-info.html[Documentation] +
-
-[discrete]
-=== ml.openJob
+==== unfollow
+Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-unfollow.html[Endpoint documentation]
[source,ts]
----
-client.ml.openJob({
-  job_id: string
-})
+client.ccr.unfollow(...)
----
-link:{ref}/ml-open-job.html[Documentation] +
-[cols=2*]
-|===
-|`job_id` or `jobId`
-|`string` - The ID of the job to open
-|===
-
-[discrete]
-=== ml.postCalendarEvents
+=== cluster
+==== allocation_explain
+Provides explanations for shard allocations in the cluster.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html[Endpoint documentation]
[source,ts]
----
-client.ml.postCalendarEvents({
-  calendar_id: string,
-  body: object
-})
+client.cluster.allocationExplain(...)
----
-link:{ref}/ml-post-calendar-event.html[Documentation] +
-[cols=2*]
-|===
-|`calendar_id` or `calendarId`
-|`string` - The ID of the calendar to modify

-|`body`
-|`object` - A list of events
+==== delete_component_template
+Deletes a component template.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html[Endpoint documentation]
+[source,ts]
+----
+client.cluster.deleteComponentTemplate(...)
+----

-[discrete]
-=== ml.postData
+==== delete_voting_config_exclusions
+Clears cluster voting config exclusions.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html[Endpoint documentation]
[source,ts]
----
-client.ml.postData({
-  job_id: string,
-  reset_start: string,
-  reset_end: string,
-  body: object
-})
+client.cluster.deleteVotingConfigExclusions(...)
----
-link:{ref}/ml-post-data.html[Documentation] +
-[cols=2*]
-|===
-|`job_id` or `jobId`
-|`string` - The name of the job receiving the data

-|`reset_start` or `resetStart`
-|`string` - Optional parameter to specify the start of the bucket resetting range
+==== exists_component_template
+Returns information about whether a particular component template exists.

-|`reset_end` or `resetEnd`
-|`string` - Optional parameter to specify the end of the bucket resetting range
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html[Endpoint documentation]
+[source,ts]
+----
+client.cluster.existsComponentTemplate(...)
+----

-|`body`
-|`object` - The data to process
+==== get_component_template
+Returns one or more component templates.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html[Endpoint documentation]
+[source,ts]
+----
+client.cluster.getComponentTemplate(...)
+----

-[discrete]
-=== ml.previewDataFrameAnalytics
+==== get_settings
+Returns cluster settings.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-get-settings.html[Endpoint documentation]
[source,ts]
----
-client.ml.previewDataFrameAnalytics({
-  id: string,
-  body: object
-})
+client.cluster.getSettings(...)
----
-link:{ref}/preview-dfanalytics.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - The ID of the data frame analytics to preview

-|`body`
-|`object` - The data frame analytics config to preview
+==== health
+Returns basic information about the health of the cluster.
-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html[Endpoint documentation]
+[source,ts]
+----
+client.cluster.health(...)
+----

-[discrete]
-=== ml.previewDatafeed
+==== pending_tasks
+Returns a list of any cluster-level changes (e.g. create index, update mapping,
+allocate or fail shard) which have not yet been executed.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.html[Endpoint documentation]
[source,ts]
----
-client.ml.previewDatafeed({
-  datafeed_id: string,
-  body: object
-})
+client.cluster.pendingTasks(...)
----
-link:{ref}/ml-preview-datafeed.html[Documentation] +
-[cols=2*]
-|===
-|`datafeed_id` or `datafeedId`
-|`string` - The ID of the datafeed to preview

-|`body`
-|`object` - The datafeed config and job config with which to execute the preview
+==== post_voting_config_exclusions
+Updates the cluster voting config exclusions by node ids or node names.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html[Endpoint documentation]
+[source,ts]
+----
+client.cluster.postVotingConfigExclusions(...)
+----

-[discrete]
-=== ml.putCalendar
+==== put_component_template
+Creates or updates a component template.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html[Endpoint documentation]
[source,ts]
----
-client.ml.putCalendar({
-  calendar_id: string,
-  body: object
-})
+client.cluster.putComponentTemplate(...)
----
-link:{ref}/ml-put-calendar.html[Documentation] +
-[cols=2*]
-|===
-|`calendar_id` or `calendarId`
-|`string` - The ID of the calendar to create

-|`body`
-|`object` - The calendar details
+==== put_settings
+Updates the cluster settings.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html[Endpoint documentation]
+[source,ts]
+----
+client.cluster.putSettings(...)
+----

-[discrete]
-=== ml.putCalendarJob
+==== remote_info
+Returns information about configured remote clusters.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html[Endpoint documentation]
[source,ts]
----
-client.ml.putCalendarJob({
-  calendar_id: string,
-  job_id: string
-})
+client.cluster.remoteInfo(...)
----
-link:{ref}/ml-put-calendar-job.html[Documentation] +
-[cols=2*]
-|===
-|`calendar_id` or `calendarId`
-|`string` - The ID of the calendar to modify

-|`job_id` or `jobId`
-|`string` - The ID of the job to add to the calendar
+==== reroute
+Allows to manually change the allocation of individual shards in the cluster.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-reroute.html[Endpoint documentation]
+[source,ts]
+----
+client.cluster.reroute(...)
+----

-[discrete]
-=== ml.putDataFrameAnalytics
+==== state
+Returns comprehensive information about the state of the cluster.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html[Endpoint documentation]
[source,ts]
----
-client.ml.putDataFrameAnalytics({
-  id: string,
-  body: object
-})
+client.cluster.state(...)
----
-link:{ref}/put-dfanalytics.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - The ID of the data frame analytics to create

-|`body`
-|`object` - The data frame analytics configuration
+==== stats
+Returns a high-level overview of cluster statistics.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html[Endpoint documentation]
+[source,ts]
+----
+client.cluster.stats(...)
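+
+// A hedged usage sketch: no parameters are required; the stats cover every
+// node unless a filter is passed via the optional `node_id` parameter.
+await client.cluster.stats()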
+---- -[discrete] -=== ml.putDatafeed +=== dangling_indices +==== delete_dangling_index +Deletes the specified dangling index +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html[Endpoint documentation] [source,ts] ---- -client.ml.putDatafeed({ - datafeed_id: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - ignore_throttled: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - body: object -}) +client.danglingIndices.deleteDanglingIndex(...) ---- -link:{ref}/ml-put-datafeed.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeed to create -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Ignore unavailable indexes (default: false) +==== import_dangling_index +Imports the specified dangling index -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Ignore if the source indices expressions resolves to no concrete indices (default: true) +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html[Endpoint documentation] +[source,ts] +---- +client.danglingIndices.importDanglingIndex(...) +---- -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Ignore indices that are marked as throttled (default: true) +==== list_dangling_indices +Returns all dangling indices. -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether source index expressions should get expanded to open or closed indices (default: open) +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html[Endpoint documentation] +[source,ts] +---- +client.danglingIndices.listDanglingIndices(...) +---- -|`body` -|`object` - The datafeed config +=== enrich +==== delete_policy +Deletes an existing enrich policy and its enrich index. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-enrich-policy-api.html[Endpoint documentation] +[source,ts] +---- +client.enrich.deletePolicy(...) +---- -[discrete] -=== ml.putFilter +==== execute_policy +Creates the enrich index for an existing enrich policy. +https://www.elastic.co/guide/en/elasticsearch/reference/current/execute-enrich-policy-api.html[Endpoint documentation] [source,ts] ---- -client.ml.putFilter({ - filter_id: string, - body: object -}) +client.enrich.executePolicy(...) ---- -link:{ref}/ml-put-filter.html[Documentation] + -[cols=2*] -|=== -|`filter_id` or `filterId` -|`string` - The ID of the filter to create -|`body` -|`object` - The filter details +==== get_policy +Gets information about an enrich policy. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-enrich-policy-api.html[Endpoint documentation] +[source,ts] +---- +client.enrich.getPolicy(...) +---- -[discrete] -=== ml.putJob +==== put_policy +Creates a new enrich policy. +https://www.elastic.co/guide/en/elasticsearch/reference/current/put-enrich-policy-api.html[Endpoint documentation] [source,ts] ---- -client.ml.putJob({ - job_id: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - ignore_throttled: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - body: object -}) +client.enrich.putPolicy(...) ---- -link:{ref}/ml-put-job.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to create -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Ignore unavailable indexes (default: false). 
Only set if datafeed_config is provided. +==== stats +Gets enrich coordinator statistics and information about enrich policies that are currently executing. -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Ignore if the source indices expressions resolves to no concrete indices (default: true). Only set if datafeed_config is provided. +https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-stats-api.html[Endpoint documentation] +[source,ts] +---- +client.enrich.stats(...) +---- -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Ignore indices that are marked as throttled (default: true). Only set if datafeed_config is provided. +=== eql +==== delete +Deletes an async EQL search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether source index expressions should get expanded to open or closed indices (default: open). Only set if datafeed_config is provided. +https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html[Endpoint documentation] +[source,ts] +---- +client.eql.delete(...) +---- -|`body` -|`object` - The job +==== get +Returns async results from previously executed Event Query Language (EQL) search -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html[Endpoint documentation] +[source,ts] +---- +client.eql.get(...) +---- -[discrete] -=== ml.putTrainedModel +==== get_status +Returns the status of a previously submitted async or stored Event Query Language (EQL) search +https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html[Endpoint documentation] [source,ts] ---- -client.ml.putTrainedModel({ - model_id: string, - body: object -}) +client.eql.getStatus(...) ---- -link:{ref}/put-trained-models.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The ID of the trained models to store -|`body` -|`object` - The trained model configuration +==== search +Returns results matching a query expressed in Event Query Language (EQL) -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html[Endpoint documentation] +[source,ts] +---- +client.eql.search(...) +---- -[discrete] -=== ml.putTrainedModelAlias +=== features +==== get_features +Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot +https://www.elastic.co/guide/en/elasticsearch/reference/master/get-features-api.html[Endpoint documentation] [source,ts] ---- -client.ml.putTrainedModelAlias({ - model_alias: string, - model_id: string, - reassign: boolean -}) +client.features.getFeatures(...) ---- -link:{ref}/put-trained-models-aliases.html[Documentation] + -[cols=2*] -|=== -|`model_alias` or `modelAlias` -|`string` - The trained model alias to update - -|`model_id` or `modelId` -|`string` - The trained model where the model alias should be assigned -|`reassign` -|`boolean` - If the model_alias already exists and points to a separate model_id, this parameter must be true. Defaults to false. +==== reset_features +Resets the internal state of features, usually by deleting system indices -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +[source,ts] +---- +client.features.resetFeatures(...) 
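+
+// A hedged usage sketch: the call takes no parameters. It is destructive
+// (system indices are deleted), so it is meant for test clusters only.
+await client.features.resetFeatures()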
+----

-[discrete]
-=== ml.resetJob
+=== fleet
+==== global_checkpoints
+Returns the current global checkpoints for an index. This API is designed for internal use by the fleet server project.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/get-global-checkpoints.html[Endpoint documentation]
[source,ts]
----
-client.ml.resetJob({
-  job_id: string,
-  wait_for_completion: boolean
-})
+client.fleet.globalCheckpoints(...)
----
-link:{ref}/ml-reset-job.html[Documentation] +
-[cols=2*]
-|===
-|`job_id` or `jobId`
-|`string` - The ID of the job to reset

-|`wait_for_completion` or `waitForCompletion`
-|`boolean` - Should this request wait until the operation has completed before returning +
-_Default:_ `true`
+==== msearch
+Multi Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project.
+[source,ts]
+----
+client.fleet.msearch(...)
+----

-|===
+==== search
+Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project.
+[source,ts]
+----
+client.fleet.search(...)
+----

-[discrete]
-=== ml.revertModelSnapshot
+=== graph
+==== explore
+Explore extracted and summarized information about the documents and terms in an index.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html[Endpoint documentation]
[source,ts]
----
-client.ml.revertModelSnapshot({
-  job_id: string,
-  snapshot_id: string,
-  delete_intervening_results: boolean,
-  body: object
-})
+client.graph.explore(...)
----
-link:{ref}/ml-revert-snapshot.html[Documentation] +
-[cols=2*]
-|===
-|`job_id` or `jobId`
-|`string` - The ID of the job to fetch

-|`snapshot_id` or `snapshotId`
-|`string` - The ID of the snapshot to revert to
+=== ilm
+==== delete_lifecycle
+Deletes the specified lifecycle policy definition. A currently used policy cannot be deleted.

-|`delete_intervening_results` or `deleteInterveningResults`
-|`boolean` - Should we reset the results back to the time of the snapshot?
+https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html[Endpoint documentation]
+[source,ts]
+----
+client.ilm.deleteLifecycle(...)
+----

-|`body`
-|`object` - Reversion options
+==== explain_lifecycle
+Retrieves information about the index's current lifecycle state, such as the currently executing phase, action, and step.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-explain-lifecycle.html[Endpoint documentation]
+[source,ts]
+----
+client.ilm.explainLifecycle(...)
+----

-[discrete]
-=== ml.setUpgradeMode
+==== get_lifecycle
+Returns the specified policy definition. Includes the policy version and last modified date.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html[Endpoint documentation]
[source,ts]
----
-client.ml.setUpgradeMode({
-  enabled: boolean,
-  timeout: string
-})
+client.ilm.getLifecycle(...)
----
-link:{ref}/ml-set-upgrade-mode.html[Documentation] +
-[cols=2*]
-|===
-|`enabled`
-|`boolean` - Whether to enable upgrade_mode ML setting or not. Defaults to false.

-|`timeout`
-|`string` - Controls the time to wait before action times out. Defaults to 30 seconds
+==== get_status
+Retrieves the current index lifecycle management (ILM) status.
-|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-status.html[Endpoint documentation] +[source,ts] +---- +client.ilm.getStatus(...) +---- -[discrete] -=== ml.startDataFrameAnalytics +==== migrate_to_data_tiers +Migrates the indices and ILM policies away from custom node attribute allocation routing to data tiers routing +https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate-to-data-tiers.html[Endpoint documentation] [source,ts] ---- -client.ml.startDataFrameAnalytics({ - id: string, - timeout: string, - body: object -}) +client.ilm.migrateToDataTiers(...) ---- -link:{ref}/start-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to start -|`timeout` -|`string` - Controls the time to wait until the task has started. Defaults to 20 seconds +==== move_to_step +Manually moves an index into the specified step and executes that step. -|`body` -|`object` - The start data frame analytics parameters - -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-move-to-step.html[Endpoint documentation] +[source,ts] +---- +client.ilm.moveToStep(...) +---- -[discrete] -=== ml.startDatafeed +==== put_lifecycle +Creates a lifecycle policy +https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-put-lifecycle.html[Endpoint documentation] [source,ts] ---- -client.ml.startDatafeed({ - datafeed_id: string, - start: string, - end: string, - timeout: string, - body: object -}) +client.ilm.putLifecycle(...) ---- -link:{ref}/ml-start-datafeed.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeed to start -|`start` -|`string` - The start time from where the datafeed should begin +==== remove_policy +Removes the assigned lifecycle policy and stops managing the specified index -|`end` -|`string` - The end time when the datafeed should stop. When not set, the datafeed continues in real time +https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html[Endpoint documentation] +[source,ts] +---- +client.ilm.removePolicy(...) +---- -|`timeout` -|`string` - Controls the time to wait until a datafeed has started. Default to 20 seconds +==== retry +Retries executing the policy for an index that is in the ERROR step. -|`body` -|`object` - The start datafeed parameters +https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-retry-policy.html[Endpoint documentation] +[source,ts] +---- +client.ilm.retry(...) +---- -|=== +==== start +Start the index lifecycle management (ILM) plugin. -[discrete] -=== ml.startTrainedModelDeployment -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-start.html[Endpoint documentation] [source,ts] ---- -client.ml.startTrainedModelDeployment({ - model_id: string, - timeout: string -}) +client.ilm.start(...) ---- -link:{ref}/start-trained-model-deployment.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The unique identifier of the trained model. -|`timeout` -|`string` - Controls the amount of time to wait for the model to deploy. + -_Default:_ `20s` +==== stop +Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-stop.html[Endpoint documentation] +[source,ts] +---- +client.ilm.stop(...) +---- -[discrete] -=== ml.stopDataFrameAnalytics +=== indices +==== add_block +Adds a block to an index. 
+https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html[Endpoint documentation]
[source,ts]
----
-client.ml.stopDataFrameAnalytics({
-  id: string,
-  allow_no_match: boolean,
-  force: boolean,
-  timeout: string,
-  body: object
-})
+client.indices.addBlock(...)
----
-link:{ref}/stop-dfanalytics.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - The ID of the data frame analytics to stop
-
-|`allow_no_match` or `allowNoMatch`
-|`boolean` - Whether to ignore if a wildcard expression matches no data frame analytics. (This includes `_all` string or when no data frame analytics have been specified)

-|`force`
-|`boolean` - True if the data frame analytics should be forcefully stopped
+==== analyze
+Performs the analysis process on a text and returns the token breakdown of the text.

-|`timeout`
-|`string` - Controls the time to wait until the task has stopped. Defaults to 20 seconds
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.analyze(...)
+----

-|`body`
-|`object` - The stop data frame analytics parameters
+==== clear_cache
+Clears all or specific caches for one or more indices.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.clearCache(...)
+----

-[discrete]
-=== ml.stopDatafeed
+==== clone
+Clones an index.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html[Endpoint documentation]
[source,ts]
----
-client.ml.stopDatafeed({
-  datafeed_id: string,
-  allow_no_match: boolean,
-  allow_no_datafeeds: boolean,
-  force: boolean,
-  timeout: string,
-  body: object
-})
+client.indices.clone(...)
----
-link:{ref}/ml-stop-datafeed.html[Documentation] +
-[cols=2*]
-|===
-|`datafeed_id` or `datafeedId`
-|`string` - The ID of the datafeed to stop

-|`allow_no_match` or `allowNoMatch`
-|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified)
+==== close
+Closes an index.

-|`allow_no_datafeeds` or `allowNoDatafeeds`
-|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) +
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.close(...)
+----

-WARNING: This parameter has been deprecated.
+==== create
+Creates an index with optional settings and mappings.

-|`force`
-|`boolean` - True if the datafeed should be forcefully stopped.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.create(...)
+----

-|`timeout`
-|`string` - Controls the time to wait until a datafeed has stopped. Default to 20 seconds
+==== create_data_stream
+Creates a data stream.

-|`body`
-|`object` - The URL params optionally sent in the body
+https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.createDataStream(...)
+----

-|===
+==== data_streams_stats
+Provides statistics on operations happening in a data stream.
-[discrete] -=== ml.stopTrainedModelDeployment -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation] [source,ts] ---- -client.ml.stopTrainedModelDeployment({ - model_id: string -}) +client.indices.dataStreamsStats(...) ---- -link:{ref}/stop-trained-model-deployment.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The unique identifier of the trained model. - -|=== -[discrete] -=== ml.updateDataFrameAnalytics +==== delete +Deletes an index. +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html[Endpoint documentation] [source,ts] ---- -client.ml.updateDataFrameAnalytics({ - id: string, - body: object -}) +client.indices.delete(...) ---- -link:{ref}/update-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to update -|`body` -|`object` - The data frame analytics settings to update +==== delete_alias +Deletes an alias. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html[Endpoint documentation] +[source,ts] +---- +client.indices.deleteAlias(...) +---- -[discrete] -=== ml.updateDatafeed +==== delete_data_stream +Deletes a data stream. +https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation] [source,ts] ---- -client.ml.updateDatafeed({ - datafeed_id: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - ignore_throttled: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - body: object -}) +client.indices.deleteDataStream(...) ---- -link:{ref}/ml-update-datafeed.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeed to update -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Ignore unavailable indexes (default: false) +==== delete_index_template +Deletes an index template. -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Ignore if the source indices expressions resolves to no concrete indices (default: true) +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation] +[source,ts] +---- +client.indices.deleteIndexTemplate(...) +---- -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Ignore indices that are marked as throttled (default: true) +==== delete_template +Deletes an index template. -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether source index expressions should get expanded to open or closed indices (default: open) +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation] +[source,ts] +---- +client.indices.deleteTemplate(...) +---- -|`body` -|`object` - The datafeed update settings +==== disk_usage +Analyzes the disk usage of each field of an index or data stream -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html[Endpoint documentation] +[source,ts] +---- +client.indices.diskUsage(...) +---- -[discrete] -=== ml.updateFilter +==== exists +Returns information about whether a particular index exists. +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html[Endpoint documentation] [source,ts] ---- -client.ml.updateFilter({ - filter_id: string, - body: object -}) +client.indices.exists(...) 
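+// A hedged usage sketch (not part of the generated stub); 'my-index' is an assumed index name:
+client.indices.exists({ index: 'my-index' })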
---- -link:{ref}/ml-update-filter.html[Documentation] + -[cols=2*] -|=== -|`filter_id` or `filterId` -|`string` - The ID of the filter to update -|`body` -|`object` - The filter update +==== exists_alias +Returns information about whether a particular alias exists. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html[Endpoint documentation] +[source,ts] +---- +client.indices.existsAlias(...) +---- -[discrete] -=== ml.updateJob +==== exists_index_template +Returns information about whether a particular index template exists. +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation] [source,ts] ---- -client.ml.updateJob({ - job_id: string, - body: object -}) +client.indices.existsIndexTemplate(...) ---- -link:{ref}/ml-update-job.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to create -|`body` -|`object` - The job update settings +==== exists_template +Returns information about whether a particular index template exists. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation] +[source,ts] +---- +client.indices.existsTemplate(...) +---- -[discrete] -=== ml.updateModelSnapshot +==== field_usage_stats +Returns the field usage stats for each field of an index +https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html[Endpoint documentation] [source,ts] ---- -client.ml.updateModelSnapshot({ - job_id: string, - snapshot_id: string, - body: object -}) +client.indices.fieldUsageStats(...) ---- -link:{ref}/ml-update-snapshot.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to fetch -|`snapshot_id` or `snapshotId` -|`string` - The ID of the snapshot to update +==== flush +Performs the flush operation on one or more indices. -|`body` -|`object` - The model snapshot properties to update - -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html[Endpoint documentation] +[source,ts] +---- +client.indices.flush(...) +---- -[discrete] -=== ml.upgradeJobSnapshot +==== forcemerge +Performs the force merge operation on one or more indices. +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html[Endpoint documentation] [source,ts] ---- -client.ml.upgradeJobSnapshot({ - job_id: string, - snapshot_id: string, - timeout: string, - wait_for_completion: boolean -}) +client.indices.forcemerge(...) ---- -link:{ref}/ml-upgrade-job-model-snapshot.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job -|`snapshot_id` or `snapshotId` -|`string` - The ID of the snapshot +==== get +Returns information about one or more indices. -|`timeout` -|`string` - How long should the API wait for the job to be opened and the old snapshot to be loaded. +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html[Endpoint documentation] +[source,ts] +---- +client.indices.get(...) +---- -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should the request wait until the task is complete before responding to the caller. Default is false. +==== get_alias +Returns an alias. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html[Endpoint documentation] +[source,ts] +---- +client.indices.getAlias(...) +---- -[discrete] -=== ml.validate +==== get_data_stream +Returns data streams. 
+https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation] [source,ts] ---- -client.ml.validate({ - body: object -}) +client.indices.getDataStream(...) ---- -link:https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The job config -|=== - -[discrete] -=== ml.validateDetector +==== get_field_mapping +Returns mapping for one or more fields. +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html[Endpoint documentation] [source,ts] ---- -client.ml.validateDetector({ - body: object -}) +client.indices.getFieldMapping(...) ---- -link:https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The detector -|=== +==== get_index_template +Returns an index template. -[discrete] -=== monitoring.bulk -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation] [source,ts] ---- -client.monitoring.bulk({ - type: string, - system_id: string, - system_api_version: string, - interval: string, - body: object -}) +client.indices.getIndexTemplate(...) ---- -link:{ref}/monitor-elasticsearch-cluster.html[Documentation] + -[cols=2*] -|=== -|`type` -|`string` - Default document type for items which don't provide one + -WARNING: This parameter has been deprecated. +==== get_mapping +Returns mappings for one or more indices. -|`system_id` or `systemId` -|`string` - Identifier of the monitored system +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html[Endpoint documentation] +[source,ts] +---- +client.indices.getMapping(...) +---- -|`system_api_version` or `systemApiVersion` -|`string` - API Version of the monitored system +==== get_settings +Returns settings for one or more indices. -|`interval` -|`string` - Collection interval (e.g., '10s' or '10000ms') of the payload +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html[Endpoint documentation] +[source,ts] +---- +client.indices.getSettings(...) +---- -|`body` -|`object` - The operation definition and data (action-data pairs), separated by newlines +==== get_template +Returns an index template. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation] +[source,ts] +---- +client.indices.getTemplate(...) +---- -[discrete] -=== msearch +==== migrate_to_data_stream +Migrates an alias to a data stream +https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation] [source,ts] ---- -client.msearch({ - index: string | string[], - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - max_concurrent_searches: number, - typed_keys: boolean, - pre_filter_shard_size: number, - max_concurrent_shard_requests: number, - rest_total_hits_as_int: boolean, - ccs_minimize_roundtrips: boolean, - body: object -}) +client.indices.migrateToDataStream(...) 
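+// A hedged usage sketch; 'my-logs' is an assumed index alias eligible for migration:
+client.indices.migrateToDataStream({ name: 'my-logs' })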
---- -link:{ref}/search-multi-search.html[Documentation] + -{jsclient}/msearch_examples.html[Code Example] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to use as default - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type -|`max_concurrent_searches` or `maxConcurrentSearches` -|`number` - Controls the maximum number of concurrent searches the multi search api will execute +==== modify_data_stream +Modifies a data stream -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response +https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation] +[source,ts] +---- +client.indices.modifyDataStream(...) +---- -|`pre_filter_shard_size` or `preFilterShardSize` -|`number` - A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint. +==== open +Opens an index. -|`max_concurrent_shard_requests` or `maxConcurrentShardRequests` -|`number` - The number of concurrent shard requests each sub search executes concurrently per node. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests + -_Default:_ `5` +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html[Endpoint documentation] +[source,ts] +---- +client.indices.open(...) +---- -|`rest_total_hits_as_int` or `restTotalHitsAsInt` -|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response +==== promote_data_stream +Promotes a data stream from a replicated data stream managed by CCR to a regular data stream -|`ccs_minimize_roundtrips` or `ccsMinimizeRoundtrips` -|`boolean` - Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution + -_Default:_ `true` +https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation] +[source,ts] +---- +client.indices.promoteDataStream(...) +---- -|`body` -|`object` - The request definitions (metadata-search request definition pairs), separated by newlines +==== put_alias +Creates or updates an alias. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html[Endpoint documentation] +[source,ts] +---- +client.indices.putAlias(...) +---- -[discrete] -=== msearchTemplate +==== put_index_template +Creates or updates an index template. +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation] [source,ts] ---- -client.msearchTemplate({ - index: string | string[], - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - typed_keys: boolean, - max_concurrent_searches: number, - rest_total_hits_as_int: boolean, - ccs_minimize_roundtrips: boolean, - body: object -}) +client.indices.putIndexTemplate(...) 
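+// A hedged usage sketch; the template name and index pattern are illustrative:
+client.indices.putIndexTemplate({
+  name: 'my-template',
+  body: { index_patterns: ['my-index-*'], priority: 1 }
+})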
---- -link:{ref}/search-multi-search.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to use as default -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type +==== put_mapping +Updates the index mappings. -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -|`max_concurrent_searches` or `maxConcurrentSearches` -|`number` - Controls the maximum number of concurrent searches the multi search api will execute +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html[Endpoint documentation] +[source,ts] +---- +client.indices.putMapping(...) +---- -|`rest_total_hits_as_int` or `restTotalHitsAsInt` -|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response +==== put_settings +Updates the index settings. -|`ccs_minimize_roundtrips` or `ccsMinimizeRoundtrips` -|`boolean` - Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution + -_Default:_ `true` +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html[Endpoint documentation] +[source,ts] +---- +client.indices.putSettings(...) +---- -|`body` -|`object` - The request definitions (metadata-search request definition pairs), separated by newlines +==== put_template +Creates or updates an index template. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation] +[source,ts] +---- +client.indices.putTemplate(...) +---- -[discrete] -=== mtermvectors +==== recovery +Returns information about ongoing index shard recoveries. +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html[Endpoint documentation] [source,ts] ---- -client.mtermvectors({ - index: string, - ids: string | string[], - term_statistics: boolean, - field_statistics: boolean, - fields: string | string[], - offsets: boolean, - positions: boolean, - payloads: boolean, - preference: string, - routing: string, - realtime: boolean, - version: number, - version_type: 'internal' | 'external' | 'external_gte', - body: object -}) +client.indices.recovery(...) ---- -link:{ref}/docs-multi-termvectors.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The index in which the document resides. - -|`ids` -|`string \| string[]` - A comma-separated list of documents ids. You must define ids as parameter or set "ids" or "docs" in the request body -|`term_statistics` or `termStatistics` -|`boolean` - Specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +==== refresh +Performs the refresh operation in one or more indices. -|`field_statistics` or `fieldStatistics` -|`boolean` - Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". + -_Default:_ `true` +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html[Endpoint documentation] +[source,ts] +---- +client.indices.refresh(...) +---- -|`fields` -|`string \| string[]` - A comma-separated list of fields to return. 
Applies to all returned documents unless otherwise specified in body "params" or "docs".
+==== reload_search_analyzers
+Reloads an index's search analyzers and their resources.

-|`offsets`
-|`boolean` - Specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +
-_Default:_ `true`
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.reloadSearchAnalyzers(...)
+----

-|`positions`
-|`boolean` - Specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +
-_Default:_ `true`
+==== resolve_index
+Returns information about any matching indices, aliases, and data streams.

-|`payloads`
-|`boolean` - Specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +
-_Default:_ `true`
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-index-api.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.resolveIndex(...)
+----

-|`preference`
-|`string` - Specify the node or shard the operation should be performed on (default: random) .Applies to all returned documents unless otherwise specified in body "params" or "docs".
+==== rollover
+Updates an alias to point to a new index when the existing index
+is considered to be too large or too old.

-|`routing`
-|`string` - Specific routing value. Applies to all returned documents unless otherwise specified in body "params" or "docs".
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.rollover(...)
+----

-|`realtime`
-|`boolean` - Specifies if requests are real-time as opposed to near-real-time (default: true).
+==== segments
+Provides low-level information about segments in a Lucene index.

-|`version`
-|`number` - Explicit version number for concurrency control
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.segments(...)
+----

-|`version_type` or `versionType`
-|`'internal' \| 'external' \| 'external_gte'` - Specific version type
+==== shard_stores
+Provides store information for shard copies of indices.

-|`body`
-|`object` - Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.shardStores(...)
+----

-|===
+==== shrink
+Allows you to shrink an existing index into a new index with fewer primary shards.

-[discrete]
-=== nodes.clearMeteringArchive
-*Stability:* experimental
+https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html[Endpoint documentation]
[source,ts]
----
-client.nodes.clearMeteringArchive({
-  node_id: string | string[],
-  max_archive_version: number
-})
+client.indices.shrink(...)
----
-link:{ref}/clear-repositories-metering-archive-api.html[Documentation] +
-[cols=2*]
-|===
-|`node_id` or `nodeId`
-|`string \| string[]` - Comma-separated list of node IDs or names used to limit returned information.

-|`max_archive_version` or `maxArchiveVersion`
-|`number` - Specifies the maximum archive_version to be cleared from the archive.
+==== simulate_index_template +Simulate matching the given index name against the index templates in the system -|=== - -[discrete] -=== nodes.getMeteringInfo -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation] [source,ts] ---- -client.nodes.getMeteringInfo({ - node_id: string | string[] -}) +client.indices.simulateIndexTemplate(...) ---- -link:{ref}/get-repositories-metering-api.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information. - -|=== -[discrete] -=== nodes.hotThreads +==== simulate_template +Simulate resolving the given template name or body +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation] [source,ts] ---- -client.nodes.hotThreads({ - node_id: string | string[], - interval: string, - snapshots: number, - threads: number, - ignore_idle_threads: boolean, - type: 'cpu' | 'wait' | 'block', - timeout: string -}) +client.indices.simulateTemplate(...) ---- -link:{ref}/cluster-nodes-hot-threads.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes -|`interval` -|`string` - The interval for the second sampling of threads +==== split +Allows you to split an existing index into a new index with more primary shards. -|`snapshots` -|`number` - Number of samples of thread stacktrace (default: 10) - -|`threads` -|`number` - Specify the number of threads to provide information for (default: 3) +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html[Endpoint documentation] +[source,ts] +---- +client.indices.split(...) +---- -|`ignore_idle_threads` or `ignoreIdleThreads` -|`boolean` - Don't show threads that are in known-idle places, such as waiting on a socket select or pulling from an empty task queue (default: true) +==== stats +Provides statistics on operations happening in an index. -|`type` -|`'cpu' \| 'wait' \| 'block'` - The type to sample (default: cpu) +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html[Endpoint documentation] +[source,ts] +---- +client.indices.stats(...) +---- -|`timeout` -|`string` - Explicit operation timeout +==== unfreeze +Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html[Endpoint documentation] +[source,ts] +---- +client.indices.unfreeze(...) +---- -[discrete] -=== nodes.info +==== update_aliases +Updates index aliases. +https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html[Endpoint documentation] [source,ts] ---- -client.nodes.info({ - node_id: string | string[], - metric: string | string[], - flat_settings: boolean, - timeout: string -}) +client.indices.updateAliases(...) 
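+// A hedged usage sketch; 'my-index' and 'my-alias' are illustrative names:
+client.indices.updateAliases({
+  body: { actions: [{ add: { index: 'my-index', alias: 'my-alias' } }] }
+})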
---- -link:{ref}/cluster-nodes-info.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes -|`metric` -|`string \| string[]` - A comma-separated list of metrics you wish returned. Leave empty to return all. +==== validate_query +Allows a user to validate a potentially expensive query without executing it. -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`timeout` -|`string` - Explicit operation timeout - -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html[Endpoint documentation] +[source,ts] +---- +client.indices.validateQuery(...) +---- -[discrete] -=== nodes.reloadSecureSettings +=== ingest +==== delete_pipeline +Deletes a pipeline. +https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html[Endpoint documentation] [source,ts] ---- -client.nodes.reloadSecureSettings({ - node_id: string | string[], - timeout: string, - body: object -}) +client.ingest.deletePipeline(...) ---- -link:{ref}/secure-settings.html#reloadable-secure-settings[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs to span the reload/reinit call. Should stay empty because reloading usually involves all cluster nodes. - -|`timeout` -|`string` - Explicit operation timeout -|`body` -|`object` - An object containing the password for the elasticsearch keystore +==== geo_ip_stats +Returns statistical information about geoip databases -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/geoip-stats-api.html[Endpoint documentation] +[source,ts] +---- +client.ingest.geoIpStats(...) +---- -[discrete] -=== nodes.stats +==== get_pipeline +Returns a pipeline. +https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html[Endpoint documentation] [source,ts] ---- -client.nodes.stats({ - node_id: string | string[], - metric: string | string[], - index_metric: string | string[], - completion_fields: string | string[], - fielddata_fields: string | string[], - fields: string | string[], - groups: boolean, - level: 'indices' | 'node' | 'shards', - types: string | string[], - timeout: string, - include_segment_file_sizes: boolean, - include_unloaded_segments: boolean -}) +client.ingest.getPipeline(...) ---- -link:{ref}/cluster-nodes-stats.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes -|`metric` -|`string \| string[]` - Limit the information returned to the specified metrics +==== processor_grok +Returns a list of the built-in patterns. -|`index_metric` or `indexMetric` -|`string \| string[]` - Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) metric isn't specified. +https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html#grok-processor-rest-get[Endpoint documentation] +[source,ts] +---- +client.ingest.processorGrok(...) 
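+// A hedged usage sketch; the call takes no parameters and resolves to the built-in grok patterns:
+client.ingest.processorGrok()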
+---- -|`completion_fields` or `completionFields` -|`string \| string[]` - A comma-separated list of fields for the `completion` index metric (supports wildcards) +==== put_pipeline +Creates or updates a pipeline. -|`fielddata_fields` or `fielddataFields` -|`string \| string[]` - A comma-separated list of fields for the `fielddata` index metric (supports wildcards) +https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.html[Endpoint documentation] +[source,ts] +---- +client.ingest.putPipeline(...) +---- -|`fields` -|`string \| string[]` - A comma-separated list of fields for `fielddata` and `completion` index metric (supports wildcards) +==== simulate +Allows to simulate a pipeline with example documents. -|`groups` -|`boolean` - A comma-separated list of search groups for `search` index metric +https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html[Endpoint documentation] +[source,ts] +---- +client.ingest.simulate(...) +---- -|`level` -|`'indices' \| 'node' \| 'shards'` - Return indices stats aggregated at index, node or shard level + -_Default:_ `node` +=== license +==== delete +Deletes licensing information for the cluster -|`types` -|`string \| string[]` - A comma-separated list of document types for the `indexing` index metric +https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html[Endpoint documentation] +[source,ts] +---- +client.license.delete(...) +---- -|`timeout` -|`string` - Explicit operation timeout +==== get +Retrieves licensing information for the cluster -|`include_segment_file_sizes` or `includeSegmentFileSizes` -|`boolean` - Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested) +https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html[Endpoint documentation] +[source,ts] +---- +client.license.get(...) +---- -|`include_unloaded_segments` or `includeUnloadedSegments` -|`boolean` - If set to true segment stats will include stats for segments that are not currently loaded into memory +==== get_basic_status +Retrieves information about the status of the basic license. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html[Endpoint documentation] +[source,ts] +---- +client.license.getBasicStatus(...) +---- -[discrete] -=== nodes.usage +==== get_trial_status +Retrieves information about the status of the trial license. +https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html[Endpoint documentation] [source,ts] ---- -client.nodes.usage({ - node_id: string | string[], - metric: string | string[], - timeout: string -}) +client.license.getTrialStatus(...) ---- -link:{ref}/cluster-nodes-usage.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`metric` -|`string \| string[]` - Limit the information returned to the specified metrics -|`timeout` -|`string` - Explicit operation timeout +==== post +Updates the license for the cluster. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html[Endpoint documentation] +[source,ts] +---- +client.license.post(...) +---- -[discrete] -=== openPointInTime +==== post_start_basic +Starts an indefinite basic license. 
+https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html[Endpoint documentation]
[source,ts]
----
-client.openPointInTime({
-  index: string | string[],
-  preference: string,
-  routing: string,
-  ignore_unavailable: boolean,
-  expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all',
-  keep_alive: string
-})
+client.license.postStartBasic(...)
----
-link:{ref}/point-in-time-api.html[Documentation] +
-[cols=2*]
-|===
-|`index`
-|`string \| string[]` - A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices

-|`preference`
-|`string` - Specify the node or shard the operation should be performed on (default: random)
+==== post_start_trial
+Starts a limited-time trial license.

-|`routing`
-|`string` - Specific routing value
+https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html[Endpoint documentation]
+[source,ts]
+----
+client.license.postStartTrial(...)
+----

-|`ignore_unavailable` or `ignoreUnavailable`
-|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed)
+=== logstash
+==== delete_pipeline
+Deletes Logstash Pipelines used by Central Management.

-|`expand_wildcards` or `expandWildcards`
-|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. +
-_Default:_ `open`
+https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-delete-pipeline.html[Endpoint documentation]
+[source,ts]
+----
+client.logstash.deletePipeline(...)
+----

-|`keep_alive` or `keepAlive`
-|`string` - Specific the time to live for the point in time
+==== get_pipeline
+Retrieves Logstash Pipelines used by Central Management.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-get-pipeline.html[Endpoint documentation]
+[source,ts]
+----
+client.logstash.getPipeline(...)
+----

-[discrete]
-=== ping
+==== put_pipeline
+Adds and updates Logstash Pipelines used for Central Management.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-put-pipeline.html[Endpoint documentation]
[source,ts]
----
-client.ping()
+client.logstash.putPipeline(...)
----
-link:{ref}/index.html[Documentation] +
-
-[discrete]
-=== putScript
+=== migration
+==== deprecations
+Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html[Endpoint documentation]
[source,ts]
----
-client.putScript({
-  id: string,
-  context: string,
-  timeout: string,
-  master_timeout: string,
-  body: object
-})
+client.migration.deprecations(...)
----
-link:{ref}/modules-scripting.html[Documentation] +
-[cols=2*]
-|===
-|`id`
-|`string` - Script ID

-|`context`
-|`string` - Script context

-|`timeout`
-|`string` - Explicit operation timeout
+==== get_feature_upgrade_status
+Finds out whether system features need to be upgraded or not.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html[Endpoint documentation]
+[source,ts]
+----
+client.migration.getFeatureUpgradeStatus(...)
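+// A hedged usage sketch; the call takes no parameters:
+client.migration.getFeatureUpgradeStatus()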
+---- -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master +==== post_feature_upgrade +Begin upgrades for system features -|`body` -|`object` - The document +https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html[Endpoint documentation] +[source,ts] +---- +client.migration.postFeatureUpgrade(...) +---- -|=== +=== ml +==== close_job +Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. -[discrete] -=== rankEval -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-close-job.html[Endpoint documentation] [source,ts] ---- -client.rankEval({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - body: object -}) +client.ml.closeJob(...) ---- -link:{ref}/search-rank-eval.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) +==== delete_calendar +Deletes a calendar. -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar.html[Endpoint documentation] +[source,ts] +---- +client.ml.deleteCalendar(...) +---- -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` +==== delete_calendar_event +Deletes scheduled events from a calendar. -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type +https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-event.html[Endpoint documentation] +[source,ts] +---- +client.ml.deleteCalendarEvent(...) +---- -|`body` -|`object` - The ranking evaluation search definition, including search requests, document ratings and ranking metric definition. +==== delete_calendar_job +Deletes anomaly detection jobs from a calendar. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-job.html[Endpoint documentation] +[source,ts] +---- +client.ml.deleteCalendarJob(...) +---- -[discrete] -=== reindex +==== delete_data_frame_analytics +Deletes an existing data frame analytics job. +https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.reindex({ - refresh: boolean, - timeout: string, - wait_for_active_shards: string, - wait_for_completion: boolean, - requests_per_second: number, - scroll: string, - slices: number|string, - max_docs: number, - body: object -}) +client.ml.deleteDataFrameAnalytics(...) ---- -link:{ref}/docs-reindex.html[Documentation] + -{jsclient}/reindex_examples.html[Code Example] + -[cols=2*] -|=== -|`refresh` -|`boolean` - Should the affected indexes be refreshed? -|`timeout` -|`string` - Time each individual bulk request should wait for shards that are unavailable. 
+ -_Default:_ `1m` +==== delete_datafeed +Deletes an existing datafeed. -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the reindex operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should the request should block until the reindex is complete. + -_Default:_ `true` +https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-datafeed.html[Endpoint documentation] +[source,ts] +---- +client.ml.deleteDatafeed(...) +---- -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle to set on this request in sub-requests per second. -1 means no throttle. +==== delete_expired_data +Deletes expired and unused machine learning data. -|`scroll` -|`string` - Control how long to keep the search context alive + -_Default:_ `5m` +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-expired-data.html[Endpoint documentation] +[source,ts] +---- +client.ml.deleteExpiredData(...) +---- -|`slices` -|`number\|string` - The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`. + -_Default:_ `1` +==== delete_filter +Deletes a filter. -|`max_docs` or `maxDocs` -|`number` - Maximum number of documents to process (default: all documents) +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-filter.html[Endpoint documentation] +[source,ts] +---- +client.ml.deleteFilter(...) +---- -|`body` -|`object` - The search definition using the Query DSL and the prototype for the index request. +==== delete_forecast +Deletes forecasts from a machine learning job. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html[Endpoint documentation] +[source,ts] +---- +client.ml.deleteForecast(...) +---- -[discrete] -=== reindexRethrottle +==== delete_job +Deletes an existing anomaly detection job. +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html[Endpoint documentation] [source,ts] ---- -client.reindexRethrottle({ - task_id: string, - requests_per_second: number -}) +client.ml.deleteJob(...) ---- -link:{ref}/docs-reindex.html[Documentation] + -[cols=2*] -|=== -|`task_id` or `taskId` -|`string` - The task id to rethrottle -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle to set on this request in floating sub-requests per second. -1 means set no throttle. +==== delete_model_snapshot +Deletes an existing model snapshot. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html[Endpoint documentation] +[source,ts] +---- +client.ml.deleteModelSnapshot(...) +---- -[discrete] -=== renderSearchTemplate +==== delete_trained_model +Deletes an existing trained inference model that is currently not referenced by an ingest pipeline. +https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models.html[Endpoint documentation] [source,ts] ---- -client.renderSearchTemplate({ - id: string, - body: object -}) +client.ml.deleteTrainedModel(...) 
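+// A hedged usage sketch; 'my-model' is an assumed trained model ID:
+client.ml.deleteTrainedModel({ model_id: 'my-model' })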
---- -link:{ref}/render-search-template-api.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The id of the stored search template -|`body` -|`object` - The search definition template and its params +==== delete_trained_model_alias +Deletes a model alias that refers to the trained model -|=== - -[discrete] -=== rollup.deleteJob -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models-aliases.html[Endpoint documentation] [source,ts] ---- -client.rollup.deleteJob({ - id: string -}) +client.ml.deleteTrainedModelAlias(...) ---- -link:{ref}/rollup-delete-job.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the job to delete -|=== +==== estimate_model_memory +Estimates the model memory -[discrete] -=== rollup.getJobs -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-apis.html[Endpoint documentation] [source,ts] ---- -client.rollup.getJobs({ - id: string -}) +client.ml.estimateModelMemory(...) ---- -link:{ref}/rollup-get-job.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the job(s) to fetch. Accepts glob patterns, or left blank for all jobs -|=== +==== evaluate_data_frame +Evaluates the data frame analytics for an annotated index. -[discrete] -=== rollup.getRollupCaps -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/evaluate-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.rollup.getRollupCaps({ - id: string -}) +client.ml.evaluateDataFrame(...) ---- -link:{ref}/rollup-get-rollup-caps.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the index to check rollup capabilities on, or left blank for all jobs -|=== +==== explain_data_frame_analytics +Explains a data frame analytics config. -[discrete] -=== rollup.getRollupIndexCaps -*Stability:* experimental +http://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.rollup.getRollupIndexCaps({ - index: string -}) +client.ml.explainDataFrameAnalytics(...) ---- -link:{ref}/rollup-get-rollup-index-caps.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The rollup index or index pattern to obtain rollup capabilities from. -|=== +==== flush_job +Forces any buffered data to be processed by the job. -[discrete] -=== rollup.putJob -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html[Endpoint documentation] [source,ts] ---- -client.rollup.putJob({ - id: string, - body: object -}) +client.ml.flushJob(...) ---- -link:{ref}/rollup-put-job.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the job to create - -|`body` -|`object` - The job configuration -|=== +==== forecast +Predicts the future behavior of a time series by using its historical behavior. -[discrete] -=== rollup.rollup -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-forecast.html[Endpoint documentation] [source,ts] ---- -client.rollup.rollup({ - index: string, - rollup_index: string, - body: object -}) +client.ml.forecast(...) ---- -link:{ref}/xpack-rollup.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The index to roll up -|`rollup_index` or `rollupIndex` -|`string` - The name of the rollup index to create +==== get_buckets +Retrieves anomaly detection job results for one or more buckets. 
-|`body` -|`object` - The rollup configuration +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html[Endpoint documentation] +[source,ts] +---- +client.ml.getBuckets(...) +---- -|=== +==== get_calendar_events +Retrieves information about the scheduled events in calendars. -[discrete] -=== rollup.rollupSearch -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-event.html[Endpoint documentation] [source,ts] ---- -client.rollup.rollupSearch({ - index: string | string[], - type: string, - typed_keys: boolean, - rest_total_hits_as_int: boolean, - body: object -}) +client.ml.getCalendarEvents(...) ---- -link:{ref}/rollup-search.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - The indices or index-pattern(s) (containing rollup or regular data) that should be searched - -|`type` -|`string` - The doc type inside the index + -WARNING: This parameter has been deprecated. +==== get_calendars +Retrieves configuration information for calendars. -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.html[Endpoint documentation] +[source,ts] +---- +client.ml.getCalendars(...) +---- -|`rest_total_hits_as_int` or `restTotalHitsAsInt` -|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response +==== get_categories +Retrieves anomaly detection job results for one or more categories. -|`body` -|`object` - The search request body +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html[Endpoint documentation] +[source,ts] +---- +client.ml.getCategories(...) +---- -|=== +==== get_data_frame_analytics +Retrieves configuration information for data frame analytics jobs. -[discrete] -=== rollup.startJob -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.rollup.startJob({ - id: string -}) +client.ml.getDataFrameAnalytics(...) ---- -link:{ref}/rollup-start-job.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the job to start -|=== +==== get_data_frame_analytics_stats +Retrieves usage information for data frame analytics jobs. -[discrete] -=== rollup.stopJob -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics-stats.html[Endpoint documentation] [source,ts] ---- -client.rollup.stopJob({ - id: string, - wait_for_completion: boolean, - timeout: string -}) +client.ml.getDataFrameAnalyticsStats(...) ---- -link:{ref}/rollup-stop-job.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the job to stop -|`wait_for_completion` or `waitForCompletion` -|`boolean` - True if the API should block until the job has fully stopped, false if should be executed async. Defaults to false. +==== get_datafeed_stats +Retrieves usage information for datafeeds. -|`timeout` -|`string` - Block for (at maximum) the specified duration while waiting for the job to stop. Defaults to 30s. +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html[Endpoint documentation] +[source,ts] +---- +client.ml.getDatafeedStats(...) +---- -|=== +==== get_datafeeds +Retrieves configuration information for datafeeds. 
-[discrete] -=== scriptsPainlessExecute -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html[Endpoint documentation] [source,ts] ---- -client.scriptsPainlessExecute({ - body: object -}) +client.ml.getDatafeeds(...) ---- -link:https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The script to execute - -|=== -[discrete] -=== scroll +==== get_filters +Retrieves filters. +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.html[Endpoint documentation] [source,ts] ---- -client.scroll({ - scroll_id: string, - scroll: string, - rest_total_hits_as_int: boolean, - body: object -}) +client.ml.getFilters(...) ---- -link:{ref}/search-request-body.html#request-body-search-scroll[Documentation] + -{jsclient}/scroll_examples.html[Code Example] + -[cols=2*] -|=== -|`scroll_id` or `scrollId` -|`string` - The scroll ID + - -WARNING: This parameter has been deprecated. -|`scroll` -|`string` - Specify how long a consistent view of the index should be maintained for scrolled search +==== get_influencers +Retrieves anomaly detection job results for one or more influencers. -|`rest_total_hits_as_int` or `restTotalHitsAsInt` -|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html[Endpoint documentation] +[source,ts] +---- +client.ml.getInfluencers(...) +---- -|`body` -|`object` - The scroll ID if not passed by URL or query parameter. +==== get_job_stats +Retrieves usage information for anomaly detection jobs. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html[Endpoint documentation] +[source,ts] +---- +client.ml.getJobStats(...) +---- -[discrete] -=== search +==== get_jobs +Retrieves configuration information for anomaly detection jobs. 
+https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html[Endpoint documentation] [source,ts] ---- -client.search({ - index: string | string[], - analyzer: string, - analyze_wildcard: boolean, - ccs_minimize_roundtrips: boolean, - default_operator: 'AND' | 'OR', - df: string, - explain: boolean, - stored_fields: string | string[], - docvalue_fields: string | string[], - from: number, - ignore_unavailable: boolean, - ignore_throttled: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - lenient: boolean, - preference: string, - q: string, - routing: string | string[], - scroll: string, - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - size: number, - sort: string | string[], - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - terminate_after: number, - stats: string | string[], - suggest_field: string, - suggest_mode: 'missing' | 'popular' | 'always', - suggest_size: number, - suggest_text: string, - timeout: string, - track_scores: boolean, - track_total_hits: boolean|long, - allow_partial_search_results: boolean, - typed_keys: boolean, - version: boolean, - seq_no_primary_term: boolean, - request_cache: boolean, - batched_reduce_size: number, - max_concurrent_shard_requests: number, - pre_filter_shard_size: number, - rest_total_hits_as_int: boolean, - min_compatible_shard_node: string, - body: object -}) ----- -link:{ref}/search-search.html[Documentation] + -{jsclient}/search_examples.html[Code Example] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`analyzer` -|`string` - The analyzer to use for the query string - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false) - -|`ccs_minimize_roundtrips` or `ccsMinimizeRoundtrips` -|`boolean` - Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution + -_Default:_ `true` - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The field to use as default where no field prefix is given in the query string - -|`explain` -|`boolean` - Specify whether to return detailed information about score computation as part of a hit - -|`stored_fields` or `storedFields` -|`string \| string[]` - A comma-separated list of stored fields to return as part of a hit - -|`docvalue_fields` or `docvalueFields` -|`string \| string[]` - A comma-separated list of fields to return as the docvalue representation of a field for each hit - -|`from` -|`number` - Starting offset (default: 0) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Whether specified concrete, expanded or aliased indices should be ignored when throttled - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`q` -|`string` - Query in the Lucene query string syntax - -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values - -|`scroll` -|`string` - Specify how long a consistent view of the index should be maintained for scrolled search - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type +client.ml.getJobs(...) +---- -|`size` -|`number` - Number of hits to return (default: 10) +==== get_memory_stats +Returns information on how ML is using memory. +[source,ts] +---- +client.ml.getMemoryStats(...) +---- -|`sort` -|`string \| string[]` - A comma-separated list of : pairs +==== get_model_snapshot_upgrade_stats +Gets stats for anomaly detection job model snapshot upgrades that are in progress. -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-model-snapshot-upgrade-stats.html[Endpoint documentation] +[source,ts] +---- +client.ml.getModelSnapshotUpgradeStats(...) +---- -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field +==== get_model_snapshots +Retrieves information about model snapshots. -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html[Endpoint documentation] +[source,ts] +---- +client.ml.getModelSnapshots(...) +---- -|`terminate_after` or `terminateAfter` -|`number` - The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. +==== get_overall_buckets +Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. -|`stats` -|`string \| string[]` - Specific 'tag' of the request for logging and statistical purposes +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-buckets.html[Endpoint documentation] +[source,ts] +---- +client.ml.getOverallBuckets(...) +---- -|`suggest_field` or `suggestField` -|`string` - Specify which field to use for suggestions +==== get_records +Retrieves anomaly records for an anomaly detection job. -|`suggest_mode` or `suggestMode` -|`'missing' \| 'popular' \| 'always'` - Specify suggest mode + -_Default:_ `missing` +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html[Endpoint documentation] +[source,ts] +---- +client.ml.getRecords(...) +---- -|`suggest_size` or `suggestSize` -|`number` - How many suggestions to return in response +==== get_trained_models +Retrieves configuration information for a trained inference model. 
-|`suggest_text` or `suggestText` -|`string` - The source text for which the suggestions should be returned +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models.html[Endpoint documentation] +[source,ts] +---- +client.ml.getTrainedModels(...) +---- -|`timeout` -|`string` - Explicit operation timeout +==== get_trained_models_stats +Retrieves usage information for trained inference models. -|`track_scores` or `trackScores` -|`boolean` - Whether to calculate and return scores even if they are not used for sorting +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models-stats.html[Endpoint documentation] +[source,ts] +---- +client.ml.getTrainedModelsStats(...) +---- -|`track_total_hits` or `trackTotalHits` -|`boolean\|long` - Indicate if the number of documents that match the query should be tracked. A number can also be specified, to accurately track the total hit count up to the number. +==== infer_trained_model_deployment +Evaluate a trained model. -|`allow_partial_search_results` or `allowPartialSearchResults` -|`boolean` - Indicate if an error should be returned if there is a partial search failure or timeout + -_Default:_ `true` +https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model-deployment.html[Endpoint documentation] +[source,ts] +---- +client.ml.inferTrainedModelDeployment(...) +---- -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response +==== info +Returns defaults and limits used by machine learning. -|`version` -|`boolean` - Specify whether to return document version as part of a hit +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-info.html[Endpoint documentation] +[source,ts] +---- +client.ml.info(...) +---- -|`seq_no_primary_term` or `seqNoPrimaryTerm` -|`boolean` - Specify whether to return sequence number and primary term of the last modification of each hit +==== open_job +Opens one or more anomaly detection jobs. -|`request_cache` or `requestCache` -|`boolean` - Specify if request cache should be used for this request or not, defaults to index level setting +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html[Endpoint documentation] +[source,ts] +---- +client.ml.openJob(...) +---- -|`batched_reduce_size` or `batchedReduceSize` -|`number` - The number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. + -_Default:_ `512` +==== post_calendar_events +Posts scheduled events in a calendar. -|`max_concurrent_shard_requests` or `maxConcurrentShardRequests` -|`number` - The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests + -_Default:_ `5` +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar-event.html[Endpoint documentation] +[source,ts] +---- +client.ml.postCalendarEvents(...) +---- -|`pre_filter_shard_size` or `preFilterShardSize` -|`number` - A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint.
+==== post_data
+Sends data to an anomaly detection job for analysis.

-|`rest_total_hits_as_int` or `restTotalHitsAsInt`
-|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response
+https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html[Endpoint documentation]
+[source,ts]
+----
+client.ml.postData(...)
+----

-|`min_compatible_shard_node` or `minCompatibleShardNode`
-|`string` - The minimum compatible version that all shards involved in search should have for this request to be successful
+==== preview_data_frame_analytics
+Previews the data that will be analyzed given a data frame analytics config.

-|`body`
-|`object` - The search definition using the Query DSL
+http://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalytics.html[Endpoint documentation]
+[source,ts]
+----
+client.ml.previewDataFrameAnalytics(...)
+----

-|===
+==== preview_datafeed
+Previews a datafeed.

-[discrete]
-=== searchMvt
-*Stability:* experimental
+https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html[Endpoint documentation]
[source,ts]
----
-client.searchMvt({
-  index: string | string[],
-  field: string,
-  zoom: number,
-  x: number,
-  y: number,
-  exact_bounds: boolean,
-  extent: number,
-  grid_precision: number,
-  grid_type: 'grid' | 'point',
-  size: number,
-  body: object
-})
+client.ml.previewDatafeed(...)
----
-link:{ref}/search-vector-tile-api.html[Documentation] +
-[cols=2*]
-|===
-|`index`
-|`string \| string[]` - Comma-separated list of data streams, indices, or aliases to search

-|`field`
-|`string` - Field containing geospatial data to return
+==== put_calendar
+Instantiates a calendar.

-|`zoom`
-|`number` - Zoom level for the vector tile to search
-
-|`x`
-|`number` - X coordinate for the vector tile to search
+https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.html[Endpoint documentation]
+[source,ts]
+----
+client.ml.putCalendar(...)
+----

-|`y`
-|`number` - Y coordinate for the vector tile to search
+==== put_calendar_job
+Adds an anomaly detection job to a calendar.

-|`exact_bounds` or `exactBounds`
-|`boolean` - If false, the meta layer's feature is the bounding box of the tile. If true, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation.
+https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-job.html[Endpoint documentation]
+[source,ts]
+----
+client.ml.putCalendarJob(...)
+----

-|`extent`
-|`number` - Size, in pixels, of a side of the vector tile. +
-_Default:_ `4096`
+==== put_data_frame_analytics
+Instantiates a data frame analytics job.

-|`grid_precision` or `gridPrecision`
-|`number` - Additional zoom levels available through the aggs layer. Accepts 0-8. +
-_Default:_ `8`
+https://www.elastic.co/guide/en/elasticsearch/reference/master/put-dfanalytics.html[Endpoint documentation]
+[source,ts]
+----
+client.ml.putDataFrameAnalytics(...)
+----

-|`grid_type` or `gridType`
-|`'grid' \| 'point'` - Determines the geometry type for features in the aggs layer. +
-_Default:_ `grid`
+==== put_datafeed
+Instantiates a datafeed.

-|`size`
-|`number` - Maximum number of features to return in the hits layer. Accepts 0-10000.
+ -_Default:_ `10000` +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html[Endpoint documentation] +[source,ts] +---- +client.ml.putDatafeed(...) +---- -|`body` -|`object` - Search request body. +==== put_filter +Instantiates a filter. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-filter.html[Endpoint documentation] +[source,ts] +---- +client.ml.putFilter(...) +---- -[discrete] -=== searchShards +==== put_job +Instantiates an anomaly detection job. +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html[Endpoint documentation] [source,ts] ---- -client.searchShards({ - index: string | string[], - preference: string, - routing: string, - local: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) +client.ml.putJob(...) ---- -link:{ref}/search-shards.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) +==== put_trained_model +Creates an inference trained model. -|`routing` -|`string` - Specific routing value - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) +https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models.html[Endpoint documentation] +[source,ts] +---- +client.ml.putTrainedModel(...) +---- -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) +==== put_trained_model_alias +Creates a new model alias (or reassigns an existing one) to refer to the trained model -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models-aliases.html[Endpoint documentation] +[source,ts] +---- +client.ml.putTrainedModelAlias(...) +---- -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` +==== put_trained_model_definition_part +Creates part of a trained model definition -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-definition-part.html[Endpoint documentation] +[source,ts] +---- +client.ml.putTrainedModelDefinitionPart(...) 
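+// a hedged usage sketch; the model id, part number, and payload fields below
+// are hypothetical examples, not values taken from this repository:
+// await client.ml.putTrainedModelDefinitionPart({
+//   model_id: 'my-model',
+//   part: 0,
+//   definition: '<base64-encoded definition chunk>',
+//   total_definition_length: 1234,
+//   total_parts: 1
+// })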
+---- -[discrete] -=== searchTemplate +==== put_trained_model_vocabulary +Creates a trained model vocabulary +https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-vocabulary.html[Endpoint documentation] [source,ts] ---- -client.searchTemplate({ - index: string | string[], - ignore_unavailable: boolean, - ignore_throttled: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - preference: string, - routing: string | string[], - scroll: string, - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - explain: boolean, - profile: boolean, - typed_keys: boolean, - rest_total_hits_as_int: boolean, - ccs_minimize_roundtrips: boolean, - body: object -}) +client.ml.putTrainedModelVocabulary(...) ---- -link:{ref}/search-template.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) +==== reset_job +Resets an existing anomaly detection job. -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Whether specified concrete, expanded or aliased indices should be ignored when throttled +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-reset-job.html[Endpoint documentation] +[source,ts] +---- +client.ml.resetJob(...) +---- -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +==== revert_model_snapshot +Reverts to a specific snapshot. -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html[Endpoint documentation] +[source,ts] +---- +client.ml.revertModelSnapshot(...) +---- -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) +==== set_upgrade_mode +Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-set-upgrade-mode.html[Endpoint documentation] +[source,ts] +---- +client.ml.setUpgradeMode(...) +---- -|`scroll` -|`string` - Specify how long a consistent view of the index should be maintained for scrolled search +==== start_data_frame_analytics +Starts a data frame analytics job. -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type +https://www.elastic.co/guide/en/elasticsearch/reference/current/start-dfanalytics.html[Endpoint documentation] +[source,ts] +---- +client.ml.startDataFrameAnalytics(...) +---- -|`explain` -|`boolean` - Specify whether to return detailed information about score computation as part of a hit +==== start_datafeed +Starts one or more datafeeds. -|`profile` -|`boolean` - Specify whether to profile the query execution +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html[Endpoint documentation] +[source,ts] +---- +client.ml.startDatafeed(...) 
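+// a minimal usage sketch (the datafeed id below is a hypothetical example):
+// await client.ml.startDatafeed({ datafeed_id: 'datafeed-test' })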
+----

-|`typed_keys` or `typedKeys`
-|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response
+==== start_trained_model_deployment
+Starts a trained model deployment.

-|`rest_total_hits_as_int` or `restTotalHitsAsInt`
-|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response
+https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html[Endpoint documentation]
+[source,ts]
+----
+client.ml.startTrainedModelDeployment(...)
+----

-|`ccs_minimize_roundtrips` or `ccsMinimizeRoundtrips`
-|`boolean` - Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution +
-_Default:_ `true`
+==== stop_data_frame_analytics
+Stops one or more data frame analytics jobs.

-|`body`
-|`object` - The search definition template and its params
+https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-dfanalytics.html[Endpoint documentation]
+[source,ts]
+----
+client.ml.stopDataFrameAnalytics(...)
+----

-|===
+==== stop_datafeed
+Stops one or more datafeeds.

-[discrete]
-=== searchableSnapshots.cacheStats
-*Stability:* experimental
+https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html[Endpoint documentation]
[source,ts]
----
-client.searchableSnapshots.cacheStats({
-  node_id: string | string[]
-})
+client.ml.stopDatafeed(...)
----
-link:{ref}/searchable-snapshots-apis.html[Documentation] +
-[cols=2*]
-|===
-|`node_id` or `nodeId`
-|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes

-|===
+==== stop_trained_model_deployment
+Stops a trained model deployment.

-[discrete]
-=== searchableSnapshots.clearCache
-*Stability:* experimental
+https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html[Endpoint documentation]
[source,ts]
----
-client.searchableSnapshots.clearCache({
-  index: string | string[],
-  ignore_unavailable: boolean,
-  allow_no_indices: boolean,
-  expand_wildcards: 'open' | 'closed' | 'none' | 'all'
-})
+client.ml.stopTrainedModelDeployment(...)
----
-link:{ref}/searchable-snapshots-apis.html[Documentation] +
-[cols=2*]
-|===
-|`index`
-|`string \| string[]` - A comma-separated list of index names

-|`ignore_unavailable` or `ignoreUnavailable`
-|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed)

-|`allow_no_indices` or `allowNoIndices`
-|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+==== update_data_frame_analytics
+Updates certain properties of a data frame analytics job.

-|`expand_wildcards` or `expandWildcards`
-|`'open' \| 'closed' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. +
-_Default:_ `open`
+https://www.elastic.co/guide/en/elasticsearch/reference/current/update-dfanalytics.html[Endpoint documentation]
+[source,ts]
+----
+client.ml.updateDataFrameAnalytics(...)
+----

-|===
+==== update_datafeed
+Updates certain properties of a datafeed.
-[discrete] -=== searchableSnapshots.mount -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html[Endpoint documentation] [source,ts] ---- -client.searchableSnapshots.mount({ - repository: string, - snapshot: string, - master_timeout: string, - wait_for_completion: boolean, - storage: string, - body: object -}) +client.ml.updateDatafeed(...) ---- -link:{ref}/searchable-snapshots-api-mount-snapshot.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - The name of the repository containing the snapshot of the index to mount - -|`snapshot` -|`string` - The name of the snapshot of the index to mount -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node +==== update_filter +Updates the description of a filter, adds items, or removes items. -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should this request wait until the operation has completed before returning +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-filter.html[Endpoint documentation] +[source,ts] +---- +client.ml.updateFilter(...) +---- -|`storage` -|`string` - Selects the kind of local storage used to accelerate searches. Experimental, and defaults to `full_copy` +==== update_job +Updates certain properties of an anomaly detection job. -|`body` -|`object` - The restore configuration for mounting the snapshot as searchable +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html[Endpoint documentation] +[source,ts] +---- +client.ml.updateJob(...) +---- -|=== +==== update_model_snapshot +Updates certain properties of a snapshot. -[discrete] -=== searchableSnapshots.stats -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html[Endpoint documentation] [source,ts] ---- -client.searchableSnapshots.stats({ - index: string | string[], - level: 'cluster' | 'indices' | 'shards' -}) +client.ml.updateModelSnapshot(...) ---- -link:{ref}/searchable-snapshots-apis.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names -|`level` -|`'cluster' \| 'indices' \| 'shards'` - Return stats aggregated at cluster, index or shard level + -_Default:_ `indices` +==== upgrade_job_snapshot +Upgrades a given job snapshot to the current major version. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-upgrade-job-model-snapshot.html[Endpoint documentation] +[source,ts] +---- +client.ml.upgradeJobSnapshot(...) +---- -[discrete] -=== security.authenticate +=== nodes +==== clear_repositories_metering_archive +Removes the archived repositories metering information present in the cluster. +https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-repositories-metering-archive-api.html[Endpoint documentation] [source,ts] ---- -client.security.authenticate() +client.nodes.clearRepositoriesMeteringArchive(...) ---- -link:{ref}/security-api-authenticate.html[Documentation] + - -[discrete] -=== security.changePassword +==== get_repositories_metering_info +Returns cluster repositories metering information. +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html[Endpoint documentation] [source,ts] ---- -client.security.changePassword({ - username: string, - refresh: 'true' | 'false' | 'wait_for', - body: object -}) +client.nodes.getRepositoriesMeteringInfo(...) 
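+// a minimal usage sketch (the node id below is a hypothetical example):
+// await client.nodes.getRepositoriesMeteringInfo({ node_id: 'node-1' })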
---- -link:{ref}/security-api-change-password.html[Documentation] + -[cols=2*] -|=== -|`username` -|`string` - The username of the user to change the password for -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +==== hot_threads +Returns information about hot threads on each node in the cluster. -|`body` -|`object` - the new password for the user - -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html[Endpoint documentation] +[source,ts] +---- +client.nodes.hotThreads(...) +---- -[discrete] -=== security.clearApiKeyCache +==== info +Returns information about nodes in the cluster. +https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html[Endpoint documentation] [source,ts] ---- -client.security.clearApiKeyCache({ - ids: string | string[] -}) +client.nodes.info(...) ---- -link:{ref}/security-api-clear-api-key-cache.html[Documentation] + -[cols=2*] -|=== -|`ids` -|`string \| string[]` - A comma-separated list of IDs of API keys to clear from the cache -|=== - -[discrete] -=== security.clearCachedPrivileges +==== reload_secure_settings +Reloads secure settings. +https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings[Endpoint documentation] [source,ts] ---- -client.security.clearCachedPrivileges({ - application: string | string[] -}) +client.nodes.reloadSecureSettings(...) ---- -link:{ref}/security-api-clear-privilege-cache.html[Documentation] + -[cols=2*] -|=== -|`application` -|`string \| string[]` - A comma-separated list of application names - -|=== -[discrete] -=== security.clearCachedRealms +==== stats +Returns statistical information about nodes in the cluster. +https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html[Endpoint documentation] [source,ts] ---- -client.security.clearCachedRealms({ - realms: string | string[], - usernames: string | string[] -}) +client.nodes.stats(...) ---- -link:{ref}/security-api-clear-cache.html[Documentation] + -[cols=2*] -|=== -|`realms` -|`string \| string[]` - Comma-separated list of realms to clear -|`usernames` -|`string \| string[]` - Comma-separated list of usernames to clear from the cache +==== usage +Returns low-level information about REST actions usage on nodes. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html[Endpoint documentation] +[source,ts] +---- +client.nodes.usage(...) +---- -[discrete] -=== security.clearCachedRoles +=== rollup +==== delete_job +Deletes an existing rollup job. +https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-delete-job.html[Endpoint documentation] [source,ts] ---- -client.security.clearCachedRoles({ - name: string | string[] -}) +client.rollup.deleteJob(...) ---- -link:{ref}/security-api-clear-role-cache.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - Role name -|=== +==== get_jobs +Retrieves the configuration, stats, and status of rollup jobs. 
-[discrete]
-=== security.clearCachedServiceTokens
-*Stability:* beta
+https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-job.html[Endpoint documentation]
[source,ts]
----
-client.security.clearCachedServiceTokens({
-  namespace: string,
-  service: string,
-  name: string | string[]
-})
+client.rollup.getJobs(...)
----
-link:{ref}/security-api-clear-service-token-caches.html[Documentation] +
-[cols=2*]
-|===
-|`namespace`
-|`string` - An identifier for the namespace
-
-|`service`
-|`string` - An identifier for the service name

-|`name`
-|`string \| string[]` - A comma-separated list of service token names
+==== get_rollup_caps
+Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-caps.html[Endpoint documentation]
+[source,ts]
+----
+client.rollup.getRollupCaps(...)
+----

-[discrete]
-=== security.createApiKey
+==== get_rollup_index_caps
+Returns the rollup capabilities of all jobs inside a rollup index (e.g. the index where rollup data is stored).

+https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html[Endpoint documentation]
[source,ts]
----
-client.security.createApiKey({
-  refresh: 'true' | 'false' | 'wait_for',
-  body: object
-})
+client.rollup.getRollupIndexCaps(...)
----
-link:{ref}/security-api-create-api-key.html[Documentation] +
-[cols=2*]
-|===
-|`refresh`
-|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
-
-|`body`
-|`object` - The api key request to create an API key
-
-|===
+==== put_job
+Creates a rollup job.

-[discrete]
-=== security.createServiceToken
-*Stability:* beta
+https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-put-job.html[Endpoint documentation]
[source,ts]
----
-client.security.createServiceToken({
-  namespace: string,
-  service: string,
-  name: string,
-  refresh: 'true' | 'false' | 'wait_for'
-})
+client.rollup.putJob(...)
----
-link:{ref}/security-api-create-service-token.html[Documentation] +
-[cols=2*]
-|===
-|`namespace`
-|`string` - An identifier for the namespace

-|`service`
-|`string` - An identifier for the service name
+==== rollup
+Rolls up an index.

-|`name`
-|`string` - An identifier for the token name
+https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-rollup.html[Endpoint documentation]
+[source,ts]
+----
+client.rollup.rollup(...)
+----

-|`refresh`
-|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+==== rollup_search
+Enables searching rolled-up data using the standard query DSL.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-search.html[Endpoint documentation]
+[source,ts]
+----
+client.rollup.rollupSearch(...)
+----

-[discrete]
-=== security.deletePrivileges
+==== start_job
+Starts an existing, stopped rollup job.
+https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-start-job.html[Endpoint documentation]
[source,ts]
----
-client.security.deletePrivileges({
-  application: string,
-  name: string,
-  refresh: 'true' | 'false' | 'wait_for'
-})
+client.rollup.startJob(...)
----
-link:{ref}/security-api-delete-privilege.html[Documentation] +
-[cols=2*]
-|===
-|`application`
-|`string` - Application name

-|`name`
-|`string` - Privilege name
+==== stop_job
+Stops an existing, started rollup job.

-|`refresh`
-|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
-
-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-stop-job.html[Endpoint documentation]
+[source,ts]
+----
+client.rollup.stopJob(...)
+----

-[discrete]
-=== security.deleteRole
+=== searchable_snapshots
+==== cache_stats
+Retrieves node-level cache statistics about searchable snapshots.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html[Endpoint documentation]
[source,ts]
----
-client.security.deleteRole({
-  name: string,
-  refresh: 'true' | 'false' | 'wait_for'
-})
+client.searchableSnapshots.cacheStats(...)
----
-link:{ref}/security-api-delete-role.html[Documentation] +
-[cols=2*]
-|===
-|`name`
-|`string` - Role name

-|`refresh`
-|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+==== clear_cache
+Clears the cache of searchable snapshots.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html[Endpoint documentation]
+[source,ts]
+----
+client.searchableSnapshots.clearCache(...)
+----

-[discrete]
-=== security.deleteRoleMapping
+==== mount
+Mounts a snapshot as a searchable index.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-mount-snapshot.html[Endpoint documentation]
[source,ts]
----
-client.security.deleteRoleMapping({
-  name: string,
-  refresh: 'true' | 'false' | 'wait_for'
-})
+client.searchableSnapshots.mount(...)
----
-link:{ref}/security-api-delete-role-mapping.html[Documentation] +
-[cols=2*]
-|===
-|`name`
-|`string` - Role-mapping name

-|`refresh`
-|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+==== stats
+Retrieves shard-level statistics about searchable snapshots.

-|===
+https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html[Endpoint documentation]
+[source,ts]
+----
+client.searchableSnapshots.stats(...)
+----

-[discrete]
-=== security.deleteServiceToken
-*Stability:* beta
+=== security
+==== activate_user_profile
+Creates or updates the user profile on behalf of another user.

[source,ts]
----
-client.security.deleteServiceToken({
-  namespace: string,
-  service: string,
-  name: string,
-  refresh: 'true' | 'false' | 'wait_for'
-})
+client.security.activateUserProfile(...)
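+// a hedged usage sketch; the grant type and credentials below are
+// hypothetical examples:
+// await client.security.activateUserProfile({
+//   grant_type: 'password',
+//   username: 'jdoe',
+//   password: 'l0ng-r4nd0m-p@ssw0rd'
+// })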
----

+==== authenticate
+Enables authentication as a user and retrieves information about the authenticated user.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html[Endpoint documentation]
+[source,ts]
+----
+client.security.authenticate(...)
+----

+==== change_password
+Changes the passwords of users in the native realm and built-in users.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html[Endpoint documentation]
+[source,ts]
+----
+client.security.changePassword(...)
+----

+==== clear_api_key_cache
+Clears a subset or all entries from the API key cache.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-api-key-cache.html[Endpoint documentation]
+[source,ts]
+----
+client.security.clearApiKeyCache(...)
+----

+==== clear_cached_privileges
+Evicts application privileges from the native application privileges cache.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-privilege-cache.html[Endpoint documentation]
+[source,ts]
+----
+client.security.clearCachedPrivileges(...)
+----

+==== clear_cached_realms
+Evicts users from the user cache. Can completely clear the cache or evict specific users.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-cache.html[Endpoint documentation]
+[source,ts]
+----
+client.security.clearCachedRealms(...)
+----

+==== clear_cached_roles
+Evicts roles from the native role cache.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html[Endpoint documentation]
+[source,ts]
+----
+client.security.clearCachedRoles(...)
+----

+==== clear_cached_service_tokens
+Evicts tokens from the service account token caches.
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-service-token-caches.html[Endpoint documentation] [source,ts] ---- -client.security.enableUser({ - username: string, - refresh: 'true' | 'false' | 'wait_for' -}) +client.security.clearCachedServiceTokens(...) ---- -link:{ref}/security-api-enable-user.html[Documentation] + -[cols=2*] -|=== -|`username` -|`string` - The username of the user to enable -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +==== create_api_key +Creates an API key for access without requiring basic authentication. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html[Endpoint documentation] +[source,ts] +---- +client.security.createApiKey(...) +---- -[discrete] -=== security.enrollKibana +==== create_service_token +Creates a service account token for access without requiring basic authentication. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html[Endpoint documentation] [source,ts] ---- -client.security.enrollKibana() +client.security.createServiceToken(...) ---- -link:{ref}/security-api-kibana-enrollment.html[Documentation] + - -[discrete] -=== security.enrollNode +==== delete_privileges +Removes application privileges. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-privilege.html[Endpoint documentation] [source,ts] ---- -client.security.enrollNode() +client.security.deletePrivileges(...) ---- -link:{ref}/security-api-node-enrollment.html[Documentation] + +==== delete_role +Removes roles in the native realm. -[discrete] -=== security.getApiKey - +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role.html[Endpoint documentation] [source,ts] ---- -client.security.getApiKey({ - id: string, - name: string, - username: string, - realm_name: string, - owner: boolean -}) +client.security.deleteRole(...) ---- -link:{ref}/security-api-get-api-key.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - API key id of the API key to be retrieved - -|`name` -|`string` - API key name of the API key to be retrieved -|`username` -|`string` - user name of the user who created this API key to be retrieved +==== delete_role_mapping +Removes role mappings. -|`realm_name` or `realmName` -|`string` - realm name of the user who created this API key to be retrieved +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html[Endpoint documentation] +[source,ts] +---- +client.security.deleteRoleMapping(...) +---- -|`owner` -|`boolean` - flag to query API keys owned by the currently authenticated user +==== delete_service_token +Deletes a service account token. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-service-token.html[Endpoint documentation] +[source,ts] +---- +client.security.deleteServiceToken(...) +---- -[discrete] -=== security.getBuiltinPrivileges +==== delete_user +Deletes users from the native realm. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html[Endpoint documentation] [source,ts] ---- -client.security.getBuiltinPrivileges() +client.security.deleteUser(...) 
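+// a minimal usage sketch (the username below is a hypothetical example):
+// await client.security.deleteUser({ username: 'jdoe' })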
----
+==== disable_user
+Disables users in the native realm.

-[discrete]
-=== security.getPrivileges
-
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html[Endpoint documentation]
[source,ts]
----
-client.security.getPrivileges({
-  application: string,
-  name: string
-})
+client.security.disableUser(...)
----
-link:{ref}/security-api-get-privileges.html[Documentation] +
-[cols=2*]
-|===
-|`application`
-|`string` - Application name

-|`name`
-|`string` - Privilege name
-|===

-[discrete]
-=== security.getRole
+==== enable_user
+Enables users in the native realm.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html[Endpoint documentation]
[source,ts]
----
-client.security.getRole({
-  name: string | string[]
-})
+client.security.enableUser(...)
----
-link:{ref}/security-api-get-role.html[Documentation] +
-[cols=2*]
-|===
-|`name`
-|`string \| string[]` - A comma-separated list of role names

-|===

-[discrete]
-=== security.getRoleMapping
+==== enroll_kibana
+Allows a Kibana instance to configure itself to communicate with a secured Elasticsearch cluster.

+https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-kibana-enrollment.html[Endpoint documentation]
[source,ts]
----
-client.security.getRoleMapping({
-  name: string | string[]
-})
+client.security.enrollKibana(...)
----
-link:{ref}/security-api-get-role-mapping.html[Documentation] +
-[cols=2*]
-|===
-|`name`
-|`string \| string[]` - A comma-separated list of role-mapping names
-|===
+==== enroll_node
+Allows a new node to enroll in an existing cluster with security enabled.

-[discrete]
-=== security.getServiceAccounts
-*Stability:* beta
+https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-node-enrollment.html[Endpoint documentation]
[source,ts]
----
-client.security.getServiceAccounts({
-  namespace: string,
-  service: string
-})
+client.security.enrollNode(...)
----
-link:{ref}/security-api-get-service-accounts.html[Documentation] +
-[cols=2*]
-|===
-|`namespace`
-|`string` - An identifier for the namespace

-|`service`
-|`string` - An identifier for the service name
+==== get_api_key
+Retrieves information for one or more API keys.

-|===
-
-[discrete]
-=== security.getServiceCredentials
-*Stability:* beta
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-api-key.html[Endpoint documentation]
[source,ts]
----
-client.security.getServiceCredentials({
-  namespace: string,
-  service: string
-})
+client.security.getApiKey(...)
----
-link:{ref}/security-api-get-service-credentials.html[Documentation] +
-[cols=2*]
-|===
-|`namespace`
-|`string` - An identifier for the namespace

-|`service`
-|`string` - An identifier for the service name
-|===
-
-[discrete]
-=== security.getToken
+==== get_builtin_privileges
+Retrieves the list of cluster privileges and index privileges that are available in this version of Elasticsearch.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-builtin-privileges.html[Endpoint documentation]
[source,ts]
----
-client.security.getToken({
-  body: object
-})
+client.security.getBuiltinPrivileges(...)
----
-link:{ref}/security-api-get-token.html[Documentation] +
-[cols=2*]
-|===
-|`body`
-|`object` - The token request to get

-|===

-[discrete]
-=== security.getUser
+==== get_privileges
+Retrieves application privileges.
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html[Endpoint documentation] [source,ts] ---- -client.security.getUser({ - username: string | string[] -}) +client.security.getPrivileges(...) ---- -link:{ref}/security-api-get-user.html[Documentation] + -[cols=2*] -|=== -|`username` -|`string \| string[]` - A comma-separated list of usernames -|=== - -[discrete] -=== security.getUserPrivileges +==== get_role +Retrieves roles in the native realm. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role.html[Endpoint documentation] [source,ts] ---- -client.security.getUserPrivileges() +client.security.getRole(...) ---- -link:{ref}/security-api-get-privileges.html[Documentation] + - -[discrete] -=== security.grantApiKey +==== get_role_mapping +Retrieves role mappings. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html[Endpoint documentation] [source,ts] ---- -client.security.grantApiKey({ - refresh: 'true' | 'false' | 'wait_for', - body: object -}) +client.security.getRoleMapping(...) ---- -link:{ref}/security-api-grant-api-key.html[Documentation] + -[cols=2*] -|=== -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|`body` -|`object` - The api key request to create an API key - -|=== -[discrete] -=== security.hasPrivileges +==== get_service_accounts +Retrieves information about service accounts. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-accounts.html[Endpoint documentation] [source,ts] ---- -client.security.hasPrivileges({ - user: string, - body: object -}) +client.security.getServiceAccounts(...) ---- -link:{ref}/security-api-has-privileges.html[Documentation] + -[cols=2*] -|=== -|`user` -|`string` - Username -|`body` -|`object` - The privileges to test - -|=== - -[discrete] -=== security.invalidateApiKey +==== get_service_credentials +Retrieves information of all service credentials for a service account. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-credentials.html[Endpoint documentation] [source,ts] ---- -client.security.invalidateApiKey({ - body: object -}) +client.security.getServiceCredentials(...) ---- -link:{ref}/security-api-invalidate-api-key.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The api key request to invalidate API key(s) -|=== - -[discrete] -=== security.invalidateToken +==== get_token +Creates a bearer token for access without requiring basic authentication. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html[Endpoint documentation] [source,ts] ---- -client.security.invalidateToken({ - body: object -}) +client.security.getToken(...) ---- -link:{ref}/security-api-invalidate-token.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The token to invalidate - -|=== -[discrete] -=== security.putPrivileges +==== get_user +Retrieves information about users in the native realm and built-in users. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html[Endpoint documentation] [source,ts] ---- -client.security.putPrivileges({ - refresh: 'true' | 'false' | 'wait_for', - body: object -}) +client.security.getUser(...) 
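+// a minimal usage sketch; omitting `username` retrieves all users,
+// and the name below is a hypothetical example:
+// await client.security.getUser({ username: 'jdoe' })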
---- -link:{ref}/security-api-put-privileges.html[Documentation] + -[cols=2*] -|=== -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. -|`body` -|`object` - The privilege(s) to add - -|=== - -[discrete] -=== security.putRole +==== get_user_privileges +Retrieves security privileges for the logged in user. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html[Endpoint documentation] [source,ts] ---- -client.security.putRole({ - name: string, - refresh: 'true' | 'false' | 'wait_for', - body: object -}) +client.security.getUserPrivileges(...) ---- -link:{ref}/security-api-put-role.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - Role name - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|`body` -|`object` - The role to add - -|=== - -[discrete] -=== security.putRoleMapping +==== get_user_profile +Retrieves user profile for the given unique ID. [source,ts] ---- -client.security.putRoleMapping({ - name: string, - refresh: 'true' | 'false' | 'wait_for', - body: object -}) +client.security.getUserProfile(...) ---- -link:{ref}/security-api-put-role-mapping.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - Role-mapping name - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|`body` -|`object` - The role mapping to add -|=== - -[discrete] -=== security.putUser +==== grant_api_key +Creates an API key on behalf of another user. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-grant-api-key.html[Endpoint documentation] [source,ts] ---- -client.security.putUser({ - username: string, - refresh: 'true' | 'false' | 'wait_for', - body: object -}) +client.security.grantApiKey(...) ---- -link:{ref}/security-api-put-user.html[Documentation] + -[cols=2*] -|=== -|`username` -|`string` - The username of the User - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|`body` -|`object` - The user to add - -|=== -[discrete] -=== security.queryApiKeys +==== has_privileges +Determines whether the specified user has a specified list of privileges. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html[Endpoint documentation] [source,ts] ---- -client.security.queryApiKeys({ - body: object -}) +client.security.hasPrivileges(...) ---- -link:{ref}/security-api-query-api-key.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - From, size, query, sort and search_after -|=== - -[discrete] -=== security.samlAuthenticate +==== invalidate_api_key +Invalidates one or more API keys. 
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-api-key.html[Endpoint documentation]
[source,ts]
----
-client.security.samlAuthenticate({
-  body: object
-})
+client.security.invalidateApiKey(...)
----
-link:{ref}/security-api-saml-authenticate.html[Documentation] +
-[cols=2*]
-|===
-|`body`
-|`object` - The SAML response to authenticate

-|===
+==== invalidate_token
+Invalidates one or more access tokens or refresh tokens.

-[discrete]
-=== security.samlCompleteLogout
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html[Endpoint documentation]
[source,ts]
----
-client.security.samlCompleteLogout({
-  body: object
-})
+client.security.invalidateToken(...)
----
-link:{ref}/security-api-saml-complete-logout.html[Documentation] +
-[cols=2*]
-|===
-|`body`
-|`object` - The logout response to verify
-|===
+==== oidc_authenticate
+Exchanges an OpenID Connect authentication response message for an Elasticsearch access token and refresh token pair.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-authenticate.html[Endpoint documentation]
[source,ts]
----
-client.security.samlInvalidate({
-  body: object
-})
+client.security.oidcAuthenticate(...)
----
-link:{ref}/security-api-saml-invalidate.html[Documentation] +
-[cols=2*]
-|===
-|`body`
-|`object` - The LogoutRequest message

-|===
+==== oidc_logout
+Invalidates a refresh token and access token that were generated from the OpenID Connect Authenticate API.

-[discrete]
-=== security.samlLogout
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-logout.html[Endpoint documentation]
[source,ts]
----
-client.security.samlLogout({
-  body: object
-})
+client.security.oidcLogout(...)
----
-link:{ref}/security-api-saml-logout.html[Documentation] +
-[cols=2*]
-|===
-|`body`
-|`object` - The tokens to invalidate
-|===
+==== oidc_prepare_authentication
+Creates an OAuth 2.0 authentication request as a URL string.

-[discrete]
-=== security.samlPrepareAuthentication
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-prepare-authentication.html[Endpoint documentation]
[source,ts]
----
-client.security.samlPrepareAuthentication({
-  body: object
-})
+client.security.oidcPrepareAuthentication(...)
----
-link:{ref}/security-api-saml-prepare-authentication.html[Documentation] +
-[cols=2*]
-|===
-|`body`
-|`object` - The realm for which to create the authentication request, identified by either its name or the ACS URL

-|===
+==== put_privileges
+Adds or updates application privileges.

-[discrete]
-=== security.samlServiceProviderMetadata
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-privileges.html[Endpoint documentation]
[source,ts]
----
-client.security.samlServiceProviderMetadata({
-  realm_name: string
-})
+client.security.putPrivileges(...)
----
-link:{ref}/security-api-saml-sp-metadata.html[Documentation] +
-[cols=2*]
-|===
-|`realm_name` or `realmName`
-|`string` - The name of the SAML realm to get the metadata for
-|===
+==== put_role
+Adds and updates roles in the native realm.

-[discrete]
-=== shutdown.deleteNode
-*Stability:* experimental
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role.html[Endpoint documentation]
[source,ts]
----
-client.shutdown.deleteNode({
-  node_id: string
-})
+client.security.putRole(...)
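+// a hedged usage sketch; the role name and privileges below are
+// hypothetical examples:
+// await client.security.putRole({
+//   name: 'logs_reader',
+//   cluster: ['monitor'],
+//   indices: [{ names: ['logs-*'], privileges: ['read'] }]
+// })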
----
+==== put_role_mapping
+Creates and updates role mappings.

-[discrete]
-=== shutdown.getNode
-*Stability:* experimental
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html[Endpoint documentation]
[source,ts]
----
-client.shutdown.getNode({
-  node_id: string
-})
+client.security.putRoleMapping(...)
----
-link:https://www.elastic.co/guide/en/elasticsearch/reference/current[Documentation] +
-[cols=2*]
-|===
-|`node_id` or `nodeId`
-|`string` - Which node for which to retrieve the shutdown status
-|===
+==== put_user
+Adds and updates users in the native realm. These users are commonly referred to as native users.

-[discrete]
-=== shutdown.putNode
-*Stability:* experimental
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html[Endpoint documentation]
[source,ts]
----
-client.shutdown.putNode({
-  node_id: string,
-  body: object
-})
+client.security.putUser(...)
----
-link:https://www.elastic.co/guide/en/elasticsearch/reference/current[Documentation] +
-[cols=2*]
-|===
-|`node_id` or `nodeId`
-|`string` - The node id of node to be shut down

-|`body`
-|`object` - The shutdown type definition to register

-|===
+==== query_api_keys
+Retrieves information for API keys using a subset of the query DSL.

-[discrete]
-=== slm.deleteLifecycle
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-api-key.html[Endpoint documentation]
[source,ts]
----
-client.slm.deleteLifecycle({
-  policy_id: string
-})
+client.security.queryApiKeys(...)
----
-link:{ref}/slm-api-delete-policy.html[Documentation] +
-[cols=2*]
-|===
-|`policy_id` or `policyId`
-|`string` - The id of the snapshot lifecycle policy to remove
-|===

-[discrete]
-=== slm.executeLifecycle
+==== saml_authenticate
+Exchanges a SAML Response message for an Elasticsearch access token and refresh token pair.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-authenticate.html[Endpoint documentation]
[source,ts]
----
-client.slm.executeLifecycle({
-  policy_id: string
-})
+client.security.samlAuthenticate(...)
----
-link:{ref}/slm-api-execute-lifecycle.html[Documentation] +
-[cols=2*]
-|===
-|`policy_id` or `policyId`
-|`string` - The id of the snapshot lifecycle policy to be executed

-|===
+==== saml_complete_logout
+Verifies the logout response sent from the SAML IdP.

-[discrete]
-=== slm.executeRetention
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-complete-logout.html[Endpoint documentation]
[source,ts]
----
-client.slm.executeRetention()
+client.security.samlCompleteLogout(...)
----
-link:{ref}/slm-api-execute-retention.html[Documentation] +
+==== saml_invalidate
+Consumes a SAML LogoutRequest.

-[discrete]
-=== slm.getLifecycle
-
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-invalidate.html[Endpoint documentation]
[source,ts]
----
-client.slm.getLifecycle({
-  policy_id: string | string[]
-})
+client.security.samlInvalidate(...)
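+// a hedged usage sketch; the query string comes from the IdP's LogoutRequest
+// redirect URL and the value below is a placeholder:
+// await client.security.samlInvalidate({ query_string: 'SAMLRequest=...' })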
----
-link:{ref}/slm-api-get-policy.html[Documentation] +
-[cols=2*]
-|===
-|`policy_id` or `policyId`
-|`string \| string[]` - Comma-separated list of snapshot lifecycle policies to retrieve

-|===
+==== saml_logout
+Invalidates an access token and a refresh token that were generated via the SAML Authenticate API.

-[discrete]
-=== slm.getStats
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-logout.html[Endpoint documentation]
[source,ts]
----
-client.slm.getStats()
+client.security.samlLogout(...)
----
-link:{ref}/slm-api-get-stats.html[Documentation] +
+==== saml_prepare_authentication
+Creates a SAML authentication request.

-[discrete]
-=== slm.getStatus
-
+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-prepare-authentication.html[Endpoint documentation]
[source,ts]
----
-client.slm.getStatus()
+client.security.samlPrepareAuthentication(...)
----
-link:{ref}/slm-api-get-status.html[Documentation] +

-[discrete]
-=== slm.putLifecycle
+==== saml_service_provider_metadata
+Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-sp-metadata.html[Endpoint documentation]
[source,ts]
----
-client.slm.putLifecycle({
-  policy_id: string,
-  body: object
-})
+client.security.samlServiceProviderMetadata(...)
----
-link:{ref}/slm-api-put-policy.html[Documentation] +
-[cols=2*]
-|===
-|`policy_id` or `policyId`
-|`string` - The id of the snapshot lifecycle policy

-|`body`
-|`object` - The snapshot lifecycle policy definition to register

-|===
-
-[discrete]
-=== slm.start
+==== update_user_profile_data
+Updates application-specific data for the user profile of the given unique ID.

[source,ts]
----
-client.slm.start()
+client.security.updateUserProfileData(...)
----
-link:{ref}/slm-api-start.html[Documentation] +

-[discrete]
-=== slm.stop
+=== slm
+==== delete_lifecycle
+Deletes an existing snapshot lifecycle policy.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-delete-policy.html[Endpoint documentation]
[source,ts]
----
-client.slm.stop()
+client.slm.deleteLifecycle(...)
----
-link:{ref}/slm-api-stop.html[Documentation] +

-[discrete]
-=== snapshot.cleanupRepository
-
+==== execute_lifecycle
+Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-lifecycle.html[Endpoint documentation]
[source,ts]
----
-client.snapshot.cleanupRepository({
-  repository: string,
-  master_timeout: string,
-  timeout: string
-})
+client.slm.executeLifecycle(...)
----
-link:{ref}/clean-up-snapshot-repo-api.html[Documentation] +
-[cols=2*]
-|===
-|`repository`
-|`string` - A repository name

-|`master_timeout` or `masterTimeout`
-|`string` - Explicit operation timeout for connection to master node
-|`timeout`
-|`string` - Explicit operation timeout

-|===

-[discrete]
-=== snapshot.clone
+==== execute_retention
+Deletes any snapshots that are expired according to the policy's retention rules.

+https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-retention.html[Endpoint documentation]
[source,ts]
----
-client.snapshot.clone({
-  repository: string,
-  snapshot: string,
-  target_snapshot: string,
-  master_timeout: string,
-  body: object
-})
+client.slm.executeRetention(...)
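+// usage sketch: this API takes no required request parameters
+// await client.slm.executeRetention()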
---- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`snapshot` -|`string` - The name of the snapshot to clone from - -|`target_snapshot` or `targetSnapshot` -|`string` - The name of the cloned snapshot to create -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`body` -|`object` - The snapshot clone definition - -|=== - -[discrete] -=== snapshot.create +==== get_lifecycle +Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. +https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-policy.html[Endpoint documentation] [source,ts] ---- -client.snapshot.create({ - repository: string, - snapshot: string, - master_timeout: string, - wait_for_completion: boolean, - body: object -}) +client.slm.getLifecycle(...) ---- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`snapshot` -|`string` - A snapshot name -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should this request wait until the operation has completed before returning - -|`body` -|`object` - The snapshot definition - -|=== - -[discrete] -=== snapshot.createRepository +==== get_stats +Returns global and policy-level statistics about actions taken by snapshot lifecycle management. +https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-stats.html[Endpoint documentation] [source,ts] ---- -client.snapshot.createRepository({ - repository: string, - master_timeout: string, - timeout: string, - verify: boolean, - body: object -}) +client.slm.getStats(...) ---- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|`verify` -|`boolean` - Whether to verify the repository after creation - -|`body` -|`object` - The repository definition -|=== - -[discrete] -=== snapshot.delete +==== get_status +Retrieves the status of snapshot lifecycle management (SLM). +https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-status.html[Endpoint documentation] [source,ts] ---- -client.snapshot.delete({ - repository: string, - snapshot: string | string[], - master_timeout: string -}) +client.slm.getStatus(...) ---- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`snapshot` -|`string \| string[]` - A comma-separated list of snapshot names - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|=== -[discrete] -=== snapshot.deleteRepository +==== put_lifecycle +Creates or updates a snapshot lifecycle policy. +https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-put-policy.html[Endpoint documentation] [source,ts] ---- -client.snapshot.deleteRepository({ - repository: string | string[], - master_timeout: string, - timeout: string -}) +client.slm.putLifecycle(...) ---- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string \| string[]` - Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. 
-|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -[discrete] -=== snapshot.get +==== start +Turns on snapshot lifecycle management (SLM). +https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-start.html[Endpoint documentation] [source,ts] ---- -client.snapshot.get({ - repository: string, - snapshot: string | string[], - master_timeout: string, - ignore_unavailable: boolean, - index_details: boolean, - include_repository: boolean, - verbose: boolean -}) +client.slm.start(...) ---- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`snapshot` -|`string \| string[]` - A comma-separated list of snapshot names -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown - -|`index_details` or `indexDetails` -|`boolean` - Whether to include details of each index in the snapshot, if those details are available. Defaults to false. - -|`include_repository` or `includeRepository` -|`boolean` - Whether to include the repository name in the snapshot info. Defaults to true. - -|`verbose` -|`boolean` - Whether to show verbose snapshot info or only show the basic info found in the repository index blob - -|=== - -[discrete] -=== snapshot.getRepository +==== stop +Turns off snapshot lifecycle management (SLM). +https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-stop.html[Endpoint documentation] [source,ts] ---- -client.snapshot.getRepository({ - repository: string | string[], - master_timeout: string, - local: boolean -}) +client.slm.stop(...) ---- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string \| string[]` - A comma-separated list of repository names - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) -|=== - -[discrete] -=== snapshot.repositoryAnalyze +=== snapshot +==== cleanup_repository +Removes stale data from repository. +https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot-repo-api.html[Endpoint documentation] [source,ts] ---- -client.snapshot.repositoryAnalyze({ - repository: string, - blob_count: number, - concurrency: number, - read_node_count: number, - early_read_node_count: number, - seed: number, - rare_action_probability: number, - max_blob_size: string, - max_total_data_size: string, - timeout: string, - detailed: boolean, - rarely_abort_writes: boolean -}) +client.snapshot.cleanupRepository(...) ---- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`blob_count` or `blobCount` -|`number` - Number of blobs to create during the test. Defaults to 100. - -|`concurrency` -|`number` - Number of operations to run concurrently during the test. Defaults to 10. - -|`read_node_count` or `readNodeCount` -|`number` - Number of nodes on which to read a blob after writing. Defaults to 10. - -|`early_read_node_count` or `earlyReadNodeCount` -|`number` - Number of nodes on which to perform an early read on a blob, i.e. before writing has completed. 
Early reads are rare actions so the 'rare_action_probability' parameter is also relevant. Defaults to 2. - -|`seed` -|`number` - Seed for the random number generator used to create the test workload. Defaults to a random value. - -|`rare_action_probability` or `rareActionProbability` -|`number` - Probability of taking a rare action such as an early read or an overwrite. Defaults to 0.02. - -|`max_blob_size` or `maxBlobSize` -|`string` - Maximum size of a blob to create during the test, e.g '1gb' or '100mb'. Defaults to '10mb'. -|`max_total_data_size` or `maxTotalDataSize` -|`string` - Maximum total size of all blobs to create during the test, e.g '1tb' or '100gb'. Defaults to '1gb'. - -|`timeout` -|`string` - Explicit operation timeout. Defaults to '30s'. - -|`detailed` -|`boolean` - Whether to return detailed results or a summary. Defaults to 'false' so that only the summary is returned. - -|`rarely_abort_writes` or `rarelyAbortWrites` -|`boolean` - Whether to rarely abort writes before they complete. Defaults to 'true'. - -|=== - -[discrete] -=== snapshot.restore +==== clone +Clones indices from one snapshot into another snapshot in the same repository. +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.restore({ - repository: string, - snapshot: string, - master_timeout: string, - wait_for_completion: boolean, - body: object -}) +client.snapshot.clone(...) ---- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`snapshot` -|`string` - A snapshot name - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should this request wait until the operation has completed before returning -|`body` -|`object` - Details of what to restore - -|=== - -[discrete] -=== snapshot.status +==== create +Creates a snapshot in a repository. +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.status({ - repository: string, - snapshot: string | string[], - master_timeout: string, - ignore_unavailable: boolean -}) +client.snapshot.create(...) ---- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`snapshot` -|`string \| string[]` - A comma-separated list of snapshot names - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown - -|=== - -[discrete] -=== snapshot.verifyRepository +==== create_repository +Creates a repository. +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.verifyRepository({ - repository: string, - master_timeout: string, - timeout: string -}) +client.snapshot.createRepository(...) ---- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout -|=== - -[discrete] -=== sql.clearCursor +==== delete +Deletes one or more snapshots. 
+https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.sql.clearCursor({ - body: object -}) +client.snapshot.delete(...) ---- -link:{ref}/clear-sql-cursor-api.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - Specify the cursor value in the `cursor` element to clean the cursor. - -|=== -[discrete] -=== sql.deleteAsync +==== delete_repository +Deletes a repository. +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.sql.deleteAsync({ - id: string -}) +client.snapshot.deleteRepository(...) ---- -link:{ref}/delete-async-sql-search-api.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The async search ID -|=== - -[discrete] -=== sql.getAsync +==== get +Returns information about a snapshot. +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.sql.getAsync({ - id: string, - delimiter: string, - format: string, - keep_alive: string, - wait_for_completion_timeout: string -}) +client.snapshot.get(...) ---- -link:{ref}/get-async-sql-search-api.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The async search ID - -|`delimiter` -|`string` - Separator for CSV results + -_Default:_ `,` - -|`format` -|`string` - Short version of the Accept header, e.g. json, yaml - -|`keep_alive` or `keepAlive` -|`string` - Retention period for the search and its results + -_Default:_ `5d` - -|`wait_for_completion_timeout` or `waitForCompletionTimeout` -|`string` - Duration to wait for complete results - -|=== -[discrete] -=== sql.getAsyncStatus +==== get_repository +Returns information about a repository. +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.sql.getAsyncStatus({ - id: string -}) +client.snapshot.getRepository(...) ---- -link:{ref}/get-async-sql-search-status-api.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The async search ID -|=== - -[discrete] -=== sql.query +==== repository_analyze +Analyzes a repository for correctness and performance +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.sql.query({ - format: string, - body: object -}) +client.snapshot.repositoryAnalyze(...) ---- -link:{ref}/sql-search-api.html[Documentation] + -{jsclient}/sql_query_examples.html[Code Example] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`body` -|`object` - Use the `query` element to start a query. Use the `cursor` element to continue a query. -|=== - -[discrete] -=== sql.translate +==== restore +Restores a snapshot. +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.sql.translate({ - body: object -}) +client.snapshot.restore(...) ---- -link:{ref}/sql-translate-api.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - Specify the query in the `query` element. - -|=== -[discrete] -=== ssl.certificates +==== status +Returns information about the status of a snapshot. +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.ssl.certificates() +client.snapshot.status(...) 
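+// Usage sketch; the repository and snapshot names are illustrative:
+// await client.snapshot.status({ repository: 'my_repository', snapshot: 'snapshot_1' })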
---- -link:{ref}/security-api-ssl.html[Documentation] + +==== verify_repository +Verifies a repository. -[discrete] -=== tasks.cancel -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.tasks.cancel({ - task_id: string, - nodes: string | string[], - actions: string | string[], - parent_task_id: string, - wait_for_completion: boolean -}) +client.snapshot.verifyRepository(...) ---- -link:{ref}/tasks.html[Documentation] + -[cols=2*] -|=== -|`task_id` or `taskId` -|`string` - Cancel the task with specified task id (node_id:task_number) - -|`nodes` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`actions` -|`string \| string[]` - A comma-separated list of actions that should be cancelled. Leave empty to cancel all. - -|`parent_task_id` or `parentTaskId` -|`string` - Cancel tasks with specified parent task id (node_id:task_number). Set to -1 to cancel all. - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should the request block until the cancellation of the task and its descendant tasks is completed. Defaults to false -|=== +=== sql +==== clear_cursor +Clears the SQL cursor -[discrete] -=== tasks.get -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-sql-cursor-api.html[Endpoint documentation] [source,ts] ---- -client.tasks.get({ - task_id: string, - wait_for_completion: boolean, - timeout: string -}) +client.sql.clearCursor(...) ---- -link:{ref}/tasks.html[Documentation] + -[cols=2*] -|=== -|`task_id` or `taskId` -|`string` - Return the task with specified id (node_id:task_number) -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Wait for the matching tasks to complete (default: false) +==== delete_async +Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. -|`timeout` -|`string` - Explicit operation timeout - -|=== - -[discrete] -=== tasks.list -*Stability:* experimental +https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-async-sql-search-api.html[Endpoint documentation] [source,ts] ---- -client.tasks.list({ - nodes: string | string[], - actions: string | string[], - detailed: boolean, - parent_task_id: string, - wait_for_completion: boolean, - group_by: 'nodes' | 'parents' | 'none', - timeout: string -}) +client.sql.deleteAsync(...) ---- -link:{ref}/tasks.html[Documentation] + -[cols=2*] -|=== -|`nodes` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`actions` -|`string \| string[]` - A comma-separated list of actions that should be returned. Leave empty to return all. - -|`detailed` -|`boolean` - Return detailed task information (default: false) - -|`parent_task_id` or `parentTaskId` -|`string` - Return tasks with specified parent task id (node_id:task_number). Set to -1 to return all. 
- -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Wait for the matching tasks to complete (default: false) - -|`group_by` or `groupBy` -|`'nodes' \| 'parents' \| 'none'` - Group tasks by nodes or parent/child relationships + -_Default:_ `nodes` - -|`timeout` -|`string` - Explicit operation timeout -|=== +==== get_async +Returns the current status and available results for an async SQL search or stored synchronous SQL search -[discrete] -=== termsEnum -*Stability:* beta +https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-api.html[Endpoint documentation] [source,ts] ---- -client.termsEnum({ - index: string | string[], - body: object -}) +client.sql.getAsync(...) ---- -link:{ref}/search-terms-enum.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices -|`body` -|`object` - field name, string which is the prefix expected in matching terms, timeout and size for max number of results - -|=== - -[discrete] -=== termvectors +==== get_async_status +Returns the current status of an async SQL search or a stored synchronous SQL search +https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-status-api.html[Endpoint documentation] [source,ts] ---- -client.termvectors({ - index: string, - id: string, - term_statistics: boolean, - field_statistics: boolean, - fields: string | string[], - offsets: boolean, - positions: boolean, - payloads: boolean, - preference: string, - routing: string, - realtime: boolean, - version: number, - version_type: 'internal' | 'external' | 'external_gte', - body: object -}) ----- -link:{ref}/docs-termvectors.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The index in which the document resides. - -|`id` -|`string` - The id of the document, when not specified a doc param should be supplied. - -|`term_statistics` or `termStatistics` -|`boolean` - Specifies if total term frequency and document frequency should be returned. - -|`field_statistics` or `fieldStatistics` -|`boolean` - Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. + -_Default:_ `true` - -|`fields` -|`string \| string[]` - A comma-separated list of fields to return. - -|`offsets` -|`boolean` - Specifies if term offsets should be returned. + -_Default:_ `true` - -|`positions` -|`boolean` - Specifies if term positions should be returned. + -_Default:_ `true` - -|`payloads` -|`boolean` - Specifies if term payloads should be returned. + -_Default:_ `true` - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random). - -|`routing` -|`string` - Specific routing value. - -|`realtime` -|`boolean` - Specifies if request is real-time as opposed to near-real-time (default: true). - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|`body` -|`object` - Define parameters and or supply a document to get termvectors for. See documentation. - -|=== +client.sql.getAsyncStatus(...) 
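+// Usage sketch; the `id` value is a placeholder for the identifier returned by the
+// original async SQL search:
+// await client.sql.getAsyncStatus({ id: '<async-search-id>' })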
+---- -[discrete] -=== textStructure.findStructure +==== query +Executes a SQL request +https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-search-api.html[Endpoint documentation] [source,ts] ---- -client.textStructure.findStructure({ - lines_to_sample: number, - line_merge_size_limit: number, - timeout: string, - charset: string, - format: 'ndjson' | 'xml' | 'delimited' | 'semi_structured_text', - has_header_row: boolean, - column_names: string | string[], - delimiter: string, - quote: string, - should_trim_fields: boolean, - grok_pattern: string, - timestamp_field: string, - timestamp_format: string, - explain: boolean, - body: object -}) +client.sql.query(...) ---- -link:{ref}/find-structure.html[Documentation] + -[cols=2*] -|=== -|`lines_to_sample` or `linesToSample` -|`number` - How many lines of the file should be included in the analysis + -_Default:_ `1000` - -|`line_merge_size_limit` or `lineMergeSizeLimit` -|`number` - Maximum number of characters permitted in a single message when lines are merged to create messages. + -_Default:_ `10000` - -|`timeout` -|`string` - Timeout after which the analysis will be aborted + -_Default:_ `25s` - -|`charset` -|`string` - Optional parameter to specify the character set of the file - -|`format` -|`'ndjson' \| 'xml' \| 'delimited' \| 'semi_structured_text'` - Optional parameter to specify the high level file format - -|`has_header_row` or `hasHeaderRow` -|`boolean` - Optional parameter to specify whether a delimited file includes the column names in its first row - -|`column_names` or `columnNames` -|`string \| string[]` - Optional parameter containing a comma separated list of the column names for a delimited file - -|`delimiter` -|`string` - Optional parameter to specify the delimiter character for a delimited file - must be a single character - -|`quote` -|`string` - Optional parameter to specify the quote character for a delimited file - must be a single character - -|`should_trim_fields` or `shouldTrimFields` -|`boolean` - Optional parameter to specify whether the values between delimiters in a delimited file should have whitespace trimmed from them - -|`grok_pattern` or `grokPattern` -|`string` - Optional parameter to specify the Grok pattern that should be used to extract fields from messages in a semi-structured text file - -|`timestamp_field` or `timestampField` -|`string` - Optional parameter to specify the timestamp field in the file - -|`timestamp_format` or `timestampFormat` -|`string` - Optional parameter to specify the timestamp format in the file - may be either a Joda or Java time format - -|`explain` -|`boolean` - Whether to include a commentary on how the structure was derived - -|`body` -|`object` - The contents of the file to be analyzed -|=== - -[discrete] -=== transform.deleteTransform +==== translate +Translates SQL into Elasticsearch queries +https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate-api.html[Endpoint documentation] [source,ts] ---- -client.transform.deleteTransform({ - transform_id: string, - force: boolean -}) +client.sql.translate(...) ---- -link:{ref}/delete-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform to delete - -|`force` -|`boolean` - When `true`, the transform is deleted regardless of its current state. The default value is `false`, meaning that the transform must be `stopped` before it can be deleted. 
-|=== - -[discrete] -=== transform.getTransform +=== ssl +==== certificates +Retrieves information about the X.509 certificates used to encrypt communications in the cluster. +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl.html[Endpoint documentation] [source,ts] ---- -client.transform.getTransform({ - transform_id: string, - from: number, - size: number, - allow_no_match: boolean, - exclude_generated: boolean -}) +client.ssl.certificates(...) ---- -link:{ref}/get-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id or comma delimited list of id expressions of the transforms to get, '_all' or '*' implies get all transforms - -|`from` -|`number` - skips a number of transform configs, defaults to 0 - -|`size` -|`number` - specifies a max number of transforms to get, defaults to 100 - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified) - -|`exclude_generated` or `excludeGenerated` -|`boolean` - Omits fields that are illegal to set on transform PUT - -|=== -[discrete] -=== transform.getTransformStats +=== tasks +==== cancel +Cancels a task, if it can be cancelled through an API. +https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoint documentation] [source,ts] ---- -client.transform.getTransformStats({ - transform_id: string, - from: number, - size: number, - allow_no_match: boolean -}) +client.tasks.cancel(...) ---- -link:{ref}/get-transform-stats.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform for which to get stats. '_all' or '*' implies all transforms -|`from` -|`number` - skips a number of transform stats, defaults to 0 - -|`size` -|`number` - specifies a max number of transform stats to get, defaults to 100 - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified) - -|=== - -[discrete] -=== transform.previewTransform +==== get +Returns information about a task. +https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoint documentation] [source,ts] ---- -client.transform.previewTransform({ - body: object -}) +client.tasks.get(...) ---- -link:{ref}/preview-transform.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The definition for the transform to preview -|=== - -[discrete] -=== transform.putTransform +==== list +Returns a list of tasks. +https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoint documentation] [source,ts] ---- -client.transform.putTransform({ - transform_id: string, - defer_validation: boolean, - body: object -}) +client.tasks.list(...) ---- -link:{ref}/put-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the new transform. - -|`defer_validation` or `deferValidation` -|`boolean` - If validations should be deferred until transform starts, defaults to false. - -|`body` -|`object` - The transform definition - -|=== -[discrete] -=== transform.startTransform +=== text_structure +==== find_structure +Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. 
+https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html[Endpoint documentation] [source,ts] ---- -client.transform.startTransform({ - transform_id: string, - timeout: string -}) +client.textStructure.findStructure(...) ---- -link:{ref}/start-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform to start -|`timeout` -|`string` - Controls the time to wait for the transform to start - -|=== - -[discrete] -=== transform.stopTransform +=== transform +==== delete_transform +Deletes an existing transform. +https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html[Endpoint documentation] [source,ts] ---- -client.transform.stopTransform({ - transform_id: string, - force: boolean, - wait_for_completion: boolean, - timeout: string, - allow_no_match: boolean, - wait_for_checkpoint: boolean -}) +client.transform.deleteTransform(...) ---- -link:{ref}/stop-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform to stop - -|`force` -|`boolean` - Whether to force stop a failed transform or not. Default to false - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Whether to wait for the transform to fully stop before returning or not. Default to false - -|`timeout` -|`string` - Controls the time to wait until the transform has stopped. Default to 30 seconds - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified) - -|`wait_for_checkpoint` or `waitForCheckpoint` -|`boolean` - Whether to wait for the transform to reach a checkpoint before stopping. Default to false - -|=== -[discrete] -=== transform.updateTransform +==== get_transform +Retrieves configuration information for transforms. +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html[Endpoint documentation] [source,ts] ---- -client.transform.updateTransform({ - transform_id: string, - defer_validation: boolean, - body: object -}) +client.transform.getTransform(...) ---- -link:{ref}/update-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform. -|`defer_validation` or `deferValidation` -|`boolean` - If validations should be deferred until transform starts, defaults to false. - -|`body` -|`object` - The update transform definition - -|=== - -[discrete] -=== update +==== get_transform_stats +Retrieves usage information for transforms. +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html[Endpoint documentation] [source,ts] ---- -client.update({ - id: string, - index: string, - type: string, - wait_for_active_shards: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - lang: string, - refresh: 'true' | 'false' | 'wait_for', - retry_on_conflict: number, - routing: string, - timeout: string, - if_seq_no: number, - if_primary_term: number, - require_alias: boolean, - body: object -}) ----- -link:{ref}/docs-update.html[Documentation] + -{jsclient}/update_examples.html[Code Example] + -[cols=2*] -|=== -|`id` -|`string` - Document ID - -|`index` -|`string` - The name of the index - -|`type` -|`string` - The type of the document + - -WARNING: This parameter has been deprecated. 
- -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the update operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`lang` -|`string` - The script language (default: painless) - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. - -|`retry_on_conflict` or `retryOnConflict` -|`number` - Specify how many times should the operation be retried when a conflict occurs (default: 0) - -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`if_seq_no` or `ifSeqNo` -|`number` - only perform the update operation if the last operation that has changed the document has the specified sequence number - -|`if_primary_term` or `ifPrimaryTerm` -|`number` - only perform the update operation if the last operation that has changed the document has the specified primary term - -|`require_alias` or `requireAlias` -|`boolean` - When true, requires destination is an alias. Default is false - -|`body` -|`object` - The request definition requires either `script` or partial `doc` - -|=== - -[discrete] -=== updateByQuery - -[source,ts] ----- -client.updateByQuery({ - index: string | string[], - analyzer: string, - analyze_wildcard: boolean, - default_operator: 'AND' | 'OR', - df: string, - from: number, - ignore_unavailable: boolean, - allow_no_indices: boolean, - conflicts: 'abort' | 'proceed', - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - lenient: boolean, - pipeline: string, - preference: string, - q: string, - routing: string | string[], - scroll: string, - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - search_timeout: string, - max_docs: number, - sort: string | string[], - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - terminate_after: number, - stats: string | string[], - version: boolean, - version_type: boolean, - request_cache: boolean, - refresh: boolean, - timeout: string, - wait_for_active_shards: string, - scroll_size: number, - wait_for_completion: boolean, - requests_per_second: number, - slices: number|string, - body: object -}) +client.transform.getTransformStats(...) 
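+// Usage sketch; the transform ID is illustrative:
+// await client.transform.getTransformStats({ transform_id: 'my-transform' })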
---- -link:{ref}/docs-update-by-query.html[Documentation] + -{jsclient}/update_by_query_examples.html[Code Example] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`analyzer` -|`string` - The analyzer to use for the query string - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false) - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The field to use as default where no field prefix is given in the query string -|`from` -|`number` - Starting offset (default: 0) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`conflicts` -|`'abort' \| 'proceed'` - What to do when the update by query hits version conflicts? + -_Default:_ `abort` - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`pipeline` -|`string` - Ingest pipeline to set on index requests made by this action. (default: none) - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`q` -|`string` - Query in the Lucene query string syntax - -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values - -|`scroll` -|`string` - Specify how long a consistent view of the index should be maintained for scrolled search - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type - -|`search_timeout` or `searchTimeout` -|`string` - Explicit timeout for each search request. Defaults to no timeout. - -|`max_docs` or `maxDocs` -|`number` - Maximum number of documents to process (default: all documents) - -|`sort` -|`string \| string[]` - A comma-separated list of : pairs - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`terminate_after` or `terminateAfter` -|`number` - The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. 
- -|`stats` -|`string \| string[]` - Specific 'tag' of the request for logging and statistical purposes - -|`version` -|`boolean` - Specify whether to return document version as part of a hit - -|`version_type` or `versionType` -|`boolean` - Should the document increment the version number (internal) on hit or not (reindex) - -|`request_cache` or `requestCache` -|`boolean` - Specify if request cache should be used for this request or not, defaults to index level setting - -|`refresh` -|`boolean` - Should the affected indexes be refreshed? - -|`timeout` -|`string` - Time each individual bulk request should wait for shards that are unavailable. + -_Default:_ `1m` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the update by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`scroll_size` or `scrollSize` -|`number` - Size on the scroll request powering the update by query + -_Default:_ `100` - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should the request should block until the update by query operation is complete. + -_Default:_ `true` - -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle to set on this request in sub-requests per second. -1 means no throttle. - -|`slices` -|`number\|string` - The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`. + -_Default:_ `1` - -|`body` -|`object` - The search definition using the Query DSL - -|=== - -[discrete] -=== updateByQueryRethrottle +==== preview_transform +Previews a transform. +https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html[Endpoint documentation] [source,ts] ---- -client.updateByQueryRethrottle({ - task_id: string, - requests_per_second: number -}) +client.transform.previewTransform(...) ---- -link:{ref}/docs-update-by-query.html[Documentation] + -[cols=2*] -|=== -|`task_id` or `taskId` -|`string` - The task id to rethrottle -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle to set on this request in floating sub-requests per second. -1 means set no throttle. - -|=== - -[discrete] -=== watcher.ackWatch +==== put_transform +Instantiates a transform. +https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html[Endpoint documentation] [source,ts] ---- -client.watcher.ackWatch({ - watch_id: string, - action_id: string | string[] -}) +client.transform.putTransform(...) ---- -link:{ref}/watcher-api-ack-watch.html[Documentation] + -[cols=2*] -|=== -|`watch_id` or `watchId` -|`string` - Watch ID - -|`action_id` or `actionId` -|`string \| string[]` - A comma-separated list of the action ids to be acked - -|=== - -[discrete] -=== watcher.activateWatch +==== reset_transform +Resets an existing transform. [source,ts] ---- -client.watcher.activateWatch({ - watch_id: string -}) +client.transform.resetTransform(...) ---- -link:{ref}/watcher-api-activate-watch.html[Documentation] + -[cols=2*] -|=== -|`watch_id` or `watchId` -|`string` - Watch ID - -|=== -[discrete] -=== watcher.deactivateWatch +==== start_transform +Starts one or more transforms. 
+https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html[Endpoint documentation] [source,ts] ---- -client.watcher.deactivateWatch({ - watch_id: string -}) +client.transform.startTransform(...) ---- -link:{ref}/watcher-api-deactivate-watch.html[Documentation] + -[cols=2*] -|=== -|`watch_id` or `watchId` -|`string` - Watch ID -|=== - -[discrete] -=== watcher.deleteWatch +==== stop_transform +Stops one or more transforms. +https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html[Endpoint documentation] [source,ts] ---- -client.watcher.deleteWatch({ - id: string -}) +client.transform.stopTransform(...) ---- -link:{ref}/watcher-api-delete-watch.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Watch ID - -|=== -[discrete] -=== watcher.executeWatch +==== update_transform +Updates certain properties of a transform. +https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html[Endpoint documentation] [source,ts] ---- -client.watcher.executeWatch({ - id: string, - debug: boolean, - body: object -}) +client.transform.updateTransform(...) ---- -link:{ref}/watcher-api-execute-watch.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Watch ID -|`debug` -|`boolean` - indicates whether the watch should execute in debug mode +==== upgrade_transforms +Upgrades all transforms. -|`body` -|`object` - Execution control - -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/upgrade-transforms.html[Endpoint documentation] +[source,ts] +---- +client.transform.upgradeTransforms(...) +---- -[discrete] -=== watcher.getWatch +=== watcher +==== ack_watch +Acknowledges a watch, manually throttling the execution of the watch's actions. +https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html[Endpoint documentation] [source,ts] ---- -client.watcher.getWatch({ - id: string -}) +client.watcher.ackWatch(...) ---- -link:{ref}/watcher-api-get-watch.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Watch ID -|=== - -[discrete] -=== watcher.putWatch +==== activate_watch +Activates a currently inactive watch. +https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html[Endpoint documentation] [source,ts] ---- -client.watcher.putWatch({ - id: string, - active: boolean, - version: number, - if_seq_no: number, - if_primary_term: number, - body: object -}) +client.watcher.activateWatch(...) ---- -link:{ref}/watcher-api-put-watch.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Watch ID -|`active` -|`boolean` - Specify whether the watch is in/active by default +==== deactivate_watch +Deactivates a currently active watch. -|`version` -|`number` - Explicit version number for concurrency control +https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html[Endpoint documentation] +[source,ts] +---- +client.watcher.deactivateWatch(...) +---- -|`if_seq_no` or `ifSeqNo` -|`number` - only update the watch if the last operation that has changed the watch has the specified sequence number +==== delete_watch +Removes a watch from Watcher. -|`if_primary_term` or `ifPrimaryTerm` -|`number` - only update the watch if the last operation that has changed the watch has the specified primary term +https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html[Endpoint documentation] +[source,ts] +---- +client.watcher.deleteWatch(...) 
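+// Usage sketch; the watch ID is illustrative:
+// await client.watcher.deleteWatch({ id: 'my_watch' })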
+---- -|`body` -|`object` - The watch +==== execute_watch +Forces the execution of a stored watch. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html[Endpoint documentation] +[source,ts] +---- +client.watcher.executeWatch(...) +---- -[discrete] -=== watcher.queryWatches +==== get_watch +Retrieves a watch by its ID. +https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html[Endpoint documentation] [source,ts] ---- -client.watcher.queryWatches({ - body: object -}) +client.watcher.getWatch(...) ---- -link:{ref}/watcher-api-query-watches.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - From, size, query, sort and search_after -|=== - -[discrete] -=== watcher.start +==== put_watch +Creates a new watch, or updates an existing one. +https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html[Endpoint documentation] [source,ts] ---- -client.watcher.start() +client.watcher.putWatch(...) ---- -link:{ref}/watcher-api-start.html[Documentation] + - -[discrete] -=== watcher.stats +==== query_watches +Retrieves stored watches. +https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-query-watches.html[Endpoint documentation] [source,ts] ---- -client.watcher.stats({ - metric: string | string[], - emit_stacktraces: boolean -}) +client.watcher.queryWatches(...) ---- -link:{ref}/watcher-api-stats.html[Documentation] + -[cols=2*] -|=== -|`metric` -|`string \| string[]` - Controls what additional stat metrics should be include in the response -|`emit_stacktraces` or `emitStacktraces` -|`boolean` - Emits stack traces of currently running watches +==== start +Starts Watcher if it is not already running. -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html[Endpoint documentation] +[source,ts] +---- +client.watcher.start(...) +---- -[discrete] -=== watcher.stop +==== stats +Retrieves the current Watcher metrics. +https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html[Endpoint documentation] [source,ts] ---- -client.watcher.stop() +client.watcher.stats(...) ---- -link:{ref}/watcher-api-stop.html[Documentation] + - -[discrete] -=== xpack.info +==== stop +Stops Watcher if it is running. +https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html[Endpoint documentation] [source,ts] ---- -client.xpack.info({ - categories: string | string[], - accept_enterprise: boolean -}) +client.watcher.stop(...) ---- -link:{ref}/info-api.html[Documentation] + -[cols=2*] -|=== -|`categories` -|`string \| string[]` - Comma-separated list of info categories. Can be any of: build, license, features -|`accept_enterprise` or `acceptEnterprise` -|`boolean` - If this param is used it must be set to true + +=== xpack +==== info +Retrieves information about the installed X-Pack features. -WARNING: This parameter has been deprecated. - -|=== +https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html[Endpoint documentation] +[source,ts] +---- +client.xpack.info(...) +---- -[discrete] -=== xpack.usage +==== usage +Retrieves usage information about the installed X-Pack features. +https://www.elastic.co/guide/en/elasticsearch/reference/current/usage-api.html[Endpoint documentation] [source,ts] ---- -client.xpack.usage({ - master_timeout: string -}) +client.xpack.usage(...) 
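+// Usage sketch (all parameters are optional):
+// await client.xpack.usage()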
----
-link:{ref}/usage-api.html[Documentation] +
-[cols=2*]
-|===
-|`master_timeout` or `masterTimeout`
-|`string` - Specify timeout for watch write operation
-|===

From af97ece807a7e350753eb68097609e5652df6a81 Mon Sep 17 00:00:00 2001
From: Tomas Della Vedova
Date: Thu, 3 Mar 2022 16:10:32 +0100
Subject: [PATCH 153/647] Export SniffingTransport (#1648)

---
 index.d.ts | 3 ++-
 index.js | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/index.d.ts b/index.d.ts
index 05de61f8d..8fb595c75 100644
--- a/index.d.ts
+++ b/index.d.ts
@@ -18,7 +18,8 @@
 */

import Client from './lib/client'
+import SniffingTransport from './lib/sniffingTransport'
export * from '@elastic/transport'
-export { Client }
+export { Client, SniffingTransport }
export type { ClientOptions, NodeOptions } from './lib/client'

diff --git a/index.js b/index.js
index 57068f4b2..0bf3da3da 100644
--- a/index.js
+++ b/index.js
@@ -35,9 +35,11 @@ const {
} = require('@elastic/transport')

const { default: Client } = require('./lib/client')
+const { default: SniffingTransport } = require('./lib/sniffingTransport')

module.exports = {
  Client,
+  SniffingTransport,
  Diagnostic,
  Transport,
  WeightedConnectionPool,

From 4f1713c894bb1733ad0509cbf6db74a93d093266 Mon Sep 17 00:00:00 2001
From: James Rodewig
Date: Thu, 3 Mar 2022 11:46:45 -0500
Subject: [PATCH 154/647] [DOCS] Add discrete tags to API ref (#1654)

---
 docs/reference.asciidoc | 424 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 424 insertions(+)

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index 4c82b6382..e24ba7a18 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -25,6 +25,7 @@
////////

== API Reference

+[discrete]
=== bulk
Allows to perform multiple index/update/delete operations in a single request.

https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html[Endpoint documentation]
[source,ts]
----
client.bulk(...)
----

+[discrete]
=== clear_scroll
Explicitly clears the search context for a scroll.

https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-scroll-api.html[Endpoint documentation]
[source,ts]
----
client.clearScroll(...)
----

+[discrete]
=== close_point_in_time
Close a point in time

https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html[Endpoint documentation]
[source,ts]
----
client.closePointInTime(...)
----

+[discrete]
=== count
Returns number of documents matching a query.

https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html[Endpoint documentation]
[source,ts]
----
client.count(...)
----

+[discrete]
=== create
Creates a new document in the index.

Returns a 409 response when a document with a same ID already exists in the index.

https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html[Endpoint documentation]
[source,ts]
----
client.create(...)
----

+[discrete]
=== delete
Removes a document from the index.

https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html[Endpoint documentation]
[source,ts]
----
client.delete(...)
----

+[discrete]
=== delete_by_query
Deletes documents matching the provided query.

https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html[Endpoint documentation]
[source,ts]
----
client.deleteByQuery(...)
----

+[discrete]
=== delete_by_query_rethrottle
Changes the number of requests per second for a particular Delete By Query operation.

https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html[Endpoint documentation]
[source,ts]
----
client.deleteByQueryRethrottle(...)
----

+[discrete]
=== delete_script
Deletes a script.

https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.deleteScript(...)
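// Usage sketch; the stored script ID is illustrative:
// await client.deleteScript({ id: 'my-stored-script' })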
---- +[discrete] === exists Returns information about whether a document exists in an index. @@ -117,6 +127,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[End client.exists(...) ---- +[discrete] === exists_source Returns information about whether a document source exists in an index. @@ -126,6 +137,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[End client.existsSource(...) ---- +[discrete] === explain Returns information about why a specific matches (or doesn't match) a query. @@ -135,6 +147,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.ht client.explain(...) ---- +[discrete] === field_caps Returns the information about the capabilities of fields among multiple indices. @@ -144,6 +157,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps client.fieldCaps(...) ---- +[discrete] === get Returns a document. @@ -153,6 +167,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[End client.get(...) ---- +[discrete] === get_script Returns a script. @@ -162,6 +177,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting client.getScript(...) ---- +[discrete] === get_script_context Returns all script contexts. @@ -171,6 +187,7 @@ https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts. client.getScriptContext(...) ---- +[discrete] === get_script_languages Returns available script types, languages and contexts @@ -180,6 +197,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting client.getScriptLanguages(...) ---- +[discrete] === get_source Returns the source of a document. @@ -189,6 +207,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[End client.getSource(...) ---- +[discrete] === index Creates or updates a document in an index. @@ -198,6 +217,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html[ client.index(...) ---- +[discrete] === info Returns basic information about the cluster. @@ -207,6 +227,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html[Endpo client.info(...) ---- +[discrete] === knn_search Performs a kNN search. @@ -216,6 +237,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.htm client.knnSearch(...) ---- +[discrete] === mget Allows to get multiple documents in one request. @@ -225,6 +247,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.ht client.mget(...) ---- +[discrete] === msearch Allows to execute several search operations in one request. @@ -234,6 +257,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-sear client.msearch(...) ---- +[discrete] === msearch_template Allows to execute several search template operations in one request. @@ -243,6 +267,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-sea client.msearchTemplate(...) ---- +[discrete] === mtermvectors Returns multiple termvectors in one request. @@ -252,6 +277,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-termve client.mtermvectors(...) ---- +[discrete] === open_point_in_time Open a point in time that can be used in subsequent searches @@ -261,6 +287,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api client.openPointInTime(...) 
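// Usage sketch; the index name and keep-alive value are illustrative:
// const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })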
---- +[discrete] === ping Returns whether the cluster is running. @@ -270,6 +297,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html[Endpo client.ping(...) ---- +[discrete] === put_script Creates or updates a script. @@ -279,6 +307,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting client.putScript(...) ---- +[discrete] === rank_eval Allows to evaluate the quality of ranked search results over a set of typical search queries @@ -288,6 +317,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval. client.rankEval(...) ---- +[discrete] === reindex Allows to copy documents from one index to another, optionally filtering the source documents by a query, changing the destination index settings, or fetching the @@ -299,6 +329,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html client.reindex(...) ---- +[discrete] === reindex_rethrottle Changes the number of requests per second for a particular Reindex operation. @@ -308,6 +339,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html client.reindexRethrottle(...) ---- +[discrete] === render_search_template Allows to use the Mustache language to pre-render a search definition. @@ -317,6 +349,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-te client.renderSearchTemplate(...) ---- +[discrete] === scripts_painless_execute Allows an arbitrary script to be executed and a result to be returned @@ -326,6 +359,7 @@ https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-a client.scriptsPainlessExecute(...) ---- +[discrete] === scroll Allows to retrieve a large numbers of results from a single search request. @@ -335,6 +369,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-bo client.scroll(...) ---- +[discrete] === search Returns results matching a query. @@ -344,6 +379,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.htm client.search(...) ---- +[discrete] === search_mvt Searches a vector tile for geospatial values. Returns results as a binary Mapbox vector tile. @@ -353,6 +389,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/search-vector-til client.searchMvt(...) ---- +[discrete] === search_shards Returns information about the indices and shards that a search request would be executed against. @@ -362,6 +399,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/search-shards.htm client.searchShards(...) ---- +[discrete] === search_template Allows to use the Mustache language to pre-render a search definition. @@ -371,6 +409,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template. client.searchTemplate(...) ---- +[discrete] === terms_enum The terms enum API can be used to discover terms in the index that begin with the provided string. It is designed for low-latency look-ups used in auto-complete scenarios. @@ -380,6 +419,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enu client.termsEnum(...) ---- +[discrete] === termvectors Returns information and statistics about terms in the fields of a particular document. @@ -389,6 +429,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors. client.termvectors(...) ---- +[discrete] === update Updates a document with a script or partial document. 
@@ -398,6 +439,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html[
 client.update(...)
 ----
+[discrete]
 === update_by_query
 Performs an update on every document in the index without changing the source, for example to pick up a mapping change.
@@ -408,6 +450,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-qu
 client.updateByQuery(...)
 ----
+[discrete]
 === update_by_query_rethrottle
 Changes the number of requests per second for a particular Update By Query operation.
@@ -417,7 +460,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-q
 client.updateByQueryRethrottle(...)
 ----
+[discrete]
 === async_search
+[discrete]
 ==== delete
 Deletes an async search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted.
@@ -427,6 +472,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html
 client.asyncSearch.delete(...)
 ----
+[discrete]
 ==== get
 Retrieves the results of a previously submitted async search request given its ID.
@@ -436,6 +482,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html
 client.asyncSearch.get(...)
 ----
+[discrete]
 ==== status
 Retrieves the status of a previously submitted async search request given its ID.
@@ -445,6 +492,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html
 client.asyncSearch.status(...)
 ----
+[discrete]
 ==== submit
 Executes a search request asynchronously.
@@ -454,7 +502,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html
 client.asyncSearch.submit(...)
 ----
+[discrete]
 === cat
+[discrete]
 ==== aliases
 Shows information about currently configured aliases to indices including filter and routing infos.
@@ -464,6 +514,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html[En
 client.cat.aliases(...)
 ----
+[discrete]
 ==== allocation
 Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using.
@@ -473,6 +524,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.ht
 client.cat.allocation(...)
 ----
+[discrete]
 ==== count
 Provides quick access to the document count of the entire cluster, or individual indices.
@@ -482,6 +534,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html[En
 client.cat.count(...)
 ----
+[discrete]
 ==== fielddata
 Shows how much heap memory is currently being used by fielddata on every data node in the cluster.
@@ -491,6 +544,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.htm
 client.cat.fielddata(...)
 ----
+[discrete]
 ==== health
 Returns a concise representation of the cluster health.
@@ -500,6 +554,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html[E
 client.cat.health(...)
 ----
+[discrete]
 ==== help
 Returns help for the Cat APIs.
@@ -509,6 +564,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html[Endpoint
 client.cat.help(...)
 ----
+[discrete]
 ==== indices
 Returns information about indices: number of primaries and replicas, document counts, disk size, ...
@@ -518,6 +574,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html[
 client.cat.indices(...)
 ----
+[discrete]
 ==== master
 Returns information about the master node.
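The cat APIs return tab-separated text by default; when driving them from code it is usually easier to request structured output, roughly like this (a sketch; `format` is a standard cat query option):

[source,ts]
----
// Ask a cat API for JSON instead of the default text table.
const indices = await client.cat.indices({ format: 'json' })
----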
@@ -527,6 +584,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-master.html[E
 client.cat.master(...)
 ----
+[discrete]
 ==== ml_data_frame_analytics
 Gets configuration and usage information about data frame analytics jobs.
@@ -536,6 +594,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-dfanalytics.h
 client.cat.mlDataFrameAnalytics(...)
 ----
+[discrete]
 ==== ml_datafeeds
 Gets configuration and usage information about datafeeds.
@@ -545,6 +604,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-datafeeds.htm
 client.cat.mlDatafeeds(...)
 ----
+[discrete]
 ==== ml_jobs
 Gets configuration and usage information about anomaly detection jobs.
@@ -554,6 +614,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-anomaly-detec
 client.cat.mlJobs(...)
 ----
+[discrete]
 ==== ml_trained_models
 Gets configuration and usage information about inference trained models.
@@ -563,6 +624,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-trained-model
 client.cat.mlTrainedModels(...)
 ----
+[discrete]
 ==== nodeattrs
 Returns information about custom node attributes.
@@ -572,6 +634,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.htm
 client.cat.nodeattrs(...)
 ----
+[discrete]
 ==== nodes
 Returns basic statistics about performance of cluster nodes.
@@ -581,6 +644,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html[En
 client.cat.nodes(...)
 ----
+[discrete]
 ==== pending_tasks
 Returns a concise representation of the cluster pending tasks.
@@ -590,6 +654,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks
 client.cat.pendingTasks(...)
 ----
+[discrete]
 ==== plugins
 Returns information about installed plugins across nodes.
@@ -599,6 +664,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html[
 client.cat.plugins(...)
 ----
+[discrete]
 ==== recovery
 Returns information about index shard recoveries, both on-going and completed.
@@ -608,6 +674,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html
 client.cat.recovery(...)
 ----
+[discrete]
 ==== repositories
 Returns information about snapshot repositories registered in the cluster.
@@ -617,6 +684,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.
 client.cat.repositories(...)
 ----
+[discrete]
 ==== segments
 Provides low-level information about the segments in the shards of an index.
@@ -626,6 +694,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html
 client.cat.segments(...)
 ----
+[discrete]
 ==== shards
 Provides a detailed view of shard allocation on nodes.
@@ -635,6 +704,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html[E
 client.cat.shards(...)
 ----
+[discrete]
 ==== snapshots
 Returns all snapshots in a specific repository.
@@ -644,6 +714,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.htm
 client.cat.snapshots(...)
 ----
+[discrete]
 ==== tasks
 Returns information about the tasks currently executing on one or more nodes in the cluster.
@@ -653,6 +724,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoi
 client.cat.tasks(...)
 ----
+[discrete]
 ==== templates
 Returns information about existing templates.
@@ -662,6 +734,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.htm
 client.cat.templates(...)
 ----
+[discrete]
 ==== thread_pool
 Returns cluster-wide thread pool statistics per node. By default the active, queue and rejected statistics are returned for all thread pools.
@@ -672,6 +745,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.h
 client.cat.threadPool(...)
 ----
+[discrete]
 ==== transforms
 Gets configuration and usage information about transforms.
@@ -681,7 +755,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-transforms.ht
 client.cat.transforms(...)
 ----
+[discrete]
 === ccr
+[discrete]
 ==== delete_auto_follow_pattern
 Deletes auto-follow patterns.
@@ -691,6 +767,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-delete-auto-f
 client.ccr.deleteAutoFollowPattern(...)
 ----
+[discrete]
 ==== follow
 Creates a new follower index configured to follow the referenced leader index.
@@ -700,6 +777,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-follow.ht
 client.ccr.follow(...)
 ----
+[discrete]
 ==== follow_info
 Retrieves information about all follower indices, including parameters and status for each follower index
@@ -709,6 +787,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-in
 client.ccr.followInfo(...)
 ----
+[discrete]
 ==== follow_stats
 Retrieves follower stats. Returns shard-level stats about the following tasks associated with each shard for the specified indices.
@@ -718,6 +797,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-st
 client.ccr.followStats(...)
 ----
+[discrete]
 ==== forget_follower
 Removes the follower retention leases from the leader.
@@ -727,6 +807,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-forget-f
 client.ccr.forgetFollower(...)
 ----
+[discrete]
 ==== get_auto_follow_pattern
 Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection.
@@ -736,6 +817,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-auto-foll
 client.ccr.getAutoFollowPattern(...)
 ----
+[discrete]
 ==== pause_auto_follow_pattern
 Pauses an auto-follow pattern
@@ -745,6 +827,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-pause-auto-fo
 client.ccr.pauseAutoFollowPattern(...)
 ----
+[discrete]
 ==== pause_follow
 Pauses a follower index. The follower index will not fetch any additional operations from the leader index.
@@ -754,6 +837,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-pause-fo
 client.ccr.pauseFollow(...)
 ----
+[discrete]
 ==== put_auto_follow_pattern
 Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices.
@@ -763,6 +847,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-auto-foll
 client.ccr.putAutoFollowPattern(...)
 ----
+[discrete]
 ==== resume_auto_follow_pattern
 Resumes an auto-follow pattern that has been paused
@@ -772,6 +857,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-resume-auto-f
 client.ccr.resumeAutoFollowPattern(...)
 ----
+[discrete]
 ==== resume_follow
 Resumes a follower index that has been paused
@@ -781,6 +867,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-resume-f
 client.ccr.resumeFollow(...)
 ----
+[discrete]
 ==== stats
 Gets all stats related to cross-cluster replication.
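As a quick illustration of the call shape for this namespace (a sketch; the follower index name is a placeholder):

[source,ts]
----
// Cluster-wide CCR stats, then shard-level stats for one follower index.
const stats = await client.ccr.stats()
const followStats = await client.ccr.followStats({ index: 'follower-index' })
----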
@@ -790,6 +877,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-stats.htm
 client.ccr.stats(...)
 ----
+[discrete]
 ==== unfollow
 Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication.
@@ -799,7 +887,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-unfollow
 client.ccr.unfollow(...)
 ----
+[discrete]
 === cluster
+[discrete]
 ==== allocation_explain
 Provides explanations for shard allocations in the cluster.
@@ -809,6 +899,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocatio
 client.cluster.allocationExplain(...)
 ----
+[discrete]
 ==== delete_component_template
 Deletes a component template
@@ -818,6 +909,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component
 client.cluster.deleteComponentTemplate(...)
 ----
+[discrete]
 ==== delete_voting_config_exclusions
 Clears cluster voting config exclusions.
@@ -827,6 +919,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exc
 client.cluster.deleteVotingConfigExclusions(...)
 ----
+[discrete]
 ==== exists_component_template
 Returns information about whether a particular component template exists
@@ -836,6 +929,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component
 client.cluster.existsComponentTemplate(...)
 ----
+[discrete]
 ==== get_component_template
 Returns one or more component templates
@@ -845,6 +939,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component
 client.cluster.getComponentTemplate(...)
 ----
+[discrete]
 ==== get_settings
 Returns cluster settings.
@@ -854,6 +949,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-get-setti
 client.cluster.getSettings(...)
 ----
+[discrete]
 ==== health
 Returns basic information about the health of the cluster.
@@ -863,6 +959,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.ht
 client.cluster.health(...)
 ----
+[discrete]
 ==== pending_tasks
 Returns a list of any cluster-level changes (e.g. create index, update mapping, allocate or fail shard) which have not yet been executed.
@@ -873,6 +970,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.h
 client.cluster.pendingTasks(...)
 ----
+[discrete]
 ==== post_voting_config_exclusions
 Updates the cluster voting config exclusions by node ids or node names.
@@ -882,6 +980,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exc
 client.cluster.postVotingConfigExclusions(...)
 ----
+[discrete]
 ==== put_component_template
 Creates or updates a component template
@@ -891,6 +990,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component
 client.cluster.putComponentTemplate(...)
 ----
+[discrete]
 ==== put_settings
 Updates the cluster settings.
@@ -900,6 +1000,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-se
 client.cluster.putSettings(...)
 ----
+[discrete]
 ==== remote_info
 Returns the information about configured remote clusters.
@@ -909,6 +1010,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-in
 client.cluster.remoteInfo(...)
 ----
+[discrete]
 ==== reroute
 Allows to manually change the allocation of individual shards in the cluster.
@@ -918,6 +1020,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-reroute.h
 client.cluster.reroute(...)
 ----
+[discrete]
 ==== state
 Returns comprehensive information about the state of the cluster.
@@ -927,6 +1030,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.htm
 client.cluster.state(...)
 ----
+[discrete]
 ==== stats
 Returns high-level overview of cluster statistics.
@@ -936,7 +1040,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.htm
 client.cluster.stats(...)
 ----
+[discrete]
 === dangling_indices
+[discrete]
 ==== delete_dangling_index
 Deletes the specified dangling index
@@ -946,6 +1052,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-d
 client.danglingIndices.deleteDanglingIndex(...)
 ----
+[discrete]
 ==== import_dangling_index
 Imports the specified dangling index
@@ -955,6 +1062,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-d
 client.danglingIndices.importDanglingIndex(...)
 ----
+[discrete]
 ==== list_dangling_indices
 Returns all dangling indices.
@@ -964,7 +1072,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-d
 client.danglingIndices.listDanglingIndices(...)
 ----
+[discrete]
 === enrich
+[discrete]
 ==== delete_policy
 Deletes an existing enrich policy and its enrich index.
@@ -974,6 +1084,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-enrich-po
 client.enrich.deletePolicy(...)
 ----
+[discrete]
 ==== execute_policy
 Creates the enrich index for an existing enrich policy.
@@ -983,6 +1094,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/execute-enrich-p
 client.enrich.executePolicy(...)
 ----
+[discrete]
 ==== get_policy
 Gets information about an enrich policy.
@@ -992,6 +1104,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/get-enrich-polic
 client.enrich.getPolicy(...)
 ----
+[discrete]
 ==== put_policy
 Creates a new enrich policy.
@@ -1001,6 +1114,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/put-enrich-polic
 client.enrich.putPolicy(...)
 ----
+[discrete]
 ==== stats
 Gets enrich coordinator statistics and information about enrich policies that are currently executing.
@@ -1010,7 +1124,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-stats-api
 client.enrich.stats(...)
 ----
+[discrete]
 === eql
+[discrete]
 ==== delete
 Deletes an async EQL search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted.
@@ -1020,6 +1136,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.h
 client.eql.delete(...)
 ----
+[discrete]
 ==== get
 Returns async results from previously executed Event Query Language (EQL) search
@@ -1029,6 +1146,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.h
 client.eql.get(...)
 ----
+[discrete]
 ==== get_status
 Returns the status of a previously submitted async or stored Event Query Language (EQL) search
@@ -1038,6 +1156,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.h
 client.eql.getStatus(...)
 ----
+[discrete]
 ==== search
 Returns results matching a query expressed in Event Query Language (EQL)
@@ -1047,7 +1166,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.h
 client.eql.search(...)
 ----
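A minimal EQL search might look like this (a sketch; the index name and event fields are placeholders):

[source,ts]
----
// Run an EQL query; the response contains matching events or sequences.
const resp = await client.eql.search({
  index: 'my-logs',
  query: 'process where process.name == "regsvr32.exe"'
})
----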
+[discrete]
 === features
+[discrete]
 ==== get_features
 Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot
@@ -1057,6 +1178,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/get-features-api.
 client.features.getFeatures(...)
 ----
+[discrete]
 ==== reset_features
 Resets the internal state of features, usually by deleting system indices
@@ -1066,7 +1188,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots
 client.features.resetFeatures(...)
 ----
+[discrete]
 === fleet
+[discrete]
 ==== global_checkpoints
 Returns the current global checkpoints for an index. This API is designed for internal use by the fleet server project.
@@ -1076,6 +1200,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/get-global-check
 client.fleet.globalCheckpoints(...)
 ----
+[discrete]
 ==== msearch
 Multi Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project.
 [source,ts]
 ----
 client.fleet.msearch(...)
 ----
+[discrete]
 ==== search
 Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project.
 [source,ts]
 ----
 client.fleet.search(...)
 ----
+[discrete]
 === graph
+[discrete]
 ==== explore
 Explore extracted and summarized information about the documents and terms in an index.
@@ -1100,7 +1228,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-ap
 client.graph.explore(...)
 ----
+[discrete]
 === ilm
+[discrete]
 ==== delete_lifecycle
 Deletes the specified lifecycle policy definition. A currently used policy cannot be deleted.
@@ -1110,6 +1240,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifec
 client.ilm.deleteLifecycle(...)
 ----
+[discrete]
 ==== explain_lifecycle
 Retrieves information about the index's current lifecycle state, such as the currently executing phase, action, and step.
@@ -1119,6 +1250,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-explain-life
 client.ilm.explainLifecycle(...)
 ----
+[discrete]
 ==== get_lifecycle
 Returns the specified policy definition. Includes the policy version and last modified date.
@@ -1128,6 +1260,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycl
 client.ilm.getLifecycle(...)
 ----
+[discrete]
 ==== get_status
 Retrieves the current index lifecycle management (ILM) status.
@@ -1137,6 +1270,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-status.h
 client.ilm.getStatus(...)
 ----
+[discrete]
 ==== migrate_to_data_tiers
 Migrates the indices and ILM policies away from custom node attribute allocation routing to data tiers routing
@@ -1146,6 +1280,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate-to-d
 client.ilm.migrateToDataTiers(...)
 ----
+[discrete]
 ==== move_to_step
 Manually moves an index into the specified step and executes that step.
@@ -1155,6 +1290,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-move-to-step
 client.ilm.moveToStep(...)
 ----
+[discrete]
 ==== put_lifecycle
 Creates a lifecycle policy
@@ -1164,6 +1300,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-put-lifecycl
 client.ilm.putLifecycle(...)
 ----
+[discrete]
 ==== remove_policy
 Removes the assigned lifecycle policy and stops managing the specified index
@@ -1173,6 +1310,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-polic
 client.ilm.removePolicy(...)
 ----
+[discrete]
 ==== retry
 Retries executing the policy for an index that is in the ERROR step.
@@ -1182,6 +1320,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-retry-policy
 client.ilm.retry(...)
 ----
+[discrete]
 ==== start
 Start the index lifecycle management (ILM) plugin.
@@ -1191,6 +1330,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-start.html[E
 client.ilm.start(...)
 ----
+[discrete]
 ==== stop
 Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin
@@ -1200,7 +1340,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-stop.html[En
 client.ilm.stop(...)
 ----
+[discrete]
 === indices
+[discrete]
 ==== add_block
 Adds a block to an index.
@@ -1210,6 +1352,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blo
 client.indices.addBlock(...)
 ----
+[discrete]
 ==== analyze
 Performs the analysis process on a text and return the tokens breakdown of the text.
@@ -1219,6 +1362,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.h
 client.indices.analyze(...)
 ----
+[discrete]
 ==== clear_cache
 Clears all or specific caches for one or more indices.
@@ -1228,6 +1372,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcach
 client.indices.clearCache(...)
 ----
+[discrete]
 ==== clone
 Clones an index
@@ -1237,6 +1382,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-ind
 client.indices.clone(...)
 ----
+[discrete]
 ==== close
 Closes an index.
@@ -1246,6 +1392,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-clos
 client.indices.close(...)
 ----
+[discrete]
 ==== create
 Creates an index with optional settings and mappings.
@@ -1255,6 +1402,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-in
 client.indices.create(...)
 ----
+[discrete]
 ==== create_data_stream
 Creates a data stream
@@ -1264,6 +1412,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html
 client.indices.createDataStream(...)
 ----
+[discrete]
 ==== data_streams_stats
 Provides statistics on operations happening in a data stream.
@@ -1273,6 +1422,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html
 client.indices.dataStreamsStats(...)
 ----
+[discrete]
 ==== delete
 Deletes an index.
@@ -1282,6 +1432,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-in
 client.indices.delete(...)
 ----
+[discrete]
 ==== delete_alias
 Deletes an alias.
@@ -1291,6 +1442,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.h
 client.indices.deleteAlias(...)
 ----
+[discrete]
 ==== delete_data_stream
 Deletes a data stream.
@@ -1300,6 +1452,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html
 client.indices.deleteDataStream(...)
 ----
+[discrete]
 ==== delete_index_template
 Deletes an index template.
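For instance, creating an index with explicit mappings looks roughly like this (a sketch assuming the 8.x client's flattened options; names and fields are placeholders):

[source,ts]
----
// Create an index with a simple mapping.
await client.indices.create({
  index: 'my-index',
  mappings: {
    properties: {
      title: { type: 'text' },
      published_at: { type: 'date' }
    }
  }
})
----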
@@ -1309,6 +1462,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates
 client.indices.deleteIndexTemplate(...)
 ----
+[discrete]
 ==== delete_template
 Deletes an index template.
@@ -1318,6 +1472,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates
 client.indices.deleteTemplate(...)
 ----
+[discrete]
 ==== disk_usage
 Analyzes the disk usage of each field of an index or data stream
@@ -1327,6 +1482,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usag
 client.indices.diskUsage(...)
 ----
+[discrete]
 ==== exists
 Returns information about whether a particular index exists.
@@ -1336,6 +1492,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.ht
 client.indices.exists(...)
 ----
+[discrete]
 ==== exists_alias
 Returns information about whether a particular alias exists.
@@ -1345,6 +1502,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.h
 client.indices.existsAlias(...)
 ----
+[discrete]
 ==== exists_index_template
 Returns information about whether a particular index template exists.
@@ -1354,6 +1512,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates
 client.indices.existsIndexTemplate(...)
 ----
+[discrete]
 ==== exists_template
 Returns information about whether a particular index template exists.
@@ -1363,6 +1522,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates
 client.indices.existsTemplate(...)
 ----
+[discrete]
 ==== field_usage_stats
 Returns the field usage stats for each field of an index
@@ -1372,6 +1532,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats
 client.indices.fieldUsageStats(...)
 ----
+[discrete]
 ==== flush
 Performs the flush operation on one or more indices.
@@ -1381,6 +1542,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.htm
 client.indices.flush(...)
 ----
+[discrete]
 ==== forcemerge
 Performs the force merge operation on one or more indices.
@@ -1390,6 +1552,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerg
 client.indices.forcemerge(...)
 ----
+[discrete]
 ==== get
 Returns information about one or more indices.
@@ -1399,6 +1562,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index
 client.indices.get(...)
 ----
+[discrete]
 ==== get_alias
 Returns an alias.
@@ -1408,6 +1572,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.h
 client.indices.getAlias(...)
 ----
+[discrete]
 ==== get_data_stream
 Returns data streams.
@@ -1417,6 +1582,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html
 client.indices.getDataStream(...)
 ----
+[discrete]
 ==== get_field_mapping
 Returns mapping for one or more fields.
@@ -1426,6 +1592,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field
 client.indices.getFieldMapping(...)
 ----
+[discrete]
 ==== get_index_template
 Returns an index template.
@@ -1435,6 +1602,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates
 client.indices.getIndexTemplate(...)
 ----
+[discrete]
 ==== get_mapping
 Returns mappings for one or more indices.
@@ -1444,6 +1612,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mappi
 client.indices.getMapping(...)
 ----
+[discrete]
 ==== get_settings
 Returns settings for one or more indices.
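Reading settings back is a one-liner (a sketch; `include_defaults` is a standard query option and the index name is a placeholder):

[source,ts]
----
// Fetch effective settings, including defaults, for one index.
const settings = await client.indices.getSettings({
  index: 'my-index',
  include_defaults: true
})
----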
@@ -1453,6 +1622,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-setti
 client.indices.getSettings(...)
 ----
+[discrete]
 ==== get_template
 Returns an index template.
@@ -1462,6 +1632,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates
 client.indices.getTemplate(...)
 ----
+[discrete]
 ==== migrate_to_data_stream
 Migrates an alias to a data stream
@@ -1471,6 +1642,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html
 client.indices.migrateToDataStream(...)
 ----
+[discrete]
 ==== modify_data_stream
 Modifies a data stream
@@ -1480,6 +1652,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html
 client.indices.modifyDataStream(...)
 ----
+[discrete]
 ==== open
 Opens an index.
@@ -1489,6 +1662,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-clos
 client.indices.open(...)
 ----
+[discrete]
 ==== promote_data_stream
 Promotes a data stream from a replicated data stream managed by CCR to a regular data stream
@@ -1498,6 +1672,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html
 client.indices.promoteDataStream(...)
 ----
+[discrete]
 ==== put_alias
 Creates or updates an alias.
@@ -1507,6 +1682,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.h
 client.indices.putAlias(...)
 ----
+[discrete]
 ==== put_index_template
 Creates or updates an index template.
@@ -1516,6 +1692,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates
 client.indices.putIndexTemplate(...)
 ----
+[discrete]
 ==== put_mapping
 Updates the index mappings.
@@ -1525,6 +1702,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mappi
 client.indices.putMapping(...)
 ----
+[discrete]
 ==== put_settings
 Updates the index settings.
@@ -1534,6 +1712,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-se
 client.indices.putSettings(...)
 ----
+[discrete]
 ==== put_template
 Creates or updates an index template.
@@ -1543,6 +1722,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates
 client.indices.putTemplate(...)
 ----
+[discrete]
 ==== recovery
 Returns information about ongoing index shard recoveries.
@@ -1552,6 +1732,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.
 client.indices.recovery(...)
 ----
+[discrete]
 ==== refresh
 Performs the refresh operation in one or more indices.
@@ -1561,6 +1742,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.h
 client.indices.refresh(...)
 ----
+[discrete]
 ==== reload_search_analyzers
 Reloads an index's search analyzers and their resources.
@@ -1570,6 +1752,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-an
 client.indices.reloadSearchAnalyzers(...)
 ----
+[discrete]
 ==== resolve_index
 Returns information about any matching indices, aliases, and data streams
@@ -1579,6 +1762,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-i
 client.indices.resolveIndex(...)
 ----
+[discrete]
 ==== rollover
 Updates an alias to point to a new index when the existing index is considered to be too large or too old.
@@ -1589,6 +1773,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-
 client.indices.rollover(...)
 ----
+[discrete]
 ==== segments
 Provides low-level information about segments in a Lucene index.
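As an example of the rollover call mentioned above (a sketch; the alias and conditions are placeholders):

[source,ts]
----
// Roll the write alias over when the current index is too old or too big.
await client.indices.rollover({
  alias: 'logs-write',
  conditions: { max_age: '7d', max_docs: 1000000 }
})
----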
@@ -1598,6 +1783,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.
 client.indices.segments(...)
 ----
+[discrete]
 ==== shard_stores
 Provides store information for shard copies of indices.
@@ -1607,6 +1793,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-st
 client.indices.shardStores(...)
 ----
+[discrete]
 ==== shrink
 Allow to shrink an existing index into a new index with fewer primary shards.
@@ -1616,6 +1803,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-in
 client.indices.shrink(...)
 ----
+[discrete]
 ==== simulate_index_template
 Simulate matching the given index name against the index templates in the system
@@ -1625,6 +1813,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates
 client.indices.simulateIndexTemplate(...)
 ----
+[discrete]
 ==== simulate_template
 Simulate resolving the given template name or body
@@ -1634,6 +1823,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates
 client.indices.simulateTemplate(...)
 ----
+[discrete]
 ==== split
 Allows you to split an existing index into a new index with more primary shards.
@@ -1643,6 +1833,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-ind
 client.indices.split(...)
 ----
+[discrete]
 ==== stats
 Provides statistics on operations happening in an index.
@@ -1652,6 +1843,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.htm
 client.indices.stats(...)
 ----
+[discrete]
 ==== unfreeze
 Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.
@@ -1661,6 +1853,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-a
 client.indices.unfreeze(...)
 ----
+[discrete]
 ==== update_aliases
 Updates index aliases.
@@ -1670,6 +1863,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.h
 client.indices.updateAliases(...)
 ----
+[discrete]
 ==== validate_query
 Allows a user to validate a potentially expensive query without executing it.
@@ -1679,7 +1873,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.h
 client.indices.validateQuery(...)
 ----
+[discrete]
 === ingest
+[discrete]
 ==== delete_pipeline
 Deletes a pipeline.
@@ -1689,6 +1885,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-a
 client.ingest.deletePipeline(...)
 ----
+[discrete]
 ==== geo_ip_stats
 Returns statistical information about geoip databases
@@ -1698,6 +1895,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/geoip-stats-api.h
 client.ingest.geoIpStats(...)
 ----
+[discrete]
 ==== get_pipeline
 Returns a pipeline.
@@ -1707,6 +1905,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.
 client.ingest.getPipeline(...)
 ----
+[discrete]
 ==== processor_grok
 Returns a list of the built-in patterns.
@@ -1716,6 +1915,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.ht
 client.ingest.processorGrok(...)
 ----
+[discrete]
 ==== put_pipeline
 Creates or updates a pipeline.
@@ -1725,6 +1925,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.
 client.ingest.putPipeline(...)
 ----
+[discrete]
 ==== simulate
 Allows to simulate a pipeline with example documents.
@@ -1734,7 +1935,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline
 client.ingest.simulate(...)
 ----
+[discrete]
 === license
+[discrete]
 ==== delete
 Deletes licensing information for the cluster
@@ -1744,6 +1947,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.ht
 client.license.delete(...)
 ----
+[discrete]
 ==== get
 Retrieves licensing information for the cluster
@@ -1753,6 +1957,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html[
 client.license.get(...)
 ----
+[discrete]
 ==== get_basic_status
 Retrieves information about the status of the basic license.
@@ -1762,6 +1967,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.
 client.license.getBasicStatus(...)
 ----
+[discrete]
 ==== get_trial_status
 Retrieves information about the status of the trial license.
@@ -1771,6 +1977,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.
 client.license.getTrialStatus(...)
 ----
+[discrete]
 ==== post
 Updates the license for the cluster.
@@ -1780,6 +1987,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.ht
 client.license.post(...)
 ----
+[discrete]
 ==== post_start_basic
 Starts an indefinite basic license.
@@ -1789,6 +1997,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html[
 client.license.postStartBasic(...)
 ----
+[discrete]
 ==== post_start_trial
 Starts a limited time trial license.
@@ -1798,7 +2007,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html[
 client.license.postStartTrial(...)
 ----
+[discrete]
 === logstash
+[discrete]
 ==== delete_pipeline
 Deletes Logstash Pipelines used by Central Management
@@ -1808,6 +2019,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-del
 client.logstash.deletePipeline(...)
 ----
+[discrete]
 ==== get_pipeline
 Retrieves Logstash Pipelines used by Central Management
@@ -1817,6 +2029,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-get
 client.logstash.getPipeline(...)
 ----
+[discrete]
 ==== put_pipeline
 Adds and updates Logstash Pipelines used for Central Management
@@ -1826,7 +2039,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-put
 client.logstash.putPipeline(...)
 ----
+[discrete]
 === migration
+[discrete]
 ==== deprecations
 Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
@@ -1836,6 +2051,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-de
 client.migration.deprecations(...)
 ----
+[discrete]
 ==== get_feature_upgrade_status
 Find out whether system features need to be upgraded or not
@@ -1845,6 +2061,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-fe
 client.migration.getFeatureUpgradeStatus(...)
 ----
+[discrete]
 ==== post_feature_upgrade
 Begin upgrades for system features
@@ -1854,7 +2071,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-fe
 client.migration.postFeatureUpgrade(...)
 ----
+[discrete]
 === ml
+[discrete]
 ==== close_job
 Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle.
@@ -1864,6 +2083,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-close-job.html
 client.ml.closeJob(...)
 ----
+[discrete]
 ==== delete_calendar
 Deletes a calendar.
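A typical job-lifecycle call from the ml namespace looks like this (a sketch; the job id is a placeholder):

[source,ts]
----
// Close an anomaly detection job, waiting up to 30 seconds.
await client.ml.closeJob({ job_id: 'my-job', timeout: '30s' })
----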
@@ -1873,6 +2093,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calenda
 client.ml.deleteCalendar(...)
 ----
+[discrete]
 ==== delete_calendar_event
 Deletes scheduled events from a calendar.
@@ -1882,6 +2103,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calenda
 client.ml.deleteCalendarEvent(...)
 ----
+[discrete]
 ==== delete_calendar_job
 Deletes anomaly detection jobs from a calendar.
@@ -1891,6 +2113,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calenda
 client.ml.deleteCalendarJob(...)
 ----
+[discrete]
 ==== delete_data_frame_analytics
 Deletes an existing data frame analytics job.
@@ -1900,6 +2123,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-dfanalytic
 client.ml.deleteDataFrameAnalytics(...)
 ----
+[discrete]
 ==== delete_datafeed
 Deletes an existing datafeed.
@@ -1909,6 +2133,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-datafee
 client.ml.deleteDatafeed(...)
 ----
+[discrete]
 ==== delete_expired_data
 Deletes expired and unused machine learning data.
@@ -1918,6 +2143,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-expire
 client.ml.deleteExpiredData(...)
 ----
+[discrete]
 ==== delete_filter
 Deletes a filter.
@@ -1927,6 +2153,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-filter
 client.ml.deleteFilter(...)
 ----
+[discrete]
 ==== delete_forecast
 Deletes forecasts from a machine learning job.
@@ -1936,6 +2163,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-foreca
 client.ml.deleteForecast(...)
 ----
+[discrete]
 ==== delete_job
 Deletes an existing anomaly detection job.
@@ -1945,6 +2173,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.ht
 client.ml.deleteJob(...)
 ----
+[discrete]
 ==== delete_model_snapshot
 Deletes an existing model snapshot.
@@ -1954,6 +2183,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapsh
 client.ml.deleteModelSnapshot(...)
 ----
+[discrete]
 ==== delete_trained_model
 Deletes an existing trained inference model that is currently not referenced by an ingest pipeline.
@@ -1963,6 +2193,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-m
 client.ml.deleteTrainedModel(...)
 ----
+[discrete]
 ==== delete_trained_model_alias
 Deletes a model alias that refers to the trained model
@@ -1972,6 +2203,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-m
 client.ml.deleteTrainedModelAlias(...)
 ----
+[discrete]
 ==== estimate_model_memory
 Estimates the model memory
@@ -1981,6 +2213,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-apis.html[End
 client.ml.estimateModelMemory(...)
 ----
+[discrete]
 ==== evaluate_data_frame
 Evaluates the data frame analytics for an annotated index.
@@ -1990,6 +2223,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/evaluate-dfanaly
 client.ml.evaluateDataFrame(...)
 ----
+[discrete]
 ==== explain_data_frame_analytics
 Explains a data frame analytics config.
@@ -1999,6 +2233,7 @@ http://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalyti
 client.ml.explainDataFrameAnalytics(...)
 ----
+[discrete]
 ==== flush_job
 Forces any buffered data to be processed by the job.
@@ -2008,6 +2243,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.htm
 client.ml.flushJob(...)
 ----
+[discrete]
 ==== forecast
 Predicts the future behavior of a time series by using its historical behavior.
@@ -2017,6 +2253,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-forecast.html
 client.ml.forecast(...)
 ----
+[discrete]
 ==== get_buckets
 Retrieves anomaly detection job results for one or more buckets.
@@ -2026,6 +2263,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.ht
 client.ml.getBuckets(...)
 ----
+[discrete]
 ==== get_calendar_events
 Retrieves information about the scheduled events in calendars.
@@ -2035,6 +2273,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-
 client.ml.getCalendarEvents(...)
 ----
+[discrete]
 ==== get_calendars
 Retrieves configuration information for calendars.
@@ -2044,6 +2283,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.
 client.ml.getCalendars(...)
 ----
+[discrete]
 ==== get_categories
 Retrieves anomaly detection job results for one or more categories.
@@ -2053,6 +2293,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.
 client.ml.getCategories(...)
 ----
+[discrete]
 ==== get_data_frame_analytics
 Retrieves configuration information for data frame analytics jobs.
@@ -2062,6 +2303,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics.
 client.ml.getDataFrameAnalytics(...)
 ----
+[discrete]
 ==== get_data_frame_analytics_stats
 Retrieves usage information for data frame analytics jobs.
@@ -2071,6 +2313,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics-
 client.ml.getDataFrameAnalyticsStats(...)
 ----
+[discrete]
 ==== get_datafeed_stats
 Retrieves usage information for datafeeds.
@@ -2080,6 +2323,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-
 client.ml.getDatafeedStats(...)
 ----
+[discrete]
 ==== get_datafeeds
 Retrieves configuration information for datafeeds.
@@ -2089,6 +2333,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.
 client.ml.getDatafeeds(...)
 ----
+[discrete]
 ==== get_filters
 Retrieves filters.
@@ -2098,6 +2343,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.ht
 client.ml.getFilters(...)
 ----
+[discrete]
 ==== get_influencers
 Retrieves anomaly detection job results for one or more influencers.
@@ -2107,6 +2353,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influence
 client.ml.getInfluencers(...)
 ----
+[discrete]
 ==== get_job_stats
 Retrieves usage information for anomaly detection jobs.
@@ -2116,6 +2363,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats
 client.ml.getJobStats(...)
 ----
+[discrete]
 ==== get_jobs
 Retrieves configuration information for anomaly detection jobs.
@@ -2125,6 +2373,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html[
 client.ml.getJobs(...)
 ----
+[discrete]
 ==== get_memory_stats
 Returns information on how ML is using memory.
 [source,ts]
 ----
 client.ml.getMemoryStats(...)
 ----
+[discrete]
 ==== get_model_snapshot_upgrade_stats
 Gets stats for anomaly detection job model snapshot upgrades that are in progress.
@@ -2141,6 +2391,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-model
 client.ml.getModelSnapshotUpgradeStats(...)
 ----
+[discrete]
 ==== get_model_snapshots
 Retrieves information about model snapshots.
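Fetching results follows the same pattern, e.g. bucket results for a job (a sketch; the job id and time range are placeholders):

[source,ts]
----
// Retrieve bucket results for the last day.
const buckets = await client.ml.getBuckets({
  job_id: 'my-job',
  start: 'now-1d'
})
----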
@@ -2150,6 +2401,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.
 client.ml.getModelSnapshots(...)
 ----
+[discrete]
 ==== get_overall_buckets
 Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs.
@@ -2159,6 +2411,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-b
 client.ml.getOverallBuckets(...)
 ----
+[discrete]
 ==== get_records
 Retrieves anomaly records for an anomaly detection job.
@@ -2168,6 +2421,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.ht
 client.ml.getRecords(...)
 ----
+[discrete]
 ==== get_trained_models
 Retrieves configuration information for a trained inference model.
@@ -2177,6 +2431,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-mode
 client.ml.getTrainedModels(...)
 ----
+[discrete]
 ==== get_trained_models_stats
 Retrieves usage information for trained inference models.
@@ -2186,6 +2441,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-mode
 client.ml.getTrainedModelsStats(...)
 ----
+[discrete]
 ==== infer_trained_model_deployment
 Evaluate a trained model.
@@ -2195,6 +2451,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-mod
 client.ml.inferTrainedModelDeployment(...)
 ----
+[discrete]
 ==== info
 Returns defaults and limits used by machine learning.
@@ -2204,6 +2461,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-info.html
 client.ml.info(...)
 ----
+[discrete]
 ==== open_job
 Opens one or more anomaly detection jobs.
@@ -2213,6 +2471,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html
 client.ml.openJob(...)
 ----
+[discrete]
 ==== post_calendar_events
 Posts scheduled events in a calendar.
@@ -2222,6 +2481,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar
 client.ml.postCalendarEvents(...)
 ----
+[discrete]
 ==== post_data
 Sends data to an anomaly detection job for analysis.
@@ -2231,6 +2491,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.htm
 client.ml.postData(...)
 ----
+[discrete]
 ==== preview_data_frame_analytics
 Previews the data that will be analyzed given a data frame analytics config.
@@ -2240,6 +2501,7 @@ http://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalyti
 client.ml.previewDataFrameAnalytics(...)
 ----
+[discrete]
 ==== preview_datafeed
 Previews a datafeed.
@@ -2249,6 +2511,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-dataf
 client.ml.previewDatafeed(...)
 ----
+[discrete]
 ==== put_calendar
 Instantiates a calendar.
@@ -2258,6 +2521,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.
 client.ml.putCalendar(...)
 ----
+[discrete]
 ==== put_calendar_job
 Adds an anomaly detection job to a calendar.
@@ -2267,6 +2531,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-
 client.ml.putCalendarJob(...)
 ----
+[discrete]
 ==== put_data_frame_analytics
 Instantiates a data frame analytics job.
@@ -2276,6 +2541,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/put-dfanalytics.h
 client.ml.putDataFrameAnalytics(...)
 ----
+[discrete]
 ==== put_datafeed
 Instantiates a datafeed.
@@ -2285,6 +2551,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.
 client.ml.putDatafeed(...)
 ----
+[discrete]
 ==== put_filter
 Instantiates a filter.
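Creating a filter, for example, is a small request (a sketch; ids and items are placeholders):

[source,ts]
----
// Create a filter that custom detection rules can reference.
await client.ml.putFilter({
  filter_id: 'safe_domains',
  description: 'Domains to exclude from detection',
  items: ['*.example.com']
})
----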
@@ -2294,6 +2561,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-filter.ht
 client.ml.putFilter(...)
 ----
+[discrete]
 ==== put_job
 Instantiates an anomaly detection job.
@@ -2303,6 +2571,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html[
 client.ml.putJob(...)
 ----
+[discrete]
 ==== put_trained_model
 Creates an inference trained model.
@@ -2312,6 +2581,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-mode
 client.ml.putTrainedModel(...)
 ----
+[discrete]
 ==== put_trained_model_alias
 Creates a new model alias (or reassigns an existing one) to refer to the trained model
@@ -2321,6 +2591,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-mode
 client.ml.putTrainedModelAlias(...)
 ----
+[discrete]
 ==== put_trained_model_definition_part
 Creates part of a trained model definition
@@ -2330,6 +2601,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-mode
 client.ml.putTrainedModelDefinitionPart(...)
 ----
+[discrete]
 ==== put_trained_model_vocabulary
 Creates a trained model vocabulary
@@ -2339,6 +2611,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-mode
 client.ml.putTrainedModelVocabulary(...)
 ----
+[discrete]
 ==== reset_job
 Resets an existing anomaly detection job.
@@ -2348,6 +2621,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-reset-job.htm
 client.ml.resetJob(...)
 ----
+[discrete]
 ==== revert_model_snapshot
 Reverts to a specific snapshot.
@@ -2357,6 +2631,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapsh
 client.ml.revertModelSnapshot(...)
 ----
+[discrete]
 ==== set_upgrade_mode
 Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade.
@@ -2366,6 +2641,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-set-upgrade-m
 client.ml.setUpgradeMode(...)
 ----
+[discrete]
 ==== start_data_frame_analytics
 Starts a data frame analytics job.
@@ -2375,6 +2651,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/start-dfanalytic
 client.ml.startDataFrameAnalytics(...)
 ----
+[discrete]
 ==== start_datafeed
 Starts one or more datafeeds.
@@ -2384,6 +2661,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafee
 client.ml.startDatafeed(...)
 ----
+[discrete]
 ==== start_trained_model_deployment
 Start a trained model deployment.
@@ -2393,6 +2671,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-mod
 client.ml.startTrainedModelDeployment(...)
 ----
+[discrete]
 ==== stop_data_frame_analytics
 Stops one or more data frame analytics jobs.
@@ -2402,6 +2681,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-dfanalytics
 client.ml.stopDataFrameAnalytics(...)
 ----
+[discrete]
 ==== stop_datafeed
 Stops one or more datafeeds.
@@ -2411,6 +2691,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed
 client.ml.stopDatafeed(...)
 ----
+[discrete]
 ==== stop_trained_model_deployment
 Stop a trained model deployment.
@@ -2420,6 +2701,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-mode
 client.ml.stopTrainedModelDeployment(...)
 ----
+[discrete]
 ==== update_data_frame_analytics
 Updates certain properties of a data frame analytics job.
@@ -2429,6 +2711,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/update-dfanalyti
 client.ml.updateDataFrameAnalytics(...)
 ----
+[discrete]
 ==== update_datafeed
 Updates certain properties of a datafeed.
@@ -2438,6 +2721,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafe
 client.ml.updateDatafeed(...)
 ----
+[discrete]
 ==== update_filter
 Updates the description of a filter, adds items, or removes items.
@@ -2447,6 +2731,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-filter
 client.ml.updateFilter(...)
 ----
+[discrete]
 ==== update_job
 Updates certain properties of an anomaly detection job.
@@ -2456,6 +2741,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.ht
 client.ml.updateJob(...)
 ----
+[discrete]
 ==== update_model_snapshot
 Updates certain properties of a snapshot.
@@ -2465,6 +2751,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapsh
 client.ml.updateModelSnapshot(...)
 ----
+[discrete]
 ==== upgrade_job_snapshot
 Upgrades a given job snapshot to the current major version.
@@ -2474,7 +2761,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-upgrade-job-m
 client.ml.upgradeJobSnapshot(...)
 ----
+[discrete]
 === nodes
+[discrete]
 ==== clear_repositories_metering_archive
 Removes the archived repositories metering information present in the cluster.
@@ -2484,6 +2773,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-repositori
 client.nodes.clearRepositoriesMeteringArchive(...)
 ----
+[discrete]
 ==== get_repositories_metering_info
 Returns cluster repositories metering information.
@@ -2493,6 +2783,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories
 client.nodes.getRepositoriesMeteringInfo(...)
 ----
+[discrete]
 ==== hot_threads
 Returns information about hot threads on each node in the cluster.
@@ -2502,6 +2793,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot
 client.nodes.hotThreads(...)
 ----
+[discrete]
 ==== info
 Returns information about nodes in the cluster.
@@ -2511,6 +2803,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-inf
 client.nodes.info(...)
 ----
+[discrete]
 ==== reload_secure_settings
 Reloads secure settings.
@@ -2520,6 +2813,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.h
 client.nodes.reloadSecureSettings(...)
 ----
+[discrete]
 ==== stats
 Returns statistical information about nodes in the cluster.
@@ -2529,6 +2823,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-sta
 client.nodes.stats(...)
 ----
+[discrete]
 ==== usage
 Returns low-level information about REST actions usage on nodes.
@@ -2538,7 +2833,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usa
 client.nodes.usage(...)
 ----
+[discrete]
 === rollup
+[discrete]
 ==== delete_job
 Deletes an existing rollup job.
@@ -2548,6 +2845,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-delete-job
 client.rollup.deleteJob(...)
 ----
+[discrete]
 ==== get_jobs
 Retrieves the configuration, stats, and status of rollup jobs.
@@ -2557,6 +2855,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-job.ht
 client.rollup.getJobs(...)
 ----
+[discrete]
 ==== get_rollup_caps
 Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern.
@@ -2566,6 +2865,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup
 client.rollup.getRollupCaps(...)
 ----
+[discrete]
 ==== get_rollup_index_caps
 Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored).
@@ -2575,6 +2875,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup
 client.rollup.getRollupIndexCaps(...)
 ----
+[discrete]
 ==== put_job
 Creates a rollup job.
@@ -2584,6 +2885,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-put-job.ht
 client.rollup.putJob(...)
 ----
+[discrete]
 ==== rollup
 Rollup an index
@@ -2593,6 +2895,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-rollup.htm
 client.rollup.rollup(...)
 ----
+[discrete]
 ==== rollup_search
 Enables searching rolled-up data using the standard query DSL.
@@ -2602,6 +2905,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-search.htm
 client.rollup.rollupSearch(...)
 ----
+[discrete]
 ==== start_job
 Starts an existing, stopped rollup job.
@@ -2611,6 +2915,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-start-job.
 client.rollup.startJob(...)
 ----
+[discrete]
 ==== stop_job
 Stops an existing, started rollup job.
@@ -2620,7 +2925,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-stop-job.h
 client.rollup.stopJob(...)
 ----
+[discrete]
 === searchable_snapshots
+[discrete]
 ==== cache_stats
 Retrieve node-level cache statistics about searchable snapshots.
@@ -2630,6 +2937,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapsh
 client.searchableSnapshots.cacheStats(...)
 ----
+[discrete]
 ==== clear_cache
 Clear the cache of searchable snapshots.
@@ -2639,6 +2947,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapsh
 client.searchableSnapshots.clearCache(...)
 ----
+[discrete]
 ==== mount
 Mount a snapshot as a searchable index.
@@ -2648,6 +2957,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapsh
 client.searchableSnapshots.mount(...)
 ----
+[discrete]
 ==== stats
 Retrieve shard-level statistics about searchable snapshots.
@@ -2657,7 +2967,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapsh
 client.searchableSnapshots.stats(...)
 ----
+[discrete]
 === security
+[discrete]
 ==== activate_user_profile
 Creates or updates the user profile on behalf of another user.
 [source,ts]
 ----
 client.security.activateUserProfile(...)
 ----
+[discrete]
 ==== authenticate
 Enables authentication as a user and retrieve information about the authenticated user.
@@ -2674,6 +2987,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-aut
 client.security.authenticate(...)
 ----
+[discrete]
 ==== change_password
 Changes the passwords of users in the native realm and built-in users.
@@ -2683,6 +2997,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-cha
 client.security.changePassword(...)
 ----
+[discrete]
 ==== clear_api_key_cache
 Clear a subset or all entries from the API key cache.
@@ -2692,6 +3007,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-cle
 client.security.clearApiKeyCache(...)
 ----
+[discrete]
 ==== clear_cached_privileges
 Evicts application privileges from the native application privileges cache.
@@ -2701,6 +3017,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-cle
 client.security.clearCachedPrivileges(...)
 ----
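A quick way to sanity-check credentials from code (a sketch; the response shape follows the authenticate API):

[source,ts]
----
// Verify the current credentials and inspect the resolved user.
const me = await client.security.authenticate()
console.log(me.username, me.roles)
----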
---- +[discrete] ==== clear_cached_realms Evicts users from the user cache. Can completely clear the cache or evict specific users. @@ -2710,6 +3027,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-cle client.security.clearCachedRealms(...) ---- +[discrete] ==== clear_cached_roles Evicts roles from the native role cache. @@ -2719,6 +3037,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-cle client.security.clearCachedRoles(...) ---- +[discrete] ==== clear_cached_service_tokens Evicts tokens from the service account token caches. @@ -2728,6 +3047,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-cle client.security.clearCachedServiceTokens(...) ---- +[discrete] ==== create_api_key Creates an API key for access without requiring basic authentication. @@ -2737,6 +3057,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-cre client.security.createApiKey(...) ---- +[discrete] ==== create_service_token Creates a service account token for access without requiring basic authentication. @@ -2746,6 +3067,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-cre client.security.createServiceToken(...) ---- +[discrete] ==== delete_privileges Removes application privileges. @@ -2755,6 +3077,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-del client.security.deletePrivileges(...) ---- +[discrete] ==== delete_role Removes roles in the native realm. @@ -2764,6 +3087,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-del client.security.deleteRole(...) ---- +[discrete] ==== delete_role_mapping Removes role mappings. @@ -2773,6 +3097,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-del client.security.deleteRoleMapping(...) ---- +[discrete] ==== delete_service_token Deletes a service account token. @@ -2782,6 +3107,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-del client.security.deleteServiceToken(...) ---- +[discrete] ==== delete_user Deletes users from the native realm. @@ -2791,6 +3117,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-del client.security.deleteUser(...) ---- +[discrete] ==== disable_user Disables users in the native realm. @@ -2800,6 +3127,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-dis client.security.disableUser(...) ---- +[discrete] ==== enable_user Enables users in the native realm. @@ -2809,6 +3137,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ena client.security.enableUser(...) ---- +[discrete] ==== enroll_kibana Allows a kibana instance to configure itself to communicate with a secured elasticsearch cluster. @@ -2818,6 +3147,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-kiba client.security.enrollKibana(...) ---- +[discrete] ==== enroll_node Allows a new node to enroll to an existing cluster with security enabled. @@ -2827,6 +3157,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-node client.security.enrollNode(...) ---- +[discrete] ==== get_api_key Retrieves information for one or more API keys. @@ -2836,6 +3167,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get client.security.getApiKey(...) 
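// a hedged example with an assumed key id: fetches a single API key;
// calling it with no parameters returns the keys visible to the current user
const { api_keys } = await client.security.getApiKey({ id: 'my-api-key-id' })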
---- +[discrete] ==== get_builtin_privileges Retrieves the list of cluster privileges and index privileges that are available in this version of Elasticsearch. @@ -2845,6 +3177,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get client.security.getBuiltinPrivileges(...) ---- +[discrete] ==== get_privileges Retrieves application privileges. @@ -2854,6 +3187,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get client.security.getPrivileges(...) ---- +[discrete] ==== get_role Retrieves roles in the native realm. @@ -2863,6 +3197,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get client.security.getRole(...) ---- +[discrete] ==== get_role_mapping Retrieves role mappings. @@ -2872,6 +3207,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get client.security.getRoleMapping(...) ---- +[discrete] ==== get_service_accounts Retrieves information about service accounts. @@ -2881,6 +3217,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get client.security.getServiceAccounts(...) ---- +[discrete] ==== get_service_credentials Retrieves information of all service credentials for a service account. @@ -2890,6 +3227,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get client.security.getServiceCredentials(...) ---- +[discrete] ==== get_token Creates a bearer token for access without requiring basic authentication. @@ -2899,6 +3237,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get client.security.getToken(...) ---- +[discrete] ==== get_user Retrieves information about users in the native realm and built-in users. @@ -2908,6 +3247,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get client.security.getUser(...) ---- +[discrete] ==== get_user_privileges Retrieves security privileges for the logged in user. @@ -2917,6 +3257,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get client.security.getUserPrivileges(...) ---- +[discrete] ==== get_user_profile Retrieves user profile for the given unique ID. [source,ts] ---- client.security.getUserProfile(...) ---- +[discrete] ==== grant_api_key Creates an API key on behalf of another user. @@ -2933,6 +3275,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-gra client.security.grantApiKey(...) ---- +[discrete] ==== has_privileges Determines whether the specified user has a specified list of privileges. @@ -2942,6 +3285,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has client.security.hasPrivileges(...) ---- +[discrete] ==== invalidate_api_key Invalidates one or more API keys. @@ -2951,6 +3295,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-inv client.security.invalidateApiKey(...) ---- +[discrete] ==== invalidate_token Invalidates one or more access tokens or refresh tokens. @@ -2960,6 +3305,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-inv client.security.invalidateToken(...) ---- +[discrete] ==== oidc_authenticate Exchanges an OpenID Connect authentication response message for an Elasticsearch access token and refresh token pair @@ -2969,6 +3315,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oid client.security.oidcAuthenticate(...)
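// a sketch under assumed values only: redirect_uri, state and nonce must echo
// the OpenID Connect response prepared earlier for the named (hypothetical) realm
const token = await client.security.oidcAuthenticate({
  redirect_uri: 'https://kibana.example.com/callback?code=assumed-code',
  state: 'assumed-state',
  nonce: 'assumed-nonce',
  realm: 'oidc1' // hypothetical realm name
})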
---- +[discrete] ==== oidc_logout Invalidates a refresh token and access token that was generated from the OpenID Connect Authenticate API @@ -2978,6 +3325,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oid client.security.oidcLogout(...) ---- +[discrete] ==== oidc_prepare_authentication Creates an OAuth 2.0 authentication request as a URL string @@ -2987,6 +3335,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oid client.security.oidcPrepareAuthentication(...) ---- +[discrete] ==== put_privileges Adds or updates application privileges. @@ -2996,6 +3345,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put client.security.putPrivileges(...) ---- +[discrete] ==== put_role Adds and updates roles in the native realm. @@ -3005,6 +3355,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put client.security.putRole(...) ---- +[discrete] ==== put_role_mapping Creates and updates role mappings. @@ -3014,6 +3365,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put client.security.putRoleMapping(...) ---- +[discrete] ==== put_user Adds and updates users in the native realm. These users are commonly referred to as native users. @@ -3023,6 +3375,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put client.security.putUser(...) ---- +[discrete] ==== query_api_keys Retrieves information for API keys using a subset of query DSL @@ -3032,6 +3385,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-que client.security.queryApiKeys(...) ---- +[discrete] ==== saml_authenticate Exchanges a SAML Response message for an Elasticsearch access token and refresh token pair @@ -3041,6 +3395,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-sam client.security.samlAuthenticate(...) ---- +[discrete] ==== saml_complete_logout Verifies the logout response sent from the SAML IdP @@ -3050,6 +3405,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-sam client.security.samlCompleteLogout(...) ---- +[discrete] ==== saml_invalidate Consumes a SAML LogoutRequest @@ -3059,6 +3415,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-sam client.security.samlInvalidate(...) ---- +[discrete] ==== saml_logout Invalidates an access token and a refresh token that were generated via the SAML Authenticate API @@ -3068,6 +3425,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-sam client.security.samlLogout(...) ---- +[discrete] ==== saml_prepare_authentication Creates a SAML authentication request @@ -3077,6 +3435,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-sam client.security.samlPrepareAuthentication(...) ---- +[discrete] ==== saml_service_provider_metadata Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider @@ -3086,6 +3445,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-sam client.security.samlServiceProviderMetadata(...) ---- +[discrete] ==== update_user_profile_data Update application specific data for the user profile of the given unique ID. [source,ts] @@ -3093,7 +3453,9 @@ Update application specific data for the user profile of the given unique ID. client.security.updateUserProfileData(...) ---- +[discrete] === slm +[discrete] ==== delete_lifecycle Deletes an existing snapshot lifecycle policy. 
@@ -3103,6 +3465,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-delete-p client.slm.deleteLifecycle(...) ---- +[discrete] ==== execute_lifecycle Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. @@ -3112,6 +3475,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute- client.slm.executeLifecycle(...) ---- +[discrete] ==== execute_retention Deletes any snapshots that are expired according to the policy's retention rules. @@ -3121,6 +3485,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute- client.slm.executeRetention(...) ---- +[discrete] ==== get_lifecycle Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. @@ -3130,6 +3495,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-poli client.slm.getLifecycle(...) ---- +[discrete] ==== get_stats Returns global and policy-level statistics about actions taken by snapshot lifecycle management. @@ -3139,6 +3505,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-stats client.slm.getStats(...) ---- +[discrete] ==== get_status Retrieves the status of snapshot lifecycle management (SLM). @@ -3148,6 +3515,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-stat client.slm.getStatus(...) ---- +[discrete] ==== put_lifecycle Creates or updates a snapshot lifecycle policy. @@ -3157,6 +3525,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-put-poli client.slm.putLifecycle(...) ---- +[discrete] ==== start Turns on snapshot lifecycle management (SLM). @@ -3166,6 +3535,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-start.ht client.slm.start(...) ---- +[discrete] ==== stop Turns off snapshot lifecycle management (SLM). @@ -3175,7 +3545,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-stop.htm client.slm.stop(...) ---- +[discrete] === snapshot +[discrete] ==== cleanup_repository Removes stale data from repository. @@ -3185,6 +3557,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot client.snapshot.cleanupRepository(...) ---- +[discrete] ==== clone Clones indices from one snapshot into another snapshot in the same repository. @@ -3194,6 +3567,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots client.snapshot.clone(...) ---- +[discrete] ==== create Creates a snapshot in a repository. @@ -3203,6 +3577,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots client.snapshot.create(...) ---- +[discrete] ==== create_repository Creates a repository. @@ -3212,6 +3587,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots client.snapshot.createRepository(...) ---- +[discrete] ==== delete Deletes one or more snapshots. @@ -3221,6 +3597,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots client.snapshot.delete(...) ---- +[discrete] ==== delete_repository Deletes a repository. @@ -3230,6 +3607,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots client.snapshot.deleteRepository(...) ---- +[discrete] ==== get Returns information about a snapshot. @@ -3239,6 +3617,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots client.snapshot.get(...) 
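// a brief sketch (repository and snapshot names are assumed):
const res = await client.snapshot.get({ repository: 'my_repository', snapshot: 'snapshot_1' })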
---- +[discrete] ==== get_repository Returns information about a repository. @@ -3248,6 +3627,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots client.snapshot.getRepository(...) ---- +[discrete] ==== repository_analyze Analyzes a repository for correctness and performance @@ -3257,6 +3637,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots client.snapshot.repositoryAnalyze(...) ---- +[discrete] ==== restore Restores a snapshot. @@ -3266,6 +3647,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots client.snapshot.restore(...) ---- +[discrete] ==== status Returns information about the status of a snapshot. @@ -3275,6 +3657,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots client.snapshot.status(...) ---- +[discrete] ==== verify_repository Verifies a repository. @@ -3284,7 +3667,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots client.snapshot.verifyRepository(...) ---- +[discrete] === sql +[discrete] ==== clear_cursor Clears the SQL cursor @@ -3294,6 +3679,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-sql-cursor client.sql.clearCursor(...) ---- +[discrete] ==== delete_async Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. @@ -3303,6 +3689,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-async-sql- client.sql.deleteAsync(...) ---- +[discrete] ==== get_async Returns the current status and available results for an async SQL search or stored synchronous SQL search @@ -3312,6 +3699,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-sea client.sql.getAsync(...) ---- +[discrete] ==== get_async_status Returns the current status of an async SQL search or a stored synchronous SQL search @@ -3321,6 +3709,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-sea client.sql.getAsyncStatus(...) ---- +[discrete] ==== query Executes a SQL request @@ -3330,6 +3719,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-search-api.h client.sql.query(...) ---- +[discrete] ==== translate Translates SQL into Elasticsearch queries @@ -3339,7 +3729,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate-ap client.sql.translate(...) ---- +[discrete] === ssl +[discrete] ==== certificates Retrieves information about the X.509 certificates used to encrypt communications in the cluster. @@ -3349,7 +3741,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl client.ssl.certificates(...) ---- +[discrete] === tasks +[discrete] ==== cancel Cancels a task, if it can be cancelled through an API. @@ -3359,6 +3753,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoi client.tasks.cancel(...) ---- +[discrete] ==== get Returns information about a task. @@ -3368,6 +3763,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoi client.tasks.get(...) ---- +[discrete] ==== list Returns a list of tasks. @@ -3377,7 +3773,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoi client.tasks.list(...) ---- +[discrete] === text_structure +[discrete] ==== find_structure Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. 
@@ -3387,7 +3785,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.h client.textStructure.findStructure(...) ---- +[discrete] === transform +[discrete] ==== delete_transform Deletes an existing transform. @@ -3397,6 +3797,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform client.transform.deleteTransform(...) ---- +[discrete] ==== get_transform Retrieves configuration information for transforms. @@ -3406,6 +3807,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.ht client.transform.getTransform(...) ---- +[discrete] ==== get_transform_stats Retrieves usage information for transforms. @@ -3415,6 +3817,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-st client.transform.getTransformStats(...) ---- +[discrete] ==== preview_transform Previews a transform. @@ -3424,6 +3827,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transfor client.transform.previewTransform(...) ---- +[discrete] ==== put_transform Instantiates a transform. @@ -3433,6 +3837,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.ht client.transform.putTransform(...) ---- +[discrete] ==== reset_transform Resets an existing transform. [source,ts] @@ -3440,6 +3845,7 @@ Resets an existing transform. client.transform.resetTransform(...) ---- +[discrete] ==== start_transform Starts one or more transforms. @@ -3449,6 +3855,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform. client.transform.startTransform(...) ---- +[discrete] ==== stop_transform Stops one or more transforms. @@ -3458,6 +3865,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.h client.transform.stopTransform(...) ---- +[discrete] ==== update_transform Updates certain properties of a transform. @@ -3467,6 +3875,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform client.transform.updateTransform(...) ---- +[discrete] ==== upgrade_transforms Upgrades all transforms. @@ -3476,7 +3885,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/upgrade-transfor client.transform.upgradeTransforms(...) ---- +[discrete] === watcher +[discrete] ==== ack_watch Acknowledges a watch, manually throttling the execution of the watch's actions. @@ -3486,6 +3897,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack- client.watcher.ackWatch(...) ---- +[discrete] ==== activate_watch Activates a currently inactive watch. @@ -3495,6 +3907,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-acti client.watcher.activateWatch(...) ---- +[discrete] ==== deactivate_watch Deactivates a currently active watch. @@ -3504,6 +3917,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deac client.watcher.deactivateWatch(...) ---- +[discrete] ==== delete_watch Removes a watch from Watcher. @@ -3513,6 +3927,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-dele client.watcher.deleteWatch(...) ---- +[discrete] ==== execute_watch Forces the execution of a stored watch. @@ -3522,6 +3937,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-exec client.watcher.executeWatch(...) ---- +[discrete] ==== get_watch Retrieves a watch by its ID. 
@@ -3531,6 +3947,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get- client.watcher.getWatch(...) ---- +[discrete] ==== put_watch Creates a new watch, or updates an existing one. @@ -3540,6 +3957,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put- client.watcher.putWatch(...) ---- +[discrete] ==== query_watches Retrieves stored watches. @@ -3549,6 +3967,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-quer client.watcher.queryWatches(...) ---- +[discrete] ==== start Starts Watcher if it is not already running. @@ -3558,6 +3977,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-star client.watcher.start(...) ---- +[discrete] ==== stats Retrieves the current Watcher metrics. @@ -3567,6 +3987,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stat client.watcher.stats(...) ---- +[discrete] ==== stop Stops Watcher if it is running. @@ -3576,7 +3997,9 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop client.watcher.stop(...) ---- +[discrete] === xpack +[discrete] ==== info Retrieves information about the installed X-Pack features. @@ -3586,6 +4009,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html[En client.xpack.info(...) ---- +[discrete] ==== usage Retrieves usage information about the installed X-Pack features. From f6c14b72686f618fa3e03080b18708266ee0e995 Mon Sep 17 00:00:00 2001 From: Ville Lahdenvuo Date: Fri, 4 Mar 2022 08:22:53 +0100 Subject: [PATCH 155/647] README: use correct license reference (#1642) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 66cd87671..2c8b411c5 100644 --- a/README.md +++ b/README.md @@ -173,4 +173,4 @@ npm install esmain@github:elastic/elasticsearch-js ## License -This software is licensed under the [Apache 2 license](./LICENSE). +This software is licensed under the [Apache License 2.0](./LICENSE). From 53ccd17dbf2ccbd254f09e418795f9117f62af81 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Wed, 9 Mar 2022 11:43:48 +0100 Subject: [PATCH 156/647] Changelog for 8.1 (#1657) --- docs/changelog.asciidoc | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 1e01389b1..d69e61016 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -2,12 +2,41 @@ == Release notes [discrete] -=== 8.0.0 +=== 8.1.0 [discrete] ==== Features [discrete] +===== Support for Elasticsearch `v8.1` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.1/release-notes-8.1.0.html[here]. + +[discrete] +===== Export SniffingTransport https://github.com/elastic/elasticsearch-js/pull/1653[#1653] + +Now the client exports the SniffingTransport class. + +[discrete] +==== Fixes + +[discrete] +===== Cleanup abort listener https://github.com/elastic/elastic-transport-js/pull/42[transport/#42] + +The legacy http client was not cleaning up the abort listener, which could cause a memory leak. + +[discrete] +===== Improve undici performances https://github.com/elastic/elastic-transport-js/pull/41[transport/#41] + +Improve the stream body collection and keep alive timeout. 
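For illustration only, a minimal sketch of wiring in the newly exported `SniffingTransport` (the node URL and sniff interval below are assumed, not taken from this changelog):

[source,ts]
----
import { Client, SniffingTransport } from '@elastic/elasticsearch'

// hand the exported transport class to the client so it re-discovers
// cluster nodes on the (hypothetical) interval below
const client = new Client({
  node: 'http://localhost:9200/',
  Transport: SniffingTransport,
  sniffInterval: 60000 // milliseconds, assumed value
})
----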
+ +[discrete] +=== 8.0.0 + +[discrete] +==== Features + [discrete] ===== Support for Elasticsearch `v8.0` From a298517692a993d178c61b9e84c36d053c681ac2 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 9 Mar 2022 11:49:26 +0100 Subject: [PATCH 157/647] Updated changelog.asciidoc --- docs/changelog.asciidoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index d69e61016..52c2bf673 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -21,6 +21,11 @@ Now the client exports the SniffingTransport class. [discrete] ==== Fixes +[discrete] +===== Fix onFlushTimeout timer not being cleared when upstream errors https://github.com/elastic/elasticsearch-js/pull/1616[#1616] + +Fixes a memory leak caused by an error in the upstream dataset of the bulk helper. + [discrete] ===== Cleanup abort listener https://github.com/elastic/elastic-transport-js/pull/42[transport/#42] From 720b5b449bde49528210d147332d37a96122520e Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 23 Mar 2022 11:31:34 +0100 Subject: [PATCH 158/647] API generation --- docs/reference.asciidoc | 40 +++ src/api/api/cat.ts | 29 ++ src/api/api/field_caps.ts | 9 +- src/api/api/knn_search.ts | 2 +- src/api/api/ml.ts | 11 +- src/api/api/reindex.ts | 9 +- src/api/api/security.ts | 70 +++- src/api/types.ts | 617 +++++++++++++++++++++--------------- src/api/typesWithBodyKey.ts | 617 +++++++++++++++++++++--------------- 9 files changed, 858 insertions(+), 546 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index e24ba7a18..bef00bbe8 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -524,6 +524,14 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.ht client.cat.allocation(...) ---- +[discrete] +==== component_templates +Returns information about existing component_templates templates. +[source,ts] +---- +client.cat.componentTemplates(...) +---- + [discrete] ==== count Provides quick access to the document count of the entire cluster, or individual indices. @@ -3127,6 +3135,16 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-dis client.security.disableUser(...) ---- +[discrete] +==== disable_user_profile +Disables a user profile so it's not visible in user profile searches. + +https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user-profile.html[Endpoint documentation] +[source,ts] +---- +client.security.disableUserProfile(...) +---- + [discrete] ==== enable_user Enables users in the native realm. @@ -3137,6 +3155,16 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ena client.security.enableUser(...) ---- +[discrete] +==== enable_user_profile +Enables a user profile so it's visible in user profile searches. + +https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user-profile.html[Endpoint documentation] +[source,ts] +---- +client.security.enableUserProfile(...) +---- + [discrete] ==== enroll_kibana Allows a kibana instance to configure itself to communicate with a secured elasticsearch cluster. @@ -3445,6 +3473,16 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-sam client.security.samlServiceProviderMetadata(...) ---- +[discrete] +==== search_user_profiles +Searches for user profiles that match specified criteria. 
+ +https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-search-user-profile.html[Endpoint documentation] +[source,ts] +---- +client.security.searchUserProfiles(...) +---- + [discrete] ==== update_user_profile_data Update application specific data for the user profile of the given unique ID. @@ -3840,6 +3878,8 @@ client.transform.putTransform(...) [discrete] ==== reset_transform Resets an existing transform. + +https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-transform.html[Endpoint documentation] [source,ts] ---- client.transform.resetTransform(...) diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index d052c4528..a4dea6a34 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -103,6 +103,35 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + async componentTemplates (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async componentTemplates (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async componentTemplates (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async componentTemplates (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['name'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_cat/component_templates/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_cat/component_templates' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise> async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index d558bd531..606357720 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -37,10 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } -export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise -export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise { +export default async function FieldCapsApi (this: That, params: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function FieldCapsApi (this: That, params: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): 
Promise> +export default async function FieldCapsApi (this: That, params: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise +export default async function FieldCapsApi (this: That, params: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['index_filter', 'runtime_mappings'] const querystring: Record = {} @@ -53,7 +53,6 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ body = userBody != null ? { ...userBody } : undefined } - params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { body = body ?? {} diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index 6431d505e..dbe6f9c5f 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -42,7 +42,7 @@ export default async function KnnSearchApi (this: That, par export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptions): Promise> export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['_source', 'docvalue_fields', 'stored_fields', 'fields', 'knn'] + const acceptedBody: string[] = ['_source', 'docvalue_fields', 'stored_fields', 'fields', 'filter', 'knn'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 64f10f486..83e6932c8 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -937,10 +937,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } - async getMemoryStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getMemoryStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getMemoryStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getMemoryStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] const querystring: Record = {} const body = undefined @@ -950,6 +950,7 @@ export default class Ml { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -1680,7 +1681,7 @@ export default class Ml { async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: 
TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['vocabulary'] + const acceptedBody: string[] = ['vocabulary', 'merges'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 85e20deee..3dadca6b6 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -37,10 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } -export default async function ReindexApi (this: That, params?: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function ReindexApi (this: That, params?: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function ReindexApi (this: That, params?: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise -export default async function ReindexApi (this: That, params?: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise { +export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise +export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source'] const querystring: Record = {} @@ -53,7 +53,6 @@ export default async function ReindexApi (this: That, params?: T.ReindexRequest body = userBody != null ? { ...userBody } : undefined } - params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { body = body ?? {} diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 082db4b76..d480295e3 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -436,6 +436,28 @@ export default class Security { return await this.transport.request({ path, method, querystring, body }, options) } + async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['uid'] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}/_disable` + return await this.transport.request({ path, method, querystring, body }, options) + } + async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise @@ -458,6 +480,28 @@ export default class Security { return await this.transport.request({ path, method, querystring, body }, options) } + async enableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async enableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async enableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async enableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['uid'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}/_enable` + return await this.transport.request({ path, method, querystring, body }, options) + } + async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise> async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise @@ -1377,6 +1421,28 @@ export default class Security { return await this.transport.request({ path, method, querystring, body }, options) } + async searchUserProfiles (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async searchUserProfiles (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async searchUserProfiles (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async searchUserProfiles (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = body != null ? 
'POST' : 'GET' + const path = '/_security/profile/_search' + return await this.transport.request({ path, method, querystring, body }, options) + } + async updateUserProfileData (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async updateUserProfileData (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> async updateUserProfileData (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise @@ -1394,8 +1460,8 @@ export default class Security { } } - const method = 'POST' - const path = `/_security/profile/_data/${encodeURIComponent(params.uid.toString())}` + const method = 'PUT' + const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}/_data` return await this.transport.request({ path, method, querystring, body }, options) } } diff --git a/src/api/types.ts b/src/api/types.ts index bb8768138..dcbfe4a37 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -167,8 +167,7 @@ export interface CreateRequest extends RequestBase { document?: TDocument } -export interface CreateResponse extends WriteResponseBase { -} +export type CreateResponse = WriteResponseBase export interface DeleteRequest extends RequestBase { id: Id @@ -183,8 +182,7 @@ export interface DeleteRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards } -export interface DeleteResponse extends WriteResponseBase { -} +export type DeleteResponse = WriteResponseBase export interface DeleteByQueryRequest extends RequestBase { index: Indices @@ -243,8 +241,7 @@ export interface DeleteByQueryRethrottleRequest extends RequestBase { requests_per_second?: long } -export interface DeleteByQueryRethrottleResponse extends TasksListResponse { -} +export type DeleteByQueryRethrottleResponse = TasksListResponse export interface DeleteScriptRequest extends RequestBase { id: Id @@ -252,8 +249,7 @@ export interface DeleteScriptRequest extends RequestBase { timeout?: Time } -export interface DeleteScriptResponse extends AcknowledgedResponseBase { -} +export type DeleteScriptResponse = AcknowledgedResponseBase export interface ExistsRequest extends RequestBase { id: Id @@ -345,9 +341,11 @@ export interface FieldCapsRequest extends RequestBase { index?: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards - fields?: Fields + fields: Fields ignore_unavailable?: boolean include_unmapped?: boolean + filters?: string + types?: string[] index_filter?: QueryDslQueryContainer runtime_mappings?: MappingRuntimeFields } @@ -467,8 +465,7 @@ export interface IndexRequest extends RequestBase { document?: TDocument } -export interface IndexResponse extends WriteResponseBase { -} +export type IndexResponse = WriteResponseBase export interface InfoRequest extends RequestBase { } @@ -488,6 +485,7 @@ export interface KnnSearchRequest extends RequestBase { docvalue_fields?: (QueryDslFieldAndFormat | Field)[] stored_fields?: Fields fields?: Fields + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] knn: KnnSearchQuery } @@ -670,6 +668,7 @@ export interface MtermvectorsTermVectorsResult { export interface OpenPointInTimeRequest extends RequestBase { index: Indices keep_alive: Time + ignore_unavailable?: boolean } export interface OpenPointInTimeResponse { @@ -689,8 +688,7 @@ export interface PutScriptRequest extends RequestBase { script: StoredScript } -export interface PutScriptResponse extends AcknowledgedResponseBase { -} +export type PutScriptResponse = AcknowledgedResponseBase export 
interface RankEvalDocumentRating { _id: Id @@ -810,11 +808,11 @@ export interface ReindexRequest extends RequestBase { wait_for_completion?: boolean require_alias?: boolean conflicts?: Conflicts - dest?: ReindexDestination + dest: ReindexDestination max_docs?: long script?: Script size?: long - source?: ReindexSource + source: ReindexSource } export interface ReindexResponse { @@ -1668,7 +1666,9 @@ export interface UpdateRequest upsert?: TDocument } -export interface UpdateResponse extends WriteResponseBase { +export type UpdateResponse = UpdateUpdateWriteResponseBase + +export interface UpdateUpdateWriteResponseBase extends WriteResponseBase { get?: InlineGet } @@ -1821,10 +1821,6 @@ export type DateOrEpochMillis = DateString | EpochMillis export type DateString = string -export interface DictionaryResponseBase { - [key: string]: TValue -} - export type Distance = string export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm' @@ -4428,7 +4424,7 @@ export interface MappingConstantKeywordProperty extends MappingPropertyBase { type: 'constant_keyword' } -export type MappingCoreProperty = MappingObjectProperty | MappingNestedProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingDocValuesProperty +export type MappingCoreProperty = MappingObjectProperty | MappingNestedProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingDocValuesProperty | MappingMatchOnlyTextProperty export interface MappingCorePropertyBase extends MappingPropertyBase { copy_to?: Fields @@ -4522,7 +4518,7 @@ export interface MappingFieldNamesField { enabled: boolean } -export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' +export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'match_only_text' export interface MappingFlattenedProperty extends MappingPropertyBase { boost?: double @@ -4645,6 +4641,13 @@ export interface MappingLongRangeProperty extends MappingRangePropertyBase { type: 'long_range' } +export interface MappingMatchOnlyTextProperty { + type: 'match_only_text' + fields?: Record + meta?: Record + copy_to?: Fields +} + export type MappingMatchType = 'simple' | 'regex' export interface MappingMurmur3HashProperty extends MappingDocValuesPropertyBase { @@ -5643,8 +5646,7 @@ export interface AsyncSearchDeleteRequest extends RequestBase { id: Id } -export interface AsyncSearchDeleteResponse extends AcknowledgedResponseBase { -} +export type AsyncSearchDeleteResponse = AcknowledgedResponseBase export 
interface AsyncSearchGetRequest extends RequestBase { id: Id @@ -5660,7 +5662,9 @@ export interface AsyncSearchStatusRequest extends RequestBase { id: Id } -export interface AsyncSearchStatusResponse extends AsyncSearchAsyncSearchResponseBase { +export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase + +export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSearchResponseBase { _shards: ShardStatistics completion_status?: integer } @@ -5744,8 +5748,7 @@ export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase { name: Name } -export interface AutoscalingDeleteAutoscalingPolicyResponse extends AcknowledgedResponseBase { -} +export type AutoscalingDeleteAutoscalingPolicyResponse = AcknowledgedResponseBase export interface AutoscalingGetAutoscalingCapacityAutoscalingCapacity { node: AutoscalingGetAutoscalingCapacityAutoscalingResources @@ -5792,8 +5795,7 @@ export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { policy?: AutoscalingAutoscalingPolicy } -export interface AutoscalingPutAutoscalingPolicyResponse extends AcknowledgedResponseBase { -} +export type AutoscalingPutAutoscalingPolicyResponse = AcknowledgedResponseBase export type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 
'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's' @@ -7612,8 +7614,7 @@ export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { name: Name } -export interface CcrDeleteAutoFollowPatternResponse extends AcknowledgedResponseBase { -} +export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrFollowRequest extends RequestBase { index: IndexName @@ -7715,15 +7716,13 @@ export interface CcrPauseAutoFollowPatternRequest extends RequestBase { name: Name } -export interface CcrPauseAutoFollowPatternResponse extends AcknowledgedResponseBase { -} +export type CcrPauseAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrPauseFollowRequest extends RequestBase { index: IndexName } -export interface CcrPauseFollowResponse extends AcknowledgedResponseBase { -} +export type CcrPauseFollowResponse = AcknowledgedResponseBase export interface CcrPutAutoFollowPatternRequest extends RequestBase { name: Name @@ -7744,15 +7743,13 @@ export interface CcrPutAutoFollowPatternRequest extends RequestBase { max_write_request_size?: ByteSize } -export interface CcrPutAutoFollowPatternResponse extends AcknowledgedResponseBase { -} +export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeAutoFollowPatternRequest extends RequestBase { name: Name } -export interface CcrResumeAutoFollowPatternResponse extends AcknowledgedResponseBase { -} +export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeFollowRequest extends RequestBase { index: IndexName @@ -7768,8 +7765,7 @@ export interface CcrResumeFollowRequest extends RequestBase { read_poll_timeout?: Time } -export interface CcrResumeFollowResponse extends AcknowledgedResponseBase { -} +export type CcrResumeFollowResponse = AcknowledgedResponseBase export interface CcrStatsAutoFollowStats { auto_followed_clusters: CcrStatsAutoFollowedCluster[] @@ -7801,8 +7797,7 @@ export interface CcrUnfollowRequest extends RequestBase { index: IndexName } -export interface CcrUnfollowResponse extends AcknowledgedResponseBase { -} +export type CcrUnfollowResponse = AcknowledgedResponseBase export interface ClusterComponentTemplate { name: Name @@ -7941,13 +7936,12 @@ export interface ClusterAllocationExplainUnassignedInformation { export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION' export interface 
ClusterDeleteComponentTemplateRequest extends RequestBase { - name: Name + name: Names master_timeout?: Time timeout?: Time } -export interface ClusterDeleteComponentTemplateResponse extends AcknowledgedResponseBase { -} +export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { wait_for_removal?: boolean @@ -8080,8 +8074,7 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { _meta?: Metadata } -export interface ClusterPutComponentTemplateResponse extends AcknowledgedResponseBase { -} +export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterPutSettingsRequest extends RequestBase { flat_settings?: boolean @@ -8123,8 +8116,7 @@ export interface ClusterRemoteInfoClusterRemoteSniffInfo { export interface ClusterRemoteInfoRequest extends RequestBase { } -export interface ClusterRemoteInfoResponse extends DictionaryResponseBase { -} +export type ClusterRemoteInfoResponse = Record export interface ClusterRerouteCommand { cancel?: ClusterRerouteCommandCancelAction @@ -8434,14 +8426,7 @@ export interface ClusterStatsRequest extends RequestBase { timeout?: Time } -export interface ClusterStatsResponse extends NodesNodesResponseBase { - cluster_name: Name - cluster_uuid: Uuid - indices: ClusterStatsClusterIndices - nodes: ClusterStatsClusterNodes - status: HealthStatus - timestamp: long -} +export type ClusterStatsResponse = ClusterStatsStatsResponseBase export interface ClusterStatsRuntimeFieldTypes { name: Name @@ -8460,6 +8445,15 @@ export interface ClusterStatsRuntimeFieldTypes { doc_total: integer } +export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { + cluster_name: Name + cluster_uuid: Uuid + indices: ClusterStatsClusterIndices + nodes: ClusterStatsClusterNodes + status: HealthStatus + timestamp: long +} + export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { index_uuid: Uuid accept_data_loss: boolean @@ -8467,8 +8461,7 @@ export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { timeout?: Time } -export interface DanglingIndicesDeleteDanglingIndexResponse extends AcknowledgedResponseBase { -} +export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { index_uuid: Uuid @@ -8477,8 +8470,7 @@ export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { timeout?: Time } -export interface DanglingIndicesImportDanglingIndexResponse extends AcknowledgedResponseBase { -} +export type DanglingIndicesImportDanglingIndexResponse = AcknowledgedResponseBase export interface DanglingIndicesListDanglingIndicesDanglingIndex { index_name: string @@ -8515,8 +8507,7 @@ export interface EnrichDeletePolicyRequest extends RequestBase { name: Name } -export interface EnrichDeletePolicyResponse extends AcknowledgedResponseBase { -} +export type EnrichDeletePolicyResponse = AcknowledgedResponseBase export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' @@ -8548,8 +8539,7 @@ export interface EnrichPutPolicyRequest extends RequestBase { match?: EnrichPolicy } -export interface EnrichPutPolicyResponse extends AcknowledgedResponseBase { -} +export type EnrichPutPolicyResponse = AcknowledgedResponseBase export interface EnrichStatsCacheStats { node_id: Id @@ -8612,8 +8602,7 @@ export interface EqlDeleteRequest 
extends RequestBase { id: Id } -export interface EqlDeleteResponse extends AcknowledgedResponseBase { -} +export type EqlDeleteResponse = AcknowledgedResponseBase export interface EqlGetRequest extends RequestBase { id: Id @@ -8905,8 +8894,7 @@ export interface IlmDeleteLifecycleRequest extends RequestBase { timeout?: Time } -export interface IlmDeleteLifecycleResponse extends AcknowledgedResponseBase { -} +export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase export type IlmExplainLifecycleLifecycleExplain = IlmExplainLifecycleLifecycleExplainManaged | IlmExplainLifecycleLifecycleExplainUnmanaged @@ -8966,8 +8954,7 @@ export interface IlmGetLifecycleRequest extends RequestBase { timeout?: Time } -export interface IlmGetLifecycleResponse extends DictionaryResponseBase { -} +export type IlmGetLifecycleResponse = Record export interface IlmGetStatusRequest extends RequestBase { } @@ -8998,8 +8985,7 @@ export interface IlmMoveToStepRequest extends RequestBase { next_step?: IlmMoveToStepStepKey } -export interface IlmMoveToStepResponse extends AcknowledgedResponseBase { -} +export type IlmMoveToStepResponse = AcknowledgedResponseBase export interface IlmMoveToStepStepKey { action: string @@ -9014,8 +9000,7 @@ export interface IlmPutLifecycleRequest extends RequestBase { policy?: IlmPolicy } -export interface IlmPutLifecycleResponse extends AcknowledgedResponseBase { -} +export type IlmPutLifecycleResponse = AcknowledgedResponseBase export interface IlmRemovePolicyRequest extends RequestBase { index: IndexName @@ -9030,24 +9015,21 @@ export interface IlmRetryRequest extends RequestBase { index: IndexName } -export interface IlmRetryResponse extends AcknowledgedResponseBase { -} +export type IlmRetryResponse = AcknowledgedResponseBase export interface IlmStartRequest extends RequestBase { master_timeout?: Time timeout?: Time } -export interface IlmStartResponse extends AcknowledgedResponseBase { -} +export type IlmStartResponse = AcknowledgedResponseBase export interface IlmStopRequest extends RequestBase { master_timeout?: Time timeout?: Time } -export interface IlmStopResponse extends AcknowledgedResponseBase { -} +export type IlmStopResponse = AcknowledgedResponseBase export interface IndicesAlias { filter?: QueryDslQueryContainer @@ -9209,6 +9191,10 @@ export interface IndicesIndexSettings { shards?: integer queries?: IndicesQueries similarity?: IndicesSettingsSimilarity + mappings?: IndicesMappingLimitSettings + 'indexing.slowlog'?: IndicesSlowlogSettings + indexing_pressure?: IndicesIndexingPressure + store?: IndicesStorage } export interface IndicesIndexSettingsAnalysis { @@ -9221,6 +9207,15 @@ export interface IndicesIndexSettingsAnalysis { export interface IndicesIndexSettingsLifecycle { name: Name + indexing_complete?: boolean + origination_date?: long + parse_origination_date?: boolean + step?: IndicesIndexSettingsLifecycleStep + rollover_alias?: string +} + +export interface IndicesIndexSettingsLifecycleStep { + wait_time_threshold?: Time } export interface IndicesIndexSettingsTimeSeries { @@ -9263,6 +9258,47 @@ export interface IndicesIndexVersioning { created_string?: VersionString } +export interface IndicesIndexingPressure { + memory: IndicesIndexingPressureMemory +} + +export interface IndicesIndexingPressureMemory { + limit?: integer +} + +export interface IndicesMappingLimitSettings { + total_fields?: IndicesMappingLimitSettingsTotalFields + depth?: IndicesMappingLimitSettingsDepth + nested_fields?: IndicesMappingLimitSettingsNestedFields + nested_objects?: 
IndicesMappingLimitSettingsNestedObjects + field_name_length?: IndicesMappingLimitSettingsFieldNameLength + dimension_fields?: IndicesMappingLimitSettingsDimensionFields +} + +export interface IndicesMappingLimitSettingsDepth { + limit?: integer +} + +export interface IndicesMappingLimitSettingsDimensionFields { + limit?: integer +} + +export interface IndicesMappingLimitSettingsFieldNameLength { + limit?: long +} + +export interface IndicesMappingLimitSettingsNestedFields { + limit?: integer +} + +export interface IndicesMappingLimitSettingsNestedObjects { + limit?: integer +} + +export interface IndicesMappingLimitSettingsTotalFields { + limit?: integer +} + export interface IndicesMerge { scheduler?: IndicesMergeScheduler } @@ -9309,7 +9345,8 @@ export interface IndicesSettingsQueryString { } export interface IndicesSettingsSearch { - idle: IndicesSearchIdle + idle?: IndicesSearchIdle + slowlog?: IndicesSlowlogSettings } export interface IndicesSettingsSimilarity { @@ -9363,11 +9400,38 @@ export interface IndicesSettingsSimilarityScriptedTfidf { type: 'scripted' } +export interface IndicesSlowlogSettings { + level?: string + source?: integer + reformat?: boolean + threshold?: IndicesSlowlogTresholds +} + +export interface IndicesSlowlogTresholdLevels { + warn?: Time + info?: Time + debug?: Time + trace?: Time +} + +export interface IndicesSlowlogTresholds { + query?: IndicesSlowlogTresholdLevels + fetch?: IndicesSlowlogTresholdLevels + index?: IndicesSlowlogTresholdLevels +} + export interface IndicesSoftDeletes { - enabled: boolean + enabled?: boolean retention_lease?: IndicesRetentionLease } +export interface IndicesStorage { + type: IndicesStorageType + allow_mmap?: boolean +} + +export type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs' + export interface IndicesStringFielddata { format: IndicesStringFielddataFormat } @@ -9384,13 +9448,17 @@ export interface IndicesTemplateMapping { } export interface IndicesTranslog { - durability?: string - flush_threshold_size?: string + sync_interval?: Time + durability?: IndicesTranslogDurability + flush_threshold_size?: ByteSize retention?: IndicesTranslogRetention } +export type IndicesTranslogDurability = 'request' | 'async' + export interface IndicesTranslogRetention { - size: ByteSize + size?: ByteSize + age?: Time } export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' @@ -9410,7 +9478,8 @@ export interface IndicesAddBlockRequest extends RequestBase { timeout?: Time } -export interface IndicesAddBlockResponse extends AcknowledgedResponseBase { +export interface IndicesAddBlockResponse { + acknowledged: boolean shards_acknowledged: boolean indices: IndicesAddBlockIndicesBlockStatus[] } @@ -9490,8 +9559,7 @@ export interface IndicesClearCacheRequest extends RequestBase { request?: boolean } -export interface IndicesClearCacheResponse extends ShardsOperationResponseBase { -} +export type IndicesClearCacheResponse = ShardsOperationResponseBase export interface IndicesCloneRequest extends RequestBase { index: IndexName @@ -9503,7 +9571,8 @@ export interface IndicesCloneRequest extends RequestBase { settings?: Record } -export interface IndicesCloneResponse extends AcknowledgedResponseBase { +export interface IndicesCloneResponse { + acknowledged: boolean index: IndexName shards_acknowledged: boolean } @@ -9527,7 +9596,8 @@ export interface IndicesCloseRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards } -export interface IndicesCloseResponse extends 
AcknowledgedResponseBase { +export interface IndicesCloseResponse { + acknowledged: boolean indices: Record shards_acknowledged: boolean } @@ -9552,8 +9622,7 @@ export interface IndicesCreateDataStreamRequest extends RequestBase { name: DataStreamName } -export interface IndicesCreateDataStreamResponse extends AcknowledgedResponseBase { -} +export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase export interface IndicesDataStreamsStatsDataStreamsStatsItem { backing_indices: integer @@ -9586,8 +9655,7 @@ export interface IndicesDeleteRequest extends RequestBase { timeout?: Time } -export interface IndicesDeleteResponse extends IndicesResponseBase { -} +export type IndicesDeleteResponse = IndicesResponseBase export interface IndicesDeleteAliasRequest extends RequestBase { index: Indices @@ -9596,16 +9664,14 @@ export interface IndicesDeleteAliasRequest extends RequestBase { timeout?: Time } -export interface IndicesDeleteAliasResponse extends AcknowledgedResponseBase { -} +export type IndicesDeleteAliasResponse = AcknowledgedResponseBase export interface IndicesDeleteDataStreamRequest extends RequestBase { name: DataStreamNames expand_wildcards?: ExpandWildcards } -export interface IndicesDeleteDataStreamResponse extends AcknowledgedResponseBase { -} +export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase export interface IndicesDeleteIndexTemplateRequest extends RequestBase { name: Names @@ -9613,8 +9679,7 @@ export interface IndicesDeleteIndexTemplateRequest extends RequestBase { timeout?: Time } -export interface IndicesDeleteIndexTemplateResponse extends AcknowledgedResponseBase { -} +export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesDeleteTemplateRequest extends RequestBase { name: Name @@ -9622,8 +9687,7 @@ export interface IndicesDeleteTemplateRequest extends RequestBase { timeout?: Time } -export interface IndicesDeleteTemplateResponse extends AcknowledgedResponseBase { -} +export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase export interface IndicesDiskUsageRequest extends RequestBase { index: IndexName @@ -9743,8 +9807,7 @@ export interface IndicesFlushRequest extends RequestBase { wait_if_ongoing?: boolean } -export interface IndicesFlushResponse extends ShardsOperationResponseBase { -} +export type IndicesFlushResponse = ShardsOperationResponseBase export interface IndicesForcemergeRequest extends RequestBase { index?: Indices @@ -9757,8 +9820,11 @@ export interface IndicesForcemergeRequest extends RequestBase { wait_for_completion?: boolean } -export interface IndicesForcemergeResponse extends ShardsOperationResponseBase { -} +export type IndicesForcemergeResponse = ShardsOperationResponseBase + +export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings' + +export type IndicesGetFeatures = IndicesGetFeature | IndicesGetFeature[] export interface IndicesGetRequest extends RequestBase { index: Indices @@ -9769,10 +9835,10 @@ export interface IndicesGetRequest extends RequestBase { include_defaults?: boolean local?: boolean master_timeout?: Time + features?: IndicesGetFeatures } -export interface IndicesGetResponse extends DictionaryResponseBase { -} +export type IndicesGetResponse = Record export interface IndicesGetAliasIndexAliases { aliases: Record @@ -9787,8 +9853,7 @@ export interface IndicesGetAliasRequest extends RequestBase { local?: boolean } -export interface IndicesGetAliasResponse extends DictionaryResponseBase { -} +export type IndicesGetAliasResponse = Record 
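// Editor's note: a minimal consumption sketch, not part of the generated type
// definitions. It shows why the switch from `interface ... extends
// DictionaryResponseBase` to a plain Record alias (as in IndicesGetAliasResponse
// above) matters to callers: the response is a dictionary keyed by index name
// and can be iterated directly. The client package name, node URL, and the
// index name 'my-index' are assumptions for the example.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function listAliases (): Promise<void> {
  const response = await client.indices.getAlias({ index: 'my-index' })
  // With the Record alias, Object.entries applies to the response as-is.
  for (const [indexName, indexAliases] of Object.entries(response)) {
    console.log(indexName, Object.keys(indexAliases.aliases))
  }
}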
export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames @@ -9809,8 +9874,7 @@ export interface IndicesGetFieldMappingRequest extends RequestBase { local?: boolean } -export interface IndicesGetFieldMappingResponse extends DictionaryResponseBase { -} +export type IndicesGetFieldMappingResponse = Record export interface IndicesGetFieldMappingTypeFieldMappings { mappings: Partial> @@ -9846,8 +9910,7 @@ export interface IndicesGetMappingRequest extends RequestBase { master_timeout?: Time } -export interface IndicesGetMappingResponse extends DictionaryResponseBase { -} +export type IndicesGetMappingResponse = Record export interface IndicesGetSettingsRequest extends RequestBase { index?: Indices @@ -9861,8 +9924,7 @@ export interface IndicesGetSettingsRequest extends RequestBase { master_timeout?: Time } -export interface IndicesGetSettingsResponse extends DictionaryResponseBase { -} +export type IndicesGetSettingsResponse = Record export interface IndicesGetTemplateRequest extends RequestBase { name?: Names @@ -9871,15 +9933,13 @@ export interface IndicesGetTemplateRequest extends RequestBase { master_timeout?: Time } -export interface IndicesGetTemplateResponse extends DictionaryResponseBase { -} +export type IndicesGetTemplateResponse = Record export interface IndicesMigrateToDataStreamRequest extends RequestBase { name: IndexName } -export interface IndicesMigrateToDataStreamResponse extends AcknowledgedResponseBase { -} +export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase export interface IndicesOpenRequest extends RequestBase { index: Indices @@ -9891,7 +9951,8 @@ export interface IndicesOpenRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards } -export interface IndicesOpenResponse extends AcknowledgedResponseBase { +export interface IndicesOpenResponse { + acknowledged: boolean shards_acknowledged: boolean } @@ -9913,8 +9974,7 @@ export interface IndicesPutAliasRequest extends RequestBase { search_routing?: Routing } -export interface IndicesPutAliasResponse extends AcknowledgedResponseBase { -} +export type IndicesPutAliasResponse = AcknowledgedResponseBase export interface IndicesPutIndexTemplateIndexTemplateMapping { aliases?: Record @@ -9934,8 +9994,7 @@ export interface IndicesPutIndexTemplateRequest extends RequestBase { _meta?: Metadata } -export interface IndicesPutIndexTemplateResponse extends AcknowledgedResponseBase { -} +export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesPutMappingRequest extends RequestBase { index: Indices @@ -9958,8 +10017,7 @@ export interface IndicesPutMappingRequest extends RequestBase { runtime?: MappingRuntimeFields } -export interface IndicesPutMappingResponse extends IndicesResponseBase { -} +export type IndicesPutMappingResponse = IndicesResponseBase export interface IndicesPutSettingsRequest extends RequestBase { index?: Indices @@ -9973,8 +10031,7 @@ export interface IndicesPutSettingsRequest extends RequestBase { settings?: IndicesIndexSettings } -export interface IndicesPutSettingsResponse extends AcknowledgedResponseBase { -} +export type IndicesPutSettingsResponse = AcknowledgedResponseBase export interface IndicesPutTemplateRequest extends RequestBase { name: Name @@ -9990,8 +10047,7 @@ export interface IndicesPutTemplateRequest extends RequestBase { version?: VersionNumber } -export interface IndicesPutTemplateResponse extends AcknowledgedResponseBase { -} +export type IndicesPutTemplateResponse = 
AcknowledgedResponseBase export interface IndicesRecoveryFileDetails { length: long @@ -10061,8 +10117,7 @@ export interface IndicesRecoveryRequest extends RequestBase { detailed?: boolean } -export interface IndicesRecoveryResponse extends DictionaryResponseBase { -} +export type IndicesRecoveryResponse = Record export interface IndicesRecoveryShardRecovery { id: long @@ -10106,8 +10161,7 @@ export interface IndicesRefreshRequest extends RequestBase { ignore_unavailable?: boolean } -export interface IndicesRefreshResponse extends ShardsOperationResponseBase { -} +export type IndicesRefreshResponse = ShardsOperationResponseBase export interface IndicesReloadSearchAnalyzersReloadDetails { index: string @@ -10169,7 +10223,8 @@ export interface IndicesRolloverRequest extends RequestBase { settings?: Record } -export interface IndicesRolloverResponse extends AcknowledgedResponseBase { +export interface IndicesRolloverResponse { + acknowledged: boolean conditions: Record dry_run: boolean new_index: string @@ -10281,7 +10336,8 @@ export interface IndicesShrinkRequest extends RequestBase { settings?: Record } -export interface IndicesShrinkResponse extends AcknowledgedResponseBase { +export interface IndicesShrinkResponse { + acknowledged: boolean shards_acknowledged: boolean index: IndexName } @@ -10336,7 +10392,8 @@ export interface IndicesSplitRequest extends RequestBase { settings?: Record } -export interface IndicesSplitResponse extends AcknowledgedResponseBase { +export interface IndicesSplitResponse { + acknowledged: boolean shards_acknowledged: boolean index: IndexName } @@ -10496,7 +10553,8 @@ export interface IndicesUnfreezeRequest extends RequestBase { wait_for_active_shards?: string } -export interface IndicesUnfreezeResponse extends AcknowledgedResponseBase { +export interface IndicesUnfreezeResponse { + acknowledged: boolean shards_acknowledged: boolean } @@ -10540,8 +10598,7 @@ export interface IndicesUpdateAliasesRequest extends RequestBase { actions?: IndicesUpdateAliasesAction[] } -export interface IndicesUpdateAliasesResponse extends AcknowledgedResponseBase { -} +export type IndicesUpdateAliasesResponse = AcknowledgedResponseBase export interface IndicesValidateQueryIndicesValidationExplanation { error?: string @@ -10882,8 +10939,7 @@ export interface IngestDeletePipelineRequest extends RequestBase { timeout?: Time } -export interface IngestDeletePipelineResponse extends AcknowledgedResponseBase { -} +export type IngestDeletePipelineResponse = AcknowledgedResponseBase export interface IngestGeoIpStatsGeoIpDownloadStatistics { successful_downloads: integer @@ -10916,8 +10972,7 @@ export interface IngestGetPipelineRequest extends RequestBase { summary?: boolean } -export interface IngestGetPipelineResponse extends DictionaryResponseBase { -} +export type IngestGetPipelineResponse = Record export interface IngestProcessorGrokRequest extends RequestBase { } @@ -10937,8 +10992,7 @@ export interface IngestPutPipelineRequest extends RequestBase { version?: VersionNumber } -export interface IngestPutPipelineResponse extends AcknowledgedResponseBase { -} +export type IngestPutPipelineResponse = AcknowledgedResponseBase export interface IngestSimulateDocument { _id?: Id @@ -10999,8 +11053,7 @@ export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'd export interface LicenseDeleteRequest extends RequestBase { } -export interface LicenseDeleteResponse extends AcknowledgedResponseBase { -} +export type LicenseDeleteResponse = AcknowledgedResponseBase export 
interface LicenseGetLicenseInformation { expiry_date?: DateString @@ -11061,7 +11114,8 @@ export interface LicensePostStartBasicRequest extends RequestBase { acknowledge?: boolean } -export interface LicensePostStartBasicResponse extends AcknowledgedResponseBase { +export interface LicensePostStartBasicResponse { + acknowledged: boolean basic_was_started: boolean error_message?: string type?: LicenseLicenseType @@ -11073,7 +11127,8 @@ export interface LicensePostStartTrialRequest extends RequestBase { type_query_string?: string } -export interface LicensePostStartTrialResponse extends AcknowledgedResponseBase { +export interface LicensePostStartTrialResponse { + acknowledged: boolean error_message?: string trial_was_started: boolean type?: LicenseLicenseType @@ -12138,16 +12193,14 @@ export interface MlDeleteCalendarRequest extends RequestBase { calendar_id: Id } -export interface MlDeleteCalendarResponse extends AcknowledgedResponseBase { -} +export type MlDeleteCalendarResponse = AcknowledgedResponseBase export interface MlDeleteCalendarEventRequest extends RequestBase { calendar_id: Id event_id: Id } -export interface MlDeleteCalendarEventResponse extends AcknowledgedResponseBase { -} +export type MlDeleteCalendarEventResponse = AcknowledgedResponseBase export interface MlDeleteCalendarJobRequest extends RequestBase { calendar_id: Id @@ -12166,16 +12219,14 @@ export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { timeout?: Time } -export interface MlDeleteDataFrameAnalyticsResponse extends AcknowledgedResponseBase { -} +export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase export interface MlDeleteDatafeedRequest extends RequestBase { datafeed_id: Id force?: boolean } -export interface MlDeleteDatafeedResponse extends AcknowledgedResponseBase { -} +export type MlDeleteDatafeedResponse = AcknowledgedResponseBase export interface MlDeleteExpiredDataRequest extends RequestBase { job_id?: Id @@ -12191,8 +12242,7 @@ export interface MlDeleteFilterRequest extends RequestBase { filter_id: Id } -export interface MlDeleteFilterResponse extends AcknowledgedResponseBase { -} +export type MlDeleteFilterResponse = AcknowledgedResponseBase export interface MlDeleteForecastRequest extends RequestBase { job_id: Id @@ -12201,8 +12251,7 @@ export interface MlDeleteForecastRequest extends RequestBase { timeout?: Time } -export interface MlDeleteForecastResponse extends AcknowledgedResponseBase { -} +export type MlDeleteForecastResponse = AcknowledgedResponseBase export interface MlDeleteJobRequest extends RequestBase { job_id: Id @@ -12210,32 +12259,28 @@ export interface MlDeleteJobRequest extends RequestBase { wait_for_completion?: boolean } -export interface MlDeleteJobResponse extends AcknowledgedResponseBase { -} +export type MlDeleteJobResponse = AcknowledgedResponseBase export interface MlDeleteModelSnapshotRequest extends RequestBase { job_id: Id snapshot_id: Id } -export interface MlDeleteModelSnapshotResponse extends AcknowledgedResponseBase { -} +export type MlDeleteModelSnapshotResponse = AcknowledgedResponseBase export interface MlDeleteTrainedModelRequest extends RequestBase { model_id: Id force?: boolean } -export interface MlDeleteTrainedModelResponse extends AcknowledgedResponseBase { -} +export type MlDeleteTrainedModelResponse = AcknowledgedResponseBase export interface MlDeleteTrainedModelAliasRequest extends RequestBase { model_alias: Name model_id: Id } -export interface MlDeleteTrainedModelAliasResponse extends AcknowledgedResponseBase { -} 
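// Editor's note: an illustrative sketch, not part of the generated types. The
// hunks in this region collapse empty `interface X extends
// AcknowledgedResponseBase {}` declarations into `type X = AcknowledgedResponseBase`
// aliases; the two forms are structurally identical, so acknowledgement checks
// like the one below keep compiling unchanged. Client setup and the job id are
// assumptions for the example.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function deleteMlJob (jobId: string): Promise<void> {
  const response = await client.ml.deleteJob({ job_id: jobId })
  // AcknowledgedResponseBase exposes a single boolean flag.
  if (!response.acknowledged) {
    throw new Error(`deletion of ML job ${jobId} was not acknowledged`)
  }
}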
+export type MlDeleteTrainedModelAliasResponse = AcknowledgedResponseBase export interface MlEstimateModelMemoryRequest extends RequestBase { analysis_config?: MlAnalysisConfig @@ -12376,7 +12421,8 @@ export interface MlForecastRequest extends RequestBase { max_model_memory?: string } -export interface MlForecastResponse extends AcknowledgedResponseBase { +export interface MlForecastResponse { + acknowledged: boolean forecast_id: Id } @@ -12543,6 +12589,58 @@ export interface MlGetJobsResponse { jobs: MlJob[] } +export interface MlGetMemoryStatsJvmStats { + heap_max: ByteSize + heap_max_in_bytes: integer + java_inference: ByteSize + java_inference_in_bytes: integer + java_inference_max: ByteSize + java_inference_max_in_bytes: integer +} + +export interface MlGetMemoryStatsMemMlStats { + anomaly_detectors: ByteSize + anomaly_detectors_in_bytes: integer + data_frame_analytics: ByteSize + data_frame_analytics_in_bytes: integer + max: ByteSize + max_in_bytes: integer + native_code_overhead: ByteSize + native_code_overhead_in_bytes: integer + native_inference: ByteSize + native_inference_in_bytes: integer +} + +export interface MlGetMemoryStatsMemStats { + adjusted_total: ByteSize + adjusted_total_in_bytes: integer + total: ByteSize + total_in_bytes: integer + ml: MlGetMemoryStatsMemMlStats +} + +export interface MlGetMemoryStatsMemory { + attributes: string[] + jvm: MlGetMemoryStatsJvmStats + mem: MlGetMemoryStatsMemStats + name: Name + roles: string[] + transport_address: TransportAddress +} + +export interface MlGetMemoryStatsRequest extends RequestBase { + node_id?: Id + human?: boolean + master_timeout?: Time + timeout?: Time +} + +export interface MlGetMemoryStatsResponse { + _nodes: NodeStatistics + cluser_name: Name + nodes: Record +} + export interface MlGetModelSnapshotsRequest extends RequestBase { job_id: Id snapshot_id?: Id @@ -12990,8 +13088,7 @@ export interface MlPutTrainedModelAliasRequest extends RequestBase { reassign?: boolean } -export interface MlPutTrainedModelAliasResponse extends AcknowledgedResponseBase { -} +export type MlPutTrainedModelAliasResponse = AcknowledgedResponseBase export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { model_id: Id @@ -13001,24 +13098,22 @@ export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { total_parts: integer } -export interface MlPutTrainedModelDefinitionPartResponse extends AcknowledgedResponseBase { -} +export type MlPutTrainedModelDefinitionPartResponse = AcknowledgedResponseBase export interface MlPutTrainedModelVocabularyRequest extends RequestBase { model_id: Id vocabulary: string[] + merges?: string[] } -export interface MlPutTrainedModelVocabularyResponse extends AcknowledgedResponseBase { -} +export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase export interface MlResetJobRequest extends RequestBase { job_id: Id wait_for_completion?: boolean } -export interface MlResetJobResponse extends AcknowledgedResponseBase { -} +export type MlResetJobResponse = AcknowledgedResponseBase export interface MlRevertModelSnapshotRequest extends RequestBase { job_id: Id @@ -13035,15 +13130,15 @@ export interface MlSetUpgradeModeRequest extends RequestBase { timeout?: Time } -export interface MlSetUpgradeModeResponse extends AcknowledgedResponseBase { -} +export type MlSetUpgradeModeResponse = AcknowledgedResponseBase export interface MlStartDataFrameAnalyticsRequest extends RequestBase { id: Id timeout?: Time } -export interface MlStartDataFrameAnalyticsResponse extends 
AcknowledgedResponseBase { +export interface MlStartDataFrameAnalyticsResponse { + acknowledged: boolean node: NodeId } @@ -13226,7 +13321,8 @@ export interface MlUpdateModelSnapshotRequest extends RequestBase { retain?: boolean } -export interface MlUpdateModelSnapshotResponse extends AcknowledgedResponseBase { +export interface MlUpdateModelSnapshotResponse { + acknowledged: boolean model: MlModelSnapshot } @@ -13254,15 +13350,13 @@ export interface MlValidateRequest extends RequestBase { results_index_name?: IndexName } -export interface MlValidateResponse extends AcknowledgedResponseBase { -} +export type MlValidateResponse = AcknowledgedResponseBase export interface MlValidateDetectorRequest extends RequestBase { detector?: MlDetector } -export interface MlValidateDetectorResponse extends AcknowledgedResponseBase { -} +export type MlValidateDetectorResponse = AcknowledgedResponseBase export interface MonitoringBulkRequest extends RequestBase { type?: string @@ -13729,7 +13823,9 @@ export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBas max_archive_version: long } -export interface NodesClearRepositoriesMeteringArchiveResponse extends NodesNodesResponseBase { +export type NodesClearRepositoriesMeteringArchiveResponse = NodesClearRepositoriesMeteringArchiveResponseBase + +export interface NodesClearRepositoriesMeteringArchiveResponseBase extends NodesNodesResponseBase { cluster_name: Name nodes: Record } @@ -13738,7 +13834,9 @@ export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { node_id: NodeIds } -export interface NodesGetRepositoriesMeteringInfoResponse extends NodesNodesResponseBase { +export type NodesGetRepositoriesMeteringInfoResponse = NodesGetRepositoriesMeteringInfoResponseBase + +export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodesResponseBase { cluster_name: Name nodes: Record } @@ -14113,7 +14211,9 @@ export interface NodesInfoRequest extends RequestBase { timeout?: Time } -export interface NodesInfoResponse extends NodesNodesResponseBase { +export type NodesInfoResponse = NodesInfoResponseBase + +export interface NodesInfoResponseBase extends NodesNodesResponseBase { cluster_name: Name nodes: Record } @@ -14124,7 +14224,9 @@ export interface NodesReloadSecureSettingsRequest extends RequestBase { secure_settings_password?: Password } -export interface NodesReloadSecureSettingsResponse extends NodesNodesResponseBase { +export type NodesReloadSecureSettingsResponse = NodesReloadSecureSettingsResponseBase + +export interface NodesReloadSecureSettingsResponseBase extends NodesNodesResponseBase { cluster_name: Name nodes: Record } @@ -14145,7 +14247,9 @@ export interface NodesStatsRequest extends RequestBase { include_unloaded_segments?: boolean } -export interface NodesStatsResponse extends NodesNodesResponseBase { +export type NodesStatsResponse = NodesStatsResponseBase + +export interface NodesStatsResponseBase extends NodesNodesResponseBase { cluster_name?: Name nodes: Record } @@ -14163,7 +14267,9 @@ export interface NodesUsageRequest extends RequestBase { timeout?: Time } -export interface NodesUsageResponse extends NodesNodesResponseBase { +export type NodesUsageResponse = NodesUsageResponseBase + +export interface NodesUsageResponseBase extends NodesNodesResponseBase { cluster_name: Name nodes: Record } @@ -14204,7 +14310,8 @@ export interface RollupDeleteJobRequest extends RequestBase { id: Id } -export interface RollupDeleteJobResponse extends AcknowledgedResponseBase { +export interface 
RollupDeleteJobResponse { + acknowledged: boolean task_failures?: TaskFailure[] } @@ -14260,8 +14367,7 @@ export interface RollupGetRollupCapsRequest extends RequestBase { id?: Id } -export interface RollupGetRollupCapsResponse extends DictionaryResponseBase { -} +export type RollupGetRollupCapsResponse = Record export interface RollupGetRollupCapsRollupCapabilities { rollup_jobs: RollupGetRollupCapsRollupCapabilitySummary[] @@ -14282,8 +14388,7 @@ export interface RollupGetRollupIndexCapsRequest extends RequestBase { index: Ids } -export interface RollupGetRollupIndexCapsResponse extends DictionaryResponseBase { -} +export type RollupGetRollupIndexCapsResponse = Record export interface RollupGetRollupIndexCapsRollupJobSummary { fields: Record @@ -14310,8 +14415,7 @@ export interface RollupPutJobRequest extends RequestBase { headers?: HttpHeaders } -export interface RollupPutJobResponse extends AcknowledgedResponseBase { -} +export type RollupPutJobResponse = AcknowledgedResponseBase export interface RollupRollupRequest extends RequestBase { index: IndexName @@ -14676,8 +14780,7 @@ export interface SecurityDeletePrivilegesRequest extends RequestBase { refresh?: Refresh } -export interface SecurityDeletePrivilegesResponse extends DictionaryResponseBase> { -} +export type SecurityDeletePrivilegesResponse = Record> export interface SecurityDeleteRoleRequest extends RequestBase { name: Name @@ -14783,15 +14886,13 @@ export interface SecurityGetPrivilegesRequest extends RequestBase { name?: Names } -export interface SecurityGetPrivilegesResponse extends DictionaryResponseBase> { -} +export type SecurityGetPrivilegesResponse = Record> export interface SecurityGetRoleRequest extends RequestBase { name?: Names } -export interface SecurityGetRoleResponse extends DictionaryResponseBase { -} +export type SecurityGetRoleResponse = Record export interface SecurityGetRoleRole { cluster: string[] @@ -14815,16 +14916,14 @@ export interface SecurityGetRoleMappingRequest extends RequestBase { name?: Names } -export interface SecurityGetRoleMappingResponse extends DictionaryResponseBase { -} +export type SecurityGetRoleMappingResponse = Record export interface SecurityGetServiceAccountsRequest extends RequestBase { namespace?: Namespace service?: Service } -export interface SecurityGetServiceAccountsResponse extends DictionaryResponseBase { -} +export type SecurityGetServiceAccountsResponse = Record export interface SecurityGetServiceAccountsRoleDescriptor { cluster: string[] @@ -14903,8 +15002,7 @@ export interface SecurityGetUserRequest extends RequestBase { username?: Username | Username[] } -export interface SecurityGetUserResponse extends DictionaryResponseBase { -} +export type SecurityGetUserResponse = Record export interface SecurityGetUserPrivilegesRequest extends RequestBase { application?: Name @@ -15018,8 +15116,7 @@ export interface SecurityPutPrivilegesRequest extends RequestBase { privileges?: Record> } -export interface SecurityPutPrivilegesResponse extends DictionaryResponseBase> { -} +export type SecurityPutPrivilegesResponse = Record> export interface SecurityPutRoleRequest extends RequestBase { name: Name @@ -15150,8 +15247,7 @@ export interface ShutdownDeleteNodeRequest extends RequestBase { node_id: NodeId } -export interface ShutdownDeleteNodeResponse extends AcknowledgedResponseBase { -} +export type ShutdownDeleteNodeResponse = AcknowledgedResponseBase export interface ShutdownGetNodeNodeShutdownStatus { node_id: NodeId @@ -15192,8 +15288,7 @@ export interface ShutdownPutNodeRequest 
extends RequestBase { node_id: NodeId } -export interface ShutdownPutNodeResponse extends AcknowledgedResponseBase { -} +export type ShutdownPutNodeResponse = AcknowledgedResponseBase export interface SlmConfiguration { ignore_unavailable?: boolean @@ -15264,8 +15359,7 @@ export interface SlmDeleteLifecycleRequest extends RequestBase { policy_id: Name } -export interface SlmDeleteLifecycleResponse extends AcknowledgedResponseBase { -} +export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase export interface SlmExecuteLifecycleRequest extends RequestBase { policy_id: Name @@ -15278,15 +15372,13 @@ export interface SlmExecuteLifecycleResponse { export interface SlmExecuteRetentionRequest extends RequestBase { } -export interface SlmExecuteRetentionResponse extends AcknowledgedResponseBase { -} +export type SlmExecuteRetentionResponse = AcknowledgedResponseBase export interface SlmGetLifecycleRequest extends RequestBase { policy_id?: Names } -export interface SlmGetLifecycleResponse extends DictionaryResponseBase { -} +export type SlmGetLifecycleResponse = Record export interface SlmGetStatsRequest extends RequestBase { } @@ -15322,20 +15414,17 @@ export interface SlmPutLifecycleRequest extends RequestBase { schedule?: WatcherCronExpression } -export interface SlmPutLifecycleResponse extends AcknowledgedResponseBase { -} +export type SlmPutLifecycleResponse = AcknowledgedResponseBase export interface SlmStartRequest extends RequestBase { } -export interface SlmStartResponse extends AcknowledgedResponseBase { -} +export type SlmStartResponse = AcknowledgedResponseBase export interface SlmStopRequest extends RequestBase { } -export interface SlmStopResponse extends AcknowledgedResponseBase { -} +export type SlmStopResponse = AcknowledgedResponseBase export interface SnapshotFileCountSnapshotStats { file_count: integer @@ -15479,8 +15568,7 @@ export interface SnapshotCloneRequest extends RequestBase { indices: string } -export interface SnapshotCloneResponse extends AcknowledgedResponseBase { -} +export type SnapshotCloneResponse = AcknowledgedResponseBase export interface SnapshotCreateRequest extends RequestBase { repository: Name @@ -15510,8 +15598,7 @@ export interface SnapshotCreateRepositoryRequest extends RequestBase { settings: SnapshotRepositorySettings } -export interface SnapshotCreateRepositoryResponse extends AcknowledgedResponseBase { -} +export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase export interface SnapshotDeleteRequest extends RequestBase { repository: Name @@ -15519,8 +15606,7 @@ export interface SnapshotDeleteRequest extends RequestBase { master_timeout?: Time } -export interface SnapshotDeleteResponse extends AcknowledgedResponseBase { -} +export type SnapshotDeleteResponse = AcknowledgedResponseBase export interface SnapshotDeleteRepositoryRequest extends RequestBase { name: Names @@ -15528,8 +15614,7 @@ export interface SnapshotDeleteRepositoryRequest extends RequestBase { timeout?: Time } -export interface SnapshotDeleteRepositoryResponse extends AcknowledgedResponseBase { -} +export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase export interface SnapshotGetRequest extends RequestBase { repository: Name @@ -15568,8 +15653,7 @@ export interface SnapshotGetRepositoryRequest extends RequestBase { master_timeout?: Time } -export interface SnapshotGetRepositoryResponse extends DictionaryResponseBase { -} +export type SnapshotGetRepositoryResponse = Record export interface SnapshotRestoreRequest extends RequestBase { 
repository: Name @@ -15641,8 +15725,7 @@ export interface SqlDeleteAsyncRequest extends RequestBase { id: Id } -export interface SqlDeleteAsyncResponse extends AcknowledgedResponseBase { -} +export type SqlDeleteAsyncResponse = AcknowledgedResponseBase export interface SqlGetAsyncRequest extends RequestBase { id: Id @@ -15800,8 +15883,7 @@ export interface TasksCancelRequest extends RequestBase { wait_for_completion?: boolean } -export interface TasksCancelResponse extends TasksTaskListResponseBase { -} +export type TasksCancelResponse = TasksTaskListResponseBase export interface TasksGetRequest extends RequestBase { task_id: Id @@ -15827,8 +15909,7 @@ export interface TasksListRequest extends RequestBase { wait_for_completion?: boolean } -export interface TasksListResponse extends TasksTaskListResponseBase { -} +export type TasksListResponse = TasksTaskListResponseBase export interface TextStructureFindStructureFieldStat { count: integer @@ -15951,8 +16032,7 @@ export interface TransformDeleteTransformRequest extends RequestBase { timeout?: Time } -export interface TransformDeleteTransformResponse extends AcknowledgedResponseBase { -} +export type TransformDeleteTransformResponse = AcknowledgedResponseBase export interface TransformGetTransformRequest extends RequestBase { transform_id?: Names @@ -16084,24 +16164,21 @@ export interface TransformPutTransformRequest extends RequestBase { sync?: TransformSyncContainer } -export interface TransformPutTransformResponse extends AcknowledgedResponseBase { -} +export type TransformPutTransformResponse = AcknowledgedResponseBase export interface TransformResetTransformRequest extends RequestBase { transform_id: Id force?: boolean } -export interface TransformResetTransformResponse extends AcknowledgedResponseBase { -} +export type TransformResetTransformResponse = AcknowledgedResponseBase export interface TransformStartTransformRequest extends RequestBase { transform_id: Id timeout?: Time } -export interface TransformStartTransformResponse extends AcknowledgedResponseBase { -} +export type TransformStartTransformResponse = AcknowledgedResponseBase export interface TransformStopTransformRequest extends RequestBase { transform_id: Name @@ -16112,8 +16189,7 @@ export interface TransformStopTransformRequest extends RequestBase { wait_for_completion?: boolean } -export interface TransformStopTransformResponse extends AcknowledgedResponseBase { -} +export type TransformStopTransformResponse = AcknowledgedResponseBase export interface TransformUpdateTransformRequest extends RequestBase { transform_id: Id @@ -16551,10 +16627,18 @@ export interface WatcherSearchInputRequestDefinition { indices?: IndexName[] indices_options?: IndicesOptions search_type?: SearchType - template?: SearchTemplateRequest + template?: WatcherSearchTemplateRequestBody rest_total_hits_as_int?: boolean } +export interface WatcherSearchTemplateRequestBody { + explain?: boolean + id?: Id + params?: Record + profile?: boolean + source?: string +} + export interface WatcherSimulatedActions { actions: string[] all: WatcherSimulatedActions @@ -16794,8 +16878,7 @@ export interface WatcherQueryWatchesResponse { export interface WatcherStartRequest extends RequestBase { } -export interface WatcherStartResponse extends AcknowledgedResponseBase { -} +export type WatcherStartResponse = AcknowledgedResponseBase export interface WatcherStatsRequest extends RequestBase { metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[] @@ -16837,8 +16920,7 @@ export type WatcherStatsWatcherState = 
'stopped' | 'starting' | 'started' | 'sto export interface WatcherStopRequest extends RequestBase { } -export interface WatcherStopResponse extends AcknowledgedResponseBase { -} +export type WatcherStopResponse = AcknowledgedResponseBase export interface XpackInfoBuildInformation { date: DateString @@ -16880,6 +16962,7 @@ export interface XpackInfoFeatures { vectors?: XpackInfoFeature voting_only: XpackInfoFeature watcher: XpackInfoFeature + archive: XpackInfoFeature } export interface XpackInfoMinimalLicenseInformation { @@ -16898,6 +16981,7 @@ export interface XpackInfoNativeCodeInformation { export interface XpackInfoRequest extends RequestBase { categories?: string[] accept_enterprise?: boolean + human?: boolean } export interface XpackInfoResponse { @@ -16931,6 +17015,10 @@ export interface XpackUsageAnalyticsStatistics { multi_terms_usage?: long } +export interface XpackUsageArchive extends XpackUsageBase { + indices_count: long +} + export interface XpackUsageAudit extends XpackUsageFeatureToggle { outputs?: string[] } @@ -17172,6 +17260,7 @@ export interface XpackUsageRequest extends RequestBase { export interface XpackUsageResponse { aggregate_metric: XpackUsageBase analytics: XpackUsageAnalytics + archive: XpackUsageArchive watcher: XpackUsageWatcher ccr: XpackUsageCcr data_frame?: XpackUsageBase diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index c2a1ef00d..a9c2646a8 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -179,8 +179,7 @@ export interface CreateRequest extends RequestBase { body?: TDocument } -export interface CreateResponse extends WriteResponseBase { -} +export type CreateResponse = WriteResponseBase export interface DeleteRequest extends RequestBase { id: Id @@ -195,8 +194,7 @@ export interface DeleteRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards } -export interface DeleteResponse extends WriteResponseBase { -} +export type DeleteResponse = WriteResponseBase export interface DeleteByQueryRequest extends RequestBase { index: Indices @@ -259,8 +257,7 @@ export interface DeleteByQueryRethrottleRequest extends RequestBase { requests_per_second?: long } -export interface DeleteByQueryRethrottleResponse extends TasksListResponse { -} +export type DeleteByQueryRethrottleResponse = TasksListResponse export interface DeleteScriptRequest extends RequestBase { id: Id @@ -268,8 +265,7 @@ export interface DeleteScriptRequest extends RequestBase { timeout?: Time } -export interface DeleteScriptResponse extends AcknowledgedResponseBase { -} +export type DeleteScriptResponse = AcknowledgedResponseBase export interface ExistsRequest extends RequestBase { id: Id @@ -364,9 +360,11 @@ export interface FieldCapsRequest extends RequestBase { index?: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards - fields?: Fields + fields: Fields ignore_unavailable?: boolean include_unmapped?: boolean + filters?: string + types?: string[] /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { index_filter?: QueryDslQueryContainer @@ -490,8 +488,7 @@ export interface IndexRequest extends RequestBase { body?: TDocument } -export interface IndexResponse extends WriteResponseBase { -} +export type IndexResponse = WriteResponseBase export interface InfoRequest extends RequestBase { } @@ -513,6 +510,7 @@ export interface KnnSearchRequest extends RequestBase { docvalue_fields?: (QueryDslFieldAndFormat | Field)[] stored_fields?: Fields fields?: Fields + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] knn: KnnSearchQuery } } @@ -705,6 +703,7 @@ export interface MtermvectorsTermVectorsResult { export interface OpenPointInTimeRequest extends RequestBase { index: Indices keep_alive: Time + ignore_unavailable?: boolean } export interface OpenPointInTimeResponse { @@ -727,8 +726,7 @@ export interface PutScriptRequest extends RequestBase { } } -export interface PutScriptResponse extends AcknowledgedResponseBase { -} +export type PutScriptResponse = AcknowledgedResponseBase export interface RankEvalDocumentRating { _id: Id @@ -853,11 +851,11 @@ export interface ReindexRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { conflicts?: Conflicts - dest?: ReindexDestination + dest: ReindexDestination max_docs?: long script?: Script size?: long - source?: ReindexSource + source: ReindexSource } } @@ -1763,7 +1761,9 @@ export interface UpdateRequest } } -export interface UpdateResponse extends WriteResponseBase { +export type UpdateResponse = UpdateUpdateWriteResponseBase + +export interface UpdateUpdateWriteResponseBase extends WriteResponseBase { get?: InlineGet } @@ -1921,10 +1921,6 @@ export type DateOrEpochMillis = DateString | EpochMillis export type DateString = string -export interface DictionaryResponseBase { - [key: string]: TValue -} - export type Distance = string export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm' @@ -4528,7 +4524,7 @@ export interface MappingConstantKeywordProperty extends MappingPropertyBase { type: 'constant_keyword' } -export type MappingCoreProperty = MappingObjectProperty | MappingNestedProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingDocValuesProperty +export type MappingCoreProperty = MappingObjectProperty | MappingNestedProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingDocValuesProperty | MappingMatchOnlyTextProperty export interface MappingCorePropertyBase extends MappingPropertyBase { copy_to?: Fields @@ -4622,7 +4618,7 @@ export interface MappingFieldNamesField { enabled: boolean } -export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' +export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 
'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'match_only_text' export interface MappingFlattenedProperty extends MappingPropertyBase { boost?: double @@ -4745,6 +4741,13 @@ export interface MappingLongRangeProperty extends MappingRangePropertyBase { type: 'long_range' } +export interface MappingMatchOnlyTextProperty { + type: 'match_only_text' + fields?: Record + meta?: Record + copy_to?: Fields +} + export type MappingMatchType = 'simple' | 'regex' export interface MappingMurmur3HashProperty extends MappingDocValuesPropertyBase { @@ -5743,8 +5746,7 @@ export interface AsyncSearchDeleteRequest extends RequestBase { id: Id } -export interface AsyncSearchDeleteResponse extends AcknowledgedResponseBase { -} +export type AsyncSearchDeleteResponse = AcknowledgedResponseBase export interface AsyncSearchGetRequest extends RequestBase { id: Id @@ -5760,7 +5762,9 @@ export interface AsyncSearchStatusRequest extends RequestBase { id: Id } -export interface AsyncSearchStatusResponse extends AsyncSearchAsyncSearchResponseBase { +export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase + +export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSearchResponseBase { _shards: ShardStatistics completion_status?: integer } @@ -5861,8 +5865,7 @@ export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase { name: Name } -export interface AutoscalingDeleteAutoscalingPolicyResponse extends AcknowledgedResponseBase { -} +export type AutoscalingDeleteAutoscalingPolicyResponse = AcknowledgedResponseBase export interface AutoscalingGetAutoscalingCapacityAutoscalingCapacity { node: AutoscalingGetAutoscalingCapacityAutoscalingResources @@ -5910,8 +5913,7 @@ export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { body?: AutoscalingAutoscalingPolicy } -export interface AutoscalingPutAutoscalingPolicyResponse extends AcknowledgedResponseBase { -} +export type AutoscalingPutAutoscalingPolicyResponse = AcknowledgedResponseBase export type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 
'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's' @@ -7730,8 +7732,7 @@ export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { name: Name } -export interface CcrDeleteAutoFollowPatternResponse extends AcknowledgedResponseBase { -} +export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrFollowRequest extends RequestBase { index: IndexName @@ -7839,15 +7840,13 @@ export interface CcrPauseAutoFollowPatternRequest extends RequestBase { name: Name } -export interface CcrPauseAutoFollowPatternResponse extends AcknowledgedResponseBase { -} +export type CcrPauseAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrPauseFollowRequest extends RequestBase { index: IndexName } -export interface CcrPauseFollowResponse extends AcknowledgedResponseBase { -} +export type CcrPauseFollowResponse = AcknowledgedResponseBase export interface CcrPutAutoFollowPatternRequest extends RequestBase { name: Name @@ -7871,15 +7870,13 @@ export interface CcrPutAutoFollowPatternRequest extends RequestBase { } } -export interface CcrPutAutoFollowPatternResponse extends AcknowledgedResponseBase { -} +export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeAutoFollowPatternRequest extends RequestBase { name: Name } -export interface CcrResumeAutoFollowPatternResponse extends AcknowledgedResponseBase { -} +export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeFollowRequest extends RequestBase { index: IndexName @@ -7898,8 +7895,7 @@ export interface CcrResumeFollowRequest extends RequestBase { } } -export interface CcrResumeFollowResponse 
extends AcknowledgedResponseBase { -} +export type CcrResumeFollowResponse = AcknowledgedResponseBase export interface CcrStatsAutoFollowStats { auto_followed_clusters: CcrStatsAutoFollowedCluster[] @@ -7931,8 +7927,7 @@ export interface CcrUnfollowRequest extends RequestBase { index: IndexName } -export interface CcrUnfollowResponse extends AcknowledgedResponseBase { -} +export type CcrUnfollowResponse = AcknowledgedResponseBase export interface ClusterComponentTemplate { name: Name @@ -8074,13 +8069,12 @@ export interface ClusterAllocationExplainUnassignedInformation { export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION' export interface ClusterDeleteComponentTemplateRequest extends RequestBase { - name: Name + name: Names master_timeout?: Time timeout?: Time } -export interface ClusterDeleteComponentTemplateResponse extends AcknowledgedResponseBase { -} +export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { wait_for_removal?: boolean @@ -8216,8 +8210,7 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { } } -export interface ClusterPutComponentTemplateResponse extends AcknowledgedResponseBase { -} +export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterPutSettingsRequest extends RequestBase { flat_settings?: boolean @@ -8262,8 +8255,7 @@ export interface ClusterRemoteInfoClusterRemoteSniffInfo { export interface ClusterRemoteInfoRequest extends RequestBase { } -export interface ClusterRemoteInfoResponse extends DictionaryResponseBase { -} +export type ClusterRemoteInfoResponse = Record export interface ClusterRerouteCommand { cancel?: ClusterRerouteCommandCancelAction @@ -8576,14 +8568,7 @@ export interface ClusterStatsRequest extends RequestBase { timeout?: Time } -export interface ClusterStatsResponse extends NodesNodesResponseBase { - cluster_name: Name - cluster_uuid: Uuid - indices: ClusterStatsClusterIndices - nodes: ClusterStatsClusterNodes - status: HealthStatus - timestamp: long -} +export type ClusterStatsResponse = ClusterStatsStatsResponseBase export interface ClusterStatsRuntimeFieldTypes { name: Name @@ -8602,6 +8587,15 @@ export interface ClusterStatsRuntimeFieldTypes { doc_total: integer } +export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { + cluster_name: Name + cluster_uuid: Uuid + indices: ClusterStatsClusterIndices + nodes: ClusterStatsClusterNodes + status: HealthStatus + timestamp: long +} + export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { index_uuid: Uuid accept_data_loss: boolean @@ -8609,8 +8603,7 @@ export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { timeout?: Time } -export interface DanglingIndicesDeleteDanglingIndexResponse extends AcknowledgedResponseBase { -} +export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { index_uuid: Uuid @@ -8619,8 +8612,7 @@ export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { timeout?: Time } -export 
interface DanglingIndicesImportDanglingIndexResponse extends AcknowledgedResponseBase { -} +export type DanglingIndicesImportDanglingIndexResponse = AcknowledgedResponseBase export interface DanglingIndicesListDanglingIndicesDanglingIndex { index_name: string @@ -8657,8 +8649,7 @@ export interface EnrichDeletePolicyRequest extends RequestBase { name: Name } -export interface EnrichDeletePolicyResponse extends AcknowledgedResponseBase { -} +export type EnrichDeletePolicyResponse = AcknowledgedResponseBase export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' @@ -8693,8 +8684,7 @@ export interface EnrichPutPolicyRequest extends RequestBase { } } -export interface EnrichPutPolicyResponse extends AcknowledgedResponseBase { -} +export type EnrichPutPolicyResponse = AcknowledgedResponseBase export interface EnrichStatsCacheStats { node_id: Id @@ -8757,8 +8747,7 @@ export interface EqlDeleteRequest extends RequestBase { id: Id } -export interface EqlDeleteResponse extends AcknowledgedResponseBase { -} +export type EqlDeleteResponse = AcknowledgedResponseBase export interface EqlGetRequest extends RequestBase { id: Id @@ -9077,8 +9066,7 @@ export interface IlmDeleteLifecycleRequest extends RequestBase { timeout?: Time } -export interface IlmDeleteLifecycleResponse extends AcknowledgedResponseBase { -} +export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase export type IlmExplainLifecycleLifecycleExplain = IlmExplainLifecycleLifecycleExplainManaged | IlmExplainLifecycleLifecycleExplainUnmanaged @@ -9138,8 +9126,7 @@ export interface IlmGetLifecycleRequest extends RequestBase { timeout?: Time } -export interface IlmGetLifecycleResponse extends DictionaryResponseBase { -} +export type IlmGetLifecycleResponse = Record export interface IlmGetStatusRequest extends RequestBase { } @@ -9176,8 +9163,7 @@ export interface IlmMoveToStepRequest extends RequestBase { } } -export interface IlmMoveToStepResponse extends AcknowledgedResponseBase { -} +export type IlmMoveToStepResponse = AcknowledgedResponseBase export interface IlmMoveToStepStepKey { action: string @@ -9195,8 +9181,7 @@ export interface IlmPutLifecycleRequest extends RequestBase { } } -export interface IlmPutLifecycleResponse extends AcknowledgedResponseBase { -} +export type IlmPutLifecycleResponse = AcknowledgedResponseBase export interface IlmRemovePolicyRequest extends RequestBase { index: IndexName @@ -9211,24 +9196,21 @@ export interface IlmRetryRequest extends RequestBase { index: IndexName } -export interface IlmRetryResponse extends AcknowledgedResponseBase { -} +export type IlmRetryResponse = AcknowledgedResponseBase export interface IlmStartRequest extends RequestBase { master_timeout?: Time timeout?: Time } -export interface IlmStartResponse extends AcknowledgedResponseBase { -} +export type IlmStartResponse = AcknowledgedResponseBase export interface IlmStopRequest extends RequestBase { master_timeout?: Time timeout?: Time } -export interface IlmStopResponse extends AcknowledgedResponseBase { -} +export type IlmStopResponse = AcknowledgedResponseBase export interface IndicesAlias { filter?: QueryDslQueryContainer @@ -9390,6 +9372,10 @@ export interface IndicesIndexSettings { shards?: integer queries?: IndicesQueries similarity?: IndicesSettingsSimilarity + mappings?: IndicesMappingLimitSettings + 'indexing.slowlog'?: IndicesSlowlogSettings + indexing_pressure?: IndicesIndexingPressure + store?: IndicesStorage } export interface IndicesIndexSettingsAnalysis { @@ -9402,6 +9388,15 
@@ export interface IndicesIndexSettingsAnalysis { export interface IndicesIndexSettingsLifecycle { name: Name + indexing_complete?: boolean + origination_date?: long + parse_origination_date?: boolean + step?: IndicesIndexSettingsLifecycleStep + rollover_alias?: string +} + +export interface IndicesIndexSettingsLifecycleStep { + wait_time_threshold?: Time } export interface IndicesIndexSettingsTimeSeries { @@ -9444,6 +9439,47 @@ export interface IndicesIndexVersioning { created_string?: VersionString } +export interface IndicesIndexingPressure { + memory: IndicesIndexingPressureMemory +} + +export interface IndicesIndexingPressureMemory { + limit?: integer +} + +export interface IndicesMappingLimitSettings { + total_fields?: IndicesMappingLimitSettingsTotalFields + depth?: IndicesMappingLimitSettingsDepth + nested_fields?: IndicesMappingLimitSettingsNestedFields + nested_objects?: IndicesMappingLimitSettingsNestedObjects + field_name_length?: IndicesMappingLimitSettingsFieldNameLength + dimension_fields?: IndicesMappingLimitSettingsDimensionFields +} + +export interface IndicesMappingLimitSettingsDepth { + limit?: integer +} + +export interface IndicesMappingLimitSettingsDimensionFields { + limit?: integer +} + +export interface IndicesMappingLimitSettingsFieldNameLength { + limit?: long +} + +export interface IndicesMappingLimitSettingsNestedFields { + limit?: integer +} + +export interface IndicesMappingLimitSettingsNestedObjects { + limit?: integer +} + +export interface IndicesMappingLimitSettingsTotalFields { + limit?: integer +} + export interface IndicesMerge { scheduler?: IndicesMergeScheduler } @@ -9490,7 +9526,8 @@ export interface IndicesSettingsQueryString { } export interface IndicesSettingsSearch { - idle: IndicesSearchIdle + idle?: IndicesSearchIdle + slowlog?: IndicesSlowlogSettings } export interface IndicesSettingsSimilarity { @@ -9544,11 +9581,38 @@ export interface IndicesSettingsSimilarityScriptedTfidf { type: 'scripted' } +export interface IndicesSlowlogSettings { + level?: string + source?: integer + reformat?: boolean + threshold?: IndicesSlowlogTresholds +} + +export interface IndicesSlowlogTresholdLevels { + warn?: Time + info?: Time + debug?: Time + trace?: Time +} + +export interface IndicesSlowlogTresholds { + query?: IndicesSlowlogTresholdLevels + fetch?: IndicesSlowlogTresholdLevels + index?: IndicesSlowlogTresholdLevels +} + export interface IndicesSoftDeletes { - enabled: boolean + enabled?: boolean retention_lease?: IndicesRetentionLease } +export interface IndicesStorage { + type: IndicesStorageType + allow_mmap?: boolean +} + +export type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs' + export interface IndicesStringFielddata { format: IndicesStringFielddataFormat } @@ -9565,13 +9629,17 @@ export interface IndicesTemplateMapping { } export interface IndicesTranslog { - durability?: string - flush_threshold_size?: string + sync_interval?: Time + durability?: IndicesTranslogDurability + flush_threshold_size?: ByteSize retention?: IndicesTranslogRetention } +export type IndicesTranslogDurability = 'request' | 'async' + export interface IndicesTranslogRetention { - size: ByteSize + size?: ByteSize + age?: Time } export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' @@ -9591,7 +9659,8 @@ export interface IndicesAddBlockRequest extends RequestBase { timeout?: Time } -export interface IndicesAddBlockResponse extends AcknowledgedResponseBase { +export interface IndicesAddBlockResponse { + acknowledged: boolean 
shards_acknowledged: boolean indices: IndicesAddBlockIndicesBlockStatus[] } @@ -9674,8 +9743,7 @@ export interface IndicesClearCacheRequest extends RequestBase { request?: boolean } -export interface IndicesClearCacheResponse extends ShardsOperationResponseBase { -} +export type IndicesClearCacheResponse = ShardsOperationResponseBase export interface IndicesCloneRequest extends RequestBase { index: IndexName @@ -9690,7 +9758,8 @@ export interface IndicesCloneRequest extends RequestBase { } } -export interface IndicesCloneResponse extends AcknowledgedResponseBase { +export interface IndicesCloneResponse { + acknowledged: boolean index: IndexName shards_acknowledged: boolean } @@ -9714,7 +9783,8 @@ export interface IndicesCloseRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards } -export interface IndicesCloseResponse extends AcknowledgedResponseBase { +export interface IndicesCloseResponse { + acknowledged: boolean indices: Record shards_acknowledged: boolean } @@ -9742,8 +9812,7 @@ export interface IndicesCreateDataStreamRequest extends RequestBase { name: DataStreamName } -export interface IndicesCreateDataStreamResponse extends AcknowledgedResponseBase { -} +export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase export interface IndicesDataStreamsStatsDataStreamsStatsItem { backing_indices: integer @@ -9776,8 +9845,7 @@ export interface IndicesDeleteRequest extends RequestBase { timeout?: Time } -export interface IndicesDeleteResponse extends IndicesResponseBase { -} +export type IndicesDeleteResponse = IndicesResponseBase export interface IndicesDeleteAliasRequest extends RequestBase { index: Indices @@ -9786,16 +9854,14 @@ export interface IndicesDeleteAliasRequest extends RequestBase { timeout?: Time } -export interface IndicesDeleteAliasResponse extends AcknowledgedResponseBase { -} +export type IndicesDeleteAliasResponse = AcknowledgedResponseBase export interface IndicesDeleteDataStreamRequest extends RequestBase { name: DataStreamNames expand_wildcards?: ExpandWildcards } -export interface IndicesDeleteDataStreamResponse extends AcknowledgedResponseBase { -} +export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase export interface IndicesDeleteIndexTemplateRequest extends RequestBase { name: Names @@ -9803,8 +9869,7 @@ export interface IndicesDeleteIndexTemplateRequest extends RequestBase { timeout?: Time } -export interface IndicesDeleteIndexTemplateResponse extends AcknowledgedResponseBase { -} +export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesDeleteTemplateRequest extends RequestBase { name: Name @@ -9812,8 +9877,7 @@ export interface IndicesDeleteTemplateRequest extends RequestBase { timeout?: Time } -export interface IndicesDeleteTemplateResponse extends AcknowledgedResponseBase { -} +export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase export interface IndicesDiskUsageRequest extends RequestBase { index: IndexName @@ -9933,8 +9997,7 @@ export interface IndicesFlushRequest extends RequestBase { wait_if_ongoing?: boolean } -export interface IndicesFlushResponse extends ShardsOperationResponseBase { -} +export type IndicesFlushResponse = ShardsOperationResponseBase export interface IndicesForcemergeRequest extends RequestBase { index?: Indices @@ -9947,8 +10010,11 @@ export interface IndicesForcemergeRequest extends RequestBase { wait_for_completion?: boolean } -export interface IndicesForcemergeResponse extends ShardsOperationResponseBase { -} +export type 
IndicesForcemergeResponse = ShardsOperationResponseBase + +export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings' + +export type IndicesGetFeatures = IndicesGetFeature | IndicesGetFeature[] export interface IndicesGetRequest extends RequestBase { index: Indices @@ -9959,10 +10025,10 @@ export interface IndicesGetRequest extends RequestBase { include_defaults?: boolean local?: boolean master_timeout?: Time + features?: IndicesGetFeatures } -export interface IndicesGetResponse extends DictionaryResponseBase { -} +export type IndicesGetResponse = Record export interface IndicesGetAliasIndexAliases { aliases: Record @@ -9977,8 +10043,7 @@ export interface IndicesGetAliasRequest extends RequestBase { local?: boolean } -export interface IndicesGetAliasResponse extends DictionaryResponseBase { -} +export type IndicesGetAliasResponse = Record export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames @@ -9999,8 +10064,7 @@ export interface IndicesGetFieldMappingRequest extends RequestBase { local?: boolean } -export interface IndicesGetFieldMappingResponse extends DictionaryResponseBase { -} +export type IndicesGetFieldMappingResponse = Record export interface IndicesGetFieldMappingTypeFieldMappings { mappings: Partial> @@ -10036,8 +10100,7 @@ export interface IndicesGetMappingRequest extends RequestBase { master_timeout?: Time } -export interface IndicesGetMappingResponse extends DictionaryResponseBase { -} +export type IndicesGetMappingResponse = Record export interface IndicesGetSettingsRequest extends RequestBase { index?: Indices @@ -10051,8 +10114,7 @@ export interface IndicesGetSettingsRequest extends RequestBase { master_timeout?: Time } -export interface IndicesGetSettingsResponse extends DictionaryResponseBase { -} +export type IndicesGetSettingsResponse = Record export interface IndicesGetTemplateRequest extends RequestBase { name?: Names @@ -10061,15 +10123,13 @@ export interface IndicesGetTemplateRequest extends RequestBase { master_timeout?: Time } -export interface IndicesGetTemplateResponse extends DictionaryResponseBase { -} +export type IndicesGetTemplateResponse = Record export interface IndicesMigrateToDataStreamRequest extends RequestBase { name: IndexName } -export interface IndicesMigrateToDataStreamResponse extends AcknowledgedResponseBase { -} +export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase export interface IndicesOpenRequest extends RequestBase { index: Indices @@ -10081,7 +10141,8 @@ export interface IndicesOpenRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards } -export interface IndicesOpenResponse extends AcknowledgedResponseBase { +export interface IndicesOpenResponse { + acknowledged: boolean shards_acknowledged: boolean } @@ -10106,8 +10167,7 @@ export interface IndicesPutAliasRequest extends RequestBase { } } -export interface IndicesPutAliasResponse extends AcknowledgedResponseBase { -} +export type IndicesPutAliasResponse = AcknowledgedResponseBase export interface IndicesPutIndexTemplateIndexTemplateMapping { aliases?: Record @@ -10130,8 +10190,7 @@ export interface IndicesPutIndexTemplateRequest extends RequestBase { } } -export interface IndicesPutIndexTemplateResponse extends AcknowledgedResponseBase { -} +export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesPutMappingRequest extends RequestBase { index: Indices @@ -10157,8 +10216,7 @@ export interface IndicesPutMappingRequest extends RequestBase { } } -export interface 
IndicesPutMappingResponse extends IndicesResponseBase { -} +export type IndicesPutMappingResponse = IndicesResponseBase export interface IndicesPutSettingsRequest extends RequestBase { index?: Indices @@ -10173,8 +10231,7 @@ export interface IndicesPutSettingsRequest extends RequestBase { body?: IndicesIndexSettings } -export interface IndicesPutSettingsResponse extends AcknowledgedResponseBase { -} +export type IndicesPutSettingsResponse = AcknowledgedResponseBase export interface IndicesPutTemplateRequest extends RequestBase { name: Name @@ -10194,8 +10251,7 @@ export interface IndicesPutTemplateRequest extends RequestBase { } } -export interface IndicesPutTemplateResponse extends AcknowledgedResponseBase { -} +export type IndicesPutTemplateResponse = AcknowledgedResponseBase export interface IndicesRecoveryFileDetails { length: long @@ -10265,8 +10321,7 @@ export interface IndicesRecoveryRequest extends RequestBase { detailed?: boolean } -export interface IndicesRecoveryResponse extends DictionaryResponseBase { -} +export type IndicesRecoveryResponse = Record export interface IndicesRecoveryShardRecovery { id: long @@ -10310,8 +10365,7 @@ export interface IndicesRefreshRequest extends RequestBase { ignore_unavailable?: boolean } -export interface IndicesRefreshResponse extends ShardsOperationResponseBase { -} +export type IndicesRefreshResponse = ShardsOperationResponseBase export interface IndicesReloadSearchAnalyzersReloadDetails { index: string @@ -10376,7 +10430,8 @@ export interface IndicesRolloverRequest extends RequestBase { } } -export interface IndicesRolloverResponse extends AcknowledgedResponseBase { +export interface IndicesRolloverResponse { + acknowledged: boolean conditions: Record dry_run: boolean new_index: string @@ -10491,7 +10546,8 @@ export interface IndicesShrinkRequest extends RequestBase { } } -export interface IndicesShrinkResponse extends AcknowledgedResponseBase { +export interface IndicesShrinkResponse { + acknowledged: boolean shards_acknowledged: boolean index: IndexName } @@ -10553,7 +10609,8 @@ export interface IndicesSplitRequest extends RequestBase { } } -export interface IndicesSplitResponse extends AcknowledgedResponseBase { +export interface IndicesSplitResponse { + acknowledged: boolean shards_acknowledged: boolean index: IndexName } @@ -10713,7 +10770,8 @@ export interface IndicesUnfreezeRequest extends RequestBase { wait_for_active_shards?: string } -export interface IndicesUnfreezeResponse extends AcknowledgedResponseBase { +export interface IndicesUnfreezeResponse { + acknowledged: boolean shards_acknowledged: boolean } @@ -10760,8 +10818,7 @@ export interface IndicesUpdateAliasesRequest extends RequestBase { } } -export interface IndicesUpdateAliasesResponse extends AcknowledgedResponseBase { -} +export type IndicesUpdateAliasesResponse = AcknowledgedResponseBase export interface IndicesValidateQueryIndicesValidationExplanation { error?: string @@ -11105,8 +11162,7 @@ export interface IngestDeletePipelineRequest extends RequestBase { timeout?: Time } -export interface IngestDeletePipelineResponse extends AcknowledgedResponseBase { -} +export type IngestDeletePipelineResponse = AcknowledgedResponseBase export interface IngestGeoIpStatsGeoIpDownloadStatistics { successful_downloads: integer @@ -11139,8 +11195,7 @@ export interface IngestGetPipelineRequest extends RequestBase { summary?: boolean } -export interface IngestGetPipelineResponse extends DictionaryResponseBase { -} +export type IngestGetPipelineResponse = Record export interface 
IngestProcessorGrokRequest extends RequestBase { } @@ -11163,8 +11218,7 @@ export interface IngestPutPipelineRequest extends RequestBase { } } -export interface IngestPutPipelineResponse extends AcknowledgedResponseBase { -} +export type IngestPutPipelineResponse = AcknowledgedResponseBase export interface IngestSimulateDocument { _id?: Id @@ -11228,8 +11282,7 @@ export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'd export interface LicenseDeleteRequest extends RequestBase { } -export interface LicenseDeleteResponse extends AcknowledgedResponseBase { -} +export type LicenseDeleteResponse = AcknowledgedResponseBase export interface LicenseGetLicenseInformation { expiry_date?: DateString @@ -11293,7 +11346,8 @@ export interface LicensePostStartBasicRequest extends RequestBase { acknowledge?: boolean } -export interface LicensePostStartBasicResponse extends AcknowledgedResponseBase { +export interface LicensePostStartBasicResponse { + acknowledged: boolean basic_was_started: boolean error_message?: string type?: LicenseLicenseType @@ -11305,7 +11359,8 @@ export interface LicensePostStartTrialRequest extends RequestBase { type_query_string?: string } -export interface LicensePostStartTrialResponse extends AcknowledgedResponseBase { +export interface LicensePostStartTrialResponse { + acknowledged: boolean error_message?: string trial_was_started: boolean type?: LicenseLicenseType @@ -12377,16 +12432,14 @@ export interface MlDeleteCalendarRequest extends RequestBase { calendar_id: Id } -export interface MlDeleteCalendarResponse extends AcknowledgedResponseBase { -} +export type MlDeleteCalendarResponse = AcknowledgedResponseBase export interface MlDeleteCalendarEventRequest extends RequestBase { calendar_id: Id event_id: Id } -export interface MlDeleteCalendarEventResponse extends AcknowledgedResponseBase { -} +export type MlDeleteCalendarEventResponse = AcknowledgedResponseBase export interface MlDeleteCalendarJobRequest extends RequestBase { calendar_id: Id @@ -12405,16 +12458,14 @@ export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { timeout?: Time } -export interface MlDeleteDataFrameAnalyticsResponse extends AcknowledgedResponseBase { -} +export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase export interface MlDeleteDatafeedRequest extends RequestBase { datafeed_id: Id force?: boolean } -export interface MlDeleteDatafeedResponse extends AcknowledgedResponseBase { -} +export type MlDeleteDatafeedResponse = AcknowledgedResponseBase export interface MlDeleteExpiredDataRequest extends RequestBase { job_id?: Id @@ -12435,8 +12486,7 @@ export interface MlDeleteFilterRequest extends RequestBase { filter_id: Id } -export interface MlDeleteFilterResponse extends AcknowledgedResponseBase { -} +export type MlDeleteFilterResponse = AcknowledgedResponseBase export interface MlDeleteForecastRequest extends RequestBase { job_id: Id @@ -12445,8 +12495,7 @@ export interface MlDeleteForecastRequest extends RequestBase { timeout?: Time } -export interface MlDeleteForecastResponse extends AcknowledgedResponseBase { -} +export type MlDeleteForecastResponse = AcknowledgedResponseBase export interface MlDeleteJobRequest extends RequestBase { job_id: Id @@ -12454,32 +12503,28 @@ export interface MlDeleteJobRequest extends RequestBase { wait_for_completion?: boolean } -export interface MlDeleteJobResponse extends AcknowledgedResponseBase { -} +export type MlDeleteJobResponse = AcknowledgedResponseBase export interface MlDeleteModelSnapshotRequest 
extends RequestBase { job_id: Id snapshot_id: Id } -export interface MlDeleteModelSnapshotResponse extends AcknowledgedResponseBase { -} +export type MlDeleteModelSnapshotResponse = AcknowledgedResponseBase export interface MlDeleteTrainedModelRequest extends RequestBase { model_id: Id force?: boolean } -export interface MlDeleteTrainedModelResponse extends AcknowledgedResponseBase { -} +export type MlDeleteTrainedModelResponse = AcknowledgedResponseBase export interface MlDeleteTrainedModelAliasRequest extends RequestBase { model_alias: Name model_id: Id } -export interface MlDeleteTrainedModelAliasResponse extends AcknowledgedResponseBase { -} +export type MlDeleteTrainedModelAliasResponse = AcknowledgedResponseBase export interface MlEstimateModelMemoryRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ @@ -12643,7 +12688,8 @@ export interface MlForecastRequest extends RequestBase { } } -export interface MlForecastResponse extends AcknowledgedResponseBase { +export interface MlForecastResponse { + acknowledged: boolean forecast_id: Id } @@ -12829,6 +12875,58 @@ export interface MlGetJobsResponse { jobs: MlJob[] } +export interface MlGetMemoryStatsJvmStats { + heap_max: ByteSize + heap_max_in_bytes: integer + java_inference: ByteSize + java_inference_in_bytes: integer + java_inference_max: ByteSize + java_inference_max_in_bytes: integer +} + +export interface MlGetMemoryStatsMemMlStats { + anomaly_detectors: ByteSize + anomaly_detectors_in_bytes: integer + data_frame_analytics: ByteSize + data_frame_analytics_in_bytes: integer + max: ByteSize + max_in_bytes: integer + native_code_overhead: ByteSize + native_code_overhead_in_bytes: integer + native_inference: ByteSize + native_inference_in_bytes: integer +} + +export interface MlGetMemoryStatsMemStats { + adjusted_total: ByteSize + adjusted_total_in_bytes: integer + total: ByteSize + total_in_bytes: integer + ml: MlGetMemoryStatsMemMlStats +} + +export interface MlGetMemoryStatsMemory { + attributes: string[] + jvm: MlGetMemoryStatsJvmStats + mem: MlGetMemoryStatsMemStats + name: Name + roles: string[] + transport_address: TransportAddress +} + +export interface MlGetMemoryStatsRequest extends RequestBase { + node_id?: Id + human?: boolean + master_timeout?: Time + timeout?: Time +} + +export interface MlGetMemoryStatsResponse { + _nodes: NodeStatistics + cluser_name: Name + nodes: Record +} + export interface MlGetModelSnapshotsRequest extends RequestBase { job_id: Id snapshot_id?: Id @@ -13337,8 +13435,7 @@ export interface MlPutTrainedModelAliasRequest extends RequestBase { reassign?: boolean } -export interface MlPutTrainedModelAliasResponse extends AcknowledgedResponseBase { -} +export type MlPutTrainedModelAliasResponse = AcknowledgedResponseBase export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { model_id: Id @@ -13351,27 +13448,25 @@ export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { } } -export interface MlPutTrainedModelDefinitionPartResponse extends AcknowledgedResponseBase { -} +export type MlPutTrainedModelDefinitionPartResponse = AcknowledgedResponseBase export interface MlPutTrainedModelVocabularyRequest extends RequestBase { model_id: Id /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { vocabulary: string[] + merges?: string[] } } -export interface MlPutTrainedModelVocabularyResponse extends AcknowledgedResponseBase { -} +export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase export interface MlResetJobRequest extends RequestBase { job_id: Id wait_for_completion?: boolean } -export interface MlResetJobResponse extends AcknowledgedResponseBase { -} +export type MlResetJobResponse = AcknowledgedResponseBase export interface MlRevertModelSnapshotRequest extends RequestBase { job_id: Id @@ -13392,15 +13487,15 @@ export interface MlSetUpgradeModeRequest extends RequestBase { timeout?: Time } -export interface MlSetUpgradeModeResponse extends AcknowledgedResponseBase { -} +export type MlSetUpgradeModeResponse = AcknowledgedResponseBase export interface MlStartDataFrameAnalyticsRequest extends RequestBase { id: Id timeout?: Time } -export interface MlStartDataFrameAnalyticsResponse extends AcknowledgedResponseBase { +export interface MlStartDataFrameAnalyticsResponse { + acknowledged: boolean node: NodeId } @@ -13610,7 +13705,8 @@ export interface MlUpdateModelSnapshotRequest extends RequestBase { } } -export interface MlUpdateModelSnapshotResponse extends AcknowledgedResponseBase { +export interface MlUpdateModelSnapshotResponse { + acknowledged: boolean model: MlModelSnapshot } @@ -13641,16 +13737,14 @@ export interface MlValidateRequest extends RequestBase { } } -export interface MlValidateResponse extends AcknowledgedResponseBase { -} +export type MlValidateResponse = AcknowledgedResponseBase export interface MlValidateDetectorRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, use 'detector' instead. */ body?: MlDetector } -export interface MlValidateDetectorResponse extends AcknowledgedResponseBase { -} +export type MlValidateDetectorResponse = AcknowledgedResponseBase export interface MonitoringBulkRequest extends RequestBase { type?: string @@ -14118,7 +14212,9 @@ export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBas max_archive_version: long } -export interface NodesClearRepositoriesMeteringArchiveResponse extends NodesNodesResponseBase { +export type NodesClearRepositoriesMeteringArchiveResponse = NodesClearRepositoriesMeteringArchiveResponseBase + +export interface NodesClearRepositoriesMeteringArchiveResponseBase extends NodesNodesResponseBase { cluster_name: Name nodes: Record } @@ -14127,7 +14223,9 @@ export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { node_id: NodeIds } -export interface NodesGetRepositoriesMeteringInfoResponse extends NodesNodesResponseBase { +export type NodesGetRepositoriesMeteringInfoResponse = NodesGetRepositoriesMeteringInfoResponseBase + +export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodesResponseBase { cluster_name: Name nodes: Record } @@ -14502,7 +14600,9 @@ export interface NodesInfoRequest extends RequestBase { timeout?: Time } -export interface NodesInfoResponse extends NodesNodesResponseBase { +export type NodesInfoResponse = NodesInfoResponseBase + +export interface NodesInfoResponseBase extends NodesNodesResponseBase { cluster_name: Name nodes: Record } @@ -14516,7 +14616,9 @@ export interface NodesReloadSecureSettingsRequest extends RequestBase { } } -export interface NodesReloadSecureSettingsResponse extends NodesNodesResponseBase { +export type NodesReloadSecureSettingsResponse = NodesReloadSecureSettingsResponseBase + +export interface NodesReloadSecureSettingsResponseBase 
extends NodesNodesResponseBase { cluster_name: Name nodes: Record } @@ -14537,7 +14639,9 @@ export interface NodesStatsRequest extends RequestBase { include_unloaded_segments?: boolean } -export interface NodesStatsResponse extends NodesNodesResponseBase { +export type NodesStatsResponse = NodesStatsResponseBase + +export interface NodesStatsResponseBase extends NodesNodesResponseBase { cluster_name?: Name nodes: Record } @@ -14555,7 +14659,9 @@ export interface NodesUsageRequest extends RequestBase { timeout?: Time } -export interface NodesUsageResponse extends NodesNodesResponseBase { +export type NodesUsageResponse = NodesUsageResponseBase + +export interface NodesUsageResponseBase extends NodesNodesResponseBase { cluster_name: Name nodes: Record } @@ -14596,7 +14702,8 @@ export interface RollupDeleteJobRequest extends RequestBase { id: Id } -export interface RollupDeleteJobResponse extends AcknowledgedResponseBase { +export interface RollupDeleteJobResponse { + acknowledged: boolean task_failures?: TaskFailure[] } @@ -14652,8 +14759,7 @@ export interface RollupGetRollupCapsRequest extends RequestBase { id?: Id } -export interface RollupGetRollupCapsResponse extends DictionaryResponseBase { -} +export type RollupGetRollupCapsResponse = Record export interface RollupGetRollupCapsRollupCapabilities { rollup_jobs: RollupGetRollupCapsRollupCapabilitySummary[] @@ -14674,8 +14780,7 @@ export interface RollupGetRollupIndexCapsRequest extends RequestBase { index: Ids } -export interface RollupGetRollupIndexCapsResponse extends DictionaryResponseBase { -} +export type RollupGetRollupIndexCapsResponse = Record export interface RollupGetRollupIndexCapsRollupJobSummary { fields: Record @@ -14705,8 +14810,7 @@ export interface RollupPutJobRequest extends RequestBase { } } -export interface RollupPutJobResponse extends AcknowledgedResponseBase { -} +export type RollupPutJobResponse = AcknowledgedResponseBase export interface RollupRollupRequest extends RequestBase { index: IndexName @@ -15084,8 +15188,7 @@ export interface SecurityDeletePrivilegesRequest extends RequestBase { refresh?: Refresh } -export interface SecurityDeletePrivilegesResponse extends DictionaryResponseBase> { -} +export type SecurityDeletePrivilegesResponse = Record> export interface SecurityDeleteRoleRequest extends RequestBase { name: Name @@ -15191,15 +15294,13 @@ export interface SecurityGetPrivilegesRequest extends RequestBase { name?: Names } -export interface SecurityGetPrivilegesResponse extends DictionaryResponseBase> { -} +export type SecurityGetPrivilegesResponse = Record> export interface SecurityGetRoleRequest extends RequestBase { name?: Names } -export interface SecurityGetRoleResponse extends DictionaryResponseBase { -} +export type SecurityGetRoleResponse = Record export interface SecurityGetRoleRole { cluster: string[] @@ -15223,16 +15324,14 @@ export interface SecurityGetRoleMappingRequest extends RequestBase { name?: Names } -export interface SecurityGetRoleMappingResponse extends DictionaryResponseBase { -} +export type SecurityGetRoleMappingResponse = Record export interface SecurityGetServiceAccountsRequest extends RequestBase { namespace?: Namespace service?: Service } -export interface SecurityGetServiceAccountsResponse extends DictionaryResponseBase { -} +export type SecurityGetServiceAccountsResponse = Record export interface SecurityGetServiceAccountsRoleDescriptor { cluster: string[] @@ -15314,8 +15413,7 @@ export interface SecurityGetUserRequest extends RequestBase { username?: Username | Username[] } 
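// Editor's note: the sketch below is illustrative and not part of the generated
// patch. With SecurityGetUserResponse becoming a plain Record in the change that
// follows, the result can be iterated directly. Assumes an already-configured
// Client instance; the logging is an example only.
import { Client } from '@elastic/elasticsearch'

async function listUserRoles (client: Client): Promise<void> {
  const users = await client.security.getUser() // Record<string, SecurityUser>
  for (const [username, user] of Object.entries(users)) {
    console.log(username, user.roles.join(', '))
  }
}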
-export interface SecurityGetUserResponse extends DictionaryResponseBase { -} +export type SecurityGetUserResponse = Record export interface SecurityGetUserPrivilegesRequest extends RequestBase { application?: Name @@ -15442,8 +15540,7 @@ export interface SecurityPutPrivilegesRequest extends RequestBase { body?: Record> } -export interface SecurityPutPrivilegesResponse extends DictionaryResponseBase> { -} +export type SecurityPutPrivilegesResponse = Record> export interface SecurityPutRoleRequest extends RequestBase { name: Name @@ -15602,8 +15699,7 @@ export interface ShutdownDeleteNodeRequest extends RequestBase { node_id: NodeId } -export interface ShutdownDeleteNodeResponse extends AcknowledgedResponseBase { -} +export type ShutdownDeleteNodeResponse = AcknowledgedResponseBase export interface ShutdownGetNodeNodeShutdownStatus { node_id: NodeId @@ -15644,8 +15740,7 @@ export interface ShutdownPutNodeRequest extends RequestBase { node_id: NodeId } -export interface ShutdownPutNodeResponse extends AcknowledgedResponseBase { -} +export type ShutdownPutNodeResponse = AcknowledgedResponseBase export interface SlmConfiguration { ignore_unavailable?: boolean @@ -15716,8 +15811,7 @@ export interface SlmDeleteLifecycleRequest extends RequestBase { policy_id: Name } -export interface SlmDeleteLifecycleResponse extends AcknowledgedResponseBase { -} +export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase export interface SlmExecuteLifecycleRequest extends RequestBase { policy_id: Name @@ -15730,15 +15824,13 @@ export interface SlmExecuteLifecycleResponse { export interface SlmExecuteRetentionRequest extends RequestBase { } -export interface SlmExecuteRetentionResponse extends AcknowledgedResponseBase { -} +export type SlmExecuteRetentionResponse = AcknowledgedResponseBase export interface SlmGetLifecycleRequest extends RequestBase { policy_id?: Names } -export interface SlmGetLifecycleResponse extends DictionaryResponseBase { -} +export type SlmGetLifecycleResponse = Record export interface SlmGetStatsRequest extends RequestBase { } @@ -15777,20 +15869,17 @@ export interface SlmPutLifecycleRequest extends RequestBase { } } -export interface SlmPutLifecycleResponse extends AcknowledgedResponseBase { -} +export type SlmPutLifecycleResponse = AcknowledgedResponseBase export interface SlmStartRequest extends RequestBase { } -export interface SlmStartResponse extends AcknowledgedResponseBase { -} +export type SlmStartResponse = AcknowledgedResponseBase export interface SlmStopRequest extends RequestBase { } -export interface SlmStopResponse extends AcknowledgedResponseBase { -} +export type SlmStopResponse = AcknowledgedResponseBase export interface SnapshotFileCountSnapshotStats { file_count: integer @@ -15937,8 +16026,7 @@ export interface SnapshotCloneRequest extends RequestBase { } } -export interface SnapshotCloneResponse extends AcknowledgedResponseBase { -} +export type SnapshotCloneResponse = AcknowledgedResponseBase export interface SnapshotCreateRequest extends RequestBase { repository: Name @@ -15974,8 +16062,7 @@ export interface SnapshotCreateRepositoryRequest extends RequestBase { } } -export interface SnapshotCreateRepositoryResponse extends AcknowledgedResponseBase { -} +export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase export interface SnapshotDeleteRequest extends RequestBase { repository: Name @@ -15983,8 +16070,7 @@ export interface SnapshotDeleteRequest extends RequestBase { master_timeout?: Time } -export interface SnapshotDeleteResponse extends 
AcknowledgedResponseBase { -} +export type SnapshotDeleteResponse = AcknowledgedResponseBase export interface SnapshotDeleteRepositoryRequest extends RequestBase { name: Names @@ -15992,8 +16078,7 @@ export interface SnapshotDeleteRepositoryRequest extends RequestBase { timeout?: Time } -export interface SnapshotDeleteRepositoryResponse extends AcknowledgedResponseBase { -} +export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase export interface SnapshotGetRequest extends RequestBase { repository: Name @@ -16032,8 +16117,7 @@ export interface SnapshotGetRepositoryRequest extends RequestBase { master_timeout?: Time } -export interface SnapshotGetRepositoryResponse extends DictionaryResponseBase { -} +export type SnapshotGetRepositoryResponse = Record export interface SnapshotRestoreRequest extends RequestBase { repository: Name @@ -16111,8 +16195,7 @@ export interface SqlDeleteAsyncRequest extends RequestBase { id: Id } -export interface SqlDeleteAsyncResponse extends AcknowledgedResponseBase { -} +export type SqlDeleteAsyncResponse = AcknowledgedResponseBase export interface SqlGetAsyncRequest extends RequestBase { id: Id @@ -16276,8 +16359,7 @@ export interface TasksCancelRequest extends RequestBase { wait_for_completion?: boolean } -export interface TasksCancelResponse extends TasksTaskListResponseBase { -} +export type TasksCancelResponse = TasksTaskListResponseBase export interface TasksGetRequest extends RequestBase { task_id: Id @@ -16303,8 +16385,7 @@ export interface TasksListRequest extends RequestBase { wait_for_completion?: boolean } -export interface TasksListResponse extends TasksTaskListResponseBase { -} +export type TasksListResponse = TasksTaskListResponseBase export interface TextStructureFindStructureFieldStat { count: integer @@ -16428,8 +16509,7 @@ export interface TransformDeleteTransformRequest extends RequestBase { timeout?: Time } -export interface TransformDeleteTransformResponse extends AcknowledgedResponseBase { -} +export type TransformDeleteTransformResponse = AcknowledgedResponseBase export interface TransformGetTransformRequest extends RequestBase { transform_id?: Names @@ -16567,24 +16647,21 @@ export interface TransformPutTransformRequest extends RequestBase { } } -export interface TransformPutTransformResponse extends AcknowledgedResponseBase { -} +export type TransformPutTransformResponse = AcknowledgedResponseBase export interface TransformResetTransformRequest extends RequestBase { transform_id: Id force?: boolean } -export interface TransformResetTransformResponse extends AcknowledgedResponseBase { -} +export type TransformResetTransformResponse = AcknowledgedResponseBase export interface TransformStartTransformRequest extends RequestBase { transform_id: Id timeout?: Time } -export interface TransformStartTransformResponse extends AcknowledgedResponseBase { -} +export type TransformStartTransformResponse = AcknowledgedResponseBase export interface TransformStopTransformRequest extends RequestBase { transform_id: Name @@ -16595,8 +16672,7 @@ export interface TransformStopTransformRequest extends RequestBase { wait_for_completion?: boolean } -export interface TransformStopTransformResponse extends AcknowledgedResponseBase { -} +export type TransformStopTransformResponse = AcknowledgedResponseBase export interface TransformUpdateTransformRequest extends RequestBase { transform_id: Id @@ -17037,10 +17113,18 @@ export interface WatcherSearchInputRequestDefinition { indices?: IndexName[] indices_options?: IndicesOptions search_type?: 
SearchType - template?: SearchTemplateRequest + template?: WatcherSearchTemplateRequestBody rest_total_hits_as_int?: boolean } +export interface WatcherSearchTemplateRequestBody { + explain?: boolean + id?: Id + params?: Record + profile?: boolean + source?: string +} + export interface WatcherSimulatedActions { actions: string[] all: WatcherSimulatedActions @@ -17289,8 +17373,7 @@ export interface WatcherQueryWatchesResponse { export interface WatcherStartRequest extends RequestBase { } -export interface WatcherStartResponse extends AcknowledgedResponseBase { -} +export type WatcherStartResponse = AcknowledgedResponseBase export interface WatcherStatsRequest extends RequestBase { metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[] @@ -17332,8 +17415,7 @@ export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'sto export interface WatcherStopRequest extends RequestBase { } -export interface WatcherStopResponse extends AcknowledgedResponseBase { -} +export type WatcherStopResponse = AcknowledgedResponseBase export interface XpackInfoBuildInformation { date: DateString @@ -17375,6 +17457,7 @@ export interface XpackInfoFeatures { vectors?: XpackInfoFeature voting_only: XpackInfoFeature watcher: XpackInfoFeature + archive: XpackInfoFeature } export interface XpackInfoMinimalLicenseInformation { @@ -17393,6 +17476,7 @@ export interface XpackInfoNativeCodeInformation { export interface XpackInfoRequest extends RequestBase { categories?: string[] accept_enterprise?: boolean + human?: boolean } export interface XpackInfoResponse { @@ -17426,6 +17510,10 @@ export interface XpackUsageAnalyticsStatistics { multi_terms_usage?: long } +export interface XpackUsageArchive extends XpackUsageBase { + indices_count: long +} + export interface XpackUsageAudit extends XpackUsageFeatureToggle { outputs?: string[] } @@ -17667,6 +17755,7 @@ export interface XpackUsageRequest extends RequestBase { export interface XpackUsageResponse { aggregate_metric: XpackUsageBase analytics: XpackUsageAnalytics + archive: XpackUsageArchive watcher: XpackUsageWatcher ccr: XpackUsageCcr data_frame?: XpackUsageBase From 77c1ef36aadb416b67c5b900ed2f6dfef1da346b Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 23 Mar 2022 11:33:41 +0100 Subject: [PATCH 159/647] Bumped v8.2.0-canary.2 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 19b917a5e..32f3747a6 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", "version": "8.2.0", - "versionCanary": "8.2.0-canary.1", + "versionCanary": "8.2.0-canary.2", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From eac00e120014a8d0c143db094f8af6e67694f7b7 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Mon, 28 Mar 2022 12:22:47 +0200 Subject: [PATCH 160/647] Update connecting documentation (#1666) --- README.md | 15 +- docs/advanced-config.asciidoc | 12 +- docs/basic-config.asciidoc | 5 +- docs/child.asciidoc | 5 +- docs/connecting.asciidoc | 214 +++++++++++++++++------ docs/examples/asStream.asciidoc | 10 +- docs/examples/bulk.asciidoc | 3 +- docs/examples/exists.asciidoc | 5 +- docs/examples/get.asciidoc | 5 +- docs/examples/ignore.asciidoc | 5 +- docs/examples/msearch.asciidoc | 5 +- docs/examples/reindex.asciidoc | 5 +- docs/examples/scroll.asciidoc | 10 +- docs/examples/search.asciidoc | 5 +- docs/examples/sql.query.asciidoc | 5 +- docs/examples/suggest.asciidoc | 5 +- 
docs/examples/transport.request.asciidoc | 5 +- docs/examples/update.asciidoc | 10 +- docs/examples/update_by_query.asciidoc | 5 +- docs/helpers.asciidoc | 25 ++- docs/introduction.asciidoc | 15 +- docs/observability.asciidoc | 30 +++- docs/testing.asciidoc | 3 +- docs/transport.asciidoc | 3 +- docs/typescript.asciidoc | 5 +- 25 files changed, 315 insertions(+), 100 deletions(-) diff --git a/README.md b/README.md index 2c8b411c5..5210226fc 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,10 @@ We recommend that you write a lightweight proxy that uses this client instead, y 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '<cloud-id>' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { // Let's start by indexing some data @@ -159,8 +162,14 @@ You will require the packages from your code by using the alias you have defined const { Client: Client6 } = require('es6') const { Client: Client7 } = require('es7') -const client6 = new Client6({ node: 'http://localhost:9200/' }) -const client7 = new Client7({ node: 'http://localhost:9201/' }) +const client6 = new Client6({ + cloud: { id: '<cloud-id>' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const client7 = new Client7({ + cloud: { id: '<cloud-id>' }, + auth: { apiKey: 'base64EncodedKey' } +}) client6.info().then(console.log, console.log) client7.info().then(console.log, console.log) diff --git a/docs/advanced-config.asciidoc b/docs/advanced-config.asciidoc index 1f9d2bc07..638aeada4 100644 --- a/docs/advanced-config.asciidoc +++ b/docs/advanced-config.asciidoc @@ -30,7 +30,9 @@ class MyConnectionPool extends ConnectionPool { } const client = new Client({ - ConnectionPool: MyConnectionPool + ConnectionPool: MyConnectionPool, + cloud: { id: '<cloud-id>' }, + auth: { apiKey: 'base64EncodedKey' } }) ---- @@ -54,7 +56,9 @@ class MyConnection extends BaseConnection { } const client = new Client({ - Connection: MyConnection + Connection: MyConnection, + cloud: { id: '<cloud-id>' }, + auth: { apiKey: 'base64EncodedKey' } }) ---- @@ -81,7 +85,9 @@ class MySerializer extends Serializer { } const client = new Client({ - Serializer: MySerializer + Serializer: MySerializer, + cloud: { id: '<cloud-id>' }, + auth: { apiKey: 'base64EncodedKey' } }) ---- diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index 536dfead4..04ca7b1ee 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -10,7 +10,8 @@ offers. const { Client } = require('@elastic/elasticsearch') const client = new Client({ - node: 'http://localhost:9200/', + cloud: { id: '<cloud-id>' }, + auth: { apiKey: 'base64EncodedKey' }, maxRetries: 5, requestTimeout: 60000, sniffOnStart: true }) @@ -241,7 +242,7 @@ _Cloud configuration example:_ ---- const client = new Client({ cloud: { - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==' + id: '<cloud-id>' }, auth: { username: 'elastic', diff --git a/docs/child.asciidoc b/docs/child.asciidoc index 5e1abdee7..0bd7ace21 100644 --- a/docs/child.asciidoc +++ b/docs/child.asciidoc @@ -22,7 +22,10 @@ will be closed.
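// Editor's note: an illustrative sketch, not part of the patch. It shows the
// behaviour described above: a child client shares its parent's connection
// pool, so closing the parent closes every child as well. The cloud id and
// API key values are placeholders.
import { Client } from '@elastic/elasticsearch'

async function childClientDemo (): Promise<void> {
  const client = new Client({
    cloud: { id: '<cloud-id>' },
    auth: { apiKey: 'base64EncodedKey' }
  })
  // The child inherits every parent option and layers its overrides on top
  const child = client.child({ requestTimeout: 1000 })
  console.log(await child.info())
  await client.close() // closes the parent and the derived child
}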
[source,js] ---- const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) const child = client.child({ headers: { 'x-foo': 'bar' }, requestTimeout: 1000 diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index 282e12512..239eea79e 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -46,7 +46,7 @@ to know more. const { Client } = require('@elastic/elasticsearch') const client = new Client({ cloud: { - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==', + id: '' }, auth: { username: 'elastic', @@ -55,6 +55,152 @@ const client = new Client({ }) ---- +[discrete] +[[connect-self-managed-new]] +=== Connecting to a self-managed cluster + +By default {es} will start with security features like authentication and TLS +enabled. To connect to the {es} cluster you'll need to configure the Node.js {es} +client to use HTTPS with the generated CA certificate in order to make requests +successfully. + +If you're just getting started with {es} we recommend reading the documentation +on https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html[configuring] +and +https://www.elastic.co/guide/en/elasticsearch/reference/current/starting-elasticsearch.html[starting {es}] +to ensure your cluster is running as expected. + +When you start {es} for the first time you'll see a distinct block like the one +below in the output from {es} (you may have to scroll up if it's been a while): + +[source,sh] +---- + +-> Elasticsearch security features have been automatically configured! +-> Authentication is enabled and cluster connections are encrypted. + +-> Password for the elastic user (reset with `bin/elasticsearch-reset-password -u elastic`): + lhQpLELkjkrawaBoaz0Q + +-> HTTP CA certificate SHA-256 fingerprint: + a52dd93511e8c6045e21f16654b77c9ee0f34aea26d9f40320b531c474676228 +... + +---- + +Depending on the circumstances there are two options for verifying the HTTPS +connection, either verifying with the CA certificate itself or via the HTTP CA +certificate fingerprint. + +[discrete] +[[auth-tls]] +==== TLS configuration + +The generated root CA certificate can be found in the `certs` directory in your +{es} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you're running {es} +in Docker there is +https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html[additional documentation for retrieving the CA certificate]. + +Without any additional configuration you can specify `https://` node urls, and +the certificates used to sign these requests will be verified. To turn off +certificate verification, you must specify an `tls` object in the top level +config and set `rejectUnauthorized: false`. The default `tls` values are the +same that Node.js's https://nodejs.org/api/tls.html#tls_tls_connect_options_callback[`tls.connect()`] +uses. + +[source,js] +---- +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://localhost:9200/', + auth: { + username: 'elastic', + password: 'changeme' + }, + tls: { + ca: fs.readFileSync('./http_ca.crt'), + rejectUnauthorized: false + } +}) +---- + +[discrete] +[[auth-ca-fingerprint]] +==== CA fingerprint + +You can configure the client to only trust certificates that are signed by a specific CA certificate +(CA certificate pinning) by providing a `caFingerprint` option. 
+This will verify that the fingerprint of the CA certificate that has signed +the certificate of the server matches the supplied value. +You must configure a SHA256 digest. + +[source,js] +---- +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://example.com/' + auth: { ... }, + // the fingerprint (SHA256) of the CA certificate that is used to sign + // the certificate that the Elasticsearch node presents for TLS. + caFingerprint: '20:0D:CA:FA:76:...', + tls: { + // might be required if it's a self-signed certificate + rejectUnauthorized: false + } +}) +---- + +The certificate fingerprint can be calculated using `openssl x509` with the +certificate file: + +[source,sh] +---- +openssl x509 -fingerprint -sha256 -noout -in /path/to/http_ca.crt +---- + +If you don't have access to the generated CA file from {es} you can use the +following script to output the root CA fingerprint of the {es} instance with +`openssl s_client`: + +[source,sh] +---- +# Replace the values of 'localhost' and '9200' to the +# corresponding host and port values for the cluster. +openssl s_client -connect localhost:9200 -servername localhost -showcerts /dev/null \ + | openssl x509 -fingerprint -sha256 -noout -in /dev/stdin +---- + +The output of `openssl x509` will look something like this: + +[source,sh] +---- +SHA256 Fingerprint=A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28 +---- + + +[discrete] +[[connect-no-security]] +=== Connecting without security enabled + +WARNING: Running {es} without security enabled is not recommended. + +If your cluster is configured with +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html[security explicitly disabled] +then you can connect via HTTP: + +[source,js] +---- +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'http://example.com/' +}) +---- + +[discrete] +[[auth-strategies]] +=== Authentication strategies + +Following you can find all the supported authentication strategies. [discrete] [[auth-apikey]] @@ -150,57 +296,6 @@ const client = new Client({ ---- -[discrete] -[[auth-tls]] -==== TLS configuration - -Without any additional configuration you can specify `https://` node urls, and -the certificates used to sign these requests will be verified. To turn off -certificate verification, you must specify an `tls` object in the top level -config and set `rejectUnauthorized: false`. The default `tls` values are the -same that Node.js's https://nodejs.org/api/tls.html#tls_tls_connect_options_callback[`tls.connect()`] -uses. - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://localhost:9200/', - auth: { - username: 'elastic', - password: 'changeme' - }, - tls: { - ca: fs.readFileSync('./cacert.pem'), - rejectUnauthorized: false - } -}) ----- - -[discrete] -[[auth-ca-fingerprint]] -==== CA fingerprint - -You can configure the client to only trust certificates that are signed by a specific CA certificate ( CA certificate pinning ) by providing a `caFingerprint` option. This will verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied value. -a `caFingerprint` option, which will verify the supplied certificate authority fingerprint. -You must configure a SHA256 digest. 
- -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://example.com/' - auth: { ... }, - // the fingerprint (SHA256) of the CA certificate that is used to sign the certificate that the Elasticsearch node presents for TLS. - caFingerprint: '20:0D:CA:FA:76:...', - tls: { - // might be required if it's a self-signed certificate - rejectUnauthorized: false - } -}) ----- - - [discrete] [[client-usage]] === Usage @@ -212,7 +307,10 @@ and every method exposes the same signature. [source,js] ---- const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) const result = await client.search({ index: 'my-index', @@ -229,7 +327,10 @@ you must specify `meta: true` in the request options: [source,js] ---- const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) const result = await client.search({ index: 'my-index', @@ -266,7 +367,10 @@ CAUTION: If you abort a request, the request will fail with a ---- const AbortController = require('node-abort-controller') const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) const abortController = new AbortController() setImmediate(() => abortController.abort()) diff --git a/docs/examples/asStream.asciidoc b/docs/examples/asStream.asciidoc index dd7f9f21a..e77025fcf 100644 --- a/docs/examples/asStream.asciidoc +++ b/docs/examples/asStream.asciidoc @@ -9,7 +9,10 @@ data. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { const bulkResponse = await client.bulk({ @@ -83,7 +86,10 @@ send it directly to another source. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) const fastify = require('fastify')() fastify.post('/search/:index', async (req, reply) => { diff --git a/docs/examples/bulk.asciidoc b/docs/examples/bulk.asciidoc index e7d9261be..c6117c249 100644 --- a/docs/examples/bulk.asciidoc +++ b/docs/examples/bulk.asciidoc @@ -13,7 +13,8 @@ NOTE: Did you know that we provide an helper for sending bulk request? You can f require('array.prototype.flatmap').shim() const { Client } = require('@elastic/elasticsearch') const client = new Client({ - node: 'http://localhost:9200/' + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } }) async function run () { diff --git a/docs/examples/exists.asciidoc b/docs/examples/exists.asciidoc index 368f4ae2b..29a39a196 100644 --- a/docs/examples/exists.asciidoc +++ b/docs/examples/exists.asciidoc @@ -10,7 +10,10 @@ NOTE: Since this API uses the `HEAD` method, the body value will be boolean. 
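// Editor's note: illustrative sketch, not part of the patch. As the note above
// says, HEAD-based APIs such as `exists` resolve to the boolean itself in the
// 8.x client. The index and id parameters below are examples.
import { Client } from '@elastic/elasticsearch'

async function documentExists (client: Client, index: string, id: string): Promise<boolean> {
  // true when the document is found, false on a 404; no unwrapping needed
  return client.exists({ index, id })
}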
'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { await client.index({ diff --git a/docs/examples/get.asciidoc b/docs/examples/get.asciidoc index 9302c7607..f6dd94ddf 100644 --- a/docs/examples/get.asciidoc +++ b/docs/examples/get.asciidoc @@ -10,7 +10,10 @@ The following example gets a JSON document from an index called 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { await client.index({ diff --git a/docs/examples/ignore.asciidoc b/docs/examples/ignore.asciidoc index 40b570726..de5577dcd 100644 --- a/docs/examples/ignore.asciidoc +++ b/docs/examples/ignore.asciidoc @@ -8,7 +8,10 @@ HTTP status codes which should not be considered errors for this request. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { const bulkResponse = await client.bulk({ diff --git a/docs/examples/msearch.asciidoc b/docs/examples/msearch.asciidoc index 445bf866c..66222a34e 100644 --- a/docs/examples/msearch.asciidoc +++ b/docs/examples/msearch.asciidoc @@ -9,7 +9,10 @@ API. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { const bulkResponse = await client.bulk({ diff --git a/docs/examples/reindex.asciidoc b/docs/examples/reindex.asciidoc index 984e21c99..9d917dbd6 100644 --- a/docs/examples/reindex.asciidoc +++ b/docs/examples/reindex.asciidoc @@ -15,7 +15,10 @@ the house Stark and remove the `house` field from the document source. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { await client.index({ diff --git a/docs/examples/scroll.asciidoc b/docs/examples/scroll.asciidoc index 90e6e6524..5cc76d8a6 100644 --- a/docs/examples/scroll.asciidoc +++ b/docs/examples/scroll.asciidoc @@ -26,7 +26,10 @@ NOTE: Did you know that we provide an helper for sending scroll requests? You ca 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { const allQuotes = [] @@ -118,7 +121,10 @@ async iteration! 
'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) // Scroll utility async function * scrollSearch (params) { diff --git a/docs/examples/search.asciidoc b/docs/examples/search.asciidoc index 2cf5b3c50..229d1b09b 100644 --- a/docs/examples/search.asciidoc +++ b/docs/examples/search.asciidoc @@ -12,7 +12,10 @@ https://www.elastic.co/guide/en/elasticsearch/reference/6.6/search-request-body. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { // Let's start by indexing some data diff --git a/docs/examples/sql.query.asciidoc b/docs/examples/sql.query.asciidoc index 00505d2fd..cdf61147c 100644 --- a/docs/examples/sql.query.asciidoc +++ b/docs/examples/sql.query.asciidoc @@ -17,7 +17,10 @@ manipulate the result to obtain an object easy to navigate. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { await client.index({ diff --git a/docs/examples/suggest.asciidoc b/docs/examples/suggest.asciidoc index d4448a1a4..6096bc753 100644 --- a/docs/examples/suggest.asciidoc +++ b/docs/examples/suggest.asciidoc @@ -12,7 +12,10 @@ request. If the query part is left out, only suggestions are returned. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { const bulkResponse = await client.bulk({ diff --git a/docs/examples/transport.request.asciidoc b/docs/examples/transport.request.asciidoc index 86482046f..7c325e07e 100644 --- a/docs/examples/transport.request.asciidoc +++ b/docs/examples/transport.request.asciidoc @@ -20,7 +20,10 @@ maintain. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { const bulkResponse = await client.bulk({ diff --git a/docs/examples/update.asciidoc b/docs/examples/update.asciidoc index 784a7d6d8..3c83acd25 100644 --- a/docs/examples/update.asciidoc +++ b/docs/examples/update.asciidoc @@ -10,7 +10,10 @@ a character has said the given quote, and then we will update the `times` field. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { await client.index({ @@ -54,7 +57,10 @@ With the update API, you can also run a partial update of a document. 
'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { await client.index({ diff --git a/docs/examples/update_by_query.asciidoc b/docs/examples/update_by_query.asciidoc index fdb198aec..d17b5c455 100644 --- a/docs/examples/update_by_query.asciidoc +++ b/docs/examples/update_by_query.asciidoc @@ -10,7 +10,10 @@ property or some other online mapping change. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { await client.index({ diff --git a/docs/helpers.asciidoc b/docs/helpers.asciidoc index f83d29144..b78f79399 100644 --- a/docs/helpers.asciidoc +++ b/docs/helpers.asciidoc @@ -27,7 +27,10 @@ const { createReadStream } = require('fs') const split = require('split2') const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) const result = await client.helpers.bulk({ datasource: createReadStream('./dataset.ndjson').pipe(split()), onDocument (doc) { @@ -248,7 +251,10 @@ const { createReadStream } = require('fs') const split = require('split2') const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) const b = client.helpers.bulk({ datasource: createReadStream('./dataset.ndjson').pipe(split()), onDocument (doc) { @@ -304,7 +310,10 @@ async function * generator () { } } -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) const result = await client.helpers.bulk({ datasource: generator(), onDocument (doc) { @@ -338,7 +347,10 @@ sources. ---- const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) const m = client.helpers.msearch() m.search( @@ -427,7 +439,10 @@ running will not be stopped. ---- const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) const m = client.helpers.msearch() m.search( diff --git a/docs/introduction.asciidoc b/docs/introduction.asciidoc index 5d8ba3cbe..83885d09e 100644 --- a/docs/introduction.asciidoc +++ b/docs/introduction.asciidoc @@ -25,7 +25,10 @@ about the features of the client. 'use strict' const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) async function run () { // Let's start by indexing some data @@ -116,8 +119,14 @@ Require the packages from your code by using the alias you have defined. 
const { Client: Client6 } = require('es6') const { Client: Client7 } = require('es7') -const client6 = new Client6({ node: 'http://localhost:9200/' }) -const client7 = new Client7({ node: 'http://localhost:9201/' }) +const client6 = new Client6({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const client7 = new Client7({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) client6.info().then(console.log, console.log) client7.info().then(console.log, console.log) diff --git a/docs/observability.asciidoc b/docs/observability.asciidoc index 44307f68b..c5e4d380c 100644 --- a/docs/observability.asciidoc +++ b/docs/observability.asciidoc @@ -34,7 +34,10 @@ response and error that is happening during the use of the client. ---- const logger = require('my-logger')() const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) client.diagnostic.on('response', (err, result) => { if (err) { @@ -183,7 +186,10 @@ handle this problem. [source,js] ---- const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) client.diagnostic.on('request', (err, result) => { const { id } = result.meta.request @@ -213,7 +219,8 @@ By default the id is an incremental integer, but you can configure it with the ---- const { Client } = require('@elastic/elasticsearch') const client = new Client({ - node: 'http://localhost:9200/', + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, // it takes two parameters, the request parameters and options generateRequestId: function (params, options) { // your id generation logic @@ -246,7 +253,10 @@ can do that via the `context` option of a request: [source,js] ---- const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) client.diagnostic.on('request', (err, result) => { const { id } = result.meta.request @@ -280,7 +290,8 @@ merged, and the API level object will take precedence. ---- const { Client } = require('@elastic/elasticsearch') const client = new Client({ - node: 'http://localhost:9200/', + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, context: { winter: 'is coming' } }) @@ -321,7 +332,8 @@ options help you in this regard. ---- const { Client } = require('@elastic/elasticsearch') const client = new Client({ - node: 'http://localhost:9200/', + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, name: 'parent-client' // default to 'elasticsearch-js' }) @@ -377,7 +389,8 @@ resulting header will be `{ 'X-Opaque-Id': 'my-search' }`. 
---- const { Client } = require('@elastic/elasticsearch') const client = new Client({ - node: 'http://localhost:9200/' + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } }) client.search({ @@ -398,7 +411,8 @@ doing this, the client offers a top-level configuration option: ---- const { Client } = require('@elastic/elasticsearch') const client = new Client({ - node: 'http://localhost:9200/', + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, opaqueIdPrefix: 'proxy-client::' }) diff --git a/docs/testing.asciidoc b/docs/testing.asciidoc index 34778ba06..35b937474 100644 --- a/docs/testing.asciidoc +++ b/docs/testing.asciidoc @@ -61,7 +61,8 @@ const Mock = require('@elastic/elasticsearch-mock') const mock = new Mock() const client = new Client({ - node: 'http://localhost:9200/', + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, Connection: mock.getConnection() }) diff --git a/docs/transport.asciidoc b/docs/transport.asciidoc index 308e7098e..5096616ea 100644 --- a/docs/transport.asciidoc +++ b/docs/transport.asciidoc @@ -6,7 +6,8 @@ errors, it also handles sniffing. [source,js] ---- -const { Client, Transport } = require('@elastic/elasticsearch') +const { Client } = require('@elastic/elasticsearch') +const { Transport } = require('@elastic/transport') class MyTransport extends Transport { request (params, options, callback) { diff --git a/docs/typescript.asciidoc b/docs/typescript.asciidoc index 85e61d780..86ca67389 100644 --- a/docs/typescript.asciidoc +++ b/docs/typescript.asciidoc @@ -14,7 +14,10 @@ and others may contain some errors, but we are continuously pushing fixes & impr ---- import { Client } from '@elastic/elasticsearch' -const client = new Client({ node: 'http://localhost:9200/' }) +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) interface Document { character: string From 96b5b8eabac91519685e76069d87560db023912b Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Wed, 30 Mar 2022 13:34:36 +0200 Subject: [PATCH 161/647] More lenient parameter checks (#1662) --- src/client.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client.ts b/src/client.ts index 92650608a..0c12c1968 100644 --- a/src/client.ts +++ b/src/client.ts @@ -186,15 +186,15 @@ export default class Client extends API { maxCompressedResponseSize: null }, opts) - if (options.caFingerprint !== null && isHttpConnection(opts.node ?? opts.nodes)) { + if (options.caFingerprint != null && isHttpConnection(opts.node ?? 
opts.nodes)) { throw new errors.ConfigurationError('You can\'t configure the caFingerprint with a http connection') } - if (options.maxResponseSize !== null && options.maxResponseSize > buffer.constants.MAX_STRING_LENGTH) { + if (options.maxResponseSize != null && options.maxResponseSize > buffer.constants.MAX_STRING_LENGTH) { throw new errors.ConfigurationError(`The maxResponseSize cannot be bigger than ${buffer.constants.MAX_STRING_LENGTH}`) } - if (options.maxCompressedResponseSize !== null && options.maxCompressedResponseSize > buffer.constants.MAX_LENGTH) { + if (options.maxCompressedResponseSize != null && options.maxCompressedResponseSize > buffer.constants.MAX_LENGTH) { throw new errors.ConfigurationError(`The maxCompressedResponseSize cannot be bigger than ${buffer.constants.MAX_LENGTH}`) } From 57426c968b478ed00db8783b7044680816ecd6e6 Mon Sep 17 00:00:00 2001 From: delvedor Date: Wed, 30 Mar 2022 13:39:47 +0200 Subject: [PATCH 162/647] Bump versions --- .ci/test-matrix.yml | 2 +- package.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml index 94bfd779a..66c19bf6d 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -1,6 +1,6 @@ --- STACK_VERSION: - - 8.2.0-SNAPSHOT + - 8.3.0-SNAPSHOT NODE_JS_VERSION: - 16 diff --git a/package.json b/package.json index 32f3747a6..6d6d86115 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.2.0", - "versionCanary": "8.2.0-canary.2", + "version": "8.3.0", + "versionCanary": "8.3.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 8604da555fdc15871d4398af8499d1cf33f5d2b1 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Thu, 31 Mar 2022 09:37:50 +0200 Subject: [PATCH 163/647] Drop Node.js v12 (#1670) --- .ci/packer_cache.sh | 4 ++-- .ci/run-repository.sh | 2 +- .ci/test-matrix.yml | 2 +- .github/workflows/nodejs.yml | 2 +- README.md | 7 ++++--- docs/installation.asciidoc | 6 +++++- package.json | 2 +- 7 files changed, 15 insertions(+), 10 deletions(-) diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh index 97903810d..6316fd91e 100644 --- a/.ci/packer_cache.sh +++ b/.ci/packer_cache.sh @@ -2,9 +2,9 @@ source /usr/local/bin/bash_standard_lib.sh -DOCKER_IMAGES="node:16-alpine +DOCKER_IMAGES="node:17-alpine +node:16-alpine node:14-alpine -node:12-alpine " for di in ${DOCKER_IMAGES} diff --git a/.ci/run-repository.sh b/.ci/run-repository.sh index 846abfc26..9ba13f5df 100755 --- a/.ci/run-repository.sh +++ b/.ci/run-repository.sh @@ -9,7 +9,7 @@ script_path=$(dirname $(realpath -s $0)) source $script_path/functions/imports.sh set -euo pipefail -NODE_JS_VERSION=${NODE_JS_VERSION-12} +NODE_JS_VERSION=${NODE_JS_VERSION-16} ELASTICSEARCH_URL=${ELASTICSEARCH_URL-"$elasticsearch_url"} elasticsearch_container=${elasticsearch_container-} diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml index 66c19bf6d..b569c7dab 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -3,9 +3,9 @@ STACK_VERSION: - 8.3.0-SNAPSHOT NODE_JS_VERSION: + - 17 - 16 - 14 - - 12 TEST_SUITE: - free diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 1bb004ad1..fce16c6f3 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -9,7 +9,7 @@ jobs: strategy: matrix: - node-version: [12.x, 14.x, 16.x] + node-version: [14.x, 16.x, 17.x] os: [ubuntu-latest, windows-latest, macOS-latest] steps: diff --git a/README.md 
b/README.md index 5210226fc..2ff6817e2 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ npm install @elastic/elasticsearch ### Node.js support -NOTE: The minimum supported version of Node.js is `v12`. +NOTE: The minimum supported version of Node.js is `v14`. The client versioning follows the Elastic Stack versioning, this means that major, minor, and patch releases are done following a precise schedule that @@ -44,8 +44,9 @@ of `^7.10.0`). | Node.js Version | Node.js EOL date | End of support | | --------------- |------------------| ---------------------- | | `8.x` | `December 2019` | `7.11` (early 2021) | -| `10.x` | `April 2021` | `7.12` (mid 2021) | -| `12.x` | `April 2022` | `8.2` (early 2022) | +| `10.x` | `April 2021` | `7.12` (mid 2021) | +| `12.x` | `April 2022` | `8.2` (early 2022) | +| `14.x` | `April 2023` | `8.8` (early 2023) | ### Compatibility diff --git a/docs/installation.asciidoc b/docs/installation.asciidoc index bd8684aba..b04a1a1cd 100644 --- a/docs/installation.asciidoc +++ b/docs/installation.asciidoc @@ -24,7 +24,7 @@ To learn more about the supported major versions, please refer to the [[nodejs-support]] === Node.js support -NOTE: The minimum supported version of Node.js is `v12`. +NOTE: The minimum supported version of Node.js is `v14`. The client versioning follows the {stack} versioning, this means that major, minor, and patch releases are done following a precise schedule that @@ -60,6 +60,10 @@ of `^7.10.0`). |`12.x` |April 2022 |`8.2` (early 2022) + +|`14.x` +|April 2023 +|`8.8` (early 2023) |=== [discrete] diff --git a/package.json b/package.json index 6d6d86115..c2b4681c4 100644 --- a/package.json +++ b/package.json @@ -45,7 +45,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "engines": { - "node": ">=12" + "node": ">=14" }, "devDependencies": { "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", From c4e793ca71c3678050365175f4e9cac3bb62ce37 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Tue, 12 Apr 2022 10:33:54 +0200 Subject: [PATCH 164/647] Update TypeScript docs and export estypes (#1675) * Update docs * Update issue template * Export estypes --- .github/ISSUE_TEMPLATE/regression.md | 1 + docs/typescript.asciidoc | 11 +++++++++++ index.d.ts | 2 ++ 3 files changed, 14 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/regression.md b/.github/ISSUE_TEMPLATE/regression.md index b3c6c2606..7984b3129 100644 --- a/.github/ISSUE_TEMPLATE/regression.md +++ b/.github/ISSUE_TEMPLATE/regression.md @@ -51,5 +51,6 @@ Paste the results here: - *node version*: 6,8,10 - `@elastic/elasticsearch` *version*: >=7.0.0 +- *typescript version*: 4.x (if applicable) - *os*: Mac, Windows, Linux - *any other relevant information* diff --git a/docs/typescript.asciidoc b/docs/typescript.asciidoc index 86ca67389..becaf488b 100644 --- a/docs/typescript.asciidoc +++ b/docs/typescript.asciidoc @@ -7,6 +7,10 @@ of type definitions of Elasticsearch's API surface. The types are not 100% complete yet. Some APIs are missing (the newest ones, e.g. EQL), and others may contain some errors, but we are continuously pushing fixes & improvements. +NOTE: The client is developed against the https://www.npmjs.com/package/typescript?activeTab=versions[latest] +version of TypeScript. Furthermore, unless you have set `skipLibCheck` to `true`, +you should configure `esModuleInterop` to `true`. 
+ [discrete] ==== Example @@ -77,3 +81,10 @@ You can import the full TypeScript requests & responses definitions as it follow ---- import { estypes } from '@elastic/elasticsearch' ---- + +If you need the legacy definitions with the body, you can do the following: + +[source,ts] +---- +import { estypesWithBody } from '@elastic/elasticsearch' +---- \ No newline at end of file diff --git a/index.d.ts b/index.d.ts index 8fb595c75..2fbbb3652 100644 --- a/index.d.ts +++ b/index.d.ts @@ -21,5 +21,7 @@ import Client from './lib/client' import SniffingTransport from './lib/sniffingTransport' export * from '@elastic/transport' +export * as estypes from './lib/api/types' +export * as estypesWithBody from './lib/api/types' export { Client, SniffingTransport } export type { ClientOptions, NodeOptions } from './lib/client' From c58e93a77a468ac534a0a7ab7c0bc36ab10d1c0d Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Tue, 3 May 2022 19:31:42 +0200 Subject: [PATCH 165/647] Updated dependencies (#1684) * Updated dependencies * Updated dependencies --- .ci/test-matrix.yml | 2 +- .github/workflows/nodejs.yml | 4 ++-- package.json | 35 +++++++++++++++++------------------ 3 files changed, 20 insertions(+), 21 deletions(-) diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml index b569c7dab..6b491ee9c 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -3,7 +3,7 @@ STACK_VERSION: - 8.3.0-SNAPSHOT NODE_JS_VERSION: - - 17 + - 18 - 16 - 14 diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index fce16c6f3..b81f89dec 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -9,7 +9,7 @@ jobs: strategy: matrix: - node-version: [14.x, 16.x, 17.x] + node-version: [14.x, 16.x, 18.x] os: [ubuntu-latest, windows-latest, macOS-latest] steps: @@ -176,7 +176,7 @@ jobs: strategy: matrix: - node-version: [14.x] + node-version: [16.x] steps: - uses: actions/checkout@v2 diff --git a/package.json b/package.json index c2b4681c4..cc973e5ac 100644 --- a/package.json +++ b/package.json @@ -49,39 +49,38 @@ }, "devDependencies": { "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", - "@types/debug": "^4.1.6", + "@types/debug": "^4.1.7", "@types/ms": "^0.7.31", - "@types/node": "^16.4.1", - "@types/sinonjs__fake-timers": "^6.0.3", + "@types/node": "^17.0.31", + "@types/sinonjs__fake-timers": "^8.1.2", "@types/split2": "^3.2.1", "@types/stoppable": "^1.1.1", - "@types/tap": "^15.0.5", + "@types/tap": "^15.0.7", "cross-zip": "^4.0.0", "fast-deep-equal": "^3.1.3", - "into-stream": "^6.0.0", + "into-stream": "^7.0.0", "js-yaml": "^4.1.0", "license-checker": "^25.0.1", - "minimist": "^1.2.5", + "minimist": "^1.2.6", "ms": "^2.1.3", - "node-abort-controller": "^2.0.0", - "node-fetch": "^2.6.2", - "ora": "^5.4.1", + "node-abort-controller": "^3.0.1", + "node-fetch": "^2.6.7", + "ora": "^6.1.0", "proxy": "^1.0.2", "rimraf": "^3.0.2", - "semver": "^7.3.5", - "split2": "^3.2.2", - "standard": "^16.0.3", + "semver": "^7.3.7", + "split2": "^4.1.0", "stoppable": "^1.1.0", - "tap": "^15.0.9", - "ts-node": "^10.1.0", - "ts-standard": "^10.0.0", - "typescript": "^4.3.5", + "tap": "^16.1.0", + "ts-node": "^10.7.0", + "ts-standard": "^11.0.0", + "typescript": "^4.6.4", "workq": "^3.0.0", "xmlbuilder2": "^3.0.2" }, "dependencies": { - "@elastic/transport": "^8.0.2", - "tslib": "^2.3.0" + "@elastic/transport": "^8.2.0", + "tslib": "^2.4.0" }, "tap": { "ts": true, From 651165d842e813a8be5f93846d5cc8578f852708 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Tue, 3 May 2022 
19:37:44 +0200
Subject: [PATCH 166/647] Updated changelog.asciidoc (#1683)

---
 docs/changelog.asciidoc | 49 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc
index 52c2bf673..ba5ec954f 100644
--- a/docs/changelog.asciidoc
+++ b/docs/changelog.asciidoc
@@ -1,6 +1,55 @@
 [[changelog-client]]
 == Release notes
 
+[discrete]
+=== 8.2.0
+
+[discrete]
+==== Breaking changes
+
+[discrete]
+===== Drop Node.js v12 https://github.com/elastic/elasticsearch-js/pull/1670[#1670]
+
+This is in line with our https://github.com/elastic/elasticsearch-js#nodejs-support[Node.js support matrix].
+
+[discrete]
+==== Features
+
+[discrete]
+===== Support for Elasticsearch `v8.2`
+
+You can find all the API changes
+https://www.elastic.co/guide/en/elasticsearch/reference/8.2/release-notes-8.2.0.html[here].
+
+[discrete]
+===== More lenient parameter checks https://github.com/elastic/elasticsearch-js/pull/1662[#1662]
+
+When creating a new client, an `undefined` `caFingerprint` no longer triggers an error for an HTTP connection.
+
+[discrete]
+===== Update TypeScript docs and export estypes https://github.com/elastic/elasticsearch-js/pull/1675[#1675]
+
+You can import the full TypeScript requests & responses definitions as follows:
+[source,ts]
+----
+import { estypes } from '@elastic/elasticsearch'
+----
+
+If you need the legacy definitions with the body, you can do the following:
+
+[source,ts]
+----
+import { estypesWithBody } from '@elastic/elasticsearch'
+----
+
+[discrete]
+==== Fixes
+
+[discrete]
+===== Updated hpagent to the latest version https://github.com/elastic/elastic-transport-js/pull/49[transport/#49]
+
+You can find the related changes https://github.com/delvedor/hpagent/releases/tag/v1.0.0[here].
+ [discrete] === 8.1.0 From 911af982b236d6ac969f0a25648fde07c4670625 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Fri, 6 May 2022 15:15:19 +0200 Subject: [PATCH 167/647] Fix integration test (#1689) --- README.md | 2 +- package.json | 2 +- test/integration/index.js | 11 ++++++++++ test/integration/test-runner.js | 39 +++++++++++++++++++++++++++++++++ 4 files changed, 52 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 2ff6817e2..1912c49ed 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ # Elasticsearch Node.js client -[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://clients-ci.elastic.co/buildStatus/icon?job=elastic%2Belasticsearch-js%2Bmain)](https://clients-ci.elastic.co/view/Javascript/job/elastic+elasticsearch-js+main/) [![Node CI](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml/badge.svg)](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) +[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://clients-ci.elastic.co/buildStatus/icon?job=elastic%2Belasticsearch-js%2Bmain)](https://clients-ci.elastic.co/view/JavaScript/job/elastic+elasticsearch-js+main/) [![Node CI](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml/badge.svg)](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) The official Node.js client for Elasticsearch. 
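The `estypes` export documented in the 8.2.0 release notes above is easier to follow with a short usage sketch. Everything below is illustrative and is not part of any patch in this series; the cloud ID, API key, index name, and `Quote` interface are invented for the example.

[source,ts]
----
import { Client, estypes } from '@elastic/elasticsearch'

// Hypothetical credentials, for illustration only.
const client = new Client({
  cloud: { id: 'deployment-name:base64Data' },
  auth: { apiKey: 'base64EncodedKey' }
})

// An invented document shape used to type the search hits.
interface Quote {
  character: string
  quote: string
}

async function run (): Promise<void> {
  // estypes describes the bodyless 8.x request shape...
  const request: estypes.SearchRequest = {
    index: 'game-of-thrones',
    query: { match: { character: 'Ned Stark' } }
  }
  // ...and the matching response shape, typed on the document source.
  const response: estypes.SearchResponse<Quote> = await client.search<Quote>(request)
  for (const hit of response.hits.hits) {
    console.log(hit._source?.quote)
  }
}

run().catch(console.log)
----

If the older body-style definitions are needed, `estypesWithBody` can be substituted in the same way.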
diff --git a/package.json b/package.json
index cc973e5ac..259cd7c9b 100644
--- a/package.json
+++ b/package.json
@@ -65,7 +65,7 @@
     "ms": "^2.1.3",
     "node-abort-controller": "^3.0.1",
     "node-fetch": "^2.6.7",
-    "ora": "^6.1.0",
+    "ora": "^5.4.1",
     "proxy": "^1.0.2",
     "rimraf": "^3.0.2",
     "semver": "^7.3.7",
diff --git a/test/integration/index.js b/test/integration/index.js
index 02073cbb3..c794beb6c 100644
--- a/test/integration/index.js
+++ b/test/integration/index.js
@@ -43,6 +43,10 @@ const MAX_FILE_TIME = 1000 * 30
 const MAX_TEST_TIME = 1000 * 3
 
 const freeSkips = {
+  // not supported yet
+  '/free/cluster.desired_nodes/10_basic.yml': ['*'],
+  '/free/health/30_feature.yml': ['*'],
+  '/free/health/40_useractions.yml': ['*'],
   // the v8 client never sends the scroll_id in querystring,
   // the way the test is structured causes a security exception
   'free/scroll/10_basic.yml': ['Body params override query string'],
@@ -63,13 +67,17 @@ const freeSkips = {
   // the expected error is returning a 503,
   // which triggers a retry and the node to be marked as dead
   'search.aggregation/240_max_buckets.yml': ['*'],
+  // long values and json do not play nicely together
+  'search.aggregation/40_range.yml': ['Min and max long range bounds'],
   // the yaml runner assumes that null means "does not exist",
   // while null is a valid json value, so the check will fail
   'search/320_disallow_queries.yml': ['Test disallow expensive queries'],
   'free/tsdb/90_unsupported_operations.yml': ['noop update']
 }
 const platinumBlackList = {
+  'api_key/10_basic.yml': ['Test get api key'],
   'api_key/20_query.yml': ['*'],
+  'api_key/11_invalidation.yml': ['Test invalidate api key by realm name'],
   'analytics/histogram.yml': ['Histogram requires values in increasing order'],
   // these two test cases are broken, we should
   // return to those in the future.
@@ -107,6 +115,7 @@ const platinumBlackList = {
   // Investigate why this is failing
   'ml/inference_crud.yml': ['*'],
   'ml/categorization_agg.yml': ['Test categorization aggregation with poor settings'],
+  'ml/filter_crud.yml': ['*'],
   // investigate why this is failing
   'monitoring/bulk/10_basic.yml': ['*'],
   'monitoring/bulk/20_privileges.yml': ['*'],
@@ -119,6 +128,8 @@
   'service_accounts/10_basic.yml': ['*'],
   // we are setting two certificates in the docker config
   'ssl/10_basic.yml': ['*'],
+  'token/10_basic.yml': ['*'],
+  'token/11_invalidation.yml': ['*'],
   // very likely, the index template has not been loaded yet.
   // we should run an indices.existsTemplate, but the name of the
   // template may vary over time.
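The skip maps updated in the hunk above pair a YAML test file with either specific test names or `'*'` for the entire file. Below is a minimal sketch of the kind of lookup the runner performs; it is a simplified stand-in under assumed semantics, not the runner's actual implementation.

[source,ts]
----
type SkipMap = Record<string, string[]>

// Two entries mirroring the patch above.
const skips: SkipMap = {
  '/free/health/30_feature.yml': ['*'],
  'search.aggregation/40_range.yml': ['Min and max long range bounds']
}

// Returns true when a test must be skipped: '*' skips the whole file,
// otherwise only the listed test names are skipped.
function shouldSkip (map: SkipMap, file: string, name: string): boolean {
  for (const [pattern, names] of Object.entries(map)) {
    // Some keys carry a leading slash, so compare on the path suffix.
    if (!file.endsWith(pattern.replace(/^\//, ''))) continue
    if (names.includes('*') || names.includes(name)) return true
  }
  return false
}

console.log(shouldSkip(skips, '/free/health/30_feature.yml', 'any test'))  // true
console.log(shouldSkip(skips, 'search.aggregation/40_range.yml', 'other')) // false
----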
diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js index a1a92a981..4a5e279f4 100644 --- a/test/integration/test-runner.js +++ b/test/integration/test-runner.js @@ -188,6 +188,45 @@ function build (opts = {}) { client, 'tasks.cancel', tasks.map(id => ({ task_id: id })) ) + + // cleanup ml + const jobsList = await client.ml.getJobs() + const jobsIds = jobsList.jobs.map(j => j.job_id) + await helper.runInParallel( + client, 'ml.deleteJob', + jobsIds.map(j => ({ job_id: j, force: true })) + ) + + const dataFrame = await client.ml.getDataFrameAnalytics() + const dataFrameIds = dataFrame.data_frame_analytics.map(d => d.id) + await helper.runInParallel( + client, 'ml.deleteDataFrameAnalytics', + dataFrameIds.map(d => ({ id: d, force: true })) + ) + + const calendars = await client.ml.getCalendars() + const calendarsId = calendars.calendars.map(c => c.calendar_id) + await helper.runInParallel( + client, 'ml.deleteCalendar', + calendarsId.map(c => ({ calendar_id: c })) + ) + + const training = await client.ml.getTrainedModels() + const trainingId = training.trained_model_configs + .filter(t => t.created_by !== '_xpack') + .map(t => t.model_id) + await helper.runInParallel( + client, 'ml.deleteTrainedModel', + trainingId.map(t => ({ model_id: t, force: true })) + ) + + // cleanup transforms + const transforms = await client.transform.getTransform() + const transformsId = transforms.transforms.map(t => t.id) + await helper.runInParallel( + client, 'transform.deleteTransform', + transformsId.map(t => ({ transform_id: t, force: true })) + ) } const shutdownNodes = await client.shutdown.getNode() From 697b594ea29acdc4b42dd73c3de5f979a807d9fa Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Thu, 19 May 2022 18:23:24 +0200 Subject: [PATCH 168/647] Add make.sh workflows (#1696) --- .ci/Dockerfile | 2 +- .ci/make.mjs | 125 ++++++++++++++++++++++++++++++ .ci/make.sh | 180 ++++++++++++++++++++++++++++++++++++++++++++ .ci/test-matrix.yml | 2 +- package.json | 6 +- 5 files changed, 311 insertions(+), 4 deletions(-) create mode 100644 .ci/make.mjs create mode 100755 .ci/make.sh diff --git a/.ci/Dockerfile b/.ci/Dockerfile index 9e3716246..81f8ae8f2 100644 --- a/.ci/Dockerfile +++ b/.ci/Dockerfile @@ -1,4 +1,4 @@ -ARG NODE_JS_VERSION=10 +ARG NODE_JS_VERSION=16 FROM node:${NODE_JS_VERSION} # Create app directory diff --git a/.ci/make.mjs b/.ci/make.mjs new file mode 100644 index 000000000..1695a4869 --- /dev/null +++ b/.ci/make.mjs @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* global $ argv */ + +'use strict' + +import 'zx/globals' + +import { readFile, writeFile } from 'fs/promises' +import assert from 'assert' +import { join } from 'desm' +import semver from 'semver' + +assert(typeof argv.task === 'string', 'Missing task parameter') + +switch (argv.task) { + case 'release': + release(argv._).catch(onError) + break + case 'bump': + bump(argv._).catch(onError) + break + case 'codegen': + codegen(argv._).catch(onError) + break + default: + console.log(`Unknown task: ${argv.task}`) + process.exit(1) +} + +async function release (args) { + assert(args.length === 2, 'Release task expects two parameters') + let [version, outputFolder] = args + + if (process.env.WORKFLOW === 'snapshot' && !version.endsWith('SNAPSHOT')) { + version = `${version}-SNAPSHOT` + } + + await bump([version]) + + const packageJson = JSON.parse(await readFile( + join(import.meta.url, '..', 'package.json'), + 'utf8' + )) + + await $`npm run build` + await $`npm pack` + await $`zip elasticsearch-js-${version}.zip elastic-elasticsearch-${packageJson.version}.tgz` + await $`rm elastic-elasticsearch-${packageJson.version}.tgz` + await $`mv ${join(import.meta.url, '..', `elasticsearch-js-${version}.zip`)} ${join(import.meta.url, '..', outputFolder, `elasticsearch-js-${version}.zip`)}` +} + +async function bump (args) { + assert(args.length === 1, 'Bump task expects one parameter') + const [version] = args + const packageJson = JSON.parse(await readFile( + join(import.meta.url, '..', 'package.json'), + 'utf8' + )) + + const cleanVersion = semver.clean(version.includes('SNAPSHOT') ? version.split('-')[0] : version) + assert(semver.valid(cleanVersion)) + packageJson.version = cleanVersion + packageJson.versionCanary = `${cleanVersion}-canary.0` + + await writeFile( + join(import.meta.url, '..', 'package.json'), + JSON.stringify(packageJson, null, 2), + 'utf8' + ) + + const testMatrix = await readFile(join(import.meta.url, 'test-matrix.yml'), 'utf8') + await writeFile( + join(import.meta.url, 'test-matrix.yml'), + testMatrix.replace(/STACK_VERSION:\s+\- "[0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?"/, `STACK_VERSION:\n - "${cleanVersion}-SNAPSHOT"`), // eslint-disable-line + 'utf8' + ) +} + +// this command can only be executed locally for now +async function codegen (args) { + assert(args.length === 1, 'Bump task expects one parameter') + const clientGeneratorPath = join(import.meta.url, '..', '..', 'elastic-client-generator-js') + const [version] = args + + const isGeneratorCloned = await $`[[ -d ${clientGeneratorPath} ]]`.exitCode === 0 + assert(isGeneratorCloned, 'You must clone the elastic-client-generator-js first') + + await $`npm install --prefix ${clientGeneratorPath}` + // this command will take a while! 
+ if (version === 'main') { + await $`npm run elasticsearch --prefix ${clientGeneratorPath} -- --version main` + } else { + await $`npm run elasticsearch --prefix ${clientGeneratorPath} -- --version ${version.split('.').slice(0, 2).join('.')}` + } + await $`npm run lint --prefix ${clientGeneratorPath}` + + await $`rm -rf ${join(import.meta.url, '..', 'src', 'api')}` + await $`mkdir ${join(import.meta.url, '..', 'src', 'api')}` + await $`cp -R ${join(import.meta.url, '..', '..', 'elastic-client-generator-js', 'output')}/* ${join(import.meta.url, '..', 'src', 'api')}` + await $`mv ${join(import.meta.url, '..', 'src', 'api', 'reference.asciidoc')} ${join(import.meta.url, '..', 'docs', 'reference.asciidoc')}` + await $`npm run build` +} + +function onError (err) { + console.log(err) + process.exit(1) +} diff --git a/.ci/make.sh b/.ci/make.sh new file mode 100755 index 000000000..39755599f --- /dev/null +++ b/.ci/make.sh @@ -0,0 +1,180 @@ +#!/usr/bin/env bash + +# ------------------------------------------------------- # +# +# Skeleton for common build entry script for all elastic +# clients. Needs to be adapted to individual client usage. +# +# Must be called: ./.ci/make.sh +# +# Version: 1.1.0 +# +# Targets: +# --------------------------- +# assemble : build client artefacts with version +# bump : bump client internals to version +# codegen : generate endpoints +# docsgen : generate documentation +# examplegen : generate the doc examples +# clean : clean workspace +# +# ------------------------------------------------------- # + +# ------------------------------------------------------- # +# Bootstrap +# ------------------------------------------------------- # + +script_path=$(dirname "$(realpath -s "$0")") +repo=$(realpath "$script_path/../") +generator=$(realpath "$script_path/../../elastic-client-generator-js") + +# shellcheck disable=SC1090 +CMD=$1 +TASK=$1 +TASK_ARGS=() +VERSION=$2 +STACK_VERSION=$VERSION +NODE_JS_VERSION=16 +WORKFLOW=${WORKFLOW-staging} +set -euo pipefail + +product="elastic/elasticsearch-js" +output_folder=".ci/output" +OUTPUT_DIR="$repo/${output_folder}" +REPO_BINDING="${OUTPUT_DIR}:/sln/${output_folder}" +mkdir -p "$OUTPUT_DIR" + +echo -e "\033[34;1mINFO:\033[0m PRODUCT ${product}\033[0m" +echo -e "\033[34;1mINFO:\033[0m VERSION ${STACK_VERSION}\033[0m" +echo -e "\033[34;1mINFO:\033[0m OUTPUT_DIR ${OUTPUT_DIR}\033[0m" + +# ------------------------------------------------------- # +# Parse Command +# ------------------------------------------------------- # + +case $CMD in + clean) + echo -e "\033[36;1mTARGET: clean workspace $output_folder\033[0m" + rm -rf "$output_folder" + echo -e "\033[32;1mdone.\033[0m" + exit 0 + ;; + assemble) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: assemble -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: assemble artefact $VERSION\033[0m" + TASK=release + TASK_ARGS=("$VERSION" "$output_folder") + ;; + codegen) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: codegen -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: codegen API v$VERSION\033[0m" + TASK=codegen + # VERSION is BRANCH here for now + TASK_ARGS=("$VERSION") + ;; + docsgen) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: docsgen -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: generate docs for $VERSION\033[0m" + TASK=codegen + # VERSION is BRANCH here for now + TASK_ARGS=("$VERSION" "$codegen_folder") + ;; + examplesgen) + echo -e "\033[36;1mTARGET: generate 
examples\033[0m" + TASK=codegen + # VERSION is BRANCH here for now + TASK_ARGS=("$VERSION" "$codegen_folder") + ;; + bump) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: bump -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: bump to version $VERSION\033[0m" + TASK=bump + # VERSION is BRANCH here for now + TASK_ARGS=("$VERSION") + ;; + *) + echo -e "\nUsage:\n\t $CMD is not supported right now\n" + exit 1 +esac + + +# ------------------------------------------------------- # +# Build Container +# ------------------------------------------------------- # + +echo -e "\033[34;1mINFO: building $product container\033[0m" + +docker build \ + --file .ci/Dockerfile \ + --tag ${product} \ + --build-arg NODE_JS_VERSION=${NODE_JS_VERSION} \ + --build-arg USER_ID="$(id -u)" \ + --build-arg GROUP_ID="$(id -g)" \ + . + +# ------------------------------------------------------- # +# Run the Container +# ------------------------------------------------------- # + +echo -e "\033[34;1mINFO: running $product container\033[0m" + +docker run \ + --volume $repo:/usr/src/app \ + --volume $generator:/usr/src/elastic-client-generator-js \ + --volume /usr/src/app/node_modules \ + --env "WORKFLOW=${WORKFLOW}" \ + --name make-elasticsearch-js \ + --rm \ + $product \ + node .ci/make.mjs --task $TASK ${TASK_ARGS[*]} + +# ------------------------------------------------------- # +# Post Command tasks & checks +# ------------------------------------------------------- # + +if [[ "$CMD" == "assemble" ]]; then + if compgen -G ".ci/output/*" > /dev/null; then + echo -e "\033[32;1mTARGET: successfully assembled client v$VERSION\033[0m" + else + echo -e "\033[31;1mTARGET: assemble failed, empty workspace!\033[0m" + exit 1 + fi +fi + +if [[ "$CMD" == "bump" ]]; then + if [ -n "$(git status --porcelain)" ]; then + echo -e "\033[32;1mTARGET: successfully bumped client v$VERSION\033[0m" + else + echo -e "\033[31;1mTARGET: failed bumped client v$VERSION\033[0m" + exit 1 + fi +fi + +if [[ "$CMD" == "codegen" ]]; then + if [ -n "$(git status --porcelain)" ]; then + echo -e "\033[32;1mTARGET: successfully generated client v$VERSION\033[0m" + else + echo -e "\033[31;1mTARGET: failed generating client v$VERSION\033[0m" + exit 1 + fi +fi + +if [[ "$CMD" == "docsgen" ]]; then + echo "TODO" +fi + +if [[ "$CMD" == "examplesgen" ]]; then + echo "TODO" +fi diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml index 6b491ee9c..d29ab7347 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -1,6 +1,6 @@ --- STACK_VERSION: - - 8.3.0-SNAPSHOT + - "8.3.0-SNAPSHOT" NODE_JS_VERSION: - 18 diff --git a/package.json b/package.json index 259cd7c9b..7141a2eb1 100644 --- a/package.json +++ b/package.json @@ -57,6 +57,7 @@ "@types/stoppable": "^1.1.1", "@types/tap": "^15.0.7", "cross-zip": "^4.0.0", + "desm": "^1.2.0", "fast-deep-equal": "^3.1.3", "into-stream": "^7.0.0", "js-yaml": "^4.1.0", @@ -76,7 +77,8 @@ "ts-standard": "^11.0.0", "typescript": "^4.6.4", "workq": "^3.0.0", - "xmlbuilder2": "^3.0.2" + "xmlbuilder2": "^3.0.2", + "zx": "^6.1.0" }, "dependencies": { "@elastic/transport": "^8.2.0", @@ -89,4 +91,4 @@ "coverage": false, "check-coverage": false } -} +} \ No newline at end of file From 27748779c6403daccff1bcb0ebd73ebbec88d38d Mon Sep 17 00:00:00 2001 From: delvedor Date: Fri, 20 May 2022 14:28:42 +0200 Subject: [PATCH 169/647] Update CI conf --- ...search-js+8.0.yml => elastic+elasticsearch-js+8.2.yml} | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) rename 
.ci/jobs/{elastic+elasticsearch-js+8.0.yml => elastic+elasticsearch-js+8.2.yml} (61%) diff --git a/.ci/jobs/elastic+elasticsearch-js+8.0.yml b/.ci/jobs/elastic+elasticsearch-js+8.2.yml similarity index 61% rename from .ci/jobs/elastic+elasticsearch-js+8.0.yml rename to .ci/jobs/elastic+elasticsearch-js+8.2.yml index 7c8f25244..2c389b017 100644 --- a/.ci/jobs/elastic+elasticsearch-js+8.0.yml +++ b/.ci/jobs/elastic+elasticsearch-js+8.2.yml @@ -1,13 +1,13 @@ --- - job: - name: elastic+elasticsearch-js+8.0 - display-name: 'elastic / elasticsearch-js # 8.0' - description: Testing the elasticsearch-js 8.0 branch. + name: elastic+elasticsearch-js+8.2 + display-name: 'elastic / elasticsearch-js # 8.2' + description: Testing the elasticsearch-js 8.2 branch. junit_results: "*-junit.xml" parameters: - string: name: branch_specifier - default: refs/heads/8.0 + default: refs/heads/8.2 description: the Git branch specifier to build (<branchName>, <tagName>, <commitId>, etc.) triggers: From 33e0873ecf44878acc8842ca8a94c953d2a63f79 Mon Sep 17 00:00:00 2001 From: delvedor Date: Mon, 23 May 2022 16:33:56 +0200 Subject: [PATCH 170/647] API generation --- docs/reference.asciidoc | 22 +- src/api/api/_internal.ts | 14 +- src/api/api/async_search.ts | 16 +- src/api/api/cat.ts | 9 +- src/api/api/enrich.ts | 2 +- src/api/api/indices.ts | 26 +- src/api/api/ml.ts | 12 +- src/api/api/security.ts | 109 +++++--- src/api/api/shutdown.ts | 16 +- src/api/types.ts | 504 +++++++++++++++++++++++++++------- src/api/typesWithBodyKey.ts | 519 +++++++++++++++++++++++++++++------- 11 files changed, 970 insertions(+), 279 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index bef00bbe8..2fd0eda62 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -2384,6 +2384,8 @@ client.ml.getJobs(...) [discrete] ==== get_memory_stats Returns information on how ML is using memory. + +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-memory.html[Endpoint documentation] [source,ts] ---- client.ml.getMemoryStats(...) @@ -2450,13 +2452,13 @@ client.ml.getTrainedModelsStats(...) ---- [discrete] -==== infer_trained_model_deployment +==== infer_trained_model Evaluate a trained model. -https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model-deployment.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html[Endpoint documentation] [source,ts] ---- -client.ml.inferTrainedModelDeployment(...) +client.ml.inferTrainedModel(...) ---- [discrete] @@ -2980,6 +2982,8 @@ client.searchableSnapshots.stats(...) [discrete] ==== activate_user_profile Creates or updates the user profile on behalf of another user. + +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-activate-user-profile.html[Endpoint documentation] [source,ts] ---- client.security.activateUserProfile(...) @@ -3288,6 +3292,8 @@ client.security.getUserPrivileges(...) [discrete] ==== get_user_profile Retrieves user profile for the given unique ID. + +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-profile.html[Endpoint documentation] [source,ts] ---- client.security.getUserProfile(...) @@ -3474,18 +3480,20 @@ client.security.samlServiceProviderMetadata(...) ---- [discrete] -==== search_user_profiles -Searches for user profiles that match specified criteria. +==== suggest_user_profiles +Get suggestions for user profiles that match specified search criteria. 
-https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-search-user-profile.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-suggest-user-profile.html[Endpoint documentation] [source,ts] ---- -client.security.searchUserProfiles(...) +client.security.suggestUserProfiles(...) ---- [discrete] ==== update_user_profile_data Update application specific data for the user profile of the given unique ID. + +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-user-profile-data.html[Endpoint documentation] [source,ts] ---- client.security.updateUserProfileData(...) diff --git a/src/api/api/_internal.ts b/src/api/api/_internal.ts index cea5da529..581cde7af 100644 --- a/src/api/api/_internal.ts +++ b/src/api/api/_internal.ts @@ -104,8 +104,18 @@ export default class Internal { } } - const method = 'GET' - const path = '/_internal/_health' + let method = '' + let path = '' + if (params.component != null && params.feature != null) { + method = 'GET' + path = `/_internal/_health/${encodeURIComponent(params.component.toString())}/${encodeURIComponent(params.feature.toString())}` + } else if (params.component != null) { + method = 'GET' + path = `/_internal/_health/${encodeURIComponent(params.component.toString())}` + } else { + method = 'GET' + path = '/_internal/_health' + } return await this.transport.request({ path, method, querystring, body }, options) } diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index 3560f488d..57bbcfa42 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -65,10 +65,10 @@ export default class AsyncSearch { return await this.transport.request({ path, method, querystring, body }, options) } - async get (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async get (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async get (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> - async get (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { + async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> + async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined @@ -109,10 +109,10 @@ export default class AsyncSearch { return await this.transport.request({ path, method, querystring, body }, options) } - async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: 
TransportRequestOptions): Promise> - async submit (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { + async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> + async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const querystring: Record = {} diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index a4dea6a34..74e383095 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -103,10 +103,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } - async componentTemplates (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async componentTemplates (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async componentTemplates (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async componentTemplates (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -116,6 +116,7 @@ export default class Cat { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index 74909ed40..2ebc67be0 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -122,7 +122,7 @@ export default class Enrich { async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['geo_match', 'match'] + const acceptedBody: string[] = 
['geo_match', 'match', 'range'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 1fd863f40..cc8abbfa3 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -860,19 +860,31 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } - async modifyDataStream (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async modifyDataStream (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async modifyDataStream (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async modifyDataStream (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['actions'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 83e6932c8..b293853a7 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1158,12 +1158,12 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } - async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> - async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise - async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['docs'] + const acceptedBody: string[] = ['docs', 'inference_config'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1188,7 +1188,7 @@ export default class Ml { } const method = 'POST' - const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_infer` + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/_infer` return await this.transport.request({ path, method, querystring, body }, options) } diff --git a/src/api/api/security.ts b/src/api/api/security.ts index d480295e3..fcde13c7a 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -43,19 +43,31 @@ export default class Security { this.transport = transport } - async activateUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async activateUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async activateUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async activateUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async activateUserProfile (this: That, params: 
T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise + async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['access_token', 'grant_type', 'password', 'username'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -436,19 +448,19 @@ export default class Security { return await this.transport.request({ path, method, querystring, body }, options) } - async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['uid'] const querystring: Record = {} const body = undefined - params = params ?? 
@@ -436,19 +448,19 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async disableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserProfileResponse>
+  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDisableUserProfileResponse, unknown>>
+  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityDisableUserProfileResponse>
+  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['uid']
     const querystring: Record<string, any> = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
        querystring[key] = params[key]
       }
     }
@@ -480,19 +492,19 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async enableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async enableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async enableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async enableUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserProfileResponse>
+  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnableUserProfileResponse, unknown>>
+  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityEnableUserProfileResponse>
+  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['uid']
     const querystring: Record<string, any> = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
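NOTE (illustrative, not part of the patch): disabling and re-enabling a profile with the new typed signatures; uid is taken from the activate sketch above:

const uid = profile.uid
await client.security.disableUserProfile({ uid, refresh: 'wait_for' })
await client.security.enableUserProfile({ uid, refresh: 'wait_for' })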
@@ -830,19 +842,19 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async getUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async getUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async getUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async getUserProfile (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserProfileResponse>
+  async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserProfileResponse, unknown>>
+  async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityGetUserProfileResponse>
+  async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['uid']
     const querystring: Record<string, any> = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
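NOTE (illustrative, not part of the patch): fetching a profile; per the response type added later in this patch the result is assumed to be a record keyed by uid:

const profiles = await client.security.getUserProfile({ uid, data: 'app1.*' }) // 'app1.*' is a placeholder data filter
console.log(profiles[uid].data)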
@@ -1421,41 +1433,66 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async searchUserProfiles (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async searchUserProfiles (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async searchUserProfiles (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async searchUserProfiles (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySuggestUserProfilesResponse>
+  async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySuggestUserProfilesResponse, unknown>>
+  async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise<T.SecuritySuggestUserProfilesResponse>
+  async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['name', 'size']
     const querystring: Record<string, any> = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
     params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
 
     const method = body != null ? 'POST' : 'GET'
-    const path = '/_security/profile/_search'
+    const path = '/_security/profile/_suggest'
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
-  async updateUserProfileData (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async updateUserProfileData (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async updateUserProfileData (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async updateUserProfileData (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityUpdateUserProfileDataResponse>
+  async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityUpdateUserProfileDataResponse, unknown>>
+  async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise<T.SecurityUpdateUserProfileDataResponse>
+  async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['uid']
+    const acceptedBody: string[] = ['access', 'data']
     const querystring: Record<string, any> = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
-    params = params ?? {}
    for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
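NOTE (illustrative, not part of the patch): updating profile data and looking profiles up with the renamed suggest endpoint; the application data is arbitrary:

await client.security.updateUserProfileData({
  uid,
  data: { app1: { theme: 'dark' } } // hypothetical per-application data
})
const suggestions = await client.security.suggestUserProfiles({ name: 'jack', size: 5 })
console.log(suggestions.profiles.map(p => p.uid))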
diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts
index 9ee79c1d1..c995623c7 100644
--- a/src/api/api/shutdown.ts
+++ b/src/api/api/shutdown.ts
@@ -100,11 +100,23 @@ export default class Shutdown {
   async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownPutNodeResponse>
   async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['node_id']
+    const acceptedBody: string[] = ['type', 'reason', 'allocation_delay', 'target_node_name']
     const querystring: Record<string, any> = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
         // @ts-expect-error
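NOTE (illustrative, not part of the patch): putNode now accepts a typed body; a sketch with a placeholder node id:

await client.shutdown.putNode({
  node_id: 'node-1',        // placeholder node id
  type: 'restart',
  reason: 'demonstrating the new typed body',
  allocation_delay: '10m'   // only meaningful for type 'restart'
})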
diff --git a/src/api/types.ts b/src/api/types.ts
index dcbfe4a37..b4cbb39a7 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -241,7 +241,7 @@ export interface DeleteByQueryRethrottleRequest extends RequestBase {
   requests_per_second?: long
 }
 
-export type DeleteByQueryRethrottleResponse = TasksListResponse
+export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase
 
 export interface DeleteScriptRequest extends RequestBase {
   id: Id
@@ -556,10 +556,21 @@ export interface MsearchMultisearchBody {
   aggregations?: Record<string, AggregationsAggregationContainer>
   aggs?: Record<string, AggregationsAggregationContainer>
   query?: QueryDslQueryContainer
+  explain?: boolean
+  stored_fields?: Fields
+  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
   from?: integer
   size?: integer
-  pit?: SearchPointInTimeReference
+  sort?: Sort
+  _source?: SearchSourceConfig
+  terminate_after?: long
+  stats?: string[]
+  timeout?: string
+  track_scores?: boolean
   track_total_hits?: SearchTrackHits
+  version?: boolean
+  seq_no_primary_term?: boolean
+  pit?: SearchPointInTimeReference
   suggest?: SearchSuggester
 }
 
@@ -572,6 +583,9 @@ export interface MsearchMultisearchHeader {
   request_cache?: boolean
   routing?: string
   search_type?: SearchType
+  ccs_minimize_roundtrips?: boolean
+  allow_partial_search_results?: boolean
+  ignore_throttled?: boolean
 }
 
 export interface MsearchRequest extends RequestBase {
@@ -1088,12 +1102,13 @@ export interface SearchCompletionSuggestOption<TDocument = unknown> {
   collate_match?: boolean
   contexts?: Record<string, SearchContext[]>
   fields?: Record<string, any>
-  _id: string
-  _index: IndexName
+  _id?: string
+  _index?: IndexName
   _routing?: Routing
   _score?: double
   _source?: TDocument
   text: string
+  score?: double
 }
 
 export interface SearchCompletionSuggester extends SearchSuggesterBase {
@@ -1153,9 +1168,9 @@ export interface SearchFieldCollapse {
 export interface SearchFieldSuggester {
   completion?: SearchCompletionSuggester
   phrase?: SearchPhraseSuggester
+  term?: SearchTermSuggester
   prefix?: string
   regex?: string
-  term?: SearchTermSuggester
   text?: string
 }
 
@@ -2516,6 +2531,7 @@ export interface AggregationsAggregationContainer {
   geohash_grid?: AggregationsGeoHashGridAggregation
   geo_line?: AggregationsGeoLineAggregation
   geotile_grid?: AggregationsGeoTileGridAggregation
+  geohex_grid?: AggregationsGeohexGridAggregation
   global?: AggregationsGlobalAggregation
   histogram?: AggregationsHistogramAggregation
   ip_range?: AggregationsIpRangeAggregation
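NOTE (illustrative, not part of the patch): the msearch per-search body now carries sort, _source, seq_no_primary_term, and friends; a sketch against a hypothetical index:

const result = await client.msearch({
  searches: [
    { index: 'my-index' },            // header
    {                                  // body, using the newly typed options
      query: { match_all: {} },
      sort: ['_doc'],
      _source: false,
      seq_no_primary_term: true,
      track_total_hits: false
    }
  ]
})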
@@ -2713,14 +2729,6 @@ export interface AggregationsChildrenAggregation extends AggregationsBucketAggre
   type?: RelationName
 }
 
-export interface AggregationsClassificationInferenceOptions {
-  num_top_classes?: integer
-  num_top_feature_importance_values?: integer
-  prediction_field_type?: string
-  results_field?: string
-  top_classes_results_field?: string
-}
-
 export interface AggregationsCompositeAggregate extends AggregationsMultiBucketAggregateBase<AggregationsCompositeBucket> {
   after_key?: Record<string, any>
 }
@@ -2989,6 +2997,14 @@ export interface AggregationsGeoTileGridBucketKeys extends AggregationsMultiBuck
 export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys
 & { [property: string]: AggregationsAggregate | GeoTile | long }
 
+export interface AggregationsGeohexGridAggregation extends AggregationsBucketAggregationBase {
+  field: Field
+  precision?: integer
+  bounds?: GeoBounds
+  size?: integer
+  shard_size?: integer
+}
+
 export interface AggregationsGlobalAggregateKeys extends AggregationsSingleBucketAggregateBase {
 }
 
 export type AggregationsGlobalAggregate = AggregationsGlobalAggregateKeys
@@ -3086,8 +3102,8 @@ export interface AggregationsInferenceClassImportance {
 }
 
 export interface AggregationsInferenceConfigContainer {
-  regression?: AggregationsRegressionInferenceOptions
-  classification?: AggregationsClassificationInferenceOptions
+  regression?: MlRegressionInferenceOptions
+  classification?: MlClassificationInferenceOptions
 }
 
 export interface AggregationsInferenceFeatureImportance {
@@ -3377,11 +3393,6 @@ export interface AggregationsRateAggregation extends AggregationsFormatMetricAgg
 
 export type AggregationsRateMode = 'sum' | 'value_count'
 
-export interface AggregationsRegressionInferenceOptions {
-  results_field?: Field
-  num_top_feature_importance_values?: integer
-}
-
 export interface AggregationsReverseNestedAggregateKeys extends AggregationsSingleBucketAggregateBase {
 }
 
 export type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggregateKeys
@@ -3448,7 +3459,8 @@ export interface AggregationsSignificantTermsAggregation extends AggregationsBuc
   execution_hint?: AggregationsTermsAggregationExecutionHint
   field?: Field
   gnd?: AggregationsGoogleNormalizedDistanceHeuristic
-  include?: string | string[]
+  include?: AggregationsTermsInclude
+  jlh?: EmptyObject
   min_doc_count?: long
   mutual_information?: AggregationsMutualInformationHeuristic
   percentage?: AggregationsPercentageScoreHeuristic
@@ -3472,6 +3484,7 @@ export interface AggregationsSignificantTextAggregation extends AggregationsBuck
   filter_duplicate_text?: boolean
   gnd?: AggregationsGoogleNormalizedDistanceHeuristic
   include?: string | string[]
+  jlh?: EmptyObject
   min_doc_count?: long
   mutual_information?: AggregationsMutualInformationHeuristic
   percentage?: AggregationsPercentageScoreHeuristic
@@ -4477,7 +4490,7 @@ export interface MappingDenseVectorProperty extends MappingPropertyBase {
   index_options?: MappingDenseVectorIndexOptions
 }
 
-export type MappingDocValuesProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDateProperty | MappingDateNanosProperty | MappingKeywordProperty | MappingNumberProperty | MappingRangeProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingCompletionProperty | MappingGenericProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingShapeProperty | MappingTokenCountProperty | MappingVersionProperty | MappingWildcardProperty | MappingPointProperty
+export type MappingDocValuesProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDateProperty | MappingDateNanosProperty | MappingKeywordProperty | MappingNumberProperty | MappingRangeProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingCompletionProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingShapeProperty | MappingTokenCountProperty | MappingVersionProperty | MappingWildcardProperty | MappingPointProperty
 
 export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase {
   doc_values?: boolean
@@ -4542,21 +4555,6 @@ export interface MappingFloatRangeProperty extends MappingRangePropertyBase {
   type: 'float_range'
 }
 
-export interface MappingGenericProperty extends MappingDocValuesPropertyBase {
-  analyzer: string
-  boost: double
-  fielddata: IndicesStringFielddata
-  ignore_malformed: boolean
-  index: boolean
-  index_options: MappingIndexOptions
-  norms: boolean
-  null_value: string
-  position_increment_gap: integer
-  search_analyzer: string
-  term_vector: MappingTermVectorOption
-  type: string
-}
-
 export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw'
 
 export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase {
@@ -4692,7 +4690,6 @@ export type MappingProperty = MappingFlattenedProperty | MappingJoinProperty | M
 export interface MappingPropertyBase {
   local_metadata?: Metadata
   meta?: Record<string, string>
-  name?: PropertyName
   properties?: Record<PropertyName, MappingProperty>
   ignore_above?: integer
   dynamic?: MappingDynamicMapping
@@ -4901,7 +4898,7 @@ export interface QueryDslConstantScoreQuery extends QueryDslQueryBase {
 export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase {
 }
 
 export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement<DateMath, Time> | QueryDslMultiValueMode | QueryDslQueryContainer | double }
+& { [property: string]: QueryDslDecayPlacement<DateMath, Time> | QueryDslMultiValueMode }
 
 export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase<DateMath, Time> {
 }
@@ -4919,7 +4916,7 @@ export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase {
 
 export type QueryDslDecayFunction = QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction
 
-export interface QueryDslDecayFunctionBase extends QueryDslScoreFunctionBase {
+export interface QueryDslDecayFunctionBase {
   multi_value_mode?: QueryDslMultiValueMode
 }
 
@@ -4962,7 +4959,7 @@ export interface QueryDslFieldLookup {
 
 export type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal'
 
-export interface QueryDslFieldValueFactorScoreFunction extends QueryDslScoreFunctionBase {
+export interface QueryDslFieldValueFactorScoreFunction {
   field: Field
   factor?: double
   missing?: double
@@ -5013,7 +5010,7 @@ export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys
 export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase {
 }
 
 export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement<GeoLocation, Distance> | QueryDslMultiValueMode | QueryDslQueryContainer | double }
+& { [property: string]: QueryDslDecayPlacement<GeoLocation, Distance> | QueryDslMultiValueMode }
 
 export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase<GeoLocation, Distance> {
 }
@@ -5279,7 +5276,7 @@ export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase {
 
 export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase {
 }
 
 export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement<double, double> | QueryDslMultiValueMode | QueryDslQueryContainer | double }
+& { [property: string]: QueryDslDecayPlacement<double, double> | QueryDslMultiValueMode }
 
 export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR'
 
@@ -5409,7 +5406,7 @@ export interface QueryDslQueryStringQuery extends QueryDslQueryBase {
   type?: QueryDslTextQueryType
 }
 
-export interface QueryDslRandomScoreFunction extends QueryDslScoreFunctionBase {
+export interface QueryDslRandomScoreFunction {
   field?: Field
   seed?: long | string
 }
@@ -5457,16 +5454,11 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase {
   value: string
 }
 
-export interface QueryDslScoreFunctionBase {
-  filter?: QueryDslQueryContainer
-  weight?: double
-}
-
 export interface QueryDslScriptQuery extends QueryDslQueryBase {
   script: Script
 }
 
-export interface QueryDslScriptScoreFunction extends QueryDslScoreFunctionBase {
+export interface QueryDslScriptScoreFunction {
   script: Script
 }
 
@@ -5655,8 +5647,7 @@ export interface AsyncSearchGetRequest extends RequestBase {
   wait_for_completion_timeout?: Time
 }
 
-export interface AsyncSearchGetResponse<TDocument = unknown> extends AsyncSearchAsyncSearchDocumentResponseBase<TDocument> {
-}
+export type AsyncSearchGetResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = AsyncSearchAsyncSearchDocumentResponseBase<TDocument, TAggregations>
 
 export interface AsyncSearchStatusRequest extends RequestBase {
   id: Id
@@ -5736,8 +5727,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase {
   runtime_mappings?: MappingRuntimeFields
 }
 
-export interface AsyncSearchSubmitResponse<TDocument = unknown> extends AsyncSearchAsyncSearchDocumentResponseBase<TDocument> {
-}
+export type AsyncSearchSubmitResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = AsyncSearchAsyncSearchDocumentResponseBase<TDocument, TAggregations>
 
 export interface AutoscalingAutoscalingPolicy {
   roles: string[]
@@ -5879,6 +5869,22 @@ export interface CatAllocationRequest extends CatCatRequestBase {
 
 export type CatAllocationResponse = CatAllocationAllocationRecord[]
 
+export interface CatComponentTemplatesComponentTemplate {
+  name: string
+  version: string
+  alias_count: string
+  mapping_count: string
+  settings_count: string
+  metadata_count: string
+  included_in: string
+}
+
+export interface CatComponentTemplatesRequest extends CatCatRequestBase {
+  name?: string
+}
+
+export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[]
+
 export interface CatCountCountRecord {
   epoch?: EpochMillis
   t?: EpochMillis
@@ -8489,6 +8495,7 @@ export interface DanglingIndicesListDanglingIndicesResponse {
 export interface EnrichConfiguration {
   geo_match?: EnrichPolicy
   match: EnrichPolicy
+  range: EnrichPolicy
 }
 
 export interface EnrichPolicy {
@@ -8537,6 +8544,7 @@ export interface EnrichPutPolicyRequest extends RequestBase {
   name: Name
   geo_match?: EnrichPolicy
   match?: EnrichPolicy
+  range?: EnrichPolicy
 }
 
 export type EnrichPutPolicyResponse = AcknowledgedResponseBase
@@ -8610,8 +8618,7 @@ export interface EqlGetRequest extends RequestBase {
   wait_for_completion_timeout?: Time
 }
 
-export interface EqlGetResponse<TEvent = unknown> extends EqlEqlSearchResponseBase<TEvent> {
-}
+export type EqlGetResponse<TEvent = unknown> = EqlEqlSearchResponseBase<TEvent>
 
 export interface EqlGetStatusRequest extends RequestBase {
   id: Id
@@ -8647,8 +8654,7 @@ export interface EqlSearchRequest extends RequestBase {
   runtime_mappings?: MappingRuntimeFields
 }
 
-export interface EqlSearchResponse<TEvent = unknown> extends EqlEqlSearchResponseBase<TEvent> {
-}
+export type EqlSearchResponse<TEvent = unknown> = EqlEqlSearchResponseBase<TEvent>
 
 export type EqlSearchResultPosition = 'tail' | 'head'
@@ -9087,7 +9093,7 @@ export interface IndicesFielddataFrequencyFilter {
   min_segment_size: integer
 }
 
-export type IndicesIndexCheckOnStartup = boolean | 'false' | 'checksum' | 'true'
+export type IndicesIndexCheckOnStartup = boolean | 'true' | 'false' | 'checksum'
 
 export interface IndicesIndexRouting {
   allocation?: IndicesIndexRoutingAllocation
@@ -9273,6 +9279,7 @@ export interface IndicesMappingLimitSettings {
   nested_objects?: IndicesMappingLimitSettingsNestedObjects
   field_name_length?: IndicesMappingLimitSettingsFieldNameLength
   dimension_fields?: IndicesMappingLimitSettingsDimensionFields
+  ignore_malformed?: boolean
 }
 
 export interface IndicesMappingLimitSettingsDepth {
@@ -9430,13 +9437,7 @@ export interface IndicesStorage {
   allow_mmap?: boolean
 }
 
-export type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs'
-
-export interface IndicesStringFielddata {
-  format: IndicesStringFielddataFormat
-}
-
-export type IndicesStringFielddataFormat = 'paged_bytes' | 'disabled'
+export type IndicesStorageType = 'fs' | '' | 'niofs' | 'mmapfs' | 'hybridfs'
 
 export interface IndicesTemplateMapping {
   aliases: Record<IndexName, IndicesAlias>
@@ -9454,7 +9455,7 @@ export interface IndicesTranslog {
   retention?: IndicesTranslogRetention
 }
 
-export type IndicesTranslogDurability = 'request' | 'async'
+export type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC'
 
 export interface IndicesTranslogRetention {
   size?: ByteSize
@@ -9615,7 +9616,7 @@ export interface IndicesCreateRequest extends RequestBase {
 export interface IndicesCreateResponse {
   index: IndexName
   shards_acknowledged: boolean
-  acknowledged?: boolean
+  acknowledged: boolean
 }
 
 export interface IndicesCreateDataStreamRequest extends RequestBase {
@@ -9941,6 +9942,22 @@ export interface IndicesMigrateToDataStreamRequest extends RequestBase {
 
 export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase
 
+export interface IndicesModifyDataStreamAction {
+  add_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction
+  remove_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction
+}
+
+export interface IndicesModifyDataStreamIndexAndDataStreamAction {
+  index: IndexName
+  data_stream: DataStreamName
+}
+
+export interface IndicesModifyDataStreamRequest extends RequestBase {
+  actions: IndicesModifyDataStreamAction[]
+}
+
+export type IndicesModifyDataStreamResponse = AcknowledgedResponseBase
+
 export interface IndicesOpenRequest extends RequestBase {
   index: Indices
   allow_no_indices?: boolean
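NOTE (illustrative, not part of the patch): the new IndicesModifyDataStream types back an indices.modifyDataStream call; the stream and backing-index names are placeholders:

await client.indices.modifyDataStream({
  actions: [
    { remove_backing_index: { data_stream: 'my-data-stream', index: '.ds-my-data-stream-000001' } }
  ]
})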
@@ -10761,10 +10778,20 @@ export interface IngestGsubProcessor extends IngestProcessorBase {
 
 export interface IngestInferenceConfig {
   regression?: IngestInferenceConfigRegression
+  classification?: IngestInferenceConfigClassification
+}
+
+export interface IngestInferenceConfigClassification {
+  num_top_classes?: integer
+  num_top_feature_importance_values?: integer
+  results_field?: Field
+  top_classes_results_field?: Field
+  prediction_field_type?: string
 }
 
 export interface IngestInferenceConfigRegression {
-  results_field: string
+  results_field?: Field
+  num_top_feature_importance_values?: integer
 }
 
 export interface IngestInferenceProcessor extends IngestProcessorBase {
@@ -11381,6 +11408,14 @@ export interface MlChunkingConfig {
 
 export type MlChunkingMode = 'auto' | 'manual' | 'off'
 
+export interface MlClassificationInferenceOptions {
+  num_top_classes?: integer
+  num_top_feature_importance_values?: integer
+  prediction_field_type?: string
+  results_field?: string
+  top_classes_results_field?: string
+}
+
 export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte'
 
 export type MlCustomSettings = any
@@ -11771,6 +11806,18 @@ export interface MlDiscoveryNode {
 
 export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over'
 
+export interface MlFillMaskInferenceOptions {
+  num_top_classes?: integer
+  tokenization?: MlTokenizationConfigContainer
+  results_field?: string
+}
+
+export interface MlFillMaskInferenceUpdateOptions {
+  num_top_classes?: integer
+  tokenization?: MlNlpTokenizationUpdateOptions
+  results_field?: string
+}
+
 export interface MlFilter {
   description?: string
   filter_id: Id
@@ -11811,6 +11858,42 @@ export interface MlHyperparameters {
 
 export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance'
 
+export interface MlInferenceConfigCreateContainer {
+  regression?: MlRegressionInferenceOptions
+  classification?: MlClassificationInferenceOptions
+  text_classification?: MlTextClassificationInferenceOptions
+  zero_shot_classification?: MlZeroShotClassificationInferenceOptions
+  fill_mask?: MlFillMaskInferenceOptions
+  ner?: MlNerInferenceOptions
+  pass_through?: MlPassThroughInferenceOptions
+  text_embedding?: MlTextEmbeddingInferenceOptions
+  question_answering?: MlQuestionAnsweringInferenceOptions
+}
+
+export interface MlInferenceConfigUpdateContainer {
+  regression?: MlRegressionInferenceOptions
+  classification?: MlClassificationInferenceOptions
+  text_classification?: MlTextClassificationInferenceUpdateOptions
+  zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions
+  fill_mask?: MlFillMaskInferenceUpdateOptions
+  ner?: MlNerInferenceUpdateOptions
+  pass_through?: MlPassThroughInferenceUpdateOptions
+  text_embedding?: MlTextEmbeddingInferenceUpdateOptions
+  question_answering?: MlQuestionAnsweringInferenceUpdateOptions
+}
+
+export interface MlInferenceResponseResult {
+  entities?: MlTrainedModelEntities[]
+  is_truncated?: boolean
+  predicted_value?: MlPredictedValue[]
+  predicted_value_sequence?: string
+  prediction_probability?: double
+  prediction_score?: double
+  top_classes?: MlTopClassEntry[]
+  warning?: string
+  feature_importance?: MlTrainedModelInferenceFeatureImportance[]
+}
+
 export interface MlInfluence {
   influencer_field_name: string
   influencer_field_values: string[]
@@ -11970,6 +12053,38 @@ export interface MlModelSnapshot {
   timestamp: long
 }
 
+export interface MlNerInferenceOptions {
+  tokenization?: MlTokenizationConfigContainer
+  results_field?: string
+  classification_labels?: string[]
+}
+
+export interface MlNerInferenceUpdateOptions {
+  tokenization?: MlNlpTokenizationUpdateOptions
+  results_field?: string
+}
+
+export interface MlNlpBertTokenizationConfig {
+  do_lower_case?: boolean
+  with_special_tokens?: boolean
+  max_sequence_length?: integer
+  truncate?: MlTokenizationTruncate
+  span?: integer
+}
+
+export interface MlNlpRobertaTokenizationConfig {
+  add_prefix_space?: boolean
+  with_special_tokens?: boolean
+  max_sequence_length?: integer
+  truncate?: MlTokenizationTruncate
+  span?: integer
+}
+
+export interface MlNlpTokenizationUpdateOptions {
+  truncate?: MlTokenizationTruncate
+  span?: integer
+}
+
 export interface MlOutlierDetectionParameters {
   compute_feature_influence?: boolean
   feature_influence_threshold?: double
@@ -11998,12 +12113,42 @@ export interface MlPage {
   size?: integer
 }
 
+export interface MlPassThroughInferenceOptions {
+  tokenization?: MlTokenizationConfigContainer
+  results_field?: string
+}
+
+export interface MlPassThroughInferenceUpdateOptions {
+  tokenization?: MlNlpTokenizationUpdateOptions
+  results_field?: string
+}
+
 export interface MlPerPartitionCategorization {
   enabled?: boolean
   stop_on_warn?: boolean
 }
 
-export type MlPredictedValue = string | double
+export type MlPredictedValue = string | double | boolean | integer
+
+export interface MlQuestionAnsweringInferenceOptions {
+  num_top_classes?: integer
+  tokenization?: MlTokenizationConfigContainer
+  results_field?: string
+  max_answer_length?: integer
+}
+
+export interface MlQuestionAnsweringInferenceUpdateOptions {
+  question: string
+  num_top_classes?: integer
+  tokenization?: MlNlpTokenizationUpdateOptions
+  results_field?: string
+  max_answer_length?: integer
+}
+
+export interface MlRegressionInferenceOptions {
+  results_field?: Field
+  num_top_feature_importance_values?: integer
+}
 
 export type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'stopping'
 
@@ -12020,11 +12165,43 @@ export interface MlRunningStateSearchInterval {
   start_ms: long
 }
 
+export interface MlTextClassificationInferenceOptions {
+  num_top_classes?: integer
+  tokenization?: MlTokenizationConfigContainer
+  results_field?: string
+  classification_labels?: string[]
+}
+
+export interface MlTextClassificationInferenceUpdateOptions {
+  num_top_classes?: integer
+  tokenization?: MlNlpTokenizationUpdateOptions
+  results_field?: string
+  classification_labels?: string[]
+}
+
+export interface MlTextEmbeddingInferenceOptions {
+  tokenization?: MlTokenizationConfigContainer
+  results_field?: string
+}
+
+export interface MlTextEmbeddingInferenceUpdateOptions {
+  tokenization?: MlNlpTokenizationUpdateOptions
+  results_field?: string
+}
+
 export interface MlTimingStats {
   elapsed_time: integer
   iteration_time?: integer
 }
 
+export interface MlTokenizationConfigContainer {
+  bert?: MlNlpBertTokenizationConfig
+  mpnet?: MlNlpBertTokenizationConfig
+  roberta?: MlNlpRobertaTokenizationConfig
+}
+
+export type MlTokenizationTruncate = 'first' | 'second' | 'none'
+
 export interface MlTopClassEntry {
   class_name: string
   class_probability: double
@@ -12077,7 +12254,7 @@ export interface MlTrainedModelConfig {
   description?: string
   estimated_heap_memory_usage_bytes?: integer
   estimated_operations?: integer
-  inference_config: AggregationsInferenceConfigContainer
+  inference_config: MlInferenceConfigCreateContainer
   input: MlTrainedModelConfigInput
   license_level?: string
   metadata?: MlTrainedModelConfigMetadata
@@ -12106,14 +12283,14 @@ export interface MlTrainedModelDeploymentNodesStats {
   average_inference_time_ms: double
   error_count: integer
   inference_count: integer
-  inference_threads: integer
   last_access: long
-  model_threads: integer
   node: MlDiscoveryNode
+  number_of_allocations: integer
   number_of_pending_requests: integer
   rejection_execution_count: integer
   routing_state: MlTrainedModelAllocationRoutingTable
   start_time: long
+  threads_per_allocation: integer
   timeout_count: integer
 }
 
@@ -12121,15 +12298,15 @@ export interface MlTrainedModelDeploymentStats {
   allocation_status: MlTrainedModelDeploymentAllocationStatus
   error_count: integer
   inference_count: integer
-  inference_threads: integer
   model_id: Id
-  model_threads: integer
   nodes: MlTrainedModelDeploymentNodesStats
+  number_of_allocations: integer
   queue_capacity: integer
   rejected_execution_count: integer
   reason: string
   start_time: long
   state: MlDeploymentState
+  threads_per_allocation: integer
   timeout_count: integer
 }
 
@@ -12141,6 +12318,17 @@ export interface MlTrainedModelEntities {
   end_pos: integer
 }
 
+export interface MlTrainedModelInferenceClassImportance {
+  class_name: string
+  importance: double
+}
+
+export interface MlTrainedModelInferenceFeatureImportance {
+  feature_name: string
+  importance?: double
+  classes?: MlTrainedModelInferenceClassImportance[]
+}
+
 export interface MlTrainedModelInferenceStats {
   cache_miss_count: integer
   failure_count: integer
@@ -12178,6 +12366,22 @@ export interface MlValidationLoss {
   loss_type: string
 }
 
+export interface MlZeroShotClassificationInferenceOptions {
+  tokenization?: MlTokenizationConfigContainer
+  hypothesis_template?: string
+  classification_labels: string[]
+  results_field?: string
+  multi_label?: boolean
+  labels?: string[]
+}
+
+export interface MlZeroShotClassificationInferenceUpdateOptions {
+  tokenization?: MlNlpTokenizationUpdateOptions
+  results_field?: string
+  multi_label?: boolean
+  labels: string[]
+}
+
 export interface MlCloseJobRequest extends RequestBase {
   job_id: Id
   allow_no_match?: boolean
@@ -12590,42 +12794,43 @@ export interface MlGetJobsResponse {
 }
 
 export interface MlGetMemoryStatsJvmStats {
-  heap_max: ByteSize
+  heap_max?: ByteSize
   heap_max_in_bytes: integer
-  java_inference: ByteSize
+  java_inference?: ByteSize
   java_inference_in_bytes: integer
-  java_inference_max: ByteSize
+  java_inference_max?: ByteSize
   java_inference_max_in_bytes: integer
 }
 
 export interface MlGetMemoryStatsMemMlStats {
-  anomaly_detectors: ByteSize
+  anomaly_detectors?: ByteSize
   anomaly_detectors_in_bytes: integer
-  data_frame_analytics: ByteSize
+  data_frame_analytics?: ByteSize
   data_frame_analytics_in_bytes: integer
-  max: ByteSize
+  max?: ByteSize
   max_in_bytes: integer
-  native_code_overhead: ByteSize
+  native_code_overhead?: ByteSize
   native_code_overhead_in_bytes: integer
-  native_inference: ByteSize
+  native_inference?: ByteSize
   native_inference_in_bytes: integer
 }
 
 export interface MlGetMemoryStatsMemStats {
-  adjusted_total: ByteSize
+  adjusted_total?: ByteSize
   adjusted_total_in_bytes: integer
-  total: ByteSize
+  total?: ByteSize
   total_in_bytes: integer
   ml: MlGetMemoryStatsMemMlStats
 }
 
 export interface MlGetMemoryStatsMemory {
-  attributes: string[]
+  attributes: Record<string, string>
   jvm: MlGetMemoryStatsJvmStats
   mem: MlGetMemoryStatsMemStats
   name: Name
   roles: string[]
   transport_address: TransportAddress
+  ephemeral_id: Id
 }
 
 export interface MlGetMemoryStatsRequest extends RequestBase {
@@ -12637,7 +12842,7 @@ export interface MlGetMemoryStatsRequest extends RequestBase {
 
 export interface MlGetMemoryStatsResponse {
   _nodes: NodeStatistics
-  cluser_name: Name
+  cluster_name: Name
   nodes: Record<Id, MlGetMemoryStatsMemory>
 }
 
@@ -12720,20 +12925,15 @@ export interface MlGetTrainedModelsStatsResponse {
   trained_model_stats: MlTrainedModelStats[]
 }
 
-export interface MlInferTrainedModelDeploymentRequest extends RequestBase {
+export interface MlInferTrainedModelRequest extends RequestBase {
   model_id: Id
   timeout?: Time
-  docs: Record<string, any>[]
+  docs: Record<string, any>[]
+  inference_config?: MlInferenceConfigUpdateContainer
 }
 
-export interface MlInferTrainedModelDeploymentResponse {
-  entities?: MlTrainedModelEntities[]
-  is_truncated?: boolean
-  predicted_value?: MlPredictedValue[]
-  predicted_value_sequence?: string
-  prediction_probability?: double
-  top_classes: MlTopClassEntry[]
-  warning?: string
+export interface MlInferTrainedModelResponse {
+  inference_results: MlInferenceResponseResult[]
 }
 
 export interface MlInfoAnomalyDetectors {
@@ -13036,7 +13236,7 @@ export interface MlPutTrainedModelRequest extends RequestBase {
  compressed_definition?: string
   definition?: MlPutTrainedModelDefinition
   description?: string
-  inference_config: AggregationsInferenceConfigContainer
+  inference_config: MlInferenceConfigCreateContainer
   input: MlPutTrainedModelInput
   metadata?: any
   model_type?: MlTrainedModelType
@@ -13156,9 +13356,9 @@ export interface MlStartDatafeedResponse {
 
 export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
   model_id: Id
-  inference_threads?: integer
-  model_threads?: integer
+  number_of_allocations?: integer
   queue_capacity?: integer
+  threads_per_allocation?: integer
   timeout?: Time
   wait_for?: MlDeploymentAllocationState
 }
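NOTE (illustrative, not part of the patch): starting a deployment with the renamed sizing parameters (inference_threads/model_threads become threads_per_allocation/number_of_allocations); the model id is a placeholder:

await client.ml.startTrainedModelDeployment({
  model_id: 'my-ner-model',
  number_of_allocations: 2,
  threads_per_allocation: 1,
  wait_for: 'started'
})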
@@ -14579,6 +14779,8 @@ export interface SecurityGlobalPrivilege {
   application: SecurityApplicationGlobalUserPrivileges
 }
 
+export type SecurityGrantType = 'password' | 'access_token'
+
 export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write'
 
 export interface SecurityIndicesPrivileges {
@@ -14642,6 +14844,41 @@ export interface SecurityUser {
   enabled: boolean
 }
 
+export interface SecurityUserProfile {
+  uid: string
+  user: SecurityUserProfileUser
+  data?: Record<string, any>
+  labels?: Record<string, any>
+  enabled?: boolean
+}
+
+export interface SecurityUserProfileHitMetadata {
+  _primary_term: long
+  _seq_no: SequenceNumber
+}
+
+export interface SecurityUserProfileUser {
+  email?: string | null
+  full_name?: Name | null
+  metadata: Metadata
+  roles: string[]
+  username: Username
+}
+
+export interface SecurityUserProfileWithMetadata extends SecurityUserProfile {
+  last_synchronized: long
+  _doc?: SecurityUserProfileHitMetadata
+}
+
+export interface SecurityActivateUserProfileRequest extends RequestBase {
+  access_token?: string
+  grant_type: SecurityGrantType
+  password?: string
+  username?: string
+}
+
+export type SecurityActivateUserProfileResponse = SecurityUserProfileWithMetadata
+
 export interface SecurityAuthenticateRequest extends RequestBase {
 }
 
@@ -14828,6 +15065,13 @@ export interface SecurityDisableUserRequest extends RequestBase {
 export interface SecurityDisableUserResponse {
 }
 
+export interface SecurityDisableUserProfileRequest extends RequestBase {
+  uid: string
+  refresh?: Refresh
+}
+
+export type SecurityDisableUserProfileResponse = AcknowledgedResponseBase
+
 export interface SecurityEnableUserRequest extends RequestBase {
   username: Username
   refresh?: Refresh
@@ -14836,6 +15080,13 @@ export interface SecurityEnableUserRequest extends RequestBase {
 export interface SecurityEnableUserResponse {
 }
 
+export interface SecurityEnableUserProfileRequest extends RequestBase {
+  uid: string
+  refresh?: Refresh
+}
+
+export type SecurityEnableUserProfileResponse = AcknowledgedResponseBase
+
 export interface SecurityEnrollKibanaRequest extends RequestBase {
 }
 
@@ -15018,6 +15269,13 @@ export interface SecurityGetUserPrivilegesResponse {
   run_as: string[]
 }
 
+export interface SecurityGetUserProfileRequest extends RequestBase {
+  uid: string
+  data?: string | string[]
+}
+
+export type SecurityGetUserProfileResponse = Record<string, SecurityUserProfileWithMetadata>
+
 export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password'
 
 export interface SecurityGrantApiKeyGrantApiKey {
@@ -15058,7 +15316,7 @@ export interface SecurityHasPrivilegesIndexPrivilegesCheck {
 
 export type SecurityHasPrivilegesPrivileges = Record<string, boolean>
 
 export interface SecurityHasPrivilegesRequest extends RequestBase {
-  user?: Name | null
+  user?: Name
   application?: SecurityHasPrivilegesApplicationPrivilegesCheck[]
   cluster?: SecurityClusterPrivilege[]
   index?: SecurityHasPrivilegesIndexPrivilegesCheck[]
@@ -15243,8 +15501,40 @@ export interface SecuritySamlServiceProviderMetadataResponse {
   metadata: string
 }
 
+export interface SecuritySuggestUserProfilesRequest extends RequestBase {
+  data?: string | string[]
+  name?: string
+  size?: long
+}
+
+export interface SecuritySuggestUserProfilesResponse {
+  total: SecuritySuggestUserProfilesTotalUserProfiles
+  took: long
+  profiles: SecurityUserProfile[]
+}
+
+export interface SecuritySuggestUserProfilesTotalUserProfiles {
+  value: long
+  relation: RelationName
+}
+
+export interface SecurityUpdateUserProfileDataRequest extends RequestBase {
+  uid: string
+  if_seq_no?: SequenceNumber
+  if_primary_term?: long
+  refresh?: Refresh
+  access?: Record<string, any>
+  data?: Record<string, any>
+}
+
+export type SecurityUpdateUserProfileDataResponse = AcknowledgedResponseBase
+
+export type ShutdownType = 'restart' | 'remove' | 'replace'
+
 export interface ShutdownDeleteNodeRequest extends RequestBase {
   node_id: NodeId
+  master_timeout?: TimeUnit
+  timeout?: TimeUnit
 }
 
 export type ShutdownDeleteNodeResponse = AcknowledgedResponseBase
@@ -15270,6 +15560,8 @@ export interface ShutdownGetNodePluginsStatus {
 
 export interface ShutdownGetNodeRequest extends RequestBase {
   node_id?: NodeIds
+  master_timeout?: TimeUnit
+  timeout?: TimeUnit
 }
 
 export interface ShutdownGetNodeResponse {
@@ -15286,6 +15578,12 @@ export type ShutdownGetNodeShutdownType = 'remove' | 'restart'
 
 export interface ShutdownPutNodeRequest extends RequestBase {
   node_id: NodeId
+  master_timeout?: TimeUnit
+  timeout?: TimeUnit
+  type: ShutdownType
+  reason: string
+  allocation_delay?: string
+  target_node_name?: string
 }
 
 export type ShutdownPutNodeResponse = AcknowledgedResponseBase
@@ -15664,7 +15962,7 @@ export interface SnapshotRestoreRequest extends RequestBase {
   ignore_unavailable?: boolean
   include_aliases?: boolean
   include_global_state?: boolean
-  index_settings?: IndicesPutSettingsRequest
+  index_settings?: IndicesIndexSettings
   indices?: Indices
   partial?: boolean
   rename_pattern?: string
@@ -16202,7 +16500,7 @@ export interface TransformUpdateTransformRequest extends RequestBase {
   source?: TransformSource
   settings?: TransformSettings
   sync?: TransformSyncContainer
-  retention_policy?: TransformRetentionPolicyContainer
+  retention_policy?: TransformRetentionPolicyContainer | null
 }
 
 export interface TransformUpdateTransformResponse {
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index a9c2646a8..20ba75e68 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -257,7 +257,7 @@ export interface DeleteByQueryRethrottleRequest extends RequestBase {
   requests_per_second?: long
 }
 
-export type DeleteByQueryRethrottleResponse = TasksListResponse
+export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase
 
 export interface DeleteScriptRequest extends RequestBase {
   id: Id
@@ -585,10 +585,21 @@ export interface MsearchMultisearchBody {
   aggregations?: Record<string, AggregationsAggregationContainer>
   aggs?: Record<string, AggregationsAggregationContainer>
   query?: QueryDslQueryContainer
+  explain?: boolean
+  stored_fields?: Fields
+  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
   from?: integer
   size?: integer
-  pit?: SearchPointInTimeReference
+  sort?: Sort
+  _source?: SearchSourceConfig
+  terminate_after?: long
+  stats?: string[]
+  timeout?: string
+  track_scores?: boolean
   track_total_hits?: SearchTrackHits
+  version?: boolean
+  seq_no_primary_term?: boolean
+  pit?: SearchPointInTimeReference
   suggest?: SearchSuggester
 }
 
@@ -601,6 +612,9 @@ export interface MsearchMultisearchHeader {
   request_cache?: boolean
   routing?: string
   search_type?: SearchType
+  ccs_minimize_roundtrips?: boolean
+  allow_partial_search_results?: boolean
+  ignore_throttled?: boolean
 }
 
 export interface MsearchRequest extends RequestBase {
@@ -1160,12 +1174,13 @@ export interface SearchCompletionSuggestOption<TDocument = unknown> {
   collate_match?: boolean
   contexts?: Record<string, SearchContext[]>
   fields?: Record<string, any>
-  _id: string
-  _index: IndexName
+  _id?: string
+  _index?: IndexName
   _routing?: Routing
   _score?: double
   _source?: TDocument
   text: string
+  score?: double
 }
 
 export interface SearchCompletionSuggester extends SearchSuggesterBase {
@@ -1225,9 +1240,9 @@ export interface SearchFieldCollapse {
 export interface SearchFieldSuggester {
   completion?: SearchCompletionSuggester
   phrase?: SearchPhraseSuggester
+  term?: SearchTermSuggester
   prefix?: string
   regex?: string
-  term?: SearchTermSuggester
   text?: string
 }
@@ -2616,6 +2631,7 @@ export interface AggregationsAggregationContainer {
   geohash_grid?: AggregationsGeoHashGridAggregation
   geo_line?: AggregationsGeoLineAggregation
   geotile_grid?: AggregationsGeoTileGridAggregation
+  geohex_grid?: AggregationsGeohexGridAggregation
   global?: AggregationsGlobalAggregation
   histogram?: AggregationsHistogramAggregation
   ip_range?: AggregationsIpRangeAggregation
@@ -2813,14 +2829,6 @@ export interface AggregationsChildrenAggregation extends AggregationsBucketAggre
   type?: RelationName
 }
 
-export interface AggregationsClassificationInferenceOptions {
-  num_top_classes?: integer
-  num_top_feature_importance_values?: integer
-  prediction_field_type?: string
-  results_field?: string
-  top_classes_results_field?: string
-}
-
 export interface AggregationsCompositeAggregate extends AggregationsMultiBucketAggregateBase<AggregationsCompositeBucket> {
   after_key?: Record<string, any>
 }
@@ -3089,6 +3097,14 @@ export interface AggregationsGeoTileGridBucketKeys extends AggregationsMultiBuck
 export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys
 & { [property: string]: AggregationsAggregate | GeoTile | long }
 
+export interface AggregationsGeohexGridAggregation extends AggregationsBucketAggregationBase {
+  field: Field
+  precision?: integer
+  bounds?: GeoBounds
+  size?: integer
+  shard_size?: integer
+}
+
 export interface AggregationsGlobalAggregateKeys extends AggregationsSingleBucketAggregateBase {
 }
 
 export type AggregationsGlobalAggregate = AggregationsGlobalAggregateKeys
@@ -3186,8 +3202,8 @@ export interface AggregationsInferenceClassImportance {
 }
 
 export interface AggregationsInferenceConfigContainer {
-  regression?: AggregationsRegressionInferenceOptions
-  classification?: AggregationsClassificationInferenceOptions
+  regression?: MlRegressionInferenceOptions
+  classification?: MlClassificationInferenceOptions
 }
 
 export interface AggregationsInferenceFeatureImportance {
@@ -3477,11 +3493,6 @@ export interface AggregationsRateAggregation extends AggregationsFormatMetricAgg
 
 export type AggregationsRateMode = 'sum' | 'value_count'
 
-export interface AggregationsRegressionInferenceOptions {
-  results_field?: Field
-  num_top_feature_importance_values?: integer
-}
-
 export interface AggregationsReverseNestedAggregateKeys extends AggregationsSingleBucketAggregateBase {
 }
 
 export type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggregateKeys
@@ -3548,7 +3559,8 @@ export interface AggregationsSignificantTermsAggregation extends AggregationsBuc
   execution_hint?: AggregationsTermsAggregationExecutionHint
   field?: Field
   gnd?: AggregationsGoogleNormalizedDistanceHeuristic
-  include?: string | string[]
+  include?: AggregationsTermsInclude
+  jlh?: EmptyObject
   min_doc_count?: long
   mutual_information?: AggregationsMutualInformationHeuristic
   percentage?: AggregationsPercentageScoreHeuristic
@@ -3572,6 +3584,7 @@ export interface AggregationsSignificantTextAggregation extends AggregationsBuck
   filter_duplicate_text?: boolean
   gnd?: AggregationsGoogleNormalizedDistanceHeuristic
   include?: string | string[]
+  jlh?: EmptyObject
   min_doc_count?: long
   mutual_information?: AggregationsMutualInformationHeuristic
   percentage?: AggregationsPercentageScoreHeuristic
@@ -4577,7 +4590,7 @@ export interface MappingDenseVectorProperty extends MappingPropertyBase {
   index_options?: MappingDenseVectorIndexOptions
 }
 
-export type MappingDocValuesProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDateProperty | MappingDateNanosProperty | MappingKeywordProperty | MappingNumberProperty | MappingRangeProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingCompletionProperty | MappingGenericProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingShapeProperty | MappingTokenCountProperty | MappingVersionProperty | MappingWildcardProperty | MappingPointProperty
+export type MappingDocValuesProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDateProperty | MappingDateNanosProperty | MappingKeywordProperty | MappingNumberProperty | MappingRangeProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingCompletionProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingShapeProperty | MappingTokenCountProperty | MappingVersionProperty | MappingWildcardProperty | MappingPointProperty
 
 export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase {
   doc_values?: boolean
@@ -4642,21 +4655,6 @@ export interface MappingFloatRangeProperty extends MappingRangePropertyBase {
   type: 'float_range'
 }
 
-export interface MappingGenericProperty extends MappingDocValuesPropertyBase {
-  analyzer: string
-  boost: double
-  fielddata: IndicesStringFielddata
-  ignore_malformed: boolean
-  index: boolean
-  index_options: MappingIndexOptions
-  norms: boolean
-  null_value: string
-  position_increment_gap: integer
-  search_analyzer: string
-  term_vector: MappingTermVectorOption
-  type: string
-}
-
 export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw'
 
 export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase {
@@ -4792,7 +4790,6 @@ export type MappingProperty = MappingFlattenedProperty | MappingJoinProperty | M
 export interface MappingPropertyBase {
   local_metadata?: Metadata
   meta?: Record<string, string>
-  name?: PropertyName
   properties?: Record<PropertyName, MappingProperty>
   ignore_above?: integer
   dynamic?: MappingDynamicMapping
@@ -5001,7 +4998,7 @@ export interface QueryDslConstantScoreQuery extends QueryDslQueryBase {
 export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase {
 }
 
 export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement<DateMath, Time> | QueryDslMultiValueMode | QueryDslQueryContainer | double }
+& { [property: string]: QueryDslDecayPlacement<DateMath, Time> | QueryDslMultiValueMode }
 
 export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase<DateMath, Time> {
 }
@@ -5019,7 +5016,7 @@ export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase {
 
 export type QueryDslDecayFunction = QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction
 
-export interface QueryDslDecayFunctionBase extends QueryDslScoreFunctionBase {
+export interface QueryDslDecayFunctionBase {
   multi_value_mode?: QueryDslMultiValueMode
 }
 
@@ -5062,7 +5059,7 @@ export interface QueryDslFieldLookup {
 
 export type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal'
 
-export interface QueryDslFieldValueFactorScoreFunction extends QueryDslScoreFunctionBase {
+export interface QueryDslFieldValueFactorScoreFunction {
   field: Field
   factor?: double
   missing?: double
@@ -5113,7 +5110,7 @@ export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys
 export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase {
 }
 
 export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement<GeoLocation, Distance> | QueryDslMultiValueMode | QueryDslQueryContainer | double }
+& { [property: string]: QueryDslDecayPlacement<GeoLocation, Distance> | QueryDslMultiValueMode }
 
 export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase<GeoLocation, Distance> {
 }
@@ -5379,7 +5376,7 @@ export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase {
 
 export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase {
 }
 
 export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement<double, double> | QueryDslMultiValueMode | QueryDslQueryContainer | double }
+& { [property: string]: QueryDslDecayPlacement<double, double> | QueryDslMultiValueMode }
 
 export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR'
 
@@ -5509,7 +5506,7 @@ export interface QueryDslQueryStringQuery extends QueryDslQueryBase {
   type?: QueryDslTextQueryType
 }
 
-export interface QueryDslRandomScoreFunction extends QueryDslScoreFunctionBase {
+export interface QueryDslRandomScoreFunction {
   field?: Field
   seed?: long | string
 }
@@ -5557,16 +5554,11 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase {
   value: string
 }
 
-export interface QueryDslScoreFunctionBase {
-  filter?: QueryDslQueryContainer
-  weight?: double
-}
-
 export interface QueryDslScriptQuery extends QueryDslQueryBase {
   script: Script
 }
 
-export interface QueryDslScriptScoreFunction extends QueryDslScoreFunctionBase {
+export interface QueryDslScriptScoreFunction {
   script: Script
 }
 
@@ -5755,8 +5747,7 @@ export interface AsyncSearchGetRequest extends RequestBase {
   wait_for_completion_timeout?: Time
 }
 
-export interface AsyncSearchGetResponse<TDocument = unknown> extends AsyncSearchAsyncSearchDocumentResponseBase<TDocument> {
-}
+export type AsyncSearchGetResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = AsyncSearchAsyncSearchDocumentResponseBase<TDocument, TAggregations>
 
 export interface AsyncSearchStatusRequest extends RequestBase {
   id: Id
@@ -5853,8 +5844,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase {
   }
 }
 
-export interface AsyncSearchSubmitResponse<TDocument = unknown> extends AsyncSearchAsyncSearchDocumentResponseBase<TDocument> {
-}
+export type AsyncSearchSubmitResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = AsyncSearchAsyncSearchDocumentResponseBase<TDocument, TAggregations>
 
 export interface AutoscalingAutoscalingPolicy {
   roles: string[]
@@ -5997,6 +5987,22 @@ export interface CatAllocationRequest extends CatCatRequestBase {
 
 export type CatAllocationResponse = CatAllocationAllocationRecord[]
 
+export interface CatComponentTemplatesComponentTemplate {
+  name: string
+  version: string
+  alias_count: string
+  mapping_count: string
+  settings_count: string
+  metadata_count: string
+  included_in: string
+}
+
+export interface CatComponentTemplatesRequest extends CatCatRequestBase {
+  name?: string
+}
+
+export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[]
+
 export interface CatCountCountRecord {
   epoch?: EpochMillis
   t?: EpochMillis
@@ -8631,6 +8637,7 @@ export interface DanglingIndicesListDanglingIndicesResponse {
 export interface EnrichConfiguration {
   geo_match?: EnrichPolicy
   match: EnrichPolicy
+  range: EnrichPolicy
 }
 
 export interface EnrichPolicy {
@@ -8681,6 +8688,7 @@ export interface EnrichPutPolicyRequest extends RequestBase {
   body?: {
     geo_match?: EnrichPolicy
     match?: EnrichPolicy
+    range?: EnrichPolicy
   }
 }
 
@@ -8755,8 +8763,7 @@ export interface EqlGetRequest extends RequestBase {
   wait_for_completion_timeout?: Time
 }
 
-export interface EqlGetResponse<TEvent = unknown> extends EqlEqlSearchResponseBase<TEvent> {
-}
+export type EqlGetResponse<TEvent = unknown> = EqlEqlSearchResponseBase<TEvent>
 
 export interface EqlGetStatusRequest extends RequestBase {
   id: Id
@@ -8798,8 +8805,7 @@ export interface EqlSearchRequest extends RequestBase {
   }
 }
 
-export interface EqlSearchResponse<TEvent = unknown> extends EqlEqlSearchResponseBase<TEvent> {
-}
+export type EqlSearchResponse<TEvent = unknown> = EqlEqlSearchResponseBase<TEvent>
 
 export type EqlSearchResultPosition = 'tail' | 'head'
@@ -9268,7 +9274,7 @@ export interface IndicesFielddataFrequencyFilter {
   min_segment_size: integer
 }
 
-export type IndicesIndexCheckOnStartup = boolean | 'false' | 'checksum' | 'true'
+export type IndicesIndexCheckOnStartup = boolean | 'true' | 'false' | 'checksum'
 
 export interface IndicesIndexRouting {
   allocation?: IndicesIndexRoutingAllocation
@@ -9454,6 +9460,7 @@ export interface IndicesMappingLimitSettings {
   nested_objects?: IndicesMappingLimitSettingsNestedObjects
   field_name_length?: IndicesMappingLimitSettingsFieldNameLength
   dimension_fields?: IndicesMappingLimitSettingsDimensionFields
+  ignore_malformed?: boolean
 }
 
 export interface IndicesMappingLimitSettingsDepth {
@@ -9611,13 +9618,7 @@ export interface IndicesStorage {
   allow_mmap?: boolean
 }
 
-export type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs'
-
-export interface IndicesStringFielddata {
-  format: IndicesStringFielddataFormat
-}
-
-export type IndicesStringFielddataFormat = 'paged_bytes' | 'disabled'
+export type IndicesStorageType = 'fs' | '' | 'niofs' | 'mmapfs' | 'hybridfs'
 
 export interface IndicesTemplateMapping {
   aliases: Record<IndexName, IndicesAlias>
@@ -9635,7 +9636,7 @@ export interface IndicesTranslog {
   retention?: IndicesTranslogRetention
 }
 
-export type IndicesTranslogDurability = 'request' | 'async'
+export type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC'
 
 export interface IndicesTranslogRetention {
   size?: ByteSize
@@ -9805,7 +9806,7 @@ export interface IndicesCreateRequest extends RequestBase {
 export interface IndicesCreateResponse {
   index: IndexName
   shards_acknowledged: boolean
-  acknowledged?: boolean
+  acknowledged: boolean
 }
 
 export interface IndicesCreateDataStreamRequest extends RequestBase {
@@ -10131,6 +10132,25 @@ export interface IndicesMigrateToDataStreamRequest extends RequestBase {
 
 export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase
 
+export interface IndicesModifyDataStreamAction {
+  add_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction
+  remove_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction
+}
+
+export interface IndicesModifyDataStreamIndexAndDataStreamAction {
+  index: IndexName
+  data_stream: DataStreamName
+}
+
+export interface IndicesModifyDataStreamRequest extends RequestBase {
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
+  body?: {
+    actions: IndicesModifyDataStreamAction[]
+  }
+}
+
+export type IndicesModifyDataStreamResponse = AcknowledgedResponseBase
+
 export interface IndicesOpenRequest extends RequestBase {
   index: Indices
   allow_no_indices?: boolean
@@ -10984,10 +11004,20 @@ export interface IngestGsubProcessor extends IngestProcessorBase {
 
 export interface IngestInferenceConfig {
   regression?: IngestInferenceConfigRegression
+  classification?: IngestInferenceConfigClassification
+}
+
+export interface IngestInferenceConfigClassification {
+  num_top_classes?: integer
+  num_top_feature_importance_values?: integer
+  results_field?: Field
+  top_classes_results_field?: Field
+  prediction_field_type?: string
 }
 
 export interface IngestInferenceConfigRegression {
-  results_field: string
+  results_field?: Field
+  num_top_feature_importance_values?: integer
 }
 
 export interface IngestInferenceProcessor extends IngestProcessorBase {
@@ -11614,6 +11644,14 @@ export interface MlChunkingConfig {
 
 export type MlChunkingMode = 'auto' | 'manual' | 'off'
 
+export interface MlClassificationInferenceOptions {
+  num_top_classes?: integer
+  num_top_feature_importance_values?: integer
+  prediction_field_type?: string
+  results_field?: string
+  top_classes_results_field?: string
+}
+
 export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte'
 
 export type MlCustomSettings = any
@@ -12004,6 +12042,18 @@ export interface MlDiscoveryNode {
 
 export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over'
 
+export interface MlFillMaskInferenceOptions {
+  num_top_classes?: integer
+  tokenization?: MlTokenizationConfigContainer
+  results_field?: string
+}
+
+export interface MlFillMaskInferenceUpdateOptions {
+  num_top_classes?: integer
+  tokenization?: MlNlpTokenizationUpdateOptions
+  results_field?: string
+}
+
 export interface MlFilter {
   description?: string
   filter_id: Id
@@ -12044,6 +12094,42 @@ export interface MlHyperparameters {
 
 export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance'
 
+export interface MlInferenceConfigCreateContainer {
+  regression?: MlRegressionInferenceOptions
+  classification?: MlClassificationInferenceOptions
+  text_classification?: MlTextClassificationInferenceOptions
+  zero_shot_classification?: MlZeroShotClassificationInferenceOptions
+  fill_mask?: MlFillMaskInferenceOptions
+  ner?: MlNerInferenceOptions
+  pass_through?: MlPassThroughInferenceOptions
+  text_embedding?: MlTextEmbeddingInferenceOptions
+  question_answering?: MlQuestionAnsweringInferenceOptions
+}
+
+export interface MlInferenceConfigUpdateContainer {
+  regression?: MlRegressionInferenceOptions
+  classification?: MlClassificationInferenceOptions
+  text_classification?: MlTextClassificationInferenceUpdateOptions
+  zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions
+  fill_mask?: MlFillMaskInferenceUpdateOptions
+  ner?: MlNerInferenceUpdateOptions
+  pass_through?: MlPassThroughInferenceUpdateOptions
+  text_embedding?: MlTextEmbeddingInferenceUpdateOptions
+  question_answering?: MlQuestionAnsweringInferenceUpdateOptions
+}
+
+export interface MlInferenceResponseResult {
+  entities?: MlTrainedModelEntities[]
+  is_truncated?: boolean
+  predicted_value?: MlPredictedValue[]
+  predicted_value_sequence?: string
+  prediction_probability?: double
+  prediction_score?: double
+  top_classes?: MlTopClassEntry[]
+  warning?: string
+  feature_importance?: MlTrainedModelInferenceFeatureImportance[]
+}
+
 export interface MlInfluence {
   influencer_field_name: string
   influencer_field_values: string[]
influencer_field_values: string[] @@ -12203,6 +12289,38 @@ export interface MlModelSnapshot { timestamp: long } +export interface MlNerInferenceOptions { + tokenization?: MlTokenizationConfigContainer + results_field?: string + classification_labels?: string[] +} + +export interface MlNerInferenceUpdateOptions { + tokenization?: MlNlpTokenizationUpdateOptions + results_field?: string +} + +export interface MlNlpBertTokenizationConfig { + do_lower_case?: boolean + with_special_tokens?: boolean + max_sequence_length?: integer + truncate?: MlTokenizationTruncate + span?: integer +} + +export interface MlNlpRobertaTokenizationConfig { + add_prefix_space?: boolean + with_special_tokens?: boolean + max_sequence_length?: integer + truncate?: MlTokenizationTruncate + span?: integer +} + +export interface MlNlpTokenizationUpdateOptions { + truncate?: MlTokenizationTruncate + span?: integer +} + export interface MlOutlierDetectionParameters { compute_feature_influence?: boolean feature_influence_threshold?: double @@ -12231,12 +12349,42 @@ export interface MlPage { size?: integer } +export interface MlPassThroughInferenceOptions { + tokenization?: MlTokenizationConfigContainer + results_field?: string +} + +export interface MlPassThroughInferenceUpdateOptions { + tokenization?: MlNlpTokenizationUpdateOptions + results_field?: string +} + export interface MlPerPartitionCategorization { enabled?: boolean stop_on_warn?: boolean } -export type MlPredictedValue = string | double +export type MlPredictedValue = string | double | boolean | integer + +export interface MlQuestionAnsweringInferenceOptions { + num_top_classes?: integer + tokenization?: MlTokenizationConfigContainer + results_field?: string + max_answer_length?: integer +} + +export interface MlQuestionAnsweringInferenceUpdateOptions { + question: string + num_top_classes?: integer + tokenization?: MlNlpTokenizationUpdateOptions + results_field?: string + max_answer_length?: integer +} + +export interface MlRegressionInferenceOptions { + results_field?: Field + num_top_feature_importance_values?: integer +} export type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'stopping' @@ -12253,11 +12401,43 @@ export interface MlRunningStateSearchInterval { start_ms: long } +export interface MlTextClassificationInferenceOptions { + num_top_classes?: integer + tokenization?: MlTokenizationConfigContainer + results_field?: string + classification_labels?: string[] +} + +export interface MlTextClassificationInferenceUpdateOptions { + num_top_classes?: integer + tokenization?: MlNlpTokenizationUpdateOptions + results_field?: string + classification_labels?: string[] +} + +export interface MlTextEmbeddingInferenceOptions { + tokenization?: MlTokenizationConfigContainer + results_field?: string +} + +export interface MlTextEmbeddingInferenceUpdateOptions { + tokenization?: MlNlpTokenizationUpdateOptions + results_field?: string +} + export interface MlTimingStats { elapsed_time: integer iteration_time?: integer } +export interface MlTokenizationConfigContainer { + bert?: MlNlpBertTokenizationConfig + mpnet?: MlNlpBertTokenizationConfig + roberta?: MlNlpRobertaTokenizationConfig +} + +export type MlTokenizationTruncate = 'first' | 'second' | 'none' + export interface MlTopClassEntry { class_name: string class_probability: double @@ -12310,7 +12490,7 @@ export interface MlTrainedModelConfig { description?: string estimated_heap_memory_usage_bytes?: integer estimated_operations?: integer - inference_config: AggregationsInferenceConfigContainer 
+ inference_config: MlInferenceConfigCreateContainer input: MlTrainedModelConfigInput license_level?: string metadata?: MlTrainedModelConfigMetadata @@ -12339,14 +12519,14 @@ export interface MlTrainedModelDeploymentNodesStats { average_inference_time_ms: double error_count: integer inference_count: integer - inference_threads: integer last_access: long - model_threads: integer node: MlDiscoveryNode + number_of_allocations: integer number_of_pending_requests: integer rejection_execution_count: integer routing_state: MlTrainedModelAllocationRoutingTable start_time: long + threads_per_allocation: integer timeout_count: integer } @@ -12354,15 +12534,15 @@ export interface MlTrainedModelDeploymentStats { allocation_status: MlTrainedModelDeploymentAllocationStatus error_count: integer inference_count: integer - inference_threads: integer model_id: Id - model_threads: integer nodes: MlTrainedModelDeploymentNodesStats + number_of_allocations: integer queue_capacity: integer rejected_execution_count: integer reason: string start_time: long state: MlDeploymentState + threads_per_allocation: integer timeout_count: integer } @@ -12374,6 +12554,17 @@ export interface MlTrainedModelEntities { end_pos: integer } +export interface MlTrainedModelInferenceClassImportance { + class_name: string + importance: double +} + +export interface MlTrainedModelInferenceFeatureImportance { + feature_name: string + importance?: double + classes?: MlTrainedModelInferenceClassImportance[] +} + export interface MlTrainedModelInferenceStats { cache_miss_count: integer failure_count: integer @@ -12411,6 +12602,22 @@ export interface MlValidationLoss { loss_type: string } +export interface MlZeroShotClassificationInferenceOptions { + tokenization?: MlTokenizationConfigContainer + hypothesis_template?: string + classification_labels: string[] + results_field?: string + multi_label?: boolean + labels?: string[] +} + +export interface MlZeroShotClassificationInferenceUpdateOptions { + tokenization?: MlNlpTokenizationUpdateOptions + results_field?: string + multi_label?: boolean + labels: string[] +} + export interface MlCloseJobRequest extends RequestBase { job_id: Id allow_no_match?: boolean @@ -12876,42 +13083,43 @@ export interface MlGetJobsResponse { } export interface MlGetMemoryStatsJvmStats { - heap_max: ByteSize + heap_max?: ByteSize heap_max_in_bytes: integer - java_inference: ByteSize + java_inference?: ByteSize java_inference_in_bytes: integer - java_inference_max: ByteSize + java_inference_max?: ByteSize java_inference_max_in_bytes: integer } export interface MlGetMemoryStatsMemMlStats { - anomaly_detectors: ByteSize + anomaly_detectors?: ByteSize anomaly_detectors_in_bytes: integer - data_frame_analytics: ByteSize + data_frame_analytics?: ByteSize data_frame_analytics_in_bytes: integer - max: ByteSize + max?: ByteSize max_in_bytes: integer - native_code_overhead: ByteSize + native_code_overhead?: ByteSize native_code_overhead_in_bytes: integer - native_inference: ByteSize + native_inference?: ByteSize native_inference_in_bytes: integer } export interface MlGetMemoryStatsMemStats { - adjusted_total: ByteSize + adjusted_total?: ByteSize adjusted_total_in_bytes: integer - total: ByteSize + total?: ByteSize total_in_bytes: integer ml: MlGetMemoryStatsMemMlStats } export interface MlGetMemoryStatsMemory { - attributes: string[] + attributes: Record jvm: MlGetMemoryStatsJvmStats mem: MlGetMemoryStatsMemStats name: Name roles: string[] transport_address: TransportAddress + ephemeral_id: Id } export interface 
MlGetMemoryStatsRequest extends RequestBase { @@ -12923,7 +13131,7 @@ export interface MlGetMemoryStatsRequest extends RequestBase { export interface MlGetMemoryStatsResponse { _nodes: NodeStatistics - cluser_name: Name + cluster_name: Name nodes: Record } @@ -13032,23 +13240,18 @@ export interface MlGetTrainedModelsStatsResponse { trained_model_stats: MlTrainedModelStats[] } -export interface MlInferTrainedModelDeploymentRequest extends RequestBase { +export interface MlInferTrainedModelRequest extends RequestBase { model_id: Id timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - docs: Record[] + docs: Record[] + inference_config?: MlInferenceConfigUpdateContainer } } -export interface MlInferTrainedModelDeploymentResponse { - entities?: MlTrainedModelEntities[] - is_truncated?: boolean - predicted_value?: MlPredictedValue[] - predicted_value_sequence?: string - prediction_probability?: double - top_classes: MlTopClassEntry[] - warning?: string +export interface MlInferTrainedModelResponse { + inference_results: MlInferenceResponseResult[] } export interface MlInfoAnomalyDetectors { @@ -13382,7 +13585,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { compressed_definition?: string definition?: MlPutTrainedModelDefinition description?: string - inference_config: AggregationsInferenceConfigContainer + inference_config: MlInferenceConfigCreateContainer input: MlPutTrainedModelInput metadata?: any model_type?: MlTrainedModelType @@ -13519,9 +13722,9 @@ export interface MlStartDatafeedResponse { export interface MlStartTrainedModelDeploymentRequest extends RequestBase { model_id: Id - inference_threads?: integer - model_threads?: integer + number_of_allocations?: integer queue_capacity?: integer + threads_per_allocation?: integer timeout?: Time wait_for?: MlDeploymentAllocationState } @@ -14981,6 +15184,8 @@ export interface SecurityGlobalPrivilege { application: SecurityApplicationGlobalUserPrivileges } +export type SecurityGrantType = 'password' | 'access_token' + export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' export interface SecurityIndicesPrivileges { @@ -15044,6 +15249,44 @@ export interface SecurityUser { enabled: boolean } +export interface SecurityUserProfile { + uid: string + user: SecurityUserProfileUser + data?: Record + labels?: Record + enabled?: boolean +} + +export interface SecurityUserProfileHitMetadata { + _primary_term: long + _seq_no: SequenceNumber +} + +export interface SecurityUserProfileUser { + email?: string | null + full_name?: Name | null + metadata: Metadata + roles: string[] + username: Username +} + +export interface SecurityUserProfileWithMetadata extends SecurityUserProfile { + last_synchronized: long + _doc?: SecurityUserProfileHitMetadata +} + +export interface SecurityActivateUserProfileRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + access_token?: string + grant_type: SecurityGrantType + password?: string + username?: string + } +} + +export type SecurityActivateUserProfileResponse = SecurityUserProfileWithMetadata + export interface SecurityAuthenticateRequest extends RequestBase { } @@ -15236,6 +15479,13 @@ export interface SecurityDisableUserRequest extends RequestBase { export interface SecurityDisableUserResponse { } +export interface SecurityDisableUserProfileRequest extends RequestBase { + uid: string + refresh?: Refresh +} + +export type SecurityDisableUserProfileResponse = AcknowledgedResponseBase + export interface SecurityEnableUserRequest extends RequestBase { username: Username refresh?: Refresh @@ -15244,6 +15494,13 @@ export interface SecurityEnableUserRequest extends RequestBase { export interface SecurityEnableUserResponse { } +export interface SecurityEnableUserProfileRequest extends RequestBase { + uid: string + refresh?: Refresh +} + +export type SecurityEnableUserProfileResponse = AcknowledgedResponseBase + export interface SecurityEnrollKibanaRequest extends RequestBase { } @@ -15429,6 +15686,13 @@ export interface SecurityGetUserPrivilegesResponse { run_as: string[] } +export interface SecurityGetUserProfileRequest extends RequestBase { + uid: string + data?: string | string[] +} + +export type SecurityGetUserProfileResponse = Record + export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' export interface SecurityGrantApiKeyGrantApiKey { @@ -15472,7 +15736,7 @@ export interface SecurityHasPrivilegesIndexPrivilegesCheck { export type SecurityHasPrivilegesPrivileges = Record export interface SecurityHasPrivilegesRequest extends RequestBase { - user?: Name | null + user?: Name /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] @@ -15695,8 +15959,46 @@ export interface SecuritySamlServiceProviderMetadataResponse { metadata: string } +export interface SecuritySuggestUserProfilesRequest extends RequestBase { + data?: string | string[] + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + name?: string + size?: long + } +} + +export interface SecuritySuggestUserProfilesResponse { + total: SecuritySuggestUserProfilesTotalUserProfiles + took: long + profiles: SecurityUserProfile[] +} + +export interface SecuritySuggestUserProfilesTotalUserProfiles { + value: long + relation: RelationName +} + +export interface SecurityUpdateUserProfileDataRequest extends RequestBase { + uid: string + if_seq_no?: SequenceNumber + if_primary_term?: long + refresh?: Refresh + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/
+  body?: {
+    access?: Record
+    data?: Record
+  }
+}
+
+export type SecurityUpdateUserProfileDataResponse = AcknowledgedResponseBase
+
+export type ShutdownType = 'restart' | 'remove' | 'replace'
+
 export interface ShutdownDeleteNodeRequest extends RequestBase {
   node_id: NodeId
+  master_timeout?: TimeUnit
+  timeout?: TimeUnit
 }
 
 export type ShutdownDeleteNodeResponse = AcknowledgedResponseBase
@@ -15722,6 +16024,8 @@ export interface ShutdownGetNodePluginsStatus {
 
 export interface ShutdownGetNodeRequest extends RequestBase {
   node_id?: NodeIds
+  master_timeout?: TimeUnit
+  timeout?: TimeUnit
 }
 
 export interface ShutdownGetNodeResponse {
@@ -15738,6 +16042,15 @@ export type ShutdownGetNodeShutdownType = 'remove' | 'restart'
 
 export interface ShutdownPutNodeRequest extends RequestBase {
   node_id: NodeId
+  master_timeout?: TimeUnit
+  timeout?: TimeUnit
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
+  body?: {
+    type: ShutdownType
+    reason: string
+    allocation_delay?: string
+    target_node_name?: string
+  }
 }
 
 export type ShutdownPutNodeResponse = AcknowledgedResponseBase
@@ -16130,7 +16443,7 @@ export interface SnapshotRestoreRequest extends RequestBase {
   ignore_unavailable?: boolean
   include_aliases?: boolean
   include_global_state?: boolean
-  index_settings?: IndicesPutSettingsRequest
+  index_settings?: IndicesIndexSettings
   indices?: Indices
   partial?: boolean
   rename_pattern?: string
@@ -16687,7 +17000,7 @@ export interface TransformUpdateTransformRequest extends RequestBase {
     source?: TransformSource
     settings?: TransformSettings
     sync?: TransformSyncContainer
-    retention_policy?: TransformRetentionPolicyContainer
+    retention_policy?: TransformRetentionPolicyContainer | null
   }
 }

From be54dcd30168db9bc6ac9287298485ed1e0cbefb Mon Sep 17 00:00:00 2001
From: delvedor
Date: Mon, 23 May 2022 16:36:35 +0200
Subject: [PATCH 171/647] Bumped v8.3.0-canary.1
---
 package.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/package.json b/package.json
index 7141a2eb1..31a865fe0 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
   "version": "8.3.0",
-  "versionCanary": "8.3.0-canary.0",
+  "versionCanary": "8.3.0-canary.1",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",
@@ -91,4 +91,4 @@
     "coverage": false,
     "check-coverage": false
   }
-}
\ No newline at end of file
+}

From 24890fd11d87c12fd6275c62f9b37dfd6cf1401a Mon Sep 17 00:00:00 2001
From: Tomas Della Vedova
Date: Wed, 25 May 2022 18:15:47 +0200
Subject: [PATCH 172/647] Changelog for 8.2.1 (#1703)
---
 docs/changelog.asciidoc | 29 +++++++++++++++++++++++++++++
 src/helpers.ts          |  1 +
 2 files changed, 30 insertions(+)

diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc
index ba5ec954f..9d0d1c2dd 100644
--- a/docs/changelog.asciidoc
+++ b/docs/changelog.asciidoc
@@ -1,6 +1,35 @@
 [[changelog-client]]
 == Release notes
 
+[discrete]
+=== 8.2.1
+
+[discrete]
+==== Fixes
+
+[discrete]
+===== Support for Elasticsearch `v8.2.1`
+
+You can find all the API changes
+https://www.elastic.co/guide/en/elasticsearch/reference/8.2/release-notes-8.2.1.html[here].
+
+[discrete]
+===== Fix ndjson APIs https://github.com/elastic/elasticsearch-js/pull/1688[#1688]
+
+The previous release contained a bug that broke ndjson APIs.
+We have released `v8.2.0-patch.1` to address this.
+This fix is the same as the one we have released and we storngly recommend upgrading to this version.
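For readers who have not used them, the ndjson APIs are the endpoints (`bulk`, `msearch`, and friends) whose payload is serialized as newline-delimited JSON before being sent to Elasticsearch. A minimal sketch of such a call, assuming a locally reachable cluster; the node URL and index name are placeholder values, not part of the original patch:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

// 'http://localhost:9200/' and 'my-index' are illustrative values for this sketch.
const client = new Client({ node: 'http://localhost:9200/' })

// Each entry of `operations` becomes one line of the ndjson request body:
// an action line followed by its document line.
const result = await client.bulk({
  operations: [
    { index: { _index: 'my-index' } },
    { title: 'first document' }
  ]
})

console.log(result.errors) // false when every operation succeeded
----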
+
+[discrete]
+===== Fix node shutdown apis https://github.com/elastic/elasticsearch-js/pull/1697[#1697]
+
+The shutdown APIs weren't complete, this fix completes them.
+
+[discrete]
+==== Types: move query keys to body https://github.com/elastic/elasticsearch-js/pull/1693[#1693]
+
+The type definitions were wrongly representing the types of fields present in both query and body.
+
 [discrete]
 === 8.2.0
 
diff --git a/src/helpers.ts b/src/helpers.ts
index a96de9c7f..7df2016f2 100644
--- a/src/helpers.ts
+++ b/src/helpers.ts
@@ -19,6 +19,7 @@
 
 /* eslint-disable @typescript-eslint/naming-convention */
 /* eslint-disable @typescript-eslint/promise-function-async */
+/* eslint-disable @typescript-eslint/no-unnecessary-type-assertion */
 
 import assert from 'assert'
 import { promisify } from 'util'

From 0a510b15f9b785eaf327521f2c2d59f7e6a2ee66 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Tue, 21 Jun 2022 15:49:45 -0500
Subject: [PATCH 173/647] Fix typo in changelog

Fixes a typo: `storngly` -> `strongly`

Co-authored-by: Christiane (Tina) Heiligers
---
 docs/changelog.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc
index 9d0d1c2dd..626313bbf 100644
--- a/docs/changelog.asciidoc
+++ b/docs/changelog.asciidoc
@@ -18,7 +18,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/8.2/release-notes-8.2.1.
 The previous release contained a bug that broke ndjson APIs.
 We have released `v8.2.0-patch.1` to address this.
-This fix is the same as the one we have released and we storngly recommend upgrading to this version.
+This fix is the same as the one we have released and we strongly recommend upgrading to this version.

From 282c76fb6a8260fd7f7d04165e8c65c79ae1627c Mon Sep 17 00:00:00 2001
From: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com>
Date: Fri, 24 Jun 2022 16:59:43 -0400
Subject: [PATCH 174/647] Change 'current' to 'master' in user profile API
 links
---
 docs/reference.asciidoc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index 2fd0eda62..146c5053e 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -2983,7 +2983,7 @@ client.searchableSnapshots.stats(...)
 ==== activate_user_profile
 Creates or updates the user profile on behalf of another user.
 
-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-activate-user-profile.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-activate-user-profile.html[Endpoint documentation]
 [source,ts]
 ----
 client.security.activateUserProfile(...)
@@ -3293,7 +3293,7 @@ client.security.getUserPrivileges(...)
 ==== get_user_profile
 Retrieves user profile for the given unique ID.
 
-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-profile.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-profile.html[Endpoint documentation]
 [source,ts]
 ----
 client.security.getUserProfile(...)
@@ -3493,7 +3493,7 @@ client.security.suggestUserProfiles(...)
 ==== update_user_profile_data
 Update application specific data for the user profile of the given unique ID.
-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-user-profile-data.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-user-profile-data.html[Endpoint documentation]
 [source,ts]
 ----
 client.security.updateUserProfileData(...)
 ----

From 4ebffbc0e895cec77beb3c585ea46b41391edd4c Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Thu, 28 Jul 2022 22:48:46 +0930
Subject: [PATCH 175/647] Bumps to version 8.5.0
---
 .ci/test-matrix.yml | 2 +-
 package.json        | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml
index d29ab7347..0ab8cb1d8 100644
--- a/.ci/test-matrix.yml
+++ b/.ci/test-matrix.yml
@@ -1,6 +1,6 @@
 ---
 STACK_VERSION:
-  - "8.3.0-SNAPSHOT"
+  - "8.5.0-SNAPSHOT"
 
 NODE_JS_VERSION:
   - 18
diff --git a/package.json b/package.json
index 31a865fe0..7895eb0b6 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
-  "version": "8.3.0",
-  "versionCanary": "8.3.0-canary.1",
+  "version": "8.5.0",
+  "versionCanary": "8.5.0-canary.0",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",
@@ -91,4 +91,4 @@
     "coverage": false,
     "check-coverage": false
   }
-}
+}
\ No newline at end of file

From 8f9ed67fda9f8887275a809b46dec23a358c7753 Mon Sep 17 00:00:00 2001
From: Seth Michael Larson
Date: Mon, 15 Aug 2022 10:40:56 -0500
Subject: [PATCH 176/647] Update APIs to 8.5.0-SNAPSHOT
---
 docs/reference.asciidoc     |   74 +-
 src/api/api/_internal.ts    |  143 ---
 src/api/api/async_search.ts |    5 +-
 src/api/api/ml.ts           |   32 +-
 src/api/api/search.ts       |    5 +-
 src/api/api/security.ts     |   94 +-
 src/api/index.ts            |    8 -
 src/api/types.ts            | 1951 +++++++++++++++++++----------------
 src/api/typesWithBodyKey.ts | 1939 +++++++++++++++++-----------------
 9 files changed, 2222 insertions(+), 2029 deletions(-)
 delete mode 100644 src/api/api/_internal.ts

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index 146c5053e..521987720 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -2081,6 +2081,16 @@ client.migration.postFeatureUpgrade(...)
 [discrete]
 === ml
+[discrete]
+==== clear_trained_model_deployment_cache
+Clear the cached results from a trained model deployment
+
+https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-trained-model-deployment-cache.html[Endpoint documentation]
+[source,ts]
+----
+client.ml.clearTrainedModelDeploymentCache(...)
+----
+
 [discrete]
 ==== close_job
 Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle.
@@ -2980,23 +2990,21 @@ client.searchableSnapshots.stats(...)
 [discrete]
 === security
 [discrete]
-==== activate_user_profile
-Creates or updates the user profile on behalf of another user.
+==== authenticate
+Enables authentication as a user and retrieve information about the authenticated user.
- -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html[Endpoint documentation] +==== bulk_update_api_keys +Updates the attributes of multiple existing API keys. [source,ts] ---- -client.security.authenticate(...) +client.security.bulkUpdateApiKeys(...) ---- [discrete] @@ -3139,16 +3147,6 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-dis client.security.disableUser(...) ---- -[discrete] -==== disable_user_profile -Disables a user profile so it's not visible in user profile searches. - -https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user-profile.html[Endpoint documentation] -[source,ts] ----- -client.security.disableUserProfile(...) ----- - [discrete] ==== enable_user Enables users in the native realm. @@ -3159,16 +3157,6 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ena client.security.enableUser(...) ---- -[discrete] -==== enable_user_profile -Enables a user profile so it's visible in user profile searches. - -https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user-profile.html[Endpoint documentation] -[source,ts] ----- -client.security.enableUserProfile(...) ----- - [discrete] ==== enroll_kibana Allows a kibana instance to configure itself to communicate with a secured elasticsearch cluster. @@ -3289,16 +3277,6 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get client.security.getUserPrivileges(...) ---- -[discrete] -==== get_user_profile -Retrieves user profile for the given unique ID. - -https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-profile.html[Endpoint documentation] -[source,ts] ----- -client.security.getUserProfile(...) ----- - [discrete] ==== grant_api_key Creates an API key on behalf of another user. @@ -3480,23 +3458,11 @@ client.security.samlServiceProviderMetadata(...) ---- [discrete] -==== suggest_user_profiles -Get suggestions for user profiles that match specified search criteria. - -https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-suggest-user-profile.html[Endpoint documentation] -[source,ts] ----- -client.security.suggestUserProfiles(...) ----- - -[discrete] -==== update_user_profile_data -Update application specific data for the user profile of the given unique ID. - -https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-user-profile-data.html[Endpoint documentation] +==== update_api_key +Updates attributes of an existing API key. [source,ts] ---- -client.security.updateUserProfileData(...) +client.security.updateApiKey(...) ---- [discrete] diff --git a/src/api/api/_internal.ts b/src/api/api/_internal.ts deleted file mode 100644 index 581cde7af..000000000 --- a/src/api/api/_internal.ts +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* eslint-disable import/export */ -/* eslint-disable @typescript-eslint/no-misused-new */ -/* eslint-disable @typescript-eslint/no-extraneous-class */ -/* eslint-disable @typescript-eslint/no-unused-vars */ - -// This file was automatically generated by elastic/elastic-client-generator-js -// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, -// and elastic/elastic-client-generator-js to regenerate this file again. - -import { - Transport, - TransportRequestOptions, - TransportRequestOptionsWithMeta, - TransportRequestOptionsWithOutMeta, - TransportResult -} from '@elastic/transport' -import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } - -export default class Internal { - transport: Transport - constructor (transport: Transport) { - this.transport = transport - } - - async deleteDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async deleteDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async deleteDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined - - params = params ?? {} - for (const key in params) { - if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body') { - querystring[key] = params[key] - } - } - - const method = 'DELETE' - const path = '/_internal/desired_nodes' - return await this.transport.request({ path, method, querystring, body }, options) - } - - async getDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined - - params = params ?? 
{} - for (const key in params) { - if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body') { - querystring[key] = params[key] - } - } - - const method = 'GET' - const path = '/_internal/desired_nodes/_latest' - return await this.transport.request({ path, method, querystring, body }, options) - } - - async health (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async health (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async health (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async health (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined - - params = params ?? {} - for (const key in params) { - if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body') { - querystring[key] = params[key] - } - } - - let method = '' - let path = '' - if (params.component != null && params.feature != null) { - method = 'GET' - path = `/_internal/_health/${encodeURIComponent(params.component.toString())}/${encodeURIComponent(params.feature.toString())}` - } else if (params.component != null) { - method = 'GET' - path = `/_internal/_health/${encodeURIComponent(params.component.toString())}` - } else { - method = 'GET' - path = '/_internal/_health' - } - return await this.transport.request({ path, method, querystring, body }, options) - } - - async updateDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async updateDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async updateDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async updateDesiredNodes (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['history_id', 'version'] - const querystring: Record = {} - const body = undefined - - params = params ?? 
{} - for (const key in params) { - if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body') { - querystring[key] = params[key] - } - } - - const method = 'PUT' - const path = `/_internal/desired_nodes/${encodeURIComponent(params.history_id.toString())}/${encodeURIComponent(params.version.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) - } -} diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index 57bbcfa42..a2ea00bcc 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -114,7 +114,7 @@ export default class AsyncSearch { async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -130,7 +130,8 @@ export default class AsyncSearch { if (acceptedBody.includes(key)) { body = body ?? {} // @ts-expect-error - if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) { + if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) { // eslint-disable-line + // @ts-expect-error querystring[key] = params[key] } else { // @ts-expect-error diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index b293853a7..78edd3629 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -43,6 +43,28 @@ export default class Ml { this.transport = transport } + async clearTrainedModelDeploymentCache (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async clearTrainedModelDeploymentCache (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async clearTrainedModelDeploymentCache (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async clearTrainedModelDeploymentCache (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['model_id'] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/cache/_clear` + return await this.transport.request({ path, method, querystring, body }, options) + } + async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise @@ -967,19 +989,19 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } - async getModelSnapshotUpgradeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getModelSnapshotUpgradeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getModelSnapshotUpgradeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getModelSnapshotUpgradeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'snapshot_id'] const querystring: Record = {} const body = undefined - params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/search.ts b/src/api/api/search.ts index 0ab920b9e..0f68a72f2 100644 --- a/src/api/api/search.ts +++ b/src/api/api/search.ts @@ -42,7 +42,7 @@ export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise> export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -58,7 +58,8 @@ export default async function SearchApi + async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_security/api_key/_bulk_update' + return await this.transport.request({ path, method, querystring, body }, options) + } + async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise> async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise @@ -940,6 +962,40 @@ export default class Security { return await this.transport.request({ path, method, querystring, body }, options) } + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['uids', 'privileges'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = body != null ? 
'POST' : 'GET' + const path = '/_security/profile/_has_privileges' + return await this.transport.request({ path, method, querystring, body }, options) + } + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise @@ -1438,7 +1494,7 @@ export default class Security { async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedBody: string[] = ['name', 'size'] + const acceptedBody: string[] = ['name', 'size', 'data', 'hint'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1468,12 +1524,46 @@ export default class Security { return await this.transport.request({ path, method, querystring, body }, options) } + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['role_descriptors', 'metadata'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_security/api_key/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['uid'] - const acceptedBody: string[] = ['access', 'data'] + const acceptedBody: string[] = ['labels', 'data'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/index.ts b/src/api/index.ts index 62e786c69..c4f253984 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -26,7 +26,6 @@ // DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, // and elastic/elastic-client-generator-js to regenerate this file again. -import InternalApi from './api/_internal' import AsyncSearchApi from './api/async_search' import AutoscalingApi from './api/autoscaling' import bulkApi from './api/bulk' @@ -106,7 +105,6 @@ import XpackApi from './api/xpack' export default interface API { new(): API - Internal: InternalApi asyncSearch: AsyncSearchApi autoscaling: AutoscalingApi bulk: typeof bulkApi @@ -185,7 +183,6 @@ export default interface API { xpack: XpackApi } -const kInternal = Symbol('Internal') const kAsyncSearch = Symbol('AsyncSearch') const kAutoscaling = Symbol('Autoscaling') const kCat = Symbol('Cat') @@ -221,7 +218,6 @@ const kWatcher = Symbol('Watcher') const kXpack = Symbol('Xpack') export default class API { - [kInternal]: symbol | null [kAsyncSearch]: symbol | null [kAutoscaling]: symbol | null [kCat]: symbol | null @@ -256,7 +252,6 @@ export default class API { [kWatcher]: symbol | null [kXpack]: symbol | null constructor () { - this[kInternal] = null this[kAsyncSearch] = null this[kAutoscaling] = null this[kCat] = null @@ -338,9 +333,6 @@ API.prototype.updateByQuery = updateByQueryApi API.prototype.updateByQueryRethrottle = updateByQueryRethrottleApi Object.defineProperties(API.prototype, { - Internal: { - get () { return this[kInternal] === null ? (this[kInternal] = new InternalApi(this.transport)) : this[kInternal] } - }, asyncSearch: { get () { return this[kAsyncSearch] === null ? 
(this[kAsyncSearch] = new AsyncSearchApi(this.transport)) : this[kAsyncSearch] } }, diff --git a/src/api/types.ts b/src/api/types.ts index b4cbb39a7..d12e2f4c1 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -64,7 +64,7 @@ export interface BulkRequest ex _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields - timeout?: Time + timeout?: Duration wait_for_active_shards?: WaitForActiveShards require_alias?: boolean operations?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] @@ -113,7 +113,7 @@ export interface BulkWriteOperation extends BulkOperationBase { } export interface ClearScrollRequest extends RequestBase { - scroll_id?: Ids + scroll_id?: ScrollIds } export interface ClearScrollResponse { @@ -160,7 +160,7 @@ export interface CreateRequest extends RequestBase { pipeline?: string refresh?: Refresh routing?: Routing - timeout?: Time + timeout?: Duration version?: VersionNumber version_type?: VersionType wait_for_active_shards?: WaitForActiveShards @@ -176,7 +176,7 @@ export interface DeleteRequest extends RequestBase { if_seq_no?: SequenceNumber refresh?: Refresh routing?: Routing - timeout?: Time + timeout?: Duration version?: VersionNumber version_type?: VersionType wait_for_active_shards?: WaitForActiveShards @@ -196,25 +196,25 @@ export interface DeleteByQueryRequest extends RequestBase { from?: long ignore_unavailable?: boolean lenient?: boolean - max_docs?: long preference?: string refresh?: boolean request_cache?: boolean - requests_per_second?: long + requests_per_second?: float routing?: Routing q?: string - scroll?: Time + scroll?: Duration scroll_size?: long - search_timeout?: Time + search_timeout?: Duration search_type?: SearchType - slices?: long + slices?: Slices sort?: string[] stats?: string[] terminate_after?: long - timeout?: Time + timeout?: Duration version?: boolean wait_for_active_shards?: WaitForActiveShards wait_for_completion?: boolean + max_docs?: long query?: QueryDslQueryContainer slice?: SlicedScroll } @@ -228,25 +228,27 @@ export interface DeleteByQueryResponse { retries?: Retries slice_id?: integer task?: TaskId - throttled_millis?: long - throttled_until_millis?: long + throttled?: Duration + throttled_millis: DurationValue + throttled_until?: Duration + throttled_until_millis: DurationValue timed_out?: boolean - took?: long + took?: DurationValue total?: long version_conflicts?: long } export interface DeleteByQueryRethrottleRequest extends RequestBase { task_id: Id - requests_per_second?: long + requests_per_second?: float } export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase export interface DeleteScriptRequest extends RequestBase { id: Id - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type DeleteScriptResponse = AcknowledgedResponseBase @@ -386,7 +388,7 @@ export type GetResponse = GetGetResult export interface GetScriptRequest extends RequestBase { id: Id - master_timeout?: Time + master_timeout?: Duration } export interface GetScriptResponse { @@ -457,7 +459,7 @@ export interface IndexRequest extends RequestBase { pipeline?: string refresh?: Refresh routing?: Routing - timeout?: Time + timeout?: Duration version?: VersionNumber version_type?: VersionType wait_for_active_shards?: WaitForActiveShards @@ -555,20 +557,32 @@ export interface MsearchMultiSearchResult { export interface MsearchMultisearchBody { aggregations?: Record aggs?: Record + collapse?: SearchFieldCollapse query?: QueryDslQueryContainer explain?: boolean 
stored_fields?: Fields docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + knn?: KnnQuery from?: integer + highlight?: SearchHighlight + indices_boost?: Record[] + min_score?: double + post_filter?: QueryDslQueryContainer + profile?: boolean + rescore?: SearchRescore | SearchRescore[] + script_fields?: Record + search_after?: SortResults size?: integer sort?: Sort _source?: SearchSourceConfig + fields?: (QueryDslFieldAndFormat | Field)[] terminate_after?: long stats?: string[] timeout?: string track_scores?: boolean track_total_hits?: SearchTrackHits version?: boolean + runtime_mappings?: MappingRuntimeFields seq_no_primary_term?: boolean pit?: SearchPointInTimeReference suggest?: SearchSuggester @@ -581,7 +595,7 @@ export interface MsearchMultisearchHeader { index?: Indices preference?: string request_cache?: boolean - routing?: string + routing?: Routing search_type?: SearchType ccs_minimize_roundtrips?: boolean allow_partial_search_results?: boolean @@ -598,8 +612,9 @@ export interface MsearchRequest extends RequestBase { max_concurrent_searches?: long max_concurrent_shard_requests?: long pre_filter_shard_size?: long - search_type?: SearchType rest_total_hits_as_int?: boolean + routing?: Routing + search_type?: SearchType typed_keys?: boolean searches?: MsearchRequestItem[] } @@ -650,7 +665,6 @@ export interface MtermvectorsOperation { export interface MtermvectorsRequest extends RequestBase { index?: IndexName - ids?: Id[] fields?: Fields field_statistics?: boolean offsets?: boolean @@ -663,6 +677,7 @@ export interface MtermvectorsRequest extends RequestBase { version?: VersionNumber version_type?: VersionType docs?: MtermvectorsOperation[] + ids?: Id[] } export interface MtermvectorsResponse { @@ -681,7 +696,7 @@ export interface MtermvectorsTermVectorsResult { export interface OpenPointInTimeRequest extends RequestBase { index: Indices - keep_alive: Time + keep_alive: Duration ignore_unavailable?: boolean } @@ -697,8 +712,8 @@ export type PingResponse = boolean export interface PutScriptRequest extends RequestBase { id: Id context?: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration script: StoredScript } @@ -805,19 +820,20 @@ export interface ReindexDestination { } export interface ReindexRemoteSource { - connect_timeout: Time + connect_timeout?: Duration + headers?: Record host: Host - username: Username - password: Password - socket_timeout: Time + username?: Username + password?: Password + socket_timeout?: Duration } export interface ReindexRequest extends RequestBase { refresh?: boolean - requests_per_second?: long - scroll?: Time - slices?: long - timeout?: Time + requests_per_second?: float + scroll?: Duration + slices?: Slices + timeout?: Duration wait_for_active_shards?: WaitForActiveShards wait_for_completion?: boolean require_alias?: boolean @@ -836,13 +852,13 @@ export interface ReindexResponse { failures?: BulkIndexByScrollFailure[] noops?: long retries?: Retries - requests_per_second?: long + requests_per_second?: float slice_id?: integer task?: TaskId - throttled_millis?: EpochMillis - throttled_until_millis?: EpochMillis + throttled_millis?: EpochTime + throttled_until_millis?: EpochTime timed_out?: boolean - took?: Time + took?: DurationValue total?: long updated?: long version_conflicts?: long @@ -870,8 +886,10 @@ export interface ReindexRethrottleReindexStatus { noops: long requests_per_second: float retries: Retries - throttled_millis: long - throttled_until_millis: long + throttled?: Duration + throttled_millis: 
DurationValue + throttled_until?: Duration + throttled_until_millis: DurationValue total: long updated: long version_conflicts: long @@ -883,8 +901,8 @@ export interface ReindexRethrottleReindexTask { description: string id: long node: Name - running_time_in_nanos: long - start_time_in_millis: long + running_time_in_nanos: DurationValue + start_time_in_millis: EpochTime status: ReindexRethrottleReindexStatus type: string headers: HttpHeaders @@ -892,7 +910,7 @@ export interface ReindexRethrottleReindexTask { export interface ReindexRethrottleRequest extends RequestBase { task_id: Id - requests_per_second?: long + requests_per_second?: float } export interface ReindexRethrottleResponse { @@ -928,8 +946,8 @@ export interface ScriptsPainlessExecuteResponse { export interface ScrollRequest extends RequestBase { scroll_id?: ScrollId - scroll?: Time rest_total_hits_as_int?: boolean + scroll?: Duration } export type ScrollResponse> = SearchResponseBody @@ -944,9 +962,7 @@ export interface SearchRequest extends RequestBase { ccs_minimize_roundtrips?: boolean default_operator?: QueryDslOperator df?: string - docvalue_fields?: Fields expand_wildcards?: ExpandWildcards - explain?: boolean ignore_throttled?: boolean ignore_unavailable?: boolean lenient?: boolean @@ -956,34 +972,27 @@ export interface SearchRequest extends RequestBase { pre_filter_shard_size?: long request_cache?: boolean routing?: Routing - scroll?: Time + scroll?: Duration search_type?: SearchType - stats?: string[] - stored_fields?: Fields suggest_field?: Field suggest_mode?: SuggestMode suggest_size?: long suggest_text?: string - terminate_after?: long - timeout?: Time - track_total_hits?: SearchTrackHits - track_scores?: boolean typed_keys?: boolean rest_total_hits_as_int?: boolean - version?: boolean - _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields - seq_no_primary_term?: boolean q?: string - size?: integer - from?: integer - sort?: string | string[] aggregations?: Record aggs?: Record collapse?: SearchFieldCollapse + explain?: boolean + from?: integer highlight?: SearchHighlight + track_total_hits?: SearchTrackHits indices_boost?: Record[] + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + knn?: KnnQuery min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -991,11 +1000,21 @@ export interface SearchRequest extends RequestBase { rescore?: SearchRescore | SearchRescore[] script_fields?: Record search_after?: SortResults + size?: integer slice?: SlicedScroll + sort?: Sort + _source?: SearchSourceConfig fields?: (QueryDslFieldAndFormat | Field)[] suggest?: SearchSuggester + terminate_after?: long + timeout?: string + track_scores?: boolean + version?: boolean + seq_no_primary_term?: boolean + stored_fields?: Fields pit?: SearchPointInTimeReference runtime_mappings?: MappingRuntimeFields + stats?: string[] } export type SearchResponse> = SearchResponseBody @@ -1035,7 +1054,7 @@ export interface SearchAggregationBreakdown { export interface SearchAggregationProfile { breakdown: SearchAggregationBreakdown description: string - time_in_nanos: long + time_in_nanos: DurationValue type: string debug?: SearchAggregationProfileDebug children?: SearchAggregationProfile[] @@ -1082,7 +1101,7 @@ export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' export interface SearchCollector { name: string reason: string - time_in_nanos: long + time_in_nanos: DurationValue children?: SearchCollector[] } @@ -1095,7 +1114,7 @@ export interface SearchCompletionContext { } export 
 export interface SearchCompletionSuggest<TDocument = unknown> extends SearchSuggestBase {
-  options: SearchCompletionSuggestOption<TDocument>[]
+  options: SearchCompletionSuggestOption<TDocument> | SearchCompletionSuggestOption<TDocument>[]
 }

 export interface SearchCompletionSuggestOption<TDocument = unknown> {
@@ -1138,7 +1157,7 @@ export interface SearchDirectGenerator {
 export interface SearchFetchProfile {
   type: string
   description: string
-  time_in_nanos: long
+  time_in_nanos: DurationValue<UnitNanos>
   breakdown: SearchFetchProfileBreakdown
   debug?: SearchFetchProfileDebug
   children?: SearchFetchProfile[]
@@ -1174,51 +1193,38 @@ export interface SearchFieldSuggester {
   text?: string
 }

-export interface SearchHighlight {
-  fields: Record<Field, SearchHighlightField>
-  type?: SearchHighlighterType
-  boundary_chars?: string
-  boundary_max_scan?: integer
-  boundary_scanner?: SearchBoundaryScanner
-  boundary_scanner_locale?: string
+export interface SearchHighlight extends SearchHighlightBase {
   encoder?: SearchHighlighterEncoder
-  fragmenter?: SearchHighlighterFragmenter
-  fragment_offset?: integer
-  fragment_size?: integer
-  max_fragment_length?: integer
-  no_match_size?: integer
-  number_of_fragments?: integer
-  order?: SearchHighlighterOrder
-  post_tags?: string[]
-  pre_tags?: string[]
-  require_field_match?: boolean
-  tags_schema?: SearchHighlighterTagsSchema
-  highlight_query?: QueryDslQueryContainer
-  max_analyzed_offset?: string | integer
+  fields: Record<Field, SearchHighlightField>
 }

-export interface SearchHighlightField {
+export interface SearchHighlightBase {
+  type?: SearchHighlighterType
   boundary_chars?: string
   boundary_max_scan?: integer
   boundary_scanner?: SearchBoundaryScanner
   boundary_scanner_locale?: string
-  field?: Field
   force_source?: boolean
   fragmenter?: SearchHighlighterFragmenter
-  fragment_offset?: integer
   fragment_size?: integer
+  highlight_filter?: boolean
   highlight_query?: QueryDslQueryContainer
-  matched_fields?: Fields
   max_fragment_length?: integer
+  max_analyzed_offset?: integer
   no_match_size?: integer
   number_of_fragments?: integer
+  options?: Record<string, any>
   order?: SearchHighlighterOrder
   phrase_limit?: integer
   post_tags?: string[]
   pre_tags?: string[]
   require_field_match?: boolean
   tags_schema?: SearchHighlighterTagsSchema
-  type?: SearchHighlighterType
+}
+
+export interface SearchHighlightField extends SearchHighlightBase {
+  fragment_offset?: integer
+  matched_fields?: Fields
 }

 export type SearchHighlighterEncoder = 'default' | 'html'
@@ -1299,7 +1305,7 @@ export interface SearchNestedIdentity {
 }

 export interface SearchPhraseSuggest extends SearchSuggestBase {
-  options: SearchPhraseSuggestOption
+  options: SearchPhraseSuggestOption | SearchPhraseSuggestOption[]
 }

 export interface SearchPhraseSuggestCollate {
@@ -1342,7 +1348,7 @@ export interface SearchPhraseSuggester extends SearchSuggesterBase {
 export interface SearchPointInTimeReference {
   id: Id
-  keep_alive?: Time
+  keep_alive?: Duration
 }

 export interface SearchProfile {
@@ -1373,7 +1379,7 @@ export interface SearchQueryBreakdown {
 export interface SearchQueryProfile {
   breakdown: SearchQueryBreakdown
   description: string
-  time_in_nanos: long
+  time_in_nanos: DurationValue<UnitNanos>
   type: string
   children?: SearchQueryProfile[]
 }
@@ -1437,11 +1443,11 @@ export interface SearchSuggestBase {
 }

 export interface SearchSuggestFuzziness {
-  fuzziness: Fuzziness
-  min_length: integer
-  prefix_length: integer
-  transpositions: boolean
-  unicode_aware: boolean
+  fuzziness?: Fuzziness
+  min_length?: integer
+  prefix_length?: integer
+  transpositions?: boolean
+  unicode_aware?: boolean
 }

 export type SearchSuggestSort = 'score' | 'frequency'
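The suggest result types now declare `options` as `T | T[]`, so consumers should normalize before iterating. A small helper, not part of the client itself:

    // Not part of the client: a generic normalizer for the new `T | T[]` shape.
    function asArray<T> (options: T | T[]): T[] {
      return Array.isArray(options) ? options : [options]
    }

    // e.g. for (const option of asArray(suggestEntry.options)) { ... }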
@@ -1459,7 +1465,7 @@ export interface SearchSuggesterBase {
 }

 export interface SearchTermSuggest extends SearchSuggestBase {
-  options: SearchTermSuggestOption
+  options: SearchTermSuggestOption | SearchTermSuggestOption[]
 }

 export interface SearchTermSuggestOption {
@@ -1498,15 +1504,15 @@ export interface SearchMvtRequest extends RequestBase {
   zoom: SearchMvtZoomLevel
   x: SearchMvtCoordinate
   y: SearchMvtCoordinate
+  aggs?: Record<string, AggregationsAggregationContainer>
   exact_bounds?: boolean
   extent?: integer
+  fields?: Fields
   grid_precision?: integer
   grid_type?: SearchMvtGridType
-  size?: integer
-  aggs?: Record<string, AggregationsAggregationContainer>
-  fields?: Fields
   query?: QueryDslQueryContainer
   runtime_mappings?: MappingRuntimeFields
+  size?: integer
   sort?: Sort
   track_total_hits?: SearchTrackHits
 }
@@ -1545,18 +1551,18 @@ export interface SearchTemplateRequest extends RequestBase {
   allow_no_indices?: boolean
   ccs_minimize_roundtrips?: boolean
   expand_wildcards?: ExpandWildcards
-  explain?: boolean
   ignore_throttled?: boolean
   ignore_unavailable?: boolean
   preference?: string
-  profile?: boolean
   routing?: Routing
-  scroll?: Time
+  scroll?: Duration
   search_type?: SearchType
   rest_total_hits_as_int?: boolean
   typed_keys?: boolean
+  explain?: boolean
   id?: Id
   params?: Record<string, any>
+  profile?: boolean
   source?: string
 }
@@ -1581,7 +1587,7 @@ export interface TermsEnumRequest extends RequestBase {
   index: IndexName
   field: Field
   size?: integer
-  timeout?: Time
+  timeout?: Duration
   case_insensitive?: boolean
   index_filter?: QueryDslQueryContainer
   string?: string
@@ -1642,7 +1648,7 @@ export interface TermvectorsTerm {
   doc_freq?: integer
   score?: double
   term_freq: integer
-  tokens: TermvectorsToken[]
+  tokens?: TermvectorsToken[]
   ttf?: integer
 }
@@ -1668,9 +1674,8 @@ export interface UpdateRequest
   require_alias?: boolean
   retry_on_conflict?: integer
   routing?: Routing
-  timeout?: Time
+  timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
-  _source?: SearchSourceConfigParam
   _source_excludes?: Fields
   _source_includes?: Fields
   detect_noop?: boolean
@@ -1678,6 +1683,7 @@ export interface UpdateRequest
   doc_as_upsert?: boolean
   script?: Script
   scripted_upsert?: boolean
+  _source?: SearchSourceConfig
   upsert?: TDocument
 }
@@ -1692,36 +1698,36 @@ export interface UpdateByQueryRequest extends RequestBase {
   allow_no_indices?: boolean
   analyzer?: string
   analyze_wildcard?: boolean
-  conflicts?: Conflicts
   default_operator?: QueryDslOperator
   df?: string
   expand_wildcards?: ExpandWildcards
   from?: long
   ignore_unavailable?: boolean
   lenient?: boolean
-  max_docs?: long
   pipeline?: string
   preference?: string
   refresh?: boolean
   request_cache?: boolean
-  requests_per_second?: long
+  requests_per_second?: float
   routing?: Routing
-  scroll?: Time
+  scroll?: Duration
   scroll_size?: long
-  search_timeout?: Time
+  search_timeout?: Duration
   search_type?: SearchType
-  slices?: long
+  slices?: Slices
   sort?: string[]
   stats?: string[]
   terminate_after?: long
-  timeout?: Time
+  timeout?: Duration
   version?: boolean
   version_type?: boolean
   wait_for_active_shards?: WaitForActiveShards
   wait_for_completion?: boolean
+  max_docs?: long
   query?: QueryDslQueryContainer
   script?: Script
   slice?: SlicedScroll
+  conflicts?: Conflicts
 }

 export interface UpdateByQueryResponse {
@@ -1733,17 +1739,19 @@ export interface UpdateByQueryResponse {
   retries?: Retries
   task?: TaskId
   timed_out?: boolean
-  took?: long
+  took?: DurationValue<UnitMillis>
   total?: long
   updated?: long
   version_conflicts?: long
-  throttled_millis?: ulong
-  throttled_until_millis?: ulong
+  throttled?: Duration
+  throttled_millis?: DurationValue<UnitMillis>
+  throttled_until?: Duration
+  throttled_until_millis?: DurationValue<UnitMillis>
 }
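`UpdateByQueryRequest` now takes `slices: Slices` (which admits the `'auto'` calculation) and a float `requests_per_second`. A hedged sketch, again assuming the `estypes` re-export and a placeholder index:

    import type { estypes } from '@elastic/elasticsearch'

    const updateByQuery: estypes.UpdateByQueryRequest = {
      index: 'my-index',          // placeholder
      conflicts: 'proceed',       // body-level in the new definition
      slices: 'auto',             // Slices = integer | 'auto'
      requests_per_second: 0.5,   // float, so sub-1 throttling is expressible
      script: { source: 'ctx._source.counter++' }
    }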
export interface UpdateByQueryRethrottleRequest extends RequestBase {
   task_id: Id
-  requests_per_second?: long
+  requests_per_second?: float
 }

 export interface UpdateByQueryRethrottleResponse {
@@ -1763,6 +1771,8 @@ export interface SpecUtilsBaseNode {
   transport_address: TransportAddress
 }

+export type SpecUtilsStringified<T> = T | string
+
 export interface AcknowledgedResponseBase {
   acknowledged: boolean
 }
@@ -1779,12 +1789,12 @@ export interface BulkIndexByScrollFailure {
 export interface BulkStats {
   total_operations: long
-  total_time?: string
-  total_time_in_millis: long
+  total_time?: Duration
+  total_time_in_millis: DurationValue<UnitMillis>
   total_size?: ByteSize
   total_size_in_bytes: long
-  avg_time?: string
-  avg_time_in_millis: long
+  avg_time?: Duration
+  avg_time_in_millis: DurationValue<UnitMillis>
   avg_size?: ByteSize
   avg_size_in_bytes: long
 }
@@ -1830,11 +1840,7 @@ export type DateFormat = string

 export type DateMath = string

-export type DateMathTime = string
-
-export type DateOrEpochMillis = DateString | EpochMillis
-
-export type DateString = string
+export type DateTime = string | EpochTime<UnitMillis>

 export type Distance = string
@@ -1845,8 +1851,14 @@ export interface DocStats {
   deleted?: long
 }

+export type Duration = string | -1 | 0
+
+export type DurationLarge = string
+
+export type DurationValue<Unit> = Unit
+
 export interface ElasticsearchVersionInfo {
-  build_date: DateString
+  build_date: DateTime
   build_flavor: string
   build_hash: string
   build_snapshot: boolean
@@ -1860,7 +1872,7 @@ export interface EmptyObject {
 }

-export type EpochMillis = string | long
+export type EpochTime<Unit> = Unit

 export interface ErrorCauseKeys {
   type: string
@@ -1920,8 +1932,8 @@ export type Fields = Field | Field[]

 export interface FlushStats {
   periodic: long
   total: long
-  total_time?: string
-  total_time_in_millis: long
+  total_time?: Duration
+  total_time_in_millis: DurationValue<UnitMillis>
 }

 export type Fuzziness = string | integer
@@ -1965,14 +1977,14 @@ export type GeoTilePrecision = number

 export interface GetStats {
   current: long
-  exists_time?: string
-  exists_time_in_millis: long
+  exists_time?: Duration
+  exists_time_in_millis: DurationValue<UnitMillis>
   exists_total: long
-  missing_time?: string
-  missing_time_in_millis: long
+  missing_time?: Duration
+  missing_time_in_millis: DurationValue<UnitMillis>
   missing_total: long
-  time?: string
-  time_in_millis: long
+  time?: Duration
+  time_in_millis: DurationValue<UnitMillis>
   total: long
 }
@@ -2001,15 +2013,15 @@ export type IndexPatterns = IndexPattern[]

 export interface IndexingStats {
   index_current: long
   delete_current: long
-  delete_time?: string
-  delete_time_in_millis: long
+  delete_time?: Duration
+  delete_time_in_millis: DurationValue<UnitMillis>
   delete_total: long
   is_throttled: boolean
   noop_update_total: long
-  throttle_time?: string
-  throttle_time_in_millis: long
-  index_time?: string
-  index_time_in_millis: long
+  throttle_time?: Duration
+  throttle_time_in_millis: DurationValue<UnitMillis>
+  index_time?: Duration
+  index_time_in_millis: DurationValue<UnitMillis>
   index_total: long
   index_failed: long
   types?: Record<string, IndexingStats>
@@ -2047,6 +2059,15 @@ export interface InlineScript extends ScriptBase {
 }

 export type Ip = string

+export interface KnnQuery {
+  field: Field
+  query_vector: double[]
+  k: long
+  num_candidates: long
+  boost?: float
+  filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
+}
+
 export interface LatLonGeoLocation {
   lat: double
   lon: double
 }
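The new top-level `KnnQuery` replaces the removed `QueryDslKnnQuery` and is referenced from `SearchRequest.knn` above. A sketch with obviously made-up values:

    import type { estypes } from '@elastic/elasticsearch'

    const knn: estypes.KnnQuery = {
      field: 'embedding',              // placeholder dense_vector field
      query_vector: [0.1, 0.2, 0.3],   // made-up values
      k: 10,
      num_candidates: 100,
      filter: { term: { status: 'published' } }
    }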
@@ -2069,12 +2090,12 @@ export interface MergesStats {
   total_docs: long
   total_size?: string
   total_size_in_bytes: long
-  total_stopped_time?: string
-  total_stopped_time_in_millis: long
-  total_throttled_time?: string
-  total_throttled_time_in_millis: long
-  total_time?: string
-  total_time_in_millis: long
+  total_stopped_time?: Duration
+  total_stopped_time_in_millis: DurationValue<UnitMillis>
+  total_throttled_time?: Duration
+  total_throttled_time_in_millis: DurationValue<UnitMillis>
+  total_time?: Duration
+  total_time_in_millis: DurationValue<UnitMillis>
 }

 export type Metadata = Record<string, any>
@@ -2175,19 +2196,19 @@ export interface QueryCacheStats {

 export interface RecoveryStats {
   current_as_source: long
   current_as_target: long
-  throttle_time?: string
-  throttle_time_in_millis: long
+  throttle_time?: Duration
+  throttle_time_in_millis: DurationValue<UnitMillis>
 }

 export type Refresh = boolean | 'true' | 'false' | 'wait_for'

 export interface RefreshStats {
   external_total: long
-  external_total_time_in_millis: long
+  external_total_time_in_millis: DurationValue<UnitMillis>
   listeners: long
   total: long
-  total_time?: string
-  total_time_in_millis: long
+  total_time?: Duration
+  total_time_in_millis: DurationValue<UnitMillis>
 }

 export type RelationName = string
@@ -2248,26 +2269,32 @@ export interface ScriptTransform {

 export type ScrollId = string

+export type ScrollIds = ScrollId | ScrollId[]
+
 export interface SearchStats {
   fetch_current: long
-  fetch_time_in_millis: long
+  fetch_time?: Duration
+  fetch_time_in_millis: DurationValue<UnitMillis>
   fetch_total: long
   open_contexts?: long
   query_current: long
-  query_time_in_millis: long
+  query_time?: Duration
+  query_time_in_millis: DurationValue<UnitMillis>
   query_total: long
   scroll_current: long
-  scroll_time_in_millis: long
+  scroll_time?: Duration
+  scroll_time_in_millis: DurationValue<UnitMillis>
   scroll_total: long
   suggest_current: long
-  suggest_time_in_millis: long
+  suggest_time?: Duration
+  suggest_time_in_millis: DurationValue<UnitMillis>
   suggest_total: long
   groups?: Record<string, SearchStats>
 }

 export interface SearchTransform {
   request: WatcherSearchInputRequestDefinition
-  timeout: Time
+  timeout: Duration
 }

 export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch'
@@ -2329,6 +2356,10 @@ export interface SlicedScroll {
   max: integer
 }

+export type Slices = integer | SlicesCalculation
+
+export type SlicesCalculation = 'auto'
+
 export type Sort = SortCombinations | SortCombinations[]

 export type SortCombinations = Field | SortOptions
@@ -2382,16 +2413,12 @@ export type TaskId = string | integer

 export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem'

-export type Time = string | integer
-
-export type TimeSpan = string
+export type TimeOfDay = string

 export type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd'

 export type TimeZone = string

-export type Timestamp = string
-
 export interface TopLeftBottomRightGeoBounds {
   top_left: GeoLocation
   bottom_right: GeoLocation
@@ -2420,6 +2447,14 @@ export interface TranslogStats {

 export type TransportAddress = string

+export type UnitFloatMillis = double
+
+export type UnitMillis = long
+
+export type UnitNanos = long
+
+export type UnitSeconds = long
+
 export type Username = string

 export type Uuid = string
@@ -2439,8 +2474,8 @@ export type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' |

 export interface WarmerStats {
   current: long
   total: long
-  total_time?: string
-  total_time_in_millis: long
+  total_time?: Duration
+  total_time_in_millis: DurationValue<UnitMillis>
 }

 export interface WktGeoBounds {
@@ -2492,6 +2527,8 @@ export interface AggregationsAggregateBase {
   meta?: Metadata
 }

+export type AggregationsAggregateOrder = Partial<Record<Field, SortOrder>> | Partial<Record<Field, SortOrder>>[]
+
 export interface AggregationsAggregation {
   meta?: Metadata
   name?: string
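`Duration`, `DurationValue` and the new `Unit*` aliases separate human-readable durations from raw numeric values; on the wire both remain plain JSON. A type-level illustration, assuming the `estypes` re-export:

    import type { estypes } from '@elastic/elasticsearch'

    const human: estypes.Duration = '30s'                           // time-unit string form
    const forever: estypes.Duration = -1                            // sentinel value
    const millis: estypes.DurationValue<estypes.UnitMillis> = 1500  // just a long at runtime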
@@ -2590,7 +2627,7 @@ export interface AggregationsArrayPercentilesItem {
 }

 export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregateBase {
-  interval: DateMathTime
+  interval: DurationLarge
 }

 export interface AggregationsAutoDateHistogramAggregation extends AggregationsBucketAggregationBase {
@@ -2598,11 +2635,11 @@ export interface AggregationsAutoDateHistogramAggregation extends AggregationsBu
   field?: Field
   format?: string
   minimum_interval?: AggregationsMinimumInterval
-  missing?: DateString
+  missing?: DateTime
   offset?: string
   params?: Record<string, any>
   script?: Script
-  time_zone?: string
+  time_zone?: TimeZone
 }

 export interface AggregationsAverageAggregation extends AggregationsFormatMetricAggregationBase {
@@ -2698,8 +2735,8 @@ export interface AggregationsCardinalityAggregate extends AggregationsAggregateB
 export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase {
   precision_threshold?: integer
   rehash?: boolean
+  execution_hint?: AggregationsCardinalityExecutionMode
 }

+export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic'
+
 export interface AggregationsCategorizeTextAggregation extends AggregationsAggregation {
   field: Field
   max_unique_tokens?: integer
@@ -2777,25 +2817,25 @@ export interface AggregationsDateHistogramAggregation extends AggregationsBucket
   extended_bounds?: AggregationsExtendedBounds<AggregationsFieldDateMath>
   hard_bounds?: AggregationsExtendedBounds<AggregationsFieldDateMath>
   field?: Field
-  fixed_interval?: Time
+  fixed_interval?: Duration
   format?: string
-  interval?: Time
+  interval?: Duration
   min_doc_count?: integer
-  missing?: DateString
-  offset?: Time
-  order?: AggregationsHistogramOrder
+  missing?: DateTime
+  offset?: Duration
+  order?: AggregationsAggregateOrder
   params?: Record<string, any>
   script?: Script
-  time_zone?: string
+  time_zone?: TimeZone
   keyed?: boolean
 }

 export interface AggregationsDateHistogramBucketKeys extends AggregationsMultiBucketBase {
   key_as_string?: string
-  key: EpochMillis
+  key: EpochTime<UnitMillis>
 }
 export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys
& { [property: string]: AggregationsAggregate | string | EpochTime<UnitMillis> | long }

 export interface AggregationsDateRangeAggregate extends AggregationsRangeAggregate {
 }
@@ -2805,7 +2845,7 @@ export interface AggregationsDateRangeAggregation extends AggregationsBucketAggr
   format?: string
   missing?: AggregationsMissing
   ranges?: AggregationsDateRangeExpression[]
-  time_zone?: string
+  time_zone?: TimeZone
   keyed?: boolean
 }
@@ -3038,7 +3078,7 @@ export interface AggregationsHistogramAggregation extends AggregationsBucketAggr
   min_doc_count?: integer
   missing?: double
   offset?: double
-  order?: AggregationsHistogramOrder
+  order?: AggregationsAggregateOrder
   script?: Script
   format?: string
   keyed?: boolean
@@ -3051,11 +3091,6 @@ export interface AggregationsHistogramBucketKeys extends AggregationsMultiBucket
 export type AggregationsHistogramBucket = AggregationsHistogramBucketKeys
& { [property: string]: AggregationsAggregate | string | double | long }

-export interface AggregationsHistogramOrder {
-  _count?: SortOrder
-  _key?: SortOrder
-}
-
 export interface AggregationsHoltLinearModelSettings {
   alpha?: float
   beta?: float
@@ -3274,6 +3309,13 @@ export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggreg
 }
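`order` on the histogram-style aggregations now uses the shared `AggregationsAggregateOrder` instead of the removed `AggregationsHistogramOrder`. A hedged sketch of a date histogram under the new shape:

    import type { estypes } from '@elastic/elasticsearch'

    const agg: estypes.AggregationsDateHistogramAggregation = {
      field: 'timestamp',        // placeholder date field
      fixed_interval: '1d',      // Duration instead of Time
      time_zone: 'UTC',
      order: { _key: 'asc' }     // AggregationsAggregateOrder
    }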
 export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase {
+  collect_mode?: AggregationsTermsAggregationCollectMode
+  order?: AggregationsAggregateOrder
+  min_doc_count?: long
+  shard_min_doc_count?: long
+  shard_size?: integer
+  show_term_doc_count_error?: boolean
+  size?: integer
   terms: AggregationsMultiTermLookup[]
 }
@@ -3633,7 +3675,7 @@ export interface AggregationsTermsAggregation extends AggregationsBucketAggregat
   missing_order?: AggregationsMissingOrder
   missing_bucket?: boolean
   value_type?: string
-  order?: AggregationsTermsAggregationOrder
+  order?: AggregationsAggregateOrder
   script?: Script
   shard_size?: integer
   show_term_doc_count_error?: boolean
@@ -3645,8 +3687,6 @@ export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_f

 export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality'

-export type AggregationsTermsAggregationOrder = Record<Field, SortOrder> | Record<Field, SortOrder>[]
-
 export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase {
   doc_count_error?: long
 }
@@ -3767,7 +3807,7 @@ export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnaly

 export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase {
   type: 'asciifolding'
-  preserve_original: boolean
+  preserve_original?: boolean
 }

 export type AnalysisCharFilter = string | AnalysisCharFilterDefinition
@@ -3827,8 +3867,8 @@ export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity'

 export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase {
   type: 'delimited_payload'
-  delimiter: string
-  encoding: AnalysisDelimitedPayloadEncoding
+  delimiter?: string
+  encoding?: AnalysisDelimitedPayloadEncoding
 }

 export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
@@ -3844,8 +3884,8 @@ export type AnalysisEdgeNGramSide = 'front' | 'back'

 export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase {
   type: 'edge_ngram'
-  max_gram: integer
-  min_gram: integer
+  max_gram?: integer
+  min_gram?: integer
   side?: AnalysisEdgeNGramSide
   preserve_original?: boolean
 }
@@ -3860,8 +3900,9 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {

 export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
   type: 'elision'
-  articles: string[]
-  articles_case: boolean
+  articles?: string[]
+  articles_path?: string
+  articles_case?: boolean
 }

 export interface AnalysisFingerprintAnalyzer {
@@ -3876,8 +3917,8 @@ export interface AnalysisFingerprintAnalyzer {

 export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase {
   type: 'fingerprint'
-  max_output_size: integer
-  separator: string
+  max_output_size?: integer
+  separator?: string
 }

 export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase {
@@ -3886,10 +3927,10 @@ export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase {

 export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase {
   type: 'hunspell'
-  dedup: boolean
-  dictionary: string
+  dedup?: boolean
+  dictionary?: string
   locale: string
-  longest_only: boolean
+  longest_only?: boolean
 }

 export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
@@ -4048,8 +4089,8 @@ export interface AnalysisLanguageAnalyzer {

 export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase {
   type: 'length'
-  max: integer
-  min: integer
+  max?: integer
+  min?: integer
 }
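Many analysis token filters drop their previously required properties, matching the server behaviour of applying defaults when they are omitted. A sketch of what now typechecks:

    import type { estypes } from '@elastic/elasticsearch'

    // With the loosened definitions a filter can rely on server-side defaults.
    const edgeNgram: estypes.AnalysisEdgeNGramTokenFilter = {
      type: 'edge_ngram'          // min_gram / max_gram may now be omitted
    }

    const folding: estypes.AnalysisAsciiFoldingTokenFilter = {
      type: 'asciifolding'        // preserve_original is optional too
    }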
 export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase {
@@ -4058,8 +4099,8 @@ export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase {

 export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase {
   type: 'limit'
-  consume_all_tokens: boolean
-  max_token_count: integer
+  consume_all_tokens?: boolean
+  max_token_count?: integer
 }

 export interface AnalysisLowercaseNormalizer {
@@ -4077,14 +4118,14 @@ export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase {

 export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase {
   type: 'mapping'
-  mappings: string[]
+  mappings?: string[]
   mappings_path?: string
 }

 export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase {
   type: 'multiplexer'
   filters: string[]
-  preserve_original: boolean
+  preserve_original?: boolean
 }

 export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase {
@@ -4114,7 +4155,7 @@ export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed'

 export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase {
   type: 'nori_part_of_speech'
-  stoptags: string[]
+  stoptags?: string[]
 }

 export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase {
@@ -4148,7 +4189,7 @@ export interface AnalysisPatternAnalyzer {

 export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase {
   type: 'pattern_capture'
   patterns: string[]
-  preserve_original: boolean
+  preserve_original?: boolean
 }

 export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase {
@@ -4160,9 +4201,10 @@ export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase

 export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase {
   type: 'pattern_replace'
-  flags: string
+  all?: boolean
+  flags?: string
   pattern: string
-  replacement: string
+  replacement?: string
 }

 export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase {
@@ -4269,7 +4311,7 @@ export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase {
   type: 'stop'
   ignore_case?: boolean
   remove_trailing?: boolean
-  stopwords: AnalysisStopWords
+  stopwords?: AnalysisStopWords
   stopwords_path?: string
 }
@@ -4323,7 +4365,7 @@ export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {

 export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase {
   type: 'truncate'
-  length: integer
+  length?: integer
 }

 export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase {
@@ -4437,8 +4479,8 @@ export interface MappingConstantKeywordProperty extends MappingPropertyBase {
   type: 'constant_keyword'
 }

-export type MappingCoreProperty = MappingObjectProperty | MappingNestedProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingDocValuesProperty | MappingMatchOnlyTextProperty
-
 export interface MappingCorePropertyBase extends MappingPropertyBase {
   copy_to?: Fields
   similarity?: string
@@ -4454,7 +4494,7 @@ export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase {
   format?: string
   ignore_malformed?: boolean
   index?: boolean
-  null_value?: DateString
+  null_value?: DateTime
   precision_step?: integer
   type: 'date_nanos'
 }
@@ -4465,7 +4505,7 @@ export interface MappingDateProperty extends MappingDocValuesPropertyBase {
   format?: string
   ignore_malformed?: boolean
   index?: boolean
-  null_value?: DateTime
   precision_step?: integer
   locale?: string
   type: 'date'
 }
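`null_value` on the `date` and `date_nanos` mappings becomes a `DateTime` (ISO string or epoch value) rather than the removed `DateString`. A hedged mapping snippet:

    import type { estypes } from '@elastic/elasticsearch'

    const created: estypes.MappingDateProperty = {
      type: 'date',
      format: 'strict_date_optional_time',
      null_value: '1970-01-01T00:00:00Z'   // DateTime accepts ISO strings or epoch millis
    }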
@@ -4490,8 +4530,6 @@ export interface MappingDenseVectorProperty extends MappingPropertyBase {
   index_options?: MappingDenseVectorIndexOptions
 }

-export type MappingDocValuesProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDateProperty | MappingDateNanosProperty | MappingKeywordProperty | MappingNumberProperty | MappingRangeProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingCompletionProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingShapeProperty | MappingTokenCountProperty | MappingVersionProperty | MappingWildcardProperty | MappingPointProperty
-
 export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase {
   doc_values?: boolean
 }
@@ -4507,6 +4545,32 @@ export interface MappingDoubleRangeProperty extends MappingRangePropertyBase {
 }

 export type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false'

+export interface MappingDynamicProperty extends MappingDocValuesPropertyBase {
+  type: '{dynamic_property}'
+  enabled?: boolean
+  null_value?: FieldValue
+  boost?: double
+  coerce?: boolean
+  script?: Script
+  on_script_error?: MappingOnScriptError
+  ignore_malformed?: boolean
+  time_series_metric?: MappingTimeSeriesMetricType
+  analyzer?: string
+  eager_global_ordinals?: boolean
+  index?: boolean
+  index_options?: MappingIndexOptions
+  index_phrases?: boolean
+  index_prefixes?: MappingTextIndexPrefixes
+  norms?: boolean
+  position_increment_gap?: integer
+  search_analyzer?: string
+  search_quote_analyzer?: string
+  term_vector?: MappingTermVectorOption
+  format?: string
+  precision_step?: integer
+  locale?: string
+}
+
 export interface MappingDynamicTemplate {
   mapping?: MappingProperty
   match?: string
@@ -4659,8 +4723,6 @@ export interface MappingNestedProperty extends MappingCorePropertyBase {
   type: 'nested'
 }

-export type MappingNumberProperty = MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingDoubleNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingShortNumberProperty | MappingByteNumberProperty | MappingUnsignedLongNumberProperty | MappingScaledFloatNumberProperty
-
 export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase {
   index?: boolean
   ignore_malformed?: boolean
@@ -4685,7 +4747,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase {
   type: 'point'
 }

-export type MappingProperty = MappingFlattenedProperty | MappingJoinProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingDenseVectorProperty | MappingAggregateMetricDoubleProperty | MappingCoreProperty
+export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty

 export interface MappingPropertyBase {
   local_metadata?: Metadata
@@ -4696,8 +4758,6 @@ export interface MappingPropertyBase {
   fields?: Record<PropertyName, MappingProperty>
 }

-export type MappingRangeProperty = MappingLongRangeProperty | MappingIpRangeProperty | MappingIntegerRangeProperty | MappingFloatRangeProperty | MappingDoubleRangeProperty | MappingDateRangeProperty
-
 export interface MappingRangePropertyBase extends MappingDocValuesPropertyBase {
   boost?: double
   coerce?: boolean
@@ -4898,9 +4958,9 @@ export interface QueryDslConstantScoreQuery extends QueryDslQueryBase {

 export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase {
 }
 export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement<DateMath, Time> | QueryDslMultiValueMode }
+& { [property: string]: QueryDslDecayPlacement<DateMath, Duration> | QueryDslMultiValueMode }

-export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase<DateMath, Time> {
+export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase<DateMath, Duration> {
 }

 export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase {
@@ -5144,12 +5204,6 @@ export interface QueryDslIntervalsWildcard {
   use_field?: Field
 }

-export interface QueryDslKnnQuery extends QueryDslQueryBase {
-  field: Field
-  num_candidates: integer
-  query_vector: double[]
-}
-
 export type QueryDslLike = string | QueryDslLikeDocument

 export interface QueryDslLikeDocument {
@@ -5339,7 +5393,6 @@ export interface QueryDslQueryContainer {
   has_parent?: QueryDslHasParentQuery
   ids?: QueryDslIdsQuery
   intervals?: Partial<Record<Field, QueryDslIntervalsQuery>>
-  knn?: QueryDslKnnQuery
   match?: Partial<Record<Field, QueryDslMatchQuery>>
   match_all?: QueryDslMatchAllQuery
   match_bool_prefix?: Partial<Record<Field, QueryDslMatchBoolPrefixQuery>>
@@ -5614,7 +5667,7 @@ export interface AsyncSearchAsyncSearch
   suggest?: Record<SuggestionName, SearchSuggest<TDocument>[]>
   terminated_early?: boolean
@@ -5630,8 +5683,10 @@ export interface AsyncSearchAsyncSearchResponseBase {
   id?: Id
   is_partial: boolean
   is_running: boolean
-  expiration_time_in_millis: EpochMillis
-  start_time_in_millis: EpochMillis
+  expiration_time?: DateTime
+  expiration_time_in_millis: EpochTime<UnitMillis>
+  start_time?: DateTime
+  start_time_in_millis: EpochTime<UnitMillis>
 }

 export interface AsyncSearchDeleteRequest extends RequestBase {
@@ -5642,9 +5697,9 @@ export type AsyncSearchDeleteResponse = AcknowledgedResponseBase

 export interface AsyncSearchGetRequest extends RequestBase {
   id: Id
-  keep_alive?: Time
+  keep_alive?: Duration
   typed_keys?: boolean
-  wait_for_completion_timeout?: Time
+  wait_for_completion_timeout?: Duration
 }

 export type AsyncSearchGetResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate> | undefined> = AsyncSearchAsyncSearchDocumentResponseBase<TDocument, TAggregations>
@@ -5662,9 +5717,9 @@ export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSea

 export interface AsyncSearchSubmitRequest extends RequestBase {
   index?: Indices
-  wait_for_completion_timeout?: Time
+  wait_for_completion_timeout?: Duration
   keep_on_completion?: boolean
-  keep_alive?: Time
+  keep_alive?: Duration
   allow_no_indices?: boolean
   allow_partial_search_results?: boolean
   analyzer?: string
   analyze_wildcard?: boolean
   ccs_minimize_roundtrips?: boolean
   default_operator?: QueryDslOperator
   df?: string
-  docvalue_fields?: Fields
   expand_wildcards?: ExpandWildcards
-  explain?: boolean
   ignore_throttled?: boolean
   ignore_unavailable?: boolean
   lenient?: boolean
@@ -5685,34 +5738,27 @@ export interface AsyncSearchSubmitRequest extends RequestBase {
   pre_filter_shard_size?: long
   request_cache?: boolean
   routing?: Routing
-  scroll?: Time
+  scroll?: Duration
   search_type?: SearchType
-  stats?: string[]
-  stored_fields?: Fields
   suggest_field?: Field
   suggest_mode?: SuggestMode
   suggest_size?: long
   suggest_text?: string
-  terminate_after?: long
-  timeout?: Time
-  track_total_hits?: SearchTrackHits
-  track_scores?: boolean
   typed_keys?: boolean
   rest_total_hits_as_int?: boolean
-  version?: boolean
-  _source?: SearchSourceConfigParam
   _source_excludes?: Fields
   _source_includes?: Fields
-  seq_no_primary_term?: boolean
   q?: string
-  size?: integer
-  from?: integer
-  sort?: string | string[]
   aggregations?: Record<string, AggregationsAggregationContainer>
   aggs?: Record<string, AggregationsAggregationContainer>
   collapse?: SearchFieldCollapse
+  explain?: boolean
+  from?: integer
   highlight?: SearchHighlight
+  track_total_hits?: SearchTrackHits
   indices_boost?: Record<IndexName, double>[]
+  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
+  knn?: KnnQuery
   min_score?: double
   post_filter?: QueryDslQueryContainer
   profile?: boolean
@@ -5720,11 +5766,21 @@ export interface AsyncSearchSubmitRequest extends RequestBase {
   rescore?: SearchRescore | SearchRescore[]
   script_fields?: Record<string, ScriptField>
   search_after?: SortResults
+  size?: integer
   slice?: SlicedScroll
+  sort?: Sort
+  _source?: SearchSourceConfig
   fields?: (QueryDslFieldAndFormat | Field)[]
   suggest?: SearchSuggester
+  terminate_after?: long
+  timeout?: string
+  track_scores?: boolean
+  version?: boolean
+  seq_no_primary_term?: boolean
+  stored_fields?: Fields
   pit?: SearchPointInTimeReference
   runtime_mappings?: MappingRuntimeFields
+  stats?: string[]
 }

 export type AsyncSearchSubmitResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate> | undefined> = AsyncSearchAsyncSearchDocumentResponseBase<TDocument, TAggregations>
@@ -5886,13 +5942,13 @@ export interface CatComponentTemplatesRequest extends CatCatRequestBase {

 export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[]

 export interface CatCountCountRecord {
-  epoch?: EpochMillis
-  t?: EpochMillis
-  time?: EpochMillis
-  timestamp?: DateString
-  ts?: DateString
-  hms?: DateString
-  hhmmss?: DateString
+  epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+  t?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+  time?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+  timestamp?: TimeOfDay
+  ts?: TimeOfDay
+  hms?: TimeOfDay
+  hhmmss?: TimeOfDay
   count?: string
   dc?: string
   'docs.count'?: string
@@ -5925,12 +5981,12 @@ export interface CatFielddataRequest extends CatCatRequestBase {

 export type CatFielddataResponse = CatFielddataFielddataRecord[]

 export interface CatHealthHealthRecord {
-  epoch?: EpochMillis
-  time?: EpochMillis
-  timestamp?: DateString
-  ts?: DateString
-  hms?: DateString
-  hhmmss?: DateString
+  epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+  time?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+  timestamp?: TimeOfDay
+  ts?: TimeOfDay
+  hms?: TimeOfDay
+  hhmmss?: TimeOfDay
   cluster?: string
   cl?: string
   status?: string
@@ -6353,7 +6409,7 @@ export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase {
   bytes?: Bytes
   h?: CatCatDfaColumns
   s?: CatCatDfaColumns
-  time?: Time
+  time?: Duration
 }

 export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[]
@@ -6616,8 +6672,8 @@ export interface CatMlTrainedModelsTrainedModelsRecord {
   modelOperations?: string
   license?: string
   l?: string
-  create_time?: DateString
-  ct?: DateString
+  create_time?: DateTime
+  ct?: DateTime
   version?: VersionString
   v?: VersionString
   description?: string
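The cat records wrap their epoch columns in `SpecUtilsStringified<EpochTime<UnitSeconds>>`, i.e. `number | string`, because cat output always arrives as text. A defensive reader, assuming the `estypes` re-export:

    import type { estypes } from '@elastic/elasticsearch'

    // SpecUtilsStringified<T> = T | string, so coerce before doing arithmetic.
    function epochSeconds (record: estypes.CatCountCountRecord): number | undefined {
      return record.epoch === undefined ? undefined : Number(record.epoch)
    }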
@@ -6989,17 +7045,17 @@ export interface CatRecoveryRecoveryRecord {
   shard?: string
   s?: string
   sh?: string
-  start_time?: string
-  start?: string
-  start_time_millis?: string
-  start_millis?: string
-  stop_time?: string
-  stop?: string
-  stop_time_millis?: string
-  stop_millis?: string
-  time?: string
-  t?: string
-  ti?: string
+  start_time?: DateTime
+  start?: DateTime
+  start_time_millis?: EpochTime<UnitMillis>
+  start_millis?: EpochTime<UnitMillis>
+  stop_time?: DateTime
+  stop?: DateTime
+  stop_time_millis?: EpochTime<UnitMillis>
+  stop_millis?: EpochTime<UnitMillis>
+  time?: Duration
+  t?: Duration
+  ti?: Duration
   type?: string
   ty?: string
   stage?: string
@@ -7346,20 +7402,20 @@ export interface CatSnapshotsSnapshotsRecord {
   repo?: string
   status?: string
   s?: string
-  start_epoch?: EpochMillis
-  ste?: EpochMillis
-  startEpoch?: EpochMillis
-  start_time?: DateString
-  sti?: DateString
-  startTime?: DateString
-  end_epoch?: EpochMillis
-  ete?: EpochMillis
-  endEpoch?: EpochMillis
-  end_time?: DateString
-  eti?: DateString
-  endTime?: DateString
-  duration?: Time
-  dur?: Time
+  start_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+  ste?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+  startEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+  start_time?: WatcherScheduleTimeOfDay
+  sti?: WatcherScheduleTimeOfDay
+  startTime?: WatcherScheduleTimeOfDay
+  end_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+  ete?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+  endEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+  end_time?: TimeOfDay
+  eti?: TimeOfDay
+  endTime?: TimeOfDay
+  duration?: Duration
+  dur?: Duration
   indices?: string
   i?: string
   successful_shards?: string
@@ -7438,7 +7494,7 @@ export interface CatTemplatesTemplatesRecord {

 export interface CatThreadPoolRequest extends CatCatRequestBase {
   thread_pool_patterns?: Names
-  time?: Time
+  time?: TimeUnit
 }

 export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[]
@@ -7492,7 +7548,7 @@ export interface CatTransformsRequest extends CatCatRequestBase {
   from?: integer
   h?: CatCatTransformColumns
   s?: CatCatTransformColumns
-  time?: Time
+  time?: TimeUnit
   size?: integer
 }
@@ -7608,10 +7664,14 @@ export interface CcrShardStats {
   shard_id: integer
   successful_read_requests: long
   successful_write_requests: long
-  time_since_last_read_millis: EpochMillis
-  total_read_remote_exec_time_millis: EpochMillis
-  total_read_time_millis: EpochMillis
-  total_write_time_millis: EpochMillis
+  time_since_last_read?: Duration
+  time_since_last_read_millis: DurationValue<UnitMillis>
+  total_read_remote_exec_time?: Duration
+  total_read_remote_exec_time_millis: DurationValue<UnitMillis>
+  total_read_time?: Duration
+  total_read_time_millis: DurationValue<UnitMillis>
+  total_write_time?: Duration
+  total_write_time_millis: DurationValue<UnitMillis>
   write_buffer_operation_count: long
   write_buffer_size_in_bytes: ByteSize
 }
@@ -7630,12 +7690,12 @@ export interface CcrFollowRequest extends RequestBase {
   max_outstanding_write_requests?: long
   max_read_request_operation_count?: long
   max_read_request_size?: string
-  max_retry_delay?: Time
+  max_retry_delay?: Duration
   max_write_buffer_count?: long
   max_write_buffer_size?: string
   max_write_request_operation_count?: long
   max_write_request_size?: string
-  read_poll_timeout?: Time
+  read_poll_timeout?: Duration
   remote_cluster?: string
 }
@@ -7658,12 +7718,12 @@ export interface CcrFollowInfoFollowerIndexParameters {
   max_outstanding_write_requests: integer
   max_read_request_operation_count: integer
   max_read_request_size: string
-  max_retry_delay: Time
+  max_retry_delay: Duration
   max_write_buffer_count: integer
   max_write_buffer_size: string
   max_write_request_operation_count: integer
   max_write_request_size: string
-  read_poll_timeout: Time
+  read_poll_timeout: Duration
 }

 export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused'
@@ -7739,10 +7799,10 @@ export interface CcrPutAutoFollowPatternRequest extends RequestBase {
   max_outstanding_read_requests?: integer
   settings?: Record<string, any>
   max_outstanding_write_requests?: integer
-  read_poll_timeout?: Time
+  read_poll_timeout?: Duration
   max_read_request_operation_count?: integer
   max_read_request_size?: ByteSize
-  max_retry_delay?: Time
+  max_retry_delay?: Duration
   max_write_buffer_count?: integer
   max_write_buffer_size?: ByteSize
   max_write_request_operation_count?: integer
@@ -7763,12 +7823,12 @@ export interface CcrResumeFollowRequest extends RequestBase {
   max_outstanding_write_requests?: long
   max_read_request_operation_count?: long
   max_read_request_size?: string
-  max_retry_delay?: Time
+  max_retry_delay?: Duration
   max_write_buffer_count?: long
   max_write_buffer_size?: string
   max_write_request_operation_count?: long
   max_write_request_size?: string
-  read_poll_timeout?: Time
+  read_poll_timeout?: Duration
 }

 export type CcrResumeFollowResponse = AcknowledgedResponseBase
@@ -7784,7 +7844,7 @@ export interface CcrStatsAutoFollowStats {

 export interface CcrStatsAutoFollowedCluster {
   cluster_name: Name
   last_seen_metadata_version: VersionNumber
-  time_since_last_check_millis: DateString
+  time_since_last_check_millis: DurationValue<UnitMillis>
 }

 export interface CcrStatsFollowStats {
@@ -7903,8 +7963,8 @@ export interface ClusterAllocationExplainReservedSize {

 export interface ClusterAllocationExplainResponse {
   allocate_explanation?: string
-  allocation_delay?: string
-  allocation_delay_in_millis?: long
+  allocation_delay?: Duration
+  allocation_delay_in_millis?: DurationValue<UnitMillis>
   can_allocate?: ClusterAllocationExplainDecision
   can_move_to_other_node?: ClusterAllocationExplainDecision
   can_rebalance_cluster?: ClusterAllocationExplainDecision
@@ -7913,8 +7973,8 @@ export interface ClusterAllocationExplainResponse {
   can_remain_decisions?: ClusterAllocationExplainAllocationDecision[]
   can_remain_on_current_node?: ClusterAllocationExplainDecision
   cluster_info?: ClusterAllocationExplainClusterInfo
-  configured_delay?: string
-  configured_delay_in_millis?: long
+  configured_delay?: Duration
+  configured_delay_in_millis?: DurationValue<UnitMillis>
   current_node?: ClusterAllocationExplainCurrentNode
   current_state: string
   index: IndexName
@@ -7922,15 +7982,15 @@ export interface ClusterAllocationExplainResponse {
   node_allocation_decisions?: ClusterAllocationExplainNodeAllocationExplanation[]
   primary: boolean
   rebalance_explanation?: string
-  remaining_delay?: string
-  remaining_delay_in_millis?: long
+  remaining_delay?: Duration
+  remaining_delay_in_millis?: DurationValue<UnitMillis>
   shard: integer
   unassigned_info?: ClusterAllocationExplainUnassignedInformation
   note?: string
 }

 export interface ClusterAllocationExplainUnassignedInformation {
-  at: DateString
+  at: DateTime
   last_allocation_status?: string
   reason: ClusterAllocationExplainUnassignedInformationReason
   details?: string
@@ -7943,8 +8003,8 @@ export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED
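Allocation-explain responses now pair each delay as an optional human-readable `Duration` plus a numeric `*_in_millis` `DurationValue`; the millis twin is the one that is reliably present unless `human=true` output was requested. A sketch of a defensive reader:

    import type { estypes } from '@elastic/elasticsearch'

    function remainingDelayMs (explanation: estypes.ClusterAllocationExplainResponse): number {
      // remaining_delay (e.g. '59.8s') only appears with human output; prefer the millis field.
      return explanation.remaining_delay_in_millis ?? 0
    }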
 export interface ClusterDeleteComponentTemplateRequest extends RequestBase {
   name: Names
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase
@@ -7957,7 +8017,7 @@ export type ClusterDeleteVotingConfigExclusionsResponse = boolean

 export interface ClusterExistsComponentTemplateRequest extends RequestBase {
   name: Names
-  master_timeout?: Time
+  master_timeout?: Duration
   local?: boolean
 }
@@ -7967,7 +8027,7 @@ export interface ClusterGetComponentTemplateRequest extends RequestBase {
   name?: Name
   flat_settings?: boolean
   local?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
 }

 export interface ClusterGetComponentTemplateResponse {
@@ -7977,8 +8037,8 @@ export interface ClusterGetSettingsRequest extends RequestBase {
   flat_settings?: boolean
   include_defaults?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export interface ClusterGetSettingsResponse {
@@ -8004,8 +8064,8 @@ export interface ClusterHealthRequest extends RequestBase {
   expand_wildcards?: ExpandWildcards
   level?: Level
   local?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
   wait_for_events?: WaitForEvents
   wait_for_nodes?: string | integer
@@ -8028,7 +8088,8 @@ export interface ClusterHealthResponse {
   number_of_pending_tasks: integer
   relocating_shards: integer
   status: HealthStatus
-  task_max_waiting_in_queue_millis: EpochMillis
+  task_max_waiting_in_queue?: Duration
+  task_max_waiting_in_queue_millis: DurationValue<UnitMillis>
   timed_out: boolean
   unassigned_shards: integer
 }
@@ -8047,13 +8108,13 @@ export interface ClusterPendingTasksPendingTask {
   insert_order: integer
   priority: string
   source: string
-  time_in_queue: string
-  time_in_queue_millis: integer
+  time_in_queue?: Duration
+  time_in_queue_millis: DurationValue<UnitMillis>
 }

 export interface ClusterPendingTasksRequest extends RequestBase {
   local?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
 }

 export interface ClusterPendingTasksResponse {
@@ -8063,7 +8124,7 @@ export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase {
   node_names?: Names
   node_ids?: Ids
-  timeout?: Time
+  timeout?: Duration
 }

 export type ClusterPostVotingConfigExclusionsResponse = boolean
@@ -8071,7 +8132,7 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase {
   name: Name
   create?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
   template: IndicesIndexState
   aliases?: Record<string, IndicesAliasDefinition>
   mappings?: MappingTypeMapping
@@ -8084,8 +8145,8 @@ export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase

 export interface ClusterPutSettingsRequest extends RequestBase {
   flat_settings?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   persistent?: Record<string, any>
   transient?: Record<string, any>
 }
@@ -8101,7 +8162,7 @@ export type ClusterRemoteInfoClusterRemoteInfo = ClusterRemoteInfoClusterRemoteS

 export interface ClusterRemoteInfoClusterRemoteProxyInfo {
   mode: 'proxy'
   connected: boolean
-  initial_connect_timeout: Time
+  initial_connect_timeout: Duration
   skip_unavailable: boolean
   proxy_address: string
   server_name: string
@@ -8114,7 +8175,7 @@ export interface ClusterRemoteInfoClusterRemoteSniffInfo {
   connected: boolean
   max_connections_per_cluster: integer
   num_nodes_connected: long
-  initial_connect_timeout: Time
+  initial_connect_timeout: Duration
   skip_unavailable: boolean
   seeds: string[]
 }
@@ -8164,8 +8225,8 @@ export interface ClusterRerouteRequest extends RequestBase {
   explain?: boolean
   metric?: Metrics
   retry_failed?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   commands?: ClusterRerouteCommand[]
 }
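All of the cluster request timeouts move from `Time` to `Duration`, which callers pass as plain time-unit strings. A hedged sketch (`wait_for_status` is assumed from the full definition; it is not visible in this hunk):

    import type { estypes } from '@elastic/elasticsearch'

    const health: estypes.ClusterHealthRequest = {
      wait_for_status: 'yellow',   // assumed field, present in the full ClusterHealthRequest
      timeout: '30s'               // Duration
    }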
@@ -8204,9 +8265,9 @@ export interface ClusterStateRequest extends RequestBase {
   flat_settings?: boolean
   ignore_unavailable?: boolean
   local?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
   wait_for_metadata_version?: VersionNumber
-  wait_for_timeout?: Time
+  wait_for_timeout?: Duration
 }

 export type ClusterStateResponse = any
@@ -8261,7 +8322,7 @@ export interface ClusterStatsClusterIngest {
 }

 export interface ClusterStatsClusterJvm {
-  max_uptime_in_millis: long
+  max_uptime_in_millis: DurationValue<UnitMillis>
   mem: ClusterStatsClusterJvmMemory
   threads: long
   versions: ClusterStatsClusterJvmVersion[]
@@ -8362,7 +8423,8 @@ export interface ClusterStatsClusterProcessor {
   count: long
   current: long
   failed: long
-  time_in_millis: long
+  time?: Duration
+  time_in_millis: DurationValue<UnitMillis>
 }

 export interface ClusterStatsClusterShardMetrics {
@@ -8429,7 +8491,7 @@ export interface ClusterStatsOperatingSystemMemoryInfo {

 export interface ClusterStatsRequest extends RequestBase {
   node_id?: NodeIds
   flat_settings?: boolean
-  timeout?: Time
+  timeout?: Duration
 }

 export type ClusterStatsResponse = ClusterStatsStatsResponseBase
@@ -8463,8 +8525,8 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase {

 export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase {
   index_uuid: Uuid
   accept_data_loss: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase
@@ -8472,8 +8534,8 @@ export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase {
   index_uuid: Uuid
   accept_data_loss: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type DanglingIndicesImportDanglingIndexResponse = AcknowledgedResponseBase
@@ -8481,7 +8543,7 @@ export interface DanglingIndicesListDanglingIndicesDanglingIndex {
   index_name: string
   index_uuid: string
-  creation_date_millis: EpochMillis
+  creation_date_millis: EpochTime<UnitMillis>
   node_ids: Ids
 }
@@ -8589,7 +8651,7 @@ export interface EqlEqlSearchResponseBase {
   id?: Id
   is_partial?: boolean
   is_running?: boolean
-  took?: integer
+  took?: DurationValue<UnitMillis>
   timed_out?: boolean
   hits: EqlEqlHits
 }
@@ -8614,8 +8676,8 @@ export type EqlDeleteResponse = AcknowledgedResponseBase

 export interface EqlGetRequest extends RequestBase {
   id: Id
-  keep_alive?: Time
-  wait_for_completion_timeout?: Time
+  keep_alive?: Duration
+  wait_for_completion_timeout?: Duration
 }

 export type EqlGetResponse = EqlEqlSearchResponseBase
@@ -8628,8 +8690,8 @@ export interface EqlGetStatusResponse {
   id: Id
   is_partial: boolean
   is_running: boolean
-  start_time_in_millis?: EpochMillis
-  expiration_time_in_millis?: EpochMillis
+  start_time_in_millis?: EpochTime<UnitMillis>
+  expiration_time_in_millis?: EpochTime<UnitMillis>
   completion_status?: integer
 }
@@ -8638,9 +8700,6 @@ export interface EqlSearchRequest extends RequestBase {
   allow_no_indices?: boolean
   expand_wildcards?: ExpandWildcards
   ignore_unavailable?: boolean
-  keep_alive?: Time
-  keep_on_completion?: boolean
-  wait_for_completion_timeout?: Time
   query: string
   case_sensitive?: boolean
   event_category_field?: Field
   timestamp_field?: Field
   fetch_size?: uint
   filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
+  keep_alive?: Duration
+  keep_on_completion?: boolean
+  wait_for_completion_timeout?: Duration
   size?: uint
   fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[]
   result_position?: EqlSearchResultPosition
@@ -8684,7 +8746,7 @@ export interface FleetGlobalCheckpointsRequest extends RequestBase {
   wait_for_advance?: boolean
   wait_for_index?: boolean
   checkpoints?: FleetCheckpoint[]
-  timeout?: Time
+  timeout?: Duration
 }

 export interface FleetGlobalCheckpointsResponse {
@@ -8723,9 +8785,7 @@ export interface FleetSearchRequest extends RequestBase {
   ccs_minimize_roundtrips?: boolean
   default_operator?: QueryDslOperator
   df?: string
-  docvalue_fields?: Fields
   expand_wildcards?: ExpandWildcards
-  explain?: boolean
   ignore_throttled?: boolean
   ignore_unavailable?: boolean
   lenient?: boolean
@@ -8735,36 +8795,28 @@ export interface FleetSearchRequest extends RequestBase {
   pre_filter_shard_size?: long
   request_cache?: boolean
   routing?: Routing
-  scroll?: Time
+  scroll?: Duration
   search_type?: SearchType
-  stats?: string[]
-  stored_fields?: Fields
   suggest_field?: Field
   suggest_mode?: SuggestMode
   suggest_size?: long
   suggest_text?: string
-  terminate_after?: long
-  timeout?: Time
-  track_total_hits?: SearchTrackHits
-  track_scores?: boolean
   typed_keys?: boolean
   rest_total_hits_as_int?: boolean
-  version?: boolean
-  _source?: SearchSourceConfigParam
   _source_excludes?: Fields
   _source_includes?: Fields
-  seq_no_primary_term?: boolean
   q?: string
-  size?: integer
-  from?: integer
-  sort?: string | string[]
   wait_for_checkpoints?: FleetCheckpoint[]
   allow_partial_search_results?: boolean
   aggregations?: Record<string, AggregationsAggregationContainer>
   aggs?: Record<string, AggregationsAggregationContainer>
   collapse?: SearchFieldCollapse
+  explain?: boolean
+  from?: integer
   highlight?: SearchHighlight
+  track_total_hits?: SearchTrackHits
   indices_boost?: Record<IndexName, double>[]
+  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
   min_score?: double
   post_filter?: QueryDslQueryContainer
   profile?: boolean
@@ -8772,11 +8824,21 @@ export interface FleetSearchRequest extends RequestBase {
   rescore?: SearchRescore | SearchRescore[]
   script_fields?: Record<string, ScriptField>
   search_after?: SortResults
+  size?: integer
   slice?: SlicedScroll
+  sort?: Sort
+  _source?: SearchSourceConfig
   fields?: (QueryDslFieldAndFormat | Field)[]
   suggest?: SearchSuggester
+  terminate_after?: long
+  timeout?: string
+  track_scores?: boolean
+  version?: boolean
+  seq_no_primary_term?: boolean
+  stored_fields?: Fields
   pit?: SearchPointInTimeReference
   runtime_mappings?: MappingRuntimeFields
+  stats?: string[]
 }

 export interface FleetSearchResponse {
@@ -8806,7 +8868,7 @@ export interface GraphConnection {

 export interface GraphExploreControls {
   sample_diversity?: GraphSampleDiversity
   sample_size?: integer
-  timeout?: Time
+  timeout?: Duration
   use_significance: boolean
 }
@@ -8845,7 +8907,7 @@ export interface GraphVertexInclude {

 export interface GraphExploreRequest extends RequestBase {
   index: Indices
   routing?: Routing
-  timeout?: Time
+  timeout?: Duration
   connections?: GraphHop
   controls?: GraphExploreControls
   query?: QueryDslQueryContainer
@@ -8874,7 +8936,7 @@ export interface IlmForceMergeConfiguration {

 export interface IlmPhase {
   actions?: IlmActions
-  min_age?: Time
+  min_age?: Duration | long
   configurations?: IlmConfigurations
 }
@@ -8887,7 +8949,7 @@ export interface IlmPhases {

 export interface IlmPolicy {
   phases: IlmPhases
-  name?: Name
+  _meta?: Metadata
 }
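`IlmPolicy` swaps the redundant `name` for an arbitrary `_meta` block, and `min_age` widens to `Duration | long`. A hedged sketch of a policy object under the new types:

    import type { estypes } from '@elastic/elasticsearch'

    const policy: estypes.IlmPolicy = {
      _meta: { owner: 'search-team' },            // replaces the old name property
      phases: {
        hot: { actions: {} },
        delete: { min_age: '30d', actions: {} }   // min_age is Duration | long
      }
    }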
 export interface IlmShrinkConfiguration {
@@ -8896,8 +8958,8 @@ export interface IlmShrinkConfiguration {

 export interface IlmDeleteLifecycleRequest extends RequestBase {
   name: Name
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase
@@ -8905,30 +8967,35 @@ export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase

 export type IlmExplainLifecycleLifecycleExplain = IlmExplainLifecycleLifecycleExplainManaged | IlmExplainLifecycleLifecycleExplainUnmanaged

 export interface IlmExplainLifecycleLifecycleExplainManaged {
-  action: Name
-  action_time_millis: EpochMillis
-  age: Time
+  action?: Name
+  action_time?: DateTime
+  action_time_millis?: EpochTime<UnitMillis>
+  age?: Duration
   failed_step?: Name
   failed_step_retry_count?: integer
-  index: IndexName
-  index_creation_date_millis?: EpochMillis
+  index?: IndexName
+  index_creation_date?: DateTime
+  index_creation_date_millis?: EpochTime<UnitMillis>
   is_auto_retryable_error?: boolean
-  lifecycle_date_millis: EpochMillis
+  lifecycle_date?: DateTime
+  lifecycle_date_millis?: EpochTime<UnitMillis>
   managed: true
   phase: Name
-  phase_time_millis: EpochMillis
+  phase_time?: DateTime
+  phase_time_millis?: EpochTime<UnitMillis>
   policy: Name
-  step: Name
+  step?: Name
   step_info?: Record<string, any>
-  step_time_millis: EpochMillis
-  phase_execution: IlmExplainLifecycleLifecycleExplainPhaseExecution
-  time_since_index_creation?: Time
+  step_time?: DateTime
+  step_time_millis?: EpochTime<UnitMillis>
+  phase_execution?: IlmExplainLifecycleLifecycleExplainPhaseExecution
+  time_since_index_creation?: Duration
 }

 export interface IlmExplainLifecycleLifecycleExplainPhaseExecution {
   policy: Name
   version: VersionNumber
-  modified_date_in_millis: EpochMillis
+  modified_date_in_millis: EpochTime<UnitMillis>
 }

 export interface IlmExplainLifecycleLifecycleExplainUnmanaged {
@@ -8940,8 +9007,8 @@ export interface IlmExplainLifecycleRequest extends RequestBase {
   index: IndexName
   only_errors?: boolean
   only_managed?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export interface IlmExplainLifecycleResponse {
@@ -8949,15 +9016,15 @@ export interface IlmExplainLifecycleResponse {
 }

 export interface IlmGetLifecycleLifecycle {
-  modified_date: DateString
+  modified_date: DateTime
   policy: IlmPolicy
   version: VersionNumber
 }

 export interface IlmGetLifecycleRequest extends RequestBase {
   name?: Name
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type IlmGetLifecycleResponse = Record<string, IlmGetLifecycleLifecycle>
@@ -9001,8 +9068,8 @@ export interface IlmMoveToStepStepKey {

 export interface IlmPutLifecycleRequest extends RequestBase {
   name: Name
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   policy?: IlmPolicy
 }
@@ -9024,15 +9091,15 @@ export interface IlmRetryRequest extends RequestBase {

 export type IlmRetryResponse = AcknowledgedResponseBase

 export interface IlmStartRequest extends RequestBase {
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type IlmStartResponse = AcknowledgedResponseBase

 export interface IlmStopRequest extends RequestBase {
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type IlmStopResponse = AcknowledgedResponseBase
@@ -9143,7 +9210,7 @@ export interface IndicesIndexSettingBlocks {
   metadata?: boolean
 }

-export interface IndicesIndexSettings {
+export interface IndicesIndexSettingsKeys {
   index?: IndicesIndexSettings
   mode?: string
   routing_path?: string | string[]
@@ -9160,7 +9227,7 @@ export interface IndicesIndexSettingsKeys {
   auto_expand_replicas?: string
   merge?: IndicesMerge
   search?: IndicesSettingsSearch
refresh_interval?: Time + refresh_interval?: Duration max_result_window?: integer max_inner_result_window?: integer max_rescore_window?: integer @@ -9175,13 +9242,13 @@ export interface IndicesIndexSettings { max_terms_count?: integer max_regex_length?: integer routing?: IndicesIndexRouting - gc_deletes?: Time + gc_deletes?: Duration default_pipeline?: PipelineName final_pipeline?: PipelineName lifecycle?: IndicesIndexSettingsLifecycle provided_name?: Name - creation_date?: DateString - creation_date_string?: DateString + creation_date?: SpecUtilsStringified> + creation_date_string?: DateTime uuid?: Uuid version?: IndicesIndexVersioning verified_before_close?: boolean | string @@ -9197,11 +9264,13 @@ export interface IndicesIndexSettings { shards?: integer queries?: IndicesQueries similarity?: IndicesSettingsSimilarity - mappings?: IndicesMappingLimitSettings + mapping?: IndicesMappingLimitSettings 'indexing.slowlog'?: IndicesSlowlogSettings indexing_pressure?: IndicesIndexingPressure store?: IndicesStorage } +export type IndicesIndexSettings = IndicesIndexSettingsKeys +& { [property: string]: any } export interface IndicesIndexSettingsAnalysis { analyzer?: Record @@ -9221,12 +9290,12 @@ export interface IndicesIndexSettingsLifecycle { } export interface IndicesIndexSettingsLifecycleStep { - wait_time_threshold?: Time + wait_time_threshold?: Duration } export interface IndicesIndexSettingsTimeSeries { - end_time?: DateOrEpochMillis - start_time?: DateOrEpochMillis + end_time?: DateTime + start_time?: DateTime } export interface IndicesIndexState { @@ -9326,11 +9395,11 @@ export interface IndicesQueries { } export interface IndicesRetentionLease { - period: Time + period: Duration } export interface IndicesSearchIdle { - after?: Time + after?: Duration } export type IndicesSegmentSortMissing = '_last' | '_first' @@ -9415,10 +9484,10 @@ export interface IndicesSlowlogSettings { } export interface IndicesSlowlogTresholdLevels { - warn?: Time - info?: Time - debug?: Time - trace?: Time + warn?: Duration + info?: Duration + debug?: Duration + trace?: Duration } export interface IndicesSlowlogTresholds { @@ -9449,7 +9518,7 @@ export interface IndicesTemplateMapping { } export interface IndicesTranslog { - sync_interval?: Time + sync_interval?: Duration durability?: IndicesTranslogDurability flush_threshold_size?: ByteSize retention?: IndicesTranslogRetention @@ -9459,7 +9528,7 @@ export type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC export interface IndicesTranslogRetention { size?: ByteSize - age?: Time + age?: Duration } export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' @@ -9475,8 +9544,8 @@ export interface IndicesAddBlockRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export interface IndicesAddBlockResponse { @@ -9512,7 +9581,7 @@ export interface IndicesAnalyzeCharFilterDetail { name: string } -export interface IndicesAnalyzeExplainAnalyzeToken { +export interface IndicesAnalyzeExplainAnalyzeTokenKeys { bytes: string end_offset: long keyword?: boolean @@ -9523,6 +9592,8 @@ export interface IndicesAnalyzeExplainAnalyzeToken { token: string type: string } +export type IndicesAnalyzeExplainAnalyzeToken = IndicesAnalyzeExplainAnalyzeTokenKeys +& { [property: string]: any } export interface IndicesAnalyzeRequest extends RequestBase { index?: IndexName @@ -9565,8 
@@ -9565,8 +9636,8 @@ export type IndicesClearCacheResponse = ShardsOperationResponseBase

 export interface IndicesCloneRequest extends RequestBase {
   index: IndexName
   target: Name
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
   aliases?: Record
   settings?: Record
@@ -9592,8 +9663,8 @@ export interface IndicesCloseRequest extends RequestBase {
   allow_no_indices?: boolean
   expand_wildcards?: ExpandWildcards
   ignore_unavailable?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
 }
@@ -9605,8 +9676,8 @@ export interface IndicesCloseResponse {

 export interface IndicesCreateRequest extends RequestBase {
   index: IndexName
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
   aliases?: Record
   mappings?: MappingTypeMapping
@@ -9630,7 +9701,7 @@ export interface IndicesDataStreamsStatsDataStreamsStatsItem {
   data_stream: Name
   store_size?: ByteSize
   store_size_bytes: integer
-  maximum_timestamp: long
+  maximum_timestamp: EpochTime
 }

 export interface IndicesDataStreamsStatsRequest extends RequestBase {
@@ -9652,8 +9723,8 @@ export interface IndicesDeleteRequest extends RequestBase {
   allow_no_indices?: boolean
   expand_wildcards?: ExpandWildcards
   ignore_unavailable?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type IndicesDeleteResponse = IndicesResponseBase

 export interface IndicesDeleteAliasRequest extends RequestBase {
   index: Indices
   name: Names
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type IndicesDeleteAliasResponse = AcknowledgedResponseBase
@@ -9676,30 +9747,27 @@ export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase

 export interface IndicesDeleteIndexTemplateRequest extends RequestBase {
   name: Names
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase

 export interface IndicesDeleteTemplateRequest extends RequestBase {
   name: Name
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase

 export interface IndicesDiskUsageRequest extends RequestBase {
-  index: IndexName
+  index: Indices
   allow_no_indices?: boolean
   expand_wildcards?: ExpandWildcards
   flush?: boolean
   ignore_unavailable?: boolean
-  master_timeout?: TimeUnit
-  timeout?: TimeUnit
   run_expensive_tasks?: boolean
-  wait_for_active_shards?: string
 }

 export type IndicesDiskUsageResponse = any
@@ -9729,7 +9797,7 @@ export type IndicesExistsAliasResponse = boolean

 export interface IndicesExistsIndexTemplateRequest extends RequestBase {
   name: Name
-  master_timeout?: Time
+  master_timeout?: Duration
 }

 export type IndicesExistsIndexTemplateResponse = boolean
@@ -9738,7 +9806,7 @@ export interface IndicesExistsTemplateRequest extends RequestBase {
   name: Names
   flat_settings?: boolean
   local?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
 }

 export type IndicesExistsTemplateResponse = boolean
@@ -9776,8 +9844,8 @@ export interface IndicesFieldUsageStatsRequest extends RequestBase {
   expand_wildcards?: ExpandWildcards
   ignore_unavailable?: boolean
   fields?: Fields
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
 }
@@ -9796,7 +9864,7 @@ export interface IndicesFieldUsageStatsUsageStatsShards {
   routing: IndicesStatsShardRouting
   stats: IndicesFieldUsageStatsShardsStats
   tracking_id: string
-  tracking_started_at_millis: EpochMillis
+  tracking_started_at_millis: EpochTime
 }

 export interface IndicesFlushRequest extends RequestBase {
@@ -9835,7 +9903,7 @@ export interface IndicesGetRequest extends RequestBase {
   ignore_unavailable?: boolean
   include_defaults?: boolean
   local?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
   features?: IndicesGetFeatures
 }
@@ -9878,7 +9946,7 @@ export interface IndicesGetFieldMappingRequest extends RequestBase {

 export type IndicesGetFieldMappingResponse = Record

 export interface IndicesGetFieldMappingTypeFieldMappings {
-  mappings: Partial>
+  mappings: Record
 }

 export interface IndicesGetIndexTemplateIndexTemplateItem {
@@ -9890,7 +9958,7 @@ export interface IndicesGetIndexTemplateRequest extends RequestBase {
   name?: Name
   local?: boolean
   flat_settings?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
 }

 export interface IndicesGetIndexTemplateResponse {
@@ -9908,7 +9976,7 @@ export interface IndicesGetMappingRequest extends RequestBase {
   expand_wildcards?: ExpandWildcards
   ignore_unavailable?: boolean
   local?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
 }

 export type IndicesGetMappingResponse = Record
@@ -9922,7 +9990,7 @@ export interface IndicesGetSettingsRequest extends RequestBase {
   ignore_unavailable?: boolean
   include_defaults?: boolean
   local?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
 }

 export type IndicesGetSettingsResponse = Record
@@ -9931,7 +9999,7 @@ export interface IndicesGetTemplateRequest extends RequestBase {
   name?: Names
   flat_settings?: boolean
   local?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
 }

 export type IndicesGetTemplateResponse = Record
@@ -9963,8 +10031,8 @@ export interface IndicesOpenRequest extends RequestBase {
   allow_no_indices?: boolean
   expand_wildcards?: ExpandWildcards
   ignore_unavailable?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
 }
@@ -9982,8 +10050,8 @@ export type IndicesPromoteDataStreamResponse = any

 export interface IndicesPutAliasRequest extends RequestBase {
   index: Indices
   name: Name
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   filter?: QueryDslQueryContainer
   index_routing?: Routing
   is_write_index?: boolean
@@ -10018,8 +10086,8 @@ export interface IndicesPutMappingRequest extends RequestBase {
   allow_no_indices?: boolean
   expand_wildcards?: ExpandWildcards
   ignore_unavailable?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   write_index_only?: boolean
   date_detection?: boolean
   dynamic?: MappingDynamicMapping
@@ -10042,9 +10110,9 @@ export interface IndicesPutSettingsRequest extends RequestBase {
   expand_wildcards?: ExpandWildcards
   flat_settings?: boolean
   ignore_unavailable?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
   preserve_existing?: boolean
-  timeout?: Time
+  timeout?: Duration
   settings?: IndicesIndexSettings
 }
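Everywhere a request parameter changes from `Time` to `Duration` above, the value on the wire is still a time-unit string. A minimal usage sketch, assuming a configured `client` instance; the index name and values are illustrative:

// Hypothetical call shape; assumes `client` is an instantiated Client.
await client.indices.putSettings({
  index: 'my-index',
  master_timeout: '30s',  // Duration as a time-unit string
  timeout: '1m',
  settings: { 'index.refresh_interval': '5s' }
})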
@@ -10054,12 +10122,12 @@ export interface IndicesPutTemplateRequest extends RequestBase {
   name: Name
   create?: boolean
   flat_settings?: boolean
-  master_timeout?: Time
-  timeout?: Time
-  order?: integer
+  master_timeout?: Duration
+  timeout?: Duration
   aliases?: Record
   index_patterns?: string | string[]
   mappings?: MappingTypeMapping
+  order?: integer
   settings?: Record
   version?: VersionNumber
 }
@@ -10096,12 +10164,12 @@ export interface IndicesRecoveryRecoveryIndexStatus {
   bytes?: IndicesRecoveryRecoveryBytes
   files: IndicesRecoveryRecoveryFiles
   size: IndicesRecoveryRecoveryBytes
-  source_throttle_time?: Time
-  source_throttle_time_in_millis: EpochMillis
-  target_throttle_time?: Time
-  target_throttle_time_in_millis: EpochMillis
-  total_time_in_millis: EpochMillis
-  total_time?: Time
+  source_throttle_time?: Duration
+  source_throttle_time_in_millis: DurationValue
+  target_throttle_time?: Duration
+  target_throttle_time_in_millis: DurationValue
+  total_time?: Duration
+  total_time_in_millis: DurationValue
 }

 export interface IndicesRecoveryRecoveryOrigin {
@@ -10120,8 +10188,10 @@ export interface IndicesRecoveryRecoveryOrigin {
 }

 export interface IndicesRecoveryRecoveryStartStatus {
-  check_index_time: long
-  total_time_in_millis: string
+  check_index_time?: Duration
+  check_index_time_in_millis: DurationValue
+  total_time?: Duration
+  total_time_in_millis: DurationValue
 }

 export interface IndicesRecoveryRecoveryStatus {
@@ -10143,13 +10213,13 @@ export interface IndicesRecoveryShardRecovery {
   source: IndicesRecoveryRecoveryOrigin
   stage: string
   start?: IndicesRecoveryRecoveryStartStatus
-  start_time?: DateString
-  start_time_in_millis: EpochMillis
-  stop_time?: DateString
-  stop_time_in_millis: EpochMillis
+  start_time?: DateTime
+  start_time_in_millis: EpochTime
+  stop_time?: DateTime
+  stop_time_in_millis?: EpochTime
   target: IndicesRecoveryRecoveryOrigin
-  total_time?: DateString
-  total_time_in_millis: EpochMillis
+  total_time?: Duration
+  total_time_in_millis: DurationValue
   translog: IndicesRecoveryTranslogStatus
   type: string
   verify_index: IndicesRecoveryVerifyIndex
@@ -10160,15 +10230,15 @@ export interface IndicesRecoveryTranslogStatus {
   recovered: long
   total: long
   total_on_start: long
-  total_time?: string
-  total_time_in_millis: EpochMillis
+  total_time?: Duration
+  total_time_in_millis: DurationValue
 }

 export interface IndicesRecoveryVerifyIndex {
-  check_index_time?: Time
-  check_index_time_in_millis: EpochMillis
-  total_time?: Time
-  total_time_in_millis: EpochMillis
+  check_index_time?: Duration
+  check_index_time_in_millis: DurationValue
+  total_time?: Duration
+  total_time_in_millis: DurationValue
 }

 export interface IndicesRefreshRequest extends RequestBase {
@@ -10231,8 +10301,8 @@ export interface IndicesRolloverRequest extends RequestBase {
   alias: IndexAlias
   new_index?: IndexName
   dry_run?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
   aliases?: Record
   conditions?: IndicesRolloverRolloverConditions
@@ -10251,13 +10321,13 @@ export interface IndicesRolloverResponse {
 }

 export interface IndicesRolloverRolloverConditions {
-  max_age?: Time
+  max_age?: Duration
+  max_age_millis?: DurationValue
   max_docs?: long
   max_size?: string
   max_size_bytes?: ByteSize
   max_primary_shard_size?: ByteSize
   max_primary_shard_size_bytes?: ByteSize
-  max_age_millis?: EpochMillis
 }

 export interface IndicesSegmentsIndexSegment {
@@ -10346,11 +10416,11 @@ export interface IndicesShardStoresShardStoreWrapper {

 export interface IndicesShrinkRequest extends RequestBase {
   index: IndexName
   target: IndexName
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
   aliases?: Record
   settings?: Record
@@ -10362,7 +10432,7 @@ export interface IndicesShrinkResponse {

 export interface IndicesSimulateIndexTemplateRequest extends RequestBase {
   name: Name
   create?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
   allow_auto_create?: boolean
   index_patterns?: Indices
   composed_of?: Name[]
@@ -10384,7 +10454,7 @@ export interface IndicesSimulateTemplateOverlapping {

 export interface IndicesSimulateTemplateRequest extends RequestBase {
   name?: Name
   create?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
   template?: IndicesIndexTemplate
 }
@@ -10402,8 +10472,8 @@ export interface IndicesSimulateTemplateTemplate {

 export interface IndicesSplitRequest extends RequestBase {
   index: IndexName
   target: IndexName
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
   aliases?: Record
   settings?: Record
@@ -10565,8 +10635,8 @@ export interface IndicesUnfreezeRequest extends RequestBase {
   allow_no_indices?: boolean
   expand_wildcards?: ExpandWildcards
   ignore_unavailable?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   wait_for_active_shards?: string
 }
@@ -10610,8 +10680,8 @@ export interface IndicesUpdateAliasesRemoveIndexAction {
 }

 export interface IndicesUpdateAliasesRequest extends RequestBase {
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   actions?: IndicesUpdateAliasesAction[]
 }
@@ -10962,8 +11032,8 @@ export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS

 export interface IngestDeletePipelineRequest extends RequestBase {
   id: Id
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }
@@ -10971,7 +11041,7 @@ export type IngestDeletePipelineResponse = AcknowledgedResponseBase

 export interface IngestGeoIpStatsGeoIpDownloadStatistics {
   successful_downloads: integer
   failed_downloads: integer
-  total_download_time: integer
+  total_download_time: DurationValue
   database_count: integer
   skipped_updates: integer
 }
@@ -10995,7 +11065,7 @@ export interface IngestGeoIpStatsResponse {

 export interface IngestGetPipelineRequest extends RequestBase {
   id?: Id
-  master_timeout?: Time
+  master_timeout?: Duration
   summary?: boolean
 }
@@ -11010,8 +11080,9 @@ export interface IngestProcessorGrokResponse {

 export interface IngestPutPipelineRequest extends RequestBase {
   id: Id
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
+  if_version?: VersionNumber
   _meta?: Metadata
   description?: string
   on_failure?: IngestProcessorContainer[]
@@ -11037,7 +11108,7 @@ export interface IngestSimulateDocumentSimulation {
 }

 export interface IngestSimulateIngest {
-  timestamp: DateString
+  timestamp: DateTime
   pipeline?: Name
 }
@@ -11061,9 +11132,9 @@ export interface IngestSimulateResponse {
 }

 export interface LicenseLicense {
-  expiry_date_in_millis: EpochMillis
-  issue_date_in_millis: EpochMillis
-  start_date_in_millis?: EpochMillis
+  expiry_date_in_millis: EpochTime
+  issue_date_in_millis: EpochTime
+  start_date_in_millis?: EpochTime
   issued_to: string
   issuer: string
   max_nodes?: long | null
@@ -11083,10 +11154,10 @@ export interface LicenseDeleteRequest extends RequestBase {
 }

 export type LicenseDeleteResponse = AcknowledgedResponseBase
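A recurring pattern in the stats interfaces above: each timing is a pair of a human-readable `Duration` (only populated with `?human=true`) and a numeric `DurationValue` in milliseconds that is always present. A small consumption sketch with simplified local aliases standing in for the generated types:

// Simplified stand-ins for the generated aliases.
type Duration = string
type DurationValue = number

interface RecoveryTiming {
  total_time?: Duration            // e.g. '2.5s', present only with human output
  total_time_in_millis: DurationValue
}

function totalMillis(t: RecoveryTiming): number {
  // The *_in_millis field is always present, so prefer it for arithmetic.
  return t.total_time_in_millis
}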
 export interface LicenseGetLicenseInformation {
-  expiry_date?: DateString
-  expiry_date_in_millis?: EpochMillis
-  issue_date: DateString
-  issue_date_in_millis: EpochMillis
+  expiry_date?: DateTime
+  expiry_date_in_millis?: EpochTime
+  issue_date: DateTime
+  issue_date_in_millis: EpochTime
   issued_to: string
   issuer: string
   max_nodes: long | null
@@ -11094,7 +11165,7 @@ export interface LicenseGetLicenseInformation {
   status: LicenseLicenseStatus
   type: LicenseLicenseType
   uid: Uuid
-  start_date_in_millis: EpochMillis
+  start_date_in_millis: EpochTime
 }

 export interface LicenseGetRequest extends RequestBase {
@@ -11163,7 +11234,7 @@ export interface LicensePostStartTrialResponse {

 export interface LogstashPipeline {
   description: string
-  last_modified: Timestamp
+  last_modified: DateTime
   pipeline_metadata: LogstashPipelineMetadata
   username: string
   pipeline: string
@@ -11260,28 +11331,28 @@ export interface MigrationPostFeatureUpgradeResponse {
 }

 export interface MlAnalysisConfig {
-  bucket_span: TimeSpan
+  bucket_span: Duration
   categorization_analyzer?: MlCategorizationAnalyzer
   categorization_field_name?: Field
   categorization_filters?: string[]
   detectors: MlDetector[]
   influencers?: Field[]
-  latency?: Time
-  model_prune_window?: Time
+  latency?: Duration
+  model_prune_window?: Duration
   multivariate_by_fields?: boolean
   per_partition_categorization?: MlPerPartitionCategorization
   summary_count_field_name?: Field
 }

 export interface MlAnalysisConfigRead {
-  bucket_span: TimeSpan
+  bucket_span: Duration
   categorization_analyzer?: MlCategorizationAnalyzer
   categorization_field_name?: Field
   categorization_filters?: string[]
   detectors: MlDetectorRead[]
   influencers: Field[]
-  model_prune_window?: Time
-  latency?: Time
+  model_prune_window?: Duration
+  latency?: Duration
   multivariate_by_fields?: boolean
   per_partition_categorization?: MlPerPartitionCategorization
   summary_count_field_name?: Field
@@ -11298,7 +11369,7 @@ export interface MlAnalysisMemoryLimit {

 export interface MlAnomaly {
   actual?: double[]
-  bucket_span: Time
+  bucket_span: DurationValue
   by_field_name?: string
   by_field_value?: string
   causes?: MlAnomalyCause[]
@@ -11317,7 +11388,7 @@ export interface MlAnomaly {
   probability: double
   record_score: double
   result_type: string
-  timestamp: EpochMillis
+  timestamp: EpochTime
   typical?: double[]
 }
@@ -11338,11 +11409,16 @@ export interface MlAnomalyCause {
   typical: double[]
 }

+export interface MlApiKeyAuthorization {
+  id: string
+  name: string
+}
+
 export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time'

 export interface MlBucketInfluencer {
   anomaly_score: double
-  bucket_span: long
+  bucket_span: DurationValue
   influencer_field_name: Field
   initial_anomaly_score: double
   is_interim: boolean
@@ -11350,28 +11426,30 @@ export interface MlBucketInfluencer {
   probability: double
   raw_anomaly_score: double
   result_type: string
-  timestamp: Time
+  timestamp: EpochTime
+  timestamp_string?: DateTime
 }

 export interface MlBucketSummary {
   anomaly_score: double
   bucket_influencers: MlBucketInfluencer[]
-  bucket_span: Time
+  bucket_span: DurationValue
   event_count: long
   initial_anomaly_score: double
   is_interim: boolean
   job_id: Id
-  processing_time_ms: double
+  processing_time_ms: DurationValue
   result_type: string
-  timestamp: Time
+  timestamp: EpochTime
+  timestamp_string?: DateTime
 }

 export interface MlCalendarEvent {
   calendar_id?: Id
   event_id?: Id
   description: string
-  end_time: EpochMillis
-  start_time: EpochMillis
+  end_time: DateTime
+  start_time: DateTime
 }

 export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition
@@ -11403,7 +11481,7 @@ export interface MlCategory {

 export interface MlChunkingConfig {
   mode: MlChunkingMode
-  time_span?: Time
+  time_span?: Duration
 }

 export type MlChunkingMode = 'auto' | 'manual' | 'off'
@@ -11452,15 +11530,16 @@ export interface MlDataDescription {

 export interface MlDatafeed {
   aggregations?: Record
   aggs?: Record
+  authorization?: MlDatafeedAuthorization
   chunking_config?: MlChunkingConfig
   datafeed_id: Id
-  frequency?: Timestamp
+  frequency?: Duration
   indices: string[]
   indexes?: string[]
   job_id: Id
   max_empty_searches?: integer
   query: QueryDslQueryContainer
-  query_delay?: Timestamp
+  query_delay?: Duration
   script_fields?: Record
   scroll_size?: integer
   delayed_data_check_config: MlDelayedDataCheckConfig
   indices_options?: IndicesOptions
 }

+export interface MlDatafeedAuthorization {
+  api_key?: MlApiKeyAuthorization
+  roles?: string[]
+  service_account?: string
+}
+
 export interface MlDatafeedConfig {
   aggregations?: Record
   aggs?: Record
   chunking_config?: MlChunkingConfig
   datafeed_id?: Id
   delayed_data_check_config?: MlDelayedDataCheckConfig
-  frequency?: Timestamp
+  frequency?: Duration
   indexes?: string[]
   indices: string[]
   indices_options?: IndicesOptions
   job_id?: Id
   max_empty_searches?: integer
   query: QueryDslQueryContainer
-  query_delay?: Timestamp
+  query_delay?: Duration
   runtime_mappings?: MappingRuntimeFields
   script_fields?: Record
   scroll_size?: integer
@@ -11506,11 +11591,11 @@ export interface MlDatafeedStats {

 export interface MlDatafeedTimingStats {
   bucket_count: long
-  exponential_average_search_time_per_hour_ms: double
+  exponential_average_search_time_per_hour_ms: DurationValue
   job_id: Id
   search_count: long
-  total_search_time_ms: double
-  average_search_time_per_bucket_ms?: number
+  total_search_time_ms: DurationValue
+  average_search_time_per_bucket_ms?: DurationValue
 }

 export interface MlDataframeAnalysis {
@@ -11615,6 +11700,12 @@ export interface MlDataframeAnalytics {
   state: MlDataframeState
 }

+export interface MlDataframeAnalyticsAuthorization {
+  api_key?: MlApiKeyAuthorization
+  roles?: string[]
+  service_account?: string
+}
+
 export interface MlDataframeAnalyticsDestination {
   index: IndexName
   results_field?: Field
@@ -11656,7 +11747,7 @@ export interface MlDataframeAnalyticsStatsDataCounts {

 export interface MlDataframeAnalyticsStatsHyperparameters {
   hyperparameters: MlHyperparameters
   iteration: integer
-  timestamp: DateString
+  timestamp: EpochTime
   timing_stats: MlTimingStats
   validation_loss: MlValidationLoss
 }
@@ -11665,12 +11756,12 @@ export interface MlDataframeAnalyticsStatsMemoryUsage {
   memory_reestimate_bytes?: long
   peak_usage_bytes: long
   status: string
-  timestamp?: DateString
+  timestamp?: EpochTime
 }

 export interface MlDataframeAnalyticsStatsOutlierDetection {
   parameters: MlOutlierDetectionParameters
-  timestamp: DateString
+  timestamp: EpochTime
   timing_stats: MlTimingStats
 }
@@ -11680,16 +11771,17 @@ export interface MlDataframeAnalyticsStatsProgress {
 }
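With `frequency` and `query_delay` now typed as `Duration`, a datafeed definition reads like the sketch below. This assumes a configured `client` instance and an existing job; the ids and index pattern are illustrative:

// Hypothetical datafeed; assumes job 'my-job' already exists.
await client.ml.putDatafeed({
  datafeed_id: 'my-datafeed',
  job_id: 'my-job',
  indices: ['metrics-*'],
  frequency: '150s',    // Duration
  query_delay: '60s',   // Duration
  query: { match_all: {} }
})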
 export interface MlDataframeAnalyticsSummary {
-  id: Id
-  source: MlDataframeAnalyticsSource
-  dest: MlDataframeAnalyticsDestination
+  allow_lazy_start?: boolean
   analysis: MlDataframeAnalysisContainer
+  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
+  authorization?: MlDataframeAnalyticsAuthorization
+  create_time?: EpochTime
   description?: string
-  model_memory_limit?: string
+  dest: MlDataframeAnalyticsDestination
+  id: Id
   max_num_threads?: integer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
-  allow_lazy_start?: boolean
-  create_time?: long
+  model_memory_limit?: string
+  source: MlDataframeAnalyticsSource
   version?: VersionString
 }
@@ -11756,12 +11848,14 @@ export interface MlDataframeEvaluationRegressionMetricsMsle {

 export type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed'

 export interface MlDelayedDataCheckConfig {
-  check_window?: Time
+  check_window?: Duration
   enabled: boolean
 }

 export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated'

+export type MlDeploymentAssignmentState = 'starting' | 'started' | 'stopping' | 'failed'
+
 export type MlDeploymentState = 'started' | 'starting' | 'stopping'

 export interface MlDetectionRule {
@@ -11900,7 +11994,7 @@ export interface MlInfluence {
 }

 export interface MlInfluencer {
-  bucket_span: long
+  bucket_span: DurationValue
   influencer_score: double
   influencer_field_name: Field
   influencer_field_value: string
@@ -11909,7 +12003,7 @@ export interface MlInfluencer {
   job_id: Id
   probability: double
   result_type: string
-  timestamp: Time
+  timestamp: EpochTime
   foo?: string
 }

 export interface MlJob {
   allow_lazy_open: boolean
   analysis_config: MlAnalysisConfig
   analysis_limits?: MlAnalysisLimits
-  background_persist_interval?: Time
+  background_persist_interval?: Duration
   blocked?: MlJobBlocked
-  create_time?: integer
+  create_time?: DateTime
   custom_settings?: MlCustomSettings
   daily_model_snapshot_retention_after_days?: long
   data_description: MlDataDescription
   datafeed_config?: MlDatafeed
   deleting?: boolean
   description?: string
-  finished_time?: integer
+  finished_time?: DateTime
   groups?: string[]
   job_id: Id
   job_type?: string
@@ -11950,7 +12044,7 @@ export interface MlJobConfig {
   allow_lazy_open?: boolean
   analysis_config: MlAnalysisConfig
   analysis_limits?: MlAnalysisLimits
-  background_persist_interval?: Time
+  background_persist_interval?: Duration
   custom_settings?: MlCustomSettings
   daily_model_snapshot_retention_after_days?: long
   data_description: MlDataDescription
@@ -11991,21 +12085,21 @@ export interface MlJobStats {
   job_id: string
   model_size_stats: MlModelSizeStats
   node?: MlDiscoveryNode
-  open_time?: DateString
+  open_time?: DateTime
   state: MlJobState
   timing_stats: MlJobTimingStats
   deleting?: boolean
 }

 export interface MlJobTimingStats {
-  average_bucket_processing_time_ms?: double
+  average_bucket_processing_time_ms?: DurationValue
   bucket_count: long
-  exponential_average_bucket_processing_time_ms?: double
-  exponential_average_bucket_processing_time_per_hour_ms: double
+  exponential_average_bucket_processing_time_ms?: DurationValue
+  exponential_average_bucket_processing_time_per_hour_ms: DurationValue
   job_id: Id
-  total_bucket_processing_time_ms: double
-  maximum_bucket_processing_time_ms?: double
-  minimum_bucket_processing_time_ms?: double
+  total_bucket_processing_time_ms: DurationValue
+  maximum_bucket_processing_time_ms?: DurationValue
+  minimum_bucket_processing_time_ms?: DurationValue
 }

 export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit'
@@ -12019,7 +12113,7 @@ export interface MlModelPlotConfig {

 export interface MlModelSizeStats {
   bucket_allocation_failures_count: long
   job_id: Id
-  log_time: Time
+  log_time: DateTime
   memory_status: MlMemoryStatus
   model_bytes: ByteSize
   model_bytes_exceeded?: ByteSize
@@ -12053,6 +12147,14 @@ export interface MlModelSnapshot {
   timestamp: long
 }

+export interface MlModelSnapshotUpgrade {
+  job_id: Id
+  snapshot_id: Id
+  state: MlSnapshotUpgradeState
+  node: MlDiscoveryNode
+  assignment_explanation: string
+}
+
 export interface MlNerInferenceOptions {
   tokenization?: MlTokenizationConfigContainer
   results_field?: string
@@ -12095,12 +12197,13 @@ export interface MlOutlierDetectionParameters {
 }

 export interface MlOverallBucket {
-  bucket_span: long
+  bucket_span: DurationValue
   is_interim: boolean
   jobs: MlOverallBucketJob[]
   overall_score: double
   result_type: string
-  timestamp: Time
+  timestamp: EpochTime
+  timestamp_string: DateTime
 }

 export interface MlOverallBucketJob {
@@ -12161,10 +12264,14 @@ export interface MlRuleCondition {
 }

 export interface MlRunningStateSearchInterval {
-  end_ms: long
-  start_ms: long
+  end?: Duration
+  end_ms: DurationValue
+  start?: Duration
+  start_ms: DurationValue
 }

+export type MlSnapshotUpgradeState = 'loading_old_state' | 'saving_new_state' | 'stopped' | 'failed'
+
 export interface MlTextClassificationInferenceOptions {
   num_top_classes?: integer
   tokenization?: MlTokenizationConfigContainer
@@ -12190,8 +12297,8 @@ export interface MlTextEmbeddingInferenceUpdateOptions {
 }

 export interface MlTimingStats {
-  elapsed_time: integer
-  iteration_time?: integer
+  elapsed_time: DurationValue
+  iteration_time?: DurationValue
 }

 export interface MlTokenizationConfigContainer {
@@ -12225,21 +12332,27 @@ export interface MlTotalFeatureImportanceStatistics {
   min: integer
 }

-export interface MlTrainedModelAllocation {
-  allocation_state: MlDeploymentAllocationState
-  routing_table: Record
-  start_time: DateString
-  task_parameters: MlTrainedModelAllocationTaskParameters
+export interface MlTrainedModelAssignment {
+  assignment_state: MlDeploymentAssignmentState
+  routing_table: Record
+  start_time: DateTime
+  task_parameters: MlTrainedModelAssignmentTaskParameters
 }

-export interface MlTrainedModelAllocationRoutingTable {
+export interface MlTrainedModelAssignmentRoutingTable {
   reason: string
   routing_state: MlRoutingState
+  current_allocations: integer
+  target_allocations: integer
 }

-export interface MlTrainedModelAllocationTaskParameters {
+export interface MlTrainedModelAssignmentTaskParameters {
   model_bytes: integer
   model_id: Id
+  cache_size: ByteSize
+  number_of_allocations: integer
+  queue_capacity: integer
+  threads_per_allocation: integer
 }

 export interface MlTrainedModelConfig {
@@ -12249,7 +12362,7 @@ export interface MlTrainedModelConfig {
   version?: VersionString
   compressed_definition?: string
   created_by?: string
-  create_time?: Time
+  create_time?: DateTime
   default_field_map?: Record
   description?: string
   estimated_heap_memory_usage_bytes?: integer
@@ -12280,7 +12393,7 @@ export interface MlTrainedModelDeploymentAllocationStatus {
 }

 export interface MlTrainedModelDeploymentNodesStats {
-  average_inference_time_ms: double
+  average_inference_time_ms: DurationValue
   error_count: integer
   inference_count: integer
   last_access: long
@@ -12288,8 +12401,8 @@ export interface MlTrainedModelDeploymentNodesStats {
   number_of_allocations: integer
   number_of_pending_requests: integer
   rejection_execution_count: integer
-  routing_state: MlTrainedModelAllocationRoutingTable
-  start_time: long
+  routing_state: MlTrainedModelAssignmentRoutingTable
+  start_time: EpochTime
   threads_per_allocation: integer
   timeout_count: integer
 }
@@ -12304,7 +12417,7 @@ export interface MlTrainedModelDeploymentStats {
   queue_capacity: integer
   rejected_execution_count: integer
   reason: string
-  start_time: long
+  start_time: EpochTime
   state: MlDeploymentState
   threads_per_allocation: integer
   timeout_count: integer
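The allocation-to-assignment rename above also adds per-node allocation counts to the routing table. A small sketch of reading them, with a simplified local type standing in for `MlTrainedModelAssignmentRoutingTable`:

// Simplified stand-in for the generated routing-table entry.
interface RoutingTableEntry {
  routing_state: string
  reason: string
  current_allocations: number
  target_allocations: number
}

function isFullyAllocated(table: Record<string, RoutingTableEntry>): boolean {
  // Every node must have reached its target allocation count.
  return Object.values(table)
    .every(e => e.current_allocations >= e.target_allocations)
}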
@@ -12334,7 +12447,7 @@ export interface MlTrainedModelInferenceStats {
   failure_count: integer
   inference_count: integer
   missing_all_fields_count: integer
-  timestamp: Time
+  timestamp: DateTime
 }

 export interface MlTrainedModelLocation {
@@ -12361,6 +12474,12 @@ export interface MlTrainedModelStats {

 export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch'

+export interface MlTransformAuthorization {
+  api_key?: MlApiKeyAuthorization
+  roles?: string[]
+  service_account?: string
+}
+
 export interface MlValidationLoss {
   fold_values: string[]
   loss_type: string
@@ -12386,7 +12505,7 @@ export interface MlCloseJobRequest extends RequestBase {
   job_id: Id
   allow_no_match?: boolean
   force?: boolean
-  timeout?: Time
+  timeout?: Duration
 }

 export interface MlCloseJobResponse {
@@ -12420,7 +12539,7 @@ export interface MlDeleteCalendarJobResponse {

 export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase {
   id: Id
   force?: boolean
-  timeout?: Time
+  timeout?: Duration
 }

 export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase
@@ -12435,7 +12554,7 @@ export type MlDeleteDatafeedResponse = AcknowledgedResponseBase

 export interface MlDeleteExpiredDataRequest extends RequestBase {
   job_id?: Id
   requests_per_second?: float
-  timeout?: Time
+  timeout?: Duration
 }

 export interface MlDeleteExpiredDataResponse {
@@ -12452,7 +12571,7 @@ export interface MlDeleteForecastRequest extends RequestBase {
   job_id: Id
   forecast_id?: Id
   allow_no_forecasts?: boolean
-  timeout?: Time
+  timeout?: Duration
 }

 export type MlDeleteForecastResponse = AcknowledgedResponseBase
@@ -12606,11 +12725,11 @@ export interface MlExplainDataFrameAnalyticsResponse {

 export interface MlFlushJobRequest extends RequestBase {
   job_id: Id
-  advance_time?: DateString
+  advance_time?: DateTime
   calc_interim?: boolean
-  end?: DateString
-  skip_time?: EpochMillis
-  start?: DateString
+  end?: DateTime
+  skip_time?: DateTime
+  start?: DateTime
 }

 export interface MlFlushJobResponse {
@@ -12620,8 +12739,8 @@ export interface MlFlushJobResponse {

 export interface MlForecastRequest extends RequestBase {
   job_id: Id
-  duration?: Time
-  expires_in?: Time
+  duration?: Duration
+  expires_in?: Duration
   max_model_memory?: string
 }
@@ -12632,17 +12751,17 @@ export interface MlForecastResponse {

 export interface MlGetBucketsRequest extends RequestBase {
   job_id: Id
-  timestamp?: Timestamp
+  timestamp?: DateTime
+  from?: integer
+  size?: integer
   anomaly_score?: double
   desc?: boolean
-  end?: DateString
+  end?: DateTime
   exclude_interim?: boolean
   expand?: boolean
-  from?: integer
-  size?: integer
-  sort?: Field
-  start?: DateString
   page?: MlPage
+  sort?: Field
+  start?: DateTime
 }

 export interface MlGetBucketsResponse {
@@ -12652,11 +12771,11 @@ export interface MlGetBucketsResponse {

 export interface MlGetCalendarEventsRequest extends RequestBase {
   calendar_id: Id
-  end?: DateString
+  end?: DateTime
   from?: integer
   job_id?: Id
   size?: integer
-  start?: string
+  start?: DateTime
 }

 export interface MlGetCalendarEventsResponse {
@@ -12757,13 +12876,13 @@ export interface MlGetFiltersResponse {

 export interface MlGetInfluencersRequest extends RequestBase {
   job_id: Id
   desc?: boolean
-  end?: DateString
+  end?: DateTime
   exclude_interim?: boolean
   influencer_score?: double
   from?: integer
   size?: integer
   sort?: Field
-  start?: DateString
+  start?: DateTime
   page?: MlPage
 }
@@ -12836,8 +12955,8 @@ export interface MlGetMemoryStatsMemory {

 export interface MlGetMemoryStatsRequest extends RequestBase {
   node_id?: Id
   human?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export interface MlGetMemoryStatsResponse {
@@ -12846,16 +12965,27 @@ export interface MlGetMemoryStatsResponse {
   nodes: Record
 }

+export interface MlGetModelSnapshotUpgradeStatsRequest extends RequestBase {
+  job_id: Id
+  snapshot_id: Id
+  allow_no_match?: boolean
+}
+
+export interface MlGetModelSnapshotUpgradeStatsResponse {
+  count: long
+  model_snapshot_upgrades: MlModelSnapshotUpgrade[]
+}
+
 export interface MlGetModelSnapshotsRequest extends RequestBase {
   job_id: Id
   snapshot_id?: Id
-  desc?: boolean
-  end?: Time
   from?: integer
   size?: integer
-  sort?: Field
-  start?: Time
+  desc?: boolean
+  end?: DateTime
   page?: MlPage
+  sort?: Field
+  start?: DateTime
 }

 export interface MlGetModelSnapshotsResponse {
@@ -12866,11 +12996,11 @@ export interface MlGetModelSnapshotsResponse {

 export interface MlGetOverallBucketsRequest extends RequestBase {
   job_id: Id
   allow_no_match?: boolean
-  bucket_span?: Time
-  end?: Time
+  bucket_span?: Duration
+  end?: DateTime
   exclude_interim?: boolean
   overall_score?: double | string
-  start?: Time
+  start?: DateTime
   top_n?: integer
 }
@@ -12881,15 +13011,15 @@ export interface MlGetOverallBucketsResponse {

 export interface MlGetRecordsRequest extends RequestBase {
   job_id: Id
+  from?: integer
+  size?: integer
   desc?: boolean
-  end?: DateString
+  end?: DateTime
   exclude_interim?: boolean
-  from?: integer
+  page?: MlPage
   record_score?: double
-  size?: integer
   sort?: Field
-  start?: DateString
-  page?: MlPage
+  start?: DateTime
 }

 export interface MlGetRecordsResponse {
@@ -12927,7 +13057,7 @@ export interface MlGetTrainedModelsStatsResponse {

 export interface MlInferTrainedModelRequest extends RequestBase {
   model_id: Id
-  timeout?: Time
+  timeout?: Duration
   docs: Record[]
   inference_config?: MlInferenceConfigUpdateContainer
 }
@@ -12976,7 +13106,7 @@ export interface MlInfoResponse {

 export interface MlOpenJobRequest extends RequestBase {
   job_id: Id
-  timeout?: Time
+  timeout?: Duration
 }

 export interface MlOpenJobResponse {
@@ -12994,8 +13124,8 @@ export interface MlPostCalendarEventsResponse {

 export interface MlPostDataRequest extends RequestBase {
   job_id: Id
-  reset_end?: DateString
-  reset_start?: DateString
+  reset_end?: DateTime
+  reset_start?: DateTime
   data?: TData[]
 }
@@ -13082,17 +13212,18 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase {
 }

 export interface MlPutDataFrameAnalyticsResponse {
-  id: Id
-  create_time: long
-  version: VersionString
-  source: MlDataframeAnalyticsSource
-  description?: string
-  dest: MlDataframeAnalyticsDestination
-  model_memory_limit: string
+  authorization?: MlDataframeAnalyticsAuthorization
   allow_lazy_start: boolean
-  max_num_threads: integer
   analysis: MlDataframeAnalysisContainer
   analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
+  create_time: EpochTime
+  description?: string
+  dest: MlDataframeAnalyticsDestination
+  id: Id
+  max_num_threads: integer
+  model_memory_limit: string
+  source: MlDataframeAnalyticsSource
+  version: VersionString
 }

 export interface MlPutDatafeedRequest extends RequestBase {
   aggregations?: Record
   chunking_config?: MlChunkingConfig
   delayed_data_check_config?: MlDelayedDataCheckConfig
-  frequency?: Time
+  frequency?: Duration
   indices?: Indices
   indexes?: Indices
   indices_options?: IndicesOptions
   job_id?: Id
   max_empty_searches?: integer
   query?: QueryDslQueryContainer
-  query_delay?: Time
+  query_delay?: Duration
   runtime_mappings?: MappingRuntimeFields
   script_fields?: Record
   scroll_size?: integer
@@ -13120,16 +13251,17 @@ export interface MlPutDatafeedRequest extends RequestBase {

 export interface MlPutDatafeedResponse {
   aggregations: Record
+  authorization?: MlDatafeedAuthorization
   chunking_config: MlChunkingConfig
   delayed_data_check_config?: MlDelayedDataCheckConfig
   datafeed_id: Id
-  frequency: Time
+  frequency: Duration
   indices: string[]
   job_id: Id
   indices_options?: IndicesOptions
   max_empty_searches: integer
   query: QueryDslQueryContainer
-  query_delay: Time
+  query_delay: Duration
   runtime_mappings?: MappingRuntimeFields
   script_fields?: Record
   scroll_size: integer
@@ -13152,7 +13284,7 @@ export interface MlPutJobRequest extends RequestBase {
   allow_lazy_open?: boolean
   analysis_config: MlAnalysisConfig
   analysis_limits?: MlAnalysisLimits
-  background_persist_interval: Time
+  background_persist_interval?: Duration
   custom_settings?: MlCustomSettings
   daily_model_snapshot_retention_after_days?: long
   data_description: MlDataDescription
@@ -13170,8 +13302,8 @@ export interface MlPutJobResponse {
   allow_lazy_open: boolean
   analysis_config: MlAnalysisConfigRead
   analysis_limits: MlAnalysisLimits
-  background_persist_interval?: Time
-  create_time: DateString
+  background_persist_interval?: Duration
+  create_time: DateTime
   custom_settings?: MlCustomSettings
   daily_model_snapshot_retention_after_days: long
   data_description: MlDataDescription
@@ -13327,14 +13459,14 @@ export interface MlRevertModelSnapshotResponse {

 export interface MlSetUpgradeModeRequest extends RequestBase {
   enabled?: boolean
-  timeout?: Time
+  timeout?: Duration
 }

 export type MlSetUpgradeModeResponse = AcknowledgedResponseBase

 export interface MlStartDataFrameAnalyticsRequest extends RequestBase {
   id: Id
-  timeout?: Time
+  timeout?: Duration
 }

 export interface MlStartDataFrameAnalyticsResponse {
@@ -13344,9 +13476,9 @@ export interface MlStartDataFrameAnalyticsResponse {

 export interface MlStartDatafeedRequest extends RequestBase {
   datafeed_id: Id
-  end?: Time
-  start?: Time
-  timeout?: Time
+  end?: DateTime
+  start?: DateTime
+  timeout?: Duration
 }

 export interface MlStartDatafeedResponse {
@@ -13356,22 +13488,23 @@ export interface MlStartDatafeedResponse {

 export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
   model_id: Id
+  cache_size?: ByteSize
   number_of_allocations?: integer
   queue_capacity?: integer
   threads_per_allocation?: integer
-  timeout?: Time
+  timeout?: Duration
   wait_for?: MlDeploymentAllocationState
 }

 export interface MlStartTrainedModelDeploymentResponse {
-  allocation: MlTrainedModelAllocation
+  assignment: MlTrainedModelAssignment
 }

 export interface MlStopDataFrameAnalyticsRequest extends RequestBase {
   id: Id
   allow_no_match?: boolean
   force?: boolean
-  timeout?: Time
+  timeout?: Duration
 }

 export interface MlStopDataFrameAnalyticsResponse {
@@ -13382,7 +13515,7 @@ export interface MlStopDatafeedRequest extends RequestBase {
   datafeed_id: Id
   allow_no_match?: boolean
   force?: boolean
-  timeout?: Time
+  timeout?: Duration
 }

 export interface MlStopDatafeedResponse {
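The deployment request above gains a `cache_size` (a `ByteSize`, expressible as a string). A usage sketch, assuming a configured `client` instance and an already-uploaded model; the model id and sizing values are illustrative:

// Hypothetical deployment start; all values illustrative.
await client.ml.startTrainedModelDeployment({
  model_id: 'my-nlp-model',
  cache_size: '1gb',            // ByteSize
  number_of_allocations: 2,
  threads_per_allocation: 1,
  timeout: '30s',               // Duration
  wait_for: 'started'
})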
@@ -13408,17 +13541,18 @@ export interface MlUpdateDataFrameAnalyticsRequest {
 }

 export interface MlUpdateDataFrameAnalyticsResponse {
-  id: Id
+  authorization?: MlDataframeAnalyticsAuthorization
+  allow_lazy_start: boolean
+  analysis: MlDataframeAnalysisContainer
+  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
   create_time: long
-  version: VersionString
-  source: MlDataframeAnalyticsSource
   description?: string
   dest: MlDataframeAnalyticsDestination
-  model_memory_limit: string
-  allow_lazy_start: boolean
+  id: Id
   max_num_threads: integer
-  analysis: MlDataframeAnalysisContainer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
+  model_memory_limit: string
+  source: MlDataframeAnalyticsSource
+  version: VersionString
 }

 export interface MlUpdateDatafeedRequest extends RequestBase {
@@ -13430,30 +13564,31 @@ export interface MlUpdateDatafeedRequest extends RequestBase {
   aggregations?: Record
   chunking_config?: MlChunkingConfig
   delayed_data_check_config?: MlDelayedDataCheckConfig
-  frequency?: Time
+  frequency?: Duration
   indices?: string[]
   indexes?: string[]
   indices_options?: IndicesOptions
   max_empty_searches?: integer
   query?: QueryDslQueryContainer
-  query_delay?: Time
+  query_delay?: Duration
   runtime_mappings?: MappingRuntimeFields
   script_fields?: Record
   scroll_size?: integer
 }

 export interface MlUpdateDatafeedResponse {
+  authorization?: MlDatafeedAuthorization
   aggregations: Record
   chunking_config: MlChunkingConfig
   delayed_data_check_config?: MlDelayedDataCheckConfig
   datafeed_id: Id
-  frequency: Time
+  frequency: Duration
   indices: string[]
-  job_id: Id
   indices_options?: IndicesOptions
+  job_id: Id
   max_empty_searches: integer
   query: QueryDslQueryContainer
-  query_delay: Time
+  query_delay: Duration
   runtime_mappings?: MappingRuntimeFields
   script_fields?: Record
   scroll_size: integer
@@ -13476,7 +13611,7 @@ export interface MlUpdateJobRequest extends RequestBase {
   job_id: Id
   allow_lazy_open?: boolean
   analysis_limits?: MlAnalysisMemoryLimit
-  background_persist_interval?: Time
+  background_persist_interval?: Duration
   custom_settings?: Record
   categorization_filters?: string[]
   description?: string
@@ -13494,9 +13629,9 @@ export interface MlUpdateJobResponse {
   allow_lazy_open: boolean
   analysis_config: MlAnalysisConfigRead
   analysis_limits: MlAnalysisLimits
-  background_persist_interval?: Time
-  create_time: EpochMillis
-  finished_time?: EpochMillis
+  background_persist_interval?: Duration
+  create_time: EpochTime
+  finished_time?: EpochTime
   custom_settings?: Record
   daily_model_snapshot_retention_after_days: long
   data_description: MlDataDescription
@@ -13530,7 +13665,7 @@ export interface MlUpgradeJobSnapshotRequest extends RequestBase {
   job_id: Id
   snapshot_id: Id
   wait_for_completion?: boolean
-  timeout?: Time
+  timeout?: Duration
 }

 export interface MlUpgradeJobSnapshotResponse {
@@ -13562,7 +13697,7 @@ export interface MonitoringBulkRequest
 | TDocument)[]
 }
@@ -13608,7 +13743,7 @@ export interface NodesCgroupCpu {

 export interface NodesCgroupCpuStat {
   number_of_elapsed_periods?: long
   number_of_times_throttled?: long
-  time_throttled_nanos?: long
+  time_throttled_nanos?: DurationValue
 }

 export interface NodesCgroupMemory {
@@ -13642,21 +13777,21 @@ export interface NodesClusterStateQueue {
 }

 export interface NodesClusterStateUpdate {
-  count?: long
-  computation_time?: string
-  computation_time_millis?: long
-  publication_time?: string
-  publication_time_millis?: long
-  context_construction_time?: string
-  context_construction_time_millis?: long
-  commit_time?: string
-  commit_time_millis?: long
-  completion_time?: string
-  completion_time_millis?: long
-  master_apply_time?: string
-  master_apply_time_millis?: long
-  notification_time?: string
-  notification_time_millis?: long
+  count: long
+  computation_time?: Duration
+  computation_time_millis?: DurationValue
+  publication_time?: Duration
+  publication_time_millis?: DurationValue
+  context_construction_time?: Duration
+  context_construction_time_millis?: DurationValue
+  commit_time?: Duration
+  commit_time_millis?: DurationValue
+  completion_time?: Duration
+  completion_time_millis?: DurationValue
+  master_apply_time?: Duration
+  master_apply_time_millis?: DurationValue
+  notification_time?: Duration
+  notification_time_millis?: DurationValue
 }

 export interface NodesContext {
@@ -13668,18 +13803,18 @@ export interface NodesContext {

 export interface NodesCpu {
   percent?: integer
-  sys?: string
-  sys_in_millis?: long
-  total?: string
-  total_in_millis?: long
-  user?: string
-  user_in_millis?: long
+  sys?: Duration
+  sys_in_millis?: DurationValue
+  total?: Duration
+  total_in_millis?: DurationValue
+  user?: Duration
+  user_in_millis?: DurationValue
   load_average?: Record
 }

 export interface NodesCpuAcct {
   control_group?: string
-  usage_nanos?: long
+  usage_nanos?: DurationValue
 }

 export interface NodesDataPathStats {
@@ -13766,7 +13901,7 @@ export interface NodesIngestTotal {
   current?: long
   failed?: long
   processors?: Record[]
-  time_in_millis?: long
+  time_in_millis?: DurationValue
 }

 export interface NodesIoStatDevice {
@@ -13890,7 +14025,7 @@ export interface NodesProcessor {
   count?: long
   current?: long
   failed?: long
-  time_in_millis?: long
+  time_in_millis?: DurationValue
 }

 export interface NodesPublishedClusterStates {
@@ -13902,8 +14037,8 @@ export interface NodesPublishedClusterStates {

 export interface NodesRecording {
   name?: string
   cumulative_execution_count?: long
-  cumulative_execution_time?: string
-  cumulative_execution_time_millis?: long
+  cumulative_execution_time?: Duration
+  cumulative_execution_time_millis?: DurationValue
 }

 export interface NodesRepositoryLocation {
@@ -13917,8 +14052,8 @@ export interface NodesRepositoryMeteringInformation {
   repository_type: string
   repository_location: NodesRepositoryLocation
   repository_ephemeral_id: Id
-  repository_started_at: EpochMillis
-  repository_stopped_at?: EpochMillis
+  repository_started_at: EpochTime
+  repository_stopped_at?: EpochTime
   archived: boolean
   cluster_version?: VersionNumber
   request_counts: NodesRequestCounts
@@ -14051,11 +14186,11 @@ export interface NodesHotThreadsHotThread {

 export interface NodesHotThreadsRequest extends RequestBase {
   node_id?: NodeIds
   ignore_idle_threads?: boolean
-  interval?: Time
+  interval?: Duration
   snapshots?: long
-  master_timeout?: Time
+  master_timeout?: Duration
   threads?: long
-  timeout?: Time
+  timeout?: Duration
   type?: ThreadType
   sort?: ThreadType
 }
@@ -14364,7 +14499,7 @@ export interface NodesInfoNodeJvmInfo {
   mem: NodesInfoNodeInfoJvmMemory
   memory_pools: string[]
   pid: integer
-  start_time_in_millis: long
+  start_time_in_millis: EpochTime
   version: VersionString
   vm_name: Name
   vm_vendor: string
@@ -14381,7 +14516,7 @@ export interface NodesInfoNodeOperatingSystemInfo {
   allocated_processors?: integer
   name: Name
   pretty_name: Name
-  refresh_interval_in_millis: integer
+  refresh_interval_in_millis: DurationValue
   version: VersionString
   cpu?: NodesInfoNodeInfoOSCPU
   mem?: NodesInfoNodeInfoMemory
@@ -14391,12 +14526,12 @@ export interface NodesInfoNodeProcessInfo {
   id: long
   mlockall: boolean
-  refresh_interval_in_millis: long
+  refresh_interval_in_millis: DurationValue
 }

 export interface NodesInfoNodeThreadPoolInfo {
   core?: integer
-  keep_alive?: string
+  keep_alive?: Duration
   max?: integer
   queue_size: integer
   size?: integer
@@ -14407,8 +14542,8 @@ export interface NodesInfoRequest extends RequestBase {
   node_id?: NodeIds
   metric?: Metrics
   flat_settings?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }
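Note that in the reworked `NodesClusterStateUpdate` only `count` is required; every timing arrives as an optional `Duration` string plus an optional millisecond `DurationValue`. A defensive-read sketch with simplified local aliases:

type Duration = string
type DurationValue = number

interface ClusterStateUpdate {
  count: number
  computation_time?: Duration
  computation_time_millis?: DurationValue
}

function describe(u: ClusterStateUpdate): string {
  // Fall back from the human string to the millis value, then to 'n/a'.
  const t = u.computation_time
    ?? (u.computation_time_millis !== undefined ? `${u.computation_time_millis}ms` : 'n/a')
  return `${u.count} updates, computation ${t}`
}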
 export type NodesInfoResponse = NodesInfoResponseBase
@@ -14420,7 +14555,7 @@ export interface NodesInfoResponseBase extends NodesNodesResponseBase {

 export interface NodesReloadSecureSettingsRequest extends RequestBase {
   node_id?: NodeIds
-  timeout?: Time
+  timeout?: Duration
   secure_settings_password?: Password
 }
@@ -14441,8 +14576,8 @@ export interface NodesStatsRequest extends RequestBase {
   groups?: boolean
   include_segment_file_sizes?: boolean
   level?: Level
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   types?: string[]
   include_unloaded_segments?: boolean
 }
@@ -14456,15 +14591,15 @@ export interface NodesStatsResponseBase extends NodesNodesResponseBase {

 export interface NodesUsageNodeUsage {
   rest_actions: Record
-  since: EpochMillis
-  timestamp: EpochMillis
+  since: EpochTime
+  timestamp: EpochTime
   aggregations: Record
 }

 export interface NodesUsageRequest extends RequestBase {
   node_id?: NodeIds
   metric?: Metrics
-  timeout?: Time
+  timeout?: Duration
 }

 export type NodesUsageResponse = NodesUsageResponseBase
@@ -14475,13 +14610,13 @@ export interface NodesUsageResponseBase extends NodesNodesResponseBase {
 }

 export interface RollupDateHistogramGrouping {
-  delay?: Time
+  delay?: Duration
   field: Field
   format?: string
-  interval?: Time
-  calendar_interval?: Time
-  fixed_interval?: Time
-  time_zone?: string
+  interval?: Duration
+  calendar_interval?: Duration
+  fixed_interval?: Duration
+  time_zone?: TimeZone
 }

 export interface RollupFieldMetric {
@@ -14539,21 +14674,21 @@ export interface RollupGetJobsRollupJobConfiguration {
   metrics: RollupFieldMetric[]
   page_size: long
   rollup_index: IndexName
-  timeout: Time
+  timeout: Duration
 }

 export interface RollupGetJobsRollupJobStats {
   documents_processed: long
   index_failures: long
-  index_time_in_ms: long
+  index_time_in_ms: DurationValue
   index_total: long
   pages_processed: long
   rollups_indexed: long
   search_failures: long
-  search_time_in_ms: long
+  search_time_in_ms: DurationValue
   search_total: long
   trigger_count: long
-  processing_time_in_ms: long
+  processing_time_in_ms: DurationValue
   processing_total: long
 }
@@ -14599,8 +14734,8 @@ export interface RollupGetRollupIndexCapsRollupJobSummary {

 export interface RollupGetRollupIndexCapsRollupJobSummaryField {
   agg: string
-  time_zone?: string
-  calendar_interval?: Time
+  time_zone?: TimeZone
+  calendar_interval?: Duration
 }
@@ -14611,7 +14746,7 @@ export interface RollupPutJobRequest extends RequestBase {
   metrics?: RollupFieldMetric[]
   page_size: integer
   rollup_index: IndexName
-  timeout?: Time
+  timeout?: Duration
   headers?: HttpHeaders
 }
@@ -14654,7 +14789,7 @@ export interface RollupStartJobResponse {

 export interface RollupStopJobRequest extends RequestBase {
   id: Id
-  timeout?: Time
+  timeout?: Duration
   wait_for_completion?: boolean
 }
@@ -14670,7 +14805,7 @@ export interface SearchableSnapshotsCacheStatsNode {

 export interface SearchableSnapshotsCacheStatsRequest extends RequestBase {
   node_id?: NodeIds
-  master_timeout?: Time
+  master_timeout?: Duration
 }

 export interface SearchableSnapshotsCacheStatsResponse {
@@ -14708,7 +14843,7 @@ export interface SearchableSnapshotsMountMountedSnapshot {

 export interface SearchableSnapshotsMountRequest extends RequestBase {
   repository: Name
   snapshot: Name
-  master_timeout?: Time
+  master_timeout?: Duration
   wait_for_completion?: boolean
   storage?: string
   index: IndexName
@@ -14756,7 +14891,7 @@ export interface SecurityClusterNode {
   name: Name
 }

-export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client'
+export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client'

 export interface SecurityCreatedStatus {
   created: boolean
@@ -14787,10 +14922,12 @@ export interface SecurityIndicesPrivileges {
   field_security?: SecurityFieldSecurity | SecurityFieldSecurity[]
   names: Indices
   privileges: SecurityIndexPrivilege[]
-  query?: string[] | QueryDslQueryContainer | SecurityRoleTemplateQueryContainer
+  query?: SecurityIndicesPrivilegesQuery
   allow_restricted_indices?: boolean
 }

+export type SecurityIndicesPrivilegesQuery = string | QueryDslQueryContainer | SecurityRoleTemplateQuery
+
 export interface SecurityManageUserPrivileges {
   applications: string[]
 }
@@ -14804,6 +14941,28 @@ export interface SecurityRealmInfo {
   type: string
 }

+export interface SecurityRoleDescriptor {
+  cluster?: string[]
+  indices?: SecurityIndicesPrivileges[]
+  index?: SecurityIndicesPrivileges[]
+  global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege
+  applications?: SecurityApplicationPrivileges[]
+  metadata?: Metadata
+  run_as?: string[]
+  transient_metadata?: SecurityTransientMetadataConfig
+}
+
+export interface SecurityRoleDescriptorRead {
+  cluster: string[]
+  indices: SecurityIndicesPrivileges[]
+  index: SecurityIndicesPrivileges[]
+  global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege
+  applications?: SecurityApplicationPrivileges[]
+  metadata?: Metadata
+  run_as?: string[]
+  transient_metadata?: SecurityTransientMetadataConfig
+}
+
 export interface SecurityRoleMapping {
   enabled: boolean
   metadata: Metadata
@@ -14825,7 +14984,7 @@ export interface SecurityRoleTemplateInlineScript extends ScriptBase {
   source: string | QueryDslQueryContainer
 }

-export interface SecurityRoleTemplateQueryContainer {
+export interface SecurityRoleTemplateQuery {
   template?: SecurityRoleTemplateScript
 }
@@ -14845,10 +15004,10 @@ export interface SecurityUser {
 }

 export interface SecurityUserProfile {
-  uid: string
+  uid: SecurityUserProfileId
   user: SecurityUserProfileUser
-  data?: Record
-  labels?: Record
+  data: Record
+  labels: Record
   enabled?: boolean
 }
@@ -14857,17 +15016,20 @@ export interface SecurityUserProfileHitMetadata {
   _seq_no: SequenceNumber
 }

+export type SecurityUserProfileId = string
+
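The new `SecurityRoleDescriptor` above consolidates the per-API descriptor shapes that are removed further down (for example `SecurityCreateApiKeyRoleDescriptor`). A sketch of building one, using simplified local types that stand in for the generated ones; the role name, index pattern, and privileges are illustrative:

// Simplified stand-ins for the generated security types.
type IndexPrivilege = 'read' | 'write' | 'all'
interface IndicesPrivileges { names: string | string[]; privileges: IndexPrivilege[] }
interface RoleDescriptor { cluster?: string[]; indices?: IndicesPrivileges[] }

const logsRead: RoleDescriptor = {
  cluster: ['monitor'],
  indices: [{ names: ['logs-*'], privileges: ['read'] }]
}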
 export interface SecurityUserProfileUser {
   email?: string | null
   full_name?: Name | null
-  metadata: Metadata
+  realm_name: Name
+  realm_domain?: Name
   roles: string[]
   username: Username
 }

 export interface SecurityUserProfileWithMetadata extends SecurityUserProfile {
   last_synchronized: long
-  _doc?: SecurityUserProfileHitMetadata
+  _doc: SecurityUserProfileHitMetadata
 }

 export interface SecurityActivateUserProfileRequest extends RequestBase {
@@ -14964,16 +15126,11 @@ export interface SecurityClearCachedServiceTokensResponse {
   nodes: Record
 }

-export interface SecurityCreateApiKeyIndexPrivileges {
-  names: Indices
-  privileges: SecurityIndexPrivilege[]
-}
-
 export interface SecurityCreateApiKeyRequest extends RequestBase {
   refresh?: Refresh
-  expiration?: Time
+  expiration?: Duration
   name?: Name
-  role_descriptors?: Record
+  role_descriptors?: Record
   metadata?: Metadata
 }
@@ -14985,16 +15142,11 @@ export interface SecurityCreateApiKeyResponse {
   encoded: string
 }

-export interface SecurityCreateApiKeyRoleDescriptor {
-  cluster: string[]
-  index: SecurityCreateApiKeyIndexPrivileges[]
-  applications?: SecurityApplicationPrivileges[]
-}
-
 export interface SecurityCreateServiceTokenRequest extends RequestBase {
   namespace: Namespace
   service: Service
-  name: Name
+  name?: Name
+  refresh?: Refresh
 }

 export interface SecurityCreateServiceTokenResponse {
@@ -15066,7 +15218,7 @@ export interface SecurityDisableUserResponse {
 }

 export interface SecurityDisableUserProfileRequest extends RequestBase {
-  uid: string
+  uid: SecurityUserProfileId
   refresh?: Refresh
 }
@@ -15081,7 +15233,7 @@ export interface SecurityEnableUserResponse {
 }

 export interface SecurityEnableUserProfileRequest extends RequestBase {
-  uid: string
+  uid: SecurityUserProfileId
   refresh?: Refresh
 }
@@ -15176,18 +15328,8 @@ export interface SecurityGetServiceAccountsRequest extends RequestBase {
 }

 export type SecurityGetServiceAccountsResponse = Record

-export interface SecurityGetServiceAccountsRoleDescriptor {
-  cluster: string[]
-  indices: SecurityIndicesPrivileges[]
-  global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege
-  applications?: SecurityApplicationPrivileges[]
-  metadata?: Metadata
-  run_as?: string[]
-  transient_metadata?: SecurityTransientMetadataConfig
-}
-
 export interface SecurityGetServiceAccountsRoleDescriptorWrapper {
-  role_descriptor: SecurityGetServiceAccountsRoleDescriptor
+  role_descriptor: SecurityRoleDescriptorRead
 }

 export interface SecurityGetServiceCredentialsNodesCredentials {
@@ -15270,7 +15412,7 @@ export interface SecurityGetUserPrivilegesResponse {
 }

 export interface SecurityGetUserProfileRequest extends RequestBase {
-  uid: string
+  uid: SecurityUserProfileId
   data?: string | string[]
 }
@@ -15280,7 +15422,7 @@ export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password'

 export interface SecurityGrantApiKeyGrantApiKey {
   name: Name
-  expiration?: Time
+  expiration?: Duration
   role_descriptors?: Record[]
 }
@@ -15296,7 +15438,7 @@ export interface SecurityGrantApiKeyResponse {
   api_key: string
   id: Id
   name: Name
-  expiration?: EpochMillis
+  expiration?: EpochTime
 }

 export interface SecurityHasPrivilegesApplicationPrivilegesCheck {
@@ -15332,6 +15474,22 @@ export interface SecurityHasPrivilegesResponse {
   username: Username
 }

+export interface SecurityHasPrivilegesUserProfilePrivilegesCheck {
+  application?: SecurityHasPrivilegesApplicationPrivilegesCheck[]
+  cluster?: SecurityClusterPrivilege[]
+  index?: SecurityHasPrivilegesIndexPrivilegesCheck[]
+}
+
+export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase {
+  uids: SecurityUserProfileId[]
+  privileges: SecurityHasPrivilegesUserProfilePrivilegesCheck
+}
+
+export interface SecurityHasPrivilegesUserProfileResponse {
+  has_privilege_uids: SecurityUserProfileId[]
+  error_uids?: SecurityUserProfileId[]
+}
+
 export interface SecurityInvalidateApiKeyRequest extends RequestBase {
   id?: Id
   ids?: Id[]
@@ -15501,10 +15659,16 @@ export interface SecuritySamlServiceProviderMetadataResponse {
   metadata: string
 }

+export interface SecuritySuggestUserProfilesHint {
+  uids?: SecurityUserProfileId[]
+  labels?: Record
+}
+
 export interface SecuritySuggestUserProfilesRequest extends RequestBase {
-  data?: string | string[]
   name?: string
   size?: long
+  data?: string | string[]
+  hint?: SecuritySuggestUserProfilesHint
 }

 export interface SecuritySuggestUserProfilesResponse {
@@ -15518,12 +15682,22 @@ export interface SecuritySuggestUserProfilesTotalUserProfiles {
   relation: RelationName
 }

+export interface SecurityUpdateApiKeyRequest extends RequestBase {
+  id: Id
+  role_descriptors?: Record
+  metadata?: Metadata
+}
+
+export interface SecurityUpdateApiKeyResponse {
+  updated: boolean
+}
+
 export interface SecurityUpdateUserProfileDataRequest extends RequestBase {
-  uid: string
+  uid: SecurityUserProfileId
   if_seq_no?: SequenceNumber
   if_primary_term?: long
   refresh?: Refresh
-  access?: Record
+  labels?: Record
   data?: Record
 }
@@ -15543,7 +15717,7 @@ export interface ShutdownGetNodeNodeShutdownStatus {
   node_id: NodeId
   type: ShutdownGetNodeShutdownType
   reason: string
-  shutdown_startedmillis: EpochMillis
+  shutdown_startedmillis: EpochTime
   status: ShutdownGetNodeShutdownStatus
   shard_migration: ShutdownGetNodeShardMigrationStatus
   persistent_tasks: ShutdownGetNodePersistentTaskStatus
@@ -15599,14 +15773,14 @@ export interface SlmConfiguration {

 export interface SlmInProgress {
   name: Name
-  start_time_millis: DateString
+  start_time_millis: EpochTime
   state: string
   uuid: Uuid
 }

 export interface SlmInvocation {
   snapshot_name: Name
-  time: DateString
+  time: DateTime
 }

 export interface SlmPolicy {
@@ -15618,7 +15792,7 @@ export interface SlmPolicy {
 }

 export interface SlmRetention {
-  expire_after: Time
+  expire_after: Duration
   max_count: integer
   min_count: integer
 }
@@ -15627,18 +15801,18 @@ export interface SlmSnapshotLifecycle {
   in_progress?: SlmInProgress
   last_failure?: SlmInvocation
   last_success?: SlmInvocation
-  modified_date?: DateString
-  modified_date_millis: EpochMillis
-  next_execution?: DateString
-  next_execution_millis: EpochMillis
+  modified_date?: DateTime
+  modified_date_millis: EpochTime
+  next_execution?: DateTime
+  next_execution_millis: EpochTime
   policy: SlmPolicy
   version: VersionNumber
   stats: SlmStatistics
 }

 export interface SlmStatistics {
-  retention_deletion_time?: DateString
-  retention_deletion_time_millis?: EpochMillis
+  retention_deletion_time?: Duration
+  retention_deletion_time_millis?: DurationValue
   retention_failed?: long
   retention_runs?: long
   retention_timed_out?: long
@@ -15682,8 +15856,8 @@ export interface SlmGetStatsRequest extends RequestBase {
 }

 export interface SlmGetStatsResponse {
-  retention_deletion_time: string
-  retention_deletion_time_millis: EpochMillis
+  retention_deletion_time: Duration
+  retention_deletion_time_millis: DurationValue
   retention_failed: long
   retention_runs: long
   retention_timed_out: long
config?: SlmConfiguration name?: Name repository?: string @@ -15770,8 +15944,9 @@ export type SnapshotShardsStatsStage = 'DONE' | 'FAILURE' | 'FINALIZE' | 'INIT' export interface SnapshotShardsStatsSummary { incremental: SnapshotShardsStatsSummaryItem total: SnapshotShardsStatsSummaryItem - start_time_in_millis: long - time_in_millis: long + start_time_in_millis: EpochTime + time?: Duration + time_in_millis: DurationValue } export interface SnapshotShardsStatsSummaryItem { @@ -15787,21 +15962,21 @@ export interface SnapshotSnapshotIndexStats { export interface SnapshotSnapshotInfo { data_streams: string[] - duration?: Time - duration_in_millis?: EpochMillis - end_time?: Time - end_time_in_millis?: EpochMillis + duration?: Duration + duration_in_millis?: DurationValue + end_time?: DateTime + end_time_in_millis?: EpochTime failures?: SnapshotSnapshotShardFailure[] include_global_state?: boolean - indices: IndexName[] + indices?: IndexName[] index_details?: Record metadata?: Metadata reason?: string repository?: Name snapshot: Name shards?: ShardStatistics - start_time?: Time - start_time_in_millis?: EpochMillis + start_time?: DateTime + start_time_in_millis?: EpochTime state?: string uuid: Uuid version?: VersionString @@ -15826,8 +16001,9 @@ export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_c export interface SnapshotSnapshotStats { incremental: SnapshotFileCountSnapshotStats - start_time_in_millis: long - time_in_millis: long + start_time_in_millis: EpochTime + time?: Duration + time_in_millis: DurationValue total: SnapshotFileCountSnapshotStats } @@ -15849,8 +16025,8 @@ export interface SnapshotCleanupRepositoryCleanupRepositoryResults { export interface SnapshotCleanupRepositoryRequest extends RequestBase { name: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export interface SnapshotCleanupRepositoryResponse { @@ -15861,8 +16037,8 @@ export interface SnapshotCloneRequest extends RequestBase { repository: Name snapshot: Name target_snapshot: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration indices: string } @@ -15871,7 +16047,7 @@ export type SnapshotCloneResponse = AcknowledgedResponseBase export interface SnapshotCreateRequest extends RequestBase { repository: Name snapshot: Name - master_timeout?: Time + master_timeout?: Duration wait_for_completion?: boolean ignore_unavailable?: boolean include_global_state?: boolean @@ -15888,8 +16064,8 @@ export interface SnapshotCreateResponse { export interface SnapshotCreateRepositoryRequest extends RequestBase { name: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration verify?: boolean repository?: SnapshotRepository type: string @@ -15901,15 +16077,15 @@ export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase export interface SnapshotDeleteRequest extends RequestBase { repository: Name snapshot: Name - master_timeout?: Time + master_timeout?: Duration } export type SnapshotDeleteResponse = AcknowledgedResponseBase export interface SnapshotDeleteRepositoryRequest extends RequestBase { name: Names - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase @@ -15918,10 +16094,10 @@ export interface SnapshotGetRequest extends RequestBase { repository: Name snapshot: Names ignore_unavailable?: boolean - master_timeout?: Time + master_timeout?: Duration verbose?: 
boolean index_details?: boolean - human?: boolean + index_names?: boolean include_repository?: boolean sort?: SnapshotSnapshotSort size?: integer @@ -15948,7 +16124,7 @@ export interface SnapshotGetSnapshotResponseItem { export interface SnapshotGetRepositoryRequest extends RequestBase { name?: Names local?: boolean - master_timeout?: Time + master_timeout?: Duration } export type SnapshotGetRepositoryResponse = Record @@ -15956,7 +16132,7 @@ export type SnapshotGetRepositoryResponse = Record export interface SnapshotRestoreRequest extends RequestBase { repository: Name snapshot: Name - master_timeout?: Time + master_timeout?: Duration wait_for_completion?: boolean ignore_index_settings?: string[] ignore_unavailable?: boolean @@ -15983,7 +16159,7 @@ export interface SnapshotStatusRequest extends RequestBase { repository?: Name snapshot?: Names ignore_unavailable?: boolean - master_timeout?: Time + master_timeout?: Duration } export interface SnapshotStatusResponse { @@ -15996,8 +16172,8 @@ export interface SnapshotVerifyRepositoryCompactNodeInfo { export interface SnapshotVerifyRepositoryRequest extends RequestBase { name: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export interface SnapshotVerifyRepositoryResponse { @@ -16029,8 +16205,8 @@ export interface SqlGetAsyncRequest extends RequestBase { id: Id delimiter?: string format?: string - keep_alive?: Time - wait_for_completion_timeout?: Time + keep_alive?: Duration + wait_for_completion_timeout?: Duration } export interface SqlGetAsyncResponse { @@ -16050,8 +16226,8 @@ export interface SqlGetAsyncStatusResponse { id: string is_running: boolean is_partial: boolean - start_time_in_millis: ulong - expiration_time_in_millis: ulong + start_time_in_millis: EpochTime + expiration_time_in_millis: EpochTime completion_status?: uint } @@ -16063,14 +16239,14 @@ export interface SqlQueryRequest extends RequestBase { fetch_size?: integer filter?: QueryDslQueryContainer query?: string - request_timeout?: Time - page_timeout?: Time - time_zone?: string + request_timeout?: Duration + page_timeout?: Duration + time_zone?: TimeZone field_multi_value_leniency?: boolean runtime_mappings?: MappingRuntimeFields - wait_for_completion_timeout?: Time + wait_for_completion_timeout?: Duration params?: Record - keep_alive?: Time + keep_alive?: Duration keep_on_completion?: boolean index_using_frozen?: boolean } @@ -16088,19 +16264,21 @@ export interface SqlTranslateRequest extends RequestBase { fetch_size?: integer filter?: QueryDslQueryContainer query: string - time_zone?: string + time_zone?: TimeZone } export interface SqlTranslateResponse { - size: long - _source: SearchSourceConfig - fields: Record[] - sort: Sort + aggregations?: Record + size?: long + _source?: SearchSourceConfig + fields?: (QueryDslFieldAndFormat | Field)[] + query?: QueryDslQueryContainer + sort?: Sort } export interface SslCertificatesCertificateInformation { alias: string | null - expiry: DateString + expiry: DateTime format: string has_private_key: boolean path: string @@ -16136,9 +16314,9 @@ export interface TasksTaskInfo { headers: Record id: long node: NodeId - running_time?: string - running_time_in_nanos: long - start_time_in_millis: long + running_time?: Duration + running_time_in_nanos: DurationValue + start_time_in_millis: EpochTime status?: TasksTaskStatus type: string parent_task_id?: TaskId @@ -16162,12 +16340,12 @@ export interface TasksTaskStatus { failures?: string[] requests_per_second: float retries: Retries - 
throttled?: Time - throttled_millis: long - throttled_until?: Time - throttled_until_millis: long + throttled?: Duration + throttled_millis: DurationValue + throttled_until?: Duration + throttled_until_millis: DurationValue timed_out?: boolean - took?: long + took?: DurationValue total: long updated: long version_conflicts: long @@ -16185,7 +16363,7 @@ export type TasksCancelResponse = TasksTaskListResponseBase export interface TasksGetRequest extends RequestBase { task_id: Id - timeout?: Time + timeout?: Duration wait_for_completion?: boolean } @@ -16202,8 +16380,8 @@ export interface TasksListRequest extends RequestBase { group_by?: TasksGroupBy node_id?: string[] parent_task_id?: Id - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration wait_for_completion?: boolean } @@ -16233,7 +16411,7 @@ export interface TextStructureFindStructureRequest { lines_to_sample?: uint quote?: string should_trim_fields?: boolean - timeout?: Time + timeout?: Duration timestamp_field?: Field timestamp_format?: string text_files?: TJsonDocument[] @@ -16294,7 +16472,7 @@ export interface TransformPivotGroupByContainer { export interface TransformRetentionPolicy { field: Field - max_age: Time + max_age: Duration } export interface TransformRetentionPolicyContainer { @@ -16320,14 +16498,14 @@ export interface TransformSyncContainer { } export interface TransformTimeSync { - delay?: Time + delay?: Duration field: Field } export interface TransformDeleteTransformRequest extends RequestBase { transform_id: Id force?: boolean - timeout?: Time + timeout?: Duration } export type TransformDeleteTransformResponse = AcknowledgedResponseBase @@ -16346,32 +16524,34 @@ export interface TransformGetTransformResponse { } export interface TransformGetTransformTransformSummary { - dest: ReindexDestination + authorization?: MlTransformAuthorization + create_time?: EpochTime description?: string - frequency?: Time + dest: ReindexDestination + frequency?: Duration id: Id + latest?: TransformLatest pivot?: TransformPivot + retention_policy?: TransformRetentionPolicyContainer settings?: TransformSettings source: TransformSource sync?: TransformSyncContainer - create_time?: EpochMillis version?: VersionString - latest?: TransformLatest _meta?: Metadata } export interface TransformGetTransformStatsCheckpointStats { checkpoint: long checkpoint_progress?: TransformGetTransformStatsTransformProgress - timestamp?: DateString - timestamp_millis?: EpochMillis - time_upper_bound?: DateString - time_upper_bound_millis?: EpochMillis + timestamp?: DateTime + timestamp_millis?: EpochTime + time_upper_bound?: DateTime + time_upper_bound_millis?: EpochTime } export interface TransformGetTransformStatsCheckpointing { changes_last_detected_at?: long - changes_last_detected_at_date_time?: DateString + changes_last_detected_at_date_time?: DateTime last: TransformGetTransformStatsCheckpointStats next?: TransformGetTransformStatsCheckpointStats operations_behind?: long @@ -16391,21 +16571,21 @@ export interface TransformGetTransformStatsResponse { } export interface TransformGetTransformStatsTransformIndexerStats { - delete_time_in_ms?: EpochMillis + delete_time_in_ms?: EpochTime documents_indexed: long documents_deleted?: long documents_processed: long - exponential_avg_checkpoint_duration_ms: double + exponential_avg_checkpoint_duration_ms: DurationValue exponential_avg_documents_indexed: double exponential_avg_documents_processed: double index_failures: long - index_time_in_ms: long + index_time_in_ms: DurationValue 
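// Hedged note: DurationValue and EpochTime, declared further down in this
// diff, are presumably thin generic aliases over the numeric Unit types
// (UnitMillis, UnitNanos, UnitSeconds) also added there, so *_in_ms fields
// stay plain numbers at runtime, e.g. for any value of this indexer-stats
// interface (variable name illustrative):
//   const meanIndexMs = stats.index_time_in_ms / stats.index_total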
index_total: long pages_processed: long - processing_time_in_ms: long + processing_time_in_ms: DurationValue processing_total: long search_failures: long - search_time_in_ms: long + search_time_in_ms: DurationValue search_total: long trigger_count: long } @@ -16429,10 +16609,10 @@ export interface TransformGetTransformStatsTransformStats { export interface TransformPreviewTransformRequest extends RequestBase { transform_id?: Id - timeout?: Time + timeout?: Duration dest?: TransformDestination description?: string - frequency?: Time + frequency?: Duration pivot?: TransformPivot source?: TransformSource settings?: TransformSettings @@ -16449,10 +16629,10 @@ export interface TransformPreviewTransformResponse { export interface TransformPutTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean - timeout?: Time + timeout?: Duration dest: TransformDestination description?: string - frequency?: Time + frequency?: Duration latest?: TransformLatest _meta?: Metadata pivot?: TransformPivot @@ -16473,7 +16653,7 @@ export type TransformResetTransformResponse = AcknowledgedResponseBase export interface TransformStartTransformRequest extends RequestBase { transform_id: Id - timeout?: Time + timeout?: Duration } export type TransformStartTransformResponse = AcknowledgedResponseBase @@ -16482,7 +16662,7 @@ export interface TransformStopTransformRequest extends RequestBase { transform_id: Name allow_no_match?: boolean force?: boolean - timeout?: Time + timeout?: Duration wait_for_checkpoint?: boolean wait_for_completion?: boolean } @@ -16492,10 +16672,10 @@ export type TransformStopTransformResponse = AcknowledgedResponseBase export interface TransformUpdateTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean - timeout?: Time + timeout?: Duration dest?: TransformDestination description?: string - frequency?: Time + frequency?: Duration _meta?: Metadata source?: TransformSource settings?: TransformSettings @@ -16504,10 +16684,11 @@ export interface TransformUpdateTransformRequest extends RequestBase { } export interface TransformUpdateTransformResponse { + authorization?: MlTransformAuthorization create_time: long description: string dest: ReindexDestination - frequency?: Time + frequency?: Duration id: Id latest?: TransformLatest pivot?: TransformPivot @@ -16521,7 +16702,7 @@ export interface TransformUpdateTransformResponse { export interface TransformUpgradeTransformsRequest extends RequestBase { dry_run?: boolean - timeout?: Time + timeout?: Duration } export interface TransformUpgradeTransformsResponse { @@ -16532,7 +16713,7 @@ export interface TransformUpgradeTransformsResponse { export interface WatcherAcknowledgeState { state: WatcherAcknowledgementOptions - timestamp: DateString + timestamp: DateTime } export type WatcherAcknowledgementOptions = 'awaits_successful_execution' | 'ackable' | 'acked' @@ -16543,8 +16724,8 @@ export interface WatcherAction { foreach?: string max_iterations?: integer name?: Name - throttle_period?: Time - throttle_period_in_millis?: EpochMillis + throttle_period?: Duration + throttle_period_in_millis?: DurationValue transform?: TransformContainer index?: WatcherIndexAction logging?: WatcherLoggingAction @@ -16571,7 +16752,7 @@ export type WatcherActions = Record export interface WatcherActivationState { active: boolean - timestamp: Timestamp + timestamp: DateTime } export interface WatcherActivationStatus { @@ -16626,7 +16807,7 @@ export type WatcherConnectionScheme = 'http' | 'https' export type 
WatcherCronExpression = string export interface WatcherDailySchedule { - at: WatcherTimeOfDay[] + at: WatcherScheduleTimeOfDay[] } export type WatcherDataAttachmentFormat = 'json' | 'yaml' @@ -16645,7 +16826,7 @@ export interface WatcherEmail { from?: string priority?: WatcherEmailPriority reply_to?: string[] - sent_date?: DateString + sent_date?: DateTime subject: string to: string[] attachments?: Record @@ -16678,8 +16859,8 @@ export type WatcherExecutionPhase = 'awaits_execution' | 'started' | 'input' | ' export interface WatcherExecutionResult { actions: WatcherExecutionResultAction[] condition: WatcherExecutionResultCondition - execution_duration: integer - execution_time: DateString + execution_duration: DurationValue + execution_time: DateTime input: WatcherExecutionResultInput } @@ -16711,7 +16892,7 @@ export interface WatcherExecutionResultInput { export interface WatcherExecutionState { successful: boolean - timestamp: DateString + timestamp: DateTime reason?: string } @@ -16763,7 +16944,7 @@ export interface WatcherHttpInputProxy { export interface WatcherHttpInputRequestDefinition { auth?: WatcherHttpInputAuthentication body?: string - connection_timeout?: Time + connection_timeout?: Duration headers?: Record host?: Host method?: WatcherHttpInputMethod @@ -16771,7 +16952,7 @@ export interface WatcherHttpInputRequestDefinition { path?: string port?: uint proxy?: WatcherHttpInputProxy - read_timeout?: Time + read_timeout?: Duration scheme?: WatcherConnectionScheme url?: string } @@ -16790,7 +16971,7 @@ export interface WatcherIndexAction { doc_id?: Id refresh?: Refresh op_type?: OpType - timeout?: Time + timeout?: Duration execution_time_field?: Field } @@ -16882,7 +17063,7 @@ export interface WatcherReportingEmailAttachment { url: string inline?: boolean retries?: integer - interval?: Time + interval?: Duration request?: WatcherHttpInputRequestDefinition } @@ -16892,15 +17073,17 @@ export interface WatcherScheduleContainer { cron?: WatcherCronExpression daily?: WatcherDailySchedule hourly?: WatcherHourlySchedule - interval?: Time + interval?: Duration monthly?: WatcherTimeOfMonth | WatcherTimeOfMonth[] weekly?: WatcherTimeOfWeek | WatcherTimeOfWeek[] yearly?: WatcherTimeOfYear | WatcherTimeOfYear[] } +export type WatcherScheduleTimeOfDay = string | WatcherHourAndMinute + export interface WatcherScheduleTriggerEvent { - scheduled_time: DateString - triggered_time?: DateString + scheduled_time: DateTime + triggered_time?: DateTime } export interface WatcherScriptCondition { @@ -16913,7 +17096,7 @@ export interface WatcherScriptCondition { export interface WatcherSearchInput { extract?: string[] request: WatcherSearchInputRequestDefinition - timeout?: Time + timeout?: Duration } export interface WatcherSearchInputRequestBody { @@ -16963,7 +17146,7 @@ export interface WatcherSlackAttachment { thumb_url?: string title: string title_link?: string - ts?: DateString + ts?: EpochTime } export interface WatcherSlackAttachmentField { @@ -16993,11 +17176,9 @@ export interface WatcherSlackResult { export interface WatcherThrottleState { reason: string - timestamp: DateString + timestamp: DateTime } -export type WatcherTimeOfDay = string | WatcherHourAndMinute - export interface WatcherTimeOfMonth { at: string[] on: integer[] @@ -17024,7 +17205,7 @@ export interface WatcherTriggerEventContainer { export interface WatcherTriggerEventResult { manual: WatcherTriggerEventContainer - triggered_time: DateString + triggered_time: DateTime type: string } @@ -17034,16 +17215,16 @@ export interface 
WatcherWatch { input: WatcherInputContainer metadata?: Metadata status?: WatcherWatchStatus - throttle_period?: string + throttle_period?: Duration + throttle_period_in_millis?: DurationValue transform?: TransformContainer trigger: WatcherTriggerContainer - throttle_period_in_millis?: long } export interface WatcherWatchStatus { actions: WatcherActions - last_checked?: DateString - last_met_condition?: DateString + last_checked?: DateTime + last_met_condition?: DateTime state: WatcherActivationState version: VersionNumber execution_state?: string @@ -17191,12 +17372,12 @@ export interface WatcherStatsResponse { } export interface WatcherStatsWatchRecordQueuedStats { - execution_time: DateString + execution_time: DateTime } export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQueuedStats { execution_phase: WatcherExecutionPhase - triggered_time: DateString + triggered_time: DateTime executed_actions?: string[] watch_id: Id watch_record_id: Id @@ -17221,7 +17402,7 @@ export interface WatcherStopRequest extends RequestBase { export type WatcherStopResponse = AcknowledgedResponseBase export interface XpackInfoBuildInformation { - date: DateString + date: DateTime hash: string } @@ -17264,7 +17445,7 @@ export interface XpackInfoFeatures { } export interface XpackInfoMinimalLicenseInformation { - expiry_date_in_millis: EpochMillis + expiry_date_in_millis: EpochTime mode: LicenseLicenseType status: LicenseLicenseStatus type: LicenseLicenseType @@ -17552,7 +17733,7 @@ export interface XpackUsageRealmCache { } export interface XpackUsageRequest extends RequestBase { - master_timeout?: Time + master_timeout?: Duration } export interface XpackUsageResponse { @@ -17689,8 +17870,8 @@ export interface XpackUsageWatcher extends XpackUsageBase { } export interface XpackUsageWatcherActionTotals { - total: long - total_time_in_ms: long + total: Duration + total_time_in_ms: DurationValue } export interface XpackUsageWatcherActions { @@ -17732,7 +17913,7 @@ export interface SpecUtilsCommonCatQueryParameters { h?: Names help?: boolean local?: boolean - master_timeout?: Time + master_timeout?: Duration s?: Names v?: boolean } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 20ba75e68..bdd9bddd3 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -64,7 +64,7 @@ export interface BulkRequest ex _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields - timeout?: Time + timeout?: Duration wait_for_active_shards?: WaitForActiveShards require_alias?: boolean /** @deprecated The use of the 'body' key has been deprecated, use 'operations' instead. */ @@ -114,10 +114,10 @@ export interface BulkWriteOperation extends BulkOperationBase { } export interface ClearScrollRequest extends RequestBase { - scroll_id?: Ids + scroll_id?: ScrollIds /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { - scroll_id?: Ids + scroll_id?: ScrollIds } } @@ -171,7 +171,7 @@ export interface CreateRequest extends RequestBase { pipeline?: string refresh?: Refresh routing?: Routing - timeout?: Time + timeout?: Duration version?: VersionNumber version_type?: VersionType wait_for_active_shards?: WaitForActiveShards @@ -188,7 +188,7 @@ export interface DeleteRequest extends RequestBase { if_seq_no?: SequenceNumber refresh?: Refresh routing?: Routing - timeout?: Time + timeout?: Duration version?: VersionNumber version_type?: VersionType wait_for_active_shards?: WaitForActiveShards @@ -208,22 +208,21 @@ export interface DeleteByQueryRequest extends RequestBase { from?: long ignore_unavailable?: boolean lenient?: boolean - max_docs?: long preference?: string refresh?: boolean request_cache?: boolean - requests_per_second?: long + requests_per_second?: float routing?: Routing q?: string - scroll?: Time + scroll?: Duration scroll_size?: long - search_timeout?: Time + search_timeout?: Duration search_type?: SearchType - slices?: long + slices?: Slices sort?: string[] stats?: string[] terminate_after?: long - timeout?: Time + timeout?: Duration version?: boolean wait_for_active_shards?: WaitForActiveShards wait_for_completion?: boolean @@ -244,25 +243,27 @@ export interface DeleteByQueryResponse { retries?: Retries slice_id?: integer task?: TaskId - throttled_millis?: long - throttled_until_millis?: long + throttled?: Duration + throttled_millis: DurationValue + throttled_until?: Duration + throttled_until_millis: DurationValue timed_out?: boolean - took?: long + took?: DurationValue total?: long version_conflicts?: long } export interface DeleteByQueryRethrottleRequest extends RequestBase { task_id: Id - requests_per_second?: long + requests_per_second?: float } export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase export interface DeleteScriptRequest extends RequestBase { id: Id - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type DeleteScriptResponse = AcknowledgedResponseBase @@ -408,7 +409,7 @@ export type GetResponse = GetGetResult export interface GetScriptRequest extends RequestBase { id: Id - master_timeout?: Time + master_timeout?: Duration } export interface GetScriptResponse { @@ -479,7 +480,7 @@ export interface IndexRequest extends RequestBase { pipeline?: string refresh?: Refresh routing?: Routing - timeout?: Time + timeout?: Duration version?: VersionNumber version_type?: VersionType wait_for_active_shards?: WaitForActiveShards @@ -584,20 +585,32 @@ export interface MsearchMultiSearchResult { export interface MsearchMultisearchBody { aggregations?: Record aggs?: Record + collapse?: SearchFieldCollapse query?: QueryDslQueryContainer explain?: boolean stored_fields?: Fields docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + knn?: KnnQuery from?: integer + highlight?: SearchHighlight + indices_boost?: Record[] + min_score?: double + post_filter?: QueryDslQueryContainer + profile?: boolean + rescore?: SearchRescore | SearchRescore[] + script_fields?: Record + search_after?: SortResults size?: integer sort?: Sort _source?: SearchSourceConfig + fields?: (QueryDslFieldAndFormat | Field)[] terminate_after?: long stats?: string[] timeout?: string track_scores?: boolean track_total_hits?: SearchTrackHits version?: boolean + runtime_mappings?: MappingRuntimeFields seq_no_primary_term?: boolean pit?: SearchPointInTimeReference suggest?: SearchSuggester @@ -610,7 +623,7 @@ export interface MsearchMultisearchHeader { 
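// Each msearch item pairs one of these headers with a MsearchMultisearchBody
// from the hunk above (MsearchRequest below deprecates 'body' in favour of
// 'searches'). A hedged sketch, index name and vector purely illustrative:
//   await client.msearch({ searches: [
//     { index: 'products' },
//     { knn: { field: 'embedding', query_vector: [0.1, 0.2], k: 5, num_candidates: 50 } },
//   ] })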
index?: Indices preference?: string request_cache?: boolean - routing?: string + routing?: Routing search_type?: SearchType ccs_minimize_roundtrips?: boolean allow_partial_search_results?: boolean @@ -627,8 +640,9 @@ export interface MsearchRequest extends RequestBase { max_concurrent_searches?: long max_concurrent_shard_requests?: long pre_filter_shard_size?: long - search_type?: SearchType rest_total_hits_as_int?: boolean + routing?: Routing + search_type?: SearchType typed_keys?: boolean /** @deprecated The use of the 'body' key has been deprecated, use 'searches' instead. */ body?: MsearchRequestItem[] @@ -681,7 +695,6 @@ export interface MtermvectorsOperation { export interface MtermvectorsRequest extends RequestBase { index?: IndexName - ids?: Id[] fields?: Fields field_statistics?: boolean offsets?: boolean @@ -716,7 +729,7 @@ export interface MtermvectorsTermVectorsResult { export interface OpenPointInTimeRequest extends RequestBase { index: Indices - keep_alive: Time + keep_alive: Duration ignore_unavailable?: boolean } @@ -732,8 +745,8 @@ export type PingResponse = boolean export interface PutScriptRequest extends RequestBase { id: Id context?: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { script: StoredScript @@ -846,19 +859,20 @@ export interface ReindexDestination { } export interface ReindexRemoteSource { - connect_timeout: Time + connect_timeout?: Duration + headers?: Record host: Host - username: Username - password: Password - socket_timeout: Time + username?: Username + password?: Password + socket_timeout?: Duration } export interface ReindexRequest extends RequestBase { refresh?: boolean - requests_per_second?: long - scroll?: Time - slices?: long - timeout?: Time + requests_per_second?: float + scroll?: Duration + slices?: Slices + timeout?: Duration wait_for_active_shards?: WaitForActiveShards wait_for_completion?: boolean require_alias?: boolean @@ -880,13 +894,13 @@ export interface ReindexResponse { failures?: BulkIndexByScrollFailure[] noops?: long retries?: Retries - requests_per_second?: long + requests_per_second?: float slice_id?: integer task?: TaskId - throttled_millis?: EpochMillis - throttled_until_millis?: EpochMillis + throttled_millis?: EpochTime + throttled_until_millis?: EpochTime timed_out?: boolean - took?: Time + took?: DurationValue total?: long updated?: long version_conflicts?: long @@ -914,8 +928,10 @@ export interface ReindexRethrottleReindexStatus { noops: long requests_per_second: float retries: Retries - throttled_millis: long - throttled_until_millis: long + throttled?: Duration + throttled_millis: DurationValue + throttled_until?: Duration + throttled_until_millis: DurationValue total: long updated: long version_conflicts: long @@ -927,8 +943,8 @@ export interface ReindexRethrottleReindexTask { description: string id: long node: Name - running_time_in_nanos: long - start_time_in_millis: long + running_time_in_nanos: DurationValue + start_time_in_millis: EpochTime status: ReindexRethrottleReindexStatus type: string headers: HttpHeaders @@ -936,7 +952,7 @@ export interface ReindexRethrottleReindexTask { export interface ReindexRethrottleRequest extends RequestBase { task_id: Id - requests_per_second?: long + requests_per_second?: float } export interface ReindexRethrottleResponse { @@ -978,11 +994,10 @@ export interface ScriptsPainlessExecuteResponse { export interface 
ScrollRequest extends RequestBase { scroll_id?: ScrollId - scroll?: Time rest_total_hits_as_int?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - scroll?: Time + scroll?: Duration scroll_id: ScrollId } } @@ -999,9 +1014,7 @@ export interface SearchRequest extends RequestBase { ccs_minimize_roundtrips?: boolean default_operator?: QueryDslOperator df?: string - docvalue_fields?: Fields expand_wildcards?: ExpandWildcards - explain?: boolean ignore_throttled?: boolean ignore_unavailable?: boolean lenient?: boolean @@ -1011,29 +1024,17 @@ export interface SearchRequest extends RequestBase { pre_filter_shard_size?: long request_cache?: boolean routing?: Routing - scroll?: Time + scroll?: Duration search_type?: SearchType - stats?: string[] - stored_fields?: Fields suggest_field?: Field suggest_mode?: SuggestMode suggest_size?: long suggest_text?: string - terminate_after?: long - timeout?: Time - track_total_hits?: SearchTrackHits - track_scores?: boolean typed_keys?: boolean rest_total_hits_as_int?: boolean - version?: boolean - _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields - seq_no_primary_term?: boolean q?: string - size?: integer - from?: integer - sort?: string | string[] /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { aggregations?: Record @@ -1045,6 +1046,7 @@ export interface SearchRequest extends RequestBase { track_total_hits?: SearchTrackHits indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + knn?: KnnQuery min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -1107,7 +1109,7 @@ export interface SearchAggregationBreakdown { export interface SearchAggregationProfile { breakdown: SearchAggregationBreakdown description: string - time_in_nanos: long + time_in_nanos: DurationValue type: string debug?: SearchAggregationProfileDebug children?: SearchAggregationProfile[] @@ -1154,7 +1156,7 @@ export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' export interface SearchCollector { name: string reason: string - time_in_nanos: long + time_in_nanos: DurationValue children?: SearchCollector[] } @@ -1167,7 +1169,7 @@ export interface SearchCompletionContext { } export interface SearchCompletionSuggest extends SearchSuggestBase { - options: SearchCompletionSuggestOption[] + options: SearchCompletionSuggestOption | SearchCompletionSuggestOption[] } export interface SearchCompletionSuggestOption { @@ -1210,7 +1212,7 @@ export interface SearchDirectGenerator { export interface SearchFetchProfile { type: string description: string - time_in_nanos: long + time_in_nanos: DurationValue breakdown: SearchFetchProfileBreakdown debug?: SearchFetchProfileDebug children?: SearchFetchProfile[] @@ -1246,51 +1248,38 @@ export interface SearchFieldSuggester { text?: string } -export interface SearchHighlight { - fields: Record - type?: SearchHighlighterType - boundary_chars?: string - boundary_max_scan?: integer - boundary_scanner?: SearchBoundaryScanner - boundary_scanner_locale?: string +export interface SearchHighlight extends SearchHighlightBase { encoder?: SearchHighlighterEncoder - fragmenter?: SearchHighlighterFragmenter - fragment_offset?: integer - fragment_size?: integer - max_fragment_length?: integer - no_match_size?: integer - number_of_fragments?: integer - order?: SearchHighlighterOrder - post_tags?: string[] - pre_tags?: string[] - 
require_field_match?: boolean - tags_schema?: SearchHighlighterTagsSchema - highlight_query?: QueryDslQueryContainer - max_analyzed_offset?: string | integer + fields: Record } -export interface SearchHighlightField { +export interface SearchHighlightBase { + type?: SearchHighlighterType boundary_chars?: string boundary_max_scan?: integer boundary_scanner?: SearchBoundaryScanner boundary_scanner_locale?: string - field?: Field force_source?: boolean fragmenter?: SearchHighlighterFragmenter - fragment_offset?: integer fragment_size?: integer + highlight_filter?: boolean highlight_query?: QueryDslQueryContainer - matched_fields?: Fields max_fragment_length?: integer + max_analyzed_offset?: integer no_match_size?: integer number_of_fragments?: integer + options?: Record order?: SearchHighlighterOrder phrase_limit?: integer post_tags?: string[] pre_tags?: string[] require_field_match?: boolean tags_schema?: SearchHighlighterTagsSchema - type?: SearchHighlighterType +} + +export interface SearchHighlightField extends SearchHighlightBase { + fragment_offset?: integer + matched_fields?: Fields } export type SearchHighlighterEncoder = 'default' | 'html' @@ -1371,7 +1360,7 @@ export interface SearchNestedIdentity { } export interface SearchPhraseSuggest extends SearchSuggestBase { - options: SearchPhraseSuggestOption + options: SearchPhraseSuggestOption | SearchPhraseSuggestOption[] } export interface SearchPhraseSuggestCollate { @@ -1414,7 +1403,7 @@ export interface SearchPhraseSuggester extends SearchSuggesterBase { export interface SearchPointInTimeReference { id: Id - keep_alive?: Time + keep_alive?: Duration } export interface SearchProfile { @@ -1445,7 +1434,7 @@ export interface SearchQueryBreakdown { export interface SearchQueryProfile { breakdown: SearchQueryBreakdown description: string - time_in_nanos: long + time_in_nanos: DurationValue type: string children?: SearchQueryProfile[] } @@ -1509,11 +1498,11 @@ export interface SearchSuggestBase { } export interface SearchSuggestFuzziness { - fuzziness: Fuzziness - min_length: integer - prefix_length: integer - transpositions: boolean - unicode_aware: boolean + fuzziness?: Fuzziness + min_length?: integer + prefix_length?: integer + transpositions?: boolean + unicode_aware?: boolean } export type SearchSuggestSort = 'score' | 'frequency' @@ -1531,7 +1520,7 @@ export interface SearchSuggesterBase { } export interface SearchTermSuggest extends SearchSuggestBase { - options: SearchTermSuggestOption + options: SearchTermSuggestOption | SearchTermSuggestOption[] } export interface SearchTermSuggestOption { @@ -1570,11 +1559,6 @@ export interface SearchMvtRequest extends RequestBase { zoom: SearchMvtZoomLevel x: SearchMvtCoordinate y: SearchMvtCoordinate - exact_bounds?: boolean - extent?: integer - grid_precision?: integer - grid_type?: SearchMvtGridType - size?: integer /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
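// One hedged observation on the highlight hunk above: per-field options now
// extend the shared SearchHighlightBase, so a request fragment such as
//   highlight: { order: 'score', fields: { title: { number_of_fragments: 0 } } }
// gets every common option (pre_tags, boundary_scanner, fragment_size, ...)
// at both the top level and per field; field names here are illustrative.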
*/ body?: { aggs?: Record @@ -1625,13 +1609,11 @@ export interface SearchTemplateRequest extends RequestBase { allow_no_indices?: boolean ccs_minimize_roundtrips?: boolean expand_wildcards?: ExpandWildcards - explain?: boolean ignore_throttled?: boolean ignore_unavailable?: boolean preference?: string - profile?: boolean routing?: Routing - scroll?: Time + scroll?: Duration search_type?: SearchType rest_total_hits_as_int?: boolean typed_keys?: boolean @@ -1668,7 +1650,7 @@ export interface TermsEnumRequest extends RequestBase { body?: { field: Field size?: integer - timeout?: Time + timeout?: Duration case_insensitive?: boolean index_filter?: QueryDslQueryContainer string?: string @@ -1733,7 +1715,7 @@ export interface TermvectorsTerm { doc_freq?: integer score?: double term_freq: integer - tokens: TermvectorsToken[] + tokens?: TermvectorsToken[] ttf?: integer } @@ -1759,9 +1741,8 @@ export interface UpdateRequest require_alias?: boolean retry_on_conflict?: integer routing?: Routing - timeout?: Time + timeout?: Duration wait_for_active_shards?: WaitForActiveShards - _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ @@ -1787,29 +1768,27 @@ export interface UpdateByQueryRequest extends RequestBase { allow_no_indices?: boolean analyzer?: string analyze_wildcard?: boolean - conflicts?: Conflicts default_operator?: QueryDslOperator df?: string expand_wildcards?: ExpandWildcards from?: long ignore_unavailable?: boolean lenient?: boolean - max_docs?: long pipeline?: string preference?: string refresh?: boolean request_cache?: boolean - requests_per_second?: long + requests_per_second?: float routing?: Routing - scroll?: Time + scroll?: Duration scroll_size?: long - search_timeout?: Time + search_timeout?: Duration search_type?: SearchType - slices?: long + slices?: Slices sort?: string[] stats?: string[] terminate_after?: long - timeout?: Time + timeout?: Duration version?: boolean version_type?: boolean wait_for_active_shards?: WaitForActiveShards @@ -1833,17 +1812,19 @@ export interface UpdateByQueryResponse { retries?: Retries task?: TaskId timed_out?: boolean - took?: long + took?: DurationValue total?: long updated?: long version_conflicts?: long - throttled_millis?: ulong - throttled_until_millis?: ulong + throttled?: Duration + throttled_millis?: DurationValue + throttled_until?: Duration + throttled_until_millis?: DurationValue } export interface UpdateByQueryRethrottleRequest extends RequestBase { task_id: Id - requests_per_second?: long + requests_per_second?: float } export interface UpdateByQueryRethrottleResponse { @@ -1863,6 +1844,8 @@ export interface SpecUtilsBaseNode { transport_address: TransportAddress } +export type SpecUtilsStringified = T | string + export interface AcknowledgedResponseBase { acknowledged: boolean } @@ -1879,12 +1862,12 @@ export interface BulkIndexByScrollFailure { export interface BulkStats { total_operations: long - total_time?: string - total_time_in_millis: long + total_time?: Duration + total_time_in_millis: DurationValue total_size?: ByteSize total_size_in_bytes: long - avg_time?: string - avg_time_in_millis: long + avg_time?: Duration + avg_time_in_millis: DurationValue avg_size?: ByteSize avg_size_in_bytes: long } @@ -1930,11 +1913,7 @@ export type DateFormat = string export type DateMath = string -export type DateMathTime = string - -export type DateOrEpochMillis = DateString | EpochMillis - -export 
type DateString = string +export type DateTime = string | EpochTime export type Distance = string @@ -1945,8 +1924,14 @@ export interface DocStats { deleted?: long } +export type Duration = string | -1 | 0 + +export type DurationLarge = string + +export type DurationValue = Unit + export interface ElasticsearchVersionInfo { - build_date: DateString + build_date: DateTime build_flavor: string build_hash: string build_snapshot: boolean @@ -1960,7 +1945,7 @@ export interface ElasticsearchVersionInfo { export interface EmptyObject { } -export type EpochMillis = string | long +export type EpochTime = Unit export interface ErrorCauseKeys { type: string @@ -2020,8 +2005,8 @@ export type Fields = Field | Field[] export interface FlushStats { periodic: long total: long - total_time?: string - total_time_in_millis: long + total_time?: Duration + total_time_in_millis: DurationValue } export type Fuzziness = string | integer @@ -2065,14 +2050,14 @@ export type GeoTilePrecision = number export interface GetStats { current: long - exists_time?: string - exists_time_in_millis: long + exists_time?: Duration + exists_time_in_millis: DurationValue exists_total: long - missing_time?: string - missing_time_in_millis: long + missing_time?: Duration + missing_time_in_millis: DurationValue missing_total: long - time?: string - time_in_millis: long + time?: Duration + time_in_millis: DurationValue total: long } @@ -2101,15 +2086,15 @@ export type IndexPatterns = IndexPattern[] export interface IndexingStats { index_current: long delete_current: long - delete_time?: string - delete_time_in_millis: long + delete_time?: Duration + delete_time_in_millis: DurationValue delete_total: long is_throttled: boolean noop_update_total: long - throttle_time?: string - throttle_time_in_millis: long - index_time?: string - index_time_in_millis: long + throttle_time?: Duration + throttle_time_in_millis: DurationValue + index_time?: Duration + index_time_in_millis: DurationValue index_total: long index_failed: long types?: Record @@ -2147,6 +2132,15 @@ export interface InlineScript extends ScriptBase { export type Ip = string +export interface KnnQuery { + field: Field + query_vector: double[] + k: long + num_candidates: long + boost?: float + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] +} + export interface LatLonGeoLocation { lat: double lon: double @@ -2169,12 +2163,12 @@ export interface MergesStats { total_docs: long total_size?: string total_size_in_bytes: long - total_stopped_time?: string - total_stopped_time_in_millis: long - total_throttled_time?: string - total_throttled_time_in_millis: long - total_time?: string - total_time_in_millis: long + total_stopped_time?: Duration + total_stopped_time_in_millis: DurationValue + total_throttled_time?: Duration + total_throttled_time_in_millis: DurationValue + total_time?: Duration + total_time_in_millis: DurationValue } export type Metadata = Record @@ -2275,19 +2269,19 @@ export interface QueryCacheStats { export interface RecoveryStats { current_as_source: long current_as_target: long - throttle_time?: string - throttle_time_in_millis: long + throttle_time?: Duration + throttle_time_in_millis: DurationValue } export type Refresh = boolean | 'true' | 'false' | 'wait_for' export interface RefreshStats { external_total: long - external_total_time_in_millis: long + external_total_time_in_millis: DurationValue listeners: long total: long - total_time?: string - total_time_in_millis: long + total_time?: Duration + total_time_in_millis: DurationValue } export type 
RelationName = string @@ -2348,26 +2342,32 @@ export interface ScriptTransform { export type ScrollId = string +export type ScrollIds = ScrollId | ScrollId[] + export interface SearchStats { fetch_current: long - fetch_time_in_millis: long + fetch_time?: Duration + fetch_time_in_millis: DurationValue fetch_total: long open_contexts?: long query_current: long - query_time_in_millis: long + query_time?: Duration + query_time_in_millis: DurationValue query_total: long scroll_current: long - scroll_time_in_millis: long + scroll_time?: Duration + scroll_time_in_millis: DurationValue scroll_total: long suggest_current: long - suggest_time_in_millis: long + suggest_time?: Duration + suggest_time_in_millis: DurationValue suggest_total: long groups?: Record } export interface SearchTransform { request: WatcherSearchInputRequestDefinition - timeout: Time + timeout: Duration } export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' @@ -2429,6 +2429,10 @@ export interface SlicedScroll { max: integer } +export type Slices = integer | SlicesCalculation + +export type SlicesCalculation = 'auto' + export type Sort = SortCombinations | SortCombinations[] export type SortCombinations = Field | SortOptions @@ -2482,16 +2486,12 @@ export type TaskId = string | integer export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' -export type Time = string | integer - -export type TimeSpan = string +export type TimeOfDay = string export type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd' export type TimeZone = string -export type Timestamp = string - export interface TopLeftBottomRightGeoBounds { top_left: GeoLocation bottom_right: GeoLocation @@ -2520,6 +2520,14 @@ export interface TranslogStats { export type TransportAddress = string +export type UnitFloatMillis = double + +export type UnitMillis = long + +export type UnitNanos = long + +export type UnitSeconds = long + export type Username = string export type Uuid = string @@ -2539,8 +2547,8 @@ export type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | export interface WarmerStats { current: long total: long - total_time?: string - total_time_in_millis: long + total_time?: Duration + total_time_in_millis: DurationValue } export interface WktGeoBounds { @@ -2592,6 +2600,8 @@ export interface AggregationsAggregateBase { meta?: Metadata } +export type AggregationsAggregateOrder = Partial> | Partial>[] + export interface AggregationsAggregation { meta?: Metadata name?: string @@ -2690,7 +2700,7 @@ export interface AggregationsArrayPercentilesItem { } export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { - interval: DateMathTime + interval: DurationLarge } export interface AggregationsAutoDateHistogramAggregation extends AggregationsBucketAggregationBase { @@ -2698,11 +2708,11 @@ export interface AggregationsAutoDateHistogramAggregation extends AggregationsBu field?: Field format?: string minimum_interval?: AggregationsMinimumInterval - missing?: DateString + missing?: DateTime offset?: string params?: Record script?: Script - time_zone?: string + time_zone?: TimeZone } export interface AggregationsAverageAggregation extends AggregationsFormatMetricAggregationBase { @@ -2798,8 +2808,11 @@ export interface AggregationsCardinalityAggregate extends AggregationsAggregateB export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { precision_threshold?: integer rehash?: boolean + execution_hint?: 
AggregationsCardinalityExecutionMode } +export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic' + export interface AggregationsCategorizeTextAggregation extends AggregationsAggregation { field: Field max_unique_tokens?: integer @@ -2877,25 +2890,25 @@ export interface AggregationsDateHistogramAggregation extends AggregationsBucket extended_bounds?: AggregationsExtendedBounds hard_bounds?: AggregationsExtendedBounds field?: Field - fixed_interval?: Time + fixed_interval?: Duration format?: string - interval?: Time + interval?: Duration min_doc_count?: integer - missing?: DateString - offset?: Time - order?: AggregationsHistogramOrder + missing?: DateTime + offset?: Duration + order?: AggregationsAggregateOrder params?: Record script?: Script - time_zone?: string + time_zone?: TimeZone keyed?: boolean } export interface AggregationsDateHistogramBucketKeys extends AggregationsMultiBucketBase { key_as_string?: string - key: EpochMillis + key: EpochTime } export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys -& { [property: string]: AggregationsAggregate | string | EpochMillis | long } +& { [property: string]: AggregationsAggregate | string | EpochTime | long } export interface AggregationsDateRangeAggregate extends AggregationsRangeAggregate { } @@ -2905,7 +2918,7 @@ export interface AggregationsDateRangeAggregation extends AggregationsBucketAggr format?: string missing?: AggregationsMissing ranges?: AggregationsDateRangeExpression[] - time_zone?: string + time_zone?: TimeZone keyed?: boolean } @@ -3138,7 +3151,7 @@ export interface AggregationsHistogramAggregation extends AggregationsBucketAggr min_doc_count?: integer missing?: double offset?: double - order?: AggregationsHistogramOrder + order?: AggregationsAggregateOrder script?: Script format?: string keyed?: boolean @@ -3151,11 +3164,6 @@ export interface AggregationsHistogramBucketKeys extends AggregationsMultiBucket export type AggregationsHistogramBucket = AggregationsHistogramBucketKeys & { [property: string]: AggregationsAggregate | string | double | long } -export interface AggregationsHistogramOrder { - _count?: SortOrder - _key?: SortOrder -} - export interface AggregationsHoltLinearModelSettings { alpha?: float beta?: float @@ -3374,6 +3382,13 @@ export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggreg } export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase { + collect_mode?: AggregationsTermsAggregationCollectMode + order?: AggregationsAggregateOrder + min_doc_count?: long + shard_min_doc_count?: long + shard_size?: integer + show_term_doc_count_error?: boolean + size?: integer terms: AggregationsMultiTermLookup[] } @@ -3733,7 +3748,7 @@ export interface AggregationsTermsAggregation extends AggregationsBucketAggregat missing_order?: AggregationsMissingOrder missing_bucket?: boolean value_type?: string - order?: AggregationsTermsAggregationOrder + order?: AggregationsAggregateOrder script?: Script shard_size?: integer show_term_doc_count_error?: boolean @@ -3745,8 +3760,6 @@ export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_f export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality' -export type AggregationsTermsAggregationOrder = Record | Record[] - export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase { 
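// The new AggregateOrder alias above replaces the removed HistogramOrder and
// TermsAggregationOrder, so one shape now orders terms, multi_terms,
// histogram and date_histogram buckets alike. Both hedged forms type-check:
//   order: { _count: 'desc' }
//   order: [{ _key: 'asc' }, { 'price.avg': 'desc' }]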
doc_count_error?: long } @@ -3867,7 +3880,7 @@ export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnaly export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'asciifolding' - preserve_original: boolean + preserve_original?: boolean } export type AnalysisCharFilter = string | AnalysisCharFilterDefinition @@ -3927,8 +3940,8 @@ export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity' export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase { type: 'delimited_payload' - delimiter: string - encoding: AnalysisDelimitedPayloadEncoding + delimiter?: string + encoding?: AnalysisDelimitedPayloadEncoding } export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase { @@ -3944,8 +3957,8 @@ export type AnalysisEdgeNGramSide = 'front' | 'back' export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { type: 'edge_ngram' - max_gram: integer - min_gram: integer + max_gram?: integer + min_gram?: integer side?: AnalysisEdgeNGramSide preserve_original?: boolean } @@ -3960,8 +3973,9 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { type: 'elision' - articles: string[] - articles_case: boolean + articles?: string[] + articles_path?: string + articles_case?: boolean } export interface AnalysisFingerprintAnalyzer { @@ -3976,8 +3990,8 @@ export interface AnalysisFingerprintAnalyzer { export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase { type: 'fingerprint' - max_output_size: integer - separator: string + max_output_size?: integer + separator?: string } export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { @@ -3986,10 +4000,10 @@ export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { type: 'hunspell' - dedup: boolean - dictionary: string + dedup?: boolean + dictionary?: string locale: string - longest_only: boolean + longest_only?: boolean } export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase { @@ -4148,8 +4162,8 @@ export interface AnalysisLanguageAnalyzer { export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase { type: 'length' - max: integer - min: integer + max?: integer + min?: integer } export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase { @@ -4158,8 +4172,8 @@ export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase { export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase { type: 'limit' - consume_all_tokens: boolean - max_token_count: integer + consume_all_tokens?: boolean + max_token_count?: integer } export interface AnalysisLowercaseNormalizer { @@ -4177,14 +4191,14 @@ export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase { export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase { type: 'mapping' - mappings: string[] + mappings?: string[] mappings_path?: string } export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase { type: 'multiplexer' filters: string[] - preserve_original: boolean + preserve_original?: boolean } export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase { @@ -4214,7 +4228,7 @@ export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed' export 
interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { type: 'nori_part_of_speech' - stoptags: string[] + stoptags?: string[] } export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { @@ -4248,7 +4262,7 @@ export interface AnalysisPatternAnalyzer { export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase { type: 'pattern_capture' patterns: string[] - preserve_original: boolean + preserve_original?: boolean } export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase { @@ -4260,9 +4274,10 @@ export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase { type: 'pattern_replace' - flags: string + all?: boolean + flags?: string pattern: string - replacement: string + replacement?: string } export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase { @@ -4369,7 +4384,7 @@ export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { type: 'stop' ignore_case?: boolean remove_trailing?: boolean - stopwords: AnalysisStopWords + stopwords?: AnalysisStopWords stopwords_path?: string } @@ -4423,7 +4438,7 @@ export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase { type: 'truncate' - length: integer + length?: integer } export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase { @@ -4537,8 +4552,6 @@ export interface MappingConstantKeywordProperty extends MappingPropertyBase { type: 'constant_keyword' } -export type MappingCoreProperty = MappingObjectProperty | MappingNestedProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingDocValuesProperty | MappingMatchOnlyTextProperty - export interface MappingCorePropertyBase extends MappingPropertyBase { copy_to?: Fields similarity?: string @@ -4554,7 +4567,7 @@ export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase { format?: string ignore_malformed?: boolean index?: boolean - null_value?: DateString + null_value?: DateTime precision_step?: integer type: 'date_nanos' } @@ -4565,7 +4578,7 @@ export interface MappingDateProperty extends MappingDocValuesPropertyBase { format?: string ignore_malformed?: boolean index?: boolean - null_value?: DateString + null_value?: DateTime precision_step?: integer locale?: string type: 'date' @@ -4590,8 +4603,6 @@ export interface MappingDenseVectorProperty extends MappingPropertyBase { index_options?: MappingDenseVectorIndexOptions } -export type MappingDocValuesProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDateProperty | MappingDateNanosProperty | MappingKeywordProperty | MappingNumberProperty | MappingRangeProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingCompletionProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingShapeProperty | MappingTokenCountProperty | MappingVersionProperty | MappingWildcardProperty | MappingPointProperty - export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase { doc_values?: boolean } @@ -4607,6 +4618,32 @@ export interface MappingDoubleRangeProperty extends MappingRangePropertyBase { export type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false' +export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { + type: '{dynamic_property}' + enabled?: boolean + null_value?: FieldValue + boost?: double + coerce?: 
boolean + script?: Script + on_script_error?: MappingOnScriptError + ignore_malformed?: boolean + time_series_metric?: MappingTimeSeriesMetricType + analyzer?: string + eager_global_ordinals?: boolean + index?: boolean + index_options?: MappingIndexOptions + index_phrases?: boolean + index_prefixes?: MappingTextIndexPrefixes + norms?: boolean + position_increment_gap?: integer + search_analyzer?: string + search_quote_analyzer?: string + term_vector?: MappingTermVectorOption + format?: string + precision_step?: integer + locale?: string +} + export interface MappingDynamicTemplate { mapping?: MappingProperty match?: string @@ -4759,8 +4796,6 @@ export interface MappingNestedProperty extends MappingCorePropertyBase { type: 'nested' } -export type MappingNumberProperty = MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingDoubleNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingShortNumberProperty | MappingByteNumberProperty | MappingUnsignedLongNumberProperty | MappingScaledFloatNumberProperty - export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase { index?: boolean ignore_malformed?: boolean @@ -4785,7 +4820,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { type: 'point' } -export type MappingProperty = MappingFlattenedProperty | MappingJoinProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingDenseVectorProperty | MappingAggregateMetricDoubleProperty | MappingCoreProperty +export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty export interface MappingPropertyBase { local_metadata?: Metadata @@ -4796,8 +4831,6 @@ export interface MappingPropertyBase { fields?: Record } -export type MappingRangeProperty = MappingLongRangeProperty | MappingIpRangeProperty | MappingIntegerRangeProperty | MappingFloatRangeProperty | MappingDoubleRangeProperty | MappingDateRangeProperty - export interface MappingRangePropertyBase extends MappingDocValuesPropertyBase { boost?: double coerce?: boolean @@ -4998,9 +5031,9 @@ export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { export interface QueryDslDateDecayFunctionKeys extends 
QueryDslDecayFunctionBase { } export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys -& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } -export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { +export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { @@ -5244,12 +5277,6 @@ export interface QueryDslIntervalsWildcard { use_field?: Field } -export interface QueryDslKnnQuery extends QueryDslQueryBase { - field: Field - num_candidates: integer - query_vector: double[] -} - export type QueryDslLike = string | QueryDslLikeDocument export interface QueryDslLikeDocument { @@ -5439,7 +5466,6 @@ export interface QueryDslQueryContainer { has_parent?: QueryDslHasParentQuery ids?: QueryDslIdsQuery intervals?: Partial> - knn?: QueryDslKnnQuery match?: Partial> match_all?: QueryDslMatchAllQuery match_bool_prefix?: Partial> @@ -5714,7 +5740,7 @@ export interface AsyncSearchAsyncSearch[]> terminated_early?: boolean @@ -5730,8 +5756,10 @@ export interface AsyncSearchAsyncSearchResponseBase { id?: Id is_partial: boolean is_running: boolean - expiration_time_in_millis: EpochMillis - start_time_in_millis: EpochMillis + expiration_time?: DateTime + expiration_time_in_millis: EpochTime + start_time?: DateTime + start_time_in_millis: EpochTime } export interface AsyncSearchDeleteRequest extends RequestBase { @@ -5742,9 +5770,9 @@ export type AsyncSearchDeleteResponse = AcknowledgedResponseBase export interface AsyncSearchGetRequest extends RequestBase { id: Id - keep_alive?: Time + keep_alive?: Duration typed_keys?: boolean - wait_for_completion_timeout?: Time + wait_for_completion_timeout?: Duration } export type AsyncSearchGetResponse> = AsyncSearchAsyncSearchDocumentResponseBase @@ -5762,9 +5790,9 @@ export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSea export interface AsyncSearchSubmitRequest extends RequestBase { index?: Indices - wait_for_completion_timeout?: Time + wait_for_completion_timeout?: Duration keep_on_completion?: boolean - keep_alive?: Time + keep_alive?: Duration allow_no_indices?: boolean allow_partial_search_results?: boolean analyzer?: string @@ -5773,9 +5801,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { ccs_minimize_roundtrips?: boolean default_operator?: QueryDslOperator df?: string - docvalue_fields?: Fields expand_wildcards?: ExpandWildcards - explain?: boolean ignore_throttled?: boolean ignore_unavailable?: boolean lenient?: boolean @@ -5785,29 +5811,17 @@ export interface AsyncSearchSubmitRequest extends RequestBase { pre_filter_shard_size?: long request_cache?: boolean routing?: Routing - scroll?: Time + scroll?: Duration search_type?: SearchType - stats?: string[] - stored_fields?: Fields suggest_field?: Field suggest_mode?: SuggestMode suggest_size?: long suggest_text?: string - terminate_after?: long - timeout?: Time - track_total_hits?: SearchTrackHits - track_scores?: boolean typed_keys?: boolean rest_total_hits_as_int?: boolean - version?: boolean - _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields - seq_no_primary_term?: boolean q?: string - size?: integer - from?: integer - sort?: string | string[] /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
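// ---- editor's example (aside, not part of the patch) ---------------------
// Hedged sketch: the async search response base now exposes human-readable
// `expiration_time`/`start_time` alongside the epoch-millis twins. Assumes
// `EpochTime` resolves to a number of milliseconds, as in the generated types.
declare const status: AsyncSearchAsyncSearchResponseBase
if (!status.is_running) {
  const expiry = new Date(status.expiration_time_in_millis)
  console.log(`async search ${status.id ?? '<unknown>'} expires ${expiry.toISOString()}`)
}
// ---- end example ----------------------------------------------------------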
*/ body?: { aggregations?: Record @@ -5819,6 +5833,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { track_total_hits?: SearchTrackHits indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + knn?: KnnQuery min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -6004,13 +6019,13 @@ export interface CatComponentTemplatesRequest extends CatCatRequestBase { export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] export interface CatCountCountRecord { - epoch?: EpochMillis - t?: EpochMillis - time?: EpochMillis - timestamp?: DateString - ts?: DateString - hms?: DateString - hhmmss?: DateString + epoch?: SpecUtilsStringified> + t?: SpecUtilsStringified> + time?: SpecUtilsStringified> + timestamp?: TimeOfDay + ts?: TimeOfDay + hms?: TimeOfDay + hhmmss?: TimeOfDay count?: string dc?: string 'docs.count'?: string @@ -6043,12 +6058,12 @@ export interface CatFielddataRequest extends CatCatRequestBase { export type CatFielddataResponse = CatFielddataFielddataRecord[] export interface CatHealthHealthRecord { - epoch?: EpochMillis - time?: EpochMillis - timestamp?: DateString - ts?: DateString - hms?: DateString - hhmmss?: DateString + epoch?: SpecUtilsStringified> + time?: SpecUtilsStringified> + timestamp?: TimeOfDay + ts?: TimeOfDay + hms?: TimeOfDay + hhmmss?: TimeOfDay cluster?: string cl?: string status?: string @@ -6471,7 +6486,7 @@ export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { bytes?: Bytes h?: CatCatDfaColumns s?: CatCatDfaColumns - time?: Time + time?: Duration } export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] @@ -6734,8 +6749,8 @@ export interface CatMlTrainedModelsTrainedModelsRecord { modelOperations?: string license?: string l?: string - create_time?: DateString - ct?: DateString + create_time?: DateTime + ct?: DateTime version?: VersionString v?: VersionString description?: string @@ -7107,17 +7122,17 @@ export interface CatRecoveryRecoveryRecord { shard?: string s?: string sh?: string - start_time?: string - start?: string - start_time_millis?: string - start_millis?: string - stop_time?: string - stop?: string - stop_time_millis?: string - stop_millis?: string - time?: string - t?: string - ti?: string + start_time?: DateTime + start?: DateTime + start_time_millis?: EpochTime + start_millis?: EpochTime + stop_time?: DateTime + stop?: DateTime + stop_time_millis?: EpochTime + stop_millis?: EpochTime + time?: Duration + t?: Duration + ti?: Duration type?: string ty?: string stage?: string @@ -7464,20 +7479,20 @@ export interface CatSnapshotsSnapshotsRecord { repo?: string status?: string s?: string - start_epoch?: EpochMillis - ste?: EpochMillis - startEpoch?: EpochMillis - start_time?: DateString - sti?: DateString - startTime?: DateString - end_epoch?: EpochMillis - ete?: EpochMillis - endEpoch?: EpochMillis - end_time?: DateString - eti?: DateString - endTime?: DateString - duration?: Time - dur?: Time + start_epoch?: SpecUtilsStringified> + ste?: SpecUtilsStringified> + startEpoch?: SpecUtilsStringified> + start_time?: WatcherScheduleTimeOfDay + sti?: WatcherScheduleTimeOfDay + startTime?: WatcherScheduleTimeOfDay + end_epoch?: SpecUtilsStringified> + ete?: SpecUtilsStringified> + endEpoch?: SpecUtilsStringified> + end_time?: TimeOfDay + eti?: TimeOfDay + endTime?: TimeOfDay + duration?: Duration + dur?: Duration indices?: string i?: string successful_shards?: string @@ -7556,7 +7571,7 @@ export interface 
CatTemplatesTemplatesRecord { export interface CatThreadPoolRequest extends CatCatRequestBase { thread_pool_patterns?: Names - time?: Time + time?: TimeUnit } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -7610,7 +7625,7 @@ export interface CatTransformsRequest extends CatCatRequestBase { from?: integer h?: CatCatTransformColumns s?: CatCatTransformColumns - time?: Time + time?: TimeUnit size?: integer } @@ -7726,10 +7741,14 @@ export interface CcrShardStats { shard_id: integer successful_read_requests: long successful_write_requests: long - time_since_last_read_millis: EpochMillis - total_read_remote_exec_time_millis: EpochMillis - total_read_time_millis: EpochMillis - total_write_time_millis: EpochMillis + time_since_last_read?: Duration + time_since_last_read_millis: DurationValue + total_read_remote_exec_time?: Duration + total_read_remote_exec_time_millis: DurationValue + total_read_time?: Duration + total_read_time_millis: DurationValue + total_write_time?: Duration + total_write_time_millis: DurationValue write_buffer_operation_count: long write_buffer_size_in_bytes: ByteSize } @@ -7750,12 +7769,12 @@ export interface CcrFollowRequest extends RequestBase { max_outstanding_write_requests?: long max_read_request_operation_count?: long max_read_request_size?: string - max_retry_delay?: Time + max_retry_delay?: Duration max_write_buffer_count?: long max_write_buffer_size?: string max_write_request_operation_count?: long max_write_request_size?: string - read_poll_timeout?: Time + read_poll_timeout?: Duration remote_cluster?: string } } @@ -7779,12 +7798,12 @@ export interface CcrFollowInfoFollowerIndexParameters { max_outstanding_write_requests: integer max_read_request_operation_count: integer max_read_request_size: string - max_retry_delay: Time + max_retry_delay: Duration max_write_buffer_count: integer max_write_buffer_size: string max_write_request_operation_count: integer max_write_request_size: string - read_poll_timeout: Time + read_poll_timeout: Duration } export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused' @@ -7865,10 +7884,10 @@ export interface CcrPutAutoFollowPatternRequest extends RequestBase { max_outstanding_read_requests?: integer settings?: Record max_outstanding_write_requests?: integer - read_poll_timeout?: Time + read_poll_timeout?: Duration max_read_request_operation_count?: integer max_read_request_size?: ByteSize - max_retry_delay?: Time + max_retry_delay?: Duration max_write_buffer_count?: integer max_write_buffer_size?: ByteSize max_write_request_operation_count?: integer @@ -7892,12 +7911,12 @@ export interface CcrResumeFollowRequest extends RequestBase { max_outstanding_write_requests?: long max_read_request_operation_count?: long max_read_request_size?: string - max_retry_delay?: Time + max_retry_delay?: Duration max_write_buffer_count?: long max_write_buffer_size?: string max_write_request_operation_count?: long max_write_request_size?: string - read_poll_timeout?: Time + read_poll_timeout?: Duration } } @@ -7914,7 +7933,7 @@ export interface CcrStatsAutoFollowStats { export interface CcrStatsAutoFollowedCluster { cluster_name: Name last_seen_metadata_version: VersionNumber - time_since_last_check_millis: DateString + time_since_last_check_millis: DurationValue } export interface CcrStatsFollowStats { @@ -8036,8 +8055,8 @@ export interface ClusterAllocationExplainReservedSize { export interface ClusterAllocationExplainResponse { allocate_explanation?: string - allocation_delay?: string - allocation_delay_in_millis?: 
long
+  allocation_delay?: Duration
+  allocation_delay_in_millis?: DurationValue<UnitMillis>
   can_allocate?: ClusterAllocationExplainDecision
   can_move_to_other_node?: ClusterAllocationExplainDecision
   can_rebalance_cluster?: ClusterAllocationExplainDecision
@@ -8046,8 +8065,8 @@ export interface ClusterAllocationExplainResponse {
   can_remain_decisions?: ClusterAllocationExplainAllocationDecision[]
   can_remain_on_current_node?: ClusterAllocationExplainDecision
   cluster_info?: ClusterAllocationExplainClusterInfo
-  configured_delay?: string
-  configured_delay_in_millis?: long
+  configured_delay?: Duration
+  configured_delay_in_millis?: DurationValue<UnitMillis>
   current_node?: ClusterAllocationExplainCurrentNode
   current_state: string
   index: IndexName
@@ -8055,15 +8074,15 @@ export interface ClusterAllocationExplainResponse {
   node_allocation_decisions?: ClusterAllocationExplainNodeAllocationExplanation[]
   primary: boolean
   rebalance_explanation?: string
-  remaining_delay?: string
-  remaining_delay_in_millis?: long
+  remaining_delay?: Duration
+  remaining_delay_in_millis?: DurationValue<UnitMillis>
   shard: integer
   unassigned_info?: ClusterAllocationExplainUnassignedInformation
   note?: string
 }
 
 export interface ClusterAllocationExplainUnassignedInformation {
-  at: DateString
+  at: DateTime
   last_allocation_status?: string
   reason: ClusterAllocationExplainUnassignedInformationReason
   details?: string
@@ -8076,8 +8095,8 @@ export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED
 
 export interface ClusterDeleteComponentTemplateRequest extends RequestBase {
   name: Names
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }
 
 export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase
@@ -8090,7 +8109,7 @@ export type ClusterDeleteVotingConfigExclusionsResponse = boolean
 
 export interface ClusterExistsComponentTemplateRequest extends RequestBase {
   name: Names
-  master_timeout?: Time
+  master_timeout?: Duration
   local?: boolean
 }
 
@@ -8100,7 +8119,7 @@ export interface ClusterGetComponentTemplateRequest extends RequestBase {
   name?: Name
   flat_settings?: boolean
   local?: boolean
-  master_timeout?: Time
+  master_timeout?: Duration
 }
 
 export interface ClusterGetComponentTemplateResponse {
@@ -8110,8 +8129,8 @@ export interface ClusterGetSettingsRequest extends RequestBase {
   flat_settings?: boolean
   include_defaults?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
 }
 
 export interface ClusterGetSettingsResponse {
@@ -8137,8 +8156,8 @@ export interface ClusterHealthRequest extends RequestBase {
   expand_wildcards?: ExpandWildcards
   level?: Level
   local?: boolean
-  master_timeout?: Time
-  timeout?: Time
+  master_timeout?: Duration
+  timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
   wait_for_events?: WaitForEvents
   wait_for_nodes?: string | integer
@@ -8161,7 +8180,8 @@ export interface ClusterHealthResponse {
   number_of_pending_tasks: integer
   relocating_shards: integer
   status: HealthStatus
-  task_max_waiting_in_queue_millis: EpochMillis
+  task_max_waiting_in_queue?: Duration
+  task_max_waiting_in_queue_millis: DurationValue<UnitMillis>
   timed_out: boolean
   unassigned_shards: integer
 }
@@ -8180,13 +8200,13 @@ export interface ClusterPendingTasksPendingTask {
   insert_order: integer
   priority: string
   source: string
-  time_in_queue: string
-  time_in_queue_millis: integer
+  time_in_queue?: Duration
+  time_in_queue_millis: DurationValue<UnitMillis>
 }
 
 export interface ClusterPendingTasksRequest extends
RequestBase { local?: boolean - master_timeout?: Time + master_timeout?: Duration } export interface ClusterPendingTasksResponse { @@ -8196,7 +8216,7 @@ export interface ClusterPendingTasksResponse { export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase { node_names?: Names node_ids?: Ids - timeout?: Time + timeout?: Duration } export type ClusterPostVotingConfigExclusionsResponse = boolean @@ -8204,7 +8224,7 @@ export type ClusterPostVotingConfigExclusionsResponse = boolean export interface ClusterPutComponentTemplateRequest extends RequestBase { name: Name create?: boolean - master_timeout?: Time + master_timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { template: IndicesIndexState @@ -8220,8 +8240,8 @@ export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterPutSettingsRequest extends RequestBase { flat_settings?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { persistent?: Record @@ -8240,7 +8260,7 @@ export type ClusterRemoteInfoClusterRemoteInfo = ClusterRemoteInfoClusterRemoteS export interface ClusterRemoteInfoClusterRemoteProxyInfo { mode: 'proxy' connected: boolean - initial_connect_timeout: Time + initial_connect_timeout: Duration skip_unavailable: boolean proxy_address: string server_name: string @@ -8253,7 +8273,7 @@ export interface ClusterRemoteInfoClusterRemoteSniffInfo { connected: boolean max_connections_per_cluster: integer num_nodes_connected: long - initial_connect_timeout: Time + initial_connect_timeout: Duration skip_unavailable: boolean seeds: string[] } @@ -8303,8 +8323,8 @@ export interface ClusterRerouteRequest extends RequestBase { explain?: boolean metric?: Metrics retry_failed?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
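// ---- editor's example (aside, not part of the patch) ---------------------
// Illustrative request literals for the Time -> Duration migration in the
// cluster APIs above: Duration still accepts unit strings such as '30s', so
// existing call sites keep compiling. All values are hypothetical, and the
// `persistent` payload shape is assumed (its Record value type is elided in
// this extract).
const health: ClusterHealthRequest = {
  level: 'indices',
  master_timeout: '30s',
  timeout: '90s',
  wait_for_nodes: '>=2',
}
const settingsUpdate: ClusterPutSettingsRequest = {
  master_timeout: '30s',
  body: { persistent: { 'cluster.routing.allocation.enable': 'all' } },
}
// ---- end example ----------------------------------------------------------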
*/ body?: { commands?: ClusterRerouteCommand[] @@ -8346,9 +8366,9 @@ export interface ClusterStateRequest extends RequestBase { flat_settings?: boolean ignore_unavailable?: boolean local?: boolean - master_timeout?: Time + master_timeout?: Duration wait_for_metadata_version?: VersionNumber - wait_for_timeout?: Time + wait_for_timeout?: Duration } export type ClusterStateResponse = any @@ -8403,7 +8423,7 @@ export interface ClusterStatsClusterIngest { } export interface ClusterStatsClusterJvm { - max_uptime_in_millis: long + max_uptime_in_millis: DurationValue mem: ClusterStatsClusterJvmMemory threads: long versions: ClusterStatsClusterJvmVersion[] @@ -8504,7 +8524,8 @@ export interface ClusterStatsClusterProcessor { count: long current: long failed: long - time_in_millis: long + time?: Duration + time_in_millis: DurationValue } export interface ClusterStatsClusterShardMetrics { @@ -8571,7 +8592,7 @@ export interface ClusterStatsOperatingSystemMemoryInfo { export interface ClusterStatsRequest extends RequestBase { node_id?: NodeIds flat_settings?: boolean - timeout?: Time + timeout?: Duration } export type ClusterStatsResponse = ClusterStatsStatsResponseBase @@ -8605,8 +8626,8 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { index_uuid: Uuid accept_data_loss: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase @@ -8614,8 +8635,8 @@ export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBas export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { index_uuid: Uuid accept_data_loss: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type DanglingIndicesImportDanglingIndexResponse = AcknowledgedResponseBase @@ -8623,7 +8644,7 @@ export type DanglingIndicesImportDanglingIndexResponse = AcknowledgedResponseBas export interface DanglingIndicesListDanglingIndicesDanglingIndex { index_name: string index_uuid: string - creation_date_millis: EpochMillis + creation_date_millis: EpochTime node_ids: Ids } @@ -8734,7 +8755,7 @@ export interface EqlEqlSearchResponseBase { id?: Id is_partial?: boolean is_running?: boolean - took?: integer + took?: DurationValue timed_out?: boolean hits: EqlEqlHits } @@ -8759,8 +8780,8 @@ export type EqlDeleteResponse = AcknowledgedResponseBase export interface EqlGetRequest extends RequestBase { id: Id - keep_alive?: Time - wait_for_completion_timeout?: Time + keep_alive?: Duration + wait_for_completion_timeout?: Duration } export type EqlGetResponse = EqlEqlSearchResponseBase @@ -8773,8 +8794,8 @@ export interface EqlGetStatusResponse { id: Id is_partial: boolean is_running: boolean - start_time_in_millis?: EpochMillis - expiration_time_in_millis?: EpochMillis + start_time_in_millis?: EpochTime + expiration_time_in_millis?: EpochTime completion_status?: integer } @@ -8783,9 +8804,6 @@ export interface EqlSearchRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - keep_alive?: Time - keep_on_completion?: boolean - wait_for_completion_timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { query: string @@ -8795,9 +8813,9 @@ export interface EqlSearchRequest extends RequestBase { timestamp_field?: Field fetch_size?: uint filter?: QueryDslQueryContainer | QueryDslQueryContainer[] - keep_alive?: Time + keep_alive?: Duration keep_on_completion?: boolean - wait_for_completion_timeout?: Time + wait_for_completion_timeout?: Duration size?: uint fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[] result_position?: EqlSearchResultPosition @@ -8835,7 +8853,7 @@ export interface FleetGlobalCheckpointsRequest extends RequestBase { wait_for_advance?: boolean wait_for_index?: boolean checkpoints?: FleetCheckpoint[] - timeout?: Time + timeout?: Duration } export interface FleetGlobalCheckpointsResponse { @@ -8875,9 +8893,7 @@ export interface FleetSearchRequest extends RequestBase { ccs_minimize_roundtrips?: boolean default_operator?: QueryDslOperator df?: string - docvalue_fields?: Fields expand_wildcards?: ExpandWildcards - explain?: boolean ignore_throttled?: boolean ignore_unavailable?: boolean lenient?: boolean @@ -8887,29 +8903,17 @@ export interface FleetSearchRequest extends RequestBase { pre_filter_shard_size?: long request_cache?: boolean routing?: Routing - scroll?: Time + scroll?: Duration search_type?: SearchType - stats?: string[] - stored_fields?: Fields suggest_field?: Field suggest_mode?: SuggestMode suggest_size?: long suggest_text?: string - terminate_after?: long - timeout?: Time - track_total_hits?: SearchTrackHits - track_scores?: boolean typed_keys?: boolean rest_total_hits_as_int?: boolean - version?: boolean - _source?: SearchSourceConfigParam _source_excludes?: Fields _source_includes?: Fields - seq_no_primary_term?: boolean q?: string - size?: integer - from?: integer - sort?: string | string[] wait_for_checkpoints?: FleetCheckpoint[] allow_partial_search_results?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ @@ -8975,7 +8979,7 @@ export interface GraphConnection { export interface GraphExploreControls { sample_diversity?: GraphSampleDiversity sample_size?: integer - timeout?: Time + timeout?: Duration use_significance: boolean } @@ -9014,7 +9018,7 @@ export interface GraphVertexInclude { export interface GraphExploreRequest extends RequestBase { index: Indices routing?: Routing - timeout?: Time + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
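// ---- editor's example (aside, not part of the patch) ---------------------
// Sketch of the EQL request after the change above: `keep_alive`,
// `keep_on_completion` and `wait_for_completion_timeout` now live in the body
// rather than as query-string parameters. Index name and query are illustrative.
const eqlReq: EqlSearchRequest = {
  index: 'my-logs',
  body: {
    query: 'process where process.name == "regsvr32.exe"',
    keep_alive: '1m',
    fetch_size: 100,
  },
}
// ---- end example ----------------------------------------------------------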
*/ body?: { connections?: GraphHop @@ -9046,7 +9050,7 @@ export interface IlmForceMergeConfiguration { export interface IlmPhase { actions?: IlmActions - min_age?: Time + min_age?: Duration | long configurations?: IlmConfigurations } @@ -9059,7 +9063,7 @@ export interface IlmPhases { export interface IlmPolicy { phases: IlmPhases - name?: Name + _meta?: Metadata } export interface IlmShrinkConfiguration { @@ -9068,8 +9072,8 @@ export interface IlmShrinkConfiguration { export interface IlmDeleteLifecycleRequest extends RequestBase { name: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase @@ -9077,30 +9081,35 @@ export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase export type IlmExplainLifecycleLifecycleExplain = IlmExplainLifecycleLifecycleExplainManaged | IlmExplainLifecycleLifecycleExplainUnmanaged export interface IlmExplainLifecycleLifecycleExplainManaged { - action: Name - action_time_millis: EpochMillis - age: Time + action?: Name + action_time?: DateTime + action_time_millis?: EpochTime + age?: Duration failed_step?: Name failed_step_retry_count?: integer - index: IndexName - index_creation_date_millis?: EpochMillis + index?: IndexName + index_creation_date?: DateTime + index_creation_date_millis?: EpochTime is_auto_retryable_error?: boolean - lifecycle_date_millis: EpochMillis + lifecycle_date?: DateTime + lifecycle_date_millis?: EpochTime managed: true phase: Name - phase_time_millis: EpochMillis + phase_time?: DateTime + phase_time_millis?: EpochTime policy: Name - step: Name + step?: Name step_info?: Record - step_time_millis: EpochMillis - phase_execution: IlmExplainLifecycleLifecycleExplainPhaseExecution - time_since_index_creation?: Time + step_time?: DateTime + step_time_millis?: EpochTime + phase_execution?: IlmExplainLifecycleLifecycleExplainPhaseExecution + time_since_index_creation?: Duration } export interface IlmExplainLifecycleLifecycleExplainPhaseExecution { policy: Name version: VersionNumber - modified_date_in_millis: EpochMillis + modified_date_in_millis: EpochTime } export interface IlmExplainLifecycleLifecycleExplainUnmanaged { @@ -9112,8 +9121,8 @@ export interface IlmExplainLifecycleRequest extends RequestBase { index: IndexName only_errors?: boolean only_managed?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export interface IlmExplainLifecycleResponse { @@ -9121,15 +9130,15 @@ export interface IlmExplainLifecycleResponse { } export interface IlmGetLifecycleLifecycle { - modified_date: DateString + modified_date: DateTime policy: IlmPolicy version: VersionNumber } export interface IlmGetLifecycleRequest extends RequestBase { name?: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type IlmGetLifecycleResponse = Record @@ -9179,8 +9188,8 @@ export interface IlmMoveToStepStepKey { export interface IlmPutLifecycleRequest extends RequestBase { name: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
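// ---- editor's example (aside, not part of the patch) ---------------------
// Hedged sketch of an IlmPolicy literal: `_meta` replaces the removed `name`
// field, and `min_age` now accepts a Duration string or raw millis. Assumes
// the usual `hot`/`delete` keys on IlmPhases, which this extract elides.
const policy: IlmPolicy = {
  _meta: { owner: 'search-team' },
  phases: {
    hot: {},
    delete: { min_age: '30d' },
  },
}
// ---- end example ----------------------------------------------------------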
*/ body?: { policy?: IlmPolicy @@ -9205,15 +9214,15 @@ export interface IlmRetryRequest extends RequestBase { export type IlmRetryResponse = AcknowledgedResponseBase export interface IlmStartRequest extends RequestBase { - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type IlmStartResponse = AcknowledgedResponseBase export interface IlmStopRequest extends RequestBase { - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type IlmStopResponse = AcknowledgedResponseBase @@ -9324,7 +9333,7 @@ export interface IndicesIndexSettingBlocks { metadata?: boolean } -export interface IndicesIndexSettings { +export interface IndicesIndexSettingsKeys { index?: IndicesIndexSettings mode?: string routing_path?: string | string[] @@ -9341,7 +9350,7 @@ export interface IndicesIndexSettings { auto_expand_replicas?: string merge?: IndicesMerge search?: IndicesSettingsSearch - refresh_interval?: Time + refresh_interval?: Duration max_result_window?: integer max_inner_result_window?: integer max_rescore_window?: integer @@ -9356,13 +9365,13 @@ export interface IndicesIndexSettings { max_terms_count?: integer max_regex_length?: integer routing?: IndicesIndexRouting - gc_deletes?: Time + gc_deletes?: Duration default_pipeline?: PipelineName final_pipeline?: PipelineName lifecycle?: IndicesIndexSettingsLifecycle provided_name?: Name - creation_date?: DateString - creation_date_string?: DateString + creation_date?: SpecUtilsStringified> + creation_date_string?: DateTime uuid?: Uuid version?: IndicesIndexVersioning verified_before_close?: boolean | string @@ -9378,11 +9387,13 @@ export interface IndicesIndexSettings { shards?: integer queries?: IndicesQueries similarity?: IndicesSettingsSimilarity - mappings?: IndicesMappingLimitSettings + mapping?: IndicesMappingLimitSettings 'indexing.slowlog'?: IndicesSlowlogSettings indexing_pressure?: IndicesIndexingPressure store?: IndicesStorage } +export type IndicesIndexSettings = IndicesIndexSettingsKeys +& { [property: string]: any } export interface IndicesIndexSettingsAnalysis { analyzer?: Record @@ -9402,12 +9413,12 @@ export interface IndicesIndexSettingsLifecycle { } export interface IndicesIndexSettingsLifecycleStep { - wait_time_threshold?: Time + wait_time_threshold?: Duration } export interface IndicesIndexSettingsTimeSeries { - end_time?: DateOrEpochMillis - start_time?: DateOrEpochMillis + end_time?: DateTime + start_time?: DateTime } export interface IndicesIndexState { @@ -9507,11 +9518,11 @@ export interface IndicesQueries { } export interface IndicesRetentionLease { - period: Time + period: Duration } export interface IndicesSearchIdle { - after?: Time + after?: Duration } export type IndicesSegmentSortMissing = '_last' | '_first' @@ -9596,10 +9607,10 @@ export interface IndicesSlowlogSettings { } export interface IndicesSlowlogTresholdLevels { - warn?: Time - info?: Time - debug?: Time - trace?: Time + warn?: Duration + info?: Duration + debug?: Duration + trace?: Duration } export interface IndicesSlowlogTresholds { @@ -9630,7 +9641,7 @@ export interface IndicesTemplateMapping { } export interface IndicesTranslog { - sync_interval?: Time + sync_interval?: Duration durability?: IndicesTranslogDurability flush_threshold_size?: ByteSize retention?: IndicesTranslogRetention @@ -9640,7 +9651,7 @@ export type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC export interface IndicesTranslogRetention { size?: ByteSize - age?: Time + age?: Duration 
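// ---- editor's example (aside, not part of the patch) ---------------------
// The IndicesIndexSettingsKeys split plus the `& { [property: string]: any }`
// intersection above means settings keys not yet modelled still type-check.
// Keys below are illustrative; `number_of_shards` is elided from this hunk
// but present on the full interface.
const settings: IndicesIndexSettings = {
  number_of_shards: '3',
  refresh_interval: '5s',
  'index.queries.cache.enabled': false, // arbitrary dotted key, allowed by the index signature
}
// ---- end example ----------------------------------------------------------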
} export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' @@ -9656,8 +9667,8 @@ export interface IndicesAddBlockRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export interface IndicesAddBlockResponse { @@ -9693,7 +9704,7 @@ export interface IndicesAnalyzeCharFilterDetail { name: string } -export interface IndicesAnalyzeExplainAnalyzeToken { +export interface IndicesAnalyzeExplainAnalyzeTokenKeys { bytes: string end_offset: long keyword?: boolean @@ -9704,6 +9715,8 @@ export interface IndicesAnalyzeExplainAnalyzeToken { token: string type: string } +export type IndicesAnalyzeExplainAnalyzeToken = IndicesAnalyzeExplainAnalyzeTokenKeys +& { [property: string]: any } export interface IndicesAnalyzeRequest extends RequestBase { index?: IndexName @@ -9749,8 +9762,8 @@ export type IndicesClearCacheResponse = ShardsOperationResponseBase export interface IndicesCloneRequest extends RequestBase { index: IndexName target: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration wait_for_active_shards?: WaitForActiveShards /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -9779,8 +9792,8 @@ export interface IndicesCloseRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration wait_for_active_shards?: WaitForActiveShards } @@ -9792,8 +9805,8 @@ export interface IndicesCloseResponse { export interface IndicesCreateRequest extends RequestBase { index: IndexName - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration wait_for_active_shards?: WaitForActiveShards /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { @@ -9820,7 +9833,7 @@ export interface IndicesDataStreamsStatsDataStreamsStatsItem { data_stream: Name store_size?: ByteSize store_size_bytes: integer - maximum_timestamp: long + maximum_timestamp: EpochTime } export interface IndicesDataStreamsStatsRequest extends RequestBase { @@ -9842,8 +9855,8 @@ export interface IndicesDeleteRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type IndicesDeleteResponse = IndicesResponseBase @@ -9851,8 +9864,8 @@ export type IndicesDeleteResponse = IndicesResponseBase export interface IndicesDeleteAliasRequest extends RequestBase { index: Indices name: Names - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type IndicesDeleteAliasResponse = AcknowledgedResponseBase @@ -9866,30 +9879,27 @@ export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase export interface IndicesDeleteIndexTemplateRequest extends RequestBase { name: Names - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesDeleteTemplateRequest extends RequestBase { name: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase export interface IndicesDiskUsageRequest extends RequestBase { - index: IndexName + index: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards flush?: boolean ignore_unavailable?: boolean - master_timeout?: TimeUnit - timeout?: TimeUnit run_expensive_tasks?: boolean - wait_for_active_shards?: string } export type IndicesDiskUsageResponse = any @@ -9919,7 +9929,7 @@ export type IndicesExistsAliasResponse = boolean export interface IndicesExistsIndexTemplateRequest extends RequestBase { name: Name - master_timeout?: Time + master_timeout?: Duration } export type IndicesExistsIndexTemplateResponse = boolean @@ -9928,7 +9938,7 @@ export interface IndicesExistsTemplateRequest extends RequestBase { name: Names flat_settings?: boolean local?: boolean - master_timeout?: Time + master_timeout?: Duration } export type IndicesExistsTemplateResponse = boolean @@ -9966,8 +9976,8 @@ export interface IndicesFieldUsageStatsRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean fields?: Fields - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration wait_for_active_shards?: WaitForActiveShards } @@ -9986,7 +9996,7 @@ export interface IndicesFieldUsageStatsUsageStatsShards { routing: IndicesStatsShardRouting stats: IndicesFieldUsageStatsShardsStats tracking_id: string - tracking_started_at_millis: EpochMillis + tracking_started_at_millis: EpochTime } export interface IndicesFlushRequest extends RequestBase { @@ -10025,7 +10035,7 @@ export interface IndicesGetRequest extends RequestBase { ignore_unavailable?: boolean include_defaults?: boolean local?: boolean - master_timeout?: Time + master_timeout?: Duration features?: IndicesGetFeatures } @@ -10068,7 +10078,7 @@ export interface IndicesGetFieldMappingRequest extends RequestBase { export type IndicesGetFieldMappingResponse = Record export interface IndicesGetFieldMappingTypeFieldMappings { - mappings: Partial> + mappings: Record } export interface 
IndicesGetIndexTemplateIndexTemplateItem { @@ -10080,7 +10090,7 @@ export interface IndicesGetIndexTemplateRequest extends RequestBase { name?: Name local?: boolean flat_settings?: boolean - master_timeout?: Time + master_timeout?: Duration } export interface IndicesGetIndexTemplateResponse { @@ -10098,7 +10108,7 @@ export interface IndicesGetMappingRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean local?: boolean - master_timeout?: Time + master_timeout?: Duration } export type IndicesGetMappingResponse = Record @@ -10112,7 +10122,7 @@ export interface IndicesGetSettingsRequest extends RequestBase { ignore_unavailable?: boolean include_defaults?: boolean local?: boolean - master_timeout?: Time + master_timeout?: Duration } export type IndicesGetSettingsResponse = Record @@ -10121,7 +10131,7 @@ export interface IndicesGetTemplateRequest extends RequestBase { name?: Names flat_settings?: boolean local?: boolean - master_timeout?: Time + master_timeout?: Duration } export type IndicesGetTemplateResponse = Record @@ -10156,8 +10166,8 @@ export interface IndicesOpenRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration wait_for_active_shards?: WaitForActiveShards } @@ -10175,8 +10185,8 @@ export type IndicesPromoteDataStreamResponse = any export interface IndicesPutAliasRequest extends RequestBase { index: Indices name: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { filter?: QueryDslQueryContainer @@ -10217,8 +10227,8 @@ export interface IndicesPutMappingRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration write_index_only?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -10244,9 +10254,9 @@ export interface IndicesPutSettingsRequest extends RequestBase { expand_wildcards?: ExpandWildcards flat_settings?: boolean ignore_unavailable?: boolean - master_timeout?: Time + master_timeout?: Duration preserve_existing?: boolean - timeout?: Time + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, use 'settings' instead. */ body?: IndicesIndexSettings } @@ -10257,9 +10267,8 @@ export interface IndicesPutTemplateRequest extends RequestBase { name: Name create?: boolean flat_settings?: boolean - master_timeout?: Time - timeout?: Time - order?: integer + master_timeout?: Duration + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
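// ---- editor's example (aside, not part of the patch) ---------------------
// Hedged sketch of a put-mapping call site with the migrated Duration
// timeout; `index` and the `properties` body field are assumed from the full
// interface, since this extract only shows part of the request.
const putMapping: IndicesPutMappingRequest = {
  index: 'my-index',
  master_timeout: '30s',
  body: {
    properties: { tags: { type: 'keyword' } },
  },
}
// ---- end example ----------------------------------------------------------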
*/ body?: { aliases?: Record @@ -10303,12 +10312,12 @@ export interface IndicesRecoveryRecoveryIndexStatus { bytes?: IndicesRecoveryRecoveryBytes files: IndicesRecoveryRecoveryFiles size: IndicesRecoveryRecoveryBytes - source_throttle_time?: Time - source_throttle_time_in_millis: EpochMillis - target_throttle_time?: Time - target_throttle_time_in_millis: EpochMillis - total_time_in_millis: EpochMillis - total_time?: Time + source_throttle_time?: Duration + source_throttle_time_in_millis: DurationValue + target_throttle_time?: Duration + target_throttle_time_in_millis: DurationValue + total_time?: Duration + total_time_in_millis: DurationValue } export interface IndicesRecoveryRecoveryOrigin { @@ -10327,8 +10336,10 @@ export interface IndicesRecoveryRecoveryOrigin { } export interface IndicesRecoveryRecoveryStartStatus { - check_index_time: long - total_time_in_millis: string + check_index_time?: Duration + check_index_time_in_millis: DurationValue + total_time?: Duration + total_time_in_millis: DurationValue } export interface IndicesRecoveryRecoveryStatus { @@ -10350,13 +10361,13 @@ export interface IndicesRecoveryShardRecovery { source: IndicesRecoveryRecoveryOrigin stage: string start?: IndicesRecoveryRecoveryStartStatus - start_time?: DateString - start_time_in_millis: EpochMillis - stop_time?: DateString - stop_time_in_millis: EpochMillis + start_time?: DateTime + start_time_in_millis: EpochTime + stop_time?: DateTime + stop_time_in_millis?: EpochTime target: IndicesRecoveryRecoveryOrigin - total_time?: DateString - total_time_in_millis: EpochMillis + total_time?: Duration + total_time_in_millis: DurationValue translog: IndicesRecoveryTranslogStatus type: string verify_index: IndicesRecoveryVerifyIndex @@ -10367,15 +10378,15 @@ export interface IndicesRecoveryTranslogStatus { recovered: long total: long total_on_start: long - total_time?: string - total_time_in_millis: EpochMillis + total_time?: Duration + total_time_in_millis: DurationValue } export interface IndicesRecoveryVerifyIndex { - check_index_time?: Time - check_index_time_in_millis: EpochMillis - total_time?: Time - total_time_in_millis: EpochMillis + check_index_time?: Duration + check_index_time_in_millis: DurationValue + total_time?: Duration + total_time_in_millis: DurationValue } export interface IndicesRefreshRequest extends RequestBase { @@ -10438,8 +10449,8 @@ export interface IndicesRolloverRequest extends RequestBase { alias: IndexAlias new_index?: IndexName dry_run?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration wait_for_active_shards?: WaitForActiveShards /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { @@ -10461,13 +10472,13 @@ export interface IndicesRolloverResponse { } export interface IndicesRolloverRolloverConditions { - max_age?: Time + max_age?: Duration + max_age_millis?: DurationValue max_docs?: long max_size?: string max_size_bytes?: ByteSize max_primary_shard_size?: ByteSize max_primary_shard_size_bytes?: ByteSize - max_age_millis?: EpochMillis } export interface IndicesSegmentsIndexSegment { @@ -10556,8 +10567,8 @@ export interface IndicesShardStoresShardStoreWrapper { export interface IndicesShrinkRequest extends RequestBase { index: IndexName target: IndexName - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration wait_for_active_shards?: WaitForActiveShards /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -10575,7 +10586,7 @@ export interface IndicesShrinkResponse { export interface IndicesSimulateIndexTemplateRequest extends RequestBase { name: Name create?: boolean - master_timeout?: Time + master_timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { allow_auto_create?: boolean @@ -10600,7 +10611,7 @@ export interface IndicesSimulateTemplateOverlapping { export interface IndicesSimulateTemplateRequest extends RequestBase { name?: Name create?: boolean - master_timeout?: Time + master_timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, use 'template' instead. */ body?: IndicesIndexTemplate } @@ -10619,8 +10630,8 @@ export interface IndicesSimulateTemplateTemplate { export interface IndicesSplitRequest extends RequestBase { index: IndexName target: IndexName - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration wait_for_active_shards?: WaitForActiveShards /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -10785,8 +10796,8 @@ export interface IndicesUnfreezeRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration wait_for_active_shards?: string } @@ -10830,8 +10841,8 @@ export interface IndicesUpdateAliasesRemoveIndexAction { } export interface IndicesUpdateAliasesRequest extends RequestBase { - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
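// ---- editor's example (aside, not part of the patch) ---------------------
// Sketch of a rollover request against the conditions type above; note that
// `max_age` is a Duration while `max_age_millis` is its numeric twin. The
// alias name and the `conditions` body key are assumed/illustrative.
const rollover: IndicesRolloverRequest = {
  alias: 'logs-write',
  body: {
    conditions: { max_age: '7d', max_docs: 50000000 },
  },
}
// ---- end example ----------------------------------------------------------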
*/ body?: { actions?: IndicesUpdateAliasesAction[] @@ -11188,8 +11199,8 @@ export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS export interface IngestDeletePipelineRequest extends RequestBase { id: Id - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type IngestDeletePipelineResponse = AcknowledgedResponseBase @@ -11197,7 +11208,7 @@ export type IngestDeletePipelineResponse = AcknowledgedResponseBase export interface IngestGeoIpStatsGeoIpDownloadStatistics { successful_downloads: integer failed_downloads: integer - total_download_time: integer + total_download_time: DurationValue database_count: integer skipped_updates: integer } @@ -11221,7 +11232,7 @@ export interface IngestGeoIpStatsResponse { export interface IngestGetPipelineRequest extends RequestBase { id?: Id - master_timeout?: Time + master_timeout?: Duration summary?: boolean } @@ -11236,8 +11247,9 @@ export interface IngestProcessorGrokResponse { export interface IngestPutPipelineRequest extends RequestBase { id: Id - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration + if_version?: VersionNumber /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { _meta?: Metadata @@ -11266,7 +11278,7 @@ export interface IngestSimulateDocumentSimulation { } export interface IngestSimulateIngest { - timestamp: DateString + timestamp: DateTime pipeline?: Name } @@ -11293,9 +11305,9 @@ export interface IngestSimulateResponse { } export interface LicenseLicense { - expiry_date_in_millis: EpochMillis - issue_date_in_millis: EpochMillis - start_date_in_millis?: EpochMillis + expiry_date_in_millis: EpochTime + issue_date_in_millis: EpochTime + start_date_in_millis?: EpochTime issued_to: string issuer: string max_nodes?: long | null @@ -11315,10 +11327,10 @@ export interface LicenseDeleteRequest extends RequestBase { export type LicenseDeleteResponse = AcknowledgedResponseBase export interface LicenseGetLicenseInformation { - expiry_date?: DateString - expiry_date_in_millis?: EpochMillis - issue_date: DateString - issue_date_in_millis: EpochMillis + expiry_date?: DateTime + expiry_date_in_millis?: EpochTime + issue_date: DateTime + issue_date_in_millis: EpochTime issued_to: string issuer: string max_nodes: long | null @@ -11326,7 +11338,7 @@ export interface LicenseGetLicenseInformation { status: LicenseLicenseStatus type: LicenseLicenseType uid: Uuid - start_date_in_millis: EpochMillis + start_date_in_millis: EpochTime } export interface LicenseGetRequest extends RequestBase { @@ -11398,7 +11410,7 @@ export interface LicensePostStartTrialResponse { export interface LogstashPipeline { description: string - last_modified: Timestamp + last_modified: DateTime pipeline_metadata: LogstashPipelineMetadata username: string pipeline: string @@ -11496,28 +11508,28 @@ export interface MigrationPostFeatureUpgradeResponse { } export interface MlAnalysisConfig { - bucket_span: TimeSpan + bucket_span: Duration categorization_analyzer?: MlCategorizationAnalyzer categorization_field_name?: Field categorization_filters?: string[] detectors: MlDetector[] influencers?: Field[] - latency?: Time - model_prune_window?: Time + latency?: Duration + model_prune_window?: Duration multivariate_by_fields?: boolean per_partition_categorization?: MlPerPartitionCategorization summary_count_field_name?: Field } export interface MlAnalysisConfigRead { - bucket_span: TimeSpan + bucket_span: Duration 
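// ---- editor's example (aside, not part of the patch) ---------------------
// Hedged sketch of the new `if_version` optimistic-concurrency guard on
// pipeline puts: the request only succeeds if the stored pipeline still has
// that version. Pipeline id and processor payload are illustrative, and the
// body fields beyond `_meta` are assumed from the full interface.
const putPipeline: IngestPutPipelineRequest = {
  id: 'my-pipeline',
  if_version: 3,
  body: {
    description: 'illustrative pipeline',
    processors: [{ set: { field: 'env', value: 'prod' } }],
  },
}
// ---- end example ----------------------------------------------------------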
categorization_analyzer?: MlCategorizationAnalyzer categorization_field_name?: Field categorization_filters?: string[] detectors: MlDetectorRead[] influencers: Field[] - model_prune_window?: Time - latency?: Time + model_prune_window?: Duration + latency?: Duration multivariate_by_fields?: boolean per_partition_categorization?: MlPerPartitionCategorization summary_count_field_name?: Field @@ -11534,7 +11546,7 @@ export interface MlAnalysisMemoryLimit { export interface MlAnomaly { actual?: double[] - bucket_span: Time + bucket_span: DurationValue by_field_name?: string by_field_value?: string causes?: MlAnomalyCause[] @@ -11553,7 +11565,7 @@ export interface MlAnomaly { probability: double record_score: double result_type: string - timestamp: EpochMillis + timestamp: EpochTime typical?: double[] } @@ -11574,11 +11586,16 @@ export interface MlAnomalyCause { typical: double[] } +export interface MlApiKeyAuthorization { + id: string + name: string +} + export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time' export interface MlBucketInfluencer { anomaly_score: double - bucket_span: long + bucket_span: DurationValue influencer_field_name: Field initial_anomaly_score: double is_interim: boolean @@ -11586,28 +11603,30 @@ export interface MlBucketInfluencer { probability: double raw_anomaly_score: double result_type: string - timestamp: Time + timestamp: EpochTime + timestamp_string?: DateTime } export interface MlBucketSummary { anomaly_score: double bucket_influencers: MlBucketInfluencer[] - bucket_span: Time + bucket_span: DurationValue event_count: long initial_anomaly_score: double is_interim: boolean job_id: Id - processing_time_ms: double + processing_time_ms: DurationValue result_type: string - timestamp: Time + timestamp: EpochTime + timestamp_string?: DateTime } export interface MlCalendarEvent { calendar_id?: Id event_id?: Id description: string - end_time: EpochMillis - start_time: EpochMillis + end_time: DateTime + start_time: DateTime } export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition @@ -11639,7 +11658,7 @@ export interface MlCategory { export interface MlChunkingConfig { mode: MlChunkingMode - time_span?: Time + time_span?: Duration } export type MlChunkingMode = 'auto' | 'manual' | 'off' @@ -11688,15 +11707,16 @@ export interface MlDataDescription { export interface MlDatafeed { aggregations?: Record aggs?: Record + authorization?: MlDatafeedAuthorization chunking_config?: MlChunkingConfig datafeed_id: Id - frequency?: Timestamp + frequency?: Duration indices: string[] indexes?: string[] job_id: Id max_empty_searches?: integer query: QueryDslQueryContainer - query_delay?: Timestamp + query_delay?: Duration script_fields?: Record scroll_size?: integer delayed_data_check_config: MlDelayedDataCheckConfig @@ -11704,20 +11724,26 @@ export interface MlDatafeed { indices_options?: IndicesOptions } +export interface MlDatafeedAuthorization { + api_key?: MlApiKeyAuthorization + roles?: string[] + service_account?: string +} + export interface MlDatafeedConfig { aggregations?: Record aggs?: Record chunking_config?: MlChunkingConfig datafeed_id?: Id delayed_data_check_config?: MlDelayedDataCheckConfig - frequency?: Timestamp + frequency?: Duration indexes?: string[] indices: string[] indices_options?: IndicesOptions job_id?: Id max_empty_searches?: integer query: QueryDslQueryContainer - query_delay?: Timestamp + query_delay?: Duration runtime_mappings?: MappingRuntimeFields script_fields?: Record scroll_size?: integer @@ -11742,11 
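// ---- editor's example (aside, not part of the patch) ---------------------
// MlCalendarEvent's start/end are now DateTime rather than epoch millis only,
// so an ISO-8601 string type-checks as well. Values are illustrative.
const maintenanceWindow: MlCalendarEvent = {
  description: 'monthly maintenance window',
  start_time: '2021-05-01T00:00:00Z',
  end_time: '2021-05-01T04:00:00Z',
}
// ---- end example ----------------------------------------------------------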
+11768,11 @@ export interface MlDatafeedStats { export interface MlDatafeedTimingStats { bucket_count: long - exponential_average_search_time_per_hour_ms: double + exponential_average_search_time_per_hour_ms: DurationValue job_id: Id search_count: long - total_search_time_ms: double - average_search_time_per_bucket_ms?: number + total_search_time_ms: DurationValue + average_search_time_per_bucket_ms?: DurationValue } export interface MlDataframeAnalysis { @@ -11851,6 +11877,12 @@ export interface MlDataframeAnalytics { state: MlDataframeState } +export interface MlDataframeAnalyticsAuthorization { + api_key?: MlApiKeyAuthorization + roles?: string[] + service_account?: string +} + export interface MlDataframeAnalyticsDestination { index: IndexName results_field?: Field @@ -11892,7 +11924,7 @@ export interface MlDataframeAnalyticsStatsDataCounts { export interface MlDataframeAnalyticsStatsHyperparameters { hyperparameters: MlHyperparameters iteration: integer - timestamp: DateString + timestamp: EpochTime timing_stats: MlTimingStats validation_loss: MlValidationLoss } @@ -11901,12 +11933,12 @@ export interface MlDataframeAnalyticsStatsMemoryUsage { memory_reestimate_bytes?: long peak_usage_bytes: long status: string - timestamp?: DateString + timestamp?: EpochTime } export interface MlDataframeAnalyticsStatsOutlierDetection { parameters: MlOutlierDetectionParameters - timestamp: DateString + timestamp: EpochTime timing_stats: MlTimingStats } @@ -11916,16 +11948,17 @@ export interface MlDataframeAnalyticsStatsProgress { } export interface MlDataframeAnalyticsSummary { - id: Id - source: MlDataframeAnalyticsSource - dest: MlDataframeAnalyticsDestination + allow_lazy_start?: boolean analysis: MlDataframeAnalysisContainer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + authorization?: MlDataframeAnalyticsAuthorization + create_time?: EpochTime description?: string - model_memory_limit?: string + dest: MlDataframeAnalyticsDestination + id: Id max_num_threads?: integer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] - allow_lazy_start?: boolean - create_time?: long + model_memory_limit?: string + source: MlDataframeAnalyticsSource version?: VersionString } @@ -11992,12 +12025,14 @@ export interface MlDataframeEvaluationRegressionMetricsMsle { export type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed' export interface MlDelayedDataCheckConfig { - check_window?: Time + check_window?: Duration enabled: boolean } export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated' +export type MlDeploymentAssignmentState = 'starting' | 'started' | 'stopping' | 'failed' + export type MlDeploymentState = 'started' | 'starting' | 'stopping' export interface MlDetectionRule { @@ -12136,7 +12171,7 @@ export interface MlInfluence { } export interface MlInfluencer { - bucket_span: long + bucket_span: DurationValue influencer_score: double influencer_field_name: Field influencer_field_value: string @@ -12145,7 +12180,7 @@ export interface MlInfluencer { job_id: Id probability: double result_type: string - timestamp: Time + timestamp: EpochTime foo?: string } @@ -12153,16 +12188,16 @@ export interface MlJob { allow_lazy_open: boolean analysis_config: MlAnalysisConfig analysis_limits?: MlAnalysisLimits - background_persist_interval?: Time + background_persist_interval?: Duration blocked?: MlJobBlocked - create_time?: integer + create_time?: DateTime custom_settings?: MlCustomSettings daily_model_snapshot_retention_after_days?: 
long data_description: MlDataDescription datafeed_config?: MlDatafeed deleting?: boolean description?: string - finished_time?: integer + finished_time?: DateTime groups?: string[] job_id: Id job_type?: string @@ -12186,7 +12221,7 @@ export interface MlJobConfig { allow_lazy_open?: boolean analysis_config: MlAnalysisConfig analysis_limits?: MlAnalysisLimits - background_persist_interval?: Time + background_persist_interval?: Duration custom_settings?: MlCustomSettings daily_model_snapshot_retention_after_days?: long data_description: MlDataDescription @@ -12227,21 +12262,21 @@ export interface MlJobStats { job_id: string model_size_stats: MlModelSizeStats node?: MlDiscoveryNode - open_time?: DateString + open_time?: DateTime state: MlJobState timing_stats: MlJobTimingStats deleting?: boolean } export interface MlJobTimingStats { - average_bucket_processing_time_ms?: double + average_bucket_processing_time_ms?: DurationValue bucket_count: long - exponential_average_bucket_processing_time_ms?: double - exponential_average_bucket_processing_time_per_hour_ms: double + exponential_average_bucket_processing_time_ms?: DurationValue + exponential_average_bucket_processing_time_per_hour_ms: DurationValue job_id: Id - total_bucket_processing_time_ms: double - maximum_bucket_processing_time_ms?: double - minimum_bucket_processing_time_ms?: double + total_bucket_processing_time_ms: DurationValue + maximum_bucket_processing_time_ms?: DurationValue + minimum_bucket_processing_time_ms?: DurationValue } export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' @@ -12255,7 +12290,7 @@ export interface MlModelPlotConfig { export interface MlModelSizeStats { bucket_allocation_failures_count: long job_id: Id - log_time: Time + log_time: DateTime memory_status: MlMemoryStatus model_bytes: ByteSize model_bytes_exceeded?: ByteSize @@ -12289,6 +12324,14 @@ export interface MlModelSnapshot { timestamp: long } +export interface MlModelSnapshotUpgrade { + job_id: Id + snapshot_id: Id + state: MlSnapshotUpgradeState + node: MlDiscoveryNode + assignment_explanation: string +} + export interface MlNerInferenceOptions { tokenization?: MlTokenizationConfigContainer results_field?: string @@ -12331,12 +12374,13 @@ export interface MlOutlierDetectionParameters { } export interface MlOverallBucket { - bucket_span: long + bucket_span: DurationValue is_interim: boolean jobs: MlOverallBucketJob[] overall_score: double result_type: string - timestamp: Time + timestamp: EpochTime + timestamp_string: DateTime } export interface MlOverallBucketJob { @@ -12397,10 +12441,14 @@ export interface MlRuleCondition { } export interface MlRunningStateSearchInterval { - end_ms: long - start_ms: long + end?: Duration + end_ms: DurationValue + start?: Duration + start_ms: DurationValue } +export type MlSnapshotUpgradeState = 'loading_old_state' | 'saving_new_state' | 'stopped' | 'failed' + export interface MlTextClassificationInferenceOptions { num_top_classes?: integer tokenization?: MlTokenizationConfigContainer @@ -12426,8 +12474,8 @@ export interface MlTextEmbeddingInferenceUpdateOptions { } export interface MlTimingStats { - elapsed_time: integer - iteration_time?: integer + elapsed_time: DurationValue + iteration_time?: DurationValue } export interface MlTokenizationConfigContainer { @@ -12461,21 +12509,27 @@ export interface MlTotalFeatureImportanceStatistics { min: integer } -export interface MlTrainedModelAllocation { - allocation_state: MlDeploymentAllocationState - routing_table: Record - start_time: DateString - 
task_parameters: MlTrainedModelAllocationTaskParameters +export interface MlTrainedModelAssignment { + assignment_state: MlDeploymentAssignmentState + routing_table: Record + start_time: DateTime + task_parameters: MlTrainedModelAssignmentTaskParameters } -export interface MlTrainedModelAllocationRoutingTable { +export interface MlTrainedModelAssignmentRoutingTable { reason: string routing_state: MlRoutingState + current_allocations: integer + target_allocations: integer } -export interface MlTrainedModelAllocationTaskParameters { +export interface MlTrainedModelAssignmentTaskParameters { model_bytes: integer model_id: Id + cache_size: ByteSize + number_of_allocations: integer + queue_capacity: integer + threads_per_allocation: integer } export interface MlTrainedModelConfig { @@ -12485,7 +12539,7 @@ export interface MlTrainedModelConfig { version?: VersionString compressed_definition?: string created_by?: string - create_time?: Time + create_time?: DateTime default_field_map?: Record description?: string estimated_heap_memory_usage_bytes?: integer @@ -12516,7 +12570,7 @@ export interface MlTrainedModelDeploymentAllocationStatus { } export interface MlTrainedModelDeploymentNodesStats { - average_inference_time_ms: double + average_inference_time_ms: DurationValue error_count: integer inference_count: integer last_access: long @@ -12524,8 +12578,8 @@ export interface MlTrainedModelDeploymentNodesStats { number_of_allocations: integer number_of_pending_requests: integer rejection_execution_count: integer - routing_state: MlTrainedModelAllocationRoutingTable - start_time: long + routing_state: MlTrainedModelAssignmentRoutingTable + start_time: EpochTime threads_per_allocation: integer timeout_count: integer } @@ -12540,7 +12594,7 @@ export interface MlTrainedModelDeploymentStats { queue_capacity: integer rejected_execution_count: integer reason: string - start_time: long + start_time: EpochTime state: MlDeploymentState threads_per_allocation: integer timeout_count: integer @@ -12570,7 +12624,7 @@ export interface MlTrainedModelInferenceStats { failure_count: integer inference_count: integer missing_all_fields_count: integer - timestamp: Time + timestamp: DateTime } export interface MlTrainedModelLocation { @@ -12597,6 +12651,12 @@ export interface MlTrainedModelStats { export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch' +export interface MlTransformAuthorization { + api_key?: MlApiKeyAuthorization + roles?: string[] + service_account?: string +} + export interface MlValidationLoss { fold_values: string[] loss_type: string @@ -12620,14 +12680,11 @@ export interface MlZeroShotClassificationInferenceUpdateOptions { export interface MlCloseJobRequest extends RequestBase { job_id: Id - allow_no_match?: boolean - force?: boolean - timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { allow_no_match?: boolean force?: boolean - timeout?: Time + timeout?: Duration } } @@ -12662,7 +12719,7 @@ export interface MlDeleteCalendarJobResponse { export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { id: Id force?: boolean - timeout?: Time + timeout?: Duration } export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase @@ -12676,12 +12733,10 @@ export type MlDeleteDatafeedResponse = AcknowledgedResponseBase export interface MlDeleteExpiredDataRequest extends RequestBase { job_id?: Id - requests_per_second?: float - timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { requests_per_second?: float - timeout?: Time + timeout?: Duration } } @@ -12699,7 +12754,7 @@ export interface MlDeleteForecastRequest extends RequestBase { job_id: Id forecast_id?: Id allow_no_forecasts?: boolean - timeout?: Time + timeout?: Duration } export type MlDeleteForecastResponse = AcknowledgedResponseBase @@ -12862,18 +12917,13 @@ export interface MlExplainDataFrameAnalyticsResponse { export interface MlFlushJobRequest extends RequestBase { job_id: Id - advance_time?: DateString - calc_interim?: boolean - end?: DateString - skip_time?: EpochMillis - start?: DateString /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - advance_time?: DateString + advance_time?: DateTime calc_interim?: boolean - end?: DateString - skip_time?: EpochMillis - start?: DateString + end?: DateTime + skip_time?: DateTime + start?: DateTime } } @@ -12884,13 +12934,10 @@ export interface MlFlushJobResponse { export interface MlForecastRequest extends RequestBase { job_id: Id - duration?: Time - expires_in?: Time - max_model_memory?: string /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - duration?: Time - expires_in?: Time + duration?: Duration + expires_in?: Duration max_model_memory?: string } } @@ -12902,26 +12949,19 @@ export interface MlForecastResponse { export interface MlGetBucketsRequest extends RequestBase { job_id: Id - timestamp?: Timestamp - anomaly_score?: double - desc?: boolean - end?: DateString - exclude_interim?: boolean - expand?: boolean + timestamp?: DateTime from?: integer size?: integer - sort?: Field - start?: DateString /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { anomaly_score?: double desc?: boolean - end?: DateString + end?: DateTime exclude_interim?: boolean expand?: boolean page?: MlPage sort?: Field - start?: DateString + start?: DateTime } } @@ -12932,11 +12972,11 @@ export interface MlGetBucketsResponse { export interface MlGetCalendarEventsRequest extends RequestBase { calendar_id: Id - end?: DateString + end?: DateTime from?: integer job_id?: Id size?: integer - start?: string + start?: DateTime } export interface MlGetCalendarEventsResponse { @@ -13043,13 +13083,13 @@ export interface MlGetFiltersResponse { export interface MlGetInfluencersRequest extends RequestBase { job_id: Id desc?: boolean - end?: DateString + end?: DateTime exclude_interim?: boolean influencer_score?: double from?: integer size?: integer sort?: Field - start?: DateString + start?: DateTime /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { page?: MlPage @@ -13125,8 +13165,8 @@ export interface MlGetMemoryStatsMemory { export interface MlGetMemoryStatsRequest extends RequestBase { node_id?: Id human?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export interface MlGetMemoryStatsResponse { @@ -13135,22 +13175,29 @@ export interface MlGetMemoryStatsResponse { nodes: Record } +export interface MlGetModelSnapshotUpgradeStatsRequest extends RequestBase { + job_id: Id + snapshot_id: Id + allow_no_match?: boolean +} + +export interface MlGetModelSnapshotUpgradeStatsResponse { + count: long + model_snapshot_upgrades: MlModelSnapshotUpgrade[] +} + export interface MlGetModelSnapshotsRequest extends RequestBase { job_id: Id snapshot_id?: Id - desc?: boolean - end?: Time from?: integer size?: integer - sort?: Field - start?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { desc?: boolean - end?: Time + end?: DateTime page?: MlPage sort?: Field - start?: Time + start?: DateTime } } @@ -13161,21 +13208,14 @@ export interface MlGetModelSnapshotsResponse { export interface MlGetOverallBucketsRequest extends RequestBase { job_id: Id - allow_no_match?: boolean - bucket_span?: Time - end?: Time - exclude_interim?: boolean - overall_score?: double | string - start?: Time - top_n?: integer /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { allow_no_match?: boolean - bucket_span?: Time - end?: Time + bucket_span?: Duration + end?: DateTime exclude_interim?: boolean overall_score?: double | string - start?: Time + start?: DateTime top_n?: integer } } @@ -13187,23 +13227,17 @@ export interface MlGetOverallBucketsResponse { export interface MlGetRecordsRequest extends RequestBase { job_id: Id - desc?: boolean - end?: DateString - exclude_interim?: boolean from?: integer - record_score?: double size?: integer - sort?: Field - start?: DateString /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { desc?: boolean - end?: DateString + end?: DateTime exclude_interim?: boolean page?: MlPage record_score?: double sort?: Field - start?: DateString + start?: DateTime } } @@ -13242,7 +13276,7 @@ export interface MlGetTrainedModelsStatsResponse { export interface MlInferTrainedModelRequest extends RequestBase { model_id: Id - timeout?: Time + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { docs: Record[] @@ -13294,10 +13328,9 @@ export interface MlInfoResponse { export interface MlOpenJobRequest extends RequestBase { job_id: Id - timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - timeout?: Time + timeout?: Duration } } @@ -13319,8 +13352,8 @@ export interface MlPostCalendarEventsResponse { export interface MlPostDataRequest extends RequestBase { job_id: Id - reset_end?: DateString - reset_start?: DateString + reset_end?: DateTime + reset_start?: DateTime /** @deprecated The use of the 'body' key has been deprecated, use 'data' instead. 
*/ body?: TData[] } @@ -13420,17 +13453,18 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase { } export interface MlPutDataFrameAnalyticsResponse { - id: Id - create_time: long - version: VersionString - source: MlDataframeAnalyticsSource - description?: string - dest: MlDataframeAnalyticsDestination - model_memory_limit: string + authorization?: MlDataframeAnalyticsAuthorization allow_lazy_start: boolean - max_num_threads: integer analysis: MlDataframeAnalysisContainer analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + create_time: EpochTime + description?: string + dest: MlDataframeAnalyticsDestination + id: Id + max_num_threads: integer + model_memory_limit: string + source: MlDataframeAnalyticsSource + version: VersionString } export interface MlPutDatafeedRequest extends RequestBase { @@ -13444,14 +13478,14 @@ export interface MlPutDatafeedRequest extends RequestBase { aggregations?: Record chunking_config?: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig - frequency?: Time + frequency?: Duration indices?: Indices indexes?: Indices indices_options?: IndicesOptions job_id?: Id max_empty_searches?: integer query?: QueryDslQueryContainer - query_delay?: Time + query_delay?: Duration runtime_mappings?: MappingRuntimeFields script_fields?: Record scroll_size?: integer @@ -13461,16 +13495,17 @@ export interface MlPutDatafeedRequest extends RequestBase { export interface MlPutDatafeedResponse { aggregations: Record + authorization?: MlDatafeedAuthorization chunking_config: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig datafeed_id: Id - frequency: Time + frequency: Duration indices: string[] job_id: Id indices_options?: IndicesOptions max_empty_searches: integer query: QueryDslQueryContainer - query_delay: Time + query_delay: Duration runtime_mappings?: MappingRuntimeFields script_fields?: Record scroll_size: integer @@ -13498,7 +13533,7 @@ export interface MlPutJobRequest extends RequestBase { allow_lazy_open?: boolean analysis_config: MlAnalysisConfig analysis_limits?: MlAnalysisLimits - background_persist_interval: Time + background_persist_interval?: Duration custom_settings?: MlCustomSettings daily_model_snapshot_retention_after_days?: long data_description: MlDataDescription @@ -13517,8 +13552,8 @@ export interface MlPutJobResponse { allow_lazy_open: boolean analysis_config: MlAnalysisConfigRead analysis_limits: MlAnalysisLimits - background_persist_interval?: Time - create_time: DateString + background_persist_interval?: Duration + create_time: DateTime custom_settings?: MlCustomSettings daily_model_snapshot_retention_after_days: long data_description: MlDataDescription @@ -13674,7 +13709,6 @@ export type MlResetJobResponse = AcknowledgedResponseBase export interface MlRevertModelSnapshotRequest extends RequestBase { job_id: Id snapshot_id: Id - delete_intervening_results?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { delete_intervening_results?: boolean @@ -13687,14 +13721,14 @@ export interface MlRevertModelSnapshotResponse { export interface MlSetUpgradeModeRequest extends RequestBase { enabled?: boolean - timeout?: Time + timeout?: Duration } export type MlSetUpgradeModeResponse = AcknowledgedResponseBase export interface MlStartDataFrameAnalyticsRequest extends RequestBase { id: Id - timeout?: Time + timeout?: Duration } export interface MlStartDataFrameAnalyticsResponse { @@ -13704,14 +13738,11 @@ export interface MlStartDataFrameAnalyticsResponse { export interface MlStartDatafeedRequest extends RequestBase { datafeed_id: Id - end?: Time - start?: Time - timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - end?: Time - start?: Time - timeout?: Time + end?: DateTime + start?: DateTime + timeout?: Duration } } @@ -13722,22 +13753,23 @@ export interface MlStartDatafeedResponse { export interface MlStartTrainedModelDeploymentRequest extends RequestBase { model_id: Id + cache_size?: ByteSize number_of_allocations?: integer queue_capacity?: integer threads_per_allocation?: integer - timeout?: Time + timeout?: Duration wait_for?: MlDeploymentAllocationState } export interface MlStartTrainedModelDeploymentResponse { - allocation: MlTrainedModelAllocation + assignment: MlTrainedModelAssignment } export interface MlStopDataFrameAnalyticsRequest extends RequestBase { id: Id allow_no_match?: boolean force?: boolean - timeout?: Time + timeout?: Duration } export interface MlStopDataFrameAnalyticsResponse { @@ -13746,14 +13778,11 @@ export interface MlStopDataFrameAnalyticsResponse { export interface MlStopDatafeedRequest extends RequestBase { datafeed_id: Id - allow_no_match?: boolean - force?: boolean - timeout?: Time /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { allow_no_match?: boolean force?: boolean - timeout?: Time + timeout?: Duration } } @@ -13783,17 +13812,18 @@ export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { } export interface MlUpdateDataFrameAnalyticsResponse { - id: Id + authorization?: MlDataframeAnalyticsAuthorization + allow_lazy_start: boolean + analysis: MlDataframeAnalysisContainer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] create_time: long - version: VersionString - source: MlDataframeAnalyticsSource description?: string dest: MlDataframeAnalyticsDestination - model_memory_limit: string - allow_lazy_start: boolean + id: Id max_num_threads: integer - analysis: MlDataframeAnalysisContainer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + model_memory_limit: string + source: MlDataframeAnalyticsSource + version: VersionString } export interface MlUpdateDatafeedRequest extends RequestBase { @@ -13807,13 +13837,13 @@ export interface MlUpdateDatafeedRequest extends RequestBase { aggregations?: Record chunking_config?: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig - frequency?: Time + frequency?: Duration indices?: string[] indexes?: string[] indices_options?: IndicesOptions max_empty_searches?: integer query?: QueryDslQueryContainer - query_delay?: Time + query_delay?: Duration runtime_mappings?: MappingRuntimeFields script_fields?: Record scroll_size?: integer @@ -13821,17 +13851,18 @@ export interface MlUpdateDatafeedRequest extends RequestBase { } export interface MlUpdateDatafeedResponse { + authorization?: MlDatafeedAuthorization aggregations: Record chunking_config: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig datafeed_id: Id - frequency: Time + frequency: Duration indices: string[] - job_id: Id indices_options?: IndicesOptions + job_id: Id max_empty_searches: integer query: QueryDslQueryContainer - query_delay: Time + query_delay: Duration runtime_mappings?: MappingRuntimeFields script_fields?: Record scroll_size: integer @@ -13859,7 +13890,7 @@ export interface MlUpdateJobRequest extends RequestBase { body?: { allow_lazy_open?: boolean analysis_limits?: MlAnalysisMemoryLimit - background_persist_interval?: Time + background_persist_interval?: Duration custom_settings?: Record categorization_filters?: string[] description?: string @@ -13878,9 +13909,9 @@ export interface MlUpdateJobResponse { allow_lazy_open: boolean analysis_config: MlAnalysisConfigRead analysis_limits: MlAnalysisLimits - background_persist_interval?: Time - create_time: EpochMillis - finished_time?: EpochMillis + background_persist_interval?: Duration + create_time: EpochTime + finished_time?: EpochTime custom_settings?: Record daily_model_snapshot_retention_after_days: long data_description: MlDataDescription @@ -13917,7 +13948,7 @@ export interface MlUpgradeJobSnapshotRequest extends RequestBase { job_id: Id snapshot_id: Id wait_for_completion?: boolean - timeout?: Time + timeout?: Duration } export interface MlUpgradeJobSnapshotResponse { @@ -13953,7 +13984,7 @@ export interface MonitoringBulkRequest | TDocument)[] } @@ -14000,7 +14031,7 @@ export interface NodesCgroupCpu { export interface NodesCgroupCpuStat { number_of_elapsed_periods?: long number_of_times_throttled?: long - time_throttled_nanos?: long + time_throttled_nanos?: DurationValue } export interface NodesCgroupMemory { @@ -14034,21 +14065,21 @@ export interface NodesClusterStateQueue { } export interface NodesClusterStateUpdate { - count?: long - computation_time?: 
string - computation_time_millis?: long - publication_time?: string - publication_time_millis?: long - context_construction_time?: string - context_construction_time_millis?: long - commit_time?: string - commit_time_millis?: long - completion_time?: string - completion_time_millis?: long - master_apply_time?: string - master_apply_time_millis?: long - notification_time?: string - notification_time_millis?: long + count: long + computation_time?: Duration + computation_time_millis?: DurationValue + publication_time?: Duration + publication_time_millis?: DurationValue + context_construction_time?: Duration + context_construction_time_millis?: DurationValue + commit_time?: Duration + commit_time_millis?: DurationValue + completion_time?: Duration + completion_time_millis?: DurationValue + master_apply_time?: Duration + master_apply_time_millis?: DurationValue + notification_time?: Duration + notification_time_millis?: DurationValue } export interface NodesContext { @@ -14060,18 +14091,18 @@ export interface NodesContext { export interface NodesCpu { percent?: integer - sys?: string - sys_in_millis?: long - total?: string - total_in_millis?: long - user?: string - user_in_millis?: long + sys?: Duration + sys_in_millis?: DurationValue + total?: Duration + total_in_millis?: DurationValue + user?: Duration + user_in_millis?: DurationValue load_average?: Record } export interface NodesCpuAcct { control_group?: string - usage_nanos?: long + usage_nanos?: DurationValue } export interface NodesDataPathStats { @@ -14158,7 +14189,7 @@ export interface NodesIngestTotal { current?: long failed?: long processors?: Record[] - time_in_millis?: long + time_in_millis?: DurationValue } export interface NodesIoStatDevice { @@ -14282,7 +14313,7 @@ export interface NodesProcessor { count?: long current?: long failed?: long - time_in_millis?: long + time_in_millis?: DurationValue } export interface NodesPublishedClusterStates { @@ -14294,8 +14325,8 @@ export interface NodesPublishedClusterStates { export interface NodesRecording { name?: string cumulative_execution_count?: long - cumulative_execution_time?: string - cumulative_execution_time_millis?: long + cumulative_execution_time?: Duration + cumulative_execution_time_millis?: DurationValue } export interface NodesRepositoryLocation { @@ -14309,8 +14340,8 @@ export interface NodesRepositoryMeteringInformation { repository_type: string repository_location: NodesRepositoryLocation repository_ephemeral_id: Id - repository_started_at: EpochMillis - repository_stopped_at?: EpochMillis + repository_started_at: EpochTime + repository_stopped_at?: EpochTime archived: boolean cluster_version?: VersionNumber request_counts: NodesRequestCounts @@ -14443,11 +14474,11 @@ export interface NodesHotThreadsHotThread { export interface NodesHotThreadsRequest extends RequestBase { node_id?: NodeIds ignore_idle_threads?: boolean - interval?: Time + interval?: Duration snapshots?: long - master_timeout?: Time + master_timeout?: Duration threads?: long - timeout?: Time + timeout?: Duration type?: ThreadType sort?: ThreadType } @@ -14756,7 +14787,7 @@ export interface NodesInfoNodeJvmInfo { mem: NodesInfoNodeInfoJvmMemory memory_pools: string[] pid: integer - start_time_in_millis: long + start_time_in_millis: EpochTime version: VersionString vm_name: Name vm_vendor: string @@ -14773,7 +14804,7 @@ export interface NodesInfoNodeOperatingSystemInfo { allocated_processors?: integer name: Name pretty_name: Name - refresh_interval_in_millis: integer + refresh_interval_in_millis: 
DurationValue version: VersionString cpu?: NodesInfoNodeInfoOSCPU mem?: NodesInfoNodeInfoMemory @@ -14783,12 +14814,12 @@ export interface NodesInfoNodeOperatingSystemInfo { export interface NodesInfoNodeProcessInfo { id: long mlockall: boolean - refresh_interval_in_millis: long + refresh_interval_in_millis: DurationValue } export interface NodesInfoNodeThreadPoolInfo { core?: integer - keep_alive?: string + keep_alive?: Duration max?: integer queue_size: integer size?: integer @@ -14799,8 +14830,8 @@ export interface NodesInfoRequest extends RequestBase { node_id?: NodeIds metric?: Metrics flat_settings?: boolean - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type NodesInfoResponse = NodesInfoResponseBase @@ -14812,7 +14843,7 @@ export interface NodesInfoResponseBase extends NodesNodesResponseBase { export interface NodesReloadSecureSettingsRequest extends RequestBase { node_id?: NodeIds - timeout?: Time + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { secure_settings_password?: Password @@ -14836,8 +14867,8 @@ export interface NodesStatsRequest extends RequestBase { groups?: boolean include_segment_file_sizes?: boolean level?: Level - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration types?: string[] include_unloaded_segments?: boolean } @@ -14851,15 +14882,15 @@ export interface NodesStatsResponseBase extends NodesNodesResponseBase { export interface NodesUsageNodeUsage { rest_actions: Record - since: EpochMillis - timestamp: EpochMillis + since: EpochTime + timestamp: EpochTime aggregations: Record } export interface NodesUsageRequest extends RequestBase { node_id?: NodeIds metric?: Metrics - timeout?: Time + timeout?: Duration } export type NodesUsageResponse = NodesUsageResponseBase @@ -14870,13 +14901,13 @@ export interface NodesUsageResponseBase extends NodesNodesResponseBase { } export interface RollupDateHistogramGrouping { - delay?: Time + delay?: Duration field: Field format?: string - interval?: Time - calendar_interval?: Time - fixed_interval?: Time - time_zone?: string + interval?: Duration + calendar_interval?: Duration + fixed_interval?: Duration + time_zone?: TimeZone } export interface RollupFieldMetric { @@ -14934,21 +14965,21 @@ export interface RollupGetJobsRollupJobConfiguration { metrics: RollupFieldMetric[] page_size: long rollup_index: IndexName - timeout: Time + timeout: Duration } export interface RollupGetJobsRollupJobStats { documents_processed: long index_failures: long - index_time_in_ms: long + index_time_in_ms: DurationValue index_total: long pages_processed: long rollups_indexed: long search_failures: long - search_time_in_ms: long + search_time_in_ms: DurationValue search_total: long trigger_count: long - processing_time_in_ms: long + processing_time_in_ms: DurationValue processing_total: long } @@ -14994,8 +15025,8 @@ export interface RollupGetRollupIndexCapsRollupJobSummary { export interface RollupGetRollupIndexCapsRollupJobSummaryField { agg: string - time_zone?: string - calendar_interval?: Time + time_zone?: TimeZone + calendar_interval?: Duration } export interface RollupPutJobRequest extends RequestBase { @@ -15008,7 +15039,7 @@ export interface RollupPutJobRequest extends RequestBase { metrics?: RollupFieldMetric[] page_size: integer rollup_index: IndexName - timeout?: Time + timeout?: Duration headers?: HttpHeaders } } @@ -15056,7 +15087,7 @@ export interface 
RollupStartJobResponse { export interface RollupStopJobRequest extends RequestBase { id: Id - timeout?: Time + timeout?: Duration wait_for_completion?: boolean } @@ -15072,7 +15103,7 @@ export interface SearchableSnapshotsCacheStatsNode { export interface SearchableSnapshotsCacheStatsRequest extends RequestBase { node_id?: NodeIds - master_timeout?: Time + master_timeout?: Duration } export interface SearchableSnapshotsCacheStatsResponse { @@ -15110,7 +15141,7 @@ export interface SearchableSnapshotsMountMountedSnapshot { export interface SearchableSnapshotsMountRequest extends RequestBase { repository: Name snapshot: Name - master_timeout?: Time + master_timeout?: Duration wait_for_completion?: boolean storage?: string /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ @@ -15161,7 +15192,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client' +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client' export interface SecurityCreatedStatus { created: boolean @@ -15192,10 +15223,12 @@ export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity | SecurityFieldSecurity[] names: Indices privileges: SecurityIndexPrivilege[] - query?: string[] | QueryDslQueryContainer | SecurityRoleTemplateQueryContainer + query?: SecurityIndicesPrivilegesQuery allow_restricted_indices?: boolean } +export type SecurityIndicesPrivilegesQuery = string | QueryDslQueryContainer | SecurityRoleTemplateQuery + export interface SecurityManageUserPrivileges { applications: string[] } @@ -15209,6 +15242,28 @@ export interface SecurityRealmInfo { type: string } +export interface SecurityRoleDescriptor { + cluster?: string[] + indices?: SecurityIndicesPrivileges[] + index?: SecurityIndicesPrivileges[] + global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege + applications?: SecurityApplicationPrivileges[] + metadata?: Metadata + run_as?: string[] + transient_metadata?: SecurityTransientMetadataConfig +} + +export interface SecurityRoleDescriptorRead { + cluster: string[] + indices: SecurityIndicesPrivileges[] + index: SecurityIndicesPrivileges[] + global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege 
+ applications?: SecurityApplicationPrivileges[] + metadata?: Metadata + run_as?: string[] + transient_metadata?: SecurityTransientMetadataConfig +} + export interface SecurityRoleMapping { enabled: boolean metadata: Metadata @@ -15230,7 +15285,7 @@ export interface SecurityRoleTemplateInlineScript extends ScriptBase { source: string | QueryDslQueryContainer } -export interface SecurityRoleTemplateQueryContainer { +export interface SecurityRoleTemplateQuery { template?: SecurityRoleTemplateScript } @@ -15250,10 +15305,10 @@ export interface SecurityUser { } export interface SecurityUserProfile { - uid: string + uid: SecurityUserProfileId user: SecurityUserProfileUser - data?: Record - labels?: Record + data: Record + labels: Record enabled?: boolean } @@ -15262,17 +15317,20 @@ export interface SecurityUserProfileHitMetadata { _seq_no: SequenceNumber } +export type SecurityUserProfileId = string + export interface SecurityUserProfileUser { email?: string | null full_name?: Name | null - metadata: Metadata + realm_name: Name + realm_domain?: Name roles: string[] username: Username } export interface SecurityUserProfileWithMetadata extends SecurityUserProfile { last_synchronized: long - _doc?: SecurityUserProfileHitMetadata + _doc: SecurityUserProfileHitMetadata } export interface SecurityActivateUserProfileRequest extends RequestBase { @@ -15375,18 +15433,13 @@ export interface SecurityClearCachedServiceTokensResponse { nodes: Record } -export interface SecurityCreateApiKeyIndexPrivileges { - names: Indices - privileges: SecurityIndexPrivilege[] -} - export interface SecurityCreateApiKeyRequest extends RequestBase { refresh?: Refresh /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - expiration?: Time + expiration?: Duration name?: Name - role_descriptors?: Record + role_descriptors?: Record metadata?: Metadata } } @@ -15399,16 +15452,11 @@ export interface SecurityCreateApiKeyResponse { encoded: string } -export interface SecurityCreateApiKeyRoleDescriptor { - cluster: string[] - index: SecurityCreateApiKeyIndexPrivileges[] - applications?: SecurityApplicationPrivileges[] -} - export interface SecurityCreateServiceTokenRequest extends RequestBase { namespace: Namespace service: Service - name: Name + name?: Name + refresh?: Refresh } export interface SecurityCreateServiceTokenResponse { @@ -15480,7 +15528,7 @@ export interface SecurityDisableUserResponse { } export interface SecurityDisableUserProfileRequest extends RequestBase { - uid: string + uid: SecurityUserProfileId refresh?: Refresh } @@ -15495,7 +15543,7 @@ export interface SecurityEnableUserResponse { } export interface SecurityEnableUserProfileRequest extends RequestBase { - uid: string + uid: SecurityUserProfileId refresh?: Refresh } @@ -15590,18 +15638,8 @@ export interface SecurityGetServiceAccountsRequest extends RequestBase { export type SecurityGetServiceAccountsResponse = Record -export interface SecurityGetServiceAccountsRoleDescriptor { - cluster: string[] - indices: SecurityIndicesPrivileges[] - global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege - applications?: SecurityApplicationPrivileges[] - metadata?: Metadata - run_as?: string[] - transient_metadata?: SecurityTransientMetadataConfig -} - export interface SecurityGetServiceAccountsRoleDescriptorWrapper { - role_descriptor: SecurityGetServiceAccountsRoleDescriptor + role_descriptor: SecurityRoleDescriptorRead } export interface SecurityGetServiceCredentialsNodesCredentials { @@ -15687,7 
+15725,7 @@ export interface SecurityGetUserPrivilegesResponse { } export interface SecurityGetUserProfileRequest extends RequestBase { - uid: string + uid: SecurityUserProfileId data?: string | string[] } @@ -15697,7 +15735,7 @@ export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' export interface SecurityGrantApiKeyGrantApiKey { name: Name - expiration?: Time + expiration?: Duration role_descriptors?: Record[] } @@ -15716,7 +15754,7 @@ export interface SecurityGrantApiKeyResponse { api_key: string id: Id name: Name - expiration?: EpochMillis + expiration?: EpochTime } export interface SecurityHasPrivilegesApplicationPrivilegesCheck { @@ -15755,6 +15793,25 @@ export interface SecurityHasPrivilegesResponse { username: Username } +export interface SecurityHasPrivilegesUserProfilePrivilegesCheck { + application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] + cluster?: SecurityClusterPrivilege[] + index?: SecurityHasPrivilegesIndexPrivilegesCheck[] +} + +export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + uids: SecurityUserProfileId[] + privileges: SecurityHasPrivilegesUserProfilePrivilegesCheck + } +} + +export interface SecurityHasPrivilegesUserProfileResponse { + has_privilege_uids: SecurityUserProfileId[] + error_uids?: SecurityUserProfileId[] +} + export interface SecurityInvalidateApiKeyRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -15959,12 +16016,18 @@ export interface SecuritySamlServiceProviderMetadataResponse { metadata: string } +export interface SecuritySuggestUserProfilesHint { + uids?: SecurityUserProfileId[] + labels?: Record +} + export interface SecuritySuggestUserProfilesRequest extends RequestBase { - data?: string | string[] /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { name?: string size?: long + data?: string | string[] + hint?: SecuritySuggestUserProfilesHint } } @@ -15979,14 +16042,27 @@ export interface SecuritySuggestUserProfilesTotalUserProfiles { relation: RelationName } +export interface SecurityUpdateApiKeyRequest extends RequestBase { + id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + role_descriptors?: Record + metadata?: Metadata + } +} + +export interface SecurityUpdateApiKeyResponse { + updated: boolean +} + export interface SecurityUpdateUserProfileDataRequest extends RequestBase { - uid: string + uid: SecurityUserProfileId if_seq_no?: SequenceNumber if_primary_term?: long refresh?: Refresh /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { - access?: Record + labels?: Record data?: Record } } @@ -16007,7 +16083,7 @@ export interface ShutdownGetNodeNodeShutdownStatus { node_id: NodeId type: ShutdownGetNodeShutdownType reason: string - shutdown_startedmillis: EpochMillis + shutdown_startedmillis: EpochTime status: ShutdownGetNodeShutdownStatus shard_migration: ShutdownGetNodeShardMigrationStatus persistent_tasks: ShutdownGetNodePersistentTaskStatus @@ -16066,14 +16142,14 @@ export interface SlmConfiguration { export interface SlmInProgress { name: Name - start_time_millis: DateString + start_time_millis: EpochTime state: string uuid: Uuid } export interface SlmInvocation { snapshot_name: Name - time: DateString + time: DateTime } export interface SlmPolicy { @@ -16085,7 +16161,7 @@ export interface SlmPolicy { } export interface SlmRetention { - expire_after: Time + expire_after: Duration max_count: integer min_count: integer } @@ -16094,18 +16170,18 @@ export interface SlmSnapshotLifecycle { in_progress?: SlmInProgress last_failure?: SlmInvocation last_success?: SlmInvocation - modified_date?: DateString - modified_date_millis: EpochMillis - next_execution?: DateString - next_execution_millis: EpochMillis + modified_date?: DateTime + modified_date_millis: EpochTime + next_execution?: DateTime + next_execution_millis: EpochTime policy: SlmPolicy version: VersionNumber stats: SlmStatistics } export interface SlmStatistics { - retention_deletion_time?: DateString - retention_deletion_time_millis?: EpochMillis + retention_deletion_time?: Duration + retention_deletion_time_millis?: DurationValue retention_failed?: long retention_runs?: long retention_timed_out?: long @@ -16149,8 +16225,8 @@ export interface SlmGetStatsRequest extends RequestBase { } export interface SlmGetStatsResponse { - retention_deletion_time: string - retention_deletion_time_millis: EpochMillis + retention_deletion_time: Duration + retention_deletion_time_millis: DurationValue retention_failed: long retention_runs: long retention_timed_out: long @@ -16170,8 +16246,8 @@ export interface SlmGetStatusResponse { export interface SlmPutLifecycleRequest extends RequestBase { policy_id: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { config?: SlmConfiguration @@ -16240,8 +16316,9 @@ export type SnapshotShardsStatsStage = 'DONE' | 'FAILURE' | 'FINALIZE' | 'INIT' export interface SnapshotShardsStatsSummary { incremental: SnapshotShardsStatsSummaryItem total: SnapshotShardsStatsSummaryItem - start_time_in_millis: long - time_in_millis: long + start_time_in_millis: EpochTime + time?: Duration + time_in_millis: DurationValue } export interface SnapshotShardsStatsSummaryItem { @@ -16257,21 +16334,21 @@ export interface SnapshotSnapshotIndexStats { export interface SnapshotSnapshotInfo { data_streams: string[] - duration?: Time - duration_in_millis?: EpochMillis - end_time?: Time - end_time_in_millis?: EpochMillis + duration?: Duration + duration_in_millis?: DurationValue + end_time?: DateTime + end_time_in_millis?: EpochTime failures?: SnapshotSnapshotShardFailure[] include_global_state?: boolean - indices: IndexName[] + indices?: IndexName[] index_details?: Record metadata?: Metadata reason?: string repository?: Name snapshot: Name shards?: ShardStatistics - start_time?: Time - start_time_in_millis?: EpochMillis + start_time?: DateTime + start_time_in_millis?: EpochTime state?: string uuid: Uuid version?: VersionString @@ -16296,8 +16373,9 @@ export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_c export interface SnapshotSnapshotStats { incremental: SnapshotFileCountSnapshotStats - start_time_in_millis: long - time_in_millis: long + start_time_in_millis: EpochTime + time?: Duration + time_in_millis: DurationValue total: SnapshotFileCountSnapshotStats } @@ -16319,8 +16397,8 @@ export interface SnapshotCleanupRepositoryCleanupRepositoryResults { export interface SnapshotCleanupRepositoryRequest extends RequestBase { name: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export interface SnapshotCleanupRepositoryResponse { @@ -16331,8 +16409,8 @@ export interface SnapshotCloneRequest extends RequestBase { repository: Name snapshot: Name target_snapshot: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { indices: string @@ -16344,7 +16422,7 @@ export type SnapshotCloneResponse = AcknowledgedResponseBase export interface SnapshotCreateRequest extends RequestBase { repository: Name snapshot: Name - master_timeout?: Time + master_timeout?: Duration wait_for_completion?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -16364,8 +16442,8 @@ export interface SnapshotCreateResponse { export interface SnapshotCreateRepositoryRequest extends RequestBase { name: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration verify?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { @@ -16380,15 +16458,15 @@ export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase export interface SnapshotDeleteRequest extends RequestBase { repository: Name snapshot: Name - master_timeout?: Time + master_timeout?: Duration } export type SnapshotDeleteResponse = AcknowledgedResponseBase export interface SnapshotDeleteRepositoryRequest extends RequestBase { name: Names - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase @@ -16397,10 +16475,10 @@ export interface SnapshotGetRequest extends RequestBase { repository: Name snapshot: Names ignore_unavailable?: boolean - master_timeout?: Time + master_timeout?: Duration verbose?: boolean index_details?: boolean - human?: boolean + index_names?: boolean include_repository?: boolean sort?: SnapshotSnapshotSort size?: integer @@ -16427,7 +16505,7 @@ export interface SnapshotGetSnapshotResponseItem { export interface SnapshotGetRepositoryRequest extends RequestBase { name?: Names local?: boolean - master_timeout?: Time + master_timeout?: Duration } export type SnapshotGetRepositoryResponse = Record @@ -16435,7 +16513,7 @@ export type SnapshotGetRepositoryResponse = Record export interface SnapshotRestoreRequest extends RequestBase { repository: Name snapshot: Name - master_timeout?: Time + master_timeout?: Duration wait_for_completion?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -16465,7 +16543,7 @@ export interface SnapshotStatusRequest extends RequestBase { repository?: Name snapshot?: Names ignore_unavailable?: boolean - master_timeout?: Time + master_timeout?: Duration } export interface SnapshotStatusResponse { @@ -16478,8 +16556,8 @@ export interface SnapshotVerifyRepositoryCompactNodeInfo { export interface SnapshotVerifyRepositoryRequest extends RequestBase { name: Name - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration } export interface SnapshotVerifyRepositoryResponse { @@ -16514,8 +16592,8 @@ export interface SqlGetAsyncRequest extends RequestBase { id: Id delimiter?: string format?: string - keep_alive?: Time - wait_for_completion_timeout?: Time + keep_alive?: Duration + wait_for_completion_timeout?: Duration } export interface SqlGetAsyncResponse { @@ -16535,8 +16613,8 @@ export interface SqlGetAsyncStatusResponse { id: string is_running: boolean is_partial: boolean - start_time_in_millis: ulong - expiration_time_in_millis: ulong + start_time_in_millis: EpochTime + expiration_time_in_millis: EpochTime completion_status?: uint } @@ -16550,14 +16628,14 @@ export interface SqlQueryRequest extends RequestBase { fetch_size?: integer filter?: QueryDslQueryContainer query?: string - request_timeout?: Time - page_timeout?: Time - time_zone?: string + request_timeout?: Duration + page_timeout?: Duration + time_zone?: TimeZone field_multi_value_leniency?: boolean runtime_mappings?: MappingRuntimeFields - wait_for_completion_timeout?: Time + wait_for_completion_timeout?: Duration params?: Record - keep_alive?: Time + keep_alive?: Duration keep_on_completion?: boolean index_using_frozen?: boolean } @@ -16578,20 +16656,22 @@ export interface SqlTranslateRequest extends RequestBase { fetch_size?: integer filter?: QueryDslQueryContainer query: string - time_zone?: string + time_zone?: TimeZone } } export interface SqlTranslateResponse { - size: long - _source: SearchSourceConfig - 
fields: Record[] - sort: Sort + aggregations?: Record + size?: long + _source?: SearchSourceConfig + fields?: (QueryDslFieldAndFormat | Field)[] + query?: QueryDslQueryContainer + sort?: Sort } export interface SslCertificatesCertificateInformation { alias: string | null - expiry: DateString + expiry: DateTime format: string has_private_key: boolean path: string @@ -16627,9 +16707,9 @@ export interface TasksTaskInfo { headers: Record id: long node: NodeId - running_time?: string - running_time_in_nanos: long - start_time_in_millis: long + running_time?: Duration + running_time_in_nanos: DurationValue + start_time_in_millis: EpochTime status?: TasksTaskStatus type: string parent_task_id?: TaskId @@ -16653,12 +16733,12 @@ export interface TasksTaskStatus { failures?: string[] requests_per_second: float retries: Retries - throttled?: Time - throttled_millis: long - throttled_until?: Time - throttled_until_millis: long + throttled?: Duration + throttled_millis: DurationValue + throttled_until?: Duration + throttled_until_millis: DurationValue timed_out?: boolean - took?: long + took?: DurationValue total: long updated: long version_conflicts: long @@ -16676,7 +16756,7 @@ export type TasksCancelResponse = TasksTaskListResponseBase export interface TasksGetRequest extends RequestBase { task_id: Id - timeout?: Time + timeout?: Duration wait_for_completion?: boolean } @@ -16693,8 +16773,8 @@ export interface TasksListRequest extends RequestBase { group_by?: TasksGroupBy node_id?: string[] parent_task_id?: Id - master_timeout?: Time - timeout?: Time + master_timeout?: Duration + timeout?: Duration wait_for_completion?: boolean } @@ -16724,7 +16804,7 @@ export interface TextStructureFindStructureRequest { lines_to_sample?: uint quote?: string should_trim_fields?: boolean - timeout?: Time + timeout?: Duration timestamp_field?: Field timestamp_format?: string /** @deprecated The use of the 'body' key has been deprecated, use 'text_files' instead. 
*/ @@ -16786,7 +16866,7 @@ export interface TransformPivotGroupByContainer { export interface TransformRetentionPolicy { field: Field - max_age: Time + max_age: Duration } export interface TransformRetentionPolicyContainer { @@ -16812,14 +16892,14 @@ export interface TransformSyncContainer { } export interface TransformTimeSync { - delay?: Time + delay?: Duration field: Field } export interface TransformDeleteTransformRequest extends RequestBase { transform_id: Id force?: boolean - timeout?: Time + timeout?: Duration } export type TransformDeleteTransformResponse = AcknowledgedResponseBase @@ -16838,32 +16918,34 @@ export interface TransformGetTransformResponse { } export interface TransformGetTransformTransformSummary { - dest: ReindexDestination + authorization?: MlTransformAuthorization + create_time?: EpochTime description?: string - frequency?: Time + dest: ReindexDestination + frequency?: Duration id: Id + latest?: TransformLatest pivot?: TransformPivot + retention_policy?: TransformRetentionPolicyContainer settings?: TransformSettings source: TransformSource sync?: TransformSyncContainer - create_time?: EpochMillis version?: VersionString - latest?: TransformLatest _meta?: Metadata } export interface TransformGetTransformStatsCheckpointStats { checkpoint: long checkpoint_progress?: TransformGetTransformStatsTransformProgress - timestamp?: DateString - timestamp_millis?: EpochMillis - time_upper_bound?: DateString - time_upper_bound_millis?: EpochMillis + timestamp?: DateTime + timestamp_millis?: EpochTime + time_upper_bound?: DateTime + time_upper_bound_millis?: EpochTime } export interface TransformGetTransformStatsCheckpointing { changes_last_detected_at?: long - changes_last_detected_at_date_time?: DateString + changes_last_detected_at_date_time?: DateTime last: TransformGetTransformStatsCheckpointStats next?: TransformGetTransformStatsCheckpointStats operations_behind?: long @@ -16883,21 +16965,21 @@ export interface TransformGetTransformStatsResponse { } export interface TransformGetTransformStatsTransformIndexerStats { - delete_time_in_ms?: EpochMillis + delete_time_in_ms?: EpochTime documents_indexed: long documents_deleted?: long documents_processed: long - exponential_avg_checkpoint_duration_ms: double + exponential_avg_checkpoint_duration_ms: DurationValue exponential_avg_documents_indexed: double exponential_avg_documents_processed: double index_failures: long - index_time_in_ms: long + index_time_in_ms: DurationValue index_total: long pages_processed: long - processing_time_in_ms: long + processing_time_in_ms: DurationValue processing_total: long search_failures: long - search_time_in_ms: long + search_time_in_ms: DurationValue search_total: long trigger_count: long } @@ -16921,12 +17003,12 @@ export interface TransformGetTransformStatsTransformStats { export interface TransformPreviewTransformRequest extends RequestBase { transform_id?: Id - timeout?: Time + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { dest?: TransformDestination description?: string - frequency?: Time + frequency?: Duration pivot?: TransformPivot source?: TransformSource settings?: TransformSettings @@ -16944,12 +17026,12 @@ export interface TransformPreviewTransformResponse { export interface TransformPutTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean - timeout?: Time + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { dest: TransformDestination description?: string - frequency?: Time + frequency?: Duration latest?: TransformLatest _meta?: Metadata pivot?: TransformPivot @@ -16971,7 +17053,7 @@ export type TransformResetTransformResponse = AcknowledgedResponseBase export interface TransformStartTransformRequest extends RequestBase { transform_id: Id - timeout?: Time + timeout?: Duration } export type TransformStartTransformResponse = AcknowledgedResponseBase @@ -16980,7 +17062,7 @@ export interface TransformStopTransformRequest extends RequestBase { transform_id: Name allow_no_match?: boolean force?: boolean - timeout?: Time + timeout?: Duration wait_for_checkpoint?: boolean wait_for_completion?: boolean } @@ -16990,12 +17072,12 @@ export type TransformStopTransformResponse = AcknowledgedResponseBase export interface TransformUpdateTransformRequest extends RequestBase { transform_id: Id defer_validation?: boolean - timeout?: Time + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { dest?: TransformDestination description?: string - frequency?: Time + frequency?: Duration _meta?: Metadata source?: TransformSource settings?: TransformSettings @@ -17005,10 +17087,11 @@ export interface TransformUpdateTransformRequest extends RequestBase { } export interface TransformUpdateTransformResponse { + authorization?: MlTransformAuthorization create_time: long description: string dest: ReindexDestination - frequency?: Time + frequency?: Duration id: Id latest?: TransformLatest pivot?: TransformPivot @@ -17022,7 +17105,7 @@ export interface TransformUpdateTransformResponse { export interface TransformUpgradeTransformsRequest extends RequestBase { dry_run?: boolean - timeout?: Time + timeout?: Duration } export interface TransformUpgradeTransformsResponse { @@ -17033,7 +17116,7 @@ export interface TransformUpgradeTransformsResponse { export interface WatcherAcknowledgeState { state: WatcherAcknowledgementOptions - timestamp: DateString + timestamp: DateTime } export type WatcherAcknowledgementOptions = 'awaits_successful_execution' | 'ackable' | 'acked' @@ -17044,8 +17127,8 @@ export interface WatcherAction { foreach?: string max_iterations?: integer name?: Name - throttle_period?: Time - throttle_period_in_millis?: EpochMillis + throttle_period?: Duration + throttle_period_in_millis?: DurationValue transform?: TransformContainer index?: WatcherIndexAction logging?: WatcherLoggingAction @@ -17072,7 +17155,7 @@ export type WatcherActions = Record export interface WatcherActivationState { active: boolean - timestamp: Timestamp + timestamp: DateTime } export interface WatcherActivationStatus { @@ -17127,7 +17210,7 @@ export type WatcherConnectionScheme = 'http' | 'https' export type WatcherCronExpression = string export interface WatcherDailySchedule { - at: WatcherTimeOfDay[] + at: WatcherScheduleTimeOfDay[] } export type WatcherDataAttachmentFormat = 'json' | 'yaml' @@ -17146,7 +17229,7 @@ export interface 
WatcherEmail { from?: string priority?: WatcherEmailPriority reply_to?: string[] - sent_date?: DateString + sent_date?: DateTime subject: string to: string[] attachments?: Record @@ -17179,8 +17262,8 @@ export type WatcherExecutionPhase = 'awaits_execution' | 'started' | 'input' | ' export interface WatcherExecutionResult { actions: WatcherExecutionResultAction[] condition: WatcherExecutionResultCondition - execution_duration: integer - execution_time: DateString + execution_duration: DurationValue + execution_time: DateTime input: WatcherExecutionResultInput } @@ -17212,7 +17295,7 @@ export interface WatcherExecutionResultInput { export interface WatcherExecutionState { successful: boolean - timestamp: DateString + timestamp: DateTime reason?: string } @@ -17264,7 +17347,7 @@ export interface WatcherHttpInputProxy { export interface WatcherHttpInputRequestDefinition { auth?: WatcherHttpInputAuthentication body?: string - connection_timeout?: Time + connection_timeout?: Duration headers?: Record host?: Host method?: WatcherHttpInputMethod @@ -17272,7 +17355,7 @@ export interface WatcherHttpInputRequestDefinition { path?: string port?: uint proxy?: WatcherHttpInputProxy - read_timeout?: Time + read_timeout?: Duration scheme?: WatcherConnectionScheme url?: string } @@ -17291,7 +17374,7 @@ export interface WatcherIndexAction { doc_id?: Id refresh?: Refresh op_type?: OpType - timeout?: Time + timeout?: Duration execution_time_field?: Field } @@ -17383,7 +17466,7 @@ export interface WatcherReportingEmailAttachment { url: string inline?: boolean retries?: integer - interval?: Time + interval?: Duration request?: WatcherHttpInputRequestDefinition } @@ -17393,15 +17476,17 @@ export interface WatcherScheduleContainer { cron?: WatcherCronExpression daily?: WatcherDailySchedule hourly?: WatcherHourlySchedule - interval?: Time + interval?: Duration monthly?: WatcherTimeOfMonth | WatcherTimeOfMonth[] weekly?: WatcherTimeOfWeek | WatcherTimeOfWeek[] yearly?: WatcherTimeOfYear | WatcherTimeOfYear[] } +export type WatcherScheduleTimeOfDay = string | WatcherHourAndMinute + export interface WatcherScheduleTriggerEvent { - scheduled_time: DateString - triggered_time?: DateString + scheduled_time: DateTime + triggered_time?: DateTime } export interface WatcherScriptCondition { @@ -17414,7 +17499,7 @@ export interface WatcherScriptCondition { export interface WatcherSearchInput { extract?: string[] request: WatcherSearchInputRequestDefinition - timeout?: Time + timeout?: Duration } export interface WatcherSearchInputRequestBody { @@ -17464,7 +17549,7 @@ export interface WatcherSlackAttachment { thumb_url?: string title: string title_link?: string - ts?: DateString + ts?: EpochTime } export interface WatcherSlackAttachmentField { @@ -17494,11 +17579,9 @@ export interface WatcherSlackResult { export interface WatcherThrottleState { reason: string - timestamp: DateString + timestamp: DateTime } -export type WatcherTimeOfDay = string | WatcherHourAndMinute - export interface WatcherTimeOfMonth { at: string[] on: integer[] @@ -17525,7 +17608,7 @@ export interface WatcherTriggerEventContainer { export interface WatcherTriggerEventResult { manual: WatcherTriggerEventContainer - triggered_time: DateString + triggered_time: DateTime type: string } @@ -17535,16 +17618,16 @@ export interface WatcherWatch { input: WatcherInputContainer metadata?: Metadata status?: WatcherWatchStatus - throttle_period?: string + throttle_period?: Duration + throttle_period_in_millis?: DurationValue transform?: TransformContainer trigger: 
WatcherTriggerContainer - throttle_period_in_millis?: long } export interface WatcherWatchStatus { actions: WatcherActions - last_checked?: DateString - last_met_condition?: DateString + last_checked?: DateTime + last_met_condition?: DateTime state: WatcherActivationState version: VersionNumber execution_state?: string @@ -17701,12 +17784,12 @@ export interface WatcherStatsResponse { } export interface WatcherStatsWatchRecordQueuedStats { - execution_time: DateString + execution_time: DateTime } export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQueuedStats { execution_phase: WatcherExecutionPhase - triggered_time: DateString + triggered_time: DateTime executed_actions?: string[] watch_id: Id watch_record_id: Id @@ -17731,7 +17814,7 @@ export interface WatcherStopRequest extends RequestBase { export type WatcherStopResponse = AcknowledgedResponseBase export interface XpackInfoBuildInformation { - date: DateString + date: DateTime hash: string } @@ -17774,7 +17857,7 @@ export interface XpackInfoFeatures { } export interface XpackInfoMinimalLicenseInformation { - expiry_date_in_millis: EpochMillis + expiry_date_in_millis: EpochTime mode: LicenseLicenseType status: LicenseLicenseStatus type: LicenseLicenseType @@ -18062,7 +18145,7 @@ export interface XpackUsageRealmCache { } export interface XpackUsageRequest extends RequestBase { - master_timeout?: Time + master_timeout?: Duration } export interface XpackUsageResponse { @@ -18199,8 +18282,8 @@ export interface XpackUsageWatcher extends XpackUsageBase { } export interface XpackUsageWatcherActionTotals { - total: long - total_time_in_ms: long + total: Duration + total_time_in_ms: DurationValue } export interface XpackUsageWatcherActions { @@ -18242,7 +18325,7 @@ export interface SpecUtilsCommonCatQueryParameters { h?: Names help?: boolean local?: boolean - master_timeout?: Time + master_timeout?: Duration s?: Names v?: boolean } From 6ccdab52803dee6d3bb8d7fcec61baf83020dd16 Mon Sep 17 00:00:00 2001 From: Seth Michael Larson Date: Wed, 31 Aug 2022 14:04:50 -0500 Subject: [PATCH 177/647] Add changelog for 8.4.0 --- docs/changelog.asciidoc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 626313bbf..61e0913f0 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,15 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.4.0 + +[discrete] +===== Support for Elasticsearch `v8.4.0` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.4/release-notes-8.4.0.html[here]. 
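A recurring change in this release's type definitions is the replacement of `Time` with `Duration` and of `DateString`/`EpochMillis` with `DateTime`/`EpochTime`, as the hunks above show (for example in `MlStartDatafeedRequest`). The sketch below is illustrative only: the node address and datafeed id are hypothetical, not part of this patch.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

// Illustrative connection details, not defaults shipped with the client.
const client = new Client({ node: 'https://localhost:9200/' })

// `start`/`end` are now typed as DateTime and `timeout` as Duration,
// so ISO-8601 strings and unit strings such as '30s' remain valid values.
await client.ml.startDatafeed({
  datafeed_id: 'my-datafeed', // hypothetical datafeed id
  start: '2022-08-01T00:00:00Z',
  end: '2022-08-31T23:59:59Z',
  timeout: '30s'
})
----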
+ [discrete] === 8.2.1 From ebbc2961e0f463387d36ff6b00401fe9b3b590ce Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 21 Sep 2022 10:03:27 -0400 Subject: [PATCH 178/647] Bumps to version 8.6.0 (#1762) --- .ci/test-matrix.yml | 2 +- package.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml index 0ab8cb1d8..0d081a1bf 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -1,6 +1,6 @@ --- STACK_VERSION: - - "8.5.0-SNAPSHOT" + - "8.6.0-SNAPSHOT" NODE_JS_VERSION: - 18 diff --git a/package.json b/package.json index 7895eb0b6..3ea6cc36d 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.5.0", - "versionCanary": "8.5.0-canary.0", + "version": "8.6.0", + "versionCanary": "8.6.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 560dfd3763ba968a08696e3fbc577a53843c5a27 Mon Sep 17 00:00:00 2001 From: Seth Michael Larson Date: Tue, 27 Sep 2022 06:39:47 -0500 Subject: [PATCH 179/647] Fix docs URLs to use 'current' instead of 'master' --- docs/reference.asciidoc | 172 ++++++++++++++++++------------------ src/api/api/async_search.ts | 2 +- src/api/api/fleet.ts | 2 +- src/api/api/indices.ts | 27 ++++++ src/api/api/rollup.ts | 27 ------ src/api/api/search.ts | 2 +- src/api/api/security.ts | 2 +- src/api/types.ts | 132 +++++++++++++++------------ src/api/typesWithBodyKey.ts | 134 +++++++++++++++------------- 9 files changed, 267 insertions(+), 233 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 521987720..1dbc7a9f0 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -29,7 +29,7 @@ === bulk Allows to perform multiple index/update/delete operations in a single request. -https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Endpoint documentation] [source,ts] ---- client.bulk(...) @@ -39,7 +39,7 @@ client.bulk(...) === clear_scroll Explicitly clears the search context for a scroll. -https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-scroll-api.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html[Endpoint documentation] [source,ts] ---- client.clearScroll(...) @@ -49,7 +49,7 @@ client.clearScroll(...) === close_point_in_time Close a point in time -https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html[Endpoint documentation] [source,ts] ---- client.closePointInTime(...) @@ -281,7 +281,7 @@ client.mtermvectors(...) === open_point_in_time Open a point in time that can be used in subsequent searches -https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html[Endpoint documentation] [source,ts] ---- client.openPointInTime(...) @@ -466,7 +466,7 @@ client.updateByQueryRethrottle(...) ==== delete Deletes an async search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. 
-https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html[Endpoint documentation] [source,ts] ---- client.asyncSearch.delete(...) ---- @@ -476,7 +476,7 @@ client.asyncSearch.delete(...) ==== get Retrieves the results of a previously submitted async search request given its ID. -https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html[Endpoint documentation] [source,ts] ---- client.asyncSearch.get(...) ---- @@ -486,7 +486,7 @@ client.asyncSearch.get(...) ==== status Retrieves the status of a previously submitted async search request given its ID. -https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html[Endpoint documentation] [source,ts] ---- client.asyncSearch.status(...) ---- @@ -496,7 +496,7 @@ client.asyncSearch.status(...) ==== submit Executes a search request asynchronously. -https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html[Endpoint documentation] [source,ts] ---- client.asyncSearch.submit(...) ---- @@ -508,7 +508,7 @@ client.asyncSearch.submit(...) ==== aliases Shows information about currently configured aliases to indices including filter and routing information. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-alias.html[Endpoint documentation] [source,ts] ---- client.cat.aliases(...) ---- @@ -518,7 +518,7 @@ client.cat.aliases(...) ==== allocation Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html[Endpoint documentation] [source,ts] ---- client.cat.allocation(...) ---- @@ -536,7 +536,7 @@ client.cat.componentTemplates(...) ==== count Provides quick access to the document count of the entire cluster, or individual indices. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-count.html[Endpoint documentation] [source,ts] ---- client.cat.count(...) ---- @@ -546,7 +546,7 @@ client.cat.count(...) ==== fielddata Shows how much heap memory is currently being used by fielddata on every data node in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-fielddata.html[Endpoint documentation] [source,ts] ---- client.cat.fielddata(...) ---- @@ -556,7 +556,7 @@ client.cat.fielddata(...) ==== health Returns a concise representation of the cluster health. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-health.html[Endpoint documentation] [source,ts] ---- client.cat.health(...) ---- @@ -566,7 +566,7 @@ client.cat.health(...) ==== help Returns help for the Cat APIs.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat.html[Endpoint documentation] [source,ts] ---- client.cat.help(...) @@ -576,7 +576,7 @@ client.cat.help(...) ==== indices Returns information about indices: number of primaries and replicas, document counts, disk size, ... -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html[Endpoint documentation] [source,ts] ---- client.cat.indices(...) @@ -586,7 +586,7 @@ client.cat.indices(...) ==== master Returns information about the master node. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-master.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-master.html[Endpoint documentation] [source,ts] ---- client.cat.master(...) @@ -596,7 +596,7 @@ client.cat.master(...) ==== ml_data_frame_analytics Gets configuration and usage information about data frame analytics jobs. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-dfanalytics.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-dfanalytics.html[Endpoint documentation] [source,ts] ---- client.cat.mlDataFrameAnalytics(...) @@ -606,7 +606,7 @@ client.cat.mlDataFrameAnalytics(...) ==== ml_datafeeds Gets configuration and usage information about datafeeds. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-datafeeds.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-datafeeds.html[Endpoint documentation] [source,ts] ---- client.cat.mlDatafeeds(...) @@ -616,7 +616,7 @@ client.cat.mlDatafeeds(...) ==== ml_jobs Gets configuration and usage information about anomaly detection jobs. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-anomaly-detectors.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-anomaly-detectors.html[Endpoint documentation] [source,ts] ---- client.cat.mlJobs(...) @@ -626,7 +626,7 @@ client.cat.mlJobs(...) ==== ml_trained_models Gets configuration and usage information about inference trained models. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-trained-model.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-trained-model.html[Endpoint documentation] [source,ts] ---- client.cat.mlTrainedModels(...) @@ -636,7 +636,7 @@ client.cat.mlTrainedModels(...) ==== nodeattrs Returns information about custom node attributes. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodeattrs.html[Endpoint documentation] [source,ts] ---- client.cat.nodeattrs(...) @@ -646,7 +646,7 @@ client.cat.nodeattrs(...) ==== nodes Returns basic statistics about performance of cluster nodes. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodes.html[Endpoint documentation] [source,ts] ---- client.cat.nodes(...) @@ -656,7 +656,7 @@ client.cat.nodes(...) ==== pending_tasks Returns a concise representation of the cluster pending tasks. 
-https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-pending-tasks.html[Endpoint documentation] [source,ts] ---- client.cat.pendingTasks(...) ---- @@ -666,7 +666,7 @@ client.cat.pendingTasks(...) ==== plugins Returns information about installed plugins across nodes. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-plugins.html[Endpoint documentation] [source,ts] ---- client.cat.plugins(...) ---- @@ -676,7 +676,7 @@ client.cat.plugins(...) ==== recovery Returns information about index shard recoveries, both on-going and completed. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-recovery.html[Endpoint documentation] [source,ts] ---- client.cat.recovery(...) ---- @@ -686,7 +686,7 @@ client.cat.recovery(...) ==== repositories Returns information about snapshot repositories registered in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-repositories.html[Endpoint documentation] [source,ts] ---- client.cat.repositories(...) ---- @@ -696,7 +696,7 @@ client.cat.repositories(...) ==== segments Provides low-level information about the segments in the shards of an index. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-segments.html[Endpoint documentation] [source,ts] ---- client.cat.segments(...) ---- @@ -706,7 +706,7 @@ client.cat.segments(...) ==== shards Provides a detailed view of shard allocation on nodes. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-shards.html[Endpoint documentation] [source,ts] ---- client.cat.shards(...) ---- @@ -716,7 +716,7 @@ client.cat.shards(...) ==== snapshots Returns all snapshots in a specific repository. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-snapshots.html[Endpoint documentation] [source,ts] ---- client.cat.snapshots(...) ---- @@ -726,7 +726,7 @@ client.cat.snapshots(...) ==== tasks Returns information about the tasks currently executing on one or more nodes in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html[Endpoint documentation] [source,ts] ---- client.cat.tasks(...) ---- @@ -736,7 +736,7 @@ client.cat.tasks(...) ==== templates Returns information about existing templates. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-templates.html[Endpoint documentation] [source,ts] ---- client.cat.templates(...) ---- @@ -747,7 +747,7 @@ client.cat.templates(...) ==== thread_pool Returns cluster-wide thread pool statistics per node. By default the active, queue and rejected statistics are returned for all thread pools.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html[Endpoint documentation] [source,ts] ---- client.cat.threadPool(...) ---- @@ -757,7 +757,7 @@ client.cat.threadPool(...) ==== transforms Gets configuration and usage information about transforms. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-transforms.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-transforms.html[Endpoint documentation] [source,ts] ---- client.cat.transforms(...) ---- @@ -769,7 +769,7 @@ client.cat.transforms(...) ==== delete_auto_follow_pattern Deletes auto-follow patterns. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-delete-auto-follow-pattern.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-delete-auto-follow-pattern.html[Endpoint documentation] [source,ts] ---- client.ccr.deleteAutoFollowPattern(...) ---- @@ -779,7 +779,7 @@ client.ccr.deleteAutoFollowPattern(...) ==== follow Creates a new follower index configured to follow the referenced leader index. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-follow.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-follow.html[Endpoint documentation] [source,ts] ---- client.ccr.follow(...) ---- @@ -789,7 +789,7 @@ client.ccr.follow(...) ==== follow_info Retrieves information about all follower indices, including parameters and status for each follower index. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-info.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html[Endpoint documentation] [source,ts] ---- client.ccr.followInfo(...) ---- @@ -799,7 +799,7 @@ client.ccr.followInfo(...) ==== follow_stats Retrieves follower stats. Returns shard-level stats about the following tasks associated with each shard for the specified indices. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-stats.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-stats.html[Endpoint documentation] [source,ts] ---- client.ccr.followStats(...) ---- @@ -809,7 +809,7 @@ client.ccr.followStats(...) ==== forget_follower Removes the follower retention leases from the leader. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-forget-follower.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-forget-follower.html[Endpoint documentation] [source,ts] ---- client.ccr.forgetFollower(...) ---- @@ -819,7 +819,7 @@ client.ccr.forgetFollower(...) ==== get_auto_follow_pattern Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-auto-follow-pattern.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html[Endpoint documentation] [source,ts] ---- client.ccr.getAutoFollowPattern(...) ---- @@ -829,7 +829,7 @@
==== pause_auto_follow_pattern Pauses an auto-follow pattern -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-pause-auto-follow-pattern.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html[Endpoint documentation] [source,ts] ---- client.ccr.pauseAutoFollowPattern(...) @@ -839,7 +839,7 @@ client.ccr.pauseAutoFollowPattern(...) ==== pause_follow Pauses a follower index. The follower index will not fetch any additional operations from the leader index. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-pause-follow.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-pause-follow.html[Endpoint documentation] [source,ts] ---- client.ccr.pauseFollow(...) @@ -849,7 +849,7 @@ client.ccr.pauseFollow(...) ==== put_auto_follow_pattern Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-auto-follow-pattern.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-auto-follow-pattern.html[Endpoint documentation] [source,ts] ---- client.ccr.putAutoFollowPattern(...) @@ -859,7 +859,7 @@ client.ccr.putAutoFollowPattern(...) ==== resume_auto_follow_pattern Resumes an auto-follow pattern that has been paused -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-resume-auto-follow-pattern.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html[Endpoint documentation] [source,ts] ---- client.ccr.resumeAutoFollowPattern(...) @@ -869,7 +869,7 @@ client.ccr.resumeAutoFollowPattern(...) ==== resume_follow Resumes a follower index that has been paused -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-resume-follow.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html[Endpoint documentation] [source,ts] ---- client.ccr.resumeFollow(...) @@ -879,7 +879,7 @@ client.ccr.resumeFollow(...) ==== stats Gets all stats related to cross-cluster replication. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-stats.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-stats.html[Endpoint documentation] [source,ts] ---- client.ccr.stats(...) @@ -889,7 +889,7 @@ client.ccr.stats(...) ==== unfollow Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-unfollow.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-unfollow.html[Endpoint documentation] [source,ts] ---- client.ccr.unfollow(...) @@ -901,7 +901,7 @@ client.ccr.unfollow(...) ==== allocation_explain Provides explanations for shard allocations in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-allocation-explain.html[Endpoint documentation] [source,ts] ---- client.cluster.allocationExplain(...) 
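Editorial aside: for readers skimming this long URL-only diff, the CCR endpoints touched above fit together as a small lifecycle — register an auto-follow pattern, pause it, resume it. A minimal sketch with the client follows; the pattern name, remote cluster alias, and index patterns are illustrative and not taken from the patch:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Register a pattern so matching leader indices on the remote cluster
// are followed automatically as they are created.
await client.ccr.putAutoFollowPattern({
  name: 'logs-pattern',              // illustrative pattern name
  remote_cluster: 'remote-cluster',  // illustrative remote cluster alias
  leader_index_patterns: ['logs-*'],
  follow_index_pattern: '{{leader_index}}-copy'
})

// Temporarily stop creating new follower indices, then pick up again.
await client.ccr.pauseAutoFollowPattern({ name: 'logs-pattern' })
await client.ccr.resumeAutoFollowPattern({ name: 'logs-pattern' })
----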
@@ -911,7 +911,7 @@ client.cluster.allocationExplain(...) ==== delete_component_template Deletes a component template -https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html[Endpoint documentation] [source,ts] ---- client.cluster.deleteComponentTemplate(...) ---- @@ -921,7 +921,7 @@ client.cluster.deleteComponentTemplate(...) ==== delete_voting_config_exclusions Clears cluster voting config exclusions. -https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html[Endpoint documentation] [source,ts] ---- client.cluster.deleteVotingConfigExclusions(...) ---- @@ -931,7 +931,7 @@ client.cluster.deleteVotingConfigExclusions(...) ==== exists_component_template Returns information about whether a particular component template exists -https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html[Endpoint documentation] [source,ts] ---- client.cluster.existsComponentTemplate(...) ---- @@ -941,7 +941,7 @@ client.cluster.existsComponentTemplate(...) ==== get_component_template Returns one or more component templates -https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html[Endpoint documentation] [source,ts] ---- client.cluster.getComponentTemplate(...) ---- @@ -951,7 +951,7 @@ client.cluster.getComponentTemplate(...) ==== get_settings Returns cluster settings. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-get-settings.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-get-settings.html[Endpoint documentation] [source,ts] ---- client.cluster.getSettings(...) ---- @@ -961,7 +961,7 @@ client.cluster.getSettings(...) ==== health Returns basic information about the health of the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html[Endpoint documentation] [source,ts] ---- client.cluster.health(...) ---- @@ -972,7 +972,7 @@ client.cluster.health(...) ==== pending_tasks Returns a list of any cluster-level changes (e.g. create index, update mapping, allocate or fail shard) which have not yet been executed. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-pending.html[Endpoint documentation] [source,ts] ---- client.cluster.pendingTasks(...) ---- @@ -982,7 +982,7 @@ client.cluster.pendingTasks(...) ==== post_voting_config_exclusions Updates the cluster voting config exclusions by node ids or node names. -https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html[Endpoint documentation] [source,ts] ---- client.cluster.postVotingConfigExclusions(...) ---- @@ -992,7 +992,7 @@
==== put_component_template Creates or updates a component template -https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html[Endpoint documentation] [source,ts] ---- client.cluster.putComponentTemplate(...) ---- @@ -1002,7 +1002,7 @@ client.cluster.putComponentTemplate(...) ==== put_settings Updates the cluster settings. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html[Endpoint documentation] [source,ts] ---- client.cluster.putSettings(...) ---- @@ -1012,7 +1012,7 @@ client.cluster.putSettings(...) ==== remote_info Returns the information about configured remote clusters. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-remote-info.html[Endpoint documentation] [source,ts] ---- client.cluster.remoteInfo(...) ---- @@ -1022,7 +1022,7 @@ client.cluster.remoteInfo(...) ==== reroute Allows to manually change the allocation of individual shards in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-reroute.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html[Endpoint documentation] [source,ts] ---- client.cluster.reroute(...) ---- @@ -1032,7 +1032,7 @@ client.cluster.reroute(...) ==== state Returns comprehensive information about the state of the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html[Endpoint documentation] [source,ts] ---- client.cluster.state(...) ---- @@ -1042,7 +1042,7 @@ client.cluster.state(...) ==== stats Returns high-level overview of cluster statistics. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html[Endpoint documentation] [source,ts] ---- client.cluster.stats(...) ---- @@ -1490,6 +1490,16 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usag client.indices.diskUsage(...) ---- +[discrete] +==== downsample +Downsample an index + +https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-rollup.html[Endpoint documentation] +[source,ts] +---- +client.indices.downsample(...) +---- + [discrete] ==== exists Returns information about whether a particular index exists. @@ -2095,7 +2105,7 @@ client.ml.clearTrainedModelDeploymentCache(...) ==== close_job Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-close-job.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html[Endpoint documentation] [source,ts] ---- client.ml.closeJob(...) ---- @@ -2105,7 +2115,7 @@ client.ml.closeJob(...) ==== delete_calendar Deletes a calendar.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html[Endpoint documentation] [source,ts] ---- client.ml.deleteCalendar(...) @@ -2115,7 +2125,7 @@ client.ml.deleteCalendar(...) ==== delete_calendar_event Deletes scheduled events from a calendar. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-event.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-event.html[Endpoint documentation] [source,ts] ---- client.ml.deleteCalendarEvent(...) @@ -2125,7 +2135,7 @@ client.ml.deleteCalendarEvent(...) ==== delete_calendar_job Deletes anomaly detection jobs from a calendar. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-job.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-job.html[Endpoint documentation] [source,ts] ---- client.ml.deleteCalendarJob(...) @@ -2135,7 +2145,7 @@ client.ml.deleteCalendarJob(...) ==== delete_data_frame_analytics Deletes an existing data frame analytics job. -https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-dfanalytics.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-dfanalytics.html[Endpoint documentation] [source,ts] ---- client.ml.deleteDataFrameAnalytics(...) @@ -2145,7 +2155,7 @@ client.ml.deleteDataFrameAnalytics(...) ==== delete_datafeed Deletes an existing datafeed. -https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-datafeed.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html[Endpoint documentation] [source,ts] ---- client.ml.deleteDatafeed(...) @@ -2555,7 +2565,7 @@ client.ml.putCalendarJob(...) ==== put_data_frame_analytics Instantiates a data frame analytics job. -https://www.elastic.co/guide/en/elasticsearch/reference/master/put-dfanalytics.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/put-dfanalytics.html[Endpoint documentation] [source,ts] ---- client.ml.putDataFrameAnalytics(...) @@ -2807,7 +2817,7 @@ client.nodes.getRepositoriesMeteringInfo(...) ==== hot_threads Returns information about hot threads on each node in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-hot-threads.html[Endpoint documentation] [source,ts] ---- client.nodes.hotThreads(...) @@ -2817,7 +2827,7 @@ client.nodes.hotThreads(...) ==== info Returns information about nodes in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-info.html[Endpoint documentation] [source,ts] ---- client.nodes.info(...) @@ -2837,7 +2847,7 @@ client.nodes.reloadSecureSettings(...) ==== stats Returns statistical information about nodes in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html[Endpoint documentation] [source,ts] ---- client.nodes.stats(...) @@ -2847,7 +2857,7 @@ client.nodes.stats(...) 
==== usage Returns low-level information about REST actions usage on nodes. -https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-usage.html[Endpoint documentation] [source,ts] ---- client.nodes.usage(...) @@ -2905,16 +2915,6 @@ https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-put-job.ht client.rollup.putJob(...) ---- -[discrete] -==== rollup -Rollup an index - -https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-rollup.html[Endpoint documentation] -[source,ts] ----- -client.rollup.rollup(...) ----- - [discrete] ==== rollup_search Enables searching rolled-up data using the standard query DSL. @@ -3460,6 +3460,8 @@ client.security.samlServiceProviderMetadata(...) [discrete] ==== update_api_key Updates attributes of an existing API key. + +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-api-key.html[Endpoint documentation] [source,ts] ---- client.security.updateApiKey(...) @@ -3759,7 +3761,7 @@ client.ssl.certificates(...) ==== cancel Cancels a task, if it can be cancelled through an API. -https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html[Endpoint documentation] [source,ts] ---- client.tasks.cancel(...) @@ -3769,7 +3771,7 @@ client.tasks.cancel(...) ==== get Returns information about a task. -https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html[Endpoint documentation] [source,ts] ---- client.tasks.get(...) @@ -3779,7 +3781,7 @@ client.tasks.get(...) ==== list Returns a list of tasks. -https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html[Endpoint documentation] [source,ts] ---- client.tasks.list(...) 
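Editorial aside: before the regenerated sources below, a quick usage sketch of the `indices.downsample` API that this commit documents above. The index names and the `fixed_interval` value are illustrative; the request body is typed as `any` in this release, so the config shape shown is an assumption based on the downsample endpoint docs rather than something enforced by the client types.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Downsample a write-blocked time-series index into one document
// per hour per time series. Both index names are illustrative.
await client.indices.downsample({
  index: 'my-tsds-index',
  target_index: 'my-tsds-index-downsampled',
  config: { fixed_interval: '1h' } // assumed body shape for the downsample endpoint
})
----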
diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index a2ea00bcc..3382499d2 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -114,7 +114,7 @@ export default class AsyncSearch { async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index bdd46c811..baae23b8f 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -104,7 +104,7 @@ export default class Fleet { async search (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptions): Promise> async search (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index cc8abbfa3..c6401e9bc 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -418,6 +418,33 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: 
TransportRequestOptionsWithMeta): Promise> + async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise + async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index', 'target_index'] + const acceptedBody: string[] = ['config'] + const querystring: Record = {} + // @ts-expect-error + let body: any = params.body ?? undefined + + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_downsample/${encodeURIComponent(params.target_index.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index 11ad960f3..6cf8216f4 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -181,33 +181,6 @@ export default class Rollup { return await this.transport.request({ path, method, querystring, body }, options) } - async rollup (this: That, params: T.RollupRollupRequest | TB.RollupRollupRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async rollup (this: That, params: T.RollupRollupRequest | TB.RollupRollupRequest, options?: TransportRequestOptionsWithMeta): Promise> - async rollup (this: That, params: T.RollupRollupRequest | TB.RollupRollupRequest, options?: TransportRequestOptions): Promise - async rollup (this: That, params: T.RollupRollupRequest | TB.RollupRollupRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'rollup_index'] - const acceptedBody: string[] = ['config'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? 
undefined - - for (const key in params) { - if (acceptedBody.includes(key)) { - // @ts-expect-error - body = params[key] - } else if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] - } - } - - const method = 'POST' - const path = `/${encodeURIComponent(params.index.toString())}/_rollup/${encodeURIComponent(params.rollup_index.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) - } - async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/search.ts b/src/api/api/search.ts index 0f68a72f2..6c757cc9b 100644 --- a/src/api/api/search.ts +++ b/src/api/api/search.ts @@ -42,7 +42,7 @@ export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise> export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 1b42c337c..8c9e634b5 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -891,7 +891,7 @@ export default class Security { async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password'] + const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password', 'run_as'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/types.ts b/src/api/types.ts index d12e2f4c1..c98388182 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -433,7 +433,7 @@ export interface GetScriptLanguagesResponse { types_allowed: string[] } -export interface GetSourceRequest { 
+export interface GetSourceRequest extends RequestBase { id: Id index: IndexName preference?: string @@ -560,6 +560,7 @@ export interface MsearchMultisearchBody { collapse?: SearchFieldCollapse query?: QueryDslQueryContainer explain?: boolean + ext?: Record stored_fields?: Fields docvalue_fields?: (QueryDslFieldAndFormat | Field)[] knn?: KnnQuery @@ -987,6 +988,7 @@ export interface SearchRequest extends RequestBase { aggs?: Record collapse?: SearchFieldCollapse explain?: boolean + ext?: Record from?: integer highlight?: SearchHighlight track_total_hits?: SearchTrackHits @@ -1876,7 +1878,7 @@ export type EpochTime = Unit export interface ErrorCauseKeys { type: string - reason: string + reason?: string stack_trace?: string caused_by?: ErrorCause root_cause?: ErrorCause[] @@ -1918,7 +1920,7 @@ export interface FieldSort { export type FieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos' -export type FieldValue = long | double | string | boolean +export type FieldValue = long | double | string | boolean | any export interface FielddataStats { evictions?: long @@ -2954,7 +2956,7 @@ export interface AggregationsFormattableMetricAggregation extends AggregationsMe export type AggregationsGapPolicy = 'skip' | 'insert_zeros' export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase { - bounds: GeoBounds + bounds?: GeoBounds } export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase { @@ -3953,17 +3955,18 @@ export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase { type: 'icu_collation' - alternate: AnalysisIcuCollationAlternate - caseFirst: AnalysisIcuCollationCaseFirst - caseLevel: boolean - country: string - decomposition: AnalysisIcuCollationDecomposition - hiraganaQuaternaryMode: boolean - language: string - numeric: boolean - strength: AnalysisIcuCollationStrength + alternate?: AnalysisIcuCollationAlternate + caseFirst?: AnalysisIcuCollationCaseFirst + caseLevel?: boolean + country?: string + decomposition?: AnalysisIcuCollationDecomposition + hiraganaQuaternaryMode?: boolean + language?: string + numeric?: boolean + rules?: string + strength?: AnalysisIcuCollationStrength variableTop?: string - variant: string + variant?: string } export interface AnalysisIcuFoldingTokenFilter extends AnalysisTokenFilterBase { @@ -3995,7 +3998,7 @@ export type AnalysisIcuTransformDirection = 'forward' | 'reverse' export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase { type: 'icu_transform' - dir: AnalysisIcuTransformDirection + dir?: AnalysisIcuTransformDirection id: string } @@ -4785,7 +4788,7 @@ export interface MappingRuntimeField { export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' -export type MappingRuntimeFields = Record +export type MappingRuntimeFields = Record export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase { type: 'scaled_float' @@ -5753,6 +5756,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { aggs?: Record collapse?: SearchFieldCollapse explain?: boolean + ext?: Record from?: integer highlight?: SearchHighlight track_total_hits?: SearchTrackHits @@ -7879,7 +7883,7 @@ export interface ClusterComponentTemplateNode { export interface ClusterComponentTemplateSummary { _meta?: Metadata version?: VersionNumber - settings: Record + settings?: Record mappings?: MappingTypeMapping aliases?: 
Record } @@ -8812,6 +8816,7 @@ export interface FleetSearchRequest extends RequestBase { aggs?: Record collapse?: SearchFieldCollapse explain?: boolean + ext?: Record from?: integer highlight?: SearchHighlight track_total_hits?: SearchTrackHits @@ -9329,8 +9334,7 @@ export interface IndicesIndexTemplateSummary { } export interface IndicesIndexVersioning { - created: VersionString - created_string?: VersionString + created?: VersionString } export interface IndicesIndexingPressure { @@ -9565,7 +9569,7 @@ export interface IndicesAnalyzeAnalyzeDetail { export interface IndicesAnalyzeAnalyzeToken { end_offset: long position: long - position_length?: long + positionLength?: long start_offset: long token: string type: string @@ -9772,6 +9776,14 @@ export interface IndicesDiskUsageRequest extends RequestBase { export type IndicesDiskUsageResponse = any +export interface IndicesDownsampleRequest extends RequestBase { + index: IndexName + target_index: IndexName + config?: any +} + +export type IndicesDownsampleResponse = any + export interface IndicesExistsRequest extends RequestBase { index: Indices allow_no_indices?: boolean @@ -14752,14 +14764,6 @@ export interface RollupPutJobRequest extends RequestBase { export type RollupPutJobResponse = AcknowledgedResponseBase -export interface RollupRollupRequest extends RequestBase { - index: IndexName - rollup_index: IndexName - config?: any -} - -export type RollupRollupResponse = any - export interface RollupRollupSearchRequest extends RequestBase { index: Indices rest_total_hits_as_int?: boolean @@ -14875,6 +14879,9 @@ export interface SecurityApiKey { realm?: string username?: Username metadata?: Metadata + role_descriptors?: Record + limited_by?: Record[] + _sort?: SortResults } export interface SecurityApplicationGlobalUserPrivileges { @@ -15001,6 +15008,7 @@ export interface SecurityUser { roles: string[] username: Username enabled: boolean + profile_uid?: SecurityUserProfileId } export interface SecurityUserProfile { @@ -15270,6 +15278,7 @@ export interface SecurityGetApiKeyRequest extends RequestBase { owner?: boolean realm_name?: Name username?: Username + with_limited_by?: boolean } export interface SecurityGetApiKeyResponse { @@ -15381,7 +15390,7 @@ export interface SecurityGetTokenResponse { expires_in: long scope?: string type: string - refresh_token: string + refresh_token?: string kerberos_authentication_response_token?: string authentication: SecurityGetTokenAuthenticatedUser } @@ -15393,6 +15402,7 @@ export interface SecurityGetTokenUserRealm { export interface SecurityGetUserRequest extends RequestBase { username?: Username | Username[] + with_profile_uid?: boolean } export type SecurityGetUserResponse = Record @@ -15411,19 +15421,28 @@ export interface SecurityGetUserPrivilegesResponse { run_as: string[] } +export interface SecurityGetUserProfileGetUserProfileErrors { + count: long + details: Record +} + export interface SecurityGetUserProfileRequest extends RequestBase { - uid: SecurityUserProfileId + uid: SecurityUserProfileId | SecurityUserProfileId[] data?: string | string[] } -export type SecurityGetUserProfileResponse = Record +export interface SecurityGetUserProfileResponse { + profiles: SecurityUserProfileWithMetadata[] + errors?: SecurityGetUserProfileGetUserProfileErrors +} export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' export interface SecurityGrantApiKeyGrantApiKey { name: Name - expiration?: Duration - role_descriptors?: Record[] + expiration?: DurationLarge + role_descriptors?: Record | 
Record<string, SecurityRoleDescriptor>[]
+  metadata?: Metadata
 }

 export interface SecurityGrantApiKeyRequest extends RequestBase {
@@ -15432,6 +15451,7 @@ export interface SecurityGrantApiKeyRequest extends RequestBase {
   access_token?: string
   username?: Username
   password?: Password
+  run_as?: Username
 }

 export interface SecurityGrantApiKeyResponse {
@@ -15439,6 +15459,7 @@ export interface SecurityGrantApiKeyResponse {
   id: Id
   name: Name
   expiration?: EpochTime
+  encoded: string
 }

 export interface SecurityHasPrivilegesApplicationPrivilegesCheck {
@@ -15474,6 +15495,11 @@ export interface SecurityHasPrivilegesResponse {
   username: Username
 }

+export interface SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors {
+  count: long
+  details: Record<SecurityUserProfileId, ErrorCause>
+}
+
 export interface SecurityHasPrivilegesUserProfilePrivilegesCheck {
   application?: SecurityHasPrivilegesApplicationPrivilegesCheck[]
   cluster?: SecurityClusterPrivilege[]
@@ -15487,7 +15513,7 @@ export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase {

 export interface SecurityHasPrivilegesUserProfileResponse {
   has_privilege_uids: SecurityUserProfileId[]
-  error_uids?: SecurityUserProfileId[]
+  errors?: SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors
 }

 export interface SecurityInvalidateApiKeyRequest extends RequestBase {
@@ -15582,6 +15608,7 @@ export interface SecurityPutUserResponse {
 }

 export interface SecurityQueryApiKeysRequest extends RequestBase {
+  with_limited_by?: boolean
   query?: QueryDslQueryContainer
   from?: integer
   sort?: Sort
@@ -16309,6 +16336,7 @@ export interface TasksParentTaskInfo extends TasksTaskInfo {

 export interface TasksTaskInfo {
   action: string
+  cancelled?: boolean
   cancellable: boolean
   description?: string
   headers: Record<string, string>
@@ -16764,42 +16792,31 @@ export interface WatcherActivationStatus {

 export interface WatcherAlwaysCondition {
 }

-export interface WatcherArrayCompareCondition {
-  array_path: string
-  comparison: string
+export interface WatcherArrayCompareConditionKeys {
   path: string
-  quantifier: WatcherQuantifier
-  value: any
 }
+export type WatcherArrayCompareCondition = WatcherArrayCompareConditionKeys
+& { [property: string]: WatcherArrayCompareOpParams | string }

-export interface WatcherChainInput {
-  inputs: WatcherInputContainer[]
-}
-
-export interface WatcherCompareCondition {
-  comparison?: string
-  path?: string
-  value?: any
-  'ctx.payload.match'?: WatcherCompareContextPayloadCondition
-  'ctx.payload.value'?: WatcherCompareContextPayloadCondition
+export interface WatcherArrayCompareOpParams {
+  quantifier: WatcherQuantifier
+  value: FieldValue
 }

-export interface WatcherCompareContextPayloadCondition {
-  eq?: any
-  lt?: any
-  gt?: any
-  lte?: any
-  gte?: any
+export interface WatcherChainInput {
+  inputs: Partial<Record<string, WatcherInputContainer>>[]
 }

 export interface WatcherConditionContainer {
   always?: WatcherAlwaysCondition
-  array_compare?: WatcherArrayCompareCondition
-  compare?: WatcherCompareCondition
+  array_compare?: Partial<Record<string, WatcherArrayCompareCondition>>
+  compare?: Partial<Record<string, Partial<Record<WatcherConditionOp, FieldValue>>>>
   never?: WatcherNeverCondition
   script?: WatcherScriptCondition
 }

+export type WatcherConditionOp = 'not_eq' | 'eq' | 'lt' | 'gt' | 'lte' | 'gte'
+
 export type WatcherConditionType = 'always' | 'never' | 'script' | 'compare' | 'array_compare'

 export type WatcherConnectionScheme = 'http' | 'https'
@@ -16919,7 +16936,6 @@ export interface WatcherHttpEmailAttachment {
 }

 export interface WatcherHttpInput {
-  http?: WatcherHttpInput
   extract?: string[]
   request?: WatcherHttpInputRequestDefinition
   response_content_type?: WatcherResponseContentType
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index bdd9bddd3..98c43c1d1 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -454,7 +454,7 @@ export interface GetScriptLanguagesResponse {
   types_allowed: string[]
 }

-export interface GetSourceRequest {
+export interface GetSourceRequest extends RequestBase {
   id: Id
   index: IndexName
   preference?: string
@@ -588,6 +588,7 @@ export interface MsearchMultisearchBody {
   collapse?: SearchFieldCollapse
   query?: QueryDslQueryContainer
   explain?: boolean
+  ext?: Record<string, any>
   stored_fields?: Fields
   docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
   knn?: KnnQuery
@@ -1041,6 +1042,7 @@ export interface SearchRequest extends RequestBase {
   aggs?: Record<string, AggregationsAggregationContainer>
   collapse?: SearchFieldCollapse
   explain?: boolean
+  ext?: Record<string, any>
   from?: integer
   highlight?: SearchHighlight
   track_total_hits?: SearchTrackHits
@@ -1949,7 +1951,7 @@ export type EpochTime<Unit> = Unit

 export interface ErrorCauseKeys {
   type: string
-  reason: string
+  reason?: string
   stack_trace?: string
   caused_by?: ErrorCause
   root_cause?: ErrorCause[]
@@ -1991,7 +1993,7 @@ export interface FieldSort {

 export type FieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos'

-export type FieldValue = long | double | string | boolean
+export type FieldValue = long | double | string | boolean | any

 export interface FielddataStats {
   evictions?: long
@@ -3027,7 +3029,7 @@ export interface AggregationsFormattableMetricAggregation extends AggregationsMe

 export type AggregationsGapPolicy = 'skip' | 'insert_zeros'

 export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase {
-  bounds: GeoBounds
+  bounds?: GeoBounds
 }

 export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase {
@@ -4026,17 +4028,18 @@ export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary'

 export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase {
   type: 'icu_collation'
-  alternate: AnalysisIcuCollationAlternate
-  caseFirst: AnalysisIcuCollationCaseFirst
-  caseLevel: boolean
-  country: string
-  decomposition: AnalysisIcuCollationDecomposition
-  hiraganaQuaternaryMode: boolean
-  language: string
-  numeric: boolean
-  strength: AnalysisIcuCollationStrength
+  alternate?: AnalysisIcuCollationAlternate
+  caseFirst?: AnalysisIcuCollationCaseFirst
+  caseLevel?: boolean
+  country?: string
+  decomposition?: AnalysisIcuCollationDecomposition
+  hiraganaQuaternaryMode?: boolean
+  language?: string
+  numeric?: boolean
+  rules?: string
+  strength?: AnalysisIcuCollationStrength
   variableTop?: string
-  variant: string
+  variant?: string
 }

 export interface AnalysisIcuFoldingTokenFilter extends AnalysisTokenFilterBase {
@@ -4068,7 +4071,7 @@ export type AnalysisIcuTransformDirection = 'forward' | 'reverse'

 export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase {
   type: 'icu_transform'
-  dir: AnalysisIcuTransformDirection
+  dir?: AnalysisIcuTransformDirection
   id: string
 }

@@ -4858,7 +4861,7 @@ export interface MappingRuntimeField {

 export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long'

-export type MappingRuntimeFields = Record
+export type MappingRuntimeFields = Record

 export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase {
   type: 'scaled_float'
@@ -5828,6 +5831,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase {
   aggs?: Record<string, AggregationsAggregationContainer>
   collapse?: SearchFieldCollapse
   explain?: boolean
+  ext?: Record<string, any>
   from?: integer
   highlight?: SearchHighlight
   track_total_hits?: SearchTrackHits
@@ -7968,7 +7972,7 @@ export interface ClusterComponentTemplateNode {

 export interface ClusterComponentTemplateSummary {
   _meta?: Metadata
   version?: VersionNumber
-  settings: Record<IndexName, IndicesIndexSettings>
+  settings?: Record<IndexName, IndicesIndexSettings>
   mappings?: MappingTypeMapping
   aliases?: Record<string, AliasDefinition>
 }
@@ -8922,6 +8926,7 @@ export interface FleetSearchRequest extends RequestBase {
   aggs?: Record<string, AggregationsAggregationContainer>
   collapse?: SearchFieldCollapse
   explain?: boolean
+  ext?: Record<string, any>
   from?: integer
   highlight?: SearchHighlight
   track_total_hits?: SearchTrackHits
@@ -9452,8 +9457,7 @@ export interface IndicesIndexTemplateSummary {
 }

 export interface IndicesIndexVersioning {
-  created: VersionString
-  created_string?: VersionString
+  created?: VersionString
 }

 export interface IndicesIndexingPressure {
@@ -9688,7 +9692,7 @@ export interface IndicesAnalyzeAnalyzeDetail {

 export interface IndicesAnalyzeAnalyzeToken {
   end_offset: long
   position: long
-  position_length?: long
+  positionLength?: long
   start_offset: long
   token: string
   type: string
@@ -9904,6 +9908,15 @@ export interface IndicesDiskUsageRequest extends RequestBase {

 export type IndicesDiskUsageResponse = any

+export interface IndicesDownsampleRequest extends RequestBase {
+  index: IndexName
+  target_index: IndexName
+  /** @deprecated The use of the 'body' key has been deprecated, use 'config' instead. */
+  body?: any
+}
+
+export type IndicesDownsampleResponse = any
+
 export interface IndicesExistsRequest extends RequestBase {
   index: Indices
   allow_no_indices?: boolean
@@ -15046,15 +15059,6 @@ export interface RollupPutJobRequest extends RequestBase {

 export type RollupPutJobResponse = AcknowledgedResponseBase

-export interface RollupRollupRequest extends RequestBase {
-  index: IndexName
-  rollup_index: IndexName
-  /** @deprecated The use of the 'body' key has been deprecated, use 'config' instead. */
-  body?: any
-}
-
-export type RollupRollupResponse = any
-
 export interface RollupRollupSearchRequest extends RequestBase {
   index: Indices
   rest_total_hits_as_int?: boolean
@@ -15176,6 +15180,9 @@ export interface SecurityApiKey {
   realm?: string
   username?: Username
   metadata?: Metadata
+  role_descriptors?: Record<string, SecurityRoleDescriptor>
+  limited_by?: Record<string, SecurityRoleDescriptor>[]
+  _sort?: SortResults
 }

 export interface SecurityApplicationGlobalUserPrivileges {
@@ -15302,6 +15309,7 @@ export interface SecurityUser {
   roles: string[]
   username: Username
   enabled: boolean
+  profile_uid?: SecurityUserProfileId
 }

 export interface SecurityUserProfile {
@@ -15580,6 +15588,7 @@ export interface SecurityGetApiKeyRequest extends RequestBase {
   owner?: boolean
   realm_name?: Name
   username?: Username
+  with_limited_by?: boolean
 }

 export interface SecurityGetApiKeyResponse {
@@ -15694,7 +15703,7 @@ export interface SecurityGetTokenResponse {
   expires_in: long
   scope?: string
   type: string
-  refresh_token: string
+  refresh_token?: string
   kerberos_authentication_response_token?: string
   authentication: SecurityGetTokenAuthenticatedUser
 }
@@ -15706,6 +15715,7 @@ export interface SecurityGetTokenUserRealm {

 export interface SecurityGetUserRequest extends RequestBase {
   username?: Username | Username[]
+  with_profile_uid?: boolean
 }

 export type SecurityGetUserResponse = Record<string, SecurityUser>
@@ -15724,19 +15734,28 @@ export interface SecurityGetUserPrivilegesResponse {
   run_as: string[]
 }

+export interface SecurityGetUserProfileGetUserProfileErrors {
+  count: long
+  details: Record<SecurityUserProfileId, ErrorCause>
+}
+
 export interface SecurityGetUserProfileRequest extends RequestBase {
-  uid: SecurityUserProfileId
+  uid: SecurityUserProfileId | SecurityUserProfileId[]
   data?: string | string[]
 }

-export type SecurityGetUserProfileResponse = Record
+export interface SecurityGetUserProfileResponse {
+  profiles: SecurityUserProfileWithMetadata[]
+  errors?: SecurityGetUserProfileGetUserProfileErrors
+}

 export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password'

 export interface SecurityGrantApiKeyGrantApiKey {
   name: Name
-  expiration?: Duration
-  role_descriptors?: Record<string, SecurityRoleDescriptor>[]
+  expiration?: DurationLarge
+  role_descriptors?: Record<string, SecurityRoleDescriptor> | Record<string, SecurityRoleDescriptor>[]
+  metadata?: Metadata
 }

 export interface SecurityGrantApiKeyRequest extends RequestBase {
@@ -15747,6 +15766,7 @@ export interface SecurityGrantApiKeyRequest extends RequestBase {
     access_token?: string
     username?: Username
     password?: Password
+    run_as?: Username
   }
 }

 export interface SecurityGrantApiKeyResponse {
@@ -15755,6 +15775,7 @@ export interface SecurityGrantApiKeyResponse {
   id: Id
   name: Name
   expiration?: EpochTime
+  encoded: string
 }

 export interface SecurityHasPrivilegesApplicationPrivilegesCheck {
@@ -15793,6 +15814,11 @@ export interface SecurityHasPrivilegesResponse {
   username: Username
 }

+export interface SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors {
+  count: long
+  details: Record<SecurityUserProfileId, ErrorCause>
+}
+
 export interface SecurityHasPrivilegesUserProfilePrivilegesCheck {
   application?: SecurityHasPrivilegesApplicationPrivilegesCheck[]
   cluster?: SecurityClusterPrivilege[]
@@ -15809,7 +15835,7 @@ export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase {

 export interface SecurityHasPrivilegesUserProfileResponse {
   has_privilege_uids: SecurityUserProfileId[]
-  error_uids?: SecurityUserProfileId[]
+  errors?: SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors
 }

 export interface SecurityInvalidateApiKeyRequest extends RequestBase {
@@ -15921,6 +15947,7 @@ export interface SecurityPutUserResponse {
 }

 export interface SecurityQueryApiKeysRequest extends RequestBase {
+  with_limited_by?: boolean
  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
  body?: {
    query?: QueryDslQueryContainer
@@ -16702,6 +16729,7 @@ export interface TasksParentTaskInfo extends TasksTaskInfo {

 export interface TasksTaskInfo {
   action: string
+  cancelled?: boolean
   cancellable: boolean
   description?: string
   headers: Record<string, string>
@@ -17167,42 +17195,31 @@ export interface WatcherActivationStatus {

 export interface WatcherAlwaysCondition {
 }

-export interface WatcherArrayCompareCondition {
-  array_path: string
-  comparison: string
+export interface WatcherArrayCompareConditionKeys {
   path: string
-  quantifier: WatcherQuantifier
-  value: any
 }
+export type WatcherArrayCompareCondition = WatcherArrayCompareConditionKeys
+& { [property: string]: WatcherArrayCompareOpParams | string }

-export interface WatcherChainInput {
-  inputs: WatcherInputContainer[]
-}
-
-export interface WatcherCompareCondition {
-  comparison?: string
-  path?: string
-  value?: any
-  'ctx.payload.match'?: WatcherCompareContextPayloadCondition
-  'ctx.payload.value'?: WatcherCompareContextPayloadCondition
+export interface WatcherArrayCompareOpParams {
+  quantifier: WatcherQuantifier
+  value: FieldValue
 }

-export interface WatcherCompareContextPayloadCondition {
-  eq?: any
-  lt?: any
-  gt?: any
-  lte?: any
-  gte?: any
+export interface WatcherChainInput {
+  inputs: Partial<Record<string, WatcherInputContainer>>[]
 }

 export interface WatcherConditionContainer {
   always?: WatcherAlwaysCondition
-  array_compare?: WatcherArrayCompareCondition
-  compare?: WatcherCompareCondition
+  array_compare?: Partial<Record<string, WatcherArrayCompareCondition>>
+  compare?: Partial<Record<string, Partial<Record<WatcherConditionOp, FieldValue>>>>
   never?: WatcherNeverCondition
   script?: WatcherScriptCondition
 }

+export type WatcherConditionOp = 'not_eq' | 'eq' | 'lt' | 'gt' | 'lte' | 'gte'
+
 export type WatcherConditionType = 'always' | 'never' | 'script' | 'compare' | 'array_compare'

 export type WatcherConnectionScheme = 'http' | 'https'
@@ -17322,7 +17339,6 @@ export interface WatcherHttpEmailAttachment {
 }

 export interface WatcherHttpInput {
-  http?: WatcherHttpInput
   extract?: string[]
   request?: WatcherHttpInputRequestDefinition
   response_content_type?: WatcherResponseContentType
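The hunks above reshape the Watcher condition types: the free-form `WatcherCompareCondition` is replaced by mapped types keyed by a payload path and a `WatcherConditionOp`. A minimal sketch of what a `compare` condition looks like under the new definitions — the endpoint, watch id, schedule, index, and threshold are hypothetical, not part of this patch:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // hypothetical endpoint

// `compare` now maps a payload path to a partial record of
// WatcherConditionOp ('eq' | 'not_eq' | 'lt' | 'gt' | 'lte' | 'gte') -> FieldValue.
await client.watcher.putWatch({
  id: 'error-spike', // hypothetical watch id
  trigger: { schedule: { interval: '10m' } },
  input: {
    search: {
      request: {
        indices: ['logs'], // hypothetical index
        body: { query: { match: { level: 'error' } } }
      }
    }
  },
  condition: {
    compare: { 'ctx.payload.hits.total': { gte: 10 } }
  }
})
----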
From f79f4e8f25b488bd12d18b326157f007d0d71133 Mon Sep 17 00:00:00 2001
From: Seth Michael Larson
Date: Tue, 27 Sep 2022 06:42:31 -0500
Subject: [PATCH 180/647] Remove unnecessary ts-expect-error

---
 src/helpers.ts | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/helpers.ts b/src/helpers.ts
index 7df2016f2..9cba4db8f 100644
--- a/src/helpers.ts
+++ b/src/helpers.ts
@@ -229,7 +229,6 @@ export default class Helpers {
   rest_total_hits_as_int: params.rest_total_hits_as_int,
   scroll_id
 }, options as TransportRequestOptionsWithMeta)
-// @ts-expect-error
 response = r as TransportResult<ScrollSearchResponse<TDocument, TAggregations>, unknown>
 assert(response !== undefined, 'The response is undefined, please file a bug report')
 if (response.statusCode !== 429) break
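The patch above touches the 429 retry path of the scroll helper. For context, a minimal sketch of how that helper is consumed — the index name and query are hypothetical:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // hypothetical endpoint

// scrollSearch yields one response page at a time; the loop patched above
// transparently re-issues the scroll request when Elasticsearch replies 429.
for await (const result of client.helpers.scrollSearch({
  index: 'logs', // hypothetical index
  query: { match_all: {} }
})) {
  console.log(result.documents.length)
}
----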
From 04634af5522d8733f5b1fca8a4c4a3fe4c9347de Mon Sep 17 00:00:00 2001
From: Seth Michael Larson
Date: Wed, 28 Sep 2022 12:29:57 -0500
Subject: [PATCH 181/647] Update all docs refs to 'current' instead of 'master'

---
 docs/observability.asciidoc |   8 +-
 docs/reference.asciidoc     | 274 ++++++++++++++++++------------------
 2 files changed, 139 insertions(+), 143 deletions(-)

diff --git a/docs/observability.asciidoc b/docs/observability.asciidoc
index c5e4d380c..8ae57bcad 100644
--- a/docs/observability.asciidoc
+++ b/docs/observability.asciidoc
@@ -155,7 +155,7 @@ request: {
The event order is described in the following graph, in some edge cases, the
order is not guaranteed. You can find in
-https://github.com/elastic/elasticsearch-js/blob/master/test/acceptance/events-order.test.js[`test/acceptance/events-order.test.js`]
+https://github.com/elastic/elasticsearch-js/blob/main/test/acceptance/events-order.test.js[`test/acceptance/events-order.test.js`]
how the order changes based on the situation.

[source]
@@ -377,9 +377,9 @@ child.search({
To improve observability, the client offers an easy way to configure the
`X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this
allows you to discover this identifier in the
-https://www.elastic.co/guide/en/elasticsearch/reference/master/logging.html#deprecation-logging[deprecation logs],
-helps you with https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-slowlog.html#_identifying_search_slow_log_origin[identifying search slow log origin]
-as well as https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html#_identifying_running_tasks[identifying running tasks].
+https://www.elastic.co/guide/en/elasticsearch/reference/current/logging.html#deprecation-logging[deprecation logs],
+helps you with https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-slowlog.html#_identifying_search_slow_log_origin[identifying search slow log origin]
+as well as https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html#_identifying_running_tasks[identifying running tasks].
The `X-Opaque-Id` should be configured in each request, for doing that you can
use the `opaqueId` option, as you can see in the following example. The
diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index 1dbc7a9f0..125236b0e 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -59,7 +59,7 @@ client.closePointInTime(...)
=== count
Returns number of documents matching a query.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html[Endpoint documentation]
[source,ts]
----
client.count(...)
@@ -71,7 +71,7 @@
=== create
Creates a new document in the index.
Returns a 409 response when a document with a same ID already exists in the index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html[Endpoint documentation]
[source,ts]
----
client.create(...)
@@ -81,7 +81,7 @@
=== delete
Removes a document from the index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html[Endpoint documentation]
[source,ts]
----
client.delete(...)
@@ -91,7 +91,7 @@
=== delete_by_query
Deletes documents matching the provided query.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html[Endpoint documentation]
[source,ts]
----
client.deleteByQuery(...)
@@ -111,7 +111,7 @@
=== delete_by_query_rethrottle
Changes the number of requests per second for a particular Delete By Query operation.
[source,ts]
----
client.deleteByQueryRethrottle(...)
=== delete_script
Deletes a script.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.deleteScript(...)
@@ -121,7 +121,7 @@
=== exists
Returns information about whether a document exists in an index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html[Endpoint documentation]
[source,ts]
----
client.exists(...)
@@ -131,7 +131,7 @@
=== exists_source
Returns information about whether a document source exists in an index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html[Endpoint documentation]
[source,ts]
----
client.existsSource(...)
@@ -141,7 +141,7 @@
=== explain
Returns information about why a specific matches (or doesn't match) a query.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html[Endpoint documentation]
[source,ts]
----
client.explain(...)
@@ -151,7 +151,7 @@
=== field_caps
Returns the information about the capabilities of fields among multiple indices.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html[Endpoint documentation]
[source,ts]
----
client.fieldCaps(...)
@@ -161,7 +161,7 @@
=== get
Returns a document.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html[Endpoint documentation]
[source,ts]
----
client.get(...)
@@ -171,7 +171,7 @@
=== get_script
Returns a script.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.getScript(...)
@@ -181,7 +181,7 @@
=== get_script_context
Returns all script contexts.
-https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-contexts.html[Endpoint documentation]
[source,ts]
----
client.getScriptContext(...)
@@ -191,7 +191,7 @@
=== get_script_languages
Returns available script types, languages and contexts
-https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.getScriptLanguages(...)
@@ -201,7 +201,7 @@
=== get_source
Returns the source of a document.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html[Endpoint documentation]
[source,ts]
----
client.getSource(...)
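Earlier in this series `GetSourceRequest` gained `extends RequestBase`, so the get-source call documented above accepts the common request parameters like any other endpoint. A short sketch, reusing the client from the sketches above — the index, id, and chosen parameter are illustrative:

[source,ts]
----
// getSource returns the raw _source, while get wraps it in hit metadata.
const source = await client.getSource({
  index: 'my-index', // hypothetical index
  id: '1', // hypothetical document id
  filter_path: '*' // one of the RequestBase-style common parameters
})
console.log(source)
----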
@@ -211,7 +211,7 @@ client.getSource(...)
=== index
Creates or updates a document in an index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html[Endpoint documentation]
[source,ts]
----
client.index(...)
@@ -231,7 +231,7 @@
=== info
Returns basic information about the cluster.
[source,ts]
----
client.info(...)
=== knn_search
Performs a kNN search.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html[Endpoint documentation]
[source,ts]
----
client.knnSearch(...)
@@ -241,7 +241,7 @@
=== mget
Allows to get multiple documents in one request.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html[Endpoint documentation]
[source,ts]
----
client.mget(...)
@@ -251,7 +251,7 @@
=== msearch
Allows to execute several search operations in one request.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html[Endpoint documentation]
[source,ts]
----
client.msearch(...)
@@ -271,7 +271,7 @@
=== msearch_template
Allows to execute several search template operations in one request.
[source,ts]
----
client.msearchTemplate(...)
=== mtermvectors
Returns multiple termvectors in one request.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-termvectors.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html[Endpoint documentation]
[source,ts]
----
client.mtermvectors(...)
@@ -301,7 +301,7 @@
=== ping
Returns whether the cluster is running.
[source,ts]
----
client.ping(...)
=== put_script
Creates or updates a script.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.putScript(...)
@@ -311,7 +311,7 @@
=== rank_eval
Allows to evaluate the quality of ranked search results over a set of typical search queries
-https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html[Endpoint documentation]
[source,ts]
----
client.rankEval(...)
@@ -323,7 +323,7 @@
=== reindex
Allows to copy documents from one index to another, optionally filtering the source
documents by a query, changing the destination index settings, or fetching the
documents from a remote cluster.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html[Endpoint documentation]
[source,ts]
----
client.reindex(...)
@@ -333,7 +333,7 @@
=== reindex_rethrottle
Changes the number of requests per second for a particular Reindex operation.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html[Endpoint documentation]
[source,ts]
----
client.reindexRethrottle(...)
@@ -353,7 +353,7 @@
=== render_search_template
Allows to use the Mustache language to pre-render a search definition.
[source,ts]
----
client.renderSearchTemplate(...)
=== scripts_painless_execute
Allows an arbitrary script to be executed and a result to be returned
-https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html[Endpoint documentation]
[source,ts]
----
client.scriptsPainlessExecute(...)
@@ -363,7 +363,7 @@
=== scroll
Allows to retrieve a large numbers of results from a single search request.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html#request-body-search-scroll[Endpoint documentation]
[source,ts]
----
client.scroll(...)
@@ -373,7 +373,7 @@
=== search
Returns results matching a query.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html[Endpoint documentation]
[source,ts]
----
client.search(...)
@@ -383,7 +383,7 @@
=== search_mvt
Searches a vector tile for geospatial values. Returns results as a binary Mapbox vector tile.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/search-vector-tile-api.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-vector-tile-api.html[Endpoint documentation]
[source,ts]
----
client.searchMvt(...)
@@ -393,7 +393,7 @@
=== search_shards
Returns information about the indices and shards that a search request would be executed against.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/search-shards.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html[Endpoint documentation]
[source,ts]
----
client.searchShards(...)
@@ -423,7 +423,7 @@
=== terms_enum
The terms enum API can be used to discover terms in the index that begin with the provided string.
[source,ts]
----
client.termsEnum(...)
=== termvectors
Returns information and statistics about terms in the fields of a particular document.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html[Endpoint documentation]
[source,ts]
----
client.termvectors(...)
@@ -433,7 +433,7 @@
=== update
Updates a document with a script or partial document.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html[Endpoint documentation]
[source,ts]
----
client.update(...)
@@ -444,7 +444,7 @@
=== update_by_query
Performs an update on every document in the index without changing the source,
for example to pick up a mapping change.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html[Endpoint documentation]
[source,ts]
----
client.updateByQuery(...)
@@ -585,8 +585,6 @@ client.cat.indices(...)
[discrete]
==== master
Returns information about the master node.
-
-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-master.html[Endpoint documentation]
[source,ts]
----
client.cat.master(...)
@@ -1054,7 +1052,7 @@ client.cluster.stats(...)
==== delete_dangling_index
Deletes the specified dangling index
-https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html[Endpoint documentation]
[source,ts]
----
client.danglingIndices.deleteDanglingIndex(...)
@@ -1064,7 +1062,7 @@
==== import_dangling_index
Imports the specified dangling index
-https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html[Endpoint documentation]
[source,ts]
----
client.danglingIndices.importDanglingIndex(...)
@@ -1074,7 +1072,7 @@
==== list_dangling_indices
Returns all dangling indices.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html[Endpoint documentation]
[source,ts]
----
client.danglingIndices.listDanglingIndices(...)
@@ -1180,7 +1178,7 @@ client.eql.search(...)
==== get_features
Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot
-https://www.elastic.co/guide/en/elasticsearch/reference/master/get-features-api.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/get-features-api.html[Endpoint documentation]
[source,ts]
----
client.features.getFeatures(...)
@@ -1190,7 +1188,7 @@
==== reset_features
Resets the internal state of features, usually by deleting system indices
-https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation]
[source,ts]
----
client.features.resetFeatures(...)
@@ -1354,7 +1352,7 @@
==== add_block
Adds a block to an index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html[Endpoint documentation]
[source,ts]
----
client.indices.addBlock(...)
@@ -1364,7 +1362,7 @@
==== analyze
Performs the analysis process on a text and return the tokens breakdown of the text.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-analyze.html[Endpoint documentation]
[source,ts]
----
client.indices.analyze(...)
@@ -1374,7 +1372,7 @@
==== clear_cache
Clears all or specific caches for one or more indices.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html[Endpoint documentation]
[source,ts]
----
client.indices.clearCache(...)
@@ -1384,7 +1382,7 @@
==== clone
Clones an index
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clone-index.html[Endpoint documentation]
[source,ts]
----
client.indices.clone(...)
@@ -1394,7 +1392,7 @@
==== close
Closes an index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html[Endpoint documentation]
[source,ts]
----
client.indices.close(...)
@@ -1404,7 +1402,7 @@
==== create
Creates an index with optional settings and mappings.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html[Endpoint documentation]
[source,ts]
----
client.indices.create(...)
@@ -1414,7 +1412,7 @@
==== create_data_stream
Creates a data stream
-https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation]
[source,ts]
----
client.indices.createDataStream(...)
@@ -1424,7 +1422,7 @@
==== data_streams_stats
Provides statistics on operations happening in a data stream.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation]
[source,ts]
----
client.indices.dataStreamsStats(...)
@@ -1434,7 +1432,7 @@
==== delete
Deletes an index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html[Endpoint documentation]
[source,ts]
----
client.indices.delete(...)
@@ -1444,7 +1442,7 @@
==== delete_alias
Deletes an alias.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html[Endpoint documentation]
[source,ts]
----
client.indices.deleteAlias(...)
@@ -1454,7 +1452,7 @@
==== delete_data_stream
Deletes a data stream.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation]
[source,ts]
----
client.indices.deleteDataStream(...)
@@ -1464,7 +1462,7 @@
==== delete_index_template
Deletes an index template.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.deleteIndexTemplate(...)
@@ -1474,7 +1472,7 @@
==== delete_template
Deletes an index template.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.deleteTemplate(...)
@@ -1484,7 +1482,7 @@
==== disk_usage
Analyzes the disk usage of each field of an index or data stream
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-disk-usage.html[Endpoint documentation]
[source,ts]
----
client.indices.diskUsage(...)
@@ -1504,7 +1502,7 @@
==== downsample
Downsamples an index
[source,ts]
----
client.indices.downsample(...)
==== exists
Returns information about whether a particular index exists.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html[Endpoint documentation]
[source,ts]
----
client.indices.exists(...)
@@ -1514,7 +1512,7 @@
==== exists_alias
Returns information about whether a particular alias exists.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html[Endpoint documentation]
[source,ts]
----
client.indices.existsAlias(...)
@@ -1524,7 +1522,7 @@
==== exists_index_template
Returns information about whether a particular index template exists.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.existsIndexTemplate(...)
@@ -1534,7 +1532,7 @@
==== exists_template
Returns information about whether a particular index template exists.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.existsTemplate(...)
@@ -1544,7 +1542,7 @@
==== field_usage_stats
Returns the field usage stats for each field of an index
-https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/field-usage-stats.html[Endpoint documentation]
[source,ts]
----
client.indices.fieldUsageStats(...)
@@ -1554,7 +1552,7 @@
==== flush
Performs the flush operation on one or more indices.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html[Endpoint documentation]
[source,ts]
----
client.indices.flush(...)
@@ -1564,7 +1562,7 @@
==== forcemerge
Performs the force merge operation on one or more indices.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html[Endpoint documentation]
[source,ts]
----
client.indices.forcemerge(...)
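The `downsample` entry above is new in this reference and matches the `IndicesDownsampleRequest` type added earlier in this series. A hedged sketch of a call, reusing the client from the sketches above — the index names and interval are hypothetical, and the request body travels under `config` per the deprecation note in the type:

[source,ts]
----
// Rolls a time-series index up into fixed-interval buckets.
await client.indices.downsample({
  index: 'metrics-raw', // hypothetical source TSDS index
  target_index: 'metrics-1h', // hypothetical target index
  config: { fixed_interval: '1h' }
})
----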
@@ -1574,7 +1572,7 @@ client.indices.forcemerge(...)
==== get
Returns information about one or more indices.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html[Endpoint documentation]
[source,ts]
----
client.indices.get(...)
@@ -1584,7 +1582,7 @@
==== get_alias
Returns an alias.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html[Endpoint documentation]
[source,ts]
----
client.indices.getAlias(...)
@@ -1594,7 +1592,7 @@
==== get_data_stream
Returns data streams.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation]
[source,ts]
----
client.indices.getDataStream(...)
@@ -1604,7 +1602,7 @@
==== get_field_mapping
Returns mapping for one or more fields.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-field-mapping.html[Endpoint documentation]
[source,ts]
----
client.indices.getFieldMapping(...)
@@ -1614,7 +1612,7 @@
==== get_index_template
Returns an index template.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.getIndexTemplate(...)
@@ -1624,7 +1622,7 @@
==== get_mapping
Returns mappings for one or more indices.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html[Endpoint documentation]
[source,ts]
----
client.indices.getMapping(...)
@@ -1634,7 +1632,7 @@
==== get_settings
Returns settings for one or more indices.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html[Endpoint documentation]
[source,ts]
----
client.indices.getSettings(...)
@@ -1644,7 +1642,7 @@
==== get_template
Returns an index template.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.getTemplate(...)
@@ -1654,7 +1652,7 @@
==== migrate_to_data_stream
Migrates an alias to a data stream
-https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation]
[source,ts]
----
client.indices.migrateToDataStream(...)
@@ -1664,7 +1662,7 @@
==== modify_data_stream
Modifies a data stream
-https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation]
[source,ts]
----
client.indices.modifyDataStream(...)
@@ -1674,7 +1672,7 @@
==== open
Opens an index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html[Endpoint documentation]
[source,ts]
----
client.indices.open(...)
@@ -1684,7 +1682,7 @@
==== promote_data_stream
Promotes a data stream from a replicated data stream managed by CCR to a regular data stream
-https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation]
[source,ts]
----
client.indices.promoteDataStream(...)
@@ -1694,7 +1692,7 @@
==== put_alias
Creates or updates an alias.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html[Endpoint documentation]
[source,ts]
----
client.indices.putAlias(...)
@@ -1704,7 +1702,7 @@
==== put_index_template
Creates or updates an index template.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.putIndexTemplate(...)
@@ -1714,7 +1712,7 @@
==== put_mapping
Updates the index mappings.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html[Endpoint documentation]
[source,ts]
----
client.indices.putMapping(...)
@@ -1724,7 +1722,7 @@
==== put_settings
Updates the index settings.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html[Endpoint documentation]
[source,ts]
----
client.indices.putSettings(...)
@@ -1734,7 +1732,7 @@
==== put_template
Creates or updates an index template.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.putTemplate(...)
@@ -1744,7 +1742,7 @@
==== recovery
Returns information about ongoing index shard recoveries.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-recovery.html[Endpoint documentation]
[source,ts]
----
client.indices.recovery(...)
@@ -1754,7 +1752,7 @@
==== refresh
Performs the refresh operation in one or more indices.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html[Endpoint documentation]
[source,ts]
----
client.indices.refresh(...)
@@ -1764,7 +1762,7 @@
==== reload_search_analyzers
Reloads an index's search analyzers and their resources.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-reload-analyzers.html[Endpoint documentation]
[source,ts]
----
client.indices.reloadSearchAnalyzers(...)
@@ -1774,7 +1772,7 @@
==== resolve_index
Returns information about any matching indices, aliases, and data streams
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-index-api.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-resolve-index-api.html[Endpoint documentation]
[source,ts]
----
client.indices.resolveIndex(...)
@@ -1785,7 +1783,7 @@
==== rollover
Updates an alias to point to a new index when the existing index
is considered to be too large or too old.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html[Endpoint documentation]
[source,ts]
----
client.indices.rollover(...)
@@ -1795,7 +1793,7 @@
==== segments
Provides low-level information about segments in a Lucene index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-segments.html[Endpoint documentation]
[source,ts]
----
client.indices.segments(...)
@@ -1805,7 +1803,7 @@
==== shard_stores
Provides store information for shard copies of indices.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html[Endpoint documentation]
[source,ts]
----
client.indices.shardStores(...)
@@ -1815,7 +1813,7 @@
==== shrink
Allow to shrink an existing index into a new index with fewer primary shards.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html[Endpoint documentation]
[source,ts]
----
client.indices.shrink(...)
@@ -1825,7 +1823,7 @@
==== simulate_index_template
Simulate matching the given index name against the index templates in the system
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.simulateIndexTemplate(...)
@@ -1835,7 +1833,7 @@
==== simulate_template
Simulate resolving the given template name or body
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.simulateTemplate(...)
@@ -1845,7 +1843,7 @@
==== split
Allows you to split an existing index into a new index with more primary shards.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html[Endpoint documentation]
[source,ts]
----
client.indices.split(...)
@@ -1855,7 +1853,7 @@
==== stats
Provides statistics on operations happening in an index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html[Endpoint documentation]
[source,ts]
----
client.indices.stats(...)
@@ -1875,7 +1873,7 @@ client.indices.unfreeze(...)
==== update_aliases
Updates index aliases.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html[Endpoint documentation]
[source,ts]
----
client.indices.updateAliases(...)
@@ -1885,7 +1883,7 @@
==== validate_query
Allows a user to validate a potentially expensive query without executing it.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/search-validate.html[Endpoint documentation]
[source,ts]
----
client.indices.validateQuery(...)
@@ -1897,7 +1895,7 @@
==== delete_pipeline
Deletes a pipeline.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-pipeline-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.deletePipeline(...)
@@ -1907,7 +1905,7 @@
==== geo_ip_stats
Returns statistical information about geoip databases
-https://www.elastic.co/guide/en/elasticsearch/reference/master/geoip-stats-api.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-stats-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.geoIpStats(...)
@@ -1917,7 +1915,7 @@
==== get_pipeline
Returns a pipeline.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/get-pipeline-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.getPipeline(...)
@@ -1927,7 +1925,7 @@
==== processor_grok
Returns a list of the built-in patterns.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html#grok-processor-rest-get[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/grok-processor.html#grok-processor-rest-get[Endpoint documentation]
[source,ts]
----
client.ingest.processorGrok(...)
@@ -1937,7 +1935,7 @@
==== put_pipeline
Creates or updates a pipeline.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/put-pipeline-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.putPipeline(...)
@@ -1947,7 +1945,7 @@
==== simulate
Allows to simulate a pipeline with example documents.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-pipeline-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.simulate(...)
@@ -1959,7 +1957,7 @@
==== delete
Deletes licensing information for the cluster
-https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-license.html[Endpoint documentation]
[source,ts]
----
client.license.delete(...)
@@ -1969,7 +1967,7 @@
==== get
Retrieves licensing information for the cluster
-https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/get-license.html[Endpoint documentation]
[source,ts]
----
client.license.get(...)
@@ -1979,7 +1977,7 @@
==== get_basic_status
Retrieves information about the status of the basic license.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html[Endpoint documentation]
[source,ts]
----
client.license.getBasicStatus(...)
@@ -1989,7 +1987,7 @@
==== get_trial_status
Retrieves information about the status of the trial license.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trial-status.html[Endpoint documentation]
[source,ts]
----
client.license.getTrialStatus(...)
@@ -1999,7 +1997,7 @@
==== post
Updates the license for the cluster.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/update-license.html[Endpoint documentation]
[source,ts]
----
client.license.post(...)
@@ -2009,7 +2007,7 @@
==== post_start_basic
Starts an indefinite basic license.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/start-basic.html[Endpoint documentation]
[source,ts]
----
client.license.postStartBasic(...)
@@ -2019,7 +2017,7 @@
==== post_start_trial
starts a limited time trial license.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trial.html[Endpoint documentation]
[source,ts]
----
client.license.postStartTrial(...)
@@ -2094,8 +2092,6 @@ client.migration.postFeatureUpgrade(...)
[discrete]
==== clear_trained_model_deployment_cache
Clear the cached results from a trained model deployment
-
-https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-trained-model-deployment-cache.html[Endpoint documentation]
[source,ts]
----
client.ml.clearTrainedModelDeploymentCache(...)
@@ -2475,7 +2471,7 @@
==== infer_trained_model
Evaluate a trained model.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html[Endpoint documentation]
[source,ts]
----
client.ml.inferTrainedModel(...)
@@ -2695,7 +2691,7 @@
==== start_trained_model_deployment
Start a trained model deployment.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trained-model-deployment.html[Endpoint documentation]
[source,ts]
----
client.ml.startTrainedModelDeployment(...)
@@ -2725,7 +2721,7 @@
==== stop_trained_model_deployment
Stop a trained model deployment.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-trained-model-deployment.html[Endpoint documentation]
[source,ts]
----
client.ml.stopTrainedModelDeployment(...)
@@ -2837,7 +2833,7 @@ client.nodes.info(...)
==== reload_secure_settings
Reloads secure settings.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html#reloadable-secure-settings[Endpoint documentation]
[source,ts]
----
client.nodes.reloadSecureSettings(...)
@@ -2869,7 +2865,7 @@ client.nodes.usage(...)
==== delete_job
Deletes an existing rollup job.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-delete-job.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-delete-job.html[Endpoint documentation]
[source,ts]
----
client.rollup.deleteJob(...)
@@ -2879,7 +2875,7 @@
==== get_jobs
Retrieves the configuration, stats, and status of rollup jobs.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-job.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-job.html[Endpoint documentation]
[source,ts]
----
client.rollup.getJobs(...)
@@ -2889,7 +2885,7 @@
==== get_rollup_caps
Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-caps.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-caps.html[Endpoint documentation]
[source,ts]
----
client.rollup.getRollupCaps(...)
@@ -2899,7 +2895,7 @@
==== get_rollup_index_caps
Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored).
-https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-index-caps.html[Endpoint documentation]
[source,ts]
----
client.rollup.getRollupIndexCaps(...)
@@ -2909,7 +2905,7 @@
==== put_job
Creates a rollup job.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-put-job.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-put-job.html[Endpoint documentation]
[source,ts]
----
client.rollup.putJob(...)
@@ -2919,7 +2915,7 @@
==== rollup_search
Enables searching rolled-up data using the standard query DSL.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-search.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-search.html[Endpoint documentation]
[source,ts]
----
client.rollup.rollupSearch(...)
@@ -2929,7 +2925,7 @@
==== start_job
Starts an existing, stopped rollup job.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-start-job.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-start-job.html[Endpoint documentation]
[source,ts]
----
client.rollup.startJob(...)
@@ -2939,7 +2935,7 @@
==== stop_job
Stops an existing, started rollup job.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-stop-job.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-stop-job.html[Endpoint documentation]
[source,ts]
----
client.rollup.stopJob(...)
@@ -2951,7 +2947,7 @@
==== cache_stats
Retrieve node-level cache statistics about searchable snapshots.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html[Endpoint documentation]
[source,ts]
----
client.searchableSnapshots.cacheStats(...)
@@ -2961,7 +2957,7 @@
==== clear_cache
Clear the cache of searchable snapshots.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html[Endpoint documentation]
[source,ts]
----
client.searchableSnapshots.clearCache(...)
@@ -2971,7 +2967,7 @@
==== mount
Mount a snapshot as a searchable index.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-mount-snapshot.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-mount-snapshot.html[Endpoint documentation]
[source,ts]
----
client.searchableSnapshots.mount(...)
@@ -2981,7 +2977,7 @@
==== stats
Retrieve shard-level statistics about searchable snapshots.
-https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html[Endpoint documentation]
+https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html[Endpoint documentation]
[source,ts]
----
client.searchableSnapshots.stats(...)
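Several security types in this series gained `with_limited_by`/`limited_by`. A minimal sketch of how the flag surfaces when querying API keys, reusing the client from the sketches above — the query and page size are illustrative:

[source,ts]
----
// with_limited_by asks Elasticsearch to also return the intersected role
// descriptors, surfaced as SecurityApiKey.limited_by in the updated types.
const response = await client.security.queryApiKeys({
  with_limited_by: true,
  query: { match_all: {} },
  size: 25
})
for (const key of response.api_keys) {
  console.log(key.id, key.limited_by)
}
----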
@@ -3161,7 +3157,7 @@ client.security.enableUser(...) ==== enroll_kibana Allows a kibana instance to configure itself to communicate with a secured elasticsearch cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-kibana-enrollment.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-kibana-enrollment.html[Endpoint documentation] [source,ts] ---- client.security.enrollKibana(...) @@ -3171,7 +3167,7 @@ client.security.enrollKibana(...) ==== enroll_node Allows a new node to enroll to an existing cluster with security enabled. -https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-node-enrollment.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-node-enrollment.html[Endpoint documentation] [source,ts] ---- client.security.enrollNode(...) @@ -3513,7 +3509,7 @@ client.slm.getLifecycle(...) ==== get_stats Returns global and policy-level statistics about actions taken by snapshot lifecycle management. -https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-stats.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-stats.html[Endpoint documentation] [source,ts] ---- client.slm.getStats(...) @@ -3565,7 +3561,7 @@ client.slm.stop(...) ==== cleanup_repository Removes stale data from repository. -https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot-repo-api.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/clean-up-snapshot-repo-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.cleanupRepository(...) @@ -3575,7 +3571,7 @@ client.snapshot.cleanupRepository(...) ==== clone Clones indices from one snapshot into another snapshot in the same repository. -https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.clone(...) @@ -3585,7 +3581,7 @@ client.snapshot.clone(...) ==== create Creates a snapshot in a repository. -https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.create(...) @@ -3595,7 +3591,7 @@ client.snapshot.create(...) ==== create_repository Creates a repository. -https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.createRepository(...) @@ -3605,7 +3601,7 @@ client.snapshot.createRepository(...) ==== delete Deletes one or more snapshots. -https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.delete(...) @@ -3615,7 +3611,7 @@ client.snapshot.delete(...) ==== delete_repository Deletes a repository. 
-https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.deleteRepository(...) @@ -3625,7 +3621,7 @@ client.snapshot.deleteRepository(...) ==== get Returns information about a snapshot. -https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.get(...) @@ -3635,7 +3631,7 @@ client.snapshot.get(...) ==== get_repository Returns information about a repository. -https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.getRepository(...) @@ -3645,7 +3641,7 @@ client.snapshot.getRepository(...) ==== repository_analyze Analyzes a repository for correctness and performance -https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.repositoryAnalyze(...) @@ -3655,7 +3651,7 @@ client.snapshot.repositoryAnalyze(...) ==== restore Restores a snapshot. -https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.restore(...) @@ -3665,7 +3661,7 @@ client.snapshot.restore(...) ==== status Returns information about the status of a snapshot. -https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.status(...) @@ -3675,7 +3671,7 @@ client.snapshot.status(...) ==== verify_repository Verifies a repository. -https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.verifyRepository(...) @@ -3697,7 +3693,7 @@ client.sql.clearCursor(...) ==== delete_async Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. -https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-async-sql-search-api.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-async-sql-search-api.html[Endpoint documentation] [source,ts] ---- client.sql.deleteAsync(...) @@ -3707,7 +3703,7 @@ client.sql.deleteAsync(...) ==== get_async Returns the current status and available results for an async SQL search or stored synchronous SQL search -https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-api.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-api.html[Endpoint documentation] [source,ts] ---- client.sql.getAsync(...) @@ -3717,7 +3713,7 @@ client.sql.getAsync(...) 
==== get_async_status Returns the current status of an async SQL search or a stored synchronous SQL search -https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-status-api.html[Endpoint documentation] +https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-status-api.html[Endpoint documentation] [source,ts] ---- client.sql.getAsyncStatus(...) From 38e4b23831e6b6d8e5102cbac6d16c5f475c07a4 Mon Sep 17 00:00:00 2001 From: Seth Michael Larson Date: Thu, 3 Nov 2022 11:06:58 -0500 Subject: [PATCH 182/647] Add a changelog for 8.5.0 --- docs/changelog.asciidoc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 61e0913f0..0d1bd8114 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,15 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.5.0 + +[discrete] +===== Support for Elasticsearch `v8.5.0` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.5/release-notes-8.5.0.html[here]. + [discrete] === 8.4.0 From 096ef47d6a3ced835bab82334d53ecbee5aa1519 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 3 Jan 2023 08:47:38 -0500 Subject: [PATCH 183/647] Bumps main to 8.7.0 --- .ci/test-matrix.yml | 2 +- package.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml index 0d081a1bf..318b89fa6 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -1,6 +1,6 @@ --- STACK_VERSION: - - "8.6.0-SNAPSHOT" + - "8.7.0-SNAPSHOT" NODE_JS_VERSION: - 18 diff --git a/package.json b/package.json index 3ea6cc36d..e4ee53470 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.6.0", - "versionCanary": "8.6.0-canary.0", + "version": "8.7.0", + "versionCanary": "8.7.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 09d802a0ac86729a76541722608a1f0aa18e0227 Mon Sep 17 00:00:00 2001 From: Rudolf Meijering Date: Thu, 5 Jan 2023 16:30:06 +0100 Subject: [PATCH 184/647] Bump `@elastic/transport` to 8.3.1 --- package.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index e4ee53470..49ced928f 100644 --- a/package.json +++ b/package.json @@ -81,7 +81,7 @@ "zx": "^6.1.0" }, "dependencies": { - "@elastic/transport": "^8.2.0", + "@elastic/transport": "^8.3.1", "tslib": "^2.4.0" }, "tap": { @@ -91,4 +91,4 @@ "coverage": false, "check-coverage": false } -} \ No newline at end of file +} From c5da6683f70f8c06669659c97bdd4e64897db5f9 Mon Sep 17 00:00:00 2001 From: Seth Michael Larson Date: Tue, 10 Jan 2023 14:14:38 -0600 Subject: [PATCH 185/647] Add release notes for 8.6.0 --- docs/changelog.asciidoc | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 0d1bd8114..e4e245616 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,21 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.6.0 + +[discrete] +===== Bump @elastic/transport to 8.3.1+ https://github.com/elastic/elasticsearch-js/pull/1802[#1802] + +The `@elastic/transport` dependency has been bumped to `~8.3.1` to ensure +fixes to the `maxResponseSize` option are available in the client. 
+ +[discrete] +===== Support for Elasticsearch `v8.6.0` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.6/release-notes-8.6.0.html[here]. + [discrete] === 8.5.0 From f90f4306c291865dcc470064b699fdc46a0ce7d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Laurent=20Saint-F=C3=A9lix?= Date: Thu, 12 Jan 2023 14:52:29 +0100 Subject: [PATCH 186/647] Update bulk.asciidoc (#1752) (#1806) Fix typo Co-authored-by: Ryan Har <31252286+nkwwk@users.noreply.github.com> --- docs/examples/bulk.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/examples/bulk.asciidoc b/docs/examples/bulk.asciidoc index c6117c249..8d470a305 100644 --- a/docs/examples/bulk.asciidoc +++ b/docs/examples/bulk.asciidoc @@ -77,8 +77,8 @@ async function run () { // fix the document before to try it again. status: action[operation].status, error: action[operation].error, - operation: body[i * 2], - document: body[i * 2 + 1] + operation: operations[i * 2], + document: operations[i * 2 + 1] }) } }) From 43ecba4dfa2b3ea681dffa33cba5a8f3dfc76f07 Mon Sep 17 00:00:00 2001 From: Fernando Briano Date: Thu, 9 Feb 2023 07:31:57 +0000 Subject: [PATCH 187/647] Add Buildkite skeleton (#1812) --- .buildkite/pipeline.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .buildkite/pipeline.yml diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml new file mode 100644 index 000000000..b83edb8c3 --- /dev/null +++ b/.buildkite/pipeline.yml @@ -0,0 +1,3 @@ +steps: + - label: ":js: Greetings" + command: "echo 'Hello, world!'" From 948f44a7a0dce274850c768dae7dbb898dbcd988 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Tue, 21 Feb 2023 17:34:38 +0100 Subject: [PATCH 188/647] [DOCS] Includes source_branch in docs index --- docs/index.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.asciidoc b/docs/index.asciidoc index f57b2f8d5..2de0d54a7 100644 --- a/docs/index.asciidoc +++ b/docs/index.asciidoc @@ -1,6 +1,6 @@ = Elasticsearch JavaScript Client -:branch: master +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] include::{asciidoc-dir}/../../shared/attributes.asciidoc[] include::introduction.asciidoc[] From ce37b0fdf154a0b49fde9e7a5a77b77373ea015b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Wed, 29 Mar 2023 19:00:45 +0200 Subject: [PATCH 189/647] [DOCS] Creates custom landing page for the JS client book (#1831) * [DOCS] Creates custom landing page for the JS client book. * [DOCS] Fine-tunes text. * [DOCS] Updates hero image. * [DOCS] Changes page file name. * [DOCS] Adds one-liner to hero. * [DOCS] Reduces padding. * [DOCS] Changes section title. * [DOCS] Fine-tunes text. --- docs/index-custom-title-page.html | 185 ++++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) create mode 100644 docs/index-custom-title-page.html diff --git a/docs/index-custom-title-page.html b/docs/index-custom-title-page.html new file mode 100644 index 000000000..b934f8e0a --- /dev/null +++ b/docs/index-custom-title-page.html @@ -0,0 +1,185 @@
+[185 added lines of HTML omitted: the markup was lost in extraction. The recoverable page copy is a "Documentation" hero with the tagline "The official Node.js client provides one-to-one mapping with Elasticsearch REST APIs.", a "Get to know the JavaScript client" section with "Using the JS client" and "API and developer docs" cards, an "Explore by use case" section, and a closing "View all Elastic docs" link.]
\ No newline at end of file From 996f818b1a96f7e827c03c84f1f3312a93f7de64 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 4 Apr 2023 09:49:07 -0500 Subject: [PATCH 190/647] CI updates (#1835) * Add jobs for testing missing 8.x branches * Drop references to acceptance tests They were removed in 1a227459f096951032b881acce18a01352901096 * Add myself as a contributor :sunglasses: * Clean up shellcheck warnings --- .ci/jobs/elastic+elasticsearch-js+8.3.yml | 15 +++++++++++++++ .ci/jobs/elastic+elasticsearch-js+8.4.yml | 15 +++++++++++++++ .ci/jobs/elastic+elasticsearch-js+8.5.yml | 15 +++++++++++++++ .ci/jobs/elastic+elasticsearch-js+8.6.yml | 15 +++++++++++++++ .ci/jobs/elastic+elasticsearch-js+8.7.yml | 15 +++++++++++++++ .ci/run-repository.sh | 12 ++++++------ .ci/run-tests | 8 ++++---- package.json | 23 ++++++++++++++--------- 8 files changed, 99 insertions(+), 19 deletions(-) create mode 100644 .ci/jobs/elastic+elasticsearch-js+8.3.yml create mode 100644 .ci/jobs/elastic+elasticsearch-js+8.4.yml create mode 100644 .ci/jobs/elastic+elasticsearch-js+8.5.yml create mode 100644 .ci/jobs/elastic+elasticsearch-js+8.6.yml create mode 100644 .ci/jobs/elastic+elasticsearch-js+8.7.yml diff --git a/.ci/jobs/elastic+elasticsearch-js+8.3.yml b/.ci/jobs/elastic+elasticsearch-js+8.3.yml new file mode 100644 index 000000000..43362fc3c --- /dev/null +++ b/.ci/jobs/elastic+elasticsearch-js+8.3.yml @@ -0,0 +1,15 @@ +--- +- job: + name: elastic+elasticsearch-js+8.3 + display-name: 'elastic / elasticsearch-js # 8.3' + description: Testing the elasticsearch-js 8.3 branch. + junit_results: "*-junit.xml" + parameters: + - string: + name: branch_specifier + default: refs/heads/8.3 + description: the Git branch specifier to build (<branchName>, <tagName>, + <commitId>, etc.) + triggers: + - github + - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+8.4.yml b/.ci/jobs/elastic+elasticsearch-js+8.4.yml new file mode 100644 index 000000000..aa83ef601 --- /dev/null +++ b/.ci/jobs/elastic+elasticsearch-js+8.4.yml @@ -0,0 +1,15 @@ +--- +- job: + name: elastic+elasticsearch-js+8.4 + display-name: 'elastic / elasticsearch-js # 8.4' + description: Testing the elasticsearch-js 8.4 branch. + junit_results: "*-junit.xml" + parameters: + - string: + name: branch_specifier + default: refs/heads/8.4 + description: the Git branch specifier to build (<branchName>, <tagName>, + <commitId>, etc.) + triggers: + - github + - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+8.5.yml b/.ci/jobs/elastic+elasticsearch-js+8.5.yml new file mode 100644 index 000000000..def3ba64c --- /dev/null +++ b/.ci/jobs/elastic+elasticsearch-js+8.5.yml @@ -0,0 +1,15 @@ +--- +- job: + name: elastic+elasticsearch-js+8.5 + display-name: 'elastic / elasticsearch-js # 8.5' + description: Testing the elasticsearch-js 8.5 branch. + junit_results: "*-junit.xml" + parameters: + - string: + name: branch_specifier + default: refs/heads/8.5 + description: the Git branch specifier to build (<branchName>, <tagName>, + <commitId>, etc.) + triggers: + - github + - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+8.6.yml b/.ci/jobs/elastic+elasticsearch-js+8.6.yml new file mode 100644 index 000000000..b9d63be7d --- /dev/null +++ b/.ci/jobs/elastic+elasticsearch-js+8.6.yml @@ -0,0 +1,15 @@ +--- +- job: + name: elastic+elasticsearch-js+8.6 + display-name: 'elastic / elasticsearch-js # 8.6' + description: Testing the elasticsearch-js 8.6 branch.
+ junit_results: "*-junit.xml" + parameters: + - string: + name: branch_specifier + default: refs/heads/8.6 + description: the Git branch specifier to build (<branchName>, <tagName>, + <commitId>, etc.) + triggers: + - github + - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+8.7.yml b/.ci/jobs/elastic+elasticsearch-js+8.7.yml new file mode 100644 index 000000000..fb6425583 --- /dev/null +++ b/.ci/jobs/elastic+elasticsearch-js+8.7.yml @@ -0,0 +1,15 @@ +--- +- job: + name: elastic+elasticsearch-js+8.7 + display-name: 'elastic / elasticsearch-js # 8.7' + description: Testing the elasticsearch-js 8.7 branch. + junit_results: "*-junit.xml" + parameters: + - string: + name: branch_specifier + default: refs/heads/8.7 + description: the Git branch specifier to build (<branchName>, <tagName>, + <commitId>, etc.) + triggers: + - github + - timed: 'H */12 * * *' diff --git a/.ci/run-repository.sh b/.ci/run-repository.sh index 9ba13f5df..380d1d899 100755 --- a/.ci/run-repository.sh +++ b/.ci/run-repository.sh @@ -5,8 +5,8 @@ # TEST_SUITE -- which test suite to run: free or platinum # ELASTICSEARCH_URL -- The url at which elasticsearch is reachable, a default is composed based on STACK_VERSION and TEST_SUITE # NODE_JS_VERSION -- node js version (defined in test-matrix.yml, a default is hardcoded here) -script_path=$(dirname $(realpath -s $0)) -source $script_path/functions/imports.sh +script_path=$(dirname "$(realpath -s "$0")") +source "$script_path/functions/imports.sh" set -euo pipefail NODE_JS_VERSION=${NODE_JS_VERSION-16} @@ -24,18 +24,18 @@ echo -e "\033[1m>>>>> Build docker container >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0 docker build \ --file .ci/Dockerfile \ --tag elastic/elasticsearch-js \ - --build-arg NODE_JS_VERSION=${NODE_JS_VERSION} \ + --build-arg NODE_JS_VERSION="${NODE_JS_VERSION}" \ . 
echo -e "\033[1m>>>>> NPM run test:integration >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" -repo=$(realpath $(dirname $(realpath -s $0))/../) +repo=$(realpath "$(dirname "$(realpath -s "$0")")"/../) docker run \ - --network=${network_name} \ + --network="${network_name}" \ --env "TEST_ES_SERVER=${ELASTICSEARCH_URL}" \ --env "TEST_SUITE=${TEST_SUITE}" \ - --volume $repo:/usr/src/app \ + --volume "$repo:/usr/src/app" \ --volume /usr/src/app/node_modules \ --name elasticsearch-js \ --rm \ diff --git a/.ci/run-tests b/.ci/run-tests index 76bb055a3..a43400f61 100755 --- a/.ci/run-tests +++ b/.ci/run-tests @@ -1,10 +1,10 @@ #!/usr/bin/env bash # # Version 1.1 -# - Moved to .ci folder and seperated out `run-repository.sh` +# - Moved to .ci folder and separated out `run-repository.sh` # - Add `$RUNSCRIPTS` env var for running Elasticsearch dependent products -script_path=$(dirname $(realpath -s $0)) -source $script_path/functions/imports.sh +script_path=$(dirname "$(realpath -s "$0")") +source "$script_path/functions/imports.sh" set -euo pipefail echo -e "\033[1m>>>>> Start [$STACK_VERSION container] >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" @@ -15,7 +15,7 @@ if [[ -n "$RUNSCRIPTS" ]]; then echo -e "\033[1m>>>>> Running run-$RUNSCRIPT.sh >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" CONTAINER_NAME=${RUNSCRIPT} \ DETACH=true \ - bash .ci/run-${RUNSCRIPT}.sh + bash ".ci/run-${RUNSCRIPT}.sh" done fi diff --git a/package.json b/package.json index 49ced928f..16dc2c8fb 100644 --- a/package.json +++ b/package.json @@ -6,12 +6,11 @@ "main": "index.js", "types": "index.d.ts", "scripts": { - "test": "npm run build && npm run lint && tap test/{unit,acceptance}/{*,**/*}.test.ts", + "test": "npm run build && npm run lint && tap test/unit/{*,**/*}.test.ts", "test:unit": "npm run build && tap test/unit/{*,**/*}.test.ts", - "test:acceptance": "npm run build && tap test/acceptance/*.test.ts", - "test:coverage-100": "npm run build && tap test/{unit,acceptance}/{*,**/*}.test.ts --coverage --100", - "test:coverage-report": "npm run build && tap test/{unit,acceptance}/{*,**/*}.test.ts --coverage && nyc report --reporter=text-lcov > coverage.lcov", - "test:coverage-ui": "npm run build && tap test/{unit,acceptance}/{*,**/*}.test.ts --coverage --coverage-report=html", + "test:coverage-100": "npm run build && tap test/unit/{*,**/*}.test.ts --coverage --100", + "test:coverage-report": "npm run build && tap test/unit/{*,**/*}.test.ts --coverage && nyc report --reporter=text-lcov > coverage.lcov", + "test:coverage-ui": "npm run build && tap test/unit/{*,**/*}.test.ts --coverage --coverage-report=html", "test:integration": "tsc && node test/integration/index.js", "lint": "ts-standard src", "lint:fix": "ts-standard --fix src", @@ -31,10 +30,16 @@ "client", "index" ], - "author": { - "name": "Tomas Della Vedova", - "company": "Elastic BV" - }, + "contributors": [ + { + "name": "Tomas Della Vedova", + "company": "Elastic BV" + }, + { + "name": "Josh Mock", + "company": "Elastic BV" + } + ], "license": "Apache-2.0", "repository": { "type": "git", From 398a8fd0f1e0af12411de03403421256aeadff07 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 4 Apr 2023 09:49:41 -0500 Subject: [PATCH 191/647] Upgrade deprecated Github action steps (#1836) checkout@v2 and setup-node@v1 were deprecated as they run on Node.js v12. 
--- .github/workflows/nodejs.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index b81f89dec..99514204e 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -13,10 +13,10 @@ jobs: os: [ubuntu-latest, windows-latest, macOS-latest] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v1 + uses: actions/setup-node@v3 with: node-version: ${{ matrix.node-version }} @@ -45,7 +45,7 @@ jobs: # node-version: [12.x, 14.x, 16.x] # steps: - # - uses: actions/checkout@v2 + # - uses: actions/checkout@v3 # - name: Configure sysctl limits # run: | @@ -60,7 +60,7 @@ jobs: # stack-version: 8.0.0-SNAPSHOT # - name: Use Node.js ${{ matrix.node-version }} - # uses: actions/setup-node@v1 + # uses: actions/setup-node@v3 # with: # node-version: ${{ matrix.node-version }} @@ -77,7 +77,7 @@ jobs: # runs-on: ubuntu-latest # steps: - # - uses: actions/checkout@v2 + # - uses: actions/checkout@v3 # - name: Configure sysctl limits # run: | @@ -92,7 +92,7 @@ jobs: # stack-version: 8.0.0-SNAPSHOT # - name: Use Node.js 14.x - # uses: actions/setup-node@v1 + # uses: actions/setup-node@v3 # with: # node-version: 14.x @@ -120,10 +120,10 @@ jobs: # runs-on: ubuntu-latest # steps: - # - uses: actions/checkout@v2 + # - uses: actions/checkout@v3 # - name: Use Node.js 14.x - # uses: actions/setup-node@v1 + # uses: actions/setup-node@v3 # with: # node-version: 14.x @@ -145,10 +145,10 @@ jobs: # node-version: [14.x] # steps: - # - uses: actions/checkout@v2 + # - uses: actions/checkout@v3 # - name: Use Node.js ${{ matrix.node-version }} - # uses: actions/setup-node@v1 + # uses: actions/setup-node@v3 # with: # node-version: ${{ matrix.node-version }} @@ -179,10 +179,10 @@ jobs: node-version: [16.x] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v1 + uses: actions/setup-node@v3 with: node-version: ${{ matrix.node-version }} From a4ceb47e8d13759135ec3e68062f0f50b55f3d67 Mon Sep 17 00:00:00 2001 From: Nico Kokonas Date: Wed, 5 Apr 2023 10:14:43 -0600 Subject: [PATCH 192/647] Update basic-config.asciidoc (#1832) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [8.6] Bump `@elastic/transport` to 8.3.1 Co-authored-by: Rudolf Meijering * [8.6] Add release notes for 8.6.0 Co-authored-by: Seth Michael Larson * Update bulk.asciidoc (#1752) (#1807) Fix typo Co-authored-by: Ryan Har <31252286+nkwwk@users.noreply.github.com> * [8.6] [DOCS] Includes source_branch in docs index * Update basic-config.asciidoc fix missing comma --------- Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Rudolf Meijering Co-authored-by: Seth Michael Larson Co-authored-by: Laurent Saint-Félix Co-authored-by: Ryan Har <31252286+nkwwk@users.noreply.github.com> Co-authored-by: István Zoltán Szabó --- docs/basic-config.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index 04ca7b1ee..ab288f2b1 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -11,7 +11,7 @@ const { Client } = require('@elastic/elasticsearch') const client = new Client({ cloud: { id: '' }, - auth: { apiKey: 'base64EncodedKey' } + auth: { apiKey: 'base64EncodedKey' }, maxRetries: 5, 
requestTimeout: 60000, sniffOnStart: true @@ -267,4 +267,4 @@ _Default:_ `null` |`number` - When configured, it verifies that the compressed response size is lower than the configured number, if it's higher it will abort the request. It cannot be higher than buffer.constants.MAX_LENTGH + _Default:_ `null` -|=== \ No newline at end of file +|=== From 2c277ee2ba7f6d314a88e7932a5b8912e4600fec Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 10 Apr 2023 14:24:14 -0500 Subject: [PATCH 193/647] Fixes to YAML REST integration test suite runner (#1837) * Use more inclusive language * Don't bail on failing tests without --bail * Skip a few more free suite tests * Default to https when running platinum tests * Add make targets for local integration testing * Linter cleanup * Skip some platinum integration tests * Improvements to integration test README * Another free test to skip for now * Continue on non-bail test failure * Output cleanup --- .ci/run-elasticsearch.sh | 8 ++-- Makefile | 12 ++++++ scripts/utils/generateApis.js | 12 +++--- test/integration/README.md | 36 ++++++++++++++---- test/integration/index.js | 70 ++++++++++++++++++++++++++++++----- 5 files changed, 111 insertions(+), 27 deletions(-) create mode 100644 Makefile diff --git a/.ci/run-elasticsearch.sh b/.ci/run-elasticsearch.sh index 3f4e2f1da..2f360ab4f 100755 --- a/.ci/run-elasticsearch.sh +++ b/.ci/run-elasticsearch.sh @@ -22,12 +22,12 @@ # - Use https only when TEST_SUITE is "platinum", when "free" use http # - Set xpack.security.enabled=false for "free" and xpack.security.enabled=true for "platinum" -script_path=$(dirname $(realpath -s $0)) -source $script_path/functions/imports.sh +script_path=$(dirname "$(realpath -s "$0")") +source "$script_path/functions/imports.sh" set -euo pipefail -echo -e "\033[34;1mINFO:\033[0m Take down node if called twice with the same arguments (DETACH=true) or on seperate terminals \033[0m" -cleanup_node $es_node_name +echo -e "\033[34;1mINFO:\033[0m Take down node if called twice with the same arguments (DETACH=true) or on separate terminals \033[0m" +cleanup_node "$es_node_name" master_node_name=${es_node_name} cluster_name=${moniker}${suffix} diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..a374c22db --- /dev/null +++ b/Makefile @@ -0,0 +1,12 @@ +.PHONY: integration-setup +integration-setup: integration-cleanup + DETACH=true .ci/run-elasticsearch.sh + +.PHONY: integration-cleanup +integration-cleanup: + docker stop instance || true + docker volume rm instance-rest-test-data || true + +.PHONY: integration +integration: integration-setup + npm run test:integration diff --git a/scripts/utils/generateApis.js b/scripts/utils/generateApis.js index 53dc1abed..a1dddd063 100644 --- a/scripts/utils/generateApis.js +++ b/scripts/utils/generateApis.js @@ -228,7 +228,7 @@ function generateSingleApi (version, spec, common) { ${genUrlValidation(paths, api)} - let { ${genQueryBlacklist(false)}, ...querystring } = params + let { ${genQueryDenylist(false)}, ...querystring } = params querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) let path = '' @@ -316,20 +316,20 @@ function generateSingleApi (version, spec, common) { }, {}) } - function genQueryBlacklist (addQuotes = true) { + function genQueryDenylist (addQuotes = true) { const toCamelCase = str => { return str[0] === '_' ? 
'_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) } - const blacklist = ['method', 'body'] + const denylist = ['method', 'body'] parts.forEach(p => { const camelStr = toCamelCase(p) - if (camelStr !== p) blacklist.push(`${camelStr}`) - blacklist.push(`${p}`) + if (camelStr !== p) denylist.push(`${camelStr}`) + denylist.push(`${p}`) }) - return addQuotes ? blacklist.map(q => `'${q}'`) : blacklist + return addQuotes ? denylist.map(q => `'${q}'`) : denylist } function buildPath () { diff --git a/test/integration/README.md b/test/integration/README.md index 0861dd8b9..a52ae2e54 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -5,8 +5,8 @@ Yes. ## Background -Elasticsearch offers its entire API via HTTP REST endpoints. You can find the whole API specification for every version [here](https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/api).
-To support different languages at the same time, the Elasticsearch team decided to provide a [YAML specification](https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/test) to test every endpoint, body, headers, warning, error and so on.
+Elasticsearch offers its entire API via HTTP REST endpoints. You can find the whole API specification for every version [here](https://github.com/elastic/elasticsearch/tree/main/rest-api-spec/src/main/resources/rest-api-spec/api).
+To support different languages at the same time, the Elasticsearch team decided to provide a [YAML specification](https://github.com/elastic/elasticsearch/tree/main/rest-api-spec/src/main/resources/rest-api-spec/test) to test every endpoint, body, headers, warning, error and so on.
This testing suite uses that specification to generate the test for the specified version of Elasticsearch on the fly. ## Run @@ -20,20 +20,45 @@ Once the Elasticsearch repository has been cloned, the testing suite will connec The specification does not allow the test to be run in parallel, so it might take a while to run the entire testing suite; on my machine, `MacBookPro15,2 core i7 2.7GHz 16GB of RAM` it takes around four minutes. +### Running locally + +If you want to run the integration tests on your development machine, you must have an Elasticsearch instance running first. +A local instance can be spun up in a Docker container by running the [`.ci/run-elasticsearch.sh`](/.ci/run-elasticsearch.sh) script. +This is the same script CI jobs use to run Elasticsearch for integration tests, so your results should be relatively consistent. + +To simplify the process of starting a container, testing, and cleaning up the container, you can run the `make integration` target: + +```sh +# set some parameters +export STACK_VERSION=8.7.0 +export TEST_SUITE=free # can be `free` or `platinum` +make integration +``` + +If Elasticsearch doesn't come up, run `make integration-cleanup` and then `DETACH=false .ci/run-elasticsearch.sh` manually to read the startup logs. + +If you get an error about `vm.max_map_count` being too low, run `sudo sysctl -w vm.max_map_count=262144` to update the setting until the next reboot, or `sudo sysctl -w vm.max_map_count=262144 | sudo tee -a /etc/sysctl.conf` to update the setting permanently. + ### Exit on the first failure -Bu default the suite will run all the test, even if one assertion has failed. If you want to stop the test at the first failure, use the bailout option: + +By default the suite will run all the tests, even if one assertion has failed. If you want to stop the test at the first failure, use the bailout option: + ```sh npm run test:integration -- --bail ``` ### Calculate the code coverage + If you want to calculate the code coverage just run the testing suite with the following parameters, once the test ends, it will open a browser window with the results. + ```sh npm run test:integration -- --cov --coverage-report=html ``` ## How does this thing work? + At first sight, it might seem complicated, but once you understand what the moving parts are, it's quite easy. + 1. Connects to the given Elasticsearch instance 1. Gets the ES version and build hash 1. Checkout to the given hash (and clone the repository if it is not present) @@ -46,7 +71,4 @@ At first sight, it might seem complicated, but once you understand what the movi Inside the `index.js` file, you will find the connection, cloning, reading and parsing part of the test, while inside the `test-runner.js` file you will find the function to handle the assertions. Inside `test-runner.js`, we use a [queue](https://github.com/delvedor/workq) to be sure that everything is run in the correct order. -Checkout the [rest-api-spec readme](https://github.com/elastic/elasticsearch/blob/master/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc) if you want to know more about how the assertions work. - -#### Why are we running the test with the `--harmony` flag? -Because on Node v6 the regex lookbehinds are not supported. +Check out the [rest-api-spec readme](https://github.com/elastic/elasticsearch/blob/main/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc) if you want to know more about how the assertions work. 
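As a rough sketch of the parsing step this README describes — using the same `js-yaml` dependency the runner imports, with a placeholder file path — loading one spec file looks like:

```js
const { readFileSync } = require('fs')
const yaml = require('js-yaml')

// A rest-api-spec test file can hold several YAML documents: optional
// setup/teardown blocks plus one document per named test.
const documents = yaml.loadAll(readFileSync('./10_basic.yml', 'utf8'))
for (const doc of documents) {
  console.log(Object.keys(doc)[0]) // e.g. 'setup', 'teardown', or a test name
}
```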
diff --git a/test/integration/index.js b/test/integration/index.js index c794beb6c..5c4addcb7 100644 --- a/test/integration/index.js +++ b/test/integration/index.js @@ -27,6 +27,7 @@ process.on('unhandledRejection', function (err) { const { writeFileSync, readFileSync, readdirSync, statSync } = require('fs') const { join, sep } = require('path') const yaml = require('js-yaml') +const minimist = require('minimist') const ms = require('ms') const { Client } = require('../../index') const { kProductCheck } = require('@elastic/transport/lib/symbols') @@ -42,12 +43,24 @@ const MAX_API_TIME = 1000 * 90 const MAX_FILE_TIME = 1000 * 30 const MAX_TEST_TIME = 1000 * 3 +const options = minimist(process.argv.slice(2), { + boolean: ['bail'] +}) + const freeSkips = { // not supported yet '/free/cluster.desired_nodes/10_basic.yml': ['*'], + + // Cannot find methods on `Internal` object + '/free/cluster.desired_balance/10_basic.yml': ['*'], + '/free/cluster.desired_nodes/20_dry_run.yml': ['*'], + '/free/cluster.prevalidate_node_removal/10_basic.yml': ['*'], + '/free/health/30_feature.yml': ['*'], '/free/health/40_useractions.yml': ['*'], - // the v8 client never sends the scroll_id in querystgring, + '/free/health/40_diagnosis.yml': ['Diagnosis'], + + // the v8 client never sends the scroll_id in querystring, // the way the test is structured causes a security exception 'free/scroll/10_basic.yml': ['Body params override query string'], 'free/scroll/11_clear.yml': [ @@ -56,80 +69,99 @@ const freeSkips = { ], 'free/cat.allocation/10_basic.yml': ['*'], 'free/cat.snapshots/10_basic.yml': ['Test cat snapshots output'], + // TODO: remove this once 'arbitrary_key' is implemented // https://github.com/elastic/elasticsearch/pull/41492 'indices.split/30_copy_settings.yml': ['*'], 'indices.stats/50_disk_usage.yml': ['Disk usage stats'], 'indices.stats/60_field_usage.yml': ['Field usage stats'], + // skipping because we are booting ES with `discovery.type=single-node` // and this test will fail because of this configuration 'nodes.stats/30_discovery.yml': ['*'], + // the expected error is returning a 503, // which triggers a retry and the node to be marked as dead 'search.aggregation/240_max_buckets.yml': ['*'], + // long values and json do not play nicely together 'search.aggregation/40_range.yml': ['Min and max long range bounds'], + // the yaml runner assumes that null means "does not exists", // while null is a valid json value, so the check will fail 'search/320_disallow_queries.yml': ['Test disallow expensive queries'], - 'free/tsdb/90_unsupported_operations.yml': ['noop update'] + 'free/tsdb/90_unsupported_operations.yml': ['noop update'], } -const platinumBlackList = { + +const platinumDenyList = { 'api_key/10_basic.yml': ['Test get api key'], 'api_key/20_query.yml': ['*'], 'api_key/11_invalidation.yml': ['Test invalidate api key by realm name'], 'analytics/histogram.yml': ['Histogram requires values in increasing order'], + // this two test cases are broken, we should // return on those in the future. 
'analytics/top_metrics.yml': [ 'sort by keyword field fails', 'sort by string script fails' ], + 'cat.aliases/10_basic.yml': ['Empty cluster'], 'index/10_with_id.yml': ['Index with ID'], 'indices.get_alias/10_basic.yml': ['Get alias against closed indices'], 'indices.get_alias/20_empty.yml': ['Check empty aliases when getting all aliases via /_alias'], 'text_structure/find_structure.yml': ['*'], + // https://github.com/elastic/elasticsearch/pull/39400 'ml/jobs_crud.yml': ['Test put job with id that is already taken'], + // object keys must me strings, and `0.0.toString()` is `0` 'ml/evaluate_data_frame.yml': [ 'Test binary_soft_classifition precision', 'Test binary_soft_classifition recall', 'Test binary_soft_classifition confusion_matrix' ], + // it gets random failures on CI, must investigate 'ml/set_upgrade_mode.yml': [ 'Attempt to open job when upgrade_mode is enabled', 'Setting upgrade mode to disabled from enabled' ], + // The cleanup fails with a index not found when retrieving the jobs 'ml/get_datafeed_stats.yml': ['Test get datafeed stats when total_search_time_ms mapping is missing'], 'ml/bucket_correlation_agg.yml': ['Test correlation bucket agg simple'], + // start should be a string 'ml/jobs_get_result_overall_buckets.yml': ['Test overall buckets given epoch start and end params'], + // this can't happen with the client 'ml/start_data_frame_analytics.yml': ['Test start with inconsistent body/param ids'], 'ml/stop_data_frame_analytics.yml': ['Test stop with inconsistent body/param ids'], 'ml/preview_datafeed.yml': ['*'], + // Investigate why is failing 'ml/inference_crud.yml': ['*'], 'ml/categorization_agg.yml': ['Test categorization aggregation with poor settings'], 'ml/filter_crud.yml': ['*'], + // investigate why this is failing 'monitoring/bulk/10_basic.yml': ['*'], 'monitoring/bulk/20_privileges.yml': ['*'], 'license/20_put_license.yml': ['*'], 'snapshot/10_basic.yml': ['*'], 'snapshot/20_operator_privileges_disabled.yml': ['*'], + // the body is correct, but the regex is failing 'sql/sql.yml': ['Getting textual representation'], 'searchable_snapshots/10_usage.yml': ['*'], 'service_accounts/10_basic.yml': ['*'], + // we are setting two certificates in the docker config 'ssl/10_basic.yml': ['*'], 'token/10_basic.yml': ['*'], 'token/11_invalidation.yml': ['*'], + // very likely, the index template has not been loaded yet. // we should run a indices.existsTemplate, but the name of the // template may vary during time. @@ -147,16 +179,20 @@ const platinumBlackList = { 'transforms_stats.yml': ['*'], 'transforms_stats_continuous.yml': ['*'], 'transforms_update.yml': ['*'], + // js does not support ulongs 'unsigned_long/10_basic.yml': ['*'], 'unsigned_long/20_null_value.yml': ['*'], 'unsigned_long/30_multi_fields.yml': ['*'], 'unsigned_long/40_different_numeric.yml': ['*'], 'unsigned_long/50_script_values.yml': ['*'], + // the v8 client flattens the body into the parent object 'platinum/users/10_basic.yml': ['Test put user with different username in body'], + // docker issue? 
'watcher/execute_watch/60_http_input.yml': ['*'], + // the checks are correct, but for some reason the test is failing on js side // I bet is because the backslashes in the rg 'watcher/execute_watch/70_invalid.yml': ['*'], @@ -170,8 +206,16 @@ 'platinum/ml/delete_job_force.yml': ['Test force delete an open job that is referred by a started datafeed'], 'platinum/ml/evaluate_data_frame.yml': ['*'], 'platinum/ml/get_datafeed_stats.yml': ['*'], + // start should be a string in the yaml test - 'platinum/ml/start_stop_datafeed.yml': ['*'] + 'platinum/ml/start_stop_datafeed.yml': ['*'], + + // health API not yet supported + '/platinum/health/10_usage.yml': ['*'], + + // ML update_trained_model_deployment not supported yet + '/platinum/ml/3rd_party_deployment.yml': ['Test update deployment'], + '/platinum/ml/update_trained_model_deployment.yml': ['Test with unknown model id'] } function runner (opts = {}) { @@ -316,7 +360,12 @@ async function start ({ client, isXPack }) { junitTestSuites.end() generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free') console.error(err) - process.exit(1) + + if (options.bail) { + process.exit(1) + } else { + continue + } } const totalTestTime = now() - testTime junitTestCase.end() @@ -380,7 +429,8 @@ function generateJunitXmlReport (junit, suite) { } if (require.main === module) { - const node = process.env.TEST_ES_SERVER || 'http://elastic:changeme@localhost:9200/' + const scheme = process.env.TEST_SUITE === 'platinum' ? 'https' : 'http' + const node = process.env.TEST_ES_SERVER || `${scheme}://elastic:changeme@localhost:9200` const opts = { node, isXPack: process.env.TEST_SUITE !== 'free' } @@ -395,20 +445,20 @@ const shouldSkip = (isXPack, file, name) => { for (let j = 0; j < freeTest.length; j++) { if (file.endsWith(list[i]) && (name === freeTest[j] || freeTest[j] === '*')) { const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name - log(`Skipping test ${testName} because is blacklisted in the free test`) + log(`Skipping test ${testName} because it is denylisted in the free test suite`) return true } } } if (file.includes('x-pack') || isXPack) { - list = Object.keys(platinumBlackList) + list = Object.keys(platinumDenyList) for (let i = 0; i < list.length; i++) { - const platTest = platinumBlackList[list[i]] + const platTest = platinumDenyList[list[i]] for (let j = 0; j < platTest.length; j++) { if (file.endsWith(list[i]) && (name === platTest[j] || platTest[j] === '*')) { const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name - log(`Skipping test ${testName} because is blacklisted in the platinum test`) + log(`Skipping test ${testName} because it is denylisted in the platinum test suite`) return true } } From c82005855fa72bc37c423fca4a2c6af3eea60aa2 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 13 Apr 2023 11:26:29 -0500 Subject: [PATCH 194/647] Integration test cleanup (#1841) * Fix bad sysctl command in README * Add --suite and --test flags to integration tests So we can run a single suite or a single test without having to edit any code. * Drop several skipped integration tests Many of these skips are no longer necessary. Didn't do an exhaustive check of all skipped tests, so this is just a start. 
* Simplify cleanup make target --- Makefile | 3 +-- test/integration/README.md | 2 +- test/integration/index.js | 52 +++++++++----------------------------- 3 files changed, 14 insertions(+), 43 deletions(-) diff --git a/Makefile b/Makefile index a374c22db..be93e1de9 100644 --- a/Makefile +++ b/Makefile @@ -4,8 +4,7 @@ integration-setup: integration-cleanup .PHONY: integration-cleanup integration-cleanup: - docker stop instance || true - docker volume rm instance-rest-test-data || true + docker container rm --force --volumes instance || true .PHONY: integration integration: integration-setup diff --git a/test/integration/README.md b/test/integration/README.md index a52ae2e54..36b011975 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -37,7 +37,7 @@ make integration If Elasticsearch doesn't come up, run `make integration-cleanup` and then `DETACH=false .ci/run-elasticsearch.sh` manually to read the startup logs. -If you get an error about `vm.max_map_count` being too low, run `sudo sysctl -w vm.max_map_count=262144` to update the setting until the next reboot, or `sudo sysctl -w vm.max_map_count=262144 | sudo tee -a /etc/sysctl.conf` to update the setting permanently. +If you get an error about `vm.max_map_count` being too low, run `sudo sysctl -w vm.max_map_count=262144` to update the setting until the next reboot, or `sudo sysctl -w vm.max_map_count=262144; echo 'vm.max_map_count=262144' | sudo tee -a /etc/sysctl.conf` to update the setting permanently. ### Exit on the first failure diff --git a/test/integration/index.js b/test/integration/index.js index 5c4addcb7..5f92eee21 100644 --- a/test/integration/index.js +++ b/test/integration/index.js @@ -30,7 +30,6 @@ const yaml = require('js-yaml') const minimist = require('minimist') const ms = require('ms') const { Client } = require('../../index') -const { kProductCheck } = require('@elastic/transport/lib/symbols') const build = require('./test-runner') const { sleep } = require('./helper') const createJunitReporter = require('./reporter') @@ -44,7 +43,8 @@ const MAX_FILE_TIME = 1000 * 30 const MAX_TEST_TIME = 1000 * 3 const options = minimist(process.argv.slice(2), { - boolean: ['bail'] + boolean: ['bail'], + string: ['suite', 'test'], }) const freeSkips = { @@ -56,10 +56,6 @@ const freeSkips = { '/free/cluster.desired_nodes/20_dry_run.yml': ['*'], '/free/cluster.prevalidate_node_removal/10_basic.yml': ['*'], - '/free/health/30_feature.yml': ['*'], - '/free/health/40_useractions.yml': ['*'], - '/free/health/40_diagnosis.yml': ['Diagnosis'], - // the v8 client never sends the scroll_id in querystring, // the way the test is structured causes a security exception 'free/scroll/10_basic.yml': ['Body params override query string'], @@ -70,9 +66,6 @@ const freeSkips = { 'free/cat.allocation/10_basic.yml': ['*'], 'free/cat.snapshots/10_basic.yml': ['Test cat snapshots output'], - // TODO: remove this once 'arbitrary_key' is implemented - // https://github.com/elastic/elasticsearch/pull/41492 - 'indices.split/30_copy_settings.yml': ['*'], 'indices.stats/50_disk_usage.yml': ['Disk usage stats'], 'indices.stats/60_field_usage.yml': ['Field usage stats'], @@ -99,22 +92,6 @@ const platinumDenyList = { 'api_key/11_invalidation.yml': ['Test invalidate api key by realm name'], 'analytics/histogram.yml': ['Histogram requires values in increasing order'], - // this two test cases are broken, we should - // return on those in the future. 
- 'analytics/top_metrics.yml': [ - 'sort by keyword field fails', - 'sort by string script fails' - ], - - 'cat.aliases/10_basic.yml': ['Empty cluster'], - 'index/10_with_id.yml': ['Index with ID'], - 'indices.get_alias/10_basic.yml': ['Get alias against closed indices'], - 'indices.get_alias/20_empty.yml': ['Check empty aliases when getting all aliases via /_alias'], - 'text_structure/find_structure.yml': ['*'], - - // https://github.com/elastic/elasticsearch/pull/39400 - 'ml/jobs_crud.yml': ['Test put job with id that is already taken'], - // object keys must me strings, and `0.0.toString()` is `0` 'ml/evaluate_data_frame.yml': [ 'Test binary_soft_classifition precision', @@ -122,12 +99,6 @@ const platinumDenyList = { 'Test binary_soft_classifition confusion_matrix' ], - // it gets random failures on CI, must investigate - 'ml/set_upgrade_mode.yml': [ - 'Attempt to open job when upgrade_mode is enabled', - 'Setting upgrade mode to disabled from enabled' - ], - // The cleanup fails with a index not found when retrieving the jobs 'ml/get_datafeed_stats.yml': ['Test get datafeed stats when total_search_time_ms mapping is missing'], 'ml/bucket_correlation_agg.yml': ['Test correlation bucket agg simple'], @@ -209,13 +180,6 @@ const platinumDenyList = { // start should be a string in the yaml test 'platinum/ml/start_stop_datafeed.yml': ['*'], - - // health API not yet supported - '/platinum/health/10_usage.yml': ['*'], - - // ML update_trained_model_deployment not supported yet - '/platinum/ml/3rd_party_deployment.yml': ['Test update deployment'], - '/platinum/ml/update_trained_model_deployment.yml': ['Test with unknown model id'] } function runner (opts = {}) { @@ -227,8 +191,6 @@ function runner (opts = {}) { } } const client = new Client(options) - // TODO: remove the following line once https://github.com/elastic/elasticsearch/issues/82358 is fixed - client.transport[kProductCheck] = null log('Loading yaml suite') start({ client, isXPack: opts.isXPack }) .catch(err => { @@ -333,13 +295,21 @@ async function start ({ client, isXPack }) { } const cleanPath = file.slice(file.lastIndexOf(apiName)) + + // skip if --suite CLI arg doesn't match + if (options.suite && !cleanPath.endsWith(options.suite)) continue + log(' ' + cleanPath) const junitTestSuite = junitTestSuites.testsuite(apiName.slice(1) + ' - ' + cleanPath) for (const test of tests) { const testTime = now() const name = Object.keys(test)[0] + + // skip setups, teardowns and anything that doesn't match --test flag when present if (name === 'setup' || name === 'teardown') continue + if (options.test && !name.endsWith(options.test)) continue + const junitTestCase = junitTestSuite.testcase(name) stats.total += 1 @@ -439,6 +409,8 @@ if (require.main === module) { } const shouldSkip = (isXPack, file, name) => { + if (options.suite || options.test) return false + let list = Object.keys(freeSkips) for (let i = 0; i < list.length; i++) { const freeTest = freeSkips[list[i]] From a927c5c14c86ac369d00dae60a0180a661377465 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 13 Apr 2023 14:52:57 -0500 Subject: [PATCH 195/647] Canary release script cleanup (#1843) * add help messaging to canary release script * Don't require OTP during a dry run * Linter/whitespace cleanup --- scripts/release-canary.js | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/scripts/release-canary.js b/scripts/release-canary.js index bac865393..3afcf3983 100644 --- a/scripts/release-canary.js +++ 
b/scripts/release-canary.js @@ -22,11 +22,25 @@ const { join } = require('path') const minimist = require('minimist') const chalk = require('chalk') +const helpMessage = `usage: node scripts/release-canary.js [options] + + --otp One-time password (required) + --reset Reset the canary version to 1 + --dry-run Run everything but don't actually publish + -h, --help Show this help message` + async function release (opts) { + if (opts.help) { + console.log(helpMessage) + process.exit(0) + } + assert(process.cwd() !== __dirname, 'You should run the script from the top level directory of the repository') - assert(typeof opts.otp === 'string', 'Missing OTP') - const packageJson = JSON.parse(await readFile(join(__dirname, '..', 'package.json'), 'utf8')) + if (!opts['dry-run']) { + assert(typeof opts.otp === 'string', 'Missing OTP') + } + const packageJson = JSON.parse(await readFile(join(__dirname, '..', 'package.json'), 'utf8')) const originalName = packageJson.name const originalVersion = packageJson.version const currentCanaryVersion = packageJson.versionCanary @@ -52,6 +66,7 @@ async function release (opts) { const diff = execSync('git diff').toString().split('\n').map(colorDiff).join('\n') console.log(diff) const answer = await confirm() + // release on npm with provided otp if (answer) { execSync(`npm publish --otp ${opts.otp} ${opts['dry-run'] ? '--dry-run' : ''}`, { stdio: 'inherit' }) @@ -73,8 +88,8 @@ async function release (opts) { ) } -function confirm (question) { - return new Promise((resolve, reject) => { +function confirm () { + return new Promise((resolve) => { const rl = readline.createInterface({ input: process.stdin, output: process.stdout @@ -110,12 +125,18 @@ release( boolean: [ // Reset the canary version to '1' 'reset', - // run all the steps but publish - 'dry-run' - ] + + // run all the steps but don't publish + 'dry-run', + + // help text + 'help', + ], + alias: { help: 'h' }, }) ) .catch(err => { console.log(err) + console.log('\n' + helpMessage) process.exit(1) }) From 230cb774192d09e89603738ec60ca1a7af5812d4 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 19 Apr 2023 13:59:03 -0500 Subject: [PATCH 196/647] Bumps main to 8.7.1 (#1849) --- .ci/test-matrix.yml | 2 +- package.json | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml index 318b89fa6..6115cdf2c 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -1,6 +1,6 @@ --- STACK_VERSION: - - "8.7.0-SNAPSHOT" + - "8.7.1-SNAPSHOT" NODE_JS_VERSION: - 18 diff --git a/package.json b/package.json index 16dc2c8fb..09cc5f0e1 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.7.0", - "versionCanary": "8.7.0-canary.0", + "version": "8.7.1", + "versionCanary": "8.7.1-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", @@ -96,4 +96,4 @@ "coverage": false, "check-coverage": false } -} +} \ No newline at end of file From 0d4a29171abbf63e8bf2ebf8276f90e18866c4ee Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 19 Apr 2023 14:49:09 -0500 Subject: [PATCH 197/647] Bumps main to 8.8.0 (#1851) --- .ci/test-matrix.yml | 2 +- package.json | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml index 6115cdf2c..e6a3f19dd 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -1,6 +1,6 @@ --- STACK_VERSION: - - "8.7.1-SNAPSHOT" + - "8.8.0-SNAPSHOT" NODE_JS_VERSION: - 18 diff 
--git a/package.json b/package.json index 09cc5f0e1..e7ce54574 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.7.1", - "versionCanary": "8.7.1-canary.0", + "version": "8.8.0", + "versionCanary": "8.8.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", @@ -96,4 +96,4 @@ "coverage": false, "check-coverage": false } -} \ No newline at end of file +} From 41cc5fa4dd76403069b887d477998d056f2426ea Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 19 Apr 2023 14:53:52 -0500 Subject: [PATCH 198/647] Fixes to make.sh (#1850) * Get make.sh targets running on Node.js v18 * Catch up make.sh to match other clients * Readability tweaks to make.mjs code --- .ci/Dockerfile | 2 +- .ci/make.mjs | 10 +++++---- .ci/make.sh | 59 ++++++++++++++++++++++++++++---------------------- 3 files changed, 40 insertions(+), 31 deletions(-) diff --git a/.ci/Dockerfile b/.ci/Dockerfile index 81f8ae8f2..c54dd761a 100644 --- a/.ci/Dockerfile +++ b/.ci/Dockerfile @@ -1,4 +1,4 @@ -ARG NODE_JS_VERSION=16 +ARG NODE_JS_VERSION=18 FROM node:${NODE_JS_VERSION} # Create app directory diff --git a/.ci/make.mjs b/.ci/make.mjs index 1695a4869..adcc8e92e 100644 --- a/.ci/make.mjs +++ b/.ci/make.mjs @@ -96,21 +96,23 @@ async function bump (args) { // this command can only be executed locally for now async function codegen (args) { - assert(args.length === 1, 'Bump task expects one parameter') - const clientGeneratorPath = join(import.meta.url, '..', '..', 'elastic-client-generator-js') + assert(args.length === 1, 'Codegen task expects one parameter') const [version] = args + const clientGeneratorPath = join(import.meta.url, '..', '..', 'elastic-client-generator-js') const isGeneratorCloned = await $`[[ -d ${clientGeneratorPath} ]]`.exitCode === 0 assert(isGeneratorCloned, 'You must clone the elastic-client-generator-js first') await $`npm install --prefix ${clientGeneratorPath}` - // this command will take a while! + + // generate elasticsearch client. this command will take a while! if (version === 'main') { await $`npm run elasticsearch --prefix ${clientGeneratorPath} -- --version main` } else { await $`npm run elasticsearch --prefix ${clientGeneratorPath} -- --version ${version.split('.').slice(0, 2).join('.')}` } - await $`npm run lint --prefix ${clientGeneratorPath}` + // clean up fixable linter issues + await $`npm run fix --prefix ${clientGeneratorPath}` await $`rm -rf ${join(import.meta.url, '..', 'src', 'api')}` await $`mkdir ${join(import.meta.url, '..', 'src', 'api')}` diff --git a/.ci/make.sh b/.ci/make.sh index 39755599f..70c6f71e9 100755 --- a/.ci/make.sh +++ b/.ci/make.sh @@ -1,9 +1,7 @@ #!/usr/bin/env bash - # ------------------------------------------------------- # # -# Skeleton for common build entry script for all elastic -# clients. Needs to be adapted to individual client usage. 
+# Build entry script for elasticsearch-js # # Must be called: ./.ci/make.sh # @@ -11,19 +9,19 @@ # # Targets: # --------------------------- -# assemble : build client artefacts with version -# bump : bump client internals to version -# codegen : generate endpoints -# docsgen : generate documentation -# examplegen : generate the doc examples -# clean : clean workspace +# assemble : build client artifacts with version +# bump : bump client internals to version +# bumpmatrix : bump stack version in test matrix to version +# codegen : generate endpoints +# docsgen : generate documentation +# examplegen : generate the doc examples +# clean : clean workspace # # ------------------------------------------------------- # # ------------------------------------------------------- # # Bootstrap # ------------------------------------------------------- # - script_path=$(dirname "$(realpath -s "$0")") repo=$(realpath "$script_path/../") generator=$(realpath "$script_path/../../elastic-client-generator-js") @@ -34,24 +32,21 @@ TASK=$1 TASK_ARGS=() VERSION=$2 STACK_VERSION=$VERSION -NODE_JS_VERSION=16 -WORKFLOW=${WORKFLOW-staging} set -euo pipefail product="elastic/elasticsearch-js" output_folder=".ci/output" +codegen_folder=".ci/output" OUTPUT_DIR="$repo/${output_folder}" -REPO_BINDING="${OUTPUT_DIR}:/sln/${output_folder}" +# REPO_BINDING="${OUTPUT_DIR}:/sln/${output_folder}" +NODE_JS_VERSION=18 +WORKFLOW=${WORKFLOW-staging} mkdir -p "$OUTPUT_DIR" echo -e "\033[34;1mINFO:\033[0m PRODUCT ${product}\033[0m" echo -e "\033[34;1mINFO:\033[0m VERSION ${STACK_VERSION}\033[0m" echo -e "\033[34;1mINFO:\033[0m OUTPUT_DIR ${OUTPUT_DIR}\033[0m" -# ------------------------------------------------------- # -# Parse Command -# ------------------------------------------------------- # - case $CMD in clean) echo -e "\033[36;1mTARGET: clean workspace $output_folder\033[0m" @@ -104,8 +99,21 @@ case $CMD in # VERSION is BRANCH here for now TASK_ARGS=("$VERSION") ;; + bumpmatrix) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: bumpmatrix -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: bump stack in test matrix to version $VERSION\033[0m" + TASK=bumpmatrix + TASK_ARGS=("$VERSION") + ;; *) - echo -e "\nUsage:\n\t $CMD is not supported right now\n" + echo -e "\n'$CMD' is not supported right now\n" + echo -e "\nUsage:" + echo -e "\t $0 release \$VERSION\n" + echo -e "\t $0 bump \$VERSION" + echo -e "\t $0 codegen \$VERSION" exit 1 esac @@ -118,10 +126,8 @@ echo -e "\033[34;1mINFO: building $product container\033[0m" docker build \ --file .ci/Dockerfile \ - --tag ${product} \ - --build-arg NODE_JS_VERSION=${NODE_JS_VERSION} \ - --build-arg USER_ID="$(id -u)" \ - --build-arg GROUP_ID="$(id -g)" \ + --tag "$product" \ + --build-arg NODE_JS_VERSION="$NODE_JS_VERSION" \ . 
# ------------------------------------------------------- # @@ -131,14 +137,15 @@ docker build \ echo -e "\033[34;1mINFO: running $product container\033[0m" docker run \ - --volume $repo:/usr/src/app \ - --volume $generator:/usr/src/elastic-client-generator-js \ + --volume "$repo:/usr/src/app" \ + --volume "$generator:/usr/src/elastic-client-generator-js" \ --volume /usr/src/app/node_modules \ - --env "WORKFLOW=${WORKFLOW}" \ + -u "$(id -u):$(id -g)" \ + --env "WORKFLOW=$WORKFLOW" \ --name make-elasticsearch-js \ --rm \ $product \ - node .ci/make.mjs --task $TASK ${TASK_ARGS[*]} + node .ci/make.mjs --task $TASK "${TASK_ARGS[@]}" # ------------------------------------------------------- # # Post Command tasks & checks From 583b80bdfa766eb885956b6e575231769fb33f17 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 1 May 2023 11:14:13 -0500 Subject: [PATCH 199/647] Add CI job for 8.8 branch (#1857) * Add CI automation for 8.8 branch * Drop tests for old minor versions --- .ci/jobs/elastic+elasticsearch-js+8.2.yml | 15 --------------- .ci/jobs/elastic+elasticsearch-js+8.3.yml | 15 --------------- .ci/jobs/elastic+elasticsearch-js+8.4.yml | 15 --------------- ...s+8.1.yml => elastic+elasticsearch-js+8.8.yml} | 8 ++++---- 4 files changed, 4 insertions(+), 49 deletions(-) delete mode 100644 .ci/jobs/elastic+elasticsearch-js+8.2.yml delete mode 100644 .ci/jobs/elastic+elasticsearch-js+8.3.yml delete mode 100644 .ci/jobs/elastic+elasticsearch-js+8.4.yml rename .ci/jobs/{elastic+elasticsearch-js+8.1.yml => elastic+elasticsearch-js+8.8.yml} (61%) diff --git a/.ci/jobs/elastic+elasticsearch-js+8.2.yml b/.ci/jobs/elastic+elasticsearch-js+8.2.yml deleted file mode 100644 index 2c389b017..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+8.2.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+8.2 - display-name: 'elastic / elasticsearch-js # 8.2' - description: Testing the elasticsearch-js 8.2 branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/8.2 - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+8.3.yml b/.ci/jobs/elastic+elasticsearch-js+8.3.yml deleted file mode 100644 index 43362fc3c..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+8.3.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+8.3 - display-name: 'elastic / elasticsearch-js # 8.3' - description: Testing the elasticsearch-js 8.3 branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/8.3 - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+8.4.yml b/.ci/jobs/elastic+elasticsearch-js+8.4.yml deleted file mode 100644 index aa83ef601..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+8.4.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+8.4 - display-name: 'elastic / elasticsearch-js # 8.3' - description: Testing the elasticsearch-js 8.4 branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/8.4 - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) 
- triggers: - - github - - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+8.1.yml b/.ci/jobs/elastic+elasticsearch-js+8.8.yml similarity index 61% rename from .ci/jobs/elastic+elasticsearch-js+8.1.yml rename to .ci/jobs/elastic+elasticsearch-js+8.8.yml index 61e86ad5a..786794f1f 100644 --- a/.ci/jobs/elastic+elasticsearch-js+8.1.yml +++ b/.ci/jobs/elastic+elasticsearch-js+8.8.yml @@ -1,13 +1,13 @@ --- - job: - name: elastic+elasticsearch-js+8.1 - display-name: 'elastic / elasticsearch-js # 8.1' - description: Testing the elasticsearch-js 8.1 branch. + name: elastic+elasticsearch-js+8.8 + display-name: 'elastic / elasticsearch-js # 8.8' + description: Testing the elasticsearch-js 8.8 branch. junit_results: "*-junit.xml" parameters: - string: name: branch_specifier - default: refs/heads/8.1 + default: refs/heads/8.8 description: the Git branch specifier to build (<branchName>, <tagName>, <commitId>, etc.) triggers: From 19d2ee324f51c20ee599d075e07f64face011f92 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 1 May 2023 16:00:56 -0500 Subject: [PATCH 200/647] Update APIs from main (#1856) --- .ci/make.mjs | 2 +- .ci/make.sh | 2 +- docs/reference.asciidoc | 948 ++++++++++++++++++------------ src/api/api/cluster.ts | 2 +- src/api/api/field_caps.ts | 11 +- src/api/api/health_report.ts | 68 +++ src/api/api/indices.ts | 109 ++++ src/api/api/license.ts | 9 +- src/api/api/logstash.ts | 11 +- src/api/api/ml.ts | 38 +- src/api/api/rollup.ts | 4 +- src/api/api/search_application.ts | 268 +++++++++ src/api/api/search_mvt.ts | 2 +- src/api/api/security.ts | 2 +- src/api/api/snapshot.ts | 2 +- src/api/api/transform.ts | 22 + src/api/index.ts | 11 + src/api/types.ts | 885 +++++++++++++++++++++------- src/api/typesWithBodyKey.ts | 889 +++++++++++++++++++++------- 19 files changed, 2467 insertions(+), 818 deletions(-) create mode 100644 src/api/api/health_report.ts create mode 100644 src/api/api/search_application.ts diff --git a/.ci/make.mjs b/.ci/make.mjs index adcc8e92e..0937de9ea 100644 --- a/.ci/make.mjs +++ b/.ci/make.mjs @@ -97,7 +97,7 @@ async function bump (args) { // this command can only be executed locally for now async function codegen (args) { assert(args.length === 1, 'Codegen task expects one parameter') - const [version] = args + const version = args[0].toString() const clientGeneratorPath = join(import.meta.url, '..', '..', 'elastic-client-generator-js') const isGeneratorCloned = await $`[[ -d ${clientGeneratorPath} ]]`.exitCode === 0 diff --git a/.ci/make.sh b/.ci/make.sh index 70c6f71e9..e5bcbd5e9 100755 --- a/.ci/make.sh +++ b/.ci/make.sh @@ -145,7 +145,7 @@ docker run \ --name make-elasticsearch-js \ --rm \ $product \ - node .ci/make.mjs --task $TASK "${TASK_ARGS[@]}" + node .ci/make.mjs --task $TASK ${TASK_ARGS[*]} # ------------------------------------------------------- # # Post Command tasks & checks diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 125236b0e..240a4fa72 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -29,7 +29,7 @@ === bulk Allows to perform multiple index/update/delete operations in a single request. -https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Endpoint documentation] +{ref}/docs-bulk.html[Endpoint documentation] [source,ts] ---- client.bulk(...) @@ -39,7 +39,7 @@ client.bulk(...) === clear_scroll Explicitly clears the search context for a scroll. 
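
For example, a minimal sketch (the `scroll_id` value below is a placeholder; real ids come from a prior scrolling search):

[source,ts]
----
// release the server-side scroll context once paging is finished
await client.clearScroll({ scroll_id: 'DXF1ZXJ5QW5kRmV0Y2gB...' })
----
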
-https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html[Endpoint documentation]
+{ref}/clear-scroll-api.html[Endpoint documentation]
[source,ts]
----
client.clearScroll(...)
----

[discrete]
=== close_point_in_time
Close a point in time

-https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html[Endpoint documentation]
+{ref}/point-in-time-api.html[Endpoint documentation]
[source,ts]
----
client.closePointInTime(...)
----

[discrete]
=== count
Returns number of documents matching a query.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html[Endpoint documentation]
+{ref}/search-count.html[Endpoint documentation]
[source,ts]
----
client.count(...)
----

[discrete]
=== create
Creates a new document in the index.

Returns a 409 response when a document with the same ID already exists in the index.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html[Endpoint documentation]
+{ref}/docs-index_.html[Endpoint documentation]
[source,ts]
----
client.create(...)
----

[discrete]
=== delete
Removes a document from the index.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html[Endpoint documentation]
+{ref}/docs-delete.html[Endpoint documentation]
[source,ts]
----
client.delete(...)
----

[discrete]
=== delete_by_query
Deletes documents matching the provided query.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html[Endpoint documentation]
+{ref}/docs-delete-by-query.html[Endpoint documentation]
[source,ts]
----
client.deleteByQuery(...)
----

[discrete]
=== delete_by_query_rethrottle
Changes the number of requests per second for a particular Delete By Query operation.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html[Endpoint documentation]
+{ref}/docs-delete-by-query.html[Endpoint documentation]
[source,ts]
----
client.deleteByQueryRethrottle(...)
----

[discrete]
=== delete_script
Deletes a script.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html[Endpoint documentation]
+{ref}/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.deleteScript(...)
----

[discrete]
=== exists
Returns information about whether a document exists in an index.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html[Endpoint documentation]
+{ref}/docs-get.html[Endpoint documentation]
[source,ts]
----
client.exists(...)
----

[discrete]
=== exists_source
Returns information about whether a document source exists in an index.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html[Endpoint documentation]
+{ref}/docs-get.html[Endpoint documentation]
[source,ts]
----
client.existsSource(...)
----

[discrete]
=== explain
Returns information about why a specific document matches (or doesn't match) a query.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html[Endpoint documentation]
+{ref}/search-explain.html[Endpoint documentation]
[source,ts]
----
client.explain(...)
----

[discrete]
=== field_caps
Returns the information about the capabilities of fields among multiple indices.
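
As a quick sketch (the index and field names are placeholders):

[source,ts]
----
// ask which of these fields are searchable/aggregatable across indices
const caps = await client.fieldCaps({
  index: 'my-index',
  fields: ['title', 'created_at']
})
----
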
-https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html[Endpoint documentation] +{ref}/search-field-caps.html[Endpoint documentation] [source,ts] ---- client.fieldCaps(...) @@ -161,7 +161,7 @@ client.fieldCaps(...) === get Returns a document. -https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html[Endpoint documentation] +{ref}/docs-get.html[Endpoint documentation] [source,ts] ---- client.get(...) @@ -171,7 +171,7 @@ client.get(...) === get_script Returns a script. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html[Endpoint documentation] +{ref}/modules-scripting.html[Endpoint documentation] [source,ts] ---- client.getScript(...) @@ -181,7 +181,7 @@ client.getScript(...) === get_script_context Returns all script contexts. -https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-contexts.html[Endpoint documentation] +{painless}/painless-contexts.html[Endpoint documentation] [source,ts] ---- client.getScriptContext(...) @@ -191,7 +191,7 @@ client.getScriptContext(...) === get_script_languages Returns available script types, languages and contexts -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html[Endpoint documentation] +{ref}/modules-scripting.html[Endpoint documentation] [source,ts] ---- client.getScriptLanguages(...) @@ -201,17 +201,27 @@ client.getScriptLanguages(...) === get_source Returns the source of a document. -https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html[Endpoint documentation] +{ref}/docs-get.html[Endpoint documentation] [source,ts] ---- client.getSource(...) ---- +[discrete] +=== health_report +Returns the health of the cluster. + +{ref}/health-api.html[Endpoint documentation] +[source,ts] +---- +client.healthReport(...) +---- + [discrete] === index Creates or updates a document in an index. -https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html[Endpoint documentation] +{ref}/docs-index_.html[Endpoint documentation] [source,ts] ---- client.index(...) @@ -221,7 +231,7 @@ client.index(...) === info Returns basic information about the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html[Endpoint documentation] +{ref}/index.html[Endpoint documentation] [source,ts] ---- client.info(...) @@ -231,7 +241,7 @@ client.info(...) === knn_search Performs a kNN search. -https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html[Endpoint documentation] +{ref}/search-search.html[Endpoint documentation] [source,ts] ---- client.knnSearch(...) @@ -241,7 +251,7 @@ client.knnSearch(...) === mget Allows to get multiple documents in one request. -https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html[Endpoint documentation] +{ref}/docs-multi-get.html[Endpoint documentation] [source,ts] ---- client.mget(...) @@ -251,7 +261,7 @@ client.mget(...) === msearch Allows to execute several search operations in one request. -https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html[Endpoint documentation] +{ref}/search-multi-search.html[Endpoint documentation] [source,ts] ---- client.msearch(...) @@ -261,7 +271,7 @@ client.msearch(...) === msearch_template Allows to execute several search template operations in one request. 
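
A minimal sketch, assuming a search template with the id `my-template` has already been stored:

[source,ts]
----
// pairs of header (target) and body (template reference) entries
const responses = await client.msearchTemplate({
  search_templates: [
    { index: 'my-index' },
    { id: 'my-template', params: { query_string: 'hello' } }
  ]
})
----
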
-https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html[Endpoint documentation]
+{ref}/search-multi-search.html[Endpoint documentation]
[source,ts]
----
client.msearchTemplate(...)
----

[discrete]
=== mtermvectors
Returns multiple termvectors in one request.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html[Endpoint documentation]
+{ref}/docs-multi-termvectors.html[Endpoint documentation]
[source,ts]
----
client.mtermvectors(...)
----

[discrete]
=== open_point_in_time
Open a point in time that can be used in subsequent searches

-https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html[Endpoint documentation]
+{ref}/point-in-time-api.html[Endpoint documentation]
[source,ts]
----
client.openPointInTime(...)
----

[discrete]
=== ping
Returns whether the cluster is running.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html[Endpoint documentation]
+{ref}/index.html[Endpoint documentation]
[source,ts]
----
client.ping(...)
----

[discrete]
=== put_script
Creates or updates a script.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html[Endpoint documentation]
+{ref}/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.putScript(...)
----

[discrete]
=== rank_eval
Allows to evaluate the quality of ranked search results over a set of typical search queries

-https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html[Endpoint documentation]
+{ref}/search-rank-eval.html[Endpoint documentation]
[source,ts]
----
client.rankEval(...)
----

[discrete]
=== reindex
Allows to copy documents from one index to another, optionally filtering the source
documents by a query, changing the destination index settings, or fetching the
documents from a remote cluster.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html[Endpoint documentation]
+{ref}/docs-reindex.html[Endpoint documentation]
[source,ts]
----
client.reindex(...)
----

[discrete]
=== reindex_rethrottle
Changes the number of requests per second for a particular Reindex operation.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html[Endpoint documentation]
+{ref}/docs-reindex.html[Endpoint documentation]
[source,ts]
----
client.reindexRethrottle(...)
----

[discrete]
=== render_search_template
Allows to use the Mustache language to pre-render a search definition.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html[Endpoint documentation]
+{ref}/render-search-template-api.html[Endpoint documentation]
[source,ts]
----
client.renderSearchTemplate(...)
----

[discrete]
=== scripts_painless_execute
Allows an arbitrary script to be executed and a result to be returned

-https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html[Endpoint documentation]
+{painless}/painless-execute-api.html[Endpoint documentation]
[source,ts]
----
client.scriptsPainlessExecute(...)
----

[discrete]
=== scroll
Allows to retrieve a large number of results from a single search request.
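
For example (a sketch; `scrollId` is assumed to hold the `_scroll_id` returned by an earlier `search` call made with the `scroll` parameter):

[source,ts]
----
// fetch the next page of an open scrolling search and keep it alive for 30s
const page = await client.scroll({ scroll_id: scrollId, scroll: '30s' })
----
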
-https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html#request-body-search-scroll[Endpoint documentation] +{ref}/search-request-body.html[Endpoint documentation] [source,ts] ---- client.scroll(...) @@ -373,7 +383,7 @@ client.scroll(...) === search Returns results matching a query. -https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html[Endpoint documentation] +{ref}/search-search.html[Endpoint documentation] [source,ts] ---- client.search(...) @@ -383,7 +393,7 @@ client.search(...) === search_mvt Searches a vector tile for geospatial values. Returns results as a binary Mapbox vector tile. -https://www.elastic.co/guide/en/elasticsearch/reference/current/search-vector-tile-api.html[Endpoint documentation] +{ref}/search-vector-tile-api.html[Endpoint documentation] [source,ts] ---- client.searchMvt(...) @@ -393,7 +403,7 @@ client.searchMvt(...) === search_shards Returns information about the indices and shards that a search request would be executed against. -https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html[Endpoint documentation] +{ref}/search-shards.html[Endpoint documentation] [source,ts] ---- client.searchShards(...) @@ -403,7 +413,7 @@ client.searchShards(...) === search_template Allows to use the Mustache language to pre-render a search definition. -https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html[Endpoint documentation] +{ref}/search-template.html[Endpoint documentation] [source,ts] ---- client.searchTemplate(...) @@ -413,7 +423,7 @@ client.searchTemplate(...) === terms_enum The terms enum API can be used to discover terms in the index that begin with the provided string. It is designed for low-latency look-ups used in auto-complete scenarios. -https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html[Endpoint documentation] +{ref}/search-terms-enum.html[Endpoint documentation] [source,ts] ---- client.termsEnum(...) @@ -423,7 +433,7 @@ client.termsEnum(...) === termvectors Returns information and statistics about terms in the fields of a particular document. -https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html[Endpoint documentation] +{ref}/docs-termvectors.html[Endpoint documentation] [source,ts] ---- client.termvectors(...) @@ -433,7 +443,7 @@ client.termvectors(...) === update Updates a document with a script or partial document. -https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html[Endpoint documentation] +{ref}/docs-update.html[Endpoint documentation] [source,ts] ---- client.update(...) @@ -444,7 +454,7 @@ client.update(...) Performs an update on every document in the index without changing the source, for example to pick up a mapping change. -https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html[Endpoint documentation] +{ref}/docs-update-by-query.html[Endpoint documentation] [source,ts] ---- client.updateByQuery(...) @@ -454,7 +464,7 @@ client.updateByQuery(...) === update_by_query_rethrottle Changes the number of requests per second for a particular Update By Query operation. -https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html[Endpoint documentation] +{ref}/docs-update-by-query.html[Endpoint documentation] [source,ts] ---- client.updateByQueryRethrottle(...) @@ -466,7 +476,7 @@ client.updateByQueryRethrottle(...) ==== delete Deletes an async search by ID. 
If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. -https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html[Endpoint documentation] +{ref}/async-search.html[Endpoint documentation] [source,ts] ---- client.asyncSearch.delete(...) @@ -476,7 +486,7 @@ client.asyncSearch.delete(...) ==== get Retrieves the results of a previously submitted async search request given its ID. -https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html[Endpoint documentation] +{ref}/async-search.html[Endpoint documentation] [source,ts] ---- client.asyncSearch.get(...) @@ -486,7 +496,7 @@ client.asyncSearch.get(...) ==== status Retrieves the status of a previously submitted async search request given its ID. -https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html[Endpoint documentation] +{ref}/async-search.html[Endpoint documentation] [source,ts] ---- client.asyncSearch.status(...) @@ -496,7 +506,7 @@ client.asyncSearch.status(...) ==== submit Executes a search request asynchronously. -https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html[Endpoint documentation] +{ref}/async-search.html[Endpoint documentation] [source,ts] ---- client.asyncSearch.submit(...) @@ -508,7 +518,7 @@ client.asyncSearch.submit(...) ==== aliases Shows information about currently configured aliases to indices including filter and routing infos. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-alias.html[Endpoint documentation] +{ref}/cat-alias.html[Endpoint documentation] [source,ts] ---- client.cat.aliases(...) @@ -518,7 +528,7 @@ client.cat.aliases(...) ==== allocation Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html[Endpoint documentation] +{ref}/cat-allocation.html[Endpoint documentation] [source,ts] ---- client.cat.allocation(...) @@ -527,6 +537,8 @@ client.cat.allocation(...) [discrete] ==== component_templates Returns information about existing component_templates templates. + +{ref}/cat-component-templates.html[Endpoint documentation] [source,ts] ---- client.cat.componentTemplates(...) @@ -536,7 +548,7 @@ client.cat.componentTemplates(...) ==== count Provides quick access to the document count of the entire cluster, or individual indices. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-count.html[Endpoint documentation] +{ref}/cat-count.html[Endpoint documentation] [source,ts] ---- client.cat.count(...) @@ -546,7 +558,7 @@ client.cat.count(...) ==== fielddata Shows how much heap memory is currently being used by fielddata on every data node in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-fielddata.html[Endpoint documentation] +{ref}/cat-fielddata.html[Endpoint documentation] [source,ts] ---- client.cat.fielddata(...) @@ -556,7 +568,7 @@ client.cat.fielddata(...) ==== health Returns a concise representation of the cluster health. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-health.html[Endpoint documentation] +{ref}/cat-health.html[Endpoint documentation] [source,ts] ---- client.cat.health(...) @@ -566,7 +578,7 @@ client.cat.health(...) ==== help Returns help for the Cat APIs. 
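
This endpoint takes no required parameters, so a sketch is simply:

[source,ts]
----
// plain-text listing of the available cat endpoints
const help = await client.cat.help()
----
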
-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat.html[Endpoint documentation]
+{ref}/cat.html[Endpoint documentation]
[source,ts]
----
client.cat.help(...)
----

[discrete]
==== indices
Returns information about indices: number of primaries and replicas, document counts, disk size, ...

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html[Endpoint documentation]
+{ref}/cat-indices.html[Endpoint documentation]
[source,ts]
----
client.cat.indices(...)
----

[discrete]
==== master
Returns information about the master node.
+
+{ref}/cat-master.html[Endpoint documentation]
[source,ts]
----
client.cat.master(...)
----

[discrete]
==== ml_data_frame_analytics
Gets configuration and usage information about data frame analytics jobs.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-dfanalytics.html[Endpoint documentation]
+{ref}/cat-dfanalytics.html[Endpoint documentation]
[source,ts]
----
client.cat.mlDataFrameAnalytics(...)
----

[discrete]
==== ml_datafeeds
Gets configuration and usage information about datafeeds.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-datafeeds.html[Endpoint documentation]
+{ref}/cat-datafeeds.html[Endpoint documentation]
[source,ts]
----
client.cat.mlDatafeeds(...)
----

[discrete]
==== ml_jobs
Gets configuration and usage information about anomaly detection jobs.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-anomaly-detectors.html[Endpoint documentation]
+{ref}/cat-anomaly-detectors.html[Endpoint documentation]
[source,ts]
----
client.cat.mlJobs(...)
----

[discrete]
==== ml_trained_models
Gets configuration and usage information about inference trained models.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-trained-model.html[Endpoint documentation]
+{ref}/cat-trained-model.html[Endpoint documentation]
[source,ts]
----
client.cat.mlTrainedModels(...)
----

[discrete]
==== nodeattrs
Returns information about custom node attributes.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodeattrs.html[Endpoint documentation]
+{ref}/cat-nodeattrs.html[Endpoint documentation]
[source,ts]
----
client.cat.nodeattrs(...)
----

[discrete]
==== nodes
Returns basic statistics about performance of cluster nodes.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodes.html[Endpoint documentation]
+{ref}/cat-nodes.html[Endpoint documentation]
[source,ts]
----
client.cat.nodes(...)
----

[discrete]
==== pending_tasks
Returns a concise representation of the cluster pending tasks.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-pending-tasks.html[Endpoint documentation]
+{ref}/cat-pending-tasks.html[Endpoint documentation]
[source,ts]
----
client.cat.pendingTasks(...)
----

[discrete]
==== plugins
Returns information about installed plugins across nodes.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-plugins.html[Endpoint documentation]
+{ref}/cat-plugins.html[Endpoint documentation]
[source,ts]
----
client.cat.plugins(...)
----

[discrete]
==== recovery
Returns information about index shard recoveries, both on-going and completed.
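
For instance, a small sketch (the index name is a placeholder; `format: 'json'` requests structured output instead of text):

[source,ts]
----
const recovery = await client.cat.recovery({ index: 'my-index', format: 'json' })
----
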
-https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-recovery.html[Endpoint documentation] +{ref}/cat-recovery.html[Endpoint documentation] [source,ts] ---- client.cat.recovery(...) @@ -684,7 +698,7 @@ client.cat.recovery(...) ==== repositories Returns information about snapshot repositories registered in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-repositories.html[Endpoint documentation] +{ref}/cat-repositories.html[Endpoint documentation] [source,ts] ---- client.cat.repositories(...) @@ -694,7 +708,7 @@ client.cat.repositories(...) ==== segments Provides low-level information about the segments in the shards of an index. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-segments.html[Endpoint documentation] +{ref}/cat-segments.html[Endpoint documentation] [source,ts] ---- client.cat.segments(...) @@ -704,7 +718,7 @@ client.cat.segments(...) ==== shards Provides a detailed view of shard allocation on nodes. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-shards.html[Endpoint documentation] +{ref}/cat-shards.html[Endpoint documentation] [source,ts] ---- client.cat.shards(...) @@ -714,7 +728,7 @@ client.cat.shards(...) ==== snapshots Returns all snapshots in a specific repository. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-snapshots.html[Endpoint documentation] +{ref}/cat-snapshots.html[Endpoint documentation] [source,ts] ---- client.cat.snapshots(...) @@ -724,7 +738,7 @@ client.cat.snapshots(...) ==== tasks Returns information about the tasks currently executing on one or more nodes in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html[Endpoint documentation] +{ref}/tasks.html[Endpoint documentation] [source,ts] ---- client.cat.tasks(...) @@ -734,7 +748,7 @@ client.cat.tasks(...) ==== templates Returns information about existing templates. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-templates.html[Endpoint documentation] +{ref}/cat-templates.html[Endpoint documentation] [source,ts] ---- client.cat.templates(...) @@ -745,7 +759,7 @@ client.cat.templates(...) Returns cluster-wide thread pool statistics per node. By default the active, queue and rejected statistics are returned for all thread pools. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html[Endpoint documentation] +{ref}/cat-thread-pool.html[Endpoint documentation] [source,ts] ---- client.cat.threadPool(...) @@ -755,7 +769,7 @@ client.cat.threadPool(...) ==== transforms Gets configuration and usage information about transforms. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-transforms.html[Endpoint documentation] +{ref}/cat-transforms.html[Endpoint documentation] [source,ts] ---- client.cat.transforms(...) @@ -767,7 +781,7 @@ client.cat.transforms(...) ==== delete_auto_follow_pattern Deletes auto-follow patterns. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-delete-auto-follow-pattern.html[Endpoint documentation] +{ref}/ccr-delete-auto-follow-pattern.html[Endpoint documentation] [source,ts] ---- client.ccr.deleteAutoFollowPattern(...) @@ -777,7 +791,7 @@ client.ccr.deleteAutoFollowPattern(...) ==== follow Creates a new follower index configured to follow the referenced leader index. 
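
A minimal sketch (all names are placeholders, and the remote cluster is assumed to be configured already):

[source,ts]
----
await client.ccr.follow({
  index: 'follower-index',          // new follower index on the local cluster
  remote_cluster: 'remote-cluster', // alias of the cluster hosting the leader
  leader_index: 'leader-index'
})
----
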
-https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-follow.html[Endpoint documentation]
+{ref}/ccr-put-follow.html[Endpoint documentation]
[source,ts]
----
client.ccr.follow(...)
----

[discrete]
==== follow_info
Retrieves information about all follower indices, including parameters and status for each follower index

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html[Endpoint documentation]
+{ref}/ccr-get-follow-info.html[Endpoint documentation]
[source,ts]
----
client.ccr.followInfo(...)
----

[discrete]
==== follow_stats
Retrieves follower stats. Returns shard-level stats about the following tasks associated with each shard for the specified indices.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-stats.html[Endpoint documentation]
+{ref}/ccr-get-follow-stats.html[Endpoint documentation]
[source,ts]
----
client.ccr.followStats(...)
----

[discrete]
==== forget_follower
Removes the follower retention leases from the leader.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-forget-follower.html[Endpoint documentation]
+{ref}/ccr-post-forget-follower.html[Endpoint documentation]
[source,ts]
----
client.ccr.forgetFollower(...)
----

[discrete]
==== get_auto_follow_pattern
Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html[Endpoint documentation]
+{ref}/ccr-get-auto-follow-pattern.html[Endpoint documentation]
[source,ts]
----
client.ccr.getAutoFollowPattern(...)
----

[discrete]
==== pause_auto_follow_pattern
Pauses an auto-follow pattern

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html[Endpoint documentation]
+{ref}/ccr-pause-auto-follow-pattern.html[Endpoint documentation]
[source,ts]
----
client.ccr.pauseAutoFollowPattern(...)
----

[discrete]
==== pause_follow
Pauses a follower index. The follower index will not fetch any additional operations from the leader index.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-pause-follow.html[Endpoint documentation]
+{ref}/ccr-post-pause-follow.html[Endpoint documentation]
[source,ts]
----
client.ccr.pauseFollow(...)
----

[discrete]
==== put_auto_follow_pattern
Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-auto-follow-pattern.html[Endpoint documentation]
+{ref}/ccr-put-auto-follow-pattern.html[Endpoint documentation]
[source,ts]
----
client.ccr.putAutoFollowPattern(...)
----

[discrete]
==== resume_auto_follow_pattern
Resumes an auto-follow pattern that has been paused

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html[Endpoint documentation]
+{ref}/ccr-resume-auto-follow-pattern.html[Endpoint documentation]
[source,ts]
----
client.ccr.resumeAutoFollowPattern(...)
----

[discrete]
==== resume_follow
Resumes a follower index that has been paused
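
For example (a sketch; `follower-index` stands in for a previously paused follower):

[source,ts]
----
await client.ccr.resumeFollow({ index: 'follower-index' })
----
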
==== resume_follow Resumes a follower index that has been paused -https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html[Endpoint documentation] +{ref}/ccr-post-resume-follow.html[Endpoint documentation] [source,ts] ---- client.ccr.resumeFollow(...) @@ -877,7 +891,7 @@ client.ccr.resumeFollow(...) ==== stats Gets all stats related to cross-cluster replication. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-stats.html[Endpoint documentation] +{ref}/ccr-get-stats.html[Endpoint documentation] [source,ts] ---- client.ccr.stats(...) @@ -887,7 +901,7 @@ client.ccr.stats(...) ==== unfollow Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-unfollow.html[Endpoint documentation] +{ref}/ccr-post-unfollow.html[Endpoint documentation] [source,ts] ---- client.ccr.unfollow(...) @@ -899,7 +913,7 @@ client.ccr.unfollow(...) ==== allocation_explain Provides explanations for shard allocations in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-allocation-explain.html[Endpoint documentation] +{ref}/cluster-allocation-explain.html[Endpoint documentation] [source,ts] ---- client.cluster.allocationExplain(...) @@ -909,7 +923,7 @@ client.cluster.allocationExplain(...) ==== delete_component_template Deletes a component template -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html[Endpoint documentation] +{ref}/indices-component-template.html[Endpoint documentation] [source,ts] ---- client.cluster.deleteComponentTemplate(...) @@ -919,7 +933,7 @@ client.cluster.deleteComponentTemplate(...) ==== delete_voting_config_exclusions Clears cluster voting config exclusions. -https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html[Endpoint documentation] +{ref}/voting-config-exclusions.html[Endpoint documentation] [source,ts] ---- client.cluster.deleteVotingConfigExclusions(...) @@ -929,7 +943,7 @@ client.cluster.deleteVotingConfigExclusions(...) ==== exists_component_template Returns information about whether a particular component template exist -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html[Endpoint documentation] +{ref}/indices-component-template.html[Endpoint documentation] [source,ts] ---- client.cluster.existsComponentTemplate(...) @@ -939,7 +953,7 @@ client.cluster.existsComponentTemplate(...) ==== get_component_template Returns one or more component templates -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html[Endpoint documentation] +{ref}/indices-component-template.html[Endpoint documentation] [source,ts] ---- client.cluster.getComponentTemplate(...) @@ -949,7 +963,7 @@ client.cluster.getComponentTemplate(...) ==== get_settings Returns cluster settings. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-get-settings.html[Endpoint documentation] +{ref}/cluster-get-settings.html[Endpoint documentation] [source,ts] ---- client.cluster.getSettings(...) @@ -959,7 +973,7 @@ client.cluster.getSettings(...) ==== health Returns basic information about the health of the cluster. 
-https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html[Endpoint documentation]
+{ref}/cluster-health.html[Endpoint documentation]
[source,ts]
----
client.cluster.health(...)
----

[discrete]
==== pending_tasks
Returns a list of any cluster-level changes (e.g. create index, update mapping,
allocate or fail shard) which have not yet been executed.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-pending.html[Endpoint documentation]
+{ref}/cluster-pending.html[Endpoint documentation]
[source,ts]
----
client.cluster.pendingTasks(...)
----

[discrete]
==== post_voting_config_exclusions
Updates the cluster voting config exclusions by node ids or node names.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html[Endpoint documentation]
+{ref}/voting-config-exclusions.html[Endpoint documentation]
[source,ts]
----
client.cluster.postVotingConfigExclusions(...)
----

[discrete]
==== put_component_template
Creates or updates a component template

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html[Endpoint documentation]
+{ref}/indices-component-template.html[Endpoint documentation]
[source,ts]
----
client.cluster.putComponentTemplate(...)
----

[discrete]
==== put_settings
Updates the cluster settings.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html[Endpoint documentation]
+{ref}/cluster-update-settings.html[Endpoint documentation]
[source,ts]
----
client.cluster.putSettings(...)
----

[discrete]
==== remote_info
Returns the information about configured remote clusters.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-remote-info.html[Endpoint documentation]
+{ref}/cluster-remote-info.html[Endpoint documentation]
[source,ts]
----
client.cluster.remoteInfo(...)
----

[discrete]
==== reroute
Allows to manually change the allocation of individual shards in the cluster.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html[Endpoint documentation]
+{ref}/cluster-reroute.html[Endpoint documentation]
[source,ts]
----
client.cluster.reroute(...)
----

[discrete]
==== state
Returns comprehensive information about the state of the cluster.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html[Endpoint documentation]
+{ref}/cluster-state.html[Endpoint documentation]
[source,ts]
----
client.cluster.state(...)
----

[discrete]
==== stats
Returns high-level overview of cluster statistics.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html[Endpoint documentation]
+{ref}/cluster-stats.html[Endpoint documentation]
[source,ts]
----
client.cluster.stats(...)
----

[discrete]
=== dangling_indices
[discrete]
==== delete_dangling_index
Deletes the specified dangling index

-https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html[Endpoint documentation]
+{ref}/modules-gateway-dangling-indices.html[Endpoint documentation]
[source,ts]
----
client.danglingIndices.deleteDanglingIndex(...)
----

[discrete]
==== import_dangling_index
Imports the specified dangling index
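
A minimal sketch (the UUID is a placeholder taken from the list API, and data loss must be acknowledged explicitly):

[source,ts]
----
await client.danglingIndices.importDanglingIndex({
  index_uuid: 'zmM4e0JtBkeUjiHD-placeholder',
  accept_data_loss: true
})
----
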
==== import_dangling_index Imports the specified dangling index -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html[Endpoint documentation] +{ref}/modules-gateway-dangling-indices.html[Endpoint documentation] [source,ts] ---- client.danglingIndices.importDanglingIndex(...) @@ -1072,7 +1086,7 @@ client.danglingIndices.importDanglingIndex(...) ==== list_dangling_indices Returns all dangling indices. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html[Endpoint documentation] +{ref}/modules-gateway-dangling-indices.html[Endpoint documentation] [source,ts] ---- client.danglingIndices.listDanglingIndices(...) @@ -1084,7 +1098,7 @@ client.danglingIndices.listDanglingIndices(...) ==== delete_policy Deletes an existing enrich policy and its enrich index. -https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-enrich-policy-api.html[Endpoint documentation] +{ref}/delete-enrich-policy-api.html[Endpoint documentation] [source,ts] ---- client.enrich.deletePolicy(...) @@ -1094,7 +1108,7 @@ client.enrich.deletePolicy(...) ==== execute_policy Creates the enrich index for an existing enrich policy. -https://www.elastic.co/guide/en/elasticsearch/reference/current/execute-enrich-policy-api.html[Endpoint documentation] +{ref}/execute-enrich-policy-api.html[Endpoint documentation] [source,ts] ---- client.enrich.executePolicy(...) @@ -1104,7 +1118,7 @@ client.enrich.executePolicy(...) ==== get_policy Gets information about an enrich policy. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-enrich-policy-api.html[Endpoint documentation] +{ref}/get-enrich-policy-api.html[Endpoint documentation] [source,ts] ---- client.enrich.getPolicy(...) @@ -1114,7 +1128,7 @@ client.enrich.getPolicy(...) ==== put_policy Creates a new enrich policy. -https://www.elastic.co/guide/en/elasticsearch/reference/current/put-enrich-policy-api.html[Endpoint documentation] +{ref}/put-enrich-policy-api.html[Endpoint documentation] [source,ts] ---- client.enrich.putPolicy(...) @@ -1124,7 +1138,7 @@ client.enrich.putPolicy(...) ==== stats Gets enrich coordinator statistics and information about enrich policies that are currently executing. -https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-stats-api.html[Endpoint documentation] +{ref}/enrich-stats-api.html[Endpoint documentation] [source,ts] ---- client.enrich.stats(...) @@ -1136,7 +1150,7 @@ client.enrich.stats(...) ==== delete Deletes an async EQL search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. -https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html[Endpoint documentation] +{ref}/eql-search-api.html[Endpoint documentation] [source,ts] ---- client.eql.delete(...) @@ -1146,7 +1160,7 @@ client.eql.delete(...) ==== get Returns async results from previously executed Event Query Language (EQL) search -https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html[Endpoint documentation] +{ref}/eql-search-api.html[Endpoint documentation] [source,ts] ---- client.eql.get(...) @@ -1156,7 +1170,7 @@ client.eql.get(...) 
==== get_status Returns the status of a previously submitted async or stored Event Query Language (EQL) search -https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html[Endpoint documentation] +{ref}/eql-search-api.html[Endpoint documentation] [source,ts] ---- client.eql.getStatus(...) @@ -1166,7 +1180,7 @@ client.eql.getStatus(...) ==== search Returns results matching a query expressed in Event Query Language (EQL) -https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html[Endpoint documentation] +{ref}/eql-search-api.html[Endpoint documentation] [source,ts] ---- client.eql.search(...) @@ -1178,7 +1192,7 @@ client.eql.search(...) ==== get_features Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-features-api.html[Endpoint documentation] +{ref}/get-features-api.html[Endpoint documentation] [source,ts] ---- client.features.getFeatures(...) @@ -1188,7 +1202,7 @@ client.features.getFeatures(...) ==== reset_features Resets the internal state of features, usually by deleting system indices -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.features.resetFeatures(...) @@ -1200,7 +1214,7 @@ client.features.resetFeatures(...) ==== global_checkpoints Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-global-checkpoints.html[Endpoint documentation] +{ref}/get-global-checkpoints.html[Endpoint documentation] [source,ts] ---- client.fleet.globalCheckpoints(...) @@ -1228,7 +1242,7 @@ client.fleet.search(...) ==== explore Explore extracted and summarized information about the documents and terms in an index. -https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html[Endpoint documentation] +{ref}/graph-explore-api.html[Endpoint documentation] [source,ts] ---- client.graph.explore(...) @@ -1240,7 +1254,7 @@ client.graph.explore(...) ==== delete_lifecycle Deletes the specified lifecycle policy definition. A currently used policy cannot be deleted. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html[Endpoint documentation] +{ref}/ilm-delete-lifecycle.html[Endpoint documentation] [source,ts] ---- client.ilm.deleteLifecycle(...) @@ -1250,7 +1264,7 @@ client.ilm.deleteLifecycle(...) ==== explain_lifecycle Retrieves information about the index's current lifecycle state, such as the currently executing phase, action, and step. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-explain-lifecycle.html[Endpoint documentation] +{ref}/ilm-explain-lifecycle.html[Endpoint documentation] [source,ts] ---- client.ilm.explainLifecycle(...) @@ -1260,7 +1274,7 @@ client.ilm.explainLifecycle(...) ==== get_lifecycle Returns the specified policy definition. Includes the policy version and last modified date. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html[Endpoint documentation] +{ref}/ilm-get-lifecycle.html[Endpoint documentation] [source,ts] ---- client.ilm.getLifecycle(...) @@ -1270,7 +1284,7 @@ client.ilm.getLifecycle(...) ==== get_status Retrieves the current index lifecycle management (ILM) status. 
-https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-status.html[Endpoint documentation]
+{ref}/ilm-get-status.html[Endpoint documentation]
[source,ts]
----
client.ilm.getStatus(...)
----

[discrete]
==== migrate_to_data_tiers
Migrates the indices and ILM policies away from custom node attribute allocation routing to data tiers routing

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate-to-data-tiers.html[Endpoint documentation]
+{ref}/ilm-migrate-to-data-tiers.html[Endpoint documentation]
[source,ts]
----
client.ilm.migrateToDataTiers(...)
----

[discrete]
==== move_to_step
Manually moves an index into the specified step and executes that step.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-move-to-step.html[Endpoint documentation]
+{ref}/ilm-move-to-step.html[Endpoint documentation]
[source,ts]
----
client.ilm.moveToStep(...)
----

[discrete]
==== put_lifecycle
Creates a lifecycle policy

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-put-lifecycle.html[Endpoint documentation]
+{ref}/ilm-put-lifecycle.html[Endpoint documentation]
[source,ts]
----
client.ilm.putLifecycle(...)
----

[discrete]
==== remove_policy
Removes the assigned lifecycle policy and stops managing the specified index

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html[Endpoint documentation]
+{ref}/ilm-remove-policy.html[Endpoint documentation]
[source,ts]
----
client.ilm.removePolicy(...)
----

[discrete]
==== retry
Retries executing the policy for an index that is in the ERROR step.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-retry-policy.html[Endpoint documentation]
+{ref}/ilm-retry-policy.html[Endpoint documentation]
[source,ts]
----
client.ilm.retry(...)
----

[discrete]
==== start
Start the index lifecycle management (ILM) plugin.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-start.html[Endpoint documentation]
+{ref}/ilm-start.html[Endpoint documentation]
[source,ts]
----
client.ilm.start(...)
----

[discrete]
==== stop
Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-stop.html[Endpoint documentation]
+{ref}/ilm-stop.html[Endpoint documentation]
[source,ts]
----
client.ilm.stop(...)
----

[discrete]
=== indices
[discrete]
==== add_block
Adds a block to an index.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html[Endpoint documentation]
+{ref}/index-modules-blocks.html[Endpoint documentation]
[source,ts]
----
client.indices.addBlock(...)
----

[discrete]
==== analyze
Performs the analysis process on a text and returns the tokens breakdown of the text.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-analyze.html[Endpoint documentation]
+{ref}/indices-analyze.html[Endpoint documentation]
[source,ts]
----
client.indices.analyze(...)
----

[discrete]
==== clear_cache
Clears all or specific caches for one or more indices.
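
For instance, a sketch clearing only the fielddata and query caches of one index (the name is a placeholder):

[source,ts]
----
await client.indices.clearCache({
  index: 'my-index',
  fielddata: true,
  query: true
})
----
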
-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html[Endpoint documentation]
+{ref}/indices-clearcache.html[Endpoint documentation]
[source,ts]
----
client.indices.clearCache(...)
----

[discrete]
==== clone
Clones an index

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clone-index.html[Endpoint documentation]
+{ref}/indices-clone-index.html[Endpoint documentation]
[source,ts]
----
client.indices.clone(...)
----

[discrete]
==== close
Closes an index.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html[Endpoint documentation]
+{ref}/indices-open-close.html[Endpoint documentation]
[source,ts]
----
client.indices.close(...)
----

[discrete]
==== create
Creates an index with optional settings and mappings.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html[Endpoint documentation]
+{ref}/indices-create-index.html[Endpoint documentation]
[source,ts]
----
client.indices.create(...)
----

[discrete]
==== create_data_stream
Creates a data stream

-https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation]
+{ref}/data-streams.html[Endpoint documentation]
[source,ts]
----
client.indices.createDataStream(...)
----

[discrete]
==== data_streams_stats
Provides statistics on operations happening in a data stream.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation]
+{ref}/data-streams.html[Endpoint documentation]
[source,ts]
----
client.indices.dataStreamsStats(...)
----

[discrete]
==== delete
Deletes an index.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html[Endpoint documentation]
+{ref}/indices-delete-index.html[Endpoint documentation]
[source,ts]
----
client.indices.delete(...)
----

[discrete]
==== delete_alias
Deletes an alias.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html[Endpoint documentation]
+{ref}/indices-aliases.html[Endpoint documentation]
[source,ts]
----
client.indices.deleteAlias(...)
----

+[discrete]
+==== delete_data_lifecycle
+Deletes the data lifecycle of the selected data streams.
+
+{ref}/dlm-delete-lifecycle.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.deleteDataLifecycle(...)
+----
+
[discrete]
==== delete_data_stream
Deletes a data stream.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation]
+{ref}/data-streams.html[Endpoint documentation]
[source,ts]
----
client.indices.deleteDataStream(...)
----

[discrete]
==== delete_index_template
Deletes an index template.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
+{ref}/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.deleteIndexTemplate(...)
----

[discrete]
==== delete_template
Deletes an index template.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
+{ref}/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.deleteTemplate(...)
@@ -1482,7 +1506,7 @@ client.indices.deleteTemplate(...)
==== disk_usage
Analyzes the disk usage of each field of an index or data stream

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-disk-usage.html[Endpoint documentation]
+{ref}/indices-disk-usage.html[Endpoint documentation]
[source,ts]
----
client.indices.diskUsage(...)
@@ -1492,7 +1516,7 @@ client.indices.diskUsage(...)
==== downsample
Downsample an index

-https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-rollup.html[Endpoint documentation]
+{ref}/xpack-rollup.html[Endpoint documentation]
[source,ts]
----
client.indices.downsample(...)
@@ -1502,7 +1526,7 @@ client.indices.downsample(...)
==== exists
Returns information about whether a particular index exists.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html[Endpoint documentation]
+{ref}/indices-exists.html[Endpoint documentation]
[source,ts]
----
client.indices.exists(...)
@@ -1512,7 +1536,7 @@ client.indices.exists(...)
==== exists_alias
Returns information about whether a particular alias exists.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html[Endpoint documentation]
+{ref}/indices-aliases.html[Endpoint documentation]
[source,ts]
----
client.indices.existsAlias(...)
@@ -1522,7 +1546,7 @@ client.indices.existsAlias(...)
==== exists_index_template
Returns information about whether a particular index template exists.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
+{ref}/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.existsIndexTemplate(...)
@@ -1532,17 +1556,27 @@ client.indices.existsIndexTemplate(...)
==== exists_template
Returns information about whether a particular index template exists.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation]
+{ref}/indices-templates.html[Endpoint documentation]
[source,ts]
----
client.indices.existsTemplate(...)
----

+[discrete]
+==== explain_data_lifecycle
+Retrieves information about the index's current DLM lifecycle, such as any errors encountered and the time since creation.
+
+{ref}/dlm-explain-lifecycle.html[Endpoint documentation]
+[source,ts]
+----
+client.indices.explainDataLifecycle(...)
+----
+
[discrete]
==== field_usage_stats
Returns the field usage stats for each field of an index

-https://www.elastic.co/guide/en/elasticsearch/reference/current/field-usage-stats.html[Endpoint documentation]
+{ref}/field-usage-stats.html[Endpoint documentation]
[source,ts]
----
client.indices.fieldUsageStats(...)
@@ -1552,7 +1586,7 @@ client.indices.fieldUsageStats(...)
==== flush
Performs the flush operation on one or more indices.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html[Endpoint documentation]
+{ref}/indices-flush.html[Endpoint documentation]
[source,ts]
----
client.indices.flush(...)
@@ -1562,7 +1596,7 @@ client.indices.flush(...)
==== forcemerge
Performs the force merge operation on one or more indices.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html[Endpoint documentation]
+{ref}/indices-forcemerge.html[Endpoint documentation]
[source,ts]
----
client.indices.forcemerge(...)
@@ -1572,7 +1606,7 @@ client.indices.forcemerge(...)
==== get
Returns information about one or more indices.

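+For example (a minimal sketch; `my-index` is a hypothetical index name):
+
+[source,ts]
+----
+await client.indices.get({ index: 'my-index' })
+----
+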
-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html[Endpoint documentation] +{ref}/indices-get-index.html[Endpoint documentation] [source,ts] ---- client.indices.get(...) @@ -1582,17 +1616,27 @@ client.indices.get(...) ==== get_alias Returns an alias. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html[Endpoint documentation] +{ref}/indices-aliases.html[Endpoint documentation] [source,ts] ---- client.indices.getAlias(...) ---- +[discrete] +==== get_data_lifecycle +Returns the data lifecycle of the selected data streams. + +{ref}/dlm-get-lifecycle.html[Endpoint documentation] +[source,ts] +---- +client.indices.getDataLifecycle(...) +---- + [discrete] ==== get_data_stream Returns data streams. -https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation] +{ref}/data-streams.html[Endpoint documentation] [source,ts] ---- client.indices.getDataStream(...) @@ -1602,7 +1646,7 @@ client.indices.getDataStream(...) ==== get_field_mapping Returns mapping for one or more fields. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-field-mapping.html[Endpoint documentation] +{ref}/indices-get-field-mapping.html[Endpoint documentation] [source,ts] ---- client.indices.getFieldMapping(...) @@ -1612,7 +1656,7 @@ client.indices.getFieldMapping(...) ==== get_index_template Returns an index template. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation] +{ref}/indices-templates.html[Endpoint documentation] [source,ts] ---- client.indices.getIndexTemplate(...) @@ -1622,7 +1666,7 @@ client.indices.getIndexTemplate(...) ==== get_mapping Returns mappings for one or more indices. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html[Endpoint documentation] +{ref}/indices-get-mapping.html[Endpoint documentation] [source,ts] ---- client.indices.getMapping(...) @@ -1632,7 +1676,7 @@ client.indices.getMapping(...) ==== get_settings Returns settings for one or more indices. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html[Endpoint documentation] +{ref}/indices-get-settings.html[Endpoint documentation] [source,ts] ---- client.indices.getSettings(...) @@ -1642,7 +1686,7 @@ client.indices.getSettings(...) ==== get_template Returns an index template. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation] +{ref}/indices-templates.html[Endpoint documentation] [source,ts] ---- client.indices.getTemplate(...) @@ -1652,7 +1696,7 @@ client.indices.getTemplate(...) ==== migrate_to_data_stream Migrates an alias to a data stream -https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation] +{ref}/data-streams.html[Endpoint documentation] [source,ts] ---- client.indices.migrateToDataStream(...) @@ -1662,7 +1706,7 @@ client.indices.migrateToDataStream(...) ==== modify_data_stream Modifies a data stream -https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation] +{ref}/data-streams.html[Endpoint documentation] [source,ts] ---- client.indices.modifyDataStream(...) @@ -1672,7 +1716,7 @@ client.indices.modifyDataStream(...) ==== open Opens an index. 
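+
+For example (a minimal sketch; `my-index` is a hypothetical closed index):
+
+[source,ts]
+----
+await client.indices.open({ index: 'my-index' })
+----
+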
-https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html[Endpoint documentation] +{ref}/indices-open-close.html[Endpoint documentation] [source,ts] ---- client.indices.open(...) @@ -1682,7 +1726,7 @@ client.indices.open(...) ==== promote_data_stream Promotes a data stream from a replicated data stream managed by CCR to a regular data stream -https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html[Endpoint documentation] +{ref}/data-streams.html[Endpoint documentation] [source,ts] ---- client.indices.promoteDataStream(...) @@ -1692,17 +1736,27 @@ client.indices.promoteDataStream(...) ==== put_alias Creates or updates an alias. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html[Endpoint documentation] +{ref}/indices-aliases.html[Endpoint documentation] [source,ts] ---- client.indices.putAlias(...) ---- +[discrete] +==== put_data_lifecycle +Updates the data lifecycle of the selected data streams. + +{ref}/dlm-put-lifecycle.html[Endpoint documentation] +[source,ts] +---- +client.indices.putDataLifecycle(...) +---- + [discrete] ==== put_index_template Creates or updates an index template. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation] +{ref}/indices-templates.html[Endpoint documentation] [source,ts] ---- client.indices.putIndexTemplate(...) @@ -1712,7 +1766,7 @@ client.indices.putIndexTemplate(...) ==== put_mapping Updates the index mappings. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html[Endpoint documentation] +{ref}/indices-put-mapping.html[Endpoint documentation] [source,ts] ---- client.indices.putMapping(...) @@ -1722,7 +1776,7 @@ client.indices.putMapping(...) ==== put_settings Updates the index settings. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html[Endpoint documentation] +{ref}/indices-update-settings.html[Endpoint documentation] [source,ts] ---- client.indices.putSettings(...) @@ -1732,7 +1786,7 @@ client.indices.putSettings(...) ==== put_template Creates or updates an index template. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation] +{ref}/indices-templates.html[Endpoint documentation] [source,ts] ---- client.indices.putTemplate(...) @@ -1742,7 +1796,7 @@ client.indices.putTemplate(...) ==== recovery Returns information about ongoing index shard recoveries. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-recovery.html[Endpoint documentation] +{ref}/indices-recovery.html[Endpoint documentation] [source,ts] ---- client.indices.recovery(...) @@ -1752,7 +1806,7 @@ client.indices.recovery(...) ==== refresh Performs the refresh operation in one or more indices. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html[Endpoint documentation] +{ref}/indices-refresh.html[Endpoint documentation] [source,ts] ---- client.indices.refresh(...) @@ -1762,7 +1816,7 @@ client.indices.refresh(...) ==== reload_search_analyzers Reloads an index's search analyzers and their resources. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-reload-analyzers.html[Endpoint documentation] +{ref}/indices-reload-analyzers.html[Endpoint documentation] [source,ts] ---- client.indices.reloadSearchAnalyzers(...) @@ -1772,7 +1826,7 @@ client.indices.reloadSearchAnalyzers(...) 
==== resolve_index Returns information about any matching indices, aliases, and data streams -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-resolve-index-api.html[Endpoint documentation] +{ref}/indices-resolve-index-api.html[Endpoint documentation] [source,ts] ---- client.indices.resolveIndex(...) @@ -1783,7 +1837,7 @@ client.indices.resolveIndex(...) Updates an alias to point to a new index when the existing index is considered to be too large or too old. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html[Endpoint documentation] +{ref}/indices-rollover-index.html[Endpoint documentation] [source,ts] ---- client.indices.rollover(...) @@ -1793,7 +1847,7 @@ client.indices.rollover(...) ==== segments Provides low-level information about segments in a Lucene index. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-segments.html[Endpoint documentation] +{ref}/indices-segments.html[Endpoint documentation] [source,ts] ---- client.indices.segments(...) @@ -1803,7 +1857,7 @@ client.indices.segments(...) ==== shard_stores Provides store information for shard copies of indices. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html[Endpoint documentation] +{ref}/indices-shards-stores.html[Endpoint documentation] [source,ts] ---- client.indices.shardStores(...) @@ -1813,7 +1867,7 @@ client.indices.shardStores(...) ==== shrink Allow to shrink an existing index into a new index with fewer primary shards. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html[Endpoint documentation] +{ref}/indices-shrink-index.html[Endpoint documentation] [source,ts] ---- client.indices.shrink(...) @@ -1823,7 +1877,7 @@ client.indices.shrink(...) ==== simulate_index_template Simulate matching the given index name against the index templates in the system -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation] +{ref}/indices-templates.html[Endpoint documentation] [source,ts] ---- client.indices.simulateIndexTemplate(...) @@ -1833,7 +1887,7 @@ client.indices.simulateIndexTemplate(...) ==== simulate_template Simulate resolving the given template name or body -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[Endpoint documentation] +{ref}/indices-templates.html[Endpoint documentation] [source,ts] ---- client.indices.simulateTemplate(...) @@ -1843,7 +1897,7 @@ client.indices.simulateTemplate(...) ==== split Allows you to split an existing index into a new index with more primary shards. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html[Endpoint documentation] +{ref}/indices-split-index.html[Endpoint documentation] [source,ts] ---- client.indices.split(...) @@ -1853,7 +1907,7 @@ client.indices.split(...) ==== stats Provides statistics on operations happening in an index. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html[Endpoint documentation] +{ref}/indices-stats.html[Endpoint documentation] [source,ts] ---- client.indices.stats(...) @@ -1863,7 +1917,7 @@ client.indices.stats(...) ==== unfreeze Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. 
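+
+For example (a minimal sketch; `my-frozen-index` is a hypothetical index
+name):
+
+[source,ts]
+----
+await client.indices.unfreeze({ index: 'my-frozen-index' })
+----
+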
-https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html[Endpoint documentation] +{ref}/unfreeze-index-api.html[Endpoint documentation] [source,ts] ---- client.indices.unfreeze(...) @@ -1873,7 +1927,7 @@ client.indices.unfreeze(...) ==== update_aliases Updates index aliases. -https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html[Endpoint documentation] +{ref}/indices-aliases.html[Endpoint documentation] [source,ts] ---- client.indices.updateAliases(...) @@ -1883,7 +1937,7 @@ client.indices.updateAliases(...) ==== validate_query Allows a user to validate a potentially expensive query without executing it. -https://www.elastic.co/guide/en/elasticsearch/reference/current/search-validate.html[Endpoint documentation] +{ref}/search-validate.html[Endpoint documentation] [source,ts] ---- client.indices.validateQuery(...) @@ -1895,7 +1949,7 @@ client.indices.validateQuery(...) ==== delete_pipeline Deletes a pipeline. -https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-pipeline-api.html[Endpoint documentation] +{ref}/delete-pipeline-api.html[Endpoint documentation] [source,ts] ---- client.ingest.deletePipeline(...) @@ -1905,7 +1959,7 @@ client.ingest.deletePipeline(...) ==== geo_ip_stats Returns statistical information about geoip databases -https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-stats-api.html[Endpoint documentation] +{ref}/geoip-stats-api.html[Endpoint documentation] [source,ts] ---- client.ingest.geoIpStats(...) @@ -1915,7 +1969,7 @@ client.ingest.geoIpStats(...) ==== get_pipeline Returns a pipeline. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-pipeline-api.html[Endpoint documentation] +{ref}/get-pipeline-api.html[Endpoint documentation] [source,ts] ---- client.ingest.getPipeline(...) @@ -1925,7 +1979,7 @@ client.ingest.getPipeline(...) ==== processor_grok Returns a list of the built-in patterns. -https://www.elastic.co/guide/en/elasticsearch/reference/current/grok-processor.html#grok-processor-rest-get[Endpoint documentation] +{ref}/grok-processor.html[Endpoint documentation] [source,ts] ---- client.ingest.processorGrok(...) @@ -1935,7 +1989,7 @@ client.ingest.processorGrok(...) ==== put_pipeline Creates or updates a pipeline. -https://www.elastic.co/guide/en/elasticsearch/reference/current/put-pipeline-api.html[Endpoint documentation] +{ref}/put-pipeline-api.html[Endpoint documentation] [source,ts] ---- client.ingest.putPipeline(...) @@ -1945,7 +1999,7 @@ client.ingest.putPipeline(...) ==== simulate Allows to simulate a pipeline with example documents. -https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-pipeline-api.html[Endpoint documentation] +{ref}/simulate-pipeline-api.html[Endpoint documentation] [source,ts] ---- client.ingest.simulate(...) @@ -1957,7 +2011,7 @@ client.ingest.simulate(...) ==== delete Deletes licensing information for the cluster -https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-license.html[Endpoint documentation] +{ref}/delete-license.html[Endpoint documentation] [source,ts] ---- client.license.delete(...) @@ -1967,7 +2021,7 @@ client.license.delete(...) ==== get Retrieves licensing information for the cluster -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-license.html[Endpoint documentation] +{ref}/get-license.html[Endpoint documentation] [source,ts] ---- client.license.get(...) @@ -1977,7 +2031,7 @@ client.license.get(...) 
==== get_basic_status Retrieves information about the status of the basic license. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html[Endpoint documentation] +{ref}/get-basic-status.html[Endpoint documentation] [source,ts] ---- client.license.getBasicStatus(...) @@ -1987,7 +2041,7 @@ client.license.getBasicStatus(...) ==== get_trial_status Retrieves information about the status of the trial license. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trial-status.html[Endpoint documentation] +{ref}/get-trial-status.html[Endpoint documentation] [source,ts] ---- client.license.getTrialStatus(...) @@ -1997,7 +2051,7 @@ client.license.getTrialStatus(...) ==== post Updates the license for the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/current/update-license.html[Endpoint documentation] +{ref}/update-license.html[Endpoint documentation] [source,ts] ---- client.license.post(...) @@ -2007,7 +2061,7 @@ client.license.post(...) ==== post_start_basic Starts an indefinite basic license. -https://www.elastic.co/guide/en/elasticsearch/reference/current/start-basic.html[Endpoint documentation] +{ref}/start-basic.html[Endpoint documentation] [source,ts] ---- client.license.postStartBasic(...) @@ -2017,7 +2071,7 @@ client.license.postStartBasic(...) ==== post_start_trial starts a limited time trial license. -https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trial.html[Endpoint documentation] +{ref}/start-trial.html[Endpoint documentation] [source,ts] ---- client.license.postStartTrial(...) @@ -2029,7 +2083,7 @@ client.license.postStartTrial(...) ==== delete_pipeline Deletes Logstash Pipelines used by Central Management -https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-delete-pipeline.html[Endpoint documentation] +{ref}/logstash-api-delete-pipeline.html[Endpoint documentation] [source,ts] ---- client.logstash.deletePipeline(...) @@ -2039,7 +2093,7 @@ client.logstash.deletePipeline(...) ==== get_pipeline Retrieves Logstash Pipelines used by Central Management -https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-get-pipeline.html[Endpoint documentation] +{ref}/logstash-api-get-pipeline.html[Endpoint documentation] [source,ts] ---- client.logstash.getPipeline(...) @@ -2049,7 +2103,7 @@ client.logstash.getPipeline(...) ==== put_pipeline Adds and updates Logstash Pipelines used for Central Management -https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-put-pipeline.html[Endpoint documentation] +{ref}/logstash-api-put-pipeline.html[Endpoint documentation] [source,ts] ---- client.logstash.putPipeline(...) @@ -2061,7 +2115,7 @@ client.logstash.putPipeline(...) ==== deprecations Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. -https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html[Endpoint documentation] +{ref}/migration-api-deprecation.html[Endpoint documentation] [source,ts] ---- client.migration.deprecations(...) @@ -2071,7 +2125,7 @@ client.migration.deprecations(...) 
==== get_feature_upgrade_status Find out whether system features need to be upgraded or not -https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html[Endpoint documentation] +{ref}/migration-api-feature-upgrade.html[Endpoint documentation] [source,ts] ---- client.migration.getFeatureUpgradeStatus(...) @@ -2081,7 +2135,7 @@ client.migration.getFeatureUpgradeStatus(...) ==== post_feature_upgrade Begin upgrades for system features -https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html[Endpoint documentation] +{ref}/migration-api-feature-upgrade.html[Endpoint documentation] [source,ts] ---- client.migration.postFeatureUpgrade(...) @@ -2092,6 +2146,8 @@ client.migration.postFeatureUpgrade(...) [discrete] ==== clear_trained_model_deployment_cache Clear the cached results from a trained model deployment + +{ref}/clear-trained-model-deployment-cache.html[Endpoint documentation] [source,ts] ---- client.ml.clearTrainedModelDeploymentCache(...) @@ -2101,7 +2157,7 @@ client.ml.clearTrainedModelDeploymentCache(...) ==== close_job Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html[Endpoint documentation] +{ref}/ml-close-job.html[Endpoint documentation] [source,ts] ---- client.ml.closeJob(...) @@ -2111,7 +2167,7 @@ client.ml.closeJob(...) ==== delete_calendar Deletes a calendar. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html[Endpoint documentation] +{ref}/ml-delete-calendar.html[Endpoint documentation] [source,ts] ---- client.ml.deleteCalendar(...) @@ -2121,7 +2177,7 @@ client.ml.deleteCalendar(...) ==== delete_calendar_event Deletes scheduled events from a calendar. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-event.html[Endpoint documentation] +{ref}/ml-delete-calendar-event.html[Endpoint documentation] [source,ts] ---- client.ml.deleteCalendarEvent(...) @@ -2131,7 +2187,7 @@ client.ml.deleteCalendarEvent(...) ==== delete_calendar_job Deletes anomaly detection jobs from a calendar. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-job.html[Endpoint documentation] +{ref}/ml-delete-calendar-job.html[Endpoint documentation] [source,ts] ---- client.ml.deleteCalendarJob(...) @@ -2141,7 +2197,7 @@ client.ml.deleteCalendarJob(...) ==== delete_data_frame_analytics Deletes an existing data frame analytics job. -https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-dfanalytics.html[Endpoint documentation] +{ref}/delete-dfanalytics.html[Endpoint documentation] [source,ts] ---- client.ml.deleteDataFrameAnalytics(...) @@ -2151,7 +2207,7 @@ client.ml.deleteDataFrameAnalytics(...) ==== delete_datafeed Deletes an existing datafeed. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html[Endpoint documentation] +{ref}/ml-delete-datafeed.html[Endpoint documentation] [source,ts] ---- client.ml.deleteDatafeed(...) @@ -2161,7 +2217,7 @@ client.ml.deleteDatafeed(...) ==== delete_expired_data Deletes expired and unused machine learning data. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-expired-data.html[Endpoint documentation] +{ref}/ml-delete-expired-data.html[Endpoint documentation] [source,ts] ---- client.ml.deleteExpiredData(...) @@ -2171,7 +2227,7 @@ client.ml.deleteExpiredData(...) 
==== delete_filter Deletes a filter. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-filter.html[Endpoint documentation] +{ref}/ml-delete-filter.html[Endpoint documentation] [source,ts] ---- client.ml.deleteFilter(...) @@ -2181,7 +2237,7 @@ client.ml.deleteFilter(...) ==== delete_forecast Deletes forecasts from a machine learning job. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html[Endpoint documentation] +{ref}/ml-delete-forecast.html[Endpoint documentation] [source,ts] ---- client.ml.deleteForecast(...) @@ -2191,7 +2247,7 @@ client.ml.deleteForecast(...) ==== delete_job Deletes an existing anomaly detection job. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html[Endpoint documentation] +{ref}/ml-delete-job.html[Endpoint documentation] [source,ts] ---- client.ml.deleteJob(...) @@ -2201,7 +2257,7 @@ client.ml.deleteJob(...) ==== delete_model_snapshot Deletes an existing model snapshot. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html[Endpoint documentation] +{ref}/ml-delete-snapshot.html[Endpoint documentation] [source,ts] ---- client.ml.deleteModelSnapshot(...) @@ -2211,7 +2267,7 @@ client.ml.deleteModelSnapshot(...) ==== delete_trained_model Deletes an existing trained inference model that is currently not referenced by an ingest pipeline. -https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models.html[Endpoint documentation] +{ref}/delete-trained-models.html[Endpoint documentation] [source,ts] ---- client.ml.deleteTrainedModel(...) @@ -2221,7 +2277,7 @@ client.ml.deleteTrainedModel(...) ==== delete_trained_model_alias Deletes a model alias that refers to the trained model -https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models-aliases.html[Endpoint documentation] +{ref}/delete-trained-models-aliases.html[Endpoint documentation] [source,ts] ---- client.ml.deleteTrainedModelAlias(...) @@ -2231,7 +2287,7 @@ client.ml.deleteTrainedModelAlias(...) ==== estimate_model_memory Estimates the model memory -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-apis.html[Endpoint documentation] +{ref}/ml-apis.html[Endpoint documentation] [source,ts] ---- client.ml.estimateModelMemory(...) @@ -2241,7 +2297,7 @@ client.ml.estimateModelMemory(...) ==== evaluate_data_frame Evaluates the data frame analytics for an annotated index. -https://www.elastic.co/guide/en/elasticsearch/reference/current/evaluate-dfanalytics.html[Endpoint documentation] +{ref}/evaluate-dfanalytics.html[Endpoint documentation] [source,ts] ---- client.ml.evaluateDataFrame(...) @@ -2251,7 +2307,7 @@ client.ml.evaluateDataFrame(...) ==== explain_data_frame_analytics Explains a data frame analytics config. -http://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalytics.html[Endpoint documentation] +{ref}/explain-dfanalytics.html[Endpoint documentation] [source,ts] ---- client.ml.explainDataFrameAnalytics(...) @@ -2261,7 +2317,7 @@ client.ml.explainDataFrameAnalytics(...) ==== flush_job Forces any buffered data to be processed by the job. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html[Endpoint documentation] +{ref}/ml-flush-job.html[Endpoint documentation] [source,ts] ---- client.ml.flushJob(...) @@ -2271,7 +2327,7 @@ client.ml.flushJob(...) ==== forecast Predicts the future behavior of a time series by using its historical behavior. 
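+
+For example, a minimal sketch that requests a three-day forecast (assuming
+an open anomaly detection job with the hypothetical id `my-job`):
+
+[source,ts]
+----
+await client.ml.forecast({
+  job_id: 'my-job',
+  duration: '3d'
+})
+----
+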
-https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-forecast.html[Endpoint documentation] +{ref}/ml-forecast.html[Endpoint documentation] [source,ts] ---- client.ml.forecast(...) @@ -2281,7 +2337,7 @@ client.ml.forecast(...) ==== get_buckets Retrieves anomaly detection job results for one or more buckets. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html[Endpoint documentation] +{ref}/ml-get-bucket.html[Endpoint documentation] [source,ts] ---- client.ml.getBuckets(...) @@ -2291,7 +2347,7 @@ client.ml.getBuckets(...) ==== get_calendar_events Retrieves information about the scheduled events in calendars. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-event.html[Endpoint documentation] +{ref}/ml-get-calendar-event.html[Endpoint documentation] [source,ts] ---- client.ml.getCalendarEvents(...) @@ -2301,7 +2357,7 @@ client.ml.getCalendarEvents(...) ==== get_calendars Retrieves configuration information for calendars. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.html[Endpoint documentation] +{ref}/ml-get-calendar.html[Endpoint documentation] [source,ts] ---- client.ml.getCalendars(...) @@ -2311,7 +2367,7 @@ client.ml.getCalendars(...) ==== get_categories Retrieves anomaly detection job results for one or more categories. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html[Endpoint documentation] +{ref}/ml-get-category.html[Endpoint documentation] [source,ts] ---- client.ml.getCategories(...) @@ -2321,7 +2377,7 @@ client.ml.getCategories(...) ==== get_data_frame_analytics Retrieves configuration information for data frame analytics jobs. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics.html[Endpoint documentation] +{ref}/get-dfanalytics.html[Endpoint documentation] [source,ts] ---- client.ml.getDataFrameAnalytics(...) @@ -2331,7 +2387,7 @@ client.ml.getDataFrameAnalytics(...) ==== get_data_frame_analytics_stats Retrieves usage information for data frame analytics jobs. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics-stats.html[Endpoint documentation] +{ref}/get-dfanalytics-stats.html[Endpoint documentation] [source,ts] ---- client.ml.getDataFrameAnalyticsStats(...) @@ -2341,7 +2397,7 @@ client.ml.getDataFrameAnalyticsStats(...) ==== get_datafeed_stats Retrieves usage information for datafeeds. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html[Endpoint documentation] +{ref}/ml-get-datafeed-stats.html[Endpoint documentation] [source,ts] ---- client.ml.getDatafeedStats(...) @@ -2351,7 +2407,7 @@ client.ml.getDatafeedStats(...) ==== get_datafeeds Retrieves configuration information for datafeeds. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html[Endpoint documentation] +{ref}/ml-get-datafeed.html[Endpoint documentation] [source,ts] ---- client.ml.getDatafeeds(...) @@ -2361,7 +2417,7 @@ client.ml.getDatafeeds(...) ==== get_filters Retrieves filters. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.html[Endpoint documentation] +{ref}/ml-get-filter.html[Endpoint documentation] [source,ts] ---- client.ml.getFilters(...) @@ -2371,7 +2427,7 @@ client.ml.getFilters(...) ==== get_influencers Retrieves anomaly detection job results for one or more influencers. 
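+
+For example (a minimal sketch; `my-job` is a hypothetical job id):
+
+[source,ts]
+----
+await client.ml.getInfluencers({ job_id: 'my-job' })
+----
+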
-https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html[Endpoint documentation] +{ref}/ml-get-influencer.html[Endpoint documentation] [source,ts] ---- client.ml.getInfluencers(...) @@ -2381,7 +2437,7 @@ client.ml.getInfluencers(...) ==== get_job_stats Retrieves usage information for anomaly detection jobs. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html[Endpoint documentation] +{ref}/ml-get-job-stats.html[Endpoint documentation] [source,ts] ---- client.ml.getJobStats(...) @@ -2391,7 +2447,7 @@ client.ml.getJobStats(...) ==== get_jobs Retrieves configuration information for anomaly detection jobs. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html[Endpoint documentation] +{ref}/ml-get-job.html[Endpoint documentation] [source,ts] ---- client.ml.getJobs(...) @@ -2401,7 +2457,7 @@ client.ml.getJobs(...) ==== get_memory_stats Returns information on how ML is using memory. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-memory.html[Endpoint documentation] +{ref}/get-ml-memory.html[Endpoint documentation] [source,ts] ---- client.ml.getMemoryStats(...) @@ -2411,7 +2467,7 @@ client.ml.getMemoryStats(...) ==== get_model_snapshot_upgrade_stats Gets stats for anomaly detection job model snapshot upgrades that are in progress. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-model-snapshot-upgrade-stats.html[Endpoint documentation] +{ref}/ml-get-job-model-snapshot-upgrade-stats.html[Endpoint documentation] [source,ts] ---- client.ml.getModelSnapshotUpgradeStats(...) @@ -2421,7 +2477,7 @@ client.ml.getModelSnapshotUpgradeStats(...) ==== get_model_snapshots Retrieves information about model snapshots. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html[Endpoint documentation] +{ref}/ml-get-snapshot.html[Endpoint documentation] [source,ts] ---- client.ml.getModelSnapshots(...) @@ -2431,7 +2487,7 @@ client.ml.getModelSnapshots(...) ==== get_overall_buckets Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-buckets.html[Endpoint documentation] +{ref}/ml-get-overall-buckets.html[Endpoint documentation] [source,ts] ---- client.ml.getOverallBuckets(...) @@ -2441,7 +2497,7 @@ client.ml.getOverallBuckets(...) ==== get_records Retrieves anomaly records for an anomaly detection job. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html[Endpoint documentation] +{ref}/ml-get-record.html[Endpoint documentation] [source,ts] ---- client.ml.getRecords(...) @@ -2451,7 +2507,7 @@ client.ml.getRecords(...) ==== get_trained_models Retrieves configuration information for a trained inference model. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models.html[Endpoint documentation] +{ref}/get-trained-models.html[Endpoint documentation] [source,ts] ---- client.ml.getTrainedModels(...) @@ -2461,7 +2517,7 @@ client.ml.getTrainedModels(...) ==== get_trained_models_stats Retrieves usage information for trained inference models. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models-stats.html[Endpoint documentation] +{ref}/get-trained-models-stats.html[Endpoint documentation] [source,ts] ---- client.ml.getTrainedModelsStats(...) @@ -2471,7 +2527,7 @@ client.ml.getTrainedModelsStats(...) 
==== infer_trained_model Evaluate a trained model. -https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html[Endpoint documentation] +{ref}/infer-trained-model.html[Endpoint documentation] [source,ts] ---- client.ml.inferTrainedModel(...) @@ -2481,7 +2537,7 @@ client.ml.inferTrainedModel(...) ==== info Returns defaults and limits used by machine learning. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-info.html[Endpoint documentation] +{ref}/get-ml-info.html[Endpoint documentation] [source,ts] ---- client.ml.info(...) @@ -2491,7 +2547,7 @@ client.ml.info(...) ==== open_job Opens one or more anomaly detection jobs. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html[Endpoint documentation] +{ref}/ml-open-job.html[Endpoint documentation] [source,ts] ---- client.ml.openJob(...) @@ -2501,7 +2557,7 @@ client.ml.openJob(...) ==== post_calendar_events Posts scheduled events in a calendar. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar-event.html[Endpoint documentation] +{ref}/ml-post-calendar-event.html[Endpoint documentation] [source,ts] ---- client.ml.postCalendarEvents(...) @@ -2511,7 +2567,7 @@ client.ml.postCalendarEvents(...) ==== post_data Sends data to an anomaly detection job for analysis. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html[Endpoint documentation] +{ref}/ml-post-data.html[Endpoint documentation] [source,ts] ---- client.ml.postData(...) @@ -2521,7 +2577,7 @@ client.ml.postData(...) ==== preview_data_frame_analytics Previews that will be analyzed given a data frame analytics config. -http://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalytics.html[Endpoint documentation] +{ref}/preview-dfanalytics.html[Endpoint documentation] [source,ts] ---- client.ml.previewDataFrameAnalytics(...) @@ -2531,7 +2587,7 @@ client.ml.previewDataFrameAnalytics(...) ==== preview_datafeed Previews a datafeed. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html[Endpoint documentation] +{ref}/ml-preview-datafeed.html[Endpoint documentation] [source,ts] ---- client.ml.previewDatafeed(...) @@ -2541,7 +2597,7 @@ client.ml.previewDatafeed(...) ==== put_calendar Instantiates a calendar. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.html[Endpoint documentation] +{ref}/ml-put-calendar.html[Endpoint documentation] [source,ts] ---- client.ml.putCalendar(...) @@ -2551,7 +2607,7 @@ client.ml.putCalendar(...) ==== put_calendar_job Adds an anomaly detection job to a calendar. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-job.html[Endpoint documentation] +{ref}/ml-put-calendar-job.html[Endpoint documentation] [source,ts] ---- client.ml.putCalendarJob(...) @@ -2561,7 +2617,7 @@ client.ml.putCalendarJob(...) ==== put_data_frame_analytics Instantiates a data frame analytics job. -https://www.elastic.co/guide/en/elasticsearch/reference/current/put-dfanalytics.html[Endpoint documentation] +{ref}/put-dfanalytics.html[Endpoint documentation] [source,ts] ---- client.ml.putDataFrameAnalytics(...) @@ -2571,7 +2627,7 @@ client.ml.putDataFrameAnalytics(...) ==== put_datafeed Instantiates a datafeed. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html[Endpoint documentation] +{ref}/ml-put-datafeed.html[Endpoint documentation] [source,ts] ---- client.ml.putDatafeed(...) 
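+// For example, a minimal sketch with hypothetical names, assuming the
+// anomaly detection job 'my-job' and the index 'my-data' already exist:
+// await client.ml.putDatafeed({
+//   datafeed_id: 'datafeed-my-job',
+//   job_id: 'my-job',
+//   indices: ['my-data'],
+//   query: { match_all: {} }
+// })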
@@ -2581,7 +2637,7 @@ client.ml.putDatafeed(...) ==== put_filter Instantiates a filter. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-filter.html[Endpoint documentation] +{ref}/ml-put-filter.html[Endpoint documentation] [source,ts] ---- client.ml.putFilter(...) @@ -2591,7 +2647,7 @@ client.ml.putFilter(...) ==== put_job Instantiates an anomaly detection job. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html[Endpoint documentation] +{ref}/ml-put-job.html[Endpoint documentation] [source,ts] ---- client.ml.putJob(...) @@ -2601,7 +2657,7 @@ client.ml.putJob(...) ==== put_trained_model Creates an inference trained model. -https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models.html[Endpoint documentation] +{ref}/put-trained-models.html[Endpoint documentation] [source,ts] ---- client.ml.putTrainedModel(...) @@ -2611,7 +2667,7 @@ client.ml.putTrainedModel(...) ==== put_trained_model_alias Creates a new model alias (or reassigns an existing one) to refer to the trained model -https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models-aliases.html[Endpoint documentation] +{ref}/put-trained-models-aliases.html[Endpoint documentation] [source,ts] ---- client.ml.putTrainedModelAlias(...) @@ -2621,7 +2677,7 @@ client.ml.putTrainedModelAlias(...) ==== put_trained_model_definition_part Creates part of a trained model definition -https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-definition-part.html[Endpoint documentation] +{ref}/put-trained-model-definition-part.html[Endpoint documentation] [source,ts] ---- client.ml.putTrainedModelDefinitionPart(...) @@ -2631,7 +2687,7 @@ client.ml.putTrainedModelDefinitionPart(...) ==== put_trained_model_vocabulary Creates a trained model vocabulary -https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-vocabulary.html[Endpoint documentation] +{ref}/put-trained-model-vocabulary.html[Endpoint documentation] [source,ts] ---- client.ml.putTrainedModelVocabulary(...) @@ -2641,7 +2697,7 @@ client.ml.putTrainedModelVocabulary(...) ==== reset_job Resets an existing anomaly detection job. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-reset-job.html[Endpoint documentation] +{ref}/ml-reset-job.html[Endpoint documentation] [source,ts] ---- client.ml.resetJob(...) @@ -2651,7 +2707,7 @@ client.ml.resetJob(...) ==== revert_model_snapshot Reverts to a specific snapshot. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html[Endpoint documentation] +{ref}/ml-revert-snapshot.html[Endpoint documentation] [source,ts] ---- client.ml.revertModelSnapshot(...) @@ -2661,7 +2717,7 @@ client.ml.revertModelSnapshot(...) ==== set_upgrade_mode Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-set-upgrade-mode.html[Endpoint documentation] +{ref}/ml-set-upgrade-mode.html[Endpoint documentation] [source,ts] ---- client.ml.setUpgradeMode(...) @@ -2671,7 +2727,7 @@ client.ml.setUpgradeMode(...) ==== start_data_frame_analytics Starts a data frame analytics job. -https://www.elastic.co/guide/en/elasticsearch/reference/current/start-dfanalytics.html[Endpoint documentation] +{ref}/start-dfanalytics.html[Endpoint documentation] [source,ts] ---- client.ml.startDataFrameAnalytics(...) @@ -2681,7 +2737,7 @@ client.ml.startDataFrameAnalytics(...) 
==== start_datafeed
Starts one or more datafeeds.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html[Endpoint documentation]
+{ref}/ml-start-datafeed.html[Endpoint documentation]
[source,ts]
----
client.ml.startDatafeed(...)
@@ -2691,7 +2747,7 @@ client.ml.startDatafeed(...)
==== start_trained_model_deployment
Start a trained model deployment.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trained-model-deployment.html[Endpoint documentation]
+{ref}/start-trained-model-deployment.html[Endpoint documentation]
[source,ts]
----
client.ml.startTrainedModelDeployment(...)
@@ -2701,7 +2757,7 @@ client.ml.startTrainedModelDeployment(...)
==== stop_data_frame_analytics
Stops one or more data frame analytics jobs.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-dfanalytics.html[Endpoint documentation]
+{ref}/stop-dfanalytics.html[Endpoint documentation]
[source,ts]
----
client.ml.stopDataFrameAnalytics(...)
@@ -2711,7 +2767,7 @@ client.ml.stopDataFrameAnalytics(...)
==== stop_datafeed
Stops one or more datafeeds.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html[Endpoint documentation]
+{ref}/ml-stop-datafeed.html[Endpoint documentation]
[source,ts]
----
client.ml.stopDatafeed(...)
@@ -2721,7 +2777,7 @@ client.ml.stopDatafeed(...)
==== stop_trained_model_deployment
Stop a trained model deployment.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-trained-model-deployment.html[Endpoint documentation]
+{ref}/stop-trained-model-deployment.html[Endpoint documentation]
[source,ts]
----
client.ml.stopTrainedModelDeployment(...)
@@ -2731,7 +2787,7 @@ client.ml.stopTrainedModelDeployment(...)
==== update_data_frame_analytics
Updates certain properties of a data frame analytics job.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/update-dfanalytics.html[Endpoint documentation]
+{ref}/update-dfanalytics.html[Endpoint documentation]
[source,ts]
----
client.ml.updateDataFrameAnalytics(...)
@@ -2741,7 +2797,7 @@ client.ml.updateDataFrameAnalytics(...)
==== update_datafeed
Updates certain properties of a datafeed.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html[Endpoint documentation]
+{ref}/ml-update-datafeed.html[Endpoint documentation]
[source,ts]
----
client.ml.updateDatafeed(...)
@@ -2751,7 +2807,7 @@ client.ml.updateDatafeed(...)
==== update_filter
Updates the description of a filter, adds items, or removes items.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-filter.html[Endpoint documentation]
+{ref}/ml-update-filter.html[Endpoint documentation]
[source,ts]
----
client.ml.updateFilter(...)
@@ -2761,7 +2817,7 @@ client.ml.updateFilter(...)
==== update_job
Updates certain properties of an anomaly detection job.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html[Endpoint documentation]
+{ref}/ml-update-job.html[Endpoint documentation]
[source,ts]
----
client.ml.updateJob(...)
@@ -2771,17 +2827,27 @@ client.ml.updateJob(...)
==== update_model_snapshot
Updates certain properties of a snapshot.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html[Endpoint documentation]
+{ref}/ml-update-snapshot.html[Endpoint documentation]
[source,ts]
----
client.ml.updateModelSnapshot(...)
----

+[discrete]
+==== update_trained_model_deployment
+Updates certain properties of a trained model deployment.
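+
+For example, a minimal sketch that scales a deployment (assuming the
+hypothetical model `my-model` is already deployed):
+
+[source,ts]
+----
+await client.ml.updateTrainedModelDeployment({
+  model_id: 'my-model',
+  number_of_allocations: 2
+})
+----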
+ +{ref}/update-trained-model-deployment.html[Endpoint documentation] +[source,ts] +---- +client.ml.updateTrainedModelDeployment(...) +---- + [discrete] ==== upgrade_job_snapshot Upgrades a given job snapshot to the current major version. -https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-upgrade-job-model-snapshot.html[Endpoint documentation] +{ref}/ml-upgrade-job-model-snapshot.html[Endpoint documentation] [source,ts] ---- client.ml.upgradeJobSnapshot(...) @@ -2793,7 +2859,7 @@ client.ml.upgradeJobSnapshot(...) ==== clear_repositories_metering_archive Removes the archived repositories metering information present in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-repositories-metering-archive-api.html[Endpoint documentation] +{ref}/clear-repositories-metering-archive-api.html[Endpoint documentation] [source,ts] ---- client.nodes.clearRepositoriesMeteringArchive(...) @@ -2803,7 +2869,7 @@ client.nodes.clearRepositoriesMeteringArchive(...) ==== get_repositories_metering_info Returns cluster repositories metering information. -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html[Endpoint documentation] +{ref}/get-repositories-metering-api.html[Endpoint documentation] [source,ts] ---- client.nodes.getRepositoriesMeteringInfo(...) @@ -2813,7 +2879,7 @@ client.nodes.getRepositoriesMeteringInfo(...) ==== hot_threads Returns information about hot threads on each node in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-hot-threads.html[Endpoint documentation] +{ref}/cluster-nodes-hot-threads.html[Endpoint documentation] [source,ts] ---- client.nodes.hotThreads(...) @@ -2823,7 +2889,7 @@ client.nodes.hotThreads(...) ==== info Returns information about nodes in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-info.html[Endpoint documentation] +{ref}/cluster-nodes-info.html[Endpoint documentation] [source,ts] ---- client.nodes.info(...) @@ -2833,7 +2899,7 @@ client.nodes.info(...) ==== reload_secure_settings Reloads secure settings. -https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html#reloadable-secure-settings[Endpoint documentation] +{ref}/secure-settings.html[Endpoint documentation] [source,ts] ---- client.nodes.reloadSecureSettings(...) @@ -2843,7 +2909,7 @@ client.nodes.reloadSecureSettings(...) ==== stats Returns statistical information about nodes in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html[Endpoint documentation] +{ref}/cluster-nodes-stats.html[Endpoint documentation] [source,ts] ---- client.nodes.stats(...) @@ -2853,7 +2919,7 @@ client.nodes.stats(...) ==== usage Returns low-level information about REST actions usage on nodes. -https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-usage.html[Endpoint documentation] +{ref}/cluster-nodes-usage.html[Endpoint documentation] [source,ts] ---- client.nodes.usage(...) @@ -2865,7 +2931,7 @@ client.nodes.usage(...) ==== delete_job Deletes an existing rollup job. -https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-delete-job.html[Endpoint documentation] +{ref}/rollup-delete-job.html[Endpoint documentation] [source,ts] ---- client.rollup.deleteJob(...) @@ -2875,7 +2941,7 @@ client.rollup.deleteJob(...) ==== get_jobs Retrieves the configuration, stats, and status of rollup jobs. 
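+
+For example (a minimal sketch; `my-rollup-job` is a hypothetical job id,
+and omitting `id` returns all jobs):
+
+[source,ts]
+----
+await client.rollup.getJobs({ id: 'my-rollup-job' })
+----
+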
-https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-job.html[Endpoint documentation]
+{ref}/rollup-get-job.html[Endpoint documentation]
[source,ts]
----
client.rollup.getJobs(...)
@@ -2885,7 +2951,7 @@ client.rollup.getJobs(...)
==== get_rollup_caps
Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-caps.html[Endpoint documentation]
+{ref}/rollup-get-rollup-caps.html[Endpoint documentation]
[source,ts]
----
client.rollup.getRollupCaps(...)
@@ -2895,7 +2961,7 @@ client.rollup.getRollupCaps(...)
==== get_rollup_index_caps
Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored).

-https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-index-caps.html[Endpoint documentation]
+{ref}/rollup-get-rollup-index-caps.html[Endpoint documentation]
[source,ts]
----
client.rollup.getRollupIndexCaps(...)
@@ -2905,7 +2971,7 @@ client.rollup.getRollupIndexCaps(...)
==== put_job
Creates a rollup job.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-put-job.html[Endpoint documentation]
+{ref}/rollup-put-job.html[Endpoint documentation]
[source,ts]
----
client.rollup.putJob(...)
@@ -2915,7 +2981,7 @@ client.rollup.putJob(...)
==== rollup_search
Enables searching rolled-up data using the standard query DSL.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-search.html[Endpoint documentation]
+{ref}/rollup-search.html[Endpoint documentation]
[source,ts]
----
client.rollup.rollupSearch(...)
@@ -2925,7 +2991,7 @@ client.rollup.rollupSearch(...)
==== start_job
Starts an existing, stopped rollup job.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-start-job.html[Endpoint documentation]
+{ref}/rollup-start-job.html[Endpoint documentation]
[source,ts]
----
client.rollup.startJob(...)
@@ -2935,19 +3001,111 @@ client.rollup.startJob(...)
==== stop_job
Stops an existing, started rollup job.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-stop-job.html[Endpoint documentation]
+{ref}/rollup-stop-job.html[Endpoint documentation]
[source,ts]
----
client.rollup.stopJob(...)
----

+[discrete]
+=== search_application
+[discrete]
+==== delete
+Deletes a search application.
+
+{ref}/put-search-application.html[Endpoint documentation]
+[source,ts]
+----
+client.searchApplication.delete(...)
+----
+
+[discrete]
+==== delete_behavioral_analytics
+Delete a behavioral analytics collection.
+
+{ref}/delete-analytics-collection.html[Endpoint documentation]
+[source,ts]
+----
+client.searchApplication.deleteBehavioralAnalytics(...)
+----
+
+[discrete]
+==== get
+Returns the details about a search application.
+
+{ref}/get-search-application.html[Endpoint documentation]
+[source,ts]
+----
+client.searchApplication.get(...)
+----
+
+[discrete]
+==== get_behavioral_analytics
+Returns the existing behavioral analytics collections.
+
+{ref}/list-analytics-collection.html[Endpoint documentation]
+[source,ts]
+----
+client.searchApplication.getBehavioralAnalytics(...)
+----
+
+[discrete]
+==== list
+Returns the existing search applications.
+
+{ref}/list-search-applications.html[Endpoint documentation]
+[source,ts]
+----
+client.searchApplication.list(...)
+----
+
+[discrete]
+==== post_behavioral_analytics_event
+Creates a behavioral analytics event for an existing collection.
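+
+The methods in this namespace follow the client's usual calling convention.
+For example, listing the configured search applications is a one-liner:
+
+[source,ts]
+----
+await client.searchApplication.list()
+----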
+ +http://todo.com/tbd[Endpoint documentation] +[source,ts] +---- +client.searchApplication.postBehavioralAnalyticsEvent(...) +---- + +[discrete] +==== put +Creates or updates a search application. + +{ref}/put-search-application.html[Endpoint documentation] +[source,ts] +---- +client.searchApplication.put(...) +---- + +[discrete] +==== put_behavioral_analytics +Creates a behavioral analytics collection. + +{ref}/put-analytics-collection.html[Endpoint documentation] +[source,ts] +---- +client.searchApplication.putBehavioralAnalytics(...) +---- + +[discrete] +==== search +Perform a search against a search application + +{ref}/search-application-search.html[Endpoint documentation] +[source,ts] +---- +client.searchApplication.search(...) +---- + [discrete] === searchable_snapshots [discrete] ==== cache_stats Retrieve node-level cache statistics about searchable snapshots. -https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html[Endpoint documentation] +{ref}/searchable-snapshots-apis.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.cacheStats(...) @@ -2957,7 +3115,7 @@ client.searchableSnapshots.cacheStats(...) ==== clear_cache Clear the cache of searchable snapshots. -https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html[Endpoint documentation] +{ref}/searchable-snapshots-apis.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.clearCache(...) @@ -2967,7 +3125,7 @@ client.searchableSnapshots.clearCache(...) ==== mount Mount a snapshot as a searchable index. -https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-mount-snapshot.html[Endpoint documentation] +{ref}/searchable-snapshots-api-mount-snapshot.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.mount(...) @@ -2977,7 +3135,7 @@ client.searchableSnapshots.mount(...) ==== stats Retrieve shard-level statistics about searchable snapshots. -https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html[Endpoint documentation] +{ref}/searchable-snapshots-apis.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.stats(...) @@ -2989,7 +3147,7 @@ client.searchableSnapshots.stats(...) ==== authenticate Enables authentication as a user and retrieve information about the authenticated user. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html[Endpoint documentation] +{ref}/security-api-authenticate.html[Endpoint documentation] [source,ts] ---- client.security.authenticate(...) @@ -2998,6 +3156,8 @@ client.security.authenticate(...) [discrete] ==== bulk_update_api_keys Updates the attributes of multiple existing API keys. + +{ref}/security-api-bulk-update-api-keys.html[Endpoint documentation] [source,ts] ---- client.security.bulkUpdateApiKeys(...) @@ -3007,7 +3167,7 @@ client.security.bulkUpdateApiKeys(...) ==== change_password Changes the passwords of users in the native realm and built-in users. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html[Endpoint documentation] +{ref}/security-api-change-password.html[Endpoint documentation] [source,ts] ---- client.security.changePassword(...) @@ -3017,7 +3177,7 @@ client.security.changePassword(...) ==== clear_api_key_cache Clear a subset or all entries from the API key cache. 
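+
+For example, passing `ids: '*'` evicts every entry from the cache (a
+minimal sketch):
+
+[source,ts]
+----
+await client.security.clearApiKeyCache({ ids: '*' })
+----
+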
-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-api-key-cache.html[Endpoint documentation] +{ref}/security-api-clear-api-key-cache.html[Endpoint documentation] [source,ts] ---- client.security.clearApiKeyCache(...) @@ -3027,7 +3187,7 @@ client.security.clearApiKeyCache(...) ==== clear_cached_privileges Evicts application privileges from the native application privileges cache. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-privilege-cache.html[Endpoint documentation] +{ref}/security-api-clear-privilege-cache.html[Endpoint documentation] [source,ts] ---- client.security.clearCachedPrivileges(...) @@ -3037,7 +3197,7 @@ client.security.clearCachedPrivileges(...) ==== clear_cached_realms Evicts users from the user cache. Can completely clear the cache or evict specific users. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-cache.html[Endpoint documentation] +{ref}/security-api-clear-cache.html[Endpoint documentation] [source,ts] ---- client.security.clearCachedRealms(...) @@ -3047,7 +3207,7 @@ client.security.clearCachedRealms(...) ==== clear_cached_roles Evicts roles from the native role cache. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html[Endpoint documentation] +{ref}/security-api-clear-role-cache.html[Endpoint documentation] [source,ts] ---- client.security.clearCachedRoles(...) @@ -3057,7 +3217,7 @@ client.security.clearCachedRoles(...) ==== clear_cached_service_tokens Evicts tokens from the service account token caches. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-service-token-caches.html[Endpoint documentation] +{ref}/security-api-clear-service-token-caches.html[Endpoint documentation] [source,ts] ---- client.security.clearCachedServiceTokens(...) @@ -3067,7 +3227,7 @@ client.security.clearCachedServiceTokens(...) ==== create_api_key Creates an API key for access without requiring basic authentication. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html[Endpoint documentation] +{ref}/security-api-create-api-key.html[Endpoint documentation] [source,ts] ---- client.security.createApiKey(...) @@ -3077,7 +3237,7 @@ client.security.createApiKey(...) ==== create_service_token Creates a service account token for access without requiring basic authentication. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html[Endpoint documentation] +{ref}/security-api-create-service-token.html[Endpoint documentation] [source,ts] ---- client.security.createServiceToken(...) @@ -3087,7 +3247,7 @@ client.security.createServiceToken(...) ==== delete_privileges Removes application privileges. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-privilege.html[Endpoint documentation] +{ref}/security-api-delete-privilege.html[Endpoint documentation] [source,ts] ---- client.security.deletePrivileges(...) @@ -3097,7 +3257,7 @@ client.security.deletePrivileges(...) ==== delete_role Removes roles in the native realm. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role.html[Endpoint documentation] +{ref}/security-api-delete-role.html[Endpoint documentation] [source,ts] ---- client.security.deleteRole(...) @@ -3107,7 +3267,7 @@ client.security.deleteRole(...) ==== delete_role_mapping Removes role mappings. 
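+
+For example (a minimal sketch; `my-role-mapping` is a hypothetical mapping
+name):
+
+[source,ts]
+----
+await client.security.deleteRoleMapping({ name: 'my-role-mapping' })
+----
+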
-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html[Endpoint documentation]
+{ref}/security-api-delete-role-mapping.html[Endpoint documentation]
[source,ts]
----
client.security.deleteRoleMapping(...)
@@ -3117,7 +3277,7 @@ client.security.deleteRoleMapping(...)
==== delete_service_token
Deletes a service account token.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-service-token.html[Endpoint documentation]
+{ref}/security-api-delete-service-token.html[Endpoint documentation]
[source,ts]
----
client.security.deleteServiceToken(...)
@@ -3127,7 +3287,7 @@ client.security.deleteServiceToken(...)
==== delete_user
Deletes users from the native realm.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html[Endpoint documentation]
+{ref}/security-api-delete-user.html[Endpoint documentation]
[source,ts]
----
client.security.deleteUser(...)
@@ -3137,7 +3297,7 @@ client.security.deleteUser(...)
==== disable_user
Disables users in the native realm.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html[Endpoint documentation]
+{ref}/security-api-disable-user.html[Endpoint documentation]
[source,ts]
----
client.security.disableUser(...)
@@ -3147,7 +3307,7 @@ client.security.disableUser(...)
==== enable_user
Enables users in the native realm.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html[Endpoint documentation]
+{ref}/security-api-enable-user.html[Endpoint documentation]
[source,ts]
----
client.security.enableUser(...)
@@ -3157,7 +3317,7 @@ client.security.enableUser(...)
==== enroll_kibana
Allows a Kibana instance to configure itself to communicate with a secured Elasticsearch cluster.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-kibana-enrollment.html[Endpoint documentation]
+{ref}/security-api-kibana-enrollment.html[Endpoint documentation]
[source,ts]
----
client.security.enrollKibana(...)
@@ -3167,7 +3327,7 @@ client.security.enrollKibana(...)
==== enroll_node
Allows a new node to enroll in an existing cluster with security enabled.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-node-enrollment.html[Endpoint documentation]
+{ref}/security-api-node-enrollment.html[Endpoint documentation]
[source,ts]
----
client.security.enrollNode(...)
@@ -3177,7 +3337,7 @@ client.security.enrollNode(...)
==== get_api_key
Retrieves information for one or more API keys.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-api-key.html[Endpoint documentation]
+{ref}/security-api-get-api-key.html[Endpoint documentation]
[source,ts]
----
client.security.getApiKey(...)
@@ -3187,7 +3347,7 @@ client.security.getApiKey(...)
==== get_builtin_privileges
Retrieves the list of cluster privileges and index privileges that are available in this version of Elasticsearch.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-builtin-privileges.html[Endpoint documentation]
+{ref}/security-api-get-builtin-privileges.html[Endpoint documentation]
[source,ts]
----
client.security.getBuiltinPrivileges(...)
@@ -3197,7 +3357,7 @@ client.security.getBuiltinPrivileges(...)
==== get_privileges
Retrieves application privileges.
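
For instance, scoped to one hypothetical application:

[source,ts]
----
await client.security.getPrivileges({ application: 'my-app' })
----
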
-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html[Endpoint documentation]
+{ref}/security-api-get-privileges.html[Endpoint documentation]
[source,ts]
----
client.security.getPrivileges(...)
@@ -3207,7 +3367,7 @@ client.security.getPrivileges(...)
==== get_role
Retrieves roles in the native realm.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role.html[Endpoint documentation]
+{ref}/security-api-get-role.html[Endpoint documentation]
[source,ts]
----
client.security.getRole(...)
@@ -3217,7 +3377,7 @@ client.security.getRole(...)
==== get_role_mapping
Retrieves role mappings.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html[Endpoint documentation]
+{ref}/security-api-get-role-mapping.html[Endpoint documentation]
[source,ts]
----
client.security.getRoleMapping(...)
@@ -3227,7 +3387,7 @@ client.security.getRoleMapping(...)
==== get_service_accounts
Retrieves information about service accounts.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-accounts.html[Endpoint documentation]
+{ref}/security-api-get-service-accounts.html[Endpoint documentation]
[source,ts]
----
client.security.getServiceAccounts(...)
@@ -3237,7 +3397,7 @@ client.security.getServiceAccounts(...)
==== get_service_credentials
Retrieves information about all service credentials for a service account.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-credentials.html[Endpoint documentation]
+{ref}/security-api-get-service-credentials.html[Endpoint documentation]
[source,ts]
----
client.security.getServiceCredentials(...)
@@ -3247,7 +3407,7 @@ client.security.getServiceCredentials(...)
==== get_token
Creates a bearer token for access without requiring basic authentication.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html[Endpoint documentation]
+{ref}/security-api-get-token.html[Endpoint documentation]
[source,ts]
----
client.security.getToken(...)
@@ -3257,7 +3417,7 @@ client.security.getToken(...)
==== get_user
Retrieves information about users in the native realm and built-in users.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html[Endpoint documentation]
+{ref}/security-api-get-user.html[Endpoint documentation]
[source,ts]
----
client.security.getUser(...)
@@ -3267,7 +3427,7 @@ client.security.getUser(...)
==== get_user_privileges
Retrieves security privileges for the logged-in user.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html[Endpoint documentation]
+{ref}/security-api-get-user-privileges.html[Endpoint documentation]
[source,ts]
----
client.security.getUserPrivileges(...)
@@ -3277,7 +3437,7 @@ client.security.getUserPrivileges(...)
==== grant_api_key
Creates an API key on behalf of another user.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-grant-api-key.html[Endpoint documentation]
+{ref}/security-api-grant-api-key.html[Endpoint documentation]
[source,ts]
----
client.security.grantApiKey(...)
@@ -3287,7 +3447,7 @@ client.security.grantApiKey(...)
==== has_privileges
Determines whether the specified user has a specified list of privileges.
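
A sketch of a typical check (the index and privilege names are illustrative):

[source,ts]
----
await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [{ names: ['my-index'], privileges: ['read'] }]
})
----
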
-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html[Endpoint documentation]
+{ref}/security-api-has-privileges.html[Endpoint documentation]
[source,ts]
----
client.security.hasPrivileges(...)
@@ -3297,7 +3457,7 @@ client.security.hasPrivileges(...)
==== invalidate_api_key
Invalidates one or more API keys.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-api-key.html[Endpoint documentation]
+{ref}/security-api-invalidate-api-key.html[Endpoint documentation]
[source,ts]
----
client.security.invalidateApiKey(...)
@@ -3307,7 +3467,7 @@ client.security.invalidateApiKey(...)
==== invalidate_token
Invalidates one or more access tokens or refresh tokens.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html[Endpoint documentation]
+{ref}/security-api-invalidate-token.html[Endpoint documentation]
[source,ts]
----
client.security.invalidateToken(...)
@@ -3317,7 +3477,7 @@ client.security.invalidateToken(...)
==== oidc_authenticate
Exchanges an OpenID Connect authentication response message for an Elasticsearch access token and refresh token pair

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-authenticate.html[Endpoint documentation]
+{ref}/security-api-oidc-authenticate.html[Endpoint documentation]
[source,ts]
----
client.security.oidcAuthenticate(...)
@@ -3327,7 +3487,7 @@ client.security.oidcAuthenticate(...)
==== oidc_logout
Invalidates a refresh token and access token that were generated from the OpenID Connect Authenticate API

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-logout.html[Endpoint documentation]
+{ref}/security-api-oidc-logout.html[Endpoint documentation]
[source,ts]
----
client.security.oidcLogout(...)
@@ -3337,7 +3497,7 @@ client.security.oidcLogout(...)
==== oidc_prepare_authentication
Creates an OAuth 2.0 authentication request as a URL string

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-prepare-authentication.html[Endpoint documentation]
+{ref}/security-api-oidc-prepare-authentication.html[Endpoint documentation]
[source,ts]
----
client.security.oidcPrepareAuthentication(...)
@@ -3347,7 +3507,7 @@ client.security.oidcPrepareAuthentication(...)
==== put_privileges
Adds or updates application privileges.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-privileges.html[Endpoint documentation]
+{ref}/security-api-put-privileges.html[Endpoint documentation]
[source,ts]
----
client.security.putPrivileges(...)
@@ -3357,7 +3517,7 @@ client.security.putPrivileges(...)
==== put_role
Adds and updates roles in the native realm.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role.html[Endpoint documentation]
+{ref}/security-api-put-role.html[Endpoint documentation]
[source,ts]
----
client.security.putRole(...)
@@ -3367,7 +3527,7 @@ client.security.putRole(...)
==== put_role_mapping
Creates and updates role mappings.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html[Endpoint documentation]
+{ref}/security-api-put-role-mapping.html[Endpoint documentation]
[source,ts]
----
client.security.putRoleMapping(...)
@@ -3377,7 +3537,7 @@ client.security.putRoleMapping(...)
==== put_user
Adds and updates users in the native realm. These users are commonly referred to as native users.
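
A minimal sketch (the username, password, and role are placeholders, not recommendations):

[source,ts]
----
await client.security.putUser({
  username: 'jdoe',
  password: 'a-long-random-password',
  roles: ['my-role']
})
----
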
-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html[Endpoint documentation] +{ref}/security-api-put-user.html[Endpoint documentation] [source,ts] ---- client.security.putUser(...) @@ -3387,7 +3547,7 @@ client.security.putUser(...) ==== query_api_keys Retrieves information for API keys using a subset of query DSL -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-api-key.html[Endpoint documentation] +{ref}/security-api-query-api-key.html[Endpoint documentation] [source,ts] ---- client.security.queryApiKeys(...) @@ -3397,7 +3557,7 @@ client.security.queryApiKeys(...) ==== saml_authenticate Exchanges a SAML Response message for an Elasticsearch access token and refresh token pair -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-authenticate.html[Endpoint documentation] +{ref}/security-api-saml-authenticate.html[Endpoint documentation] [source,ts] ---- client.security.samlAuthenticate(...) @@ -3407,7 +3567,7 @@ client.security.samlAuthenticate(...) ==== saml_complete_logout Verifies the logout response sent from the SAML IdP -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-complete-logout.html[Endpoint documentation] +{ref}/security-api-saml-complete-logout.html[Endpoint documentation] [source,ts] ---- client.security.samlCompleteLogout(...) @@ -3417,7 +3577,7 @@ client.security.samlCompleteLogout(...) ==== saml_invalidate Consumes a SAML LogoutRequest -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-invalidate.html[Endpoint documentation] +{ref}/security-api-saml-invalidate.html[Endpoint documentation] [source,ts] ---- client.security.samlInvalidate(...) @@ -3427,7 +3587,7 @@ client.security.samlInvalidate(...) ==== saml_logout Invalidates an access token and a refresh token that were generated via the SAML Authenticate API -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-logout.html[Endpoint documentation] +{ref}/security-api-saml-logout.html[Endpoint documentation] [source,ts] ---- client.security.samlLogout(...) @@ -3437,7 +3597,7 @@ client.security.samlLogout(...) ==== saml_prepare_authentication Creates a SAML authentication request -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-prepare-authentication.html[Endpoint documentation] +{ref}/security-api-saml-prepare-authentication.html[Endpoint documentation] [source,ts] ---- client.security.samlPrepareAuthentication(...) @@ -3447,7 +3607,7 @@ client.security.samlPrepareAuthentication(...) ==== saml_service_provider_metadata Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-sp-metadata.html[Endpoint documentation] +{ref}/security-api-saml-sp-metadata.html[Endpoint documentation] [source,ts] ---- client.security.samlServiceProviderMetadata(...) @@ -3457,7 +3617,7 @@ client.security.samlServiceProviderMetadata(...) ==== update_api_key Updates attributes of an existing API key. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-api-key.html[Endpoint documentation] +{ref}/security-api-update-api-key.html[Endpoint documentation] [source,ts] ---- client.security.updateApiKey(...) @@ -3469,7 +3629,7 @@ client.security.updateApiKey(...) ==== delete_lifecycle Deletes an existing snapshot lifecycle policy. 
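
For example (the policy id is illustrative):

[source,ts]
----
await client.slm.deleteLifecycle({ policy_id: 'daily-snapshots' })
----
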
-https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-delete-policy.html[Endpoint documentation]
+{ref}/slm-api-delete-policy.html[Endpoint documentation]
[source,ts]
----
client.slm.deleteLifecycle(...)
@@ -3479,7 +3639,7 @@ client.slm.deleteLifecycle(...)
==== execute_lifecycle
Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-lifecycle.html[Endpoint documentation]
+{ref}/slm-api-execute-lifecycle.html[Endpoint documentation]
[source,ts]
----
client.slm.executeLifecycle(...)
@@ -3489,7 +3649,7 @@ client.slm.executeLifecycle(...)
==== execute_retention
Deletes any snapshots that are expired according to the policy's retention rules.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-retention.html[Endpoint documentation]
+{ref}/slm-api-execute-retention.html[Endpoint documentation]
[source,ts]
----
client.slm.executeRetention(...)
@@ -3499,7 +3659,7 @@ client.slm.executeRetention(...)
==== get_lifecycle
Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-policy.html[Endpoint documentation]
+{ref}/slm-api-get-policy.html[Endpoint documentation]
[source,ts]
----
client.slm.getLifecycle(...)
@@ -3509,7 +3669,7 @@ client.slm.getLifecycle(...)
==== get_stats
Returns global and policy-level statistics about actions taken by snapshot lifecycle management.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-stats.html[Endpoint documentation]
+{ref}/slm-api-get-stats.html[Endpoint documentation]
[source,ts]
----
client.slm.getStats(...)
@@ -3519,7 +3679,7 @@ client.slm.getStats(...)
==== get_status
Retrieves the status of snapshot lifecycle management (SLM).

-https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-status.html[Endpoint documentation]
+{ref}/slm-api-get-status.html[Endpoint documentation]
[source,ts]
----
client.slm.getStatus(...)
@@ -3529,7 +3689,7 @@ client.slm.getStatus(...)
==== put_lifecycle
Creates or updates a snapshot lifecycle policy.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-put-policy.html[Endpoint documentation]
+{ref}/slm-api-put-policy.html[Endpoint documentation]
[source,ts]
----
client.slm.putLifecycle(...)
@@ -3539,7 +3699,7 @@ client.slm.putLifecycle(...)
==== start
Turns on snapshot lifecycle management (SLM).

-https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-start.html[Endpoint documentation]
+{ref}/slm-api-start.html[Endpoint documentation]
[source,ts]
----
client.slm.start(...)
@@ -3549,7 +3709,7 @@ client.slm.start(...)
==== stop
Turns off snapshot lifecycle management (SLM).

-https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-stop.html[Endpoint documentation]
+{ref}/slm-api-stop.html[Endpoint documentation]
[source,ts]
----
client.slm.stop(...)
----

[discrete]
=== snapshot
[discrete]
==== cleanup_repository
Removes stale data from a repository.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/clean-up-snapshot-repo-api.html[Endpoint documentation]
+{ref}/clean-up-snapshot-repo-api.html[Endpoint documentation]
[source,ts]
----
client.snapshot.cleanupRepository(...)
@@ -3571,7 +3731,7 @@
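
For the cleanup_repository call above, a brief sketch (the repository name is a placeholder):

[source,ts]
----
await client.snapshot.cleanupRepository({ name: 'my-repository' })
----
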
==== clone Clones indices from one snapshot into another snapshot in the same repository. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.clone(...) @@ -3581,7 +3741,7 @@ client.snapshot.clone(...) ==== create Creates a snapshot in a repository. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.create(...) @@ -3591,7 +3751,7 @@ client.snapshot.create(...) ==== create_repository Creates a repository. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.createRepository(...) @@ -3601,7 +3761,7 @@ client.snapshot.createRepository(...) ==== delete Deletes one or more snapshots. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.delete(...) @@ -3611,7 +3771,7 @@ client.snapshot.delete(...) ==== delete_repository Deletes a repository. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.deleteRepository(...) @@ -3621,7 +3781,7 @@ client.snapshot.deleteRepository(...) ==== get Returns information about a snapshot. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.get(...) @@ -3631,7 +3791,7 @@ client.snapshot.get(...) ==== get_repository Returns information about a repository. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.getRepository(...) @@ -3641,7 +3801,7 @@ client.snapshot.getRepository(...) ==== repository_analyze Analyzes a repository for correctness and performance -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.repositoryAnalyze(...) @@ -3651,7 +3811,7 @@ client.snapshot.repositoryAnalyze(...) ==== restore Restores a snapshot. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.restore(...) @@ -3661,7 +3821,7 @@ client.snapshot.restore(...) ==== status Returns information about the status of a snapshot. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.status(...) @@ -3671,7 +3831,7 @@ client.snapshot.status(...) ==== verify_repository Verifies a repository. -https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html[Endpoint documentation] +{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.verifyRepository(...) 
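// For example, assuming a configured client (the repository name is illustrative):
// await client.snapshot.verifyRepository({ name: 'my-repository' })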
@@ -3683,7 +3843,7 @@ client.snapshot.verifyRepository(...) ==== clear_cursor Clears the SQL cursor -https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-sql-cursor-api.html[Endpoint documentation] +{ref}/clear-sql-cursor-api.html[Endpoint documentation] [source,ts] ---- client.sql.clearCursor(...) @@ -3693,7 +3853,7 @@ client.sql.clearCursor(...) ==== delete_async Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. -https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-async-sql-search-api.html[Endpoint documentation] +{ref}/delete-async-sql-search-api.html[Endpoint documentation] [source,ts] ---- client.sql.deleteAsync(...) @@ -3703,7 +3863,7 @@ client.sql.deleteAsync(...) ==== get_async Returns the current status and available results for an async SQL search or stored synchronous SQL search -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-api.html[Endpoint documentation] +{ref}/get-async-sql-search-api.html[Endpoint documentation] [source,ts] ---- client.sql.getAsync(...) @@ -3713,7 +3873,7 @@ client.sql.getAsync(...) ==== get_async_status Returns the current status of an async SQL search or a stored synchronous SQL search -https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-status-api.html[Endpoint documentation] +{ref}/get-async-sql-search-status-api.html[Endpoint documentation] [source,ts] ---- client.sql.getAsyncStatus(...) @@ -3723,7 +3883,7 @@ client.sql.getAsyncStatus(...) ==== query Executes a SQL request -https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-search-api.html[Endpoint documentation] +{ref}/sql-search-api.html[Endpoint documentation] [source,ts] ---- client.sql.query(...) @@ -3733,7 +3893,7 @@ client.sql.query(...) ==== translate Translates SQL into Elasticsearch queries -https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate-api.html[Endpoint documentation] +{ref}/sql-translate-api.html[Endpoint documentation] [source,ts] ---- client.sql.translate(...) @@ -3745,7 +3905,7 @@ client.sql.translate(...) ==== certificates Retrieves information about the X.509 certificates used to encrypt communications in the cluster. -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl.html[Endpoint documentation] +{ref}/security-api-ssl.html[Endpoint documentation] [source,ts] ---- client.ssl.certificates(...) @@ -3757,7 +3917,7 @@ client.ssl.certificates(...) ==== cancel Cancels a task, if it can be cancelled through an API. -https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html[Endpoint documentation] +{ref}/tasks.html[Endpoint documentation] [source,ts] ---- client.tasks.cancel(...) @@ -3767,7 +3927,7 @@ client.tasks.cancel(...) ==== get Returns information about a task. -https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html[Endpoint documentation] +{ref}/tasks.html[Endpoint documentation] [source,ts] ---- client.tasks.get(...) @@ -3777,7 +3937,7 @@ client.tasks.get(...) ==== list Returns a list of tasks. -https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html[Endpoint documentation] +{ref}/tasks.html[Endpoint documentation] [source,ts] ---- client.tasks.list(...) @@ -3789,7 +3949,7 @@ client.tasks.list(...) ==== find_structure Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. 
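
A sketch, assuming the sample documents are passed in the `text_files` body field (the documents themselves are invented):

[source,ts]
----
await client.textStructure.findStructure({
  text_files: [
    { name: 'Leviathan Wakes', release_date: '2011-06-02' },
    { name: 'Hyperion', release_date: '1989-05-26' }
  ]
})
----
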
-https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html[Endpoint documentation]
+{ref}/find-structure.html[Endpoint documentation]
[source,ts]
----
client.textStructure.findStructure(...)
----

[discrete]
=== transform
[discrete]
==== delete_transform
Deletes an existing transform.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html[Endpoint documentation]
+{ref}/delete-transform.html[Endpoint documentation]
[source,ts]
----
client.transform.deleteTransform(...)
@@ -3811,7 +3971,7 @@ client.transform.deleteTransform(...)
==== get_transform
Retrieves configuration information for transforms.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html[Endpoint documentation]
+{ref}/get-transform.html[Endpoint documentation]
[source,ts]
----
client.transform.getTransform(...)
@@ -3821,7 +3981,7 @@ client.transform.getTransform(...)
==== get_transform_stats
Retrieves usage information for transforms.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html[Endpoint documentation]
+{ref}/get-transform-stats.html[Endpoint documentation]
[source,ts]
----
client.transform.getTransformStats(...)
@@ -3831,7 +3991,7 @@ client.transform.getTransformStats(...)
==== preview_transform
Previews a transform.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html[Endpoint documentation]
+{ref}/preview-transform.html[Endpoint documentation]
[source,ts]
----
client.transform.previewTransform(...)
@@ -3841,7 +4001,7 @@ client.transform.previewTransform(...)
==== put_transform
Instantiates a transform.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html[Endpoint documentation]
+{ref}/put-transform.html[Endpoint documentation]
[source,ts]
----
client.transform.putTransform(...)
@@ -3851,17 +4011,27 @@ client.transform.putTransform(...)
==== reset_transform
Resets an existing transform.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-transform.html[Endpoint documentation]
+{ref}/reset-transform.html[Endpoint documentation]
[source,ts]
----
client.transform.resetTransform(...)
----

+[discrete]
+==== schedule_now_transform
+Schedules a transform to run now.
+
+{ref}/schedule-now-transform.html[Endpoint documentation]
+[source,ts]
+----
+client.transform.scheduleNowTransform(...)
+----
+
[discrete]
==== start_transform
Starts one or more transforms.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html[Endpoint documentation]
+{ref}/start-transform.html[Endpoint documentation]
[source,ts]
----
client.transform.startTransform(...)
@@ -3871,7 +4041,7 @@ client.transform.startTransform(...)
==== stop_transform
Stops one or more transforms.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html[Endpoint documentation]
+{ref}/stop-transform.html[Endpoint documentation]
[source,ts]
----
client.transform.stopTransform(...)
@@ -3881,7 +4051,7 @@ client.transform.stopTransform(...)
==== update_transform
Updates certain properties of a transform.

-https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html[Endpoint documentation]
+{ref}/update-transform.html[Endpoint documentation]
[source,ts]
----
client.transform.updateTransform(...)
@@ -3891,7 +4061,7 @@ client.transform.updateTransform(...)
==== upgrade_transforms
Upgrades all transforms.
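
For example, checking first with a dry run (the flag is optional):

[source,ts]
----
await client.transform.upgradeTransforms({ dry_run: true })
----
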
-https://www.elastic.co/guide/en/elasticsearch/reference/current/upgrade-transforms.html[Endpoint documentation] +{ref}/upgrade-transforms.html[Endpoint documentation] [source,ts] ---- client.transform.upgradeTransforms(...) @@ -3903,7 +4073,7 @@ client.transform.upgradeTransforms(...) ==== ack_watch Acknowledges a watch, manually throttling the execution of the watch's actions. -https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html[Endpoint documentation] +{ref}/watcher-api-ack-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.ackWatch(...) @@ -3913,7 +4083,7 @@ client.watcher.ackWatch(...) ==== activate_watch Activates a currently inactive watch. -https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html[Endpoint documentation] +{ref}/watcher-api-activate-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.activateWatch(...) @@ -3923,7 +4093,7 @@ client.watcher.activateWatch(...) ==== deactivate_watch Deactivates a currently active watch. -https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html[Endpoint documentation] +{ref}/watcher-api-deactivate-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.deactivateWatch(...) @@ -3933,7 +4103,7 @@ client.watcher.deactivateWatch(...) ==== delete_watch Removes a watch from Watcher. -https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html[Endpoint documentation] +{ref}/watcher-api-delete-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.deleteWatch(...) @@ -3943,7 +4113,7 @@ client.watcher.deleteWatch(...) ==== execute_watch Forces the execution of a stored watch. -https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html[Endpoint documentation] +{ref}/watcher-api-execute-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.executeWatch(...) @@ -3953,7 +4123,7 @@ client.watcher.executeWatch(...) ==== get_watch Retrieves a watch by its ID. -https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html[Endpoint documentation] +{ref}/watcher-api-get-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.getWatch(...) @@ -3963,7 +4133,7 @@ client.watcher.getWatch(...) ==== put_watch Creates a new watch, or updates an existing one. -https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html[Endpoint documentation] +{ref}/watcher-api-put-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.putWatch(...) @@ -3973,7 +4143,7 @@ client.watcher.putWatch(...) ==== query_watches Retrieves stored watches. -https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-query-watches.html[Endpoint documentation] +{ref}/watcher-api-query-watches.html[Endpoint documentation] [source,ts] ---- client.watcher.queryWatches(...) @@ -3983,7 +4153,7 @@ client.watcher.queryWatches(...) ==== start Starts Watcher if it is not already running. -https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html[Endpoint documentation] +{ref}/watcher-api-start.html[Endpoint documentation] [source,ts] ---- client.watcher.start(...) @@ -3993,7 +4163,7 @@ client.watcher.start(...) ==== stats Retrieves the current Watcher metrics. 
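
A minimal call needs no parameters:

[source,ts]
----
await client.watcher.stats()
----
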
-https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html[Endpoint documentation] +{ref}/watcher-api-stats.html[Endpoint documentation] [source,ts] ---- client.watcher.stats(...) @@ -4003,7 +4173,7 @@ client.watcher.stats(...) ==== stop Stops Watcher if it is running. -https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html[Endpoint documentation] +{ref}/watcher-api-stop.html[Endpoint documentation] [source,ts] ---- client.watcher.stop(...) @@ -4015,7 +4185,7 @@ client.watcher.stop(...) ==== info Retrieves information about the installed X-Pack features. -https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html[Endpoint documentation] +{ref}/info-api.html[Endpoint documentation] [source,ts] ---- client.xpack.info(...) @@ -4025,7 +4195,7 @@ client.xpack.info(...) ==== usage Retrieves usage information about the installed X-Pack features. -https://www.elastic.co/guide/en/elasticsearch/reference/current/usage-api.html[Endpoint documentation] +{ref}/usage-api.html[Endpoint documentation] [source,ts] ---- client.xpack.usage(...) diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 1abdbdf8c..4cb08fe44 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -279,7 +279,7 @@ export default class Cluster { async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['template', 'aliases', 'mappings', 'settings', 'version', '_meta'] + const acceptedBody: string[] = ['template', 'version', '_meta', 'allow_auto_create'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index 606357720..5678857a8 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -37,12 +37,12 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } -export default async function FieldCapsApi (this: That, params: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function FieldCapsApi (this: That, params: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function FieldCapsApi (this: That, params: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise -export default async function FieldCapsApi (this: That, params: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise { +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: 
string[] = ['index'] - const acceptedBody: string[] = ['index_filter', 'runtime_mappings'] + const acceptedBody: string[] = ['fields', 'index_filter', 'runtime_mappings'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -53,6 +53,7 @@ export default async function FieldCapsApi (this: That, params: T.FieldCapsReque body = userBody != null ? { ...userBody } : undefined } + params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { body = body ?? {} diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts new file mode 100644 index 000000000..78b97b03d --- /dev/null +++ b/src/api/api/health_report.ts @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptions): Promise +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['feature'] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.feature != null) { + method = 'GET' + path = `/_health_report/${encodeURIComponent(params.feature.toString())}` + } else { + method = 'GET' + path = '/_health_report' + } + return await this.transport.request({ path, method, querystring, body }, options) +} diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index c6401e9bc..667b95ff3 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -330,6 +330,35 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + async deleteDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async deleteDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['name'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'DELETE' + path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` + } else { + method = 'DELETE' + path = '/_data_stream/_lifecycle' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise @@ -540,6 +569,28 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + async explainDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async explainDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async explainDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async explainDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_lifecycle/explain` + return await this.transport.request({ path, method, querystring, body }, options) + } + async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise @@ -680,6 +731,35 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + async getDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async getDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['name'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` + } else { + method = 'GET' + path = '/_data_stream/_lifecycle' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise @@ -1006,6 +1086,35 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + async putDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async putDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['name'] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'PUT' + path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` + } else { + method = 'PUT' + path = '/_data_stream/_lifecycle' + } + return await this.transport.request({ path, method, querystring, body }, options) + } + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/license.ts b/src/api/api/license.ts index a4fbf34d6..26c4e59e7 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -135,10 +135,10 @@ export default class License { return await this.transport.request({ path, method, querystring, body }, options) } - async post (this: That, params: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async post (this: That, params: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> - async post (this: That, params: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise - async post (this: That, params: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise { + async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise + async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['license', 'licenses'] const querystring: Record = {} @@ -151,6 +151,7 @@ export default class License { body = userBody != null ? { ...userBody } : undefined } + params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { body = body ?? 
{} diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index 9fc6d44bb..0a516c08c 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -82,8 +82,15 @@ export default class Logstash { } } - const method = 'GET' - const path = `/_logstash/pipeline/${encodeURIComponent(params.id.toString())}` + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_logstash/pipeline/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_logstash/pipeline' + } return await this.transport.request({ path, method, querystring, body }, options) } diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 78edd3629..75a62305e 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -43,19 +43,19 @@ export default class Ml { this.transport = transport } - async clearTrainedModelDeploymentCache (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async clearTrainedModelDeploymentCache (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async clearTrainedModelDeploymentCache (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async clearTrainedModelDeploymentCache (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] const querystring: Record = {} const body = undefined - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -710,7 +710,7 @@ export default class Ml { path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/categories/${encodeURIComponent(params.category_id.toString())}` } else { method = body != null ? 
'POST' : 'GET' - path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/categories/` + path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/categories` } return await this.transport.request({ path, method, querystring, body }, options) } @@ -2006,7 +2006,7 @@ export default class Ml { async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] + const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -2074,7 +2074,7 @@ export default class Ml { async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptions): Promise async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization'] + const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'model_prune_window', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -2137,6 +2137,28 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['model_id'] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_update` + return await this.transport.request({ path, method, querystring, body }, options) + } + async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index 6cf8216f4..e75fcdfd7 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -90,7 +90,7 @@ export default class Rollup { path = `/_rollup/job/${encodeURIComponent(params.id.toString())}` } else { method = 'GET' - path = '/_rollup/job/' + path = '/_rollup/job' } return await this.transport.request({ path, method, querystring, body }, options) } @@ -120,7 +120,7 @@ export default class Rollup { path = `/_rollup/data/${encodeURIComponent(params.id.toString())}` } else { method = 'GET' - path = '/_rollup/data/' + path = '/_rollup/data' } return await this.transport.request({ path, method, querystring, body }, options) } diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts new file mode 100644 index 000000000..98ea8bd60 --- /dev/null +++ b/src/api/api/search_application.ts @@ -0,0 +1,268 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class SearchApplication {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationDeleteResponse>
+  async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationDeleteResponse, unknown>>
+  async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationDeleteResponse>
+  async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async deleteBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async deleteBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async deleteBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async deleteBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_application/analytics/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationGetResponse>
+  async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationGetResponse, unknown>>
+  async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationGetResponse>
+  async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async getBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async getBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async getBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async getBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_application/analytics/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_application/analytics'
+    }
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationListResponse>
+  async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationListResponse, unknown>>
+  async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationListResponse>
+  async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_application/search_application'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['collection_name', 'event_type']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_application/analytics/${encodeURIComponent(params.collection_name.toString())}/event/${encodeURIComponent(params.event_type.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPutResponse>
+  async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationPutResponse, unknown>>
+  async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationPutResponse>
+  async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedBody: string[] = ['search_application']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: any = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        // @ts-expect-error
+        body = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async putBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async putBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async putBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async putBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_application/analytics/${encodeURIComponent(params.name.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>>
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationSearchResponse<TDocument, TAggregations>, unknown>>
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>>
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const acceptedBody: string[] = ['params']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = body != null ? 'POST' : 'GET'
+    const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}/_search`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts
index b58059daa..16f4759f9 100644
--- a/src/api/api/search_mvt.ts
+++ b/src/api/api/search_mvt.ts
@@ -42,7 +42,7 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise<T.SearchMvtResponse>
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y']
-  const acceptedBody: string[] = ['aggs', 'exact_bounds', 'extent', 'fields', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort', 'track_total_hits']
+  const acceptedBody: string[] = ['aggs', 'buffer', 'exact_bounds', 'extent', 'fields', 'grid_agg', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort', 'track_total_hits', 'with_labels']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   const userBody: any = params?.body
diff --git a/src/api/api/security.ts b/src/api/api/security.ts
index 8c9e634b5..d4ed0d109 100644
--- a/src/api/api/security.ts
+++ b/src/api/api/security.ts
@@ -1155,7 +1155,7 @@ export default class Security {
     }

     const method = 'PUT'
-    const path = '/_security/privilege/'
+    const path = '/_security/privilege'
     return await this.transport.request({ path, method, querystring, body }, options)
   }

diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts
index 580e0fc97..fa3fa0b03 100644
--- a/src/api/api/snapshot.ts
+++ b/src/api/api/snapshot.ts
@@ -290,7 +290,7 @@ export default class Snapshot {
   async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise<T.SnapshotRestoreResponse>
   async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['repository', 'snapshot']
-    const acceptedBody: string[] = ['ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement']
+    const acceptedBody: string[] = ['feature_states', 'ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     const userBody: any = params?.body
diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts
index 27fce8f29..904c40d37 100644
--- a/src/api/api/transform.ts
+++ b/src/api/api/transform.ts
@@ -215,6 +215,28 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformScheduleNowTransformResponse>
+  async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformScheduleNowTransformResponse, unknown>>
+  async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise<T.TransformScheduleNowTransformResponse>
+  async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['transform_id']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_schedule_now`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformStartTransformResponse>
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformStartTransformResponse, unknown>>
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptions): Promise<T.TransformStartTransformResponse>
diff --git a/src/api/index.ts b/src/api/index.ts
index c4f253984..a76458829 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -55,6 +55,7 @@ import getScriptContextApi from './api/get_script_context'
 import getScriptLanguagesApi from './api/get_script_languages'
 import getSourceApi from './api/get_source'
 import GraphApi from './api/graph'
+import healthReportApi from './api/health_report'
 import IlmApi from './api/ilm'
 import indexApi from './api/index'
 import IndicesApi from './api/indices'
@@ -82,6 +83,7 @@ import RollupApi from './api/rollup'
 import scriptsPainlessExecuteApi from './api/scripts_painless_execute'
 import scrollApi from './api/scroll'
 import searchApi from './api/search'
+import SearchApplicationApi from './api/search_application'
 import searchMvtApi from './api/search_mvt'
 import searchShardsApi from './api/search_shards'
 import searchTemplateApi from './api/search_template'
@@ -134,6 +136,7 @@ export default interface API {
   getScriptLanguages: typeof getScriptLanguagesApi
   getSource: typeof getSourceApi
   graph: GraphApi
+  healthReport: typeof healthReportApi
   ilm: IlmApi
   index: typeof indexApi
   indices: IndicesApi
@@ -161,6 +164,7 @@ export default interface API {
   scriptsPainlessExecute: typeof scriptsPainlessExecuteApi
   scroll: typeof scrollApi
   search: typeof searchApi
+  searchApplication: SearchApplicationApi
  searchMvt: typeof searchMvtApi
   searchShards: typeof searchShardsApi
   searchTemplate: typeof searchTemplateApi
@@ -204,6 +208,7 @@ const kMl = Symbol('Ml')
 const kMonitoring = Symbol('Monitoring')
 const kNodes = Symbol('Nodes')
 const kRollup = Symbol('Rollup')
+const kSearchApplication = Symbol('SearchApplication')
 const kSearchableSnapshots = Symbol('SearchableSnapshots')
 const kSecurity = Symbol('Security')
 const kShutdown = Symbol('Shutdown')
@@ -239,6 +244,7 @@ export default class API {
   [kMonitoring]: symbol | null
   [kNodes]: symbol | null
   [kRollup]: symbol | null
+  [kSearchApplication]: symbol | null
   [kSearchableSnapshots]: symbol | null
   [kSecurity]: symbol | null
   [kShutdown]: symbol | null
@@ -273,6 +279,7 @@
     this[kMonitoring] = null
     this[kNodes] = null
     this[kRollup] = null
+    this[kSearchApplication] = null
    this[kSearchableSnapshots] = null
     this[kSecurity] = null
     this[kShutdown] = null
@@ -306,6 +313,7 @@ API.prototype.getScript = getScriptApi
 API.prototype.getScriptContext = getScriptContextApi
 API.prototype.getScriptLanguages = getScriptLanguagesApi
 API.prototype.getSource = getSourceApi
+API.prototype.healthReport = healthReportApi
 API.prototype.index = indexApi
 API.prototype.info = infoApi
 API.prototype.knnSearch = knnSearchApi
@@ -396,6 +404,9 @@ Object.defineProperties(API.prototype, {
   rollup: {
     get () { return this[kRollup] === null ? (this[kRollup] = new RollupApi(this.transport)) : this[kRollup] }
   },
+  searchApplication: {
+    get () { return this[kSearchApplication] === null ? (this[kSearchApplication] = new SearchApplicationApi(this.transport)) : this[kSearchApplication] }
+  },
   searchableSnapshots: {
     get () { return this[kSearchableSnapshots] === null ?
(this[kSearchableSnapshots] = new SearchableSnapshotsApi(this.transport)) : this[kSearchableSnapshots] } }, diff --git a/src/api/types.ts b/src/api/types.ts index c98388182..c5388ce4a 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -239,7 +239,7 @@ export interface DeleteByQueryResponse { } export interface DeleteByQueryRethrottleRequest extends RequestBase { - task_id: Id + task_id: TaskId requests_per_second?: float } @@ -343,11 +343,11 @@ export interface FieldCapsRequest extends RequestBase { index?: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards - fields: Fields ignore_unavailable?: boolean include_unmapped?: boolean filters?: string types?: string[] + fields?: Fields index_filter?: QueryDslQueryContainer runtime_mappings?: MappingRuntimeFields } @@ -450,6 +450,150 @@ export interface GetSourceRequest extends RequestBase { export type GetSourceResponse = TDocument +export interface HealthReportBaseIndicator { + status: HealthReportIndicatorHealthStatus + symptom: string + impacts?: HealthReportImpact[] + diagnosis?: HealthReportDiagnosis[] +} + +export interface HealthReportDiagnosis { + id: string + action: string + affected_resources: HealthReportDiagnosisAffectedResources + cause: string + help_url: string +} + +export interface HealthReportDiagnosisAffectedResources { + indices?: Indices + nodes?: HealthReportIndicatorNode[] + slm_policies?: string[] + feature_states?: string[] + snapshot_repositories?: string[] +} + +export interface HealthReportDiskIndicator extends HealthReportBaseIndicator { + details?: HealthReportDiskIndicatorDetails +} + +export interface HealthReportDiskIndicatorDetails { + indices_with_readonly_block: long + nodes_with_enough_disk_space: long + nodes_over_high_watermark: long + nodes_over_flood_stage_watermark: long + nodes_with_unknown_disk_status: long +} + +export interface HealthReportIlmIndicator extends HealthReportBaseIndicator { + details?: HealthReportIlmIndicatorDetails +} + +export interface HealthReportIlmIndicatorDetails { + ilm_status: LifecycleOperationMode + policies: long +} + +export interface HealthReportImpact { + description: string + id: string + impact_areas: HealthReportImpactArea[] + severity: integer +} + +export type HealthReportImpactArea = 'search' | 'ingest' | 'backup' | 'deployment_management' + +export type HealthReportIndicatorHealthStatus = 'green' | 'yellow' | 'red' | 'unknown' + +export interface HealthReportIndicatorNode { + name: string | null + node_id: string | null +} + +export interface HealthReportIndicators { + master_is_stable?: HealthReportMasterIsStableIndicator + shards_availability?: HealthReportShardsAvailabilityIndicator + disk?: HealthReportDiskIndicator + repository_integrity?: HealthReportRepositoryIntegrityIndicator + ilm?: HealthReportIlmIndicator + slm?: HealthReportSlmIndicator +} + +export interface HealthReportMasterIsStableIndicator extends HealthReportBaseIndicator { + details?: HealthReportMasterIsStableIndicatorDetails +} + +export interface HealthReportMasterIsStableIndicatorClusterFormationNode { + name?: string + node_id: string + cluster_formation_message: string +} + +export interface HealthReportMasterIsStableIndicatorDetails { + current_master: HealthReportIndicatorNode + recent_masters: HealthReportIndicatorNode[] + exception_fetching_history?: HealthReportMasterIsStableIndicatorExceptionFetchingHistory + cluster_formation?: HealthReportMasterIsStableIndicatorClusterFormationNode[] +} + +export interface 
HealthReportMasterIsStableIndicatorExceptionFetchingHistory { + message: string + stack_trace: string +} + +export interface HealthReportRepositoryIntegrityIndicator extends HealthReportBaseIndicator { + details?: HealthReportRepositoryIntegrityIndicatorDetails +} + +export interface HealthReportRepositoryIntegrityIndicatorDetails { + total_repositories?: long + corrupted_repositories?: long + corrupted?: string[] +} + +export interface HealthReportRequest extends RequestBase { + feature?: string | string[] + timeout?: Duration + verbose?: boolean + size?: integer +} + +export interface HealthReportResponse { + cluster_name: string + indicators: HealthReportIndicators +} + +export interface HealthReportShardsAvailabilityIndicator extends HealthReportBaseIndicator { + details?: HealthReportShardsAvailabilityIndicatorDetails +} + +export interface HealthReportShardsAvailabilityIndicatorDetails { + creating_primaries: long + initializing_primaries: long + initializing_replicas: long + restarting_primaries: long + restarting_replicas: long + started_primaries: long + started_replicas: long + unassigned_primaries: long + unassigned_replicas: long +} + +export interface HealthReportSlmIndicator extends HealthReportBaseIndicator { + details?: HealthReportSlmIndicatorDetails +} + +export interface HealthReportSlmIndicatorDetails { + slm_status: LifecycleOperationMode + policies: long + unhealthy_policies: HealthReportSlmIndicatorUnhealthyPolicies +} + +export interface HealthReportSlmIndicatorUnhealthyPolicies { + count: long + invocations_since_last_success?: Record +} + export interface IndexRequest extends RequestBase { id?: Id index: IndexName @@ -502,13 +646,11 @@ export interface KnnSearchResponse { export interface KnnSearchQuery { field: Field - query_vector: KnnSearchQueryVector + query_vector: QueryVector k: long num_candidates: long } -export type KnnSearchQueryVector = double[] - export interface MgetMultiGetError { error: ErrorCause _id: Id @@ -563,7 +705,7 @@ export interface MsearchMultisearchBody { ext?: Record stored_fields?: Fields docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery + knn?: KnnQuery | KnnQuery[] from?: integer highlight?: SearchHighlight indices_boost?: Record[] @@ -699,6 +841,9 @@ export interface OpenPointInTimeRequest extends RequestBase { index: Indices keep_alive: Duration ignore_unavailable?: boolean + preference?: string + routing?: Routing + expand_wildcards?: ExpandWildcards } export interface OpenPointInTimeResponse { @@ -792,7 +937,7 @@ export interface RankEvalRankEvalRequestItem { } export interface RankEvalRequest extends RequestBase { - index: Indices + index?: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean @@ -994,7 +1139,7 @@ export interface SearchRequest extends RequestBase { track_total_hits?: SearchTrackHits indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery + knn?: KnnQuery | KnnQuery[] min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -1090,12 +1235,14 @@ export interface SearchAggregationProfileDebug { filters?: SearchAggregationProfileDelegateDebugFilter[] segments_counted?: integer segments_collected?: integer + map_reducer?: string } export interface SearchAggregationProfileDelegateDebugFilter { results_from_metadata?: integer query?: string specialized_for?: string + segments_counted_in_constant_time?: integer } export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' @@ -1135,8 +1282,7 
@@ export interface SearchCompletionSuggestOption { export interface SearchCompletionSuggester extends SearchSuggesterBase { contexts?: Record fuzzy?: SearchSuggestFuzziness - prefix?: string - regex?: string + regex?: SearchRegexOptions skip_duplicates?: boolean } @@ -1166,6 +1312,8 @@ export interface SearchFetchProfile { } export interface SearchFetchProfileBreakdown { + load_source?: integer + load_source_count?: integer load_stored_fields?: integer load_stored_fields_count?: integer next_reader?: integer @@ -1227,6 +1375,7 @@ export interface SearchHighlightBase { export interface SearchHighlightField extends SearchHighlightBase { fragment_offset?: integer matched_fields?: Fields + analyzer?: AnalysisAnalyzer } export type SearchHighlighterEncoder = 'default' | 'html' @@ -1328,8 +1477,9 @@ export interface SearchPhraseSuggestHighlight { export interface SearchPhraseSuggestOption { text: string - highlighted: string score: double + highlighted?: string + collate_match?: boolean } export interface SearchPhraseSuggester extends SearchSuggesterBase { @@ -1386,6 +1536,11 @@ export interface SearchQueryProfile { children?: SearchQueryProfile[] } +export interface SearchRegexOptions { + flags?: integer | string + max_determinized_states?: integer +} + export interface SearchRescore { query: SearchRescoreQuery window_size?: integer @@ -1472,8 +1627,10 @@ export interface SearchTermSuggest extends SearchSuggestBase { export interface SearchTermSuggestOption { text: string - freq: long score: double + freq: long + highlighted?: string + collate_match?: boolean } export interface SearchTermSuggester extends SearchSuggesterBase { @@ -1507,9 +1664,11 @@ export interface SearchMvtRequest extends RequestBase { x: SearchMvtCoordinate y: SearchMvtCoordinate aggs?: Record + buffer?: integer exact_bounds?: boolean extent?: integer fields?: Fields + grid_agg?: SearchMvtGridAggregationType grid_precision?: integer grid_type?: SearchMvtGridType query?: QueryDslQueryContainer @@ -1517,12 +1676,15 @@ export interface SearchMvtRequest extends RequestBase { size?: integer sort?: Sort track_total_hits?: SearchTrackHits + with_labels?: boolean } export type SearchMvtResponse = MapboxVectorTiles export type SearchMvtCoordinate = integer +export type SearchMvtGridAggregationType = 'geotile' | 'geohex' + export type SearchMvtGridType = 'grid' | 'point' | 'centroid' export type SearchMvtZoomLevel = integer @@ -1920,7 +2082,7 @@ export interface FieldSort { export type FieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos' -export type FieldValue = long | double | string | boolean | any +export type FieldValue = long | double | string | boolean | null | any export interface FielddataStats { evictions?: long @@ -1962,6 +2124,8 @@ export interface GeoHashLocation { export type GeoHashPrecision = number | string +export type GeoHexCell = string + export interface GeoLine { type: string coordinates: double[][] @@ -2027,6 +2191,7 @@ export interface IndexingStats { index_total: long index_failed: long types?: Record + write_load?: double } export type Indices = IndexName | IndexName[] @@ -2063,7 +2228,8 @@ export type Ip = string export interface KnnQuery { field: Field - query_vector: double[] + query_vector?: QueryVector + query_vector_builder?: QueryVectorBuilder k: long num_candidates: long boost?: float @@ -2124,10 +2290,11 @@ export interface NestedSortValue { export interface NodeAttributes { attributes: Record ephemeral_id: Id - id?: Id + id?: NodeId name: NodeName transport_address: TransportAddress 
roles?: NodeRoles + external_id?: string } export type NodeId = string @@ -2150,6 +2317,7 @@ export interface NodeShard { recovery_source?: Record unassigned_info?: ClusterAllocationExplainUnassignedInformation relocating_node?: NodeId | null + relocation_failure_info?: RelocationFailureInfo } export interface NodeStatistics { @@ -2179,7 +2347,6 @@ export interface PluginStats { name: Name version: VersionString licensed: boolean - type: string } export type PropertyName = string @@ -2190,11 +2357,17 @@ export interface QueryCacheStats { evictions: integer hit_count: integer memory_size?: ByteSize - memory_size_in_bytes: integer + memory_size_in_bytes: long miss_count: integer total_count: integer } +export type QueryVector = float[] + +export interface QueryVectorBuilder { + text_embedding?: TextEmbedding +} + export interface RecoveryStats { current_as_source: long current_as_target: long @@ -2215,6 +2388,10 @@ export interface RefreshStats { export type RelationName = string +export interface RelocationFailureInfo { + failed_attempts: integer +} + export interface RequestBase extends SpecUtilsCommonQueryParameters { } @@ -2260,7 +2437,7 @@ export interface ScriptSort { nested?: NestedSortValue } -export type ScriptSortType = 'string' | 'number' +export type ScriptSortType = 'string' | 'number' | 'version' export interface ScriptTransform { lang?: string @@ -2304,28 +2481,28 @@ export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' export interface SegmentsStats { count: integer doc_values_memory?: ByteSize - doc_values_memory_in_bytes: integer + doc_values_memory_in_bytes: long file_sizes: Record fixed_bit_set?: ByteSize - fixed_bit_set_memory_in_bytes: integer + fixed_bit_set_memory_in_bytes: long index_writer_memory?: ByteSize - index_writer_max_memory_in_bytes?: integer - index_writer_memory_in_bytes: integer + index_writer_max_memory_in_bytes?: long + index_writer_memory_in_bytes: long max_unsafe_auto_id_timestamp: long memory?: ByteSize - memory_in_bytes: integer + memory_in_bytes: long norms_memory?: ByteSize - norms_memory_in_bytes: integer + norms_memory_in_bytes: long points_memory?: ByteSize - points_memory_in_bytes: integer + points_memory_in_bytes: long stored_memory?: ByteSize - stored_fields_memory_in_bytes: integer - terms_memory_in_bytes: integer + stored_fields_memory_in_bytes: long + terms_memory_in_bytes: long terms_memory?: ByteSize term_vectory_memory?: ByteSize - term_vectors_memory_in_bytes: integer + term_vectors_memory_in_bytes: long version_map_memory?: ByteSize - version_map_memory_in_bytes: integer + version_map_memory_in_bytes: long } export type SequenceNumber = long @@ -2354,7 +2531,7 @@ export interface ShardsOperationResponseBase { export interface SlicedScroll { field?: Field - id: integer + id: Id max: integer } @@ -2379,15 +2556,15 @@ export type SortOptions = SortOptionsKeys export type SortOrder = 'asc' | 'desc' -export type SortResults = (long | double | string | null)[] +export type SortResults = FieldValue[] export interface StoreStats { size?: ByteSize - size_in_bytes: integer + size_in_bytes: long reserved?: ByteSize - reserved_in_bytes: integer + reserved_in_bytes: long total_data_set_size?: ByteSize - total_data_set_size_in_bytes?: integer + total_data_set_size_in_bytes?: long } export interface StoredScript { @@ -2413,6 +2590,11 @@ export interface TaskFailure { export type TaskId = string | integer +export interface TextEmbedding { + model_id: string + model_text: string +} + export type ThreadType = 'cpu' | 'wait' | 'block' | 
'gpu' | 'mem' export type TimeOfDay = string @@ -2519,11 +2701,12 @@ export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBuck } export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMultiBucketBase { + key: string } export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys -& { [property: string]: AggregationsAggregate | long } +& { [property: string]: AggregationsAggregate | string | long } -export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate +export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate 
| AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate export interface AggregationsAggregateBase { meta?: Metadata @@ -2562,6 +2745,7 @@ export interface AggregationsAggregationContainer { diversified_sampler?: AggregationsDiversifiedSamplerAggregation extended_stats?: AggregationsExtendedStatsAggregation extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation + frequent_item_sets?: AggregationsFrequentItemSetsAggregation filter?: QueryDslQueryContainer filters?: AggregationsFiltersAggregation geo_bounds?: AggregationsGeoBoundsAggregation @@ -2574,6 +2758,7 @@ export interface AggregationsAggregationContainer { global?: AggregationsGlobalAggregation histogram?: AggregationsHistogramAggregation ip_range?: AggregationsIpRangeAggregation + ip_prefix?: AggregationsIpPrefixAggregation inference?: AggregationsInferenceAggregation line?: AggregationsGeoLineAggregation matrix_stats?: AggregationsMatrixStatsAggregation @@ -2772,11 +2957,13 @@ export interface AggregationsChildrenAggregation extends AggregationsBucketAggre } export interface AggregationsCompositeAggregate extends AggregationsMultiBucketAggregateBase { - after_key?: Record + after_key?: AggregationsCompositeAggregateKey } +export type AggregationsCompositeAggregateKey = Record + export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase { - after?: Record + after?: AggregationsCompositeAggregateKey size?: integer sources?: Record[] } @@ -2789,10 +2976,10 @@ export interface AggregationsCompositeAggregationSource { } export interface AggregationsCompositeBucketKeys extends AggregationsMultiBucketBase { - key: Record + key: AggregationsCompositeAggregateKey } export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys -& { [property: string]: AggregationsAggregate | Record | long } +& { [property: 
string]: AggregationsAggregate | AggregationsCompositeAggregateKey | long } export interface AggregationsCumulativeCardinalityAggregate extends AggregationsAggregateBase { value: long @@ -2903,6 +3090,8 @@ export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAgg variance_population: double | null variance_sampling: double | null std_deviation: double | null + std_deviation_population: double | null + std_deviation_sampling: double | null std_deviation_bounds?: AggregationsStandardDeviationBounds sum_of_squares_as_string?: string variance_as_string?: string @@ -2953,7 +3142,31 @@ export interface AggregationsFormattableMetricAggregation extends AggregationsMe format?: string } -export type AggregationsGapPolicy = 'skip' | 'insert_zeros' +export interface AggregationsFrequentItemSetsAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsFrequentItemSetsAggregation { + fields: AggregationsFrequentItemSetsField[] + minimum_set_size?: integer + minimum_support?: double + size?: integer + filter?: QueryDslQueryContainer +} + +export interface AggregationsFrequentItemSetsBucketKeys extends AggregationsMultiBucketBase { + key: Record + support: double +} +export type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBucketKeys +& { [property: string]: AggregationsAggregate | Record | double | long } + +export interface AggregationsFrequentItemSetsField { + field: Field + exclude?: string | string[] + include?: string | string[] +} + +export type AggregationsGapPolicy = 'skip' | 'insert_zeros' | 'keep_values' export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase { bounds?: GeoBounds @@ -3001,9 +3214,19 @@ export interface AggregationsGeoHashGridBucketKeys extends AggregationsMultiBuck export type AggregationsGeoHashGridBucket = AggregationsGeoHashGridBucketKeys & { [property: string]: AggregationsAggregate | GeoHash | long } +export interface AggregationsGeoHexGridAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsGeoHexGridBucketKeys extends AggregationsMultiBucketBase { + key: GeoHexCell +} +export type AggregationsGeoHexGridBucket = AggregationsGeoHexGridBucketKeys +& { [property: string]: AggregationsAggregate | GeoHexCell | long } + export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase { type: string geometry: GeoLine + properties: any } export interface AggregationsGeoLineAggregation { @@ -3155,6 +3378,27 @@ export interface AggregationsInferenceTopClassEntry { class_score: double } +export interface AggregationsIpPrefixAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsIpPrefixAggregation extends AggregationsBucketAggregationBase { + field: Field + prefix_length: integer + is_ipv6?: boolean + append_prefix_length?: boolean + keyed?: boolean + min_doc_count?: long +} + +export interface AggregationsIpPrefixBucketKeys extends AggregationsMultiBucketBase { + is_ipv6: boolean + key: string + prefix_length: integer + netmask?: string +} +export type AggregationsIpPrefixBucket = AggregationsIpPrefixBucketKeys +& { [property: string]: AggregationsAggregate | boolean | string | integer | long } + export interface AggregationsIpRangeAggregate extends AggregationsMultiBucketAggregateBase { } @@ -3170,6 +3414,7 @@ export interface AggregationsIpRangeAggregationRange { } export interface AggregationsIpRangeBucketKeys extends AggregationsMultiBucketBase { + key?: string from?: string to?: string 
} @@ -3210,7 +3455,7 @@ export interface AggregationsMatrixAggregation extends AggregationsAggregation { export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateBase { doc_count: long - fields: AggregationsMatrixStatsFields[] + fields?: AggregationsMatrixStatsFields[] } export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation { @@ -3305,6 +3550,7 @@ export interface AggregationsMultiBucketBase { export interface AggregationsMultiTermLookup { field: Field + missing?: AggregationsMissing } export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggregateBase { @@ -3322,12 +3568,12 @@ export interface AggregationsMultiTermsAggregation extends AggregationsBucketAgg } export interface AggregationsMultiTermsBucketKeys extends AggregationsMultiBucketBase { - key: (long | double | string)[] + key: FieldValue[] key_as_string?: string doc_count_error_upper_bound?: long } export type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys -& { [property: string]: AggregationsAggregate | (long | double | string)[] | string | long } +& { [property: string]: AggregationsAggregate | FieldValue[] | string | long } export interface AggregationsMutualInformationHeuristic { background_is_superset?: boolean @@ -3477,7 +3723,7 @@ export interface AggregationsSerialDifferencingAggregation extends AggregationsP lag?: integer } -export interface AggregationsSignificantLongTermsAggregate extends AggregationsMultiBucketAggregateBase { +export interface AggregationsSignificantLongTermsAggregate extends AggregationsSignificantTermsAggregateBase { } export interface AggregationsSignificantLongTermsBucketKeys extends AggregationsSignificantTermsBucketBase { @@ -3487,7 +3733,7 @@ export interface AggregationsSignificantLongTermsBucketKeys extends Aggregations export type AggregationsSignificantLongTermsBucket = AggregationsSignificantLongTermsBucketKeys & { [property: string]: AggregationsAggregate | long | string | double } -export interface AggregationsSignificantStringTermsAggregate extends AggregationsMultiBucketAggregateBase { +export interface AggregationsSignificantStringTermsAggregate extends AggregationsSignificantTermsAggregateBase { } export interface AggregationsSignificantStringTermsBucketKeys extends AggregationsSignificantTermsBucketBase { @@ -3496,6 +3742,11 @@ export interface AggregationsSignificantStringTermsBucketKeys extends Aggregatio export type AggregationsSignificantStringTermsBucket = AggregationsSignificantStringTermsBucketKeys & { [property: string]: AggregationsAggregate | string | double | long } +export interface AggregationsSignificantTermsAggregateBase extends AggregationsMultiBucketAggregateBase { + bg_count?: long + doc_count?: long +} + export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase { background_filter?: QueryDslQueryContainer chi_square?: AggregationsChiSquareHeuristic @@ -3624,10 +3875,10 @@ export interface AggregationsStringTermsAggregate extends AggregationsTermsAggre } export interface AggregationsStringTermsBucketKeys extends AggregationsTermsBucketBase { - key: string + key: FieldValue } export type AggregationsStringTermsBucket = AggregationsStringTermsBucketKeys -& { [property: string]: AggregationsAggregate | string | long } +& { [property: string]: AggregationsAggregate | FieldValue | long } export interface AggregationsSumAggregate extends AggregationsSingleMetricAggregateBase { } @@ -3663,7 +3914,7 @@ export type AggregationsTTestType = 
'paired' | 'homoscedastic' | 'heteroscedasti export interface AggregationsTermsAggregateBase extends AggregationsMultiBucketAggregateBase { doc_count_error_upper_bound?: long - sum_other_doc_count: long + sum_other_doc_count?: long } export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase { @@ -3754,7 +4005,7 @@ export interface AggregationsUnmappedSamplerAggregateKeys extends AggregationsSi export type AggregationsUnmappedSamplerAggregate = AggregationsUnmappedSamplerAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } -export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsMultiBucketAggregateBase { +export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsSignificantTermsAggregateBase { } export interface AggregationsUnmappedTermsAggregate extends AggregationsTermsAggregateBase { @@ -4103,7 +4354,7 @@ export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase { export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase { type: 'limit' consume_all_tokens?: boolean - max_token_count?: integer + max_token_count?: SpecUtilsStringified } export interface AnalysisLowercaseNormalizer { @@ -4300,7 +4551,8 @@ export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterB export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase { type: 'stemmer' - language: string + language?: string + name?: string } export interface AnalysisStopAnalyzer { @@ -4435,6 +4687,7 @@ export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBas type: 'aggregate_metric_double' default_metric: string metrics: string[] + time_series_metric?: MappingTimeSeriesMetricType } export interface MappingAllField { @@ -4462,7 +4715,7 @@ export interface MappingBooleanProperty extends MappingDocValuesPropertyBase { type: 'boolean' } -export interface MappingByteNumberProperty extends MappingStandardNumberProperty { +export interface MappingByteNumberProperty extends MappingNumberPropertyBase { type: 'byte' null_value?: byte } @@ -4537,7 +4790,7 @@ export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase { doc_values?: boolean } -export interface MappingDoubleNumberProperty extends MappingStandardNumberProperty { +export interface MappingDoubleNumberProperty extends MappingNumberPropertyBase { type: 'double' null_value?: double } @@ -4613,7 +4866,7 @@ export interface MappingFlattenedProperty extends MappingPropertyBase { type: 'flattened' } -export interface MappingFloatNumberProperty extends MappingStandardNumberProperty { +export interface MappingFloatNumberProperty extends MappingNumberPropertyBase { type: 'float' null_value?: float } @@ -4642,7 +4895,7 @@ export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { export type MappingGeoStrategy = 'recursive' | 'term' -export interface MappingHalfFloatNumberProperty extends MappingStandardNumberProperty { +export interface MappingHalfFloatNumberProperty extends MappingNumberPropertyBase { type: 'half_float' null_value?: float } @@ -4658,7 +4911,7 @@ export interface MappingIndexField { export type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets' -export interface MappingIntegerNumberProperty extends MappingStandardNumberProperty { +export interface MappingIntegerNumberProperty extends MappingNumberPropertyBase { type: 'integer' null_value?: integer } @@ -4670,8 +4923,11 @@ export interface MappingIntegerRangeProperty 
extends MappingRangePropertyBase { export interface MappingIpProperty extends MappingDocValuesPropertyBase { boost?: double index?: boolean - null_value?: string ignore_malformed?: boolean + null_value?: string + on_script_error?: MappingOnScriptError + script?: Script + time_series_dimension?: boolean type: 'ip' } @@ -4681,6 +4937,7 @@ export interface MappingIpRangeProperty extends MappingRangePropertyBase { export interface MappingJoinProperty extends MappingPropertyBase { relations?: Record + eager_global_ordinals?: boolean type: 'join' } @@ -4697,7 +4954,7 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { type: 'keyword' } -export interface MappingLongNumberProperty extends MappingStandardNumberProperty { +export interface MappingLongNumberProperty extends MappingNumberPropertyBase { type: 'long' null_value?: long } @@ -4727,9 +4984,14 @@ export interface MappingNestedProperty extends MappingCorePropertyBase { } export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase { - index?: boolean + boost?: double + coerce?: boolean ignore_malformed?: boolean + index?: boolean + on_script_error?: MappingOnScriptError + script?: Script time_series_metric?: MappingTimeSeriesMetricType + time_series_dimension?: boolean } export interface MappingObjectProperty extends MappingCorePropertyBase { @@ -4753,7 +5015,6 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty export interface MappingPropertyBase { - local_metadata?: Metadata meta?: Record properties?: Record ignore_above?: integer @@ -4781,18 +5042,26 @@ export interface MappingRoutingField { } export interface MappingRuntimeField { + fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[] format?: string + input_field?: Field + target_field?: Field + target_index?: IndexName script?: Script type: MappingRuntimeFieldType } -export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' +export interface MappingRuntimeFieldFetchFields { + field: Field + format?: string +} + +export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' | 'lookup' export type MappingRuntimeFields = Record 
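// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch above): how the new 'lookup'
// runtime field type and the fetch_fields / input_field / target_field /
// target_index options added to MappingRuntimeField might be used from the
// client. All index and field names below are invented for illustration,
// and an already reachable Elasticsearch node is assumed.
// ---------------------------------------------------------------------------
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function searchWithLookupRuntimeField (): Promise<void> {
  const result = await client.search({
    index: 'logs', // hypothetical source index
    runtime_mappings: {
      ip_location: {
        type: 'lookup',            // new member of MappingRuntimeFieldType
        target_index: 'ip_data',   // index the lookup runs against
        input_field: 'client_ip',  // field on 'logs' that supplies the join key
        target_field: 'ip',        // field on 'ip_data' to match
        fetch_fields: ['country', { field: 'city' }] // Field or { field, format }
      }
    },
    fields: ['ip_location'],
    query: { match_all: {} }
  })
  console.log(result.hits.hits)
}

searchWithLookupRuntimeField().catch(console.log)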
export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase { type: 'scaled_float' - coerce?: boolean null_value?: double scaling_factor?: double } @@ -4817,7 +5086,7 @@ export interface MappingShapeProperty extends MappingDocValuesPropertyBase { type: 'shape' } -export interface MappingShortNumberProperty extends MappingStandardNumberProperty { +export interface MappingShortNumberProperty extends MappingNumberPropertyBase { type: 'short' null_value?: short } @@ -4832,13 +5101,10 @@ export interface MappingSourceField { enabled?: boolean excludes?: string[] includes?: string[] + mode?: MappingSourceFieldMode } -export interface MappingStandardNumberProperty extends MappingNumberPropertyBase { - coerce?: boolean - script?: Script - on_script_error?: MappingOnScriptError -} +export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic' export interface MappingSuggestContext { name: Name @@ -4888,7 +5154,7 @@ export interface MappingTypeMapping { date_detection?: boolean dynamic?: MappingDynamicMapping dynamic_date_formats?: string[] - dynamic_templates?: Record | Record[] + dynamic_templates?: Record[] _field_names?: MappingFieldNamesField index_field?: MappingIndexField _meta?: Metadata @@ -5762,7 +6028,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { track_total_hits?: SearchTrackHits indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery + knn?: KnnQuery | KnnQuery[] min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -5862,7 +6128,7 @@ export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } -export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v' +export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v' export type CatCatTrainedModelsColumns = CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[] @@ -6345,6 +6611,7 @@ export interface CatIndicesRequest extends CatCatRequestBase { health?: HealthStatus include_unloaded_segments?: boolean pri?: boolean + time?: TimeUnit } export type CatIndicesResponse = CatIndicesIndicesRecord[] @@ -8051,6 +8318,26 @@ export interface ClusterGetSettingsResponse { defaults?: Record } +export interface ClusterHealthHealthResponseBody { + active_primary_shards: integer + active_shards: integer + active_shards_percent_as_number: Percentage + cluster_name: Name + delayed_unassigned_shards: integer + indices?: Record + initializing_shards: integer + number_of_data_nodes: integer + number_of_in_flight_fetch: integer + 
number_of_nodes: integer + number_of_pending_tasks: integer + relocating_shards: integer + status: HealthStatus + task_max_waiting_in_queue?: Duration + task_max_waiting_in_queue_millis: DurationValue + timed_out: boolean + unassigned_shards: integer +} + export interface ClusterHealthIndexHealthStats { active_primary_shards: integer active_shards: integer @@ -8078,25 +8365,7 @@ export interface ClusterHealthRequest extends RequestBase { wait_for_status?: HealthStatus } -export interface ClusterHealthResponse { - active_primary_shards: integer - active_shards: integer - active_shards_percent_as_number: Percentage - cluster_name: Name - delayed_unassigned_shards: integer - indices?: Record - initializing_shards: integer - number_of_data_nodes: integer - number_of_in_flight_fetch: integer - number_of_nodes: integer - number_of_pending_tasks: integer - relocating_shards: integer - status: HealthStatus - task_max_waiting_in_queue?: Duration - task_max_waiting_in_queue_millis: DurationValue - timed_out: boolean - unassigned_shards: integer -} +export type ClusterHealthResponse = ClusterHealthHealthResponseBody export interface ClusterHealthShardHealthStats { active_shards: integer @@ -8138,11 +8407,9 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { create?: boolean master_timeout?: Duration template: IndicesIndexState - aliases?: Record - mappings?: MappingTypeMapping - settings?: IndicesIndexSettings version?: VersionNumber _meta?: Metadata + allow_auto_create?: boolean } export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase @@ -8258,7 +8525,7 @@ export interface ClusterRerouteRerouteParameters { export interface ClusterRerouteResponse { acknowledged: boolean explanations?: ClusterRerouteRerouteExplanation[] - state: any + state?: any } export interface ClusterStateRequest extends RequestBase { @@ -8441,12 +8708,19 @@ export interface ClusterStatsFieldTypes { name: Name count: integer index_count: integer + indexed_vector_count?: long + indexed_vector_dim_max?: long + indexed_vector_dim_min?: long script_count?: integer } export interface ClusterStatsFieldTypesMappings { field_types: ClusterStatsFieldTypes[] runtime_field_types?: ClusterStatsRuntimeFieldTypes[] + total_field_count?: integer + total_deduplicated_field_count?: integer + total_deduplicated_mapping_size?: ByteSize + total_deduplicated_mapping_size_in_bytes?: long } export interface ClusterStatsIndexingPressure { @@ -8558,22 +8832,19 @@ export interface DanglingIndicesListDanglingIndicesResponse { dangling_indices: DanglingIndicesListDanglingIndicesDanglingIndex[] } -export interface EnrichConfiguration { - geo_match?: EnrichPolicy - match: EnrichPolicy - range: EnrichPolicy -} - export interface EnrichPolicy { enrich_fields: Fields indices: Indices match_field: Field query?: string name?: Name + elasticsearch_version?: string } +export type EnrichPolicyType = 'geo_match' | 'match' | 'range' + export interface EnrichSummary { - config: EnrichConfiguration + config: Partial> } export interface EnrichDeletePolicyRequest extends RequestBase { @@ -8759,7 +9030,7 @@ export interface FleetGlobalCheckpointsResponse { } export interface FleetMsearchRequest extends RequestBase { - index: IndexName | IndexAlias + index?: IndexName | IndexAlias allow_no_indices?: boolean ccs_minimize_roundtrips?: boolean expand_wildcards?: ExpandWildcards @@ -8948,6 +9219,7 @@ export interface IlmPhase { export interface IlmPhases { cold?: IlmPhase delete?: IlmPhase + frozen?: IlmPhase hot?: IlmPhase warm?: 
IlmPhase } @@ -9159,6 +9431,10 @@ export interface IndicesDataStreamVisibility { hidden?: boolean } +export interface IndicesDownsampleConfig { + fixed_interval: DurationLarge +} + export interface IndicesFielddataFrequencyFilter { max: double min: double @@ -9212,7 +9488,7 @@ export interface IndicesIndexSettingBlocks { read_only_allow_delete?: boolean read?: boolean write?: boolean | string - metadata?: boolean + metadata?: SpecUtilsStringified } export interface IndicesIndexSettingsKeys { @@ -9226,7 +9502,7 @@ export interface IndicesIndexSettingsKeys { number_of_routing_shards?: integer check_on_startup?: IndicesIndexCheckOnStartup codec?: string - routing_partition_size?: integer + routing_partition_size?: SpecUtilsStringified load_fixed_bitset_filters_eagerly?: boolean hidden?: boolean | string auto_expand_replicas?: string @@ -9335,6 +9611,7 @@ export interface IndicesIndexTemplateSummary { export interface IndicesIndexVersioning { created?: VersionString + created_string?: string } export interface IndicesIndexingPressure { @@ -9346,6 +9623,7 @@ export interface IndicesIndexingPressureMemory { } export interface IndicesMappingLimitSettings { + coerce?: boolean total_fields?: IndicesMappingLimitSettingsTotalFields depth?: IndicesMappingLimitSettingsDepth nested_fields?: IndicesMappingLimitSettingsNestedFields @@ -9384,8 +9662,8 @@ export interface IndicesMerge { } export interface IndicesMergeScheduler { - max_thread_count?: integer - max_merge_count?: integer + max_thread_count?: SpecUtilsStringified + max_merge_count?: SpecUtilsStringified } export interface IndicesNumericFielddata { @@ -9413,7 +9691,7 @@ export type IndicesSegmentSortMode = 'min' | 'MIN' | 'max' | 'MAX' export type IndicesSegmentSortOrder = 'asc' | 'ASC' | 'desc' | 'DESC' export interface IndicesSettingsAnalyze { - max_token_count?: integer + max_token_count?: SpecUtilsStringified } export interface IndicesSettingsHighlight { @@ -9421,7 +9699,7 @@ export interface IndicesSettingsHighlight { } export interface IndicesSettingsQueryString { - lenient: boolean + lenient: SpecUtilsStringified } export interface IndicesSettingsSearch { @@ -9440,7 +9718,7 @@ export interface IndicesSettingsSimilarity { } export interface IndicesSettingsSimilarityBm25 { - b: integer + b: double discount_overlaps: boolean k1: double type: 'BM25' @@ -9510,7 +9788,7 @@ export interface IndicesStorage { allow_mmap?: boolean } -export type IndicesStorageType = 'fs' | '' | 'niofs' | 'mmapfs' | 'hybridfs' +export type IndicesStorageType = 'fs' | '' | 'niofs' | 'mmapfs' | 'hybridfs'| string export interface IndicesTemplateMapping { aliases: Record @@ -9779,7 +10057,7 @@ export type IndicesDiskUsageResponse = any export interface IndicesDownsampleRequest extends RequestBase { index: IndexName target_index: IndexName - config?: any + config?: IndicesDownsampleConfig } export type IndicesDownsampleResponse = any @@ -9901,7 +10179,11 @@ export interface IndicesForcemergeRequest extends RequestBase { wait_for_completion?: boolean } -export type IndicesForcemergeResponse = ShardsOperationResponseBase +export type IndicesForcemergeResponse = IndicesForcemergeForceMergeResponseBody + +export interface IndicesForcemergeForceMergeResponseBody extends ShardsOperationResponseBase { + task?: string +} export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings' @@ -10333,13 +10615,21 @@ export interface IndicesRolloverResponse { } export interface IndicesRolloverRolloverConditions { + min_age?: Duration max_age?: Duration max_age_millis?: 
DurationValue + min_docs?: long max_docs?: long - max_size?: string - max_size_bytes?: ByteSize + max_size?: ByteSize + max_size_bytes?: long + min_size?: ByteSize + min_size_bytes?: long max_primary_shard_size?: ByteSize - max_primary_shard_size_bytes?: ByteSize + max_primary_shard_size_bytes?: long + min_primary_shard_size?: ByteSize + min_primary_shard_size_bytes?: long + max_primary_shard_docs?: long + min_primary_shard_docs?: long } export interface IndicesSegmentsIndexSegment { @@ -10365,7 +10655,6 @@ export interface IndicesSegmentsSegment { compound: boolean deleted_docs: long generation: integer - memory_in_bytes: double search: boolean size_in_bytes: double num_docs: long @@ -10401,16 +10690,13 @@ export interface IndicesShardStoresResponse { indices: Record } -export interface IndicesShardStoresShardStore { +export interface IndicesShardStoresShardStoreKeys { allocation: IndicesShardStoresShardStoreAllocation - allocation_id: Id - attributes: Record - id: Id - legacy_version: VersionNumber - name: Name - store_exception: IndicesShardStoresShardStoreException - transport_address: TransportAddress + allocation_id?: Id + store_exception?: IndicesShardStoresShardStoreException } +export type IndicesShardStoresShardStore = IndicesShardStoresShardStoreKeys +& { [property: string]: IndicesShardStoresShardStoreNode | IndicesShardStoresShardStoreAllocation | Id | IndicesShardStoresShardStoreException } export type IndicesShardStoresShardStoreAllocation = 'primary' | 'replica' | 'unused' @@ -10419,6 +10705,15 @@ export interface IndicesShardStoresShardStoreException { type: string } +export interface IndicesShardStoresShardStoreNode { + attributes: Record + ephemeral_id?: string + external_id?: string + name: Name + roles: string[] + transport_address: TransportAddress +} + export type IndicesShardStoresShardStoreStatus = 'green' | 'yellow' | 'red' | 'all' export interface IndicesShardStoresShardStoreWrapper { @@ -10530,6 +10825,12 @@ export interface IndicesStatsIndicesStats { status?: IndicesStatsIndexMetadataState } +export interface IndicesStatsMappingStats { + total_count: long + total_estimated_overhead?: ByteSize + total_estimated_overhead_in_bytes: long +} + export interface IndicesStatsRequest extends RequestBase { metric?: Metrics index?: Indices @@ -10618,6 +10919,7 @@ export interface IndicesStatsShardStats { flush?: FlushStats get?: GetStats indexing?: IndexingStats + mappings?: IndicesStatsMappingStats merges?: MergesStats shard_path?: IndicesStatsShardPath query_cache?: IndicesStatsShardQueryCache @@ -10633,7 +10935,7 @@ export interface IndicesStatsShardStats { translog?: TranslogStats warmer?: WarmerStats bulk?: BulkStats - shards?: IndicesStatsShardsTotalStats + shards?: Record shard_stats?: IndicesStatsShardsTotalStats indices?: IndicesStatsIndicesStats } @@ -10755,39 +11057,38 @@ export interface IngestBytesProcessor extends IngestProcessorBase { export interface IngestCircleProcessor extends IngestProcessorBase { error_distance: double field: Field - ignore_missing: boolean + ignore_missing?: boolean shape_type: IngestShapeType - target_field: Field + target_field?: Field } export interface IngestConvertProcessor extends IngestProcessorBase { field: Field ignore_missing?: boolean - target_field: Field + target_field?: Field type: IngestConvertType } export type IngestConvertType = 'integer' | 'long' | 'float' | 'double' | 'string' | 'boolean' | 'auto' export interface IngestCsvProcessor extends IngestProcessorBase { - empty_value: any - description?: string + 
empty_value?: any field: Field ignore_missing?: boolean quote?: string separator?: string target_fields: Fields - trim: boolean + trim?: boolean } export interface IngestDateIndexNameProcessor extends IngestProcessorBase { date_formats: string[] date_rounding: string field: Field - index_name_format: string - index_name_prefix: string - locale: string - timezone: string + index_name_format?: string + index_name_prefix?: string + locale?: string + timezone?: string } export interface IngestDateProcessor extends IngestProcessorBase { @@ -10799,9 +11100,9 @@ export interface IngestDateProcessor extends IngestProcessorBase { } export interface IngestDissectProcessor extends IngestProcessorBase { - append_separator: string + append_separator?: string field: Field - ignore_missing: boolean + ignore_missing?: boolean pattern: string } @@ -10834,18 +11135,18 @@ export interface IngestForeachProcessor extends IngestProcessorBase { } export interface IngestGeoIpProcessor extends IngestProcessorBase { - database_file: string + database_file?: string field: Field - first_only: boolean - ignore_missing: boolean - properties: string[] - target_field: Field + first_only?: boolean + ignore_missing?: boolean + properties?: string[] + target_field?: Field } export interface IngestGrokProcessor extends IngestProcessorBase { field: Field ignore_missing?: boolean - pattern_definitions: Record + pattern_definitions?: Record patterns: string[] trace_match?: boolean } @@ -10878,7 +11179,7 @@ export interface IngestInferenceConfigRegression { export interface IngestInferenceProcessor extends IngestProcessorBase { model_id: Id - target_field: Field + target_field?: Field field_map?: Record inference_config?: IngestInferenceConfig } @@ -10890,11 +11191,15 @@ export interface IngestJoinProcessor extends IngestProcessorBase { } export interface IngestJsonProcessor extends IngestProcessorBase { - add_to_root: boolean + add_to_root?: boolean + add_to_root_conflict_strategy?: IngestJsonProcessorConflictStrategy + allow_duplicate_keys?: boolean field: Field - target_field: Field + target_field?: Field } +export type IngestJsonProcessorConflictStrategy = 'replace' | 'merge' + export interface IngestKeyValueProcessor extends IngestProcessorBase { exclude_keys?: string[] field: Field @@ -10930,9 +11235,11 @@ export interface IngestPipelineConfig { export interface IngestPipelineProcessor extends IngestProcessorBase { name: Name + ignore_missing_pipeline?: boolean } export interface IngestProcessorBase { + description?: string if?: string ignore_failure?: boolean on_failure?: IngestProcessorContainer[] @@ -10988,9 +11295,12 @@ export interface IngestRenameProcessor extends IngestProcessorBase { } export interface IngestSetProcessor extends IngestProcessorBase { + copy_from?: Field field: Field + ignore_empty_value?: boolean + media_type?: string override?: boolean - value: any + value?: any } export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { @@ -11002,8 +11312,8 @@ export type IngestShapeType = 'geo_shape' | 'shape' export interface IngestSortProcessor extends IngestProcessorBase { field: Field - order: SortOrder - target_field: Field + order?: SortOrder + target_field?: Field } export interface IngestSplitProcessor extends IngestProcessorBase { @@ -11034,10 +11344,10 @@ export interface IngestUrlDecodeProcessor extends IngestProcessorBase { export interface IngestUserAgentProcessor extends IngestProcessorBase { field: Field - ignore_missing: boolean - options: IngestUserAgentProperty[] - 
regex_file: string - target_field: Field + ignore_missing?: boolean + options?: IngestUserAgentProperty[] + regex_file?: string + target_field?: Field } export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD' @@ -11110,14 +11420,17 @@ export interface IngestSimulateDocument { _source: any } -export interface IngestSimulateDocumentSimulation { +export interface IngestSimulateDocumentSimulationKeys { _id: Id _index: IndexName _ingest: IngestSimulateIngest - _parent?: string _routing?: string _source: Record + _version?: SpecUtilsStringified + _version_type?: VersionType } +export type IngestSimulateDocumentSimulation = IngestSimulateDocumentSimulationKeys +& { [property: string]: string | Id | IndexName | IngestSimulateIngest | Record | SpecUtilsStringified | VersionType } export interface IngestSimulateIngest { timestamp: DateTime @@ -11211,7 +11524,7 @@ export interface LicensePostAcknowledgement { export interface LicensePostRequest extends RequestBase { acknowledge?: boolean license?: LicenseLicense - licenses: LicenseLicense[] + licenses?: LicenseLicense[] } export interface LicensePostResponse { @@ -11343,7 +11656,7 @@ export interface MigrationPostFeatureUpgradeResponse { } export interface MlAnalysisConfig { - bucket_span: Duration + bucket_span?: Duration categorization_analyzer?: MlCategorizationAnalyzer categorization_field_name?: Field categorization_filters?: string[] @@ -11381,6 +11694,7 @@ export interface MlAnalysisMemoryLimit { export interface MlAnomaly { actual?: double[] + anomaly_score_explanation?: MlAnomalyExplanation bucket_span: DurationValue by_field_name?: string by_field_value?: string @@ -11389,6 +11703,7 @@ export interface MlAnomaly { field_name?: string function?: string function_description?: string + geo_results?: MlGeoResults influencers?: MlInfluence[] initial_record_score: double is_interim: boolean @@ -11421,6 +11736,19 @@ export interface MlAnomalyCause { typical: double[] } +export interface MlAnomalyExplanation { + anomaly_characteristics_impact?: integer + anomaly_length?: integer + anomaly_type?: string + high_variance_penalty?: boolean + incomplete_bucket_penalty?: boolean + lower_confidence_bound?: double + multi_bucket_impact?: integer + single_bucket_impact?: integer + typical_value?: double + upper_confidence_bound?: double +} + export interface MlApiKeyAuthorization { id: string name: string @@ -11572,12 +11900,12 @@ export interface MlDatafeedConfig { datafeed_id?: Id delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Duration + indices?: string[] indexes?: string[] - indices: string[] indices_options?: IndicesOptions job_id?: Id max_empty_searches?: integer - query: QueryDslQueryContainer + query?: QueryDslQueryContainer query_delay?: Duration runtime_mappings?: MappingRuntimeFields script_fields?: Record @@ -11883,7 +12211,7 @@ export interface MlDetector { detector_index?: integer exclude_frequent?: MlExcludeFrequent field_name?: Field - function: string + function?: string over_field_name?: Field partition_field_name?: Field use_null?: boolean @@ -11937,6 +12265,11 @@ export interface MlFilterRef { export type MlFilterType = 'include' | 'exclude' +export interface MlGeoResults { + actual_point: string + typical_point: string +} + export interface MlHyperparameter { absolute_importance?: double name: Name @@ -11962,7 +12295,7 @@ export interface MlHyperparameters { soft_tree_depth_tolerance?: double } -export type MlInclude = 'definition' | 
'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' +export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' | 'definition_status' export interface MlInferenceConfigCreateContainer { regression?: MlRegressionInferenceOptions @@ -11973,6 +12306,7 @@ export interface MlInferenceConfigCreateContainer { ner?: MlNerInferenceOptions pass_through?: MlPassThroughInferenceOptions text_embedding?: MlTextEmbeddingInferenceOptions + text_expansion?: MlTextExpansionInferenceOptions question_answering?: MlQuestionAnsweringInferenceOptions } @@ -11985,6 +12319,7 @@ export interface MlInferenceConfigUpdateContainer { ner?: MlNerInferenceUpdateOptions pass_through?: MlPassThroughInferenceUpdateOptions text_embedding?: MlTextEmbeddingInferenceUpdateOptions + text_expansion?: MlTextExpansionInferenceUpdateOptions question_answering?: MlQuestionAnsweringInferenceUpdateOptions } @@ -12171,6 +12506,7 @@ export interface MlNerInferenceOptions { tokenization?: MlTokenizationConfigContainer results_field?: string classification_labels?: string[] + vocabulary?: MlVocabulary } export interface MlNerInferenceUpdateOptions { @@ -12231,6 +12567,7 @@ export interface MlPage { export interface MlPassThroughInferenceOptions { tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary?: MlVocabulary } export interface MlPassThroughInferenceUpdateOptions { @@ -12299,6 +12636,7 @@ export interface MlTextClassificationInferenceUpdateOptions { } export interface MlTextEmbeddingInferenceOptions { + embedding_size?: integer tokenization?: MlTokenizationConfigContainer results_field?: string } @@ -12308,6 +12646,16 @@ export interface MlTextEmbeddingInferenceUpdateOptions { results_field?: string } +export interface MlTextExpansionInferenceOptions { + tokenization?: MlTokenizationConfigContainer + results_field?: string +} + +export interface MlTextExpansionInferenceUpdateOptions { + tokenization?: MlNlpTokenizationUpdateOptions + results_field?: string +} + export interface MlTimingStats { elapsed_time: DurationValue iteration_time?: DurationValue @@ -12346,6 +12694,7 @@ export interface MlTotalFeatureImportanceStatistics { export interface MlTrainedModelAssignment { assignment_state: MlDeploymentAssignmentState + max_assigned_allocations?: integer routing_table: Record start_time: DateTime task_parameters: MlTrainedModelAssignmentTaskParameters @@ -12361,8 +12710,10 @@ export interface MlTrainedModelAssignmentRoutingTable { export interface MlTrainedModelAssignmentTaskParameters { model_bytes: integer model_id: Id + deployment_id: Id cache_size: ByteSize number_of_allocations: integer + priority: MlTrainingPriority queue_capacity: integer threads_per_allocation: integer } @@ -12379,7 +12730,8 @@ export interface MlTrainedModelConfig { description?: string estimated_heap_memory_usage_bytes?: integer estimated_operations?: integer - inference_config: MlInferenceConfigCreateContainer + fully_defined?: boolean + inference_config?: MlInferenceConfigCreateContainer input: MlTrainedModelConfigInput license_level?: string metadata?: MlTrainedModelConfigMetadata @@ -12421,6 +12773,8 @@ export interface MlTrainedModelDeploymentNodesStats { export interface MlTrainedModelDeploymentStats { allocation_status: MlTrainedModelDeploymentAllocationStatus + cache_size?: ByteSize + deployment_id: Id error_count: integer inference_count: integer model_id: Id @@ -12486,6 +12840,8 @@ export interface MlTrainedModelStats { export type 
MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch' +export type MlTrainingPriority = 'normal' | 'low' + export interface MlTransformAuthorization { api_key?: MlApiKeyAuthorization roles?: string[] @@ -12497,6 +12853,10 @@ export interface MlValidationLoss { loss_type: string } +export interface MlVocabulary { + index: IndexName +} + export interface MlZeroShotClassificationInferenceOptions { tokenization?: MlTokenizationConfigContainer hypothesis_template?: string @@ -12513,6 +12873,14 @@ export interface MlZeroShotClassificationInferenceUpdateOptions { labels: string[] } +export interface MlClearTrainedModelDeploymentCacheRequest extends RequestBase { + model_id: Id +} + +export interface MlClearTrainedModelDeploymentCacheResponse { + cleared: boolean +} + export interface MlCloseJobRequest extends RequestBase { job_id: Id allow_no_match?: boolean @@ -12591,6 +12959,7 @@ export type MlDeleteForecastResponse = AcknowledgedResponseBase export interface MlDeleteJobRequest extends RequestBase { job_id: Id force?: boolean + delete_user_annotations?: boolean wait_for_completion?: boolean } @@ -13123,6 +13492,7 @@ export interface MlOpenJobRequest extends RequestBase { export interface MlOpenJobResponse { opened: boolean + node: NodeId } export interface MlPostCalendarEventsRequest extends RequestBase { @@ -13178,13 +13548,13 @@ export interface MlPreviewDataFrameAnalyticsResponse { export interface MlPreviewDatafeedRequest extends RequestBase { datafeed_id?: Id + start?: DateTime + end?: DateTime datafeed_config?: MlDatafeedConfig job_config?: MlJobConfig } -export interface MlPreviewDatafeedResponse { - data: TDocument[] -} +export type MlPreviewDatafeedResponse = TDocument[] export interface MlPutCalendarRequest extends RequestBase { calendar_id: Id @@ -13262,16 +13632,16 @@ export interface MlPutDatafeedRequest extends RequestBase { } export interface MlPutDatafeedResponse { - aggregations: Record + aggregations?: Record authorization?: MlDatafeedAuthorization chunking_config: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig datafeed_id: Id - frequency: Duration + frequency?: Duration indices: string[] job_id: Id indices_options?: IndicesOptions - max_empty_searches: integer + max_empty_searches?: integer query: QueryDslQueryContainer query_delay: Duration runtime_mappings?: MappingRuntimeFields @@ -13381,7 +13751,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { definition?: MlPutTrainedModelDefinition description?: string inference_config: MlInferenceConfigCreateContainer - input: MlPutTrainedModelInput + input?: MlPutTrainedModelInput metadata?: any model_type?: MlTrainedModelType model_size_bytes?: long @@ -13455,6 +13825,7 @@ export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase export interface MlResetJobRequest extends RequestBase { job_id: Id wait_for_completion?: boolean + delete_user_annotations?: boolean } export type MlResetJobResponse = AcknowledgedResponseBase @@ -13502,6 +13873,7 @@ export interface MlStartTrainedModelDeploymentRequest extends RequestBase { model_id: Id cache_size?: ByteSize number_of_allocations?: integer + priority?: MlTrainingPriority queue_capacity?: integer threads_per_allocation?: integer timeout?: Duration @@ -13580,6 +13952,7 @@ export interface MlUpdateDatafeedRequest extends RequestBase { indices?: string[] indexes?: string[] indices_options?: IndicesOptions + job_id?: Id max_empty_searches?: integer query?: QueryDslQueryContainer query_delay?: Duration @@ -13590,15 +13963,15 
@@ export interface MlUpdateDatafeedRequest extends RequestBase { export interface MlUpdateDatafeedResponse { authorization?: MlDatafeedAuthorization - aggregations: Record + aggregations?: Record chunking_config: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig datafeed_id: Id - frequency: Duration + frequency?: Duration indices: string[] indices_options?: IndicesOptions job_id: Id - max_empty_searches: integer + max_empty_searches?: integer query: QueryDslQueryContainer query_delay: Duration runtime_mappings?: MappingRuntimeFields @@ -13628,6 +14001,7 @@ export interface MlUpdateJobRequest extends RequestBase { categorization_filters?: string[] description?: string model_plot_config?: MlModelPlotConfig + model_prune_window?: Duration daily_model_snapshot_retention_after_days?: long model_snapshot_retention_days?: long renormalization_window_days?: long @@ -13722,9 +14096,9 @@ export interface MonitoringBulkResponse { export interface NodesAdaptiveSelection { avg_queue_size?: long - avg_response_time?: long + avg_response_time?: Duration avg_response_time_ns?: long - avg_service_time?: string + avg_service_time?: Duration avg_service_time_ns?: long outgoing_searches?: long rank?: string @@ -13898,6 +14272,7 @@ export interface NodesIndexingPressure { } export interface NodesIndexingPressureMemory { + limit?: ByteSize limit_in_bytes?: long current?: NodesPressureMemory total?: NodesPressureMemory @@ -14015,11 +14390,16 @@ export interface NodesPool { } export interface NodesPressureMemory { + all?: ByteSize + all_in_bytes?: long + combined_coordinating_and_primary?: ByteSize combined_coordinating_and_primary_in_bytes?: long + coordinating?: ByteSize coordinating_in_bytes?: long + primary?: ByteSize primary_in_bytes?: long + replica?: ByteSize replica_in_bytes?: long - all_in_bytes?: long coordinating_rejections?: long primary_rejections?: long replica_rejections?: long @@ -14095,6 +14475,7 @@ export interface NodesScriptCache { export interface NodesScripting { cache_evictions?: long compilations?: long + compilations_history?: Record compilation_limit_triggered?: long contexts?: NodesContext[] } @@ -14516,8 +14897,8 @@ export interface NodesInfoNodeJvmInfo { vm_name: Name vm_vendor: string vm_version: VersionString - bundled_jdk: boolean using_bundled_jdk: boolean + bundled_jdk: boolean using_compressed_ordinary_object_pointers?: boolean | string input_arguments: string[] } @@ -14721,12 +15102,18 @@ export interface RollupGetRollupCapsRollupCapabilities { } export interface RollupGetRollupCapsRollupCapabilitySummary { - fields: Record> + fields: Record index_pattern: string job_id: string rollup_index: string } +export interface RollupGetRollupCapsRollupFieldSummary { + agg: string + calendar_interval?: Duration + time_zone?: TimeZone +} + export interface RollupGetRollupIndexCapsIndexCapabilities { rollup_jobs: RollupGetRollupIndexCapsRollupJobSummary[] } @@ -14801,6 +15188,65 @@ export interface RollupStopJobResponse { stopped: boolean } +export interface SearchApplicationSearchApplication { + name: Name + indices: IndexName[] + updated_at_millis: EpochTime + analytics_collection_name?: Name + template?: SearchApplicationSearchApplicationTemplate +} + +export interface SearchApplicationSearchApplicationTemplate { + script: InlineScript | string +} + +export interface SearchApplicationDeleteRequest extends RequestBase { + name: Name +} + +export type SearchApplicationDeleteResponse = AcknowledgedResponseBase + +export interface SearchApplicationGetRequest extends 
RequestBase { + name: Name +} + +export type SearchApplicationGetResponse = SearchApplicationSearchApplication + +export interface SearchApplicationListRequest extends RequestBase { + q?: string + from?: integer + size?: integer +} + +export interface SearchApplicationListResponse { + count: long + results: SearchApplicationListSearchApplicationListItem[] +} + +export interface SearchApplicationListSearchApplicationListItem { + name: Name + indices: IndexName[] + updated_at_millis: EpochTime + analytics_collection_name?: Name +} + +export interface SearchApplicationPutRequest extends RequestBase { + name: Name + create?: boolean + search_application?: SearchApplicationSearchApplication +} + +export interface SearchApplicationPutResponse { + result: Result +} + +export interface SearchApplicationSearchRequest extends RequestBase { + name: Name + params?: Record +} + +export type SearchApplicationSearchResponse> = SearchResponseBody + export type SearchableSnapshotsStatsLevel = 'cluster' | 'indices' | 'shards' export interface SearchableSnapshotsCacheStatsNode { @@ -14898,7 +15344,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client' +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client'| string export interface SecurityCreatedStatus { created: boolean @@ -14914,7 +15360,7 @@ export interface SecurityFieldRule { export interface SecurityFieldSecurity { except?: Fields - grant: Fields + grant?: Fields } export interface SecurityGlobalPrivilege { @@ -14923,10 +15369,10 @@ export interface SecurityGlobalPrivilege { export type SecurityGrantType = 'password' | 'access_token' -export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' +export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 
'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write'| string export interface SecurityIndicesPrivileges { - field_security?: SecurityFieldSecurity | SecurityFieldSecurity[] + field_security?: SecurityFieldSecurity names: Indices privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery @@ -14985,17 +15431,19 @@ export interface SecurityRoleMappingRule { except?: SecurityRoleMappingRule } +export type SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer + export interface SecurityRoleTemplateInlineScript extends ScriptBase { lang?: ScriptLanguage options?: Record - source: string | QueryDslQueryContainer + source: SecurityRoleTemplateInlineQuery } export interface SecurityRoleTemplateQuery { template?: SecurityRoleTemplateScript } -export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | string | QueryDslQueryContainer | StoredScriptId +export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | SecurityRoleTemplateInlineQuery | StoredScriptId export interface SecurityTransientMetadataConfig { enabled: boolean @@ -15011,6 +15459,14 @@ export interface SecurityUser { profile_uid?: SecurityUserProfileId } +export interface SecurityUserIndicesPrivileges { + field_security?: SecurityFieldSecurity[] + names: Indices + privileges: SecurityIndexPrivilege[] + query?: SecurityIndicesPrivilegesQuery[] + allow_restricted_indices: boolean +} + export interface SecurityUserProfile { uid: SecurityUserProfileId user: SecurityUserProfileUser @@ -15417,7 +15873,7 @@ export interface SecurityGetUserPrivilegesResponse { applications: SecurityApplicationPrivileges[] cluster: string[] global: SecurityGlobalPrivilege[] - indices: SecurityIndicesPrivileges[] + indices: SecurityUserIndicesPrivileges[] run_as: string[] } @@ -15791,7 +16247,7 @@ export type ShutdownPutNodeResponse = AcknowledgedResponseBase export interface SlmConfiguration { ignore_unavailable?: boolean - indices: Indices + indices?: Indices include_global_state?: boolean feature_states?: string[] metadata?: Metadata @@ -15811,10 +16267,10 @@ export interface SlmInvocation { } export interface SlmPolicy { - config: SlmConfiguration + config?: SlmConfiguration name: Name repository: string - retention: SlmRetention + retention?: SlmRetention schedule: WatcherCronExpression } @@ -16161,6 +16617,7 @@ export interface SnapshotRestoreRequest extends RequestBase { snapshot: Name master_timeout?: Duration wait_for_completion?: boolean + feature_states?: string[] ignore_index_settings?: string[] ignore_unavailable?: boolean include_aliases?: boolean @@ -16308,6 +16765,7 @@ export interface SslCertificatesCertificateInformation { expiry: DateTime format: string has_private_key: boolean + issuer?: string path: string serial_number: string subject_dn: string @@ -16513,6 +16971,7 @@ export interface TransformSettings { deduce_mappings?: boolean docs_per_second?: float max_page_search_size?: integer + unattended?: boolean } export interface TransformSource { @@ -16591,6 +17050,7 @@ export interface TransformGetTransformStatsRequest extends RequestBase { allow_no_match?: boolean from?: long size?: long + timeout?: Duration } export interface TransformGetTransformStatsResponse { @@ -16628,6 +17088,7 @@ export interface TransformGetTransformStatsTransformProgress { export interface TransformGetTransformStatsTransformStats { checkpointing: TransformGetTransformStatsCheckpointing + health?: TransformGetTransformStatsTransformStatsHealth id: Id node?: 
NodeAttributes reason?: string @@ -16635,6 +17096,10 @@ export interface TransformGetTransformStatsTransformStats { stats: TransformGetTransformStatsTransformIndexerStats } +export interface TransformGetTransformStatsTransformStatsHealth { + status: HealthStatus +} + export interface TransformPreviewTransformRequest extends RequestBase { transform_id?: Id timeout?: Duration @@ -16679,9 +17144,17 @@ export interface TransformResetTransformRequest extends RequestBase { export type TransformResetTransformResponse = AcknowledgedResponseBase +export interface TransformScheduleNowTransformRequest extends RequestBase { + transform_id: Id + timeout?: Duration +} + +export type TransformScheduleNowTransformResponse = AcknowledgedResponseBase + export interface TransformStartTransformRequest extends RequestBase { transform_id: Id timeout?: Duration + from?: string } export type TransformStartTransformResponse = AcknowledgedResponseBase @@ -17486,14 +17959,6 @@ export interface XpackInfoResponse { tagline: string } -export interface XpackUsageAllJobs { - count: integer - detectors: Record - created_by: Record - model_size: Record - forecasts: Record -} - export interface XpackUsageAnalytics extends XpackUsageBase { stats: XpackUsageAnalyticsStatistics } @@ -17620,6 +18085,10 @@ export interface XpackUsageFrozenIndices extends XpackUsageBase { indices_count: long } +export interface XpackUsageHealthStatistics extends XpackUsageBase { + invocations: XpackUsageInvocations +} + export interface XpackUsageIlm { policy_count: integer policy_stats: XpackUsageIlmPolicyStatistics[] @@ -17630,20 +18099,26 @@ export interface XpackUsageIlmPolicyStatistics { phases: IlmPhases } +export interface XpackUsageInvocations { + total: long +} + export interface XpackUsageIpFilter { http: boolean transport: boolean } -export interface XpackUsageJobsKeys { - _all?: XpackUsageAllJobs +export interface XpackUsageJobUsage { + count: integer + created_by: Record + detectors: MlJobStatistics + forecasts: XpackUsageMlJobForecasts + model_size: MlJobStatistics } -export type XpackUsageJobs = XpackUsageJobsKeys -& { [property: string]: MlJob | XpackUsageAllJobs } export interface XpackUsageMachineLearning extends XpackUsageBase { datafeeds: Record - jobs: XpackUsageJobs + jobs: Record node_count: integer data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs inference: XpackUsageMlInference @@ -17716,9 +18191,16 @@ export interface XpackUsageMlInferenceTrainedModelsCount { total: long prepackaged: long other: long + pass_through?: long regression?: long classification?: long ner?: long + text_embedding?: long +} + +export interface XpackUsageMlJobForecasts { + total: long + forecasted_jobs: long } export interface XpackUsageMonitoring extends XpackUsageBase { @@ -17767,6 +18249,7 @@ export interface XpackUsageResponse { flattened?: XpackUsageFlattened frozen_indices: XpackUsageFrozenIndices graph: XpackUsageBase + health_api?: XpackUsageHealthStatistics ilm: XpackUsageIlm logstash: XpackUsageBase ml: XpackUsageMachineLearning diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 98c43c1d1..6c0a35200 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -254,7 +254,7 @@ export interface DeleteByQueryResponse { } export interface DeleteByQueryRethrottleRequest extends RequestBase { - task_id: Id + task_id: TaskId requests_per_second?: float } @@ -361,13 +361,13 @@ export interface FieldCapsRequest extends RequestBase { index?: Indices allow_no_indices?: boolean 
expand_wildcards?: ExpandWildcards - fields: Fields ignore_unavailable?: boolean include_unmapped?: boolean filters?: string types?: string[] /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { + fields?: Fields index_filter?: QueryDslQueryContainer runtime_mappings?: MappingRuntimeFields } @@ -471,6 +471,150 @@ export interface GetSourceRequest extends RequestBase { export type GetSourceResponse = TDocument +export interface HealthReportBaseIndicator { + status: HealthReportIndicatorHealthStatus + symptom: string + impacts?: HealthReportImpact[] + diagnosis?: HealthReportDiagnosis[] +} + +export interface HealthReportDiagnosis { + id: string + action: string + affected_resources: HealthReportDiagnosisAffectedResources + cause: string + help_url: string +} + +export interface HealthReportDiagnosisAffectedResources { + indices?: Indices + nodes?: HealthReportIndicatorNode[] + slm_policies?: string[] + feature_states?: string[] + snapshot_repositories?: string[] +} + +export interface HealthReportDiskIndicator extends HealthReportBaseIndicator { + details?: HealthReportDiskIndicatorDetails +} + +export interface HealthReportDiskIndicatorDetails { + indices_with_readonly_block: long + nodes_with_enough_disk_space: long + nodes_over_high_watermark: long + nodes_over_flood_stage_watermark: long + nodes_with_unknown_disk_status: long +} + +export interface HealthReportIlmIndicator extends HealthReportBaseIndicator { + details?: HealthReportIlmIndicatorDetails +} + +export interface HealthReportIlmIndicatorDetails { + ilm_status: LifecycleOperationMode + policies: long +} + +export interface HealthReportImpact { + description: string + id: string + impact_areas: HealthReportImpactArea[] + severity: integer +} + +export type HealthReportImpactArea = 'search' | 'ingest' | 'backup' | 'deployment_management' + +export type HealthReportIndicatorHealthStatus = 'green' | 'yellow' | 'red' | 'unknown' + +export interface HealthReportIndicatorNode { + name: string | null + node_id: string | null +} + +export interface HealthReportIndicators { + master_is_stable?: HealthReportMasterIsStableIndicator + shards_availability?: HealthReportShardsAvailabilityIndicator + disk?: HealthReportDiskIndicator + repository_integrity?: HealthReportRepositoryIntegrityIndicator + ilm?: HealthReportIlmIndicator + slm?: HealthReportSlmIndicator +} + +export interface HealthReportMasterIsStableIndicator extends HealthReportBaseIndicator { + details?: HealthReportMasterIsStableIndicatorDetails +} + +export interface HealthReportMasterIsStableIndicatorClusterFormationNode { + name?: string + node_id: string + cluster_formation_message: string +} + +export interface HealthReportMasterIsStableIndicatorDetails { + current_master: HealthReportIndicatorNode + recent_masters: HealthReportIndicatorNode[] + exception_fetching_history?: HealthReportMasterIsStableIndicatorExceptionFetchingHistory + cluster_formation?: HealthReportMasterIsStableIndicatorClusterFormationNode[] +} + +export interface HealthReportMasterIsStableIndicatorExceptionFetchingHistory { + message: string + stack_trace: string +} + +export interface HealthReportRepositoryIntegrityIndicator extends HealthReportBaseIndicator { + details?: HealthReportRepositoryIntegrityIndicatorDetails +} + +export interface HealthReportRepositoryIntegrityIndicatorDetails { + total_repositories?: long + corrupted_repositories?: long + corrupted?: string[] +} + +export interface HealthReportRequest extends RequestBase { 
+ feature?: string | string[] + timeout?: Duration + verbose?: boolean + size?: integer +} + +export interface HealthReportResponse { + cluster_name: string + indicators: HealthReportIndicators +} + +export interface HealthReportShardsAvailabilityIndicator extends HealthReportBaseIndicator { + details?: HealthReportShardsAvailabilityIndicatorDetails +} + +export interface HealthReportShardsAvailabilityIndicatorDetails { + creating_primaries: long + initializing_primaries: long + initializing_replicas: long + restarting_primaries: long + restarting_replicas: long + started_primaries: long + started_replicas: long + unassigned_primaries: long + unassigned_replicas: long +} + +export interface HealthReportSlmIndicator extends HealthReportBaseIndicator { + details?: HealthReportSlmIndicatorDetails +} + +export interface HealthReportSlmIndicatorDetails { + slm_status: LifecycleOperationMode + policies: long + unhealthy_policies: HealthReportSlmIndicatorUnhealthyPolicies +} + +export interface HealthReportSlmIndicatorUnhealthyPolicies { + count: long + invocations_since_last_success?: Record +} + export interface IndexRequest extends RequestBase { id?: Id index: IndexName @@ -527,13 +671,11 @@ export interface KnnSearchResponse { export interface KnnSearchQuery { field: Field - query_vector: KnnSearchQueryVector + query_vector: QueryVector k: long num_candidates: long } -export type KnnSearchQueryVector = double[] - export interface MgetMultiGetError { error: ErrorCause _id: Id @@ -591,7 +733,7 @@ export interface MsearchMultisearchBody { ext?: Record stored_fields?: Fields docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery + knn?: KnnQuery | KnnQuery[] from?: integer highlight?: SearchHighlight indices_boost?: Record[] @@ -732,6 +874,9 @@ export interface OpenPointInTimeRequest extends RequestBase { index: Indices keep_alive: Duration ignore_unavailable?: boolean + preference?: string + routing?: Routing + expand_wildcards?: ExpandWildcards } export interface OpenPointInTimeResponse { @@ -828,7 +973,7 @@ export interface RankEvalRankEvalRequestItem { } export interface RankEvalRequest extends RequestBase { - index: Indices + index?: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean @@ -1048,7 +1193,7 @@ export interface SearchRequest extends RequestBase { track_total_hits?: SearchTrackHits indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery + knn?: KnnQuery | KnnQuery[] min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -1145,12 +1290,14 @@ export interface SearchAggregationProfileDebug { filters?: SearchAggregationProfileDelegateDebugFilter[] segments_counted?: integer segments_collected?: integer + map_reducer?: string } export interface SearchAggregationProfileDelegateDebugFilter { results_from_metadata?: integer query?: string specialized_for?: string + segments_counted_in_constant_time?: integer } export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' @@ -1190,8 +1337,7 @@ export interface SearchCompletionSuggestOption { export interface SearchCompletionSuggester extends SearchSuggesterBase { contexts?: Record fuzzy?: SearchSuggestFuzziness - prefix?: string - regex?: string + regex?: SearchRegexOptions skip_duplicates?: boolean } @@ -1221,6 +1367,8 @@ export interface SearchFetchProfile { } export interface SearchFetchProfileBreakdown { + load_source?: integer + load_source_count?: integer load_stored_fields?: integer 
load_stored_fields_count?: integer next_reader?: integer @@ -1282,6 +1430,7 @@ export interface SearchHighlightBase { export interface SearchHighlightField extends SearchHighlightBase { fragment_offset?: integer matched_fields?: Fields + analyzer?: AnalysisAnalyzer } export type SearchHighlighterEncoder = 'default' | 'html' @@ -1383,8 +1532,9 @@ export interface SearchPhraseSuggestHighlight { export interface SearchPhraseSuggestOption { text: string - highlighted: string score: double + highlighted?: string + collate_match?: boolean } export interface SearchPhraseSuggester extends SearchSuggesterBase { @@ -1441,6 +1591,11 @@ export interface SearchQueryProfile { children?: SearchQueryProfile[] } +export interface SearchRegexOptions { + flags?: integer | string + max_determinized_states?: integer +} + export interface SearchRescore { query: SearchRescoreQuery window_size?: integer @@ -1527,8 +1682,10 @@ export interface SearchTermSuggest extends SearchSuggestBase { export interface SearchTermSuggestOption { text: string - freq: long score: double + freq: long + highlighted?: string + collate_match?: boolean } export interface SearchTermSuggester extends SearchSuggesterBase { @@ -1564,9 +1721,11 @@ export interface SearchMvtRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { aggs?: Record + buffer?: integer exact_bounds?: boolean extent?: integer fields?: Fields + grid_agg?: SearchMvtGridAggregationType grid_precision?: integer grid_type?: SearchMvtGridType query?: QueryDslQueryContainer @@ -1574,6 +1733,7 @@ export interface SearchMvtRequest extends RequestBase { size?: integer sort?: Sort track_total_hits?: SearchTrackHits + with_labels?: boolean } } @@ -1581,6 +1741,8 @@ export type SearchMvtResponse = MapboxVectorTiles export type SearchMvtCoordinate = integer +export type SearchMvtGridAggregationType = 'geotile' | 'geohex' + export type SearchMvtGridType = 'grid' | 'point' | 'centroid' export type SearchMvtZoomLevel = integer @@ -1993,7 +2155,7 @@ export interface FieldSort { export type FieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos' -export type FieldValue = long | double | string | boolean | any +export type FieldValue = long | double | string | boolean | null | any export interface FielddataStats { evictions?: long @@ -2035,6 +2197,8 @@ export interface GeoHashLocation { export type GeoHashPrecision = number | string +export type GeoHexCell = string + export interface GeoLine { type: string coordinates: double[][] @@ -2100,6 +2264,7 @@ export interface IndexingStats { index_total: long index_failed: long types?: Record + write_load?: double } export type Indices = IndexName | IndexName[] @@ -2136,7 +2301,8 @@ export type Ip = string export interface KnnQuery { field: Field - query_vector: double[] + query_vector?: QueryVector + query_vector_builder?: QueryVectorBuilder k: long num_candidates: long boost?: float @@ -2197,10 +2363,11 @@ export interface NestedSortValue { export interface NodeAttributes { attributes: Record ephemeral_id: Id - id?: Id + id?: NodeId name: NodeName transport_address: TransportAddress roles?: NodeRoles + external_id?: string } export type NodeId = string @@ -2223,6 +2390,7 @@ export interface NodeShard { recovery_source?: Record unassigned_info?: ClusterAllocationExplainUnassignedInformation relocating_node?: NodeId | null + relocation_failure_info?: RelocationFailureInfo } export interface NodeStatistics { @@ -2252,7 +2420,6 @@ export 
interface PluginStats { name: Name version: VersionString licensed: boolean - type: string } export type PropertyName = string @@ -2263,11 +2430,17 @@ export interface QueryCacheStats { evictions: integer hit_count: integer memory_size?: ByteSize - memory_size_in_bytes: integer + memory_size_in_bytes: long miss_count: integer total_count: integer } +export type QueryVector = float[] + +export interface QueryVectorBuilder { + text_embedding?: TextEmbedding +} + export interface RecoveryStats { current_as_source: long current_as_target: long @@ -2288,6 +2461,10 @@ export interface RefreshStats { export type RelationName = string +export interface RelocationFailureInfo { + failed_attempts: integer +} + export interface RequestBase extends SpecUtilsCommonQueryParameters { } @@ -2333,7 +2510,7 @@ export interface ScriptSort { nested?: NestedSortValue } -export type ScriptSortType = 'string' | 'number' +export type ScriptSortType = 'string' | 'number' | 'version' export interface ScriptTransform { lang?: string @@ -2377,28 +2554,28 @@ export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' export interface SegmentsStats { count: integer doc_values_memory?: ByteSize - doc_values_memory_in_bytes: integer + doc_values_memory_in_bytes: long file_sizes: Record fixed_bit_set?: ByteSize - fixed_bit_set_memory_in_bytes: integer + fixed_bit_set_memory_in_bytes: long index_writer_memory?: ByteSize - index_writer_max_memory_in_bytes?: integer - index_writer_memory_in_bytes: integer + index_writer_max_memory_in_bytes?: long + index_writer_memory_in_bytes: long max_unsafe_auto_id_timestamp: long memory?: ByteSize - memory_in_bytes: integer + memory_in_bytes: long norms_memory?: ByteSize - norms_memory_in_bytes: integer + norms_memory_in_bytes: long points_memory?: ByteSize - points_memory_in_bytes: integer + points_memory_in_bytes: long stored_memory?: ByteSize - stored_fields_memory_in_bytes: integer - terms_memory_in_bytes: integer + stored_fields_memory_in_bytes: long + terms_memory_in_bytes: long terms_memory?: ByteSize term_vectory_memory?: ByteSize - term_vectors_memory_in_bytes: integer + term_vectors_memory_in_bytes: long version_map_memory?: ByteSize - version_map_memory_in_bytes: integer + version_map_memory_in_bytes: long } export type SequenceNumber = long @@ -2427,7 +2604,7 @@ export interface ShardsOperationResponseBase { export interface SlicedScroll { field?: Field - id: integer + id: Id max: integer } @@ -2452,15 +2629,15 @@ export type SortOptions = SortOptionsKeys export type SortOrder = 'asc' | 'desc' -export type SortResults = (long | double | string | null)[] +export type SortResults = FieldValue[] export interface StoreStats { size?: ByteSize - size_in_bytes: integer + size_in_bytes: long reserved?: ByteSize - reserved_in_bytes: integer + reserved_in_bytes: long total_data_set_size?: ByteSize - total_data_set_size_in_bytes?: integer + total_data_set_size_in_bytes?: long } export interface StoredScript { @@ -2486,6 +2663,11 @@ export interface TaskFailure { export type TaskId = string | integer +export interface TextEmbedding { + model_id: string + model_text: string +} + export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' export type TimeOfDay = string @@ -2592,11 +2774,12 @@ export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBuck } export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMultiBucketBase { + key: string } export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys -& { 
[property: string]: AggregationsAggregate | long } +& { [property: string]: AggregationsAggregate | string | long } -export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate +export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | 
AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate export interface AggregationsAggregateBase { meta?: Metadata @@ -2635,6 +2818,7 @@ export interface AggregationsAggregationContainer { diversified_sampler?: AggregationsDiversifiedSamplerAggregation extended_stats?: AggregationsExtendedStatsAggregation extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation + frequent_item_sets?: AggregationsFrequentItemSetsAggregation filter?: QueryDslQueryContainer filters?: AggregationsFiltersAggregation geo_bounds?: AggregationsGeoBoundsAggregation @@ -2647,6 +2831,7 @@ export interface AggregationsAggregationContainer { global?: AggregationsGlobalAggregation histogram?: AggregationsHistogramAggregation ip_range?: AggregationsIpRangeAggregation + ip_prefix?: AggregationsIpPrefixAggregation inference?: AggregationsInferenceAggregation line?: AggregationsGeoLineAggregation matrix_stats?: AggregationsMatrixStatsAggregation @@ -2845,11 +3030,13 @@ export interface AggregationsChildrenAggregation extends AggregationsBucketAggre } export interface AggregationsCompositeAggregate extends AggregationsMultiBucketAggregateBase { - after_key?: Record + after_key?: AggregationsCompositeAggregateKey } +export type AggregationsCompositeAggregateKey = Record + export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase { - after?: Record + after?: AggregationsCompositeAggregateKey size?: integer sources?: Record[] } @@ -2862,10 +3049,10 @@ export interface AggregationsCompositeAggregationSource { } export interface AggregationsCompositeBucketKeys extends AggregationsMultiBucketBase { - key: Record + key: AggregationsCompositeAggregateKey } export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys -& { [property: string]: AggregationsAggregate | Record | long } +& { [property: string]: AggregationsAggregate | AggregationsCompositeAggregateKey | long } export interface AggregationsCumulativeCardinalityAggregate extends AggregationsAggregateBase { value: long @@ -2976,6 +3163,8 @@ export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAgg variance_population: double | null variance_sampling: 
double | null std_deviation: double | null + std_deviation_population: double | null + std_deviation_sampling: double | null std_deviation_bounds?: AggregationsStandardDeviationBounds sum_of_squares_as_string?: string variance_as_string?: string @@ -3026,7 +3215,31 @@ export interface AggregationsFormattableMetricAggregation extends AggregationsMe format?: string } -export type AggregationsGapPolicy = 'skip' | 'insert_zeros' +export interface AggregationsFrequentItemSetsAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsFrequentItemSetsAggregation { + fields: AggregationsFrequentItemSetsField[] + minimum_set_size?: integer + minimum_support?: double + size?: integer + filter?: QueryDslQueryContainer +} + +export interface AggregationsFrequentItemSetsBucketKeys extends AggregationsMultiBucketBase { + key: Record + support: double +} +export type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBucketKeys +& { [property: string]: AggregationsAggregate | Record | double | long } + +export interface AggregationsFrequentItemSetsField { + field: Field + exclude?: string | string[] + include?: string | string[] +} + +export type AggregationsGapPolicy = 'skip' | 'insert_zeros' | 'keep_values' export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase { bounds?: GeoBounds @@ -3074,9 +3287,19 @@ export interface AggregationsGeoHashGridBucketKeys extends AggregationsMultiBuck export type AggregationsGeoHashGridBucket = AggregationsGeoHashGridBucketKeys & { [property: string]: AggregationsAggregate | GeoHash | long } +export interface AggregationsGeoHexGridAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsGeoHexGridBucketKeys extends AggregationsMultiBucketBase { + key: GeoHexCell +} +export type AggregationsGeoHexGridBucket = AggregationsGeoHexGridBucketKeys +& { [property: string]: AggregationsAggregate | GeoHexCell | long } + export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase { type: string geometry: GeoLine + properties: any } export interface AggregationsGeoLineAggregation { @@ -3228,6 +3451,27 @@ export interface AggregationsInferenceTopClassEntry { class_score: double } +export interface AggregationsIpPrefixAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsIpPrefixAggregation extends AggregationsBucketAggregationBase { + field: Field + prefix_length: integer + is_ipv6?: boolean + append_prefix_length?: boolean + keyed?: boolean + min_doc_count?: long +} + +export interface AggregationsIpPrefixBucketKeys extends AggregationsMultiBucketBase { + is_ipv6: boolean + key: string + prefix_length: integer + netmask?: string +} +export type AggregationsIpPrefixBucket = AggregationsIpPrefixBucketKeys +& { [property: string]: AggregationsAggregate | boolean | string | integer | long } + export interface AggregationsIpRangeAggregate extends AggregationsMultiBucketAggregateBase { } @@ -3243,6 +3487,7 @@ export interface AggregationsIpRangeAggregationRange { } export interface AggregationsIpRangeBucketKeys extends AggregationsMultiBucketBase { + key?: string from?: string to?: string } @@ -3283,7 +3528,7 @@ export interface AggregationsMatrixAggregation extends AggregationsAggregation { export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateBase { doc_count: long - fields: AggregationsMatrixStatsFields[] + fields?: AggregationsMatrixStatsFields[] } export interface 
AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation { @@ -3378,6 +3623,7 @@ export interface AggregationsMultiBucketBase { export interface AggregationsMultiTermLookup { field: Field + missing?: AggregationsMissing } export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggregateBase { @@ -3395,12 +3641,12 @@ export interface AggregationsMultiTermsAggregation extends AggregationsBucketAgg } export interface AggregationsMultiTermsBucketKeys extends AggregationsMultiBucketBase { - key: (long | double | string)[] + key: FieldValue[] key_as_string?: string doc_count_error_upper_bound?: long } export type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys -& { [property: string]: AggregationsAggregate | (long | double | string)[] | string | long } +& { [property: string]: AggregationsAggregate | FieldValue[] | string | long } export interface AggregationsMutualInformationHeuristic { background_is_superset?: boolean @@ -3550,7 +3796,7 @@ export interface AggregationsSerialDifferencingAggregation extends AggregationsP lag?: integer } -export interface AggregationsSignificantLongTermsAggregate extends AggregationsMultiBucketAggregateBase { +export interface AggregationsSignificantLongTermsAggregate extends AggregationsSignificantTermsAggregateBase { } export interface AggregationsSignificantLongTermsBucketKeys extends AggregationsSignificantTermsBucketBase { @@ -3560,7 +3806,7 @@ export interface AggregationsSignificantLongTermsBucketKeys extends Aggregations export type AggregationsSignificantLongTermsBucket = AggregationsSignificantLongTermsBucketKeys & { [property: string]: AggregationsAggregate | long | string | double } -export interface AggregationsSignificantStringTermsAggregate extends AggregationsMultiBucketAggregateBase { +export interface AggregationsSignificantStringTermsAggregate extends AggregationsSignificantTermsAggregateBase { } export interface AggregationsSignificantStringTermsBucketKeys extends AggregationsSignificantTermsBucketBase { @@ -3569,6 +3815,11 @@ export interface AggregationsSignificantStringTermsBucketKeys extends Aggregatio export type AggregationsSignificantStringTermsBucket = AggregationsSignificantStringTermsBucketKeys & { [property: string]: AggregationsAggregate | string | double | long } +export interface AggregationsSignificantTermsAggregateBase extends AggregationsMultiBucketAggregateBase { + bg_count?: long + doc_count?: long +} + export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase { background_filter?: QueryDslQueryContainer chi_square?: AggregationsChiSquareHeuristic @@ -3697,10 +3948,10 @@ export interface AggregationsStringTermsAggregate extends AggregationsTermsAggre } export interface AggregationsStringTermsBucketKeys extends AggregationsTermsBucketBase { - key: string + key: FieldValue } export type AggregationsStringTermsBucket = AggregationsStringTermsBucketKeys -& { [property: string]: AggregationsAggregate | string | long } +& { [property: string]: AggregationsAggregate | FieldValue | long } export interface AggregationsSumAggregate extends AggregationsSingleMetricAggregateBase { } @@ -3736,7 +3987,7 @@ export type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedasti export interface AggregationsTermsAggregateBase extends AggregationsMultiBucketAggregateBase { doc_count_error_upper_bound?: long - sum_other_doc_count: long + sum_other_doc_count?: long } export interface AggregationsTermsAggregation extends 
AggregationsBucketAggregationBase { @@ -3827,7 +4078,7 @@ export interface AggregationsUnmappedSamplerAggregateKeys extends AggregationsSi export type AggregationsUnmappedSamplerAggregate = AggregationsUnmappedSamplerAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } -export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsMultiBucketAggregateBase { +export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsSignificantTermsAggregateBase { } export interface AggregationsUnmappedTermsAggregate extends AggregationsTermsAggregateBase { @@ -4176,7 +4427,7 @@ export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase { export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase { type: 'limit' consume_all_tokens?: boolean - max_token_count?: integer + max_token_count?: SpecUtilsStringified } export interface AnalysisLowercaseNormalizer { @@ -4373,7 +4624,8 @@ export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterB export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase { type: 'stemmer' - language: string + language?: string + name?: string } export interface AnalysisStopAnalyzer { @@ -4508,6 +4760,7 @@ export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBas type: 'aggregate_metric_double' default_metric: string metrics: string[] + time_series_metric?: MappingTimeSeriesMetricType } export interface MappingAllField { @@ -4535,7 +4788,7 @@ export interface MappingBooleanProperty extends MappingDocValuesPropertyBase { type: 'boolean' } -export interface MappingByteNumberProperty extends MappingStandardNumberProperty { +export interface MappingByteNumberProperty extends MappingNumberPropertyBase { type: 'byte' null_value?: byte } @@ -4610,7 +4863,7 @@ export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase { doc_values?: boolean } -export interface MappingDoubleNumberProperty extends MappingStandardNumberProperty { +export interface MappingDoubleNumberProperty extends MappingNumberPropertyBase { type: 'double' null_value?: double } @@ -4686,7 +4939,7 @@ export interface MappingFlattenedProperty extends MappingPropertyBase { type: 'flattened' } -export interface MappingFloatNumberProperty extends MappingStandardNumberProperty { +export interface MappingFloatNumberProperty extends MappingNumberPropertyBase { type: 'float' null_value?: float } @@ -4715,7 +4968,7 @@ export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { export type MappingGeoStrategy = 'recursive' | 'term' -export interface MappingHalfFloatNumberProperty extends MappingStandardNumberProperty { +export interface MappingHalfFloatNumberProperty extends MappingNumberPropertyBase { type: 'half_float' null_value?: float } @@ -4731,7 +4984,7 @@ export interface MappingIndexField { export type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets' -export interface MappingIntegerNumberProperty extends MappingStandardNumberProperty { +export interface MappingIntegerNumberProperty extends MappingNumberPropertyBase { type: 'integer' null_value?: integer } @@ -4743,8 +4996,11 @@ export interface MappingIntegerRangeProperty extends MappingRangePropertyBase { export interface MappingIpProperty extends MappingDocValuesPropertyBase { boost?: double index?: boolean - null_value?: string ignore_malformed?: boolean + null_value?: string + on_script_error?: MappingOnScriptError + script?: Script + 
time_series_dimension?: boolean type: 'ip' } @@ -4754,6 +5010,7 @@ export interface MappingIpRangeProperty extends MappingRangePropertyBase { export interface MappingJoinProperty extends MappingPropertyBase { relations?: Record + eager_global_ordinals?: boolean type: 'join' } @@ -4770,7 +5027,7 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { type: 'keyword' } -export interface MappingLongNumberProperty extends MappingStandardNumberProperty { +export interface MappingLongNumberProperty extends MappingNumberPropertyBase { type: 'long' null_value?: long } @@ -4800,9 +5057,14 @@ export interface MappingNestedProperty extends MappingCorePropertyBase { } export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase { - index?: boolean + boost?: double + coerce?: boolean ignore_malformed?: boolean + index?: boolean + on_script_error?: MappingOnScriptError + script?: Script time_series_metric?: MappingTimeSeriesMetricType + time_series_dimension?: boolean } export interface MappingObjectProperty extends MappingCorePropertyBase { @@ -4826,7 +5088,6 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty export interface MappingPropertyBase { - local_metadata?: Metadata meta?: Record properties?: Record ignore_above?: integer @@ -4854,18 +5115,26 @@ export interface MappingRoutingField { } export interface MappingRuntimeField { + fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[] format?: string + input_field?: Field + target_field?: Field + target_index?: IndexName script?: Script type: MappingRuntimeFieldType } -export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' +export interface MappingRuntimeFieldFetchFields { + field: Field + format?: string +} + +export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' | 'lookup' export type MappingRuntimeFields = Record export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase { type: 'scaled_float' - coerce?: boolean null_value?: double scaling_factor?: double } @@ -4890,7 +5159,7 @@ export interface MappingShapeProperty extends MappingDocValuesPropertyBase { type: 
'shape' } -export interface MappingShortNumberProperty extends MappingStandardNumberProperty { +export interface MappingShortNumberProperty extends MappingNumberPropertyBase { type: 'short' null_value?: short } @@ -4905,13 +5174,10 @@ export interface MappingSourceField { enabled?: boolean excludes?: string[] includes?: string[] + mode?: MappingSourceFieldMode } -export interface MappingStandardNumberProperty extends MappingNumberPropertyBase { - coerce?: boolean - script?: Script - on_script_error?: MappingOnScriptError -} +export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic' export interface MappingSuggestContext { name: Name @@ -4961,7 +5227,7 @@ export interface MappingTypeMapping { date_detection?: boolean dynamic?: MappingDynamicMapping dynamic_date_formats?: string[] - dynamic_templates?: Record | Record[] + dynamic_templates?: Record[] _field_names?: MappingFieldNamesField index_field?: MappingIndexField _meta?: Metadata @@ -5837,7 +6103,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { track_total_hits?: SearchTrackHits indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery + knn?: KnnQuery | KnnQuery[] min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -5939,7 +6205,7 @@ export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } -export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v' +export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v' export type CatCatTrainedModelsColumns = CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[] @@ -6422,6 +6688,7 @@ export interface CatIndicesRequest extends CatCatRequestBase { health?: HealthStatus include_unloaded_segments?: boolean pri?: boolean + time?: TimeUnit } export type CatIndicesResponse = CatIndicesIndicesRecord[] @@ -8143,6 +8410,26 @@ export interface ClusterGetSettingsResponse { defaults?: Record } +export interface ClusterHealthHealthResponseBody { + active_primary_shards: integer + active_shards: integer + active_shards_percent_as_number: Percentage + cluster_name: Name + delayed_unassigned_shards: integer + indices?: Record + initializing_shards: integer + number_of_data_nodes: integer + number_of_in_flight_fetch: integer + number_of_nodes: integer + number_of_pending_tasks: integer + relocating_shards: integer + status: HealthStatus + task_max_waiting_in_queue?: Duration + task_max_waiting_in_queue_millis: DurationValue + timed_out: boolean + unassigned_shards: integer +} + export interface 
ClusterHealthIndexHealthStats { active_primary_shards: integer active_shards: integer @@ -8170,25 +8457,7 @@ export interface ClusterHealthRequest extends RequestBase { wait_for_status?: HealthStatus } -export interface ClusterHealthResponse { - active_primary_shards: integer - active_shards: integer - active_shards_percent_as_number: Percentage - cluster_name: Name - delayed_unassigned_shards: integer - indices?: Record - initializing_shards: integer - number_of_data_nodes: integer - number_of_in_flight_fetch: integer - number_of_nodes: integer - number_of_pending_tasks: integer - relocating_shards: integer - status: HealthStatus - task_max_waiting_in_queue?: Duration - task_max_waiting_in_queue_millis: DurationValue - timed_out: boolean - unassigned_shards: integer -} +export type ClusterHealthResponse = ClusterHealthHealthResponseBody export interface ClusterHealthShardHealthStats { active_shards: integer @@ -8232,11 +8501,9 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { template: IndicesIndexState - aliases?: Record - mappings?: MappingTypeMapping - settings?: IndicesIndexSettings version?: VersionNumber _meta?: Metadata + allow_auto_create?: boolean } } @@ -8359,7 +8626,7 @@ export interface ClusterRerouteRerouteParameters { export interface ClusterRerouteResponse { acknowledged: boolean explanations?: ClusterRerouteRerouteExplanation[] - state: any + state?: any } export interface ClusterStateRequest extends RequestBase { @@ -8542,12 +8809,19 @@ export interface ClusterStatsFieldTypes { name: Name count: integer index_count: integer + indexed_vector_count?: long + indexed_vector_dim_max?: long + indexed_vector_dim_min?: long script_count?: integer } export interface ClusterStatsFieldTypesMappings { field_types: ClusterStatsFieldTypes[] runtime_field_types?: ClusterStatsRuntimeFieldTypes[] + total_field_count?: integer + total_deduplicated_field_count?: integer + total_deduplicated_mapping_size?: ByteSize + total_deduplicated_mapping_size_in_bytes?: long } export interface ClusterStatsIndexingPressure { @@ -8659,22 +8933,19 @@ export interface DanglingIndicesListDanglingIndicesResponse { dangling_indices: DanglingIndicesListDanglingIndicesDanglingIndex[] } -export interface EnrichConfiguration { - geo_match?: EnrichPolicy - match: EnrichPolicy - range: EnrichPolicy -} - export interface EnrichPolicy { enrich_fields: Fields indices: Indices match_field: Field query?: string name?: Name + elasticsearch_version?: string } +export type EnrichPolicyType = 'geo_match' | 'match' | 'range' + export interface EnrichSummary { - config: EnrichConfiguration + config: Partial> } export interface EnrichDeletePolicyRequest extends RequestBase { @@ -8866,7 +9137,7 @@ export interface FleetGlobalCheckpointsResponse { } export interface FleetMsearchRequest extends RequestBase { - index: IndexName | IndexAlias + index?: IndexName | IndexAlias allow_no_indices?: boolean ccs_minimize_roundtrips?: boolean expand_wildcards?: ExpandWildcards @@ -9062,6 +9333,7 @@ export interface IlmPhase { export interface IlmPhases { cold?: IlmPhase delete?: IlmPhase + frozen?: IlmPhase hot?: IlmPhase warm?: IlmPhase } @@ -9282,6 +9554,10 @@ export interface IndicesDataStreamVisibility { hidden?: boolean } +export interface IndicesDownsampleConfig { + fixed_interval: DurationLarge +} + export interface IndicesFielddataFrequencyFilter { max: double min: double @@ -9335,7 
+9611,7 @@ export interface IndicesIndexSettingBlocks { read_only_allow_delete?: boolean read?: boolean write?: boolean | string - metadata?: boolean + metadata?: SpecUtilsStringified } export interface IndicesIndexSettingsKeys { @@ -9349,7 +9625,7 @@ export interface IndicesIndexSettingsKeys { number_of_routing_shards?: integer check_on_startup?: IndicesIndexCheckOnStartup codec?: string - routing_partition_size?: integer + routing_partition_size?: SpecUtilsStringified load_fixed_bitset_filters_eagerly?: boolean hidden?: boolean | string auto_expand_replicas?: string @@ -9458,6 +9734,7 @@ export interface IndicesIndexTemplateSummary { export interface IndicesIndexVersioning { created?: VersionString + created_string?: string } export interface IndicesIndexingPressure { @@ -9469,6 +9746,7 @@ export interface IndicesIndexingPressureMemory { } export interface IndicesMappingLimitSettings { + coerce?: boolean total_fields?: IndicesMappingLimitSettingsTotalFields depth?: IndicesMappingLimitSettingsDepth nested_fields?: IndicesMappingLimitSettingsNestedFields @@ -9507,8 +9785,8 @@ export interface IndicesMerge { } export interface IndicesMergeScheduler { - max_thread_count?: integer - max_merge_count?: integer + max_thread_count?: SpecUtilsStringified + max_merge_count?: SpecUtilsStringified } export interface IndicesNumericFielddata { @@ -9536,7 +9814,7 @@ export type IndicesSegmentSortMode = 'min' | 'MIN' | 'max' | 'MAX' export type IndicesSegmentSortOrder = 'asc' | 'ASC' | 'desc' | 'DESC' export interface IndicesSettingsAnalyze { - max_token_count?: integer + max_token_count?: SpecUtilsStringified } export interface IndicesSettingsHighlight { @@ -9544,7 +9822,7 @@ export interface IndicesSettingsHighlight { } export interface IndicesSettingsQueryString { - lenient: boolean + lenient: SpecUtilsStringified } export interface IndicesSettingsSearch { @@ -9563,7 +9841,7 @@ export interface IndicesSettingsSimilarity { } export interface IndicesSettingsSimilarityBm25 { - b: integer + b: double discount_overlaps: boolean k1: double type: 'BM25' @@ -9633,7 +9911,7 @@ export interface IndicesStorage { allow_mmap?: boolean } -export type IndicesStorageType = 'fs' | '' | 'niofs' | 'mmapfs' | 'hybridfs' +export type IndicesStorageType = 'fs' | '' | 'niofs' | 'mmapfs' | 'hybridfs'| string export interface IndicesTemplateMapping { aliases: Record @@ -9912,7 +10190,7 @@ export interface IndicesDownsampleRequest extends RequestBase { index: IndexName target_index: IndexName /** @deprecated The use of the 'body' key has been deprecated, use 'config' instead. 
*/ - body?: any + body?: IndicesDownsampleConfig } export type IndicesDownsampleResponse = any @@ -10034,7 +10312,11 @@ export interface IndicesForcemergeRequest extends RequestBase { wait_for_completion?: boolean } -export type IndicesForcemergeResponse = ShardsOperationResponseBase +export type IndicesForcemergeResponse = IndicesForcemergeForceMergeResponseBody + +export interface IndicesForcemergeForceMergeResponseBody extends ShardsOperationResponseBase { + task?: string +} export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings' @@ -10485,13 +10767,21 @@ export interface IndicesRolloverResponse { } export interface IndicesRolloverRolloverConditions { + min_age?: Duration max_age?: Duration max_age_millis?: DurationValue + min_docs?: long max_docs?: long - max_size?: string - max_size_bytes?: ByteSize + max_size?: ByteSize + max_size_bytes?: long + min_size?: ByteSize + min_size_bytes?: long max_primary_shard_size?: ByteSize - max_primary_shard_size_bytes?: ByteSize + max_primary_shard_size_bytes?: long + min_primary_shard_size?: ByteSize + min_primary_shard_size_bytes?: long + max_primary_shard_docs?: long + min_primary_shard_docs?: long } export interface IndicesSegmentsIndexSegment { @@ -10517,7 +10807,6 @@ export interface IndicesSegmentsSegment { compound: boolean deleted_docs: long generation: integer - memory_in_bytes: double search: boolean size_in_bytes: double num_docs: long @@ -10553,16 +10842,13 @@ export interface IndicesShardStoresResponse { indices: Record } -export interface IndicesShardStoresShardStore { +export interface IndicesShardStoresShardStoreKeys { allocation: IndicesShardStoresShardStoreAllocation - allocation_id: Id - attributes: Record - id: Id - legacy_version: VersionNumber - name: Name - store_exception: IndicesShardStoresShardStoreException - transport_address: TransportAddress + allocation_id?: Id + store_exception?: IndicesShardStoresShardStoreException } +export type IndicesShardStoresShardStore = IndicesShardStoresShardStoreKeys +& { [property: string]: IndicesShardStoresShardStoreNode | IndicesShardStoresShardStoreAllocation | Id | IndicesShardStoresShardStoreException } export type IndicesShardStoresShardStoreAllocation = 'primary' | 'replica' | 'unused' @@ -10571,6 +10857,15 @@ export interface IndicesShardStoresShardStoreException { type: string } +export interface IndicesShardStoresShardStoreNode { + attributes: Record + ephemeral_id?: string + external_id?: string + name: Name + roles: string[] + transport_address: TransportAddress +} + export type IndicesShardStoresShardStoreStatus = 'green' | 'yellow' | 'red' | 'all' export interface IndicesShardStoresShardStoreWrapper { @@ -10692,6 +10987,12 @@ export interface IndicesStatsIndicesStats { status?: IndicesStatsIndexMetadataState } +export interface IndicesStatsMappingStats { + total_count: long + total_estimated_overhead?: ByteSize + total_estimated_overhead_in_bytes: long +} + export interface IndicesStatsRequest extends RequestBase { metric?: Metrics index?: Indices @@ -10780,6 +11081,7 @@ export interface IndicesStatsShardStats { flush?: FlushStats get?: GetStats indexing?: IndexingStats + mappings?: IndicesStatsMappingStats merges?: MergesStats shard_path?: IndicesStatsShardPath query_cache?: IndicesStatsShardQueryCache @@ -10795,7 +11097,7 @@ export interface IndicesStatsShardStats { translog?: TranslogStats warmer?: WarmerStats bulk?: BulkStats - shards?: IndicesStatsShardsTotalStats + shards?: Record shard_stats?: IndicesStatsShardsTotalStats indices?: 
IndicesStatsIndicesStats } @@ -10923,39 +11225,38 @@ export interface IngestBytesProcessor extends IngestProcessorBase { export interface IngestCircleProcessor extends IngestProcessorBase { error_distance: double field: Field - ignore_missing: boolean + ignore_missing?: boolean shape_type: IngestShapeType - target_field: Field + target_field?: Field } export interface IngestConvertProcessor extends IngestProcessorBase { field: Field ignore_missing?: boolean - target_field: Field + target_field?: Field type: IngestConvertType } export type IngestConvertType = 'integer' | 'long' | 'float' | 'double' | 'string' | 'boolean' | 'auto' export interface IngestCsvProcessor extends IngestProcessorBase { - empty_value: any - description?: string + empty_value?: any field: Field ignore_missing?: boolean quote?: string separator?: string target_fields: Fields - trim: boolean + trim?: boolean } export interface IngestDateIndexNameProcessor extends IngestProcessorBase { date_formats: string[] date_rounding: string field: Field - index_name_format: string - index_name_prefix: string - locale: string - timezone: string + index_name_format?: string + index_name_prefix?: string + locale?: string + timezone?: string } export interface IngestDateProcessor extends IngestProcessorBase { @@ -10967,9 +11268,9 @@ export interface IngestDateProcessor extends IngestProcessorBase { } export interface IngestDissectProcessor extends IngestProcessorBase { - append_separator: string + append_separator?: string field: Field - ignore_missing: boolean + ignore_missing?: boolean pattern: string } @@ -11002,18 +11303,18 @@ export interface IngestForeachProcessor extends IngestProcessorBase { } export interface IngestGeoIpProcessor extends IngestProcessorBase { - database_file: string + database_file?: string field: Field - first_only: boolean - ignore_missing: boolean - properties: string[] - target_field: Field + first_only?: boolean + ignore_missing?: boolean + properties?: string[] + target_field?: Field } export interface IngestGrokProcessor extends IngestProcessorBase { field: Field ignore_missing?: boolean - pattern_definitions: Record + pattern_definitions?: Record patterns: string[] trace_match?: boolean } @@ -11046,7 +11347,7 @@ export interface IngestInferenceConfigRegression { export interface IngestInferenceProcessor extends IngestProcessorBase { model_id: Id - target_field: Field + target_field?: Field field_map?: Record inference_config?: IngestInferenceConfig } @@ -11058,11 +11359,15 @@ export interface IngestJoinProcessor extends IngestProcessorBase { } export interface IngestJsonProcessor extends IngestProcessorBase { - add_to_root: boolean + add_to_root?: boolean + add_to_root_conflict_strategy?: IngestJsonProcessorConflictStrategy + allow_duplicate_keys?: boolean field: Field - target_field: Field + target_field?: Field } +export type IngestJsonProcessorConflictStrategy = 'replace' | 'merge' + export interface IngestKeyValueProcessor extends IngestProcessorBase { exclude_keys?: string[] field: Field @@ -11098,9 +11403,11 @@ export interface IngestPipelineConfig { export interface IngestPipelineProcessor extends IngestProcessorBase { name: Name + ignore_missing_pipeline?: boolean } export interface IngestProcessorBase { + description?: string if?: string ignore_failure?: boolean on_failure?: IngestProcessorContainer[] @@ -11156,9 +11463,12 @@ export interface IngestRenameProcessor extends IngestProcessorBase { } export interface IngestSetProcessor extends IngestProcessorBase { + copy_from?: Field field: Field 
+ ignore_empty_value?: boolean + media_type?: string override?: boolean - value: any + value?: any } export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { @@ -11170,8 +11480,8 @@ export type IngestShapeType = 'geo_shape' | 'shape' export interface IngestSortProcessor extends IngestProcessorBase { field: Field - order: SortOrder - target_field: Field + order?: SortOrder + target_field?: Field } export interface IngestSplitProcessor extends IngestProcessorBase { @@ -11202,10 +11512,10 @@ export interface IngestUrlDecodeProcessor extends IngestProcessorBase { export interface IngestUserAgentProcessor extends IngestProcessorBase { field: Field - ignore_missing: boolean - options: IngestUserAgentProperty[] - regex_file: string - target_field: Field + ignore_missing?: boolean + options?: IngestUserAgentProperty[] + regex_file?: string + target_field?: Field } export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD' @@ -11281,14 +11591,17 @@ export interface IngestSimulateDocument { _source: any } -export interface IngestSimulateDocumentSimulation { +export interface IngestSimulateDocumentSimulationKeys { _id: Id _index: IndexName _ingest: IngestSimulateIngest - _parent?: string _routing?: string _source: Record + _version?: SpecUtilsStringified + _version_type?: VersionType } +export type IngestSimulateDocumentSimulation = IngestSimulateDocumentSimulationKeys +& { [property: string]: string | Id | IndexName | IngestSimulateIngest | Record | SpecUtilsStringified | VersionType } export interface IngestSimulateIngest { timestamp: DateTime @@ -11387,7 +11700,7 @@ export interface LicensePostRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { license?: LicenseLicense - licenses: LicenseLicense[] + licenses?: LicenseLicense[] } } @@ -11521,7 +11834,7 @@ export interface MigrationPostFeatureUpgradeResponse { } export interface MlAnalysisConfig { - bucket_span: Duration + bucket_span?: Duration categorization_analyzer?: MlCategorizationAnalyzer categorization_field_name?: Field categorization_filters?: string[] @@ -11559,6 +11872,7 @@ export interface MlAnalysisMemoryLimit { export interface MlAnomaly { actual?: double[] + anomaly_score_explanation?: MlAnomalyExplanation bucket_span: DurationValue by_field_name?: string by_field_value?: string @@ -11567,6 +11881,7 @@ export interface MlAnomaly { field_name?: string function?: string function_description?: string + geo_results?: MlGeoResults influencers?: MlInfluence[] initial_record_score: double is_interim: boolean @@ -11599,6 +11914,19 @@ export interface MlAnomalyCause { typical: double[] } +export interface MlAnomalyExplanation { + anomaly_characteristics_impact?: integer + anomaly_length?: integer + anomaly_type?: string + high_variance_penalty?: boolean + incomplete_bucket_penalty?: boolean + lower_confidence_bound?: double + multi_bucket_impact?: integer + single_bucket_impact?: integer + typical_value?: double + upper_confidence_bound?: double +} + export interface MlApiKeyAuthorization { id: string name: string @@ -11750,12 +12078,12 @@ export interface MlDatafeedConfig { datafeed_id?: Id delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Duration + indices?: string[] indexes?: string[] - indices: string[] indices_options?: IndicesOptions job_id?: Id max_empty_searches?: integer - query: QueryDslQueryContainer + query?: QueryDslQueryContainer query_delay?: Duration runtime_mappings?: MappingRuntimeFields script_fields?: Record @@ -12061,7 +12389,7 @@ export interface MlDetector { detector_index?: integer exclude_frequent?: MlExcludeFrequent field_name?: Field - function: string + function?: string over_field_name?: Field partition_field_name?: Field use_null?: boolean @@ -12115,6 +12443,11 @@ export interface MlFilterRef { export type MlFilterType = 'include' | 'exclude' +export interface MlGeoResults { + actual_point: string + typical_point: string +} + export interface MlHyperparameter { absolute_importance?: double name: Name @@ -12140,7 +12473,7 @@ export interface MlHyperparameters { soft_tree_depth_tolerance?: double } -export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' +export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' | 'definition_status' export interface MlInferenceConfigCreateContainer { regression?: MlRegressionInferenceOptions @@ -12151,6 +12484,7 @@ export interface MlInferenceConfigCreateContainer { ner?: MlNerInferenceOptions pass_through?: MlPassThroughInferenceOptions text_embedding?: MlTextEmbeddingInferenceOptions + text_expansion?: MlTextExpansionInferenceOptions question_answering?: MlQuestionAnsweringInferenceOptions } @@ -12163,6 +12497,7 @@ export interface MlInferenceConfigUpdateContainer { ner?: MlNerInferenceUpdateOptions pass_through?: MlPassThroughInferenceUpdateOptions text_embedding?: MlTextEmbeddingInferenceUpdateOptions + text_expansion?: MlTextExpansionInferenceUpdateOptions question_answering?: MlQuestionAnsweringInferenceUpdateOptions } @@ -12349,6 +12684,7 @@ export interface MlNerInferenceOptions { tokenization?: MlTokenizationConfigContainer results_field?: string 
classification_labels?: string[] + vocabulary?: MlVocabulary } export interface MlNerInferenceUpdateOptions { @@ -12409,6 +12745,7 @@ export interface MlPage { export interface MlPassThroughInferenceOptions { tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary?: MlVocabulary } export interface MlPassThroughInferenceUpdateOptions { @@ -12477,6 +12814,7 @@ export interface MlTextClassificationInferenceUpdateOptions { } export interface MlTextEmbeddingInferenceOptions { + embedding_size?: integer tokenization?: MlTokenizationConfigContainer results_field?: string } @@ -12486,6 +12824,16 @@ export interface MlTextEmbeddingInferenceUpdateOptions { results_field?: string } +export interface MlTextExpansionInferenceOptions { + tokenization?: MlTokenizationConfigContainer + results_field?: string +} + +export interface MlTextExpansionInferenceUpdateOptions { + tokenization?: MlNlpTokenizationUpdateOptions + results_field?: string +} + export interface MlTimingStats { elapsed_time: DurationValue iteration_time?: DurationValue @@ -12524,6 +12872,7 @@ export interface MlTotalFeatureImportanceStatistics { export interface MlTrainedModelAssignment { assignment_state: MlDeploymentAssignmentState + max_assigned_allocations?: integer routing_table: Record start_time: DateTime task_parameters: MlTrainedModelAssignmentTaskParameters @@ -12539,8 +12888,10 @@ export interface MlTrainedModelAssignmentRoutingTable { export interface MlTrainedModelAssignmentTaskParameters { model_bytes: integer model_id: Id + deployment_id: Id cache_size: ByteSize number_of_allocations: integer + priority: MlTrainingPriority queue_capacity: integer threads_per_allocation: integer } @@ -12557,7 +12908,8 @@ export interface MlTrainedModelConfig { description?: string estimated_heap_memory_usage_bytes?: integer estimated_operations?: integer - inference_config: MlInferenceConfigCreateContainer + fully_defined?: boolean + inference_config?: MlInferenceConfigCreateContainer input: MlTrainedModelConfigInput license_level?: string metadata?: MlTrainedModelConfigMetadata @@ -12599,6 +12951,8 @@ export interface MlTrainedModelDeploymentNodesStats { export interface MlTrainedModelDeploymentStats { allocation_status: MlTrainedModelDeploymentAllocationStatus + cache_size?: ByteSize + deployment_id: Id error_count: integer inference_count: integer model_id: Id @@ -12664,6 +13018,8 @@ export interface MlTrainedModelStats { export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch' +export type MlTrainingPriority = 'normal' | 'low' + export interface MlTransformAuthorization { api_key?: MlApiKeyAuthorization roles?: string[] @@ -12675,6 +13031,10 @@ export interface MlValidationLoss { loss_type: string } +export interface MlVocabulary { + index: IndexName +} + export interface MlZeroShotClassificationInferenceOptions { tokenization?: MlTokenizationConfigContainer hypothesis_template?: string @@ -12691,6 +13051,14 @@ export interface MlZeroShotClassificationInferenceUpdateOptions { labels: string[] } +export interface MlClearTrainedModelDeploymentCacheRequest extends RequestBase { + model_id: Id +} + +export interface MlClearTrainedModelDeploymentCacheResponse { + cleared: boolean +} + export interface MlCloseJobRequest extends RequestBase { job_id: Id /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ @@ -12775,6 +13143,7 @@ export type MlDeleteForecastResponse = AcknowledgedResponseBase export interface MlDeleteJobRequest extends RequestBase { job_id: Id force?: boolean + delete_user_annotations?: boolean wait_for_completion?: boolean } @@ -13349,6 +13718,7 @@ export interface MlOpenJobRequest extends RequestBase { export interface MlOpenJobResponse { opened: boolean + node: NodeId } export interface MlPostCalendarEventsRequest extends RequestBase { @@ -13411,6 +13781,8 @@ export interface MlPreviewDataFrameAnalyticsResponse { export interface MlPreviewDatafeedRequest extends RequestBase { datafeed_id?: Id + start?: DateTime + end?: DateTime /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { datafeed_config?: MlDatafeedConfig @@ -13418,9 +13790,7 @@ export interface MlPreviewDatafeedRequest extends RequestBase { } } -export interface MlPreviewDatafeedResponse { - data: TDocument[] -} +export type MlPreviewDatafeedResponse = TDocument[] export interface MlPutCalendarRequest extends RequestBase { calendar_id: Id @@ -13507,16 +13877,16 @@ export interface MlPutDatafeedRequest extends RequestBase { } export interface MlPutDatafeedResponse { - aggregations: Record + aggregations?: Record authorization?: MlDatafeedAuthorization chunking_config: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig datafeed_id: Id - frequency: Duration + frequency?: Duration indices: string[] job_id: Id indices_options?: IndicesOptions - max_empty_searches: integer + max_empty_searches?: integer query: QueryDslQueryContainer query_delay: Duration runtime_mappings?: MappingRuntimeFields @@ -13634,7 +14004,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { definition?: MlPutTrainedModelDefinition description?: string inference_config: MlInferenceConfigCreateContainer - input: MlPutTrainedModelInput + input?: MlPutTrainedModelInput metadata?: any model_type?: MlTrainedModelType model_size_bytes?: long @@ -13715,6 +14085,7 @@ export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase export interface MlResetJobRequest extends RequestBase { job_id: Id wait_for_completion?: boolean + delete_user_annotations?: boolean } export type MlResetJobResponse = AcknowledgedResponseBase @@ -13768,6 +14139,7 @@ export interface MlStartTrainedModelDeploymentRequest extends RequestBase { model_id: Id cache_size?: ByteSize number_of_allocations?: integer + priority?: MlTrainingPriority queue_capacity?: integer threads_per_allocation?: integer timeout?: Duration @@ -13854,6 +14226,7 @@ export interface MlUpdateDatafeedRequest extends RequestBase { indices?: string[] indexes?: string[] indices_options?: IndicesOptions + job_id?: Id max_empty_searches?: integer query?: QueryDslQueryContainer query_delay?: Duration @@ -13865,15 +14238,15 @@ export interface MlUpdateDatafeedRequest extends RequestBase { export interface MlUpdateDatafeedResponse { authorization?: MlDatafeedAuthorization - aggregations: Record + aggregations?: Record chunking_config: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig datafeed_id: Id - frequency: Duration + frequency?: Duration indices: string[] indices_options?: IndicesOptions job_id: Id - max_empty_searches: integer + max_empty_searches?: integer query: QueryDslQueryContainer query_delay: Duration runtime_mappings?: MappingRuntimeFields @@ -13908,6 +14281,7 @@ export interface MlUpdateJobRequest extends RequestBase { categorization_filters?: string[] 
description?: string model_plot_config?: MlModelPlotConfig + model_prune_window?: Duration daily_model_snapshot_retention_after_days?: long model_snapshot_retention_days?: long renormalization_window_days?: long @@ -14011,9 +14385,9 @@ export interface MonitoringBulkResponse { export interface NodesAdaptiveSelection { avg_queue_size?: long - avg_response_time?: long + avg_response_time?: Duration avg_response_time_ns?: long - avg_service_time?: string + avg_service_time?: Duration avg_service_time_ns?: long outgoing_searches?: long rank?: string @@ -14187,6 +14561,7 @@ export interface NodesIndexingPressure { } export interface NodesIndexingPressureMemory { + limit?: ByteSize limit_in_bytes?: long current?: NodesPressureMemory total?: NodesPressureMemory @@ -14304,11 +14679,16 @@ export interface NodesPool { } export interface NodesPressureMemory { + all?: ByteSize + all_in_bytes?: long + combined_coordinating_and_primary?: ByteSize combined_coordinating_and_primary_in_bytes?: long + coordinating?: ByteSize coordinating_in_bytes?: long + primary?: ByteSize primary_in_bytes?: long + replica?: ByteSize replica_in_bytes?: long - all_in_bytes?: long coordinating_rejections?: long primary_rejections?: long replica_rejections?: long @@ -14384,6 +14764,7 @@ export interface NodesScriptCache { export interface NodesScripting { cache_evictions?: long compilations?: long + compilations_history?: Record compilation_limit_triggered?: long contexts?: NodesContext[] } @@ -14805,8 +15186,8 @@ export interface NodesInfoNodeJvmInfo { vm_name: Name vm_vendor: string vm_version: VersionString - bundled_jdk: boolean using_bundled_jdk: boolean + bundled_jdk: boolean using_compressed_ordinary_object_pointers?: boolean | string input_arguments: string[] } @@ -15013,12 +15394,18 @@ export interface RollupGetRollupCapsRollupCapabilities { } export interface RollupGetRollupCapsRollupCapabilitySummary { - fields: Record> + fields: Record index_pattern: string job_id: string rollup_index: string } +export interface RollupGetRollupCapsRollupFieldSummary { + agg: string + calendar_interval?: Duration + time_zone?: TimeZone +} + export interface RollupGetRollupIndexCapsIndexCapabilities { rollup_jobs: RollupGetRollupIndexCapsRollupJobSummary[] } @@ -15099,6 +15486,69 @@ export interface RollupStopJobResponse { stopped: boolean } +export interface SearchApplicationSearchApplication { + name: Name + indices: IndexName[] + updated_at_millis: EpochTime + analytics_collection_name?: Name + template?: SearchApplicationSearchApplicationTemplate +} + +export interface SearchApplicationSearchApplicationTemplate { + script: InlineScript | string +} + +export interface SearchApplicationDeleteRequest extends RequestBase { + name: Name +} + +export type SearchApplicationDeleteResponse = AcknowledgedResponseBase + +export interface SearchApplicationGetRequest extends RequestBase { + name: Name +} + +export type SearchApplicationGetResponse = SearchApplicationSearchApplication + +export interface SearchApplicationListRequest extends RequestBase { + q?: string + from?: integer + size?: integer +} + +export interface SearchApplicationListResponse { + count: long + results: SearchApplicationListSearchApplicationListItem[] +} + +export interface SearchApplicationListSearchApplicationListItem { + name: Name + indices: IndexName[] + updated_at_millis: EpochTime + analytics_collection_name?: Name +} + +export interface SearchApplicationPutRequest extends RequestBase { + name: Name + create?: boolean + /** @deprecated The use of the 'body' 
key has been deprecated, use 'search_application' instead. */ + body?: SearchApplicationSearchApplication +} + +export interface SearchApplicationPutResponse { + result: Result +} + +export interface SearchApplicationSearchRequest extends RequestBase { + name: Name + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + params?: Record + } +} + +export type SearchApplicationSearchResponse> = SearchResponseBody + export type SearchableSnapshotsStatsLevel = 'cluster' | 'indices' | 'shards' export interface SearchableSnapshotsCacheStatsNode { @@ -15199,7 +15649,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client' +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client'| string export interface SecurityCreatedStatus { created: boolean @@ -15215,7 +15665,7 @@ export interface SecurityFieldRule { export interface SecurityFieldSecurity { except?: Fields - grant: Fields + grant?: Fields } export interface SecurityGlobalPrivilege { @@ -15224,10 +15674,10 @@ export interface SecurityGlobalPrivilege { export type SecurityGrantType = 'password' | 'access_token' -export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' +export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write'| string export interface SecurityIndicesPrivileges { - field_security?: SecurityFieldSecurity | SecurityFieldSecurity[] + field_security?: SecurityFieldSecurity names: Indices privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery @@ -15286,17 +15736,19 @@ export interface SecurityRoleMappingRule { except?: SecurityRoleMappingRule } +export type 
SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer + export interface SecurityRoleTemplateInlineScript extends ScriptBase { lang?: ScriptLanguage options?: Record - source: string | QueryDslQueryContainer + source: SecurityRoleTemplateInlineQuery } export interface SecurityRoleTemplateQuery { template?: SecurityRoleTemplateScript } -export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | string | QueryDslQueryContainer | StoredScriptId +export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | SecurityRoleTemplateInlineQuery | StoredScriptId export interface SecurityTransientMetadataConfig { enabled: boolean @@ -15312,6 +15764,14 @@ export interface SecurityUser { profile_uid?: SecurityUserProfileId } +export interface SecurityUserIndicesPrivileges { + field_security?: SecurityFieldSecurity[] + names: Indices + privileges: SecurityIndexPrivilege[] + query?: SecurityIndicesPrivilegesQuery[] + allow_restricted_indices: boolean +} + export interface SecurityUserProfile { uid: SecurityUserProfileId user: SecurityUserProfileUser @@ -15730,7 +16190,7 @@ export interface SecurityGetUserPrivilegesResponse { applications: SecurityApplicationPrivileges[] cluster: string[] global: SecurityGlobalPrivilege[] - indices: SecurityIndicesPrivileges[] + indices: SecurityUserIndicesPrivileges[] run_as: string[] } @@ -16160,7 +16620,7 @@ export type ShutdownPutNodeResponse = AcknowledgedResponseBase export interface SlmConfiguration { ignore_unavailable?: boolean - indices: Indices + indices?: Indices include_global_state?: boolean feature_states?: string[] metadata?: Metadata @@ -16180,10 +16640,10 @@ export interface SlmInvocation { } export interface SlmPolicy { - config: SlmConfiguration + config?: SlmConfiguration name: Name repository: string - retention: SlmRetention + retention?: SlmRetention schedule: WatcherCronExpression } @@ -16544,6 +17004,7 @@ export interface SnapshotRestoreRequest extends RequestBase { wait_for_completion?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { + feature_states?: string[] ignore_index_settings?: string[] ignore_unavailable?: boolean include_aliases?: boolean @@ -16701,6 +17162,7 @@ export interface SslCertificatesCertificateInformation { expiry: DateTime format: string has_private_key: boolean + issuer?: string path: string serial_number: string subject_dn: string @@ -16907,6 +17369,7 @@ export interface TransformSettings { deduce_mappings?: boolean docs_per_second?: float max_page_search_size?: integer + unattended?: boolean } export interface TransformSource { @@ -16985,6 +17448,7 @@ export interface TransformGetTransformStatsRequest extends RequestBase { allow_no_match?: boolean from?: long size?: long + timeout?: Duration } export interface TransformGetTransformStatsResponse { @@ -17022,6 +17486,7 @@ export interface TransformGetTransformStatsTransformProgress { export interface TransformGetTransformStatsTransformStats { checkpointing: TransformGetTransformStatsCheckpointing + health?: TransformGetTransformStatsTransformStatsHealth id: Id node?: NodeAttributes reason?: string @@ -17029,6 +17494,10 @@ export interface TransformGetTransformStatsTransformStats { stats: TransformGetTransformStatsTransformIndexerStats } +export interface TransformGetTransformStatsTransformStatsHealth { + status: HealthStatus +} + export interface TransformPreviewTransformRequest extends RequestBase { transform_id?: Id timeout?: Duration @@ -17079,9 +17548,17 @@ export interface TransformResetTransformRequest extends RequestBase { export type TransformResetTransformResponse = AcknowledgedResponseBase +export interface TransformScheduleNowTransformRequest extends RequestBase { + transform_id: Id + timeout?: Duration +} + +export type TransformScheduleNowTransformResponse = AcknowledgedResponseBase + export interface TransformStartTransformRequest extends RequestBase { transform_id: Id timeout?: Duration + from?: string } export type TransformStartTransformResponse = AcknowledgedResponseBase @@ -17898,14 +18375,6 @@ export interface XpackInfoResponse { tagline: string } -export interface XpackUsageAllJobs { - count: integer - detectors: Record - created_by: Record - model_size: Record - forecasts: Record -} - export interface XpackUsageAnalytics extends XpackUsageBase { stats: XpackUsageAnalyticsStatistics } @@ -18032,6 +18501,10 @@ export interface XpackUsageFrozenIndices extends XpackUsageBase { indices_count: long } +export interface XpackUsageHealthStatistics extends XpackUsageBase { + invocations: XpackUsageInvocations +} + export interface XpackUsageIlm { policy_count: integer policy_stats: XpackUsageIlmPolicyStatistics[] @@ -18042,20 +18515,26 @@ export interface XpackUsageIlmPolicyStatistics { phases: IlmPhases } +export interface XpackUsageInvocations { + total: long +} + export interface XpackUsageIpFilter { http: boolean transport: boolean } -export interface XpackUsageJobsKeys { - _all?: XpackUsageAllJobs +export interface XpackUsageJobUsage { + count: integer + created_by: Record + detectors: MlJobStatistics + forecasts: XpackUsageMlJobForecasts + model_size: MlJobStatistics } -export type XpackUsageJobs = XpackUsageJobsKeys -& { [property: string]: MlJob | XpackUsageAllJobs } export interface XpackUsageMachineLearning extends XpackUsageBase { datafeeds: Record - jobs: XpackUsageJobs + jobs: Record node_count: integer data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs inference: XpackUsageMlInference @@ -18128,9 +18607,16 @@ export interface XpackUsageMlInferenceTrainedModelsCount { total: long prepackaged: long 
other: long + pass_through?: long regression?: long classification?: long ner?: long + text_embedding?: long +} + +export interface XpackUsageMlJobForecasts { + total: long + forecasted_jobs: long } export interface XpackUsageMonitoring extends XpackUsageBase { @@ -18179,6 +18665,7 @@ export interface XpackUsageResponse { flattened?: XpackUsageFlattened frozen_indices: XpackUsageFrozenIndices graph: XpackUsageBase + health_api?: XpackUsageHealthStatistics ilm: XpackUsageIlm logstash: XpackUsageBase ml: XpackUsageMachineLearning From 4011f0153f2162cac1612a09f8ea60c0988d891a Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 2 May 2023 09:57:03 -0500 Subject: [PATCH 201/647] Fix integration test runner for 8.8 (#1859) --- test/integration/test-runner.js | 36 +++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js index 4a5e279f4..826293cf8 100644 --- a/test/integration/test-runner.js +++ b/test/integration/test-runner.js @@ -85,7 +85,11 @@ function build (opts = {}) { ) // remove 'x_pack_rest_user', used in some xpack test - await client.security.deleteUser({ username: 'x_pack_rest_user' }, { ignore: [404] }) + try { + await client.security.deleteUser({ username: 'x_pack_rest_user' }, { ignore: [404] }) + } catch { + // do nothing + } const searchableSnapshotIndices = await client.cluster.state({ metric: 'metadata', @@ -137,7 +141,11 @@ function build (opts = {}) { const body = await client.cluster.getComponentTemplate() const components = body.component_templates.filter(c => !isXPackTemplate(c.name)).map(c => c.name) if (components.length > 0) { - await client.cluster.deleteComponentTemplate({ name: components.join(',') }, { ignore: [404] }) + try { + await client.cluster.deleteComponentTemplate({ name: components.join(',') }, { ignore: [404] }) + } catch { + // do nothing + } } // Remove any cluster setting @@ -157,9 +165,25 @@ function build (opts = {}) { if (isXPack) { // delete ilm policies const preserveIlmPolicies = [ - 'ilm-history-ilm-policy', 'slm-history-ilm-policy', - 'watch-history-ilm-policy', 'ml-size-based-ilm-policy', - 'logs', 'metrics' + "ilm-history-ilm-policy", + "slm-history-ilm-policy", + "watch-history-ilm-policy", + "watch-history-ilm-policy-16", + "ml-size-based-ilm-policy", + "logs", + "metrics", + "synthetics", + "7-days-default", + "30-days-default", + "90-days-default", + "180-days-default", + "365-days-default", + ".fleet-actions-results-ilm-policy", + ".fleet-file-data-ilm-policy", + ".fleet-files-ilm-policy", + ".deprecation-indexing-ilm-policy", + ".monitoring-8-ilm-policy", + "behavioral_analytics-events-default_policy", ] const policies = await client.ilm.getLifecycle() for (const policy in policies) { @@ -257,7 +281,7 @@ function build (opts = {}) { * - cleanup * @param {object} setup (null if not needed) * @param {object} test - * @oaram {object} teardown (null if not needed) + * @param {object} teardown (null if not needed) * @returns {Promise} */ async function run (setup, test, teardown, stats, junit) { From b2b54f1ffee1e2168ad6f0f9a809dbcb0e8edff5 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 2 May 2023 10:06:57 -0500 Subject: [PATCH 202/647] Bumps main to 8.9.0 (#1854) --- .ci/test-matrix.yml | 2 +- package.json | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml index e6a3f19dd..50366ab17 100644 --- a/.ci/test-matrix.yml +++ b/.ci/test-matrix.yml @@ -1,6 +1,6 @@ --- 
STACK_VERSION: - - "8.8.0-SNAPSHOT" + - "8.9.0-SNAPSHOT" NODE_JS_VERSION: - 18 diff --git a/package.json b/package.json index e7ce54574..dfb047ad4 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.8.0", - "versionCanary": "8.8.0-canary.0", + "version": "8.9.0", + "versionCanary": "8.9.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", @@ -96,4 +96,4 @@ "coverage": false, "check-coverage": false } -} +} \ No newline at end of file From bdb44d6d9a2be9c688a5f166a393338d26f421a1 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 5 May 2023 10:48:41 -0500 Subject: [PATCH 203/647] Use correct user-agent header by default (#1865) --- src/client.ts | 5 ++++- test/unit/client.test.ts | 9 +++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/client.ts b/src/client.ts index 0c12c1968..99bb60e4a 100644 --- a/src/client.ts +++ b/src/client.ts @@ -20,6 +20,7 @@ import { ConnectionOptions as TlsConnectionOptions } from 'tls' import { URL } from 'url' import buffer from 'buffer' +import os from 'os' import { Transport, UndiciConnection, @@ -173,7 +174,9 @@ export default class Client extends API { tls: null, caFingerprint: null, agent: null, - headers: {}, + headers: { + 'user-agent': `elasticsearch-js/${clientVersion} Node.js ${nodeVersion}; Transport ${transportVersion}; (${os.platform()} ${os.release()} ${os.arch()})` + }, nodeFilter: null, generateRequestId: null, name: 'elasticsearch-js', diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts index 3131a8d6f..b9481d19b 100644 --- a/test/unit/client.test.ts +++ b/test/unit/client.test.ts @@ -432,3 +432,12 @@ test('caFingerprint can\'t be configured over http / 2', t => { ) t.end() }) + +test('user agent is in the correct format', t => { + const client = new Client({ node: 'http://localhost:9200/' }) + const agentRaw = client.transport[symbols.kHeaders]['user-agent'] || '' + const agentSplit = agentRaw.split(/\s+/) + t.equal(agentSplit[0].split('/')[0], 'elasticsearch-js') + t.ok(/^\d+\.\d+\.\d+/.test(agentSplit[0].split('/')[1])) + t.end() +}) From c9f31d22fdba2d73f7829f07792acde330ae0e04 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 5 May 2023 13:18:20 -0500 Subject: [PATCH 204/647] Buildkite pipeline for integration tests (#1863) --- .buildkite/Dockerfile | 14 ++ .buildkite/certs/ca.crt | 20 +++ .buildkite/certs/ca.key | 27 ++++ .buildkite/certs/testnode.crt | 21 +++ .buildkite/certs/testnode.key | 27 ++++ .buildkite/certs/testnode_san.crt | 20 +++ .buildkite/certs/testnode_san.key | 27 ++++ .buildkite/functions/cleanup.sh | 67 +++++++++ .buildkite/functions/imports.sh | 60 ++++++++ .buildkite/functions/wait-for-container.sh | 36 +++++ .buildkite/pipeline.yml | 32 ++++- .buildkite/run-client.sh | 31 +++++ .buildkite/run-elasticsearch.sh | 152 +++++++++++++++++++++ .buildkite/run-tests.sh | 16 +++ .ci/functions/imports.sh | 7 +- .ci/make.mjs | 7 + catalog-info.yaml | 53 +++++++ 17 files changed, 611 insertions(+), 6 deletions(-) create mode 100644 .buildkite/Dockerfile create mode 100755 .buildkite/certs/ca.crt create mode 100644 .buildkite/certs/ca.key create mode 100755 .buildkite/certs/testnode.crt create mode 100755 .buildkite/certs/testnode.key create mode 100644 .buildkite/certs/testnode_san.crt create mode 100644 .buildkite/certs/testnode_san.key create mode 100755 .buildkite/functions/cleanup.sh create mode 100755 .buildkite/functions/imports.sh create mode 100755 
.buildkite/functions/wait-for-container.sh create mode 100755 .buildkite/run-client.sh create mode 100755 .buildkite/run-elasticsearch.sh create mode 100755 .buildkite/run-tests.sh create mode 100644 catalog-info.yaml diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile new file mode 100644 index 000000000..5608747b6 --- /dev/null +++ b/.buildkite/Dockerfile @@ -0,0 +1,14 @@ +ARG NODE_VERSION=${NODE_VERSION:-18} +FROM node:$NODE_VERSION + +# Install required tools +RUN apt-get clean -y && \ + apt-get -qy update && \ + apt-get -y install zip && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +WORKDIR /usr/src/app + +COPY . . +RUN npm install --production=false diff --git a/.buildkite/certs/ca.crt b/.buildkite/certs/ca.crt new file mode 100755 index 000000000..71f9bfc81 --- /dev/null +++ b/.buildkite/certs/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIVAJQLm8V2LcaCTHUcoIfO+KL63nG3MA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTIwMDIyNjA1NTA1N1oXDTIzMDIyNTA1NTA1N1owNDEyMDAG +A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDYyajkPvGtUOE5M1OowQfB +kWVrWjo1+LIxzgCeRHp0YztLtdVJ0sk2xoSrt2uZpxcPepdyOseLTjFJex1D2yCR +AEniIqcFif4G72nDih2LlbhpUe/+/MTryj8ZTkFTzI+eMmbQi5FFMaH+kwufmdt/ +5/w8YazO18SxxJUlzMqzfNUrhM8vvvVdxgboU7PWhk28wZHCMHQovomHmzclhRpF +N0FMktA98vHHeRjH19P7rNhifSd7hZzoH3H148HVAKoPgqnZ6vW2O2YfAWOP6ulq +cyszr57p8fS9B2wSdlWW7nVHU1JuKcYD67CxbBS23BeGFgCj4tiNrmxO8S5Yf85v +AgMBAAGjUzBRMB0GA1UdDgQWBBSWAlip9eoPmnG4p4OFZeOUBlAbNDAfBgNVHSME +GDAWgBSWAlip9eoPmnG4p4OFZeOUBlAbNDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBCwUAA4IBAQA19qqrMTWl7YyId+LR/QIHDrP4jfxmrEELrAL58q5Epc1k +XxZLzOBSXoBfBrPdv+3XklWqXrZjKWfdkux0Xmjnl4qul+srrZDLJVZG3I7IrITh +AmQUmL9MuPiMnAcxoGZp1xpijtW8Qmd2qnambbljWfkuVaa4hcVRfrAX6TciIQ21 +bS5aeLGrPqR14h30YzDp0RMmTujEa1o6ExN0+RSTkE9m89Q6WdM69az8JW7YkWqm +I+UCG3TcLd3TXmN1zNQkq4y2ObDK4Sxy/2p6yFPI1Fds5w/zLfBOvvPQY61vEqs8 +SCCcQIe7f6NDpIRIBlty1C9IaEHj7edyHjF6rtYb +-----END CERTIFICATE----- diff --git a/.buildkite/certs/ca.key b/.buildkite/certs/ca.key new file mode 100644 index 000000000..dfc41b558 --- /dev/null +++ b/.buildkite/certs/ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEA2Mmo5D7xrVDhOTNTqMEHwZFla1o6NfiyMc4AnkR6dGM7S7XV +SdLJNsaEq7drmacXD3qXcjrHi04xSXsdQ9sgkQBJ4iKnBYn+Bu9pw4odi5W4aVHv +/vzE68o/GU5BU8yPnjJm0IuRRTGh/pMLn5nbf+f8PGGsztfEscSVJczKs3zVK4TP +L771XcYG6FOz1oZNvMGRwjB0KL6Jh5s3JYUaRTdBTJLQPfLxx3kYx9fT+6zYYn0n +e4Wc6B9x9ePB1QCqD4Kp2er1tjtmHwFjj+rpanMrM6+e6fH0vQdsEnZVlu51R1NS +binGA+uwsWwUttwXhhYAo+LYja5sTvEuWH/ObwIDAQABAoIBAQC8QDGnMnmPdWJ+ +13FYY3cmwel+FXXjFDk5QpgK15A2rUz6a8XxO1d7d1wR+U84uH4v9Na6XQyWjaoD +EyPQnuJiyAtgkZLUHoY244PGR5NsePEQlBSCKmGeF5w/j1LvP/2e9EmP4wKdQYJY +nLxFNcgEBCFnFbKIU5n8fKa/klybCrwlBokenyBro02tqH4LL7h1YMRRrl97fv1V +e/y/0WcMN+KnMglfz6haimBRV2yamCCHHmBImC+wzOgT/quqlxPfI+a3ScHxuA65 +3QyCavaqlPh+T3lXnN/Na4UWqFtzMmwgJX2x1zM5qiln46/JoDiXtagvV43L3rNs +LhPRFeIRAoGBAPhEB7nNpEDNjIRUL6WpebWS9brKAVY7gYn7YQrKGhhCyftyaiBZ +zYgxPaJdqYXf+DmkWlANGoYiwEs40QwkR/FZrvO4+Xh3n3dgtl59ZmieuoQvDsG+ +RYIj+TfBaqhewhZNMMl7dxz7DeyQhyRCdsvl3VqJM0RuOsIrzrhCIEItAoGBAN+K +lgWI7swDpOEaLmu+IWMkGImh1LswXoZqIgi/ywZ7htZjPzidOIeUsMi+lrYsKojG +uU3sBxASsf9kYXDnuUuUbGT5M/N2ipXERt7klUAA/f5sg1IKlTrabaN/HGs/uNtf +Efa8v/h2VyTurdPCJ17TNpbOMDwX1qGM62tyt2CLAoGBAIHCnP8iWq18QeuQTO8b +a3/Z9hHRL22w4H4MI6aOB6GSlxuTq6CJD4IVqo9IwSg17fnCy2l3z9s4IqWuZqUf ++XJOW8ELd2jdrT2qEOfGR1Z7UCVyqxXcq1vgDYx0zZh/HpalddB5dcJx/c8do2Ty 
+UEE2PcHqYB9uNcvzNbLc7RtpAoGBALbuU0yePUTI6qGnajuTcQEPpeDjhRHWSFRZ +ABcG1N8uMS66Mx9iUcNp462zgeP8iqY5caUZtMHreqxT+gWKK7F0+as7386pwElF +QPXgO18QMMqHBIQb0vlBjJ1SRPBjSiSDTVEML1DljvTTOX7kEJHh6HdKrmBO5b54 +cqMQUo53AoGBAPVWRPUXCqlBz914xKna0ZUh2aesRBg5BvOoq9ey9c52EIU5PXL5 +0Isk8sWSsvhl3tjDPBH5WuL5piKgnCTqkVbEHmWu9s1T57Mw6NuxlPMLBWvyv4c6 +tB9brOxv0ui3qGMuBsBoDKbkNnwXyOXLyFg7O+H4l016A3mLQzJM+NGV +-----END RSA PRIVATE KEY----- diff --git a/.buildkite/certs/testnode.crt b/.buildkite/certs/testnode.crt new file mode 100755 index 000000000..0a6e76430 --- /dev/null +++ b/.buildkite/certs/testnode.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDYjCCAkqgAwIBAgIVAIClHav09e9XGWJrnshywAjUHTnXMA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTIzMDMyODE3MDIzOVoXDTI2MDMyNzE3MDIzOVowEzERMA8G +A1UEAxMIaW5zdGFuY2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV ++t5/g6u2r3awCtzqp17KG0hRxzkVoJoF8DYzVh+Rv9ymxQW0C/U8dQihAjkZHaIA +n49lSyNLkwWtmqQgPcimV4d6XuTYx2ahDixXYtjmoOSwH5dRtovKPCNKDPkUj9Vq +NwMW0uB1VxniMKI4DnYFqBgHL9kQKhQqvas6Gx0X6ptGRCLYCtVxeFcau6nnkZJt +urb+HNV5waOh0uTmsqnnslK3NjCQ/f030vPKxM5fOqOU5ajUHpZFJ6ZFmS32074H +l+mZoRT/GtbnVtIg+CJXsWThF3/L4iBImv+rkY9MKX5fyMLJgmIJG68S90IQGR8c +Z2lZYzC0J7zjMsYlODbDAgMBAAGjgYswgYgwHQYDVR0OBBYEFIDIcECn3AVHc3jk +MpQ4r7Kc3WCsMB8GA1UdIwQYMBaAFJYCWKn16g+acbing4Vl45QGUBs0MDsGA1Ud +EQQ0MDKCCWxvY2FsaG9zdIIIaW5zdGFuY2WHBH8AAAGHEAAAAAAAAAAAAAAAAAAA +AAGCA2VzMTAJBgNVHRMEAjAAMA0GCSqGSIb3DQEBCwUAA4IBAQBtX3RQ5ATpfORM +lrnhaUPGOWkjnb3p3BrdAWUaWoh136QhaXqxKiALQQhTtTerkXOcuquy9MmAyYvS +9fDdGvLCAO8pPCXjnzonCHerCLGdS7f/eqvSFWCdy7LPHzTAFYfVWVvbZed+83TL +bDY63AMwIexj34vJEStMapuFwWx05fstE8qZWIbYCL87sF5H/MRhzlz3ScAhQ1N7 +tODH7zvLzSxFGGEzCIKZ0iPFKbd3Y0wE6SptDSKhOqlnC8kkNeI2GjWsqVfHKsoF +pDFmri7IfOucuvalXJ6xiHPr9RDbuxEXs0u8mteT5nFQo7EaEGdHpg1pNGbfBOzP +lmj/dRS9 +-----END CERTIFICATE----- diff --git a/.buildkite/certs/testnode.key b/.buildkite/certs/testnode.key new file mode 100755 index 000000000..a9de563c8 --- /dev/null +++ b/.buildkite/certs/testnode.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAlfref4Ortq92sArc6qdeyhtIUcc5FaCaBfA2M1Yfkb/cpsUF +tAv1PHUIoQI5GR2iAJ+PZUsjS5MFrZqkID3IpleHel7k2MdmoQ4sV2LY5qDksB+X +UbaLyjwjSgz5FI/VajcDFtLgdVcZ4jCiOA52BagYBy/ZECoUKr2rOhsdF+qbRkQi +2ArVcXhXGrup55GSbbq2/hzVecGjodLk5rKp57JStzYwkP39N9LzysTOXzqjlOWo +1B6WRSemRZkt9tO+B5fpmaEU/xrW51bSIPgiV7Fk4Rd/y+IgSJr/q5GPTCl+X8jC +yYJiCRuvEvdCEBkfHGdpWWMwtCe84zLGJTg2wwIDAQABAoIBAAEP7HYNNnDWdYMD ++WAtYM12X/W5s/wUP94juaBI4u4iZH2EZodlixEdZUCTXgq43WsDUhxX05s7cE+p +H5DuSCHtoo2WHvGKAposwRDm2f3YVWQ2Xyb2ahNt69LYHHWrO+XQ60YYTa3r8Gn3 +7dFR3I016/jyn5DeEVaglvS1dfj2UG4ybR4KkMfcKd94X0rKvz3wzAhHIh+hwMtv +sVk7V4vSnKf2mJXwIVECTolnEJEkCjWjjymgUJYKT8yN7JnAsHRcvMa6kWwIGrLp +oQCEaJwYM6ynCRS989pLt3vA2iu5VkYhiHXJ9Ds/5b5yzhzmj+ymzKbFKrrUUrmn ++2Jp1K0CgYEAw8BchALsD/+JuoXjinA14MH7PZjIsXyhtPk+c4pk42iMNyg1J8XF +Y/ITepLYsl2bZqQI1jOJdDqsTwIsva9r749lsmkYI3VOxhi7+qBK0sThR66C87lX +iU2QpnZ9NloC6ort4a3MEvZ/gRQcXdBrNlNoza2p7PHAVDTnsdSrNKUCgYEAxCQV +uo85oZyfnMufn/gcI9IeYOgiB0tO3a8cAFX2wQW1y935t6Z13ApUQc4EnCOH7ZBc +td5kT+xGdRWnfPZ38FM1dd5MBdGE69s3q8pJDUExSgNLqaF6/5bD32qui66L3ugu +eMjxrzqJsc2uQTPCs18SGsyRmf54DpY8HglOmUcCgYAGRDgx+a347SNJl1OrcOAo +q80RMbzrAaRjmL8JD9se9I/YjC73cPtasbsx51WMkDaTWJj30nqJ//7YIKeyAtWf +u6Vzyq19JRo6eTw7T7pVePwFQW7rwnks6hDBY3WqscL6IyxuVxP7X2zBgxVNY4ir +Gox2WSLhdPPFPlRUewxoCQKBgAJvqE1u5fpZ5ame5dao0ECppXLyrymEB/C88g4X +Az+WgJGNqkJbsO8QuccvdeMylcefmWcw4fIULzPZFwF4VjkH74wNPMh9t7buPBzI +IGwnuSMAM3ph5RMzni8yNgTKIDaej6U0abwRcBBjS5zHtc1giusGS3CsNnWH7Cs7 
+VlyVAoGBAK+prq9t9x3tC3NfCZH8/Wfs/X0T1qm11RiL5+tOhmbguWAqSSBy8OjX +Yh8AOXrFuMGldcaTXxMeiKvI2cyybnls1MFsPoeV/fSMJbex7whdeJeTi66NOSKr +oftUHvkHS0Vv/LicMEOufFGslb4T9aPJ7oyhoSlz9CfAutDWk/q/ +-----END RSA PRIVATE KEY----- diff --git a/.buildkite/certs/testnode_san.crt b/.buildkite/certs/testnode_san.crt new file mode 100644 index 000000000..8abba55b5 --- /dev/null +++ b/.buildkite/certs/testnode_san.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDVjCCAj6gAwIBAgIULh42yRefYlRRl1hvt055LrUH0HwwDQYJKoZIhvcNAQEL +BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l +cmF0ZWQgQ0EwHhcNMjAwMjI4MDMzNzIwWhcNMjMwMjI3MDMzNzIwWjATMREwDwYD +VQQDEwhpbnN0YW5jZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIUP +t267NN21z+3ukajej8eojSXwP6zHxy7CUAp+sQ7bTq2XCKxkYX3CW9ThcS4cV9mL +ayYdWEYnbEDGYPQDo7Wk3Ih5OEXTMZb/yNEx5D4S2lGMOS5bCDdYx6GvwCMG4jNx +aMktosaxpprAJiHh2oLgQk0hQc/a9JfMo6kJKtuhjxsxjxLwcOHhuaUD7NS0Pjop +CJkSYcrL+nnQPQjKe4uLhAbSyiX914h4QX0CJ0e4z1ccdDX2PFWTrwaIf//vQhCR +wP2YKdfjR0JB4oDAlu85GsIs2cFLPysM5ufuNZO4fCr8uOwloKI8zZ2HhlIfBEcY +Gcy4g9N/9epmxMXZlGcCAwEAAaOBgDB+MB0GA1UdDgQWBBRefYm8DHHDdkTPHhS1 +HEUwTb2uiDAfBgNVHSMEGDAWgBSWAlip9eoPmnG4p4OFZeOUBlAbNDAxBgNVHREE +KjAogglsb2NhbGhvc3SHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAGCA2VzMTAJBgNV +HRMEAjAAMA0GCSqGSIb3DQEBCwUAA4IBAQC+pauqM2wJjQaHyHu+kIm59P4b/5Oj +IH1cYCQfMB7Y2UMLxp0ew+f7o7zzE2DA52YYFDWy6J5DVWtSBPyeFGgX+RH+aA+9 +Iv4cc9QpAs6aFjncorHrzNOrWLgCHIeRAxTR0CAkeP2dUZfDBuMpRyP6rAsYzyLH +Rb3/BfYJSI5vxgt5Ke49Y/ljDKFJTyDmAVrHQ4JWrseYE1UZ2eDkBXeiRlYE/QtB +YsrUSqdL6zvFZyUcilxDUUabNcA+GgeGZ2lAEA90F8vwi62QwRXo3Iv1Hz+6xc43 +nFofDK9D8/qkrUD9iuhpx1974QwPhwWyjn9RZRpbZA4ngRL+szdRXR4N +-----END CERTIFICATE----- diff --git a/.buildkite/certs/testnode_san.key b/.buildkite/certs/testnode_san.key new file mode 100644 index 000000000..75d19539e --- /dev/null +++ b/.buildkite/certs/testnode_san.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAhQ+3brs03bXP7e6RqN6Px6iNJfA/rMfHLsJQCn6xDttOrZcI +rGRhfcJb1OFxLhxX2YtrJh1YRidsQMZg9AOjtaTciHk4RdMxlv/I0THkPhLaUYw5 +LlsIN1jHoa/AIwbiM3FoyS2ixrGmmsAmIeHaguBCTSFBz9r0l8yjqQkq26GPGzGP +EvBw4eG5pQPs1LQ+OikImRJhysv6edA9CMp7i4uEBtLKJf3XiHhBfQInR7jPVxx0 +NfY8VZOvBoh//+9CEJHA/Zgp1+NHQkHigMCW7zkawizZwUs/Kwzm5+41k7h8Kvy4 +7CWgojzNnYeGUh8ERxgZzLiD03/16mbExdmUZwIDAQABAoIBAEwhjulLMVc9JEfV +PP/qv0cUOBYh3LzF3T/yq4slq7Z9YgnOJYdFM8aZgqNNjc09KEJvE5JOLeiNu9Ff +768Nugg+2HM5MCo7SN9FYCfZLOcbMFCCM2FDcnMAV9A512vzD08xryuT8dNPZ6yZ +DfhK2hQRrb2lrpr3gwSrcGRRu3THqvq7X1RIjpLV3teDMeP8rQPAlpj8fmP+kdVV +5y1ihiDIo87McihG9FMavJtBDXQkUEuVw6eIeir8L/zHHD/ZwhYjNHZGWbrB88sz +CkJkfWh/FlA63tCVdJzkmnERALLTVy9mR0Sq6sUlnFhFNO2BRdWgYLrcp9McfTJC +e8+WsSECgYEAuwQ3nAaFL0jqYu1AREyKT/f3WUenf2UsX7dwwV2/yFtQvkzW7ji4 +uZLnfUnZBojtHf35dRo+hDgtvhZhgZNAuPPsbOl/EIMTcbChEqV/3CSTFlhLFM1d +hfM9PoM+Bt/pyUNabjD1sWM0X7WeUhzcddshY3S4daBsNsLuOzweRRcCgYEAtiSS +4qiiGafYsY7gOHuAlOhs/00+1uWIFEHKgoHM9vzCxDN3LCmBdynHk8ZE2TAdhw+l +7xpu6LUxKQDfGmVZa9Epg0kQmVq9c54oQP57pJ3tR+68++insEkfnaZH8jblfq2s +sSkFrY3pdS19edq60nuft64kswKRUUkamCXTXTECgYBdoSfiMpV9bekC7DsPtq5M +iR3KEgi2zEViCmomNTRuL+GF1NyKWdWJ+xVwcYd5MRZdvKimyyPfeGzWTUg14i42 +KtEEWgZmkukqMz8BIeCYq6sENeIpIQQgqv3PjU+Bi5r1S4Y7wsFPNRakkD4aaB6r +1rCppWcwZMeoxwEUoO2aswKBgBdDIIdWJi3EpAY5SyWrkEZ0UMdiZC4p7nE33ddB +IJ5CtdU9BXFcc652ZYjX/58FaCABvZ2F8LhDu92SwOusGfmNIxIjWL1dO2jywA1c +8wmZKd7P/M7nbdMz45fMzs9+d1zwbWfK53C8+R4AC1BuwQF0zHc3BHTgVRLelUjt +O8thAoGAdO2gHIqEsZzTgbvLbsh52eVbumjfNGnrnEv1fjb+o+/wAol8dymcmzbL +bZCRzoyA0qwU9kdPFgX46H6so6o1tUM2GQtVFoT6kDnPv7EkLQK0C4cDh6OOHxDU +NPvr/9fHhQd9EDWDvS1JnVMAdKDO6ELp3SoKGGmCXR2QplnqWAk= +-----END RSA PRIVATE 
KEY----- diff --git a/.buildkite/functions/cleanup.sh b/.buildkite/functions/cleanup.sh new file mode 100755 index 000000000..4c25166fb --- /dev/null +++ b/.buildkite/functions/cleanup.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash +# +# Shared cleanup routines between different steps +# +# Please source .ci/functions/imports.sh as a whole not just this file +# +# Version 1.0.0 +# - Initial version after refactor + +function cleanup_volume { + if [[ "$(docker volume ls -q -f name=$1)" ]]; then + echo -e "\033[34;1mINFO:\033[0m Removing volume $1\033[0m" + (docker volume rm "$1") || true + fi +} +function container_running { + if [[ "$(docker ps -q -f name=$1)" ]]; then + return 0; + else return 1; + fi +} +function cleanup_node { + if container_running "$1"; then + echo -e "\033[34;1mINFO:\033[0m Removing container $1\033[0m" + (docker container rm --force --volumes "$1") || true + fi + if [[ -n "$1" ]]; then + echo -e "\033[34;1mINFO:\033[0m Removing volume $1-${suffix}-data\033[0m" + cleanup_volume "$1-${suffix}-data" + fi +} +function cleanup_network { + if [[ "$(docker network ls -q -f name=$1)" ]]; then + echo -e "\033[34;1mINFO:\033[0m Removing network $1\033[0m" + (docker network rm "$1") || true + fi +} + +function cleanup_trap { + status=$? + set +x + if [[ "$DETACH" != "true" ]]; then + echo -e "\033[34;1mINFO:\033[0m clean the network if not detached (start and exit)\033[0m" + cleanup_all_in_network "$1" + fi + # status is 0 or SIGINT + if [[ "$status" == "0" || "$status" == "130" ]]; then + echo -e "\n\033[32;1mSUCCESS run-tests\033[0m" + exit 0 + else + echo -e "\n\033[31;1mFAILURE during run-tests\033[0m" + exit ${status} + fi +}; +function cleanup_all_in_network { + + if [[ -z "$(docker network ls -q -f name="^$1\$")" ]]; then + echo -e "\033[34;1mINFO:\033[0m $1 is already deleted\033[0m" + return 0 + fi + containers=$(docker network inspect -f '{{ range $key, $value := .Containers }}{{ printf "%s\n" .Name}}{{ end }}' $1) + while read -r container; do + cleanup_node "$container" + done <<< "$containers" + cleanup_network $1 + echo -e "\033[32;1mSUCCESS:\033[0m Cleaned up and exiting\033[0m" +}; diff --git a/.buildkite/functions/imports.sh b/.buildkite/functions/imports.sh new file mode 100755 index 000000000..c05f36826 --- /dev/null +++ b/.buildkite/functions/imports.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +# +# Sets up all the common variables and imports relevant functions +# +# Version 1.0.1 +# - Initial version after refactor +# - Validate STACK_VERSION asap + +function require_stack_version() { + if [[ -z $STACK_VERSION ]]; then + echo -e "\033[31;1mERROR:\033[0m Required environment variable [STACK_VERSION] not set\033[0m" + exit 1 + fi +} + +require_stack_version + +if [[ -z $es_node_name ]]; then + # only set these once + set -euo pipefail + export TEST_SUITE=${TEST_SUITE-free} + export RUNSCRIPTS=${RUNSCRIPTS-} + export DETACH=${DETACH-false} + export CLEANUP=${CLEANUP-false} + + export es_node_name=instance + export elastic_password=changeme + export elasticsearch_image=elasticsearch + export elasticsearch_scheme="https" + if [[ $TEST_SUITE != "platinum" ]]; then + export elasticsearch_scheme="http" + fi + export elasticsearch_url=${elasticsearch_scheme}://elastic:${elastic_password}@${es_node_name}:9200 + export external_elasticsearch_url=${elasticsearch_url/$es_node_name/localhost} + export elasticsearch_container="${elasticsearch_image}:${STACK_VERSION}" + + export suffix=rest-test + export moniker=$(echo "$elasticsearch_container" | tr -C "[:alnum:]" '-') + export 
network_name=${moniker}${suffix} + + export ssl_cert="${script_path}/certs/testnode.crt" + export ssl_key="${script_path}/certs/testnode.key" + export ssl_ca="${script_path}/certs/ca.crt" + +fi + + export script_path=$(dirname $(realpath -s $0)) + source $script_path/functions/cleanup.sh + source $script_path/functions/wait-for-container.sh + trap "cleanup_trap ${network_name}" EXIT + + +if [[ "$CLEANUP" == "true" ]]; then + cleanup_all_in_network $network_name + exit 0 +fi + +echo -e "\033[34;1mINFO:\033[0m Creating network $network_name if it does not exist already \033[0m" +docker network inspect "$network_name" > /dev/null 2>&1 || docker network create "$network_name" + diff --git a/.buildkite/functions/wait-for-container.sh b/.buildkite/functions/wait-for-container.sh new file mode 100755 index 000000000..1a721b588 --- /dev/null +++ b/.buildkite/functions/wait-for-container.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# +# Exposes a routine scripts can call to wait for a container if that container set up a health command +# +# Please source .ci/functions/imports.sh as a whole not just this file +# +# Version 1.0.1 +# - Initial version after refactor +# - Make sure wait_for_contiainer is silent + +function wait_for_container { + set +x + until ! container_running "$1" || (container_running "$1" && [[ "$(docker inspect -f "{{.State.Health.Status}}" ${1})" != "starting" ]]); do + echo "" + docker inspect -f "{{range .State.Health.Log}}{{.Output}}{{end}}" ${1} + echo -e "\033[34;1mINFO:\033[0m waiting for node $1 to be up\033[0m" + sleep 2; + done; + + # Always show logs if the container is running, this is very useful both on CI as well as while developing + if container_running $1; then + docker logs $1 + fi + + if ! container_running $1 || [[ "$(docker inspect -f "{{.State.Health.Status}}" ${1})" != "healthy" ]]; then + cleanup_all_in_network $2 + echo + echo -e "\033[31;1mERROR:\033[0m Failed to start $1 in detached mode beyond health checks\033[0m" + echo -e "\033[31;1mERROR:\033[0m dumped the docker log before shutting the node down\033[0m" + return 1 + else + echo + echo -e "\033[32;1mSUCCESS:\033[0m Detached and healthy: ${1} on docker network: ${network_name}\033[0m" + return 0 + fi +} diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index b83edb8c3..006a4ee87 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,3 +1,31 @@ steps: - - label: ":js: Greetings" - command: "echo 'Hello, world!'" + - label: ":elasticsearch: :javascript: ES JavaScript ({{ matrix.nodejs }}) Test Suite: {{ matrix.suite }}" + agents: + provider: "gcp" + env: + NODE_VERSION: "{{ matrix.nodejs }}" + TEST_SUITE: "{{ matrix.suite }}" + STACK_VERSION: 8.8.0-SNAPSHOT + matrix: + setup: + suite: + - "free" + - "platinum" + nodejs: + - "14" + - "16" + - "18" + - "20" + command: ./.buildkite/run-tests.sh + artifact_paths: "./junit-output/junit-*.xml" + - wait: ~ + continue_on_failure: true + - label: ":junit: Test results" + agents: + provider: "gcp" + image: family/core-ubuntu-2204 + plugins: + - junit-annotate#v2.4.1: + artifacts: "junit-output/junit-*.xml" + job-uuid-file-pattern: 'junit-(.*).xml' + fail-build-on-error: true diff --git a/.buildkite/run-client.sh b/.buildkite/run-client.sh new file mode 100755 index 000000000..59ed168e7 --- /dev/null +++ b/.buildkite/run-client.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# +# Once called Elasticsearch should be up and running +# +script_path=$(dirname "$(realpath -s "$0")") +set -euo pipefail +repo=$(pwd) + +export 
NODE_VERSION=${NODE_VERSION:-18} + +echo "--- :javascript: Building Docker image" +docker build \ + --file "$script_path/Dockerfile" \ + --tag elastic/elasticsearch-js \ + --build-arg NODE_VERSION="$NODE_VERSION" \ + . + +echo "--- :javascript: Running $TEST_SUITE tests" +mkdir -p "$repo/junit-output" +docker run \ + --network="${network_name}" \ + --env "TEST_ES_SERVER=${elasticsearch_url}" \ + --env "ELASTIC_PASSWORD=${elastic_password}" \ + --env "TEST_SUITE=${TEST_SUITE}" \ + --env "ELASTIC_USER=elastic" \ + --env "BUILDKITE=true" \ + --volume "$repo/junit-output:/junit-output" \ + --name elasticsearch-js \ + --rm \ + elastic/elasticsearch-js \ + bash -c "npm run test:integration; [ -f ./$TEST_SUITE-report-junit.xml ] && mv ./$TEST_SUITE-report-junit.xml /junit-output/junit-$BUILDKITE_JOB_ID.xml || echo 'No JUnit artifact found'" diff --git a/.buildkite/run-elasticsearch.sh b/.buildkite/run-elasticsearch.sh new file mode 100755 index 000000000..141330093 --- /dev/null +++ b/.buildkite/run-elasticsearch.sh @@ -0,0 +1,152 @@ +#!/usr/bin/env bash +# +# Launch one or more Elasticsearch nodes via the Docker image, +# to form a cluster suitable for running the REST API tests. +# +# Export the STACK_VERSION variable, eg. '8.0.0-SNAPSHOT'. +# Export the TEST_SUITE variable, eg. 'free' or 'platinum' defaults to 'free'. +# Export the NUMBER_OF_NODES variable to start more than 1 node + +# Version 1.6.1 +# - Initial version of the run-elasticsearch.sh script +# - Deleting the volume should not dependent on the container still running +# - Fixed `ES_JAVA_OPTS` config +# - Moved to STACK_VERSION and TEST_VERSION +# - Refactored into functions and imports +# - Support NUMBER_OF_NODES +# - Added 5 retries on docker pull for fixing transient network errors +# - Added flags to make local CCR configurations work +# - Added action.destructive_requires_name=false as the default will be true in v8 +# - Added ingest.geoip.downloader.enabled=false as it causes false positives in testing +# - Moved ELASTIC_PASSWORD and xpack.security.enabled to the base arguments for "Security On by default" +# - Use https only when TEST_SUITE is "platinum", when "free" use http +# - Set xpack.security.enabled=false for "free" and xpack.security.enabled=true for "platinum" + +script_path=$(dirname $(realpath -s $0)) +source $script_path/functions/imports.sh +set -euo pipefail + +echo -e "\033[34;1mINFO:\033[0m Take down node if called twice with the same arguments (DETACH=true) or on separate terminals \033[0m" +cleanup_node $es_node_name + +master_node_name=${es_node_name} +cluster_name=${moniker}${suffix} + +# Set vm.max_map_count kernel setting to 262144 +sudo sysctl -w vm.max_map_count=262144 + +declare -a volumes +environment=($(cat <<-END + --env ELASTIC_PASSWORD=$elastic_password + --env node.name=$es_node_name + --env cluster.name=$cluster_name + --env cluster.initial_master_nodes=$master_node_name + --env discovery.seed_hosts=$master_node_name + --env cluster.routing.allocation.disk.threshold_enabled=false + --env bootstrap.memory_lock=true + --env node.attr.testattr=test + --env path.repo=/tmp + --env repositories.url.allowed_urls=http://snapshot.test* + --env action.destructive_requires_name=false + --env ingest.geoip.downloader.enabled=false + --env cluster.deprecation_indexing.enabled=false +END +)) +if [[ "$TEST_SUITE" == "platinum" ]]; then + environment+=($(cat <<-END + --env xpack.security.enabled=true + --env xpack.license.self_generated.type=trial + --env xpack.security.http.ssl.enabled=true + --env 
xpack.security.http.ssl.verification_mode=certificate + --env xpack.security.http.ssl.key=certs/testnode.key + --env xpack.security.http.ssl.certificate=certs/testnode.crt + --env xpack.security.http.ssl.certificate_authorities=certs/ca.crt + --env xpack.security.transport.ssl.enabled=true + --env xpack.security.transport.ssl.verification_mode=certificate + --env xpack.security.transport.ssl.key=certs/testnode.key + --env xpack.security.transport.ssl.certificate=certs/testnode.crt + --env xpack.security.transport.ssl.certificate_authorities=certs/ca.crt +END +)) + volumes+=($(cat <<-END + --volume $ssl_cert:/usr/share/elasticsearch/config/certs/testnode.crt + --volume $ssl_key:/usr/share/elasticsearch/config/certs/testnode.key + --volume $ssl_ca:/usr/share/elasticsearch/config/certs/ca.crt +END +)) +else + environment+=($(cat <<-END + --env node.roles=data,data_cold,data_content,data_frozen,data_hot,data_warm,ingest,master,ml,remote_cluster_client,transform + --env xpack.security.enabled=false + --env xpack.security.http.ssl.enabled=false +END +)) +fi + +cert_validation_flags="" +if [[ "$TEST_SUITE" == "platinum" ]]; then + cert_validation_flags="--insecure --cacert /usr/share/elasticsearch/config/certs/ca.crt --resolve ${es_node_name}:443:127.0.0.1" +fi + +echo "--- :elasticsearch: Environment setup" +echo "TEST_SUITE: $TEST_SUITE" +echo "Elasticsearch URL: $elasticsearch_url" +echo "Elasticsearch External URL: $external_elasticsearch_url" + + +echo "--- :elasticsearch: Running container" +# Pull the container, retry on failures up to 5 times with +# short delays between each attempt. Fixes most transient network errors. +docker_pull_attempts=0 +until [ "$docker_pull_attempts" -ge 5 ] +do + docker pull docker.elastic.co/elasticsearch/"$elasticsearch_container" && break + docker_pull_attempts=$((docker_pull_attempts+1)) + echo "Failed to pull image, retrying in 10 seconds (retry $docker_pull_attempts/5)..." 
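+  # a simple fixed delay between the 5 pull attempts; an exponential backoff
+  # variant (hypothetical, not part of this script) would be:
+  #   sleep $((10 * 2 ** docker_pull_attempts))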
+ sleep 10 +done + +NUMBER_OF_NODES=${NUMBER_OF_NODES-1} +http_port=9200 +for (( i=0; i<$NUMBER_OF_NODES; i++, http_port++ )); do + node_name=${es_node_name}$i + node_url=${external_elasticsearch_url/9200/${http_port}}$i + if [[ "$i" == "0" ]]; then node_name=$es_node_name; fi + environment+=($(cat <<-END + --env node.name=$node_name +END +)) + echo "$i: $http_port $node_url " + volume_name=${node_name}-${suffix}-data + volumes+=($(cat <<-END + --volume $volume_name:/usr/share/elasticsearch/data${i} +END +)) + + # make sure we detach for all but the last node if DETACH=false (default) so all nodes are started + local_detach="true" + if [[ "$i" == "$((NUMBER_OF_NODES-1))" ]]; then local_detach=$DETACH; fi + echo -e "\033[34;1mINFO:\033[0m Starting container $node_name \033[0m" + set -x + docker run \ + --name "$node_name" \ + --network "$network_name" \ + --env "ES_JAVA_OPTS=-Xms1g -Xmx1g -da:org.elasticsearch.xpack.ccr.index.engine.FollowingEngineAssertions" \ + "${environment[@]}" \ + "${volumes[@]}" \ + --publish "$http_port":9200 \ + --ulimit nofile=65536:65536 \ + --ulimit memlock=-1:-1 \ + --detach="$local_detach" \ + --health-cmd="curl $cert_validation_flags --fail $elasticsearch_url/_cluster/health || exit 1" \ + --health-interval=2s \ + --health-retries=20 \ + --health-timeout=2s \ + --rm \ + docker.elastic.co/elasticsearch/"$elasticsearch_container"; + + set +x + if wait_for_container "$es_node_name" "$network_name"; then + echo -e "\033[32;1mSUCCESS:\033[0m Running on: $node_url\033[0m" + fi +done diff --git a/.buildkite/run-tests.sh b/.buildkite/run-tests.sh new file mode 100755 index 000000000..d9aa181af --- /dev/null +++ b/.buildkite/run-tests.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# +# Script to run Elasticsearch container and Elasticsearch client integration tests on Buildkite +# +# Version 0.1 +# +script_path=$(dirname "$(realpath -s "$0")") +source "$script_path/functions/imports.sh" + +set -euo pipefail + +echo "--- :elasticsearch: Starting Elasticsearch" +DETACH=true bash "$script_path/run-elasticsearch.sh" + +echo "+++ :javascript: Run Client" +bash "$script_path/run-client.sh" diff --git a/.ci/functions/imports.sh b/.ci/functions/imports.sh index c05f36826..143bc22e9 100644 --- a/.ci/functions/imports.sh +++ b/.ci/functions/imports.sh @@ -44,9 +44,9 @@ if [[ -z $es_node_name ]]; then fi - export script_path=$(dirname $(realpath -s $0)) - source $script_path/functions/cleanup.sh - source $script_path/functions/wait-for-container.sh + export script_path=$(dirname "$(realpath -s "$0")") + source "$script_path/functions/cleanup.sh" + source "$script_path/functions/wait-for-container.sh" trap "cleanup_trap ${network_name}" EXIT @@ -57,4 +57,3 @@ fi echo -e "\033[34;1mINFO:\033[0m Creating network $network_name if it does not exist already \033[0m" docker network inspect "$network_name" > /dev/null 2>&1 || docker network create "$network_name" - diff --git a/.ci/make.mjs b/.ci/make.mjs index 0937de9ea..9eeb16906 100644 --- a/.ci/make.mjs +++ b/.ci/make.mjs @@ -92,6 +92,13 @@ async function bump (args) { testMatrix.replace(/STACK_VERSION:\s+\- "[0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?"/, `STACK_VERSION:\n - "${cleanVersion}-SNAPSHOT"`), // eslint-disable-line 'utf8' ) + + const pipeline = await readFile(join(import.meta.url, '..', '.buildkite', 'pipeline.yml')) + await writeFile( + join(import.meta.url, '..', '.buildkite', 'pipeline.yml'), + pipeline.replace(/STACK_VERSION: [0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?/, `STACK_VERSION: - ${cleanVersion}-SNAPSHOT`), // eslint-disable-line 
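+    // the regex above matches e.g. 'STACK_VERSION: 8.8.0-SNAPSHOT' in pipeline.yml;
+    // the bumped file is written back in place with the explicit encoding below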
+ 'utf8' + ) } // this command can only be executed locally for now diff --git a/catalog-info.yaml b/catalog-info.yaml new file mode 100644 index 000000000..4f381d1e9 --- /dev/null +++ b/catalog-info.yaml @@ -0,0 +1,53 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/catalog-info.json +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: elasticsearch-js +spec: + type: library + owner: group:clients-team + lifecycle: production + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: elasticsearch-js-integration-tests + description: Elasticsearch JavaScript client integration tests +spec: + type: buildkite-pipeline + owner: group:clients-team + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: Elasticsearch JavaScript client integration tests + spec: + repository: elastic/elasticsearch-js + pipeline_file: .buildkite/pipeline.yml + teams: + clients-team: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: '!main' + schedules: + main_semi_daily: + branch: 'main' + cronline: '*/12 * * *' + 8_8_semi_daily: + branch: '8.8' + cronline: '*/12 * * *' + 8_7_daily: + branch: '8.7' + cronline: '@daily' + 8_6_daily: + branch: '8.6' + cronline: '@daily' + 7_17_daily: + branch: '7.17' + cronline: '@daily' From 4b74e4136eddf5b63c150bda0228ab74b6b41931 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 5 May 2023 14:30:01 -0500 Subject: [PATCH 205/647] Drop Jenkins jobs (#1877) --- .ci/certs/ca.crt | 20 --- .ci/certs/ca.key | 27 ---- .ci/certs/testnode.crt | 19 --- .ci/certs/testnode.key | 27 ---- .ci/certs/testnode_san.crt | 20 --- .ci/certs/testnode_san.key | 27 ---- .ci/docker/Dockerfile | 7 - .ci/functions/cleanup.sh | 67 -------- .ci/functions/imports.sh | 59 -------- .ci/functions/wait-for-container.sh | 36 ----- .ci/jobs/defaults.yml | 81 ---------- .ci/jobs/elastic+elasticsearch-js+7.17.yml | 15 -- .ci/jobs/elastic+elasticsearch-js+8.5.yml | 15 -- .ci/jobs/elastic+elasticsearch-js+8.6.yml | 15 -- .ci/jobs/elastic+elasticsearch-js+8.7.yml | 15 -- .ci/jobs/elastic+elasticsearch-js+8.8.yml | 15 -- .ci/jobs/elastic+elasticsearch-js+main.yml | 15 -- .../elastic+elasticsearch-js+pull-request.yml | 19 --- .ci/make.mjs | 7 - .ci/packer_cache.sh | 14 -- .ci/run-elasticsearch.sh | 143 ------------------ .ci/run-repository.sh | 43 ------ .ci/run-tests | 23 --- .ci/test-matrix.yml | 14 -- .github/workflows/nodejs.yml | 142 +---------------- 25 files changed, 2 insertions(+), 883 deletions(-) delete mode 100755 .ci/certs/ca.crt delete mode 100644 .ci/certs/ca.key delete mode 100755 .ci/certs/testnode.crt delete mode 100755 .ci/certs/testnode.key delete mode 100644 .ci/certs/testnode_san.crt delete mode 100644 .ci/certs/testnode_san.key delete mode 100644 .ci/docker/Dockerfile delete mode 100644 .ci/functions/cleanup.sh delete mode 100644 .ci/functions/imports.sh delete mode 100644 .ci/functions/wait-for-container.sh delete mode 100644 .ci/jobs/defaults.yml delete mode 100644 .ci/jobs/elastic+elasticsearch-js+7.17.yml delete mode 100644 .ci/jobs/elastic+elasticsearch-js+8.5.yml delete mode 100644 .ci/jobs/elastic+elasticsearch-js+8.6.yml delete mode 100644 .ci/jobs/elastic+elasticsearch-js+8.7.yml delete mode 100644 
.ci/jobs/elastic+elasticsearch-js+8.8.yml delete mode 100644 .ci/jobs/elastic+elasticsearch-js+main.yml delete mode 100644 .ci/jobs/elastic+elasticsearch-js+pull-request.yml delete mode 100644 .ci/packer_cache.sh delete mode 100755 .ci/run-elasticsearch.sh delete mode 100755 .ci/run-repository.sh delete mode 100755 .ci/run-tests delete mode 100644 .ci/test-matrix.yml diff --git a/.ci/certs/ca.crt b/.ci/certs/ca.crt deleted file mode 100755 index 71f9bfc81..000000000 --- a/.ci/certs/ca.crt +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDSjCCAjKgAwIBAgIVAJQLm8V2LcaCTHUcoIfO+KL63nG3MA0GCSqGSIb3DQEB -CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu -ZXJhdGVkIENBMB4XDTIwMDIyNjA1NTA1N1oXDTIzMDIyNTA1NTA1N1owNDEyMDAG -A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew -ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDYyajkPvGtUOE5M1OowQfB -kWVrWjo1+LIxzgCeRHp0YztLtdVJ0sk2xoSrt2uZpxcPepdyOseLTjFJex1D2yCR -AEniIqcFif4G72nDih2LlbhpUe/+/MTryj8ZTkFTzI+eMmbQi5FFMaH+kwufmdt/ -5/w8YazO18SxxJUlzMqzfNUrhM8vvvVdxgboU7PWhk28wZHCMHQovomHmzclhRpF -N0FMktA98vHHeRjH19P7rNhifSd7hZzoH3H148HVAKoPgqnZ6vW2O2YfAWOP6ulq -cyszr57p8fS9B2wSdlWW7nVHU1JuKcYD67CxbBS23BeGFgCj4tiNrmxO8S5Yf85v -AgMBAAGjUzBRMB0GA1UdDgQWBBSWAlip9eoPmnG4p4OFZeOUBlAbNDAfBgNVHSME -GDAWgBSWAlip9eoPmnG4p4OFZeOUBlAbNDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG -SIb3DQEBCwUAA4IBAQA19qqrMTWl7YyId+LR/QIHDrP4jfxmrEELrAL58q5Epc1k -XxZLzOBSXoBfBrPdv+3XklWqXrZjKWfdkux0Xmjnl4qul+srrZDLJVZG3I7IrITh -AmQUmL9MuPiMnAcxoGZp1xpijtW8Qmd2qnambbljWfkuVaa4hcVRfrAX6TciIQ21 -bS5aeLGrPqR14h30YzDp0RMmTujEa1o6ExN0+RSTkE9m89Q6WdM69az8JW7YkWqm -I+UCG3TcLd3TXmN1zNQkq4y2ObDK4Sxy/2p6yFPI1Fds5w/zLfBOvvPQY61vEqs8 -SCCcQIe7f6NDpIRIBlty1C9IaEHj7edyHjF6rtYb ------END CERTIFICATE----- diff --git a/.ci/certs/ca.key b/.ci/certs/ca.key deleted file mode 100644 index dfc41b558..000000000 --- a/.ci/certs/ca.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpgIBAAKCAQEA2Mmo5D7xrVDhOTNTqMEHwZFla1o6NfiyMc4AnkR6dGM7S7XV -SdLJNsaEq7drmacXD3qXcjrHi04xSXsdQ9sgkQBJ4iKnBYn+Bu9pw4odi5W4aVHv -/vzE68o/GU5BU8yPnjJm0IuRRTGh/pMLn5nbf+f8PGGsztfEscSVJczKs3zVK4TP -L771XcYG6FOz1oZNvMGRwjB0KL6Jh5s3JYUaRTdBTJLQPfLxx3kYx9fT+6zYYn0n -e4Wc6B9x9ePB1QCqD4Kp2er1tjtmHwFjj+rpanMrM6+e6fH0vQdsEnZVlu51R1NS -binGA+uwsWwUttwXhhYAo+LYja5sTvEuWH/ObwIDAQABAoIBAQC8QDGnMnmPdWJ+ -13FYY3cmwel+FXXjFDk5QpgK15A2rUz6a8XxO1d7d1wR+U84uH4v9Na6XQyWjaoD -EyPQnuJiyAtgkZLUHoY244PGR5NsePEQlBSCKmGeF5w/j1LvP/2e9EmP4wKdQYJY -nLxFNcgEBCFnFbKIU5n8fKa/klybCrwlBokenyBro02tqH4LL7h1YMRRrl97fv1V -e/y/0WcMN+KnMglfz6haimBRV2yamCCHHmBImC+wzOgT/quqlxPfI+a3ScHxuA65 -3QyCavaqlPh+T3lXnN/Na4UWqFtzMmwgJX2x1zM5qiln46/JoDiXtagvV43L3rNs -LhPRFeIRAoGBAPhEB7nNpEDNjIRUL6WpebWS9brKAVY7gYn7YQrKGhhCyftyaiBZ -zYgxPaJdqYXf+DmkWlANGoYiwEs40QwkR/FZrvO4+Xh3n3dgtl59ZmieuoQvDsG+ -RYIj+TfBaqhewhZNMMl7dxz7DeyQhyRCdsvl3VqJM0RuOsIrzrhCIEItAoGBAN+K -lgWI7swDpOEaLmu+IWMkGImh1LswXoZqIgi/ywZ7htZjPzidOIeUsMi+lrYsKojG -uU3sBxASsf9kYXDnuUuUbGT5M/N2ipXERt7klUAA/f5sg1IKlTrabaN/HGs/uNtf -Efa8v/h2VyTurdPCJ17TNpbOMDwX1qGM62tyt2CLAoGBAIHCnP8iWq18QeuQTO8b -a3/Z9hHRL22w4H4MI6aOB6GSlxuTq6CJD4IVqo9IwSg17fnCy2l3z9s4IqWuZqUf -+XJOW8ELd2jdrT2qEOfGR1Z7UCVyqxXcq1vgDYx0zZh/HpalddB5dcJx/c8do2Ty -UEE2PcHqYB9uNcvzNbLc7RtpAoGBALbuU0yePUTI6qGnajuTcQEPpeDjhRHWSFRZ -ABcG1N8uMS66Mx9iUcNp462zgeP8iqY5caUZtMHreqxT+gWKK7F0+as7386pwElF -QPXgO18QMMqHBIQb0vlBjJ1SRPBjSiSDTVEML1DljvTTOX7kEJHh6HdKrmBO5b54 -cqMQUo53AoGBAPVWRPUXCqlBz914xKna0ZUh2aesRBg5BvOoq9ey9c52EIU5PXL5 -0Isk8sWSsvhl3tjDPBH5WuL5piKgnCTqkVbEHmWu9s1T57Mw6NuxlPMLBWvyv4c6 
-tB9brOxv0ui3qGMuBsBoDKbkNnwXyOXLyFg7O+H4l016A3mLQzJM+NGV ------END RSA PRIVATE KEY----- diff --git a/.ci/certs/testnode.crt b/.ci/certs/testnode.crt deleted file mode 100755 index a49dfd775..000000000 --- a/.ci/certs/testnode.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDIzCCAgugAwIBAgIVAMTO6uVx9dLox2t0lY4IcBKZXb5WMA0GCSqGSIb3DQEB -CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu -ZXJhdGVkIENBMB4XDTIwMDIyNjA1NTA1OVoXDTIzMDIyNTA1NTA1OVowEzERMA8G -A1UEAxMIaW5zdGFuY2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDK -YLTOikVENiN/qYupOsoXd7VYYnryyfCC/dK4FC2aozkbqjFzBdvPGAasoc4yEiH5 -CGeXMgJuOjk1maqetmdIsw00j4oHJviYsnGXzxxS5swhD7spcW4Uk4V4tAUzrbfT -vW/2WW/yYCLe5phVb2chz0jL+WYb4bBmdfs/t6RtP9RqsplYAmVp3gZ6lt2YNtvE -k9gz0TVk3DuO1TquIClfRYUjuywS6xDSvxJ8Jl91EfDWM8QU+9F+YAtiv74xl2U3 -P0wwMqNvMxf9/3ak3lTQGsgO4L6cwbKpVLMMzxSVunZz/sgl19xy3qHHz1Qr2MjJ -/2c2J7vahUL4NPRkjJClAgMBAAGjTTBLMB0GA1UdDgQWBBS2Wn8E2VZv4oenY+pR -O8G3zfQXhzAfBgNVHSMEGDAWgBSWAlip9eoPmnG4p4OFZeOUBlAbNDAJBgNVHRME -AjAAMA0GCSqGSIb3DQEBCwUAA4IBAQAvwPvCiJJ6v9jYcyvYY8I3gP0oCwrylpRL -n91UlgRSHUmuAObyOoVN5518gSV/bTU2SDrstcLkLFxHvnfpoGJoxsQEHuGxwDRI -nhYNd62EKLerehNM/F9ILKmvTh8f6QPCzjUuExTXv+63l2Sr6dBS7FHsGs6UKUYO -llM/y9wMZ1LCuZuBg9RhtgpFXRSgDM9Z7Begu0d/BPX9od/qAeZg9Arz4rwUiCN4 -IJOMEBEPi5q1tgeS0Fb1Grpqd0Uz5tZKtEHNKzLG+zSMmkneL62Nk2HsmEFZKwzg -u2pU42UaUE596G6o78s1aLn9ICcElPHTjiuZNSiyuu9IzvFDjGQw ------END CERTIFICATE----- diff --git a/.ci/certs/testnode.key b/.ci/certs/testnode.key deleted file mode 100755 index 82efeecb9..000000000 --- a/.ci/certs/testnode.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAymC0zopFRDYjf6mLqTrKF3e1WGJ68snwgv3SuBQtmqM5G6ox -cwXbzxgGrKHOMhIh+QhnlzICbjo5NZmqnrZnSLMNNI+KByb4mLJxl88cUubMIQ+7 -KXFuFJOFeLQFM623071v9llv8mAi3uaYVW9nIc9Iy/lmG+GwZnX7P7ekbT/UarKZ -WAJlad4GepbdmDbbxJPYM9E1ZNw7jtU6riApX0WFI7ssEusQ0r8SfCZfdRHw1jPE -FPvRfmALYr++MZdlNz9MMDKjbzMX/f92pN5U0BrIDuC+nMGyqVSzDM8Ulbp2c/7I -Jdfcct6hx89UK9jIyf9nNie72oVC+DT0ZIyQpQIDAQABAoIBADAh7f7NjgnaInlD -ds8KB3SraPsbeQhzlPtiqRJU4j/MIFH/GYG03AGWQkget67a9y+GmzSvlTpoKKEh -6h2TXl9BDpv4o6ht0WRn1HJ5tM/Wyqf2WNpTew3zxCPgFPikkXsPrChYPzLTQJfp -GkP/mfTFmxfAOlPZSp4j41zVLYs53eDkAegFPVfKSr1XNNJ3QODLPcIBfxBYsiC9 -oU+jRW8xYuj31cEl5k5UqrChJ1rm3mt6cguqXKbISuoSvi13gXI6DccqhuLAU+Kr -ib2XYrRP+pWocZo/pM9WUVoNGtFxfY88sAQtvG6gDKo2AURtFyq84Ow0h9mdixV/ -gRIDPcECgYEA5nEqE3OKuG9WuUFGXvjtn4C0F6JjflYWh7AbX51S4F6LKrW6/XHL -Rg4BtF+XReT7OQ6llsV8kZeUxsUckkgDLzSaA8lysNDV5KkhAWHfRqH//QKFbqZi -JL9t3x63Qt81US8s2hQk3khPYTRM8ZB3xHiXvZYSGC/0x/DxfEO3QJECgYEA4NK5 -sxtrat8sFz6SK9nWEKimPjDVzxJ0hxdX4tRq/JdOO5RncawVqt6TNP9gTuxfBvhW -MhJYEsQj8iUoL1dxo9d1eP8HEANNV0iX5OBvJNmgBp+2OyRSyr+PA55+wAxYuAE7 -QKaitOjW57fpArNRt2hQyiSzTuqUFRWTWJHCWNUCgYAEurPTXF6vdFGCUc2g61jt -GhYYGhQSpq+lrz6Qksj9o9MVWE9zHh++21C7o+6V16I0RJGva3QoBMVf4vG4KtQt -5tV2WG8LI+4P2Ey+G4UajP6U8bVNVQrUmD0oBBhcvfn5JY+1Fg6/pRpD82/U0VMz -7AmpMWhDqNBMPiymkTk0kQKBgCuWb05cSI0ly4SOKwS5bRk5uVFhYnKNH255hh6C -FGP4acB/WzbcqC7CjEPAJ0nl5d6SExQOHmk1AcsWjR3wlCWxxiK5PwNJwJrlhh1n -reS1FKN0H36D4lFQpkeLWQOe4Sx7gKNeKzlr0w6Fx3Uwku0+Gju2tdTdAey8jB6l -08opAoGAEe1AuR/OFp2xw6V8TH9UHkkpGxy+OrXI6PX6tgk29PgB+uiMu4RwbjVz -1di1KKq2XecAilVbnyqY+edADxYGbSnci9x5wQRIebfMi3VXKtV8NQBv2as6qwtW -JDcQUWotOHjpdvmfJWWkcBhbAKrgX8ukww00ZI/lC3/rmkGnBBg= ------END RSA PRIVATE KEY----- diff --git a/.ci/certs/testnode_san.crt b/.ci/certs/testnode_san.crt deleted file mode 100644 index 8abba55b5..000000000 --- a/.ci/certs/testnode_san.crt +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- 
-MIIDVjCCAj6gAwIBAgIULh42yRefYlRRl1hvt055LrUH0HwwDQYJKoZIhvcNAQEL -BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l -cmF0ZWQgQ0EwHhcNMjAwMjI4MDMzNzIwWhcNMjMwMjI3MDMzNzIwWjATMREwDwYD -VQQDEwhpbnN0YW5jZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIUP -t267NN21z+3ukajej8eojSXwP6zHxy7CUAp+sQ7bTq2XCKxkYX3CW9ThcS4cV9mL -ayYdWEYnbEDGYPQDo7Wk3Ih5OEXTMZb/yNEx5D4S2lGMOS5bCDdYx6GvwCMG4jNx -aMktosaxpprAJiHh2oLgQk0hQc/a9JfMo6kJKtuhjxsxjxLwcOHhuaUD7NS0Pjop -CJkSYcrL+nnQPQjKe4uLhAbSyiX914h4QX0CJ0e4z1ccdDX2PFWTrwaIf//vQhCR -wP2YKdfjR0JB4oDAlu85GsIs2cFLPysM5ufuNZO4fCr8uOwloKI8zZ2HhlIfBEcY -Gcy4g9N/9epmxMXZlGcCAwEAAaOBgDB+MB0GA1UdDgQWBBRefYm8DHHDdkTPHhS1 -HEUwTb2uiDAfBgNVHSMEGDAWgBSWAlip9eoPmnG4p4OFZeOUBlAbNDAxBgNVHREE -KjAogglsb2NhbGhvc3SHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAGCA2VzMTAJBgNV -HRMEAjAAMA0GCSqGSIb3DQEBCwUAA4IBAQC+pauqM2wJjQaHyHu+kIm59P4b/5Oj -IH1cYCQfMB7Y2UMLxp0ew+f7o7zzE2DA52YYFDWy6J5DVWtSBPyeFGgX+RH+aA+9 -Iv4cc9QpAs6aFjncorHrzNOrWLgCHIeRAxTR0CAkeP2dUZfDBuMpRyP6rAsYzyLH -Rb3/BfYJSI5vxgt5Ke49Y/ljDKFJTyDmAVrHQ4JWrseYE1UZ2eDkBXeiRlYE/QtB -YsrUSqdL6zvFZyUcilxDUUabNcA+GgeGZ2lAEA90F8vwi62QwRXo3Iv1Hz+6xc43 -nFofDK9D8/qkrUD9iuhpx1974QwPhwWyjn9RZRpbZA4ngRL+szdRXR4N ------END CERTIFICATE----- diff --git a/.ci/certs/testnode_san.key b/.ci/certs/testnode_san.key deleted file mode 100644 index 75d19539e..000000000 --- a/.ci/certs/testnode_san.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAhQ+3brs03bXP7e6RqN6Px6iNJfA/rMfHLsJQCn6xDttOrZcI -rGRhfcJb1OFxLhxX2YtrJh1YRidsQMZg9AOjtaTciHk4RdMxlv/I0THkPhLaUYw5 -LlsIN1jHoa/AIwbiM3FoyS2ixrGmmsAmIeHaguBCTSFBz9r0l8yjqQkq26GPGzGP -EvBw4eG5pQPs1LQ+OikImRJhysv6edA9CMp7i4uEBtLKJf3XiHhBfQInR7jPVxx0 -NfY8VZOvBoh//+9CEJHA/Zgp1+NHQkHigMCW7zkawizZwUs/Kwzm5+41k7h8Kvy4 -7CWgojzNnYeGUh8ERxgZzLiD03/16mbExdmUZwIDAQABAoIBAEwhjulLMVc9JEfV -PP/qv0cUOBYh3LzF3T/yq4slq7Z9YgnOJYdFM8aZgqNNjc09KEJvE5JOLeiNu9Ff -768Nugg+2HM5MCo7SN9FYCfZLOcbMFCCM2FDcnMAV9A512vzD08xryuT8dNPZ6yZ -DfhK2hQRrb2lrpr3gwSrcGRRu3THqvq7X1RIjpLV3teDMeP8rQPAlpj8fmP+kdVV -5y1ihiDIo87McihG9FMavJtBDXQkUEuVw6eIeir8L/zHHD/ZwhYjNHZGWbrB88sz -CkJkfWh/FlA63tCVdJzkmnERALLTVy9mR0Sq6sUlnFhFNO2BRdWgYLrcp9McfTJC -e8+WsSECgYEAuwQ3nAaFL0jqYu1AREyKT/f3WUenf2UsX7dwwV2/yFtQvkzW7ji4 -uZLnfUnZBojtHf35dRo+hDgtvhZhgZNAuPPsbOl/EIMTcbChEqV/3CSTFlhLFM1d -hfM9PoM+Bt/pyUNabjD1sWM0X7WeUhzcddshY3S4daBsNsLuOzweRRcCgYEAtiSS -4qiiGafYsY7gOHuAlOhs/00+1uWIFEHKgoHM9vzCxDN3LCmBdynHk8ZE2TAdhw+l -7xpu6LUxKQDfGmVZa9Epg0kQmVq9c54oQP57pJ3tR+68++insEkfnaZH8jblfq2s -sSkFrY3pdS19edq60nuft64kswKRUUkamCXTXTECgYBdoSfiMpV9bekC7DsPtq5M -iR3KEgi2zEViCmomNTRuL+GF1NyKWdWJ+xVwcYd5MRZdvKimyyPfeGzWTUg14i42 -KtEEWgZmkukqMz8BIeCYq6sENeIpIQQgqv3PjU+Bi5r1S4Y7wsFPNRakkD4aaB6r -1rCppWcwZMeoxwEUoO2aswKBgBdDIIdWJi3EpAY5SyWrkEZ0UMdiZC4p7nE33ddB -IJ5CtdU9BXFcc652ZYjX/58FaCABvZ2F8LhDu92SwOusGfmNIxIjWL1dO2jywA1c -8wmZKd7P/M7nbdMz45fMzs9+d1zwbWfK53C8+R4AC1BuwQF0zHc3BHTgVRLelUjt -O8thAoGAdO2gHIqEsZzTgbvLbsh52eVbumjfNGnrnEv1fjb+o+/wAol8dymcmzbL -bZCRzoyA0qwU9kdPFgX46H6so6o1tUM2GQtVFoT6kDnPv7EkLQK0C4cDh6OOHxDU -NPvr/9fHhQd9EDWDvS1JnVMAdKDO6ELp3SoKGGmCXR2QplnqWAk= ------END RSA PRIVATE KEY----- diff --git a/.ci/docker/Dockerfile b/.ci/docker/Dockerfile deleted file mode 100644 index 2f37234ae..000000000 --- a/.ci/docker/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -ARG NODE_JS_VERSION=10 -FROM node:${NODE_JS_VERSION}-alpine - -RUN apk --no-cache add git - -# Create app directory -WORKDIR /usr/src/app diff --git a/.ci/functions/cleanup.sh b/.ci/functions/cleanup.sh deleted file mode 100644 index 4c25166fb..000000000 --- a/.ci/functions/cleanup.sh +++ 
/dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env bash -# -# Shared cleanup routines between different steps -# -# Please source .ci/functions/imports.sh as a whole not just this file -# -# Version 1.0.0 -# - Initial version after refactor - -function cleanup_volume { - if [[ "$(docker volume ls -q -f name=$1)" ]]; then - echo -e "\033[34;1mINFO:\033[0m Removing volume $1\033[0m" - (docker volume rm "$1") || true - fi -} -function container_running { - if [[ "$(docker ps -q -f name=$1)" ]]; then - return 0; - else return 1; - fi -} -function cleanup_node { - if container_running "$1"; then - echo -e "\033[34;1mINFO:\033[0m Removing container $1\033[0m" - (docker container rm --force --volumes "$1") || true - fi - if [[ -n "$1" ]]; then - echo -e "\033[34;1mINFO:\033[0m Removing volume $1-${suffix}-data\033[0m" - cleanup_volume "$1-${suffix}-data" - fi -} -function cleanup_network { - if [[ "$(docker network ls -q -f name=$1)" ]]; then - echo -e "\033[34;1mINFO:\033[0m Removing network $1\033[0m" - (docker network rm "$1") || true - fi -} - -function cleanup_trap { - status=$? - set +x - if [[ "$DETACH" != "true" ]]; then - echo -e "\033[34;1mINFO:\033[0m clean the network if not detached (start and exit)\033[0m" - cleanup_all_in_network "$1" - fi - # status is 0 or SIGINT - if [[ "$status" == "0" || "$status" == "130" ]]; then - echo -e "\n\033[32;1mSUCCESS run-tests\033[0m" - exit 0 - else - echo -e "\n\033[31;1mFAILURE during run-tests\033[0m" - exit ${status} - fi -}; -function cleanup_all_in_network { - - if [[ -z "$(docker network ls -q -f name="^$1\$")" ]]; then - echo -e "\033[34;1mINFO:\033[0m $1 is already deleted\033[0m" - return 0 - fi - containers=$(docker network inspect -f '{{ range $key, $value := .Containers }}{{ printf "%s\n" .Name}}{{ end }}' $1) - while read -r container; do - cleanup_node "$container" - done <<< "$containers" - cleanup_network $1 - echo -e "\033[32;1mSUCCESS:\033[0m Cleaned up and exiting\033[0m" -}; diff --git a/.ci/functions/imports.sh b/.ci/functions/imports.sh deleted file mode 100644 index 143bc22e9..000000000 --- a/.ci/functions/imports.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash -# -# Sets up all the common variables and imports relevant functions -# -# Version 1.0.1 -# - Initial version after refactor -# - Validate STACK_VERSION asap - -function require_stack_version() { - if [[ -z $STACK_VERSION ]]; then - echo -e "\033[31;1mERROR:\033[0m Required environment variable [STACK_VERSION] not set\033[0m" - exit 1 - fi -} - -require_stack_version - -if [[ -z $es_node_name ]]; then - # only set these once - set -euo pipefail - export TEST_SUITE=${TEST_SUITE-free} - export RUNSCRIPTS=${RUNSCRIPTS-} - export DETACH=${DETACH-false} - export CLEANUP=${CLEANUP-false} - - export es_node_name=instance - export elastic_password=changeme - export elasticsearch_image=elasticsearch - export elasticsearch_scheme="https" - if [[ $TEST_SUITE != "platinum" ]]; then - export elasticsearch_scheme="http" - fi - export elasticsearch_url=${elasticsearch_scheme}://elastic:${elastic_password}@${es_node_name}:9200 - export external_elasticsearch_url=${elasticsearch_url/$es_node_name/localhost} - export elasticsearch_container="${elasticsearch_image}:${STACK_VERSION}" - - export suffix=rest-test - export moniker=$(echo "$elasticsearch_container" | tr -C "[:alnum:]" '-') - export network_name=${moniker}${suffix} - - export ssl_cert="${script_path}/certs/testnode.crt" - export ssl_key="${script_path}/certs/testnode.key" - export ssl_ca="${script_path}/certs/ca.crt" - -fi - 
- export script_path=$(dirname "$(realpath -s "$0")") - source "$script_path/functions/cleanup.sh" - source "$script_path/functions/wait-for-container.sh" - trap "cleanup_trap ${network_name}" EXIT - - -if [[ "$CLEANUP" == "true" ]]; then - cleanup_all_in_network $network_name - exit 0 -fi - -echo -e "\033[34;1mINFO:\033[0m Creating network $network_name if it does not exist already \033[0m" -docker network inspect "$network_name" > /dev/null 2>&1 || docker network create "$network_name" diff --git a/.ci/functions/wait-for-container.sh b/.ci/functions/wait-for-container.sh deleted file mode 100644 index 1a721b588..000000000 --- a/.ci/functions/wait-for-container.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -# -# Exposes a routine scripts can call to wait for a container if that container set up a health command -# -# Please source .ci/functions/imports.sh as a whole not just this file -# -# Version 1.0.1 -# - Initial version after refactor -# - Make sure wait_for_contiainer is silent - -function wait_for_container { - set +x - until ! container_running "$1" || (container_running "$1" && [[ "$(docker inspect -f "{{.State.Health.Status}}" ${1})" != "starting" ]]); do - echo "" - docker inspect -f "{{range .State.Health.Log}}{{.Output}}{{end}}" ${1} - echo -e "\033[34;1mINFO:\033[0m waiting for node $1 to be up\033[0m" - sleep 2; - done; - - # Always show logs if the container is running, this is very useful both on CI as well as while developing - if container_running $1; then - docker logs $1 - fi - - if ! container_running $1 || [[ "$(docker inspect -f "{{.State.Health.Status}}" ${1})" != "healthy" ]]; then - cleanup_all_in_network $2 - echo - echo -e "\033[31;1mERROR:\033[0m Failed to start $1 in detached mode beyond health checks\033[0m" - echo -e "\033[31;1mERROR:\033[0m dumped the docker log before shutting the node down\033[0m" - return 1 - else - echo - echo -e "\033[32;1mSUCCESS:\033[0m Detached and healthy: ${1} on docker network: ${network_name}\033[0m" - return 0 - fi -} diff --git a/.ci/jobs/defaults.yml b/.ci/jobs/defaults.yml deleted file mode 100644 index d105838af..000000000 --- a/.ci/jobs/defaults.yml +++ /dev/null @@ -1,81 +0,0 @@ ---- - -##### GLOBAL METADATA - -- meta: - cluster: clients-ci - -##### JOB DEFAULTS - -- job: - project-type: matrix - logrotate: - daysToKeep: 30 - numToKeep: 100 - parameters: - - string: - name: branch_specifier - default: refs/heads/main - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) 
- properties: - - github: - url: https://github.com/elastic/elasticsearch-js/ - - inject: - properties-content: HOME=$JENKINS_HOME - concurrent: true - node: flyweight - scm: - - git: - name: origin - credentials-id: f6c7695a-671e-4f4f-a331-acdce44ff9ba - reference-repo: /var/lib/jenkins/.git-references/elasticsearch-js.git - branches: - - ${branch_specifier} - url: https://github.com/elastic/elasticsearch-js.git - basedir: '' - wipe-workspace: 'True' - triggers: - - github - vault: - # vault read auth/approle/role/clients-ci/role-id - role_id: ddbd0d44-0e51-105b-177a-c8fdfd445126 - axes: - - axis: - type: slave - name: label - values: - - linux - - axis: - type: yaml - filename: .ci/test-matrix.yml - name: STACK_VERSION - - axis: - type: yaml - filename: .ci/test-matrix.yml - name: NODE_JS_VERSION - - axis: - type: yaml - filename: .ci/test-matrix.yml - name: TEST_SUITE - yaml-strategy: - exclude-key: exclude - filename: .ci/test-matrix.yml - wrappers: - - ansicolor - - timeout: - type: absolute - timeout: 120 - fail: true - - timestamps - - workspace-cleanup - builders: - - shell: |- - #!/usr/local/bin/runbld - .ci/run-tests - publishers: - - email: - recipients: build-lang-clients@elastic.co - - junit: - results: "**/*-junit.xml" - allow-empty-results: true diff --git a/.ci/jobs/elastic+elasticsearch-js+7.17.yml b/.ci/jobs/elastic+elasticsearch-js+7.17.yml deleted file mode 100644 index 3e167ef70..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+7.17.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+7.17 - display-name: 'elastic / elasticsearch-js # 7.17' - description: Testing the elasticsearch-js 7.17 branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/7.17 - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+8.5.yml b/.ci/jobs/elastic+elasticsearch-js+8.5.yml deleted file mode 100644 index def3ba64c..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+8.5.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+8.5 - display-name: 'elastic / elasticsearch-js # 8.5' - description: Testing the elasticsearch-js 8.5 branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/8.5 - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+8.6.yml b/.ci/jobs/elastic+elasticsearch-js+8.6.yml deleted file mode 100644 index b9d63be7d..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+8.6.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+8.6 - display-name: 'elastic / elasticsearch-js # 8.6' - description: Testing the elasticsearch-js 8.6 branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/8.6 - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) 
- triggers: - - github - - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+8.7.yml b/.ci/jobs/elastic+elasticsearch-js+8.7.yml deleted file mode 100644 index fb6425583..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+8.7.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+8.7 - display-name: 'elastic / elasticsearch-js # 8.7' - description: Testing the elasticsearch-js 8.7 branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/8.7 - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+8.8.yml b/.ci/jobs/elastic+elasticsearch-js+8.8.yml deleted file mode 100644 index 786794f1f..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+8.8.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+8.8 - display-name: 'elastic / elasticsearch-js # 8.8' - description: Testing the elasticsearch-js 8.8 branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/8.8 - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+main.yml b/.ci/jobs/elastic+elasticsearch-js+main.yml deleted file mode 100644 index b41259007..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+main - display-name: 'elastic / elasticsearch-js # main' - description: Testing the elasticsearch-js main branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/main - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: 'H */12 * * *' diff --git a/.ci/jobs/elastic+elasticsearch-js+pull-request.yml b/.ci/jobs/elastic+elasticsearch-js+pull-request.yml deleted file mode 100644 index 722dfc62c..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+pull-request.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+pull-request - display-name: 'elastic / elasticsearch-js # pull-request' - description: Testing of elasticsearch-js pull requests. 
- junit_results: "*-junit.xml" - scm: - - git: - branches: - - ${ghprbActualCommit} - refspec: +refs/pull/*:refs/remotes/origin/pr/* - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - github-hooks: true - status-context: clients-ci - cancel-builds-on-update: true diff --git a/.ci/make.mjs b/.ci/make.mjs index 9eeb16906..6940aa97f 100644 --- a/.ci/make.mjs +++ b/.ci/make.mjs @@ -86,13 +86,6 @@ async function bump (args) { 'utf8' ) - const testMatrix = await readFile(join(import.meta.url, 'test-matrix.yml'), 'utf8') - await writeFile( - join(import.meta.url, 'test-matrix.yml'), - testMatrix.replace(/STACK_VERSION:\s+\- "[0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?"/, `STACK_VERSION:\n - "${cleanVersion}-SNAPSHOT"`), // eslint-disable-line - 'utf8' - ) - const pipeline = await readFile(join(import.meta.url, '..', '.buildkite', 'pipeline.yml')) await writeFile( join(import.meta.url, '..', '.buildkite', 'pipeline.yml'), diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh deleted file mode 100644 index 6316fd91e..000000000 --- a/.ci/packer_cache.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -source /usr/local/bin/bash_standard_lib.sh - -DOCKER_IMAGES="node:17-alpine -node:16-alpine -node:14-alpine -" - -for di in ${DOCKER_IMAGES} -do -(retry 2 docker pull "${di}") || echo "Error pulling ${di} Docker image, we continue" -done - diff --git a/.ci/run-elasticsearch.sh b/.ci/run-elasticsearch.sh deleted file mode 100755 index 2f360ab4f..000000000 --- a/.ci/run-elasticsearch.sh +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env bash -# -# Launch one or more Elasticsearch nodes via the Docker image, -# to form a cluster suitable for running the REST API tests. -# -# Export the STACK_VERSION variable, eg. '8.0.0-SNAPSHOT'. -# Export the TEST_SUITE variable, eg. 'free' or 'platinum' defaults to 'free'. 
-# Export the NUMBER_OF_NODES variable to start more than 1 node - -# Version 1.6.1 -# - Initial version of the run-elasticsearch.sh script -# - Deleting the volume should not dependent on the container still running -# - Fixed `ES_JAVA_OPTS` config -# - Moved to STACK_VERSION and TEST_VERSION -# - Refactored into functions and imports -# - Support NUMBER_OF_NODES -# - Added 5 retries on docker pull for fixing transient network errors -# - Added flags to make local CCR configurations work -# - Added action.destructive_requires_name=false as the default will be true in v8 -# - Added ingest.geoip.downloader.enabled=false as it causes false positives in testing -# - Moved ELASTIC_PASSWORD and xpack.security.enabled to the base arguments for "Security On by default" -# - Use https only when TEST_SUITE is "platinum", when "free" use http -# - Set xpack.security.enabled=false for "free" and xpack.security.enabled=true for "platinum" - -script_path=$(dirname "$(realpath -s "$0")") -source "$script_path/functions/imports.sh" -set -euo pipefail - -echo -e "\033[34;1mINFO:\033[0m Take down node if called twice with the same arguments (DETACH=true) or on separate terminals \033[0m" -cleanup_node "$es_node_name" - -master_node_name=${es_node_name} -cluster_name=${moniker}${suffix} - -declare -a volumes -environment=($(cat <<-END - --env ELASTIC_PASSWORD=$elastic_password - --env node.name=$es_node_name - --env cluster.name=$cluster_name - --env cluster.initial_master_nodes=$master_node_name - --env discovery.seed_hosts=$master_node_name - --env cluster.routing.allocation.disk.threshold_enabled=false - --env bootstrap.memory_lock=true - --env node.attr.testattr=test - --env path.repo=/tmp - --env repositories.url.allowed_urls=http://snapshot.test* - --env action.destructive_requires_name=false - --env ingest.geoip.downloader.enabled=false - --env cluster.deprecation_indexing.enabled=false -END -)) -if [[ "$TEST_SUITE" == "platinum" ]]; then - environment+=($(cat <<-END - --env xpack.security.enabled=true - --env xpack.license.self_generated.type=trial - --env xpack.security.http.ssl.enabled=true - --env xpack.security.http.ssl.verification_mode=certificate - --env xpack.security.http.ssl.key=certs/testnode.key - --env xpack.security.http.ssl.certificate=certs/testnode.crt - --env xpack.security.http.ssl.certificate_authorities=certs/ca.crt - --env xpack.security.transport.ssl.enabled=true - --env xpack.security.transport.ssl.verification_mode=certificate - --env xpack.security.transport.ssl.key=certs/testnode.key - --env xpack.security.transport.ssl.certificate=certs/testnode.crt - --env xpack.security.transport.ssl.certificate_authorities=certs/ca.crt -END -)) - volumes+=($(cat <<-END - --volume $ssl_cert:/usr/share/elasticsearch/config/certs/testnode.crt - --volume $ssl_key:/usr/share/elasticsearch/config/certs/testnode.key - --volume $ssl_ca:/usr/share/elasticsearch/config/certs/ca.crt -END -)) -else - environment+=($(cat <<-END - --env xpack.security.enabled=false - --env xpack.security.http.ssl.enabled=false -END -)) -fi - -cert_validation_flags="" -if [[ "$TEST_SUITE" == "platinum" ]]; then - cert_validation_flags="--insecure --cacert /usr/share/elasticsearch/config/certs/ca.crt --resolve ${es_node_name}:443:127.0.0.1" -fi - -# Pull the container, retry on failures up to 5 times with -# short delays between each attempt. Fixes most transient network errors. 
-docker_pull_attempts=0 -until [ "$docker_pull_attempts" -ge 5 ] -do - docker pull docker.elastic.co/elasticsearch/"$elasticsearch_container" && break - docker_pull_attempts=$((docker_pull_attempts+1)) - echo "Failed to pull image, retrying in 10 seconds (retry $docker_pull_attempts/5)..." - sleep 10 -done - -NUMBER_OF_NODES=${NUMBER_OF_NODES-1} -http_port=9200 -for (( i=0; i<$NUMBER_OF_NODES; i++, http_port++ )); do - node_name=${es_node_name}$i - node_url=${external_elasticsearch_url/9200/${http_port}}$i - if [[ "$i" == "0" ]]; then node_name=$es_node_name; fi - environment+=($(cat <<-END - --env node.name=$node_name -END -)) - echo "$i: $http_port $node_url " - volume_name=${node_name}-${suffix}-data - volumes+=($(cat <<-END - --volume $volume_name:/usr/share/elasticsearch/data${i} -END -)) - - # make sure we detach for all but the last node if DETACH=false (default) so all nodes are started - local_detach="true" - if [[ "$i" == "$((NUMBER_OF_NODES-1))" ]]; then local_detach=$DETACH; fi - echo -e "\033[34;1mINFO:\033[0m Starting container $node_name \033[0m" - set -x - docker run \ - --name "$node_name" \ - --network "$network_name" \ - --env "ES_JAVA_OPTS=-Xms1g -Xmx1g -da:org.elasticsearch.xpack.ccr.index.engine.FollowingEngineAssertions" \ - "${environment[@]}" \ - "${volumes[@]}" \ - --publish "$http_port":9200 \ - --ulimit nofile=65536:65536 \ - --ulimit memlock=-1:-1 \ - --detach="$local_detach" \ - --health-cmd="curl $cert_validation_flags --fail $elasticsearch_url/_cluster/health || exit 1" \ - --health-interval=2s \ - --health-retries=20 \ - --health-timeout=2s \ - --rm \ - docker.elastic.co/elasticsearch/"$elasticsearch_container"; - - set +x - if wait_for_container "$es_node_name" "$network_name"; then - echo -e "\033[32;1mSUCCESS:\033[0m Running on: $node_url\033[0m" - fi - -done - diff --git a/.ci/run-repository.sh b/.ci/run-repository.sh deleted file mode 100755 index 380d1d899..000000000 --- a/.ci/run-repository.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# parameters are available to this script - -# STACK_VERSION -- version e.g Major.Minor.Patch(-Prelease) -# TEST_SUITE -- which test suite to run: free or platinum -# ELASTICSEARCH_URL -- The url at which elasticsearch is reachable, a default is composed based on STACK_VERSION and TEST_SUITE -# NODE_JS_VERSION -- node js version (defined in test-matrix.yml, a default is hardcoded here) -script_path=$(dirname "$(realpath -s "$0")") -source "$script_path/functions/imports.sh" -set -euo pipefail - -NODE_JS_VERSION=${NODE_JS_VERSION-16} -ELASTICSEARCH_URL=${ELASTICSEARCH_URL-"$elasticsearch_url"} -elasticsearch_container=${elasticsearch_container-} - -echo -e "\033[34;1mINFO:\033[0m VERSION ${STACK_VERSION}\033[0m" -echo -e "\033[34;1mINFO:\033[0m TEST_SUITE ${TEST_SUITE}\033[0m" -echo -e "\033[34;1mINFO:\033[0m URL ${ELASTICSEARCH_URL}\033[0m" -echo -e "\033[34;1mINFO:\033[0m CONTAINER ${elasticsearch_container}\033[0m" -echo -e "\033[34;1mINFO:\033[0m NODE_JS_VERSION ${NODE_JS_VERSION}\033[0m" - -echo -e "\033[1m>>>>> Build docker container >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" - -docker build \ - --file .ci/Dockerfile \ - --tag elastic/elasticsearch-js \ - --build-arg NODE_JS_VERSION="${NODE_JS_VERSION}" \ - . 
- -echo -e "\033[1m>>>>> NPM run test:integration >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" - -repo=$(realpath "$(dirname "$(realpath -s "$0")")"/../) - -docker run \ - --network="${network_name}" \ - --env "TEST_ES_SERVER=${ELASTICSEARCH_URL}" \ - --env "TEST_SUITE=${TEST_SUITE}" \ - --volume "$repo:/usr/src/app" \ - --volume /usr/src/app/node_modules \ - --name elasticsearch-js \ - --rm \ - elastic/elasticsearch-js \ - npm run test:integration diff --git a/.ci/run-tests b/.ci/run-tests deleted file mode 100755 index a43400f61..000000000 --- a/.ci/run-tests +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -# -# Version 1.1 -# - Moved to .ci folder and separated out `run-repository.sh` -# - Add `$RUNSCRIPTS` env var for running Elasticsearch dependent products -script_path=$(dirname "$(realpath -s "$0")") -source "$script_path/functions/imports.sh" -set -euo pipefail - -echo -e "\033[1m>>>>> Start [$STACK_VERSION container] >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" -DETACH=true bash .ci/run-elasticsearch.sh - -if [[ -n "$RUNSCRIPTS" ]]; then - for RUNSCRIPT in ${RUNSCRIPTS//,/ } ; do - echo -e "\033[1m>>>>> Running run-$RUNSCRIPT.sh >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" - CONTAINER_NAME=${RUNSCRIPT} \ - DETACH=true \ - bash ".ci/run-${RUNSCRIPT}.sh" - done -fi - -echo -e "\033[1m>>>>> Repository specific tests >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" -bash .ci/run-repository.sh diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml deleted file mode 100644 index 50366ab17..000000000 --- a/.ci/test-matrix.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -STACK_VERSION: - - "8.9.0-SNAPSHOT" - -NODE_JS_VERSION: - - 18 - - 16 - - 14 - -TEST_SUITE: - - free - - platinum - -exclude: ~ diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 99514204e..3702b049f 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -9,7 +9,7 @@ jobs: strategy: matrix: - node-version: [14.x, 16.x, 18.x] + node-version: [14.x, 16.x, 18.x, 20.x] os: [ubuntu-latest, windows-latest, macOS-latest] steps: @@ -32,151 +32,13 @@ jobs: run: | npm run test:unit - # - name: Acceptance test - # run: | - # npm run test:acceptance - - # helpers-integration-test: - # name: Helpers integration test - # runs-on: ubuntu-latest - - # strategy: - # matrix: - # node-version: [12.x, 14.x, 16.x] - - # steps: - # - uses: actions/checkout@v3 - - # - name: Configure sysctl limits - # run: | - # sudo swapoff -a - # sudo sysctl -w vm.swappiness=1 - # sudo sysctl -w fs.file-max=262144 - # sudo sysctl -w vm.max_map_count=262144 - - # - name: Runs Elasticsearch - # uses: elastic/elastic-github-actions/elasticsearch@master - # with: - # stack-version: 8.0.0-SNAPSHOT - - # - name: Use Node.js ${{ matrix.node-version }} - # uses: actions/setup-node@v3 - # with: - # node-version: ${{ matrix.node-version }} - - # - name: Install - # run: | - # npm install - - # - name: Integration test - # run: | - # npm run test:integration:helpers - - # bundler-support: - # name: Bundler support - # runs-on: ubuntu-latest - - # steps: - # - uses: actions/checkout@v3 - - # - name: Configure sysctl limits - # run: | - # sudo swapoff -a - # sudo sysctl -w vm.swappiness=1 - # sudo sysctl -w fs.file-max=262144 - # sudo sysctl -w vm.max_map_count=262144 - - # - name: Runs Elasticsearch - # uses: elastic/elastic-github-actions/elasticsearch@master - # with: - # stack-version: 8.0.0-SNAPSHOT - - # - name: Use Node.js 14.x - # uses: actions/setup-node@v3 - # with: - # node-version: 14.x - - # - name: Install - # run: | - # npm install - # npm 
install --prefix test/bundlers/parcel-test - # npm install --prefix test/bundlers/rollup-test - # npm install --prefix test/bundlers/webpack-test - - # - name: Build - # run: | - # npm run build --prefix test/bundlers/parcel-test - # npm run build --prefix test/bundlers/rollup-test - # npm run build --prefix test/bundlers/webpack-test - - # - name: Run bundle - # run: | - # npm start --prefix test/bundlers/parcel-test - # npm start --prefix test/bundlers/rollup-test - # npm start --prefix test/bundlers/webpack-test - - # mock-support: - # name: Mock support - # runs-on: ubuntu-latest - - # steps: - # - uses: actions/checkout@v3 - - # - name: Use Node.js 14.x - # uses: actions/setup-node@v3 - # with: - # node-version: 14.x - - # - name: Install - # run: | - # npm install - # npm install --prefix test/mock - - # - name: Run test - # run: | - # npm test --prefix test/mock - - # code-coverage: - # name: Code coverage - # runs-on: ubuntu-latest - - # strategy: - # matrix: - # node-version: [14.x] - - # steps: - # - uses: actions/checkout@v3 - - # - name: Use Node.js ${{ matrix.node-version }} - # uses: actions/setup-node@v3 - # with: - # node-version: ${{ matrix.node-version }} - - # - name: Install - # run: | - # npm install - - # - name: Code coverage report - # run: | - # npm run test:coverage-report - - # - name: Upload coverage to Codecov - # uses: codecov/codecov-action@v1 - # with: - # file: ./coverage.lcov - # fail_ci_if_error: true - - # - name: Code coverage 100% - # run: | - # npm run test:coverage-100 - license: name: License check runs-on: ubuntu-latest strategy: matrix: - node-version: [16.x] + node-version: [20.x] steps: - uses: actions/checkout@v3 From 4f9f09cbd43089ad729689f9e87adb759c469d64 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 8 May 2023 09:57:53 -0500 Subject: [PATCH 206/647] Fix broken cron schedule (#1878) --- catalog-info.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/catalog-info.yaml b/catalog-info.yaml index 4f381d1e9..352a7a8a2 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -38,10 +38,10 @@ spec: schedules: main_semi_daily: branch: 'main' - cronline: '*/12 * * *' + cronline: '*/12 * * * *' 8_8_semi_daily: branch: '8.8' - cronline: '*/12 * * *' + cronline: '*/12 * * * *' 8_7_daily: branch: '8.7' cronline: '@daily' From e8d69cf667d7fc924d3f92d90f61b7620d874175 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 8 May 2023 11:58:13 -0500 Subject: [PATCH 207/647] Fix cron schedule again (#1879) --- catalog-info.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/catalog-info.yaml b/catalog-info.yaml index 352a7a8a2..b9c2dc399 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -38,10 +38,10 @@ spec: schedules: main_semi_daily: branch: 'main' - cronline: '*/12 * * * *' + cronline: '0 */12 * * *' 8_8_semi_daily: branch: '8.8' - cronline: '*/12 * * * *' + cronline: '0 */12 * * *' 8_7_daily: branch: '8.7' cronline: '@daily' From c55ba915fb17f0c75993ae19f74066cbc41fc508 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 8 May 2023 12:45:16 -0500 Subject: [PATCH 208/647] Fix stuck clients-ci job removal (#1880) --- .ci/jobs/defaults.yml | 81 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 .ci/jobs/defaults.yml diff --git a/.ci/jobs/defaults.yml b/.ci/jobs/defaults.yml new file mode 100644 index 000000000..d105838af --- /dev/null +++ b/.ci/jobs/defaults.yml @@ -0,0 +1,81 @@ +--- + +##### GLOBAL METADATA + +- meta: + cluster: clients-ci + +##### JOB 
DEFAULTS + +- job: + project-type: matrix + logrotate: + daysToKeep: 30 + numToKeep: 100 + parameters: + - string: + name: branch_specifier + default: refs/heads/main + description: the Git branch specifier to build (<branchName>, <tagName>, + <commitId>, etc.) + properties: + - github: + url: https://github.com/elastic/elasticsearch-js/ + - inject: + properties-content: HOME=$JENKINS_HOME + concurrent: true + node: flyweight + scm: + - git: + name: origin + credentials-id: f6c7695a-671e-4f4f-a331-acdce44ff9ba + reference-repo: /var/lib/jenkins/.git-references/elasticsearch-js.git + branches: + - ${branch_specifier} + url: https://github.com/elastic/elasticsearch-js.git + basedir: '' + wipe-workspace: 'True' + triggers: + - github + vault: + # vault read auth/approle/role/clients-ci/role-id + role_id: ddbd0d44-0e51-105b-177a-c8fdfd445126 + axes: + - axis: + type: slave + name: label + values: + - linux + - axis: + type: yaml + filename: .ci/test-matrix.yml + name: STACK_VERSION + - axis: + type: yaml + filename: .ci/test-matrix.yml + name: NODE_JS_VERSION + - axis: + type: yaml + filename: .ci/test-matrix.yml + name: TEST_SUITE + yaml-strategy: + exclude-key: exclude + filename: .ci/test-matrix.yml + wrappers: + - ansicolor + - timeout: + type: absolute + timeout: 120 + fail: true + - timestamps + - workspace-cleanup + builders: + - shell: |- + #!/usr/local/bin/runbld + .ci/run-tests + publishers: + - email: + recipients: build-lang-clients@elastic.co + - junit: + results: "**/*-junit.xml" + allow-empty-results: true From 84c3de185070cebaf737667fc4c910179edd6cb4 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 8 May 2023 16:28:53 -0500 Subject: [PATCH 209/647] Don't run integration tests if no code changes in a PR (#1881) --- .buildkite/pull-requests.json | 20 +++++++++ .ci/jobs/defaults.yml | 81 ----------------------------------- catalog-info.yaml | 2 +- 3 files changed, 21 insertions(+), 82 deletions(-) create mode 100644 .buildkite/pull-requests.json delete mode 100644 .ci/jobs/defaults.yml diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json new file mode 100644 index 000000000..794d8624c --- /dev/null +++ b/.buildkite/pull-requests.json @@ -0,0 +1,20 @@ +{ + "jobs": [ + { + "enabled": true, + "pipeline_slug": "elasticsearch-js-integration-tests", + "allowed_repo_permissions": ["admin", "write"], + "build_on_commit": true, + "skip_ci_on_only_changed": [ + "\\.md$", + "\\.asciidoc$", + "^docs\\/", + "^\\.ci\\/", + "^scripts\\/", + "^catalog-info\\.yaml$", + "^test\\/unit\\/", + "^\\.github\\/" + ] + } + ] +} diff --git a/.ci/jobs/defaults.yml b/.ci/jobs/defaults.yml deleted file mode 100644 index d105838af..000000000 --- a/.ci/jobs/defaults.yml +++ /dev/null @@ -1,81 +0,0 @@ ---- - -##### GLOBAL METADATA - -- meta: - cluster: clients-ci - -##### JOB DEFAULTS - -- job: - project-type: matrix - logrotate: - daysToKeep: 30 - numToKeep: 100 - parameters: - - string: - name: branch_specifier - default: refs/heads/main - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) 
- properties: - - github: - url: https://github.com/elastic/elasticsearch-js/ - - inject: - properties-content: HOME=$JENKINS_HOME - concurrent: true - node: flyweight - scm: - - git: - name: origin - credentials-id: f6c7695a-671e-4f4f-a331-acdce44ff9ba - reference-repo: /var/lib/jenkins/.git-references/elasticsearch-js.git - branches: - - ${branch_specifier} - url: https://github.com/elastic/elasticsearch-js.git - basedir: '' - wipe-workspace: 'True' - triggers: - - github - vault: - # vault read auth/approle/role/clients-ci/role-id - role_id: ddbd0d44-0e51-105b-177a-c8fdfd445126 - axes: - - axis: - type: slave - name: label - values: - - linux - - axis: - type: yaml - filename: .ci/test-matrix.yml - name: STACK_VERSION - - axis: - type: yaml - filename: .ci/test-matrix.yml - name: NODE_JS_VERSION - - axis: - type: yaml - filename: .ci/test-matrix.yml - name: TEST_SUITE - yaml-strategy: - exclude-key: exclude - filename: .ci/test-matrix.yml - wrappers: - - ansicolor - - timeout: - type: absolute - timeout: 120 - fail: true - - timestamps - - workspace-cleanup - builders: - - shell: |- - #!/usr/local/bin/runbld - .ci/run-tests - publishers: - - email: - recipients: build-lang-clients@elastic.co - - junit: - results: "**/*-junit.xml" - allow-empty-results: true diff --git a/catalog-info.yaml b/catalog-info.yaml index b9c2dc399..2fec157ce 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -15,7 +15,7 @@ apiVersion: backstage.io/v1alpha1 kind: Resource metadata: name: elasticsearch-js-integration-tests - description: Elasticsearch JavaScript client integration tests + description: elasticsearch-js - integration tests spec: type: buildkite-pipeline owner: group:clients-team From 89f7d54398a923e71cdb722ea0f974ec77cb2ed2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gr=C3=A9goire=20Chauvet?= Date: Wed, 10 May 2023 21:06:47 +0200 Subject: [PATCH 210/647] Fix estypesWithBody definition (#1784) --- index.d.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/index.d.ts b/index.d.ts index 2fbbb3652..89be0131c 100644 --- a/index.d.ts +++ b/index.d.ts @@ -22,6 +22,6 @@ import SniffingTransport from './lib/sniffingTransport' export * from '@elastic/transport' export * as estypes from './lib/api/types' -export * as estypesWithBody from './lib/api/types' +export * as estypesWithBody from './lib/api/typesWithBodyKey' export { Client, SniffingTransport } export type { ClientOptions, NodeOptions } from './lib/client' From e73eef084b1d35986968cc0cbe09c80213e613e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mario=20Ba=C5=A1i=C4=87?= Date: Thu, 11 May 2023 19:13:08 +0200 Subject: [PATCH 211/647] Update scroll.asciidoc (#1763) --- docs/examples/scroll.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/examples/scroll.asciidoc b/docs/examples/scroll.asciidoc index 5cc76d8a6..0f23a1bc1 100644 --- a/docs/examples/scroll.asciidoc +++ b/docs/examples/scroll.asciidoc @@ -103,7 +103,7 @@ async function run () { // get the next response if there are more quotes to fetch responseQueue.push( await client.scroll({ - scrollId: body._scroll_id, + scroll_id: body._scroll_id, scroll: '30s' }) ) @@ -146,7 +146,7 @@ async function * scrollSearch (params) { } response = await client.scroll({ - scrollId: response._scroll_id, + scroll_id: response._scroll_id, scroll: params.scroll }) } From 5d37ca6489dd9ed70f4ac4ff14bad49f69a5a74a Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 11 May 2023 12:18:03 -0500 Subject: [PATCH 212/647] Upgrade backport workflow to v2 (#1884) --- 
.github/workflows/backport.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index c0d07fef8..a10d7338a 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -11,6 +11,6 @@ jobs: name: Backport steps: - name: Backport - uses: tibdex/backport@v1 + uses: tibdex/backport@v2 with: github_token: ${{ secrets.GITHUB_TOKEN }} From 10039d9b8f5c15e9af21ab856d0a1263f307b102 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 26 May 2023 13:20:02 -0500 Subject: [PATCH 213/647] Update changelog for 8.7 and 8.8 (#1897) * Update changelog * Add note about a bugfix for types with a body key --- docs/changelog.asciidoc | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index e4e245616..caf394abd 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,35 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.8.0 + +[discrete] +==== Features + +[discrete] +===== Support for Elasticsearch `v8.8.0` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.8/release-notes-8.7.0.html[here]. + +[discrete] +==== Fixes + +[discrete] +===== Fix type declarations for legacy types with a body key https://github.com/elastic/elasticsearch-js/pull/1784[#1784] + +Prior releases contained a bug where type declarations for legacy types that include a `body` key were not actually importing the type that includes the `body` key. + +[discrete] +=== 8.7.0 + +[discrete] +===== Support for Elasticsearch `v8.7.0` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.7/release-notes-8.7.0.html[here]. + [discrete] === 8.6.0 From 5c4c29f51c62eab407657cd86725fe503a70c975 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 30 May 2023 13:11:28 -0500 Subject: [PATCH 214/647] More integration test fixes (#1889) --- .buildkite/pipeline.yml | 1 + .buildkite/run-elasticsearch.sh | 5 ++++- .ci/make.mjs | 2 +- .npmignore | 1 + README.md | 2 +- catalog-info.yaml | 2 +- package.json | 6 +++--- test/integration/index.js | 11 +++++++++-- test/integration/reporter.js | 3 ++- test/integration/test-runner.js | 28 ++++++++++++++++++++-------- 10 files changed, 43 insertions(+), 18 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 006a4ee87..bd8927c44 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -29,3 +29,4 @@ steps: artifacts: "junit-output/junit-*.xml" job-uuid-file-pattern: 'junit-(.*).xml' fail-build-on-error: true + failure-format: file diff --git a/.buildkite/run-elasticsearch.sh b/.buildkite/run-elasticsearch.sh index 141330093..d9e0c6fd3 100755 --- a/.buildkite/run-elasticsearch.sh +++ b/.buildkite/run-elasticsearch.sh @@ -33,7 +33,10 @@ master_node_name=${es_node_name} cluster_name=${moniker}${suffix} # Set vm.max_map_count kernel setting to 262144 -sudo sysctl -w vm.max_map_count=262144 +if [ "$(sysctl vm.max_map_count)" != 'vm.max_map_count = 262144' ]; then + echo "vm.max_map_count may be too low. resetting." 
+ sudo sysctl -w vm.max_map_count=262144 +fi declare -a volumes environment=($(cat <<-END diff --git a/.ci/make.mjs b/.ci/make.mjs index 6940aa97f..305f066e2 100644 --- a/.ci/make.mjs +++ b/.ci/make.mjs @@ -89,7 +89,7 @@ async function bump (args) { const pipeline = await readFile(join(import.meta.url, '..', '.buildkite', 'pipeline.yml')) await writeFile( join(import.meta.url, '..', '.buildkite', 'pipeline.yml'), - pipeline.replace(/STACK_VERSION: [0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?/, `STACK_VERSION: - ${cleanVersion}-SNAPSHOT`), // eslint-disable-line + pipeline.replace(/STACK_VERSION: [0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?/, `STACK_VERSION: ${cleanVersion}-SNAPSHOT`), // eslint-disable-line 'utf8' ) } diff --git a/.npmignore b/.npmignore index ddfa7b5b6..3548958d0 100644 --- a/.npmignore +++ b/.npmignore @@ -66,6 +66,7 @@ scripts # ci configuration .ci .travis.yml +.buildkite certs .github CODE_OF_CONDUCT.md diff --git a/README.md b/README.md index 1912c49ed..b33b20d65 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ # Elasticsearch Node.js client -[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://clients-ci.elastic.co/buildStatus/icon?job=elastic%2Belasticsearch-js%2Bmain)](https://clients-ci.elastic.co/view/JavaScript/job/elastic+elasticsearch-js+main/) [![Node CI](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml/badge.svg)](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) +[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://badge.buildkite.com/15e4246eb268ea78f6e10aa90bce38c1abb0a4489e79f5a0ac.svg)](https://buildkite.com/elastic/elasticsearch-javascript-client-integration-tests/builds?branch=main) [![Node CI](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml/badge.svg)](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) The official Node.js client for Elasticsearch. 
diff --git a/catalog-info.yaml b/catalog-info.yaml index 2fec157ce..daeb73974 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -24,7 +24,7 @@ spec: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline metadata: - name: Elasticsearch JavaScript client integration tests + name: elasticsearch-js - integration tests spec: repository: elastic/elasticsearch-js pipeline_file: .buildkite/pipeline.yml diff --git a/package.json b/package.json index dfb047ad4..52e303621 100644 --- a/package.json +++ b/package.json @@ -61,9 +61,9 @@ "@types/split2": "^3.2.1", "@types/stoppable": "^1.1.1", "@types/tap": "^15.0.7", + "chai": "^4.3.7", "cross-zip": "^4.0.0", "desm": "^1.2.0", - "fast-deep-equal": "^3.1.3", "into-stream": "^7.0.0", "js-yaml": "^4.1.0", "license-checker": "^25.0.1", @@ -83,7 +83,7 @@ "typescript": "^4.6.4", "workq": "^3.0.0", "xmlbuilder2": "^3.0.2", - "zx": "^6.1.0" + "zx": "^7.2.2" }, "dependencies": { "@elastic/transport": "^8.3.1", @@ -96,4 +96,4 @@ "coverage": false, "check-coverage": false } -} \ No newline at end of file +} diff --git a/test/integration/index.js b/test/integration/index.js index 5f92eee21..defdb400f 100644 --- a/test/integration/index.js +++ b/test/integration/index.js @@ -48,6 +48,13 @@ const options = minimist(process.argv.slice(2), { }) const freeSkips = { + // working on fixes for these + '/free/aggregations/bucket_selector.yml': ['bad script'], + '/free/aggregations/bucket_script.yml': ['bad script'], + + // either the YAML test definition is wrong, or this fails because JSON.stringify is coercing "1.0" to "1" + '/free/aggregations/percentiles_bucket.yml': ['*'], + // not supported yet '/free/cluster.desired_nodes/10_basic.yml': ['*'], @@ -186,7 +193,7 @@ function runner (opts = {}) { const options = { node: opts.node } if (opts.isXPack) { options.tls = { - ca: readFileSync(join(__dirname, '..', '..', '.ci', 'certs', 'ca.crt'), 'utf8'), + ca: readFileSync(join(__dirname, '..', '..', '.buildkite', 'certs', 'ca.crt'), 'utf8'), rejectUnauthorized: false } } @@ -310,7 +317,7 @@ async function start ({ client, isXPack }) { if (name === 'setup' || name === 'teardown') continue if (options.test && !name.endsWith(options.test)) continue - const junitTestCase = junitTestSuite.testcase(name) + const junitTestCase = junitTestSuite.testcase(name, `node_${process.version}/${cleanPath}`) stats.total += 1 if (shouldSkip(isXPack, file, name)) { diff --git a/test/integration/reporter.js b/test/integration/reporter.js index 0d3621de7..5db288b8e 100644 --- a/test/integration/reporter.js +++ b/test/integration/reporter.js @@ -73,13 +73,14 @@ function createJunitReporter () { } function createTestCase (testcaseList) { - return function testcase (name) { + return function testcase (name, file) { assert(name, 'The testcase name is required') const startTime = Date.now() const tcase = { '@id': new Date().toISOString(), '@name': name } + if (file) tcase['@file'] = file testcaseList.push(tcase) return { failure (error) { diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js index 826293cf8..64570945a 100644 --- a/test/integration/test-runner.js +++ b/test/integration/test-runner.js @@ -21,14 +21,17 @@ /* eslint camelcase: 0 */ -const assert = require('assert') +const chai = require('chai') const semver = require('semver') const helper = require('./helper') -const deepEqual = require('fast-deep-equal') const { join } = require('path') const { locations } = require('../../scripts/download-artifacts') const packageJson = require('../../package.json') 
+chai.config.showDiff = true +chai.config.truncateThreshold = 0 +const { assert } = chai + const { delve, to, isXPackTemplate, sleep, updateParams } = helper const supportedFeatures = [ @@ -485,7 +488,17 @@ function build (opts = {}) { cmd.params.body = JSON.parse(cmd.params.body) } - const [err, result] = await to(api(cmd.params, options)) + let err, result; + try { + [err, result] = await to(api(cmd.params, options)) + } catch (exc) { + if (JSON.stringify(exc).includes('resource_already_exists_exception')) { + console.warn(`Resource already exists: ${JSON.stringify(cmd.params)}`) + // setup task was already done because cleanup didn't catch it? do nothing + } else { + throw exc + } + } let warnings = result ? result.warnings : null const body = result ? result.body : null @@ -522,7 +535,7 @@ function build (opts = {}) { } stats.assertions += 1 - assert.ok(deepEqual(warnings, action.warnings)) + assert.deepEqual(warnings, action.warnings) } if (action.catch) { @@ -530,7 +543,7 @@ function build (opts = {}) { assert.ok(err, `Expecting an error, but instead got ${JSON.stringify(err)}, the response was ${JSON.stringify(result)}`) assert.ok( parseDoError(err, action.catch), - `the error should be: ${action.catch}` + `the error should match: ${action.catch}, found ${JSON.stringify(err.body)}` ) try { response = JSON.parse(err.body) @@ -691,7 +704,7 @@ function is_false (val, msg) { function match (val1, val2, action) { // both values are objects if (typeof val1 === 'object' && typeof val2 === 'object') { - assert.ok(deepEqual(val1, val2), action) + assert.deepEqual(val1, val2, typeof action === 'object' ? JSON.stringify(action) : action) // the first value is the body as string and the second a pattern string } else if ( typeof val1 === 'string' && typeof val2 === 'string' && @@ -702,8 +715,7 @@ function match (val1, val2, action) { .replace(/(^|[^\\])\s+/g, '$1') .slice(1, -1) // 'm' adds the support for multiline regex - assert.ok(new RegExp(regStr, 'm').test(val1), `should match pattern provided: ${val2}, but got: ${val1}`) - // tap.match(val1, new RegExp(regStr, 'm'), `should match pattern provided: ${val2}, action: ${JSON.stringify(action)}`) + assert.match(val1, new RegExp(regStr, 'm'), `should match pattern provided: ${val2}, but got: ${val1}`) // everything else } else { assert.equal(val1, val2, `should be equal: ${val1} - ${val2}, action: ${JSON.stringify(action)}`) From de17dc050c38bb8dd47c0c0b341e1a273e63559e Mon Sep 17 00:00:00 2001 From: Karl Riis Date: Wed, 7 Jun 2023 00:08:21 +0300 Subject: [PATCH 215/647] fix: keep track of indexSlice explicitly in bulk helper response handling loop (#1759) Co-authored-by: Josh Mock --- src/helpers.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/helpers.ts b/src/helpers.ts index 9cba4db8f..9c5c822bf 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -823,13 +823,13 @@ export default class Helpers { } const retry = [] const { items } = result + let indexSlice = 0 for (let i = 0, len = items.length; i < len; i++) { const action = items[i] const operation = Object.keys(action)[0] // @ts-expect-error const responseItem = action[operation as keyof T.BulkResponseItemContainer] assert(responseItem !== undefined, 'The responseItem is undefined, please file a bug report') - const indexSlice = operation !== 'delete' ? 
i * 2 : i if (responseItem.status >= 400) { // 429 is the only staus code where we might want to retry @@ -857,6 +857,7 @@ export default class Helpers { } else { stats.successful += 1 } + operation === 'delete' ? indexSlice += 1 : indexSlice += 2 } callback(null, retry) }) From a89f71490013989a2688415d53d4404c4dd3d829 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 7 Jun 2023 14:27:20 -0500 Subject: [PATCH 216/647] Fix broken Github action on Node 14.x (#1904) --- .github/workflows/nodejs.yml | 59 +++++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 3702b049f..ca00cb372 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -1,3 +1,4 @@ +--- name: Node CI on: [push, pull_request] @@ -8,29 +9,37 @@ jobs: runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: node-version: [14.x, 16.x, 18.x, 20.x] os: [ubuntu-latest, windows-latest, macOS-latest] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v3 - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v3 - with: - node-version: ${{ matrix.node-version }} + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v3 + with: + node-version: ${{ matrix.node-version }} - - name: Install - run: | - npm install + # workaround for failing tests on Node.js 14.x + # see https://github.com/actions/setup-node/issues/411 + - name: Force install specific npm version + run: | + npm install --global npm@8.3.1 + npm install --global npm@9.7.1 - - name: Lint - run: | - npm run lint + - name: Install + run: | + npm install - - name: Unit test - run: | - npm run test:unit + - name: Lint + run: | + npm run lint + + - name: Unit test + run: | + npm run test:unit license: name: License check @@ -41,17 +50,17 @@ jobs: node-version: [20.x] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v3 - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v3 - with: - node-version: ${{ matrix.node-version }} + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v3 + with: + node-version: ${{ matrix.node-version }} - - name: Install - run: | - npm install + - name: Install + run: | + npm install - - name: License checker - run: | - npm run license-checker + - name: License checker + run: | + npm run license-checker From 0ab63df56769e44a4ba2bae19bdaf3208114966f Mon Sep 17 00:00:00 2001 From: Nathan Reese Date: Thu, 8 Jun 2023 10:34:55 -0600 Subject: [PATCH 217/647] add link to elasticsearch-specification github repository in typescript documentation (#1907) --- docs/typescript.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/typescript.asciidoc b/docs/typescript.asciidoc index becaf488b..07534d733 100644 --- a/docs/typescript.asciidoc +++ b/docs/typescript.asciidoc @@ -6,6 +6,7 @@ of type definitions of Elasticsearch's API surface. The types are not 100% complete yet. Some APIs are missing (the newest ones, e.g. EQL), and others may contain some errors, but we are continuously pushing fixes & improvements. +Contribute type fixes and improvements to https://github.com/elastic/elasticsearch-specification[elasticsearch-specification github repository]. NOTE: The client is developed against the https://www.npmjs.com/package/typescript?activeTab=versions[latest] version of TypeScript. 
Furthermore, unless you have set `skipLibCheck` to `true`, From 83c3f0c880633828157d686d39981c4b66c942f6 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 8 Jun 2023 11:42:49 -0500 Subject: [PATCH 218/647] Upgrade transport to 8.3.2 (#1902) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 52e303621..520f7d768 100644 --- a/package.json +++ b/package.json @@ -86,7 +86,7 @@ "zx": "^7.2.2" }, "dependencies": { - "@elastic/transport": "^8.3.1", + "@elastic/transport": "^8.3.2", "tslib": "^2.4.0" }, "tap": { From f30d06b7947ad3e321d1d0e06ebde1feca604ae2 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 8 Jun 2023 12:20:43 -0500 Subject: [PATCH 219/647] Stop running integration tests on PRs (#1910) --- .buildkite/pipeline.yml | 1 - catalog-info.yaml | 8 ++------ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index bd8927c44..04dcbf016 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -12,7 +12,6 @@ steps: - "free" - "platinum" nodejs: - - "14" - "16" - "18" - "20" diff --git a/catalog-info.yaml b/catalog-info.yaml index daeb73974..38382017f 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -33,6 +33,8 @@ spec: access_level: MANAGE_BUILD_AND_READ everyone: access_level: READ_ONLY + provider_settings: + build_pull_requests: false cancel_intermediate_builds: true cancel_intermediate_builds_branch_filter: '!main' schedules: @@ -45,9 +47,3 @@ spec: 8_7_daily: branch: '8.7' cronline: '@daily' - 8_6_daily: - branch: '8.6' - cronline: '@daily' - 7_17_daily: - branch: '7.17' - cronline: '@daily' From a2b5e66a467f70159253b22f4bfed8ca109d524f Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 12 Jun 2023 16:03:11 -0500 Subject: [PATCH 220/647] Add test to verify bulk helper index drift fix (#1912) --- test/unit/helpers/bulk.test.ts | 65 ++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts index 5a182009e..dbabef07c 100644 --- a/test/unit/helpers/bulk.test.ts +++ b/test/unit/helpers/bulk.test.ts @@ -1070,6 +1070,7 @@ test('bulk delete', t => { const [{ port }, server] = await buildServer(handler) const client = new Client({ node: `http://localhost:${port}` }) let id = 0 + const result = await client.helpers.bulk({ datasource: dataset.slice(), flushBytes: 1, @@ -1106,6 +1107,70 @@ test('bulk delete', t => { server.stop() }) + t.test('Should call onDrop on the correct document when doing a mix of operations that includes deletes', async t => { + // checks to ensure onDrop doesn't provide the wrong document when some operations are deletes + // see https://github.com/elastic/elasticsearch-js/issues/1751 + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + res.setHeader('content-type', 'application/json') + res.end(JSON.stringify({ + took: 0, + errors: true, + items: [ + { delete: { status: 200 } }, + { index: { status: 429 } }, + { index: { status: 200 } } + ] + })) + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + let counter = 0 + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + concurrency: 1, + wait: 10, + retries: 0, + onDocument (doc) { + counter++ + if (counter === 1) { + return { + delete: { + _index: 'test', + _id: String(counter) + } + } + } else { + return { + index: { + _index: 'test', + } + } + } + }, + onDrop 
(doc) { + t.same(doc, { + status: 429, + error: null, + operation: { index: { _index: 'test' } }, + document: { user: "arya", age: 18 }, + retried: false, + }) + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 2, + retry: 0, + failed: 1, + aborted: false + }) + server.stop() + }) + t.end() }) From ac694d3ede915c99e7acafd98688558223d0ad2d Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 12 Jun 2023 16:35:11 -0500 Subject: [PATCH 221/647] Update changelog (#1917) --- docs/changelog.asciidoc | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index caf394abd..31210667f 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,27 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.8.1 + +===== Support for Elasticsearch `v8.8.1` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.8/release-notes-8.8.1.html[here]. + +[discrete] +==== Fixes + +[discrete] +===== Fix index drift bug in bulk helper https://github.com/elastic/elasticsearch-js/pull/1759[#1759] + +Fixes a bug in the bulk helper that would cause `onDrop` to send back the wrong JSON document or error on a nonexistent document when an error occurred on a bulk HTTP request that contained a `delete` action. + +[discrete] +===== Fix a memory leak caused by an outdated version of Undici https://github.com/elastic/elasticsearch-js/pull/1902[#1902] + +Undici 5.5.1, used by https://github.com/elastic/elastic-transport-js[elastic-transport-js], could create a memory leak when a high volume of requests created too many HTTP `abort` listeners. Upgrading Undici to 5.22.1 removed the memory leak. + [discrete] === 8.8.0 @@ -11,7 +32,7 @@ ===== Support for Elasticsearch `v8.8.0` You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.8/release-notes-8.7.0.html[here]. +https://www.elastic.co/guide/en/elasticsearch/reference/8.8/release-notes-8.8.0.html[here]. [discrete] ==== Fixes From b717de8f3f43b66bb47813e4bdf8a83bdc0e9f2a Mon Sep 17 00:00:00 2001 From: Brandon Morelli Date: Mon, 12 Jun 2023 15:16:50 -0700 Subject: [PATCH 222/647] Update changelog.asciidoc (#1918) --- docs/changelog.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 31210667f..cf8b14e23 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -4,7 +4,8 @@ [discrete] === 8.8.1 -===== Support for Elasticsearch `v8.8.1` +[discrete] +=== Support for Elasticsearch `v8.8.1` You can find all the API changes https://www.elastic.co/guide/en/elasticsearch/reference/8.8/release-notes-8.8.1.html[here]. From 0b9be7c1792ed68bd6aa7e8ee8aa70b2adbc73b0 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 13 Jun 2023 13:40:05 -0500 Subject: [PATCH 223/647] Add missing header to changelog (#1920) --- docs/changelog.asciidoc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index cf8b14e23..4de89967d 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -5,7 +5,10 @@ === 8.8.1 [discrete] -=== Support for Elasticsearch `v8.8.1` +==== Features + +[discrete] +===== Support for Elasticsearch `v8.8.1` You can find all the API changes https://www.elastic.co/guide/en/elasticsearch/reference/8.8/release-notes-8.8.1.html[here]. 
From 960dff37f266d64b197298cc80250b68eac7c62f Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 27 Jun 2023 11:51:11 -0500 Subject: [PATCH 224/647] Generate docstrings and better API reference docs (#1922) --- docs/reference.asciidoc | 5832 +++++++++++++++++++-- src/api/api/async_search.ts | 16 + src/api/api/autoscaling.ts | 16 + src/api/api/bulk.ts | 4 + src/api/api/cat.ts | 105 + src/api/api/ccr.ts | 52 + src/api/api/clear_scroll.ts | 4 + src/api/api/close_point_in_time.ts | 4 + src/api/api/cluster.ts | 87 + src/api/api/count.ts | 4 + src/api/api/create.ts | 6 + src/api/api/dangling_indices.ts | 12 + src/api/api/delete.ts | 4 + src/api/api/delete_by_query.ts | 4 + src/api/api/delete_by_query_rethrottle.ts | 4 + src/api/api/delete_script.ts | 4 + src/api/api/enrich.ts | 20 + src/api/api/eql.ts | 16 + src/api/api/exists.ts | 4 + src/api/api/exists_source.ts | 4 + src/api/api/explain.ts | 4 + src/api/api/features.ts | 8 + src/api/api/field_caps.ts | 4 + src/api/api/fleet.ts | 10 + src/api/api/get.ts | 4 + src/api/api/get_script.ts | 4 + src/api/api/get_script_context.ts | 4 + src/api/api/get_script_languages.ts | 4 + src/api/api/get_source.ts | 4 + src/api/api/graph.ts | 4 + src/api/api/health_report.ts | 4 + src/api/api/ilm.ts | 44 + src/api/api/index.ts | 4 + src/api/api/indices.ts | 324 +- src/api/api/info.ts | 4 + src/api/api/ingest.ts | 24 + src/api/api/knn_search.ts | 4 + src/api/api/license.ts | 28 + src/api/api/logstash.ts | 12 + src/api/api/mget.ts | 4 + src/api/api/migration.ts | 12 + src/api/api/ml.ts | 292 ++ src/api/api/monitoring.ts | 4 + src/api/api/msearch.ts | 4 + src/api/api/msearch_template.ts | 4 + src/api/api/mtermvectors.ts | 4 + src/api/api/nodes.ts | 28 + src/api/api/open_point_in_time.ts | 4 + src/api/api/ping.ts | 4 + src/api/api/put_script.ts | 4 + src/api/api/rank_eval.ts | 4 + src/api/api/reindex.ts | 6 + src/api/api/reindex_rethrottle.ts | 4 + src/api/api/render_search_template.ts | 4 + src/api/api/rollup.ts | 32 + src/api/api/scripts_painless_execute.ts | 4 + src/api/api/scroll.ts | 4 + src/api/api/search.ts | 6 +- src/api/api/search_application.ts | 91 +- src/api/api/search_mvt.ts | 4 + src/api/api/search_shards.ts | 4 + src/api/api/search_template.ts | 4 + src/api/api/searchable_snapshots.ts | 16 + src/api/api/security.ts | 272 + src/api/api/shutdown.ts | 12 + src/api/api/slm.ts | 36 + src/api/api/snapshot.ts | 48 + src/api/api/sql.ts | 24 + src/api/api/ssl.ts | 4 + src/api/api/synonyms.ts | 123 + src/api/api/tasks.ts | 12 + src/api/api/terms_enum.ts | 4 + src/api/api/termvectors.ts | 4 + src/api/api/text_structure.ts | 4 + src/api/api/transform.ts | 44 + src/api/api/update.ts | 4 + src/api/api/update_by_query.ts | 5 + src/api/api/update_by_query_rethrottle.ts | 4 + src/api/api/watcher.ts | 96 + src/api/api/xpack.ts | 8 + src/api/index.ts | 8 + src/api/types.ts | 174 +- src/api/typesWithBodyKey.ts | 177 +- 83 files changed, 7797 insertions(+), 513 deletions(-) create mode 100644 src/api/api/synonyms.ts diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 240a4fa72..19ffd139a 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -32,8 +32,22 @@ Allows to perform multiple index/update/delete operations in a single request. {ref}/docs-bulk.html[Endpoint documentation] [source,ts] ---- -client.bulk(...) +client.bulk({ ... 
}) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string)*: Default index for items which don't provide one +** *`pipeline` (Optional, string)*: The pipeline id to preprocess incoming documents with +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. +** *`routing` (Optional, string)*: Specific routing value +** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or default list of fields to return, can be overridden on each sub-request +** *`_source_excludes` (Optional, string | string[])*: Default list of fields to exclude from the returned _source field, can be overridden on each sub-request +** *`_source_includes` (Optional, string | string[])*: Default list of fields to extract and return from the _source field, can be overridden on each sub-request +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the bulk operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) +** *`require_alias` (Optional, boolean)*: Sets require_alias for all incoming documents. Defaults to unset (false) [discrete] === clear_scroll @@ -42,8 +56,13 @@ Explicitly clears the search context for a scroll. {ref}/clear-scroll-api.html[Endpoint documentation] [source,ts] ---- -client.clearScroll(...) +client.clearScroll({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`scroll_id` (Optional, string | string[])*: A list of scroll IDs to clear [discrete] === close_point_in_time @@ -52,8 +71,13 @@ Close a point in time {ref}/point-in-time-api.html[Endpoint documentation] [source,ts] ---- -client.closePointInTime(...) +client.closePointInTime({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)* [discrete] === count @@ -62,8 +86,28 @@ Returns number of documents matching a query. {ref}/search-count.html[Endpoint documentation] [source,ts] ---- -client.count(...) +client.count({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of indices to restrict the results +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`analyzer` (Optional, string)*: The analyzer to use for the query string +** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
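+
+As a quick illustration, a minimal `count` call might look like the sketch below; the index name and query are hypothetical, and the number of matching documents comes back on the `count` property of the response:
+
+[source,ts]
+----
+// count the documents that match a simple query (illustrative values)
+const result = await client.count({
+  index: 'my-index',
+  query: { match: { user: 'kimchy' } }
+})
+console.log(result.count)
+----
+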
+** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored +** *`min_score` (Optional, number)*: Include only documents with a specific `_score` value in the result +** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) +** *`routing` (Optional, string)*: A list of specific routing values +** *`terminate_after` (Optional, number)*: The maximum count for each shard, upon reaching which the query execution will terminate early +** *`q` (Optional, string)*: Query in the Lucene query string syntax +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* [discrete] === create @@ -74,8 +118,21 @@ Returns a 409 response when a document with a same ID already exists in the inde {ref}/docs-index_.html[Endpoint documentation] [source,ts] ---- -client.create(...) +client.create({ id, index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Document ID +** *`index` (string)*: The name of the index +** *`pipeline` (Optional, string)*: The pipeline id to preprocess incoming documents with +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. +** *`routing` (Optional, string)*: Specific routing value +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`version` (Optional, number)*: Explicit version number for concurrency control +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) [discrete] === delete @@ -84,8 +141,22 @@ Removes a document from the index. {ref}/docs-delete.html[Endpoint documentation] [source,ts] ---- -client.delete(...) 
[discrete]
=== delete
Removes a document from the index.

{ref}/docs-delete.html[Endpoint documentation]
[source,ts]
----
-client.delete(...)
+client.delete({ id, index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: The document ID
+** *`index` (string)*: The name of the index
+** *`if_primary_term` (Optional, number)*: only perform the delete operation if the last operation that has changed the document has the specified primary term
+** *`if_seq_no` (Optional, number)*: only perform the delete operation if the last operation that has changed the document has the specified sequence number
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes.
+** *`routing` (Optional, string)*: Specific routing value
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`version` (Optional, number)*: Explicit version number for concurrency control
+** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the delete operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)
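+
+A minimal `delete` sketch, assuming a hypothetical index and document ID; the response reports the outcome on its `result` property:
+
+[source,ts]
+----
+const result = await client.delete({ index: 'my-index', id: '1' })
+console.log(result.result) // e.g. 'deleted'
+----
+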
[discrete]
=== delete_by_query
Deletes documents matching the provided query.

{ref}/docs-delete-by-query.html[Endpoint documentation]
[source,ts]
----
-client.deleteByQuery(...)
+client.deleteByQuery({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`analyzer` (Optional, string)*: The analyzer to use for the query string
+** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false)
+** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do when the delete by query hits version conflicts?
+** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR)
+** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`from` (Optional, number)*: Starting offset (default: 0)
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored
+** *`max_docs` (Optional, number)*: Maximum number of documents to process (default: all documents)
+** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
+** *`refresh` (Optional, boolean)*: Should the affected indexes be refreshed?
+** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to index level setting
+** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. -1 means no throttle.
+** *`routing` (Optional, string)*: A list of specific routing values
+** *`q` (Optional, string)*: Query in the Lucene query string syntax
+** *`scroll` (Optional, string | -1 | 0)*: Specify how long a consistent view of the index should be maintained for scrolled search
+** *`scroll_size` (Optional, number)*: Size on the scroll request powering the delete by query
+** *`search_timeout` (Optional, string | -1 | 0)*: Explicit timeout for each search request. Defaults to no timeout.
+** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type
+** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`.
+** *`sort` (Optional, string[])*: A list of : pairs
+** *`stats` (Optional, string[])*: Specific 'tag' of the request for logging and statistical purposes
+** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early.
+** *`timeout` (Optional, string | -1 | 0)*: Time each individual bulk request should wait for shards that are unavailable.
+** *`version` (Optional, boolean)*: Specify whether to return document version as part of a hit
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the delete by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)
+** *`wait_for_completion` (Optional, boolean)*: Should the request block until the delete by query is complete.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
+** *`slice` (Optional, { field, id, max })*

[discrete]
=== delete_by_query_rethrottle
Changes the number of requests per second for a particular Delete By Query operation.

{ref}/docs-delete-by-query.html[Endpoint documentation]
[source,ts]
----
-client.deleteByQueryRethrottle(...)
+client.deleteByQueryRethrottle({ task_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`task_id` (string | number)*: The task id to rethrottle
+** *`requests_per_second` (Optional, float)*: The throttle to set on this request in floating sub-requests per second. -1 means set no throttle.
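+
+To sketch how the two APIs above fit together (index name, query, and throttle value are hypothetical): a delete by query started with `wait_for_completion: false` returns a task id, which can later be passed to `delete_by_query_rethrottle`:
+
+[source,ts]
+----
+const { task } = await client.deleteByQuery({
+  index: 'my-index',
+  query: { match: { status: 'stale' } },
+  wait_for_completion: false
+})
+// the task id is only present when wait_for_completion is false
+if (task != null) {
+  await client.deleteByQueryRethrottle({ task_id: task, requests_per_second: 500 })
+}
+----
+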
[discrete]
=== delete_by_query_rethrottle
Changes the number of requests per second for a particular Delete By Query operation.
{ref}/docs-delete-by-query.html[Endpoint documentation]
[source,ts]
----
-client.deleteByQueryRethrottle(...)
+client.deleteByQueryRethrottle({ task_id })
----
[discrete]
==== Arguments

* *Request (object):*
** *`task_id` (string | number)*: The task id to rethrottle
** *`requests_per_second` (Optional, float)*: The throttle to set on this request in floating sub-requests per second. -1 means set no throttle.
[discrete]
=== delete_script
Deletes a script.
{ref}/modules-scripting.html[Endpoint documentation]
[source,ts]
----
-client.deleteScript(...)
+client.deleteScript({ id })
----
[discrete]
==== Arguments

* *Request (object):*
** *`id` (string)*: Script ID
** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
[discrete]
=== exists
Returns information about whether a document exists in an index.
{ref}/docs-get.html[Endpoint documentation]
[source,ts]
----
-client.exists(...)
+client.exists({ id, index })
----
[discrete]
==== Arguments

* *Request (object):*
** *`id` (string)*: The document ID
** *`index` (string)*: The name of the index
** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
** *`realtime` (Optional, boolean)*: Specify whether to perform the operation in realtime or search mode
** *`refresh` (Optional, boolean)*: Refresh the shard containing the document before performing the operation
** *`routing` (Optional, string)*: Specific routing value
** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return
** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field
** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field
** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response
** *`version` (Optional, number)*: Explicit version number for concurrency control
** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type
[discrete]
=== exists_source
Returns information about whether a document source exists in an index.
{ref}/docs-get.html[Endpoint documentation]
[source,ts]
----
-client.existsSource(...)
+client.existsSource({ id, index })
----
[discrete]
==== Arguments

* *Request (object):*
** *`id` (string)*: The document ID
** *`index` (string)*: The name of the index
** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
** *`realtime` (Optional, boolean)*: Specify whether to perform the operation in realtime or search mode
** *`refresh` (Optional, boolean)*: Refresh the shard containing the document before performing the operation
** *`routing` (Optional, string)*: Specific routing value
** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return
** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field
** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field
** *`version` (Optional, number)*: Explicit version number for concurrency control
** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type
[discrete]
=== explain
Returns information about why a specific document matches (or doesn't match) a query.
{ref}/search-explain.html[Endpoint documentation]
[source,ts]
----
-client.explain(...)
+client.explain({ id, index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The document ID +** *`index` (string)*: The name of the index +** *`analyzer` (Optional, string)*: The analyzer for the query string query +** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcards and prefix queries in the query string query should be analyzed (default: false) +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) +** *`df` (Optional, string)*: The default field for query string query (default: _all) +** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored +** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) +** *`routing` (Optional, string)*: Specific routing value +** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return +** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field +** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response +** *`q` (Optional, string)*: Query in the Lucene query string syntax +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* [discrete] === field_caps @@ -154,8 +324,25 @@ Returns the information about the capabilities of fields among multiple indices. {ref}/search-field-caps.html[Endpoint documentation] [source,ts] ---- -client.fieldCaps(...) +client.fieldCaps({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, +or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request +targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. +** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. 
** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response.
** *`include_unmapped` (Optional, boolean)*: If true, unmapped fields are included in the response.
** *`filters` (Optional, string)*: An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent
** *`types` (Optional, string[])*: Only return results for fields that have one of the types in the list
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard.
** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests.
These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.
[discrete]
=== get
Returns a document.
{ref}/docs-get.html[Endpoint documentation]
[source,ts]
----
client.get({ id, index })
----
[discrete]
==== Arguments

* *Request (object):*
** *`id` (string)*: Unique identifier of the document.
** *`index` (string)*: Name of the index that contains the document.
** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default.
** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time.
** *`refresh` (Optional, boolean)*: If true, Elasticsearch refreshes the affected shards to make this operation visible to search. If false, do nothing with refreshes.
** *`routing` (Optional, string)*: Target the specified primary shard.
** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return.
** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response.
** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response.
** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response
** *`version` (Optional, number)*: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed.
** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: internal, external, external_gte.
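
A short usage sketch (index, ID, and field names are placeholders):

[source,ts]
----
const response = await client.get({
  index: 'my-index',
  id: '1',
  _source_includes: ['title', 'user'] // hypothetical fields
})
console.log(response.found, response._source)
----
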
[discrete]
=== get_script
Returns a script.
{ref}/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.getScript({ id })
----
[discrete]
==== Arguments

* *Request (object):*
** *`id` (string)*: Script ID
** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
[discrete]
=== get_script_context
Returns all script contexts.
{painless}/painless-contexts.html[Endpoint documentation]
[source,ts]
----
client.getScriptContext()
----
[discrete]
==== Arguments

* *Request (object):*
[discrete]
=== get_script_languages
Returns available script types, languages and contexts
{ref}/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.getScriptLanguages()
----
[discrete]
==== Arguments

* *Request (object):*
[discrete]
=== get_source
Returns the source of a document.
{ref}/docs-get.html[Endpoint documentation]
[source,ts]
----
client.getSource({ id, index })
----
[discrete]
==== Arguments

* *Request (object):*
** *`id` (string)*: Unique identifier of the document.
** *`index` (string)*: Name of the index that contains the document.
** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default.
** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time.
** *`refresh` (Optional, boolean)*: If true, Elasticsearch refreshes the affected shards to make this operation visible to search. If false, do nothing with refreshes.
** *`routing` (Optional, string)*: Target the specified primary shard.
** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return.
** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response.
** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response.
** *`stored_fields` (Optional, string | string[])*
** *`version` (Optional, number)*: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed.
** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: internal, external, external_gte.
[discrete]
=== health_report
Returns the health of the cluster.
{ref}/health-api.html[Endpoint documentation]
[source,ts]
----
client.healthReport({ ... })
----
[discrete]
==== Arguments

* *Request (object):*
** *`feature` (Optional, string | string[])*: A feature of the cluster, as returned by the top-level health report API.
** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout.
** *`verbose` (Optional, boolean)*: Opt-in for more information about the health of the system.
** *`size` (Optional, number)*: Limit the number of affected resources the health report API returns.
[discrete]
=== index
Creates or updates a document in an index.
{ref}/docs-index_.html[Endpoint documentation]
[source,ts]
----
client.index({ index })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (string)*: The name of the index
** *`id` (Optional, string)*: Document ID
** *`if_primary_term` (Optional, number)*: only perform the index operation if the last operation that has changed the document has the specified primary term
** *`if_seq_no` (Optional, number)*: only perform the index operation if the last operation that has changed the document has the specified sequence number
** *`op_type` (Optional, Enum("index" | "create"))*: Explicit operation type.
Defaults to `index` for requests with an explicit document ID, and to `create` for requests without an explicit document ID
** *`pipeline` (Optional, string)*: The pipeline id to preprocess incoming documents with
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes.
** *`routing` (Optional, string)*: Specific routing value
** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
** *`version` (Optional, number)*: Explicit version number for concurrency control
** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type
** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)
** *`require_alias` (Optional, boolean)*: When true, requires destination to be an alias. Default is false
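
For example, creating a document only if it does not exist yet (the index name and `document` body here are illustrative):

[source,ts]
----
const response = await client.index({
  index: 'my-index',
  id: '1',
  op_type: 'create', // fail instead of overwriting an existing document
  document: { title: 'Hello world', published: true }
})
console.log(response.result) // 'created'
----
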
[discrete]
=== info
Returns basic information about the cluster.
{ref}/index.html[Endpoint documentation]
[source,ts]
----
client.info()
----
[discrete]
==== Arguments

* *Request (object):*
[discrete]
=== knn_search
Performs a kNN search.
{ref}/search-search.html[Endpoint documentation]
[source,ts]
----
client.knnSearch({ index, knn })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices
** *`knn` ({ field, query_vector, k, num_candidates })*: kNN query to execute
** *`routing` (Optional, string)*: A list of specific routing values
** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response.
** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns.
** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response.
** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match.
[discrete]
=== mget
Allows to get multiple documents in one request.
{ref}/docs-multi-get.html[Endpoint documentation]
[source,ts]
----
client.mget({ ... })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (Optional, string)*: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index.
** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default.
** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time.
** *`refresh` (Optional, boolean)*: If `true`, the request refreshes relevant shards before retrieving documents.
** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
** *`_source` (Optional, boolean | string | string[])*: True or false to return the `_source` field or not, or a list of fields to return.
** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
** *`stored_fields` (Optional, string | string[])*: If `true`, retrieves the document fields stored in the index rather than the document `_source`.
** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: The documents you want to retrieve. Required if no index is specified in the request URI.
** *`ids` (Optional, string | string[])*: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI.
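
For instance, fetching several documents by ID from a hypothetical index:

[source,ts]
----
const response = await client.mget({
  index: 'my-index',
  ids: ['1', '2', '3']
})
for (const doc of response.docs) {
  console.log(doc) // each entry reports found/missing plus the document
}
----
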
[discrete]
=== msearch
Allows to execute several search operations in one request.
{ref}/search-multi-search.html[Endpoint documentation]
[source,ts]
----
client.msearch({ ... })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases to search.
** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar.
** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen.
** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response.
** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute.
** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node.
** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard cannot match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint.
** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object.
** *`routing` (Optional, string)*: Custom routing value used to route search operations to a specific shard.
** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Indicates whether global term and document frequencies should be used when scoring returned documents.
** *`typed_keys` (Optional, boolean)*: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response.
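
A sketch of a multi search: in this client the `searches` array carries the alternating header/body pairs of the multi search body (the index and field names below are made up):

[source,ts]
----
const response = await client.msearch({
  index: 'my-index',
  searches: [
    {}, // header: use the default index above
    { query: { match: { title: 'hello' } } },
    { index: 'my-other-index' }, // header: override the target index
    { query: { match_all: {} } }
  ]
})
console.log(response.responses.length) // one result per search
----
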
[discrete]
=== msearch_template
Allows to execute several search template operations in one request.
{ref}/search-multi-search.html[Endpoint documentation]
[source,ts]
----
client.msearchTemplate({ ... })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (Optional, string | string[])*: A list of index names to use as default
** *`ccs_minimize_roundtrips` (Optional, boolean)*: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution
** *`max_concurrent_searches` (Optional, number)*: Controls the maximum number of concurrent searches the multi search api will execute
** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type
** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
[discrete]
=== mtermvectors
Returns multiple termvectors in one request.
{ref}/docs-multi-termvectors.html[Endpoint documentation]
[source,ts]
----
client.mtermvectors({ ... })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (Optional, string)*: The index in which the document resides.
** *`ids` (Optional, string[])*: A list of document ids. You must define ids as a parameter or set "ids" or "docs" in the request body
** *`fields` (Optional, string | string[])*: A list of fields to return. Applies to all returned documents unless otherwise specified in body "params" or "docs".
** *`field_statistics` (Optional, boolean)*: Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
** *`offsets` (Optional, boolean)*: Specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
** *`payloads` (Optional, boolean)*: Specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
** *`positions` (Optional, boolean)*: Specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random). Applies to all returned documents unless otherwise specified in body "params" or "docs".
** *`realtime` (Optional, boolean)*: Specifies if requests are real-time as opposed to near-real-time (default: true).
** *`routing` (Optional, string)*: Specific routing value. Applies to all returned documents unless otherwise specified in body "params" or "docs".
** *`term_statistics` (Optional, boolean)*: Specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
** *`version` (Optional, number)*: Explicit version number for concurrency control
** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type
** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*
[discrete]
=== open_point_in_time
Open a point in time that can be used in subsequent searches
{ref}/point-in-time-api.html[Endpoint documentation]
[source,ts]
----
-client.openPointInTime(...)
+client.openPointInTime({ index, keep_alive })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (string | string[])*: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices
** *`keep_alive` (string | -1 | 0)*: Specify the time to live for the point in time
** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
** *`routing` (Optional, string)*: Specific routing value
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
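
A typical flow opens a PIT, searches against it, then releases it. The index name is illustrative, and note that a search using `pit` must not also specify an index in the path:

[source,ts]
----
const pit = await client.openPointInTime({
  index: 'my-index',
  keep_alive: '1m'
})
const result = await client.search({
  pit: { id: pit.id, keep_alive: '1m' },
  query: { match_all: {} }
})
// release the point in time when you are done with it
await client.closePointInTime({ id: pit.id })
----
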
[discrete]
=== ping
Returns whether the cluster is running.
{ref}/index.html[Endpoint documentation]
[source,ts]
----
client.ping()
----
[discrete]
==== Arguments

* *Request (object):*
[discrete]
=== put_script
Creates or updates a script.
{ref}/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.putScript({ id, script })
----
[discrete]
==== Arguments

* *Request (object):*
** *`id` (string)*: Script ID
** *`script` ({ lang, options, source })*
** *`context` (Optional, string)*: Script context
** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
[discrete]
=== rank_eval
Allows to evaluate the quality of ranked search results over a set of typical search queries.
{ref}/search-rank-eval.html[Endpoint documentation]
[source,ts]
----
client.rankEval({ requests })
----
[discrete]
==== Arguments

* *Request (object):*
** *`requests` ({ id, request, ratings, template_id, params }[])*: A set of typical search requests, together with their provided ratings.
** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response.
** *`search_type` (Optional, string)*: Search operation type
** *`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })*: Definition of the evaluation metric to calculate.
[discrete]
=== reindex
Allows to copy documents from one index to another, optionally filtering the source documents by a query, changing the destination index settings, or fetching the documents from a remote cluster.
{ref}/docs-reindex.html[Endpoint documentation]
[source,ts]
----
-client.reindex(...)
+client.reindex({ dest, source })
----
[discrete]
==== Arguments

* *Request (object):*
** *`dest` ({ index, op_type, pipeline, routing, version_type })*
** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*
** *`refresh` (Optional, boolean)*: Should the affected indexes be refreshed?
** *`requests_per_second` (Optional, float)*: The throttle to set on this request in sub-requests per second. -1 means no throttle.
** *`scroll` (Optional, string | -1 | 0)*: Control how long to keep the search context alive
** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`.
** *`timeout` (Optional, string | -1 | 0)*: Time each individual bulk request should wait for shards that are unavailable.
** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the reindex operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)
** *`wait_for_completion` (Optional, boolean)*: Whether the request should block until the reindex is complete.
** *`require_alias` (Optional, boolean)*
** *`conflicts` (Optional, Enum("abort" | "proceed"))*
** *`max_docs` (Optional, number)*
** *`script` (Optional, { lang, options, source } | { id })*
** *`size` (Optional, number)*
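
For example, copying every document from one hypothetical index to another as a background task:

[source,ts]
----
const response = await client.reindex({
  source: { index: 'old-index' },
  dest: { index: 'new-index' },
  wait_for_completion: false // run as a task instead of blocking
})
console.log(response.task) // task id you can poll with the tasks API
----
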
[discrete]
=== reindex_rethrottle
Changes the number of requests per second for a particular Reindex operation.
{ref}/docs-reindex.html[Endpoint documentation]
[source,ts]
----
client.reindexRethrottle({ task_id })
----
[discrete]
==== Arguments

* *Request (object):*
** *`task_id` (string)*: The task id to rethrottle
** *`requests_per_second` (Optional, float)*: The throttle to set on this request in floating sub-requests per second. -1 means set no throttle.
[discrete]
=== render_search_template
Allows to use the Mustache language to pre-render a search definition.
{ref}/render-search-template-api.html[Endpoint documentation]
[source,ts]
----
client.renderSearchTemplate({ ... })
----
[discrete]
==== Arguments

* *Request (object):*
** *`id` (Optional, string)*: The id of the stored search template
** *`file` (Optional, string)*
** *`params` (Optional, Record)*
** *`source` (Optional, string)*
[discrete]
=== scripts_painless_execute
Allows an arbitrary script to be executed and a result to be returned
{painless}/painless-execute-api.html[Endpoint documentation]
[source,ts]
----
client.scriptsPainlessExecute({ ... })
----
[discrete]
==== Arguments

* *Request (object):*
** *`context` (Optional, string)*
** *`context_setup` (Optional, { document, index, query })*
** *`script` (Optional, { lang, options, source })*
[discrete]
=== scroll
Allows to retrieve a large number of results from a single search request.
{ref}/search-request-body.html[Endpoint documentation]
[source,ts]
----
client.scroll({ scroll_id })
----
[discrete]
==== Arguments

* *Request (object):*
** *`scroll_id` (string)*: Scroll ID of the search.
** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling.
** *`rest_total_hits_as_int` (Optional, boolean)*: If true, the API response’s hits.total property is returned as an integer. If false, the API response’s hits.total property is returned as an object.
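
A common pattern pages through a large result set by combining an initial scrolling search with repeated scroll calls (the index name is a placeholder):

[source,ts]
----
let response = await client.search({
  index: 'my-index',
  scroll: '30s',
  size: 100,
  query: { match_all: {} }
})
while (response.hits.hits.length > 0) {
  // process response.hits.hits here
  response = await client.scroll({
    scroll_id: response._scroll_id,
    scroll: '30s'
  })
}
// free the search context once the loop is done
await client.clearScroll({ scroll_id: response._scroll_id })
----
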
[discrete]
=== search
Returns results matching a query.
{ref}/search-search.html[Endpoint documentation]
[source,ts]
----
client.search({ ... })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices
** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
** *`allow_partial_search_results` (Optional, boolean)*: Indicate if an error should be returned if there is a partial search failure or timeout
** *`analyzer` (Optional, string)*: The analyzer to use for the query string
** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false)
** *`batched_reduce_size` (Optional, number)*: The number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large.
** *`ccs_minimize_roundtrips` (Optional, boolean)*: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution
** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR)
** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string
** *`docvalue_fields` (Optional, string | string[])*: A list of fields to return as the docvalue representation of a field for each hit
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
** *`explain` (Optional, boolean)*: Specify whether to return detailed information about score computation as part of a hit
** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled
** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored
** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests
** *`min_compatible_shard_node` (Optional, string)*: The minimum compatible version that all shards involved in search should have for this request to be successful
** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
** *`pre_filter_shard_size` (Optional, number)*: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard cannot match any documents based on its rewrite method, i.e. if date filters are mandatory to match but the shard bounds and the query are disjoint.
** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to index level setting
** *`routing` (Optional, string)*: A list of specific routing values
** *`scroll` (Optional, string | -1 | 0)*: Specify how long a consistent view of the index should be maintained for scrolled search
** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type
** *`stats` (Optional, string[])*: Specific 'tag' of the request for logging and statistical purposes
** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit
** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions.
** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specify suggest mode
** *`suggest_size` (Optional, number)*: How many suggestions to return in response
** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned.
** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early.
** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
** *`track_total_hits` (Optional, boolean | number)*: Indicate if the number of documents that match the query should be tracked. A number can also be specified, to accurately track the total hit count up to the number.
** *`track_scores` (Optional, boolean)*: Whether to calculate and return scores even if they are not used for sorting
** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
** *`version` (Optional, boolean)*: Specify whether to return document version as part of a hit
** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return
** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field
** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field
** *`seq_no_primary_term` (Optional, boolean)*: Specify whether to return sequence number and primary term of the last modification of each hit
** *`q` (Optional, string)*: Query in the Lucene query string syntax
** *`size` (Optional, number)*: Number of hits to return (default: 10)
** *`from` (Optional, number)*: Starting offset (default: 0)
** *`sort` (Optional, string | string[])*: A list of `<field>:<direction>` pairs
** *`aggregations` (Optional, Record)*
** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*
** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins.
** *`highlight` (Optional, { encoder, fields })*
** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices.
** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter }[])*: Defines the approximate kNN search to run.
** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use
** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results.
** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
** *`profile` (Optional, boolean)*
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
** *`rescore` (Optional, { query, window_size } | { query, window_size }[])*
** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit.
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
** *`slice` (Optional, { field, id, max })*
** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response.
** *`suggest` (Optional, { text })*
** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path.
** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
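
For instance, a basic paginated search against a hypothetical index:

[source,ts]
----
const response = await client.search({
  index: 'my-index',
  from: 0,
  size: 10,
  query: {
    bool: {
      must: [{ match: { title: 'hello' } }],
      filter: [{ term: { published: true } }]
    }
  },
  sort: ['_score']
})
console.log(response.hits.total, response.hits.hits)
----
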
[discrete]
=== search_mvt
Searches a vector tile for geospatial values. Returns results as a binary Mapbox vector tile.
{ref}/search-vector-tile-api.html[Endpoint documentation]
[source,ts]
----
client.searchMvt({ index, field, zoom, x, y })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (string | string[])*: List of data streams, indices, or aliases to search
** *`field` (string)*: Field containing geospatial data to return
** *`zoom` (number)*: Zoom level for the vector tile to search
** *`x` (number)*: X coordinate for the vector tile to search
** *`y` (number)*: Y coordinate for the vector tile to search
** *`exact_bounds` (Optional, boolean)*: If false, the meta layer’s feature is the bounding box of the tile. If true, the meta layer’s feature is a bounding box resulting from a geo_bounds aggregation. The aggregation runs on `<field>` values that intersect the `<zoom>/<x>/<y>` tile with wrap_longitude set to false. The resulting bounding box may be larger than the vector tile.
** *`extent` (Optional, number)*: Size, in pixels, of a side of the tile. Vector tiles are square with equal sides.
** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: Aggregation used to create a grid for `field`.
** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if `<zoom>` is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don’t include the aggs layer.
** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon of the cell's bounding box. If 'point' each feature is a Point that is the centroid of the cell.
** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don’t include the hits layer.
** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features.
** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid.

Supports the following aggregation types:
- avg
- cardinality
- max
- min
- sum
** *`buffer` (Optional, number)*: Size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile.
** *`fields` (Optional, string | string[])*: Fields to return in the `hits` layer. Supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Query DSL used to filter documents for the search.
** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sorts features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box’s diagonal length, from longest to shortest.
** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
[discrete]
=== search_shards
Returns information about the indices and shards that a search request would be executed against.
{ref}/search-shards.html[Endpoint documentation]
[source,ts]
----
client.searchShards({ ... })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices
** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false)
** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
** *`routing` (Optional, string)*: Specific routing value
[discrete]
=== search_template
Allows to use the Mustache language to pre-render a search definition.
{ref}/search-template.html[Endpoint documentation]
[source,ts]
----
client.searchTemplate({ ... })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (*).
** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
** *`ccs_minimize_roundtrips` (Optional, boolean)*: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
** *`explain` (Optional, boolean)*: Specify whether to return detailed information about score computation as part of a hit
** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled
** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
** *`profile` (Optional, boolean)*: Specify whether to profile the query execution
** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index should be maintained for scrolled search.
** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation.
** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total is rendered as an integer in the response.
** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
** *`id` (Optional, string)*: ID of the search template to use. If no source is specified, this parameter is required.
** *`params` (Optional, Record)*
** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. Also supports Mustache variables. If no id is specified, this parameter is required.
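
A short sketch with an inline Mustache template (the index and field names are invented):

[source,ts]
----
const response = await client.searchTemplate({
  index: 'my-index',
  source: '{ "query": { "match": { "message": "{{query_string}}" } } }',
  params: { query_string: 'hello world' }
})
console.log(response.hits.hits)
----
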
[discrete]
=== terms_enum
The terms enum API can be used to discover terms in the index that begin with the provided string.
{ref}/search-terms-enum.html[Endpoint documentation]
[source,ts]
----
client.termsEnum({ index, field })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (string)*: List of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported.
** *`field` (string)*: The field to inspect for matching terms.
** *`size` (Optional, number)*: How many matching terms to return.
** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded the complete flag is set to false in the response and the results may be partial or empty.
** *`case_insensitive` (Optional, boolean)*: When true the provided search string is matched against index terms without case sensitivity.
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none.
** *`string` (Optional, string)*: The string to match at the start of indexed terms. If not provided, all terms in the field are considered.
** *`search_after` (Optional, string)*: The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request.
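
For example, completing a prefix against a hypothetical `tags` field:

[source,ts]
----
const response = await client.termsEnum({
  index: 'stackoverflow', // illustrative index
  field: 'tags',
  string: 'kiba' // returns terms starting with this prefix, e.g. 'kibana'
})
console.log(response.terms)
----
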
[discrete]
=== termvectors
Returns information and statistics about terms in the fields of a particular document.
{ref}/docs-termvectors.html[Endpoint documentation]
[source,ts]
----
client.termvectors({ index })
----
[discrete]
==== Arguments

* *Request (object):*
** *`index` (string)*: The index in which the document resides.
** *`id` (Optional, string)*: The id of the document, when not specified a doc param should be supplied.
** *`fields` (Optional, string | string[])*: A list of fields to return.
** *`field_statistics` (Optional, boolean)*: Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned.
** *`offsets` (Optional, boolean)*: Specifies if term offsets should be returned.
** *`payloads` (Optional, boolean)*: Specifies if term payloads should be returned.
** *`positions` (Optional, boolean)*: Specifies if term positions should be returned.
** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random).
** *`realtime` (Optional, boolean)*: Specifies if request is real-time as opposed to near-real-time (default: true).
** *`routing` (Optional, string)*: Specific routing value.
** *`term_statistics` (Optional, boolean)*: Specifies if total term frequency and document frequency should be returned.
** *`version` (Optional, number)*: Explicit version number for concurrency control
** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type
** *`doc` (Optional, document object)*
** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })*
** *`per_field_analyzer` (Optional, Record)*
[discrete]
=== update
Updates a document with a script or partial document.
{ref}/docs-update.html[Endpoint documentation]
[source,ts]
----
client.update({ id, index })
----
[discrete]
==== Arguments

* *Request (object):*
** *`id` (string)*: Document ID
** *`index` (string)*: The name of the index
** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term.
** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number.
** *`lang` (Optional, string)*: The script language.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search, if 'wait_for' then wait for a refresh to make this operation visible to search, if 'false' do nothing with refreshes.
** *`require_alias` (Optional, boolean)*: If true, the destination must be an index alias.
** *`retry_on_conflict` (Optional, number)*: Specify how many times the operation should be retried when a conflict occurs.
** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for dynamic mapping updates and active shards. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur.
** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operations. Set to 'all' or any positive integer up to the total number of shards in the index (number_of_replicas+1). Defaults to 1 meaning the primary shard.
** *`_source` (Optional, boolean | string | string[])*: Set to false to disable source retrieval. You can also specify a comma-separated list of the fields you want to retrieve.
** *`_source_excludes` (Optional, string | string[])*: Specify the source fields you want to exclude.
** *`_source_includes` (Optional, string | string[])*: Specify the source fields you want to retrieve.
** *`detect_noop` (Optional, boolean)*: Set to false to disable setting 'result' in the response to 'noop' if no change to the document occurred.
** *`doc` (Optional, partial document object)*: A partial update to an existing document.
** *`doc_as_upsert` (Optional, boolean)*: Set to true to use the contents of 'doc' as the value of 'upsert'.
** *`script` (Optional, { lang, options, source } | { id })*: Script to execute to update the document.
** *`scripted_upsert` (Optional, boolean)*: Set to true to execute the script whether or not the document exists.
** *`upsert` (Optional, document object)*: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is executed.
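
For instance, a partial-document update with upsert semantics (index, ID, and field are illustrative):

[source,ts]
----
const response = await client.update({
  index: 'my-index',
  id: '1',
  doc: { title: 'Updated title' }, // merged into the existing document
  doc_as_upsert: true // insert the doc if it does not exist yet
})
console.log(response.result) // 'updated', 'created' or 'noop'
----
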
[discrete]
=== update_by_query
@@ -457,8 +1106,46 @@ Performs an update on every document in the index without changing the source,
for example to pick up a mapping change.
{ref}/docs-update-by-query.html[Endpoint documentation]
[source,ts]
----
-client.updateByQuery(...)
----
+client.updateByQuery({ index })
+----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`analyzer` (Optional, string)*: The analyzer to use for the query string
+** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false)
+** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do when the update by query hits version conflicts: abort or proceed.
+** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR)
+** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`from` (Optional, number)*: Starting offset (default: 0)
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored
+** *`max_docs` (Optional, number)*: Maximum number of documents to process (default: all documents)
+** *`pipeline` (Optional, string)*: Ingest pipeline to set on index requests made by this action. (default: none)
+** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
+** *`refresh` (Optional, boolean)*: Whether the affected indexes should be refreshed.
+** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to index level setting
+** *`requests_per_second` (Optional, float)*: The throttle to set on this request in sub-requests per second. -1 means no throttle.
+** *`routing` (Optional, string)*: A list of specific routing values
+** *`scroll` (Optional, string | -1 | 0)*: Specify how long a consistent view of the index should be maintained for scrolled search
+** *`scroll_size` (Optional, number)*: Size on the scroll request powering the update by query
+** *`search_timeout` (Optional, string | -1 | 0)*: Explicit timeout for each search request. Defaults to no timeout.
+** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type
+** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`.
+** *`sort` (Optional, string[])*: A list of `<field>:<direction>` pairs
+** *`stats` (Optional, string[])*: Specific 'tag' of the request for logging and statistical purposes
+** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early.
+** *`timeout` (Optional, string | -1 | 0)*: Time each individual bulk request should wait for shards that are unavailable.
+** *`version` (Optional, boolean)*: Specify whether to return document version as part of a hit
+** *`version_type` (Optional, boolean)*: Whether the document should increment the version number (internal) on hit or not (reindex)
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the update by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)
+** *`wait_for_completion` (Optional, boolean)*: Whether the request should block until the update by query operation is complete.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
+** *`script` (Optional, { lang, options, source } | { id })*
+** *`slice` (Optional, { field, id, max })*
+
[discrete]
=== update_by_query_rethrottle
@@ -467,8 +1154,14 @@ Changes the number of requests per second for a particular Update By Query opera
{ref}/docs-update-by-query.html[Endpoint documentation]
[source,ts]
----
-client.updateByQueryRethrottle(...)
+client.updateByQueryRethrottle({ task_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`task_id` (string)*: The task id to rethrottle
+** *`requests_per_second` (Optional, float)*: The throttle to set on this request in floating sub-requests per second. -1 means no throttle.
+
[discrete]
=== async_search
[discrete]
==== delete
@@ -479,9 +1172,15 @@ Deletes an async search by ID. If the search is still running, the search reques
{ref}/async-search.html[Endpoint documentation]
[source,ts]
----
-client.asyncSearch.delete(...)
+client.asyncSearch.delete({ id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: A unique identifier for the async search.
+
[discrete]
==== get
@@ -489,9 +1188,24 @@ Retrieves the results of a previously submitted async search request given its I
{ref}/async-search.html[Endpoint documentation]
[source,ts]
----
-client.asyncSearch.get(...)
+client.asyncSearch.get({ id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: A unique identifier for the async search.
+** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search should be available in the cluster.
+When not specified, the `keep_alive` set with the corresponding submit async request will be used.
+Otherwise, it is possible to override the value and extend the validity of the request.
+When this period expires, the search, if still running, is cancelled.
+If the search is completed, its saved results are deleted.
+** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Specifies to wait for the search to be completed up until the provided timeout. +Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. +By default no timeout is set meaning that the currently available results will be returned without any additional wait. + [discrete] ==== status Retrieves the status of a previously submitted async search request given its ID. @@ -499,9 +1213,15 @@ Retrieves the status of a previously submitted async search request given its ID {ref}/async-search.html[Endpoint documentation] [source,ts] ---- -client.asyncSearch.status(...) +client.asyncSearch.status({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: A unique identifier for the async search. + [discrete] ==== submit Executes a search request asynchronously. @@ -509,8 +1229,85 @@ Executes a search request asynchronously. {ref}/async-search.html[Endpoint documentation] [source,ts] ---- -client.asyncSearch.submit(...) ----- +client.asyncSearch.submit({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Blocks and waits until the search is completed up to a certain timeout. +When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. +** *`keep_on_completion` (Optional, boolean)*: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. +** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search needs to be available. +Ongoing async searches and any saved search results are deleted after this period. +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`allow_partial_search_results` (Optional, boolean)*: Indicate if an error should be returned if there is a partial search failure or timeout +** *`analyzer` (Optional, string)*: The analyzer to use for the query string +** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) +** *`batched_reduce_size` (Optional, number)*: Affects how often partial results become available, which happens whenever shard results are reduced. +A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). +** *`ccs_minimize_roundtrips` (Optional, boolean)*: The default value is the only supported value. 
+** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string +** *`docvalue_fields` (Optional, string | string[])*: A list of fields to return as the docvalue representation of a field for each hit +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`explain` (Optional, boolean)*: Specify whether to return detailed information about score computation as part of a hit +** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored +** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests +** *`min_compatible_shard_node` (Optional, string)* +** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) +** *`pre_filter_shard_size` (Optional, number)*: The default value cannot be changed, which enforces the execution of a pre-filter roundtrip to retrieve statistics from each shard so that the ones that surely don’t hold any document matching the query get skipped. +** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to true +** *`routing` (Optional, string)*: A list of specific routing values +** *`scroll` (Optional, string | -1 | 0)* +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type +** *`stats` (Optional, string[])*: Specific 'tag' of the request for logging and statistical purposes +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit +** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions. +** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specify suggest mode +** *`suggest_size` (Optional, number)*: How many suggestions to return in response +** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. +** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`track_total_hits` (Optional, boolean | number)*: Indicate if the number of documents that match the query should be tracked. A number can also be specified, to accurately track the total hit count up to the number. 
+** *`track_scores` (Optional, boolean)*: Whether to calculate and return scores even if they are not used for sorting
+** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
+** *`rest_total_hits_as_int` (Optional, boolean)*
+** *`version` (Optional, boolean)*: Specify whether to return document version as part of a hit
+** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return
+** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field
+** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field
+** *`seq_no_primary_term` (Optional, boolean)*: Specify whether to return sequence number and primary term of the last modification of each hit
+** *`q` (Optional, string)*: Query in the Lucene query string syntax
+** *`size` (Optional, number)*: Number of hits to return (default: 10)
+** *`from` (Optional, number)*: Starting offset (default: 0)
+** *`sort` (Optional, string | string[])*: A list of `<field>:<direction>` pairs
+** *`aggregations` (Optional, Record)*
+** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*
+** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins.
+** *`highlight` (Optional, { encoder, fields })*
+** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices.
+** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter }[])*: Defines the approximate kNN search to run.
+** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are
+not included in the search results.
+** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
+** *`profile` (Optional, boolean)*
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
+** *`rescore` (Optional, { query, window_size } | { query, window_size }[])*
+** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit.
+** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
+** *`slice` (Optional, { field, id, max })*
+** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names
+matching these patterns in the hits.fields property of the response.
+** *`suggest` (Optional, { text })*
+** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an `<index>` in the request path.
+** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+
[discrete]
=== cat
[discrete]
==== aliases
@@ -521,9 +1318,16 @@ Shows information about currently configured aliases to indices including filter
{ref}/cat-alias.html[Endpoint documentation]
[source,ts]
----
-client.cat.aliases(...)
+client.cat.aliases({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (Optional, string | string[])*: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+
[discrete]
==== allocation
@@ -531,9 +1335,16 @@
Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using.
{ref}/cat-allocation.html[Endpoint documentation]
[source,ts]
----
-client.cat.allocation(...)
+client.cat.allocation({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`node_id` (Optional, string | string[])*: List of node identifiers or names used to limit the returned information.
+** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values.
+
[discrete]
==== component_templates
@@ -541,9 +1352,15 @@
Returns information about existing component templates.
{ref}/cat-component-templates.html[Endpoint documentation]
[source,ts]
----
-client.cat.componentTemplates(...)
+client.cat.componentTemplates({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (Optional, string)*: The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned.
+
[discrete]
==== count
@@ -551,9 +1368,16 @@ Provides quick access to the document count of the entire cluster, or individual
{ref}/cat-count.html[Endpoint documentation]
[source,ts]
----
-client.cat.count(...)
+client.cat.count({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
+
[discrete]
==== fielddata
Shows how much heap memory is currently being used by fielddata on every data no
@@ -561,9 +1385,17 @@ Shows how much heap memory is currently being used by fielddata on every data no {ref}/cat-fielddata.html[Endpoint documentation] [source,ts] ---- -client.cat.fielddata(...) +client.cat.fielddata({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`fields` (Optional, string | string[])*: List of fields used to limit returned information. +To retrieve all fields, omit this parameter. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. + [discrete] ==== health Returns a concise representation of the cluster health. @@ -571,9 +1403,16 @@ Returns a concise representation of the cluster health. {ref}/cat-health.html[Endpoint documentation] [source,ts] ---- -client.cat.health(...) +client.cat.health({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. +** *`ts` (Optional, boolean)*: If true, returns `HH:MM:SS` and Unix epoch timestamps. + [discrete] ==== help Returns help for the Cat APIs. @@ -581,9 +1420,14 @@ Returns help for the Cat APIs. {ref}/cat.html[Endpoint documentation] [source,ts] ---- -client.cat.help(...) +client.cat.help() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== indices Returns information about indices: number of primaries and replicas, document counts, disk size, ... @@ -591,9 +1435,22 @@ Returns information about indices: number of primaries and replicas, document co {ref}/cat-indices.html[Endpoint documentation] [source,ts] ---- -client.cat.indices(...) +client.cat.indices({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. +** *`health` (Optional, Enum("green" | "yellow" | "red"))*: The health status used to limit returned indices. By default, the response includes indices of any health status. +** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. +** *`pri` (Optional, boolean)*: If true, the response only includes information from primary shards. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. + [discrete] ==== master Returns information about the master node. @@ -601,9 +1458,14 @@ Returns information about the master node. {ref}/cat-master.html[Endpoint documentation] [source,ts] ---- -client.cat.master(...) +client.cat.master() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== ml_data_frame_analytics Gets configuration and usage information about data frame analytics jobs. @@ -611,9 +1473,21 @@ Gets configuration and usage information about data frame analytics jobs. {ref}/cat-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.cat.mlDataFrameAnalytics(...) +client.cat.mlDataFrameAnalytics({ ... 
}) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: The ID of the data frame analytics to fetch +** *`allow_no_match` (Optional, boolean)*: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit in which to display byte values +** *`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names to display. +** *`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names or column aliases used to sort the +response. +** *`time` (Optional, string | -1 | 0)*: Unit used to display time values. + [discrete] ==== ml_datafeeds Gets configuration and usage information about datafeeds. @@ -621,9 +1495,27 @@ Gets configuration and usage information about datafeeds. {ref}/cat-datafeeds.html[Endpoint documentation] [source,ts] ---- -client.cat.mlDatafeeds(...) +client.cat.mlDatafeeds({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (Optional, string)*: A numerical character string that uniquely identifies the datafeed. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +* Contains wildcard expressions and there are no datafeeds that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when +there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only +partial matches. +** *`h` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])*: List of column names to display. +** *`s` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])*: List of column names or column aliases used to sort the response. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. + [discrete] ==== ml_jobs Gets configuration and usage information about anomaly detection jobs. @@ -631,9 +1523,28 @@ Gets configuration and usage information about anomaly detection jobs. 
{ref}/cat-anomaly-detectors.html[Endpoint documentation] [source,ts] ---- -client.cat.mlJobs(...) +client.cat.mlJobs({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +* Contains wildcard expressions and there are no jobs that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there +are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial +matches. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`h` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | 
"model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])*: List of column names to display. +** *`s` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])*: List of column names or column aliases used to sort the response. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. + [discrete] ==== ml_trained_models Gets configuration and usage information about inference trained models. @@ -641,9 +1552,23 @@ Gets configuration and usage information about inference trained models. 
{ref}/cat-trained-model.html[Endpoint documentation] [source,ts] ---- -client.cat.mlTrainedModels(...) +client.cat.mlTrainedModels({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (Optional, string)*: A unique identifier for the trained model. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. +If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. +If `false`, the API returns a 404 status code when there are no matches or only partial matches. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`h` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])*: A list of column names to display. +** *`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])*: A list of column names or aliases used to sort the response. +** *`from` (Optional, number)*: Skips the specified number of transforms. +** *`size` (Optional, number)*: The maximum number of transforms to display. + [discrete] ==== nodeattrs Returns information about custom node attributes. @@ -651,9 +1576,14 @@ Returns information about custom node attributes. {ref}/cat-nodeattrs.html[Endpoint documentation] [source,ts] ---- -client.cat.nodeattrs(...) +client.cat.nodeattrs() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== nodes Returns basic statistics about performance of cluster nodes. @@ -661,9 +1591,17 @@ Returns basic statistics about performance of cluster nodes. {ref}/cat-nodes.html[Endpoint documentation] [source,ts] ---- -client.cat.nodes(...) +client.cat.nodes({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`full_id` (Optional, boolean | string)*: If `true`, return the full node ID. If `false`, return the shortened node ID. +** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. + [discrete] ==== pending_tasks Returns a concise representation of the cluster pending tasks. @@ -671,9 +1609,14 @@ Returns a concise representation of the cluster pending tasks. {ref}/cat-pending-tasks.html[Endpoint documentation] [source,ts] ---- -client.cat.pendingTasks(...) 
+client.cat.pendingTasks()
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
==== plugins
@@ -681,9 +1624,14 @@ Returns information about installed plugins across nodes.
{ref}/cat-plugins.html[Endpoint documentation]
[source,ts]
----
-client.cat.plugins(...)
+client.cat.plugins()
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
==== recovery
@@ -691,9 +1639,19 @@ Returns information about index shard recoveries, both on-going and completed.
{ref}/cat-recovery.html[Endpoint documentation]
[source,ts]
----
-client.cat.recovery(...)
+client.cat.recovery({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
+** *`active_only` (Optional, boolean)*: If `true`, the response only includes ongoing shard recoveries.
+** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values.
+** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries.
+
[discrete]
==== repositories
@@ -701,9 +1659,14 @@ Returns information about snapshot repositories registered in the cluster.
{ref}/cat-repositories.html[Endpoint documentation]
[source,ts]
----
-client.cat.repositories(...)
+client.cat.repositories()
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
==== segments
@@ -711,9 +1674,18 @@ Provides low-level information about the segments in the shards of an index.
{ref}/cat-segments.html[Endpoint documentation]
[source,ts]
----
-client.cat.segments(...)
+client.cat.segments({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values.
+
[discrete]
==== shards
@@ -721,9 +1693,18 @@ Provides a detailed view of shard allocation on nodes.
{ref}/cat-shards.html[Endpoint documentation]
[source,ts]
----
-client.cat.shards(...)
+client.cat.shards({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values.
+
[discrete]
==== snapshots
@@ -731,9 +1712,19 @@ Returns all snapshots in a specific repository.
{ref}/cat-snapshots.html[Endpoint documentation]
[source,ts]
----
-client.cat.snapshots(...)
+client.cat.snapshots({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`repository` (Optional, string | string[])*: A list of snapshot repositories used to limit the request.
+Accepts wildcard expressions.
+`_all` returns all repositories.
+If any repository fails during the request, Elasticsearch returns an error.
+** *`ignore_unavailable` (Optional, boolean)*: If `true`, the response does not include information from unavailable snapshots.
+
[discrete]
==== tasks
@@ -741,9 +1732,18 @@ Returns information about the tasks currently executing on one or more nodes in
{ref}/tasks.html[Endpoint documentation]
[source,ts]
----
-client.cat.tasks(...)
+client.cat.tasks({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`actions` (Optional, string[])*: The task action names, which are used to limit the response.
+** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about the running tasks.
+** *`node_id` (Optional, string[])*: Unique node identifiers, which are used to limit the response.
+** *`parent_task_id` (Optional, string)*: The parent task identifier, which is used to limit the response.
+
[discrete]
==== templates
@@ -751,9 +1751,16 @@ Returns information about existing templates.
{ref}/cat-templates.html[Endpoint documentation]
[source,ts]
----
-client.cat.templates(...)
+client.cat.templates({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (Optional, string)*: The name of the template to return.
+Accepts wildcard expressions. If omitted, all templates are returned.
+
[discrete]
==== thread_pool
@@ -762,9 +1769,17 @@ Returns cluster-wide thread pool statistics per node.
By default the active, queue and rejected statistics are returned for all thread
{ref}/cat-thread-pool.html[Endpoint documentation]
[source,ts]
----
-client.cat.threadPool(...)
+client.cat.threadPool({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`thread_pool_patterns` (Optional, string | string[])*: A list of thread pool names used to limit the request.
+Accepts wildcard expressions.
+** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values.
+
[discrete]
==== transforms
@@ -772,9 +1787,24 @@ Gets configuration and usage information about transforms.
{ref}/cat-transforms.html[Endpoint documentation]
[source,ts]
----
-client.cat.transforms(...)
+client.cat.transforms({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`transform_id` (Optional, string)*: A transform identifier or a wildcard expression.
+If you do not specify one of these options, the API returns information for all transforms.
+** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.
+If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches.
+If `false`, the request returns a 404 status code when there are no matches or only partial matches.
+** *`from` (Optional, number)*: Skips the specified number of transforms.
+** *`h` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])*: List of column names to display. +** *`s` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])*: List of column names or column aliases used to sort the response. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. +** *`size` (Optional, number)*: The maximum number of transforms to obtain. + [discrete] === ccr [discrete] @@ -784,9 +1814,15 @@ Deletes auto-follow patterns. {ref}/ccr-delete-auto-follow-pattern.html[Endpoint documentation] [source,ts] ---- -client.ccr.deleteAutoFollowPattern(...) +client.ccr.deleteAutoFollowPattern({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the auto follow pattern. + [discrete] ==== follow Creates a new follower index configured to follow the referenced leader index. @@ -794,9 +1830,28 @@ Creates a new follower index configured to follow the referenced leader index. {ref}/ccr-put-follow.html[Endpoint documentation] [source,ts] ---- -client.ccr.follow(...) 
+client.ccr.follow({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The name of the follower index
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before returning. Defaults to 0. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)
+** *`leader_index` (Optional, string)*
+** *`max_outstanding_read_requests` (Optional, number)*
+** *`max_outstanding_write_requests` (Optional, number)*
+** *`max_read_request_operation_count` (Optional, number)*
+** *`max_read_request_size` (Optional, string)*
+** *`max_retry_delay` (Optional, string | -1 | 0)*
+** *`max_write_buffer_count` (Optional, number)*
+** *`max_write_buffer_size` (Optional, string)*
+** *`max_write_request_operation_count` (Optional, number)*
+** *`max_write_request_size` (Optional, string)*
+** *`read_poll_timeout` (Optional, string | -1 | 0)*
+** *`remote_cluster` (Optional, string)*
+
[discrete]
==== follow_info
@@ -804,9 +1859,15 @@ Retrieves information about all follower indices, including parameters and statu
{ref}/ccr-get-follow-info.html[Endpoint documentation]
[source,ts]
----
-client.ccr.followInfo(...)
+client.ccr.followInfo({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: A list of index patterns; use `_all` to perform the operation on all indices
+
[discrete]
==== follow_stats
@@ -814,9 +1875,15 @@
Retrieves follower stats. Returns shard-level stats about the following tasks associated with each shard for the specified indices.
{ref}/ccr-get-follow-stats.html[Endpoint documentation]
[source,ts]
----
-client.ccr.followStats(...)
+client.ccr.followStats({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: A list of index patterns; use `_all` to perform the operation on all indices
+
[discrete]
==== forget_follower
@@ -824,9 +1891,19 @@ Removes the follower retention leases from the leader.
{ref}/ccr-post-forget-follower.html[Endpoint documentation]
[source,ts]
----
-client.ccr.forgetFollower(...)
+client.ccr.forgetFollower({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The name of the leader index for which specified follower retention leases should be removed
+** *`follower_cluster` (Optional, string)*
+** *`follower_index` (Optional, string)*
+** *`follower_index_uuid` (Optional, string)*
+** *`leader_remote_cluster` (Optional, string)*
+
[discrete]
==== get_auto_follow_pattern
@@ -834,9 +1911,15 @@ Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection.
{ref}/ccr-get-auto-follow-pattern.html[Endpoint documentation]
[source,ts]
----
-client.ccr.getAutoFollowPattern(...)
+client.ccr.getAutoFollowPattern({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (Optional, string)*: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections.
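+
+As a usage sketch (the pattern name `my-pattern` is a placeholder):
+
+[source,ts]
+----
+// Retrieve a single auto-follow pattern collection by name
+const pattern = await client.ccr.getAutoFollowPattern({ name: 'my-pattern' })
+// Omit the name to retrieve every configured collection
+const allPatterns = await client.ccr.getAutoFollowPattern()
+----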
+
[discrete]
==== pause_auto_follow_pattern
@@ -844,9 +1927,15 @@ Pauses an auto-follow pattern
{ref}/ccr-pause-auto-follow-pattern.html[Endpoint documentation]
[source,ts]
----
-client.ccr.pauseAutoFollowPattern(...)
+client.ccr.pauseAutoFollowPattern({ name })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: The name of the auto follow pattern that should pause discovering new indices to follow.
+
[discrete]
==== pause_follow
@@ -854,9 +1943,15 @@
Pauses a follower index. The follower index will not fetch any additional operations from the leader index.
{ref}/ccr-post-pause-follow.html[Endpoint documentation]
[source,ts]
----
-client.ccr.pauseFollow(...)
+client.ccr.pauseFollow({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The name of the follower index that should pause following its leader index.
+
[discrete]
==== put_auto_follow_pattern
@@ -864,9 +1959,30 @@
Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices.
{ref}/ccr-put-auto-follow-pattern.html[Endpoint documentation]
[source,ts]
----
-client.ccr.putAutoFollowPattern(...)
+client.ccr.putAutoFollowPattern({ name, remote_cluster })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: The name of the collection of auto-follow patterns.
+** *`remote_cluster` (string)*: The remote cluster containing the leader indices to match against.
+** *`follow_index_pattern` (Optional, string)*: The name of follower index. The template {{leader_index}} can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use {{leader_index}}; CCR does not support changes to the names of a follower data stream’s backing indices.
+** *`leader_index_patterns` (Optional, string[])*: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field.
+** *`leader_index_exclusion_patterns` (Optional, string[])*: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names are matching one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed.
+** *`max_outstanding_read_requests` (Optional, number)*: The maximum number of outstanding read requests from the remote cluster.
+** *`settings` (Optional, Record)*: Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards).
+** *`max_outstanding_write_requests` (Optional, number)*: The maximum number of outstanding write requests on the follower.
+** *`read_poll_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
+** *`max_read_request_operation_count` (Optional, number)*: The maximum number of operations to pull per read from the remote cluster.
+** *`max_read_request_size` (Optional, number | string)*: The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. +** *`max_retry_delay` (Optional, string | -1 | 0)*: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. +** *`max_write_buffer_count` (Optional, number)*: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. +** *`max_write_buffer_size` (Optional, number | string)*: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. +** *`max_write_request_operation_count` (Optional, number)*: The maximum number of operations per bulk write request executed on the follower. +** *`max_write_request_size` (Optional, number | string)*: The maximum total bytes of operations per bulk write request executed on the follower. + [discrete] ==== resume_auto_follow_pattern Resumes an auto-follow pattern that has been paused @@ -874,9 +1990,15 @@ Resumes an auto-follow pattern that has been paused {ref}/ccr-resume-auto-follow-pattern.html[Endpoint documentation] [source,ts] ---- -client.ccr.resumeAutoFollowPattern(...) +client.ccr.resumeAutoFollowPattern({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the auto follow pattern to resume discovering new indices to follow. + [discrete] ==== resume_follow Resumes a follower index that has been paused @@ -884,9 +2006,25 @@ Resumes a follower index that has been paused {ref}/ccr-post-resume-follow.html[Endpoint documentation] [source,ts] ---- -client.ccr.resumeFollow(...) +client.ccr.resumeFollow({ index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: The name of the follow index to resume following. +** *`max_outstanding_read_requests` (Optional, number)* +** *`max_outstanding_write_requests` (Optional, number)* +** *`max_read_request_operation_count` (Optional, number)* +** *`max_read_request_size` (Optional, string)* +** *`max_retry_delay` (Optional, string | -1 | 0)* +** *`max_write_buffer_count` (Optional, number)* +** *`max_write_buffer_size` (Optional, string)* +** *`max_write_request_operation_count` (Optional, number)* +** *`max_write_request_size` (Optional, string)* +** *`read_poll_timeout` (Optional, string | -1 | 0)* + [discrete] ==== stats Gets all stats related to cross-cluster replication. @@ -894,9 +2032,14 @@ Gets all stats related to cross-cluster replication. {ref}/ccr-get-stats.html[Endpoint documentation] [source,ts] ---- -client.ccr.stats(...) +client.ccr.stats() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== unfollow Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. @@ -904,9 +2047,15 @@ Stops the following task associated with a follower index and removes index meta {ref}/ccr-post-unfollow.html[Endpoint documentation] [source,ts] ---- -client.ccr.unfollow(...) +client.ccr.unfollow({ index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: The name of the follower index that should be turned into a regular index. 
+
[discrete]
=== cluster
[discrete]
==== allocation_explain
Provides explanations for shard allocations in the cluster.
@@ -916,9 +2065,20 @@ Provides explanations for shard allocations in the cluster.
{ref}/cluster-allocation-explain.html[Endpoint documentation]
[source,ts]
----
-client.cluster.allocationExplain(...)
+client.cluster.allocationExplain({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`include_disk_info` (Optional, boolean)*: If true, returns information about disk usage and shard sizes.
+** *`include_yes_decisions` (Optional, boolean)*: If true, returns YES decisions in explanation.
+** *`current_node` (Optional, string)*: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node.
+** *`index` (Optional, string)*: Specifies the name of the index that you would like an explanation for.
+** *`primary` (Optional, boolean)*: If true, returns explanation for the primary shard for the given shard ID.
+** *`shard` (Optional, number)*: Specifies the ID of the shard that you would like an explanation for.
+
[discrete]
==== delete_component_template
Deletes a component template
@@ -926,9 +2086,17 @@ Deletes a component template
{ref}/indices-component-template.html[Endpoint documentation]
[source,ts]
----
-client.cluster.deleteComponentTemplate(...)
+client.cluster.deleteComponentTemplate({ name })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: List or wildcard expression of component template names used to limit the request.
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+
[discrete]
==== delete_voting_config_exclusions
Clears cluster voting config exclusions.
@@ -936,9 +2104,20 @@ Clears cluster voting config exclusions.
{ref}/voting-config-exclusions.html[Endpoint documentation]
[source,ts]
----
-client.cluster.deleteVotingConfigExclusions(...)
+client.cluster.deleteVotingConfigExclusions({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`wait_for_removal` (Optional, boolean)*: Specifies whether to wait for all excluded nodes to be removed from the
+cluster before clearing the voting configuration exclusions list.
+Defaults to true, meaning that all excluded nodes must be removed from
+the cluster before this API takes any action. If set to false then the
+voting configuration exclusions list is cleared even if some excluded
+nodes are still in the cluster.
+
[discrete]
==== exists_component_template
Returns information about whether a particular component template exists
@@ -946,9 +2125,21 @@ Returns information about whether a particular component template exist
{ref}/indices-component-template.html[Endpoint documentation]
[source,ts]
----
-client.cluster.existsComponentTemplate(...)
+client.cluster.existsComponentTemplate({ name })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: List of component template names used to limit the request.
+Wildcard (*) expressions are supported.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only.
+Defaults to false, which means information is retrieved from the master node.
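+
+A minimal usage sketch for the call above, with a hypothetical template
+name (in this client the exists-style APIs resolve to a boolean body):
+
+[source,ts]
+----
+// true if the component template exists (hypothetical name).
+const exists = await client.cluster.existsComponentTemplate({
+  name: 'my-component-template',
+})
+----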
+
[discrete]
==== get_component_template
Returns one or more component templates
@@ -956,9 +2147,19 @@ Returns one or more component templates
{ref}/indices-component-template.html[Endpoint documentation]
[source,ts]
----
-client.cluster.getComponentTemplate(...)
+client.cluster.getComponentTemplate({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (Optional, string)*: The comma separated names of the component templates
+** *`flat_settings` (Optional, boolean)*
+** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false)
+** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
+** *`include_defaults` (Optional, boolean)*: Return all default configurations for the component template (default: false)
+
[discrete]
==== get_settings
Returns cluster settings.
@@ -966,9 +2167,18 @@ Returns cluster settings.
{ref}/cluster-get-settings.html[Endpoint documentation]
[source,ts]
----
-client.cluster.getSettings(...)
+client.cluster.getSettings({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
+** *`include_defaults` (Optional, boolean)*: Whether to return all default cluster settings.
+** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+
[discrete]
==== health
Returns basic information about the health of the cluster.
@@ -976,9 +2186,42 @@ Returns basic information about the health of the cluster.
{ref}/cluster-health.html[Endpoint documentation]
[source,ts]
----
-client.cluster.health(...)
+client.cluster.health({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or *.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Can be one of cluster, indices or shards. Controls the details level of the health information returned.
+** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: A number controlling how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait.
+** *`wait_for_events` (Optional, Enum("immediate" | "urgent" | "high" | "normal" | "low" | "languid"))*: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
+** *`wait_for_nodes` (Optional, string | number)*: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. By default, will not wait for any nodes.
+
+[discrete]
+==== info
+Returns different information about the cluster.
+
+{ref}/cluster-info.html[Endpoint documentation]
+[source,ts]
+----
+client.cluster.info({ target })
----

[discrete]
==== Arguments

* *Request (object):*
** *`target` (Enum("_all" | "http" | "ingest" | "thread_pool" | "script") | Enum("_all" | "http" | "ingest" | "thread_pool" | "script")[])*: Limits the information returned to the specific target. Supports a list, such as http,ingest.

[discrete]
==== pending_tasks
Returns a list of any cluster-level changes (e.g. create index, update mapping,
allocate or fail shard) which have not yet been executed.
@@ -987,9 +2230,18 @@ allocate or fail shard) which have not yet been executed.
{ref}/cluster-pending.html[Endpoint documentation]
[source,ts]
----
-client.cluster.pendingTasks(...)
+client.cluster.pendingTasks({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
+If `false`, information is retrieved from the master node.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
[discrete]
==== post_voting_config_exclusions
Updates the cluster voting config exclusions by node ids or node names.
@@ -997,9 +2249,22 @@ Updates the cluster voting config exclusions by node ids or node names.
{ref}/voting-config-exclusions.html[Endpoint documentation]
[source,ts]
----
-client.cluster.postVotingConfigExclusions(...)
+client.cluster.postVotingConfigExclusions({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`node_names` (Optional, string | string[])*: A list of the names of the nodes to exclude from the
+voting configuration. If specified, you may not also specify node_ids.
+** *`node_ids` (Optional, string | string[])*: A list of the persistent ids of the nodes to exclude
+from the voting configuration. If specified, you may not also specify node_names.
+** *`timeout` (Optional, string | -1 | 0)*: When adding a voting configuration exclusion, the API waits for the
+specified nodes to be excluded from the voting configuration before
+returning. If the timeout expires before the appropriate condition
+is satisfied, the request fails and returns an error.
+
[discrete]
==== put_component_template
Creates or updates a component template
@@ -1007,9 +2272,26 @@ Creates or updates a component template
{ref}/indices-component-template.html[Endpoint documentation]
[source,ts]
----
-client.cluster.putComponentTemplate(...)
+client.cluster.putComponentTemplate({ name, template })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: The name of the template
+** *`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })*: The template to be applied, which includes mappings, settings, or aliases configuration.
+** *`create` (Optional, boolean)*: Whether the index template should only be added if new or can also replace an existing one
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`version` (Optional, number)*: Version number used to manage component templates externally.
+This number isn't automatically generated or incremented by Elasticsearch.
+** *`_meta` (Optional, Record)*: Optional user metadata about the component template.
+May have any contents. This map is not automatically generated by Elasticsearch.
+** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting.
+If set to `true` in a template, then indices can be automatically created using that
+template even if auto-creation of indices is disabled via `action.auto_create_index`.
+If set to `false` then data streams matching the template must always be explicitly created.
+
[discrete]
==== put_settings
Updates the cluster settings.
@@ -1017,9 +2299,19 @@ Updates the cluster settings.
{ref}/cluster-update-settings.html[Endpoint documentation]
[source,ts]
----
-client.cluster.putSettings(...)
+client.cluster.putSettings({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
+** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`persistent` (Optional, Record)*
+** *`transient` (Optional, Record)*
+
[discrete]
==== remote_info
Returns the information about configured remote clusters.
@@ -1027,9 +2319,14 @@ Returns the information about configured remote clusters.
{ref}/cluster-remote-info.html[Endpoint documentation]
[source,ts]
----
-client.cluster.remoteInfo(...)
+client.cluster.remoteInfo()
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
==== reroute
Allows you to manually change the allocation of individual shards in the cluster.
@@ -1037,9 +2334,21 @@ Allows to manually change the allocation of individual shards in the cluster.
{ref}/cluster-reroute.html[Endpoint documentation]
[source,ts]
----
-client.cluster.reroute(...)
+client.cluster.reroute({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`dry_run` (Optional, boolean)*: If true, then the request simulates the operation only and returns the resulting state.
+** *`explain` (Optional, boolean)*: If true, then the response contains an explanation of why the commands can or cannot be executed.
+** *`metric` (Optional, string | string[])*: Limits the information returned to the specified metrics.
+** *`retry_failed` (Optional, boolean)*: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+** *`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])*: Defines the commands to perform.
+
[discrete]
==== state
Returns comprehensive information about the state of the cluster.
@@ -1047,9 +2356,24 @@ Returns a comprehensive information about the state of the cluster.
{ref}/cluster-state.html[Endpoint documentation]
[source,ts]
----
-client.cluster.state(...)
+client.cluster.state({ ...
})
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`metric` (Optional, string | string[])*: Limit the information returned to the specified metrics
+** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false)
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`wait_for_metadata_version` (Optional, number)*: Wait for the metadata version to be equal or greater than the specified metadata version
+** *`wait_for_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for wait_for_metadata_version before timing out
+
[discrete]
==== stats
Returns a high-level overview of cluster statistics.
@@ -1057,9 +2381,17 @@ Returns high-level overview of cluster statistics.
{ref}/cluster-stats.html[Endpoint documentation]
[source,ts]
----
-client.cluster.stats(...)
+client.cluster.stats({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`node_id` (Optional, string | string[])*: List of node filters used to limit returned information. Defaults to all nodes in the cluster.
+** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s _nodes.failed property. Defaults to no timeout.
+
[discrete]
=== dangling_indices
[discrete]
==== delete_dangling_index
Deletes the specified dangling index
@@ -1069,9 +2401,18 @@ Deletes the specified dangling index
{ref}/modules-gateway-dangling-indices.html[Endpoint documentation]
[source,ts]
----
-client.danglingIndices.deleteDanglingIndex(...)
+client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index_uuid` (string)*: The UUID of the dangling index
+** *`accept_data_loss` (boolean)*: Must be set to true in order to delete the dangling index
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+
[discrete]
==== import_dangling_index
Imports the specified dangling index
@@ -1079,9 +2420,18 @@ Imports the specified dangling index
{ref}/modules-gateway-dangling-indices.html[Endpoint documentation]
[source,ts]
----
-client.danglingIndices.importDanglingIndex(...)
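+// Illustrative: index_uuid comes from a prior list_dangling_indices call, and accept_data_loss must be true.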
+client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index_uuid` (string)*: The UUID of the dangling index
+** *`accept_data_loss` (boolean)*: Must be set to true in order to import the dangling index
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+
[discrete]
==== list_dangling_indices
Returns all dangling indices.
@@ -1089,9 +2439,14 @@ Returns all dangling indices.
{ref}/modules-gateway-dangling-indices.html[Endpoint documentation]
[source,ts]
----
-client.danglingIndices.listDanglingIndices(...)
+client.danglingIndices.listDanglingIndices()
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
=== enrich
[discrete]
==== delete_policy
Deletes an existing enrich policy and its enrich index.
@@ -1101,9 +2456,15 @@ Deletes an existing enrich policy and its enrich index.
{ref}/delete-enrich-policy-api.html[Endpoint documentation]
[source,ts]
----
-client.enrich.deletePolicy(...)
+client.enrich.deletePolicy({ name })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: The name of the enrich policy
+
[discrete]
==== execute_policy
Creates the enrich index for an existing enrich policy.
@@ -1111,9 +2472,16 @@ Creates the enrich index for an existing enrich policy.
{ref}/execute-enrich-policy-api.html[Endpoint documentation]
[source,ts]
----
-client.enrich.executePolicy(...)
+client.enrich.executePolicy({ name })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: The name of the enrich policy
+** *`wait_for_completion` (Optional, boolean)*: Whether the request should block until the execution is complete.
+
[discrete]
==== get_policy
Gets information about an enrich policy.
@@ -1121,9 +2489,15 @@ Gets information about an enrich policy.
{ref}/get-enrich-policy-api.html[Endpoint documentation]
[source,ts]
----
-client.enrich.getPolicy(...)
+client.enrich.getPolicy({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (Optional, string | string[])*: A list of enrich policy names
+
[discrete]
==== put_policy
Creates a new enrich policy.
@@ -1131,9 +2505,18 @@ Creates a new enrich policy.
{ref}/put-enrich-policy-api.html[Endpoint documentation]
[source,ts]
----
-client.enrich.putPolicy(...)
+client.enrich.putPolicy({ name })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: The name of the enrich policy
+** *`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*
+** *`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*
+** *`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*
+
[discrete]
==== stats
Gets enrich coordinator statistics and information about enrich policies that are currently executing.
@@ -1141,9 +2524,14 @@ Gets enrich coordinator statistics and information about enrich policies that ar
{ref}/enrich-stats-api.html[Endpoint documentation]
[source,ts]
----
-client.enrich.stats(...)
+client.enrich.stats()
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
=== eql
[discrete]
==== delete
Deletes an async EQL search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted.
@@ -1153,9 +2541,15 @@ Deletes an async EQL search by ID. If the search is still running, the search re
{ref}/eql-search-api.html[Endpoint documentation]
[source,ts]
----
-client.eql.delete(...)
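+// Illustrative: `id` is the async search identifier returned by an earlier eql.search request.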
+client.eql.delete({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the search to delete. + [discrete] ==== get Returns async results from previously executed Event Query Language (EQL) search @@ -1163,9 +2557,17 @@ Returns async results from previously executed Event Query Language (EQL) search {ref}/eql-search-api.html[Endpoint documentation] [source,ts] ---- -client.eql.get(...) +client.eql.get({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the search. +** *`keep_alive` (Optional, string | -1 | 0)*: Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. + [discrete] ==== get_status Returns the status of a previously submitted async or stored Event Query Language (EQL) search @@ -1173,9 +2575,15 @@ Returns the status of a previously submitted async or stored Event Query Languag {ref}/eql-search-api.html[Endpoint documentation] [source,ts] ---- -client.eql.getStatus(...) +client.eql.getStatus({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the search. + [discrete] ==== search Returns results matching a query expressed in Event Query Language (EQL) @@ -1183,9 +2591,32 @@ Returns results matching a query expressed in Event Query Language (EQL) {ref}/eql-search-api.html[Endpoint documentation] [source,ts] ---- -client.eql.search(...) +client.eql.search({ index, query }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: The name of the index to scope the operation +** *`query` (string)*: EQL query you wish to run. +** *`allow_no_indices` (Optional, boolean)* +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* +** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. +** *`keep_alive` (Optional, string | -1 | 0)*: Period for which the search and its results are stored on the cluster. +** *`keep_on_completion` (Optional, boolean)*: If true, the search and its results are stored on the cluster. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. +** *`case_sensitive` (Optional, boolean)* +** *`event_category_field` (Optional, string)*: Field containing the event classification, such as process, file, or network. +** *`tiebreaker_field` (Optional, string)*: Field used to sort hits with the same timestamp in ascending order +** *`timestamp_field` (Optional, string)*: Field containing event timestamp. Default "@timestamp" +** *`fetch_size` (Optional, number)*: Maximum number of events to search at a time for sequence queries. 
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs.
+** *`size` (Optional, number)*: For basic queries, the maximum number of matching events to return. Defaults to 10
+** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit.
+** *`result_position` (Optional, Enum("tail" | "head"))*
+** *`runtime_mappings` (Optional, Record)*
+
[discrete]
=== features
[discrete]
==== get_features
Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot
@@ -1195,9 +2626,14 @@ Gets a list of features which can be included in snapshots using the feature_sta
{ref}/get-features-api.html[Endpoint documentation]
[source,ts]
----
-client.features.getFeatures(...)
+client.features.getFeatures()
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
==== reset_features
Resets the internal state of features, usually by deleting system indices
@@ -1205,9 +2641,14 @@ Resets the internal state of features, usually by deleting system indices
{ref}/modules-snapshots.html[Endpoint documentation]
[source,ts]
----
-client.features.resetFeatures(...)
+client.features.resetFeatures()
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
=== fleet
[discrete]
==== global_checkpoints
Returns the current global checkpoints for an index. This API is designed for internal use by the fleet server project.

{ref}/get-global-checkpoints.html[Endpoint documentation]
[source,ts]
----
-client.fleet.globalCheckpoints(...)
+client.fleet.globalCheckpoints({ index })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string)*: A single index or index alias that resolves to a single index.
+** *`wait_for_advance` (Optional, boolean)*: A boolean value which controls whether to wait (until the timeout) for the global checkpoints
+to advance past the provided `checkpoints`.
+** *`wait_for_index` (Optional, boolean)*: A boolean value which controls whether to wait (until the timeout) for the target index to exist
+and all primary shards be active. Can only be true when `wait_for_advance` is true.
+** *`checkpoints` (Optional, number[])*: A comma separated list of previous global checkpoints.
When used in combination with `wait_for_advance`, +the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list +will cause Elasticsearch to immediately return the current global checkpoints. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a global checkpoints to advance past `checkpoints`. + [discrete] ==== msearch Multi Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project. [source,ts] ---- -client.fleet.msearch(...) +client.fleet.msearch({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. +** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute. +** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node. +** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Indicates whether global term and document frequencies should be used when scoring returned documents. +** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. +** *`typed_keys` (Optional, boolean)*: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. +** *`wait_for_checkpoints` (Optional, number[])*: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard +after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause +Elasticsearch to immediately execute the search. 
+** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns +an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` +which is true by default. + [discrete] ==== search Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project. [source,ts] ---- -client.fleet.search(...) ----- +client.fleet.search({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index. +** *`allow_no_indices` (Optional, boolean)* +** *`analyzer` (Optional, string)* +** *`analyze_wildcard` (Optional, boolean)* +** *`batched_reduce_size` (Optional, number)* +** *`ccs_minimize_roundtrips` (Optional, boolean)* +** *`default_operator` (Optional, Enum("and" | "or"))* +** *`df` (Optional, string)* +** *`docvalue_fields` (Optional, string | string[])* +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* +** *`explain` (Optional, boolean)* +** *`ignore_throttled` (Optional, boolean)* +** *`ignore_unavailable` (Optional, boolean)* +** *`lenient` (Optional, boolean)* +** *`max_concurrent_shard_requests` (Optional, number)* +** *`min_compatible_shard_node` (Optional, string)* +** *`preference` (Optional, string)* +** *`pre_filter_shard_size` (Optional, number)* +** *`request_cache` (Optional, boolean)* +** *`routing` (Optional, string)* +** *`scroll` (Optional, string | -1 | 0)* +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))* +** *`stats` (Optional, string[])* +** *`stored_fields` (Optional, string | string[])* +** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions. +** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))* +** *`suggest_size` (Optional, number)* +** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. +** *`terminate_after` (Optional, number)* +** *`timeout` (Optional, string | -1 | 0)* +** *`track_total_hits` (Optional, boolean | number)* +** *`track_scores` (Optional, boolean)* +** *`typed_keys` (Optional, boolean)* +** *`rest_total_hits_as_int` (Optional, boolean)* +** *`version` (Optional, boolean)* +** *`_source` (Optional, boolean | string | string[])* +** *`_source_excludes` (Optional, string | string[])* +** *`_source_includes` (Optional, string | string[])* +** *`seq_no_primary_term` (Optional, boolean)* +** *`q` (Optional, string)* +** *`size` (Optional, number)* +** *`from` (Optional, number)* +** *`sort` (Optional, string | string[])* +** *`wait_for_checkpoints` (Optional, number[])*: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard +after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause +Elasticsearch to immediately execute the search. 
+** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns
+an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`
+which is true by default.
+** *`aggregations` (Optional, Record)*
+** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*
+** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins.
+** *`highlight` (Optional, { encoder, fields })*
+** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices.
+** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are
+not included in the search results.
+** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
+** *`profile` (Optional, boolean)*
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
+** *`rescore` (Optional, { query, window_size } | { query, window_size }[])*
+** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit.
+** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
+** *`slice` (Optional, { field, id, max })*
+** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names
+matching these patterns in the hits.fields property of the response.
+** *`suggest` (Optional, { text })*
+** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an `<index>` in the request path.
+** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.

[discrete]
=== graph
[discrete]
==== explore
Explore extracted and summarized information about the documents and terms in an index.

{ref}/graph-explore-api.html[Endpoint documentation]
[source,ts]
----
-client.graph.explore(...)
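+// Illustrative: explore term relationships in a hypothetical index, e.g. { index: 'clicklogs' }.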
+client.graph.explore({ index })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices
+** *`routing` (Optional, string)*: Specific routing value
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`connections` (Optional, { connections, query, vertices })*
+** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })*
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
+** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])*
+
[discrete]
=== ilm
[discrete]
==== delete_lifecycle
Deletes the specified lifecycle policy definition. A currently used policy cannot be deleted.

{ref}/ilm-delete-lifecycle.html[Endpoint documentation]
[source,ts]
----
-client.ilm.deleteLifecycle(...)
+client.ilm.deleteLifecycle({ policy })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`policy` (string)*: Identifier for the policy.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
[discrete]
==== explain_lifecycle
Retrieves information about the index's current lifecycle state, such as the currently executing phase, action, and step.
@@ -1267,9 +2840,20 @@ Retrieves information about the index's current lifecycle state, such as the cur
{ref}/ilm-explain-lifecycle.html[Endpoint documentation]
[source,ts]
----
-client.ilm.explainLifecycle(...)
+client.ilm.explainLifecycle({ index })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: List of data streams, indices, and aliases to target. Supports wildcards (`*`).
+To target all data streams and indices, use `*` or `_all`.
+** *`only_errors` (Optional, boolean)*: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist.
+** *`only_managed` (Optional, boolean)*: Filters the returned indices to only indices that are managed by ILM.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
[discrete]
==== get_lifecycle
Returns the specified policy definition. Includes the policy version and last modified date.
@@ -1277,9 +2861,17 @@ Returns the specified policy definition. Includes the policy version and last mo {ref}/ilm-get-lifecycle.html[Endpoint documentation] [source,ts] ---- -client.ilm.getLifecycle(...) +client.ilm.getLifecycle({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`policy` (Optional, string)*: Identifier for the policy. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + [discrete] ==== get_status Retrieves the current index lifecycle management (ILM) status. @@ -1287,9 +2879,14 @@ Retrieves the current index lifecycle management (ILM) status. {ref}/ilm-get-status.html[Endpoint documentation] [source,ts] ---- -client.ilm.getStatus(...) +client.ilm.getStatus() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== migrate_to_data_tiers Migrates the indices and ILM policies away from custom node attribute allocation routing to data tiers routing @@ -1297,9 +2894,18 @@ Migrates the indices and ILM policies away from custom node attribute allocation {ref}/ilm-migrate-to-data-tiers.html[Endpoint documentation] [source,ts] ---- -client.ilm.migrateToDataTiers(...) +client.ilm.migrateToDataTiers({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`dry_run` (Optional, boolean)*: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. +This provides a way to retrieve the indices and ILM policies that need to be migrated. +** *`legacy_template_to_delete` (Optional, string)* +** *`node_attribute` (Optional, string)* + [discrete] ==== move_to_step Manually moves an index into the specified step and executes that step. @@ -1307,9 +2913,17 @@ Manually moves an index into the specified step and executes that step. {ref}/ilm-move-to-step.html[Endpoint documentation] [source,ts] ---- -client.ilm.moveToStep(...) +client.ilm.moveToStep({ index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: The name of the index whose lifecycle step is to change +** *`current_step` (Optional, { action, name, phase })* +** *`next_step` (Optional, { action, name, phase })* + [discrete] ==== put_lifecycle Creates a lifecycle policy @@ -1317,9 +2931,17 @@ Creates a lifecycle policy {ref}/ilm-put-lifecycle.html[Endpoint documentation] [source,ts] ---- -client.ilm.putLifecycle(...) +client.ilm.putLifecycle({ policy }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`policy` (string)*: Identifier for the policy. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + [discrete] ==== remove_policy Removes the assigned lifecycle policy and stops managing the specified index @@ -1327,9 +2949,15 @@ Removes the assigned lifecycle policy and stops managing the specified index {ref}/ilm-remove-policy.html[Endpoint documentation] [source,ts] ---- -client.ilm.removePolicy(...) 
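+// Illustrative: client.ilm.removePolicy({ index: 'my-index' }) removes the policy from a hypothetical index.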
+client.ilm.removePolicy({ index })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The name of the index from which to remove the policy
+
[discrete]
==== retry
Retries executing the policy for an index that is in the ERROR step.
@@ -1337,9 +2965,15 @@ Retries executing the policy for an index that is in the ERROR step.
{ref}/ilm-retry-policy.html[Endpoint documentation]
[source,ts]
----
-client.ilm.retry(...)
+client.ilm.retry({ index })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The name of the indices (comma-separated) whose failed lifecycle step is to be retried
+
[discrete]
==== start
Start the index lifecycle management (ILM) plugin.
@@ -1347,9 +2981,16 @@ Start the index lifecycle management (ILM) plugin.
{ref}/ilm-start.html[Endpoint documentation]
[source,ts]
----
-client.ilm.start(...)
+client.ilm.start({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`master_timeout` (Optional, string | -1 | 0)*
+** *`timeout` (Optional, string | -1 | 0)*
+
[discrete]
==== stop
Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin
@@ -1357,9 +2998,16 @@ Halts all lifecycle management operations and stops the index lifecycle manageme
{ref}/ilm-stop.html[Endpoint documentation]
[source,ts]
----
-client.ilm.stop(...)
+client.ilm.stop({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`master_timeout` (Optional, string | -1 | 0)*
+** *`timeout` (Optional, string | -1 | 0)*
+
[discrete]
=== indices
[discrete]
==== add_block
Adds a block to an index.
@@ -1369,9 +3017,21 @@ Adds a block to an index.
{ref}/index-modules-blocks.html[Endpoint documentation]
[source,ts]
----
-client.indices.addBlock(...)
+client.indices.addBlock({ index, block })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: A comma separated list of indices to add a block to
+** *`block` (Enum("metadata" | "read" | "read_only" | "write"))*: The block to add (one of read, write, read_only or metadata)
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+
[discrete]
==== analyze
Performs the analysis process on a text and returns the tokens breakdown of the text.
@@ -1379,9 +3039,24 @@ Performs the analysis process on a text and return the tokens breakdown of the t
{ref}/indices-analyze.html[Endpoint documentation]
[source,ts]
----
-client.indices.analyze(...)
+client.indices.analyze({ ...
}) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string)*: The name of the index to scope the operation +** *`analyzer` (Optional, string)* +** *`attributes` (Optional, string[])* +** *`char_filter` (Optional, string | { type } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])* +** *`explain` (Optional, boolean)* +** *`field` (Optional, string)* +** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, rule_files } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])* +** *`normalizer` (Optional, string)* +** *`text` (Optional, string | string[])* +** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, max_token_length } | { type, max_token_length } | { type, max_token_length } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, 
flags, group, pattern } | { type, rule_files })*
+
[discrete]
==== clear_cache
Clears all or specific caches for one or more indices.
@@ -1389,9 +3064,22 @@ Clears all or specific caches for one or more indices.
{ref}/indices-clearcache.html[Endpoint documentation]
[source,ts]
----
-client.indices.clearCache(...)
+client.indices.clearCache({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of index names to limit the operation
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`fielddata` (Optional, boolean)*: Clear field data
+** *`fields` (Optional, string | string[])*: A list of fields to clear when using the `fielddata` parameter (default: all)
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`query` (Optional, boolean)*: Clear query caches
+** *`request` (Optional, boolean)*: Clear request cache
+
[discrete]
==== clone
Clones an index
@@ -1399,9 +3087,21 @@ Clones an index
{ref}/indices-clone-index.html[Endpoint documentation]
[source,ts]
----
-client.indices.clone(...)
+client.indices.clone({ index, target })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The name of the source index to clone
+** *`target` (string)*: The name of the target index to clone into
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the cloned index before the operation returns.
+** *`aliases` (Optional, Record)*
+** *`settings` (Optional, Record)*
+
[discrete]
==== close
Closes an index.
@@ -1409,9 +3109,21 @@ Closes an index.
{ref}/indices-open-close.html[Endpoint documentation]
[source,ts]
----
-client.indices.close(...)
+client.indices.close({ index })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: A comma separated list of indices to close
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of active shards to wait for before the operation returns.
+
[discrete]
==== create
Creates an index with optional settings and mappings.
@@ -1419,9 +3131,24 @@ Creates an index with optional settings and mappings. {ref}/indices-create-index.html[Endpoint documentation] [source,ts] ---- -client.indices.create(...) +client.indices.create({ index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: The name of the index +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for before the operation returns. +** *`aliases` (Optional, Record)* +** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })*: Mapping for fields in the index. If specified, this mapping can include: +- Field names +- Field data types +- Mapping parameters +** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, shards, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })* + [discrete] ==== create_data_stream Creates a data stream @@ -1429,9 +3156,15 @@ Creates a data stream {ref}/data-streams.html[Endpoint documentation] [source,ts] ---- -client.indices.createDataStream(...) +client.indices.createDataStream({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the data stream + [discrete] ==== data_streams_stats Provides statistics on operations happening in a data stream. @@ -1439,9 +3172,16 @@ Provides statistics on operations happening in a data stream. {ref}/data-streams.html[Endpoint documentation] [source,ts] ---- -client.indices.dataStreamsStats(...) +client.indices.dataStreamsStats({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string)*: A list of data stream names; use `_all` or empty string to perform the operation on all data streams +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* + [discrete] ==== delete Deletes an index. @@ -1449,9 +3189,20 @@ Deletes an index. {ref}/indices-delete-index.html[Endpoint documentation] [source,ts] ---- -client.indices.delete(...) 
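+// Illustrative: { index: 'my-index' } deletes a single index; a comma-separated list or wildcard also works.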
+client.indices.delete({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: A list of indices to delete; use `_all` or `*` string to delete all indices
+** *`allow_no_indices` (Optional, boolean)*: Ignore if a wildcard expression resolves to no concrete indices (default: false)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open, closed, or hidden indices
+** *`ignore_unavailable` (Optional, boolean)*: Ignore unavailable indexes (default: false)
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+
[discrete]
==== delete_alias
Deletes an alias.
@@ -1459,9 +3210,18 @@ Deletes an alias.
{ref}/indices-aliases.html[Endpoint documentation]
[source,ts]
----
-client.indices.deleteAlias(...)
+client.indices.deleteAlias({ index, name })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: A list of index names (supports wildcards); use `_all` for all indices
+** *`name` (string | string[])*: A list of aliases to delete (supports wildcards); use `_all` to delete all aliases for the specified indices.
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+
[discrete]
==== delete_data_lifecycle
Deletes the data lifecycle of the selected data streams.
@@ -1469,9 +3229,18 @@ Deletes the data lifecycle of the selected data streams.
{ref}/dlm-delete-lifecycle.html[Endpoint documentation]
[source,ts]
----
-client.indices.deleteDataLifecycle(...)
+client.indices.deleteDataLifecycle({ name })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: A list of data streams whose data lifecycle will be deleted; use `*` to target all data streams
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+
[discrete]
==== delete_data_stream
Deletes a data stream.
@@ -1479,9 +3248,16 @@ Deletes a data stream.
{ref}/data-streams.html[Endpoint documentation]
[source,ts]
----
-client.indices.deleteDataStream(...)
+client.indices.deleteDataStream({ name })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: A list of data streams to delete; use `*` to delete all data streams
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+
[discrete]
==== delete_index_template
Deletes an index template.
@@ -1489,9 +3265,17 @@ Deletes an index template.
{ref}/indices-templates.html[Endpoint documentation]
[source,ts]
----
-client.indices.deleteIndexTemplate(...)
+client.indices.deleteIndexTemplate({ name })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: List of index template names used to limit the request.
Wildcard (*) expressions are supported. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + [discrete] ==== delete_template Deletes an index template. @@ -1499,9 +3283,17 @@ Deletes an index template. {ref}/indices-templates.html[Endpoint documentation] [source,ts] ---- -client.indices.deleteTemplate(...) +client.indices.deleteTemplate({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the template +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout + [discrete] ==== disk_usage Analyzes the disk usage of each field of an index or data stream @@ -1509,9 +3301,20 @@ Analyzes the disk usage of each field of an index or data stream {ref}/indices-disk-usage.html[Endpoint documentation] [source,ts] ---- -client.indices.diskUsage(...) +client.indices.diskUsage({ index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: List of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as open,hidden. +** *`flush` (Optional, boolean)*: If true, the API performs a flush before analysis. If false, the response may not include uncommitted data. +** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. +** *`run_expensive_tasks` (Optional, boolean)*: Analyzing field disk usage is resource-intensive. To use the API, this parameter must be set to true. + [discrete] ==== downsample Downsample an index @@ -1519,9 +3322,16 @@ Downsample an index {ref}/xpack-rollup.html[Endpoint documentation] [source,ts] ---- -client.indices.downsample(...) +client.indices.downsample({ index, target_index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: The index to downsample +** *`target_index` (string)*: The name of the target index to store downsampled data + [discrete] ==== exists Returns information about whether a particular index exists. @@ -1529,9 +3339,21 @@ Returns information about whether a particular index exists. {ref}/indices-exists.html[Endpoint documentation] [source,ts] ---- -client.indices.exists(...) 
+client.indices.exists({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: A list of index names
+** *`allow_no_indices` (Optional, boolean)*: Ignore if a wildcard expression resolves to no concrete indices (default: false)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
+** *`ignore_unavailable` (Optional, boolean)*: Ignore unavailable indexes (default: false)
+** *`include_defaults` (Optional, boolean)*: Whether to return all default settings for each of the indices.
+** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false)
+
[discrete]
==== exists_alias
Returns information about whether a particular alias exists.
@@ -1539,9 +3361,20 @@ Returns information about whether a particular alias exists.
{ref}/indices-aliases.html[Endpoint documentation]
[source,ts]
----
-client.indices.existsAlias(...)
+client.indices.existsAlias({ name })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: A list of alias names to return
+** *`index` (Optional, string | string[])*: A list of index names to filter aliases
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false)
+
[discrete]
==== exists_index_template
Returns information about whether a particular index template exists.
@@ -1549,9 +3382,16 @@ Returns information about whether a particular index template exists.
{ref}/indices-templates.html[Endpoint documentation]
[source,ts]
----
-client.indices.existsIndexTemplate(...)
+client.indices.existsIndexTemplate({ name })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: List of index template names used to limit the request. Wildcard (*) expressions are supported.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+
[discrete]
==== exists_template
Returns information about whether a particular index template exists.
@@ -1559,9 +3399,18 @@ Returns information about whether a particular index template exists.
{ref}/indices-templates.html[Endpoint documentation]
[source,ts]
----
-client.indices.existsTemplate(...)
+client.indices.existsTemplate({ name })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: The comma separated names of the index templates
+** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
+** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false)
+** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
+
[discrete]
==== explain_data_lifecycle
Retrieves information about the index's current DLM lifecycle, such as any potential error encountered, time since creation, etc.
{ref}/dlm-explain-lifecycle.html[Endpoint documentation]
[source,ts]
----
-client.indices.explainDataLifecycle(...)
+client.indices.explainDataLifecycle({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: The name of the index to explain
+** *`include_defaults` (Optional, boolean)*: Indicates if the API should return the default values the system uses for the index's lifecycle
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+
[discrete]
==== field_usage_stats
Returns the field usage stats for each field of an index
@@ -1579,9 +3436,30 @@ Returns the field usage stats for each field of an index
{ref}/field-usage-stats.html[Endpoint documentation]
[source,ts]
----
-client.indices.fieldUsageStats(...)
+client.indices.fieldUsageStats({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: List or wildcard expression of index names used to limit the request.
+** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets
+only missing or closed indices. This behavior applies even if the request targets other open indices.
+For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index
+starts with `bar`.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument
+determines whether wildcard expressions match hidden data streams. Supports a list of values,
+such as `open,hidden`.
+** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response.
+** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires,
+the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails
+and returns an error.
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to all or any
+positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+
[discrete]
==== flush
Performs the flush operation on one or more indices.
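+
+As a quick sketch before the formal signature below, flushing a single index might look like this (assuming an already constructed `client`; the index name is illustrative, and `wait_if_ongoing` is one of the parameters documented below):
+
+[source,ts]
+----
+const response = await client.indices.flush({
+  index: 'my-index', // hypothetical index name
+  wait_if_ongoing: true
+})
+console.log(response)
+----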
@@ -1589,9 +3467,20 @@ Performs the flush operation on one or more indices.
{ref}/indices-flush.html[Endpoint documentation]
[source,ts]
----
-client.indices.flush(...)
+client.indices.flush({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string for all indices
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`force` (Optional, boolean)*: Whether a flush should be forced even if it is not necessarily needed, i.e. if no changes will be committed to the index. This is useful if transaction log IDs should be incremented even if no uncommitted changes are present. (This setting can be considered as internal)
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`wait_if_ongoing` (Optional, boolean)*: If set to true the flush operation will block until the flush can be executed if another flush operation is already executing. The default is true. If set to false the flush will be skipped if another flush operation is already running.
+
[discrete]
==== forcemerge
Performs the force merge operation on one or more indices.
@@ -1599,9 +3488,22 @@ Performs the force merge operation on one or more indices.
{ref}/indices-forcemerge.html[Endpoint documentation]
[source,ts]
----
-client.indices.forcemerge(...)
+client.indices.forcemerge({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`flush` (Optional, boolean)*: Specify whether the index should be flushed after performing the operation (default: true)
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`max_num_segments` (Optional, number)*: The number of segments the index should be merged into (default: dynamic)
+** *`only_expunge_deletes` (Optional, boolean)*: Specify whether the operation should only expunge deleted documents
+** *`wait_for_completion` (Optional, boolean)*: Whether the request should wait until the force merge is completed.
+
[discrete]
==== get
Returns information about one or more indices.
@@ -1609,9 +3511,28 @@ Returns information about one or more indices.
{ref}/indices-get-index.html[Endpoint documentation]
[source,ts]
----
-client.indices.get(...)
+client.indices.get({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: List of data streams, indices, and index aliases used to limit the request.
+Wildcard expressions (*) are supported. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only +missing or closed indices. This behavior applies even if the request targets other open indices. For example, +a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument +determines whether wildcard expressions match hidden data streams. Supports a list of values, +such as open,hidden. +** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format. +** *`ignore_unavailable` (Optional, boolean)*: If false, requests that target a missing index return an error. +** *`include_defaults` (Optional, boolean)*: If true, return all default settings in the response. +** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`features` (Optional, { name, description } | { name, description }[])*: Return only information on specified index features + [discrete] ==== get_alias Returns an alias. @@ -1619,9 +3540,20 @@ Returns an alias. {ref}/indices-aliases.html[Endpoint documentation] [source,ts] ---- -client.indices.getAlias(...) +client.indices.getAlias({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: A list of alias names to return +** *`index` (Optional, string | string[])*: A list of index names to filter aliases +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) + [discrete] ==== get_data_lifecycle Returns the data lifecycle of the selected data streams. @@ -1629,9 +3561,17 @@ Returns the data lifecycle of the selected data streams. {ref}/dlm-get-lifecycle.html[Endpoint documentation] [source,ts] ---- -client.indices.getDataLifecycle(...) 
+client.indices.getDataLifecycle({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string | string[])*: A list of data streams to get; use `*` to get all data streams +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open) +** *`include_defaults` (Optional, boolean)*: Return all relevant default configurations for the data stream (default: false) + [discrete] ==== get_data_stream Returns data streams. @@ -1639,9 +3579,17 @@ Returns data streams. {ref}/data-streams.html[Endpoint documentation] [source,ts] ---- -client.indices.getDataStream(...) +client.indices.getDataStream({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: A list of data streams to get; use `*` to get all data streams +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open) +** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. + [discrete] ==== get_field_mapping Returns mapping for one or more fields. @@ -1649,9 +3597,21 @@ Returns mapping for one or more fields. {ref}/indices-get-field-mapping.html[Endpoint documentation] [source,ts] ---- -client.indices.getFieldMapping(...) +client.indices.getFieldMapping({ fields }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`fields` (string | string[])*: A list of fields +** *`index` (Optional, string | string[])*: A list of index names +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`include_defaults` (Optional, boolean)*: Whether the default mapping values should be returned as well +** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) + [discrete] ==== get_index_template Returns an index template. @@ -1659,9 +3619,19 @@ Returns an index template. {ref}/indices-templates.html[Endpoint documentation] [source,ts] ---- -client.indices.getIndexTemplate(...) +client.indices.getIndexTemplate({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string)*: List of index template names used to limit the request. Wildcard (*) expressions are supported. +** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
+** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
+
[discrete]
==== get_mapping
Returns mappings for one or more indices.
@@ -1669,9 +3639,20 @@ Returns mappings for one or more indices.
{ref}/indices-get-mapping.html[Endpoint documentation]
[source,ts]
----
-client.indices.getMapping(...)
+client.indices.getMapping({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of index names
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false)
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+
[discrete]
==== get_settings
Returns settings for one or more indices.
@@ -1679,9 +3660,23 @@ Returns settings for one or more indices.
{ref}/indices-get-settings.html[Endpoint documentation]
[source,ts]
----
-client.indices.getSettings(...)
+client.indices.getSettings({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
+** *`name` (Optional, string | string[])*: The name of the settings that should be included
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`include_defaults` (Optional, boolean)*: Whether to return all default settings for each of the indices.
+** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false)
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+
[discrete]
==== get_template
Returns an index template.
@@ -1689,9 +3684,18 @@ Returns an index template.
{ref}/indices-templates.html[Endpoint documentation]
[source,ts]
----
-client.indices.getTemplate(...)
+client.indices.getTemplate({ ...
}) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: The comma separated names of the index templates +** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) +** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node + [discrete] ==== migrate_to_data_stream Migrates an alias to a data stream @@ -1699,9 +3703,15 @@ Migrates an alias to a data stream {ref}/data-streams.html[Endpoint documentation] [source,ts] ---- -client.indices.migrateToDataStream(...) +client.indices.migrateToDataStream({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the alias to migrate + [discrete] ==== modify_data_stream Modifies a data stream @@ -1709,9 +3719,15 @@ Modifies a data stream {ref}/data-streams.html[Endpoint documentation] [source,ts] ---- -client.indices.modifyDataStream(...) +client.indices.modifyDataStream({ actions }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`actions` ({ add_backing_index, remove_backing_index }[])*: Actions to perform. + [discrete] ==== open Opens an index. @@ -1719,9 +3735,21 @@ Opens an index. {ref}/indices-open-close.html[Endpoint documentation] [source,ts] ---- -client.indices.open(...) +client.indices.open({ index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A comma separated list of indices to open +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of active shards to wait for before the operation returns. + [discrete] ==== promote_data_stream Promotes a data stream from a replicated data stream managed by CCR to a regular data stream @@ -1729,9 +3757,15 @@ Promotes a data stream from a replicated data stream managed by CCR to a regular {ref}/data-streams.html[Endpoint documentation] [source,ts] ---- -client.indices.promoteDataStream(...) +client.indices.promoteDataStream({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the data stream + [discrete] ==== put_alias Creates or updates an alias. @@ -1739,9 +3773,23 @@ Creates or updates an alias. {ref}/indices-aliases.html[Endpoint documentation] [source,ts] ---- -client.indices.putAlias(...) +client.indices.putAlias({ index, name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A list of index names the alias should point to (supports wildcards); use `_all` to perform the operation on all indices. 
+** *`name` (string)*: The name of the alias to be created or updated
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
+** *`index_routing` (Optional, string)*
+** *`is_write_index` (Optional, boolean)*
+** *`routing` (Optional, string)*
+** *`search_routing` (Optional, string)*
+
[discrete]
==== put_data_lifecycle
Updates the data lifecycle of the selected data streams.
@@ -1749,9 +3797,19 @@ Updates the data lifecycle of the selected data streams.
{ref}/dlm-put-lifecycle.html[Endpoint documentation]
[source,ts]
----
-client.indices.putDataLifecycle(...)
+client.indices.putDataLifecycle({ name })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: A list of data streams whose lifecycle will be updated; use `*` to set the lifecycle to all data streams
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`data_retention` (Optional, string | -1 | 0)*
+
[discrete]
==== put_index_template
Creates or updates an index template.
@@ -1759,9 +3817,23 @@ Creates or updates an index template.
{ref}/indices-templates.html[Endpoint documentation]
[source,ts]
----
-client.indices.putIndexTemplate(...)
+client.indices.putIndexTemplate({ name })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: Index or template name
+** *`create` (Optional, boolean)*: Whether the index template should only be added if new or can also replace an existing one
+** *`index_patterns` (Optional, string | string[])*
+** *`composed_of` (Optional, string[])*
+** *`template` (Optional, { aliases, mappings, settings, lifecycle })*
+** *`data_stream` (Optional, { hidden })*
+** *`priority` (Optional, number)*
+** *`version` (Optional, number)*
+** *`_meta` (Optional, Record)*
+
[discrete]
==== put_mapping
Updates the index mappings.
@@ -1769,9 +3841,40 @@ Updates the index mappings.
{ref}/indices-put-mapping.html[Endpoint documentation]
[source,ts]
----
-client.indices.putMapping(...)
+client.indices.putMapping({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices.
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices.
(This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`write_index_only` (Optional, boolean)*: When true, applies mappings only to the write index of an alias or data stream
+** *`date_detection` (Optional, boolean)*: Controls whether dynamic date detection is enabled.
+** *`dynamic` (Optional, Enum("strict" | "runtime" | true | false))*: Controls whether new fields are added dynamically.
+** *`dynamic_date_formats` (Optional, string[])*: If date detection is enabled then new string fields are checked
+against 'dynamic_date_formats' and if the value matches then
+a new date field is added instead of a string.
+** *`dynamic_templates` (Optional, Record | Record[])*: Specify dynamic templates for the mapping.
+** *`_field_names` (Optional, { enabled })*: Control whether field names are enabled for the index.
+** *`_meta` (Optional, Record)*: A mapping type can have custom metadata associated with it. These are
+not used at all by Elasticsearch, but can be used to store
+application-specific metadata.
+** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields.
+** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include:
+
+- Field name
+- Field data type
+- Mapping parameters
+** *`_routing` (Optional, { required })*: Enable making a routing value required on indexed documents.
+** *`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })*: Control whether the _source field is enabled on the index.
+** *`runtime` (Optional, Record)*: Mapping of runtime fields for the index.
+
[discrete]
==== put_settings
Updates the index settings.
@@ -1779,9 +3882,22 @@ Updates the index settings.
{ref}/indices-update-settings.html[Endpoint documentation]
[source,ts]
----
-client.indices.putSettings(...)
+client.indices.putSettings({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`preserve_existing` (Optional, boolean)*: Whether to update existing settings.
If set to `true`, existing settings on an index remain unchanged; the default is `false`
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+
[discrete]
==== put_template
Creates or updates an index template.
@@ -1789,9 +3905,32 @@ Creates or updates an index template.
{ref}/indices-templates.html[Endpoint documentation]
[source,ts]
----
-client.indices.putTemplate(...)
+client.indices.putTemplate({ name })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: The name of the template
+** *`create` (Optional, boolean)*: If true, this request cannot replace or update existing index templates.
+** *`flat_settings` (Optional, boolean)*
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*
+** *`order` (Optional, number)*: Order in which Elasticsearch applies this template if the index
+matches multiple templates.
+
+Templates with lower 'order' values are merged first. Templates with higher
+'order' values are merged later, overriding templates with lower values.
+** *`aliases` (Optional, Record)*: Aliases for the index.
+** *`index_patterns` (Optional, string | string[])*: Array of wildcard expressions used to match the names
+of indices during creation.
+** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })*: Mapping for fields in the index.
+** *`settings` (Optional, Record)*: Configuration options for the index.
+** *`version` (Optional, number)*: Version number used to manage index templates externally. This number
+is not automatically generated by Elasticsearch.
+
[discrete]
==== recovery
Returns information about ongoing index shard recoveries.
@@ -1799,9 +3938,17 @@ Returns information about ongoing index shard recoveries.
{ref}/indices-recovery.html[Endpoint documentation]
[source,ts]
----
-client.indices.recovery(...)
+client.indices.recovery({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
+** *`active_only` (Optional, boolean)*: Display only those recoveries that are currently ongoing
+** *`detailed` (Optional, boolean)*: Whether to display detailed information about shard recovery
+
[discrete]
==== refresh
Performs the refresh operation in one or more indices.
@@ -1809,9 +3956,18 @@ Performs the refresh operation in one or more indices.
{ref}/indices-refresh.html[Endpoint documentation]
[source,ts]
----
-client.indices.refresh(...)
+client.indices.refresh({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) + [discrete] ==== reload_search_analyzers Reloads an index's search analyzers and their resources. @@ -1819,9 +3975,18 @@ Reloads an index's search analyzers and their resources. {ref}/indices-reload-analyzers.html[Endpoint documentation] [source,ts] ---- -client.indices.reloadSearchAnalyzers(...) +client.indices.reloadSearchAnalyzers({ index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A list of index names to reload analyzers for +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) + [discrete] ==== resolve_index Returns information about any matching indices, aliases, and data streams @@ -1829,9 +3994,16 @@ Returns information about any matching indices, aliases, and data streams {ref}/indices-resolve-index-api.html[Endpoint documentation] [source,ts] ---- -client.indices.resolveIndex(...) +client.indices.resolveIndex({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string | string[])*: A list of names or wildcard expressions +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open) + [discrete] ==== rollover Updates an alias to point to a new index when the existing index @@ -1840,9 +4012,24 @@ is considered to be too large or too old. {ref}/indices-rollover-index.html[Endpoint documentation] [source,ts] ---- -client.indices.rollover(...) +client.indices.rollover({ alias }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`alias` (string)*: The name of the alias to rollover +** *`new_index` (Optional, string)*: The name of the rollover index +** *`dry_run` (Optional, boolean)*: If set to true the rollover action will only be validated but not actually performed even if a condition matches. The default is false +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the newly created rollover index before the operation returns. 
+** *`aliases` (Optional, Record)* +** *`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })* +** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })* +** *`settings` (Optional, Record)* + [discrete] ==== segments Provides low-level information about segments in a Lucene index. @@ -1850,9 +4037,19 @@ Provides low-level information about segments in a Lucene index. {ref}/indices-segments.html[Endpoint documentation] [source,ts] ---- -client.indices.segments(...) +client.indices.segments({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`verbose` (Optional, boolean)*: Includes detailed memory usage by Lucene. + [discrete] ==== shard_stores Provides store information for shard copies of indices. @@ -1860,9 +4057,22 @@ Provides store information for shard copies of indices. {ref}/indices-shards-stores.html[Endpoint documentation] [source,ts] ---- -client.indices.shardStores(...) +client.indices.shardStores({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all +value targets only missing or closed indices. This behavior applies even if the request +targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, +this argument determines whether wildcard expressions match hidden data streams. +** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. +** *`status` (Optional, Enum("green" | "yellow" | "red" | "all") | Enum("green" | "yellow" | "red" | "all")[])*: List of shard health statuses used to limit the request. + [discrete] ==== shrink Allow to shrink an existing index into a new index with fewer primary shards. @@ -1870,9 +4080,21 @@ Allow to shrink an existing index into a new index with fewer primary shards. {ref}/indices-shrink-index.html[Endpoint documentation] [source,ts] ---- -client.indices.shrink(...) 
+client.indices.shrink({ index, target }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: The name of the source index to shrink +** *`target` (string)*: The name of the target index to shrink into +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the shrunken index before the operation returns. +** *`aliases` (Optional, Record)* +** *`settings` (Optional, Record)* + [discrete] ==== simulate_index_template Simulate matching the given index name against the index templates in the system @@ -1880,9 +4102,31 @@ Simulate matching the given index name against the index templates in the system {ref}/indices-templates.html[Endpoint documentation] [source,ts] ---- -client.indices.simulateIndexTemplate(...) +client.indices.simulateIndexTemplate({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: Index or template name to simulate +** *`create` (Optional, boolean)*: If `true`, the template passed in the body is only used if no existing +templates match the same index patterns. If `false`, the simulation uses +the template with the highest priority. Note that the template is not +permanently added or updated in either case; it is only used for the +simulation. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received +before the timeout expires, the request fails and returns an error. +** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. +** *`allow_auto_create` (Optional, boolean)* +** *`index_patterns` (Optional, string | string[])* +** *`composed_of` (Optional, string[])* +** *`template` (Optional, { aliases, mappings, settings, lifecycle })* +** *`data_stream` (Optional, { hidden })* +** *`priority` (Optional, number)* +** *`version` (Optional, number)* +** *`_meta` (Optional, Record)* + [discrete] ==== simulate_template Simulate resolving the given template name or body @@ -1890,19 +4134,41 @@ Simulate resolving the given template name or body {ref}/indices-templates.html[Endpoint documentation] [source,ts] ---- -client.indices.simulateTemplate(...) +client.indices.simulateTemplate({ ... }) ---- [discrete] -==== split +==== Arguments + +* *Request (object):* +** *`name` (Optional, string)*: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit +this parameter and specify the template configuration in the request body. +** *`create` (Optional, boolean)*: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. + +[discrete] +==== split Allows you to split an existing index into a new index with more primary shards. 
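+
+Before the formal signature below, a minimal sketch of a split call (assuming an already constructed `client`; the index names and shard count are illustrative, and the source index must satisfy the usual split prerequisites, such as being made read-only first):
+
+[source,ts]
+----
+const response = await client.indices.split({
+  index: 'my-index',        // hypothetical source index
+  target: 'my-index-split', // hypothetical target index
+  settings: {
+    'index.number_of_shards': 4
+  }
+})
+console.log(response)
+----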
{ref}/indices-split-index.html[Endpoint documentation]
[source,ts]
----
-client.indices.split(...)
+client.indices.split({ index, target })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The name of the source index to split
+** *`target` (string)*: The name of the target index to split into
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the newly split index before the operation returns.
+** *`aliases` (Optional, Record)*
+** *`settings` (Optional, Record)*
+
[discrete]
==== stats
Provides statistics on operations happening in an index.
@@ -1910,9 +4176,27 @@ Provides statistics on operations happening in an index.
{ref}/indices-stats.html[Endpoint documentation]
[source,ts]
----
-client.indices.stats(...)
+client.indices.stats({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`metric` (Optional, string | string[])*: Limit the information returned to the specific metrics.
+** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
+** *`completion_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata and suggest statistics.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument
+determines whether wildcard expressions match hidden data streams. Supports a list of values,
+such as `open,hidden`.
+** *`fielddata_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata statistics.
+** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics.
+** *`forbid_closed_indices` (Optional, boolean)*: If true, statistics are not collected from closed indices.
+** *`groups` (Optional, string | string[])*: List of search groups to include in the search statistics.
+** *`include_segment_file_sizes` (Optional, boolean)*: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested).
+** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory.
+** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Indicates whether statistics are aggregated at the cluster, index, or shard level.
+
[discrete]
==== unfreeze
Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.
@@ -1920,9 +4204,21 @@ Unfreezes an index. When a frozen index is unfrozen, the index goes through the
{ref}/unfreeze-index-api.html[Endpoint documentation]
[source,ts]
----
-client.indices.unfreeze(...)
+client.indices.unfreeze({ index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The name of the index to unfreeze
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices.
(This includes `_all` string or when no indices have been specified) +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`wait_for_active_shards` (Optional, string)*: Sets the number of active shards to wait for before the operation returns. + [discrete] ==== update_aliases Updates index aliases. @@ -1930,9 +4226,17 @@ Updates index aliases. {ref}/indices-aliases.html[Endpoint documentation] [source,ts] ---- -client.indices.updateAliases(...) +client.indices.updateAliases({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`timeout` (Optional, string | -1 | 0)*: Request timeout +** *`actions` (Optional, { add_backing_index, remove_backing_index }[])* + [discrete] ==== validate_query Allows a user to validate a potentially expensive query without executing it. @@ -1940,9 +4244,28 @@ Allows a user to validate a potentially expensive query without executing it. {ref}/search-validate.html[Endpoint documentation] [source,ts] ---- -client.indices.validateQuery(...) +client.indices.validateQuery({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`all_shards` (Optional, boolean)*: Execute validation on all shards instead of one random shard per index +** *`analyzer` (Optional, string)*: The analyzer to use for the query string +** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`explain` (Optional, boolean)*: Return detailed information about the error +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored +** *`rewrite` (Optional, boolean)*: Provide a more detailed explanation showing the actual Lucene query that will be executed. 
+** *`q` (Optional, string)*: Query in the Lucene query string syntax +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* + [discrete] === ingest [discrete] @@ -1952,9 +4275,17 @@ Deletes a pipeline. {ref}/delete-pipeline-api.html[Endpoint documentation] [source,ts] ---- -client.ingest.deletePipeline(...) +client.ingest.deletePipeline({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Pipeline ID +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout + [discrete] ==== geo_ip_stats Returns statistical information about geoip databases @@ -1962,9 +4293,14 @@ Returns statistical information about geoip databases {ref}/geoip-stats-api.html[Endpoint documentation] [source,ts] ---- -client.ingest.geoIpStats(...) +client.ingest.geoIpStats() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== get_pipeline Returns a pipeline. @@ -1972,9 +4308,17 @@ Returns a pipeline. {ref}/get-pipeline-api.html[Endpoint documentation] [source,ts] ---- -client.ingest.getPipeline(...) +client.ingest.getPipeline({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Comma separated list of pipeline ids. Wildcards supported +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`summary` (Optional, boolean)*: Return pipelines without their definitions (default: false) + [discrete] ==== processor_grok Returns a list of the built-in patterns. @@ -1982,9 +4326,14 @@ Returns a list of the built-in patterns. {ref}/grok-processor.html[Endpoint documentation] [source,ts] ---- -client.ingest.processorGrok(...) +client.ingest.processorGrok() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== put_pipeline Creates or updates a pipeline. @@ -1992,9 +4341,23 @@ Creates or updates a pipeline. {ref}/put-pipeline-api.html[Endpoint documentation] [source,ts] ---- -client.ingest.putPipeline(...) +client.ingest.putPipeline({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: ID of the ingest pipeline to create or update. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`if_version` (Optional, number)*: Required version for optimistic concurrency control for pipeline updates +** *`_meta` (Optional, Record)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. 
+** *`description` (Optional, string)*: Description of the ingest pipeline. +** *`on_failure` (Optional, { attachment, append, csv, convert, date, date_index_name, dot_expander, enrich, fail, foreach, json, user_agent, kv, geoip, grok, gsub, join, lowercase, remove, rename, script, set, sort, split, trim, uppercase, urldecode, bytes, dissect, set_security_user, pipeline, drop, circle, inference }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. +** *`processors` (Optional, { attachment, append, csv, convert, date, date_index_name, dot_expander, enrich, fail, foreach, json, user_agent, kv, geoip, grok, gsub, join, lowercase, remove, rename, script, set, sort, split, trim, uppercase, urldecode, bytes, dissect, set_security_user, pipeline, drop, circle, inference }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. +** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. + [discrete] ==== simulate Allows to simulate a pipeline with example documents. @@ -2002,9 +4365,18 @@ Allows to simulate a pipeline with example documents. {ref}/simulate-pipeline-api.html[Endpoint documentation] [source,ts] ---- -client.ingest.simulate(...) +client.ingest.simulate({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Pipeline ID +** *`verbose` (Optional, boolean)*: Verbose mode. Display data output for each processor in executed pipeline +** *`docs` (Optional, { _id, _index, _source }[])* +** *`pipeline` (Optional, { description, on_failure, processors, version })* + [discrete] === license [discrete] @@ -2014,9 +4386,14 @@ Deletes licensing information for the cluster {ref}/delete-license.html[Endpoint documentation] [source,ts] ---- -client.license.delete(...) +client.license.delete() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== get Retrieves licensing information for the cluster @@ -2024,9 +4401,17 @@ Retrieves licensing information for the cluster {ref}/get-license.html[Endpoint documentation] [source,ts] ---- -client.license.get(...) +client.license.get({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`accept_enterprise` (Optional, boolean)*: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. +This parameter is deprecated and will always be set to true in 8.x. +** *`local` (Optional, boolean)*: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. + [discrete] ==== get_basic_status Retrieves information about the status of the basic license. @@ -2034,9 +4419,14 @@ Retrieves information about the status of the basic license. {ref}/get-basic-status.html[Endpoint documentation] [source,ts] ---- -client.license.getBasicStatus(...) 
+client.license.getBasicStatus()
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
==== get_trial_status
Retrieves information about the status of the trial license.
@@ -2044,9 +4434,14 @@ Retrieves information about the status of the trial license.
{ref}/get-trial-status.html[Endpoint documentation]
[source,ts]
----
-client.license.getTrialStatus(...)
+client.license.getTrialStatus()
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
==== post
Updates the license for the cluster.
@@ -2054,9 +4449,17 @@ Updates the license for the cluster.
{ref}/update-license.html[Endpoint documentation]
[source,ts]
----
-client.license.post(...)
+client.license.post({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`acknowledge` (Optional, boolean)*: Specifies whether you acknowledge the license changes.
+** *`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })*
+** *`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])*: A sequence of one or more JSON documents containing the license information.
+
[discrete]
==== post_start_basic
Starts an indefinite basic license.
@@ -2064,9 +4467,15 @@ Starts an indefinite basic license.
{ref}/start-basic.html[Endpoint documentation]
[source,ts]
----
-client.license.postStartBasic(...)
+client.license.postStartBasic({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`acknowledge` (Optional, boolean)*: Whether the user has acknowledged the acknowledge messages (default: false)
+
[discrete]
==== post_start_trial
Starts a limited time trial license.
@@ -2074,9 +4483,16 @@ starts a limited time trial license.
{ref}/start-trial.html[Endpoint documentation]
[source,ts]
----
-client.license.postStartTrial(...)
+client.license.postStartTrial({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`acknowledge` (Optional, boolean)*: Whether the user has acknowledged the acknowledge messages (default: false)
+** *`type_query_string` (Optional, string)*
+
[discrete]
=== logstash
[discrete]
==== delete_pipeline
Deletes Logstash Pipelines used by Central Management
{ref}/logstash-api-delete-pipeline.html[Endpoint documentation]
[source,ts]
----
-client.logstash.deletePipeline(...)
+client.logstash.deletePipeline({ id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: The ID of the Pipeline
+
[discrete]
==== get_pipeline
Retrieves Logstash Pipelines used by Central Management
{ref}/logstash-api-get-pipeline.html[Endpoint documentation]
[source,ts]
----
-client.logstash.getPipeline(...)
+client.logstash.getPipeline({ id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string | string[])*: A list of Pipeline IDs
+
[discrete]
==== put_pipeline
Adds and updates Logstash Pipelines used for Central Management
{ref}/logstash-api-put-pipeline.html[Endpoint documentation]
[source,ts]
----
-client.logstash.putPipeline(...)
+client.logstash.putPipeline({ id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: The ID of the Pipeline
+
[discrete]
=== migration
[discrete]
==== deprecations
Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
{ref}/migration-api-deprecation.html[Endpoint documentation]
[source,ts]
----
-client.migration.deprecations(...)
+client.migration.deprecations({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string)*: Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported.
+
[discrete]
==== get_feature_upgrade_status
Find out whether system features need to be upgraded or not
{ref}/migration-api-feature-upgrade.html[Endpoint documentation]
[source,ts]
----
-client.migration.getFeatureUpgradeStatus(...)
+client.migration.getFeatureUpgradeStatus()
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
==== post_feature_upgrade
Begin upgrades for system features
{ref}/migration-api-feature-upgrade.html[Endpoint documentation]
[source,ts]
----
-client.migration.postFeatureUpgrade(...)
+client.migration.postFeatureUpgrade()
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
=== ml
[discrete]
==== clear_trained_model_deployment_cache
Clear the cached results from a trained model deployment
{ref}/clear-trained-model-deployment-cache.html[Endpoint documentation]
[source,ts]
----
-client.ml.clearTrainedModelDeploymentCache(...)
+client.ml.clearTrainedModelDeploymentCache({ model_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`model_id` (string)*: The unique identifier of the trained model.
+
[discrete]
==== close_job
Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle.
{ref}/ml-close-job.html[Endpoint documentation]
[source,ts]
----
-client.ml.closeJob(...)
+client.ml.closeJob({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier.
+** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no jobs that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty jobs array when there are no matches and the subset of results when there are partial matches.
+If `false`, the request returns a 404 status code when there are no matches or only partial matches.
+** *`force` (Optional, boolean)*: Use to close a failed job, or to forcefully close a job which has not responded to its initial close request; the request returns without performing the associated actions such as flushing buffers and persisting the model snapshots.
+If you want the job to be in a consistent state after the close job API returns, do not set it to `true`.
This parameter should be used only in situations where the job has already failed or where you are not interested in results the job might have recently produced or might produce in the future. +** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait until a job has closed. + [discrete] ==== delete_calendar Deletes a calendar. @@ -2170,9 +4637,15 @@ Deletes a calendar. {ref}/ml-delete-calendar.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteCalendar(...) +client.ml.deleteCalendar({ calendar_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`calendar_id` (string)*: A string that uniquely identifies a calendar. + [discrete] ==== delete_calendar_event Deletes scheduled events from a calendar. @@ -2180,9 +4653,16 @@ Deletes scheduled events from a calendar. {ref}/ml-delete-calendar-event.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteCalendarEvent(...) +client.ml.deleteCalendarEvent({ calendar_id, event_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`calendar_id` (string)*: The ID of the calendar to modify +** *`event_id` (string)*: The ID of the event to remove from the calendar + [discrete] ==== delete_calendar_job Deletes anomaly detection jobs from a calendar. @@ -2190,9 +4670,17 @@ Deletes anomaly detection jobs from a calendar. {ref}/ml-delete-calendar-job.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteCalendarJob(...) +client.ml.deleteCalendarJob({ calendar_id, job_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`calendar_id` (string)*: A string that uniquely identifies a calendar. +** *`job_id` (string | string[])*: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a +list of jobs or groups. + [discrete] ==== delete_data_frame_analytics Deletes an existing data frame analytics job. @@ -2200,9 +4688,17 @@ Deletes an existing data frame analytics job. {ref}/delete-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteDataFrameAnalytics(...) +client.ml.deleteDataFrameAnalytics({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the data frame analytics job. +** *`force` (Optional, boolean)*: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. +** *`timeout` (Optional, string | -1 | 0)*: The time to wait for the job to be deleted. + [discrete] ==== delete_datafeed Deletes an existing datafeed. @@ -2210,9 +4706,20 @@ Deletes an existing datafeed. {ref}/ml-delete-datafeed.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteDatafeed(...) +client.ml.deleteDatafeed({ datafeed_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This +identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It must start and end with alphanumeric +characters. +** *`force` (Optional, boolean)*: Use to forcefully delete a started datafeed; this method is quicker than +stopping and deleting the datafeed. + [discrete] ==== delete_expired_data Deletes expired and unused machine learning data. @@ -2220,9 +4727,19 @@ Deletes expired and unused machine learning data. {ref}/ml-delete-expired-data.html[Endpoint documentation] [source,ts] ---- -client.ml.deleteExpiredData(...) +client.ml.deleteExpiredData({ ... 
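+  // A parameter sketch (the values below are illustrative, not defaults):
+  //   { requests_per_second: 100.0, timeout: '1h' } throttles the delete
+  //   processes and bounds how long they may run (see Arguments below).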
})
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (Optional, string)*: Identifier for an anomaly detection job. It can be a job identifier, a
+group name, or a wildcard expression.
+** *`requests_per_second` (Optional, float)*: The desired requests per second for the deletion processes. The default
+behavior is no throttling.
+** *`timeout` (Optional, string | -1 | 0)*: Specifies how long the underlying delete processes can run before they are canceled.
+
[discrete]
==== delete_filter
Deletes a filter.
{ref}/ml-delete-filter.html[Endpoint documentation]
[source,ts]
----
-client.ml.deleteFilter(...)
+client.ml.deleteFilter({ filter_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`filter_id` (string)*: A string that uniquely identifies a filter.
+
[discrete]
==== delete_forecast
Deletes forecasts from a machine learning job.
{ref}/ml-delete-forecast.html[Endpoint documentation]
[source,ts]
----
-client.ml.deleteForecast(...)
+client.ml.deleteForecast({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`forecast_id` (Optional, string)*: A list of forecast identifiers. If you do not specify
+this optional parameter or if you specify `_all` or `*`, the API deletes
+all forecasts from the job.
+** *`allow_no_forecasts` (Optional, boolean)*: Specifies whether an error occurs when there are no forecasts. In
+particular, if this parameter is set to `false` and there are no
+forecasts associated with the job, attempts to delete all forecasts
+return an error.
+** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for the completion of the delete
+operation. When this period of time elapses, the API fails and returns an
+error.
+
[discrete]
==== delete_job
Deletes an existing anomaly detection job.
@@ -2250,9 +4789,22 @@ Deletes an existing anomaly detection job.
{ref}/ml-delete-job.html[Endpoint documentation]
[source,ts]
----
-client.ml.deleteJob(...)
+client.ml.deleteJob({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`force` (Optional, boolean)*: Use to forcefully delete an opened job; this method is quicker than
+closing and deleting the job.
+** *`delete_user_annotations` (Optional, boolean)*: Specifies whether annotations that have been added by the
+user should be deleted along with any auto-generated annotations when the job is
+reset.
+** *`wait_for_completion` (Optional, boolean)*: Specifies whether the request should return immediately or wait until the
+job deletion completes.
+
[discrete]
==== delete_model_snapshot
Deletes an existing model snapshot.
{ref}/ml-delete-snapshot.html[Endpoint documentation]
[source,ts]
----
-client.ml.deleteModelSnapshot(...)
+client.ml.deleteModelSnapshot({ job_id, snapshot_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`snapshot_id` (string)*: Identifier for the model snapshot.
+
[discrete]
==== delete_trained_model
Deletes an existing trained inference model that is currently not referenced by an ingest pipeline.
@@ -2270,9 +4829,16 @@
{ref}/delete-trained-models.html[Endpoint documentation]
[source,ts]
----
-client.ml.deleteTrainedModel(...)
+client.ml.deleteTrainedModel({ model_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`model_id` (string)*: The unique identifier of the trained model.
+** *`force` (Optional, boolean)*: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment.
+
[discrete]
==== delete_trained_model_alias
Deletes a model alias that refers to the trained model
{ref}/delete-trained-models-aliases.html[Endpoint documentation]
[source,ts]
----
-client.ml.deleteTrainedModelAlias(...)
+client.ml.deleteTrainedModelAlias({ model_alias, model_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`model_alias` (string)*: The model alias to delete.
+** *`model_id` (string)*: The trained model ID to which the model alias refers.
+
[discrete]
==== estimate_model_memory
Estimates the model memory
@@ -2290,9 +4863,28 @@ Estimates the model memory
{ref}/ml-apis.html[Endpoint documentation]
[source,ts]
----
-client.ml.estimateModelMemory(...)
+client.ml.estimateModelMemory({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`analysis_config` (Optional, { bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })*: The analysis configuration for the job. For a list of the properties that you can specify in the
+`analysis_config` component of the body of this API, see the `analysis_config` documentation for anomaly detection jobs.
+** *`max_bucket_cardinality` (Optional, Record)*: Estimates of the highest cardinality in a single bucket that is observed
+for influencer fields over the time period that the job analyzes data.
+To produce a good answer, values must be provided for all influencer
+fields. Providing values for fields that are not listed as `influencers`
+has no effect on the estimation.
+** *`overall_cardinality` (Optional, Record)*: Estimates of the cardinality that is observed for fields over the whole
+time period that the job analyzes data. To produce a good answer, values
+must be provided for fields referenced in the `by_field_name`,
+`over_field_name` and `partition_field_name` of any detectors. Providing
+values for other fields has no effect on the estimation. It can be
+omitted from the request if no detectors have a `by_field_name`,
+`over_field_name` or `partition_field_name`.
+
[discrete]
==== evaluate_data_frame
Evaluates the data frame analytics for an annotated index.
{ref}/evaluate-dfanalytics.html[Endpoint documentation]
[source,ts]
----
-client.ml.evaluateDataFrame(...)
+client.ml.evaluateDataFrame({ evaluation, index })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`evaluation` ({ classification, outlier_detection, regression })*: Defines the type of evaluation you want to perform.
+** *`index` (string)*: Defines the `index` in which the evaluation will be performed.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index. + [discrete] ==== explain_data_frame_analytics Explains a data frame analytics config. @@ -2310,8 +4910,40 @@ Explains a data frame analytics config. {ref}/explain-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.ml.explainDataFrameAnalytics(...) ----- +client.ml.explainDataFrameAnalytics({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +** *`source` (Optional, { index, query, runtime_mappings, _source })*: The configuration of how to source the analysis data. It requires an +index. Optionally, query and _source may be specified. +** *`dest` (Optional, { index, results_field })*: The destination configuration, consisting of index and optionally +results_field (ml by default). +** *`analysis` (Optional, { classification, outlier_detection, regression })*: The analysis configuration, which contains the information necessary to +perform one of the following types of analysis: classification, outlier +detection, or regression. +** *`description` (Optional, string)*: A description of the job. +** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try to +create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. +** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. +** *`analyzed_fields` (Optional, { includes, excludes })*: Specify includes and/or excludes patterns to select which fields will be +included in the analysis. The patterns specified in excludes are applied +last, therefore excludes takes precedence. In other words, if the same +field is specified in both includes and excludes, then the field will not +be included in the analysis. +** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine +learning node capacity for it to be immediately assigned to a node. [discrete] ==== flush_job @@ -2320,9 +4952,25 @@ Forces any buffered data to be processed by the job. {ref}/ml-flush-job.html[Endpoint documentation] [source,ts] ---- -client.ml.flushJob(...) 
+client.ml.flushJob({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`advance_time` (Optional, string | Unit)*: Specifies to advance to a particular time value. Results are generated
+and the model is updated for data from the specified time interval.
+** *`calc_interim` (Optional, boolean)*: If true, calculates the interim results for the most recent bucket or all
+buckets within the latency period.
+** *`end` (Optional, string | Unit)*: When used in conjunction with `calc_interim` and `start`, specifies the
+range of buckets on which to calculate interim results.
+** *`skip_time` (Optional, string | Unit)*: Specifies to skip to a particular time value. Results are not generated
+and the model is not updated for data from the specified time interval.
+** *`start` (Optional, string | Unit)*: When used in conjunction with `calc_interim`, specifies the range of
+buckets on which to calculate interim results.
+
[discrete]
==== forecast
Predicts the future behavior of a time series by using its historical behavior.
{ref}/ml-forecast.html[Endpoint documentation]
[source,ts]
----
-client.ml.forecast(...)
+client.ml.forecast({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job. The job must be open when you
+create a forecast; otherwise, an error occurs.
+** *`duration` (Optional, string | -1 | 0)*: A period of time that indicates how far into the future to forecast. For
+example, `30d` corresponds to 30 days. The forecast starts at the last
+record that was processed.
+** *`expires_in` (Optional, string | -1 | 0)*: The period of time that forecast results are retained. After a forecast
+expires, the results are deleted. If set to a value of 0, the forecast is
+never automatically deleted.
+** *`max_model_memory` (Optional, string)*: The maximum memory the forecast can use. If the forecast needs to use
+more than the provided amount, it will spool to disk. Default is 20mb,
+maximum is 500mb and minimum is 1mb. If set to 40% or more of the job’s
+configured memory limit, it is automatically reduced to below that
+amount.
+
[discrete]
==== get_buckets
Retrieves anomaly detection job results for one or more buckets.
{ref}/ml-get-bucket.html[Endpoint documentation]
[source,ts]
----
-client.ml.getBuckets(...)
+client.ml.getBuckets({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`timestamp` (Optional, string | Unit)*: The timestamp of a single bucket result. If you do not specify this
+parameter, the API returns information about all buckets.
+** *`anomaly_score` (Optional, number)*: Returns buckets with anomaly scores greater than or equal to this value.
+** *`desc` (Optional, boolean)*: If `true`, the buckets are sorted in descending order.
+** *`end` (Optional, string | Unit)*: Returns buckets with timestamps earlier than this time. `-1` means it is
+unset and results are not limited to specific timestamps.
+** *`exclude_interim` (Optional, boolean)*: If `true`, the output excludes interim results.
+** *`expand` (Optional, boolean)*: If true, the output includes anomaly records.
+** *`from` (Optional, number)*: Skips the specified number of buckets.
+** *`size` (Optional, number)*: Specifies the maximum number of buckets to obtain. +** *`sort` (Optional, string)*: Specifies the sort field for the requested buckets. +** *`start` (Optional, string | Unit)*: Returns buckets with timestamps after this time. `-1` means it is unset +and results are not limited to specific timestamps. +** *`page` (Optional, { from, size })* + [discrete] ==== get_calendar_events Retrieves information about the scheduled events in calendars. @@ -2350,9 +5036,20 @@ Retrieves information about the scheduled events in calendars. {ref}/ml-get-calendar-event.html[Endpoint documentation] [source,ts] ---- -client.ml.getCalendarEvents(...) +client.ml.getCalendarEvents({ calendar_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`calendar_id` (string)*: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. +** *`end` (Optional, string | Unit)*: Specifies to get events with timestamps earlier than this time. +** *`from` (Optional, number)*: Skips the specified number of events. +** *`job_id` (Optional, string)*: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`. +** *`size` (Optional, number)*: Specifies the maximum number of events to obtain. +** *`start` (Optional, string | Unit)*: Specifies to get events with timestamps after this time. + [discrete] ==== get_calendars Retrieves configuration information for calendars. @@ -2360,9 +5057,18 @@ Retrieves configuration information for calendars. {ref}/ml-get-calendar.html[Endpoint documentation] [source,ts] ---- -client.ml.getCalendars(...) +client.ml.getCalendars({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`calendar_id` (Optional, string)*: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. +** *`from` (Optional, number)*: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. +** *`size` (Optional, number)*: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. +** *`page` (Optional, { from, size })*: This object is supported only when you omit the calendar identifier. + [discrete] ==== get_categories Retrieves anomaly detection job results for one or more categories. @@ -2370,9 +5076,24 @@ Retrieves anomaly detection job results for one or more categories. {ref}/ml-get-category.html[Endpoint documentation] [source,ts] ---- -client.ml.getCategories(...) +client.ml.getCategories({ job_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`category_id` (Optional, string)*: Identifier for the category, which is unique in the job. If you specify +neither the category ID nor the partition_field_value, the API returns +information about all categories. If you specify only the +partition_field_value, it returns information about all categories for +the specified partition. +** *`from` (Optional, number)*: Skips the specified number of categories. 
+** *`partition_field_value` (Optional, string)*: Only return categories for the specified partition. +** *`size` (Optional, number)*: Specifies the maximum number of categories to obtain. +** *`page` (Optional, { from, size })* + [discrete] ==== get_data_frame_analytics Retrieves configuration information for data frame analytics jobs. @@ -2380,9 +5101,33 @@ Retrieves configuration information for data frame analytics jobs. {ref}/get-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.ml.getDataFrameAnalytics(...) +client.ml.getDataFrameAnalytics({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Identifier for the data frame analytics job. If you do not specify this +option, the API returns information for the first hundred data frame +analytics jobs. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value returns an empty data_frame_analytics array when there +are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a 404 status code when +there are no matches or only partial matches. +** *`from` (Optional, number)*: Skips the specified number of data frame analytics jobs. +** *`size` (Optional, number)*: Specifies the maximum number of data frame analytics jobs to obtain. +** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. + [discrete] ==== get_data_frame_analytics_stats Retrieves usage information for data frame analytics jobs. @@ -2390,9 +5135,31 @@ Retrieves usage information for data frame analytics jobs. {ref}/get-dfanalytics-stats.html[Endpoint documentation] [source,ts] ---- -client.ml.getDataFrameAnalyticsStats(...) +client.ml.getDataFrameAnalyticsStats({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Identifier for the data frame analytics job. If you do not specify this +option, the API returns information for the first hundred data frame +analytics jobs. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value returns an empty data_frame_analytics array when there +are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a 404 status code when +there are no matches or only partial matches. +** *`from` (Optional, number)*: Skips the specified number of data frame analytics jobs. +** *`size` (Optional, number)*: Specifies the maximum number of data frame analytics jobs to obtain. +** *`verbose` (Optional, boolean)*: Defines whether the stats response should be verbose. + [discrete] ==== get_datafeed_stats Retrieves usage information for datafeeds. @@ -2400,9 +5167,27 @@ Retrieves usage information for datafeeds. 
{ref}/ml-get-datafeed-stats.html[Endpoint documentation] [source,ts] ---- -client.ml.getDatafeedStats(...) +client.ml.getDatafeedStats({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (Optional, string | string[])*: Identifier for the datafeed. It can be a datafeed identifier or a +wildcard expression. If you do not specify one of these options, the API +returns information about all datafeeds. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no datafeeds that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `datafeeds` array +when there are no matches and the subset of results when there are +partial matches. If this parameter is `false`, the request returns a +`404` status code when there are no matches or only partial matches. + [discrete] ==== get_datafeeds Retrieves configuration information for datafeeds. @@ -2410,9 +5195,30 @@ Retrieves configuration information for datafeeds. {ref}/ml-get-datafeed.html[Endpoint documentation] [source,ts] ---- -client.ml.getDatafeeds(...) +client.ml.getDatafeeds({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (Optional, string | string[])*: Identifier for the datafeed. It can be a datafeed identifier or a +wildcard expression. If you do not specify one of these options, the API +returns information about all datafeeds. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no datafeeds that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `datafeeds` array +when there are no matches and the subset of results when there are +partial matches. If this parameter is `false`, the request returns a +`404` status code when there are no matches or only partial matches. +** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. + [discrete] ==== get_filters Retrieves filters. @@ -2420,9 +5226,17 @@ Retrieves filters. {ref}/ml-get-filter.html[Endpoint documentation] [source,ts] ---- -client.ml.getFilters(...) +client.ml.getFilters({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`filter_id` (Optional, string | string[])*: A string that uniquely identifies a filter. +** *`from` (Optional, number)*: Skips the specified number of filters. +** *`size` (Optional, number)*: Specifies the maximum number of filters to obtain. + [discrete] ==== get_influencers Retrieves anomaly detection job results for one or more influencers. @@ -2430,9 +5244,30 @@ Retrieves anomaly detection job results for one or more influencers. {ref}/ml-get-influencer.html[Endpoint documentation] [source,ts] ---- -client.ml.getInfluencers(...) +client.ml.getInfluencers({ job_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`desc` (Optional, boolean)*: If true, the results are sorted in descending order. 
+** *`end` (Optional, string | Unit)*: Returns influencers with timestamps earlier than this time. +The default value means it is unset and results are not limited to +specific timestamps. +** *`exclude_interim` (Optional, boolean)*: If true, the output excludes interim results. By default, interim results +are included. +** *`influencer_score` (Optional, number)*: Returns influencers with anomaly scores greater than or equal to this +value. +** *`from` (Optional, number)*: Skips the specified number of influencers. +** *`size` (Optional, number)*: Specifies the maximum number of influencers to obtain. +** *`sort` (Optional, string)*: Specifies the sort field for the requested influencers. By default, the +influencers are sorted by the `influencer_score` value. +** *`start` (Optional, string | Unit)*: Returns influencers with timestamps after this time. The default value +means it is unset and results are not limited to specific timestamps. +** *`page` (Optional, { from, size })* + [discrete] ==== get_job_stats Retrieves usage information for anomaly detection jobs. @@ -2440,9 +5275,28 @@ Retrieves usage information for anomaly detection jobs. {ref}/ml-get-job-stats.html[Endpoint documentation] [source,ts] ---- -client.ml.getJobStats(...) +client.ml.getJobStats({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. It can be a job identifier, a +group name, a list of jobs, or a wildcard expression. If +you do not specify one of these options, the API returns information for +all anomaly detection jobs. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no jobs that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +If `true`, the API returns an empty `jobs` array when +there are no matches and the subset of results when there are partial +matches. If `false`, the API returns a `404` status +code when there are no matches or only partial matches. + [discrete] ==== get_jobs Retrieves configuration information for anomaly detection jobs. @@ -2450,9 +5304,30 @@ Retrieves configuration information for anomaly detection jobs. {ref}/ml-get-job.html[Endpoint documentation] [source,ts] ---- -client.ml.getJobs(...) +client.ml.getJobs({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (Optional, string | string[])*: Identifier for the anomaly detection job. It can be a job identifier, a +group name, or a wildcard expression. If you do not specify one of these +options, the API returns information for all anomaly detection jobs. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no jobs that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `jobs` array when +there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status +code when there are no matches or only partial matches. +** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on +retrieval. 
This allows the configuration to be in an acceptable format to
+be retrieved and then added to another cluster.
+
[discrete]
==== get_memory_stats
Returns information on how ML is using memory.
{ref}/get-ml-memory.html[Endpoint documentation]
[source,ts]
----
-client.ml.getMemoryStats(...)
+client.ml.getMemoryStats({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`node_id` (Optional, string)*: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or
+`ml:true`
+** *`human` (Optional, boolean)*: Specify this query parameter to include the fields with units in the response. Otherwise only
+the `_in_bytes` sizes are returned in the response.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout
+expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request
+fails and returns an error.
+
[discrete]
==== get_model_snapshot_upgrade_stats
Gets stats for anomaly detection job model snapshot upgrades that are in progress.
{ref}/ml-get-job-model-snapshot-upgrade-stats.html[Endpoint documentation]
[source,ts]
----
-client.ml.getModelSnapshotUpgradeStats(...)
+client.ml.getModelSnapshotUpgradeStats({ job_id, snapshot_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`snapshot_id` (string)*: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple
+snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`,
+by specifying `*` as the snapshot ID, or by omitting the snapshot ID.
+** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
+
+ - Contains wildcard expressions and there are no jobs that match.
+ - Contains the _all string or no identifiers and there are no matches.
+ - Contains wildcard expressions and there are only partial matches.
+
+The default value is true, which returns an empty jobs array when there are no matches and the subset of results
+when there are partial matches. If this parameter is false, the request returns a 404 status code when there are
+no matches or only partial matches.
+
[discrete]
==== get_model_snapshots
Retrieves information about model snapshots.
{ref}/ml-get-snapshot.html[Endpoint documentation]
[source,ts]
----
-client.ml.getModelSnapshots(...)
+client.ml.getModelSnapshots({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`snapshot_id` (Optional, string)*: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple
+snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`,
+by specifying `*` as the snapshot ID, or by omitting the snapshot ID.
+** *`desc` (Optional, boolean)*: If true, the results are sorted in descending order.
+** *`end` (Optional, string | Unit)*: Returns snapshots with timestamps earlier than this time.
+** *`from` (Optional, number)*: Skips the specified number of snapshots.
+** *`size` (Optional, number)*: Specifies the maximum number of snapshots to obtain.
+** *`sort` (Optional, string)*: Specifies the sort field for the requested snapshots. By default, the
+snapshots are sorted by their timestamp.
+** *`start` (Optional, string | Unit)*: Returns snapshots with timestamps after this time.
+** *`page` (Optional, { from, size })*
+
[discrete]
==== get_overall_buckets
Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs.
{ref}/ml-get-overall-buckets.html[Endpoint documentation]
[source,ts]
----
-client.ml.getOverallBuckets(...)
+client.ml.getOverallBuckets({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job. It can be a job identifier, a
+group name, a list of jobs or groups, or a wildcard
+expression.
+
+You can summarize the bucket results for all anomaly detection jobs by
+using `_all` or by specifying `*` as the `<job_id>`.
+** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no jobs that match.
+2. Contains the `_all` string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+If `true`, the request returns an empty `jobs` array when there are no
+matches and the subset of results when there are partial matches. If this
+parameter is `false`, the request returns a `404` status code when there
+are no matches or only partial matches.
+** *`bucket_span` (Optional, string | -1 | 0)*: The span of the overall buckets. Must be greater than or equal to the largest
+bucket span of the specified anomaly detection jobs, which is the default
+value.
+
+By default, an overall bucket has a span equal to the largest bucket span
+of the specified anomaly detection jobs. To override that behavior, use
+the optional `bucket_span` parameter.
+** *`end` (Optional, string | Unit)*: Returns overall buckets with timestamps earlier than this time.
+** *`exclude_interim` (Optional, boolean)*: If `true`, the output excludes interim results.
+** *`overall_score` (Optional, number | string)*: Returns overall buckets with overall scores greater than or equal to this
+value.
+** *`start` (Optional, string | Unit)*: Returns overall buckets with timestamps after this time.
+** *`top_n` (Optional, number)*: The number of top anomaly detection job bucket scores to be used in the
+`overall_score` calculation.
+
[discrete]
==== get_records
Retrieves anomaly records for an anomaly detection job.
{ref}/ml-get-record.html[Endpoint documentation]
[source,ts]
----
-client.ml.getRecords(...)
+client.ml.getRecords({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`desc` (Optional, boolean)*: If true, the results are sorted in descending order.
+** *`end` (Optional, string | Unit)*: Returns records with timestamps earlier than this time. The default value
+means results are not limited to specific timestamps.
+** *`exclude_interim` (Optional, boolean)*: If `true`, the output excludes interim results.
+** *`from` (Optional, number)*: Skips the specified number of records.
+** *`record_score` (Optional, number)*: Returns records with anomaly scores greater than or equal to this value.
+** *`size` (Optional, number)*: Specifies the maximum number of records to obtain.
+** *`sort` (Optional, string)*: Specifies the sort field for the requested records.
+** *`start` (Optional, string | Unit)*: Returns records with timestamps after this time. The default value means
+results are not limited to specific timestamps.
+** *`page` (Optional, { from, size })*
+
[discrete]
==== get_trained_models
Retrieves configuration information for a trained inference model.
{ref}/get-trained-models.html[Endpoint documentation]
[source,ts]
----
-client.ml.getTrainedModels(...)
+client.ml.getTrainedModels({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`model_id` (Optional, string)*: The unique identifier of the trained model.
+** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
+
+- Contains wildcard expressions and there are no models that match.
+- Contains the _all string or no identifiers and there are no matches.
+- Contains wildcard expressions and there are only partial matches.
+
+If true, it returns an empty array when there are no matches and the
+subset of results when there are partial matches.
+** *`decompress_definition` (Optional, boolean)*: Specifies whether the included model definition should be returned as a
+JSON map (true) or in a custom compressed format (false).
+** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on
+retrieval. This allows the configuration to be in an acceptable format to
+be retrieved and then added to another cluster.
+** *`from` (Optional, number)*: Skips the specified number of models.
+** *`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))*: A comma delimited string of optional fields to include in the response
+body.
+** *`size` (Optional, number)*: Specifies the maximum number of models to obtain.
+** *`tags` (Optional, string)*: A comma delimited string of tags. A trained model can have many tags, or
+none. When supplied, only trained models that contain all the supplied
+tags are returned.
+
[discrete]
==== get_trained_models_stats
Retrieves usage information for trained inference models.
{ref}/get-trained-models-stats.html[Endpoint documentation]
[source,ts]
----
-client.ml.getTrainedModelsStats(...)
+client.ml.getTrainedModelsStats({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`model_id` (Optional, string | string[])*: The unique identifier of the trained model or a model alias. It can be a
+list or a wildcard expression.
+** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
+
+- Contains wildcard expressions and there are no models that match.
+- Contains the _all string or no identifiers and there are no matches.
+- Contains wildcard expressions and there are only partial matches.
+
+If true, it returns an empty array when there are no matches and the
+subset of results when there are partial matches.
+** *`from` (Optional, number)*: Skips the specified number of models.
+** *`size` (Optional, number)*: Specifies the maximum number of models to obtain.
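+
+For example, a minimal usage sketch (the model identifier is hypothetical; all
+parameters shown are the documented ones above):
+
+[source,ts]
+----
+// Page through stats for all trained models, 100 at a time.
+const stats = await client.ml.getTrainedModelsStats({ from: 0, size: 100 })
+console.log(stats)
+
+// Or target one model and tolerate the case where nothing matches.
+await client.ml.getTrainedModelsStats({ model_id: 'my-model', allow_no_match: true })
+----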
+
[discrete]
==== infer_trained_model
Evaluate a trained model.
{ref}/infer-trained-model.html[Endpoint documentation]
[source,ts]
----
-client.ml.inferTrainedModel(...)
+client.ml.inferTrainedModel({ model_id, docs })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`model_id` (string)*: The unique identifier of the trained model.
+** *`docs` (Record[])*: An array of objects to pass to the model for inference. The objects should contain fields matching your
+configured trained model input. Typically, for NLP models, the field name is `text_field`.
+Currently, for NLP models, only a single value is allowed.
+** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait for inference results.
+** *`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The inference configuration updates to apply on the API call.
+
[discrete]
==== info
Returns defaults and limits used by machine learning.
{ref}/get-ml-info.html[Endpoint documentation]
[source,ts]
----
-client.ml.info(...)
+client.ml.info()
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
==== open_job
Opens one or more anomaly detection jobs.
{ref}/ml-open-job.html[Endpoint documentation]
[source,ts]
----
-client.ml.openJob(...)
+client.ml.openJob({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait until a job has opened.
+
[discrete]
==== post_calendar_events
Posts scheduled events in a calendar.
{ref}/ml-post-calendar-event.html[Endpoint documentation]
[source,ts]
----
-client.ml.postCalendarEvents(...)
+client.ml.postCalendarEvents({ calendar_id, events })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
+** *`events` ({ calendar_id, event_id, description, end_time, start_time }[])*: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.
+
[discrete]
==== post_data
Sends data to an anomaly detection job for analysis.
{ref}/ml-post-data.html[Endpoint documentation]
[source,ts]
----
-client.ml.postData(...)
+client.ml.postData({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data.
+** *`reset_end` (Optional, string | Unit)*: Specifies the end of the bucket resetting range.
+** *`reset_start` (Optional, string | Unit)*: Specifies the start of the bucket resetting range.
+
[discrete]
==== preview_data_frame_analytics
Previews the data that will be analyzed given a data frame analytics config.
{ref}/preview-dfanalytics.html[Endpoint documentation]
[source,ts]
----
-client.ml.previewDataFrameAnalytics(...)
+client.ml.previewDataFrameAnalytics({ ...
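+  // The two documented call shapes, sketched with illustrative names:
+  //   { id: 'my-dfa-job' }, which previews an existing, stored analytics job, or
+  //   { config: { source: { index: 'my-data' }, analysis: { outlier_detection: {} } } }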
}) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Identifier for the data frame analytics job. +** *`config` (Optional, { source, analysis, model_memory_limit, max_num_threads, analyzed_fields })*: A data frame analytics config as described in create data frame analytics +jobs. Note that `id` and `dest` don’t need to be provided in the context of +this API. + [discrete] ==== preview_datafeed Previews a datafeed. @@ -2590,9 +5655,25 @@ Previews a datafeed. {ref}/ml-preview-datafeed.html[Endpoint documentation] [source,ts] ---- -client.ml.previewDatafeed(...) +client.ml.previewDatafeed({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (Optional, string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric +characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job +configuration details in the request body. +** *`start` (Optional, string | Unit)*: The start time from where the datafeed preview should begin +** *`end` (Optional, string | Unit)*: The end time when the datafeed preview should stop +** *`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })*: The datafeed definition to preview. +** *`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })*: The configuration details for the anomaly detection job that is associated with the datafeed. If the +`datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must +supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is +used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. + [discrete] ==== put_calendar Instantiates a calendar. @@ -2600,9 +5681,17 @@ Instantiates a calendar. {ref}/ml-put-calendar.html[Endpoint documentation] [source,ts] ---- -client.ml.putCalendar(...) +client.ml.putCalendar({ calendar_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`calendar_id` (string)*: A string that uniquely identifies a calendar. +** *`job_ids` (Optional, string[])*: An array of anomaly detection job identifiers. +** *`description` (Optional, string)*: A description of the calendar. + [discrete] ==== put_calendar_job Adds an anomaly detection job to a calendar. @@ -2610,9 +5699,16 @@ Adds an anomaly detection job to a calendar. {ref}/ml-put-calendar-job.html[Endpoint documentation] [source,ts] ---- -client.ml.putCalendarJob(...) +client.ml.putCalendarJob({ calendar_id, job_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`calendar_id` (string)*: A string that uniquely identifies a calendar. +** *`job_id` (string)*: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups. 
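+
+For example, a rough sketch of creating a calendar and adding an existing
+anomaly detection job to it (both identifiers below are hypothetical):
+
+[source,ts]
+----
+// Create a calendar, then attach an anomaly detection job to it.
+await client.ml.putCalendar({
+  calendar_id: 'planned-outages',   // hypothetical calendar ID
+  description: 'Scheduled maintenance windows'
+})
+await client.ml.putCalendarJob({
+  calendar_id: 'planned-outages',
+  job_id: 'my-anomaly-job'          // hypothetical existing job ID
+})
+----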
+ [discrete] ==== put_data_frame_analytics Instantiates a data frame analytics job. @@ -2620,8 +5716,69 @@ Instantiates a data frame analytics job. {ref}/put-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.ml.putDataFrameAnalytics(...) ----- +client.ml.putDataFrameAnalytics({ id, analysis, dest, source }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +** *`analysis` ({ classification, outlier_detection, regression })*: The analysis configuration, which contains the information necessary to +perform one of the following types of analysis: classification, outlier +detection, or regression. +** *`dest` ({ index, results_field })*: The destination configuration. +** *`source` ({ index, query, runtime_mappings, _source })*: The configuration of how to source the analysis data. +** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine +learning node capacity for it to be immediately assigned to a node. If +set to `false` and a machine learning node with capacity to run the job +cannot be immediately found, the API returns an error. If set to `true`, +the API does not return an error; the job waits in the `starting` state +until sufficient machine learning node capacity is available. This +behavior is also affected by the cluster-wide +`xpack.ml.max_lazy_ml_nodes` setting. +** *`analyzed_fields` (Optional, { includes, excludes })*: Specifies `includes` and/or `excludes` patterns to select which fields +will be included in the analysis. The patterns specified in `excludes` +are applied last, therefore `excludes` takes precedence. In other words, +if the same field is specified in both `includes` and `excludes`, then +the field will not be included in the analysis. If `analyzed_fields` is +not set, only the relevant fields will be included. For example, all the +numeric fields for outlier detection. +The supported fields vary for each type of analysis. Outlier detection +requires numeric or `boolean` data to analyze. The algorithms don’t +support missing values therefore fields that have data types other than +numeric or boolean are ignored. Documents where included fields contain +missing values, null values, or an array are also ignored. Therefore the +`dest` index may contain documents that don’t have an outlier score. +Regression supports fields that are numeric, `boolean`, `text`, +`keyword`, and `ip` data types. It is also tolerant of missing values. +Fields that are supported are included in the analysis, other fields are +ignored. Documents where included fields contain an array with two or +more values are also ignored. Documents in the `dest` index that don’t +contain a results field are not included in the regression analysis. +Classification supports fields that are numeric, `boolean`, `text`, +`keyword`, and `ip` data types. It is also tolerant of missing values. +Fields that are supported are included in the analysis, other fields are +ignored. Documents where included fields contain an array with two or +more values are also ignored. Documents in the `dest` index that don’t +contain a results field are not included in the classification analysis. +Classification analysis can be improved by mapping ordinal variable +values to a single number. 
For example, in case of age ranges, you can +model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. +** *`description` (Optional, string)*: A description of the job. +** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. +** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try +to create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. +** *`headers` (Optional, Record)* +** *`version` (Optional, string)* [discrete] ==== put_datafeed @@ -2630,8 +5787,59 @@ Instantiates a datafeed. {ref}/ml-put-datafeed.html[Endpoint documentation] [source,ts] ---- -client.ml.putDatafeed(...) ----- +client.ml.putDatafeed({ datafeed_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. +This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. +It must start and end with alphanumeric characters. +** *`allow_no_indices` (Optional, boolean)*: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` +string or when no indices are specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. +** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If true, unavailable indices (missing or closed) are ignored. +** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. +Support for aggregations is limited and should be used only with low cardinality data. +** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might be required to search over long time periods, for several months or years. +This search is split into time chunks in order to ensure the load on Elasticsearch is managed. +Chunking configuration controls how the size of these time chunks are calculated; +it is an advanced configuration option. +** *`delayed_data_check_config` (Optional, { check_window, enabled })*: Specifies whether the datafeed checks for missing data and the size of the window. +The datafeed can optionally search over indices that have already been read in an effort to determine whether +any data has subsequently been added to the index. If missing data is found, it is a good indication that the +`query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. +This check runs only on real-time datafeeds. +** *`frequency` (Optional, string | -1 | 0)*: The interval at which scheduled queries are made while the datafeed runs in real time. 
+The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible +fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last +(partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses +aggregations, this value must be divisible by the interval of the date histogram aggregation. +** *`indices` (Optional, string | string[])*: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine +learning nodes must have the `remote_cluster_client` role. +** *`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })*: Specifies index expansion options that are used during search +** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. +** *`max_empty_searches` (Optional, number)*: If a real-time datafeed has never seen any data (including during any initial training period), it automatically +stops and closes the associated job after this many real-time searches return no documents. In other words, +it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no +end time that sees no data remains started until it is explicitly stopped. By default, it is not set. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an +Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this +object is passed verbatim to Elasticsearch. +** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might +not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default +value is randomly selected between `60s` and `120s`. This randomness improves the query performance +when there are multiple jobs running on the same node. +** *`runtime_mappings` (Optional, Record)*: Specifies runtime fields for the datafeed search. +** *`script_fields` (Optional, Record)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. +The detector configuration objects in a job can contain functions that use these script fields. +** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. +The maximum value is the value of `index.max_result_window`, which is 10,000 by default. +** *`headers` (Optional, Record)* [discrete] ==== put_filter @@ -2640,9 +5848,18 @@ Instantiates a filter. {ref}/ml-put-filter.html[Endpoint documentation] [source,ts] ---- -client.ml.putFilter(...) 
+client.ml.putFilter({ filter_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`filter_id` (string)*: A string that uniquely identifies a filter.
+** *`description` (Optional, string)*: A description of the filter.
+** *`items` (Optional, string[])*: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item.
+Up to 10000 items are allowed in each filter.
+
[discrete]
==== put_job
Instantiates an anomaly detection job.
@@ -2650,9 +5867,30 @@ Instantiates an anomaly detection job.
{ref}/ml-put-job.html[Endpoint documentation]
[source,ts]
----
-client.ml.putJob(...)
+client.ml.putJob({ job_id, analysis_config, data_description })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
+** *`analysis_config` ({ bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })*: Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational.
+** *`data_description` ({ format, time_field, time_format, field_delimiter })*: Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained.
+** *`allow_lazy_open` (Optional, boolean)*: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available.
+** *`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })*: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes.
+** *`background_persist_interval` (Optional, string | -1 | 0)*: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low.
+** *`custom_settings` (Optional, User-defined value)*: Advanced configuration option. Contains custom meta data about the job.
+** *`daily_model_snapshot_retention_after_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old model snapshots for this job.
It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. +** *`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })*: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. +** *`description` (Optional, string)*: A description of the job. +** *`groups` (Optional, string[])*: A list of job groups. A job can belong to no groups or many. +** *`model_plot_config` (Optional, { annotations_enabled, enabled, terms })*: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. +** *`model_snapshot_retention_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. +** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. +** *`results_index_name` (Optional, string)*: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. +** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. + [discrete] ==== put_trained_model Creates an inference trained model. @@ -2660,9 +5898,32 @@ Creates an inference trained model. {ref}/put-trained-models.html[Endpoint documentation] [source,ts] ---- -client.ml.putTrainedModel(...) 
+client.ml.putTrainedModel({ model_id, inference_config }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model. +** *`inference_config` ({ regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The default configuration for inference. This can be either a regression +or classification configuration. It must match the underlying +definition.trained_model's target_type. +** *`defer_definition_decompression` (Optional, boolean)*: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. +** *`compressed_definition` (Optional, string)*: The compressed (GZipped and Base64 encoded) inference definition of the +model. If compressed_definition is specified, then definition cannot be +specified. +** *`definition` (Optional, { preprocessors, trained_model })*: The inference definition for the model. If definition is specified, then +compressed_definition cannot be specified. +** *`description` (Optional, string)*: A human-readable description of the inference trained model. +** *`input` (Optional, { field_names })*: The input field names for the model definition. +** *`metadata` (Optional, User-defined value)*: An object map that contains metadata about the model. +** *`model_type` (Optional, Enum("tree_ensemble" | "lang_ident" | "pytorch"))*: The model type. +** *`model_size_bytes` (Optional, number)*: The estimated memory usage in bytes to keep the trained model in memory. +This property is supported only if defer_definition_decompression is true +or the model definition is not supplied. +** *`tags` (Optional, string[])*: An array of tags to organize the model. + [discrete] ==== put_trained_model_alias Creates a new model alias (or reassigns an existing one) to refer to the trained model @@ -2670,9 +5931,19 @@ Creates a new model alias (or reassigns an existing one) to refer to the trained {ref}/put-trained-models-aliases.html[Endpoint documentation] [source,ts] ---- -client.ml.putTrainedModelAlias(...) +client.ml.putTrainedModelAlias({ model_alias, model_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`model_alias` (string)*: The alias to create or update. This value cannot end in numbers. +** *`model_id` (string)*: The identifier for the trained model that the alias refers to. +** *`reassign` (Optional, boolean)*: Specifies whether the alias gets reassigned to the specified trained +model if it is already assigned to a different model. If the alias is +already assigned and this parameter is false, the API returns an error. + [discrete] ==== put_trained_model_definition_part Creates part of a trained model definition @@ -2680,9 +5951,20 @@ Creates part of a trained model definition {ref}/put-trained-model-definition-part.html[Endpoint documentation] [source,ts] ---- -client.ml.putTrainedModelDefinitionPart(...) +client.ml.putTrainedModelDefinitionPart({ model_id, part, definition, total_definition_length, total_parts }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model. +** *`part` (number)*: The definition part number. When the definition is loaded for inference the definition parts are streamed in the +order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. 
+** *`definition` (string)*: The definition part for the model. Must be a base64 encoded string.
+** *`total_definition_length` (number)*: The total uncompressed definition length in bytes. Not base64 encoded.
+** *`total_parts` (number)*: The total number of parts that will be uploaded. Must be greater than 0.
+
[discrete]
==== put_trained_model_vocabulary
Creates a trained model vocabulary
@@ -2690,19 +5972,38 @@ Creates a trained model vocabulary
{ref}/put-trained-model-vocabulary.html[Endpoint documentation]
[source,ts]
----
-client.ml.putTrainedModelVocabulary(...)
+client.ml.putTrainedModelVocabulary({ model_id, vocabulary })
----
[discrete]
-==== reset_job
-Resets an existing anomaly detection job.
+==== Arguments
+
+* *Request (object):*
+** *`model_id` (string)*: The unique identifier of the trained model.
+** *`vocabulary` (string[])*: The model vocabulary, which must not be empty.
+** *`merges` (Optional, string[])*: The optional model merges if required by the tokenizer.
+
+[discrete]
+==== reset_job
+Resets an existing anomaly detection job.
{ref}/ml-reset-job.html[Endpoint documentation]
[source,ts]
----
-client.ml.resetJob(...)
+client.ml.resetJob({ job_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: The ID of the job to reset.
+** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before
+returning.
+** *`delete_user_annotations` (Optional, boolean)*: Specifies whether annotations that have been added by the
+user should be deleted along with any auto-generated annotations when the job is
+reset.
+
[discrete]
==== revert_model_snapshot
Reverts to a specific snapshot.
@@ -2710,9 +6011,24 @@ Reverts to a specific snapshot.
{ref}/ml-revert-snapshot.html[Endpoint documentation]
[source,ts]
----
-client.ml.revertModelSnapshot(...)
+client.ml.revertModelSnapshot({ job_id, snapshot_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`snapshot_id` (string)*: You can specify `empty` as the snapshot ID. Reverting to the empty
+snapshot means the anomaly detection job starts learning a new model from
+scratch when it is started.
+** *`delete_intervening_results` (Optional, boolean)*: If true, deletes the results in the time period between the latest
+results and the time of the reverted snapshot. It also resets the model
+to accept records for this time period. If you choose not to delete
+intervening results when reverting a snapshot, the job will not accept
+input data that is older than the current time. If you want to resend
+data, then delete the intervening results.
+
[discrete]
==== set_upgrade_mode
Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade.
@@ -2720,9 +6036,18 @@ Sets a cluster wide upgrade_mode setting that prepares machine learning indices
{ref}/ml-set-upgrade-mode.html[Endpoint documentation]
[source,ts]
----
-client.ml.setUpgradeMode(...)
+client.ml.setUpgradeMode({ ... })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`enabled` (Optional, boolean)*: When `true`, it enables `upgrade_mode` which temporarily halts all job
+and datafeed tasks and prohibits new job and datafeed tasks from
+starting.
+** *`timeout` (Optional, string | -1 | 0)*: The time to wait for the request to be completed.
+
[discrete]
==== start_data_frame_analytics
Starts a data frame analytics job.
{ref}/start-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.ml.startDataFrameAnalytics(...) +client.ml.startDataFrameAnalytics({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait until the data frame analytics job +starts. + [discrete] ==== start_datafeed Starts one or more datafeeds. @@ -2740,9 +6075,35 @@ Starts one or more datafeeds. {ref}/ml-start-datafeed.html[Endpoint documentation] [source,ts] ---- -client.ml.startDatafeed(...) +client.ml.startDatafeed({ datafeed_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric +characters. +** *`end` (Optional, string | Unit)*: The time that the datafeed should end, which can be specified by using one of the following formats: + +* ISO 8601 format with milliseconds, for example `2017-01-22T06:00:00.000Z` +* ISO 8601 format without milliseconds, for example `2017-01-22T06:00:00+00:00` +* Milliseconds since the epoch, for example `1485061200000` + +Date-time arguments using either of the ISO 8601 formats must have a time zone designator, where `Z` is accepted +as an abbreviation for UTC time. When a URL is expected (for example, in browsers), the `+` used in time zone +designators must be encoded as `%2B`. +The end time value is exclusive. If you do not specify an end time, the datafeed +runs continuously. +** *`start` (Optional, string | Unit)*: The time that the datafeed should begin, which can be specified by using the same formats as the `end` parameter. +This value is inclusive. +If you do not specify a start time and the datafeed is associated with a new anomaly detection job, the analysis +starts from the earliest time for which data is available. +If you restart a stopped datafeed and specify a start value that is earlier than the timestamp of the latest +processed record, the datafeed continues from 1 millisecond after the timestamp of the latest processed record. +** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait until a datafeed starts. + [discrete] ==== start_trained_model_deployment Start a trained model deployment. @@ -2750,9 +6111,34 @@ Start a trained model deployment. {ref}/start-trained-model-deployment.html[Endpoint documentation] [source,ts] ---- -client.ml.startTrainedModelDeployment(...) +client.ml.startTrainedModelDeployment({ model_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model. Currently, only PyTorch models are supported. +** *`cache_size` (Optional, number | string)*: The inference cache size (in memory outside the JVM heap) per node for the model. +The default value is the same size as the `model_size_bytes`. To disable the cache, +`0b` can be provided. +** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed. +All allocations on a node share the same copy of the model in memory but use +a separate set of threads to evaluate the model. 
+Increasing this value generally increases the throughput.
+If this setting is greater than the number of hardware threads
+it will automatically be changed to a value less than the number of hardware threads.
+** *`priority` (Optional, Enum("normal" | "low"))*: The deployment priority.
+** *`queue_capacity` (Optional, number)*: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds
+this value, new requests are rejected with a 429 error.
+** *`threads_per_allocation` (Optional, number)*: Sets the number of threads used by each model allocation during inference. This generally increases
+the inference speed. The inference process is a compute-bound process; any number
+greater than the number of available hardware threads on the machine does not increase the
+inference speed. If this setting is greater than the number of hardware threads
+it will automatically be changed to a value less than the number of hardware threads.
+** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the model to deploy.
+** *`wait_for` (Optional, Enum("started" | "starting" | "fully_allocated"))*: Specifies the allocation status to wait for before returning.
+
[discrete]
==== stop_data_frame_analytics
Stops one or more data frame analytics jobs.
@@ -2760,9 +6146,31 @@ Stops one or more data frame analytics jobs.
{ref}/stop-dfanalytics.html[Endpoint documentation]
[source,ts]
----
-client.ml.stopDataFrameAnalytics(...)
+client.ml.stopDataFrameAnalytics({ id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain
+lowercase alphanumeric characters (a-z and 0-9), hyphens, and
+underscores. It must start and end with alphanumeric characters.
+** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no data frame analytics
+jobs that match.
+2. Contains the _all string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+The default value is true, which returns an empty data_frame_analytics
+array when there are no matches and the subset of results when there are
+partial matches. If this parameter is false, the request returns a 404
+status code when there are no matches or only partial matches.
+** *`force` (Optional, boolean)*: If true, the data frame analytics job is stopped forcefully.
+** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait until the data frame analytics job
+stops. Defaults to 20 seconds.
+
[discrete]
==== stop_datafeed
Stops one or more datafeeds.
@@ -2770,9 +6178,28 @@ Stops one or more datafeeds.
{ref}/ml-stop-datafeed.html[Endpoint documentation]
[source,ts]
----
-client.ml.stopDatafeed(...)
+client.ml.stopDatafeed({ datafeed_id })
----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`datafeed_id` (string)*: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated
+list of datafeeds or a wildcard expression. You can stop all datafeeds by using `_all` or by specifying `*` as
+the identifier.
+** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
+
+* Contains wildcard expressions and there are no datafeeds that match.
+* Contains the `_all` string or no identifiers and there are no matches.
+* Contains wildcard expressions and there are only partial matches. + +If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when +there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only +partial matches. +** *`force` (Optional, boolean)*: If `true`, the datafeed is stopped forcefully. +** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait until a datafeed stops. + [discrete] ==== stop_trained_model_deployment Stop a trained model deployment. @@ -2780,9 +6207,21 @@ Stop a trained model deployment. {ref}/stop-trained-model-deployment.html[Endpoint documentation] [source,ts] ---- -client.ml.stopTrainedModelDeployment(...) +client.ml.stopTrainedModelDeployment({ model_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; +contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and +there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. +If `false`, the request returns a 404 status code when there are no matches or only partial matches. +** *`force` (Optional, boolean)*: Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you +restart the model deployment. + [discrete] ==== update_data_frame_analytics Updates certain properties of a data frame analytics job. @@ -2790,9 +6229,29 @@ Updates certain properties of a data frame analytics job. {ref}/update-dfanalytics.html[Endpoint documentation] [source,ts] ---- -client.ml.updateDataFrameAnalytics(...) +client.ml.updateDataFrameAnalytics({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +** *`description` (Optional, string)*: A description of the job. +** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try +to create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. +** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. +** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine +learning node capacity for it to be immediately assigned to a node. + [discrete] ==== update_datafeed Updates certain properties of a datafeed. @@ -2800,8 +6259,67 @@ Updates certain properties of a datafeed. {ref}/ml-update-datafeed.html[Endpoint documentation] [source,ts] ---- -client.ml.updateDatafeed(...) 
----- +client.ml.updateDatafeed({ datafeed_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. +This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. +It must start and end with alphanumeric characters. +** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the +`_all` string or when no indices are specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: + +* `all`: Match any data stream or index, including hidden ones. +* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. +* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. +* `none`: Wildcard patterns are not accepted. +* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored. +** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only +with low cardinality data. +** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might search over long time periods, for several months or years. This search is split into time +chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of +these time chunks are calculated; it is an advanced configuration option. +** *`delayed_data_check_config` (Optional, { check_window, enabled })*: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally +search over indices that have already been read in an effort to determine whether any data has subsequently been +added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and +the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time +datafeeds. +** *`frequency` (Optional, string | -1 | 0)*: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is +either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket +span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are +written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value +must be divisible by the interval of the date histogram aggregation. +** *`indices` (Optional, string[])*: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine +learning nodes must have the `remote_cluster_client` role. 
+** *`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })*: Specifies index expansion options that are used during search. +** *`job_id` (Optional, string)* +** *`max_empty_searches` (Optional, number)*: If a real-time datafeed has never seen any data (including during any initial training period), it automatically +stops and closes the associated job after this many real-time searches return no documents. In other words, +it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no +end time that sees no data remains started until it is explicitly stopped. By default, it is not set. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an +Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this +object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also +changed. Therefore, the time required to learn might be long and the understandability of the results is +unpredictable. If you want to make significant changes to the source data, it is recommended that you +clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one +when you are satisfied with the results of the job. +** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might +not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default +value is randomly selected between `60s` and `120s`. This randomness improves the query performance +when there are multiple jobs running on the same node. +** *`runtime_mappings` (Optional, Record)*: Specifies runtime fields for the datafeed search. +** *`script_fields` (Optional, Record)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. +The detector configuration objects in a job can contain functions that use these script fields. +** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. +The maximum value is the value of `index.max_result_window`. [discrete] ==== update_filter @@ -2810,9 +6328,18 @@ Updates the description of a filter, adds items, or removes items. {ref}/ml-update-filter.html[Endpoint documentation] [source,ts] ---- -client.ml.updateFilter(...) +client.ml.updateFilter({ filter_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`filter_id` (string)*: A string that uniquely identifies a filter. +** *`add_items` (Optional, string[])*: The items to add to the filter. +** *`description` (Optional, string)*: A description for the filter. 
+** *`remove_items` (Optional, string[])*: The items to remove from the filter. + [discrete] ==== update_job Updates certain properties of an anomaly detection job. @@ -2820,8 +6347,63 @@ Updates certain properties of an anomaly detection job. {ref}/ml-update-job.html[Endpoint documentation] [source,ts] ---- -client.ml.updateJob(...) ----- +client.ml.updateJob({ job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the job. +** *`allow_lazy_open` (Optional, boolean)*: Advanced configuration option. Specifies whether this job can open when +there is insufficient machine learning node capacity for it to be +immediately assigned to a node. If `false` and a machine learning node +with capacity to run the job cannot immediately be found, the open +anomaly detection jobs API returns an error. However, this is also +subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this +option is set to `true`, the open anomaly detection jobs API does not +return an error and the job waits in the opening state until sufficient +machine learning node capacity is available. +** *`analysis_limits` (Optional, { model_memory_limit })* +** *`background_persist_interval` (Optional, string | -1 | 0)*: Advanced configuration option. The time between each periodic persistence +of the model. +The default value is a randomized value between 3 to 4 hours, which +avoids all jobs persisting at exactly the same time. The smallest allowed +value is 1 hour. +For very large models (several GB), persistence could take 10-20 minutes, +so do not set the value too low. +If the job is open when you make the update, you must stop the datafeed, +close the job, then reopen the job and restart the datafeed for the +changes to take effect. +** *`custom_settings` (Optional, Record)*: Advanced configuration option. Contains custom meta data about the job. +For example, it can contain custom URL information as shown in Adding +custom URLs to machine learning results. +** *`categorization_filters` (Optional, string[])* +** *`description` (Optional, string)*: A description of the job. +** *`model_plot_config` (Optional, { annotations_enabled, enabled, terms })* +** *`model_prune_window` (Optional, string | -1 | 0)* +** *`daily_model_snapshot_retention_after_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old +model snapshots for this job. It specifies a period of time (in days) +after which only the first snapshot per day is retained. This period is +relative to the timestamp of the most recent snapshot for this job. Valid +values range from 0 to `model_snapshot_retention_days`. For jobs created +before version 7.8.0, the default value matches +`model_snapshot_retention_days`. +** *`model_snapshot_retention_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old +model snapshots for this job. It specifies the maximum period of time (in +days) that snapshots are retained. This period is relative to the +timestamp of the most recent snapshot for this job. +** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the +score are applied, as new data is seen. +** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results +are retained. Age is calculated relative to the timestamp of the latest +bucket result. 
If this property has a non-null value, once per day at +00:30 (server time), results that are the specified number of days older +than the latest bucket result are deleted from Elasticsearch. The default +value is null, which means all results are retained. +** *`groups` (Optional, string[])*: A list of job groups. A job can belong to no groups or many. +** *`detectors` (Optional, { by_field_name, custom_rules, detector_description, detector_index, exclude_frequent, field_name, function, over_field_name, partition_field_name, use_null }[])*: An array of detector update objects. +** *`per_partition_categorization` (Optional, { enabled, stop_on_warn })*: Settings related to how categorization interacts with partition fields. [discrete] ==== update_model_snapshot @@ -2830,9 +6412,20 @@ Updates certain properties of a snapshot. {ref}/ml-update-snapshot.html[Endpoint documentation] [source,ts] ---- -client.ml.updateModelSnapshot(...) +client.ml.updateModelSnapshot({ job_id, snapshot_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`snapshot_id` (string)*: Identifier for the model snapshot. +** *`description` (Optional, string)*: A description of the model snapshot. +** *`retain` (Optional, boolean)*: If `true`, this snapshot will not be deleted during automatic cleanup of +snapshots older than `model_snapshot_retention_days`. However, this +snapshot will be deleted when the job is deleted. + [discrete] ==== update_trained_model_deployment Updates certain properties of trained model deployment. @@ -2840,9 +6433,14 @@ Updates certain properties of trained model deployment. {ref}/update-trained-model-deployment.html[Endpoint documentation] [source,ts] ---- -client.ml.updateTrainedModelDeployment(...) +client.ml.updateTrainedModelDeployment() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== upgrade_job_snapshot Upgrades a given job snapshot to the current major version. @@ -2850,9 +6448,19 @@ Upgrades a given job snapshot to the current major version. {ref}/ml-upgrade-job-model-snapshot.html[Endpoint documentation] [source,ts] ---- -client.ml.upgradeJobSnapshot(...) +client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`snapshot_id` (string)*: A numerical character string that uniquely identifies the model snapshot. +** *`wait_for_completion` (Optional, boolean)*: When true, the API won’t respond until the upgrade is complete. +Otherwise, it responds as soon as the upgrade task is assigned to a node. +** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the request to complete. + [discrete] === nodes [discrete] @@ -2862,9 +6470,17 @@ Removes the archived repositories metering information present in the cluster. {ref}/clear-repositories-metering-archive-api.html[Endpoint documentation] [source,ts] ---- -client.nodes.clearRepositoriesMeteringArchive(...) +client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information. +All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). 
+** *`max_archive_version` (number)*: Specifies the maximum [archive_version](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html#get-repositories-metering-api-response-body) to be cleared from the archive. + [discrete] ==== get_repositories_metering_info Returns cluster repositories metering information. @@ -2872,9 +6488,16 @@ Returns cluster repositories metering information. {ref}/get-repositories-metering-api.html[Endpoint documentation] [source,ts] ---- -client.nodes.getRepositoriesMeteringInfo(...) +client.nodes.getRepositoriesMeteringInfo({ node_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information. +All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). + [discrete] ==== hot_threads Returns information about hot threads on each node in the cluster. @@ -2882,9 +6505,27 @@ Returns information about hot threads on each node in the cluster. {ref}/cluster-nodes-hot-threads.html[Endpoint documentation] [source,ts] ---- -client.nodes.hotThreads(...) +client.nodes.hotThreads({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information. +** *`ignore_idle_threads` (Optional, boolean)*: If true, known idle threads (e.g. waiting in a socket select, or to get +a task from an empty queue) are filtered out. +** *`interval` (Optional, string | -1 | 0)*: The interval to do the second sampling of threads. +** *`snapshots` (Optional, number)*: Number of samples of thread stacktrace. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response +is received before the timeout expires, the request fails and +returns an error. +** *`threads` (Optional, number)*: Specifies the number of hot threads to provide information for. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received +before the timeout expires, the request fails and returns an error. +** *`type` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))*: The type to sample. +** *`sort` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))*: The sort order for 'cpu' type (default: total) + [discrete] ==== info Returns information about nodes in the cluster. @@ -2892,9 +6533,19 @@ Returns information about nodes in the cluster. {ref}/cluster-nodes-info.html[Endpoint documentation] [source,ts] ---- -client.nodes.info(...) +client.nodes.info({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information. +** *`metric` (Optional, string | string[])*: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. +** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + [discrete] ==== reload_secure_settings Reloads secure settings. 
@@ -2902,9 +6553,17 @@ Reloads secure settings. {ref}/secure-settings.html[Endpoint documentation] [source,ts] ---- -client.nodes.reloadSecureSettings(...) +client.nodes.reloadSecureSettings({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: A list of node IDs to span the reload/reinit call. Should stay empty because reloading usually involves all cluster nodes. +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`secure_settings_password` (Optional, string)* + [discrete] ==== stats Returns statistical information about nodes in the cluster. @@ -2912,9 +6571,27 @@ Returns statistical information about nodes in the cluster. {ref}/cluster-nodes-stats.html[Endpoint documentation] [source,ts] ---- -client.nodes.stats(...) +client.nodes.stats({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information. +** *`metric` (Optional, string | string[])*: Limit the information returned to the specified metrics +** *`index_metric` (Optional, string | string[])*: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. +** *`completion_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata and suggest statistics. +** *`fielddata_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata statistics. +** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. +** *`groups` (Optional, boolean)*: List of search groups to include in the search statistics. +** *`include_segment_file_sizes` (Optional, boolean)*: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). +** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Indicates whether statistics are aggregated at the cluster, index, or shard level. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`types` (Optional, string[])*: A list of document types for the indexing index metric. +** *`include_unloaded_segments` (Optional, boolean)*: If set to true segment stats will include stats for segments that are not currently loaded into memory + [discrete] ==== usage Returns low-level information about REST actions usage on nodes. @@ -2922,9 +6599,17 @@ Returns low-level information about REST actions usage on nodes. {ref}/cluster-nodes-usage.html[Endpoint documentation] [source,ts] ---- -client.nodes.usage(...) +client.nodes.usage({ ... 
}) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes +** *`metric` (Optional, string | string[])*: Limit the information returned to the specified metrics +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout + [discrete] === rollup [discrete] ==== delete_job Deletes an existing rollup job. @@ -2934,9 +6619,15 @@ Deletes an existing rollup job. {ref}/rollup-delete-job.html[Endpoint documentation] [source,ts] ---- -client.rollup.deleteJob(...) +client.rollup.deleteJob({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The ID of the job to delete + [discrete] ==== get_jobs Retrieves the configuration, stats, and status of rollup jobs. @@ -2944,9 +6635,15 @@ Retrieves the configuration, stats, and status of rollup jobs. {ref}/rollup-get-job.html[Endpoint documentation] [source,ts] ---- -client.rollup.getJobs(...) +client.rollup.getJobs({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: The ID of the job(s) to fetch. Accepts glob patterns, or left blank for all jobs + [discrete] ==== get_rollup_caps Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. @@ -2954,9 +6651,15 @@ Returns the capabilities of any rollup jobs that have been configured for a spec {ref}/rollup-get-rollup-caps.html[Endpoint documentation] [source,ts] ---- -client.rollup.getRollupCaps(...) +client.rollup.getRollupCaps({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: The ID of the index to check rollup capabilities on, or left blank for all jobs + [discrete] ==== get_rollup_index_caps Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored). @@ -2964,9 +6667,15 @@ Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the i {ref}/rollup-get-rollup-index-caps.html[Endpoint documentation] [source,ts] ---- -client.rollup.getRollupIndexCaps(...) +client.rollup.getRollupIndexCaps({ index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: The rollup index or index pattern to obtain rollup capabilities from. + [discrete] ==== put_job Creates a rollup job. @@ -2974,8 +6683,39 @@ Creates a rollup job. {ref}/rollup-put-job.html[Endpoint documentation] [source,ts] ---- -client.rollup.putJob(...) ----- +client.rollup.putJob({ id, cron, groups, index_pattern, page_size, rollup_index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the +data that is associated with the rollup job. The ID is persistent; it is stored with the rolled +up data. If you create a job, let it run for a while, then delete the job, the data that the job +rolled up is still associated with this job ID. You cannot create a new job with the same ID +since that could lead to problems with mismatched job configurations. +** *`cron` (string)*: A cron string which defines the intervals when the rollup job should be executed. When the interval +triggers, the indexer attempts to roll up the data in the index pattern. The cron pattern is unrelated +to the time interval of the data being rolled up.
For example, you may wish to create hourly rollups +of your documents but only run the indexer on a daily basis at midnight, as defined by the cron. The +cron pattern is defined just like a Watcher cron schedule. +** *`groups` ({ date_histogram, histogram, terms })*: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be +available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of +the groups configuration as defining a set of tools that can later be used in aggregations to partition the +data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide +enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. +** *`index_pattern` (string)*: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to +roll up the entire index or index-pattern. +** *`page_size` (number)*: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends +to execute faster, but requires more memory during processing. This value has no effect on how the data is +rolled up; it is merely used for tweaking the speed or memory cost of the indexer. +** *`rollup_index` (string)*: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. +** *`metrics` (Optional, { field, metrics }[])*: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each +group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined +on a per-field basis and for each field you configure which metric should be collected. +** *`timeout` (Optional, string | -1 | 0)*: Time to wait for the request to complete. +** *`headers` (Optional, Record)* [discrete] ==== rollup_search Enables searching rolled-up data using the standard query DSL. @@ -2984,9 +6724,20 @@ Enables searching rolled-up data using the standard query DSL. {ref}/rollup-search.html[Endpoint documentation] [source,ts] ---- -client.rollup.rollupSearch(...)
+client.rollup.rollupSearch({ index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: The indices or index-pattern(s) (containing rollup or regular data) that should be searched +** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response +** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response +** *`aggregations` (Optional, Record)* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data + [discrete] ==== start_job Starts an existing, stopped rollup job. @@ -2994,9 +6745,15 @@ Starts an existing, stopped rollup job. {ref}/rollup-start-job.html[Endpoint documentation] [source,ts] ---- -client.rollup.startJob(...) +client.rollup.startJob({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The ID of the job to start + [discrete] ==== stop_job Stops an existing, started rollup job. @@ -3004,9 +6761,17 @@ Stops an existing, started rollup job. {ref}/rollup-stop-job.html[Endpoint documentation] [source,ts] ---- -client.rollup.stopJob(...) +client.rollup.stopJob({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The ID of the job to stop +** *`timeout` (Optional, string | -1 | 0)*: Block for (at maximum) the specified duration while waiting for the job to stop. Defaults to 30s. +** *`wait_for_completion` (Optional, boolean)*: True if the API should block until the job has fully stopped, false if should be executed async. Defaults to false. + [discrete] === search_application [discrete] @@ -3016,9 +6781,15 @@ Deletes a search application. {ref}/put-search-application.html[Endpoint documentation] [source,ts] ---- -client.searchApplication.delete(...) +client.searchApplication.delete({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the search application to delete + [discrete] ==== delete_behavioral_analytics Delete a behavioral analytics collection. @@ -3026,9 +6797,15 @@ Delete a behavioral analytics collection. {ref}/delete-analytics-collection.html[Endpoint documentation] [source,ts] ---- -client.searchApplication.deleteBehavioralAnalytics(...) +client.searchApplication.deleteBehavioralAnalytics({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the analytics collection to be deleted + [discrete] ==== get Returns the details about a search application. @@ -3036,9 +6813,15 @@ Returns the details about a search application. {ref}/get-search-application.html[Endpoint documentation] [source,ts] ---- -client.searchApplication.get(...) 
+client.searchApplication.get({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the search application + [discrete] ==== get_behavioral_analytics Returns the existing behavioral analytics collections. @@ -3046,9 +6829,15 @@ Returns the existing behavioral analytics collections. {ref}/list-analytics-collection.html[Endpoint documentation] [source,ts] ---- -client.searchApplication.getBehavioralAnalytics(...) +client.searchApplication.getBehavioralAnalytics({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string[])*: A list of analytics collections to limit the returned information + [discrete] ==== list Returns the existing search applications. @@ -3056,9 +6845,17 @@ Returns the existing search applications. {ref}/list-search-applications.html[Endpoint documentation] [source,ts] ---- -client.searchApplication.list(...) +client.searchApplication.list({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`q` (Optional, string)*: Query in the Lucene query string syntax +** *`from` (Optional, number)*: Starting offset (default: 0) +** *`size` (Optional, number)*: Specifies a max number of results to get + [discrete] ==== post_behavioral_analytics_event Creates a behavioral analytics event for existing collection. @@ -3066,9 +6863,14 @@ Creates a behavioral analytics event for existing collection. http://todo.com/tbd[Endpoint documentation] [source,ts] ---- -client.searchApplication.postBehavioralAnalyticsEvent(...) +client.searchApplication.postBehavioralAnalyticsEvent() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== put Creates or updates a search application. @@ -3076,9 +6878,16 @@ Creates or updates a search application. {ref}/put-search-application.html[Endpoint documentation] [source,ts] ---- -client.searchApplication.put(...) +client.searchApplication.put({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the search application to be created or updated +** *`create` (Optional, boolean)*: If true, requires that a search application with the specified resource_id does not already exist. (default: false) + [discrete] ==== put_behavioral_analytics Creates a behavioral analytics collection. @@ -3086,9 +6895,30 @@ Creates a behavioral analytics collection. {ref}/put-analytics-collection.html[Endpoint documentation] [source,ts] ---- -client.searchApplication.putBehavioralAnalytics(...) +client.searchApplication.putBehavioralAnalytics({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the analytics collection to be created or updated + +[discrete] +==== render_query +Renders a query for the given search application search parameters + +{ref}/search-application-render-query.html[Endpoint documentation] +[source,ts] +---- +client.searchApplication.renderQuery() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== search Perform a search against a search application @@ -3096,9 +6926,16 @@ Perform a search against a search application {ref}/search-application-search.html[Endpoint documentation] [source,ts] ---- -client.searchApplication.search(...)
+client.searchApplication.search({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the search application to be searched +** *`params` (Optional, Record)* + [discrete] === searchable_snapshots [discrete] @@ -3108,9 +6945,16 @@ Retrieve node-level cache statistics about searchable snapshots. {ref}/searchable-snapshots-apis.html[Endpoint documentation] [source,ts] ---- -client.searchableSnapshots.cacheStats(...) +client.searchableSnapshots.cacheStats({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes +** *`master_timeout` (Optional, string | -1 | 0)* + [discrete] ==== clear_cache Clear the cache of searchable snapshots. @@ -3118,9 +6962,20 @@ Clear the cache of searchable snapshots. {ref}/searchable-snapshots-apis.html[Endpoint documentation] [source,ts] ---- -client.searchableSnapshots.clearCache(...) +client.searchableSnapshots.clearCache({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of index names +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`pretty` (Optional, boolean)* +** *`human` (Optional, boolean)* + [discrete] ==== mount Mount a snapshot as a searchable index. @@ -3128,9 +6983,23 @@ Mount a snapshot as a searchable index. {ref}/searchable-snapshots-api-mount-snapshot.html[Endpoint documentation] [source,ts] ---- -client.searchableSnapshots.mount(...) +client.searchableSnapshots.mount({ repository, snapshot, index }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: The name of the repository containing the snapshot of the index to mount +** *`snapshot` (string)*: The name of the snapshot of the index to mount +** *`index` (string)* +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before returning +** *`storage` (Optional, string)*: Selects the kind of local storage used to accelerate searches. Experimental, and defaults to `full_copy` +** *`renamed_index` (Optional, string)* +** *`index_settings` (Optional, Record)* +** *`ignore_index_settings` (Optional, string[])* + [discrete] ==== stats Retrieve shard-level statistics about searchable snapshots. @@ -3138,9 +7007,16 @@ Retrieve shard-level statistics about searchable snapshots. {ref}/searchable-snapshots-apis.html[Endpoint documentation] [source,ts] ---- -client.searchableSnapshots.stats(...) +client.searchableSnapshots.stats({ ... 
}) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of index names +** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Return stats aggregated at cluster, index or shard level + [discrete] === security [discrete] @@ -3150,9 +7026,14 @@ Enables authentication as a user and retrieve information about the authenticate {ref}/security-api-authenticate.html[Endpoint documentation] [source,ts] ---- -client.security.authenticate(...) +client.security.authenticate() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== bulk_update_api_keys Updates the attributes of multiple existing API keys. @@ -3160,9 +7041,14 @@ Updates the attributes of multiple existing API keys. {ref}/security-api-bulk-update-api-keys.html[Endpoint documentation] [source,ts] ---- -client.security.bulkUpdateApiKeys(...) +client.security.bulkUpdateApiKeys() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== change_password Changes the passwords of users in the native realm and built-in users. @@ -3170,9 +7056,22 @@ Changes the passwords of users in the native realm and built-in users. {ref}/security-api-change-password.html[Endpoint documentation] [source,ts] ---- -client.security.changePassword(...) +client.security.changePassword({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (Optional, string)*: The user whose password you want to change. If you do not specify this +parameter, the password is changed for the current user. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +** *`password` (Optional, string)*: The new password value. Passwords must be at least 6 characters long. +** *`password_hash` (Optional, string)*: A hash of the new password value. This must be produced using the same +hashing algorithm as has been configured for password storage. For more details, +see the explanation of the `xpack.security.authc.password_hashing.algorithm` +setting. + [discrete] ==== clear_api_key_cache Clear a subset or all entries from the API key cache. @@ -3180,9 +7079,15 @@ Clear a subset or all entries from the API key cache. {ref}/security-api-clear-api-key-cache.html[Endpoint documentation] [source,ts] ---- -client.security.clearApiKeyCache(...) +client.security.clearApiKeyCache({ ids }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`ids` (string | string[])*: A list of IDs of API keys to clear from the cache + [discrete] ==== clear_cached_privileges Evicts application privileges from the native application privileges cache. @@ -3190,9 +7095,15 @@ Evicts application privileges from the native application privileges cache. {ref}/security-api-clear-privilege-cache.html[Endpoint documentation] [source,ts] ---- -client.security.clearCachedPrivileges(...) +client.security.clearCachedPrivileges({ application }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`application` (string)*: A list of application names + [discrete] ==== clear_cached_realms Evicts users from the user cache. Can completely clear the cache or evict specific users. @@ -3200,9 +7111,16 @@ Evicts users from the user cache. 
Can completely clear the cache or evict specif {ref}/security-api-clear-cache.html[Endpoint documentation] [source,ts] ---- -client.security.clearCachedRealms(...) +client.security.clearCachedRealms({ realms }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`realms` (string | string[])*: List of realms to clear +** *`usernames` (Optional, string[])*: List of usernames to clear from the cache + [discrete] ==== clear_cached_roles Evicts roles from the native role cache. @@ -3210,9 +7128,15 @@ Evicts roles from the native role cache. {ref}/security-api-clear-role-cache.html[Endpoint documentation] [source,ts] ---- -client.security.clearCachedRoles(...) +client.security.clearCachedRoles({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string | string[])*: Role name + [discrete] ==== clear_cached_service_tokens Evicts tokens from the service account token caches. @@ -3220,9 +7144,17 @@ Evicts tokens from the service account token caches. {ref}/security-api-clear-service-token-caches.html[Endpoint documentation] [source,ts] ---- -client.security.clearCachedServiceTokens(...) +client.security.clearCachedServiceTokens({ namespace, service, name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`namespace` (string)*: An identifier for the namespace +** *`service` (string)*: An identifier for the service name +** *`name` (string | string[])*: A list of service token names + [discrete] ==== create_api_key Creates an API key for access without requiring basic authentication. @@ -3230,9 +7162,34 @@ Creates an API key for access without requiring basic authentication. {ref}/security-api-create-api-key.html[Endpoint documentation] [source,ts] ---- -client.security.createApiKey(...) +client.security.createApiKey({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. By default, API keys never expire. +** *`name` (Optional, string)*: Specifies the name for this API key. +** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage. + +[discrete] +==== create_cross_cluster_api_key +Creates a cross-cluster API key for API key based remote cluster access. 
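+
+The request body for this endpoint is not yet typed in this generated reference, so the following is only a loose sketch: the `name` and `access` fields are taken from the Elasticsearch REST documentation rather than from this client's type definitions, and `logs-*` is a hypothetical index pattern.
+
+[source,ts]
+----
+// Sketch only: body fields are assumptions based on the REST API docs,
+// since the generated signature below is still untyped.
+const apiKey = await client.security.createCrossClusterApiKey({
+  name: 'my-cross-cluster-key',     // hypothetical key name
+  access: {
+    search: [{ names: ['logs-*'] }] // hypothetical index pattern
+  }
+})
+----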
+ +{ref}/security-api-create-cross-cluster-api-key.html[Endpoint documentation] +[source,ts] +---- +client.security.createCrossClusterApiKey() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== create_service_token Creates a service account token for access without requiring basic authentication. @@ -3240,9 +7197,18 @@ Creates a service account token for access without requiring basic authenticatio {ref}/security-api-create-service-token.html[Endpoint documentation] [source,ts] ---- -client.security.createServiceToken(...) +client.security.createServiceToken({ namespace, service }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`namespace` (string)*: An identifier for the namespace +** *`service` (string)*: An identifier for the service name +** *`name` (Optional, string)*: An identifier for the token name +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + [discrete] ==== delete_privileges Removes application privileges. @@ -3250,9 +7216,17 @@ Removes application privileges. {ref}/security-api-delete-privilege.html[Endpoint documentation] [source,ts] ---- -client.security.deletePrivileges(...) +client.security.deletePrivileges({ application, name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`application` (string)*: Application name +** *`name` (string | string[])*: Privilege name +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + [discrete] ==== delete_role Removes roles in the native realm. @@ -3260,9 +7234,16 @@ Removes roles in the native realm. {ref}/security-api-delete-role.html[Endpoint documentation] [source,ts] ---- -client.security.deleteRole(...) +client.security.deleteRole({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: Role name +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + [discrete] ==== delete_role_mapping Removes role mappings. @@ -3270,9 +7251,16 @@ Removes role mappings. {ref}/security-api-delete-role-mapping.html[Endpoint documentation] [source,ts] ---- -client.security.deleteRoleMapping(...) +client.security.deleteRoleMapping({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: Role-mapping name +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + [discrete] ==== delete_service_token Deletes a service account token. @@ -3280,9 +7268,18 @@ Deletes a service account token. {ref}/security-api-delete-service-token.html[Endpoint documentation] [source,ts] ---- -client.security.deleteServiceToken(...) 
+client.security.deleteServiceToken({ namespace, service, name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`namespace` (string)*: An identifier for the namespace +** *`service` (string)*: An identifier for the service name +** *`name` (string)*: An identifier for the token name +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + [discrete] ==== delete_user Deletes users from the native realm. @@ -3290,9 +7287,16 @@ Deletes users from the native realm. {ref}/security-api-delete-user.html[Endpoint documentation] [source,ts] ---- -client.security.deleteUser(...) +client.security.deleteUser({ username }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (string)*: username +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + [discrete] ==== disable_user Disables users in the native realm. @@ -3300,9 +7304,16 @@ Disables users in the native realm. {ref}/security-api-disable-user.html[Endpoint documentation] [source,ts] ---- -client.security.disableUser(...) +client.security.disableUser({ username }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (string)*: The username of the user to disable +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + [discrete] ==== enable_user Enables users in the native realm. @@ -3310,9 +7321,16 @@ Enables users in the native realm. {ref}/security-api-enable-user.html[Endpoint documentation] [source,ts] ---- -client.security.enableUser(...) +client.security.enableUser({ username }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (string)*: The username of the user to enable +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + [discrete] ==== enroll_kibana Allows a kibana instance to configure itself to communicate with a secured elasticsearch cluster. @@ -3320,9 +7338,14 @@ Allows a kibana instance to configure itself to communicate with a secured elast {ref}/security-api-kibana-enrollment.html[Endpoint documentation] [source,ts] ---- -client.security.enrollKibana(...) +client.security.enrollKibana() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== enroll_node Allows a new node to enroll to an existing cluster with security enabled. @@ -3330,9 +7353,14 @@ Allows a new node to enroll to an existing cluster with security enabled. {ref}/security-api-node-enrollment.html[Endpoint documentation] [source,ts] ---- -client.security.enrollNode(...) +client.security.enrollNode() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== get_api_key Retrieves information for one or more API keys. 
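+
+As a minimal sketch, listing the API keys owned by the authenticated user together with the role descriptors that limit them could look like this (both flags are documented below):
+
+[source,ts]
+----
+// Sketch: fetch only the keys owned by the current user and include
+// the owner's role descriptors in the response.
+const response = await client.security.getApiKey({
+  owner: true,
+  with_limited_by: true
+})
+console.log(response.api_keys)
+----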
@@ -3340,9 +7368,23 @@ Retrieves information for one or more API keys. {ref}/security-api-get-api-key.html[Endpoint documentation] [source,ts] ---- -client.security.getApiKey(...) +client.security.getApiKey({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: API key id of the API key to be retrieved +** *`name` (Optional, string)*: API key name of the API key to be retrieved +** *`owner` (Optional, boolean)*: flag to query API keys owned by the currently authenticated user +** *`realm_name` (Optional, string)*: realm name of the user who created this API key to be retrieved +** *`username` (Optional, string)*: user name of the user who created this API key to be retrieved +** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors +associated with the API key. An API key's actual +permission is the intersection of its assigned role +descriptors and the owner user's role descriptors. + [discrete] ==== get_builtin_privileges Retrieves the list of cluster privileges and index privileges that are available in this version of Elasticsearch. @@ -3350,9 +7392,14 @@ Retrieves the list of cluster privileges and index privileges that are available {ref}/security-api-get-builtin-privileges.html[Endpoint documentation] [source,ts] ---- -client.security.getBuiltinPrivileges(...) +client.security.getBuiltinPrivileges() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== get_privileges Retrieves application privileges. @@ -3360,9 +7407,16 @@ Retrieves application privileges. {ref}/security-api-get-privileges.html[Endpoint documentation] [source,ts] ---- -client.security.getPrivileges(...) +client.security.getPrivileges({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`application` (Optional, string)*: Application name +** *`name` (Optional, string | string[])*: Privilege name + [discrete] ==== get_role Retrieves roles in the native realm. @@ -3370,9 +7424,15 @@ Retrieves roles in the native realm. {ref}/security-api-get-role.html[Endpoint documentation] [source,ts] ---- -client.security.getRole(...) +client.security.getRole({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: The name of the role. You can specify multiple roles as a list. If you do not specify this parameter, the API returns information about all roles. + [discrete] ==== get_role_mapping Retrieves role mappings. @@ -3380,9 +7440,15 @@ Retrieves role mappings. {ref}/security-api-get-role-mapping.html[Endpoint documentation] [source,ts] ---- -client.security.getRoleMapping(...) +client.security.getRoleMapping({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings. + [discrete] ==== get_service_accounts Retrieves information about service accounts. @@ -3390,9 +7456,16 @@ Retrieves information about service accounts. {ref}/security-api-get-service-accounts.html[Endpoint documentation] [source,ts] ---- -client.security.getServiceAccounts(...) +client.security.getServiceAccounts({ ... 
}) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`namespace` (Optional, string)*: Name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter. +** *`service` (Optional, string)*: Name of the service. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. + [discrete] ==== get_service_credentials Retrieves information of all service credentials for a service account. @@ -3400,9 +7473,16 @@ Retrieves information of all service credentials for a service account. {ref}/security-api-get-service-credentials.html[Endpoint documentation] [source,ts] ---- -client.security.getServiceCredentials(...) +client.security.getServiceCredentials({ namespace, service }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`namespace` (string)*: Name of the namespace. +** *`service` (string)*: Name of the service. + [discrete] ==== get_token Creates a bearer token for access without requiring basic authentication. @@ -3410,9 +7490,20 @@ Creates a bearer token for access without requiring basic authentication. {ref}/security-api-get-token.html[Endpoint documentation] [source,ts] ---- -client.security.getToken(...) +client.security.getToken({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))* +** *`scope` (Optional, string)* +** *`password` (Optional, string)* +** *`kerberos_ticket` (Optional, string)* +** *`refresh_token` (Optional, string)* +** *`username` (Optional, string)* + [discrete] ==== get_user Retrieves information about users in the native realm and built-in users. @@ -3420,9 +7511,16 @@ Retrieves information about users in the native realm and built-in users. {ref}/security-api-get-user.html[Endpoint documentation] [source,ts] ---- -client.security.getUser(...) +client.security.getUser({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (Optional, string | string[])*: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users. +** *`with_profile_uid` (Optional, boolean)*: If true, will return the User Profile ID for a user, if any. + [discrete] ==== get_user_privileges Retrieves security privileges for the logged in user. @@ -3430,9 +7528,17 @@ Retrieves security privileges for the logged in user. {ref}/security-api-get-user-privileges.html[Endpoint documentation] [source,ts] ---- -client.security.getUserPrivileges(...) +client.security.getUserPrivileges({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`application` (Optional, string)*: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. +** *`priviledge` (Optional, string)*: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. +** *`username` (Optional, string | null)* + [discrete] ==== grant_api_key Creates an API key on behalf of another user. @@ -3440,9 +7546,20 @@ Creates an API key on behalf of another user. {ref}/security-api-grant-api-key.html[Endpoint documentation] [source,ts] ---- -client.security.grantApiKey(...)
+client.security.grantApiKey({ api_key, grant_type }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`api_key` ({ name, expiration, role_descriptors, metadata })* +** *`grant_type` (Enum("access_token" | "password"))* +** *`access_token` (Optional, string)* +** *`username` (Optional, string)* +** *`password` (Optional, string)* +** *`run_as` (Optional, string)* + [discrete] ==== has_privileges Determines whether the specified user has a specified list of privileges. @@ -3450,9 +7567,18 @@ Determines whether the specified user has a specified list of privileges. {ref}/security-api-has-privileges.html[Endpoint documentation] [source,ts] ---- -client.security.hasPrivileges(...) +client.security.hasPrivileges({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`user` (Optional, string)*: Username +** *`application` (Optional, { application, privileges, resources }[])* +** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "grant_api_key" | "manage" | "manage_api_key" | "manage_ccr" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "read_ccr" | "read_ilm" | "read_pipeline" | "read_slm" | "transport_client")[])*: A list of the cluster privileges that you want to check. +** *`index` (Optional, { names, privileges, allow_restricted_indices }[])* + [discrete] ==== invalidate_api_key Invalidates one or more API keys. @@ -3460,9 +7586,20 @@ Invalidates one or more API keys. {ref}/security-api-invalidate-api-key.html[Endpoint documentation] [source,ts] ---- -client.security.invalidateApiKey(...) +client.security.invalidateApiKey({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)* +** *`ids` (Optional, string[])* +** *`name` (Optional, string)* +** *`owner` (Optional, boolean)* +** *`realm_name` (Optional, string)* +** *`username` (Optional, string)* + [discrete] ==== invalidate_token Invalidates one or more access tokens or refresh tokens. @@ -3470,9 +7607,18 @@ Invalidates one or more access tokens or refresh tokens. {ref}/security-api-invalidate-token.html[Endpoint documentation] [source,ts] ---- -client.security.invalidateToken(...) +client.security.invalidateToken({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`token` (Optional, string)* +** *`refresh_token` (Optional, string)* +** *`realm_name` (Optional, string)* +** *`username` (Optional, string)* + [discrete] ==== oidc_authenticate Exchanges an OpenID Connection authentication response message for an Elasticsearch access token and refresh token pair @@ -3480,19 +7626,29 @@ Exchanges an OpenID Connection authentication response message for an Elasticsea {ref}/security-api-oidc-authenticate.html[Endpoint documentation] [source,ts] ---- -client.security.oidcAuthenticate(...) 
+client.security.oidcAuthenticate() ---- [discrete] -==== oidc_logout -Invalidates a refresh token and access token that was generated from the OpenID Connect Authenticate API +==== Arguments + +* *Request (object):* + +[discrete] +==== oidc_logout +Invalidates a refresh token and access token that was generated from the OpenID Connect Authenticate API {ref}/security-api-oidc-logout.html[Endpoint documentation] [source,ts] ---- -client.security.oidcLogout(...) +client.security.oidcLogout() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== oidc_prepare_authentication Creates an OAuth 2.0 authentication request as a URL string @@ -3500,9 +7656,14 @@ Creates an OAuth 2.0 authentication request as a URL string {ref}/security-api-oidc-prepare-authentication.html[Endpoint documentation] [source,ts] ---- -client.security.oidcPrepareAuthentication(...) +client.security.oidcPrepareAuthentication() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== put_privileges Adds or updates application privileges. @@ -3510,9 +7671,15 @@ Adds or updates application privileges. {ref}/security-api-put-privileges.html[Endpoint documentation] [source,ts] ---- -client.security.putPrivileges(...) +client.security.putPrivileges({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + [discrete] ==== put_role Adds and updates roles in the native realm. @@ -3520,9 +7687,23 @@ Adds and updates roles in the native realm. {ref}/security-api-put-role.html[Endpoint documentation] [source,ts] ---- -client.security.putRole(...) +client.security.putRole({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the role. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries. +** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "grant_api_key" | "manage" | "manage_api_key" | "manage_ccr" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "read_ccr" | "read_ilm" | "read_pipeline" | "read_slm" | "transport_client")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. +** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. 
+** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries. +** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. +** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. +** *`transient_metadata` (Optional, { enabled })*: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. + [discrete] ==== put_role_mapping Creates and updates role mappings. @@ -3530,9 +7711,21 @@ Creates and updates role mappings. {ref}/security-api-put-role-mapping.html[Endpoint documentation] [source,ts] ---- -client.security.putRoleMapping(...) +client.security.putRoleMapping({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: Role-mapping name +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +** *`enabled` (Optional, boolean)* +** *`metadata` (Optional, Record)* +** *`roles` (Optional, string[])* +** *`rules` (Optional, { any, all, field, except })* +** *`run_as` (Optional, string[])* + [discrete] ==== put_user Adds and updates users in the native realm. These users are commonly referred to as native users. @@ -3540,9 +7733,23 @@ Adds and updates users in the native realm. These users are commonly referred to {ref}/security-api-put-user.html[Endpoint documentation] [source,ts] ---- -client.security.putUser(...) +client.security.putUser({ username }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (string)*: The username of the User +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +** *`email` (Optional, string | null)* +** *`full_name` (Optional, string | null)* +** *`metadata` (Optional, Record)* +** *`password` (Optional, string)* +** *`password_hash` (Optional, string)* +** *`roles` (Optional, string[])* +** *`enabled` (Optional, boolean)* + [discrete] ==== query_api_keys Retrieves information for API keys using a subset of query DSL @@ -3550,9 +7757,29 @@ Retrieves information for API keys using a subset of query DSL {ref}/security-api-query-api-key.html[Endpoint documentation] [source,ts] ---- -client.security.queryApiKeys(...) +client.security.queryApiKeys({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors +associated with the API key. An API key's actual +permission is the intersection of its assigned role +descriptors and the owner user's role descriptors. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: A query to filter which API keys to return. +The query supports a subset of query types, including match_all, bool, term, terms, ids, prefix, wildcard, and range. +You can query all public information associated with an API key +** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])* +** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* + [discrete] ==== saml_authenticate Exchanges a SAML Response message for an Elasticsearch access token and refresh token pair @@ -3560,9 +7787,17 @@ Exchanges a SAML Response message for an Elasticsearch access token and refresh {ref}/security-api-saml-authenticate.html[Endpoint documentation] [source,ts] ---- -client.security.samlAuthenticate(...) +client.security.samlAuthenticate({ content, ids }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`content` (string)*: The SAML response as it was sent by the user’s browser, usually a Base64 encoded XML document. +** *`ids` (string | string[])*: A json array with all the valid SAML Request Ids that the caller of the API has for the current user. +** *`realm` (Optional, string)*: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. + [discrete] ==== saml_complete_logout Verifies the logout response sent from the SAML IdP @@ -3570,9 +7805,18 @@ Verifies the logout response sent from the SAML IdP {ref}/security-api-saml-complete-logout.html[Endpoint documentation] [source,ts] ---- -client.security.samlCompleteLogout(...) +client.security.samlCompleteLogout({ realm, ids }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`realm` (string)*: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. +** *`ids` (string | string[])*: A json array with all the valid SAML Request Ids that the caller of the API has for the current user. +** *`query_string` (Optional, string)*: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. +** *`content` (Optional, string)*: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. 
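+
+As a hedged sketch, verifying a logout response delivered with the HTTP-Redirect binding could look like this (the realm name, request ID, and `samlQueryString` variable are all placeholders):
+
+[source,ts]
+----
+// Sketch: verify the IdP's logout response against the pending request ID.
+await client.security.samlCompleteLogout({
+  realm: 'saml1',                       // hypothetical realm name
+  ids: ['_pending_logout_request_id'],  // placeholder SAML request ID
+  query_string: samlQueryString         // placeholder: query string of the redirect URI
+})
+----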
+ [discrete] ==== saml_invalidate Consumes a SAML LogoutRequest @@ -3580,9 +7824,21 @@ Consumes a SAML LogoutRequest {ref}/security-api-saml-invalidate.html[Endpoint documentation] [source,ts] ---- -client.security.samlInvalidate(...) +client.security.samlInvalidate({ query_string }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`query_string` (string)*: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. +This query should include a single parameter named SAMLRequest that contains a SAML logout request that is deflated and Base64 encoded. +If the SAML IdP has signed the logout request, the URL should include two extra parameters named SigAlg and Signature that contain the algorithm used for the signature and the signature value itself. +In order for Elasticsearch to be able to verify the IdP’s signature, the value of the query_string field must be an exact match to the string provided by the browser. +The client application must not attempt to parse or process the string in any way. +** *`acs` (Optional, string)*: The Assertion Consumer Service URL that matches the one configured for the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the realm parameter. +** *`realm` (Optional, string)*: The name of the SAML realm in Elasticsearch whose configuration should be used. You must specify either this parameter or the acs parameter. + [discrete] ==== saml_logout Invalidates an access token and a refresh token that were generated via the SAML Authenticate API @@ -3590,9 +7846,18 @@ Invalidates an access token and a refresh token that were generated via the SAML {ref}/security-api-saml-logout.html[Endpoint documentation] [source,ts] ---- -client.security.samlLogout(...) +client.security.samlLogout({ token }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`token` (string)*: The access token that was returned as a response to calling the SAML authenticate API. +Alternatively, the most recent token that was received after refreshing the original one by using a refresh_token. +** *`refresh_token` (Optional, string)*: The refresh token that was returned as a response to calling the SAML authenticate API. +Alternatively, the most recent refresh token that was received after refreshing the original access token. + [discrete] ==== saml_prepare_authentication Creates a SAML authentication request @@ -3600,9 +7865,20 @@ Creates a SAML authentication request {ref}/security-api-saml-prepare-authentication.html[Endpoint documentation] [source,ts] ---- -client.security.samlPrepareAuthentication(...) +client.security.samlPrepareAuthentication({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`acs` (Optional, string)*: The Assertion Consumer Service URL that matches the one configured for one of the SAML realms in Elasticsearch. +The realm is used to generate the authentication request. You must specify either this parameter or the realm parameter. +** *`realm` (Optional, string)*: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. +You must specify either this parameter or the acs parameter. +** *`relay_state` (Optional, string)*: A string that will be included in the redirect URL that this API returns as the RelayState query parameter. +If the Authentication Request is signed, this value is used as part of the signature computation.
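+
+For example, a sketch that starts a SAML flow against a hypothetical realm named `saml1`; the `id` and `redirect` response fields follow the Elasticsearch SAML guide:
+
+[source,ts]
+----
+// Sketch: create the authentication request, keep its id for validating
+// the response later, and send the user's browser to the returned URL.
+const { id, redirect } = await client.security.samlPrepareAuthentication({
+  realm: 'saml1' // hypothetical realm name
+})
+----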
+ [discrete] ==== saml_service_provider_metadata Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider @@ -3610,9 +7886,15 @@ Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider {ref}/security-api-saml-sp-metadata.html[Endpoint documentation] [source,ts] ---- -client.security.samlServiceProviderMetadata(...) +client.security.samlServiceProviderMetadata({ realm_name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`realm_name` (string)*: The name of the SAML realm in Elasticsearch. + [discrete] ==== update_api_key Updates attributes of an existing API key. @@ -3620,9 +7902,32 @@ Updates attributes of an existing API key. {ref}/security-api-update-api-key.html[Endpoint documentation] [source,ts] ---- -client.security.updateApiKey(...) +client.security.updateApiKey({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The ID of the API key to update. +** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage. + +[discrete] +==== update_cross_cluster_api_key +Updates attributes of an existing cross-cluster API key. + +{ref}/security-api-update-cross-cluster-api-key.html[Endpoint documentation] +[source,ts] +---- +client.security.updateCrossClusterApiKey() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] === slm [discrete] @@ -3632,9 +7937,15 @@ Deletes an existing snapshot lifecycle policy. {ref}/slm-api-delete-policy.html[Endpoint documentation] [source,ts] ---- -client.slm.deleteLifecycle(...) +client.slm.deleteLifecycle({ policy_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`policy_id` (string)*: The id of the snapshot lifecycle policy to remove + [discrete] ==== execute_lifecycle Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. @@ -3642,9 +7953,15 @@ Immediately creates a snapshot according to the lifecycle policy, without waitin {ref}/slm-api-execute-lifecycle.html[Endpoint documentation] [source,ts] ---- -client.slm.executeLifecycle(...) +client.slm.executeLifecycle({ policy_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`policy_id` (string)*: The id of the snapshot lifecycle policy to be executed + [discrete] ==== execute_retention Deletes any snapshots that are expired according to the policy's retention rules. @@ -3652,9 +7969,14 @@ Deletes any snapshots that are expired according to the policy's retention rules {ref}/slm-api-execute-retention.html[Endpoint documentation] [source,ts] ---- -client.slm.executeRetention(...) 
+client.slm.executeRetention() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== get_lifecycle Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. @@ -3662,9 +7984,15 @@ Retrieves one or more snapshot lifecycle policy definitions and information abou {ref}/slm-api-get-policy.html[Endpoint documentation] [source,ts] ---- -client.slm.getLifecycle(...) +client.slm.getLifecycle({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`policy_id` (Optional, string | string[])*: List of snapshot lifecycle policies to retrieve + [discrete] ==== get_stats Returns global and policy-level statistics about actions taken by snapshot lifecycle management. @@ -3672,9 +8000,14 @@ Returns global and policy-level statistics about actions taken by snapshot lifec {ref}/slm-api-get-stats.html[Endpoint documentation] [source,ts] ---- -client.slm.getStats(...) +client.slm.getStats() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== get_status Retrieves the status of snapshot lifecycle management (SLM). @@ -3682,9 +8015,14 @@ Retrieves the status of snapshot lifecycle management (SLM). {ref}/slm-api-get-status.html[Endpoint documentation] [source,ts] ---- -client.slm.getStatus(...) +client.slm.getStatus() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== put_lifecycle Creates or updates a snapshot lifecycle policy. @@ -3692,9 +8030,22 @@ Creates or updates a snapshot lifecycle policy. {ref}/slm-api-put-policy.html[Endpoint documentation] [source,ts] ---- -client.slm.putLifecycle(...) +client.slm.putLifecycle({ policy_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`policy_id` (string)*: ID for the snapshot lifecycle policy you want to create or update. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })*: Configuration for each snapshot created by the policy. +** *`name` (Optional, string)*: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. +** *`repository` (Optional, string)*: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. +** *`retention` (Optional, { expire_after, max_count, min_count })*: Retention rules used to retain and delete snapshots created by the policy. +** *`schedule` (Optional, string)*: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. + [discrete] ==== start Turns on snapshot lifecycle management (SLM). @@ -3702,9 +8053,14 @@ Turns on snapshot lifecycle management (SLM). {ref}/slm-api-start.html[Endpoint documentation] [source,ts] ---- -client.slm.start(...) +client.slm.start() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== stop Turns off snapshot lifecycle management (SLM). @@ -3712,9 +8068,14 @@ Turns off snapshot lifecycle management (SLM). 
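+
+Stopping is asynchronous, so a minimal sketch pairs the call with a status check through `getStatus` (documented above):
+
+[source,ts]
+----
+// Sketch: request SLM shutdown, then inspect the reported mode;
+// it may read STOPPING until in-flight operations finish.
+await client.slm.stop()
+const { operation_mode } = await client.slm.getStatus()
+console.log(operation_mode)
+----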
{ref}/slm-api-stop.html[Endpoint documentation] [source,ts] ---- -client.slm.stop(...) +client.slm.stop() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] === snapshot [discrete] @@ -3724,9 +8085,17 @@ Removes stale data from repository. {ref}/clean-up-snapshot-repo-api.html[Endpoint documentation] [source,ts] ---- -client.snapshot.cleanupRepository(...) +client.snapshot.cleanupRepository({ repository }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: Snapshot repository to clean up. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. + [discrete] ==== clone Clones indices from one snapshot into another snapshot in the same repository. @@ -3734,9 +8103,20 @@ Clones indices from one snapshot into another snapshot in the same repository. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.clone(...) +client.snapshot.clone({ repository, snapshot, target_snapshot, indices }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: A repository name +** *`snapshot` (string)*: The name of the snapshot to clone from +** *`target_snapshot` (string)*: The name of the cloned snapshot to create +** *`indices` (string)* +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`timeout` (Optional, string | -1 | 0)* + [discrete] ==== create Creates a snapshot in a repository. @@ -3744,9 +8124,24 @@ Creates a snapshot in a repository. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.create(...) +client.snapshot.create({ repository, snapshot }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: Repository for the snapshot. +** *`snapshot` (string)*: Name of the snapshot. Must be unique in the repository. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed. +** *`include_global_state` (Optional, boolean)*: If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). +** *`indices` (Optional, string | string[])*: Data streams and indices to include in the snapshot. Supports multi-target syntax. Includes all data streams and indices by default. +** *`feature_states` (Optional, string[])*: Feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default. 
If `include_global_state` is `false`, no feature states are included by default. +** *`metadata` (Optional, Record)*: Optional metadata for the snapshot. May have any contents. Must be less than 1024 bytes. This map is not automatically generated by Elasticsearch. +** *`partial` (Optional, boolean)*: If `true`, allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. + [discrete] ==== create_repository Creates a repository. @@ -3754,9 +8149,20 @@ Creates a repository. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.createRepository(...) +client.snapshot.createRepository({ repository, type, settings }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: A repository name +** *`type` (string)* +** *`settings` ({ chunk_size, compress, concurrent_streams, location, read_only })* +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`verify` (Optional, boolean)*: Whether to verify the repository after creation + [discrete] ==== delete Deletes one or more snapshots. @@ -3764,9 +8170,17 @@ Deletes one or more snapshots. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.delete(...) +client.snapshot.delete({ repository, snapshot }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: A repository name +** *`snapshot` (string)*: A list of snapshot names +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node + [discrete] ==== delete_repository Deletes a repository. @@ -3774,9 +8188,17 @@ Deletes a repository. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.deleteRepository(...) +client.snapshot.deleteRepository({ repository }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string | string[])*: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout + [discrete] ==== get Returns information about a snapshot. @@ -3784,9 +8206,31 @@ Returns information about a snapshot. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.get(...) +client.snapshot.get({ repository, snapshot }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: List of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. +** *`snapshot` (string | string[])*: List of snapshot names to retrieve. Also accepts wildcards (*). +- To get information about all snapshots in a registered repository, use a wildcard (*) or _all. +- To get information about any snapshots that are currently running, use _current. +** *`ignore_unavailable` (Optional, boolean)*: If false, the request returns an error for any snapshots that are unavailable. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. +** *`verbose` (Optional, boolean)*: If true, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. +** *`index_details` (Optional, boolean)*: If true, returns additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. Defaults to false, meaning that this information is omitted. +** *`index_names` (Optional, boolean)*: If true, returns the name of each index in each snapshot. +** *`include_repository` (Optional, boolean)*: If true, returns the repository name in each snapshot. +** *`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))*: Allows setting a sort order for the result. Defaults to start_time, i.e. sorting by snapshot start time stamp. +** *`size` (Optional, number)*: Maximum number of snapshots to return. Defaults to 0 which means return all that match the request without limit. +** *`order` (Optional, Enum("asc" | "desc"))*: Sort order. Valid values are asc for ascending and desc for descending order. Defaults to asc, meaning ascending order. +** *`after` (Optional, string)*: Offset identifier to start pagination from as returned by the next field in the response body. +** *`offset` (Optional, number)*: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. +** *`from_sort_value` (Optional, string)*: Value of the current sort column at which to start retrieval. Can either be a string snapshot- or repository name when sorting by snapshot or repository name, a millisecond time value or a number when sorting by index- or shard count. +** *`slm_policy_filter` (Optional, string)*: Filter snapshots by a list of SLM policy names that snapshots belong to. Also accepts wildcards (*) and combinations of wildcards followed by exclude patterns starting with -. To include snapshots not created by an SLM policy you can use the special pattern _none that will match all snapshots without an SLM policy. + [discrete] ==== get_repository Returns information about a repository. @@ -3794,9 +8238,17 @@ Returns information about a repository. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.getRepository(...) +client.snapshot.getRepository({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (Optional, string | string[])*: A list of repository names +** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node + [discrete] ==== repository_analyze Analyzes a repository for correctness and performance @@ -3804,9 +8256,14 @@ Analyzes a repository for correctness and performance {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.repositoryAnalyze(...) +client.snapshot.repositoryAnalyze() ---- +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== restore Restores a snapshot. @@ -3814,9 +8271,28 @@ Restores a snapshot. 
{ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.restore(...) +client.snapshot.restore({ repository, snapshot }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: A repository name +** *`snapshot` (string)*: A snapshot name +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before returning +** *`feature_states` (Optional, string[])* +** *`ignore_index_settings` (Optional, string[])* +** *`ignore_unavailable` (Optional, boolean)* +** *`include_aliases` (Optional, boolean)* +** *`include_global_state` (Optional, boolean)* +** *`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, shards, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })* +** *`indices` (Optional, string | string[])* +** *`partial` (Optional, boolean)* +** *`rename_pattern` (Optional, string)* +** *`rename_replacement` (Optional, string)* + [discrete] ==== status Returns information about the status of a snapshot. @@ -3824,9 +8300,18 @@ Returns information about the status of a snapshot. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.status(...) +client.snapshot.status({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (Optional, string)*: A repository name +** *`snapshot` (Optional, string | string[])*: A list of snapshot names +** *`ignore_unavailable` (Optional, boolean)*: Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node + [discrete] ==== verify_repository Verifies a repository. @@ -3834,9 +8319,17 @@ Verifies a repository. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.verifyRepository(...) +client.snapshot.verifyRepository({ repository }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: A repository name +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout + [discrete] === sql [discrete] @@ -3846,9 +8339,15 @@ Clears the SQL cursor {ref}/clear-sql-cursor-api.html[Endpoint documentation] [source,ts] ---- -client.sql.clearCursor(...) +client.sql.clearCursor({ cursor }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`cursor` (string)* + [discrete] ==== delete_async Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. 
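Before the individual async endpoints below, a minimal sketch of how `sql.query` and `sql.clearCursor` (documented just above) fit together; the index name `my-index` is a hypothetical placeholder and a configured `client` instance is assumed:

[source,ts]
----
// Run an SQL query, fetching at most 100 rows per page.
const response = await client.sql.query({
  query: 'SELECT * FROM "my-index"',
  fetch_size: 100
})

// When the result set spans multiple pages, the response carries a cursor;
// release it on the server once you are done paging.
if (response.cursor != null) {
  await client.sql.clearCursor({ cursor: response.cursor })
}
----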
@@ -3856,9 +8355,15 @@ Deletes an async SQL search or a stored synchronous SQL search. If the search is
{ref}/delete-async-sql-search-api.html[Endpoint documentation]
[source,ts]
----
-client.sql.deleteAsync(...)
+client.sql.deleteAsync({ id })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: The async search ID
+
[discrete]
==== get_async
Returns the current status and available results for an async SQL search or stored synchronous SQL search
@@ -3866,9 +8371,22 @@ Returns the current status and available results for an async SQL search or stor
{ref}/get-async-sql-search-api.html[Endpoint documentation]
[source,ts]
----
-client.sql.getAsync(...)
+client.sql.getAsync({ id })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: The async search ID
+** *`delimiter` (Optional, string)*: Separator for CSV results. The API only supports this parameter for CSV responses.
+** *`format` (Optional, string)*: Format for the response. You must specify a format using this parameter or the
+Accept HTTP header. If you specify both, the API uses this parameter.
+** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for the search and its results. Defaults
+to the `keep_alive` period for the original SQL search.
+** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Period to wait for complete results. Defaults to no timeout,
+meaning the request waits for complete search results.
+
[discrete]
==== get_async_status
Returns the current status of an async SQL search or a stored synchronous SQL search
@@ -3876,9 +8394,15 @@ Returns the current status of an async SQL search or a stored synchronous SQL se
{ref}/get-async-sql-search-status-api.html[Endpoint documentation]
[source,ts]
----
-client.sql.getAsyncStatus(...)
+client.sql.getAsyncStatus({ id })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: The async search ID
+
[discrete]
==== query
Executes a SQL request
@@ -3886,9 +8410,32 @@ Executes a SQL request
{ref}/sql-search-api.html[Endpoint documentation]
[source,ts]
----
-client.sql.query(...)
+client.sql.query({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`format` (Optional, string)*: A short version of the Accept header, e.g. json, yaml
+** *`catalog` (Optional, string)*: Default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only.
+** *`columnar` (Optional, boolean)*: If true, returns the results in a columnar fashion: one row represents all the values of a certain column from the current page of results.
+** *`cursor` (Optional, string)*
+** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Optional Elasticsearch query DSL for additional filtering.
+** *`query` (Optional, string)*: SQL query to execute
+** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails.
+** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails.
+** *`time_zone` (Optional, string)*: Time zone in ISO 8601 format used for executing the query on the server.
+** *`field_multi_value_leniency` (Optional, boolean)*: Throw an exception when encountering multiple values for a field (default) or be lenient and return the first value from the list (without any guarantees of what that will be - typically the first in natural ascending order).
+** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Period to wait for complete results. Defaults to no timeout, meaning the request waits for complete search results. If the search doesn’t finish within this period, the search becomes async.
+** *`params` (Optional, Record)*: Values for parameters in the query.
+** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for an async or saved synchronous search.
+** *`keep_on_completion` (Optional, boolean)*: If true, Elasticsearch stores synchronous searches if you also specify the wait_for_completion_timeout parameter. If false, Elasticsearch only stores async searches that don’t finish before the wait_for_completion_timeout.
+** *`index_using_frozen` (Optional, boolean)*: If true, the search can run on frozen indices. Defaults to false.
+
[discrete]
==== translate
Translates SQL into Elasticsearch queries
@@ -3896,9 +8443,18 @@ Translates SQL into Elasticsearch queries
{ref}/sql-translate-api.html[Endpoint documentation]
[source,ts]
----
-client.sql.translate(...)
+client.sql.translate({ query })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`query` (string)*
+** *`fetch_size` (Optional, number)*
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
+** *`time_zone` (Optional, string)*
+
[discrete]
=== ssl
[discrete]
==== certificates
Retrieves information about the X.509 certificates used to encrypt communication
@@ -3908,9 +8464,61 @@ Retrieves information about the X.509 certificates used to encrypt communication
{ref}/security-api-ssl.html[Endpoint documentation]
[source,ts]
----
-client.ssl.certificates(...)
+client.ssl.certificates()
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
+[discrete]
+=== synonyms
+[discrete]
+==== delete
+Deletes a synonym set
+
+{ref}/delete-synonyms.html[Endpoint documentation]
+[source,ts]
+----
+client.synonyms.delete()
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
+[discrete]
+==== get
+Retrieves a synonym set
+
+{ref}/get-synonyms.html[Endpoint documentation]
+[source,ts]
+----
+client.synonyms.get()
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
+[discrete]
+==== put
+Creates or updates a synonyms set
+
+{ref}/put-synonyms.html[Endpoint documentation]
+[source,ts]
+----
+client.synonyms.put()
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
=== tasks
[discrete]
==== cancel
Cancels a task, if it can be cancelled through an API.

{ref}/tasks.html[Endpoint documentation]
[source,ts]
----
-client.tasks.cancel(...)
+client.tasks.cancel({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`task_id` (Optional, string | number)*: Cancel the task with specified task id (node_id:task_number)
+** *`actions` (Optional, string | string[])*: A list of actions that should be cancelled. Leave empty to cancel all.
+** *`nodes` (Optional, string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes
+** *`parent_task_id` (Optional, string)*: Cancel tasks with specified parent task id (node_id:task_number). Set to -1 to cancel all.
+** *`wait_for_completion` (Optional, boolean)*: Should the request block until the cancellation of the task and its descendant tasks is completed. Defaults to false
+
[discrete]
==== get
Returns information about a task.

{ref}/tasks.html[Endpoint documentation]
[source,ts]
----
-client.tasks.get(...)
+client.tasks.get({ task_id })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`task_id` (string)*: Return the task with specified id (node_id:task_number)
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`wait_for_completion` (Optional, boolean)*: Wait for the matching tasks to complete (default: false)
+
[discrete]
==== list
Returns a list of tasks.

{ref}/tasks.html[Endpoint documentation]
[source,ts]
----
-client.tasks.list(...)
+client.tasks.list({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`actions` (Optional, string | string[])*: List or wildcard expression of actions used to limit the request.
+** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about the running tasks.
+** *`group_by` (Optional, Enum("nodes" | "parents" | "none"))*: Key used to group tasks in the response.
+** *`node_id` (Optional, string[])*: List of node IDs or names used to limit returned information.
+** *`parent_task_id` (Optional, string)*: Parent task ID used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete.
+
[discrete]
=== text_structure
[discrete]
==== find_structure
Finds the structure of a text file. The text file must contain data that is suit
{ref}/find-structure.html[Endpoint documentation]
[source,ts]
----
-client.textStructure.findStructure(...)
+client.textStructure.findStructure({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`charset` (Optional, string)*: The text’s character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, windows-1252, or EUC-JP. If this parameter is not specified, the structure finder chooses an appropriate character set.
+** *`column_names` (Optional, string)*: If you have set format to delimited, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", etc.
+** *`delimiter` (Optional, string)*: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (|). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+** *`explain` (Optional, boolean)*: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result.
+** *`format` (Optional, string)*: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+** *`grok_pattern` (Optional, string)*: If you have set format to semi_structured_text, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the timestamp_field parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If grok_pattern is not specified, the structure finder creates a Grok pattern.
+** *`has_header_row` (Optional, boolean)*: If you have set format to delimited, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows.
+** *`line_merge_size_limit` (Optional, number)*: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected.
+** *`lines_to_sample` (Optional, number)*: The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2; if the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines.
+** *`quote` (Optional, string)*: If you have set format to delimited, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote ("). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
+** *`should_trim_fields` (Optional, boolean)*: If you have set format to delimited, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (|), the default value is true. Otherwise, the default value is false.
+** *`timeout` (Optional, string | -1 | 0)*: Sets the maximum amount of time that the structure analysis may take. If the analysis is still running when the timeout expires then it will be aborted.
+** *`timestamp_field` (Optional, string)*: Optional parameter to specify the timestamp field in the file
+** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text.
+
[discrete]
=== transform
[discrete]
==== delete_transform
Deletes an existing transform.

{ref}/delete-transform.html[Endpoint documentation]
[source,ts]
----
-client.transform.deleteTransform(...)
+client.transform.deleteTransform({ transform_id })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`transform_id` (string)*: Identifier for the transform.
+** *`force` (Optional, boolean)*: If this value is false, the transform must be stopped before it can be deleted. If true, the transform is
+deleted regardless of its current state.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
[discrete]
==== get_transform
Retrieves configuration information for transforms.

{ref}/get-transform.html[Endpoint documentation]
[source,ts]
----
-client.transform.getTransform(...)
+client.transform.getTransform({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`transform_id` (Optional, string | string[])*: Identifier for the transform. It can be a transform identifier or a
+wildcard expression. You can get information for all transforms by using
+`_all`, by specifying `*` as the ``, or by omitting the
+``.
+** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no transforms that match.
+2. Contains the _all string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+If this parameter is false, the request returns a 404 status code when
+there are no matches or only partial matches.
+** *`from` (Optional, number)*: Skips the specified number of transforms.
+** *`size` (Optional, number)*: Specifies the maximum number of transforms to obtain.
+** *`exclude_generated` (Optional, boolean)*: Excludes fields that were automatically added when creating the +transform. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. + [discrete] ==== get_transform_stats Retrieves usage information for transforms. @@ -3984,9 +8673,29 @@ Retrieves usage information for transforms. {ref}/get-transform-stats.html[Endpoint documentation] [source,ts] ---- -client.transform.getTransformStats(...) +client.transform.getTransformStats({ transform_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string | string[])*: Identifier for the transform. It can be a transform identifier or a +wildcard expression. You can get information for all transforms by using +`_all`, by specifying `*` as the ``, or by omitting the +``. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no transforms that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +If this parameter is false, the request returns a 404 status code when +there are no matches or only partial matches. +** *`from` (Optional, number)*: Skips the specified number of transforms. +** *`size` (Optional, number)*: Specifies the maximum number of transforms to obtain. +** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the stats + [discrete] ==== preview_transform Previews a transform. @@ -3994,9 +8703,34 @@ Previews a transform. {ref}/preview-transform.html[Endpoint documentation] [source,ts] ---- -client.transform.previewTransform(...) +client.transform.previewTransform({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (Optional, string)*: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform +configuration details in the request body. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. +** *`dest` (Optional, { index, op_type, pipeline, routing, version_type })*: The destination for the transform. +** *`description` (Optional, string)*: Free text description of the transform. +** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the +transform is running continuously. Also determines the retry interval in +the event of transient failures while the transform is searching or +indexing. The minimum value is 1s and the maximum is 1h. +** *`pivot` (Optional, { aggregations, group_by })*: The pivot method transforms the data by aggregating and grouping it. +These objects define the group by fields and the aggregation to reduce +the data. +** *`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. +** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. +** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. +** *`retention_policy` (Optional, { time })*: Defines a retention policy for the transform. Data that meets the defined +criteria is deleted from the destination index. 
+** *`latest` (Optional, { sort, unique_key })*: The latest method transforms the data by finding the latest document for +each unique key. + [discrete] ==== put_transform Instantiates a transform. @@ -4004,9 +8738,36 @@ Instantiates a transform. {ref}/put-transform.html[Endpoint documentation] [source,ts] ---- -client.transform.putTransform(...) +client.transform.putTransform({ transform_id, dest, source }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. +** *`dest` ({ index, op_type, pipeline, routing, version_type })*: The destination for the transform. +** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. +** *`defer_validation` (Optional, boolean)*: When the transform is created, a series of validations occur to ensure its success. For example, there is a +check for the existence of the source indices and a check that the destination index is not part of the source +index pattern. You can use this parameter to skip the checks, for example when the source index does not exist +until after the transform is created. The validations are always run when you start the transform, however, with +the exception of privilege checks. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`description` (Optional, string)*: Free text description of the transform. +** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the transform is running continuously. Also +determines the retry interval in the event of transient failures while the transform is searching or indexing. +The minimum value is `1s` and the maximum is `1h`. +** *`latest` (Optional, { sort, unique_key })*: The latest method transforms the data by finding the latest document for each unique key. +** *`_meta` (Optional, Record)*: Defines optional transform metadata. +** *`pivot` (Optional, { aggregations, group_by })*: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields +and the aggregation to reduce the data. +** *`retention_policy` (Optional, { time })*: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the +destination index. +** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. +** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. + [discrete] ==== reset_transform Resets an existing transform. @@ -4014,9 +8775,18 @@ Resets an existing transform. {ref}/reset-transform.html[Endpoint documentation] [source,ts] ---- -client.transform.resetTransform(...) +client.transform.resetTransform({ transform_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. 
+** *`force` (Optional, boolean)*: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform +must be stopped before it can be reset. + [discrete] ==== schedule_now_transform Schedules now a transform. @@ -4024,9 +8794,16 @@ Schedules now a transform. {ref}/schedule-now-transform.html[Endpoint documentation] [source,ts] ---- -client.transform.scheduleNowTransform(...) +client.transform.scheduleNowTransform({ transform_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. +** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the scheduling to take place + [discrete] ==== start_transform Starts one or more transforms. @@ -4034,9 +8811,17 @@ Starts one or more transforms. {ref}/start-transform.html[Endpoint documentation] [source,ts] ---- -client.transform.startTransform(...) +client.transform.startTransform({ transform_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`from` (Optional, string)*: Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. + [discrete] ==== stop_transform Stops one or more transforms. @@ -4044,9 +8829,32 @@ Stops one or more transforms. {ref}/stop-transform.html[Endpoint documentation] [source,ts] ---- -client.transform.stopTransform(...) +client.transform.stopTransform({ transform_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. To stop multiple transforms, use a list or a wildcard expression. +To stop all transforms, use `_all` or `*` as the identifier. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; +contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there +are only partial matches. + +If it is true, the API returns a successful acknowledgement message when there are no matches. When there are +only partial matches, the API stops the appropriate transforms. + +If it is false, the request returns a 404 status code when there are no matches or only partial matches. +** *`force` (Optional, boolean)*: If it is true, the API forcefully stops the transforms. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the +timeout expires, the request returns a timeout exception. However, the request continues processing and +eventually moves the transform to a STOPPED state. +** *`wait_for_checkpoint` (Optional, boolean)*: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, +the transform stops as soon as possible. +** *`wait_for_completion` (Optional, boolean)*: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns +immediately and the indexer is stopped asynchronously in the background. + [discrete] ==== update_transform Updates certain properties of a transform. @@ -4054,9 +8862,32 @@ Updates certain properties of a transform. 
{ref}/update-transform.html[Endpoint documentation] [source,ts] ---- -client.transform.updateTransform(...) +client.transform.updateTransform({ transform_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. +** *`defer_validation` (Optional, boolean)*: When true, deferrable validations are not run. This behavior may be +desired if the source index does not exist until after the transform is +created. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. +** *`dest` (Optional, { index, op_type, pipeline, routing, version_type })*: The destination for the transform. +** *`description` (Optional, string)*: Free text description of the transform. +** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the +transform is running continuously. Also determines the retry interval in +the event of transient failures while the transform is searching or +indexing. The minimum value is 1s and the maximum is 1h. +** *`_meta` (Optional, Record)*: Defines optional transform metadata. +** *`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. +** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. +** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. +** *`retention_policy` (Optional, { time } | null)*: Defines a retention policy for the transform. Data that meets the defined +criteria is deleted from the destination index. + [discrete] ==== upgrade_transforms Upgrades all transforms. @@ -4064,9 +8895,17 @@ Upgrades all transforms. {ref}/upgrade-transforms.html[Endpoint documentation] [source,ts] ---- -client.transform.upgradeTransforms(...) +client.transform.upgradeTransforms({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`dry_run` (Optional, boolean)*: When true, the request checks for updates but does not run them. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and +returns an error. + [discrete] === watcher [discrete] @@ -4076,9 +8915,16 @@ Acknowledges a watch, manually throttling the execution of the watch's actions. {ref}/watcher-api-ack-watch.html[Endpoint documentation] [source,ts] ---- -client.watcher.ackWatch(...) +client.watcher.ackWatch({ watch_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`watch_id` (string)*: Watch ID +** *`action_id` (Optional, string | string[])*: A list of the action ids to be acked + [discrete] ==== activate_watch Activates a currently inactive watch. @@ -4086,9 +8932,15 @@ Activates a currently inactive watch. {ref}/watcher-api-activate-watch.html[Endpoint documentation] [source,ts] ---- -client.watcher.activateWatch(...) +client.watcher.activateWatch({ watch_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`watch_id` (string)*: Watch ID + [discrete] ==== deactivate_watch Deactivates a currently active watch. @@ -4096,9 +8948,15 @@ Deactivates a currently active watch. {ref}/watcher-api-deactivate-watch.html[Endpoint documentation] [source,ts] ---- -client.watcher.deactivateWatch(...) 
+client.watcher.deactivateWatch({ watch_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`watch_id` (string)*: Watch ID + [discrete] ==== delete_watch Removes a watch from Watcher. @@ -4106,9 +8964,15 @@ Removes a watch from Watcher. {ref}/watcher-api-delete-watch.html[Endpoint documentation] [source,ts] ---- -client.watcher.deleteWatch(...) +client.watcher.deleteWatch({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Watch ID + [discrete] ==== execute_watch Forces the execution of a stored watch. @@ -4116,9 +8980,38 @@ Forces the execution of a stored watch. {ref}/watcher-api-execute-watch.html[Endpoint documentation] [source,ts] ---- -client.watcher.executeWatch(...) +client.watcher.executeWatch({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Identifier for the watch. +** *`debug` (Optional, boolean)*: Defines whether the watch runs in debug mode. +** *`action_modes` (Optional, Record)*: Determines how to handle the watch actions as part of the watch execution. +** *`alternative_input` (Optional, Record)*: When present, the watch uses this object as a payload instead of executing its own input. +** *`ignore_condition` (Optional, boolean)*: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. +** *`record_execution` (Optional, boolean)*: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent executions. This can also be specified as an HTTP parameter. +** *`simulated_actions` (Optional, { actions, all, use_all })* +** *`trigger_data` (Optional, { scheduled_time, triggered_time })*: This structure is parsed as the data of the trigger event that will be used during the watch execution +** *`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })*: When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and record_execution cannot be set. + +[discrete] +==== get_settings +Retrieve settings for the watcher system index + +{ref}/watcher-api-get-settings.html[Endpoint documentation] +[source,ts] +---- +client.watcher.getSettings() +---- + +[discrete] +==== Arguments + +* *Request (object):* + [discrete] ==== get_watch Retrieves a watch by its ID. @@ -4126,9 +9019,15 @@ Retrieves a watch by its ID. {ref}/watcher-api-get-watch.html[Endpoint documentation] [source,ts] ---- -client.watcher.getWatch(...) +client.watcher.getWatch({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Watch ID + [discrete] ==== put_watch Creates a new watch, or updates an existing one. @@ -4136,9 +9035,26 @@ Creates a new watch, or updates an existing one. {ref}/watcher-api-put-watch.html[Endpoint documentation] [source,ts] ---- -client.watcher.putWatch(...) 
+client.watcher.putWatch({ id })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: Watch ID
+** *`active` (Optional, boolean)*: Specify whether the watch is active or inactive by default
+** *`if_primary_term` (Optional, number)*: Only update the watch if the last operation that has changed the watch has the specified primary term
+** *`if_seq_no` (Optional, number)*: Only update the watch if the last operation that has changed the watch has the specified sequence number
+** *`version` (Optional, number)*: Explicit version number for concurrency control
+** *`actions` (Optional, Record)*
+** *`condition` (Optional, { always, array_compare, compare, never, script })*
+** *`input` (Optional, { chain, http, search, simple })*
+** *`metadata` (Optional, Record)*
+** *`throttle_period` (Optional, string)*
+** *`transform` (Optional, { chain, script, search })*
+** *`trigger` (Optional, { schedule })*
+
[discrete]
==== query_watches
Retrieves stored watches.

{ref}/watcher-api-query-watches.html[Endpoint documentation]
[source,ts]
----
-client.watcher.queryWatches(...)
+client.watcher.queryWatches({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`from` (Optional, number)*: The offset from the first result to fetch. Needs to be non-negative.
+** *`size` (Optional, number)*: The number of hits to return. Needs to be non-negative.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Optional query used to filter the watches to be returned.
+** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Optional sort definition.
+** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Optional search_after used for pagination, based on the last hit’s sort values.
+
[discrete]
==== start
Starts Watcher if it is not already running.

{ref}/watcher-api-start.html[Endpoint documentation]
[source,ts]
----
-client.watcher.start(...)
+client.watcher.start()
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+
[discrete]
==== stats
Retrieves the current Watcher metrics.

{ref}/watcher-api-stats.html[Endpoint documentation]
[source,ts]
----
-client.watcher.stats(...)
+client.watcher.stats({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`metric` (Optional, Enum("_all" | "queued_watches" | "current_watches" | "pending_watches") | Enum("_all" | "queued_watches" | "current_watches" | "pending_watches")[])*: Defines which additional metrics are included in the response.
+** *`emit_stacktraces` (Optional, boolean)*: Defines whether stack traces are generated for each watch that is running.
+
[discrete]
==== stop
Stops Watcher if it is running.
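The watcher calls documented above all take the same request-object shape; as a minimal sketch (the watch ID `my_watch` is a hypothetical placeholder), listing stored watches and acknowledging one might look like:

[source,ts]
----
// Page through the stored watches, ten at a time.
const { watches } = await client.watcher.queryWatches({ from: 0, size: 10 })

// Acknowledge a watch so its actions are throttled until it fires again.
await client.watcher.ackWatch({ watch_id: 'my_watch' })
----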
@@ -4176,9 +9114,29 @@ Stops Watcher if it is running. {ref}/watcher-api-stop.html[Endpoint documentation] [source,ts] ---- -client.watcher.stop(...) +client.watcher.stop() ---- +[discrete] +==== Arguments + +* *Request (object):* + +[discrete] +==== update_settings +Update settings for the watcher system index + +{ref}/watcher-api-update-settings.html[Endpoint documentation] +[source,ts] +---- +client.watcher.updateSettings() +---- + +[discrete] +==== Arguments + +* *Request (object):* + [discrete] === xpack [discrete] @@ -4188,9 +9146,17 @@ Retrieves information about the installed X-Pack features. {ref}/info-api.html[Endpoint documentation] [source,ts] ---- -client.xpack.info(...) +client.xpack.info({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`categories` (Optional, string[])*: A list of the information categories to include in the response. For example, `build,license,features`. +** *`accept_enterprise` (Optional, boolean)*: If this param is used it must be set to true +** *`human` (Optional, boolean)*: Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. + [discrete] ==== usage Retrieves usage information about the installed X-Pack features. @@ -4198,6 +9164,12 @@ Retrieves usage information about the installed X-Pack features. {ref}/usage-api.html[Endpoint documentation] [source,ts] ---- -client.xpack.usage(...) +client.xpack.usage({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index 3382499d2..a0c36598b 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -43,6 +43,10 @@ export default class AsyncSearch { this.transport = transport } + /** + * Deletes an async search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/async-search.html Elasticsearch API docs} + */ async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class AsyncSearch { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves the results of a previously submitted async search request given its ID. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/async-search.html Elasticsearch API docs} + */ async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> @@ -87,6 +95,10 @@ export default class AsyncSearch { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves the status of a previously submitted async search request given its ID. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/async-search.html Elasticsearch API docs} + */ async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise @@ -109,6 +121,10 @@ export default class AsyncSearch { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Executes a search request asynchronously. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/async-search.html Elasticsearch API docs} + */ async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index af5fc0c66..a23212f2e 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -43,6 +43,10 @@ export default class Autoscaling { this.transport = transport } + /** + * Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/autoscaling-delete-autoscaling-policy.html Elasticsearch API docs} + */ async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class Autoscaling { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Gets the current autoscaling capacity based on the configured autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/autoscaling-get-autoscaling-capacity.html Elasticsearch API docs} + */ async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise @@ -88,6 +96,10 @@ export default class Autoscaling { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/autoscaling-get-autoscaling-capacity.html Elasticsearch API docs} + */ async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise @@ -110,6 +122,10 @@ export default class Autoscaling { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/autoscaling-put-autoscaling-policy.html Elasticsearch API docs} + */ async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index b3fc16666..41a700dfe 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Allows to perform multiple index/update/delete operations in a single request. 
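+ * @example
+ * // Illustrative sketch only (not from the generated docs): the index name and
+ * // document shapes below are assumptions made for the example.
+ * const result = await client.bulk({
+ *   operations: [
+ *     { index: { _index: 'my-index' } },
+ *     { title: 'foo' },
+ *     { index: { _index: 'my-index' } },
+ *     { title: 'bar' }
+ *   ]
+ * })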
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-bulk.html Elasticsearch API docs} + */ export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index 74e383095..127c198c8 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -43,6 +43,10 @@ export default class Cat { this.transport = transport } + /** + * Shows information about currently configured aliases to indices, including filter and routing info. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-alias.html Elasticsearch API docs} + */ async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptions): Promise @@ -73,6 +77,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-allocation.html Elasticsearch API docs} + */ async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise> async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptions): Promise @@ -103,6 +111,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about existing component templates. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-component-templates.html Elasticsearch API docs} + */ async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise @@ -133,6 +145,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Provides quick access to the document count of the entire cluster, or individual indices.
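+   * @example
+   * // Hedged example, not part of the generated docs: 'my-index' is a placeholder,
+   * // and format: 'json' asks the cat API for structured instead of tabular output.
+   * const counts = await client.cat.count({ index: 'my-index', format: 'json' })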
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-count.html Elasticsearch API docs} + */ async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise> async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptions): Promise @@ -163,6 +179,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Shows how much heap memory is currently being used by fielddata on every data node in the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-fielddata.html Elasticsearch API docs} + */ async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise> async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptions): Promise @@ -193,6 +213,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns a concise representation of the cluster health. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-health.html Elasticsearch API docs} + */ async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptions): Promise @@ -216,6 +240,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns help for the Cat APIs. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat.html Elasticsearch API docs} + */ async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise> async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptions): Promise @@ -239,6 +267,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about indices: number of primaries and replicas, document counts, disk size, ... 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-indices.html Elasticsearch API docs} + */ async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptions): Promise @@ -269,6 +301,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about the master node. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-master.html Elasticsearch API docs} + */ async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise> async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptions): Promise @@ -292,6 +328,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Gets configuration and usage information about data frame analytics jobs. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-dfanalytics.html Elasticsearch API docs} + */ async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise @@ -322,6 +362,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Gets configuration and usage information about datafeeds. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-datafeeds.html Elasticsearch API docs} + */ async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise @@ -352,6 +396,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Gets configuration and usage information about anomaly detection jobs. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-anomaly-detectors.html Elasticsearch API docs} + */ async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptions): Promise @@ -382,6 +430,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Gets configuration and usage information about inference trained models. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-trained-model.html Elasticsearch API docs} + */ async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise @@ -412,6 +464,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about custom node attributes. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-nodeattrs.html Elasticsearch API docs} + */ async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise> async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptions): Promise @@ -435,6 +491,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns basic statistics about performance of cluster nodes. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-nodes.html Elasticsearch API docs} + */ async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise> async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptions): Promise @@ -458,6 +518,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns a concise representation of the cluster pending tasks. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-pending-tasks.html Elasticsearch API docs} + */ async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptions): Promise @@ -481,6 +545,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about installed plugins across nodes. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-plugins.html Elasticsearch API docs} + */ async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise> async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptions): Promise @@ -504,6 +572,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about index shard recoveries, both on-going and completed. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-recovery.html Elasticsearch API docs} + */ async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptions): Promise @@ -534,6 +606,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about snapshot repositories registered in the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-repositories.html Elasticsearch API docs} + */ async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptions): Promise @@ -557,6 +633,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Provides low-level information about the segments in the shards of an index.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-segments.html Elasticsearch API docs} + */ async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptions): Promise @@ -587,6 +667,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Provides a detailed view of shard allocation on nodes. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-shards.html Elasticsearch API docs} + */ async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptions): Promise @@ -617,6 +701,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns all snapshots in a specific repository. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-snapshots.html Elasticsearch API docs} + */ async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptions): Promise @@ -647,6 +735,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about the tasks currently executing on one or more nodes in the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/tasks.html Elasticsearch API docs} + */ async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptions): Promise @@ -670,6 +762,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about existing templates. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-templates.html Elasticsearch API docs} + */ async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptions): Promise @@ -700,6 +796,11 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns cluster-wide thread pool statistics per node. + By default the active, queue and rejected statistics are returned for all thread pools. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-thread-pool.html Elasticsearch API docs} + */ async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptionsWithOutMeta): Promise async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise> async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptions): Promise @@ -730,6 +831,10 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Gets configuration and usage information about transforms. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-transforms.html Elasticsearch API docs} + */ async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index 63c7ada21..5b61ba9c4 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -43,6 +43,10 @@ export default class Ccr { this.transport = transport } + /** + * Deletes auto-follow patterns. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-delete-auto-follow-pattern.html Elasticsearch API docs} + */ async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates a new follower index configured to follow the referenced leader index. 
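+   * @example
+   * // Sketch for illustration only; the cluster alias and index names are assumptions.
+   * // 'my-remote' must already be configured as a remote cluster on the local cluster.
+   * await client.ccr.follow({
+   *   index: 'follower-index',
+   *   remote_cluster: 'my-remote',
+   *   leader_index: 'leader-index'
+   * })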
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-put-follow.html Elasticsearch API docs} + */ async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptions): Promise @@ -99,6 +107,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves information about all follower indices, including parameters and status for each follower index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-get-follow-info.html Elasticsearch API docs} + */ async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise @@ -121,6 +133,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves follower stats. Returns shard-level stats about the following tasks associated with each shard for the specified indices. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-get-follow-stats.html Elasticsearch API docs} + */ async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise @@ -143,6 +159,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Removes the follower retention leases from the leader. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-post-forget-follower.html Elasticsearch API docs} + */ async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise> async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise @@ -177,6 +197,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-get-auto-follow-pattern.html Elasticsearch API docs} + */ async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise @@ -207,6 +231,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Pauses an auto-follow pattern + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-pause-auto-follow-pattern.html Elasticsearch API docs} + */ async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise @@ -229,6 +257,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Pauses a follower index. The follower index will not fetch any additional operations from the leader index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-post-pause-follow.html Elasticsearch API docs} + */ async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise @@ -251,6 +283,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. 
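+   * @example
+   * // Illustrative sketch; the pattern name, remote cluster alias and index pattern
+   * // are placeholder assumptions, not values taken from the original patch.
+   * await client.ccr.putAutoFollowPattern({
+   *   name: 'my-pattern',
+   *   remote_cluster: 'my-remote',
+   *   leader_index_patterns: ['logs-*']
+   * })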
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-put-auto-follow-pattern.html Elasticsearch API docs} + */ async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise @@ -285,6 +321,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Resumes an auto-follow pattern that has been paused + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-resume-auto-follow-pattern.html Elasticsearch API docs} + */ async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise @@ -307,6 +347,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Resumes a follower index that has been paused + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-post-resume-follow.html Elasticsearch API docs} + */ async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise @@ -341,6 +385,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Gets all stats related to cross-cluster replication. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-get-stats.html Elasticsearch API docs} + */ async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptions): Promise @@ -364,6 +412,10 @@ export default class Ccr { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-post-unfollow.html Elasticsearch API docs} + */ async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts index a14482cd7..3e5b25adb 100644 --- a/src/api/api/clear_scroll.ts +++ b/src/api/api/clear_scroll.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Explicitly clears the search context for a scroll. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/clear-scroll-api.html Elasticsearch API docs} + */ export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts index 4e1a7d4b1..f595531a0 100644 --- a/src/api/api/close_point_in_time.ts +++ b/src/api/api/close_point_in_time.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Close a point in time + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/point-in-time-api.html Elasticsearch API docs} + */ export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 4cb08fe44..be7188fd7 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -43,6 +43,10 @@ export default class Cluster { this.transport = transport } + /** + * Provides explanations for shard allocations in the cluster. 
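+   * @example
+   * // A hedged usage sketch (not generated from the spec): asks why shard 0 of the
+   * // assumed index 'my-index' is allocated where it is.
+   * const explanation = await client.cluster.allocationExplain({
+   *   index: 'my-index',
+   *   shard: 0,
+   *   primary: true
+   * })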
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-allocation-explain.html Elasticsearch API docs} + */ async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise> async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise @@ -78,6 +82,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes a component template + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-component-template.html Elasticsearch API docs} + */ async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise @@ -100,6 +108,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Clears cluster voting config exclusions. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/voting-config-exclusions.html Elasticsearch API docs} + */ async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise @@ -123,6 +135,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about whether a particular component template exists + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-component-template.html Elasticsearch API docs} + */ async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise @@ -145,6 +161,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns one or more component templates + * @see {@link
https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-component-template.html Elasticsearch API docs} + */ async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise @@ -175,6 +195,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns cluster settings. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-get-settings.html Elasticsearch API docs} + */ async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise @@ -198,6 +222,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns basic information about the health of the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-health.html Elasticsearch API docs} + */ async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptions): Promise @@ -228,6 +256,37 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns different information about the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-info.html Elasticsearch API docs} + */ + async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptions): Promise + async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['target'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_info/${encodeURIComponent(params.target.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + /** + * Returns a list of any cluster-level changes (e.g. 
create index, update mapping, + allocate or fail shard) which have not yet been executed. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-pending.html Elasticsearch API docs} + */ async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise @@ -251,6 +310,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates the cluster voting config exclusions by node ids or node names. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/voting-config-exclusions.html Elasticsearch API docs} + */ async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise @@ -274,6 +337,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates or updates a component template + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-component-template.html Elasticsearch API docs} + */ async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise @@ -308,6 +375,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates the cluster settings. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-update-settings.html Elasticsearch API docs} + */ async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise @@ -343,6 +414,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns the information about configured remote clusters. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-remote-info.html Elasticsearch API docs} + */ async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise @@ -366,6 +441,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Allows to manually change the allocation of individual shards in the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-reroute.html Elasticsearch API docs} + */ async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise> async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptions): Promise @@ -401,6 +480,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns comprehensive information about the state of the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-state.html Elasticsearch API docs} + */ async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise> async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptions): Promise @@ -434,6 +517,10 @@ export default class Cluster { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns a high-level overview of cluster statistics. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-stats.html Elasticsearch API docs} + */ async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/count.ts b/src/api/api/count.ts index aec469fd7..0e222b780 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns the number of documents matching a query.
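+ * @example
+ * // Minimal sketch, added for illustration; the index name and query are assumptions.
+ * const { count } = await client.count({
+ *   index: 'my-index',
+ *   query: { match_all: {} }
+ * })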
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-count.html Elasticsearch API docs} + */ export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/create.ts b/src/api/api/create.ts index 61e3fbc75..0284dc19c 100644 --- a/src/api/api/create.ts +++ b/src/api/api/create.ts @@ -37,6 +37,12 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Creates a new document in the index. + +Returns a 409 response when a document with the same ID already exists in the index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-index_.html Elasticsearch API docs} + */ export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index 493260a94..825e565aa 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -43,6 +43,10 @@ export default class DanglingIndices { this.transport = transport } + /** + * Deletes the specified dangling index + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-gateway-dangling-indices.html Elasticsearch API docs} + */ async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class DanglingIndices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Imports the specified dangling index + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-gateway-dangling-indices.html Elasticsearch API docs} + */ async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise @@ -87,6 +95,10 @@ export default class DanglingIndices { return await
this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns all dangling indices. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-gateway-dangling-indices.html Elasticsearch API docs} + */ async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts index 1f7c06ddf..3a8783a88 100644 --- a/src/api/api/delete.ts +++ b/src/api/api/delete.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Removes a document from the index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-delete.html Elasticsearch API docs} + */ export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index 665bfe810..7b7664581 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Deletes documents matching the provided query. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-delete-by-query.html Elasticsearch API docs} + */ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts index 60c722e0a..c84002519 100644 --- a/src/api/api/delete_by_query_rethrottle.ts +++ b/src/api/api/delete_by_query_rethrottle.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Changes the number of requests per second for a particular Delete By Query operation. 
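+ * @example
+ * // Illustrative only: the task id below is a made-up placeholder; a real id is
+ * // returned by deleteByQuery when it is started with wait_for_completion: false.
+ * await client.deleteByQueryRethrottle({
+ *   task_id: 'node-1:12345',
+ *   requests_per_second: 50
+ * })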
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-delete-by-query.html Elasticsearch API docs} + */ export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts index 5e2e4f50f..ff186863f 100644 --- a/src/api/api/delete_script.ts +++ b/src/api/api/delete_script.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Deletes a script. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-scripting.html Elasticsearch API docs} + */ export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index 2ebc67be0..d6a6b990c 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -43,6 +43,10 @@ export default class Enrich { this.transport = transport } + /** + * Deletes an existing enrich policy and its enrich index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-enrich-policy-api.html Elasticsearch API docs} + */ async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class Enrich { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates the enrich index for an existing enrich policy. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/execute-enrich-policy-api.html Elasticsearch API docs} + */ async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise @@ -87,6 +95,10 @@ export default class Enrich { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Gets information about an enrich policy. 
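+   * @example
+   * // Usage sketch for illustration; 'my-policy' is a placeholder policy name.
+   * const policies = await client.enrich.getPolicy({ name: 'my-policy' })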
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-enrich-policy-api.html Elasticsearch API docs} + */ async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise @@ -117,6 +129,10 @@ export default class Enrich { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates a new enrich policy. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-enrich-policy-api.html Elasticsearch API docs} + */ async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise @@ -151,6 +167,10 @@ export default class Enrich { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Gets enrich coordinator statistics and information about enrich policies that are currently executing. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/enrich-stats-api.html Elasticsearch API docs} + */ async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index 415fbc470..a181194c8 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -43,6 +43,10 @@ export default class Eql { this.transport = transport } + /** + * Deletes an async EQL search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. 
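To sketch how the enrich APIs above fit together (hypothetical policy and index names): a policy created with `putPolicy` only becomes usable once `executePolicy` has built its enrich index.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Define a match-type enrich policy, then build its enrich index.
await client.enrich.putPolicy({
  name: 'users-policy', // hypothetical policy name
  match: {
    indices: 'users',
    match_field: 'email',
    enrich_fields: ['first_name', 'last_name']
  }
})
await client.enrich.executePolicy({ name: 'users-policy' })
```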
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/eql-search-api.html Elasticsearch API docs} + */ async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class Eql { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns async results from previously executed Event Query Language (EQL) search + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/eql-search-api.html Elasticsearch API docs} + */ async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptions): Promise> @@ -87,6 +95,10 @@ export default class Eql { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns the status of a previously submitted async or stored Event Query Language (EQL) search + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/eql-search-api.html Elasticsearch API docs} + */ async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptions): Promise @@ -109,6 +121,10 @@ export default class Eql { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns results matching a query expressed in Event Query Language (EQL) + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/eql-search-api.html Elasticsearch API docs} + */ async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts index f0cfd6276..309612b31 100644 --- a/src/api/api/exists.ts +++ b/src/api/api/exists.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns information about whether a document exists in an index. 
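A minimal `eql.search` sketch, assuming an index that carries ECS-style `process` fields:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Matching events come back under hits.events.
const result = await client.eql.search({
  index: 'my-logs', // hypothetical index
  query: 'process where process.name == "regsvr32.exe"'
})
console.log(result.hits.events)
```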
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-get.html Elasticsearch API docs} + */ export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts index 590c4f7f1..6c8142f9b 100644 --- a/src/api/api/exists_source.ts +++ b/src/api/api/exists_source.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns information about whether a document source exists in an index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-get.html Elasticsearch API docs} + */ export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index 4235f64c1..45568b812 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns information about why a specific document matches (or doesn't match) a query.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-explain.html Elasticsearch API docs} + */ export default async function ExplainApi (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function ExplainApi (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function ExplainApi (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/features.ts b/src/api/api/features.ts index d05d4831a..01a1b3a82 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -43,6 +43,10 @@ export default class Features { this.transport = transport } + /** + * Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-features-api.html Elasticsearch API docs} + */ async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise @@ -66,6 +70,10 @@ export default class Features { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Resets the internal state of features, usually by deleting system indices + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs} + */ async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index 5678857a8..765b276bf 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns the information about the capabilities of fields among multiple indices. 
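A short sketch of the field capabilities API documented here, with a hypothetical index pattern:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Ask which types and capabilities the matching fields expose across indices.
const caps = await client.fieldCaps({
  index: 'my-index-*', // hypothetical index pattern
  fields: ['rating', '*_count']
})
console.log(caps.fields)
```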
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-field-caps.html Elasticsearch API docs} + */ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index baae23b8f..16a75fff7 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -43,6 +43,10 @@ export default class Fleet { this.transport = transport } + /** + * Returns the current global checkpoints for an index. This API is designed for internal use by the fleet server project. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-global-checkpoints.html Elasticsearch API docs} + */ async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise> async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,9 @@ export default class Fleet { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Multi Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project. + */ async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptions): Promise> @@ -99,6 +106,9 @@ export default class Fleet { return await this.transport.request({ path, method, querystring, bulkBody: body }, options) } + /** + * Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project. + */ async search (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async search (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/get.ts b/src/api/api/get.ts index 12aa2faf9..4bdeaa4c4 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns a document.
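The fleet APIs above are intended for the fleet server, but for illustration, `globalCheckpoints` can block until a shard's global checkpoint advances. A sketch with placeholder values:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Wait (up to the timeout) for the global checkpoint to advance past 0.
const checkpoints = await client.fleet.globalCheckpoints({
  index: '.fleet-actions', // hypothetical index
  wait_for_advance: true,
  checkpoints: [0],
  timeout: '30s'
})
console.log(checkpoints.global_checkpoints)
```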
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-get.html Elasticsearch API docs} + */ export default async function GetApi (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function GetApi (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function GetApi (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts index 515926b66..d2808c987 100644 --- a/src/api/api/get_script.ts +++ b/src/api/api/get_script.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns a script. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-scripting.html Elasticsearch API docs} + */ export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts index 9cb53cdb0..b23e0908a 100644 --- a/src/api/api/get_script_context.ts +++ b/src/api/api/get_script_context.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns all script contexts. 
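Tying the document-read APIs together (`exists`, `get`, and `getSource` just below), a small sketch with a hypothetical index and id:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Check for the document first, then fetch the full envelope or just _source.
if (await client.exists({ index: 'my-index', id: '1' })) {
  const doc = await client.get({ index: 'my-index', id: '1' })
  const source = await client.getSource({ index: 'my-index', id: '1' })
  console.log(doc._id, source)
}
```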
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/main/painless-contexts.html Elasticsearch API docs} + */ export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts index e4ce35ca0..d155cd83a 100644 --- a/src/api/api/get_script_languages.ts +++ b/src/api/api/get_script_languages.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns available script types, languages and contexts + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-scripting.html Elasticsearch API docs} + */ export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index 0e0aa6737..958738be3 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns the source of a document. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-get.html Elasticsearch API docs} + */ export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index fdfb8d7f6..ff46902a5 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -43,6 +43,10 @@ export default class Graph { this.transport = transport } + /** + * Explore extracted and summarized information about the documents and terms in an index. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/graph-explore-api.html Elasticsearch API docs} + */ async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise> async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts index 78b97b03d..ee365433e 100644 --- a/src/api/api/health_report.ts +++ b/src/api/api/health_report.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns the health of the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/health-api.html Elasticsearch API docs} + */ export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index 81c620b2e..41c96e94e 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -43,6 +43,10 @@ export default class Ilm { this.transport = transport } + /** + * Deletes the specified lifecycle policy definition. A currently used policy cannot be deleted. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-delete-lifecycle.html Elasticsearch API docs} + */ async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class Ilm { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves information about the index's current lifecycle state, such as the currently executing phase, action, and step. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-explain-lifecycle.html Elasticsearch API docs} + */ async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise @@ -87,6 +95,10 @@ export default class Ilm { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns the specified policy definition. Includes the policy version and last modified date. 
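A sketch of the health report API documented above; the `feature` parameter narrows the report to a single indicator. Treat this as an assumption-laden example, since the response shape may evolve:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Full cluster health report, then a single indicator.
const report = await client.healthReport()
console.log(report.status)

const shards = await client.healthReport({ feature: 'shards_availability' })
console.log(shards.indicators.shards_availability?.status)
```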
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-get-lifecycle.html Elasticsearch API docs} + */ async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise @@ -117,6 +129,10 @@ export default class Ilm { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves the current index lifecycle management (ILM) status. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-get-status.html Elasticsearch API docs} + */ async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptions): Promise @@ -140,6 +156,10 @@ export default class Ilm { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Migrates the indices and ILM policies away from custom node attribute allocation routing to data tiers routing + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-migrate-to-data-tiers.html Elasticsearch API docs} + */ async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise @@ -175,6 +195,10 @@ export default class Ilm { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Manually moves an index into the specified step and executes that step. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-move-to-step.html Elasticsearch API docs} + */ async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise> async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise @@ -209,6 +233,10 @@ export default class Ilm { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates a lifecycle policy. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-put-lifecycle.html Elasticsearch API docs} + */ async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise @@ -243,6 +271,10 @@ export default class Ilm { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Removes the assigned lifecycle policy and stops managing the specified index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-remove-policy.html Elasticsearch API docs} + */ async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise @@ -265,6 +297,10 @@ export default class Ilm { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retries executing the policy for an index that is in the ERROR step. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-retry-policy.html Elasticsearch API docs} + */ async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise> async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptions): Promise @@ -287,6 +323,10 @@ export default class Ilm { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Starts the index lifecycle management (ILM) plugin.
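Sketching the ILM policy lifecycle documented here: `putLifecycle` creates or updates a policy and `getLifecycle` reads it back. The policy name and phase values are hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// A hot/delete policy: roll over weekly, drop data after 30 days.
await client.ilm.putLifecycle({
  name: 'my-policy', // hypothetical policy name
  policy: {
    phases: {
      hot: { actions: { rollover: { max_age: '7d' } } },
      delete: { min_age: '30d', actions: { delete: {} } }
    }
  }
})
const policies = await client.ilm.getLifecycle({ name: 'my-policy' })
console.log(policies['my-policy']?.policy)
```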
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-start.html Elasticsearch API docs} + */ async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptions): Promise @@ -310,6 +350,10 @@ export default class Ilm { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-stop.html Elasticsearch API docs} + */ async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/index.ts b/src/api/api/index.ts index f1aac6a12..d461144a3 100644 --- a/src/api/api/index.ts +++ b/src/api/api/index.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Creates or updates a document in an index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-index_.html Elasticsearch API docs} + */ export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 667b95ff3..8413dbf03 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -43,6 +43,10 @@ export default class Indices { this.transport = transport } + /** + * Adds a block to an index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/index-modules-blocks.html Elasticsearch API docs} + */ async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise> async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Performs the analysis process on a text and returns the token breakdown of the text.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-analyze.html Elasticsearch API docs} + */ async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise @@ -107,6 +115,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Clears all or specific caches for one or more indices. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-clearcache.html Elasticsearch API docs} + */ async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise @@ -137,6 +149,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Clones an index + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-clone-index.html Elasticsearch API docs} + */ async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptions): Promise @@ -171,6 +187,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Closes an index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-open-close.html Elasticsearch API docs} + */ async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise> async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptions): Promise @@ -193,6 +213,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates an index with optional settings and mappings. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-create-index.html Elasticsearch API docs} + */ async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptions): Promise @@ -227,6 +251,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates a data stream + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + */ async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise @@ -249,6 +277,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Provides statistics on operations happening in a data stream. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + */ async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise @@ -279,6 +311,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes an index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-delete-index.html Elasticsearch API docs} + */ async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptions): Promise @@ -301,6 +337,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes an alias. 
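A sketch of index and data stream creation as documented above; the names are hypothetical, and the data stream call assumes a matching index template already exists:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Create a plain index with settings and mappings in one call.
await client.indices.create({
  index: 'my-index',
  settings: { number_of_shards: 1 },
  mappings: { properties: { title: { type: 'text' } } }
})

// Create a data stream (requires a matching index template).
await client.indices.createDataStream({ name: 'logs-myapp-default' })
```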
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-aliases.html Elasticsearch API docs} + */ async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise @@ -330,35 +370,36 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } - async deleteDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async deleteDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async deleteDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + /** + * Deletes the data lifecycle of the selected data streams. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/dlm-delete-lifecycle.html Elasticsearch API docs} + */ + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } - let method = '' - let path = '' - if (params.name != null) { - method = 'DELETE' - path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` - } else { - method = 'DELETE' - path = '/_data_stream/_lifecycle' - } + const method = 'DELETE' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes a data stream. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + */ async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise @@ -381,6 +422,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes an index template. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + */ async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise @@ -403,6 +448,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes an index template. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + */ async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise @@ -425,6 +474,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Analyzes the disk usage of each field of an index or data stream + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-disk-usage.html Elasticsearch API docs} + */ async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise @@ -447,6 +500,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Downsample an index + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/xpack-rollup.html Elasticsearch API docs} + */ async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: 
TransportRequestOptionsWithMeta): Promise> async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise @@ -474,6 +531,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about whether a particular index exists. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-exists.html Elasticsearch API docs} + */ async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptions): Promise @@ -496,6 +557,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about whether a particular alias exists. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-aliases.html Elasticsearch API docs} + */ async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise @@ -525,6 +590,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about whether a particular index template exists. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + */ async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise @@ -547,6 +616,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about whether a particular index template exists. 
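A usage sketch for the disk usage analysis documented above; the API is expensive, hence the explicit opt-in flag. The index name is hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Analyze per-field disk usage; run_expensive_tasks must be set explicitly.
const usage = await client.indices.diskUsage({
  index: 'my-index', // hypothetical index
  run_expensive_tasks: true
})
console.log(usage)
```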
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + */ async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise @@ -569,28 +642,36 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } - async explainDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async explainDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async explainDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async explainDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + /** + * Retrieves information about the index's current DLM lifecycle, such as any potentially encountered error, time since creation, etc. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/dlm-explain-lifecycle.html Elasticsearch API docs} + */ + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined - params = params ??
{} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } - const method = 'POST' + const method = 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_lifecycle/explain` return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns the field usage stats for each field of an index + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/field-usage-stats.html Elasticsearch API docs} + */ async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise @@ -613,6 +694,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Performs the flush operation on one or more indices. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-flush.html Elasticsearch API docs} + */ async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise> async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptions): Promise @@ -643,6 +728,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Performs the force merge operation on one or more indices. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-forcemerge.html Elasticsearch API docs} + */ async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise> async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise @@ -673,6 +762,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about one or more indices. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-get-index.html Elasticsearch API docs} + */ async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptions): Promise @@ -695,6 +788,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns an alias. 
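Given the typed `explainDataLifecycle` implementation above, a usage sketch against a hypothetical backing index pattern:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Inspect the lifecycle state of a data stream's backing indices.
const explain = await client.indices.explainDataLifecycle({
  index: '.ds-my-stream-*' // hypothetical backing index pattern
})
console.log(explain.indices)
```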
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-aliases.html Elasticsearch API docs} + */ async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise @@ -731,35 +828,36 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } - async getDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + /** + * Returns the data lifecycle of the selected data streams. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/dlm-get-lifecycle.html Elasticsearch API docs} + */ + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } - let method = '' - let path = '' - if (params.name != null) { - method = 'GET' - path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` - } else { - method = 'GET' - path = '/_data_stream/_lifecycle' - } + const method = 'GET' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns data streams. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + */ async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise @@ -790,6 +888,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns mapping for one or more fields. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-get-field-mapping.html Elasticsearch API docs} + */ async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise @@ -819,6 +921,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns an index template. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + */ async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise @@ -849,6 +955,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns mappings for one or more indices. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-get-mapping.html Elasticsearch API docs} + */ async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise @@ -879,6 +989,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns settings for one or more indices. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-get-settings.html Elasticsearch API docs} + */ async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise @@ -915,6 +1029,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns an index template. 
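Rounding out the read-side index APIs documented above, a short sketch with a hypothetical index:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Read back mappings and (flattened) settings for an index.
const mappings = await client.indices.getMapping({ index: 'my-index' })
const settings = await client.indices.getSettings({
  index: 'my-index',
  flat_settings: true
})
console.log(mappings['my-index']?.mappings, settings['my-index']?.settings)
```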
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + */ async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise @@ -945,6 +1063,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Migrates an alias to a data stream + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + */ async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise @@ -967,6 +1089,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Modifies a data stream + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + */ async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise @@ -1001,6 +1127,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Opens an index. 
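`modifyDataStream`, documented above, takes an `actions` list in the request body. A sketch that swaps a backing index out of a stream — the stream and backing-index names are hypothetical, and the usual constraint that a stream must keep at least one backing index still applies:

// Remove one (hypothetical) backing index from the stream.
await client.indices.modifyDataStream({
  actions: [
    { remove_backing_index: { data_stream: 'my-data-stream', index: '.ds-my-data-stream-000001' } }
  ]
})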
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-open-close.html Elasticsearch API docs} + */ async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptions): Promise @@ -1023,6 +1153,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Promotes a data stream from a replicated data stream managed by CCR to a regular data stream + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + */ async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise @@ -1045,6 +1179,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates or updates an alias. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-aliases.html Elasticsearch API docs} + */ async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise @@ -1086,35 +1224,48 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } - async putDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async putDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async putDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async putDataLifecycle (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + /** + * Updates the data lifecycle of the selected data streams. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/dlm-put-lifecycle.html Elasticsearch API docs} + */ + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['data_retention'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } - let method = '' - let path = '' - if (params.name != null) { - method = 'PUT' - path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` - } else { - method = 'PUT' - path = '/_data_stream/_lifecycle' - } + const method = 'PUT' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates or updates an index template. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + */ async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise @@ -1149,6 +1300,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates the index mappings. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-put-mapping.html Elasticsearch API docs} + */ async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise @@ -1183,6 +1338,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates the index settings. 
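The rewrite of `putDataLifecycle` above is the substantive hunk: `name` is now required and routed into the path, while `data_retention` is listed in `acceptedBody` and therefore serialized into the JSON body rather than the querystring. A sketch reusing the same `client`; the retention value is a hypothetical example:

// PUT /_data_stream/my-data-stream/_lifecycle
// `data_retention` is lifted out of the flat params object into the
// request body by the acceptedBody bookkeeping shown above.
await client.indices.putDataLifecycle({
  name: 'my-data-stream',
  data_retention: '7d'
})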
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-update-settings.html Elasticsearch API docs} + */ async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise @@ -1217,6 +1376,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates or updates an index template. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + */ async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise @@ -1251,6 +1414,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about ongoing index shard recoveries. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-recovery.html Elasticsearch API docs} + */ async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise @@ -1281,6 +1448,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Performs the refresh operation in one or more indices. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-refresh.html Elasticsearch API docs} + */ async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise> async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptions): Promise @@ -1311,6 +1482,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Reloads an index's search analyzers and their resources. 
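`putMapping` and `putSettings`, documented above, are the write-side counterparts of the earlier lookups. A sketch that adds a field and then adjusts a dynamic setting, with hypothetical names and values:

// Add a text field to the mapping...
await client.indices.putMapping({
  index: 'my-index',
  properties: { title: { type: 'text' } }
})
// ...then change a dynamic, per-index setting.
await client.indices.putSettings({
  index: 'my-index',
  settings: { index: { number_of_replicas: 1 } }
})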
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-reload-analyzers.html Elasticsearch API docs} + */ async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise> async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise @@ -1333,6 +1508,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about any matching indices, aliases, and data streams + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-resolve-index-api.html Elasticsearch API docs} + */ async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise @@ -1355,6 +1534,11 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates an alias to point to a new index when the existing index + is considered to be too large or too old. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-rollover-index.html Elasticsearch API docs} + */ async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise> async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptions): Promise @@ -1396,6 +1580,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Provides low-level information about segments in a Lucene index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-segments.html Elasticsearch API docs} + */ async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise @@ -1426,6 +1614,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Provides store information for shard copies of indices. 
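`rollover`, documented above, cuts a new write index behind an alias once a condition trips. A sketch with a hypothetical alias and threshold values:

// Roll over once the write index is 7 days old or holds 1M docs.
await client.indices.rollover({
  alias: 'my-alias',
  conditions: { max_age: '7d', max_docs: 1_000_000 }
})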
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-shards-stores.html Elasticsearch API docs} + */ async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise> async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise @@ -1456,6 +1648,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Allow to shrink an existing index into a new index with fewer primary shards. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-shrink-index.html Elasticsearch API docs} + */ async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise> async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptions): Promise @@ -1490,6 +1686,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Simulate matching the given index name against the index templates in the system + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + */ async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise @@ -1524,6 +1724,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Simulate resolving the given template name or body + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + */ async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise @@ -1558,6 +1762,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Allows you to split an existing index into a new index with more primary shards. 
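`shrink`, documented above, compacts an index into fewer primary shards. A sketch assuming the standard preconditions (source made read-only, with a copy of every shard on one node) are already met; the names and shard count are hypothetical:

// Shrink my-index down to a single primary shard.
await client.indices.shrink({
  index: 'my-index',
  target: 'my-index-shrunk',
  settings: { 'index.number_of_shards': 1 }
})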
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-split-index.html Elasticsearch API docs} + */ async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise> async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptions): Promise @@ -1592,6 +1800,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Provides statistics on operations happening in an index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-stats.html Elasticsearch API docs} + */ async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptions): Promise @@ -1628,6 +1840,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/unfreeze-index-api.html Elasticsearch API docs} + */ async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithMeta): Promise> async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise @@ -1650,6 +1866,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates index aliases. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-aliases.html Elasticsearch API docs} + */ async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise @@ -1685,6 +1905,10 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Allows a user to validate a potentially expensive query without executing it. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-validate.html Elasticsearch API docs} + */ async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/info.ts b/src/api/api/info.ts index b7d58c650..83478097f 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns basic information about the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/index.html Elasticsearch API docs} + */ export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index ac7b77cd2..ea0d7dae5 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -43,6 +43,10 @@ export default class Ingest { this.transport = transport } + /** + * Deletes a pipeline. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-pipeline-api.html Elasticsearch API docs} + */ async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class Ingest { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns statistical information about geoip databases + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/geoip-stats-api.html Elasticsearch API docs} + */ async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise @@ -88,6 +96,10 @@ export default class Ingest { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns a pipeline. 
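`validateQuery`, documented above, checks a potentially expensive query without running it. A sketch against a hypothetical index:

// Validate without executing; `explain` adds the reason on failure.
const validation = await client.indices.validateQuery({
  index: 'my-index',
  query: { match: { title: 'elasticsearch' } },
  explain: true
})
console.log(validation.valid)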
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-pipeline-api.html Elasticsearch API docs} + */ async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise @@ -118,6 +130,10 @@ export default class Ingest { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns a list of the built-in patterns. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/grok-processor.html#grok-processor-rest-get Elasticsearch API docs} + */ async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise> async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise @@ -141,6 +157,10 @@ export default class Ingest { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates or updates a pipeline. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-pipeline-api.html Elasticsearch API docs} + */ async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise @@ -175,6 +195,10 @@ export default class Ingest { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Allows to simulate a pipeline with example documents. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/simulate-pipeline-api.html Elasticsearch API docs} + */ async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index dbe6f9c5f..ac7729167 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Performs a kNN search. 
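The ingest pair documented above (`putPipeline`, `simulate`) composes naturally: register a pipeline, then dry-run it. A sketch with a hypothetical single `set` processor:

// Register a one-processor pipeline...
await client.ingest.putPipeline({
  id: 'my-pipeline',
  processors: [{ set: { field: 'env', value: 'production' } }]
})
// ...then dry-run it against an inline example document.
const simulated = await client.ingest.simulate({
  id: 'my-pipeline',
  docs: [{ _source: { message: 'hello' } }]
})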
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-search.html Elasticsearch API docs} + */ export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/license.ts b/src/api/api/license.ts index 26c4e59e7..fb257ad1c 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -43,6 +43,10 @@ export default class License { this.transport = transport } + /** + * Deletes licensing information for the cluster + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-license.html Elasticsearch API docs} + */ async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptions): Promise @@ -66,6 +70,10 @@ export default class License { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves licensing information for the cluster + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-license.html Elasticsearch API docs} + */ async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptions): Promise @@ -89,6 +97,10 @@ export default class License { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves information about the status of the basic license. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-basic-status.html Elasticsearch API docs} + */ async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise @@ -112,6 +124,10 @@ export default class License { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves information about the status of the trial license. 
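`knnSearch`, documented above, performs approximate nearest-neighbour search. A sketch assuming a hypothetical index with a three-dimensional dense_vector field:

// Fetch the 5 nearest neighbours, scanning 50 candidates per shard.
const neighbours = await client.knnSearch({
  index: 'my-vectors',
  knn: { field: 'embedding', query_vector: [0.1, 0.2, 0.3], k: 5, num_candidates: 50 }
})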
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-trial-status.html Elasticsearch API docs} + */ async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise @@ -135,6 +151,10 @@ export default class License { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates the license for the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/update-license.html Elasticsearch API docs} + */ async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise @@ -170,6 +190,10 @@ export default class License { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Starts an indefinite basic license. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/start-basic.html Elasticsearch API docs} + */ async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise> async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise @@ -193,6 +217,10 @@ export default class License { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * starts a limited time trial license. 
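The license calls documented above are mostly zero-argument reads; the state-changing ones take an explicit acknowledgement flag. A sketch:

// Inspect the current license, then knowingly drop to basic.
const license = await client.license.get()
await client.license.postStartBasic({ acknowledge: true })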
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/start-trial.html Elasticsearch API docs} + */ async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise> async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index 0a516c08c..c9d547266 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -43,6 +43,10 @@ export default class Logstash { this.transport = transport } + /** + * Deletes Logstash Pipelines used by Central Management + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/logstash-api-delete-pipeline.html Elasticsearch API docs} + */ async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class Logstash { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves Logstash Pipelines used by Central Management + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/logstash-api-get-pipeline.html Elasticsearch API docs} + */ async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise @@ -94,6 +102,10 @@ export default class Logstash { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Adds and updates Logstash Pipelines used for Central Management + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/logstash-api-put-pipeline.html Elasticsearch API docs} + */ async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts index a0d678656..0b69f08d9 100644 --- a/src/api/api/mget.ts +++ b/src/api/api/mget.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Allows to get multiple documents in one request. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-multi-get.html Elasticsearch API docs} + */ export default async function MgetApi (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MgetApi (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MgetApi (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts index 069ed2d66..ac09d60e4 100644 --- a/src/api/api/migration.ts +++ b/src/api/api/migration.ts @@ -43,6 +43,10 @@ export default class Migration { this.transport = transport } + /** + * Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/migration-api-deprecation.html Elasticsearch API docs} + */ async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise @@ -73,6 +77,10 @@ export default class Migration { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Find out whether system features need to be upgraded or not + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/migration-api-feature-upgrade.html Elasticsearch API docs} + */ async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise @@ -96,6 +104,10 @@ export default class Migration { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Begin upgrades for system features + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/migration-api-feature-upgrade.html Elasticsearch API docs} + */ async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise> async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 75a62305e..97f0b0ddd 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -43,6 +43,10 @@ export default class Ml { 
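`mget`, documented above, batches document lookups into a single round trip. A sketch against a hypothetical index:

// Two lookups, one request.
const docs = await client.mget({
  docs: [
    { _index: 'my-index', _id: '1' },
    { _index: 'my-index', _id: '2' }
  ]
})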
this.transport = transport } + /** + * Clear the cached results from a trained model deployment + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/clear-trained-model-deployment-cache.html Elasticsearch API docs} + */ async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-close-job.html Elasticsearch API docs} + */ async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise @@ -99,6 +107,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes a calendar. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-calendar.html Elasticsearch API docs} + */ async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise @@ -121,6 +133,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes scheduled events from a calendar. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-calendar-event.html Elasticsearch API docs} + */ async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise @@ -143,6 +159,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes anomaly detection jobs from a calendar. 
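`closeJob`, documented above, persists a job's model state and frees its resources. A sketch for a hypothetical job id:

// Close gracefully; pass `force: true` only when a job is wedged.
await client.ml.closeJob({ job_id: 'my-job' })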
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-calendar-job.html Elasticsearch API docs} + */ async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise @@ -165,6 +185,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes an existing data frame analytics job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-dfanalytics.html Elasticsearch API docs} + */ async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise @@ -187,6 +211,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes an existing datafeed. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-datafeed.html Elasticsearch API docs} + */ async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise @@ -209,6 +237,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes expired and unused machine learning data. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-expired-data.html Elasticsearch API docs} + */ async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise @@ -251,6 +283,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes a filter. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-filter.html Elasticsearch API docs} + */ async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise @@ -273,6 +309,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes forecasts from a machine learning job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-forecast.html Elasticsearch API docs} + */ async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise @@ -302,6 +342,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes an existing anomaly detection job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-job.html Elasticsearch API docs} + */ async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptions): Promise @@ -324,6 +368,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes an existing model snapshot. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-snapshot.html Elasticsearch API docs} + */ async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise @@ -346,6 +394,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes an existing trained inference model that is currently not referenced by an ingest pipeline. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-trained-models.html Elasticsearch API docs} + */ async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise @@ -368,6 +420,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deletes a model alias that refers to the trained model + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-trained-models-aliases.html Elasticsearch API docs} + */ async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise @@ -390,6 +446,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Estimates the model memory + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-apis.html Elasticsearch API docs} + */ async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise @@ -425,6 +485,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Evaluates the data frame analytics for an annotated index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/evaluate-dfanalytics.html Elasticsearch API docs} + */ async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise> async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise @@ -459,6 +523,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Explains a data frame analytics config. 
+ * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/main/explain-dfanalytics.html Elasticsearch API docs} + */ async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise @@ -501,6 +569,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Forces any buffered data to be processed by the job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-flush-job.html Elasticsearch API docs} + */ async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptions): Promise @@ -535,6 +607,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Predicts the future behavior of a time series by using its historical behavior. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-forecast.html Elasticsearch API docs} + */ async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptions): Promise @@ -569,6 +645,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves anomaly detection job results for one or more buckets. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-bucket.html Elasticsearch API docs} + */ async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise @@ -610,6 +690,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves information about the scheduled events in calendars. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-calendar-event.html Elasticsearch API docs} + */ async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise @@ -632,6 +716,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves configuration information for calendars. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-calendar.html Elasticsearch API docs} + */ async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise @@ -674,6 +762,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves anomaly detection job results for one or more categories. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-category.html Elasticsearch API docs} + */ async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise @@ -715,6 +807,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves configuration information for data frame analytics jobs. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-dfanalytics.html Elasticsearch API docs} + */ async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise @@ -745,6 +841,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves usage information for data frame analytics jobs. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-dfanalytics-stats.html Elasticsearch API docs} + */ async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise @@ -775,6 +875,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves usage information for datafeeds. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-datafeed-stats.html Elasticsearch API docs} + */ async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise @@ -805,6 +909,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves configuration information for datafeeds. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-datafeed.html Elasticsearch API docs} + */ async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise @@ -835,6 +943,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves filters. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-filter.html Elasticsearch API docs} + */ async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptions): Promise @@ -865,6 +977,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves anomaly detection job results for one or more influencers. 
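+ * A minimal sketch under the assumption that an anomaly detection job `my-job` exists:
+ * @example
+ * const resp = await client.ml.getInfluencers({ job_id: 'my-job' })
+ * for (const influencer of resp.influencers) console.log(influencer.influencer_score)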
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-influencer.html Elasticsearch API docs} + */ async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise> async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise @@ -899,6 +1015,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves usage information for anomaly detection jobs. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-job-stats.html Elasticsearch API docs} + */ async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise @@ -929,6 +1049,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves configuration information for anomaly detection jobs. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-job.html Elasticsearch API docs} + */ async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptions): Promise @@ -959,6 +1083,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information on how ML is using memory. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-ml-memory.html Elasticsearch API docs} + */ async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise @@ -989,6 +1117,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Gets stats for anomaly detection job model snapshot upgrades that are in progress. 
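+ * A sketch with hypothetical ids; `_all` is assumed to be accepted for the snapshot,
+ * as in the corresponding REST endpoint.
+ * @example
+ * const resp = await client.ml.getModelSnapshotUpgradeStats({ job_id: 'my-job', snapshot_id: '_all' })
+ * console.log(resp.count)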
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-job-model-snapshot-upgrade-stats.html Elasticsearch API docs} + */ async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise @@ -1011,6 +1143,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves information about model snapshots. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-snapshot.html Elasticsearch API docs} + */ async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise @@ -1052,6 +1188,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-overall-buckets.html Elasticsearch API docs} + */ async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise @@ -1086,6 +1226,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves anomaly records for an anomaly detection job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-record.html Elasticsearch API docs} + */ async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise @@ -1120,6 +1264,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves configuration information for a trained inference model. 
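+ * A minimal sketch; omitting `model_id` lists every trained model.
+ * @example
+ * const resp = await client.ml.getTrainedModels()
+ * console.log(resp.count, resp.trained_model_configs)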
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-trained-models.html Elasticsearch API docs} + */ async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise @@ -1150,6 +1298,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves usage information for trained inference models. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-trained-models-stats.html Elasticsearch API docs} + */ async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise @@ -1180,6 +1332,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Evaluate a trained model. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/infer-trained-model.html Elasticsearch API docs} + */ async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise @@ -1214,6 +1370,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns defaults and limits used by machine learning. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-ml-info.html Elasticsearch API docs} + */ async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptions): Promise @@ -1237,6 +1397,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Opens one or more anomaly detection jobs. 
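+ * A minimal sketch (hypothetical job id):
+ * @example
+ * const resp = await client.ml.openJob({ job_id: 'my-job' })
+ * console.log(resp.opened) // true once the job has been opened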
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-open-job.html Elasticsearch API docs} + */ async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptions): Promise @@ -1271,6 +1435,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Posts scheduled events in a calendar. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-post-calendar-event.html Elasticsearch API docs} + */ async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise @@ -1305,6 +1473,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Sends data to an anomaly detection job for analysis. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-post-data.html Elasticsearch API docs} + */ async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptions): Promise @@ -1332,6 +1504,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, bulkBody: body }, options) } + /** + * Previews the data that will be analyzed given a data frame analytics config. + * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/main/preview-dfanalytics.html Elasticsearch API docs} + */ async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise @@ -1374,6 +1550,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Previews a datafeed.
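+ * A minimal sketch (hypothetical datafeed id); the response previews the documents
+ * the datafeed would fetch.
+ * @example
+ * const preview = await client.ml.previewDatafeed({ datafeed_id: 'my-datafeed' })
+ * console.log(preview)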
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-preview-datafeed.html Elasticsearch API docs} + */ async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise> @@ -1416,6 +1596,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Instantiates a calendar. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-put-calendar.html Elasticsearch API docs} + */ async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptions): Promise @@ -1450,6 +1634,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Adds an anomaly detection job to a calendar. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-put-calendar-job.html Elasticsearch API docs} + */ async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise @@ -1472,6 +1660,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Instantiates a data frame analytics job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-dfanalytics.html Elasticsearch API docs} + */ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise @@ -1506,6 +1698,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Instantiates a datafeed. 
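+ * A sketch with hypothetical names; the job `my-job` and index `my-data-index` are
+ * assumed to exist before the datafeed is created.
+ * @example
+ * await client.ml.putDatafeed({
+ *   datafeed_id: 'my-datafeed',
+ *   job_id: 'my-job',
+ *   indices: ['my-data-index'],
+ *   query: { match_all: {} }
+ * })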
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-put-datafeed.html Elasticsearch API docs} + */ async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise @@ -1540,6 +1736,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Instantiates a filter. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-put-filter.html Elasticsearch API docs} + */ async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptions): Promise @@ -1574,6 +1774,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Instantiates an anomaly detection job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-put-job.html Elasticsearch API docs} + */ async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise @@ -1608,6 +1812,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates an inference trained model. 
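+ * A deliberately trimmed sketch: a real call supplies a complete model through
+ * `definition` or `compressed_definition`; `myModelDefinition` and the field names
+ * below are placeholders.
+ * @example
+ * await client.ml.putTrainedModel({
+ *   model_id: 'my-model',
+ *   input: { field_names: ['feature_1', 'feature_2'] },
+ *   inference_config: { regression: {} },
+ *   definition: myModelDefinition // assumed to be built elsewhere
+ * })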
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-trained-models.html Elasticsearch API docs} + */ async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise @@ -1642,6 +1850,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates a new model alias (or reassigns an existing one) to refer to the trained model + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-trained-models-aliases.html Elasticsearch API docs} + */ async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise @@ -1664,6 +1876,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates part of a trained model definition + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-trained-model-definition-part.html Elasticsearch API docs} + */ async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise @@ -1698,6 +1914,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates a trained model vocabulary + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-trained-model-vocabulary.html Elasticsearch API docs} + */ async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise @@ -1732,6 +1952,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Resets an existing anomaly detection job. 
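+ * A minimal sketch (hypothetical job id); the job must be closed before it can be reset.
+ * @example
+ * await client.ml.resetJob({ job_id: 'my-job' })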
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-reset-job.html Elasticsearch API docs} + */ async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptions): Promise @@ -1754,6 +1978,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Reverts to a specific snapshot. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-revert-snapshot.html Elasticsearch API docs} + */ async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise @@ -1788,6 +2016,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-set-upgrade-mode.html Elasticsearch API docs} + */ async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise> async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise @@ -1811,6 +2043,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Starts a data frame analytics job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/start-dfanalytics.html Elasticsearch API docs} + */ async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise @@ -1833,6 +2069,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Starts one or more datafeeds. 
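+ * A minimal sketch (hypothetical datafeed id):
+ * @example
+ * const resp = await client.ml.startDatafeed({ datafeed_id: 'my-datafeed' })
+ * console.log(resp.started)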
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-start-datafeed.html Elasticsearch API docs} + */ async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise @@ -1867,6 +2107,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Start a trained model deployment. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/start-trained-model-deployment.html Elasticsearch API docs} + */ async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise @@ -1889,6 +2133,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Stops one or more data frame analytics jobs. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/stop-dfanalytics.html Elasticsearch API docs} + */ async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise @@ -1911,6 +2159,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Stops one or more datafeeds. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-stop-datafeed.html Elasticsearch API docs} + */ async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise @@ -1945,6 +2197,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Stop a trained model deployment. 
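+ * A minimal sketch (hypothetical model id):
+ * @example
+ * const resp = await client.ml.stopTrainedModelDeployment({ model_id: 'my-model' })
+ * console.log(resp.stopped)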
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/stop-trained-model-deployment.html Elasticsearch API docs} + */ async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise @@ -1967,6 +2223,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates certain properties of a data frame analytics job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/update-dfanalytics.html Elasticsearch API docs} + */ async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise @@ -2001,6 +2261,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates certain properties of a datafeed. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-update-datafeed.html Elasticsearch API docs} + */ async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise @@ -2035,6 +2299,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates the description of a filter, adds items, or removes items. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-update-filter.html Elasticsearch API docs} + */ async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise @@ -2069,6 +2337,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates certain properties of an anomaly detection job. 
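+ * A minimal sketch that updates only the description of a hypothetical job:
+ * @example
+ * await client.ml.updateJob({ job_id: 'my-job', description: 'Revised job description' })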
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-update-job.html Elasticsearch API docs} + */ async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptions): Promise @@ -2103,6 +2375,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates certain properties of a snapshot. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-update-snapshot.html Elasticsearch API docs} + */ async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise @@ -2137,6 +2413,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Updates certain properties of trained model deployment. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/update-trained-model-deployment.html Elasticsearch API docs} + */ async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise @@ -2159,6 +2439,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Upgrades a given job snapshot to the current major version. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-upgrade-job-model-snapshot.html Elasticsearch API docs} + */ async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise @@ -2181,6 +2465,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Validates an anomaly detection job. 
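+ * A sketch whose field names are placeholders for whatever the job would analyze:
+ * @example
+ * await client.ml.validate({
+ *   analysis_config: { bucket_span: '15m', detectors: [{ function: 'mean', field_name: 'response_time' }] },
+ *   data_description: { time_field: 'timestamp' }
+ * })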
+ * @see {@link https://www.elastic.co/guide/en/machine-learning/main/ml-jobs.html Elasticsearch API docs} + */ async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise> async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptions): Promise @@ -2216,6 +2504,10 @@ export default class Ml { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Validates an anomaly detection detector. + * @see {@link https://www.elastic.co/guide/en/machine-learning/main/ml-jobs.html Elasticsearch API docs} + */ async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise> async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index 8cf13c461..6ca1c152c 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -43,6 +43,10 @@ export default class Monitoring { this.transport = transport } + /** + * Used by the monitoring features to send monitoring data. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/monitor-elasticsearch-cluster.html Elasticsearch API docs} + */ async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts index 523885cd3..5580751d1 100644 --- a/src/api/api/msearch.ts +++ b/src/api/api/msearch.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Allows to execute several search operations in one request. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-multi-search.html Elasticsearch API docs} + */ export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts index ea9f3dfd7..000477486 100644 --- a/src/api/api/msearch_template.ts +++ b/src/api/api/msearch_template.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Allows to execute several search template operations in one request. 
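+ * A sketch of one header/body pair with an inline Mustache `source`; the index name
+ * and params are hypothetical.
+ * @example
+ * const resp = await client.msearchTemplate({
+ *   search_templates: [
+ *     { index: 'my-index' },
+ *     { source: '{"query":{"match":{"{{my_field}}":"{{my_value}}"}}}', params: { my_field: 'title', my_value: 'hello' } }
+ *   ]
+ * })
+ * console.log(resp.responses)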
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-multi-search.html Elasticsearch API docs} + */ export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts index c3dcf2e07..6f5a1d197 100644 --- a/src/api/api/mtermvectors.ts +++ b/src/api/api/mtermvectors.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns multiple termvectors in one request. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-multi-termvectors.html Elasticsearch API docs} + */ export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index 6dadf200b..97e10edbf 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -43,6 +43,10 @@ export default class Nodes { this.transport = transport } + /** + * Removes the archived repositories metering information present in the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/clear-repositories-metering-archive-api.html Elasticsearch API docs} + */ async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class Nodes { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns cluster repositories metering information. 
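+ * A minimal sketch; `node_id` is required, and `_all` is assumed to be a valid node
+ * filter as in the other nodes APIs.
+ * @example
+ * const resp = await client.nodes.getRepositoriesMeteringInfo({ node_id: '_all' })
+ * console.log(resp.nodes)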
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-repositories-metering-api.html Elasticsearch API docs} + */ async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise @@ -87,6 +95,10 @@ export default class Nodes { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about hot threads on each node in the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-nodes-hot-threads.html Elasticsearch API docs} + */ async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise> async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise @@ -117,6 +129,10 @@ export default class Nodes { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns information about nodes in the cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-nodes-info.html Elasticsearch API docs} + */ async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptions): Promise @@ -153,6 +169,10 @@ export default class Nodes { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Reloads secure settings. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/secure-settings.html#reloadable-secure-settings Elasticsearch API docs} + */ async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise @@ -195,6 +215,10 @@ export default class Nodes { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns statistical information about nodes in the cluster. 
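+ * A minimal sketch; with no arguments, stats for every node are returned.
+ * @example
+ * const stats = await client.nodes.stats()
+ * console.log(Object.keys(stats.nodes))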
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-nodes-stats.html Elasticsearch API docs} + */ async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptions): Promise @@ -237,6 +261,10 @@ export default class Nodes { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns low-level information about REST actions usage on nodes. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-nodes-usage.html Elasticsearch API docs} + */ async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts index 81507983a..38e0165b7 100644 --- a/src/api/api/open_point_in_time.ts +++ b/src/api/api/open_point_in_time.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Open a point in time that can be used in subsequent searches + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/point-in-time-api.html Elasticsearch API docs} + */ export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts index 4b8a07e8f..b2db543f2 100644 --- a/src/api/api/ping.ts +++ b/src/api/api/ping.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Returns whether the cluster is running. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/index.html Elasticsearch API docs} + */ export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts index 5d6711fd0..0aec2b2d7 100644 --- a/src/api/api/put_script.ts +++ b/src/api/api/put_script.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Creates or updates a script. 
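+ * A minimal sketch (hypothetical script id, field, and source):
+ * @example
+ * await client.putScript({
+ *   id: 'my-script',
+ *   script: { lang: 'painless', source: "doc['my_field'].value * params.factor" }
+ * })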
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-scripting.html Elasticsearch API docs} + */ export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts index 056e5bf7f..e3618721b 100644 --- a/src/api/api/rank_eval.ts +++ b/src/api/api/rank_eval.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Allows to evaluate the quality of ranked search results over a set of typical search queries + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-rank-eval.html Elasticsearch API docs} + */ export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 3dadca6b6..43aa34f1f 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -37,6 +37,12 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Allows to copy documents from one index to another, optionally filtering the source +documents by a query, changing the destination index settings, or fetching the +documents from a remote cluster. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-reindex.html Elasticsearch API docs} + */ export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts index 030ff50a5..9127d4a0f 100644 --- a/src/api/api/reindex_rethrottle.ts +++ b/src/api/api/reindex_rethrottle.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Changes the number of requests per second for a particular Reindex operation. 
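+ * A minimal sketch; the task id is a placeholder for the value returned by a reindex
+ * started with `wait_for_completion: false`.
+ * @example
+ * await client.reindexRethrottle({ task_id: '<node-id>:<task-number>', requests_per_second: 500 })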
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-reindex.html Elasticsearch API docs} + */ export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index bf4d68841..3081bdfa6 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Allows to use the Mustache language to pre-render a search definition. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/render-search-template-api.html Elasticsearch API docs} + */ export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index e75fcdfd7..ccdfca6ac 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -43,6 +43,10 @@ export default class Rollup { this.transport = transport } + /** + * Deletes an existing rollup job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-delete-job.html Elasticsearch API docs} + */ async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise @@ -65,6 +69,10 @@ export default class Rollup { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves the configuration, stats, and status of rollup jobs. 
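+ * A minimal sketch; omitting `id` returns every rollup job.
+ * @example
+ * const resp = await client.rollup.getJobs()
+ * console.log(resp.jobs)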
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-get-job.html Elasticsearch API docs} + */ async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptions): Promise @@ -95,6 +103,10 @@ export default class Rollup { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-get-rollup-caps.html Elasticsearch API docs} + */ async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise @@ -125,6 +137,10 @@ export default class Rollup { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored). + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-get-rollup-index-caps.html Elasticsearch API docs} + */ async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise @@ -147,6 +163,10 @@ export default class Rollup { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates a rollup job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-put-job.html Elasticsearch API docs} + */ async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptions): Promise @@ -181,6 +201,10 @@ export default class Rollup { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Enables searching rolled-up data using the standard query DSL. 
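+ * A sketch with hypothetical index and field names; `size: 0` is used because rollup
+ * search only returns aggregation results.
+ * @example
+ * const resp = await client.rollup.rollupSearch({
+ *   index: 'my-rollup-index',
+ *   size: 0,
+ *   aggregations: { max_temperature: { max: { field: 'temperature' } } }
+ * })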
@@ -181,6 +201,10 @@ export default class Rollup {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Enables searching rolled-up data using the standard query DSL.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-search.html Elasticsearch API docs}
+   */
   async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupRollupSearchResponse<TDocument, TAggregations>>
   async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupRollupSearchResponse<TDocument, TAggregations>, unknown>>
   async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise<T.RollupRollupSearchResponse<TDocument, TAggregations>>
@@ -215,6 +239,10 @@ export default class Rollup {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Starts an existing, stopped rollup job.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-start-job.html Elasticsearch API docs}
+   */
   async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupStartJobResponse>
   async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupStartJobResponse, unknown>>
   async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptions): Promise<T.RollupStartJobResponse>
@@ -237,6 +265,10 @@ export default class Rollup {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Stops an existing, started rollup job.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-stop-job.html Elasticsearch API docs}
+   */
   async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupStopJobResponse>
   async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupStopJobResponse, unknown>>
   async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptions): Promise<T.RollupStopJobResponse>
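// Illustrative aside (a hedged sketch, not part of the patch): querying rolled-up
// data with rollupSearch as generated above. The rollup index name 'sensor_rollup'
// and the numeric 'temperature' field are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const result = await client.rollup.rollupSearch({
  index: 'sensor_rollup',
  size: 0,
  aggregations: {
    max_temperature: { max: { field: 'temperature' } }
  }
})
console.log(result.aggregations)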
diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts
index f7757f513..a002cef20 100644
--- a/src/api/api/scripts_painless_execute.ts
+++ b/src/api/api/scripts_painless_execute.ts
@@ -37,6 +37,10 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

+/**
+ * Allows an arbitrary script to be executed and a result to be returned.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/main/painless-execute-api.html Elasticsearch API docs}
+ */
 export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ScriptsPainlessExecuteResponse<TResult>>
 export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScriptsPainlessExecuteResponse<TResult>, unknown>>
 export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise<T.ScriptsPainlessExecuteResponse<TResult>>
diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts
index e04fd9aba..812526193 100644
--- a/src/api/api/scroll.ts
+++ b/src/api/api/scroll.ts
@@ -37,6 +37,10 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

+/**
+ * Allows you to retrieve a large number of results from a single search request.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-request-body.html#request-body-search-scroll Elasticsearch API docs}
+ */
 export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ScrollResponse<TDocument, TAggregations>>
 export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScrollResponse<TDocument, TAggregations>, unknown>>
 export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise<T.ScrollResponse<TDocument, TAggregations>>
diff --git a/src/api/api/search.ts b/src/api/api/search.ts
index 6c757cc9b..e4d909e54 100644
--- a/src/api/api/search.ts
+++ b/src/api/api/search.ts
@@ -37,12 +37,16 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

+/**
+ * Returns results matching a query.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-search.html Elasticsearch API docs}
+ */
 export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchResponse<TDocument, TAggregations>>
 export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchResponse<TDocument, TAggregations>, unknown>>
 export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise<T.SearchResponse<TDocument, TAggregations>>
 export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
+  const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'rank', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
   const querystring: Record<string, any> = {}
   // @ts-expect-error
   const userBody: any = params?.body
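// Illustrative aside (a hedged sketch, not part of the patch): pairing search and
// scroll as generated above. The index name 'my-index' and the 'title' field are
// hypothetical placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
// Open a scrolled search; the 'scroll' TTL keeps the search context alive.
let page = await client.search({
  index: 'my-index',
  scroll: '30s',
  size: 100,
  query: { match: { title: 'elasticsearch' } }
})
// Keep pulling pages until one comes back empty.
while (page.hits.hits.length > 0 && page._scroll_id != null) {
  page = await client.scroll({ scroll_id: page._scroll_id, scroll: '30s' })
}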
diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts
index 98ea8bd60..2ddb8749a 100644
--- a/src/api/api/search_application.ts
+++ b/src/api/api/search_application.ts
@@ -43,6 +43,10 @@ export default class SearchApplication {
     this.transport = transport
   }

+  /**
+   * Deletes a search application.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-search-application.html Elasticsearch API docs}
+   */
   async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationDeleteResponse>
   async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationDeleteResponse, unknown>>
   async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationDeleteResponse>
@@ -65,19 +69,23 @@ export default class SearchApplication {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

-  async deleteBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async deleteBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async deleteBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async deleteBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  /**
+   * Delete a behavioral analytics collection.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-analytics-collection.html Elasticsearch API docs}
+   */
+  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationDeleteBehavioralAnalyticsResponse>
+  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationDeleteBehavioralAnalyticsResponse, unknown>>
+  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationDeleteBehavioralAnalyticsResponse>
+  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined

-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -87,6 +95,10 @@ export default class SearchApplication {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Returns the details about a search application.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-search-application.html Elasticsearch API docs}
+   */
   async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationGetResponse>
   async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationGetResponse, unknown>>
   async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationGetResponse>
@@ -109,10 +121,14 @@ export default class SearchApplication {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

-  async getBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async getBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async getBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async getBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  /**
+   * Returns the existing behavioral analytics collections.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/list-analytics-collection.html Elasticsearch API docs}
+   */
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationGetBehavioralAnalyticsResponse>
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationGetBehavioralAnalyticsResponse, unknown>>
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationGetBehavioralAnalyticsResponse>
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -122,6 +138,7 @@ export default class SearchApplication {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -138,6 +155,10 @@ export default class SearchApplication {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Returns the existing search applications.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/list-search-applications.html Elasticsearch API docs}
+   */
   async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationListResponse>
   async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationListResponse, unknown>>
   async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationListResponse>
@@ -161,6 +182,10 @@ export default class SearchApplication {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Creates a behavioral analytics event for an existing collection.
+   * @see {@link http://todo.com/tbd Elasticsearch API docs}
+   */
   async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
   async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
@@ -183,6 +208,10 @@ export default class SearchApplication {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Creates or updates a search application.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-search-application.html Elasticsearch API docs}
+   */
   async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPutResponse>
   async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationPutResponse, unknown>>
   async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationPutResponse>
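// Illustrative aside (a hedged sketch, not part of the patch): creating a search
// application with put(). The application name, backing index, and search template
// below are hypothetical placeholders, and the exact body shape follows the
// SearchApplicationPutRequest type as I understand it.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
await client.searchApplication.put({
  name: 'website-search',
  search_application: {
    indices: ['website-content'],
    template: {
      script: {
        source: '{ "query": { "query_string": { "query": "{{query_string}}" } } }',
        params: { query_string: '*' }
      }
    }
  }
})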
@@ -210,19 +239,23 @@ export default class SearchApplication {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

-  async putBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async putBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async putBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async putBehavioralAnalytics (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  /**
+   * Creates a behavioral analytics collection.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-analytics-collection.html Elasticsearch API docs}
+   */
+  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPutBehavioralAnalyticsResponse>
+  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationPutBehavioralAnalyticsResponse, unknown>>
+  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationPutBehavioralAnalyticsResponse>
+  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined

-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -232,6 +265,36 @@ export default class SearchApplication {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Renders a query for the given search application search parameters.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-application-render-query.html Elasticsearch API docs}
+   */
+  async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['name']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}/_render_query`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  /**
+   * Perform a search against a search application.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-application-search.html Elasticsearch API docs}
+   */
   async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>>
   async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationSearchResponse<TDocument, TAggregations>, unknown>>
   async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>>
diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts
index 16f4759f9..9f8b0fdc4 100644
--- a/src/api/api/search_mvt.ts
+++ b/src/api/api/search_mvt.ts
@@ -37,6 +37,10 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

+/**
+ * Searches a vector tile for geospatial values. Returns results as a binary Mapbox vector tile.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-vector-tile-api.html Elasticsearch API docs}
+ */
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchMvtResponse>
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchMvtResponse, unknown>>
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise<T.SearchMvtResponse>
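// Illustrative aside (a hedged sketch, not part of the patch): the vector tile
// search shown above, assuming a hypothetical 'museums' index with a geo_point
// field named 'location'; zoom/x/y address a standard map tile.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const tile = await client.searchMvt({
  index: 'museums',
  field: 'location',
  zoom: 13,
  x: 4207,
  y: 2692
})
// The response is a binary Mapbox vector tile, ready to serve to a map client.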
diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts
index 999ea6be4..7cf6a048d 100644
--- a/src/api/api/search_shards.ts
+++ b/src/api/api/search_shards.ts
@@ -37,6 +37,10 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

+/**
+ * Returns information about the indices and shards that a search request would be executed against.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-shards.html Elasticsearch API docs}
+ */
 export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchShardsResponse>
 export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchShardsResponse, unknown>>
 export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptions): Promise<T.SearchShardsResponse>
diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts
index 84c0fb9df..2ba27129d 100644
--- a/src/api/api/search_template.ts
+++ b/src/api/api/search_template.ts
@@ -37,6 +37,10 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

+/**
+ * Allows you to use the Mustache language to pre-render a search definition.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-template.html Elasticsearch API docs}
+ */
 export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchTemplateResponse<TDocument>>
 export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchTemplateResponse<TDocument>, unknown>>
 export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptions): Promise<T.SearchTemplateResponse<TDocument>>
diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts
index 725d72d22..5ed863d2e 100644
--- a/src/api/api/searchable_snapshots.ts
+++ b/src/api/api/searchable_snapshots.ts
@@ -43,6 +43,10 @@ export default class SearchableSnapshots {
     this.transport = transport
   }

+  /**
+   * Retrieve node-level cache statistics about searchable snapshots.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/searchable-snapshots-apis.html Elasticsearch API docs}
+   */
   async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsCacheStatsResponse>
   async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsCacheStatsResponse, unknown>>
   async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsCacheStatsResponse>
@@ -73,6 +77,10 @@ export default class SearchableSnapshots {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Clear the cache of searchable snapshots.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/searchable-snapshots-apis.html Elasticsearch API docs}
+   */
   async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsClearCacheResponse>
   async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsClearCacheResponse, unknown>>
   async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsClearCacheResponse>
@@ -103,6 +111,10 @@ export default class SearchableSnapshots {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Mount a snapshot as a searchable index.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/searchable-snapshots-api-mount-snapshot.html Elasticsearch API docs}
+   */
   async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsMountResponse>
   async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsMountResponse, unknown>>
   async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsMountResponse>
@@ -137,6 +149,10 @@ export default class SearchableSnapshots {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Retrieve shard-level statistics about searchable snapshots.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/searchable-snapshots-apis.html Elasticsearch API docs}
+   */
   async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsStatsResponse>
   async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsStatsResponse, unknown>>
   async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsStatsResponse>
diff --git a/src/api/api/security.ts b/src/api/api/security.ts
index d4ed0d109..4332002fe 100644
--- a/src/api/api/security.ts
+++ b/src/api/api/security.ts
@@ -43,6 +43,10 @@ export default class Security {
     this.transport = transport
   }

+  /**
+   * Creates or updates the user profile on behalf of another user.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-activate-user-profile.html Elasticsearch API docs}
+   */
   async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityActivateUserProfileResponse>
   async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityActivateUserProfileResponse, unknown>>
   async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityActivateUserProfileResponse>
@@ -77,6 +81,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Enables authentication as a user and retrieves information about the authenticated user.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-authenticate.html Elasticsearch API docs}
+   */
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityAuthenticateResponse>
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityAuthenticateResponse, unknown>>
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise<T.SecurityAuthenticateResponse>
@@ -100,6 +108,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Updates the attributes of multiple existing API keys.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-bulk-update-api-keys.html Elasticsearch API docs}
+   */
   async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
   async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
@@ -122,6 +134,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Changes the passwords of users in the native realm and built-in users.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-change-password.html Elasticsearch API docs}
+   */
   async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityChangePasswordResponse>
   async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityChangePasswordResponse, unknown>>
   async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise<T.SecurityChangePasswordResponse>
@@ -164,6 +180,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Clear a subset or all entries from the API key cache.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-clear-api-key-cache.html Elasticsearch API docs}
+   */
   async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearApiKeyCacheResponse>
   async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearApiKeyCacheResponse, unknown>>
   async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise<T.SecurityClearApiKeyCacheResponse>
@@ -186,6 +206,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Evicts application privileges from the native application privileges cache.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-clear-privilege-cache.html Elasticsearch API docs}
+   */
   async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedPrivilegesResponse>
   async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedPrivilegesResponse, unknown>>
   async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedPrivilegesResponse>
@@ -208,6 +232,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Evicts users from the user cache. Can completely clear the cache or evict specific users.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-clear-cache.html Elasticsearch API docs}
+   */
   async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedRealmsResponse>
   async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedRealmsResponse, unknown>>
   async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedRealmsResponse>
@@ -230,6 +258,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Evicts roles from the native role cache.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-clear-role-cache.html Elasticsearch API docs}
+   */
   async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedRolesResponse>
   async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedRolesResponse, unknown>>
   async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedRolesResponse>
@@ -252,6 +284,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Evicts tokens from the service account token caches.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-clear-service-token-caches.html Elasticsearch API docs}
+   */
   async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedServiceTokensResponse>
   async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedServiceTokensResponse, unknown>>
   async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedServiceTokensResponse>
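// Illustrative aside (a hedged sketch, not part of the patch): evicting cached
// security entries with the methods above. '*' clears every cached API key, and
// the realm name is a hypothetical placeholder.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
await client.security.clearApiKeyCache({ ids: '*' })
await client.security.clearCachedRealms({ realms: 'native1' })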
@@ -274,6 +310,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Creates an API key for access without requiring basic authentication.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-create-api-key.html Elasticsearch API docs}
+   */
   async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateApiKeyResponse>
   async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateApiKeyResponse, unknown>>
   async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateApiKeyResponse>
@@ -309,6 +349,36 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Creates a cross-cluster API key for API key based remote cluster access.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-create-cross-cluster-api-key.html Elasticsearch API docs}
+   */
+  async createCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async createCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async createCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async createCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_security/cross_cluster/api_key'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  /**
+   * Creates a service account token for access without requiring basic authentication.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-create-service-token.html Elasticsearch API docs}
+   */
   async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateServiceTokenResponse>
   async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateServiceTokenResponse, unknown>>
   async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateServiceTokenResponse>
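// Illustrative aside (a hedged sketch, not part of the patch): createApiKey with a
// restricted role descriptor. The key name, expiry, and index pattern are
// hypothetical placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const apiKey = await client.security.createApiKey({
  name: 'ingest-key',
  expiration: '7d',
  role_descriptors: {
    ingest_only: {
      indices: [{ names: ['logs-*'], privileges: ['create_doc'] }]
    }
  }
})
// 'encoded' can be sent directly in an "Authorization: ApiKey ..." header.
console.log(apiKey.encoded)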
@@ -338,6 +408,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Removes application privileges.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-delete-privilege.html Elasticsearch API docs}
+   */
   async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeletePrivilegesResponse>
   async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeletePrivilegesResponse, unknown>>
   async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityDeletePrivilegesResponse>
@@ -360,6 +434,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Removes roles in the native realm.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-delete-role.html Elasticsearch API docs}
+   */
   async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleResponse>
   async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteRoleResponse, unknown>>
   async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteRoleResponse>
@@ -382,6 +460,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Removes role mappings.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-delete-role-mapping.html Elasticsearch API docs}
+   */
   async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleMappingResponse>
   async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteRoleMappingResponse, unknown>>
   async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteRoleMappingResponse>
@@ -404,6 +486,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Deletes a service account token.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-delete-service-token.html Elasticsearch API docs}
+   */
   async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteServiceTokenResponse>
   async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteServiceTokenResponse, unknown>>
   async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteServiceTokenResponse>
@@ -426,6 +512,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Deletes users from the native realm.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-delete-user.html Elasticsearch API docs}
+   */
   async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteUserResponse>
   async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteUserResponse, unknown>>
   async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteUserResponse>
@@ -448,6 +538,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Disables users in the native realm.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-disable-user.html Elasticsearch API docs}
+   */
   async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserResponse>
   async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDisableUserResponse, unknown>>
   async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise<T.SecurityDisableUserResponse>
@@ -470,6 +564,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Disables a user profile so it's not visible in user profile searches.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-disable-user-profile.html Elasticsearch API docs}
+   */
   async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserProfileResponse>
   async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDisableUserProfileResponse, unknown>>
   async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityDisableUserProfileResponse>
@@ -492,6 +590,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Enables users in the native realm.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-enable-user.html Elasticsearch API docs}
+   */
   async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserResponse>
   async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnableUserResponse, unknown>>
   async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise<T.SecurityEnableUserResponse>
@@ -514,6 +616,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Enables a user profile so it's visible in user profile searches.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-enable-user-profile.html Elasticsearch API docs}
+   */
   async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserProfileResponse>
   async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnableUserProfileResponse, unknown>>
   async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityEnableUserProfileResponse>
@@ -536,6 +642,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Allows a Kibana instance to configure itself to communicate with a secured Elasticsearch cluster.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-kibana-enrollment.html Elasticsearch API docs}
+   */
   async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnrollKibanaResponse>
   async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnrollKibanaResponse, unknown>>
   async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise<T.SecurityEnrollKibanaResponse>
@@ -559,6 +669,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Allows a new node to enroll in an existing cluster with security enabled.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-node-enrollment.html Elasticsearch API docs}
+   */
   async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnrollNodeResponse>
   async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnrollNodeResponse, unknown>>
   async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise<T.SecurityEnrollNodeResponse>
@@ -582,6 +696,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Retrieves information for one or more API keys.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-api-key.html Elasticsearch API docs}
+   */
   async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetApiKeyResponse>
   async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetApiKeyResponse, unknown>>
   async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityGetApiKeyResponse>
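// Illustrative aside (a hedged sketch, not part of the patch): listing API keys
// with getApiKey; owner: true restricts the result to keys owned by the
// authenticated user.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const { api_keys } = await client.security.getApiKey({ owner: true })
for (const key of api_keys) {
  console.log(key.id, key.name, key.invalidated)
}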
@@ -605,6 +723,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Retrieves the list of cluster privileges and index privileges that are available in this version of Elasticsearch.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-builtin-privileges.html Elasticsearch API docs}
+   */
   async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetBuiltinPrivilegesResponse>
   async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetBuiltinPrivilegesResponse, unknown>>
   async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetBuiltinPrivilegesResponse>
@@ -628,6 +750,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Retrieves application privileges.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-privileges.html Elasticsearch API docs}
+   */
   async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetPrivilegesResponse>
   async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetPrivilegesResponse, unknown>>
   async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetPrivilegesResponse>
@@ -661,6 +787,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Retrieves roles in the native realm.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-role.html Elasticsearch API docs}
+   */
   async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleResponse>
   async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetRoleResponse, unknown>>
   async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityGetRoleResponse>
@@ -691,6 +821,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Retrieves role mappings.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-role-mapping.html Elasticsearch API docs}
+   */
   async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleMappingResponse>
   async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetRoleMappingResponse, unknown>>
   async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityGetRoleMappingResponse>
@@ -721,6 +855,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Retrieves information about service accounts.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-service-accounts.html Elasticsearch API docs}
+   */
   async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceAccountsResponse>
   async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetServiceAccountsResponse, unknown>>
   async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise<T.SecurityGetServiceAccountsResponse>
@@ -754,6 +892,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Retrieves information about all service credentials for a service account.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-service-credentials.html Elasticsearch API docs}
+   */
   async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceCredentialsResponse>
   async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetServiceCredentialsResponse, unknown>>
   async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise<T.SecurityGetServiceCredentialsResponse>
@@ -776,6 +918,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Creates a bearer token for access without requiring basic authentication.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-token.html Elasticsearch API docs}
+   */
   async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetTokenResponse>
   async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetTokenResponse, unknown>>
   async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityGetTokenResponse>
@@ -811,6 +957,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Retrieves information about users in the native realm and built-in users.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-user.html Elasticsearch API docs}
+   */
   async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserResponse>
   async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserResponse, unknown>>
   async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptions): Promise<T.SecurityGetUserResponse>
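// Illustrative aside (a hedged sketch, not part of the patch): getToken with the
// password grant. The username and password are hypothetical placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const token = await client.security.getToken({
  grant_type: 'password',
  username: 'test_user',
  password: 'changeme'
})
// access_token is a bearer token; refresh_token can be exchanged for a new pair.
console.log(token.access_token, token.refresh_token)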
@@ -841,6 +991,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Retrieves security privileges for the logged-in user.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-user-privileges.html Elasticsearch API docs}
+   */
   async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserPrivilegesResponse>
   async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserPrivilegesResponse, unknown>>
   async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetUserPrivilegesResponse>
@@ -864,6 +1018,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Retrieves user profiles for the given unique ID(s).
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-user-profile.html Elasticsearch API docs}
+   */
   async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserProfileResponse>
   async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserProfileResponse, unknown>>
   async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityGetUserProfileResponse>
@@ -886,6 +1044,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Creates an API key on behalf of another user.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-grant-api-key.html Elasticsearch API docs}
+   */
   async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGrantApiKeyResponse>
   async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGrantApiKeyResponse, unknown>>
   async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityGrantApiKeyResponse>
@@ -920,6 +1082,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Determines whether the specified user has a specified list of privileges.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-has-privileges.html Elasticsearch API docs}
+   */
   async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityHasPrivilegesResponse>
   async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityHasPrivilegesResponse, unknown>>
   async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityHasPrivilegesResponse>
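// Illustrative aside (a hedged sketch, not part of the patch): hasPrivileges checks
// cluster and index privileges for the calling user; the index pattern is a
// hypothetical placeholder.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const check = await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [{ names: ['logs-*'], privileges: ['read', 'write'] }]
})
console.log(check.has_all_requested)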
@@ -962,6 +1128,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Determines whether the users associated with the specified profile IDs have all the requested privileges.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-has-privileges-user-profile.html Elasticsearch API docs}
+   */
   async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityHasPrivilegesUserProfileResponse>
   async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityHasPrivilegesUserProfileResponse, unknown>>
   async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityHasPrivilegesUserProfileResponse>
@@ -996,6 +1166,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Invalidates one or more API keys.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-invalidate-api-key.html Elasticsearch API docs}
+   */
   async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityInvalidateApiKeyResponse>
   async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityInvalidateApiKeyResponse, unknown>>
   async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityInvalidateApiKeyResponse>
@@ -1031,6 +1205,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Invalidates one or more access tokens or refresh tokens.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-invalidate-token.html Elasticsearch API docs}
+   */
   async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityInvalidateTokenResponse>
   async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityInvalidateTokenResponse, unknown>>
   async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityInvalidateTokenResponse>
@@ -1066,6 +1244,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }

+  /**
+   * Exchanges an OpenID Connect authentication response message for an Elasticsearch access token and refresh token pair.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-oidc-authenticate.html Elasticsearch API docs}
+   */
   async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
   async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
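// Illustrative aside (a hedged sketch, not part of the patch): invalidating an API
// key by id; the id value is a hypothetical placeholder.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const res = await client.security.invalidateApiKey({ ids: ['VuaCfGcBCdbkQm-e5aOx'] })
console.log(res.invalidated_api_keys, res.error_count)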
   async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
   async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
@@ -1110,6 +1296,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Creates an OAuth 2.0 authentication request as a URL string
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-oidc-prepare-authentication.html Elasticsearch API docs}
+    */
   async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
   async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
@@ -1132,6 +1322,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Adds or updates application privileges.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-put-privileges.html Elasticsearch API docs}
+    */
   async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutPrivilegesResponse>
   async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutPrivilegesResponse, unknown>>
   async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityPutPrivilegesResponse>
@@ -1159,6 +1353,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Adds and updates roles in the native realm.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-put-role.html Elasticsearch API docs}
+    */
   async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutRoleResponse>
   async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutRoleResponse, unknown>>
   async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityPutRoleResponse>
@@ -1193,6 +1391,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Creates and updates role mappings.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-put-role-mapping.html Elasticsearch API docs}
+    */
   async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutRoleMappingResponse>
   async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutRoleMappingResponse, unknown>>
   async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityPutRoleMappingResponse>
@@ -1227,6 +1429,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Adds and updates users in the native realm. These users are commonly referred to as native users.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-put-user.html Elasticsearch API docs}
+    */
   async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutUserResponse>
   async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutUserResponse, unknown>>
   async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptions): Promise<T.SecurityPutUserResponse>
@@ -1261,6 +1467,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Retrieves information for API keys using a subset of query DSL
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-query-api-key.html Elasticsearch API docs}
+    */
   async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityQueryApiKeysResponse>
   async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityQueryApiKeysResponse, unknown>>
   async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise<T.SecurityQueryApiKeysResponse>
@@ -1296,6 +1506,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Exchanges a SAML Response message for an Elasticsearch access token and refresh token pair
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-authenticate.html Elasticsearch API docs}
+    */
   async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlAuthenticateResponse>
   async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlAuthenticateResponse, unknown>>
   async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlAuthenticateResponse>
@@ -1330,6 +1544,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Verifies the logout response sent from the SAML IdP
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-complete-logout.html Elasticsearch API docs}
+    */
   async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlCompleteLogoutResponse>
   async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlCompleteLogoutResponse, unknown>>
   async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlCompleteLogoutResponse>
@@ -1364,6 +1582,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Consumes a SAML LogoutRequest
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-invalidate.html Elasticsearch API docs}
+    */
   async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlInvalidateResponse>
   async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlInvalidateResponse, unknown>>
   async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlInvalidateResponse>
@@ -1398,6 +1620,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Invalidates an access token and a refresh token that were generated via the SAML Authenticate API
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-logout.html Elasticsearch API docs}
+    */
   async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlLogoutResponse>
   async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlLogoutResponse, unknown>>
   async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlLogoutResponse>
@@ -1432,6 +1658,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Creates a SAML authentication request
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-prepare-authentication.html Elasticsearch API docs}
+    */
   async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlPrepareAuthenticationResponse>
   async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlPrepareAuthenticationResponse, unknown>>
   async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlPrepareAuthenticationResponse>
@@ -1467,6 +1697,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-sp-metadata.html Elasticsearch API docs}
+    */
   async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlServiceProviderMetadataResponse>
   async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlServiceProviderMetadataResponse, unknown>>
   async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlServiceProviderMetadataResponse>
@@ -1489,6 +1723,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Get suggestions for user profiles that match specified search criteria.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-suggest-user-profile.html Elasticsearch API docs}
+    */
   async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySuggestUserProfilesResponse>
   async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySuggestUserProfilesResponse, unknown>>
   async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise<T.SecuritySuggestUserProfilesResponse>
@@ -1524,6 +1762,10 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Updates attributes of an existing API key.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-update-api-key.html Elasticsearch API docs}
+    */
   async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityUpdateApiKeyResponse>
   async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityUpdateApiKeyResponse, unknown>>
   async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityUpdateApiKeyResponse>
@@ -1558,6 +1800,36 @@ export default class Security {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Updates attributes of an existing cross-cluster API key.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-update-cross-cluster-api-key.html Elasticsearch API docs}
+    */
+  async updateCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async updateCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async updateCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async updateCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_security/cross_cluster/api_key/${encodeURIComponent(params.id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  /**
+    * Update application specific data for the user profile of the given unique ID.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-update-user-profile-data.html Elasticsearch API docs}
+    */
   async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityUpdateUserProfileDataResponse>
   async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityUpdateUserProfileDataResponse, unknown>>
   async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise<T.SecurityUpdateUserProfileDataResponse>
diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts
index c995623c7..69ae52852 100644
--- a/src/api/api/shutdown.ts
+++ b/src/api/api/shutdown.ts
@@ -43,6 +43,10 @@ export default class Shutdown {
     this.transport = transport
   }
 
+  /**
+    * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current Elasticsearch API docs}
+    */
   async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ShutdownDeleteNodeResponse>
   async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownDeleteNodeResponse, unknown>>
   async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownDeleteNodeResponse>
@@ -65,6 +69,10 @@ export default class Shutdown {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Retrieve status of a node or nodes that are currently marked as shutting down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current Elasticsearch API docs}
+    */
   async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ShutdownGetNodeResponse>
   async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownGetNodeResponse, unknown>>
   async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownGetNodeResponse>
@@ -95,6 +103,10 @@ export default class Shutdown {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current Elasticsearch API docs}
+    */
   async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ShutdownPutNodeResponse>
   async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownPutNodeResponse, unknown>>
   async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownPutNodeResponse>
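
A minimal sketch of how the security APIs annotated above might be invoked from application code; the key id and privilege name are illustrative, `client` is assumed to be an already-configured Client instance, and the snippet is not part of this patch:

    // Invalidate an API key by id, then check a cluster privilege.
    await client.security.invalidateApiKey({ ids: ['VuaCfGcBCdbkQm-e5aOx'] })
    const privileges = await client.security.hasPrivileges({ cluster: ['monitor'] })
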
diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts
index a24fa13e5..e73f9233d 100644
--- a/src/api/api/slm.ts
+++ b/src/api/api/slm.ts
@@ -43,6 +43,10 @@ export default class Slm {
     this.transport = transport
   }
 
+  /**
+    * Deletes an existing snapshot lifecycle policy.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-delete-policy.html Elasticsearch API docs}
+    */
   async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmDeleteLifecycleResponse>
   async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmDeleteLifecycleResponse, unknown>>
   async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise<T.SlmDeleteLifecycleResponse>
@@ -65,6 +69,10 @@ export default class Slm {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-execute-lifecycle.html Elasticsearch API docs}
+    */
   async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmExecuteLifecycleResponse>
   async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmExecuteLifecycleResponse, unknown>>
   async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise<T.SlmExecuteLifecycleResponse>
@@ -87,6 +95,10 @@ export default class Slm {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Deletes any snapshots that are expired according to the policy's retention rules.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-execute-retention.html Elasticsearch API docs}
+    */
   async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmExecuteRetentionResponse>
   async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmExecuteRetentionResponse, unknown>>
   async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise<T.SlmExecuteRetentionResponse>
@@ -110,6 +122,10 @@ export default class Slm {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-get-policy.html Elasticsearch API docs}
+    */
   async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmGetLifecycleResponse>
   async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmGetLifecycleResponse, unknown>>
   async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise<T.SlmGetLifecycleResponse>
@@ -140,6 +156,10 @@ export default class Slm {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Returns global and policy-level statistics about actions taken by snapshot lifecycle management.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-get-stats.html Elasticsearch API docs}
+    */
   async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmGetStatsResponse>
   async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmGetStatsResponse, unknown>>
   async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptions): Promise<T.SlmGetStatsResponse>
@@ -163,6 +183,10 @@ export default class Slm {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Retrieves the status of snapshot lifecycle management (SLM).
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-get-status.html Elasticsearch API docs}
+    */
   async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmGetStatusResponse>
   async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmGetStatusResponse, unknown>>
   async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptions): Promise<T.SlmGetStatusResponse>
@@ -186,6 +210,10 @@ export default class Slm {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Creates or updates a snapshot lifecycle policy.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-put-policy.html Elasticsearch API docs}
+    */
   async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmPutLifecycleResponse>
   async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmPutLifecycleResponse, unknown>>
   async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<T.SlmPutLifecycleResponse>
@@ -220,6 +248,10 @@ export default class Slm {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Turns on snapshot lifecycle management (SLM).
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-start.html Elasticsearch API docs}
+    */
   async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmStartResponse>
   async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmStartResponse, unknown>>
   async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptions): Promise<T.SlmStartResponse>
@@ -243,6 +275,10 @@ export default class Slm {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Turns off snapshot lifecycle management (SLM).
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-stop.html Elasticsearch API docs}
+    */
   async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmStopResponse>
   async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmStopResponse, unknown>>
   async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptions): Promise<T.SlmStopResponse>
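
For context, a sketch of driving the SLM methods documented above; the policy id, schedule, and repository name are assumptions, not part of this patch, and `client` is an already-configured Client instance:

    // Register a nightly policy, then trigger it immediately.
    await client.slm.putLifecycle({
      policy_id: 'nightly-snapshots',
      schedule: '0 30 1 * * ?',
      name: '<nightly-snap-{now/d}>',
      repository: 'my_repository', // assumed to be registered already
      config: { indices: ['*'] }
    })
    await client.slm.executeLifecycle({ policy_id: 'nightly-snapshots' })
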
diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts
index fa3fa0b03..09245f182 100644
--- a/src/api/api/snapshot.ts
+++ b/src/api/api/snapshot.ts
@@ -43,6 +43,10 @@ export default class Snapshot {
     this.transport = transport
   }
 
+  /**
+    * Removes stale data from repository.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/clean-up-snapshot-repo-api.html Elasticsearch API docs}
+    */
   async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotCleanupRepositoryResponse>
   async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCleanupRepositoryResponse, unknown>>
   async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotCleanupRepositoryResponse>
@@ -65,6 +69,10 @@ export default class Snapshot {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Clones indices from one snapshot into another snapshot in the same repository.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+    */
   async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotCloneResponse>
   async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCloneResponse, unknown>>
   async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptions): Promise<T.SnapshotCloneResponse>
@@ -99,6 +107,10 @@ export default class Snapshot {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Creates a snapshot in a repository.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+    */
   async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotCreateResponse>
   async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCreateResponse, unknown>>
   async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptions): Promise<T.SnapshotCreateResponse>
@@ -133,6 +145,10 @@ export default class Snapshot {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Creates a repository.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+    */
   async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotCreateRepositoryResponse>
   async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCreateRepositoryResponse, unknown>>
   async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotCreateRepositoryResponse>
@@ -167,6 +183,10 @@ export default class Snapshot {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Deletes one or more snapshots.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+    */
   async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotDeleteResponse>
   async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotDeleteResponse, unknown>>
   async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise<T.SnapshotDeleteResponse>
@@ -189,6 +209,10 @@ export default class Snapshot {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Deletes a repository.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+    */
   async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotDeleteRepositoryResponse>
   async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotDeleteRepositoryResponse, unknown>>
   async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotDeleteRepositoryResponse>
@@ -211,6 +235,10 @@ export default class Snapshot {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Returns information about a snapshot.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+    */
   async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotGetResponse>
   async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotGetResponse, unknown>>
   async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptions): Promise<T.SnapshotGetResponse>
@@ -233,6 +261,10 @@ export default class Snapshot {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Returns information about a repository.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+    */
   async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotGetRepositoryResponse>
   async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotGetRepositoryResponse, unknown>>
   async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotGetRepositoryResponse>
@@ -263,6 +295,10 @@ export default class Snapshot {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Analyzes a repository for correctness and performance
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+    */
   async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
   async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
@@ -285,6 +321,10 @@ export default class Snapshot {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Restores a snapshot.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+    */
   async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotRestoreResponse>
   async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotRestoreResponse, unknown>>
   async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise<T.SnapshotRestoreResponse>
@@ -319,6 +359,10 @@ export default class Snapshot {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Returns information about the status of a snapshot.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+    */
   async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotStatusResponse>
   async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotStatusResponse, unknown>>
   async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptions): Promise<T.SnapshotStatusResponse>
@@ -352,6 +396,10 @@ export default class Snapshot {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Verifies a repository.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+    */
   async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotVerifyRepositoryResponse>
   async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotVerifyRepositoryResponse, unknown>>
   async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotVerifyRepositoryResponse>
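
A hedged usage sketch for the snapshot methods above; the repository and snapshot names are illustrative and the snippet is not part of this patch:

    // Take a snapshot synchronously, then inspect its status.
    await client.snapshot.create({
      repository: 'my_repository',
      snapshot: 'snapshot_1',
      wait_for_completion: true
    })
    const status = await client.snapshot.status({ repository: 'my_repository', snapshot: 'snapshot_1' })
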
diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts
index 63e808ff6..b3434a35c 100644
--- a/src/api/api/sql.ts
+++ b/src/api/api/sql.ts
@@ -43,6 +43,10 @@ export default class Sql {
     this.transport = transport
   }
 
+  /**
+    * Clears the SQL cursor
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/clear-sql-cursor-api.html Elasticsearch API docs}
+    */
   async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlClearCursorResponse>
   async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlClearCursorResponse, unknown>>
   async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptions): Promise<T.SqlClearCursorResponse>
@@ -77,6 +81,10 @@ export default class Sql {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-async-sql-search-api.html Elasticsearch API docs}
+    */
   async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlDeleteAsyncResponse>
   async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlDeleteAsyncResponse, unknown>>
   async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise<T.SqlDeleteAsyncResponse>
@@ -99,6 +107,10 @@ export default class Sql {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Returns the current status and available results for an async SQL search or stored synchronous SQL search
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-async-sql-search-api.html Elasticsearch API docs}
+    */
   async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlGetAsyncResponse>
   async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlGetAsyncResponse, unknown>>
   async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise<T.SqlGetAsyncResponse>
@@ -121,6 +133,10 @@ export default class Sql {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Returns the current status of an async SQL search or a stored synchronous SQL search
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-async-sql-search-status-api.html Elasticsearch API docs}
+    */
   async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlGetAsyncStatusResponse>
   async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlGetAsyncStatusResponse, unknown>>
   async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise<T.SqlGetAsyncStatusResponse>
@@ -143,6 +159,10 @@ export default class Sql {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Executes a SQL request
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/sql-search-api.html Elasticsearch API docs}
+    */
   async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlQueryResponse>
   async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlQueryResponse, unknown>>
   async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise<T.SqlQueryResponse>
@@ -178,6 +198,10 @@ export default class Sql {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Translates SQL into Elasticsearch queries
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/sql-translate-api.html Elasticsearch API docs}
+    */
   async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlTranslateResponse>
   async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlTranslateResponse, unknown>>
   async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptions): Promise<T.SqlTranslateResponse>
diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts
index 08c360806..333be7894 100644
--- a/src/api/api/ssl.ts
+++ b/src/api/api/ssl.ts
@@ -43,6 +43,10 @@ export default class Ssl {
     this.transport = transport
   }
 
+  /**
+    * Retrieves information about the X.509 certificates used to encrypt communications in the cluster.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-ssl.html Elasticsearch API docs}
+    */
   async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SslCertificatesResponse>
   async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SslCertificatesResponse, unknown>>
   async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptions): Promise<T.SslCertificatesResponse>
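
A sketch of the SQL surface documented above, assuming an index named "my-index" exists (illustrative only; not part of this patch):

    // Run a query, then ask for its Query DSL translation.
    const rows = await client.sql.query({ query: 'SELECT * FROM "my-index" LIMIT 10' })
    const dsl = await client.sql.translate({ query: 'SELECT * FROM "my-index" LIMIT 10' })
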
diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts
new file mode 100644
index 000000000..bf1de39e0
--- /dev/null
+++ b/src/api/api/synonyms.ts
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class Synonyms {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  /**
+    * Deletes a synonym set
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-synonyms.html Elasticsearch API docs}
+    */
+  async delete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async delete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async delete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async delete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['synonyms_set']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_synonyms/${encodeURIComponent(params.synonyms_set.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  /**
+    * Retrieves a synonym set
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-synonyms.html Elasticsearch API docs}
+    */
+  async get (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async get (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async get (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async get (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['synonyms_set']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_synonyms/${encodeURIComponent(params.synonyms_set.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  /**
+    * Creates or updates a synonyms set
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-synonyms.html Elasticsearch API docs}
+    */
+  async put (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async put (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async put (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async put (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['synonyms_set']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_synonyms/${encodeURIComponent(params.synonyms_set.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
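
Since synonyms.ts is a new file, a sketch of the generated surface as it stands; the set name is illustrative, the methods are still typed as T.TODO at this stage, and the snippet is not part of this patch:

    // Fetch a synonym set by name, then remove it.
    const set = await client.synonyms.get({ synonyms_set: 'my-synonyms-set' })
    await client.synonyms.delete({ synonyms_set: 'my-synonyms-set' })
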
diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts
index def117eaa..36d317804 100644
--- a/src/api/api/tasks.ts
+++ b/src/api/api/tasks.ts
@@ -43,6 +43,10 @@ export default class Tasks {
     this.transport = transport
   }
 
+  /**
+    * Cancels a task, if it can be cancelled through an API.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/tasks.html Elasticsearch API docs}
+    */
   async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TasksCancelResponse>
   async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TasksCancelResponse, unknown>>
   async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptions): Promise<T.TasksCancelResponse>
@@ -73,6 +77,10 @@ export default class Tasks {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Returns information about a task.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/tasks.html Elasticsearch API docs}
+    */
   async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TasksGetResponse>
   async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TasksGetResponse, unknown>>
   async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptions): Promise<T.TasksGetResponse>
@@ -95,6 +103,10 @@ export default class Tasks {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Returns a list of tasks.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/tasks.html Elasticsearch API docs}
+    */
   async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TasksListResponse>
   async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TasksListResponse, unknown>>
   async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptions): Promise<T.TasksListResponse>
diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts
index eb88eb9db..1bff21cb8 100644
--- a/src/api/api/terms_enum.ts
+++ b/src/api/api/terms_enum.ts
@@ -37,6 +37,10 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
+/**
+  * The terms enum API can be used to discover terms in the index that begin with the provided string. It is designed for low-latency look-ups used in auto-complete scenarios.
+  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-terms-enum.html Elasticsearch API docs}
+  */
 export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TermsEnumResponse>
 export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TermsEnumResponse, unknown>>
 export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptions): Promise<T.TermsEnumResponse>
diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts
index d2cf887ba..2c3f11472 100644
--- a/src/api/api/termvectors.ts
+++ b/src/api/api/termvectors.ts
@@ -37,6 +37,10 @@ import * as T from '../types'
 import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
+/**
+  * Returns information and statistics about terms in the fields of a particular document.
+  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-termvectors.html Elasticsearch API docs}
+  */
 export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TermvectorsResponse>
 export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TermvectorsResponse, unknown>>
 export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptions): Promise<T.TermvectorsResponse>
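
A sketch of the terms enum call documented above; index and field names are assumptions, not part of this patch:

    // Low-latency prefix lookup for auto-complete.
    const terms = await client.termsEnum({ index: 'my-index', field: 'tags', string: 'kib' })
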
diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts
index 6274fca23..59e153bcc 100644
--- a/src/api/api/text_structure.ts
+++ b/src/api/api/text_structure.ts
@@ -43,6 +43,10 @@ export default class TextStructure {
     this.transport = transport
   }
 
+  /**
+    * Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/find-structure.html Elasticsearch API docs}
+    */
   async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TextStructureFindStructureResponse>
   async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TextStructureFindStructureResponse, unknown>>
   async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise<T.TextStructureFindStructureResponse>
diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts
index 904c40d37..104619bd7 100644
--- a/src/api/api/transform.ts
+++ b/src/api/api/transform.ts
@@ -43,6 +43,10 @@ export default class Transform {
     this.transport = transport
   }
 
+  /**
+    * Deletes an existing transform.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-transform.html Elasticsearch API docs}
+    */
   async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformDeleteTransformResponse>
   async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformDeleteTransformResponse, unknown>>
   async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise<T.TransformDeleteTransformResponse>
@@ -65,6 +69,10 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Retrieves configuration information for transforms.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-transform.html Elasticsearch API docs}
+    */
   async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformGetTransformResponse>
   async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformGetTransformResponse, unknown>>
   async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptions): Promise<T.TransformGetTransformResponse>
@@ -95,6 +103,10 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Retrieves usage information for transforms.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-transform-stats.html Elasticsearch API docs}
+    */
   async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformGetTransformStatsResponse>
   async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformGetTransformStatsResponse, unknown>>
   async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise<T.TransformGetTransformStatsResponse>
@@ -117,6 +129,10 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Previews a transform.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/preview-transform.html Elasticsearch API docs}
+    */
   async previewTransform<TTransform = unknown> (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformPreviewTransformResponse<TTransform>>
   async previewTransform<TTransform = unknown> (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformPreviewTransformResponse<TTransform>, unknown>>
   async previewTransform<TTransform = unknown> (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise<T.TransformPreviewTransformResponse<TTransform>>
@@ -159,6 +175,10 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Instantiates a transform.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-transform.html Elasticsearch API docs}
+    */
   async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformPutTransformResponse>
   async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformPutTransformResponse, unknown>>
   async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise<T.TransformPutTransformResponse>
@@ -193,6 +213,10 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Resets an existing transform.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/reset-transform.html Elasticsearch API docs}
+    */
   async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformResetTransformResponse>
   async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformResetTransformResponse, unknown>>
   async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptions): Promise<T.TransformResetTransformResponse>
@@ -215,6 +239,10 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Schedules now a transform.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/schedule-now-transform.html Elasticsearch API docs}
+    */
   async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformScheduleNowTransformResponse>
   async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformScheduleNowTransformResponse, unknown>>
   async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise<T.TransformScheduleNowTransformResponse>
@@ -237,6 +265,10 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Starts one or more transforms.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/start-transform.html Elasticsearch API docs}
+    */
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformStartTransformResponse>
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformStartTransformResponse, unknown>>
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptions): Promise<T.TransformStartTransformResponse>
@@ -259,6 +291,10 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Stops one or more transforms.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/stop-transform.html Elasticsearch API docs}
+    */
   async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformStopTransformResponse>
   async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformStopTransformResponse, unknown>>
   async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptions): Promise<T.TransformStopTransformResponse>
@@ -281,6 +317,10 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Updates certain properties of a transform.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/update-transform.html Elasticsearch API docs}
+    */
   async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformUpdateTransformResponse>
   async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformUpdateTransformResponse, unknown>>
   async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise<T.TransformUpdateTransformResponse>
@@ -315,6 +355,10 @@ export default class Transform {
     return await this.transport.request({ path, method, querystring, body }, options)
   }
 
+  /**
+    * Upgrades all transforms.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/upgrade-transforms.html Elasticsearch API docs}
+    */
   async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformUpgradeTransformsResponse>
   async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformUpgradeTransformsResponse, unknown>>
   async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise<T.TransformUpgradeTransformsResponse>
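
A sketch of the transform lifecycle methods above; the transform id is illustrative and the snippet is not part of this patch:

    // Start a transform, then stop it and wait for it to wind down.
    await client.transform.startTransform({ transform_id: 'my-transform' })
    await client.transform.stopTransform({ transform_id: 'my-transform', wait_for_completion: true })
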
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-update.html Elasticsearch API docs} + */ export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptions): Promise> diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts index ada1a9595..cdf1cac3a 100644 --- a/src/api/api/update_by_query.ts +++ b/src/api/api/update_by_query.ts @@ -37,6 +37,11 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Performs an update on every document in the index without changing the source, +for example to pick up a mapping change. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-update-by-query.html Elasticsearch API docs} + */ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts index 8af59d09b..c91c4b0fa 100644 --- a/src/api/api/update_by_query_rethrottle.ts +++ b/src/api/api/update_by_query_rethrottle.ts @@ -37,6 +37,10 @@ import * as T from '../types' import * as TB from '../typesWithBodyKey' interface That { transport: Transport } +/** + * Changes the number of requests per second for a particular Update By Query operation. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-update-by-query.html Elasticsearch API docs} + */ export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index d01f43cf3..a026dbc1f 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -43,6 +43,10 @@ export default class Watcher { this.transport = transport } + /** + * Acknowledges a watch, manually throttling the execution of the watch's actions. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-ack-watch.html Elasticsearch API docs} + */ async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise @@ -72,6 +76,10 @@ export default class Watcher { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Activates a currently inactive watch. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-activate-watch.html Elasticsearch API docs} + */ async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise @@ -94,6 +102,10 @@ export default class Watcher { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Deactivates a currently active watch. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-deactivate-watch.html Elasticsearch API docs} + */ async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise @@ -116,6 +128,10 @@ export default class Watcher { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Removes a watch from Watcher. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-delete-watch.html Elasticsearch API docs} + */ async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise @@ -138,6 +154,10 @@ export default class Watcher { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Forces the execution of a stored watch. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-execute-watch.html Elasticsearch API docs} + */ async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise @@ -180,6 +200,36 @@ export default class Watcher { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieve settings for the watcher system index + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-get-settings.html Elasticsearch API docs} + */ + async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_watcher/settings' + return await this.transport.request({ path, method, querystring, body }, options) + } + + /** + * Retrieves a watch by its ID. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-get-watch.html Elasticsearch API docs} + */ async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise @@ -202,6 +252,10 @@ export default class Watcher { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Creates a new watch, or updates an existing one. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-put-watch.html Elasticsearch API docs} + */ async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise @@ -236,6 +290,10 @@ export default class Watcher { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves stored watches. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-query-watches.html Elasticsearch API docs} + */ async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise @@ -271,6 +329,10 @@ export default class Watcher { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Starts Watcher if it is not already running. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-start.html Elasticsearch API docs} + */ async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise> async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptions): Promise @@ -294,6 +356,10 @@ export default class Watcher { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves the current Watcher metrics. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-stats.html Elasticsearch API docs} + */ async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptions): Promise @@ -324,6 +390,10 @@ export default class Watcher { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Stops Watcher if it is running. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-stop.html Elasticsearch API docs} + */ async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptions): Promise @@ -346,4 +416,30 @@ export default class Watcher { const path = '/_watcher/_stop' return await this.transport.request({ path, method, querystring, body }, options) } + + /** + * Update settings for the watcher system index + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-update-settings.html Elasticsearch API docs} + */ + async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = '/_watcher/settings' + return await this.transport.request({ path, method, querystring, body }, options) + } } diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts index 8e67e2635..eab5e38c1 100644 --- a/src/api/api/xpack.ts +++ b/src/api/api/xpack.ts @@ -43,6 +43,10 @@ export default class Xpack { this.transport = transport } + /** + * Retrieves information about the installed X-Pack features. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/info-api.html Elasticsearch API docs} + */ async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptions): Promise @@ -66,6 +70,10 @@ export default class Xpack { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieves usage information about the installed X-Pack features. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/usage-api.html Elasticsearch API docs} + */ async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptions): Promise diff --git a/src/api/index.ts b/src/api/index.ts index a76458829..46efbd366 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -94,6 +94,7 @@ import SlmApi from './api/slm' import SnapshotApi from './api/snapshot' import SqlApi from './api/sql' import SslApi from './api/ssl' +import SynonymsApi from './api/synonyms' import TasksApi from './api/tasks' import termsEnumApi from './api/terms_enum' import termvectorsApi from './api/termvectors' @@ -175,6 +176,7 @@ export default interface API { snapshot: SnapshotApi sql: SqlApi ssl: SslApi + synonyms: SynonymsApi tasks: TasksApi termsEnum: typeof termsEnumApi termvectors: typeof termvectorsApi @@ -216,6 +218,7 @@ const kSlm = Symbol('Slm') const kSnapshot = Symbol('Snapshot') const kSql = Symbol('Sql') const kSsl = Symbol('Ssl') +const kSynonyms = Symbol('Synonyms') const kTasks = Symbol('Tasks') const kTextStructure = Symbol('TextStructure') const kTransform = Symbol('Transform') @@ -252,6 +255,7 @@ export default class API { [kSnapshot]: symbol | null [kSql]: symbol | null [kSsl]: symbol | null + [kSynonyms]: symbol | null [kTasks]: symbol | null [kTextStructure]: symbol | null [kTransform]: symbol | null @@ -287,6 +291,7 @@ export default class API { this[kSnapshot] = null this[kSql] = null this[kSsl] = null + this[kSynonyms] = null this[kTasks] = null this[kTextStructure] = null this[kTransform] = null @@ -428,6 +433,9 @@ Object.defineProperties(API.prototype, { ssl: { get () { return this[kSsl] === null ? (this[kSsl] = new SslApi(this.transport)) : this[kSsl] } }, + synonyms: { + get () { return this[kSynonyms] === null ? (this[kSynonyms] = new SynonymsApi(this.transport)) : this[kSynonyms] } + }, tasks: { get () { return this[kTasks] === null ? 
(this[kTasks] = new TasksApi(this.transport)) : this[kTasks] } }, diff --git a/src/api/types.ts b/src/api/types.ts index c5388ce4a..44a5659fd 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -229,9 +229,9 @@ export interface DeleteByQueryResponse { slice_id?: integer task?: TaskId throttled?: Duration - throttled_millis: DurationValue + throttled_millis?: DurationValue throttled_until?: Duration - throttled_until_millis: DurationValue + throttled_until_millis?: DurationValue timed_out?: boolean took?: DurationValue total?: long @@ -1140,6 +1140,7 @@ export interface SearchRequest extends RequestBase { indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] knn?: KnnQuery | KnnQuery[] + rank?: RankContainer min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -1969,6 +1970,10 @@ export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb' export type CategoryId = string +export type ClusterInfoTarget = '_all' | 'http' | 'ingest' | 'thread_pool' | 'script' + +export type ClusterInfoTargets = ClusterInfoTarget | ClusterInfoTarget[] + export interface ClusterStatistics { skipped: integer successful: integer @@ -2368,6 +2373,13 @@ export interface QueryVectorBuilder { text_embedding?: TextEmbedding } +export interface RankBase { +} + +export interface RankContainer { + rrf?: RrfRank +} + export interface RecoveryStats { current_as_source: long current_as_target: long @@ -2412,6 +2424,11 @@ export interface Retries { export type Routing = string +export interface RrfRank { + rank_constant?: long + window_size?: long +} + export interface ScoreSort { order?: SortOrder } @@ -5138,7 +5155,7 @@ export interface MappingTextProperty extends MappingCorePropertyBase { type: 'text' } -export type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram' +export type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram' | 'position' export interface MappingTokenCountProperty extends MappingDocValuesPropertyBase { analyzer?: string @@ -5695,6 +5712,7 @@ export interface QueryDslQueryContainer { term?: Partial> terms?: QueryDslTermsQuery terms_set?: Partial> + text_expansion?: QueryDslTextExpansionQuery | Field wildcard?: Partial> wrapper?: QueryDslWrapperQuery type?: QueryDslTypeQuery @@ -5908,6 +5926,12 @@ export interface QueryDslTermsSetQuery extends QueryDslQueryBase { terms: string[] } +export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { + value: Field + model_id: string + model_text: string +} + export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' export interface QueryDslTypeQuery extends QueryDslQueryBase { @@ -6300,6 +6324,7 @@ export interface CatHealthHealthRecord { } export interface CatHealthRequest extends CatCatRequestBase { + time?: TimeUnit ts?: boolean } @@ -7270,6 +7295,7 @@ export interface CatNodesNodesRecord { export interface CatNodesRequest extends CatCatRequestBase { bytes?: Bytes full_id?: boolean | string + include_unloaded_segments?: boolean } export type CatNodesResponse = CatNodesNodesRecord[] @@ -7703,7 +7729,7 @@ export interface CatTasksRequest extends CatCatRequestBase { actions?: string[] detailed?: boolean node_id?: string[] - parent_task?: long + parent_task_id?: string } export type CatTasksResponse = CatTasksTasksRecord[] @@ -8153,6 +8179,7 @@ export interface ClusterComponentTemplateSummary { settings?: Record mappings?: MappingTypeMapping aliases?: Record + lifecycle?: 
IndicesDataLifecycleWithRollover } export interface ClusterAllocationExplainAllocationDecision { @@ -8299,6 +8326,7 @@ export interface ClusterGetComponentTemplateRequest extends RequestBase { flat_settings?: boolean local?: boolean master_timeout?: Duration + include_defaults?: boolean } export interface ClusterGetComponentTemplateResponse { @@ -8376,6 +8404,18 @@ export interface ClusterHealthShardHealthStats { unassigned_shards: integer } +export interface ClusterInfoRequest extends RequestBase { + target: ClusterInfoTargets +} + +export interface ClusterInfoResponse { + cluster_name: Name + http?: NodesHttp + ingest?: NodesIngest + thread_pool?: Record + script?: NodesScripting +} + export interface ClusterPendingTasksPendingTask { executing: boolean insert_order: integer @@ -9403,6 +9443,15 @@ export interface IndicesCacheQueries { enabled: boolean } +export interface IndicesDataLifecycle { + data_retention?: Duration +} + +export interface IndicesDataLifecycleWithRollover { + data_retention?: Duration + rollover?: IndicesDlmRolloverConditions +} + export interface IndicesDataStream { name: DataStreamName timestamp_field: IndicesDataStreamTimestampField @@ -9416,6 +9465,7 @@ export interface IndicesDataStream { ilm_policy?: Name _meta?: Metadata allow_custom_routing?: boolean + lifecycle?: IndicesDataLifecycleWithRollover } export interface IndicesDataStreamIndex { @@ -9431,6 +9481,19 @@ export interface IndicesDataStreamVisibility { hidden?: boolean } +export interface IndicesDlmRolloverConditions { + min_age?: Duration + max_age?: string + min_docs?: long + max_docs?: long + min_size?: ByteSize + max_size?: ByteSize + min_primary_shard_size?: ByteSize + max_primary_shard_size?: ByteSize + min_primary_shard_docs?: long + max_primary_shard_docs?: long +} + export interface IndicesDownsampleConfig { fixed_interval: DurationLarge } @@ -9484,10 +9547,10 @@ export interface IndicesIndexSegmentSort { } export interface IndicesIndexSettingBlocks { - read_only?: boolean - read_only_allow_delete?: boolean - read?: boolean - write?: boolean | string + read_only?: SpecUtilsStringified + read_only_allow_delete?: SpecUtilsStringified + read?: SpecUtilsStringified + write?: SpecUtilsStringified metadata?: SpecUtilsStringified } @@ -9585,6 +9648,7 @@ export interface IndicesIndexState { settings?: IndicesIndexSettings defaults?: IndicesIndexSettings data_stream?: DataStreamName + lifecycle?: IndicesDataLifecycle } export interface IndicesIndexTemplate { @@ -9607,6 +9671,7 @@ export interface IndicesIndexTemplateSummary { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings + lifecycle?: IndicesDataLifecycleWithRollover } export interface IndicesIndexVersioning { @@ -10020,6 +10085,15 @@ export interface IndicesDeleteAliasRequest extends RequestBase { export type IndicesDeleteAliasResponse = AcknowledgedResponseBase +export interface IndicesDeleteDataLifecycleRequest extends RequestBase { + name: DataStreamNames + expand_wildcards?: ExpandWildcards + master_timeout?: Duration + timeout?: Duration +} + +export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase + export interface IndicesDeleteDataStreamRequest extends RequestBase { name: DataStreamNames expand_wildcards?: ExpandWildcards @@ -10101,6 +10175,28 @@ export interface IndicesExistsTemplateRequest extends RequestBase { export type IndicesExistsTemplateResponse = boolean +export interface IndicesExplainDataLifecycleDataLifecycleExplain { + index: IndexName + managed_by_dlm: boolean + 
index_creation_date_millis?: EpochTime + time_since_index_creation?: Duration + rollover_date_millis?: EpochTime + time_since_rollover?: Duration + lifecycle?: IndicesDataLifecycleWithRollover + generation_time?: Duration + error?: string +} + +export interface IndicesExplainDataLifecycleRequest extends RequestBase { + index: Indices + include_defaults?: boolean + master_timeout?: Duration +} + +export interface IndicesExplainDataLifecycleResponse { + indices: Record +} + export interface IndicesFieldUsageStatsFieldSummary { any: uint stored_fields: uint @@ -10218,9 +10314,25 @@ export interface IndicesGetAliasRequest extends RequestBase { export type IndicesGetAliasResponse = Record +export interface IndicesGetDataLifecycleDataStreamLifecycle { + name: DataStreamName + lifecycle?: IndicesDataLifecycle +} + +export interface IndicesGetDataLifecycleRequest extends RequestBase { + name: DataStreamNames + expand_wildcards?: ExpandWildcards + include_defaults?: boolean +} + +export interface IndicesGetDataLifecycleResponse { + data_streams: IndicesGetDataLifecycleDataStreamLifecycle[] +} + export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames expand_wildcards?: ExpandWildcards + include_defaults?: boolean } export interface IndicesGetDataStreamResponse { @@ -10253,6 +10365,7 @@ export interface IndicesGetIndexTemplateRequest extends RequestBase { local?: boolean flat_settings?: boolean master_timeout?: Duration + include_defaults?: boolean } export interface IndicesGetIndexTemplateResponse { @@ -10355,10 +10468,21 @@ export interface IndicesPutAliasRequest extends RequestBase { export type IndicesPutAliasResponse = AcknowledgedResponseBase +export interface IndicesPutDataLifecycleRequest extends RequestBase { + name: DataStreamNames + expand_wildcards?: ExpandWildcards + master_timeout?: Duration + timeout?: Duration + data_retention?: Duration +} + +export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase + export interface IndicesPutIndexTemplateIndexTemplateMapping { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings + lifecycle?: IndicesDataLifecycle } export interface IndicesPutIndexTemplateRequest extends RequestBase { @@ -10740,6 +10864,7 @@ export interface IndicesSimulateIndexTemplateRequest extends RequestBase { name: Name create?: boolean master_timeout?: Duration + include_defaults?: boolean allow_auto_create?: boolean index_patterns?: Indices composed_of?: Name[] @@ -10762,6 +10887,7 @@ export interface IndicesSimulateTemplateRequest extends RequestBase { name?: Name create?: boolean master_timeout?: Duration + include_defaults?: boolean template?: IndicesIndexTemplate } @@ -15188,6 +15314,14 @@ export interface RollupStopJobResponse { stopped: boolean } +export interface SearchApplicationAnalyticsCollection { + event_data_stream: SearchApplicationEventDataStream +} + +export interface SearchApplicationEventDataStream { + name: IndexName +} + export interface SearchApplicationSearchApplication { name: Name indices: IndexName[] @@ -15206,12 +15340,24 @@ export interface SearchApplicationDeleteRequest extends RequestBase { export type SearchApplicationDeleteResponse = AcknowledgedResponseBase +export interface SearchApplicationDeleteBehavioralAnalyticsRequest extends RequestBase { + name: Name +} + +export type SearchApplicationDeleteBehavioralAnalyticsResponse = AcknowledgedResponseBase + export interface SearchApplicationGetRequest extends RequestBase { name: Name } export type 
SearchApplicationGetResponse = SearchApplicationSearchApplication +export interface SearchApplicationGetBehavioralAnalyticsRequest extends RequestBase { + name?: Name[] +} + +export type SearchApplicationGetBehavioralAnalyticsResponse = Record + export interface SearchApplicationListRequest extends RequestBase { q?: string from?: integer @@ -15240,6 +15386,16 @@ export interface SearchApplicationPutResponse { result: Result } +export interface SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase extends AcknowledgedResponseBase { + name: Name +} + +export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestBase { + name: Name +} + +export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase + export interface SearchApplicationSearchRequest extends RequestBase { name: Name params?: Record @@ -16469,7 +16625,7 @@ export interface SnapshotSnapshotInfo { export interface SnapshotSnapshotShardFailure { index: IndexName - node_id: Id + node_id?: Id reason: string shard_id: Id status: string diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 6c0a35200..6f2887bd5 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -244,9 +244,9 @@ export interface DeleteByQueryResponse { slice_id?: integer task?: TaskId throttled?: Duration - throttled_millis: DurationValue + throttled_millis?: DurationValue throttled_until?: Duration - throttled_until_millis: DurationValue + throttled_until_millis?: DurationValue timed_out?: boolean took?: DurationValue total?: long @@ -1194,6 +1194,7 @@ export interface SearchRequest extends RequestBase { indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] knn?: KnnQuery | KnnQuery[] + rank?: RankContainer min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -2042,6 +2043,10 @@ export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb' export type CategoryId = string +export type ClusterInfoTarget = '_all' | 'http' | 'ingest' | 'thread_pool' | 'script' + +export type ClusterInfoTargets = ClusterInfoTarget | ClusterInfoTarget[] + export interface ClusterStatistics { skipped: integer successful: integer @@ -2441,6 +2446,13 @@ export interface QueryVectorBuilder { text_embedding?: TextEmbedding } +export interface RankBase { +} + +export interface RankContainer { + rrf?: RrfRank +} + export interface RecoveryStats { current_as_source: long current_as_target: long @@ -2485,6 +2497,11 @@ export interface Retries { export type Routing = string +export interface RrfRank { + rank_constant?: long + window_size?: long +} + export interface ScoreSort { order?: SortOrder } @@ -5211,7 +5228,7 @@ export interface MappingTextProperty extends MappingCorePropertyBase { type: 'text' } -export type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram' +export type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram' | 'position' export interface MappingTokenCountProperty extends MappingDocValuesPropertyBase { analyzer?: string @@ -5768,6 +5785,7 @@ export interface QueryDslQueryContainer { term?: Partial> terms?: QueryDslTermsQuery terms_set?: Partial> + text_expansion?: QueryDslTextExpansionQuery | Field wildcard?: Partial> wrapper?: QueryDslWrapperQuery type?: QueryDslTypeQuery @@ -5981,6 +5999,12 @@ export interface QueryDslTermsSetQuery extends QueryDslQueryBase { terms: string[] } +export interface QueryDslTextExpansionQuery extends 
QueryDslQueryBase { + value: Field + model_id: string + model_text: string +} + export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' export interface QueryDslTypeQuery extends QueryDslQueryBase { @@ -6377,6 +6401,7 @@ export interface CatHealthHealthRecord { } export interface CatHealthRequest extends CatCatRequestBase { + time?: TimeUnit ts?: boolean } @@ -7347,6 +7372,7 @@ export interface CatNodesNodesRecord { export interface CatNodesRequest extends CatCatRequestBase { bytes?: Bytes full_id?: boolean | string + include_unloaded_segments?: boolean } export type CatNodesResponse = CatNodesNodesRecord[] @@ -7780,7 +7806,7 @@ export interface CatTasksRequest extends CatCatRequestBase { actions?: string[] detailed?: boolean node_id?: string[] - parent_task?: long + parent_task_id?: string } export type CatTasksResponse = CatTasksTasksRecord[] @@ -8242,6 +8268,7 @@ export interface ClusterComponentTemplateSummary { settings?: Record mappings?: MappingTypeMapping aliases?: Record + lifecycle?: IndicesDataLifecycleWithRollover } export interface ClusterAllocationExplainAllocationDecision { @@ -8391,6 +8418,7 @@ export interface ClusterGetComponentTemplateRequest extends RequestBase { flat_settings?: boolean local?: boolean master_timeout?: Duration + include_defaults?: boolean } export interface ClusterGetComponentTemplateResponse { @@ -8468,6 +8496,18 @@ export interface ClusterHealthShardHealthStats { unassigned_shards: integer } +export interface ClusterInfoRequest extends RequestBase { + target: ClusterInfoTargets +} + +export interface ClusterInfoResponse { + cluster_name: Name + http?: NodesHttp + ingest?: NodesIngest + thread_pool?: Record + script?: NodesScripting +} + export interface ClusterPendingTasksPendingTask { executing: boolean insert_order: integer @@ -9526,6 +9566,15 @@ export interface IndicesCacheQueries { enabled: boolean } +export interface IndicesDataLifecycle { + data_retention?: Duration +} + +export interface IndicesDataLifecycleWithRollover { + data_retention?: Duration + rollover?: IndicesDlmRolloverConditions +} + export interface IndicesDataStream { name: DataStreamName timestamp_field: IndicesDataStreamTimestampField @@ -9539,6 +9588,7 @@ export interface IndicesDataStream { ilm_policy?: Name _meta?: Metadata allow_custom_routing?: boolean + lifecycle?: IndicesDataLifecycleWithRollover } export interface IndicesDataStreamIndex { @@ -9554,6 +9604,19 @@ export interface IndicesDataStreamVisibility { hidden?: boolean } +export interface IndicesDlmRolloverConditions { + min_age?: Duration + max_age?: string + min_docs?: long + max_docs?: long + min_size?: ByteSize + max_size?: ByteSize + min_primary_shard_size?: ByteSize + max_primary_shard_size?: ByteSize + min_primary_shard_docs?: long + max_primary_shard_docs?: long +} + export interface IndicesDownsampleConfig { fixed_interval: DurationLarge } @@ -9607,10 +9670,10 @@ export interface IndicesIndexSegmentSort { } export interface IndicesIndexSettingBlocks { - read_only?: boolean - read_only_allow_delete?: boolean - read?: boolean - write?: boolean | string + read_only?: SpecUtilsStringified + read_only_allow_delete?: SpecUtilsStringified + read?: SpecUtilsStringified + write?: SpecUtilsStringified metadata?: SpecUtilsStringified } @@ -9708,6 +9771,7 @@ export interface IndicesIndexState { settings?: IndicesIndexSettings defaults?: IndicesIndexSettings data_stream?: DataStreamName + lifecycle?: IndicesDataLifecycle } export interface 
IndicesIndexTemplate { @@ -9730,6 +9794,7 @@ export interface IndicesIndexTemplateSummary { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings + lifecycle?: IndicesDataLifecycleWithRollover } export interface IndicesIndexVersioning { @@ -10152,6 +10217,15 @@ export interface IndicesDeleteAliasRequest extends RequestBase { export type IndicesDeleteAliasResponse = AcknowledgedResponseBase +export interface IndicesDeleteDataLifecycleRequest extends RequestBase { + name: DataStreamNames + expand_wildcards?: ExpandWildcards + master_timeout?: Duration + timeout?: Duration +} + +export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase + export interface IndicesDeleteDataStreamRequest extends RequestBase { name: DataStreamNames expand_wildcards?: ExpandWildcards @@ -10234,6 +10308,28 @@ export interface IndicesExistsTemplateRequest extends RequestBase { export type IndicesExistsTemplateResponse = boolean +export interface IndicesExplainDataLifecycleDataLifecycleExplain { + index: IndexName + managed_by_dlm: boolean + index_creation_date_millis?: EpochTime + time_since_index_creation?: Duration + rollover_date_millis?: EpochTime + time_since_rollover?: Duration + lifecycle?: IndicesDataLifecycleWithRollover + generation_time?: Duration + error?: string +} + +export interface IndicesExplainDataLifecycleRequest extends RequestBase { + index: Indices + include_defaults?: boolean + master_timeout?: Duration +} + +export interface IndicesExplainDataLifecycleResponse { + indices: Record +} + export interface IndicesFieldUsageStatsFieldSummary { any: uint stored_fields: uint @@ -10351,9 +10447,25 @@ export interface IndicesGetAliasRequest extends RequestBase { export type IndicesGetAliasResponse = Record +export interface IndicesGetDataLifecycleDataStreamLifecycle { + name: DataStreamName + lifecycle?: IndicesDataLifecycle +} + +export interface IndicesGetDataLifecycleRequest extends RequestBase { + name: DataStreamNames + expand_wildcards?: ExpandWildcards + include_defaults?: boolean +} + +export interface IndicesGetDataLifecycleResponse { + data_streams: IndicesGetDataLifecycleDataStreamLifecycle[] +} + export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames expand_wildcards?: ExpandWildcards + include_defaults?: boolean } export interface IndicesGetDataStreamResponse { @@ -10386,6 +10498,7 @@ export interface IndicesGetIndexTemplateRequest extends RequestBase { local?: boolean flat_settings?: boolean master_timeout?: Duration + include_defaults?: boolean } export interface IndicesGetIndexTemplateResponse { @@ -10494,10 +10607,24 @@ export interface IndicesPutAliasRequest extends RequestBase { export type IndicesPutAliasResponse = AcknowledgedResponseBase +export interface IndicesPutDataLifecycleRequest extends RequestBase { + name: DataStreamNames + expand_wildcards?: ExpandWildcards + master_timeout?: Duration + timeout?: Duration + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + data_retention?: Duration + } +} + +export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase + export interface IndicesPutIndexTemplateIndexTemplateMapping { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings + lifecycle?: IndicesDataLifecycle } export interface IndicesPutIndexTemplateRequest extends RequestBase { @@ -10895,6 +11022,7 @@ export interface IndicesSimulateIndexTemplateRequest extends RequestBase { name: Name create?: boolean master_timeout?: Duration + include_defaults?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { allow_auto_create?: boolean @@ -10920,6 +11048,7 @@ export interface IndicesSimulateTemplateRequest extends RequestBase { name?: Name create?: boolean master_timeout?: Duration + include_defaults?: boolean /** @deprecated The use of the 'body' key has been deprecated, use 'template' instead. */ body?: IndicesIndexTemplate } @@ -15486,6 +15615,14 @@ export interface RollupStopJobResponse { stopped: boolean } +export interface SearchApplicationAnalyticsCollection { + event_data_stream: SearchApplicationEventDataStream +} + +export interface SearchApplicationEventDataStream { + name: IndexName +} + export interface SearchApplicationSearchApplication { name: Name indices: IndexName[] @@ -15504,12 +15641,24 @@ export interface SearchApplicationDeleteRequest extends RequestBase { export type SearchApplicationDeleteResponse = AcknowledgedResponseBase +export interface SearchApplicationDeleteBehavioralAnalyticsRequest extends RequestBase { + name: Name +} + +export type SearchApplicationDeleteBehavioralAnalyticsResponse = AcknowledgedResponseBase + export interface SearchApplicationGetRequest extends RequestBase { name: Name } export type SearchApplicationGetResponse = SearchApplicationSearchApplication +export interface SearchApplicationGetBehavioralAnalyticsRequest extends RequestBase { + name?: Name[] +} + +export type SearchApplicationGetBehavioralAnalyticsResponse = Record + export interface SearchApplicationListRequest extends RequestBase { q?: string from?: integer @@ -15539,6 +15688,16 @@ export interface SearchApplicationPutResponse { result: Result } +export interface SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase extends AcknowledgedResponseBase { + name: Name +} + +export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestBase { + name: Name +} + +export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase + export interface SearchApplicationSearchRequest extends RequestBase { name: Name /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/
@@ -16845,7 +17004,7 @@ export interface SnapshotSnapshotInfo {

export interface SnapshotSnapshotShardFailure {
  index: IndexName
-  node_id: Id
+  node_id?: Id
  reason: string
  shard_id: Id
  status: string

From 3bf69c370eac8c945971a45a977f69a3a7363032 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Fri, 30 Jun 2023 11:15:31 -0500
Subject: [PATCH 225/647] Hide arguments heading in docs when there are no arguments (#1928)

---
 docs/reference.asciidoc     | 280 +++++++-----------------------------
 src/api/api/cluster.ts      |   2 +-
 src/api/types.ts            |  85 ++++++-----
 src/api/typesWithBodyKey.ts |  85 ++++++-----
 4 files changed, 149 insertions(+), 303 deletions(-)

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index 19ffd139a..528ff0b63 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -395,10 +395,6 @@ Returns all script contexts.
----
client.getScriptContext()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
=== get_script_languages
@@ -409,10 +405,6 @@ Returns available script types, languages and contexts
----
client.getScriptLanguages()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
=== get_source
@@ -494,10 +486,6 @@ Returns basic information about the cluster.
----
client.info()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
=== knn_search
@@ -662,10 +650,6 @@ Returns whether the cluster is running.
----
client.ping()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
=== put_script
@@ -1423,10 +1407,6 @@ Returns help for the Cat APIs.
client.cat.help()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
==== indices
@@ -1461,10 +1441,6 @@ Returns information about the master node.
client.cat.master()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
==== ml_data_frame_analytics
@@ -1579,10 +1555,6 @@ Returns information about custom node attributes.
client.cat.nodeattrs()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
==== nodes
@@ -1612,10 +1584,6 @@ Returns a concise representation of the cluster pending tasks.
client.cat.pendingTasks()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
==== plugins
@@ -1627,10 +1595,6 @@ Returns information about installed plugins across nodes.
client.cat.plugins()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
==== recovery
@@ -1662,10 +1626,6 @@ Returns information about snapshot repositories registered in the cluster.
client.cat.repositories()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
==== segments
@@ -2035,10 +1995,6 @@ Gets all stats related to cross-cluster replication.
client.ccr.stats()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
==== unfollow
@@ -2094,8 +2050,10 @@ client.cluster.deleteComponentTemplate({ name })
* *Request (object):*
** *`name` (string | string[])*: List or wildcard expression of component template names used to limit the request.
-** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
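For illustration, a minimal sketch of the call documented above, assuming a component template named `my-component-template` already exists (the name and timeout values here are hypothetical placeholders):

----
// Delete a component template, waiting up to 30s for a master-node
// connection and up to 30s for the overall response, per the
// `master_timeout` and `timeout` parameters documented above.
const response = await client.cluster.deleteComponentTemplate({
  name: 'my-component-template',
  master_timeout: '30s',
  timeout: '30s'
})
// Acknowledged-style response body, e.g. { acknowledged: true }
console.log(response.acknowledged)
----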
[discrete]
==== delete_voting_config_exclusions
@@ -2154,11 +2112,14 @@ client.cluster.getComponentTemplate({ ... })
==== Arguments

* *Request (object):*
-** *`name` (Optional, string)*: The comma separated names of the component templates
-** *`flat_settings` (Optional, boolean)*
-** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false)
-** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
+** *`name` (Optional, string)*: List of component template names used to limit the request.
+Wildcard (`*`) expressions are supported.
+** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
** *`include_defaults` (Optional, boolean)*: Return all default configurations for the component template (default: false)
+** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
+If `false`, information is retrieved from the master node.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
[discrete]
==== get_settings
@@ -2174,10 +2135,12 @@ client.cluster.getSettings({ ... })
==== Arguments

* *Request (object):*
-** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
-** *`include_defaults` (Optional, boolean)*: Whether to return all default clusters setting.
-** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
+** *`include_defaults` (Optional, boolean)*: If `true`, returns default cluster settings from the local node.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
[discrete]
==== health
@@ -2279,18 +2242,26 @@ client.cluster.putComponentTemplate({ name, template })
==== Arguments

* *Request (object):*
-** *`name` (string)*: The name of the template
+** *`name` (string)*: Name of the component template to create.
+Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mapping`; `synthetics-settings`.
+Elastic Agent uses these templates to configure backing indices for its data streams.
+If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version.
+If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API.
** *`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })*: The template to be applied which includes mappings, settings, or aliases configuration.
-** *`create` (Optional, boolean)*: Whether the index template should only be added if new or can also replace an existing one
-** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
-** *`version` (Optional, number)*: Version number used to manage component templates externally.
-This number isn't automatically generated or incremented by Elasticsearch.
-** *`_meta` (Optional, Record)*: Optional user metadata about the component template.
-May have any contents. This map is not automatically generated by Elasticsearch.
+** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing component templates.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting.
If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`.
If set to `false` then data streams matching the template must always be explicitly created.
+** *`version` (Optional, number)*: Version number used to manage component templates externally.
+This number isn't automatically generated or incremented by Elasticsearch.
+To unset a version, replace the template without specifying a version.
+** *`_meta` (Optional, Record)*: Optional user metadata about the component template.
+May have any contents. This map is not automatically generated by Elasticsearch.
+This information is stored in the cluster state, so keeping it short is preferable.
+To unset `_meta`, replace the template without specifying this information.
[discrete]
==== put_settings
@@ -2322,10 +2293,6 @@ Returns the information about configured remote clusters.
client.cluster.remoteInfo()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
==== reroute
@@ -2389,8 +2356,10 @@ client.cluster.stats({ ... })
* *Request (object):*
** *`node_id` (Optional, string | string[])*: List of node filters used to limit returned information. Defaults to all nodes in the cluster.
-** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s _nodes.failed property. Defaults to no timeout.
+** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for each node to respond.
+If a node does not respond before its timeout expires, the response does not include its stats.
+However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout.
[discrete]
=== dangling_indices
@@ -2442,10 +2411,6 @@ Returns all dangling indices.
client.danglingIndices.listDanglingIndices()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
=== enrich
@@ -2527,10 +2492,6 @@ Gets enrich coordinator statistics and information about enrich policies that ar
client.enrich.stats()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
=== eql
@@ -2629,10 +2590,6 @@ Gets a list of features which can be included in snapshots using the feature_sta
client.features.getFeatures()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
==== reset_features
@@ -2644,10 +2601,6 @@ Resets the internal state of features, usually by deleting system indices
client.features.resetFeatures()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
=== fleet
@@ -2882,10 +2835,6 @@ Retrieves the current index lifecycle management (ILM) status.
client.ilm.getStatus()
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
[discrete]
==== migrate_to_data_tiers
@@ -3163,7 +3112,12 @@ client.indices.createDataStream({ name })
==== Arguments

* *Request (object):*
-** *`name` (string)*: The name of the data stream
+** *`name` (string)*: Name of the data stream, which must meet the following criteria:
+Lowercase only;
+Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character;
+Cannot start with `-`, `_`, `+`, or `.ds-`;
+Cannot be `.` or `..`;
+Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster.
[discrete]
==== data_streams_stats
@@ -3255,8 +3209,8 @@ client.indices.deleteDataStream({ name })
==== Arguments

* *Request (object):*
-** *`name` (string | string[])*: A list of data streams to delete; use `*` to delete all data streams
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+** *`name` (string | string[])*: List of data streams to delete. Wildcard (`*`) expressions are supported.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.
[discrete]
==== delete_index_template
@@ -3586,8 +3540,10 @@ client.indices.getDataStream({ ... })
==== Arguments

* *Request (object):*
-** *`name` (Optional, string | string[])*: A list of data streams to get; use `*` to get all data streams
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+** *`name` (Optional, string | string[])*: List of data stream names used to limit the request.
+Wildcard (`*`) expressions are supported. If omitted, all data streams are returned.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match.
+Supports a list of values, such as `open,hidden`.
** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
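For illustration, a minimal sketch of the call above, assuming data streams matching the hypothetical pattern `logs-*` exist:

----
// List data streams matching a wildcard pattern, including default
// configuration values in the response (see `include_defaults` above).
const { data_streams } = await client.indices.getDataStream({
  name: 'logs-*',
  expand_wildcards: 'open',
  include_defaults: true
})
for (const stream of data_streams) {
  console.log(stream.name)
}
----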
[discrete] @@ -4296,10 +4252,6 @@ Returns statistical information about geoip databases client.ingest.geoIpStats() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== get_pipeline @@ -4329,10 +4281,6 @@ Returns a list of the built-in patterns. client.ingest.processorGrok() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== put_pipeline @@ -4389,10 +4337,6 @@ Deletes licensing information for the cluster client.license.delete() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== get @@ -4422,10 +4366,6 @@ Retrieves information about the status of the basic license. client.license.getBasicStatus() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== get_trial_status @@ -4437,10 +4377,6 @@ Retrieves information about the status of the trial license. client.license.getTrialStatus() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== post @@ -4571,10 +4507,6 @@ Find out whether system features need to be upgraded or not client.migration.getFeatureUpgradeStatus() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== post_feature_upgrade @@ -4586,10 +4518,6 @@ Begin upgrades for system features client.migration.postFeatureUpgrade() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] === ml @@ -5572,10 +5500,6 @@ Returns defaults and limits used by machine learning. client.ml.info() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== open_job @@ -6436,10 +6360,6 @@ Updates certain properties of trained model deployment. client.ml.updateTrainedModelDeployment() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== upgrade_job_snapshot @@ -6866,10 +6786,6 @@ http://todo.com/tbd[Endpoint documentation] client.searchApplication.postBehavioralAnalyticsEvent() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== put @@ -6914,10 +6830,6 @@ Renders a query for given search application search parameters client.searchApplication.renderQuery() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== search @@ -7029,10 +6941,6 @@ Enables authentication as a user and retrieve information about the authenticate client.security.authenticate() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== bulk_update_api_keys @@ -7044,10 +6952,6 @@ Updates the attributes of multiple existing API keys. client.security.bulkUpdateApiKeys() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== change_password @@ -7185,10 +7089,6 @@ Creates a cross-cluster API key for API key based remote cluster access. client.security.createCrossClusterApiKey() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== create_service_token @@ -7341,10 +7241,6 @@ Allows a kibana instance to configure itself to communicate with a secured elast client.security.enrollKibana() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== enroll_node @@ -7356,10 +7252,6 @@ Allows a new node to enroll to an existing cluster with security enabled. 
client.security.enrollNode() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== get_api_key @@ -7395,10 +7287,6 @@ Retrieves the list of cluster privileges and index privileges that are available client.security.getBuiltinPrivileges() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== get_privileges @@ -7629,10 +7517,6 @@ Exchanges an OpenID Connection authentication response message for an Elasticsea client.security.oidcAuthenticate() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== oidc_logout @@ -7644,10 +7528,6 @@ Invalidates a refresh token and access token that was generated from the OpenID client.security.oidcLogout() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== oidc_prepare_authentication @@ -7659,10 +7539,6 @@ Creates an OAuth 2.0 authentication request as a URL string client.security.oidcPrepareAuthentication() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== put_privileges @@ -7923,10 +7799,6 @@ Updates attributes of an existing cross-cluster API key. client.security.updateCrossClusterApiKey() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] === slm @@ -7972,10 +7844,6 @@ Deletes any snapshots that are expired according to the policy's retention rules client.slm.executeRetention() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== get_lifecycle @@ -8003,10 +7871,6 @@ Returns global and policy-level statistics about actions taken by snapshot lifec client.slm.getStats() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== get_status @@ -8018,10 +7882,6 @@ Retrieves the status of snapshot lifecycle management (SLM). client.slm.getStatus() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== put_lifecycle @@ -8056,10 +7916,6 @@ Turns on snapshot lifecycle management (SLM). client.slm.start() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== stop @@ -8071,10 +7927,6 @@ Turns off snapshot lifecycle management (SLM). client.slm.stop() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] === snapshot @@ -8259,10 +8111,6 @@ Analyzes a repository for correctness and performance client.snapshot.repositoryAnalyze() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== restore @@ -8467,10 +8315,6 @@ Retrieves information about the X.509 certificates used to encrypt communication client.ssl.certificates() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] === synonyms @@ -8484,10 +8328,6 @@ Deletes a synonym set client.synonyms.delete() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== get @@ -8499,10 +8339,6 @@ Retrieves a synonym set client.synonyms.get() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== put @@ -8514,10 +8350,6 @@ Creates or updates a synonyms set client.synonyms.put() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] === tasks @@ -9007,10 +8839,6 @@ Retrieve settings for the watcher system index client.watcher.getSettings() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== get_watch @@ -9085,10 +8913,6 @@ Starts Watcher if it is not already running. client.watcher.start() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== stats @@ -9117,10 +8941,6 @@ Stops Watcher if it is running. 
client.watcher.stop() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] ==== update_settings @@ -9132,10 +8952,6 @@ Update settings for the watcher system index client.watcher.updateSettings() ---- -[discrete] -==== Arguments - -* *Request (object):* [discrete] === xpack diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index be7188fd7..c177e0760 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -346,7 +346,7 @@ export default class Cluster { async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['template', 'version', '_meta', 'allow_auto_create'] + const acceptedBody: string[] = ['allow_auto_create', 'template', 'version', '_meta'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/types.ts b/src/api/types.ts index 44a5659fd..fba574307 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -517,6 +517,7 @@ export interface HealthReportIndicators { repository_integrity?: HealthReportRepositoryIntegrityIndicator ilm?: HealthReportIlmIndicator slm?: HealthReportSlmIndicator + shards_capacity?: HealthReportShardsCapacityIndicator } export interface HealthReportMasterIsStableIndicator extends HealthReportBaseIndicator { @@ -579,6 +580,20 @@ export interface HealthReportShardsAvailabilityIndicatorDetails { unassigned_replicas: long } +export interface HealthReportShardsCapacityIndicator extends HealthReportBaseIndicator { + details?: HealthReportShardsCapacityIndicatorDetails +} + +export interface HealthReportShardsCapacityIndicatorDetails { + data: HealthReportShardsCapacityIndicatorTierDetail + frozen: HealthReportShardsCapacityIndicatorTierDetail +} + +export interface HealthReportShardsCapacityIndicatorTierDetail { + max_shards_in_cluster: integer + current_used_shards?: integer +} + export interface HealthReportSlmIndicator extends HealthReportBaseIndicator { details?: HealthReportSlmIndicatorDetails } @@ -8324,9 +8339,9 @@ export type ClusterExistsComponentTemplateResponse = boolean export interface ClusterGetComponentTemplateRequest extends RequestBase { name?: Name flat_settings?: boolean + include_defaults?: boolean local?: boolean master_timeout?: Duration - include_defaults?: boolean } export interface ClusterGetComponentTemplateResponse { @@ -8446,10 +8461,10 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { name: Name create?: boolean master_timeout?: Duration + allow_auto_create?: boolean template: IndicesIndexState version?: VersionNumber _meta?: Metadata - allow_auto_create?: boolean } export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase @@ -8584,14 +8599,14 @@ export interface ClusterStateRequest extends RequestBase { export type ClusterStateResponse = any export interface ClusterStatsCharFilterTypes { - char_filter_types: ClusterStatsFieldTypes[] - tokenizer_types: ClusterStatsFieldTypes[] - filter_types: ClusterStatsFieldTypes[] analyzer_types: ClusterStatsFieldTypes[] + built_in_analyzers: ClusterStatsFieldTypes[] built_in_char_filters: ClusterStatsFieldTypes[] - built_in_tokenizers: ClusterStatsFieldTypes[] built_in_filters: ClusterStatsFieldTypes[] - 
built_in_analyzers: ClusterStatsFieldTypes[] + built_in_tokenizers: ClusterStatsFieldTypes[] + char_filter_types: ClusterStatsFieldTypes[] + filter_types: ClusterStatsFieldTypes[] + tokenizer_types: ClusterStatsFieldTypes[] } export interface ClusterStatsClusterFileSystem { @@ -8601,6 +8616,7 @@ export interface ClusterStatsClusterFileSystem { } export interface ClusterStatsClusterIndices { + analysis: ClusterStatsCharFilterTypes completion: CompletionStats count: long docs: DocStats @@ -8610,7 +8626,6 @@ export interface ClusterStatsClusterIndices { shards: ClusterStatsClusterIndicesShards store: StoreStats mappings: ClusterStatsFieldTypesMappings - analysis: ClusterStatsCharFilterTypes versions?: ClusterStatsIndicesVersions[] } @@ -8662,24 +8677,25 @@ export interface ClusterStatsClusterNetworkTypes { export interface ClusterStatsClusterNodeCount { coordinating_only: integer data: integer - ingest: integer - master: integer - total: integer - voting_only: integer data_cold: integer - data_frozen?: integer data_content: integer - data_warm: integer + data_frozen?: integer data_hot: integer + data_warm: integer + ingest: integer + master: integer ml: integer remote_cluster_client: integer + total: integer transform: integer + voting_only: integer } export interface ClusterStatsClusterNodes { count: ClusterStatsClusterNodeCount discovery_types: Record fs: ClusterStatsClusterFileSystem + indexing_pressure: ClusterStatsIndexingPressure ingest: ClusterStatsClusterIngest jvm: ClusterStatsClusterJvm network_types: ClusterStatsClusterNetworkTypes @@ -8688,21 +8704,20 @@ export interface ClusterStatsClusterNodes { plugins: PluginStats[] process: ClusterStatsClusterProcess versions: VersionString[] - indexing_pressure: ClusterStatsIndexingPressure } export interface ClusterStatsClusterOperatingSystem { allocated_processors: integer + architectures?: ClusterStatsClusterOperatingSystemArchitecture[] available_processors: integer mem: ClusterStatsOperatingSystemMemoryInfo names: ClusterStatsClusterOperatingSystemName[] pretty_names: ClusterStatsClusterOperatingSystemPrettyName[] - architectures?: ClusterStatsClusterOperatingSystemArchitecture[] } export interface ClusterStatsClusterOperatingSystemArchitecture { - count: integer arch: string + count: integer } export interface ClusterStatsClusterOperatingSystemName { @@ -8768,8 +8783,8 @@ export interface ClusterStatsIndexingPressure { } export interface ClusterStatsIndexingPressureMemory { - limit_in_bytes: long current: ClusterStatsIndexingPressureMemorySummary + limit_in_bytes: long total: ClusterStatsIndexingPressureMemorySummary } @@ -8798,12 +8813,12 @@ export interface ClusterStatsNodePackagingType { } export interface ClusterStatsOperatingSystemMemoryInfo { + adjusted_total_in_bytes?: long free_in_bytes: long free_percent: integer total_in_bytes: long used_in_bytes: long used_percent: integer - adjusted_total_in_bytes?: long } export interface ClusterStatsRequest extends RequestBase { @@ -8815,20 +8830,20 @@ export interface ClusterStatsRequest extends RequestBase { export type ClusterStatsResponse = ClusterStatsStatsResponseBase export interface ClusterStatsRuntimeFieldTypes { - name: Name + chars_max: integer + chars_total: integer count: integer + doc_max: integer + doc_total: integer index_count: integer - scriptless_count: integer - shadowed_count: integer lang: string[] lines_max: integer lines_total: integer - chars_max: integer - chars_total: integer + name: Name + scriptless_count: integer + shadowed_count: integer source_max: integer 
source_total: integer - doc_max: integer - doc_total: integer } export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { @@ -9453,19 +9468,19 @@ export interface IndicesDataLifecycleWithRollover { } export interface IndicesDataStream { - name: DataStreamName - timestamp_field: IndicesDataStreamTimestampField - indices: IndicesDataStreamIndex[] + _meta?: Metadata + allow_custom_routing?: boolean generation: integer - template: Name hidden: boolean - replicated?: boolean - system?: boolean - status: HealthStatus ilm_policy?: Name - _meta?: Metadata - allow_custom_routing?: boolean + indices: IndicesDataStreamIndex[] lifecycle?: IndicesDataLifecycleWithRollover + name: DataStreamName + replicated?: boolean + status: HealthStatus + system?: boolean + template: Name + timestamp_field: IndicesDataStreamTimestampField } export interface IndicesDataStreamIndex { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 6f2887bd5..66aea5837 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -538,6 +538,7 @@ export interface HealthReportIndicators { repository_integrity?: HealthReportRepositoryIntegrityIndicator ilm?: HealthReportIlmIndicator slm?: HealthReportSlmIndicator + shards_capacity?: HealthReportShardsCapacityIndicator } export interface HealthReportMasterIsStableIndicator extends HealthReportBaseIndicator { @@ -600,6 +601,20 @@ export interface HealthReportShardsAvailabilityIndicatorDetails { unassigned_replicas: long } +export interface HealthReportShardsCapacityIndicator extends HealthReportBaseIndicator { + details?: HealthReportShardsCapacityIndicatorDetails +} + +export interface HealthReportShardsCapacityIndicatorDetails { + data: HealthReportShardsCapacityIndicatorTierDetail + frozen: HealthReportShardsCapacityIndicatorTierDetail +} + +export interface HealthReportShardsCapacityIndicatorTierDetail { + max_shards_in_cluster: integer + current_used_shards?: integer +} + export interface HealthReportSlmIndicator extends HealthReportBaseIndicator { details?: HealthReportSlmIndicatorDetails } @@ -8416,9 +8431,9 @@ export type ClusterExistsComponentTemplateResponse = boolean export interface ClusterGetComponentTemplateRequest extends RequestBase { name?: Name flat_settings?: boolean + include_defaults?: boolean local?: boolean master_timeout?: Duration - include_defaults?: boolean } export interface ClusterGetComponentTemplateResponse { @@ -8540,10 +8555,10 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { master_timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { + allow_auto_create?: boolean template: IndicesIndexState version?: VersionNumber _meta?: Metadata - allow_auto_create?: boolean } } @@ -8685,14 +8700,14 @@ export interface ClusterStateRequest extends RequestBase { export type ClusterStateResponse = any export interface ClusterStatsCharFilterTypes { - char_filter_types: ClusterStatsFieldTypes[] - tokenizer_types: ClusterStatsFieldTypes[] - filter_types: ClusterStatsFieldTypes[] analyzer_types: ClusterStatsFieldTypes[] + built_in_analyzers: ClusterStatsFieldTypes[] built_in_char_filters: ClusterStatsFieldTypes[] - built_in_tokenizers: ClusterStatsFieldTypes[] built_in_filters: ClusterStatsFieldTypes[] - built_in_analyzers: ClusterStatsFieldTypes[] + built_in_tokenizers: ClusterStatsFieldTypes[] + char_filter_types: ClusterStatsFieldTypes[] + filter_types: ClusterStatsFieldTypes[] + tokenizer_types: ClusterStatsFieldTypes[] } export interface ClusterStatsClusterFileSystem { @@ -8702,6 +8717,7 @@ export interface ClusterStatsClusterFileSystem { } export interface ClusterStatsClusterIndices { + analysis: ClusterStatsCharFilterTypes completion: CompletionStats count: long docs: DocStats @@ -8711,7 +8727,6 @@ export interface ClusterStatsClusterIndices { shards: ClusterStatsClusterIndicesShards store: StoreStats mappings: ClusterStatsFieldTypesMappings - analysis: ClusterStatsCharFilterTypes versions?: ClusterStatsIndicesVersions[] } @@ -8763,24 +8778,25 @@ export interface ClusterStatsClusterNetworkTypes { export interface ClusterStatsClusterNodeCount { coordinating_only: integer data: integer - ingest: integer - master: integer - total: integer - voting_only: integer data_cold: integer - data_frozen?: integer data_content: integer - data_warm: integer + data_frozen?: integer data_hot: integer + data_warm: integer + ingest: integer + master: integer ml: integer remote_cluster_client: integer + total: integer transform: integer + voting_only: integer } export interface ClusterStatsClusterNodes { count: ClusterStatsClusterNodeCount discovery_types: Record fs: ClusterStatsClusterFileSystem + indexing_pressure: ClusterStatsIndexingPressure ingest: ClusterStatsClusterIngest jvm: ClusterStatsClusterJvm network_types: ClusterStatsClusterNetworkTypes @@ -8789,21 +8805,20 @@ export interface ClusterStatsClusterNodes { plugins: PluginStats[] process: ClusterStatsClusterProcess versions: VersionString[] - indexing_pressure: ClusterStatsIndexingPressure } export interface ClusterStatsClusterOperatingSystem { allocated_processors: integer + architectures?: ClusterStatsClusterOperatingSystemArchitecture[] available_processors: integer mem: ClusterStatsOperatingSystemMemoryInfo names: ClusterStatsClusterOperatingSystemName[] pretty_names: ClusterStatsClusterOperatingSystemPrettyName[] - architectures?: ClusterStatsClusterOperatingSystemArchitecture[] } export interface ClusterStatsClusterOperatingSystemArchitecture { - count: integer arch: string + count: integer } export interface ClusterStatsClusterOperatingSystemName { @@ -8869,8 +8884,8 @@ export interface ClusterStatsIndexingPressure { } export interface ClusterStatsIndexingPressureMemory { - limit_in_bytes: long current: ClusterStatsIndexingPressureMemorySummary + limit_in_bytes: long total: ClusterStatsIndexingPressureMemorySummary } @@ -8899,12 +8914,12 @@ export interface ClusterStatsNodePackagingType { } export interface ClusterStatsOperatingSystemMemoryInfo { + adjusted_total_in_bytes?: long free_in_bytes: long free_percent: integer total_in_bytes: long used_in_bytes: long 
used_percent: integer - adjusted_total_in_bytes?: long } export interface ClusterStatsRequest extends RequestBase { @@ -8916,20 +8931,20 @@ export interface ClusterStatsRequest extends RequestBase { export type ClusterStatsResponse = ClusterStatsStatsResponseBase export interface ClusterStatsRuntimeFieldTypes { - name: Name + chars_max: integer + chars_total: integer count: integer + doc_max: integer + doc_total: integer index_count: integer - scriptless_count: integer - shadowed_count: integer lang: string[] lines_max: integer lines_total: integer - chars_max: integer - chars_total: integer + name: Name + scriptless_count: integer + shadowed_count: integer source_max: integer source_total: integer - doc_max: integer - doc_total: integer } export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { @@ -9576,19 +9591,19 @@ export interface IndicesDataLifecycleWithRollover { } export interface IndicesDataStream { - name: DataStreamName - timestamp_field: IndicesDataStreamTimestampField - indices: IndicesDataStreamIndex[] + _meta?: Metadata + allow_custom_routing?: boolean generation: integer - template: Name hidden: boolean - replicated?: boolean - system?: boolean - status: HealthStatus ilm_policy?: Name - _meta?: Metadata - allow_custom_routing?: boolean + indices: IndicesDataStreamIndex[] lifecycle?: IndicesDataLifecycleWithRollover + name: DataStreamName + replicated?: boolean + status: HealthStatus + system?: boolean + template: Name + timestamp_field: IndicesDataStreamTimestampField } export interface IndicesDataStreamIndex { From b223d8597c8cc01d8831ace1ee3613bcb732878d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Wed, 5 Jul 2023 16:28:26 +0200 Subject: [PATCH 226/647] [DOCS] Adds getting started content based on the template (#1929) Co-authored-by: Josh Mock --- docs/getting-started.asciidoc | 170 +++++++++++++++++++++++++++++++++ docs/images/create-api-key.png | Bin 0 -> 80572 bytes docs/images/es-endpoint.jpg | Bin 0 -> 369643 bytes docs/index.asciidoc | 1 + docs/introduction.asciidoc | 60 ------------ 5 files changed, 171 insertions(+), 60 deletions(-) create mode 100644 docs/getting-started.asciidoc create mode 100644 docs/images/create-api-key.png create mode 100644 docs/images/es-endpoint.jpg diff --git a/docs/getting-started.asciidoc b/docs/getting-started.asciidoc new file mode 100644 index 000000000..d272d1302 --- /dev/null +++ b/docs/getting-started.asciidoc @@ -0,0 +1,170 @@ +[[getting-started-js]] +== Getting started + +This page guides you through the installation process of the Node.js client, +shows you how to instantiate the client, and how to perform basic Elasticsearch +operations with it. + +[discrete] +=== Requirements + +* https://nodejs.org/[Node.js] version 14.x or newer +* https://docs.npmjs.com/downloading-and-installing-node-js-and-npm[`npm`], usually bundled with Node.js + +[discrete] +=== Installation + +To install the latest version of the client, run the following command: + +[source,shell] +-------------------------- +npm install @elastic/elasticsearch +-------------------------- + +Refer to the <> page to learn more. + + +[discrete] +=== Connecting + +You can connect to the Elastic Cloud using an API key and the Elasticsearch +endpoint. 
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  node: 'https://...', // Elasticsearch endpoint
+  auth: {
+    apiKey: { // API key ID and secret
+      id: 'foo',
+      api_key: 'bar',
+    }
+  }
+})
+----
+
+Your Elasticsearch endpoint can be found on the **My deployment** page of your
+deployment:
+
+image::images/es-endpoint.jpg[alt="Finding Elasticsearch endpoint",align="center"]
+
+You can generate an API key on the **Management** page under Security.
+
+image::images/create-api-key.png[alt="Create API key",align="center"]
+
+For other connection options, refer to the <> section.
+
+
+[discrete]
+=== Operations
+
+Time to use Elasticsearch! This section walks you through the basic, and most
+important, operations of Elasticsearch.
+
+
+[discrete]
+==== Creating an index
+
+This is how you create the `my_index` index:
+
+[source,js]
+----
+await client.indices.create({ index: 'my_index' })
+----
+
+
+[discrete]
+==== Indexing documents
+
+This is a simple way of indexing a document:
+
+[source,js]
+----
+await client.index({
+  index: 'my_index',
+  id: 'my_document_id',
+  document: {
+    foo: 'foo',
+    bar: 'bar',
+  },
+})
+----
+
+
+[discrete]
+==== Getting documents
+
+You can get documents by using the following code:
+
+[source,js]
+----
+await client.get({
+  index: 'my_index',
+  id: 'my_document_id',
+})
+----
+
+
+[discrete]
+==== Searching documents
+
+This is how you can create a single match query with the client:
+
+[source,js]
+----
+await client.search({
+  query: {
+    match: {
+      foo: 'foo'
+    }
+  }
+})
+----
+
+
+[discrete]
+==== Updating documents
+
+This is how you can update a document, for example to add a new field:
+
+[source,js]
+----
+await client.update({
+  index: 'my_index',
+  id: 'my_document_id',
+  doc: {
+    foo: 'bar',
+    new_field: 'new value'
+  }
+})
+----
+
+
+[discrete]
+==== Deleting documents
+
+[source,js]
+----
+await client.delete({
+  index: 'my_index',
+  id: 'my_document_id',
+})
+----
+
+
+[discrete]
+==== Deleting an index
+
+[source,js]
+----
+await client.indices.delete({ index: 'my_index' })
+----
+
+
+[discrete]
+== Further reading
+
+* Use <> for a more comfortable experience with the APIs.
+* For an elaborate example of how to ingest data into Elastic Cloud,
+refer to {cloud}/ec-getting-started-node-js.html[this page].
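Taken together, the snippets in the new guide compose into a single round trip. Below is a minimal end-to-end sketch (illustrative only, not part of this diff): the endpoint and API key values are the guide's own placeholders, and the `run` wrapper plus the `indices.refresh` call are additions so the `await` calls have an async scope and the freshly indexed document is visible to the search.

[source,js]
----
const { Client } = require('@elastic/elasticsearch')

async function run () {
  const client = new Client({
    node: 'https://...', // Elasticsearch endpoint (placeholder)
    auth: {
      apiKey: { // API key ID and secret (placeholders)
        id: 'foo',
        api_key: 'bar',
      }
    }
  })

  // Create an index and add a document to it.
  await client.indices.create({ index: 'my_index' })
  await client.index({
    index: 'my_index',
    id: 'my_document_id',
    document: { foo: 'foo', bar: 'bar' },
  })

  // Search is near-real-time: refresh so the new document is searchable now.
  await client.indices.refresh({ index: 'my_index' })

  const result = await client.search({
    index: 'my_index',
    query: { match: { foo: 'foo' } }
  })
  console.log(result.hits.hits)

  // Clean up.
  await client.indices.delete({ index: 'my_index' })
}

run().catch(console.log)
----

Without the refresh, the search can race the indexing and come back empty, since newly indexed documents only become searchable after the next refresh.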
diff --git a/docs/images/create-api-key.png b/docs/images/create-api-key.png
new file mode 100644
index 0000000000000000000000000000000000000000..d75c230300b1509869c461e5f38ddcc3d9d3d690
GIT binary patch
literal 80572
[base85-encoded binary data for the "Create API key" screenshot omitted]
zmIttI?jx#vPw;iqz>x{R)y((`(iE-c9*_LyG3NV19qix)kbXrG<sM{`E>vyN}KaIIuTXQ*P0S#r8Nj}O~J+zSV^@~q5k?5N2V!qLo2-;AWP~VZIVi4-^>Y=f+|;?+1l;y&e$Cw~fTg(E_@CUgms}}x-GA8bwosM;ryQC8 zonp~(O8|IG{Kjcz8q`ws5phZPbkLV9x8a>@Bx+ep%SZvX`3_i3mo|-OO|Vqda7p;_X;RWsOvZe4CFd)KDaaDk!0(529r~Rcx}afR#ctKhE{wp zwP!!xn{JB;``nVV^nl-X`CJV%-NJw@T&<^r>Ir>&#p$8#?m49P+ciHw@iRW&csS`y zhWkpfwYhJbDU%N*s^TuUOtzMA@4JQJX9u_g=NYdM>q%M)RsY_7;X5F-4%_D2u1&>`>?^AFh%`Z32C-Gf;d8FqeN3WsEKFPPaqmNt= zqDKMtyhka`#cTFE9{2C0e99fwCb>Ygs-6plr`20pZffKCI-P;lw~e|R zVQAfYs!TnIN3Yr*6#~ z!A5_O{oFmnU9R4gRbU80mbDoP>?lEkCmie3lpZeI7@W>oBDIkB#iOQjm@2asPrLcU z;ww&BinYdo3z7QF@!a(r-#z^rQ>Rnb0}+_lRZpc+)1KO&O;uM;i|*EgJ&;QnmWNDW zvba1APe`^`H~Zt5qI!G92?+^z7xJq^sVBtIBWRW38)$xv+UU&Kkx-PG^w;x}R%Ci# z-T(wa$rbu5>KxO9T#g8=T*V?60(X_qOA&@CmX`G7<*%P^? z;X=R}oKoMAj{ixlH-8Bk%e;K%!Jy&*OizkRFZM0-4Re+SR>K|!B8^%IV3$nD8Ouoc zF_RvOhg3(Pcn4ZnwR<0W2PfJ{%ule z=c=xzV2>Kj9`)U>N_nr89)jhx_0sb6oCjLTQJ$(IUDm*Iy-LrXcLZ^Ds0VpwLup`C z16`K<5d_10Rd(Plh2pSIB4rVoh%0@ED>KUB-5aRdlxDF?osmbArjpLM1g{%w+ic;t z;}YXeC$4L#U<>A{E~{g&rL}zi_8In2iJ|EX`+zY9&DKHU>LWw{XnD8Fm(KM+N>a;# z!!ZHW90?D%TX{qGZci0|Me4E%UC)eoSbo2uunQb@9P{mn4d!#f&jO6hi-95Rd&mUX zNFyyC?JeO;4oJvnDu@_z2GU;<3Rj@3pm|eb`c>99N~sD($hmO>k8`u^eNLyilTbu)EW1lN9y^RT2CfnM;puoxxV z+GcXeJ!}Sp?UHO$X%x5(_qgbEaqW=0=_jrF#8MpvNqNfIJu^36_rOot#WM7rHc08O(w$!eB7;o3p0@V$1YQg^sh~2(= zCHT~G6Vhwi9B$+}m>gRJub5>rU7(nV=YfM#{#({en-7#Pp+29BlxF1{cS$^INO)V>Jq@DYJ;oAB8B1uYF~yDnAi_4 ze#PRV>tm`N#3y#c}KGMvtycr`rgCTbzxK3HjZJvElTge(Z}k7b*!= zW_xHQ;CVxNb=Ny2#D{oZ2HPQqt%y2kCXOgXoMAJ^juM(fMM{I^pe%*I(?{aB@1l0W^LK&Y?~}Hq@hZ-~N1P{7&2- z)(`3QHbPA)wjwz^xnx7VxhVWq6XM%{)ECqD=p%j0K|%-G>&8Dex4VOq>ufxfWbD&! z&fG9p?Tw#Fn((vU4W~1dKo^G#)U7CTuXD!JHlUbKjJ_hc)2j>I5E|VM7pL_7a3mHrwX-ZX zc86a(hGrq(sh1hC_zoV`2g|^e2Qe25%FIKKJ}Yj&6Mc3mE{k}I=>K7rqULhr;;u4} z3hPcFtY0|s4{8P=3SeQbUu+a!+o1+mzINqH(*7ob?Q)YSwy%#hHesvX03aJ`z#AFA zrY3i%3zeyXZXS`a!o&yNVvIw(7>7N4M)XFtNVb~WvIiZyyfehKs7e}z0{$Iy*o4Cd z>dkb>&gy*3s(fNf+w7nx!D8G<{sSccpC-(Edkk!Ok<`;{iP$nM4`OCRW|J5-R?kc- zsIVpo+K2{x_R3%`-enra$yni}IIp$uh-cd3nAd{`m)70FEdf?$Tk#1AN-F$moqVW$ z+TUesrllI~aFRI!a?px{dzKhG1nGT75>(1jXlhi;D!Sc8jcz2|qK# zJPeRE*T41f>>8d?Pp!z}o=EHFUcGCl%W?@~)d3ea_z#0C%9^Xk6H>`&? zKn*dD-wg30F_Cq2txH|A=*122dW?%+_V2XW`7Qj-7jY10l@%$x*tTRHy$p_gMEWTWOaQq(PWvOQsMonaz(RID?8bL z>WQ<81QPAT>v&R{{7uk%pkmBfc&g*?hFFS$qKKqDdGRnK&4qNf_ouE5HruViX{V@+ z9Em9$?`(GpV)#s`jjVZ&s}hF#2gpX_x`)yWH2MgL-$`4E0z% z!B*fTS04o1(04V>M1xD4XPq;aKHCB{t>RMYu18KRSBwOz6QARpXR)lK2_y#UjL z+lU9W7XK3q5ZKISw#mPI^WfE7#)aMFpIo|p)eoIbhDGcK|D*_p&H~*&zzHVzCktaK z?P;QDeQcK;7}}BIDA60}!|vToFvx%=%FV7pQ{}Q}kUU@<$LH{xvO00qS#We>j~flY zLXO3-F+cW!ZH30w6wzMrF}cBIhL1Lpqd<7HbBat%Nl}V4rNiUIh9+OUZcQzm7bP8A z&6!Pv<45+Mf|7BXc;q7mj-*ES?;cD&I~Fn?4*$y~gnBg0N|#&IyiD_a?z!Yv2Q0H@ z>EmSVK;w~KtCWt39|jHjqFpR%gG%MkFiLxq;RLCj$;~4A19UlLTT`TJl4-a2#

    * zQaWP}L=8%V@fqKZDSM^F7&9Gwbx344QtE6Nd1=`$M}QnO5M`)WuoJCXI0XuT+P#>&H3MS<&({pe3faAKSuubX2KW(Irw1PJ4_#-S-lXXRY>tC=C7OG4*mZ2*D-z> zu<(;)+UFlSAk?D$>G$W#x#~9s&vkb~#p4>62&ZJvVh0W7hxUAE!TDd5cCVLKD zS4#_AvCs9@92?dLzx@7#mhslIVM$y~eLkfG_sym>j{PGr{_oY7j^}R}%>T0UUqYV$ zf3N*F3217-f}=QEcx-vHyt*-E$vM2yTUTq;gMS{F1!4@f?@fyRs}Zb|27~z~(y=Pe z3Oa>ub%3kFli1wM;9530ia+EpCH07*lFN+pcX|-(_B(UlhE(>~x}%S%!7 zbx3!%V^3Tpt|6zpIYV(3-DXk=IWRmUz=Z)$LcU;Z3s^Jab8lofNkY}FAq53eb$xP! z^uM6s`D3O%mO7VJ=p?sNT#s>4}##9pUUzgy0?J{?U5vOX6!sdc?sVRu5SbCJw^#Gh$KfhR4MX?=j{! zsKuaZT18SDP*GfnT|KBY9MX>+4(QPBj^z(aDe_7mDX_h?-gzr{O~5!r8a{2=yPiDM zoFe5CmT2SV(gZ7U%{XCcEGynsR0stU{%d~smZO;;fA<7;=+aQ%lUue*FL}YR^oVV# z*==(O^@Pq_cEmgi#+8Jg{8M;#jsaRN%Cj4pjx4s-@*)P(9>K1tAcI%UnOg9IMEEHc zKOM&~Q*CcxDL_=9%!98h`!u>3`mo!C8fxIGWv^;m7UT=}HBP z#_n$pNTY0hX?IXZ^v=fq$S2@&WRHG_ zK7stmi6Sn>)Ok%)zWStd)?4CAUERKy&t)6E3QHctziW0k`ZY*E-HzA1Zw@@%SgwjGYog|WB#yf>#S~pCu*^n?cV|Xd<*q7J3P|Na8VF@MBN|2%1aY% ztSPE}JXqf>>;bk5wc~g(3CoR&pi9*dHj~^=Z=35wGs)V9WP9@pRNOUs;Tg;3W$73} z2N!Eh8lSDb5E7-{Rp{6Np{)+VCaI)j<3dM#w>CCLuS-ZIsFR&V*n6@S=l6?i>Sgno z|6fKC)_=Qg@t1Bp(!)=`x60}d!#I^VhMbYKZFG2#m|>QizY#@P(i4o* z{J%YU1U@ZJrAzD<#l1ec)wV&hmI4QjCu~w&(=fpg#tdvZGi`d&xXC_eOd69|1phL+ zfBJ|9l!a*?kGuJr1`C``YK0#!_M}dvzT0+Q^l@4*5o!=a-ylw1nFB`_tjEk{Op9}} ze;lnstso&0k(sIKPm81x`b0@0F3!wvF9OKICP!UmN>~e$I8CzJCW!re)pS=kui+nD z2DaG?yQ7w4e*M*!gw{$`&17jCS4Oj2S;&x~yfF&6OLyO>Cq}D#h%+vf@}Fv}y2Q2g zL;LfmBGi_?>0IVZ=-Dh0$84a0vg|+A>p!tupZ_EG`#^}SDkYCl~Qc+R!wgEu~ChkZe zuYg+bZXx|U^YEJN_q-8IhnlpO)L;RC)UHE@p_)%KX=sNzygUiM zXDuOsXLc{3aBXNZ&&g zDmd8fxv9b4`J?S(Gdqjz=_TJ{U(VY^XWp|PCq5Saufs=^>s)&>pT+ys*v|GFFcno{ zjOx0T@HR&vj^+RxQcL`%2`guZw8tkDbuk=lg-Z51?EHPt@p|k{? zRHjG_@0rnKW4V*Z9!TGN`jyN=IeZ;)~)5N%hx$vxC*{1u<4$6%3WXCNN$t z%yVI(L!h?Q;3~~GTGlwHfBHkg-Eu`!Y{##ZD3GDQvPELA{zP@Ix!kY*eV{X%v#D9tX-F%qL+@IQLOC^K`D5AuTZTBxjz`i3j1TQGgFh9Z4%)T?+&f>Sm zyr?$bs2tIsuH~7pb$G>F$PU7pqE-D|_RHyL>MhiTnA6sz1A&Q3nc^g}-XWAX8u&2J zK7f`hSzSk(VkN8IT_7i4Ai$U$PE%6!&L&x~UQiCUF0X>ve#E1oP>n5(X^vKCblvYn z7o*<}@!nuaR;E+GR(CMDgQ^&_4&rWSwF6ED0&yadO*#=!0qrJ(P zE*?4^-fu~5WGrkJcoDJJ793VK`svyZ4{G{>jJfqVX=7rHsaLOU+)mJWt4v~yISkk{ z%QD8#@cA$}#Qm{k77X&fLvz$I$iJQQ7LMQ|lXlv;*HVfica_a@N(Wp70sgEo^28l7 zCMrU(@8d4ZuAtgX$A3XiYL>2do2($f_~Oyq2n$xNOa}3c)0j$#F2N#=oxH3hD=RZ30dL&<(8{k5?nBW)ZV`m z9>yMvOF`SHA2bR(oAR!1&#+l?hgwj}Phhkas6S3FlQW$nhWA$c7mqJBOtYn-t)e5Y zX)4WX9VVlP7;1>BW(Ic=Mqb#-KQ_z+;iL^q#+azeprg-Fm5?`?S77uGXbK8T*3iVn z=ut5Y`htX$u|YP(bX}FHmmN4U#krnyj_D7F9P*hDvIgtW=z_AJPDiVa2Me|tso?T0 z7)<=2xb5U&;=(kkIC?rK4M~Oy7^LqzLMDiN7jV22q}?(6#MKB;?DlTK>!~5q^Z0=T zL*NAEt-gt#)2L`(M;iSx5qKdq_ zI-gN~HC=kO&tZO#;o=ku3xz7k@o2$z`RdN@1ys+jLgI_tkSQbotij9Zs_k}ito zrH*<;0aQbXWaP;&3KXcgj99rj;N_{EL|IC30oHZp%IT2!vb-T@9H)7xO>Tpi=|fO` zJJt1}9j~7M;KK4JBWt=}Pj#O>(T^_co;M|rn!xIye1P#Y^HO$Q73Oj0k&ptO+cVEzCpuMule zoG~lV4EcgsIK3y~Zu6fBl2;GYx;BEJL-o*a#e@&dx<J{O&R#>V z-KpT`Gplgc$z|6GaG%Sj@+!q$4?{&!bH8^7vPPF7qaCwz(ehTdD)zrbZLGA*zGkDQF_X0HF+^<*5Y9@AZQjF16pzUuv_pU& zOv!3{TQ?ds+7jE`oPa+;6RQvZV9`euCtfxgF?s?z_%2#05In!2m8JO?tw3WA`7QJE zlRJ2m!?8CSD1;+ZSXle(eV1ZHCFQf^QjW+cK7HSpD`Kf|a1&XwXUFH~OrVpO)DX-< zyO^HSp^QwC-{yP3mXo4(H6?T{8!=?@^8Ish{j(fY{oa4GDJb1*DMdVc=k1IMslvyP zZ(DEU@Ke(}u}z}?NJ(YcM>)Nty9iA-E29z-`mq_q7ysS6ol4`z)hj_t|88A<&I7fN z*p+3^-bpV*z3(y+c{_YAEvkW);YT)QHQgmO8}PP7r4EymzaYjyTlBb(9Buf!&&t^= z853?mlbPTJZY07ERUr7*s&}&$g&<7L#ZkWTFG%d;Dt$4lQwt1 zJz1JePBu?A%a_)hS)Htg&VLL^s0#MK2HSt_{GVU@|0Yf`tLWprvnZ8Z<*U0J2kiu!8+Z&tJxaku@1dc8 zV+}54H`?%Y8Tv+s*{UY$e{&0kh@(B`3h>UZ#b1t{Ro4*&%TQjClP@0q?lx8Ev>UHc ziO#a59nFvXH?$w+R5hT38(-t++d3YqUrDa4)Yn~`W$lv_oF_e7)+i5%8L0mt=$KKS 
zb_3~7>$COqbaM;H>N$Re$ukU5LVO0w^%K-EL&a#qdcEo-{PFIKz>9YzGQBT-V*B{o zH^H#iqwB>yCgv(=L`B*$3~k40+PXXu}~L9+2p&4PE2^Gujy$6P~-yzEJmS| zlfNyzchBL13fS`B=LPvVGgBhT7xeKZu)&p5ZCa1v{EM7*vSFeSnuY_KN~-7>-5G4| z@7F45*DCI8njT@`m$zJoq~>Jb{C9w+62Ka1w5K7@-sntYC53ZeJKT=As?i%?f^b7p ztyWVMzl9)mvuzTs>c-!BFP*qrJF0tIAW|tHb@ZLb#rX>Cj$u_g z`Nky__GpF3>L~Zt`gJtw|MN9Ai?LTb_9fvJ5VXlw%i1kS!iP;ejq6q&xe(l=Ebzn^ zbxVBMjIh$U`BIK6blFoY3gp}M&R6+yYSDFJ&;lnjj+Vt()+!BY#dC;Ve@`W28eMb!b_yygXgVNd_#kho%-Acl?r)ew8#o)gz^$DMDrZ9 z`aEf@gj3@>|75EbUr*Jd2NC-8)>)f-{~N6)*2Mxn#0pwHhr|u!>&_z#-NED8D%r6n z*uvshb)cZpCR7NRjIQItsv~Ol02CV=z_zI#=gDX$XJK&%gpSu|SAWNAzu`eZ`_2Ga zHm@NgIQlzrF)dR-66Rj0}TtGb1qFM|a^VFQ=?kjt*>aQYdCS|@zHY=ak61VU8 zAxRf?1BX~;_uDVu-*KPqo+aCVn#M4p+<6I&3d(M$UkL4Zj3$W)?|8JWP)fy8wJ5*| zc2ss#4oVi(zLo!2!oG0p&Y7ilj1fDmA(fxoZIE#$przC~w5Sn`=N;oRj@^m%i=id+ zaPg2L>3)<%pE*gwK@VM73`BeRfy`w|4i=iGR4mv+NM2_gOTUt=XrW1}Hk_JNAT{!W zF4$P|+iV<4R6ZGeG_`sXMO8&=LfDStH{KXE{O{kvWe;!0O@keB_Ufe$s*0|rXsT&# zJu((e6y1>ny8@bo$*TFLSz{#ZDDYWrZSq_4FFF%C4M&`_%L z2_vfS^2axkRyr*q+p>WU<|H`PGVgWbXiOHPUmocB<&_5+cZ@0NXVmjOu#}J{Ya**_ z_qst;nbbp~x1P4myDcS=(SVde$?mdj-ldj5uY2m2KJdD7OqzhlZwVXvG*!x5BENJE z%VvG+y9~|3`d%M6&>2VkhcsnyzlP_HG^4?F-Y7@SC27^!qFNL0_B)rClAIve5-qhV zcT_{4NIBU-kD7oT8^6`LuIjgqB<8a_oCJgAH8hCekS^=0pt;P0WRt4+zGPPl@$b)L z4SkYc4*OwPUQWiMoxc42uR~bvzlQ{?)PJx3dkEM3HwE@~nU|~ortp77g8ycT>VGeX zh5ccR-u+Re*4tu>J}ZfFwxbkdsQdUt z;dR?GxQqJG>c(tuo>@4$wUVE$PaLt*m<|-45kI^WL$i9|Aq8`V=0D%l$Eq&%DPW)( z_GUQSpLy!63PjbwR@hGq3g54CwtDYK6ty;{9);_7h$?%Sv9@ztH?50%*{urFlzRl{ zBe>EEKB5zOk6bb^pFC(oP+@>sC81U2PXx%UB+M#(b2}o$6;*_bzZ(80IhHb z`C*K$oB5SX=6xB%TASB_)3N7`?e!JG2(37piK#08^P?xv`BegX-ghLdK|t5)jFzAW zD8gf-RSRZQvF>7J@OV9D8*WpJGdUJ9?eikK+qN8oqkCoaW0$ygH;v+23cYxR@1mh1unZEUJ7L1Zi>C`ML4~Z>&jG zA@d1hQcuMnE0aShC3x+S`DR}3D7 zluJt0J2#=#)5RSw;)E5hPMb5XMx=Dj*Bw{UW+_%sn9G1pCnRH$V7ri6_cmt`e}D|7 zAoA719QaQJ@#!aXB6lC*+axU1=AH;Y6K4)Vb~K*{TLS{pTl^3vIkek8GMGS#z{-pH zW0TP=M(HBZX-3l`_Jl@}Nn3z6hiDZPX@Z9C1w&SvmVx~ci zP9QJ)GdV8L;2zFRPUovPJ2o7wS6mNKYg3Z2$J-B=AzrN{Yn!sr_C5ZBT;`F}!|4^w zKmv+xA3ra^)5&wxvITvO(<{u>Z$mUf$IPf-EW2XSovW4swuFbuak1BLqAiG4UKUg6mfRx+H|U+of-T?95BLJm9@)~yJ7L>fXr?0J{FP>ta?R2~gwUD{>lU zxI5_x70+`iN{jO_17v102;0trj1IR9&u1di7ZrxqrVIX(oOuJC;!k+jDx4DAw&Qks zdc3k{mS)~VT7|IGLl6v9R8;M?IF}vK4DDh_B=UP%*%Mm{FZLIxJ_V)$w6nQdR4yVt zAR`N#hy#<^OvSpv9WHc-<86T2Im60J9L79X$qd#H?swOZ1EQ}@Ex0MkohpNRL;Vk8 zdr*8E{VnQ*>x};=z5aIy@dp6>4MP0&@gE_?-!8achrgp`ZY-Nr#{}_}*+>MI29a>0 z9fVPS+x$0P`LCO8mvg|^HrwIt)CyH@(uI?&fEDmnuCQ_K}cg%%RYCJb{BH`4vK;I(<^K!Uscv{+wu=}g| z?63AmIbqiC%}xNk82oB)AJ2meF)&C^<)xp!s`3t;rOczYo}DXG2r}8ycb2)>@TvI{ zoxG^|z~*`J7km2tZseCP(+EUoASbBwJ(4mpw*gqB?&;DV{)nG5KIDmNKuF zr*#cTVnm1$6X~_C_nc(i3ulZC&u7(c-1VItR}m=qmg4)^GAHHvEMWkSR0E!#3|W@G z9hG*-BYDP$^86f`;1jBDJ?*aZM9|!|m?p3x{##@b6B?d?xU&5oIr~q3dQlEYG(sZ6V+cD-uhEMQcqpCFliU92BVA3A# zrp_{_{4kT8-0Ag^c(mjcvokCJo4Wy=E@}XGin3tR+*b@vBMwQmqDq!?PNa@)F3De4 zp42*05Uz1dDDn}Og&Rpq1>K+{hDzBLr-kfR%V+Z$4We4eZE!evS}4DzOsfrQ-GA3< z@zSVNYY+}LR}*d;pwqQNTXInA+CiOC3Y#2~l=8JanakkDyquVl10EsE-0utw77v(L zD~-+BtkbIu*e7)Kno3x^>zC9+GT&MlJ<8Rs@=cabw&Z%w=y^Yso$M|Vx*S+DdmvU8 z(zHb(R^l77wDEjXNLs&x<@SpQs={9Mcl^dC3NthPA|fITXCnWG^E1%xM*-k#44ic& zN%U0$-Kqch*uO{Ze~SYD^9A>}aE>_y_&)G$8WwrUMz9+l4uW|rV>ub*q_pC4N6d<@ zWrAaPord%>fFXq1sD}umk4JgS#4ivn-Cx2N^Ig!&jp>cUAo5Z1YMMitKGC$1gUu%C zmea=b#NRauzya(06BfjXV{VEDZdC2r@$}_e^%nmDRoRG8zUp_t9vhM^?Ux%YP_(17 zEHK?yE6)y&x9;L2>`DX*w4q{#EQrm(jeg_)pMF!a`ORoefd_Z-B?lA&EYoT3BqX%d zt@bwsQ+(?hb!Zw-)soX%1FASadh}FqM`B4_qURuP)Tk6>8UA%XsGRYg>`SBY|kUyRrtk`vlo*pie3v8@$ z$`x>lVSUrv8RJ1e#}n;a`HHI>R#~2=IM(B%IJ-dFlIibDM=w+HYbX9S>CM^>w2HF6 
z%?QJaR@Glt=(ZJW>s>wOKYXaJ-a~&QYe=L+#)kI6YPq}+)YX+DcbQVUD&gbfRD=SP zR@3UwEti{b%YTxW#=&N9U21`ZS&nz{>bw(oCVdf;)YdP0M&FkKPWBD9YhWH)4 ziYsDB>WxALz1`CLS@?ySLc7aI!{2qi`rnFpobQbBPaKvd^J2|PCkS$@4zk`f`8Q)0 z{juI|QyfQ{LXN@B5jf?IcO1IaA@8myV{V4~T1`LmzAB4a19=lhENp1wG}|`i zp(f^QK%<3<5@)+G3}TL&k9j8$GZyZ5s6F*xj2W?NRt5&B{_Y!Vn@8!Rhte_y=2m4f86=mkZGnM}*Uw+yvJT2mSuvQTEgT2d>md8j4 z#C=NMT*V2>hs3ew9$MG$>+9Fo={zwbovHrL4XiHU;}wg5!Srl(PR=|pv}Hy2?-5)v z_IX`o>dXp!M0-RnQR*`DPEoY$F6~+MWhb&{&~~=P;ud)630MLod+2+CqdTuh1NP;< zI=o+WO|ZzrfDIM39e#b^v4KMMWDN?U$NRxjC&j-EUFRpE3v1iTpRGMWpHsslH|Hpd zu3?NkOt7K8%7TWl2%}c=m1j8-=I6CF@zQOpircmmCTD#2W*eYxc}S`)hizr11RHuA z178bhFJ`e#yK>>%=-JGrPq@?P7TLgK(MerQA?a3eD|(wb;}EE5RpwJq@R;%L1MB%1 z93slex-5&cx~0h7r0wn5#*3l}FuIBI+aEBl9xCPbZtblQ(KLjCjtm*2+^xk4IlT;q z4#!*%G*i5A-dr|>y8z`3-HDi#A2t(l;q9y!QYlvM32#jkNqz82Jl8E?^xPvD+5)Go ztagQ|0(}_xfskScs1^4>@dj>xY}2oc*#_%6=P?J-??<>&a)irYoA<_R*_#u#&w)x- zPp^52!3pZw$vdTXa_s$n$B${2ed*}tAwGu*)_3ow*?A8f$rs&oN}N z&s^#|o_g16J=DWIJuqqG(Z5o#w7@y_O#Ebm+L=N}JWa0d6t7NvO%h!Gn`M#Tju;Ze zzu1CUir(l?U>9@avK7-H5dV<(N+g(BZxOU0Za)?Ft6o7=SvTV>`XKu|OE-`Hk^ClC zg-G)~ZwIc$<*ztTaJ{AYkw7fUxpb%cx%7o{*N;{>x_$~`P4BS}cZO;1M2%lPT-gRR zI5eO*1ST5&aoXEwTPp~uGuEmTRav67u0%RI;})r~wfETQNXnSHgju%+_j}uT_PEZu zi>YqZYC@scxdzuLq;AzXbJ9N1As1S-@viG8ANh3O%QY#`M0eg7rpF`)DkPtAa!((_*m}bs z(RGi)v+9f(gamffRukE8+Z^p(*hIpl=T=_Hn~5$S48ehl`$TFa9y z^jyXb&-JT0UBmRMg&eNK`&FBdFOSKRb|({155(Xy=6DFd?pA z<5Y5s$o)t8C1{`WfOWm_QQL?VxcI7)>}1K4+yTzBUN2TUdBY7-xfz3aJL7IIldlc( zrenydEq6GZ0oFosT*~Yl`+YpFHl9@XU8f*~0FAkodqK0gH>BRv_bw3?;~01MLPglS zu-6%UDspl-=qjy_M%aFF99CPKT3-J2)}2)wnd>+q1^a>zK{P3uw8AS8VmIGh`dMtB zd+~31B~bzsk0(OO9b#5&*V?&e2W~F>rS+jL?5i7>BUD$tk6mgT#K1A)o;R+Z8WJ~K zk^!VnZlHNv6fpaZ%>aJ?*9BY_A6tV-Zjx|{0gd|wpS=#*u!}(_p$D6t6Q0DuzpgJ(0V%>&y3aufCNiaf_h*BwRCp8 z==x9*_@J`OWXxs7c*K>aUv2;-=g$-%o1sW$#iYhczw~c5 zK%vC}U#(XNUc&X!XxFiO9@Nhs=32S#k!&kYN6O#Yb=Ampq?H_)e@rTa^%wn=L^3;* zqSt<}_aeG`Ks41Vt13STFEcZYlFY=p6`VZ7G*yu`sw&3UyBe+9ro@(QsYJr*#mYk_ zBR?`4D{TENx#}nXO_?EFr@p}?B@S-RcTGQ7806ZMv^)iK4Mj{d-bvCaY^>)d>h`-4 zs1?BgC#O9=mP&Y;3S-fld+(5&vN1J-#Wf zj?_rtjYx5iN58tM;-(S9oGK{xOjw=!kBPT8cmk-NtLxA zLvkgmJnI?Io+w_DFV9cK7Qoj`>z*h@V^S2HHvczp`a}sd7D+{hOun6Koaijt?M9hcWP5!4H;6@NVMCn3RAWd^@qKSJ44w@T}(_G5sjeB5*JG zLqfE9@3Sm-Ev><|TBDf9VTDLfjn5zBRe}hJJ-Zfg?{TMf42ICe>X2u8%bGvJYWcd0 zH@F|@^o@V4kRkf#Gr+e5$31oX^gbtdtQn~3_p>xCX~Vm3%!iDUo@3xj?*>!5C>sU` zZqa@Wj3J>k;+d+~=Juvd$H;OJ{v6;4P{^{~v|HF`C@JxepjM@Y#>CKm`C^xh&-9WU zDp_NDdmxHX8=WN3_&joT>h-IS1ej@GvMn)#$H{^}fC|3ra7C^*JYR9h!6=8?D>4P z+`De011od*(vX|NXM^CeJ?{zjZFRC_wPuVqWgI9DTVHbF@v`FFGZYIz4|Zi8#r|_C z0-LrZ306$h+z+4QLsJy7KNX5C)X!{%6oWXDrsRzlB*a=}p4}7J$Sts<*YO}65G)+3 z36}06Wz(4HJfAmf!Oys1{{cbVMl_Hj#B~sh+Lt#>S_rI7H#RkFH7`P&N}m)??A|WO zB`Hn+Y!F`19uDoxa(3JpoRMTo_34lfS>`lY`Uy~P;~O5fE_;8SUQnFWjfK!e8jEbO z8&He(4^QF5I7GqQ^GQ>;+}E+uiBt|z1IahK6TQgWnI|}q0*_KI$b9FFw`4KMe4u+` z`il&YnN2G#Ohw1hm&#vUS*YUU&p^^7%fRKp`>*oNJeyy*8Evp>{p?p{ie;)?W{PKO zitGL~j$~?5&tu=(0>HrW@iX!YC;}x_vd_;qM%bUED<{W{KD8%GIsbD;_T&xB2{2nW znVN1aeQZyR@~GOU&btwJO8zwARRF7&Xo5=f?YvlF+!oCm=liWwpp$(`rXF2cwo?DH z*BW+BbW_ol`MU1d=`Fm*pi@ybg>AY=_2FfqhRkCkK;H0qO==%s3l>q;#5;W%rM%E? 
z%l1k7%|sTSoprEvN)a;wABzDsX zE(2sIE9|iDWc47wFj=;68Jw-mVswbq3L#T=htznPK4AY^=$q`O`0z?EQP)#EE}}XTw*nkuA^mmv_2qSjI87`?(ygfN1gG#bB?P zdf`L1gbn*As^)q~wlRr4;Z5VPyhE;X7QEa@qO@2z43nsdHoAl$Q6dPUcca%Jdhfj( zbuebW^Z!5PKF@o<-*@l#-tT?yd+%p+=A1LL&pvyty~yhs{S^=>#2!TO z*E|})`HuvY_UAKyIbZp5Z0A#3ZC-K!&pGpeuOz_*V$<2?_sr4W2LXJBWaakeWmI;nf>j zmPEIlY3{#^{z}aGsHl-vyMK>M#j)TGU9w zDWbhc`@*H4l#WYuo*VTCXn#WX?*q*Le+sg{1NJX)O@bbSuKbzsuK=xtj}NpI0gwoZ z2>(b##6*84;=d-+KNHy>iQ?}G2NZ$_lmR?^75JwhAtL$vZ~yKK+%!OzmADBIIX)gh zO!!nFFz8~==eefp+X;l~1pm|V2RCP}S>t@;y!G1?l@hlf+{A(Ye!qSB#U(y2$>Fcu zamfvr(&17#{!Le0a>FGzTyn!DH~iUgyc7oirG){*l_784Fb6q)o$f4>2Q&jycmrum z=X$?OYt%t--0zFi3A+jV!)FB4wQt%6x*pvkFJXOaK;pG{o0|6LJ0m5ON-7)=v1(9j z?}D0m_$vE~&5N0N;r10s&*oUe+&r(3%7)j+U%DBB$}AR`+s1_$R0R%HZ-N7bWngaz z7+|53IMA#B4%9lzr{H;^f!P0SVVs9)H^6~N67p~$Sw|dbHvtD)Si^w=yY+CORB;^W zq#KTM@6u8l0>0$+btpXew~J=e$-sO)%0>QihwpuIKpwQsVo#m0f(Z1}GK#n_B6ctwGxaUj?(Mw|CfrgPTIQDUgVmYT#PatHSSRU9;NhIaCB(eP*a(&pk!#D zenmBgNc&2w({$E8CRpDo4=VP9Y8QRd|F|G!K?qEMYn&U#d5>x*6+sLLoCd0vfHocE-1XpvlrQ@ z>6H-Z4I!!)#tM9bO%wC=K^Cm&|@>Ua5uJX6QMGbg4g-) z_^}i2{6{%L`u z;cPFvd7Q(WNdP_0U>)YB!DE&C-d?7n08&CY2ybg^J)l~g2=~#Adv=-O5)YU3aLE_{ znexHMH^(E%mj*OlUs|IeK!b}k{e2kkz)A$8nvbvF_>La5>82nJwO7MbgPUx$#pCF?^##=7&tX3X(X><8 zu{>P9ZK!eis7SP)x3vrl7wD>13I{`GZ>)ta)-fsVZI=bLSlE{g2tM&&u`{$LK5f5S z$>|ooD%G$(y$)u!L5(&P(x7)Ae>il6KP;+`T$oWXqR2lgWqodKL=*f-nO*vbP+3y! z(EO9TmG8F+^YzFWqp63?<+}`oj6Bbt1WU$ z7{)o4ynopdm94)ynsmewmJ#|rr&iR;zO9#Ut`xL101V zNWrLfM1J?{u8o;QX+(fL&oE4DFRgjz4oYsI&)4gvZB3Q6vToEbvg@Jmzu@(PXb0`V z*JVixHz+gL{B8}Dk&SgyI6{2L2mGZm@0Tlg+iw;I)Y|55=+72=Hb-0?SW)A*xNc#* zesV%Q`JmiY=DzUZi?4_mGy#nTv&SFMGJEIqqfLP*r+0^kzsm|KrpD^xkMi!DcJ$m= zgthmY{hT~ub*XcX^4@r9Xusg)px!#1BiYFHBcC6VJFqiiBA~%*JC+N5?fphuH>XL* zv8JZ&%@}!kPjY&lMr6;t zeUKm4`s8DRjN168bBKk+1l4_gaiE4X+ZK7@y~IFmK3Sc7Lwm9}sx3|hRuu=tn-#yT zdu0~0pfADHWt%wAgkpPRl6gMuXwYkn#6@QGgP&^5v2<^^jfT~t9xxM4Zkt=Zo^gmv;8}v}kc~xnOs;?s#*9rJa`ZL)jZ_D&ZE#B<*kCSS3$m`YQ4?RWk ztEz8QMN_OPm{wr<+3pad(nF--oX>G{>dp*DM!g7XA!AC)u|?QyZC$r>~Q>>&DNZaW7}*h!kDU z7K$uo3zz3W4>rLm1!TpK>uKx7mp1sMmHTTtiZI=R3`h^xrqFZMdYOwf_)-PN0SEe- zUxAINDJ zX+MUO%S)iWXCDr`Kc$d9ZtXUj5}~Gg^D=Qay7c<$YE!%^>aeY5L6Z_{-hQsuQl0WG z-oBD2#Y_4jgH``yDNyX#HOLa>L_I{Uj{(Eqf0sDcOnRgH%GY1fO#=;8>@N7#1tbkJ zOurYS(M7H~=BJyIlC&k2(+E~~1mL5U-1>rxs^E29g) zkWn=BTDl^aqn=|zRu;2hIeW29=2E}qB{~b&%?YaaECP=r> zX2nH_XRv&?{cT&7&^TZLd%nYXEv9`iE)2h$K0K*8TJWX3w37R4ps6~a7py*L_Qm(t zlj<&g9F2p=We$3;uZLUj>{sl{xYURjq?v3NMh}ypGw6(K2XSRfsmh;-8dMklDv0|r zs^*ma>Ev$xt-&%dnHf5u+2Hu(H<{70|HjCZ)LZ>u*@Lya-Y9V%6yCi^hS@Jzu)%l+ zG3pDOeH9KmQ)Zd5r&`VNq567EFP=PnE8K848VvkCzbDZu6gEpqp2uuH5p*iglXjma^s0Q7F_yHcxuv1RE{(Xm zSYlw@!B_3ltZjeLn|L>ASkXClv_dU!(<%Z*sGu*Io0CQAUX`92sk=;@?N{z-@4CCG z`92j7IgI@I3Z{}d{gpSRdy|*(ypXM8LfUPGP!H{%n)p`BVPk zsUW9GRR+@sx4@s#aRh#kqW9AtTd=^C&}4ha=Gl^qw)_l31;^aAvzO9A^+gQ)?gC8& zzCG;cO*#!Xo0c5O-bF$d1ZGNbAkxu!o^1-r1a*Vv5KZh*EaLvcz@kG<)$Ypu#fq~< zw2M~9n~x#%Avyf2+QB(IzcQdl-YVG#vi*oGv7A@Bu!M_&->zLiZnM8@GF1R%(9_xs7q{qhlZ(C zI0{4yC76uq<(ogj@|oCuyfWQ2pq42RnS-?7_n76SPmJ(*5Ft8?&>5hwNwD zZ2qSU9L~=ht^@uGs6ywsfTjvr;+eG2fbm+Xz%-pTz3#%Qsb`gXUz+sM)Be z&|#_wmC6@UGi6J+W|Ua;xTecKT1AnF{kHcmPb@H0$hE6TI62nwe^FT$Zxj6B8i~Op z-F@N4!naruhGXwq1&V&bFQQ=mLymna;|N#Dz*fPhEdOyq?i90lvgaMzGu$(NuY`;d z^X56<(UkK_ZQWoBmAm%y(>l)(>JzQ0v%!JNSs$bn6keSbKr(2XKf0}|QD{}QHoL`? 
zshJdP0FEY+G?JE&311CvsPn35+Dz8T%%StmXgx=m7Np5yQ$Ev97FA2^;s5j(pA|-eW#XFRb?dNU@N|GA&;L;^x2$N*L?xMo;6FqCa z2u9l=Le4ZJ2Ox5OLf1`43rq!5qHzzPj3 zN?@Ien(6|!xxbs3^+M0eRQEGVrAA`a+^S6CwCii~vJ0(tCgE&Q#$7Wfa$|IeTC#yY z?}_mUgZDEwIMW(4*WldAQLGB?Vd+r4feQ`jdtU&ntqo%QB- z0*fn|GVAwk3(Q-PPhl+4YR=BzjB8WbM~qptlDKd0OD_G4ptiy|d~baq7s+Q`C2ppj z&lu6AyOL`+pqWHfcjyCgU(NXKmFoU+Fv~bbD?(Y?_}j$t=X^)~6MaA9Jdv$YltD1( z@AeO~hCj_15}`xtzaIVmDmis~dO12(q$dC2{44`hskQTND(y(=h1v6l#4QQoDKD?} zTE&(E=Qi)r9vrBYr%BGoxL=lyt$CVmP-_6j4;z(WWtu7fZT3X8-(n#j#&=1ct-Cc-Bq=dZE|UK>U4KG4mU9 zS)o}xe1f*tU8maKuKWiMWh<&3hYqWTp@wVokcWyw@n#3yw?}FVrmE$nOlr;%m3HZ& zr?Klzb&s1S+9w_BTV?#Pcx~Rr-WN)b%`Hr@jOA68!!LY1#yB{ZoQ_5O!Eb8k((MReaiH69w2k%a_(piZ9I;$K z^h$VQjG1uK;#^$BeGZU) z0$M%mZw=%hs(sT7&`vleD2Y#&B$1Jf_%QJX-$OqBbn{#YLydM`aJqrf9KdL02^Lw- z_joU=4&*zrEQgNVxZAZAYI0%q!`UAP@>#km=YVX~3<+sRmW;L*1|`&dGoie09>K!( z;;E;24pY62>o%n^ef(C6))HOk6*uU0rl`l zy`_FY@R>O@O=GBZ`$ZFlj!Jd#ao2`e0$G?BouPF^@MF%ece<1wkz7Rz);>m20|N%D zTdR8&^Ww=;(yO6IhNAp9(BjJgMK}EzO_*41OKM}@*6e#$K6K(SMSZz>CtJ!X zDTr`L(3kRD5W3pRM&(Ct&nkdQIXMo_bCugrcV1Swx*WXlL^Fma;wtTfc}1TC^TaIJ zGqhuY!}b@unLGW3Wn3?U2WGG&wo}(g0}bvfa}wY+Zd!cy_Wa;pNDgn5U#$F{nJ@#{ zJl(QRy-}9o?{iA<`3P^w%a$dz*u5~wk8?!+roFd>fBn^&wN&;}HU4BF>c+fDvHlcN zk*8ZVEG+@f&yQkn_3^PbZ{qcb-P}7;rapCK$Kw0HKnbU>cr@ps3) z;nCo~)Ytx{Ij5|OoQlRr&yY2tWAz&H zTAX~G0UhfB7CaD(eLrc+iw^_vH3Pj?4AmY_P`wz&2wk{H8LH1$Ftctvm82dYe|lfZ z+9Bp?Zo2Gthc0n;kv?a_?GW0UEqeLA*w%X5u!l~xYCqQ3)6+v7RSMGSEY%1J`f9rI zo$+O=Q3mWIhmd>@*kcGBBA&3vlvN-35@H21o3V>y36`tf=@ssU%n1duk`Fr8^J5$? zQtuku>{$F-(?fG1qnpB`g4ZC(s<0IebdRPhwAedW?+$(6BqO?bb^Rb#t-!>V%WXkl4 zQanxavFs-TM{SLV3lSoKQWr~cfM2s`TE%Tu|T`eA%oc1{D>XcC`G@EN{Oi(L-@ z;BPTtY>z-XSTObcz{ppkqltnSg4^0vV~+WKd~Sw*iD=cmz9B+sdVk8(sVkfqb$xS_`F;}^0gvx_#t6YW&B*|>bb^P zD_7C>Oz6T>5nk)NcwPP^4PyJ*Ta$CU?^7w!*^4AuduF=!q>kfm>O;vmnQEz_^Fq`DeAhWt#e?22YDSMJnw`*jovWR)bt@Jjq#DntO=Fz>69&pW`w?qx8gGu zoQfS|ocANZxc-(GMQaS|tGJz0c<5CotUd`olS$6Av;5prEN#g}ylCI~K;F5J6U+?^ zMhPY_Sd8nm*R!I;YQ6JQ-{ow=pGU4g@xL0}C8hP7l{P_zrICf*8P zC_LY-#;~k={=13k1_^c8Crgv@dx+Z-w5-O?*p9=mtz3Dsu5?TIvWawE0 zBKYHdrB{v;edOw&+hRx(U%Plc3i!NTXfYs9gm!ASAaS<{l4tz1eQP*h@OdRN9=2}2 zupz{k?M{e)9n|EX0U|#sbV#pzr2&sDi1|FSVXSy(`_*(z%~92oWhET{(av=r$xS{P zo@xW9cyGhK5%i7u3#GYfL?aGFHC1;}7f2dy(asiqM`y@B6oGmwGB#Rut$Bw=;pmgLLUa!lG-BjHRn@2n+tj@ z;f`z{X_@SKpUK+#XV{Yqcd!S9E|wz3+f5MdGc4%Xg*0Zf&9k=99a{;y&>T@~)pnYP z)-9yTA5+;}g=GzfX|yv^7jv`>p_P>l2{@J~vq``nq4B*I*jR*G3PI3xKv}UTP&xUC-XnXzlktp(?(r?xGTrN*d=s@- zM4my3j@_LIFshm#Jk9y4M|PKsVrI^_qx_CnR&);3S57l;1(D|6eEc?JMeMMZX2dsA zSH7@sXMcaIuzTXTt_G6*!0RgB=vg~YT~AMKe$UKk*?A3Qh&MeH)%P6cTw742NG21N zOQfx-dUU~~8KN*)#DVwr{!{FnD;m#qHqZ@z8agHPJ&@|O;*bVfV~7KNG^_Cz{gu7B z57$8(#D7RG)O!}(?K;rhs`YJC(=S9ge*=*_D>}{jH1qLAlm^L(+~%!N{=q<+7a1%F zc^8-0Px+U^cE@Ylexe6kQib+BTaweRrBP3p4Y$yp*+spp`b1MzZ8j-UerZuR{Ou?g%w`hI58W};w{~FTo@crlY&WJd@i*hM1Mi= zwoJ@?UBH2;bK^sCAng}VqzFB4i6~E&$9yR%?>~94X*^3KPs+xG4$wsP1&K`}NTGS^ z;SEXgwJ)p_@YANdsa}#_T?|MK3?{!vRMT%FyUU1TGmcm58W}g9&WD(EjqD2>u@YL& z(1~u7&3gKrtRTfOyKs@FdKL`pFh+O*f?z^VwpPptjM@Zj^{d<=Gckc8v{XN8ri}|C58hLL5okCjC%?{IOQ|ianfHmHK`3h=L-7t(E7F3yRM}@ zX#3ic67#(#!&b(xu2to4@B3;R#)ZN?Zof(85+sdtQ*f& zkU_))*sk$~J${hWnF~sCwAr4BCfYfDvq(L+@5g36_W_gk@XifNjyDaUN}AN?Zt3P@{o6*cA#1b!ZM#`Qjl~Wkmb~fj1&md_ z;+a`|y2E9H$!!|~9>w0=FFgi2f9fvYY)g#}Pp|M+$>!Rvn;5P^wxCb23{EWu6V`=L^mY^Cme7Wqv{NvjQ7jYSKcZa?sD0wsY5#p) zXMI8;j!LFPu2hx7utQ&WKe?OZ^IPGW`q13L@$qR{m#IePLXz9!ZsgBWvs8!(&JC1~ zmmSL1hgvIm8)y1U-FMT=CRMk;^}@IA(0td;4phCLv|L*#Cj;Rhzr7W!e425rj`sNu z<(dCDbhMTkW`LrZeVt-U@HFbq`|U;Fsc?2dyd|ghGH-f+d0KcA6N;$KTn0OIZj0eA zZsZyF>s$6s?woD^v?B2-Wp>)ss?Pg}Hc~{g#US6f#N)W8kpEj^SND!Y;}RMsM@plO 
zEJjrlV7nG$Nc1tBXRFdH%?6lchUq&R>m6Dj zOyz?T&6R}&47QC_=kIUo`-u$+ItdEQ%yt+qRL!tbKZ!JTbdi7|ADT}p*y@P2s`qrk z=>-lgY^Lf&%MxZqRK4`+m_zTOz4G(U4OTYucTP^(*c;D9(X%Q=m3xTbdY%nqr$8Rh z$!}y&VxV86Vo=>Dw#LH2nt;l3WS$|i^XRidmbZ7F0wDQDK&RrjQ2R+5ke+;3tq`XLceZZ#BFy>MusWz z^9-6~l~hYRGicBuOWiH~+~Uf-zn71i-JKjZN+n!C`k(EF$x6+Sy_U8AZVb(9&Mfsl zDt#pUW~&jA;!sTJydO@1aUBxAnizn+3K92{E=5P9I9o?q1xHFdSZ6h^4#@CBtfTIi zcsZ9`eI@f8Q~UXI6!F=e&|zC>t4hDNip<5wnSp5@Nap$N=-AGCuXBGaOzNL&mo1h! z96Ng2Bk!e$@^{HuqT2R5{w|O~28H(G*Vnu)4-EN8tyB6QJXlRrxKh*)@_;+bA1ZlXaGP`R zIIKM$Q#bP})kgN$>n6ALzMJrC82qo;o*70|;(M6N0+stW2kFeS<(|$GXi&OZ(0H*# z$`YUq@uben4)q?oIy!pcPnxyFHnTUk-wIJ=4bpg+{cC2S=(O-$0Lr9-N^DQN346JB zQPu>NZ#u@T2{g z5M9YicUkcy_+bSNYd+HqJ!HaxC?LPdQe<>0Vn$YODbLd6>b>P8U}hy=VpV`ye#5q> z@)q~+$Px*)3K(H1IHKCq^M5LGbh<|@-*9Ecfj$k>;vY1{CY`5;(j<*BRvnB6fNfLo zAEB9N@nM=!_ibq{=as1+;Fm6g8n#ML4!FlqQiK`{B+r!FX_v)ki?dsd+yLVil15+V zQ+U;=nqcsGiHT2wKj+7J33Jzt7o@>&QM-cS+)0& zpi7^o)??4>cv5_|`A!MwOO@V~a#z|<)|nY2YWrJCQ0(G;|L(L$ic0f3I>(VC53O39 z{3tF;7F$EwX19IQEc=sHu)og5J$LS>N%S2kf=7I3?;dXUvBCk>JD#nR*zTR!Vi8y` zR=amQr+^Zj)Jjo2V`3)7Rhnv|{LVDC1}xDNBc5=(z92n2B?xg9uwzYpF>kB%%zE1sY+k7FypLwt7RrO zMk}-*bLo#tXVr3W><>3Z&Dc}JHg0Z39X{9Gz)x3>U=C$rrnR<=dbmNnL-Ekw*4C1^ zyvSb$YLMy7{)Fwp7g7f&eL7jlqD5mL+El!rxg0x+?Eqvk+$cnU`q-)T6syeH-JVSM zYjqbkc9^lS+lWLPC$;+@Ic>wYkmx63ACH`bwr1_l4H8hu#58fRki5BsW%Xg<>-3n| z$3^ov5RD%>z#v_yc3@^B1B=zU4EDO&BVYu)#5cL`-1ZZ9$=2*j`?A#*x_7j9=d)`k zOywd~jPg2q52lT_UFc53N2l8nmR+=nFYJ)5McpakBPl~0%ptC$S@cHaW0tdcM}H~r+$qDc@4kMxU6kF=p9MSbN@{c=j(8E z=;rtQ`YMyt@cKNpQw=5T^*SM}_Y`~64#DU~qgLi=^7WfO&5e53#=V$ps z%TjFUM~BGMO<rxFz^J8SvuO{Pv4O zBu0U4FK8WpFtUg}v%ub}-rzl3c3Pp#9l+}Rn-$I2j7NTy|JU_=Cd-g@g(Svx%g74{ zYKQzoEjbNGBm=czp?PH<$N29MSuXofuttJnf=U6yRKg1*7nP;7Dl==^6zRc`hAq?e zM{w$Y8?M1WYJvCe5Btd($~zZf83e2f zyYs!_O6H~Nz8pUI|8NKejvE5hcdhf06Qd9YX^S0G0QBt;k1^^y4wN{H37`j5s)>(~ z7+}~&Gh>xX`_1_;sunFyfoa$s!f-%`zlM%t;oyC83ycD5caI-<3CLBZb(!lD6PGmc zpTHe$qqTO(ePAj_)C-`a%zJDOLE%8`tNU{0p{Ox;3r6+pvD@fg=>9D^%JVXzV?+da zjRVNc31oKgqCXI7$AtVkXRT52qW`Dg?6Cc)>t~&K!1{xQKmtnGdvZiI+<{ z_=9&Y>EMzMF6rP>FZ@lHUdn?@d2lHYF6F_cJh-$M{?W#|)C-q-;ZiSL>V-?aaH$tA z^}?k;@zT$F=~w>mA~E(ek<0^0_uX+g8Q^q8K2P z{fqCw|KSR!KFOB)CZUP@UlPfpWGi{H%6#NJ1S*4c)5M$y^>_|R$2#> z=1@(SX#a&0H*zsEVK1KKOZ(eKsp7wWWaeky&j=0XSfzf{MDHKwE6M7cjn9RG)#*JRg0V}(| z`~p@hg8o+>d`w@nzE!A#egKOmi(mHOC0>~q5*thqp1H443R&OqW(G#&sej@@Fa!emC6Z( z@!;#1YiZ51`^10kuDlwN2th7U{gpe$f3U8+zg$uL|HIY5`sUer80kaiUZ+Dm~CBCv`$&=xPSfOr%88NM%p z16e8!@#8>3%UH{cj1a6-BT(KNjX@d^BETI7(g2?VF)4lmyPp-FIRlb=_9BSwFNKF3 zN3xwFiLgpvw9MhardgvGk~q+k6SfI~)J zL#?%rNO7RrO}uFRku*sDC@Y&~Rg1Sw-bRX9QaE zh$X^tRCJzK-iTi$mzGnkhu#fXc>d5*!E3iAEmWQatv-LSpvkmBX8p>fJ6P>&^sCUy{@%Vz8?1UdY-CRRlUSfc}e z##`8p1EIa>FLaQYd|;103bws{p|foGDG&!^hY`F$kCM2+hc6xCK&SVBqB5G<4sYN< zF}greltZNa`#2Dm6{9b5$NmW_iwtsO>r{U$CcFA&8)T z&8*l&AsL@T%UCtc5=%Kx?hI?`fZbv9udyTkh=?^&qjNYQ>qP}%^ ze&!KsCu<1lj^q@q2hnI5B@xnSJ^0(f25jN;w;oE=p(v=-U{SLP8JA z3EgnK&y_T9Q#LTy)S+5HHVIN;AN9@f8?j)kr+_XDHl>x?BvjBPGO>EezJF02N#s}$ z6SBu5?&lD>1aU>b?Sky0SXr#rdv*CnvZad*-h`*gRg7o)`39pnh4M6LH?;CdwLNs= z47hh4jh%ANiWF?r^6lx)9-;_)wyv*-fvO=@FAe7mNp}ty#)3#HKF5X|B-$ZMg&Kvf zZ&Ix>0>NLC!%j1)6zEfL^h8I}TG^5o{DR`CaP9Fge4ld`v*OdvzMWAQ>v6Q@TleZU zYg`6n8MVvAHvgHYTukUP%xn=s3?*$6&}s9o`xap@+cd*h$VJ83rT3AYCubsa@i*h5 zI|5;G)eYaK8W~<1W;4Vdx-liz6|<*NvltcBsq4<3CyvKFn31@U$9+e4nd`XN$?^D6 zKBB4LQ7`9aS>1%mc8#1%C@L+2m4jlBr*-D`UU^oO8(B>r%cCav*-IntV7p|LzRsc8 zyv0x3549=_g@N8}Ul$^b&kd^)EuQx>PfqTesTPr`DH5~l+mJGR05QLeov9-=;#OIX zcje*dRZ+-;cz6xFcD?zvZvDB!H7v$+paY1UgqnsQ2cY@peT~&AO;HK)j=p3cmjg9d zZhm$*BDeIvw{z29X4KwwahG5M`V76{>(T&)-T@*C_af}em8fzKbmN!Dgv_&P8)a)~ 
z2^m!5ViG3gd}T0*M-0JqueE{hnN1R}C>Cjk>`k~ULB$FCL9)lQy^5?}R?|yS$C)sk4 z&wgK<$XV;zzrH!SVt-Uk7q+UOFp>Y~ThnV3gb?7XHUol3Yqho+T!$?tcB{0739?~4;FO+Dr>R~%*+ z?gNb!cMC_4=w_BxG@bYeRP5i6v<*mz&ZE!5au-kUR(x95H6WZz3k6fkbN*Oum|-s( zF&@H~^%w}atJ?C`TqHz~i<$bKk8ZsfI&qFI!iryn|t(J4AymKj697rGqyWM?u%rtgkH6f_EQe81yG^J&#;2_ z{&0`4m|V^O@ijBd!{apiv-H)*#^)jdk@3c>3l+@}p&Q+EHGS0UKOs^4qUF?6chng( zZ_kJEaoQO^bkW4~%Gi)h_0m3eS&7Uynikx4qb#&MPToq>`ORB+cW<<9 zH_%Ub1W;~Fd+lMObxJx?Bu*4fiHUZd`z7yg6M;Ei%f}z2ZL6Q_&B7S!21UUk`)f(x zimgA1L1dN_>;-6)Xrf|%_K-Ygk4pRaIWm6jxug$USZh1JMzW5>*|*Z2-Qt5CuhWq0 zbN8T9R=K(wNdxQIh1QO<(Z?6GtMF^}x1pK~5+NBQq7>%s&tiz#9UmAgGYDkI+}nzX zQJq`$_aZ|BW)ww%rQ?X69dDH}G!ml99RaP(1HIY@DZxgp*Fu z%16n;2qV4F@N&m|U$LR%+&jEB!7Dd+1eT0m#6Qk_|Cpbj+@E;PUy4Attf7DE-yIEU zWL@JAe?vac^bbgH2=P~_PZ)ye@=W!{75;Ac$w zJ;0QQ{|TSNo+bibn%GP^UZFfthWv0spKv_U^DCkJTkm**VJEedW25qf@Jy0HKi;mJ zL6hH=KeP+Bhm9I$pN=MLra!CMm^fO}r-O(mb7_?hvV3Rgt_;%}j!>f(%eH%2z^pzg!;w=i-3! z+R(1Wv-+gJDT&(vbKHOQ|Dp{^zNDJi$1KoIpnh7* zFVqc8Sh_mWHtlgM0Jhu}hC;%^Hv{k`T5BB090gyyRr`75PQPtYoXU)rC_s=X%{Lh4 zivlc%gSiZPy3n{!go*N+sY=H9!`NbBhsoQo+@=n$avibrVyP^I4Zg#Ges@eiV2++M ziltrLO{%R&R=SXj$;umrWT+V5p~57t)_{Z2A+$O-J}vO;dB%Q5N}LK@2O^norb+@l z0K_!^HxIlMr!7hpnfCAcJ8E-8pOT-V1b+0gb8V}2OHNovD?NCsyfJn@E9kdYI4+H0 zy9h@vEz7duK+fO`2{VITLbxEbH8@*ME`09Cs)oR6O&2&u=}02rV{W_)!=sGqAQc5K zenpF9OJHBNWu=Wq1)67ge?5qR7}g*rT&J6ITUn zfw_!q6~MGMX-qdmPQWMV{WR5QZT1s;Pk?O}`6p^0&pS0*Od8Bh&wTZn6QWSC70;%A z{AOUrUS!#dJMoHI0?1$8`&!63Hw=nyUJTQu!}d=UE7*e+QrSus#HN0JonCs9kg>Iw zLG5|}C;R6ht`($5yKH?LRV^0U<=4e$q^PRn_&8z2V#SZ_tx<#qqp6%Gq0U5S=~Vh6 z!ROOL&*fzx%I}AZY+36CUy)-tT4YWy)x<|mr@8zjDe!sx0k((mXNA&{F1caOqQ~H{ zg4ZmfR5SPt(R?n))WAksYn_2kYH6%6s0y|<$%x`U7`=;>R}*Y8`vJOX8hXKak%P3A z0z&8usa5P{G}`8)Um=g(TvDRcoNncIzBa@!xiF@YWT$Oxc%k$bz{uV+GatpGJ&wU2 zhhi1Nj|FR1C$gHk9)JeAVtXo?DcWv&R{T!t=Iq-WY|u(&eG|AS)#aeaBDXZ*$i16q z@94v{S_}RVY-xV;y~dl*95*Eg_2h4k=T+>;Z2HPf9i$21keC9#gv~Wka%U-wfh1Emp1Mhmn%ITWhBeT;U)rXw)Az7M|d73Q2sT{f2^;J|= z^;NfdYOvNmU@f?( z^(O{c0hC$%q*yKWt(M|iR$aQyyFu5ZJ8ifGSJ;`!KZ0#%$46U6kR=siSSpyoU@rpv zUd-gf!WBEgP>zby>l)P`ObJ;P7<O+*kz#vD|2qN-Z;&Jybd?bQ$wVxun~4}D-JixCJoB; za;i~8<{L0D=4OOt&8Ol>Yq)=9;!y4;Idu-rc~vLi9$7*79KWw1521%4HX%Fj`b@`pVE zyhp_*da)Yf0o&rE^3N;i*U7mb1^GwIz06f3+%WWdU{Rzrco2k%kRy!QZC;uriz#2 zo^NH*PFk%N5}IzXGF)WJ$}JQI%#fobf@ZAnwP3=F~(Z&Y`nVA^ilqAMX5Ea)*tL$OWr%*AfehbAPq_&t~Ri{3AKb!AALwZ*6}!M;5AujHJAw^E`V)6gbq zV@Q;WZ^kd|?{mG1X>8ZII1C623~X==kYwJ;dF=>z^3Z-^Q$z20t`dc04&Q;ffd>Dl ztRC)VSI;Q@oPZ4Cf$!_xH%KJAn9C!QVwb{TKm9!9pI>~((n2lf*jm#FH{j&+v1%1n zhECL!@ANrYSW3n(O+9`AP+Hs>$VR4D@)RIRu#I zo3i6h)Ps7(pA~ticDc31*u=}>q^T8UTNgW{(ysCh-`7^gUWqeJzy6$4U?$|`)vRXW zzH{cqAkU4avQEeTVg#gNJ@LA_bsjqL>EO=JSG7-BJ_D&by`i?z$EDF;p|o$9WSo1I zR*`}3hA=yvOli_lwD1Nmu=lL^i3#l$p$-E`9?0bNqQ%!)N|@NW zHUxgP3{DJf+6NOulQm2vWo5oAaM$K@>389iB~*9~h3Ji$*jc8Gy>gwa=iI}Awwo7r z38rDUQJ#UNDJ~OB@z}|lgq|HOiJhsKu&fB>wd~G=E9d*H3JAC9SAzUENQCS%m+~aL zt#|nxC9|^mee(4i)0Q`~f3{lQD+xQ+xFZ-EC*c&|4pl*xIVO#|ZugB3j5c_HNlL@O z!AZ+I$oaJ-r{XCG@_bjibu!s&bdu$#ui8ls*jY(#7CINE;XpQN0f|Tgc?N}f0ezH^ zJ5zF%JJU#Dk-yY8<-zXvKM8UHgY>Ei55CwTp`SrYS6m6{z*h;$DjAiXquJ~w#ITFx zaHNHz*!p)B1#^DuZ_^$}eqVtuXM*>;f9P@kt>^i#&cK-B2E^?P>45J>pFDZxV=2pG<{7PWM}cHt*>aKPa|;GIja*0vU6$Y-;PH?nN9f|Vct1f6KeMai#;jfW|Ha;WMm6<*>!PSwD1y?F5|yS@ z6#;=j6r~GDZ$d;m2uKGBiK2plfP#Q@rT2*R8tDQ8QUjqTNGFg`LLkMverJ!p$AAC# z{cy$|=fnMQ24g@5WRbPrGT%9$=b3X_X!fs4tB&_)82<9GRR5TZ`UD||xZ)H3GH6}} zc%IW3fFZex-q)oFQ*BY?If^n_srPtp7;}(@7alhMRGOz7?kF-dAR>8M^Va9ChDT=v zpPcAc9`m*2NFFY{hUBH?(nWLLfKGvZh{F690u{VkjVXOOr{QywM~W|Bf4J8W(xW`& zc~l12fJpchkSSs!iz3(+A$Dah6fS>LwxT1iA+u0Un>*1jdbf@ihcCkHCY&e1#+6p* zEmm+=PwSl{kJ3Heem&3kV79GsXnVXpi6|}K- 
z*URyAi$1!9!%yV9-$eKCj?pq-dSDF(9F#>7M&@KJaM^nxAoV5e&^v=s$A+~>c3AeA z*Q9cS3z7%vNEERhf9Q8M%*%dpBWSc(`P#_r(Mu^dwvO@>M|#=XC^|Z)mL5wnr=BK& z4^uv@waNVs+$=uulD*E-L+pFjW;fTilhgMhqCnRrE^<N&jW2$^!_ikz5qg-8r6b-aX4PUO{@h>$LeP(Zmo2 zw!-A_j?c$Zgd$Qtu)s<^t2|TNSZL{qlTs{p1btlW&(mGi{Y`!BXodP;-$YJEM2lZWY z48vFIL(kVPXZ>IabPRea-+rq^);^lN3uHCJ0lo zvk;~heYvPHn^-Gk}rR2NE9{YZt>$6c8+s2e`42ut)3rw?he=7AOV8Vi{uB10dBM#nXV^YlHU5 z6dw3=Iy>GRn^7jCH7O&|bIhf4T$goGeBLb+Z(F9Z{JG0EN=q#FDOU<>`8)s)oCYYx zBr_ih|LtMg8IR9}g-O?2*;nO(1xih569CBd)+l;{9v$sT&LtxN+I%MxAn4^B{xal^ zqG~atC=K_w+(#^wCjj+oAVcR3iKJi?ew5i7I6}uLrp&t?*5HFQRVYHNqoY*6R{( zI)Gi!JqQ=X5z+MDh+4TV z8nJSe{RDY(Q=gFfZok1NU^DRh_mn#l^|bDB$=#Wj_ra3=!8dy|85SA9VFCCAmZ=hsL^sh^c=t7vCN zG~p8ekkx+~p83QAMbF`KYYS{`Y~hZZ+&XCCv*`iLQ91n?L2ECIILp`L$F zdGO)WKURQ zDTYH1jW>}dr#RR=ATxQS9$G$~lRlO$MF4d!TpG?>QwgxKwY4tD`c%f>@U1@GDBCmm z!}iDNM#uMyzOO8|vW_%=RrnvD%0*?dzgi%e0!|(~p*KDd8FM^C9=24Rw3z1|^OnI^ zd*XVU?$W>R;17J#$V8(>uxP2re%zwftnkrLn*X|N%yIU5P zU-RpN;n>?VZYPwJod-@ICHFAb=l^pS05``w6H;bDhEl*r!l|zLk6W{LqGLo4c}+Ef z`cvGm*7$am^TS5@^LzeWyI~Xl3F)vW^_VuFi~0~NHv=LSjU9mSI_lR$vI=Jfo*lnQ zYX9I*&ov=Bjz4QhPA5!B0GWvsa23z{*t0z+yg;$PI1CCrQr0dfm|JnRROvFm{5UJ> zkbjy%Thk|5kGgG^Zh2aCc;fgve?-Kuy9`cAzfVA;P_>p+?)#>r2b&8g;e#JBU_}G5 z*5MdA2i}v`pB6LDSsl6D7KwJTv{uGs1T{bSkNKWekvd|XKs+196j?Xf4xnP}#o{%+ ze%LIiWriNcFAQj?#o$_tfYv}<^`tL=@zgq|*Ms6}a@SX_HhraZ9)U9Yo8F23Tn;(? z{vEp*`{ng5e()MSw-|Yrmi3px4RHcKiekqRS6?|L%E=m4d#D5sot}6*^{D4MgXoRR z5%DM~O71 zCaX*COqJ#6>P7)Kb(nhgpG^jX)sd^d*ioMnX@je;z2D-xbyG^$tdGd4GEn{@x zvv1zw_?O{z^Dtc%>HwdI*2?uZ%f+EM)tPO`-`%KOquNDT*K9D)6vu2jc0d__LZ^TR zE(_T&(96p{dOS8CuX}!_CBuh~+e&ckFG1%O@P6B#G<_2S{B3Y(@beYjqK51q-b7uY zXzA@g)f|4heaNTCWy{4?d^P3K8m2cwU3k4sr6HK6e{4R#~t1J!UUs zICg<0fF_ByZ6UfOaK9x6)@9B=FmV*{LddZOdpSh*Jb!_9%-u|vd$+$2*nT-M(tP|I(H|21HeAYIDBzITJGgTfK z&u(=y&E0BF_|fa7%T%+0baK&W@MJn1C|VuWGAdL5JpZ!{3ULF<=%V1eeo`1n6G+ zNmObl>U@pz1JlO>R-zdu0jhN+*%H@AS)F9T4FucOCsLz82SxVqJW|YWg$1%zL^t-E zGxXfiRN^|U@%sR8`Y92=t%nc&dC$Hyt*kVbR1lKf)6G#uoHdd2`sP&1W+7Chbn?QuWFgXy&!6ldR@A>v1jGI&_?5u=+lN| z=1R8Q*kwDfqIrP97@trQ8^j#bdR;aB&_KlOomGyCsy**-R6D*@?^#rnzH(>9>Y{#4 z`THMl%uTuz7*2XBb>?sZ_%+HnNr!-n4rsL?-T38&_s&bvD2w}5S|pt(BHbu_0JW$X z82*WG-6HW$y8HY2OW{2aRGXyOyM!eOt8cd0N^s#O!@AY1oWreIm)}>dea(DnMLLya zEzCoXUW5qLd?agkT?#@^$YR$947mM{TGf9U3X&{S(T9fS^kr@8P3lLg zLy&)eOZ42uUahoEYYIk zRr6?gS@gQ-h~dVm8$4qb?R}#7vyI4~%tv${+zb8{Zd|cu&(NS6$pE62?sm*3iG6Xz zBje!Nr?;_QQSXj4oBXO7ww*(fg(1fG&Cqx-FWgJi=;nhsR@3Ay&6Y%ID(OgBND>qQ zIStk(%H!4(e2fuMeoztJnOunq?-73Zl^ytw&4NPq6>TI$vYKD7J4Cq6biaMYd?KkcD zM!i#i5NV0mbI;c-_(4y*0*;n63|q=GmqbNNj?gGAw(YH z>3XshV>Qqd5E`(YWU`VJH(udXGt%t;tX5qyx3#(*Rh`;13|i~MftblA+na`|;!@F0 zkrQIZRAXAM&ih#UXoP%^=P#H(7I85|$-04X7*n$uCudT3S>MYqQi);r+_1Sr|F`~c ze>kM3zNRg1TBHcaS@rwQT^C1}8m-n??7Sh`}Z&+@p*^%%)-7}0H(CuV?`;K`*4mHU|)YX)Ur(R|@uOL*OjIonAS!NI>4xpp><-I|%Z zfAtsVS&yT3rrr7v8r$vtAD%yFPqW~InO`FMeN_<6FdQOzYPA}VM0Z$r_Gz>T+Uk~- z*{PZCJ7{{WrHdnR2KHn3etU_vjms5SdEv2kqq-si4DkGHhCf6*P?u2@{5shd7us@V zi6x+J+#!ZP&GDnNM$(tSE9lqn@ptH#;Wc1$Zfl2Gw5mZt__G@Cx!QABsU~)gFGrlZ z&F;aamOEcNM>M$`hr;hs_z`{mArLwaaqQiMoM--j6EEBf0az}PCg{Ni zQxqcU>cbqcY$B^pOo&6~6XHiZ1MYy*q09+AuM&xX$vA~-qsAKy~SAogh0`5g4m~O5hN7u`v^EHzI#r0R!zYLP;sec*7EXM;ZtLfFQpnYCb0F6#P z%2ql?CLw-{L8!c-UG>L*86stA>5w%53+pc|0I)GLL?q&+2?3z1X${!dKo$BVe_!_N}x`Ago%q zum`OW$%&MsLqXFvExFm!utBC4Z0pxhyr9rG86k;#hnu8Wi_4h5~ zp`)TUIk{%EcOZw*y-=-9V_G;e8%G^UCXk=*Oy|+x>`+TRpwwij@d$tbl~4E5P6K%M zk7c?GXrt{PefTFJo_>t@dF1~;htC=-H;r6eeJM5}OFjR!E+MqcJ@wMVLna^`S#W8T zw9}Y1kUZ7M^|~G$*nXMP%AI9@Wl*xaqf*?@C@(V|UebKY7&}|~X+qD+!#8odSW{tb zv~gFilYOcl%ibw>VJ4(haa>_eukTG=k!hktB-5v5XPw-b8+I>B{MwB!pFAP_YRPCi 
zVKKN1!f7AU*&}?3{(ItpT?)?&@@WMY%_WT-Q6(n6F1Nh5{5?j>3Oi=z7C7)|0ir_x zqI#IkhJmV&mrTJyy`TH@d2=K711l_cYbWeZA+q+Mt!|W-kP5-ZqFfS=IZ?YMC5T;f z(0?Dmw!GXjn#&MmO}2=BR&Gy%X4!{YhR(WLd}`SmR#?Ry)dV z#BU&?Pd3m8z-6Gb&%do>^8+C+#J|k71HJ6e!akD`00Z_?|2LjH;UEJ@qxsU5Ll#8+ zf=s|N=&!B=vZ4kcDQ7w7d>b=Wo~(h1g{h99z8jWa;mujMR8n`HK0HDYiP8Wr5ceOr zz>6#{peK5_MVDD%6{la)T-{8#(u=JZQ_o}s6%@4DPxDnI>WZPA3$dD97H5EO|0Q$! zA7s;i{_X!1=>(Aho~9DI`z&3F7XJc4{|H6zMY+5CBZAMzWqK)ecSUm<7Y~5rHA-J? z=>9;i?a`T+0kp3#4>7e0?EQ6a8vF!Rke}SV_#Cn3G>X`-l-$T8q#5}i3zGp1fU%91 z0aGT>hwqc)@&<4@90SaCd1bVw-Eo%J12Q$w4K9i581nj6-{AN2e+7V8k6~wHNq*$j zN|?gfc0udtQ!BZevgjY2YRQY_!vV=BQF6r+fU01L`1%5B-?!je0DB(5{E&N-TkoOfTTh1g30T+hICB_Sw-GQ zl`Loadd7$d>cs`R-fBGHKt7K8%b>S}T)XOXa0}6dNal7KuQ}IkMHkvUE(9#3@5Z4G z@Gjp)=kdA9OXD6X0tH9)_s`S%douqrFl*K_&^KN|`v(ywAi~3(BY6}P0=@l@`?)tU zN!E>A-J1dYLVne! zbb$#$_9cTu#fQGLNDNofwAl;CB@|g4*##xvg?Q|+P0#8sPm{l6HapRbndi~W;`Myn zHY?{oAzJSrmPC$13?RTlx`@IGkp%!OJu0Z0v5^R`5zA6-jsKMW?6okO!#0xwheje8 z)cJ;q8B0d5+xRug9nrN`SDLQrIDLPg<~{4!WSjq9J(+}}a{J(vsA8y^OncV4)IelJ zO_;6tz5dvN5mMvlu+RB#jlLS$96-sU5*@G0VRwnHJ!%>pUR2em>FU{AXU0N{h4yX< zH|Iv&D59lNNFv)~4QfvCKjfvJY!0&SqJef#w#0{ZI*5S(l7jv6wqW;3!nu>$eA2!j z7*4j`2JSwK-n+0bzX2jkwEksSGT#Q(osERuTLMkz;CLg!Qbn^KO?6-MZ(F1U6+5N7 zgS%9X&q_aMKZ#0%k{x$2sq|>q7!<);3Fu5~ZTiM$yiu_Z0!Lw25mkpY*5fET(++|> z5taZbNHD#Nsd&!PjTdqqt@f+N&7(4-8|xC^8){j&;;MAyuSOmf`3t|SCe;TW)bjNi#3#9%Bmf$CdK)geS8vCW=%;Yk7aKqLX>oCD#AU^TCP<{?|u6z{jDS6%A*}5;0khQ!Su$A8m_uQNL_p10+{TKQ<0#^C)>7yzde_Ea3 zH6i|VrX-5!8l5|#^SC~;vKDZmu8Cs0CksNkXA_rbPs=&A8AHHtt%$!BH>kI1H4x`! zVgL64YM;yuORT!W{c$X`2=oct|688qnymqfHX4FPn~ zxrGj__%NQm-c#KOIPhI}@vzz89w66JtvaUv(}F^pNlev|T<059=v9_>zbyVlKMrTvMCjmhMT&|=-vh_a!BrrjFns8wMw>zIc zq>7+eSP{f!(wyheUxvzgyQ1X34Dm+HY;S}DDB50~aM(!`w_0o_vK4q0 zX@h$&-X}$$RJK3VP6U5;sei;%YGAf!fvgK?_t=g1JF4D-oAWmty3YT%soaKx1HhdT^ONiWZM zuA#^d!VPDlhNSF;wKtzOL#wU9}lDbu33`k^9SV1@dO$ptB2f%>5QFPd4f} zM#r@n=ZC0veh4o!N6S_0Z(j3{JLENy_AQZk$#SC)e4-EFH^|Z`>Ki&I0?mE_v1Y+d zfNU{iUeM7%(|5z7`ab$d0}G08FD~mx6cVB2t-8v$665{5K_3V4Y~ulD75G>eAEprX z8GZ7ryn!XP7dmFpjbPsTpm)wslww1?3g0@u2Q}CReBM|=0d9*=&J?DX6eyilekFGDJiGQ75bO5T{NBf4<>|`e%rZehV*)g zh5h^fT#NPmPA}})P&m#l+HLe-`m7t zQwOih&((Loe$YEX7d25niFywCHwqOx1nndSz(TQA0LKZEu>7`**euGhGcmH>` zbTaqwcV0Cla~L@9cGby1b}Uaj_^{szd&=QgMMNVrNYu<256cuMQt9k zE88zA)yEj*y7y#z^k%SFTC`|VqV9$+a$SmK4xFieQ~{tfe7KYixfijy=UQdkU@bvQ!saSV_tcJSj(B^n9jfxAJaZQ ze4|DDQ)$P1`ot^h53)Q_)rPD92qmGMxA~p*(ml6Bq$79}m3I5hgsR7{W?65`E9G{19*q)$qYu~_$<6nP*-71`*CjR{ zLDP4H12fBR!$?OTEDwl;olYM5Em}s4+~N-Z1KBH+)Qd&#P5Fog+X|ICMS{oM9&}AW z3MgSg_F5Xg0E3{lnIf|1{= zx+|*L55K%+A4lVTw)QAm2q$1O-L91r>I*niu_z;AOu(tbm$x3=FO)0pc#KI{h7Pq> z)OO>4Sp@ll$A&h!jI|1~=7zY<*mOk~PmGV~DLxb5haeD(A5hGYsx~1qa*6FL+e`un zR6MDunhoAvbbho6v^@ZRD=^PKyoaPS1M#0k>Qch-QZC@aR*s_bc3whRz1q&h_y~wj zsldC(Y{`tg27DcTFO~+IJ~+}}JO(E$R3Pp^2qp_hV3Z&NEd*>jgP>0(&4X2aHWE^l z3-Cc6M9pE(J**de#TLTmmSRxmv2&JVkkzl3!&Zy>CN$fqb(;I<(Mu#c0V6?YDxj#_ z!Nk$Ea&It#^ZZ1u*uvp32Wh4HVBfjcd@qUVmG~9+dwUU|_IN#|E-c(QZ8U%)Xb}Nn zBB~2m8;>gF|1xMBK%49(f!zeP`pbZ1LDIO!OASigF2+<*bDkp_>j%-ep!kVLc z5bsI1JeQy;*xPZ}hSe%sTU*OVZEHsPMC1M>>WX-Msq4PW##cW;j)P7XZGiv}Kb9~| z|Hr}=$Xg`mkN*BFRZU!_Gs{!>$)`vmQJts@a4v1N$X6A!ZE1mpMz>R?yn>3yw1@`s z;J40kkGX@MGCOeyoWjXXr`og<4)NTC0n2Y6aVa34wf=547EPI1dPeOfsu&p%wuy~l(X_a95Iyz2rc#IphlT6D53nyJ$gK!M9?5w^ zCO|)0CJ@Ru*NNQ>{VHaE&DAnx{gsfCZ5PqWkM(5?-5Jb_^}64ye&crwcLCUUe)BlF zFwJ{UaVo{O&w|U{mBTJ@*-PumDcHeTyfijep22q6!74wPC?yvj47< zO8dzh_b38%7GMT017cRx!AuwAm@Nkiq7UVK=f&$U4kw`4VCH31wTXS2sy@*$!9P4u zvPUFhNVQ?GvuaPi_UWwQEnnypM7s;*4~URTZw>*Dh8F8n0eeCtid$q3upa_>EoPE)RY==u*XYcerRjVK!n`Wha#XbON|Ao3y-qg2wMH4(q-*iQ 
znMWnyBzxfeXDlY3UrC^tb>_EUx!~D!rodIjzxIuWYQdfL55J#>GYM@1BU_~h!Med9 z&seigu5m7$Tw6Lq52FYVK}UcOc>XWLl&a-lhEZk7ofpARYxChPaLiwZYPla-_p!X) z?C*o&7i*Gw^mAty;%o;qAId(7woq1#eh_M|$v5*l{vLAW3<_y9BS$x|#ii0>Ef!q8z`;wTg5X|7ks`}{h^ zCAF$v_~zcRGd;GbMCk^hpP{-+G}|Ni_BIqv_#aZpD9 z<-WGqW0|RB^8ID@8B?4DsSu7PsiMBG7A`2Fg+hmu-Us!Bm+cF;$&+^opdK_`wTg@; zJjLAsn>umgEfVj%vpbTivbKJwG01mOMcLe`_wwuJg`b`#hIPW_jp6x%reP8R6BG%@ z_Tgf4-@JDP!alAF;wgCRA|HjY9B1{xxD`ilwyphM7TBV3kWH+~L zt24tB50&v*+4cyU`G&d}8uBbB`h{Q{;G(}#@9n$}&v9)V9}P@6e0BqCHLGJR=%m;k z)c!+ZqYiMCZtuK?R7<>Xx$b_WxjE_Xr0fC9x8?_jx2zk)?)9B3EiF|$V;MTW+2?+k70_G}y1Ccap2UJFUsDj=M~ zqT|~55_m&4jp(zDZP~ajV4;>HW`BKE`d2nb4utPFeG*d*IpZh|K2*i(KrT zvi`?)26s@kq{a*jo0FA>}NceUrgHy za~61cic!?z_BP?_TV<4tH7an4$MttUhO{K zi7Ssp^5m2Vc&aMT(T4>F8Vf#sxh((f+?VW^;Wsfb*5BoHLHgvpDmffmL2yG%+rO|$ zl`iewO41&1%8?rW$dza~sVq_PVtA{BFxInhzGj$vhKx3(4<(sQw2gUy$*y}>Fj`xk zB5zl#pHNJ}GK7JiV;{T&4iz2T}n?Hy6 zwkFKGmfZVG17_7G!etx1yi*r^~-<y6>{pJ5Q))+v8$hReq4+Y=V`BYlupJ>ozlIQ(Z{xT&QjtZ^0aV@+|zmzL(bc&%8x?)i%BDeF&>7$@haCx!=l z*sm_&uK+*^h@s{F;&2R#)obQKmV(^XjY*8w^&933$(D|B#HhG&Soyi1L%Qo!TLNO~ z7qrIb9bKBLR$EsWbH^dWC}lUqpe@&8ex6CjUg4A8s~;aCosApM{%T|ulIVnDPA#Fj zWX>9shYK(vj-B=bpC8LqfX_wn`8dvw0sEmzFd63d3{_3sk+R= zB4ugIo^LE-fVFt27goh+c}DBUiqpb3)(d=~bX0u{zdPjtRb>*+8)9iiFjH!JTbsPx zR^UZ?&}bVy$?Nt(|8jf0Mnwegm5?FbV{P0*Cn)xXM6ZBcJe1}3jM2r08e8{$N~4M5 zyLazdO@D>_u+U)s|S#|x^;Mc#0!IV@##V_uIF4pc?qd@9AiJMC#~Pq59RW^trhYm!Wx|0e-E=&D!?ez z*naj*8T~hi4J<~8#UVNtxL_((b{w9AkMH|(SKo=FMpuwBIeZ&M17Od8>@BG81Ooeh z6h0Y*;50QHj?5iBUp9Y4Al{64FtZ$*9X$U;zFK%cr>4FWz^v|~;llQ0Z9@u7g{S;b z<#i~b^G}EUY-HE>D{v8SPKo%;_j}lCv#L8b_7hQ}7xL3ah zS}yZR0uzY%|JMWx@jJuRz_+=|BzO&Vi*A==;5!lg zgw4{kc;+gC;F14u*?q^CEgRf{y3XH$P%{Lzf6*Rfj|Flfeo79|TX8^klV+$jvYiv% zudaLvP*jFQH6fJ$G6(?7aZLz-A@;<%{$W(6a7kA9c z*md8+gV76@EPLgEScCO1gA;KPkTL8>_1gT3@VZwavt5y2m$-3v43bUE<%yqHcpczv zxOIzgOWi-dWa%cq4++jJ(_iyRK)05!en-z@+S>X!NB>9QjS$!X<-U+;p4P2&gZyRD z)~Oy!^G3xKMPjk26BV286Gs=Bqgs5gP3okpB+g^8TZVsLr25m#8c4!_0)J`s(ASA1uegzsl9R`F#@OVg_~^RYa6{2huh zUiAYZYZ)Hfa|w0yg}$3pFh$wm*v?6Vl0IcU`Q$&N-~DQ8-6fpiD?olFW_amL6mh{e zT);fcPNXxv>o~yXF0+?wM2{n9^u5Q`M$3t|s$)SHBTvgkBHVD^4sSVnahe$!L2wxI~2@!d-h9$n&VOM>YpZ_(WtA$9ES1vPrV znBv9^wH!ZWm7QhixxRqO=r~5isRNP#LR;n6EuZa5$ELV=#t%}{Eg%0}&^j<_5!~yB zZBooVfyl4En}uKfI$5Oa(1hHvx)rqRZgOAR7i=6?X>C9IESam)c5wHn1{a5)H+&pb z!}}U+it~x2OOlPtWedWw{1QzcS}T4r32u-^AC!y7b8Pg8vvOtM5p`*J&c|x*=KQ?# zQI@jlTerT3JG;ZvqWN=81FjQCdG(;r4%fy9U1N~UA4^m?VoXg|B1&gsEZ6Gwohb6f zv`T~`M}UgFLwERv|N9~M0Uu1ma*%NWuW(&NwL&|*@(_beP|Fw9M^CTY_0$MlHxU1~v; z4q=hnYAZRsgY?@8CRNrruiO5opU&GBPBUlT;A3tB0R$(EkPyG0#ey~JXaJ+mgt+#F z6ghoCR$>VJ5#14Y&Mae+DbZ_FS9KA z7)o+HH}cDuFVo3mq%yJKw5&_km{IDMp9~58>o|$DOztC|!p`-+tzkdqG3GWyqh;?Pvs31TFMCYi}F{ znOfX&y);s`(`J#Md$coHFgg>i$P?IQR>dFhqi2;Yd8=SsHURhMlgvOxMk@*Jza^tc zmx~9T1z-6BV%d55$zDG>D}Rqe>q>^FO#f9CL6u0|d&=kAL092uP8hs? zj4J;H=Jsu5+iRb&>40=MZlpXe;CANt`Nu1mx$@`LcQ^Ts-!9cfLZ^_poj3c+fn_OV zuZs0=IdwDZzxiORSAthe8P{(u>%QCOV1PSd8s#uKOT#Z=UW-T;>NT=gU?`9X19B5j zwM1E3yLx&C)kphF=2v#xrCT`NwH3H0BoXzTn{d>9&A79#+t7=mr>hd&R{DK3cr9yx zY_uT(bpK+~M2RgYx5T+?6qSmef{MJ{gD5#$n_`rtgX%Gcpv?jcxGya&DCLp5uK}MeL z#(%2i{i^!6J8vXl)7~+knb!LToGYA`NSGRVkLZYX$@0$+0d;X&0VN27>)deJS41ku zu1f!;+wLnrbb6H;FZIV?Fo9Q_Yfmu6I1bRFCm%^nH+$qaSCCw%oEu4Ob==O=v(A#f zWHAB%&KC21-|+02ysfe*g5{R}BG$``WOM-roy33NyC52+7jrKm6_;AyP+4 zJqbrbqkwl{44_^s2%``{S#nhmbwGk1KLAQxA%7;IBmSjQ68qor|Ej3?ucfyB&%VC? 
zFEyXhS?ou6+gtycrOmMZPa52Rl_7Hs38UY;fB=Y}eZ`9O_H8%dX7m43esgvi5F`xH z4Uzy(qiWa)ctELR>M61k;K_smSCDSGgN`k^5xW1dN6-iq2VIrQJO&dYlxfpvCS_U- zT#I$RNM)W6Z8g1u1==Vj4)3uA_u^*aLj(ok_h0~&9}!BQQaRg&VD@b4eF-0SDBOGz zI5)m9qjLY~`8{8OpA?gWth&;qw~X08($@<_^UCBC$LX16Ml53ki__p=f~vgVnBxa7H9R7}~Ce)Z3Hmu?bsJLPnY~qmxiP zi@@+L=LTJ7d`&}IDl^jngW4vEM^i?ic*E=!d9-va<&{tZ7K18bCyN_V(-z*KHV*-g zeDr9Os81Jaq_`tS0MN959zetUCMYu$7OE@}-Hw{B?rdWqCjd|88!oJwCk_>M638ePv91JhS=CNJ#BP*wI1>mSpyOxF^~YnHQ`bS+dN3sIMv8=PWr?i z!p-WVuXkx!XneLcWX>Pj2YN%*EhZ~<+bG5EN6sL=ZtyfEmkEzy^s2QGXN;jU&WOu@ z8R~c=_T{2bHLG1v<`9>V!ZELdG}$W`J~&35mpe*}e=x8yoO*e!eCv~jP@h|F%_Ucc4y?z4whDov5)7b^L>LHag;)_m;u5KHeaCI1n{1pd^ z0IVNaji}u*W0;4fbT{h!!HP#G=cV(Cd`QyR5xDr1RcDo(_J*oY_Q8T;v(HS>g+exd zLEX~Fg*AU!V>ZlR_lkN=IK^mMYHjtON2n|Sr7RXx|1#7_bkh8=bU|y0c&eIcyq9Zr z`ZXV1tg?AyeOlfTvB%-o^&a?Rd}X?+#j=Z^;i2{hG!nS71oS<=0&8{9>TfY#qh{GR zCmXAn;qUug?9``tH<2(`M%tu1r^xAu-Ltr}_ymfv2c%Z1`y%dhh1e)nHUI9j05x?N zi}X7zJfVdcy`#N4mbYSNl3!%%{h@@St@9L{kZQ{)-6$;SYoT;}nb>^kh_$Uo{`=z;WUIHHbJd;r<+ZTE%a+m<^LRIb%U`d1=^9GeI(C0vEr9ILv|Gz)=G8vk!#oyF|8PLws_|6-+o>u+&f#Wz%8~T9>rad zMm2}sA2%Y}F~w8~ffTf=QR*+M=-8N9ed*rxE$u*{xeA74pe}u^`Uah%5Yj%5zt%hfu1F?uE7%u&6|p0)`ko^UM3pve+YH| zkvwrl!^vq3^;B*~iIh*vhbvo?Rl7Ld@YpQ%p;eQ~Kd%jTv%Hp!WW{t(VePg>-?NkM z3kqI@UZDPCf8FTgboFtE1l!+N2;QH6foEvmD+-cds54$)-M#sVhxe~u2lEPP@APhU zQ{+NUkscH7lWjYMo#_R|Dn{j=N|HP)^Z-U#Gbe3Uhj%fTbsp)8Wu;*x`na7;nqTAq z3!Y$&IAWrT1JWO1z_9!-mm(%~3X4pFg>VK^-EBe*is^;1VLrvyhF>_jyUSyxeO|X? zHrUwDpj;t~K+L_%NjCV)@ECj#P<2~bu3B!{Ey}S_AJGb6R{(S0lcJBv-Ha`>nfo!z zQBi1@9N=9h>V2wb=~E!9^2wS)JWGjvD5+mI=ZdOs+ynBS$d>iP-@HxN=K7iIr#MBG|6+-G&5E0bd&WhKAPCmAu zGkBZ0VrJ6LV9vtf^r7JMhsUM+05`t)8Q9*uo#B2ik-7IRRwMeh&zq;m<}^Kq=m+8u?ot5p#f=Vnzy3$!U0n}vUj z#zY(@oms9Lq<2GD<>1orG5Bc$^+KOac1}rWg@^2GDWU9?4(wqL)|#K^GNSeBt3GZD zkeBt}6a5bmQ)tp10)QA}Jxj!z>|A?n{`gi_l@?1e=R1A3wJCd2e$CMO9lMl62E$b` z2>BWa@Os|m01$@j3PHPrCPDGEp<}+a@Sxo5Yk%E{zF(i_FKSF|@NJg(R=#|DB}VTj z{RteP&VN#{C<@CIVr?I}u}b<))(j<9le~!u5jF4Zm#p;7$M=hKS9E;>NSyiCRCqE) z9>~v80+H#1;*m*42`j5r65IFHameD9@l5WzdkFlOkSQ6^}Y}_^b7eAiJ{P$(h8}z=pDE?jH9LcH(0mi z;h{=lD$MTo=K5?(C@y3K!!-B(v6tH)$L2q(X(;gnfajDVH4^Z@Qqa)z2dZD<&2$~a zTDQDUU_Z4>5njB18SXn;(J<&?Sd!y_Xnz3lMxKVb5C&2039l_pHdOeV|BxUMKa+M6|v?)&5W#(f`V?QDV<%F{fkBbIGXN=sfJ!@7&}Sv>Y}f^r~GCX6(5V z@A>u<4sUKpLFRfT7-&+9P_hIs-gC)IOG-cC95S*3n%ahz#nwmOq>ua7)H70~4R&H^ zL$nX5r^r+A{^N(3Pr;y>`P&nhFKpQSimu|2j#Qldxa%18{B*R~pC>FAPUglNM|@{d zJW%f++mZ=rX7Re!4Q}?l;~2puh>sUz&da&WfLzt5Gxrjb=obJv-wW5S&2vFkA)AJ0 zf=(siWp0RaP|pA6V=wXQ9Y^o`L$e?RGw5kIb#Yp9pjwz&wv^`%i$~C<_{?3QLS2_n zHo7O5M0bE3o>rEtNiT2R%*S8-Qv=DyvoDD^f2Fb#joMkl7pCXM(E%0qTEqRZ4uUwd zUVrJx>FSxjL%u^FvP4lw4%ud*R_^WBrl_GYnCvjlc2VoD-JOzp#(*o8^B3-IJsqeEo{9W{oQWk}qZPK@fR71Fl3x>6+ZQm$XNq_}*VQ91>0Whv zJ`O&NuLl~MV$5empD7z>Iyw1U8N2J7>TUtnj1drXe4N2q(jsbxX+jF{HF6nUGu)Bh zB8%ELwOqEOavG7Ike4YCavnfCbPu0LJek?#;kjU~G_!>SPhUx~@Qu}7k*c||{EaPS zSmC%Ci5U+6>Y->c(Bv7bi;MF+V>gnlWpqmPu)w&|>!d2^|6%Vvw5k=`GBGQEjN|Po6(u7c?2clHzO+adtB2pv06M8Swdr7EL6KWtK-uYkqu6@_q z=iIZ;Id|XR{czU@2@uVkzH`p^ecmyiF-E%<{1xgaoR})w1}0oWkvEA(W(z+z32=h7|~JV)Gx&qyW^E+o9gL zNX$w7^@SIJKk>qQ#I%?Y@U|x~SC4vRn+KsTfLH2Zzyn=ct^^di8xCjpfn_EJMvwo7 z+gkb_j1~<#6Fpulw#KBHs`$byjg7V|qkE5UC-{y1{MObRX!~Yvo%K>o&rpy#;B`C$ zkR41!7L1l>v$^%?dj$P&gUJM7viL3Lt$#eJqb|H14Qvpy5`uNRSjjT)jKu4V2PM>= z%>t7kFJAr!?A3WnrFsPblpVpTOxrLVwl^;W5#t8ozaMj^8~5(Nc768xuP$ROB%(5g z%&SWYHx!z#nvaJ+uN*68D|<0m8E5^tnO-Bl#v)Vk7OG0@0;^o0c{sdMlE_d=R7IM3 ztslGAm5mN;^|*MVi=U{3%)fOtOk)cu>nwuM_f#_CO%15ZHSF%`ORE+r4C8?<;^oWd zDgJzTjak!iIa3R?$g`(Fm9IXr#$0aPPuX8u9~_N-*jr&0{~)$F5d?67z?0)kM`t#L zH$S$hubjAdGq$h5!P6s(KTfMg)7L6CTWvP_H=ocAizm!|OvKJ{xCMM>Q3bJm0>4(7 
z!ol};w5LHEJX@WlOms7g!7h`oAOVMt6P<9b7i}(Hs@iWa8lSta2?mJ5j9W+M9$Mz@ zwkEV#OyobIHjYSDoq?Ao@01c=l7`{jkKh+J4{Qg%D$#Ui7xz%4@_KC-kCHotp1xAg zlxo>FmXaz2VU1!=DT(}l_4(ce7W?;S=Pc(*aBJ`d|2~Cl$%@!fiM--Xf0NX~Y8k5a z#`yal1z+#p3XmUI?PM#BH%bW;-$xN|^c}n4`v{8DFu52s9RP4wBc3E1Ivf!kGZ69j zCik-H5=fQx5 z9W(@B!9F&4FUMR0N;BJ6^2CG%V1akAOE}f}!v7dnb8jWFte=8OeSHM=ZASRfLO{ST z63|#z3g3T00>P&!wg8!f7VHFgc7eq)0fk*W53Gcr^TwkT;Y+!ovdJ!J%z_NoqY|6Y z2A-YpF-vf9@Dw->jzS~~4>cM%&bQy4Hgo+N^9naKrF1NMp6gHJKYS({9wO_6UH}8d4@^A0B7%zbJ!0$MWkYH<6xXa_*6qs;+ z?_;)8i{KnfyikI9Hy0e5T9ugRP^Y&WTG*VdFB8>JmHb@2SM$=Q?o;SnUBzQ@;vCEa zcoNNsK3LEvVRZINm-^Wn&$`E}bFi8M4I5q**2c#mfMv(5}MiLW`)dZs8cID~`p z8k_2dA6(F!2Iy>ynF$}o(1jqO3hWNY$H#SzQSBoGj;mdqI@*!j{q1CAy8emhW@9hI zAy2Xv0h?)Iv46x%00`w@PT~JAi~${DBP*Ohj%oetvrFZ)z8ZNU+ftFiN4Mid#3O_w zuFNnnD~GR1MD;RXBfo4JpvboE;1vEty;DPHo>4GgxzqSzAl?CcpoBL5Qa8Kar+Q6G=y~pC?bNbY{40O?kJtiV z$o@xvnEpx09y^4BZq&-lN?gJnm3Jr(CH&r#J~X^sw$dTOkkqNuOMJPRiq%R-6E(Gb)~!wC9f zcP-vlcK)Q|f+wYr<1b5w%atFlkO%N3>g2?Qd%rG48(w<1YJ-LBsNgwezG>r@gn+ka}YI-s(lg53WylX&p3|*1O=J*oQj(AZUa3w`B(+!8?uh z#>QWac8eCJ*(^VMB~JNW_^rFqV*=YCn1$m}?I2E~pD#gm7RHCQD&NB`jR^9V-pYFJQrVet%%4?*_+%zE+QbcXZsUsgIX*q= zmi!d=(>ih5GF*O27NFX4mX@VkkU=nE!c+edk}yltqhAe@4h48$?b!2z1?o4Y1w7Y& zbA~8A?4}R!*y~gjju!f+-eY>Dhj()?1OX!6!Krrm|&8T%wjw=R|xu|mfKqg_sAc3F7gGbk_L33bi}VdB&0!+M23WLZs*@oa?0 zctm^VgGT%doPMi3xc#_T@FNBy0z54haozPPp23%gM_1lcvNnsZFp$Q~S!XdSs3k{o zjq-^S{WAh)@2^%fsvY?+!;z-DYV^Qwyfo5vKnI*Ho~?zLy~W^BuJF_}zQoAwL= zbab}m53L`Ydp4X8n@c&|CCg6l)KrAyY5FW6To273d1)Re;Y-n=F#RSf{QP%iD9vGM zd;d?W@6`u`Q@JXt5Bg|#S);nhVqJ5!L0yPbS;QsyfQ@FAp)!Wljm9f@hMy@()iWL)S6RZB~^xjmltQKySORw zv)Nugc15?bPE|AR;|(Op9!>D>n77=(Vl%pXIgxy#4m{(wj12c@-c$hnlA{;N8FS}L z^$pHsd`NY2*E#5UTe2U3`VVk-8fezIWpB+0qhn&=+`z_=cLFAoHCq`}Hg897vKchH zOZT2D9xA-f@iI|XRqCjOt^?Trd+x9-ad~P@5UtT$rlQK@)S3?8`xp>n^J;g$TYJO_ z=vidUI;-85gH?=Q5q~f6C}u=P_yFDsuYl|@eK(pRs#gATJVa1zK6D&S+PsuJ7`8vm z_9ZE|%#g5YZl= zh%;WAC@%7Ps8x~Nd8RkV+@zu-vaaGa)q80~DE|RVx9CYS?~mv6Y&O0fgv4Vn;}MpJ4}4x$Q5_)bZY#1J7pE)TfRvU%KH=m(9q@mO0)iyRq{8>uo%i8h*_S^dN!>jjK9m>ZTkeK>{FfEaBY6OAxXR8I^C>`wfPTR8Hd71IsXv zM@oEyEmAl1-Xwhrr5GBD!QVFD{)nLq);B{ZSC!xB4Alp`&)7G-zi?`T2cQ|g0@IxtS3PP&|T*+ zd-&PA2IOxyOiVL02*i%iO28n3w~0?@9;dw1TKsbD;<8ODi>2t|{;2ej#!AC=0zYZc zoK{Z9av4I7XVe-IZ+z4}k^GbGSsa_CZFH|%fEH-~nf1>x zx8xW)yr63P5Eqf}JRJ(N#;zNrJ)N)^ub9%^y{zKGSw_DV#xxc}o*qtuDhL7lA&w?V zMF%^aX2)4bqk|Rc9dp^|kZJxF^M~CZc|UdEh~>OcXa)a;rK0 zdk0fb)zqhn+q;vnw`zkrH9R3Neg(#vQQ8n_{Co5zFRpGUS(M`53RjeqT4>jF&;EdeZuHHhPZ`q2-&|}qv~t{MEchMSgZW&`~NE_@Bgc@;O5Go zRzUcP7e+c-R-EN;z=ghqNuhngl9TAo&-?)#7kr)p!U6=?=m|*?;JmlYk-tdLAz~!F zycE=jM;^$L!Vl^2ityirzo}-9-@{v}|ByizPbqbWa$wf~!;lTzbJTC=JS_LPfcQmsY4uL?3YR(5Lm6!#q7tRgI|@$23D^ah@WoY&pN zbRXK*s;%CxH;MY6Jyek&T4nCkIst1y3HdKDCD;KKT9^D$LDFIJMQdwYyC;JK^NXeJ zSW}79WO+%xj*ncAh2WnI`t{C1Fm=4?5+XtYq`-k|%B-$xY3R1nwai>Fv4rW;>NNV^ zM}OorXiY^D86#9vcMwl;;{&;+;JcnaZ%oxF8^T=3z$eYf-?R+47k3S>dc7ObX5xu& zOw+9jBji^Qe%BJ`J4j4#UAxBGS6yD@Nvt~LU>1sf2W}&)>fi638Y%mKg|}71H?!cY zjDN`L0dJL1C%_H%DL_mBTRl&Mad`W4G~(X9#IGo5WQQ7zY6#Cx|$oS0yUbvH|8Q2x5F5@SuQj)HMjwN7TL# zoS1S7|4$ANK#13sLf{*~sqp0kcQ_^S$zMCruTWwp0+28In-81+0PGzW&we0Ifm7by z1fyqBM>^nr;8fX@5I?~G^4Ov3o?u>^5f49?5VyOYeeLg%q}Tx8pm~ z>CpE^O`Ls~$fJYYf8=A5X;kj%w3{A3M{M0dF>07G;qK;S-<8p-wg)n<&(5H;)(i4A^bF_Jxd%u@#OC~FpGPD>bf~Kab zm>27RBlV`I2kbo$_{(-Bz2y(vbjU-lD?G#)WWIIn{ITD`&zgr4+FP52_8xkVBd}`{%+Uv7gX|R;_v=nlpW^#cO5`SXg6S|n_69% zgsU7ZcR)Kn(IN+K)u8C3Y{P_Sgja^=-}v3tDG|5zBs%-&j%Gjh+?ZGz3PHV&mB6nM zeuD|#hN!X)5O!oiYB{&0G=|99VphK-3<44Fv$QoYUIdrJD;kiM@DB4!B%3%?AAr4B z%~tnA>m%3Gai8QwbMRX&{;BR;|;a^s9L9I0@}2&OpRbOl(>><{MK@3|xJ8W_a(a2>VxZGzBs&P)?N 
zVtC3smjgF5Rf5_kiji~=AgQrMMEND-iHc!&{Rj4%*UCiN93K>V4{$oSU8mKw{=`RK z^CsPNrQZ0tFFUw&H_9C#V>t0$%gjj7ow9T$&-=VMH>8tGcOIdOZi zUVs(TUq>Uh>oHKO-*uns+LV2nk$e%*73L`G4{EvOJ^P(;DOZM|_ATe>C|l7G1xozm zRLZcusOa46K99AzCwu&u0bY%eTVL}n>-*Z6I_J_w#dk-DO85gg2!P^KB&n_FM#o?r zDD?GNc>=kqcz+wK5y$eCiCTbftWU3zx?<$!f(4$n(!f&r*)?)Un=%U%!L7lBUU zojq#O5U%Bipaoa^f=E@ZSKvjuOv)bI(2bTg|jMoxmD)VorU9y!^?WpFSQ`O>E=SY$OUKDc`Z}S+dx9!t^JOYs`n+e#=^m-c2FLj#H z&G%#FwM|0!34#-dt+{@PKU`#oQHAP;GrVGk-Y`L+F5vOot#I4mO`<^MS%|Q zi^Oy2N0|OX*&Iq@B{pp8aE`@2VWBF@pT&e}`&pc~Z6lMSb{l>@I#N;Hk;V=l&o0?~ zJgQv=8dhS0-o1N2|2W#^#!qEN2H)+B8+0cF-Y0Fv8VAS0=UEM4q`mEkv8{|_)Off5 zKwf)rqoM&SHdZ#<&tVZ|ivkfp0aFlRgv6Bh^&9bO z;VwMRVoH}gg{rqtu!WsKI->%}!R#KKF3Py0uC|K#>_L6_@WePL4N~r7yyVRVl^2=U z4MMsvCgyLU6f3>CUl44_7MUA(io|Gwcz`nB+||I1;0e?x)o92WsSa>Fu`^|fJcM%s zQ{nc}A`6j%;CB?{z|?@keo>@tH@HVJrE%vpOfK+LZ;x9E$AYo0%tb_zF2n7VEO|9A z0&N`TP>L>Ra?ZBZgjm@X-H2+@9&-4;$sTXAb18^L((?jW4chbeT;?{V&snk=n4v)n%>lpY04G-%{Wg=^;PMJM zjHx5{SmlK`+;1FXcgWl=GfxX;#24Fl$Xo9$$xAN1RC=to`pIwzlP3G6S9^!IkS%dK zP`30mY^#ekORVhlCCNLRs?cGs^FQ%lJnnTL6?$>a8S(i*O%{7nQ>PPm= zEAQUTT<9*TZ%>sBs(?>}bm5ogHzjBEPCjMTs#gj!FFaZ94Q>^Rze>(RNtg|_-*~Y! zwgo@l!`o!}J7O4@)go_}BkJq(2z)y`kp_ic;gB!|VQih3urdDnjZ{uEgJf&E@OL|w zij-$Ro1S#O6n#y_r0r7-zsdg<#`P3Bb(k$EbSChL{Gya}DwgwX@-l()uUfKW@#xeB zn9)~|O2bINRTK_T8rY9^AaY_9Wp_;#L4e3#g(MVa?FgMlvqQdg&+T>(U3#q#|FDD) z!}c!hPPTw9qRQW(-p8ZiXwhnfuL{+Rl!n&(SAK^N)aTzLIS_k|v)6{EMLV0=u%hjz z{&dWXs=4QNW3x{VR-X?hlvoN-I$Te;iKZHKF)tLyhEy+!IJC=b;gesUXWn#FF=`s- z`-!ikcHZBeGmU5XWLSMyV>QNPa&E$P4q|g@GS)BEn^z@?3j?{+eL2AyU6m;&C2#Lg zu_(s*NdNAzGSkO@tW5q6%>Dc8f0eo5zpMrR&t$LvLMwrPjp=_orr!NnS9|} zNWJ0T>y$(Q#8I-JE`rj*yKm9qL8OrJazpt6z<}dFMveb71phyR=l}KI zzsBl+=N|a;>mME#Ve$^p{>56L^o5_XPn^H@$|gNF03Qzlx9h7Kn40c%+M$l}Dbh0XE;%}tB&eR@k*^RK$N{n$?0*@(+lhMg3VhW(k! z;Qmx-S7JBMeBvf*M+Xcgtg-n+g{%OJ#-^& zUpJnx%#32(ED&Bb$J-&#k^pcH{)en~18|W7O2>RCfO-K+uFOILFAEXz7y67K-9i9J z_AdjH`U?PaRw&47()$m8Ip&`}iPQ~TEdc=J24n7EQRkQ8D~aG!^8fURG24;m`~Jej zw*UVf?@jr6ZpC^mjnb3M8@m=YMlmZrpabMvY+gI$idVO{kI05355q(`-92Z@7ZvAA zvWa|juUf0fudAE6jGk@#N<-LJQWBLlDZ?UjjnVfEW2RVa{aar5ZktGIu_p8oZ_ z{|QU&e~lM_Q4asFtPM#bP;omO!`=6Lw;pDUcyAD#X%(J}xq21*2+c8QWyRonGEa zXPK_4B3tG=8Su+M53rzfE1PmHv1?q^+BFT;cVst8XH{>LS*yB`UE03uvAe|J=ioE3 z<{L*7lkTV>PIST}F(Rf-mp5kAv7J%URqW9}`OK>{via^9v0MnDdS0t1WSZ~rF5}5N>L>xd)Vn=;`vvw?*+RiX z$x4>z>0NS3n0c=8I7M4gk2+q8Xo2zWta;!nb z(-E2@htuSAD{trq{78b+gFe0fVFGf>q#=lE;y!tyIwEqFzGg@_XaY`>H`9mK?`B$_ zqAX{oIwc(s9?r^djRWW`@>FDz1GOz`2ly9@3WJXIC*Bu=S&6#8MDL*kb>NIzw~RG5 z~B4S7P)W*|S z=T*s;rR5AN?+6w)#&mBC=fBFa(~WRVDAUxRo#vm<2xjW{lI-LL0j--M@8m4d4uPJz z_AV)o-)$FeZ5BOW<*iQo4SR68u@*zS<6Gu^ zjH+_fad$JU$ZgDJ08$x}x>uc2*H;Re;ny~=$5;68R-M#4d~bvpWHlL#qZ~ogi%R3r zN2Z$XZpxl{bD9p$f!YnP%UU})oeb5aUaKg!AT{ttxLI^^r{`EU-n#(sT~zy6(l#>$ z{b>T-$E~ZRm9JJmTjrc_X&?Sv%geWzKO2Ugo1|03IH2=bg?$IbG)*gk=+2`};cf*X zm?oG;_M8D+>>8mr;#?f%Ckw+fFWS zX^lH(HB^4NV~Xwn_Bru2n>L+LGrbqj^;bf%uFep>Uzq%4Og^JQ5g}&WW0}kCuP)AspBEZK*gHkus`1}4P~>ooR|W}luJeAd#pmM6 z(OoTH-fzWx@=5sJQ&7=kuRG%qwL_+Ry03G*By-g1)*1sz`yu5N2FAzG66iJ+!^z^}-uDc=O7``Kz00SB9DKue-fE^9akE*z?%{i?DD+B0Y{t9 z{7}fz7MJUH!Q&t5{?xTxH+#D%`7Dh~?&?JU1}2){lcfm%)>&A%^(rwnZzfYVg}g{Vw!ec3bLrWQ$n=b2;qdjj8wM>3Qg{no z-x6gg6zH`S!DnDV>_%Qr3yw-U*u^Ltt!L(jts$CmTr*i(oCSqVJYPAZYIzc&Wer!B zX+$w}0f%7Djc*fOW}`btRLqK%I;Z73cmj}zj1@l>T%;a$i%aGtE z>W$}{KA1jlKmQ%(j6CDzp}IJidN#C zg6TKGYGN<^<6ncm87*n_}$3YQ!^4ULWDM=_PQCDiEZ_84lS9<~@2(ID`!$L=9( zPPE>%c*Dj8uUwN@E>GH^04knXMdSsX!H#5+8Gi7j8FX2W7aS$dC6X#jC>2exz7(#J zI3G#R%Cs;TZb)BWH}Z02{$q~_q+|mbFULW6w&{ZJokX8>#$1IgWfs7HA9 zyc!v;ACNJ<*Y|o|zxFVL374_Bqq9s$)pvQa100;%6;femt%lHzUfzjoH(6f4NR__ 
[GIT binary patch payload: base85-encoded binary data, not recoverable as text]
zz$k5oy$)43AG|{=H`{yk$iCUM&MEkjBKUf~82S|D+(GUX)ie2^0P66>uHMiH+`lFH zCJ8y=b_8MH2h}|&=^wK)#WLiHbBZ_2ZKhh--OteearaCy&!?94Vfgi{q*Mb!n}EI4 zH{07-nE?deri-~riDL$JE|Wr@k5*QUHf1t}i*oG>hOuf9nZzE?bpd5KavtyT8KSZQ zx&h^xt|Hhx=XRJOsbRX`pO9GppkABR@Y+PzkSxt=$1bMIJo|4kJ zsW&Tm9`}IX(&kgS6V5QVUSl_U^DLTuDU}GlpQLn=1-liPiSp_f3ZdeL!gkKGe8U!V(DlU8Y6eQaw^M8PM!7DW+G@~3kT8yNBchVLq@FJt;{91ESJ>G zE8O(H4*Ez)aB7Ro5)*;fC;D@J)|(S6p&kXjw-z*37x$*@Ohi2#xjKTQ_+J*J*Z9|k zsZf`mFDseE_h5Er?xkqez0s5|N_(}4<~eODTjM?^sCm8V$BNBkUwX!6dlaD-P0i{& zDys_-UoWmve5it9 zs`Axu^a9%o;(2n)1aMSxUjMS?63p4{@==F4;F6;k)uvRYXth2O{@XJjr9B*;9dWKn z378FrRx$D>F3SaFzPNuw$-Xrm`(_=+dr{N~#I1gX-y+C;8Z27v;Zm*1@Cmr7&T)11 zaR3E{W4gqovh^m%YxiRUn9`Z0tg*!M{l?fM<3mI5wCy%mUQpzNijH$o0ZO*{ZqHO9 zrO9nkOKhpzi??PX*&eM>!^41p+Oprag<|NBI86_M_XKrULT5=ps+5FGRy zPO707pTV0)m!LmH2;|y@?6C1Ky`@q0U_3#mfFLoAR@CN8W1K6M>U+7&ZtS;4`BlWw zsOz4I_O05~>w^QdU-Q{v3bs%ieEKsipYAkmR8_;0yZO@?PuQHPm8PxnTW~rpfQ2;2 z`)zkFU$c6T!k)O>VOB?O6bc6T#CL9Iv^~hzA^hgNrXVr=B9LJt!+wJda7J8|%Y2B0CW~aZ!?9y!^!z%)zb;3%P;I8+?6sJ{HD%K7^l=h6IS-VJbp*vRY!#xkan$Feo#0}!9GJG&l z@OmIjP*zNHFL=?c*K$10y||mf(zK#!UcgnhE?k9SQW4T|Us#m5YM7fR@_-Q*f=8e0 z@2xSNXoHCHKI(BjF#4KBwJdk8XL4~D`XMQOaU6jokINfmzG~lS4?ZM?eYX@NaJ_=` z9>&A2)nju%#ik58`v|FxGm#vj))*!=KB?*@4^ud7Xm-kx4^=dPkT|e8gG?8jk_gq{*?BY;wYW=;!M$=Epu$9FfPC}F1xv7ZuhiZ4l6gV68(GI zBC7mn>1=|6?v(d-3RzYHg8%G=(Eda;paLNyh&N6GD{s+guuA|N6>V21!}QzEdQ4HgU&zDf zYF$PQ)qpW`uFTKz$#~j#T?&`emOBGh54d~VJk!V`2r>n2I<6&_Lp*l^lLy();y9E| z`G?IX=^ll_PT50BfeHnziE8co7JMb9{s~qrTJrnj7pn@7CbfEb2=&U`kHU|PS()Xy zuBUf7hK}zU6`1C+FI2g<_QV&kR=L0I3MgLfI1q!3=8Ft&`tF$(E9x~sYxHwn#`U4i z+gemsB61AXUK3OB=cOk?hYKah@d9MK=FgK-(T2I{f_8s zQA9j(KmQs(v}w<=BMCLX06rGd8ISm)Bm|gy*`G^X-U#k*2LQmReB4;Q)o3$(VdDrf zW3K=?bmn+^mZeAOG-Lk;*HAfxU-5E&65RRq|@G}6e8V~ zBxdV9;oVC}JhU&bXbuq#c0-`ok}g4bpz$V0q+CSUJxA05{g=$yg7(`rpx;VBV6pFm zbWUF#4@N>Ee;Y?wp``1v#OL61v2z&%6@%1;vjPTs7=FS1n3M{zQiu?oY#j@Q1E@ z%AY`s)9YUs!v>?8XRqxH-sib5Nt}l=1e)-M!0j&5i$-27F>Df$3e>(=<&Y+RvTEhh za4t``xr2vp2q**|%bbpBVx2eUqY*BMXj{ts9-w}3XRl_x$XIk`i`B9{3MzE5*5)zY zWi>O1-*iT}oy+ti7!WQ=RLi@IVsT0|3xU0TrnLh&POfS0jDvp17fLTk+Z*l+d2=1x zZ8BNk{vYhUcT`j1y6y{tAiejhASk_~w17wx5k;g!lnwy_=@5v3^bP_FN~B3A^d{1q zv`Cjuf)pu|h{R|@JagSK);Vk4yZ1hOt-bHQ=iK8T8DS(dB=gJ6_r2fyKJV|j55AtW ziCto_6R@dIsj5SQ2f$l=SmGKOD((QpDJ#{_CVy$6Vo4_ z{I#O_!IqX5@4M2daZ}zAK(`GnP8N+&BzP1K?mPIfM$~`VMQ)_2h88p(gYzEzN+ksB zcjgmC@%|+t-G&B9G6FKE7ak4wX{aRST@J!9o5*nSv&@!7+AS`HxWjS$&NG&;)(fDf zJ~AUiA+Hv3pNk8Eo^ie-O2V{I|DLQr*kBMS42b_l*3kjTI;4Kq{Ym`)TYLQf1E2fn z692RRxB~G%qQd=`UYoymb#CE{Hb?1SvoP~b;%haKsdysp4I_MDY;Kv6R z|4mi~bdE``Y6$;pePjQV$H{dhbf_txTtJirz?V003DZiW&B?Ods~XJ0Hz5VK#+A|O z*5$F=PG2WXpj&#rTTdng<*#gA)%Wn8(q{ zmyCq1u8UwJ%lxYL6`u-*$F1H#=-rj8h!LlKbLCJnzc}fj>=yfG<*bpOS8igMNN%n5 zd_kp!$%p-w`5p_7@9~E~50^L{%9M}*Of}s*-9Cs?nDYZC!i^B~r-t_`vJbWTo`7B= z_<&*jYzm~Z5&nSi%=!@1Lrn99pMDeiWd?%m6(^C@K@I)?(ii^cvHzHq{eP43V_=Xa zfq-2|6l_oI9qrclzca>&;lRGega6_Zv7>h5Z%&BmpR&UEOK*V5m{o;{FIna)#q3YB zkN;^fGLy&8_aOmqXlEjphr9M*NYAD0uM5h%x}@;C)TV#m>&f?ba}|^SO<-mc{%eE% zwX?r-zR>;K)bXhY@xr$%s24CAjZA=FBV=L^BD;IPf7NC&3gN#$mtWDp8U;8Y$Mx3r2@A^82?| zj99Zw@25dIStPz(}b{9?OvT$&x zhvu(ppE;sJagP@Wl?enyh+iC_ZzUD5Zze(*K&8;Mct#8Vd2bH%Q>?p*bO{Hj611u! zrhzE|2^^iE1DMkwmtoMJpA=ZFq_cV;>11$L*s}+m_E=gG(Br8D8xSEO!QUld+Xi=m zSLK7yvd+KBPWH>!gh;nRXl6Ii2@bv?OVTEq{yRQHIzD*`MEq6x!vILNGvS}{Ac<$? zKY-OB1H`t6SYg6D!Tc>0^{Mmfnmkcnhe@^>dNK)Of;OlcsG zGoVrTGt=NNN!$R5M+g@NEJF8xlS!z>|0WX!&Xhi#pb7g3EHgO*fczrM9XRdxfa(=l z1rLDzl>0LXe+J>tmGH0r9C{3T9KkVR?_U!)IJl&&*5`bRgpOvSue#? 
zGDJ3dgUw@y`})JiUymiijK&t*oU_AEgI9`9S4B`M-wvJqmSO=3omIv9B9Oy*oU{U1 zie5pCAU~b^n`P+F-~YjOyZ_C`@}G=V{vozzzMm&2+vugZ9Ug&BiP4%n*Y_Cgs{A}~ z%!Zfambur$6o#wxL-x%ERgkG~!kDPT z6S|bK4HZfqj&44DC?*@QZ5N7D>q#Vn0`hplv8P5dwMv#Lw`F~9`nG&vf#)PV)jwD7 zO)QSehR>NsgE<4h$?G(2i_P=KkQLw7U#Urj-jCS>C=j_@J1IXLv@j3VJj?jnU+Qf5 zXJMw|G0RNLszWy2d3E5`kYN)UmWnTClF3%mOWyVSJHoX&-|D-ayrotH=r%Vf0KP68 z3JPQK(eSIj{InArxY?ErnN%7wT5G&E|0*gocX&E8OF^;=V?Zo~4}u6>wV>}7ukCye z5t}{qp6gop9Q138ne`v{8WzP`T2DS2;heKPUcc5oM`37Awh?k(n3-m*aXa`heo^-T z91rKzpupZ)Kp*s}6@&Q{iqV_~aqexq9`2tXc=7Ygjb&Y!(y2)oA_n2DF;hPS5L}SV zm3{hX(?(tPJVl9D%=V5J-@Y;Gf9v2+Swt2JPb>ADi~OX;PykTqoV&$lW%BMfH4B7tI6XPkM1eoKO?lenedGx+>O=EtGe?Y*($1e(V66&X=IL-@zId*<%{RNMdPwT_fC$vA%J9ue zv?b6otyxfFosg8blhd=(Nsic3b$oV;T%9Q$1)6Bc1+G=M@=yGN zqkasTl0(g!+TUaJtMZ>VyVxcqX(-;XV|}-k`QWJ+zY1;E19BkVW3t+p2tNTTr zsQleTU%K1-eY2unv+X-WM}6~$*ChY+^UxU3SrU)Akqgu?D({UO>lqm2A8p4xH#@@)8zrt`>PGE5!v>1 z(HzN~5-M)jSE3mNC&8oFU#Hg-dA53Jpt{p>J)4tvr0)!NqrfYQEG+iJ8Y6}_@vpIF3lRd)Yl1sdp-KUnF2>i5S6!8zY1{eI*T|g6=*huB7J(w; zl9!6V)&ZeEVA1zaOrrng*^qy=-|c^fgZuORe~!`O|BuFzm-p|W)FWU1t}B{Ka!Vc4 z15h?NCISGZ`hOoip??C1i-h;M0%cvTOL)*`_%S^`7d^JKziUx=COGst>mqwbA2;O( zev^0duTtBGuh%^yQRCbCHh2vNk+G`z5pOeVV55B{>mQ`(WP4TpTuC zGQW3ZKaeRT4!wlu*#hugQ1C+J<&Y7F=EXZrrOM4Gb>BN69nP(pJ%QT^3+&9#Z)*j` z{xtV}-_!ROsVLsBDrEhKZF|#e4dXhK%4Ir(%%zK&ucODimB=vz7mb<1epFpmOZ^J; zb4cg)uSKrlV&}A^KKQu?guiD97<{S`w|AS~K&lX&*YsKA=dy{Apg9k&f8gvd!3@J%Ri zG89~g&$en?$Qu+}zu9T_o6IBE#Y50TOkcag{zPtajC(@y|^m9C0k^pcmqXNWH3y<_q!g5wj+ z#R`T57`{Ej>Hp)H`3O(SGYdB%y&$0_3!xJrzai*@Eg?t@kV0WwlO&}GPx4difA!VV zrxn!mu72MwAD!hq`c04t4{{$0%!l*w1R;LQrR=-jMe|j?;Q5~AH+OAxYj~P^O;50 zpFV93K!l0&f2XQ5Z}A>xw;T~_^hpb(>81}!9_m(Gko7^&1VVn6zk8n#>2PdrZEnkX zDrG-bvD#;;ksX!o5K}>#2b}|2;Up`&3`K46#7|j##xF&!ekeT7ylI`D1Cm4~43Ozi zu9QnIv6j~Q?|Dj297oC#uQgyKptrKEQ;F9_E=eEC8>=Ekyl9VfjATXTaKks&21rUtvR)(830;1t*{k<+3J$eg!iC(X z9Y9!$Eo473PA$ouB(rdnH>UUl&gx<=`hou5)ZdUjfu1*_HYR>Bf{zdmSdWF}yvP_l zjLO`~AiJgUxN7f{@Mcto4iEp#cF_Ci1E+@KF3Sn z88^ElKTGtL&pC*PotpYdSOFivg6ul#%W1L+c#!V=aBNVkY6c%9Mz|fOcp*vTZ1>mO zaj|U&RhG=xYo=SE0eiN-PM>t;T32-e5zADU#?UNNHQ!GlQQBHbmdtmwL8sT)pBUHM z3zq1G>^>!dpyK#!K=$Vta^KFVg|+4YqM@*IYyb7F(2AxDQq`%0^snfD zD7Acp583v_fY`r-$nhoiSeYWHa=ZP_y=-d(4H%?joz1B-worjrLn?(GaqXJ&@wzg0 z_gS~V-pHV!!d&sJS6f2Ps$z4Noam)XRKZ^7eBZn}7k2K3Zy4jQy`&GeinIw6KoubBp~{P>}8p*A-> z^k^nnlS9yI+L*1sQn&%7Q%3+)EgCGCMU(Ug*w$*smy}ax%zdA8Ed*}*=_5q8L`1LB zWxl$L5(WOPdYI%WK)GE(`3$Zs&JZuwmJR*7n6Yfao_oU1^Fc4ia-x?pIG?+g)iyZf!lg!TM#)RTrtH535P^;h(-D55^})|sMyjKE?y z*p5HXKrhxLC*wbkdAr8hhT13th~5W`)Dfi_*M-dilI}p~r_*1O<3vj=99155`awe- z{oX_A0AJJ7Y<)S-vByoyTrTiNZ1TBt=j;l}u5%#2z?t@#aj(b_^TUO^chPP;5zBqT| z*@t_tobruS>%WExaG=*I&x*9zpa%0J@kHs`#6WTS=@nDT)?l zZjo&jVcmw1@r6{2(!}fN$wx{ngzD=Ew2|ZUqP8e!H>XQQDf~A-nXSBd%Fe1Sn`PM& z8xW?)u?x^`uHa?SuXDbZZ5E7lw>GpBlo~Zybxv~V2^;rhg0xpZ_n&2aA((kK2(`J%HTZL=x?@I0*U$iF==o zGs6fy%ieU&4%fNPlK8CDM{-EY06`F5hG6^pYf`keqPwldZJ}oEa*YAarLIde5Jq zc~rHk>S@fe8%PTB^B#fGW<}^(aIn6pqRhJ>%2<7jMsg&DljgyE_XVQ#B8hhuKkOZZ zo(jY=cBqtjN(&lXrH@UpRvi%(D!lPk@U#!giC4seHy0DPr=QoPc|M*zi;6lOaLPR=o4N#(aytnR)x% zc6S_`_qXbTfHe0q2`bX94B8`yB*|s13n36RXC}61P*AF%Hyf`y&`HYgw{SE3Vom%mLaW9^o0OjITtuEwp&+gDy#{jTv#>YEGB%=f5pt_F3ZTO;3Z# ztnK9cuVzi&Bt@8Vo~3qazQM;22#R9YdpxL$&?dTe9dX~#`|?r^<$3%CRK%RsnGQ$! 
zcf$Nf+*2Q+@+bst7aOx(qM)_3ge5MJB_jyMn8Ux-(ib(5v$`0f58UmxZ`~cV$tr z^$<|#s6>{)prLtmNec6r!SxeyO=}AeAyuL_RyOH0Jq1P+=pBs0VlMYB9vzjm6U^_A zhtsXviVXK(D*>qG85C}bjPjFVyCb&hx&&RKFx7?8{QcdDpVY{>^kYBd)&phPC?WnX zw=D!UR0n&U@EuZ+&a{tPeB!aN9$>Ms;A_f0Aq|Yg?QEm^uN%$KD%JYojoScsKX?en zd@4&H@~+pWr8N&NM7wIF+|b4F`NR7XzpQF8+84jrT^QaFJ%CM@OGbR|dI7+o#twk3u~ zwO1887V@)Fa|p3@AtOhXZ`?Z#2HsN-c0NdbUs(1Qk~?i{r^7n@g-uWK`FZW1!NV!k zj$M}b2|C1USW@*?It}q2=1!gm+6h&bDK;%@{Xj^{%9#5}l`Y(UZ1kC=6USQQ^ad?a z2OHCCvCfH^A9~%gs`iSj<_A*Lk9#Z4T$p<+r_EI-Jx?IC1dSX(CaG;C1i*O#yH4vF zWd;>R%4L`DO#(7U3M2l51&bk!<~s_8%eq&!J)JZ;-w!YBW&!&Wg8?6IsNM8^wD{9w zA_ta=;lo=~UzfD&##DU9<(WIj=c7Y275H-)vd9UJ-M6PnOi&9fe9)d6O+EbH(!iZ4 zb9>qKW$}m@QZqsQbLa!BM>%}t@XOkKp~!_m4(YOZzEiD-_-q0QZ;TF3+`*PVgBtmv zdN`N8$2tQ zaG?n#e`tF-DUq_^!7D0{2hl7!qP*XQIG}9!*C<#*Sot74%4Rz?rBGPwF`{%oDH5qP zARfk%`pr5Sq|Req!1y33B^ZAhn};4rXnt+HHQe&n;LR5{x{B}i{3@4Uni|mwlgVwi z{$yOOij}fyNy%vmzS*+kKNTtGnZI65FkkwFhy@Q%hlJlsvz2D4vsh=H5r1(4m$&Dx zZXcCA@v_4dzuxjzMN zV)#ww4(1FgF`Dm|2C{ z-Q|O_XBygQb&V>=$1SKIsKXfPn!FX_lZ-AxQVZrVeO7UOn;D9knGUstThMFG0KD_H z&En0LwDsegD>X~fofoi|i{Xmy+6t3Yr*}cgK5uGmk$@<${j8`aQu^}Hiy7%=m07`a z-y&)zZpZtAWyhCJ(xAOzSO?5r<5dIloG40`44wT8HQVafT0VNJ!_NU%(9zQ3@Ihxv$g=>LB~zq6K1fb_ z2+U0^`C_XiFT8wy6Ib*Z=ElOa#Y#Yq@uY?I_7yJCnvd zjlT|)QrfW6{8Y=y9yu(;;7(FJtB&MzcevDIo6MIg=CRZIoXS9@|7)Ou08j8R1w|hZ zweX@P(*StcVEEVD0ci&D)V|bZJT|#SRq#DrGLlVrXUl~^%bKC)g>YW)B zLFFane5qaJ*9!}mHy`@GVWj2=N|Jo>5$HE1``g>q+L_mY?vdQa^qtk>Yo?z^whDjk zdL{cp)C9~e6F{4=UwjsmP9cAbN$mUT9-VZh^sha))Vt|N9|*UJj(7&NPwYAyUel#` zeFOjN8>uhbD$a=Oh{ec>qh3EPGA$fkja~KPZZN6A2{be`=RNBLJk7*|9kl0 z65?IV`;XpqHjP}<#wsH6!lOBtKUZnU)MP!m(O786Qy1P3FiyC7k$fTXBo@$x5WsOG zY^rf6+G@!&_XK(Ij_vr(C(b|~LNN*)2egSMcKL1k;4h0ms|oliQ*+d1i#~X-6)NJy zqh_A9aeF$8@hE%JC;FP@4V_Uh0a-yP8U8hbsDYWv!=&RJ=7`j21k)J^UFP$R7 z8QxDGe5Gr>P#PF|XNTeflPKdUGQ@T(y$`{Mj|0<$sE%j}E}T~A=FNUTyR*Kt%M$TT zBz5V9IF^G_7Hauf^=P*k4AsRq5oGW_Xr2PyF07JDklLO#hgehI-HwK|Bi*;38DrOU z>Ff0U+wzlf1Uv)g9nQJ|?*Z`11oNTdXnWA0yp5=erDEIi%$0nXAXcCunOB}K0~XSl zfoku+YZDsd_DM?~zlAk61|VI@AzYjGajlLRtp-%b@o@m6aYZh7TUz<5?$0BZc00{3 zH#%W(9(Uh$g5*dK7a*9Hwhh9kqu2Nb^iRarl+{0Zm%fmt98T+d#rXLgbr^9Od!Puo z+1P4vt6#mb&e(q0^nuub)#g^3B$7k{RmKD#=@HzB;fo{~*)US-UYs##cW7{6aBw3Nv zpqzO_7Tr|IBas5xgQ~@fK5xa!y%x=0nrnDAB+_vPUd)L+xjIVe4^D>*bhE0!f8jjdjSx(l`b|blA}3Pe?Xg^_$n3LpbaI4iKTV5yzD38oEaU5s zCr-G67-Zq++PxS$gdc^}_29PWiN;&$D{iQf0XpOb2cq`J<|BqcTOVrONTt0NNV)Bj zOTpB`u_BA}Oc}DvU0-dOX~LFPEZ?TwO;$w*gAYNr5~4gIpMMGW$wiG)3Zi( zkekeC%B^2E3eMQA3hUF3R9k_X%<8w5?FF-~^_825)Hdgtjb%zTRw*2R+j662)CX&7 zmt>w_B9Nny$$+^V7zH`z!-I!9Whzl;GS^*t=8k-dau+6rO;zsuNpx7<^h-VJs+<3e z1&a`A&$_y3u;J+4R1(kr$)3&m?)Vx8@d3-DT0L>1=v(8C+9JqJmlQ)68ouJencc~| zG@J089%pzQi31~h^d3vV!DhT~7M_2H`CcuPsowQbv$@g6JL*T1Qc@<^gjvRz!znw< zKb=4i<;UArMJ~XEyibJ~Jbtj?>4s`Zyavf4w;(1K_#58Ck-FS*n%xC@{FQbx$d$J) zdQyXAR7M(P=R;aXjzkDrHhWjmiQS_3yB-SmnpqAq4elS`Pk5$bN~B7a60$@TQgSRP zpI%Tt`0<+zy9H1I>;T7T9c8>8&`Pm&5t+jC6!}N-S85j=uJqxHr62>f)=9pw8-zy3oG<)IZFBB=dBR1 znl)_A?d&9+e*!RP+>uWe@F^iT00SzH2=_^xpR;nZt~@*lDJokq@va*zd%T-sxy^Mb zj`pF&JK>b@v*pkS3$jNV0woZxpgjLi>X(;w!o4Y3^&mf5H3u@PsHlF@jEc5EXR*P$ zc(-tZtOI0&uE$L}cwm{53F7a9WwM(SvF&eK*DqL?E~|5d!?L3I#_jtSzn(ZuI`r>= ziV>Vp9gO%>Qm=*&T3Qm|;VfsL<&O~VeB(21F&+7eIJA0C-&mma3${PaaRq2%_s1oB;unpMEM-lhaeLl1De(R&I z6>f^WV5glIU&|~b>-ZQl&ePJla=9Ij#A>M$`LG)a;+XPiq95wa-@292xqkLkGQ1$8 zAp*xTd^k}N;`k<3)5I&}0PQo-6UPg+K@5Ufzc}zVxBK(6?&ta0B)-h$u5i$=w9zlQ1CUVJq+;+RVmag(_) z1xdj7!O%X5aLnBe9uKvR0luCT&Pci$r}SHx=lQR~B3fz$O>!TvOd6yaE*r=B^YFOl zaK93}a@W+Hf8;AD1{%DHNXVDNGCgfMQXjN&QrE-1SJ+o?znPpmY9scL-`VBP1>~j9 zPGa;~S~oWof)6E_g`^K=Kqcq8xrvHc*O!-&#p6|d71lQ3{ss-Rq8lks4zB3b|8%st 
z8??!^xSS}dNd?4kmE3=$9QeCb`v1C*`#UILUDG!~7Y-a$u=H`#vf8s>10_f9FPxHL*meM-u}f7(_4bN8|DyE>oL-;^WEFoBBrm zzD>8jQOagyZYm(AHyZOsK+Cj3$%Ue4FM(F0>YF`Ss>H{OooSr+5{Fw){F2y*khEz(7SZDWjvi7Zwy{#ZwcaR3`xG({DO!4eBGr)JEqIuMy<7Z zkhdpaARiTXUo9ZfUL*+VpzouC@lh1Fn<8GVm8Bkh3VqkQb!1GQ@HX^Bm%Cw#=UdK~(oVzjq6( zv-(pB|EETjmN|H15-l}B48J(kx=uf>@wgfmp|)b9xt6+J)_ih(xvFWBR=+Hr)~-UF z?1>&fqTXJ}?Vf`=7WiHD`$PwK3(S(lA=Smk=<2+tnYI;2K9U_`i90@f-sOUZY|!p?Yc+8N$=R)TE&AG7FB@^>D`htatFjo-J=1eVb-SF; zev&e?{U{(JSP$BSX~NB%6=*5q2l3RX-(<;K?-%(E7jU!7_SR>$j(o3rf3WChF?Xnw zubXqynzL%XEqdF=eGhyEYH`*EbA`tUN@e$w)Ulp@wJTnP2W78cPhwlvDXzS%MF;3F z>ny$AWWKeT@~Z|epv5lU9~yaW0L?Aa#lle86=Ab*{W0@Rp>dxz{Zy4xSWdR<=|f#I zo#F|Zuz3#bomv17%ygE~>^f_4Fp?;aD`iVr{v5AE`#&P5xtUu$W%`+5cO z0kjOq?cqUKKEOKbeOs?-f@+Y1^z6#5TnbOw0B`O7<kmbeNDvMWL{GLpu)rC zAh*g={0igHk_B+q>*LaNl^wD^Fk{l#+&yGNY2537o8!IDQ5evg_HMjwrep1ZLf5xw_I zC5hN|meRBNh7-g`G_4GwM#q(uFX0RB%{#~RCtB^8ecpqHlX7HsgD#s zb?v#ENJ}T*%byi~?@IGwpL(($^Z{@s_38FF3s4MZ;T?aBd>q$cec{s4-0=QgqY?S= z`3YtXGg*RPQ+c%|Fsbz!ki;N1s)va3kX@8%(&O*PM*_jOA@*SHIng%7WmATM)I83q zvSMF$%*q|=GG*#;xsG4DdXWK8U>p2Q^)A-na4mEToXoo$t?c0=eR;NdoDcG@8Ot$V zaNhlGE>jklf?bfkgLQl)d`C}g zbws_!A}uaVJcwe5W{|OLiQXgdCiP>GiA2pU&2u9|TI_S)r`{s>W8IqHH)j~rPqzg= z8lhx1xT`~X?n3Rdt~|hxV&2o@m?>Y>Fs@ksViVZ-HGluxgE)4<{Al+r8a&oP0F4@6YyZ|2GrGn2T3N<@sNSXe1~mpU-m`X=N>!izO*sAolO7uU9|6b8eaxhXb6^CNRN*I4asuiDf&A> z1Ig02E-*bi_AX1dPa8nmT5aF8JuG&#Hq;fQ^t?a@9XXT>W#k$EWd8U;fXP*Tnp?Jd z4hm25B3Mn0PqlP;3eiqNHTQ2RwRp6T1*ZgOHy4)(F1IeUg0z)hgN+uO4KVd-!5vA3Y7$=1OtLRRA)N!3RJa?Q;-51ROV5NN8He3w)*rT zXk4JXgwna#mcu7kTl?PYcD^WZXLa9%oLq5{MM_5{()GJX?X3Oc>a1zy@k9Bhn%!H? zp`N;dogRm?vj%vjMdL;Q(WCVJ#(!DWn7Db?0P{ek{U)R7mIqMY#+X|JI^EsaL8;8T zCi>cf)^_G$%0xffV*ZnMq1sTcWSE!fiWAQFeZv9}CBS2s1&}c^btNs?W|+fL!!#nw znP5LgP0L+n!td~in^7(Ku9qbUe5KA{pj#bNo&;qnC+g1xa+}5nN~>vF@O^oq%erjD zoi~xuP5H?3`0Ma0vtNu|UW}F|krkgu6Lam35ZVlc_4T z#%OB)`>u)$dv%gF_-xGaK%_`BCO!dbTJk0TYY~OLjYnJFN1afO3;fLu{a+i^$oaOP z3VZqJTCLLoAv+AMQt4M5LNVM)ozOz3C#zidvSMXzqT<4N+|9!wa$!!gx{yguirEST z()c({h#lx6j!yCbv|l0%qZkbm59Fs3khx)AytHhs`g2FNndzlYZ?=zsJoq90$Ch|M zEEeIzGzjC`>*mYF8m|mgZs>YTjWW`SKJHu6*{tWLt}GggMcyiIw<2AEs^s-~V+BDR z?zO0!caLk6@rMP zbG}{Sr}q^-sk=5!Cs*YcU*6IaK?UqaG$7cTL<7FtqDLx~nBBo;%S3PQR|XpS_N$gRxE^IRCM>0R83@Z!ra|D{_#YV1?Re|SrrPC=8GYS_|Mgt=Jdhr zcW%Z8{*!}-Y%Q18)CDnL&dXi~JM`Cf-`=rouZ!5wb2=aie}qbwtcAo6{z|ROF8fqo z5Eve=V7sFIk|H3>@By=PRF-y?jX?_WA_D&iaBDaSi!4Iz#@WWsdp|DD9~>ZmBo!Ut zBse-I=eiav;s0@lXJ*>&PApaywU|g$^xraN3@Oo{*8sUX+ooE~kZ5Wi$vvH+kztuI zu4lHDl4DXv;>EWRed&p6HJw6FHT-}*_`O&g{pQ&gZW-}UJ)wi{+b^ku$f6GJOfXp# zw`=;N^9DflA#t)aA=yLuVmY;w9%4m;ufr>BSJqly8H;?l7H7mUVVE&GfnPo+FHY-c zdK*84fm>lxHwGCLht)Vn;-tjF!)slXFMyg*xtc{eUyXm>(m&l_{VG_TO#X%4G9S;j z1*jhrPgZT9YS`R(v~jZDiFnNIIpNNH(W$j#{>M`lrsN6B;k+1lJS$P#XSoT@9@aDT znqp9k*IorwkLR61tIh``lDD0m4s}+9aFDyKb#%=tZ>21?|}aLQ|{$r@6HJTP9BhKCv;ff8_cHSPte zzcrz3+28No__yeDLqEL9+^!W*Fh(a~?G135XKBC26b2U4|66#Hy$^V+e=g+kvWMQfLb5&kJAfyluNhR{K|`a_vj4BPWLd&~6J^07(!4WZo3QfX*1IT^qXo zw!-u9hmwvSyBS$=(v`p5DA2IwW1LRUSw)~J1;oZO%>|=(9JSru-r632M|Gxa(cAOO zCAoJka?R477exFnHw~{tX~B50xOZErj#Il^$0Md3fU2Tq$7u0R?se(sjr0$dezX+CL%Rhar5DFN@pT+mfswT~A!&c*$f}DI$WYHa{zA$#_zW%p&R%WMGd; zi-J7=ml=WBHxP<{dV8j)zPqmnc_Q2uKS2O5bGd3QzVpf0@aOs2sL`}VeJEc6cw}yG zjL1d-b#kHDZi#u6JVe@!!rpd+b9h%m{8@?Cyi9qRVnsyNPp4l@dNM(rT3Tl>K+fq* zk(1?sUTI#L(8Ck5GWd1oKue@Ym%p*dN4dmsBAbIj%t7BRIJ4fU_by06!v%$mhqZ|x z9hdIIsNQtWL%BbC1V_rxl{M$CFF%+~{#ll5Zg5Y7Mc1p)G0sE5*PIRfv4bo{P^acd zQGZ6skfAQs$@>w_b@}D~jJFX%Z_ecxm~_9{`uLN8t2!%;KT^gNn#OK90J{ixts)3i zRxfX*0q4CTp1o^(cC;3ZO^a&>a*XP@J3`IWy>JF7J8)z<0U*Y#)T-w8= zY#xqCu%#o{)}`%;;tkhh)u5xJ6esHI)JO6J5f33D1W|psVb5+L%&d 
zw7!~)`2%;>r7?lzi>z18Cg(9y|_jav0L-4cB$kc3I%N#<~)9e9s7Cde|y_B2xurl-L%SwQ3ne89fzq;UfE z5@~=iElz|+6q~BQ14e0fptC#}>ik(nS0KR%INAiafj0@5E(g@;XE9?0 zF@~2LI7BlDQ1A-&1b;B?S=`UE1~oUg zT01%4bi0i?HVM^shA)-R6J?v}(cUY09h*>|jEpw)Z=2A!I~_(P{j|ktpS^&go_7m) z76Ov2XL%%6_>rckx6={qAqn}7xDpVAoNLm zAd((04wz@gCu^9hCNv`xY~pdx^6pyI!IhsUZZ}`%DDSj%FH01q_od;MOv0`p@M6e8 z&0thDQ49+nnS|WMOcgdTP@C{t8LO)%J$N`f=tF0BY2>{~`h;F;LLf+H;MahjrADkX zlnqF72n#W5aYF_0K0pv({t1M-|Gp%mjUYKink7j28Kg@ddEg?}Eh35SNT~lFtQmgT z4gw?=Lprejb`$AJKF~*lx}n2LSERR}p!9tsj0grO8D$73V3^30)?A z6l&Zh%9Fs+( zMsFIhZHK1>v2~&dwhn;>qgB&ed=g047oDO=Fj6+CSOpga|CntzgbRt z+IrmRUYs4g3LudF`&9ndDgA%>djuTh`J0T%vs>aUzx%Sg8o!V7DKf5wg7RKUS1r4~ zskA|mZ<+SjJCb{9SIfP%iMzmrZjav`5EMDfY4)Mo-IVfX+fc|j#2=TBw?rSOcJ}lN(XZKiI*5O)UEjSI9-{imw&sTB{vvtn zSN$B3hvb9Lias-Ix!{33DCH=g3w_M97P8SF)|Q|_6Ih}CeyugikKH20<$B(j(WRhg zT6gY&#|1JNCHFR#wr~&ddIW_pmO&s!ue!@Ua|whXM5SRHZ|Yd(SczdJuasvf4ezDw z50odCGM1GR1R-WvpF|7YwSb*L8%1h z$GZ)9RCtDo53;pAm~HWRl)vOXHp$cWOqWusf&FzJf`%i{6wv3OM@97<;hD7D(mJ{=&27%Yq#l4<#L_)@05~pSN4juL|p$(#@LJJe`_Kp z%Yokq(M&#(+2}v0c{7u*O^AL@0KG7O_%Kf)_(X9s!V)VE6rPQ-#wZR%9|O!7;zQ># zz5KR-;n`arFNQ4c%@F?MpP4jv?Y_OQdq2jPoT2D5d~Qi#fs%@x~l6A|;p z0RPIfM9wRppDme+dX00_j?Egdtoo0YZz5L;Q1D&>3LIa6FwG^Vg4P? z+>~XPm$2<(?!9Prn@&{ET*1(o>PtEWW|7Fci-S!oAF1D)KZSFIWE;-NXidAl@m%~k z{`RW$JEswWF0WD3^}A#IzJw^@GEtTE3>ZY+c7vCwc$R#q5z4n}TBqki^|n#|M9jFN z`B}#3oeQBXjb{UF_Q<23m^@S&{D_U9yqE72*F32;FsODIrd96a1`%iuR*~Q54e+3= z@qANN`t8cr)4>45j8H1LNgSvHrsX4}lo9M}`}t5flQKocm%Crg`Y2^Dh6gXHjr+*U z$9O%xFyVYI^h-QY?aPPKV4Fr@Ox+hlst6V>bCGNq%?7TayALgv7 zF25P{;puz)M2ErL=6pI52n62s3vyyik46Y8V7$6fImbbUm@2HVcAMy`Bh|`bKy~Y0A;+ol~O|~g=&4bD-2IBf?QWGRf4I*8w3*xx_HEGApZgc zjqm|5yQfWVilKG)_<=x4i0JantGwkKimXG%tfuAeJq;Uu2znybLXR7q zVP(UkPzcNJ5~#lCrav#^a+&7kZkBk;hlDxuZLfR%Z)*8JW@)itcKc*wvtWJFj{`e2 zl;3tE#AA=UZY&qr3@&ag%NvVUUTK$bK7ZrFFn8p^=RHHjOOo<_=UflwHdBf;crRKxhyPvhJ_^%v;rT&^MprJz@VQVBiYlAs1uzCkmA>F4*BIbUm*P2~v1$_C(3 zMqg2ID($AXf+*3J4aHp9j^~C?Hk3oekE#XXvWGCj)uC#`X{+w+(%Ojxg%30{HG?VC zPq;)aKP`YTx4=s?42-|YZqBtJxIDogsh65M+Iw100tT(K+*GQh1|?<=mqavtp1Ury zzgi?GghScpq`QTPWO$d&R5lB&N-E04Y|$vsrF!F*62VHkcK+*Rmkt$vcGtV2beD`# zqU3VGv*|qG1TN#j7>a&)<9bvzURCeo_v8a_gF7(;mTGo)N2DifDu9$t1FAf+=q+MNX%KirElrdXFyvn9UZ4SF5! z&9i%@us%7E>zU+;ii1ocjCnsS=%ufW7g4I8Nana_?gLkEC+X zrrc$%&HIyK*t2{hEzCXV?5zn7sCCfX_yS+LF*_AQP#yg?VcXRsGPTisdeTt1Rt*QQp} zp2ai}s6#BZF+Yw>3AgbSCJj*V$#G*F<<;S``{5jTwZ@g&WI?da zMY)GVW7XT<78l(qo+H2Du0)p-Dz&B|au}W`016mpu;=UTGB&th|EOQeJBpr(vy)~5N1NqIy45+GIuKAH7!%PYZ;7q^UVzrw#WSL`6#8LgXTk-VoI!cu%cfc5#jPd@LKSl$T5>kjo-s}1K2#2WxW?Ra(Eg46E$t>7i@_#PFe%n zhryJ$xY8E{STtP#0dlq=ASTtgj-xNGU0NoJx25gs7~R~TRE0B zl+n*KGj~)K(3gIG4a2E*Gnpque}i z%9B4{PL0TdFp(0G^btU>FvCGW=)2Cnb*~_^`OP0$h7AYj#{~#_OB9avHbT zwlWl*;b=K`8QYI8iq>EN3qK@;uG3If{LD9$zfX_P7zUgqGgb!OLiv*A#H%+hQiQ;> zmYX4o=bWpH^AOo-4Q^whyK}FDNVZ1Pv-cj<9yu7R1;{@|SMe%>c zd(syrL0P@z>r+AMvR(O7A$SeibCX|FW(seVRFm!Y10VQt1A^z34~eHpAqW$W1-u_2 z2VtL;R=cxt(HrlrJNt(!OMmXU3JQ8kem4)G=AX`KBkYjN2EvTp zbC8*NG1<4#vxd#AhAV&jsJ}X2S(Gp&tnp3d(ziRk;hEWToJp-jC37xg)yf;aeP#rt zSo5oezJsw@lkL9NVTSnf$X+x56z(R|?Ces3cr?hoe7d$C7F-)F(4s;#iC~`xG9kmB zNF^M5dfB{)-LjUcO*e#5jfs#L~>-gR+d$l_~zSqXgS zaIznfJ5_UcMh;#&v^|s=R@JxfSmHhSt1jouGEkF_8@Kz{mFfR0KNB-ZjhdT8s4MIb zG5#-wQ7#hjE{clTjo<)0ew=;zdohXEX{wfN7v{BvDUKb#_J9D0%Q_=#RM4M^tCb=n z_52CBFMoWWXXEfHQ5$>MW0X$0Q^<$_?~B2wY!3Y7-WoRPNvzs5Ka6?$Q~>6Q8wL@? 
zg8Gvo1}?Sa??5~TF~!{->N1sj6+SfV!Q;fGb+qarFYHA3BlN|6^y@9S$z5f<_+x-x z0+iXN5#@k)XPLmXp5oB&i>Wi&+oACl&z@7(+S$Pj&Ag2}b2V_vxNp{$YC?Z2SVNRZ zOBk%O1#6oU=+5Ol9j_71(L4kn?~`F!CR+z1KU)0-gE_``BJ!7fl|Fp9qIESw^LfNw z^5_~pQXL`&2yE+NXnglp?ha`EQJa;hTC~GAy!{A%9J83wkkY3+8IwkT^^E5sDH4#B zPCC9nNxlOK!BM3Wh0udexl7tf3ws(s8PsCc#@G7Ci&|jO zis?xxgl{LqW$^dmN)NISmK2LDrw!mNJNnUql($J_dE>Ih3|K~ST=__+BaX}a(LS>((^kJ~=dCdjOD?WulkNRF?4c2%{5+f} zKnSh2U-^~0h={vn5K?Kdxng6)x|scWlxE`xxLhY_KO^bSMf4$&<=C>B&|M0+Qt#p( zTM#M((7|cO%yXs)um#ssZ)``p=uGGJJ92^#u5#bwN~W#8Q=2nyg;tV1GxEs-AUH3; zFpu|S%drta=73=$PotXuQtVT1fYM)u5JeD3?%BAU3J;s?G@P}~U_cl%EcYtaK-$dp z^ba>>dVGXx4tI+d!uAE=@XCFF9YZa%*7&nupWJnRCB&5@eiVR$Z=atnZSubze|5x= zRP?zJ_K64p(uzb%LQWM?a*n8#rZlj(pTD(=x3&$;jBM(9H1JKen!e(Ghxd#89MR8v zkoI#Gh|0uoAeyqpzagauddN5vd_VmR@fU43oYJXW9>u%oxgPg4)J+M#IP?`#VVBIn z@Uszq)IelOuQeng6m!mu;P!6hjl9lBi`62v+|!L;hbV!ve2tItSME6Wp9J7vlHRIu zoQo0yddSi<@Ff@vINgeQ$3X^ zB>CL-zhVnCsG*&jNJiovaE#>_Xu^x~>Bz&Xm3dDN4!aXUV_}Ibq9FZD0dVHNOnOIV zgD_z^y7r;yxFWH5m~m}?#fs5zgwQKA63o|Qpu7JgtUYZXio{qVA!`kmxgwm5Zt;} zc>$F4J(&qc(1i!is2zR|NA2?nyA!{n+z|^Owu5b_SWDI8FcCo!i$zU`T}2DSy(mhE z5!Oq`k=~OU<1Q_hPwf)Y#@@I%$eP8)hcK*bm+pMpqkNAOv?Ti&C!Vh9D@y}Wn^t28 z$4NfG9>nwgh>zZw<}g8^a;+)~z2Y)*rX_Mr1qFQ>FOAVN{9N1D1|a7}L+FK3V15Da zyC`g|m*(R9In@$!Zp3W}5F$@dgPlObws)tJ`bia<&eED2sDx&Q#0Vyw&S9j65aH?7 zS~cxwT<`Md6>h~@^?c4`ZQpnCi#(|1Gx>hAPKHi^1#*R8i$io3wa6sm#JdD8D>1|% zFU-_BPjwiT_14&~S+#SzExcr-wPiIFe8$}u+}LzM<;@+21qD15sK)QceD z1G910xY=Q3%e9#yOBk-ZEtb4-PDA;oq8 zN|~Ci#|!)ykJg0w`T73qiZ3iE6^rL3TkT%x$c%nQ@&;=*&F!@TAT19@uh%!h5t{|| zfp(3}Ua7%reh$w%SLIO2<9B?APwe7r4-e#oZ79_|0@o#M~o)e6BoXZOu7{=6K1g0;j0iI7-z91IiRCrdCu!2xq>X9phC z9x!R%?{O7ccsn!jENO!7QXRS4y!!)zAt1Rzw5wnAM<*01zH-O*beb=m!DT?I51jiGOpXV!NAZD8qg$ z^r^+-StnmXxIeUbhj>Ux2iCn)EUX8#7(5ObY4_B*J(vAG`q&IRKSi8})&HI`w@?xA zc$9o2RR#8D;p}Yq!$XnW6z)BU-nl(kwx{UAUkVq7$fBl}_a4F6-P2K(MDIFr_N`O1 z5fwVuqtDNdmsd1%?X4{bbtR8()B-Nqm&uBwHDJkJM@pzGpElYpfyf8t8}d}aL#QX{(Dy3eh-Cc3X|;4k7cNd;sy3uXzh=vppGde)fZfQvW#rZ z5}9l>IZ0nsXe*;?+46o!bKq$Rsj%HufxM0Fe!j4NGB37gSiK^yW%LT%HPm1ys^Owc6vLgm9>IyXhQ1GqDEC{o zfoU_|3^krP*ycP9Zs?@C@-+ZIL`pfaJ&ZvzV^Mn(XfKtC4mk`peAKAR$p3qPb#sHB zz*~NgCru@~kE(L4b9E*2`XY*OzWig`L&{ko+1t)GY^Ly-ij zjL=&?96XD+c6lV3*~$j!?12v88T@@nxT(2MQ)TmQ?y)V{ek<+r`D6!l;YF4Dd2d`NqAF4 zk422+SJah9j1O3M1(z-D_= z|2F8Gz=G_0e;R7NhO9Vv8MfShU0gqQL_4vT!v$9bTQr{8=Du4^&M z0@qLoP4xA1x|x-60PVx`{iX2C9z+ZtSN1J$sE)l_53J6ve>#%a^1{O?k7>+vdEv8jZr5bNmm0OY{TNCvlr}_b_swIP{ALSFB1rQWaB>nT{jW_1w>XsCYhi>!=oE~ssrO$SWR1vp zPFOqQfGJq%NQ_ATX9MxKb~7c7C`{;I3KJB8B=FA$X`oweH99WPo?~(W6!%B-pAC2m zp9NSX0VkM$-u^#Z-1)zc@n1Xp|8q1k&7@?S)jE6~g*5$bPK!$J~)_)MN zD0v%n)IgVhHtJo^4t&XFZS^#Y;S$rdxv{Fzlw#O3x3Yy!7oW=OlJ2=TI{#7xQLsh) zx19d}=z{Zqs@?yX-}nEBhvu>EIu&qo<%hUpgHDrzWRv)%rkq=5Tt8bqofZ$02 zBQ4|quq=Y@Id_Z z$PIf$*MO*iM5c!0djT1*ABXepok-rc?`n@RyThC^N-A^piYNK*v$|I?mfeyEw0LX((SoBqS&; zL2s~9xg8!N4|xOmBnNfdG2Ytv_qaY3!|S5{xmUt8JgOct8;EpJoII0=PL7C~)lFfF zX9e$T%x~)q&fAR&A85uGaUl7LkCV_umKxE$+}FlE47_hZoIWvSNoCc)+Wgx#?$kn= zAW(tfc%UXrQ`9Ui5k8?eyR)2H&UzqQowmE?$VRkTM6EvsJIds0blFYw+zJqeDik?kcE@ z&ElXpTaTBNhg5?t>>d2|<(&I8Z~R&dTd#+I2oX@wNKW5CbYGe1P=XoP@PuiyFB^E3 z4JvgEZw<4Sa4;M(G19-Z(W*%cNb|~vld6%IS9WVYYGd4q+j)c5tE+)gw$6dXW}AVfT$V@z)N6xF$3-QjBmD z!>6w3#dEva`1;Nv2>eWcanVaZUw=qGwcGC=PtjXu`Tzr6cHEojP8vQ!m!Z z7qzidz?w*HaoCwpoFj9^g|+8R!{xoZ*j^ZATz?>}BT4$MkUKc|39kuv{1&clZhT#U zr9>^Qp=@e#x!cF;!_#3A!z8L;|C&PwI&9^)d_)gye$|)CJ%rjfJS^ZP_8qm=%Eu0t zP@j7uR0|08#7nsR!46!r11nae2)lugC+MjoUsv{DhT#j}#Us}Ezc*4PRHZJbv>Bz2 z=dN#7qg>yI%q$nWIftLugX14yXHx?(e6R34(#n&X%3kkmnLhDSj{EmSespdSq%dIH zKyr{O$TVP-eROxeN&?lAPZ^V~bZ;~Mmnz~~)lpwfTlzK5sQyT54AO^G1y?m{XNU&) 
z^f|R(#^aQ%OLNzZ(=hsgjtM$>FJrNL+u+MRn>&Vwkk7KMM-qr$dsPgO&bXXTV@;&F z%QjP^wHQQdDSlxy(zpKkK^kXc+WYMK^){h~95%VK+Io7EKLRE2BhbFW28!#~7gV&y zKh&DYoq2tEsos7yDYb8OMUrk^e|i7$JO5t&2-cfCH9t`olv`J+zcVCmfP>QahVMsY zGLFf_7>X|67c4kC04UP!*lsjHGhL!_yasMrx7}e<096`Yn&=N>65f`piCax!*)}U< zY+J9oF61I@l)K2!fRSD3$H>M6hIW9k;~0TNS%|Uw>9BiM0QYJj8>TY3Un$hN=OlX3oRvDyENl*Q2lhWzvB##%QIzhmB;6d$&bx^o?r z=LQhg2_K@-`XN8G%e>5k@$1N+h&nEhh|#Odoe%163NbQk8F2|hN;ReZfnJsE^*T&g zYZ{LZTDiMwqz?go0vI$_&VAd>$EF`w(oD;Zci%+pWKF{T0$LE)@fw*^nx24eReckG zqBlmz!1av|Zd|_Xm$P#N1W*IyTJwS?d7cnEhXEL zRQ@AibBbIM3TjwPhnvTSFDp^42u*^06WAMIUmO11n*6xMLu%`uT3dDk+rn!Kq#f54 zZGPjR)$1s1e>{NZQ2l~7E60addz$L!w)|0iJczG8*iU)WWprQu3jclD5IKQcoYJq& ze)SvC85APWhl|nc@rfsI1NjtsJQ0?Ft}l7Y+s~V~euW~_YKBrglDAqMkgty2kWE~r zB0$5+evxm@uo&mrhB`IM>j~>_@fAJ`WwRDGKjx4YGL8O#^}}-PQ(^$c#bmUt?T+JB zo-P}gZ2YKK^=QDgul05}bP!g;LC2JfPk3oJ(>VsR;0i3z?8HED zD62qq^im2_2QxKq$`s!83x}#k4VI%1NV2lH%s63PD|{d8TIC;D#@T{rT-TbTPuCfgKQYU z_Brx*QCv}z4l24?GPymOM*8_mu}Y&rr4oBAIQc(mmNol@o+35V#sD8-_MT|4Z4 zMTUFJAGdg!dckMc;yILON3WaM@}$J+EZg1@zoRx|qgJhfnf)W6wX}Rc zWD$Se&WwpT^PIT*mw2OYU$1~&B;_5-$RaO-b;EL3cfHjFiw^DwNa<@{dZc;nbg0X4 zv}Ec_D{zF3`#1cB|0GU=5UZW%QPYvMsaCqK-v_ce%-W*phy?}gdYXTz>ulN%M1U** z&)G2lckqX6rxw40e6Y9AyVITK$!u5NDcD(QOCI&U1f7?q|FYbbd;pLtoHYSb#nl;7 zMv#(wNz&_oZFl>B4VV94>)-#s^@YIS8PYlcc6}WAN4zC@u~*>e0)XDOK>u*hlAeM~ z$@eTZ@jx|&woNNU-)!OiZ(#n8N0Ir6tfcA)c6S8702Z2{CQ6snyPQ|w^gi+ZXa*DY z@@#0|{9Mu>adg}}+)n?Kc4+e5mgp*vecxnffI*+*wtOcX`#^brJYeiL%&Vk1NB8tW z)jSrounow`X)qHWTYm4*^K%POCnEx?AIyS;KgPL#)S-1wljr3eyLZWn_m6BWnVk@= z18_JQl0q!meapeoAUM1wPUX#ta3^1*T*kv^7qV!ayc#-rgZjUk8jP+WHyVy~P@U;X zWLiRR4Y2l%A-Mm)K^DLzMmF7c~T2@BtF$VGkd7abf1&r4JX@++&}qA5wn)9ZKlVXuaUe&Pb6*> zm4=i7H;N^LQZuKtznA4pvb&n8*j*52XwH18of%9-TY9Ww-MZEo!66%NsSzTp%ht$u z7^5(&y61_TDmT8Dd9Hdkdqy(H^Vqis!d{hRRrNce50_{jfLOrsXvetCLL=vypzQ_^ z;81PN*p@jfze}Dx7&xtl=wP$(#kl&nND3S%CGau2(mCzm&#H>;d5!badl`_AG;b(f z=s)$xxpIxq6pCRPVmh3vetv-^>|Opb7hgYnZk#omt9c!5&8s0C#Bi)zNzVoTHQRoz z++t8f#wogz|Cld)tj8N$=K6*uhvLZp zHrsCvxQk4A>Dn)Bj#dS8d+3Z)@Y{jXV8M)>?EN4?fSpb;$5RWDCRGu=(XX-FeHx-tmu?p&I5y!kFyryu9MOSB3yNPsU8bF+ z*y^YUZ-y^?`Z4sBeEd|(^3rYEyHC%SfubQzF9r^qC4yHkQgKo-^`;02zQZk>pP%wG zMj$8m#yyd=;O5W+PG^C}s^TZ^cTA0p8`6!Y>H-JKa-Ex_ZnGOD(l*`gE7mp;b}1O( za#hP3gi#Y#uuhK&i9scV_Rd$W-y!yBgzMD7N&s!rrhtqY+gpnJ62YIlSswfLR2iRh3cKjVSZ;Fl&y}P~)88Bt8nQro zFZ|qohx;yA))ZHf=v5JGB{H+$u~7|gJp=oSQfFPvT~0Fz5DTqe!^D3x2K7eJU|R~% z;jgTSdh3H5U#8&d=Bwz+cZN;P2M^XPQ;J~T*%e8>A^uZr8U>I`HODLAg`{F^BzxrY3~p&8saa@byr>6-pB%L=1CxhuJ) zdGR0L*c9KR$z+U%(h`PNV8uxudHXNzta_y^emdur=5F0C^-iemaZ`Qr`SN`wP-5fEDw4qEuC*`TP0mt@ZMejKqU5===)gFs5ZfyFOndcdy=I)Ns^=9 zNLh$RJE159>m&Uronyf&)zDsjzcPi#;;MGPs;*!b>G`HeO83^rj~A*K0l@45$hEIM zNbbM;Zt4e)?~g~moq@0Vbe@X;Qwa&%eqnljo`h@E6dy7S$DaX;F8`n@As@}5ybl-B zyCbCEcjm)VFt+Dj(O!Nc7I}{sb>v$n>->17IjG{E_F7W;aVl*THEdOS*O0%jHRqN- z3(U*4yt|sN)I6atlB8dDH(9vvTaLug{Yx)(9GimM*~TX%B)0?-)><_8_JJ(*F<7;a zU&87n%z`G!*TW+q6z?jm>H@wVXu&rn+W%5=k*I`Y=!}rVzJM~SN!Q%3CPY|?w%IO) zj}_Xfc8t`k#A%huv(a7^Ou?C9W@|%K?@Dft+Tt;!w}IgoAL97>Q!M0e>l^yuW(Qs69LP zC66GfaH8X}O5>c8jthq0?MZ24J4c+NTH2CEmt~c#Eo{Xny z(x|CpMyk&+T#-iC58hq*t!r11NjOlF{vY3wPCCOouSjM{9SRX~okXi=dXMTKM>u6JJ*N*eFs7yg{@ zNIlNhhKxI~>H&SVcFLuDFKWiE#q@eT|Bh&wl}vi=Pgpj}#dp!D%aNDJR^idGi&|V& z1X2X5=}di6<#49u0p)@FxB4%--UBCO{OS$2HK-_f``^vzSlfsbqlNxbyapC3UAWi3 zj%#254bHxIdZr|a_=EJ7sIw6+YfCWyNKokc1B)|3Re7>oDtP1t*!ryEFe?@86wWJhwthZ`lnpi|AT%Wh`2+ z6?|-OQogb=_t3{NE?(+o(2TW;*Ns6<>B+Cegicq^{3Nr!M}Z|d2S3<5@IJH0>eoKD zwY4<8K7fhp3nlo3iabpyoo*1MJb^_;+#t`OE&@ewd^lk1h6rK!{xl=`r|G`JS2|`> zU$V11{opb+pT8mQuKJm}kFuWX+g(t1fftba|F}RjpE(TFq(7SjDV;zZ>1 z^uEA674WfsL;P`r8jcz5D 
zgV65|_UU@^?%KS~9u_a4jO8jGT-=EXH0PuUTiaW}op%9@FurHrZZ*o4m~e4}siK~H ziv};M!_=&@>z(x9a+&_6nE9dSBWL%vkgw0Iv!grbT*i%Op|aAV&iVaR_(58SCezcM z4R*(|uP}+6`uw?)kaiERmRYySC#E`J@s58J@tgkd^Q&VksPcU2vFcb@zv}Au`QoYH zoXD%0qoJ=RlD$X~@Pgv-PsMZn2aVmRv9+$$VIG-f-a>{u7d}a>hs+hqPwtg453_Y% zZXteZ8@zN1^#n>tVpEI%QjF7}&e58LFK10Xj!eEfnF5VEcIh2Y88X9U@A zW$^8HU9fLk627(xmj!AC&cL7J3)cEpXFgSkjkgvP-hCXE(0XGZml2gx;v}*`er0h; z6wMDI9v6|%9LW+Wb?Jy(JEqIsf>Z0N&KBqU^K(wX^RHO|&u@<+5nnls80sx<_x^eQ zffL{YAuRz-{7-ZIcP72x7>KA`Vo75Ix}4BT^qYBmKpsJClMrQ^oI&c^r?QpbUPru$ z_gRM8IEu@ zLbr3qNk>IQdx$k~FJk9p2oTdD^MUKfwgIc}cMXB_{VG4>~c)~1Se|lh;$LcCDGD4j~j|Kj4!tzjPuqWy_EdsdqVuOr^=$ie=sk968J9>TE zWz!)Ur`;tezXylNCvE>*3PZrnv3sr&#kk#wNB`2bNR@a@5UE(IEH-GOUDkk0W;>ovnGU?xKfeb))ECw;{+I6d`h^r~oMGBT z7(5Z`T&39kErxuM-f!v6UDVh=Ky}D{B1=K2#~h6P%D-gacw@cD(1bEz@x(r2Tz_hN zy57tpX+`DpJsPI{OsZW+2|$5e3*z{fqL`r@N_YZPe;uWLb?ke!2SbM&K+3sdPBlu9 z*5s+I^XOwmG~2Xsku8`9BbxxP6NPnnl`MF$v`720=T#Y0m+HJeRvqRttli4@$T)#GROx()S z{rL!Wy<49E%i&fF;>r+Hyk_5Ow!f!pkq%g8!?mueUjn>+4>9TU@Kf%uvK_kWzMm@( z)79$|wHTh(U)>fi4-+7YvLd;_@I+~$hb;4jo=}|rMYk(iU z=56xm(s(&6LePCy;eF_{HYtibPqgm*X@%Xv9rhNnS$j;Vbcb_*re(PXvUl?u=ewCV zqDmH5f0c=xT_QM}#^;)vXbYEcTnKZpO9)z{*;|ty} za>8){>PRPSkQ);|tAAD>^skCxc=S@XQ(OJkLOkaI^||W(W<*{Wx0dJKG;|}K;m=?A zA13|}zjuSz({5eT13KS}y!M_PD!b)XW^{U^(tY!yaqs1G{?c`5vcui4xlahO=zVdM z#3ygJCg28#3q@*@I({zhtWKJ@GTO)jAj4#gSh8aEwa!4A&!*WAoj*V$kQ#GD-yrl^ zNgR*+7IXD;>x4dfRg2*FILk-jx-)bs!hyK-j+J?{A2g>L2SC|%ZZ|n}^bCl)*=*Sn zek9Y*!Ta~_(zU6{o7m)B!CbP#a5zCP`Ham@HqUgeiBt0O;ne-{5Y?;pBYc& zCT`hs7>JkBiD4><9V}P@G_q7CqY6A0bndbzGok-kt%P%G1XLhAcADqI`RVc)%|#$7 z!Lc`i0X(lRpA<2ILH*pbb=`O@fy-x3xCrpn~W+ibqlzbM?DFKaHmSOA*+x87bLH zNX4Z_&_Sf8h)^8Q!(5|2Tgh;(VI${kov@ehGMqg4F6yRnUi^@eL@33ylvn+7{4D;u zzPY`6Z+oc>_Rd%M!MT2xu>YHv9z`u~ky0lDuOnCp<$l#Wnm$caf%b~l)baDq)=r_c zle$kD-pkzcUQxBIjztqiA;#QnnW=XytLa#_f;}gE&b=z~LknFc6XXJK!FRv=m$ByMU+2mY z;Yl(F;X2Kziq*`C1&%JQAXH)}ght*rgI>!v_Q8BnhZ--Isyv*-vB>qwjftMITzpB@6Y&O<#a z3h~DuGw05KDd-AD{H2g+^WjO)Mr()=?SK-Iv?-&nIC4ec&yyO$@8Qcv9tY)xJNK+j zoe*i3n+zS6?jjfb?`aB<7cJx=DmYZv+G4VCO7uF(Q(fF1JSVfL@zL7qLqH6jysTrs zU$6EC7(@7V?hn@MGL>HehoJcZq7F45&M{wkMizo-u}ja+Ag4;4pdI<)pQ3`psoOaA zFJsn{1egFho^-m1VKP0=emt_-woc&O*K8(mOU~2s83gk@>fX?|%gGcsdGlPC8KpxJ zz3Lp8kUdKnLA=j_iI9O|-~vM(X3-OHS$abL&o#x8m-c0dJCvNUg7ViJ+aBgwNCcwX z&!rvl*`x|7l_2dJ-y`5*^i{gT#ksY~KxR7`>`u0LZtg5$$dbsS z|JyP0-MO6Ohaw(`F-{M&wr}w4Tpn!OJ6A}7Jt<83vRVu+Z-_TTi7-!e-*S2S@z$eH z_c$|_HD*lz2s?#&S6eOZDEo*Yz9fn#_UoqU&{pCuS^j|a*GbxwfRl-*5H=nCX@T(A zl;G{bwpF@Oew=3!IrM4R+jI7)$C`DSvSC2AC#5#>`NQgiCo#CKcQ{o*us#-hYKoof zu@CD*a^NDG*b|{2!TFo3BWiQTleWtDEV^F~B$;=`t=2z@_%yfS*fSpIph3#)fTCp= zsW9mY>3!kXrXIyV%=CU~m#VGj(R6*>jb{bKesVNlBPXb#VeZN-yBA~DuV$bq#V7J; zsQb;6lqijo0^=-JP!w4b9JVpu4@y7PytW6|WF%@39MSuCWa@)PMMdCww(ak(i*eQy z0MFO@&K!Qb*qlUTq_>j??jqipR35=kRBOZ`5gW1>Ai4?*P`BOV_e(Wg=*1HKqR2?K zh6d*+7RKo891DR6;u8Wu`G}67o(4qtk~=*rt?Jn<>6Cg#i)5}o7KSu3Mc$>%ea_0P zahp^C)TAG8sPwE6ywX!!DnYIYcQqFD+@{AO{i*krOOd&_t@X4jb6 zL%KwtuU5tn$^7VIvp0_(_Bh3PcALlw`W8VyfpQ)8BC+T-PWh;**LVz847y#oH#^bh zKH`^6@rm*N;_ad8oy&k810c<&*9+xZLLY-24nc6>H1)XG;?@4M6Et?Z#!U;!W4~Oc%W@@O<{ntTc7OivNDq|7kEvH6 z1hLo_X^>|chOxsKXeVm1kRg*#9xuQE&V3|TplWK-lbWX7*Ju~SxjJzBTg~E0D?qji z{l#r?VWWHP@^8~>P#+L5x4`*e%m-xb6|O6HmD-*0u$QD7DK##Yz3~k`mVN{tuiUL2 za4GZFAPR0O3d=5w3s$)rdE-Y@nu3EZ0G*x@!fL?2>pdt&;uDwmJ(`R#`Z_;9zmY>1 z&ZF9H{+(f@Goa02I+-7_Prg6f?}C%XAoc+TIi8(YD&6DJxnyzdNz(%7IqTq<6T-%ztwJk(3odtrB@^+t3&hcAOb|n zk(MSid7|LGu-9WhI|LoiZ{C+@C699tM#)=cj>KMI28mc!gEjF`QgZ~Ol^sytYQAwu zXRz+)CyRI4YM^c0c@@+=-WH`Ry(qsS5-T&2JIHT!n?K`RB4UUvOrWYGJ_O_h_tgx_ z!9zxrUJ3?Mh^+F~j&tqR+6;-B5uf{AgEA2>8g>>}B-^vR)6<~NK&XBn3kd!1`!bNn 
z4{`J$7JG+fPYv=PE7hG zdoWK|{%`r6FZ?-5dzNn&^e613CLT;6G=&nJONdpaA1wOf&sE9Oe<>*8be7#~O(97{ zRljw6-5sNLX@y=*_t574_dL1EX*X$ZoUFz5S*x(S;!$xZYNQ}Uh2Xo9YKf^#g+V+p z=}guuD-NoovUu+|Rxa0hZt%vWs;IKxl-_G-r!tunOdWAaGqrMZ z4|Y-)VA>IPw1r{I*I!5Y-jwcq8C-1FcJa`6`!;9MdPHgkQIYgj;|B3I_Mz5P_p8cy zX?WZ3bAOA~vET60m+uxozJGVEN%zK^4ScI? z36=KxZm~&&)P+1&`467nx6ttibX%D+24FChvuuneo#OZctjmBuwqmV_+2hs@cQb;0 zVr(v8rT&cl@%7AMe#qy_ACzGJCh|C{$+a8^VbN)OYalSjuYSN~8Fv-fM^`46!K+509&Mkka$Q{mhMz(JFDAL~ zND+WyrFHPT=;|7sc2|pn$ErL|=WDx9Q?kWWabPU9U1|HU1P9mnVL8>oo0|s8=htRc z4;N|BF;FV#`iJdrmoM1QYnK{SF^%n>7idw}n}ofVY^)~#ya;~8)38WrZ6WkK9ntu6;si0U0L-S%-sLbf?=RyJ3ztpx=7Y)4ikcu2awBxGH))mI%l=T1r5@JGfdm&{>MM}Ogf4qCkY`*@bh`*A~ zd)!`E^+BuQz}0H;K0kU|==T1jO89wN|G48NGAl&gcRpg+Bu}d$Lus7a&Qs+`?ds%c zh#G_JhcR8u&z?~444`)iG%bmx= zk1azE=mgYvTGp79G^72Ad`+Zm8#lD1-TTMu$UO?r^6r7hi{#n0I~T<9m)@W}!(jtG1a zI*4HJjbrFaF4no)klZ6GV}js&!*gY7(`k$e%h~WUl;ujdWhKOw^bTUX0a}P!TDOdY z=r6XwebvhVc~N*yrt8%eNpi&(`(v7xx*W4i*Cs{7xO}1 zZTkMgtgpNC^s&~Oo3>yFqRGQ-*N-O!ysM?!K(^>#}?34L;q8Ag<<7vp%o=+k@)O{thd4cG}lNt7%%(N3@2_ z{qIw3Tq!m(4oh1sK`i*XDjZSEJ1#EH&V-FrlnG39qr>7?$#DfhJ-C1rrh z<)eR>kMtkNWWh#c2{#G~wu&*1(Yf3~6OCWmW`a1MwwY#KYBZ?)q<=-%P>yao5H__) z=*J^SMG?Xf-wv!sC3f3vG7u0jSYBOG%rDEVJQP!|qtO*cv|22FXYTyQcXxB+XR9`c z5#4*UItN2JyTz|LXc5@4+bAxAxnBHcjc$rv*;$}%yNUGDX4+#iR}uarkn@THgw`nk zQk;pkf|mMJSBxGL)khr%s;cuQ_d7V zi+Jlu9({ID7APrxE2Zod-{U05TFJTzGg0>Iu-OWZ@pW^XbJ_;jBy4%lZ_TTzTNzZf zKgsg6$T>6LR<*ZRM@(-(fnsJ9wKvoa#E2|FkIL%jbz5B6ec0CAlEzh>$!;h9_7h*s z!Xi3x+1BqYQg}=9@Jt6^@R!0-`Y%QGCHT)jbAX|tBg6!2KmHmiNz|Eios{+-Y=Xlt z!A~{?N$>abE{xq#%MP#YN#ttTtS8?7?Nzjp+}{+@H;ZZ-TKjde=jB6ZuTn)IdArGH zdf1w^NA`hKTqIepZB#1F#P*@FZy_4|NEszj-UniPONPoJAs(+#4ySkIz`Q21ED1f2 zx(q+K#D8F$G#=U+8bs+ttkj00`U^C=90}rcv2ZT$;;>dVb@YRz;M9jzwbK4<1Iv`- zg52jUc>l@fc7fMq%J#-41fp=a>yH9R%2YzF4zE9EpM9ZUN!sQU){Z2L} zFx7hATG&y3#h>+jZWdkf+E=kR=HOA6vim!X@SD2kZ6LP*bp^$1@w!+LKFO|rryI}Z^RtW119WT4-Jq}J8!aZs z?Y)oH_vq=r(k>QQPeK|?Iah2~vTc3-tay}@^`^5;bs*Mw0P^|wUzz_Mzju(uL3L)7 zpnFAo!)~od$SKs*^Y)Hfx7}!m3F}vR#3mr)05$YjSY}9qqbR|B2NS|All3Cc1fNsz z-3lk|<^dTSmyN?e7wjXniGVpF6d94@-;A$i7=sbC)qou4y#Chzb-A|ya=~VRHbp{MpXh@viu$ z#9=&YQ_dGKx%hwBd+(^Gzcybqii%RCcMuei4k}FniTsc*L{xf-^d?=35(tR&4g%7Z z-XpyedPfALg&Gi$o`8m6iudcxIcL3R&8%7Xoq6ZpIcLouSy?QSot>TUcR$a5o=*XK zL!2E7`A;~`y4T_7@ICH~yVGh#=6AFLfux9r-qxDVbWxSHG&-=wtG7slPlUyK>q$Gh7pPfys@FLHOEc-JY0Z@9y$ZKT# zI`nUlDbf_ySGNVm|J7~)to8735U|))K*Gwc!#I2`&TFKXFZdqGWhe8;0WVo44frlk zye`0`#i6GS1Sby-nwl;E3so!T)l0rV_;toR7C*3o-j1`pQ&DK1 zlBYtJ1s1d^9{HWt`!@)dfJL-UA3dvY2^&dB;Kx9sXJwxj>s@|KM?rL%3F5`OTh!LI znN^6`NQfq~UsYFb*mEfRHG7yuE{B{(9G7}qjU?Q}T1Hgpq8IV`4M{oqd?1Bmv27{* zLnU3{+wUN+Lw#I$k7*EhfV&+&@-p$NP4R@rrA#Ay`}i!1)W{0bCSt;`Lk+OT8@lZ{UH>a!8w&&~iaW z*v}fE5q+I31ho5Jt|nHOgRi>0k-hZUJXn$H)KqxuLM1F?L+XNm-Q$*~013%gsr=WY zL%JtsTXGc9t8w{XCG1i(TApbY*bC5r*co5XEl&QBIhl%oUUSk!ys^Uf8e7)S>miWe z5j8N?wAtRrALDxNjby)iYH-i3dSxjA>Ahh@XaCYucJFzOGeeoAeU7q=q99V?V4iqD zs;TE?`%V_d2NE>>rF2uC#`ILim70|*PMy0k!Qc@R85OF<9k0Cg>?AwkHVFv9E)rZD z@a`&`9w<#ctyS-!s}I6rxu5&-4$fnOE7(H7V_bYBQsI2zhET|_Zhckvi>>dP;%+?A zp^s)}e4TxDtA4b(f=)e`ckiy-t1m$tTP}KTE?Ool6AnJxL3Qsj)i)X*L1t={6019Y zj^wttH^sCz%v4trGk4Bxu<2xd!kRnbEnLI}g>*tvxpayFws4Dq=cmjTm7GSNrv2+e35hNoHCO4cDOow(jc? 
z?qDymolI*r?toawisnPt0uG^;pEAGB#s)e#@fowd-8~Y^H_xh8&BXS;1+qf_YT%X; zvA$U%P>C{cIw)GMKLK3B0l<6Y0g#{t|05qtiyL&k6}*3(X>~AL#Qfs6l2KRYWSUBO zKKG_~At{8iR9-TLB^`k;=yRtCo*F$K1dq(j$!%DlV6g0(Gn##(BLlat3A1X599GiT z9Lq>denxYPdqZfSQ?wg8bd6yXbeOAuHYXIana2vuE)ol~O2-A0vWZU}&=aiAhU?QNpJpZtr+qFb z@v8~sYYu5f$xHU8J>DJaIwT}VGoz?^vrUov;{C!=`ThAD3l`)gS<2TqF?No**Cf@;2b7SHvsR(SJfO@*YHeklfi4QQx#ICSlHdG`- z^Y;+GlL9>g(mLMmJcTvd2g>VLd@sqW@pFazkfH%(*~y`Rc|xZ+7Sg-$*qakPmBu2>+j%LG~U{(&d=K6wew!dmx_&j?e zSXG<-=DVtB8&6*XIx1og2qX4KyyI9V^v#5FXC|3nxw%}AzH+KdU2ksaiSTvP8D`L8 z&)s*v=`%dM;l;{gZO#mBnCD2xw~4x8u#8&6J3|o@2};KCv$3clU6dIrX#aQD46 z@iuV6L%b2Rea?Ly3&$yT<`fOo2BxilDVjLqAm%e|-bv22J``=?QGHrCyS`;XSLj&H z-vecU@or>|+vs8X&stl*NCpj-_Iw$oUK~%iDAbOGjC0Y%#+GfxOhk6leZux9^afU(YxYgL~X* z{TKU9g`yg$4@7xf+z|JFJRNeaUg)!yRiSE2o&rsQ7Mq8=s@-ufNQDP52bxY!d`NW` zf_c?>j=K0kqx!8`kqd62FX<~)X=+I)$hiiy!k;c5T9R;8EPZ&Ub)itQpwrVmO8|uy zSky3Tsy)E*+%|*7xJ)aF_ptfizUqay z-~kU%*ZammNu&e#OU?_G?sV{>8+1U}51*+szCTV6Fad-Ar^>_#kX|POsU4{XNI`X- zXAMe-Z;ybsLjw7Yi(z#DsYgYP`5DchS7QOwlx08qJ@e||C z&GCZj&%Y_@YbOm+UsbK3_!4VWvuI|vu-Nb5z^HQJa!o(8GXwuab^`ExG-$n(7p91D??FG4+{E#|-vj9BZHo!#+2L zCHnjJGpM&9io;@}-wvKE8VP}Hw`abW___QM1NmR;_dnP1-+C7N8^G>tm7QbWF--)S z5!picY0P-Wavc|wrLb%V-0=@7hdw-QFswQ zrR5g;S?Bis;XUn4y_{>lW*;Uj#r}#0<-Z&~fBBXqU4t4arU*V!8>C+U{R52#W+jZD zEkHOU;fYF_>ko+=lVfSKH3NOF0WnONaMXt| z@-anpEXs@_lzppmQmt!(JUoruUKV}!sV>%4^BtXblbYfuF=`#FsoQ!43FPTTh#K4T zXM~G+#ADMmMSrUc4?|S;qBEn7&WrFxJEBCM-MQ6G*`ZJFpF(agfcZ^dKEC*{rcUj0 zk}$=Gk{Do^TcZ8U=2Tm=OR>z{Gw}T?MY6xto+^A|#KLS^}TANOJzBuW`Xz_?@WK zgZpP?!iE283GV-kdcyxdlEP{Ui zyN7Z_A?>v<7Lkt}MqlZ$J;;ZwvV8ld9Vk{r%2o=nvB6IFlbIVoeU%9m8@Tv0M{>d- zbbpRvNuy;`EmB(8&@lZ1Q)*PeH@Xt?Iih$L8py_>RI9@mex)oy{NS=R_TgHaFGQZ~ z#{Sh;+Fm+rzWz!14ona$6bpl{A$jrFwnipQFb(Keov7o7>2k3n0}_)OV!n0*phbS~ zsaEdw#ROGlqm~VzjaP3WHOR{yB!=(p|D;jooGDjB%Cm0L79$$213pZ-}`svCa2YU`qTxEvt8T@CBbs)LCcH=t6+jsm9)KaA6#Hh}Uf7&Cr@UT+*L zpHl@8i%jd=eY|ngLC4bv(0NYjZBf^Q9(Nzg;%}Pq8h)O8mKg*UnBYvOn7@r)OwU}R zQ|I+bpUa4P!tnCOvq45l{FH_{vHJ4UlN&#{=WH;cT@1uqcz}K>{nE?aM5Xo6xb}%o z+52H>k>{000rZ8zz9C)T1;@QstGh?b*1)xoZY`M*)uLaTFtnTarFwAafs{fAO{)kD zC{$B#&TI6?*pXb}b_1^huY@{N%+))TH#?wfMm@`L^jk~O1k2{SW`(U5}jtfAkr#5S$j~cOh%vl@$HyVkO-j zrd+EemR_BhIy1a4GU^_B(r*L8Jd8Vn+Tz)I(t<8weB;p6H0&?4?p8I0B;1}1dUg3{ z((&NZRMx4~Oq9e6pO^Y-&mI5dFMs#rA0)cK=|{Nq5eJ~Yo_c)bLAH*5$$Megbv@Yb z9_2^o_?R2-JUAd1u7%fuYx$nQqU?it(Qz1XQblV(6|JkRV3RlLWx+9bR<#gFDX5cx_kz)vsL0-lN7(jSbe=GRfC% zJY#Uq7FFljKSzguxT45^C~5wGi+ua<*ZpIWgur^z{hs`L61aa-^XU0keg``7sZJ`` z?Mb4O5BBQlLqNlsx@D^+VAw9ry7se{U;I6ug1C5y=!alUN=la9m-@Pmr_}!{Mg3nH z`(IpJ|0o3H-yMJcJu&OQ-}Cp!`Txt-4E`Uq=Kkj!0H``1v~cj>Jqme1{U13d>bkw# zb$R)LYAO&l(Ec4h;k=M?I4irYN)C8T2|*v7T~PVwJ~67%g{yb<_;Tn!Ra_cg%+h&3 z1nwBVe`E*bzsLH=c9i~aTu<=7C2FoAt`Fgewl!gBtpIgxfUPDo9 z-u~>=(!P^Yp#v&Zd{Nzl(ktMZ{=cY47^fM3h}zDXy}R+`@bItl^4MgpHIPCkPb&@a zC%~`B;~;;PGesm2?EsLiW{ufteZyqE$r%0OnPz;Yig>#_h%s!DLt<#K9wr9p4JAbI zGE@_}5W8%-5}5llbr8Es%OQlP)7?#LuVb0!o_C`1lG=CnN2#_Nzzach1jCJiyz@j? zKeW(Si=t#|p_BSEW2R%FcRP)@&d$un4en8ax&XZVx2 z!}6;roAH#!S!1D92!q0LGgJvv*kIJAPe}HuSjV0 zG49o(DlCLktE_mH`Dqh_zwr&K#R_+=cP4)6q{a&YqOZ^onk-l<2M@Bd;;x!yI10(OlDkm-1c-hV`=BMcDOO znWQu3GX?i1UOp|JhpAxVOEz3}(95aVls-)#vUW z^{L98dK=e^xp>)2cZc#_*|aTPQieA}&ea*mSYet>NFVBX+0_uv6#agdylir&&rL2? 
zHcD#LLJ1`P*Y!3-3Y`k(f{&dqUuRXVuNl_hM~9A$T_@9hw* zqw7E%y2YOisxB~kn1K6=A$pb?K(E|2pa6m9?t%>N_RFPPSv2^1=?BeSd;@(g$l}tni6PUCpP-ujJ;o38eg5P zAnux)q&H*Tn;lzWv(?5$%|vi^Hrzrnu@U*H7O5ncS6jB(2Lg9j4>;Tbv<*NNmjb*X zeJeJ*ODwBnDr#eao`Jc0cNckAWV8PiFb{4njfIU*Qc1Y zvijaGyfYzik_{{WRM7O~k|u=!&9PV`BJQVSL_Zics5hN(R1Xht_2@|Ff9as#g4%fl zj%1CBXqSKj<+->@Q$Ju6{$Ta@M(F>J%Kse+|4)kE;Qvka{!bH6v}3cVfmDc-uc@3p zz1@CiU_%t!EIOj`@j|E}aL|l;34P03FW)`eHrYwPhtjJ;M>#lEy=eH*6W}ehviVqw zXJm+iH*54lHV#e;@W_^xZ{)@Da`=q8Li>C%4UIYOC2tzj5?k+FGrc=}-PVIy@|<;l zM!d$?ABOQ2w#NnEj@XGT_yiz$MfxF`lrOr-DY$-aL}Kqv%iiCb+IsB0CHWPeksx$k zyT%$3A!Z@en-BqJdph6_l;>k|#mt7+Lq60^zKjw~coch!9~Sx0g&#B?JT!RMb=x`Q zhbyx}{Or|durLH*{+b6A|93kT@zj+&xLf$J_5&lWnWik?W^v>80DZi8fAGjV@ov58 zdLQ_ePCESJLu_#oR&~9x<7&gY*&e*6mp0ML&dL3F`?+nxmf{1@jbM33lkB7)ssH0M zoanld@nBjabX`JYZMG?&hpsO@EWcMa4JI1N^$+2jzTr=vaW3H%G4g|* z6!;nhAo>vA=2T7Vu1i0xz^0Sdmb8}qm-X?iOq8jdaZk(Hd1A$65F$Uq0M$K`a1l?3 zX@cUR4(kDdLF=E<4OLznPn+c2`NP5=f1o_!PoNv*vkjOmg-Of-N(7Fs((7OSSZy?G zzbxt|UwWlHYk$)w6)HYhcN&_)ci$gSn@m@1v#B{w?>h>GG9Gq{nZhjbI+Z)K3YiNM zaWjpjh)vCyW=1NFn`xv&t771LWgm#XAk3`w@0nV+{nRW|r+?in3O~Zy*<|Z1*WA!uU-ymNVsL8X(DiM0+SD+s(5X{k+Fgn$ zKHE60A~5bC7Rs9;UR{SsrCDnQDGjl|4l81v`H}@TcdS+&;H8IAVsNvwr-Pk0rrPhh z2i$FJOgCr`AHJFWX2sl9`h!E4kmGQ__hX_rUdnqz3U8D~R>17%3Z29?Pcc1hlrQ$2 zWw@O3>YdGdZ2RjcbsW3zjCj`}k(TE%z9of@xf0lGsrkE{#8ro>Dj({3*Airw7a%ml z9bYAOl*D%V%Bb+Jns+V_eiLKUzj8CBJBS545#bZ~nxD^AVf1NZ?%r*OvGX#CbCIfI!{kN3h;41m!c}kPX}c@Vk50@k(lMUjBu?$yGL?#6 z8dJ8A6w7-)X$jqc#33n_xt$rDl$Ei_2;%)fxv8|)v?6zxdxqav%;PJWNcnPvxqzZQ zQ@N%hmFn+JxVc}hU;px#lCamR+Kh~i(0csNjvpU#qqtY*JA!ix?uZCBJ&h~?%!098 zJ#)XtrXp|DHq9IeWSYER)8MR(7)w!lVcyi>4?ajD*nLOVcE%Mp<~S7Nv-(WKzBx%o z+$?TPPc$Cp9+Mn%PSM)rxN+ReOlACYsQ*cfTTct_09d{Qri}?aB0Q=gQ{Cl)XhX%hEffWaR*dH0J34xOg>lY^CCLszQ9xi&vOVg zOgCJX6k;grhD&g_Jb*{icK$?E!k*#n&`0!5Zvh4_>-X38jcEy`ayhR`7rT8MJ^6=D z-sh@$G77kjeRHKMObK?Zqv*(Q-{?ZJ2Qd>Nna7LhiI^V{#&N4qAJ$a(#ihhDL4zD? z-|OseSZW&Dm>%P|Hy}N{;YJ=#K#ERu`hfR3+l| zNrz9@veaxHmHEEF3<}NsLwh+>ixEXDB^j;(Rtn~ek23gQdi9xb+cG5Tx=aD+yFdOA zL_ughW#7TmwTbYb8TH`vcvCSbUgv0^EEQP!gRyg#q7I8_nEa8n{g3EHAzI^FM$--A6iE>KYUL602A#@`{K-- zMeEB{)B5iYbhLhoIUcena-Ek;yr-zCboK4EpC}b2w#OPiE;n>(hH5ngUU2ZR+nE3u z9}T|v%ZM=@yfUl5?`VJH$^}xc%n=iwzsp@Bt!|xqFUH^9HUOM^sPojDrtUMJ?wEz$ zr3XeAjobos4FI&Y!#JA?=|krg-Bxw4|-gI^Wiq%@mImT@x+XR{r6zK{!`v#M2iLY zrITs#PV1{|J334&-(Ry9m6?f;nDWaO)u-_JT-16pa_8KI`1N4LOtEjs`I3!@ShyoJ z#1P5|^PzpqSv61)=)6<%5VLsZJ9Y6dRxQC78;Egp+0_Ff?0LGyo>82WPM|cIK_`y3 zLFj$A&>|7-B!Fi;>$F_{e)sq!p0wLyfnu3xb41+2P7EO5^3ou#PXlDxX%)=oW>3_O z#;*g$XQj1D19__c1~hO zz((W*>396i{p>Z?NXZO~Tmu@@^Gdfj+`0YXDl>=YWqcLli`-p33?a<}_XG?L+|dG{ z0DXJP(n)AG_#D_Vj~gF@E5KvXQjy9cP44IMjPkA1{e6aMji|uNC&c595e=?-7kYvE z+FKyo+@GhiC4H+A@?@Hnb&2%omo5oZeYHxNOGX&w%nGLWOIe9qd@S9Dg3WGpGI#66 za{5_%k-+O2{Dom!Dg6p}@S3Hb)mivU84X3p=Lmp&o6x1Fn+aJ*#18-BT_m$Im!l

    O>Q{$B z-1rI&>>X2Af(FjwhrH`0;=zfboPC1tRrzz-p2n#3!MryLw`YwEu)y8!%GFyJV_|Z0 zrb>U`jU&g9s0e!wRQYQdy7Ve}tL+$jr2` z+vHcH;^4_Ma^?DM?R^$vpJJkjV8o4_wLh-)SA&F&b<1J*uyU#EW@Ziy6F1ZSSiXeK z+luGQ*>Euo(VXMY6z@7LEuTwI%M2_$Jw4HIb#>($mznycxeQNsdefqP1zli885_eUD5RButIVyCS~hCzn>qy1s%wx zOXw*Xgj75A_PV@q*Q>!bcXmkxPvqd@R*cf){oEFh%)`w@W_Ex=5}!5cPw;u(G+$!d}Y{& zy4i4UOe+(3}S;}K%vaqW`+T7#TFpgyAXyz--+E=J9U4q;bnubXMpcNWyQzRVCgo#KX%2)1* z8wIJhx+^wX?Vyej`rKh%jB=R`g~Ct$-z z0lmDdOxHB0L@f{4G#i*L;lpNExR$BAeZ$DTR_-q~7M{Dn-;H};PlC-R(pTiHh4l;; z`2!rWy+GI8ubXa?BncMl5RY%$L-=J{@)X>L3H zYWBeKJWYAGDa5thuRh#wX96W;{s$DR?= zQ=+;}jlZO(0EOecH@RZii8r16&d9owzEMH~z4`onj!SRujTZ6gUCyVP{Emmlp&4u5 z^)1%W>>p27&5R~CFi(1BaIVFfy0Uc;2cSvv@`nR5cwmr42PH7ATb#R7H4UJNGiMxu z25B{a{l5il{ZIWLA_D&62z3l!Xw&=BF0mHJp`?YS>_2}0(p5o)znAmKxUY7Q!9-9> z?DBdP$D_Es_paS%34N>eY((a#IQSO{vIx-?;~wsVpcKczT~Iq?6#*rxVs9zN-q;%t zTZ0m-dM@$Z=3sHAjo#k-$#RxLco8&nO$sxyU$O>`hl6D2Ce8X=w=$zVaE(oONYRHq zzx>{49`ff3CJBbA9&i}`MPYM~ba6(>35~eyug=SY-!rM1l==3Ct-1578-IP1QIdvH zV)^;O4--NmTKy=rcTIl#Y8UiRO87*c8ne-ll8>?+yVRWUs3UyK_*wR)m+F$`5K!W| z12a}6ef!0L4K8GaZxH`f305!fa&y}xe@9pY`*pjxzGO4X6Md>tu1lhqcp#psRykoT zLsEUb=jCI~(ChdZZDkQnaBP9d0XquoIOHeabF5MQT+)yx15KdRGn(?#LPuBec9`eu?Y@MFAoeyLI!1hn+LW8^^DdLiLwipxycxylLjCI4AHWkkqowWz~J;`2jRFTP( zSe0(C3NjvGcvocCHYug37!*6dI=|~%7FfKb=I46TSC^tPsd4HV`dy-j3MFM}h0j@= zRgn{*s6#7{DI@ywN;d1Evo=?W*Rx13`(dK3ZSy8eS{7tNgP+V-sp8sY7l(hU%eL>h!yGu zQ~5Wk%mvX`vc8V{+)%7$WdF6XGP*d|VMN{e+4uSXwMnV^jDPP4ZC#~z4UAC)l z$;o}mda@)wzSaJAlsRqh1lz1gxqKY52=q9$V01u>5HbdJ;B$%HS_PM{$6OsqAW$G8?A7(>~*)^QYnF52A+m zN&?EBmCff{Cy6~`KL70bXB z?v{13DgxAyi`MwT8`o1)x<7qV>k?OM41d6}AygFThn2@!0tB-df+kV<53?72lRN>i zNf?)a+1mz&ekRyk+Arhs>V;CtZS2Q+B}iIK-abH;)FRw;4c-Y#DQYXnr0e0`NaS0m;2LDmqOMFu$QLG93 zve?2*r>@bZ!uqpG#z!%4x8Vps$L`^TfD5+f=a#?!Q%W#7;H-xQoZ0;FEDSgV97!)>VTiM)cxq0NFNsJ>m~ShoVpS4=VB0 zn&G=6u8M=E#rgTTLy=x)X|=ks_vU3ZvFtZ6L%wUgMc}o zb*5m^A)<+N)8&;+%i*I(3xr&6w!x}^v<^U$Z5|)70gq?|j$(i)>xs$i%Vm1ALG+%{ zRK)xXMjF05W~8QN(F;$3Muz-4Z1J>_0uEbo;Wh4V zl|j=bDB0rECWo_vB%T;`!3*40_+i-YcSB`B*zHXe>@G&C=V-q3s25BpqoYfq?asK< zq&_vQ)EqmNb``&!MkW0NAxx^h*@=xLrI0V~Hl-`TLy5L8-_0sz&^Y-t@67HN#u70C zpN;rRom8)7@S5N&a8lTk1h_o}?=ZnbG;W`3xG>B2)fy{^OS8|AsOn-sozVugyjde| z!A_<+s5eUXL)TX0|G?7aF&ogYC7))McwJquMXhrg*W9nf^mFTxwO}&dm*2GqJ-=sg zz5B8Avt%xf?YRS>*o#K)H+W(Y8@ydSeEvp#jmp<`1RJLXQ-hWn`)s*VGe+e{ti5N| z-d7wA4KxiR!h_iGfG6HX6%0It=<0T=z_S|8-`r8pCqYxD%Oxi{f=eADj-RLMl)ecSf=>^@s~h){nM1G`+c}pPIVq(30J}5_$MRBq<3t z;KK#!a9*wbJ_B|MWWxcP<8QM@$U~E>TI3$Wr6YM58~s_H3*o_H0$33idg__$#j9ZP z*I?H$2HC?~cSL02N3VtlF`EF28;-d`n9cFct-K4#?9%rei{pb8YmUd#l17*m4Z6BO zRD%G`^yzv;&k@RIooxWj1!IcYmaH5ben3GIxVQP%;nPqUOFkWpdif=2Cy!^)gG1+{${&FGp+_^}q%Rg(& z-(`!Uok!Di#N=z8iV@%T=Thok^AC@Ue2p!$F4M?LYAb5fl{wg|&dY2ytz8|2ocBLj z2dqNj5vahuom>he!M0N>4fQe_x4R;!da=-iv<3*qk3GekUD((JPO6S!n4>qqEyL+l zV>akiWa4k{D*AcZY&8D5z+Y1{%qf!=iK5#Q!R;y0m>h>e`MEr1rTbXv0ptbpjebBc z5oNyMgxL|hR8m-)BM^_y^12F^mITz~50>hmI^X$t`~8O9lbUnk!$TBlr`N-Bd8{_0vTd~U+NV6*i^i7I#S6Dk zwOf;d1+_*&+}K;ySccx7ClybZb<0d|Z)zW9-hQmrc;@4}TfLk<1*eheP<@DZLQAk2 z2_ht`Uompq8kl~PhFZ+g9SRr7im2UXo9!@`t@w)uBeM`B>)Yy z|M#&v{(nY$vU~#^)_;S(g%N=WTmu1avnh^>S+NxC5#SJm1~%GAA{#ER2JXHikAk9L zNckbc3p%=tIxqJHr0?=*whe#PWxC$H`7E@dsx`$B^*VW4lXdlGFE+(}2voLV ze4+Uf^kFTh{Z}SQos<>?0_*~0hyo3GGZMfbxx%}Ea63|7#(_T_5}qC|emk4qaDQ}k z?+459QqU;akyH(k(G&1mI8v6D0u)C#FWn}0EyCjGCtfd17^>B4Y_CST z!5xmS!GiE9-k6OwXjG??f1LYfBlc23DO7+{H)r59wy2^ti8DdGpXrw2BHLlqW0RJn zd1o>iyby{jz|{51hJ~lvRcN^ukIU!ILX@*!ClA>6M2PaK*@N7~vT5dK*?2LKBWA$p z2pkPFz_@Hkwye?7;a9NLKdY|wjN86Di$7LS@!lTb?@m);I#XLW9DpKSF;Y0LIy^7t zbbYXQl#YG4+?4pJF?Wh%bGp4uW~ivCrAc>nM+#SL(0n1gw;AEObX9w{*(6UE-YpBa 
zI||L^$IE{NvQ7JFSp-XU!~KCAKOdR4mq+_;h##KrjI9HgGo?lBpXl7aun~PSF>kZx z-U~tH_3^Tj#fgDat9@j?tUfv?oWx&SRyctillgTK`a^D2j$wvM;{?~BBJvpTc{n{gfSbVR8{~zy4~{xwKH zaMt%h#0WU~#K}K;qeY<X1Tk;WFdzowY}K-aad1i?!m6&3)lhSA`t6mV+guI zlkh85+kuc?obOx^ud4J}HBRJ};HRi1(v~LDcaU9B@6ugFE=+oz_Vpmn0V@S4|0Tm9 zf!H?_J?AKCc~bkjS9CjksoQP}WitpV*&~g>b`$gII;35UOU(CAhj=q{zm#lcVMWbK}hienu=&H@@Fn()Qis1 zZQ#=40LU|Pw{lly&l zp8EneBs8~fsSF{S;4U#z3&`HwJ9r2tv@hr?R=y9(;vb+9PDkr6|1f=|R_-yKgyqhI z;ggHi?2<>zJvza80gFBf^Qyfl8l-P8|Pt{j6;1f8@$yhmKHipHuHrB9mHjN35@L}tJ+!Z#f=3k_{txjrd1b<%5pCdKHeo)>UuOsU~v zlx-SHdK2U^c}NzgJz6-$!SGJlMch2;!|Hl`E^0OhJwd-Z;a=Q6rRFLzK2GYXtc}dz z7}Jbp(ONu7R$1@t!n+eLIg_bK`AX6Rb$t4IZqWvurG`hoBi>zs&T5k~PSiOuybmRE?w8`uLN9R z)tKj^_7+4=g>Qj#AQIh@^!&2k70NGw;JXBV5e->ndx!^8a*j={C37S|;#d-N{-;0D5rW zat8h*&6KD@0{*Z?auQ8Rxp4F?ydBoT0`Klqxyy#KZ*j5}$Q^zBW;r-+%yVf`#;aU& z$jsZ#5@cRuM;EqB=LHA0+!;3Y9i|*tfN+@aEvGL_taU<%NH# z1mCmne^2n+{|Sh1Gs|~R&=Lj7vt+=|^zCc0NMyb8I1U1yXinQuhGfZ&ce*XTipzHB zJSS42cwS=@N@Uu@!SMzJ9ik$3wf`u-)eAq@udp2@St%*$zV&X{opaG_%<_I^k)U5r zr$>;%GvK613CCT;Y$X7li3>zuQZCF1XhdHz3Q^)&=aY&2$+T^jJ^CsqG*~&nc)KUK zZK?5`O`hf4pL|qV80bC3z%(j@KEv4>+{1qH#_$Fe<(ryz^a+o*DL;zxIcLBHV8`5I zZyN-*(^ViHAPec;ymbz}B0<;u)t%h7zjA8*)HU3Ekc-=WxX}JVQJ=x1j5Ax9JGnn2 zySVL*Mi&&Lc@$2*fW$wogh4tcB><(gto066z51C9X&>4Gr&)nM#KM(KJupgkW3@Jq z_U7AV~7jBvn%4pM#SfI2@JNyNcHWqlL+nb3(I03pl#{-yo^ajg3vm?Z|rb z!@-49w-xC&F7-RG2{dFM7=mhW4GJIu-mGFW0d9NGkjpH2iac_x+>IOcr2Li8FueU@ z;OJ-4b_tK8x6$*oh;8@ycBggd6`*h-7{s|lzLpu_*sfK*vZL_s%e_Ynvg9hozJ5L5 z5ls1~@1v=1SFgeoltD1PIr24s2OYcyLCQIZ0&5hpRohmFbxn_|s&*H2d>|pv_JoOl z2%iLf{eJ#Da1bUv{sOjvToT&G! zY%tO}c`Cyrk# zjiSkOIECDg1X=ts?e#J5>OAg4BMv!?xtc`RStProe)mymmk@|VS2Az1W5|lK%%cyV z{mg!_7UlgmT97JQIwknSRVVlT7e95l;-vM%BqiL|{e{9+-X)7LS5#aD%DNgwPQie! zAPa=S<#<@}wPH4&C^hTRaBHqf$=rRn(gqJUnDm*r@*Oo8!6`R|cp*_zdn{#!ua&);Z{w3t@qL1s=E$S$m z=qujASN-!Ml%=p5y+^dWxlDK#jLpD7>27P`ubeTm^l?4mVMhzYpm?}_%AJ9aOpdxm zT{cN?AXAg0CG3X&qDh`P$tj+Y)WgE1l+cEE^?ox#u4J!jt1F+Q2d0zXNiID}F&}kc z&i^{|Grk-phsgg42a@ByIb0jw$f|_f+|d-+HFb4(@5vcO@l>9BXFH&T@z_3>Fmgr7 zXjXQO_d*b(%gW_+W5BP?0!n}nT8bU>8no)ixM+T)v8kR$hN z`LtKbU+-E9^pu(o55J-IT!xFjr7d3j{8T+ZPWz|LsA%$5lbs!No(@W+&Br}(0kC&T ze*N+91y}!rUQf9JKe+Ab@B!Wp38LB~OWu%Rb-g*VGW$wX({OE~bdY^zRnl|Ld5U5TI{6Wd#ozLfk*_Sm!Yggch)>K zA|B2QG#ps{6?Jp(smCWh;@q^k$F-Te`@HhfUv&aSDrcHiGENpL<@X?Xk?MvuFhkJ5 z%B5K{dqWo9`e>jr*=f9_N8-Q~{Au@NtY_GRbBVXn>UxlS;|8SimqVSmgHYh}V22}~ zQLoMmc@HCua)%yGssy|={P~_IqM|BaC6~uggUZMkvVg2_+J}E6zQXgP^FmfQW}kL* zYJ~Z4aka7OFqM`s&YLWmYPi=_^SQU(wc*zs6dMw(6(AKMZH}&zzqI;=H*i_JKIu`G z#dyYkUlI89*HjOE_&oN?4e@+6Mt<>VZ1YGX^(D{b+Di#xV8^5IAZk(?5>U?t8q&Py zD#2*(yna;kluw|LUeYXemcE~zTkoj&1HwZ;y|)fljx#mQjDeKsrKEnKhUAU^P7W$AKi{#ru7KAnZ5wm_mZ#F764IlwaIdPIWP7DUa&XjP7XR0(4HJW&FUe3e+wS z25V=7?jk1L7b(fQF6()uXjC3K%J~?v;EfZU0yKC4n6|P6HZ5*_H0Sk1Ut=Pw-{F-* z?fU9S%Ge#9mqMP3ZMHQG5AXPcJfV6-D|{dz?*T=-M|W}tA_-DtMpx18M+Em2TtXe` zBh{;1L;UH%XWt|9U(p#__ski;5Cc=lk2`n?gMh{Z1+)ZSYmQkA0@NC)58Z5yKgRTM znho=RdiK!FR`7i;h$0!HeVj&$ISuOHBV32~*qrLY(bvhNzd@zlGsun6152{eFTma! zNU#yPK&$nQGx2FF<{&S`=Ln_Kcbdxp7zWrpd@~Rb0IdzX@-6annYiM4+7B|1KnJnW zuKmS`uPmRTj%O)U$F(Nn!Yjqd0S|WX32?2! 
zI`UlSIieDvh)78n9K^Gzo!05p($>~$CAj=^TG%ktkE-0bJotP$@CdP-(UCB^UTb-ckX@Wk33HT+3&lv_kQ$5)Vv+~+gbl)Rc7XVhu zACSxDcQfad&mP?zgQbx@Ks_X9OHy)AR~uR!v;y|?B+DY$CmKEdvfzNvH<`oqmO#4FNauep(C*Ne$@1KyQxWh}-m{m0i;rbC(WAFV8^jsBsrxiJ) zk1WY*^!HylOtu+CRAyu2)+!8=q~hcBSDkQ9RYn!_3ucbEydT*BvR^qLqnXB9bfo9Z2E%MxKzw^S&L6fGOwf5v1pP9%W$l(yD%RA zpodod6KokQdw4Ho}Dr;-Z|b@zU6Yg9BXF#P;+)qc=f;EqpeDq^rfQL4G&H49}SF()*Kg3T@B zy=xuJmkGyg7q_Zm>0DAd(GQ$;!|6moGwmidNZHpb`KEA<3C?~C}XUzB0bS)pmA=Fz9)Q;wdcOR>cbumMZt;x7p2Aj zgN&AcuawyZOC}ZmMTxJs0wpLFA2MDm$;tU>rSkFaOn02mH5~0Wx-57*ruEWtz#I4Fim!sS4I~6J{_J*zlEf?NJ2RDdOz}H(BZdBS5 zXx{!#74g)uzk8P1Wt+^T0{ZBZ$G><(^XpX!iTec=tP^@xx+}xc7z#4W{5ARRX3^0D z|IrA(X}-_wX;DWeZK>H*{&cm{-{ES@P$2fi`D=>vHXa)4g>%-akZ^$OKL` zgUGAi$3;5fG;HLupCfF`qtM@|*&*keSTjyi?06!{Jgt zNMim1feS{C<7Qg?zLAB!-#-xQS0=QD#1#d@3EgKIN|ZVCVJ`rzqcr-+id#HNlyb*N z!xi~`jXyloHn1VEZ~(;foNa>mj(}b=Zp%84P$sCRWej@csz0wwI^%kLF=~ry=R@v8 z_P`Yddnwbm8{@__08)gZRsr&7M?*{iJtEbHbJpUB%8M%Sk9|)eNzY^ z@6nrUix*JJt49%Uq6SWBh(WV}8T{z88u?1WA5afkU+=F*jM~XaJnFk8gmfLU1}0F~_LGC|#gOO?fa@z|XEp=a>)*8?$@p%s z1H!rrXrf3n5O8}9nT$*&2W|iWpZgz>IN%Xjc0msWI#7!#Xj0hkJB)h^f8XTqhM8Px zvv9x+&_XW69EXq|?MMJ)rNa#%jN`y9bPM?Z_vR+%Z>I*b%X5(2MyciY8Nl_y22k{4ZC>CUP2Gky@Wex^unmmd(ag%T>Tj%bFTZvCGTb zGV?Ai%br#9we_dcg(*B?yKnvBvXQ=AASG9Wn`t#Q3%1QD61$M7F|%VImgbi38{Z{4 z_|==g;ML}%r%g3A62|4P!}-e|9OEnIV0NLW0KcE0M#dkZLBOKC<18142V_-)+8p*=Z{{aoF{Q)&c zakd`OI7Fv}uOUYuqyyCHuaUpG!SmCQ%Ku4+)V>C^W4UuP>(Kwb7XknM@;{W8aI|B3 z7Q_GC38jC!{5II_Fmxlo)c&9Q0RE@ndq`9JUqAA{X7az5>woTNvPO71WtugNXFa}m z(}_2SL3#LvW}V_tnLvYMUc&iy=#z&|I7;jM zrE}^xHN48jqi<}#9ZmP`GQGZTjm(dBG87XYw|E znK5>by|3e_tl67)Je z{vnsZk1`Z``urSz?g$cj;^Tifpg|btm}%MWle6{hqr7Q& zL)*#5xtH~E^r!jBDYF(TJcpxUOV>9!`S@Gn+&>ik25&Dd!mw?-o!BH`S9d6*)3CL5 zzu9ZQRdoLqa6o9-Rx%?TS{Vc+|gkWL>H-p3M{BVAF}+T1J5p}XFxFm}ZA zsVe#Yn)>kBu2=dy8q}KB4s1h?G9F1tP5fEHTGCR9v(97* z>Kkk>5|8ES!*@dN?`P_LE$=47&ni%MVxep#FvS4}cZZx$P4$Gg`E*Gn z@4T#CxRF|IAUylQu{XBQ{0$FK$`?Exap-jRg$zcHo1r_<;Y?S2ZT*i7>=1 z`JCbrPUTbqVSSdVL`h^eYOJV;S%!pI$5|?CnOcVnm>s`lrtK6<`F>e7jK%Iz_J!af z2~VfA&X>h!4WW>4mzF4setkMRJ1`_*A|b2H^AvwTUrB#JC!XU)TAdgQ3=9(iu!I-! z;K<4uxD@7ENy@Y%#^G!5kzw5VuNjGYN`qe?oA&x@Z&UQ)_Qgtx@rQ22Vbm!F+<37w zFQkUpuRXC^Uq6PNu~VouJ2`wl@AuifW@k;ctw;A>*TZu{w>VDO$=w*LDs-?F4Y7BH zvP>GN$gGo6RdiOW{-xrSqm`*AjP)t;MuE#|u!dd>BOVe&3iv|OBAzV+#8W4HViSSs z3VBRe&gkPP>`hpL$fQbSQ3Rap&iP=y2G_;uTjXEJ0rE8<(_{Co5)B>}u}y9*b~F;X zHdm8+bKX4J<`pa~U1sTRUpXV5FBD@>gEctxAx;&O^v;rB;S?TbIq%AA= z-R5u%R9%~P0Vv)PN;x1w8&`Mhd~P$2$hZWak?n-ghjc(<|N`<%HnpG)}D1GZU<-$sy!y$3?0_YVx>%=Zet$Ry;B{Z-+#H^#6FB9pnNd&shfmfqwgMk`(xlQfmPekp4X=xX`{lnMy); zx)CG~oA?7tNJD;_T7!`o?8%Cxb7XQksd#G{p$agXd;u%h^$%#n9pZX&AJyB@kJ%9; zC+GXYT46uh7M9oB1BAMFm)A#U^qmuAvUKP}$-@djS-s-Z$9{9GJEvK!-A%9aNb)ID zPtdVAp1TphNb5Gqd0A2XO1q^ev6FqHvN zPU7!wKVNz~RwNC$Uo*k!Yxtp8S{7^X9bfGln;Mtq^)=MLxONQDSGw$sW)!vf1+{01 z{G?}9LsCxX08kJVDFsD}-9r)XrgNdbcmDwi06xL0{r{@H+TS++fa>Ui1FnNIn3M+? 
[GIT binary patch data omitted (base85-encoded literal blocks)]

diff --git a/docs/index.asciidoc b/docs/index.asciidoc
index 2de0d54a7..eda790be4 100644
--- a/docs/index.asciidoc
+++ b/docs/index.asciidoc
@@ -4,6 +4,7 @@ include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[]
 include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
 
 include::introduction.asciidoc[]
+include::getting-started.asciidoc[]
 include::changelog.asciidoc[]
 include::installation.asciidoc[]
 include::connecting.asciidoc[]

diff --git a/docs/introduction.asciidoc b/docs/introduction.asciidoc
index 83885d09e..e6b5963e0 100644
--- a/docs/introduction.asciidoc
+++ b/docs/introduction.asciidoc
@@ -17,66 +17,6 @@ about the features of the client.
 * TypeScript support out of the box.
 
-[discrete]
-=== Quick start
-
-[source,js]
-----
-'use strict'
-
-const { Client } = require('@elastic/elasticsearch')
-const client = new Client({
-  cloud: { id: '<cloud-id>' },
-  auth: { apiKey: 'base64EncodedKey' }
-})
-
-async function run () {
-  // Let's start by indexing some data
-  await client.index({
-    index: 'game-of-thrones',
-    document: {
-      character: 'Ned Stark',
-      quote: 'Winter is coming.'
-    }
-  })
-
-  await client.index({
-    index: 'game-of-thrones',
-    document: {
-      character: 'Daenerys Targaryen',
-      quote: 'I am the blood of the dragon.'
-    }
-  })
-
-  await client.index({
-    index: 'game-of-thrones',
-    document: {
-      character: 'Tyrion Lannister',
-      quote: 'A mind needs books like a sword needs a whetstone.'
-    }
-  })
-
-  // here we are forcing an index refresh, otherwise we will not
-  // get any result in the consequent search
-  await client.indices.refresh({ index: 'game-of-thrones' })
-
-  // Let's search!
-  const result = await client.search({
-    index: 'game-of-thrones',
-    query: {
-      match: { quote: 'winter' }
-    }
-  })
-
-  console.log(result.hits.hits)
-}
-
-run().catch(console.log)
-----
-
-TIP: For an elaborate example of how to ingest data into Elastic Cloud,
-refer to {cloud}/ec-getting-started-node-js.html[this page].
-
 [discrete]
 ==== Install multiple versions

From 53d3b4a83b0294125ad13e06acdfe97eafe9f0ed Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?=
Date: Wed, 5 Jul 2023 19:57:31 +0200
Subject: [PATCH 227/647] [DOCS] Changes getting started button link on landing page (#1933)

---
 docs/index-custom-title-page.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/index-custom-title-page.html b/docs/index-custom-title-page.html
index b934f8e0a..804b36782 100644
--- a/docs/index-custom-title-page.html
+++ b/docs/index-custom-title-page.html
@@ -63,7 +63,7 @@

    Documentation

    The official Node.js client provides one-to-one mapping with Elasticsearch REST APIs.

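The quick start removed from the introduction above now lives on the new getting-started page, which the landing-page button change above points to. For reference, a minimal connect-and-verify sketch in the same spirit; the Cloud ID and API key below are placeholders, not real credentials:

[source,js]
----
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  cloud: { id: '<cloud-id>' },          // placeholder Elastic Cloud deployment ID
  auth: { apiKey: 'base64EncodedKey' }  // placeholder API key
})

// info() calls the root endpoint; a cheap way to verify the connection works
client.info().then(console.log, console.error)
----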
    From 2375059aa36281a5b7b004e3d0838a96ddf04238 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 6 Jul 2023 12:28:43 -0500 Subject: [PATCH 228/647] Include body parameters in generated API reference docs (#1936) --- docs/reference.asciidoc | 841 ++++++++++++++++++------------------ src/api/types.ts | 2 +- src/api/typesWithBodyKey.ts | 2 +- 3 files changed, 426 insertions(+), 419 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 528ff0b63..d90f570b0 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -39,6 +39,7 @@ client.bulk({ ... }) * *Request (object):* ** *`index` (Optional, string)*: Default index for items which don't provide one +** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])* ** *`pipeline` (Optional, string)*: The pipeline id to preprocess incoming documents with ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. ** *`routing` (Optional, string)*: Specific routing value @@ -93,6 +94,7 @@ client.count({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: A list of indices to restrict the results +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) ** *`analyzer` (Optional, string)*: The analyzer to use for the query string ** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) @@ -107,7 +109,6 @@ client.count({ ... 
}) ** *`routing` (Optional, string)*: A list of specific routing values ** *`terminate_after` (Optional, number)*: The maximum count for each shard, upon reaching which the query execution will terminate early ** *`q` (Optional, string)*: Query in the Lucene query string syntax -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* [discrete] === create @@ -126,6 +127,7 @@ client.create({ id, index }) * *Request (object):* ** *`id` (string)*: Document ID ** *`index` (string)*: The name of the index +** *`document` (Optional, object)*: A document. ** *`pipeline` (Optional, string)*: The pipeline id to preprocess incoming documents with ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. ** *`routing` (Optional, string)*: Specific routing value @@ -172,6 +174,9 @@ client.deleteByQuery({ index }) * *Request (object):* ** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices +** *`max_docs` (Optional, number)* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`slice` (Optional, { field, id, max })* ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) ** *`analyzer` (Optional, string)*: The analyzer to use for the query string ** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) @@ -182,7 +187,6 @@ client.deleteByQuery({ index }) ** *`from` (Optional, number)*: Starting offset (default: 0) ** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) ** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored -** *`max_docs` (Optional, number)*: Maximum number of documents to process (default: all documents) ** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) ** *`refresh` (Optional, boolean)*: Should the affected indexes be refreshed? ** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to index level setting @@ -201,8 +205,6 @@ client.deleteByQuery({ index }) ** *`version` (Optional, boolean)*: Specify whether to return document version as part of a hit ** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the delete by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) ** *`wait_for_completion` (Optional, boolean)*: Should the request should block until the delete by query is complete. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`slice` (Optional, { field, id, max })* [discrete] === delete_by_query_rethrottle @@ -303,6 +305,7 @@ client.explain({ id, index }) * *Request (object):* ** *`id` (string)*: The document ID ** *`index` (string)*: The name of the index +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`analyzer` (Optional, string)*: The analyzer for the query string query ** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcards and prefix queries in the query string query should be 
analyzed (default: false) ** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) @@ -315,7 +318,6 @@ client.explain({ id, index }) ** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field ** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response ** *`q` (Optional, string)*: Query in the Lucene query string syntax -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* [discrete] === field_caps @@ -331,18 +333,18 @@ client.fieldCaps({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. +** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. +** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. +** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. +These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. ** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. 
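The newly documented `query` property of the `explain` API above is a regular Query DSL object. A minimal sketch, assuming a hypothetical `game-of-thrones` index and document ID:

[source,js]
----
// Ask Elasticsearch why a specific document does (or does not) match a query
const response = await client.explain({
  index: 'game-of-thrones',  // hypothetical index
  id: '1',                   // hypothetical document ID
  query: { match: { quote: 'winter' } }
})
console.log(response.matched, response.explanation)
----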
** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. ** *`include_unmapped` (Optional, boolean)*: If true, unmapped fields are included in the response. ** *`filters` (Optional, string)*: An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent ** *`types` (Optional, string[])*: Only return results for fields that have one of the types in the list -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. -** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. -These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. [discrete] === get @@ -465,6 +467,7 @@ client.index({ index }) * *Request (object):* ** *`index` (string)*: The name of the index ** *`id` (Optional, string)*: Document ID +** *`document` (Optional, object)*: A document. ** *`if_primary_term` (Optional, number)*: only perform the index operation if the last operation that has changed the document has the specified primary term ** *`if_seq_no` (Optional, number)*: only perform the index operation if the last operation that has changed the document has the specified sequence number ** *`op_type` (Optional, Enum("index" | "create"))*: Explicit operation type. Defaults to `index` for requests with an explicit document ID, and to `create`for requests without an explicit document ID @@ -503,7 +506,6 @@ client.knnSearch({ index, knn }) ** *`index` (string | string[])*: A list of index names to search; use `_all` or to perform the operation on all indices ** *`knn` ({ field, query_vector, k, num_candidates })*: kNN query to execute -** *`routing` (Optional, string)*: A list of specific routing values ** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. ** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns @@ -517,6 +519,7 @@ in the hits.fields property of the response. Accepts wildcard (*) patterns. 
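The `document` property documented for the `index` API above carries the full document source. A minimal round-trip sketch with invented names:

[source,js]
----
// Index a document under an explicit ID, then read it back
await client.index({
  index: 'game-of-thrones',  // hypothetical index
  id: '1',
  document: { character: 'Arya Stark', quote: 'A girl is Arya Stark of Winterfell.' },
  refresh: 'wait_for'        // make the write visible to searches before returning
})
const doc = await client.get({ index: 'game-of-thrones', id: '1' })
console.log(doc._source)
----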
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. +** *`routing` (Optional, string)*: A list of specific routing values [discrete] === mget @@ -532,6 +535,8 @@ client.mget({ ... }) * *Request (object):* ** *`index` (Optional, string)*: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. +** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: The documents you want to retrieve. Required if no index is specified in the request URI. +** *`ids` (Optional, string | string[])*: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. ** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. ** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. ** *`refresh` (Optional, boolean)*: If `true`, the request refreshes relevant shards before retrieving documents. @@ -543,8 +548,6 @@ You can also use this parameter to exclude fields from the subset specified in ` If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. ** *`stored_fields` (Optional, string | string[])*: If `true`, retrieves the document fields stored in the index rather than the document `_source`. -** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: The documents you want to retrieve. Required if no index is specified in the request URI. -** *`ids` (Optional, string | string[])*: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. [discrete] === msearch @@ -560,6 +563,7 @@ client.msearch({ ... 
}) * *Request (object):* ** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases to search. +** *`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* ** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. ** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. @@ -587,6 +591,7 @@ client.msearchTemplate({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: A list of index names to use as default +** *`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* ** *`ccs_minimize_roundtrips` (Optional, boolean)*: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution ** *`max_concurrent_searches` (Optional, number)*: Controls the maximum number of concurrent searches the multi search api will execute ** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type @@ -607,7 +612,8 @@ client.mtermvectors({ ... }) * *Request (object):* ** *`index` (Optional, string)*: The index in which the document resides. -** *`ids` (Optional, string[])*: A list of documents ids. You must define ids as parameter or set "ids" or "docs" in the request body +** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])* +** *`ids` (Optional, string[])* ** *`fields` (Optional, string | string[])*: A list of fields to return. Applies to all returned documents unless otherwise specified in body "params" or "docs". ** *`field_statistics` (Optional, boolean)*: Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". 
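The `searches` property documented for the `msearch` API above takes interleaved header/body pairs, one pair per search. A minimal sketch with invented names:

[source,js]
----
const result = await client.msearch({
  searches: [
    { index: 'game-of-thrones' },                 // header for the first search
    { query: { match: { quote: 'winter' } } },    // body for the first search
    { index: 'game-of-thrones' },                 // header for the second search
    { query: { match: { character: 'stark' } } }  // body for the second search
  ]
})
console.log(result.responses.length)  // one entry per search
----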
** *`offsets` (Optional, boolean)*: Specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". @@ -619,7 +625,6 @@ client.mtermvectors({ ... }) ** *`term_statistics` (Optional, boolean)*: Specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". ** *`version` (Optional, number)*: Explicit version number for concurrency control ** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type -** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])* [discrete] === open_point_in_time @@ -686,11 +691,11 @@ client.rankEval({ requests }) ** *`requests` ({ id, request, ratings, template_id, params }[])*: A set of typical search requests, together with their provided ratings. ** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. +** *`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })*: Definition of the evaluation metric to calculate. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. ** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. ** *`search_type` (Optional, string)*: Search operation type -** *`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })*: Definition of the evaluation metric to calculate. [discrete] === reindex @@ -709,6 +714,10 @@ client.reindex({ dest, source }) * *Request (object):* ** *`dest` ({ index, op_type, pipeline, routing, version_type })* ** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })* +** *`conflicts` (Optional, Enum("abort" | "proceed"))* +** *`max_docs` (Optional, number)* +** *`script` (Optional, { lang, options, source } | { id })* +** *`size` (Optional, number)* ** *`refresh` (Optional, boolean)*: Should the affected indexes be refreshed? ** *`requests_per_second` (Optional, float)*: The throttle to set on this request in sub-requests per second. -1 means no throttle. ** *`scroll` (Optional, string | -1 | 0)*: Control how long to keep the search context alive @@ -717,10 +726,6 @@ client.reindex({ dest, source }) ** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the reindex operation. Defaults to 1, meaning the primary shard only. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) ** *`wait_for_completion` (Optional, boolean)*: Should the request should block until the reindex is complete. ** *`require_alias` (Optional, boolean)* -** *`conflicts` (Optional, Enum("abort" | "proceed"))* -** *`max_docs` (Optional, number)* -** *`script` (Optional, { lang, options, source } | { id })* -** *`size` (Optional, number)* [discrete] === reindex_rethrottle @@ -804,6 +809,62 @@ client.search({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices +** *`aggregations` (Optional, Record)* +** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* +** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. +** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. +** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. +** *`highlight` (Optional, { encoder, fields })* +** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If true, the exact +number of hits is returned at the cost of some performance. If false, the +response does not include the total number of hits matching the query. +Defaults to 10,000 hits. +** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. +** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field +names matching these patterns in the hits.fields property of the response. +** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter }[])*: Defines the approximate kNN search to run. +** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use +** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are +not included in the search results. 
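The `knn` and `min_score` properties documented above can be passed directly to `client.search`. A minimal sketch; the index, vector field, and three-dimensional vectors are invented for illustration:

[source,js]
----
const result = await client.search({
  index: 'my-vectors',        // hypothetical index with a dense_vector field
  knn: {
    field: 'embedding',       // hypothetical dense_vector field
    query_vector: [0.1, 0.2, 0.3],
    k: 10,                    // number of nearest neighbors to return
    num_candidates: 100       // candidates examined per shard
  },
  min_score: 0.5              // drop weak matches, as described above
})
console.log(result.hits.hits)
----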
+** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`profile` (Optional, boolean)* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`rescore` (Optional, { query, window_size } | { query, window_size }[])* +** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. +** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* +** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +** *`slice` (Optional, { field, id, max })* +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])* +** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +** *`suggest` (Optional, { text })* +** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. If a query reaches this +limit, Elasticsearch terminates the query early. Elasticsearch collects documents +before sorting. Defaults to 0, which does not terminate query execution early. +** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. If no response +is received before the timeout expires, the request fails and returns an error. +Defaults to no timeout. +** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting. +** *`version` (Optional, boolean)*: If true, returns document version as part of a hit. +** *`seq_no_primary_term` (Optional, boolean)*: If true, returns sequence number and primary term of the last modification +of each hit. See Optimistic concurrency control. 
+** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified,
+no stored fields are included in the response. If this field is specified, the _source
+parameter defaults to false. You can pass _source: true to return both source fields
+and stored fields in the search response.
+** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an <index> in the request path.
+** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics
+aggregation for its associated searches. You can retrieve these stats using
+the indices stats API.
 ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
 ** *`allow_partial_search_results` (Optional, boolean)*: Indicate if an error should be returned if there is a partial search failure or timeout
 ** *`analyzer` (Optional, string)*: The analyzer to use for the query string
 ** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false)
 ** *`batched_reduce_size` (Optional, number)*: The number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large.
 ** *`ccs_minimize_roundtrips` (Optional, boolean)*: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution
 ** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR)
 ** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string
-** *`docvalue_fields` (Optional, string | string[])*: A list of fields to return as the docvalue representation of a field for each hit
 ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`explain` (Optional, boolean)*: Specify whether to return detailed information about score computation as part of a hit
 ** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled
 ** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
 ** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored
 ** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests
 ** *`min_compatible_shard_node` (Optional, string)*: The minimum compatible version that all shards involved in search should have for this request to be successful
 ** *`pre_filter_shard_size` (Optional, number)*: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint.
 ** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to index level setting
 ** *`routing` (Optional, string)*: A list of specific routing values
 ** *`scroll` (Optional, string | -1 | 0)*: Specify how long a consistent view of the index should be maintained for scrolled search
 ** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type
-** *`stats` (Optional, string[])*: Specific 'tag' of the request for logging and statistical purposes
-** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit
 ** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions.
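Several of the body properties documented above (`query`, `aggregations`, `sort`, `size`) combine naturally in one request. A minimal sketch with invented index and field names:

[source,js]
----
const result = await client.search({
  index: 'game-of-thrones',
  query: { match: { quote: 'winter' } },
  aggregations: {
    by_character: { terms: { field: 'character.keyword' } }  // hypothetical keyword field
  },
  sort: [{ _score: 'desc' }],
  size: 5
})
console.log(result.hits.hits, result.aggregations)
----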
** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specify suggest mode ** *`suggest_size` (Optional, number)*: How many suggestions to return in response ** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. -** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`track_total_hits` (Optional, boolean | number)*: Indicate if the number of documents that match the query should be tracked. A number can also be specified, to accurately track the total hit count up to the number. -** *`track_scores` (Optional, boolean)*: Whether to calculate and return scores even if they are not used for sorting ** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response ** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response -** *`version` (Optional, boolean)*: Specify whether to return document version as part of a hit -** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return ** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field ** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field -** *`seq_no_primary_term` (Optional, boolean)*: Specify whether to return sequence number and primary term of the last modification of each hit ** *`q` (Optional, string)*: Query in the Lucene query string syntax -** *`size` (Optional, number)*: Number of hits to return (default: 10) -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`sort` (Optional, string | string[])*: A list of : pairs -** *`aggregations` (Optional, Record)* -** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* -** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. -** *`highlight` (Optional, { encoder, fields })* -** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. -** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter }[])*: Defines the approximate kNN search to run. -** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use -** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are -not included in the search results. 
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`rescore` (Optional, { query, window_size } | { query, window_size }[])* -** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* -** *`slice` (Optional, { field, id, max })* -** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names -matching these patterns in the hits.fields property of the response. -** *`suggest` (Optional, { text })* -** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you -cannot specify an in the request path. -** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take -precedence over mapped fields with the same name. [discrete] === search_mvt @@ -889,24 +913,6 @@ client.searchMvt({ index, field, zoom, x, y }) ** *`zoom` (number)*: Zoom level for the vector tile to search ** *`x` (number)*: X coordinate for the vector tile to search ** *`y` (number)*: Y coordinate for the vector tile to search -** *`exact_bounds` (Optional, boolean)*: If false, the meta layer’s feature is the bounding box of the tile. -If true, the meta layer’s feature is a bounding box resulting from a -geo_bounds aggregation. The aggregation runs on values that intersect -the // tile with wrap_longitude set to false. The resulting -bounding box may be larger than the vector tile. -** *`extent` (Optional, number)*: Size, in pixels, of a side of the tile. Vector tiles are square with equal sides. -** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: Aggregation used to create a grid for `field`. -** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if is 7 -and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results -don’t include the aggs layer. 
-** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer,
-each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon
-of the cells bounding box. If 'point' each feature is a Point that is the centroid
-of the cell.
-** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000.
-If 0, results don’t include the hits layer.
-** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing
-suggested label positions for the original features.
 ** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid.
 Supports the following aggregation types:
 - avg
 - cardinality
 - max
 - min
 - sum
 ** *`buffer` (Optional, number)*: Size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile.
+** *`exact_bounds` (Optional, boolean)*: If false, the meta layer’s feature is the bounding box of the tile.
+If true, the meta layer’s feature is a bounding box resulting from a
+geo_bounds aggregation. The aggregation runs on values that intersect
+the <zoom>/<x>/<y> tile with wrap_longitude set to false. The resulting
+bounding box may be larger than the vector tile.
+** *`extent` (Optional, number)*: Size, in pixels, of a side of the tile. Vector tiles are square with equal sides.
 ** *`fields` (Optional, string | string[])*: Fields to return in the `hits` layer. Supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results.
+** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: Aggregation used to create a grid for the `field`.
+** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if <zoom> is 7
+and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results
+don’t include the aggs layer.
+** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer,
+each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon
+of the cell's bounding box. If 'point' each feature is a Point that is the centroid
+of the cell.
 ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Query DSL used to filter documents for the search.
 ** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
+** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000.
+If 0, results don’t include the hits layer.
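A minimal sketch of the vector-tile API whose parameters are documented above; the index, geometry field, and tile coordinates are invented, and the response is a binary Mapbox vector tile rather than JSON:

[source,js]
----
const tile = await client.searchMvt({
  index: 'museums',    // hypothetical index with a geo_point or geo_shape field
  field: 'location',   // hypothetical geometry field
  zoom: 13,
  x: 4207,
  y: 2692,
  grid_precision: 2,   // extra zoom levels for the aggs layer, as described above
  extent: 4096
})
console.log(tile.length)  // raw protobuf-encoded tile bytes
----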
** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sorts features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box’s diagonal length, from longest to shortest. ** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. +** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing +suggested label positions for the original features. [discrete] === search_shards @@ -966,26 +990,26 @@ client.searchTemplate({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (*). +** *`explain` (Optional, boolean)* +** *`id` (Optional, string)*: ID of the search template to use. If no source is specified, +this parameter is required. +** *`params` (Optional, Record)* +** *`profile` (Optional, boolean)* +** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's +request body. Also supports Mustache variables. If no id is specified, this +parameter is required. ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) ** *`ccs_minimize_roundtrips` (Optional, boolean)*: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`explain` (Optional, boolean)*: Specify whether to return detailed information about score computation as part of a hit ** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled ** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) ** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`profile` (Optional, boolean)*: Specify whether to profile the query execution ** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. ** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index should be maintained for scrolled search. ** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. ** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are rendered as an integer in the response. ** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response -** *`id` (Optional, string)*: ID of the search template to use. If no source is specified, -this parameter is required. -** *`params` (Optional, Record)* -** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's -request body. Also supports Mustache variables. 
If no id is specified, this -parameter is required. [discrete] === terms_enum @@ -1024,6 +1048,9 @@ client.termvectors({ index }) * *Request (object):* ** *`index` (string)*: The index in which the document resides. ** *`id` (Optional, string)*: The id of the document, when not specified a doc param should be supplied. +** *`doc` (Optional, object)*: A document. +** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })* +** *`per_field_analyzer` (Optional, Record)* ** *`fields` (Optional, string | string[])*: A list of fields to return. ** *`field_statistics` (Optional, boolean)*: Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. ** *`offsets` (Optional, boolean)*: Specifies if term offsets should be returned. @@ -1035,9 +1062,6 @@ client.termvectors({ index }) ** *`term_statistics` (Optional, boolean)*: Specifies if total term frequency and document frequency should be returned. ** *`version` (Optional, number)*: Explicit version number for concurrency control ** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type -** *`doc` (Optional, document object)* -** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })* -** *`per_field_analyzer` (Optional, Record)* [discrete] === update @@ -1054,6 +1078,16 @@ client.update({ id, index }) * *Request (object):* ** *`id` (string)*: Document ID ** *`index` (string)*: The name of the index +** *`detect_noop` (Optional, boolean)*: Set to false to disable setting 'result' in the response +to 'noop' if no change to the document occurred. +** *`doc` (Optional, object)*: A partial update to an existing document. +** *`doc_as_upsert` (Optional, boolean)*: Set to true to use the contents of 'doc' as the value of 'upsert' +** *`script` (Optional, { lang, options, source } | { id })*: Script to execute to update the document. +** *`scripted_upsert` (Optional, boolean)*: Set to true to execute the script whether or not the document exists. +** *`_source` (Optional, boolean | { excludes, includes })*: Set to false to disable source retrieval. You can also specify a comma-separated +list of the fields you want to retrieve. +** *`upsert` (Optional, object)*: If the document does not already exist, the contents of 'upsert' are inserted as a +new document. If the document exists, the 'script' is executed. ** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. ** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. ** *`lang` (Optional, string)*: The script language. @@ -1069,18 +1103,8 @@ The actual wait time could be longer, particularly when multiple waits occur. ** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operations. Set to 'all' or any positive integer up to the total number of shards in the index (number_of_replicas+1). Defaults to 1 meaning the primary shard. -** *`_source` (Optional, boolean | string | string[])*: Set to false to disable source retrieval. You can also specify a comma-separated -list of the fields you want to retrieve. ** *`_source_excludes` (Optional, string | string[])*: Specify the source fields you want to exclude. 
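The `doc` and `upsert` properties documented for the `update` API above can be combined: `doc` is merged into an existing document, and `upsert` is indexed whole if the document is missing. A minimal sketch with invented names:

[source,js]
----
await client.update({
  index: 'game-of-thrones',  // hypothetical index
  id: '1',
  doc: { house: 'Stark' },                            // partial update when the doc exists
  upsert: { character: 'Ned Stark', house: 'Stark' }  // full doc used when it does not
})
----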
** *`_source_includes` (Optional, string | string[])*: Specify the source fields you want to retrieve. -** *`detect_noop` (Optional, boolean)*: Set to false to disable setting 'result' in the response -to 'noop' if no change to the document occurred. -** *`doc` (Optional, partial document object)*: A partial update to an existing document. -** *`doc_as_upsert` (Optional, boolean)*: Set to true to use the contents of 'doc' as the value of 'upsert' -** *`script` (Optional, { lang, options, source } | { id })*: Script to execute to update the document. -** *`scripted_upsert` (Optional, boolean)*: Set to true to execute the script whether or not the document exists. -** *`upsert` (Optional, document object)*: If the document does not already exist, the contents of 'upsert' are inserted as a -new document. If the document exists, the 'script' is executed. [discrete] === update_by_query @@ -1097,17 +1121,20 @@ client.updateByQuery({ index }) * *Request (object):* ** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices +** *`max_docs` (Optional, number)* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`script` (Optional, { lang, options, source } | { id })* +** *`slice` (Optional, { field, id, max })* +** *`conflicts` (Optional, Enum("abort" | "proceed"))* ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) ** *`analyzer` (Optional, string)*: The analyzer to use for the query string ** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) -** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do when the update by query hits version conflicts? ** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) ** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. ** *`from` (Optional, number)*: Starting offset (default: 0) ** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) ** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored -** *`max_docs` (Optional, number)*: Maximum number of documents to process (default: all documents) ** *`pipeline` (Optional, string)*: Ingest pipeline to set on index requests made by this action. 
(default: none)
** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
** *`refresh` (Optional, boolean)*: Should the affected indexes be refreshed?
@@ -1127,9 +1154,6 @@ client.updateByQuery({ index })
** *`version_type` (Optional, boolean)*: Should the document increment the version number (internal) on hit or not (reindex)
** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the update by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)
** *`wait_for_completion` (Optional, boolean)*: Should the request block until the update by query operation is complete.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
-** *`script` (Optional, { lang, options, source } | { id })*
-** *`slice` (Optional, { field, id, max })*

[discrete]
=== update_by_query_rethrottle
@@ -1221,6 +1245,61 @@ client.asyncSearch.submit({ ... })

* *Request (object):*
** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices
+** *`aggregations` (Optional, Record)*
+** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*
+** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit.
+** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins.
+** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000
+hits using the from and size parameters. To page through more hits, use the
+search_after parameter.
+** *`highlight` (Optional, { encoder, fields })*
+** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If true, the exact
+number of hits is returned at the cost of some performance. If false, the
+response does not include the total number of hits matching the query.
+Defaults to 10,000 hits.
+** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices.
+** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field
+names matching these patterns in the hits.fields property of the response.
+** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter }[])*: Defines the approximate kNN search to run.
+** *`min_score` (Optional, number)*: Minimum _score for matching documents.
Documents with a lower _score are +not included in the search results. +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`profile` (Optional, boolean)* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`rescore` (Optional, { query, window_size } | { query, window_size }[])* +** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. +** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* +** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +** *`slice` (Optional, { field, id, max })* +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])* +** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +** *`suggest` (Optional, { text })* +** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. If a query reaches this +limit, Elasticsearch terminates the query early. Elasticsearch collects documents +before sorting. Defaults to 0, which does not terminate query execution early. +** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. If no response +is received before the timeout expires, the request fails and returns an error. +Defaults to no timeout. +** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting. +** *`version` (Optional, boolean)*: If true, returns document version as part of a hit. +** *`seq_no_primary_term` (Optional, boolean)*: If true, returns sequence number and primary term of the last modification +of each hit. See Optimistic concurrency control. 
+** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified,
+no stored fields are included in the response. If this field is specified, the _source
+parameter defaults to false. You can pass _source: true to return both source fields
+and stored fields in the search response.
+** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an `<index>` in the request path.
+** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics
+aggregation for its associated searches. You can retrieve these stats using
+the indices stats API.
** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Blocks and waits until the search is completed up to a certain timeout.
When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster.
** *`keep_on_completion` (Optional, boolean)*: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`.
@@ -1235,9 +1314,7 @@
A partial reduction is performed every time the coordinating node has received a
** *`ccs_minimize_roundtrips` (Optional, boolean)*: The default value is the only supported value.
** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR)
** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string
-** *`docvalue_fields` (Optional, string | string[])*: A list of fields to return as the docvalue representation of a field for each hit
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`explain` (Optional, boolean)*: Specify whether to return detailed information about score computation as part of a hit
** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled
** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored
** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests
** *`min_compatible_shard_node` (Optional, string)*: The minimum compatible version that all shards involved in search should have for this request to be successful
** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to true
** *`routing` (Optional, string)*: A list of specific routing values
** *`scroll` (Optional, string | -1 | 0)*
** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type
-** *`stats` (Optional, string[])*: Specific 'tag' of the request for logging and statistical purposes
-** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit
** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions.
** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specify suggest mode
** *`suggest_size` (Optional, number)*: How many suggestions to return in response
** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned.
-** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`track_total_hits` (Optional, boolean | number)*: Indicate if the number of documents that match the query should be tracked. A number can also be specified, to accurately track the total hit count up to the number. -** *`track_scores` (Optional, boolean)*: Whether to calculate and return scores even if they are not used for sorting ** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response ** *`rest_total_hits_as_int` (Optional, boolean)* -** *`version` (Optional, boolean)*: Specify whether to return document version as part of a hit -** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return ** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field ** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field -** *`seq_no_primary_term` (Optional, boolean)*: Specify whether to return sequence number and primary term of the last modification of each hit ** *`q` (Optional, string)*: Query in the Lucene query string syntax -** *`size` (Optional, number)*: Number of hits to return (default: 10) -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`sort` (Optional, string | string[])*: A list of : pairs -** *`aggregations` (Optional, Record)* -** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* -** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. -** *`highlight` (Optional, { encoder, fields })* -** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. -** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter }[])*: Defines the approximate kNN search to run. -** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are -not included in the search results. 
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`rescore` (Optional, { query, window_size } | { query, window_size }[])* -** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* -** *`slice` (Optional, { field, id, max })* -** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names -matching these patterns in the hits.fields property of the response. -** *`suggest` (Optional, { text })* -** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you -cannot specify an in the request path. -** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take -precedence over mapped fields with the same name. [discrete] === cat @@ -1798,7 +1841,6 @@ client.ccr.follow({ index }) * *Request (object):* ** *`index` (string)*: The name of the follower index -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before returning. Defaults to 0. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) ** *`leader_index` (Optional, string)* ** *`max_outstanding_read_requests` (Optional, number)* ** *`max_outstanding_write_requests` (Optional, number)* @@ -1811,6 +1853,7 @@ client.ccr.follow({ index }) ** *`max_write_request_size` (Optional, string)* ** *`read_poll_timeout` (Optional, string | -1 | 0)* ** *`remote_cluster` (Optional, string)* +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before returning. Defaults to 0. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) [discrete] ==== follow_info @@ -2028,12 +2071,12 @@ client.cluster.allocationExplain({ ... }) ==== Arguments * *Request (object):* -** *`include_disk_info` (Optional, boolean)*: If true, returns information about disk usage and shard sizes. -** *`include_yes_decisions` (Optional, boolean)*: If true, returns YES decisions in explanation. ** *`current_node` (Optional, string)*: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. ** *`index` (Optional, string)*: Specifies the name of the index that you would like an explanation for. ** *`primary` (Optional, boolean)*: If true, returns explanation for the primary shard for the given shard ID. ** *`shard` (Optional, number)*: Specifies the ID of the shard that you would like an explanation for. +** *`include_disk_info` (Optional, boolean)*: If true, returns information about disk usage and shard sizes. +** *`include_yes_decisions` (Optional, boolean)*: If true, returns YES decisions in explanation. [discrete] ==== delete_component_template @@ -2248,9 +2291,6 @@ Elastic Agent uses these templates to configure backing indices for its data str If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. ** *`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })*: The template to be applied which includes mappings, settings, or aliases configuration. -** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing component templates. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. ** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. @@ -2262,6 +2302,9 @@ To unset a version, replace the template without specifying a version. May have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. +** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing component templates. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== put_settings @@ -2277,11 +2320,11 @@ client.cluster.putSettings({ ... 
}) ==== Arguments * *Request (object):* +** *`persistent` (Optional, Record)* +** *`transient` (Optional, Record)* ** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) ** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node ** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`persistent` (Optional, Record)* -** *`transient` (Optional, Record)* [discrete] ==== remote_info @@ -2308,13 +2351,13 @@ client.cluster.reroute({ ... }) ==== Arguments * *Request (object):* +** *`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])*: Defines the commands to perform. ** *`dry_run` (Optional, boolean)*: If true, then the request simulates the operation only and returns the resulting state. ** *`explain` (Optional, boolean)*: If true, then the response contains an explanation of why the commands can or cannot be executed. ** *`metric` (Optional, string | string[])*: Limits the information returned to the specified metrics. ** *`retry_failed` (Optional, boolean)*: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -** *`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])*: Defines the commands to perform. [discrete] ==== state @@ -2561,22 +2604,22 @@ client.eql.search({ index, query }) * *Request (object):* ** *`index` (string | string[])*: The name of the index to scope the operation ** *`query` (string)*: EQL query you wish to run. -** *`allow_no_indices` (Optional, boolean)* -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* -** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. -** *`keep_alive` (Optional, string | -1 | 0)*: Period for which the search and its results are stored on the cluster. -** *`keep_on_completion` (Optional, boolean)*: If true, the search and its results are stored on the cluster. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. ** *`case_sensitive` (Optional, boolean)* ** *`event_category_field` (Optional, string)*: Field containing the event classification, such as process, file, or network. ** *`tiebreaker_field` (Optional, string)*: Field used to sort hits with the same timestamp in ascending order ** *`timestamp_field` (Optional, string)*: Field containing event timestamp. Default "@timestamp" ** *`fetch_size` (Optional, number)*: Maximum number of events to search at a time for sequence queries. 
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs. +** *`keep_alive` (Optional, string | -1 | 0)* +** *`keep_on_completion` (Optional, boolean)* +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)* ** *`size` (Optional, number)*: For basic queries, the maximum number of matching events to return. Defaults to 10 ** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. ** *`result_position` (Optional, Enum("tail" | "head"))* ** *`runtime_mappings` (Optional, Record)* +** *`allow_no_indices` (Optional, boolean)* +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* +** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. [discrete] === features @@ -2641,6 +2684,7 @@ client.fleet.msearch({ ... }) * *Request (object):* ** *`index` (Optional, string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index. +** *`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* ** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. 
** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. @@ -2672,6 +2716,60 @@ client.fleet.search({ index }) * *Request (object):* ** *`index` (string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index. +** *`aggregations` (Optional, Record)* +** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* +** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. +** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. +** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. +** *`highlight` (Optional, { encoder, fields })* +** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If true, the exact +number of hits is returned at the cost of some performance. If false, the +response does not include the total number of hits matching the query. +Defaults to 10,000 hits. +** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. +** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field +names matching these patterns in the hits.fields property of the response. +** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are +not included in the search results. +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`profile` (Optional, boolean)* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. 
+** *`rescore` (Optional, { query, window_size } | { query, window_size }[])*
+** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit.
+** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
+** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more
+than 10,000 hits using the from and size parameters. To page through more
+hits, use the search_after parameter.
+** *`slice` (Optional, { field, id, max })*
+** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*
+** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These
+fields are returned in the hits._source property of the search response.
+** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names
+matching these patterns in the hits.fields property of the response.
+** *`suggest` (Optional, { text })*
+** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. If a query reaches this
+limit, Elasticsearch terminates the query early. Elasticsearch collects documents
+before sorting. Defaults to 0, which does not terminate query execution early.
+** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. If no response
+is received before the timeout expires, the request fails and returns an error.
+Defaults to no timeout.
+** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting.
+** *`version` (Optional, boolean)*: If true, returns document version as part of a hit.
+** *`seq_no_primary_term` (Optional, boolean)*: If true, returns sequence number and primary term of the last modification
+of each hit. See Optimistic concurrency control.
+** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified,
+no stored fields are included in the response. If this field is specified, the _source
+parameter defaults to false. You can pass _source: true to return both source fields
+and stored fields in the search response.
+** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an `<index>` in the request path.
+** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics
+aggregation for its associated searches. You can retrieve these stats using
+the indices stats API.
** *`allow_no_indices` (Optional, boolean)* ** *`analyzer` (Optional, string)* ** *`analyze_wildcard` (Optional, boolean)* @@ -2679,9 +2777,7 @@ client.fleet.search({ index }) ** *`ccs_minimize_roundtrips` (Optional, boolean)* ** *`default_operator` (Optional, Enum("and" | "or"))* ** *`df` (Optional, string)* -** *`docvalue_fields` (Optional, string | string[])* ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* -** *`explain` (Optional, boolean)* ** *`ignore_throttled` (Optional, boolean)* ** *`ignore_unavailable` (Optional, boolean)* ** *`lenient` (Optional, boolean)* @@ -2693,54 +2789,21 @@ client.fleet.search({ index }) ** *`routing` (Optional, string)* ** *`scroll` (Optional, string | -1 | 0)* ** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))* -** *`stats` (Optional, string[])* -** *`stored_fields` (Optional, string | string[])* ** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions. ** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))* ** *`suggest_size` (Optional, number)* ** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. -** *`terminate_after` (Optional, number)* -** *`timeout` (Optional, string | -1 | 0)* -** *`track_total_hits` (Optional, boolean | number)* -** *`track_scores` (Optional, boolean)* ** *`typed_keys` (Optional, boolean)* ** *`rest_total_hits_as_int` (Optional, boolean)* -** *`version` (Optional, boolean)* -** *`_source` (Optional, boolean | string | string[])* ** *`_source_excludes` (Optional, string | string[])* ** *`_source_includes` (Optional, string | string[])* -** *`seq_no_primary_term` (Optional, boolean)* ** *`q` (Optional, string)* -** *`size` (Optional, number)* -** *`from` (Optional, number)* -** *`sort` (Optional, string | string[])* ** *`wait_for_checkpoints` (Optional, number[])*: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. ** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. -** *`aggregations` (Optional, Record)* -** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* -** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. -** *`highlight` (Optional, { encoder, fields })* -** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. -** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are -not included in the search results. 
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`rescore` (Optional, { query, window_size } | { query, window_size }[])* -** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* -** *`slice` (Optional, { field, id, max })* -** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names -matching these patterns in the hits.fields property of the response. -** *`suggest` (Optional, { text })* -** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you -cannot specify an in the request path. -** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take -precedence over mapped fields with the same name. 
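
Taken together, the request-body options added above make `fleet.search` accept the same search definition as the regular search API, with the Fleet-specific `wait_for_checkpoints` and `allow_partial_search_results` options layered on top. A minimal sketch of how they combine (assuming the 8.x client, where the response body is returned directly; the index name, checkpoint value, and query below are illustrative placeholders, not values taken from this changelog):

[source,js]
----
// Run a Fleet search that only executes once the given checkpoint is
// visible for search on the target index's shard. All concrete values
// below are example placeholders.
const response = await client.fleet.search({
  index: '.fleet-agents-7', // must resolve to a single index
  wait_for_checkpoints: [77], // one checkpoint per shard of the target index
  allow_partial_search_results: false, // fail instead of returning partial hits
  query: {
    match: { status: 'online' }
  }
})
console.log(response.hits.hits)
----
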
[discrete] === graph @@ -2759,12 +2822,12 @@ client.graph.explore({ index }) * *Request (object):* ** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices -** *`routing` (Optional, string)*: Specific routing value -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout ** *`connections` (Optional, { connections, query, vertices })* ** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })* ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])* +** *`routing` (Optional, string)*: Specific routing value +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout [discrete] === ilm @@ -2850,10 +2913,10 @@ client.ilm.migrateToDataTiers({ ... }) ==== Arguments * *Request (object):* -** *`dry_run` (Optional, boolean)*: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. -This provides a way to retrieve the indices and ILM policies that need to be migrated. ** *`legacy_template_to_delete` (Optional, string)* ** *`node_attribute` (Optional, string)* +** *`dry_run` (Optional, boolean)*: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. +This provides a way to retrieve the indices and ILM policies that need to be migrated. [discrete] ==== move_to_step @@ -3045,11 +3108,11 @@ client.indices.clone({ index, target }) * *Request (object):* ** *`index` (string)*: The name of the source index to clone ** *`target` (string)*: The name of the target index to clone into +** *`aliases` (Optional, Record)* +** *`settings` (Optional, Record)* ** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master ** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout ** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the cloned index before the operation returns. -** *`aliases` (Optional, Record)* -** *`settings` (Optional, Record)* [discrete] ==== close @@ -3088,15 +3151,15 @@ client.indices.create({ index }) * *Request (object):* ** *`index` (string)*: The name of the index -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for before the operation returns. 
** *`aliases` (Optional, Record)* ** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })*: Mapping for fields in the index. If specified, this mapping can include: - Field names - Field data types - Mapping parameters ** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, shards, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })* +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for before the operation returns. [discrete] ==== create_data_stream @@ -3285,6 +3348,7 @@ client.indices.downsample({ index, target_index }) * *Request (object):* ** *`index` (string)*: The index to downsample ** *`target_index` (string)*: The name of the target index to store downsampled data +** *`config` (Optional, { fixed_interval })* [discrete] ==== exists @@ -3738,13 +3802,13 @@ client.indices.putAlias({ index, name }) * *Request (object):* ** *`index` (string | string[])*: A list of index names the alias should point to (supports wildcards); use `_all` to perform the operation on all indices. 
** *`name` (string)*: The name of the alias to be created or updated
-** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
-** *`timeout` (Optional, string | -1 | 0)*: Explicit timestamp for the document
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
** *`index_routing` (Optional, string)*
** *`is_write_index` (Optional, boolean)*
** *`routing` (Optional, string)*
** *`search_routing` (Optional, string)*
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout

[discrete]
==== put_data_lifecycle
@@ -3761,10 +3825,10 @@ client.indices.putDataLifecycle({ name })

* *Request (object):*
** *`name` (string | string[])*: A list of data streams whose lifecycle will be updated; use `*` to set the lifecycle to all data streams
+** *`data_retention` (Optional, string | -1 | 0)*
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open)
** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
-** *`data_retention` (Optional, string | -1 | 0)*

[discrete]
==== put_index_template
@@ -3781,7 +3845,6 @@ client.indices.putIndexTemplate({ name })

* *Request (object):*
** *`name` (string)*: Index or template name
-** *`create` (Optional, boolean)*: Whether the index template should only be added if new or can also replace an existing one
** *`index_patterns` (Optional, string | string[])*
** *`composed_of` (Optional, string[])*
** *`template` (Optional, { aliases, mappings, settings, lifecycle })*
** *`data_stream` (Optional, { hidden, allow_custom_routing })*
** *`priority` (Optional, number)*
** *`version` (Optional, number)*
** *`_meta` (Optional, Record)*
+** *`create` (Optional, boolean)*: Whether the index template should only be added if new or can also replace an existing one

[discrete]
==== put_mapping
@@ -3805,12 +3869,6 @@ client.indices.putMapping({ index })

* *Request (object):*
** *`index` (string | string[])*: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices.
-** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`write_index_only` (Optional, boolean)*: When true, applies mappings only to the write index of an alias or data stream ** *`date_detection` (Optional, boolean)*: Controls whether dynamic date detection is enabled. ** *`dynamic` (Optional, Enum("strict" | "runtime" | true | false))*: Controls whether new fields are added dynamically. ** *`dynamic_date_formats` (Optional, string[])*: If date detection is enabled then new string fields are checked @@ -3830,6 +3888,12 @@ application-specific metadata. ** *`_routing` (Optional, { required })*: Enable making a routing value required on indexed documents. ** *`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })*: Control whether the _source field is enabled on the index. ** *`runtime` (Optional, Record)*: Mapping of runtime fields for the index. +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`write_index_only` (Optional, boolean)*: When true, applies mappings only to the write index of an alias or data stream [discrete] ==== put_settings @@ -3846,6 +3910,7 @@ client.indices.putSettings({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices +** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, shards, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })* ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. ** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) @@ -3869,23 +3934,23 @@ client.indices.putTemplate({ name }) * *Request (object):* ** *`name` (string)*: The name of the template -** *`create` (Optional, boolean)*: If true, this request cannot replace or update existing index templates. -** *`flat_settings` (Optional, boolean)* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is -received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)* +** *`aliases` (Optional, Record)*: Aliases for the index. +** *`index_patterns` (Optional, string | string[])*: Array of wildcard expressions used to match the names +of indices during creation. +** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })*: Mapping for fields in the index. ** *`order` (Optional, number)*: Order in which Elasticsearch applies this template if index matches multiple templates. Templates with lower 'order' values are merged first. Templates with higher 'order' values are merged later, overriding templates with lower values. -** *`aliases` (Optional, Record)*: Aliases for the index. -** *`index_patterns` (Optional, string | string[])*: Array of wildcard expressions used to match the names -of indices during creation. -** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })*: Mapping for fields in the index. ** *`settings` (Optional, Record)*: Configuration options for the index. ** *`version` (Optional, number)*: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. +** *`create` (Optional, boolean)*: If true, this request cannot replace or update existing index templates. +** *`flat_settings` (Optional, boolean)* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)* [discrete] ==== recovery @@ -3977,14 +4042,14 @@ client.indices.rollover({ alias }) * *Request (object):* ** *`alias` (string)*: The name of the alias to rollover ** *`new_index` (Optional, string)*: The name of the rollover index -** *`dry_run` (Optional, boolean)*: If set to true the rollover action will only be validated but not actually performed even if a condition matches. The default is false -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the newly created rollover index before the operation returns. 
** *`aliases` (Optional, Record)* ** *`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })* ** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })* ** *`settings` (Optional, Record)* +** *`dry_run` (Optional, boolean)*: If set to true the rollover action will only be validated but not actually performed even if a condition matches. The default is false +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the newly created rollover index before the operation returns. [discrete] ==== segments @@ -4045,11 +4110,11 @@ client.indices.shrink({ index, target }) * *Request (object):* ** *`index` (string)*: The name of the source index to shrink ** *`target` (string)*: The name of the target index to shrink into +** *`aliases` (Optional, Record)* +** *`settings` (Optional, Record)* ** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master ** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout ** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the shrunken index before the operation returns. -** *`aliases` (Optional, Record)* -** *`settings` (Optional, Record)* [discrete] ==== simulate_index_template @@ -4066,14 +4131,6 @@ client.indices.simulateIndexTemplate({ name }) * *Request (object):* ** *`name` (string)*: Index or template name to simulate -** *`create` (Optional, boolean)*: If `true`, the template passed in the body is only used if no existing -templates match the same index patterns. If `false`, the simulation uses -the template with the highest priority. Note that the template is not -permanently added or updated in either case; it is only used for the -simulation. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received -before the timeout expires, the request fails and returns an error. -** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. ** *`allow_auto_create` (Optional, boolean)* ** *`index_patterns` (Optional, string | string[])* ** *`composed_of` (Optional, string[])* @@ -4082,6 +4139,14 @@ before the timeout expires, the request fails and returns an error. ** *`priority` (Optional, number)* ** *`version` (Optional, number)* ** *`_meta` (Optional, Record)* +** *`create` (Optional, boolean)*: If `true`, the template passed in the body is only used if no existing +templates match the same index patterns. If `false`, the simulation uses +the template with the highest priority. Note that the template is not +permanently added or updated in either case; it is only used for the +simulation. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. 
If no response is received +before the timeout expires, the request fails and returns an error. +** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. [discrete] ==== simulate_template @@ -4099,6 +4164,7 @@ client.indices.simulateTemplate({ ... }) * *Request (object):* ** *`name` (Optional, string)*: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template configuration in the request body. +** *`template` (Optional, { index_patterns, composed_of, template, version, priority, _meta, allow_auto_create, data_stream })* ** *`create` (Optional, boolean)*: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. @@ -4119,11 +4185,11 @@ client.indices.split({ index, target }) * *Request (object):* ** *`index` (string)*: The name of the source index to split ** *`target` (string)*: The name of the target index to split into +** *`aliases` (Optional, Record)* +** *`settings` (Optional, Record)* ** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master ** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout ** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the shrunken index before the operation returns. -** *`aliases` (Optional, Record)* -** *`settings` (Optional, Record)* [discrete] ==== stats @@ -4189,9 +4255,9 @@ client.indices.updateAliases({ ... }) ==== Arguments * *Request (object):* +** *`actions` (Optional, { add_backing_index, remove_backing_index }[])* ** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master ** *`timeout` (Optional, string | -1 | 0)*: Request timeout -** *`actions` (Optional, { add_backing_index, remove_backing_index }[])* [discrete] ==== validate_query @@ -4208,6 +4274,7 @@ client.indices.validateQuery({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: A list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) ** *`all_shards` (Optional, boolean)*: Execute validation on all shards instead of one random shard per index ** *`analyzer` (Optional, string)*: The analyzer to use for the query string @@ -4220,7 +4287,6 @@ client.indices.validateQuery({ ... }) ** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored ** *`rewrite` (Optional, boolean)*: Provide a more detailed explanation showing the actual Lucene query that will be executed. ** *`q` (Optional, string)*: Query in the Lucene query string syntax -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* [discrete] === ingest @@ -4297,14 +4363,14 @@ client.ingest.putPipeline({ id }) * *Request (object):* ** *`id` (string)*: ID of the ingest pipeline to create or update. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -** *`if_version` (Optional, number)*: Required version for optimistic concurrency control for pipeline updates ** *`_meta` (Optional, Record)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. ** *`description` (Optional, string)*: Description of the ingest pipeline. ** *`on_failure` (Optional, { attachment, append, csv, convert, date, date_index_name, dot_expander, enrich, fail, foreach, json, user_agent, kv, geoip, grok, gsub, join, lowercase, remove, rename, script, set, sort, split, trim, uppercase, urldecode, bytes, dissect, set_security_user, pipeline, drop, circle, inference }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. ** *`processors` (Optional, { attachment, append, csv, convert, date, date_index_name, dot_expander, enrich, fail, foreach, json, user_agent, kv, geoip, grok, gsub, join, lowercase, remove, rename, script, set, sort, split, trim, uppercase, urldecode, bytes, dissect, set_security_user, pipeline, drop, circle, inference }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. ** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. 
Elasticsearch does not use or validate pipeline version numbers. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`if_version` (Optional, number)*: Required version for optimistic concurrency control for pipeline updates [discrete] ==== simulate @@ -4321,9 +4387,9 @@ client.ingest.simulate({ ... }) * *Request (object):* ** *`id` (Optional, string)*: Pipeline ID -** *`verbose` (Optional, boolean)*: Verbose mode. Display data output for each processor in executed pipeline ** *`docs` (Optional, { _id, _index, _source }[])* ** *`pipeline` (Optional, { description, on_failure, processors, version })* +** *`verbose` (Optional, boolean)*: Verbose mode. Display data output for each processor in executed pipeline [discrete] === license @@ -4392,9 +4458,9 @@ client.license.post({ ... }) ==== Arguments * *Request (object):* -** *`acknowledge` (Optional, boolean)*: Specifies whether you acknowledge the license changes. ** *`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })* ** *`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])*: A sequence of one or more JSON documents containing the license information. +** *`acknowledge` (Optional, boolean)*: Specifies whether you acknowledge the license changes. [discrete] ==== post_start_basic @@ -4478,6 +4544,7 @@ client.logstash.putPipeline({ id }) * *Request (object):* ** *`id` (string)*: The ID of the Pipeline +** *`pipeline` (Optional, { description, on_failure, processors, version })* [discrete] === migration @@ -4552,11 +4619,9 @@ client.ml.closeJob({ job_id }) * *Request (object):* ** *`job_id` (string)*: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no jobs that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty jobs array when there are no matches and the subset of results when there are partial matches. -If `false`, the request returns a 404 status code when there are no matches or only partial matches. -** *`force` (Optional, boolean)*: Use to close a failed job, or to forcefully close a job which has not responded to its initial close request; the request returns without performing the associated actions such as flushing buffers and persisting the model snapshots. -If you want the job to be in a consistent state after the close job API returns, do not set to `true`. This parameter should be used only in situations where the job has already failed or where you are not interested in results the job might have recently produced or might produce in the future. 
-** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait until a job has closed. +** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter. +** *`force` (Optional, boolean)*: Refer to the description for the `force` query parameter. +** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. [discrete] ==== delete_calendar @@ -4888,16 +4953,11 @@ client.ml.flushJob({ job_id }) * *Request (object):* ** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`advance_time` (Optional, string | Unit)*: Specifies to advance to a particular time value. Results are generated -and the model is updated for data from the specified time interval. -** *`calc_interim` (Optional, boolean)*: If true, calculates the interim results for the most recent bucket or all -buckets within the latency period. -** *`end` (Optional, string | Unit)*: When used in conjunction with `calc_interim` and `start`, specifies the -range of buckets on which to calculate interim results. -** *`skip_time` (Optional, string | Unit)*: Specifies to skip to a particular time value. Results are not generated -and the model is not updated for data from the specified time interval. -** *`start` (Optional, string | Unit)*: When used in conjunction with `calc_interim`, specifies the range of -buckets on which to calculate interim results. +** *`advance_time` (Optional, string | Unit)*: Refer to the description for the `advance_time` query parameter. +** *`calc_interim` (Optional, boolean)*: Refer to the description for the `calc_interim` query parameter. +** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. +** *`skip_time` (Optional, string | Unit)*: Refer to the description for the `skip_time` query parameter. +** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. [discrete] ==== forecast @@ -4915,17 +4975,9 @@ client.ml.forecast({ job_id }) * *Request (object):* ** *`job_id` (string)*: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. -** *`duration` (Optional, string | -1 | 0)*: A period of time that indicates how far into the future to forecast. For -example, `30d` corresponds to 30 days. The forecast starts at the last -record that was processed. -** *`expires_in` (Optional, string | -1 | 0)*: The period of time that forecast results are retained. After a forecast -expires, the results are deleted. If set to a value of 0, the forecast is -never automatically deleted. -** *`max_model_memory` (Optional, string)*: The maximum memory the forecast can use. If the forecast needs to use -more than the provided amount, it will spool to disk. Default is 20mb, -maximum is 500mb and minimum is 1mb. If set to 40% or more of the job’s -configured memory limit, it is automatically reduced to below that -amount. +** *`duration` (Optional, string | -1 | 0)*: Refer to the description for the `duration` query parameter. +** *`expires_in` (Optional, string | -1 | 0)*: Refer to the description for the `expires_in` query parameter. +** *`max_model_memory` (Optional, string)*: Refer to the description for the `max_model_memory` query parameter. [discrete] ==== get_buckets @@ -4944,18 +4996,16 @@ client.ml.getBuckets({ job_id }) ** *`job_id` (string)*: Identifier for the anomaly detection job. ** *`timestamp` (Optional, string | Unit)*: The timestamp of a single bucket result.
If you do not specify this parameter, the API returns information about all buckets. -** *`anomaly_score` (Optional, number)*: Returns buckets with anomaly scores greater or equal than this value. -** *`desc` (Optional, boolean)*: If `true`, the buckets are sorted in descending order. -** *`end` (Optional, string | Unit)*: Returns buckets with timestamps earlier than this time. `-1` means it is -unset and results are not limited to specific timestamps. -** *`exclude_interim` (Optional, boolean)*: If `true`, the output excludes interim results. -** *`expand` (Optional, boolean)*: If true, the output includes anomaly records. +** *`anomaly_score` (Optional, number)*: Refer to the description for the `anomaly_score` query parameter. +** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter. +** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. +** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter. +** *`expand` (Optional, boolean)*: Refer to the description for the `expand` query parameter. +** *`page` (Optional, { from, size })* +** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter. +** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. ** *`from` (Optional, number)*: Skips the specified number of buckets. ** *`size` (Optional, number)*: Specifies the maximum number of buckets to obtain. -** *`sort` (Optional, string)*: Specifies the sort field for the requested buckets. -** *`start` (Optional, string | Unit)*: Returns buckets with timestamps after this time. `-1` means it is unset -and results are not limited to specific timestamps. -** *`page` (Optional, { from, size })* [discrete] ==== get_calendar_events @@ -4993,9 +5043,9 @@ client.ml.getCalendars({ ... }) * *Request (object):* ** *`calendar_id` (Optional, string)*: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. +** *`page` (Optional, { from, size })*: This object is supported only when you omit the calendar identifier. ** *`from` (Optional, number)*: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. ** *`size` (Optional, number)*: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. -** *`page` (Optional, { from, size })*: This object is supported only when you omit the calendar identifier. [discrete] ==== get_categories @@ -5017,10 +5067,10 @@ ** *`category_id` (Optional, string)*: Identifier for the category, which is unique in the job. If you specify neither the category ID nor the partition_field_value, the API returns information about all categories. If you specify only the partition_field_value, it returns information about all categories for the specified partition. +** *`page` (Optional, { from, size })* ** *`from` (Optional, number)*: Skips the specified number of categories. ** *`partition_field_value` (Optional, string)*: Only return categories for the specified partition. ** *`size` (Optional, number)*: Specifies the maximum number of categories to obtain. -** *`page` (Optional, { from, size })* [discrete] ==== get_data_frame_analytics @@ -5180,6 +5230,7 @@ client.ml.getInfluencers({ job_id }) * *Request (object):* ** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`page` (Optional, { from, size })* ** *`desc` (Optional, boolean)*: If true, the results are sorted in descending order. ** *`end` (Optional, string | Unit)*: Returns influencers with timestamps earlier than this time. The default value means it is unset and results are not limited to @@ -5194,7 +5245,6 @@ value. influencers are sorted by the `influencer_score` value. ** *`start` (Optional, string | Unit)*: Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps. -** *`page` (Optional, { from, size })* [discrete] ==== get_job_stats @@ -5325,14 +5375,13 @@ client.ml.getModelSnapshots({ job_id }) ** *`snapshot_id` (Optional, string)*: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. -** *`desc` (Optional, boolean)*: If true, the results are sorted in descending order. -** *`end` (Optional, string | Unit)*: Returns snapshots with timestamps earlier than this time. +** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter. +** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. +** *`page` (Optional, { from, size })* +** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter. +** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. ** *`from` (Optional, number)*: Skips the specified number of snapshots. ** *`size` (Optional, number)*: Specifies the maximum number of snapshots to obtain. -** *`sort` (Optional, string)*: Specifies the sort field for the requested snapshots. By default, the -snapshots are sorted by their timestamp. -** *`start` (Optional, string | Unit)*: Returns snapshots with timestamps after this time. -** *`page` (Optional, { from, size })* [discrete] ==== get_overall_buckets @@ -5354,30 +5403,13 @@ expression. You can summarize the bucket results for all anomaly detection jobs by using `_all` or by specifying `*` as the ``. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -1. Contains wildcard expressions and there are no jobs that match. -2. Contains the `_all` string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. - -If `true`, the request returns an empty `jobs` array when there are no -matches and the subset of results when there are partial matches. If this -parameter is `false`, the request returns a `404` status code when there -are no matches or only partial matches. -** *`bucket_span` (Optional, string | -1 | 0)*: The span of the overall buckets. Must be greater or equal to the largest -bucket span of the specified anomaly detection jobs, which is the default -value. - -By default, an overall bucket has a span equal to the largest bucket span -of the specified anomaly detection jobs. To override that behavior, use -the optional `bucket_span` parameter. -** *`end` (Optional, string | Unit)*: Returns overall buckets with timestamps earlier than this time. -** *`exclude_interim` (Optional, boolean)*: If `true`, the output excludes interim results. -** *`overall_score` (Optional, number | string)*: Returns overall buckets with overall scores greater than or equal to this -value. 
-** *`start` (Optional, string | Unit)*: Returns overall buckets with timestamps after this time. -** *`top_n` (Optional, number)*: The number of top anomaly detection job bucket scores to be used in the -`overall_score` calculation. +** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter. +** *`bucket_span` (Optional, string | -1 | 0)*: Refer to the description for the `bucket_span` query parameter. +** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. +** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter. +** *`overall_score` (Optional, number | string)*: Refer to the description for the `overall_score` query parameter. +** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. +** *`top_n` (Optional, number)*: Refer to the description for the `top_n` query parameter. [discrete] ==== get_records @@ -5394,17 +5426,15 @@ client.ml.getRecords({ job_id }) * *Request (object):* ** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`desc` (Optional, boolean)*: If true, the results are sorted in descending order. -** *`end` (Optional, string | Unit)*: Returns records with timestamps earlier than this time. The default value -means results are not limited to specific timestamps. -** *`exclude_interim` (Optional, boolean)*: If `true`, the output excludes interim results. +** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter. +** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. +** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter. +** *`page` (Optional, { from, size })* +** *`record_score` (Optional, number)*: Refer to the description for the `record_score` query parameter. +** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter. +** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. ** *`from` (Optional, number)*: Skips the specified number of records. -** *`record_score` (Optional, number)*: Returns records with anomaly scores greater or equal than this value. ** *`size` (Optional, number)*: Specifies the maximum number of records to obtain. -** *`sort` (Optional, string)*: Specifies the sort field for the requested records. -** *`start` (Optional, string | Unit)*: Returns records with timestamps after this time. The default value means -results are not limited to specific timestamps. -** *`page` (Optional, { from, size })* [discrete] ==== get_trained_models @@ -5487,8 +5517,8 @@ client.ml.inferTrainedModel({ model_id, docs }) ** *`docs` (Record[])*: An array of objects to pass to the model for inference. The objects should contain a fields matching your configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed. -** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait for inference results. ** *`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The inference configuration updates to apply on the API call +** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait for inference results. 
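As a minimal usage sketch (the model ID and input value below are illustrative placeholders, and `text_field` assumes an NLP model whose configured input field uses that default name):

[source,ts]
----
// Sketch only: 'my-nlp-model' is a hypothetical model ID and the
// document content is sample data, not values defined by this API.
const response = await client.ml.inferTrainedModel({
  model_id: 'my-nlp-model',
  docs: [{ text_field: 'Elasticsearch is a distributed search engine.' }],
  timeout: '30s'
})
console.log(response)
----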
[discrete] ==== info @@ -5516,7 +5546,7 @@ client.ml.openJob({ job_id }) * *Request (object):* ** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait until a job has opened. +** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. [discrete] ==== post_calendar_events @@ -5550,6 +5580,7 @@ client.ml.postData({ job_id }) * *Request (object):* ** *`job_id` (string)*: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. +** *`data` (Optional, TData[])* ** *`reset_end` (Optional, string | Unit)*: Specifies the end of the bucket resetting range. ** *`reset_start` (Optional, string | Unit)*: Specifies the start of the bucket resetting range. @@ -5590,13 +5621,13 @@ client.ml.previewDatafeed({ ... }) alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body. -** *`start` (Optional, string | Unit)*: The start time from where the datafeed preview should begin -** *`end` (Optional, string | Unit)*: The end time when the datafeed preview should stop ** *`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })*: The datafeed definition to preview. ** *`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })*: The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. +** *`start` (Optional, string | Unit)*: The start time from where the datafeed preview should begin +** *`end` (Optional, string | Unit)*: The end time when the datafeed preview should stop [discrete] ==== put_calendar @@ -5721,12 +5752,6 @@ client.ml.putDatafeed({ datafeed_id }) ** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -** *`allow_no_indices` (Optional, boolean)*: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` -string or when no indices are specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines -whether wildcard expressions match hidden data streams. Supports a list of values. 
-** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen. -** *`ignore_unavailable` (Optional, boolean)*: If true, unavailable indices (missing or closed) are ignored. ** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. ** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might be required to search over long time periods, for several months or years. @@ -5764,6 +5789,12 @@ The detector configuration objects in a job can contain functions that use these ** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. ** *`headers` (Optional, Record)* +** *`allow_no_indices` (Optional, boolean)*: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` +string or when no indices are specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. +** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If true, unavailable indices (missing or closed) are ignored. [discrete] ==== put_filter @@ -5822,7 +5853,7 @@ Creates an inference trained model. {ref}/put-trained-models.html[Endpoint documentation] [source,ts] ---- -client.ml.putTrainedModel({ model_id, inference_config }) +client.ml.putTrainedModel({ model_id }) ---- [discrete] @@ -5830,16 +5861,16 @@ client.ml.putTrainedModel({ model_id, inference_config }) * *Request (object):* ** *`model_id` (string)*: The unique identifier of the trained model. -** *`inference_config` ({ regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The default configuration for inference. This can be either a regression -or classification configuration. It must match the underlying -definition.trained_model's target_type. -** *`defer_definition_decompression` (Optional, boolean)*: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. ** *`compressed_definition` (Optional, string)*: The compressed (GZipped and Base64 encoded) inference definition of the model. If compressed_definition is specified, then definition cannot be specified. ** *`definition` (Optional, { preprocessors, trained_model })*: The inference definition for the model. If definition is specified, then compressed_definition cannot be specified. ** *`description` (Optional, string)*: A human-readable description of the inference trained model. +** *`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The default configuration for inference. This can be either a regression +or classification configuration. 
It must match the underlying +definition.trained_model's target_type. For pre-packaged models such as +ELSER the config is not required. ** *`input` (Optional, { field_names })*: The input field names for the model definition. ** *`metadata` (Optional, User-defined value)*: An object map that contains metadata about the model. ** *`model_type` (Optional, Enum("tree_ensemble" | "lang_ident" | "pytorch"))*: The model type. @@ -5847,6 +5878,7 @@ compressed_definition cannot be specified. This property is supported only if defer_definition_decompression is true or the model definition is not supplied. ** *`tags` (Optional, string[])*: An array of tags to organize the model. +** *`defer_definition_decompression` (Optional, boolean)*: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. [discrete] ==== put_trained_model_alias @@ -5946,12 +5978,7 @@ client.ml.revertModelSnapshot({ job_id, snapshot_id }) ** *`snapshot_id` (string)*: You can specify `empty` as the . Reverting to the empty snapshot means the anomaly detection job starts learning a new model from scratch when it is started. -** *`delete_intervening_results` (Optional, boolean)*: If true, deletes the results in the time period between the latest -results and the time of the reverted snapshot. It also resets the model -to accept records for this time period. If you choose not to delete -intervening results when reverting a snapshot, the job will not accept -input data that is older than the current time. If you want to resend -data, then delete the intervening results. +** *`delete_intervening_results` (Optional, boolean)*: Refer to the description for the `delete_intervening_results` query parameter. [discrete] ==== set_upgrade_mode @@ -6009,24 +6036,9 @@ client.ml.startDatafeed({ datafeed_id }) ** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -** *`end` (Optional, string | Unit)*: The time that the datafeed should end, which can be specified by using one of the following formats: - -* ISO 8601 format with milliseconds, for example `2017-01-22T06:00:00.000Z` -* ISO 8601 format without milliseconds, for example `2017-01-22T06:00:00+00:00` -* Milliseconds since the epoch, for example `1485061200000` - -Date-time arguments using either of the ISO 8601 formats must have a time zone designator, where `Z` is accepted -as an abbreviation for UTC time. When a URL is expected (for example, in browsers), the `+` used in time zone -designators must be encoded as `%2B`. -The end time value is exclusive. If you do not specify an end time, the datafeed -runs continuously. -** *`start` (Optional, string | Unit)*: The time that the datafeed should begin, which can be specified by using the same formats as the `end` parameter. -This value is inclusive. -If you do not specify a start time and the datafeed is associated with a new anomaly detection job, the analysis -starts from the earliest time for which data is available. -If you restart a stopped datafeed and specify a start value that is earlier than the timestamp of the latest -processed record, the datafeed continues from 1 millisecond after the timestamp of the latest processed record. -** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait until a datafeed starts. 
+** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. +** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. +** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. [discrete] ==== start_trained_model_deployment @@ -6112,17 +6124,9 @@ client.ml.stopDatafeed({ datafeed_id }) ** *`datafeed_id` (string)*: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as the identifier. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -* Contains wildcard expressions and there are no datafeeds that match. -* Contains the `_all` string or no identifiers and there are no matches. -* Contains wildcard expressions and there are only partial matches. - -If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when -there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only -partial matches. -** *`force` (Optional, boolean)*: If `true`, the datafeed is stopped forcefully. -** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait until a datafeed stops. +** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter. +** *`force` (Optional, boolean)*: Refer to the description for the `force` query parameter. +** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. [discrete] ==== stop_trained_model_deployment @@ -6193,18 +6197,6 @@ client.ml.updateDatafeed({ datafeed_id }) ** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the -`_all` string or when no indices are specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines -whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: - -* `all`: Match any data stream or index, including hidden ones. -* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. -* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. -* `none`: Wildcard patterns are not accepted. -* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. -** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored. ** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. 
** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might search over long time periods, for several months or years. This search is split into time @@ -6244,6 +6236,18 @@ when there are multiple jobs running on the same node. The detector configuration objects in a job can contain functions that use these script fields. ** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`. +** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the +`_all` string or when no indices are specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: + +* `all`: Match any data stream or index, including hidden ones. +* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. +* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. +* `none`: Wildcard patterns are not accepted. +* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored. [discrete] ==== update_filter @@ -6481,8 +6485,8 @@ client.nodes.reloadSecureSettings({ ... }) * *Request (object):* ** *`node_id` (Optional, string | string[])*: A list of node IDs to span the reload/reinit call. Should stay empty because reloading usually involves all cluster nodes. 
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout ** *`secure_settings_password` (Optional, string)* +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout [discrete] ==== stats @@ -6652,11 +6656,11 @@ client.rollup.rollupSearch({ index }) * *Request (object):* ** *`index` (string | string[])*: The indices or index-pattern(s) (containing rollup or regular data) that should be searched -** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response -** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response ** *`aggregations` (Optional, Record)* ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data +** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response +** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response [discrete] ==== start_job @@ -6802,6 +6806,7 @@ client.searchApplication.put({ name }) * *Request (object):* ** *`name` (string)*: The name of the search application to be created or updated +** *`search_application` (Optional, { name, indices, updated_at_millis, analytics_collection_name, template })* ** *`create` (Optional, boolean)*: If true, requires that a search application with the specified resource_id does not already exist. (default: false) [discrete] @@ -6905,12 +6910,12 @@ client.searchableSnapshots.mount({ repository, snapshot, index }) ** *`repository` (string)*: The name of the repository containing the snapshot of the index to mount ** *`snapshot` (string)*: The name of the snapshot of the index to mount ** *`index` (string)* -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before returning -** *`storage` (Optional, string)*: Selects the kind of local storage used to accelerate searches. Experimental, and defaults to `full_copy` ** *`renamed_index` (Optional, string)* ** *`index_settings` (Optional, Record)* ** *`ignore_index_settings` (Optional, string[])* +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before returning +** *`storage` (Optional, string)*: Selects the kind of local storage used to accelerate searches. Experimental, and defaults to `full_copy` [discrete] ==== stats @@ -6969,12 +6974,12 @@ client.security.changePassword({ ... 
}) * *Request (object):* ** *`username` (Optional, string)*: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ** *`password` (Optional, string)*: The new password value. Passwords must be at least 6 characters long. ** *`password_hash` (Optional, string)*: A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. [discrete] ==== clear_api_key_cache @@ -7073,11 +7078,11 @@ client.security.createApiKey({ ... }) ==== Arguments * *Request (object):* -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. By default, API keys never expire. ** *`name` (Optional, string)*: Specifies the name for this API key. ** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API. ** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. [discrete] ==== create_cross_cluster_api_key @@ -7554,6 +7559,7 @@ client.security.putPrivileges({ ... }) ==== Arguments * *Request (object):* +** *`privileges` (Optional, Record>)* ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. [discrete] @@ -7571,7 +7577,6 @@ client.security.putRole({ name }) * *Request (object):* ** *`name` (string)*: The name of the role. 
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries. ** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "grant_api_key" | "manage" | "manage_api_key" | "manage_ccr" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "read_ccr" | "read_ilm" | "read_pipeline" | "read_slm" | "transport_client")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. ** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. @@ -7579,6 +7584,7 @@ client.security.putRole({ name }) ** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. ** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. ** *`transient_metadata` (Optional, { enabled })*: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. [discrete] ==== put_role_mapping @@ -7595,12 +7601,12 @@ client.security.putRoleMapping({ name }) * *Request (object):* ** *`name` (string)*: Role-mapping name -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ** *`enabled` (Optional, boolean)* ** *`metadata` (Optional, Record)* ** *`roles` (Optional, string[])* ** *`rules` (Optional, { any, all, field, except })* ** *`run_as` (Optional, string[])* +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
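For illustration, a call might look like the following sketch; the mapping name, role, and realm value are invented placeholders, not values defined by this API:

[source,ts]
----
// Sketch only: 'saml-users', the 'user' role, and 'saml1' are
// hypothetical values chosen for the example.
const response = await client.security.putRoleMapping({
  name: 'saml-users',
  enabled: true,
  roles: ['user'],
  rules: { field: { 'realm.name': 'saml1' } },
  refresh: 'wait_for'
})
console.log(response)
----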
[discrete] ==== put_user @@ -7617,7 +7623,6 @@ client.security.putUser({ username }) * *Request (object):* ** *`username` (string)*: The username of the User -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ** *`email` (Optional, string | null)* ** *`full_name` (Optional, string | null)* ** *`metadata` (Optional, Record)* @@ -7625,6 +7630,7 @@ client.security.putUser({ username }) ** *`password_hash` (Optional, string)* ** *`roles` (Optional, string[])* ** *`enabled` (Optional, boolean)* +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. [discrete] ==== query_api_keys @@ -7640,10 +7646,6 @@ client.security.queryApiKeys({ ... }) ==== Arguments * *Request (object):* -** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors -associated with the API key. An API key's actual -permission is the intersection of its assigned role -descriptors and the owner user's role descriptors. ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: A query to filter which API keys to return. The query supports a subset of query types, including match_all, bool, term, terms, ids, prefix, wildcard, and range. You can query all public information associated with an API key @@ -7655,6 +7657,10 @@ search_after parameter. than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* +** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors +associated with the API key. An API key's actual +permission is the intersection of its assigned role +descriptors and the owner user's role descriptors. [discrete] ==== saml_authenticate @@ -7898,13 +7904,13 @@ client.slm.putLifecycle({ policy_id }) * *Request (object):* ** *`policy_id` (string)*: ID for the snapshot lifecycle policy you want to create or update. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
** *`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })*: Configuration for each snapshot created by the policy. ** *`name` (Optional, string)*: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. ** *`repository` (Optional, string)*: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. ** *`retention` (Optional, { expire_after, max_count, min_count })*: Retention rules used to retain and delete snapshots created by the policy. ** *`schedule` (Optional, string)*: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== start @@ -7985,14 +7991,14 @@ client.snapshot.create({ repository, snapshot }) * *Request (object):* ** *`repository` (string)*: Repository for the snapshot. ** *`snapshot` (string)*: Name of the snapshot. Must be unique in the repository. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_completion` (Optional, boolean)*: If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes. ** *`ignore_unavailable` (Optional, boolean)*: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed. ** *`include_global_state` (Optional, boolean)*: If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). ** *`indices` (Optional, string | string[])*: Data streams and indices to include in the snapshot. Supports multi-target syntax. Includes all data streams and indices by default. ** *`feature_states` (Optional, string[])*: Feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default. ** *`metadata` (Optional, Record)*: Optional metadata for the snapshot. May have any contents. Must be less than 1024 bytes. This map is not automatically generated by Elasticsearch. ** *`partial` (Optional, boolean)*: If `true`, allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. 
If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`wait_for_completion` (Optional, boolean)*: If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes.

[discrete]
==== create_repository
@@ -8128,8 +8134,6 @@ client.snapshot.restore({ repository, snapshot })
* *Request (object):*
** *`repository` (string)*: A repository name
** *`snapshot` (string)*: A snapshot name
-** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
-** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before returning
** *`feature_states` (Optional, string[])*
** *`ignore_index_settings` (Optional, string[])*
** *`ignore_unavailable` (Optional, boolean)*
@@ -8140,6 +8144,8 @@ client.snapshot.restore({ repository, snapshot })
** *`partial` (Optional, boolean)*
** *`rename_pattern` (Optional, string)*
** *`rename_replacement` (Optional, string)*
+** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
+** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before returning

[discrete]
==== status
@@ -8265,7 +8271,6 @@ client.sql.query({ ... })
==== Arguments

* *Request (object):*
-** *`format` (Optional, string)*: a short version of the Accept header, e.g. json, yaml
** *`catalog` (Optional, string)*: Default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only.
** *`columnar` (Optional, boolean)*: If true, returns the results in a columnar fashion: one row represents all the values of a certain column from the current page of results.
** *`cursor` (Optional, string)*
@@ -8283,6 +8288,7 @@ precedence over mapped fields with the same name.
** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for an async or saved synchronous search.
** *`keep_on_completion` (Optional, boolean)*: If true, Elasticsearch stores synchronous searches if you also specify the wait_for_completion_timeout parameter. If false, Elasticsearch only stores async searches that don’t finish before the wait_for_completion_timeout.
** *`index_using_frozen` (Optional, boolean)*: If true, the search can run on frozen indices. Defaults to false.
+** *`format` (Optional, string)*: a short version of the Accept header, e.g. json, yaml

[discrete]
==== translate
@@ -8430,6 +8436,7 @@ client.textStructure.findStructure({ ... })
==== Arguments

* *Request (object):*
+** *`text_files` (Optional, TJsonDocument[])*
** *`charset` (Optional, string)*: The text’s character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, windows-1252, or EUC-JP. If this parameter is not specified, the structure finder chooses an appropriate character set.
** *`column_names` (Optional, string)*: If you have set format to delimited, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", etc.
** *`delimiter` (Optional, string)*: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (|). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. @@ -8544,8 +8551,6 @@ client.transform.previewTransform({ ... }) * *Request (object):* ** *`transform_id` (Optional, string)*: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in the request body. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the -timeout expires, the request fails and returns an error. ** *`dest` (Optional, { index, op_type, pipeline, routing, version_type })*: The destination for the transform. ** *`description` (Optional, string)*: Free text description of the transform. ** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the @@ -8562,6 +8567,8 @@ the data. criteria is deleted from the destination index. ** *`latest` (Optional, { sort, unique_key })*: The latest method transforms the data by finding the latest document for each unique key. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. [discrete] ==== put_transform @@ -8581,12 +8588,6 @@ client.transform.putTransform({ transform_id, dest, source }) hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. ** *`dest` ({ index, op_type, pipeline, routing, version_type })*: The destination for the transform. ** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. -** *`defer_validation` (Optional, boolean)*: When the transform is created, a series of validations occur to ensure its success. For example, there is a -check for the existence of the source indices and a check that the destination index is not part of the source -index pattern. You can use this parameter to skip the checks, for example when the source index does not exist -until after the transform is created. The validations are always run when you start the transform, however, with -the exception of privilege checks. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ** *`description` (Optional, string)*: Free text description of the transform. ** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. @@ -8599,6 +8600,12 @@ and the aggregation to reduce the data. destination index. ** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. ** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. 
+** *`defer_validation` (Optional, boolean)*: When the transform is created, a series of validations occur to ensure its success. For example, there is a +check for the existence of the source indices and a check that the destination index is not part of the source +index pattern. You can use this parameter to skip the checks, for example when the source index does not exist +until after the transform is created. The validations are always run when you start the transform, however, with +the exception of privilege checks. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== reset_transform @@ -8702,11 +8709,6 @@ client.transform.updateTransform({ transform_id }) * *Request (object):* ** *`transform_id` (string)*: Identifier for the transform. -** *`defer_validation` (Optional, boolean)*: When true, deferrable validations are not run. This behavior may be -desired if the source index does not exist until after the transform is -created. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the -timeout expires, the request fails and returns an error. ** *`dest` (Optional, { index, op_type, pipeline, routing, version_type })*: The destination for the transform. ** *`description` (Optional, string)*: Free text description of the transform. ** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the @@ -8719,6 +8721,11 @@ indexing. The minimum value is 1s and the maximum is 1h. ** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. ** *`retention_policy` (Optional, { time } | null)*: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. +** *`defer_validation` (Optional, boolean)*: When true, deferrable validations are not run. This behavior may be +desired if the source index does not exist until after the transform is +created. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. [discrete] ==== upgrade_transforms @@ -8820,7 +8827,6 @@ client.watcher.executeWatch({ ... }) * *Request (object):* ** *`id` (Optional, string)*: Identifier for the watch. -** *`debug` (Optional, boolean)*: Defines whether the watch runs in debug mode. ** *`action_modes` (Optional, Record)*: Determines how to handle the watch actions as part of the watch execution. ** *`alternative_input` (Optional, Record)*: When present, the watch uses this object as a payload instead of executing its own input. ** *`ignore_condition` (Optional, boolean)*: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. @@ -8828,6 +8834,7 @@ client.watcher.executeWatch({ ... }) ** *`simulated_actions` (Optional, { actions, all, use_all })* ** *`trigger_data` (Optional, { scheduled_time, triggered_time })*: This structure is parsed as the data of the trigger event that will be used during the watch execution ** *`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })*: When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and record_execution cannot be set. 
+** *`debug` (Optional, boolean)*: Defines whether the watch runs in debug mode. [discrete] ==== get_settings @@ -8871,10 +8878,6 @@ client.watcher.putWatch({ id }) * *Request (object):* ** *`id` (string)*: Watch ID -** *`active` (Optional, boolean)*: Specify whether the watch is in/active by default -** *`if_primary_term` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified primary term -** *`if_seq_no` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified sequence number -** *`version` (Optional, number)*: Explicit version number for concurrency control ** *`actions` (Optional, Record)* ** *`condition` (Optional, { always, array_compare, compare, never, script })* ** *`input` (Optional, { chain, http, search, simple })* @@ -8882,6 +8885,10 @@ client.watcher.putWatch({ id }) ** *`throttle_period` (Optional, string)* ** *`transform` (Optional, { chain, script, search })* ** *`trigger` (Optional, { schedule })* +** *`active` (Optional, boolean)*: Specify whether the watch is in/active by default +** *`if_primary_term` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified primary term +** *`if_seq_no` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified sequence number +** *`version` (Optional, number)*: Explicit version number for concurrency control [discrete] ==== query_watches diff --git a/src/api/types.ts b/src/api/types.ts index fba574307..cf5c76cfd 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -13891,7 +13891,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { compressed_definition?: string definition?: MlPutTrainedModelDefinition description?: string - inference_config: MlInferenceConfigCreateContainer + inference_config?: MlInferenceConfigCreateContainer input?: MlPutTrainedModelInput metadata?: any model_type?: MlTrainedModelType diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 66aea5837..f6685a039 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -14147,7 +14147,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { compressed_definition?: string definition?: MlPutTrainedModelDefinition description?: string - inference_config: MlInferenceConfigCreateContainer + inference_config?: MlInferenceConfigCreateContainer input?: MlPutTrainedModelInput metadata?: any model_type?: MlTrainedModelType From 6fef22d387440e041b540b8498ec2757ce5ff92a Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 17 Jul 2023 11:53:37 -0500 Subject: [PATCH 229/647] Update codegen task to be runnable via CI (#1944) --- .ci/Dockerfile | 25 ++++++++++++++++++------- .ci/make.sh | 43 +++++++++++++++++++++++++++---------------- 2 files changed, 45 insertions(+), 23 deletions(-) diff --git a/.ci/Dockerfile b/.ci/Dockerfile index c54dd761a..1f10aed8c 100644 --- a/.ci/Dockerfile +++ b/.ci/Dockerfile @@ -1,15 +1,26 @@ ARG NODE_JS_VERSION=18 FROM node:${NODE_JS_VERSION} -# Create app directory -WORKDIR /usr/src/app +ARG BUILDER_UID=1000 +ARG BUILDER_GID=1000 +ENV BUILDER_USER elastic +ENV BUILDER_GROUP elastic -RUN apt-get clean -y -RUN apt-get update -y -RUN apt-get install -y zip +# install zip util +RUN apt-get clean -y && \ + apt-get update -y && \ + apt-get install -y zip + +# Set user permissions and directory +RUN groupadd --system -g ${BUILDER_GID} ${BUILDER_GROUP} \ + && useradd --system 
--shell /bin/bash -u ${BUILDER_UID} -g ${BUILDER_GROUP} -m elastic 1>/dev/null 2>/dev/null \
+    && mkdir -p /usr/src/elasticsearch-js \
+    && chown -R ${BUILDER_USER}:${BUILDER_GROUP} /usr/src/
+WORKDIR /usr/src/elasticsearch-js
+USER ${BUILDER_USER}:${BUILDER_GROUP}

# Install app dependencies
-COPY package*.json ./
+COPY --chown=$BUILDER_USER:$BUILDER_GROUP package*.json ./
RUN npm install

-COPY . .
+COPY --chown=$BUILDER_USER:$BUILDER_GROUP . .
diff --git a/.ci/make.sh b/.ci/make.sh
index e5bcbd5e9..adf79ef23 100755
--- a/.ci/make.sh
+++ b/.ci/make.sh
@@ -12,7 +12,7 @@
# assemble <version>       : build client artifacts with version
# bump <version>           : bump client internals to version
# bumpmatrix <version>     : bump stack version in test matrix to version
-# codegen <version>        : generate endpoints
+# codegen                  : generate endpoints
# docsgen <version>        : generate documentation
# examplegen               : generate the doc examples
# clean                    : clean workspace
@@ -24,7 +24,6 @@
# ------------------------------------------------------- #
script_path=$(dirname "$(realpath -s "$0")")
repo=$(realpath "$script_path/../")
-generator=$(realpath "$script_path/../../elastic-client-generator-js")

# shellcheck disable=SC1090
CMD=$1
@@ -38,7 +37,6 @@
product="elastic/elasticsearch-js"
output_folder=".ci/output"
codegen_folder=".ci/output"
OUTPUT_DIR="$repo/${output_folder}"
-# REPO_BINDING="${OUTPUT_DIR}:/sln/${output_folder}"
NODE_JS_VERSION=18
WORKFLOW=${WORKFLOW-staging}
mkdir -p "$OUTPUT_DIR"
@@ -59,18 +57,29 @@ case $CMD in
    echo -e "\033[31;1mTARGET: assemble -> missing version parameter\033[0m"
    exit 1
  fi
-  echo -e "\033[36;1mTARGET: assemble artefact $VERSION\033[0m"
+  echo -e "\033[36;1mTARGET: assemble artifact $VERSION\033[0m"
  TASK=release
  TASK_ARGS=("$VERSION" "$output_folder")
  ;;
codegen)
-  if [ -v $VERSION ]; then
-    echo -e "\033[31;1mTARGET: codegen -> missing version parameter\033[0m"
-    exit 1
+  if [ -v "$VERSION" ] || [[ -z "$VERSION" ]]; then
+    # fall back to branch name or `main` if no VERSION is set
+    branch_name=$(git rev-parse --abbrev-ref HEAD)
+    if [[ "$branch_name" =~ ^[0-9]+\.[0-9]+ ]]; then
+      echo -e "\033[36;1mTARGET: codegen -> No VERSION found, using branch name: \`$branch_name\`\033[0m"
+      VERSION="$branch_name"
+    else
+      echo -e "\033[36;1mTARGET: codegen -> No VERSION found, using \`main\`\033[0m"
+      VERSION="main"
+    fi
  fi
-  echo -e "\033[36;1mTARGET: codegen API v$VERSION\033[0m"
+  if [ "$VERSION" = 'main' ]; then
+    echo -e "\033[36;1mTARGET: codegen API $VERSION\033[0m"
+  else
+    echo -e "\033[36;1mTARGET: codegen API v$VERSION\033[0m"
+  fi
+
  TASK=codegen
-  # VERSION is BRANCH here for now
  TASK_ARGS=("$VERSION")
  ;;
docsgen)
@@ -80,13 +89,11 @@
  fi
  echo -e "\033[36;1mTARGET: generate docs for $VERSION\033[0m"
  TASK=codegen
-  # VERSION is BRANCH here for now
  TASK_ARGS=("$VERSION" "$codegen_folder")
  ;;
examplesgen)
  echo -e "\033[36;1mTARGET: generate examples\033[0m"
  TASK=codegen
-  # VERSION is BRANCH here for now
  TASK_ARGS=("$VERSION" "$codegen_folder")
  ;;
bump)
@@ -96,7 +103,6 @@
  fi
  echo -e "\033[36;1mTARGET: bump to version $VERSION\033[0m"
  TASK=bump
-  # VERSION is BRANCH here for now
  TASK_ARGS=("$VERSION")
  ;;
bumpmatrix)
@@ -128,6 +134,8 @@
docker build \
  --file .ci/Dockerfile \
  --tag "$product" \
  --build-arg NODE_JS_VERSION="$NODE_JS_VERSION" \
+  --build-arg "BUILDER_UID=$(id -u)" \
+  --build-arg "BUILDER_GID=$(id -g)" \
  .
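# Note: the two BUILDER_UID/BUILDER_GID build args above feed the
# groupadd/useradd steps added to .ci/Dockerfile in this patch, so the
# container user matches the invoking host user and files written to the
# bind-mounted repo are not owned by root. Typical host ids are 1000:1000,
# and a typical invocation would be something like `.ci/make.sh codegen 8.9`
# (illustrative; the accepted commands are listed in the usage header above).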
# ------------------------------------------------------- # @@ -137,15 +145,18 @@ docker build \ echo -e "\033[34;1mINFO: running $product container\033[0m" docker run \ - --volume "$repo:/usr/src/app" \ - --volume "$generator:/usr/src/elastic-client-generator-js" \ - --volume /usr/src/app/node_modules \ + --volume "$repo:/usr/src/elasticsearch-js" \ + --volume /usr/src/elasticsearch-js/node_modules \ -u "$(id -u):$(id -g)" \ --env "WORKFLOW=$WORKFLOW" \ --name make-elasticsearch-js \ --rm \ $product \ - node .ci/make.mjs --task $TASK ${TASK_ARGS[*]} + /bin/bash -c "cd /usr/src && \ + git clone https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-js.git && \ + mkdir -p /usr/src/elastic-client-generator-js/output && \ + cd /usr/src/elasticsearch-js && \ + node .ci/make.mjs --task $TASK ${TASK_ARGS[*]}" # ------------------------------------------------------- # # Post Command tasks & checks From 296c4d432efa2755941e99fa0b30f56de5a7d253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Wed, 19 Jul 2023 22:16:59 +0200 Subject: [PATCH 230/647] [DOCS] Restructures repo README (#1947) --- README.md | 123 +++++++++++++++++++----------------------------------- 1 file changed, 42 insertions(+), 81 deletions(-) diff --git a/README.md b/README.md index b33b20d65..9281aefc8 100644 --- a/README.md +++ b/README.md @@ -6,19 +6,25 @@ The official Node.js client for Elasticsearch. -## Features -- One-to-one mapping with REST API. -- Generalized, pluggable architecture. -- Configurable, automatic discovery of cluster nodes. -- Persistent, Keep-Alive connections. -- Load balancing across all available nodes. -- Child client support. -- TypeScript support out of the box. - -## Install -``` -npm install @elastic/elasticsearch -``` +## Installation + +Refer to the [Installation section](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_installation) +of the getting started documentation. + +## Connecting + +Refer to the [Connecting section](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_connecting) +of the getting started documentation. 
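For quick reference, a connection typically starts with a minimal sketch like this (the cloud ID and API key values are placeholders, not working credentials):

```js
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  cloud: { id: '<cloud-id>' },          // placeholder
  auth: { apiKey: 'base64EncodedKey' }  // placeholder
})
```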
+ +## Usage + +* [Creating an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_creating_an_index) +* [Indexing a document](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_indexing_documents) +* [Getting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_getting_documents) +* [Searching documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_searching_documents) +* [Updating documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_updating_documents) +* [Deleting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_documents) +* [Deleting an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_an_index) ### Node.js support @@ -72,93 +78,45 @@ We recommend that you write a lightweight proxy that uses this client instead, y ## Documentation -- [Introduction](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/introduction.html) -- [Usage](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#client-usage) -- [Client configuration](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-configuration.html) -- [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html) -- [Authentication](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#authentication) -- [Observability](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html) -- [Creating a child client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html) -- [Client helpers](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html) -- [Typescript support](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/typescript.html) -- [Testing](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-testing.html) -- [Examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html) - -## Quick start - -```js -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - cloud: { id: '' }, - auth: { apiKey: 'base64EncodedKey' } -}) - -async function run () { - // Let's start by indexing some data - await client.index({ - index: 'game-of-thrones', - document: { - character: 'Ned Stark', - quote: 'Winter is coming.' - } - }) - - await client.index({ - index: 'game-of-thrones', - document: { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - } - }) - - await client.index({ - index: 'game-of-thrones', - document: { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' - } - }) - - // here we are forcing an index refresh, otherwise we will not - // get any result in the consequent search - await client.indices.refresh({ index: 'game-of-thrones' }) - - // Let's search! 
-  const result = await client.search({
-    index: 'game-of-thrones',
-    query: {
-      match: { quote: 'winter' }
-    }
-  })
-
-  console.log(result.hits.hits)
-}
-
-run().catch(console.log)
-```
+* [Introduction](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/introduction.html)
+* [Usage](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#client-usage)
+* [Client configuration](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-configuration.html)
+* [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html)
+* [Authentication](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#authentication)
+* [Observability](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html)
+* [Creating a child client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html)
+* [Client helpers](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html)
+* [Typescript support](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/typescript.html)
+* [Testing](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-testing.html)
+* [Examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html)

## Install multiple versions

If you are using multiple versions of Elasticsearch, you need to use multiple versions of the client. In the past, installing multiple versions of the same package was not possible, but with `npm v6.9`, you can do that via aliasing.

The command you must run to install a different version of the client is:
+
```sh
npm install <alias>@npm:@elastic/elasticsearch@<version>
```
+
-So for example if you need to install `7.x` and `6.x`, you will run
+So for example if you need to install `7.x` and `6.x`, you will run:
+
```sh
npm install es6@npm:@elastic/elasticsearch@6
npm install es7@npm:@elastic/elasticsearch@7
```
+
And your `package.json` will look like the following:
+
```json
"dependencies": {
  "es6": "npm:@elastic/elasticsearch@^6.7.0",
  "es7": "npm:@elastic/elasticsearch@^7.0.0"
}
```
+
You will require the packages from your code by using the alias you have defined.
+ ```js const { Client: Client6 } = require('es6') const { Client: Client7 } = require('es7') @@ -176,7 +134,10 @@ client6.info().then(console.log, console.log) client7.info().then(console.log, console.log) ``` -Finally, if you want to install the client for the next version of Elasticsearch *(the one that lives in Elasticsearch’s main branch)*, you can use the following command: +Finally, if you want to install the client for the next version of Elasticsearch +*(the one that lives in Elasticsearch’s main branch)*, you can use the following +command: + ```sh npm install esmain@github:elastic/elasticsearch-js ``` From 5072e4afc389a346ef1bab57bcf27e2de98690a4 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 20 Jul 2023 09:30:19 -0500 Subject: [PATCH 231/647] Update integration test rules (#1948) --- .buildkite/pipeline.yml | 1 + catalog-info.yaml | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 04dcbf016..d05129234 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,3 +1,4 @@ +--- steps: - label: ":elasticsearch: :javascript: ES JavaScript ({{ matrix.nodejs }}) Test Suite: {{ matrix.suite }}" agents: diff --git a/catalog-info.yaml b/catalog-info.yaml index 38382017f..b8bbd36ff 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -35,15 +35,16 @@ spec: access_level: READ_ONLY provider_settings: build_pull_requests: false + build_branches: false cancel_intermediate_builds: true cancel_intermediate_builds_branch_filter: '!main' schedules: main_semi_daily: branch: 'main' cronline: '0 */12 * * *' - 8_8_semi_daily: - branch: '8.8' + 8_9_semi_daily: + branch: '8.9' cronline: '0 */12 * * *' - 8_7_daily: - branch: '8.7' + 8_8_daily: + branch: '8.8' cronline: '@daily' From 6b4cdee53c93e1abfc458caff016bd1d6f489da4 Mon Sep 17 00:00:00 2001 From: Robert Da Silva Date: Thu, 20 Jul 2023 21:33:46 +0200 Subject: [PATCH 232/647] Allow document to be overwritten in `onDocument` iteratee of bulk helper (#1732) Co-authored-by: Josh Mock --- src/helpers.ts | 25 ++++---- test/unit/helpers/bulk.test.ts | 109 ++++++++++++++++++++++++++++++++- 2 files changed, 120 insertions(+), 14 deletions(-) diff --git a/src/helpers.ts b/src/helpers.ts index 9c5c822bf..768ad1dc8 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -74,11 +74,11 @@ export interface BulkStats { aborted: boolean } -interface IndexAction { +interface IndexActionOperation { index: T.BulkIndexOperation } -interface CreateAction { +interface CreateActionOperation { create: T.BulkCreateOperation } @@ -90,7 +90,9 @@ interface DeleteAction { delete: T.BulkDeleteOperation } -type UpdateAction = [UpdateActionOperation, Record] +type CreateAction = CreateActionOperation | [CreateActionOperation, unknown] +type IndexAction = IndexActionOperation | [IndexActionOperation, unknown] +type UpdateAction = [UpdateActionOperation, T.BulkUpdateAction] type Action = IndexAction | CreateAction | UpdateAction | DeleteAction export interface OnDropDocument { @@ -618,22 +620,21 @@ export default class Helpers { for await (const chunk of datasource) { if (shouldAbort) break timeoutRef.refresh() - const action = onDocument(chunk) - const operation = Array.isArray(action) - ? Object.keys(action[0])[0] - : Object.keys(action)[0] + const result = onDocument(chunk) + const [action, payload] = Array.isArray(result) ? 
result : [result, chunk] + const operation = Object.keys(action)[0] if (operation === 'index' || operation === 'create') { actionBody = serializer.serialize(action) - payloadBody = typeof chunk === 'string' ? chunk : serializer.serialize(chunk) + payloadBody = typeof payload === 'string' + ? payload + : serializer.serialize(payload) chunkBytes += Buffer.byteLength(actionBody) + Buffer.byteLength(payloadBody) bulkBody.push(actionBody, payloadBody) } else if (operation === 'update') { - // @ts-expect-error in case of update action is an array - actionBody = serializer.serialize(action[0]) + actionBody = serializer.serialize(action) payloadBody = typeof chunk === 'string' ? `{"doc":${chunk}}` - // @ts-expect-error in case of update action is an array - : serializer.serialize({ doc: chunk, ...action[1] }) + : serializer.serialize({ doc: chunk, ...payload }) chunkBytes += Buffer.byteLength(actionBody) + Buffer.byteLength(payloadBody) bulkBody.push(actionBody, payloadBody) } else if (operation === 'delete') { diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts index dbabef07c..732d696c4 100644 --- a/test/unit/helpers/bulk.test.ts +++ b/test/unit/helpers/bulk.test.ts @@ -17,11 +17,11 @@ * under the License. */ -import * as http from 'http' +import FakeTimers from '@sinonjs/fake-timers' import { createReadStream } from 'fs' +import * as http from 'http' import { join } from 'path' import split from 'split2' -import FakeTimers from '@sinonjs/fake-timers' import { test } from 'tap' import { Client, errors } from '../../../' import { buildServer, connection } from '../../utils' @@ -785,6 +785,59 @@ test('bulk index', t => { t.end() }) + t.test('Should use payload returned by `onDocument`', async t => { + let count = 0 + const updatedAt = '1970-01-01T12:00:00.000Z' + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8', + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` + }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), { ...dataset[count++], updatedAt }) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + t.type(doc.user, 'string') // testing that doc is type of Document + return [ + { + index: { + _index: 'test' + } + }, + { ...doc, updatedAt } + ] + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + t.end() }) @@ -835,6 +888,58 @@ test('bulk create', t => { aborted: false }) }) + + t.test('Should use payload returned by `onDocument`', async t => { + let count = 0 + const updatedAt = '1970-01-01T12:00:00.000Z' + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + 
t.same(JSON.parse(action), { create: { _index: 'test', _id: count } }) + t.same(JSON.parse(payload), { ...dataset[count++], updatedAt }) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + let id = 0 + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return [ + { + create: { + _index: 'test', + _id: String(id++) + } + }, + { ...doc, updatedAt } + ] + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + t.end() }) From 80ba91645be56961fcf1d435fbeca5cc70d2a9bb Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 25 Jul 2023 11:05:25 -0500 Subject: [PATCH 233/647] Add docs for bulk helper improvement (#1951) --- docs/helpers.asciidoc | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/docs/helpers.asciidoc b/docs/helpers.asciidoc index b78f79399..4815ebc4a 100644 --- a/docs/helpers.asciidoc +++ b/docs/helpers.asciidoc @@ -281,7 +281,7 @@ helper uses those options in conjunction with the Bulk API call. [source,js] ---- const result = await client.helpers.bulk({ - datasource: [...] + datasource: [...], onDocument (doc) { return { index: { _index: 'my-index' } @@ -326,6 +326,33 @@ const result = await client.helpers.bulk({ console.log(result) ---- +[discrete] +==== Modifying a document before operation + +~Added~ ~in~ ~`v8.8.2`~ + +If you need to modify documents in your datasource before it is sent to Elasticsearch, you can return an array in the `onDocument` function rather than an operation object. The first item in the array must be the operation object, and the second item must be the document or partial document object as you'd like it to be sent to Elasticsearch. 
+ +[source,js] +---- +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const result = await client.helpers.bulk({ + datasource: [...], + onDocument (doc) { + return [ + { index: { _index: 'my-index' } }, + { ...doc, favorite_color: 'mauve' }, + ] + } +}) + +console.log(result) +---- [discrete] [[multi-search-helper]] @@ -574,4 +601,4 @@ const scrollSearch = client.helpers.scrollDocuments({ for await (const doc of scrollSearch) { console.log(doc) } ----- \ No newline at end of file +---- From 1049a0ffa0f87119208818f29ce5fdea0b7a10b6 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 25 Jul 2023 11:11:49 -0500 Subject: [PATCH 234/647] Update user agent format (#1954) --- src/client.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client.ts b/src/client.ts index 99bb60e4a..09118d58c 100644 --- a/src/client.ts +++ b/src/client.ts @@ -175,7 +175,7 @@ export default class Client extends API { caFingerprint: null, agent: null, headers: { - 'user-agent': `elasticsearch-js/${clientVersion} Node.js ${nodeVersion}; Transport ${transportVersion}; (${os.platform()} ${os.release()} ${os.arch()})` + 'user-agent': `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${nodeVersion}; Transport ${transportVersion})` }, nodeFilter: null, generateRequestId: null, From 33aae3c99a8825ca0ce456d17b3457d482aecccb Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 25 Jul 2023 16:06:00 -0500 Subject: [PATCH 235/647] Changelog for 8.9.0 (#1961) --- docs/changelog.asciidoc | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 4de89967d..911c3cc3c 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,31 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.9.0 + +[discrete] +==== Features + +[discrete] +===== Support for Elasticsearch `v8.9.0` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.9/release-notes-8.9.0.html[here]. + +[discrete] +===== Allow document to be overwritten in `onDocument` iteratee of bulk helper https://github.com/elastic/elasticsearch-js/pull/1732[#1732] + +In the https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html#bulk-helper[bulk helper], documents could not be modified before being sent to Elasticsearch. It is now possible to https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html#_modifying_a_document_before_operation[modify a document] before sending it. + +[discrete] +==== Fixes + +[discrete] +===== Updated `user-agent` header https://github.com/elastic/elasticsearch-js/pull/1954[#1954] + +The `user-agent` header the client used to connect to Elasticsearch was using a non-standard format that has been improved. 
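As a condensed sketch of the bulk helper change called out under Features above (mirroring the example added to the helpers documentation in this release; `my-index` and `favorite_color` are illustrative values):

[source,js]
----
const result = await client.helpers.bulk({
  datasource: [...],
  onDocument (doc) {
    // returning [operation, document] replaces the document that is sent
    return [
      { index: { _index: 'my-index' } },
      { ...doc, favorite_color: 'mauve' }
    ]
  }
})
----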
+ [discrete] === 8.8.1 From 7320868f627b9548ee269c77dd1d921be714ba44 Mon Sep 17 00:00:00 2001 From: David Olivier Date: Wed, 9 Aug 2023 20:11:15 +0200 Subject: [PATCH 236/647] Fix AnalysisPatternTokenizer definition (#1828) (#1967) --- src/api/types.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/api/types.ts b/src/api/types.ts index cf5c76cfd..7a209917e 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -4495,9 +4495,9 @@ export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBa export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase { type: 'pattern' - flags: string - group: integer - pattern: string + flags?: string + group?: integer + pattern?: string } export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' From 9aaf7585d3944dce9e5141070910f7e3208a0894 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 11 Aug 2023 12:59:17 -0500 Subject: [PATCH 237/647] Upgrade transport (#1968) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 520f7d768..5f9ef5d04 100644 --- a/package.json +++ b/package.json @@ -86,7 +86,7 @@ "zx": "^7.2.2" }, "dependencies": { - "@elastic/transport": "^8.3.2", + "@elastic/transport": "^8.3.3", "tslib": "^2.4.0" }, "tap": { From 787b3b03ef2ec88f57a055b735f9ed06c3119049 Mon Sep 17 00:00:00 2001 From: Wonseop Kim Date: Wed, 16 Aug 2023 01:13:50 +0900 Subject: [PATCH 238/647] Remove unnecessary code (#1971) --- scripts/utils/generateApis.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/utils/generateApis.js b/scripts/utils/generateApis.js index a1dddd063..cb99b3701 100644 --- a/scripts/utils/generateApis.js +++ b/scripts/utils/generateApis.js @@ -232,7 +232,7 @@ function generateSingleApi (version, spec, common) { querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) let path = '' - ${buildPath(api)} + ${buildPath()} // build request object const request = { @@ -254,7 +254,7 @@ function generateSingleApi (version, spec, common) { documentation: generateDocumentation(spec[api], api) } - function genRequiredChecks (param) { + function genRequiredChecks () { const code = required .map(_genRequiredCheck) .concat(_noBody()) From a9bed7eacb2b92e2586663c22d447e956da42bc3 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Fri, 18 Aug 2023 00:42:05 +0930 Subject: [PATCH 239/647] Auto-generated code for main (#1946) --- docs/reference.asciidoc | 1326 ++++++++++++++------- src/api/api/async_search.ts | 8 +- src/api/api/autoscaling.ts | 8 +- src/api/api/bulk.ts | 2 +- src/api/api/cat.ts | 55 +- src/api/api/ccr.ts | 26 +- src/api/api/clear_scroll.ts | 2 +- src/api/api/close_point_in_time.ts | 2 +- src/api/api/cluster.ts | 35 +- src/api/api/count.ts | 2 +- src/api/api/create.ts | 6 +- src/api/api/dangling_indices.ts | 6 +- src/api/api/delete.ts | 2 +- src/api/api/delete_by_query.ts | 2 +- src/api/api/delete_by_query_rethrottle.ts | 2 +- src/api/api/delete_script.ts | 2 +- src/api/api/enrich.ts | 10 +- src/api/api/eql.ts | 8 +- src/api/api/exists.ts | 2 +- src/api/api/exists_source.ts | 2 +- src/api/api/explain.ts | 2 +- src/api/api/features.ts | 4 +- src/api/api/field_caps.ts | 2 +- src/api/api/fleet.ts | 2 +- src/api/api/get.ts | 2 +- src/api/api/get_script.ts | 2 +- src/api/api/get_script_context.ts | 2 +- 
src/api/api/get_script_languages.ts | 2 +- src/api/api/get_source.ts | 2 +- src/api/api/graph.ts | 2 +- src/api/api/health_report.ts | 2 +- src/api/api/ilm.ts | 22 +- src/api/api/index.ts | 2 +- src/api/api/indices.ts | 119 +- src/api/api/info.ts | 2 +- src/api/api/ingest.ts | 12 +- src/api/api/knn_search.ts | 2 +- src/api/api/license.ts | 14 +- src/api/api/logstash.ts | 6 +- src/api/api/mget.ts | 2 +- src/api/api/migration.ts | 6 +- src/api/api/ml.ts | 146 +-- src/api/api/monitoring.ts | 2 +- src/api/api/msearch.ts | 2 +- src/api/api/msearch_template.ts | 2 +- src/api/api/mtermvectors.ts | 2 +- src/api/api/nodes.ts | 14 +- src/api/api/open_point_in_time.ts | 2 +- src/api/api/ping.ts | 2 +- src/api/api/put_script.ts | 2 +- src/api/api/query_ruleset.ts | 155 +++ src/api/api/rank_eval.ts | 2 +- src/api/api/reindex.ts | 6 +- src/api/api/reindex_rethrottle.ts | 2 +- src/api/api/render_search_template.ts | 2 +- src/api/api/rollup.ts | 16 +- src/api/api/scripts_painless_execute.ts | 2 +- src/api/api/scroll.ts | 2 +- src/api/api/search.ts | 2 +- src/api/api/search_application.ts | 20 +- src/api/api/search_mvt.ts | 2 +- src/api/api/search_shards.ts | 2 +- src/api/api/search_template.ts | 2 +- src/api/api/searchable_snapshots.ts | 8 +- src/api/api/security.ts | 114 +- src/api/api/shutdown.ts | 6 +- src/api/api/slm.ts | 18 +- src/api/api/snapshot.ts | 24 +- src/api/api/sql.ts | 12 +- src/api/api/ssl.ts | 2 +- src/api/api/synonyms.ts | 177 ++- src/api/api/tasks.ts | 6 +- src/api/api/terms_enum.ts | 4 +- src/api/api/termvectors.ts | 2 +- src/api/api/text_structure.ts | 2 +- src/api/api/transform.ts | 22 +- src/api/api/update.ts | 2 +- src/api/api/update_by_query.ts | 5 +- src/api/api/update_by_query_rethrottle.ts | 2 +- src/api/api/watcher.ts | 26 +- src/api/api/xpack.ts | 4 +- src/api/index.ts | 8 + src/api/types.ts | 267 ++++- src/api/typesWithBodyKey.ts | 280 ++++- 84 files changed, 2113 insertions(+), 986 deletions(-) create mode 100644 src/api/api/query_ruleset.ts diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index d90f570b0..5d35bb0bc 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -94,7 +94,7 @@ client.count({ ... 
}) * *Request (object):* ** *`index` (Optional, string | string[])*: A list of indices to restrict the results -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) ** *`analyzer` (Optional, string)*: The analyzer to use for the query string ** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) @@ -175,7 +175,7 @@ client.deleteByQuery({ index }) * *Request (object):* ** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices ** *`max_docs` (Optional, number)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`slice` (Optional, { field, id, max })* ** *`allow_no_indices` (Optional, boolean)*: 
Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) ** *`analyzer` (Optional, string)*: The analyzer to use for the query string @@ -305,7 +305,7 @@ client.explain({ id, index }) * *Request (object):* ** *`id` (string)*: The document ID ** *`index` (string)*: The name of the index -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`analyzer` (Optional, string)*: The analyzer for the query string query ** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcards and prefix queries in the query string query should be analyzed (default: false) ** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) @@ -334,7 +334,7 @@ client.fieldCaps({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. ** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. 
+** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. ** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. ** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, @@ -516,7 +516,7 @@ parameter defaults to false. You can pass _source: true to return both source fi and stored fields in the search response. ** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type }[])*: Query to filter the documents that can match. 
The kNN search will return the top +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. ** *`routing` (Optional, string)*: A list of specific routing values @@ -808,92 +808,137 @@ client.search({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices -** *`aggregations` (Optional, Record)* -** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. +Supports wildcards (`*`). +To search all data streams and indices, omit this parameter or use `*` or `_all`. +** *`aggregations` (Optional, Record)*: Defines the aggregations that are run as part of the search request. +** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*: Collapses search results the values of the specified field. ** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. ** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. -** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000 -hits using the from and size parameters. To page through more hits, use the -search_after parameter. -** *`highlight` (Optional, { encoder, fields })* -** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If true, the exact -number of hits is returned at the cost of some performance. If false, the -response does not include the total number of hits matching the query. -Defaults to 10,000 hits. +** *`from` (Optional, number)*: Starting document offset. +Needs to be non-negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. 
+** *`highlight` (Optional, { encoder, fields })*: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. +** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. +If `true`, the exact number of hits is returned at the cost of some performance. +If `false`, the response does not include the total number of hits matching the query. ** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. -** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field -names matching these patterns in the hits.fields property of the response. +** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (`*`) patterns. +The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. ** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter }[])*: Defines the approximate kNN search to run. -** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use -** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are -not included in the search results. -** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`rescore` (Optional, { query, window_size } | { query, window_size }[])* +** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use. +** *`min_score` (Optional, number)*: Minimum `_score` for matching documents. +Documents with a lower `_score` are not included in the search results. 
+** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. +The search hits are filtered after the aggregations are calculated. +A post filter has no impact on the aggregation results. +** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. +NOTE: This is a debugging tool and adds significant overhead to search execution. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`rescore` (Optional, { query, window_size } | { query, window_size }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* -** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more -than 10,000 hits using the from and size parameters. To page through more -hits, use the search_after parameter. -** *`slice` (Optional, { field, id, max })* -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])* -** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These -fields are returned in the hits._source property of the search response. -** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names -matching these patterns in the hits.fields property of the response. -** *`suggest` (Optional, { text })* -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. If a query reaches this -limit, Elasticsearch terminates the query early. Elasticsearch collects documents -before sorting. Defaults to 0, which does not terminate query execution early. -** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. 
If no response
-is received before the timeout expires, the request fails and returns an error.
+** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page.
+** *`size` (Optional, number)*: The number of hits to return.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+To page through more hits, use the `search_after` parameter.
+** *`slice` (Optional, { field, id, max })*: Can be used to split a scrolled search into multiple slices that can be consumed independently.
+** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: A list of `<field>:<direction>` pairs.
+** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents.
+These fields are returned in the hits._source property of the search response.
+** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (`*`) patterns.
+The request returns values for field names matching these patterns in the `hits.fields` property of the response.
+** *`suggest` (Optional, { text })*: Defines a suggester that provides similar looking terms based on a provided text.
+** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard.
+If a query reaches this limit, Elasticsearch terminates the query early.
+Elasticsearch collects documents before sorting.
+Use with caution.
+Elasticsearch applies this parameter to each shard handling the request.
+When possible, let Elasticsearch perform early termination automatically.
+Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
+If set to `0` (default), the query does not terminate early.
+** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard.
+If no response is received before the timeout expires, the request fails and returns an error.
Defaults to no timeout.
** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting.
** *`version` (Optional, boolean)*: If true, returns document version as part of a hit.
-** *`seq_no_primary_term` (Optional, boolean)*: If true, returns sequence number and primary term of the last modification
-of each hit. See Optimistic concurrency control.
-** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified,
-no stored fields are included in the response. If this field is specified, the _source
-parameter defaults to false. You can pass _source: true to return both source fields
-and stored fields in the search response.
-** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
-cannot specify an `<index>` in the request path.
-** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
-precedence over mapped fields with the same name.
-** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics
-aggregation for its associated searches. You can retrieve these stats using
-the indices stats API.
-** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`allow_partial_search_results` (Optional, boolean)*: Indicate if an error should be returned if there is a partial search failure or timeout -** *`analyzer` (Optional, string)*: The analyzer to use for the query string -** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) -** *`batched_reduce_size` (Optional, number)*: The number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. -** *`ccs_minimize_roundtrips` (Optional, boolean)*: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) -** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored -** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests -** *`min_compatible_shard_node` (Optional, string)*: The minimum compatible version that all shards involved in search should have for this request to be successful -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`pre_filter_shard_size` (Optional, number)*: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint. -** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to index level setting -** *`routing` (Optional, string)*: A list of specific routing values -** *`scroll` (Optional, string | -1 | 0)*: Specify how long a consistent view of the index should be maintained for scrolled search -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type +** *`seq_no_primary_term` (Optional, boolean)*: If `true`, returns sequence number and primary term of the last modification of each hit. 
+** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit.
+If no fields are specified, no stored fields are included in the response.
+If this field is specified, the `_source` parameter defaults to `false`.
+You can pass `_source: true` to return both source fields and stored fields in the search response.
+** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT).
+If you provide a PIT, you cannot specify an `<index>` in the request path.
+** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request.
+These fields take precedence over mapped fields with the same name.
+** *`stats` (Optional, string[])*: Stats groups to associate with the search.
+Each group maintains a statistics aggregation for its associated searches.
+You can retrieve these stats using the indices stats API.
+** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or shard failures. If false, returns an error with no partial results.
+** *`analyzer` (Optional, string)*: Analyzer to use for the query string.
+This parameter can only be used when the `q` query string parameter is specified.
+** *`analyze_wildcard` (Optional, boolean)*: If true, wildcard and prefix queries are analyzed.
+This parameter can only be used when the `q` query string parameter is specified.
+** *`batched_reduce_size` (Optional, number)*: The number of shard results that should be reduced at once on the coordinating node.
+This value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large.
+** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network round-trips between the coordinating node and the remote clusters are minimized when executing cross-cluster search (CCS) requests.
+** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: AND or OR.
+This parameter can only be used when the `q` query string parameter is specified.
+** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string.
+This parameter can only be used when the `q` query string parameter is specified.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices will be ignored when frozen.
+** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
+** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
+This parameter can only be used when the `q` query string parameter is specified.
+** *`max_concurrent_shard_requests` (Optional, number)*: Defines the number of concurrent shard requests per node this search executes concurrently.
+This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests.
+** *`min_compatible_shard_node` (Optional, string)*: The minimum version of the node that can handle the request.
+Any handling node with a lower version will fail the request.
+** *`preference` (Optional, string)*: Nodes and shards used for the search.
+By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are:
+`_only_local` to run the search only on shards on the local node;
+`_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method;
+`_only_nodes:<node-id>,<node-id>` to run the search on only the specified node IDs, where, if suitable shards exist on more than one selected node, use shards on those nodes using the default method, or if none of the specified nodes are available, select shards from any available node using the default method;
+`_prefer_nodes:<node-id>,<node-id>` to run the search, if possible, on the specified node IDs, or if not, select shards using the default method;
+`_shards:<shard>,<shard>` to run the search only on the specified shards;
+`<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order.
+** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold.
+This filter roundtrip can limit the number of shards significantly if for instance a shard cannot match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint).
+When unspecified, the pre-filter phase is executed if any of these conditions is met:
+the request targets more than 128 shards;
+the request targets one or more read-only indices;
+the primary sort of the query targets an indexed field.
+** *`request_cache` (Optional, boolean)*: If `true`, the caching of search results is enabled for requests where `size` is `0`.
+Defaults to index level settings.
+** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
+** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling. See Scroll search results.
+By default, this value cannot exceed `1d` (24 hours).
+You can change this limit using the `search.max_keep_alive` cluster-level setting.
+** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: How distributed term frequencies are calculated for relevance scoring.
** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions.
+** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specifies the suggest mode.
+This parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.
+** *`suggest_size` (Optional, number)*: Number of suggestions to return.
+This parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.
** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned.
+This parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.
+** *`typed_keys` (Optional, boolean)*: If `true`, aggregation and suggester names are prefixed by their respective types in the response.
+** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response.
+** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response.
+You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
+If the `_source` parameter is `false`, this parameter is ignored.
+** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response.
+If this parameter is specified, only these source fields are returned.
+You can exclude fields from this subset using the `_source_excludes` query parameter.
+If the `_source` parameter is `false`, this parameter is ignored.
+** *`q` (Optional, string)*: Query in the Lucene query string syntax using query parameter search.
+Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.

[discrete]
=== search_mvt
@@ -940,7 +985,7 @@ don’t include the aggs layer.
each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon
of the cells bounding box. If 'point' each feature is a Point that is the
centroid of the cell.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Query DSL used to filter documents for the search.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Query DSL used to filter documents for the search.
** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
precedence over mapped fields with the same name.
** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000.
@@ -1029,7 +1074,7 @@ client.termsEnum({ index, field })
** *`size` (Optional, number)*: How many matching terms to return.
** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded, the complete flag is set to false in the response and the results may be partial or empty.
** *`case_insensitive` (Optional, boolean)*: When true, the provided search string is matched against index terms without case sensitivity.
-** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none.
+** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none.
** *`string` (Optional, string)*: The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request.
** *`search_after` (Optional, string)* @@ -1122,7 +1167,7 @@ client.updateByQuery({ index }) * *Request (object):* ** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices ** *`max_docs` (Optional, number)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`script` (Optional, { lang, options, source } | { id })* ** *`slice` (Optional, { field, id, max })* ** *`conflicts` (Optional, Enum("abort" | "proceed"))* @@ -1263,9 +1308,9 @@ names matching these patterns in the hits.fields property of the response. ** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter }[])*: Defines the approximate kNN search to run. ** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. 
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`rescore` (Optional, { query, window_size } | { query, window_size }[])* ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* @@ -2471,7 +2516,7 @@ client.enrich.deletePolicy({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the enrich policy +** *`name` (string)*: Enrich policy to delete. 
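+
+As a quick usage sketch (the policy name `users-policy` is illustrative, not part of the API):
+
+[source,ts]
+----
+await client.enrich.deletePolicy({ name: 'users-policy' })
+----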
[discrete] ==== execute_policy @@ -2487,8 +2532,8 @@ client.enrich.executePolicy({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the enrich policy -** *`wait_for_completion` (Optional, boolean)*: Should the request should block until the execution is complete. +** *`name` (string)*: Enrich policy to execute. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks other enrich policy execution requests until complete. [discrete] ==== get_policy @@ -2504,7 +2549,8 @@ client.enrich.getPolicy({ ... }) ==== Arguments * *Request (object):* -** *`name` (Optional, string | string[])*: A list of enrich policy names +** *`name` (Optional, string | string[])*: List of enrich policy names used to limit the request. +To return information for all enrich policies, omit this parameter. [discrete] ==== put_policy @@ -2520,10 +2566,10 @@ client.enrich.putPolicy({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the enrich policy -** *`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })* -** *`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })* -** *`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })* +** *`name` (string)*: Name of the enrich policy to create or update. +** *`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches enrich data to incoming documents based on a `geo_shape` query. +** *`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches enrich data to incoming documents based on a `term` query. +** *`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. [discrete] ==== stats @@ -2553,12 +2599,14 @@ client.eql.delete({ id }) * *Request (object):* ** *`id` (string)*: Identifier for the search to delete. +A search ID is provided in the EQL search API's response for an async search. +A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. [discrete] ==== get Returns async results from previously executed Event Query Language (EQL) search -{ref}/eql-search-api.html[Endpoint documentation] +{ref}/get-async-eql-search-api.html[Endpoint documentation] [source,ts] ---- client.eql.get({ id }) @@ -2569,14 +2617,16 @@ client.eql.get({ id }) * *Request (object):* ** *`id` (string)*: Identifier for the search. -** *`keep_alive` (Optional, string | -1 | 0)*: Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. +** *`keep_alive` (Optional, string | -1 | 0)*: Period for which the search and its results are stored on the cluster. +Defaults to the keep_alive value set by the search’s EQL search API request. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Timeout duration to wait for the request to finish. +Defaults to no timeout, meaning the request waits for complete search results. 
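+
+As a usage sketch, you might poll a previously submitted async EQL search like this (the `id` value is a placeholder for the search ID returned by the EQL search API):
+
+[source,ts]
+----
+const response = await client.eql.get({
+  id: 'my-async-eql-search-id', // placeholder: use the id from the EQL search response
+  wait_for_completion_timeout: '2s'
+})
+----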
[discrete] ==== get_status Returns the status of a previously submitted async or stored Event Query Language (EQL) search -{ref}/eql-search-api.html[Endpoint documentation] +{ref}/get-async-eql-status-api.html[Endpoint documentation] [source,ts] ---- client.eql.getStatus({ id }) @@ -2609,7 +2659,7 @@ client.eql.search({ index, query }) ** *`tiebreaker_field` (Optional, string)*: Field used to sort hits with the same timestamp in ascending order ** *`timestamp_field` (Optional, string)*: Field containing event timestamp. Default "@timestamp" ** *`fetch_size` (Optional, number)*: Maximum number of events to search at a time for sequence queries. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs. 
** *`keep_alive` (Optional, string | -1 | 0)* ** *`keep_on_completion` (Optional, boolean)* ** *`wait_for_completion_timeout` (Optional, string | -1 | 0)* @@ -2733,9 +2783,9 @@ Defaults to 10,000 hits. names matching these patterns in the hits.fields property of the response. ** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. -** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`rescore` (Optional, { query, window_size } | { query, window_size }[])* ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. 
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
@@ -2821,13 +2871,15 @@ client.graph.explore({ index })
==== Arguments

* *Request (object):*
-** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices
-** *`connections` (Optional, { connections, query, vertices })*
-** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })*
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
-** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])*
-** *`routing` (Optional, string)*: Specific routing value
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`index` (string | string[])*: Name of the index.
+** *`connections` (Optional, { connections, query, vertices })*: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
+** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })*: Directs the Graph API how to build the graph.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
+** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])*: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
+** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
+** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for a response from each shard.
+If no response is received before the timeout expires, the request fails and returns an error.
+Defaults to no timeout.

[discrete]
=== ilm
@@ -3058,16 +3110,22 @@ client.indices.analyze({ ... 
}) ==== Arguments * *Request (object):* -** *`index` (Optional, string)*: The name of the index to scope the operation -** *`analyzer` (Optional, string)* -** *`attributes` (Optional, string[])* -** *`char_filter` (Optional, string | { type } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])* -** *`explain` (Optional, boolean)* -** *`field` (Optional, string)* -** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, rule_files } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])* -** *`normalizer` (Optional, string)* -** *`text` (Optional, string | string[])* -** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, max_token_length } | { type, max_token_length } | { type, max_token_length } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, flags, group, pattern } 
| { type, rule_files })* +** *`index` (Optional, string)*: Index used to derive the analyzer. +If specified, the `analyzer` or field parameter overrides this value. +If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. +** *`analyzer` (Optional, string)*: The name of the analyzer that should be applied to the provided `text`. +This could be a built-in analyzer, or an analyzer that’s been configured in the index. +** *`attributes` (Optional, string[])*: Array of token attributes used to filter the output of the `explain` parameter. +** *`char_filter` (Optional, string | { type } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])*: Array of character filters used to preprocess characters before the tokenizer. +** *`explain` (Optional, boolean)*: If `true`, the response includes token attributes and additional details. +** *`field` (Optional, string)*: Field used to derive the analyzer. +To use this parameter, you must specify an index. +If specified, the `analyzer` parameter overrides this value. +** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, rule_files } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to 
process the token stream after the tokenizer.
+** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token.
+** *`text` (Optional, string | string[])*: Text to analyze.
+If an array of strings is provided, it is analyzed as a multi-value field.
+** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, max_token_length } | { type, max_token_length } | { type, max_token_length } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, flags, group, pattern } | { type, rule_files })*: Tokenizer to use to convert text into tokens.

[discrete]
==== clear_cache
Clears all or specific caches for one or more indices.

{ref}/indices-clearcache.html[Endpoint documentation]
[source,ts]
----
client.indices.clearCache({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`index` (Optional, string | string[])*: A list of index name to limit the operation
-** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`fielddata` (Optional, boolean)*: Clear field data
-** *`fields` (Optional, string | string[])*: A list of fields to clear when using the `fielddata` parameter (default: all)
-** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
-** *`query` (Optional, boolean)*: Clear query caches
-** *`request` (Optional, boolean)*: Clear request cache
+** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+** *`fielddata` (Optional, boolean)*: If `true`, clears the fields cache.
+Use the `fields` parameter to clear the cache of specific fields only.
+** *`fields` (Optional, string | string[])*: List of field names used to limit the `fielddata` parameter.
+** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
+** *`query` (Optional, boolean)*: If `true`, clears the query cache.
+** *`request` (Optional, boolean)*: If `true`, clears the request cache.
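+
+For instance, a minimal sketch that clears only the fields cache for a single index (the index and field names are illustrative):
+
+[source,ts]
+----
+await client.indices.clearCache({
+  index: 'my-index',
+  fielddata: true,
+  fields: 'my_field'
+})
+----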
[discrete] ==== clone @@ -3106,19 +3171,22 @@ client.indices.clone({ index, target }) ==== Arguments * *Request (object):* -** *`index` (string)*: The name of the source index to clone -** *`target` (string)*: The name of the target index to clone into -** *`aliases` (Optional, Record)* -** *`settings` (Optional, Record)* -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the cloned index before the operation returns. +** *`index` (string)*: Name of the source index to clone. +** *`target` (string)*: Name of the target index to create. +** *`aliases` (Optional, Record)*: Aliases for the resulting index. +** *`settings` (Optional, Record)*: Configuration options for the target index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). [discrete] ==== close Closes an index. -{ref}/indices-open-close.html[Endpoint documentation] +{ref}/indices-close.html[Endpoint documentation] [source,ts] ---- client.indices.close({ index }) @@ -3128,13 +3196,20 @@ client.indices.close({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A comma separated list of indices to close -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of active shards to wait for before the operation returns. +** *`index` (string | string[])*: List or wildcard expression of index names used to limit the request. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
+Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). [discrete] ==== create @@ -3150,16 +3225,19 @@ client.indices.create({ index }) ==== Arguments * *Request (object):* -** *`index` (string)*: The name of the index -** *`aliases` (Optional, Record)* +** *`index` (string)*: Name of the index you wish to create. +** *`aliases` (Optional, Record)*: Aliases for the index. ** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })*: Mapping for fields in the index. If specified, this mapping can include: - Field names - Field data types - Mapping parameters -** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, shards, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })* -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for before the operation returns. 
+** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, shards, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). [discrete] ==== create_data_stream @@ -3196,8 +3274,11 @@ client.indices.dataStreamsStats({ ... }) ==== Arguments * *Request (object):* -** *`name` (Optional, string)*: A list of data stream names; use `_all` or empty string to perform the operation on all data streams -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* +** *`name` (Optional, string)*: List of data streams used to limit the request. +Wildcard expressions (`*`) are supported. +To target all data streams in a cluster, omit this parameter or use `*`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. [discrete] ==== delete @@ -3213,12 +3294,21 @@ client.indices.delete({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A list of indices to delete; use `_all` or `*` string to delete all indices -** *`allow_no_indices` (Optional, boolean)*: Ignore if a wildcard expression resolves to no concrete indices (default: false) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open, closed, or hidden indices -** *`ignore_unavailable` (Optional, boolean)*: Ignore unavailable indexes (default: false) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`index` (string | string[])*: List of indices to delete. +You cannot specify index aliases. +By default, this parameter does not support wildcards (`*`) or `_all`. +To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. 
+** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== delete_alias @@ -3234,10 +3324,14 @@ client.indices.deleteAlias({ index, name }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A list of index names (supports wildcards); use `_all` for all indices -** *`name` (string | string[])*: A list of aliases to delete (supports wildcards); use `_all` to delete all aliases for the specified indices. -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit timestamp for the document +** *`index` (string | string[])*: List of data streams or indices used to limit the request. +Supports wildcards (`*`). +** *`name` (string | string[])*: List of aliases to remove. +Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== delete_data_lifecycle @@ -3307,9 +3401,12 @@ client.indices.deleteTemplate({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the template -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`name` (string)*: The name of the legacy index template to delete. +Wildcard (`*`) expressions are supported. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== disk_usage @@ -3325,18 +3422,25 @@ client.indices.diskUsage({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, and aliases used to limit the request. 
It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as open,hidden. -** *`flush` (Optional, boolean)*: If true, the API performs a flush before analysis. If false, the response may not include uncommitted data. -** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. -** *`run_expensive_tasks` (Optional, boolean)*: Analyzing field disk usage is resource-intensive. To use the API, this parameter must be set to true. +** *`index` (string | string[])*: List of data streams, indices, and aliases used to limit the request. +It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +** *`flush` (Optional, boolean)*: If `true`, the API performs a flush before analysis. +If `false`, the response may not include uncommitted data. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. +** *`run_expensive_tasks` (Optional, boolean)*: Analyzing field disk usage is resource-intensive. +To use the API, this parameter must be set to `true`. [discrete] ==== downsample Downsample an index -{ref}/xpack-rollup.html[Endpoint documentation] +{ref}/indices-downsample-data-stream.html[Endpoint documentation] [source,ts] ---- client.indices.downsample({ index, target_index }) @@ -3346,8 +3450,8 @@ client.indices.downsample({ index, target_index }) ==== Arguments * *Request (object):* -** *`index` (string)*: The index to downsample -** *`target_index` (string)*: The name of the target index to store downsampled data +** *`index` (string)*: Name of the time series index to downsample. +** *`target_index` (string)*: Name of the index to create. 
** *`config` (Optional, { fixed_interval })* [discrete] @@ -3364,13 +3468,17 @@ client.indices.exists({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A list of index names -** *`allow_no_indices` (Optional, boolean)*: Ignore if a wildcard expression resolves to no concrete indices (default: false) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open) -** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) -** *`ignore_unavailable` (Optional, boolean)*: Ignore unavailable indexes (default: false) -** *`include_defaults` (Optional, boolean)*: Whether to return all default setting for each of the indices. -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) +** *`index` (string | string[])*: List of data streams, indices, and aliases. Supports wildcards (`*`). +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. [discrete] ==== exists_alias @@ -3386,12 +3494,17 @@ client.indices.existsAlias({ name }) ==== Arguments * *Request (object):* -** *`name` (string | string[])*: A list of alias names to return -** *`index` (Optional, string | string[])*: A list of index names to filter aliases -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) +** *`name` (string | string[])*: List of aliases to check. Supports wildcards (`*`). +** *`index` (Optional, string | string[])*: List of data streams or indices used to limit the request. Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. 
+** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. [discrete] ==== exists_index_template @@ -3462,21 +3575,20 @@ client.indices.fieldUsageStats({ index }) * *Request (object):* ** *`index` (string | string[])*: List or wildcard expression of index names used to limit the request. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets -only missing or closed indices. This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index -starts with `bar`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument -determines whether wildcard expressions match hidden data streams. Supports a list of values, -such as `open,hidden`. -** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. ** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, -the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails -and returns an error. 
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to all or any -positive integer up to the total number of shards in the index (`number_of_replicas+1`). +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). [discrete] ==== flush @@ -3492,12 +3604,19 @@ client.indices.flush({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string for all indices -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`force` (Optional, boolean)*: Whether a flush should be forced even if it is not necessarily needed ie. if no changes will be committed to the index. This is useful if transaction log IDs should be incremented even if no uncommitted changes are present. (This setting can be considered as internal) -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`wait_if_ongoing` (Optional, boolean)*: If set to true the flush operation will block until the flush can be executed if another flush operation is already executing. The default is true. If set to false the flush will be skipped iff if another flush operation is already running. +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to flush. +Supports wildcards (`*`). +To flush all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`force` (Optional, boolean)*: If `true`, the request forces a flush even if there are no changes to commit to the index. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. 
+** *`wait_if_ongoing` (Optional, boolean)*: If `true`, the flush operation blocks until execution when another flush operation is running. +If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. [discrete] ==== forcemerge @@ -3565,12 +3684,20 @@ client.indices.getAlias({ ... }) ==== Arguments * *Request (object):* -** *`name` (Optional, string | string[])*: A list of alias names to return -** *`index` (Optional, string | string[])*: A list of index names to filter aliases -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) +** *`name` (Optional, string | string[])*: List of aliases to retrieve. +Supports wildcards (`*`). +To retrieve all aliases, omit this parameter or use `*` or `_all`. +** *`index` (Optional, string | string[])*: List of data streams or indices used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. [discrete] ==== get_data_lifecycle @@ -3586,9 +3713,13 @@ client.indices.getDataLifecycle({ name }) ==== Arguments * *Request (object):* -** *`name` (string | string[])*: A list of data streams to get; use `*` to get all data streams -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open) -** *`include_defaults` (Optional, boolean)*: Return all relevant default configurations for the data stream (default: false) +** *`name` (string | string[])*: List of data streams to limit the request. +Supports wildcards (`*`). +To target all data streams, omit this parameter or use `*` or `_all`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. 
+Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. [discrete] ==== get_data_stream @@ -3624,13 +3755,19 @@ client.indices.getFieldMapping({ fields }) ==== Arguments * *Request (object):* -** *`fields` (string | string[])*: A list of fields -** *`index` (Optional, string | string[])*: A list of index names -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`include_defaults` (Optional, boolean)*: Whether the default mapping values should be returned as well -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) +** *`fields` (string | string[])*: List or wildcard expression of fields used to limit returned information. +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. [discrete] ==== get_index_template @@ -3666,12 +3803,19 @@ client.indices.getMapping({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
-** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_settings @@ -3687,15 +3831,26 @@ client.indices.getSettings({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices -** *`name` (Optional, string | string[])*: The name of the settings that should be included -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`include_defaults` (Optional, boolean)*: Whether to return all default setting for each of the indices. -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit +the request. Supports wildcards (`*`). To target all data streams and +indices, omit this parameter or use `*` or `_all`. +** *`name` (Optional, string | string[])*: List or wildcard expression of settings to retrieve. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index +alias, or `_all` value targets only missing or closed indices. 
This +behavior applies even if the request targets other open indices. For +example, a request targeting `foo*,bar*` returns an error if an index +starts with foo but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. If +`false`, information is retrieved from the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. [discrete] ==== get_template @@ -3711,10 +3866,13 @@ client.indices.getTemplate({ ... }) ==== Arguments * *Request (object):* -** *`name` (Optional, string | string[])*: The comma separated names of the index templates -** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`name` (Optional, string | string[])*: List of index template names used to limit the request. +Wildcard (`*`) expressions are supported. +To return all index templates, omit this parameter or use a value of `_all` or `*`. +** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== migrate_to_data_stream @@ -3730,7 +3888,7 @@ client.indices.migrateToDataStream({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the alias to migrate +** *`name` (string)*: Name of the index alias to convert to a data stream. [discrete] ==== modify_data_stream @@ -3762,13 +3920,24 @@ client.indices.open({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A comma separated list of indices to open -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
-** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
-** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of active shards to wait for before the operation returns.
+** *`index` (string | string[])*: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+By default, you must explicitly name the indices you are using to limit the request.
+To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
+You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API.
+** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).

[discrete]
==== promote_data_stream
Promotes a data stream from a replicated data stream managed by CCR to a regular data stream

{ref}/data-streams.html[Endpoint documentation]
[source,ts]
----
client.indices.promoteDataStream({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The name of the data stream

[discrete]
==== put_alias
Creates or updates an alias.

{ref}/indices-aliases.html[Endpoint documentation]
[source,ts]
----
client.indices.putAlias({ index, name })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`index` (string | string[])*: A list of index names the alias should point to (supports wildcards); use `_all` to perform the operation on all indices
-** *`name` (string)*: The name of the alias to be created or updated -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`index_routing` (Optional, string)* -** *`is_write_index` (Optional, boolean)* -** *`routing` (Optional, string)* -** *`search_routing` (Optional, string)* -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit timestamp for the document +** *`index` (string | string[])*: List of data streams or indices to add. +Supports wildcards (`*`). +Wildcard patterns that match both data streams and indices return an error. +** *`name` (string)*: Alias to update. +If the alias doesn’t exist, the request creates it. +Index alias names support date math. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Query used to limit documents the alias can access. +** *`index_routing` (Optional, string)*: Value used to route indexing operations to a specific shard. +If specified, this overwrites the `routing` value for indexing operations. +Data stream aliases don’t support this parameter. +** *`is_write_index` (Optional, boolean)*: If `true`, sets the write index or data stream for the alias. +If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. +If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. +Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. +** *`routing` (Optional, string)*: Value used to route indexing and search operations to a specific shard. +Data stream aliases don’t support this parameter. +** *`search_routing` (Optional, string)*: Value used to route search operations to a specific shard. +If specified, this overwrites the `routing` value for search operations. +Data stream aliases don’t support this parameter. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. 
+If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== put_data_lifecycle
Updates the data stream lifecycle of the selected data streams.

{ref}/data-streams-put-lifecycle.html[Endpoint documentation]
[source,ts]
----
client.indices.putDataLifecycle({ name })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`name` (string | string[])*: A list of data streams whose lifecycle will be updated; use `*` to set the lifecycle to all data streams
-** *`data_retention` (Optional, string | -1 | 0)*
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open)
-** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
-** *`timeout` (Optional, string | -1 | 0)*: Explicit timestamp for the document
+** *`name` (string | string[])*: List of data streams used to limit the request.
+Supports wildcards (`*`).
+To target all data streams, use `*` or `_all`.
+** *`data_retention` (Optional, string | -1 | 0)*: If defined, every document added to this data stream will be stored at least for this time frame.
+Any time after this duration the document could be deleted.
+When empty, every document in this data stream will be stored indefinitely.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match.
+Supports a list of values, such as `open,hidden`.
+Valid values are: `all`, `hidden`, `open`, `closed`, `none`.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== put_index_template
Creates or updates an index template.

{ref}/indices-put-template.html[Endpoint documentation]
[source,ts]
----
client.indices.putIndexTemplate({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: Index or template name
-** *`index_patterns` (Optional, string | string[])*
-** *`composed_of` (Optional, string[])*
-** *`template` (Optional, { aliases, mappings, settings, lifecycle })*
-** *`data_stream` (Optional, { hidden })*
-** *`priority` (Optional, number)*
-** *`version` (Optional, number)*
-** *`_meta` (Optional, Record)*
-** *`create` (Optional, boolean)*: Whether the index template should only be added if new or can also replace an existing one
+** *`index_patterns` (Optional, string | string[])*: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation.
+** *`composed_of` (Optional, string[])*: An ordered list of component template names.
+Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
+** *`template` (Optional, { aliases, mappings, settings, lifecycle })*: Template to be applied.
+It may optionally include an `aliases`, `mappings`, or `settings` configuration.
+** *`data_stream` (Optional, { hidden })*: If this object is included, the template is used to create data streams and their backing indices.
+Supports an empty object.
+Data streams require a matching index template with a `data_stream` object.
+** *`priority` (Optional, number)*: Priority to determine index template precedence when a new data stream or index is created.
+The index template with the highest priority is chosen.
+If no priority is specified the template is treated as though it is of priority 0 (lowest priority). +This number is not automatically generated by Elasticsearch. +** *`version` (Optional, number)*: Version number used to manage index templates externally. +This number is not automatically generated by Elasticsearch. +** *`_meta` (Optional, Record)*: Optional user metadata about the index template. +May have any contents. +This map is not automatically generated by Elasticsearch. +** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing index templates. [discrete] ==== put_mapping @@ -3888,12 +4090,18 @@ application-specific metadata. ** *`_routing` (Optional, { required })*: Enable making a routing value required on indexed documents. ** *`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })*: Control whether the _source field is enabled on the index. ** *`runtime` (Optional, Record)*: Mapping of runtime fields for the index. -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`write_index_only` (Optional, boolean)*: When true, applies mappings only to the write index of an alias or data stream +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`write_index_only` (Optional, boolean)*: If `true`, the mappings are applied only to the current write index for the target. [discrete] ==== put_settings @@ -3909,15 +4117,27 @@ client.indices.putSettings({ ... 
})
----

[discrete]
==== Arguments

* *Request (object):*
-** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
+** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit
+the request. Supports wildcards (`*`). To target all data streams and
+indices, omit this parameter or use `*` or `_all`.
** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, shards, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*
-** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
-** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
-** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
-** *`preserve_existing` (Optional, boolean)*: Whether to update existing settings. If set to `true` existing settings on an index remain unchanged, the default is `false`
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index
+alias, or `_all` value targets only missing or closed indices. This
+behavior applies even if the request targets other open indices. For
+example, a request targeting `foo*,bar*` returns an error if an index
+starts with `foo` but no index starts with `bar`.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target
+data streams, this argument determines whether wildcard expressions match
+hidden data streams. Supports a list of values, such as
+`open,hidden`.
+** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
+** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+** *`preserve_existing` (Optional, boolean)*: If `true`, existing index settings remain unchanged.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the + timeout expires, the request fails and returns an error. [discrete] ==== put_template @@ -3947,10 +4167,11 @@ Templates with lower 'order' values are merged first. Templates with higher ** *`version` (Optional, number)*: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. ** *`create` (Optional, boolean)*: If true, this request cannot replace or update existing index templates. -** *`flat_settings` (Optional, boolean)* +** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)* +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== recovery @@ -3966,9 +4187,11 @@ client.indices.recovery({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices -** *`active_only` (Optional, boolean)*: Display only those recoveries that are currently on-going -** *`detailed` (Optional, boolean)*: Whether to display detailed information about shard recovery +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`active_only` (Optional, boolean)*: If `true`, the response only includes ongoing shard recoveries. +** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. [discrete] ==== refresh @@ -3984,10 +4207,16 @@ client.indices.refresh({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. 
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. [discrete] ==== reload_search_analyzers @@ -4022,8 +4251,12 @@ client.indices.resolveIndex({ name }) ==== Arguments * *Request (object):* -** *`name` (string | string[])*: A list of names or wildcard expressions -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open) +** *`name` (string | string[])*: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. +Resources on remote clusters can be specified using the ``:`` syntax. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== rollover @@ -4040,16 +4273,28 @@ client.indices.rollover({ alias }) ==== Arguments * *Request (object):* -** *`alias` (string)*: The name of the alias to rollover -** *`new_index` (Optional, string)*: The name of the rollover index -** *`aliases` (Optional, Record)* -** *`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })* -** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })* -** *`settings` (Optional, Record)* -** *`dry_run` (Optional, boolean)*: If set to true the rollover action will only be validated but not actually performed even if a condition matches. The default is false -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the newly created rollover index before the operation returns. +** *`alias` (string)*: Name of the data stream or index alias to roll over. +** *`new_index` (Optional, string)*: Name of the index to create. +Supports date math. +Data streams do not support this parameter. +** *`aliases` (Optional, Record)*: Aliases for the target index. +Data streams do not support this parameter. +** *`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })*: Conditions for the rollover. 
+If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions.
+If this parameter is not specified, Elasticsearch performs the rollover unconditionally.
+If conditions are specified, at least one of them must be a `max_*` condition.
+The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied.
+** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })*: Mapping for fields in the index.
+If specified, this mapping can include field names, field data types, and mapping parameters.
+** *`settings` (Optional, Record)*: Configuration options for the index.
+Data streams do not support this parameter.
+** *`dry_run` (Optional, boolean)*: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
+Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).

[discrete]
==== segments
Provides low-level information about segments in a Lucene index.

{ref}/indices-segments.html[Endpoint documentation]
[source,ts]
----
client.indices.segments({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
-** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
-** *`verbose` (Optional, boolean)*: Includes detailed memory usage by Lucene.
+** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
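
As an aside, the segments API documented here usually needs nothing more than an index filter; a hedged sketch follows (hypothetical index name, `client` assumed to be a configured `Client`):

[source,ts]
----
// Sketch only: lists low-level Lucene segment information per index.
const response = await client.indices.segments({ index: 'my-index' })
console.log(response.indices)
----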
+** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`verbose` (Optional, boolean)*: If `true`, the request returns a verbose response. [discrete] ==== shard_stores @@ -4108,13 +4359,17 @@ client.indices.shrink({ index, target }) ==== Arguments * *Request (object):* -** *`index` (string)*: The name of the source index to shrink -** *`target` (string)*: The name of the target index to shrink into -** *`aliases` (Optional, Record)* -** *`settings` (Optional, Record)* -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the shrunken index before the operation returns. +** *`index` (string)*: Name of the source index to shrink. +** *`target` (string)*: Name of the target index to create. +** *`aliases` (Optional, Record)*: The key is the alias name. +Index alias names support date math. +** *`settings` (Optional, Record)*: Configuration options for the target index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). [discrete] ==== simulate_index_template @@ -4131,14 +4386,26 @@ client.indices.simulateIndexTemplate({ name }) * *Request (object):* ** *`name` (string)*: Index or template name to simulate -** *`allow_auto_create` (Optional, boolean)* -** *`index_patterns` (Optional, string | string[])* -** *`composed_of` (Optional, string[])* -** *`template` (Optional, { aliases, mappings, settings, lifecycle })* -** *`data_stream` (Optional, { hidden })* -** *`priority` (Optional, number)* -** *`version` (Optional, number)* -** *`_meta` (Optional, Record)* +** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting. +If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. +If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. +** *`index_patterns` (Optional, string | string[])*: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. +** *`composed_of` (Optional, string[])*: An ordered list of component template names. +Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. +** *`template` (Optional, { aliases, mappings, settings, lifecycle })*: Template to be applied. +It may optionally include an `aliases`, `mappings`, or `settings` configuration. +** *`data_stream` (Optional, { hidden })*: If this object is included, the template is used to create data streams and their backing indices. 
+Supports an empty object. +Data streams require a matching index template with a `data_stream` object. +** *`priority` (Optional, number)*: Priority to determine index template precedence when a new data stream or index is created. +The index template with the highest priority is chosen. +If no priority is specified the template is treated as though it is of priority 0 (lowest priority). +This number is not automatically generated by Elasticsearch. +** *`version` (Optional, number)*: Version number used to manage index templates externally. +This number is not automatically generated by Elasticsearch. +** *`_meta` (Optional, Record)*: Optional user metadata about the index template. +May have any contents. +This map is not automatically generated by Elasticsearch. ** *`create` (Optional, boolean)*: If `true`, the template passed in the body is only used if no existing templates match the same index patterns. If `false`, the simulation uses the template with the highest priority. Note that the template is not @@ -4183,13 +4450,16 @@ client.indices.split({ index, target }) ==== Arguments * *Request (object):* -** *`index` (string)*: The name of the source index to split -** *`target` (string)*: The name of the target index to split into -** *`aliases` (Optional, Record)* -** *`settings` (Optional, Record)* -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Set the number of active shards to wait for on the shrunken index before the operation returns. +** *`index` (string)*: Name of the source index to split. +** *`target` (string)*: Name of the target index to create. +** *`aliases` (Optional, Record)*: Aliases for the resulting index. +** *`settings` (Optional, Record)*: Configuration options for the target index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). [discrete] ==== stats @@ -4233,13 +4503,20 @@ client.indices.unfreeze({ index }) ==== Arguments * *Request (object):* -** *`index` (string)*: The name of the index to unfreeze -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
-** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`wait_for_active_shards` (Optional, string)*: Sets the number of active shards to wait for before the operation returns. +** *`index` (string)*: Identifier for the index. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, string)*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). [discrete] ==== update_aliases @@ -4255,9 +4532,11 @@ client.indices.updateAliases({ ... }) ==== Arguments * *Request (object):* -** *`actions` (Optional, { add_backing_index, remove_backing_index }[])* -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Request timeout +** *`actions` (Optional, { add_backing_index, remove_backing_index }[])*: Actions to perform. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== validate_query @@ -4273,20 +4552,28 @@ client.indices.validateQuery({ ... 
}) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`all_shards` (Optional, boolean)*: Execute validation on all shards instead of one random shard per index -** *`analyzer` (Optional, string)*: The analyzer to use for the query string -** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) -** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`explain` (Optional, boolean)*: Return detailed information about the error -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored -** *`rewrite` (Optional, boolean)*: Provide a more detailed explanation showing the actual Lucene query that will be executed. -** *`q` (Optional, string)*: Query in the Lucene query string syntax +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. +Supports wildcards (`*`). +To search all data streams or indices, omit this parameter or use `*` or `_all`. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Query in the Lucene query string syntax. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. 
+This behavior applies even if the request targets other open indices. +** *`all_shards` (Optional, boolean)*: If `true`, the validation is executed on all shards instead of one random shard per index. +** *`analyzer` (Optional, string)*: Analyzer to use for the query string. +This parameter can only be used when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. +** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. +This parameter can only be used when the `q` query string parameter is specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`explain` (Optional, boolean)*: If `true`, the response returns detailed information if an error has occurred. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. +** *`rewrite` (Optional, boolean)*: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. +** *`q` (Optional, string)*: Query in the Lucene query string syntax. [discrete] === ingest @@ -4304,15 +4591,18 @@ client.ingest.deletePipeline({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Pipeline ID -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`id` (string)*: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. +To delete all ingest pipelines in a cluster, use a value of `*`. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== geo_ip_stats Returns statistical information about geoip databases -{ref}/geoip-stats-api.html[Endpoint documentation] +{ref}/geoip-processor.html[Endpoint documentation] [source,ts] ---- client.ingest.geoIpStats() @@ -4333,8 +4623,11 @@ client.ingest.getPipeline({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string)*: Comma separated list of pipeline ids. Wildcards supported -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`id` (Optional, string)*: List of pipeline IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all ingest pipelines, omit this parameter or use `*`. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. 
+If no response is received before the timeout expires, the request fails and returns an error. ** *`summary` (Optional, boolean)*: Return pipelines without their definitions (default: false) [discrete] @@ -4352,7 +4645,7 @@ client.ingest.processorGrok() ==== put_pipeline Creates or updates a pipeline. -{ref}/put-pipeline-api.html[Endpoint documentation] +{ref}/ingest.html[Endpoint documentation] [source,ts] ---- client.ingest.putPipeline({ id }) @@ -4386,10 +4679,13 @@ client.ingest.simulate({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string)*: Pipeline ID -** *`docs` (Optional, { _id, _index, _source }[])* -** *`pipeline` (Optional, { description, on_failure, processors, version })* -** *`verbose` (Optional, boolean)*: Verbose mode. Display data output for each processor in executed pipeline +** *`id` (Optional, string)*: Pipeline to test. +If you don’t specify a `pipeline` in the request body, this parameter is required. +** *`docs` (Optional, { _id, _index, _source }[])*: Sample documents to test in the pipeline. +** *`pipeline` (Optional, { description, on_failure, processors, version })*: Pipeline to test. +If you don’t specify the `pipeline` request path parameter, this parameter is required. +If you specify both this and the request path parameter, the API only uses the request path parameter. +** *`verbose` (Optional, boolean)*: If `true`, the response includes output data for each processor in the executed pipeline. [discrete] === license @@ -4511,7 +4807,7 @@ client.logstash.deletePipeline({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: The ID of the Pipeline +** *`id` (string)*: Identifier for the pipeline. [discrete] ==== get_pipeline @@ -4527,7 +4823,7 @@ client.logstash.getPipeline({ id }) ==== Arguments * *Request (object):* -** *`id` (string | string[])*: A list of Pipeline IDs +** *`id` (string | string[])*: List of pipeline identifiers. [discrete] ==== put_pipeline @@ -4543,7 +4839,7 @@ client.logstash.putPipeline({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: The ID of the Pipeline +** *`id` (string)*: Identifier for the pipeline. ** *`pipeline` (Optional, { description, on_failure, processors, version })* [discrete] @@ -4894,7 +5190,7 @@ client.ml.evaluateDataFrame({ evaluation, index }) * *Request (object):* ** *`evaluation` ({ classification, outlier_detection, regression })*: Defines the type of evaluation you want to perform. ** *`index` (string)*: Defines the `index` in which the evaluation will be performed. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index. [discrete] ==== explain_data_frame_analytics @@ -5776,7 +6072,7 @@ learning nodes must have the `remote_cluster_client` role. stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. ** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might @@ -6220,7 +6516,7 @@ learning nodes must have the `remote_cluster_client` role. stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. 
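
The hunks around this point all revise the datafeed `query` parameter, which is passed verbatim to Elasticsearch as the descriptions above note. A hedged sketch of supplying it when creating a datafeed (all identifiers hypothetical, `client` assumed to be a configured `Client`, and the referenced anomaly detection job must already exist):

[source,ts]
----
// Sketch only: creates a datafeed whose `query` object is forwarded
// verbatim to Elasticsearch, per the description above.
await client.ml.putDatafeed({
  datafeed_id: 'datafeed-example',
  job_id: 'job-example',
  indices: ['my-data'],
  query: { match_all: {} }
})
----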
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also changed. Therefore, the time required to learn might be long and the understandability of the results is @@ -6534,6 +6830,74 @@ client.nodes.usage({ ... }) ** *`metric` (Optional, string | string[])*: Limit the information returned to the specified metrics ** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +[discrete] +=== query_ruleset +[discrete] +==== delete +Deletes a query ruleset. + +{ref}/delete-query-ruleset.html[Endpoint documentation] +[source,ts] +---- +client.queryRuleset.delete({ ruleset_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset to delete + +[discrete] +==== get +Returns the details about a query ruleset. + +{ref}/get-query-ruleset.html[Endpoint documentation] +[source,ts] +---- +client.queryRuleset.get({ ruleset_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset + +[discrete] +==== list +Lists query rulesets. + +{ref}/list-query-rulesets.html[Endpoint documentation] +[source,ts] +---- +client.queryRuleset.list({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`from` (Optional, number)*: Starting offset (default: 0) +** *`size` (Optional, number)*: specifies a max number of results to get + +[discrete] +==== put +Creates or updates a query ruleset. 
+ +{ref}/put-query-ruleset.html[Endpoint documentation] +[source,ts] +---- +client.queryRuleset.put({ ruleset_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated +** *`query_ruleset` (Optional, { ruleset_id, rules })* + [discrete] === rollup [discrete] @@ -6657,7 +7021,7 @@ client.rollup.rollupSearch({ index }) * *Request (object):* ** *`index` (string | string[])*: The indices or index-pattern(s) (containing rollup or regular data) that should be searched ** *`aggregations` (Optional, Record)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data ** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response ** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response @@ -6995,7 +7359,9 @@ client.security.clearApiKeyCache({ ids }) ==== Arguments * *Request (object):* -** *`ids` (string | string[])*: A list of IDs of API keys to clear from the cache +** *`ids` (string | string[])*: List of API key IDs to evict from the API key cache. +To evict all API keys, use `*`. +Does not support other wildcard patterns. [discrete] ==== clear_cached_privileges @@ -7081,20 +7447,9 @@ client.security.createApiKey({ ... }) ** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. By default, API keys never expire. ** *`name` (Optional, string)*: Specifies the name for this API key. ** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. 
For more details, see create or update roles API. -** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. -[discrete] -==== create_cross_cluster_api_key -Creates a cross-cluster API key for API key based remote cluster access. - -{ref}/security-api-create-cross-cluster-api-key.html[Endpoint documentation] -[source,ts] ----- -client.security.createCrossClusterApiKey() ----- - - [discrete] ==== create_service_token Creates a service account token for access without requiring basic authentication. @@ -7272,11 +7627,17 @@ client.security.getApiKey({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string)*: API key id of the API key to be retrieved -** *`name` (Optional, string)*: API key name of the API key to be retrieved -** *`owner` (Optional, boolean)*: flag to query API keys owned by the currently authenticated user -** *`realm_name` (Optional, string)*: realm name of the user who created this API key to be retrieved -** *`username` (Optional, string)*: user name of the user who created this API key to be retrieved +** *`id` (Optional, string)*: An API key id. +This parameter cannot be used with any of `name`, `realm_name` or `username`. +** *`name` (Optional, string)*: An API key name. +This parameter cannot be used with any of `id`, `realm_name` or `username`. +It supports prefix search with wildcard. +** *`owner` (Optional, boolean)*: A boolean flag that can be used to query API keys owned by the currently authenticated user. +The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. +** *`realm_name` (Optional, string)*: The name of an authentication realm. +This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. +** *`username` (Optional, string)*: The username of a user. +This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. ** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role @@ -7446,12 +7807,17 @@ client.security.grantApiKey({ api_key, grant_type }) ==== Arguments * *Request (object):* -** *`api_key` ({ name, expiration, role_descriptors, metadata })* -** *`grant_type` (Enum("access_token" | "password"))* -** *`access_token` (Optional, string)* -** *`username` (Optional, string)* -** *`password` (Optional, string)* -** *`run_as` (Optional, string)* +** *`api_key` ({ name, expiration, role_descriptors, metadata })*: Defines the API key. +** *`grant_type` (Enum("access_token" | "password"))*: The type of grant. Supported grant types are: `access_token`, `password`. +** *`access_token` (Optional, string)*: The user’s access token. 
+If you specify the `access_token` grant type, this parameter is required. +It is not valid with other grant types. +** *`username` (Optional, string)*: The user name that identifies the user. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +** *`password` (Optional, string)*: The user’s password. If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +** *`run_as` (Optional, string)*: The name of the user to be impersonated. [discrete] ==== has_privileges @@ -7487,11 +7853,16 @@ client.security.invalidateApiKey({ ... }) * *Request (object):* ** *`id` (Optional, string)* -** *`ids` (Optional, string[])* -** *`name` (Optional, string)* -** *`owner` (Optional, boolean)* -** *`realm_name` (Optional, string)* -** *`username` (Optional, string)* +** *`ids` (Optional, string[])*: A list of API key ids. +This parameter cannot be used with any of `name`, `realm_name`, or `username`. +** *`name` (Optional, string)*: An API key name. +This parameter cannot be used with any of `ids`, `realm_name` or `username`. +** *`owner` (Optional, boolean)*: Can be used to query API keys owned by the currently authenticated user. +The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. +** *`realm_name` (Optional, string)*: The name of an authentication realm. +This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. +** *`username` (Optional, string)*: The username of a user. +This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. [discrete] ==== invalidate_token @@ -7646,21 +8017,20 @@ client.security.queryApiKeys({ ... }) ==== Arguments * *Request (object):* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: A query to filter which API keys to return. -The query supports a subset of query types, including match_all, bool, term, terms, ids, prefix, wildcard, and range. -You can query all public information associated with an API key -** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000 -hits using the from and size parameters. To page through more hits, use the -search_after parameter. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])* -** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more -than 10,000 hits using the from and size parameters. To page through more -hits, use the search_after parameter. 
-** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* -** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors -associated with the API key. An API key's actual -permission is the intersection of its assigned role -descriptors and the owner user's role descriptors. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: A query to filter which API keys to return. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `ids`, `prefix`, `wildcard`, and `range`. +You can query all public information associated with an API key. +** *`from` (Optional, number)*: Starting document offset. +By default, you cannot page through more than 10,000 hits using the from and size parameters. +To page through more hits, use the `search_after` parameter. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Other than `id`, all public fields of an API key are eligible for sorting. +In addition, sort can also be applied to the `_doc` field to sort by index order. +** *`size` (Optional, number)*: The number of hits to return. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Search after definition +** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors associated with the API key. +An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors. [discrete] ==== saml_authenticate @@ -7795,17 +8165,6 @@ client.security.updateApiKey({ id }) ** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API. ** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage. -[discrete] -==== update_cross_cluster_api_key -Updates attributes of an existing cross-cluster API key. 
- -{ref}/security-api-update-cross-cluster-api-key.html[Endpoint documentation] -[source,ts] ----- -client.security.updateCrossClusterApiKey() ----- - - [discrete] === slm [discrete] @@ -8275,7 +8634,7 @@ client.sql.query({ ... }) ** *`columnar` (Optional, boolean)*: If true, the results in a columnar fashion: one row represents all the values of a certain column from the current page of results. ** *`cursor` (Optional, string)* ** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Optional Elasticsearch query DSL for additional filtering. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Optional Elasticsearch query DSL for additional filtering. ** *`query` (Optional, string)*: SQL query to execute ** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails. ** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails. 
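
The `sql.query` hunk above only widens the `filter` type list, but a usage sketch may still help; the index name, field, and time range are hypothetical, and `client` is assumed to be a configured `Client`:

[source,ts]
----
// Sketch only: runs an Elasticsearch SQL query narrowed by a DSL filter.
const response = await client.sql.query({
  query: 'SELECT message FROM "my-index" LIMIT 5',
  filter: { range: { '@timestamp': { gte: 'now-1d' } } }
})
console.log(response.columns, response.rows)
----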
@@ -8306,7 +8665,7 @@ client.sql.translate({ query }) * *Request (object):* ** *`query` (string)* ** *`fetch_size` (Optional, number)* -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* ** *`time_zone` (Optional, string)* [discrete] @@ -8325,37 +8684,124 @@ client.ssl.certificates() [discrete] === synonyms [discrete] -==== delete +==== delete_synonym Deletes a synonym set -{ref}/delete-synonyms.html[Endpoint documentation] +{ref}/delete-synonyms-set.html[Endpoint documentation] [source,ts] ---- -client.synonyms.delete() +client.synonyms.deleteSynonym({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The id of the synonyms set to be deleted [discrete] -==== get +==== delete_synonym_rule +Deletes a synonym rule in a synonym set + +{ref}/delete-synonym-rule.html[Endpoint documentation] +[source,ts] +---- +client.synonyms.deleteSynonymRule({ set_id, rule_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`set_id` (string)*: The id of the synonym set to be updated +** *`rule_id` (string)*: The id of the synonym rule to be deleted + +[discrete] +==== get_synonym Retrieves a synonym set -{ref}/get-synonyms.html[Endpoint documentation] +{ref}/get-synonyms-set.html[Endpoint documentation] [source,ts] ---- -client.synonyms.get() +client.synonyms.getSynonym({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: "The id of the synonyms set to be retrieved +** *`from` (Optional, number)*: Starting offset for query rules to be retrieved +** *`size` (Optional, number)*: specifies a max number of query rules to retrieve [discrete] -==== put +==== get_synonym_rule +Retrieves a synonym rule from a synonym set + +{ref}/get-synonym-rule.html[Endpoint documentation] +[source,ts] +---- +client.synonyms.getSynonymRule({ set_id, rule_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`set_id` (string)*: The id of the synonym set to retrieve the synonym rule from +** *`rule_id` (string)*: The id of the synonym rule to retrieve + +[discrete] +==== get_synonyms_sets +Retrieves a summary of all defined synonym sets + +{ref}/list-synonyms-sets.html[Endpoint documentation] +[source,ts] +---- +client.synonyms.getSynonymsSets({ ... 
}) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`from` (Optional, number)*: Starting offset +** *`size` (Optional, number)*: specifies a max number of results to get + +[discrete] +==== put_synonym Creates or updates a synonyms set -{ref}/put-synonyms.html[Endpoint documentation] +{ref}/put-synonyms-set.html[Endpoint documentation] +[source,ts] +---- +client.synonyms.putSynonym({ id, synonyms_set }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The id of the synonyms set to be created or updated +** *`synonyms_set` ({ id, synonyms }[])*: The synonym set information to update + +[discrete] +==== put_synonym_rule +Creates or updates a synonym rule in a synonym set + +{ref}/put-synonym-rule.html[Endpoint documentation] [source,ts] ---- -client.synonyms.put() +client.synonyms.putSynonymRule({ set_id, rule_id, synonyms }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`set_id` (string)*: The id of the synonym set to be updated with the synonym rule +** *`rule_id` (string)*: The id of the synonym rule to be updated or created +** *`synonyms` (string[])* [discrete] === tasks @@ -8906,7 +9352,7 @@ client.watcher.queryWatches({ ... }) * *Request (object):* ** *`from` (Optional, number)*: The offset from the first result to fetch. Needs to be non-negative. ** *`size` (Optional, number)*: The number of hits to return. Needs to be non-negative. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Optional, query filter watches to be returned. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Optional, query filter watches to be returned. ** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Optional sort definition. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Optional search After to do pagination using last hit’s sort values. diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index a0c36598b..e684cebfc 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -45,7 +45,7 @@ export default class AsyncSearch { /** * Deletes an async search by ID. If the search is still running, the search request will be cancelled. 
Otherwise, the saved search results are deleted. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/async-search.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} */ async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -71,7 +71,7 @@ export default class AsyncSearch { /** * Retrieves the results of a previously submitted async search request given its ID. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/async-search.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} */ async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> @@ -97,7 +97,7 @@ export default class AsyncSearch { /** * Retrieves the status of a previously submitted async search request given its ID. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/async-search.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} */ async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -123,7 +123,7 @@ export default class AsyncSearch { /** * Executes a search request asynchronously. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/async-search.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} */ async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index a23212f2e..941f81083 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -45,7 +45,7 @@ export default class Autoscaling { /** * Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. 
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/autoscaling-delete-autoscaling-policy.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-delete-autoscaling-policy.html | Elasticsearch API documentation}
     */
   async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -71,7 +71,7 @@ export default class Autoscaling {
 
   /**
     * Gets the current autoscaling capacity based on the configured autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/autoscaling-get-autoscaling-capacity.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-get-autoscaling-capacity.html | Elasticsearch API documentation}
     */
   async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -98,7 +98,7 @@ export default class Autoscaling {
 
   /**
     * Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/autoscaling-get-autoscaling-capacity.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-get-autoscaling-capacity.html | Elasticsearch API documentation}
     */
   async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -124,7 +124,7 @@ export default class Autoscaling {
 
   /**
     * Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/autoscaling-put-autoscaling-policy.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-put-autoscaling-policy.html | Elasticsearch API documentation}
     */
   async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts
index 41a700dfe..f84ea175f 100644
--- a/src/api/api/bulk.ts
+++ b/src/api/api/bulk.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
   * Allows to perform multiple index/update/delete operations in a single request.
-  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-bulk.html Elasticsearch API docs}
+  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html | Elasticsearch API documentation}
   */
 export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts
index 127c198c8..af8b64da8 100644
--- a/src/api/api/cat.ts
+++ b/src/api/api/cat.ts
@@ -45,7 +45,7 @@ export default class Cat {
 
   /**
     * Shows information about currently configured aliases to indices including filter and routing infos.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-alias.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html | Elasticsearch API documentation}
     */
   async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -79,7 +79,7 @@ export default class Cat {
 
   /**
     * Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-allocation.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html | Elasticsearch API documentation}
     */
   async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -113,7 +113,7 @@ export default class Cat {
 
   /**
     * Returns information about existing component_templates templates.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-component-templates.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html | Elasticsearch API documentation}
     */
   async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -147,7 +147,7 @@ export default class Cat {
 
   /**
     * Provides quick access to the document count of the entire cluster, or individual indices.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-count.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html | Elasticsearch API documentation}
     */
   async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -181,7 +181,7 @@ export default class Cat {
 
   /**
     * Shows how much heap memory is currently being used by fielddata on every data node in the cluster.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-fielddata.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html | Elasticsearch API documentation}
     */
   async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -215,7 +215,7 @@ export default class Cat {
 
   /**
     * Returns a concise representation of the cluster health.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-health.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html | Elasticsearch API documentation}
     */
   async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -242,7 +242,7 @@ export default class Cat {
 
   /**
     * Returns help for the Cat APIs.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html | Elasticsearch API documentation}
     */
   async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -269,7 +269,7 @@ export default class Cat {
 
   /**
     * Returns information about indices: number of primaries and replicas, document counts, disk size, ...
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-indices.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html | Elasticsearch API documentation}
     */
   async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -303,7 +303,7 @@ export default class Cat {
 
   /**
     * Returns information about the master node.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-master.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-master.html | Elasticsearch API documentation}
     */
   async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -330,7 +330,7 @@ export default class Cat {
 
   /**
     * Gets configuration and usage information about data frame analytics jobs.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-dfanalytics.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-dfanalytics.html | Elasticsearch API documentation}
     */
   async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -364,7 +364,7 @@ export default class Cat {
 
   /**
     * Gets configuration and usage information about datafeeds.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-datafeeds.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-datafeeds.html | Elasticsearch API documentation}
     */
   async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -398,7 +398,7 @@ export default class Cat {
 
   /**
     * Gets configuration and usage information about anomaly detection jobs.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-anomaly-detectors.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-anomaly-detectors.html | Elasticsearch API documentation}
     */
   async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -432,7 +432,7 @@ export default class Cat {
 
   /**
     * Gets configuration and usage information about inference trained models.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-trained-model.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-trained-model.html | Elasticsearch API documentation}
     */
   async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -466,7 +466,7 @@ export default class Cat {
 
   /**
     * Returns information about custom node attributes.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-nodeattrs.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html | Elasticsearch API documentation}
     */
   async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -493,7 +493,7 @@ export default class Cat {
 
   /**
     * Returns basic statistics about performance of cluster nodes.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-nodes.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html | Elasticsearch API documentation}
     */
   async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -520,7 +520,7 @@ export default class Cat {
 
   /**
     * Returns a concise representation of the cluster pending tasks.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-pending-tasks.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html | Elasticsearch API documentation}
     */
   async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -547,7 +547,7 @@ export default class Cat {
 
   /**
     * Returns information about installed plugins across nodes.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-plugins.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html | Elasticsearch API documentation}
     */
   async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -574,7 +574,7 @@ export default class Cat {
 
   /**
     * Returns information about index shard recoveries, both on-going and completed.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-recovery.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html | Elasticsearch API documentation}
     */
   async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -608,7 +608,7 @@ export default class Cat {
 
   /**
     * Returns information about snapshot repositories registered in the cluster.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-repositories.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html | Elasticsearch API documentation}
     */
   async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -635,7 +635,7 @@ export default class Cat {
 
   /**
     * Provides low-level information about the segments in the shards of an index.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-segments.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html | Elasticsearch API documentation}
     */
   async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -669,7 +669,7 @@ export default class Cat {
 
   /**
     * Provides a detailed view of shard allocation on nodes.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-shards.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html | Elasticsearch API documentation}
     */
   async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -703,7 +703,7 @@ export default class Cat {
 
   /**
     * Returns all snapshots in a specific repository.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-snapshots.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html | Elasticsearch API documentation}
     */
   async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -737,7 +737,7 @@ export default class Cat {
 
   /**
     * Returns information about the tasks currently executing on one or more nodes in the cluster.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/tasks.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation}
     */
   async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -764,7 +764,7 @@ export default class Cat {
 
   /**
     * Returns information about existing templates.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-templates.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html | Elasticsearch API documentation}
     */
   async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -797,9 +797,8 @@ export default class Cat {
   }
 
   /**
-    * Returns cluster-wide thread pool statistics per node.
- By default the active, queue and rejected statistics are returned for all thread pools.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-thread-pool.html Elasticsearch API docs}
+    * Returns cluster-wide thread pool statistics per node. By default the active, queue and rejected statistics are returned for all thread pools.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html | Elasticsearch API documentation}
     */
   async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -833,7 +832,7 @@ export default class Cat {
 
   /**
     * Gets configuration and usage information about transforms.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cat-transforms.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-transforms.html | Elasticsearch API documentation}
     */
   async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts
index 5b61ba9c4..d2f58a616 100644
--- a/src/api/api/ccr.ts
+++ b/src/api/api/ccr.ts
@@ -45,7 +45,7 @@ export default class Ccr {
 
   /**
     * Deletes auto-follow patterns.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-delete-auto-follow-pattern.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-delete-auto-follow-pattern.html | Elasticsearch API documentation}
     */
   async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -71,7 +71,7 @@ export default class Ccr {
 
   /**
     * Creates a new follower index configured to follow the referenced leader index.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-put-follow.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-follow.html | Elasticsearch API documentation}
     */
   async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -109,7 +109,7 @@ export default class Ccr {
 
   /**
     * Retrieves information about all follower indices, including parameters and status for each follower index
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-get-follow-info.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-info.html | Elasticsearch API documentation}
     */
   async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -135,7 +135,7 @@ export default class Ccr {
 
   /**
     * Retrieves follower stats. Returns shard-level stats about the following tasks associated with each shard for the specified indices.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-get-follow-stats.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-stats.html | Elasticsearch API documentation}
     */
   async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -161,7 +161,7 @@ export default class Ccr {
 
   /**
     * Removes the follower retention leases from the leader.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-post-forget-follower.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-forget-follower.html | Elasticsearch API documentation}
     */
   async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -199,7 +199,7 @@ export default class Ccr {
 
   /**
     * Gets configured auto-follow patterns.
 Returns the specified auto-follow pattern collection.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-get-auto-follow-pattern.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-auto-follow-pattern.html | Elasticsearch API documentation}
     */
   async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -233,7 +233,7 @@ export default class Ccr {
 
   /**
     * Pauses an auto-follow pattern
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-pause-auto-follow-pattern.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-pause-auto-follow-pattern.html | Elasticsearch API documentation}
     */
   async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -259,7 +259,7 @@ export default class Ccr {
 
   /**
     * Pauses a follower index. The follower index will not fetch any additional operations from the leader index.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-post-pause-follow.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-pause-follow.html | Elasticsearch API documentation}
     */
   async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -285,7 +285,7 @@ export default class Ccr {
 
   /**
     * Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-put-auto-follow-pattern.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-auto-follow-pattern.html | Elasticsearch API documentation}
     */
   async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -323,7 +323,7 @@ export default class Ccr {
 
   /**
     * Resumes an auto-follow pattern that has been paused
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-resume-auto-follow-pattern.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-resume-auto-follow-pattern.html | Elasticsearch API documentation}
     */
   async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -349,7 +349,7 @@ export default class Ccr {
 
   /**
     * Resumes a follower index that has been paused
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-post-resume-follow.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-resume-follow.html | Elasticsearch API documentation}
     */
   async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -387,7 +387,7 @@ export default class Ccr {
 
   /**
     * Gets all stats related to cross-cluster replication.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-get-stats.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-stats.html | Elasticsearch API documentation}
     */
   async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -414,7 +414,7 @@ export default class Ccr {
 
   /**
     * Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ccr-post-unfollow.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-unfollow.html | Elasticsearch API documentation}
     */
   async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts
index 3e5b25adb..99eae7286 100644
--- a/src/api/api/clear_scroll.ts
+++ b/src/api/api/clear_scroll.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
   * Explicitly clears the search context for a scroll.
-  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/clear-scroll-api.html Elasticsearch API docs}
+  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-scroll-api.html | Elasticsearch API documentation}
   */
 export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts
index f595531a0..380689069 100644
--- a/src/api/api/close_point_in_time.ts
+++ b/src/api/api/close_point_in_time.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
   * Close a point in time
-  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/point-in-time-api.html Elasticsearch API docs}
+  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html | Elasticsearch API documentation}
   */
 export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts
index c177e0760..59007a12c 100644
--- a/src/api/api/cluster.ts
+++ b/src/api/api/cluster.ts
@@ -45,7 +45,7 @@ export default class Cluster {
 
   /**
     * Provides explanations for shard allocations in the cluster.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-allocation-explain.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html | Elasticsearch API documentation}
     */
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -84,7 +84,7 @@ export default class Cluster {
 
   /**
     * Deletes a component template
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-component-template.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation}
     */
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -110,7 +110,7 @@ export default class Cluster {
 
   /**
     * Clears cluster voting config exclusions.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/voting-config-exclusions.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html | Elasticsearch API documentation}
     */
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -137,7 +137,7 @@ export default class Cluster {
 
   /**
     * Returns information about whether a particular component template exists
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-component-template.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation}
     */
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -163,7 +163,7 @@ export default class Cluster {
 
   /**
     * Returns one or more component templates
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-component-template.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation}
     */
   async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -197,7 +197,7 @@ export default class Cluster {
 
   /**
     * Returns cluster settings.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-get-settings.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-get-settings.html | Elasticsearch API documentation}
     */
   async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -224,7 +224,7 @@ export default class Cluster {
 
   /**
     * Returns basic information about the health of the cluster.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-health.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html | Elasticsearch API documentation}
     */
   async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -258,7 +258,7 @@ export default class Cluster {
 
   /**
     * Returns different information about the cluster.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-info.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-info.html | Elasticsearch API documentation}
     */
   async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -283,9 +283,8 @@ export default class Cluster {
   }
 
   /**
-    * Returns a list of any cluster-level changes (e.g. create index, update mapping,
- allocate or fail shard) which have not yet been executed.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-pending.html Elasticsearch API docs}
+    * Returns a list of any cluster-level changes (e.g. create index, update mapping, allocate or fail shard) which have not yet been executed.
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.html | Elasticsearch API documentation}
     */
   async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -312,7 +311,7 @@ export default class Cluster {
 
   /**
     * Updates the cluster voting config exclusions by node ids or node names.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/voting-config-exclusions.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html | Elasticsearch API documentation}
     */
   async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -339,7 +338,7 @@ export default class Cluster {
 
   /**
     * Creates or updates a component template
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-component-template.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation}
     */
   async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -377,7 +376,7 @@ export default class Cluster {
 
   /**
     * Updates the cluster settings.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-update-settings.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html | Elasticsearch API documentation}
     */
   async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -416,7 +415,7 @@ export default class Cluster {
 
   /**
     * Returns the information about configured remote clusters.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-remote-info.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html | Elasticsearch API documentation}
     */
   async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -443,7 +442,7 @@ export default class Cluster {
 
   /**
     * Allows to manually change the allocation of individual shards in the cluster.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-reroute.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-reroute.html | Elasticsearch API documentation}
     */
   async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -482,7 +481,7 @@ export default class Cluster {
 
   /**
     * Returns comprehensive information about the state of the cluster.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-state.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html | Elasticsearch API documentation}
     */
   async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -519,7 +518,7 @@ export default class Cluster {
 
   /**
     * Returns high-level overview of cluster statistics.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-stats.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html | Elasticsearch API documentation}
     */
   async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/count.ts b/src/api/api/count.ts
index 0e222b780..0d01dbb57 100644
--- a/src/api/api/count.ts
+++ b/src/api/api/count.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
   * Returns number of documents matching a query.
-  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-count.html Elasticsearch API docs}
+  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html | Elasticsearch API documentation}
   */
 export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/create.ts b/src/api/api/create.ts
index 0284dc19c..622f8eb63 100644
--- a/src/api/api/create.ts
+++ b/src/api/api/create.ts
@@ -38,10 +38,8 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
-  * Creates a new document in the index.
-
-Returns a 409 response when a document with a same ID already exists in the index.
-  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-index_.html Elasticsearch API docs}
+  * Creates a new document in the index. Returns a 409 response when a document with the same ID already exists in the index.
+  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation}
   */
 export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts
index 825e565aa..069734428 100644
--- a/src/api/api/dangling_indices.ts
+++ b/src/api/api/dangling_indices.ts
@@ -45,7 +45,7 @@ export default class DanglingIndices {
 
   /**
     * Deletes the specified dangling index
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-gateway-dangling-indices.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation}
     */
   async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -71,7 +71,7 @@ export default class DanglingIndices {
 
   /**
     * Imports the specified dangling index
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-gateway-dangling-indices.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation}
     */
   async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -97,7 +97,7 @@ export default class DanglingIndices {
 
   /**
     * Returns all dangling indices.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-gateway-dangling-indices.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation}
     */
   async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts
index 3a8783a88..f834f7283 100644
--- a/src/api/api/delete.ts
+++ b/src/api/api/delete.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
   * Removes a document from the index.
-  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-delete.html Elasticsearch API docs}
+  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html | Elasticsearch API documentation}
   */
 export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts
index 7b7664581..fc62679cd 100644
--- a/src/api/api/delete_by_query.ts
+++ b/src/api/api/delete_by_query.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
   * Deletes documents matching the provided query.
-  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-delete-by-query.html Elasticsearch API docs}
+  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html | Elasticsearch API documentation}
   */
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts
index c84002519..bb57bea1d 100644
--- a/src/api/api/delete_by_query_rethrottle.ts
+++ b/src/api/api/delete_by_query_rethrottle.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
   * Changes the number of requests per second for a particular Delete By Query operation.
-  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-delete-by-query.html Elasticsearch API docs}
+  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html | Elasticsearch API documentation}
   */
 export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts
index ff186863f..5c849219f 100644
--- a/src/api/api/delete_script.ts
+++ b/src/api/api/delete_script.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
   * Deletes a script.
-  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-scripting.html Elasticsearch API docs}
+  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation}
   */
 export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts
index d6a6b990c..f2f0f682f 100644
--- a/src/api/api/enrich.ts
+++ b/src/api/api/enrich.ts
@@ -45,7 +45,7 @@ export default class Enrich {
 
   /**
     * Deletes an existing enrich policy and its enrich index.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-enrich-policy-api.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-enrich-policy-api.html | Elasticsearch API documentation}
     */
   async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -71,7 +71,7 @@ export default class Enrich {
 
   /**
     * Creates the enrich index for an existing enrich policy.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/execute-enrich-policy-api.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/execute-enrich-policy-api.html | Elasticsearch API documentation}
     */
   async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -97,7 +97,7 @@ export default class Enrich {
 
   /**
     * Gets information about an enrich policy.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-enrich-policy-api.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-enrich-policy-api.html | Elasticsearch API documentation}
     */
   async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -131,7 +131,7 @@ export default class Enrich {
 
   /**
     * Creates a new enrich policy.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-enrich-policy-api.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-enrich-policy-api.html | Elasticsearch API documentation}
     */
   async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -169,7 +169,7 @@ export default class Enrich {
 
   /**
     * Gets enrich coordinator statistics and information about enrich policies that are currently executing.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/enrich-stats-api.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/enrich-stats-api.html | Elasticsearch API documentation}
     */
   async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts
index a181194c8..d299744da 100644
--- a/src/api/api/eql.ts
+++ b/src/api/api/eql.ts
@@ -45,7 +45,7 @@ export default class Eql {
 
   /**
     * Deletes an async EQL search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/eql-search-api.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/eql-search-api.html | Elasticsearch API documentation}
     */
   async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -71,7 +71,7 @@ export default class Eql {
 
   /**
     * Returns async results from a previously executed Event Query Language (EQL) search
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/eql-search-api.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-search-api.html | Elasticsearch API documentation}
     */
   async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
   async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
@@ -97,7 +97,7 @@ export default class Eql {
 
   /**
     * Returns the status of a previously submitted async or stored Event Query Language (EQL) search
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/eql-search-api.html Elasticsearch API docs}
+    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-status-api.html | Elasticsearch API documentation}
     */
   async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -123,7 +123,7 @@ export default class Eql {
 
   /**
     * Returns results matching a query expressed in
Event Query Language (EQL)
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/eql-search-api.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/eql-search-api.html | Elasticsearch API documentation}
 */
async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts
index 309612b31..a0448c0aa 100644
--- a/src/api/api/exists.ts
+++ b/src/api/api/exists.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
/**
* Returns information about whether a document exists in an index.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-get.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation}
 */
export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts
index 6c8142f9b..ba264a376 100644
--- a/src/api/api/exists_source.ts
+++ b/src/api/api/exists_source.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
/**
* Returns information about whether a document source exists in an index.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-get.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation}
 */
export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise
export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts
index 45568b812..6e910ffb2 100644
--- a/src/api/api/explain.ts
+++ b/src/api/api/explain.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
/**
* Returns information about why a specific document matches (or doesn't match) a query.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-explain.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html | Elasticsearch API documentation}
 */
export default async function ExplainApi (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
export default async function ExplainApi (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
diff --git a/src/api/api/features.ts b/src/api/api/features.ts
index 01a1b3a82..5bffb723c 100644
--- a/src/api/api/features.ts
+++ b/src/api/api/features.ts
@@ -45,7 +45,7 @@ export default class Features {
/**
* Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-features-api.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-features-api.html | Elasticsearch API documentation}
 */
async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -72,7 +72,7 @@ export default class Features {
/**
* Resets the internal state of features, usually by deleting system indices
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
 */
async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts
index 765b276bf..ec0a5b086 100644
--- a/src/api/api/field_caps.ts
+++ b/src/api/api/field_caps.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
/**
* Returns the information about the capabilities of fields among multiple indices.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-field-caps.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html | Elasticsearch API documentation}
 */
export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts
index 16a75fff7..4730068b6 100644
--- a/src/api/api/fleet.ts
+++ b/src/api/api/fleet.ts
@@ -45,7 +45,7 @@ export default class Fleet {
/**
* Returns the current global checkpoints for an index. This API is designed for internal use by the fleet server project.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-global-checkpoints.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-global-checkpoints.html | Elasticsearch API documentation} */ async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/get.ts b/src/api/api/get.ts index 4bdeaa4c4..96f31255e 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Returns a document. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-get.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function GetApi (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function GetApi (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts index d2808c987..b2c4a03d1 100644 --- a/src/api/api/get_script.ts +++ b/src/api/api/get_script.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Returns a script. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-scripting.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} */ export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts index b23e0908a..78ceed3b3 100644 --- a/src/api/api/get_script_context.ts +++ b/src/api/api/get_script_context.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Returns all script contexts. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/main/painless-contexts.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html | Elasticsearch API documentation} */ export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts index d155cd83a..600baac46 100644 --- a/src/api/api/get_script_languages.ts +++ b/src/api/api/get_script_languages.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Returns available script types, languages and contexts - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-scripting.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} */ export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index 958738be3..7f92d9b74 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Returns the source of a document. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-get.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index ff46902a5..666a40940 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -45,7 +45,7 @@ export default class Graph { /** * Explore extracted and summarized information about the documents and terms in an index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/graph-explore-api.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/graph-explore-api.html | Elasticsearch API documentation} */ async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts index ee365433e..4ad9a19c3 100644 --- a/src/api/api/health_report.ts +++ b/src/api/api/health_report.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Returns the health of the cluster. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/health-api.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/health-api.html | Elasticsearch API documentation} */ export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index 41c96e94e..23de35220 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -45,7 +45,7 @@ export default class Ilm { /** * Deletes the specified lifecycle policy definition. A currently used policy cannot be deleted. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-delete-lifecycle.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-delete-lifecycle.html | Elasticsearch API documentation} */ async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -71,7 +71,7 @@ export default class Ilm { /** * Retrieves information about the index's current lifecycle state, such as the currently executing phase, action, and step. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-explain-lifecycle.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-explain-lifecycle.html | Elasticsearch API documentation} */ async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -97,7 +97,7 @@ export default class Ilm { /** * Returns the specified policy definition. Includes the policy version and last modified date. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-get-lifecycle.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-get-lifecycle.html | Elasticsearch API documentation} */ async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -131,7 +131,7 @@ export default class Ilm { /** * Retrieves the current index lifecycle management (ILM) status. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-get-status.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-get-status.html | Elasticsearch API documentation} */ async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -158,7 +158,7 @@ export default class Ilm { /** * Migrates the indices and ILM policies away from custom node attribute allocation routing to data tiers routing - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-migrate-to-data-tiers.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-migrate-to-data-tiers.html | Elasticsearch API documentation} */ async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -197,7 +197,7 @@ export default class Ilm { /** * Manually moves an index into the specified step and executes that step. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-move-to-step.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-move-to-step.html | Elasticsearch API documentation} */ async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -235,7 +235,7 @@ export default class Ilm { /** * Creates a lifecycle policy - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-put-lifecycle.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-put-lifecycle.html | Elasticsearch API documentation} */ async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -273,7 +273,7 @@ export default class Ilm { /** * Removes the assigned lifecycle policy and stops managing the specified index - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-remove-policy.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-remove-policy.html | Elasticsearch API documentation} */ async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -299,7 +299,7 @@ export default class Ilm { /** * Retries executing the policy for an index that is in the ERROR step. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-retry-policy.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-retry-policy.html | Elasticsearch API documentation}
 */
async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -325,7 +325,7 @@ export default class Ilm {
/**
* Start the index lifecycle management (ILM) plugin.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-start.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-start.html | Elasticsearch API documentation}
 */
async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -352,7 +352,7 @@ export default class Ilm {
/**
* Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ilm-stop.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-stop.html | Elasticsearch API documentation}
 */
async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/index.ts b/src/api/api/index.ts
index d461144a3..b156d47c6 100644
--- a/src/api/api/index.ts
+++ b/src/api/api/index.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
/**
* Creates or updates a document in an index.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-index_.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation}
 */
export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise
export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
index 8413dbf03..4ef0eac17 100644
--- a/src/api/api/indices.ts
+++ b/src/api/api/indices.ts
@@ -45,7 +45,7 @@ export default class Indices {
/**
* Adds a block to an index.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/index-modules-blocks.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html | Elasticsearch API documentation}
 */
async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -71,7 +71,7 @@ export default class Indices {
/**
* Performs the analysis process on a text and returns the tokens breakdown of the text.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-analyze.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html | Elasticsearch API documentation} */ async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -117,7 +117,7 @@ export default class Indices { /** * Clears all or specific caches for one or more indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-clearcache.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -151,7 +151,7 @@ export default class Indices { /** * Clones an index - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-clone-index.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html | Elasticsearch API documentation} */ async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -189,7 +189,7 @@ export default class Indices { /** * Closes an index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-open-close.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-close.html | Elasticsearch API documentation} */ async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -215,7 +215,7 @@ export default class Indices { /** * Creates an index with optional settings and mappings. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-create-index.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html | Elasticsearch API documentation} */ async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -253,7 +253,7 @@ export default class Indices { /** * Creates a data stream - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -279,7 +279,7 @@ export default class Indices { /** * Provides statistics on operations happening in a data stream. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -313,7 +313,7 @@ export default class Indices { /** * Deletes an index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-delete-index.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html | Elasticsearch API documentation} */ async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -339,7 +339,7 @@ export default class Indices { /** * Deletes an alias. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-aliases.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -372,7 +372,7 @@ export default class Indices { /** * Deletes the data lifecycle of the selected data streams. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/dlm-delete-lifecycle.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-delete-lifecycle.html | Elasticsearch API documentation} */ async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -398,7 +398,7 @@ export default class Indices { /** * Deletes a data stream. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -424,7 +424,7 @@ export default class Indices { /** * Deletes an index template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} */ async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -450,7 +450,7 @@ export default class Indices { /** * Deletes an index template. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} */ async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -476,7 +476,7 @@ export default class Indices { /** * Analyzes the disk usage of each field of an index or data stream - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-disk-usage.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html | Elasticsearch API documentation} */ async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -502,7 +502,7 @@ export default class Indices { /** * Downsample an index - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/xpack-rollup.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-downsample-data-stream.html | Elasticsearch API documentation} */ async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -533,7 +533,7 @@ export default class Indices { /** * Returns information about whether a particular index exists. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-exists.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html | Elasticsearch API documentation} */ async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -559,7 +559,7 @@ export default class Indices { /** * Returns information about whether a particular alias exists. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-aliases.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -592,7 +592,7 @@ export default class Indices { /** * Returns information about whether a particular index template exists. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation}
 */
async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -618,7 +618,7 @@ export default class Indices {
/**
* Returns information about whether a particular index template exists.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation}
 */
async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -644,7 +644,7 @@ export default class Indices {
/**
* Retrieves information about the index's current DLM lifecycle, such as any errors encountered, time since creation, etc.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/dlm-explain-lifecycle.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-explain-lifecycle.html | Elasticsearch API documentation}
 */
async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -670,7 +670,7 @@ export default class Indices {
/**
* Returns the field usage stats for each field of an index
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/field-usage-stats.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html | Elasticsearch API documentation}
 */
async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -696,7 +696,7 @@ export default class Indices {
/**
* Performs the flush operation on one or more indices.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-flush.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html | Elasticsearch API documentation} */ async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -730,7 +730,7 @@ export default class Indices { /** * Performs the force merge operation on one or more indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-forcemerge.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html | Elasticsearch API documentation} */ async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -764,7 +764,7 @@ export default class Indices { /** * Returns information about one or more indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-get-index.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html | Elasticsearch API documentation} */ async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -790,7 +790,7 @@ export default class Indices { /** * Returns an alias. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-aliases.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -830,7 +830,7 @@ export default class Indices { /** * Returns the data lifecycle of the selected data streams. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/dlm-get-lifecycle.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-get-lifecycle.html | Elasticsearch API documentation} */ async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -856,7 +856,7 @@ export default class Indices { /** * Returns data streams. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -890,7 +890,7 @@ export default class Indices { /** * Returns mapping for one or more fields. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-get-field-mapping.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html | Elasticsearch API documentation} */ async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -923,7 +923,7 @@ export default class Indices { /** * Returns an index template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} */ async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -957,7 +957,7 @@ export default class Indices { /** * Returns mappings for one or more indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-get-mapping.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html | Elasticsearch API documentation} */ async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -991,7 +991,7 @@ export default class Indices { /** * Returns settings for one or more indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-get-settings.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1031,7 +1031,7 @@ export default class Indices { /** * Returns an index template. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} */ async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1065,7 +1065,7 @@ export default class Indices { /** * Migrates an alias to a data stream - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1091,7 +1091,7 @@ export default class Indices { /** * Modifies a data stream - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1129,7 +1129,7 @@ export default class Indices { /** * Opens an index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-open-close.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html | Elasticsearch API documentation} */ async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1155,7 +1155,7 @@ export default class Indices { /** * Promotes a data stream from a replicated data stream managed by CCR to a regular data stream - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/data-streams.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1181,7 +1181,7 @@ export default class Indices { /** * Creates or updates an alias. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-aliases.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1226,7 +1226,7 @@ export default class Indices { /** * Updates the data lifecycle of the selected data streams. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/dlm-put-lifecycle.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-put-lifecycle.html | Elasticsearch API documentation} */ async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1264,7 +1264,7 @@ export default class Indices { /** * Creates or updates an index template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} */ async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1302,7 +1302,7 @@ export default class Indices { /** * Updates the index mappings. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-put-mapping.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html | Elasticsearch API documentation} */ async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1340,7 +1340,7 @@ export default class Indices { /** * Updates the index settings. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-update-settings.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html | Elasticsearch API documentation} */ async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1378,7 +1378,7 @@ export default class Indices { /** * Creates or updates an index template. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} */ async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1416,7 +1416,7 @@ export default class Indices { /** * Returns information about ongoing index shard recoveries. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-recovery.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html | Elasticsearch API documentation} */ async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1450,7 +1450,7 @@ export default class Indices { /** * Performs the refresh operation in one or more indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-refresh.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html | Elasticsearch API documentation} */ async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1484,7 +1484,7 @@ export default class Indices { /** * Reloads an index's search analyzers and their resources. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-reload-analyzers.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html | Elasticsearch API documentation} */ async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1510,7 +1510,7 @@ export default class Indices { /** * Returns information about any matching indices, aliases, and data streams - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-resolve-index-api.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-index-api.html | Elasticsearch API documentation} */ async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1535,9 +1535,8 @@ export default class Indices { } /** - * Updates an alias to point to a new index when the existing index - is considered to be too large or too old. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-rollover-index.html Elasticsearch API docs}
+ * Updates an alias to point to a new index when the existing index is considered to be too large or too old.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html | Elasticsearch API documentation}
 */
async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -1582,7 +1581,7 @@ export default class Indices {
/**
* Provides low-level information about segments in a Lucene index.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-segments.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html | Elasticsearch API documentation}
 */
async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -1616,7 +1615,7 @@ export default class Indices {
/**
* Provides store information for shard copies of indices.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-shards-stores.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html | Elasticsearch API documentation}
 */
async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise
async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -1650,7 +1649,7 @@ export default class Indices {
/**
* Allows you to shrink an existing index into a new index with fewer primary shards.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-shrink-index.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html | Elasticsearch API documentation} */ async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1688,7 +1687,7 @@ export default class Indices { /** * Simulate matching the given index name against the index templates in the system - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} */ async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1726,7 +1725,7 @@ export default class Indices { /** * Simulate resolving the given template name or body - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-templates.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} */ async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1764,7 +1763,7 @@ export default class Indices { /** * Allows you to split an existing index into a new index with more primary shards. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-split-index.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html | Elasticsearch API documentation} */ async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1802,7 +1801,7 @@ export default class Indices { /** * Provides statistics on operations happening in an index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-stats.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1842,7 +1841,7 @@ export default class Indices { /** * Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. 
@@ -1842,7 +1841,7 @@ export default class Indices {
 
   /**
    * Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/unfreeze-index-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/unfreeze-index-api.html | Elasticsearch API documentation}
    */
   async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesUnfreezeResponse>
   async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesUnfreezeResponse, unknown>>
@@ -1868,7 +1867,7 @@ export default class Indices {
 
   /**
    * Updates index aliases.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/indices-aliases.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation}
    */
   async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesUpdateAliasesResponse>
   async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesUpdateAliasesResponse, unknown>>
@@ -1907,7 +1906,7 @@ export default class Indices {
 
   /**
    * Allows a user to validate a potentially expensive query without executing it.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-validate.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html | Elasticsearch API documentation}
    */
   async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesValidateQueryResponse>
   async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesValidateQueryResponse, unknown>>
diff --git a/src/api/api/info.ts b/src/api/api/info.ts
index 83478097f..3eca9959e 100644
--- a/src/api/api/info.ts
+++ b/src/api/api/info.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
  * Returns basic information about the cluster.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/index.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation}
  */
 export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InfoResponse>
 export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InfoResponse, unknown>>
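The info API above makes a handy connectivity check; a sketch under the same assumptions (placeholder node URL, ES module with top-level await):

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })
    const info = await client.info()
    console.log(info.version.number, info.cluster_name)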
diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts
index ea0d7dae5..2a877cc79 100644
--- a/src/api/api/ingest.ts
+++ b/src/api/api/ingest.ts
@@ -45,7 +45,7 @@ export default class Ingest {
 
   /**
    * Deletes a pipeline.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-pipeline-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html | Elasticsearch API documentation}
    */
   async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestDeletePipelineResponse>
   async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestDeletePipelineResponse, unknown>>
@@ -71,7 +71,7 @@ export default class Ingest {
 
   /**
    * Returns statistical information about geoip databases
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/geoip-stats-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/geoip-processor.html | Elasticsearch API documentation}
    */
   async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGeoIpStatsResponse>
   async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGeoIpStatsResponse, unknown>>
@@ -98,7 +98,7 @@ export default class Ingest {
 
   /**
    * Returns a pipeline.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-pipeline-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html | Elasticsearch API documentation}
    */
   async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGetPipelineResponse>
   async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGetPipelineResponse, unknown>>
@@ -132,7 +132,7 @@ export default class Ingest {
 
   /**
    * Returns a list of the built-in patterns.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/grok-processor.html#grok-processor-rest-get Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html | Elasticsearch API documentation}
    */
   async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestProcessorGrokResponse>
   async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestProcessorGrokResponse, unknown>>
@@ -159,7 +159,7 @@ export default class Ingest {
 
   /**
    * Creates or updates a pipeline.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-pipeline-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ingest.html | Elasticsearch API documentation}
    */
   async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestPutPipelineResponse>
   async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestPutPipelineResponse, unknown>>
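A sketch of the putPipeline call documented above, using a hypothetical pipeline id and a single rename processor (flattened 8.x request shape, no body wrapper; node URL is a placeholder):

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })
    await client.ingest.putPipeline({
      id: 'my-pipeline', // hypothetical id
      description: 'rename hostname to host.name on ingest',
      processors: [{ rename: { field: 'hostname', target_field: 'host.name' } }]
    })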
@@ -197,7 +197,7 @@ export default class Ingest {
 
   /**
    * Allows to simulate a pipeline with example documents.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/simulate-pipeline-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html | Elasticsearch API documentation}
    */
   async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestSimulateResponse>
   async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestSimulateResponse, unknown>>
diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts
index ac7729167..460826e43 100644
--- a/src/api/api/knn_search.ts
+++ b/src/api/api/knn_search.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
  * Performs a kNN search.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-search.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html | Elasticsearch API documentation}
  */
 export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.KnnSearchResponse<TDocument>>
 export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.KnnSearchResponse<TDocument>, unknown>>
diff --git a/src/api/api/license.ts b/src/api/api/license.ts
index fb257ad1c..f1301d25f 100644
--- a/src/api/api/license.ts
+++ b/src/api/api/license.ts
@@ -45,7 +45,7 @@ export default class License {
 
   /**
    * Deletes licensing information for the cluster
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-license.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html | Elasticsearch API documentation}
    */
   async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseDeleteResponse>
   async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseDeleteResponse, unknown>>
@@ -72,7 +72,7 @@ export default class License {
 
   /**
    * Retrieves licensing information for the cluster
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-license.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html | Elasticsearch API documentation}
    */
   async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseGetResponse>
   async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetResponse, unknown>>
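For reference, reading the current license with the get API above (same placeholder client setup as the earlier sketches):

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })
    const { license } = await client.license.get()
    console.log(license.type, license.status)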
@@ -99,7 +99,7 @@ export default class License {
 
   /**
    * Retrieves information about the status of the basic license.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-basic-status.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html | Elasticsearch API documentation}
    */
   async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseGetBasicStatusResponse>
   async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetBasicStatusResponse, unknown>>
@@ -126,7 +126,7 @@ export default class License {
 
   /**
    * Retrieves information about the status of the trial license.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-trial-status.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html | Elasticsearch API documentation}
    */
   async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseGetTrialStatusResponse>
   async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetTrialStatusResponse, unknown>>
@@ -153,7 +153,7 @@ export default class License {
 
   /**
    * Updates the license for the cluster.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/update-license.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html | Elasticsearch API documentation}
    */
   async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicensePostResponse>
   async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostResponse, unknown>>
@@ -192,7 +192,7 @@ export default class License {
 
   /**
    * Starts an indefinite basic license.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/start-basic.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html | Elasticsearch API documentation}
    */
   async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicensePostStartBasicResponse>
   async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostStartBasicResponse, unknown>>
@@ -219,7 +219,7 @@ export default class License {
 
   /**
    * starts a limited time trial license.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/start-trial.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html | Elasticsearch API documentation}
    */
   async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicensePostStartTrialResponse>
   async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostStartTrialResponse, unknown>>
diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts
index c9d547266..0b85093de 100644
--- a/src/api/api/logstash.ts
+++ b/src/api/api/logstash.ts
@@ -45,7 +45,7 @@ export default class Logstash {
 
   /**
    * Deletes Logstash Pipelines used by Central Management
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/logstash-api-delete-pipeline.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-delete-pipeline.html | Elasticsearch API documentation}
    */
   async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LogstashDeletePipelineResponse>
   async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LogstashDeletePipelineResponse, unknown>>
@@ -71,7 +71,7 @@ export default class Logstash {
 
   /**
    * Retrieves Logstash Pipelines used by Central Management
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/logstash-api-get-pipeline.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-get-pipeline.html | Elasticsearch API documentation}
    */
   async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LogstashGetPipelineResponse>
   async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LogstashGetPipelineResponse, unknown>>
@@ -104,7 +104,7 @@ export default class Logstash {
 
   /**
    * Adds and updates Logstash Pipelines used for Central Management
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/logstash-api-put-pipeline.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-put-pipeline.html | Elasticsearch API documentation}
    */
   async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LogstashPutPipelineResponse>
   async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LogstashPutPipelineResponse, unknown>>
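A sketch of fetching a centrally managed pipeline with the Logstash API above ('my-pipeline' is a hypothetical id; the response maps pipeline ids to their definitions):

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })
    const pipelines = await client.logstash.getPipeline({ id: 'my-pipeline' })
    console.log(Object.keys(pipelines))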
diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts
index 0b69f08d9..bf6717a24 100644
--- a/src/api/api/mget.ts
+++ b/src/api/api/mget.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
  * Allows to get multiple documents in one request.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-multi-get.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.html | Elasticsearch API documentation}
  */
 export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MgetResponse<TDocument>>
 export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MgetResponse<TDocument>, unknown>>
diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts
index ac09d60e4..52b361a5a 100644
--- a/src/api/api/migration.ts
+++ b/src/api/api/migration.ts
@@ -45,7 +45,7 @@ export default class Migration {
 
   /**
    * Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/migration-api-deprecation.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-deprecation.html | Elasticsearch API documentation}
    */
   async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MigrationDeprecationsResponse>
   async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationDeprecationsResponse, unknown>>
@@ -79,7 +79,7 @@ export default class Migration {
 
   /**
    * Find out whether system features need to be upgraded or not
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/migration-api-feature-upgrade.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-feature-upgrade.html | Elasticsearch API documentation}
    */
   async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MigrationGetFeatureUpgradeStatusResponse>
   async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationGetFeatureUpgradeStatusResponse, unknown>>
@@ -106,7 +106,7 @@ export default class Migration {
 
   /**
    * Begin upgrades for system features
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/migration-api-feature-upgrade.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-feature-upgrade.html | Elasticsearch API documentation}
    */
   async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MigrationPostFeatureUpgradeResponse>
   async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationPostFeatureUpgradeResponse, unknown>>
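Before a major-version upgrade, the deprecation info API above is typically the first check; a sketch with the usual placeholder client:

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })
    const deprecations = await client.migration.deprecations()
    console.log(deprecations.cluster_settings, deprecations.node_settings)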
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index 97f0b0ddd..27ccefdb9 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -45,7 +45,7 @@ export default class Ml {
 
   /**
    * Clear the cached results from a trained model deployment
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/clear-trained-model-deployment-cache.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-trained-model-deployment-cache.html | Elasticsearch API documentation}
    */
   async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlClearTrainedModelDeploymentCacheResponse>
   async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlClearTrainedModelDeploymentCacheResponse, unknown>>
@@ -71,7 +71,7 @@ export default class Ml {
 
   /**
    * Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-close-job.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-close-job.html | Elasticsearch API documentation}
    */
   async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlCloseJobResponse>
   async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlCloseJobResponse, unknown>>
@@ -109,7 +109,7 @@ export default class Ml {
 
   /**
    * Deletes a calendar.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-calendar.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar.html | Elasticsearch API documentation}
    */
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarResponse>
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarResponse, unknown>>
@@ -135,7 +135,7 @@ export default class Ml {
 
   /**
    * Deletes scheduled events from a calendar.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-calendar-event.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-event.html | Elasticsearch API documentation}
    */
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarEventResponse>
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarEventResponse, unknown>>
@@ -161,7 +161,7 @@ export default class Ml {
 
   /**
    * Deletes anomaly detection jobs from a calendar.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-calendar-job.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-job.html | Elasticsearch API documentation}
    */
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarJobResponse>
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarJobResponse, unknown>>
@@ -187,7 +187,7 @@ export default class Ml {
 
   /**
    * Deletes an existing data frame analytics job.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-dfanalytics.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-dfanalytics.html | Elasticsearch API documentation}
    */
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteDataFrameAnalyticsResponse>
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDataFrameAnalyticsResponse, unknown>>
@@ -213,7 +213,7 @@ export default class Ml {
 
   /**
    * Deletes an existing datafeed.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-datafeed.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-datafeed.html | Elasticsearch API documentation}
    */
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteDatafeedResponse>
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDatafeedResponse, unknown>>
@@ -239,7 +239,7 @@ export default class Ml {
 
   /**
    * Deletes expired and unused machine learning data.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-expired-data.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-expired-data.html | Elasticsearch API documentation}
    */
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteExpiredDataResponse>
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteExpiredDataResponse, unknown>>
@@ -285,7 +285,7 @@ export default class Ml {
 
   /**
    * Deletes a filter.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-filter.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-filter.html | Elasticsearch API documentation}
    */
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteFilterResponse>
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteFilterResponse, unknown>>
@@ -311,7 +311,7 @@ export default class Ml {
 
   /**
    * Deletes forecasts from a machine learning job.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-forecast.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-forecast.html | Elasticsearch API documentation}
    */
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteForecastResponse>
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteForecastResponse, unknown>>
@@ -344,7 +344,7 @@ export default class Ml {
 
   /**
    * Deletes an existing anomaly detection job.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-job.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-job.html | Elasticsearch API documentation}
    */
   async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteJobResponse>
   async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteJobResponse, unknown>>
@@ -370,7 +370,7 @@ export default class Ml {
 
   /**
    * Deletes an existing model snapshot.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-delete-snapshot.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-snapshot.html | Elasticsearch API documentation}
    */
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteModelSnapshotResponse>
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteModelSnapshotResponse, unknown>>
@@ -396,7 +396,7 @@ export default class Ml {
 
   /**
    * Deletes an existing trained inference model that is currently not referenced by an ingest pipeline.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-trained-models.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-trained-models.html | Elasticsearch API documentation}
    */
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteTrainedModelResponse>
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelResponse, unknown>>
@@ -422,7 +422,7 @@ export default class Ml {
 
   /**
    * Deletes a model alias that refers to the trained model
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-trained-models-aliases.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-trained-models-aliases.html | Elasticsearch API documentation}
    */
   async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteTrainedModelAliasResponse>
   async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelAliasResponse, unknown>>
@@ -448,7 +448,7 @@ export default class Ml {
 
   /**
    * Estimates the model memory
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-apis.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-apis.html | Elasticsearch API documentation}
    */
   async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlEstimateModelMemoryResponse>
   async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEstimateModelMemoryResponse, unknown>>
@@ -487,7 +487,7 @@ export default class Ml {
 
   /**
    * Evaluates the data frame analytics for an annotated index.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/evaluate-dfanalytics.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/evaluate-dfanalytics.html | Elasticsearch API documentation}
    */
   async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlEvaluateDataFrameResponse>
   async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEvaluateDataFrameResponse, unknown>>
@@ -525,7 +525,7 @@ export default class Ml {
 
   /**
    * Explains a data frame analytics config.
-   * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/main/explain-dfanalytics.html Elasticsearch API docs}
+   * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/explain-dfanalytics.html | Elasticsearch API documentation}
    */
   async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlExplainDataFrameAnalyticsResponse>
   async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlExplainDataFrameAnalyticsResponse, unknown>>
@@ -571,7 +571,7 @@ export default class Ml {
 
   /**
    * Forces any buffered data to be processed by the job.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-flush-job.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-flush-job.html | Elasticsearch API documentation}
    */
   async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlFlushJobResponse>
   async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlFlushJobResponse, unknown>>
@@ -609,7 +609,7 @@ export default class Ml {
 
   /**
    * Predicts the future behavior of a time series by using its historical behavior.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-forecast.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-forecast.html | Elasticsearch API documentation}
    */
   async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlForecastResponse>
   async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlForecastResponse, unknown>>
@@ -647,7 +647,7 @@ export default class Ml {
 
   /**
    * Retrieves anomaly detection job results for one or more buckets.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-bucket.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-bucket.html | Elasticsearch API documentation}
    */
   async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetBucketsResponse>
   async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetBucketsResponse, unknown>>
@@ -692,7 +692,7 @@ export default class Ml {
 
   /**
    * Retrieves information about the scheduled events in calendars.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-calendar-event.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar-event.html | Elasticsearch API documentation}
    */
   async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarEventsResponse>
   async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarEventsResponse, unknown>>
@@ -718,7 +718,7 @@ export default class Ml {
 
   /**
    * Retrieves configuration information for calendars.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-calendar.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar.html | Elasticsearch API documentation}
    */
   async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarsResponse>
   async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarsResponse, unknown>>
@@ -764,7 +764,7 @@ export default class Ml {
 
   /**
    * Retrieves anomaly detection job results for one or more categories.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-category.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-category.html | Elasticsearch API documentation}
    */
   async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCategoriesResponse>
   async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCategoriesResponse, unknown>>
@@ -809,7 +809,7 @@ export default class Ml {
 
   /**
    * Retrieves configuration information for data frame analytics jobs.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-dfanalytics.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics.html | Elasticsearch API documentation}
    */
   async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsResponse>
   async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsResponse, unknown>>
@@ -843,7 +843,7 @@ export default class Ml {
 
   /**
    * Retrieves usage information for data frame analytics jobs.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-dfanalytics-stats.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics-stats.html | Elasticsearch API documentation}
    */
   async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
   async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsStatsResponse, unknown>>
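A sketch of the data frame analytics stats call above ('my-dfa-job' is a hypothetical id; omit the id parameter to list every job):

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })
    const stats = await client.ml.getDataFrameAnalyticsStats({ id: 'my-dfa-job' })
    console.log(stats.count)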
@@ -877,7 +877,7 @@ export default class Ml {
 
   /**
    * Retrieves usage information for datafeeds.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-datafeed-stats.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed-stats.html | Elasticsearch API documentation}
    */
   async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedStatsResponse>
   async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDatafeedStatsResponse, unknown>>
@@ -911,7 +911,7 @@ export default class Ml {
 
   /**
    * Retrieves configuration information for datafeeds.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-datafeed.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed.html | Elasticsearch API documentation}
    */
   async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedsResponse>
   async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDatafeedsResponse, unknown>>
@@ -945,7 +945,7 @@ export default class Ml {
 
   /**
    * Retrieves filters.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-filter.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-filter.html | Elasticsearch API documentation}
    */
   async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetFiltersResponse>
   async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetFiltersResponse, unknown>>
@@ -979,7 +979,7 @@ export default class Ml {
 
   /**
    * Retrieves anomaly detection job results for one or more influencers.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-influencer.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-influencer.html | Elasticsearch API documentation}
    */
   async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetInfluencersResponse>
   async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetInfluencersResponse, unknown>>
@@ -1017,7 +1017,7 @@ export default class Ml {
 
   /**
    * Retrieves usage information for anomaly detection jobs.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-job-stats.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-stats.html | Elasticsearch API documentation}
    */
   async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobStatsResponse>
   async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetJobStatsResponse, unknown>>
@@ -1051,7 +1051,7 @@ export default class Ml {
 
   /**
    * Retrieves configuration information for anomaly detection jobs.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-job.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job.html | Elasticsearch API documentation}
    */
   async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobsResponse>
   async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetJobsResponse, unknown>>
@@ -1085,7 +1085,7 @@ export default class Ml {
 
   /**
    * Returns information on how ML is using memory.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-ml-memory.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-memory.html | Elasticsearch API documentation}
    */
   async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetMemoryStatsResponse>
   async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetMemoryStatsResponse, unknown>>
@@ -1119,7 +1119,7 @@ export default class Ml {
 
   /**
    * Gets stats for anomaly detection job model snapshot upgrades that are in progress.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-job-model-snapshot-upgrade-stats.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-model-snapshot-upgrade-stats.html | Elasticsearch API documentation}
    */
   async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetModelSnapshotUpgradeStatsResponse>
   async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetModelSnapshotUpgradeStatsResponse, unknown>>
@@ -1145,7 +1145,7 @@ export default class Ml {
 
   /**
    * Retrieves information about model snapshots.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-snapshot.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-snapshot.html | Elasticsearch API documentation}
    */
   async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetModelSnapshotsResponse>
   async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetModelSnapshotsResponse, unknown>>
@@ -1190,7 +1190,7 @@ export default class Ml {
 
   /**
    * Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-overall-buckets.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-overall-buckets.html | Elasticsearch API documentation}
    */
   async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetOverallBucketsResponse>
   async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetOverallBucketsResponse, unknown>>
@@ -1228,7 +1228,7 @@ export default class Ml {
 
   /**
    * Retrieves anomaly records for an anomaly detection job.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-get-record.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-record.html | Elasticsearch API documentation}
    */
   async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetRecordsResponse>
   async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetRecordsResponse, unknown>>
@@ -1266,7 +1266,7 @@ export default class Ml {
 
   /**
    * Retrieves configuration information for a trained inference model.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-trained-models.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models.html | Elasticsearch API documentation}
    */
   async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetTrainedModelsResponse>
   async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetTrainedModelsResponse, unknown>>
@@ -1300,7 +1300,7 @@ export default class Ml {
 
   /**
    * Retrieves usage information for trained inference models.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-trained-models-stats.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models-stats.html | Elasticsearch API documentation}
    */
   async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetTrainedModelsStatsResponse>
   async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetTrainedModelsStatsResponse, unknown>>
@@ -1334,7 +1334,7 @@ export default class Ml {
 
   /**
    * Evaluate a trained model.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/infer-trained-model.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html | Elasticsearch API documentation}
    */
   async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlInferTrainedModelResponse>
   async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlInferTrainedModelResponse, unknown>>
@@ -1372,7 +1372,7 @@ export default class Ml {
 
   /**
    * Returns defaults and limits used by machine learning.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-ml-info.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-info.html | Elasticsearch API documentation}
    */
   async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlInfoResponse>
   async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlInfoResponse, unknown>>
@@ -1399,7 +1399,7 @@ export default class Ml {
 
   /**
    * Opens one or more anomaly detection jobs.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-open-job.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-open-job.html | Elasticsearch API documentation}
    */
   async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlOpenJobResponse>
   async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlOpenJobResponse, unknown>>
@@ -1437,7 +1437,7 @@ export default class Ml {
 
   /**
    * Posts scheduled events in a calendar.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-post-calendar-event.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-calendar-event.html | Elasticsearch API documentation}
    */
   async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPostCalendarEventsResponse>
   async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPostCalendarEventsResponse, unknown>>
@@ -1475,7 +1475,7 @@ export default class Ml {
 
   /**
    * Sends data to an anomaly detection job for analysis.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-post-data.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-data.html | Elasticsearch API documentation}
    */
   async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPostDataResponse>
   async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPostDataResponse, unknown>>
@@ -1506,7 +1506,7 @@ export default class Ml {
 
   /**
    * Previews that will be analyzed given a data frame analytics config.
-   * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/main/preview-dfanalytics.html Elasticsearch API docs}
+   * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/preview-dfanalytics.html | Elasticsearch API documentation}
    */
   async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPreviewDataFrameAnalyticsResponse>
   async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPreviewDataFrameAnalyticsResponse, unknown>>
@@ -1552,7 +1552,7 @@ export default class Ml {
 
   /**
    * Previews a datafeed.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-preview-datafeed.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-preview-datafeed.html | Elasticsearch API documentation}
    */
   async previewDatafeed<TDocument = unknown> (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPreviewDatafeedResponse<TDocument>>
   async previewDatafeed<TDocument = unknown> (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPreviewDatafeedResponse<TDocument>, unknown>>
@@ -1598,7 +1598,7 @@ export default class Ml {
 
   /**
    * Instantiates a calendar.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-put-calendar.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar.html | Elasticsearch API documentation}
    */
   async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutCalendarResponse>
   async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutCalendarResponse, unknown>>
@@ -1636,7 +1636,7 @@ export default class Ml {
 
   /**
    * Adds an anomaly detection job to a calendar.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-put-calendar-job.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar-job.html | Elasticsearch API documentation}
    */
   async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutCalendarJobResponse>
   async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutCalendarJobResponse, unknown>>
@@ -1662,7 +1662,7 @@ export default class Ml {
 
   /**
    * Instantiates a data frame analytics job.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-dfanalytics.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-dfanalytics.html | Elasticsearch API documentation}
    */
   async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutDataFrameAnalyticsResponse>
   async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutDataFrameAnalyticsResponse, unknown>>
@@ -1700,7 +1700,7 @@ export default class Ml {
 
   /**
    * Instantiates a datafeed.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-put-datafeed.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-datafeed.html | Elasticsearch API documentation}
    */
   async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutDatafeedResponse>
   async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutDatafeedResponse, unknown>>
@@ -1738,7 +1738,7 @@ export default class Ml {
 
   /**
    * Instantiates a filter.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-put-filter.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-filter.html | Elasticsearch API documentation}
    */
   async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutFilterResponse>
   async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutFilterResponse, unknown>>
@@ -1776,7 +1776,7 @@ export default class Ml {
 
   /**
    * Instantiates an anomaly detection job.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-put-job.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-job.html | Elasticsearch API documentation}
    */
   async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutJobResponse>
   async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutJobResponse, unknown>>
@@ -1814,7 +1814,7 @@ export default class Ml {
 
   /**
    * Creates an inference trained model.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-trained-models.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models.html | Elasticsearch API documentation}
    */
   async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelResponse>
   async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelResponse, unknown>>
@@ -1852,7 +1852,7 @@ export default class Ml {
 
   /**
    * Creates a new model alias (or reassigns an existing one) to refer to the trained model
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-trained-models-aliases.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models-aliases.html | Elasticsearch API documentation}
    */
   async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelAliasResponse>
   async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelAliasResponse, unknown>>
@@ -1878,7 +1878,7 @@ export default class Ml {
 
   /**
    * Creates part of a trained model definition
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-trained-model-definition-part.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-definition-part.html | Elasticsearch API documentation}
    */
   async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelDefinitionPartResponse>
   async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelDefinitionPartResponse, unknown>>
@@ -1916,7 +1916,7 @@ export default class Ml {
 
   /**
    * Creates a trained model vocabulary
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-trained-model-vocabulary.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-vocabulary.html | Elasticsearch API documentation}
    */
   async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelVocabularyResponse>
   async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelVocabularyResponse, unknown>>
@@ -1954,7 +1954,7 @@ export default class Ml {
 
   /**
    * Resets an existing anomaly detection job.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-reset-job.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-reset-job.html | Elasticsearch API documentation}
    */
   async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlResetJobResponse>
   async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlResetJobResponse, unknown>>
@@ -1980,7 +1980,7 @@ export default class Ml {
 
   /**
    * Reverts to a specific snapshot.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-revert-snapshot.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-revert-snapshot.html | Elasticsearch API documentation}
    */
   async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlRevertModelSnapshotResponse>
   async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlRevertModelSnapshotResponse, unknown>>
@@ -2018,7 +2018,7 @@ export default class Ml {
 
   /**
    * Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-set-upgrade-mode.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-set-upgrade-mode.html | Elasticsearch API documentation}
    */
   async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlSetUpgradeModeResponse>
   async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlSetUpgradeModeResponse, unknown>>
@@ -2045,7 +2045,7 @@ export default class Ml {
 
   /**
    * Starts a data frame analytics job.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/start-dfanalytics.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-dfanalytics.html | Elasticsearch API documentation}
    */
   async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDataFrameAnalyticsResponse>
   async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDataFrameAnalyticsResponse, unknown>>
@@ -2071,7 +2071,7 @@ export default class Ml {
 
   /**
    * Starts one or more datafeeds.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-start-datafeed.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-start-datafeed.html | Elasticsearch API documentation}
    */
   async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDatafeedResponse>
   async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDatafeedResponse, unknown>>
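The open/start pair above is typically called in sequence; a sketch with hypothetical ids (the datafeed must belong to the job, and the usual placeholder client applies):

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })
    await client.ml.openJob({ job_id: 'my-anomaly-job' })
    await client.ml.startDatafeed({ datafeed_id: 'datafeed-my-anomaly-job' })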
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/start-trained-model-deployment.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html | Elasticsearch API documentation} */ async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2135,7 +2135,7 @@ export default class Ml { /** * Stops one or more data frame analytics jobs. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/stop-dfanalytics.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-dfanalytics.html | Elasticsearch API documentation} */ async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2161,7 +2161,7 @@ export default class Ml { /** * Stops one or more datafeeds. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-stop-datafeed.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-stop-datafeed.html | Elasticsearch API documentation} */ async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2199,7 +2199,7 @@ export default class Ml { /** * Stop a trained model deployment. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/stop-trained-model-deployment.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html | Elasticsearch API documentation} */ async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2225,7 +2225,7 @@ export default class Ml { /** * Updates certain properties of a data frame analytics job. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/update-dfanalytics.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-dfanalytics.html | Elasticsearch API documentation} */ async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2263,7 +2263,7 @@ export default class Ml { /** * Updates certain properties of a datafeed. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-update-datafeed.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-datafeed.html | Elasticsearch API documentation} */ async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2301,7 +2301,7 @@ export default class Ml { /** * Updates the description of a filter, adds items, or removes items. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-update-filter.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-filter.html | Elasticsearch API documentation} */ async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2339,7 +2339,7 @@ export default class Ml { /** * Updates certain properties of an anomaly detection job. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-update-job.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-job.html | Elasticsearch API documentation} */ async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2377,7 +2377,7 @@ export default class Ml { /** * Updates certain properties of a snapshot. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-update-snapshot.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-snapshot.html | Elasticsearch API documentation} */ async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2415,7 +2415,7 @@ export default class Ml { /** * Updates certain properties of trained model deployment. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/update-trained-model-deployment.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-trained-model-deployment.html | Elasticsearch API documentation} */ async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> @@ -2441,7 +2441,7 @@ export default class Ml { /** * Upgrades a given job snapshot to the current major version. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/ml-upgrade-job-model-snapshot.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-upgrade-job-model-snapshot.html | Elasticsearch API documentation} */ async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2467,7 +2467,7 @@ export default class Ml { /** * Validates an anomaly detection job. - * @see {@link https://www.elastic.co/guide/en/machine-learning/main/ml-jobs.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/machine-learning/master/ml-jobs.html | Elasticsearch API documentation} */ async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2506,7 +2506,7 @@ export default class Ml { /** * Validates an anomaly detection detector. - * @see {@link https://www.elastic.co/guide/en/machine-learning/main/ml-jobs.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/machine-learning/master/ml-jobs.html | Elasticsearch API documentation} */ async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index 6ca1c152c..86eff3cb7 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -45,7 +45,7 @@ export default class Monitoring { /** * Used by the monitoring features to send monitoring data. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/monitor-elasticsearch-cluster.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/monitor-elasticsearch-cluster.html | Elasticsearch API documentation} */ async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts index 5580751d1..b799c5462 100644 --- a/src/api/api/msearch.ts +++ b/src/api/api/msearch.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Allows to execute several search operations in one request. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-multi-search.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html | Elasticsearch API documentation} */ export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts index 000477486..7e35d0b68 100644 --- a/src/api/api/msearch_template.ts +++ b/src/api/api/msearch_template.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Allows to execute several search template operations in one request. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-multi-search.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html | Elasticsearch API documentation} */ export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts index 6f5a1d197..7509fb773 100644 --- a/src/api/api/mtermvectors.ts +++ b/src/api/api/mtermvectors.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Returns multiple termvectors in one request. 
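  *
  * Editor's sketch, not generated code: a minimal call, assuming a configured
  * `client` instance; the index name and document ids are illustrative placeholders.
  *
  *   const res = await client.mtermvectors({
  *     index: 'my-index',      // illustrative index
  *     ids: ['1', '2']         // fetch term vectors for these document ids
  *   })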
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-multi-termvectors.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-termvectors.html | Elasticsearch API documentation} */ export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index 97e10edbf..c6dee81f5 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -45,7 +45,7 @@ export default class Nodes { /** * Removes the archived repositories metering information present in the cluster. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/clear-repositories-metering-archive-api.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-repositories-metering-archive-api.html | Elasticsearch API documentation} */ async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -71,7 +71,7 @@ export default class Nodes { /** * Returns cluster repositories metering information. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-repositories-metering-api.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-repositories-metering-api.html | Elasticsearch API documentation} */ async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -97,7 +97,7 @@ export default class Nodes { /** * Returns information about hot threads on each node in the cluster. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-nodes-hot-threads.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html | Elasticsearch API documentation} */ async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -131,7 +131,7 @@ export default class Nodes { /** * Returns information about nodes in the cluster. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-nodes-info.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html | Elasticsearch API documentation} */ async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -171,7 +171,7 @@ export default class Nodes { /** * Reloads secure settings. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/secure-settings.html#reloadable-secure-settings Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings | Elasticsearch API documentation} */ async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -217,7 +217,7 @@ export default class Nodes { /** * Returns statistical information about nodes in the cluster. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-nodes-stats.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -263,7 +263,7 @@ export default class Nodes { /** * Returns low-level information about REST actions usage on nodes. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/cluster-nodes-usage.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html | Elasticsearch API documentation} */ async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts index 38e0165b7..dbe2fb182 100644 --- a/src/api/api/open_point_in_time.ts +++ b/src/api/api/open_point_in_time.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Open a point in time that can be used in subsequent searches - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/point-in-time-api.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html | Elasticsearch API documentation} */ export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts index b2db543f2..b91a22fd3 100644 --- a/src/api/api/ping.ts +++ b/src/api/api/ping.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Returns whether the cluster is running. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/index.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} */ export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts index 0aec2b2d7..36ea03bd2 100644 --- a/src/api/api/put_script.ts +++ b/src/api/api/put_script.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Creates or updates a script. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-scripting.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} */ export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/query_ruleset.ts b/src/api/api/query_ruleset.ts new file mode 100644 index 000000000..01ee774ae --- /dev/null +++ b/src/api/api/query_ruleset.ts @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import * as TB from '../typesWithBodyKey'
+interface That { transport: Transport }
+
+export default class QueryRuleset {
+  transport: Transport
+  constructor (transport: Transport) {
+    this.transport = transport
+  }
+
+  /**
+   * Deletes a query ruleset.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html | Elasticsearch API documentation}
+   */
+  async delete (this: That, params: T.QueryRulesetDeleteRequest | TB.QueryRulesetDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesetDeleteResponse>
+  async delete (this: That, params: T.QueryRulesetDeleteRequest | TB.QueryRulesetDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesetDeleteResponse, unknown>>
+  async delete (this: That, params: T.QueryRulesetDeleteRequest | TB.QueryRulesetDeleteRequest, options?: TransportRequestOptions): Promise<T.QueryRulesetDeleteResponse>
+  async delete (this: That, params: T.QueryRulesetDeleteRequest | TB.QueryRulesetDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['ruleset_id']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
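+
+  // Editor's sketch, not generated code — deleting a ruleset through this class,
+  // assuming it is exposed on a configured client as `client.queryRuleset`
+  // (the ruleset id is an illustrative placeholder):
+  //
+  //   await client.queryRuleset.delete({ ruleset_id: 'my-ruleset' })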
+
+  /**
+   * Returns the details about a query ruleset.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-ruleset.html | Elasticsearch API documentation}
+   */
+  async get (this: That, params: T.QueryRulesetGetRequest | TB.QueryRulesetGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesetGetResponse>
+  async get (this: That, params: T.QueryRulesetGetRequest | TB.QueryRulesetGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesetGetResponse, unknown>>
+  async get (this: That, params: T.QueryRulesetGetRequest | TB.QueryRulesetGetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesetGetResponse>
+  async get (this: That, params: T.QueryRulesetGetRequest | TB.QueryRulesetGetRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['ruleset_id']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+
+  /**
+   * Lists query rulesets.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-query-rulesets.html | Elasticsearch API documentation}
+   */
+  async list (this: That, params?: T.QueryRulesetListRequest | TB.QueryRulesetListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesetListResponse>
+  async list (this: That, params?: T.QueryRulesetListRequest | TB.QueryRulesetListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesetListResponse, unknown>>
+  async list (this: That, params?: T.QueryRulesetListRequest | TB.QueryRulesetListRequest, options?: TransportRequestOptions): Promise<T.QueryRulesetListResponse>
+  async list (this: That, params?: T.QueryRulesetListRequest | TB.QueryRulesetListRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_query_rules'
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
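+
+  // Editor's sketch of the `put` method that follows — hedged, not from the
+  // source; the ruleset id and rule contents are illustrative placeholders
+  // assuming the current query rules request shape:
+  //
+  //   await client.queryRuleset.put({
+  //     ruleset_id: 'my-ruleset',
+  //     query_ruleset: {
+  //       rules: [{
+  //         rule_id: 'rule-1',
+  //         type: 'pinned',
+  //         criteria: [{ type: 'exact', metadata: 'user_query', values: ['pugs'] }],
+  //         actions: { ids: ['doc-1'] }
+  //       }]
+  //     }
+  //   })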
+
+  /**
+   * Creates or updates a query ruleset.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html | Elasticsearch API documentation}
+   */
+  async put (this: That, params: T.QueryRulesetPutRequest | TB.QueryRulesetPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesetPutResponse>
+  async put (this: That, params: T.QueryRulesetPutRequest | TB.QueryRulesetPutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesetPutResponse, unknown>>
+  async put (this: That, params: T.QueryRulesetPutRequest | TB.QueryRulesetPutRequest, options?: TransportRequestOptions): Promise<T.QueryRulesetPutResponse>
+  async put (this: That, params: T.QueryRulesetPutRequest | TB.QueryRulesetPutRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['ruleset_id']
+    const acceptedBody: string[] = ['query_ruleset']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    let body: any = params.body ?? undefined
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        // @ts-expect-error
+        body = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}`
+    return await this.transport.request({ path, method, querystring, body }, options)
+  }
+}
diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts
index e3618721b..5e0e1c263 100644
--- a/src/api/api/rank_eval.ts
+++ b/src/api/api/rank_eval.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
  * Allows to evaluate the quality of ranked search results over a set of typical search queries
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-rank-eval.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html | Elasticsearch API documentation}
  */
 export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RankEvalResponse>
 export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RankEvalResponse, unknown>>
diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts
index 43aa34f1f..17790d754 100644
--- a/src/api/api/reindex.ts
+++ b/src/api/api/reindex.ts
@@ -38,10 +38,8 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * Allows to copy documents from one index to another, optionally filtering the source
-documents by a query, changing the destination index settings, or fetching the
-documents from a remote cluster.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-reindex.html Elasticsearch API docs}
+ * Allows to copy documents from one index to another, optionally filtering the source documents by a query, changing the destination index settings, or fetching the documents from a remote cluster.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html | Elasticsearch API documentation}
  */
 export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ReindexResponse>
 export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ReindexResponse, unknown>>
diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts
index 9127d4a0f..3c12d0b7e 100644
--- a/src/api/api/reindex_rethrottle.ts
+++ b/src/api/api/reindex_rethrottle.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 
 /**
  * Changes the number of requests per second for a particular Reindex operation.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-reindex.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html | Elasticsearch API documentation} */ export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index 3081bdfa6..ef14b738c 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Allows to use the Mustache language to pre-render a search definition. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/render-search-template-api.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/render-search-template-api.html | Elasticsearch API documentation} */ export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index ccdfca6ac..26c5d5759 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -45,7 +45,7 @@ export default class Rollup { /** * Deletes an existing rollup job. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-delete-job.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-delete-job.html | Elasticsearch API documentation} */ async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -71,7 +71,7 @@ export default class Rollup { /** * Retrieves the configuration, stats, and status of rollup jobs. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-get-job.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-job.html | Elasticsearch API documentation} */ async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -105,7 +105,7 @@ export default class Rollup { /** * Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-get-rollup-caps.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-caps.html | Elasticsearch API documentation} */ async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -139,7 +139,7 @@ export default class Rollup { /** * Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored). - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-get-rollup-index-caps.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html | Elasticsearch API documentation} */ async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -165,7 +165,7 @@ export default class Rollup { /** * Creates a rollup job. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-put-job.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-put-job.html | Elasticsearch API documentation} */ async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -203,7 +203,7 @@ export default class Rollup { /** * Enables searching rolled-up data using the standard query DSL. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-search.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-search.html | Elasticsearch API documentation} */ async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> @@ -241,7 +241,7 @@ export default class Rollup { /** * Starts an existing, stopped rollup job. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-start-job.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-start-job.html | Elasticsearch API documentation} */ async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -267,7 +267,7 @@ export default class Rollup { /** * Stops an existing, started rollup job. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/rollup-stop-job.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-stop-job.html | Elasticsearch API documentation} */ async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index a002cef20..e27a59952 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Allows an arbitrary script to be executed and a result to be returned - * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/main/painless-execute-api.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html | Elasticsearch API documentation} */ export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts index 812526193..77e291799 100644 --- a/src/api/api/scroll.ts +++ b/src/api/api/scroll.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Allows to retrieve a large numbers of results from a single search request. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-request-body.html#request-body-search-scroll Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll | Elasticsearch API documentation} */ export default async function ScrollApi> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function ScrollApi> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/search.ts b/src/api/api/search.ts index e4d909e54..b28d29735 100644 --- a/src/api/api/search.ts +++ b/src/api/api/search.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Returns results matching a query. 
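  *
  * Editor's sketch, not generated code — a minimal match query, assuming a
  * configured `client`; the index and field names are illustrative:
  *
  *   const result = await client.search({
  *     index: 'my-index',
  *     query: { match: { title: 'test' } }
  *   })
  *   console.log(result.hits.hits)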
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-search.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html | Elasticsearch API documentation} */ export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts index 2ddb8749a..e825279f0 100644 --- a/src/api/api/search_application.ts +++ b/src/api/api/search_application.ts @@ -45,7 +45,7 @@ export default class SearchApplication { /** * Deletes a search application. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-search-application.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html | Elasticsearch API documentation} */ async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -71,7 +71,7 @@ export default class SearchApplication { /** * Delete a behavioral analytics collection. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-analytics-collection.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-analytics-collection.html | Elasticsearch API documentation} */ async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -97,7 +97,7 @@ export default class SearchApplication { /** * Returns the details about a search application. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-search-application.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-search-application.html | Elasticsearch API documentation} */ async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -123,7 +123,7 @@ export default class SearchApplication { /** * Returns the existing behavioral analytics collections. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/list-analytics-collection.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-analytics-collection.html | Elasticsearch API documentation} */ async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -157,7 +157,7 @@ export default class SearchApplication { /** * Returns the existing search applications. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/list-search-applications.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-search-applications.html | Elasticsearch API documentation} */ async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -184,7 +184,7 @@ export default class SearchApplication { /** * Creates a behavioral analytics event for existing collection. - * @see {@link http://todo.com/tbd Elasticsearch API docs} + * @see {@link http://todo.com/tbd | Elasticsearch API documentation} */ async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> @@ -210,7 +210,7 @@ export default class SearchApplication { /** * Creates or updates a search application. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-search-application.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html | Elasticsearch API documentation} */ async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -241,7 +241,7 @@ export default class SearchApplication { /** * Creates a behavioral analytics collection. 
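  *
  * Editor's sketch, not generated code — creating a collection needs only a
  * name (the value is an illustrative placeholder):
  *
  *   await client.searchApplication.putBehavioralAnalytics({ name: 'my-analytics' })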
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-analytics-collection.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-analytics-collection.html | Elasticsearch API documentation} */ async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -267,7 +267,7 @@ export default class SearchApplication { /** * Renders a query for given search application search parameters - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-application-render-query.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-render-query.html | Elasticsearch API documentation} */ async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> @@ -293,7 +293,7 @@ export default class SearchApplication { /** * Perform a search against a search application - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-application-search.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-search.html | Elasticsearch API documentation} */ async search> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async search> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index 9f8b0fdc4..5d3fbdbef 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Searches a vector tile for geospatial values. Returns results as a binary Mapbox vector tile. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-vector-tile-api.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-vector-tile-api.html | Elasticsearch API documentation} */ export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts index 7cf6a048d..eaf609e73 100644 --- a/src/api/api/search_shards.ts +++ b/src/api/api/search_shards.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Returns information about the indices and shards that a search request would be executed against. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-shards.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-shards.html | Elasticsearch API documentation} */ export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index 2ba27129d..f8a5c3548 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Allows to use the Mustache language to pre-render a search definition. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-template.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-template.html | Elasticsearch API documentation} */ export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 5ed863d2e..73b6853bb 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -45,7 +45,7 @@ export default class SearchableSnapshots { /** * Retrieve node-level cache statistics about searchable snapshots. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/searchable-snapshots-apis.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation} */ async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -79,7 +79,7 @@ export default class SearchableSnapshots { /** * Clear the cache of searchable snapshots. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/searchable-snapshots-apis.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -113,7 +113,7 @@ export default class SearchableSnapshots { /** * Mount a snapshot as a searchable index. 
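  *
  * Editor's sketch, not generated code — mounting an index out of a snapshot,
  * assuming a configured `client`; the repository, snapshot, and index names
  * are illustrative placeholders:
  *
  *   await client.searchableSnapshots.mount({
  *     repository: 'my-repository',
  *     snapshot: 'my-snapshot',
  *     index: 'my-index'
  *   })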
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/searchable-snapshots-api-mount-snapshot.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-mount-snapshot.html | Elasticsearch API documentation} */ async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -151,7 +151,7 @@ export default class SearchableSnapshots { /** * Retrieve shard-level statistics about searchable snapshots. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/searchable-snapshots-apis.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 4332002fe..c957d8c11 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -45,7 +45,7 @@ export default class Security { /** * Creates or updates the user profile on behalf of another user. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-activate-user-profile.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-activate-user-profile.html | Elasticsearch API documentation} */ async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -83,7 +83,7 @@ export default class Security { /** * Enables authentication as a user and retrieve information about the authenticated user. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-authenticate.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-authenticate.html | Elasticsearch API documentation} */ async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -110,7 +110,7 @@ export default class Security { /** * Updates the attributes of multiple existing API keys. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-bulk-update-api-keys.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-update-api-keys.html | Elasticsearch API documentation} */ async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> @@ -136,7 +136,7 @@ export default class Security { /** * Changes the passwords of users in the native realm and built-in users. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-change-password.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-change-password.html | Elasticsearch API documentation} */ async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -182,7 +182,7 @@ export default class Security { /** * Clear a subset or all entries from the API key cache. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-clear-api-key-cache.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-api-key-cache.html | Elasticsearch API documentation} */ async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -208,7 +208,7 @@ export default class Security { /** * Evicts application privileges from the native application privileges cache. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-clear-privilege-cache.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-privilege-cache.html | Elasticsearch API documentation} */ async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -234,7 +234,7 @@ export default class Security { /** * Evicts users from the user cache. Can completely clear the cache or evict specific users. 
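  *
  * Editor's sketch, not generated code — evicting cached users for a single
  * realm (the realm name is an illustrative placeholder):
  *
  *   await client.security.clearCachedRealms({ realms: 'default_native' })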
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-clear-cache.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-cache.html | Elasticsearch API documentation}
    */
  async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedRealmsResponse>
  async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedRealmsResponse, unknown>>
@@ -260,7 +260,7 @@ export default class Security {
   /**
    * Evicts roles from the native role cache.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-clear-role-cache.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-role-cache.html | Elasticsearch API documentation}
    */
  async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedRolesResponse>
  async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedRolesResponse, unknown>>
@@ -286,7 +286,7 @@ export default class Security {
   /**
    * Evicts tokens from the service account token caches.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-clear-service-token-caches.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-service-token-caches.html | Elasticsearch API documentation}
    */
  async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedServiceTokensResponse>
  async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedServiceTokensResponse, unknown>>
@@ -312,7 +312,7 @@ export default class Security {
   /**
    * Creates an API key for access without requiring basic authentication.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-create-api-key.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-api-key.html | Elasticsearch API documentation}
    */
  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateApiKeyResponse>
  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateApiKeyResponse, unknown>>
@@ -351,7 +351,7 @@ export default class Security {
   /**
    * Creates a cross-cluster API key for API key based remote cluster access.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-create-cross-cluster-api-key.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-cross-cluster-api-key.html | Elasticsearch API documentation}
    */
  async createCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
  async createCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
@@ -377,7 +377,7 @@ export default class Security {
   /**
    * Creates a service account token for access without requiring basic authentication.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-create-service-token.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-service-token.html | Elasticsearch API documentation}
    */
  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateServiceTokenResponse>
  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateServiceTokenResponse, unknown>>
@@ -410,7 +410,7 @@ export default class Security {
   /**
    * Removes application privileges.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-delete-privilege.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-privilege.html | Elasticsearch API documentation}
    */
  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeletePrivilegesResponse>
  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeletePrivilegesResponse, unknown>>
@@ -436,7 +436,7 @@ export default class Security {
   /**
    * Removes roles in the native realm.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-delete-role.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-role.html | Elasticsearch API documentation}
    */
  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleResponse>
  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteRoleResponse, unknown>>
@@ -462,7 +462,7 @@ export default class Security {
   /**
    * Removes role mappings.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-delete-role-mapping.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-role-mapping.html | Elasticsearch API documentation}
    */
  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleMappingResponse>
  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteRoleMappingResponse, unknown>>
@@ -488,7 +488,7 @@ export default class Security {
   /**
    * Deletes a service account token.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-delete-service-token.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-service-token.html | Elasticsearch API documentation}
    */
  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteServiceTokenResponse>
  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteServiceTokenResponse, unknown>>
@@ -514,7 +514,7 @@ export default class Security {
   /**
    * Deletes users from the native realm.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-delete-user.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-user.html | Elasticsearch API documentation}
    */
  async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteUserResponse>
  async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteUserResponse, unknown>>
@@ -540,7 +540,7 @@ export default class Security {
   /**
    * Disables users in the native realm.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-disable-user.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user.html | Elasticsearch API documentation}
    */
  async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserResponse>
  async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDisableUserResponse, unknown>>
@@ -566,7 +566,7 @@ export default class Security {
   /**
    * Disables a user profile so it's not visible in user profile searches.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-disable-user-profile.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user-profile.html | Elasticsearch API documentation}
    */
  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserProfileResponse>
  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDisableUserProfileResponse, unknown>>
@@ -592,7 +592,7 @@ export default class Security {
   /**
    * Enables users in the native realm.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-enable-user.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user.html | Elasticsearch API documentation}
    */
  async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserResponse>
  async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnableUserResponse, unknown>>
@@ -618,7 +618,7 @@ export default class Security {
   /**
    * Enables a user profile so it's visible in user profile searches.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-enable-user-profile.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user-profile.html | Elasticsearch API documentation}
    */
  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserProfileResponse>
  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnableUserProfileResponse, unknown>>
@@ -644,7 +644,7 @@ export default class Security {
   /**
    * Allows a kibana instance to configure itself to communicate with a secured elasticsearch cluster.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-kibana-enrollment.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-kibana-enrollment.html | Elasticsearch API documentation}
    */
  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnrollKibanaResponse>
  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnrollKibanaResponse, unknown>>
@@ -671,7 +671,7 @@ export default class Security {
   /**
    * Allows a new node to enroll to an existing cluster with security enabled.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-node-enrollment.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-node-enrollment.html | Elasticsearch API documentation}
    */
  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnrollNodeResponse>
  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnrollNodeResponse, unknown>>
@@ -698,7 +698,7 @@ export default class Security {
   /**
    * Retrieves information for one or more API keys.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-api-key.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-api-key.html | Elasticsearch API documentation}
    */
  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetApiKeyResponse>
  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetApiKeyResponse, unknown>>
@@ -725,7 +725,7 @@ export default class Security {
   /**
    * Retrieves the list of cluster privileges and index privileges that are available in this version of Elasticsearch.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-builtin-privileges.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-builtin-privileges.html | Elasticsearch API documentation}
    */
  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetBuiltinPrivilegesResponse>
  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetBuiltinPrivilegesResponse, unknown>>
@@ -752,7 +752,7 @@ export default class Security {
   /**
    * Retrieves application privileges.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-privileges.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-privileges.html | Elasticsearch API documentation}
    */
  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetPrivilegesResponse>
  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetPrivilegesResponse, unknown>>
@@ -789,7 +789,7 @@ export default class Security {
   /**
    * Retrieves roles in the native realm.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-role.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-role.html | Elasticsearch API documentation}
    */
  async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleResponse>
  async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetRoleResponse, unknown>>
@@ -823,7 +823,7 @@ export default class Security {
   /**
    * Retrieves role mappings.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-role-mapping.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-role-mapping.html | Elasticsearch API documentation}
    */
  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleMappingResponse>
  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetRoleMappingResponse, unknown>>
@@ -857,7 +857,7 @@ export default class Security {
   /**
    * Retrieves information about service accounts.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-service-accounts.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-service-accounts.html | Elasticsearch API documentation}
    */
  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceAccountsResponse>
  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetServiceAccountsResponse, unknown>>
@@ -894,7 +894,7 @@ export default class Security {
   /**
    * Retrieves information of all service credentials for a service account.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-service-credentials.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-service-credentials.html | Elasticsearch API documentation}
    */
  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceCredentialsResponse>
  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetServiceCredentialsResponse, unknown>>
@@ -920,7 +920,7 @@ export default class Security {
   /**
    * Creates a bearer token for access without requiring basic authentication.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-token.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-token.html | Elasticsearch API documentation}
    */
  async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetTokenResponse>
  async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetTokenResponse, unknown>>
@@ -959,7 +959,7 @@ export default class Security {
   /**
    * Retrieves information about users in the native realm and built-in users.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-user.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user.html | Elasticsearch API documentation}
    */
  async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserResponse>
  async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserResponse, unknown>>
@@ -993,7 +993,7 @@ export default class Security {
   /**
    * Retrieves security privileges for the logged in user.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-user-privileges.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-privileges.html | Elasticsearch API documentation}
    */
  async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserPrivilegesResponse>
  async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserPrivilegesResponse, unknown>>
@@ -1020,7 +1020,7 @@ export default class Security {
   /**
    * Retrieves user profiles for the given unique ID(s).
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-get-user-profile.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-profile.html | Elasticsearch API documentation}
    */
  async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserProfileResponse>
  async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserProfileResponse, unknown>>
@@ -1046,7 +1046,7 @@ export default class Security {
   /**
    * Creates an API key on behalf of another user.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-grant-api-key.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-grant-api-key.html | Elasticsearch API documentation}
    */
  async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGrantApiKeyResponse>
  async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGrantApiKeyResponse, unknown>>
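A hypothetical sketch (not part of the patch) of grantApiKey, assuming the calling user holds the grant_api_key privilege; node URL, credentials and key name are placeholders:

import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

// Create an API key on behalf of the user identified by these credentials.
const granted = await client.security.grantApiKey({
  grant_type: 'password',
  username: 'johndoe',
  password: 'changeme',
  api_key: { name: 'johndoe-key' }
})
console.log(granted.id, granted.api_key)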
@@ -1084,7 +1084,7 @@ export default class Security {
   /**
    * Determines whether the specified user has a specified list of privileges.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-has-privileges.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-has-privileges.html | Elasticsearch API documentation}
    */
  async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityHasPrivilegesResponse>
  async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityHasPrivilegesResponse, unknown>>
@@ -1130,7 +1130,7 @@ export default class Security {
   /**
    * Determines whether the users associated with the specified profile IDs have all the requested privileges.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-has-privileges-user-profile.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-has-privileges-user-profile.html | Elasticsearch API documentation}
    */
  async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityHasPrivilegesUserProfileResponse>
  async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityHasPrivilegesUserProfileResponse, unknown>>
@@ -1168,7 +1168,7 @@ export default class Security {
   /**
    * Invalidates one or more API keys.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-invalidate-api-key.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-api-key.html | Elasticsearch API documentation}
    */
  async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityInvalidateApiKeyResponse>
  async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityInvalidateApiKeyResponse, unknown>>
@@ -1207,7 +1207,7 @@ export default class Security {
   /**
    * Invalidates one or more access tokens or refresh tokens.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-invalidate-token.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-token.html | Elasticsearch API documentation}
    */
  async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityInvalidateTokenResponse>
  async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityInvalidateTokenResponse, unknown>>
@@ -1246,7 +1246,7 @@ export default class Security {
   /**
    * Exchanges an OpenID Connection authentication response message for an Elasticsearch access token and refresh token pair
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-oidc-authenticate.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-authenticate.html | Elasticsearch API documentation}
    */
  async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
  async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
@@ -1272,7 +1272,7 @@ export default class Security {
   /**
    * Invalidates a refresh token and access token that was generated from the OpenID Connect Authenticate API
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-oidc-logout.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-logout.html | Elasticsearch API documentation}
    */
  async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
  async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
@@ -1298,7 +1298,7 @@ export default class Security {
   /**
    * Creates an OAuth 2.0 authentication request as a URL string
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-oidc-prepare-authentication.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-prepare-authentication.html | Elasticsearch API documentation}
    */
  async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
  async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
@@ -1324,7 +1324,7 @@ export default class Security {
   /**
    * Adds or updates application privileges.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-put-privileges.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-privileges.html | Elasticsearch API documentation}
    */
  async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutPrivilegesResponse>
  async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutPrivilegesResponse, unknown>>
@@ -1355,7 +1355,7 @@ export default class Security {
   /**
    * Adds and updates roles in the native realm.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-put-role.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-role.html | Elasticsearch API documentation}
    */
  async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutRoleResponse>
  async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutRoleResponse, unknown>>
@@ -1393,7 +1393,7 @@ export default class Security {
   /**
    * Creates and updates role mappings.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-put-role-mapping.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-role-mapping.html | Elasticsearch API documentation}
    */
  async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutRoleMappingResponse>
  async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutRoleMappingResponse, unknown>>
@@ -1431,7 +1431,7 @@ export default class Security {
   /**
    * Adds and updates users in the native realm. These users are commonly referred to as native users.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-put-user.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-user.html | Elasticsearch API documentation}
    */
  async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutUserResponse>
  async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutUserResponse, unknown>>
@@ -1469,7 +1469,7 @@ export default class Security {
   /**
    * Retrieves information for API keys using a subset of query DSL
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-query-api-key.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-api-key.html | Elasticsearch API documentation}
    */
  async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityQueryApiKeysResponse>
  async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityQueryApiKeysResponse, unknown>>
@@ -1508,7 +1508,7 @@ export default class Security {
   /**
    * Exchanges a SAML Response message for an Elasticsearch access token and refresh token pair
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-authenticate.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-authenticate.html | Elasticsearch API documentation}
    */
  async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlAuthenticateResponse>
  async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlAuthenticateResponse, unknown>>
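A hypothetical sketch (not part of the patch) of queryApiKeys, which accepts a subset of query DSL; the matched key name and page size are placeholders:

import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

// Find up to ten API keys whose name matches "ingest".
const result = await client.security.queryApiKeys({
  query: { match: { name: 'ingest' } },
  size: 10
})
console.log(result.total, result.api_keys)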
@@ -1546,7 +1546,7 @@ export default class Security {
   /**
    * Verifies the logout response sent from the SAML IdP
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-complete-logout.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-complete-logout.html | Elasticsearch API documentation}
    */
  async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlCompleteLogoutResponse>
  async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlCompleteLogoutResponse, unknown>>
@@ -1584,7 +1584,7 @@ export default class Security {
   /**
    * Consumes a SAML LogoutRequest
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-invalidate.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-invalidate.html | Elasticsearch API documentation}
    */
  async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlInvalidateResponse>
  async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlInvalidateResponse, unknown>>
@@ -1622,7 +1622,7 @@ export default class Security {
   /**
    * Invalidates an access token and a refresh token that were generated via the SAML Authenticate API
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-logout.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-logout.html | Elasticsearch API documentation}
    */
  async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlLogoutResponse>
  async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlLogoutResponse, unknown>>
@@ -1660,7 +1660,7 @@ export default class Security {
   /**
    * Creates a SAML authentication request
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-prepare-authentication.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-prepare-authentication.html | Elasticsearch API documentation}
    */
  async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlPrepareAuthenticationResponse>
  async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlPrepareAuthenticationResponse, unknown>>
@@ -1699,7 +1699,7 @@ export default class Security {
   /**
    * Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-saml-sp-metadata.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-sp-metadata.html | Elasticsearch API documentation}
    */
  async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlServiceProviderMetadataResponse>
  async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlServiceProviderMetadataResponse, unknown>>
@@ -1725,7 +1725,7 @@ export default class Security {
   /**
    * Get suggestions for user profiles that match specified search criteria.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-suggest-user-profile.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-suggest-user-profile.html | Elasticsearch API documentation}
    */
  async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySuggestUserProfilesResponse>
  async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySuggestUserProfilesResponse, unknown>>
@@ -1764,7 +1764,7 @@ export default class Security {
   /**
    * Updates attributes of an existing API key.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-update-api-key.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-api-key.html | Elasticsearch API documentation}
    */
  async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityUpdateApiKeyResponse>
  async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityUpdateApiKeyResponse, unknown>>
@@ -1802,7 +1802,7 @@ export default class Security {
   /**
    * Updates attributes of an existing cross-cluster API key.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-update-cross-cluster-api-key.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-cross-cluster-api-key.html | Elasticsearch API documentation}
    */
  async updateCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
  async updateCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
@@ -1828,7 +1828,7 @@ export default class Security {
   /**
    * Update application specific data for the user profile of the given unique ID.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-update-user-profile-data.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-user-profile-data.html | Elasticsearch API documentation}
    */
  async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityUpdateUserProfileDataResponse>
  async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityUpdateUserProfileDataResponse, unknown>>
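A hypothetical sketch (not part of the patch) of createApiKey, one of the security APIs whose doc links were updated above; node URL, key name and expiration are placeholders:

import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const created = await client.security.createApiKey({
  name: 'my-ingest-key',
  expiration: '7d'
})
// The encoded value can be sent directly in an "Authorization: ApiKey ..." header.
console.log(created.encoded)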
diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts
index 69ae52852..3fc62d09a 100644
--- a/src/api/api/shutdown.ts
+++ b/src/api/api/shutdown.ts
@@ -45,7 +45,7 @@ export default class Shutdown {
   /**
    * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation}
    */
  async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ShutdownDeleteNodeResponse>
  async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownDeleteNodeResponse, unknown>>
@@ -71,7 +71,7 @@ export default class Shutdown {
   /**
    * Retrieve status of a node or nodes that are currently marked as shutting down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation}
    */
  async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ShutdownGetNodeResponse>
  async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownGetNodeResponse, unknown>>
@@ -105,7 +105,7 @@ export default class Shutdown {
   /**
    * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation}
    */
  async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ShutdownPutNodeResponse>
  async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownPutNodeResponse, unknown>>
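A hypothetical sketch (not part of the patch) of shutdown.putNode, marking a node for restart; node URL, node id and reason are placeholders, and the API is intended for ECE/ESS/ECK rather than direct use:

import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

// Mark node "node-1" as shutting down for a planned restart.
await client.shutdown.putNode({
  node_id: 'node-1',
  type: 'restart',
  reason: 'Planned maintenance'
})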
diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts
index e73f9233d..c84752269 100644
--- a/src/api/api/slm.ts
+++ b/src/api/api/slm.ts
@@ -45,7 +45,7 @@ export default class Slm {
   /**
    * Deletes an existing snapshot lifecycle policy.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-delete-policy.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-delete-policy.html | Elasticsearch API documentation}
    */
  async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmDeleteLifecycleResponse>
  async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmDeleteLifecycleResponse, unknown>>
@@ -71,7 +71,7 @@ export default class Slm {
   /**
    * Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-execute-lifecycle.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-execute-lifecycle.html | Elasticsearch API documentation}
    */
  async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmExecuteLifecycleResponse>
  async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmExecuteLifecycleResponse, unknown>>
@@ -97,7 +97,7 @@ export default class Slm {
   /**
    * Deletes any snapshots that are expired according to the policy's retention rules.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-execute-retention.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-execute-retention.html | Elasticsearch API documentation}
    */
  async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmExecuteRetentionResponse>
  async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmExecuteRetentionResponse, unknown>>
@@ -124,7 +124,7 @@ export default class Slm {
   /**
    * Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-get-policy.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-policy.html | Elasticsearch API documentation}
    */
  async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmGetLifecycleResponse>
  async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmGetLifecycleResponse, unknown>>
@@ -158,7 +158,7 @@ export default class Slm {
   /**
    * Returns global and policy-level statistics about actions taken by snapshot lifecycle management.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-get-stats.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-stats.html | Elasticsearch API documentation}
    */
  async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmGetStatsResponse>
  async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmGetStatsResponse, unknown>>
@@ -185,7 +185,7 @@ export default class Slm {
   /**
    * Retrieves the status of snapshot lifecycle management (SLM).
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-get-status.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-status.html | Elasticsearch API documentation}
    */
  async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmGetStatusResponse>
  async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmGetStatusResponse, unknown>>
@@ -212,7 +212,7 @@ export default class Slm {
   /**
    * Creates or updates a snapshot lifecycle policy.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-put-policy.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-put-policy.html | Elasticsearch API documentation}
    */
  async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmPutLifecycleResponse>
  async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmPutLifecycleResponse, unknown>>
@@ -250,7 +250,7 @@ export default class Slm {
   /**
    * Turns on snapshot lifecycle management (SLM).
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-start.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-start.html | Elasticsearch API documentation}
    */
  async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmStartResponse>
  async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmStartResponse, unknown>>
@@ -277,7 +277,7 @@ export default class Slm {
   /**
    * Turns off snapshot lifecycle management (SLM).
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/slm-api-stop.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-stop.html | Elasticsearch API documentation}
    */
  async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SlmStopResponse>
  async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmStopResponse, unknown>>
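A hypothetical sketch (not part of the patch) of slm.putLifecycle, registering a nightly policy; node URL, policy id, repository name and retention values are placeholders:

import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.slm.putLifecycle({
  policy_id: 'nightly-snapshots',
  name: '<nightly-snap-{now/d}>',
  schedule: '0 30 1 * * ?', // SLM cron: every day at 01:30
  repository: 'my-repo',
  retention: { expire_after: '30d' }
})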
diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts
index 09245f182..bdd5e9dbc 100644
--- a/src/api/api/snapshot.ts
+++ b/src/api/api/snapshot.ts
@@ -45,7 +45,7 @@ export default class Snapshot {
   /**
    * Removes stale data from repository.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/clean-up-snapshot-repo-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot-repo-api.html | Elasticsearch API documentation}
    */
  async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotCleanupRepositoryResponse>
  async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCleanupRepositoryResponse, unknown>>
@@ -71,7 +71,7 @@ export default class Snapshot {
   /**
    * Clones indices from one snapshot into another snapshot in the same repository.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
  async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotCloneResponse>
  async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCloneResponse, unknown>>
@@ -109,7 +109,7 @@ export default class Snapshot {
   /**
    * Creates a snapshot in a repository.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
  async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotCreateResponse>
  async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCreateResponse, unknown>>
@@ -147,7 +147,7 @@ export default class Snapshot {
   /**
    * Creates a repository.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
  async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotCreateRepositoryResponse>
  async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCreateRepositoryResponse, unknown>>
@@ -185,7 +185,7 @@ export default class Snapshot {
   /**
    * Deletes one or more snapshots.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
  async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotDeleteResponse>
  async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotDeleteResponse, unknown>>
@@ -211,7 +211,7 @@ export default class Snapshot {
   /**
    * Deletes a repository.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
  async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotDeleteRepositoryResponse>
  async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotDeleteRepositoryResponse, unknown>>
@@ -237,7 +237,7 @@ export default class Snapshot {
   /**
    * Returns information about a snapshot.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
  async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotGetResponse>
  async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotGetResponse, unknown>>
@@ -263,7 +263,7 @@ export default class Snapshot {
   /**
    * Returns information about a repository.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
  async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotGetRepositoryResponse>
  async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotGetRepositoryResponse, unknown>>
@@ -297,7 +297,7 @@ export default class Snapshot {
   /**
    * Analyzes a repository for correctness and performance
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
  async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
  async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
@@ -323,7 +323,7 @@ export default class Snapshot {
   /**
    * Restores a snapshot.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
  async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotRestoreResponse>
  async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotRestoreResponse, unknown>>
@@ -361,7 +361,7 @@ export default class Snapshot {
   /**
    * Returns information about the status of a snapshot.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
  async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotStatusResponse>
  async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotStatusResponse, unknown>>
@@ -398,7 +398,7 @@ export default class Snapshot {
   /**
    * Verifies a repository.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/modules-snapshots.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
  async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotVerifyRepositoryResponse>
  async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotVerifyRepositoryResponse, unknown>>
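A hypothetical sketch (not part of the patch) of snapshot.create, taking a snapshot and blocking until it completes; node URL, repository and snapshot names are placeholders:

import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const snap = await client.snapshot.create({
  repository: 'my-repo',
  snapshot: 'snap-1',
  wait_for_completion: true
})
console.log(snap)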
diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts
index b3434a35c..05e597545 100644
--- a/src/api/api/sql.ts
+++ b/src/api/api/sql.ts
@@ -45,7 +45,7 @@ export default class Sql {
   /**
    * Clears the SQL cursor
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/clear-sql-cursor-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-sql-cursor-api.html | Elasticsearch API documentation}
    */
  async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlClearCursorResponse>
  async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlClearCursorResponse, unknown>>
@@ -83,7 +83,7 @@ export default class Sql {
   /**
    * Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-async-sql-search-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-async-sql-search-api.html | Elasticsearch API documentation}
    */
  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlDeleteAsyncResponse>
  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlDeleteAsyncResponse, unknown>>
@@ -109,7 +109,7 @@ export default class Sql {
   /**
    * Returns the current status and available results for an async SQL search or stored synchronous SQL search
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-async-sql-search-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-api.html | Elasticsearch API documentation}
    */
  async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlGetAsyncResponse>
  async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlGetAsyncResponse, unknown>>
@@ -135,7 +135,7 @@ export default class Sql {
   /**
    * Returns the current status of an async SQL search or a stored synchronous SQL search
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-async-sql-search-status-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-status-api.html | Elasticsearch API documentation}
    */
  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlGetAsyncStatusResponse>
  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlGetAsyncStatusResponse, unknown>>
@@ -161,7 +161,7 @@ export default class Sql {
   /**
    * Executes a SQL request
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/sql-search-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/sql-search-api.html | Elasticsearch API documentation}
    */
  async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlQueryResponse>
  async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlQueryResponse, unknown>>
@@ -200,7 +200,7 @@ export default class Sql {
   /**
    * Translates SQL into Elasticsearch queries
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/sql-translate-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/sql-translate-api.html | Elasticsearch API documentation}
    */
  async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlTranslateResponse>
  async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlTranslateResponse, unknown>>
diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts
index 333be7894..198146ad1 100644
--- a/src/api/api/ssl.ts
+++ b/src/api/api/ssl.ts
@@ -45,7 +45,7 @@ export default class Ssl {
   /**
    * Retrieves information about the X.509 certificates used to encrypt communications in the cluster.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/security-api-ssl.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-ssl.html | Elasticsearch API documentation}
    */
  async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SslCertificatesResponse>
  async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SslCertificatesResponse, unknown>>
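A hypothetical sketch (not part of the patch) of sql.query; node URL, table name and fetch size are placeholders:

import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const result = await client.sql.query({
  query: 'SELECT author, COUNT(*) AS books FROM library GROUP BY author',
  fetch_size: 5
})
// Rows come back positionally, described by the columns array.
console.log(result.columns, result.rows)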
(this: That, params: T.SynonymsDeleteSynonymRequest | TB.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_synonyms/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + /** + * Deletes a synonym rule in a synonym set + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonym-rule.html | Elasticsearch API documentation} + */ + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['set_id', 'rule_id'] const querystring: Record = {} const body = undefined - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } const method = 'DELETE' - const path = `/_synonyms/${encodeURIComponent(params.synonyms_set.toString())}` + const path = `/_synonyms/${encodeURIComponent(params.set_id.toString())}/${encodeURIComponent(params.rule_id.toString())}` return await this.transport.request({ path, method, querystring, body }, options) } /** * Retrieves a synonym set - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-synonyms.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonyms-set.html | Elasticsearch API documentation} */ - async get (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async get (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async get (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async get (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['synonyms_set'] + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined - params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } const method = 'GET' - const path = `/_synonyms/${encodeURIComponent(params.synonyms_set.toString())}` + const path = `/_synonyms/${encodeURIComponent(params.id.toString())}` return await this.transport.request({ path, method, querystring, body }, options) } /** - * Creates or updates a synonyms set - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-synonyms.html Elasticsearch API docs} + * Retrieves a synonym rule from a synonym set + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonym-rule.html | Elasticsearch API documentation} + */ + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['set_id', 'rule_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_synonyms/${encodeURIComponent(params.set_id.toString())}/${encodeURIComponent(params.rule_id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + /** + * Retrieves a summary of all defined synonym sets + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-synonyms-sets.html | Elasticsearch API documentation} */ - async put (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async put (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async put (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async put (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['synonyms_set'] + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -112,12 +164,89 @@ export default class Synonyms { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method 
= 'GET' + const path = '/_synonyms' + return await this.transport.request({ path, method, querystring, body }, options) + } + + /** + * Creates or updates a synonyms set + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonyms-set.html | Elasticsearch API documentation} + */ + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['synonyms_set'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_synonyms/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + /** + * Creates or updates a synonym rule in a synonym set + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonym-rule.html | Elasticsearch API documentation} + */ + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['set_id', 'rule_id'] + const acceptedBody: string[] = ['synonyms'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } const method = 'PUT' - const path = `/_synonyms/${encodeURIComponent(params.synonyms_set.toString())}` + const path = `/_synonyms/${encodeURIComponent(params.set_id.toString())}/${encodeURIComponent(params.rule_id.toString())}` return await this.transport.request({ path, method, querystring, body }, options) } } diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index 36d317804..a3cdcb968 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -45,7 +45,7 @@ export default class Tasks { /** * Cancels a task, if it can be cancelled through an API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/tasks.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -79,7 +79,7 @@ export default class Tasks { /** * Returns information about a task. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/tasks.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -105,7 +105,7 @@ export default class Tasks { /** * Returns a list of tasks. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/tasks.html Elasticsearch API docs} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index 1bff21cb8..db54a8c40 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -38,8 +38,8 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * The terms enum API can be used to discover terms in the index that begin with the provided string. It is designed for low-latency look-ups used in auto-complete scenarios. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/search-terms-enum.html Elasticsearch API docs} + * The terms enum API can be used to discover terms in the index that begin with the provided string. It is designed for low-latency look-ups used in auto-complete scenarios. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-terms-enum.html | Elasticsearch API documentation}
  */
 export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts
index 2c3f11472..6ffee1fa1 100644
--- a/src/api/api/termvectors.ts
+++ b/src/api/api/termvectors.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 /**
  * Returns information and statistics about terms in the fields of a particular document.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-termvectors.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html | Elasticsearch API documentation}
  */
 export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts
index 59e153bcc..ca44736c7 100644
--- a/src/api/api/text_structure.ts
+++ b/src/api/api/text_structure.ts
@@ -45,7 +45,7 @@ export default class TextStructure {
   /**
    * Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/find-structure.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-structure.html | Elasticsearch API documentation}
    */
   async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts
index 104619bd7..cd472a416 100644
--- a/src/api/api/transform.ts
+++ b/src/api/api/transform.ts
@@ -45,7 +45,7 @@ export default class Transform {
   /**
    * Deletes an existing transform.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/delete-transform.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-transform.html | Elasticsearch API documentation}
    */
   async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -71,7 +71,7 @@ export default class Transform {
   /**
    * Retrieves configuration information for transforms.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-transform.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform.html | Elasticsearch API documentation}
    */
   async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -105,7 +105,7 @@ export default class Transform {
   /**
    * Retrieves usage information for transforms.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/get-transform-stats.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform-stats.html | Elasticsearch API documentation}
    */
   async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -131,7 +131,7 @@ export default class Transform {
   /**
    * Previews a transform.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/preview-transform.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/preview-transform.html | Elasticsearch API documentation}
    */
   async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
   async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
@@ -177,7 +177,7 @@ export default class Transform {
   /**
    * Instantiates a transform.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/put-transform.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-transform.html | Elasticsearch API documentation}
    */
   async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -215,7 +215,7 @@ export default class Transform {
   /**
    * Resets an existing transform.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/reset-transform.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/reset-transform.html | Elasticsearch API documentation}
    */
   async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -241,7 +241,7 @@ export default class Transform {
   /**
    * Schedules now a transform.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/schedule-now-transform.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/schedule-now-transform.html | Elasticsearch API documentation}
    */
   async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -267,7 +267,7 @@ export default class Transform {
   /**
    * Starts one or more transforms.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/start-transform.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-transform.html | Elasticsearch API documentation}
    */
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -293,7 +293,7 @@ export default class Transform {
   /**
    * Stops one or more transforms.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/stop-transform.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-transform.html | Elasticsearch API documentation}
    */
   async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -319,7 +319,7 @@ export default class Transform {
   /**
    * Updates certain properties of a transform.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/update-transform.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-transform.html | Elasticsearch API documentation}
    */
   async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -357,7 +357,7 @@ export default class Transform {
   /**
    * Upgrades all transforms.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/upgrade-transforms.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/upgrade-transforms.html | Elasticsearch API documentation}
    */
   async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/update.ts b/src/api/api/update.ts
index f5170e64c..d15e007a0 100644
--- a/src/api/api/update.ts
+++ b/src/api/api/update.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 /**
  * Updates a document with a script or partial document.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-update.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html | Elasticsearch API documentation}
  */
 export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
 export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts
index cdf1cac3a..5549386e8 100644
--- a/src/api/api/update_by_query.ts
+++ b/src/api/api/update_by_query.ts
@@ -38,9 +38,8 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * Performs an update on every document in the index without changing the source,
-for example to pick up a mapping change.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-update-by-query.html Elasticsearch API docs}
+ * Performs an update on every document in the index without changing the source, for example to pick up a mapping change.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html | Elasticsearch API documentation}
  */
 export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts
index c91c4b0fa..370acd548 100644
--- a/src/api/api/update_by_query_rethrottle.ts
+++ b/src/api/api/update_by_query_rethrottle.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }
 /**
  * Changes the number of requests per second for a particular Update By Query operation.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/docs-update-by-query.html Elasticsearch API docs}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html | Elasticsearch API documentation}
  */
 export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise
 export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts
index a026dbc1f..09aca4615 100644
--- a/src/api/api/watcher.ts
+++ b/src/api/api/watcher.ts
@@ -45,7 +45,7 @@ export default class Watcher {
   /**
    * Acknowledges a watch, manually throttling the execution of the watch's actions.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-ack-watch.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-ack-watch.html | Elasticsearch API documentation}
    */
   async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -78,7 +78,7 @@ export default class Watcher {
   /**
    * Activates a currently inactive watch.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-activate-watch.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-activate-watch.html | Elasticsearch API documentation}
    */
   async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -104,7 +104,7 @@ export default class Watcher {
   /**
    * Deactivates a currently active watch.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-deactivate-watch.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-deactivate-watch.html | Elasticsearch API documentation}
    */
   async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -130,7 +130,7 @@ export default class Watcher {
   /**
    * Removes a watch from Watcher.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-delete-watch.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-delete-watch.html | Elasticsearch API documentation}
    */
   async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -156,7 +156,7 @@ export default class Watcher {
   /**
    * Forces the execution of a stored watch.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-execute-watch.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-execute-watch.html | Elasticsearch API documentation}
    */
   async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -202,7 +202,7 @@ export default class Watcher {
   /**
    * Retrieve settings for the watcher system index
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-get-settings.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-get-settings.html | Elasticsearch API documentation}
    */
   async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
   async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
@@ -228,7 +228,7 @@ export default class Watcher {
   /**
    * Retrieves a watch by its ID.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-get-watch.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-get-watch.html | Elasticsearch API documentation}
    */
   async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -254,7 +254,7 @@ export default class Watcher {
   /**
    * Creates a new watch, or updates an existing one.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-put-watch.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-put-watch.html | Elasticsearch API documentation}
    */
   async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -292,7 +292,7 @@ export default class Watcher {
   /**
    * Retrieves stored watches.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-query-watches.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-query-watches.html | Elasticsearch API documentation}
    */
   async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -331,7 +331,7 @@ export default class Watcher {
   /**
    * Starts Watcher if it is not already running.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-start.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-start.html | Elasticsearch API documentation}
    */
   async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -358,7 +358,7 @@ export default class Watcher {
   /**
    * Retrieves the current Watcher metrics.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-stats.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-stats.html | Elasticsearch API documentation}
    */
   async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -392,7 +392,7 @@ export default class Watcher {
   /**
    * Stops Watcher if it is running.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-stop.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-stop.html | Elasticsearch API documentation}
    */
   async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -419,7 +419,7 @@ export default class Watcher {
   /**
    * Update settings for the watcher system index
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/watcher-api-update-settings.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-update-settings.html | Elasticsearch API documentation}
    */
   async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
   async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts
index eab5e38c1..c288e8091 100644
--- a/src/api/api/xpack.ts
+++ b/src/api/api/xpack.ts
@@ -45,7 +45,7 @@ export default class Xpack {
   /**
    * Retrieves information about the installed X-Pack features.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/info-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/info-api.html | Elasticsearch API documentation}
    */
   async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise>
@@ -72,7 +72,7 @@ export default class Xpack {
   /**
    * Retrieves usage information about the installed X-Pack features.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/main/usage-api.html Elasticsearch API docs}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/usage-api.html | Elasticsearch API documentation}
    */
   async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise
   async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise>
diff --git a/src/api/index.ts b/src/api/index.ts
index 46efbd366..ac64d4a36 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -75,6 +75,7 @@ import NodesApi from './api/nodes'
 import openPointInTimeApi from './api/open_point_in_time'
 import pingApi from './api/ping'
 import putScriptApi from './api/put_script'
+import QueryRulesetApi from './api/query_ruleset'
 import rankEvalApi from './api/rank_eval'
 import reindexApi from './api/reindex'
 import reindexRethrottleApi from './api/reindex_rethrottle'
@@ -157,6 +158,7 @@ export default interface API {
   openPointInTime: typeof openPointInTimeApi
   ping: typeof pingApi
   putScript: typeof putScriptApi
+  queryRuleset: QueryRulesetApi
   rankEval: typeof rankEvalApi
   reindex: typeof reindexApi
   reindexRethrottle: typeof reindexRethrottleApi
@@ -209,6 +211,7 @@ const kMigration = Symbol('Migration')
 const kMl = Symbol('Ml')
 const kMonitoring = Symbol('Monitoring')
 const kNodes = Symbol('Nodes')
+const kQueryRuleset = Symbol('QueryRuleset')
 const kRollup = Symbol('Rollup')
 const kSearchApplication = Symbol('SearchApplication')
 const kSearchableSnapshots = Symbol('SearchableSnapshots')
@@ -246,6 +249,7 @@ export default class API {
   [kMl]: symbol | null
   [kMonitoring]: symbol | null
   [kNodes]: symbol | null
+  [kQueryRuleset]: symbol | null
   [kRollup]: symbol | null
   [kSearchApplication]: symbol | null
   [kSearchableSnapshots]: symbol | null
@@ -282,6 +286,7 @@ export default class API {
     this[kMl] = null
     this[kMonitoring] = null
     this[kNodes] = null
+    this[kQueryRuleset] = null
    this[kRollup] = null
     this[kSearchApplication] = null
     this[kSearchableSnapshots] = null
@@ -406,6 +411,9 @@ Object.defineProperties(API.prototype, {
   nodes: {
     get () { return this[kNodes] === null ? (this[kNodes] = new NodesApi(this.transport)) : this[kNodes] }
   },
+  queryRuleset: {
+    get () { return this[kQueryRuleset] === null ? (this[kQueryRuleset] = new QueryRulesetApi(this.transport)) : this[kQueryRuleset] }
+  },
   rollup: {
     get () { return this[kRollup] === null ? (this[kRollup] = new RollupApi(this.transport)) : this[kRollup] }
   },
diff --git a/src/api/types.ts b/src/api/types.ts
index 7a209917e..be16483e6 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -562,6 +562,7 @@ export interface HealthReportRequest extends RequestBase {
 export interface HealthReportResponse {
   cluster_name: string
   indicators: HealthReportIndicators
+  status?: HealthReportIndicatorHealthStatus
 }
 
 export interface HealthReportShardsAvailabilityIndicator extends HealthReportBaseIndicator {
@@ -1402,7 +1403,7 @@ export type SearchHighlighterOrder = 'score'
 
 export type SearchHighlighterTagsSchema = 'styled'
 
-export type SearchHighlighterType = 'plain' | 'fvh' | 'unified'| string
+export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string
 
 export interface SearchHit {
   _index: IndexName
@@ -1951,6 +1952,8 @@ export interface SpecUtilsBaseNode {
   transport_address: TransportAddress
 }
 
+export type SpecUtilsPipeSeparatedFlags = T | string
+
 export type SpecUtilsStringified = T | string
 
 export interface AcknowledgedResponseBase {
@@ -2459,7 +2462,7 @@ export interface ScriptField {
   ignore_failure?: boolean
 }
 
-export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java'| string
+export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | string
 
 export interface ScriptSort {
   order?: SortOrder
@@ -3000,11 +3003,20 @@ export interface AggregationsCompositeAggregation extends AggregationsBucketAggr
   sources?: Record[]
 }
 
+export interface AggregationsCompositeAggregationBase {
+  field?: Field
+  missing_bucket?: boolean
+  missing_order?: AggregationsMissingOrder
+  script?: Script
+  value_type?: AggregationsValueType
+  order?: SortOrder
+}
+
 export interface AggregationsCompositeAggregationSource {
-  terms?: AggregationsTermsAggregation
-  histogram?: AggregationsHistogramAggregation
-  date_histogram?: AggregationsDateHistogramAggregation
-  geotile_grid?: AggregationsGeoTileGridAggregation
+  terms?: AggregationsCompositeTermsAggregation
+  histogram?: AggregationsCompositeHistogramAggregation
+  date_histogram?: AggregationsCompositeDateHistogramAggregation
+  geotile_grid?: AggregationsCompositeGeoTileGridAggregation
 }
 
@@ -3013,6 +3025,26 @@ export interface AggregationsCompositeBucketKeys extends AggregationsMultiBucket
 
 export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys & { [property: string]: AggregationsAggregate | AggregationsCompositeAggregateKey | long }
 
+export interface AggregationsCompositeDateHistogramAggregation extends AggregationsCompositeAggregationBase {
+  format?: string
+  calendar_interval?: DurationLarge
+  fixed_interval?: DurationLarge
+  offset?: Duration
+  time_zone?: TimeZone
+}
+
+export interface AggregationsCompositeGeoTileGridAggregation extends AggregationsCompositeAggregationBase {
+  precision?: integer
+  bounds?: GeoBounds
+}
+
+export interface AggregationsCompositeHistogramAggregation extends AggregationsCompositeAggregationBase {
+  interval: double
+}
+
+export interface AggregationsCompositeTermsAggregation extends AggregationsCompositeAggregationBase {
+}
+
 export interface AggregationsCumulativeCardinalityAggregate extends AggregationsAggregateBase {
   value: long
   value_as_string?: string
@@ -4092,7 +4124,7 @@ export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnaly
 
 export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase {
   type: 'asciifolding'
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
 }
 
 export type AnalysisCharFilter = string | AnalysisCharFilterDefinition
@@ -4172,7 +4204,7 @@ export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase {
   max_gram?: integer
   min_gram?: integer
   side?: AnalysisEdgeNGramSide
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
 }
 
 export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
@@ -4187,7 +4219,7 @@ export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
   type: 'elision'
   articles?: string[]
   articles_path?: string
-  articles_case?: boolean
+  articles_case?: SpecUtilsStringified
 }
 
 export interface AnalysisFingerprintAnalyzer {
@@ -4411,14 +4443,14 @@ export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase {
 export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase {
   type: 'multiplexer'
   filters: string[]
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
 }
 
 export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase {
   type: 'ngram'
   max_gram?: integer
   min_gram?: integer
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
 }
 
 export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
@@ -4456,11 +4488,11 @@ export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNor
 
 export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase {
   type: 'path_hierarchy'
-  buffer_size: integer
+  buffer_size: SpecUtilsStringified
   delimiter: string
   replacement: string
-  reverse: boolean
-  skip: integer
+  reverse: SpecUtilsStringified
+  skip: SpecUtilsStringified
 }
 
 export interface AnalysisPatternAnalyzer {
@@ -4475,7 +4507,7 @@ export interface AnalysisPatternAnalyzer {
 export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase {
   type: 'pattern_capture'
   patterns: string[]
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
 }
 
 export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase {
@@ -4688,7 +4720,7 @@ export interface AnalysisWordDelimiterGraphTokenFilt
   generate_number_parts?: boolean
   generate_word_parts?: boolean
   ignore_keywords?: boolean
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
   protected_words?: string[]
   protected_words_path?: string
   split_on_case_change?: boolean
@@ -4705,7 +4737,7 @@ export interface AnalysisWordDelimiterTokenFilterBas
   catenate_words?: boolean
   generate_number_parts?: boolean
   generate_word_parts?: boolean
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
   protected_words?: string[]
   protected_words_path?: string
   split_on_case_change?: boolean
@@ -5377,7 +5409,7 @@ export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeature
 }
 
 export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase {
-  distance?: Distance
+  distance: Distance
   distance_type?: GeoDistanceType
   validation_method?: QueryDslGeoValidationMethod
 }
@@ -5711,6 +5743,7 @@ export interface QueryDslQueryContainer {
   range?: Partial>
   rank_feature?: QueryDslRankFeatureQuery
   regexp?: Partial>
+  rule_query?: QueryDslRuleQuery
   script?: QueryDslScriptQuery
   script_score?: QueryDslScriptScoreQuery
   shape?: QueryDslShapeQuery
@@ -5727,7 +5760,7 @@ export interface QueryDslQueryContainer {
   term?: Partial>
   terms?: QueryDslTermsQuery
   terms_set?: Partial>
-  text_expansion?: QueryDslTextExpansionQuery | Field
+  text_expansion?: Partial>
   wildcard?: Partial>
   wrapper?: QueryDslWrapperQuery
   type?: QueryDslTypeQuery
@@ -5809,6 +5842,12 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase {
   value: string
 }
 
+export interface QueryDslRuleQuery extends QueryDslQueryBase {
+  organic: QueryDslQueryContainer
+  ruleset_id: Id
+  match_criteria: any
+}
+
 export interface QueryDslScriptQuery extends QueryDslQueryBase {
   script: Script
 }
@@ -5835,9 +5874,9 @@ export interface QueryDslShapeQueryKeys extends QueryDslQueryBase {
 
 export type QueryDslShapeQuery = QueryDslShapeQueryKeys & { [property: string]: QueryDslShapeFieldQuery | boolean | float | string }
 
-export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL'
+export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'NOT' | 'OR' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL'
 
-export type QueryDslSimpleQueryStringFlags = QueryDslSimpleQueryStringFlag | string
+export type QueryDslSimpleQueryStringFlags = SpecUtilsPipeSeparatedFlags
 
 export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase {
   analyzer?: string
@@ -5942,7 +5981,6 @@ export interface QueryDslTermsSetQuery extends QueryDslQueryBase {
 }
 
 export interface QueryDslTextExpansionQuery extends QueryDslQueryBase {
-  value: Field
   model_id: string
   model_text: string
 }
@@ -9624,7 +9662,7 @@ export interface IndicesIndexSettingsKeys {
   queries?: IndicesQueries
   similarity?: IndicesSettingsSimilarity
   mapping?: IndicesMappingLimitSettings
-  'indexing.slowlog'?: IndicesSlowlogSettings
+  'indexing.slowlog'?: IndicesIndexingSlowlogSettings
   indexing_pressure?: IndicesIndexingPressure
   store?: IndicesStorage
 }
@@ -9641,7 +9679,7 @@ export interface IndicesIndexSettingsAnalysis {
 
 export interface IndicesIndexSettingsLifecycle {
   name: Name
-  indexing_complete?: boolean
+  indexing_complete?: SpecUtilsStringified
   origination_date?: long
   parse_origination_date?: boolean
   step?: IndicesIndexSettingsLifecycleStep
@@ -9702,6 +9740,17 @@ export interface IndicesIndexingPressureMemory {
   limit?: integer
 }
 
+export interface IndicesIndexingSlowlogSettings {
+  level?: string
+  source?: integer
+  reformat?: boolean
+  threshold?: IndicesIndexingSlowlogTresholds
+}
+
+export interface IndicesIndexingSlowlogTresholds {
+  index?: IndicesSlowlogTresholdLevels
+}
+
 export interface IndicesMappingLimitSettings {
   coerce?: boolean
   total_fields?: IndicesMappingLimitSettingsTotalFields
@@ -9855,7 +9904,6 @@ export interface IndicesSlowlogTresholdLevels {
 
 export interface IndicesSlowlogTresholds {
   query?: IndicesSlowlogTresholdLevels
   fetch?: IndicesSlowlogTresholdLevels
-  index?: IndicesSlowlogTresholdLevels
 }
 
 export interface IndicesSoftDeletes {
@@ -9868,7 +9916,7 @@ export interface IndicesStorage {
   allow_mmap?: boolean
 }
 
-export type IndicesStorageType = 'fs' | '' | 'niofs' | 'mmapfs' | 'hybridfs'| string
+export type IndicesStorageType = 'fs' | '' | 'niofs' | 'mmapfs' | 'hybridfs' | string
 
 export interface IndicesTemplateMapping {
   aliases: Record
@@ -10061,9 +10109,9 @@ export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase
 export interface IndicesDataStreamsStatsDataStreamsStatsItem {
   backing_indices: integer
   data_stream: Name
+  maximum_timestamp: EpochTime
   store_size?: ByteSize
   store_size_bytes: integer
-  maximum_timestamp: EpochTime
 }
 
 export interface IndicesDataStreamsStatsRequest extends RequestBase {
@@ -10075,9 +10123,9 @@ export interface IndicesDataStreamsStatsResponse {
   _shards: ShardStatistics
   backing_indices: integer
   data_stream_count: integer
+  data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[]
   total_store_sizes?: ByteSize
   total_store_size_bytes: integer
-  data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[]
 }
 
 export interface IndicesDeleteRequest extends RequestBase {
@@ -10438,8 +10486,8 @@ export interface IndicesModifyDataStreamAction {
 }
 
 export interface IndicesModifyDataStreamIndexAndDataStreamAction {
-  index: IndexName
   data_stream: DataStreamName
+  index: IndexName
 }
 
 export interface IndicesModifyDataStreamRequest extends RequestBase {
@@ -10689,6 +10737,11 @@ export interface IndicesReloadSearchAnalyzersReloadDetails {
   reloaded_node_ids: string[]
 }
 
+export interface IndicesReloadSearchAnalyzersReloadResult {
+  reload_details: IndicesReloadSearchAnalyzersReloadDetails[]
+  _shards: ShardStatistics
+}
+
 export interface IndicesReloadSearchAnalyzersRequest extends RequestBase {
   index: Indices
   allow_no_indices?: boolean
@@ -10696,10 +10749,7 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase {
   ignore_unavailable?: boolean
 }
 
-export interface IndicesReloadSearchAnalyzersResponse {
-  reload_details: IndicesReloadSearchAnalyzersReloadDetails[]
-  _shards: ShardStatistics
-}
+export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult
 
 export interface IndicesResolveIndexRequest extends RequestBase {
   name: Names
@@ -11479,7 +11529,7 @@ export interface IngestUppercaseProcessor extends IngestProcessorBase {
 
 export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
   field: Field
-  ignore_missing?: boolean
+  ignre_missing?: boolean
   target_field?: Field
 }
 
@@ -12382,6 +12432,7 @@ export interface MlDiscoveryNode {
 
 export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over'
 
 export interface MlFillMaskInferenceOptions {
+  mask_token?: string
   num_top_classes?: integer
   tokenization?: MlTokenizationConfigContainer
   results_field?: string
@@ -15143,6 +15194,69 @@ export interface NodesUsageResponseBase extends NodesNodesResponseBase {
   nodes: Record
 }
 
+export interface QueryRulesetQueryRule {
+  rule_id: Id
+  type: QueryRulesetQueryRuleType
+  criteria: QueryRulesetQueryRuleCriteria[]
+  actions: QueryRulesetQueryRuleActions
+}
+
+export interface QueryRulesetQueryRuleActions {
+  ids?: Id[]
+  docs?: QueryDslPinnedDoc[]
+}
+
+export interface QueryRulesetQueryRuleCriteria {
+  type: QueryRulesetQueryRuleCriteriaType
+  metadata: string
+  values?: any[]
+}
+
+export type QueryRulesetQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte'
+
+export type QueryRulesetQueryRuleType = 'pinned'
+
+export interface QueryRulesetQueryRuleset {
+  ruleset_id: Id
+  rules: QueryRulesetQueryRule[]
+}
+
+export interface QueryRulesetDeleteRequest extends RequestBase {
+  ruleset_id: Id
+}
+
+export type QueryRulesetDeleteResponse = AcknowledgedResponseBase
+
+export interface QueryRulesetGetRequest extends RequestBase {
+  ruleset_id: Id
+}
+
+export type QueryRulesetGetResponse = QueryRulesetQueryRuleset
+
+export interface QueryRulesetListQueryRulesetListItem {
+  ruleset_id: Id
+  rules_count: integer
+}
+
+export interface QueryRulesetListRequest extends RequestBase {
+  from?: integer
+  size?: integer
+}
+
+export interface QueryRulesetListResponse {
+  count: long
+  results: QueryRulesetListQueryRulesetListItem[]
+}
+
+export interface QueryRulesetPutRequest extends RequestBase {
+  ruleset_id: Id
+  query_ruleset?: QueryRulesetQueryRuleset
+}
+
+export interface QueryRulesetPutResponse {
+  result: Result
+}
+
 export interface RollupDateHistogramGrouping {
   delay?: Duration
   field: Field
@@ -15515,7 +15629,7 @@ export interface SecurityClusterNode {
   name: Name
 }
 
-export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client'| string
+export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client' | string
 
 export interface SecurityCreatedStatus {
   created: boolean
@@ -15540,7 +15654,7 @@ export interface SecurityGlobalPrivilege {
 }
 
 export type SecurityGrantType = 'password' | 'access_token'
 
-export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write'| string
+export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string
 
 export interface SecurityIndicesPrivileges {
   field_security?: SecurityFieldSecurity
@@ -16947,6 +17061,87 @@ export interface SslCertificatesRequest extends RequestBase {
 
 export type SslCertificatesResponse = SslCertificatesCertificateInformation[]
 
+export interface SynonymsSynonymRule {
+  id?: Id
+  synonyms: SynonymsSynonymString
+}
+
+export interface SynonymsSynonymRuleRead {
+  id: Id
+  synonyms: SynonymsSynonymString
+}
+
+export type SynonymsSynonymString = string
+
+export interface SynonymsSynonymsUpdateResult {
+  result: Result
+  reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult
+}
+
+export interface SynonymsDeleteSynonymRequest extends RequestBase {
+  id: Id
+}
+
+export type SynonymsDeleteSynonymResponse = AcknowledgedResponseBase
+
+export interface SynonymsDeleteSynonymRuleRequest extends RequestBase {
+  set_id: Id
+  rule_id: Id
+}
+
+export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult
+
+export interface SynonymsGetSynonymRequest extends RequestBase {
+  id: Id
+  from?: integer
+  size?: integer
+}
+
+export interface SynonymsGetSynonymResponse {
+  count: integer
+  synonyms_set: SynonymsSynonymRuleRead[]
+}
+
+export interface SynonymsGetSynonymRuleRequest extends RequestBase {
+  set_id: Id
+  rule_id: Id
+}
+
+export type SynonymsGetSynonymRuleResponse = SynonymsSynonymRuleRead
+
+export interface SynonymsGetSynonymsSetsRequest extends RequestBase {
+  from?: integer
+  size?: integer
+}
+
+export interface SynonymsGetSynonymsSetsResponse {
+  count: integer
+  results: SynonymsGetSynonymsSetsSynonymsSetItem[]
+}
+
+export interface SynonymsGetSynonymsSetsSynonymsSetItem {
+  synonyms_set: Id
+  count: integer
+}
+
+export interface SynonymsPutSynonymRequest extends RequestBase {
+  id: Id
+  synonyms_set: SynonymsSynonymRule[]
+}
+
+export interface SynonymsPutSynonymResponse {
+  result: Result
+  reload_analyzers_details: IndicesReloadSearchAnalyzersReloadDetails
+}
+
+export interface SynonymsPutSynonymRuleRequest extends RequestBase {
+  set_id: Id
+  rule_id: Id
+  synonyms: SynonymsSynonymString[]
+}
+
+export type SynonymsPutSynonymRuleResponse = SynonymsSynonymsUpdateResult
+
 export type TasksGroupBy = 'nodes' | 'parents' | 'none'
 
 export interface TasksNodeTasks {
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index f6685a039..4b8b98344 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -583,6 +583,7 @@ export interface HealthReportRequest extends RequestBase {
 export interface HealthReportResponse {
   cluster_name: string
   indicators: HealthReportIndicators
+  status?: HealthReportIndicatorHealthStatus
 }
 
 export interface HealthReportShardsAvailabilityIndicator extends HealthReportBaseIndicator {
@@ -1457,7 +1458,7 @@ export type SearchHighlighterOrder = 'score'
 
 export type SearchHighlighterTagsSchema = 'styled'
 
-export type SearchHighlighterType = 'plain' | 'fvh' | 'unified'| string
+export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string
 
 export interface SearchHit {
   _index: IndexName
@@ -2024,6 +2025,8 @@ export interface SpecUtilsBaseNode {
   transport_address: TransportAddress
 }
 
+export type SpecUtilsPipeSeparatedFlags = T | string
+
 export type SpecUtilsStringified = T | string
 
 export interface AcknowledgedResponseBase {
@@ -2532,7 +2535,7 @@ export interface ScriptField {
   ignore_failure?: boolean
 }
 
-export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java'| string
+export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | string
 
 export interface ScriptSort {
   order?: SortOrder
@@ -3073,11 +3076,20 @@ export interface AggregationsCompositeAggregation extends AggregationsBucketAggr
   sources?: Record[]
 }
 
+export interface AggregationsCompositeAggregationBase {
+  field?: Field
+  missing_bucket?: boolean
+  missing_order?: AggregationsMissingOrder
+  script?: Script
+  value_type?: AggregationsValueType
+  order?: SortOrder
+}
+
 export interface AggregationsCompositeAggregationSource {
-  terms?: AggregationsTermsAggregation
-  histogram?: AggregationsHistogramAggregation
-  date_histogram?: AggregationsDateHistogramAggregation
-  geotile_grid?: AggregationsGeoTileGridAggregation
+  terms?: AggregationsCompositeTermsAggregation
+  histogram?: AggregationsCompositeHistogramAggregation
+  date_histogram?: AggregationsCompositeDateHistogramAggregation
+  geotile_grid?: AggregationsCompositeGeoTileGridAggregation
 }
 
 export interface AggregationsCompositeBucketKeys extends AggregationsMultiBucketBase {
@@ -3086,6 +3098,26 @@ export interface AggregationsCompositeBucketKeys extends AggregationsMultiBucket
 
 export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys & { [property: string]: AggregationsAggregate | AggregationsCompositeAggregateKey | long }
 
+export interface AggregationsCompositeDateHistogramAggregation extends AggregationsCompositeAggregationBase {
+  format?: string
+  calendar_interval?: DurationLarge
+  fixed_interval?: DurationLarge
+  offset?: Duration
+  time_zone?: TimeZone
+}
+
+export interface AggregationsCompositeGeoTileGridAggregation extends AggregationsCompositeAggregationBase {
+  precision?: integer
+  bounds?: GeoBounds
+}
+
+export interface AggregationsCompositeHistogramAggregation extends AggregationsCompositeAggregationBase {
+  interval: double
+}
+
+export interface AggregationsCompositeTermsAggregation extends AggregationsCompositeAggregationBase {
+}
+
 export interface AggregationsCumulativeCardinalityAggregate extends AggregationsAggregateBase {
   value: long
   value_as_string?: string
@@ -4165,7 +4197,7 @@ export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnaly
 
 export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase {
   type: 'asciifolding'
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
 }
 
 export type AnalysisCharFilter = string | AnalysisCharFilterDefinition
@@ -4245,7 +4277,7 @@ export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase {
   max_gram?: integer
   min_gram?: integer
   side?: AnalysisEdgeNGramSide
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
 }
 
 export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
@@ -4260,7 +4292,7 @@ export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
   type: 'elision'
   articles?: string[]
   articles_path?: string
-  articles_case?: boolean
+  articles_case?: SpecUtilsStringified
 }
 
 export interface AnalysisFingerprintAnalyzer {
@@ -4484,14 +4516,14 @@ export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase {
 export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase {
   type: 'multiplexer'
   filters: string[]
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
 }
 
 export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase {
   type: 'ngram'
   max_gram?: integer
   min_gram?: integer
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
 }
 
 export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
@@ -4529,11 +4561,11 @@ export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNor
 
 export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase {
   type: 'path_hierarchy'
-  buffer_size: integer
+  buffer_size: SpecUtilsStringified
   delimiter: string
   replacement: string
-  reverse: boolean
-  skip: integer
+  reverse: SpecUtilsStringified
+  skip: SpecUtilsStringified
 }
 
 export interface AnalysisPatternAnalyzer {
@@ -4548,7 +4580,7 @@ export interface AnalysisPatternAnalyzer {
 export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase {
   type: 'pattern_capture'
   patterns: string[]
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
 }
 
 export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase {
@@ -4568,9 +4600,9 @@ export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBa
 export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase {
   type: 'pattern'
-  flags: string
-  group: integer
-  pattern: string
+  flags?: string
+  group?: integer
+  pattern?: string
 }
 
 export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff'
@@ -4761,7 +4793,7 @@ export interface AnalysisWordDelimiterGraphTokenFilt
   generate_number_parts?: boolean
   generate_word_parts?: boolean
   ignore_keywords?: boolean
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
   protected_words?: string[]
   protected_words_path?: string
   split_on_case_change?: boolean
@@ -4778,7 +4810,7 @@ export interface AnalysisWordDelimiterTokenFilterBas
   catenate_words?: boolean
   generate_number_parts?: boolean
   generate_word_parts?: boolean
-  preserve_original?: boolean
+  preserve_original?: SpecUtilsStringified
   protected_words?: string[]
   protected_words_path?: string
   split_on_case_change?: boolean
@@ -5450,7 +5482,7 @@ export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeature
 }
 
 export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase {
-  distance?: Distance
+  distance: Distance
   distance_type?: GeoDistanceType
   validation_method?: QueryDslGeoValidationMethod
 }
@@ -5784,6 +5816,7 @@ export interface QueryDslQueryContainer {
   range?: Partial>
   rank_feature?: QueryDslRankFeatureQuery
   regexp?: Partial>
+  rule_query?: QueryDslRuleQuery
   script?: QueryDslScriptQuery
   script_score?: QueryDslScriptScoreQuery
   shape?: QueryDslShapeQuery
@@ -5800,7 +5833,7 @@ export interface QueryDslQueryContainer {
   term?: Partial>
   terms?: QueryDslTermsQuery
   terms_set?: Partial>
-  text_expansion?: QueryDslTextExpansionQuery | Field
+  text_expansion?: Partial>
   wildcard?: Partial>
   wrapper?: QueryDslWrapperQuery
   type?: QueryDslTypeQuery
@@ -5882,6 +5915,12 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase {
   value: string
 }
 
+export interface QueryDslRuleQuery extends QueryDslQueryBase {
+  organic: QueryDslQueryContainer
+  ruleset_id: Id
+  match_criteria: any
+}
+
 export interface QueryDslScriptQuery extends QueryDslQueryBase {
   script: Script
 }
@@ -5908,9 +5947,9 @@ export interface QueryDslShapeQueryKeys extends QueryDslQueryBase {
 
 export type QueryDslShapeQuery = QueryDslShapeQueryKeys & { [property: string]: QueryDslShapeFieldQuery | boolean | float | string }
 
-export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'OR' | 'NOT' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL'
+export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'NOT' | 'OR' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL'
 
-export type QueryDslSimpleQueryStringFlags = QueryDslSimpleQueryStringFlag | string
+export type QueryDslSimpleQueryStringFlags = SpecUtilsPipeSeparatedFlags
 
 export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase {
   analyzer?: string
@@ -6015,7 +6054,6 @@ export interface QueryDslTermsSetQuery extends QueryDslQueryBase {
 }
 
 export interface QueryDslTextExpansionQuery extends QueryDslQueryBase {
-  value: Field
   model_id: string
   model_text: string
 }
@@ -9747,7 +9785,7 @@ export interface IndicesIndexSettingsKeys {
   queries?: IndicesQueries
   similarity?: IndicesSettingsSimilarity
   mapping?: IndicesMappingLimitSettings
-  'indexing.slowlog'?: IndicesSlowlogSettings
+  'indexing.slowlog'?:
IndicesIndexingSlowlogSettings indexing_pressure?: IndicesIndexingPressure store?: IndicesStorage } @@ -9764,7 +9802,7 @@ export interface IndicesIndexSettingsAnalysis { export interface IndicesIndexSettingsLifecycle { name: Name - indexing_complete?: boolean + indexing_complete?: SpecUtilsStringified origination_date?: long parse_origination_date?: boolean step?: IndicesIndexSettingsLifecycleStep @@ -9825,6 +9863,17 @@ export interface IndicesIndexingPressureMemory { limit?: integer } +export interface IndicesIndexingSlowlogSettings { + level?: string + source?: integer + reformat?: boolean + threshold?: IndicesIndexingSlowlogTresholds +} + +export interface IndicesIndexingSlowlogTresholds { + index?: IndicesSlowlogTresholdLevels +} + export interface IndicesMappingLimitSettings { coerce?: boolean total_fields?: IndicesMappingLimitSettingsTotalFields @@ -9978,7 +10027,6 @@ export interface IndicesSlowlogTresholdLevels { export interface IndicesSlowlogTresholds { query?: IndicesSlowlogTresholdLevels fetch?: IndicesSlowlogTresholdLevels - index?: IndicesSlowlogTresholdLevels } export interface IndicesSoftDeletes { @@ -9991,7 +10039,7 @@ export interface IndicesStorage { allow_mmap?: boolean } -export type IndicesStorageType = 'fs' | '' | 'niofs' | 'mmapfs' | 'hybridfs'| string +export type IndicesStorageType = 'fs' | '' | 'niofs' | 'mmapfs' | 'hybridfs' | string export interface IndicesTemplateMapping { aliases: Record @@ -10193,9 +10241,9 @@ export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase export interface IndicesDataStreamsStatsDataStreamsStatsItem { backing_indices: integer data_stream: Name + maximum_timestamp: EpochTime store_size?: ByteSize store_size_bytes: integer - maximum_timestamp: EpochTime } export interface IndicesDataStreamsStatsRequest extends RequestBase { @@ -10207,9 +10255,9 @@ export interface IndicesDataStreamsStatsResponse { _shards: ShardStatistics backing_indices: integer data_stream_count: integer + data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] total_store_sizes?: ByteSize total_store_size_bytes: integer - data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] } export interface IndicesDeleteRequest extends RequestBase { @@ -10571,8 +10619,8 @@ export interface IndicesModifyDataStreamAction { } export interface IndicesModifyDataStreamIndexAndDataStreamAction { - index: IndexName data_stream: DataStreamName + index: IndexName } export interface IndicesModifyDataStreamRequest extends RequestBase { @@ -10841,6 +10889,11 @@ export interface IndicesReloadSearchAnalyzersReloadDetails { reloaded_node_ids: string[] } +export interface IndicesReloadSearchAnalyzersReloadResult { + reload_details: IndicesReloadSearchAnalyzersReloadDetails[] + _shards: ShardStatistics +} + export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { index: Indices allow_no_indices?: boolean @@ -10848,10 +10901,7 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { ignore_unavailable?: boolean } -export interface IndicesReloadSearchAnalyzersResponse { - reload_details: IndicesReloadSearchAnalyzersReloadDetails[] - _shards: ShardStatistics -} +export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult export interface IndicesResolveIndexRequest extends RequestBase { name: Names @@ -11650,7 +11700,7 @@ export interface IngestUppercaseProcessor extends IngestProcessorBase { export interface IngestUrlDecodeProcessor extends IngestProcessorBase { field: Field - ignore_missing?: boolean + 
ignre_missing?: boolean target_field?: Field } @@ -12563,6 +12613,7 @@ export interface MlDiscoveryNode { export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over' export interface MlFillMaskInferenceOptions { + mask_token?: string num_top_classes?: integer tokenization?: MlTokenizationConfigContainer results_field?: string @@ -15438,6 +15489,70 @@ export interface NodesUsageResponseBase extends NodesNodesResponseBase { nodes: Record } +export interface QueryRulesetQueryRule { + rule_id: Id + type: QueryRulesetQueryRuleType + criteria: QueryRulesetQueryRuleCriteria[] + actions: QueryRulesetQueryRuleActions +} + +export interface QueryRulesetQueryRuleActions { + ids?: Id[] + docs?: QueryDslPinnedDoc[] +} + +export interface QueryRulesetQueryRuleCriteria { + type: QueryRulesetQueryRuleCriteriaType + metadata: string + values?: any[] +} + +export type QueryRulesetQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' + +export type QueryRulesetQueryRuleType = 'pinned' + +export interface QueryRulesetQueryRuleset { + ruleset_id: Id + rules: QueryRulesetQueryRule[] +} + +export interface QueryRulesetDeleteRequest extends RequestBase { + ruleset_id: Id +} + +export type QueryRulesetDeleteResponse = AcknowledgedResponseBase + +export interface QueryRulesetGetRequest extends RequestBase { + ruleset_id: Id +} + +export type QueryRulesetGetResponse = QueryRulesetQueryRuleset + +export interface QueryRulesetListQueryRulesetListItem { + ruleset_id: Id + rules_count: integer +} + +export interface QueryRulesetListRequest extends RequestBase { + from?: integer + size?: integer +} + +export interface QueryRulesetListResponse { + count: long + results: QueryRulesetListQueryRulesetListItem[] +} + +export interface QueryRulesetPutRequest extends RequestBase { + ruleset_id: Id + /** @deprecated The use of the 'body' key has been deprecated, use 'query_ruleset' instead. 
*/ + body?: QueryRulesetQueryRuleset +} + +export interface QueryRulesetPutResponse { + result: Result +} + export interface RollupDateHistogramGrouping { delay?: Duration field: Field @@ -15823,7 +15938,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client'| string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client' | string export interface SecurityCreatedStatus { created: boolean @@ -15848,7 +15963,7 @@ export interface SecurityGlobalPrivilege { export type SecurityGrantType = 'password' | 'access_token' -export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write'| string +export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity @@ -17347,6 +17462,93 @@ export interface SslCertificatesRequest extends RequestBase { export type SslCertificatesResponse = SslCertificatesCertificateInformation[] +export interface SynonymsSynonymRule { + id?: Id + synonyms: SynonymsSynonymString +} + +export interface SynonymsSynonymRuleRead { + id: Id + synonyms: SynonymsSynonymString +} + +export type SynonymsSynonymString = string + +export interface SynonymsSynonymsUpdateResult { + result: Result + reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult +} + +export interface SynonymsDeleteSynonymRequest extends RequestBase { + id: Id +} + +export type SynonymsDeleteSynonymResponse = AcknowledgedResponseBase + +export interface SynonymsDeleteSynonymRuleRequest extends RequestBase { + set_id: Id + rule_id: Id +} + +export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult + +export interface 
SynonymsGetSynonymRequest extends RequestBase { + id: Id + from?: integer + size?: integer +} + +export interface SynonymsGetSynonymResponse { + count: integer + synonyms_set: SynonymsSynonymRuleRead[] +} + +export interface SynonymsGetSynonymRuleRequest extends RequestBase { + set_id: Id + rule_id: Id +} + +export type SynonymsGetSynonymRuleResponse = SynonymsSynonymRuleRead + +export interface SynonymsGetSynonymsSetsRequest extends RequestBase { + from?: integer + size?: integer +} + +export interface SynonymsGetSynonymsSetsResponse { + count: integer + results: SynonymsGetSynonymsSetsSynonymsSetItem[] +} + +export interface SynonymsGetSynonymsSetsSynonymsSetItem { + synonyms_set: Id + count: integer +} + +export interface SynonymsPutSynonymRequest extends RequestBase { + id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + synonyms_set: SynonymsSynonymRule[] + } +} + +export interface SynonymsPutSynonymResponse { + result: Result + reload_analyzers_details: IndicesReloadSearchAnalyzersReloadDetails +} + +export interface SynonymsPutSynonymRuleRequest extends RequestBase { + set_id: Id + rule_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + synonyms: SynonymsSynonymString[] + } +} + +export type SynonymsPutSynonymRuleResponse = SynonymsSynonymsUpdateResult + export type TasksGroupBy = 'nodes' | 'parents' | 'none' export interface TasksNodeTasks { From c4c3be17a50bb1c0e8251e2fa2595a1411b9efe8 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 17 Aug 2023 11:41:58 -0500 Subject: [PATCH 240/647] Bump 8.9.1 on main (#1972) --- docs/changelog.asciidoc | 11 +++++++++++ package.json | 4 ++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 911c3cc3c..3e339a3f7 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,17 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.9.1 + +[discrete] +==== Fixes + +[discrete] +===== Upgrade Transport https://github.com/elastic/elasticsearch-js/pull/1968[#1968] + +Upgrades `@elastic/transport` to the latest patch release to fix https://github.com/elastic/elastic-transport-js/pull/69[a bug] that could cause the process to exit when handling malformed `HEAD` requests. 
+ [discrete] === 8.9.0 diff --git a/package.json b/package.json index 5f9ef5d04..9df0610ca 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.9.0", - "versionCanary": "8.9.0-canary.0", + "version": "8.9.1", + "versionCanary": "8.9.1-canary.1", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From ad86306a472aa9c8c370fea731c77bb833f38bd1 Mon Sep 17 00:00:00 2001 From: Nigel Small Date: Mon, 21 Aug 2023 19:51:32 +0100 Subject: [PATCH 241/647] Updated project metadata to use group name (#1979) --- docs/examples/proxy/package.json | 2 +- package.json | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/examples/proxy/package.json b/docs/examples/proxy/package.json index bfe19ae84..cdbeea15e 100644 --- a/docs/examples/proxy/package.json +++ b/docs/examples/proxy/package.json @@ -8,7 +8,7 @@ "test": "standard" }, "keywords": [], - "author": "Tomas Della Vedova", + "author": "Elastic Client Library Maintainers", "license": "Apache-2.0", "dependencies": { "@elastic/elasticsearch": "^8.0.0" diff --git a/package.json b/package.json index 9df0610ca..753cf56b7 100644 --- a/package.json +++ b/package.json @@ -32,11 +32,7 @@ ], "contributors": [ { - "name": "Tomas Della Vedova", - "company": "Elastic BV" - }, - { - "name": "Josh Mock", + "name": "Elastic Client Library Maintainers", "company": "Elastic BV" } ], From 8d5e75d795f9df7b470c8e29d18d75204cf7ad35 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 22 Aug 2023 10:39:51 -0500 Subject: [PATCH 242/647] Fix branch name checker for automatic codegen script (#1980) --- .ci/make.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.ci/make.sh b/.ci/make.sh index adf79ef23..7f890cb2b 100755 --- a/.ci/make.sh +++ b/.ci/make.sh @@ -65,11 +65,11 @@ case $CMD in if [ -v "$VERSION" ] || [[ -z "$VERSION" ]]; then # fall back to branch name or `main` if no VERSION is set branch_name=$(git rev-parse --abbrev-ref HEAD) - if [[ "$branch_name" =~ ^\d+\.\d+ ]]; then - echo -e "\033[36;1mTARGET: codegen -> No VERSION found, using branch name: \`$VERSION\`\033[0m" + if [[ "$branch_name" =~ ^[0-9]+\.[0-9]+ ]]; then + echo -e "\033[36;1mTARGET: codegen -> No VERSION argument found, using branch name: \`$branch_name\`\033[0m" VERSION="$branch_name" else - echo -e "\033[36;1mTARGET: codegen -> No VERSION found, using \`main\`\033[0m" + echo -e "\033[36;1mTARGET: codegen -> No VERSION argument found, using \`main\`\033[0m" VERSION="main" fi fi From 56b4a380c156aa7c34be5c028422d93130d74239 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 24 Aug 2023 01:04:02 +0930 Subject: [PATCH 243/647] Auto-generated code for main (#1978) --- src/api/types.ts | 2 +- src/api/typesWithBodyKey.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/types.ts b/src/api/types.ts index be16483e6..93a57fa12 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -11529,7 +11529,7 @@ export interface IngestUppercaseProcessor extends IngestProcessorBase { export interface IngestUrlDecodeProcessor extends IngestProcessorBase { field: Field - ignre_missing?: boolean + ignore_missing?: boolean target_field?: Field } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 4b8b98344..c79b0fbba 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -11700,7 +11700,7 @@ export interface IngestUppercaseProcessor extends IngestProcessorBase { export interface 
IngestUrlDecodeProcessor extends IngestProcessorBase { field: Field - ignre_missing?: boolean + ignore_missing?: boolean target_field?: Field } From 9da7c44bb012e1ff7f1d3b656ea83462f477cc51 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Fri, 25 Aug 2023 00:23:40 +0930 Subject: [PATCH 244/647] Auto-generated code for main (#1984) --- docs/reference.asciidoc | 18 ++++++------ src/api/api/indices.ts | 16 +++++------ src/api/types.ts | 56 ++++++++++++++++++------------------- src/api/typesWithBodyKey.ts | 56 ++++++++++++++++++------------------- 4 files changed, 73 insertions(+), 73 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 5d35bb0bc..85475327b 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -3335,9 +3335,9 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== delete_data_lifecycle -Deletes the data lifecycle of the selected data streams. +Deletes the data stream lifecycle of the selected data streams. -{ref}/dlm-delete-lifecycle.html[Endpoint documentation] +{ref}/data-streams-delete-lifecycle.html[Endpoint documentation] [source,ts] ---- client.indices.deleteDataLifecycle({ name }) @@ -3347,7 +3347,7 @@ client.indices.deleteDataLifecycle({ name }) ==== Arguments * *Request (object):* -** *`name` (string | string[])*: A list of data streams of which the data lifecycle will be deleted; use `*` to get all data streams +** *`name` (string | string[])*: A list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open) ** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master ** *`timeout` (Optional, string | -1 | 0)*: Explicit timestamp for the document @@ -3544,9 +3544,9 @@ client.indices.existsTemplate({ name }) [discrete] ==== explain_data_lifecycle -Retrieves information about the index's current DLM lifecycle, such as any potential encountered error, time since creation etc. +Retrieves information about the index's current data stream lifecycle, such as any potential encountered error, time since creation etc. -{ref}/dlm-explain-lifecycle.html[Endpoint documentation] +{ref}/data-streams-explain-lifecycle.html[Endpoint documentation] [source,ts] ---- client.indices.explainDataLifecycle({ index }) @@ -3701,9 +3701,9 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== get_data_lifecycle -Returns the data lifecycle of the selected data streams. +Returns the data stream lifecycle of the selected data streams. -{ref}/dlm-get-lifecycle.html[Endpoint documentation] +{ref}/data-streams-get-lifecycle.html[Endpoint documentation] [source,ts] ---- client.indices.getDataLifecycle({ name }) @@ -3995,9 +3995,9 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_data_lifecycle -Updates the data lifecycle of the selected data streams. +Updates the data stream lifecycle of the selected data streams. 
-{ref}/dlm-put-lifecycle.html[Endpoint documentation] +{ref}/data-streams-put-lifecycle.html[Endpoint documentation] [source,ts] ---- client.indices.putDataLifecycle({ name }) diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 4ef0eac17..2dcf18270 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -371,8 +371,8 @@ export default class Indices { } /** - * Deletes the data lifecycle of the selected data streams. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-delete-lifecycle.html | Elasticsearch API documentation} + * Deletes the data stream lifecycle of the selected data streams. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-lifecycle.html | Elasticsearch API documentation} */ async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -643,8 +643,8 @@ export default class Indices { } /** - * Retrieves information about the index's current DLM lifecycle, such as any potential encountered error, time since creation etc. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-explain-lifecycle.html | Elasticsearch API documentation} + * Retrieves information about the index's current data stream lifecycle, such as any potential encountered error, time since creation etc. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-explain-lifecycle.html | Elasticsearch API documentation} */ async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -829,8 +829,8 @@ export default class Indices { } /** - * Returns the data lifecycle of the selected data streams. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-get-lifecycle.html | Elasticsearch API documentation} + * Returns the data stream lifecycle of the selected data streams. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle.html | Elasticsearch API documentation} */ async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1225,8 +1225,8 @@ export default class Indices { } /** - * Updates the data lifecycle of the selected data streams. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-put-lifecycle.html | Elasticsearch API documentation} + * Updates the data stream lifecycle of the selected data streams. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-lifecycle.html | Elasticsearch API documentation} */ async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/types.ts b/src/api/types.ts index 93a57fa12..74f1227a2 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -8232,7 +8232,7 @@ export interface ClusterComponentTemplateSummary { settings?: Record mappings?: MappingTypeMapping aliases?: Record - lifecycle?: IndicesDataLifecycleWithRollover + lifecycle?: IndicesDataStreamLifecycleWithRollover } export interface ClusterAllocationExplainAllocationDecision { @@ -9496,15 +9496,6 @@ export interface IndicesCacheQueries { enabled: boolean } -export interface IndicesDataLifecycle { - data_retention?: Duration -} - -export interface IndicesDataLifecycleWithRollover { - data_retention?: Duration - rollover?: IndicesDlmRolloverConditions -} - export interface IndicesDataStream { _meta?: Metadata allow_custom_routing?: boolean @@ -9512,7 +9503,7 @@ export interface IndicesDataStream { hidden: boolean ilm_policy?: Name indices: IndicesDataStreamIndex[] - lifecycle?: IndicesDataLifecycleWithRollover + lifecycle?: IndicesDataStreamLifecycleWithRollover name: DataStreamName replicated?: boolean status: HealthStatus @@ -9526,15 +9517,11 @@ export interface IndicesDataStreamIndex { index_uuid: Uuid } -export interface IndicesDataStreamTimestampField { - name: Field -} - -export interface IndicesDataStreamVisibility { - hidden?: boolean +export interface IndicesDataStreamLifecycle { + data_retention?: Duration } -export interface IndicesDlmRolloverConditions { +export interface IndicesDataStreamLifecycleRolloverConditions { min_age?: Duration max_age?: string min_docs?: long @@ -9547,6 +9534,19 @@ export interface IndicesDlmRolloverConditions { max_primary_shard_docs?: long } +export interface IndicesDataStreamLifecycleWithRollover { + data_retention?: Duration + rollover?: IndicesDataStreamLifecycleRolloverConditions +} + +export interface IndicesDataStreamTimestampField { + name: Field +} + +export interface IndicesDataStreamVisibility { + hidden?: boolean +} + export interface IndicesDownsampleConfig { fixed_interval: DurationLarge } @@ -9701,7 +9701,7 @@ export interface IndicesIndexState { settings?: IndicesIndexSettings defaults?: IndicesIndexSettings data_stream?: DataStreamName - lifecycle?: IndicesDataLifecycle + lifecycle?: IndicesDataStreamLifecycle } export interface IndicesIndexTemplate { @@ -9724,7 +9724,7 @@ export interface IndicesIndexTemplateSummary { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings - lifecycle?: IndicesDataLifecycleWithRollover + lifecycle?: IndicesDataStreamLifecycleWithRollover } export interface IndicesIndexVersioning { @@ -10238,14 +10238,14 @@ export interface IndicesExistsTemplateRequest extends RequestBase { export type IndicesExistsTemplateResponse = boolean -export interface IndicesExplainDataLifecycleDataLifecycleExplain { +export interface IndicesExplainDataLifecycleDataStreamLifecycleExplain { index: IndexName - managed_by_dlm: boolean + managed_by_lifecycle: boolean index_creation_date_millis?: EpochTime time_since_index_creation?: Duration rollover_date_millis?: EpochTime 
time_since_rollover?: Duration - lifecycle?: IndicesDataLifecycleWithRollover + lifecycle?: IndicesDataStreamLifecycleWithRollover generation_time?: Duration error?: string } @@ -10257,7 +10257,7 @@ export interface IndicesExplainDataLifecycleRequest extends RequestBase { } export interface IndicesExplainDataLifecycleResponse { - indices: Record + indices: Record } export interface IndicesFieldUsageStatsFieldSummary { @@ -10377,9 +10377,9 @@ export interface IndicesGetAliasRequest extends RequestBase { export type IndicesGetAliasResponse = Record -export interface IndicesGetDataLifecycleDataStreamLifecycle { +export interface IndicesGetDataLifecycleDataStreamWithLifecycle { name: DataStreamName - lifecycle?: IndicesDataLifecycle + lifecycle?: IndicesDataStreamLifecycle } export interface IndicesGetDataLifecycleRequest extends RequestBase { @@ -10389,7 +10389,7 @@ export interface IndicesGetDataLifecycleRequest extends RequestBase { } export interface IndicesGetDataLifecycleResponse { - data_streams: IndicesGetDataLifecycleDataStreamLifecycle[] + data_streams: IndicesGetDataLifecycleDataStreamWithLifecycle[] } export interface IndicesGetDataStreamRequest extends RequestBase { @@ -10545,7 +10545,7 @@ export interface IndicesPutIndexTemplateIndexTemplateMapping { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings - lifecycle?: IndicesDataLifecycle + lifecycle?: IndicesDataStreamLifecycle } export interface IndicesPutIndexTemplateRequest extends RequestBase { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index c79b0fbba..a1710a897 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -8321,7 +8321,7 @@ export interface ClusterComponentTemplateSummary { settings?: Record mappings?: MappingTypeMapping aliases?: Record - lifecycle?: IndicesDataLifecycleWithRollover + lifecycle?: IndicesDataStreamLifecycleWithRollover } export interface ClusterAllocationExplainAllocationDecision { @@ -9619,15 +9619,6 @@ export interface IndicesCacheQueries { enabled: boolean } -export interface IndicesDataLifecycle { - data_retention?: Duration -} - -export interface IndicesDataLifecycleWithRollover { - data_retention?: Duration - rollover?: IndicesDlmRolloverConditions -} - export interface IndicesDataStream { _meta?: Metadata allow_custom_routing?: boolean @@ -9635,7 +9626,7 @@ export interface IndicesDataStream { hidden: boolean ilm_policy?: Name indices: IndicesDataStreamIndex[] - lifecycle?: IndicesDataLifecycleWithRollover + lifecycle?: IndicesDataStreamLifecycleWithRollover name: DataStreamName replicated?: boolean status: HealthStatus @@ -9649,15 +9640,11 @@ export interface IndicesDataStreamIndex { index_uuid: Uuid } -export interface IndicesDataStreamTimestampField { - name: Field -} - -export interface IndicesDataStreamVisibility { - hidden?: boolean +export interface IndicesDataStreamLifecycle { + data_retention?: Duration } -export interface IndicesDlmRolloverConditions { +export interface IndicesDataStreamLifecycleRolloverConditions { min_age?: Duration max_age?: string min_docs?: long @@ -9670,6 +9657,19 @@ export interface IndicesDlmRolloverConditions { max_primary_shard_docs?: long } +export interface IndicesDataStreamLifecycleWithRollover { + data_retention?: Duration + rollover?: IndicesDataStreamLifecycleRolloverConditions +} + +export interface IndicesDataStreamTimestampField { + name: Field +} + +export interface IndicesDataStreamVisibility { + hidden?: boolean +} + export interface IndicesDownsampleConfig { 
fixed_interval: DurationLarge } @@ -9824,7 +9824,7 @@ export interface IndicesIndexState { settings?: IndicesIndexSettings defaults?: IndicesIndexSettings data_stream?: DataStreamName - lifecycle?: IndicesDataLifecycle + lifecycle?: IndicesDataStreamLifecycle } export interface IndicesIndexTemplate { @@ -9847,7 +9847,7 @@ export interface IndicesIndexTemplateSummary { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings - lifecycle?: IndicesDataLifecycleWithRollover + lifecycle?: IndicesDataStreamLifecycleWithRollover } export interface IndicesIndexVersioning { @@ -10371,14 +10371,14 @@ export interface IndicesExistsTemplateRequest extends RequestBase { export type IndicesExistsTemplateResponse = boolean -export interface IndicesExplainDataLifecycleDataLifecycleExplain { +export interface IndicesExplainDataLifecycleDataStreamLifecycleExplain { index: IndexName - managed_by_dlm: boolean + managed_by_lifecycle: boolean index_creation_date_millis?: EpochTime time_since_index_creation?: Duration rollover_date_millis?: EpochTime time_since_rollover?: Duration - lifecycle?: IndicesDataLifecycleWithRollover + lifecycle?: IndicesDataStreamLifecycleWithRollover generation_time?: Duration error?: string } @@ -10390,7 +10390,7 @@ export interface IndicesExplainDataLifecycleRequest extends RequestBase { } export interface IndicesExplainDataLifecycleResponse { - indices: Record + indices: Record } export interface IndicesFieldUsageStatsFieldSummary { @@ -10510,9 +10510,9 @@ export interface IndicesGetAliasRequest extends RequestBase { export type IndicesGetAliasResponse = Record -export interface IndicesGetDataLifecycleDataStreamLifecycle { +export interface IndicesGetDataLifecycleDataStreamWithLifecycle { name: DataStreamName - lifecycle?: IndicesDataLifecycle + lifecycle?: IndicesDataStreamLifecycle } export interface IndicesGetDataLifecycleRequest extends RequestBase { @@ -10522,7 +10522,7 @@ export interface IndicesGetDataLifecycleRequest extends RequestBase { } export interface IndicesGetDataLifecycleResponse { - data_streams: IndicesGetDataLifecycleDataStreamLifecycle[] + data_streams: IndicesGetDataLifecycleDataStreamWithLifecycle[] } export interface IndicesGetDataStreamRequest extends RequestBase { @@ -10687,7 +10687,7 @@ export interface IndicesPutIndexTemplateIndexTemplateMapping { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings - lifecycle?: IndicesDataLifecycle + lifecycle?: IndicesDataStreamLifecycle } export interface IndicesPutIndexTemplateRequest extends RequestBase { From 6e1c20989e79f589b1eafddef33aea6b5df6d781 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Sat, 26 Aug 2023 00:21:12 +0930 Subject: [PATCH 245/647] Auto-generated code for main (#1985) --- docs/reference.asciidoc | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 85475327b..e1adf624d 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -712,20 +712,23 @@ client.reindex({ dest, source }) ==== Arguments * *Request (object):* -** *`dest` ({ index, op_type, pipeline, routing, version_type })* -** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })* -** *`conflicts` (Optional, Enum("abort" | "proceed"))* -** *`max_docs` (Optional, number)* -** *`script` (Optional, { lang, options, source } | { id })* +** *`dest` ({ index, op_type, pipeline, routing, version_type })*: The destination you are copying to. 
+** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source you are copying from. +** *`conflicts` (Optional, Enum("abort" | "proceed"))*: Set to proceed to continue reindexing even if there are conflicts. +** *`max_docs` (Optional, number)*: The maximum number of documents to reindex. +** *`script` (Optional, { lang, options, source } | { id })*: The script to run to update the document source or metadata when reindexing. ** *`size` (Optional, number)* -** *`refresh` (Optional, boolean)*: Should the affected indexes be refreshed? -** *`requests_per_second` (Optional, float)*: The throttle to set on this request in sub-requests per second. -1 means no throttle. -** *`scroll` (Optional, string | -1 | 0)*: Control how long to keep the search context alive -** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`. -** *`timeout` (Optional, string | -1 | 0)*: Time each individual bulk request should wait for shards that are unavailable. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the reindex operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) -** *`wait_for_completion` (Optional, boolean)*: Should the request should block until the reindex is complete. -** *`require_alias` (Optional, boolean)* +** *`refresh` (Optional, boolean)*: If `true`, the request refreshes affected shards to make this operation visible to search. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. +Defaults to no throttle. +** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index should be maintained for scrolled search. +** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. +Defaults to 1 slice, meaning the task isn’t sliced into subtasks. +** *`timeout` (Optional, string | -1 | 0)*: Period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. +** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias. [discrete] === reindex_rethrottle @@ -4949,8 +4952,9 @@ client.ml.deleteCalendarEvent({ calendar_id, event_id }) ==== Arguments * *Request (object):* -** *`calendar_id` (string)*: The ID of the calendar to modify -** *`event_id` (string)*: The ID of the event to remove from the calendar +** *`calendar_id` (string)*: A string that uniquely identifies a calendar. +** *`event_id` (string)*: Identifier for the scheduled event. +You can obtain this identifier by using the get calendar events API. [discrete] ==== delete_calendar_job @@ -5363,7 +5367,8 @@ neither the category ID nor the partition_field_value, the API returns information about all categories. 
If you specify only the partition_field_value, it returns information about all categories for the specified partition. -** *`page` (Optional, { from, size })* +** *`page` (Optional, { from, size })*: Configures pagination. +This parameter has the `from` and `size` properties. ** *`from` (Optional, number)*: Skips the specified number of categories. ** *`partition_field_value` (Optional, string)*: Only return categories for the specified partition. ** *`size` (Optional, number)*: Specifies the maximum number of categories to obtain. @@ -5526,7 +5531,8 @@ client.ml.getInfluencers({ job_id }) * *Request (object):* ** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`page` (Optional, { from, size })* +** *`page` (Optional, { from, size })*: Configures pagination. +This parameter has the `from` and `size` properties. ** *`desc` (Optional, boolean)*: If true, the results are sorted in descending order. ** *`end` (Optional, string | Unit)*: Returns influencers with timestamps earlier than this time. The default value means it is unset and results are not limited to From 13923b353be8431ce909623f56e7270141fe1cef Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 29 Aug 2023 00:48:09 +0930 Subject: [PATCH 246/647] Auto-generated code for main (#1987) --- docs/reference.asciidoc | 15 +++++++++------ src/api/types.ts | 24 ++---------------------- src/api/typesWithBodyKey.ts | 24 ++---------------------- 3 files changed, 13 insertions(+), 50 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index e1adf624d..ea908174d 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -6786,9 +6786,10 @@ client.nodes.reloadSecureSettings({ ... }) ==== Arguments * *Request (object):* -** *`node_id` (Optional, string | string[])*: A list of node IDs to span the reload/reinit call. Should stay empty because reloading usually involves all cluster nodes. -** *`secure_settings_password` (Optional, string)* -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`node_id` (Optional, string | string[])*: The names of particular nodes in the cluster to target. +** *`secure_settings_password` (Optional, string)*: The password for the Elasticsearch keystore. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== stats @@ -6816,7 +6817,7 @@ client.nodes.stats({ ... }) ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ** *`types` (Optional, string[])*: A list of document types for the indexing index metric. -** *`include_unloaded_segments` (Optional, boolean)*: If set to true segment stats will include stats for segments that are not currently loaded into memory +** *`include_unloaded_segments` (Optional, boolean)*: If `true`, the response includes information from segments that are not loaded into memory. [discrete] ==== usage @@ -6833,8 +6834,10 @@ client.nodes.usage({ ... 
}) * *Request (object):* ** *`node_id` (Optional, string | string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes -** *`metric` (Optional, string | string[])*: Limit the information returned to the specified metrics -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`metric` (Optional, string | string[])*: Limits the information returned to the specific metrics. +A list of the following options: `_all`, `rest_actions`. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] === query_ruleset diff --git a/src/api/types.ts b/src/api/types.ts index 74f1227a2..384ca440a 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -17169,7 +17169,7 @@ export interface TasksTaskInfo { running_time?: Duration running_time_in_nanos: DurationValue start_time_in_millis: EpochTime - status?: TasksTaskStatus + status?: any type: string parent_task_id?: TaskId } @@ -17183,26 +17183,6 @@ export interface TasksTaskListResponseBase { tasks?: TasksTaskInfos } -export interface TasksTaskStatus { - batches: long - canceled?: string - created: long - deleted: long - noops: long - failures?: string[] - requests_per_second: float - retries: Retries - throttled?: Duration - throttled_millis: DurationValue - throttled_until?: Duration - throttled_until_millis: DurationValue - timed_out?: boolean - took?: DurationValue - total: long - updated: long - version_conflicts: long -} - export interface TasksCancelRequest extends RequestBase { task_id?: TaskId actions?: string | string[] @@ -17222,7 +17202,7 @@ export interface TasksGetRequest extends RequestBase { export interface TasksGetResponse { completed: boolean task: TasksTaskInfo - response?: TasksTaskStatus + response?: any error?: ErrorCause } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index a1710a897..94861b37d 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -17576,7 +17576,7 @@ export interface TasksTaskInfo { running_time?: Duration running_time_in_nanos: DurationValue start_time_in_millis: EpochTime - status?: TasksTaskStatus + status?: any type: string parent_task_id?: TaskId } @@ -17590,26 +17590,6 @@ export interface TasksTaskListResponseBase { tasks?: TasksTaskInfos } -export interface TasksTaskStatus { - batches: long - canceled?: string - created: long - deleted: long - noops: long - failures?: string[] - requests_per_second: float - retries: Retries - throttled?: Duration - throttled_millis: DurationValue - throttled_until?: Duration - throttled_until_millis: DurationValue - timed_out?: boolean - took?: DurationValue - total: long - updated: long - version_conflicts: long -} - export interface TasksCancelRequest extends RequestBase { task_id?: TaskId actions?: string | string[] @@ -17629,7 +17609,7 @@ export interface TasksGetRequest extends RequestBase { export interface TasksGetResponse { completed: boolean task: TasksTaskInfo - response?: TasksTaskStatus + response?: any error?: ErrorCause } From f72d146b0ab0f50278d0a110b29a901eab5516a5 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 28 Aug 2023 10:38:52 -0500 Subject: [PATCH 247/647] Pin untrusted Github Action to a commit hash (#1986) --- .github/workflows/backport.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.github/workflows/backport.yml b/.github/workflows/backport.yml index a10d7338a..906f1474b 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -11,6 +11,6 @@ jobs: name: Backport steps: - name: Backport - uses: tibdex/backport@v2 + uses: tibdex/backport@7005ef85c4562bc23b0e9b4a9940d5922f439750 with: github_token: ${{ secrets.GITHUB_TOKEN }} From 49eccfbd0017434ea83d7218f9e38213aea711ca Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 30 Aug 2023 00:41:55 +0930 Subject: [PATCH 248/647] Auto-generated code for main (#1993) --- docs/reference.asciidoc | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index ea908174d..1619d7f58 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -6923,7 +6923,7 @@ client.rollup.deleteJob({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: The ID of the job to delete +** *`id` (string)*: Identifier for the job. [discrete] ==== get_jobs @@ -6939,7 +6939,8 @@ client.rollup.getJobs({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string)*: The ID of the job(s) to fetch. Accepts glob patterns, or left blank for all jobs +** *`id` (Optional, string)*: Identifier for the rollup job. +If it is `_all` or omitted, the API returns all rollup jobs. [discrete] ==== get_rollup_caps @@ -6955,7 +6956,8 @@ client.rollup.getRollupCaps({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string)*: The ID of the index to check rollup capabilities on, or left blank for all jobs +** *`id` (Optional, string)*: Index, indices or index-pattern to return rollup capabilities for. +`_all` may be used to fetch rollup capabilities from all jobs. [discrete] ==== get_rollup_index_caps @@ -6971,7 +6973,8 @@ client.rollup.getRollupIndexCaps({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: The rollup index or index pattern to obtain rollup capabilities from. +** *`index` (string | string[])*: Data stream or index to check for rollup capabilities. +Wildcard (`*`) expressions are supported. [discrete] ==== put_job @@ -7028,10 +7031,10 @@ client.rollup.rollupSearch({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: The indices or index-pattern(s) (containing rollup or regular data) that should be searched -** *`aggregations` (Optional, Record)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data +** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL. +** *`aggregations` (Optional, Record)*: Specifies aggregations. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Specifies a DSL query. +** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data. ** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response ** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response @@ -7049,7 +7052,7 @@ client.rollup.startJob({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: The ID of the job to start +** *`id` (string)*: Identifier for the rollup job. [discrete] ==== stop_job @@ -7065,9 +7068,11 @@ client.rollup.stopJob({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: The ID of the job to stop -** *`timeout` (Optional, string | -1 | 0)*: Block for (at maximum) the specified duration while waiting for the job to stop. Defaults to 30s. -** *`wait_for_completion` (Optional, boolean)*: True if the API should block until the job has fully stopped, false if should be executed async. Defaults to false. +** *`id` (string)*: Identifier for the rollup job. +** *`timeout` (Optional, string | -1 | 0)*: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. +If more than `timeout` time has passed, the API throws a timeout exception. +** *`wait_for_completion` (Optional, boolean)*: If set to `true`, causes the API to block until the indexer state completely stops. +If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. [discrete] === search_application From 516da3a7202d9f9bb5e6398f5269e467ce33403b Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Fri, 1 Sep 2023 02:17:50 +0930 Subject: [PATCH 249/647] Auto-generated code for main (#1996) --- docs/reference.asciidoc | 2 ++ src/api/api/indices.ts | 2 +- src/api/types.ts | 12 ++++++++++++ src/api/typesWithBodyKey.ts | 12 ++++++++++++ 4 files changed, 27 insertions(+), 1 deletion(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 1619d7f58..072395dbe 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -4016,6 +4016,8 @@ To target all data streams use `*` or `_all`. ** *`data_retention` (Optional, string | -1 | 0)*: If defined, every document added to this data stream will be stored at least for this time frame. Any time after this duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely. +** *`downsampling` (Optional, { rounds })*: If defined, every backing index will execute the configured downsampling configuration after the backing +index is not the data stream write index anymore. 
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 2dcf18270..078b1c52d 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -1233,7 +1233,7 @@ export default class Indices { async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['data_retention'] + const acceptedBody: string[] = ['data_retention', 'downsampling'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/types.ts b/src/api/types.ts index 384ca440a..242ee9f69 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -9519,6 +9519,11 @@ export interface IndicesDataStreamIndex { export interface IndicesDataStreamLifecycle { data_retention?: Duration + downsampling?: IndicesDataStreamLifecycleDownsampling +} + +export interface IndicesDataStreamLifecycleDownsampling { + rounds: IndicesDownsamplingRound[] } export interface IndicesDataStreamLifecycleRolloverConditions { @@ -9536,6 +9541,7 @@ export interface IndicesDataStreamLifecycleRolloverConditions { export interface IndicesDataStreamLifecycleWithRollover { data_retention?: Duration + downsampling?: IndicesDataStreamLifecycleDownsampling rollover?: IndicesDataStreamLifecycleRolloverConditions } @@ -9551,6 +9557,11 @@ export interface IndicesDownsampleConfig { fixed_interval: DurationLarge } +export interface IndicesDownsamplingRound { + after: Duration + config: IndicesDownsampleConfig +} + export interface IndicesFielddataFrequencyFilter { max: double min: double @@ -10537,6 +10548,7 @@ export interface IndicesPutDataLifecycleRequest extends RequestBase { master_timeout?: Duration timeout?: Duration data_retention?: Duration + downsampling?: IndicesDataStreamLifecycleDownsampling } export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 94861b37d..2401a1d6d 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -9642,6 +9642,11 @@ export interface IndicesDataStreamIndex { export interface IndicesDataStreamLifecycle { data_retention?: Duration + downsampling?: IndicesDataStreamLifecycleDownsampling +} + +export interface IndicesDataStreamLifecycleDownsampling { + rounds: IndicesDownsamplingRound[] } export interface IndicesDataStreamLifecycleRolloverConditions { @@ -9659,6 +9664,7 @@ export interface IndicesDataStreamLifecycleRolloverConditions { export interface IndicesDataStreamLifecycleWithRollover { data_retention?: Duration + downsampling?: IndicesDataStreamLifecycleDownsampling rollover?: IndicesDataStreamLifecycleRolloverConditions } @@ -9674,6 +9680,11 @@ export interface IndicesDownsampleConfig { fixed_interval: DurationLarge } +export interface IndicesDownsamplingRound { + after: Duration + config: IndicesDownsampleConfig +} + export interface IndicesFielddataFrequencyFilter { max: double min: double @@ -10678,6 +10689,7 @@ export interface 
IndicesPutDataLifecycleRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { data_retention?: Duration + downsampling?: IndicesDataStreamLifecycleDownsampling } } From a4be77425eee057de771b24d2272bd69c1fae62c Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Thu, 31 Aug 2023 21:24:36 +0400 Subject: [PATCH 250/647] Use GitHub admonition for browser support warning (#1997) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9281aefc8..37e78a387 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,8 @@ npm install @elastic/elasticsearch@ #### Browser -WARNING: There is no official support for the browser environment. It exposes your Elasticsearch instance to everyone, which could lead to security issues. +> [!WARNING] +> There is no official support for the browser environment. It exposes your Elasticsearch instance to everyone, which could lead to security issues. We recommend that you write a lightweight proxy that uses this client instead, you can see a proxy example [here](./docs/examples/proxy). ## Documentation From 63b4b4c08a35109df3fd12c54783fe2875becd98 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 6 Sep 2023 03:58:51 +0930 Subject: [PATCH 251/647] Auto-generated code for main (#1998) --- docs/reference.asciidoc | 103 +++++++++++++++++++++--------------- src/api/types.ts | 17 ++++++ src/api/typesWithBodyKey.ts | 17 ++++++ 3 files changed, 93 insertions(+), 44 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 072395dbe..1ddd6e5f7 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -93,22 +93,34 @@ client.count({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of indices to restrict the results -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`analyzer` (Optional, string)*: The analyzer to use for the query string -** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) -** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
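For illustration, the new lifecycle types above compose as follows; this is a sketch only, and the stream name, retention period, and intervals are assumptions (`client` is an already-configured `Client` instance):

[source,ts]
----
// Retention plus two downsampling rounds, following the
// IndicesDownsamplingRound / IndicesDownsampleConfig shapes added above.
const response = await client.indices.putDataLifecycle({
  name: 'my-data-stream', // illustrative stream name
  data_retention: '30d',
  downsampling: {
    rounds: [
      { after: '1d', config: { fixed_interval: '1h' } },
      { after: '7d', config: { fixed_interval: '1d' } }
    ]
  }
})
----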
-** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored -** *`min_score` (Optional, number)*: Include only documents with a specific `_score` value in the result -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`routing` (Optional, string)*: A list of specific routing values -** *`terminate_after` (Optional, number)*: The maximum count for each shard, upon reaching which the query execution will terminate early -** *`q` (Optional, string)*: Query in the Lucene query string syntax +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. +Supports wildcards (`*`). +To search all data streams and indices, omit this parameter or use `*` or `_all`. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`analyzer` (Optional, string)*: Analyzer to use for the query string. +This parameter can only be used when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. +This parameter can only be used when the `q` query string parameter is specified. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. +This parameter can only be used when the `q` query string parameter is specified. +** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. +This parameter can only be used when the `q` query string parameter is specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. 
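As a quick sketch of the revised `count` parameters (the remaining parameters continue below; the index name and query are illustrative):

[source,ts]
----
// Count only the documents matching a term query instead of counting everything.
const response = await client.count({
  index: 'my-index',
  query: { term: { 'user.id': 'kimchy' } }
})
console.log(response.count)
----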
+** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. +** *`min_score` (Optional, number)*: Sets the minimum `_score` value that documents must have to be included in the result. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. +Random by default. +** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. +If a query reaches this limit, Elasticsearch terminates the query early. +Elasticsearch collects documents before sorting. +** *`q` (Optional, string)*: Query in the Lucene query string syntax. [discrete] === create @@ -7156,9 +7168,9 @@ client.searchApplication.list({ ... }) ==== Arguments * *Request (object):* -** *`q` (Optional, string)*: Query in the Lucene query string syntax" -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`size` (Optional, number)*: specifies a max number of results to get +** *`q` (Optional, string)*: Query in the Lucene query string syntax. +** *`from` (Optional, number)*: Starting offset. +** *`size` (Optional, number)*: Specifies a max number of results to get. [discrete] ==== post_behavioral_analytics_event @@ -7185,9 +7197,9 @@ client.searchApplication.put({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the search application to be created or updated +** *`name` (string)*: The name of the search application to be created or updated. ** *`search_application` (Optional, { name, indices, updated_at_millis, analytics_collection_name, template })* -** *`create` (Optional, boolean)*: If true, requires that a search application with the specified resource_id does not already exist. (default: false) +** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing Search Applications. [discrete] ==== put_behavioral_analytics @@ -7203,7 +7215,7 @@ client.searchApplication.putBehavioralAnalytics({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the analytics collection to be created or updated +** *`name` (string)*: The name of the analytics collection to be created or updated. [discrete] ==== render_query @@ -7230,8 +7242,8 @@ client.searchApplication.search({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the search application to be searched -** *`params` (Optional, Record)* +** *`name` (string)*: The name of the search application to be searched. +** *`params` (Optional, Record)*: Query parameters specific to this request, which will override any defaults specified in the template. [discrete] === searchable_snapshots @@ -8575,7 +8587,7 @@ client.sql.clearCursor({ cursor }) ==== Arguments * *Request (object):* -** *`cursor` (string)* +** *`cursor` (string)*: Cursor to clear. [discrete] ==== delete_async @@ -8591,7 +8603,7 @@ client.sql.deleteAsync({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: The async search ID +** *`id` (string)*: Identifier for the search. [discrete] ==== get_async @@ -8607,7 +8619,7 @@ client.sql.getAsync({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: The async search ID +** *`id` (string)*: Identifier for the search. ** *`delimiter` (Optional, string)*: Separator for CSV results. The API only supports this parameter for CSV responses. 
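A hedged sketch of calling `searchApplication.search` as documented above; the application name is illustrative, and the `params` keys are defined by the application's own search template, so they are assumptions:

[source,ts]
----
const response = await client.searchApplication.search({
  name: 'my-search-app',           // assumed application name
  params: { query_string: 'web' }  // keys depend on the app's search template
})
----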
** *`format` (Optional, string)*: Format for the response. You must specify a format using this parameter or the Accept HTTP header. If you specify both, the API uses this parameter. @@ -8630,7 +8642,7 @@ client.sql.getAsyncStatus({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: The async search ID +** *`id` (string)*: Identifier for the search. [discrete] ==== query @@ -8648,13 +8660,15 @@ client.sql.query({ ... }) * *Request (object):* ** *`catalog` (Optional, string)*: Default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. ** *`columnar` (Optional, boolean)*: If true, the results in a columnar fashion: one row represents all the values of a certain column from the current page of results. -** *`cursor` (Optional, string)* +** *`cursor` (Optional, string)*: Cursor used to retrieve a set of paginated results. +If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. +It ignores other request body parameters. ** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Optional Elasticsearch query DSL for additional filtering. -** *`query` (Optional, string)*: SQL query to execute +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering. +** *`query` (Optional, string)*: SQL query to run. ** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails. ** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails. -** *`time_zone` (Optional, string)*: Time-zone in ISO 8601 used for executing the query on the server. More information available here. +** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search. ** *`field_multi_value_leniency` (Optional, boolean)*: Throw an exception when encountering multiple values for a field (default) or be lenient and return the first value from the list (without any guarantees of what that will be - typically the first in natural ascending order). ** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. 
These fields take precedence over mapped fields with the same name. @@ -8663,7 +8677,7 @@ precedence over mapped fields with the same name. ** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for an async or saved synchronous search. ** *`keep_on_completion` (Optional, boolean)*: If true, Elasticsearch stores synchronous searches if you also specify the wait_for_completion_timeout parameter. If false, Elasticsearch only stores async searches that don’t finish before the wait_for_completion_timeout. ** *`index_using_frozen` (Optional, boolean)*: If true, the search can run on frozen indices. Defaults to false. -** *`format` (Optional, string)*: a short version of the Accept header, e.g. json, yaml +** *`format` (Optional, string)*: Format for the response. [discrete] ==== translate @@ -8679,10 +8693,10 @@ client.sql.translate({ query }) ==== Arguments * *Request (object):* -** *`query` (string)* -** *`fetch_size` (Optional, number)* -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`time_zone` (Optional, string)* +** *`query` (string)*: SQL query to run. +** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering. +** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search. [discrete] === ssl @@ -8835,10 +8849,10 @@ client.tasks.cancel({ ... }) ==== Arguments * *Request (object):* -** *`task_id` (Optional, string | number)*: Cancel the task with specified task id (node_id:task_number) -** *`actions` (Optional, string | string[])*: A list of actions that should be cancelled. Leave empty to cancel all. -** *`nodes` (Optional, string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes -** *`parent_task_id` (Optional, string)*: Cancel tasks with specified parent task id (node_id:task_number). Set to -1 to cancel all. +** *`task_id` (Optional, string | number)*: ID of the task. +** *`actions` (Optional, string | string[])*: List or wildcard expression of actions used to limit the request. 
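To illustrate the `sql.query` parameters documented above, a minimal sketch (the SQL text and index are illustrative):

[source,ts]
----
// Run a small SQL query, paging through results five rows at a time.
const response = await client.sql.query({
  query: 'SELECT author, COUNT(*) FROM library GROUP BY author', // illustrative
  fetch_size: 5,
  format: 'json'
})
----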
+** *`nodes` (Optional, string[])*: List of node IDs or names used to limit the request. +** *`parent_task_id` (Optional, string)*: Parent task ID used to limit the tasks. ** *`wait_for_completion` (Optional, boolean)*: Should the request block until the cancellation of the task and its descendant tasks is completed. Defaults to false [discrete] @@ -8855,9 +8869,10 @@ client.tasks.get({ task_id }) ==== Arguments * *Request (object):* -** *`task_id` (string)*: Return the task with specified id (node_id:task_number) -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`wait_for_completion` (Optional, boolean)*: Wait for the matching tasks to complete (default: false) +** *`task_id` (string)*: ID of the task. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the task has completed. [discrete] ==== list diff --git a/src/api/types.ts b/src/api/types.ts index 242ee9f69..cb43fd81e 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -1988,14 +1988,28 @@ export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb' export type CategoryId = string +export type ClusterAlias = string + +export interface ClusterDetails { + status: ClusterSearchStatus + indices: string + took?: DurationValue + timed_out: boolean + _shards?: ShardStatistics + failures?: ShardFailure[] +} + export type ClusterInfoTarget = '_all' | 'http' | 'ingest' | 'thread_pool' | 'script' export type ClusterInfoTargets = ClusterInfoTarget | ClusterInfoTarget[] +export type ClusterSearchStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed' + export interface ClusterStatistics { skipped: integer successful: integer total: integer + details?: Record } export interface CompletionStats { @@ -6033,6 +6047,8 @@ export interface AsyncSearchAsyncSearchResponseBase { expiration_time_in_millis: EpochTime start_time?: DateTime start_time_in_millis: EpochTime + completion_time?: DateTime + completion_time_in_millis?: EpochTime } export interface AsyncSearchDeleteRequest extends RequestBase { @@ -6058,6 +6074,7 @@ export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSearchResponseBase { _shards: ShardStatistics + _clusters?: ClusterStatistics completion_status?: integer } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 2401a1d6d..c97c43834 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -2061,14 +2061,28 @@ export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb' export type CategoryId = string +export type ClusterAlias = string + +export interface ClusterDetails { + status: ClusterSearchStatus + indices: string + took?: DurationValue + timed_out: boolean + _shards?: ShardStatistics + failures?: ShardFailure[] +} + export type ClusterInfoTarget = '_all' | 'http' | 'ingest' | 'thread_pool' | 'script' export type ClusterInfoTargets = ClusterInfoTarget | ClusterInfoTarget[] +export type ClusterSearchStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed' + export interface ClusterStatistics { skipped: integer successful: integer total: integer + details?: Record } export interface CompletionStats { @@ -6106,6 +6120,8 @@ export interface AsyncSearchAsyncSearchResponseBase { expiration_time_in_millis: EpochTime start_time?: DateTime 
start_time_in_millis: EpochTime + completion_time?: DateTime + completion_time_in_millis?: EpochTime } export interface AsyncSearchDeleteRequest extends RequestBase { @@ -6131,6 +6147,7 @@ export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSearchResponseBase { _shards: ShardStatistics + _clusters?: ClusterStatistics completion_status?: integer } From beaee47ca0c0db89c360385e7f330ba6767db983 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 7 Sep 2023 00:56:15 +0930 Subject: [PATCH 252/647] Auto-generated code for main (#2001) --- docs/reference.asciidoc | 93 ++++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 42 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 1ddd6e5f7..bb88c67fc 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -38,17 +38,21 @@ client.bulk({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string)*: Default index for items which don't provide one +** *`index` (Optional, string)*: Name of the data stream, index, or index alias to perform bulk actions on. ** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])* -** *`pipeline` (Optional, string)*: The pipeline id to preprocess incoming documents with -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. -** *`routing` (Optional, string)*: Specific routing value -** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or default list of fields to return, can be overridden on each sub-request -** *`_source_excludes` (Optional, string | string[])*: Default list of fields to exclude from the returned _source field, can be overridden on each sub-request -** *`_source_includes` (Optional, string | string[])*: Default list of fields to extract and return from the _source field, can be overridden on each sub-request -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the bulk operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) -** *`require_alias` (Optional, boolean)*: Sets require_alias for all incoming documents. Defaults to unset (false) +** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. +If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. +If a final pipeline is configured it will always run, regardless of the value of this parameter. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. +Valid values: `true`, `false`, `wait_for`. 
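The `bulk` section above pairs each action line with a document line in `operations`; a minimal sketch with illustrative names:

[source,ts]
----
const response = await client.bulk({
  refresh: 'wait_for',
  operations: [
    { index: { _index: 'my-index', _id: '1' } }, // action line
    { title: 'Hello world' }                     // document line for the action
  ]
})
----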
+** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: `true` or `false` to return the `_source` field or not, or a list of fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. +** *`timeout` (Optional, string | -1 | 0)*: Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +** *`require_alias` (Optional, boolean)*: If `true`, the request’s actions must target an index alias. [discrete] === clear_scroll @@ -63,7 +67,8 @@ client.clearScroll({ ... }) ==== Arguments * *Request (object):* -** *`scroll_id` (Optional, string | string[])*: A list of scroll IDs to clear +** *`scroll_id` (Optional, string | string[])*: List of scroll IDs to clear. +To clear all scroll IDs, use `_all`. [discrete] === close_point_in_time @@ -78,7 +83,7 @@ client.closePointInTime({ id }) ==== Arguments * *Request (object):* -** *`id` (string)* +** *`id` (string)*: The ID of the point-in-time. [discrete] === count @@ -623,20 +628,22 @@ client.mtermvectors({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string)*: The index in which the document resides. -** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])* -** *`ids` (Optional, string[])* -** *`fields` (Optional, string | string[])*: A list of fields to return. Applies to all returned documents unless otherwise specified in body "params" or "docs". -** *`field_statistics` (Optional, boolean)*: Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". -** *`offsets` (Optional, boolean)*: Specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". -** *`payloads` (Optional, boolean)*: Specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". -** *`positions` (Optional, boolean)*: Specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) .Applies to all returned documents unless otherwise specified in body "params" or "docs". -** *`realtime` (Optional, boolean)*: Specifies if requests are real-time as opposed to near-real-time (default: true). -** *`routing` (Optional, string)*: Specific routing value. Applies to all returned documents unless otherwise specified in body "params" or "docs". -** *`term_statistics` (Optional, boolean)*: Specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". 
-** *`version` (Optional, number)*: Explicit version number for concurrency control -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type +** *`index` (Optional, string)*: Name of the index that contains the documents. +** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: Array of existing or artificial documents. +** *`ids` (Optional, string[])*: Simplified syntax to specify documents by their ID if they're in the same index. +** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. +Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +** *`field_statistics` (Optional, boolean)*: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. +** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets. +** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads. +** *`positions` (Optional, boolean)*: If `true`, the response includes term positions. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. +Random by default. +** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. +** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`term_statistics` (Optional, boolean)*: If true, the response includes term frequency and document frequency. +** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type. [discrete] === open_point_in_time @@ -1106,22 +1113,24 @@ client.termvectors({ index }) ==== Arguments * *Request (object):* -** *`index` (string)*: The index in which the document resides. -** *`id` (Optional, string)*: The id of the document, when not specified a doc param should be supplied. -** *`doc` (Optional, object)*: A document. -** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })* -** *`per_field_analyzer` (Optional, Record)* -** *`fields` (Optional, string | string[])*: A list of fields to return. -** *`field_statistics` (Optional, boolean)*: Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. -** *`offsets` (Optional, boolean)*: Specifies if term offsets should be returned. -** *`payloads` (Optional, boolean)*: Specifies if term payloads should be returned. -** *`positions` (Optional, boolean)*: Specifies if term positions should be returned. -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random). -** *`realtime` (Optional, boolean)*: Specifies if request is real-time as opposed to near-real-time (default: true). -** *`routing` (Optional, string)*: Specific routing value. -** *`term_statistics` (Optional, boolean)*: Specifies if total term frequency and document frequency should be returned. -** *`version` (Optional, number)*: Explicit version number for concurrency control -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type +** *`index` (string)*: Name of the index that contains the document. 
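A sketch of the simplified `ids` syntax for `mtermvectors` documented above (index, IDs, and field are illustrative):

[source,ts]
----
// Fetch term vectors for two documents of the same index in one call.
const response = await client.mtermvectors({
  index: 'my-index',
  ids: ['1', '2'],
  fields: ['title'],
  term_statistics: true
})
----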
+** *`id` (Optional, string)*: Unique identifier of the document. +** *`doc` (Optional, object)*: An artificial document (a document not present in the index) for which you want to retrieve term vectors. +** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })*: Filter terms based on their tf-idf scores. +** *`per_field_analyzer` (Optional, Record)*: Overrides the default per-field analyzer. +** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. +Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +** *`field_statistics` (Optional, boolean)*: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. +** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets. +** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads. +** *`positions` (Optional, boolean)*: If `true`, the response includes term positions. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. +Random by default. +** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. +** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`term_statistics` (Optional, boolean)*: If `true`, the response includes term frequency and document frequency. +** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type. [discrete] === update From ff0fb27ebe41274ed94e5ed976211dbf53aa2774 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 7 Sep 2023 23:57:59 +0930 Subject: [PATCH 253/647] Auto-generated code for main (#2002) --- docs/reference.asciidoc | 259 ++++++++++++++++++++++++---------------- 1 file changed, 156 insertions(+), 103 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index bb88c67fc..5a83622c9 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -142,16 +142,23 @@ client.create({ id, index }) ==== Arguments * *Request (object):* -** *`id` (string)*: Document ID -** *`index` (string)*: The name of the index +** *`id` (string)*: Unique identifier for the document. +** *`index` (string)*: Name of the data stream or index to target. +If the target doesn’t exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. +If the target doesn’t exist and doesn’t match a data stream template, this request creates the index. ** *`document` (Optional, object)*: A document. -** *`pipeline` (Optional, string)*: The pipeline id to preprocess incoming documents with -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. 
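And the single-document `termvectors` counterpart, again with illustrative names:

[source,ts]
----
// Term vectors for one document, including term and field statistics.
const response = await client.termvectors({
  index: 'my-index',
  id: '1',
  fields: ['title'],
  term_statistics: true,
  field_statistics: true
})
----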
-** *`routing` (Optional, string)*: Specific routing value -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`version` (Optional, number)*: Explicit version number for concurrency control -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) +** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. +If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. +If a final pipeline is configured it will always run, regardless of the value of this parameter. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. +Valid values: `true`, `false`, `wait_for`. +** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`timeout` (Optional, string | -1 | 0)*: Period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. +** *`version` (Optional, number)*: Explicit version number for concurrency control. +The specified version must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). [discrete] === delete @@ -166,16 +173,19 @@ client.delete({ id, index }) ==== Arguments * *Request (object):* -** *`id` (string)*: The document ID -** *`index` (string)*: The name of the index -** *`if_primary_term` (Optional, number)*: only perform the delete operation if the last operation that has changed the document has the specified primary term -** *`if_seq_no` (Optional, number)*: only perform the delete operation if the last operation that has changed the document has the specified sequence number -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. -** *`routing` (Optional, string)*: Specific routing value -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`version` (Optional, number)*: Explicit version number for concurrency control -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the delete operation. 
Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) +** *`id` (string)*: Unique identifier for the document. +** *`index` (string)*: Name of the target index. +** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. +** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. +Valid values: `true`, `false`, `wait_for`. +** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for active shards. +** *`version` (Optional, number)*: Explicit version number for concurrency control. +The specified version must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). [discrete] === delete_by_query @@ -190,38 +200,55 @@ client.deleteByQuery({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices -** *`max_docs` (Optional, number)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`slice` (Optional, { field, id, max })* -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`analyzer` (Optional, string)*: The analyzer to use for the query string -** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) -** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do when the delete by query hits version conflicts? 
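The `if_seq_no`/`if_primary_term` parameters above enable optimistic concurrency control on `delete`; a sketch, assuming the values were read from a prior GET of the document:

[source,ts]
----
// Only delete if nobody has changed the document since we last read it.
const response = await client.delete({
  index: 'my-index',
  id: '1',
  if_seq_no: 362,     // illustrative, from the prior read
  if_primary_term: 2, // illustrative, from the prior read
  refresh: 'wait_for'
})
----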
-** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) -** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`index` (string | string[])*: List of data streams, indices, and aliases to search. +Supports wildcards (`*`). +To search all data streams or indices, omit this parameter or use `*` or `_all`. +** *`max_docs` (Optional, number)*: The maximum number of documents to delete. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Specifies the documents to delete using the Query DSL. +** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`analyzer` (Optional, string)*: Analyzer to use for the query string. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. +** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if delete by query hits version conflicts: `abort` or `proceed`. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. +** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`from` (Optional, number)*: Starting offset (default: 0) -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`refresh` (Optional, boolean)*: Should the affected indexes be refreshed? 
-** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to index level setting -** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. -1 means no throttle. -** *`routing` (Optional, string)*: A list of specific routing values -** *`q` (Optional, string)*: Query in the Lucene query string syntax -** *`scroll` (Optional, string | -1 | 0)*: Specify how long a consistent view of the index should be maintained for scrolled search -** *`scroll_size` (Optional, number)*: Size on the scroll request powering the delete by query -** *`search_timeout` (Optional, string | -1 | 0)*: Explicit timeout for each search request. Defaults to no timeout. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type -** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`. -** *`sort` (Optional, string[])*: A list of : pairs -** *`stats` (Optional, string[])*: Specific 'tag' of the request for logging and statistical purposes -** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. -** *`timeout` (Optional, string | -1 | 0)*: Time each individual bulk request should wait for shards that are unavailable. -** *`version` (Optional, boolean)*: Specify whether to return document version as part of a hit -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the delete by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) -** *`wait_for_completion` (Optional, boolean)*: Should the request should block until the delete by query is complete. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. +Random by default. +** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. +** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. +Defaults to the index-level setting. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. +** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`q` (Optional, string)*: Query in the Lucene query string syntax. +** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling. +** *`scroll_size` (Optional, number)*: Size of the scroll request that powers the operation. +** *`search_timeout` (Optional, string | -1 | 0)*: Explicit timeout for each search request. +Defaults to no timeout. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. 
+Available options: `query_then_fetch`, `dfs_query_then_fetch`. +** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. +** *`sort` (Optional, string[])*: A list of : pairs. +** *`stats` (Optional, string[])*: Specific `tag` of the request for logging and statistical purposes. +** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. +If a query reaches this limit, Elasticsearch terminates the query early. +Elasticsearch collects documents before sorting. +Use with caution. +Elasticsearch applies this parameter to each shard handling the request. +When possible, let Elasticsearch perform early termination automatically. +Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +** *`timeout` (Optional, string | -1 | 0)*: Period each deletion request waits for active shards. +** *`version` (Optional, boolean)*: If `true`, returns the document version as part of a hit. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. [discrete] === delete_by_query_rethrottle @@ -236,8 +263,8 @@ client.deleteByQueryRethrottle({ task_id }) ==== Arguments * *Request (object):* -** *`task_id` (string | number)*: The task id to rethrottle -** *`requests_per_second` (Optional, float)*: The throttle to set on this request in floating sub-requests per second. -1 means set no throttle. +** *`task_id` (string | number)*: The ID for the task. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. [discrete] === delete_script @@ -252,9 +279,11 @@ client.deleteScript({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Script ID -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`id` (string)*: Identifier for the stored script or search template. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] === exists @@ -402,7 +431,7 @@ client.getScript({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Script ID +** *`id` (string)*: Identifier for the stored script or search template. 
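Tying the `delete_by_query` parameters together, a common pattern is to proceed past version conflicts and let Elasticsearch choose the slice count (index and query are illustrative):

[source,ts]
----
const response = await client.deleteByQuery({
  index: 'my-index',
  conflicts: 'proceed',
  slices: 'auto',
  query: { match: { 'user.id': 'kimchy' } }
})
----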
** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master [discrete] @@ -659,11 +688,14 @@ client.openPointInTime({ index, keep_alive }) * *Request (object):* ** *`index` (string | string[])*: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices -** *`keep_alive` (string | -1 | 0)*: Specific the time to live for the point in time -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`routing` (Optional, string)*: Specific routing value -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`keep_alive` (string | -1 | 0)*: Extends the time to live of the corresponding point in time. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. +Random by default. +** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] === ping @@ -688,11 +720,15 @@ client.putScript({ id, script }) ==== Arguments * *Request (object):* -** *`id` (string)*: Script ID -** *`script` ({ lang, options, source })* -** *`context` (Optional, string)*: Script context -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`id` (string)*: Identifier for the stored script or search template. +Must be unique within the cluster. +** *`script` ({ lang, options, source })*: Contains the script or search template, its parameters, and its language. +** *`context` (Optional, string)*: Context in which the script or search template should run. +To prevent errors, the API immediately compiles the script or template in this context. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] === rank_eval @@ -796,9 +832,9 @@ client.scriptsPainlessExecute({ ... }) ==== Arguments * *Request (object):* -** *`context` (Optional, string)* -** *`context_setup` (Optional, { document, index, query })* -** *`script` (Optional, { lang, options, source })* +** *`context` (Optional, string)*: The context that the script should run in. +** *`context_setup` (Optional, { document, index, query })*: Additional parameters for the `context`. 
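A sketch of storing a script with `putScript`, matching the parameters above (the script ID and source are illustrative):

[source,ts]
----
const response = await client.putScript({
  id: 'calculate-score', // illustrative stored-script ID
  script: {
    lang: 'painless',
    source: "Math.log(_score * 2) + params['my_modifier']"
  }
})
----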
+** *`script` (Optional, { lang, options, source })*: The Painless script to execute. [discrete] === scroll @@ -1189,40 +1225,57 @@ client.updateByQuery({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices -** *`max_docs` (Optional, number)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`script` (Optional, { lang, options, source } | { id })* -** *`slice` (Optional, { field, id, max })* -** *`conflicts` (Optional, Enum("abort" | "proceed"))* -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`analyzer` (Optional, string)*: The analyzer to use for the query string -** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) -** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`index` (string | string[])*: List of data streams, indices, and aliases to search. +Supports wildcards (`*`). +To search all data streams or indices, omit this parameter or use `*` or `_all`. +** *`max_docs` (Optional, number)*: The maximum number of documents to update. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL. +** *`script` (Optional, { lang, options, source } | { id })*: The script to run to update the document source or metadata when updating. +** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. +** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if update by query hits version conflicts: `abort` or `proceed`. 
+** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`analyzer` (Optional, string)*: Analyzer to use for the query string. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. +** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`from` (Optional, number)*: Starting offset (default: 0) -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored -** *`pipeline` (Optional, string)*: Ingest pipeline to set on index requests made by this action. (default: none) -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`refresh` (Optional, boolean)*: Should the affected indexes be refreshed? -** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to index level setting -** *`requests_per_second` (Optional, float)*: The throttle to set on this request in sub-requests per second. -1 means no throttle. -** *`routing` (Optional, string)*: A list of specific routing values -** *`scroll` (Optional, string | -1 | 0)*: Specify how long a consistent view of the index should be maintained for scrolled search -** *`scroll_size` (Optional, number)*: Size on the scroll request powering the update by query -** *`search_timeout` (Optional, string | -1 | 0)*: Explicit timeout for each search request. Defaults to no timeout. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type -** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`. -** *`sort` (Optional, string[])*: A list of : pairs -** *`stats` (Optional, string[])*: Specific 'tag' of the request for logging and statistical purposes -** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. -** *`timeout` (Optional, string | -1 | 0)*: Time each individual bulk request should wait for shards that are unavailable. -** *`version` (Optional, boolean)*: Specify whether to return document version as part of a hit +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. 
+** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. +** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. +If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. +If a final pipeline is configured it will always run, regardless of the value of this parameter. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. +Random by default. +** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search. +** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. +** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling. +** *`scroll_size` (Optional, number)*: Size of the scroll request that powers the operation. +** *`search_timeout` (Optional, string | -1 | 0)*: Explicit timeout for each search request. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`. +** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. +** *`sort` (Optional, string[])*: A list of : pairs. +** *`stats` (Optional, string[])*: Specific `tag` of the request for logging and statistical purposes. +** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. +If a query reaches this limit, Elasticsearch terminates the query early. +Elasticsearch collects documents before sorting. +Use with caution. +Elasticsearch applies this parameter to each shard handling the request. +When possible, let Elasticsearch perform early termination automatically. +Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +** *`timeout` (Optional, string | -1 | 0)*: Period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. +** *`version` (Optional, boolean)*: If `true`, returns the document version as part of a hit. ** *`version_type` (Optional, boolean)*: Should the document increment the version number (internal) on hit or not (reindex) -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the update by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) -** *`wait_for_completion` (Optional, boolean)*: Should the request should block until the update by query operation is complete. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). 
+** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. [discrete] === update_by_query_rethrottle Changes the number of requests per second for a particular Update By Query operation. {ref}/docs-update-by-query.html[Endpoint documentation] [source,ts] ---- client.updateByQueryRethrottle({ task_id }) ---- [discrete] ==== Arguments * *Request (object):* -** *`task_id` (string)*: The task id to rethrottle -** *`requests_per_second` (Optional, float)*: The throttle to set on this request in floating sub-requests per second. -1 means set no throttle. +** *`task_id` (string)*: The ID for the task. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. [discrete] === async_search From 9e47fe2d3edc0ee76d7251da2721053c479502d0 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Sat, 9 Sep 2023 03:40:13 +0930 Subject: [PATCH 254/647] Auto-generated code for main (#2004) --- docs/reference.asciidoc | 209 ++++++++++++++++++++++++---------------- 1 file changed, 128 insertions(+), 81 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 5a83622c9..c9ef99a1a 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -298,18 +298,23 @@ client.exists({ id, index }) ==== Arguments * *Request (object):* -** *`id` (string)*: The document ID -** *`index` (string)*: The name of the index -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`realtime` (Optional, boolean)*: Specify whether to perform the operation in realtime or search mode -** *`refresh` (Optional, boolean)*: Refresh the shard containing the document before performing the operation -** *`routing` (Optional, string)*: Specific routing value -** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return -** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field -** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field -** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response -** *`version` (Optional, number)*: Explicit version number for concurrency control -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type +** *`id` (string)*: Identifier of the document. +** *`index` (string)*: List of data streams, indices, and aliases. +Supports wildcards (`*`). +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. +Random by default. +** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. +** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes the shard containing the document before the operation runs. +** *`routing` (Optional, string)*: Target the specified primary shard. +** *`_source` (Optional, boolean | string | string[])*: `true` or `false` to return the `_source` field or not, or a list of fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. +** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. +If no fields are specified, no stored fields are included in the response. +If this field is specified, the `_source` parameter defaults to false.
+** *`version` (Optional, number)*: Explicit version number for concurrency control. +The specified version must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. [discrete] === exists_source Returns information about whether a document source exists in an index. {ref}/docs-get.html[Endpoint documentation] [source,ts] ---- client.existsSource({ id, index }) ---- [discrete] ==== Arguments * *Request (object):* -** *`id` (string)*: The document ID -** *`index` (string)*: The name of the index -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`realtime` (Optional, boolean)*: Specify whether to perform the operation in realtime or search mode -** *`refresh` (Optional, boolean)*: Refresh the shard containing the document before performing the operation -** *`routing` (Optional, string)*: Specific routing value -** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return -** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field -** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field -** *`version` (Optional, number)*: Explicit version number for concurrency control -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type +** *`id` (string)*: Identifier of the document. +** *`index` (string)*: List of data streams, indices, and aliases. +Supports wildcards (`*`). +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. +Random by default. +** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. +** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes the shard containing the document before the operation runs. +** *`routing` (Optional, string)*: Target the specified primary shard. +** *`_source` (Optional, boolean | string | string[])*: `true` or `false` to return the `_source` field or not, or a list of fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. +** *`version` (Optional, number)*: Explicit version number for concurrency control. +The specified version must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`.
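For illustration, a minimal sketch of both existence checks documented above, assuming an already-configured `client` instance as elsewhere in these docs; the index name `my-index` and document ID `'1'` are placeholders:

[source,ts]
----
// `exists` resolves to a boolean indicating whether the document is present.
const docExists = await client.exists({ index: 'my-index', id: '1' })

// `existsSource` additionally requires the document's `_source` to be available
// (it resolves to false when `_source` is disabled or excluded for the document).
const hasSource = await client.existsSource({ index: 'my-index', id: '1' })
----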
[discrete] === explain @@ -349,21 +357,24 @@ client.explain({ id, index }) ==== Arguments * *Request (object):* -** *`id` (string)*: The document ID -** *`index` (string)*: The name of the index -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* -** *`analyzer` (Optional, string)*: The analyzer for the query string query -** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcards and prefix queries in the query string query should be analyzed (default: false) -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) -** *`df` (Optional, string)*: The default field for query string query (default: _all) -** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`routing` (Optional, string)*: Specific routing value -** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return -** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field -** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field -** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response -** *`q` (Optional, string)*: Query in the Lucene query string syntax +** *`id` (string)*: Defines the document ID. +** *`index` (string)*: Index names used to limit the request. +Only a single index name can be provided to this parameter. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`analyzer` (Optional, string)*: Analyzer to use for the query string. +This parameter can only be used when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. 
+** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. +Random by default. +** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: True or false to return the `_source` field or not, or a list of fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response. +** *`q` (Optional, string)*: Query in the Lucene query string syntax. [discrete] === field_caps @@ -408,13 +419,15 @@ client.get({ id, index }) ** *`id` (string)*: Unique identifier of the document. ** *`index` (string)*: Name of the index that contains the document. ** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. -** *`realtime` (Optional, boolean)*: Boolean) If true, the request is real-time as opposed to near-real-time. +** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. ** *`refresh` (Optional, boolean)*: If true, Elasticsearch refreshes the affected shards to make this operation visible to search. If false, do nothing with refreshes. ** *`routing` (Optional, string)*: Target the specified primary shard. ** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return. ** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. ** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response +** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. +If no fields are specified, no stored fields are included in the response. +If this field is specified, the `_source` parameter defaults to false. ** *`version` (Optional, number)*: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. ** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: internal, external, external_gte. @@ -511,20 +524,30 @@ client.index({ index }) ==== Arguments * *Request (object):* -** *`index` (string)*: The name of the index -** *`id` (Optional, string)*: Document ID +** *`index` (string)*: Name of the data stream or index to target. +** *`id` (Optional, string)*: Unique identifier for the document. ** *`document` (Optional, object)*: A document. 
-** *`if_primary_term` (Optional, number)*: only perform the index operation if the last operation that has changed the document has the specified primary term -** *`if_seq_no` (Optional, number)*: only perform the index operation if the last operation that has changed the document has the specified sequence number -** *`op_type` (Optional, Enum("index" | "create"))*: Explicit operation type. Defaults to `index` for requests with an explicit document ID, and to `create`for requests without an explicit document ID -** *`pipeline` (Optional, string)*: The pipeline id to preprocess incoming documents with -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. -** *`routing` (Optional, string)*: Specific routing value -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`version` (Optional, number)*: Explicit version number for concurrency control -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) -** *`require_alias` (Optional, boolean)*: When true, requires destination to be an alias. Default is false +** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. +** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. +** *`op_type` (Optional, Enum("index" | "create"))*: Set to create to only index the document if it does not already exist (put if absent). +If a document with the specified `_id` already exists, the indexing operation will fail. +Same as using the `/_create` endpoint. +Valid values: `index`, `create`. +If document id is specified, it defaults to `index`. +Otherwise, it defaults to `create`. +** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. +If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. +If a final pipeline is configured it will always run, regardless of the value of this parameter. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. +Valid values: `true`, `false`, `wait_for`. +** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`timeout` (Optional, string | -1 | 0)*: Period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. +** *`version` (Optional, number)*: Explicit version number for concurrency control. +The specified version must match the current version of the document for the request to succeed. 
+** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias. [discrete] === info @@ -636,13 +659,17 @@ client.msearchTemplate({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names to use as default +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. +Supports wildcards (`*`). +To search all data streams and indices, omit this parameter or use `*`. ** *`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* -** *`ccs_minimize_roundtrips` (Optional, boolean)*: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution -** *`max_concurrent_searches` (Optional, number)*: Controls the maximum number of concurrent searches the multi search api will execute -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type -** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response -** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response +** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. +** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the API can run. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. +Available options: `query_then_fetch`, `dfs_query_then_fetch`. +** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, the response returns `hits.total` as an integer. +If `false`, it returns `hits.total` as an object. +** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. [discrete] === mtermvectors @@ -798,8 +825,8 @@ client.reindexRethrottle({ task_id }) ==== Arguments * *Request (object):* -** *`task_id` (string)*: The task id to rethrottle -** *`requests_per_second` (Optional, float)*: The throttle to set on this request in floating sub-requests per second. -1 means set no throttle. +** *`task_id` (string)*: Identifier for the task. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. [discrete] === render_search_template @@ -814,10 +841,16 @@ client.renderSearchTemplate({ ... 
}) ==== Arguments * *Request (object):* -** *`id` (Optional, string)*: The id of the stored search template +** *`id` (Optional, string)*: ID of the search template to render. +If no `source` is specified, this or the `id` request body parameter is required. ** *`file` (Optional, string)* -** *`params` (Optional, Record)* -** *`source` (Optional, string)* +** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. +The key is the variable name. +The value is the variable value. +** *`source` (Optional, string)*: An inline search template. +Supports the same parameters as the search API's request body. +These parameters also support Mustache variables. +If no `id` or `` is specified, this parameter is required. [discrete] === scripts_painless_execute @@ -1070,13 +1103,19 @@ client.searchShards({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`routing` (Optional, string)*: Specific routing value +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search; use `_all` or omit this parameter to target all indices. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. +Random by default. +** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. [discrete] === search_template Allows to use the Mustache language to pre render search requests. {ref}/search-template.html[Endpoint documentation] [source,ts] ---- client.searchTemplate({ ... }) ---- [discrete] ==== Arguments * *Request (object):* ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (*).
-** *`explain` (Optional, boolean)* +** *`explain` (Optional, boolean)*: If `true`, returns detailed information about score calculation as part of each hit. ** *`id` (Optional, string)*: ID of the search template to use. If no source is specified, this parameter is required. -** *`params` (Optional, Record)* -** *`profile` (Optional, boolean)* +** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. +The key is the variable name. +The value is the variable value. +** *`profile` (Optional, boolean)*: If `true`, the query execution is profiled. ** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. Also supports Mustache variables. If no id is specified, this parameter is required. -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`ccs_minimize_roundtrips` (Optional, boolean)*: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_throttled` (Optional, boolean)*: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. +Random by default. ** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. ** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index should be maintained for scrolled search. ** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. 
** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are rendered as an integer in the response. -** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response +** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. [discrete] === terms_enum From 57e84a911432db195f7f3a93b36f6f921b5d2553 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 12 Sep 2023 11:38:58 -0500 Subject: [PATCH 255/647] Upgrade transport: skip adding new nodes that aren't ready yet (#1995) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 753cf56b7..29388a7af 100644 --- a/package.json +++ b/package.json @@ -82,7 +82,7 @@ "zx": "^7.2.2" }, "dependencies": { - "@elastic/transport": "^8.3.3", + "@elastic/transport": "^8.3.4", "tslib": "^2.4.0" }, "tap": { From 59caa7be59c0bda1d95e60495955a92b974dd0cf Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Wed, 13 Sep 2023 15:53:09 -0400 Subject: [PATCH 256/647] [DOCS] Fix `welcome-to-elastic` link (#2007) --- docs/index-custom-title-page.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index-custom-title-page.html b/docs/index-custom-title-page.html index 804b36782..3efa92c00 100644 --- a/docs/index-custom-title-page.html +++ b/docs/index-custom-title-page.html @@ -159,7 +159,7 @@

[hunk body unrecoverable: the HTML anchor markup on these lines was stripped during extraction; per the patch subject, the hunk fixes the `welcome-to-elastic` link in `docs/index-custom-title-page.html`]

    From 10f7ba75a3de89990798d27ced47da1caef0b264 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 21 Sep 2023 14:37:08 -0500 Subject: [PATCH 257/647] Auto-generated code for main (#2015) --- docs/reference.asciidoc | 4 ++-- src/api/api/query_ruleset.ts | 13 ++++++++++--- src/api/types.ts | 8 ++++---- src/api/typesWithBodyKey.ts | 12 +++++++----- 4 files changed, 23 insertions(+), 14 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index c9ef99a1a..ed0ac8f9f 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -7020,7 +7020,7 @@ Creates or updates a query ruleset. {ref}/put-query-ruleset.html[Endpoint documentation] [source,ts] ---- -client.queryRuleset.put({ ruleset_id }) +client.queryRuleset.put({ ruleset_id, rules }) ---- [discrete] @@ -7028,7 +7028,7 @@ client.queryRuleset.put({ ruleset_id }) * *Request (object):* ** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated -** *`query_ruleset` (Optional, { ruleset_id, rules })* +** *`rules` ({ rule_id, type, criteria, actions }[])* [discrete] === rollup diff --git a/src/api/api/query_ruleset.ts b/src/api/api/query_ruleset.ts index 01ee774ae..771205b0e 100644 --- a/src/api/api/query_ruleset.ts +++ b/src/api/api/query_ruleset.ts @@ -131,15 +131,22 @@ export default class QueryRuleset { async put (this: That, params: T.QueryRulesetPutRequest | TB.QueryRulesetPutRequest, options?: TransportRequestOptions): Promise async put (this: That, params: T.QueryRulesetPutRequest | TB.QueryRulesetPutRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['ruleset_id'] - const acceptedBody: string[] = ['query_ruleset'] + const acceptedBody: string[] = ['rules'] const querystring: Record = {} // @ts-expect-error - let body: any = params.body ?? undefined + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error - body = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { diff --git a/src/api/types.ts b/src/api/types.ts index cb43fd81e..bb6172874 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -3240,8 +3240,8 @@ export type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBuc export interface AggregationsFrequentItemSetsField { field: Field - exclude?: string | string[] - include?: string | string[] + exclude?: AggregationsTermsExclude + include?: AggregationsTermsInclude } export type AggregationsGapPolicy = 'skip' | 'insert_zeros' | 'keep_values' @@ -3856,7 +3856,7 @@ export interface AggregationsSignificantTextAggregation extends AggregationsBuck field?: Field filter_duplicate_text?: boolean gnd?: AggregationsGoogleNormalizedDistanceHeuristic - include?: string | string[] + include?: AggregationsTermsInclude jlh?: EmptyObject min_doc_count?: long mutual_information?: AggregationsMutualInformationHeuristic @@ -15279,7 +15279,7 @@ export interface QueryRulesetListResponse { export interface QueryRulesetPutRequest extends RequestBase { ruleset_id: Id - query_ruleset?: QueryRulesetQueryRuleset + rules: QueryRulesetQueryRule[] } export interface QueryRulesetPutResponse { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index c97c43834..9baaacbc2 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -3313,8 +3313,8 @@ export type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBuc export interface AggregationsFrequentItemSetsField { field: Field - exclude?: string | string[] - include?: string | string[] + exclude?: AggregationsTermsExclude + include?: AggregationsTermsInclude } export type AggregationsGapPolicy = 'skip' | 'insert_zeros' | 'keep_values' @@ -3929,7 +3929,7 @@ export interface AggregationsSignificantTextAggregation extends AggregationsBuck field?: Field filter_duplicate_text?: boolean gnd?: AggregationsGoogleNormalizedDistanceHeuristic - include?: string | string[] + include?: AggregationsTermsInclude jlh?: EmptyObject min_doc_count?: long mutual_information?: AggregationsMutualInformationHeuristic @@ -15574,8 +15574,10 @@ export interface QueryRulesetListResponse { export interface QueryRulesetPutRequest extends RequestBase { ruleset_id: Id - /** @deprecated The use of the 'body' key has been deprecated, use 'query_ruleset' instead. */ - body?: QueryRulesetQueryRuleset + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + rules: QueryRulesetQueryRule[] + } } export interface QueryRulesetPutResponse { From 1a9f1003fbf6dbf2d32b0b1b6cecb7bf154a582d Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 25 Sep 2023 11:47:48 -0500 Subject: [PATCH 258/647] Update CONTRIBUTING (#2016) --- CONTRIBUTING.md | 60 ++++++++++++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 13a6bb39a..2ac7f14dd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ improving the documentation, submitting bug reports and feature requests or writing code. ## Repository structure -The `master` branch is considered unstable, and it's compatible with Elasticsearch master. Unless you are patching an issue, new features should always be sent to the `master` branch, in case of a bugfix, it depends if the bug affects all the release lines.
+The `main` branch is considered unstable, and it's compatible with Elasticsearch main. Unless you are patching an issue, new features should always be sent to the `main` branch; in the case of a bugfix, it depends on whether the bug affects all the release lines.
    There is a branch for every supported release line, such as `7.x` or `6.x`. We release bugfixes as soon as possible, while minor and major releases are published at the same time of the Elastic Stack. Usually for every release line there will be a *published* version and a *next* version. Eg: the `7.x` branch contains the version published on npm, and bugfixes should be sent there, while `7.2` *(assuming that 7.1.x is released)* contains the next version, and new features should be sent there. @@ -31,7 +31,7 @@ Once your changes are ready to submit for review: 1. Test your changes Run the test suite to make sure that nothing is broken. - Usually run `npm test` is enough, our CI will take care of running the integration test. If you want to run the integration test yourself, see the *Testing* section below. + Usually running `npm test` is enough; our CI will take care of running the integration tests. If you want to run the integration tests yourself, see [the *Testing* section](#testing) below. 2. Submit a pull request @@ -58,36 +58,50 @@ Once your changes are ready to submit for review: ### Code generation -The entire content of the API folder is generated as well as the `docs/reference.asciidoc` file.
    -If you want to run the code generation you should run the following command: -```sh -node scripts/generate --tag -# or -node scripts/generate --branch -``` -Then you should copy the content of `api/generated.d.ts` into the `index.d.ts` file *(automate this step would be a nice pr!)*. +The entire content of the `src/api/` directory is automatically generated from [the Elasticsearch specification](https://github.com/elastic/elasticsearch-specification), as is the `docs/reference.asciidoc` file. +This code generation is done using a separate repository that is not currently available to the public. + +If you find discrepancies between this client's API code and what you see when actually interacting with an Elasticsearch API, you can open a pull request here to fix it. +For API fixes, it's likely a change will need to be made to the specification as well, to ensure your fix is not undone by the code generation process. +We will do our best to make sure this is addressed when reviewing and merging your changes. + +PRs to improve the specification are also welcome! +It is implemented in TypeScript, so JavaScript devs should be able to understand it fairly easily. +Spec fixes are particularly helpful, as they will be reflected in ALL official Elasticsearch clients, not just this one. ### Testing -There are different test scripts, usually during development you only need to run `npm test`, but if you want you can run just a part of the suite, following you will find all the testing scripts and what they do. + +There are a few different test scripts. +Usually during development you only need to run `npm test`, but if you want you can run just a part of the suite: | Script | Description | |---|---| | `npm run test:unit` | Runs the content of the `test/unit` folder. | -| `npm run test:behavior` | Runs the content of the `test/behavior` folder. | -| `npm run test:types` | Runs the content of the `test/types` folder. | -| `npm run test:unit -- --cov --coverage-report=html` | Runs the content of the `test/unit` folder and calculates the code coverage. | -| `npm run test:integration` | Runs the integration test runner.
    *Note: it requires a living instance of Elasticsearch.* | -| `npm run lint` | Run the [linter](https://standardjs.com/). | -| `npm run lint:fix` | Fixes the lint errors. | -| `npm test` | Runs lint, unit, behavior, and types test. | +| `npm run test:coverage-100` | Runs unit tests enforcing 100% coverage. | +| `npm run test:coverage-report` | Runs unit tests and generates an `lcov` coverage report. | +| `npm run test:coverage-ui` | Runs unit tests and generates an HTML coverage report. | +| `npm run test:integration` | Runs the integration test runner.
**Note: requires a living instance of Elasticsearch.** | +| `npm run lint` | Run the [linter](https://github.com/standard/ts-standard). | +| `npm run lint:fix` | Fixes linter errors. | +| `npm run license-checker` | Checks that all dependencies have acceptable open source licenses. | +| `npm test` | Runs `lint` and `test:unit`. | #### Integration test -The integration test are generated on the fly by the runner you will find inside `test/integration`, once you execute it, it will clone the Elasticsearch repository and checkout the correct version to grab the [OSS yaml files](https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/test) and the [Elastic licensed yaml files](https://github.com/elastic/elasticsearch/tree/master/x-pack/plugin/src/test/resources/rest-api-spec/test) that will be used for generating the test. -Usually this step is executed by CI since it takes some time, but you can easily run this yourself! Just follow this steps: -1. Boot an Elasticsearch instance, you can do that by running `./scripts/es-docker.sh` or `./scripts/es-docker-platinum.sh`, the first one will work only with the OSS APIs, while the second will work also with the Elastic licensed APIs; -1. If you are running the OSS test, you should use `npm run test:integration`, otherwise use `TEST_ES_SERVER=https://elastic:changeme@localhost:9200 npm run test:integration`. You can also pass a `-b` parameter if you want the test to bail out at the first failure: `npm run test:integration -- -b`; -1. Grab a coffee, it will take some time ;) +The integration tests are generated on the fly by the runner you will find inside `test/integration`. +Once you execute it, it will fetch the [YAML REST test files](https://github.com/elastic/elasticsearch/tree/main/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test) from our artifacts API. +These are used to generate the integration tests. + +Usually this step is executed by CI since it takes some time, but you can easily run this yourself! +Just follow these steps: +1. Boot a fresh Elasticsearch instance, which can be done in a Docker container by running `STACK_VERSION=8.10.0 DETACH=true .buildkite/run-elasticsearch.sh`, where `STACK_VERSION` and `DETACH` environment variables can be adjusted to your needs. A `TEST_SUITE` env var can also be set to `free` or `platinum`, and defaults to `free`. +1. Run `npm run test:integration` to run the whole suite, or `npm run test:integration -- --bail` to stop after the first failure. +1. Grab a coffee, it will take some time. ;) + +This suite is very large, and not all tests will pass. +This is fine. +This suite is mostly used to identify notable changes in success/fail rate over time as we make changes to the client. ### Releasing From 29960d84a8274511da2529f6175bc42daf13e426 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Sep 2023 15:25:45 -0500 Subject: [PATCH 259/647] Update changelog for 8.10.0 (#2021) --- docs/changelog.asciidoc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 3e339a3f7..b82c397da 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,18 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.10.0 + +[discrete] +==== Features + +[discrete] +===== Support for Elasticsearch `v8.10.0` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.10/release-notes-8.10.0.html[here].
+ [discrete] === 8.9.1 From 65580b0a2d59179ea9d63be315051588d0b25ab1 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 28 Sep 2023 11:31:41 -0500 Subject: [PATCH 260/647] Auto-generated code for main (#2025) --- docs/reference.asciidoc | 4 ++-- src/api/types.ts | 1 + src/api/typesWithBodyKey.ts | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index ed0ac8f9f..56d17f6db 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -917,7 +917,7 @@ If `false`, the response does not include the total number of hits matching the ** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. ** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (`*`) patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. -** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter }[])*: Defines the approximate kNN search to run. +** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity }[])*: Defines the approximate kNN search to run. ** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use. ** *`min_score` (Optional, number)*: Minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results. @@ -1429,7 +1429,7 @@ Defaults to 10,000 hits. ** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. ** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. -** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter }[])*: Defines the approximate kNN search to run. +** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity }[])*: Defines the approximate kNN search to run. ** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. 
** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* diff --git a/src/api/types.ts b/src/api/types.ts index bb6172874..7748aa7e7 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2271,6 +2271,7 @@ export interface KnnQuery { num_candidates: long boost?: float filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + similarity?: float } export interface LatLonGeoLocation { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 9baaacbc2..8b077f0f6 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -2344,6 +2344,7 @@ export interface KnnQuery { num_candidates: long boost?: float filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + similarity?: float } export interface LatLonGeoLocation { From 85fe814f796ca105f31358dc83cba3038fb08132 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 4 Oct 2023 09:21:33 -0500 Subject: [PATCH 261/647] Auto-generated code for main (#2028) --- docs/reference.asciidoc | 7 +++++++ src/api/api/ml.ts | 2 +- src/api/types.ts | 1 + src/api/typesWithBodyKey.ts | 1 + 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 56d17f6db..eee72b07c 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -6302,6 +6302,13 @@ ELSER the config is not required. ** *`model_size_bytes` (Optional, number)*: The estimated memory usage in bytes to keep the trained model in memory. This property is supported only if defer_definition_decompression is true or the model definition is not supplied. +** *`platform_architecture` (Optional, string)*: The platform architecture (if applicable) of the trained model. If the model +only works on one platform, because it is heavily optimized for a particular +processor architecture and OS combination, then this field specifies which. +The format of the string must match the platform identifiers used by Elasticsearch, +so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, +or `windows-x86_64`. For portable models (those that work independently of processor +architecture or OS features), leave this field unset. ** *`tags` (Optional, string[])*: An array of tags to organize the model. ** *`defer_definition_decompression` (Optional, boolean)*: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations.
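As a hedged sketch of how the `platform_architecture` field documented above might be supplied: the model ID, tags, input field names, and inference configuration below are illustrative assumptions only, while the body field names match the accepted body list in the `ml.ts` change that follows:

[source,ts]
----
// Sketch: register metadata for a PyTorch model optimized for a single platform.
await client.ml.putTrainedModel({
  model_id: 'my-optimized-model',         // hypothetical model ID
  model_type: 'pytorch',
  platform_architecture: 'linux-x86_64',  // leave unset for portable models
  input: { field_names: ['text_field'] }, // illustrative input mapping
  inference_config: { pass_through: {} }, // illustrative inference configuration
  tags: ['optimized']
})
----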
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 27ccefdb9..ffebde954 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1821,7 +1821,7 @@ export default class Ml { async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'tags'] + const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'platform_architecture', 'tags'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/types.ts b/src/api/types.ts index 7748aa7e7..c879844ca 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -13977,6 +13977,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { metadata?: any model_type?: MlTrainedModelType model_size_bytes?: long + platform_architecture?: string tags?: string[] } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 8b077f0f6..619658ce9 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -14233,6 +14233,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { metadata?: any model_type?: MlTrainedModelType model_size_bytes?: long + platform_architecture?: string tags?: string[] } } From 186692c4b62be3f65c7f1532d546008b57e788ac Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 17 Oct 2023 05:03:29 +1100 Subject: [PATCH 262/647] Auto-generated code for main (#2030) --- docs/reference.asciidoc | 4 ++-- src/api/api/logstash.ts | 9 +++++---- src/api/types.ts | 7 +++++-- src/api/typesWithBodyKey.ts | 7 +++++-- 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index eee72b07c..92db3bbab 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -4942,14 +4942,14 @@ Retrieves Logstash Pipelines used by Central Management {ref}/logstash-api-get-pipeline.html[Endpoint documentation] [source,ts] ---- -client.logstash.getPipeline({ id }) +client.logstash.getPipeline({ ... }) ---- [discrete] ==== Arguments * *Request (object):* -** *`id` (string | string[])*: List of pipeline identifiers. +** *`id` (Optional, string | string[])*: List of pipeline identifiers. 
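A minimal sketch of the relaxed `getPipeline` signature shown above (the pipeline ID is a placeholder, and `client` is assumed to be an already-configured instance):

[source,ts]
----
// With `id` now optional, omitting it retrieves every pipeline.
const allPipelines = await client.logstash.getPipeline()

// A single pipeline can still be fetched by identifier.
const onePipeline = await client.logstash.getPipeline({ id: 'my-pipeline' })
----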
[discrete] ==== put_pipeline diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index 0b85093de..9367e308a 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -73,14 +73,15 @@ export default class Logstash { * Retrieves Logstash Pipelines used by Central Management * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-get-pipeline.html | Elasticsearch API documentation} */ - async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise - async getPipeline (this: That, params: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise { + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined + params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue diff --git a/src/api/types.ts b/src/api/types.ts index c879844ca..9cd35fef9 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2009,6 +2009,9 @@ export interface ClusterStatistics { skipped: integer successful: integer total: integer + running: integer + partial: integer + failed: integer details?: Record } @@ -4930,7 +4933,7 @@ export interface MappingFieldNamesField { enabled: boolean } -export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'match_only_text' +export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text' export interface MappingFlattenedProperty extends MappingPropertyBase { boost?: double @@ -11809,7 +11812,7 @@ export interface LogstashDeletePipelineRequest extends RequestBase { export type LogstashDeletePipelineResponse = boolean export interface LogstashGetPipelineRequest extends RequestBase { - id: Ids + id?: Ids } export type LogstashGetPipelineResponse = Record diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 619658ce9..1afe734f8 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -2082,6 +2082,9 @@ export interface ClusterStatistics { skipped: integer successful: integer total: integer + running: integer + partial: integer + failed: integer details?: Record } @@ -5003,7 +5006,7 @@ export interface MappingFieldNamesField { enabled: boolean } -export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'match_only_text' +export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 
'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text' export interface MappingFlattenedProperty extends MappingPropertyBase { boost?: double @@ -11989,7 +11992,7 @@ export interface LogstashDeletePipelineRequest extends RequestBase { export type LogstashDeletePipelineResponse = boolean export interface LogstashGetPipelineRequest extends RequestBase { - id: Ids + id?: Ids } export type LogstashGetPipelineResponse = Record From f7f1d7c52e8e820002b2dc58f6f641a5f47306e7 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 19 Oct 2023 02:08:56 +1100 Subject: [PATCH 263/647] Auto-generated code for main (#2033) --- src/api/types.ts | 7 +++++++ src/api/typesWithBodyKey.ts | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/src/api/types.ts b/src/api/types.ts index 9cd35fef9..9d9176873 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2286,6 +2286,8 @@ export type Level = 'cluster' | 'indices' | 'shards' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' +export type ManagedBy = 'Index Lifecycle Management' | 'Data Stream Lifecycle' | 'Unmanaged' + export type MapboxVectorTiles = ArrayBuffer export interface MergesStats { @@ -9523,6 +9525,8 @@ export interface IndicesDataStream { generation: integer hidden: boolean ilm_policy?: Name + next_generation_managed_by: ManagedBy + prefer_ilm: boolean indices: IndicesDataStreamIndex[] lifecycle?: IndicesDataStreamLifecycleWithRollover name: DataStreamName @@ -9536,6 +9540,9 @@ export interface IndicesDataStream { export interface IndicesDataStreamIndex { index_name: IndexName index_uuid: Uuid + ilm_policy?: Name + managed_by: ManagedBy + prefer_ilm: boolean } export interface IndicesDataStreamLifecycle { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 1afe734f8..7016b27c3 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -2359,6 +2359,8 @@ export type Level = 'cluster' | 'indices' | 'shards' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' +export type ManagedBy = 'Index Lifecycle Management' | 'Data Stream Lifecycle' | 'Unmanaged' + export type MapboxVectorTiles = ArrayBuffer export interface MergesStats { @@ -9646,6 +9648,8 @@ export interface IndicesDataStream { generation: integer hidden: boolean ilm_policy?: Name + next_generation_managed_by: ManagedBy + prefer_ilm: boolean indices: IndicesDataStreamIndex[] lifecycle?: IndicesDataStreamLifecycleWithRollover name: DataStreamName @@ -9659,6 +9663,9 @@ export interface IndicesDataStream { export interface IndicesDataStreamIndex { index_name: IndexName index_uuid: Uuid + ilm_policy?: Name + managed_by: ManagedBy + prefer_ilm: boolean } export interface IndicesDataStreamLifecycle { From 78cc262939d18195c00e229bf15854f435138e1f Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 24 Oct 2023 04:46:35 +1100 Subject: [PATCH 264/647] Auto-generated code for main (#2034) --- src/api/types.ts | 2 +- src/api/typesWithBodyKey.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/types.ts b/src/api/types.ts index 9d9176873..5f1885b02 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2286,7 +2286,7 @@ export type Level = 'cluster' | 'indices' | 'shards' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' -export type ManagedBy = 'Index Lifecycle Management' | 'Data Stream Lifecycle' | 
'Unmanaged' +export type ManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged' export type MapboxVectorTiles = ArrayBuffer diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 7016b27c3..993be2b15 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -2359,7 +2359,7 @@ export type Level = 'cluster' | 'indices' | 'shards' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' -export type ManagedBy = 'Index Lifecycle Management' | 'Data Stream Lifecycle' | 'Unmanaged' +export type ManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged' export type MapboxVectorTiles = ArrayBuffer From a28a6a3654b84808786c3e031e9b4e65d4e2f8f4 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 23 Oct 2023 14:05:39 -0500 Subject: [PATCH 265/647] Improve proxy configuration docs (#2035) --- docs/connecting.asciidoc | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index 239eea79e..dab9bc4c6 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -539,11 +539,17 @@ If you need to pass through an http(s) proxy for connecting to {es}, the client out of the box offers a handy configuration for helping you with it. Under the hood, it uses the https://github.com/delvedor/hpagent[`hpagent`] module. +In versions 8.0+ of the client, the default `Connection` type is set to `UndiciConnection`, which does not support proxy configurations. +To use a proxy, you will need to use the `HttpConnection` class from `@elastic/transport` instead. + [source,js] ---- +import { HttpConnection } from '@elastic/transport' + const client = new Client({ node: 'http://localhost:9200/', - proxy: 'http://localhost:8080/' + proxy: 'http://localhost:8080/', + Connection: HttpConnection, }) ---- @@ -553,11 +559,12 @@ Basic authentication is supported as well: ---- const client = new Client({ node: 'http://localhost:9200/', - proxy: 'http:user:pwd@//localhost:8080' + proxy: 'http:user:pwd@//localhost:8080', + Connection: HttpConnection, }) ---- -If you are connecting through a not http(s) proxy, such as a `socks5` or `pac`, +If you are connecting through a non-http(s) proxy, such as a `socks5` or `pac`, you can use the `agent` option to configure it. [source,js] @@ -567,7 +574,8 @@ const client = new Client({ node: 'http://localhost:9200/', agent () { return new SocksProxyAgent('socks://127.0.0.1:1080') - } + }, + Connection: HttpConnection, }) ---- From d194a220ea1a7772b27c2f3269f90c7aa4709619 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 24 Oct 2023 12:22:31 -0500 Subject: [PATCH 266/647] Fix backport action (#2036) This should allow us to backport PRs from forks, and gets the action on a newer commit. --- .github/workflows/backport.yml | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 906f1474b..56d4f328a 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -1,16 +1,27 @@ +--- name: Backport on: - pull_request: + pull_request_target: types: - closed - labeled jobs: backport: - runs-on: ubuntu-latest name: Backport + runs-on: ubuntu-latest + # Only react to merged PRs for security reasons. + # See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target. 
+ if: > + github.event.pull_request.merged + && ( + github.event.action == 'closed' + || ( + github.event.action == 'labeled' + && contains(github.event.label.name, 'backport') + ) + ) steps: - - name: Backport - uses: tibdex/backport@7005ef85c4562bc23b0e9b4a9940d5922f439750 + - uses: tibdex/backport@9565281eda0731b1d20c4025c43339fb0a23812e # v2.0.4 with: github_token: ${{ secrets.GITHUB_TOKEN }} From f7ed0394bfac6adfaaf8b9bd1b4d2ed67c1db554 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 24 Oct 2023 15:32:27 -0500 Subject: [PATCH 267/647] Add important flag to proxy config docs (#2040) --- docs/connecting.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index dab9bc4c6..5e1bd2366 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -539,7 +539,7 @@ If you need to pass through an http(s) proxy for connecting to {es}, the client out of the box offers a handy configuration for helping you with it. Under the hood, it uses the https://github.com/delvedor/hpagent[`hpagent`] module. -In versions 8.0+ of the client, the default `Connection` type is set to `UndiciConnection`, which does not support proxy configurations. +IMPORTANT: In versions 8.0+ of the client, the default `Connection` type is set to `UndiciConnection`, which does not support proxy configurations. To use a proxy, you will need to use the `HttpConnection` class from `@elastic/transport` instead. [source,js] From 84a93c2ebfd62400d9e4b479b32dff3720f11fee Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 24 Oct 2023 15:40:00 -0500 Subject: [PATCH 268/647] Skip unit tests for PRs where code did not change (#2041) --- .github/workflows/nodejs.yml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index ca00cb372..6443f0ee8 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -1,7 +1,19 @@ --- name: Node CI -on: [push, pull_request] +on: + push: + paths-ignore: &ignore + - '**/*.md' + - '**/*.asciidoc' + - '**/*.txt' + - 'docs/**' + - '.ci/**' + - '.buildkite/**' + - 'scripts/**' + - 'catalog-info.yaml' + pull_request: + paths-ignore: *ignore jobs: test: From dc7d603f4734f1e9cfff8882f0f462934f1de9e0 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 31 Oct 2023 11:53:22 -0500 Subject: [PATCH 269/647] Fix unit test workflow (#2045) --- .github/workflows/nodejs.yml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 6443f0ee8..4fe2b1236 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -3,7 +3,7 @@ name: Node CI on: push: - paths-ignore: &ignore + paths-ignore: - '**/*.md' - '**/*.asciidoc' - '**/*.txt' @@ -13,7 +13,15 @@ on: - 'scripts/**' - 'catalog-info.yaml' pull_request: - paths-ignore: *ignore + paths-ignore: + - '**/*.md' + - '**/*.asciidoc' + - '**/*.txt' + - 'docs/**' + - '.ci/**' + - '.buildkite/**' + - 'scripts/**' + - 'catalog-info.yaml' jobs: test: From a6392ebe07c3d2884a725d024c7d237dd99cbd98 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 2 Nov 2023 03:14:57 +1100 Subject: [PATCH 270/647] Auto-generated code for main (#2047) --- docs/reference.asciidoc | 1 + src/api/api/ml.ts | 2 +- src/api/types.ts | 1 + src/api/typesWithBodyKey.ts | 1 + 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 92db3bbab..0c5563289 
100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -6370,6 +6370,7 @@ client.ml.putTrainedModelVocabulary({ model_id, vocabulary }) ** *`model_id` (string)*: The unique identifier of the trained model. ** *`vocabulary` (string[])*: The model vocabulary, which must not be empty. ** *`merges` (Optional, string[])*: The optional model merges if required by the tokenizer. +** *`scores` (Optional, number[])*: The optional vocabulary value scores if required by the tokenizer. [discrete] ==== reset_job diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index ffebde954..e0f57a2ad 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1923,7 +1923,7 @@ export default class Ml { async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['vocabulary', 'merges'] + const acceptedBody: string[] = ['vocabulary', 'merges', 'scores'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/types.ts b/src/api/types.ts index 5f1885b02..13b4c40d4 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -14051,6 +14051,7 @@ export interface MlPutTrainedModelVocabularyRequest extends RequestBase { model_id: Id vocabulary: string[] merges?: string[] + scores?: double[] } export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 993be2b15..68b792bc3 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -14313,6 +14313,7 @@ export interface MlPutTrainedModelVocabularyRequest extends RequestBase { body?: { vocabulary: string[] merges?: string[] + scores?: double[] } } From f1e83ae853da8743688eee0f9b3f120a0080b00b Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 2 Nov 2023 10:15:28 -0500 Subject: [PATCH 271/647] Add more docs about keep-alive connections (#2048) --- docs/connecting.asciidoc | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index 5e1bd2366..77c2e7d11 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -11,6 +11,7 @@ This page contains the information you need to connect and use the Client with * <> * <> * <> +* <> * <> [[authentication]] @@ -659,6 +660,37 @@ a|* `name` - `string` * `headers` - `object`, the response status code |=== +[[keep-alive]] +[discrete] +=== Keep-alive connections + +By default, the client uses persistent, keep-alive connections to reduce the overhead of creating a new HTTP connection for each Elasticsearch request. +If you are using the default `UndiciConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 10 minutes. +If you are using the legacy `HttpConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 1 minute. 
+ +If you need to disable keep-alive connections, you can override the HTTP agent with your preferred https://nodejs.org/api/http.html#http_new_agent_options[HTTP agent options]: + +[source,js] +---- +const client = new Client({ + node: 'http://localhost:9200/', + // the function takes as parameter the option + // object passed to the Connection constructor + agent: (opts) => new CustomAgent() +}) +---- + +Or you can disable the HTTP agent entirely: + +[source,js] +---- +const client = new Client({ + node: 'http://localhost:9200/', + // Disable agent and keep-alive + agent: false +}) +---- + [discrete] [[product-check]] === Automatic product check From 96d0adb7155caccfa22f7c75cfcc3c390ef6dc44 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 2 Nov 2023 10:26:20 -0500 Subject: [PATCH 272/647] Action to auto-close stale issues and PRs (#2051) --- .github/workflows/stale.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/stale.yml diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000..3970f1d8d --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,21 @@ +--- +name: 'Close stale issues and PRs' +on: + schedule: + - cron: '30 1 * * *' + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v8 + with: + stale-issue-label: stale + stale-pr-label: stale + days-before-stale: 90 + days-before-close: 14 + exempt-issue-labels: 'good first issue' + close-issue-label: closed-stale + close-pr-label: closed-stale + stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days.' + stale-pr-message: 'This pull request is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days.' From 2d139e512b8aeee2233e739d0fffd47f9cab9db9 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 2 Nov 2023 11:16:39 -0500 Subject: [PATCH 273/647] Improve unit test path filtering rules (#2052) * Improve path-filtering rules on unit test action See docs about how jobs that are skipped still report "success", which is necessary for jobs that are required to succeed based on branch protection rules. https://docs.github.com/en/actions/using-jobs/using-conditions-to-control-job-execution * Code must be checked out for paths-filter to run * Only run on default pull request events Defaults are that it will run when a PR is opened, reopened, or synchronized (new commits pushed). 
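As a usage note for the keep-alive documentation added in patch 271 (#2048) above: the `CustomAgent` placeholder there is left abstract. One concrete variant is Node's built-in `http.Agent`, which only applies when the legacy `HttpConnection` transport is used; a sketch, not part of these patches:

```js
const http = require('http')
const { Client } = require('@elastic/elasticsearch')
const { HttpConnection } = require('@elastic/transport')

const client = new Client({
  node: 'http://localhost:9200/',
  Connection: HttpConnection,
  // the factory receives the options passed to the Connection constructor
  agent: () => new http.Agent({ keepAlive: true, maxSockets: 50 })
})
```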
--- .github/workflows/nodejs.yml | 47 ++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 4fe2b1236..92d2ca72b 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -2,31 +2,36 @@ name: Node CI on: - push: - paths-ignore: - - '**/*.md' - - '**/*.asciidoc' - - '**/*.txt' - - 'docs/**' - - '.ci/**' - - '.buildkite/**' - - 'scripts/**' - - 'catalog-info.yaml' - pull_request: - paths-ignore: - - '**/*.md' - - '**/*.asciidoc' - - '**/*.txt' - - 'docs/**' - - '.ci/**' - - '.buildkite/**' - - 'scripts/**' - - 'catalog-info.yaml' + pull_request: {} jobs: + paths-filter: + name: Detect files changed + runs-on: ubuntu-latest + outputs: + skip: '${{ steps.changes.outputs.skip }}' + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter/@v2.11.1 + id: changes + with: + filters: | + skip: + - '**/*.md' + - '**/*.asciidoc' + - '**/*.txt' + - 'docs/**' + - '.ci/**' + - '.buildkite/**' + - 'scripts/**' + - 'catalog-info.yaml' + test: name: Test runs-on: ${{ matrix.os }} + needs: paths-filter + # only run if files not in `skip` filter were changed + if: needs.paths-filter.outputs.skip != 'true' strategy: fail-fast: false @@ -35,7 +40,7 @@ jobs: os: [ubuntu-latest, windows-latest, macOS-latest] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Use Node.js ${{ matrix.node-version }} uses: actions/setup-node@v3 From 1adc8c356ab471e79a09ea581ea1df03814606e3 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 2 Nov 2023 11:48:16 -0500 Subject: [PATCH 274/647] Fix version bump script (#2054) * Fix version bump script failure It was trying to run `.replace` on a Buffer instead of a string. * Correctly set Node version in Docker --- .ci/Dockerfile | 2 +- .ci/make.mjs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.ci/Dockerfile b/.ci/Dockerfile index 1f10aed8c..cf5ff29a6 100644 --- a/.ci/Dockerfile +++ b/.ci/Dockerfile @@ -1,4 +1,4 @@ -ARG NODE_JS_VERSION=18 +ARG NODE_JS_VERSION=${NODE_JS_VERSION:-18} FROM node:${NODE_JS_VERSION} ARG BUILDER_UID=1000 diff --git a/.ci/make.mjs b/.ci/make.mjs index 305f066e2..b4b1bb7d6 100644 --- a/.ci/make.mjs +++ b/.ci/make.mjs @@ -86,10 +86,10 @@ async function bump (args) { 'utf8' ) - const pipeline = await readFile(join(import.meta.url, '..', '.buildkite', 'pipeline.yml')) + const pipeline = await readFile(join(import.meta.url, '..', '.buildkite', 'pipeline.yml'), 'utf8') await writeFile( join(import.meta.url, '..', '.buildkite', 'pipeline.yml'), - pipeline.replace(/STACK_VERSION: [0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?/, `STACK_VERSION: ${cleanVersion}-SNAPSHOT`), // eslint-disable-line + pipeline.replace(/STACK_VERSION: [0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?/, `STACK_VERSION: ${cleanVersion}-SNAPSHOT`), 'utf8' ) } From 9e2ff665643d0f592dec1b7a242dd5d665a1a879 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 2 Nov 2023 11:48:37 -0500 Subject: [PATCH 275/647] Set module type to commonjs (#2053) https://github.com/elastic/elasticsearch-js/issues/1281 --- package.json | 1 + 1 file changed, 1 insertion(+) diff --git a/package.json b/package.json index 29388a7af..9ccfc0070 100644 --- a/package.json +++ b/package.json @@ -5,6 +5,7 @@ "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", + "type": "commonjs", "scripts": { "test": "npm run build && npm run lint && tap test/unit/{*,**/*}.test.ts", "test:unit": "npm run build && tap 
test/unit/{*,**/*}.test.ts", From 3c5178c7aebaf435f127de29d57298519c6ac85a Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 2 Nov 2023 13:16:24 -0500 Subject: [PATCH 276/647] Stop supporting Node v14 and v16 (#2055) --- .buildkite/pipeline.yml | 1 - .github/workflows/nodejs.yml | 2 +- README.md | 3 ++- docs/installation.asciidoc | 6 +++++- package.json | 2 +- 5 files changed, 9 insertions(+), 5 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index d05129234..e493831a9 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -13,7 +13,6 @@ steps: - "free" - "platinum" nodejs: - - "16" - "18" - "20" command: ./.buildkite/run-tests.sh diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 92d2ca72b..8cd702425 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -36,7 +36,7 @@ jobs: strategy: fail-fast: false matrix: - node-version: [14.x, 16.x, 18.x, 20.x] + node-version: [18.x, 20.x] os: [ubuntu-latest, windows-latest, macOS-latest] steps: diff --git a/README.md b/README.md index 37e78a387..1046dc84d 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ of the getting started documentation. ### Node.js support -NOTE: The minimum supported version of Node.js is `v14`. +NOTE: The minimum supported version of Node.js is `v18`. The client versioning follows the Elastic Stack versioning, this means that major, minor, and patch releases are done following a precise schedule that @@ -53,6 +53,7 @@ of `^7.10.0`). | `10.x` | `April 2021` | `7.12` (mid 2021) | | `12.x` | `April 2022` | `8.2` (early 2022) | | `14.x` | `April 2023` | `8.8` (early 2023) | +| `16.x` | `September 2023` | `8.11` (late 2023) | ### Compatibility diff --git a/docs/installation.asciidoc b/docs/installation.asciidoc index b04a1a1cd..4fe1f78ab 100644 --- a/docs/installation.asciidoc +++ b/docs/installation.asciidoc @@ -24,7 +24,7 @@ To learn more about the supported major versions, please refer to the [[nodejs-support]] === Node.js support -NOTE: The minimum supported version of Node.js is `v14`. +NOTE: The minimum supported version of Node.js is `v18`. The client versioning follows the {stack} versioning, this means that major, minor, and patch releases are done following a precise schedule that @@ -64,6 +64,10 @@ of `^7.10.0`). |`14.x` |April 2023 |`8.8` (early 2023) + +|`16.x` +|September 2023 +|`8.11` (late 2023) |=== [discrete] diff --git a/package.json b/package.json index 9ccfc0070..48e76e34d 100644 --- a/package.json +++ b/package.json @@ -47,7 +47,7 @@ }, "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "engines": { - "node": ">=14" + "node": ">=18" }, "devDependencies": { "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", From ab10e462e701e1a52415d84b466ed3b19d6a3526 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Sat, 4 Nov 2023 02:39:04 +1100 Subject: [PATCH 277/647] Auto-generated code for main (#2058) --- docs/reference.asciidoc | 2 ++ src/api/types.ts | 1 + src/api/typesWithBodyKey.ts | 1 + 3 files changed, 4 insertions(+) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 0c5563289..cea62abd9 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -9065,6 +9065,8 @@ client.transform.deleteTransform({ transform_id }) ** *`transform_id` (string)*: Identifier for the transform. ** *`force` (Optional, boolean)*: If this value is false, the transform must be stopped before it can be deleted. 
If true, the transform is deleted regardless of its current state. +** *`delete_dest_index` (Optional, boolean)*: If this value is true, the destination index is deleted together with the transform. If false, the destination +index will not be deleted ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] diff --git a/src/api/types.ts b/src/api/types.ts index 13b4c40d4..7854e0e4d 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -17380,6 +17380,7 @@ export interface TransformTimeSync { export interface TransformDeleteTransformRequest extends RequestBase { transform_id: Id force?: boolean + delete_dest_index?: boolean timeout?: Duration } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 68b792bc3..c08a73a00 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -17790,6 +17790,7 @@ export interface TransformTimeSync { export interface TransformDeleteTransformRequest extends RequestBase { transform_id: Id force?: boolean + delete_dest_index?: boolean timeout?: Duration } From 99bcff02d5edf0e9200f3a36baf6df9f32aa6bf8 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 3 Nov 2023 11:41:53 -0500 Subject: [PATCH 278/647] Path filtering improvement (#2059) * Another attempt at getting path filtering right * Improve filters --- .github/workflows/nodejs.yml | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 8cd702425..f8a4165c5 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -9,29 +9,23 @@ jobs: name: Detect files changed runs-on: ubuntu-latest outputs: - skip: '${{ steps.changes.outputs.skip }}' + src-only: '${{ steps.changes.outputs.src-only }}' steps: - uses: actions/checkout@v4 - uses: dorny/paths-filter/@v2.11.1 id: changes with: filters: | - skip: - - '**/*.md' - - '**/*.asciidoc' - - '**/*.txt' - - 'docs/**' - - '.ci/**' - - '.buildkite/**' - - 'scripts/**' - - 'catalog-info.yaml' + src-only: + - '!(**/*.{md,asciidoc,txt}|*.{md,asciidoc,txt}|{docs,.ci,.buildkite,scripts}/**/*|catalog-info.yaml)' + - '.github/workflows/**' test: name: Test runs-on: ${{ matrix.os }} needs: paths-filter - # only run if files not in `skip` filter were changed - if: needs.paths-filter.outputs.skip != 'true' + # only run if code relevant to unit tests was changed + if: needs.paths-filter.outputs.src-only == 'true' strategy: fail-fast: false From 5fb65d07af8ded8a5d2a1a62e6e6178034773509 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 3 Nov 2023 15:05:37 -0500 Subject: [PATCH 279/647] Update how users/permissions are set up on CI Docker image (#2060) --- .ci/Dockerfile | 20 ++++++++++++-------- .ci/make.sh | 42 +++++++++++++++++++++++++++++------------- 2 files changed, 41 insertions(+), 21 deletions(-) diff --git a/.ci/Dockerfile b/.ci/Dockerfile index cf5ff29a6..1f871d9f2 100644 --- a/.ci/Dockerfile +++ b/.ci/Dockerfile @@ -12,15 +12,19 @@ RUN apt-get clean -y && \ apt-get install -y zip # Set user permissions and directory -RUN groupadd --system -g ${BUILDER_GID} ${BUILDER_GROUP} \ - && useradd --system --shell /bin/bash -u ${BUILDER_UID} -g ${BUILDER_GROUP} -m elastic 1>/dev/null 2>/dev/null \ +RUN (id -g ${BUILDER_GID} || groupadd --system -g ${BUILDER_GID} ${BUILDER_GROUP}) \ + && (id -u ${BUILDER_UID} || useradd --system --shell /bin/bash -u ${BUILDER_UID} -g ${BUILDER_GID} -m elastic) \ && 
mkdir -p /usr/src/elasticsearch-js \ - && chown -R ${BUILDER_USER}:${BUILDER_GROUP} /usr/src/ + && chown -R ${BUILDER_UID}:${BUILDER_GID} /usr/src/ + WORKDIR /usr/src/elasticsearch-js -USER ${BUILDER_USER}:${BUILDER_GROUP} -# Install app dependencies -COPY --chown=$BUILDER_USER:$BUILDER_GROUP package*.json ./ -RUN npm install +# run remainder of commands as non-root user +USER ${BUILDER_UID}:${BUILDER_GID} + +# install dependencies +COPY package.json . +RUN npm install --production=false -COPY --chown=$BUILDER_USER:$BUILDER_GROUP . . +# copy project files +COPY . . diff --git a/.ci/make.sh b/.ci/make.sh index 7f890cb2b..c3d9f5b4f 100755 --- a/.ci/make.sh +++ b/.ci/make.sh @@ -144,19 +144,35 @@ docker build \ echo -e "\033[34;1mINFO: running $product container\033[0m" -docker run \ - --volume "$repo:/usr/src/elasticsearch-js" \ - --volume /usr/src/elasticsearch-js/node_modules \ - -u "$(id -u):$(id -g)" \ - --env "WORKFLOW=$WORKFLOW" \ - --name make-elasticsearch-js \ - --rm \ - $product \ - /bin/bash -c "cd /usr/src && \ - git clone https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-js.git && \ - mkdir -p /usr/src/elastic-client-generator-js/output && \ - cd /usr/src/elasticsearch-js && \ - node .ci/make.mjs --task $TASK ${TASK_ARGS[*]}" +if [[ -z "${BUILDKITE+x}" ]] && [[ -z "${CI+x}" ]] && [[ -z "${GITHUB_ACTIONS+x}" ]]; then + echo -e "\033[34;1mINFO: Running in local mode" + docker run \ + -u "$(id -u):$(id -g)" \ + --volume "$repo:/usr/src/elasticsearch-js" \ + --volume /usr/src/elasticsearch-js/node_modules \ + --volume "$(realpath $repo/../elastic-client-generator-js):/usr/src/elastic-client-generator-js" \ + --env "WORKFLOW=$WORKFLOW" \ + --name make-elasticsearch-js \ + --rm \ + $product \ + /bin/bash -c "mkdir -p /usr/src/elastic-client-generator-js/output && \ + node .ci/make.mjs --task $TASK ${TASK_ARGS[*]}" +else + echo -e "\033[34;1mINFO: Running in CI mode" + docker run \ + --volume "$repo:/usr/src/elasticsearch-js" \ + --volume /usr/src/elasticsearch-js/node_modules \ + -u "$(id -u):$(id -g)" \ + --env "WORKFLOW=$WORKFLOW" \ + --name make-elasticsearch-js \ + --rm \ + $product \ + /bin/bash -c "cd /usr/src && \ + git clone https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-js.git && \ + mkdir -p /usr/src/elastic-client-generator-js/output && \ + cd /usr/src/elasticsearch-js && \ + node .ci/make.mjs --task $TASK ${TASK_ARGS[*]}" +fi # ------------------------------------------------------- # # Post Command tasks & checks From b47747007bec45678ae5ec79fe488db6bc220862 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 7 Nov 2023 03:59:55 +1100 Subject: [PATCH 280/647] Auto-generated code for main (#2061) --- src/api/types.ts | 6 +++--- src/api/typesWithBodyKey.ts | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/api/types.ts b/src/api/types.ts index 7854e0e4d..0dc17efc8 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -707,7 +707,7 @@ export interface MsearchMultiSearchItem extends SearchRespo status?: integer } -export interface MsearchMultiSearchResult { +export interface MsearchMultiSearchResult> { took: long responses: MsearchResponseItem[] } @@ -780,7 +780,7 @@ export interface MsearchRequest extends RequestBase { export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody -export type MsearchResponse> = MsearchMultiSearchResult +export type MsearchResponse> = MsearchMultiSearchResult export type MsearchResponseItem = MsearchMultiSearchItem | ErrorResponseBase @@ 
-796,7 +796,7 @@ export interface MsearchTemplateRequest extends RequestBase { export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig -export type MsearchTemplateResponse> = MsearchMultiSearchResult +export type MsearchTemplateResponse> = MsearchMultiSearchResult export interface MsearchTemplateTemplateConfig { explain?: boolean diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index c08a73a00..5a4157ac8 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -735,7 +735,7 @@ export interface MsearchMultiSearchItem extends SearchRespo status?: integer } -export interface MsearchMultiSearchResult { +export interface MsearchMultiSearchResult> { took: long responses: MsearchResponseItem[] } @@ -809,7 +809,7 @@ export interface MsearchRequest extends RequestBase { export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody -export type MsearchResponse> = MsearchMultiSearchResult +export type MsearchResponse> = MsearchMultiSearchResult export type MsearchResponseItem = MsearchMultiSearchItem | ErrorResponseBase @@ -826,7 +826,7 @@ export interface MsearchTemplateRequest extends RequestBase { export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig -export type MsearchTemplateResponse> = MsearchMultiSearchResult +export type MsearchTemplateResponse> = MsearchMultiSearchResult export interface MsearchTemplateTemplateConfig { explain?: boolean From abd5018cfe550ee3d8e10753bdb5696e52e9fb36 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 7 Nov 2023 12:07:18 -0600 Subject: [PATCH 281/647] Bump version in main to 8.10.3 (#2066) --- .buildkite/pipeline.yml | 2 +- package.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index e493831a9..32b37b6c6 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -6,7 +6,7 @@ steps: env: NODE_VERSION: "{{ matrix.nodejs }}" TEST_SUITE: "{{ matrix.suite }}" - STACK_VERSION: 8.8.0-SNAPSHOT + STACK_VERSION: 8.10.3-SNAPSHOT matrix: setup: suite: diff --git a/package.json b/package.json index 48e76e34d..a772f791e 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.9.1", - "versionCanary": "8.9.1-canary.1", + "version": "8.10.3", + "versionCanary": "8.10.3-canary.1", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 845ddaaf34a1d0387f0a977b9798d3cea87a4400 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 8 Nov 2023 18:51:30 +0100 Subject: [PATCH 282/647] Auto-generated code for main (#2064) --- docs/reference.asciidoc | 1 + src/api/types.ts | 1 + src/api/typesWithBodyKey.ts | 1 + 3 files changed, 3 insertions(+) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index cea62abd9..9e5291cbb 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -6491,6 +6491,7 @@ client.ml.startTrainedModelDeployment({ model_id }) ** *`cache_size` (Optional, number | string)*: The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. +** *`deployment_id` (Optional, string)*: A unique identifier for the deployment of the model. ** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed. 
All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. diff --git a/src/api/types.ts b/src/api/types.ts index 0dc17efc8..707e1cc13 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -14106,6 +14106,7 @@ export interface MlStartDatafeedResponse { export interface MlStartTrainedModelDeploymentRequest extends RequestBase { model_id: Id cache_size?: ByteSize + deployment_id?: string number_of_allocations?: integer priority?: MlTrainingPriority queue_capacity?: integer diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 5a4157ac8..dbbe3ea1a 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -14375,6 +14375,7 @@ export interface MlStartDatafeedResponse { export interface MlStartTrainedModelDeploymentRequest extends RequestBase { model_id: Id cache_size?: ByteSize + deployment_id?: string number_of_allocations?: integer priority?: MlTrainingPriority queue_capacity?: integer From b7b16d81ee3bdeafbaad393801bda4abfa30fe78 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 8 Nov 2023 13:07:55 -0600 Subject: [PATCH 283/647] Fix arg-parsing issue in codegen script (#2068) --- .ci/make.mjs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.ci/make.mjs b/.ci/make.mjs index b4b1bb7d6..511944972 100644 --- a/.ci/make.mjs +++ b/.ci/make.mjs @@ -28,6 +28,11 @@ import assert from 'assert' import { join } from 'desm' import semver from 'semver' +// xz/globals loads minimist-parsed args as a global `argv`, but it +// interprets args like '8.10' as numbers and shortens them to '8.1'. +// so we have to import and configure minimist ourselves. +import minimist from 'minimist' +const argv = minimist(process.argv.slice(2), { string: ['_', 'task'] }) assert(typeof argv.task === 'string', 'Missing task parameter') switch (argv.task) { From 426919709f43e3680a696de8880c4203be1e4dab Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 16 Nov 2023 10:14:20 -0600 Subject: [PATCH 284/647] Throw an explicit error when asStream is used with bulk helper (#2078) --- src/helpers.ts | 2 ++ test/unit/helpers/bulk.test.ts | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/src/helpers.ts b/src/helpers.ts index 768ad1dc8..0bd1b1c5c 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -527,6 +527,8 @@ export default class Helpers { * @return {object} The possible operations to run with the datasource. */ bulk (options: BulkHelperOptions, reqOptions: TransportRequestOptions = {}): BulkHelper { + assert(!(reqOptions.asStream ?? 
false), 'bulk helper: the asStream request option is not supported') + const client = this[kClient] const { serializer } = client if (this[kMetaHeader] !== null) { diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts index 732d696c4..2c3229ce9 100644 --- a/test/unit/helpers/bulk.test.ts +++ b/test/unit/helpers/bulk.test.ts @@ -18,6 +18,7 @@ */ import FakeTimers from '@sinonjs/fake-timers' +import { AssertionError } from 'assert' import { createReadStream } from 'fs' import * as http from 'http' import { join } from 'path' @@ -1336,6 +1337,37 @@ test('transport options', t => { }) }) + t.test('Should not allow asStream request option', async t => { + t.plan(2) + + const client = new Client({ + node: 'http://localhost:9200/', + }) + + try { + await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return { index: { _index: 'test' } } + }, + onDrop (doc) { + t.fail('This should never be called') + }, + refreshOnCompletion: true + }, { + headers: { + foo: 'bar' + }, + asStream: true, + }) + } catch (err: any) { + t.ok(err instanceof AssertionError) + t.equal(err.message, 'bulk helper: the asStream request option is not supported') + } + }) + t.end() }) From 06e3b050778bdeca4c44d28a0926e6c8e4f5ab4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Mon, 27 Nov 2023 17:46:53 +0100 Subject: [PATCH 285/647] [DOCS] Adds a signpost for downloading ES or signing-up for ESS (#2084) --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 1046dc84d..5d34cfa98 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,11 @@ [![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://badge.buildkite.com/15e4246eb268ea78f6e10aa90bce38c1abb0a4489e79f5a0ac.svg)](https://buildkite.com/elastic/elasticsearch-javascript-client-integration-tests/builds?branch=main) [![Node CI](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml/badge.svg)](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) +**[Download the latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch)** +or +**[sign-up](https://cloud.elastic.co/registration?elektra=en-ess-sign-up-page)** +**for a free trial of Elastic Cloud**. + The official Node.js client for Elasticsearch. 
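Returning to the bulk-helper guard added in patch 284 (#2078): from the caller's side the new assertion surfaces as a rejected promise. A sketch, with a hypothetical client, datasource, and index:

```js
try {
  await client.helpers.bulk({
    datasource: docs, // hypothetical iterable of documents
    onDocument () { return { index: { _index: 'test' } } }
  }, { asStream: true })
} catch (err) {
  // 'bulk helper: the asStream request option is not supported'
  console.log(err.message)
}
```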
## Installation From c2c417a9fd84e7f77a84b0cfa079bdde40473574 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 12 Dec 2023 15:40:47 -0600 Subject: [PATCH 286/647] Bump transport to 8.4.0 (#2095) * Support for transport 8.4.0 redaction functionality * Docs for `redaction` options --- docs/advanced-config.asciidoc | 88 +++++++++++++++++++++++++++++++++++ package.json | 2 +- src/client.ts | 11 ++++- src/helpers.ts | 12 +++-- 4 files changed, 107 insertions(+), 6 deletions(-) diff --git a/docs/advanced-config.asciidoc b/docs/advanced-config.asciidoc index 638aeada4..167061dce 100644 --- a/docs/advanced-config.asciidoc +++ b/docs/advanced-config.asciidoc @@ -91,6 +91,94 @@ const client = new Client({ }) ---- +[discrete] +==== Redaction of potentially sensitive data + +When the client raises an `Error` that originated at the HTTP layer, like a `ConnectionError` or `TimeoutError`, a `meta` object is often attached to the error object that includes metadata useful for debugging, like request and response information. Because this can include potentially sensitive data, like authentication secrets in an `Authorization` header, the client takes measures to redact common sources of sensitive data when this metadata is attached and serialized. + +If your configuration requires extra headers or other configurations that may include sensitive data, you may want to adjust these settings to account for that. + +By default, the `redaction` option is set to `{ type: 'replace' }`, which recursively searches for sensitive key names, case insensitive, and replaces their values with the string `[redacted]`. + +[source,js] +---- +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, +}) + +try { + await client.indices.create({ index: 'my_index' }) +} catch (err) { + console.log(err.meta.meta.request.options.headers.authorization) // prints "[redacted]" +} +---- + +If you would like to redact additional properties, you can include additional key names to search and replace: + +[source,js] +---- +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, + headers: { 'X-My-Secret-Password': 'shhh it\'s a secret!' }, + redaction: { + type: "replace", + additionalKeys: ["x-my-secret-password"] + } +}) + +try { + await client.indices.create({ index: 'my_index' }) +} catch (err) { + console.log(err.meta.meta.request.options.headers['X-My-Secret-Password']) // prints "[redacted]" +} +---- + +Alternatively, if you know you're not going to use the metadata at all, setting the redaction type to `remove` will remove all optional sources of potentially sensitive data entirely, or replace them with `null` for required properties. + +[source,js] +---- +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, + redaction: { type: "remove" } +}) + +try { + await client.indices.create({ index: 'my_index' }) +} catch (err) { + console.log(err.meta.meta.request.options.headers) // undefined +} +---- + +Finally, if you prefer to turn off redaction altogether, perhaps while debugging on a local developer environment, you can set the redaction type to `off`. This will revert the client to pre-8.11.0 behavior, where basic redaction is only performed during common serialization methods like `console.log` and `JSON.stringify`.
+ +WARNING: Setting `redaction.type` to `off` is not recommended in production environments. + +[source,js] +---- +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, + redaction: { type: "off" } +}) + +try { + await client.indices.create({ index: 'my_index' }) +} catch (err) { + console.log(err.meta.meta.request.options.headers.authorization) // the actual header value will be logged +} +---- + [discrete] ==== Migrate to v8 diff --git a/package.json b/package.json index a772f791e..56332e190 100644 --- a/package.json +++ b/package.json @@ -83,7 +83,7 @@ "zx": "^7.2.2" }, "dependencies": { - "@elastic/transport": "^8.3.4", + "@elastic/transport": "^8.4.0", "tslib": "^2.4.0" }, "tap": { diff --git a/src/client.ts b/src/client.ts index 09118d58c..50ba4942f 100644 --- a/src/client.ts +++ b/src/client.ts @@ -43,6 +43,7 @@ import { BearerAuth, Context } from '@elastic/transport/lib/types' +import { RedactionOptions } from '@elastic/transport/lib/Transport' import BaseConnection, { prepareHeaders } from '@elastic/transport/lib/connection/BaseConnection' import SniffingTransport from './sniffingTransport' import Helpers from './helpers' @@ -113,6 +114,7 @@ export interface ClientOptions { caFingerprint?: string maxResponseSize?: number maxCompressedResponseSize?: number + redaction?: RedactionOptions } export default class Client extends API { @@ -186,7 +188,11 @@ export default class Client extends API { proxy: null, enableMetaHeader: true, maxResponseSize: null, - maxCompressedResponseSize: null + maxCompressedResponseSize: null, + redaction: { + type: 'replace', + additionalKeys: [] + } }, opts) if (options.caFingerprint != null && isHttpConnection(opts.node ?? opts.nodes)) { @@ -259,7 +265,8 @@ export default class Client extends API { jsonContentType: 'application/vnd.elasticsearch+json; compatible-with=8', ndjsonContentType: 'application/vnd.elasticsearch+x-ndjson; compatible-with=8', accept: 'application/vnd.elasticsearch+json; compatible-with=8,text/plain' - } + }, + redaction: options.redaction }) this.helpers = new Helpers({ diff --git a/src/helpers.ts b/src/helpers.ts index 0bd1b1c5c..efad8b49b 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -196,8 +196,11 @@ export default class Helpers { await sleep(wait) } assert(response !== undefined, 'The response is undefined, please file a bug report') + + const { redaction = { type: 'replace' } } = options + const errorOptions = { redaction } if (response.statusCode === 429) { - throw new ResponseError(response) + throw new ResponseError(response, errorOptions) } let scroll_id = response.body._scroll_id @@ -237,7 +240,7 @@ export default class Helpers { await sleep(wait) } if (response.statusCode === 429) { - throw new ResponseError(response) + throw new ResponseError(response, errorOptions) } } @@ -289,6 +292,9 @@ export default class Helpers { } = options reqOptions.meta = true + const { redaction = { type: 'replace' } } = reqOptions + const errorOptions = { redaction } + let stopReading = false let stopError: Error | null = null let timeoutRef = null @@ -502,7 +508,7 @@ export default class Helpers { // @ts-expect-error addDocumentsGetter(result) if (response.status != null && response.status >= 400) { - callbacks[i](new ResponseError(result), result) + callbacks[i](new ResponseError(result, errorOptions), result) } else { callbacks[i](null, result) } From 1fb789862dd36e85c211bf1d32aa90c3454c5dc2 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: 
Tue, 12 Dec 2023 16:06:03 -0600 Subject: [PATCH 287/647] 8.11.0 changelog (#2097) * Changelog for 8.11.0 * Add redaction docs link to changelog --- docs/advanced-config.asciidoc | 1 + docs/changelog.asciidoc | 19 +++++++++++++++++++ package.json | 4 ++-- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/docs/advanced-config.asciidoc b/docs/advanced-config.asciidoc index 167061dce..b3c9388a4 100644 --- a/docs/advanced-config.asciidoc +++ b/docs/advanced-config.asciidoc @@ -92,6 +92,7 @@ ---- [discrete] +[[redaction]] ==== Redaction of potentially sensitive data When the client raises an `Error` that originated at the HTTP layer, like a `ConnectionError` or `TimeoutError`, a `meta` object is often attached to the error object that includes metadata useful for debugging, like request and response information. Because this can include potentially sensitive data, like authentication secrets in an `Authorization` header, the client takes measures to redact common sources of sensitive data when this metadata is attached and serialized. diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index b82c397da..0d79214b2 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,25 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.11.0 + +[discrete] +=== Features + +[discrete] +===== Support for Elasticsearch `v8.11.0` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.11/release-notes-8.11.0.html[here]. + +[discrete] +===== Enhanced support for redacting potentially sensitive data https://github.com/elastic/elasticsearch-js/pull/2095[#2095] + +`@elastic/transport` https://github.com/elastic/elastic-transport-js/releases/tag/v8.4.0[version 8.4.0] introduces enhanced measures for ensuring that request metadata attached to some `Error` objects is redacted. This functionality is primarily to address custom logging solutions that don't use common serialization methods like `JSON.stringify`, `console.log`, or `util.inspect`, which were already accounted for. + +See <<redaction>> for more information.
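As a usage note for the changelog entry above: the transport-level redaction matters most when error metadata flows through a custom serializer rather than `console.log`. A sketch (the logger is hypothetical):

```js
try {
  await client.search({ index: 'my-index', query: { match_all: {} } })
} catch (err) {
  // Secrets are already '[redacted]' when meta is attached to the error,
  // so even a bespoke serializer will not see the raw Authorization header.
  myCustomLogger.write(JSON.stringify(err.meta))
}
```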
+ [discrete] === 8.10.0 diff --git a/package.json b/package.json index 56332e190..add6446fa 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.10.3", - "versionCanary": "8.10.3-canary.1", + "version": "8.11.0", + "versionCanary": "8.11.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 51323e769dbaedc8c584ca7ad6cbdf3a4722234b Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 13 Dec 2023 11:20:04 -0600 Subject: [PATCH 288/647] Github action for publishing to npm with provenance metadata (#2103) --- .github/workflows/npm-publish.yml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .github/workflows/npm-publish.yml diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml new file mode 100644 index 000000000..73a7d36c2 --- /dev/null +++ b/.github/workflows/npm-publish.yml @@ -0,0 +1,27 @@ +name: Publish Package to npm +on: + workflow_dispatch: + inputs: + branch: + description: 'Git branch to build and publish' + required: true +jobs: + build: + runs-on: ubuntu-latest + permissions: + contents: read + id-token: write + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.branch }} + - uses: actions/setup-node@v3 + with: + node-version: '20.x' + registry-url: 'https://registry.npmjs.org/' + - run: npm install -g npm + - run: npm install + - run: npm test + - run: npm publish --provenance --access public + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} From d3f22f1e14d292849d55eaea356d6f28ea7abfec Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 14 Dec 2023 09:46:04 -0600 Subject: [PATCH 289/647] Add doc for closing connections (#2104) --- docs/connecting.asciidoc | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index 77c2e7d11..15007ceb3 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -12,6 +12,7 @@ This page contains the information you need to connect and use the Client with * <> * <> * <> +* <> * <> [[authentication]] @@ -691,6 +692,20 @@ const client = new Client({ }) ---- +[discrete] +[[close-connections]] +=== Closing a client's connections + +If you would like to close all open connections being managed by an instance of the client, use the `close()` function: + +[source,js] +---- +const client = new Client({ + node: 'http://localhost:9200/' +}); +client.close(); +---- + [discrete] [[product-check]] === Automatic product check From 4aaf49b6ea86a0906dc7768b4d227f10b6bd8198 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 14 Dec 2023 16:35:37 -0600 Subject: [PATCH 290/647] Integration test improvements (#2109) * Improvements to integrations Borrowed largely from https://github.com/elastic/elasticsearch-serverless-js/pull/38 * Bump all the things to 8.12.0 * Split Dockerfile copy into two layers * Fix test cron names --- .buildkite/Dockerfile | 4 +- .buildkite/pipeline.yml | 2 +- .dockerignore | 2 + catalog-info.yaml | 8 +- package.json | 6 +- test/integration/index.js | 4 +- test/integration/test-runner.js | 165 ++++++++++++++++++++++---------- 7 files changed, 130 insertions(+), 61 deletions(-) diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile index 5608747b6..6d44e2211 100644 --- a/.buildkite/Dockerfile +++ b/.buildkite/Dockerfile @@ -10,5 +10,7 @@ RUN apt-get clean -y && \ WORKDIR /usr/src/app -COPY . . +COPY package.json . 
RUN npm install --production=false + +COPY . . diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 32b37b6c6..1dca14548 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -6,7 +6,7 @@ steps: env: NODE_VERSION: "{{ matrix.nodejs }}" TEST_SUITE: "{{ matrix.suite }}" - STACK_VERSION: 8.10.3-SNAPSHOT + STACK_VERSION: 8.12.0-SNAPSHOT matrix: setup: suite: diff --git a/.dockerignore b/.dockerignore index 54eb2a95a..e34f9ff27 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,3 +3,5 @@ npm-debug.log test/benchmarks elasticsearch .git +lib +junit-output diff --git a/catalog-info.yaml b/catalog-info.yaml index b8bbd36ff..80c943cd8 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -42,9 +42,9 @@ spec: main_semi_daily: branch: 'main' cronline: '0 */12 * * *' - 8_9_semi_daily: - branch: '8.9' + 8_12_semi_daily: + branch: '8.12' cronline: '0 */12 * * *' - 8_8_daily: - branch: '8.8' + 8_11_daily: + branch: '8.11' cronline: '@daily' diff --git a/package.json b/package.json index add6446fa..7e3d637b0 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.11.0", - "versionCanary": "8.11.0-canary.0", + "version": "8.12.0", + "versionCanary": "8.12.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", @@ -93,4 +93,4 @@ "coverage": false, "check-coverage": false } -} +} \ No newline at end of file diff --git a/test/integration/index.js b/test/integration/index.js index defdb400f..b07ddd2d7 100644 --- a/test/integration/index.js +++ b/test/integration/index.js @@ -317,7 +317,7 @@ async function start ({ client, isXPack }) { if (name === 'setup' || name === 'teardown') continue if (options.test && !name.endsWith(options.test)) continue - const junitTestCase = junitTestSuite.testcase(name, `node_${process.version}/${cleanPath}`) + const junitTestCase = junitTestSuite.testcase(name, `node_${process.version}: ${cleanPath}`) stats.total += 1 if (shouldSkip(isXPack, file, name)) { @@ -336,6 +336,7 @@ async function start ({ client, isXPack }) { junitTestSuite.end() junitTestSuites.end() generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free') + err.meta = JSON.stringify(err.meta ?? {}, null, 2) console.error(err) if (options.bail) { @@ -374,6 +375,7 @@ async function start ({ client, isXPack }) { - Total: ${stats.total} - Skip: ${stats.skip} - Pass: ${stats.pass} + - Fail: ${stats.total - (stats.pass + stats.skip)} - Assertions: ${stats.assertions} `) } diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js index 64570945a..ce80da43e 100644 --- a/test/integration/test-runner.js +++ b/test/integration/test-runner.js @@ -593,13 +593,14 @@ function build (opts = {}) { const key = Object.keys(action.match)[0] match( // in some cases, the yaml refers to the body with an empty string - key === '$body' || key === '' + key.split('.')[0] === '$body' || key === '' ? response : delve(response, fillStashedValues(key)), - key === '$body' + key.split('.')[0] === '$body' ? 
action.match[key] : fillStashedValues(action.match)[key], - action.match + action.match, + response ) } @@ -608,7 +609,8 @@ function build (opts = {}) { const key = Object.keys(action.lt)[0] lt( delve(response, fillStashedValues(key)), - fillStashedValues(action.lt)[key] + fillStashedValues(action.lt)[key], + response ) } @@ -617,7 +619,8 @@ function build (opts = {}) { const key = Object.keys(action.gt)[0] gt( delve(response, fillStashedValues(key)), - fillStashedValues(action.gt)[key] + fillStashedValues(action.gt)[key], + response ) } @@ -626,7 +629,8 @@ function build (opts = {}) { const key = Object.keys(action.lte)[0] lte( delve(response, fillStashedValues(key)), - fillStashedValues(action.lte)[key] + fillStashedValues(action.lte)[key], + response ) } @@ -635,7 +639,8 @@ function build (opts = {}) { const key = Object.keys(action.gte)[0] gte( delve(response, fillStashedValues(key)), - fillStashedValues(action.gte)[key] + fillStashedValues(action.gte)[key], + response ) } @@ -648,7 +653,8 @@ function build (opts = {}) { : delve(response, fillStashedValues(key)), key === '$body' ? action.length[key] - : fillStashedValues(action.length)[key] + : fillStashedValues(action.length)[key], + response ) } @@ -657,7 +663,8 @@ function build (opts = {}) { const isTrue = fillStashedValues(action.is_true) is_true( delve(response, isTrue), - isTrue + isTrue, + response ) } @@ -666,7 +673,8 @@ function build (opts = {}) { const isFalse = fillStashedValues(action.is_false) is_false( delve(response, isFalse), - isFalse + isFalse, + response ) } } @@ -679,46 +687,67 @@ function build (opts = {}) { * Asserts that the given value is truthy * @param {any} the value to check * @param {string} an optional message + * @param {any} debugging metadata to attach to any assertion errors * @returns {TestRunner} */ -function is_true (val, msg) { - assert.ok(val, `expect truthy value: ${msg} - value: ${JSON.stringify(val)}`) +function is_true (val, msg, response) { + try { + assert.ok((typeof val === 'string' && val.toLowerCase() === 'true') || val, `expect truthy value: ${msg} - value: ${JSON.stringify(val)}`) + } catch (err) { + err.response = JSON.stringify(response) + throw err + } } /** * Asserts that the given value is falsey * @param {any} the value to check * @param {string} an optional message + * @param {any} debugging metadata to attach to any assertion errors * @returns {TestRunner} */ -function is_false (val, msg) { - assert.ok(!val, `expect falsey value: ${msg} - value: ${JSON.stringify(val)}`) +function is_false (val, msg, response) { + try { + assert.ok((typeof val === 'string' && val.toLowerCase() === 'false') || !val, `expect falsey value: ${msg} - value: ${JSON.stringify(val)}`) + } catch (err) { + err.response = JSON.stringify(response) + throw err + } } /** * Asserts that two values are the same * @param {any} the first value * @param {any} the second value + * @param {any} debugging metadata to attach to any assertion errors * @returns {TestRunner} */ -function match (val1, val2, action) { - // both values are objects - if (typeof val1 === 'object' && typeof val2 === 'object') { - assert.deepEqual(val1, val2, typeof action === 'object' ? 
JSON.stringify(action) : action) - // the first value is the body as string and the second a pattern string - } else if ( - typeof val1 === 'string' && typeof val2 === 'string' && - val2.startsWith('/') && (val2.endsWith('/\n') || val2.endsWith('/')) - ) { - const regStr = val2 - .replace(/(^|[^\\])#.*/g, '$1') - .replace(/(^|[^\\])\s+/g, '$1') - .slice(1, -1) - // 'm' adds the support for multiline regex - assert.match(val1, new RegExp(regStr, 'm'), `should match pattern provided: ${val2}, but got: ${val1}`) - // everything else - } else { - assert.equal(val1, val2, `should be equal: ${val1} - ${val2}, action: ${JSON.stringify(action)}`) +function match (val1, val2, action, response) { + try { + // both values are objects + if (typeof val1 === 'object' && typeof val2 === 'object') { + assert.deepEqual(val1, val2, typeof action === 'object' ? JSON.stringify(action) : action) + // the first value is the body as string and the second a pattern string + } else if ( + typeof val1 === 'string' && typeof val2 === 'string' && + val2.startsWith('/') && (val2.endsWith('/\n') || val2.endsWith('/')) + ) { + const regStr = val2 + .replace(/(^|[^\\])#.*/g, '$1') + .replace(/(^|[^\\])\s+/g, '$1') + .slice(1, -1) + // 'm' adds the support for multiline regex + assert.match(val1, new RegExp(regStr, 'm'), `should match pattern provided: ${val2}, but got: ${val1}: ${JSON.stringify(action)}`) + } else if (typeof val1 === 'string' && typeof val2 === 'string') { + // string comparison + assert.include(val1, val2, `should include pattern provided: ${val2}, but got: ${val1}: ${JSON.stringify(action)}`) + } else { + // everything else + assert.equal(val1, val2, `should be equal: ${val1} - ${val2}, action: ${JSON.stringify(action)}`) + } + } catch (err) { + err.response = JSON.stringify(response) + throw err } } @@ -727,11 +756,17 @@ function match (val1, val2, action) { * It also verifies that the two values are numbers * @param {any} the first value * @param {any} the second value + * @param {any} debugging metadata to attach to any assertion errors * @returns {TestRunner} */ -function lt (val1, val2) { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 < val2) +function lt (val1, val2, response) { + try { + ;[val1, val2] = getNumbers(val1, val2) + assert.ok(val1 < val2) + } catch (err) { + err.response = JSON.stringify(response) + throw err + } } /** @@ -739,11 +774,17 @@ function lt (val1, val2) { * It also verifies that the two values are numbers * @param {any} the first value * @param {any} the second value + * @param {any} debugging metadata to attach to any assertion errors * @returns {TestRunner} */ -function gt (val1, val2) { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 > val2) +function gt (val1, val2, response) { + try { + ;[val1, val2] = getNumbers(val1, val2) + assert.ok(val1 > val2) + } catch (err) { + err.response = JSON.stringify(response) + throw err + } } /** @@ -751,11 +792,17 @@ function gt (val1, val2) { * It also verifies that the two values are numbers * @param {any} the first value * @param {any} the second value + * @param {any} debugging metadata to attach to any assertion errors * @returns {TestRunner} */ -function lte (val1, val2) { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 <= val2) +function lte (val1, val2, response) { + try { + ;[val1, val2] = getNumbers(val1, val2) + assert.ok(val1 <= val2) + } catch (err) { + err.response = JSON.stringify(response) + throw err + } } /** @@ -763,26 +810,38 @@ function lte (val1, val2) { * It also verifies that 
the two values are numbers * @param {any} the first value * @param {any} the second value + * @param {any} debugging metadata to attach to any assertion errors * @returns {TestRunner} */ -function gte (val1, val2) { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 >= val2) +function gte (val1, val2, response) { + try { + ;[val1, val2] = getNumbers(val1, val2) + assert.ok(val1 >= val2) + } catch (err) { + err.response = JSON.stringify(response) + throw err + } } /** * Asserts that the given value has the specified length * @param {string|object|array} the object to check * @param {number} the expected length + * @param {any} debugging metadata to attach to any assertion errors * @returns {TestRunner} */ -function length (val, len) { - if (typeof val === 'string' || Array.isArray(val)) { - assert.equal(val.length, len) - } else if (typeof val === 'object' && val !== null) { - assert.equal(Object.keys(val).length, len) - } else { - assert.fail(`length: the given value is invalid: ${val}`) +function length (val, len, response) { + try { + if (typeof val === 'string' || Array.isArray(val)) { + assert.equal(val.length, len) + } else if (typeof val === 'object' && val !== null) { + assert.equal(Object.keys(val).length, len) + } else { + assert.fail(`length: the given value is invalid: ${val}`) + } + } catch (err) { + err.response = JSON.stringify(response) + throw err } } @@ -813,6 +872,10 @@ function length (val, len) { */ function parseDo (action) { action = JSON.parse(JSON.stringify(action)) + + if (typeof action === 'string') action = {[action]: {}} + if (Array.isArray(action)) action = action[0] + return Object.keys(action).reduce((acc, val) => { switch (val) { case 'catch': From 5413eb5f35d0f3cbda483b5800475d9a3da59038 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 14 Dec 2023 17:19:20 -0600 Subject: [PATCH 291/647] Add missing snippets (#2113) For https://github.com/elastic/clients-team/issues/728 --- .../36b86b97feedcf5632824eefc251d6ed.asciidoc | 12 ++++++ .../8575c966b004fb124c7afd6bb5827b50.asciidoc | 13 ++++++ .../bcc75fc01b45e482638c65b8fbdf09fa.asciidoc | 7 +++ .../d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc | 43 +++++++++++++++++++ 4 files changed, 75 insertions(+) create mode 100644 docs/doc_examples/36b86b97feedcf5632824eefc251d6ed.asciidoc create mode 100644 docs/doc_examples/8575c966b004fb124c7afd6bb5827b50.asciidoc create mode 100644 docs/doc_examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc create mode 100644 docs/doc_examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc diff --git a/docs/doc_examples/36b86b97feedcf5632824eefc251d6ed.asciidoc b/docs/doc_examples/36b86b97feedcf5632824eefc251d6ed.asciidoc new file mode 100644 index 000000000..408ce2f71 --- /dev/null +++ b/docs/doc_examples/36b86b97feedcf5632824eefc251d6ed.asciidoc @@ -0,0 +1,12 @@ +[source,js] +---- +const response = await client.search({ + index: 'books', + query: { + match: { + name: 'brave' + } + } +}) +console.log(response) +---- diff --git a/docs/doc_examples/8575c966b004fb124c7afd6bb5827b50.asciidoc b/docs/doc_examples/8575c966b004fb124c7afd6bb5827b50.asciidoc new file mode 100644 index 000000000..d99bd96dc --- /dev/null +++ b/docs/doc_examples/8575c966b004fb124c7afd6bb5827b50.asciidoc @@ -0,0 +1,13 @@ +[source,js] +---- +const response = await client.index({ + index: 'books', + document: { + name: 'Snow Crash', + author: 'Neal Stephenson', + release_date: '1992-06-01', + page_count: 470, + } +}) +console.log(response) +---- diff --git 
a/docs/doc_examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc b/docs/doc_examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc new file mode 100644 index 000000000..1708d0956 --- /dev/null +++ b/docs/doc_examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc @@ -0,0 +1,7 @@ +[source,js] +---- +const response = await client.search({ + index: 'books' +}) +console.log(response) +---- diff --git a/docs/doc_examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc b/docs/doc_examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc new file mode 100644 index 000000000..e5ce437b2 --- /dev/null +++ b/docs/doc_examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc @@ -0,0 +1,43 @@ +[source,js] +---- +const response = await client.bulk({ + operations: [ + { index: { _index: 'books' } }, + { + name: 'Revelation Space', + author: 'Alastair Reynolds', + release_date: '2000-03-15', + page_count: 585, + }, + { index: { _index: 'books' } }, + { + name: '1984', + author: 'George Orwell', + release_date: '1985-06-01', + page_count: 328, + }, + { index: { _index: 'books' } }, + { + name: 'Fahrenheit 451', + author: 'Ray Bradbury', + release_date: '1953-10-15', + page_count: 227, + }, + { index: { _index: 'books' } }, + { + name: 'Brave New World', + author: 'Aldous Huxley', + release_date: '1932-06-01', + page_count: 268, + }, + { index: { _index: 'books' } }, + { + name: 'The Handmaids Tale', + author: 'Margaret Atwood', + release_date: '1985-06-01', + page_count: 311, + } + ] +}) +console.log(response) +---- From 6eabf37097c5dc32a618e47fdc0c968377ced314 Mon Sep 17 00:00:00 2001 From: Enrico Zimuel Date: Thu, 4 Jan 2024 13:13:26 +0100 Subject: [PATCH 292/647] Improved the body BC break description in request/response for 8.x documentation (#2117) * Improved the body bc break in 8.x documentation * Removed just in the sentence --- docs/changelog.asciidoc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 0d79214b2..1b7e68c5a 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -361,6 +361,9 @@ The client API leaks HTTP-related notions in many places, and removing them woul This could be a rather big breaking change, so a double solution could be used during the 8.x lifecycle. (accepting body keys without them being wrapped in the body as well as the current solution). +To convert code from 7.x, you need to remove the `body` parameter in all the endpoints request. +For instance, this is an example for the `search` endpoint: + [source,js] ---- // from @@ -399,6 +402,12 @@ If you weren't extending the internals of the client, this won't be a breaking c The client API leaks HTTP-related notions in many places, and removing them would definitely improve the DX. The client will expose a new request-specific option to still get the full response details. +The new behaviour returns the `body` value directly as response. +If you want to have the 7.x response format, you need to add `meta : true` in the request. +This will return all the HTTP meta information, including the `body`. 
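+
+In sketch form (the index name here is hypothetical):
+
+[source,js]
+----
+// 8.x default: the API returns the body directly
+const result = await client.search({
+  index: 'my-index',
+  query: { match_all: {} }
+})
+
+// 8.x with a 7.x-style envelope: pass `meta: true` as a request option,
+// and the response contains body, statusCode, headers, and meta
+const response = await client.search({
+  index: 'my-index',
+  query: { match_all: {} }
+}, { meta: true })
+----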
+ +For instance, this is an example for the `search` endpoint: + [source,js] ---- // from From 57ee5cf6c257289557e7ed9e15da66995aae4240 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Wed, 31 Jan 2024 13:37:05 +0400 Subject: [PATCH 293/647] 8.12.0 changelog (#2125) --- docs/changelog.asciidoc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 1b7e68c5a..4dbf11907 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,18 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.12.0 + +[discrete] +=== Features + +[discrete] +===== Support for Elasticsearch `v8.12.0` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.12/release-notes-8.12.0.html[here]. + [discrete] === 8.11.0 From 1607a0d3f78a6e60c44d62c5326dc36dedb972ea Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 5 Feb 2024 23:58:21 -0600 Subject: [PATCH 294/647] Fix hang in bulk helper semaphore when server responses are slower than flushInterval (#2027) * Set version to 8.10.1 * Add tests for bulk helper with various flush and server timeouts * Copy and empty bulkBody when flushBytes is reached Before it was waiting until after semaphore resolved, then sending with a reference to bulkBody. If flushInterval is reached after `await semaphore()` but before `send(bulkBody)`, onFlushTimeout is "stealing" bulkBody so that there is nothing left in bulkBody for the flushBytes block to send, causing an indefinite hang for a promise that does not resolve. * comment typo fixes --------- Co-authored-by: Quentin Pradet --- src/helpers.ts | 13 +-- test/unit/helpers/bulk.test.ts | 149 +++++++++++++++++++++++++++++++++ 2 files changed, 156 insertions(+), 6 deletions(-) diff --git a/src/helpers.ts b/src/helpers.ts index efad8b49b..fbf4ff334 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -624,7 +624,7 @@ export default class Helpers { let chunkBytes = 0 timeoutRef = setTimeout(onFlushTimeout, flushInterval) // eslint-disable-line - // @ts-expect-error datasoruce is an iterable + // @ts-expect-error datasource is an iterable for await (const chunk of datasource) { if (shouldAbort) break timeoutRef.refresh() @@ -656,15 +656,16 @@ export default class Helpers { if (chunkBytes >= flushBytes) { stats.bytes += chunkBytes - const send = await semaphore() - send(bulkBody.slice()) + const bulkBodyCopy = bulkBody.slice() bulkBody.length = 0 chunkBytes = 0 + const send = await semaphore() + send(bulkBodyCopy) } } clearTimeout(timeoutRef) - // In some cases the previos http call does not have finished, + // In some cases the previous http call has not finished, // or we didn't reach the flush bytes threshold, so we force one last operation. if (!shouldAbort && chunkBytes > 0) { const send = await semaphore() @@ -708,8 +709,8 @@ export default class Helpers { // to guarantee that no more than the number of operations // allowed to run at the same time are executed. // It returns a semaphore function which resolves in the next tick - // if we didn't reach the maximim concurrency yet, otherwise it returns - // a promise that resolves as soon as one of the running request has finshed. + // if we didn't reach the maximum concurrency yet, otherwise it returns + // a promise that resolves as soon as one of the running requests has finished. // The semaphore function resolves a send function, which will be used // to send the actual bulk request. 
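+    // NOTE: callers must copy and clear `bulkBody` *before* awaiting the
+    // semaphore; if a flush-interval timeout fires during the wait it
+    // would otherwise consume the same buffer, leaving this send with
+    // nothing to do and hanging the helper (#2027).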
// It also returns a finish function, which returns a promise that is resolved diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts index 2c3229ce9..62c297ebf 100644 --- a/test/unit/helpers/bulk.test.ts +++ b/test/unit/helpers/bulk.test.ts @@ -23,9 +23,11 @@ import { createReadStream } from 'fs' import * as http from 'http' import { join } from 'path' import split from 'split2' +import { Readable } from 'stream' import { test } from 'tap' import { Client, errors } from '../../../' import { buildServer, connection } from '../../utils' +const { sleep } = require('../../integration/helper') let clientVersion: string = require('../../../package.json').version // eslint-disable-line if (clientVersion.includes('-')) { @@ -1594,3 +1596,150 @@ test('Flush interval', t => { t.end() }) + +test(`flush timeout does not lock process when flushInterval is less than server timeout`, async t => { + const flushInterval = 500 + + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + setTimeout(() => { + res.writeHead(200, { 'content-type': 'application/json' }) + res.end(JSON.stringify({ errors: false, items: [{}] })) + }, 1000) + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + + async function * generator () { + const data = dataset.slice() + for (const doc of data) { + await sleep(flushInterval) + yield doc + } + } + + const result = await client.helpers.bulk({ + datasource: Readable.from(generator()), + flushBytes: 1, + flushInterval: flushInterval, + concurrency: 1, + onDocument (_) { + return { + index: { _index: 'test' } + } + }, + onDrop (_) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + + server.stop() +}) + +test(`flush timeout does not lock process when flushInterval is greater than server timeout`, async t => { + const flushInterval = 500 + + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + setTimeout(() => { + res.writeHead(200, { 'content-type': 'application/json' }) + res.end(JSON.stringify({ errors: false, items: [{}] })) + }, 250) + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + + async function * generator () { + const data = dataset.slice() + for (const doc of data) { + await sleep(flushInterval) + yield doc + } + } + + const result = await client.helpers.bulk({ + datasource: Readable.from(generator()), + flushBytes: 1, + flushInterval: flushInterval, + concurrency: 1, + onDocument (_) { + return { + index: { _index: 'test' } + } + }, + onDrop (_) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + + server.stop() +}) + +test(`flush timeout does not lock process when flushInterval is equal to server timeout`, async t => { + const flushInterval = 500 + + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + setTimeout(() => { + res.writeHead(200, { 'content-type': 'application/json' }) + res.end(JSON.stringify({ errors: false, items: [{}] })) + }, flushInterval) + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + + async 
function * generator () { + const data = dataset.slice() + for (const doc of data) { + await sleep(flushInterval) + yield doc + } + } + + const result = await client.helpers.bulk({ + datasource: Readable.from(generator()), + flushBytes: 1, + flushInterval: flushInterval, + concurrency: 1, + onDocument (_) { + return { + index: { _index: 'test' } + } + }, + onDrop (_) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + + server.stop() +}) From 8df91fce7cd757a25ca98e50cb24422748b15ef1 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 23 Feb 2024 13:18:01 -0600 Subject: [PATCH 295/647] Upgrade transport to 8.4.1 (#2137) --- package.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 7e3d637b0..0ecd601ca 100644 --- a/package.json +++ b/package.json @@ -83,7 +83,7 @@ "zx": "^7.2.2" }, "dependencies": { - "@elastic/transport": "^8.4.0", + "@elastic/transport": "^8.4.1", "tslib": "^2.4.0" }, "tap": { @@ -93,4 +93,4 @@ "coverage": false, "check-coverage": false } -} \ No newline at end of file +} From 1d8da99d5bb9c282e7ecd500620c45024fafccfc Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 23 Feb 2024 14:01:55 -0600 Subject: [PATCH 296/647] Update changelog for 8.12.2 (#2139) * Backport changelog for 8.12.1 * Add changelog for 8.12.2 --- docs/changelog.asciidoc | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 4dbf11907..0daeee5d7 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,28 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.12.2 + +[discrete] +==== Fixes + +[discrete] +===== Upgrade transport to 8.4.1 https://github.com/elastic/elasticsearch-js/pull/2137[#2137] + +Upgrades `@elastic/transport` to 8.4.1 to resolve https://github.com/elastic/elastic-transport-js/pull/83[a bug] where arrays in error diagnostics were unintentionally transformed into objects. + +[discrete] +=== 8.12.1 + +[discrete] +==== Fixes + +[discrete] +===== Fix hang in bulk helper semaphore https://github.com/elastic/elasticsearch-js/pull/2027[#2027] + +The failing state could be reached when a server's response times are slower than flushInterval. + [discrete] === 8.12.0 From 352f73e7c2d91baffd05a92e98a8ba30cbbd93a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Wed, 28 Feb 2024 18:54:15 +0100 Subject: [PATCH 297/647] [DOCS] Adds compatibility matrix to docs and readme (#2136) Co-authored-by: Josh Mock --- README.md | 19 +++++++++++++++++++ docs/installation.asciidoc | 17 ++++++++++++++--- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 5d34cfa98..fb0d45a32 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,25 @@ of the getting started documentation. Refer to the [Connecting section](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_connecting) of the getting started documentation. +## Compatibility + +The Elasticsearch client is compatible with currently maintained JS versions. + +Language clients are forward compatible; meaning that clients support +communicating with greater or equal minor versions of Elasticsearch without +breaking. 
It does not mean that the client automatically supports new features +of newer Elasticsearch versions; it is only possible after a release of a new +client version. For example, a 8.12 client version won't automatically support +the new features of the 8.13 version of Elasticsearch, the 8.13 client version +is required for that. Elasticsearch language clients are only backwards +compatible with default distributions and without guarantees made. + +| Elasticsearch Version | Elasticsearch-JS Branch | Supported | +| --------------------- | ------------------------ | --------- | +| main | main | | +| 8.x | 8.x | 8.x | +| 7.x | 7.x | 7.17 | + ## Usage * [Creating an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_creating_an_index) diff --git a/docs/installation.asciidoc b/docs/installation.asciidoc index 4fe1f78ab..cd36cf3c2 100644 --- a/docs/installation.asciidoc +++ b/docs/installation.asciidoc @@ -74,25 +74,36 @@ of `^7.10.0`). [[js-compatibility-matrix]] === Compatibility matrix -Language clients are forward compatible; meaning that clients support communicating with greater or equal minor versions of Elasticsearch. -Elasticsearch language clients are only backwards compatible with default distributions and without guarantees made. +Language clients are forward compatible; meaning that clients support +communicating with greater or equal minor versions of {es} without breaking. It +does not mean that the client automatically supports new features of newer {es} +versions; it is only possible after a release of a new client version. For +example, a 8.12 client version won't automatically support the new features of +the 8.13 version of {es}, the 8.13 client version is required for that. +{es} language clients are only backwards compatible with default distributions +and without guarantees made. -[%header,cols=2*] +[%header,cols=3*] |=== |{es} Version |Client Version +|Supported +|`8.x` |`8.x` |`8.x` |`7.x` |`7.x` +|`7.17` |`6.x` |`6.x` +| |`5.x` |`5.x` +| |=== From abd15eb111fe1d3d274f51125a93e846e689c979 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 5 Mar 2024 14:51:24 -0500 Subject: [PATCH 298/647] Auto-generated code for main (#2127) --- docs/reference.asciidoc | 292 +++++++++++++++++++++++++----- src/api/api/eql.ts | 4 +- src/api/api/esql.ts | 83 +++++++++ src/api/api/fleet.ts | 75 ++++++++ src/api/api/indices.ts | 46 ++++- src/api/api/inference.ts | 194 ++++++++++++++++++++ src/api/api/search_application.ts | 2 +- src/api/api/security.ts | 56 +++++- src/api/api/text_structure.ts | 38 ++++ src/api/api/update_by_query.ts | 2 +- src/api/index.ts | 16 ++ src/api/types.ts | 264 +++++++++++++++++++++++---- src/api/typesWithBodyKey.ts | 274 ++++++++++++++++++++++++---- 13 files changed, 1202 insertions(+), 144 deletions(-) create mode 100644 src/api/api/esql.ts create mode 100644 src/api/api/inference.ts diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 9e5291cbb..05f9e1c04 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -101,7 +101,7 @@ client.count({ ... }) ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. 
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. ** *`analyzer` (Optional, string)*: Analyzer to use for the query string. @@ -204,7 +204,7 @@ client.deleteByQuery({ index }) Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`max_docs` (Optional, number)*: The maximum number of documents to delete. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Specifies the documents to delete using the Query DSL. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to delete using the Query DSL. ** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. @@ -360,7 +360,7 @@ client.explain({ id, index }) ** *`id` (string)*: Defines the document ID. ** *`index` (string)*: Index names used to limit the request. Only a single index name can be provided to this parameter. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`analyzer` (Optional, string)*: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. ** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. @@ -391,7 +391,7 @@ client.fieldCaps({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. ** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. 
-** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. +** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. ** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. ** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, @@ -402,6 +402,7 @@ targeting `foo*,bar*` returns an error if an index starts with foo but no index ** *`include_unmapped` (Optional, boolean)*: If true, unmapped fields are included in the response. ** *`filters` (Optional, string)*: An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent ** *`types` (Optional, string[])*: Only return results for fields that have one of the types in the list +** *`include_empty_fields` (Optional, boolean)*: If false, empty fields are not included in the response. [discrete] === get @@ -585,7 +586,7 @@ parameter defaults to false. You can pass _source: true to return both source fi and stored fields in the search response. ** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. 
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. ** *`routing` (Optional, string)*: A list of specific routing values @@ -917,16 +918,16 @@ If `false`, the response does not include the total number of hits matching the ** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. ** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (`*`) patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. 
-** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity }[])*: Defines the approximate kNN search to run. +** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits }[])*: Defines the approximate kNN search to run. ** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use. ** *`min_score` (Optional, number)*: Minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results. -** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. ** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`rescore` (Optional, { query, window_size } | { query, window_size }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page. @@ -1076,7 +1077,7 @@ don’t include the aggs layer. each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon of the cells bounding box. If 'point' each feature is a Point that is the centroid of the cell. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Query DSL used to filter documents for the search. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search. ** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. ** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000. @@ -1179,7 +1180,7 @@ client.termsEnum({ index, field }) ** *`size` (Optional, number)*: How many matching terms to return. ** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. Defaults to "1s" (one second). 
If the timeout is exceeded the complete flag set to false in the response and the results may be partial or empty. ** *`case_insensitive` (Optional, boolean)*: When true the provided search string is matched against index terms without case sensitivity. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none. +** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none. ** *`string` (Optional, string)*: The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request. ** *`search_after` (Optional, string)* @@ -1260,7 +1261,8 @@ Set to 'all' or any positive integer up to the total number of shards in the ind [discrete] === update_by_query -Performs an update on every document in the index without changing the source, +Updates documents that match the specified query. If no query is specified, + performs an update on every document in the index without changing the source, for example to pick up a mapping change. {ref}/docs-update-by-query.html[Endpoint documentation] @@ -1276,7 +1278,7 @@ client.updateByQuery({ index }) Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`max_docs` (Optional, number)*: The maximum number of documents to update. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL. ** *`script` (Optional, { lang, options, source } | { id })*: The script to run to update the document source or metadata when updating. ** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. ** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if update by query hits version conflicts: `abort` or `proceed`. @@ -1429,12 +1431,12 @@ Defaults to 10,000 hits. ** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. ** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. -** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity }[])*: Defines the approximate kNN search to run. +** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits }[])*: Defines the approximate kNN search to run. ** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. 
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })* +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* ** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`rescore` (Optional, { query, window_size } | { query, window_size }[])* ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* @@ -2783,7 +2785,7 @@ client.eql.search({ index, query }) ** *`tiebreaker_field` (Optional, string)*: Field used to sort hits with the same timestamp in ascending order ** *`timestamp_field` (Optional, string)*: Field containing event timestamp. 
Default "@timestamp" ** *`fetch_size` (Optional, number)*: Maximum number of events to search at a time for sequence queries. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs. ** *`keep_alive` (Optional, string | -1 | 0)* ** *`keep_on_completion` (Optional, boolean)* ** *`wait_for_completion_timeout` (Optional, string | -1 | 0)* @@ -2795,6 +2797,30 @@ client.eql.search({ index, query }) ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* ** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. 
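As a usage sketch, a basic (non-sequence) EQL search looks like the following; the `my-logs-*` index pattern and the process fields are hypothetical, and the default `@timestamp` and `event.category` fields are assumed to exist:

[source,ts]
----
// Run an EQL query and print the matching events.
const response = await client.eql.search({
  index: 'my-logs-*', // hypothetical index pattern
  query: 'process where process.name == "regsvr32.exe"',
  size: 10
})
console.log(response.hits.events)
----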
+[discrete] +=== esql +[discrete] +==== query +Executes an ESQL request + +{ref}/esql-rest.html[Endpoint documentation] +[source,ts] +---- +client.esql.query({ query }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. +** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. +** *`locale` (Optional, string)* +** *`params` (Optional, number | number | string | boolean | null[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +** *`format` (Optional, string)*: A short version of the Accept header, e.g. json, yaml. +** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format. + [discrete] === features [discrete] @@ -2907,9 +2933,9 @@ Defaults to 10,000 hits. names matching these patterns in the hits.fields property of the response. ** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. 
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*
+** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*
** *`profile` (Optional, boolean)*
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
** *`rescore` (Optional, { query, window_size } | { query, window_size }[])*
** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit.
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
@@ -2998,7 +3024,7 @@ client.graph.explore({ index })
** *`index` (string | string[])*: Name of the index.
** *`connections` (Optional, { connections, query, vertices })*: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })*: Direct the Graph API how to build the graph. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query. ** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])*: Specifies one or more fields that contain the terms you want to include in the graph as vertices. ** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. ** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for a response from each shard. @@ -3245,7 +3271,7 @@ This could be a built-in analyzer, or an analyzer that’s been configured in th ** *`field` (Optional, string)*: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. 
-** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, rule_files } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer. 
+** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer. ** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token. ** *`text` (Optional, string | string[])*: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. 
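A minimal sketch of `indices.analyze`: with a built-in analyzer such as `standard`, no index needs to be specified and the response lists one entry per token:

[source,ts]
----
// Tokenize a sample string with the built-in standard analyzer.
const response = await client.indices.analyze({
  analyzer: 'standard',
  text: 'The quick brown fox.'
})
console.log(response.tokens) // token text, offsets, and positions
----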
@@ -3355,7 +3381,7 @@ client.indices.create({ index }) - Field names - Field data types - Mapping parameters -** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, shards, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index. +** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. @@ -3497,7 +3523,7 @@ client.indices.deleteDataStream({ name }) ==== delete_index_template Deletes an index template. -{ref}/indices-templates.html[Endpoint documentation] +{ref}/indices-delete-template.html[Endpoint documentation] [source,ts] ---- client.indices.deleteIndexTemplate({ name }) @@ -3515,7 +3541,7 @@ client.indices.deleteIndexTemplate({ name }) ==== delete_template Deletes an index template. -{ref}/indices-templates.html[Endpoint documentation] +{ref}/indices-delete-template-v1.html[Endpoint documentation] [source,ts] ---- client.indices.deleteTemplate({ name }) @@ -3634,7 +3660,7 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ==== exists_index_template Returns information about whether a particular index template exists. -{ref}/indices-templates.html[Endpoint documentation] +{ref}/index-templates.html[Endpoint documentation] [source,ts] ---- client.indices.existsIndexTemplate({ name }) @@ -3651,7 +3677,7 @@ client.indices.existsIndexTemplate({ name }) ==== exists_template Returns information about whether a particular index template exists. -{ref}/indices-templates.html[Endpoint documentation] +{ref}/indices-template-exists-v1.html[Endpoint documentation] [source,ts] ---- client.indices.existsTemplate({ name }) @@ -3897,7 +3923,7 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
==== get_index_template Returns an index template. -{ref}/indices-templates.html[Endpoint documentation] +{ref}/indices-get-template.html[Endpoint documentation] [source,ts] ---- client.indices.getIndexTemplate({ ... }) @@ -3980,7 +4006,7 @@ error. ==== get_template Returns an index template. -{ref}/indices-templates.html[Endpoint documentation] +{ref}/indices-get-template-v1.html[Endpoint documentation] [source,ts] ---- client.indices.getTemplate({ ... }) @@ -4099,7 +4125,7 @@ Wildcard patterns that match both data streams and indices return an error. ** *`name` (string)*: Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Query used to limit documents the alias can access. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query used to limit documents the alias can access. ** *`index_routing` (Optional, string)*: Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter. @@ -4152,7 +4178,7 @@ If no response is received before the timeout expires, the request fails and ret ==== put_index_template Creates or updates an index template. -{ref}/indices-templates.html[Endpoint documentation] +{ref}/indices-put-template.html[Endpoint documentation] [source,ts] ---- client.indices.putIndexTemplate({ name }) @@ -4208,7 +4234,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. ** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. -** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: +** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -4246,7 +4272,7 @@ client.indices.putSettings({ ... }) ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. 
-** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, shards, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })* +** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })* ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For @@ -4269,7 +4295,7 @@ error. ==== put_template Creates or updates an index template. -{ref}/indices-templates.html[Endpoint documentation] +{ref}/indices-templates-v1.html[Endpoint documentation] [source,ts] ---- client.indices.putTemplate({ name }) @@ -4363,6 +4389,32 @@ client.indices.reloadSearchAnalyzers({ index }) ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. ** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +[discrete] +==== resolve_cluster +Resolves the specified index expressions to return information about each cluster, including the local cluster, if included. + +{ref}/indices-resolve-cluster-api.html[Endpoint documentation] +[source,ts] +---- +client.indices.resolveCluster({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string | string[])*: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. +Resources on remote clusters can be specified using the ``:`` syntax. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing +or closed indices. This behavior applies even if the request targets other open indices. 
For example, a request +targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. Defaults to false. +** *`ignore_unavailable` (Optional, boolean)*: If false, the request returns an error if it targets a missing or closed index. Defaults to false. + [discrete] ==== resolve_index Returns information about any matching indices, aliases, and data streams @@ -4501,7 +4553,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind ==== simulate_index_template Simulate matching the given index name against the index templates in the system -{ref}/indices-templates.html[Endpoint documentation] +{ref}/indices-simulate-index.html[Endpoint documentation] [source,ts] ---- client.indices.simulateIndexTemplate({ name }) @@ -4545,7 +4597,7 @@ before the timeout expires, the request fails and returns an error. ==== simulate_template Simulate resolving the given template name or body -{ref}/indices-templates.html[Endpoint documentation] +{ref}/indices-simulate-template.html[Endpoint documentation] [source,ts] ---- client.indices.simulateTemplate({ ... }) @@ -4681,7 +4733,7 @@ client.indices.validateQuery({ ... }) ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Query in the Lucene query string syntax. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query in the Lucene query string syntax. 
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. ** *`all_shards` (Optional, boolean)*: If `true`, the validation is executed on all shards instead of one random shard per index. @@ -4701,6 +4753,80 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`rewrite` (Optional, boolean)*: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. ** *`q` (Optional, string)*: Query in the Lucene query string syntax. +[discrete] +=== inference +[discrete] +==== delete_model +Delete model in the Inference API + +{ref}/delete-inference-api.html[Endpoint documentation] +[source,ts] +---- +client.inference.deleteModel({ inference_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The inference Id +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding"))*: The task type + +[discrete] +==== get_model +Get a model in the Inference API + +{ref}/get-inference-api.html[Endpoint documentation] +[source,ts] +---- +client.inference.getModel({ inference_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The inference Id +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding"))*: The task type + +[discrete] +==== inference +Perform inference on a model + +{ref}/post-inference-api.html[Endpoint documentation] +[source,ts] +---- +client.inference.inference({ inference_id, input }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The inference Id +** *`input` (string | string[])*: Text input to the model. +Either a string or an array of strings. +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding"))*: The task type +** *`task_settings` (Optional, User-defined value)*: Optional task settings + +[discrete] +==== put_model +Configure a model for use in the Inference API + +{ref}/put-inference-api.html[Endpoint documentation] +[source,ts] +---- +client.inference.putModel({ inference_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The inference Id +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding"))*: The task type +** *`model_config` (Optional, { service, service_settings, task_settings })* + [discrete] === ingest [discrete] @@ -4784,8 +4910,8 @@ client.ingest.putPipeline({ id }) ** *`id` (string)*: ID of the ingest pipeline to create or update. ** *`_meta` (Optional, Record)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. ** *`description` (Optional, string)*: Description of the ingest pipeline. -** *`on_failure` (Optional, { attachment, append, csv, convert, date, date_index_name, dot_expander, enrich, fail, foreach, json, user_agent, kv, geoip, grok, gsub, join, lowercase, remove, rename, script, set, sort, split, trim, uppercase, urldecode, bytes, dissect, set_security_user, pipeline, drop, circle, inference }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. 
Elasticsearch will not attempt to run the pipeline's remaining processors. -** *`processors` (Optional, { attachment, append, csv, convert, date, date_index_name, dot_expander, enrich, fail, foreach, json, user_agent, kv, geoip, grok, gsub, join, lowercase, remove, rename, script, set, sort, split, trim, uppercase, urldecode, bytes, dissect, set_security_user, pipeline, drop, circle, inference }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. +** *`on_failure` (Optional, { attachment, append, csv, convert, date, date_index_name, dot_expander, enrich, fail, foreach, json, user_agent, kv, geoip, grok, gsub, join, lowercase, remove, rename, reroute, script, set, sort, split, trim, uppercase, urldecode, bytes, dissect, set_security_user, pipeline, drop, circle, inference }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. +** *`processors` (Optional, { attachment, append, csv, convert, date, date_index_name, dot_expander, enrich, fail, foreach, json, user_agent, kv, geoip, grok, gsub, join, lowercase, remove, rename, reroute, script, set, sort, split, trim, uppercase, urldecode, bytes, dissect, set_security_user, pipeline, drop, circle, inference }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. ** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. @@ -4808,7 +4934,7 @@ client.ingest.simulate({ ... }) ** *`id` (Optional, string)*: Pipeline to test. If you don’t specify a `pipeline` in the request body, this parameter is required. ** *`docs` (Optional, { _id, _index, _source }[])*: Sample documents to test in the pipeline. -** *`pipeline` (Optional, { description, on_failure, processors, version })*: Pipeline to test. +** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })*: Pipeline to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. ** *`verbose` (Optional, boolean)*: If `true`, the response includes output data for each processor in the executed pipeline. @@ -4966,7 +5092,7 @@ client.logstash.putPipeline({ id }) * *Request (object):* ** *`id` (string)*: Identifier for the pipeline. 
-** *`pipeline` (Optional, { description, on_failure, processors, version })* +** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })* [discrete] === migration @@ -5317,7 +5443,7 @@ client.ml.evaluateDataFrame({ evaluation, index }) * *Request (object):* ** *`evaluation` ({ classification, outlier_detection, regression })*: Defines the type of evaluation you want to perform. ** *`index` (string)*: Defines the `index` in which the evaluation will be performed. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index. [discrete] ==== explain_data_frame_analytics @@ -6201,7 +6327,7 @@ learning nodes must have the `remote_cluster_client` role. stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). 
This value corresponds to the query object in an +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. ** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might @@ -6654,7 +6780,7 @@ learning nodes must have the `remote_cluster_client` role. stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also changed. 
Therefore, the time required to learn might be long and the understandability of the results is @@ -7165,7 +7291,7 @@ client.rollup.rollupSearch({ index }) * *Request (object):* ** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL. ** *`aggregations` (Optional, Record)*: Specifies aggregations. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Specifies a DSL query. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query. ** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data. ** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response ** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response @@ -7212,7 +7338,7 @@ If set to `false`, the API returns immediately and the indexer is stopped asynch ==== delete Deletes a search application. -{ref}/put-search-application.html[Endpoint documentation] +{ref}/delete-search-application.html[Endpoint documentation] [source,ts] ---- client.searchApplication.delete({ name }) @@ -7596,6 +7722,17 @@ client.security.createApiKey({ ... }) ** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +[discrete] +==== create_cross_cluster_api_key +Creates a cross-cluster API key for API key based remote cluster access. + +{ref}/security-api-create-cross-cluster-api-key.html[Endpoint documentation] +[source,ts] +---- +client.security.createCrossClusterApiKey() +---- + + [discrete] ==== create_service_token Creates a service account token for access without requiring basic authentication. 
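A minimal sketch, assuming the built-in `elastic/fleet-server` service account is available; the token name is hypothetical:

[source,ts]
----
// Create a named token for the elastic/fleet-server service account.
const response = await client.security.createServiceToken({
  namespace: 'elastic',
  service: 'fleet-server',
  name: 'my-fleet-token' // hypothetical token name
})
console.log(response.token.value) // bearer token value
----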
@@ -7788,6 +7925,7 @@ This parameter cannot be used with either `id` or `name` or when `owner` flag is associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors. +** *`active_only` (Optional, boolean)*: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. [discrete] ==== get_builtin_privileges @@ -7883,6 +8021,17 @@ client.security.getServiceCredentials({ namespace, service }) ** *`namespace` (string)*: Name of the namespace. ** *`service` (string)*: Name of the service name. +[discrete] +==== get_settings +Retrieve settings for the security system indices + +{ref}/security-api-get-settings.html[Endpoint documentation] +[source,ts] +---- +client.security.getSettings() +---- + + [discrete] ==== get_token Creates a bearer token for access without requiring basic authentication. @@ -8121,6 +8270,7 @@ client.security.putRoleMapping({ name }) ** *`enabled` (Optional, boolean)* ** *`metadata` (Optional, Record)* ** *`roles` (Optional, string[])* +** *`role_templates` (Optional, { format, template }[])* ** *`rules` (Optional, { any, all, field, except })* ** *`run_as` (Optional, string[])* ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -8163,7 +8313,7 @@ client.security.queryApiKeys({ ... }) ==== Arguments * *Request (object):* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: A query to filter which API keys to return. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query to filter which API keys to return. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `ids`, `prefix`, `wildcard`, and `range`. You can query all public information associated with an API key. 
 ** *`from` (Optional, number)*: Starting document offset.
@@ -8310,6 +8460,29 @@ client.security.updateApiKey({ id })
 ** *`id` (string)*: The ID of the API key to update.
 ** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, the API key will have a point-in-time snapshot of the permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of the API key’s permissions and the authenticated user’s permissions, thereby limiting the access scope of the API key. The structure of a role descriptor is the same as the request for the create role API. For more details, see the create or update roles API.
 ** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage.
+** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key.
+
+[discrete]
+==== update_cross_cluster_api_key
+Updates attributes of an existing cross-cluster API key.
+
+{ref}/security-api-update-cross-cluster-api-key.html[Endpoint documentation]
+[source,ts]
+----
+client.security.updateCrossClusterApiKey()
+----
+
+
+[discrete]
+==== update_settings
+Update settings for the security system index
+
+{ref}/security-api-update-settings.html[Endpoint documentation]
+[source,ts]
+----
+client.security.updateSettings()
+----
+
 
 [discrete]
 === slm
@@ -8644,7 +8817,7 @@ client.snapshot.restore({ repository, snapshot })
 ** *`ignore_unavailable` (Optional, boolean)*
 ** *`include_aliases` (Optional, boolean)*
 ** *`include_global_state` (Optional, boolean)*
-** *`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, shards, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*
+** *`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*
 ** *`indices` (Optional, string | string[])*
 ** *`partial` (Optional, boolean)*
 ** *`rename_pattern` (Optional, string)*
@@ -8782,7 +8955,7 @@ client.sql.query({ ... })
 If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters.
 It ignores other request body parameters.
 ** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
 ** *`query` (Optional, string)*: SQL query to run.
 ** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails.
 ** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails.
@@ -8813,7 +8986,7 @@ client.sql.translate({ query })
 * *Request (object):*
 ** *`query` (string)*: SQL query to run.
 ** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
 ** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search.
 
 [discrete]
@@ -8949,7 +9122,7 @@ client.synonyms.putSynonymRule({ set_id, rule_id, synonyms })
 * *Request (object):*
 ** *`set_id` (string)*: The id of the synonym set to be updated with the synonym rule
 ** *`rule_id` (string)*: The id of the synonym rule to be updated or created
-** *`synonyms` (string[])*
+** *`synonyms` (string)*
 
 [discrete]
 === tasks
@@ -9035,6 +9208,7 @@ client.textStructure.findStructure({ ... })
 ** *`charset` (Optional, string)*: The text’s character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, windows-1252, or EUC-JP. If this parameter is not specified, the structure finder chooses an appropriate character set.
 ** *`column_names` (Optional, string)*: If you have set format to delimited, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", etc.
 ** *`delimiter` (Optional, string)*: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (|). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns (disabled or v1, default: disabled).
 ** *`explain` (Optional, boolean)*: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result.
 ** *`format` (Optional, string)*: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
 ** *`grok_pattern` (Optional, string)*: If you have set format to semi_structured_text, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the timestamp_field parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If grok_pattern is not specified, the structure finder creates a Grok pattern.
@@ -9047,6 +9221,24 @@ client.textStructure.findStructure({ ... })
 ** *`timestamp_field` (Optional, string)*: Optional parameter to specify the timestamp field in the file
 ** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text.
+[discrete]
+==== test_grok_pattern
+Tests a Grok pattern on some text.
+
+{ref}/test-grok-pattern.html[Endpoint documentation]
+[source,ts]
+----
+client.textStructure.testGrokPattern({ grok_pattern, text })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`grok_pattern` (string)*: Grok pattern to run on the text.
+** *`text` (string[])*: Lines of text to run the Grok pattern on.
+** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns (disabled or v1, default: disabled).
+
 
 [discrete]
 === transform
 [discrete]
@@ -9503,7 +9695,7 @@ client.watcher.queryWatches({ ... })
 * *Request (object):*
 ** *`from` (Optional, number)*: The offset from the first result to fetch. Needs to be non-negative.
 ** *`size` (Optional, number)*: The number of hits to return. Needs to be non-negative.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, wildcard, wrapper, type })*: Optional, query filter watches to be returned.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Optional query to filter the watches to be returned.
 ** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Optional sort definition.
 ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Optional search_after to do pagination using the last hit’s sort values.
 
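The new `expiration` parameter on `security.updateApiKey` mirrors the create API key request: it accepts a duration string or `-1`/`0`. A minimal sketch of the updated call; the key ID and duration are illustrative, not from this patch:

[source,ts]
----
// Extend the lifetime of an existing API key; ID and duration are made up.
const response = await client.security.updateApiKey({
  id: 'VuaCfGcBCdbkQm-e5aOx', // hypothetical API key ID
  expiration: '30d' // newly supported by this patch
})
console.log(response.updated)
----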
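Because the `filter` union for `sql.query` and `sql.translate` now accepts the full query DSL container (including the new `knn` and `weighted_tokens` entries), a hedged example of combining SQL with a DSL filter follows; the index and field names are made up:

[source,ts]
----
const result = await client.sql.query({
  query: 'SELECT author, page_count FROM library ORDER BY page_count DESC',
  filter: { term: { 'author.keyword': 'Frank Herbert' } }, // any query DSL container works here
  fetch_size: 5
})
----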
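The new `test_grok_pattern` endpoint takes a pattern and sample lines, as documented above. A sketch using those arguments; the pattern and log line are illustrative:

[source,ts]
----
const response = await client.textStructure.testGrokPattern({
  grok_pattern: '%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{GREEDYDATA:message}',
  text: ['2024-01-01T12:00:00Z INFO service started'], // sample lines to match
  ecs_compatibility: 'v1' // or 'disabled' (the default)
})
----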
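For `watcher.queryWatches`, `from`/`size` paging and `search_after` are alternatives. A sketch, assuming the response carries `count` and `watches` as in the current client types:

[source,ts]
----
const page = await client.watcher.queryWatches({
  query: { match_all: {} }, // or any of the query types listed above
  sort: ['_doc'],
  size: 25,
  from: 0 // for deep pagination, switch to `search_after` with the previous page's last sort values
})
console.log(page.count, page.watches.length)
----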
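The source changes below also add an `esql` namespace whose `query` method posts `query`, `columnar`, `filter`, `locale`, and `params` to `/_query`. A sketch, assuming an ES|QL-capable cluster; the index name is made up:

[source,ts]
----
const response = await client.esql.query({
  query: 'FROM library | STATS total = COUNT(*) BY author | LIMIT 10',
  columnar: false // set to true for column-oriented results
})
----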
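Similarly, the new `inference` namespace added below routes to `/_inference/{task_type}/{inference_id}`. A sketch against a hypothetical sparse-embedding endpoint:

[source,ts]
----
const result = await client.inference.inference({
  task_type: 'sparse_embedding', // or 'text_embedding', per the task types added below
  inference_id: 'my-elser-endpoint', // hypothetical inference endpoint ID
  input: ['The quick brown fox jumps over the lazy dog']
})
----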
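`indices.resolveCluster`, also added below, reports per-cluster reachability for an index expression. A sketch; the patterns and remote cluster alias are illustrative:

[source,ts]
----
const info = await client.indices.resolveCluster({
  name: 'my-index-*,my-remote-cluster:my-index-*' // local plus remote patterns
})
for (const [cluster, details] of Object.entries(info)) {
  // each entry exposes `connected`, `skip_unavailable` and, where applicable, `matching_indices`
  console.log(cluster, details.connected, details.matching_indices)
}
----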
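Finally, the type changes below add `pruning_config` to `text_expansion` queries (and introduce the `weighted_tokens` query). A sketch of the shape, with a hypothetical field name and an illustrative model ID:

[source,ts]
----
const hits = await client.search({
  index: 'my-index', // illustrative
  query: {
    text_expansion: {
      ml_tokens: { // hypothetical sparse-vector field
        model_id: '.elser_model_2', // illustrative model ID
        model_text: 'how do I prune token weights?',
        pruning_config: { // new in this patch
          tokens_freq_ratio_threshold: 5,
          tokens_weight_threshold: 0.4,
          only_score_pruned_tokens: false
        }
      }
    }
  }
})
----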
diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index d299744da..3846df925 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -71,7 +71,7 @@ export default class Eql { /** * Returns async results from previously executed Event Query Language (EQL) search - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-search-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-search-api.html | Elasticsearch API documentation} */ async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> @@ -97,7 +97,7 @@ export default class Eql { /** * Returns the status of a previously submitted async or stored Event Query Language (EQL) search - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-status-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-status-api.html | Elasticsearch API documentation} */ async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts new file mode 100644 index 000000000..4d211a14c --- /dev/null +++ b/src/api/api/esql.ts @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
+ +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Esql { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + /** + * Executes an ESQL request + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-rest.html | Elasticsearch API documentation} + */ + async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise + async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'query'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_query' + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index 4730068b6..965a926bb 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -43,6 +43,56 @@ export default class Fleet { this.transport = transport } + /** + * Deletes a secret stored by Fleet. + */ + async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_fleet/secret/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + + /** + * Retrieves a secret stored by Fleet. 
+ */ + async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_fleet/secret/${encodeURIComponent(params.id.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + /** * Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-global-checkpoints.html | Elasticsearch API documentation} @@ -106,6 +156,31 @@ export default class Fleet { return await this.transport.request({ path, method, querystring, bulkBody: body }, options) } + /** + * Creates a secret stored by Fleet. + */ + async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_fleet/secret' + return await this.transport.request({ path, method, querystring, body }, options) + } + /** * Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project. */ diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 078b1c52d..128f0a5bb 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -424,7 +424,7 @@ export default class Indices { /** * Deletes an index template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template.html | Elasticsearch API documentation} */ async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -450,7 +450,7 @@ export default class Indices { /** * Deletes an index template. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template-v1.html | Elasticsearch API documentation} */ async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -592,7 +592,7 @@ export default class Indices { /** * Returns information about whether a particular index template exists. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index-templates.html | Elasticsearch API documentation} */ async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -618,7 +618,7 @@ export default class Indices { /** * Returns information about whether a particular index template exists. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-template-exists-v1.html | Elasticsearch API documentation} */ async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -923,7 +923,7 @@ export default class Indices { /** * Returns an index template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template.html | Elasticsearch API documentation} */ async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1031,7 +1031,7 @@ export default class Indices { /** * Returns an index template. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template-v1.html | Elasticsearch API documentation} */ async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1264,7 +1264,7 @@ export default class Indices { /** * Creates or updates an index template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-template.html | Elasticsearch API documentation} */ async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1378,7 +1378,7 @@ export default class Indices { /** * Creates or updates an index template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates-v1.html | Elasticsearch API documentation} */ async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1508,6 +1508,32 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Resolves the specified index expressions to return information about each cluster, including the local cluster, if included. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html | Elasticsearch API documentation} + */ + async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise + async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['name'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_resolve/cluster/${encodeURIComponent(params.name.toString())}` + return await this.transport.request({ path, method, querystring, body }, options) + } + /** * Returns information about any matching indices, aliases, and data streams * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-index-api.html | Elasticsearch API documentation} @@ -1687,7 +1713,7 @@ export default class Indices { /** * Simulate matching the given index name against the index templates in the system - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-index.html | Elasticsearch API documentation} */ async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1725,7 +1751,7 @@ export default class Indices { /** * Simulate resolving the given template name or body - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-template.html | Elasticsearch API documentation} */ async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts new file mode 100644 index 000000000..e02487067 --- /dev/null +++ b/src/api/api/inference.ts @@ -0,0 +1,194 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Inference { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + /** + * Delete model in the Inference API + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html | Elasticsearch API documentation} + */ + async deleteModel (this: That, params: T.InferenceDeleteModelRequest | TB.InferenceDeleteModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteModel (this: That, params: T.InferenceDeleteModelRequest | TB.InferenceDeleteModelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteModel (this: That, params: T.InferenceDeleteModelRequest | TB.InferenceDeleteModelRequest, options?: TransportRequestOptions): Promise + async deleteModel (this: That, params: T.InferenceDeleteModelRequest | TB.InferenceDeleteModelRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['task_type', 'inference_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.task_type != null && params.inference_id != null) { + method = 'DELETE' + path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}` + } else { + method = 'DELETE' + path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + /** + * Get a model in the Inference API + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html | Elasticsearch API documentation} + */ + async getModel (this: That, params: T.InferenceGetModelRequest | TB.InferenceGetModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getModel (this: That, params: T.InferenceGetModelRequest | TB.InferenceGetModelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getModel (this: That, params: T.InferenceGetModelRequest | 
TB.InferenceGetModelRequest, options?: TransportRequestOptions): Promise + async getModel (this: That, params: T.InferenceGetModelRequest | TB.InferenceGetModelRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['task_type', 'inference_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.task_type != null && params.inference_id != null) { + method = 'GET' + path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}` + } else { + method = 'GET' + path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + /** + * Perform inference on a model + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html | Elasticsearch API documentation} + */ + async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> + async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise + async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['task_type', 'inference_id'] + const acceptedBody: string[] = ['input', 'task_settings'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.task_type != null && params.inference_id != null) { + method = 'POST' + path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}` + } else { + method = 'POST' + path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` + } + return await this.transport.request({ path, method, querystring, body }, options) + } + + /** + * Configure a model for use in the Inference API + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html | Elasticsearch API documentation} + */ + async putModel (this: That, params: T.InferencePutModelRequest | TB.InferencePutModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putModel (this: That, params: T.InferencePutModelRequest | TB.InferencePutModelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putModel (this: That, params: T.InferencePutModelRequest | TB.InferencePutModelRequest, options?: TransportRequestOptions): Promise + async putModel (this: That, params: T.InferencePutModelRequest | TB.InferencePutModelRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['task_type', 'inference_id'] + const acceptedBody: string[] = ['model_config'] + const querystring: Record = {} + // @ts-expect-error + let body: any = params.body ?? undefined + + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.task_type != null && params.inference_id != null) { + method = 'PUT' + path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}` + } else { + method = 'PUT' + path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` + } + return await this.transport.request({ path, method, querystring, body }, options) + } +} diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts index e825279f0..c4e729d37 100644 --- a/src/api/api/search_application.ts +++ b/src/api/api/search_application.ts @@ -45,7 +45,7 @@ export default class SearchApplication { /** * Deletes a search application. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-search-application.html | Elasticsearch API documentation} */ async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/security.ts b/src/api/api/security.ts index c957d8c11..7668d9ee2 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -918,6 +918,32 @@ export default class Security { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Retrieve settings for the security system indices + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-settings.html | Elasticsearch API documentation} + */ + async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_security/settings' + return await this.transport.request({ path, method, querystring, body }, options) + } + /** * Creates a bearer token for access without requiring basic authentication. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-token.html | Elasticsearch API documentation} @@ -1400,7 +1426,7 @@ export default class Security { async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'rules', 'run_as'] + const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'role_templates', 'rules', 'run_as'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1771,7 +1797,7 @@ export default class Security { async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['role_descriptors', 'metadata'] + const acceptedBody: string[] = ['role_descriptors', 'metadata', 'expiration'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1826,6 +1852,32 @@ export default class Security { return await this.transport.request({ path, method, querystring, body }, options) } + /** + * Update settings for the security system index + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-settings.html | Elasticsearch API documentation} + */ + async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = '/_security/settings' + return await this.transport.request({ path, method, querystring, body }, options) + } + /** * Update application specific data for the user profile of the given unique ID. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-user-profile-data.html | Elasticsearch API documentation} diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index ca44736c7..ba75e1c39 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -73,4 +73,42 @@ export default class TextStructure { const path = '/_text_structure/find_structure' return await this.transport.request({ path, method, querystring, bulkBody: body }, options) } + + /** + * Tests a Grok pattern on some text. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/test-grok-pattern.html | Elasticsearch API documentation} + */ + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['grok_pattern', 'text'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_text_structure/test_grok_pattern' + return await this.transport.request({ path, method, querystring, body }, options) + } } diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts index 5549386e8..79cbb8cc2 100644 --- a/src/api/api/update_by_query.ts +++ b/src/api/api/update_by_query.ts @@ -38,7 +38,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Performs an update on every document in the index without changing the source, for example to pick up a mapping change. + * Updates documents that match the specified query. If no query is specified, performs an update on every document in the index without changing the source, for example to pick up a mapping change. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html | Elasticsearch API documentation} */ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/index.ts b/src/api/index.ts index ac64d4a36..286a59bb7 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -43,6 +43,7 @@ import deleteByQueryRethrottleApi from './api/delete_by_query_rethrottle' import deleteScriptApi from './api/delete_script' import EnrichApi from './api/enrich' import EqlApi from './api/eql' +import EsqlApi from './api/esql' import existsApi from './api/exists' import existsSourceApi from './api/exists_source' import explainApi from './api/explain' @@ -59,6 +60,7 @@ import healthReportApi from './api/health_report' import IlmApi from './api/ilm' import indexApi from './api/index' import IndicesApi from './api/indices' +import InferenceApi from './api/inference' import infoApi from './api/info' import IngestApi from './api/ingest' import knnSearchApi from './api/knn_search' @@ -126,6 +128,7 @@ export default interface API { deleteScript: typeof deleteScriptApi enrich: EnrichApi eql: EqlApi + esql: EsqlApi exists: typeof existsApi existsSource: typeof existsSourceApi explain: typeof explainApi @@ -142,6 +145,7 @@ export default interface API { ilm: IlmApi index: typeof indexApi indices: IndicesApi + inference: InferenceApi info: typeof infoApi ingest: IngestApi knnSearch: typeof knnSearchApi @@ -199,11 +203,13 @@ const kCluster = Symbol('Cluster') const kDanglingIndices = Symbol('DanglingIndices') const kEnrich = Symbol('Enrich') const kEql = Symbol('Eql') +const kEsql = Symbol('Esql') const kFeatures = Symbol('Features') const kFleet = Symbol('Fleet') const kGraph = Symbol('Graph') const kIlm = Symbol('Ilm') const kIndices = Symbol('Indices') +const kInference = Symbol('Inference') const kIngest = Symbol('Ingest') const kLicense = Symbol('License') const kLogstash = Symbol('Logstash') @@ -237,11 +243,13 @@ export default class API { [kDanglingIndices]: symbol | null [kEnrich]: symbol | null [kEql]: symbol | null + [kEsql]: symbol | null [kFeatures]: symbol | null [kFleet]: symbol | null [kGraph]: symbol | null [kIlm]: symbol | null [kIndices]: symbol | null + [kInference]: symbol | null [kIngest]: symbol | null [kLicense]: symbol | null [kLogstash]: symbol | null @@ -274,11 +282,13 @@ export default class API { this[kDanglingIndices] = null this[kEnrich] = null this[kEql] = null + this[kEsql] = null this[kFeatures] = null this[kFleet] = null this[kGraph] = null this[kIlm] = null this[kIndices] = null + this[kInference] = null this[kIngest] = null this[kLicense] = null this[kLogstash] = null @@ -375,6 +385,9 @@ Object.defineProperties(API.prototype, { eql: { get () { return this[kEql] === null ? (this[kEql] = new EqlApi(this.transport)) : this[kEql] } }, + esql: { + get () { return this[kEsql] === null ? (this[kEsql] = new EsqlApi(this.transport)) : this[kEsql] } + }, features: { get () { return this[kFeatures] === null ? (this[kFeatures] = new FeaturesApi(this.transport)) : this[kFeatures] } }, @@ -390,6 +403,9 @@ Object.defineProperties(API.prototype, { indices: { get () { return this[kIndices] === null ? (this[kIndices] = new IndicesApi(this.transport)) : this[kIndices] } }, + inference: { + get () { return this[kInference] === null ? 
(this[kInference] = new InferenceApi(this.transport)) : this[kInference] } + }, ingest: { get () { return this[kIngest] === null ? (this[kIngest] = new IngestApi(this.transport)) : this[kIngest] } }, diff --git a/src/api/types.ts b/src/api/types.ts index 707e1cc13..37a0fa4c4 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -347,6 +347,7 @@ export interface FieldCapsRequest extends RequestBase { include_unmapped?: boolean filters?: string types?: string[] + include_empty_fields?: boolean fields?: Fields index_filter?: QueryDslQueryContainer runtime_mappings?: MappingRuntimeFields @@ -1146,6 +1147,7 @@ export interface SearchRequest extends RequestBase { _source_includes?: Fields q?: string aggregations?: Record + /** @alias aggregations */ aggs?: Record collapse?: SearchFieldCollapse explain?: boolean @@ -2073,6 +2075,13 @@ export interface ElasticsearchVersionInfo { number: string } +export interface ElasticsearchVersionMinInfo { + build_flavor: string + minimum_index_compatibility_version: VersionString + minimum_wire_compatibility_version: VersionString + number: string +} + export interface EmptyObject { } @@ -2094,6 +2103,8 @@ export interface ErrorResponseBase { status: integer } +export type EsqlColumns = ArrayBuffer + export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' export type ExpandWildcards = ExpandWildcard | ExpandWildcard[] @@ -2275,6 +2286,7 @@ export interface KnnQuery { boost?: float filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float + inner_hits?: SearchInnerHits } export interface LatLonGeoLocation { @@ -2286,8 +2298,6 @@ export type Level = 'cluster' | 'indices' | 'shards' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' -export type ManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged' - export type MapboxVectorTiles = ArrayBuffer export interface MergesStats { @@ -2467,6 +2477,8 @@ export interface RrfRank { window_size?: long } +export type ScalarValue = long | double | string | boolean | null + export interface ScoreSort { order?: SortOrder } @@ -4508,11 +4520,11 @@ export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNor export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { type: 'path_hierarchy' - buffer_size: SpecUtilsStringified - delimiter: string - replacement: string - reverse: SpecUtilsStringified - skip: SpecUtilsStringified + buffer_size?: SpecUtilsStringified + delimiter?: string + replacement?: string + reverse?: SpecUtilsStringified + skip?: SpecUtilsStringified } export interface AnalysisPatternAnalyzer { @@ -4688,7 +4700,7 @@ export interface AnalysisTokenFilterBase { version?: VersionString } -export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | 
AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuTokenizer | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter +export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter export type AnalysisTokenizer = string | AnalysisTokenizerDefinition @@ -5096,7 +5108,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { type: 'point' } -export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty 
| MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty +export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingSparseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty export interface MappingPropertyBase { meta?: Record @@ -5190,6 +5202,10 @@ export interface MappingSourceField { export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic' +export interface MappingSparseVectorProperty extends MappingPropertyBase { + type: 'sparse_vector' +} + export interface MappingSuggestContext { name: Name path?: Field @@ -5432,9 +5448,10 @@ export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { distance: Distance distance_type?: GeoDistanceType validation_method?: QueryDslGeoValidationMethod + ignore_unmapped?: boolean } export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys -& { [property: string]: GeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | float | string } +& { [property: string]: GeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | boolean | float | string } export type QueryDslGeoExecution = 'memory' | 'indexed' @@ -5746,6 +5763,7 @@ export interface QueryDslQueryContainer { has_parent?: QueryDslHasParentQuery ids?: QueryDslIdsQuery intervals?: Partial> + knn?: KnnQuery match?: Partial> match_all?: QueryDslMatchAllQuery match_bool_prefix?: Partial> @@ -5781,6 +5799,7 @@ export interface QueryDslQueryContainer { terms?: QueryDslTermsQuery terms_set?: Partial> text_expansion?: Partial> + weighted_tokens?: Partial> wildcard?: Partial> wrapper?: QueryDslWrapperQuery type?: QueryDslTypeQuery @@ -6003,14 +6022,26 @@ export interface QueryDslTermsSetQuery extends QueryDslQueryBase { export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { model_id: string model_text: string + pruning_config?: QueryDslTokenPruningConfig } export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' +export interface QueryDslTokenPruningConfig { + 
tokens_freq_ratio_threshold?: integer + tokens_weight_threshold?: float + only_score_pruned_tokens?: boolean +} + export interface QueryDslTypeQuery extends QueryDslQueryBase { value: string } +export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { + tokens: Record + pruning_config?: QueryDslTokenPruningConfig +} + export interface QueryDslWildcardQuery extends QueryDslQueryBase { case_insensitive?: boolean rewrite?: MultiTermQueryRewrite @@ -6119,6 +6150,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { _source_includes?: Fields q?: string aggregations?: Record + /** @alias aggregations */ aggs?: Record collapse?: SearchFieldCollapse explain?: boolean @@ -8952,7 +8984,7 @@ export interface EnrichPolicy { enrich_fields: Fields indices: Indices match_field: Field - query?: string + query?: QueryDslQueryContainer name?: Name elasticsearch_version?: string } @@ -9111,6 +9143,18 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase + /** @alias aggregations */ aggs?: Record collapse?: SearchFieldCollapse explain?: boolean @@ -9525,7 +9570,7 @@ export interface IndicesDataStream { generation: integer hidden: boolean ilm_policy?: Name - next_generation_managed_by: ManagedBy + next_generation_managed_by: IndicesManagedBy prefer_ilm: boolean indices: IndicesDataStreamIndex[] lifecycle?: IndicesDataStreamLifecycleWithRollover @@ -9541,7 +9586,7 @@ export interface IndicesDataStreamIndex { index_name: IndexName index_uuid: Uuid ilm_policy?: Name - managed_by: ManagedBy + managed_by: IndicesManagedBy prefer_ilm: boolean } @@ -9697,7 +9742,6 @@ export interface IndicesIndexSettingsKeys { analysis?: IndicesIndexSettingsAnalysis settings?: IndicesIndexSettings time_series?: IndicesIndexSettingsTimeSeries - shards?: integer queries?: IndicesQueries similarity?: IndicesSettingsSimilarity mapping?: IndicesMappingLimitSettings @@ -9790,6 +9834,8 @@ export interface IndicesIndexingSlowlogTresholds { index?: IndicesSlowlogTresholdLevels } +export type IndicesManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged' + export interface IndicesMappingLimitSettings { coerce?: boolean total_fields?: IndicesMappingLimitSettingsTotalFields @@ -10791,6 +10837,24 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult +export interface IndicesResolveClusterRequest extends RequestBase { + name: Names + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean +} + +export interface IndicesResolveClusterResolveClusterInfo { + connected: boolean + skip_unavailable: boolean + matching_indices?: boolean + error?: string + version?: ElasticsearchVersionMinInfo +} + +export type IndicesResolveClusterResponse = Record + export interface IndicesResolveIndexRequest extends RequestBase { name: Names expand_wildcards?: ExpandWildcards @@ -11263,6 +11327,80 @@ export interface IndicesValidateQueryResponse { error?: string } +export type InferenceDenseByteVector = byte[] + +export type InferenceDenseVector = float[] + +export interface InferenceInferenceResult { + text_embedding_bytes?: InferenceTextEmbeddingByteResult[] + text_embedding?: InferenceTextEmbeddingResult[] + sparse_embedding?: InferenceSparseEmbeddingResult[] +} + +export interface InferenceModelConfig { + service: string + service_settings: InferenceServiceSettings + task_settings: InferenceTaskSettings +} + +export interface 
InferenceModelConfigContainer extends InferenceModelConfig { + model_id: string + task_type: InferenceTaskType +} + +export type InferenceServiceSettings = any + +export interface InferenceSparseEmbeddingResult { + embedding: InferenceSparseVector +} + +export type InferenceSparseVector = Record + +export type InferenceTaskSettings = any + +export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' + +export interface InferenceTextEmbeddingByteResult { + embedding: InferenceDenseByteVector +} + +export interface InferenceTextEmbeddingResult { + embedding: InferenceDenseVector +} + +export interface InferenceDeleteModelRequest extends RequestBase { + task_type?: InferenceTaskType + inference_id: Id +} + +export type InferenceDeleteModelResponse = AcknowledgedResponseBase + +export interface InferenceGetModelRequest extends RequestBase { + task_type?: InferenceTaskType + inference_id: Id +} + +export interface InferenceGetModelResponse { + models: InferenceModelConfigContainer[] +} + +export interface InferenceInferenceRequest extends RequestBase { + task_type?: InferenceTaskType + inference_id: Id + input: string | string[] + task_settings?: InferenceTaskSettings +} + +export type InferenceInferenceResponse = InferenceInferenceResult + +export interface InferencePutModelRequest extends RequestBase { + task_type?: InferenceTaskType + inference_id: Id + model_config?: InferenceModelConfig +} + +export type InferencePutModelResponse = InferenceModelConfigContainer + export interface IngestAppendProcessor extends IngestProcessorBase { field: Field value: any[] @@ -11276,6 +11414,7 @@ export interface IngestAttachmentProcessor extends IngestProcessorBase { indexed_chars_field?: Field properties?: string[] target_field?: Field + remove_binary?: boolean resource_name?: string } @@ -11456,6 +11595,7 @@ export interface IngestPipeline { on_failure?: IngestProcessorContainer[] processors?: IngestProcessorContainer[] version?: VersionNumber + _meta: Metadata } export interface IngestPipelineConfig { @@ -11498,7 +11638,8 @@ export interface IngestProcessorContainer { lowercase?: IngestLowercaseProcessor remove?: IngestRemoveProcessor rename?: IngestRenameProcessor - script?: Script + reroute?: IngestRerouteProcessor + script?: IngestScriptProcessor set?: IngestSetProcessor sort?: IngestSortProcessor split?: IngestSplitProcessor @@ -11525,6 +11666,19 @@ export interface IngestRenameProcessor extends IngestProcessorBase { target_field: Field } +export interface IngestRerouteProcessor extends IngestProcessorBase { + destination?: string + dataset?: string | string[] + namespace?: string | string[] +} + +export interface IngestScriptProcessor extends IngestProcessorBase { + id?: Id + lang?: string + params?: Record + source?: string +} + export interface IngestSetProcessor extends IngestProcessorBase { copy_from?: Field field: Field @@ -12969,6 +13123,7 @@ export interface MlTrainedModelConfig { metadata?: MlTrainedModelConfigMetadata model_size_bytes?: ByteSize location?: MlTrainedModelLocation + prefix_strings?: MlTrainedModelPrefixStrings } export interface MlTrainedModelConfigInput { @@ -13056,6 +13211,11 @@ export interface MlTrainedModelLocationIndex { name: IndexName } +export interface MlTrainedModelPrefixStrings { + ingest: string + search: string +} + export interface MlTrainedModelSizeStats { model_size_bytes: ByteSize required_native_memory_bytes: integer @@ -13851,6 +14011,7 @@ export interface MlPutDatafeedRequest extends RequestBase { delayed_data_check_config?: 
MlDelayedDataCheckConfig frequency?: Duration indices?: Indices + /** @alias indices */ indexes?: Indices indices_options?: IndicesOptions job_id?: Id @@ -14185,6 +14346,7 @@ export interface MlUpdateDatafeedRequest extends RequestBase { delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Duration indices?: string[] + /** @alias indices */ indexes?: string[] indices_options?: IndicesOptions job_id?: Id @@ -14874,9 +15036,13 @@ export interface NodesInfoNodeInfoClient { type: string } -export interface NodesInfoNodeInfoDiscover { - seed_hosts: string +export interface NodesInfoNodeInfoDiscoverKeys { + seed_hosts?: string[] + type?: string + seed_providers?: string[] } +export type NodesInfoNodeInfoDiscover = NodesInfoNodeInfoDiscoverKeys +& { [property: string]: any } export interface NodesInfoNodeInfoHttp { bound_address: string[] @@ -14942,9 +15108,9 @@ export interface NodesInfoNodeInfoOSCPU { } export interface NodesInfoNodeInfoPath { - logs: string - home: string - repo: string[] + logs?: string + home?: string + repo?: string[] data?: string[] } @@ -14972,7 +15138,7 @@ export interface NodesInfoNodeInfoSearchRemote { export interface NodesInfoNodeInfoSettings { cluster: NodesInfoNodeInfoSettingsCluster node: NodesInfoNodeInfoSettingsNode - path: NodesInfoNodeInfoPath + path?: NodesInfoNodeInfoPath repositories?: NodesInfoNodeInfoRepositories discovery?: NodesInfoNodeInfoDiscover action?: NodesInfoNodeInfoAction @@ -14991,7 +15157,7 @@ export interface NodesInfoNodeInfoSettingsCluster { name: Name routing?: IndicesIndexRouting election: NodesInfoNodeInfoSettingsClusterElection - initial_master_nodes?: string + initial_master_nodes?: string[] deprecation_indexing?: NodesInfoDeprecationIndexing } @@ -15454,6 +15620,7 @@ export interface RollupRollupSearchRequest extends RequestBase { rest_total_hits_as_int?: boolean typed_keys?: boolean aggregations?: Record + /** @alias aggregations */ aggs?: Record query?: QueryDslQueryContainer size?: integer @@ -15679,11 +15846,9 @@ export interface SecurityCreatedStatus { } export interface SecurityFieldRule { - username?: Name + username?: Names dn?: Names groups?: Names - metadata?: any - realm?: SecurityRealm } export interface SecurityFieldSecurity { @@ -15713,10 +15878,6 @@ export interface SecurityManageUserPrivileges { applications: string[] } -export interface SecurityRealm { - name: Name -} - export interface SecurityRealmInfo { name: Name type: string @@ -15749,7 +15910,7 @@ export interface SecurityRoleMapping { metadata: Metadata roles: string[] rules: SecurityRoleMappingRule - role_templates?: SecurityGetRoleRoleTemplate[] + role_templates?: SecurityRoleTemplate[] } export interface SecurityRoleMappingRule { @@ -15759,6 +15920,11 @@ export interface SecurityRoleMappingRule { except?: SecurityRoleMappingRule } +export interface SecurityRoleTemplate { + format?: SecurityTemplateFormat + template: Script +} + export type SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer export interface SecurityRoleTemplateInlineScript extends ScriptBase { @@ -15773,6 +15939,8 @@ export interface SecurityRoleTemplateQuery { export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | SecurityRoleTemplateInlineQuery | StoredScriptId +export type SecurityTemplateFormat = 'string' | 'json' + export interface SecurityTransientMetadataConfig { enabled: boolean } @@ -16063,6 +16231,7 @@ export interface SecurityGetApiKeyRequest extends RequestBase { realm_name?: Name username?: Username with_limited_by?: boolean + 
active_only?: boolean } export interface SecurityGetApiKeyResponse { @@ -16097,17 +16266,10 @@ export interface SecurityGetRoleRole { run_as: string[] transient_metadata: SecurityTransientMetadataConfig applications: SecurityApplicationPrivileges[] - role_templates?: SecurityGetRoleRoleTemplate[] + role_templates?: SecurityRoleTemplate[] global?: Record>> } -export interface SecurityGetRoleRoleTemplate { - format?: SecurityGetRoleTemplateFormat - template: Script -} - -export type SecurityGetRoleTemplateFormat = 'string' | 'json' - export interface SecurityGetRoleMappingRequest extends RequestBase { name?: Names } @@ -16366,6 +16528,7 @@ export interface SecurityPutRoleMappingRequest extends RequestBase { enabled?: boolean metadata?: Metadata roles?: string[] + role_templates?: SecurityRoleTemplate[] rules?: SecurityRoleMappingRule run_as?: string[] } @@ -16497,6 +16660,7 @@ export interface SecurityUpdateApiKeyRequest extends RequestBase { id: Id role_descriptors?: Record metadata?: Metadata + expiration?: Duration } export interface SecurityUpdateApiKeyResponse { @@ -17180,7 +17344,7 @@ export interface SynonymsPutSynonymResponse { export interface SynonymsPutSynonymRuleRequest extends RequestBase { set_id: Id rule_id: Id - synonyms: SynonymsSynonymString[] + synonyms: SynonymsSynonymString } export type SynonymsPutSynonymRuleResponse = SynonymsSynonymsUpdateResult @@ -17278,6 +17442,7 @@ export interface TextStructureFindStructureRequest { charset?: string column_names?: string delimiter?: string + ecs_compatibility?: string explain?: boolean format?: string grok_pattern?: string @@ -17322,6 +17487,27 @@ export interface TextStructureFindStructureTopHit { value: any } +export interface TextStructureTestGrokPatternMatchedField { + match: string + offset: integer + length: integer +} + +export interface TextStructureTestGrokPatternMatchedText { + matched: boolean + fields?: Record +} + +export interface TextStructureTestGrokPatternRequest extends RequestBase { + ecs_compatibility?: string + grok_pattern: string + text: string[] +} + +export interface TextStructureTestGrokPatternResponse { + matches: TextStructureTestGrokPatternMatchedText[] +} + export interface TransformDestination { index?: IndexName pipeline?: string diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index dbbe3ea1a..c4f534b6e 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -365,6 +365,7 @@ export interface FieldCapsRequest extends RequestBase { include_unmapped?: boolean filters?: string types?: string[] + include_empty_fields?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { fields?: Fields @@ -1200,6 +1201,7 @@ export interface SearchRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { aggregations?: Record + /** @alias aggregations */ aggs?: Record collapse?: SearchFieldCollapse explain?: boolean @@ -2146,6 +2148,13 @@ export interface ElasticsearchVersionInfo { number: string } +export interface ElasticsearchVersionMinInfo { + build_flavor: string + minimum_index_compatibility_version: VersionString + minimum_wire_compatibility_version: VersionString + number: string +} + export interface EmptyObject { } @@ -2167,6 +2176,8 @@ export interface ErrorResponseBase { status: integer } +export type EsqlColumns = ArrayBuffer + export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' export type ExpandWildcards = ExpandWildcard | ExpandWildcard[] @@ -2348,6 +2359,7 @@ export interface KnnQuery { boost?: float filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float + inner_hits?: SearchInnerHits } export interface LatLonGeoLocation { @@ -2359,8 +2371,6 @@ export type Level = 'cluster' | 'indices' | 'shards' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' -export type ManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged' - export type MapboxVectorTiles = ArrayBuffer export interface MergesStats { @@ -2540,6 +2550,8 @@ export interface RrfRank { window_size?: long } +export type ScalarValue = long | double | string | boolean | null + export interface ScoreSort { order?: SortOrder } @@ -4581,11 +4593,11 @@ export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNor export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { type: 'path_hierarchy' - buffer_size: SpecUtilsStringified - delimiter: string - replacement: string - reverse: SpecUtilsStringified - skip: SpecUtilsStringified + buffer_size?: SpecUtilsStringified + delimiter?: string + replacement?: string + reverse?: SpecUtilsStringified + skip?: SpecUtilsStringified } export interface AnalysisPatternAnalyzer { @@ -4761,7 +4773,7 @@ export interface AnalysisTokenFilterBase { version?: VersionString } -export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuTokenizer | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | 
AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter +export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter export type AnalysisTokenizer = string | AnalysisTokenizerDefinition @@ -5169,7 +5181,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { type: 'point' } -export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty +export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | 
MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingSparseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty export interface MappingPropertyBase { meta?: Record @@ -5263,6 +5275,10 @@ export interface MappingSourceField { export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic' +export interface MappingSparseVectorProperty extends MappingPropertyBase { + type: 'sparse_vector' +} + export interface MappingSuggestContext { name: Name path?: Field @@ -5505,9 +5521,10 @@ export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { distance: Distance distance_type?: GeoDistanceType validation_method?: QueryDslGeoValidationMethod + ignore_unmapped?: boolean } export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys -& { [property: string]: GeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | float | string } +& { [property: string]: GeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | boolean | float | string } export type QueryDslGeoExecution = 'memory' | 'indexed' @@ -5819,6 +5836,7 @@ export interface QueryDslQueryContainer { has_parent?: QueryDslHasParentQuery ids?: QueryDslIdsQuery intervals?: Partial> + knn?: KnnQuery match?: Partial> match_all?: QueryDslMatchAllQuery match_bool_prefix?: Partial> @@ -5854,6 +5872,7 @@ export interface QueryDslQueryContainer { terms?: QueryDslTermsQuery terms_set?: Partial> text_expansion?: Partial> + weighted_tokens?: Partial> wildcard?: Partial> wrapper?: QueryDslWrapperQuery type?: QueryDslTypeQuery @@ -6076,14 +6095,26 @@ export interface QueryDslTermsSetQuery extends QueryDslQueryBase { export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { model_id: string model_text: string + pruning_config?: QueryDslTokenPruningConfig } export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' +export interface QueryDslTokenPruningConfig { + tokens_freq_ratio_threshold?: integer + tokens_weight_threshold?: float + only_score_pruned_tokens?: boolean +} + export interface QueryDslTypeQuery extends QueryDslQueryBase { value: string } +export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { + tokens: Record + pruning_config?: QueryDslTokenPruningConfig +} + export interface QueryDslWildcardQuery extends QueryDslQueryBase { case_insensitive?: boolean rewrite?: MultiTermQueryRewrite @@ -6194,6 +6225,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { aggregations?: Record + /** @alias aggregations */ aggs?: Record collapse?: SearchFieldCollapse explain?: boolean @@ -9053,7 +9085,7 @@ export interface EnrichPolicy { enrich_fields: Fields indices: Indices match_field: Field - query?: string + query?: QueryDslQueryContainer name?: Name elasticsearch_version?: string } @@ -9218,6 +9250,21 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase + /** @alias aggregations */ aggs?: Record collapse?: SearchFieldCollapse explain?: boolean @@ -9648,7 +9696,7 @@ export interface IndicesDataStream { generation: integer hidden: boolean ilm_policy?: Name - next_generation_managed_by: ManagedBy + next_generation_managed_by: IndicesManagedBy prefer_ilm: boolean indices: IndicesDataStreamIndex[] lifecycle?: IndicesDataStreamLifecycleWithRollover @@ -9664,7 +9712,7 @@ export interface IndicesDataStreamIndex { index_name: IndexName index_uuid: Uuid ilm_policy?: Name - managed_by: ManagedBy + managed_by: IndicesManagedBy prefer_ilm: boolean } @@ -9820,7 +9868,6 @@ export interface IndicesIndexSettingsKeys { analysis?: IndicesIndexSettingsAnalysis settings?: IndicesIndexSettings time_series?: IndicesIndexSettingsTimeSeries - shards?: integer queries?: IndicesQueries similarity?: IndicesSettingsSimilarity mapping?: IndicesMappingLimitSettings @@ -9913,6 +9960,8 @@ export interface IndicesIndexingSlowlogTresholds { index?: IndicesSlowlogTresholdLevels } +export type IndicesManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged' + export interface IndicesMappingLimitSettings { coerce?: boolean total_fields?: IndicesMappingLimitSettingsTotalFields @@ -10943,6 +10992,24 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult +export interface IndicesResolveClusterRequest extends RequestBase { + name: Names + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean +} + +export interface IndicesResolveClusterResolveClusterInfo { + connected: boolean + skip_unavailable: boolean + matching_indices?: boolean + error?: string + version?: ElasticsearchVersionMinInfo +} + +export type IndicesResolveClusterResponse = Record + export interface IndicesResolveIndexRequest extends RequestBase { name: Names expand_wildcards?: ExpandWildcards @@ -11434,6 +11501,84 @@ export interface IndicesValidateQueryResponse { error?: string } +export type InferenceDenseByteVector = byte[] + +export type InferenceDenseVector = float[] + +export interface InferenceInferenceResult { + text_embedding_bytes?: InferenceTextEmbeddingByteResult[] + text_embedding?: InferenceTextEmbeddingResult[] + sparse_embedding?: InferenceSparseEmbeddingResult[] +} + +export interface InferenceModelConfig { + service: string + service_settings: InferenceServiceSettings + task_settings: InferenceTaskSettings +} + +export interface InferenceModelConfigContainer extends InferenceModelConfig { + model_id: string + task_type: InferenceTaskType +} + +export type InferenceServiceSettings = any + +export interface InferenceSparseEmbeddingResult { + embedding: InferenceSparseVector +} + +export type InferenceSparseVector = Record + +export type InferenceTaskSettings = any + +export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' + +export interface InferenceTextEmbeddingByteResult { + embedding: InferenceDenseByteVector +} + +export interface InferenceTextEmbeddingResult { + 
embedding: InferenceDenseVector +} + +export interface InferenceDeleteModelRequest extends RequestBase { + task_type?: InferenceTaskType + inference_id: Id +} + +export type InferenceDeleteModelResponse = AcknowledgedResponseBase + +export interface InferenceGetModelRequest extends RequestBase { + task_type?: InferenceTaskType + inference_id: Id +} + +export interface InferenceGetModelResponse { + models: InferenceModelConfigContainer[] +} + +export interface InferenceInferenceRequest extends RequestBase { + task_type?: InferenceTaskType + inference_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + input: string | string[] + task_settings?: InferenceTaskSettings + } +} + +export type InferenceInferenceResponse = InferenceInferenceResult + +export interface InferencePutModelRequest extends RequestBase { + task_type?: InferenceTaskType + inference_id: Id + /** @deprecated The use of the 'body' key has been deprecated, use 'model_config' instead. */ + body?: InferenceModelConfig +} + +export type InferencePutModelResponse = InferenceModelConfigContainer + export interface IngestAppendProcessor extends IngestProcessorBase { field: Field value: any[] @@ -11447,6 +11592,7 @@ export interface IngestAttachmentProcessor extends IngestProcessorBase { indexed_chars_field?: Field properties?: string[] target_field?: Field + remove_binary?: boolean resource_name?: string } @@ -11627,6 +11773,7 @@ export interface IngestPipeline { on_failure?: IngestProcessorContainer[] processors?: IngestProcessorContainer[] version?: VersionNumber + _meta: Metadata } export interface IngestPipelineConfig { @@ -11669,7 +11816,8 @@ export interface IngestProcessorContainer { lowercase?: IngestLowercaseProcessor remove?: IngestRemoveProcessor rename?: IngestRenameProcessor - script?: Script + reroute?: IngestRerouteProcessor + script?: IngestScriptProcessor set?: IngestSetProcessor sort?: IngestSortProcessor split?: IngestSplitProcessor @@ -11696,6 +11844,19 @@ export interface IngestRenameProcessor extends IngestProcessorBase { target_field: Field } +export interface IngestRerouteProcessor extends IngestProcessorBase { + destination?: string + dataset?: string | string[] + namespace?: string | string[] +} + +export interface IngestScriptProcessor extends IngestProcessorBase { + id?: Id + lang?: string + params?: Record + source?: string +} + export interface IngestSetProcessor extends IngestProcessorBase { copy_from?: Field field: Field @@ -13150,6 +13311,7 @@ export interface MlTrainedModelConfig { metadata?: MlTrainedModelConfigMetadata model_size_bytes?: ByteSize location?: MlTrainedModelLocation + prefix_strings?: MlTrainedModelPrefixStrings } export interface MlTrainedModelConfigInput { @@ -13237,6 +13399,11 @@ export interface MlTrainedModelLocationIndex { name: IndexName } +export interface MlTrainedModelPrefixStrings { + ingest: string + search: string +} + export interface MlTrainedModelSizeStats { model_size_bytes: ByteSize required_native_memory_bytes: integer @@ -14098,6 +14265,7 @@ export interface MlPutDatafeedRequest extends RequestBase { delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Duration indices?: Indices + /** @alias indices */ indexes?: Indices indices_options?: IndicesOptions job_id?: Id @@ -14462,6 +14630,7 @@ export interface MlUpdateDatafeedRequest extends RequestBase { delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Duration indices?: string[] + /** @alias indices */ 
indexes?: string[] indices_options?: IndicesOptions job_id?: Id @@ -15166,9 +15335,13 @@ export interface NodesInfoNodeInfoClient { type: string } -export interface NodesInfoNodeInfoDiscover { - seed_hosts: string +export interface NodesInfoNodeInfoDiscoverKeys { + seed_hosts?: string[] + type?: string + seed_providers?: string[] } +export type NodesInfoNodeInfoDiscover = NodesInfoNodeInfoDiscoverKeys +& { [property: string]: any } export interface NodesInfoNodeInfoHttp { bound_address: string[] @@ -15234,9 +15407,9 @@ export interface NodesInfoNodeInfoOSCPU { } export interface NodesInfoNodeInfoPath { - logs: string - home: string - repo: string[] + logs?: string + home?: string + repo?: string[] data?: string[] } @@ -15264,7 +15437,7 @@ export interface NodesInfoNodeInfoSearchRemote { export interface NodesInfoNodeInfoSettings { cluster: NodesInfoNodeInfoSettingsCluster node: NodesInfoNodeInfoSettingsNode - path: NodesInfoNodeInfoPath + path?: NodesInfoNodeInfoPath repositories?: NodesInfoNodeInfoRepositories discovery?: NodesInfoNodeInfoDiscover action?: NodesInfoNodeInfoAction @@ -15283,7 +15456,7 @@ export interface NodesInfoNodeInfoSettingsCluster { name: Name routing?: IndicesIndexRouting election: NodesInfoNodeInfoSettingsClusterElection - initial_master_nodes?: string + initial_master_nodes?: string[] deprecation_indexing?: NodesInfoDeprecationIndexing } @@ -15757,6 +15930,7 @@ export interface RollupRollupSearchRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { aggregations?: Record + /** @alias aggregations */ aggs?: Record query?: QueryDslQueryContainer size?: integer @@ -15990,11 +16164,9 @@ export interface SecurityCreatedStatus { } export interface SecurityFieldRule { - username?: Name + username?: Names dn?: Names groups?: Names - metadata?: any - realm?: SecurityRealm } export interface SecurityFieldSecurity { @@ -16024,10 +16196,6 @@ export interface SecurityManageUserPrivileges { applications: string[] } -export interface SecurityRealm { - name: Name -} - export interface SecurityRealmInfo { name: Name type: string @@ -16060,7 +16228,7 @@ export interface SecurityRoleMapping { metadata: Metadata roles: string[] rules: SecurityRoleMappingRule - role_templates?: SecurityGetRoleRoleTemplate[] + role_templates?: SecurityRoleTemplate[] } export interface SecurityRoleMappingRule { @@ -16070,6 +16238,11 @@ export interface SecurityRoleMappingRule { except?: SecurityRoleMappingRule } +export interface SecurityRoleTemplate { + format?: SecurityTemplateFormat + template: Script +} + export type SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer export interface SecurityRoleTemplateInlineScript extends ScriptBase { @@ -16084,6 +16257,8 @@ export interface SecurityRoleTemplateQuery { export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | SecurityRoleTemplateInlineQuery | StoredScriptId +export type SecurityTemplateFormat = 'string' | 'json' + export interface SecurityTransientMetadataConfig { enabled: boolean } @@ -16383,6 +16558,7 @@ export interface SecurityGetApiKeyRequest extends RequestBase { realm_name?: Name username?: Username with_limited_by?: boolean + active_only?: boolean } export interface SecurityGetApiKeyResponse { @@ -16417,17 +16593,10 @@ export interface SecurityGetRoleRole { run_as: string[] transient_metadata: SecurityTransientMetadataConfig applications: SecurityApplicationPrivileges[] - role_templates?: 
SecurityGetRoleRoleTemplate[] + role_templates?: SecurityRoleTemplate[] global?: Record>> } -export interface SecurityGetRoleRoleTemplate { - format?: SecurityGetRoleTemplateFormat - template: Script -} - -export type SecurityGetRoleTemplateFormat = 'string' | 'json' - export interface SecurityGetRoleMappingRequest extends RequestBase { name?: Names } @@ -16710,6 +16879,7 @@ export interface SecurityPutRoleMappingRequest extends RequestBase { enabled?: boolean metadata?: Metadata roles?: string[] + role_templates?: SecurityRoleTemplate[] rules?: SecurityRoleMappingRule run_as?: string[] } @@ -16869,6 +17039,7 @@ export interface SecurityUpdateApiKeyRequest extends RequestBase { body?: { role_descriptors?: Record metadata?: Metadata + expiration?: Duration } } @@ -17588,7 +17759,7 @@ export interface SynonymsPutSynonymRuleRequest extends RequestBase { rule_id: Id /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - synonyms: SynonymsSynonymString[] + synonyms: SynonymsSynonymString } } @@ -17687,6 +17858,7 @@ export interface TextStructureFindStructureRequest { charset?: string column_names?: string delimiter?: string + ecs_compatibility?: string explain?: boolean format?: string grok_pattern?: string @@ -17732,6 +17904,30 @@ export interface TextStructureFindStructureTopHit { value: any } +export interface TextStructureTestGrokPatternMatchedField { + match: string + offset: integer + length: integer +} + +export interface TextStructureTestGrokPatternMatchedText { + matched: boolean + fields?: Record +} + +export interface TextStructureTestGrokPatternRequest extends RequestBase { + ecs_compatibility?: string + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + grok_pattern: string + text: string[] + } +} + +export interface TextStructureTestGrokPatternResponse { + matches: TextStructureTestGrokPatternMatchedText[] +} + export interface TransformDestination { index?: IndexName pipeline?: string From c2fb0a294fa711622e0fd35dcce38aedec8891b2 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 6 Mar 2024 13:11:52 -0500 Subject: [PATCH 299/647] Auto-generated code for main (#2150) --- src/api/types.ts | 2 +- src/api/typesWithBodyKey.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/types.ts b/src/api/types.ts index 37a0fa4c4..0b4ecd5fe 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -11595,7 +11595,7 @@ export interface IngestPipeline { on_failure?: IngestProcessorContainer[] processors?: IngestProcessorContainer[] version?: VersionNumber - _meta: Metadata + _meta?: Metadata } export interface IngestPipelineConfig { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index c4f534b6e..22041532c 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -11773,7 +11773,7 @@ export interface IngestPipeline { on_failure?: IngestProcessorContainer[] processors?: IngestProcessorContainer[] version?: VersionNumber - _meta: Metadata + _meta?: Metadata } export interface IngestPipelineConfig { From fa33037b86f64384b933401236471b89b8ac8d1c Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 7 Mar 2024 20:44:52 +0100 Subject: [PATCH 300/647] Auto-generated code for main (#2152) --- docs/reference.asciidoc | 8 +-- src/api/api/snapshot.ts | 13 +--- src/api/types.ts | 125 +++++++++++++++++++++++++++------- src/api/typesWithBodyKey.ts | 131 +++++++++++++++++++++++++++--------- 4 files changed, 206 insertions(+), 71 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 05f9e1c04..4e74760a0 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -4234,7 +4234,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. ** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. -** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: +** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -8249,7 +8249,7 @@ client.security.putRole({ name }) ** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries. ** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. ** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. -** *`transient_metadata` (Optional, { enabled })*: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. +** *`transient_metadata` (Optional, Record)*: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. 
When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. [discrete] @@ -8685,7 +8685,7 @@ Creates a repository. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.snapshot.createRepository({ repository, type, settings }) +client.snapshot.createRepository({ repository }) ---- [discrete] @@ -8693,8 +8693,6 @@ client.snapshot.createRepository({ repository, type, settings }) * *Request (object):* ** *`repository` (string)*: A repository name -** *`type` (string)* -** *`settings` ({ chunk_size, compress, concurrent_streams, location, read_only })* ** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node ** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout ** *`verify` (Optional, boolean)*: Whether to verify the repository after creation diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index bdd5e9dbc..f6a5d3923 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -154,22 +154,15 @@ export default class Snapshot { async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['repository', 'type', 'settings'] + const acceptedBody: string[] = ['repository'] const querystring: Record = {} // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error - body[key] = params[key] + body = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { diff --git a/src/api/types.ts b/src/api/types.ts index 0b4ecd5fe..94e2cd7aa 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -1449,7 +1449,7 @@ export interface SearchInnerHits { fields?: Fields sort?: Sort _source?: SearchSourceConfig - stored_field?: Fields + stored_fields?: Fields track_scores?: boolean version?: boolean } @@ -2264,7 +2264,7 @@ export interface InlineGetKeys { _seq_no?: SequenceNumber _primary_term?: long _routing?: Routing - _source: TDocument + _source?: TDocument } export type InlineGet = InlineGetKeys & { [property: string]: any } @@ -4876,7 +4876,7 @@ export interface MappingDenseVectorIndexOptions { export interface MappingDenseVectorProperty extends MappingPropertyBase { type: 'dense_vector' - dims: integer + dims?: integer similarity?: string index?: boolean index_options?: MappingDenseVectorIndexOptions @@ -4977,6 +4977,9 @@ export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean ignore_z_value?: boolean null_value?: GeoLocation + index?: boolean + on_script_error?: MappingOnScriptError + script?: Script type: 'geo_point' } @@ -5130,6 +5133,7 @@ export interface MappingRankFeatureProperty extends MappingPropertyBase { } export interface MappingRankFeaturesProperty extends MappingPropertyBase { + positive_score_impact?: boolean type: 'rank_features' } @@ -9013,7 +9017,7 @@ export interface EnrichExecutePolicyRequest extends RequestBase { } export interface EnrichExecutePolicyResponse { - status: EnrichExecutePolicyExecuteEnrichPolicyStatus + status?: EnrichExecutePolicyExecuteEnrichPolicyStatus task_id?: TaskId } @@ -10196,7 +10200,7 @@ export interface IndicesDataStreamsStatsDataStreamsStatsItem { data_stream: Name maximum_timestamp: EpochTime store_size?: ByteSize - store_size_bytes: integer + store_size_bytes: long } export interface IndicesDataStreamsStatsRequest extends RequestBase { @@ -10210,7 +10214,7 @@ export interface IndicesDataStreamsStatsResponse { data_stream_count: integer data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] total_store_sizes?: ByteSize - total_store_size_bytes: integer + total_store_size_bytes: long } export interface IndicesDeleteRequest extends RequestBase { @@ -15891,7 +15895,7 @@ export interface SecurityRoleDescriptor { applications?: SecurityApplicationPrivileges[] metadata?: Metadata run_as?: string[] - transient_metadata?: SecurityTransientMetadataConfig + transient_metadata?: Record } export interface SecurityRoleDescriptorRead { @@ -15902,7 +15906,7 @@ export interface SecurityRoleDescriptorRead { applications?: SecurityApplicationPrivileges[] metadata?: Metadata run_as?: string[] - transient_metadata?: SecurityTransientMetadataConfig + transient_metadata?: Record } export interface SecurityRoleMapping { @@ -15941,10 +15945,6 @@ export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | Secu export type SecurityTemplateFormat = 'string' | 'json' -export interface SecurityTransientMetadataConfig { - enabled: boolean -} - export interface SecurityUser { email?: string | null full_name?: Name | null @@ -16264,7 +16264,7 @@ export interface SecurityGetRoleRole { indices: SecurityIndicesPrivileges[] metadata: Metadata run_as: string[] - transient_metadata: SecurityTransientMetadataConfig + transient_metadata?: Record applications: SecurityApplicationPrivileges[] role_templates?: 
SecurityRoleTemplate[] global?: Record>> @@ -16515,7 +16515,7 @@ export interface SecurityPutRoleRequest extends RequestBase { indices?: SecurityIndicesPrivileges[] metadata?: Metadata run_as?: string[] - transient_metadata?: SecurityTransientMetadataConfig + transient_metadata?: Record } export interface SecurityPutRoleResponse { @@ -16873,11 +16873,37 @@ export interface SlmStopRequest extends RequestBase { export type SlmStopResponse = AcknowledgedResponseBase +export interface SnapshotAzureRepository extends SnapshotRepositoryBase { + type: 'azure' + settings: SnapshotAzureRepositorySettings +} + +export interface SnapshotAzureRepositorySettings extends SnapshotRepositorySettingsBase { + client?: string + container?: string + base_path?: string + readonly?: boolean + location_mode?: string +} + export interface SnapshotFileCountSnapshotStats { file_count: integer size_in_bytes: long } +export interface SnapshotGcsRepository extends SnapshotRepositoryBase { + type: 'gcs' + settings: SnapshotGcsRepositorySettings +} + +export interface SnapshotGcsRepositorySettings extends SnapshotRepositorySettingsBase { + bucket: string + client?: string + base_path?: string + readonly?: boolean + application_name?: string +} + export interface SnapshotIndexDetails { shard_count: integer size?: ByteSize @@ -16890,19 +16916,45 @@ export interface SnapshotInfoFeatureState { indices: Indices } -export interface SnapshotRepository { - type: string +export interface SnapshotReadOnlyUrlRepository extends SnapshotRepositoryBase { + type: 'url' + settings: SnapshotReadOnlyUrlRepositorySettings +} + +export interface SnapshotReadOnlyUrlRepositorySettings extends SnapshotRepositorySettingsBase { + http_max_retries?: integer + http_socket_timeout?: Duration + max_number_of_snapshots?: integer + url: string +} + +export type SnapshotRepository = SnapshotAzureRepository | SnapshotGcsRepository | SnapshotS3Repository | SnapshotSharedFileSystemRepository | SnapshotReadOnlyUrlRepository | SnapshotSourceOnlyRepository + +export interface SnapshotRepositoryBase { uuid?: Uuid - settings: SnapshotRepositorySettings } -export interface SnapshotRepositorySettings { - chunk_size?: string - compress?: string | boolean - concurrent_streams?: string | integer - location: string - read_only?: string | boolean - readonly?: string | boolean +export interface SnapshotRepositorySettingsBase { + chunk_size?: ByteSize + compress?: boolean + max_restore_bytes_per_sec?: ByteSize + max_snapshot_bytes_per_sec?: ByteSize +} + +export interface SnapshotS3Repository extends SnapshotRepositoryBase { + type: 's3' + settings: SnapshotS3RepositorySettings +} + +export interface SnapshotS3RepositorySettings extends SnapshotRepositorySettingsBase { + bucket: string + client?: string + base_path?: string + readonly?: boolean + server_side_encryption?: boolean + buffer_size?: ByteSize + canned_acl?: string + storage_class?: string } export interface SnapshotShardsStats { @@ -16929,6 +16981,17 @@ export interface SnapshotShardsStatsSummaryItem { size_in_bytes: long } +export interface SnapshotSharedFileSystemRepository extends SnapshotRepositoryBase { + type: 'fs' + settings: SnapshotSharedFileSystemRepositorySettings +} + +export interface SnapshotSharedFileSystemRepositorySettings extends SnapshotRepositorySettingsBase { + location: string + max_number_of_snapshots?: integer + readonly?: boolean +} + export interface SnapshotSnapshotIndexStats { shards: Record shards_stats: SnapshotShardsStats @@ -16982,6 +17045,18 @@ export interface 
SnapshotSnapshotStats { total: SnapshotFileCountSnapshotStats } +export interface SnapshotSourceOnlyRepository extends SnapshotRepositoryBase { + type: 'source' + settings: SnapshotSourceOnlyRepositorySettings +} + +export interface SnapshotSourceOnlyRepositorySettings extends SnapshotRepositorySettingsBase { + delegate_type?: string + max_number_of_snapshots?: integer + read_only?: boolean + readonly?: boolean +} + export interface SnapshotStatus { include_global_state: boolean indices: Record @@ -17043,8 +17118,6 @@ export interface SnapshotCreateRepositoryRequest extends RequestBase { timeout?: Duration verify?: boolean repository?: SnapshotRepository - type: string - settings: SnapshotRepositorySettings } export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 22041532c..7911ec596 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -1504,7 +1504,7 @@ export interface SearchInnerHits { fields?: Fields sort?: Sort _source?: SearchSourceConfig - stored_field?: Fields + stored_fields?: Fields track_scores?: boolean version?: boolean } @@ -2337,7 +2337,7 @@ export interface InlineGetKeys { _seq_no?: SequenceNumber _primary_term?: long _routing?: Routing - _source: TDocument + _source?: TDocument } export type InlineGet = InlineGetKeys & { [property: string]: any } @@ -4949,7 +4949,7 @@ export interface MappingDenseVectorIndexOptions { export interface MappingDenseVectorProperty extends MappingPropertyBase { type: 'dense_vector' - dims: integer + dims?: integer similarity?: string index?: boolean index_options?: MappingDenseVectorIndexOptions @@ -5050,6 +5050,9 @@ export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean ignore_z_value?: boolean null_value?: GeoLocation + index?: boolean + on_script_error?: MappingOnScriptError + script?: Script type: 'geo_point' } @@ -5203,6 +5206,7 @@ export interface MappingRankFeatureProperty extends MappingPropertyBase { } export interface MappingRankFeaturesProperty extends MappingPropertyBase { + positive_score_impact?: boolean type: 'rank_features' } @@ -9114,7 +9118,7 @@ export interface EnrichExecutePolicyRequest extends RequestBase { } export interface EnrichExecutePolicyResponse { - status: EnrichExecutePolicyExecuteEnrichPolicyStatus + status?: EnrichExecutePolicyExecuteEnrichPolicyStatus task_id?: TaskId } @@ -10331,7 +10335,7 @@ export interface IndicesDataStreamsStatsDataStreamsStatsItem { data_stream: Name maximum_timestamp: EpochTime store_size?: ByteSize - store_size_bytes: integer + store_size_bytes: long } export interface IndicesDataStreamsStatsRequest extends RequestBase { @@ -10345,7 +10349,7 @@ export interface IndicesDataStreamsStatsResponse { data_stream_count: integer data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] total_store_sizes?: ByteSize - total_store_size_bytes: integer + total_store_size_bytes: long } export interface IndicesDeleteRequest extends RequestBase { @@ -16209,7 +16213,7 @@ export interface SecurityRoleDescriptor { applications?: SecurityApplicationPrivileges[] metadata?: Metadata run_as?: string[] - transient_metadata?: SecurityTransientMetadataConfig + transient_metadata?: Record } export interface SecurityRoleDescriptorRead { @@ -16220,7 +16224,7 @@ export interface SecurityRoleDescriptorRead { applications?: SecurityApplicationPrivileges[] metadata?: Metadata run_as?: string[] - transient_metadata?: SecurityTransientMetadataConfig + 
transient_metadata?: Record } export interface SecurityRoleMapping { @@ -16259,10 +16263,6 @@ export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | Secu export type SecurityTemplateFormat = 'string' | 'json' -export interface SecurityTransientMetadataConfig { - enabled: boolean -} - export interface SecurityUser { email?: string | null full_name?: Name | null @@ -16591,7 +16591,7 @@ export interface SecurityGetRoleRole { indices: SecurityIndicesPrivileges[] metadata: Metadata run_as: string[] - transient_metadata: SecurityTransientMetadataConfig + transient_metadata?: Record applications: SecurityApplicationPrivileges[] role_templates?: SecurityRoleTemplate[] global?: Record>> @@ -16863,7 +16863,7 @@ export interface SecurityPutRoleRequest extends RequestBase { indices?: SecurityIndicesPrivileges[] metadata?: Metadata run_as?: string[] - transient_metadata?: SecurityTransientMetadataConfig + transient_metadata?: Record } } @@ -17262,11 +17262,37 @@ export interface SlmStopRequest extends RequestBase { export type SlmStopResponse = AcknowledgedResponseBase +export interface SnapshotAzureRepository extends SnapshotRepositoryBase { + type: 'azure' + settings: SnapshotAzureRepositorySettings +} + +export interface SnapshotAzureRepositorySettings extends SnapshotRepositorySettingsBase { + client?: string + container?: string + base_path?: string + readonly?: boolean + location_mode?: string +} + export interface SnapshotFileCountSnapshotStats { file_count: integer size_in_bytes: long } +export interface SnapshotGcsRepository extends SnapshotRepositoryBase { + type: 'gcs' + settings: SnapshotGcsRepositorySettings +} + +export interface SnapshotGcsRepositorySettings extends SnapshotRepositorySettingsBase { + bucket: string + client?: string + base_path?: string + readonly?: boolean + application_name?: string +} + export interface SnapshotIndexDetails { shard_count: integer size?: ByteSize @@ -17279,19 +17305,45 @@ export interface SnapshotInfoFeatureState { indices: Indices } -export interface SnapshotRepository { - type: string +export interface SnapshotReadOnlyUrlRepository extends SnapshotRepositoryBase { + type: 'url' + settings: SnapshotReadOnlyUrlRepositorySettings +} + +export interface SnapshotReadOnlyUrlRepositorySettings extends SnapshotRepositorySettingsBase { + http_max_retries?: integer + http_socket_timeout?: Duration + max_number_of_snapshots?: integer + url: string +} + +export type SnapshotRepository = SnapshotAzureRepository | SnapshotGcsRepository | SnapshotS3Repository | SnapshotSharedFileSystemRepository | SnapshotReadOnlyUrlRepository | SnapshotSourceOnlyRepository + +export interface SnapshotRepositoryBase { uuid?: Uuid - settings: SnapshotRepositorySettings } -export interface SnapshotRepositorySettings { - chunk_size?: string - compress?: string | boolean - concurrent_streams?: string | integer - location: string - read_only?: string | boolean - readonly?: string | boolean +export interface SnapshotRepositorySettingsBase { + chunk_size?: ByteSize + compress?: boolean + max_restore_bytes_per_sec?: ByteSize + max_snapshot_bytes_per_sec?: ByteSize +} + +export interface SnapshotS3Repository extends SnapshotRepositoryBase { + type: 's3' + settings: SnapshotS3RepositorySettings +} + +export interface SnapshotS3RepositorySettings extends SnapshotRepositorySettingsBase { + bucket: string + client?: string + base_path?: string + readonly?: boolean + server_side_encryption?: boolean + buffer_size?: ByteSize + canned_acl?: string + storage_class?: string } 
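// A minimal usage sketch of the repository-typed request above, assuming a
// configured `client` instance; the `my_s3_repo` and `my-backup-bucket` names
// are illustrative:
//
//   const repository: SnapshotS3Repository = {
//     type: 's3',
//     settings: { bucket: 'my-backup-bucket', compress: true }
//   }
//   await client.snapshot.createRepository({ name: 'my_s3_repo', repository })
//
// The former top-level `type` and `settings` request keys were removed from
// SnapshotCreateRepositoryRequest, so the repository definition is now passed
// whole under `repository` (or as the request body in typesWithBodyKey).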
export interface SnapshotShardsStats { @@ -17318,6 +17370,17 @@ export interface SnapshotShardsStatsSummaryItem { size_in_bytes: long } +export interface SnapshotSharedFileSystemRepository extends SnapshotRepositoryBase { + type: 'fs' + settings: SnapshotSharedFileSystemRepositorySettings +} + +export interface SnapshotSharedFileSystemRepositorySettings extends SnapshotRepositorySettingsBase { + location: string + max_number_of_snapshots?: integer + readonly?: boolean +} + export interface SnapshotSnapshotIndexStats { shards: Record shards_stats: SnapshotShardsStats @@ -17371,6 +17434,18 @@ export interface SnapshotSnapshotStats { total: SnapshotFileCountSnapshotStats } +export interface SnapshotSourceOnlyRepository extends SnapshotRepositoryBase { + type: 'source' + settings: SnapshotSourceOnlyRepositorySettings +} + +export interface SnapshotSourceOnlyRepositorySettings extends SnapshotRepositorySettingsBase { + delegate_type?: string + max_number_of_snapshots?: integer + read_only?: boolean + readonly?: boolean +} + export interface SnapshotStatus { include_global_state: boolean indices: Record @@ -17437,12 +17512,8 @@ export interface SnapshotCreateRepositoryRequest extends RequestBase { master_timeout?: Duration timeout?: Duration verify?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - repository?: SnapshotRepository - type: string - settings: SnapshotRepositorySettings - } + /** @deprecated The use of the 'body' key has been deprecated, use 'repository' instead. */ + body?: SnapshotRepository } export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase From 24e1f4fb266f26c5db3ed8947900b41d83a735c6 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 19 Mar 2024 17:28:02 +0100 Subject: [PATCH 301/647] Auto-generated code for main (#2156) --- docs/reference.asciidoc | 25 +++++++---- src/api/api/ml.ts | 2 +- src/api/api/security.ts | 2 +- src/api/types.ts | 88 +++++++++++++++++++++++++++---------- src/api/typesWithBodyKey.ts | 88 +++++++++++++++++++++++++++---------- 5 files changed, 149 insertions(+), 56 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 4e74760a0..556db3178 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -3377,7 +3377,7 @@ client.indices.create({ index }) * *Request (object):* ** *`index` (string)*: Name of the index you wish to create. ** *`aliases` (Optional, Record)*: Aliases for the index. -** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })*: Mapping for fields in the index. If specified, this mapping can include: +** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index. If specified, this mapping can include: - Field names - Field data types - Mapping parameters @@ -4234,7 +4234,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. ** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. -** *`properties` (Optional, Record)*: Mapping for a field. 
For new fields, this mapping can include: +** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -4309,7 +4309,7 @@ client.indices.putTemplate({ name }) ** *`aliases` (Optional, Record)*: Aliases for the index. ** *`index_patterns` (Optional, string | string[])*: Array of wildcard expressions used to match the names of indices during creation. -** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })*: Mapping for fields in the index. +** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index. ** *`order` (Optional, number)*: Order in which Elasticsearch applies this template if index matches multiple templates. @@ -4462,7 +4462,7 @@ If specified, Elasticsearch only performs the rollover if the current index sati If this parameter is not specified, Elasticsearch performs the rollover unconditionally. If conditions are specified, at least one of them must be a `max_*` condition. The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. -** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, _data_stream_timestamp })*: Mapping for fields in the index. +** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping parameters. ** *`settings` (Optional, Record)*: Configuration options for the index. Data streams do not support this parameter. @@ -6436,6 +6436,7 @@ so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. For portable models (those that work independent of processor architecture or OS features), leave this field unset. ** *`tags` (Optional, string[])*: An array of tags to organize the model. +** *`prefix_strings` (Optional, { ingest, search })*: Optional prefix strings applied at inference ** *`defer_definition_decompression` (Optional, boolean)*: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. [discrete] @@ -8313,9 +8314,17 @@ client.security.queryApiKeys({ ...
}) ==== Arguments * *Request (object):* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query to filter which API keys to return. -The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `ids`, `prefix`, `wildcard`, and `range`. -You can query all public information associated with an API key. +** *`aggregations` (Optional, Record)*: Any aggregations to run over the corpus of returned API keys. +Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. +This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, +`cardinality`, `value_count`, `composite`, `filter`, and `filters`. +Additionally, aggregations only run over the same subset of fields that query works with. +** *`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which API keys to return. +If the query parameter is missing, it is equivalent to a `match_all` query. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +You can query the following public information associated with an API key: `id`, `type`, `name`, +`creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. ** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the `search_after` parameter. @@ -8325,7 +8334,7 @@ In addition, sort can also be applied to the `_doc` field to sort by index order By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Search after definition -** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors associated with the API key. +** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors. 
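The arguments above are enough for a short, hedged sketch of the new aggregations support. The query and aggregation fields used below (`invalidated`, `username`) come from the documented list of queryable API-key fields; the bucket name `keys_per_user` is an arbitrary label invented for this example.

[source,js]
----
// Illustrative only: count active API keys per user with the new `aggs`
// parameter. The response carries aggregation buckets alongside (or, with
// size: 0, instead of) the matching keys.
const response = await client.security.queryApiKeys({
  query: { term: { invalidated: false } },
  aggs: {
    keys_per_user: { terms: { field: 'username' } }
  },
  size: 0 // we only want the aggregation, not the keys themselves
})
console.log(response.aggregations.keys_per_user.buckets)
----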
[discrete] diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index e0f57a2ad..d71a08d05 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1821,7 +1821,7 @@ export default class Ml { async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'platform_architecture', 'tags'] + const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'platform_architecture', 'tags', 'prefix_strings'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 7668d9ee2..0d2d613bc 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -1502,7 +1502,7 @@ export default class Security { async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'from', 'sort', 'size', 'search_after'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/types.ts b/src/api/types.ts index 94e2cd7aa..f6b83fd75 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -5095,6 +5095,7 @@ export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase export interface MappingObjectProperty extends MappingCorePropertyBase { enabled?: boolean + subobjects?: boolean type?: 'object' } @@ -5269,6 +5270,7 @@ export interface MappingTypeMapping { _source?: MappingSourceField runtime?: Record enabled?: boolean + subobjects?: boolean _data_stream_timestamp?: MappingDataStreamTimestamp } @@ -9747,7 +9749,7 @@ export interface IndicesIndexSettingsKeys { settings?: IndicesIndexSettings time_series?: IndicesIndexSettingsTimeSeries queries?: IndicesQueries - similarity?: IndicesSettingsSimilarity + similarity?: Record mapping?: IndicesMappingLimitSettings 'indexing.slowlog'?: IndicesIndexingSlowlogSettings indexing_pressure?: IndicesIndexingPressure @@ -9925,55 +9927,52 @@ export interface IndicesSettingsSearch { slowlog?: IndicesSlowlogSettings } -export interface IndicesSettingsSimilarity { - bm25?: IndicesSettingsSimilarityBm25 - dfi?: IndicesSettingsSimilarityDfi - dfr?: IndicesSettingsSimilarityDfr - ib?: IndicesSettingsSimilarityIb - lmd?: IndicesSettingsSimilarityLmd - lmj?: IndicesSettingsSimilarityLmj - scripted_tfidf?: IndicesSettingsSimilarityScriptedTfidf -} +export type IndicesSettingsSimilarity = IndicesSettingsSimilarityBm25 | IndicesSettingsSimilarityBoolean | IndicesSettingsSimilarityDfi | IndicesSettingsSimilarityDfr | IndicesSettingsSimilarityIb | IndicesSettingsSimilarityLmd | IndicesSettingsSimilarityLmj | IndicesSettingsSimilarityScripted export interface IndicesSettingsSimilarityBm25 { - b: 
double - discount_overlaps: boolean - k1: double type: 'BM25' + b?: double + discount_overlaps?: boolean + k1?: double +} + +export interface IndicesSettingsSimilarityBoolean { + type: 'boolean' } export interface IndicesSettingsSimilarityDfi { - independence_measure: DFIIndependenceMeasure type: 'DFI' + independence_measure: DFIIndependenceMeasure } export interface IndicesSettingsSimilarityDfr { + type: 'DFR' after_effect: DFRAfterEffect basic_model: DFRBasicModel normalization: Normalization - type: 'DFR' } export interface IndicesSettingsSimilarityIb { + type: 'IB' distribution: IBDistribution lambda: IBLambda normalization: Normalization - type: 'IB' } export interface IndicesSettingsSimilarityLmd { - mu: integer type: 'LMDirichlet' + mu?: double } export interface IndicesSettingsSimilarityLmj { - lambda: double type: 'LMJelinekMercer' + lambda?: double } -export interface IndicesSettingsSimilarityScriptedTfidf { - script: Script +export interface IndicesSettingsSimilarityScripted { type: 'scripted' + script: Script + weight_script?: Script } export interface IndicesSlowlogSettings { @@ -13216,8 +13215,8 @@ export interface MlTrainedModelLocationIndex { } export interface MlTrainedModelPrefixStrings { - ingest: string - search: string + ingest?: string + search?: string } export interface MlTrainedModelSizeStats { @@ -14154,6 +14153,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { model_size_bytes?: long platform_architecture?: string tags?: string[] + prefix_strings?: MlTrainedModelPrefixStrings } export type MlPutTrainedModelResponse = MlTrainedModelConfig @@ -16554,9 +16554,50 @@ export interface SecurityPutUserResponse { created: boolean } +export type SecurityQueryApiKeysAPIKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate + +export interface SecurityQueryApiKeysAPIKeyAggregationContainer { + aggregations?: Record + aggs?: Record + meta?: Metadata + cardinality?: AggregationsCardinalityAggregation + composite?: AggregationsCompositeAggregation + date_range?: AggregationsDateRangeAggregation + filter?: SecurityQueryApiKeysAPIKeyQueryContainer + filters?: SecurityQueryApiKeysAPIKeyFiltersAggregation + missing?: AggregationsMissingAggregation + range?: AggregationsRangeAggregation + terms?: AggregationsTermsAggregation + value_count?: AggregationsValueCountAggregation +} + +export interface SecurityQueryApiKeysAPIKeyFiltersAggregation extends AggregationsBucketAggregationBase { + filters?: AggregationsBuckets + other_bucket?: boolean + other_bucket_key?: string + keyed?: boolean +} + +export interface SecurityQueryApiKeysAPIKeyQueryContainer { + bool?: QueryDslBoolQuery + exists?: QueryDslExistsQuery + ids?: QueryDslIdsQuery + match?: Partial> + match_all?: QueryDslMatchAllQuery + prefix?: Partial> + range?: Partial> + simple_query_string?: QueryDslSimpleQueryStringQuery + term?: Partial> + terms?: QueryDslTermsQuery + wildcard?: Partial> +} + export interface SecurityQueryApiKeysRequest extends RequestBase { with_limited_by?: boolean - query?: QueryDslQueryContainer + aggregations?: Record + /** @alias aggregations */ + aggs?: Record + query?: 
SecurityQueryApiKeysAPIKeyQueryContainer from?: integer sort?: Sort size?: integer @@ -16567,6 +16608,7 @@ export interface SecurityQueryApiKeysResponse { total: integer count: integer api_keys: SecurityApiKey[] + aggregations?: Record } export interface SecuritySamlAuthenticateRequest extends RequestBase { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 7911ec596..22af6bb62 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -5168,6 +5168,7 @@ export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase export interface MappingObjectProperty extends MappingCorePropertyBase { enabled?: boolean + subobjects?: boolean type?: 'object' } @@ -5342,6 +5343,7 @@ export interface MappingTypeMapping { _source?: MappingSourceField runtime?: Record enabled?: boolean + subobjects?: boolean _data_stream_timestamp?: MappingDataStreamTimestamp } @@ -9873,7 +9875,7 @@ export interface IndicesIndexSettingsKeys { settings?: IndicesIndexSettings time_series?: IndicesIndexSettingsTimeSeries queries?: IndicesQueries - similarity?: IndicesSettingsSimilarity + similarity?: Record mapping?: IndicesMappingLimitSettings 'indexing.slowlog'?: IndicesIndexingSlowlogSettings indexing_pressure?: IndicesIndexingPressure @@ -10051,55 +10053,52 @@ export interface IndicesSettingsSearch { slowlog?: IndicesSlowlogSettings } -export interface IndicesSettingsSimilarity { - bm25?: IndicesSettingsSimilarityBm25 - dfi?: IndicesSettingsSimilarityDfi - dfr?: IndicesSettingsSimilarityDfr - ib?: IndicesSettingsSimilarityIb - lmd?: IndicesSettingsSimilarityLmd - lmj?: IndicesSettingsSimilarityLmj - scripted_tfidf?: IndicesSettingsSimilarityScriptedTfidf -} +export type IndicesSettingsSimilarity = IndicesSettingsSimilarityBm25 | IndicesSettingsSimilarityBoolean | IndicesSettingsSimilarityDfi | IndicesSettingsSimilarityDfr | IndicesSettingsSimilarityIb | IndicesSettingsSimilarityLmd | IndicesSettingsSimilarityLmj | IndicesSettingsSimilarityScripted export interface IndicesSettingsSimilarityBm25 { - b: double - discount_overlaps: boolean - k1: double type: 'BM25' + b?: double + discount_overlaps?: boolean + k1?: double +} + +export interface IndicesSettingsSimilarityBoolean { + type: 'boolean' } export interface IndicesSettingsSimilarityDfi { - independence_measure: DFIIndependenceMeasure type: 'DFI' + independence_measure: DFIIndependenceMeasure } export interface IndicesSettingsSimilarityDfr { + type: 'DFR' after_effect: DFRAfterEffect basic_model: DFRBasicModel normalization: Normalization - type: 'DFR' } export interface IndicesSettingsSimilarityIb { + type: 'IB' distribution: IBDistribution lambda: IBLambda normalization: Normalization - type: 'IB' } export interface IndicesSettingsSimilarityLmd { - mu: integer type: 'LMDirichlet' + mu?: double } export interface IndicesSettingsSimilarityLmj { - lambda: double type: 'LMJelinekMercer' + lambda?: double } -export interface IndicesSettingsSimilarityScriptedTfidf { - script: Script +export interface IndicesSettingsSimilarityScripted { type: 'scripted' + script: Script + weight_script?: Script } export interface IndicesSlowlogSettings { @@ -13404,8 +13403,8 @@ export interface MlTrainedModelLocationIndex { } export interface MlTrainedModelPrefixStrings { - ingest: string - search: string + ingest?: string + search?: string } export interface MlTrainedModelSizeStats { @@ -14417,6 +14416,7 @@ export interface MlPutTrainedModelRequest extends RequestBase { model_size_bytes?: long platform_architecture?: string tags?: 
string[] + prefix_strings?: MlTrainedModelPrefixStrings } } @@ -16910,11 +16910,52 @@ export interface SecurityPutUserResponse { created: boolean } +export type SecurityQueryApiKeysAPIKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate + +export interface SecurityQueryApiKeysAPIKeyAggregationContainer { + aggregations?: Record + aggs?: Record + meta?: Metadata + cardinality?: AggregationsCardinalityAggregation + composite?: AggregationsCompositeAggregation + date_range?: AggregationsDateRangeAggregation + filter?: SecurityQueryApiKeysAPIKeyQueryContainer + filters?: SecurityQueryApiKeysAPIKeyFiltersAggregation + missing?: AggregationsMissingAggregation + range?: AggregationsRangeAggregation + terms?: AggregationsTermsAggregation + value_count?: AggregationsValueCountAggregation +} + +export interface SecurityQueryApiKeysAPIKeyFiltersAggregation extends AggregationsBucketAggregationBase { + filters?: AggregationsBuckets + other_bucket?: boolean + other_bucket_key?: string + keyed?: boolean +} + +export interface SecurityQueryApiKeysAPIKeyQueryContainer { + bool?: QueryDslBoolQuery + exists?: QueryDslExistsQuery + ids?: QueryDslIdsQuery + match?: Partial> + match_all?: QueryDslMatchAllQuery + prefix?: Partial> + range?: Partial> + simple_query_string?: QueryDslSimpleQueryStringQuery + term?: Partial> + terms?: QueryDslTermsQuery + wildcard?: Partial> +} + export interface SecurityQueryApiKeysRequest extends RequestBase { with_limited_by?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { - query?: QueryDslQueryContainer + aggregations?: Record + /** @alias aggregations */ + aggs?: Record + query?: SecurityQueryApiKeysAPIKeyQueryContainer from?: integer sort?: Sort size?: integer @@ -16926,6 +16967,7 @@ export interface SecurityQueryApiKeysResponse { total: integer count: integer api_keys: SecurityApiKey[] + aggregations?: Record } export interface SecuritySamlAuthenticateRequest extends RequestBase { From 8b9ca79d5b9cabeb9db0af0b73431e5e03e505dd Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 20 Mar 2024 13:45:16 -0500 Subject: [PATCH 302/647] [Backport main] Update bulk.asciidoc change 'date' property to 'time' to match index creation (#2161) (cherry picked from commit 747171097dbee1c52f44d71f2eab52bd0a9e378e) Co-authored-by: SuperGingie --- docs/examples/bulk.asciidoc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/examples/bulk.asciidoc b/docs/examples/bulk.asciidoc index 8d470a305..e35052e0e 100644 --- a/docs/examples/bulk.asciidoc +++ b/docs/examples/bulk.asciidoc @@ -36,27 +36,27 @@ async function run () { id: 1, text: 'If I fall, don\'t bring me back.', user: 'jon', - date: new Date() + time: new Date() }, { id: 2, text: 'Winter is coming', user: 'ned', - date: new Date() + time: new Date() }, { id: 3, text: 'A Lannister always pays his debts.', user: 'tyrion', - date: new Date() + time: new Date() }, { id: 4, text: 'I am the blood of the dragon.', user: 'daenerys', - date: new Date() + time: new Date() }, { id: 5, // change this value to a string to see the bulk response with errors text: 'A girl is Arya Stark of Winterfell. And I\'m going home.', user: 'arya', - date: new Date() + time: new Date() }] const operations = dataset.flatMap(doc => [{ index: { _index: 'tweets' } }, doc]) From 05e3139f80a853a435d19a9ad350ced652894e72 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 20 Mar 2024 16:32:15 -0500 Subject: [PATCH 303/647] Ensure new connections inherit client's set defaults (#2159) * Add test confirming the issue See https://github.com/elastic/elasticsearch-js/issues/1791 * fix: ensure new connections inherit the client instance's defaults for https://github.com/elastic/elasticsearch-js/issues/1791 --- src/client.ts | 34 ++++++++++++++++++++++++++++--- test/unit/client.test.ts | 43 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 73 insertions(+), 4 deletions(-) diff --git a/src/client.ts b/src/client.ts index 50ba4942f..12a88cc24 100644 --- a/src/client.ts +++ b/src/client.ts @@ -44,7 +44,7 @@ import { Context } from '@elastic/transport/lib/types' import { RedactionOptions } from '@elastic/transport/lib/Transport' -import BaseConnection, { prepareHeaders } from '@elastic/transport/lib/connection/BaseConnection' +import BaseConnection, { prepareHeaders, ConnectionOptions } from '@elastic/transport/lib/connection/BaseConnection' import SniffingTransport from './sniffingTransport' import Helpers from './helpers' import API from './api' @@ -237,7 +237,35 @@ export default class Client extends API { diagnostic: this.diagnostic, caFingerprint: options.caFingerprint }) - this.connectionPool.addConnection(options.node ?? options.nodes) + + // ensure default connection values are inherited when creating new connections + // see https://github.com/elastic/elasticsearch-js/issues/1791 + const nodes = options.node ?? options.nodes + let nodeOptions: Array = Array.isArray(nodes) ? 
nodes : [nodes] + type ConnectionDefaults = Record + nodeOptions = nodeOptions.map(opt => { + const { tls, headers, auth, requestTimeout: timeout, agent, proxy, caFingerprint } = options + let defaults: ConnectionDefaults = { tls, headers, auth, timeout, agent, proxy, caFingerprint } + + // strip undefined values from defaults + defaults = Object.keys(defaults).reduce((acc: ConnectionDefaults, key) => { + const val = defaults[key] + if (val !== undefined) acc[key] = val + return acc + }, {}) + + let newOpts + if (typeof opt === 'string') { + newOpts = { + url: new URL(opt) + } + } else { + newOpts = opt + } + + return { ...defaults, ...newOpts } + }) + this.connectionPool.addConnection(nodeOptions) } this.transport = new options.Transport({ @@ -282,7 +310,7 @@ export default class Client extends API { // Merge the new options with the initial ones // @ts-expect-error kChild symbol is for internal use only const options: ClientOptions = Object.assign({}, this[kInitialOptions], opts) - // Pass to the child client the parent instances that cannot be overriden + // Pass to the child client the parent instances that cannot be overridden // @ts-expect-error kInitialOptions symbol is for internal use only options[kChild] = { connectionPool: this.connectionPool, diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts index b9481d19b..b896946ad 100644 --- a/test/unit/client.test.ts +++ b/test/unit/client.test.ts @@ -17,9 +17,11 @@ * under the License. */ +import * as http from 'http' import { test } from 'tap' import { URL } from 'url' -import { connection } from '../utils' +import FakeTimers from '@sinonjs/fake-timers' +import { buildServer, connection } from '../utils' import { Client, errors } from '../..' import * as symbols from '@elastic/transport/lib/symbols' import { BaseConnectionPool, CloudConnectionPool, WeightedConnectionPool } from '@elastic/transport' @@ -441,3 +443,42 @@ test('user agent is in the correct format', t => { t.ok(/^\d+\.\d+\.\d+/.test(agentSplit[0].split('/')[1])) t.end() }) + +test('Ensure new client instance stores requestTimeout for each connection', t => { + const client = new Client({ + node: { url: new URL('http://localhost:9200/') }, + requestTimeout: 60000, + }) + t.equal(client.connectionPool.connections[0].timeout, 60000) + t.end() +}) + +test('Ensure new client does not time out at default (30s) when client sets requestTimeout', async t => { + const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'] }) + t.teardown(() => clock.uninstall()) + + function handler (_req: http.IncomingMessage, res: http.ServerResponse) { + setTimeout(() => { + t.pass('timeout ended') + res.setHeader('content-type', 'application/json') + res.end(JSON.stringify({ success: true })) + }, 31000) // default is 30000 + clock.runToLast() + } + + const [{ port }, server] = await buildServer(handler) + + const client = new Client({ + node: `http://localhost:${port}`, + requestTimeout: 60000 + }) + + try { + await client.transport.request({ method: 'GET', path: '/' }) + } catch (error) { + t.fail('timeout error hit') + } finally { + server.stop() + t.end() + } +}) From 29a0e53978515679d0d55d5e110991da6a56c2b7 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 21 Mar 2024 09:57:07 -0500 Subject: [PATCH 304/647] Update changelog for 8.13 (#2164) --- docs/changelog.asciidoc | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 0daeee5d7..33b6128c9 100644 --- 
a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,26 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.13.0 + +[discrete] +==== Features + +[discrete] +===== Support for Elasticsearch `v8.13.0` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.13/release-notes-8.13.0.html[here]. + +[discrete] +==== Fixes + +[discrete] +===== Ensure new connections inherit client's set defaults https://github.com/elastic/elasticsearch-js/pull/2159[#2159] + +When instantiating a client, any connection-related defaults (e.g. `requestTimeout`) set on that client instance would not be inherited by nodes if they were entered as strings rather than a `ConnectionOptions` object. + [discrete] === 8.12.2 @@ -27,7 +47,7 @@ The failing state could be reached when a server's response times are slower tha === 8.12.0 [discrete] -=== Features +==== Features [discrete] ===== Support for Elasticsearch `v8.12.0` @@ -39,7 +59,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/8.12/release-notes-8.12. === 8.11.0 [discrete] -=== Features +==== Features [discrete] ===== Support for Elasticsearch `v8.11.0` @@ -58,7 +78,7 @@ See <> for more information. === 8.10.0 [discrete] -=== Features +==== Features [discrete] ===== Support for Elasticsearch `v8.10.0` From d430aecdbd4a0f603a4cb62b2c247d2fdb53b509 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 21 Mar 2024 12:07:24 -0500 Subject: [PATCH 305/647] Move make.sh to .buildkite (#2165) --- .ci/Dockerfile => .buildkite/Dockerfile-make | 0 .buildkite/functions/cleanup.sh | 2 +- .buildkite/functions/wait-for-container.sh | 2 +- {.ci => .buildkite}/make.mjs | 0 {.ci => .buildkite}/make.sh | 14 ++-- .buildkite/pull-requests.json | 1 - .github/workflows/nodejs.yml | 2 +- .npmignore | 1 - Makefile | 11 --- scripts/es-docker-platinum.sh | 77 -------------------- scripts/es-docker.sh | 40 ---------- test/integration/README.md | 4 +- 12 files changed, 12 insertions(+), 142 deletions(-) rename .ci/Dockerfile => .buildkite/Dockerfile-make (100%) rename {.ci => .buildkite}/make.mjs (100%) rename {.ci => .buildkite}/make.sh (95%) delete mode 100644 Makefile delete mode 100755 scripts/es-docker-platinum.sh delete mode 100755 scripts/es-docker.sh diff --git a/.ci/Dockerfile b/.buildkite/Dockerfile-make similarity index 100% rename from .ci/Dockerfile rename to .buildkite/Dockerfile-make diff --git a/.buildkite/functions/cleanup.sh b/.buildkite/functions/cleanup.sh index 4c25166fb..98dfe4e14 100755 --- a/.buildkite/functions/cleanup.sh +++ b/.buildkite/functions/cleanup.sh @@ -2,7 +2,7 @@ # # Shared cleanup routines between different steps # -# Please source .ci/functions/imports.sh as a whole not just this file +# Please source .buildkite/functions/imports.sh as a whole not just this file # # Version 1.0.0 # - Initial version after refactor diff --git a/.buildkite/functions/wait-for-container.sh b/.buildkite/functions/wait-for-container.sh index 1a721b588..bbbf4ea63 100755 --- a/.buildkite/functions/wait-for-container.sh +++ b/.buildkite/functions/wait-for-container.sh @@ -2,7 +2,7 @@ # # Exposes a routine scripts can call to wait for a container if that container set up a health command # -# Please source .ci/functions/imports.sh as a whole not just this file +# Please source .buildkite/functions/imports.sh as a whole not just this file # # Version 1.0.1 # - Initial version after refactor diff --git a/.ci/make.mjs b/.buildkite/make.mjs similarity index 100% rename from .ci/make.mjs rename to .buildkite/make.mjs 
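To make the connection-defaults fix recorded in the 8.13.0 changelog above (#2159) concrete, here is a minimal sketch mirroring the assertion added in `test/unit/client.test.ts`; the node URL is only a placeholder:

[source,js]
----
const { Client } = require('@elastic/elasticsearch')

// A node given as a plain string previously caused client-level defaults
// such as `requestTimeout` to be dropped when its connection was created.
const client = new Client({
  node: 'http://localhost:9200/',
  requestTimeout: 60000
})

// With the fix, each connection inherits the client's timeout.
console.log(client.connectionPool.connections[0].timeout) // 60000
----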
diff --git a/.ci/make.sh b/.buildkite/make.sh similarity index 95% rename from .ci/make.sh rename to .buildkite/make.sh index c3d9f5b4f..ae54050ae 100755 --- a/.ci/make.sh +++ b/.buildkite/make.sh @@ -3,7 +3,7 @@ # # Build entry script for elasticsearch-js # -# Must be called: ./.ci/make.sh +# Must be called: ./.buildkite/make.sh # # Version: 1.1.0 # @@ -34,8 +34,8 @@ STACK_VERSION=$VERSION set -euo pipefail product="elastic/elasticsearch-js" -output_folder=".ci/output" -codegen_folder=".ci/output" +output_folder=".buildkite/output" +codegen_folder=".buildkite/output" OUTPUT_DIR="$repo/${output_folder}" NODE_JS_VERSION=18 WORKFLOW=${WORKFLOW-staging} @@ -131,7 +131,7 @@ esac echo -e "\033[34;1mINFO: building $product container\033[0m" docker build \ - --file .ci/Dockerfile \ + --file .buildkite/Dockerfile-make \ --tag "$product" \ --build-arg NODE_JS_VERSION="$NODE_JS_VERSION" \ --build-arg "BUILDER_UID=$(id -u)" \ @@ -156,7 +156,7 @@ if [[ -z "${BUILDKITE+x}" ]] && [[ -z "${CI+x}" ]] && [[ -z "${GITHUB_ACTIONS+x} --rm \ $product \ /bin/bash -c "mkdir -p /usr/src/elastic-client-generator-js/output && \ - node .ci/make.mjs --task $TASK ${TASK_ARGS[*]}" + node .buildkite/make.mjs --task $TASK ${TASK_ARGS[*]}" else echo -e "\033[34;1mINFO: Running in CI mode" docker run \ @@ -171,7 +171,7 @@ else git clone https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-js.git && \ mkdir -p /usr/src/elastic-client-generator-js/output && \ cd /usr/src/elasticsearch-js && \ - node .ci/make.mjs --task $TASK ${TASK_ARGS[*]}" + node .buildkite/make.mjs --task $TASK ${TASK_ARGS[*]}" fi # ------------------------------------------------------- # @@ -179,7 +179,7 @@ fi # ------------------------------------------------------- # if [[ "$CMD" == "assemble" ]]; then - if compgen -G ".ci/output/*" > /dev/null; then + if compgen -G ".buildkite/output/*" > /dev/null; then echo -e "\033[32;1mTARGET: successfully assembled client v$VERSION\033[0m" else echo -e "\033[31;1mTARGET: assemble failed, empty workspace!\033[0m" diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index 794d8624c..59c46cd87 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -9,7 +9,6 @@ "\\.md$", "\\.asciidoc$", "^docs\\/", - "^\\.ci\\/", "^scripts\\/", "^catalog-info\\.yaml$", "^test\\/unit\\/", diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index f8a4165c5..132294174 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -17,7 +17,7 @@ jobs: with: filters: | src-only: - - '!(**/*.{md,asciidoc,txt}|*.{md,asciidoc,txt}|{docs,.ci,.buildkite,scripts}/**/*|catalog-info.yaml)' + - '!(**/*.{md,asciidoc,txt}|*.{md,asciidoc,txt}|{docs,.buildkite,scripts}/**/*|catalog-info.yaml)' - '.github/workflows/**' test: diff --git a/.npmignore b/.npmignore index 3548958d0..2e604be0f 100644 --- a/.npmignore +++ b/.npmignore @@ -64,7 +64,6 @@ test scripts # ci configuration -.ci .travis.yml .buildkite certs diff --git a/Makefile b/Makefile deleted file mode 100644 index be93e1de9..000000000 --- a/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -.PHONY: integration-setup -integration-setup: integration-cleanup - DETACH=true .ci/run-elasticsearch.sh - -.PHONY: integration-cleanup -integration-cleanup: - docker container rm --force --volumes instance || true - -.PHONY: integration -integration: integration-setup - npm run test:integration diff --git a/scripts/es-docker-platinum.sh b/scripts/es-docker-platinum.sh deleted file mode 100755 index 
e5626a30c..000000000 --- a/scripts/es-docker-platinum.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash - -# Images are cached locally, it may be needed -# to delete an old image and download again -# the latest snapshot. - -repo=$(pwd) -testnodecrt="/.ci/certs/testnode.crt" -testnodekey="/.ci/certs/testnode.key" -cacrt="/.ci/certs/ca.crt" - -# pass `--clean` to reemove the old snapshot -if [ "$1" == "--clean" ]; then - docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT') -fi - -# Create the 'elastic' network if doesn't exist -exec docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null - -if [ "$1" == "--detach" ]; then - exec docker run \ - --rm \ - -e "node.attr.testattr=test" \ - -e "path.repo=/tmp" \ - -e "repositories.url.allowed_urls=http://snapshot.*" \ - -e "discovery.type=single-node" \ - -e "action.destructive_requires_name=false" \ - -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \ - -e "ELASTIC_PASSWORD=changeme" \ - -e "xpack.security.enabled=true" \ - -e "xpack.license.self_generated.type=trial" \ - -e "xpack.security.http.ssl.enabled=true" \ - -e "xpack.security.http.ssl.verification_mode=certificate" \ - -e "xpack.security.http.ssl.key=certs/testnode.key" \ - -e "xpack.security.http.ssl.certificate=certs/testnode.crt" \ - -e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \ - -e "xpack.security.transport.ssl.enabled=true" \ - -e "xpack.security.transport.ssl.key=certs/testnode.key" \ - -e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \ - -e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \ - -v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \ - -v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \ - -v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \ - -p 9200:9200 \ - --detach \ - --network=elastic \ - --name=elasticsearch \ - docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT -else - exec docker run \ - --rm \ - -e "node.attr.testattr=test" \ - -e "path.repo=/tmp" \ - -e "repositories.url.allowed_urls=http://snapshot.*" \ - -e "discovery.type=single-node" \ - -e "action.destructive_requires_name=false" \ - -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \ - -e "ELASTIC_PASSWORD=changeme" \ - -e "xpack.security.enabled=true" \ - -e "xpack.license.self_generated.type=trial" \ - -e "xpack.security.http.ssl.enabled=true" \ - -e "xpack.security.http.ssl.verification_mode=certificate" \ - -e "xpack.security.http.ssl.key=certs/testnode.key" \ - -e "xpack.security.http.ssl.certificate=certs/testnode.crt" \ - -e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \ - -e "xpack.security.transport.ssl.enabled=true" \ - -e "xpack.security.transport.ssl.key=certs/testnode.key" \ - -e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \ - -e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \ - -v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \ - -v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \ - -v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \ - -p 9200:9200 \ - --network=elastic \ - --name=elasticsearch \ - docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT -fi diff --git a/scripts/es-docker.sh b/scripts/es-docker.sh deleted file mode 100755 index 8bdc5633f..000000000 --- a/scripts/es-docker.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -# Images are cached locally, it may be needed -# to delete an old image and 
download again -# the latest snapshot. - -# pass `--clean` to reemove the old snapshot -if [ "$1" == "--clean" ]; then - docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT') -fi - -# Create the 'elastic' network if doesn't exist -exec docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null - -if [ "$1" == "--detach" ]; then - exec docker run \ - --rm \ - -e "node.attr.testattr=test" \ - -e "path.repo=/tmp" \ - -e "repositories.url.allowed_urls=http://snapshot.*" \ - -e "discovery.type=single-node" \ - -e "action.destructive_requires_name=false" \ - -p 9200:9200 \ - --detach \ - --network=elastic \ - --name=elasticsearch \ - docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT -else - exec docker run \ - --rm \ - -e "node.attr.testattr=test" \ - -e "path.repo=/tmp" \ - -e "repositories.url.allowed_urls=http://snapshot.*" \ - -e "discovery.type=single-node" \ - -e "action.destructive_requires_name=false" \ - -p 9200:9200 \ - --network=elastic \ - --name=elasticsearch \ - docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT -fi diff --git a/test/integration/README.md b/test/integration/README.md index 36b011975..3b0218f7c 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -23,7 +23,7 @@ The specification does not allow the test to be run in parallel, so it might tak ### Running locally If you want to run the integration tests on your development machine, you must have an Elasticsearch instance running first. -A local instance can be spun up in a Docker container by running the [`.ci/run-elasticsearch.sh`](/.ci/run-elasticsearch.sh) script. +A local instance can be spun up in a Docker container by running the [`.buildkite/run-elasticsearch.sh`](/.buildkite/run-elasticsearch.sh) script. This is the same script CI jobs use to run Elasticsearch for integration tests, so your results should be relatively consistent. To simplify the process of starting a container, testing, and cleaning up the container, you can run the `make integration` target: @@ -35,7 +35,7 @@ export TEST_SUITE=free # can be `free` or `platinum` make integration ``` -If Elasticsearch doesn't come up, run `make integration-cleanup` and then `DETACH=false .ci/run-elasticsearch.sh` manually to read the startup logs. +If Elasticsearch doesn't come up, run `make integration-cleanup` and then `DETACH=false .buildkite/run-elasticsearch.sh` manually to read the startup logs. If you get an error about `vm.max_map_count` being too low, run `sudo sysctl -w vm.max_map_count=262144` to update the setting until the next reboot, or `sudo sysctl -w vm.max_map_count=262144; echo 'vm.max_map_count=262144' | sudo tee -a /etc/sysctl.conf` to update the setting permanently. 
From d61d54a811e199a3f5e338c65baf382045d2c0de Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 21 Mar 2024 16:14:53 -0500 Subject: [PATCH 306/647] Action to apply stack client patches to serverless (#2169) * GitHub action for applying stack client patches to serverless * Drop unnecessary comment --- .github/workflows/serverless-patch.yml | 45 ++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 .github/workflows/serverless-patch.yml diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml new file mode 100644 index 000000000..c29e0312c --- /dev/null +++ b/.github/workflows/serverless-patch.yml @@ -0,0 +1,45 @@ +--- +name: Apply PR changes to serverless +on: + pull_request_target: + types: + - closed + - labeled + +jobs: + backport: + name: Backport + runs-on: ubuntu-latest + # Only react to merged PRs for security reasons. + # See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target. + if: > + github.event.pull_request.merged + && ( + ( + github.event.action == 'closed' + && contains(github.event.pull_request.labels.*.name, 'apply-to-serverless') + ) + || + ( + github.event.action == 'labeled' + && github.event.label.name == 'apply-to-serverless' + ) + ) + steps: + - uses: actions/checkout@v4 + - name: Generate patch file + run: | + git format-patch -1 --stdout ${{ github.event.pull_request.merge_commit_sha }} > /tmp/patch.diff + - uses: actions/checkout@v4 + with: + repository: elastic/elasticsearch-serverless-js + ref: main + - name: Apply patch to serverless + run: | + git checkout -b apply-patch-${{ github.event.pull_request.id }} + git apply -C1 --recount --reject /tmp/patch.diff + comment='Patch applied from elastic/elasticsearch-js#${{ github.event.pull_request.id }}' + for f in $(find . -name '*.rej'); do + comment="$comment\n\n## Rejected patch \`$f`\:\n\`\`\`\n$(cat $f)\n\`\`\`" + done + gh pr create -t "Apply PR changes from elastic/elasticsearch-js#${{ github.event.pull_request.id }}" --body "$comment" From b77bdf2a79c024d7923725060cf4157687c0add3 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 21 Mar 2024 16:20:12 -0500 Subject: [PATCH 307/647] Ensure patch can generate by checking out main branch (#2170) --- .github/workflows/serverless-patch.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index c29e0312c..0cf512af5 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -7,8 +7,8 @@ on: - labeled jobs: - backport: - name: Backport + apply-patch: + name: Apply patch runs-on: ubuntu-latest # Only react to merged PRs for security reasons. # See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target. 
@@ -27,6 +27,9 @@ jobs: ) steps: - uses: actions/checkout@v4 + with: + repository: elastic/elasticsearch-js + ref: main - name: Generate patch file run: | git format-patch -1 --stdout ${{ github.event.pull_request.merge_commit_sha }} > /tmp/patch.diff From 8afdec052a3fd1a1d4c5db08a29498dbb0a55cf5 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 21 Mar 2024 16:25:09 -0500 Subject: [PATCH 308/647] Check out merge commit before creating patch (#2171) --- .github/workflows/serverless-patch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 0cf512af5..d0f2aed39 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -29,7 +29,7 @@ jobs: - uses: actions/checkout@v4 with: repository: elastic/elasticsearch-js - ref: main + ref: '${{ github.event.pull_request.merge_commit_sha }}' - name: Generate patch file run: | git format-patch -1 --stdout ${{ github.event.pull_request.merge_commit_sha }} > /tmp/patch.diff From 1d84468762643ba5504f9d114ecf403f5aec8ab7 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 21 Mar 2024 16:33:43 -0500 Subject: [PATCH 309/647] Move between two repositories more cleanly during patch (#2172) * fix: checkout multiple repos cleanly * fix: ensure git apply exits cleanly --- .github/workflows/serverless-patch.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index d0f2aed39..8f204bbea 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -30,17 +30,21 @@ jobs: with: repository: elastic/elasticsearch-js ref: '${{ github.event.pull_request.merge_commit_sha }}' - - name: Generate patch file - run: | - git format-patch -1 --stdout ${{ github.event.pull_request.merge_commit_sha }} > /tmp/patch.diff + path: elasticsearch-js - uses: actions/checkout@v4 with: repository: elastic/elasticsearch-serverless-js ref: main - - name: Apply patch to serverless + path: elasticsearch-serverless-js + - name: Generate patch file + run: | + cd $GITHUB_WORKSPACE/elasticsearch-js + git format-patch -1 --stdout ${{ github.event.pull_request.merge_commit_sha }} > /tmp/patch.diff + - name: Apply patch file run: | + cd $GITHUB_WORKSPACE/elasticsearch-serverless-js git checkout -b apply-patch-${{ github.event.pull_request.id }} - git apply -C1 --recount --reject /tmp/patch.diff + git apply -C1 --recount --reject /tmp/patch.diff || exit 0 comment='Patch applied from elastic/elasticsearch-js#${{ github.event.pull_request.id }}' for f in $(find . 
-name '*.rej'); do comment="$comment\n\n## Rejected patch \`$f`\:\n\`\`\`\n$(cat $f)\n\`\`\`" From 86d89a47a0e6a72076e68859418b77deed3ba667 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Mar 2024 12:09:39 -0500 Subject: [PATCH 310/647] Bump version to 8.13.0 (#2173) --- .buildkite/pipeline.yml | 2 +- catalog-info.yaml | 8 ++++---- package.json | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 1dca14548..cf7d13854 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -6,7 +6,7 @@ steps: env: NODE_VERSION: "{{ matrix.nodejs }}" TEST_SUITE: "{{ matrix.suite }}" - STACK_VERSION: 8.12.0-SNAPSHOT + STACK_VERSION: 8.13.0-SNAPSHOT matrix: setup: suite: diff --git a/catalog-info.yaml b/catalog-info.yaml index 80c943cd8..e069ddc74 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -42,9 +42,9 @@ spec: main_semi_daily: branch: 'main' cronline: '0 */12 * * *' - 8_12_semi_daily: - branch: '8.12' + 8_13_semi_daily: + branch: '8.13' cronline: '0 */12 * * *' - 8_11_daily: - branch: '8.11' + 8_12_daily: + branch: '8.12' cronline: '@daily' diff --git a/package.json b/package.json index 0ecd601ca..6af5a7eba 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.12.0", - "versionCanary": "8.12.0-canary.0", + "version": "8.13.0", + "versionCanary": "8.13.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From 6a821583c06cda1b196d48880b14776360cfa40c Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Mar 2024 12:17:30 -0500 Subject: [PATCH 311/647] Fixes to serverless patch script (#2175) --- .github/workflows/serverless-patch.sh | 38 ++++++++++++++++++++++++++ .github/workflows/serverless-patch.yml | 16 ++--------- 2 files changed, 40 insertions(+), 14 deletions(-) create mode 100755 .github/workflows/serverless-patch.sh diff --git a/.github/workflows/serverless-patch.sh b/.github/workflows/serverless-patch.sh new file mode 100755 index 000000000..cd746c97c --- /dev/null +++ b/.github/workflows/serverless-patch.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +set -exuo pipefail + +merge_commit_sha=$(jq -r '.pull_request.merge_commit_sha' "$GITHUB_EVENT_PATH") +pull_request_id=$(jq -r '.pull_request.number' "$GITHUB_EVENT_PATH") + +# generate patch file +cd "$GITHUB_WORKSPACE/elasticsearch-js" +git format-patch -1 --stdout "$merge_commit_sha" > /tmp/patch.diff + +# apply patch file +cd "$GITHUB_WORKSPACE/elasticsearch-serverless-js" +git checkout -b "apply-patch-$pull_request_id" +git apply -C1 --recount --reject /tmp/patch.diff || exit 0 + +comment="Patch applied from elastic/elasticsearch-js#$pull_request_id" + +# check for rejected patches +tick='\`' # just trying to satisfy shellcheck here +has_rejects='' +for f in ./**/*.rej; do + has_rejects=' --draft' + comment="$comment + +## Rejected patch $tick$f$tick must be resolved: + +$tick$tick$tick +$(cat "$f") +$tick$tick$tick +" +done + +# open a PR +gh pr create \ + -t "Apply PR changes from elastic/elasticsearch-js#$pull_request_id" \ + --body "$comment" \ + "$has_rejects" diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 8f204bbea..9acda04b6 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -36,17 +36,5 @@ jobs: repository: elastic/elasticsearch-serverless-js ref: main path: elasticsearch-serverless-js - - name: Generate patch 
file - run: | - cd $GITHUB_WORKSPACE/elasticsearch-js - git format-patch -1 --stdout ${{ github.event.pull_request.merge_commit_sha }} > /tmp/patch.diff - - name: Apply patch file - run: | - cd $GITHUB_WORKSPACE/elasticsearch-serverless-js - git checkout -b apply-patch-${{ github.event.pull_request.id }} - git apply -C1 --recount --reject /tmp/patch.diff || exit 0 - comment='Patch applied from elastic/elasticsearch-js#${{ github.event.pull_request.id }}' - for f in $(find . -name '*.rej'); do - comment="$comment\n\n## Rejected patch \`$f`\:\n\`\`\`\n$(cat $f)\n\`\`\`" - done - gh pr create -t "Apply PR changes from elastic/elasticsearch-js#${{ github.event.pull_request.id }}" --body "$comment" + - name: Apply patch from stack to serverless + run: ./.github/serverless-patch.sh From fba3e4186242ed92be2a167b820720daa942a2db Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Mar 2024 13:22:43 -0500 Subject: [PATCH 312/647] Continuing to test the serverless patch workflow (#2176) --- .github/workflows/serverless-patch.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 9acda04b6..cafe9cbfb 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -37,4 +37,5 @@ jobs: ref: main path: elasticsearch-serverless-js - name: Apply patch from stack to serverless - run: ./.github/serverless-patch.sh + run: | + .$GITHUB_WORKSPACE/.github/serverless-patch.sh From 3ac5a1cc659c3fb2e57c150a45cd86fc1c16f47e Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Mar 2024 13:26:01 -0500 Subject: [PATCH 313/647] Still testing serverless patch workflow (#2177) * Stray period :facepalm: * Fix path --- .github/workflows/serverless-patch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index cafe9cbfb..024435e9e 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -38,4 +38,4 @@ jobs: path: elasticsearch-serverless-js - name: Apply patch from stack to serverless run: | - .$GITHUB_WORKSPACE/.github/serverless-patch.sh + $GITHUB_WORKSPACE/elasticsearch-js/.github/serverless-patch.sh From af2dbc01d317995412d0eb22ef1caa45be02e55c Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Mar 2024 13:34:07 -0500 Subject: [PATCH 314/647] Run serverless patch workflow using correct branch of the stack client (#2178) --- .github/workflows/serverless-patch.sh | 4 ++-- .github/workflows/serverless-patch.yml | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/serverless-patch.sh b/.github/workflows/serverless-patch.sh index cd746c97c..4dddb448e 100755 --- a/.github/workflows/serverless-patch.sh +++ b/.github/workflows/serverless-patch.sh @@ -6,11 +6,11 @@ merge_commit_sha=$(jq -r '.pull_request.merge_commit_sha' "$GITHUB_EVENT_PATH") pull_request_id=$(jq -r '.pull_request.number' "$GITHUB_EVENT_PATH") # generate patch file -cd "$GITHUB_WORKSPACE/elasticsearch-js" +cd "$GITHUB_WORKSPACE/stack" git format-patch -1 --stdout "$merge_commit_sha" > /tmp/patch.diff # apply patch file -cd "$GITHUB_WORKSPACE/elasticsearch-serverless-js" +cd "$GITHUB_WORKSPACE/serverless" git checkout -b "apply-patch-$pull_request_id" git apply -C1 --recount --reject /tmp/patch.diff || exit 0 diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 024435e9e..a330d32ed 100644 --- 
a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -29,13 +29,13 @@ jobs: - uses: actions/checkout@v4 with: repository: elastic/elasticsearch-js - ref: '${{ github.event.pull_request.merge_commit_sha }}' - path: elasticsearch-js + ref: main + path: stack - uses: actions/checkout@v4 with: repository: elastic/elasticsearch-serverless-js ref: main - path: elasticsearch-serverless-js + path: serverless - name: Apply patch from stack to serverless run: | - $GITHUB_WORKSPACE/elasticsearch-js/.github/serverless-patch.sh + $GITHUB_WORKSPACE/stack/.github/serverless-patch.sh From 78dab89db88d7542ee65f57d20849ee3cc945192 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Mar 2024 13:39:19 -0500 Subject: [PATCH 315/647] More directory fixes to patch workflow (#2179) --- .github/workflows/serverless-patch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index a330d32ed..8b000449e 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -38,4 +38,4 @@ jobs: path: serverless - name: Apply patch from stack to serverless run: | - $GITHUB_WORKSPACE/stack/.github/serverless-patch.sh + $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh From c106146d30d7501a6ac338e956149418bbe90a28 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Mar 2024 13:46:32 -0500 Subject: [PATCH 316/647] More fetch depth fix to patch workflow (#2180) --- .github/workflows/serverless-patch.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 8b000449e..9ec46d449 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -31,6 +31,7 @@ jobs: repository: elastic/elasticsearch-js ref: main path: stack + fetch-depth: 0 - uses: actions/checkout@v4 with: repository: elastic/elasticsearch-serverless-js From 2721008867c2d2f7ddb45b2f80101558c7b6817f Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Mar 2024 17:19:06 -0500 Subject: [PATCH 317/647] Correctly apply and commit patch changes in patch automation workflow (#2181) --- .github/workflows/serverless-patch.sh | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/.github/workflows/serverless-patch.sh b/.github/workflows/serverless-patch.sh index 4dddb448e..b3c9bd32e 100755 --- a/.github/workflows/serverless-patch.sh +++ b/.github/workflows/serverless-patch.sh @@ -4,6 +4,7 @@ set -exuo pipefail merge_commit_sha=$(jq -r '.pull_request.merge_commit_sha' "$GITHUB_EVENT_PATH") pull_request_id=$(jq -r '.pull_request.number' "$GITHUB_EVENT_PATH") +pr_shortcode="elastic/elasticsearch-js#$pull_request_id" # generate patch file cd "$GITHUB_WORKSPACE/stack" @@ -12,11 +13,16 @@ git format-patch -1 --stdout "$merge_commit_sha" > /tmp/patch.diff # apply patch file cd "$GITHUB_WORKSPACE/serverless" git checkout -b "apply-patch-$pull_request_id" -git apply -C1 --recount --reject /tmp/patch.diff || exit 0 +git am -C1 --reject /tmp/patch.diff || git am --quit -comment="Patch applied from elastic/elasticsearch-js#$pull_request_id" +# commit changes, ignoring rejects +git add -A +git reset -- **/*.rej +git commit -m "Apply changes from $pr_shortcode" -# check for rejected patches +comment="Patch applied from $pr_shortcode" + +# enumerate rejected patches in PR comment tick='\`' # just trying to satisfy shellcheck here has_rejects='' for f in ./**/*.rej; 
do @@ -33,6 +39,7 @@ done # open a PR gh pr create \ - -t "Apply PR changes from elastic/elasticsearch-js#$pull_request_id" \ + --repo elastic/elasticsearch-serverless-js \ + -t "Apply PR changes from $pr_shortcode" \ --body "$comment" \ "$has_rejects" From 3ad00b4a9f4e3225a0d265731f36980cd192a54d Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 09:39:19 -0500 Subject: [PATCH 318/647] Add committer identity to serverless patch action (#2182) --- .github/workflows/serverless-patch.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 9ec46d449..a4e14db38 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -37,6 +37,10 @@ jobs: repository: elastic/elasticsearch-serverless-js ref: main path: serverless + - name: Set Git user + run: | + git config --global user.name "Elastic Machine" + git confit --global user.email "elasticmachine@users.noreply.github.com" - name: Apply patch from stack to serverless run: | $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh From 7475dba8b95dbcc8213076c7426ac79a3b387f3d Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 09:41:18 -0500 Subject: [PATCH 319/647] Git config typo (#2183) --- .github/workflows/serverless-patch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index a4e14db38..4008b8070 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -40,7 +40,7 @@ jobs: - name: Set Git user run: | git config --global user.name "Elastic Machine" - git confit --global user.email "elasticmachine@users.noreply.github.com" + git config --global user.email "elasticmachine@users.noreply.github.com" - name: Apply patch from stack to serverless run: | $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh From 63eb92b42ad9be3048de10169178eb70b2ae3d81 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 09:45:25 -0500 Subject: [PATCH 320/647] Fix bad argument string (#2184) --- .github/workflows/serverless-patch.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/serverless-patch.sh b/.github/workflows/serverless-patch.sh index b3c9bd32e..58f822100 100755 --- a/.github/workflows/serverless-patch.sh +++ b/.github/workflows/serverless-patch.sh @@ -26,7 +26,7 @@ comment="Patch applied from $pr_shortcode" tick='\`' # just trying to satisfy shellcheck here has_rejects='' for f in ./**/*.rej; do - has_rejects=' --draft' + has_rejects='--draft' comment="$comment ## Rejected patch $tick$f$tick must be resolved: From 38c17fd7f337827acf1fb7bcc5a7144c4399d557 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 09:58:50 -0500 Subject: [PATCH 321/647] Set Github token to use Github CLI in an action (#2185) --- .github/workflows/serverless-patch.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 4008b8070..b7796d5ed 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -42,5 +42,6 @@ jobs: git config --global user.name "Elastic Machine" git config --global user.email "elasticmachine@users.noreply.github.com" - name: Apply patch from stack to serverless - run: | - $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh + env: + GH_TOKEN: '${{ 
secrets.GITHUB_TOKEN }}' + run: $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh From 6e635308012edb856f30a5e59dd375d2d7cd7e8e Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 10:40:12 -0500 Subject: [PATCH 322/647] Refactor patch action to use create-pull-request action (#2186) --- .github/workflows/serverless-patch.sh | 28 ++++++++++---------------- .github/workflows/serverless-patch.yml | 17 ++++++++++------ 2 files changed, 22 insertions(+), 23 deletions(-) diff --git a/.github/workflows/serverless-patch.sh b/.github/workflows/serverless-patch.sh index 58f822100..2cf8d90ca 100755 --- a/.github/workflows/serverless-patch.sh +++ b/.github/workflows/serverless-patch.sh @@ -15,31 +15,25 @@ cd "$GITHUB_WORKSPACE/serverless" git checkout -b "apply-patch-$pull_request_id" git am -C1 --reject /tmp/patch.diff || git am --quit -# commit changes, ignoring rejects -git add -A -git reset -- **/*.rej -git commit -m "Apply changes from $pr_shortcode" - +# generate PR body comment comment="Patch applied from $pr_shortcode" # enumerate rejected patches in PR comment -tick='\`' # just trying to satisfy shellcheck here -has_rejects='' +has_rejects='false' for f in ./**/*.rej; do - has_rejects='--draft' + has_rejects='true' comment="$comment -## Rejected patch $tick$f$tick must be resolved: +## Rejected patch \`$f\` must be resolved: -$tick$tick$tick +\`\`\`diff $(cat "$f") -$tick$tick$tick +\`\`\` " done -# open a PR -gh pr create \ - --repo elastic/elasticsearch-serverless-js \ - -t "Apply PR changes from $pr_shortcode" \ - --body "$comment" \ - "$has_rejects" +# send data to output parameters +{ + echo "PR_BODY='$comment'" + echo "PR_DRAFT=$has_rejects" +} >> "$GITHUB_OUTPUT" diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index b7796d5ed..bf0af9100 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -37,11 +37,16 @@ jobs: repository: elastic/elasticsearch-serverless-js ref: main path: serverless - - name: Set Git user - run: | - git config --global user.name "Elastic Machine" - git config --global user.email "elasticmachine@users.noreply.github.com" - name: Apply patch from stack to serverless - env: - GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}' + id: apply-patch run: $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh + - uses: peter-evans/create-pull-request@v6 + with: + token: ${{ secrets.GITHUB_TOKEN }} + path: $GITHUB_WORKSPACE/serverless + title: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' + commit-message: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' + branch-suffix: short-commit-hash + body: '${{ steps.apply-patch.outputs.PR_BODY }}' + draft: '${{ steps.apply-patch.outputs.PR_DRAFT }}' + add-paths: ':!*.rej' From 113b32258d40049834d58d5d9b72fe116ff189d9 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 10:42:53 -0500 Subject: [PATCH 323/647] Must set committer info to apply a patch (#2187) --- .github/workflows/serverless-patch.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/serverless-patch.sh b/.github/workflows/serverless-patch.sh index 2cf8d90ca..ab4f9ccc2 100755 --- a/.github/workflows/serverless-patch.sh +++ b/.github/workflows/serverless-patch.sh @@ -10,6 +10,10 @@ pr_shortcode="elastic/elasticsearch-js#$pull_request_id" cd "$GITHUB_WORKSPACE/stack" git format-patch -1 --stdout "$merge_commit_sha" > /tmp/patch.diff +# set committer info +git 
config --global user.email "elasticmachine@users.noreply.github.com" +git config --global user.name "Elastic Machine" + # apply patch file cd "$GITHUB_WORKSPACE/serverless" git checkout -b "apply-patch-$pull_request_id" From c7cbe941dbba626e79faec1029cb36f8141b1e1f Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 10:46:56 -0500 Subject: [PATCH 324/647] Write PR body to a file instead of a buggy multi-line echo (#2188) --- .github/workflows/serverless-patch.sh | 6 ++---- .github/workflows/serverless-patch.yml | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/serverless-patch.sh b/.github/workflows/serverless-patch.sh index ab4f9ccc2..91cb49e38 100755 --- a/.github/workflows/serverless-patch.sh +++ b/.github/workflows/serverless-patch.sh @@ -37,7 +37,5 @@ $(cat "$f") done # send data to output parameters -{ - echo "PR_BODY='$comment'" - echo "PR_DRAFT=$has_rejects" -} >> "$GITHUB_OUTPUT" +echo "$comment" > /tmp/pr_body +echo "PR_DRAFT=$has_rejects" >> "$GITHUB_OUTPUT" diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index bf0af9100..9dfd063cd 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -47,6 +47,6 @@ jobs: title: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' commit-message: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' branch-suffix: short-commit-hash - body: '${{ steps.apply-patch.outputs.PR_BODY }}' + body-path: /tmp/pr_body draft: '${{ steps.apply-patch.outputs.PR_DRAFT }}' add-paths: ':!*.rej' From d584836399a337ca38c551034dbbf97b81fea468 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 12:53:46 -0500 Subject: [PATCH 325/647] Use relative path for PR action (#2189) --- .github/workflows/serverless-patch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 9dfd063cd..1229ea6aa 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -43,7 +43,7 @@ jobs: - uses: peter-evans/create-pull-request@v6 with: token: ${{ secrets.GITHUB_TOKEN }} - path: $GITHUB_WORKSPACE/serverless + path: serverless title: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' commit-message: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' branch-suffix: short-commit-hash From 7f7942e207720feda23a19b7cd1515a81af554a2 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 13:18:14 -0500 Subject: [PATCH 326/647] Drop branch suffix from patch action (#2190) drop .rej files and don't use a branch suffix --- .github/workflows/serverless-patch.sh | 3 +++ .github/workflows/serverless-patch.yml | 2 -- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/serverless-patch.sh b/.github/workflows/serverless-patch.sh index 91cb49e38..4a36760e9 100755 --- a/.github/workflows/serverless-patch.sh +++ b/.github/workflows/serverless-patch.sh @@ -36,6 +36,9 @@ $(cat "$f") " done +# delete .rej files +rm -fv ./**/*.rej + # send data to output parameters echo "$comment" > /tmp/pr_body echo "PR_DRAFT=$has_rejects" >> "$GITHUB_OUTPUT" diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 1229ea6aa..d1647d408 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -46,7 
+46,5 @@ jobs: path: serverless title: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' commit-message: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' - branch-suffix: short-commit-hash body-path: /tmp/pr_body draft: '${{ steps.apply-patch.outputs.PR_DRAFT }}' - add-paths: ':!*.rej' From f3d9dfb48e5bb0b4e503287417aea59136dfe484 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 13:20:46 -0500 Subject: [PATCH 327/647] Stop creating a branch before applying patch (#2191) --- .github/workflows/serverless-patch.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/serverless-patch.sh b/.github/workflows/serverless-patch.sh index 4a36760e9..a38eda9ec 100755 --- a/.github/workflows/serverless-patch.sh +++ b/.github/workflows/serverless-patch.sh @@ -16,7 +16,6 @@ git config --global user.name "Elastic Machine" # apply patch file cd "$GITHUB_WORKSPACE/serverless" -git checkout -b "apply-patch-$pull_request_id" git am -C1 --reject /tmp/patch.diff || git am --quit # generate PR body comment From ec9a4dc9602d1f6c9bac9085e322a73feebaf79c Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 13:28:25 -0500 Subject: [PATCH 328/647] Try using default token (#2192) Provided token was not able to push a new branch to elasticsearch-serverless-js --- .github/workflows/serverless-patch.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index d1647d408..e1a15fe08 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -42,7 +42,6 @@ jobs: run: $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh - uses: peter-evans/create-pull-request@v6 with: - token: ${{ secrets.GITHUB_TOKEN }} path: serverless title: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' commit-message: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' From 1ef318adedf3903f3574c1ad2ec329f19a2736e5 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 13:38:29 -0500 Subject: [PATCH 329/647] use a repo-scoped personal access token (#2193) --- .github/workflows/serverless-patch.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index e1a15fe08..700310e08 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -42,6 +42,7 @@ jobs: run: $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh - uses: peter-evans/create-pull-request@v6 with: + token: ${{ secrets.GH_TOKEN }} path: serverless title: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' commit-message: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' From 628254df2da74a9238f2f5d5a192db3ae72bf4e9 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 27 Mar 2024 13:40:50 -0500 Subject: [PATCH 330/647] Don't commit .rej files (#2194) --- .github/workflows/serverless-patch.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 700310e08..9cf9926a4 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -48,3 +48,4 @@ jobs: commit-message: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' body-path: /tmp/pr_body draft: '${{ 
steps.apply-patch.outputs.PR_DRAFT }}' + add-paths: ':!*.rej' From f96aa32345077e33a78496adb95c12a9335989ae Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 28 Mar 2024 13:48:06 -0500 Subject: [PATCH 331/647] Stop defaulting to snapshots for integration tests (#2197) * Run integration tests against non-snapshot on main Fetching SNAPSHOT artifacts is currently flaky. * Reduce number of scheduled integration test runs --- .buildkite/make.mjs | 2 +- .buildkite/pipeline.yml | 2 +- catalog-info.yaml | 9 +++------ 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/.buildkite/make.mjs b/.buildkite/make.mjs index 511944972..b9366e404 100644 --- a/.buildkite/make.mjs +++ b/.buildkite/make.mjs @@ -94,7 +94,7 @@ async function bump (args) { const pipeline = await readFile(join(import.meta.url, '..', '.buildkite', 'pipeline.yml'), 'utf8') await writeFile( join(import.meta.url, '..', '.buildkite', 'pipeline.yml'), - pipeline.replace(/STACK_VERSION: [0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?/, `STACK_VERSION: ${cleanVersion}-SNAPSHOT`), + pipeline.replace(/STACK_VERSION: [0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?/, `STACK_VERSION: ${cleanVersion}`), 'utf8' ) } diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index cf7d13854..1456b8d73 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -6,7 +6,7 @@ steps: env: NODE_VERSION: "{{ matrix.nodejs }}" TEST_SUITE: "{{ matrix.suite }}" - STACK_VERSION: 8.13.0-SNAPSHOT + STACK_VERSION: 8.13.0 matrix: setup: suite: diff --git a/catalog-info.yaml b/catalog-info.yaml index e069ddc74..e7ba4261d 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -39,12 +39,9 @@ spec: cancel_intermediate_builds: true cancel_intermediate_builds_branch_filter: '!main' schedules: - main_semi_daily: + main: branch: 'main' - cronline: '0 */12 * * *' - 8_13_semi_daily: + cronline: '@daily' + 8_13: branch: '8.13' - cronline: '0 */12 * * *' - 8_12_daily: - branch: '8.12' cronline: '@daily' From 3bd7ba95f866c1ddad6f167029b7681049fec18e Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 28 Mar 2024 15:42:43 -0500 Subject: [PATCH 332/647] Bump main to 8.14.0 (#2198) * Improve version bump script's assertion feedback * Add junit output to gitignore * Assume x.0 if a patch value is not provided * Bump package to 8.14 --- .buildkite/make.mjs | 5 +++-- .buildkite/pipeline.yml | 2 +- .gitignore | 1 + package.json | 4 ++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.buildkite/make.mjs b/.buildkite/make.mjs index b9366e404..0f3e724fd 100644 --- a/.buildkite/make.mjs +++ b/.buildkite/make.mjs @@ -74,14 +74,15 @@ async function release (args) { async function bump (args) { assert(args.length === 1, 'Bump task expects one parameter') - const [version] = args + let [version] = args const packageJson = JSON.parse(await readFile( join(import.meta.url, '..', 'package.json'), 'utf8' )) + if (version.split('.').length === 2) version = `${version}.0` const cleanVersion = semver.clean(version.includes('SNAPSHOT') ? version.split('-')[0] : version) - assert(semver.valid(cleanVersion)) + assert(semver.valid(cleanVersion), `${cleanVersion} is not seen as a valid semver version. 
raw version: ${version}`) packageJson.version = cleanVersion packageJson.versionCanary = `${cleanVersion}-canary.0` diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 1456b8d73..e27888106 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -6,7 +6,7 @@ steps: env: NODE_VERSION: "{{ matrix.nodejs }}" TEST_SUITE: "{{ matrix.suite }}" - STACK_VERSION: 8.13.0 + STACK_VERSION: 8.14.0 matrix: setup: suite: diff --git a/.gitignore b/.gitignore index 5c0af8e20..c38ae71df 100644 --- a/.gitignore +++ b/.gitignore @@ -63,3 +63,4 @@ test/bundlers/**/bundle.js test/bundlers/parcel-test/.parcel-cache lib +junit-output diff --git a/package.json b/package.json index 6af5a7eba..5528f09c3 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.13.0", - "versionCanary": "8.13.0-canary.0", + "version": "8.14.0", + "versionCanary": "8.14.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "index.js", "types": "index.d.ts", From e2974b07477a1e35259fc4b636b16e87ceaf11f4 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 2 Apr 2024 14:32:16 -0500 Subject: [PATCH 333/647] Upgrade transport to 8.5.0 (#2202) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 5528f09c3..c19d2dcc2 100644 --- a/package.json +++ b/package.json @@ -83,7 +83,7 @@ "zx": "^7.2.2" }, "dependencies": { - "@elastic/transport": "^8.4.1", + "@elastic/transport": "^8.5.0", "tslib": "^2.4.0" }, "tap": { From 4aa00e03e1fac5385387dfa7cfd6e811d5307028 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 2 Apr 2024 14:38:09 -0500 Subject: [PATCH 334/647] onSuccess function for bulk helper (#2199) * Bulk helper onSuccess callback For https://github.com/elastic/elasticsearch-js/issues/2090 Includes refactor of the tryBulk result processing code, to make iterating over bulk response data easier to understand. * Add onSuccess tests for each datasource type * Cleanup, additional comments * Add documentation for onSuccess callback * Update changelog * Drop link to 8.14 release notes. Page not yet published, breaking docs build. --- docs/changelog.asciidoc | 8 + docs/helpers.asciidoc | 11 + src/helpers.ts | 92 +++++-- test/unit/helpers/bulk.test.ts | 447 +++++++++++++++++++++++---------- 4 files changed, 407 insertions(+), 151 deletions(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 33b6128c9..dc918e32f 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,14 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.14.0 + +[discrete] +===== `onSuccess` callback added to bulk helper + +The bulk helper now supports an `onSuccess` callback that will be called for each successful operation. https://github.com/elastic/elasticsearch-js/pull/2199[#2199] + [discrete] === 8.13.0 diff --git a/docs/helpers.asciidoc b/docs/helpers.asciidoc index 4815ebc4a..2ecf8cd30 100644 --- a/docs/helpers.asciidoc +++ b/docs/helpers.asciidoc @@ -98,6 +98,17 @@ const b = client.helpers.bulk({ }) ---- +|`onSuccess` +a|A function that is called for each successful operation in the bulk request, which includes the result from Elasticsearch along with the original document that was sent, or `null` for delete operations. 
+[source,js] +---- +const b = client.helpers.bulk({ + onSuccess ({ result, document }) { + console.log(`SUCCESS: Document ${result.index._id} indexed to ${result.index._index}`) + } +}) +---- + |`flushBytes` a|The size of the bulk body in bytes to reach before to send it. Default of 5MB. + _Default:_ `5000000` diff --git a/src/helpers.ts b/src/helpers.ts index fbf4ff334..94d59b062 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -103,6 +103,24 @@ export interface OnDropDocument { retried: boolean } +type BulkResponseItem = Partial> + +export interface OnSuccessDocument { + result: BulkResponseItem + document?: TDocument +} + +interface ZippedResult { + result: BulkResponseItem + raw: { + action: string + document?: string + } + // this is a function so that deserialization is only done when needed + // to avoid a performance hit + document?: () => TDocument +} + export interface BulkHelperOptions extends T.BulkRequest { datasource: TDocument[] | Buffer | Readable | AsyncIterator onDocument: (doc: TDocument) => Action @@ -112,6 +130,7 @@ export interface BulkHelperOptions extends T.BulkRequest { retries?: number wait?: number onDrop?: (doc: OnDropDocument) => void + onSuccess?: (doc: OnSuccessDocument) => void refreshOnCompletion?: boolean | string } @@ -551,6 +570,9 @@ export default class Helpers { retries = this[kMaxRetries], wait = 5000, onDrop = noop, + // onSuccess does not default to noop, to avoid the performance hit + // of deserializing every document in the bulk request + onSuccess, refreshOnCompletion = false, ...bulkOptions } = options @@ -817,57 +839,93 @@ export default class Helpers { callback() } + /** + * Zips bulk response items (the action's result) with the original document body. + * The raw string version of action and document lines are also included. 
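+   *
+   * The source document is exposed behind a thunk (`document()`) rather than
+   * an eager value, so JSON deserialization only happens when a consumer such
+   * as `onDrop` or `onSuccess` actually needs the document.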
+ */ + function zipBulkResults (responseItems: BulkResponseItem[], bulkBody: string[]): ZippedResult[] { + const zipped = [] + let indexSlice = 0 + for (let i = 0, len = responseItems.length; i < len; i++) { + const result = responseItems[i] + const operation = Object.keys(result)[0] + let zipResult + + if (operation === 'delete') { + zipResult = { + result, + raw: { action: bulkBody[indexSlice] } + } + indexSlice += 1 + } else { + const document = bulkBody[indexSlice + 1] + zipResult = { + result, + raw: { action: bulkBody[indexSlice], document }, + // this is a function so that deserialization is only done when needed + // to avoid a performance hit + document: () => serializer.deserialize(document) + } + indexSlice += 2 + } + + zipped.push(zipResult as ZippedResult) + } + + return zipped + } + function tryBulk (bulkBody: string[], callback: (err: Error | null, bulkBody: string[]) => void): void { if (shouldAbort) return callback(null, []) client.bulk(Object.assign({}, bulkOptions, { body: bulkBody }), reqOptions as TransportRequestOptionsWithMeta) .then(response => { const result = response.body + const results = zipBulkResults(result.items, bulkBody) + if (!result.errors) { stats.successful += result.items.length - for (const item of result.items) { - if (item.update?.result === 'noop') { + for (const item of results) { + const { result, document = noop } = item + if (result.update?.result === 'noop') { stats.noop++ } + if (onSuccess != null) onSuccess({ result, document: document() }) } return callback(null, []) } const retry = [] - const { items } = result - let indexSlice = 0 - for (let i = 0, len = items.length; i < len; i++) { - const action = items[i] - const operation = Object.keys(action)[0] + for (const item of results) { + const { result, raw, document = noop } = item + const operation = Object.keys(result)[0] // @ts-expect-error - const responseItem = action[operation as keyof T.BulkResponseItemContainer] + const responseItem = result[operation as keyof T.BulkResponseItemContainer] assert(responseItem !== undefined, 'The responseItem is undefined, please file a bug report') if (responseItem.status >= 400) { - // 429 is the only staus code where we might want to retry + // 429 is the only status code where we might want to retry // a document, because it was not an error in the document itself, - // but the ES node were handling too many operations. + // but the ES node was handling too many operations. if (responseItem.status === 429) { - retry.push(bulkBody[indexSlice]) + retry.push(raw.action) /* istanbul ignore next */ if (operation !== 'delete') { - retry.push(bulkBody[indexSlice + 1]) + retry.push(raw.document ?? '') } } else { onDrop({ status: responseItem.status, error: responseItem.error ?? null, - operation: serializer.deserialize(bulkBody[indexSlice]), + operation: serializer.deserialize(raw.action), // @ts-expect-error - document: operation !== 'delete' - ? serializer.deserialize(bulkBody[indexSlice + 1]) - : null, + document: document(), retried: isRetrying }) stats.failed += 1 } } else { stats.successful += 1 + if (onSuccess != null) onSuccess({ result, document: document() }) } - operation === 'delete' ? 
indexSlice += 1 : indexSlice += 2 } callback(null, retry) }) diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts index 62c297ebf..0a15c3fc6 100644 --- a/test/unit/helpers/bulk.test.ts +++ b/test/unit/helpers/bulk.test.ts @@ -514,7 +514,7 @@ test('bulk index', t => { t.test('Server error', async t => { const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { statusCode: 500, body: { somothing: 'went wrong' } @@ -530,12 +530,12 @@ test('bulk index', t => { datasource: dataset.slice(), flushBytes: 1, concurrency: 1, - onDocument (doc) { + onDocument (_doc) { return { index: { _index: 'test' } } }, - onDrop (doc) { + onDrop (_doc) { t.fail('This should never be called') } }) @@ -550,7 +550,7 @@ test('bulk index', t => { t.test('Server error (high flush size, to trigger the finish error)', async t => { const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { statusCode: 500, body: { somothing: 'went wrong' } @@ -566,12 +566,12 @@ test('bulk index', t => { datasource: dataset.slice(), flushBytes: 5000000, concurrency: 1, - onDocument (doc) { + onDocument (_doc) { return { index: { _index: 'test' } } }, - onDrop (doc) { + onDrop (_doc) { t.fail('This should never be called') } }) @@ -625,12 +625,12 @@ test('bulk index', t => { flushBytes: 1, concurrency: 1, wait: 10, - onDocument (doc) { + onDocument (_doc) { return { index: { _index: 'test' } } }, - onDrop (doc) { + onDrop (_doc) { b.abort() } }) @@ -651,7 +651,7 @@ test('bulk index', t => { t.test('Invalid operation', t => { t.plan(2) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: { errors: false, items: [{}] } } } }) @@ -666,7 +666,7 @@ test('bulk index', t => { flushBytes: 1, concurrency: 1, // @ts-expect-error - onDocument (doc) { + onDocument (_doc) { return { foo: { _index: 'test' } } @@ -678,6 +678,43 @@ test('bulk index', t => { }) }) + t.test('should call onSuccess callback for each indexed document', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + // @ts-expect-error + let [action] = params.body.split('\n') + action = JSON.parse(action) + return { body: { errors: false, items: [action] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + let count = 0 + await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (_doc) { + return { + index: { _index: 'test' } + } + }, + onSuccess ({ result, document }) { + t.same(result, { index: { _index: 'test' }}) + t.same(document, dataset[count++]) + }, + onDrop (_doc) { + t.fail('This should never be called') + } + }) + t.equal(count, 3) + t.end() + }) + t.end() }) @@ -731,6 +768,44 @@ test('bulk index', t => { }) }) + t.test('onSuccess is called for each indexed document', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + // @ts-expect-error + let [action] = params.body.split('\n') + action = JSON.parse(action) + return { body: { errors: false, items: [action] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const stream = createReadStream(join(__dirname, '..', '..', 'fixtures', 'small-dataset.ndjson'), 'utf8') + + let count = 0 + await client.helpers.bulk({ + datasource: stream.pipe(split()), + flushBytes: 1, 
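+      // flushBytes: 1 forces a flush after every document, so each doc
+      // travels in its own bulk request and onSuccess fires once per doc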
+ concurrency: 1, + onDocument (_doc) { + return { + index: { _index: 'test' } + } + }, + onSuccess ({ result, document }) { + t.same(result, { index: { _index: 'test' }}) + t.same(document, dataset[count++]) + }, + onDrop (_doc) { + t.fail('This should never be called') + } + }) + t.equal(count, 3) + t.end() + }) + t.end() }) @@ -785,6 +860,50 @@ test('bulk index', t => { aborted: false }) }) + + t.test('onSuccess is called for each indexed document', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + // @ts-expect-error + let [action] = params.body.split('\n') + action = JSON.parse(action) + return { body: { errors: false, items: [action] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + async function * generator () { + const data = dataset.slice() + for (const doc of data) { + yield doc + } + } + + let count = 0 + await client.helpers.bulk({ + datasource: generator(), + flushBytes: 1, + concurrency: 1, + onDocument (_doc) { + return { + index: { _index: 'test' } + } + }, + onSuccess ({ result, document }) { + t.same(result, { index: { _index: 'test' }}) + t.same(document, dataset[count++]) + }, + onDrop (_doc) { + t.fail('This should never be called') + } + }) + t.equal(count, 3) + t.end() + }) t.end() }) @@ -943,6 +1062,8 @@ test('bulk create', t => { }) }) + + t.end() }) @@ -1279,6 +1400,63 @@ test('bulk delete', t => { server.stop() }) + t.test('should call onSuccess callback with delete action object', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + // @ts-expect-error + let [action, payload] = params.body.split('\n') + action = JSON.parse(action) + return { body: { errors: false, items: [action] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + let docCount = 0 + let successCount = 0 + await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (_doc) { + if (docCount++ === 1) { + return { + delete: { + _index: 'test', + _id: String(docCount) + } + } + } else { + return { + index: { _index: 'test' } + } + } + }, + onSuccess ({ result, document }) { + const item = dataset[successCount] + if (successCount++ === 1) { + t.same(result, { + delete: { + _index: 'test', + _id: String(successCount) + } + }) + } else { + t.same(result, { index: { _index: 'test' }}) + t.same(document, item) + } + }, + onDrop (_doc) { + t.fail('This should never be called') + } + }) + + t.end() + }) + t.end() }) @@ -1594,152 +1772,153 @@ test('Flush interval', t => { }) }) - t.end() -}) - -test(`flush timeout does not lock process when flushInterval is less than server timeout`, async t => { - const flushInterval = 500 + test(`flush timeout does not lock process when flushInterval is less than server timeout`, async t => { + const flushInterval = 500 - async function handler (req: http.IncomingMessage, res: http.ServerResponse) { - setTimeout(() => { - res.writeHead(200, { 'content-type': 'application/json' }) - res.end(JSON.stringify({ errors: false, items: [{}] })) - }, 1000) - } + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + setTimeout(() => { + res.writeHead(200, { 'content-type': 'application/json' }) + res.end(JSON.stringify({ errors: false, items: [{}] })) + }, 1000) + } - const [{ port }, server] = await buildServer(handler) - const client = new Client({ node: 
`http://localhost:${port}` }) + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) - async function * generator () { - const data = dataset.slice() - for (const doc of data) { - await sleep(flushInterval) - yield doc - } - } - - const result = await client.helpers.bulk({ - datasource: Readable.from(generator()), - flushBytes: 1, - flushInterval: flushInterval, - concurrency: 1, - onDocument (_) { - return { - index: { _index: 'test' } + async function * generator () { + const data = dataset.slice() + for (const doc of data) { + await sleep(flushInterval) + yield doc } - }, - onDrop (_) { - t.fail('This should never be called') } - }) - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - - server.stop() -}) + const result = await client.helpers.bulk({ + datasource: Readable.from(generator()), + flushBytes: 1, + flushInterval: flushInterval, + concurrency: 1, + onDocument (_) { + return { + index: { _index: 'test' } + } + }, + onDrop (_) { + t.fail('This should never be called') + } + }) -test(`flush timeout does not lock process when flushInterval is greater than server timeout`, async t => { - const flushInterval = 500 + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) - async function handler (req: http.IncomingMessage, res: http.ServerResponse) { - setTimeout(() => { - res.writeHead(200, { 'content-type': 'application/json' }) - res.end(JSON.stringify({ errors: false, items: [{}] })) - }, 250) - } + server.stop() + }) - const [{ port }, server] = await buildServer(handler) - const client = new Client({ node: `http://localhost:${port}` }) + test(`flush timeout does not lock process when flushInterval is greater than server timeout`, async t => { + const flushInterval = 500 - async function * generator () { - const data = dataset.slice() - for (const doc of data) { - await sleep(flushInterval) - yield doc + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + setTimeout(() => { + res.writeHead(200, { 'content-type': 'application/json' }) + res.end(JSON.stringify({ errors: false, items: [{}] })) + }, 250) } - } - - const result = await client.helpers.bulk({ - datasource: Readable.from(generator()), - flushBytes: 1, - flushInterval: flushInterval, - concurrency: 1, - onDocument (_) { - return { - index: { _index: 'test' } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + + async function * generator () { + const data = dataset.slice() + for (const doc of data) { + await sleep(flushInterval) + yield doc } - }, - onDrop (_) { - t.fail('This should never be called') } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - server.stop() -}) + const result = await client.helpers.bulk({ + datasource: Readable.from(generator()), + flushBytes: 1, + flushInterval: flushInterval, + concurrency: 1, + onDocument (_) { + return { + index: { _index: 'test' } + } + }, + onDrop (_) { + t.fail('This should never be called') + } + }) -test(`flush timeout does not lock process when flushInterval is equal to server timeout`, async t => { - const flushInterval = 500 + t.type(result.time, 'number') + 
t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) - async function handler (req: http.IncomingMessage, res: http.ServerResponse) { - setTimeout(() => { - res.writeHead(200, { 'content-type': 'application/json' }) - res.end(JSON.stringify({ errors: false, items: [{}] })) - }, flushInterval) - } + server.stop() + }) - const [{ port }, server] = await buildServer(handler) - const client = new Client({ node: `http://localhost:${port}` }) + test(`flush timeout does not lock process when flushInterval is equal to server timeout`, async t => { + const flushInterval = 500 - async function * generator () { - const data = dataset.slice() - for (const doc of data) { - await sleep(flushInterval) - yield doc + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + setTimeout(() => { + res.writeHead(200, { 'content-type': 'application/json' }) + res.end(JSON.stringify({ errors: false, items: [{}] })) + }, flushInterval) } - } - - const result = await client.helpers.bulk({ - datasource: Readable.from(generator()), - flushBytes: 1, - flushInterval: flushInterval, - concurrency: 1, - onDocument (_) { - return { - index: { _index: 'test' } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + + async function * generator () { + const data = dataset.slice() + for (const doc of data) { + await sleep(flushInterval) + yield doc } - }, - onDrop (_) { - t.fail('This should never be called') } - }) - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false + const result = await client.helpers.bulk({ + datasource: Readable.from(generator()), + flushBytes: 1, + flushInterval: flushInterval, + concurrency: 1, + onDocument (_) { + return { + index: { _index: 'test' } + } + }, + onDrop (_) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + + server.stop() }) - server.stop() + t.end() }) + From b857d8ee71fa28ce6378746edfd445a60fbd283a Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 9 Apr 2024 14:37:07 -0500 Subject: [PATCH 335/647] Backport changelogs from other minor releases (#2218) --- docs/changelog.asciidoc | 95 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 94 insertions(+), 1 deletion(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index dc918e32f..cb0ddb673 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -4,11 +4,27 @@ [discrete] === 8.14.0 +[discrete] +==== Features + [discrete] ===== `onSuccess` callback added to bulk helper The bulk helper now supports an `onSuccess` callback that will be called for each successful operation. https://github.com/elastic/elasticsearch-js/pull/2199[#2199] +[discrete] +=== 8.13.1 + +[discrete] +==== Fixes + +[discrete] +===== Pin @elastic/transport to `~8.4.1` + +Switching from `^8.4.1` to `~8.4.1` ensures 8.13 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. + +v8.13.0 was also released depending on v8.4.0 of `@elastic/transport` instead of v8.4.1, which was unintentional. 
+ [discrete] === 8.13.0 @@ -29,6 +45,17 @@ https://www.elastic.co/guide/en/elasticsearch/reference/8.13/release-notes-8.13. When instantiating a client, any connection-related defaults (e.g. `requestTimeout`) set on that client instance would not be inherited by nodes if they were entered as strings rather than a `ConnectionOptions` object. +[discrete] +=== 8.12.3 + +[discrete] +==== Fixes + +[discrete] +===== Bump @elastic/transport to `~8.4.1` + +Switching from `^8.4.1` to `~8.4.1` ensures 8.12 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. + [discrete] === 8.12.2 @@ -55,7 +82,7 @@ The failing state could be reached when a server's response times are slower tha === 8.12.0 [discrete] -==== Features +=== Features [discrete] ===== Support for Elasticsearch `v8.12.0` @@ -63,6 +90,17 @@ The failing state could be reached when a server's response times are slower tha You can find all the API changes https://www.elastic.co/guide/en/elasticsearch/reference/8.12/release-notes-8.12.0.html[here]. +[discrete] +=== 8.11.1 + +[discrete] +==== Fixes + +[discrete] +===== Bump @elastic/transport to `~8.4.0` + +Switching from `^8.4.0` to `~8.4.0` ensures 8.11 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. + [discrete] === 8.11.0 @@ -82,6 +120,17 @@ https://www.elastic.co/guide/en/elasticsearch/reference/8.11/release-notes-8.11. See <> for more information. +[discrete] +=== 8.10.1 + +[discrete] +==== Fixes + +[discrete] +===== Bump @elastic/transport to `~8.3.4` + +Switching from `^8.3.4` to `~8.3.4` ensures 8.10 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. + [discrete] === 8.10.0 @@ -94,6 +143,17 @@ See <> for more information. You can find all the API changes https://www.elastic.co/guide/en/elasticsearch/reference/8.10/release-notes-8.10.0.html[here]. +[discrete] +=== 8.9.2 + +[discrete] +==== Fixes + +[discrete] +===== Bump @elastic/transport to `~8.3.4` + +Switching from `^8.3.4` to `~8.3.4` ensures 8.9 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. + [discrete] === 8.9.1 @@ -130,6 +190,17 @@ In the https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/curre The `user-agent` header the client used to connect to Elasticsearch was using a non-standard format that has been improved. +[discrete] +=== 8.8.2 + +[discrete] +==== Fixes + +[discrete] +===== Bump @elastic/transport to `~8.3.2` + +Switching from `^8.3.2` to `~8.3.2` ensures 8.8 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. + [discrete] === 8.8.1 @@ -175,6 +246,17 @@ https://www.elastic.co/guide/en/elasticsearch/reference/8.8/release-notes-8.8.0. 
Prior releases contained a bug where type declarations for legacy types that include a `body` key were not actually importing the type that includes the `body` key. +[discrete] +=== 8.7.3 + +[discrete] +==== Fixes + +[discrete] +===== Bump @elastic/transport to `~8.3.1` + +Switching from `^8.3.1` to `~8.3.1` ensures 8.7 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. + [discrete] === 8.7.0 @@ -184,6 +266,17 @@ Prior releases contained a bug where type declarations for legacy types that inc You can find all the API changes https://www.elastic.co/guide/en/elasticsearch/reference/8.7/release-notes-8.7.0.html[here]. +[discrete] +=== 8.6.1 + +[discrete] +==== Fixes + +[discrete] +===== Bump @elastic/transport to `~8.3.1` + +Switching from `^8.3.1` to `~8.3.1` ensures 8.6 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. + [discrete] === 8.6.0 From 6f2aaa5c7c78f894e2a11eb1c487f50b701d9785 Mon Sep 17 00:00:00 2001 From: Alexa Nguyen Date: Thu, 18 Apr 2024 01:58:53 +0800 Subject: [PATCH 336/647] docs: fix typo in basic-config.asciidoc (#2222) LENTGH -> LENGTH --- docs/basic-config.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index ab288f2b1..a71269961 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -260,11 +260,11 @@ _Default:_ `false` _Default:_ `null` |`maxResponseSize` -|`number` - When configured, it verifies that the uncompressed response size is lower than the configured number, if it's higher it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENTGH + +|`number` - When configured, it verifies that the uncompressed response size is lower than the configured number, if it's higher it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENGTH + _Default:_ `null` |`maxCompressedResponseSize` -|`number` - When configured, it verifies that the compressed response size is lower than the configured number, if it's higher it will abort the request. It cannot be higher than buffer.constants.MAX_LENTGH + +|`number` - When configured, it verifies that the compressed response size is lower than the configured number, if it's higher it will abort the request. 
It cannot be higher than buffer.constants.MAX_LENGTH + _Default:_ `null` |=== From 95fd81a883fbfd4b004a3009670811b07368af46 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 18 Apr 2024 11:06:21 -0500 Subject: [PATCH 337/647] Auto-create release tag on publish (#2226) --- .github/workflows/npm-publish.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index 73a7d36c2..615b943eb 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -25,3 +25,12 @@ jobs: - run: npm publish --provenance --access public env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + - run: | + version=$(jq -r .version package.json) + gh release create \ + -n "[Changelog](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/$BRANCH_NAME/changelog-client.html)" \ + --target "$BRANCH_NAME" \ + -t "v$version" \ + "v$version" + env: + BRANCH_NAME: ${{ github.event.inputs.branch }} From a13992ec7d8b9d42c1a5abf7334700cdd5b4fbff Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 18 Apr 2024 11:06:33 -0500 Subject: [PATCH 338/647] Update issue templates to use new labels (#2225) And experimentally support GitHub issue forms for regression reports. --- .github/ISSUE_TEMPLATE/bug.md | 3 +- .github/ISSUE_TEMPLATE/feature.md | 3 +- .github/ISSUE_TEMPLATE/question.md | 1 + .github/ISSUE_TEMPLATE/regression.md | 141 +++++++++++++++++---------- 4 files changed, 94 insertions(+), 54 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md index c1dbcdb75..e93bca168 100644 --- a/.github/ISSUE_TEMPLATE/bug.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -1,13 +1,14 @@ --- name: 🐛 Bug report about: Create a report to help us improve +labels: ["Category: Bug"] --- It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. **Please read this entire template before posting any issue. If you ignore these instructions and post an issue here that does not follow the instructions, your issue might be closed, -locked, and assigned the `not reproducible` label.** +locked, and assigned the `Category: Not an issue` label.** ## 🐛 Bug Report diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md index 2335d551d..4b5f8d648 100644 --- a/.github/ISSUE_TEMPLATE/feature.md +++ b/.github/ISSUE_TEMPLATE/feature.md @@ -1,13 +1,14 @@ --- name: 🚀 Feature Proposal about: Submit a proposal for a new feature +labels: ["Category: Feature"] --- It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. **Please read this entire template before posting any issue. 
If you ignore these instructions and post an issue here that does not follow the instructions, your issue might be closed, -locked, and assigned the `invalid` label.** +locked, and assigned the `Category: Not an issue` label.** ## 🚀 Feature Proposal diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index bbd7143bc..fc7ab1490 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -1,6 +1,7 @@ --- name: 💬 Questions / Help about: If you have questions, please check our Gitter or Help repo +labels: ["Category: Question"] --- ## 💬 Questions and Help diff --git a/.github/ISSUE_TEMPLATE/regression.md b/.github/ISSUE_TEMPLATE/regression.md index 7984b3129..14a82bcac 100644 --- a/.github/ISSUE_TEMPLATE/regression.md +++ b/.github/ISSUE_TEMPLATE/regression.md @@ -1,56 +1,93 @@ --- name: 💥 Regression Report about: Report unexpected behavior that worked in previous versions +labels: ["Category: Bug"] +body: + - type: markdown + attributes: + value: | + It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. + + **Please read this entire template before posting any issue. If you ignore these instructions + and post an issue here that does not follow the instructions, your issue might be closed, + locked, and assigned the `Category: Not an issue` label.** + + - type: textarea + id: report + attributes: + label: Regression report + description: A clear and concise description of what the regression is. + validations: + required: true + + - type: input + id: last-working-version + attributes: + label: Last working version + description: Version of `@elastic/elasticsearch` where this last worked. + validations: + required: true + + - type: textarea + id: to-reproduce + attributes: + label: To reproduce + description: | + Paste your code here that shows how to reproduce the behavior. + + In some cases, it might be challenging to reproduce the bug in a few lines of code. + You can fork the following repository, which contains all the configuration needed to spin up a three nodes Elasticsearch cluster with security enabled. + [This repository](https://github.com/delvedor/es-reproduce-issue) also contains a preconfigured client instance that you can use to reproduce the issue. + validations: + required: true + + - type: textarea + id: expected-behavior + attributes: + label: Expected behavior + description: A clear and concise description of what you expected to happen. + validations: + required: true + + - type: input + id: node-version + attributes: + label: Node.js version + description: What version of Node.js you are using (`node --version`). + validations: + required: true + + - type: input + id: typescript-version + attributes: + label: TypeScript version + description: TypeScript version you are using, if applicable. + + - type: input + id: elasticsearch-client-version + attributes: + label: Elasticsearch client version + description: What version of `@elastic/elasticsearch` and `@elastic/transport` you are using (`npm ls -a | grep '@elastic'`). + validations: + required: true + + - type: input + id: elasticsearch-version + attributes: + label: Elasticsearch server version + description: What version of Elasticsearch you are using. 
+ validations: + required: true + + - type: input + id: operating-system + attributes: + label: Operating system + description: What operating system you are running. + placeholder: e.g. Linux, MacOS, Windows + + - type: textarea + id: env-info + attributes: + label: Any other relevant environment information. --- - -It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. - -**Please read this entire template before posting any issue. If you ignore these instructions -and post an issue here that does not follow the instructions, your issue might be closed, -locked, and assigned the `invalid` label.** - -## 💥 Regression Report - -A clear and concise description of what the regression is. - -## Last working version - -Worked up to version: - -Stopped working in version: - -## To Reproduce - -Steps to reproduce the behavior: - -Paste your code here: - -```js - -``` - - - -## Expected behavior - -A clear and concise description of what you expected to happen. - -Paste the results here: - -```js - -``` - -## Your Environment - -- *node version*: 6,8,10 -- `@elastic/elasticsearch` *version*: >=7.0.0 -- *typescript version*: 4.x (if applicable) -- *os*: Mac, Windows, Linux -- *any other relevant information* From 2da30cd4cd30639b650e36679fb2e747171ddebf Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 18 Apr 2024 12:04:06 -0500 Subject: [PATCH 339/647] Move regression template to yaml file (#2229) --- .github/ISSUE_TEMPLATE/{regression.md => regression.yaml} | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) rename .github/ISSUE_TEMPLATE/{regression.md => regression.yaml} (97%) diff --git a/.github/ISSUE_TEMPLATE/regression.md b/.github/ISSUE_TEMPLATE/regression.yaml similarity index 97% rename from .github/ISSUE_TEMPLATE/regression.md rename to .github/ISSUE_TEMPLATE/regression.yaml index 14a82bcac..5271be332 100644 --- a/.github/ISSUE_TEMPLATE/regression.md +++ b/.github/ISSUE_TEMPLATE/regression.yaml @@ -1,6 +1,6 @@ --- name: 💥 Regression Report -about: Report unexpected behavior that worked in previous versions +description: Report unexpected behavior that worked in previous versions labels: ["Category: Bug"] body: - type: markdown @@ -90,4 +90,3 @@ body: id: env-info attributes: label: Any other relevant environment information. 
---- From 768ba3d8aedd2264aed0b5ba1afa63f37bf9ad17 Mon Sep 17 00:00:00 2001 From: Rami <72725910+ramikg@users.noreply.github.com> Date: Thu, 18 Apr 2024 22:31:21 +0300 Subject: [PATCH 340/647] Add `Date` to `DateMath` type (#2208) Co-authored-by: Josh Mock --- src/api/types.ts | 2 +- src/api/typesWithBodyKey.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/types.ts b/src/api/types.ts index f6b83fd75..31cfb5e5d 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2044,7 +2044,7 @@ export type DataStreamNames = DataStreamName | DataStreamName[] export type DateFormat = string -export type DateMath = string +export type DateMath = string | Date export type DateTime = string | EpochTime diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 22af6bb62..901559cab 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -2117,7 +2117,7 @@ export type DataStreamNames = DataStreamName | DataStreamName[] export type DateFormat = string -export type DateMath = string +export type DateMath = string | Date export type DateTime = string | EpochTime From b65e468b954a8a3fbaa3880c9634981e6a28baf3 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 24 Apr 2024 08:47:07 -0500 Subject: [PATCH 341/647] Move make.sh to .github (#2236) --- {.buildkite => .github}/make.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename {.buildkite => .github}/make.sh (99%) diff --git a/.buildkite/make.sh b/.github/make.sh similarity index 99% rename from .buildkite/make.sh rename to .github/make.sh index ae54050ae..4c32a77b4 100755 --- a/.buildkite/make.sh +++ b/.github/make.sh @@ -3,7 +3,7 @@ # # Build entry script for elasticsearch-js # -# Must be called: ./.buildkite/make.sh +# Must be called: ./.github/make.sh # # Version: 1.1.0 # From 45e3c0657ab8ff1db8c7f74be9a19a6a0549b971 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 30 Apr 2024 13:17:45 -0500 Subject: [PATCH 342/647] Update asStream code example (#2242) https://github.com/elastic/elasticsearch-js/issues/2241 notes that there is no body attribute on a response. This is mostly just a typo in the example, as `result` itself is a readable stream, unless `meta: true` is passed, in which case `result.body` will be a readable stream. Also dropped the callback-style stream processing example as it's a bit outdated. --- docs/examples/asStream.asciidoc | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/docs/examples/asStream.asciidoc b/docs/examples/asStream.asciidoc index e77025fcf..e27c0a1b1 100644 --- a/docs/examples/asStream.asciidoc +++ b/docs/examples/asStream.asciidoc @@ -1,7 +1,7 @@ [[as_stream_examples]] === asStream -Instead of getting the parsed body back, you will get the raw Node.js stream of +Instead of getting the parsed body back, you will get the raw Node.js stream of data. 
[source,js] @@ -57,28 +57,18 @@ async function run () { asStream: true }) - // stream async iteration, available in Node.js ≥ 10 let payload = '' - body.setEncoding('utf8') + result.setEncoding('utf8') for await (const chunk of result) { payload += chunk } console.log(JSON.parse(payload)) - - // classic stream callback style - let payload = '' - result.setEncoding('utf8') - result.on('data', chunk => { payload += chunk }) - result.on('error', console.log) - result.on('end', () => { - console.log(JSON.parse(payload)) - }) } run().catch(console.log) ---- -TIP: This can be useful if you need to pipe the {es}'s response to a proxy, or +TIP: This can be useful if you need to pipe the {es}'s response to a proxy, or send it directly to another source. [source,js] From 896216860fe6bbcdb24d538a0f212bb0f3650518 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 6 May 2024 12:30:47 -0500 Subject: [PATCH 343/647] ES|QL: Object API helper (#2238) * ESQL toRecord helper * ESQL helper tests * Add ESQL object API helper to client meta header * Add docstring for toRecords * Include column metadata in toRecords helper * Add docs for ESQL toRecords helper * Verify columns in helper return object --- docs/helpers.asciidoc | 94 +++++++++++++++++++++++++++ src/helpers.ts | 66 +++++++++++++++++++ test/unit/helpers/esql.test.ts | 113 +++++++++++++++++++++++++++++++++ 3 files changed, 273 insertions(+) create mode 100644 test/unit/helpers/esql.test.ts diff --git a/docs/helpers.asciidoc b/docs/helpers.asciidoc index 2ecf8cd30..fa8394a9d 100644 --- a/docs/helpers.asciidoc +++ b/docs/helpers.asciidoc @@ -613,3 +613,97 @@ for await (const doc of scrollSearch) { console.log(doc) } ---- + +[discrete] +[[esql-helper]] +=== ES|QL helper + +ES|QL queries can return their results in {ref}/esql-rest.html#esql-rest-format[several formats]. +The default JSON format returned by ES|QL queries contains arrays of values +for each row, with column names and types returned separately: + +[discrete] +==== Usage + +[discrete] +===== `toRecords` + +~Added~ ~in~ ~`v8.14.0`~ + +The default JSON format returned by ES|QL queries contains arrays of values +for each row, with column names and types returned separately: + +[source,json] +---- +{ + "columns": [ + { "name": "@timestamp", "type": "date" }, + { "name": "client_ip", "type": "ip" }, + { "name": "event_duration", "type": "long" }, + { "name": "message", "type": "keyword" } + ], + "values": [ + [ + "2023-10-23T12:15:03.360Z", + "172.21.2.162", + 3450233, + "Connected to 10.1.0.3" + ], + [ + "2023-10-23T12:27:28.948Z", + "172.21.2.113", + 2764889, + "Connected to 10.1.0.2" + ] + ] +} +---- + +In many cases, it's preferable to operate on an array of objects, one object per row, +rather than an array of arrays. The ES|QL `toRecords` helper converts row data into objects. 
+ +[source,js] +---- +await client.helpers + .esql({ query: 'FROM sample_data | LIMIT 2' }) + .toRecords() +// => +// { +// "columns": [ +// { "name": "@timestamp", "type": "date" }, +// { "name": "client_ip", "type": "ip" }, +// { "name": "event_duration", "type": "long" }, +// { "name": "message", "type": "keyword" } +// ], +// "records": [ +// { +// "@timestamp": "2023-10-23T12:15:03.360Z", +// "client_ip": "172.21.2.162", +// "event_duration": 3450233, +// "message": "Connected to 10.1.0.3" +// }, +// { +// "@timestamp": "2023-10-23T12:27:28.948Z", +// "client_ip": "172.21.2.113", +// "event_duration": 2764889, +// "message": "Connected to 10.1.0.2" +// }, +// ] +// } +---- + +In TypeScript, you can declare the type that `toRecords` returns: + +[source,ts] +---- +type EventLog = { + '@timestamp': string, + client_ip: string, + event_duration: number, + message: string, +} + +const result = await client.helpers + .esql({ query: 'FROM sample_data | LIMIT 2' }) + .toRecords() +---- diff --git a/src/helpers.ts b/src/helpers.ts index 94d59b062..39b1d6bba 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -139,6 +139,29 @@ export interface BulkHelper extends Promise { readonly stats: BulkStats } +export interface EsqlColumn { + name: string + type: string +} + +export type EsqlValue = any[] + +export type EsqlRow = EsqlValue[] + +export interface EsqlResponse { + columns: EsqlColumn[] + values: EsqlRow[] +} + +export interface EsqlHelper { + toRecords: () => Promise> +} + +export interface EsqlToRecords { + columns: EsqlColumn[] + records: TDocument[] +} + const { ResponseError, ConfigurationError } = errors const sleep = promisify(setTimeout) const pImmediate = promisify(setImmediate) @@ -935,6 +958,49 @@ export default class Helpers { } } } + + /** + * Creates an ES|QL helper instance, to help transform the data returned by an ES|QL query into easy-to-use formats. + * @param {object} params - Request parameters sent to esql.query() + * @returns {object} EsqlHelper instance + */ + esql (params: T.EsqlQueryRequest, reqOptions: TransportRequestOptions = {}): EsqlHelper { + if (this[kMetaHeader] !== null) { + reqOptions.headers = reqOptions.headers ?? {} + reqOptions.headers['x-elastic-client-meta'] = `${this[kMetaHeader] as string},h=qo` + } + + const client = this[kClient] + + function toRecords (response: EsqlResponse): TDocument[] { + const { columns, values } = response + return values.map(row => { + const doc: Partial = {} + row.forEach((cell, index) => { + const { name } = columns[index] + // @ts-expect-error + doc[name] = cell + }) + return doc as TDocument + }) + } + + const helper: EsqlHelper = { + /** + * Pivots ES|QL query results into an array of row objects, rather than the default format where each row is an array of values. + */ + async toRecords(): Promise> { + params.format = 'json' + // @ts-expect-error it's typed as ArrayBuffer but we know it will be JSON + const response: EsqlResponse = await client.esql.query(params, reqOptions) + const records: TDocument[] = toRecords(response) + const { columns } = response + return { records, columns } + } + } + + return helper + } } // Using a getter will improve the overall performances of the code, diff --git a/test/unit/helpers/esql.test.ts b/test/unit/helpers/esql.test.ts new file mode 100644 index 000000000..b029e1323 --- /dev/null +++ b/test/unit/helpers/esql.test.ts @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. 
See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { test } from 'tap'
+import { connection } from '../../utils'
+import { Client } from '../../../'
+
+test('ES|QL helper', t => {
+  test('toRecords', t => {
+    t.test('Takes an ESQL response and pivots it to an array of records', async t => {
+      type MyDoc = {
+        '@timestamp': string,
+        client_ip: string,
+        event_duration: number,
+        message: string,
+      }
+
+      const MockConnection = connection.buildMockConnection({
+        onRequest (_params) {
+          return {
+            body: {
+              columns: [
+                { name: '@timestamp', type: 'date' },
+                { name: 'client_ip', type: 'ip' },
+                { name: 'event_duration', type: 'long' },
+                { name: 'message', type: 'keyword' }
+              ],
+              values: [
+                [
+                  '2023-10-23T12:15:03.360Z',
+                  '172.21.2.162',
+                  3450233,
+                  'Connected to 10.1.0.3'
+                ],
+                [
+                  '2023-10-23T12:27:28.948Z',
+                  '172.21.2.113',
+                  2764889,
+                  'Connected to 10.1.0.2'
+                ]
+              ]
+            }
+          }
+        }
+      })
+
+      const client = new Client({
+        node: 'http://localhost:9200/',
+        Connection: MockConnection
+      })
+
+      const result = await client.helpers.esql({ query: 'FROM sample_data' }).toRecords<MyDoc>()
+      const { records, columns } = result
+      t.equal(records.length, 2)
+      t.ok(records[0])
+      t.same(records[0], {
+        '@timestamp': '2023-10-23T12:15:03.360Z',
+        client_ip: '172.21.2.162',
+        event_duration: 3450233,
+        message: 'Connected to 10.1.0.3'
+      })
+      t.same(columns, [
+        { name: '@timestamp', type: 'date' },
+        { name: 'client_ip', type: 'ip' },
+        { name: 'event_duration', type: 'long' },
+        { name: 'message', type: 'keyword' }
+      ])
+      t.end()
+    })
+
+    t.test('ESQL helper uses correct x-elastic-client-meta helper value', async t => {
+      const MockConnection = connection.buildMockConnection({
+        onRequest (params) {
+          const header = params.headers?.['x-elastic-client-meta'] ??
'' + t.ok(header.includes('h=qo'), `Client meta header does not include ESQL helper value: ${header}`) + return { + body: { + columns: [{ name: '@timestamp', type: 'date' }], + values: [['2023-10-23T12:15:03.360Z']], + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + await client.helpers.esql({ query: 'FROM sample_data' }).toRecords() + t.end() + }) + + t.end() + }) + t.end() +}) From b9ea8f8906965f32dc1fa14f9fea732540260247 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 6 May 2024 14:33:03 -0500 Subject: [PATCH 344/647] Add ES|QL helper note to changelog (#2249) --- docs/changelog.asciidoc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index cb0ddb673..8c5b76c91 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -7,6 +7,12 @@ [discrete] ==== Features +[discrete] +===== ES|QL object API helper + +A helper method has been added that parses the response of an ES|QL query and converts it into an array of objects. +A TypeScript type parameter can also be provided to improve developer experience when working with the result. https://github.com/elastic/elasticsearch-js/pull/2238[#2238] + [discrete] ===== `onSuccess` callback added to bulk helper @@ -461,7 +467,7 @@ client.search({ params }, { options }, (err, result) => { client.search({ params }, { options }) .then(console.log) .catch(console.log) - + // async-style (sugar syntax on top of promises) const response = await client.search({ params }, { options }) console.log(response) @@ -621,7 +627,7 @@ If you weren't extending the internals of the client, this won't be a breaking c *Breaking: Yes* | *Migration effort: Medium* -Currently, every path or query parameter could be expressed in both `snake_case` and `camelCase`. Internally the client will convert everything to `snake_case`. +Currently, every path or query parameter could be expressed in both `snake_case` and `camelCase`. Internally the client will convert everything to `snake_case`. This was done in an effort to reduce the friction of migrating from the legacy to the new client, but now it no longer makes sense. If you are already using `snake_case` keys, this won't be a breaking change for you. 
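The changelog entry above can be summed up with a short sketch. The `logs-sample`
source and the record shape below are hypothetical; the `esql`/`toRecords` API is
the one introduced in the previous patch:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Hypothetical shape of one row of the ES|QL result.
type LogRecord = {
  '@timestamp': string
  message: string
}

async function run (): Promise<void> {
  // toRecords() pivots the columnar ES|QL response into an array of
  // objects; the type parameter types each record for the caller.
  const { columns, records } = await client.helpers
    .esql({ query: 'FROM logs-sample | LIMIT 10' })
    .toRecords<LogRecord>()

  console.log(columns.map(c => c.name))
  for (const record of records) {
    // Property access is type-checked against LogRecord.
    console.log(`${record['@timestamp']}: ${record.message}`)
  }
}

run().catch(console.log)
----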
From fe2d8c1915f7405cb16c33a2d4e9668b71be8cb8 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Mon, 6 May 2024 15:22:41 -0500
Subject: [PATCH 345/647] Add Node.js 22 to unit test matrix (#2251)

---
 .buildkite/pipeline.yml      | 3 ++-
 .github/workflows/nodejs.yml | 6 +++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index e27888106..4dc60e227 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -15,6 +15,7 @@ steps:
           nodejs:
             - "18"
             - "20"
+            - "22"
       command: ./.buildkite/run-tests.sh
       artifact_paths: "./junit-output/junit-*.xml"
   - wait: ~
@@ -26,6 +27,6 @@ steps:
     plugins:
       - junit-annotate#v2.4.1:
           artifacts: "junit-output/junit-*.xml"
-          job-uuid-file-pattern: 'junit-(.*).xml'
+          job-uuid-file-pattern: "junit-(.*).xml"
          fail-build-on-error: true
          failure-format: file
diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml
index 132294174..7511a0033 100644
--- a/.github/workflows/nodejs.yml
+++ b/.github/workflows/nodejs.yml
@@ -9,7 +9,7 @@ jobs:
     name: Detect files changed
     runs-on: ubuntu-latest
     outputs:
-      src-only: '${{ steps.changes.outputs.src-only }}'
+      src-only: "${{ steps.changes.outputs.src-only }}"
     steps:
       - uses: actions/checkout@v4
       - uses: dorny/paths-filter/@v2.11.1
@@ -30,7 +30,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        node-version: [18.x, 20.x]
+        node-version: [18.x, 20.x, 22.x]
         os: [ubuntu-latest, windows-latest, macOS-latest]

     steps:
@@ -66,7 +66,7 @@ jobs:

     strategy:
       matrix:
-        node-version: [20.x]
+        node-version: [22.x]

     steps:
       - uses: actions/checkout@v3
From b250049ee7b53d20ceeea2da96cb01af0602c900 Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Thu, 9 May 2024 20:50:16 +0100
Subject: [PATCH 346/647] Auto-generated code for main (#2239)

---
 docs/reference.asciidoc     | 145 ++++---
 src/api/api/cluster.ts      |   2 +-
 src/api/api/esql.ts         |   8 +-
 src/api/api/indices.ts      |  40 +-
 src/api/api/inference.ts    |  42 +-
 src/api/types.ts            | 709 +++++++++++++++++++++++++++++----
 src/api/typesWithBodyKey.ts | 766 +++++++++++++++++++++++++++++++-----
 7 files changed, 1437 insertions(+), 275 deletions(-)

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index 556db3178..cc6343565 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -419,6 +419,9 @@ client.get({ id, index })
 
 * *Request (object):*
 ** *`id` (string)*: Unique identifier of the document.
 ** *`index` (string)*: Name of the index that contains the document.
+** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic _source?
+Use this to test if the mapping supports synthetic _source and to get a sense of the worst-case performance.
+Fetches with this enabled will be slower than enabling synthetic source natively in the index.
 ** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default.
 ** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time.
 ** *`refresh` (Optional, boolean)*: If true, Elasticsearch refreshes the affected shards to make this operation visible to search. If false, do nothing with refreshes.
@@ -607,6 +610,9 @@ client.mget({ ... })
 ** *`index` (Optional, string)*: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index.
 ** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: The documents you want to retrieve.
Required if no index is specified in the request URI.
 ** *`ids` (Optional, string | string[])*: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI.
+** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic _source?
+Use this to test if the mapping supports synthetic _source and to get a sense of the worst-case performance.
+Fetches with this enabled will be slower than enabling synthetic source natively in the index.
 ** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default.
 ** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time.
 ** *`refresh` (Optional, boolean)*: If `true`, the request refreshes relevant shards before retrieving documents.
@@ -928,7 +934,7 @@ A post filter has no impact on the aggregation results.
 ** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request.
 NOTE: This is a debugging tool and adds significant overhead to search execution.
 ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
-** *`rescore` (Optional, { query, window_size } | { query, window_size }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases.
+** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases.
 ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit.
 ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page.
 ** *`size` (Optional, number)*: The number of hits to return.
@@ -1031,6 +1037,9 @@ You can exclude fields from this subset using the `_source_excludes` query param
 If the `_source` parameter is `false`, this parameter is ignored.
 ** *`q` (Optional, string)*: Query in the Lucene query string syntax using query parameter search.
 Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.
+** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic _source?
+Use this to test if the mapping supports synthetic _source and to get a sense of the worst-case performance.
+Fetches with this enabled will be slower than enabling synthetic source natively in the index.
 
 [discrete]
 === search_mvt
@@ -1437,7 +1446,7 @@ not included in the search results.
** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* ** *`profile` (Optional, boolean)* ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`rescore` (Optional, { query, window_size } | { query, window_size }[])* +** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* ** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more @@ -2462,10 +2471,6 @@ Elastic Agent uses these templates to configure backing indices for its data str If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. ** *`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })*: The template to be applied which includes mappings, settings, or aliases configuration. -** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting. -If set to `true` in a template, then indices can be automatically created using that -template even if auto-creation of indices is disabled via `actions.auto_create_index`. -If set to `false` then data streams matching the template must always be explicitly created. ** *`version` (Optional, number)*: Version number used to manage component templates externally. This number isn't automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version. @@ -2473,6 +2478,8 @@ To unset a version, replace the template without specifying a version. May have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. 
To unset `_meta`, replace the template without specifying this information. +** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template +that uses deprecated components, Elasticsearch will emit a deprecation warning. ** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing component templates. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -2818,6 +2825,7 @@ client.esql.query({ query }) ** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. ** *`locale` (Optional, string)* ** *`params` (Optional, number | number | string | boolean | null[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +** *`version` (Optional, Enum("2024.04.01"))*: The version of the ES|QL language in which the "query" field was written. ** *`format` (Optional, string)*: A short version of the Accept header, e.g. json, yaml. ** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format. @@ -2936,7 +2944,7 @@ not included in the search results. 
** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* ** *`profile` (Optional, boolean)* ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`rescore` (Optional, { query, window_size } | { query, window_size }[])* +** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* ** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more @@ -3266,12 +3274,12 @@ If no index is specified or the index does not have a default analyzer, the anal ** *`analyzer` (Optional, string)*: The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index. ** *`attributes` (Optional, string[])*: Array of token attributes used to filter the output of the `explain` parameter. -** *`char_filter` (Optional, string | { type } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])*: Array of character filters used to preprocess characters before the tokenizer. +** *`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])*: Array of character filters used to preprocess characters before the tokenizer. ** *`explain` (Optional, boolean)*: If `true`, the response includes token attributes and additional details. ** *`field` (Optional, string)*: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. 
-** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer. 
+** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer. ** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token. ** *`text` (Optional, string | string[])*: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. @@ -3386,7 +3394,7 @@ client.indices.create({ index }) If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). [discrete] @@ -4206,7 +4214,17 @@ This number is not automatically generated by Elasticsearch. 
** *`_meta` (Optional, Record)*: Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch. +** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting. +If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. +If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. +** *`ignore_missing_component_templates` (Optional, string[])*: The configuration option ignore_missing_component_templates can be used when an index template +references a component template that might not exist +** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template +that uses deprecated components, Elasticsearch will emit a deprecation warning. ** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing index templates. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`cause` (Optional, string)*: User defined reason for creating/updating the index template [discrete] ==== put_mapping @@ -4234,7 +4252,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. ** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. -** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: +** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -4319,11 +4337,9 @@ Templates with lower 'order' values are merged first. Templates with higher ** *`version` (Optional, number)*: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. ** *`create` (Optional, boolean)*: If true, this request cannot replace or update existing index templates. -** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. +** *`cause` (Optional, string)* [discrete] ==== recovery @@ -4563,7 +4579,26 @@ client.indices.simulateIndexTemplate({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: Index or template name to simulate +** *`name` (string)*: Name of the index to simulate +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. 
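+
+For illustration, a minimal invocation might look like the following sketch (the
+index name is hypothetical):
+
+[source,ts]
+----
+// Preview the settings, mappings, and aliases that would apply to an
+// index with this name, without actually creating the index.
+const simulation = await client.indices.simulateIndexTemplate({
+  name: 'my-index-000001',
+  include_defaults: true
+})
+console.log(simulation)
+----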
+ +[discrete] +==== simulate_template +Simulate resolving the given template name or body + +{ref}/indices-simulate-template.html[Endpoint documentation] +[source,ts] +---- +client.indices.simulateTemplate({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string)*: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit +this parameter and specify the template configuration in the request body. ** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. @@ -4584,32 +4619,10 @@ This number is not automatically generated by Elasticsearch. ** *`_meta` (Optional, Record)*: Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch. -** *`create` (Optional, boolean)*: If `true`, the template passed in the body is only used if no existing -templates match the same index patterns. If `false`, the simulation uses -the template with the highest priority. Note that the template is not -permanently added or updated in either case; it is only used for the -simulation. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received -before the timeout expires, the request fails and returns an error. -** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. - -[discrete] -==== simulate_template -Simulate resolving the given template name or body - -{ref}/indices-simulate-template.html[Endpoint documentation] -[source,ts] ----- -client.indices.simulateTemplate({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string)*: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit -this parameter and specify the template configuration in the request body. -** *`template` (Optional, { index_patterns, composed_of, template, version, priority, _meta, allow_auto_create, data_stream })* +** *`ignore_missing_component_templates` (Optional, string[])*: The configuration option ignore_missing_component_templates can be used when an index template +references a component template that might not exist +** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template +that uses deprecated components, Elasticsearch will emit a deprecation warning. ** *`create` (Optional, boolean)*: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. @@ -4756,13 +4769,13 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] === inference [discrete] -==== delete_model -Delete model in the Inference API +==== delete +Delete an inference endpoint {ref}/delete-inference-api.html[Endpoint documentation] [source,ts] ---- -client.inference.deleteModel({ inference_id }) +client.inference.delete({ inference_id }) ---- [discrete] @@ -4770,28 +4783,28 @@ client.inference.deleteModel({ inference_id }) * *Request (object):* ** *`inference_id` (string)*: The inference Id -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding"))*: The task type +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type [discrete] -==== get_model -Get a model in the Inference API +==== get +Get an inference endpoint {ref}/get-inference-api.html[Endpoint documentation] [source,ts] ---- -client.inference.getModel({ inference_id }) +client.inference.get({ ... }) ---- [discrete] ==== Arguments * *Request (object):* -** *`inference_id` (string)*: The inference Id -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding"))*: The task type +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type +** *`inference_id` (Optional, string)*: The inference Id [discrete] ==== inference -Perform inference on a model +Perform inference {ref}/post-inference-api.html[Endpoint documentation] [source,ts] @@ -4804,19 +4817,22 @@ client.inference.inference({ inference_id, input }) * *Request (object):* ** *`inference_id` (string)*: The inference Id -** *`input` (string | string[])*: Text input to the model. +** *`input` (string | string[])*: Inference input. Either a string or an array of strings. -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding"))*: The task type +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type +** *`query` (Optional, string)*: Query input, required for rerank task. +Not required for other tasks. ** *`task_settings` (Optional, User-defined value)*: Optional task settings +** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete. [discrete] -==== put_model -Configure a model for use in the Inference API +==== put +Configure an inference endpoint for use in the Inference API {ref}/put-inference-api.html[Endpoint documentation] [source,ts] ---- -client.inference.putModel({ inference_id }) +client.inference.put({ inference_id }) ---- [discrete] @@ -4824,8 +4840,8 @@ client.inference.putModel({ inference_id }) * *Request (object):* ** *`inference_id` (string)*: The inference Id -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding"))*: The task type -** *`model_config` (Optional, { service, service_settings, task_settings })* +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type +** *`inference_config` (Optional, { service, service_settings, task_settings })* [discrete] === ingest @@ -6437,7 +6453,11 @@ or `windows-x86_64`. For portable models (those that work independent of process architecture or OS features), leave this field unset. ** *`tags` (Optional, string[])*: An array of tags to organize the model. 
** *`prefix_strings` (Optional, { ingest, search })*: Optional prefix strings applied at inference -** *`defer_definition_decompression` (Optional, boolean)*: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. +** *`defer_definition_decompression` (Optional, boolean)*: If set to `true` and a `compressed_definition` is provided, +the request defers definition decompression and skips relevant +validations. +** *`wait_for_completion` (Optional, boolean)*: Whether to wait for all child operations (e.g. model download) +to complete. [discrete] ==== put_trained_model_alias @@ -7927,6 +7947,7 @@ associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors. ** *`active_only` (Optional, boolean)*: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. +** *`with_profile_uid` (Optional, boolean)*: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. [discrete] ==== get_builtin_privileges @@ -8131,7 +8152,7 @@ client.security.hasPrivileges({ ... }) * *Request (object):* ** *`user` (Optional, string)*: Username ** *`application` (Optional, { application, privileges, resources }[])* -** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "grant_api_key" | "manage" | "manage_api_key" | "manage_ccr" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "read_ccr" | "read_ilm" | "read_pipeline" | "read_slm" | "transport_client")[])*: A list of the cluster privileges that you want to check. 
+** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check. ** *`index` (Optional, { names, privileges, allow_restricted_indices }[])* [discrete] @@ -8245,7 +8266,7 @@ client.security.putRole({ name }) * *Request (object):* ** *`name` (string)*: The name of the role. ** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries. -** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "grant_api_key" | "manage" | "manage_api_key" | "manage_ccr" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "read_ccr" | "read_ilm" | "read_pipeline" | "read_slm" | "transport_client")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. 
+** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. ** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. ** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries. ** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. @@ -8336,6 +8357,8 @@ To page through more hits, use the `search_after` parameter. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Search after definition ** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors. +** *`with_profile_uid` (Optional, boolean)*: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. +** *`typed_keys` (Optional, boolean)*: Determines whether aggregation names are prefixed by their respective types in the response. 
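+
+As a sketch of how these flags combine (the key name pattern is hypothetical):
+
+[source,ts]
+----
+// Find API keys whose name starts with "app-", including the owner's
+// limited-by role descriptors and profile uid in each returned key.
+const response = await client.security.queryApiKeys({
+  query: { prefix: { name: 'app-' } },
+  with_limited_by: true,
+  with_profile_uid: true
+})
+console.log(response.api_keys)
+----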
[discrete] ==== saml_authenticate diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 59007a12c..1c1b9c8d1 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -345,7 +345,7 @@ export default class Cluster { async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['allow_auto_create', 'template', 'version', '_meta'] + const acceptedBody: string[] = ['template', 'version', '_meta', 'deprecated'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index 4d211a14c..a4029c1de 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -52,7 +52,7 @@ export default class Esql { async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'query'] + const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'query', 'version'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -63,6 +63,12 @@ export default class Esql { body = userBody != null ? { ...userBody } : undefined } + // a version number is required for all ES|QL queries. + // inject a default value if none is provided. + if (typeof body === 'object' && body.version == null) { + body.version = '2024.04.01' + } + for (const key in params) { if (acceptedBody.includes(key)) { body = body ?? 
{} diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 128f0a5bb..3b90bc00f 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -1271,7 +1271,7 @@ export default class Indices { async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta'] + const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'allow_auto_create', 'ignore_missing_component_templates', 'deprecated'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1720,23 +1720,11 @@ export default class Indices { async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['allow_auto_create', 'index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body = undefined for (const key in params) { - if (acceptedBody.includes(key)) { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { // @ts-expect-error @@ -1753,20 +1741,28 @@ export default class Indices { * Simulate resolving the given template name or body * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-template.html | Elasticsearch API documentation} */ - async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise - async simulateTemplate (this: That, params: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise { + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['template'] + const acceptedBody: string[] = ['allow_auto_create', 'index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'ignore_missing_component_templates', 'deprecated'] const querystring: Record = {} // @ts-expect-error - let body: any = params.body ?? undefined + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error - body = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index e02487067..16dc527a1 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -44,13 +44,13 @@ export default class Inference { } /** - * Delete model in the Inference API + * Delete an inference endpoint * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html | Elasticsearch API documentation} */ - async deleteModel (this: That, params: T.InferenceDeleteModelRequest | TB.InferenceDeleteModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteModel (this: That, params: T.InferenceDeleteModelRequest | TB.InferenceDeleteModelRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteModel (this: That, params: T.InferenceDeleteModelRequest | TB.InferenceDeleteModelRequest, options?: TransportRequestOptions): Promise - async deleteModel (this: That, params: T.InferenceDeleteModelRequest | TB.InferenceDeleteModelRequest, options?: TransportRequestOptions): Promise { + async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_type', 'inference_id'] const querystring: Record = {} const body = undefined @@ -77,17 +77,18 @@ export default class Inference { } /** - * Get a model in the Inference API + * Get an inference endpoint * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html | Elasticsearch API documentation} */ - async getModel (this: That, params: T.InferenceGetModelRequest | TB.InferenceGetModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getModel (this: That, params: T.InferenceGetModelRequest | TB.InferenceGetModelRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getModel (this: That, params: T.InferenceGetModelRequest | TB.InferenceGetModelRequest, options?: TransportRequestOptions): Promise - async getModel (this: That, params: T.InferenceGetModelRequest | TB.InferenceGetModelRequest, options?: TransportRequestOptions): Promise { + async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_type', 'inference_id'] const querystring: Record = {} const body = undefined + params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue @@ -102,15 +103,18 @@ export default class Inference { if (params.task_type != null && params.inference_id != null) { method = 'GET' path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}` - } else { + } else if (params.inference_id != null) { method = 'GET' path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` + } else { + method = 'GET' + path = '/_inference' } return await this.transport.request({ path, method, querystring, body }, options) } /** - * Perform inference on a model + * Perform inference * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html | Elasticsearch API documentation} */ async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -118,7 +122,7 @@ export default class Inference { async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['input', 'task_settings'] + const acceptedBody: string[] = ['query', 'input', 'task_settings'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -155,15 +159,15 @@ export default class Inference { } /** - * Configure a model for use in the Inference API + * Configure an inference endpoint for use in the Inference API * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html | Elasticsearch API documentation} */ - async putModel (this: That, params: T.InferencePutModelRequest | TB.InferencePutModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putModel (this: That, params: T.InferencePutModelRequest | TB.InferencePutModelRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putModel (this: That, params: T.InferencePutModelRequest | TB.InferencePutModelRequest, options?: TransportRequestOptions): Promise - async putModel (this: That, params: T.InferencePutModelRequest | TB.InferencePutModelRequest, options?: TransportRequestOptions): Promise { + async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptions): Promise + async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['model_config'] + const acceptedBody: string[] = ['inference_config'] const querystring: Record = {} // @ts-expect-error let body: any = params.body ?? 
undefined diff --git a/src/api/types.ts b/src/api/types.ts index 31cfb5e5d..bde49c300 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -373,6 +373,7 @@ export interface GetGetResult { export interface GetRequest extends RequestBase { id: Id index: IndexName + force_synthetic_source?: boolean preference?: string realtime?: boolean refresh?: boolean @@ -686,6 +687,7 @@ export interface MgetOperation { export interface MgetRequest extends RequestBase { index?: IndexName + force_synthetic_source?: boolean preference?: string realtime?: boolean refresh?: boolean @@ -722,7 +724,7 @@ export interface MsearchMultisearchBody { ext?: Record stored_fields?: Fields docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery | KnnQuery[] + knn?: KnnSearch | KnnSearch[] from?: integer highlight?: SearchHighlight indices_boost?: Record[] @@ -1146,6 +1148,7 @@ export interface SearchRequest extends RequestBase { _source_excludes?: Fields _source_includes?: Fields q?: string + force_synthetic_source?: boolean aggregations?: Record /** @alias aggregations */ aggs?: Record @@ -1157,7 +1160,7 @@ export interface SearchRequest extends RequestBase { track_total_hits?: SearchTrackHits indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery | KnnQuery[] + knn?: KnnSearch | KnnSearch[] rank?: RankContainer min_score?: double post_filter?: QueryDslQueryContainer @@ -1462,6 +1465,11 @@ export interface SearchLaplaceSmoothingModel { alpha: double } +export interface SearchLearningToRank { + model_id: string + params?: Record +} + export interface SearchLinearInterpolationSmoothingModel { bigram_lambda: double trigram_lambda: double @@ -1561,8 +1569,9 @@ export interface SearchRegexOptions { } export interface SearchRescore { - query: SearchRescoreQuery window_size?: integer + query?: SearchRescoreQuery + learning_to_rank?: SearchLearningToRank } export interface SearchRescoreQuery { @@ -1954,10 +1963,14 @@ export interface SpecUtilsBaseNode { transport_address: TransportAddress } +export type SpecUtilsNullValue = null + export type SpecUtilsPipeSeparatedFlags = T | string export type SpecUtilsStringified = T | string +export type SpecUtilsWithNullValue = T | SpecUtilsNullValue + export interface AcknowledgedResponseBase { acknowledged: boolean } @@ -2046,7 +2059,7 @@ export type DateFormat = string export type DateMath = string | Date -export type DateTime = string | EpochTime +export type DateTime = string | EpochTime | Date export type Distance = string @@ -2277,12 +2290,21 @@ export interface InlineScript extends ScriptBase { export type Ip = string -export interface KnnQuery { +export interface KnnQuery extends QueryDslQueryBase { field: Field query_vector?: QueryVector query_vector_builder?: QueryVectorBuilder - k: long - num_candidates: long + num_candidates?: long + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + similarity?: float +} + +export interface KnnSearch { + field: Field + query_vector?: QueryVector + query_vector_builder?: QueryVectorBuilder + k?: long + num_candidates?: long boost?: float filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float @@ -2405,14 +2427,14 @@ export interface PluginStats { export type PropertyName = string export interface QueryCacheStats { - cache_count: integer - cache_size: integer - evictions: integer - hit_count: integer + cache_count: long + cache_size: long + evictions: long + hit_count: long memory_size?: ByteSize memory_size_in_bytes: long - miss_count: integer - total_count: 
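For orientation, a minimal sketch of how the renamed inference.* methods from the hunks above might be called; the endpoint id, service name, and settings are hypothetical, and the import assumes the published client package rather than this repository's internals:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function inferenceEndpointLifecycle (): Promise<void> {
  // putModel() is now put(), and the body key is inference_config
  await client.inference.put({
    task_type: 'text_embedding',
    inference_id: 'my-endpoint',             // hypothetical endpoint id
    inference_config: {
      service: 'my-service',                 // hypothetical service name
      service_settings: {},
      task_settings: {}
    }
  })

  // getModel() is now get(); omitting inference_id lists every endpoint
  const { endpoints } = await client.inference.get()
  console.log(endpoints.map(e => e.inference_id))

  // deleteModel() is now delete()
  await client.inference.delete({ inference_id: 'my-endpoint' })
}
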
integer + miss_count: long + total_count: long } export type QueryVector = float[] @@ -2736,9 +2758,9 @@ export interface WktGeoBounds { export interface WriteResponseBase { _id: Id _index: IndexName - _primary_term: long + _primary_term?: long result: Result - _seq_no: SequenceNumber + _seq_no?: SequenceNumber _shards: ShardStatistics _version: VersionNumber forced_refresh?: boolean @@ -2765,6 +2787,7 @@ export interface AggregationsAdjacencyMatrixAggregate extends AggregationsMultiB export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase { filters?: Record + separator?: string } export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMultiBucketBase { @@ -2782,8 +2805,6 @@ export interface AggregationsAggregateBase { export type AggregationsAggregateOrder = Partial> | Partial>[] export interface AggregationsAggregation { - meta?: Metadata - name?: string } export interface AggregationsAggregationContainer { @@ -2926,7 +2947,7 @@ export interface AggregationsBoxplotAggregation extends AggregationsMetricAggreg compression?: double } -export interface AggregationsBucketAggregationBase extends AggregationsAggregation { +export interface AggregationsBucketAggregationBase { } export interface AggregationsBucketCorrelationAggregation extends AggregationsBucketPathAggregation { @@ -2957,7 +2978,7 @@ export interface AggregationsBucketMetricValueAggregate extends AggregationsSing keys: string[] } -export interface AggregationsBucketPathAggregation extends AggregationsAggregation { +export interface AggregationsBucketPathAggregation { buckets_path?: AggregationsBucketsPath } @@ -2969,7 +2990,7 @@ export interface AggregationsBucketSelectorAggregation extends AggregationsPipel script?: Script } -export interface AggregationsBucketSortAggregation extends AggregationsAggregation { +export interface AggregationsBucketSortAggregation { from?: integer gap_policy?: AggregationsGapPolicy size?: integer @@ -2994,7 +3015,7 @@ export interface AggregationsCardinalityAggregation extends AggregationsMetricAg export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic' -export interface AggregationsCategorizeTextAggregation extends AggregationsAggregation { +export interface AggregationsCategorizeTextAggregation { field: Field max_unique_tokens?: integer max_matched_tokens?: integer @@ -3176,8 +3197,8 @@ export interface AggregationsEwmaMovingAverageAggregation extends AggregationsMo } export interface AggregationsExtendedBounds { - max: T - min: T + max?: T + min?: T } export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAggregate { @@ -3544,7 +3565,7 @@ export interface AggregationsLongTermsBucketKeys extends AggregationsTermsBucket export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys & { [property: string]: AggregationsAggregate | long | string } -export interface AggregationsMatrixAggregation extends AggregationsAggregation { +export interface AggregationsMatrixAggregation { fields?: Fields missing?: Record } @@ -4000,7 +4021,7 @@ export interface AggregationsTTestAggregate extends AggregationsAggregateBase { value_as_string?: string } -export interface AggregationsTTestAggregation extends AggregationsAggregation { +export interface AggregationsTTestAggregation { a?: AggregationsTestPopulation b?: AggregationsTestPopulation type?: AggregationsTTestType @@ -4060,8 +4081,9 @@ export interface 
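Since _seq_no and _primary_term become optional on WriteResponseBase in the hunk above, optimistic-concurrency callers should now guard them. A small defensive sketch (the relative import path is an assumption):

import * as T from './types'

// Extracts if_seq_no/if_primary_term for a follow-up conditional write,
// failing loudly when the response omitted them.
function concurrencyParams (res: T.WriteResponseBase): { if_seq_no: number, if_primary_term: number } {
  if (res._seq_no == null || res._primary_term == null) {
    throw new Error('write response did not include sequence number metadata')
  }
  return { if_seq_no: res._seq_no, if_primary_term: res._primary_term }
}
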
AggregationsTopHitsAggregate extends AggregationsAggregateBase } export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase { - docvalue_fields?: Fields + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] explain?: boolean + fields?: (QueryDslFieldAndFormat | Field)[] from?: integer highlight?: SearchHighlight script_fields?: Record @@ -4123,6 +4145,7 @@ export interface AggregationsVariableWidthHistogramAggregation { buckets?: integer shard_size?: integer initial_buffer?: integer + script?: Script } export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase { @@ -4136,7 +4159,7 @@ export interface AggregationsVariableWidthHistogramBucketKeys extends Aggregatio export type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidthHistogramBucketKeys & { [property: string]: AggregationsAggregate | double | string | long } -export interface AggregationsWeightedAverageAggregation extends AggregationsAggregation { +export interface AggregationsWeightedAverageAggregation { format?: string value?: AggregationsWeightedAverageValue value_type?: AggregationsValueType @@ -4272,6 +4295,7 @@ export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { type: 'html_strip' + escaped_tags?: string[] } export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { @@ -4677,6 +4701,7 @@ export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase lenient?: boolean synonyms?: string[] synonyms_path?: string + synonyms_set?: string tokenizer?: string updateable?: boolean } @@ -4688,6 +4713,7 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { lenient?: boolean synonyms?: string[] synonyms_path?: string + synonyms_set?: string tokenizer?: string updateable?: boolean } @@ -4876,6 +4902,7 @@ export interface MappingDenseVectorIndexOptions { export interface MappingDenseVectorProperty extends MappingPropertyBase { type: 'dense_vector' + element_type?: string dims?: integer similarity?: string index?: boolean @@ -4898,7 +4925,7 @@ export interface MappingDoubleRangeProperty extends MappingRangePropertyBase { export type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false' export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { - type: '{dynamic_property}' + type: '{dynamic_type}' enabled?: boolean null_value?: FieldValue boost?: double @@ -5157,7 +5184,7 @@ export interface MappingRuntimeFieldFetchFields { format?: string } -export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' | 'lookup' +export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' | 'lookup' export type MappingRuntimeFields = Record @@ -5656,7 +5683,6 @@ export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase { minimum_should_match?: MinimumShouldMatch min_term_freq?: integer min_word_length?: integer - per_field_analyzer?: Record routing?: Routing stop_words?: AnalysisStopWords unlike?: QueryDslLike | QueryDslLike[] @@ -5844,7 +5870,7 @@ export interface QueryDslRandomScoreFunction { seed?: long | string } -export type QueryDslRangeQuery = QueryDslDateRangeQuery | QueryDslNumberRangeQuery +export type QueryDslRangeQuery = QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermsRangeQuery export interface 
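A short sketch instantiating two of the additions above, the dense_vector element_type and the synonyms_set reference on synonym filters; the field values are illustrative and the relative import path is an assumption:

import * as T from './types'

const vectorField: T.MappingDenseVectorProperty = {
  type: 'dense_vector',
  element_type: 'byte',             // new optional field
  dims: 384,
  index: true,
  similarity: 'cosine'
}

const synonymFilter: T.AnalysisSynonymGraphTokenFilter = {
  type: 'synonym_graph',
  synonyms_set: 'my-synonyms-set'   // new: reference a stored synonyms set
}
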
QueryDslRangeQueryBase extends QueryDslQueryBase { relation?: QueryDslRangeRelation @@ -6019,6 +6045,15 @@ export type QueryDslTermsQuery = QueryDslTermsQueryKeys export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup +export interface QueryDslTermsRangeQuery extends QueryDslRangeQueryBase { + gt?: string + gte?: string + lt?: string + lte?: string + from?: string | null + to?: string | null +} + export interface QueryDslTermsSetQuery extends QueryDslQueryBase { minimum_should_match_field?: Field minimum_should_match_script?: Script @@ -6166,7 +6201,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { track_total_hits?: SearchTrackHits indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery | KnnQuery[] + knn?: KnnSearch | KnnSearch[] min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -8560,10 +8595,10 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { name: Name create?: boolean master_timeout?: Duration - allow_auto_create?: boolean template: IndicesIndexState version?: VersionNumber _meta?: Metadata + deprecated?: boolean } export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase @@ -8954,6 +8989,480 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { timestamp: long } +export interface ConnectorConnector { + api_key_id?: string + configuration: ConnectorConnectorConfiguration + custom_scheduling: ConnectorConnectorCustomScheduling + description?: string + error?: string + features?: ConnectorConnectorFeatures + filtering: ConnectorFilteringConfig[] + id?: Id + index_name?: IndexName + is_native: boolean + language?: string + last_access_control_sync_error?: string + last_access_control_sync_scheduled_at?: DateTime + last_access_control_sync_status?: ConnectorSyncStatus + last_deleted_document_count?: long + last_incremental_sync_scheduled_at?: DateTime + last_indexed_document_count?: long + last_seen?: DateTime + last_sync_error?: string + last_sync_scheduled_at?: DateTime + last_sync_status?: ConnectorSyncStatus + last_synced?: DateTime + name?: string + pipeline?: ConnectorIngestPipelineParams + scheduling: ConnectorSchedulingConfiguration + service_type: string + status: ConnectorConnectorStatus + sync_now: boolean +} + +export interface ConnectorConnectorConfigProperties { + category?: string + default_value: ScalarValue + depends_on: ConnectorDependency[] + display: ConnectorDisplayType + label: string + options: ConnectorSelectOption[] + order?: integer + placeholder?: string + required: boolean + sensitive: boolean + tooltip?: string + type: ConnectorConnectorFieldType + ui_restrictions: string[] + validations: ConnectorValidation[] + value: ScalarValue +} + +export type ConnectorConnectorConfiguration = Record + +export type ConnectorConnectorCustomScheduling = Record + +export interface ConnectorConnectorFeatures { + document_level_security?: ConnectorFeatureEnabled + filtering_advanced_config?: boolean + filtering_rules?: boolean + incremental_sync?: ConnectorFeatureEnabled + sync_rules?: ConnectorSyncRulesFeature +} + +export type ConnectorConnectorFieldType = 'str' | 'int' | 'list' | 'bool' + +export interface ConnectorConnectorScheduling { + enabled: boolean + interval: string +} + +export type ConnectorConnectorStatus = 'created' | 'needs_configuration' | 'configured' | 'connected' | 'error' + +export interface ConnectorConnectorSyncJob { + cancelation_requested_at?: DateTime + canceled_at?: DateTime 
+ completed_at?: DateTime + connector: ConnectorSyncJobConnectorReference + created_at: DateTime + deleted_document_count: long + error?: string + id: Id + indexed_document_count: long + indexed_document_volume: long + job_type: ConnectorSyncJobType + last_seen?: DateTime + metadata: Record + started_at?: DateTime + status: ConnectorSyncStatus + total_document_count: long + trigger_method: ConnectorSyncJobTriggerMethod + worker_hostname?: string +} + +export interface ConnectorCustomScheduling { + configuration_overrides: ConnectorCustomSchedulingConfigurationOverrides + enabled: boolean + interval: string + last_synced?: DateTime + name: string +} + +export interface ConnectorCustomSchedulingConfigurationOverrides { + max_crawl_depth?: integer + sitemap_discovery_disabled?: boolean + domain_allowlist?: string[] + sitemap_urls?: string[] + seed_urls?: string[] +} + +export interface ConnectorDependency { + field: string + value: ScalarValue +} + +export type ConnectorDisplayType = 'textbox' | 'textarea' | 'numeric' | 'toggle' | 'dropdown' + +export interface ConnectorFeatureEnabled { + enabled: boolean +} + +export interface ConnectorFilteringAdvancedSnippet { + created_at?: DateTime + updated_at?: DateTime + value: Record +} + +export interface ConnectorFilteringConfig { + active: ConnectorFilteringRules + domain: string + draft: ConnectorFilteringRules +} + +export type ConnectorFilteringPolicy = 'exclude' | 'include' + +export interface ConnectorFilteringRule { + created_at?: DateTime + field: Field + id: Id + order: integer + policy: ConnectorFilteringPolicy + rule: ConnectorFilteringRuleRule + updated_at?: DateTime + value: string +} + +export type ConnectorFilteringRuleRule = 'contains' | 'ends_with' | 'equals' | 'regex' | 'starts_with' | '>' | '<' + +export interface ConnectorFilteringRules { + advanced_snippet: ConnectorFilteringAdvancedSnippet + rules: ConnectorFilteringRule[] + validation: ConnectorFilteringRulesValidation +} + +export interface ConnectorFilteringRulesValidation { + errors: ConnectorFilteringValidation[] + state: ConnectorFilteringValidationState +} + +export interface ConnectorFilteringValidation { + ids: Id[] + messages: string[] +} + +export type ConnectorFilteringValidationState = 'edited' | 'invalid' | 'valid' + +export interface ConnectorGreaterThanValidation { + type: 'greater_than' + constraint: double +} + +export interface ConnectorIncludedInValidation { + type: 'included_in' + constraint: string +} + +export interface ConnectorIngestPipelineParams { + extract_binary_content: boolean + name: string + reduce_whitespace: boolean + run_ml_inference: boolean +} + +export interface ConnectorLessThanValidation { + type: 'less_than' + constraint: double +} + +export interface ConnectorListTypeValidation { + type: 'list_type' + constraint: ScalarValue[] +} + +export interface ConnectorRegexValidation { + type: 'regex' + constraint: string +} + +export interface ConnectorSchedulingConfiguration { + access_control?: ConnectorConnectorScheduling + full?: ConnectorConnectorScheduling + incremental?: ConnectorConnectorScheduling +} + +export interface ConnectorSelectOption { + label: string + value: string +} + +export interface ConnectorSyncJobConnectorReference { + configuration: ConnectorConnectorConfiguration + filtering: ConnectorFilteringRules + id: Id + index_name: string + language?: string + pipeline?: ConnectorIngestPipelineParams + service_type: string +} + +export type ConnectorSyncJobTriggerMethod = 'on_demand' | 'scheduled' + +export type 
ConnectorSyncJobType = 'full' | 'incremental' | 'access_control' + +export interface ConnectorSyncRulesFeature { + advanced?: ConnectorFeatureEnabled + basic?: ConnectorFeatureEnabled +} + +export type ConnectorSyncStatus = 'canceling' | 'canceled' | 'completed' | 'error' | 'in_progress' | 'pending' | 'suspended' + +export type ConnectorValidation = ConnectorLessThanValidation | ConnectorGreaterThanValidation | ConnectorListTypeValidation | ConnectorIncludedInValidation | ConnectorRegexValidation + +export interface ConnectorCheckInRequest extends RequestBase { + connector_id: Id +} + +export interface ConnectorCheckInResponse { + result: Result +} + +export interface ConnectorDeleteRequest extends RequestBase { + connector_id: Id + delete_sync_jobs: boolean +} + +export type ConnectorDeleteResponse = AcknowledgedResponseBase + +export interface ConnectorGetRequest extends RequestBase { + connector_id: Id +} + +export type ConnectorGetResponse = ConnectorConnector + +export interface ConnectorLastSyncRequest extends RequestBase { + connector_id: Id + last_access_control_sync_error?: SpecUtilsWithNullValue + last_access_control_sync_scheduled_at?: DateTime + last_access_control_sync_status?: ConnectorSyncStatus + last_deleted_document_count?: long + last_incremental_sync_scheduled_at?: DateTime + last_indexed_document_count?: long + last_seen?: SpecUtilsWithNullValue + last_sync_error?: SpecUtilsWithNullValue + last_sync_scheduled_at?: DateTime + last_sync_status?: ConnectorSyncStatus + last_synced?: DateTime +} + +export interface ConnectorLastSyncResponse { + result: Result +} + +export interface ConnectorListRequest extends RequestBase { + from?: integer + size?: integer + index_name?: Indices + connector_name?: Names + service_type?: Names + query?: string +} + +export interface ConnectorListResponse { + count: long + results: ConnectorConnector[] +} + +export interface ConnectorPostRequest extends RequestBase { + description?: string + index_name: SpecUtilsWithNullValue + is_native?: boolean + language?: string + name?: string + service_type?: string +} + +export interface ConnectorPostResponse { + id: Id +} + +export interface ConnectorPutRequest extends RequestBase { + connector_id: Id + description?: string + index_name: SpecUtilsWithNullValue + is_native?: boolean + language?: string + name?: string + service_type?: string +} + +export interface ConnectorPutResponse { + result: Result +} + +export interface ConnectorSyncJobCancelRequest extends RequestBase { + connector_sync_job_id: Id +} + +export interface ConnectorSyncJobCancelResponse { + result: Result +} + +export interface ConnectorSyncJobDeleteRequest extends RequestBase { + connector_sync_job_id: Id +} + +export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase + +export interface ConnectorSyncJobGetRequest extends RequestBase { + connector_sync_job_id: Id +} + +export type ConnectorSyncJobGetResponse = ConnectorConnectorSyncJob + +export interface ConnectorSyncJobListRequest extends RequestBase { + from?: integer + size?: integer + status?: ConnectorSyncStatus + connector_id?: Id + job_type?: ConnectorSyncJobType[] +} + +export interface ConnectorSyncJobListResponse { + count: long + results: ConnectorConnectorSyncJob[] +} + +export interface ConnectorSyncJobPostRequest extends RequestBase { + id: Id + job_type?: ConnectorSyncJobType + trigger_method?: ConnectorSyncJobTriggerMethod +} + +export interface ConnectorSyncJobPostResponse { + id: Id +} + +export interface ConnectorUpdateActiveFilteringRequest 
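To make the new connector surface concrete, hypothetical payloads built against the request types above; the index name, service type, and connector id are made up, and the relative import path is an assumption:

import * as T from './types'

// Create a connector; index_name is SpecUtilsWithNullValue, so null is also admitted.
const createConnector: T.ConnectorPostRequest = {
  index_name: 'search-my-index',
  name: 'My connector',
  service_type: 'my-service-type',      // hypothetical
  is_native: false
}

// Kick off an incremental sync job for it on demand.
const startSync: T.ConnectorSyncJobPostRequest = {
  id: 'my-connector-id',                // hypothetical connector id
  job_type: 'incremental',
  trigger_method: 'on_demand'
}
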
extends RequestBase { + connector_id: Id +} + +export interface ConnectorUpdateActiveFilteringResponse { + result: Result +} + +export interface ConnectorUpdateApiKeyIdRequest extends RequestBase { + connector_id: Id + api_key_id?: SpecUtilsWithNullValue + api_key_secret_id?: SpecUtilsWithNullValue +} + +export interface ConnectorUpdateApiKeyIdResponse { + result: Result +} + +export interface ConnectorUpdateConfigurationRequest extends RequestBase { + connector_id: Id + configuration?: ConnectorConnectorConfiguration + values?: Record +} + +export interface ConnectorUpdateConfigurationResponse { + result: Result +} + +export interface ConnectorUpdateErrorRequest extends RequestBase { + connector_id: Id + error: SpecUtilsWithNullValue +} + +export interface ConnectorUpdateErrorResponse { + result: Result +} + +export interface ConnectorUpdateFilteringRequest extends RequestBase { + connector_id: Id + filtering?: ConnectorFilteringConfig[] + rules?: ConnectorFilteringRule[] + advanced_snippet?: ConnectorFilteringAdvancedSnippet +} + +export interface ConnectorUpdateFilteringResponse { + result: Result +} + +export interface ConnectorUpdateFilteringValidationRequest extends RequestBase { + connector_id: Id + validation: ConnectorFilteringRulesValidation +} + +export interface ConnectorUpdateFilteringValidationResponse { + result: Result +} + +export interface ConnectorUpdateIndexNameRequest extends RequestBase { + connector_id: Id + index_name: SpecUtilsWithNullValue +} + +export interface ConnectorUpdateIndexNameResponse { + result: Result +} + +export interface ConnectorUpdateNameRequest extends RequestBase { + connector_id: Id + name: string + description?: string +} + +export interface ConnectorUpdateNameResponse { + result: Result +} + +export interface ConnectorUpdateNativeRequest extends RequestBase { + connector_id: Id + is_native: boolean +} + +export interface ConnectorUpdateNativeResponse { + result: Result +} + +export interface ConnectorUpdatePipelineRequest extends RequestBase { + connector_id: Id + pipeline: ConnectorIngestPipelineParams +} + +export interface ConnectorUpdatePipelineResponse { + result: Result +} + +export interface ConnectorUpdateSchedulingRequest extends RequestBase { + connector_id: Id + scheduling: ConnectorSchedulingConfiguration +} + +export interface ConnectorUpdateSchedulingResponse { + result: Result +} + +export interface ConnectorUpdateServiceTypeRequest extends RequestBase { + connector_id: Id + service_type: string +} + +export interface ConnectorUpdateServiceTypeResponse { + result: Result +} + +export interface ConnectorUpdateStatusRequest extends RequestBase { + connector_id: Id + status: ConnectorConnectorStatus +} + +export interface ConnectorUpdateStatusResponse { + result: Result +} + export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { index_uuid: Uuid accept_data_loss: boolean @@ -9149,6 +9658,12 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase origination_date?: long parse_origination_date?: boolean @@ -10640,13 +11156,18 @@ export interface IndicesPutIndexTemplateIndexTemplateMapping { export interface IndicesPutIndexTemplateRequest extends RequestBase { name: Name create?: boolean + master_timeout?: Duration + cause?: string index_patterns?: Indices composed_of?: Name[] template?: IndicesPutIndexTemplateIndexTemplateMapping data_stream?: IndicesDataStreamVisibility - priority?: integer + priority?: long version?: VersionNumber _meta?: Metadata + allow_auto_create?: boolean + 
ignore_missing_component_templates?: string[] + deprecated?: boolean } export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase @@ -10691,9 +11212,8 @@ export type IndicesPutSettingsResponse = AcknowledgedResponseBase export interface IndicesPutTemplateRequest extends RequestBase { name: Name create?: boolean - flat_settings?: boolean master_timeout?: Duration - timeout?: Duration + cause?: string aliases?: Record index_patterns?: string | string[] mappings?: MappingTypeMapping @@ -11034,20 +11554,13 @@ export interface IndicesShrinkResponse { export interface IndicesSimulateIndexTemplateRequest extends RequestBase { name: Name - create?: boolean master_timeout?: Duration include_defaults?: boolean - allow_auto_create?: boolean - index_patterns?: Indices - composed_of?: Name[] - template?: IndicesPutIndexTemplateIndexTemplateMapping - data_stream?: IndicesDataStreamVisibility - priority?: integer - version?: VersionNumber - _meta?: Metadata } export interface IndicesSimulateIndexTemplateResponse { + overlapping?: IndicesSimulateTemplateOverlapping[] + template: IndicesSimulateTemplateTemplate } export interface IndicesSimulateTemplateOverlapping { @@ -11060,7 +11573,16 @@ export interface IndicesSimulateTemplateRequest extends RequestBase { create?: boolean master_timeout?: Duration include_defaults?: boolean - template?: IndicesIndexTemplate + allow_auto_create?: boolean + index_patterns?: Indices + composed_of?: Name[] + template?: IndicesPutIndexTemplateIndexTemplateMapping + data_stream?: IndicesDataStreamVisibility + priority?: long + version?: VersionNumber + _meta?: Metadata + ignore_missing_component_templates?: string[] + deprecated?: boolean } export interface IndicesSimulateTemplateResponse { @@ -11330,27 +11852,39 @@ export interface IndicesValidateQueryResponse { error?: string } +export interface InferenceCompletionResult { + result: string +} + export type InferenceDenseByteVector = byte[] export type InferenceDenseVector = float[] -export interface InferenceInferenceResult { - text_embedding_bytes?: InferenceTextEmbeddingByteResult[] - text_embedding?: InferenceTextEmbeddingResult[] - sparse_embedding?: InferenceSparseEmbeddingResult[] -} - -export interface InferenceModelConfig { +export interface InferenceInferenceEndpoint { service: string service_settings: InferenceServiceSettings task_settings: InferenceTaskSettings } -export interface InferenceModelConfigContainer extends InferenceModelConfig { - model_id: string +export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoint { + inference_id: string task_type: InferenceTaskType } +export interface InferenceInferenceResult { + text_embedding_bytes?: InferenceTextEmbeddingByteResult[] + text_embedding?: InferenceTextEmbeddingResult[] + sparse_embedding?: InferenceSparseEmbeddingResult[] + completion?: InferenceCompletionResult[] + rerank?: InferenceRankedDocument[] +} + +export interface InferenceRankedDocument { + index: integer + score: float + text?: string +} + export type InferenceServiceSettings = any export interface InferenceSparseEmbeddingResult { @@ -11361,7 +11895,7 @@ export type InferenceSparseVector = Record export type InferenceTaskSettings = any -export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' +export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' export interface InferenceTextEmbeddingByteResult { embedding: InferenceDenseByteVector @@ -11371,38 +11905,40 @@ export interface InferenceTextEmbeddingResult { 
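The inference response is now a variant record, so callers can branch on whichever result array is present; a minimal sketch (the relative import path is an assumption):

import * as T from './types'

// Renders whichever inference result variant came back.
function summarize (result: T.InferenceInferenceResult): string {
  if (result.completion != null) {
    return result.completion.map(c => c.result).join('\n')
  }
  if (result.rerank != null) {
    return result.rerank.map(d => `#${d.index} (score ${d.score})`).join(', ')
  }
  if (result.text_embedding != null) {
    return `returned ${result.text_embedding.length} embeddings`
  }
  return 'no recognized result'
}
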
embedding: InferenceDenseVector } -export interface InferenceDeleteModelRequest extends RequestBase { +export interface InferenceDeleteRequest extends RequestBase { task_type?: InferenceTaskType inference_id: Id } -export type InferenceDeleteModelResponse = AcknowledgedResponseBase +export type InferenceDeleteResponse = AcknowledgedResponseBase -export interface InferenceGetModelRequest extends RequestBase { +export interface InferenceGetRequest extends RequestBase { task_type?: InferenceTaskType - inference_id: Id + inference_id?: Id } -export interface InferenceGetModelResponse { - models: InferenceModelConfigContainer[] +export interface InferenceGetResponse { + endpoints: InferenceInferenceEndpointInfo[] } export interface InferenceInferenceRequest extends RequestBase { task_type?: InferenceTaskType inference_id: Id + timeout?: Duration + query?: string input: string | string[] task_settings?: InferenceTaskSettings } export type InferenceInferenceResponse = InferenceInferenceResult -export interface InferencePutModelRequest extends RequestBase { +export interface InferencePutRequest extends RequestBase { task_type?: InferenceTaskType inference_id: Id - model_config?: InferenceModelConfig + inference_config?: InferenceInferenceEndpoint } -export type InferencePutModelResponse = InferenceModelConfigContainer +export type InferencePutResponse = InferenceInferenceEndpointInfo export interface IngestAppendProcessor extends IngestProcessorBase { field: Field @@ -11660,6 +12196,7 @@ export interface IngestProcessorContainer { export interface IngestRemoveProcessor extends IngestProcessorBase { field: Fields + keep?: Fields ignore_missing?: boolean } @@ -14143,6 +14680,7 @@ export interface MlPutTrainedModelPreprocessor { export interface MlPutTrainedModelRequest extends RequestBase { model_id: Id defer_definition_decompression?: boolean + wait_for_completion?: boolean compressed_definition?: string definition?: MlPutTrainedModelDefinition description?: string @@ -15822,7 +16360,9 @@ export interface SecurityApiKey { invalidated?: boolean name: Name realm?: string + realm_type?: string username?: Username + profile_uid?: string metadata?: Metadata role_descriptors?: Record limited_by?: Record[] @@ -15843,7 +16383,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client' | string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 
'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string export interface SecurityCreatedStatus { created: boolean @@ -15866,7 +16406,7 @@ export interface SecurityGlobalPrivilege { export type SecurityGrantType = 'password' | 'access_token' -export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string +export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'cross_cluster_replication' | 'cross_cluster_replication_internal' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_data_stream_lifecycle' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'none' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity @@ -15912,9 +16452,9 @@ export interface SecurityRoleDescriptorRead { export interface SecurityRoleMapping { enabled: boolean metadata: Metadata - roles: string[] - rules: SecurityRoleMappingRule + roles?: string[] role_templates?: SecurityRoleTemplate[] + rules: SecurityRoleMappingRule } export interface SecurityRoleMappingRule { @@ -16232,6 +16772,7 @@ export interface SecurityGetApiKeyRequest extends RequestBase { username?: Username with_limited_by?: boolean active_only?: boolean + with_profile_uid?: boolean } export interface SecurityGetApiKeyResponse { @@ -16554,31 +17095,31 @@ export interface SecurityPutUserResponse { created: boolean } -export type SecurityQueryApiKeysAPIKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate +export type SecurityQueryApiKeysApiKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate -export interface SecurityQueryApiKeysAPIKeyAggregationContainer { - aggregations?: 
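A sketch of the renamed SecurityQueryApiKeys* shapes from this hunk in use, aggregating active keys by username; the query and aggregation are illustrative and the relative import path is an assumption:

import * as T from './types'

const request: T.SecurityQueryApiKeysRequest = {
  with_profile_uid: true,               // new in this change
  typed_keys: true,                     // new in this change
  query: { term: { invalidated: false } },
  aggs: {
    usernames: { terms: { field: 'username' } }
  },
  size: 0
}
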
Record - aggs?: Record +export interface SecurityQueryApiKeysApiKeyAggregationContainer { + aggregations?: Record + aggs?: Record meta?: Metadata cardinality?: AggregationsCardinalityAggregation composite?: AggregationsCompositeAggregation date_range?: AggregationsDateRangeAggregation - filter?: SecurityQueryApiKeysAPIKeyQueryContainer - filters?: SecurityQueryApiKeysAPIKeyFiltersAggregation + filter?: SecurityQueryApiKeysApiKeyQueryContainer + filters?: SecurityQueryApiKeysApiKeyFiltersAggregation missing?: AggregationsMissingAggregation range?: AggregationsRangeAggregation terms?: AggregationsTermsAggregation value_count?: AggregationsValueCountAggregation } -export interface SecurityQueryApiKeysAPIKeyFiltersAggregation extends AggregationsBucketAggregationBase { - filters?: AggregationsBuckets +export interface SecurityQueryApiKeysApiKeyFiltersAggregation extends AggregationsBucketAggregationBase { + filters?: AggregationsBuckets other_bucket?: boolean other_bucket_key?: string keyed?: boolean } -export interface SecurityQueryApiKeysAPIKeyQueryContainer { +export interface SecurityQueryApiKeysApiKeyQueryContainer { bool?: QueryDslBoolQuery exists?: QueryDslExistsQuery ids?: QueryDslIdsQuery @@ -16594,10 +17135,12 @@ export interface SecurityQueryApiKeysAPIKeyQueryContainer { export interface SecurityQueryApiKeysRequest extends RequestBase { with_limited_by?: boolean - aggregations?: Record + with_profile_uid?: boolean + typed_keys?: boolean + aggregations?: Record /** @alias aggregations */ - aggs?: Record - query?: SecurityQueryApiKeysAPIKeyQueryContainer + aggs?: Record + query?: SecurityQueryApiKeysApiKeyQueryContainer from?: integer sort?: Sort size?: integer @@ -16608,7 +17151,7 @@ export interface SecurityQueryApiKeysResponse { total: integer count: integer api_keys: SecurityApiKey[] - aggregations?: Record + aggregations?: Record } export interface SecuritySamlAuthenticateRequest extends RequestBase { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 901559cab..84402cc8f 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -394,6 +394,7 @@ export interface GetGetResult { export interface GetRequest extends RequestBase { id: Id index: IndexName + force_synthetic_source?: boolean preference?: string realtime?: boolean refresh?: boolean @@ -711,6 +712,7 @@ export interface MgetOperation { export interface MgetRequest extends RequestBase { index?: IndexName + force_synthetic_source?: boolean preference?: string realtime?: boolean refresh?: boolean @@ -750,7 +752,7 @@ export interface MsearchMultisearchBody { ext?: Record stored_fields?: Fields docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery | KnnQuery[] + knn?: KnnSearch | KnnSearch[] from?: integer highlight?: SearchHighlight indices_boost?: Record[] @@ -1198,6 +1200,7 @@ export interface SearchRequest extends RequestBase { _source_excludes?: Fields _source_includes?: Fields q?: string + force_synthetic_source?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { aggregations?: Record @@ -1211,7 +1214,7 @@ export interface SearchRequest extends RequestBase { track_total_hits?: SearchTrackHits indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery | KnnQuery[] + knn?: KnnSearch | KnnSearch[] rank?: RankContainer min_score?: double post_filter?: QueryDslQueryContainer @@ -1517,6 +1520,11 @@ export interface SearchLaplaceSmoothingModel { alpha: double } +export interface SearchLearningToRank { + model_id: string + params?: Record +} + export interface SearchLinearInterpolationSmoothingModel { bigram_lambda: double trigram_lambda: double @@ -1616,8 +1624,9 @@ export interface SearchRegexOptions { } export interface SearchRescore { - query: SearchRescoreQuery window_size?: integer + query?: SearchRescoreQuery + learning_to_rank?: SearchLearningToRank } export interface SearchRescoreQuery { @@ -2027,10 +2036,14 @@ export interface SpecUtilsBaseNode { transport_address: TransportAddress } +export type SpecUtilsNullValue = null + export type SpecUtilsPipeSeparatedFlags = T | string export type SpecUtilsStringified = T | string +export type SpecUtilsWithNullValue = T | SpecUtilsNullValue + export interface AcknowledgedResponseBase { acknowledged: boolean } @@ -2119,7 +2132,7 @@ export type DateFormat = string export type DateMath = string | Date -export type DateTime = string | EpochTime +export type DateTime = string | EpochTime | Date export type Distance = string @@ -2350,12 +2363,21 @@ export interface InlineScript extends ScriptBase { export type Ip = string -export interface KnnQuery { +export interface KnnQuery extends QueryDslQueryBase { field: Field query_vector?: QueryVector query_vector_builder?: QueryVectorBuilder - k: long - num_candidates: long + num_candidates?: long + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + similarity?: float +} + +export interface KnnSearch { + field: Field + query_vector?: QueryVector + query_vector_builder?: QueryVectorBuilder + k?: long + num_candidates?: long boost?: float filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float @@ -2478,14 +2500,14 @@ export interface PluginStats { export type PropertyName = string export interface QueryCacheStats { - cache_count: integer - cache_size: integer - evictions: integer - hit_count: integer + cache_count: long + cache_size: long + evictions: long + hit_count: long memory_size?: ByteSize memory_size_in_bytes: long - miss_count: integer - total_count: integer + miss_count: long + total_count: long } export type QueryVector = float[] @@ -2809,9 +2831,9 @@ export interface WktGeoBounds { export interface WriteResponseBase { _id: Id _index: IndexName - _primary_term: long + _primary_term?: long result: Result - _seq_no: SequenceNumber + _seq_no?: SequenceNumber _shards: ShardStatistics _version: VersionNumber forced_refresh?: boolean @@ -2838,6 +2860,7 @@ export interface AggregationsAdjacencyMatrixAggregate extends AggregationsMultiB export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase { filters?: Record + separator?: string } export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMultiBucketBase { @@ -2855,8 +2878,6 @@ export interface AggregationsAggregateBase { export type AggregationsAggregateOrder = Partial> | Partial>[] export interface AggregationsAggregation { - meta?: Metadata - name?: string } export interface AggregationsAggregationContainer { @@ -2999,7 +3020,7 @@ export interface AggregationsBoxplotAggregation 
extends AggregationsMetricAggreg compression?: double } -export interface AggregationsBucketAggregationBase extends AggregationsAggregation { +export interface AggregationsBucketAggregationBase { } export interface AggregationsBucketCorrelationAggregation extends AggregationsBucketPathAggregation { @@ -3030,7 +3051,7 @@ export interface AggregationsBucketMetricValueAggregate extends AggregationsSing keys: string[] } -export interface AggregationsBucketPathAggregation extends AggregationsAggregation { +export interface AggregationsBucketPathAggregation { buckets_path?: AggregationsBucketsPath } @@ -3042,7 +3063,7 @@ export interface AggregationsBucketSelectorAggregation extends AggregationsPipel script?: Script } -export interface AggregationsBucketSortAggregation extends AggregationsAggregation { +export interface AggregationsBucketSortAggregation { from?: integer gap_policy?: AggregationsGapPolicy size?: integer @@ -3067,7 +3088,7 @@ export interface AggregationsCardinalityAggregation extends AggregationsMetricAg export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic' -export interface AggregationsCategorizeTextAggregation extends AggregationsAggregation { +export interface AggregationsCategorizeTextAggregation { field: Field max_unique_tokens?: integer max_matched_tokens?: integer @@ -3249,8 +3270,8 @@ export interface AggregationsEwmaMovingAverageAggregation extends AggregationsMo } export interface AggregationsExtendedBounds { - max: T - min: T + max?: T + min?: T } export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAggregate { @@ -3617,7 +3638,7 @@ export interface AggregationsLongTermsBucketKeys extends AggregationsTermsBucket export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys & { [property: string]: AggregationsAggregate | long | string } -export interface AggregationsMatrixAggregation extends AggregationsAggregation { +export interface AggregationsMatrixAggregation { fields?: Fields missing?: Record } @@ -4073,7 +4094,7 @@ export interface AggregationsTTestAggregate extends AggregationsAggregateBase { value_as_string?: string } -export interface AggregationsTTestAggregation extends AggregationsAggregation { +export interface AggregationsTTestAggregation { a?: AggregationsTestPopulation b?: AggregationsTestPopulation type?: AggregationsTTestType @@ -4133,8 +4154,9 @@ export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase } export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase { - docvalue_fields?: Fields + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] explain?: boolean + fields?: (QueryDslFieldAndFormat | Field)[] from?: integer highlight?: SearchHighlight script_fields?: Record @@ -4196,6 +4218,7 @@ export interface AggregationsVariableWidthHistogramAggregation { buckets?: integer shard_size?: integer initial_buffer?: integer + script?: Script } export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase { @@ -4209,7 +4232,7 @@ export interface AggregationsVariableWidthHistogramBucketKeys extends Aggregatio export type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidthHistogramBucketKeys & { [property: string]: AggregationsAggregate | double | string | long } -export interface AggregationsWeightedAverageAggregation extends AggregationsAggregation { +export interface AggregationsWeightedAverageAggregation { format?: 
string value?: AggregationsWeightedAverageValue value_type?: AggregationsValueType @@ -4345,6 +4368,7 @@ export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { type: 'html_strip' + escaped_tags?: string[] } export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { @@ -4750,6 +4774,7 @@ export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase lenient?: boolean synonyms?: string[] synonyms_path?: string + synonyms_set?: string tokenizer?: string updateable?: boolean } @@ -4761,6 +4786,7 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { lenient?: boolean synonyms?: string[] synonyms_path?: string + synonyms_set?: string tokenizer?: string updateable?: boolean } @@ -4949,6 +4975,7 @@ export interface MappingDenseVectorIndexOptions { export interface MappingDenseVectorProperty extends MappingPropertyBase { type: 'dense_vector' + element_type?: string dims?: integer similarity?: string index?: boolean @@ -4971,7 +4998,7 @@ export interface MappingDoubleRangeProperty extends MappingRangePropertyBase { export type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false' export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { - type: '{dynamic_property}' + type: '{dynamic_type}' enabled?: boolean null_value?: FieldValue boost?: double @@ -5230,7 +5257,7 @@ export interface MappingRuntimeFieldFetchFields { format?: string } -export type MappingRuntimeFieldType = 'boolean' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' | 'lookup' +export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' | 'lookup' export type MappingRuntimeFields = Record @@ -5729,7 +5756,6 @@ export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase { minimum_should_match?: MinimumShouldMatch min_term_freq?: integer min_word_length?: integer - per_field_analyzer?: Record routing?: Routing stop_words?: AnalysisStopWords unlike?: QueryDslLike | QueryDslLike[] @@ -5917,7 +5943,7 @@ export interface QueryDslRandomScoreFunction { seed?: long | string } -export type QueryDslRangeQuery = QueryDslDateRangeQuery | QueryDslNumberRangeQuery +export type QueryDslRangeQuery = QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermsRangeQuery export interface QueryDslRangeQueryBase extends QueryDslQueryBase { relation?: QueryDslRangeRelation @@ -6092,6 +6118,15 @@ export type QueryDslTermsQuery = QueryDslTermsQueryKeys export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup +export interface QueryDslTermsRangeQuery extends QueryDslRangeQueryBase { + gt?: string + gte?: string + lt?: string + lte?: string + from?: string | null + to?: string | null +} + export interface QueryDslTermsSetQuery extends QueryDslQueryBase { minimum_should_match_field?: Field minimum_should_match_script?: Script @@ -6241,7 +6276,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { track_total_hits?: SearchTrackHits indices_boost?: Record[] docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnQuery | KnnQuery[] + knn?: KnnSearch | KnnSearch[] min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -8654,10 +8689,10 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { master_timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the 
nested keys to the top level object. */ body?: { - allow_auto_create?: boolean template: IndicesIndexState version?: VersionNumber _meta?: Metadata + deprecated?: boolean } } @@ -9055,6 +9090,528 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { timestamp: long } +export interface ConnectorConnector { + api_key_id?: string + configuration: ConnectorConnectorConfiguration + custom_scheduling: ConnectorConnectorCustomScheduling + description?: string + error?: string + features?: ConnectorConnectorFeatures + filtering: ConnectorFilteringConfig[] + id?: Id + index_name?: IndexName + is_native: boolean + language?: string + last_access_control_sync_error?: string + last_access_control_sync_scheduled_at?: DateTime + last_access_control_sync_status?: ConnectorSyncStatus + last_deleted_document_count?: long + last_incremental_sync_scheduled_at?: DateTime + last_indexed_document_count?: long + last_seen?: DateTime + last_sync_error?: string + last_sync_scheduled_at?: DateTime + last_sync_status?: ConnectorSyncStatus + last_synced?: DateTime + name?: string + pipeline?: ConnectorIngestPipelineParams + scheduling: ConnectorSchedulingConfiguration + service_type: string + status: ConnectorConnectorStatus + sync_now: boolean +} + +export interface ConnectorConnectorConfigProperties { + category?: string + default_value: ScalarValue + depends_on: ConnectorDependency[] + display: ConnectorDisplayType + label: string + options: ConnectorSelectOption[] + order?: integer + placeholder?: string + required: boolean + sensitive: boolean + tooltip?: string + type: ConnectorConnectorFieldType + ui_restrictions: string[] + validations: ConnectorValidation[] + value: ScalarValue +} + +export type ConnectorConnectorConfiguration = Record + +export type ConnectorConnectorCustomScheduling = Record + +export interface ConnectorConnectorFeatures { + document_level_security?: ConnectorFeatureEnabled + filtering_advanced_config?: boolean + filtering_rules?: boolean + incremental_sync?: ConnectorFeatureEnabled + sync_rules?: ConnectorSyncRulesFeature +} + +export type ConnectorConnectorFieldType = 'str' | 'int' | 'list' | 'bool' + +export interface ConnectorConnectorScheduling { + enabled: boolean + interval: string +} + +export type ConnectorConnectorStatus = 'created' | 'needs_configuration' | 'configured' | 'connected' | 'error' + +export interface ConnectorConnectorSyncJob { + cancelation_requested_at?: DateTime + canceled_at?: DateTime + completed_at?: DateTime + connector: ConnectorSyncJobConnectorReference + created_at: DateTime + deleted_document_count: long + error?: string + id: Id + indexed_document_count: long + indexed_document_volume: long + job_type: ConnectorSyncJobType + last_seen?: DateTime + metadata: Record + started_at?: DateTime + status: ConnectorSyncStatus + total_document_count: long + trigger_method: ConnectorSyncJobTriggerMethod + worker_hostname?: string +} + +export interface ConnectorCustomScheduling { + configuration_overrides: ConnectorCustomSchedulingConfigurationOverrides + enabled: boolean + interval: string + last_synced?: DateTime + name: string +} + +export interface ConnectorCustomSchedulingConfigurationOverrides { + max_crawl_depth?: integer + sitemap_discovery_disabled?: boolean + domain_allowlist?: string[] + sitemap_urls?: string[] + seed_urls?: string[] +} + +export interface ConnectorDependency { + field: string + value: ScalarValue +} + +export type ConnectorDisplayType = 'textbox' | 'textarea' | 'numeric' | 'toggle' | 'dropdown' + +export 
interface ConnectorFeatureEnabled { + enabled: boolean +} + +export interface ConnectorFilteringAdvancedSnippet { + created_at?: DateTime + updated_at?: DateTime + value: Record +} + +export interface ConnectorFilteringConfig { + active: ConnectorFilteringRules + domain: string + draft: ConnectorFilteringRules +} + +export type ConnectorFilteringPolicy = 'exclude' | 'include' + +export interface ConnectorFilteringRule { + created_at?: DateTime + field: Field + id: Id + order: integer + policy: ConnectorFilteringPolicy + rule: ConnectorFilteringRuleRule + updated_at?: DateTime + value: string +} + +export type ConnectorFilteringRuleRule = 'contains' | 'ends_with' | 'equals' | 'regex' | 'starts_with' | '>' | '<' + +export interface ConnectorFilteringRules { + advanced_snippet: ConnectorFilteringAdvancedSnippet + rules: ConnectorFilteringRule[] + validation: ConnectorFilteringRulesValidation +} + +export interface ConnectorFilteringRulesValidation { + errors: ConnectorFilteringValidation[] + state: ConnectorFilteringValidationState +} + +export interface ConnectorFilteringValidation { + ids: Id[] + messages: string[] +} + +export type ConnectorFilteringValidationState = 'edited' | 'invalid' | 'valid' + +export interface ConnectorGreaterThanValidation { + type: 'greater_than' + constraint: double +} + +export interface ConnectorIncludedInValidation { + type: 'included_in' + constraint: string +} + +export interface ConnectorIngestPipelineParams { + extract_binary_content: boolean + name: string + reduce_whitespace: boolean + run_ml_inference: boolean +} + +export interface ConnectorLessThanValidation { + type: 'less_than' + constraint: double +} + +export interface ConnectorListTypeValidation { + type: 'list_type' + constraint: ScalarValue[] +} + +export interface ConnectorRegexValidation { + type: 'regex' + constraint: string +} + +export interface ConnectorSchedulingConfiguration { + access_control?: ConnectorConnectorScheduling + full?: ConnectorConnectorScheduling + incremental?: ConnectorConnectorScheduling +} + +export interface ConnectorSelectOption { + label: string + value: string +} + +export interface ConnectorSyncJobConnectorReference { + configuration: ConnectorConnectorConfiguration + filtering: ConnectorFilteringRules + id: Id + index_name: string + language?: string + pipeline?: ConnectorIngestPipelineParams + service_type: string +} + +export type ConnectorSyncJobTriggerMethod = 'on_demand' | 'scheduled' + +export type ConnectorSyncJobType = 'full' | 'incremental' | 'access_control' + +export interface ConnectorSyncRulesFeature { + advanced?: ConnectorFeatureEnabled + basic?: ConnectorFeatureEnabled +} + +export type ConnectorSyncStatus = 'canceling' | 'canceled' | 'completed' | 'error' | 'in_progress' | 'pending' | 'suspended' + +export type ConnectorValidation = ConnectorLessThanValidation | ConnectorGreaterThanValidation | ConnectorListTypeValidation | ConnectorIncludedInValidation | ConnectorRegexValidation + +export interface ConnectorCheckInRequest extends RequestBase { + connector_id: Id +} + +export interface ConnectorCheckInResponse { + result: Result +} + +export interface ConnectorDeleteRequest extends RequestBase { + connector_id: Id + delete_sync_jobs: boolean +} + +export type ConnectorDeleteResponse = AcknowledgedResponseBase + +export interface ConnectorGetRequest extends RequestBase { + connector_id: Id +} + +export type ConnectorGetResponse = ConnectorConnector + +export interface ConnectorLastSyncRequest extends RequestBase { + connector_id: Id + /** 
@deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + last_access_control_sync_error?: SpecUtilsWithNullValue + last_access_control_sync_scheduled_at?: DateTime + last_access_control_sync_status?: ConnectorSyncStatus + last_deleted_document_count?: long + last_incremental_sync_scheduled_at?: DateTime + last_indexed_document_count?: long + last_seen?: SpecUtilsWithNullValue + last_sync_error?: SpecUtilsWithNullValue + last_sync_scheduled_at?: DateTime + last_sync_status?: ConnectorSyncStatus + last_synced?: DateTime + } +} + +export interface ConnectorLastSyncResponse { + result: Result +} + +export interface ConnectorListRequest extends RequestBase { + from?: integer + size?: integer + index_name?: Indices + connector_name?: Names + service_type?: Names + query?: string +} + +export interface ConnectorListResponse { + count: long + results: ConnectorConnector[] +} + +export interface ConnectorPostRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + description?: string + index_name: SpecUtilsWithNullValue + is_native?: boolean + language?: string + name?: string + service_type?: string + } +} + +export interface ConnectorPostResponse { + id: Id +} + +export interface ConnectorPutRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + description?: string + index_name: SpecUtilsWithNullValue + is_native?: boolean + language?: string + name?: string + service_type?: string + } +} + +export interface ConnectorPutResponse { + result: Result +} + +export interface ConnectorSyncJobCancelRequest extends RequestBase { + connector_sync_job_id: Id +} + +export interface ConnectorSyncJobCancelResponse { + result: Result +} + +export interface ConnectorSyncJobDeleteRequest extends RequestBase { + connector_sync_job_id: Id +} + +export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase + +export interface ConnectorSyncJobGetRequest extends RequestBase { + connector_sync_job_id: Id +} + +export type ConnectorSyncJobGetResponse = ConnectorConnectorSyncJob + +export interface ConnectorSyncJobListRequest extends RequestBase { + from?: integer + size?: integer + status?: ConnectorSyncStatus + connector_id?: Id + job_type?: ConnectorSyncJobType[] +} + +export interface ConnectorSyncJobListResponse { + count: long + results: ConnectorConnectorSyncJob[] +} + +export interface ConnectorSyncJobPostRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + id: Id + job_type?: ConnectorSyncJobType + trigger_method?: ConnectorSyncJobTriggerMethod + } +} + +export interface ConnectorSyncJobPostResponse { + id: Id +} + +export interface ConnectorUpdateActiveFilteringRequest extends RequestBase { + connector_id: Id +} + +export interface ConnectorUpdateActiveFilteringResponse { + result: Result +} + +export interface ConnectorUpdateApiKeyIdRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + api_key_id?: SpecUtilsWithNullValue + api_key_secret_id?: SpecUtilsWithNullValue + } +} + +export interface ConnectorUpdateApiKeyIdResponse { + result: Result +} + +export interface ConnectorUpdateConfigurationRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + configuration?: ConnectorConnectorConfiguration + values?: Record + } +} + +export interface ConnectorUpdateConfigurationResponse { + result: Result +} + +export interface ConnectorUpdateErrorRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + error: SpecUtilsWithNullValue + } +} + +export interface ConnectorUpdateErrorResponse { + result: Result +} + +export interface ConnectorUpdateFilteringRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + filtering?: ConnectorFilteringConfig[] + rules?: ConnectorFilteringRule[] + advanced_snippet?: ConnectorFilteringAdvancedSnippet + } +} + +export interface ConnectorUpdateFilteringResponse { + result: Result +} + +export interface ConnectorUpdateFilteringValidationRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + validation: ConnectorFilteringRulesValidation + } +} + +export interface ConnectorUpdateFilteringValidationResponse { + result: Result +} + +export interface ConnectorUpdateIndexNameRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + index_name: SpecUtilsWithNullValue + } +} + +export interface ConnectorUpdateIndexNameResponse { + result: Result +} + +export interface ConnectorUpdateNameRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + name: string + description?: string + } +} + +export interface ConnectorUpdateNameResponse { + result: Result +} + +export interface ConnectorUpdateNativeRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + is_native: boolean + } +} + +export interface ConnectorUpdateNativeResponse { + result: Result +} + +export interface ConnectorUpdatePipelineRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + pipeline: ConnectorIngestPipelineParams + } +} + +export interface ConnectorUpdatePipelineResponse { + result: Result +} + +export interface ConnectorUpdateSchedulingRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + scheduling: ConnectorSchedulingConfiguration + } +} + +export interface ConnectorUpdateSchedulingResponse { + result: Result +} + +export interface ConnectorUpdateServiceTypeRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + service_type: string + } +} + +export interface ConnectorUpdateServiceTypeResponse { + result: Result +} + +export interface ConnectorUpdateStatusRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + status: ConnectorConnectorStatus + } +} + +export interface ConnectorUpdateStatusResponse { + result: Result +} + export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { index_uuid: Uuid accept_data_loss: boolean @@ -9256,6 +9813,12 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase origination_date?: long parse_origination_date?: boolean @@ -10785,15 +11349,20 @@ export interface IndicesPutIndexTemplateIndexTemplateMapping { export interface IndicesPutIndexTemplateRequest extends RequestBase { name: Name create?: boolean + master_timeout?: Duration + cause?: string /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { index_patterns?: Indices composed_of?: Name[] template?: IndicesPutIndexTemplateIndexTemplateMapping data_stream?: IndicesDataStreamVisibility - priority?: integer + priority?: long version?: VersionNumber _meta?: Metadata + allow_auto_create?: boolean + ignore_missing_component_templates?: string[] + deprecated?: boolean } } @@ -10843,9 +11412,8 @@ export type IndicesPutSettingsResponse = AcknowledgedResponseBase export interface IndicesPutTemplateRequest extends RequestBase { name: Name create?: boolean - flat_settings?: boolean master_timeout?: Duration - timeout?: Duration + cause?: string /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { aliases?: Record @@ -11195,23 +11763,13 @@ export interface IndicesShrinkResponse { export interface IndicesSimulateIndexTemplateRequest extends RequestBase { name: Name - create?: boolean master_timeout?: Duration include_defaults?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - allow_auto_create?: boolean - index_patterns?: Indices - composed_of?: Name[] - template?: IndicesPutIndexTemplateIndexTemplateMapping - data_stream?: IndicesDataStreamVisibility - priority?: integer - version?: VersionNumber - _meta?: Metadata - } } export interface IndicesSimulateIndexTemplateResponse { + overlapping?: IndicesSimulateTemplateOverlapping[] + template: IndicesSimulateTemplateTemplate } export interface IndicesSimulateTemplateOverlapping { @@ -11224,8 +11782,19 @@ export interface IndicesSimulateTemplateRequest extends RequestBase { create?: boolean master_timeout?: Duration include_defaults?: boolean - /** @deprecated The use of the 'body' key has been deprecated, use 'template' instead. */ - body?: IndicesIndexTemplate + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + allow_auto_create?: boolean + index_patterns?: Indices + composed_of?: Name[] + template?: IndicesPutIndexTemplateIndexTemplateMapping + data_stream?: IndicesDataStreamVisibility + priority?: long + version?: VersionNumber + _meta?: Metadata + ignore_missing_component_templates?: string[] + deprecated?: boolean + } } export interface IndicesSimulateTemplateResponse { @@ -11504,27 +12073,39 @@ export interface IndicesValidateQueryResponse { error?: string } +export interface InferenceCompletionResult { + result: string +} + export type InferenceDenseByteVector = byte[] export type InferenceDenseVector = float[] -export interface InferenceInferenceResult { - text_embedding_bytes?: InferenceTextEmbeddingByteResult[] - text_embedding?: InferenceTextEmbeddingResult[] - sparse_embedding?: InferenceSparseEmbeddingResult[] -} - -export interface InferenceModelConfig { +export interface InferenceInferenceEndpoint { service: string service_settings: InferenceServiceSettings task_settings: InferenceTaskSettings } -export interface InferenceModelConfigContainer extends InferenceModelConfig { - model_id: string +export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoint { + inference_id: string task_type: InferenceTaskType } +export interface InferenceInferenceResult { + text_embedding_bytes?: InferenceTextEmbeddingByteResult[] + text_embedding?: InferenceTextEmbeddingResult[] + sparse_embedding?: InferenceSparseEmbeddingResult[] + completion?: InferenceCompletionResult[] + rerank?: InferenceRankedDocument[] +} + +export interface InferenceRankedDocument { + index: integer + score: float + text?: string +} + export type InferenceServiceSettings = any export interface InferenceSparseEmbeddingResult { @@ -11535,7 +12116,7 @@ export type InferenceSparseVector = Record export type InferenceTaskSettings = any -export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' +export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' export interface InferenceTextEmbeddingByteResult { embedding: InferenceDenseByteVector @@ -11545,27 +12126,29 @@ export interface InferenceTextEmbeddingResult { embedding: InferenceDenseVector } -export interface InferenceDeleteModelRequest extends RequestBase { +export interface InferenceDeleteRequest extends RequestBase { task_type?: InferenceTaskType inference_id: Id } -export type InferenceDeleteModelResponse = AcknowledgedResponseBase +export type InferenceDeleteResponse = AcknowledgedResponseBase -export interface InferenceGetModelRequest extends RequestBase { +export interface InferenceGetRequest extends RequestBase { task_type?: InferenceTaskType - inference_id: Id + inference_id?: Id } -export interface InferenceGetModelResponse { - models: InferenceModelConfigContainer[] +export interface InferenceGetResponse { + endpoints: InferenceInferenceEndpointInfo[] } export interface InferenceInferenceRequest extends RequestBase { task_type?: InferenceTaskType inference_id: Id + timeout?: Duration /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { + query?: string input: string | string[] task_settings?: InferenceTaskSettings } @@ -11573,14 +12156,14 @@ export interface InferenceInferenceRequest extends RequestBase { export type InferenceInferenceResponse = InferenceInferenceResult -export interface InferencePutModelRequest extends RequestBase { +export interface InferencePutRequest extends RequestBase { task_type?: InferenceTaskType inference_id: Id - /** @deprecated The use of the 'body' key has been deprecated, use 'model_config' instead. */ - body?: InferenceModelConfig + /** @deprecated The use of the 'body' key has been deprecated, use 'inference_config' instead. */ + body?: InferenceInferenceEndpoint } -export type InferencePutModelResponse = InferenceModelConfigContainer +export type InferencePutResponse = InferenceInferenceEndpointInfo export interface IngestAppendProcessor extends IngestProcessorBase { field: Field @@ -11838,6 +12421,7 @@ export interface IngestProcessorContainer { export interface IngestRemoveProcessor extends IngestProcessorBase { field: Fields + keep?: Fields ignore_missing?: boolean } @@ -14404,6 +14988,7 @@ export interface MlPutTrainedModelPreprocessor { export interface MlPutTrainedModelRequest extends RequestBase { model_id: Id defer_definition_decompression?: boolean + wait_for_completion?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { compressed_definition?: string @@ -16140,7 +16725,9 @@ export interface SecurityApiKey { invalidated?: boolean name: Name realm?: string + realm_type?: string username?: Username + profile_uid?: string metadata?: Metadata role_descriptors?: Record limited_by?: Record[] @@ -16161,7 +16748,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_ccr' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'read_ccr' | 'read_ilm' | 'read_pipeline' | 'read_slm' | 'transport_client' | string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 
'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string export interface SecurityCreatedStatus { created: boolean @@ -16184,7 +16771,7 @@ export interface SecurityGlobalPrivilege { export type SecurityGrantType = 'password' | 'access_token' -export type SecurityIndexPrivilege = 'none' | 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string +export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'cross_cluster_replication' | 'cross_cluster_replication_internal' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_data_stream_lifecycle' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'none' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity @@ -16230,9 +16817,9 @@ export interface SecurityRoleDescriptorRead { export interface SecurityRoleMapping { enabled: boolean metadata: Metadata - roles: string[] - rules: SecurityRoleMappingRule + roles?: string[] role_templates?: SecurityRoleTemplate[] + rules: SecurityRoleMappingRule } export interface SecurityRoleMappingRule { @@ -16559,6 +17146,7 @@ export interface SecurityGetApiKeyRequest extends RequestBase { username?: Username with_limited_by?: boolean active_only?: boolean + with_profile_uid?: boolean } export interface SecurityGetApiKeyResponse { @@ -16910,31 +17498,31 @@ export interface SecurityPutUserResponse { created: boolean } -export type SecurityQueryApiKeysAPIKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate +export type SecurityQueryApiKeysApiKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate -export interface SecurityQueryApiKeysAPIKeyAggregationContainer { - aggregations?: Record - aggs?: Record +export interface SecurityQueryApiKeysApiKeyAggregationContainer { + aggregations?: Record + aggs?: Record meta?: Metadata cardinality?: AggregationsCardinalityAggregation composite?: AggregationsCompositeAggregation date_range?: AggregationsDateRangeAggregation - filter?: SecurityQueryApiKeysAPIKeyQueryContainer - filters?: SecurityQueryApiKeysAPIKeyFiltersAggregation + filter?: SecurityQueryApiKeysApiKeyQueryContainer + filters?: SecurityQueryApiKeysApiKeyFiltersAggregation 
missing?: AggregationsMissingAggregation range?: AggregationsRangeAggregation terms?: AggregationsTermsAggregation value_count?: AggregationsValueCountAggregation } -export interface SecurityQueryApiKeysAPIKeyFiltersAggregation extends AggregationsBucketAggregationBase { - filters?: AggregationsBuckets +export interface SecurityQueryApiKeysApiKeyFiltersAggregation extends AggregationsBucketAggregationBase { + filters?: AggregationsBuckets other_bucket?: boolean other_bucket_key?: string keyed?: boolean } -export interface SecurityQueryApiKeysAPIKeyQueryContainer { +export interface SecurityQueryApiKeysApiKeyQueryContainer { bool?: QueryDslBoolQuery exists?: QueryDslExistsQuery ids?: QueryDslIdsQuery @@ -16950,12 +17538,14 @@ export interface SecurityQueryApiKeysAPIKeyQueryContainer { export interface SecurityQueryApiKeysRequest extends RequestBase { with_limited_by?: boolean + with_profile_uid?: boolean + typed_keys?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - aggregations?: Record + aggregations?: Record /** @alias aggregations */ - aggs?: Record - query?: SecurityQueryApiKeysAPIKeyQueryContainer + aggs?: Record + query?: SecurityQueryApiKeysApiKeyQueryContainer from?: integer sort?: Sort size?: integer @@ -16967,7 +17557,7 @@ export interface SecurityQueryApiKeysResponse { total: integer count: integer api_keys: SecurityApiKey[] - aggregations?: Record + aggregations?: Record } export interface SecuritySamlAuthenticateRequest extends RequestBase { From 05f7078534ae1a4a7a91962ee02b70ce6a2b440d Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 9 May 2024 14:53:32 -0500 Subject: [PATCH 347/647] Stop using matrix for license check (#2254) --- .github/workflows/nodejs.yml | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 7511a0033..225b67732 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -37,17 +37,10 @@ jobs: - uses: actions/checkout@v4 - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: ${{ matrix.node-version }} - # workaround for failing tests on Node.js 14.x - # see https://github.com/actions/setup-node/issues/411 - - name: Force install specific npm version - run: | - npm install --global npm@8.3.1 - npm install --global npm@9.7.1 - - name: Install run: | npm install @@ -64,17 +57,13 @@ jobs: name: License check runs-on: ubuntu-latest - strategy: - matrix: - node-version: [22.x] - steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v3 + - name: Use Node.js + uses: actions/setup-node@v4 with: - node-version: ${{ matrix.node-version }} + node-version: 22.x - name: Install run: | From a71ebb5f681531ea994a8a68a4cb4b3aabf99a92 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 20 May 2024 17:47:02 +0100 Subject: [PATCH 348/647] Auto-generated code for main (#2258) --- docs/reference.asciidoc | 1 + src/api/api/search.ts | 2 +- src/api/types.ts | 45 ++++++++++++++++++++++++++++++++----- src/api/typesWithBodyKey.ts | 45 ++++++++++++++++++++++++++++++++----- 4 files changed, 82 insertions(+), 11 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index cc6343565..86c80ab5c 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -935,6 +935,7 @@ A post filter has no 
impact on the aggregation results. NOTE: This is a debugging tool and adds significant overhead to search execution. ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. +** *`retriever` (Optional, { standard, knn, rrf })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as query and knn. ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page. ** *`size` (Optional, number)*: The number of hits to return. diff --git a/src/api/api/search.ts b/src/api/api/search.ts index b28d29735..6490428cc 100644 --- a/src/api/api/search.ts +++ b/src/api/api/search.ts @@ -46,7 +46,7 @@ export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise> export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'rank', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'rank', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'retriever', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/types.ts b/src/api/types.ts index bde49c300..159213070 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -665,8 +665,8 @@ export interface KnnSearchResponse { export interface KnnSearchQuery { field: Field query_vector: QueryVector - k: long - num_candidates: long + k: 
integer + num_candidates: integer } export interface MgetMultiGetError { @@ -1167,6 +1167,7 @@ export interface SearchRequest extends RequestBase { profile?: boolean query?: QueryDslQueryContainer rescore?: SearchRescore | SearchRescore[] + retriever?: RetrieverContainer script_fields?: Record search_after?: SortResults size?: integer @@ -2294,17 +2295,26 @@ export interface KnnQuery extends QueryDslQueryBase { field: Field query_vector?: QueryVector query_vector_builder?: QueryVectorBuilder - num_candidates?: long + num_candidates?: integer filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float } +export interface KnnRetriever extends RetrieverBase { + field: string + query_vector?: QueryVector + query_vector_builder?: QueryVectorBuilder + k: integer + num_candidates: integer + similarity?: float +} + export interface KnnSearch { field: Field query_vector?: QueryVector query_vector_builder?: QueryVectorBuilder - k?: long - num_candidates?: long + k?: integer + num_candidates?: integer boost?: float filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float @@ -2443,6 +2453,12 @@ export interface QueryVectorBuilder { text_embedding?: TextEmbedding } +export interface RRFRetriever extends RetrieverBase { + retrievers: RetrieverContainer[] + rank_constant?: integer + rank_window_size?: integer +} + export interface RankBase { } @@ -2492,6 +2508,16 @@ export interface Retries { search: long } +export interface RetrieverBase { + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] +} + +export interface RetrieverContainer { + standard?: StandardRetriever + knn?: KnnRetriever + rrf?: RRFRetriever +} + export type Routing = string export interface RrfRank { @@ -2647,6 +2673,15 @@ export type SortOrder = 'asc' | 'desc' export type SortResults = FieldValue[] +export interface StandardRetriever extends RetrieverBase { + query?: QueryDslQueryContainer + search_after?: SortResults + terminate_after?: integer + sort?: Sort + min_score?: float + collapse?: SearchFieldCollapse +} + export interface StoreStats { size?: ByteSize size_in_bytes: long diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 84402cc8f..838b6dcd7 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -690,8 +690,8 @@ export interface KnnSearchResponse { export interface KnnSearchQuery { field: Field query_vector: QueryVector - k: long - num_candidates: long + k: integer + num_candidates: integer } export interface MgetMultiGetError { @@ -1221,6 +1221,7 @@ export interface SearchRequest extends RequestBase { profile?: boolean query?: QueryDslQueryContainer rescore?: SearchRescore | SearchRescore[] + retriever?: RetrieverContainer script_fields?: Record search_after?: SortResults size?: integer @@ -2367,17 +2368,26 @@ export interface KnnQuery extends QueryDslQueryBase { field: Field query_vector?: QueryVector query_vector_builder?: QueryVectorBuilder - num_candidates?: long + num_candidates?: integer filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float } +export interface KnnRetriever extends RetrieverBase { + field: string + query_vector?: QueryVector + query_vector_builder?: QueryVectorBuilder + k: integer + num_candidates: integer + similarity?: float +} + export interface KnnSearch { field: Field query_vector?: QueryVector query_vector_builder?: QueryVectorBuilder - k?: long - num_candidates?: long + k?: integer + num_candidates?: integer boost?: float filter?: QueryDslQueryContainer | QueryDslQueryContainer[] 
similarity?: float @@ -2516,6 +2526,12 @@ export interface QueryVectorBuilder { text_embedding?: TextEmbedding } +export interface RRFRetriever extends RetrieverBase { + retrievers: RetrieverContainer[] + rank_constant?: integer + rank_window_size?: integer +} + export interface RankBase { } @@ -2565,6 +2581,16 @@ export interface Retries { search: long } +export interface RetrieverBase { + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] +} + +export interface RetrieverContainer { + standard?: StandardRetriever + knn?: KnnRetriever + rrf?: RRFRetriever +} + export type Routing = string export interface RrfRank { @@ -2720,6 +2746,15 @@ export type SortOrder = 'asc' | 'desc' export type SortResults = FieldValue[] +export interface StandardRetriever extends RetrieverBase { + query?: QueryDslQueryContainer + search_after?: SortResults + terminate_after?: integer + sort?: Sort + min_score?: float + collapse?: SearchFieldCollapse +} + export interface StoreStats { size?: ByteSize size_in_bytes: long From 4be14a1f6cc2d0b0f0395ddb47dc2e93aeb645eb Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 23 May 2024 16:35:32 +0100 Subject: [PATCH 349/647] Auto-generated code for main (#2261) --- docs/reference.asciidoc | 1 - src/api/api/esql.ts | 8 +------- src/api/types.ts | 9 +-------- src/api/typesWithBodyKey.ts | 9 +-------- 4 files changed, 3 insertions(+), 24 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 86c80ab5c..58118b6b7 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -2826,7 +2826,6 @@ client.esql.query({ query }) ** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. ** *`locale` (Optional, string)* ** *`params` (Optional, number | number | string | boolean | null[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. -** *`version` (Optional, Enum("2024.04.01"))*: The version of the ES|QL language in which the "query" field was written. ** *`format` (Optional, string)*: A short version of the Accept header, e.g. json, yaml. ** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format. 
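The esql.ts hunk that follows removes the client-side fallback that injected `version: '2024.04.01'` into every ES|QL request, matching the removal of the `version` field from the request types above. A minimal sketch of an ES|QL call after this change — connection details and the `my-index` index name are hypothetical, and top-level `await` assumes an ES module context:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

// Hypothetical connection details; point `node` at your own cluster.
const client = new Client({ node: 'http://localhost:9200/' })

// After this patch no `version` field is sent; the server picks the dialect.
const response = await client.esql.query({
  query: 'FROM my-index | LIMIT 10' // hypothetical index name
})
console.log(response)
----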
diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index a4029c1de..4d211a14c 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -52,7 +52,7 @@ export default class Esql { async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'query', 'version'] + const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'query'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -63,12 +63,6 @@ export default class Esql { body = userBody != null ? { ...userBody } : undefined } - // a version number is required for all ES|QL queries. - // inject a default value if none is provided. - if (typeof body === 'object' && body.version == null) { - body.version = '2024.04.01' - } - for (const key in params) { if (acceptedBody.includes(key)) { body = body ?? {} diff --git a/src/api/types.ts b/src/api/types.ts index 159213070..8f8a49c59 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -9693,12 +9693,6 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 838b6dcd7..d540452cf 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -9848,12 +9848,6 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase From e1de2bd53d73fb4cb9be233da7631634a0e7029c Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 3 Jun 2024 19:31:54 +0100 Subject: [PATCH 350/647] Auto-generated code for main (#2262) --- docs/reference.asciidoc | 72 +++++++++++++++++++++---------------- src/api/api/ml.ts | 26 ++++++++++---- src/api/types.ts | 62 +++++++++++++++++++------------- src/api/typesWithBodyKey.ts | 65 ++++++++++++++++++++------------- 4 files changed, 140 insertions(+), 85 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 58118b6b7..f06f9868c 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -101,7 +101,7 @@ client.count({ ... }) ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. ** *`analyzer` (Optional, string)*: Analyzer to use for the query string. @@ -204,7 +204,7 @@ client.deleteByQuery({ index }) Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`max_docs` (Optional, number)*: The maximum number of documents to delete. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to delete using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to delete using the Query DSL. ** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. @@ -360,7 +360,7 @@ client.explain({ id, index }) ** *`id` (string)*: Defines the document ID. ** *`index` (string)*: Index names used to limit the request. Only a single index name can be provided to this parameter. 
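A minimal sketch of the explain request described in this section, passing the `query` container documented in the surrounding parameter list; the index name, document ID, and query values are hypothetical:

[source,ts]
----
// Sketch only: explain how one document scores against a query.
const explanation = await client.explain({
  index: 'my-index', // hypothetical index
  id: '0',           // hypothetical document ID
  query: { match: { message: 'elasticsearch' } }
})
console.log(explanation.explanation)
----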
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`analyzer` (Optional, string)*: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. ** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. @@ -391,7 +391,7 @@ client.fieldCaps({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. ** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. 
+** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. ** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. ** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, @@ -589,7 +589,7 @@ parameter defaults to false. You can pass _source: true to return both source fi and stored fields in the search response. ** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. 
The kNN search will return the top +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. ** *`routing` (Optional, string)*: A list of specific routing values @@ -928,12 +928,12 @@ The request returns doc values for field names matching these patterns in the `h ** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use. ** *`min_score` (Optional, number)*: Minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results. -** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. 
+** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. ** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. ** *`retriever` (Optional, { standard, knn, rrf })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as query and knn. ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. @@ -1087,7 +1087,7 @@ don’t include the aggs layer. each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon of the cells bounding box. If 'point' each feature is a Point that is the centroid of the cell. 
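A hedged sketch of the vector tile request this section documents, assuming a hypothetical `museums` index with a `location` geo_point field; the `zoom`/`x`/`y` values identifying the tile are likewise made up:

[source,ts]
----
// Sketch only: fetch one map tile; the response body is binary MVT data.
const tile = await client.searchMvt({
  index: 'museums',  // hypothetical index with a geo_point field
  field: 'location',
  zoom: 13,
  x: 4207,
  y: 2692,
  grid_type: 'point' // each aggregation feature becomes a Point, per the docs above
})
----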
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search. ** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. ** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000. @@ -1190,7 +1190,7 @@ client.termsEnum({ index, field }) ** *`size` (Optional, number)*: How many matching terms to return. ** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded the complete flag set to false in the response and the results may be partial or empty. ** *`case_insensitive` (Optional, boolean)*: When true the provided search string is matched against index terms without case sensitivity. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none. 
+** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none. ** *`string` (Optional, string)*: The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request. ** *`search_after` (Optional, string)* @@ -1288,7 +1288,7 @@ client.updateByQuery({ index }) Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`max_docs` (Optional, number)*: The maximum number of documents to update. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL. ** *`script` (Optional, { lang, options, source } | { id })*: The script to run to update the document source or metadata when updating. ** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. ** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if update by query hits version conflicts: `abort` or `proceed`. @@ -1444,9 +1444,9 @@ names matching these patterns in the hits.fields property of the response. 
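The `knn` and `min_score` options just described combine in an ordinary search request. A minimal sketch with a hypothetical index, dense_vector field, and query vector (note that `k` and `num_candidates` are typed as integers after the change in this series):

[source,ts]
----
// Sketch only: approximate kNN search with a score floor.
const result = await client.search({
  index: 'my-index',    // hypothetical
  knn: {
    field: 'embedding', // hypothetical dense_vector field
    query_vector: [0.12, -0.33, 0.08],
    k: 10,
    num_candidates: 100
  },
  min_score: 0.5        // drop weak matches from the results
})
----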
** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits }[])*: Defines the approximate kNN search to run. ** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. -** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* ** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. 
 ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*
 ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit.
 ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
@@ -2793,7 +2793,7 @@ client.eql.search({ index, query })
 ** *`tiebreaker_field` (Optional, string)*: Field used to sort hits with the same timestamp in ascending order
 ** *`timestamp_field` (Optional, string)*: Field containing event timestamp. Default "@timestamp"
 ** *`fetch_size` (Optional, number)*: Maximum number of events to search at a time for sequence queries.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs.
 ** *`keep_alive` (Optional, string | -1 | 0)*
 ** *`keep_on_completion` (Optional, boolean)*
 ** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*
@@ -2823,7 +2823,7 @@ client.esql.query({ query })
 * *Request (object):*
 ** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.
 ** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
 ** *`locale` (Optional, string)*
 ** *`params` (Optional, number | number | string | boolean | null[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.
 ** *`format` (Optional, string)*: A short version of the Accept header, e.g. json, yaml.
@@ -2941,9 +2941,9 @@ Defaults to 10,000 hits.
 names matching these patterns in the hits.fields property of the response.
 ** *`min_score` (Optional, number)*: Minimum _score for matching documents.
 Documents with a lower _score are not included in the search results.
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*
+** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*
 ** *`profile` (Optional, boolean)*
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
 ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*
 ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit.
 ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
@@ -3032,7 +3032,7 @@ client.graph.explore({ index })
 ** *`index` (string | string[])*: Name of the index.
 ** *`connections` (Optional, { connections, query, vertices })*: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
 ** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })*: Direct the Graph API how to build the graph.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
 ** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])*: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
 ** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
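For the `esql.query` API documented a few hunks above, a hedged usage sketch (the ES|QL query text, index, and field names are hypothetical; `columnar` and `filter` are the parameters listed in that hunk):

[source,ts]
----
// Assumes the `client` instance from the earlier sketch.
const result = await client.esql.query({
  query: 'FROM my-index | WHERE status == 200 | LIMIT 10', // hypothetical ES|QL query
  columnar: false,
  // Query DSL pre-filter applied to the documents the ES|QL query runs on
  filter: { range: { '@timestamp': { gte: 'now-1d/d' } } }
})
console.log(result.columns, result.values)
----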
 ** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for a response from each shard.
@@ -4133,7 +4133,7 @@ Wildcard patterns that match both data streams and indices return an error.
 ** *`name` (string)*: Alias to update.
 If the alias doesn’t exist, the request creates it.
 Index alias names support date math.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query used to limit documents the alias can access.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query used to limit documents the alias can access.
 ** *`index_routing` (Optional, string)*: Value used to route indexing operations to a specific shard.
 If specified, this overwrites the `routing` value for indexing operations.
 Data stream aliases don’t support this parameter.
@@ -4246,13 +4246,13 @@ client.indices.putMapping({ index })
 ** *`dynamic_date_formats` (Optional, string[])*: If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then
 a new date field is added instead of string.
-** *`dynamic_templates` (Optional, Record | Record[])*: Specify dynamic templates for the mapping.
+** *`dynamic_templates` (Optional, Record | Record[])*: Specify dynamic templates for the mapping.
 ** *`_field_names` (Optional, { enabled })*: Control whether field names are enabled for the index.
 ** *`_meta` (Optional, Record)*: A mapping type can have custom meta data associated with it. These are
 not used at all by Elasticsearch, but can be used to store application-specific metadata.
 ** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields.
-** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include:
+** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include:
 
 - Field name
 - Field data type
@@ -4333,7 +4333,7 @@ matches multiple templates.
 Templates with lower 'order' values are merged first.
 Templates with higher 'order' values are merged later,
 overriding templates with lower values.
-** *`settings` (Optional, Record)*: Configuration options for the index.
+** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index.
 ** *`version` (Optional, number)*: Version number used to manage index templates externally.
 This number is not automatically generated by Elasticsearch.
 ** *`create` (Optional, boolean)*: If true, this request cannot replace or update existing index templates.
@@ -4746,7 +4746,7 @@ client.indices.validateQuery({ ... })
 ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search.
 Supports wildcards (`*`).
 To search all data streams or indices, omit this parameter or use `*` or `_all`.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query in the Lucene query string syntax.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query in the Lucene query string syntax.
 ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
 This behavior applies even if the request targets other open indices.
 ** *`all_shards` (Optional, boolean)*: If `true`, the validation is executed on all shards instead of one random shard per index.
@@ -5459,7 +5459,7 @@ client.ml.evaluateDataFrame({ evaluation, index })
 * *Request (object):*
 ** *`evaluation` ({ classification, outlier_detection, regression })*: Defines the type of evaluation you want to perform.
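The `indices.put_mapping` hunk above widens `dynamic_templates` to also accept an array, and (per the `MappingDynamicTemplate` type diff later in this patch) widens `match` to `string | string[]`. A hedged sketch of both forms together (index, template, and field names are hypothetical):

[source,ts]
----
// Assumes the `client` instance from the earlier sketch.
await client.indices.putMapping({
  index: 'my-index', // hypothetical index
  dynamic_templates: [ // array form, per the widened type
    {
      ids_as_keywords: {
        match: ['*_id', '*_code'], // string[] match patterns, per the widened type
        match_mapping_type: 'string',
        mapping: { type: 'keyword' }
      }
    }
  ]
})
----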
 ** *`index` (string)*: Defines the `index` in which the evaluation will be performed.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index.
 
 [discrete]
 ==== explain_data_frame_analytics
@@ -6343,7 +6343,7 @@ learning nodes must have the `remote_cluster_client` role.
 stops and closes the associated job after this many real-time searches return no documents. In other words, it stops
 after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees
 no data remains started until it is explicitly stopped. By default, it is not set.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
 Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch.
 ** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might
@@ -6801,7 +6801,7 @@ learning nodes must have the `remote_cluster_client` role.
 stops and closes the associated job after this many real-time searches return no documents. In other words, it stops
 after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees
 no data remains started until it is explicitly stopped. By default, it is not set.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
 Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also changed.
 Therefore, the time required to learn might be long and the understandability of the results is
@@ -6942,9 +6942,20 @@ Updates certain properties of trained model deployment.
 {ref}/update-trained-model-deployment.html[Endpoint documentation]
 
 [source,ts]
 ----
-client.ml.updateTrainedModelDeployment()
+client.ml.updateTrainedModelDeployment({ model_id })
 ----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`model_id` (string)*: The unique identifier of the trained model. Currently, only PyTorch models are supported.
+** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed.
+All allocations on a node share the same copy of the model in memory but use
+a separate set of threads to evaluate the model.
+Increasing this value generally increases the throughput.
+If this setting is greater than the number of hardware threads
+it will automatically be changed to a value less than the number of hardware threads.
 
 [discrete]
 ==== upgrade_job_snapshot
@@ -7312,7 +7323,7 @@ client.rollup.rollupSearch({ index })
 * *Request (object):*
 ** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL.
 ** *`aggregations` (Optional, Record)*: Specifies aggregations.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query.
 ** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data.
 ** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
 ** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
@@ -7509,6 +7520,7 @@ client.searchApplication.search({ name })
 * *Request (object):*
 ** *`name` (string)*: The name of the search application to be searched.
 ** *`params` (Optional, Record)*: Query parameters specific to this request, which will override any defaults specified in the template.
+** *`typed_keys` (Optional, boolean)*: Determines whether aggregation names are prefixed by their respective types in the response.
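The `typed_keys` parameter added to `searchApplication.search` just above would be passed like this hedged sketch (the application name and template parameters are hypothetical):

[source,ts]
----
// Assumes the `client` instance from the earlier sketch.
const results = await client.searchApplication.search({
  name: 'my-search-app', // hypothetical search application
  typed_keys: true, // parameter added by this commit
  params: { query_string: 'dark vader' } // hypothetical template params
})
----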
 [discrete]
 === searchable_snapshots
@@ -8985,7 +8997,7 @@ client.sql.query({ ... })
 If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters.
 It ignores other request body parameters.
 ** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
 ** *`query` (Optional, string)*: SQL query to run.
 ** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails.
 ** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails.
@@ -9016,7 +9028,7 @@ client.sql.translate({ query })
 * *Request (object):*
 ** *`query` (string)*: SQL query to run.
 ** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
 ** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search.
 
 [discrete]
@@ -9725,7 +9737,7 @@ client.watcher.queryWatches({ ... })
 * *Request (object):*
 ** *`from` (Optional, number)*: The offset from the first result to fetch. Needs to be non-negative.
 ** *`size` (Optional, number)*: The number of hits to return. Needs to be non-negative.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, field_masking_span, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Optional, query filter watches to be returned.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Optional, query filter watches to be returned.
 ** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Optional sort definition.
 ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Optional search After to do pagination using last hit’s sort values.
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index d71a08d05..2646643a9 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -2417,19 +2417,31 @@ export default class Ml {
    * Updates certain properties of trained model deployment.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-trained-model-deployment.html | Elasticsearch API documentation}
    */
-  async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async updateTrainedModelDeployment (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateTrainedModelDeploymentResponse>
+  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateTrainedModelDeploymentResponse, unknown>>
+  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlUpdateTrainedModelDeploymentResponse>
+  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
+    const acceptedBody: string[] = ['number_of_allocations']
     const querystring: Record<string, any> = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
        querystring[key] = params[key]
      }
    }
diff --git a/src/api/types.ts b/src/api/types.ts
index 8f8a49c59..aededc0c6 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -810,7 +810,7 @@ export interface MsearchTemplateTemplateConfig {
 }
 
 export interface MtermvectorsOperation {
-  _id: Id
+  _id?: Id
   _index?: IndexName
   doc?: any
   fields?: Fields
@@ -1413,7 +1413,7 @@ export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string
 
 export interface SearchHit<TDocument = unknown> {
   _index: IndexName
-  _id: Id
+  _id?: Id
   _score?: double | null
  _explanation?: ExplainExplanation
  fields?: Record<string, any>
@@ -2925,9 +2925,9 @@ export interface AggregationsAggregationContainer {
 }
 
 export interface AggregationsAggregationRange {
-  from?: double | string | null
+  from?: double
   key?: string
-  to?: double | string | null
+  to?: double
 }
 
 export interface AggregationsArrayPercentilesItem {
@@ -4093,7 +4093,7 @@ export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_f
 export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality'
 
 export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase {
-  doc_count_error?: long
+  doc_count_error_upper_bound?: long
 }
 
 export type AggregationsTermsExclude = string | string[]
@@ -4634,7 +4634,7 @@ export type AnalysisPhoneticRuleType = 'approx' | 'exact'
 
 export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase {
   type: 'phonetic'
   encoder: AnalysisPhoneticEncoder
-  languageset: AnalysisPhoneticLanguage[]
+  languageset: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]
   max_code_len?: integer
   name_type: AnalysisPhoneticNameType
   replace?: boolean
@@ -4987,12 +4987,14 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase {
 
 export interface MappingDynamicTemplate {
   mapping?: MappingProperty
-  match?: string
-  match_mapping_type?: string
+  runtime?: MappingProperty
+  match?: string | string[]
+  path_match?: string | string[]
+  unmatch?: string | string[]
+  path_unmatch?: string | string[]
+  match_mapping_type?: string | string[]
+  unmatch_mapping_type?: string | string[]
   match_pattern?: MappingMatchType
-  path_match?: string
-  path_unmatch?: string
-  unmatch?: string
 }
 
 export interface MappingFieldAliasProperty extends MappingPropertyBase {
@@ -5009,7 +5011,7 @@ export interface MappingFieldNamesField {
   enabled: boolean
 }
 
-export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text'
+export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text'
 
 export interface MappingFlattenedProperty extends MappingPropertyBase {
   boost?: double
@@ -5107,6 +5109,8 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase {
   eager_global_ordinals?: boolean
   index?: boolean
   index_options?: MappingIndexOptions
+  script?: Script
+  on_script_error?: MappingOnScriptError
   normalizer?: string
   norms?: boolean
   null_value?: string
@@ -5854,7 +5858,7 @@ export interface QueryDslQueryContainer {
   shape?: QueryDslShapeQuery
   simple_query_string?: QueryDslSimpleQueryStringQuery
   span_containing?: QueryDslSpanContainingQuery
-  field_masking_span?: QueryDslSpanFieldMaskingQuery
+  span_field_masking?: QueryDslSpanFieldMaskingQuery
   span_first?: QueryDslSpanFirstQuery
   span_multi?: QueryDslSpanMultiTermQuery
   span_near?: QueryDslSpanNearQuery
@@ -6041,7 +6045,7 @@ export interface QueryDslSpanOrQuery extends QueryDslQueryBase {
 
 export interface QueryDslSpanQuery {
   span_containing?: QueryDslSpanContainingQuery
-  field_masking_span?: QueryDslSpanFieldMaskingQuery
+  span_field_masking?: QueryDslSpanFieldMaskingQuery
   span_first?: QueryDslSpanFirstQuery
   span_gap?: QueryDslSpanGapQuery
   span_multi?: QueryDslSpanMultiTermQuery
@@ -10398,11 +10402,11 @@ export interface IndicesMappingLimitSettings {
 }
 
 export interface IndicesMappingLimitSettingsDepth {
-  limit?: integer
+  limit?: long
 }
 
 export interface IndicesMappingLimitSettingsDimensionFields {
-  limit?: integer
+  limit?: long
 }
 
 export interface IndicesMappingLimitSettingsFieldNameLength {
@@ -10410,15 +10414,15 @@
 }
 
 export interface IndicesMappingLimitSettingsNestedFields {
-  limit?: integer
+  limit?: long
 }
 
 export interface IndicesMappingLimitSettingsNestedObjects {
-  limit?: integer
+  limit?: long
 }
 
 export interface IndicesMappingLimitSettingsTotalFields {
-  limit?: integer
+  limit?: long
 }
 
 export interface IndicesMerge {
@@ -11246,7 +11250,7 @@ export interface IndicesPutTemplateRequest extends RequestBase {
   index_patterns?: string | string[]
   mappings?: MappingTypeMapping
   order?: integer
-  settings?: Record<string, any>
+  settings?: IndicesIndexSettings
   version?: VersionNumber
 }
 
@@ -15014,6 +15018,15 @@ export interface MlUpdateModelSnapshotResponse {
   model: MlModelSnapshot
 }
 
+export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase {
+  model_id: Id
+  number_of_allocations?: integer
+}
+
+export interface MlUpdateTrainedModelDeploymentResponse {
+  assignment: MlTrainedModelAssignment
+}
+
 export interface MlUpgradeJobSnapshotRequest extends RequestBase {
   job_id: Id
   snapshot_id: Id
@@ -15694,7 +15707,7 @@ export interface NodesInfoNodeInfoRepositoriesUrl {
 
 export interface NodesInfoNodeInfoScript {
   allowed_types: string
-  disable_max_compilations_rate: string
+  disable_max_compilations_rate?: string
 }
 
 export interface NodesInfoNodeInfoSearch {
@@ -15712,7 +15725,7 @@ export interface NodesInfoNodeInfoSettings {
   repositories?: NodesInfoNodeInfoRepositories
   discovery?: NodesInfoNodeInfoDiscover
   action?: NodesInfoNodeInfoAction
-  client: NodesInfoNodeInfoClient
+  client?: NodesInfoNodeInfoClient
   http: NodesInfoNodeInfoSettingsHttp
   bootstrap?: NodesInfoNodeInfoBootstrap
   transport: NodesInfoNodeInfoSettingsTransport
@@ -15784,7 +15797,7 @@ export interface NodesInfoNodeInfoSettingsIngest {
 }
 
 export interface NodesInfoNodeInfoSettingsNetwork {
-  host: Host
+  host?: Host
 }
 
 export interface NodesInfoNodeInfoSettingsNode {
@@ -16307,6 +16320,7 @@ export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationP
 
 export interface SearchApplicationSearchRequest extends RequestBase {
   name: Name
+  typed_keys?: boolean
   params?: Record<string, any>
 }
@@ -18024,7 +18038,7 @@ export interface SynonymsPutSynonymRequest extends RequestBase {
 
 export interface SynonymsPutSynonymResponse {
   result: Result
-  reload_analyzers_details: IndicesReloadSearchAnalyzersReloadDetails
+  reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult
 }
 
 export interface SynonymsPutSynonymRuleRequest extends RequestBase {
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index d540452cf..a1c083d81 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -840,7 +840,7 @@ export interface MsearchTemplateTemplateConfig {
 }
 
 export interface MtermvectorsOperation {
-  _id: Id
+  _id?: Id
   _index?: IndexName
   doc?: any
   fields?: Fields
@@ -1468,7 +1468,7 @@ export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string
 
 export interface SearchHit<TDocument = unknown> {
   _index: IndexName
-  _id: Id
+  _id?: Id
   _score?: double | null
   _explanation?: ExplainExplanation
   fields?: Record<string, any>
@@ -2998,9 +2998,9 @@ export interface AggregationsAggregationContainer {
 }
 
 export interface AggregationsAggregationRange {
-  from?: double | string | null
+  from?: double
   key?: string
-  to?: double | string | null
+  to?: double
 }
 
 export interface AggregationsArrayPercentilesItem {
@@ -4166,7 +4166,7 @@ export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_f
 export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality'
 
 export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase {
-  doc_count_error?: long
+  doc_count_error_upper_bound?: long
 }
 
 export type AggregationsTermsExclude = string | string[]
@@ -4707,7 +4707,7 @@ export type AnalysisPhoneticRuleType = 'approx' | 'exact'
 
 export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase {
   type: 'phonetic'
   encoder: AnalysisPhoneticEncoder
-  languageset: AnalysisPhoneticLanguage[]
+  languageset: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]
   max_code_len?: integer
   name_type: AnalysisPhoneticNameType
   replace?: boolean
@@ -5060,12 +5060,14 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase {
 
 export interface MappingDynamicTemplate {
   mapping?: MappingProperty
-  match?: string
-  match_mapping_type?: string
+  runtime?: MappingProperty
+  match?: string | string[]
+  path_match?: string | string[]
+  unmatch?: string | string[]
+  path_unmatch?: string | string[]
+  match_mapping_type?: string | string[]
+  unmatch_mapping_type?: string | string[]
   match_pattern?: MappingMatchType
-  path_match?: string
-  path_unmatch?: string
-  unmatch?: string
 }
 
 export interface MappingFieldAliasProperty extends MappingPropertyBase {
@@ -5082,7 +5084,7 @@ export interface MappingFieldNamesField {
   enabled: boolean
 }
 
-export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text'
+export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text'
 
 export interface MappingFlattenedProperty extends MappingPropertyBase {
   boost?: double
@@ -5180,6 +5182,8 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase {
   eager_global_ordinals?: boolean
   index?: boolean
   index_options?: MappingIndexOptions
+  script?: Script
+  on_script_error?: MappingOnScriptError
   normalizer?: string
   norms?: boolean
   null_value?: string
@@ -5927,7 +5931,7 @@ export interface QueryDslQueryContainer {
   shape?: QueryDslShapeQuery
   simple_query_string?: QueryDslSimpleQueryStringQuery
   span_containing?: QueryDslSpanContainingQuery
-  field_masking_span?: QueryDslSpanFieldMaskingQuery
+  span_field_masking?: QueryDslSpanFieldMaskingQuery
   span_first?: QueryDslSpanFirstQuery
   span_multi?: QueryDslSpanMultiTermQuery
   span_near?: QueryDslSpanNearQuery
@@ -6114,7 +6118,7 @@ export interface QueryDslSpanOrQuery extends QueryDslQueryBase {
 
 export interface QueryDslSpanQuery {
   span_containing?: QueryDslSpanContainingQuery
-  field_masking_span?: QueryDslSpanFieldMaskingQuery
+  span_field_masking?: QueryDslSpanFieldMaskingQuery
   span_first?: QueryDslSpanFirstQuery
   span_gap?: QueryDslSpanGapQuery
   span_multi?: QueryDslSpanMultiTermQuery
@@ -10572,11 +10576,11 @@ export interface IndicesMappingLimitSettings {
 }
 
 export interface IndicesMappingLimitSettingsDepth {
-  limit?: integer
+  limit?: long
 }
 
 export interface IndicesMappingLimitSettingsDimensionFields {
-  limit?: integer
+  limit?: long
 }
 
 export interface IndicesMappingLimitSettingsFieldNameLength {
@@ -10584,15 +10588,15 @@
 }
 
 export interface IndicesMappingLimitSettingsNestedFields {
-  limit?: integer
+  limit?: long
 }
 
 export interface IndicesMappingLimitSettingsNestedObjects {
-  limit?: integer
+  limit?: long
 }
 
 export interface IndicesMappingLimitSettingsTotalFields {
-  limit?: integer
+  limit?: long
 }
 
 export interface IndicesMerge {
@@ -11448,7 +11452,7 @@ export interface IndicesPutTemplateRequest extends RequestBase {
     index_patterns?: string | string[]
     mappings?: MappingTypeMapping
     order?: integer
-    settings?: Record<string, any>
+    settings?: IndicesIndexSettings
     version?: VersionNumber
   }
 }
@@ -15355,6 +15359,18 @@ export interface MlUpdateModelSnapshotResponse {
   model: MlModelSnapshot
 }
 
+export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase {
+  model_id: Id
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
+  body?: {
+    number_of_allocations?: integer
+  }
+}
+
+export interface MlUpdateTrainedModelDeploymentResponse {
+  assignment: MlTrainedModelAssignment
+}
+
 export interface MlUpgradeJobSnapshotRequest extends RequestBase {
   job_id: Id
   snapshot_id: Id
@@ -16040,7 +16056,7 @@ export interface NodesInfoNodeInfoRepositoriesUrl {
 
 export interface NodesInfoNodeInfoScript {
   allowed_types: string
-  disable_max_compilations_rate: string
+  disable_max_compilations_rate?: string
 }
 
 export interface NodesInfoNodeInfoSearch {
@@ -16058,7 +16074,7 @@ export interface NodesInfoNodeInfoSettings {
   repositories?: NodesInfoNodeInfoRepositories
   discovery?: NodesInfoNodeInfoDiscover
   action?: NodesInfoNodeInfoAction
-  client: NodesInfoNodeInfoClient
+  client?: NodesInfoNodeInfoClient
   http: NodesInfoNodeInfoSettingsHttp
   bootstrap?: NodesInfoNodeInfoBootstrap
   transport: NodesInfoNodeInfoSettingsTransport
@@ -16130,7 +16146,7 @@ export interface NodesInfoNodeInfoSettingsIngest {
 }
 
 export interface NodesInfoNodeInfoSettingsNetwork {
-  host: Host
+  host?: Host
 }
 
 export interface NodesInfoNodeInfoSettingsNode {
@@ -16666,6 +16682,7 @@ export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationP
 
 export interface SearchApplicationSearchRequest extends RequestBase {
   name: Name
+  typed_keys?: boolean
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     params?: Record<string, any>
   }
 }
@@ -18482,7 +18499,7 @@ export interface SynonymsPutSynonymRequest extends RequestBase {
 
 export interface SynonymsPutSynonymResponse {
   result: Result
-  reload_analyzers_details: IndicesReloadSearchAnalyzersReloadDetails
+  reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult
 }
 
 export interface SynonymsPutSynonymRuleRequest extends RequestBase {

From 542585a5dc65ef0dd5d9f50b9e954cbba76f2ee6 Mon Sep 17 00:00:00 2001
From: Joey Freund
Date: Wed, 5 Jun 2024 19:42:31 +0300
Subject: [PATCH 351/647] Fix typo in documentation (example query) (#2271)

---
 docs/examples/reindex.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/examples/reindex.asciidoc b/docs/examples/reindex.asciidoc
index 9d917dbd6..a9014036a 100644
--- a/docs/examples/reindex.asciidoc
+++ b/docs/examples/reindex.asciidoc
@@ -55,7 +55,7 @@ async function run () {
     source: {
       index: 'game-of-thrones',
       query: {
-        match: { character: 'stark' }
+        match: { house: 'stark' }
       }
     },
     dest: {

From 72a1114186ef4a11b5080d399aeb9cc7a86441e3 Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Fri, 7 Jun 2024 03:12:42 +1000
Subject: [PATCH 352/647] Auto-generated code for main (#2275)

---
 docs/reference.asciidoc     |  4 ++--
 src/api/types.ts            | 33 +++++++++++++++++++++++++++------
 src/api/typesWithBodyKey.ts | 33 +++++++++++++++++++++++++++------
 3 files changed, 56 insertions(+), 14 deletions(-)

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index f06f9868c..7db4ac61a 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -3279,7 +3279,7 @@ This could be a built-in analyzer, or an analyzer that’s been configured in th
 ** *`field` (Optional, string)*: Field used to derive the analyzer.
 To use this parameter, you must specify an index.
 If specified, the `analyzer` parameter overrides this value.
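The two `MlUpdateTrainedModelDeploymentRequest` shapes defined in the type diffs above allow two equivalent call styles; a hedged sketch of both (the model ID is hypothetical):

[source,ts]
----
// Assumes the `client` instance from the earlier sketch.
// Preferred shape (src/api/types.ts): top-level keys.
await client.ml.updateTrainedModelDeployment({
  model_id: 'my-pytorch-model', // hypothetical model ID
  number_of_allocations: 2
})

// Deprecated shape (src/api/typesWithBodyKey.ts): nested 'body' key.
await client.ml.updateTrainedModelDeployment({
  model_id: 'my-pytorch-model',
  body: { number_of_allocations: 2 }
})
----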
-** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer. 
+** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer. ** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token. ** *`text` (Optional, string | string[])*: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. @@ -4252,7 +4252,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. ** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. -** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: +** *`properties` (Optional, Record)*: Mapping for a field. 
For new fields, this mapping can include: - Field name - Field data type diff --git a/src/api/types.ts b/src/api/types.ts index aededc0c6..d594b32e5 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -4362,16 +4362,16 @@ export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase { type: 'icu_collation' alternate?: AnalysisIcuCollationAlternate - caseFirst?: AnalysisIcuCollationCaseFirst - caseLevel?: boolean + case_first?: AnalysisIcuCollationCaseFirst + case_level?: boolean country?: string decomposition?: AnalysisIcuCollationDecomposition - hiraganaQuaternaryMode?: boolean + hiragana_quaternary_mode?: boolean language?: string numeric?: boolean rules?: string strength?: AnalysisIcuCollationStrength - variableTop?: string + variable_top?: string variant?: string } @@ -5011,7 +5011,7 @@ export interface MappingFieldNamesField { enabled: boolean } -export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text' +export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword' export interface MappingFlattenedProperty extends MappingPropertyBase { boost?: double @@ -5068,6 +5068,27 @@ export interface MappingHistogramProperty extends MappingPropertyBase { type: 'histogram' } +export interface MappingIcuCollationProperty extends MappingDocValuesPropertyBase { + type: 'icu_collation_keyword' + norms?: boolean + index_options?: MappingIndexOptions + index?: boolean + null_value?: string + store?: boolean + rules?: string + language?: string + country?: string + variant?: string + strength?: AnalysisIcuCollationStrength + decomposition?: AnalysisIcuCollationDecomposition + alternate?: AnalysisIcuCollationAlternate + case_level?: boolean + case_first?: AnalysisIcuCollationCaseFirst + numeric?: boolean + variable_top?: string + hiragana_quaternary_mode?: boolean +} + export interface MappingIndexField { enabled: boolean } @@ -5178,7 +5199,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { type: 'point' } -export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | 
MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingSparseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty +export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingSparseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty export interface MappingPropertyBase { meta?: Record diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index a1c083d81..fe6fe2fc4 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -4435,16 +4435,16 @@ export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase { type: 'icu_collation' alternate?: AnalysisIcuCollationAlternate - caseFirst?: AnalysisIcuCollationCaseFirst - caseLevel?: boolean + case_first?: AnalysisIcuCollationCaseFirst + case_level?: boolean country?: string decomposition?: AnalysisIcuCollationDecomposition - hiraganaQuaternaryMode?: boolean + hiragana_quaternary_mode?: boolean language?: string numeric?: boolean rules?: string strength?: AnalysisIcuCollationStrength - variableTop?: string + variable_top?: string variant?: string } @@ -5084,7 +5084,7 @@ export interface MappingFieldNamesField { enabled: boolean } -export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 
'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text' +export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword' export interface MappingFlattenedProperty extends MappingPropertyBase { boost?: double @@ -5141,6 +5141,27 @@ export interface MappingHistogramProperty extends MappingPropertyBase { type: 'histogram' } +export interface MappingIcuCollationProperty extends MappingDocValuesPropertyBase { + type: 'icu_collation_keyword' + norms?: boolean + index_options?: MappingIndexOptions + index?: boolean + null_value?: string + store?: boolean + rules?: string + language?: string + country?: string + variant?: string + strength?: AnalysisIcuCollationStrength + decomposition?: AnalysisIcuCollationDecomposition + alternate?: AnalysisIcuCollationAlternate + case_level?: boolean + case_first?: AnalysisIcuCollationCaseFirst + numeric?: boolean + variable_top?: string + hiragana_quaternary_mode?: boolean +} + export interface MappingIndexField { enabled: boolean } @@ -5251,7 +5272,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { type: 'point' } -export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingSparseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty +export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | 
MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingSparseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty export interface MappingPropertyBase { meta?: Record From f97ba5b02a4915891dcad44c190d62a1f96ff2c8 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 6 Jun 2024 13:39:38 -0500 Subject: [PATCH 353/647] Bump transport to 8.6.0 (#2277) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index c19d2dcc2..2879ff7f4 100644 --- a/package.json +++ b/package.json @@ -83,7 +83,7 @@ "zx": "^7.2.2" }, "dependencies": { - "@elastic/transport": "^8.5.0", + "@elastic/transport": "^8.6.0", "tslib": "^2.4.0" }, "tap": { From 2b0eebc8faa2ee91434760d0a46a7a1ac93ae0b7 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 6 Jun 2024 14:23:42 -0500 Subject: [PATCH 354/647] Update changelog for 8.14.0 (#2279) --- docs/changelog.asciidoc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 8c5b76c91..20de84fc7 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -7,6 +7,12 @@ [discrete] ==== Features +[discrete] +===== Support for Elasticsearch `v8.14.0` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.14/release-notes-8.14.0.html[here]. + [discrete] ===== ES|QL object API helper @@ -18,6 +24,12 @@ A TypeScript type parameter can also be provided to improve developer experience The bulk helper now supports an `onSuccess` callback that will be called for each successful operation. https://github.com/elastic/elasticsearch-js/pull/2199[#2199] +[discrete] +===== Request retries are more polite + +https://github.com/elastic/elastic-transport-js/releases/tag/v8.6.0[`@elastic/transport` v8.6.0] was released, which refactored when and how failed requests are retried. Timed-out requests are no longer retried by default, and retries now use exponential backoff rather than running immediately. 
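For readers skimming this changelog hunk, a minimal sketch of the three items it touches. Everything concrete below is illustrative — the node URL, the `sample_data` index, the ES|QL query, and the copy index are placeholders — while `maxRetries`, `helpers.esql(...).toRecords()`, and the bulk helper's `onSuccess` callback are the features named in the entries above; the `{ result, document }` callback shape follows the helper docs added in #2199.

[source,js]
----
const { Client } = require('@elastic/elasticsearch')

// maxRetries is the existing client option that the new exponential
// backoff applies to; timed-out requests are no longer retried by default.
const client = new Client({ node: 'http://localhost:9200/', maxRetries: 3 })

async function run () {
  // ES|QL object API helper: toRecords() converts each result row
  // into a plain { columnName: value } object.
  const { records } = await client.helpers.esql({
    query: 'FROM sample_data | LIMIT 10'
  }).toRecords()

  // Bulk helper: onSuccess fires once per successfully handled operation.
  await client.helpers.bulk({
    datasource: records,
    onDocument () {
      return { index: { _index: 'sample_data_copy' } }
    },
    onSuccess ({ result, document }) {
      console.log('indexed', document)
    }
  })
}

run().catch(console.log)
----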
+ + [discrete] === 8.13.1 From 8e162dd8b87263982d76df94aa481ba9752b2fe2 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Sat, 8 Jun 2024 01:51:48 +1000 Subject: [PATCH 355/647] Auto-generated code for main (#2281) --- docs/reference.asciidoc | 4 +++- src/api/types.ts | 11 ++++++++--- src/api/typesWithBodyKey.ts | 11 ++++++++--- 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 7db4ac61a..59fc4094a 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -4252,7 +4252,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. ** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. -** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: +** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -4784,6 +4784,8 @@ client.inference.delete({ inference_id }) * *Request (object):* ** *`inference_id` (string)*: The inference Id ** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type +** *`dry_run` (Optional, boolean)*: When true, the endpoint is not deleted, and a list of ingest processors which reference this endpoint is returned +** *`force` (Optional, boolean)*: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields [discrete] ==== get diff --git a/src/api/types.ts b/src/api/types.ts index d594b32e5..bb07bb0ae 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -5074,7 +5074,6 @@ export interface MappingIcuCollationProperty extends MappingDocValuesPropertyBas index_options?: MappingIndexOptions index?: boolean null_value?: string - store?: boolean rules?: string language?: string country?: string @@ -11909,6 +11908,10 @@ export interface InferenceCompletionResult { result: string } +export interface InferenceDeleteInferenceEndpointResult extends AcknowledgedResponseBase { + pipelines: string[] +} + export type InferenceDenseByteVector = byte[] export type InferenceDenseVector = float[] @@ -11961,9 +11964,11 @@ export interface InferenceTextEmbeddingResult { export interface InferenceDeleteRequest extends RequestBase { task_type?: InferenceTaskType inference_id: Id + dry_run?: boolean + force?: boolean } -export type InferenceDeleteResponse = AcknowledgedResponseBase +export type InferenceDeleteResponse = InferenceDeleteInferenceEndpointResult export interface InferenceGetRequest extends RequestBase { task_type?: InferenceTaskType @@ -13305,7 +13310,7 @@ export interface MlInferenceConfigUpdateContainer { export interface MlInferenceResponseResult { entities?: MlTrainedModelEntities[] is_truncated?: boolean - predicted_value?: MlPredictedValue[] + predicted_value?: MlPredictedValue | MlPredictedValue[] predicted_value_sequence?: string prediction_probability?: double prediction_score?: double diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index fe6fe2fc4..5b2206395 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -5147,7 +5147,6 @@ export interface MappingIcuCollationProperty extends MappingDocValuesPropertyBas index_options?: MappingIndexOptions index?: boolean null_value?: string - store?: boolean rules?: string language?: string country?: string @@ -12130,6 
+12129,10 @@ export interface InferenceCompletionResult { result: string } +export interface InferenceDeleteInferenceEndpointResult extends AcknowledgedResponseBase { + pipelines: string[] +} + export type InferenceDenseByteVector = byte[] export type InferenceDenseVector = float[] @@ -12182,9 +12185,11 @@ export interface InferenceTextEmbeddingResult { export interface InferenceDeleteRequest extends RequestBase { task_type?: InferenceTaskType inference_id: Id + dry_run?: boolean + force?: boolean } -export type InferenceDeleteResponse = AcknowledgedResponseBase +export type InferenceDeleteResponse = InferenceDeleteInferenceEndpointResult export interface InferenceGetRequest extends RequestBase { task_type?: InferenceTaskType @@ -13540,7 +13545,7 @@ export interface MlInferenceConfigUpdateContainer { export interface MlInferenceResponseResult { entities?: MlTrainedModelEntities[] is_truncated?: boolean - predicted_value?: MlPredictedValue[] + predicted_value?: MlPredictedValue | MlPredictedValue[] predicted_value_sequence?: string prediction_probability?: double prediction_score?: double From c202a6bbc52976c956f9a09bf60451fdf40577b9 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 7 Jun 2024 11:10:55 -0500 Subject: [PATCH 356/647] Run integration tests on 8.14 (#2283) --- catalog-info.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/catalog-info.yaml b/catalog-info.yaml index e7ba4261d..3b577b95d 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -37,11 +37,11 @@ spec: build_pull_requests: false build_branches: false cancel_intermediate_builds: true - cancel_intermediate_builds_branch_filter: '!main' + cancel_intermediate_builds_branch_filter: "!main" schedules: main: - branch: 'main' - cronline: '@daily' - 8_13: - branch: '8.13' - cronline: '@daily' + branch: "main" + cronline: "@daily" + 8_14: + branch: "8.14" + cronline: "@daily" From bd89ab5dd7f703997bda529cae0b8c411db362ae Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 12 Jun 2024 02:04:39 +1000 Subject: [PATCH 357/647] Auto-generated code for main (#2285) --- docs/reference.asciidoc | 6 +++--- src/api/types.ts | 2 +- src/api/typesWithBodyKey.ts | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 59fc4094a..08931b1ab 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -8166,7 +8166,7 @@ client.security.hasPrivileges({ ... 
}) * *Request (object):* ** *`user` (Optional, string)*: Username ** *`application` (Optional, { application, privileges, resources }[])* -** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check. +** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_slm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check. ** *`index` (Optional, { names, privileges, allow_restricted_indices }[])* [discrete] @@ -8280,11 +8280,11 @@ client.security.putRole({ name }) * *Request (object):* ** *`name` (string)*: The name of the role. ** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries. 
-** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. +** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_slm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. ** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. ** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries. ** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. -** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. 
+** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. ** *`transient_metadata` (Optional, Record)*: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. diff --git a/src/api/types.ts b/src/api/types.ts index bb07bb0ae..4cc8c7c93 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -16451,7 +16451,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 
'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_slm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string export interface SecurityCreatedStatus { created: boolean diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 5b2206395..38753beaa 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -16819,7 +16819,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_slm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string export interface SecurityCreatedStatus { created: boolean From 0e5beddd65cf7ca652c5db953a0d1337730d5f92 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Wed, 12 Jun 2024 15:32:41 +0400 Subject: [PATCH 358/647] Rename Buildkite team from clients-team to devtools-team (#2287) 
--- catalog-info.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/catalog-info.yaml b/catalog-info.yaml index 3b577b95d..4ce58c0b7 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -6,7 +6,7 @@ metadata: name: elasticsearch-js spec: type: library - owner: group:clients-team + owner: group:devtools-team lifecycle: production --- @@ -18,7 +18,7 @@ metadata: description: elasticsearch-js - integration tests spec: type: buildkite-pipeline - owner: group:clients-team + owner: group:devtools-team system: buildkite implementation: apiVersion: buildkite.elastic.dev/v1 @@ -29,7 +29,7 @@ spec: repository: elastic/elasticsearch-js pipeline_file: .buildkite/pipeline.yml teams: - clients-team: + devtools-team: access_level: MANAGE_BUILD_AND_READ everyone: access_level: READ_ONLY From 74be52ebb10ad2a58766c235e6545aa52a8b8bbc Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 13 Jun 2024 02:07:30 +1000 Subject: [PATCH 359/647] Auto-generated code for main (#2286) Co-authored-by: Josh Mock --- docs/reference.asciidoc | 4 ++-- src/api/types.ts | 2 +- src/api/typesWithBodyKey.ts | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 08931b1ab..d7c9224f2 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -8166,7 +8166,7 @@ client.security.hasPrivileges({ ... }) * *Request (object):* ** *`user` (Optional, string)*: Username ** *`application` (Optional, { application, privileges, resources }[])* -** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_slm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check. 
+** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check. ** *`index` (Optional, { names, privileges, allow_restricted_indices }[])* [discrete] @@ -8280,7 +8280,7 @@ client.security.putRole({ name }) * *Request (object):* ** *`name` (string)*: The name of the role. ** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries. -** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_slm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. 
+** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. ** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. ** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries. ** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. 
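Because these generated parameter lists are dense, here is a brief sketch of the two security calls documented in this hunk. The role name `logs_reader` and the `logs-*` index pattern are hypothetical; `monitor` and `read` are real privilege names drawn from the enums above.

[source,js]
----
// Define (or update) a role using cluster and index privileges...
await client.security.putRole({
  name: 'logs_reader', // hypothetical role name
  cluster: ['monitor'],
  indices: [{ names: ['logs-*'], privileges: ['read'] }]
})

// ...then check whether the *current* authenticated user holds
// the same privileges. has_all_requested is true only if every
// requested privilege is granted.
const check = await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [{ names: ['logs-*'], privileges: ['read'] }]
})
console.log(check.has_all_requested)
----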
diff --git a/src/api/types.ts b/src/api/types.ts index 4cc8c7c93..bb07bb0ae 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -16451,7 +16451,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_slm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string export interface SecurityCreatedStatus { created: boolean diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 38753beaa..5b2206395 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -16819,7 +16819,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 
'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_slm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string export interface SecurityCreatedStatus { created: boolean From 7b255bed98c24a7fb5ea8ffbb76fa40c7ab21df0 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 20 Jun 2024 17:15:05 +0100 Subject: [PATCH 360/647] Auto-generated code for main (#2291) --- docs/reference.asciidoc | 64 ++++++++++++++++++++----------------- src/api/types.ts | 26 +++++++++++---- src/api/typesWithBodyKey.ts | 26 +++++++++++---- 3 files changed, 72 insertions(+), 44 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index d7c9224f2..1bdef0d5e 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -101,7 +101,7 @@ client.count({ ... }) ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. 
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. ** *`analyzer` (Optional, string)*: Analyzer to use for the query string. @@ -204,7 +204,7 @@ client.deleteByQuery({ index }) Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`max_docs` (Optional, number)*: The maximum number of documents to delete. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to delete using the Query DSL. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to delete using the Query DSL. ** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. @@ -360,7 +360,7 @@ client.explain({ id, index }) ** *`id` (string)*: Defines the document ID. ** *`index` (string)*: Index names used to limit the request. Only a single index name can be provided to this parameter. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`analyzer` (Optional, string)*: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. ** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. @@ -391,7 +391,7 @@ client.fieldCaps({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. ** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. 
Wildcard (`*`) expressions are supported.
-** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard.
+** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard.
** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests.
These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.
** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias,
@@ -589,7 +589,7 @@ parameter defaults to false. You can pass _source: true to return both source fields
and stored fields in the search response.
** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the hits.fields property of the response.
Accepts wildcard (*) patterns.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top
`k` documents that also match this filter.
The value can be a single query or a list of queries.
If `filter` isn't provided, all documents are allowed to match.
** *`routing` (Optional, string)*: A list of specific routing values
@@ -928,12 +928,12 @@ The request returns doc values for field names matching these patterns in the `hits.fields` property of the response.
** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use.
** *`min_score` (Optional, number)*: Minimum `_score` for matching documents.
Documents with a lower `_score` are not included in the search results.
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results.
+** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results.
The search hits are filtered after the aggregations are calculated.
A post filter has no impact on the aggregation results.
** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request.
NOTE: This is a debugging tool and adds significant overhead to search execution.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases.
** *`retriever` (Optional, { standard, knn, rrf })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as query and knn.
** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit.
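This hunk adds the new `semantic` query to the query types accepted by the search API. A minimal sketch of what that might look like from the client, assuming an index whose `content` field is mapped as `semantic_text` and backed by a configured inference endpoint — the index and field names here are hypothetical; the `{ field, query }` shape matches the `QueryDslSemanticQuery` definition later in this patch:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `my-index` and `content` are placeholder names; `content` is assumed
// to be mapped as `semantic_text` with an inference endpoint attached.
const result = await client.search({
  index: 'my-index',
  query: {
    semantic: {
      field: 'content',
      query: 'how do I restore a snapshot?'
    }
  }
})
console.log(result.hits.hits)
----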
@@ -1087,7 +1087,7 @@ don’t include the aggs layer.
each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon
of the cells bounding box. If 'point' each feature is a Point that is the centroid
of the cell.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search.
** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request.
These fields take precedence over mapped fields with the same name.
** *`size` (Optional, number)*: Maximum number of features to return in the hits layer.
Accepts 0-10000.
@@ -1190,7 +1190,7 @@ client.termsEnum({ index, field })
** *`size` (Optional, number)*: How many matching terms to return.
** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded the complete flag set to false in the response and the results may be partial or empty.
** *`case_insensitive` (Optional, boolean)*: When true the provided search string is matched against index terms without case sensitivity.
-** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none.
+** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none.
** *`string` (Optional, string)*: The string after which terms in the index should be returned.
Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request.
** *`search_after` (Optional, string)*
@@ -1288,7 +1288,7 @@ client.updateByQuery({ index })
Supports wildcards (`*`).
To search all data streams or indices, omit this parameter or use `*` or `_all`.
** *`max_docs` (Optional, number)*: The maximum number of documents to update.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL.
** *`script` (Optional, { lang, options, source } | { id })*: The script to run to update the document source or metadata when updating.
** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices.
** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if update by query hits version conflicts: `abort` or `proceed`.
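For context, a minimal update-by-query sketch using the parameters documented above; the index, field, and script are hypothetical, and the snippet assumes the `client` instance from the earlier sketch:

[source,ts]
----
// Hypothetical index and field; flips a status flag on matching documents.
await client.updateByQuery({
  index: 'my-index',
  query: { term: { status: 'stale' } },
  script: { lang: 'painless', source: "ctx._source.status = 'fresh'" },
  conflicts: 'proceed' // keep going if another write bumps a document's version
})
----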
@@ -1444,9 +1444,9 @@ names matching these patterns in the hits.fields property of the response.
** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits }[])*: Defines the approximate kNN search to run.
** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results.
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*
+** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*
** *`profile` (Optional, boolean)*
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*
** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit.
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
@@ -2793,7 +2793,7 @@ client.eql.search({ index, query })
** *`tiebreaker_field` (Optional, string)*: Field used to sort hits with the same timestamp in ascending order
** *`timestamp_field` (Optional, string)*: Field containing event timestamp. Default "@timestamp"
** *`fetch_size` (Optional, number)*: Maximum number of events to search at a time for sequence queries.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs.
** *`keep_alive` (Optional, string | -1 | 0)*
** *`keep_on_completion` (Optional, boolean)*
** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*
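A sketch of the EQL search call with the `filter` parameter described above; the index name and EQL query string are placeholders, and the `client` instance from the first sketch is assumed:

[source,ts]
----
// Run an EQL query, pre-filtering the candidate events with Query DSL.
const eqlResult = await client.eql.search({
  index: 'my-logs',
  query: 'process where process.name == "regsvr32.exe"',
  filter: { range: { '@timestamp': { gte: 'now-1d' } } }
})
----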
@@ -2823,7 +2823,7 @@ client.esql.query({ query })
* *Request (object):*
** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.
** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
** *`locale` (Optional, string)*
** *`params` (Optional, number | number | string | boolean | null[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.
** *`format` (Optional, string)*: A short version of the Accept header, e.g. json, yaml.
@@ -2941,9 +2941,9 @@ Defaults to 10,000 hits.
names matching these patterns in the hits.fields property of the response.
** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results.
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*
+** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*
** *`profile` (Optional, boolean)*
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*
** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit.
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
@@ -3032,7 +3032,7 @@ client.graph.explore({ index })
** *`index` (string | string[])*: Name of the index.
** *`connections` (Optional, { connections, query, vertices })*: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })*: Direct the Graph API how to build the graph.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])*: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for a response from each shard.
@@ -4133,7 +4133,7 @@ Wildcard patterns that match both data streams and indices return an error.
** *`name` (string)*: Alias to update.
If the alias doesn’t exist, the request creates it.
Index alias names support date math.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query used to limit documents the alias can access.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query used to limit documents the alias can access.
** *`index_routing` (Optional, string)*: Value used to route indexing operations to a specific shard.
If specified, this overwrites the `routing` value for indexing operations.
Data stream aliases don’t support this parameter.
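A sketch of a filtered alias using the `put_alias` parameters above; the index, alias, and timestamp field are hypothetical, reusing the `client` from the first sketch:

[source,ts]
----
// Alias that only exposes the last week of documents.
await client.indices.putAlias({
  index: 'my-index',
  name: 'my-index-recent',
  filter: { range: { '@timestamp': { gte: 'now-7d/d' } } }
})
----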
@@ -4252,7 +4252,7 @@ a new date field is added instead of string.
not used at all by Elasticsearch, but can be used to store
application-specific metadata.
** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields.
-** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include:
+** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include:

 - Field name
 - Field data type
@@ -4746,7 +4746,7 @@ client.indices.validateQuery({ ... })
** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search.
Supports wildcards (`*`).
To search all data streams or indices, omit this parameter or use `*` or `_all`.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query in the Lucene query string syntax.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query in the Lucene query string syntax.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
This behavior applies even if the request targets other open indices.
** *`all_shards` (Optional, boolean)*: If `true`, the validation is executed on all shards instead of one random shard per index.
@@ -5461,7 +5461,7 @@ client.ml.evaluateDataFrame({ evaluation, index })
* *Request (object):*
** *`evaluation` ({ classification, outlier_detection, regression })*: Defines the type of evaluation you want to perform.
** *`index` (string)*: Defines the `index` in which the evaluation will be performed.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index.

[discrete]
==== explain_data_frame_analytics
@@ -6019,7 +6019,11 @@ client.ml.getTrainedModels({ ... })
==== Arguments

* *Request (object):*
-** *`model_id` (Optional, string)*: The unique identifier of the trained model.
+** *`model_id` (Optional, string | string[])*: The unique identifier of the trained model or a model alias.
+
+You can get information for multiple trained models in a single API
+request by using a list of model IDs or a wildcard
+expression.
** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:

 - Contains wildcard expressions and there are no models that match.
@@ -6037,7 +6041,7 @@ be retrieved and then added to another cluster.
** *`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))*: A comma delimited string of optional fields to include in the response body.
** *`size` (Optional, number)*: Specifies the maximum number of models to obtain.
-** *`tags` (Optional, string)*: A comma delimited string of tags. A trained model can have many tags, or
+** *`tags` (Optional, string | string[])*: A comma delimited string of tags. A trained model can have many tags, or
none. When supplied, only trained models that contain all the supplied tags are returned.
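The loosened `model_id` parameter means several models can now be fetched in one call. A sketch with hypothetical model IDs, reusing the `client` from the first sketch:

[source,ts]
----
// Fetch two trained models at once; wildcard expressions are also
// accepted per the docs above.
const models = await client.ml.getTrainedModels({
  model_id: ['my-model-1', 'my-model-2'],
  allow_no_match: true
})
console.log(models.count)
----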
@@ -6231,7 +6235,7 @@ client.ml.putCalendarJob({ calendar_id, job_id })
* *Request (object):*
** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
-** *`job_id` (string)*: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups.
+** *`job_id` (string | string[])*: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups.

[discrete]
==== put_data_frame_analytics
@@ -6345,7 +6349,7 @@ learning nodes must have the `remote_cluster_client` role.
stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped.
By default, it is not set.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch.
** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might
@@ -6803,7 +6807,7 @@ learning nodes must have the `remote_cluster_client` role.
stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped.
By default, it is not set.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch.
Note that if you change the query, the analyzed data is also changed. Therefore, the time required to learn might be long and the understandability of the results is
@@ -7325,7 +7329,7 @@ client.rollup.rollupSearch({ index })
* *Request (object):*
** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL.
** *`aggregations` (Optional, Record)*: Specifies aggregations.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query.
** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data.
** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
@@ -8999,7 +9003,7 @@ client.sql.query({ ... })
If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters.
It ignores other request body parameters.
** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
** *`query` (Optional, string)*: SQL query to run.
** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails.
** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails.
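A sketch combining a SQL query with the DSL `filter` parameter documented above; index and field names are placeholders, and the `client` from the first sketch is assumed:

[source,ts]
----
// SQL over an index, narrowed by a Query DSL filter.
const rows = await client.sql.query({
  query: 'SELECT author, page_count FROM books ORDER BY page_count DESC',
  filter: { term: { genre: 'fiction' } },
  fetch_size: 5
})
console.log(rows.rows)
----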
@@ -9030,7 +9034,7 @@ client.sql.translate({ query })
* *Request (object):*
** *`query` (string)*: SQL query to run.
** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search.

[discrete]
====
@@ -9739,7 +9743,7 @@ client.watcher.queryWatches({ ... })
* *Request (object):*
** *`from` (Optional, number)*: The offset from the first result to fetch. Needs to be non-negative.
** *`size` (Optional, number)*: The number of hits to return. Needs to be non-negative.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Optional, query filter watches to be returned.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Optional, query filter watches to be returned.
** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Optional sort definition.
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Optional search After to do pagination using last hit’s sort values.
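The type changes that follow introduce a `semantic_text` mapping property (`MappingSemanticTextProperty`, keyed by an `inference_id`) alongside the `semantic` query. A sketch of creating an index with such a field, assuming an inference endpoint is already configured — all names here are hypothetical:

[source,ts]
----
// `inference_id` must reference a configured inference endpoint;
// the field shape matches MappingSemanticTextProperty below.
await client.indices.create({
  index: 'semantic-demo',
  mappings: {
    properties: {
      content: { type: 'semantic_text', inference_id: 'my-inference-endpoint' }
    }
  }
})
----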
diff --git a/src/api/types.ts b/src/api/types.ts
index bb07bb0ae..df17e1cbb 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -5011,7 +5011,7 @@ export interface MappingFieldNamesField {
   enabled: boolean
 }
 
-export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword'
+export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword'
 
 export interface MappingFlattenedProperty extends MappingPropertyBase {
   boost?: double
@@ -5198,7 +5198,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase {
   type: 'point'
 }
 
-export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingSparseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty
+export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty
 
 export interface MappingPropertyBase {
   meta?: Record
@@ -5265,6 +5265,12 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase
   type: 'search_as_you_type'
 }
 
+export interface MappingSemanticTextProperty {
+  type: 'semantic_text'
+  meta?: Record
+  inference_id: Id
+}
+
 export interface MappingShapeProperty extends MappingDocValuesPropertyBase {
   coerce?: boolean
   ignore_malformed?: boolean
@@ -5875,6 +5881,7 @@ export interface QueryDslQueryContainer {
   rule_query?: QueryDslRuleQuery
   script?: QueryDslScriptQuery
   script_score?: QueryDslScriptScoreQuery
+  semantic?: QueryDslSemanticQuery
   shape?: QueryDslShapeQuery
   simple_query_string?: QueryDslSimpleQueryStringQuery
   span_containing?: QueryDslSpanContainingQuery
@@ -5992,6 +5999,11 @@ export interface QueryDslScriptScoreQuery extends QueryDslQueryBase {
   script: Script
 }
 
+export interface QueryDslSemanticQuery extends QueryDslQueryBase {
+  field: string
+  query: string
+}
+
 export interface QueryDslShapeFieldQuery {
   indexed_shape?: QueryDslFieldLookup
   relation?: GeoShapeRelation
@@ -12883,8 +12895,8 @@ export interface MlDatafeedConfig {
   datafeed_id?: Id
   delayed_data_check_config?: MlDelayedDataCheckConfig
   frequency?: Duration
-  indices?: string[]
-  indexes?: string[]
+  indices?: Indices
+  indexes?: Indices
   indices_options?: IndicesOptions
   job_id?: Id
   max_empty_searches?: integer
@@ -14399,14 +14411,14 @@ export interface MlGetRecordsResponse {
 }
 
 export interface MlGetTrainedModelsRequest extends RequestBase {
-  model_id?: Id
+  model_id?: Ids
   allow_no_match?: boolean
   decompress_definition?: boolean
   exclude_generated?: boolean
   from?: integer
   include?: MlInclude
   size?: integer
-  tags?: string
+  tags?: string | string[]
 }
 
 export interface MlGetTrainedModelsResponse {
@@ -14560,7 +14572,7 @@ export interface MlPutCalendarResponse {
 
 export interface MlPutCalendarJobRequest extends RequestBase {
   calendar_id: Id
-  job_id: Id
+  job_id: Ids
 }
 
 export interface MlPutCalendarJobResponse {
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index 5b2206395..c32c3af00 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -5084,7 +5084,7 @@ export interface MappingFieldNamesField {
   enabled: boolean
 }
 
-export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword'
+export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword'
 
 export interface MappingFlattenedProperty extends MappingPropertyBase {
   boost?: double
@@ -5271,7 +5271,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase {
   type: 'point'
 }
 
-export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingSparseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty
+export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty
 
 export interface MappingPropertyBase {
   meta?: Record
@@ -5338,6 +5338,12 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase
   type: 'search_as_you_type'
 }
 
+export interface MappingSemanticTextProperty {
+  type: 'semantic_text'
+  meta?: Record
+  inference_id: Id
+}
+
 export interface MappingShapeProperty extends MappingDocValuesPropertyBase {
   coerce?: boolean
   ignore_malformed?: boolean
@@ -5948,6 +5954,7 @@ export interface QueryDslQueryContainer {
   rule_query?: QueryDslRuleQuery
   script?: QueryDslScriptQuery
   script_score?: QueryDslScriptScoreQuery
+  semantic?: QueryDslSemanticQuery
   shape?: QueryDslShapeQuery
   simple_query_string?: QueryDslSimpleQueryStringQuery
   span_containing?: QueryDslSpanContainingQuery
@@ -6065,6 +6072,11 @@ export interface QueryDslScriptScoreQuery extends QueryDslQueryBase {
   script: Script
 }
 
+export interface QueryDslSemanticQuery extends QueryDslQueryBase {
+  field: string
+  query: string
+}
+
 export interface QueryDslShapeFieldQuery {
   indexed_shape?: QueryDslFieldLookup
   relation?: GeoShapeRelation
@@ -13118,8 +13130,8 @@ export interface MlDatafeedConfig {
   datafeed_id?: Id
   delayed_data_check_config?: MlDelayedDataCheckConfig
   frequency?: Duration
-  indices?: string[]
-  indexes?: string[]
+  indices?: Indices
+  indexes?: Indices
   indices_options?: IndicesOptions
   job_id?: Id
   max_empty_searches?: integer
@@ -14676,14 +14688,14 @@ export interface MlGetRecordsResponse {
 }
 
 export interface MlGetTrainedModelsRequest extends RequestBase {
-  model_id?: Id
+  model_id?: Ids
  allow_no_match?: boolean
   decompress_definition?: boolean
   exclude_generated?: boolean
   from?: integer
   include?: MlInclude
   size?: integer
-  tags?: string
+  tags?: string | string[]
 }
 
 export interface MlGetTrainedModelsResponse {
@@ -14856,7 +14868,7 @@ export interface MlPutCalendarResponse {
 
 export interface MlPutCalendarJobRequest extends RequestBase {
   calendar_id: Id
-  job_id: Id
+  job_id: Ids
 }
 
 export interface MlPutCalendarJobResponse {
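The hunks above add a `semantic_text` field type (`MappingSemanticTextProperty`) and a `semantic` query container (`QueryDslSemanticQuery`) to both generated type files. For illustration only (not part of the patch series): a minimal sketch of how the two new types compose in a client call. The index name, field name, and `my-inference-endpoint` are hypothetical, and the sketch assumes the inference endpoint already exists.

[source,js]
----
// Sketch under assumptions: `client` is a configured Client instance and an
// inference endpoint named `my-inference-endpoint` already exists. The index
// and field names are placeholders, not values taken from the patches.
async function semanticExample (client) {
  // Map a field with the new `semantic_text` type (MappingSemanticTextProperty).
  await client.indices.create({
    index: 'my-index',
    mappings: {
      properties: {
        content: { type: 'semantic_text', inference_id: 'my-inference-endpoint' }
      }
    }
  })

  // Query the field with the new `semantic` query (QueryDslSemanticQuery).
  return await client.search({
    index: 'my-index',
    query: {
      semantic: { field: 'content', query: 'how do I restore a snapshot?' }
    }
  })
}
----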
From db911746a0d05b676f6502d31bffc39ff565f6b1 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Thu, 20 Jun 2024 14:18:59 -0500
Subject: [PATCH 361/647] Add GH_TOKEN to release job (#2292)

---
 .github/workflows/npm-publish.yml | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml
index 615b943eb..b4be036b1 100644
--- a/.github/workflows/npm-publish.yml
+++ b/.github/workflows/npm-publish.yml
@@ -3,7 +3,7 @@ on:
   workflow_dispatch:
     inputs:
       branch:
-        description: 'Git branch to build and publish'
+        description: "Git branch to build and publish"
         required: true
 jobs:
   build:
@@ -17,8 +17,8 @@ jobs:
           ref: ${{ github.event.inputs.branch }}
       - uses: actions/setup-node@v3
         with:
-          node-version: '20.x'
-          registry-url: 'https://registry.npmjs.org/'
+          node-version: "20.x"
+          registry-url: "https://registry.npmjs.org/"
       - run: npm install -g npm
       - run: npm install
      - run: npm test
@@ -34,3 +34,4 @@ jobs:
             "v$version"
         env:
           BRANCH_NAME: ${{ github.event.inputs.branch }}
+          GH_TOKEN: ${{ github.token }}

From 069103612a36133c56b00b2b634918e98ce3d976 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Fri, 28 Jun 2024 13:26:09 -0500
Subject: [PATCH 362/647] Bump to 8.15 (#2297)

---
 .buildkite/pipeline.yml | 2 +-
 package.json            | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 4dc60e227..95ed83af2 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -6,7 +6,7 @@ steps:
       env:
         NODE_VERSION: "{{ matrix.nodejs }}"
         TEST_SUITE: "{{ matrix.suite }}"
-        STACK_VERSION: 8.14.0
+        STACK_VERSION: 8.15.0
       matrix:
         setup:
           suite:
diff --git a/package.json b/package.json
index 2879ff7f4..6dd5a7da5 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
-  "version": "8.14.0",
-  "versionCanary": "8.14.0-canary.0",
+  "version": "8.15.0",
+  "versionCanary": "8.15.0-canary.0",
   "description": "The official Elasticsearch client for Node.js",
   "main": "index.js",
   "types": "index.d.ts",
@@ -93,4 +93,4 @@
     "coverage": false,
     "check-coverage": false
   }
-}
+}
\ No newline at end of file

From 1f9db892ea9ff4fd8517a46894dae816adff2932 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Mon, 1 Jul 2024 12:36:52 -0500
Subject: [PATCH 363/647] Drop deprecated use of npm install --production=false (#2298)

---
 .buildkite/Dockerfile      | 10 +++++-----
 .buildkite/Dockerfile-make |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile
index 6d44e2211..2bf3886dc 100644
--- a/.buildkite/Dockerfile
+++ b/.buildkite/Dockerfile
@@ -3,14 +3,14 @@ FROM node:$NODE_VERSION
 
 # Install required tools
 RUN apt-get clean -y && \
-    apt-get -qy update && \
-    apt-get -y install zip && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+  apt-get -qy update && \
+  apt-get -y install zip && \
+  apt-get clean && \
+  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
 
 WORKDIR /usr/src/app
 
 COPY package.json .
-RUN npm install --production=false
+RUN npm install
 
 COPY . .
diff --git a/.buildkite/Dockerfile-make b/.buildkite/Dockerfile-make
index 1f871d9f2..3805eb0a2 100644
--- a/.buildkite/Dockerfile-make
+++ b/.buildkite/Dockerfile-make
@@ -24,7 +24,7 @@ USER ${BUILDER_UID}:${BUILDER_GID}
 
 # install dependencies
 COPY package.json .
-RUN npm install --production=false
+RUN npm install
 
 # copy project files
 COPY . .

From f34bb6aa28e95d5ff8d86191db66cbb6a0db50a8 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Mon, 1 Jul 2024 13:29:55 -0500
Subject: [PATCH 364/647] Upgrade transport to 8.7.0 (#2300)

---
 package.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/package.json b/package.json
index 6dd5a7da5..ed81cc6d4 100644
--- a/package.json
+++ b/package.json
@@ -83,7 +83,7 @@
     "zx": "^7.2.2"
   },
   "dependencies": {
-    "@elastic/transport": "^8.6.0",
+    "@elastic/transport": "^8.7.0",
     "tslib": "^2.4.0"
   },
   "tap": {
@@ -93,4 +93,4 @@
     "coverage": false,
     "check-coverage": false
   }
-}
\ No newline at end of file
+}
From 94bf5b2aa7f6a2eb3e310a26ea10d6ff91c467c4 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Mon, 1 Jul 2024 15:47:50 -0500
Subject: [PATCH 365/647] Documentation for OpenTelemetry support (#2289)

* Documentation for OpenTelemetry support

* Update docs/observability.asciidoc

Co-authored-by: Miguel Grinberg

* Fix docs typo

* Fix bad link references in asciidoc changelog

* Drop link to 8.15 changelog

For now. Link just doesn't work yet.

---------

Co-authored-by: Miguel Grinberg
---
 docs/changelog.asciidoc     |  15 ++++-
 docs/observability.asciidoc | 111 ++++++++++++++++++++----------
 2 files changed, 81 insertions(+), 45 deletions(-)

diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc
index 20de84fc7..d3474617d 100644
--- a/docs/changelog.asciidoc
+++ b/docs/changelog.asciidoc
@@ -1,6 +1,19 @@
 [[changelog-client]]
 == Release notes
 
+[discrete]
+=== 8.15.0
+
+[discrete]
+==== Features
+
+[discrete]
+===== OpenTelemetry zero-code instrumentation support
+
+For those that use an observability service that supports OpenTelemetry spans, the client will now automatically generate traces for each Elasticsearch request it makes.
+See {jsclient}/observability.html#_opentelemetry[the docs]
+for more information.
+
 [discrete]
 === 8.14.0
 
@@ -198,7 +211,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/8.9/release-notes-8.9.0.
 [discrete]
 ===== Allow document to be overwritten in `onDocument` iteratee of bulk helper https://github.com/elastic/elasticsearch-js/pull/1732[#1732]
 
-In the https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html#bulk-helper[bulk helper], documents could not be modified before being sent to Elasticsearch. It is now possible to https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html#_modifying_a_document_before_operation[modify a document] before sending it.
+In the {jsclient}/client-helpers.html#bulk-helper[bulk helper], documents could not be modified before being sent to Elasticsearch. It is now possible to {jsclient}/client-helpers.html#_modifying_a_document_before_operation[modify a document] before sending it.
 
 [discrete]
 ==== Fixes
diff --git a/docs/observability.asciidoc b/docs/observability.asciidoc
index 8ae57bcad..9436d457f 100644
--- a/docs/observability.asciidoc
+++ b/docs/observability.asciidoc
@@ -1,24 +1,52 @@
 [[observability]]
 === Observability
 
-The client does not provide a default logger, but instead it offers an event
-emitter interface to hook into internal events, such as `request` and
-`response`.
+To observe and measure Elasticsearch client usage, several client features are provided.
 
-Correlating those events can be hard, especially if your applications have a
-large codebase with many events happening at the same time.
+First, as of 8.15.0, the client provides native support for OpenTelemetry, which allows you to send client usage data to any endpoint that supports OpenTelemetry without having to make any changes to your JavaScript codebase.
 
-To help you with this, the client offers you a correlation id system and other
-features. Let's see them in action.
+Also, rather than providing a default logger, the client offers an event
+emitter interface to hook into internal events, such as `request` and
+`response`, allowing you to log the events you care about, or otherwise react
+to client usage however you might need.
 
+Correlating events can be hard, especially if your applications have a large codebase with many events happening at the same time. To help you with this, the client provides a correlation ID system, and other
+features.
+
+All of these observability features are documented below.
+
+[discrete]
+==== OpenTelemetry
+
+The client supports OpenTelemetry's https://opentelemetry.io/docs/zero-code/js/[zero-code
+instrumentation] to enable tracking each client request as an
+https://opentelemetry.io/docs/concepts/signals/traces/#spans[OpenTelemetry span]. These spans
+follow all of the https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/[semantic
+OpenTelemetry conventions for Elasticsearch] except for `db.query.text`.
+
+To start sending Elasticsearch trace data to your OpenTelemetry endpoint, follow
+https://opentelemetry.io/docs/zero-code/js/[OpenTelemetry's zero-code instrumentation guide],
+or the following steps:
+
+1. Install `@opentelemetry/api` and `@opentelemetry/auto-instrumentations-node` as Node.js dependencies
+2. Export the following environment variables with the appropriate values:
+  - `OTEL_EXPORTER_OTLP_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_HEADERS`
+  - `OTEL_RESOURCE_ATTRIBUTES`
+  - `OTEL_SERVICE_NAME`
+3. `require` the Node.js auto-instrumentation library at startup:
+
+[source,bash]
+----
+node --require '@opentelemetry/auto-instrumentations-node/register' index.js
+----
 
 [discrete]
 ==== Events
 
-The client is an event emitter, this means that you can listen for its event and
-add additional logic to your code, without need to change the client internals
-or your normal usage. You can find the events names by access the `events` key
-of the client.
+The client is an event emitter. This means that you can listen for its events to
+add additional logic to your code, without needing to change the client's internals
+or how you use the client. You can find the events' names by accessing the `events` key
+of the client:
 
 [source,js]
 ----
@@ -26,9 +54,8 @@ const { events } = require('@elastic/elasticsearch')
 console.log(events)
 ----
 
-
-The event emitter functionality can be useful if you want to log every request,
-response and error that is happening during the use of the client.
+The event emitter functionality can be useful if you want to log every request,
+response or error that is created by the client:
 
 [source,js]
 ----
@@ -48,7 +75,6 @@ client.diagnostic.on('response', (err, result) => {
 })
 ----
 
-
 The client emits the following events:
 [cols=2*]
 |===
@@ -108,7 +134,7 @@ client.diagnostic.on('resurrect', (err, result) => {
 |===
 
-The values of `result` in `serialization`, `request`, `deserialization`,
+The values of `result` in `serialization`, `request`, `deserialization`,
 `response` and `sniff` are:
 
 [source,ts]
 ----
@@ -135,7 +161,6 @@ meta: {
 };
 ----
 
-
 While the `result` value in `resurrect` is:
 
 [source,ts]
@@ -152,10 +177,10 @@ request: {
 [discrete]
 ===== Events order
 
-The event order is described in the following graph, in some edge cases, the
+The event order is described in the following graph, in some edge cases, the
 order is not guaranteed.
-You can find in
-https://github.com/elastic/elasticsearch-js/blob/main/test/acceptance/events-order.test.js[`test/acceptance/events-order.test.js`]
+You can find in
+https://github.com/elastic/elasticsearch-js/blob/main/test/acceptance/events-order.test.js[`test/acceptance/events-order.test.js`]
 how the order changes based on the situation.
 
 [source]
@@ -175,12 +200,11 @@ serialization
 └─▶ response
 ----
 
-
 [discrete]
-==== Correlation id
+==== Correlation ID
 
-Correlating events can be hard, especially if there are many events at the same
-time. The client offers you an automatic (and configurable) system to help you
+Correlating events can be hard, especially if there are many events at the same
+time. The client offers you an automatic (and configurable) system to help you
 handle this problem.
 
 [source,js]
@@ -211,8 +235,7 @@ client.search({
 }).then(console.log, console.log)
 ----
 
-
-By default the id is an incremental integer, but you can configure it with the
+By default the ID is an incremental integer, but you can configure it with the
 `generateRequestId` option:
 
 [source,js]
@@ -231,7 +254,7 @@ const client = new Client({
 ----
 
-You can also specify a custom id per request:
+You can also specify a custom ID per request:
 
 [source,js]
 ----
@@ -247,8 +270,8 @@ client.search({
 [discrete]
 ==== Context object
 
-Sometimes, you might need to make some custom data available in your events, you
-can do that via the `context` option of a request:
+Sometimes, you might need to make some custom data available in your events, you
+can do that via the `context` option of a request:
 
 [source,js]
 ----
@@ -283,7 +306,7 @@ client.search({
 ----
 
 The context object can also be configured as a global option in the client
-configuration. If you provide both, the two context objects will be shallow
+configuration. If you provide both, the two context objects will be shallow
 merged, and the API level object will take precedence.
 
 [source,js]
@@ -323,9 +346,9 @@ client.search({
 [discrete]
 ==== Client name
 
-If you are using multiple instances of the client or if you are using multiple
-child clients _(which is the recommended way to have multiple instances of the
-client)_, you might need to recognize which client you are using. The `name`
+If you are using multiple instances of the client or if you are using multiple
+child clients _(which is the recommended way to have multiple instances of the
+client)_, you might need to recognize which client you are using. The `name`
 options help you in this regard.
 
 [source,js]
@@ -374,15 +397,15 @@ child.search({
 [discrete]
 ==== X-Opaque-Id support
 
-To improve observability, the client offers an easy way to configure the
-`X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this
-allows you to discover this identifier in the
-https://www.elastic.co/guide/en/elasticsearch/reference/current/logging.html#deprecation-logging[deprecation logs],
-helps you with https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-slowlog.html#_identifying_search_slow_log_origin[identifying search slow log origin]
+To improve observability, the client offers an easy way to configure the
+`X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this
+allows you to discover this identifier in the
+https://www.elastic.co/guide/en/elasticsearch/reference/current/logging.html#deprecation-logging[deprecation logs],
+helps you with https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-slowlog.html#_identifying_search_slow_log_origin[identifying search slow log origin]
 as well as https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html#_identifying_running_tasks[identifying running tasks].
 
-The `X-Opaque-Id` should be configured in each request, for doing that you can
-use the `opaqueId` option, as you can see in the following example. The
+The `X-Opaque-Id` should be configured in each request, for doing that you can
+use the `opaqueId` option, as you can see in the following example. The
 resulting header will be `{ 'X-Opaque-Id': 'my-search' }`.
 
 [source,js]
@@ -401,10 +424,10 @@ client.search({
 }).then(console.log, console.log)
 ----
 
-Sometimes it may be useful to prefix all the `X-Opaque-Id` headers with a
-specific string, in case you need to identify a specific client or server. For
-doing this, the client offers a top-level configuration option:
-`opaqueIdPrefix`. In the following example, the resulting header will be
+Sometimes it may be useful to prefix all the `X-Opaque-Id` headers with a
+specific string, in case you need to identify a specific client or server. For
+doing this, the client offers a top-level configuration option:
+`opaqueIdPrefix`. In the following example, the resulting header will be
 `{ 'X-Opaque-Id': 'proxy-client::my-search' }`.
 
 [source,js]
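The observability docs added in the patch above describe zero-code OpenTelemetry instrumentation driven by environment variables. For comparison only (not part of the patch series): a minimal programmatic sketch using the OpenTelemetry Node SDK; the service name and OTLP endpoint are placeholder values, and this is one possible setup rather than the documented approach.

[source,js]
----
// Sketch only: register OpenTelemetry auto-instrumentation in code instead
// of via `--require`. The endpoint URL and service name are placeholders.
const { NodeSDK } = require('@opentelemetry/sdk-node')
const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node')
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http')

const sdk = new NodeSDK({
  serviceName: 'my-es-app',
  traceExporter: new OTLPTraceExporter({ url: 'http://localhost:4318/v1/traces' }),
  instrumentations: [getNodeAutoInstrumentations()]
})
sdk.start()

// Elasticsearch client requests made after this point are traced as spans.
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })
----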
From 384debee9e199a73fa78a95c3a1b3bb196ab0a73 Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Thu, 25 Jul 2024 07:38:52 +1000
Subject: [PATCH 366/647] Auto-generated code for main (#2306)

---
 docs/reference.asciidoc | 1579 +++++++++++++++++----
 src/api/api/async_search.ts | 41 +-
 src/api/api/autoscaling.ts | 30 +-
 src/api/api/bulk.ts | 11 +-
 src/api/api/capabilities.ts | 68 +
 src/api/api/cat.ts | 232 ++-
 src/api/api/ccr.ts | 102 +-
 src/api/api/clear_scroll.ts | 11 +-
 src/api/api/close_point_in_time.ts | 8 +-
 src/api/api/cluster.ts | 122 +-
 src/api/api/count.ts | 9 +-
 src/api/api/create.ts | 12 +-
 src/api/api/dangling_indices.ts | 22 +-
 src/api/api/delete.ts | 12 +-
 src/api/api/delete_by_query.ts | 11 +-
 src/api/api/delete_by_query_rethrottle.ts | 9 +-
 src/api/api/delete_script.ts | 11 +-
 src/api/api/enrich.ts | 44 +-
 src/api/api/eql.ts | 39 +-
 src/api/api/esql.ts | 71 +-
 src/api/api/exists.ts | 12 +-
 src/api/api/exists_source.ts | 12 +-
 src/api/api/explain.ts | 12 +-
 src/api/api/features.ts | 11 +-
 src/api/api/field_caps.ts | 11 +-
 src/api/api/fleet.ts | 50 +-
 src/api/api/get.ts | 10 +-
 src/api/api/get_script.ts | 11 +-
 src/api/api/get_script_context.ts | 6 +-
 src/api/api/get_script_languages.ts | 6 +-
 src/api/api/get_source.ts | 10 +-
 src/api/api/graph.ts | 11 +-
 src/api/api/health_report.ts | 9 +-
 src/api/api/ilm.ts | 87 +-
 src/api/api/index.ts | 12 +-
 src/api/api/indices.ts | 584 ++++++--
 src/api/api/inference.ts | 41 +-
 src/api/api/info.ts | 6 +-
 src/api/api/ingest.ts | 158 ++-
 src/api/api/knn_search.ts | 9 +-
 src/api/api/license.ts | 42 +-
 src/api/api/logstash.ts | 31 +-
 src/api/api/mget.ts | 9 +-
 src/api/api/migration.ts | 19 +-
 src/api/api/ml.ts | 698 +++++++--
 src/api/api/monitoring.ts | 9 +-
 src/api/api/msearch.ts | 9 +-
 src/api/api/msearch_template.ts | 11 +-
 src/api/api/mtermvectors.ts | 9 +-
 src/api/api/nodes.ts | 76 +-
 src/api/api/open_point_in_time.ts | 11 +-
 src/api/api/ping.ts | 6 +-
 src/api/api/profiling.ts | 162 +++
 src/api/api/put_script.ts | 12 +-
 src/api/api/query_rules.ts | 295 ++++
 src/api/api/query_ruleset.ts | 162 ---
 src/api/api/rank_eval.ts | 11 +-
 src/api/api/reindex.ts | 6 +-
 src/api/api/reindex_rethrottle.ts | 11 +-
 src/api/api/render_search_template.ts | 11 +-
 src/api/api/rollup.ts | 69 +-
 src/api/api/scripts_painless_execute.ts | 8 +-
 src/api/api/scroll.ts | 9 +-
 src/api/api/search.ts | 11 +-
 src/api/api/search_application.ts | 83 +-
 src/api/api/search_mvt.ts | 13 +-
 src/api/api/search_shards.ts | 9 +-
 src/api/api/search_template.ts | 11 +-
 src/api/api/searchable_snapshots.ts | 34 +-
 src/api/api/security.ts | 613 ++++++--
 src/api/api/shutdown.ts | 25 +-
 src/api/api/simulate.ts | 85 ++
 src/api/api/slm.ts | 58 +-
 src/api/api/snapshot.ts | 106 +-
 src/api/api/sql.ts | 40 +-
 src/api/api/ssl.ts | 6 +-
 src/api/api/synonyms.ts | 59 +-
 src/api/api/tasks.ts | 24 +-
 src/api/api/terms_enum.ts | 9 +-
 src/api/api/termvectors.ts | 10 +-
 src/api/api/text_structure.ts | 69 +-
 src/api/api/transform.ts | 131 +-
 src/api/api/update.ts | 10 +-
 src/api/api/update_by_query.ts | 11 +-
 src/api/api/update_by_query_rethrottle.ts | 9 +-
 src/api/api/watcher.ts | 93 +-
 src/api/api/xpack.ts | 15 +-
 src/api/index.ts | 33 +-
 src/api/types.ts | 520 ++++---
 src/api/typesWithBodyKey.ts | 535 ++++---
 90 files changed, 6331 insertions(+), 1489 deletions(-)
 create mode 100644 src/api/api/capabilities.ts
 create mode 100644 src/api/api/profiling.ts
 create mode 100644 src/api/api/query_rules.ts
 delete mode 100644 src/api/api/query_ruleset.ts
 create mode 100644 src/api/api/simulate.ts

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index 1bdef0d5e..c7668ae7a 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -27,7 +27,8 @@
 [discrete]
 === bulk
-Allows to perform multiple index/update/delete operations in a single request.
+Performs multiple indexing or delete operations in a single API call.
+This reduces overhead and can greatly increase indexing speed.
** *`analyzer` (Optional, string)*: Analyzer to use for the query string. @@ -129,9 +130,8 @@ Elasticsearch collects documents before sorting. [discrete] === create -Creates a new document in the index. - -Returns a 409 response when a document with a same ID already exists in the index. +Adds a JSON document to the specified data stream or index and makes it searchable. +If the target is an index and the document already exists, the request updates the document and increments its version. {ref}/docs-index_.html[Endpoint documentation] [source,ts] @@ -162,7 +162,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] === delete -Removes a document from the index. +Removes a JSON document from the specified index. {ref}/docs-delete.html[Endpoint documentation] [source,ts] @@ -189,7 +189,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] === delete_by_query -Deletes documents matching the provided query. +Deletes documents that match the specified query. {ref}/docs-delete-by-query.html[Endpoint documentation] [source,ts] @@ -204,7 +204,7 @@ client.deleteByQuery({ index }) Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`max_docs` (Optional, number)*: The maximum number of documents to delete. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to delete using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to delete using the Query DSL. ** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. @@ -268,7 +268,7 @@ client.deleteByQueryRethrottle({ task_id }) [discrete] === delete_script -Deletes a script. +Deletes a stored script or search template. 
 
 {ref}/modules-scripting.html[Endpoint documentation]
 [source,ts]
@@ -287,7 +287,7 @@ If no response is received before the timeout expires, the request fails and ret
 [discrete]
 === exists
-Returns information about whether a document exists in an index.
+Checks if a document in an index exists.
 
 {ref}/docs-get.html[Endpoint documentation]
 [source,ts]
@@ -318,7 +318,7 @@ The specified version must match the current version of the document for the req
 [discrete]
 === exists_source
-Returns information about whether a document source exists in an index.
+Checks if a document's `_source` is stored.
{ref}/search-field-caps.html[Endpoint documentation] [source,ts] @@ -391,7 +393,7 @@ client.fieldCaps({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. ** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. +** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. ** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. ** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, @@ -437,7 +439,7 @@ If this field is specified, the `_source` parameter defaults to false. [discrete] === get_script -Returns a script. +Retrieves a stored script or search template. {ref}/modules-scripting.html[Endpoint documentation] [source,ts] @@ -517,7 +519,8 @@ client.healthReport({ ... }) [discrete] === index -Creates or updates a document in an index. +Adds a JSON document to the specified data stream or index and makes it searchable. +If the target is an index and the document already exists, the request updates the document and increments its version. {ref}/docs-index_.html[Endpoint documentation] [source,ts] @@ -589,7 +592,7 @@ parameter defaults to false. You can pass _source: true to return both source fi and stored fields in the search response. ** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. 
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. ** *`routing` (Optional, string)*: A list of specific routing values @@ -655,7 +658,7 @@ client.msearch({ ... }) [discrete] === msearch_template -Allows to execute several search template operations in one request. +Runs multiple templated searches with a single request. {ref}/search-multi-search.html[Endpoint documentation] [source,ts] @@ -710,7 +713,12 @@ Random by default. 
[discrete] === open_point_in_time -Open a point in time that can be used in subsequent searches +A search request by default executes against the most recent visible data of the target indices, +which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the +state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple +search requests using the same point in time. For example, if refreshes happen between +`search_after` requests, then the results of those requests might not be consistent as changes happening +between searches are only visible to the more recent point in time. {ref}/point-in-time-api.html[Endpoint documentation] [source,ts] @@ -743,7 +751,7 @@ client.ping() [discrete] === put_script -Creates or updates a script. +Creates or updates a stored script or search template. {ref}/modules-scripting.html[Endpoint documentation] [source,ts] @@ -766,7 +774,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] === rank_eval -Allows to evaluate the quality of ranked search results over a set of typical search queries +Enables you to evaluate the quality of ranked search results over a set of typical search queries. {ref}/search-rank-eval.html[Endpoint documentation] [source,ts] @@ -805,7 +813,7 @@ client.reindex({ dest, source }) ** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source you are copying from. ** *`conflicts` (Optional, Enum("abort" | "proceed"))*: Set to proceed to continue reindexing even if there are conflicts. ** *`max_docs` (Optional, number)*: The maximum number of documents to reindex. -** *`script` (Optional, { lang, options, source } | { id })*: The script to run to update the document source or metadata when reindexing. +** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when reindexing. ** *`size` (Optional, number)* ** *`refresh` (Optional, boolean)*: If `true`, the request refreshes affected shards to make this operation visible to search. ** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. @@ -821,7 +829,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] === reindex_rethrottle -Changes the number of requests per second for a particular Reindex operation. +Copies documents from a source to a destination. {ref}/docs-reindex.html[Endpoint documentation] [source,ts] @@ -837,7 +845,7 @@ client.reindexRethrottle({ task_id }) [discrete] === render_search_template -Allows to use the Mustache language to pre-render a search definition. +Renders a search template as a search request body. {ref}/render-search-template-api.html[Endpoint documentation] [source,ts] @@ -861,7 +869,7 @@ If no `id` or `` is specified, this parameter is required. [discrete] === scripts_painless_execute -Allows an arbitrary script to be executed and a result to be returned +Runs a script and returns a result. {painless}/painless-execute-api.html[Endpoint documentation] [source,ts] @@ -874,7 +882,7 @@ client.scriptsPainlessExecute({ ... }) * *Request (object):* ** *`context` (Optional, string)*: The context that the script should run in. ** *`context_setup` (Optional, { document, index, query })*: Additional parameters for the `context`. -** *`script` (Optional, { lang, options, source })*: The Painless script to execute. 
+** *`script` (Optional, { source, id, params, lang, options })*: The Painless script to execute. [discrete] === scroll @@ -895,7 +903,9 @@ client.scroll({ scroll_id }) [discrete] === search -Returns results matching a query. +Returns search hits that match the query defined in the request. +You can provide search queries using the `q` query string parameter or the request body. +If both are specified, only the query parameter is used. {ref}/search-search.html[Endpoint documentation] [source,ts] @@ -928,12 +938,12 @@ The request returns doc values for field names matching these patterns in the `h ** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use. ** *`min_score` (Optional, number)*: Minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results. -** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. ** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. ** *`retriever` (Optional, { standard, knn, rrf })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as query and knn. ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. @@ -1087,7 +1097,7 @@ don’t include the aggs layer. each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon of the cells bounding box. If 'point' each feature is a Point that is the centroid of the cell. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search. ** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. ** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000. @@ -1130,7 +1140,7 @@ Random by default. 
[discrete] === search_template -Allows to use the Mustache language to pre-render a search definition. +Runs a search with a search template. {ref}/search-template.html[Endpoint documentation] [source,ts] @@ -1190,7 +1200,7 @@ client.termsEnum({ index, field }) ** *`size` (Optional, number)*: How many matching terms to return. ** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded the complete flag set to false in the response and the results may be partial or empty. ** *`case_insensitive` (Optional, boolean)*: When true the provided search string is matched against index terms without case sensitivity. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none. +** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none. ** *`string` (Optional, string)*: The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request. ** *`search_after` (Optional, string)* @@ -1245,7 +1255,7 @@ client.update({ id, index }) to 'noop' if no change to the document occurred. ** *`doc` (Optional, object)*: A partial update to an existing document. ** *`doc_as_upsert` (Optional, boolean)*: Set to true to use the contents of 'doc' as the value of 'upsert' -** *`script` (Optional, { lang, options, source } | { id })*: Script to execute to update the document. +** *`script` (Optional, { source, id, params, lang, options })*: Script to execute to update the document. ** *`scripted_upsert` (Optional, boolean)*: Set to true to execute the script whether or not the document exists. ** *`_source` (Optional, boolean | { excludes, includes })*: Set to false to disable source retrieval. You can also specify a comma-separated list of the fields you want to retrieve. 
@@ -1271,9 +1281,8 @@ Set to 'all' or any positive integer up to the total number of shards in the ind [discrete] === update_by_query -Updates documents that match the specified query. If no query is specified, - performs an update on every document in the index without changing the source, -for example to pick up a mapping change. +Updates documents that match the specified query. +If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. {ref}/docs-update-by-query.html[Endpoint documentation] [source,ts] @@ -1288,8 +1297,8 @@ client.updateByQuery({ index }) Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`max_docs` (Optional, number)*: The maximum number of documents to update. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL. -** *`script` (Optional, { lang, options, source } | { id })*: The script to run to update the document source or metadata when updating. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL. +** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when updating. ** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. ** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if update by query hits version conflicts: `abort` or `proceed`. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. @@ -1356,7 +1365,10 @@ client.updateByQueryRethrottle({ task_id }) === async_search [discrete] ==== delete -Deletes an async search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. +Deletes an async search by identifier. +If the search is still running, the search request will be cancelled. +Otherwise, the saved search results are deleted. 
+If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. {ref}/async-search.html[Endpoint documentation] [source,ts] @@ -1372,7 +1384,8 @@ client.asyncSearch.delete({ id }) [discrete] ==== get -Retrieves the results of a previously submitted async search request given its ID. +Retrieves the results of a previously submitted async search request given its identifier. +If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. {ref}/async-search.html[Endpoint documentation] [source,ts] @@ -1397,7 +1410,9 @@ By default no timeout is set meaning that the currently available results will b [discrete] ==== status -Retrieves the status of a previously submitted async search request given its ID. +Get async search status +Retrieves the status of a previously submitted async search request given its identifier, without retrieving search results. +If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. {ref}/async-search.html[Endpoint documentation] [source,ts] @@ -1413,7 +1428,11 @@ client.asyncSearch.status({ id }) [discrete] ==== submit -Executes a search request asynchronously. +Runs a search request asynchronously. +When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field, hence partial results become available following the sort criteria that was requested. +Warning: Async search does not support scroll nor search requests that only include the suggest section. +By default, Elasticsearch doesn’t allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. +The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. {ref}/async-search.html[Endpoint documentation] [source,ts] @@ -1444,9 +1463,9 @@ names matching these patterns in the hits.fields property of the response. ** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits }[])*: Defines the approximate kNN search to run. ** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. 
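To tie the submit, status, get, and delete endpoints together, a hedged end-to-end sketch (the index name and query are placeholders):

[source,ts]
----
// Sketch: submit an async search, poll its status, then fetch and clean up.
const submitted = await client.asyncSearch.submit({
  index: 'my-index',                  // placeholder index
  query: { match_all: {} },
  wait_for_completion_timeout: '2s',  // return early if the search is slow
  keep_on_completion: true            // keep results so we can fetch them by id
})

if (submitted.id != null) {
  const status = await client.asyncSearch.status({ id: submitted.id })
  if (!status.is_running) {
    const result = await client.asyncSearch.get({ id: submitted.id })
    console.log(result.response?.hits.hits)
    await client.asyncSearch.delete({ id: submitted.id })
  }
}
----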
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* ** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. 
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
@@ -1517,11 +1536,75 @@ A partial reduction is performed every time the coordinating node has received a
** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field
** *`q` (Optional, string)*: Query in the Lucene query string syntax

+[discrete]
+=== autoscaling
[discrete]
==== delete_autoscaling_policy
Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.

{ref}/autoscaling-delete-autoscaling-policy.html[Endpoint documentation]
[source,ts]
----
client.autoscaling.deleteAutoscalingPolicy({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The name of the autoscaling policy.

[discrete]
==== get_autoscaling_capacity
Gets the current autoscaling capacity based on the configured autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.

{ref}/autoscaling-get-autoscaling-capacity.html[Endpoint documentation]
[source,ts]
----
client.autoscaling.getAutoscalingCapacity()
----

[discrete]
==== get_autoscaling_policy
Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.

{ref}/autoscaling-get-autoscaling-policy.html[Endpoint documentation]
[source,ts]
----
client.autoscaling.getAutoscalingPolicy({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The name of the autoscaling policy.

[discrete]
==== put_autoscaling_policy
Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.

{ref}/autoscaling-put-autoscaling-policy.html[Endpoint documentation]
[source,ts]
----
client.autoscaling.putAutoscalingPolicy({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The name of the autoscaling policy.
** *`policy` (Optional, { roles, deciders })*
+
[discrete]
=== cat
[discrete]
==== aliases
-Shows information about currently configured aliases to indices including filter and routing infos.
+Retrieves the cluster’s index aliases, including filter and routing information.
+The API does not return data stream aliases.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or the Kibana console.
+They are not intended for use by applications. For application consumption, use the get component template API. {ref}/cat-component-templates.html[Endpoint documentation] [source,ts] @@ -1571,7 +1658,10 @@ client.cat.componentTemplates({ ... }) [discrete] ==== count -Provides quick access to the document count of the entire cluster, or individual indices. +Provides quick access to a document count for a data stream, an index, or an entire cluster. +NOTE: The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the count API. {ref}/cat-count.html[Endpoint documentation] [source,ts] @@ -1588,7 +1678,9 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para [discrete] ==== fielddata -Shows how much heap memory is currently being used by fielddata on every data node in the cluster. +Returns the amount of heap memory currently used by the field data cache on every data node in the cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the nodes stats API. {ref}/cat-fielddata.html[Endpoint documentation] [source,ts] @@ -1606,7 +1698,16 @@ To retrieve all fields, omit this parameter. [discrete] ==== health -Returns a concise representation of the cluster health. +Returns the health status of a cluster, similar to the cluster health API. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the cluster health API. +This API is often used to check malfunctioning clusters. +To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: +`HH:MM:SS`, which is human-readable but includes no date information; +`Unix epoch time`, which is machine-sortable and includes date information. +The latter format is useful for cluster recoveries that take multiple days. +You can use the cat health API to verify cluster health across multiple nodes. +You also can use the API to track the recovery of a large cluster over a longer period of time. {ref}/cat-health.html[Endpoint documentation] [source,ts] @@ -1634,7 +1735,12 @@ client.cat.help() [discrete] ==== indices -Returns information about indices: number of primaries and replicas, document counts, disk size, ... +Returns high-level information about indices in a cluster, including backing indices for data streams. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the get index API. +Use the cat indices API to get the following information for each index in a cluster: shard count; document count; deleted document count; primary store size; total store size of all shards, including shard replicas. +These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. +To get an accurate count of Elasticsearch documents, use the cat count or count APIs. 
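When an application does need this data, a rough sketch of asking for machine-readable output instead of the text table, assuming the request forwards the `format`, `h`, and `s` query parameters that the REST endpoint accepts (the index pattern is illustrative):

[source,ts]
----
// Sketch: JSON rows instead of the human-oriented plain-text table.
const rows = await client.cat.indices({
  index: 'my-index-*',                      // placeholder pattern
  format: 'json',                           // structured output
  h: ['index', 'docs.count', 'store.size'], // columns to return
  s: ['store.size:desc']                    // sort by store size, descending
})
console.log(rows)
----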
{ref}/cat-indices.html[Endpoint documentation] [source,ts] @@ -1657,7 +1763,8 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para [discrete] ==== master -Returns information about the master node. +Returns information about the master node, including the ID, bound IP address, and name. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. {ref}/cat-master.html[Endpoint documentation] [source,ts] @@ -1668,7 +1775,11 @@ client.cat.master() [discrete] ==== ml_data_frame_analytics -Gets configuration and usage information about data frame analytics jobs. +Returns configuration and usage information about data frame analytics jobs. + +IMPORTANT: cat APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get data frame analytics jobs statistics API. {ref}/cat-dfanalytics.html[Endpoint documentation] [source,ts] @@ -1690,7 +1801,14 @@ response. [discrete] ==== ml_datafeeds -Gets configuration and usage information about datafeeds. +Returns configuration and usage information about datafeeds. +This API returns a maximum of 10,000 datafeeds. +If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` +cluster privileges to use this API. + +IMPORTANT: cat APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get datafeed statistics API. {ref}/cat-datafeeds.html[Endpoint documentation] [source,ts] @@ -1718,7 +1836,14 @@ partial matches. [discrete] ==== ml_jobs -Gets configuration and usage information about anomaly detection jobs. +Returns configuration and usage information for anomaly detection jobs. +This API returns a maximum of 10,000 jobs. +If the Elasticsearch security features are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. + +IMPORTANT: cat APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get anomaly detection job statistics API. {ref}/cat-anomaly-detectors.html[Endpoint documentation] [source,ts] @@ -1747,7 +1872,11 @@ matches. [discrete] ==== ml_trained_models -Gets configuration and usage information about inference trained models. +Returns configuration and usage information about inference trained models. + +IMPORTANT: cat APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get trained models statistics API. {ref}/cat-trained-model.html[Endpoint documentation] [source,ts] @@ -1772,6 +1901,7 @@ If `false`, the API returns a 404 status code when there are no matches or only [discrete] ==== nodeattrs Returns information about custom node attributes. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
{ref}/cat-nodeattrs.html[Endpoint documentation] [source,ts] @@ -1782,7 +1912,8 @@ client.cat.nodeattrs() [discrete] ==== nodes -Returns basic statistics about performance of cluster nodes. +Returns information about the nodes in a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. {ref}/cat-nodes.html[Endpoint documentation] [source,ts] @@ -1800,7 +1931,8 @@ client.cat.nodes({ ... }) [discrete] ==== pending_tasks -Returns a concise representation of the cluster pending tasks. +Returns cluster-level changes that have not yet been executed. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. {ref}/cat-pending-tasks.html[Endpoint documentation] [source,ts] @@ -1811,7 +1943,8 @@ client.cat.pendingTasks() [discrete] ==== plugins -Returns information about installed plugins across nodes node. +Returns a list of plugins running on each node of a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. {ref}/cat-plugins.html[Endpoint documentation] [source,ts] @@ -1822,7 +1955,10 @@ client.cat.plugins() [discrete] ==== recovery -Returns information about index shard recoveries, both on-going completed. +Returns information about ongoing and completed shard recoveries. +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. +For data streams, the API returns information about the stream’s backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. {ref}/cat-recovery.html[Endpoint documentation] [source,ts] @@ -1842,7 +1978,8 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para [discrete] ==== repositories -Returns information about snapshot repositories registered in the cluster. +Returns the snapshot repositories for a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. {ref}/cat-repositories.html[Endpoint documentation] [source,ts] @@ -1853,7 +1990,9 @@ client.cat.repositories() [discrete] ==== segments -Provides low-level information about the segments in the shards of an index. +Returns low-level information about the Lucene segments in index shards. +For data streams, the API returns information about the backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. {ref}/cat-segments.html[Endpoint documentation] [source,ts] @@ -1872,7 +2011,9 @@ To target all data streams and indices, omit this parameter or use `*` or `_all` [discrete] ==== shards -Provides a detailed view of shard allocation on nodes. 
+Returns information about the shards in a cluster. +For data streams, the API returns information about the backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. {ref}/cat-shards.html[Endpoint documentation] [source,ts] @@ -1891,7 +2032,9 @@ To target all data streams and indices, omit this parameter or use `*` or `_all` [discrete] ==== snapshots -Returns all snapshots in a specific repository. +Returns information about the snapshots stored in one or more repositories. +A snapshot is a backup of an index or running Elasticsearch cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. {ref}/cat-snapshots.html[Endpoint documentation] [source,ts] @@ -1911,7 +2054,8 @@ If any repository fails during the request, Elasticsearch returns an error. [discrete] ==== tasks -Returns information about the tasks currently executing on one or more nodes in the cluster. +Returns information about tasks currently executing in the cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. {ref}/tasks.html[Endpoint documentation] [source,ts] @@ -1930,7 +2074,9 @@ client.cat.tasks({ ... }) [discrete] ==== templates -Returns information about existing templates. +Returns information about index templates in a cluster. +You can use index templates to apply index settings and field mappings to new indices at creation. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. {ref}/cat-templates.html[Endpoint documentation] [source,ts] @@ -1947,8 +2093,9 @@ Accepts wildcard expressions. If omitted, all templates are returned. [discrete] ==== thread_pool -Returns cluster-wide thread pool statistics per node. -By default the active, queue and rejected statistics are returned for all thread pools. +Returns thread pool statistics for each node in a cluster. +Returned information includes all built-in thread pools and custom thread pools. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. {ref}/cat-thread-pool.html[Endpoint documentation] [source,ts] @@ -1966,7 +2113,11 @@ Accepts wildcard expressions. [discrete] ==== transforms -Gets configuration and usage information about transforms. +Returns configuration and usage information about transforms. + +IMPORTANT: cat APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get transform statistics API. {ref}/cat-transforms.html[Endpoint documentation] [source,ts] @@ -2261,7 +2412,8 @@ client.cluster.allocationExplain({ ... }) [discrete] ==== delete_component_template -Deletes a component template +Deletes component templates. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
{ref}/indices-component-template.html[Endpoint documentation] [source,ts] @@ -2324,7 +2476,7 @@ Defaults to false, which means information is retrieved from the master node. [discrete] ==== get_component_template -Returns one or more component templates +Retrieves information about component templates. {ref}/indices-component-template.html[Endpoint documentation] [source,ts] @@ -2347,7 +2499,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== get_settings -Returns cluster settings. +Returns cluster-wide settings. +By default, it returns only settings that have been explicitly defined. {ref}/cluster-get-settings.html[Endpoint documentation] [source,ts] @@ -2368,7 +2521,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== health -Returns basic information about the health of the cluster. +The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. +The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status. {ref}/cluster-health.html[Endpoint documentation] [source,ts] @@ -2411,8 +2565,10 @@ client.cluster.info({ target }) [discrete] ==== pending_tasks -Returns a list of any cluster-level changes (e.g. create index, update mapping, -allocate or fail shard) which have not yet been executed. +Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed. +NOTE: This API returns a list of any pending updates to the cluster state. +These are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. +However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. {ref}/cluster-pending.html[Endpoint documentation] [source,ts] @@ -2454,7 +2610,21 @@ is satisfied, the request fails and returns an error. [discrete] ==== put_component_template -Creates or updates a component template +Creates or updates a component template. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. + +An index template can be composed of multiple component templates. +To use a component template, specify it in an index template’s `composed_of` list. +Component templates are only applied to new data streams and indices as part of a matching index template. + +Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. + +Component templates are only used during index creation. +For data streams, this includes data stream creation and the creation of a stream’s backing indices. +Changes to component templates do not affect existing indices, including a stream’s backing indices. 
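A hedged sketch of composing templates as described above (both template names are illustrative):

[source,ts]
----
// Sketch: a settings-only component template, composed into an index template.
await client.cluster.putComponentTemplate({
  name: 'my-settings',                 // illustrative component template name
  template: {
    settings: { number_of_shards: 1 }
  }
})

await client.indices.putIndexTemplate({
  name: 'my-template',                 // illustrative index template name
  index_patterns: ['my-index-*'],
  composed_of: ['my-settings']         // reference the component template
})
----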
+ +You can use C-style `/* *\/` block comments in component templates. +You can include comments anywhere in the request body except before the opening curly bracket. {ref}/indices-component-template.html[Endpoint documentation] [source,ts] @@ -2507,7 +2677,9 @@ client.cluster.putSettings({ ... }) [discrete] ==== remote_info -Returns the information about configured remote clusters. +The cluster remote info API allows you to retrieve all of the configured +remote cluster information. It returns connection and endpoint information +keyed by the configured remote cluster alias. {ref}/cluster-remote-info.html[Endpoint documentation] [source,ts] @@ -2565,7 +2737,8 @@ client.cluster.state({ ... }) [discrete] ==== stats -Returns high-level overview of cluster statistics. +Returns cluster statistics. +It returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). {ref}/cluster-stats.html[Endpoint documentation] [source,ts] @@ -2671,7 +2844,7 @@ client.enrich.executePolicy({ name }) [discrete] ==== get_policy -Gets information about an enrich policy. +Returns information about an enrich policy. {ref}/get-enrich-policy-api.html[Endpoint documentation] [source,ts] @@ -2688,7 +2861,7 @@ To return information for all enrich policies, omit this parameter. [discrete] ==== put_policy -Creates a new enrich policy. +Creates an enrich policy. {ref}/put-enrich-policy-api.html[Endpoint documentation] [source,ts] @@ -2707,7 +2880,7 @@ client.enrich.putPolicy({ name }) [discrete] ==== stats -Gets enrich coordinator statistics and information about enrich policies that are currently executing. +Returns enrich coordinator statistics and information about enrich policies that are currently executing. {ref}/enrich-stats-api.html[Endpoint documentation] [source,ts] @@ -2720,7 +2893,8 @@ client.enrich.stats() === eql [discrete] ==== delete -Deletes an async EQL search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. +Deletes an async EQL search or a stored synchronous EQL search. +The API also deletes results for the search. {ref}/eql-search-api.html[Endpoint documentation] [source,ts] @@ -2738,7 +2912,7 @@ A search ID is also provided if the request’s `keep_on_completion` parameter i [discrete] ==== get -Returns async results from previously executed Event Query Language (EQL) search +Returns the current status and available results for an async EQL search or a stored synchronous EQL search. {ref}/get-async-eql-search-api.html[Endpoint documentation] [source,ts] @@ -2758,7 +2932,7 @@ Defaults to no timeout, meaning the request waits for complete search results. [discrete] ==== get_status -Returns the status of a previously submitted async or stored Event Query Language (EQL) search +Returns the current status for an async EQL search or a stored synchronous EQL search without returning results. {ref}/get-async-eql-status-api.html[Endpoint documentation] [source,ts] @@ -2793,7 +2967,7 @@ client.eql.search({ index, query }) ** *`tiebreaker_field` (Optional, string)*: Field used to sort hits with the same timestamp in ascending order ** *`timestamp_field` (Optional, string)*: Field containing event timestamp. Default "@timestamp" ** *`fetch_size` (Optional, number)*: Maximum number of events to search at a time for sequence queries. 
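To ground the EQL parameters above, a hedged sketch of a sequence query (the index pattern, event categories, and field values are placeholders):

[source,ts]
----
// Sketch: processes that start and then open an outbound connection on one host.
const result = await client.eql.search({
  index: 'my-logs-*', // placeholder index pattern
  query: `
    sequence by host.name
      [ process where process.name == "cmd.exe" ]
      [ network where network.direction == "outbound" ]
  `
})
console.log(result.hits.sequences)
----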
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs. ** *`keep_alive` (Optional, string | -1 | 0)* ** *`keep_on_completion` (Optional, boolean)* ** *`wait_for_completion_timeout` (Optional, string | -1 | 0)* @@ -2807,9 +2981,31 @@ client.eql.search({ index, query }) [discrete] === esql +[discrete] +==== async_query +Executes an ESQL request asynchronously + +{ref}/esql-async-query-api.html[Endpoint documentation] +[source,ts] +---- +client.esql.asyncQuery() +---- + + +[discrete] +==== async_query_get +Retrieves the results of a previously submitted async query request given its ID. 
+
+{ref}/esql-async-query-get-api.html[Endpoint documentation]
+[source,ts]
+----
+client.esql.asyncQueryGet()
+----
+

[discrete]
==== query
-Executes an ESQL request
+Executes an ES|QL request

{ref}/esql-rest.html[Endpoint documentation]
[source,ts]
----
client.esql.query({ query })
----

[discrete]
==== Arguments

* *Request (object):*
** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.
** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
** *`locale` (Optional, string)*
** *`params` (Optional, number | number | string | boolean | null | User-defined value[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.
+** *`profile` (Optional, boolean)*: If provided and `true`, the response will include an extra `profile` object
+with information on how the query was executed. This information is for human debugging
+and its format can change at any time, but it can give some insight into the performance
+of each part of the query.
+** *`tables` (Optional, Record)*: Tables to use with the LOOKUP operation. The top level key is the table
+name and the next level key is the column name.
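As a rough sketch of the parameters above, including a `?` placeholder bound through `params` and a Query DSL `filter` (the index and field names are illustrative):

[source,ts]
----
// Sketch: parameterized ES|QL query restricted by a Query DSL range filter.
const response = await client.esql.query({
  query: 'FROM my-index | WHERE status == ? | STATS count = COUNT(*) BY host',
  params: [200],
  filter: { range: { '@timestamp': { gte: 'now-1d' } } }
})
console.log(response)
----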
** *`format` (Optional, string)*: A short version of the Accept header, e.g. json, yaml. ** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format. +** *`drop_null_columns` (Optional, boolean)*: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? +Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. [discrete] === features @@ -2881,7 +3085,9 @@ will cause Elasticsearch to immediately return the current global checkpoints. [discrete] ==== msearch -Multi Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project. +Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) with a single API request. +The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) API. However, similar to the fleet search API, it +supports the wait_for_checkpoints parameter. [source,ts] ---- client.fleet.msearch({ ... }) @@ -2913,7 +3119,8 @@ which is true by default. [discrete] ==== search -Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project. +The purpose of the fleet search api is to provide a search api where the search will only be executed +after provided checkpoint has been processed and is visible for searches inside of Elasticsearch. [source,ts] ---- client.fleet.search({ index }) @@ -2941,9 +3148,9 @@ Defaults to 10,000 hits. names matching these patterns in the hits.fields property of the response. ** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. 
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* ** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* @@ -3017,7 +3224,7 @@ which is true by default. === graph [discrete] ==== explore -Explore extracted and summarized information about the documents and terms in an index. 
+Extracts and summarizes information about the documents and terms in an Elasticsearch data stream or index. {ref}/graph-explore-api.html[Endpoint documentation] [source,ts] @@ -3032,7 +3239,7 @@ client.graph.explore({ index }) ** *`index` (string | string[])*: Name of the index. ** *`connections` (Optional, { connections, query, vertices })*: Specifies or more fields from which you want to extract terms that are associated with the specified vertices. ** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })*: Direct the Graph API how to build the graph. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query. ** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])*: Specifies one or more fields that contain the terms you want to include in the graph as vertices. ** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. ** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for a response from each shard. @@ -3043,7 +3250,7 @@ Defaults to no timeout. === ilm [discrete] ==== delete_lifecycle -Deletes the specified lifecycle policy definition. A currently used policy cannot be deleted. +Deletes the specified lifecycle policy definition. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. {ref}/ilm-delete-lifecycle.html[Endpoint documentation] [source,ts] @@ -3061,7 +3268,7 @@ client.ilm.deleteLifecycle({ policy }) [discrete] ==== explain_lifecycle -Retrieves information about the index's current lifecycle state, such as the currently executing phase, action, and step. +Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures. 
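For example, a quick sketch of checking where managed indices sit in their lifecycle (the index pattern is illustrative):

[source,ts]
----
// Sketch: print the current ILM phase/action/step for each matching index.
const explain = await client.ilm.explainLifecycle({ index: 'my-index-*' })
for (const [name, state] of Object.entries(explain.indices)) {
  console.log(name, state)
}
----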
{ref}/ilm-explain-lifecycle.html[Endpoint documentation] [source,ts] @@ -3082,7 +3289,7 @@ To target all data streams and indices, use `*` or `_all`. [discrete] ==== get_lifecycle -Returns the specified policy definition. Includes the policy version and last modified date. +Retrieves a lifecycle policy. {ref}/ilm-get-lifecycle.html[Endpoint documentation] [source,ts] @@ -3111,7 +3318,9 @@ client.ilm.getStatus() [discrete] ==== migrate_to_data_tiers -Migrates the indices and ILM policies away from custom node attribute allocation routing to data tiers routing +Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and +attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+ +Using node roles enables ILM to automatically move the indices between data tiers. {ref}/ilm-migrate-to-data-tiers.html[Endpoint documentation] [source,ts] @@ -3148,7 +3357,7 @@ client.ilm.moveToStep({ index }) [discrete] ==== put_lifecycle -Creates a lifecycle policy +Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented. {ref}/ilm-put-lifecycle.html[Endpoint documentation] [source,ts] @@ -3256,7 +3465,7 @@ client.indices.addBlock({ index, block }) [discrete] ==== analyze -Performs the analysis process on a text and return the tokens breakdown of the text. +Performs analysis on a text string and returns the resulting tokens. {ref}/indices-analyze.html[Endpoint documentation] [source,ts] @@ -3287,7 +3496,8 @@ If an array of strings is provided, it is analyzed as a multi-value field. [discrete] ==== clear_cache -Clears all or specific caches for one or more indices. +Clears the caches of one or more indices. +For data streams, the API clears the caches of the stream’s backing indices. {ref}/indices-clearcache.html[Endpoint documentation] [source,ts] @@ -3317,7 +3527,7 @@ Use the `fields` parameter to clear the cache of specific fields only. [discrete] ==== clone -Clones an index +Clones an existing index. {ref}/indices-clone-index.html[Endpoint documentation] [source,ts] @@ -3371,7 +3581,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== create -Creates an index with optional settings and mappings. +Creates a new index. {ref}/indices-create-index.html[Endpoint documentation] [source,ts] @@ -3399,7 +3609,9 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== create_data_stream -Creates a data stream +Create a data stream. +Creates a data stream. +You must have a matching index template with data stream enabled. {ref}/data-streams.html[Endpoint documentation] [source,ts] @@ -3420,7 +3632,8 @@ Cannot be longer than 255 bytes. Multi-byte characters count towards this limit [discrete] ==== data_streams_stats -Provides statistics on operations happening in a data stream. +Get data stream stats. +Retrieves statistics for one or more data streams. {ref}/data-streams.html[Endpoint documentation] [source,ts] @@ -3440,7 +3653,7 @@ Supports a list of values, such as `open,hidden`. [discrete] ==== delete -Deletes an index. +Deletes one or more indices. {ref}/indices-delete-index.html[Endpoint documentation] [source,ts] @@ -3470,7 +3683,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== delete_alias -Deletes an alias. +Removes a data stream or index from an alias. 
{ref}/indices-aliases.html[Endpoint documentation] [source,ts] @@ -3493,7 +3706,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== delete_data_lifecycle -Deletes the data stream lifecycle of the selected data streams. +Delete data stream lifecycles. +Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. {ref}/data-streams-delete-lifecycle.html[Endpoint documentation] [source,ts] @@ -3512,7 +3726,8 @@ client.indices.deleteDataLifecycle({ name }) [discrete] ==== delete_data_stream -Deletes a data stream. +Delete data streams. +Deletes one or more data streams and their backing indices. {ref}/data-streams.html[Endpoint documentation] [source,ts] @@ -3529,7 +3744,10 @@ client.indices.deleteDataStream({ name }) [discrete] ==== delete_index_template -Deletes an index template. +Delete an index template. +The provided may contain multiple template names separated by a comma. If multiple template +names are specified then there is no wildcard support and the provided names should match completely with +existing templates. {ref}/indices-delete-template.html[Endpoint documentation] [source,ts] @@ -3547,7 +3765,7 @@ client.indices.deleteIndexTemplate({ name }) [discrete] ==== delete_template -Deletes an index template. +Deletes a legacy index template. {ref}/indices-delete-template-v1.html[Endpoint documentation] [source,ts] @@ -3568,7 +3786,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== disk_usage -Analyzes the disk usage of each field of an index or data stream +Analyzes the disk usage of each field of an index or data stream. {ref}/indices-disk-usage.html[Endpoint documentation] [source,ts] @@ -3596,7 +3814,7 @@ To use the API, this parameter must be set to `true`. [discrete] ==== downsample -Downsample an index +Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. {ref}/indices-downsample-data-stream.html[Endpoint documentation] [source,ts] @@ -3614,7 +3832,7 @@ client.indices.downsample({ index, target_index }) [discrete] ==== exists -Returns information about whether a particular index exists. +Checks if a data stream, index, or alias exists. {ref}/indices-exists.html[Endpoint documentation] [source,ts] @@ -3640,7 +3858,7 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== exists_alias -Returns information about whether a particular alias exists. +Checks if an alias exists. {ref}/indices-aliases.html[Endpoint documentation] [source,ts] @@ -3683,6 +3901,7 @@ client.indices.existsIndexTemplate({ name }) [discrete] ==== exists_template +Check existence of index templates. Returns information about whether a particular index template exists. {ref}/indices-template-exists-v1.html[Endpoint documentation] @@ -3720,7 +3939,7 @@ client.indices.explainDataLifecycle({ index }) [discrete] ==== field_usage_stats -Returns the field usage stats for each field of an index +Returns field usage information for each shard and field of an index. {ref}/field-usage-stats.html[Endpoint documentation] [source,ts] @@ -3750,7 +3969,7 @@ Set to all or any positive integer up to the total number of shards in the index [discrete] ==== flush -Performs the flush operation on one or more indices. +Flushes one or more data streams or indices. 
{ref}/indices-flush.html[Endpoint documentation]
[source,ts]
@@ -3801,7 +4020,8 @@ client.indices.forcemerge({ ... })

[discrete]
==== get
-Returns information about one or more indices.
+Returns information about one or more indices. For data streams, the API returns information about the
+stream’s backing indices.

{ref}/indices-get-index.html[Endpoint documentation]
[source,ts]
@@ -3830,7 +4050,7 @@ such as open,hidden.

[discrete]
==== get_alias
-Returns an alias.
+Retrieves information for one or more aliases.

{ref}/indices-aliases.html[Endpoint documentation]
[source,ts]
@@ -3859,7 +4079,8 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`.

[discrete]
==== get_data_lifecycle
-Returns the data stream lifecycle of the selected data streams.
+Get data stream lifecycles.
+Retrieves the data stream lifecycle configuration of one or more data streams.

{ref}/data-streams-get-lifecycle.html[Endpoint documentation]
[source,ts]
@@ -3881,7 +4102,8 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`.

[discrete]
==== get_data_stream
-Returns data streams.
+Get data streams.
+Retrieves information about one or more data streams.

{ref}/data-streams.html[Endpoint documentation]
[source,ts]
@@ -3901,7 +4123,8 @@ Supports a list of values, such as `open,hidden`.

[discrete]
==== get_field_mapping
-Returns mapping for one or more fields.
+Retrieves mapping definitions for one or more fields.
+For data streams, the API retrieves field mappings for the stream’s backing indices.

{ref}/indices-get-field-mapping.html[Endpoint documentation]
[source,ts]
@@ -3929,7 +4152,8 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`.

[discrete]
==== get_index_template
-Returns an index template.
+Get index templates.
+Returns information about one or more index templates.

{ref}/indices-get-template.html[Endpoint documentation]
[source,ts]
@@ -3949,7 +4173,8 @@ client.indices.getIndexTemplate({ ... })

[discrete]
==== get_mapping
-Returns mappings for one or more indices.
+Retrieves mapping definitions for one or more indices.
+For data streams, the API retrieves mappings for the stream’s backing indices.

{ref}/indices-get-mapping.html[Endpoint documentation]
[source,ts]
@@ -3977,7 +4202,8 @@ If no response is received before the timeout expires, the request fails and ret

[discrete]
==== get_settings
-Returns settings for one or more indices.
+Returns setting information for one or more indices. For data streams,
+returns setting information for the stream’s backing indices.

{ref}/indices-get-settings.html[Endpoint documentation]
[source,ts]
@@ -4012,7 +4238,8 @@ error.

[discrete]
==== get_template
-Returns an index template.
+Get index templates.
+Retrieves information about one or more index templates.

{ref}/indices-get-template-v1.html[Endpoint documentation]
[source,ts]
@@ -4034,7 +4261,17 @@ If no response is received before the timeout expires, the request fails and ret

[discrete]
==== migrate_to_data_stream
-Migrates an alias to a data stream
+Convert an index alias to a data stream.
+Converts an index alias to a data stream.
+You must have a matching index template that is data stream enabled.
+The alias must meet the following criteria:
+
+* The alias must have a write index.
+* All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type.
+* The alias must not have any filters.
+* The alias must not use custom routing.
+
+If successful, the request removes the alias and creates a data stream with the same name.
+The indices for the alias become hidden backing indices for the stream. +The write index for the alias becomes the write index for the stream. {ref}/data-streams.html[Endpoint documentation] [source,ts] @@ -4050,7 +4287,8 @@ client.indices.migrateToDataStream({ name }) [discrete] ==== modify_data_stream -Modifies a data stream +Update data streams. +Performs one or more data stream modification actions in a single atomic operation. {ref}/data-streams.html[Endpoint documentation] [source,ts] @@ -4066,7 +4304,8 @@ client.indices.modifyDataStream({ actions }) [discrete] ==== open -Opens an index. +Opens a closed index. +For data streams, the API opens any closed backing indices. {ref}/indices-open-close.html[Endpoint documentation] [source,ts] @@ -4115,7 +4354,7 @@ client.indices.promoteDataStream({ name }) [discrete] ==== put_alias -Creates or updates an alias. +Adds a data stream or index to an alias. {ref}/indices-aliases.html[Endpoint documentation] [source,ts] @@ -4133,7 +4372,7 @@ Wildcard patterns that match both data streams and indices return an error. ** *`name` (string)*: Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query used to limit documents the alias can access. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query used to limit documents the alias can access. ** *`index_routing` (Optional, string)*: Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter. @@ -4153,7 +4392,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_data_lifecycle -Updates the data stream lifecycle of the selected data streams. +Update data stream lifecycles. +Update the data stream lifecycle of the specified data streams. {ref}/data-streams-put-lifecycle.html[Endpoint documentation] [source,ts] @@ -4184,7 +4424,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_index_template -Creates or updates an index template. 
+Create or update an index template. +Index templates define settings, mappings, and aliases that can be applied automatically to new indices. {ref}/indices-put-template.html[Endpoint documentation] [source,ts] @@ -4228,7 +4469,9 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_mapping -Updates the index mappings. +Adds new fields to an existing data stream or index. +You can also use this API to change the search settings of existing fields. +For data streams, these changes are applied to all backing indices by default. {ref}/indices-put-mapping.html[Endpoint documentation] [source,ts] @@ -4275,7 +4518,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_settings -Updates the index settings. +Changes a dynamic index setting in real time. For data streams, index setting +changes are applied to all backing indices by default. {ref}/indices-update-settings.html[Endpoint documentation] [source,ts] @@ -4311,7 +4555,8 @@ error. [discrete] ==== put_template -Creates or updates an index template. +Create or update an index template. +Index templates define settings, mappings, and aliases that can be applied automatically to new indices. {ref}/indices-templates-v1.html[Endpoint documentation] [source,ts] @@ -4343,7 +4588,8 @@ received before the timeout expires, the request fails and returns an error. [discrete] ==== recovery -Returns information about ongoing index shard recoveries. +Returns information about ongoing and completed shard recoveries for one or more indices. +For data streams, the API returns information for the stream’s backing indices. {ref}/indices-recovery.html[Endpoint documentation] [source,ts] @@ -4363,7 +4609,8 @@ To target all data streams and indices, omit this parameter or use `*` or `_all` [discrete] ==== refresh -Performs the refresh operation in one or more indices. +A refresh makes recent operations performed on one or more indices available for search. +For data streams, the API runs the refresh operation on the stream’s backing indices. {ref}/indices-refresh.html[Endpoint documentation] [source,ts] @@ -4407,7 +4654,9 @@ client.indices.reloadSearchAnalyzers({ index }) [discrete] ==== resolve_cluster -Resolves the specified index expressions to return information about each cluster, including the local cluster, if included. +Resolves the specified index expressions to return information about each cluster, including +the local cluster, if included. +Multiple patterns and remote clusters are supported. {ref}/indices-resolve-cluster-api.html[Endpoint documentation] [source,ts] @@ -4433,7 +4682,8 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== resolve_index -Returns information about any matching indices, aliases, and data streams +Resolves the specified name(s) and/or index patterns for indices, aliases, and data streams. +Multiple patterns and remote clusters are supported. {ref}/indices-resolve-index-api.html[Endpoint documentation] [source,ts] @@ -4454,8 +4704,7 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== rollover -Updates an alias to point to a new index when the existing index -is considered to be too large or too old. +Creates a new index for a data stream or index alias. 
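As a minimal sketch (the alias name and rollover conditions below are illustrative; `client` is assumed to be an initialized client):

[source,ts]
----
// Roll the write alias over to a fresh index once either condition is met
const response = await client.indices.rollover({
  alias: 'logs-write', // hypothetical write alias
  conditions: { max_age: '7d', max_docs: 100000 }
})
console.log(response.new_index)
----
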
{ref}/indices-rollover-index.html[Endpoint documentation] [source,ts] @@ -4492,7 +4741,8 @@ Set to all or any positive integer up to the total number of shards in the index [discrete] ==== segments -Provides low-level information about segments in a Lucene index. +Returns low-level information about the Lucene segments in index shards. +For data streams, the API returns information about the stream’s backing indices. {ref}/indices-segments.html[Endpoint documentation] [source,ts] @@ -4518,7 +4768,8 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== shard_stores -Provides store information for shard copies of indices. +Retrieves store information about replica shards in one or more indices. +For data streams, the API retrieves store information for the stream’s backing indices. {ref}/indices-shards-stores.html[Endpoint documentation] [source,ts] @@ -4541,7 +4792,7 @@ this argument determines whether wildcard expressions match hidden data streams. [discrete] ==== shrink -Allow to shrink an existing index into a new index with fewer primary shards. +Shrinks an existing index into a new index with fewer primary shards. {ref}/indices-shrink-index.html[Endpoint documentation] [source,ts] @@ -4567,7 +4818,8 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== simulate_index_template -Simulate matching the given index name against the index templates in the system +Simulate an index. +Returns the index configuration that would be applied to the specified index from an existing index template. {ref}/indices-simulate-index.html[Endpoint documentation] [source,ts] @@ -4585,7 +4837,8 @@ client.indices.simulateIndexTemplate({ name }) [discrete] ==== simulate_template -Simulate resolving the given template name or body +Simulate an index template. +Returns the index configuration that would be applied by a particular index template. {ref}/indices-simulate-template.html[Endpoint documentation] [source,ts] @@ -4629,7 +4882,7 @@ that uses deprecated components, Elasticsearch will emit a deprecation warning. [discrete] ==== split -Allows you to split an existing index into a new index with more primary shards. +Splits an existing index into a new index with more primary shards. {ref}/indices-split-index.html[Endpoint documentation] [source,ts] @@ -4654,7 +4907,8 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== stats -Provides statistics on operations happening in an index. +Returns statistics for one or more indices. +For data streams, the API retrieves statistics for the stream’s backing indices. {ref}/indices-stats.html[Endpoint documentation] [source,ts] @@ -4682,7 +4936,7 @@ such as `open,hidden`. [discrete] ==== unfreeze -Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. +Unfreezes an index. {ref}/unfreeze-index-api.html[Endpoint documentation] [source,ts] @@ -4711,7 +4965,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== update_aliases -Updates index aliases. +Adds a data stream or index to an alias. {ref}/indices-aliases.html[Endpoint documentation] [source,ts] @@ -4731,7 +4985,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== validate_query -Allows a user to validate a potentially expensive query without executing it. +Validates a potentially expensive query without executing it. 
{ref}/search-validate.html[Endpoint documentation] [source,ts] @@ -4746,7 +5000,7 @@ client.indices.validateQuery({ ... }) ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query in the Lucene query string syntax. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query in the Lucene query string syntax. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. ** *`all_shards` (Optional, boolean)*: If `true`, the validation is executed on all shards instead of one random shard per index. @@ -4806,7 +5060,7 @@ client.inference.get({ ... }) [discrete] ==== inference -Perform inference +Perform inference on the service {ref}/post-inference-api.html[Endpoint documentation] [source,ts] @@ -4829,7 +5083,7 @@ Not required for other tasks. [discrete] ==== put -Configure an inference endpoint for use in the Inference API +Create an inference endpoint {ref}/put-inference-api.html[Endpoint documentation] [source,ts] @@ -4847,9 +5101,18 @@ client.inference.put({ inference_id }) [discrete] === ingest +[discrete] +==== delete_geoip_database +Deletes a geoip database configuration +[source,ts] +---- +client.ingest.deleteGeoipDatabase() +---- + + [discrete] ==== delete_pipeline -Deletes a pipeline. +Deletes one or more existing ingest pipeline. {ref}/delete-pipeline-api.html[Endpoint documentation] [source,ts] @@ -4870,7 +5133,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== geo_ip_stats -Returns statistical information about geoip databases +Gets download statistics for GeoIP2 databases used with the geoip processor. {ref}/geoip-processor.html[Endpoint documentation] [source,ts] @@ -4879,9 +5142,19 @@ client.ingest.geoIpStats() ---- +[discrete] +==== get_geoip_database +Returns geoip database configuration. 
+[source,ts] +---- +client.ingest.getGeoipDatabase() +---- + + [discrete] ==== get_pipeline -Returns a pipeline. +Returns information about one or more ingest pipelines. +This API returns a local reference of the pipeline. {ref}/get-pipeline-api.html[Endpoint documentation] [source,ts] @@ -4902,7 +5175,9 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== processor_grok -Returns a list of the built-in patterns. +Extracts structured fields out of a single text field within a document. +You choose which field to extract matched fields from, as well as the grok pattern you expect will match. +A grok pattern is like a regular expression that supports aliased expressions that can be reused. {ref}/grok-processor.html[Endpoint documentation] [source,ts] @@ -4911,9 +5186,19 @@ client.ingest.processorGrok() ---- +[discrete] +==== put_geoip_database +Puts the configuration for a geoip database to be downloaded +[source,ts] +---- +client.ingest.putGeoipDatabase() +---- + + [discrete] ==== put_pipeline -Creates or updates a pipeline. +Creates or updates an ingest pipeline. +Changes made using this API take effect immediately. {ref}/ingest.html[Endpoint documentation] [source,ts] @@ -4928,8 +5213,8 @@ client.ingest.putPipeline({ id }) ** *`id` (string)*: ID of the ingest pipeline to create or update. ** *`_meta` (Optional, Record)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. ** *`description` (Optional, string)*: Description of the ingest pipeline. -** *`on_failure` (Optional, { attachment, append, csv, convert, date, date_index_name, dot_expander, enrich, fail, foreach, json, user_agent, kv, geoip, grok, gsub, join, lowercase, remove, rename, reroute, script, set, sort, split, trim, uppercase, urldecode, bytes, dissect, set_security_user, pipeline, drop, circle, inference }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. -** *`processors` (Optional, { attachment, append, csv, convert, date, date_index_name, dot_expander, enrich, fail, foreach, json, user_agent, kv, geoip, grok, gsub, join, lowercase, remove, rename, reroute, script, set, sort, split, trim, uppercase, urldecode, bytes, dissect, set_security_user, pipeline, drop, circle, inference }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. +** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. 
+** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. ** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. @@ -4937,7 +5222,7 @@ client.ingest.putPipeline({ id }) [discrete] ==== simulate -Allows to simulate a pipeline with example documents. +Executes an ingest pipeline against a set of provided documents. {ref}/simulate-pipeline-api.html[Endpoint documentation] [source,ts] @@ -4972,7 +5257,8 @@ client.license.delete() [discrete] ==== get -Retrieves licensing information for the cluster +This API returns information about the type of license, when it was issued, and when it expires, for example. +For more information about the different types of licenses, see https://www.elastic.co/subscriptions. {ref}/get-license.html[Endpoint documentation] [source,ts] @@ -5030,7 +5316,8 @@ client.license.post({ ... }) [discrete] ==== post_start_basic -Starts an indefinite basic license. +The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. +To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). {ref}/start-basic.html[Endpoint documentation] [source,ts] @@ -5046,7 +5333,7 @@ client.license.postStartBasic({ ... }) [discrete] ==== post_start_trial -starts a limited time trial license. +The start trial API enables you to start a 30-day trial, which gives access to all subscription features. {ref}/start-trial.html[Endpoint documentation] [source,ts] @@ -5065,7 +5352,7 @@ client.license.postStartTrial({ ... }) === logstash [discrete] ==== delete_pipeline -Deletes Logstash Pipelines used by Central Management +Deletes a pipeline used for Logstash Central Management. {ref}/logstash-api-delete-pipeline.html[Endpoint documentation] [source,ts] @@ -5081,7 +5368,7 @@ client.logstash.deletePipeline({ id }) [discrete] ==== get_pipeline -Retrieves Logstash Pipelines used by Central Management +Retrieves pipelines used for Logstash Central Management. {ref}/logstash-api-get-pipeline.html[Endpoint documentation] [source,ts] @@ -5097,7 +5384,7 @@ client.logstash.getPipeline({ ... }) [discrete] ==== put_pipeline -Adds and updates Logstash Pipelines used for Central Management +Creates or updates a pipeline used for Logstash Central Management. 
{ref}/logstash-api-put-pipeline.html[Endpoint documentation]
[source,ts]
@@ -5156,7 +5443,10 @@ client.migration.postFeatureUpgrade()
=== ml
[discrete]
==== clear_trained_model_deployment_cache
-Clear the cached results from a trained model deployment
+Clears a trained model deployment cache on all nodes where the trained model is assigned.
+A trained model deployment may have an inference cache enabled.
+As requests are handled by each allocated node, their responses may be cached on that individual node.
+Calling this API clears the caches without restarting the deployment.

{ref}/clear-trained-model-deployment-cache.html[Endpoint documentation]
[source,ts]
@@ -5172,7 +5462,11 @@ client.ml.clearTrainedModelDeploymentCache({ model_id })

[discrete]
==== close_job
-Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle.
+Close anomaly detection jobs.
+A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
+When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data.
+If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.
+When a datafeed that has a specified end date stops, it automatically closes its associated job.

{ref}/ml-close-job.html[Endpoint documentation]
[source,ts]
@@ -5191,7 +5485,7 @@ client.ml.closeJob({ job_id })

[discrete]
==== delete_calendar
-Deletes a calendar.
+Removes all scheduled events from a calendar, then deletes it.

{ref}/ml-delete-calendar.html[Endpoint documentation]
[source,ts]
@@ -5243,7 +5537,7 @@ list of jobs or groups.

[discrete]
==== delete_data_frame_analytics
-Deletes an existing data frame analytics job.
+Deletes a data frame analytics job.

{ref}/delete-dfanalytics.html[Endpoint documentation]
[source,ts]
@@ -5283,6 +5577,14 @@ stopping and deleting the datafeed.

[discrete]
==== delete_expired_data
Deletes expired and unused machine learning data.
+Deletes all job results, model snapshots and forecast data that have exceeded
+their retention days period. Machine learning state documents that are not
+associated with any job are also deleted.
+You can limit the request to a single or set of anomaly detection jobs by
+using a job identifier, a group name, a comma-separated list of jobs, or a
+wildcard expression. You can delete expired data for all anomaly detection
+jobs by using _all, by specifying * as the `<job_id>`, or by omitting the
+`<job_id>`.

{ref}/ml-delete-expired-data.html[Endpoint documentation]
[source,ts]
@@ -5303,6 +5605,8 @@ behavior is no throttling.

[discrete]
==== delete_filter
Deletes a filter.
+If an anomaly detection job references the filter, you cannot delete the
+filter. You must update or delete the job before you can delete the filter.
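For example (the filter ID is illustrative; `client` is assumed to be an initialized client):

[source,ts]
----
// Delete a filter once no anomaly detection job references it
const response = await client.ml.deleteFilter({ filter_id: 'safe_domains' })
console.log(response)
----
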
{ref}/ml-delete-filter.html[Endpoint documentation] [source,ts] @@ -5319,6 +5623,10 @@ client.ml.deleteFilter({ filter_id }) [discrete] ==== delete_forecast Deletes forecasts from a machine learning job. +By default, forecasts are retained for 14 days. You can specify a +different retention period with the `expires_in` parameter in the forecast +jobs API. The delete forecast API enables you to delete one or more +forecasts before they expire. {ref}/ml-delete-forecast.html[Endpoint documentation] [source,ts] @@ -5344,7 +5652,13 @@ error. [discrete] ==== delete_job -Deletes an existing anomaly detection job. +Delete an anomaly detection job. +All job configuration, model state and results are deleted. +It is not currently possible to delete multiple jobs using wildcards or a +comma separated list. If you delete a job that has a datafeed, the request +first tries to delete the datafeed. This behavior is equivalent to calling +the delete datafeed API with the same timeout and force parameters as the +delete job request. {ref}/ml-delete-job.html[Endpoint documentation] [source,ts] @@ -5368,6 +5682,9 @@ job deletion completes. [discrete] ==== delete_model_snapshot Deletes an existing model snapshot. +You cannot delete the active model snapshot. To delete that snapshot, first +revert to a different one. To identify the active model snapshot, refer to +the `model_snapshot_id` in the results from the get jobs API. {ref}/ml-delete-snapshot.html[Endpoint documentation] [source,ts] @@ -5384,7 +5701,8 @@ client.ml.deleteModelSnapshot({ job_id, snapshot_id }) [discrete] ==== delete_trained_model -Deletes an existing trained inference model that is currently not referenced by an ingest pipeline. +Deletes an existing trained inference model that is currently not referenced +by an ingest pipeline. {ref}/delete-trained-models.html[Endpoint documentation] [source,ts] @@ -5401,7 +5719,10 @@ client.ml.deleteTrainedModel({ model_id }) [discrete] ==== delete_trained_model_alias -Deletes a model alias that refers to the trained model +Deletes a trained model alias. +This API deletes an existing model alias that refers to a trained model. If +the model alias is missing or refers to a model other than the one identified +by the `model_id`, this API returns an error. {ref}/delete-trained-models-aliases.html[Endpoint documentation] [source,ts] @@ -5418,7 +5739,9 @@ client.ml.deleteTrainedModelAlias({ model_alias, model_id }) [discrete] ==== estimate_model_memory -Estimates the model memory +Makes an estimation of the memory usage for an anomaly detection job model. +It is based on analysis configuration details for the job and cardinality +estimates for the fields it references. {ref}/ml-apis.html[Endpoint documentation] [source,ts] @@ -5448,6 +5771,10 @@ omitted from the request if no detectors have a `by_field_name`, [discrete] ==== evaluate_data_frame Evaluates the data frame analytics for an annotated index. +The API packages together commonly used evaluation metrics for various types +of machine learning features. This has been designed for use on indexes +created by data frame analytics. Evaluation requires both a ground truth +field and an analytics result field to be present. {ref}/evaluate-dfanalytics.html[Endpoint documentation] [source,ts] @@ -5461,11 +5788,17 @@ client.ml.evaluateDataFrame({ evaluation, index }) * *Request (object):* ** *`evaluation` ({ classification, outlier_detection, regression })*: Defines the type of evaluation you want to perform. 
** *`index` (string)*: Defines the `index` in which the evaluation will be performed. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index. [discrete] ==== explain_data_frame_analytics Explains a data frame analytics config. +This API provides explanations for a data frame analytics config that either +exists already or one that has not been created yet. The following +explanations are provided: +* which fields are included or not in the analysis and why, +* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. +If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. {ref}/explain-dfanalytics.html[Endpoint documentation] [source,ts] @@ -5508,6 +5841,14 @@ learning node capacity for it to be immediately assigned to a node. [discrete] ==== flush_job Forces any buffered data to be processed by the job. +The flush jobs API is only applicable when sending data for analysis using +the post data API. Depending on the content of the buffer, then it might +additionally calculate new results. Both flush and close operations are +similar, however the flush is more efficient if you are expecting to send +more data for analysis. When flushing, the job remains open and is available +to continue analyzing data. A close operation additionally prunes and +persists the model state to disk and the job must be opened again before +analyzing further data. {ref}/ml-flush-job.html[Endpoint documentation] [source,ts] @@ -5528,7 +5869,12 @@ client.ml.flushJob({ job_id }) [discrete] ==== forecast -Predicts the future behavior of a time series by using its historical behavior. +Predicts the future behavior of a time series by using its historical +behavior. + +Forecasts are not supported for jobs that perform population analysis; an +error occurs if you try to create a forecast for a job that has an +`over_field_name` in its configuration. 
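A minimal sketch (the job ID and durations are illustrative; `client` is assumed to be an initialized client):

[source,ts]
----
// Ask an open anomaly detection job for a one-day forecast that expires after a week
const response = await client.ml.forecast({
  job_id: 'total-requests', // hypothetical open job
  duration: '1d',
  expires_in: '7d'
})
console.log(response.forecast_id)
----
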
{ref}/ml-forecast.html[Endpoint documentation]
[source,ts]
----
@@ -5549,6 +5895,7 @@ create a forecast; otherwise, an error occurs.

[discrete]
==== get_buckets
Retrieves anomaly detection job results for one or more buckets.
+The API presents a chronological view of the records, grouped by bucket.

{ref}/ml-get-bucket.html[Endpoint documentation]
[source,ts]
@@ -5643,6 +5990,9 @@ This parameter has the `from` and `size` properties.

[discrete]
==== get_data_frame_analytics
Retrieves configuration information for data frame analytics jobs.
+You can get information for multiple data frame analytics jobs in a single
+API request by using a comma-separated list of data frame analytics jobs or a
+wildcard expression.

{ref}/get-dfanalytics.html[Endpoint documentation]
[source,ts]
@@ -5709,6 +6059,12 @@ there are no matches or only partial matches.

[discrete]
==== get_datafeed_stats
Retrieves usage information for datafeeds.
+You can get statistics for multiple datafeeds in a single API request by
+using a comma-separated list of datafeeds or a wildcard expression. You can
+get statistics for all datafeeds by using `_all`, by specifying `*` as the
+`<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the
+only information you receive is the `datafeed_id` and the `state`.
+This API returns a maximum of 10,000 datafeeds.

{ref}/ml-get-datafeed-stats.html[Endpoint documentation]
[source,ts]
@@ -5737,6 +6093,11 @@ partial matches. If this parameter is `false`, the request returns a

[discrete]
==== get_datafeeds
Retrieves configuration information for datafeeds.
+You can get information for multiple datafeeds in a single API request by
+using a comma-separated list of datafeeds or a wildcard expression. You can
+get information for all datafeeds by using `_all`, by specifying `*` as the
+`<feed_id>`, or by omitting the `<feed_id>`.
+This API returns a maximum of 10,000 datafeeds.

{ref}/ml-get-datafeed.html[Endpoint documentation]
[source,ts]
@@ -5768,6 +6129,7 @@ be retrieved and then added to another cluster.

[discrete]
==== get_filters
Retrieves filters.
+You can get a single filter or all filters.

{ref}/ml-get-filter.html[Endpoint documentation]
[source,ts]
@@ -5786,6 +6148,9 @@ client.ml.getFilters({ ... })

[discrete]
==== get_influencers
Retrieves anomaly detection job results for one or more influencers.
+Influencers are the entities that have contributed to, or are to blame for,
+the anomalies. Influencer results are available only if an
+`influencer_field_name` is specified in the job configuration.

{ref}/ml-get-influencer.html[Endpoint documentation]
[source,ts]
@@ -5847,6 +6212,10 @@ code when there are no matches or only partial matches.

[discrete]
==== get_jobs
Retrieves configuration information for anomaly detection jobs.
+You can get information for multiple anomaly detection jobs in a single API
+request by using a group name, a comma-separated list of jobs, or a wildcard
+expression. You can get information for all anomaly detection jobs by using
+`_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.

{ref}/ml-get-job.html[Endpoint documentation]
[source,ts]
@@ -5877,7 +6246,8 @@ be retrieved and then added to another cluster.

[discrete]
==== get_memory_stats
-Returns information on how ML is using memory.
+Get information about how machine learning jobs and trained models are using memory,
+on each node, both within the JVM heap, and natively, outside of the JVM.

{ref}/get-ml-memory.html[Endpoint documentation]
[source,ts]
@@ -5900,7 +6270,7 @@ fails and returns an error.
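For example, a minimal sketch (`client` is assumed to be an initialized client):

[source,ts]
----
// Retrieve machine learning memory usage across all nodes in the cluster
const stats = await client.ml.getMemoryStats()
console.log(stats)
----
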
[discrete]
==== get_model_snapshot_upgrade_stats
-Gets stats for anomaly detection job model snapshot upgrades that are in progress.
+Retrieves usage information for anomaly detection job model snapshot upgrades.

{ref}/ml-get-job-model-snapshot-upgrade-stats.html[Endpoint documentation]
[source,ts]
@@ -5954,7 +6324,23 @@ by specifying `*` as the snapshot ID, or by omitting the snapshot ID.

[discrete]
==== get_overall_buckets
-Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs.
+Retrieves overall bucket results that summarize the bucket results of
+multiple anomaly detection jobs.
+
+The `overall_score` is calculated by combining the scores of all the
+buckets within the overall bucket span. First, the maximum
+`anomaly_score` per anomaly detection job in the overall bucket is
+calculated. Then the `top_n` of those scores are averaged to result in
+the `overall_score`. This means that you can fine-tune the
+`overall_score` so that it is more or less sensitive to the number of
+jobs that detect an anomaly at the same time. For example, if you set
+`top_n` to `1`, the `overall_score` is the maximum bucket score in the
+overall bucket. Alternatively, if you set `top_n` to the number of jobs,
+the `overall_score` is high only when all jobs detect anomalies in that
+overall bucket. If you set the `bucket_span` parameter (to a value
+greater than its default), the `overall_score` is the maximum
+`overall_score` of the overall buckets that have a span equal to the
+jobs' largest bucket span.

{ref}/ml-get-overall-buckets.html[Endpoint documentation]
[source,ts]
@@ -5983,6 +6369,16 @@ using `_all` or by specifying `*` as the `<job_id>`.

[discrete]
==== get_records
Retrieves anomaly records for an anomaly detection job.
+Records contain the detailed analytical results. They describe the anomalous
+activity that has been identified in the input data based on the detector
+configuration.
+There can be many anomaly records depending on the characteristics and size
+of the input data. In practice, there are often too many to be able to
+manually process them. The machine learning features therefore perform a
+sophisticated aggregation of the anomaly records into buckets.
+The number of record results depends on the number of anomalies found in each
+bucket, which relates to the number of time series being modeled and the
+number of detectors.

{ref}/ml-get-record.html[Endpoint documentation]
[source,ts]
@@ -6007,7 +6403,7 @@ client.ml.getRecords({ job_id })

[discrete]
==== get_trained_models
-Retrieves configuration information for a trained inference model.
+Retrieves configuration information for a trained model.

{ref}/get-trained-models.html[Endpoint documentation]
[source,ts]
@@ -6047,7 +6443,8 @@ tags are returned.

[discrete]
==== get_trained_models_stats
-Retrieves usage information for trained inference models.
+Retrieves usage information for trained models. You can get usage information for multiple trained
+models in a single API request by using a comma-separated list of model IDs or a wildcard expression.

{ref}/get-trained-models-stats.html[Endpoint documentation]
[source,ts]
@@ -6074,7 +6471,7 @@ subset of results when there are partial matches.

[discrete]
==== infer_trained_model
-Evaluate a trained model.
+Evaluates a trained model.

{ref}/infer-trained-model.html[Endpoint documentation]
[source,ts]
@@ -6096,6 +6493,12 @@ Currently, for NLP models, only a single value is allowed.
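For example, a minimal sketch (the model ID and the `text_field` input name are illustrative and depend on how the model was configured; `client` is assumed to be an initialized client):

[source,ts]
----
// Run inference against a deployed NLP model; NLP models typically read their input from `text_field`
const response = await client.ml.inferTrainedModel({
  model_id: 'my-nlp-model',
  docs: [{ text_field: 'Elasticsearch is a distributed search and analytics engine.' }],
  timeout: '30s'
})
console.log(response.inference_results)
----
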
[discrete] ==== info Returns defaults and limits used by machine learning. +This endpoint is designed to be used by a user interface that needs to fully +understand machine learning configurations where some options are not +specified, meaning that the defaults should be used. This endpoint may be +used to find out what those defaults are. It also provides information about +the maximum size of machine learning jobs that could run in the current +cluster configuration. {ref}/get-ml-info.html[Endpoint documentation] [source,ts] @@ -6106,7 +6509,14 @@ client.ml.info() [discrete] ==== open_job -Opens one or more anomaly detection jobs. +Open anomaly detection jobs. +An anomaly detection job must be opened in order for it to be ready to +receive and analyze data. It can be opened and closed multiple times +throughout its lifecycle. +When you open a new job, it starts with an empty model. +When you open an existing job, the most recent model state is automatically +loaded. The job is ready to resume its analysis from where it left off, once +new data is received. {ref}/ml-open-job.html[Endpoint documentation] [source,ts] @@ -6123,7 +6533,7 @@ client.ml.openJob({ job_id }) [discrete] ==== post_calendar_events -Posts scheduled events in a calendar. +Adds scheduled events to a calendar. {ref}/ml-post-calendar-event.html[Endpoint documentation] [source,ts] @@ -6142,6 +6552,9 @@ client.ml.postCalendarEvents({ calendar_id, events }) ==== post_data Sends data to an anomaly detection job for analysis. +IMPORTANT: For each job, data can be accepted from only a single connection at a time. +It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. + {ref}/ml-post-data.html[Endpoint documentation] [source,ts] ---- @@ -6159,7 +6572,7 @@ client.ml.postData({ job_id }) [discrete] ==== preview_data_frame_analytics -Previews that will be analyzed given a data frame analytics config. +Previews the extracted features used by a data frame analytics config. {ref}/preview-dfanalytics.html[Endpoint documentation] [source,ts] @@ -6179,6 +6592,14 @@ this API. [discrete] ==== preview_datafeed Previews a datafeed. +This API returns the first "page" of search results from a datafeed. +You can preview an existing datafeed or provide configuration details for a datafeed +and anomaly detection job in the API. The preview shows the structure of the data +that will be passed to the anomaly detection engine. +IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that +called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the +datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. +You can also use secondary authorization headers to supply the credentials. {ref}/ml-preview-datafeed.html[Endpoint documentation] [source,ts] @@ -6204,7 +6625,7 @@ used. You cannot specify a `job_config` object unless you also supply a `datafee [discrete] ==== put_calendar -Instantiates a calendar. +Creates a calendar. {ref}/ml-put-calendar.html[Endpoint documentation] [source,ts] @@ -6240,6 +6661,8 @@ client.ml.putCalendarJob({ calendar_id, job_id }) [discrete] ==== put_data_frame_analytics Instantiates a data frame analytics job. +This API creates a data frame analytics job that performs an analysis on the +source indices and stores the outcome in a destination index. 
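A minimal outlier detection sketch (the job and index names are illustrative; `client` is assumed to be an initialized client):

[source,ts]
----
// Create a data frame analytics job that scores the documents of a source index
const response = await client.ml.putDataFrameAnalytics({
  id: 'my-outlier-job',
  source: { index: 'my-source-index' },
  dest: { index: 'my-dest-index' },
  analysis: { outlier_detection: {} }
})
console.log(response)
----
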
{ref}/put-dfanalytics.html[Endpoint documentation]
[source,ts]
@@ -6311,6 +6734,15 @@ greater than that setting.

[discrete]
==== put_datafeed
Instantiates a datafeed.
+Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.
+You can associate only one datafeed with each anomaly detection job.
+The datafeed contains a query that runs at a defined interval (`frequency`).
+If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval.
+When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had
+at the time of creation and runs the query using those same roles. If you provide secondary authorization headers,
+those credentials are used instead.
+You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed
+directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.

{ref}/ml-put-datafeed.html[Endpoint documentation]
[source,ts]
@@ -6349,7 +6781,7 @@ learning nodes must have the `remote_cluster_client` role.
stops and closes the associated job after this many real-time searches return no documents. In other words,
it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no
end time that sees no data remains started until it is explicitly stopped. By default, it is not set.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object
is passed verbatim to Elasticsearch.
** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might
@@ -6372,6 +6804,8 @@ whether wildcard expressions match hidden data streams. Supports a list of value

[discrete]
==== put_filter
Instantiates a filter.
+A filter contains a list of strings. It can be used by one or more anomaly detection jobs. +Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. {ref}/ml-put-filter.html[Endpoint documentation] [source,ts] @@ -6390,7 +6824,8 @@ Up to 10000 items are allowed in each filter. [discrete] ==== put_job -Instantiates an anomaly detection job. +Create an anomaly detection job. +If you include a `datafeed_config`, you must have read index privileges on the source index. {ref}/ml-put-job.html[Endpoint documentation] [source,ts] @@ -6421,7 +6856,7 @@ client.ml.putJob({ job_id, analysis_config, data_description }) [discrete] ==== put_trained_model -Creates an inference trained model. +Enables you to supply a trained model that is not created by data frame analytics. {ref}/put-trained-models.html[Endpoint documentation] [source,ts] @@ -6467,7 +6902,22 @@ to complete. [discrete] ==== put_trained_model_alias -Creates a new model alias (or reassigns an existing one) to refer to the trained model +Creates or updates a trained model alias. A trained model alias is a logical +name used to reference a single trained model. +You can use aliases instead of trained model identifiers to make it easier to +reference your models. For example, you can use aliases in inference +aggregations and processors. +An alias must be unique and refer to only a single trained model. However, +you can have multiple aliases for each trained model. +If you use this API to update an alias such that it references a different +trained model ID and the model uses a different type of data frame analytics, +an error occurs. For example, this situation occurs if you have a trained +model for regression analysis and a trained model for classification +analysis; you cannot reassign an alias from one type of trained model to +another. +If you use this API to update an alias and there are very few input fields in +common between the old and new trained models for the model alias, the API +returns a warning. {ref}/put-trained-models-aliases.html[Endpoint documentation] [source,ts] @@ -6487,7 +6937,7 @@ already assigned and this parameter is false, the API returns an error. [discrete] ==== put_trained_model_definition_part -Creates part of a trained model definition +Creates part of a trained model definition. {ref}/put-trained-model-definition-part.html[Endpoint documentation] [source,ts] @@ -6508,7 +6958,9 @@ order of their part number. The first part must be `0` and the final part must b [discrete] ==== put_trained_model_vocabulary -Creates a trained model vocabulary +Creates a trained model vocabulary. +This API is supported only for natural language processing (NLP) models. +The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. {ref}/put-trained-model-vocabulary.html[Endpoint documentation] [source,ts] @@ -6527,7 +6979,11 @@ client.ml.putTrainedModelVocabulary({ model_id, vocabulary }) [discrete] ==== reset_job -Resets an existing anomaly detection job. +Resets an anomaly detection job. +All model state and results are deleted. The job is ready to start over as if +it had just been created. +It is not currently possible to reset multiple jobs using wildcards or a +comma separated list. {ref}/ml-reset-job.html[Endpoint documentation] [source,ts] @@ -6549,6 +7005,13 @@ reset. [discrete] ==== revert_model_snapshot Reverts to a specific snapshot. 
+The machine learning features react quickly to anomalous input, learning new +behaviors in data. Highly anomalous input increases the variance in the +models whilst the system learns whether this is a new step-change in behavior +or a one-off event. In the case where this anomalous input is known to be a +one-off, then it might be appropriate to reset the model state to a time +before this event. For example, you might consider reverting to a saved +snapshot after Black Friday or a critical system failure. {ref}/ml-revert-snapshot.html[Endpoint documentation] [source,ts] @@ -6568,7 +7031,18 @@ scratch when it is started. [discrete] ==== set_upgrade_mode -Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. +Sets a cluster wide upgrade_mode setting that prepares machine learning +indices for an upgrade. +When upgrading your cluster, in some circumstances you must restart your +nodes and reindex your machine learning indices. In those circumstances, +there must be no machine learning jobs running. You can close the machine +learning jobs, do the upgrade, then open all the jobs again. Alternatively, +you can use this API to temporarily halt tasks associated with the jobs and +datafeeds and prevent new jobs from opening. You can also use this API +during upgrades that do not require you to reindex your machine learning +indices, though stopping jobs is not a requirement in that case. +You can see the current value for the upgrade_mode setting by using the get +machine learning info API. {ref}/ml-set-upgrade-mode.html[Endpoint documentation] [source,ts] @@ -6588,6 +7062,17 @@ starting. [discrete] ==== start_data_frame_analytics Starts a data frame analytics job. +A data frame analytics job can be started and stopped multiple times +throughout its lifecycle. +If the destination index does not exist, it is created automatically the +first time you start the data frame analytics job. The +`index.number_of_shards` and `index.number_of_replicas` settings for the +destination index are copied from the source index. If there are multiple +source indices, the destination index copies the highest setting values. The +mappings for the destination index are also copied from the source indices. +If there are any mapping conflicts, the job fails to start. +If the destination index exists, it is used as is. You can therefore set up +the destination index in advance with custom settings and mappings. {ref}/start-dfanalytics.html[Endpoint documentation] [source,ts] @@ -6609,6 +7094,18 @@ starts. ==== start_datafeed Starts one or more datafeeds. +A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped +multiple times throughout its lifecycle. + +Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. + +If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. +If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. + +When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or +update it had at the time of creation or update and runs the query using those same roles. If you provided secondary +authorization headers when you created or updated the datafeed, those credentials are used instead. + {ref}/ml-start-datafeed.html[Endpoint documentation] [source,ts] ---- @@ -6628,7 +7125,7 @@ characters. 
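For example (the datafeed ID and start time are illustrative; `client` is assumed to be an initialized client):

[source,ts]
----
// Start a datafeed from a given timestamp; it keeps running in real time because no `end` is given
const response = await client.ml.startDatafeed({
  datafeed_id: 'datafeed-total-requests',
  start: '2023-01-01T00:00:00Z'
})
console.log(response.started)
----
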
[discrete] ==== start_trained_model_deployment -Start a trained model deployment. +Starts a trained model deployment, which allocates the model to every machine learning node. {ref}/start-trained-model-deployment.html[Endpoint documentation] [source,ts] @@ -6665,6 +7162,8 @@ it will automatically be changed to a value less than the number of hardware thr [discrete] ==== stop_data_frame_analytics Stops one or more data frame analytics jobs. +A data frame analytics job can be started and stopped multiple times +throughout its lifecycle. {ref}/stop-dfanalytics.html[Endpoint documentation] [source,ts] @@ -6697,6 +7196,8 @@ stops. Defaults to 20 seconds. [discrete] ==== stop_datafeed Stops one or more datafeeds. +A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped +multiple times throughout its lifecycle. {ref}/ml-stop-datafeed.html[Endpoint documentation] [source,ts] @@ -6717,7 +7218,7 @@ the identifier. [discrete] ==== stop_trained_model_deployment -Stop a trained model deployment. +Stops a trained model deployment. {ref}/stop-trained-model-deployment.html[Endpoint documentation] [source,ts] @@ -6739,7 +7240,7 @@ restart the model deployment. [discrete] ==== update_data_frame_analytics -Updates certain properties of a data frame analytics job. +Updates an existing data frame analytics job. {ref}/update-dfanalytics.html[Endpoint documentation] [source,ts] @@ -6769,7 +7270,11 @@ learning node capacity for it to be immediately assigned to a node. [discrete] ==== update_datafeed -Updates certain properties of a datafeed. +Updates the properties of a datafeed. +You must stop and start the datafeed for the changes to be applied. +When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at +the time of the update and runs the query using those same roles. If you provide secondary authorization headers, +those credentials are used instead. {ref}/ml-update-datafeed.html[Endpoint documentation] [source,ts] @@ -6807,7 +7312,7 @@ learning nodes must have the `remote_cluster_client` role. stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). 
@@ -6769,7 +7270,11 @@ learning node capacity for it to be immediately assigned to a node.

[discrete]
==== update_datafeed
-Updates certain properties of a datafeed.
+Updates the properties of a datafeed.
+You must stop and start the datafeed for the changes to be applied.
+When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at
+the time of the update and runs the query using those same roles. If you provide secondary authorization headers,
+those credentials are used instead.

{ref}/ml-update-datafeed.html[Endpoint documentation]
[source,ts]
@@ -6807,7 +7312,7 @@ learning nodes must have the `remote_cluster_client` role.
stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is
passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also changed. Therefore, the
time required to learn might be long and the understandability of the results is unpredictable.
@@ -6838,7 +7343,7 @@ whether wildcard expressions match hidden data streams. Supports a list of value

[discrete]
==== update_filter
-Updates the description of a filter, adds items, or removes items.
+Updates the description of a filter, adds items, or removes items from the list.

{ref}/ml-update-filter.html[Endpoint documentation]
[source,ts]
@@ -6943,7 +7448,7 @@ snapshot will be deleted when the job is deleted.

[discrete]
==== update_trained_model_deployment
-Updates certain properties of trained model deployment.
+Starts a trained model deployment, which allocates the model to every machine learning node.

{ref}/update-trained-model-deployment.html[Endpoint documentation]
[source,ts]
@@ -6965,7 +7470,15 @@ it will automatically be changed to a value less than the number of hardware thr

[discrete]
==== upgrade_job_snapshot
-Upgrades a given job snapshot to the current major version.
+Upgrades an anomaly detection model snapshot to the latest major version.
+Over time, older snapshot formats are deprecated and removed. Anomaly
+detection jobs support only snapshots that are from the current or previous
+major version.
+This API provides a means to upgrade a snapshot to the current major version.
+This aids in preparing the cluster for an upgrade to the next major version.
+Only one snapshot per anomaly detection job can be upgraded at a time and the
+upgraded snapshot cannot be the current snapshot of the anomaly detection
+job.

{ref}/ml-upgrade-job-model-snapshot.html[Endpoint documentation]
[source,ts]
@@ -6983,11 +7496,33 @@ client.ml.upgradeJobSnapshot({ job_id, snapshot_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`job_id` (string)*: Identifier for the anomaly detection job.
** *`snapshot_id` (string)*: A numerical character string that uniquely identifies the model snapshot.
** *`wait_for_completion` (Optional, boolean)*: When true, the API won’t respond until the upgrade is complete.
Otherwise, it responds as soon as the upgrade task is assigned to a node.
** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the request to complete.
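
For instance, a minimal sketch of upgrading one snapshot; the job and
snapshot identifiers are hypothetical values you would obtain from the get
model snapshots API:

[source,ts]
----
// Block until the snapshot upgrade has finished before resolving.
const response = await client.ml.upgradeJobSnapshot({
  job_id: 'my-job',
  snapshot_id: '1575402237',
  wait_for_completion: true
})
console.log(response.completed)
----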
+
+[discrete]
+=== monitoring
+[discrete]
+==== bulk
+Used by the monitoring features to send monitoring data.
+
+{ref}/monitor-elasticsearch-cluster.html[Endpoint documentation]
+[source,ts]
+----
+client.monitoring.bulk({ system_id, system_api_version, interval })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`system_id` (string)*: Identifier of the monitored system
+** *`system_api_version` (string)*
+** *`interval` (string | -1 | 0)*: Collection interval (e.g., '10s' or '10000ms') of the payload
+** *`type` (Optional, string)*: Default document type for items which don't provide one
+** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])*

[discrete]
=== nodes
[discrete]
==== clear_repositories_metering_archive
-Removes the archived repositories metering information present in the cluster.
+You can use this API to clear the archived repositories metering information in the cluster.

{ref}/clear-repositories-metering-archive-api.html[Endpoint documentation]
[source,ts]
----
client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version })
----

[discrete]
==== Arguments

* *Request (object):*
** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information.
All the nodes selective options are explained [here](https://www.elastic.co/guid

[discrete]
==== get_repositories_metering_info
-Returns cluster repositories metering information.
+You can use the cluster repositories metering API to retrieve repositories metering information in a cluster.
+This API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the
+information needed to compute aggregations over a period of time. Additionally, the information exposed by this
+API is volatile, meaning that it won’t be present after node restarts.

{ref}/get-repositories-metering-api.html[Endpoint documentation]
[source,ts]
----
client.nodes.getRepositoriesMeteringInfo({ node_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information.
All the nodes selective options are explained [here](https://www.elastic.co/guid

[discrete]
==== hot_threads
-Returns information about hot threads on each node in the cluster.
+This API yields a breakdown of the hot threads on each selected node in the cluster.
+The output is plain text with a breakdown of each node’s top hot threads.

{ref}/cluster-nodes-hot-threads.html[Endpoint documentation]
[source,ts]
----
client.nodes.hotThreads({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received
before the timeout expires, the request fails and returns an error.

[discrete]
==== info
-Returns information about nodes in the cluster.
+Returns cluster nodes information.

{ref}/cluster-nodes-info.html[Endpoint documentation]
[source,ts]
----
client.nodes.info({ ... })
----

[discrete]
==== reload_secure_settings
-Reloads secure settings.
+Reloads the keystore on nodes in the cluster.

{ref}/secure-settings.html[Endpoint documentation]
[source,ts]
----
client.nodes.reloadSecureSettings({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== stats
-Returns statistical information about nodes in the cluster.
+Returns cluster nodes statistics.

{ref}/cluster-nodes-stats.html[Endpoint documentation]
[source,ts]
----
client.nodes.stats({ ... })
----

[discrete]
==== usage
-Returns low-level information about REST actions usage on nodes.
+Returns information on the usage of features.

{ref}/cluster-nodes-usage.html[Endpoint documentation]
[source,ts]
----
client.nodes.usage({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
A list of the following options: `_all`, `rest_actions`.
If no response is received before the timeout expires, the request fails and returns an error.
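
For example, a small sketch of narrowing node statistics to two metric
groups; the metric names are taken from the node stats documentation and the
selection is illustrative:

[source,ts]
----
// Fetch only the 'jvm' and 'os' metric groups for every node.
const response = await client.nodes.stats({ metric: ['jvm', 'os'] })
console.log(Object.keys(response.nodes)) // one entry per node ID
----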
[discrete]
=== query_rules
[discrete]
==== delete_rule
+Deletes a query rule within a query ruleset.
+
+{ref}/delete-query-rule.html[Endpoint documentation]
+[source,ts]
+----
+client.queryRules.deleteRule({ ruleset_id, rule_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to delete
+** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to delete
+
+[discrete]
+==== delete_ruleset
Deletes a query ruleset.

{ref}/delete-query-ruleset.html[Endpoint documentation]
[source,ts]
----
-client.queryRuleset.delete({ ruleset_id })
+client.queryRules.deleteRuleset({ ruleset_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`ruleset_id` (string)*: The unique identifier of the query ruleset to delete

[discrete]
==== get_rule
Returns the details about a query rule within a query ruleset

{ref}/get-query-rule.html[Endpoint documentation]
[source,ts]
----
client.queryRules.getRule({ ruleset_id, rule_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to retrieve
** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to retrieve

[discrete]
==== get_ruleset
Returns the details about a query ruleset

{ref}/get-query-ruleset.html[Endpoint documentation]
[source,ts]
----
-client.queryRuleset.get({ ruleset_id })
+client.queryRules.getRuleset({ ruleset_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`ruleset_id` (string)*: The unique identifier of the query ruleset

[discrete]
==== list_rulesets
Returns summarized information about existing query rulesets.

{ref}/list-query-rulesets.html[Endpoint documentation]
[source,ts]
----
-client.queryRuleset.list({ ... })
+client.queryRules.listRulesets({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`from` (Optional, number)*: Starting offset (default: 0)
** *`size` (Optional, number)*: specifies a max number of results to get

[discrete]
==== put_rule
Creates or updates a query rule within a query ruleset.

{ref}/put-query-rule.html[Endpoint documentation]
[source,ts]
----
client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions })
----

[discrete]
==== Arguments

* *Request (object):*
** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to be created or updated
** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to be created or updated
** *`type` (Enum("pinned"))*
** *`criteria` ({ type, metadata, values } | { type, metadata, values }[])*
** *`actions` ({ ids, docs })*
** *`priority` (Optional, number)*

[discrete]
==== put_ruleset
Creates or updates a query ruleset.

{ref}/put-query-ruleset.html[Endpoint documentation]
[source,ts]
----
-client.queryRuleset.put({ ruleset_id, rules })
+client.queryRules.putRuleset({ ruleset_id, rules })
----

[discrete]
==== Arguments

* *Request (object):*
** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated
-** *`rules` ({ rule_id, type, criteria, actions }[])*
+** *`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])*
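
An illustrative sketch of a small ruleset with one pinned-type rule; every
identifier and value below is a placeholder:

[source,ts]
----
// When the 'user_query' metadata contains 'support', pin these document IDs
// to the top of the results.
const response = await client.queryRules.putRuleset({
  ruleset_id: 'my-ruleset',
  rules: [
    {
      rule_id: 'rule-1',
      type: 'pinned',
      criteria: [{ type: 'contains', metadata: 'user_query', values: ['support'] }],
      actions: { ids: ['doc-1', 'doc-2'] }
    }
  ]
})
console.log(response.result) // 'created' or 'updated'
----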
[discrete]
=== rollup
[discrete]
==== delete_job
Deletes an existing rollup job.

{ref}/rollup-delete-job.html[Endpoint documentation]
[source,ts]
----
client.rollup.deleteJob({ id })
----

[discrete]
==== get_jobs
Retrieves the configuration, stats, and status of rollup jobs.

{ref}/rollup-get-job.html[Endpoint documentation]
[source,ts]
----
client.rollup.getJobs({ ... })
----

[discrete]
==== get_rollup_caps
Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern.

{ref}/rollup-get-rollup-caps.html[Endpoint documentation]
[source,ts]
----
client.rollup.getRollupCaps({ ... })
----

[discrete]
==== get_rollup_index_caps
-Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored).
+Returns the rollup capabilities of all jobs inside of a rollup index (for example, the index where rollup data is stored).

{ref}/rollup-get-rollup-index-caps.html[Endpoint documentation]
[source,ts]
----
client.rollup.getRollupIndexCaps({ index })
----

[discrete]
==== put_job
Creates a rollup job.

{ref}/rollup-put-job.html[Endpoint documentation]
[source,ts]
@@ on a per-field basis and for each field you configure which metric should be col

[discrete]
==== rollup_search
-Enables searching rolled-up data using the standard query DSL.
+Enables searching rolled-up data using the standard Query DSL.

{ref}/rollup-search.html[Endpoint documentation]
[source,ts]
----
client.rollup.rollupSearch({ index })
----

[discrete]
==== Arguments

* *Request (object):*
** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL.
** *`aggregations` (Optional, Record)*: Specifies aggregations.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query.
** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data.
** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response

[discrete]
=== search_application
[discrete]
==== delete_behavioral_analytics
Delete a behavioral analytics collection.

{ref}/delete-analytics-collection.html[Endpoint documentation]
[source,ts]
----
client.searchApplication.deleteBehavioralAnalytics({ name })
----

[discrete]
==== get
-Returns the details about a search application.
+Returns the details about a search application

{ref}/get-search-application.html[Endpoint documentation]
[source,ts]
----
client.searchApplication.get({ name })
----

[discrete]
==== render_query
Renders a query for given search application search parameters

[source,ts]
----
client.searchApplication.renderQuery()
----

[discrete]
==== search
-Perform a search against a search application
+Perform a search against a search application.

{ref}/search-application-search.html[Endpoint documentation]
[source,ts]
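
A sketch of querying a search application with template parameters; the
application name and the `query_string` parameter are assumptions that depend
on the search template you stored:

[source,ts]
----
// Params are passed through to the application's stored search template.
const response = await client.searchApplication.search({
  name: 'my-app',
  params: { query_string: 'wind turbines' }
})
console.log(response.hits.hits)
----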
[discrete]
=== searchable_snapshots
[discrete]
==== stats
Retrieve shard-level statistics about searchable snapshots.

{ref}/searchable-snapshots-apis.html[Endpoint documentation]
[source,ts]
----
client.searchableSnapshots.stats({ ... })
----

[discrete]
=== security
[discrete]
==== activate_user_profile
Creates or updates a user profile on behalf of another user.
+
+{ref}/security-api-activate-user-profile.html[Endpoint documentation]
+[source,ts]
+----
+client.security.activateUserProfile({ grant_type })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`grant_type` (Enum("password" | "access_token"))*
+** *`access_token` (Optional, string)*
+** *`password` (Optional, string)*
+** *`username` (Optional, string)*

[discrete]
==== authenticate
-Enables authentication as a user and retrieve information about the authenticated user.
+Enables you to submit a request with a basic auth header to authenticate a user and retrieve information about the authenticated user.
+A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user.
+If the user cannot be authenticated, this API returns a 401 status code.

{ref}/security-api-authenticate.html[Endpoint documentation]
[source,ts]
----
client.security.authenticate()
----

+[discrete]
+==== bulk_delete_role
+The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
+The bulk delete roles API cannot delete roles that are defined in roles files.
+
+{ref}/security-api-bulk-delete-role.html[Endpoint documentation]
+[source,ts]
+----
+client.security.bulkDeleteRole({ names })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`names` (string[])*: An array of role names to delete
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+[discrete]
+==== bulk_put_role
+The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
+The bulk create or update roles API cannot update roles that are defined in roles files.
+
+{ref}/security-api-bulk-put-role.html[Endpoint documentation]
+[source,ts]
+----
+client.security.bulkPutRole({ roles })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`roles` (Record)*: A dictionary of role name to RoleDescriptor objects to add or update
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== bulk_update_api_keys
Updates the attributes of multiple existing API keys.

{ref}/security-api-bulk-update-api-keys.html[Endpoint documentation]
[source,ts]
----
client.security.bulkUpdateApiKeys()
----

@@ -7658,7 +8309,8 @@ setting.

[discrete]
==== clear_api_key_cache
-Clear a subset or all entries from the API key cache.
+Evicts a subset of all entries from the API key cache.
+The cache is also automatically cleared on state changes of the security index.

{ref}/security-api-clear-api-key-cache.html[Endpoint documentation]
[source,ts]
----
client.security.clearApiKeyCache({ ids })
----

[discrete]
==== clear_cached_service_tokens
Evicts tokens from the service account token caches.

{ref}/security-api-clear-service-token-caches.html[Endpoint documentation]
[source,ts]
----
client.security.clearCachedServiceTokens({ namespace, service, name })
----

[discrete]
==== create_api_key
Creates an API key for access without requiring basic authentication.
+A successful request returns a JSON structure that contains the API key, its unique id, and its name.
+If applicable, it also returns expiration information for the API key in milliseconds.
+NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.

{ref}/security-api-create-api-key.html[Endpoint documentation]
[source,ts]
----
client.security.createApiKey({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. By default, API keys never expire.
** *`name` (Optional, string)*: Specifies the name for this API key.
** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API.
** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
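
As an illustrative sketch, creating a key whose scope is narrowed by a role
descriptor; the key name, index pattern, and expiration are placeholders:

[source,ts]
----
// The key's effective permissions are the intersection of this descriptor
// and the creating user's own permissions.
const response = await client.security.createApiKey({
  name: 'ingest-key',
  expiration: '7d',
  role_descriptors: {
    'ingest-only': {
      indices: [{ names: ['logs-*'], privileges: ['create_doc'] }]
    }
  }
})
console.log(response.encoded) // base64 credentials for an Authorization header
----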
[discrete]
==== create_cross_cluster_api_key
Creates a cross-cluster API key for API key based remote cluster access.

{ref}/security-api-create-cross-cluster-api-key.html[Endpoint documentation]
[source,ts]
----
client.security.createCrossClusterApiKey()
----

[discrete]
==== create_service_token
-Creates a service account token for access without requiring basic authentication.
+Creates a service accounts token for access without requiring basic authentication.

{ref}/security-api-create-service-token.html[Endpoint documentation]
[source,ts]
----
client.security.createServiceToken({ namespace, service })
----

[discrete]
==== disable_user
Disables users in the native realm.

{ref}/security-api-disable-user.html[Endpoint documentation]
[source,ts]
----
client.security.disableUser({ username })
----

[discrete]
==== Arguments

* *Request (object):*
** *`username` (string)*: The username of the user to disable
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

+[discrete]
+==== disable_user_profile
+Disables a user profile so it's not visible in user profile searches.
+
+{ref}/security-api-disable-user-profile.html[Endpoint documentation]
+[source,ts]
+----
+client.security.disableUserProfile({ uid })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`uid` (string)*: Unique identifier for the user profile.
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation
+visible to search, if 'wait_for' then wait for a refresh to make this operation
+visible to search, if 'false' do nothing with refreshes.

[discrete]
==== enable_user
Enables users in the native realm.

{ref}/security-api-enable-user.html[Endpoint documentation]
[source,ts]
----
client.security.enableUser({ username })
----

[discrete]
==== Arguments

* *Request (object):*
** *`username` (string)*: The username of the user to enable
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

+[discrete]
+==== enable_user_profile
+Enables a user profile so it's visible in user profile searches.
+
+{ref}/security-api-enable-user-profile.html[Endpoint documentation]
+[source,ts]
+----
+client.security.enableUserProfile({ uid })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`uid` (string)*: Unique identifier for the user profile.
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation
+visible to search, if 'wait_for' then wait for a refresh to make this operation
+visible to search, if 'false' do nothing with refreshes.

[discrete]
==== enroll_kibana
-Allows a kibana instance to configure itself to communicate with a secured elasticsearch cluster.
+Enables a Kibana instance to configure itself for communication with a secured Elasticsearch cluster.

{ref}/security-api-kibana-enrollment.html[Endpoint documentation]
[source,ts]
----
client.security.enrollKibana()
----

[discrete]
==== enroll_node
-Allows a new node to enroll to an existing cluster with security enabled.
+Allows a new node to join an existing cluster with security features enabled.

{ref}/security-api-node-enrollment.html[Endpoint documentation]
[source,ts]
----
client.security.enrollNode()
----

[discrete]
==== get_api_key
Retrieves information for one or more API keys.
+NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.
+If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.

{ref}/security-api-get-api-key.html[Endpoint documentation]
[source,ts]
----
client.security.getApiKey({ ... })
----

[discrete]
==== get_privileges
Retrieves application privileges.

{ref}/security-api-get-privileges.html[Endpoint documentation]
[source,ts]
----
client.security.getPrivileges({ ... })
----

[discrete]
==== get_role
-Retrieves roles in the native realm.
+The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
+The get roles API cannot retrieve roles that are defined in roles files.

{ref}/security-api-get-role.html[Endpoint documentation]
[source,ts]
----
client.security.getRole({ ... })
----

[discrete]
==== get_role_mapping
Retrieves role mappings.

{ref}/security-api-get-role-mapping.html[Endpoint documentation]
[source,ts]
----
client.security.getRoleMapping({ ... })
----

[discrete]
==== get_service_accounts
-Retrieves information about service accounts.
+This API returns a list of service accounts that match the provided path parameter(s).

{ref}/security-api-get-service-accounts.html[Endpoint documentation]
[source,ts]
----
client.security.getServiceAccounts({ ... })
----

[discrete]
==== get_user_privileges
Retrieves security privileges for the logged in user.

{ref}/security-api-get-user-privileges.html[Endpoint documentation]
[source,ts]
----
client.security.getUserPrivileges({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`priviledge` (Optional, string)*: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application.
** *`username` (Optional, string | null)*

+[discrete]
+==== get_user_profile
+Retrieves a user's profile using the unique profile ID.
+
+{ref}/security-api-get-user-profile.html[Endpoint documentation]
+[source,ts]
+----
+client.security.getUserProfile({ uid })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`uid` (string | string[])*: A unique identifier for the user profile.
+** *`data` (Optional, string | string[])*: List of filters for the `data` field of the profile document.
+To return all content use `data=*`. To return a subset of content
+use `data=<key>` to retrieve content nested under the specified `<key>`.
+By default returns no `data` content.

[discrete]
==== grant_api_key
Creates an API key on behalf of another user.
+This API is similar to Create API keys, however it creates the API key for a user that is different than the user that runs the API.
+The caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created.
+It is not possible to use this API to create an API key without that user’s credentials.
+The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user.
+In this case, the API key will be created on behalf of the impersonated user.
+
+This API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf.
+
+A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name.
+If applicable, it also returns expiration information for the API key in milliseconds.
+
+By default, API keys never expire. You can specify expiration information when you create the API keys.

{ref}/security-api-grant-api-key.html[Endpoint documentation]
[source,ts]
----
client.security.grantApiKey({ ... })
----

[discrete]
==== has_privileges
Determines whether the specified user has a specified list of privileges.

{ref}/security-api-has-privileges.html[Endpoint documentation]
[source,ts]
----
client.security.hasPrivileges({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`user` (Optional, string)*: Username
** *`application` (Optional, { application, privileges, resources }[])*
-** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check.
+** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check.
** *`index` (Optional, { names, privileges, allow_restricted_indices }[])*

+[discrete]
+==== has_privileges_user_profile
+Determines whether the users associated with the specified profile IDs have all the requested privileges.
+
+{ref}/security-api-has-privileges-user-profile.html[Endpoint documentation]
+[source,ts]
+----
+client.security.hasPrivilegesUserProfile({ uids, privileges })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`uids` (string[])*: A list of profile IDs. The privileges are checked for associated users of the profiles.
+** *`privileges` ({ application, cluster, index })*

[discrete]
==== invalidate_api_key
Invalidates one or more API keys.
+The `manage_api_key` privilege allows deleting any API keys.
+The `manage_own_api_key` only allows deleting API keys that are owned by the user.
+In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats:
+- Set the parameter `owner=true`.
+- Or, set both `username` and `realm_name` to match the user’s identity.
+- Or, if the request is issued by an API key, i.e. an API key invalidates itself, specify its ID in the `ids` field.

{ref}/security-api-invalidate-api-key.html[Endpoint documentation]
[source,ts]
----
client.security.invalidateApiKey({ ... })
----

[discrete]
==== put_privileges
Adds or updates application privileges.

{ref}/security-api-put-privileges.html[Endpoint documentation]
[source,ts]
----
client.security.putPrivileges({ ... })
----

[discrete]
==== put_role
-Adds and updates roles in the native realm.
+The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
+The create or update roles API cannot update roles that are defined in roles files.

{ref}/security-api-put-role.html[Endpoint documentation]
[source,ts]
----
client.security.putRole({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The name of the role.
** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries.
-** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_connector_secrets" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role.
+** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role.
** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges.
** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries.
** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use.
** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled.
For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected.
+** *`description` (Optional, string)*: Optional description of the role descriptor
** *`transient_metadata` (Optional, Record)*: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== put_user
Adds and updates users in the native realm. These users are commonly referred to as native users.

{ref}/security-api-put-user.html[Endpoint documentation]
[source,ts]
----
client.security.putUser({ username })
----

[discrete]
==== query_api_keys
-Retrieves information for API keys using a subset of query DSL
+Retrieves information for API keys in a paginated manner. You can optionally filter the results with a query.

{ref}/security-api-query-api-key.html[Endpoint documentation]
[source,ts]
----
client.security.queryApiKeys({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
An API key's actual permission is the intersection of its assigned role descriptors and the owner user's permissions.
** *`with_profile_uid` (Optional, boolean)*: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists.
** *`typed_keys` (Optional, boolean)*: Determines whether aggregation names are prefixed by their respective types in the response.

+[discrete]
+==== query_role
+Retrieves roles in a paginated manner. You can optionally filter the results with a query.
+
+{ref}/security-api-query-role.html[Endpoint documentation]
+[source,ts]
+----
+client.security.queryRole({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which roles to return.
+If the query parameter is missing, it is equivalent to a `match_all` query.
+The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`,
+`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`.
+You can query the following information associated with roles: `name`, `description`, `metadata`,
+`applications.application`, `applications.privileges`, `applications.resources`.
+** *`from` (Optional, number)*: Starting document offset.
+By default, you cannot page through more than 10,000 hits using the from and size parameters.
+To page through more hits, use the `search_after` parameter.
+** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: All public fields of a role are eligible for sorting.
+In addition, sort can also be applied to the `_doc` field to sort by index order.
+** *`size` (Optional, number)*: The number of hits to return.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+To page through more hits, use the `search_after` parameter.
+** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Search after definition
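+
+For instance, a hedged sketch of paging through roles whose name matches a
+pattern; the wildcard value and page size are illustrative:
+
+[source,ts]
+----
+// Return the first ten roles whose name starts with 'app-', sorted by name.
+const response = await client.security.queryRole({
+  query: { wildcard: { name: 'app-*' } },
+  sort: ['name'],
+  size: 10
+})
+console.log(response.roles)
+----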
+
+[discrete]
+==== query_user
+Retrieves information for users in a paginated manner. You can optionally filter the results with a query.
+
+{ref}/security-api-query-user.html[Endpoint documentation]
+[source,ts]
+----
+client.security.queryUser({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which users to return.
+If the query parameter is missing, it is equivalent to a `match_all` query.
+The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`,
+`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`.
+You can query the following information associated with user: `username`, `roles`, `enabled`
+** *`from` (Optional, number)*: Starting document offset.
+By default, you cannot page through more than 10,000 hits using the from and size parameters.
+To page through more hits, use the `search_after` parameter.
+** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Fields eligible for sorting are: username, roles, enabled
+In addition, sort can also be applied to the `_doc` field to sort by index order.
+** *`size` (Optional, number)*: The number of hits to return.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+To page through more hits, use the `search_after` parameter.
+** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Search after definition
+** *`with_profile_uid` (Optional, boolean)*: If true will return the User Profile ID for the users in the query result, if any.

[discrete]
==== saml_authenticate
-Exchanges a SAML Response message for an Elasticsearch access token and refresh token pair
+Submits a SAML Response message to Elasticsearch for consumption.

{ref}/security-api-saml-authenticate.html[Endpoint documentation]
[source,ts]
----
client.security.samlAuthenticate({ content, ids })
----

[discrete]
==== saml_complete_logout
-Verifies the logout response sent from the SAML IdP
+Verifies the logout response sent from the SAML IdP.

{ref}/security-api-saml-complete-logout.html[Endpoint documentation]
[source,ts]
----
client.security.samlCompleteLogout({ realm, ids })
----

[discrete]
==== saml_invalidate
-Consumes a SAML LogoutRequest
+Submits a SAML LogoutRequest message to Elasticsearch for consumption.

{ref}/security-api-saml-invalidate.html[Endpoint documentation]
[source,ts]
----
client.security.samlInvalidate({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
The client application must not attempt to parse or process the string in any way.

[discrete]
==== saml_logout
-Invalidates an access token and a refresh token that were generated via the SAML Authenticate API
+Submits a request to invalidate an access token and refresh token.

{ref}/security-api-saml-logout.html[Endpoint documentation]
[source,ts]
----
client.security.samlLogout({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
Alternatively, the most recent refresh token that was received after refreshing the original access token.

[discrete]
==== saml_prepare_authentication
-Creates a SAML authentication request
+Creates a SAML authentication request (`<AuthnRequest>`) as a URL string, based on the configuration of the respective SAML realm in Elasticsearch.

{ref}/security-api-saml-prepare-authentication.html[Endpoint documentation]
[source,ts]
----
client.security.samlPrepareAuthentication({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
If the Authentication Request is signed, this value is used as part of the signature.

[discrete]
==== saml_service_provider_metadata
-Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider
+Generate SAML metadata for a SAML 2.0 Service Provider.

{ref}/security-api-saml-sp-metadata.html[Endpoint documentation]
[source,ts]
----
client.security.samlServiceProviderMetadata({ realm_name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`realm_name` (string)*: The name of the SAML realm in Elasticsearch.

+[discrete]
+==== suggest_user_profiles
+Get suggestions for user profiles that match specified search criteria.
+
+{ref}/security-api-suggest-user-profile.html[Endpoint documentation]
+[source,ts]
+----
+client.security.suggestUserProfiles({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (Optional, string)*: Query string used to match name-related fields in user profile documents.
+Name-related fields are the user's `username`, `full_name`, and `email`.
+** *`size` (Optional, number)*: Number of profiles to return.
+** *`data` (Optional, string | string[])*: List of filters for the `data` field of the profile document.
+To return all content use `data=*`. To return a subset of content
+use `data=<key>` to retrieve content nested under the specified `<key>`.
+By default returns no `data` content.
+** *`hint` (Optional, { uids, labels })*: Extra search criteria to improve relevance of the suggestion result.
+Profiles matching the specified hint are ranked higher in the response.
+Profiles that do not match the hint are not excluded from the response
+as long as the profile matches the `name` field query.

[discrete]
==== update_api_key
Updates attributes of an existing API key.
+Users can only update API keys that they created or that were granted to them.
+Use this API to update API keys created by the create API Key or grant API Key APIs.
+If you need to apply the same update to many API keys, you can use bulk update API Keys to reduce overhead.
+It’s not possible to update expired API keys, or API keys that have been invalidated by invalidate API Key.
+This API supports updates to an API key’s access scope and metadata.
+The access scope of an API key is derived from the `role_descriptors` you specify in the request, and a snapshot of the owner user’s permissions at the time of the request.
+The snapshot of the owner’s permissions is updated automatically on every call.
+If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope.
+This change can occur if the owner user’s permissions have changed since the API key was created or last modified.
+To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.
+IMPORTANT: It’s not possible to use an API key as the authentication credential for this API.
+To update an API key, the owner user’s credentials are required.

{ref}/security-api-update-api-key.html[Endpoint documentation]
[source,ts]
----
client.security.updateApiKey({ id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`id` (string)*: The ID of the API key to update.
** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API.
** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage.
** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key.

[discrete]
==== update_settings
Update settings for the security system index.

{ref}/security-api-update-settings.html[Endpoint documentation]
[source,ts]
----
client.security.updateSettings()
----

+[discrete]
+==== update_user_profile_data
+Updates specific data for the user profile that's associated with the specified unique ID.
+
+{ref}/security-api-update-user-profile-data.html[Endpoint documentation]
+[source,ts]
+----
+client.security.updateUserProfileData({ uid })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`uid` (string)*: A unique identifier for the user profile.
+** *`labels` (Optional, Record)*: Searchable data that you want to associate with the user profile. This
+field supports a nested data structure.
+** *`data` (Optional, Record)*: Non-searchable data that you want to associate with the user profile.
+This field supports a nested data structure.
+** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number.
+** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term.
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation
+visible to search, if 'wait_for' then wait for a refresh to make this operation
+visible to search, if 'false' do nothing with refreshes.

[discrete]
=== shutdown
[discrete]
==== delete_node
Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.

https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation]
[source,ts]
----
client.shutdown.deleteNode({ node_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`node_id` (string)*: The node id of node to be removed from the shutdown state
** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== get_node
Retrieve status of a node or nodes that are currently marked as shutting down. Designed for indirect use by ECE/ESS and ECK.
Direct use is not supported.

https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation]
[source,ts]
----
client.shutdown.getNode({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`node_id` (Optional, string | string[])*: Which node for which to retrieve the shutdown status
** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== put_node
Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.

https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation]
[source,ts]
----
client.shutdown.putNode({ node_id, type, reason })
----

[discrete]
==== Arguments

* *Request (object):*
** *`node_id` (string)*: The node id of node to be shut down
** *`type` (Enum("restart" | "remove" | "replace"))*: Valid values are restart, remove, or replace.
Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance.
Because the node is expected to rejoin the cluster, data is not migrated off of the node.
Use remove when you need to permanently remove a node from the cluster.
The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node.
Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node.
During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete.
** *`reason` (string)*: A human-readable reason that the node is being shut down.
This field provides information for other cluster operators; it does not affect the shut down process.
** *`allocation_delay` (Optional, string)*: Only valid if type is restart.
Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes.
This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting.
If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used.
** *`target_node_name` (Optional, string)*: Only valid if type is replace.
Specifies the name of the node that is replacing the node being shut down.
Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node.
During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules.
** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
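
An illustrative sketch of registering a restart-type shutdown; the node ID
and the ten-minute delay are placeholders:

[source,ts]
----
// Mark a node for restart and give it ten minutes to rejoin the cluster
// before its shards are reallocated elsewhere.
const response = await client.shutdown.putNode({
  node_id: 'node-1',
  type: 'restart',
  reason: 'Applying a configuration change',
  allocation_delay: '10m'
})
console.log(response.acknowledged)
----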
+
+[discrete]
+=== simulate
+[discrete]
+==== ingest
+Simulates running ingest with example documents.
+
+{ref}/simulate-ingest-api.html[Endpoint documentation]
+[source,ts]
+----
+client.simulate.ingest()
+----

[discrete]
=== slm
[discrete]
==== delete_lifecycle
Deletes an existing snapshot lifecycle policy.

{ref}/slm-api-delete-policy.html[Endpoint documentation]
[source,ts]
----
client.slm.deleteLifecycle({ policy_id })
----

[discrete]
==== stop
Turns off snapshot lifecycle management (SLM).

{ref}/slm-api-stop.html[Endpoint documentation]
[source,ts]
----
client.slm.stop()
----

[discrete]
=== snapshot
[discrete]
==== cleanup_repository
-Removes stale data from repository.
+Triggers the review of a snapshot repository’s contents and deletes any stale data not referenced by existing snapshots.

{ref}/clean-up-snapshot-repo-api.html[Endpoint documentation]
[source,ts]
----
client.snapshot.cleanupRepository({ repository })
----

[discrete]
=== sql
[discrete]
==== query
Executes a SQL request.

{ref}/sql-search-api.html[Endpoint documentation]
[source,ts]
----
client.sql.query({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters.
It ignores other request body parameters.
** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
** *`query` (Optional, string)*: SQL query to run.
** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails.
** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails.

[discrete]
==== translate
Translates SQL into Elasticsearch queries.

{ref}/sql-translate-api.html[Endpoint documentation]
[source,ts]
----
client.sql.translate({ query })
----

[discrete]
==== Arguments

* *Request (object):*
** *`query` (string)*: SQL query to run.
** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search.

[discrete]
=== synonyms
[discrete]
==== get_synonyms_sets
Retrieves a summary of all defined synonym sets.

{ref}/list-synonyms-sets.html[Endpoint documentation]
[source,ts]
----
client.synonyms.getSynonymsSets({ ... })
----

[discrete]
==== put_synonym
-Creates or updates a synonyms set
+Creates or updates a synonym set.

{ref}/put-synonyms-set.html[Endpoint documentation]
[source,ts]
----
client.synonyms.putSynonym({ id, synonyms_set })
----

[discrete]
=== tasks
[discrete]
==== cancel
Cancels a task, if it can be cancelled through an API.

{ref}/tasks.html[Endpoint documentation]
[source,ts]
----
client.tasks.cancel({ ... })
----

[discrete]
==== get
Returns information about a task.

{ref}/tasks.html[Endpoint documentation]
[source,ts]
----
client.tasks.get({ task_id })
----

[discrete]
==== Arguments

* *Request (object):*
If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== list
-Returns a list of tasks.
+The task management API returns information about tasks currently executing on one or more nodes in the cluster.

{ref}/tasks.html[Endpoint documentation]
[source,ts]
----
client.tasks.list({ ... })
----
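
As a quick sketch, listing detailed information about in-flight search
tasks; the action filter is illustrative:

[source,ts]
----
// Group currently running search-related tasks by their parent task.
const response = await client.tasks.list({
  detailed: true,
  actions: '*search*',
  group_by: 'parents'
})
console.log(response.tasks)
----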
+[source,ts] +---- +client.transform.getNodeStats() +---- + + [discrete] ==== get_transform Retrieves configuration information for transforms. @@ -9376,6 +10369,10 @@ there are no matches or only partial matches. ==== preview_transform Previews a transform. +It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also +generates a list of mappings and settings for the destination index. These values are determined based on the field +types of the source index and the transform aggregations. + {ref}/preview-transform.html[Endpoint documentation] [source,ts] ---- @@ -9409,7 +10406,27 @@ timeout expires, the request fails and returns an error. [discrete] ==== put_transform -Instantiates a transform. +Creates a transform. + +A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as +a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a +unique row per entity. + +You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If +you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in +the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values +in the latest object. + +You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and +`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the +transform remembers which roles the user that created it had at the time of creation and uses those same roles. If +those roles do not have the required privileges on the source and destination indices, the transform fails when it +attempts unauthorized operations. + +NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any +`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do +not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not +give users any privileges on `.data-frame-internal*` indices. {ref}/put-transform.html[Endpoint documentation] [source,ts] @@ -9446,7 +10463,9 @@ the exception of privilege checks. [discrete] ==== reset_transform -Resets an existing transform. +Resets a transform. +Before you can reset it, you must stop it; alternatively, use the `force` query parameter. +If the destination index was created by the transform, it is deleted. {ref}/reset-transform.html[Endpoint documentation] [source,ts] @@ -9467,6 +10486,11 @@ must be stopped before it can be reset. ==== schedule_now_transform Schedules now a transform. +If you _schedule_now a transform, it will process the new data instantly, +without waiting for the configured frequency interval. After _schedule_now API is called, +the transform will be processed again at now + frequency unless _schedule_now API +is called again in the meantime. + {ref}/schedule-now-transform.html[Endpoint documentation] [source,ts] ---- @@ -9482,7 +10506,22 @@ client.transform.scheduleNowTransform({ transform_id }) [discrete] ==== start_transform -Starts one or more transforms. +Starts a transform. 
+ +When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is +set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping +definitions for the destination index from the source indices and the transform aggregations. If fields in the +destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), +the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce +mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you +start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings +in a pivot transform. + +When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you +created the transform, they occur when you start the transform—​with the exception of privilege checks. When +Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the +time of creation and uses those same roles. If those roles do not have the required privileges on the source and +destination indices, the transform fails when it attempts unauthorized operations. {ref}/start-transform.html[Endpoint documentation] [source,ts] @@ -9535,6 +10574,12 @@ immediately and the indexer is stopped asynchronously in the background. ==== update_transform Updates certain properties of a transform. +All updated properties except `description` do not take effect until after the transform starts the next checkpoint, +thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` +privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When +Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the +time of update and runs with those privileges. + {ref}/update-transform.html[Endpoint documentation] [source,ts] ---- @@ -9567,6 +10612,11 @@ timeout expires, the request fails and returns an error. [discrete] ==== upgrade_transforms Upgrades all transforms. +This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It +also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not +affect the source and destination indices. The upgrade also does not affect the roles that transforms use when +Elasticsearch security features are enabled; the role used to read source data and write to the destination index +remains unchanged. {ref}/upgrade-transforms.html[Endpoint documentation] [source,ts] @@ -9651,7 +10701,8 @@ client.watcher.deleteWatch({ id }) [discrete] ==== execute_watch -Forces the execution of a stored watch. +This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. +For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can execute the watch without executing all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after execution. 
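+
+For example, the following sketch (illustrative only; the watch ID `my_watch` is hypothetical) runs a watch with all of its
+actions simulated, so nothing actually fires:
+
+[source,ts]
+----
+const response = await client.watcher.executeWatch({
+  id: 'my_watch',
+  action_modes: { _all: 'simulate' }
+})
+----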
{ref}/watcher-api-execute-watch.html[Endpoint documentation] [source,ts] @@ -9743,7 +10794,7 @@ client.watcher.queryWatches({ ... }) * *Request (object):* ** *`from` (Optional, number)*: The offset from the first result to fetch. Needs to be non-negative. ** *`size` (Optional, number)*: The number of hits to return. Needs to be non-negative. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule_query, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Optional, query filter watches to be returned. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Optional, query filter watches to be returned. ** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Optional sort definition. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Optional search After to do pagination using last hit’s sort values. @@ -9801,7 +10852,7 @@ client.watcher.updateSettings() === xpack [discrete] ==== info -Retrieves information about the installed X-Pack features. +Provides general information about the installed X-Pack features. {ref}/info-api.html[Endpoint documentation] [source,ts] @@ -9819,7 +10870,7 @@ client.xpack.info({ ... }) [discrete] ==== usage -Retrieves usage information about the installed X-Pack features. +This API provides information about which features are currently enabled and available under the current license and some usage statistics. {ref}/usage-api.html[Endpoint documentation] [source,ts] diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index e684cebfc..4a8cef916 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,7 @@ export default class AsyncSearch { } /** - * Deletes an async search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. + * Deletes an async search by identifier. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. 
If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation}
   */
  async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchDeleteResponse>
@@ -66,11 +67,17 @@ export default class AsyncSearch {
    const method = 'DELETE'
    const path = `/_async_search/${encodeURIComponent(params.id.toString())}`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'async_search.delete',
+      pathParts: {
+        id: params.id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
  }

  /**
-   * Retrieves the results of a previously submitted async search request given its ID.
+   * Retrieves the results of a previously submitted async search request given its identifier. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation}
   */
  async get<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchGetResponse<TDocument, TAggregations>>
@@ -92,11 +99,17 @@ export default class AsyncSearch {
    const method = 'GET'
    const path = `/_async_search/${encodeURIComponent(params.id.toString())}`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'async_search.get',
+      pathParts: {
+        id: params.id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
  }

  /**
-   * Retrieves the status of a previously submitted async search request given its ID.
+   * Get async search status. Retrieves the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation}
   */
  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchStatusResponse>
@@ -118,11 +131,17 @@ export default class AsyncSearch {
    const method = 'GET'
    const path = `/_async_search/status/${encodeURIComponent(params.id.toString())}`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'async_search.status',
+      pathParts: {
+        id: params.id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
  }

  /**
-   * Executes a search request asynchronously.
+   * Runs a search request asynchronously. When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field, hence partial results become available following the sort criteria that were requested. Warning: Async search does not support scroll or search requests that only include the suggest section. 
By default, Elasticsearch doesn’t allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} */ async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -170,6 +189,12 @@ export default class AsyncSearch { method = 'POST' path = '/_async_search' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'async_search.submit', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index 941f81083..04a2ab060 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -66,7 +67,13 @@ export default class Autoscaling { const method = 'DELETE' const path = `/_autoscaling/policy/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'autoscaling.delete_autoscaling_policy', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -93,7 +100,10 @@ export default class Autoscaling { const method = 'GET' const path = '/_autoscaling/capacity' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'autoscaling.get_autoscaling_capacity' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -119,7 +129,13 @@ export default class Autoscaling { const method = 'GET' const path = `/_autoscaling/policy/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'autoscaling.get_autoscaling_policy', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -150,6 +166,12 @@ export default class Autoscaling { const method = 'PUT' const path = `/_autoscaling/policy/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'autoscaling.put_autoscaling_policy', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index f84ea175f..fe5fb81a7 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Allows to perform multiple 
index/update/delete operations in a single request. + * Performs multiple indexing or delete operations in a single API call. This reduces overhead and can greatly increase indexing speed. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html | Elasticsearch API documentation} */ export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -72,5 +73,11 @@ export default async function BulkApi +export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> +export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise +export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_capabilities' + const meta: TransportRequestMetadata = { + name: 'capabilities' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index af8b64da8..8d9a04a55 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,7 @@ export default class Cat { } /** - * Shows information about currently configured aliases to indices including filter and routing infos. + * Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. IMPORTANT: cat APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html | Elasticsearch API documentation} */ async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -74,11 +75,17 @@ export default class Cat { method = 'GET' path = '/_cat/aliases' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.aliases', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using. + * Provides a snapshot of the number of shards allocated to each data node and their disk space. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
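+   * @example
+   * A usage sketch (illustrative only, not generated documentation), assuming an already instantiated `client`:
+   *
+   *     const allocation = await client.cat.allocation({ format: 'json', bytes: 'mb' })
+   *     console.log(allocation) // one entry per data node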
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html | Elasticsearch API documentation} */ async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -108,11 +115,17 @@ export default class Cat { method = 'GET' path = '/_cat/allocation' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.allocation', + pathParts: { + node_id: params.node_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about existing component_templates templates. + * Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html | Elasticsearch API documentation} */ async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -142,11 +155,17 @@ export default class Cat { method = 'GET' path = '/_cat/component_templates' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.component_templates', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Provides quick access to the document count of the entire cluster, or individual indices. + * Provides quick access to a document count for a data stream, an index, or an entire cluster. NOTE: The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html | Elasticsearch API documentation} */ async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -176,11 +195,17 @@ export default class Cat { method = 'GET' path = '/_cat/count' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.count', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Shows how much heap memory is currently being used by fielddata on every data node in the cluster. + * Returns the amount of heap memory currently used by the field data cache on every data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. 
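+   * @example
+   * A sketch (the field name `title` is hypothetical; assumes a configured `client`):
+   *
+   *     const fielddata = await client.cat.fielddata({ fields: 'title', format: 'json' })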
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html | Elasticsearch API documentation} */ async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -210,11 +235,17 @@ export default class Cat { method = 'GET' path = '/_cat/fielddata' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.fielddata', + pathParts: { + fields: params.fields + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns a concise representation of the cluster health. + * Returns the health status of a cluster, similar to the cluster health API. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html | Elasticsearch API documentation} */ async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -237,7 +268,10 @@ export default class Cat { const method = 'GET' const path = '/_cat/health' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.health' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -264,11 +298,14 @@ export default class Cat { const method = 'GET' const path = '/_cat' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.help' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about indices: number of primaries and replicas, document counts, disk size, ... + * Returns high-level information about indices in a cluster, including backing indices for data streams. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index API. Use the cat indices API to get the following information for each index in a cluster: shard count; document count; deleted document count; primary store size; total store size of all shards, including shard replicas. These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. 
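+   * @example
+   * For instance (illustrative only; the index pattern is hypothetical):
+   *
+   *     const indices = await client.cat.indices({ index: 'my-index-*', format: 'json' })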
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html | Elasticsearch API documentation} */ async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -298,11 +335,17 @@ export default class Cat { method = 'GET' path = '/_cat/indices' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.indices', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about the master node. + * Returns information about the master node, including the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-master.html | Elasticsearch API documentation} */ async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -325,11 +368,14 @@ export default class Cat { const method = 'GET' const path = '/_cat/master' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.master' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Gets configuration and usage information about data frame analytics jobs. + * Returns configuration and usage information about data frame analytics jobs. IMPORTANT: cat APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-dfanalytics.html | Elasticsearch API documentation} */ async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -359,11 +405,17 @@ export default class Cat { method = 'GET' path = '/_cat/ml/data_frame/analytics' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.ml_data_frame_analytics', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Gets configuration and usage information about datafeeds. + * Returns configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: cat APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. 
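+   * @example
+   * A minimal sketch (assumes a configured `client` and a cluster with ML enabled):
+   *
+   *     const datafeeds = await client.cat.mlDatafeeds({ format: 'json', allow_no_match: true })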
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-datafeeds.html | Elasticsearch API documentation} */ async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -393,11 +445,17 @@ export default class Cat { method = 'GET' path = '/_cat/ml/datafeeds' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.ml_datafeeds', + pathParts: { + datafeed_id: params.datafeed_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Gets configuration and usage information about anomaly detection jobs. + * Returns configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: cat APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-anomaly-detectors.html | Elasticsearch API documentation} */ async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -427,11 +485,17 @@ export default class Cat { method = 'GET' path = '/_cat/ml/anomaly_detectors' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.ml_jobs', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Gets configuration and usage information about inference trained models. + * Returns configuration and usage information about inference trained models. IMPORTANT: cat APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-trained-model.html | Elasticsearch API documentation} */ async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -461,11 +525,17 @@ export default class Cat { method = 'GET' path = '/_cat/ml/trained_models' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.ml_trained_models', + pathParts: { + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about custom node attributes. + * Returns information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
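+   * @example
+   * A sketch (illustrative only); the `h` parameter selects the columns to return:
+   *
+   *     const attrs = await client.cat.nodeattrs({ format: 'json', h: 'node,attr,value' })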
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html | Elasticsearch API documentation} */ async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -488,11 +558,14 @@ export default class Cat { const method = 'GET' const path = '/_cat/nodeattrs' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.nodeattrs' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns basic statistics about performance of cluster nodes. + * Returns information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html | Elasticsearch API documentation} */ async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -515,11 +588,14 @@ export default class Cat { const method = 'GET' const path = '/_cat/nodes' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.nodes' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns a concise representation of the cluster pending tasks. + * Returns cluster-level changes that have not yet been executed. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html | Elasticsearch API documentation} */ async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -542,11 +618,14 @@ export default class Cat { const method = 'GET' const path = '/_cat/pending_tasks' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.pending_tasks' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about installed plugins across nodes node. + * Returns a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
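+   * @example
+   * For example (illustrative, assuming an instantiated `client`):
+   *
+   *     const plugins = await client.cat.plugins({ format: 'json' })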
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html | Elasticsearch API documentation} */ async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -569,11 +648,14 @@ export default class Cat { const method = 'GET' const path = '/_cat/plugins' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.plugins' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about index shard recoveries, both on-going completed. + * Returns information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html | Elasticsearch API documentation} */ async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -603,11 +685,17 @@ export default class Cat { method = 'GET' path = '/_cat/recovery' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.recovery', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about snapshot repositories registered in the cluster. + * Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html | Elasticsearch API documentation} */ async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -630,11 +718,14 @@ export default class Cat { const method = 'GET' const path = '/_cat/repositories' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.repositories' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Provides low-level information about the segments in the shards of an index. + * Returns low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. 
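+   * @example
+   * A usage sketch (the index name `my-index` is hypothetical):
+   *
+   *     const segments = await client.cat.segments({ index: 'my-index', format: 'json', bytes: 'kb' })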
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html | Elasticsearch API documentation} */ async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -664,11 +755,17 @@ export default class Cat { method = 'GET' path = '/_cat/segments' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.segments', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Provides a detailed view of shard allocation on nodes. + * Returns information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html | Elasticsearch API documentation} */ async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -698,11 +795,17 @@ export default class Cat { method = 'GET' path = '/_cat/shards' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.shards', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns all snapshots in a specific repository. + * Returns information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html | Elasticsearch API documentation} */ async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -732,11 +835,17 @@ export default class Cat { method = 'GET' path = '/_cat/snapshots' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.snapshots', + pathParts: { + repository: params.repository + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about the tasks currently executing on one or more nodes in the cluster. + * Returns information about tasks currently executing in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. 
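+   * @example
+   * A sketch (illustrative only, assuming a configured `client`):
+   *
+   *     const tasks = await client.cat.tasks({ format: 'json', detailed: true })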
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -759,11 +868,14 @@ export default class Cat { const method = 'GET' const path = '/_cat/tasks' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.tasks' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about existing templates. + * Returns information about index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html | Elasticsearch API documentation} */ async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -793,11 +905,17 @@ export default class Cat { method = 'GET' path = '/_cat/templates' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.templates', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns cluster-wide thread pool statistics per node. By default the active, queue and rejected statistics are returned for all thread pools. + * Returns thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html | Elasticsearch API documentation} */ async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -827,11 +945,17 @@ export default class Cat { method = 'GET' path = '/_cat/thread_pool' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.thread_pool', + pathParts: { + thread_pool_patterns: params.thread_pool_patterns + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Gets configuration and usage information about transforms. + * Returns configuration and usage information about transforms. IMPORTANT: cat APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. 
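+   * @example
+   * For example (illustrative; `size` caps how many transforms are returned):
+   *
+   *     const transforms = await client.cat.transforms({ format: 'json', size: 100 })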
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-transforms.html | Elasticsearch API documentation} */ async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -861,6 +985,12 @@ export default class Cat { method = 'GET' path = '/_cat/transforms' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cat.transforms', + pathParts: { + transform_id: params.transform_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index d2f58a616..ec3db24c8 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -66,7 +67,13 @@ export default class Ccr { const method = 'DELETE' const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.delete_auto_follow_pattern', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -104,7 +111,13 @@ export default class Ccr { const method = 'PUT' const path = `/${encodeURIComponent(params.index.toString())}/_ccr/follow` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.follow', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -130,7 +143,13 @@ export default class Ccr { const method = 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_ccr/info` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.follow_info', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -156,7 +175,13 @@ export default class Ccr { const method = 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_ccr/stats` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.follow_stats', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -194,7 +219,13 @@ export default class Ccr { const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_ccr/forget_follower` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.forget_follower', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -228,7 +259,13 @@ export default class Ccr { method = 'GET' path = '/_ccr/auto_follow' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.get_auto_follow_pattern', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, 
querystring, body, meta }, options) } /** @@ -254,7 +291,13 @@ export default class Ccr { const method = 'POST' const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}/pause` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.pause_auto_follow_pattern', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -280,7 +323,13 @@ export default class Ccr { const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_ccr/pause_follow` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.pause_follow', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -318,7 +367,13 @@ export default class Ccr { const method = 'PUT' const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.put_auto_follow_pattern', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -344,7 +399,13 @@ export default class Ccr { const method = 'POST' const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}/resume` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.resume_auto_follow_pattern', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -382,7 +443,13 @@ export default class Ccr { const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_ccr/resume_follow` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.resume_follow', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -409,7 +476,10 @@ export default class Ccr { const method = 'GET' const path = '/_ccr/stats' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.stats' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -435,6 +505,12 @@ export default class Ccr { const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_ccr/unfollow` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ccr.unfollow', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts index 99eae7286..7c4848d45 100644 --- a/src/api/api/clear_scroll.ts +++ b/src/api/api/clear_scroll.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Explicitly 
clears the search context for a scroll. + * Clears the search context and results for a scrolling search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-scroll-api.html | Elasticsearch API documentation} */ export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -73,5 +74,11 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll const method = 'DELETE' const path = '/_search/scroll' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'clear_scroll', + pathParts: { + scroll_id: params.scroll_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts index 380689069..c4c779e5e 100644 --- a/src/api/api/close_point_in_time.ts +++ b/src/api/api/close_point_in_time.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Close a point in time + * Closes a point-in-time. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html | Elasticsearch API documentation} */ export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -72,5 +73,8 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo const method = 'DELETE' const path = '/_pit' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'close_point_in_time' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 1c1b9c8d1..55025b0b0 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -79,11 +80,14 @@ export default class Cluster { const method = body != null ? 'POST' : 'GET' const path = '/_cluster/allocation/explain' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.allocation_explain' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes a component template + * Deletes component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
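+   * @example
+   * A sketch (the template name is hypothetical; assumes a configured `client`):
+   *
+   *     await client.cluster.deleteComponentTemplate({ name: 'my-component-template' })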
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -105,7 +109,13 @@ export default class Cluster { const method = 'DELETE' const path = `/_component_template/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.delete_component_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -132,7 +142,10 @@ export default class Cluster { const method = 'DELETE' const path = '/_cluster/voting_config_exclusions' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.delete_voting_config_exclusions' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -158,11 +171,17 @@ export default class Cluster { const method = 'HEAD' const path = `/_component_template/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.exists_component_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns one or more component templates + * Retrieves information about component templates. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -192,11 +211,17 @@ export default class Cluster { method = 'GET' path = '/_component_template' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.get_component_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns cluster settings. + * Returns cluster-wide settings. By default, it returns only settings that have been explicitly defined. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-get-settings.html | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -219,11 +244,14 @@ export default class Cluster { const method = 'GET' const path = '/_cluster/settings' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.get_settings' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns basic information about the health of the cluster. + * The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. 
For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html | Elasticsearch API documentation} */ async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -253,7 +281,13 @@ export default class Cluster { method = 'GET' path = '/_cluster/health' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.health', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -279,11 +313,17 @@ export default class Cluster { const method = 'GET' const path = `/_info/${encodeURIComponent(params.target.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.info', + pathParts: { + target: params.target + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns a list of any cluster-level changes (e.g. create index, update mapping, allocate or fail shard) which have not yet been executed. + * Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.html | Elasticsearch API documentation} */ async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -306,7 +346,10 @@ export default class Cluster { const method = 'GET' const path = '/_cluster/pending_tasks' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.pending_tasks' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -333,11 +376,14 @@ export default class Cluster { const method = 'POST' const path = '/_cluster/voting_config_exclusions' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.post_voting_config_exclusions' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates or updates a component template + * Creates or updates a component template. 
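
Since the health endpoint described above is commonly polled from deployment scripts, a small illustrative call (index name and timeout are placeholders):

```ts
const health = await client.cluster.health({
  index: 'my-index',
  wait_for_status: 'yellow', // block until at least yellow, or until the timeout
  timeout: '30s'
})
console.log(health.status) // 'green' | 'yellow' | 'red'
```
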
Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -371,7 +417,13 @@ export default class Cluster { const method = 'PUT' const path = `/_component_template/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.put_component_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -410,11 +462,14 @@ export default class Cluster { const method = 'PUT' const path = '/_cluster/settings' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.put_settings' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns the information about configured remote clusters. + * The cluster remote info API allows you to retrieve all of the configured remote cluster information. It returns connection and endpoint information keyed by the configured remote cluster alias. 
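
A sketch of creating the kind of component template the comment above describes; the template name and settings are placeholders:

```ts
await client.cluster.putComponentTemplate({
  name: 'my-settings-component',
  template: {
    settings: { number_of_replicas: 1 }
  }
})
// Reference it from an index template via that template's `composed_of` list.
```
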
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html | Elasticsearch API documentation} */ async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -437,7 +492,10 @@ export default class Cluster { const method = 'GET' const path = '/_remote/info' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.remote_info' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -476,7 +534,10 @@ export default class Cluster { const method = 'POST' const path = '/_cluster/reroute' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.reroute' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -513,11 +574,18 @@ export default class Cluster { method = 'GET' path = '/_cluster/state' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.state', + pathParts: { + metric: params.metric, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns high-level overview of cluster statistics. + * Returns cluster statistics. It returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -547,6 +615,12 @@ export default class Cluster { method = 'GET' path = '/_cluster/stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'cluster.stats', + pathParts: { + node_id: params.node_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/count.ts b/src/api/api/count.ts index 0d01dbb57..32e09ad9b 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -80,5 +81,11 @@ export default async function CountApi (this: That, params?: T.CountRequest | TB method = body != null ? 
'POST' : 'GET' path = '/_count' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'count', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/create.ts b/src/api/api/create.ts index 622f8eb63..29fedfbc0 100644 --- a/src/api/api/create.ts +++ b/src/api/api/create.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Creates a new document in the index. Returns a 409 response when a document with a same ID already exists in the index. + * Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation} */ export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -65,5 +66,12 @@ export default async function CreateApi (this: That, params const method = 'PUT' const path = `/${encodeURIComponent(params.index.toString())}/_create/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'create', + pathParts: { + id: params.id, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index 069734428..e042d801a 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -66,7 +67,13 @@ export default class DanglingIndices { const method = 'DELETE' const path = `/_dangling/${encodeURIComponent(params.index_uuid.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'dangling_indices.delete_dangling_index', + pathParts: { + index_uuid: params.index_uuid + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -92,7 +99,13 @@ export default class DanglingIndices { const method = 'POST' const path = `/_dangling/${encodeURIComponent(params.index_uuid.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'dangling_indices.import_dangling_index', + pathParts: { + index_uuid: params.index_uuid + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -119,6 +132,9 @@ export default class DanglingIndices { const method = 'GET' const path = '/_dangling' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'dangling_indices.list_dangling_indices' + } + return await this.transport.request({ path, method, querystring, body, meta }, 
options) } } diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts index f834f7283..fd9bb85ee 100644 --- a/src/api/api/delete.ts +++ b/src/api/api/delete.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Removes a document from the index. + * Removes a JSON document from the specified index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html | Elasticsearch API documentation} */ export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -60,5 +61,12 @@ export default async function DeleteApi (this: That, params: T.DeleteRequest | T const method = 'DELETE' const path = `/${encodeURIComponent(params.index.toString())}/_doc/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'delete', + pathParts: { + id: params.id, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index fc62679cd..d7dc4bd44 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Deletes documents matching the provided query. + * Deletes documents that match the specified query. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html | Elasticsearch API documentation} */ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -72,5 +73,11 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_delete_by_query` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'delete_by_query', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts index bb57bea1d..d7847a1ba 100644 --- a/src/api/api/delete_by_query_rethrottle.ts +++ b/src/api/api/delete_by_query_rethrottle.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -60,5 +61,11 @@ export default async function DeleteByQueryRethrottleApi (this: That, params: T. 
const method = 'POST' const path = `/_delete_by_query/${encodeURIComponent(params.task_id.toString())}/_rethrottle` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'delete_by_query_rethrottle', + pathParts: { + task_id: params.task_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts index 5c849219f..2f63e01e1 100644 --- a/src/api/api/delete_script.ts +++ b/src/api/api/delete_script.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Deletes a script. + * Deletes a stored script or search template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} */ export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -60,5 +61,11 @@ export default async function DeleteScriptApi (this: That, params: T.DeleteScrip const method = 'DELETE' const path = `/_scripts/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'delete_script', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index f2f0f682f..b25c42ede 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -66,7 +67,13 @@ export default class Enrich { const method = 'DELETE' const path = `/_enrich/policy/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'enrich.delete_policy', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -92,11 +99,17 @@ export default class Enrich { const method = 'PUT' const path = `/_enrich/policy/${encodeURIComponent(params.name.toString())}/_execute` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'enrich.execute_policy', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Gets information about an enrich policy. + * Returns information about an enrich policy. 
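
A consumer-side sketch of the delete-by-query flow covered above; the index name and query are placeholders:

```ts
const result = await client.deleteByQuery({
  index: 'my-index',
  conflicts: 'proceed', // skip version conflicts instead of aborting
  query: { match: { 'user.id': 'kimchy' } }
})
console.log(result.deleted, result.version_conflicts)
```
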
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-enrich-policy-api.html | Elasticsearch API documentation} */ async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -126,11 +139,17 @@ export default class Enrich { method = 'GET' path = '/_enrich/policy' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'enrich.get_policy', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates a new enrich policy. + * Creates an enrich policy. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-enrich-policy-api.html | Elasticsearch API documentation} */ async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -164,11 +183,17 @@ export default class Enrich { const method = 'PUT' const path = `/_enrich/policy/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'enrich.put_policy', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Gets enrich coordinator statistics and information about enrich policies that are currently executing. + * Returns enrich coordinator statistics and information about enrich policies that are currently executing. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/enrich-stats-api.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -191,6 +216,9 @@ export default class Enrich { const method = 'GET' const path = '/_enrich/_stats' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'enrich.stats' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index 3846df925..0e0ddd859 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,7 @@ export default class Eql { } /** - * Deletes an async EQL search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. + * Deletes an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. 
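
To make the enrich policy lifecycle above concrete, a sketch that creates and then executes a match policy (the policy name, indices, and fields are all placeholders):

```ts
await client.enrich.putPolicy({
  name: 'users-policy',
  match: {
    indices: 'users',
    match_field: 'email',
    enrich_fields: ['first_name', 'last_name']
  }
})
// Executing the policy builds the internal enrich index it needs.
await client.enrich.executePolicy({ name: 'users-policy' })
```
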
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/eql-search-api.html | Elasticsearch API documentation} */ async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -66,11 +67,17 @@ export default class Eql { const method = 'DELETE' const path = `/_eql/search/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'eql.delete', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns async results from previously executed Event Query Language (EQL) search + * Returns the current status and available results for an async EQL search or a stored synchronous EQL search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-search-api.html | Elasticsearch API documentation} */ async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -92,11 +99,17 @@ export default class Eql { const method = 'GET' const path = `/_eql/search/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'eql.get', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns the status of a previously submitted async or stored Event Query Language (EQL) search + * Returns the current status for an async EQL search or a stored synchronous EQL search without returning results. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-status-api.html | Elasticsearch API documentation} */ async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -118,7 +131,13 @@ export default class Eql { const method = 'GET' const path = `/_eql/search/status/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'eql.get_status', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -156,6 +175,12 @@ export default class Eql { const method = body != null ? 
'POST' : 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_eql/search` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'eql.search', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index 4d211a14c..da1570d79 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,68 @@ export default class Esql { } /** - * Executes an ESQL request + * Executes an ESQL request asynchronously + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-api.html | Elasticsearch API documentation} + */ + async asyncQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async asyncQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async asyncQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async asyncQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_query/async' + const meta: TransportRequestMetadata = { + name: 'esql.async_query' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retrieves the results of a previously submitted async query request given its ID. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-get-api.html | Elasticsearch API documentation} + */ + async asyncQueryGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async asyncQueryGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async asyncQueryGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async asyncQueryGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_query/async/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'esql.async_query_get', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Executes an ES|QL request * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-rest.html | Elasticsearch API documentation} */ async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -52,7 +114,7 @@ export default class Esql { async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'query'] + const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -78,6 +140,9 @@ export default class Esql { const method = 'POST' const path = '/_query' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'esql.query' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts index a0448c0aa..4acd561d9 100644 --- a/src/api/api/exists.ts +++ b/src/api/api/exists.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns information about whether a document exists in an index. + * Checks if a document in an index exists. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -60,5 +61,12 @@ export default async function ExistsApi (this: That, params: T.ExistsRequest | T const method = 'HEAD' const path = `/${encodeURIComponent(params.index.toString())}/_doc/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'exists', + pathParts: { + id: params.id, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts index ba264a376..714e62fa7 100644 --- a/src/api/api/exists_source.ts +++ b/src/api/api/exists_source.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns information about whether a document source exists in an index. 
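
The ES|QL endpoint above takes the query string (and the newly accepted `profile` and `tables` body fields) in the request body. A minimal sketch; the exact response typing varies by client version, so the result is only logged here:

```ts
const resp = await client.esql.query({
  query: 'FROM my-index | WHERE message LIKE "error*" | LIMIT 10'
})
console.log(resp) // tabular result: column metadata plus row values
```
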
+ * Checks if a document's `_source` is stored. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -60,5 +61,12 @@ export default async function ExistsSourceApi (this: That, params: T.ExistsSourc const method = 'HEAD' const path = `/${encodeURIComponent(params.index.toString())}/_source/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'exists_source', + pathParts: { + id: params.id, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index 6e910ffb2..13a744093 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns information about why a specific matches (or doesn't match) a query. + * Returns information about why a specific document matches (or doesn’t match) a query. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html | Elasticsearch API documentation} */ export default async function ExplainApi (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -72,5 +73,12 @@ export default async function ExplainApi (this: That, param const method = body != null ? 
'POST' : 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_explain/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'explain', + pathParts: { + id: params.id, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/features.ts b/src/api/api/features.ts index 5bffb723c..6ec8b7c75 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -67,7 +68,10 @@ export default class Features { const method = 'GET' const path = '/_features' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'features.get_features' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -94,6 +98,9 @@ export default class Features { const method = 'POST' const path = '/_features/_reset' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'features.reset_features' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index ec0a5b086..7a2c5bc12 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns the information about the capabilities of fields among multiple indices. + * The field capabilities API returns the information about the capabilities of fields among multiple indices. The field capabilities API returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned as any other field that belongs to the `keyword` family. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html | Elasticsearch API documentation} */ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -80,5 +81,11 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ method = body != null ? 
'POST' : 'GET' path = '/_field_caps' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'field_caps', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index 965a926bb..4fe0b0ed8 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -65,7 +66,13 @@ export default class Fleet { const method = 'DELETE' const path = `/_fleet/secret/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'fleet.delete_secret', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -90,7 +97,13 @@ export default class Fleet { const method = 'GET' const path = `/_fleet/secret/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'fleet.get_secret', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -116,11 +129,17 @@ export default class Fleet { const method = 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_fleet/global_checkpoints` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'fleet.global_checkpoints', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Multi Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project. + * Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) with a single API request. The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) API. However, similar to the fleet search API, it supports the wait_for_checkpoints parameter. */ async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> @@ -153,7 +172,13 @@ export default class Fleet { method = body != null ? 
'POST' : 'GET'
       path = '/_fleet/_fleet_msearch'
     }
-    return await this.transport.request({ path, method, querystring, bulkBody: body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'fleet.msearch',
+      pathParts: {
+        index: params.index
+      }
+    }
+    return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options)
   }

   /**
@@ -178,11 +203,14 @@ export default class Fleet {
     const method = 'POST'
     const path = '/_fleet/secret'
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'fleet.post_secret'
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
-   * Search API where the search will only be executed after specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project.
+   * The purpose of the fleet search API is to provide an API where the search will only be executed after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.
    */
   async search<TDocument = unknown> (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FleetSearchResponse<TDocument>>
   async search<TDocument = unknown> (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FleetSearchResponse<TDocument>, unknown>>
@@ -215,6 +243,12 @@
     const method = body != null ? 'POST' : 'GET'
     const path = `/${encodeURIComponent(params.index.toString())}/_fleet/_fleet_search`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'fleet.search',
+      pathParts: {
+        index: params.index
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
 }
diff --git a/src/api/api/get.ts b/src/api/api/get.ts
index 96f31255e..0d742a15f 100644
--- a/src/api/api/get.ts
+++ b/src/api/api/get.ts
@@ -28,6 +28,7 @@
 import {
   Transport,
+  TransportRequestMetadata,
   TransportRequestOptions,
   TransportRequestOptionsWithMeta,
   TransportRequestOptionsWithOutMeta,
@@ -60,5 +61,12 @@ export default async function GetApi<TDocument = unknown> (this: That, params: T
   const method = 'GET'
   const path = `/${encodeURIComponent(params.index.toString())}/_doc/${encodeURIComponent(params.id.toString())}`
-  return await this.transport.request({ path, method, querystring, body }, options)
+  const meta: TransportRequestMetadata = {
+    name: 'get',
+    pathParts: {
+      id: params.id,
+      index: params.index
+    }
+  }
+  return await this.transport.request({ path, method, querystring, body, meta }, options)
 }
diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts
index b2c4a03d1..f0c396efb 100644
--- a/src/api/api/get_script.ts
+++ b/src/api/api/get_script.ts
@@ -28,6 +28,7 @@
 import {
   Transport,
+  TransportRequestMetadata,
   TransportRequestOptions,
   TransportRequestOptionsWithMeta,
   TransportRequestOptionsWithOutMeta,
@@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

 /**
- * Returns a script.
+ * Retrieves a stored script or search template.
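
Rounding out the stored-script endpoints touched in this section (get and delete appear in the hunks above; `putScript` is shown only to set the example up), a sketch with a placeholder Painless script:

```ts
await client.putScript({
  id: 'calculate-discount',
  script: { lang: 'painless', source: "doc['price'].value * params.discount" }
})
const stored = await client.getScript({ id: 'calculate-discount' })
console.log(stored.found, stored.script?.source)
await client.deleteScript({ id: 'calculate-discount' })
```
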
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} */ export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -60,5 +61,11 @@ export default async function GetScriptApi (this: That, params: T.GetScriptReque const method = 'GET' const path = `/_scripts/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'get_script', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts index 78ceed3b3..f242c6870 100644 --- a/src/api/api/get_script_context.ts +++ b/src/api/api/get_script_context.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -61,5 +62,8 @@ export default async function GetScriptContextApi (this: That, params?: T.GetScr const method = 'GET' const path = '/_script_context' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'get_script_context' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts index 600baac46..1487bedf3 100644 --- a/src/api/api/get_script_languages.ts +++ b/src/api/api/get_script_languages.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -61,5 +62,8 @@ export default async function GetScriptLanguagesApi (this: That, params?: T.GetS const method = 'GET' const path = '/_script_language' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'get_script_languages' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index 7f92d9b74..517452614 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -60,5 +61,12 @@ export default async function GetSourceApi (this: That, par const method = 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_source/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'get_source', + pathParts: { + id: params.id, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index 666a40940..01d14aa5d 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,7 @@ export default class Graph { } /** - * Explore extracted and summarized information 
about the documents and terms in an index. + * Extracts and summarizes information about the documents and terms in an Elasticsearch data stream or index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/graph-explore-api.html | Elasticsearch API documentation} */ async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -78,6 +79,12 @@ export default class Graph { const method = body != null ? 'POST' : 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_graph/explore` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'graph.explore', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts index 4ad9a19c3..58e098339 100644 --- a/src/api/api/health_report.ts +++ b/src/api/api/health_report.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -68,5 +69,11 @@ export default async function HealthReportApi (this: That, params?: T.HealthRepo method = 'GET' path = '/_health_report' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'health_report', + pathParts: { + feature: params.feature + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index 23de35220..48c35c9fb 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,7 @@ export default class Ilm { } /** - * Deletes the specified lifecycle policy definition. A currently used policy cannot be deleted. + * Deletes the specified lifecycle policy definition. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-delete-lifecycle.html | Elasticsearch API documentation} */ async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -66,11 +67,17 @@ export default class Ilm { const method = 'DELETE' const path = `/_ilm/policy/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ilm.delete_lifecycle', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves information about the index's current lifecycle state, such as the currently executing phase, action, and step. + * Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures. 
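
A sketch of the explain call described above; the index name is a placeholder:

```ts
const explain = await client.ilm.explainLifecycle({ index: 'my-index-000001' })
for (const [index, state] of Object.entries(explain.indices)) {
  console.log(index, state) // current phase/action/step, timings, any failures
}
```
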
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-explain-lifecycle.html | Elasticsearch API documentation} */ async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -92,11 +99,17 @@ export default class Ilm { const method = 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_ilm/explain` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ilm.explain_lifecycle', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns the specified policy definition. Includes the policy version and last modified date. + * Retrieves a lifecycle policy. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-get-lifecycle.html | Elasticsearch API documentation} */ async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -126,7 +139,13 @@ export default class Ilm { method = 'GET' path = '/_ilm/policy' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ilm.get_lifecycle', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -153,11 +172,14 @@ export default class Ilm { const method = 'GET' const path = '/_ilm/status' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ilm.get_status' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Migrates the indices and ILM policies away from custom node attribute allocation routing to data tiers routing + * Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+ Using node roles enables ILM to automatically move the indices between data tiers. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-migrate-to-data-tiers.html | Elasticsearch API documentation} */ async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -192,7 +214,10 @@ export default class Ilm { const method = 'POST' const path = '/_ilm/migrate_to_data_tiers' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ilm.migrate_to_data_tiers' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -230,11 +255,17 @@ export default class Ilm { const method = 'POST' const path = `/_ilm/move/${encodeURIComponent(params.index.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ilm.move_to_step', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates a lifecycle policy + * Creates a lifecycle policy. 
If the specified policy exists, the policy is replaced and the policy version is incremented. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-put-lifecycle.html | Elasticsearch API documentation} */ async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -268,7 +299,13 @@ export default class Ilm { const method = 'PUT' const path = `/_ilm/policy/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ilm.put_lifecycle', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -294,7 +331,13 @@ export default class Ilm { const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_ilm/remove` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ilm.remove_policy', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -320,7 +363,13 @@ export default class Ilm { const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_ilm/retry` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ilm.retry', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -347,7 +396,10 @@ export default class Ilm { const method = 'POST' const path = '/_ilm/start' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ilm.start' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -374,6 +426,9 @@ export default class Ilm { const method = 'POST' const path = '/_ilm/stop' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ilm.stop' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/index.ts b/src/api/api/index.ts index b156d47c6..35508c4f9 100644 --- a/src/api/api/index.ts +++ b/src/api/api/index.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Creates or updates a document in an index. + * Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. 
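
The create-or-update semantics described above look like this from the caller's side; the index name and document are placeholders, and in the 8.x client the payload goes under `document`:

```ts
const result = await client.index({
  index: 'my-index',
  id: '1', // omit the id to always create with an auto-generated one
  document: { '@timestamp': new Date().toISOString(), message: 'hello' }
})
console.log(result.result) // 'created' on first write, 'updated' afterwards
```
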
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation} */ export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -72,5 +73,12 @@ export default async function IndexApi (this: That, params: method = 'POST' path = `/${encodeURIComponent(params.index.toString())}/_doc` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'index', + pathParts: { + id: params.id, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 3b90bc00f..7fad7c65c 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -66,11 +67,18 @@ export default class Indices { const method = 'PUT' const path = `/${encodeURIComponent(params.index.toString())}/_block/${encodeURIComponent(params.block.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.add_block', + pathParts: { + index: params.index, + block: params.block + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Performs the analysis process on a text and return the tokens breakdown of the text. + * Performs analysis on a text string and returns the resulting tokens. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html | Elasticsearch API documentation} */ async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -112,11 +120,17 @@ export default class Indices { method = body != null ? 'POST' : 'GET' path = '/_analyze' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.analyze', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Clears all or specific caches for one or more indices. + * Clears the caches of one or more indices. For data streams, the API clears the caches of the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -146,11 +160,17 @@ export default class Indices { method = 'POST' path = '/_cache/clear' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.clear_cache', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Clones an index + * Clones an existing index. 
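
Cloning requires the source index to be read-only first, which is a typical use of the add-block endpoint above; an illustrative sequence with placeholder index names:

```ts
// Block writes so the source index satisfies the clone precondition.
await client.indices.addBlock({ index: 'my-index', block: 'write' })
await client.indices.clone({ index: 'my-index', target: 'my-index-clone' })
```
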
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html | Elasticsearch API documentation} */ async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -184,7 +204,14 @@ export default class Indices { const method = 'PUT' const path = `/${encodeURIComponent(params.index.toString())}/_clone/${encodeURIComponent(params.target.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.clone', + pathParts: { + index: params.index, + target: params.target + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -210,11 +237,17 @@ export default class Indices { const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_close` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.close', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates an index with optional settings and mappings. + * Creates a new index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html | Elasticsearch API documentation} */ async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -248,11 +281,17 @@ export default class Indices { const method = 'PUT' const path = `/${encodeURIComponent(params.index.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.create', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates a data stream + * Create a data stream. Creates a data stream. You must have a matching index template with data stream enabled. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -274,11 +313,17 @@ export default class Indices { const method = 'PUT' const path = `/_data_stream/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.create_data_stream', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Provides statistics on operations happening in a data stream. + * Get data stream stats. Retrieves statistics for one or more data streams. 
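+   * @example
+   * // Minimal sketch under assumed names (configured `client` instance): fetch stats
+   * // for one data stream, or omit `name` to target all data streams.
+   * const stats = await client.indices.dataStreamsStats({ name: 'my-data-stream' })
+   * console.log(stats.data_stream_count, stats.total_store_size_bytes)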
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -308,11 +353,17 @@ export default class Indices { method = 'GET' path = '/_data_stream/_stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.data_streams_stats', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes an index. + * Deletes one or more indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html | Elasticsearch API documentation} */ async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -334,11 +385,17 @@ export default class Indices { const method = 'DELETE' const path = `/${encodeURIComponent(params.index.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.delete', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes an alias. + * Removes a data stream or index from an alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -367,11 +424,18 @@ export default class Indices { method = 'DELETE' path = `/${encodeURIComponent(params.index.toString())}/_aliases/${encodeURIComponent(params.name.toString())}` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.delete_alias', + pathParts: { + index: params.index, + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes the data stream lifecycle of the selected data streams. + * Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-lifecycle.html | Elasticsearch API documentation} */ async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -393,11 +457,17 @@ export default class Indices { const method = 'DELETE' const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.delete_data_lifecycle', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes a data stream. + * Delete data streams. Deletes one or more data streams and their backing indices. 
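+   * @example
+   * // Illustrative only; the stream name is a placeholder and assumes a configured
+   * // `client` instance. `name` accepts multiple comma-separated names and wildcards.
+   * await client.indices.deleteDataStream({ name: 'logs-app-*' })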
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation}
   */
  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteDataStreamResponse>
@@ -419,11 +489,17 @@ export default class Indices {
     const method = 'DELETE'
     const path = `/_data_stream/${encodeURIComponent(params.name.toString())}`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'indices.delete_data_stream',
+      pathParts: {
+        name: params.name
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
-   * Deletes an index template.
+   * Delete an index template. The provided <index-template> may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template.html | Elasticsearch API documentation}
    */
   async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteIndexTemplateResponse>
@@ -445,11 +521,17 @@ export default class Indices {
     const method = 'DELETE'
     const path = `/_index_template/${encodeURIComponent(params.name.toString())}`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'indices.delete_index_template',
+      pathParts: {
+        name: params.name
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
-   * Deletes an index template.
+   * Deletes a legacy index template.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template-v1.html | Elasticsearch API documentation}
    */
   async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteTemplateResponse>
@@ -471,11 +553,17 @@ export default class Indices {
     const method = 'DELETE'
     const path = `/_template/${encodeURIComponent(params.name.toString())}`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'indices.delete_template',
+      pathParts: {
+        name: params.name
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
-   * Analyzes the disk usage of each field of an index or data stream
+   * Analyzes the disk usage of each field of an index or data stream.
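+   * @example
+   * // Sketch with a placeholder index name; assumes a configured `client` instance.
+   * // `run_expensive_tasks: true` is required for an exact report and makes this
+   * // analysis costly on large indices.
+   * const usage = await client.indices.diskUsage({ index: 'my-index', run_expensive_tasks: true })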
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html | Elasticsearch API documentation} */ async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -497,11 +585,17 @@ export default class Indices { const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_disk_usage` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.disk_usage', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Downsample an index + * Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-downsample-data-stream.html | Elasticsearch API documentation} */ async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -528,11 +622,18 @@ export default class Indices { const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_downsample/${encodeURIComponent(params.target_index.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.downsample', + pathParts: { + index: params.index, + target_index: params.target_index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about whether a particular index exists. + * Checks if a data stream, index, or alias exists. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html | Elasticsearch API documentation} */ async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -554,11 +655,17 @@ export default class Indices { const method = 'HEAD' const path = `/${encodeURIComponent(params.index.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.exists', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about whether a particular alias exists. + * Checks if an alias exists. 
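+   * @example
+   * // Minimal sketch (alias name is a placeholder, `client` assumed configured);
+   * // this HEAD request resolves to a boolean.
+   * const aliasExists = await client.indices.existsAlias({ name: 'my-alias' })
+   * if (!aliasExists) console.log('alias is missing')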
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -587,7 +694,14 @@ export default class Indices { method = 'HEAD' path = `/_alias/${encodeURIComponent(params.name.toString())}` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.exists_alias', + pathParts: { + name: params.name, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -613,11 +727,17 @@ export default class Indices { const method = 'HEAD' const path = `/_index_template/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.exists_index_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about whether a particular index template exists. + * Check existence of index templates. Returns information about whether a particular index template exists. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-template-exists-v1.html | Elasticsearch API documentation} */ async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -639,7 +759,13 @@ export default class Indices { const method = 'HEAD' const path = `/_template/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.exists_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -665,11 +791,17 @@ export default class Indices { const method = 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_lifecycle/explain` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.explain_data_lifecycle', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns the field usage stats for each field of an index + * Returns field usage information for each shard and field of an index. 
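+   * @example
+   * // Sketch with a placeholder index name (configured `client` assumed): report
+   * // which fields have actually been used by searches since the last node restart.
+   * const fieldUsage = await client.indices.fieldUsageStats({ index: 'my-index' })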
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html | Elasticsearch API documentation} */ async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -691,11 +823,17 @@ export default class Indices { const method = 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_field_usage_stats` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.field_usage_stats', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Performs the flush operation on one or more indices. + * Flushes one or more data streams or indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html | Elasticsearch API documentation} */ async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -725,7 +863,13 @@ export default class Indices { method = body != null ? 'POST' : 'GET' path = '/_flush' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.flush', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -759,11 +903,17 @@ export default class Indices { method = 'POST' path = '/_forcemerge' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.forcemerge', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about one or more indices. + * Returns information about one or more indices. For data streams, the API returns information about the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html | Elasticsearch API documentation} */ async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -785,11 +935,17 @@ export default class Indices { const method = 'GET' const path = `/${encodeURIComponent(params.index.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.get', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns an alias. + * Retrieves information for one or more aliases. 
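+   * @example
+   * // Illustrative sketch (placeholder index name, configured `client` assumed):
+   * // list the aliases attached to one index, or omit `index` to retrieve every alias.
+   * const aliases = await client.indices.getAlias({ index: 'my-index' })
+   * console.log(Object.keys(aliases))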
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -825,11 +981,18 @@ export default class Indices { method = 'GET' path = '/_alias' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.get_alias', + pathParts: { + name: params.name, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns the data stream lifecycle of the selected data streams. + * Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle.html | Elasticsearch API documentation} */ async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -851,11 +1014,17 @@ export default class Indices { const method = 'GET' const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.get_data_lifecycle', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns data streams. + * Get data streams. Retrieves information about one or more data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -885,11 +1054,17 @@ export default class Indices { method = 'GET' path = '/_data_stream' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.get_data_stream', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns mapping for one or more fields. + * Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html | Elasticsearch API documentation} */ async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -918,11 +1093,18 @@ export default class Indices { method = 'GET' path = `/_mapping/field/${encodeURIComponent(params.fields.toString())}` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.get_field_mapping', + pathParts: { + fields: params.fields, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns an index template. + * Get index templates. Returns information about one or more index templates. 
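+   * @example
+   * // Sketch under assumed names (configured `client`); `name` supports wildcards
+   * // and can be omitted to list all composable index templates.
+   * const { index_templates } = await client.indices.getIndexTemplate({ name: 'logs-*' })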
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template.html | Elasticsearch API documentation} */ async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -952,11 +1134,17 @@ export default class Indices { method = 'GET' path = '/_index_template' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.get_index_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns mappings for one or more indices. + * Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html | Elasticsearch API documentation} */ async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -986,11 +1174,17 @@ export default class Indices { method = 'GET' path = '/_mapping' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.get_mapping', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns settings for one or more indices. + * Returns setting information for one or more indices. For data streams, returns setting information for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1026,11 +1220,18 @@ export default class Indices { method = 'GET' path = '/_settings' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.get_settings', + pathParts: { + index: params.index, + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns an index template. + * Get index templates. Retrieves information about one or more index templates. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template-v1.html | Elasticsearch API documentation} */ async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1060,11 +1261,17 @@ export default class Indices { method = 'GET' path = '/_template' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.get_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Migrates an alias to a data stream + * Convert an index alias to a data stream. Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. 
The alias must meet the following criteria: The alias must have a write index; All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; The alias must not have any filters; The alias must not use custom routing. If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1086,11 +1293,17 @@ export default class Indices { const method = 'POST' const path = `/_data_stream/_migrate/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.migrate_to_data_stream', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Modifies a data stream + * Update data streams. Performs one or more data stream modification actions in a single atomic operation. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1124,11 +1337,14 @@ export default class Indices { const method = 'POST' const path = '/_data_stream/_modify' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.modify_data_stream' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Opens an index. + * Opens a closed index. For data streams, the API opens any closed backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html | Elasticsearch API documentation} */ async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1150,7 +1366,13 @@ export default class Indices { const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_open` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.open', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1176,11 +1398,17 @@ export default class Indices { const method = 'POST' const path = `/_data_stream/_promote/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.promote_data_stream', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates or updates an alias. + * Adds a data stream or index to an alias. 
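+   * @example
+   * // Hypothetical names throughout (configured `client` assumed): point an alias at
+   * // an index and mark it as the write index, so index requests sent to the alias
+   * // resolve to this index.
+   * await client.indices.putAlias({ index: 'my-index-000002', name: 'my-alias', is_write_index: true })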
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1221,11 +1449,18 @@ export default class Indices { method = 'PUT' path = `/${encodeURIComponent(params.index.toString())}/_aliases/${encodeURIComponent(params.name.toString())}` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.put_alias', + pathParts: { + index: params.index, + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Updates the data stream lifecycle of the selected data streams. + * Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-lifecycle.html | Elasticsearch API documentation} */ async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1259,11 +1494,17 @@ export default class Indices { const method = 'PUT' const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.put_data_lifecycle', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates or updates an index template. + * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-template.html | Elasticsearch API documentation} */ async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1297,11 +1538,17 @@ export default class Indices { const method = 'PUT' const path = `/_index_template/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.put_index_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Updates the index mappings. + * Adds new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields. For data streams, these changes are applied to all backing indices by default. 
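+   * @example
+   * // Minimal sketch (placeholder names, configured `client` assumed): add a new
+   * // field to an existing index mapping. Existing field types cannot be changed
+   * // this way.
+   * await client.indices.putMapping({
+   *   index: 'my-index',
+   *   properties: { tags: { type: 'keyword' } }
+   * })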
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html | Elasticsearch API documentation} */ async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1335,11 +1582,17 @@ export default class Indices { const method = 'PUT' const path = `/${encodeURIComponent(params.index.toString())}/_mapping` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.put_mapping', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Updates the index settings. + * Changes a dynamic index setting in real time. For data streams, index setting changes are applied to all backing indices by default. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html | Elasticsearch API documentation} */ async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1373,11 +1626,17 @@ export default class Indices { method = 'PUT' path = '/_settings' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.put_settings', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates or updates an index template. + * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates-v1.html | Elasticsearch API documentation} */ async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1411,11 +1670,17 @@ export default class Indices { const method = 'PUT' const path = `/_template/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.put_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about ongoing index shard recoveries. + * Returns information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html | Elasticsearch API documentation} */ async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1445,11 +1710,17 @@ export default class Indices { method = 'GET' path = '/_recovery' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.recovery', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Performs the refresh operation in one or more indices. 
+ * A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html | Elasticsearch API documentation} */ async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1479,7 +1750,13 @@ export default class Indices { method = body != null ? 'POST' : 'GET' path = '/_refresh' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.refresh', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1505,11 +1782,17 @@ export default class Indices { const method = body != null ? 'POST' : 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_reload_search_analyzers` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.reload_search_analyzers', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Resolves the specified index expressions to return information about each cluster, including the local cluster, if included. + * Resolves the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html | Elasticsearch API documentation} */ async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1531,11 +1814,17 @@ export default class Indices { const method = 'GET' const path = `/_resolve/cluster/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.resolve_cluster', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about any matching indices, aliases, and data streams + * Resolves the specified name(s) and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-index-api.html | Elasticsearch API documentation} */ async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1557,11 +1846,17 @@ export default class Indices { const method = 'GET' const path = `/_resolve/index/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.resolve_index', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Updates an alias to point to a new index when the existing index is considered to be too large or too old. 
+ * Creates a new index for a data stream or index alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html | Elasticsearch API documentation} */ async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1602,11 +1897,18 @@ export default class Indices { method = 'POST' path = `/${encodeURIComponent(params.alias.toString())}/_rollover` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.rollover', + pathParts: { + alias: params.alias, + new_index: params.new_index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Provides low-level information about segments in a Lucene index. + * Returns low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html | Elasticsearch API documentation} */ async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1636,11 +1938,17 @@ export default class Indices { method = 'GET' path = '/_segments' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.segments', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Provides store information for shard copies of indices. + * Retrieves store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html | Elasticsearch API documentation} */ async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1670,11 +1978,17 @@ export default class Indices { method = 'GET' path = '/_shard_stores' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.shard_stores', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Allow to shrink an existing index into a new index with fewer primary shards. + * Shrinks an existing index into a new index with fewer primary shards. 
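+   * @example
+   * // Illustrative sketch (placeholder names, configured `client` assumed): the
+   * // source index must be read-only, with all shards resident on one node, before
+   * // it can be shrunk.
+   * await client.indices.shrink({
+   *   index: 'my-index',
+   *   target: 'my-shrunk-index',
+   *   settings: { 'index.number_of_shards': 1 }
+   * })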
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html | Elasticsearch API documentation} */ async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1708,11 +2022,18 @@ export default class Indices { const method = 'PUT' const path = `/${encodeURIComponent(params.index.toString())}/_shrink/${encodeURIComponent(params.target.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.shrink', + pathParts: { + index: params.index, + target: params.target + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Simulate matching the given index name against the index templates in the system + * Simulate an index. Returns the index configuration that would be applied to the specified index from an existing index template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-index.html | Elasticsearch API documentation} */ async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1734,11 +2055,17 @@ export default class Indices { const method = 'POST' const path = `/_index_template/_simulate_index/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.simulate_index_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Simulate resolving the given template name or body + * Simulate an index template. Returns the index configuration that would be applied by a particular index template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-template.html | Elasticsearch API documentation} */ async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1780,11 +2107,17 @@ export default class Indices { method = 'POST' path = '/_index_template/_simulate' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.simulate_template', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Allows you to split an existing index into a new index with more primary shards. + * Splits an existing index into a new index with more primary shards. 
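+   * @example
+   * // Sketch with assumed names (configured `client`); the target's shard count must
+   * // be a multiple of the source index's shard count.
+   * await client.indices.split({
+   *   index: 'my-index',
+   *   target: 'my-split-index',
+   *   settings: { 'index.number_of_shards': 4 }
+   * })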
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html | Elasticsearch API documentation} */ async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1818,11 +2151,18 @@ export default class Indices { const method = 'PUT' const path = `/${encodeURIComponent(params.index.toString())}/_split/${encodeURIComponent(params.target.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.split', + pathParts: { + index: params.index, + target: params.target + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Provides statistics on operations happening in an index. + * Returns statistics for one or more indices. For data streams, the API retrieves statistics for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1858,11 +2198,18 @@ export default class Indices { method = 'GET' path = '/_stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.stats', + pathParts: { + metric: params.metric, + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. + * Unfreezes an index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/unfreeze-index-api.html | Elasticsearch API documentation} */ async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1884,11 +2231,17 @@ export default class Indices { const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_unfreeze` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.unfreeze', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Updates index aliases. + * Adds a data stream or index to an alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1923,11 +2276,14 @@ export default class Indices { const method = 'POST' const path = '/_aliases' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.update_aliases' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Allows a user to validate a potentially expensive query without executing it. + * Validates a potentially expensive query without executing it. 
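+   * @example
+   * // Minimal sketch (placeholder names, configured `client` assumed): check a query
+   * // for validity without running it; with `explain: true` the response describes
+   * // why an invalid query failed to parse.
+   * const { valid } = await client.indices.validateQuery({
+   *   index: 'my-index',
+   *   explain: true,
+   *   query: { match: { title: 'hello' } }
+   * })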
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html | Elasticsearch API documentation} */ async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1969,6 +2325,12 @@ export default class Indices { method = body != null ? 'POST' : 'GET' path = '/_validate/query' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'indices.validate_query', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 16dc527a1..ad69cb84a 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -73,7 +74,14 @@ export default class Inference { method = 'DELETE' path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'inference.delete', + pathParts: { + task_type: params.task_type, + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -110,11 +118,18 @@ export default class Inference { method = 'GET' path = '/_inference' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'inference.get', + pathParts: { + task_type: params.task_type, + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Perform inference + * Perform inference on the service * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html | Elasticsearch API documentation} */ async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -155,11 +170,18 @@ export default class Inference { method = 'POST' path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'inference.inference', + pathParts: { + task_type: params.task_type, + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Configure an inference endpoint for use in the Inference API + * Create an inference endpoint * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html | Elasticsearch API documentation} */ async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -193,6 +215,13 @@ export default class Inference { method = 'PUT' path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'inference.put', + pathParts: { + task_type: params.task_type, + inference_id: params.inference_id + } 
+ } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/info.ts b/src/api/api/info.ts index 3eca9959e..a6ac8226d 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -61,5 +62,8 @@ export default async function InfoApi (this: That, params?: T.InfoRequest | TB.I const method = 'GET' const path = '/' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'info' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index 2a877cc79..b332c6279 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,39 @@ export default class Ingest { } /** - * Deletes a pipeline. + * Deletes a geoip database configuration + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation} + */ + async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ingest/geoip/database/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ingest.delete_geoip_database', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Deletes one or more existing ingest pipeline. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html | Elasticsearch API documentation} */ async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -66,11 +99,17 @@ export default class Ingest { const method = 'DELETE' const path = `/_ingest/pipeline/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ingest.delete_pipeline', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns statistical information about geoip databases + * Gets download statistics for GeoIP2 databases used with the geoip processor. 
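+   * @example
+   * // Sketch (configured `client` assumed): this stats endpoint takes no parameters;
+   * // the response reports database download counts and per-node database state.
+   * const geoipStats = await client.ingest.geoIpStats()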
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/geoip-processor.html | Elasticsearch API documentation} */ async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -93,11 +132,53 @@ export default class Ingest { const method = 'GET' const path = '/_ingest/geoip/stats' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ingest.geo_ip_stats' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Returns geoip database configuration. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation} + */ + async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_ingest/geoip/database/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_ingest/geoip/database' + } + const meta: TransportRequestMetadata = { + name: 'ingest.get_geoip_database', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns a pipeline. + * Returns information about one or more ingest pipelines. This API returns a local reference of the pipeline. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html | Elasticsearch API documentation} */ async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -127,11 +208,17 @@ export default class Ingest { method = 'GET' path = '/_ingest/pipeline' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ingest.get_pipeline', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns a list of the built-in patterns. + * Extracts structured fields out of a single text field within a document. You choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. 
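+   * @example
+   * // Minimal sketch (configured `client` assumed): retrieve the built-in grok
+   * // patterns that ship with the grok processor.
+   * const { patterns } = await client.ingest.processorGrok()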
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html | Elasticsearch API documentation} */ async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -154,11 +241,46 @@ export default class Ingest { const method = 'GET' const path = '/_ingest/processor/grok' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ingest.processor_grok' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Puts the configuration for a geoip database to be downloaded + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation} + */ + async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_ingest/geoip/database/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ingest.put_geoip_database', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates or updates a pipeline. + * Creates or updates an ingest pipeline. Changes made using this API take effect immediately. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ingest.html | Elasticsearch API documentation} */ async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -192,11 +314,17 @@ export default class Ingest { const method = 'PUT' const path = `/_ingest/pipeline/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ingest.put_pipeline', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Allows to simulate a pipeline with example documents. + * Executes an ingest pipeline against a set of provided documents. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html | Elasticsearch API documentation} */ async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -238,6 +366,12 @@ export default class Ingest { method = body != null ? 
'POST' : 'GET' path = '/_ingest/pipeline/_simulate' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ingest.simulate', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index 460826e43..fe30ca47a 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -72,5 +73,11 @@ export default async function KnnSearchApi (this: That, par const method = body != null ? 'POST' : 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_knn_search` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'knn_search', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/license.ts b/src/api/api/license.ts index f1301d25f..9952f9f29 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -67,11 +68,14 @@ export default class License { const method = 'DELETE' const path = '/_license' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'license.delete' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves licensing information for the cluster + * This API returns information about the type of license, when it was issued, and when it expires, for example. For more information about the different types of licenses, see https://www.elastic.co/subscriptions. 
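As a quick sketch of the read side (reusing the client instance from the earlier sketch):

const res = await client.license.get()
console.log(res.license.type, res.license.status) // e.g. 'basic', 'active'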
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html | Elasticsearch API documentation} */ async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -94,7 +98,10 @@ export default class License { const method = 'GET' const path = '/_license' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'license.get' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -121,7 +128,10 @@ export default class License { const method = 'GET' const path = '/_license/basic_status' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'license.get_basic_status' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -148,7 +158,10 @@ export default class License { const method = 'GET' const path = '/_license/trial_status' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'license.get_trial_status' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -187,11 +200,14 @@ export default class License { const method = 'PUT' const path = '/_license' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'license.post' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Starts an indefinite basic license. + * The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html | Elasticsearch API documentation} */ async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -214,11 +230,14 @@ export default class License { const method = 'POST' const path = '/_license/start_basic' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'license.post_start_basic' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * starts a limited time trial license. + * The start trial API enables you to start a 30-day trial, which gives access to all subscription features. 
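Because the trial can only be started once per major version, callers should check the outcome. A hedged sketch; the response field names are taken from the current typings:

const trial = await client.license.postStartTrial({ acknowledge: true })
if (!trial.trial_was_started) {
  console.error(trial.error_message) // e.g. the trial was already activated
}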
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html | Elasticsearch API documentation} */ async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -241,6 +260,9 @@ export default class License { const method = 'POST' const path = '/_license/start_trial' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'license.post_start_trial' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index 9367e308a..f92f8c5c3 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,7 @@ export default class Logstash { } /** - * Deletes Logstash Pipelines used by Central Management + * Deletes a pipeline used for Logstash Central Management. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-delete-pipeline.html | Elasticsearch API documentation} */ async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -66,11 +67,17 @@ export default class Logstash { const method = 'DELETE' const path = `/_logstash/pipeline/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'logstash.delete_pipeline', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves Logstash Pipelines used by Central Management + * Retrieves pipelines used for Logstash Central Management. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-get-pipeline.html | Elasticsearch API documentation} */ async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -100,11 +107,17 @@ export default class Logstash { method = 'GET' path = '/_logstash/pipeline' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'logstash.get_pipeline', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Adds and updates Logstash Pipelines used for Central Management + * Creates or updates a pipeline used for Logstash Central Management. 
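A sketch of the companion read and delete calls from this same file (the pipeline ID is illustrative; a put requires the full pipeline document and is omitted here):

// Omitting `id` lists every centrally managed pipeline:
const all = await client.logstash.getPipeline()
const one = await client.logstash.getPipeline({ id: 'my-pipeline' })
await client.logstash.deletePipeline({ id: 'my-pipeline' })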
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-put-pipeline.html | Elasticsearch API documentation} */ async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -131,6 +144,12 @@ export default class Logstash { const method = 'PUT' const path = `/_logstash/pipeline/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'logstash.put_pipeline', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts index bf6717a24..75ffcaef0 100644 --- a/src/api/api/mget.ts +++ b/src/api/api/mget.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -80,5 +81,11 @@ export default async function MgetApi (this: That, params?: method = body != null ? 'POST' : 'GET' path = '/_mget' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'mget', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts index 52b361a5a..48bb46c23 100644 --- a/src/api/api/migration.ts +++ b/src/api/api/migration.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -74,7 +75,13 @@ export default class Migration { method = 'GET' path = '/_migration/deprecations' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'migration.deprecations', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -101,7 +108,10 @@ export default class Migration { const method = 'GET' const path = '/_migration/system_features' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'migration.get_feature_upgrade_status' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -128,6 +138,9 @@ export default class Migration { const method = 'POST' const path = '/_migration/system_features' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'migration.post_feature_upgrade' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 2646643a9..e0805125a 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,7 @@ export default class Ml { } /** - * Clear the cached results from a trained model deployment + * Clears a trained model deployment cache on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. 
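A sketch for the cache-clearing endpoint whose description begins above; the model ID is hypothetical and the model must already be deployed:

// Drops cached inference responses on every node serving the deployment:
await client.ml.clearTrainedModelDeploymentCache({ model_id: 'my-model' })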
As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-trained-model-deployment-cache.html | Elasticsearch API documentation} */ async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -66,11 +67,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/cache/_clear` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.clear_trained_model_deployment_cache', + pathParts: { + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. + * Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-close-job.html | Elasticsearch API documentation} */ async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -104,11 +111,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_close` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.close_job', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes a calendar. + * Removes all scheduled events from a calendar, then deletes it. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar.html | Elasticsearch API documentation} */ async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -130,7 +143,13 @@ export default class Ml { const method = 'DELETE' const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_calendar', + pathParts: { + calendar_id: params.calendar_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -156,7 +175,14 @@ export default class Ml { const method = 'DELETE' const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/events/${encodeURIComponent(params.event_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_calendar_event', + pathParts: { + calendar_id: params.calendar_id, + event_id: params.event_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -182,11 +208,18 @@ export default class Ml { const method = 'DELETE' const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/jobs/${encodeURIComponent(params.job_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_calendar_job', + pathParts: { + calendar_id: params.calendar_id, + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes an existing data frame analytics job. + * Deletes a data frame analytics job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-dfanalytics.html | Elasticsearch API documentation} */ async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -208,7 +241,13 @@ export default class Ml { const method = 'DELETE' const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_data_frame_analytics', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -234,11 +273,17 @@ export default class Ml { const method = 'DELETE' const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_datafeed', + pathParts: { + datafeed_id: params.datafeed_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes expired and unused machine learning data. + * Deletes expired and unused machine learning data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. 
You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-expired-data.html | Elasticsearch API documentation} */ async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -280,11 +325,17 @@ export default class Ml { method = 'DELETE' path = '/_ml/_delete_expired_data' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_expired_data', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes a filter. + * Deletes a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-filter.html | Elasticsearch API documentation} */ async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -306,11 +357,17 @@ export default class Ml { const method = 'DELETE' const path = `/_ml/filters/${encodeURIComponent(params.filter_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_filter', + pathParts: { + filter_id: params.filter_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes forecasts from a machine learning job. + * Deletes forecasts from a machine learning job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-forecast.html | Elasticsearch API documentation} */ async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -339,11 +396,17 @@ export default class Ml { method = 'DELETE' path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_forecast` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_forecast', + pathParts: { + job_id: params.job_id, + forecast_id: params.forecast_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes an existing anomaly detection job. + * Delete an anomaly detection job. All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request.
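A sketch of the delete-job flow just described; the job ID is illustrative, and force: true skips closing the job first:

// Deletes the job, first deleting its datafeed if one exists:
await client.ml.deleteJob({ job_id: 'my-job', force: true, wait_for_completion: true })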
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-job.html | Elasticsearch API documentation} */ async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -365,11 +429,17 @@ export default class Ml { const method = 'DELETE' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_job', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes an existing model snapshot. + * Deletes an existing model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-snapshot.html | Elasticsearch API documentation} */ async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -391,7 +461,14 @@ export default class Ml { const method = 'DELETE' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_model_snapshot', + pathParts: { + job_id: params.job_id, + snapshot_id: params.snapshot_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -417,11 +494,17 @@ export default class Ml { const method = 'DELETE' const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_trained_model', + pathParts: { + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Deletes a model alias that refers to the trained model + * Deletes a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. 
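A sketch that matches the pathParts this hunk records; both identifiers are illustrative:

// Errors out if 'ner-current' no longer points at 'my-model', which guards
// against detaching an alias that was already reassigned:
await client.ml.deleteTrainedModelAlias({ model_id: 'my-model', model_alias: 'ner-current' })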
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-trained-models-aliases.html | Elasticsearch API documentation} */ async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -443,11 +526,18 @@ export default class Ml { const method = 'DELETE' const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/model_aliases/${encodeURIComponent(params.model_alias.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.delete_trained_model_alias', + pathParts: { + model_alias: params.model_alias, + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Estimates the model memory + * Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-apis.html | Elasticsearch API documentation} */ async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -482,11 +572,14 @@ export default class Ml { const method = 'POST' const path = '/_ml/anomaly_detectors/_estimate_model_memory' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.estimate_model_memory' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Evaluates the data frame analytics for an annotated index. + * Evaluates the data frame analytics for an annotated index. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/evaluate-dfanalytics.html | Elasticsearch API documentation} */ async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -520,11 +613,14 @@ export default class Ml { const method = 'POST' const path = '/_ml/data_frame/_evaluate' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.evaluate_data_frame' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Explains a data frame analytics config. + * Explains a data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. 
* @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/explain-dfanalytics.html | Elasticsearch API documentation} */ async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -566,11 +662,17 @@ export default class Ml { method = body != null ? 'POST' : 'GET' path = '/_ml/data_frame/analytics/_explain' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.explain_data_frame_analytics', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Forces any buffered data to be processed by the job. + * Forces any buffered data to be processed by the job. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-flush-job.html | Elasticsearch API documentation} */ async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -604,11 +706,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_flush` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.flush_job', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Predicts the future behavior of a time series by using its historical behavior. + * Predicts the future behavior of a time series by using its historical behavior. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-forecast.html | Elasticsearch API documentation} */ async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -642,11 +750,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_forecast` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.forecast', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves anomaly detection job results for one or more buckets. + * Retrieves anomaly detection job results for one or more buckets. The API presents a chronological view of the records, grouped by bucket. 
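A sketch of pulling bucket results as described above; the score threshold and time range are illustrative:

const res = await client.ml.getBuckets({
  job_id: 'my-job',
  anomaly_score: 75, // only buckets scoring at least 75
  start: '2024-01-01T00:00:00Z',
  end: '2024-01-08T00:00:00Z'
})
for (const bucket of res.buckets) {
  console.log(bucket.timestamp, bucket.anomaly_score)
}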
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-bucket.html | Elasticsearch API documentation} */ async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -687,7 +801,14 @@ export default class Ml { method = body != null ? 'POST' : 'GET' path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/buckets` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_buckets', + pathParts: { + job_id: params.job_id, + timestamp: params.timestamp + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -713,7 +834,13 @@ export default class Ml { const method = 'GET' const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/events` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_calendar_events', + pathParts: { + calendar_id: params.calendar_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -759,7 +886,13 @@ export default class Ml { method = body != null ? 'POST' : 'GET' path = '/_ml/calendars' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_calendars', + pathParts: { + calendar_id: params.calendar_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -804,11 +937,18 @@ export default class Ml { method = body != null ? 'POST' : 'GET' path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/categories` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_categories', + pathParts: { + job_id: params.job_id, + category_id: params.category_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves configuration information for data frame analytics jobs. + * Retrieves configuration information for data frame analytics jobs. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics.html | Elasticsearch API documentation} */ async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -838,7 +978,13 @@ export default class Ml { method = 'GET' path = '/_ml/data_frame/analytics' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_data_frame_analytics', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -872,11 +1018,17 @@ export default class Ml { method = 'GET' path = '/_ml/data_frame/analytics/_stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_data_frame_analytics_stats', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves usage information for datafeeds. + * Retrieves usage information for datafeeds. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed-stats.html | Elasticsearch API documentation} */ async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -906,11 +1058,17 @@ export default class Ml { method = 'GET' path = '/_ml/datafeeds/_stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_datafeed_stats', + pathParts: { + datafeed_id: params.datafeed_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves configuration information for datafeeds. + * Retrieves configuration information for datafeeds. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. This API returns a maximum of 10,000 datafeeds. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed.html | Elasticsearch API documentation} */ async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -940,11 +1098,17 @@ export default class Ml { method = 'GET' path = '/_ml/datafeeds' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_datafeeds', + pathParts: { + datafeed_id: params.datafeed_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves filters. + * Retrieves filters. You can get a single filter or all filters.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-filter.html | Elasticsearch API documentation} */ async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -974,11 +1138,17 @@ export default class Ml { method = 'GET' path = '/_ml/filters' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_filters', + pathParts: { + filter_id: params.filter_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves anomaly detection job results for one or more influencers. + * Retrieves anomaly detection job results for one or more influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-influencer.html | Elasticsearch API documentation} */ async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1012,7 +1182,13 @@ export default class Ml { const method = body != null ? 'POST' : 'GET' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/influencers` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_influencers', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1046,11 +1222,17 @@ export default class Ml { method = 'GET' path = '/_ml/anomaly_detectors/_stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_job_stats', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves configuration information for anomaly detection jobs. + * Retrieves configuration information for anomaly detection jobs. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job.html | Elasticsearch API documentation} */ async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1080,11 +1262,17 @@ export default class Ml { method = 'GET' path = '/_ml/anomaly_detectors' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_jobs', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information on how ML is using memory. + * Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-memory.html | Elasticsearch API documentation} */ async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1114,11 +1302,17 @@ export default class Ml { method = 'GET' path = '/_ml/memory/_stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_memory_stats', + pathParts: { + node_id: params.node_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Gets stats for anomaly detection job model snapshot upgrades that are in progress. + * Retrieves usage information for anomaly detection job model snapshot upgrades. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-model-snapshot-upgrade-stats.html | Elasticsearch API documentation} */ async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1140,7 +1334,14 @@ export default class Ml { const method = 'GET' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_upgrade/_stats` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_model_snapshot_upgrade_stats', + pathParts: { + job_id: params.job_id, + snapshot_id: params.snapshot_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1185,11 +1386,18 @@ export default class Ml { method = body != null ? 'POST' : 'GET' path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_model_snapshots', + pathParts: { + job_id: params.job_id, + snapshot_id: params.snapshot_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. + * Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. 
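The top_n behaviour described above is easiest to see in a call: with three jobs and top_n: 2, at least two jobs must score a bucket highly before it clears the cutoff (job IDs illustrative):

const res = await client.ml.getOverallBuckets({
  job_id: 'job-1,job-2,job-3',
  top_n: 2,
  overall_score: 60
})
console.log(res.count, res.overall_buckets.length)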
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-overall-buckets.html | Elasticsearch API documentation} */ async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1223,11 +1431,17 @@ export default class Ml { const method = body != null ? 'POST' : 'GET' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/overall_buckets` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_overall_buckets', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves anomaly records for an anomaly detection job. + * Retrieves anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-record.html | Elasticsearch API documentation} */ async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1261,11 +1475,17 @@ export default class Ml { const method = body != null ? 'POST' : 'GET' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/records` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_records', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves configuration information for a trained inference model. + * Retrieves configuration information for a trained model. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models.html | Elasticsearch API documentation} */ async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1295,11 +1515,17 @@ export default class Ml { method = 'GET' path = '/_ml/trained_models' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_trained_models', + pathParts: { + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves usage information for trained inference models. + * Retrieves usage information for trained models. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models-stats.html | Elasticsearch API documentation} */ async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1329,11 +1555,17 @@ export default class Ml { method = 'GET' path = '/_ml/trained_models/_stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.get_trained_models_stats', + pathParts: { + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Evaluate a trained model. + * Evaluates a trained model. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html | Elasticsearch API documentation} */ async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1367,11 +1599,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/_infer` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.infer_trained_model', + pathParts: { + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns defaults and limits used by machine learning. + * Returns defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-info.html | Elasticsearch API documentation} */ async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1394,11 +1632,14 @@ export default class Ml { const method = 'GET' const path = '/_ml/info' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.info' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Opens one or more anomaly detection jobs. + * Open anomaly detection jobs. An anomaly detection job must be opened in order for it to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. 
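The open/close lifecycle described above, as a sketch (job ID illustrative):

await client.ml.openJob({ job_id: 'my-job' })
// ...start a datafeed or send data with the post data API...
await client.ml.closeJob({ job_id: 'my-job', timeout: '5m' })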
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-open-job.html | Elasticsearch API documentation} */ async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1432,11 +1673,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_open` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.open_job', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Posts scheduled events in a calendar. + * Adds scheduled events to a calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-calendar-event.html | Elasticsearch API documentation} */ async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1470,11 +1717,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/events` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.post_calendar_events', + pathParts: { + calendar_id: params.calendar_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Sends data to an anomaly detection job for analysis. + * Sends data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-data.html | Elasticsearch API documentation} */ async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1501,11 +1754,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_data` - return await this.transport.request({ path, method, querystring, bulkBody: body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.post_data', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) } /** - * Previews that will be analyzed given a data frame analytics config. + * Previews the extracted features used by a data frame analytics config. * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/preview-dfanalytics.html | Elasticsearch API documentation} */ async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1547,11 +1806,17 @@ export default class Ml { method = body != null ? 
'POST' : 'GET' path = '/_ml/data_frame/analytics/_preview' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.preview_data_frame_analytics', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Previews a datafeed. + * Previews a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-preview-datafeed.html | Elasticsearch API documentation} */ async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -1593,11 +1858,17 @@ export default class Ml { method = body != null ? 'POST' : 'GET' path = '/_ml/datafeeds/_preview' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.preview_datafeed', + pathParts: { + datafeed_id: params.datafeed_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Instantiates a calendar. + * Creates a calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar.html | Elasticsearch API documentation} */ async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1631,7 +1902,13 @@ export default class Ml { const method = 'PUT' const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.put_calendar', + pathParts: { + calendar_id: params.calendar_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1657,11 +1934,18 @@ export default class Ml { const method = 'PUT' const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/jobs/${encodeURIComponent(params.job_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.put_calendar_job', + pathParts: { + calendar_id: params.calendar_id, + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Instantiates a data frame analytics job. + * Instantiates a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. 
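A minimal sketch of creating such a job, assuming a configured `client`; the index names and the outlier detection analysis are illustrative:

// Hedged sketch: source and destination index names are placeholders.
await client.ml.putDataFrameAnalytics({
  id: 'weblog-outliers',
  source: { index: 'weblogs' },
  dest: { index: 'weblogs-outliers' },
  analysis: { outlier_detection: {} }
})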
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-dfanalytics.html | Elasticsearch API documentation} */ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1695,11 +1979,17 @@ export default class Ml { const method = 'PUT' const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.put_data_frame_analytics', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Instantiates a datafeed. + * Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-datafeed.html | Elasticsearch API documentation} */ async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1733,11 +2023,17 @@ export default class Ml { const method = 'PUT' const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.put_datafeed', + pathParts: { + datafeed_id: params.datafeed_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Instantiates a filter. + * Instantiates a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-filter.html | Elasticsearch API documentation} */ async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1771,11 +2067,17 @@ export default class Ml { const method = 'PUT' const path = `/_ml/filters/${encodeURIComponent(params.filter_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.put_filter', + pathParts: { + filter_id: params.filter_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Instantiates an anomaly detection job. + * Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index.
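A minimal job definition as a sketch, assuming a configured `client`; the job id, detector, and field names are illustrative:

// Hedged sketch: a single mean detector over an illustrative field.
await client.ml.putJob({
  job_id: 'response-time-job',
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'responsetime' }]
  },
  data_description: { time_field: 'timestamp' }
})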
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-job.html | Elasticsearch API documentation} */ async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1809,11 +2111,17 @@ export default class Ml { const method = 'PUT' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.put_job', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates an inference trained model. + * Enables you to supply a trained model that is not created by data frame analytics. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models.html | Elasticsearch API documentation} */ async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1847,11 +2155,17 @@ export default class Ml { const method = 'PUT' const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.put_trained_model', + pathParts: { + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates a new model alias (or reassigns an existing one) to refer to the trained model + * Creates or updates a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning. 
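A usage sketch, assuming a configured `client`; the alias and model id are illustrative, and `reassign: true` is what allows an existing alias to be moved to another model:

// Hedged sketch: repointing an alias at a newer model.
await client.ml.putTrainedModelAlias({
  model_alias: 'flight-delay-model',
  model_id: 'flight-delay-regression-1574775339910',
  reassign: true
})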
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models-aliases.html | Elasticsearch API documentation} */ async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1873,11 +2187,18 @@ export default class Ml { const method = 'PUT' const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/model_aliases/${encodeURIComponent(params.model_alias.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.put_trained_model_alias', + pathParts: { + model_alias: params.model_alias, + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates part of a trained model definition + * Creates part of a trained model definition. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-definition-part.html | Elasticsearch API documentation} */ async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1911,11 +2232,18 @@ export default class Ml { const method = 'PUT' const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/definition/${encodeURIComponent(params.part.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.put_trained_model_definition_part', + pathParts: { + model_id: params.model_id, + part: params.part + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates a trained model vocabulary + * Creates a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-vocabulary.html | Elasticsearch API documentation} */ async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1949,11 +2277,17 @@ export default class Ml { const method = 'PUT' const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/vocabulary` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.put_trained_model_vocabulary', + pathParts: { + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Resets an existing anomaly detection job. + * Resets an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-reset-job.html | Elasticsearch API documentation} */ async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1975,11 +2309,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_reset` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.reset_job', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Reverts to a specific snapshot. + * Reverts to a specific snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-revert-snapshot.html | Elasticsearch API documentation} */ async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2013,11 +2353,18 @@ export default class Ml { const method = 'POST' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_revert` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.revert_model_snapshot', + pathParts: { + job_id: params.job_id, + snapshot_id: params.snapshot_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. + * Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API. 
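A sketch of the upgrade workflow this description implies, assuming a configured `client`:

// Hedged sketch: halt ML tasks, perform the upgrade, then resume.
await client.ml.setUpgradeMode({ enabled: true, timeout: '10m' })
// ...restart nodes or reindex the machine learning indices here...
await client.ml.setUpgradeMode({ enabled: false })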
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-set-upgrade-mode.html | Elasticsearch API documentation} */ async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2040,11 +2387,14 @@ export default class Ml { const method = 'POST' const path = '/_ml/set_upgrade_mode' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.set_upgrade_mode' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Starts a data frame analytics job. + * Starts a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-dfanalytics.html | Elasticsearch API documentation} */ async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2066,11 +2416,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_start` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.start_data_frame_analytics', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Starts one or more datafeeds. + * Starts one or more datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. 
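A usage sketch, assuming a configured `client` and an illustrative job/datafeed pair; the job must be open before the datafeed starts:

// Hedged sketch: open the job first, then start its datafeed.
await client.ml.openJob({ job_id: 'my-anomaly-job' })
await client.ml.startDatafeed({ datafeed_id: 'datafeed-my-anomaly-job', start: 'now' })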
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-start-datafeed.html | Elasticsearch API documentation} */ async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2104,11 +2460,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_start` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.start_datafeed', + pathParts: { + datafeed_id: params.datafeed_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Start a trained model deployment. + * Starts a trained model deployment, which allocates the model to every machine learning node. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html | Elasticsearch API documentation} */ async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2130,11 +2492,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_start` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.start_trained_model_deployment', + pathParts: { + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Stops one or more data frame analytics jobs. + * Stops one or more data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-dfanalytics.html | Elasticsearch API documentation} */ async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2156,11 +2524,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_stop` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.stop_data_frame_analytics', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Stops one or more datafeeds. + * Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-stop-datafeed.html | Elasticsearch API documentation} */ async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2194,11 +2568,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_stop` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.stop_datafeed', + pathParts: { + datafeed_id: params.datafeed_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Stop a trained model deployment. + * Stops a trained model deployment. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html | Elasticsearch API documentation} */ async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2220,11 +2600,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_stop` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.stop_trained_model_deployment', + pathParts: { + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Updates certain properties of a data frame analytics job. + * Updates an existing data frame analytics job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-dfanalytics.html | Elasticsearch API documentation} */ async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2258,11 +2644,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_update` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.update_data_frame_analytics', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Updates certain properties of a datafeed. + * Updates the properties of a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. 
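Because updates only take effect once the datafeed restarts, a typical call sequence is stop, update, start; a sketch with an illustrative datafeed id:

// Hedged sketch: apply a new query_delay by cycling the datafeed.
await client.ml.stopDatafeed({ datafeed_id: 'datafeed-my-anomaly-job' })
await client.ml.updateDatafeed({ datafeed_id: 'datafeed-my-anomaly-job', query_delay: '90s' })
await client.ml.startDatafeed({ datafeed_id: 'datafeed-my-anomaly-job' })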
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-datafeed.html | Elasticsearch API documentation} */ async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2296,11 +2688,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_update` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.update_datafeed', + pathParts: { + datafeed_id: params.datafeed_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Updates the description of a filter, adds items, or removes items. + * Updates the description of a filter, adds items, or removes items from the list. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-filter.html | Elasticsearch API documentation} */ async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2334,7 +2732,13 @@ export default class Ml { const method = 'POST' const path = `/_ml/filters/${encodeURIComponent(params.filter_id.toString())}/_update` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.update_filter', + pathParts: { + filter_id: params.filter_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -2372,7 +2776,13 @@ export default class Ml { const method = 'POST' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_update` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.update_job', + pathParts: { + job_id: params.job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -2410,11 +2820,18 @@ export default class Ml { const method = 'POST' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_update` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.update_model_snapshot', + pathParts: { + job_id: params.job_id, + snapshot_id: params.snapshot_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Updates certain properties of trained model deployment. + * Updates certain properties of a trained model deployment.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-trained-model-deployment.html | Elasticsearch API documentation} */ async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2448,11 +2865,17 @@ export default class Ml { const method = 'POST' const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_update` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.update_trained_model_deployment', + pathParts: { + model_id: params.model_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Upgrades a given job snapshot to the current major version. + * Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-upgrade-job-model-snapshot.html | Elasticsearch API documentation} */ async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2474,7 +2897,14 @@ export default class Ml { const method = 'POST' const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_upgrade` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.upgrade_job_snapshot', + pathParts: { + job_id: params.job_id, + snapshot_id: params.snapshot_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -2513,7 +2943,10 @@ export default class Ml { const method = 'POST' const path = '/_ml/anomaly_detectors/_validate' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.validate' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -2544,6 +2977,9 @@ export default class Ml { const method = 'POST' const path = '/_ml/anomaly_detectors/_validate/detector' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ml.validate_detector' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index 86eff3cb7..f58cf06af 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -71,6 +72,12 @@ export default class Monitoring { const method = 'POST' const path = '/_monitoring/bulk' - return await 
this.transport.request({ path, method, querystring, bulkBody: body }, options) + const meta: TransportRequestMetadata = { + name: 'monitoring.bulk', + pathParts: { + type: params.type + } + } + return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) } } diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts index b799c5462..5d5fbc912 100644 --- a/src/api/api/msearch.ts +++ b/src/api/api/msearch.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -72,5 +73,11 @@ export default async function MsearchApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -72,5 +73,11 @@ export default async function MsearchTemplateApi @@ -66,11 +67,18 @@ export default class Nodes { const method = 'DELETE' const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/_repositories_metering/${encodeURIComponent(params.max_archive_version.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'nodes.clear_repositories_metering_archive', + pathParts: { + node_id: params.node_id, + max_archive_version: params.max_archive_version + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns cluster repositories metering information. + * You can use the cluster repositories metering API to retrieve repositories metering information in a cluster. This API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it won’t be present after node restarts. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-repositories-metering-api.html | Elasticsearch API documentation} */ async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -92,11 +100,17 @@ export default class Nodes { const method = 'GET' const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/_repositories_metering` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'nodes.get_repositories_metering_info', + pathParts: { + node_id: params.node_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about hot threads on each node in the cluster. + * This API yields a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of each node’s top hot threads. 
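A usage sketch, assuming a configured `client`; note the response is plain text rather than JSON, so it is printed as-is (the `threads` value is illustrative):

// Hedged sketch: sample the top three hot threads per node.
const hotThreads = await client.nodes.hotThreads({ threads: 3 })
console.log(hotThreads)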
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html | Elasticsearch API documentation} */ async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -126,11 +140,17 @@ export default class Nodes { method = 'GET' path = '/_nodes/hot_threads' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'nodes.hot_threads', + pathParts: { + node_id: params.node_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns information about nodes in the cluster. + * Returns cluster nodes information. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html | Elasticsearch API documentation} */ async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -166,11 +186,18 @@ export default class Nodes { method = 'GET' path = '/_nodes' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'nodes.info', + pathParts: { + node_id: params.node_id, + metric: params.metric + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Reloads secure settings. + * Reloads the keystore on nodes in the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings | Elasticsearch API documentation} */ async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -212,11 +239,17 @@ export default class Nodes { method = 'POST' path = '/_nodes/reload_secure_settings' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'nodes.reload_secure_settings', + pathParts: { + node_id: params.node_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns statistical information about nodes in the cluster. + * Returns cluster nodes statistics. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -258,11 +291,19 @@ export default class Nodes { method = 'GET' path = '/_nodes/stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'nodes.stats', + pathParts: { + node_id: params.node_id, + metric: params.metric, + index_metric: params.index_metric + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns low-level information about REST actions usage on nodes. + * Returns information on the usage of features. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html | Elasticsearch API documentation} */ async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -298,6 +339,13 @@ export default class Nodes { method = 'GET' path = '/_nodes/usage' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'nodes.usage', + pathParts: { + node_id: params.node_id, + metric: params.metric + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts index dbe2fb182..f0fdd689c 100644 --- a/src/api/api/open_point_in_time.ts +++ b/src/api/api/open_point_in_time.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Open a point in time that can be used in subsequent searches + * A search request by default executes against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html | Elasticsearch API documentation} */ export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -60,5 +61,11 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_pit` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'open_point_in_time', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts index b91a22fd3..e0a8011df 100644 --- a/src/api/api/ping.ts +++ b/src/api/api/ping.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -61,5 +62,8 @@ export default async function PingApi (this: That, params?: T.PingRequest | TB.P const method = 'HEAD' const path = '/' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ping' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/profiling.ts b/src/api/api/profiling.ts new file mode 100644 index 000000000..0d729387c --- /dev/null +++ b/src/api/api/profiling.ts @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Profiling { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + /** + * Extracts a UI-optimized structure to render flamegraphs from Universal Profiling. + * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + */ + async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_profiling/flamegraph' + const meta: TransportRequestMetadata = { + name: 'profiling.flamegraph' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Extracts raw stacktrace information from Universal Profiling. + * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + */ + async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_profiling/stacktraces' + const meta: TransportRequestMetadata = { + name: 'profiling.stacktraces' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Returns basic information about the status of Universal Profiling. + * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + */ + async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_profiling/status' + const meta: TransportRequestMetadata = { + name: 'profiling.status' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Extracts a list of topN functions from Universal Profiling. + * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + */ + async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_profiling/topn/functions' + const meta: TransportRequestMetadata = { + name: 'profiling.topn_functions' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts index 36ea03bd2..e7dbfc87e 100644 --- a/src/api/api/put_script.ts +++ b/src/api/api/put_script.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Creates or updates a script. + * Creates or updates a stored script or search template. 
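A sketch of storing a search template through this API, assuming a configured `client`; the template id and mustache source are illustrative:

// Hedged sketch: a stored search template takes a mustache source string.
await client.putScript({
  id: 'my-search-template',
  script: {
    lang: 'mustache',
    source: '{"query":{"match":{"{{field}}":"{{value}}"}}}'
  }
})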
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} */ export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -79,5 +80,12 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque method = 'PUT' path = `/_scripts/${encodeURIComponent(params.id.toString())}` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'put_script', + pathParts: { + id: params.id, + context: params.context + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts new file mode 100644 index 000000000..1a090d5ff --- /dev/null +++ b/src/api/api/query_rules.ts @@ -0,0 +1,295 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class QueryRules { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + /** + * Deletes a query rule within a query ruleset. 
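A sketch of the new namespace in use, assuming the generated client exposes it as `client.queryRules` (ruleset id, rule id, and rule contents are illustrative):

// Hedged sketch: pin a document for an exact query match, then remove the rule.
await client.queryRules.putRuleset({
  ruleset_id: 'promo-rules',
  rules: [{
    rule_id: 'rule-1',
    type: 'pinned',
    criteria: [{ type: 'exact', metadata: 'user_query', values: ['holiday sale'] }],
    actions: { ids: ['promo-doc-1'] }
  }]
})
await client.queryRules.deleteRule({ ruleset_id: 'promo-rules', rule_id: 'rule-1' })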
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-rule.html | Elasticsearch API documentation} + */ + async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise + async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['ruleset_id', 'rule_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}/_rule/${encodeURIComponent(params.rule_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.delete_rule', + pathParts: { + ruleset_id: params.ruleset_id, + rule_id: params.rule_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Deletes a query ruleset. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html | Elasticsearch API documentation} + */ + async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise + async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['ruleset_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.delete_ruleset', + pathParts: { + ruleset_id: params.ruleset_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Returns the details about a query rule within a query ruleset + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-rule.html | Elasticsearch API documentation} + */ + async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: 
TransportRequestOptions): Promise + async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['ruleset_id', 'rule_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}/_rule/${encodeURIComponent(params.rule_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.get_rule', + pathParts: { + ruleset_id: params.ruleset_id, + rule_id: params.rule_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Returns the details about a query ruleset + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-ruleset.html | Elasticsearch API documentation} + */ + async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise + async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['ruleset_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.get_ruleset', + pathParts: { + ruleset_id: params.ruleset_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Returns summarized information about existing query rulesets. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-query-rulesets.html | Elasticsearch API documentation} + */ + async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise + async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_query_rules' + const meta: TransportRequestMetadata = { + name: 'query_rules.list_rulesets' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates or updates a query rule within a query ruleset. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-rule.html | Elasticsearch API documentation} + */ + async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise + async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['ruleset_id', 'rule_id'] + const acceptedBody: string[] = ['type', 'criteria', 'actions', 'priority'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}/_rule/${encodeURIComponent(params.rule_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.put_rule', + pathParts: { + ruleset_id: params.ruleset_id, + rule_id: params.rule_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates or updates a query ruleset. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html | Elasticsearch API documentation} + */ + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['ruleset_id'] + const acceptedBody: string[] = ['rules'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.put_ruleset', + pathParts: { + ruleset_id: params.ruleset_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/query_ruleset.ts b/src/api/api/query_ruleset.ts deleted file mode 100644 index 771205b0e..000000000 --- a/src/api/api/query_ruleset.ts +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* eslint-disable import/export */ -/* eslint-disable @typescript-eslint/no-misused-new */ -/* eslint-disable @typescript-eslint/no-extraneous-class */ -/* eslint-disable @typescript-eslint/no-unused-vars */ - -// This file was automatically generated by elastic/elastic-client-generator-js -// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, -// and elastic/elastic-client-generator-js to regenerate this file again. - -import { - Transport, - TransportRequestOptions, - TransportRequestOptionsWithMeta, - TransportRequestOptionsWithOutMeta, - TransportResult -} from '@elastic/transport' -import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } - -export default class QueryRuleset { - transport: Transport - constructor (transport: Transport) { - this.transport = transport - } - - /** - * Deletes a query ruleset. 
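Taken together, the hunks above replace the coarse `queryRuleset` namespace (whose file is deleted below) with a finer-grained `queryRules` namespace that can address individual rules. A minimal usage sketch, assuming a configured client; the ruleset id, rule id, and values are placeholders, and the criteria/actions shapes follow the query rules documentation linked in the doc comments:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Create or update a single rule inside a ruleset (body fields mirror the
// acceptedBody list above: type, criteria, actions, priority).
await client.queryRules.putRule({
  ruleset_id: 'my-rules',
  rule_id: 'promote-docs',
  type: 'pinned',
  criteria: [{ type: 'exact', metadata: 'user_query', values: ['puggles'] }],
  actions: { ids: ['id1', 'id2'] }
})

// Read the rule back, then list every ruleset.
const rule = await client.queryRules.getRule({ ruleset_id: 'my-rules', rule_id: 'promote-docs' })
const rulesets = await client.queryRules.listRulesets()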
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html | Elasticsearch API documentation} - */ - async delete (this: That, params: T.QueryRulesetDeleteRequest | TB.QueryRulesetDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async delete (this: That, params: T.QueryRulesetDeleteRequest | TB.QueryRulesetDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async delete (this: That, params: T.QueryRulesetDeleteRequest | TB.QueryRulesetDeleteRequest, options?: TransportRequestOptions): Promise - async delete (this: That, params: T.QueryRulesetDeleteRequest | TB.QueryRulesetDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id'] - const querystring: Record = {} - const body = undefined - - for (const key in params) { - if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] - } - } - - const method = 'DELETE' - const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) - } - - /** - * Returns the details about a query ruleset. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-ruleset.html | Elasticsearch API documentation} - */ - async get (this: That, params: T.QueryRulesetGetRequest | TB.QueryRulesetGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async get (this: That, params: T.QueryRulesetGetRequest | TB.QueryRulesetGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async get (this: That, params: T.QueryRulesetGetRequest | TB.QueryRulesetGetRequest, options?: TransportRequestOptions): Promise - async get (this: That, params: T.QueryRulesetGetRequest | TB.QueryRulesetGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id'] - const querystring: Record = {} - const body = undefined - - for (const key in params) { - if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] - } - } - - const method = 'GET' - const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) - } - - /** - * Lists query rulesets. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-query-rulesets.html | Elasticsearch API documentation} - */ - async list (this: That, params?: T.QueryRulesetListRequest | TB.QueryRulesetListRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async list (this: That, params?: T.QueryRulesetListRequest | TB.QueryRulesetListRequest, options?: TransportRequestOptionsWithMeta): Promise> - async list (this: That, params?: T.QueryRulesetListRequest | TB.QueryRulesetListRequest, options?: TransportRequestOptions): Promise - async list (this: That, params?: T.QueryRulesetListRequest | TB.QueryRulesetListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined - - params = params ?? 
{} - for (const key in params) { - if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] - } - } - - const method = 'GET' - const path = '/_query_rules' - return await this.transport.request({ path, method, querystring, body }, options) - } - - /** - * Creates or updates a query ruleset. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html | Elasticsearch API documentation} - */ - async put (this: That, params: T.QueryRulesetPutRequest | TB.QueryRulesetPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async put (this: That, params: T.QueryRulesetPutRequest | TB.QueryRulesetPutRequest, options?: TransportRequestOptionsWithMeta): Promise> - async put (this: That, params: T.QueryRulesetPutRequest | TB.QueryRulesetPutRequest, options?: TransportRequestOptions): Promise - async put (this: That, params: T.QueryRulesetPutRequest | TB.QueryRulesetPutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id'] - const acceptedBody: string[] = ['rules'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } - - for (const key in params) { - if (acceptedBody.includes(key)) { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } else if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] - } - } - - const method = 'PUT' - const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) - } -} diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts index 5e0e1c263..010a984a6 100644 --- a/src/api/api/rank_eval.ts +++ b/src/api/api/rank_eval.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Allows to evaluate the quality of ranked search results over a set of typical search queries + * Enables you to evaluate the quality of ranked search results over a set of typical search queries. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html | Elasticsearch API documentation} */ export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -79,5 +80,11 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest method = body != null ? 
'POST' : 'GET' path = '/_rank_eval' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'rank_eval', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 17790d754..91c6e14d3 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -72,5 +73,8 @@ export default async function ReindexApi (this: That, params: T.ReindexRequest | const method = 'POST' const path = '/_reindex' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'reindex' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts index 3c12d0b7e..4fedb4ed3 100644 --- a/src/api/api/reindex_rethrottle.ts +++ b/src/api/api/reindex_rethrottle.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Changes the number of requests per second for a particular Reindex operation. + * Copies documents from a source to a destination. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html | Elasticsearch API documentation} */ export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -60,5 +61,11 @@ export default async function ReindexRethrottleApi (this: That, params: T.Reinde const method = 'POST' const path = `/_reindex/${encodeURIComponent(params.task_id.toString())}/_rethrottle` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'reindex_rethrottle', + pathParts: { + task_id: params.task_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index ef14b738c..cd31ab4c8 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Allows to use the Mustache language to pre-render a search definition. + * Renders a search template as a search request body. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/render-search-template-api.html | Elasticsearch API documentation} */ export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -80,5 +81,11 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re method = body != null ? 
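Every hunk in this patch follows the same shape: build a `TransportRequestMetadata` value and pass it to `transport.request` alongside path, method, querystring, and body. A standalone illustration of that shape, using the `rank_eval` values from the hunk above; the metadata is presumably consumed by the transport for instrumentation, such as naming spans or log entries:

import { TransportRequestMetadata } from '@elastic/transport'

const meta: TransportRequestMetadata = {
  name: 'rank_eval',                // canonical API name, stable across calls
  pathParts: { index: 'my-index' }  // raw path parameters, before URL encoding
}
// The generated methods then call:
//   this.transport.request({ path, method, querystring, body, meta }, options)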
'POST' : 'GET' path = '/_render/template' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'render_search_template', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index 26c5d5759..d9aad8fd7 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -66,7 +67,13 @@ export default class Rollup { const method = 'DELETE' const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'rollup.delete_job', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -100,7 +107,13 @@ export default class Rollup { method = 'GET' path = '/_rollup/job' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'rollup.get_jobs', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -134,11 +147,17 @@ export default class Rollup { method = 'GET' path = '/_rollup/data' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'rollup.get_rollup_caps', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored). + * Returns the rollup capabilities of all jobs inside of a rollup index (for example, the index where rollup data is stored). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html | Elasticsearch API documentation} */ async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -160,7 +179,13 @@ export default class Rollup { const method = 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_rollup/data` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'rollup.get_rollup_index_caps', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -198,11 +223,17 @@ export default class Rollup { const method = 'PUT' const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'rollup.put_job', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Enables searching rolled-up data using the standard query DSL. + * Enables searching rolled-up data using the standard Query DSL. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-search.html | Elasticsearch API documentation} */ async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -236,7 +267,13 @@ export default class Rollup { const method = body != null ? 'POST' : 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_rollup_search` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'rollup.rollup_search', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -262,7 +299,13 @@ export default class Rollup { const method = 'POST' const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}/_start` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'rollup.start_job', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -288,6 +331,12 @@ export default class Rollup { const method = 'POST' const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}/_stop` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'rollup.stop_job', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index e27a59952..33b66e3bd 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Allows an arbitrary script to be executed and a result to be returned + * Runs a script and returns a result. * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html | Elasticsearch API documentation} */ export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -73,5 +74,8 @@ export default async function ScriptsPainlessExecuteApi (this const method = body != null ? 
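A hedged sketch of the regenerated `rollup.rollupSearch` method from the hunks above, assuming a configured client and an existing rollup index named 'sensor_rollup' (a placeholder):

import { Client } from '@elastic/elasticsearch'

async function maxRolledUpTemperature (client: Client) {
  // Standard Query DSL against rolled-up data, per the doc comment above.
  const resp = await client.rollup.rollupSearch({
    index: 'sensor_rollup',
    size: 0,
    aggregations: { max_temperature: { max: { field: 'temperature' } } }
  })
  return resp.aggregations
}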
'POST' : 'GET' const path = '/_scripts/painless/_execute' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'scripts_painless_execute' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts index 77e291799..13b86d8ee 100644 --- a/src/api/api/scroll.ts +++ b/src/api/api/scroll.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -72,5 +73,11 @@ export default async function ScrollApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -86,5 +87,11 @@ export default async function SearchApi @@ -118,7 +131,13 @@ export default class SearchApplication { const method = 'GET' const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'search_application.get', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -152,7 +171,13 @@ export default class SearchApplication { method = 'GET' path = '/_application/analytics' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'search_application.get_behavioral_analytics', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -179,7 +204,10 @@ export default class SearchApplication { const method = 'GET' const path = '/_application/search_application' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'search_application.list' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -205,7 +233,14 @@ export default class SearchApplication { const method = 'POST' const path = `/_application/analytics/${encodeURIComponent(params.collection_name.toString())}/event/${encodeURIComponent(params.event_type.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'search_application.post_behavioral_analytics_event', + pathParts: { + collection_name: params.collection_name, + event_type: params.event_type + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -236,7 +271,13 @@ export default class SearchApplication { const method = 'PUT' const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'search_application.put', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -262,7 +303,13 @@ export default class SearchApplication { const method = 'PUT' const path = `/_application/analytics/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + 
name: 'search_application.put_behavioral_analytics', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -288,11 +335,17 @@ export default class SearchApplication { const method = 'POST' const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}/_render_query` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'search_application.render_query', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Perform a search against a search application + * Perform a search against a search application. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-search.html | Elasticsearch API documentation} */ async search> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -326,6 +379,12 @@ export default class SearchApplication { const method = body != null ? 'POST' : 'GET' const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}/_search` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'search_application.search', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index 5d3fbdbef..e81f4b2da 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -72,5 +73,15 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque const method = body != null ? 'POST' : 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_mvt/${encodeURIComponent(params.field.toString())}/${encodeURIComponent(params.zoom.toString())}/${encodeURIComponent(params.x.toString())}/${encodeURIComponent(params.y.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'search_mvt', + pathParts: { + index: params.index, + field: params.field, + zoom: params.zoom, + x: params.x, + y: params.y + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts index eaf609e73..c9b2b299d 100644 --- a/src/api/api/search_shards.ts +++ b/src/api/api/search_shards.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -68,5 +69,11 @@ export default async function SearchShardsApi (this: That, params?: T.SearchShar method = body != null ? 
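The `searchApplication.search` method regenerated above takes the application name plus template parameters. A minimal sketch, assuming an application named 'my-search-app' whose search template accepts a `query_string` parameter (both are placeholders):

import { Client } from '@elastic/elasticsearch'

async function searchMyApp (client: Client) {
  return await client.searchApplication.search({
    name: 'my-search-app',
    params: { query_string: 'web crawlers' }
  })
}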
'POST' : 'GET' path = '/_search_shards' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'search_shards', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index f8a5c3548..a158ad55a 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -38,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Allows to use the Mustache language to pre-render a search definition. + * Runs a search with a search template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-template.html | Elasticsearch API documentation} */ export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -80,5 +81,11 @@ export default async function SearchTemplateApi (this: That method = body != null ? 'POST' : 'GET' path = '/_search/template' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'search_template', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 73b6853bb..90ac0b9e2 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -74,7 +75,13 @@ export default class SearchableSnapshots { method = 'GET' path = '/_searchable_snapshots/cache/stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'searchable_snapshots.cache_stats', + pathParts: { + node_id: params.node_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -108,7 +115,13 @@ export default class SearchableSnapshots { method = 'POST' path = '/_searchable_snapshots/cache/clear' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'searchable_snapshots.clear_cache', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -146,7 +159,14 @@ export default class SearchableSnapshots { const method = 'POST' const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}/_mount` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'searchable_snapshots.mount', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -180,6 +200,12 @@ export default class SearchableSnapshots { method = 'GET' path = 
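For the `search_template` hunk above ("Runs a search with a search template."), a sketch using a template assumed to be already stored under the id 'my-search-template', against a placeholder index:

import { Client } from '@elastic/elasticsearch'

async function runStoredTemplate (client: Client) {
  return await client.searchTemplate({
    index: 'my-index',
    id: 'my-search-template',               // id of the stored template
    params: { query_string: 'hello world' } // values substituted by Mustache
  })
}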
'/_searchable_snapshots/stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'searchable_snapshots.stats', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 0d2d613bc..a810764df 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,7 @@ export default class Security { } /** - * Creates or updates the user profile on behalf of another user. + * Creates or updates a user profile on behalf of another user. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-activate-user-profile.html | Elasticsearch API documentation} */ async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -78,11 +79,14 @@ export default class Security { const method = 'POST' const path = '/_security/profile/_activate' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.activate_user_profile' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Enables authentication as a user and retrieve information about the authenticated user. + * Enables you to submit a request with a basic auth header to authenticate a user and retrieve information about the authenticated user. A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-authenticate.html | Elasticsearch API documentation} */ async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -105,7 +109,92 @@ export default class Security { const method = 'GET' const path = '/_security/_authenticate' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.authenticate' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. 
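Among the searchable-snapshots hunks above, `mount` attaches a snapshotted index as a searchable snapshot. A sketch with placeholder repository, snapshot, and index names:

import { Client } from '@elastic/elasticsearch'

async function mountSnapshot (client: Client) {
  await client.searchableSnapshots.mount({
    repository: 'my-repo',
    snapshot: 'my-snapshot',
    index: 'my-index',
    renamed_index: 'my-index-mounted' // optional new name for the mounted index
  })
}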
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-delete-role.html | Elasticsearch API documentation} + */ + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['names'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = '/_security/role' + const meta: TransportRequestMetadata = { + name: 'security.bulk_delete_role' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-put-role.html | Elasticsearch API documentation} + */ + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['roles'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_security/role' + const meta: TransportRequestMetadata = { + name: 'security.bulk_put_role' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -131,7 +220,10 @@ export default class Security { const method = 'POST' const path = '/_security/api_key/_bulk_update' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.bulk_update_api_keys' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -177,11 +269,17 @@ export default class Security { method = 'PUT' path = '/_security/user/_password' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.change_password', + pathParts: { + username: params.username + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Clear a subset or all entries from the API key cache. + * Evicts a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-api-key-cache.html | Elasticsearch API documentation} */ async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -203,7 +301,13 @@ export default class Security { const method = 'POST' const path = `/_security/api_key/${encodeURIComponent(params.ids.toString())}/_clear_cache` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.clear_api_key_cache', + pathParts: { + ids: params.ids + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -229,7 +333,13 @@ export default class Security { const method = 'POST' const path = `/_security/privilege/${encodeURIComponent(params.application.toString())}/_clear_cache` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.clear_cached_privileges', + pathParts: { + application: params.application + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -255,7 +365,13 @@ export default class Security { const method = 'POST' const path = `/_security/realm/${encodeURIComponent(params.realms.toString())}/_clear_cache` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.clear_cached_realms', + pathParts: { + realms: params.realms + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -281,7 +397,13 @@ export default class Security { const method = 'POST' const path = `/_security/role/${encodeURIComponent(params.name.toString())}/_clear_cache` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.clear_cached_roles', + pathParts: 
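The two new bulk role endpoints above operate on native roles only; as their doc comments note, roles defined in roles files are out of reach. A sketch with placeholder role names, mirroring the acceptedBody fields `roles` and `names` from the hunks:

import { Client } from '@elastic/elasticsearch'

async function manageRolesInBulk (client: Client) {
  // POST /_security/role with a map of role name -> role descriptor.
  await client.security.bulkPutRole({
    roles: {
      'logs-reader': { indices: [{ names: ['logs-*'], privileges: ['read'] }] },
      'cluster-operator': { cluster: ['monitor'] }
    }
  })
  // DELETE /_security/role with the list of names to remove.
  await client.security.bulkDeleteRole({ names: ['logs-reader', 'cluster-operator'] })
}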
{ + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -307,11 +429,19 @@ export default class Security { const method = 'POST' const path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential/token/${encodeURIComponent(params.name.toString())}/_clear_cache` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.clear_cached_service_tokens', + pathParts: { + namespace: params.namespace, + service: params.service, + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates an API key for access without requiring basic authentication. + * Creates an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-api-key.html | Elasticsearch API documentation} */ async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -346,7 +476,10 @@ export default class Security { const method = 'PUT' const path = '/_security/api_key' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.create_api_key' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -372,11 +505,14 @@ export default class Security { const method = 'POST' const path = '/_security/cross_cluster/api_key' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.create_cross_cluster_api_key' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates a service account token for access without requiring basic authentication. + * Creates a service accounts token for access without requiring basic authentication. 
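Per the expanded `createApiKey` doc comment above, API keys never expire unless an expiration is given. A sketch that sets an explicit one-day expiration and a restricted role descriptor; the key name and index pattern are placeholders:

import { Client } from '@elastic/elasticsearch'

async function issueApiKey (client: Client) {
  const key = await client.security.createApiKey({
    name: 'my-app-key',
    expiration: '1d',
    role_descriptors: {
      reader: { indices: [{ names: ['logs-*'], privileges: ['read'] }] }
    }
  })
  // Returns the key, its unique id and name, plus expiration in milliseconds.
  return { id: key.id, apiKey: key.api_key, expiresAt: key.expiration }
}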
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-service-token.html | Elasticsearch API documentation} */ async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -405,7 +541,15 @@ export default class Security { method = 'POST' path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential/token` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.create_service_token', + pathParts: { + namespace: params.namespace, + service: params.service, + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -431,7 +575,14 @@ export default class Security { const method = 'DELETE' const path = `/_security/privilege/${encodeURIComponent(params.application.toString())}/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.delete_privileges', + pathParts: { + application: params.application, + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -457,7 +608,13 @@ export default class Security { const method = 'DELETE' const path = `/_security/role/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.delete_role', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -483,7 +640,13 @@ export default class Security { const method = 'DELETE' const path = `/_security/role_mapping/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.delete_role_mapping', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -509,7 +672,15 @@ export default class Security { const method = 'DELETE' const path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential/token/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.delete_service_token', + pathParts: { + namespace: params.namespace, + service: params.service, + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -535,7 +706,13 @@ export default class Security { const method = 'DELETE' const path = `/_security/user/${encodeURIComponent(params.username.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.delete_user', + pathParts: { + username: params.username + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -561,7 +738,13 @@ export default class Security { const method = 
'PUT' const path = `/_security/user/${encodeURIComponent(params.username.toString())}/_disable` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.disable_user', + pathParts: { + username: params.username + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -587,7 +770,13 @@ export default class Security { const method = 'PUT' const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}/_disable` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.disable_user_profile', + pathParts: { + uid: params.uid + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -613,7 +802,13 @@ export default class Security { const method = 'PUT' const path = `/_security/user/${encodeURIComponent(params.username.toString())}/_enable` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.enable_user', + pathParts: { + username: params.username + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -639,11 +834,17 @@ export default class Security { const method = 'PUT' const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}/_enable` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.enable_user_profile', + pathParts: { + uid: params.uid + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Allows a kibana instance to configure itself to communicate with a secured elasticsearch cluster. + * Enables a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-kibana-enrollment.html | Elasticsearch API documentation} */ async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -666,11 +867,14 @@ export default class Security { const method = 'GET' const path = '/_security/enroll/kibana' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.enroll_kibana' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Allows a new node to enroll to an existing cluster with security enabled. + * Allows a new node to join an existing cluster with security features enabled. 
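`disableUser` and `enableUser` above are simple PUTs keyed by username; a sketch with a placeholder user:

import { Client } from '@elastic/elasticsearch'

async function suspendAndRestore (client: Client) {
  await client.security.disableUser({ username: 'jacknich' }) // PUT .../_disable
  await client.security.enableUser({ username: 'jacknich' })  // PUT .../_enable
}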
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-node-enrollment.html | Elasticsearch API documentation} */ async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -693,11 +897,14 @@ export default class Security { const method = 'GET' const path = '/_security/enroll/node' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.enroll_node' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves information for one or more API keys. + * Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-api-key.html | Elasticsearch API documentation} */ async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -720,7 +927,10 @@ export default class Security { const method = 'GET' const path = '/_security/api_key' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.get_api_key' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -747,7 +957,10 @@ export default class Security { const method = 'GET' const path = '/_security/privilege/_builtin' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.get_builtin_privileges' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -784,11 +997,18 @@ export default class Security { method = 'GET' path = '/_security/privilege' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.get_privileges', + pathParts: { + application: params.application, + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves roles in the native realm. + * The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files. 
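The `getApiKey` doc comment above notes that with only the `manage_own_api_key` privilege the call returns just your own keys; setting `owner: true` requests that subset explicitly. The same sketch also reads back a native role, which the get roles API can return (file-based roles cannot); the role name is a placeholder:

import { Client } from '@elastic/elasticsearch'

async function inspectSecurity (client: Client) {
  const myKeys = await client.security.getApiKey({ owner: true })
  const role = await client.security.getRole({ name: 'logs-reader' })
  return { myKeys, role }
}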
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-role.html | Elasticsearch API documentation} */ async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -818,7 +1038,13 @@ export default class Security { method = 'GET' path = '/_security/role' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.get_role', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -852,11 +1078,17 @@ export default class Security { method = 'GET' path = '/_security/role_mapping' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.get_role_mapping', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves information about service accounts. + * This API returns a list of service accounts that match the provided path parameter(s). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-service-accounts.html | Elasticsearch API documentation} */ async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -889,7 +1121,14 @@ export default class Security { method = 'GET' path = '/_security/service' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.get_service_accounts', + pathParts: { + namespace: params.namespace, + service: params.service + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -915,7 +1154,14 @@ export default class Security { const method = 'GET' const path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.get_service_credentials', + pathParts: { + namespace: params.namespace, + service: params.service + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -941,7 +1187,10 @@ export default class Security { const method = 'GET' const path = '/_security/settings' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.get_settings' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -980,7 +1229,10 @@ export default class Security { const method = 'POST' const path = '/_security/oauth2/token' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.get_token' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1014,7 +1266,13 @@ export default class Security { method = 'GET' path = '/_security/user' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 
'security.get_user', + pathParts: { + username: params.username + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1041,11 +1299,14 @@ export default class Security { const method = 'GET' const path = '/_security/user/_privileges' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.get_user_privileges' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves user profiles for the given unique ID(s). + * Retrieves a user's profile using the unique profile ID. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-profile.html | Elasticsearch API documentation} */ async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1067,11 +1328,17 @@ export default class Security { const method = 'GET' const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.get_user_profile', + pathParts: { + uid: params.uid + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates an API key on behalf of another user. + * Creates an API key on behalf of another user. This API is similar to Create API keys, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user’s credentials. The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-grant-api-key.html | Elasticsearch API documentation} */ async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1105,7 +1372,10 @@ export default class Security { const method = 'POST' const path = '/_security/api_key/grant' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.grant_api_key' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1151,7 +1421,13 @@ export default class Security { method = body != null ? 
'POST' : 'GET' path = '/_security/user/_has_privileges' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.has_privileges', + pathParts: { + user: params.user + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1189,11 +1465,14 @@ export default class Security { const method = body != null ? 'POST' : 'GET' const path = '/_security/profile/_has_privileges' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.has_privileges_user_profile' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Invalidates one or more API keys. + * Invalidates one or more API keys. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` only allows deleting API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user’s identity. - Or, if the request is issued by an API key, i.e. an API key invalidates itself, specify its ID in the `ids` field. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-api-key.html | Elasticsearch API documentation} */ async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1228,7 +1507,10 @@ export default class Security { const method = 'DELETE' const path = '/_security/api_key' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.invalidate_api_key' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1267,7 +1549,10 @@ export default class Security { const method = 'DELETE' const path = '/_security/oauth2/token' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.invalidate_token' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1293,7 +1578,10 @@ export default class Security { const method = 'POST' const path = '/_security/oidc/authenticate' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.oidc_authenticate' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1319,7 +1607,10 @@ export default class Security { const method = 'POST' const path = '/_security/oidc/logout' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.oidc_logout' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1345,7 +1636,10 @@ export default class Security { const method = 'POST' const path = '/_security/oidc/prepare' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.oidc_prepare_authentication' + } + return await this.transport.request({ path, method, querystring, 
body, meta }, options) } /** @@ -1376,11 +1670,14 @@ export default class Security { const method = 'PUT' const path = '/_security/privilege' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.put_privileges' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Adds and updates roles in the native realm. + * The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-role.html | Elasticsearch API documentation} */ async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1388,7 +1685,7 @@ export default class Security { async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'metadata', 'run_as', 'transient_metadata'] + const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'metadata', 'run_as', 'description', 'transient_metadata'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1414,7 +1711,13 @@ export default class Security { const method = 'PUT' const path = `/_security/role/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.put_role', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1452,7 +1755,13 @@ export default class Security { const method = 'PUT' const path = `/_security/role_mapping/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.put_role_mapping', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1490,11 +1799,17 @@ export default class Security { const method = 'PUT' const path = `/_security/user/${encodeURIComponent(params.username.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.put_user', + pathParts: { + username: params.username + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves information for API keys using a subset of query DSL + * Retrieves information for API keys in a paginated manner. You can optionally filter the results with a query. 
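+   * @example
+   * // A minimal usage sketch (illustrative values; assumes a configured `client` instance):
+   * // paginate API keys whose name matches, newest first.
+   * const res = await client.security.queryApiKeys({
+   *   query: { match: { name: 'my-app-key' } },
+   *   size: 25,
+   *   sort: [{ creation: { order: 'desc' } }]
+   * })
+   * console.log(res.count, res.api_keys)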
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-api-key.html | Elasticsearch API documentation} */ async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1529,11 +1844,98 @@ export default class Security { const method = body != null ? 'POST' : 'GET' const path = '/_security/_query/api_key' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.query_api_keys' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retrieves roles in a paginated manner. You can optionally filter the results with a query. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-role.html | Elasticsearch API documentation} + */ + async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise + async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_security/_query/role' + const meta: TransportRequestMetadata = { + name: 'security.query_role' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Exchanges a SAML Response message for an Elasticsearch access token and refresh token pair + * Retrieves information for Users in a paginated manner. You can optionally filter the results with a query. 
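+   * @example
+   * // A minimal usage sketch (illustrative values; assumes a configured `client` instance):
+   * const res = await client.security.queryUser({
+   *   query: { wildcard: { username: 'jac*' } },
+   *   from: 0,
+   *   size: 10
+   * })
+   * console.log(res.total, res.users)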
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-user.html | Elasticsearch API documentation} + */ + async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise + async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_security/_query/user' + const meta: TransportRequestMetadata = { + name: 'security.query_user' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Submits a SAML Response message to Elasticsearch for consumption. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-authenticate.html | Elasticsearch API documentation} */ async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1567,11 +1969,14 @@ export default class Security { const method = 'POST' const path = '/_security/saml/authenticate' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.saml_authenticate' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Verifies the logout response sent from the SAML IdP + * Verifies the logout response sent from the SAML IdP. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-complete-logout.html | Elasticsearch API documentation} */ async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1605,11 +2010,14 @@ export default class Security { const method = 'POST' const path = '/_security/saml/complete_logout' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.saml_complete_logout' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Consumes a SAML LogoutRequest + * Submits a SAML LogoutRequest message to Elasticsearch for consumption. 
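+   * @example
+   * // A minimal usage sketch; `logoutRequestQueryString` stands in for the query string of the
+   * // IdP-initiated LogoutRequest redirect, and the realm name is illustrative.
+   * const res = await client.security.samlInvalidate({
+   *   query_string: logoutRequestQueryString,
+   *   realm: 'saml1'
+   * })
+   * console.log(res.invalidated, res.redirect)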
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-invalidate.html | Elasticsearch API documentation} */ async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1643,11 +2051,14 @@ export default class Security { const method = 'POST' const path = '/_security/saml/invalidate' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.saml_invalidate' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Invalidates an access token and a refresh token that were generated via the SAML Authenticate API + * Submits a request to invalidate an access token and refresh token. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-logout.html | Elasticsearch API documentation} */ async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1681,11 +2092,14 @@ export default class Security { const method = 'POST' const path = '/_security/saml/logout' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.saml_logout' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Creates a SAML authentication request + * Creates a SAML authentication request () as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-prepare-authentication.html | Elasticsearch API documentation} */ async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1720,11 +2134,14 @@ export default class Security { const method = 'POST' const path = '/_security/saml/prepare' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.saml_prepare_authentication' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider + * Generate SAML metadata for a SAML 2.0 Service Provider. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-sp-metadata.html | Elasticsearch API documentation} */ async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1746,7 +2163,13 @@ export default class Security { const method = 'GET' const path = `/_security/saml/metadata/${encodeURIComponent(params.realm_name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.saml_service_provider_metadata', + pathParts: { + realm_name: params.realm_name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1785,11 +2208,14 @@ export default class Security { const method = body != null ? 
'POST' : 'GET' const path = '/_security/profile/_suggest' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.suggest_user_profiles' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Updates attributes of an existing API key. + * Updates attributes of an existing API key. Users can only update API keys that they created or that were granted to them. Use this API to update API keys created by the create API Key or grant API Key APIs. If you need to apply the same update to many API keys, you can use bulk update API Keys to reduce overhead. It’s not possible to update expired API keys, or API keys that have been invalidated by invalidate API Key. This API supports updates to an API key’s access scope and metadata. The access scope of an API key is derived from the `role_descriptors` you specify in the request, and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call. If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It’s not possible to use an API key as the authentication credential for this API. To update an API key, the owner user’s credentials are required. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-api-key.html | Elasticsearch API documentation} */ async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1823,7 +2249,13 @@ export default class Security { const method = 'PUT' const path = `/_security/api_key/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.update_api_key', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1849,7 +2281,13 @@ export default class Security { const method = 'PUT' const path = `/_security/cross_cluster/api_key/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.update_cross_cluster_api_key', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -1875,11 +2313,14 @@ export default class Security { const method = 'PUT' const path = '/_security/settings' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.update_settings' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Update application specific data for the user profile of the given unique ID. + * Updates specific data for the user profile that's associated with the specified unique ID. 
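+   * @example
+   * // A minimal usage sketch (the profile uid and payload are illustrative; assumes a configured `client`):
+   * await client.security.updateUserProfileData({
+   *   uid: profileUid,
+   *   labels: { direction: 'east' },
+   *   data: { app1: { theme: 'default' } }
+   * })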
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-user-profile-data.html | Elasticsearch API documentation} */ async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1913,6 +2354,12 @@ export default class Security { const method = 'PUT' const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}/_data` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'security.update_user_profile_data', + pathParts: { + uid: params.uid + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index 3fc62d09a..cf83485f3 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -66,7 +67,13 @@ export default class Shutdown { const method = 'DELETE' const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/shutdown` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'shutdown.delete_node', + pathParts: { + node_id: params.node_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -100,7 +107,13 @@ export default class Shutdown { method = 'GET' path = '/_nodes/shutdown' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'shutdown.get_node', + pathParts: { + node_id: params.node_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -138,6 +151,12 @@ export default class Shutdown { const method = 'PUT' const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/shutdown` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'shutdown.put_node', + pathParts: { + node_id: params.node_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts new file mode 100644 index 000000000..a5a76325c --- /dev/null +++ b/src/api/api/simulate.ts @@ -0,0 +1,85 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Simulate { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + /** + * Simulates running ingest with example documents. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-ingest-api.html | Elasticsearch API documentation} + */ + async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/_ingest/${encodeURIComponent(params.index.toString())}/_simulate` + } else { + method = body != null ? 
'POST' : 'GET' + path = '/_ingest/_simulate' + } + const meta: TransportRequestMetadata = { + name: 'simulate.ingest', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index c84752269..16f0913c8 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -66,7 +67,13 @@ export default class Slm { const method = 'DELETE' const path = `/_slm/policy/${encodeURIComponent(params.policy_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'slm.delete_lifecycle', + pathParts: { + policy_id: params.policy_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -92,7 +99,13 @@ export default class Slm { const method = 'PUT' const path = `/_slm/policy/${encodeURIComponent(params.policy_id.toString())}/_execute` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'slm.execute_lifecycle', + pathParts: { + policy_id: params.policy_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -119,7 +132,10 @@ export default class Slm { const method = 'POST' const path = '/_slm/_execute_retention' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'slm.execute_retention' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -153,7 +169,13 @@ export default class Slm { method = 'GET' path = '/_slm/policy' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'slm.get_lifecycle', + pathParts: { + policy_id: params.policy_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -180,7 +202,10 @@ export default class Slm { const method = 'GET' const path = '/_slm/stats' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'slm.get_stats' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -207,7 +232,10 @@ export default class Slm { const method = 'GET' const path = '/_slm/status' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'slm.get_status' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -245,7 +273,13 @@ export default class Slm { const method = 'PUT' const path = `/_slm/policy/${encodeURIComponent(params.policy_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'slm.put_lifecycle', + pathParts: { + policy_id: params.policy_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -272,7 +306,10 @@ export default class Slm { const method = 'POST' const path = '/_slm/start' - return await this.transport.request({ path, method, querystring, 
body }, options) + const meta: TransportRequestMetadata = { + name: 'slm.start' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -299,6 +336,9 @@ export default class Slm { const method = 'POST' const path = '/_slm/stop' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'slm.stop' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index f6a5d3923..cd0f53dcf 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,7 @@ export default class Snapshot { } /** - * Removes stale data from repository. + * Triggers the review of a snapshot repository’s contents and deletes any stale data not referenced by existing snapshots. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot-repo-api.html | Elasticsearch API documentation} */ async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -66,7 +67,13 @@ export default class Snapshot { const method = 'POST' const path = `/_snapshot/${encodeURIComponent(params.name.toString())}/_cleanup` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'snapshot.cleanup_repository', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -104,7 +111,15 @@ export default class Snapshot { const method = 'PUT' const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}/_clone/${encodeURIComponent(params.target_snapshot.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'snapshot.clone', + pathParts: { + repository: params.repository, + snapshot: params.snapshot, + target_snapshot: params.target_snapshot + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -142,7 +157,14 @@ export default class Snapshot { const method = 'PUT' const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'snapshot.create', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -173,7 +195,13 @@ export default class Snapshot { const method = 'PUT' const path = `/_snapshot/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'snapshot.create_repository', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -199,7 +227,14 @@ export default class Snapshot { const method = 'DELETE' const path = 
`/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'snapshot.delete', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -225,7 +260,13 @@ export default class Snapshot { const method = 'DELETE' const path = `/_snapshot/${encodeURIComponent(params.name.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'snapshot.delete_repository', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -251,7 +292,14 @@ export default class Snapshot { const method = 'GET' const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'snapshot.get', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -285,7 +333,13 @@ export default class Snapshot { method = 'GET' path = '/_snapshot' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'snapshot.get_repository', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -311,7 +365,13 @@ export default class Snapshot { const method = 'POST' const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/_analyze` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'snapshot.repository_analyze', + pathParts: { + repository: params.repository + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -349,7 +409,14 @@ export default class Snapshot { const method = 'POST' const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}/_restore` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'snapshot.restore', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -386,7 +453,14 @@ export default class Snapshot { method = 'GET' path = '/_snapshot/_status' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'snapshot.status', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -412,6 +486,12 @@ export default class Snapshot { const method = 'POST' const path = `/_snapshot/${encodeURIComponent(params.name.toString())}/_verify` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: 
TransportRequestMetadata = { + name: 'snapshot.verify_repository', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index 05e597545..fbd30e803 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -78,7 +79,10 @@ export default class Sql { const method = 'POST' const path = '/_sql/close' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'sql.clear_cursor' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -104,7 +108,13 @@ export default class Sql { const method = 'DELETE' const path = `/_sql/async/delete/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'sql.delete_async', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -130,7 +140,13 @@ export default class Sql { const method = 'GET' const path = `/_sql/async/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'sql.get_async', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -156,7 +172,13 @@ export default class Sql { const method = 'GET' const path = `/_sql/async/status/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'sql.get_async_status', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -195,7 +217,10 @@ export default class Sql { const method = body != null ? 'POST' : 'GET' const path = '/_sql' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'sql.query' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -233,6 +258,9 @@ export default class Sql { const method = body != null ? 
'POST' : 'GET' const path = '/_sql/translate' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'sql.translate' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts index 198146ad1..9ace268c1 100644 --- a/src/api/api/ssl.ts +++ b/src/api/api/ssl.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -67,6 +68,9 @@ export default class Ssl { const method = 'GET' const path = '/_ssl/certificates' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'ssl.certificates' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 80ba96536..abbf98749 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -66,7 +67,13 @@ export default class Synonyms { const method = 'DELETE' const path = `/_synonyms/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'synonyms.delete_synonym', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -92,7 +99,14 @@ export default class Synonyms { const method = 'DELETE' const path = `/_synonyms/${encodeURIComponent(params.set_id.toString())}/${encodeURIComponent(params.rule_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'synonyms.delete_synonym_rule', + pathParts: { + set_id: params.set_id, + rule_id: params.rule_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -118,7 +132,13 @@ export default class Synonyms { const method = 'GET' const path = `/_synonyms/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'synonyms.get_synonym', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -144,7 +164,14 @@ export default class Synonyms { const method = 'GET' const path = `/_synonyms/${encodeURIComponent(params.set_id.toString())}/${encodeURIComponent(params.rule_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'synonyms.get_synonym_rule', + pathParts: { + set_id: params.set_id, + rule_id: params.rule_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -171,11 +198,14 @@ export default class Synonyms { const method = 'GET' const path = '/_synonyms' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'synonyms.get_synonyms_sets' + } + return await this.transport.request({ path, method, querystring, body, meta 
}, options) } /** - * Creates or updates a synonyms set + * Creates or updates a synonym set. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonyms-set.html | Elasticsearch API documentation} */ async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -209,7 +239,13 @@ export default class Synonyms { const method = 'PUT' const path = `/_synonyms/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'synonyms.put_synonym', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -247,6 +283,13 @@ export default class Synonyms { const method = 'PUT' const path = `/_synonyms/${encodeURIComponent(params.set_id.toString())}/${encodeURIComponent(params.rule_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'synonyms.put_synonym_rule', + pathParts: { + set_id: params.set_id, + rule_id: params.rule_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index a3cdcb968..8f54ef0e4 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -74,7 +75,13 @@ export default class Tasks { method = 'POST' path = '/_tasks/_cancel' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'tasks.cancel', + pathParts: { + task_id: params.task_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -100,11 +107,17 @@ export default class Tasks { const method = 'GET' const path = `/_tasks/${encodeURIComponent(params.task_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'tasks.get', + pathParts: { + task_id: params.task_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Returns a list of tasks. + * The task management API returns information about tasks currently executing on one or more nodes in the cluster. 
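+   * @example
+   * // A minimal usage sketch (assumes a configured `client`): list detailed information
+   * // about currently running search tasks, grouped by parent task.
+   * const res = await client.tasks.list({
+   *   detailed: true,
+   *   actions: '*search*',
+   *   group_by: 'parents'
+   * })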
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -127,6 +140,9 @@ export default class Tasks { const method = 'GET' const path = '/_tasks' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'tasks.list' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index db54a8c40..1dd51ece7 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -72,5 +73,11 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque const method = body != null ? 'POST' : 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_terms_enum` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'terms_enum', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index 6ffee1fa1..956c4df14 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -79,5 +80,12 @@ export default async function TermvectorsApi (this: That, p method = body != null ? 'POST' : 'GET' path = `/${encodeURIComponent(params.index.toString())}/_termvectors` } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'termvectors', + pathParts: { + index: params.index, + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index ba75e1c39..972556a6d 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -43,6 +44,64 @@ export default class TextStructure { this.transport = transport } + /** + * Finds the structure of a text field in an index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-field-structure.html | Elasticsearch API documentation} + */ + async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_text_structure/find_field_structure' + const meta: TransportRequestMetadata = { + name: 'text_structure.find_field_structure' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-message-structure.html | Elasticsearch API documentation} + */ + async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_text_structure/find_message_structure' + const meta: TransportRequestMetadata = { + name: 'text_structure.find_message_structure' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-structure.html | Elasticsearch API documentation} @@ -71,7 +130,10 @@ export default class TextStructure { const method = 'POST' const path = '/_text_structure/find_structure' - return await this.transport.request({ path, method, querystring, bulkBody: body }, options) + const meta: TransportRequestMetadata = { + name: 'text_structure.find_structure' + } + return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) } /** @@ -109,6 +171,9 @@ export default class TextStructure { const method = body != null ? 'POST' : 'GET' const path = '/_text_structure/test_grok_pattern' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'text_structure.test_grok_pattern' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index cd472a416..2e3baa8ae 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,7 @@ export default class Transform { } /** - * Deletes an existing transform. + * Deletes a transform. 
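+   * @example
+   * // A minimal usage sketch (the transform id is illustrative; assumes a configured `client`):
+   * // `force: true` would delete the transform even if it is still running.
+   * await client.transform.deleteTransform({
+   *   transform_id: 'ecommerce-transform',
+   *   force: false
+   * })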
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-transform.html | Elasticsearch API documentation} */ async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -66,7 +67,42 @@ export default class Transform { const method = 'DELETE' const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'transform.delete_transform', + pathParts: { + transform_id: params.transform_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retrieves transform usage information for transform nodes. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform-node-stats.html | Elasticsearch API documentation} + */ + async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_transform/_node_stats' + const meta: TransportRequestMetadata = { + name: 'transform.get_node_stats' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -100,7 +136,13 @@ export default class Transform { method = 'GET' path = '/_transform' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'transform.get_transform', + pathParts: { + transform_id: params.transform_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -126,11 +168,17 @@ export default class Transform { const method = 'GET' const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_stats` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'transform.get_transform_stats', + pathParts: { + transform_id: params.transform_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Previews a transform. + * Previews a transform. It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations. 
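+   * @example
+   * // A minimal usage sketch (index and field names are illustrative; assumes a configured `client`):
+   * const res = await client.transform.previewTransform({
+   *   source: { index: 'ecommerce' },
+   *   pivot: {
+   *     group_by: { customer_id: { terms: { field: 'customer_id' } } },
+   *     aggregations: { total_spend: { sum: { field: 'taxful_total_price' } } }
+   *   }
+   * })
+   * console.log(res.preview, res.generated_dest_index)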
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/preview-transform.html | Elasticsearch API documentation} */ async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -172,11 +220,17 @@ export default class Transform { method = body != null ? 'POST' : 'GET' path = '/_transform/_preview' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'transform.preview_transform', + pathParts: { + transform_id: params.transform_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Instantiates a transform. + * Creates a transform. A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-transform.html | Elasticsearch API documentation} */ async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -210,11 +264,17 @@ export default class Transform { const method = 'PUT' const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'transform.put_transform', + pathParts: { + transform_id: params.transform_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Resets an existing transform. + * Resets a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. 
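+   * @example
+   * // A minimal usage sketch (the transform id is illustrative; assumes a configured `client`):
+   * // stop the transform first, then reset it, as described above.
+   * await client.transform.stopTransform({ transform_id: 'ecommerce-transform', wait_for_completion: true })
+   * await client.transform.resetTransform({ transform_id: 'ecommerce-transform' })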
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/reset-transform.html | Elasticsearch API documentation}
   */
  async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformResetTransformResponse>
@@ -236,11 +296,17 @@ export default class Transform {
     const method = 'POST'
     const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_reset`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'transform.reset_transform',
+      pathParts: {
+        transform_id: params.transform_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
-   * Schedules now a transform.
+   * Schedules a transform to run now. If you _schedule_now a transform, it will process the new data instantly, without waiting for the configured frequency interval. After the _schedule_now API is called, the transform will be processed again at now + frequency unless the _schedule_now API is called again in the meantime.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/schedule-now-transform.html | Elasticsearch API documentation}
   */
  async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformScheduleNowTransformResponse>
@@ -262,11 +328,17 @@ export default class Transform {
     const method = 'POST'
     const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_schedule_now`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'transform.schedule_now_transform',
+      pathParts: {
+        transform_id: params.transform_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
-   * Starts one or more transforms.
+   * Starts a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform, with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user who created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations.
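   * @example
   * // A minimal sketch, assuming a configured client; index, field, and
   * // transform names are hypothetical. Create the destination index with
   * // explicit mappings first, then start the transform.
   * import { Client } from '@elastic/elasticsearch'
   * const client = new Client({ node: 'http://localhost:9200/' })
   * await client.indices.create({
   *   index: 'my-dest-index',
   *   mappings: { properties: { total_price: { type: 'double' } } }
   * })
   * await client.transform.startTransform({ transform_id: 'my-transform' })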
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-transform.html | Elasticsearch API documentation} */ async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -288,7 +360,13 @@ export default class Transform { const method = 'POST' const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_start` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'transform.start_transform', + pathParts: { + transform_id: params.transform_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -314,11 +392,17 @@ export default class Transform { const method = 'POST' const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_stop` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'transform.stop_transform', + pathParts: { + transform_id: params.transform_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Updates certain properties of a transform. + * Updates certain properties of a transform. All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-transform.html | Elasticsearch API documentation} */ async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -352,11 +436,17 @@ export default class Transform { const method = 'POST' const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_update` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'transform.update_transform', + pathParts: { + transform_id: params.transform_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Upgrades all transforms. + * Upgrades all transforms. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. 
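   * @example
   * // A minimal sketch, assuming a configured client: check what an upgrade
   * // would change with `dry_run` before applying it.
   * import { Client } from '@elastic/elasticsearch'
   * const client = new Client({ node: 'http://localhost:9200/' })
   * const check = await client.transform.upgradeTransforms({ dry_run: true })
   * console.log(check.needs_update, check.updated, check.no_action)
   * await client.transform.upgradeTransforms({ dry_run: false })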
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/upgrade-transforms.html | Elasticsearch API documentation} */ async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -379,6 +469,9 @@ export default class Transform { const method = 'POST' const path = '/_transform/_upgrade' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'transform.upgrade_transforms' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/update.ts b/src/api/api/update.ts index d15e007a0..1f234cda4 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -72,5 +73,12 @@ export default async function UpdateApi @@ -72,5 +73,11 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu const method = 'POST' const path = `/${encodeURIComponent(params.index.toString())}/_update_by_query` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'update_by_query', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts index 370acd548..68f5cd9ed 100644 --- a/src/api/api/update_by_query_rethrottle.ts +++ b/src/api/api/update_by_query_rethrottle.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -60,5 +61,11 @@ export default async function UpdateByQueryRethrottleApi (this: That, params: T. 
    const method = 'POST'
    const path = `/_update_by_query/${encodeURIComponent(params.task_id.toString())}/_rethrottle`
-  return await this.transport.request({ path, method, querystring, body }, options)
+  const meta: TransportRequestMetadata = {
+    name: 'update_by_query_rethrottle',
+    pathParts: {
+      task_id: params.task_id
+    }
+  }
+  return await this.transport.request({ path, method, querystring, body, meta }, options)
 }
diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts
index 09aca4615..5b98a7593 100644
--- a/src/api/api/watcher.ts
+++ b/src/api/api/watcher.ts
@@ -28,6 +28,7 @@
 import {
   Transport,
+  TransportRequestMetadata,
   TransportRequestOptions,
   TransportRequestOptionsWithMeta,
   TransportRequestOptionsWithOutMeta,
@@ -73,7 +74,14 @@ export default class Watcher {
       method = 'PUT'
       path = `/_watcher/watch/${encodeURIComponent(params.watch_id.toString())}/_ack`
     }
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'watcher.ack_watch',
+      pathParts: {
+        watch_id: params.watch_id,
+        action_id: params.action_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
@@ -99,7 +107,13 @@ export default class Watcher {
     const method = 'PUT'
     const path = `/_watcher/watch/${encodeURIComponent(params.watch_id.toString())}/_activate`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'watcher.activate_watch',
+      pathParts: {
+        watch_id: params.watch_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
@@ -125,7 +139,13 @@ export default class Watcher {
     const method = 'PUT'
     const path = `/_watcher/watch/${encodeURIComponent(params.watch_id.toString())}/_deactivate`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'watcher.deactivate_watch',
+      pathParts: {
+        watch_id: params.watch_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
@@ -151,11 +171,17 @@ export default class Watcher {
     const method = 'DELETE'
     const path = `/_watcher/watch/${encodeURIComponent(params.id.toString())}`
-    return await this.transport.request({ path, method, querystring, body }, options)
+    const meta: TransportRequestMetadata = {
+      name: 'watcher.delete_watch',
+      pathParts: {
+        id: params.id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
-   * Forces the execution of a stored watch.
+   * This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging, you also have fine-grained control over how the watch runs: you can execute the watch without executing all of its actions, or simulate them instead. You can also force execution by ignoring the watch condition and control whether a watch record is written to the watch history after execution.
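   * @example
   * // A minimal sketch, assuming a configured client; 'my-watch' is a
   * // hypothetical watch ID. Dry-run the watch by ignoring its condition and
   * // simulating every action.
   * import { Client } from '@elastic/elasticsearch'
   * const client = new Client({ node: 'http://localhost:9200/' })
   * const response = await client.watcher.executeWatch({
   *   id: 'my-watch',
   *   ignore_condition: true,
   *   action_modes: { _all: 'simulate' }
   * })
   * console.log(response.watch_record.state)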
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-execute-watch.html | Elasticsearch API documentation} */ async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -197,7 +223,13 @@ export default class Watcher { method = 'PUT' path = '/_watcher/watch/_execute' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'watcher.execute_watch', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -223,7 +255,10 @@ export default class Watcher { const method = 'GET' const path = '/_watcher/settings' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'watcher.get_settings' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -249,7 +284,13 @@ export default class Watcher { const method = 'GET' const path = `/_watcher/watch/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'watcher.get_watch', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -287,7 +328,13 @@ export default class Watcher { const method = 'PUT' const path = `/_watcher/watch/${encodeURIComponent(params.id.toString())}` - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'watcher.put_watch', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -326,7 +373,10 @@ export default class Watcher { const method = body != null ? 
'POST' : 'GET' const path = '/_watcher/_query/watches' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'watcher.query_watches' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -353,7 +403,10 @@ export default class Watcher { const method = 'POST' const path = '/_watcher/_start' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'watcher.start' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -387,7 +440,13 @@ export default class Watcher { method = 'GET' path = '/_watcher/stats' } - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'watcher.stats', + pathParts: { + metric: params.metric + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -414,7 +473,10 @@ export default class Watcher { const method = 'POST' const path = '/_watcher/_stop' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'watcher.stop' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -440,6 +502,9 @@ export default class Watcher { const method = 'PUT' const path = '/_watcher/settings' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'watcher.update_settings' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts index c288e8091..3472a080c 100644 --- a/src/api/api/xpack.ts +++ b/src/api/api/xpack.ts @@ -28,6 +28,7 @@ import { Transport, + TransportRequestMetadata, TransportRequestOptions, TransportRequestOptionsWithMeta, TransportRequestOptionsWithOutMeta, @@ -44,7 +45,7 @@ export default class Xpack { } /** - * Retrieves information about the installed X-Pack features. + * Provides general information about the installed X-Pack features. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/info-api.html | Elasticsearch API documentation} */ async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -67,11 +68,14 @@ export default class Xpack { const method = 'GET' const path = '/_xpack' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'xpack.info' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Retrieves usage information about the installed X-Pack features. + * This API provides information about which features are currently enabled and available under the current license and some usage statistics. 
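   * @example
   * // A minimal sketch, assuming a configured client: inspect which features
   * // are available and enabled under the current license.
   * import { Client } from '@elastic/elasticsearch'
   * const client = new Client({ node: 'http://localhost:9200/' })
   * const usage = await client.xpack.usage()
   * console.log(usage.security.enabled, usage.ml.enabled)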
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/usage-api.html | Elasticsearch API documentation} */ async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -94,6 +98,9 @@ export default class Xpack { const method = 'GET' const path = '/_xpack/usage' - return await this.transport.request({ path, method, querystring, body }, options) + const meta: TransportRequestMetadata = { + name: 'xpack.usage' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } } diff --git a/src/api/index.ts b/src/api/index.ts index 286a59bb7..e2ef990bb 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -29,6 +29,7 @@ import AsyncSearchApi from './api/async_search' import AutoscalingApi from './api/autoscaling' import bulkApi from './api/bulk' +import capabilitiesApi from './api/capabilities' import CatApi from './api/cat' import CcrApi from './api/ccr' import clearScrollApi from './api/clear_scroll' @@ -76,8 +77,9 @@ import mtermvectorsApi from './api/mtermvectors' import NodesApi from './api/nodes' import openPointInTimeApi from './api/open_point_in_time' import pingApi from './api/ping' +import ProfilingApi from './api/profiling' import putScriptApi from './api/put_script' -import QueryRulesetApi from './api/query_ruleset' +import QueryRulesApi from './api/query_rules' import rankEvalApi from './api/rank_eval' import reindexApi from './api/reindex' import reindexRethrottleApi from './api/reindex_rethrottle' @@ -93,6 +95,7 @@ import searchTemplateApi from './api/search_template' import SearchableSnapshotsApi from './api/searchable_snapshots' import SecurityApi from './api/security' import ShutdownApi from './api/shutdown' +import SimulateApi from './api/simulate' import SlmApi from './api/slm' import SnapshotApi from './api/snapshot' import SqlApi from './api/sql' @@ -114,6 +117,7 @@ export default interface API { asyncSearch: AsyncSearchApi autoscaling: AutoscalingApi bulk: typeof bulkApi + capabilities: typeof capabilitiesApi cat: CatApi ccr: CcrApi clearScroll: typeof clearScrollApi @@ -161,8 +165,9 @@ export default interface API { nodes: NodesApi openPointInTime: typeof openPointInTimeApi ping: typeof pingApi + profiling: ProfilingApi putScript: typeof putScriptApi - queryRuleset: QueryRulesetApi + queryRules: QueryRulesApi rankEval: typeof rankEvalApi reindex: typeof reindexApi reindexRethrottle: typeof reindexRethrottleApi @@ -178,6 +183,7 @@ export default interface API { searchableSnapshots: SearchableSnapshotsApi security: SecurityApi shutdown: ShutdownApi + simulate: SimulateApi slm: SlmApi snapshot: SnapshotApi sql: SqlApi @@ -217,12 +223,14 @@ const kMigration = Symbol('Migration') const kMl = Symbol('Ml') const kMonitoring = Symbol('Monitoring') const kNodes = Symbol('Nodes') -const kQueryRuleset = Symbol('QueryRuleset') +const kProfiling = Symbol('Profiling') +const kQueryRules = Symbol('QueryRules') const kRollup = Symbol('Rollup') const kSearchApplication = Symbol('SearchApplication') const kSearchableSnapshots = Symbol('SearchableSnapshots') const kSecurity = Symbol('Security') const kShutdown = Symbol('Shutdown') +const kSimulate = Symbol('Simulate') const kSlm = Symbol('Slm') const kSnapshot = Symbol('Snapshot') const kSql = Symbol('Sql') @@ -257,12 +265,14 @@ export default class API { [kMl]: symbol | null [kMonitoring]: symbol | null [kNodes]: symbol | null - [kQueryRuleset]: symbol | null + [kProfiling]: symbol | null + [kQueryRules]: 
symbol | null [kRollup]: symbol | null [kSearchApplication]: symbol | null [kSearchableSnapshots]: symbol | null [kSecurity]: symbol | null [kShutdown]: symbol | null + [kSimulate]: symbol | null [kSlm]: symbol | null [kSnapshot]: symbol | null [kSql]: symbol | null @@ -296,12 +306,14 @@ export default class API { this[kMl] = null this[kMonitoring] = null this[kNodes] = null - this[kQueryRuleset] = null + this[kProfiling] = null + this[kQueryRules] = null this[kRollup] = null this[kSearchApplication] = null this[kSearchableSnapshots] = null this[kSecurity] = null this[kShutdown] = null + this[kSimulate] = null this[kSlm] = null this[kSnapshot] = null this[kSql] = null @@ -316,6 +328,7 @@ export default class API { } API.prototype.bulk = bulkApi +API.prototype.capabilities = capabilitiesApi API.prototype.clearScroll = clearScrollApi API.prototype.closePointInTime = closePointInTimeApi API.prototype.count = countApi @@ -427,8 +440,11 @@ Object.defineProperties(API.prototype, { nodes: { get () { return this[kNodes] === null ? (this[kNodes] = new NodesApi(this.transport)) : this[kNodes] } }, - queryRuleset: { - get () { return this[kQueryRuleset] === null ? (this[kQueryRuleset] = new QueryRulesetApi(this.transport)) : this[kQueryRuleset] } + profiling: { + get () { return this[kProfiling] === null ? (this[kProfiling] = new ProfilingApi(this.transport)) : this[kProfiling] } + }, + queryRules: { + get () { return this[kQueryRules] === null ? (this[kQueryRules] = new QueryRulesApi(this.transport)) : this[kQueryRules] } }, rollup: { get () { return this[kRollup] === null ? (this[kRollup] = new RollupApi(this.transport)) : this[kRollup] } @@ -445,6 +461,9 @@ Object.defineProperties(API.prototype, { shutdown: { get () { return this[kShutdown] === null ? (this[kShutdown] = new ShutdownApi(this.transport)) : this[kShutdown] } }, + simulate: { + get () { return this[kSimulate] === null ? (this[kSimulate] = new SimulateApi(this.transport)) : this[kSimulate] } + }, slm: { get () { return this[kSlm] === null ? 
(this[kSlm] = new SlmApi(this.transport)) : this[kSlm] } }, diff --git a/src/api/types.ts b/src/api/types.ts index df17e1cbb..db34c036c 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -95,7 +95,7 @@ export interface BulkUpdateAction @@ -1005,7 +1007,7 @@ export interface ReindexRequest extends RequestBase { conflicts?: Conflicts dest: ReindexDestination max_docs?: long - script?: Script + script?: Script | string size?: long source: ReindexSource } @@ -1102,7 +1104,7 @@ export interface ScriptsPainlessExecutePainlessContextSetup { export interface ScriptsPainlessExecuteRequest extends RequestBase { context?: string context_setup?: ScriptsPainlessExecutePainlessContextSetup - script?: InlineScript | string + script?: Script | string } export interface ScriptsPainlessExecuteResponse { @@ -1427,6 +1429,7 @@ export interface SearchHit { _node?: string _routing?: string _source?: TDocument + _rank?: integer _seq_no?: SequenceNumber _primary_term?: long _version?: VersionNumber @@ -1830,7 +1833,7 @@ export interface TermvectorsRequest extends RequestBase { export interface TermvectorsResponse { found: boolean - _id: Id + _id?: Id _index: IndexName term_vectors?: Record took: long @@ -1846,7 +1849,7 @@ export interface TermvectorsTerm { } export interface TermvectorsTermVector { - field_statistics: TermvectorsFieldStatistics + field_statistics?: TermvectorsFieldStatistics terms: Record } @@ -1874,7 +1877,7 @@ export interface UpdateRequest detect_noop?: boolean doc?: TPartialDocument doc_as_upsert?: boolean - script?: Script + script?: Script | string scripted_upsert?: boolean _source?: SearchSourceConfig upsert?: TDocument @@ -1918,7 +1921,7 @@ export interface UpdateByQueryRequest extends RequestBase { wait_for_completion?: boolean max_docs?: long query?: QueryDslQueryContainer - script?: Script + script?: Script | string slice?: SlicedScroll conflicts?: Conflicts } @@ -2175,9 +2178,10 @@ export interface GeoDistanceSortKeys { ignore_unmapped?: boolean order?: SortOrder unit?: DistanceUnit + nested?: NestedSortValue } export type GeoDistanceSort = GeoDistanceSortKeys -& { [property: string]: GeoLocation | GeoLocation[] | SortMode | GeoDistanceType | boolean | SortOrder | DistanceUnit } +& { [property: string]: GeoLocation | GeoLocation[] | SortMode | GeoDistanceType | boolean | SortOrder | DistanceUnit | NestedSortValue } export type GeoDistanceType = 'arc' | 'plane' @@ -2283,12 +2287,6 @@ export interface InlineGetKeys { export type InlineGet = InlineGetKeys & { [property: string]: any } -export interface InlineScript extends ScriptBase { - lang?: ScriptLanguage - options?: Record - source: string -} - export type Ip = string export interface KnnQuery extends QueryDslQueryBase { @@ -2522,7 +2520,7 @@ export type Routing = string export interface RrfRank { rank_constant?: long - window_size?: long + rank_window_size?: long } export type ScalarValue = long | double | string | boolean | null @@ -2531,14 +2529,16 @@ export interface ScoreSort { order?: SortOrder } -export type Script = InlineScript | string | StoredScriptId - -export interface ScriptBase { +export interface Script { + source?: string + id?: Id params?: Record + lang?: ScriptLanguage + options?: Record } export interface ScriptField { - script: Script + script: Script | string ignore_failure?: boolean } @@ -2546,7 +2546,7 @@ export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | s export interface ScriptSort { order?: SortOrder - script: Script + script: Script | string type?: ScriptSortType mode?: 
SortMode nested?: NestedSortValue @@ -2697,10 +2697,6 @@ export interface StoredScript { source: string } -export interface StoredScriptId extends ScriptBase { - id: Id -} - export type SuggestMode = 'missing' | 'popular' | 'always' export type SuggestionName = string @@ -2948,7 +2944,7 @@ export interface AggregationsAutoDateHistogramAggregation extends AggregationsBu missing?: DateTime offset?: string params?: Record - script?: Script + script?: Script | string time_zone?: TimeZone } @@ -3018,11 +3014,11 @@ export interface AggregationsBucketPathAggregation { } export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { - script?: Script + script?: Script | string } export interface AggregationsBucketSelectorAggregation extends AggregationsPipelineAggregationBase { - script?: Script + script?: Script | string } export interface AggregationsBucketSortAggregation { @@ -3036,7 +3032,7 @@ export type AggregationsBuckets = Record | T export type AggregationsBucketsPath = string | string[] | Record -export type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1Y' +export type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1y' export interface AggregationsCardinalityAggregate extends AggregationsAggregateBase { value: long @@ -3095,7 +3091,7 @@ export interface AggregationsCompositeAggregationBase { field?: Field missing_bucket?: boolean missing_order?: AggregationsMissingOrder - script?: Script + script?: Script | string value_type?: AggregationsValueType order?: SortOrder } @@ -3166,7 +3162,7 @@ export interface AggregationsDateHistogramAggregation extends AggregationsBucket offset?: Duration order?: AggregationsAggregateOrder params?: Record - script?: Script + script?: Script | string time_zone?: TimeZone keyed?: boolean } @@ -3207,7 +3203,7 @@ export interface AggregationsDerivativeAggregation extends AggregationsPipelineA export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase { execution_hint?: AggregationsSamplerAggregationExecutionHint max_docs_per_value?: integer - script?: Script + script?: Script | string shard_size?: integer field?: Field } @@ -3456,7 +3452,7 @@ export interface AggregationsHistogramAggregation extends AggregationsBucketAggr missing?: double offset?: double order?: AggregationsAggregateOrder - script?: Script + script?: Script | string format?: string keyed?: boolean } @@ -3644,7 +3640,7 @@ export interface AggregationsMedianAbsoluteDeviationAggregation extends Aggregat export interface AggregationsMetricAggregationBase { field?: Field missing?: AggregationsMissing - script?: Script + script?: Script | string } export interface AggregationsMinAggregate extends AggregationsSingleMetricAggregateBase { @@ -3798,7 +3794,7 @@ export interface AggregationsRangeAggregation extends AggregationsBucketAggregat field?: Field missing?: integer ranges?: AggregationsAggregationRange[] - script?: Script + script?: Script | string keyed?: boolean format?: string } @@ -3856,7 +3852,7 @@ export interface AggregationsSamplerAggregation extends AggregationsBucketAggreg export type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash' export interface AggregationsScriptedHeuristic { - script: Script + script: Script | string } export interface 
AggregationsScriptedMetricAggregate extends AggregationsAggregateBase { @@ -3864,11 +3860,11 @@ export interface AggregationsScriptedMetricAggregate extends AggregationsAggrega } export interface AggregationsScriptedMetricAggregation extends AggregationsMetricAggregationBase { - combine_script?: Script - init_script?: Script - map_script?: Script + combine_script?: Script | string + init_script?: Script | string + map_script?: Script | string params?: Record - reduce_script?: Script + reduce_script?: Script | string } export interface AggregationsSerialDifferencingAggregation extends AggregationsPipelineAggregationBase { @@ -4081,7 +4077,8 @@ export interface AggregationsTermsAggregation extends AggregationsBucketAggregat missing_bucket?: boolean value_type?: string order?: AggregationsAggregateOrder - script?: Script + script?: Script | string + shard_min_doc_count?: long shard_size?: integer show_term_doc_count_error?: boolean size?: integer @@ -4107,7 +4104,7 @@ export interface AggregationsTermsPartition { export interface AggregationsTestPopulation { field: Field - script?: Script + script?: Script | string filter?: QueryDslQueryContainer } @@ -4180,7 +4177,7 @@ export interface AggregationsVariableWidthHistogramAggregation { buckets?: integer shard_size?: integer initial_buffer?: integer - script?: Script + script?: Script | string } export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase { @@ -4204,7 +4201,7 @@ export interface AggregationsWeightedAverageAggregation { export interface AggregationsWeightedAverageValue { field?: Field missing?: double - script?: Script + script?: Script | string } export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase { @@ -4252,7 +4249,7 @@ export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilter export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { type: 'condition' filter: string[] - script: Script + script: Script | string } export interface AnalysisCustomAnalyzer { @@ -4647,7 +4644,7 @@ export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase { type: 'predicate_token_filter' - script: Script + script: Script | string } export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase { @@ -4684,7 +4681,7 @@ export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Dani export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { type: 'snowball' - language: AnalysisSnowballLanguage + language?: AnalysisSnowballLanguage } export interface AnalysisStandardAnalyzer { @@ -4931,8 +4928,9 @@ export interface MappingDateRangeProperty extends MappingRangePropertyBase { export interface MappingDenseVectorIndexOptions { type: string - m: integer - ef_construction: integer + m?: integer + ef_construction?: integer + confidence_interval?: float } export interface MappingDenseVectorProperty extends MappingPropertyBase { @@ -4965,7 +4963,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { null_value?: FieldValue boost?: double coerce?: boolean - script?: Script + script?: Script | string on_script_error?: MappingOnScriptError ignore_malformed?: boolean time_series_metric?: MappingTimeSeriesMetricType @@ -5043,7 +5041,7 @@ export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { null_value?: GeoLocation index?: boolean 
on_script_error?: MappingOnScriptError - script?: Script + script?: Script | string type: 'geo_point' } @@ -5109,7 +5107,7 @@ export interface MappingIpProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean null_value?: string on_script_error?: MappingOnScriptError - script?: Script + script?: Script | string time_series_dimension?: boolean type: 'ip' } @@ -5129,7 +5127,7 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { eager_global_ordinals?: boolean index?: boolean index_options?: MappingIndexOptions - script?: Script + script?: Script | string on_script_error?: MappingOnScriptError normalizer?: string norms?: boolean @@ -5174,7 +5172,7 @@ export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase ignore_malformed?: boolean index?: boolean on_script_error?: MappingOnScriptError - script?: Script + script?: Script | string time_series_metric?: MappingTimeSeriesMetricType time_series_dimension?: boolean } @@ -5234,7 +5232,7 @@ export interface MappingRuntimeField { input_field?: Field target_field?: Field target_index?: IndexName - script?: Script + script?: Script | string type: MappingRuntimeFieldType } @@ -5422,28 +5420,22 @@ export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { filter: QueryDslQueryContainer } -export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase { +export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys -& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } -export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { - gt?: DateMath - gte?: DateMath - lt?: DateMath - lte?: DateMath - from?: DateMath | null - to?: DateMath | null +export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { format?: DateFormat time_zone?: TimeZone } -export type QueryDslDecayFunction = QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction +export type QueryDslDecayFunction = QueryDslUntypedDecayFunction | QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction -export interface QueryDslDecayFunctionBase { +export interface QueryDslDecayFunctionBase { multi_value_mode?: QueryDslMultiValueMode } @@ -5459,7 +5451,7 @@ export interface QueryDslDisMaxQuery extends QueryDslQueryBase { tie_breaker?: double } -export type QueryDslDistanceFeatureQuery = QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery +export type QueryDslDistanceFeatureQuery = QueryDslUntypedDistanceFeatureQuery | QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery export interface QueryDslDistanceFeatureQueryBase extends QueryDslQueryBase { origin: TOrigin @@ -5534,10 +5526,10 @@ export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys & { [property: string]: GeoBounds | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string } -export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { +export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys -& { [property: string]: 
QueryDslDecayPlacement | QueryDslMultiValueMode } +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } @@ -5630,7 +5622,7 @@ export interface QueryDslIntervalsFilter { not_containing?: QueryDslIntervalsContainer not_overlapping?: QueryDslIntervalsContainer overlapping?: QueryDslIntervalsContainer - script?: Script + script?: Script | string } export interface QueryDslIntervalsFuzzy { @@ -5785,19 +5777,13 @@ export interface QueryDslNestedQuery extends QueryDslQueryBase { score_mode?: QueryDslChildScoreMode } -export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { - gt?: double - gte?: double - lt?: double - lte?: double - from?: double | null - to?: double | null +export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { } -export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { +export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys -& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR' @@ -5850,7 +5836,7 @@ export interface QueryDslQueryContainer { dis_max?: QueryDslDisMaxQuery distance_feature?: QueryDslDistanceFeatureQuery exists?: QueryDslExistsQuery - function_score?: QueryDslFunctionScoreQuery + function_score?: QueryDslFunctionScoreQuery | QueryDslFunctionScoreContainer[] fuzzy?: Partial> geo_bounding_box?: QueryDslGeoBoundingBoxQuery geo_distance?: QueryDslGeoDistanceQuery @@ -5878,7 +5864,7 @@ export interface QueryDslQueryContainer { range?: Partial> rank_feature?: QueryDslRankFeatureQuery regexp?: Partial> - rule_query?: QueryDslRuleQuery + rule?: QueryDslRuleQuery script?: QueryDslScriptQuery script_score?: QueryDslScriptScoreQuery semantic?: QueryDslSemanticQuery @@ -5893,6 +5879,7 @@ export interface QueryDslQueryContainer { span_or?: QueryDslSpanOrQuery span_term?: Partial> span_within?: QueryDslSpanWithinQuery + sparse_vector?: QueryDslSparseVectorQuery term?: Partial> terms?: QueryDslTermsQuery terms_set?: Partial> @@ -5936,10 +5923,16 @@ export interface QueryDslRandomScoreFunction { seed?: long | string } -export type QueryDslRangeQuery = QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermsRangeQuery +export type QueryDslRangeQuery = QueryDslUntypedRangeQuery | QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermRangeQuery -export interface QueryDslRangeQueryBase extends QueryDslQueryBase { +export interface QueryDslRangeQueryBase extends QueryDslQueryBase { relation?: QueryDslRangeRelation + gt?: T + gte?: T + lt?: T + lte?: T + from?: T | null + to?: T | null } export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' @@ -5981,22 +5974,22 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase { export interface QueryDslRuleQuery extends QueryDslQueryBase { organic: QueryDslQueryContainer - ruleset_id: Id + ruleset_ids: Id[] match_criteria: any } export interface QueryDslScriptQuery extends QueryDslQueryBase { - script: Script + script: Script | string } export interface QueryDslScriptScoreFunction { - script: Script + script: Script | string } export interface QueryDslScriptScoreQuery extends QueryDslQueryBase { min_score?: float query: QueryDslQueryContainer - script: Script + 
script: Script | string } export interface QueryDslSemanticQuery extends QueryDslQueryBase { @@ -6097,11 +6090,23 @@ export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { little: QueryDslSpanQuery } +export interface QueryDslSparseVectorQuery extends QueryDslQueryBase { + field: Field + query_vector?: Record + inference_id?: Id + query?: string + prune?: boolean + pruning_config?: QueryDslTokenPruningConfig +} + export interface QueryDslTermQuery extends QueryDslQueryBase { value: FieldValue case_insensitive?: boolean } +export interface QueryDslTermRangeQuery extends QueryDslRangeQueryBase { +} + export interface QueryDslTermsLookup { index: IndexName id: Id @@ -6116,18 +6121,9 @@ export type QueryDslTermsQuery = QueryDslTermsQueryKeys export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup -export interface QueryDslTermsRangeQuery extends QueryDslRangeQueryBase { - gt?: string - gte?: string - lt?: string - lte?: string - from?: string | null - to?: string | null -} - export interface QueryDslTermsSetQuery extends QueryDslQueryBase { minimum_should_match_field?: Field - minimum_should_match_script?: Script + minimum_should_match_script?: Script | string terms: string[] } @@ -6149,6 +6145,19 @@ export interface QueryDslTypeQuery extends QueryDslQueryBase { value: string } +export interface QueryDslUntypedDecayFunctionKeys extends QueryDslDecayFunctionBase { +} +export type QueryDslUntypedDecayFunction = QueryDslUntypedDecayFunctionKeys +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } + +export interface QueryDslUntypedDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { +} + +export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase { + format?: DateFormat + time_zone?: TimeZone +} + export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { tokens: Record pruning_config?: QueryDslTokenPruningConfig @@ -6408,28 +6417,39 @@ export interface CatAliasesRequest extends CatCatRequestBase { export type CatAliasesResponse = CatAliasesAliasesRecord[] export interface CatAllocationAllocationRecord { - shards?: string - s?: string - 'disk.indices'?: ByteSize | null - di?: ByteSize | null - diskIndices?: ByteSize | null - 'disk.used'?: ByteSize | null - du?: ByteSize | null - diskUsed?: ByteSize | null - 'disk.avail'?: ByteSize | null - da?: ByteSize | null - diskAvail?: ByteSize | null - 'disk.total'?: ByteSize | null - dt?: ByteSize | null - diskTotal?: ByteSize | null - 'disk.percent'?: Percentage | null - dp?: Percentage | null - diskPercent?: Percentage | null - host?: Host | null - h?: Host | null - ip?: Ip | null - node?: string - n?: string + shards: string + s: string + 'shards.undesired': string | null + 'write_load.forecast': double | null + wlf: double | null + writeLoadForecast: double | null + 'disk.indices.forecast': ByteSize | null + dif: ByteSize | null + diskIndicesForecast: ByteSize | null + 'disk.indices': ByteSize | null + di: ByteSize | null + diskIndices: ByteSize | null + 'disk.used': ByteSize | null + du: ByteSize | null + diskUsed: ByteSize | null + 'disk.avail': ByteSize | null + da: ByteSize | null + diskAvail: ByteSize | null + 'disk.total': ByteSize | null + dt: ByteSize | null + diskTotal: ByteSize | null + 'disk.percent': Percentage | null + dp: Percentage | null + diskPercent: Percentage | null + host: Host | null + h: Host | null + ip: Ip | null + node: string + n: string + 'node.role': string | null + r: string | null + role: string | null + nodeRole: string | null } 
export interface CatAllocationRequest extends CatCatRequestBase { @@ -9669,12 +9689,13 @@ export interface EqlHitsEvent { _index: IndexName _id: Id _source: TEvent + missing?: boolean fields?: Record } export interface EqlHitsSequence { events: EqlHitsEvent[] - join_keys: any[] + join_keys?: any[] } export interface EqlDeleteRequest extends RequestBase { @@ -9729,14 +9750,32 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase> } export type EsqlQueryResponse = EsqlColumns @@ -10455,6 +10494,7 @@ export interface IndicesMappingLimitSettingsNestedObjects { export interface IndicesMappingLimitSettingsTotalFields { limit?: long + ignore_dynamic_beyond_limit?: boolean } export interface IndicesMerge { @@ -10551,8 +10591,8 @@ export interface IndicesSettingsSimilarityLmj { export interface IndicesSettingsSimilarityScripted { type: 'scripted' - script: Script - weight_script?: Script + script: Script | string + weight_script?: Script | string } export interface IndicesSlowlogSettings { @@ -12227,41 +12267,41 @@ export interface IngestProcessorBase { } export interface IngestProcessorContainer { - attachment?: IngestAttachmentProcessor append?: IngestAppendProcessor - csv?: IngestCsvProcessor + attachment?: IngestAttachmentProcessor + bytes?: IngestBytesProcessor + circle?: IngestCircleProcessor convert?: IngestConvertProcessor + csv?: IngestCsvProcessor date?: IngestDateProcessor date_index_name?: IngestDateIndexNameProcessor + dissect?: IngestDissectProcessor dot_expander?: IngestDotExpanderProcessor + drop?: IngestDropProcessor enrich?: IngestEnrichProcessor fail?: IngestFailProcessor foreach?: IngestForeachProcessor - json?: IngestJsonProcessor - user_agent?: IngestUserAgentProcessor - kv?: IngestKeyValueProcessor geoip?: IngestGeoIpProcessor grok?: IngestGrokProcessor gsub?: IngestGsubProcessor + inference?: IngestInferenceProcessor join?: IngestJoinProcessor + json?: IngestJsonProcessor + kv?: IngestKeyValueProcessor lowercase?: IngestLowercaseProcessor + pipeline?: IngestPipelineProcessor remove?: IngestRemoveProcessor rename?: IngestRenameProcessor reroute?: IngestRerouteProcessor script?: IngestScriptProcessor set?: IngestSetProcessor + set_security_user?: IngestSetSecurityUserProcessor sort?: IngestSortProcessor split?: IngestSplitProcessor trim?: IngestTrimProcessor uppercase?: IngestUppercaseProcessor urldecode?: IngestUrlDecodeProcessor - bytes?: IngestBytesProcessor - dissect?: IngestDissectProcessor - set_security_user?: IngestSetSecurityUserProcessor - pipeline?: IngestPipelineProcessor - drop?: IngestDropProcessor - circle?: IngestCircleProcessor - inference?: IngestInferenceProcessor + user_agent?: IngestUserAgentProcessor } export interface IngestRemoveProcessor extends IngestProcessorBase { @@ -16024,66 +16064,95 @@ export interface NodesUsageResponseBase extends NodesNodesResponseBase { nodes: Record } -export interface QueryRulesetQueryRule { +export interface QueryRulesQueryRule { rule_id: Id - type: QueryRulesetQueryRuleType - criteria: QueryRulesetQueryRuleCriteria[] - actions: QueryRulesetQueryRuleActions + type: QueryRulesQueryRuleType + criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] + actions: QueryRulesQueryRuleActions + priority?: integer } -export interface QueryRulesetQueryRuleActions { +export interface QueryRulesQueryRuleActions { ids?: Id[] docs?: QueryDslPinnedDoc[] } -export interface QueryRulesetQueryRuleCriteria { - type: QueryRulesetQueryRuleCriteriaType - metadata: string +export interface QueryRulesQueryRuleCriteria { + type: 
QueryRulesQueryRuleCriteriaType + metadata?: string values?: any[] } -export type QueryRulesetQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' +export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' | 'always' + +export type QueryRulesQueryRuleType = 'pinned' -export type QueryRulesetQueryRuleType = 'pinned' +export interface QueryRulesQueryRuleset { + ruleset_id: Id + rules: QueryRulesQueryRule[] +} -export interface QueryRulesetQueryRuleset { +export interface QueryRulesDeleteRuleRequest extends RequestBase { ruleset_id: Id - rules: QueryRulesetQueryRule[] + rule_id: Id } -export interface QueryRulesetDeleteRequest extends RequestBase { +export type QueryRulesDeleteRuleResponse = AcknowledgedResponseBase + +export interface QueryRulesDeleteRulesetRequest extends RequestBase { ruleset_id: Id } -export type QueryRulesetDeleteResponse = AcknowledgedResponseBase +export type QueryRulesDeleteRulesetResponse = AcknowledgedResponseBase + +export interface QueryRulesGetRuleRequest extends RequestBase { + ruleset_id: Id + rule_id: Id +} + +export type QueryRulesGetRuleResponse = QueryRulesQueryRule -export interface QueryRulesetGetRequest extends RequestBase { +export interface QueryRulesGetRulesetRequest extends RequestBase { ruleset_id: Id } -export type QueryRulesetGetResponse = QueryRulesetQueryRuleset +export type QueryRulesGetRulesetResponse = QueryRulesQueryRuleset -export interface QueryRulesetListQueryRulesetListItem { +export interface QueryRulesListRulesetsQueryRulesetListItem { ruleset_id: Id - rules_count: integer + rule_total_count: integer + rule_criteria_types_counts: Record } -export interface QueryRulesetListRequest extends RequestBase { +export interface QueryRulesListRulesetsRequest extends RequestBase { from?: integer size?: integer } -export interface QueryRulesetListResponse { +export interface QueryRulesListRulesetsResponse { count: long - results: QueryRulesetListQueryRulesetListItem[] + results: QueryRulesListRulesetsQueryRulesetListItem[] } -export interface QueryRulesetPutRequest extends RequestBase { +export interface QueryRulesPutRuleRequest extends RequestBase { ruleset_id: Id - rules: QueryRulesetQueryRule[] + rule_id: Id + type: QueryRulesQueryRuleType + criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] + actions: QueryRulesQueryRuleActions + priority?: integer +} + +export interface QueryRulesPutRuleResponse { + result: Result +} + +export interface QueryRulesPutRulesetRequest extends RequestBase { + ruleset_id: Id + rules: QueryRulesQueryRule | QueryRulesQueryRule[] } -export interface QueryRulesetPutResponse { +export interface QueryRulesPutRulesetResponse { result: Result } @@ -16291,7 +16360,7 @@ export interface SearchApplicationSearchApplication { } export interface SearchApplicationSearchApplicationTemplate { - script: InlineScript | string + script: Script | string } export interface SearchApplicationDeleteRequest extends RequestBase { @@ -16459,11 +16528,16 @@ export interface SecurityApplicationPrivileges { resources: string[] } +export interface SecurityBulkError { + count: integer + details: Record +} + export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 
'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string export interface SecurityCreatedStatus { created: boolean @@ -16508,24 +16582,26 @@ export interface SecurityRealmInfo { } export interface SecurityRoleDescriptor { - cluster?: string[] + cluster?: SecurityClusterPrivilege[] indices?: SecurityIndicesPrivileges[] index?: SecurityIndicesPrivileges[] global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege applications?: SecurityApplicationPrivileges[] metadata?: Metadata run_as?: string[] + description?: string transient_metadata?: Record } export interface SecurityRoleDescriptorRead { - cluster: string[] + cluster: SecurityClusterPrivilege[] indices: SecurityIndicesPrivileges[] index: SecurityIndicesPrivileges[] global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege applications?: SecurityApplicationPrivileges[] metadata?: Metadata run_as?: string[] + description?: string transient_metadata?: Record } @@ -16546,22 +16622,22 @@ export interface SecurityRoleMappingRule { export interface SecurityRoleTemplate { format?: SecurityTemplateFormat - template: Script + template: Script | string } export type SecurityRoleTemplateInlineQuery = string | 
QueryDslQueryContainer -export interface SecurityRoleTemplateInlineScript extends ScriptBase { - lang?: ScriptLanguage - options?: Record - source: SecurityRoleTemplateInlineQuery -} - export interface SecurityRoleTemplateQuery { - template?: SecurityRoleTemplateScript + template?: SecurityRoleTemplateScript | SecurityRoleTemplateInlineQuery } -export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | SecurityRoleTemplateInlineQuery | StoredScriptId +export interface SecurityRoleTemplateScript { + source?: SecurityRoleTemplateInlineQuery + id?: Id + params?: Record + lang?: ScriptLanguage + options?: Record +} export type SecurityTemplateFormat = 'string' | 'json' @@ -16643,6 +16719,29 @@ export interface SecurityAuthenticateToken { type?: string } +export interface SecurityBulkDeleteRoleRequest extends RequestBase { + refresh?: Refresh + names: string[] +} + +export interface SecurityBulkDeleteRoleResponse { + deleted?: string[] + not_found?: string[] + errors?: SecurityBulkError +} + +export interface SecurityBulkPutRoleRequest extends RequestBase { + refresh?: Refresh + roles: Record +} + +export interface SecurityBulkPutRoleResponse { + created?: string[] + updated?: string[] + noop?: string[] + errors?: SecurityBulkError +} + export interface SecurityChangePasswordRequest extends RequestBase { username?: Username refresh?: Refresh @@ -17136,6 +17235,7 @@ export interface SecurityPutRoleRequest extends RequestBase { indices?: SecurityIndicesPrivileges[] metadata?: Metadata run_as?: string[] + description?: string transient_metadata?: Record } @@ -17234,6 +17334,72 @@ export interface SecurityQueryApiKeysResponse { aggregations?: Record } +export interface SecurityQueryRoleQueryRole extends SecurityRoleDescriptor { + _sort?: SortResults + name: string +} + +export interface SecurityQueryRoleRequest extends RequestBase { + query?: SecurityQueryRoleRoleQueryContainer + from?: integer + sort?: Sort + size?: integer + search_after?: SortResults +} + +export interface SecurityQueryRoleResponse { + total: integer + count: integer + roles: SecurityQueryRoleQueryRole[] +} + +export interface SecurityQueryRoleRoleQueryContainer { + bool?: QueryDslBoolQuery + exists?: QueryDslExistsQuery + ids?: QueryDslIdsQuery + match?: Partial> + match_all?: QueryDslMatchAllQuery + prefix?: Partial> + range?: Partial> + simple_query_string?: QueryDslSimpleQueryStringQuery + term?: Partial> + terms?: QueryDslTermsQuery + wildcard?: Partial> +} + +export interface SecurityQueryUserQueryUser extends SecurityUser { + _sort?: SortResults +} + +export interface SecurityQueryUserRequest extends RequestBase { + with_profile_uid?: boolean + query?: SecurityQueryUserUserQueryContainer + from?: integer + sort?: Sort + size?: integer + search_after?: SortResults +} + +export interface SecurityQueryUserResponse { + total: integer + count: integer + users: SecurityQueryUserQueryUser[] +} + +export interface SecurityQueryUserUserQueryContainer { + ids?: QueryDslIdsQuery + bool?: QueryDslBoolQuery + exists?: QueryDslExistsQuery + match?: Partial> + match_all?: QueryDslMatchAllQuery + prefix?: Partial> + range?: Partial> + simple_query_string?: QueryDslSimpleQueryStringQuery + term?: Partial> + terms?: QueryDslTermsQuery + wildcard?: Partial> +} + export interface SecuritySamlAuthenticateRequest extends RequestBase { content: string ids: Ids @@ -18395,9 +18561,9 @@ export interface TransformGetTransformStatsTransformIndexerStats { export interface TransformGetTransformStatsTransformProgress { docs_indexed: 
long docs_processed: long - docs_remaining: long - percent_complete: double - total_docs: long + docs_remaining?: long + percent_complete?: double + total_docs?: long } export interface TransformGetTransformStatsTransformStats { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index c32c3af00..71b7922bd 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -96,7 +96,7 @@ export interface BulkUpdateAction @@ -1046,7 +1048,7 @@ export interface ReindexRequest extends RequestBase { conflicts?: Conflicts dest: ReindexDestination max_docs?: long - script?: Script + script?: Script | string size?: long source: ReindexSource } @@ -1149,7 +1151,7 @@ export interface ScriptsPainlessExecuteRequest extends RequestBase { body?: { context?: string context_setup?: ScriptsPainlessExecutePainlessContextSetup - script?: InlineScript | string + script?: Script | string } } @@ -1482,6 +1484,7 @@ export interface SearchHit { _node?: string _routing?: string _source?: TDocument + _rank?: integer _seq_no?: SequenceNumber _primary_term?: long _version?: VersionNumber @@ -1897,7 +1900,7 @@ export interface TermvectorsRequest extends RequestBase { export interface TermvectorsResponse { found: boolean - _id: Id + _id?: Id _index: IndexName term_vectors?: Record took: long @@ -1913,7 +1916,7 @@ export interface TermvectorsTerm { } export interface TermvectorsTermVector { - field_statistics: TermvectorsFieldStatistics + field_statistics?: TermvectorsFieldStatistics terms: Record } @@ -1943,7 +1946,7 @@ export interface UpdateRequest detect_noop?: boolean doc?: TPartialDocument doc_as_upsert?: boolean - script?: Script + script?: Script | string scripted_upsert?: boolean _source?: SearchSourceConfig upsert?: TDocument @@ -1990,7 +1993,7 @@ export interface UpdateByQueryRequest extends RequestBase { body?: { max_docs?: long query?: QueryDslQueryContainer - script?: Script + script?: Script | string slice?: SlicedScroll conflicts?: Conflicts } @@ -2248,9 +2251,10 @@ export interface GeoDistanceSortKeys { ignore_unmapped?: boolean order?: SortOrder unit?: DistanceUnit + nested?: NestedSortValue } export type GeoDistanceSort = GeoDistanceSortKeys -& { [property: string]: GeoLocation | GeoLocation[] | SortMode | GeoDistanceType | boolean | SortOrder | DistanceUnit } +& { [property: string]: GeoLocation | GeoLocation[] | SortMode | GeoDistanceType | boolean | SortOrder | DistanceUnit | NestedSortValue } export type GeoDistanceType = 'arc' | 'plane' @@ -2356,12 +2360,6 @@ export interface InlineGetKeys { export type InlineGet = InlineGetKeys & { [property: string]: any } -export interface InlineScript extends ScriptBase { - lang?: ScriptLanguage - options?: Record - source: string -} - export type Ip = string export interface KnnQuery extends QueryDslQueryBase { @@ -2595,7 +2593,7 @@ export type Routing = string export interface RrfRank { rank_constant?: long - window_size?: long + rank_window_size?: long } export type ScalarValue = long | double | string | boolean | null @@ -2604,14 +2602,16 @@ export interface ScoreSort { order?: SortOrder } -export type Script = InlineScript | string | StoredScriptId - -export interface ScriptBase { +export interface Script { + source?: string + id?: Id params?: Record + lang?: ScriptLanguage + options?: Record } export interface ScriptField { - script: Script + script: Script | string ignore_failure?: boolean } @@ -2619,7 +2619,7 @@ export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | s export interface ScriptSort { 
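  // Editorial note (not part of the generated diff): as with the other
  // `script` properties revised throughout this patch, the property below
  // widens from `Script` to `Script | string`, so a bare Painless source
  // string is now accepted wherever a full script object used to be required.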
order?: SortOrder - script: Script + script: Script | string type?: ScriptSortType mode?: SortMode nested?: NestedSortValue @@ -2770,10 +2770,6 @@ export interface StoredScript { source: string } -export interface StoredScriptId extends ScriptBase { - id: Id -} - export type SuggestMode = 'missing' | 'popular' | 'always' export type SuggestionName = string @@ -3021,7 +3017,7 @@ export interface AggregationsAutoDateHistogramAggregation extends AggregationsBu missing?: DateTime offset?: string params?: Record - script?: Script + script?: Script | string time_zone?: TimeZone } @@ -3091,11 +3087,11 @@ export interface AggregationsBucketPathAggregation { } export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { - script?: Script + script?: Script | string } export interface AggregationsBucketSelectorAggregation extends AggregationsPipelineAggregationBase { - script?: Script + script?: Script | string } export interface AggregationsBucketSortAggregation { @@ -3109,7 +3105,7 @@ export type AggregationsBuckets = Record | T export type AggregationsBucketsPath = string | string[] | Record -export type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1Y' +export type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1y' export interface AggregationsCardinalityAggregate extends AggregationsAggregateBase { value: long @@ -3168,7 +3164,7 @@ export interface AggregationsCompositeAggregationBase { field?: Field missing_bucket?: boolean missing_order?: AggregationsMissingOrder - script?: Script + script?: Script | string value_type?: AggregationsValueType order?: SortOrder } @@ -3239,7 +3235,7 @@ export interface AggregationsDateHistogramAggregation extends AggregationsBucket offset?: Duration order?: AggregationsAggregateOrder params?: Record - script?: Script + script?: Script | string time_zone?: TimeZone keyed?: boolean } @@ -3280,7 +3276,7 @@ export interface AggregationsDerivativeAggregation extends AggregationsPipelineA export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase { execution_hint?: AggregationsSamplerAggregationExecutionHint max_docs_per_value?: integer - script?: Script + script?: Script | string shard_size?: integer field?: Field } @@ -3529,7 +3525,7 @@ export interface AggregationsHistogramAggregation extends AggregationsBucketAggr missing?: double offset?: double order?: AggregationsAggregateOrder - script?: Script + script?: Script | string format?: string keyed?: boolean } @@ -3717,7 +3713,7 @@ export interface AggregationsMedianAbsoluteDeviationAggregation extends Aggregat export interface AggregationsMetricAggregationBase { field?: Field missing?: AggregationsMissing - script?: Script + script?: Script | string } export interface AggregationsMinAggregate extends AggregationsSingleMetricAggregateBase { @@ -3871,7 +3867,7 @@ export interface AggregationsRangeAggregation extends AggregationsBucketAggregat field?: Field missing?: integer ranges?: AggregationsAggregationRange[] - script?: Script + script?: Script | string keyed?: boolean format?: string } @@ -3929,7 +3925,7 @@ export interface AggregationsSamplerAggregation extends AggregationsBucketAggreg export type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash' export interface 
AggregationsScriptedHeuristic { - script: Script + script: Script | string } export interface AggregationsScriptedMetricAggregate extends AggregationsAggregateBase { @@ -3937,11 +3933,11 @@ export interface AggregationsScriptedMetricAggregate extends AggregationsAggrega } export interface AggregationsScriptedMetricAggregation extends AggregationsMetricAggregationBase { - combine_script?: Script - init_script?: Script - map_script?: Script + combine_script?: Script | string + init_script?: Script | string + map_script?: Script | string params?: Record - reduce_script?: Script + reduce_script?: Script | string } export interface AggregationsSerialDifferencingAggregation extends AggregationsPipelineAggregationBase { @@ -4154,7 +4150,8 @@ export interface AggregationsTermsAggregation extends AggregationsBucketAggregat missing_bucket?: boolean value_type?: string order?: AggregationsAggregateOrder - script?: Script + script?: Script | string + shard_min_doc_count?: long shard_size?: integer show_term_doc_count_error?: boolean size?: integer @@ -4180,7 +4177,7 @@ export interface AggregationsTermsPartition { export interface AggregationsTestPopulation { field: Field - script?: Script + script?: Script | string filter?: QueryDslQueryContainer } @@ -4253,7 +4250,7 @@ export interface AggregationsVariableWidthHistogramAggregation { buckets?: integer shard_size?: integer initial_buffer?: integer - script?: Script + script?: Script | string } export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase { @@ -4277,7 +4274,7 @@ export interface AggregationsWeightedAverageAggregation { export interface AggregationsWeightedAverageValue { field?: Field missing?: double - script?: Script + script?: Script | string } export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase { @@ -4325,7 +4322,7 @@ export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilter export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { type: 'condition' filter: string[] - script: Script + script: Script | string } export interface AnalysisCustomAnalyzer { @@ -4720,7 +4717,7 @@ export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase { type: 'predicate_token_filter' - script: Script + script: Script | string } export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase { @@ -4757,7 +4754,7 @@ export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Dani export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { type: 'snowball' - language: AnalysisSnowballLanguage + language?: AnalysisSnowballLanguage } export interface AnalysisStandardAnalyzer { @@ -5004,8 +5001,9 @@ export interface MappingDateRangeProperty extends MappingRangePropertyBase { export interface MappingDenseVectorIndexOptions { type: string - m: integer - ef_construction: integer + m?: integer + ef_construction?: integer + confidence_interval?: float } export interface MappingDenseVectorProperty extends MappingPropertyBase { @@ -5038,7 +5036,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { null_value?: FieldValue boost?: double coerce?: boolean - script?: Script + script?: Script | string on_script_error?: MappingOnScriptError ignore_malformed?: boolean time_series_metric?: MappingTimeSeriesMetricType @@ -5116,7 +5114,7 @@ export interface 
MappingGeoPointProperty extends MappingDocValuesPropertyBase { null_value?: GeoLocation index?: boolean on_script_error?: MappingOnScriptError - script?: Script + script?: Script | string type: 'geo_point' } @@ -5182,7 +5180,7 @@ export interface MappingIpProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean null_value?: string on_script_error?: MappingOnScriptError - script?: Script + script?: Script | string time_series_dimension?: boolean type: 'ip' } @@ -5202,7 +5200,7 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { eager_global_ordinals?: boolean index?: boolean index_options?: MappingIndexOptions - script?: Script + script?: Script | string on_script_error?: MappingOnScriptError normalizer?: string norms?: boolean @@ -5247,7 +5245,7 @@ export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase ignore_malformed?: boolean index?: boolean on_script_error?: MappingOnScriptError - script?: Script + script?: Script | string time_series_metric?: MappingTimeSeriesMetricType time_series_dimension?: boolean } @@ -5307,7 +5305,7 @@ export interface MappingRuntimeField { input_field?: Field target_field?: Field target_index?: IndexName - script?: Script + script?: Script | string type: MappingRuntimeFieldType } @@ -5495,28 +5493,22 @@ export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { filter: QueryDslQueryContainer } -export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase { +export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys -& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } -export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { - gt?: DateMath - gte?: DateMath - lt?: DateMath - lte?: DateMath - from?: DateMath | null - to?: DateMath | null +export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { format?: DateFormat time_zone?: TimeZone } -export type QueryDslDecayFunction = QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction +export type QueryDslDecayFunction = QueryDslUntypedDecayFunction | QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction -export interface QueryDslDecayFunctionBase { +export interface QueryDslDecayFunctionBase { multi_value_mode?: QueryDslMultiValueMode } @@ -5532,7 +5524,7 @@ export interface QueryDslDisMaxQuery extends QueryDslQueryBase { tie_breaker?: double } -export type QueryDslDistanceFeatureQuery = QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery +export type QueryDslDistanceFeatureQuery = QueryDslUntypedDistanceFeatureQuery | QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery export interface QueryDslDistanceFeatureQueryBase extends QueryDslQueryBase { origin: TOrigin @@ -5607,10 +5599,10 @@ export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys & { [property: string]: GeoBounds | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string } -export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { +export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { } 
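// Editorial sketch (not part of the generated diff): with the generic
// `QueryDslDecayFunctionBase` refactor above, a decay clause remains a
// field-keyed object whose values follow the origin/scale placement shape.
// A minimal date-decay example -- the `@timestamp` field and every value
// below are assumptions for illustration only:
//
//   const dateDecay: QueryDslDateDecayFunction = {
//     multi_value_mode: 'avg',
//     '@timestamp': { origin: 'now', scale: '10d', offset: '1d', decay: 0.5 }
//   }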
export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys -& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } @@ -5703,7 +5695,7 @@ export interface QueryDslIntervalsFilter { not_containing?: QueryDslIntervalsContainer not_overlapping?: QueryDslIntervalsContainer overlapping?: QueryDslIntervalsContainer - script?: Script + script?: Script | string } export interface QueryDslIntervalsFuzzy { @@ -5858,19 +5850,13 @@ export interface QueryDslNestedQuery extends QueryDslQueryBase { score_mode?: QueryDslChildScoreMode } -export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { - gt?: double - gte?: double - lt?: double - lte?: double - from?: double | null - to?: double | null +export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { } -export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { +export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys -& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR' @@ -5923,7 +5909,7 @@ export interface QueryDslQueryContainer { dis_max?: QueryDslDisMaxQuery distance_feature?: QueryDslDistanceFeatureQuery exists?: QueryDslExistsQuery - function_score?: QueryDslFunctionScoreQuery + function_score?: QueryDslFunctionScoreQuery | QueryDslFunctionScoreContainer[] fuzzy?: Partial> geo_bounding_box?: QueryDslGeoBoundingBoxQuery geo_distance?: QueryDslGeoDistanceQuery @@ -5951,7 +5937,7 @@ export interface QueryDslQueryContainer { range?: Partial> rank_feature?: QueryDslRankFeatureQuery regexp?: Partial> - rule_query?: QueryDslRuleQuery + rule?: QueryDslRuleQuery script?: QueryDslScriptQuery script_score?: QueryDslScriptScoreQuery semantic?: QueryDslSemanticQuery @@ -5966,6 +5952,7 @@ export interface QueryDslQueryContainer { span_or?: QueryDslSpanOrQuery span_term?: Partial> span_within?: QueryDslSpanWithinQuery + sparse_vector?: QueryDslSparseVectorQuery term?: Partial> terms?: QueryDslTermsQuery terms_set?: Partial> @@ -6009,10 +5996,16 @@ export interface QueryDslRandomScoreFunction { seed?: long | string } -export type QueryDslRangeQuery = QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermsRangeQuery +export type QueryDslRangeQuery = QueryDslUntypedRangeQuery | QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermRangeQuery -export interface QueryDslRangeQueryBase extends QueryDslQueryBase { +export interface QueryDslRangeQueryBase extends QueryDslQueryBase { relation?: QueryDslRangeRelation + gt?: T + gte?: T + lt?: T + lte?: T + from?: T | null + to?: T | null } export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' @@ -6054,22 +6047,22 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase { export interface QueryDslRuleQuery extends QueryDslQueryBase { organic: QueryDslQueryContainer - ruleset_id: Id + ruleset_ids: Id[] match_criteria: any } export interface QueryDslScriptQuery extends QueryDslQueryBase { - script: Script + script: Script | string } export interface QueryDslScriptScoreFunction { - script: Script + script: Script | string } export interface QueryDslScriptScoreQuery 
extends QueryDslQueryBase { min_score?: float query: QueryDslQueryContainer - script: Script + script: Script | string } export interface QueryDslSemanticQuery extends QueryDslQueryBase { @@ -6170,11 +6163,23 @@ export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { little: QueryDslSpanQuery } +export interface QueryDslSparseVectorQuery extends QueryDslQueryBase { + field: Field + query_vector?: Record + inference_id?: Id + query?: string + prune?: boolean + pruning_config?: QueryDslTokenPruningConfig +} + export interface QueryDslTermQuery extends QueryDslQueryBase { value: FieldValue case_insensitive?: boolean } +export interface QueryDslTermRangeQuery extends QueryDslRangeQueryBase { +} + export interface QueryDslTermsLookup { index: IndexName id: Id @@ -6189,18 +6194,9 @@ export type QueryDslTermsQuery = QueryDslTermsQueryKeys export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup -export interface QueryDslTermsRangeQuery extends QueryDslRangeQueryBase { - gt?: string - gte?: string - lt?: string - lte?: string - from?: string | null - to?: string | null -} - export interface QueryDslTermsSetQuery extends QueryDslQueryBase { minimum_should_match_field?: Field - minimum_should_match_script?: Script + minimum_should_match_script?: Script | string terms: string[] } @@ -6222,6 +6218,19 @@ export interface QueryDslTypeQuery extends QueryDslQueryBase { value: string } +export interface QueryDslUntypedDecayFunctionKeys extends QueryDslDecayFunctionBase { +} +export type QueryDslUntypedDecayFunction = QueryDslUntypedDecayFunctionKeys +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } + +export interface QueryDslUntypedDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { +} + +export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase { + format?: DateFormat + time_zone?: TimeZone +} + export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { tokens: Record pruning_config?: QueryDslTokenPruningConfig @@ -6485,28 +6494,39 @@ export interface CatAliasesRequest extends CatCatRequestBase { export type CatAliasesResponse = CatAliasesAliasesRecord[] export interface CatAllocationAllocationRecord { - shards?: string - s?: string - 'disk.indices'?: ByteSize | null - di?: ByteSize | null - diskIndices?: ByteSize | null - 'disk.used'?: ByteSize | null - du?: ByteSize | null - diskUsed?: ByteSize | null - 'disk.avail'?: ByteSize | null - da?: ByteSize | null - diskAvail?: ByteSize | null - 'disk.total'?: ByteSize | null - dt?: ByteSize | null - diskTotal?: ByteSize | null - 'disk.percent'?: Percentage | null - dp?: Percentage | null - diskPercent?: Percentage | null - host?: Host | null - h?: Host | null - ip?: Ip | null - node?: string - n?: string + shards: string + s: string + 'shards.undesired': string | null + 'write_load.forecast': double | null + wlf: double | null + writeLoadForecast: double | null + 'disk.indices.forecast': ByteSize | null + dif: ByteSize | null + diskIndicesForecast: ByteSize | null + 'disk.indices': ByteSize | null + di: ByteSize | null + diskIndices: ByteSize | null + 'disk.used': ByteSize | null + du: ByteSize | null + diskUsed: ByteSize | null + 'disk.avail': ByteSize | null + da: ByteSize | null + diskAvail: ByteSize | null + 'disk.total': ByteSize | null + dt: ByteSize | null + diskTotal: ByteSize | null + 'disk.percent': Percentage | null + dp: Percentage | null + diskPercent: Percentage | null + host: Host | null + h: Host | null + ip: Ip | null + node: string + n: string + 
'node.role': string | null + r: string | null + role: string | null + nodeRole: string | null } export interface CatAllocationRequest extends CatCatRequestBase { @@ -9821,12 +9841,13 @@ export interface EqlHitsEvent { _index: IndexName _id: Id _source: TEvent + missing?: boolean fields?: Record } export interface EqlHitsSequence { events: EqlHitsEvent[] - join_keys: any[] + join_keys?: any[] } export interface EqlDeleteRequest extends RequestBase { @@ -9884,16 +9905,34 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase> } } @@ -10629,6 +10668,7 @@ export interface IndicesMappingLimitSettingsNestedObjects { export interface IndicesMappingLimitSettingsTotalFields { limit?: long + ignore_dynamic_beyond_limit?: boolean } export interface IndicesMerge { @@ -10725,8 +10765,8 @@ export interface IndicesSettingsSimilarityLmj { export interface IndicesSettingsSimilarityScripted { type: 'scripted' - script: Script - weight_script?: Script + script: Script | string + weight_script?: Script | string } export interface IndicesSlowlogSettings { @@ -12452,41 +12492,41 @@ export interface IngestProcessorBase { } export interface IngestProcessorContainer { - attachment?: IngestAttachmentProcessor append?: IngestAppendProcessor - csv?: IngestCsvProcessor + attachment?: IngestAttachmentProcessor + bytes?: IngestBytesProcessor + circle?: IngestCircleProcessor convert?: IngestConvertProcessor + csv?: IngestCsvProcessor date?: IngestDateProcessor date_index_name?: IngestDateIndexNameProcessor + dissect?: IngestDissectProcessor dot_expander?: IngestDotExpanderProcessor + drop?: IngestDropProcessor enrich?: IngestEnrichProcessor fail?: IngestFailProcessor foreach?: IngestForeachProcessor - json?: IngestJsonProcessor - user_agent?: IngestUserAgentProcessor - kv?: IngestKeyValueProcessor geoip?: IngestGeoIpProcessor grok?: IngestGrokProcessor gsub?: IngestGsubProcessor + inference?: IngestInferenceProcessor join?: IngestJoinProcessor + json?: IngestJsonProcessor + kv?: IngestKeyValueProcessor lowercase?: IngestLowercaseProcessor + pipeline?: IngestPipelineProcessor remove?: IngestRemoveProcessor rename?: IngestRenameProcessor reroute?: IngestRerouteProcessor script?: IngestScriptProcessor set?: IngestSetProcessor + set_security_user?: IngestSetSecurityUserProcessor sort?: IngestSortProcessor split?: IngestSplitProcessor trim?: IngestTrimProcessor uppercase?: IngestUppercaseProcessor urldecode?: IngestUrlDecodeProcessor - bytes?: IngestBytesProcessor - dissect?: IngestDissectProcessor - set_security_user?: IngestSetSecurityUserProcessor - pipeline?: IngestPipelineProcessor - drop?: IngestDropProcessor - circle?: IngestCircleProcessor - inference?: IngestInferenceProcessor + user_agent?: IngestUserAgentProcessor } export interface IngestRemoveProcessor extends IngestProcessorBase { @@ -16376,69 +16416,101 @@ export interface NodesUsageResponseBase extends NodesNodesResponseBase { nodes: Record } -export interface QueryRulesetQueryRule { +export interface QueryRulesQueryRule { rule_id: Id - type: QueryRulesetQueryRuleType - criteria: QueryRulesetQueryRuleCriteria[] - actions: QueryRulesetQueryRuleActions + type: QueryRulesQueryRuleType + criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] + actions: QueryRulesQueryRuleActions + priority?: integer } -export interface QueryRulesetQueryRuleActions { +export interface QueryRulesQueryRuleActions { ids?: Id[] docs?: QueryDslPinnedDoc[] } -export interface QueryRulesetQueryRuleCriteria { - type: QueryRulesetQueryRuleCriteriaType - metadata: string 
+export interface QueryRulesQueryRuleCriteria { + type: QueryRulesQueryRuleCriteriaType + metadata?: string values?: any[] } -export type QueryRulesetQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' +export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' | 'always' + +export type QueryRulesQueryRuleType = 'pinned' + +export interface QueryRulesQueryRuleset { + ruleset_id: Id + rules: QueryRulesQueryRule[] +} + +export interface QueryRulesDeleteRuleRequest extends RequestBase { + ruleset_id: Id + rule_id: Id +} -export type QueryRulesetQueryRuleType = 'pinned' +export type QueryRulesDeleteRuleResponse = AcknowledgedResponseBase -export interface QueryRulesetQueryRuleset { +export interface QueryRulesDeleteRulesetRequest extends RequestBase { ruleset_id: Id - rules: QueryRulesetQueryRule[] } -export interface QueryRulesetDeleteRequest extends RequestBase { +export type QueryRulesDeleteRulesetResponse = AcknowledgedResponseBase + +export interface QueryRulesGetRuleRequest extends RequestBase { ruleset_id: Id + rule_id: Id } -export type QueryRulesetDeleteResponse = AcknowledgedResponseBase +export type QueryRulesGetRuleResponse = QueryRulesQueryRule -export interface QueryRulesetGetRequest extends RequestBase { +export interface QueryRulesGetRulesetRequest extends RequestBase { ruleset_id: Id } -export type QueryRulesetGetResponse = QueryRulesetQueryRuleset +export type QueryRulesGetRulesetResponse = QueryRulesQueryRuleset -export interface QueryRulesetListQueryRulesetListItem { +export interface QueryRulesListRulesetsQueryRulesetListItem { ruleset_id: Id - rules_count: integer + rule_total_count: integer + rule_criteria_types_counts: Record } -export interface QueryRulesetListRequest extends RequestBase { +export interface QueryRulesListRulesetsRequest extends RequestBase { from?: integer size?: integer } -export interface QueryRulesetListResponse { +export interface QueryRulesListRulesetsResponse { count: long - results: QueryRulesetListQueryRulesetListItem[] + results: QueryRulesListRulesetsQueryRulesetListItem[] } -export interface QueryRulesetPutRequest extends RequestBase { +export interface QueryRulesPutRuleRequest extends RequestBase { ruleset_id: Id + rule_id: Id /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - rules: QueryRulesetQueryRule[] + type: QueryRulesQueryRuleType + criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] + actions: QueryRulesQueryRuleActions + priority?: integer } } -export interface QueryRulesetPutResponse { +export interface QueryRulesPutRuleResponse { + result: Result +} + +export interface QueryRulesPutRulesetRequest extends RequestBase { + ruleset_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + rules: QueryRulesQueryRule | QueryRulesQueryRule[] + } +} + +export interface QueryRulesPutRulesetResponse { result: Result } @@ -16652,7 +16724,7 @@ export interface SearchApplicationSearchApplication { } export interface SearchApplicationSearchApplicationTemplate { - script: InlineScript | string + script: Script | string } export interface SearchApplicationDeleteRequest extends RequestBase { @@ -16827,11 +16899,16 @@ export interface SecurityApplicationPrivileges { resources: string[] } +export interface SecurityBulkError { + count: integer + details: Record +} + export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_connector_secrets' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string export interface SecurityCreatedStatus { created: boolean @@ -16876,24 +16953,26 @@ export interface SecurityRealmInfo { } export interface SecurityRoleDescriptor { - cluster?: string[] + cluster?: SecurityClusterPrivilege[] indices?: SecurityIndicesPrivileges[] index?: 
SecurityIndicesPrivileges[] global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege applications?: SecurityApplicationPrivileges[] metadata?: Metadata run_as?: string[] + description?: string transient_metadata?: Record } export interface SecurityRoleDescriptorRead { - cluster: string[] + cluster: SecurityClusterPrivilege[] indices: SecurityIndicesPrivileges[] index: SecurityIndicesPrivileges[] global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege applications?: SecurityApplicationPrivileges[] metadata?: Metadata run_as?: string[] + description?: string transient_metadata?: Record } @@ -16914,22 +16993,22 @@ export interface SecurityRoleMappingRule { export interface SecurityRoleTemplate { format?: SecurityTemplateFormat - template: Script + template: Script | string } export type SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer -export interface SecurityRoleTemplateInlineScript extends ScriptBase { - lang?: ScriptLanguage - options?: Record - source: SecurityRoleTemplateInlineQuery -} - export interface SecurityRoleTemplateQuery { - template?: SecurityRoleTemplateScript + template?: SecurityRoleTemplateScript | SecurityRoleTemplateInlineQuery } -export type SecurityRoleTemplateScript = SecurityRoleTemplateInlineScript | SecurityRoleTemplateInlineQuery | StoredScriptId +export interface SecurityRoleTemplateScript { + source?: SecurityRoleTemplateInlineQuery + id?: Id + params?: Record + lang?: ScriptLanguage + options?: Record +} export type SecurityTemplateFormat = 'string' | 'json' @@ -17014,6 +17093,35 @@ export interface SecurityAuthenticateToken { type?: string } +export interface SecurityBulkDeleteRoleRequest extends RequestBase { + refresh?: Refresh + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + names: string[] + } +} + +export interface SecurityBulkDeleteRoleResponse { + deleted?: string[] + not_found?: string[] + errors?: SecurityBulkError +} + +export interface SecurityBulkPutRoleRequest extends RequestBase { + refresh?: Refresh + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + roles: Record + } +} + +export interface SecurityBulkPutRoleResponse { + created?: string[] + updated?: string[] + noop?: string[] + errors?: SecurityBulkError +} + export interface SecurityChangePasswordRequest extends RequestBase { username?: Username refresh?: Refresh @@ -17534,6 +17642,7 @@ export interface SecurityPutRoleRequest extends RequestBase { indices?: SecurityIndicesPrivileges[] metadata?: Metadata run_as?: string[] + description?: string transient_metadata?: Record } } @@ -17643,6 +17752,78 @@ export interface SecurityQueryApiKeysResponse { aggregations?: Record } +export interface SecurityQueryRoleQueryRole extends SecurityRoleDescriptor { + _sort?: SortResults + name: string +} + +export interface SecurityQueryRoleRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + query?: SecurityQueryRoleRoleQueryContainer + from?: integer + sort?: Sort + size?: integer + search_after?: SortResults + } +} + +export interface SecurityQueryRoleResponse { + total: integer + count: integer + roles: SecurityQueryRoleQueryRole[] +} + +export interface SecurityQueryRoleRoleQueryContainer { + bool?: QueryDslBoolQuery + exists?: QueryDslExistsQuery + ids?: QueryDslIdsQuery + match?: Partial> + match_all?: QueryDslMatchAllQuery + prefix?: Partial> + range?: Partial> + simple_query_string?: QueryDslSimpleQueryStringQuery + term?: Partial> + terms?: QueryDslTermsQuery + wildcard?: Partial> +} + +export interface SecurityQueryUserQueryUser extends SecurityUser { + _sort?: SortResults +} + +export interface SecurityQueryUserRequest extends RequestBase { + with_profile_uid?: boolean + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + query?: SecurityQueryUserUserQueryContainer + from?: integer + sort?: Sort + size?: integer + search_after?: SortResults + } +} + +export interface SecurityQueryUserResponse { + total: integer + count: integer + users: SecurityQueryUserQueryUser[] +} + +export interface SecurityQueryUserUserQueryContainer { + ids?: QueryDslIdsQuery + bool?: QueryDslBoolQuery + exists?: QueryDslExistsQuery + match?: Partial> + match_all?: QueryDslMatchAllQuery + prefix?: Partial> + range?: Partial> + simple_query_string?: QueryDslSimpleQueryStringQuery + term?: Partial> + terms?: QueryDslTermsQuery + wildcard?: Partial> +} + export interface SecuritySamlAuthenticateRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -18863,9 +19044,9 @@ export interface TransformGetTransformStatsTransformIndexerStats { export interface TransformGetTransformStatsTransformProgress { docs_indexed: long docs_processed: long - docs_remaining: long - percent_complete: double - total_docs: long + docs_remaining?: long + percent_complete?: double + total_docs?: long } export interface TransformGetTransformStatsTransformStats { From 94da0d241a77382ebc7c2e73557982b18f3533a3 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 30 Jul 2024 01:27:15 +1000 Subject: [PATCH 367/647] Auto-generated code for main (#2317) --- docs/reference.asciidoc | 185 ++++++++++++++++++------ src/api/api/bulk.ts | 2 +- src/api/api/cat.ts | 20 +-- src/api/api/cluster.ts | 10 +- src/api/api/create.ts | 2 +- src/api/api/delete.ts | 2 +- src/api/api/delete_by_query.ts | 2 +- src/api/api/delete_script.ts | 2 +- src/api/api/enrich.ts | 8 +- src/api/api/exists.ts | 2 +- src/api/api/exists_source.ts | 2 +- src/api/api/explain.ts | 2 +- src/api/api/get.ts | 2 +- src/api/api/get_script.ts | 2 +- src/api/api/get_source.ts | 2 +- src/api/api/index.ts | 2 +- src/api/api/indices.ts | 38 ++--- src/api/api/info.ts | 2 +- src/api/api/license.ts | 2 +- src/api/api/ping.ts | 2 +- src/api/api/put_script.ts | 2 +- src/api/api/reindex.ts | 2 +- src/api/api/scripts_painless_execute.ts | 2 +- src/api/api/search_mvt.ts | 2 +- src/api/api/security.ts | 14 +- src/api/api/tasks.ts | 2 +- src/api/api/termvectors.ts | 2 +- src/api/api/transform.ts | 20 +-- src/api/api/update.ts | 2 +- src/api/api/update_by_query.ts | 2 +- src/api/types.ts | 51 ++++--- src/api/typesWithBodyKey.ts | 51 ++++--- 32 files changed, 273 insertions(+), 170 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index c7668ae7a..237af9280 100644 --- 
a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -27,6 +27,7 @@ [discrete] === bulk +Bulk index or delete documents. Performs multiple indexing or delete operations in a single API call. This reduces overhead and can greatly increase indexing speed. @@ -130,6 +131,7 @@ Elasticsearch collects documents before sorting. [discrete] === create +Index a document. Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. @@ -162,6 +164,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] === delete +Delete a document. Removes a JSON document from the specified index. {ref}/docs-delete.html[Endpoint documentation] @@ -189,6 +192,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] === delete_by_query +Delete documents. Deletes documents that match the specified query. {ref}/docs-delete-by-query.html[Endpoint documentation] @@ -268,6 +272,7 @@ client.deleteByQueryRethrottle({ task_id }) [discrete] === delete_script +Delete a script or search template. Deletes a stored script or search template. {ref}/modules-scripting.html[Endpoint documentation] @@ -287,7 +292,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] === exists -Checks if a document in an index exists. +Check a document. +Checks if a specified document exists. {ref}/docs-get.html[Endpoint documentation] [source,ts] @@ -318,6 +324,7 @@ The specified version must match the current version of the document for the req [discrete] === exists_source +Check for a document source. Checks if a document's `_source` is stored. {ref}/docs-get.html[Endpoint documentation] @@ -346,7 +353,8 @@ The specified version must match the current version of the document for the req [discrete] === explain -Returns information about why a specific document matches (or doesn’t match) a query. +Explain a document match result. +Returns information about why a specific document matches, or doesn’t match, a query. {ref}/search-explain.html[Endpoint documentation] [source,ts] @@ -408,7 +416,8 @@ targeting `foo*,bar*` returns an error if an index starts with foo but no index [discrete] === get -Returns a document. +Get a document by its ID. +Retrieves the document with the specified ID from an index. {ref}/docs-get.html[Endpoint documentation] [source,ts] @@ -439,6 +448,7 @@ If this field is specified, the `_source` parameter defaults to false. [discrete] === get_script +Get a script or search template. Retrieves a stored script or search template. {ref}/modules-scripting.html[Endpoint documentation] @@ -475,6 +485,7 @@ client.getScriptLanguages() [discrete] === get_source +Get a document's source. Returns the source of a document. {ref}/docs-get.html[Endpoint documentation] @@ -519,6 +530,7 @@ client.healthReport({ ... }) [discrete] === index +Index a document. Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. @@ -558,6 +570,7 @@ Set to all or any positive integer up to the total number of shards in the index [discrete] === info +Get cluster info. Returns basic information about the cluster. {ref}/index.html[Endpoint documentation] @@ -741,6 +754,7 @@ Supports a list of values, such as `open,hidden`. 
Valid values are: `all`, `open [discrete] === ping +Ping the cluster. Returns whether the cluster is running. {ref}/index.html[Endpoint documentation] @@ -751,6 +765,7 @@ client.ping() [discrete] === put_script +Create or update a script or search template. Creates or updates a stored script or search template. {ref}/modules-scripting.html[Endpoint documentation] @@ -796,9 +811,8 @@ To target all data streams and indices in a cluster, omit this parameter or use [discrete] === reindex -Allows to copy documents from one index to another, optionally filtering the source -documents by a query, changing the destination index settings, or fetching the -documents from a remote cluster. +Reindex documents. +Copies documents from a source to a destination. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. {ref}/docs-reindex.html[Endpoint documentation] [source,ts] @@ -869,6 +883,7 @@ If no `id` or `` is specified, this parameter is required. [discrete] === scripts_painless_execute +Run a script. Runs a script and returns a result. {painless}/painless-execute-api.html[Endpoint documentation] @@ -1054,7 +1069,8 @@ Fetches with this enabled will be slower the enabling synthetic source natively [discrete] === search_mvt -Searches a vector tile for geospatial values. Returns results as a binary Mapbox vector tile. +Search a vector tile. +Searches a vector tile for geospatial values. {ref}/search-vector-tile-api.html[Endpoint documentation] [source,ts] @@ -1206,6 +1222,7 @@ client.termsEnum({ index, field }) [discrete] === termvectors +Get term vector information. Returns information and statistics about terms in the fields of a particular document. {ref}/docs-termvectors.html[Endpoint documentation] @@ -1238,7 +1255,8 @@ Random by default. [discrete] === update -Updates a document with a script or partial document. +Update a document. +Updates a document by running a script or passing a partial document. {ref}/docs-update.html[Endpoint documentation] [source,ts] @@ -1281,6 +1299,7 @@ Set to 'all' or any positive integer up to the total number of shards in the ind [discrete] === update_by_query +Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. @@ -1602,9 +1621,11 @@ client.autoscaling.putAutoscalingPolicy({ name }) === cat [discrete] ==== aliases +Get aliases. Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. -IMPORTANT: cat APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. +> info +> CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use [the /_alias endpoints](#endpoint-alias). {ref}/cat-alias.html[Endpoint documentation] [source,ts] @@ -1639,10 +1660,12 @@ client.cat.allocation({ ... }) [discrete] ==== component_templates +Get component templates. Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
-IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use the get component template API. +> info +> CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use [the /_component_template endpoints](#endpoint-component-template). {ref}/cat-component-templates.html[Endpoint documentation] [source,ts] @@ -1658,10 +1681,12 @@ client.cat.componentTemplates({ ... }) [discrete] ==== count -Provides quick access to a document count for a data stream, an index, or an entire cluster. -NOTE: The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use the count API. +Get a document count. +Provides quick access to a document count for a data stream, an index, or an entire cluster.n/ +The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. +> info +> CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use [the /_count endpoints](#endpoint-count). {ref}/cat-count.html[Endpoint documentation] [source,ts] @@ -1724,7 +1749,8 @@ client.cat.health({ ... }) [discrete] ==== help -Returns help for the Cat APIs. +Get CAT help. +Returns help for the CAT APIs. {ref}/cat.html[Endpoint documentation] [source,ts] @@ -1735,12 +1761,21 @@ client.cat.help() [discrete] ==== indices +Get index information. Returns high-level information about indices in a cluster, including backing indices for data streams. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use the get index API. -Use the cat indices API to get the following information for each index in a cluster: shard count; document count; deleted document count; primary store size; total store size of all shards, including shard replicas. +> info +> CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use an index endpoint. + +Use this request to get the following information for each index in a cluster: +- shard count +- document count +- deleted document count +- primary store size +- total store size of all shards, including shard replicas + These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. -To get an accurate count of Elasticsearch documents, use the cat count or count APIs. +To get an accurate count of Elasticsearch documents, use the [/_cat/count](#operation-cat-count) or [count](#endpoint-count) endpoints. {ref}/cat-indices.html[Endpoint documentation] [source,ts] @@ -1775,11 +1810,13 @@ client.cat.master() [discrete] ==== ml_data_frame_analytics +Get data frame analytics jobs. Returns configuration and usage information about data frame analytics jobs. 
-IMPORTANT: cat APIs are only intended for human consumption using the Kibana +> info +> CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use the get data frame analytics jobs statistics API. +application consumption, use [the /_ml/data_frame/analytics endpoints](#endpoint-ml). {ref}/cat-dfanalytics.html[Endpoint documentation] [source,ts] @@ -1801,14 +1838,16 @@ response. [discrete] ==== ml_datafeeds +Get datafeeds. Returns configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. -IMPORTANT: cat APIs are only intended for human consumption using the Kibana +> info +> CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use the get datafeed statistics API. +application consumption, use [the /_ml/datafeeds endpoints](#endpoint-ml). {ref}/cat-datafeeds.html[Endpoint documentation] [source,ts] @@ -1836,14 +1875,16 @@ partial matches. [discrete] ==== ml_jobs +Get anomaly detection jobs. Returns configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. -IMPORTANT: cat APIs are only intended for human consumption using the Kibana +> info +> CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use the get anomaly detection job statistics API. +application consumption, use [the /_ml/anomaly_detectors endpoints](#endpoint-ml). {ref}/cat-anomaly-detectors.html[Endpoint documentation] [source,ts] @@ -1872,11 +1913,13 @@ matches. [discrete] ==== ml_trained_models +Get trained models. Returns configuration and usage information about inference trained models. -IMPORTANT: cat APIs are only intended for human consumption using the Kibana +> info +> CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use the get trained models statistics API. +application consumption, use [the /_ml/trained_models endpoints](#endpoint-ml). {ref}/cat-trained-model.html[Endpoint documentation] [source,ts] @@ -2113,11 +2156,13 @@ Accepts wildcard expressions. [discrete] ==== transforms +Get transforms. Returns configuration and usage information about transforms. -IMPORTANT: cat APIs are only intended for human consumption using the Kibana +> info +> CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use the get transform statistics API. +application consumption, use [the /_transform endpoints](#endpoint-transform). {ref}/cat-transforms.html[Endpoint documentation] [source,ts] @@ -2412,6 +2457,7 @@ client.cluster.allocationExplain({ ... }) [discrete] ==== delete_component_template +Delete component templates. Deletes component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
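
A minimal call might look like the following sketch; the template name is a
placeholder, not something taken from this patch:

[source,ts]
----
client.cluster.deleteComponentTemplate({ name: 'my-component-template' })
----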
@@ -2454,7 +2500,8 @@ nodes are still in the cluster. [discrete] ==== exists_component_template -Returns information about whether a particular component template exist +Check component templates. +Returns information about whether a particular component template exists. {ref}/indices-component-template.html[Endpoint documentation] [source,ts] @@ -2476,6 +2523,7 @@ Defaults to false, which means information is retrieved from the master node. [discrete] ==== get_component_template +Get component templates. Retrieves information about component templates. {ref}/indices-component-template.html[Endpoint documentation] @@ -2549,7 +2597,8 @@ client.cluster.health({ ... }) [discrete] ==== info -Returns different information about the cluster. +Get cluster info. +Returns basic information about the cluster. {ref}/cluster-info.html[Endpoint documentation] [source,ts] @@ -2610,6 +2659,7 @@ is satisfied, the request fails and returns an error. [discrete] ==== put_component_template +Create or update a component template. Creates or updates a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. @@ -2811,6 +2861,7 @@ client.danglingIndices.listDanglingIndices() === enrich [discrete] ==== delete_policy +Delete an enrich policy. Deletes an existing enrich policy and its enrich index. {ref}/delete-enrich-policy-api.html[Endpoint documentation] @@ -2844,6 +2895,7 @@ client.enrich.executePolicy({ name }) [discrete] ==== get_policy +Get an enrich policy. Returns information about an enrich policy. {ref}/get-enrich-policy-api.html[Endpoint documentation] @@ -2861,6 +2913,7 @@ To return information for all enrich policies, omit this parameter. [discrete] ==== put_policy +Create an enrich policy. Creates an enrich policy. {ref}/put-enrich-policy-api.html[Endpoint documentation] @@ -2880,6 +2933,7 @@ client.enrich.putPolicy({ name }) [discrete] ==== stats +Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. {ref}/enrich-stats-api.html[Endpoint documentation] @@ -3443,7 +3497,8 @@ client.ilm.stop({ ... }) === indices [discrete] ==== add_block -Adds a block to an index. +Add an index block. +Limits the operations allowed on an index by blocking specific operation types. {ref}/index-modules-blocks.html[Endpoint documentation] [source,ts] @@ -3581,6 +3636,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== create +Create an index. Creates a new index. {ref}/indices-create-index.html[Endpoint documentation] @@ -3653,6 +3709,7 @@ Supports a list of values, such as `open,hidden`. [discrete] ==== delete +Delete indices. Deletes one or more indices. {ref}/indices-delete-index.html[Endpoint documentation] @@ -3683,6 +3740,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== delete_alias +Delete an alias. Removes a data stream or index from an alias. {ref}/indices-aliases.html[Endpoint documentation] @@ -3832,7 +3890,8 @@ client.indices.downsample({ index, target_index }) [discrete] ==== exists -Checks if a data stream, index, or alias exists. +Check indices. +Checks if one or more indices, index aliases, or data streams exist. {ref}/indices-exists.html[Endpoint documentation] [source,ts] @@ -3858,7 +3917,8 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== exists_alias -Checks if an alias exists. +Check aliases. 
+Checks if one or more data stream or index aliases exist. {ref}/indices-aliases.html[Endpoint documentation] [source,ts] @@ -3921,7 +3981,8 @@ client.indices.existsTemplate({ name }) [discrete] ==== explain_data_lifecycle -Retrieves information about the index's current data stream lifecycle, such as any potential encountered error, time since creation etc. +Get the status for a data stream lifecycle. +Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. {ref}/data-streams-explain-lifecycle.html[Endpoint documentation] [source,ts] @@ -4020,6 +4081,7 @@ client.indices.forcemerge({ ... }) [discrete] ==== get +Get index information. Returns information about one or more indices. For data streams, the API returns information about the stream’s backing indices. @@ -4050,7 +4112,8 @@ such as open,hidden. [discrete] ==== get_alias -Retrieves information for one or more aliases. +Get aliases. +Retrieves information for one or more data stream or index aliases. {ref}/indices-aliases.html[Endpoint documentation] [source,ts] @@ -4123,6 +4186,7 @@ Supports a list of values, such as `open,hidden`. [discrete] ==== get_field_mapping +Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. @@ -4173,6 +4237,7 @@ client.indices.getIndexTemplate({ ... }) [discrete] ==== get_mapping +Get mapping definitions. Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. @@ -4202,6 +4267,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== get_settings +Get index settings. Returns setting information for one or more indices. For data streams, returns setting information for the stream’s backing indices. @@ -4354,6 +4420,7 @@ client.indices.promoteDataStream({ name }) [discrete] ==== put_alias +Create or update an alias. Adds a data stream or index to an alias. {ref}/indices-aliases.html[Endpoint documentation] @@ -4469,6 +4536,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_mapping +Update field mappings. Adds new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields. For data streams, these changes are applied to all backing indices by default. @@ -4518,7 +4586,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_settings -Changes a dynamic index setting in real time. For data streams, index setting +Update index settings. +Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. {ref}/indices-update-settings.html[Endpoint documentation] @@ -4609,6 +4678,7 @@ To target all data streams and indices, omit this parameter or use `*` or `_all` [discrete] ==== refresh +Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. @@ -4704,6 +4774,7 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== rollover +Roll over to a new index. Creates a new index for a data stream or index alias. 
{ref}/indices-rollover-index.html[Endpoint documentation] @@ -4965,6 +5036,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== update_aliases +Create or update an alias. Adds a data stream or index to an alias. {ref}/indices-aliases.html[Endpoint documentation] @@ -4985,7 +5057,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== validate_query -Validates a potentially expensive query without executing it. +Validate a query. +Validates a query without running it. {ref}/search-validate.html[Endpoint documentation] [source,ts] @@ -5257,8 +5330,9 @@ client.license.delete() [discrete] ==== get -This API returns information about the type of license, when it was issued, and when it expires, for example. -For more information about the different types of licenses, see https://www.elastic.co/subscriptions. +Get license information. +Returns information about your Elastic license, including its type, its status, when it was issued, and when it expires. +For more information about the different types of licenses, refer to https://www.elastic.co/subscriptions[Elastic Stack subscriptions]. {ref}/get-license.html[Endpoint documentation] [source,ts] @@ -8226,7 +8300,9 @@ client.security.activateUserProfile({ grant_type }) [discrete] ==== authenticate -Enables you to submit a request with a basic auth header to authenticate a user and retrieve information about the authenticated user. +Authenticate a user. +Authenticates a user and returns information about the authenticated user. +Include the user information in a https://en.wikipedia.org/wiki/Basic_access_authentication[basic auth header]. A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. @@ -8395,6 +8471,7 @@ client.security.clearCachedServiceTokens({ namespace, service, name }) [discrete] ==== create_api_key +Create an API key. Creates an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. @@ -8630,6 +8707,7 @@ client.security.enrollNode() [discrete] ==== get_api_key +Get API key information. Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. @@ -8884,6 +8962,7 @@ It is not valid with other grant types. [discrete] ==== has_privileges +Check user privileges. Determines whether the specified user has a specified list of privileges. {ref}/security-api-has-privileges.html[Endpoint documentation] [source,ts] @@ -8920,6 +8999,7 @@ client.security.hasPrivilegesUserProfile({ uids, privileges }) [discrete] ==== invalidate_api_key +Invalidate API keys. Invalidates one or more API keys. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` only allows deleting API keys that are owned by the user.
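A minimal sketch of the invalidation modes described above, assuming a configured `client` inside an async function (the user, realm, and id values below are placeholders, not taken from this patch):

[source,ts]
----
// Invalidate every API key owned by the authenticated user
await client.security.invalidateApiKey({ owner: true })

// Invalidate the API keys of a specific user in a specific realm
await client.security.invalidateApiKey({ username: 'jsmith', realm_name: 'native1' })

// An API key can invalidate itself by passing its own id in `ids`
await client.security.invalidateApiKey({ ids: ['VuaCfGcBCdbkQm-e5aOx'] })
----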
@@ -9094,7 +9174,8 @@ client.security.putUser({ username }) [discrete] ==== query_api_keys -Retrieves information for API keys in a paginated manner. You can optionally filter the results with a query. +Query API keys. +Retrieves a paginated list of API keys and their information. You can optionally filter the results with a query. {ref}/security-api-query-api-key.html[Endpoint documentation] [source,ts] @@ -9334,6 +9415,7 @@ as long as the profile matches the `name` field query. [discrete] ==== update_api_key +Update an API key. Updates attributes of an existing API key. Users can only update API keys that they created or that were granted to them. Use this API to update API keys created by the create API Key or grant API Key APIs. @@ -10158,7 +10240,8 @@ client.tasks.cancel({ ... }) [discrete] ==== get -Returns information about a task. +Get task information. +Returns information about the tasks currently executing in the cluster. {ref}/tasks.html[Endpoint documentation] [source,ts] @@ -10275,6 +10358,7 @@ client.textStructure.testGrokPattern({ grok_pattern, text }) === transform [discrete] ==== delete_transform +Delete a transform. Deletes a transform. {ref}/delete-transform.html[Endpoint documentation] @@ -10305,6 +10389,7 @@ client.transform.getNodeStats() [discrete] ==== get_transform +Get transforms. Retrieves configuration information for transforms. {ref}/get-transform.html[Endpoint documentation] @@ -10337,6 +10422,7 @@ be retrieved and then added to another cluster. [discrete] ==== get_transform_stats +Get transform stats. Retrieves usage information for transforms. {ref}/get-transform-stats.html[Endpoint documentation] @@ -10367,7 +10453,8 @@ there are no matches or only partial matches. [discrete] ==== preview_transform -Previews a transform. +Preview a transform. +Generates a preview of the results that you will get when you create a transform with the same configuration. It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field @@ -10406,6 +10493,7 @@ timeout expires, the request fails and returns an error. [discrete] ==== put_transform +Create a transform. Creates a transform. A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as @@ -10463,6 +10551,7 @@ the exception of privilege checks. [discrete] ==== reset_transform +Reset a transform. Resets a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. @@ -10484,7 +10573,8 @@ must be stopped before it can be reset. [discrete] ==== schedule_now_transform -Schedules now a transform. +Schedule a transform to start now. +Instantly runs a transform to process data. If you _schedule_now a transform, it will process the new data instantly, without waiting for the configured frequency interval. After _schedule_now API is called, @@ -10506,6 +10596,7 @@ client.transform.scheduleNowTransform({ transform_id }) [discrete] ==== start_transform +Start a transform. Starts a transform. When you start a transform, it creates the destination index if it does not already exist. 
The `number_of_shards` is @@ -10539,6 +10630,7 @@ client.transform.startTransform({ transform_id }) [discrete] ==== stop_transform +Stop transforms. Stops one or more transforms. {ref}/stop-transform.html[Endpoint documentation] @@ -10572,6 +10664,7 @@ immediately and the indexer is stopped asynchronously in the background. [discrete] ==== update_transform +Update a transform. Updates certain properties of a transform. All updated properties except `description` do not take effect until after the transform starts the next checkpoint, diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index fe5fb81a7..b7a5dfa84 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Performs multiple indexing or delete operations in a single API call. This reduces overhead and can greatly increase indexing speed. + * Bulk index or delete documents. Performs multiple indexing or delete operations in a single API call. This reduces overhead and can greatly increase indexing speed. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html | Elasticsearch API documentation} */ export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index 8d9a04a55..0a740b584 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -45,7 +45,7 @@ export default class Cat { } /** - * Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. IMPORTANT: cat APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. + * Get aliases. Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. > info > CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use [the /_alias endpoints](#endpoint-alias). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html | Elasticsearch API documentation} */ async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -125,7 +125,7 @@ export default class Cat { } /** - * Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. + * Get component templates. Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. > info > CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use [the /_component_template endpoints](#endpoint-component-template). 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html | Elasticsearch API documentation} */ async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -165,7 +165,7 @@ export default class Cat { } /** - * Provides quick access to a document count for a data stream, an index, or an entire cluster. NOTE: The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. + * Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. > info > CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use [the /_count endpoints](#endpoint-count). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html | Elasticsearch API documentation} */ async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -275,7 +275,7 @@ export default class Cat { } /** - * Returns help for the Cat APIs. + * Get CAT help. Returns help for the CAT APIs. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html | Elasticsearch API documentation} */ async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -305,7 +305,7 @@ export default class Cat { } /** - * Returns high-level information about indices in a cluster, including backing indices for data streams. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index API. Use the cat indices API to get the following information for each index in a cluster: shard count; document count; deleted document count; primary store size; total store size of all shards, including shard replicas. These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. + * Get index information. Returns high-level information about indices in a cluster, including backing indices for data streams. > info > CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.
To get an accurate count of Elasticsearch documents, use the [/_cat/count](#operation-cat-count) or [count](#endpoint-count) endpoints. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html | Elasticsearch API documentation} */ async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -375,7 +375,7 @@ export default class Cat { } /** - * Returns configuration and usage information about data frame analytics jobs. IMPORTANT: cat APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. + * Get data frame analytics jobs. Returns configuration and usage information about data frame analytics jobs. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/data_frame/analytics endpoints](#endpoint-ml). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-dfanalytics.html | Elasticsearch API documentation} */ async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -415,7 +415,7 @@ export default class Cat { } /** - * Returns configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: cat APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. + * Get datafeeds. Returns configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/datafeeds endpoints](#endpoint-ml). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-datafeeds.html | Elasticsearch API documentation} */ async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -455,7 +455,7 @@ export default class Cat { } /** - * Returns configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: cat APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. + * Get anomaly detection jobs. Returns configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. 
If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/anomaly_detectors endpoints](#endpoint-ml). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-anomaly-detectors.html | Elasticsearch API documentation} */ async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -495,7 +495,7 @@ export default class Cat { } /** - * Returns configuration and usage information about inference trained models. IMPORTANT: cat APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. + * Get trained models. Returns configuration and usage information about inference trained models. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/trained_models endpoints](#endpoint-ml). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-trained-model.html | Elasticsearch API documentation} */ async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -955,7 +955,7 @@ export default class Cat { } /** - * Returns configuration and usage information about transforms. IMPORTANT: cat APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. + * Get transforms. Returns configuration and usage information about transforms. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_transform endpoints](#endpoint-transform). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-transforms.html | Elasticsearch API documentation} */ async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 55025b0b0..6795c7f13 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -87,7 +87,7 @@ export default class Cluster { } /** - * Deletes component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. + * Delete component templates. Deletes component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -149,7 +149,7 @@ export default class Cluster { } /** - * Returns information about whether a particular component template exist + * Check component templates. Returns information about whether a particular component template exists. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -181,7 +181,7 @@ export default class Cluster { } /** - * Retrieves information about component templates. + * Get component templates. Retrieves information about component templates. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -291,7 +291,7 @@ export default class Cluster { } /** - * Returns different information about the cluster. + * Get cluster info. Returns basic information about the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-info.html | Elasticsearch API documentation} */ async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -383,7 +383,7 @@ export default class Cluster { } /** - * Creates or updates a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. + * Create or update a component template. Creates or updates a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. 
Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/create.ts b/src/api/api/create.ts index 29fedfbc0..f130eb4ac 100644 --- a/src/api/api/create.ts +++ b/src/api/api/create.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. + * Index a document. Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation} */ export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts index fd9bb85ee..387a22356 100644 --- a/src/api/api/delete.ts +++ b/src/api/api/delete.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Removes a JSON document from the specified index. + * Delete a document. Removes a JSON document from the specified index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html | Elasticsearch API documentation} */ export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index d7dc4bd44..68776b013 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Deletes documents that match the specified query. + * Delete documents. Deletes documents that match the specified query. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html | Elasticsearch API documentation} */ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts index 2f63e01e1..801d4aae7 100644 --- a/src/api/api/delete_script.ts +++ b/src/api/api/delete_script.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Deletes a stored script or search template. + * Delete a script or search template. Deletes a stored script or search template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} */ export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index b25c42ede..ada26a215 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -45,7 +45,7 @@ export default class Enrich { } /** - * Deletes an existing enrich policy and its enrich index. + * Delete an enrich policy. Deletes an existing enrich policy and its enrich index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-enrich-policy-api.html | Elasticsearch API documentation} */ async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -109,7 +109,7 @@ export default class Enrich { } /** - * Returns information about an enrich policy. + * Get an enrich policy. Returns information about an enrich policy. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-enrich-policy-api.html | Elasticsearch API documentation} */ async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -149,7 +149,7 @@ export default class Enrich { } /** - * Creates an enrich policy. + * Create an enrich policy. Creates an enrich policy. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-enrich-policy-api.html | Elasticsearch API documentation} */ async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -193,7 +193,7 @@ export default class Enrich { } /** - * Returns enrich coordinator statistics and information about enrich policies that are currently executing. + * Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/enrich-stats-api.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts index 4acd561d9..8f5033eb2 100644 --- a/src/api/api/exists.ts +++ b/src/api/api/exists.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Checks if a document in an index exists. + * Check a document. Checks if a specified document exists. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts index 714e62fa7..8c6f14496 100644 --- a/src/api/api/exists_source.ts +++ b/src/api/api/exists_source.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Checks if a document's `_source` is stored. + * Check for a document source. Checks if a document's `_source` is stored. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index 13a744093..a65f6dc7a 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns information about why a specific document matches (or doesn’t match) a query. + * Explain a document match result. Returns information about why a specific document matches, or doesn’t match, a query. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html | Elasticsearch API documentation} */ export default async function ExplainApi (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/get.ts b/src/api/api/get.ts index 0d742a15f..3a64e8f07 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns a document. + * Get a document by its ID. Retrieves the document with the specified ID from an index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function GetApi (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts index f0c396efb..e84a69da1 100644 --- a/src/api/api/get_script.ts +++ b/src/api/api/get_script.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Retrieves a stored script or search template. + * Get a script or search template. Retrieves a stored script or search template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} */ export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index 517452614..79abedad1 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns the source of a document. + * Get a document's source. Returns the source of a document. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/index.ts b/src/api/api/index.ts index 35508c4f9..89fba417f 100644 --- a/src/api/api/index.ts +++ b/src/api/api/index.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. + * Index a document. Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation} */ export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 7fad7c65c..b1cff556b 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -45,7 +45,7 @@ export default class Indices { } /** - * Adds a block to an index. + * Add an index block. Limits the operations allowed on an index by blocking specific operation types. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html | Elasticsearch API documentation} */ async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -247,7 +247,7 @@ export default class Indices { } /** - * Creates a new index. + * Create an index. Creates a new index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html | Elasticsearch API documentation} */ async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -363,7 +363,7 @@ export default class Indices { } /** - * Deletes one or more indices. + * Delete indices. Deletes one or more indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html | Elasticsearch API documentation} */ async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -395,7 +395,7 @@ export default class Indices { } /** - * Removes a data stream or index from an alias. + * Delete an alias. Removes a data stream or index from an alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -633,7 +633,7 @@ export default class Indices { } /** - * Checks if a data stream, index, or alias exists. + * Check indices. Checks if one or more indices, index aliases, or data streams exist. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html | Elasticsearch API documentation} */ async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -665,7 +665,7 @@ export default class Indices { } /** - * Checks if an alias exists. + * Check aliases. Checks if one or more data stream or index aliases exist. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -769,7 +769,7 @@ export default class Indices { } /** - * Retrieves information about the index's current data stream lifecycle, such as any potential encountered error, time since creation etc. + * Get the status for a data stream lifecycle. Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-explain-lifecycle.html | Elasticsearch API documentation} */ async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -913,7 +913,7 @@ export default class Indices { } /** - * Returns information about one or more indices. For data streams, the API returns information about the stream’s backing indices. + * Get index information. Returns information about one or more indices. For data streams, the API returns information about the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html | Elasticsearch API documentation} */ async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -945,7 +945,7 @@ export default class Indices { } /** - * Retrieves information for one or more aliases. + * Get aliases. Retrieves information for one or more data stream or index aliases. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1064,7 +1064,7 @@ export default class Indices { } /** - * Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. + * Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html | Elasticsearch API documentation} */ async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1144,7 +1144,7 @@ export default class Indices { } /** - * Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. + * Get mapping definitions. 
Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html | Elasticsearch API documentation} */ async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1184,7 +1184,7 @@ export default class Indices { } /** - * Returns setting information for one or more indices. For data streams, returns setting information for the stream’s backing indices. + * Get index settings. Returns setting information for one or more indices. For data streams, returns setting information for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1408,7 +1408,7 @@ export default class Indices { } /** - * Adds a data stream or index to an alias. + * Create or update an alias. Adds a data stream or index to an alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1548,7 +1548,7 @@ export default class Indices { } /** - * Adds new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields. For data streams, these changes are applied to all backing indices by default. + * Update field mappings. Adds new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields. For data streams, these changes are applied to all backing indices by default. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html | Elasticsearch API documentation} */ async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1592,7 +1592,7 @@ export default class Indices { } /** - * Changes a dynamic index setting in real time. For data streams, index setting changes are applied to all backing indices by default. + * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html | Elasticsearch API documentation} */ async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1720,7 +1720,7 @@ export default class Indices { } /** - * A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. + * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html | Elasticsearch API documentation} */ async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1856,7 +1856,7 @@ export default class Indices { } /** - * Creates a new index for a data stream or index alias. + * Roll over to a new index. Creates a new index for a data stream or index alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html | Elasticsearch API documentation} */ async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2241,7 +2241,7 @@ export default class Indices { } /** - * Adds a data stream or index to an alias. + * Create or update an alias. Adds a data stream or index to an alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2283,7 +2283,7 @@ export default class Indices { } /** - * Validates a potentially expensive query without executing it. + * Validate a query. Validates a query without running it. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html | Elasticsearch API documentation} */ async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/info.ts b/src/api/api/info.ts index a6ac8226d..83ce76773 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns basic information about the cluster. + * Get cluster info. Returns basic information about the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} */ export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/license.ts b/src/api/api/license.ts index 9952f9f29..4d12f0806 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -75,7 +75,7 @@ export default class License { } /** - * This API returns information about the type of license, when it was issued, and when it expires, for example. For more information about the different types of licenses, see https://www.elastic.co/subscriptions. + * Get license information. Returns information about your Elastic license, including its type, its status, when it was issued, and when it expires. For more information about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html | Elasticsearch API documentation} */ async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts index e0a8011df..9d07552f0 100644 --- a/src/api/api/ping.ts +++ b/src/api/api/ping.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns whether the cluster is running. + * Ping the cluster. Returns whether the cluster is running. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} */ export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts index e7dbfc87e..94c3449d9 100644 --- a/src/api/api/put_script.ts +++ b/src/api/api/put_script.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Creates or updates a stored script or search template. + * Create or update a script or search template. Creates or updates a stored script or search template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} */ export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 91c6e14d3..69d23a4f6 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Allows to copy documents from one index to another, optionally filtering the source documents by a query, changing the destination index settings, or fetching the documents from a remote cluster. + * Reindex documents. Copies documents from a source to a destination. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html | Elasticsearch API documentation} */ export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index 33b66e3bd..a1a9fa0b5 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Runs a script and returns a result. + * Run a script. Runs a script and returns a result. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html | Elasticsearch API documentation} */ export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index e81f4b2da..6d2f125b8 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Searches a vector tile for geospatial values. Returns results as a binary Mapbox vector tile. + * Search a vector tile. Searches a vector tile for geospatial values. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-vector-tile-api.html | Elasticsearch API documentation} */ export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/security.ts b/src/api/api/security.ts index a810764df..b5e25f38a 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -86,7 +86,7 @@ export default class Security { } /** - * Enables you to submit a request with a basic auth header to authenticate a user and retrieve information about the authenticated user. A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. + * Authenticate a user. Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-authenticate.html | Elasticsearch API documentation} */ async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -441,7 +441,7 @@ export default class Security { } /** - * Creates an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. + * Create an API key. Creates an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. 
diff --git a/src/api/api/security.ts b/src/api/api/security.ts
index a810764df..b5e25f38a 100644
--- a/src/api/api/security.ts
+++ b/src/api/api/security.ts
@@ -86,7 +86,7 @@ export default class Security {
   }

   /**
-   * Enables you to submit a request with a basic auth header to authenticate a user and retrieve information about the authenticated user. A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code.
+   * Authenticate a user. Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-authenticate.html | Elasticsearch API documentation}
    */
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -441,7 +441,7 @@ export default class Security {
   }

   /**
-   * Creates an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.
+   * Create an API key. Creates an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-api-key.html | Elasticsearch API documentation}
    */
   async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -904,7 +904,7 @@ export default class Security {
   }

   /**
-   * Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.
+   * Get API key information. Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-api-key.html | Elasticsearch API documentation}
    */
   async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1379,7 +1379,7 @@ export default class Security {
   }

   /**
-   * Determines whether the specified user has a specified list of privileges.
+   * Check user privileges. Determines whether the specified user has a specified list of privileges.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-has-privileges.html | Elasticsearch API documentation}
    */
   async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1472,7 +1472,7 @@ export default class Security {
   }

   /**
-   * Invalidates one or more API keys. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` only allows deleting API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user’s identity. - Or, if the request is issued by an API key, i.e. an API key invalidates itself, specify its ID in the `ids` field.
+   * Invalidate API keys. Invalidates one or more API keys. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` only allows deleting API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user’s identity. - Or, if the request is issued by an API key, i.e. an API key invalidates itself, specify its ID in the `ids` field.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-api-key.html | Elasticsearch API documentation}
    */
   async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1809,7 +1809,7 @@ export default class Security {
   }

   /**
-   * Retrieves information for API keys in a paginated manner. You can optionally filter the results with a query.
+   * Query API keys. Retrieves a paginated list of API keys and their information. You can optionally filter the results with a query.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-api-key.html | Elasticsearch API documentation}
    */
   async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -2215,7 +2215,7 @@ export default class Security {
   }

   /**
-   * Updates attributes of an existing API key. Users can only update API keys that they created or that were granted to them. Use this API to update API keys created by the create API Key or grant API Key APIs. If you need to apply the same update to many API keys, you can use bulk update API Keys to reduce overhead. It’s not possible to update expired API keys, or API keys that have been invalidated by invalidate API Key. This API supports updates to an API key’s access scope and metadata. The access scope of an API key is derived from the `role_descriptors` you specify in the request, and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call. If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It’s not possible to use an API key as the authentication credential for this API. To update an API key, the owner user’s credentials are required.
+   * Update an API key. Updates attributes of an existing API key. Users can only update API keys that they created or that were granted to them. Use this API to update API keys created by the create API Key or grant API Key APIs. If you need to apply the same update to many API keys, you can use bulk update API Keys to reduce overhead. It’s not possible to update expired API keys, or API keys that have been invalidated by invalidate API Key. This API supports updates to an API key’s access scope and metadata. The access scope of an API key is derived from the `role_descriptors` you specify in the request, and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call. If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It’s not possible to use an API key as the authentication credential for this API. To update an API key, the owner user’s credentials are required.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-api-key.html | Elasticsearch API documentation}
    */
   async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise
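The API-key methods documented above form a full lifecycle. A minimal sketch, assuming an authenticated client; the key name, expiration, and metadata values are illustrative:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/', auth: { apiKey: process.env.ES_API_KEY ?? '' } })

// Create a key; the response includes its id, name, and generated secret.
const created = await client.security.createApiKey({
  name: 'my-app-key',
  expiration: '30d', // keys never expire unless an expiration is set
  metadata: { application: 'my-app' }
})

// Fetch it by id, then invalidate it once it is no longer needed.
await client.security.getApiKey({ id: created.id })
await client.security.invalidateApiKey({ ids: [created.id] })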
diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts
index 8f54ef0e4..0cb901686 100644
--- a/src/api/api/tasks.ts
+++ b/src/api/api/tasks.ts
@@ -85,7 +85,7 @@ export default class Tasks {
   }

   /**
-   * Returns information about a task.
+   * Get task information. Returns information about the tasks currently executing in the cluster.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation}
    */
   async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts
index 956c4df14..3e8c12034 100644
--- a/src/api/api/termvectors.ts
+++ b/src/api/api/termvectors.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

 /**
- * Returns information and statistics about terms in the fields of a particular document.
+ * Get term vector information. Returns information and statistics about terms in the fields of a particular document.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html | Elasticsearch API documentation}
  */
 export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
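For the term-vectors endpoint above, a short sketch; the index, document id, and field name are illustrative:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Term frequencies, positions, and (optionally) collection-wide statistics
// for the "text" field of document 1.
const tv = await client.termvectors({
  index: 'my-index',
  id: '1',
  fields: ['text'],
  term_statistics: true
})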
diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts
index 2e3baa8ae..e1b9dc6bc 100644
--- a/src/api/api/transform.ts
+++ b/src/api/api/transform.ts
@@ -45,7 +45,7 @@ export default class Transform {
   }

   /**
-   * Deletes a transform.
+   * Delete a transform. Deletes a transform.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-transform.html | Elasticsearch API documentation}
    */
   async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -106,7 +106,7 @@ export default class Transform {
   }

   /**
-   * Retrieves configuration information for transforms.
+   * Get transforms. Retrieves configuration information for transforms.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform.html | Elasticsearch API documentation}
    */
   async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -146,7 +146,7 @@ export default class Transform {
   }

   /**
-   * Retrieves usage information for transforms.
+   * Get transform stats. Retrieves usage information for transforms.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform-stats.html | Elasticsearch API documentation}
    */
   async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -178,7 +178,7 @@ export default class Transform {
   }

   /**
-   * Previews a transform. It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations.
+   * Preview a transform. Generates a preview of the results that you will get when you create a transform with the same configuration. It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/preview-transform.html | Elasticsearch API documentation}
    */
   async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
@@ -230,7 +230,7 @@ export default class Transform {
   }

   /**
-   * Creates a transform. A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices.
+   * Create a transform. Creates a transform. A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-transform.html | Elasticsearch API documentation}
    */
   async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -274,7 +274,7 @@ export default class Transform {
   }

   /**
-   * Resets a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted.
+   * Reset a transform. Resets a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/reset-transform.html | Elasticsearch API documentation}
    */
   async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -306,7 +306,7 @@ export default class Transform {
   }

   /**
-   * Schedules now a transform. If you _schedule_now a transform, it will process the new data instantly, without waiting for the configured frequency interval. After _schedule_now API is called, the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime.
+   * Schedule a transform to start now. Instantly runs a transform to process data. If you _schedule_now a transform, it will process the new data instantly, without waiting for the configured frequency interval. After _schedule_now API is called, the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/schedule-now-transform.html | Elasticsearch API documentation}
    */
   async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -338,7 +338,7 @@ export default class Transform {
   }

   /**
-   * Starts a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations.
+   * Start a transform. Starts a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-transform.html | Elasticsearch API documentation}
    */
   async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -370,7 +370,7 @@ export default class Transform {
   }

   /**
-   * Stops one or more transforms.
+   * Stop transforms. Stops one or more transforms.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-transform.html | Elasticsearch API documentation}
    */
   async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -402,7 +402,7 @@ export default class Transform {
   }

   /**
-   * Updates certain properties of a transform. All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges.
+   * Update a transform. Updates certain properties of a transform. All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-transform.html | Elasticsearch API documentation}
    */
   async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise
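The transform methods above map onto a create/start/stop lifecycle. A minimal pivot-transform sketch, assuming the source index exists and the caller holds the privileges the comments describe; all names and field values are illustrative:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.transform.putTransform({
  transform_id: 'orders-by-customer',
  source: { index: 'orders' },
  dest: { index: 'orders-summary' },
  pivot: {
    group_by: { customer: { terms: { field: 'customer_id' } } },
    aggregations: { total: { sum: { field: 'price' } } }
  },
  sync: { time: { field: 'timestamp' } } // makes this a continuous transform
})

await client.transform.startTransform({ transform_id: 'orders-by-customer' })
// ...and later, once it is no longer needed:
await client.transform.stopTransform({ transform_id: 'orders-by-customer', wait_for_completion: true })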
diff --git a/src/api/api/update.ts b/src/api/api/update.ts
index 1f234cda4..0dd6f4220 100644
--- a/src/api/api/update.ts
+++ b/src/api/api/update.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

 /**
- * Updates a document with a script or partial document.
+ * Update a document. Updates a document by running a script or passing a partial document.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html | Elasticsearch API documentation}
  */
 export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts
index 3e6725baa..64d5c95ef 100644
--- a/src/api/api/update_by_query.ts
+++ b/src/api/api/update_by_query.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

 /**
- * Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
+ * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html | Elasticsearch API documentation}
  */
 export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise
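The two update endpoints above differ only in how documents are addressed: update targets a single document by id, while updateByQuery targets every match of a query. A minimal sketch; index, id, and field values are illustrative:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Partial-document update of one document.
await client.update({
  index: 'my-index',
  id: '1',
  doc: { status: 'archived' }
})

// Scripted update of every matching document; with no query this would
// touch all documents, which is how mapping changes get picked up.
await client.updateByQuery({
  index: 'my-index',
  query: { term: { status: 'stale' } },
  script: { source: "ctx._source.status = 'archived'" }
})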
diff --git a/src/api/types.ts b/src/api/types.ts
index db34c036c..d9940f8f3 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -9082,14 +9082,15 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase {

 export interface ConnectorConnector {
   api_key_id?: string
+  api_key_secret_id?: string
   configuration: ConnectorConnectorConfiguration
   custom_scheduling: ConnectorConnectorCustomScheduling
   description?: string
-  error?: string
+  error?: string | null
   features?: ConnectorConnectorFeatures
   filtering: ConnectorFilteringConfig[]
   id?: Id
-  index_name?: IndexName
+  index_name?: IndexName | null
   is_native: boolean
   language?: string
   last_access_control_sync_error?: string
@@ -9106,8 +9107,9 @@ export interface ConnectorConnector {
   name?: string
   pipeline?: ConnectorIngestPipelineParams
   scheduling: ConnectorSchedulingConfiguration
-  service_type: string
+  service_type?: string
   status: ConnectorConnectorStatus
+  sync_cursor?: any
   sync_now: boolean
 }
@@ -9122,11 +9124,11 @@ export interface ConnectorConnectorConfigProperties {
   placeholder?: string
   required: boolean
   sensitive: boolean
-  tooltip?: string
+  tooltip?: string | null
   type: ConnectorConnectorFieldType
   ui_restrictions: string[]
   validations: ConnectorValidation[]
-  value: ScalarValue
+  value: any
 }

 export type ConnectorConnectorConfiguration = Record
@@ -9135,9 +9137,8 @@ export type ConnectorConnectorCustomScheduling = Record
+  last_access_control_sync_error?: string
   last_access_control_sync_scheduled_at?: DateTime
   last_access_control_sync_status?: ConnectorSyncStatus
   last_deleted_document_count?: long
   last_incremental_sync_scheduled_at?: DateTime
   last_indexed_document_count?: long
-  last_seen?: SpecUtilsWithNullValue
-  last_sync_error?: SpecUtilsWithNullValue
+  last_seen?: DateTime
+  last_sync_error?: string
   last_sync_scheduled_at?: DateTime
   last_sync_status?: ConnectorSyncStatus
   last_synced?: DateTime
+  sync_cursor?: any
 }

 export interface ConnectorLastSyncResponse {
@@ -9365,7 +9368,7 @@ export interface ConnectorListResponse {

 export interface ConnectorPostRequest extends RequestBase {
   description?: string
-  index_name: SpecUtilsWithNullValue
+  index_name?: IndexName
   is_native?: boolean
   language?: string
   name?: string
 }

 export interface ConnectorPostResponse {
+  result: Result
   id: Id
 }

 export interface ConnectorPutRequest extends RequestBase {
-  connector_id: Id
+  connector_id?: Id
   description?: string
-  index_name: SpecUtilsWithNullValue
+  index_name?: IndexName
   is_native?: boolean
   language?: string
   name?: string
 }

 export interface ConnectorPutResponse {
   result: Result
+  id: Id
 }

 export interface ConnectorSyncJobCancelRequest extends RequestBase {
@@ -9415,7 +9420,7 @@ export interface ConnectorSyncJobListRequest extends RequestBase {
   size?: integer
   status?: ConnectorSyncStatus
   connector_id?: Id
-  job_type?: ConnectorSyncJobType[]
+  job_type?: ConnectorSyncJobType | ConnectorSyncJobType[]
 }

 export interface ConnectorSyncJobListResponse {
@@ -9443,8 +9448,8 @@ export interface ConnectorUpdateActiveFilteringResponse {

 export interface ConnectorUpdateApiKeyIdRequest extends RequestBase {
   connector_id: Id
-  api_key_id?: SpecUtilsWithNullValue
-  api_key_secret_id?: SpecUtilsWithNullValue
+  api_key_id?: string
+  api_key_secret_id?: string
 }

 export interface ConnectorUpdateApiKeyIdResponse {
@@ -9501,7 +9506,7 @@ export interface ConnectorUpdateIndexNameResponse {

 export interface ConnectorUpdateNameRequest extends RequestBase {
   connector_id: Id
-  name: string
+  name?: string
   description?: string
 }

@@ -13868,7 +13873,7 @@ export interface MlTrainedModelPrefixStrings {

 export interface MlTrainedModelSizeStats {
   model_size_bytes: ByteSize
-  required_native_memory_bytes: integer
+  required_native_memory_bytes: ByteSize
 }

 export interface MlTrainedModelStats {
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index 71b7922bd..1e2d44d87 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -9183,14 +9183,15 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase {

 export interface ConnectorConnector {
   api_key_id?: string
+  api_key_secret_id?: string
   configuration: ConnectorConnectorConfiguration
   custom_scheduling: ConnectorConnectorCustomScheduling
   description?: string
-  error?: string
+  error?: string | null
   features?: ConnectorConnectorFeatures
   filtering: ConnectorFilteringConfig[]
   id?: Id
-  index_name?: IndexName
+  index_name?: IndexName | null
   is_native: boolean
   language?: string
   last_access_control_sync_error?: string
@@ -9207,8 +9208,9 @@ export interface ConnectorConnector {
   name?: string
   pipeline?: ConnectorIngestPipelineParams
   scheduling: ConnectorSchedulingConfiguration
-  service_type: string
+  service_type?: string
   status: ConnectorConnectorStatus
+  sync_cursor?: any
   sync_now: boolean
 }
@@ -9223,11 +9225,11 @@ export interface ConnectorConnectorConfigProperties {
   placeholder?: string
   required: boolean
   sensitive: boolean
-  tooltip?: string
+  tooltip?: string | null
   type: ConnectorConnectorFieldType
   ui_restrictions: string[]
   validations: ConnectorValidation[]
-  value: ScalarValue
+  value: any
 }

 export type ConnectorConnectorConfiguration = Record
@@ -9236,9 +9238,8 @@ export type ConnectorConnectorCustomScheduling = Record
+  last_access_control_sync_error?: string
   last_access_control_sync_scheduled_at?: DateTime
   last_access_control_sync_status?: ConnectorSyncStatus
   last_deleted_document_count?: long
   last_incremental_sync_scheduled_at?: DateTime
   last_indexed_document_count?: long
-  last_seen?: SpecUtilsWithNullValue
-  last_sync_error?: SpecUtilsWithNullValue
+  last_seen?: DateTime
+  last_sync_error?: string
   last_sync_scheduled_at?: DateTime
   last_sync_status?: ConnectorSyncStatus
   last_synced?: DateTime
+  sync_cursor?: any
   }
 }

@@ -9471,7 +9474,7 @@ export interface ConnectorPostRequest extends RequestBase {
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     description?: string
-    index_name: SpecUtilsWithNullValue
+    index_name?: IndexName
     is_native?: boolean
     language?: string
     name?: string
@@ -9480,15 +9483,16 @@
   }
 }

 export interface ConnectorPostResponse {
+  result: Result
   id: Id
 }

 export interface ConnectorPutRequest extends RequestBase {
-  connector_id: Id
+  connector_id?: Id
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     description?: string
-    index_name: SpecUtilsWithNullValue
+    index_name?: IndexName
     is_native?: boolean
     language?: string
     name?: string
@@ -9498,6 +9502,7 @@
   }
 }

 export interface ConnectorPutResponse {
   result: Result
+  id: Id
 }

 export interface ConnectorSyncJobCancelRequest extends RequestBase {
@@ -9525,7 +9530,7 @@ export interface ConnectorSyncJobListRequest extends RequestBase {
   size?: integer
   status?: ConnectorSyncStatus
   connector_id?: Id
-  job_type?: ConnectorSyncJobType[]
+  job_type?: ConnectorSyncJobType | ConnectorSyncJobType[]
 }

 export interface ConnectorSyncJobListResponse {
@@ -9558,8 +9563,8 @@ export interface ConnectorUpdateApiKeyIdRequest extends RequestBase {
   connector_id: Id
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
-    api_key_id?: SpecUtilsWithNullValue
-    api_key_secret_id?: SpecUtilsWithNullValue
+    api_key_id?: string
+    api_key_secret_id?: string
   }
 }

@@ -9634,7 +9639,7 @@ export interface ConnectorUpdateNameRequest extends RequestBase {
   connector_id: Id
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
-    name: string
+    name?: string
     description?: string
   }
 }

@@ -14103,7 +14108,7 @@ export interface MlTrainedModelPrefixStrings {

 export interface MlTrainedModelSizeStats {
   model_size_bytes: ByteSize
-  required_native_memory_bytes: integer
+  required_native_memory_bytes: ByteSize
 }

 export interface MlTrainedModelStats {
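The connector type changes in the patch above mostly relax required fields: connector_id and index_name become optional, service_type becomes optional, and both post and put now report a result. A sketch of the corresponding calls, assuming the connector APIs are available in this client build; the names are illustrative:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// index_name may now be omitted (or null) when registering a connector.
const posted = await client.connector.post({
  name: 'my-gdrive-connector',
  service_type: 'google_drive'
})

// put now accepts an optional connector_id and returns { result, id }.
await client.connector.put({
  connector_id: posted.id,
  name: 'my-gdrive-connector',
  index_name: 'search-gdrive'
})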
*/ body?: { - name: string + name?: string description?: string } } @@ -14103,7 +14108,7 @@ export interface MlTrainedModelPrefixStrings { export interface MlTrainedModelSizeStats { model_size_bytes: ByteSize - required_native_memory_bytes: integer + required_native_memory_bytes: ByteSize } export interface MlTrainedModelStats { From f737290d107ebce0341c9ab7c0e145302a36c43f Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 29 Jul 2024 17:10:05 -0500 Subject: [PATCH 368/647] Added/updated snippets for docs (#2318) --- .../00272f75a6afea91f8554ef7cda0c1f2.asciidoc | 10 + .../004743b9c9f61588926ccf734696b713.asciidoc | 11 + ...004a17b42ab5155bb61da797a006fa9f.asciidoc} | 26 +- .../006e0e16c9f1da58c0bfe57377f7fc38.asciidoc | 20 + .../007179b5e241da650562a5f0a5007823.asciidoc | 40 ++ .../008ed823c89e703c447ac89c6b689833.asciidoc | 8 + ...0091fc75271b1fbbd4269622a4881e8b.asciidoc} | 21 +- .../00b3b6d76a368ae71277ea24af318693.asciidoc | 8 + .../00c05aa931fc985985e3e21c93cf43ff.asciidoc | 15 + .../00d65f7b9daa1c6b18eedd8ace206bae.asciidoc | 12 + .../00e0c964c79fcc1876ab957da2ffce82.asciidoc | 67 ++ .../00fea15cbca83be9d5f1a024ff2ec708.asciidoc | 19 + .../010d5e901a2690fa7b2396edbe6cd463.asciidoc | 13 + .../0163af36c8472ac0c5160c8b716f5b26.asciidoc | 24 + ...016f3147dae9ff2c3e831257ae470361.asciidoc} | 23 +- .../019e329ed5a930aef825266822e7377a.asciidoc | 26 + .../01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc | 10 + ...01da9e0620e48270617fc248e6415cac.asciidoc} | 22 +- .../01dc7bdc223bd651574ed2d3954a5b1c.asciidoc | 10 + .../01f50acf7998b24969f451e922d145eb.asciidoc | 39 ++ .../020c95db88ef356093f03be84893ddf9.asciidoc | 10 + .../020de6b6cb960a76297452725a38889f.asciidoc | 20 + .../0246f73cc2ed3dfec577119e8cd15404.asciidoc | 19 + .../025155da86802ebf4c3aeee5aab692f9.asciidoc | 28 + .../02520ac7816b2c4cf8fb413fd16122f2.asciidoc | 11 + .../0264e994a7e68561e2ca6be0f0d90ee9.asciidoc | 23 + .../0280247e0cf2e561c548f22c9fb31163.asciidoc | 10 + .../02853293a5b7cd9cc7a886eb413bbeb6.asciidoc | 28 + .../028f6d6ac2594e20b78b8a8f8cbad49d.asciidoc | 43 -- .../029de2f5383a42e1ac4ca1565bd2a130.asciidoc | 21 + .../02b00f21e9d23d82276ace0dd154d779.asciidoc | 16 + ...02b6aa3e5652839f03de3a655854b897.asciidoc} | 8 +- .../02c48d461536709c3fc8a0e8147c3787.asciidoc | 24 + .../02f65c6bab8f40bf3ce18160623d1870.asciidoc | 10 + .../02fad6b80bb29c2a7e6840db2fc67b18.asciidoc | 38 ++ ...0308cbd85281f95fc458042afe3f587d.asciidoc} | 11 +- .../032eac56b798bea29390e102538f4a26.asciidoc | 10 + .../033778305d52746f5ce0a2a922c8e521.asciidoc | 22 - .../033838729cfb5d1a28d04f69ee78d924.asciidoc | 26 + .../0350410d11579f4e876c798ce1eaef5b.asciidoc | 36 ++ .../0350ff5ebb8207c004eb771088339cb4.asciidoc | 38 ++ .../03582fc93683e573062bcfda45e01d69.asciidoc | 29 + .../035a7a919eb6513b4769a3727b7d6447.asciidoc | 11 + .../03891265df2111a38e0b6b24c1b967e1.asciidoc | 8 + .../03b1d76fa0b773d5b7d74ecb7e1e1a80.asciidoc | 14 + .../03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc | 11 + .../04412d11783dac25b5fd2ec5407078a3.asciidoc | 15 + .../044b2f99e7438e408685b258db17f863.asciidoc | 12 + .../046b2249bbc49e77848c114cee940f17.asciidoc | 52 ++ .../0470d7101637568b9d3d1239f06325a7.asciidoc | 27 + .../047266b0d20fdb62ebc72d51952c8f6d.asciidoc | 26 +- .../048652b6abfe195da8ea8cef10ee01b1.asciidoc | 10 + .../048d8abd42d094bbdcf4452a58ccb35b.asciidoc | 17 - .../04d586a536061ec1045d0bb2dc3d1a5f.asciidoc | 31 + .../04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc | 27 + .../04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc | 20 + .../04f5dd677c777bcb15d7d5fa63275fc8.asciidoc | 9 
+- .../04fe1e3a0047b0cdb10987b79fc3f3f3.asciidoc | 30 - .../0502284d4685c478eb68761f979f4303.asciidoc | 37 ++ .../050b3947025fee403232b8e6e9112dab.asciidoc | 12 + .../05148cc541f447486d9daf15ab77292b.asciidoc | 54 ++ .../0518c673094fb18ecb491a3b78af4695.asciidoc | 23 + .../05284c8ea91769c09c8db47db8a6629a.asciidoc | 10 + .../053497b6960f80fd7b005b7c6d54358f.asciidoc | 19 + .../05500e77aef581d92f6c605f7a48f7df.asciidoc | 24 + .../059e04aaf093379401f665c33ac796dc.asciidoc | 20 + .../05a09078fe1016e900e445ad4039cf97.asciidoc | 78 +++ .../05ba0fdd0215e313ecea8a2f8f5a43b4.asciidoc | 10 + .../05bee3adf46b9d6a2fef96c51bf958da.asciidoc | 21 + .../05f4a4b284f68f7fb13603d7cd854083.asciidoc | 17 + .../05f6049c677a156bdf9b83e71a3b87ed.asciidoc | 8 + .../0601b5cb5328c9ebff30f4be1b210f93.asciidoc | 11 + .../060a56477e39f272fc5a9cfe47443cf1.asciidoc | 32 + ...0620a10ff15a2bb3eb489afc24ff0131.asciidoc} | 9 +- .../06454a8e85e2d3479c90390bb955eb39.asciidoc | 12 + .../066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc | 16 + .../069030e5f43d8f8ce3e3eca40205027e.asciidoc | 58 ++ .../06a761823a694850a6efe5d5bf61478c.asciidoc | 15 + .../06afce2955f9094d96d27067ebca32e8.asciidoc | 48 -- .../06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc | 28 + .../06c0db0f42223761e32fa418066b275f.asciidoc | 19 + ...06d65e3505dcb306977185e8545cf4a8.asciidoc} | 13 +- .../070cf72783cfe534a04f2f64e4016052.asciidoc | 24 + .../0718a0b4f4905a8c90c1ff93de557e56.asciidoc | 19 + .../0721c8adec544d5ecea3fcc410e45feb.asciidoc | 12 + .../0737ebaea33631f001fb3f4226948492.asciidoc | 21 + .../073864d3f52f8f79aafdaa85a88ac46a.asciidoc | 10 + .../0755471d7dce4785d2e7ed0c10182ea3.asciidoc | 10 + .../07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc | 12 + .../07ba3eaa931f2cf110052e3544db51f8.asciidoc | 22 + .../07c07f6d497b1a3012aa4320f830e09e.asciidoc | 14 + .../07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc | 16 + .../07de76cb0e7f11c7533788faf8c093c3.asciidoc | 20 + .../07ec38b97601286ec106986a84e1e5f7.asciidoc | 23 + .../080c34d8151d02b760571e3a2899fa97.asciidoc | 27 + .../083e514297c09e91211f0d168aef1b0b.asciidoc | 22 + .../086ec4c5d86bbf80fb80162e94037689.asciidoc | 49 ++ .../0881397074d261ccc2db514daf116c31.asciidoc | 11 + .../08a76b3f5a8394d8f9084113334a260a.asciidoc | 19 + .../08c9af9dd519c011deedd406f3061836.asciidoc | 46 ++ .../08e08feb514b24006e13f258d617d873.asciidoc | 10 + .../08e79ca9fdcdfebb2c6a79e6837e649d.asciidoc | 19 + .../08f20902821a4f7a73ce7b959c5bdbdc.asciidoc | 20 + .../091200b658023db31dffc2f08a85a9cc.asciidoc | 15 + ...0957bbd535f58c97b12ffba90813d64c.asciidoc} | 15 +- .../095d60b2cfc5004c97efc49f27287262.asciidoc | 19 + .../095e3f21941a9cc75f398389a075152d.asciidoc | 24 + .../09769561f082b50558fb7d8707719963.asciidoc | 11 + .../0989cc65d8924f666ce3eb0820d2d244.asciidoc | 24 - .../099006ab11b52ea99693401dceee8bad.asciidoc | 14 + .../09944369863fd8666d5301d717317276.asciidoc | 20 + .../09a44b619a99f6bf3f01bd5e258fd22d.asciidoc | 11 + .../09a478fe32a7b7d814083ffa5297bcdf.asciidoc | 16 + .../09bdf9a7e22733d668476724042a406c.asciidoc | 19 + .../09cb1b18bf4033b4afafb25bd3dab12c.asciidoc | 22 + .../09cdd5ae8114c49886026fef8d00a19c.asciidoc | 14 - .../09ce0ec993c494ac01f01ef9815fcc4b.asciidoc | 17 + .../09d617863a103c82fb4101e6165ea7fe.asciidoc | 13 +- .../09e6e06ba562f4b9bac59455e9151a80.asciidoc | 20 + .../09ecba5814d71e4c44468575eada9878.asciidoc | 22 - .../0a3003fa5af850e415634b50b1029859.asciidoc | 16 + .../0a3186bf20b5359393406fc0cb433313.asciidoc | 13 + .../0a46ac2968a574ce145f197f10d30152.asciidoc | 46 ++ 
...0a46cc8fe93e372909660a63dc52ae3b.asciidoc} | 21 +- .../0a650401134f07e40216f0d0d1a66a32.asciidoc | 10 + .../0a6d56a66a2652ac6de68f8bd544a175.asciidoc | 24 + .../0a701bdc7b6786026f40c0be8ebfc753.asciidoc | 25 + ...0a758d9dec74d9e942cf41a06499234f.asciidoc} | 17 +- .../0a84c5b7c0793be745b13eaf13e94422.asciidoc | 15 + .../0a9173f3b22716c78653976dc4799eae.asciidoc | 25 + .../0a958e486ede3f519d48431ab689eded.asciidoc | 24 - .../0ac295efdabd59e7b1f1a4577535d942.asciidoc | 12 + .../0ac9916f47a2483b89c1416684af322a.asciidoc | 21 - .../0ac9e7dd7e4acba51888256326ed5ffe.asciidoc | 16 + .../0ad86b582aff1235f37ccb2cc90adad5.asciidoc | 10 + .../0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc | 21 + .../0adbce828234ca221e3d03b184296407.asciidoc | 17 + .../0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc | 31 + .../0afaf1cad692e6201aa574c8feb6e622.asciidoc | 21 - .../0aff04881be21eea45375ec4f4f50e66.asciidoc | 10 + .../0b1c5486f96bfa5db8db854c0178dbe5.asciidoc | 18 + .../0b47b0bef81b9b5eecfb3775695bd6ad.asciidoc | 13 + .../0b615ff4ef5a8847ee8109b2fd11619a.asciidoc | 25 + .../0b8fa90bc9aeeadb420ad785bd0b9953.asciidoc | 24 + .../0b913fb9e010d877c0be015519cfddc6.asciidoc | 42 ++ .../0b987b4101e016653a32d7b092d47e4c.asciidoc | 34 + .../0ba5acede9d43af424e85428e7d35420.asciidoc | 21 + .../0bbd30b9be3e54ff3028b9f4459634d2.asciidoc | 22 - .../0bcd380315ef4691b8c79df6ca53a85f.asciidoc | 21 + .../0bd3923424a20a4ba860b0774b9991b1.asciidoc | 39 -- .../0bef1fdefeb2956d60d52d3f38397cad.asciidoc | 15 + .../0c05c66cfe3a2169b1ec1aba77e26db2.asciidoc | 16 + .../0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc | 17 + .../0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc | 11 + .../0c464965126cc09e6812716a145991d4.asciidoc | 10 + .../0c688eecf4ebdffdbe1deae0983c3ed8.asciidoc | 31 + .../0c6f9c9da75293fae69659ac1d6329de.asciidoc | 10 + .../0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc | 26 + .../0c7c40cd17985c3dd32aeaadbafc4fce.asciidoc | 19 + ...0c892d328b73d38396aaef6d9cbcd36b.asciidoc} | 11 +- .../0ca6aae1ab2f0be6127beea8a245374e.asciidoc | 16 + .../0cee58617e75f493c5049d77be1c49f3.asciidoc | 21 + .../0cf29da4b9f0503bd1a79bdc883aadbc.asciidoc | 29 + .../0d0f7ece06f21e624d21b09804732f61.asciidoc | 19 + .../0d49474511b236bc89e768c8ee91adf1.asciidoc | 16 + .../0d54ddad2bf6f76aa5c35f53ba77748a.asciidoc | 12 + .../0d59af9dc556dc526b9394051efa800a.asciidoc | 10 + .../0d664883151008b1051ef2c9ab2d0373.asciidoc | 34 - .../0d8063b484a18f8672fb5ed8712c5c97.asciidoc | 23 + .../0d94d76b7f00d0459d1f8c962c144dcd.asciidoc | 42 ++ .../0da477cb8a7883539ce3ae7ac1e9c5cb.asciidoc | 20 + .../0da747e9d98bae157d3520ff1b489ad4.asciidoc | 17 + .../0db06c3cba57cf442ac7fab89966e1e1.asciidoc | 29 + .../0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc | 11 + .../0ddf705317d9c5095b4a1419a2e3bace.asciidoc | 8 + .../0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc | 12 + .../0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc | 10 + .../0e0d8f652d7d29371b5ea7c7544385eb.asciidoc | 22 + .../0e118857b815b62118a30c042f079db1.asciidoc | 24 +- .../0e3abd15dde97a2334621190c4ad4f96.asciidoc | 21 + .../0e3b4a48a3450cd99c95ec46d4701b58.asciidoc | 42 ++ .../0e5d25c7bb738c42d471020d678e2966.asciidoc | 11 + .../0e5db64154a722a5cbdb84b588ce2ce8.asciidoc | 30 + .../0e71a18d1aac61720cdc6b3f91fe643f.asciidoc | 15 + .../0e83f140237d75469a428ff403564bb5.asciidoc | 15 + .../0e84bb54b8a9a5387f252eeffeb1098e.asciidoc | 36 ++ .../0ea146b178561bc8b9002bed8a35641f.asciidoc | 10 + .../0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc | 37 ++ .../0eae571e9e1c40a40cb4b1c9530a8987.asciidoc | 11 + .../0eb2c1284a9829224913a860190580d8.asciidoc | 
20 + .../0ec2178fb0103862b47cc20bc5885972.asciidoc | 17 + .../0eccea755bd4f6dd47579a9022690546.asciidoc | 19 + .../0f2e5e006b663a88ee99b130ab1b4844.asciidoc | 26 + ...0f3a78296825d507dda6771f7ceb9d61.asciidoc} | 13 +- .../0f4583c56cfe5bd59eeb35bfba02957c.asciidoc | 27 + .../0f547926ebf092e19fc5fb433e9ac8c1.asciidoc | 20 + .../0f7aa40ad26d59a9268630b980a3d594.asciidoc | 10 + .../0fa220ee3fb267020382f74aa70eb1e9.asciidoc | 11 + .../0fb472645116d58ddef89ca976d15a01.asciidoc | 73 +++ .../0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc | 32 + .../0fc4b589df5388da784c6d981e769e31.asciidoc | 26 + ...0fd08e14ad651827be53897a6bdaf0b8.asciidoc} | 14 +- .../0fe74ccd098c742619805a7c0bd0fae6.asciidoc | 10 + .../100d4e33158069f3caa32e8bfa0eb3d0.asciidoc | 18 + .../1027ab1ca767ac1428176ef4f84bfbcf.asciidoc | 40 -- .../102c7de25d13c87cf28839ada9f63c95.asciidoc | 31 + .../103296e16b4233926ad1f07360385606.asciidoc | 44 ++ .../10535507a9735fcf06600444b9067d4c.asciidoc | 38 ++ .../1070e59ba144cdf309fd9b2591612b95.asciidoc | 24 + .../10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc | 27 + .../109db8ff7b715aca98de8ef1ab7e44ab.asciidoc | 10 + .../10a16abe990288253ea25a1b1712fe3d.asciidoc | 10 + .../10b924bf6298aa6157ed00ce12f8edc1.asciidoc | 45 ++ .../10d8b17e73d31dcd907de67327ed78a2.asciidoc | 49 ++ .../10d9da8a3b7061479be908c8c5c76cfb.asciidoc | 10 + .../10de9fd4a38755020a07c4ec964d44c9.asciidoc | 17 + .../10e4c1f246ada8c6b500d8ea6c1e335f.asciidoc | 20 + .../10f0c8fed98455c460c374b50ffbb204.asciidoc | 10 + .../10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc | 8 + .../111c31db1fd29baeaa9964eafaea6789.asciidoc | 17 + .../111c69ca94162c1523b799a5c14723dd.asciidoc | 14 + .../113ac8466084ee6ac4ed272e342dc468.asciidoc | 18 + ...1147a02afa087278e51fa365fb9e06b7.asciidoc} | 7 +- .../114d470e752efa9672ca68d7290fada8.asciidoc | 11 + .../1153bd92ca18356db927054958cd95c6.asciidoc | 19 + .../115529722ba30b0b0d51a7ff87e59198.asciidoc | 10 + .../118f249a3b26c33416f641b33f2b74f8.asciidoc | 32 + .../11c395d1649733bcab853fe31ec393b2.asciidoc | 8 + .../11c43c4aa5435f8a99dcc0d1f03c648f.asciidoc | 18 + ...11d9043d3050a7175069dec7e0adc963.asciidoc} | 15 +- .../11e772ff5dbb73408ae30a1a367a0d9b.asciidoc | 10 + .../11e8d6e14686efabb8634b6522c05cb5.asciidoc | 21 + .../1216f8f7367df3aa823012cef310c08a.asciidoc | 21 - .../1233be1d4c9c7ca54126f1a0693b26de.asciidoc | 37 ++ .../123693835b3b85b9a2fa6fd1d3ad89c7.asciidoc | 23 + .../12433d2b637d002e8d5c9a1adce69d3b.asciidoc | 7 +- .../1252fa45847edba5ec2b2f33da70ec5b.asciidoc | 7 +- .../1259a9c151730e42de35bb2d1ba700c6.asciidoc | 10 + .../128283698535116931dca9d16a16dca2.asciidoc | 8 + .../1295f51b9e5d4ba9987b02478146b50b.asciidoc | 20 + .../12cb446446211f95f651e196a1f059b4.asciidoc | 12 + .../12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc | 12 + .../12ec704d62ffedcb03787e6aba69d382.asciidoc | 21 + .../12facf3617a41551ce2f0c4d005cb1c7.asciidoc | 25 + .../1302e24b0476e0e9af7a2c890edf9f62.asciidoc | 16 + .../1313c540fef7e7c18a066f07789673fc.asciidoc | 13 + .../132ea3d5a0ffb6b5203e356e8329f679.asciidoc | 26 + .../134384b8c63cfbd8d762fb01757bb3f9.asciidoc | 24 + .../135819da3a4bde684357c57a49ad8e85.asciidoc | 8 + .../13670d1534125831c2059eebd86d840c.asciidoc | 39 ++ .../136ae86b8d497dda799cf1cb583df929.asciidoc | 33 + ...137709a0a0dc38d6094291c9fc75b804.asciidoc} | 18 +- .../137c62a4443bdd7d5b95a15022a9dc30.asciidoc | 41 ++ .../138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc | 16 + .../13917f7cfb6a382c293275ff71134ec4.asciidoc | 21 + .../13b02da42d3afe7f0b649e1c98ac9549.asciidoc | 26 + 
.../13cc51ca3a783cdbb1f1d353eaedbf23.asciidoc | 12 + .../13d90ba227131aefbf4fcfd5992e662a.asciidoc | 37 ++ .../13df08eefc9ba98e311793bbca74133b.asciidoc | 11 + .../13e3fefbf55f672926aa389d76fc8bea.asciidoc | 11 + .../13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc | 11 + .../13ecdf99114098c76b050397d9c3d4e6.asciidoc | 15 + .../1420a22aa817c7a996baaed0ad366d6f.asciidoc | 22 + .../14254a0e725044faedf9370ead76f6ce.asciidoc | 12 + .../142de21c40e84e2e2d8d832e5b3b36db.asciidoc | 8 + .../1445ca2e813ed1c25504107b4b11760e.asciidoc | 13 + .../1452829804551d2d6acedd4e73b29637.asciidoc | 10 + .../146bd22fd0e7be2345619e8f11d3a4cb.asciidoc | 12 + .../14701dcc0cca9665fce2aace0cb62af7.asciidoc | 22 - .../147d341cb212dcc015c129a9c5dcf9c9.asciidoc | 11 + .../148edc235fcfbc263561f87f5533e688.asciidoc | 29 + .../14936b96cfb8ff999a833f615ba75495.asciidoc | 24 + .../149a0eea54cdf6ea3052af6dba2d2a63.asciidoc | 21 + .../14a1db30e13eb1d03cfd9710ca847ebb.asciidoc | 28 + .../14a49c13c399840e64c00b487aa820c9.asciidoc | 29 + .../14af7e2899e64f231068bded6aaf9ec5.asciidoc | 25 + .../14afe65afee3d43f27aaaa5b37f26a31.asciidoc | 16 + .../14b81f96297952970b78a3216e059596.asciidoc | 10 + .../14f124294a4a0e3a657d1468c36161cd.asciidoc | 17 + ...14f2dab0583c5a9fcc39931d33194872.asciidoc} | 8 +- .../150b5fee5678bf8cdf0932da73eada80.asciidoc | 35 + .../151d2b11807ec684b0c01aa89189a801.asciidoc | 16 + .../154d703732daf5c5fcd0122e6a50213f.asciidoc | 18 + .../156bc64c94f9f3334fbce25165d2286a.asciidoc | 23 + .../1570976f7807b88dc8a046b833be057b.asciidoc | 12 + .../1572696b97822d3332be51700e09672f.asciidoc | 19 + .../1598a0fec6b1ca78cadbaba65f465196.asciidoc | 33 + .../15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc | 11 + .../15c76cc8a038f686395053a240262929.asciidoc | 20 + .../15d4be58359542775f4aff88e6d8adb5.asciidoc | 22 + .../15d948d593d2624ac5e2b155052048f0.asciidoc | 12 + .../15dad5338065baaaa7d475abe85f4c22.asciidoc | 29 - .../15e90b82827c8512670820cf856a9c71.asciidoc | 20 + .../1605be45a5711d1929d6ad2d1ae0f797.asciidoc | 10 + .../160986f49758f4e8345d183a842f6351.asciidoc | 19 + .../160de80948e0c7db49b1c311848a66a2.asciidoc | 43 ++ .../160f39a50847bad0be4be1529a95e4ce.asciidoc | 55 ++ .../16239fe9f0b0dcfd5ea64c08c6fed21d.asciidoc | 28 + .../162b5b693b713f0bfab1209d59443c46.asciidoc | 25 +- .../16351d99d0608789d04a0bb11a537098.asciidoc | 20 + .../1637ef51d673b35cc8894ee80cd61c87.asciidoc | 11 + .../1648dd31d0fef01e7504ebeb687f4f30.asciidoc | 57 ++ .../16535685833419f0033545ffce4fdf00.asciidoc | 25 + .../1659420311d907d9fc024b96f4150216.asciidoc | 18 + .../16634cfa7916cf4e8048a1d70e6240f2.asciidoc | 29 + .../166bcfc6d5d39defec7ad6aa44d0914b.asciidoc | 19 + .../16985e5b17d2da0955a14fbe02e8dfca.asciidoc | 17 + .../169b39bb889ecd47541bed3e48725488.asciidoc | 15 + .../170c8a3fb81a4e93cd3034a3b5a43ac9.asciidoc | 20 + .../172155ca4bf6dfcbd489453f50739396.asciidoc | 13 + .../17266cee5eaaddf08e5534bf580a1910.asciidoc | 8 + .../172b18e435c400bed85227624de3acfd.asciidoc | 28 + .../172d150e56a225155a62c7b18bf8da67.asciidoc | 12 + .../1736545c8b5674f6d311f3277eb387f1.asciidoc | 11 + .../173b190078621415a80e851eaf794e8a.asciidoc | 28 + .../1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc | 10 + .../17566e23c191f1004a2719f2c4242307.asciidoc | 8 + .../178be73b74ba9f297429e32267084ac7.asciidoc | 30 + ...178c920d5e8ec0071f77290fa059802c.asciidoc} | 15 +- .../179f0a3e84ff4bbac18787a018eabf89.asciidoc | 27 +- .../17a1e308761afd3282f13d44d7be008a.asciidoc | 18 + .../17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc | 36 ++ .../17dd67a66c49f7eb618dd17430e48dfa.asciidoc | 22 
+ .../17e6f3fac556f08a78f7a876e71acb89.asciidoc | 15 + .../17f8a8990b0166befa3bc2b10fd28134.asciidoc | 18 + ...17fb298fb1e47f7d946a772d68f4e2df.asciidoc} | 18 +- .../182df084f028479ecbe8d7648ddad892.asciidoc | 8 + .../186a7143d50e8c3ee01094e1a9ff0c0c.asciidoc | 36 ++ .../187733e50c60350f3f75921bea3b72c2.asciidoc | 21 + .../187e8786e0a90f1f6278cf89b670de0a.asciidoc | 40 ++ .../188e6208cccb13027a5c1c95440841ee.asciidoc | 61 ++ .../189f0cd1ee2485cf11a2968f01d54e5b.asciidoc | 32 + .../18ddb7e7a4bcafd449df956e828ed7a8.asciidoc | 7 +- .../18de6782bd18f4a9baec2feec8c02a8b.asciidoc | 18 + .../190a21e32db2125ddaea0f634e126a84.asciidoc | 11 + .../19174d872fd1e43cbfb7a96a33d13c96.asciidoc | 67 ++ .../192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc | 10 + .../193234bb5dc6451fd15b584fbefd2446.asciidoc | 25 + .../193d86b6cc34e12c2be806d27816a35c.asciidoc | 28 + .../194bbac15e709174ac85b681f3a3d137.asciidoc | 28 + .../196aed02b11def364bab84e455c1a073.asciidoc | 12 + .../199f5165d876267080046c907e93483f.asciidoc | 12 + .../19c00c6b29bc7dbc5e92b3668da2da93.asciidoc | 8 + .../19ee488226d357d1576e7d3ae7a4693f.asciidoc | 11 + .../19f1f9f25933f8e7aba59a10881c648b.asciidoc | 18 + .../1a1f3421717ff744ed83232729289bb0.asciidoc | 10 + .../1a2890b90f3699fc2a4f27f94b145be9.asciidoc | 22 + .../1a3897cfb4f974c09d0d847baac8aa6d.asciidoc | 13 + .../1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc | 10 + .../1a4f8beb6847678880ca113ee6fb75ca.asciidoc | 19 + .../1a56df055b94466ca76818e0858752c6.asciidoc | 18 + .../1a6dbe5df488c4a16e2f1101ba8a25d9.asciidoc | 11 + .../1a81fe0186369838531e116e85aa4ccd.asciidoc | 35 + .../1a8d92e93481c432a91f7c213099800a.asciidoc | 8 + .../1a9e03ce0355872a7db27fedc783fbec.asciidoc | 18 + .../1a9efb56adb2cd84faa9825a129381b9.asciidoc | 18 + .../1aa91d3d48140d6367b6cabca8737b8f.asciidoc | 39 +- .../1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc | 35 + .../1adee74383e5594e45c937177d75aa2a.asciidoc | 16 + .../1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc | 11 + .../1b076ceb1ead9f6897c2f351f0e45f74.asciidoc | 23 + .../1b0b29e5cd7550c648d0892378e93804.asciidoc | 11 + .../1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc | 16 + .../1b0f40959a7a4d124372f2bd3f7eac85.asciidoc | 27 + .../1b2ab75d3c8064fac6ecc63104396c02.asciidoc | 11 + .../1b3762712c14a19e8c2956b4f530d327.asciidoc | 26 + .../1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc | 22 + .../1b47d988b218ee595430ec91eba91d80.asciidoc | 15 + .../1b5c8d6e61930a308008b5b1ace2aa07.asciidoc | 30 + .../1b8655e6ba99fe39933c6eafe78728b7.asciidoc | 38 -- .../1b8caf0a6741126c6d0ad83b56fce290.asciidoc | 31 - .../1b98b60d8e558fcccf9c550bdbf5b5c9.asciidoc | 22 + .../1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc | 39 ++ .../1bc731a4df952228af6dfa6b48627332.asciidoc | 25 - .../1bceb160ed2bcd51ee040caf21acf780.asciidoc | 29 + .../1c142bc8cac8d9dcb4f60e22902d434f.asciidoc | 19 + .../1c1f2a6a193d9e64c37242b2824b3031.asciidoc | 49 ++ .../1c3e3c4f2d268f1826a9b417e1868a58.asciidoc | 18 + .../1c87b5bf682bc1e8809a657529e14b07.asciidoc | 52 ++ .../1c8b6768c4eefc76fcb38708152f561b.asciidoc | 10 + .../1cab9da122778a95061831265c250cc1.asciidoc | 25 + .../1cadbcf2cfeb312f73b7f098291356ac.asciidoc | 13 + .../1cb3b45335ab1b9697c358104d44ea39.asciidoc | 12 + .../1cca4bb2f0ea7e43181be8bd965149d4.asciidoc | 11 + .../1cd3b9d65576a9212eef898eb3105758.asciidoc | 12 + .../1cea60c47d5c0e150b4c8fff4cd75ffe.asciidoc | 32 + .../1ceaa211756e2db3d48c6bc4b1a861b0.asciidoc | 14 + .../1cecd4d87a92427175157d41859df2af.asciidoc | 13 + .../1cfa04e9654c1484e3d4c75bf439400a.asciidoc | 31 + .../1d252d9217c61c2c1cbe7a92f77b078f.asciidoc | 
71 ++ .../1d746272a7511bf91302a15b5c58ca0e.asciidoc | 18 + .../1d827ae674970692643ea81991e5396e.asciidoc | 41 ++ .../1d918e206ad8dab916e59183da24d9ec.asciidoc | 13 + .../1d9b695a17cffd910c496c9b03c75d6f.asciidoc | 27 + .../1da77e114459e0b77d78a3dcc8fae429.asciidoc | 28 - .../1dadb7efe27b6c0c231eb6535e413bd9.asciidoc | 20 + .../1db086021e83205b6eab3b7765911cc2.asciidoc | 20 + .../1db715eb00832686ecddb6603684fc26.asciidoc | 8 + .../1dbb8cf17fbc45c87c7d2f75f15f9778.asciidoc | 7 +- .../1e08e054c761353f99211cd18e8ca47b.asciidoc | 11 + .../1e0b85750d4e63ebbc927d4627c44bf8.asciidoc | 11 + .../1e0f203aced9344382081ab095c44dde.asciidoc | 34 + .../1e18a67caf8f06ff2710ec4a8b30f625.asciidoc | 7 +- .../1e26353d546d733634187b8c3a7837a7.asciidoc | 14 + .../1e2c5cef7a3f254c71a33865eb4d7569.asciidoc | 26 + .../1e3384bc255729b65a6f0fc8011ff733.asciidoc | 10 + .../1e3553a73da487017f7a95088b6aa957.asciidoc | 10 + .../1e49eba5b9042c1900a608fe5105ba43.asciidoc | 42 -- .../1e4b17b830ead15087ccd96151a5ebde.asciidoc | 25 + .../1e50d993bd6517e6c381e82d09f0389e.asciidoc | 19 - .../1e547696f54582840040b1aa6661760c.asciidoc | 10 + .../1e871f060dbe1a5c316ed205278804a8.asciidoc | 35 + .../1e94a2bb95bc245bcfb87ac7d611cf49.asciidoc | 21 + ...1e9cab0b2727624e22e8cf4e7ca498ac.asciidoc} | 8 +- .../1ea24f67fbbb6293d53caf2fe0c4b984.asciidoc | 11 + .../1eb9c6ecb827ca69f7b17f7d2a26eae9.asciidoc | 20 + .../1ec66f188f681598cb5d7df700b214e3.asciidoc | 27 + .../1ed26c7b445ab1c167bd9385e1f0066f.asciidoc | 10 + .../1ed77bf308fa4ab328b36060e412f500.asciidoc | 66 ++ .../1eea46b08610972b79fdc4649748455d.asciidoc | 30 + .../1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc | 22 + .../1f00e73c144603e97f6c14ab15fa1913.asciidoc | 43 ++ .../1f13c7caef9c2fe0f73fce8795bbc9b0.asciidoc | 43 ++ .../1f336ecc62480c1d56351cc2f82d0d08.asciidoc | 17 - .../1f3dd84ab11bae09d3f99b1b3536e239.asciidoc | 11 + .../1f507659757e2844cefced25848540a0.asciidoc | 23 + ...1f673e1a0de2970dc648618d5425a994.asciidoc} | 15 +- .../1f6a190fa1aade1fb66680388f184ef9.asciidoc | 20 + .../1f6fe6833686e38c3711c6f2aa00a078.asciidoc | 21 - .../1f8a6d2cc57ed8997a52354aca371aac.asciidoc | 26 + ...1f900f7178e80051e75d4fd04467cf49.asciidoc} | 18 +- .../1fcc4a3280be399753dcfd5c489ff682.asciidoc | 35 + .../1fddbd602a6acf896a393cdb500a2831.asciidoc | 28 + .../1fe2ed1d65c4774755de44c9b9d6ed67.asciidoc | 11 + .../1ff12523efbd59c213c676937757c460.asciidoc | 10 + .../1ff296e868635fd102239871a331331b.asciidoc | 19 + .../1ff9b263b7c3e83278bb6a776a51590a.asciidoc | 19 + .../20005d8a6555b259b299d862cd218701.asciidoc | 17 + .../2006f577a113bda40905cf7b405bf1cf.asciidoc | 20 + .../2009f2d1ba0780a799a0fdce889c9739.asciidoc | 55 ++ .../200f6d4cc7b9c300b8962a119e03873f.asciidoc | 10 + .../20162e1dac807a7604f58dad814d1bc5.asciidoc | 27 + .../203c3bb334384bdfb11ff1101ccfba25.asciidoc | 24 + .../20407c847adb8393ce41dc656384afc4.asciidoc | 32 + .../2051ffe025550ab6645bfd525eaed3c4.asciidoc | 25 + .../2063713516847eef5d1dbf4ca1e877b0.asciidoc | 93 +++ .../206c723296be8ef8d58aef3ee01f5ba2.asciidoc | 24 + .../206d57bf0cb022c8229894e7753eca83.asciidoc | 31 + .../2081739da0c69de8af6f5bf9e94433e6.asciidoc | 14 + .../208c2b41bd1659aae8f02fa3e3b7378a.asciidoc | 48 ++ ...209a9190082498f0b7daa26f8834846b.asciidoc} | 21 +- .../20bc71cc5bbe04184e27827f3777a406.asciidoc | 12 + .../20c595907b4afbf26bd60e816a6ddf6a.asciidoc | 13 + .../20e3b181114e00c943a27a9bbcf85f15.asciidoc | 13 + .../20f62d0540bf6261549bd286416eae28.asciidoc | 15 + .../2105f2d1d81977054a93163a175793ce.asciidoc | 8 + 
.../213ab768f1b6a895e09403a0880e259a.asciidoc | 26 - .../2155c920d7d860f3ee7542f2211b4fec.asciidoc | 17 + .../21565b72da426776e445b1a166f6e104.asciidoc | 20 + .../216848930c2d344fe0bed0daa70c35b9.asciidoc | 9 +- .../216a6573ab4ab023e5dcac4eaa08c3c8.asciidoc | 10 + ...21715c32c140feeab04b38ff6d6de111.asciidoc} | 7 +- .../2185c9dfc62a59313df1702ec1c3513e.asciidoc | 19 + .../218b9009f120e8ad33f710e019179562.asciidoc | 10 + .../21a226d91d8edd209f6a821064e83918.asciidoc | 33 + .../21bb03ca9123de3237c1c76934f9f172.asciidoc | 40 ++ .../21c1e6ee886140ce0cd67184dd19b981.asciidoc | 8 + .../21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc | 20 + .../21d0ab6e420bfe7a1639db6af5b2e9c0.asciidoc | 35 + .../21d5fe55ca32b10b118224ea1a8a2e04.asciidoc | 77 +++ .../21e95d29bc37deb5689a654aa323b4ba.asciidoc | 17 + .../221e9b14567f950008459af77757750e.asciidoc | 24 + .../2224143c45dfc83a2d10b98cd4f94bb5.asciidoc | 26 + .../222e49c924ca8bac7b41bc952a39261c.asciidoc | 32 + .../22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc | 101 ++- .../2238ac4170275f6cfc2af49c3f014cbc.asciidoc | 29 + .../22619a4111f66e1b7231693b8f8d069a.asciidoc | 10 + .../22882d4eb8b99f44c8e0d3a2c893fc4b.asciidoc | 39 ++ .../229b83cbcd8efa1b0288a728a2abacb4.asciidoc | 73 +++ .../22cb99d4e6ba3101a2d9f59764a90877.asciidoc | 13 + .../22d8e92b4100f8e4f52260ef8d3aa2b2.asciidoc | 30 + .../22dd833336fa22c8a8f67bb754ffba9a.asciidoc | 22 + .../22dde5fe7ac5d85d52115641a68b3c55.asciidoc | 18 + .../22ef90a7fb057728d2115f0c6f551819.asciidoc | 36 ++ .../23074748d6c978176df5b04265e88938.asciidoc | 12 + .../2308c9948cbebd2092eec03b11281005.asciidoc | 16 + .../2310d84ebf113f2a3ed14cc53172ae4a.asciidoc | 18 + .../231aa0bb39c35fe199d28fe0e4a62b2e.asciidoc | 18 - .../2342a56279106ea643026df657bf7f88.asciidoc | 23 + .../234cec3ead32d7ed71afbe1edfea23df.asciidoc | 22 + .../236f50d89a07b83119af72e367e685da.asciidoc | 23 + .../239f615e0009c5cb1dc4e82ec4c0dab5.asciidoc | 30 + .../23af230e824f48b9cd56a4cf973d788c.asciidoc | 20 + .../23b062c157235246d7c347b9047b2435.asciidoc | 20 + .../23c4ae62f7035f2796e0ac3c7c4c20a9.asciidoc | 22 + .../2408020186af569a76a30eccadaed0d5.asciidoc | 32 + .../24275847128b68da6e14233aa1259fb9.asciidoc | 43 ++ .../242a26ced0e5706e48dcda19a4003094.asciidoc | 25 + .../2493c25e1ef944bc4de0f726470bcdec.asciidoc | 28 + .../249bf48252c8cea47ef872541c8a884c.asciidoc | 44 ++ .../24ad3c234f69f55a3fbe2d488e70178a.asciidoc | 29 + .../24aee6033bf77a68ced74e3fd9d34283.asciidoc | 10 + .../24bdccb07bba7e7e6ff45d3d4cd83064.asciidoc | 23 + .../24d66b2ebdf662d8b03e17214e65c825.asciidoc | 12 + .../24d806d1803158dacd4dda73c4204d3e.asciidoc | 11 + .../24f4dfdf9922d5aa79151675b7767742.asciidoc | 17 + .../251ea12c1248385ab409906ac64d9ee9.asciidoc | 28 - .../253140cb1e270e5ee23e15dbaeaaa0ea.asciidoc | 10 + .../2533e4b36ae837eaecda08407ecb6383.asciidoc | 26 - .../25576b6773322f0929d4c635a940dba0.asciidoc | 17 + .../256eba7a77c8890a43afeda8ce8a3225.asciidoc | 22 + .../25737fd456fd317cc4cc2db76b6cf28e.asciidoc | 15 + .../2577acb462b95bd4394523cf2f8a661f.asciidoc | 28 + .../2592e5361f7ea3b3dd1840f63d760dae.asciidoc | 32 + .../25981b7b3d55b87e1484586d57b695b1.asciidoc | 15 + .../25a0dad6547d432f5a3d394528f1c138.asciidoc | 13 + .../25ae1a698f867ba5139605cc952436c0.asciidoc | 31 + .../25c0e66a433a0cd596e0641b752ff6d7.asciidoc | 10 + .../25cb9e1da00dfd971065ce182467434d.asciidoc | 8 + .../25d40d3049e57e2bb70c2c5b88bd7b87.asciidoc | 15 + .../25ecfe423548ac1d7cc86de4a18c48c6.asciidoc | 34 + .../25ed47fcb890fcf8d8518ae067362d18.asciidoc | 23 + .../261480571394632db40e88fbb6c59c2f.asciidoc 
| 10 +
 .../26168987f799cdc4ee4151c85ba7afc5.asciidoc | 18 +
 .../262196e4323dfc1f8e6daf77d7ba3b6a.asciidoc | 17 +
 .../2623eb122cc0299b42fc9eca6e7f5e56.asciidoc | 8 +
 .../262a778d754add491fbc9c721ac25bf0.asciidoc | 11 +
 .../26419320085434680142567d5fda9c35.asciidoc | 20 +
 .../2643b8c512cb3f3449259cdf498c6ab5.asciidoc | 33 +
 .../2646710ece0c4c843aebeacd370d0396.asciidoc | 22 +
 .../268151ed1f0e12586e66e614b61d7981.asciidoc | 23 +
 .../26abfc49c238c2b5d259983ac38dbcee.asciidoc | 17 +
 .../26bd8c027c82cd72c007c10fa66dc97f.asciidoc | 13 +
 .../26d3ab748a855eb383e992eb1ff79662.asciidoc | 10 +
 .../26f237f9bf14e8b972cc33ff6aebefa2.asciidoc | 12 +
 .../270549e6b062228312c4e7a54a2c2209.asciidoc | 8 +
 .../2716453454dbf9c6dde2ea6850a62214.asciidoc | 35 +
 .../271fe0b452b62189505ce4a1d6f8bde1.asciidoc | 19 +
 .../2720e613d520ce352b62e990c2d283f7.asciidoc | 10 +
 .../2731a8577ad734a732d784c5dcb1225d.asciidoc | 39 ++
 .../27384266370152add76471dd0332a2f1.asciidoc | 32 +
 .../2740b69e7246ac6d1ad249382f21d534.asciidoc | 19 +
 .../274feaaa727e0ddf61b3c0f093182839.asciidoc | 26 +
 .../275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc | 10 +
 .../27600d6a78623b69689d4218618e4278.asciidoc | 15 +
 .../276e5b71ff5c6879a9b819076ad82301.asciidoc | 55 ++
 .../277fefe2b623af61f8274f73efc97aed.asciidoc | 21 +
 ...278d5bfa1a01f91d5c84679ef1bca390.asciidoc} | 9 +-
 .../2793fa53b7d269852aa74f6bf57e34dc.asciidoc | 30 +
 .../279e2b29261971999923fdc658bba8ff.asciidoc | 18 +
 .../27f9f604e7a48799fa30529cbc0ff619.asciidoc | 27 +
 .../2826510e4aeb1c0d8dc43d317ed7624a.asciidoc | 29 +
 .../282e9e845b606f29a5bba174ae4c4c4d.asciidoc | 24 +
 .../28415647fced5f983b42f8435332a625.asciidoc | 30 +
 .../28543836b62b5622a402e6f7731d68f0.asciidoc | 14 +
 ...2856a5ceff1861aa9a78099f1c517fe7.asciidoc} | 7 +-
 .../2864a24608b3ac59d21f604f8a31d131.asciidoc | 22 +
 .../2864d04bf99860ed5dbe1458f1ab5f78.asciidoc | 16 +
 .../2879d7bf4167194b102bf97117327164.asciidoc | 20 +
 .../2884eacac3ad05ff794f5296ec7427e7.asciidoc | 18 +
 .../2891aa10ee9d474780adf94d5607f2db.asciidoc | 23 +-
 .../2897ccc2a3bf3d0cd89328ee4413fae5.asciidoc | 10 +
 .../2898cf033b5bdefdbe3723af850b25c5.asciidoc | 35 +
 .../28aad2c5942bfb221c2bf1bbdc01658e.asciidoc | 20 -
 .../28ac880057135e46b3b00c7f3976538c.asciidoc | 13 +
 .../291110f4cac02f4610d0853f5800a70d.asciidoc | 25 +
 ...2932e6f71e247cf52e11d2f38f114ddf.asciidoc} | 23 +-
 .../295b3aaeb223612afdd991744dc9c873.asciidoc | 18 +
 .../29783e5de3a5f3c985cbf11094cf49a0.asciidoc | 14 +
 .../29824032d7d64512d17458fdd687b1f6.asciidoc | 10 +
 .../29953082744b7a36e437b392a6391c81.asciidoc | 14 +
 ...299900fb08da80fe455cf3f1bb7d62ee.asciidoc} | 9 +-
 .../29d9df958de292cec50daaf31844b573.asciidoc | 11 +
 .../29e002ab596bae58712eb048ac1768d1.asciidoc | 16 +
 .../2a1eece9a59ac1773edcf0a932c26de0.asciidoc | 8 +
 .../2a247e36a86a373bcbf478ac9a588f44.asciidoc | 18 +
 .../2a287d213a812b98d8353c563a058cfc.asciidoc | 18 +
 .../2a44d254e6e32abe97515fd2eb34705d.asciidoc | 12 +
 .../2a47d11c6e19c9da5104e738359ea8a8.asciidoc | 8 +
 .../2a5f7e7d6b92c66e52616845146d2820.asciidoc | 38 ++
 .../2a70194ebd2f01a3229a5092513676b3.asciidoc | 26 +
 .../2a71e2d7f7179dd76183d30789046808.asciidoc | 27 +
 .../2a91e1fb8ad93a188fa9d77ec01bc431.asciidoc | 53 ++
 .../2a9747bcfaf1f9491ebd410b3fcb6798.asciidoc | 15 +
 .../2a9d3119a9e26e29220be436b9382955.asciidoc | 23 +
 .../2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc | 10 +
 .../2abfe0d3f5593d23d2dfa608b1e2532a.asciidoc | 34 +
 .../2ac37c3c572170ded67f1d5a0c8151ab.asciidoc | 13 +
 .../2ac7efe3919ee0c7971f5d502f482662.asciidoc | 29 +
 ...2ad35a13262f98574a48f88b4a838512.asciidoc} | 9 +-
 .../2ade05fb3fb06a67df25e097dfadb045.asciidoc | 11 +
 .../2aec92bc31bc24bce58d983738f9e0fe.asciidoc | 19 +
 .../2afc1231679898bd864d06679d9e951b.asciidoc | 36 ++
 .../2afdf0d83724953aa2875b5fb37d60cc.asciidoc | 11 +
 .../2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc | 14 +
 .../2b47be4b712147a429102aef386470ee.asciidoc | 12 +
 .../2b59b014349d45bf894aca90b2b1fbe0.asciidoc | 10 +
 .../2b5a5f8689f04d095fa86570130ee4d4.asciidoc | 23 +
 .../2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc | 41 ++
 .../2b7687e3d7c06824950e00618c297864.asciidoc | 10 +
 .../2ba15c066d55a9b26d49b09471151cb4.asciidoc | 66 ++
 .../2bacdcb278705d944f367cfb984cf4d2.asciidoc | 31 +
 .../2bb2339ac055337abf753bddb7771659.asciidoc | 22 -
 .../2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc | 16 +
 .../2bc1d52efec2076dc9fc2a3a2d90e8ab.asciidoc | 19 +
 .../2bc57cd3f32b59b0b44ca63b19cdfcc0.asciidoc | 24 +
 .../2c090fe7ec7b66b3f5c178d71c46323b.asciidoc | 20 +
 .../2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc | 11 +
 .../2c1e16e9ac24cfea979af2a69900d3c2.asciidoc | 12 +
 .../2c27a8eb6528126f37a843d434cd88b6.asciidoc | 17 +
 .../2c3207c0c985d253b2ecccc14e69e25a.asciidoc | 11 +
 .../2c3dff44904d3d73ff47f1afe89c7f86.asciidoc | 16 +
 .../2c44657adf550b8ade5cf5334106d38b.asciidoc | 23 +
 .../2c602b4ee8f22cda2cdf19bad31da0af.asciidoc | 78 +++
 .../2cd8439db5054c93c49f1bf50433e1bb.asciidoc | 27 +
 ...2ceded6ee764adf1aaaac0a1cd25ed5f.asciidoc} | 17 +-
 .../2d01a9e5550b525496757f1bd7f0e706.asciidoc | 19 +
 .../2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc | 17 +
 .../2d2f5ec97aa34ff7822a6a1ed08ef335.asciidoc | 58 ++
 .../2d37b02cbf6d30ae11bf239a54ec9423.asciidoc | 85 +++
 .../2d60e3bdfee7afbddee149f40450b8b5.asciidoc | 16 +
 .../2d8fcb03de417a71e7888bbdd948a692.asciidoc | 11 +
 .../2d9b30acd6b5683f39d53494c0dd779c.asciidoc | 11 +
 .../2dad2b0c8ba503228f4b11cecca0b348.asciidoc | 11 +
 .../2de6885bacb8769b8f22dce253c96b0c.asciidoc | 24 +
 .../2e364833626c9790c042c8f006fcc999.asciidoc | 26 +
 .../2e36fe22051a47e052e349854d9948b9.asciidoc | 12 +
 .../2e3d1b293da93f2a9ecfc26786ec28d6.asciidoc | 154 +++++
 .../2e796e5ca59768d4426abbf9a049db3e.asciidoc | 14 +
 .../2e7f4b9be999422a12abb680572b13c8.asciidoc | 10 +
 .../2e847378ba26aa64d40186b6e3e6a1da.asciidoc | 21 +
 .../2e93eaaebf75fa4a2451e8a76ffa9f20.asciidoc | 22 +
 .../2ebcdd00ccbf26b4c8e6d9c80dfb3d55.asciidoc | 19 +
 .../2ec8d757188349a4630e120ba2c98c3b.asciidoc | 18 +
 .../2ee002e60bd7a38d466e5f0eb0c38946.asciidoc | 18 +
 .../2ee239df3243c98418f7d9a5c7be4cfd.asciidoc | 27 +
 .../2eebaeb3983a04ef7a9201c1f4d40dc1.asciidoc | 68 ++
 .../2f07b81fd47ec3b074242a760f0c4e9e.asciidoc | 13 +
 .../2f0b2181c434a879a23b4643bdd92575.asciidoc | 20 +
 .../2f195eeb93229e40c4d8f1a6ab4a358c.asciidoc | 30 +
 .../2f2580ea420e1836d922fe48fa8ada97.asciidoc | 10 +
 ...2f2fd35905feef0b561c05d70c7064c1.asciidoc} | 7 +-
 .../2f4a55dfeba8851b306ef9c1b216ef54.asciidoc | 15 +
 .../2f4e28c81db47547ad39d0926babab12.asciidoc | 39 ++
 .../2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc | 13 +
 .../2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc | 21 +
 ...2f98924c3d593ea2b60edb9cef5bee22.asciidoc} | 8 +-
 .../2fa45d74ba9933188c4728f8a9e5372c.asciidoc | 26 +
 .../2fa7ded8515b32f26c54394ea598f573.asciidoc | 39 ++
 .../2fc2c790a85be29bbcba50bdde1493f4.asciidoc | 11 +
 .../2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc | 12 +
 .../2fd0b3c132b46aa34cc9d92dd2d4bc85.asciidoc | 17 +
 .../2fd458d37aab509fe2d970c0b6e2a10f.asciidoc | 53 ++
 .../2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc | 85 ++-
 .../2fea3e324939cc7e9c396964aeee7111.asciidoc | 18 +
 .../2fee452baff92b409cbfc8d71eb5fc0e.asciidoc | 10 +
 .../2ffa953b29ed0156c9e610daf66b8e48.asciidoc | 10 +
 .../300576666769b78fa6fa26b232837f81.asciidoc | 8 +
 .../305c4cfb2ad4b58b4c319ffbf32336cc.asciidoc | 21 +
 .../3082ae0c3ecdc61808103214631b40c6.asciidoc | 32 +
 .../309f0721145b5c656338a02459c3ff1e.asciidoc | 18 +
 .../30abc76a39e551f4b52c65002bb6405d.asciidoc | 11 +
 .../30bd3c0785f3df4795684754adeb5ecb.asciidoc | 23 +
 .../30db2702dd0071c72a090b8311d0db09.asciidoc | 37 ++
 .../30f3e3b9df46afd12e68bc71f18483b4.asciidoc | 29 +
 .../3166455372f2d96622caff076e91ebe7.asciidoc | 18 +
 .../316cd43feb3b86396483903af1a048b1.asciidoc | 20 +
 .../3182f26c61fbe5cf89400804533d5ed2.asciidoc | 21 +
 .../318e209cc4d6f306e65cb2f5598a50b1.asciidoc | 19 +
 .../31a79a57b242713edec6795599ba0d5d.asciidoc | 20 +
 .../31ab4ec26176857280af630bf84a2823.asciidoc | 10 +
 .../31ac1b68dc7c26a1d37350be47ae9381.asciidoc | 17 +
 .../31aed390c30bd4f42a5c56253695e53f.asciidoc | 20 +
 .../31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc | 10 +
 .../31f4400716500149cccbc19aa06bff66.asciidoc | 11 +
 .../320645d771e952af2a67bb7445c3688d.asciidoc | 41 ++
 .../32123981430e5a8b34fe14314fc48429.asciidoc | 15 +
 .../3218f8ccd59c8c90349816e0428e8fb8.asciidoc | 10 +
 .../3250a8d2d2a9619035040e55a03620b9.asciidoc | 13 +
 .../327466380bcd55361973b4a96c6dccb2.asciidoc | 39 ++
 .../32a7acdfb7046966b28f394476c99126.asciidoc | 13 +
 .../32af23a4b0fea6c81c4688ce5fe4ac35.asciidoc | 22 +
 .../32b7963c5cabbe9cc7d15da62f5edda9.asciidoc | 18 +
 .../32b8a5152b47930f2e16c40c8615c7bb.asciidoc | 29 +
 .../32cd57666bc80b8cf793d06fa1086669.asciidoc | 12 +
 .../32ce26b8af95f7ccc2a7bd5e77a39d6c.asciidoc | 10 +
 .../32de5dd306bd014d67053d2f175defcd.asciidoc | 12 +
 .../331caebf810a923644eb6de26e5a97f4.asciidoc | 20 +
 .../3337c817ebd438254505a31e91c91724.asciidoc | 10 +
 ...3341d3bbb53052447a37c92a04c14b70.asciidoc} | 13 +-
 .../3343a4cf559060c422d86c786a95e535.asciidoc | 12 +
 .../33610800d9de3c3e6d6b3c611ace7330.asciidoc | 10 +
 .../336613f48dd95ea993dd3bcce264fd0e.asciidoc | 24 +
 .../33732208fc6e6fe1e8d278299681932e.asciidoc | 13 +
 .../3386fe07e90844dbcdbbe7c07f09e04a.asciidoc | 10 +
 .../33b732bb301e99d2161bd2246494f487.asciidoc | 21 +
 .../33d480fc6812ada75756cf5337bc9092.asciidoc | 15 +
 .../33f148e3d8676de6cc52f58749898a13.asciidoc | 37 +-
 .../342ddf9121aeddd82fea2464665e25da.asciidoc | 16 +
 .../343dd09a8c76987e586858be3bdc51eb.asciidoc | 50 ++
 .../344b4144244d57f87c6aa4652b100b25.asciidoc | 14 +
 .../346f28d82acb5427c304aa574fea0008.asciidoc | 26 +
 .../3477a89d869b1f7f72d50c2ca86c4679.asciidoc | 10 +
 .../3487e60e1ae9d4925ce540cd63574385.asciidoc | 24 +
 .../349823d86980d40ac45248c19a59e339.asciidoc | 21 +
 .../34be27141e3a476c138546190101c8bc.asciidoc | 14 +
 .../34cdeefb09bbbe5206957a8bc1bd513d.asciidoc | 13 +
 ...34d51c54b62e9a160c0ddacc10134bb0.asciidoc} | 23 +-
 .../34d63740b58209a3d031212909743925.asciidoc | 22 +
 .../34efeade38445b2834749ced59782e25.asciidoc | 29 -
 .../35260b615d0b5628c95d7cc814c39bd3.asciidoc | 22 +
 ...353020cb30a885ee7f5ce2b141ba574a.asciidoc} | 13 +-
 .../3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc | 10 +
 .../3544f17cb97b613a2f733707c676f759.asciidoc | 37 ++
 .../3545261682af72f4bee57f2bac0a9590.asciidoc | 10 +
 .../35563ef92dddef9d83906d9c43c60d0f.asciidoc | 21 +
 ...355d0ee2fcb6c1fc403c6267f710e25a.asciidoc} | 20 +-
 .../357edc9d10e98ed776401c7a439a1a55.asciidoc | 11 +
 .../35a272df8c919a12d7c3106a18245748.asciidoc | 15 +
 .../35b686d9d9e915d0dea7a4251781767d.asciidoc | 20 +
 .../35be136ba9df7474a5521631e2a385b1.asciidoc | 10 +
 .../35c33ef48cf8a4ee368874141622f9d5.asciidoc | 22 +
 .../35c664285f2e8b7d5d50ca37ae3ba794.asciidoc | 20 +
 .../35e8da9410b8432cf4095f2541ad7b1d.asciidoc | 21 -
 .../35eef1765e9a5991d77592a0c7490fe0.asciidoc | 18 +
 .../35f892b475a1770f18328158be7039fd.asciidoc | 19 +
 .../35fc63cbefce7bc131ad467b5ba209ef.asciidoc | 12 +
 .../35fd9549350926f8d57dc1765e2f40d3.asciidoc | 35 +
 .../36063ff9a318dba7bb0be3a230655dc8.asciidoc | 29 +
 .../3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc | 15 +
 ...360b3cef34bbddc5d9579ca95f0cb061.asciidoc} | 21 +-
 .../360c4f373e72ba861584ee85bd218124.asciidoc | 31 +
 .../3613f402ee63f0efb6b8d9c6a919b410.asciidoc | 20 +
 .../362dfccdb6f7933b22c909542e0b4e0a.asciidoc | 21 +
 .../365256ebdfa47b449780771d9beba8d9.asciidoc | 11 +
 ...36962727b806315b221e8a63e05caddc.asciidoc} | 21 +-
 .../36b26905c5f96d0b785c3267fb63838d.asciidoc | 609 ++++++++++++++++++
 .../36b2778f23d0955255f52c075c4d213d.asciidoc | 28 -
 .../36b86b97feedcf5632824eefc251d6ed.asciidoc | 17 +-
 .../36d229f734adcdab00be266a7ce038b1.asciidoc | 19 +
 .../36da9668fef56910370f16bfb772cc40.asciidoc | 11 +
 .../36e09bbd5896498ede0f5d37a18eae2c.asciidoc | 20 +
 ...36fae9dfc0b815546b45745bac054b67.asciidoc} | 20 +-
 .../370b297ed3433577adf53e64f572d89d.asciidoc | 11 +
 .../371962cf63e65c10026177c6a1bad0b6.asciidoc | 8 +
 .../3722cb3705b6bc7f486969deace3dd83.asciidoc | 23 -
 .../37530f35f315b9f35e3e6a13cf2a1ccd.asciidoc | 26 +
 .../3758b8f2ab9f6f28a764ee6c42c85766.asciidoc | 35 +
 .../3759ca688c4bd3c838780a9aad63258b.asciidoc | 10 +
 .../375bf2c51ce6cc386f9d4d635d5e84a7.asciidoc | 17 +
 .../376fbc965e1b093f6dbc198a94c83aa9.asciidoc | 49 ++
 .../376ff4b2b5f657481af78a778aaab57f.asciidoc | 74 +++
 .../377af0ea9b19c113f224d8150890b41b.asciidoc | 74 +++
 .../378e55f78fa13578a1302bae8d479765.asciidoc | 17 +
 .../37983daac3d9c8582583a507b3adb7f2.asciidoc | 12 +
 .../37ae7c3e4d6d954487ec4185fe7d9ec8.asciidoc | 23 +
 .../37b84f2ab7c2f6b4fe0e14cc7e018b1f.asciidoc | 39 ++
 .../37c73410bf13429279cbc61a413957d8.asciidoc | 10 +
 .../37eaab0630976d3dee90a52011342883.asciidoc | 20 +
 .../37f1f2e75ed95308ae436bbbb8d5645e.asciidoc | 10 +
 .../3819d0a5c2eed635c88e9e7bf2e81584.asciidoc | 12 +
 .../386eb7dcd3149db82605bf22c5d851bf.asciidoc | 13 +
 .../388d3eda4f792d3fce044777739217e6.asciidoc | 19 +
 .../388ec2b038d3ad69378f4c2e5bc36dce.asciidoc | 38 ++
 .../38af4a55c1ea0f908dc7b06d680d2789.asciidoc | 10 +
 .../38b20fe981605e80a41517e9aa13134a.asciidoc | 34 +
 .../38ba93890494bfa7beece58dffa44f98.asciidoc | 23 +
 .../38eed000de433b540116928681c520d3.asciidoc | 10 +
 .../38f7739f750f1411bccf511a0abaaea3.asciidoc | 10 +
 .../38ffa96674b5fd4042589af0ebb0437b.asciidoc | 17 +
 .../3924ee252581ebb96ac0e60046125ae8.asciidoc | 10 +
 .../3951d7fcd7f849fa278daf342872125a.asciidoc | 11 +
 .../39760996f94ad34aaceaa16a5cc97993.asciidoc | 10 +
 .../397ab5f9ea0b69ae85038bb0b9915180.asciidoc | 11 +
 .../397bdb40d0146102f1f4c6a35675e16a.asciidoc | 57 ++
 .../39963032d423e2f20f53c4621b6ca3c6.asciidoc | 11 +
 .../39a6a038c4b551022afe83de0523634e.asciidoc | 23 -
 .../39ce44333d28ed2b833722d3e3cb06f3.asciidoc | 38 ++
 .../39d6f575c9458d9c941364dfd0493fa0.asciidoc | 10 +
 .../3a12feb0de224bfaaf518d95b9f516ff.asciidoc | 56 ++
 .../3a2953fd81d65118a776c87a81530e15.asciidoc | 23 +
 .../3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc | 15 +
 .../3a3adae6dbb2c0316a7d98d0a6c1d4f8.asciidoc | 36 ++
 .../3a3e6e2627cafa08e4402a0de95785cc.asciidoc | 30 +
 .../3a5f2e2313614ea9693545edee22ac43.asciidoc | 12 +
 .../3a6238835c7d9f51e6d91f92885fadeb.asciidoc | 26 +
 .../3a64ae799cc03fadbb802794730c23da.asciidoc | 28 +
 .../3a700f836d8d5da1b656a876554028aa.asciidoc | 16 -
 .../3a7a6ab88a49b484fafb10c8eb09b562.asciidoc | 21 +
 .../3aa0e2d25a51bf5f3f0bda7fd8403bf2.asciidoc | 26 +
 .../3abedc1d68fe1d20621157406b2b1de0.asciidoc | 29 +
 .../3ac075c5b5bbe648d40d06cce3061367.asciidoc | 15 +
 .../3ac8b5234e9d53859245cf8ab0094ca5.asciidoc | 10 +
 .../3af10fde8138d9d95df127d39d9a0ed2.asciidoc | 12 +
 .../3afc6dacf90b42900ab571aad8a61d75.asciidoc | 40 ++
 .../3b0475515ee692a2d9850c2bd7cdb895.asciidoc | 32 +
 .../3b04cc894e6a47d57983484010feac0c.asciidoc | 19 +-
 ...3b05128cba6852e79a905bcdd5a8ebc0.asciidoc} | 10 +-
 .../3b162509ed14eda44a9681cd1108fa39.asciidoc | 30 +
 .../3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc | 10 +
 .../3b1ff884f3bab390ae357e622c0544a9.asciidoc | 87 +++
 .../3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc | 8 +
 .../3b606631284877f9bca15051630995ad.asciidoc | 26 +
 .../3b64821fe9db73eb03860c60d775d7ff.asciidoc | 23 +
 .../3b8ab7027e0d616fb432acd8813e086c.asciidoc | 18 +
 .../3b9c54604535d97e8368d47148aecc6f.asciidoc | 13 +
 .../3ba2896bcc724c27be8f0decf6f81813.asciidoc | 21 +
 .../3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc | 18 +
 .../3bb5951a9e1186af5d154f56ffc13502.asciidoc | 48 ++
 .../3bc872dbcdad8ff02cbaea39e7f38352.asciidoc | 17 +
 .../3bfa2362add163802fc2210cc2f37ba2.asciidoc | 13 +
 .../3c04f75bcbb07125d51b21b9b2c9f6f0.asciidoc | 57 ++
 .../3c09ca91057216125ed0e3856a91ff95.asciidoc | 149 +++++
 .../3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc | 20 +
 .../3c36dc17359c6b6b6a40d04da9293fa7.asciidoc | 32 +
 .../3c5d5a5c34a62724942329658c688f5e.asciidoc | 10 +
 .../3c65cb58e131ef46f4dd081683b970ac.asciidoc | 26 +
 .../3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc | 11 +
 .../3c7621a81fa982b79f040a6d2611530e.asciidoc | 45 ++
 ...3cd2f7f9096a8e8180f27b6c30e71840.asciidoc} | 34 +-
 .../3cd50a789b8e1f0ebbbc53a8d7ecf656.asciidoc | 38 --
 .../3cd93a48906069709b76420c66930c01.asciidoc | 26 +
 .../3d05fa99ba8e1f2c3f3dfe59e4ee60f6.asciidoc | 19 +
 .../3d1a0e1dc5310544d032108ae0b3f099.asciidoc | 14 +
 .../3d1ff6097e2359f927c88c2ccdb36252.asciidoc | 5 +-
 .../3d316bddd8503a6cc10566630a4155d3.asciidoc | 11 +
 .../3d48d1ba49f680aac32177d653944623.asciidoc | 11 +
 .../3d6935e04de21ab2f103e5b61cfd7a5b.asciidoc | 20 +
 .../3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc | 8 +
 .../3d82257167e8a14a7f474848b32da128.asciidoc | 31 +
 .../3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc | 12 +
 ...3db2b5a6424aa92ecab7a8640c38685a.asciidoc} | 9 +-
 .../3dd45f65e7bfe207e8d796118f25613c.asciidoc | 10 +
 .../3e121b43773cbb6dffa9b483c86a1f8d.asciidoc | 29 +
 .../3e13c8a81f40a537eddc0b57633b45f8.asciidoc | 12 +
 .../3e1cb34fd6e510c79c2fff2126ac1c61.asciidoc | 19 +
 .../3e278e6c193b4c17dbdc70670e15d78c.asciidoc | 23 +
 .../3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc | 14 +
 ...3e4227250d49e81df48773f8ba803ea7.asciidoc} | 19 +-
 .../3e573bfabe00f8bfb8bb69aa5820768e.asciidoc | 25 -
 .../3e6db3d80439c2c176dbd1bb1296b6cf.asciidoc | 13 +
 ...3e8ed6ae016eb823cb00d9035b8ac459.asciidoc} | 8 +-
 .../3ea33023474e77d73ac0540e3a02b0b2.asciidoc | 26 +
 .../3eb4cdd4a799a117ac1ff5f02b18a512.asciidoc | 46 ++
 .../3ec95ba697ff97ee2d1a721a393b5926.asciidoc | 70 ++
 .../3eca58ef7592b3a857ea3a9898de5997.asciidoc | 31 +
 .../3ed39eb60fbfafb70f7825b8d103bf17.asciidoc | 26 +
 .../3ed79871d956bfb2d6d2721d7272520c.asciidoc | 10 +
 .../3ee232bcb2281a12b33cd9764ee4081a.asciidoc | 27 +
 .../3f1fe5f5f99b98d0891f38003e10b636.asciidoc | 8 +
 .../3f20459d358611793272f63dc596e889.asciidoc | 23 +
 .../3f292a5f67e20f91bf18f5c2412a07bf.asciidoc | 21 +
 .../3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc | 21 +
 .../3f30310cc6d0adae6b0f61705624a695.asciidoc | 19 +
 .../3f5b5bee692e7d4b0992dc0a64e95a60.asciidoc | 62 ++
 .../3f60a892bed18151b7baac6cc712576a.asciidoc | 20 +
 .../3f669878713a14dfba251c7ce74dd5c4.asciidoc | 41 ++
 .../3f8dc309b63fa0437898107b0d964217.asciidoc | 11 +
 .../3f94ed945ae6416a0eb372c2db14d7e0.asciidoc | 16 +
 .../3fab530a2e43807929c0ef3ebf7d268c.asciidoc | 36 ++
 .../3faec4ca15d8c2fbbd16781b1c8693d6.asciidoc | 22 +
 .../3faf5e2873de340acfe0a617017db784.asciidoc | 14 +
 .../3fb1289c80a354da66693bfb25d7b412.asciidoc | 22 +
 .../3fb2f41ad229a31ad3ae408cc50cbed5.asciidoc | 16 +
 .../3fe0fb38f75d2a34fb1e6ac9bedbcdbc.asciidoc | 14 +
 .../3fe4264ace04405989141c43aadfff81.asciidoc | 17 +
 .../3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc | 10 +
 .../3fe79ed63195c5f8018648a5a6d645f6.asciidoc | 24 +
 .../3fe9006f6c7faea162e43fb250f4da38.asciidoc | 18 +
 .../3fecd5c6d0c172566da4a54320e1cff3.asciidoc | 17 +
 .../3ff634a50e2e4556bad7ea8553576992.asciidoc | 26 +
 .../3ffe9952786ab258bb6ab928b03148a2.asciidoc | 16 +
 .../400e89eb46ead8e9c9e40f123fd5e590.asciidoc | 23 +-
 .../402092585940953420404c2884a47e59.asciidoc | 41 ++
 .../4029af36cb3f8202549017f7378803b4.asciidoc | 8 +
 .../4053de806dfd9172167999ce098107c4.asciidoc | 19 +
 .../405511f7c1f12cc0a227b4563fe7b2e2.asciidoc | 10 +
 .../405ac843a9156d3cab374e199cac87fb.asciidoc | 16 +
 .../405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc | 13 +
 .../4061fd5ba7221ca85805ed14d59a6bc5.asciidoc | 10 +
 .../406a0f1c1aac947bcee58f86b6d036c1.asciidoc | 64 ++
 .../408060f0c52300588a6dee774f4fd6a5.asciidoc | 533 +++++++++++++++
 .../40a42f005144cfed3dd1dcf2638e8211.asciidoc | 15 +
 .../40b73b5c7ca144dc3f63f5b741f33d80.asciidoc | 22 +
 .../40bd86e400d27e68b8f0ae580c29d32d.asciidoc | 11 +
 .../40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc | 44 ++
 .../40d88d4f53343ef663c89ba488ab8001.asciidoc | 19 +
 .../40d90d9dc6f4942bf92d88bfc5a34672.asciidoc | 17 +
 .../40f97f70e8e743c6a6296c81b920aeb0.asciidoc | 12 +
 ...4113c57384aa37c58d11579e20c00760.asciidoc} | 11 +-
 .../41175d304e660da2931764f9a4418fd3.asciidoc | 19 +
 .../41195ef13af0465cdee1ae18f6c00fde.asciidoc | 8 +
 .../412f8238ab5182678f1d8f6383031b11.asciidoc | 11 +
 .../413fdcc7c437775a16bb55b81c2bbe2b.asciidoc | 17 +
 .../415b46bc2b7a7b4dcf9a73ac67ea20e9.asciidoc | 25 +
 .../416a3ba11232d3c078c1c31340cf356f.asciidoc | 20 +
 .../41ad6077f9c1b8d8fefab6ea1660edcd.asciidoc | 18 +
 ...41dbd79f624b998d01c10921e9a35c4b.asciidoc} | 16 +-
 .../41fd33a293a575bd71a1fac7bcc8b47c.asciidoc | 42 ++
 .../4207219a892339e8f3abe0df8723dd27.asciidoc | 12 +
 .../421e68e2b9789f0e8c08760d9e685d1c.asciidoc | 23 +
 .../424fbf082cd4affb84439abfc916b597.asciidoc | 14 +
 .../425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc | 12 +
 .../4275ecbe4aa68d43a8a7139866610a27.asciidoc | 23 +
 .../42ba7c1d13aee91fe6f0a8a42c30eb74.asciidoc | 11 +
 .../42bc7608bb675dd6238e2fecbb758d06.asciidoc | 22 +
 .../42d02087f1c8ab0452ef373079a76843.asciidoc | 11 +
 .../42deb4fe32afbe0f94185e256a79c447.asciidoc | 26 +
 .../4301cb9d970ec65778f91ce1f438e0d5.asciidoc | 24 +
 .../430705509f8367aef92be413f702520b.asciidoc | 14 +
 .../4310869b97d4224acaa6d66b1e196048.asciidoc | 17 +
 .../4323f6d224847eccdce59c23e33fda0a.asciidoc | 20 +
 .../433cf45a23decdf3a096016ffaaf26ba.asciidoc | 19 +
 .../4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc | 13 +
 .../435e0d6a7d86e074d572d9671b7b9676.asciidoc | 24 +
 .../436d50b85fc8f0977d02059eec00719b.asciidoc | 21 +
 .../43854be6aae61edbea5f9ab988cb4ce5.asciidoc | 12 +
 .../43af86de5e49aa06070092fffc138208.asciidoc | 21 -
 .../43e86fbaeed068dcc981214338559b5a.asciidoc | 10 +
 .../43f77ddf1ed8106d4f47a12d39df8e3b.asciidoc | 15 +
 .../43fe75fa9f3fca846598fdad58fd98cb.asciidoc | 8 +
 .../441be98c597698bb2809372abf086c3e.asciidoc | 16 +
 ...441f330f6872f995769db1ce2b9627e2.asciidoc} | 18 +-
 .../44231f7cdd5c3a21025861cdef31e355.asciidoc | 11 +
 .../4427517dcd8ec9997541150cdc11a0de.asciidoc | 10 +
 .../4435b654994b575ba181ea679871c78c.asciidoc | 15 +
 .../443dd902f64b3217505c9595839c3b2d.asciidoc | 17 +
 .../443e8da9968f1c65f46a2a65a1e1e078.asciidoc | 45 ++
 .../443f0e8fbba83777b2df624879d188d5.asciidoc | 18 +
 .../445f8a6ef75fb43da52990b3a9063c78.asciidoc | 16 +
 .../446e8fc8ccfb13bb5ec64e32a5676d18.asciidoc | 12 +
 .../4479e8c63a04fa22207a6a8803eadcad.asciidoc | 12 +
 .../44939997b0f2601f82a93585a879f65a.asciidoc | 32 +
 .../4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc | 13 +
 .../44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc | 16 +
 .../44bca3f17d403517af3616754dc795bb.asciidoc | 24 +
 ...44da736ce0e1587c1e7c45eee606ead7.asciidoc} | 25 +-
 .../44db41b8465af951e366da97ade63bc1.asciidoc | 10 +
 .../44dd65d69267017fa2fb2cffadef40bb.asciidoc | 24 +
 .../44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc | 17 +
 .../451b441c3311103d0d2bdbab771b26d2.asciidoc | 15 +
 .../451e7c29b2cf738cfc822f7c175bef56.asciidoc | 21 +
 .../4527d9bb12cf738111a188af235d5d4c.asciidoc | 23 +
 .../45499ed1824d1d7cb59972580d2344cb.asciidoc | 18 +
 .../455029c3d66306ad5d48f6dbddaf7324.asciidoc | 59 ++
 .../4553e0acb6336687d61eaecc73f517b7.asciidoc | 26 +
 .../45813d971bfa890ffa2f51f3f480cce5.asciidoc | 18 +
 .../458b2228aed7464d915a5d73cb6b98f6.asciidoc | 12 +
 .../45b74f1904533fdb37a5a6f3c8f4ec9b.asciidoc | 34 +
 .../45c6e54a9c9e08623af96752b4bde346.asciidoc | 23 +
 .../45ef5156dbd2d3fd4fd22b8d99f7aad4.asciidoc | 12 +
 .../46025fc47dfbfa410790df0dd6bdad8d.asciidoc | 26 +
 .../46064e81620162a23e75002a7eeb8b10.asciidoc | 18 +
 .../46103fee3cd5f53dc75123def82d52ad.asciidoc | 18 +
 .../4646764bf09911fee7d58630c72d3137.asciidoc | 24 -
 .../464dffb6a6e24a860223d1c32b232f95.asciidoc | 43 ++
 .../4655c3dea0c61935b7ecf1e57441df66.asciidoc | 11 +
 .../4659f639d71a54df571260ee5798dbb3.asciidoc | 31 +
 .../46658f00edc4865dfe472a392374cd0f.asciidoc | 9 +-
 .../4670dd81a9865e07ae74ae8b0266e384.asciidoc | 35 +
 .../467833bd44b35a89a7fe0d7df5f253f1.asciidoc | 11 +
 .../468f7ec42cdd8287cdea3ec1cea4a514.asciidoc | 19 +
 .../46a0eaaf5c881f1ba716d1812b36c724.asciidoc | 23 +
 .../46b1c1f6e0c86528be84c373eeb8d425.asciidoc | 22 +
 .../46c5c14f20118dcf519ff6ef21360209.asciidoc | 24 +
 .../46ce40227fa60aa6ba435f366b3a1f5f.asciidoc | 25 +
 .../46ebd468c3f132a4978088964466c5cd.asciidoc | 20 +
 .../472ec8c57fec8457e31fe6dd7f6e3713.asciidoc | 16 +
 .../473c8ddd4e4b7814a64e5fe40d9d6dca.asciidoc | 10 +
 .../4752f82fec8b46e5a4b3788b76e3041f.asciidoc | 26 +
 .../47909e194d10743093f4a22c27a85925.asciidoc | 28 +
 .../47bb632c6091ad0cd94bc660bdd309a5.asciidoc | 31 -
 .../47e6dfb5b09d954c9c0c33fda2b6c66d.asciidoc | 17 +
 .../47fde7874e15a37242993fd69c62063b.asciidoc | 19 +
 .../480e531db799c4c909afd8e2a73a8d0b.asciidoc | 8 +
 .../4818a1288ac24a56d6d6a4130ee70202.asciidoc | 10 +
 .../4824a823a830a2a5d990eacfd783ac22.asciidoc | 37 ++
 .../48313f620c2871b6f4019b66be730109.asciidoc | 45 ++
 .../483d669ec0768bc4e275a568c6164704.asciidoc | 10 +
 .../484e24d1ed1a154ba9753e6090d38d78.asciidoc | 16 +
 .../487f0e07fd83c05f9763e0795c525e2e.asciidoc | 99 +++
 ...488f6df1df71972392b670ce557f7ff3.asciidoc} | 23 +-
 .../48d9697a14dfe131325521f48a7adc84.asciidoc | 23 +
 .../48de51de87a8ad9fd8b8db1ca25b85c1.asciidoc | 29 +
 .../49100a4f53c0ba345fadacdc4f2f86e4.asciidoc | 11 +
 .../4955bae30f265b9e436f82b015de6d7e.asciidoc | 20 +
 .../496d35c89dc991a1509f7e8fb93ade45.asciidoc | 42 ++
 .../4980d6fcb369692b0b29ddc6767d4324.asciidoc | 12 +
 .../4989cc97ce1c8fff634a10d343031bd0.asciidoc | 12 +
 .../49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc | 17 +
 .../49c052a748c943180db78fee8e144239.asciidoc | 10 +
 .../49c40b51da2469a6e00fea8fa6fbf56e.asciidoc | 14 +
 .../49cb3f48a0097bfc597c52fa51c6d379.asciidoc | 11 +
 .../49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc | 12 +
 .../49e8773a34fcbf825de38426cff5509c.asciidoc | 17 +
 .../49f4d2a461536d150e16b1e0a3148678.asciidoc | 23 +
 .../4a1951844bd39f26961bfc965f3432b1.asciidoc | 18 +
 .../4a2080ae55d931eb0643cc3eb91ec524.asciidoc | 53 ++
 .../4a4b8a406681584a91c0e614c1fa4344.asciidoc | 39 ++
 .../4a7510a9c0468303658383c00796dad2.asciidoc | 24 +
 .../4aa81a694266fb634904224d14cd9a87.asciidoc | 18 +
 .../4acf902c2598b2558f34f20c1744c433.asciidoc | 17 -
 .../4ae494d1e62231e832fc0436b04e2014.asciidoc | 24 +
 .../4af15c4f26ddefb9c350e7a246a66a15.asciidoc | 33 +
 .../4b1044259a6d777d87529eae25675005.asciidoc | 25 +
 .../4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc | 17 +
 .../4b3a49710fafa35d6d41a8ec12434515.asciidoc | 34 +
 .../4b5110a21676cc0e26e050a4b4552235.asciidoc | 10 +
 .../4ba86373e13e106de044f190343be328.asciidoc | 40 ++
 .../4bb4a64cf04e3feb133b0221d29beaa9.asciidoc | 12 +
 .../4bb7bcfebca682fb9c9e3e47bfd5ef6f.asciidoc | 35 +
 .../4bba59cf745ac7b996bf90308bc26957.asciidoc | 24 +
 .../4bc4db44b8c74610b73f21a421099a13.asciidoc | 10 +
 .../4bc744b0f33b322741a8caf6d8d7d765.asciidoc | 19 +
 .../4bd42e31ac4a5cf237777f1a0e97aba8.asciidoc | 10 +
 .../4be07b34db282044c88d5021c7ea08ee.asciidoc | 41 ++
 .../4be20da16d2b58216e8b307218c7bf3a.asciidoc | 27 +
 .../4bef98a2dac575a50ee0783c2269f1db.asciidoc | 21 +
 .../4bf6bb703a52267379ae2b1e1308cf8b.asciidoc | 24 +
 .../4bfcb2861f1d572bd0d66acd66deab0b.asciidoc | 15 +
 .../4c174e228b6b74497b73ef2be80de7ad.asciidoc | 8 +
 .../4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc | 16 +
 .../4c5f0d7af287618062bb627b44ccb23e.asciidoc | 10 +
 .../4c712bd5637892a11f16b8975a0a98ed.asciidoc | 10 +
 .../4c777b8360ef6c7671ae2e3803c0b0f6.asciidoc | 36 ++
 .../4c77d12039fe2445c9251e33979071ac.asciidoc | 20 +
 .../4c803b088c1915a7b0634d5cafabe606.asciidoc | 20 +
 .../4c9350ed09b28f00e297ebe73c3b95a2.asciidoc | 19 +
 .../4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc | 23 +
 .../4ca15672fc5ab1d80a127d086b6d2837.asciidoc | 8 +
 .../4ca5bc2c2b2f64d15b9c16370ae97a39.asciidoc | 23 +
 .../4cb44556b8c699f43489b17b42ddd475.asciidoc | 21 +
 .../4cd246e5c4c035a2cd4081ae9a3d54e5.asciidoc | 21 -
 .../4cd40113e0fc90c37976f28d7e4a2327.asciidoc | 81 +++
 .../4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc | 11 +
 .../4cdcc3fde5cea165a3a7567962b9bd61.asciidoc | 64 ++
 .../4ce4563e207233c48ffe849728052dca.asciidoc | 10 +
 .../4d21725453955582ff12b4a1104aa7b6.asciidoc | 13 +
 .../4d2e6eb7fea407deeb7a859c267fda62.asciidoc | 34 +
 .../4d46e2160784bdf7cce948e9f0d31fc8.asciidoc | 29 +
 .../4d56b179242fed59e3d6476f817b6055.asciidoc | 24 -
 .../4d6997c70a1851f9151443c0d38b532e.asciidoc | 55 --
 .../4d7c0b52d3c0a084157428624c543c90.asciidoc | 8 +
 .../4dab4c5168047ba596af1beb0e55b845.asciidoc | 10 +
 .../4ded8ad815ac0e83b1c21a6c18fd0763.asciidoc | 10 +
 .../4e1f02928ef243bf07fd425754b7642b.asciidoc | 16 +
 .../4e2317aa45e87922d07c8ddc67a82d32.asciidoc | 34 +
 .../4e3414fc712b16311f9e433dd366f49d.asciidoc | 11 +
 .../4e4608ae4ce93c27bd174a9ea078cab2.asciidoc | 36 ++
 .../4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc | 30 +
 .../4e5f7a97efdbf517f7a2ed6ef7ff469c.asciidoc | 13 +
 .../4e6b78ac991ed2d5f9a2e7c89f4fc471.asciidoc | 19 +
 .../4e926063a9494b563387617b08c4f232.asciidoc | 12 +
 .../4e931cfac74e46e221cf4a9ab88a182d.asciidoc | 11 +
 .../4ed946065faa92f9950f04e402676a97.asciidoc | 10 +
 .../4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc | 19 +
 .../4f08d9e21d9f199acc77abfb83287878.asciidoc | 23 +
 .../4f140d8922efdf3420e41b1cb669a289.asciidoc | 10 +
 .../4f1e1205154d280db21fbd2754ed5398.asciidoc | 19 +
 .../4f3366fc26e7ea4de446dfa5cdec9683.asciidoc | 21 +
 .../4f621ab694f62ddb89e0684a9e76c4d1.asciidoc | 22 +
 .../4f666d710758578e2582850dac3ad144.asciidoc | 11 +
 .../4f67b5f5c040f611bd2560a5d38ea6f5.asciidoc | 17 +
 .../4f792d86ff79dcfe4643cd95505f8d5f.asciidoc | 27 +
 .../4f8a4ad49e2bca6784c88ede18a1a709.asciidoc | 8 +
 .../4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc | 10 +
 .../4fb0629146ca78b85e823edd405497bb.asciidoc | 23 +
 .../4fcca1687d7b2cf08de526539fea5a76.asciidoc | 41 ++
 .../4fe78a4dfb747fd5dc34145ec6b76183.asciidoc | 26 +
 .../4ff2dcec03fe097075cf1d174a019a1f.asciidoc | 25 +
 .../50096ee0ca53fe8a88450ebb2a50f285.asciidoc | 13 +
 .../5024c524a7db0d6bb44c1820007cc5f4.asciidoc | 29 +
 .../5043b83a89091fa00edb341ddf7ba370.asciidoc | 20 -
 .../50522d3d5b3d055f712ad737e3d1707a.asciidoc | 51 ++
 .../505a6c21a4cb608d3662fab1a35eb6df.asciidoc | 33 +
 .../50764f4ea88079156b0aff2835bcdc45.asciidoc | 12 +
 .../5093bfd281dbe41bd0dba8ff979e6e47.asciidoc | 10 +
 .../50a9623c153cabe64101efb633e10e6c.asciidoc | 10 +
 .../50b5c0332949d2154c72b629b5fa6222.asciidoc | 23 +
 .../50c2b06ecddb5a4aebd8b78e38af5f1f.asciidoc | 54 ++
 .../50c2cea2adbe9523458c2686ab11df54.asciidoc | 29 +
 .../50d5c5b7e8ed9a95b8d9a25a32a77425.asciidoc | 12 +
 ...50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc} | 15 +-
 .../50dc35d3d8705bd62aed20a15209476c.asciidoc | 28 +
 .../50ddf374cfa8128538ea092ee98b723d.asciidoc | 10 +
 .../50f922e9f002d8ac570953be59414b7b.asciidoc | 16 +
 ...511e5bb8ab881171b7e8629095e30b85.asciidoc} | 7 +-
 .../51390ca10aa22d7104e8970f09ea4512.asciidoc | 30 +
 .../515e1104d136082e826d1b32af011759.asciidoc | 25 +
 .../5174c3c731fc1703e5b43ae2bae7a80e.asciidoc | 11 +
 .../518fcf1dc1edd7dba0864accf71b49f4.asciidoc | 16 +
 .../5195a88194f7a139c635a84398d76205.asciidoc | 11 +
 .../51b40610ae05730b4c6afd25647d7ae0.asciidoc | 40 ++
 .../51b44224feee6e2e5974824334474c77.asciidoc | 18 +
 .../51f1a0930362594b231a5bcc17673768.asciidoc | 23 +
 .../51f6cb682424e110f289af79c106f4c7.asciidoc | 12 +
 .../5271f4ff29bb48838396e5a674664ee0.asciidoc | 65 --
 .../527324766814561b75aaee853ede49a7.asciidoc | 20 -
 .../5275842787967b6db876025f4a1c6942.asciidoc | 31 +-
 .../5276a831513623e43ed567eb52b6dba9.asciidoc | 18 +
 .../528e5f1c345c3769248cc6889e8cf552.asciidoc | 16 +
 .../529b975b7cedaac58dce9821956adc37.asciidoc | 42 ++
 .../52a2d119addb15366a935115518335fd.asciidoc | 16 +
 .../52b71aa4ae6563abae78cd20ff06d1e9.asciidoc | 11 +
 .../52bc577a0d0cd42b46f33e0ef5124df8.asciidoc | 22 +
 .../52be795b68e6ef3f396f35fea52d0481.asciidoc | 13 +
 .../52c2b4c180388f5ae044588ba70b70f0.asciidoc | 27 +
 .../52c7e4172a446c394210a07c464c57d2.asciidoc | 9 +-
 .../52cdb5526ce69d0223d1dd198308bfea.asciidoc | 26 +
 .../52f1c1689ab35353858cdeaab7597546.asciidoc | 15 +
 .../52fd112e970882c4d7cc4b0cca8e2c6f.asciidoc | 24 +
 .../5302f4f2bcc0f400ff71c791e6f68d7b.asciidoc | 18 +
 .../5305bc07c1bf90bab3e8db1de3e31b26.asciidoc | 13 +
 .../532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc | 17 +
 .../532f371934b61fb4992d37bedcc085de.asciidoc | 13 +
 ...5330191ec9f11281ebf6867bf11c58ae.asciidoc} | 23 +-
 .../5332c4cca5fbb45cc700dcd34f37bc38.asciidoc | 15 +
 ...53aa8b21e2b1c4d48960343711296704.asciidoc} | 19 +-
 .../53b908c3432118c5a6e460f74d32006b.asciidoc | 22 +-
 .../53bb7f0e3429861aadb8dd3d588085cd.asciidoc | 16 +
 .../53c6256295111524d5ff2885bdcb99a9.asciidoc | 12 +
 .../53e4ac5a4009fd21024f4b31e54aa83f.asciidoc | 12 +
 .../54059961f05904368ced52c894a50e23.asciidoc | 32 +
 .../54092c8c646133f5dbbc047990dd458d.asciidoc | 36 --
 .../540aefc39303c925a4efff71ebe2f002.asciidoc | 17 +
 .../5433bb83628cc91d81fbe53c533b2a09.asciidoc | 20 +
 .../5457c94f0039c6b95c7f9f305d0c6b58.asciidoc | 21 +
 .../548b85bd9e6e7d33e36133953869449b.asciidoc | 12 +
 .../54a215d242ab65123b09e9dfb71bcbbf.asciidoc | 19 +
 .../55096381f811388fafd8e244dd2402c8.asciidoc | 13 +
 .../553904c175a76d5ba83bc5d46fff7373.asciidoc | 11 +
 ...553d79817bb1333970e99507c37a159a.asciidoc} | 25 +-
 .../5553cf7a02c22f616cd994747f2dd5a5.asciidoc | 26 +
 .../5566cff431570f522e1fc5475b2ed875.asciidoc | 71 ++
 .../55838e0b21c4f4da2dc8aaec045a6d5f.asciidoc | 29 +
 .../558b3f9b987771e9f9f35e51a0d7e062.asciidoc | 45 ++
 .../5597eeb8f43b5d47bd07f27122c24194.asciidoc | 18 +
 .../55d349ccb0efd5e1c06c6dd383a593cf.asciidoc | 16 +
 .../55e8ddf643726dec51531ada0bec7143.asciidoc | 8 +
 .../55f0fec6342f677af74de2124b801aa2.asciidoc | 17 +
 .../55f4a15b84b724b9fbf2efd29a4da120.asciidoc | 8 +
 .../5619103306878d58a058bce87c5bd82b.asciidoc | 11 +
 .../5632c3b947062d3a5fc0e4f3413b3308.asciidoc | 16 +
 .../563dfbf421422c837ee6929ae2ede876.asciidoc | 10 +
 .../56563f91d9f0b74e9e4aae9cb221845b.asciidoc | 35 +
 .../565908b03edff1d6e6e7cdfb92177faf.asciidoc | 25 +
 .../568979150ce18739f8d3ea859355aaa3.asciidoc | 11 +
 .../569f10fee671632017c722fd983009d4.asciidoc | 40 ++
 .../56a1aa4f7fa62f2289e20607e3039bf3.asciidoc | 15 +
 .../56a903530990313b753b1be33578997a.asciidoc | 30 +
 .../56b6b50b174a935d368301ebd717231d.asciidoc | 10 +
 .../56da9c55774f4c2e8eadde0579bdc60c.asciidoc | 26 +
 .../56db76c987106a870357854d3068ad98.asciidoc | 11 +
 .../56e90a63f94eeb882fe8acbcd74229c2.asciidoc | 32 +
 .../56f3a6bec7be5a90fb43144c331a5b5a.asciidoc | 11 +
 .../56fa6c9e08258157d445e2f92274962b.asciidoc | 19 +
 .../571314a948e49f1f9614d36fcf79392a.asciidoc | 10 +
 .../578808065fee8691355b8f25c35782cd.asciidoc | 19 +
 .../5797df4b8e71d821a1488cbb63481104.asciidoc | 10 +
 .../57a3e8d2ca64e37e90d658c4cd935399.asciidoc | 26 +
 .../57c690f8fa95bacf4b250803be7467e4.asciidoc | 13 +
 .../57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc | 8 +
 .../57e0bbab98f17d5b564d1ea146a55fe4.asciidoc | 40 ++
 .../582c4b05401dbc190b19411282d85310.asciidoc | 19 +
 .../582da02c09e0597b4396c87e33571e7b.asciidoc | 12 +
 .../5837d5f50665ac0a26181d3aaeb3f204.asciidoc | 11 +
 .../584f502cf840134f2db5f39e2483ced1.asciidoc | 39 ++
 .../585a34ad79aee16678b37da785933ac8.asciidoc | 8 +
 .../585b19369cb9b9763a7e8d405f009a47.asciidoc | 13 +
 .../5865ca8d2bcd087ed5dbee33fafee57f.asciidoc | 10 +
 .../586cfa0e5fd695b7d451e854f9fb4a9c.asciidoc | 53 ++
 .../58b5003c0a53a39bf509aa3797aad471.asciidoc | 21 -
 .../58ca855be30049f8f0879e532db51ee2.asciidoc | 55 ++
 .../58e684e0b771b4646662fe12d3060c05.asciidoc | 20 +
 .../58f72be60c25752d7899a35fc60fe6eb.asciidoc | 12 +
 .../591c7fb7451069829a14bba593136f1f.asciidoc | 11 +
 .../5969c446688c8b326acc80276573e9d2.asciidoc | 34 +
 .../59726e3c90e1218487a781508788c243.asciidoc | 20 +
 .../597d456edfcb3d410954a3e9b5babf9a.asciidoc | 22 +
 .../5987afb2c17c73fe3d860937565ef115.asciidoc | 11 +
 .../599454613ac699d447537e79e65ae35a.asciidoc | 20 +
 .../599f693cc7d30b1153f5eeecec8eb23a.asciidoc | 10 +
 .../59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc | 10 +
 .../59d015f7bd0eeab40d0885010a62fa70.asciidoc | 25 +
 .../59d736a4d064ed2013c7ead8e32e0998.asciidoc | 18 +
 .../59f0ad2a6f97200e98e8eb079cdd8334.asciidoc | 11 +
 .../5a006feed86309b547bbaa1baca1c496.asciidoc | 69 ++
 .../5a3855f1b3e37d89ab7cbcc4f7ae1dd3.asciidoc | 17 +
 .../5a3fe9584d203d1fd6c96981ba34e0de.asciidoc | 20 +
 .../5a6bb9ac6830668ecc00550c1aa8f2f1.asciidoc | 16 +
 .../5a754dcc854b9154296550a0b581cb9d.asciidoc | 19 +
 .../5a7f05ab1d05b4eef5ff327168517165.asciidoc | 16 +
 .../5ab9b44939fb30f5b4adbdcc4bcc0733.asciidoc | 24 +
 .../5ad365ed9e1a3c26093a0f09666c133a.asciidoc | 24 +
 .../5afbd9caed88c32f8a2968c07054f096.asciidoc | 10 +
 .../5b0cc9e186a8f765a11141809b8b17b7.asciidoc | 12 +
 .../5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc | 8 +
 .../5b1ae98ad03e2819fc7c3468840ef448.asciidoc | 12 +
 .../5b266deba5396c7810af1b8315c23596.asciidoc | 19 +
 .../5b281956e35a26e734c482b42b356c0d.asciidoc | 10 +
 .../5b2a13366bd4e1ab4b25d04d360570dc.asciidoc | 22 +
 .../5b3384992c398ea8a3064d2e08725e2b.asciidoc | 77 +++
 .../5b58007f10700ec7934580f034404652.asciidoc | 18 +
 .../5b6bc085943e9189236d98b3c05ed62c.asciidoc | 27 +
 .../5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc | 18 +
 .../5b8119b4d9a09f4643be5a5b40875c8f.asciidoc | 49 ++
 .../5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc | 31 +
 .../5ba32ebaa7ee28a339c7693696d305ca.asciidoc | 20 +
 .../5bb0d84185df2f276f01bb2fba709e1a.asciidoc | 11 +
 .../5bbccf103107e505c17ae59863753efd.asciidoc | 12 +
 .../5c187ba92dd1678fda86b5eec8cc7421.asciidoc | 19 +
 .../5c22172a944864a7d138decdc08558b4.asciidoc | 12 +
 .../5c249eaeb99e6aee07162128288ac1b1.asciidoc | 32 +
 .../5c24a9a0ddbfa50628dacdb9d25f7ab0.asciidoc | 19 +
 .../5c2f486c27bd5346e512265f93375d16.asciidoc | 25 +-
 .../5c6fbeac20dc23b613847f35d431ecab.asciidoc | 37 ++
 .../5c7ece1f30267adabdb832424871900a.asciidoc | 10 +
 .../5c8ac24dd56e85d8f3f6705ec3c6dc32.asciidoc | 32 +
 .../5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc | 10 +
 .../5cd792dff7d5891c33bef098d9338ce1.asciidoc | 42 ++
 .../5cfab507e50d8c5182939412a9dbcdc8.asciidoc | 67 ++
 ...5d03bb385904d20c5323885706738459.asciidoc} | 23 +-
 .../5d32279dcd52b22d9e1178a02a3ad957.asciidoc | 17 -
 .../5d428ea66252fd252b6a8d6f47605c86.asciidoc | 27 +
 .../5d5b06468c54308f52c212cca5d58fef.asciidoc | 13 +
 .../5d5cdbd4c5c62a90ff2a39cba4a59368.asciidoc | 27 +
 .../5d689d74062cddd01a0711a2fa7f23fd.asciidoc | 12 +
 .../5d7980d8c745abf7ea0fa573e818bd5b.asciidoc | 28 +
 .../5d9d7b84e2fec7ecd832145cbb951cf1.asciidoc | 49 +-
 .../5da6efd5b038ada64c9e853c88c1ec47.asciidoc | 26 +-
 .../5db5349162a4fbe74bffb646926a2495.asciidoc | 32 +
 .../5dbf06ca9058843f572676fcaf587f75.asciidoc | 19 +
 .../5dd695679b5141d9142d3d30ba8d300a.asciidoc | 21 -
 .../5ddc26da6e163fda54f52d33b5157051.asciidoc | 17 +
 ...5df3226fdc8f1f66ae92ba2f527af8c0.asciidoc} | 15 +-
 .../5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc | 10 +
 .../5dfe24287bb930ad33345caf092a004b.asciidoc | 18 +
 .../5e099493f135ff7bd614e935c4f2bf5a.asciidoc | 19 +
 .../5e124875d97c27362ae858160ae1c6d5.asciidoc | 8 +
 .../5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc | 42 ++
 .../5e2f7097eb299de553d0fa0087d70a59.asciidoc | 27 +
 .../5e3673bcbef5731746e400c4f3fe134d.asciidoc | 23 +
 .../5e415c490a46358643ee2aab554b4876.asciidoc | 14 +
 .../5e47a407b6ca29dadf6eac5ab1d71163.asciidoc | 36 ++
 .../5e6419bc3e2db0d0f05bce58d8cc9215.asciidoc | 30 +
 .../5e87dd38ac3a0fd59ad794005b16d13e.asciidoc | 10 +
 .../5e9a7845e60b79685aab59877c5fbd1a.asciidoc | 16 +
 ...5ea9da129ca70a5fe534f27a82d80b29.asciidoc} | 24 +-
 .../5f031b7bd2b7d98d2d10df7420d269ff.asciidoc | 10 +
 .../5f1ed9cfdc149763b444acfbe10b0e16.asciidoc | 18 +
 .../5f210f74725ea0c9265190346edfa246.asciidoc | 23 -
 .../5f3373887e8d3dc31239b687a5151449.asciidoc | 39 ++
 .../5f3549ac7fee94682ca0d7439eebdd2a.asciidoc | 23 +-
 .../5f3a3eefeefe6fa85ec49d499212d245.asciidoc | 23 -
 .../5f72ab800c3db9d118df95e2a378d411.asciidoc | 11 +
 .../5f79c42b0f74fdf71359cef82843fad3.asciidoc | 47 ++
 .../5f7b59d4fad0bdce6b09abb520ddb51d.asciidoc | 49 ++
 .../5f8acd1e367b048b5542dbc6079bcc88.asciidoc | 28 +
 .../5f8d90515995a5eee189d722abe3b111.asciidoc | 21 +
 .../5f8fb5513d4f725434db2f517ad4298f.asciidoc | 34 +
 ...5faa121e00a0582160b2adb2b72fed67.asciidoc} | 9 +-
 .../5fca6671bc8eaddc44ac488d1c3c6909.asciidoc | 10 +
 .../5fde0d78e9b2cc0519f8a63848ed344e.asciidoc | 11 +
 .../5ffe6fd303400e8678fa1ead291e237f.asciidoc | 18 +
 .../600d33c80f8872dda85c87ed41da95fd.asciidoc | 22 +
 .../6013ed65d2058da5ce704b47a504b60a.asciidoc | 53 ++
 .../601ad3b0ceccb3fcd282e5ec36748954.asciidoc | 11 +
 .../60299454aa19fec15a604a0dd06fe522.asciidoc | 11 +
 .../602e04051c092cf77de2f75a563661b8.asciidoc | 10 +
 .../604da59fe41160efa10a846a9dacc07a.asciidoc | 10 +
 .../6061aadb3b870791278212d1e8f52b39.asciidoc | 10 +
 .../608cadc6b8a3f194612b69279ccc96de.asciidoc | 27 +
 .../6097ae69c64454a92a89ef01b994e9f9.asciidoc | 12 +
 .../60a9aa5dcde9023901f6ff27231a10c4.asciidoc | 27 +
 .../60b0fc1b6ae418621ff1b31591fa1fce.asciidoc | 10 +
 .../60cab62af1540db2ad3b696b0ee1d7a8.asciidoc | 18 +
 .../60d689aae3f8de1e6830329dfd69a6a6.asciidoc | 18 +
 .../60ee33f3acfdd0fe6f288ac77312c780.asciidoc | 21 -
 ...60f889fbed5df3185444f7015b48ed76.asciidoc} | 7 +-
 .../610f629d0486a64546d62402a0a5e00f.asciidoc | 16 +
 .../611c1e05f4ebb48a1a8c8488238ce34d.asciidoc | 10 +
 .../612c2e975f833de9815651135735eae5.asciidoc | 11 +
 .../6138d6919f3cbaaf61e1092f817d295c.asciidoc | 20 -
 .../618c9d42284c067891fb57034a4fd834.asciidoc | 10 +
 .../618d5f3d35921d8cb7e9ccfbe9a4c3e3.asciidoc | 22 -
 .../61bf6ac15ae3e22323454a9a2872a2fa.asciidoc | 18 +
 ...61c49cee90c6aa0eafbdd5cc03936e7d.asciidoc} | 15 +-
 .../61d6b9503459914c436930c3ae87d454.asciidoc | 15 +
 .../61e38e95191f4dde791070c6fce8a092.asciidoc | 32 +
 .../621665fdbd7fc103c09bfeed28b67b1a.asciidoc | 7 +-
 .../621f4553e24592d40c8cdbbdfaeb027e.asciidoc | 23 +
 .../6220087321e6d288024a70c6b09bd720.asciidoc | 19 +
 .../6244204213f60edf2f23295f9059f2c9.asciidoc | 10 +
 .../624e69dedf42c4877234b87ec1d00068.asciidoc | 10 +
 .../625dc94df1f9affb49a082fd99d41620.asciidoc | 17 -
 .../626f8c4b3e2cd3d9beaa63a7f5799d7a.asciidoc | 26 -
 .../62c311e7ab4de8b79e532929a5069975.asciidoc | 83 +++
 .../62ccee6ad356428c2d625742f961ceb7.asciidoc | 11 +
 .../62d3c8fccb11471bdc12555c1a7777f2.asciidoc | 21 +
 .../62eafc5b3ab75cc67314d5a8567d6077.asciidoc | 10 +
 .../62f1ec1bb5cc5a9c2efd536a7474f549.asciidoc | 17 +
 .../630d127ccedd25a6cff31ea098ac2847.asciidoc | 34 +
 .../6326f5c6fd2a6e6b1aff9a643b94f455.asciidoc | 47 ++
 .../633c8a9fc57268979d8735c557705809.asciidoc | 30 +
 .../634ecacf14b83c5f0bb8b6273cf6418e.asciidoc | 55 ++
 .../63521e0089c631d6668c44a0a9d7fdcc.asciidoc | 26 +
 .../6352e846bb83725ae6d853aa64d8697d.asciidoc | 26 +
 .../6365312d470426cab1b77e9ffde49170.asciidoc | 17 +
 .../636ee2066450605247ec1f68d04b8ee4.asciidoc | 16 +
 .../63893e7e9479a9b60db71dcddcc79aaf.asciidoc | 10 +
 ...63cc960215ae83b359c12df3c0993bfa.asciidoc} | 17 +-
 .../63d1c07d22a3ca3b0ec6d950547c011c.asciidoc | 29 +
 .../63e20883732ec30b5400046be2efb0f1.asciidoc | 10 +
 .../63ecdab34940af053acc409164914c32.asciidoc | 81 +++
 .../640621cea39cdeeb76fbc95bff31a18d.asciidoc | 23 +
 .../640a89d0b39630269433425ff476faf3.asciidoc | 12 +
 .../640da6dd719a34975b5627dfa5fcdd55.asciidoc | 12 +
 .../640e4f2c2d29f9851320a70927bd7a6c.asciidoc | 12 +
 .../641009f2147e1ca56215c701f45c970b.asciidoc | 23 +
 .../6414b9276ba1c63898c3ff5cbe03c54e.asciidoc | 8 +
 .../641f75862c70e25e79d249d9e0a79f03.asciidoc | 34 +
 .../642161d70dacf7d153767d37d3726838.asciidoc | 10 +
 .../642c0c1c76e9bf226cd216ebae9ab958.asciidoc | 34 +
 .../643b9506d1129d5215f9a1bb0b509aba.asciidoc | 37 ++
 .../643e19c3b6ac1134554dd890e2249c2b.asciidoc | 18 +
 .../645136747d37368a14ab34de8bd046c6.asciidoc | 57 --
 .../645433e8e479e5d71c100f66dd2de5d0.asciidoc | 534 +++++++++++++++
 .../645796e8047967ca4a7635a22a876f4c.asciidoc | 31 -
 .../645c4c6e209719d3a4d25b1a629cb23b.asciidoc | 20 -
 .../64622409407316d2d47094e692d9b516.asciidoc | 29 +
 .../6464124d1677f4552ddddd95a340ca3a.asciidoc | 61 +-
 .../646d71869f1a18c5bede7759559bfc47.asciidoc | 11 +
 .../6490d89a4e43cac5e6b9bc19840d5478.asciidoc | 11 +
 .../64a6fb4bcb8cfea139a0e5d3765c063a.asciidoc | 11 +
 .../64a79861225553799b26e118d7851dcc.asciidoc | 10 +
 .../64aff98cf477555e7411714c17006572.asciidoc | 17 +
 .../64b9baa6d7556b960b29698f3383aa31.asciidoc | 27 -
 .../64c572abc23394a77b6cca0b5368ee1d.asciidoc | 8 +
 .../64c804869ddfbcb9075817d0bbf71b5c.asciidoc | 24 +
 .../64ca2ccb79a8f4add5b8fe2d3322ae92.asciidoc | 18 +
 .../64d24f4b2a57dba48092dafe3eb68ad1.asciidoc | 20 +
 .../64ffaa6814ec1ec4f59b8f33b47cffb4.asciidoc | 13 +
 .../650a0fb27c66a790c4687267423af1da.asciidoc | 23 +
 .../6521c3578dc4ad4a6db697700986e78e.asciidoc | 36 ++
 .../653c0d0ef146c997ef6bc6450d4f5f94.asciidoc | 25 +
 .../654882f545eca8d7047695f867c63072.asciidoc | 10 +
 .../65578c390837cb4c0fcc77fb17857714.asciidoc | 29 +
 .../657cf67bbc48f3b8c7fa15e275a5ef72.asciidoc | 19 +
 .../658842bf41e0fcb7969937155946a0ff.asciidoc | 17 +
 .../65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc | 16 +
 .../65c671fbecdb5b0d75c13d63f87e36f0.asciidoc | 32 +
 .../65e892a362d940e4a74965f21c15ca09.asciidoc | 18 +
 .../6606d46685d10377b996b5f20f1229b5.asciidoc | 14 +
 .../6636701d31b0c9eb8316f1f8e99cc918.asciidoc | 27 +
 .../66539dc6011dd2e0282cf81db1f3df27.asciidoc | 10 +
 .../666c420fe61fa122386da3c356a64943.asciidoc | 27 +
 .../6689aa213884196b47a6f482d4993749.asciidoc | 20 +
 .../6693f0ffa0de3229b5dedda197810e70.asciidoc | 11 +
 .../669773766b041be768003055ad523038.asciidoc | 11 +
 .../6705eca2095ade294548cfb25bf2dd86.asciidoc | 12 +
 ...67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc} | 19 +-
 .../672d30eb3af573140d966e88b14814f8.asciidoc | 15 +
 .../6742a8cd0b7b4c1c325ce2f22faf6cb4.asciidoc | 19 +
 .../674bb755111c6fbaa4c5ac759395c122.asciidoc | 18 +
 .../67967388db610dcb9d24fb59ede348d8.asciidoc | 18 +
 .../67a1f31cf60773a2378c2c30723c4b96.asciidoc | 20 +
 .../67a490d749a0c3bb16a266663423893d.asciidoc | 10 +
 .../67a55ac3aaee09f4aeeb7d2763da3335.asciidoc | 67 ++
 .../67aac8882fa476db8a5878b67ea08eb3.asciidoc | 16 +
 .../67bab07fda27ef77e3bc948211051a33.asciidoc | 13 +
 .../67c3808751223eef69a57e6fd02ddf4f.asciidoc | 27 +
 .../67ceac4bf2d9ac7cc500390544cdcb41.asciidoc | 20 -
 .../67ffa135c50c43d6788636c88078c7d1.asciidoc | 26 +
 .../682336e5232c9ad3d866cb203d1c58c1.asciidoc | 23 +
 .../6843d859e2965d17cad4f033c81db83f.asciidoc | 19 +
 .../6856f7c6a732ab55ca71c1ee2ec2bbad.asciidoc | 59 ++
 .../6859530dd9d85e59bd33a53ec96a3836.asciidoc | 22 +
 .../686bc640b877de845c46bef372a9866c.asciidoc | 34 +
 .../68721288dc9ad8aa1b55099b4d303051.asciidoc | 24 +-
 .../68738b4fd0dda177022be45be95b4c84.asciidoc | 9 +-
 .../6884454f57c3a41059037ea762f48d77.asciidoc | 11 +
 .../68a891f609ca3a379d2d64e4914f3067.asciidoc | 12 +
 .../68b64313bf89ec3f2c645da61999dbb4.asciidoc | 10 +
 .../68cb8a452e780ca78b0cb761be3629af.asciidoc | 15 +
 .../691fe20d467324ed43a36fd15852c492.asciidoc | 13 +
 .../692606cc6d6462becc321d92961a3bac.asciidoc | 11 +
 .../69582847099ee62ed34feddfaba83ef6.asciidoc | 20 +
 .../698e0a2b67ba7842caa801d9ef46ebe3.asciidoc | 23 +
 .../69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc | 12 +
 .../69a7be47f85138b10437113ab2f0d72d.asciidoc | 14 -
 .../69ab708fe65a75f870223d2289c3d171.asciidoc | 29 +
 ...69c07cfdf8054c301cd6186c5d71aa02.asciidoc} | 8 +-
 .../69d5710bdec73041c66f21d5f96637e8.asciidoc | 17 +
 .../69d9b8fd364596aa37eae6864d8a6d89.asciidoc | 16 +
 .../69daf5ec2a9bc07096e1833286c36076.asciidoc | 19 +
 .../69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc | 10 +
 .../6a1702dd50690cae833572e48a0ddf25.asciidoc | 22 +-
 .../6a350a17701e8c8158407191f2718b66.asciidoc | 10 +
 .../6a3a578ce37fb2c63ccfab7f75db9bae.asciidoc | 13 +
 .../6a3a86ff58e5f20950d429cf2832c229.asciidoc | 10 +
 .../6a3f06962cceb3dfd3cd4fb5c679fa75.asciidoc | 13 +
 .../6a4679531e64c492fce16dc12de6dcb0.asciidoc | 22 -
 .../6a50c1c53673fe9cc3cbda38a2853cdd.asciidoc | 10 +
 .../6a55dbba114c6c1408474f7e9cfdbb94.asciidoc | 17 +
 .../6a81d00f0d73bc5985e76b3cadab645e.asciidoc | 26 -
 .../6a9655fe22fa5db7a540c145bcf1fb31.asciidoc | 33 +
 .../6a969ebe7490d93d35be895b14e5a42a.asciidoc | 10 +
 .../6aa2941855d13f365f70aa8767ecb137.asciidoc | 60 ++
 .../6aca241c0361d26f134712821e2d09a9.asciidoc | 10 +
 .../6af9dc1c3240aa8e623ff3622bcb1b48.asciidoc | 12 +
 .../6b0288acb739c4667d41339e5100c327.asciidoc | 17 +
 .../6b0d492c0f50103fefeab385a7bebd01.asciidoc | 24 +
 .../6b104a66ab47fc1e1f24a5738f82feb4.asciidoc | 13 +
 .../6b1336ff477f91d4a0db0b06db546ff0.asciidoc | 8 +
 .../6b1e837a8469eca2d03d5c36f5910f34.asciidoc | 33 +
 .../6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc | 10 +
 .../6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc | 19 +
 .../6b6fd0a5942dfb9762ad2790cf421a80.asciidoc | 29 +
 .../6b77795e9249c8d9865f7a49fd86a863.asciidoc | 18 +
 .../6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc | 14 +
 .../6ba332596f5eb29660c90ab2d480e7dc.asciidoc | 35 +
 .../6bbc613bd4f9aec1bbdbabf5db021d28.asciidoc | 45 +-
 .../6be70810d6ebd6f09d8a49f9df847765.asciidoc | 37 --
 .../6bf63f2ec6ba55fcaf1092f48212bf25.asciidoc | 20 -
 .../6bfa0a9a50c4e94276c7d63af1c31d9e.asciidoc | 56 ++
 .../6c00dae1a456ae5e854e98e895dca2ab.asciidoc | 23 +
 .../6c0acbff2df9003ccaf4350c9e2e186e.asciidoc | 27 +
 .../6c3f7c8601e8cc13d36eef98a5e2cb34.asciidoc | 33 +
 .../6c70b022a8a74b887fe46e514feb38c0.asciidoc | 10 +
 .../6c72460570307f23478100db04a84c8e.asciidoc | 10 +
 .../6c72f6791ba9223943f7556c5bfaa728.asciidoc | 24 +
 .../6c8bf6d4d68b7756f953be4c07655337.asciidoc | 16 +
 .../6c927313867647e0ef3cd3a37cb410cc.asciidoc | 11 +
 .../6cd083045bf06e80b83889a939a18451.asciidoc | 95 +++
 .../6ce8334def48552ba7d44025580d9105.asciidoc | 13 +
 .../6cf3307c00f464c46475e352e067d714.asciidoc | 31 +
 .../6d48f83c4a36d0544d876d3eff48dcef.asciidoc | 8 +
 .../6d81c749ff9554044ee5f3ad92dcb89a.asciidoc | 55 ++
 .../6db118771354792646229e7a3c30c7e9.asciidoc | 67 ++
 .../6dbfe5565a95508e65d304131847f9fc.asciidoc | 18 +
 .../6dcd3916679f6aa64f79524c75991ebd.asciidoc | 11 +
 .../6dd2a107bc64fd6f058fb17c21640649.asciidoc | 11 +
 .../6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc | 11 +
 .../6ddd4e657efbf45def430a6419825796.asciidoc | 20 +
 .../6e000496a1fa8b57148518eaad692f35.asciidoc | 12 +
 .../6e0b675eff7ed73c09a76a415930a486.asciidoc | 20 +
 .../6e1157f3184fa192d47a3d0e3ea17a6c.asciidoc | 26 +
 .../6e1ae8d6103e0b77f14fb0ea1bfb7ffa.asciidoc | 14 +
 .../6e86225ed4a6e3be8078b83ef301f731.asciidoc | 18 +
 .../6ea062455229151e311869a81ee40252.asciidoc | 34 +
 .../6edfc35a66afd9b884431fccf48fdbf5.asciidoc | 18 +
 .../6eead05dd3b04722ef0ea5644c2e047d.asciidoc | 49 ++
 .../6f0389ac52808df23bb6081a1acd4eed.asciidoc | 10 +
 .../6f07152055e99416deb10e95b428b847.asciidoc | 27 +
 .../6f21a878fee3b43c5332b81aaddbeac7.asciidoc | 23 -
 .../6f34e27481460a95e59ffbacb76bd637.asciidoc | 46 ++
 .../6f3b723bf6179b96c3413597ed7f49e1.asciidoc | 8 +
 .../6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc | 11 +
 .../6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc | 33 +
 .../6f5adbd55a3a2760e7fe9d32df18b1a1.asciidoc | 16 +
 .../6f6d5a4a90e1265822628d4ced963639.asciidoc | 19 +
 .../6f842819c50e8490080dd085e0c6aca3.asciidoc | 18 +
 .../6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc | 10 +
 .../6f8a682c908b826ca90cadd9d2f582b4.asciidoc | 15 +
 .../6fa570ae7039171e2ab722344ec1063f.asciidoc | 11 +
 .../6faf10a73f7d5fffbcb037bdb2cbaff8.asciidoc | 24 -
 .../6fbb88f399618e1b47412082062ce2bd.asciidoc | 44 ++
 .../6fbbf40cab0187f544ff7bca31d18d57.asciidoc | 31 +
 .../6fc778e9a888b16b937c5c2a7a1ec140.asciidoc | 10 +
 .../6fd82baa17a48e09e3d2eed514af7f46.asciidoc | 35 +
 .../6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc | 10 +
 .../6febf0e6883b23b15ac213abc4bac326.asciidoc | 25 +
 .../7011fcdd231804f9c3894154ae2c3fbc.asciidoc | 17 +
 .../701f1fffc65e9e51c96aa60261e2eae3.asciidoc | 10 +
 .../7021ddb273a3a00847324d2f670c4c04.asciidoc | 35 +
 .../7067a498bb6c788854a26443a64b843a.asciidoc | 32 +
 .../708e7ec681be41791f232817a07cda82.asciidoc | 14 +
 .../70bbe14bc4d5a5d58e81ab2b02408817.asciidoc | 17 +
 .../70c736ecb3746dbe839af0e468712805.asciidoc | 10 +
 .../70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc | 11 +
 .../70f0aa5853697e265ef3b1df72940951.asciidoc | 42 --
 .../70f89dd6b71ea890ad3cf47d83e43344.asciidoc | 32 +
 .../7106e6317e6368b9863cf64df9c6f0c9.asciidoc | 28 +
 .../710c7871f20f176d51209b1574b0d61b.asciidoc | 13 -
 .../711443504b69d0d296e717c716a223e2.asciidoc | 36 ++
 .../7148c8512079d378af70302e65502dd2.asciidoc | 15 +
 .../719141517d83b7e8e929b347a8d67c9f.asciidoc | 12 +
 .../71b5b2ba9557d0f296ff2de91727d2f6.asciidoc | 29 -
 .../71c629c44bf3c542a0daacbfc253c4b0.asciidoc | 10 +
 .../71de08a2d962c66f0c60677eff23f8d1.asciidoc | 33 +
 .../71e47a83f632ef159956287bbfe4ca12.asciidoc | 24 +
 .../71fa652ddea811eb3c8bf8c5db21e549.asciidoc | 12 +
 .../722238b4e7b78cdb3c6a986780e7e286.asciidoc | 28 +
 .../72231b7debac60c95b9869a97dafda3a.asciidoc | 20 -
 .../726994d8f3793b86628255a797155a52.asciidoc | 11 +
 .../72a3668ddc95d9aec47cc679d1e7afc5.asciidoc | 26 +
 .../72ae3851160fcf02b8e2cdfd4e57d238.asciidoc | 8 +
 .../72b999120785dfba2827268482e9be0a.asciidoc | 98 +++
 .../72bae0252b74ff6fd9f0702ff008d84a.asciidoc | 13 +
 .../72beebe779a258c225dee7b023e60c52.asciidoc | 9 +-
 ...72d33fbd72b0766b2f14ea27d9ccf0fa.asciidoc} | 7 +-
 .../73250f845738c428246a3ade66a8f54c.asciidoc | 51 ++
 .../734c2e2a1e45b84f1e4e65b51356fcd7.asciidoc | 20 -
 .../734e2b1d1ca84a305240a449738f0eba.asciidoc | 12 +
 .../73646c12ad33a813ab2280f1dc83500e.asciidoc | 13 +
 .../738db420e3ad2a127ea75fb8e5051926.asciidoc | 10 +
 .../73b07b24ab2c4cd304a57f9cbda8b863.asciidoc | 8 +
 .../73be1f93d789264e5b972ddb5991bc66.asciidoc | 12 +
 .../73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc | 10 +
 .../73df03be6ee78b10106581dbd7cb39ef.asciidoc | 32 +
 .../73e5c88ad1488b213fb278ee1cb42289.asciidoc | 22 -
 .../73ebc89cb32adb389ae16bb088d7c7e6.asciidoc | 12 +
 .../73f9271dee9b8539b6aa7e17f323c623.asciidoc | 34 +
 .../73fa0d6d03cd98ea538fff9e89d99eed.asciidoc | 11 +
 .../7404c6e809fee5d7eb9678a82a872806.asciidoc | 24 +
 .../741180473ba526219578ad0422f4fe81.asciidoc | 29 +
 .../7429b16221fe741fd31b0584786dd0b0.asciidoc | 18 +
 .../744aeb2af40f519e430e21e004e3c3b7.asciidoc | 44 ++
 .../7456ef459d510d66ba4492cc9fbdc6c6.asciidoc | 21 +
 .../745f9b8cdb8e91073f6e520e1d9f8c05.asciidoc | 13 -
 .../74678f8bbc7e4fc1885719d1cf63ac67.asciidoc | 32 +
 .../746e0a1cb5984f2672963b363505c7b3.asciidoc | 39 ++
 .../746e87db7e1e8b5e6b40d8b5b188de42.asciidoc | 18 +
 .../7471e97aaaf21c3a200abdd89f15c3cc.asciidoc | 26 +
 .../7477671958734843dd67cf0b8e6c7515.asciidoc | 20 -
 .../747a4b5001423938d7d05399d28f1995.asciidoc | 12 +
 ...74a80c28737a0648db0dfe7f049d12f2.asciidoc} | 9 +-
 .../74b13ceb6cda3acaa9e9f58c9e5e2431.asciidoc | 17 +
 .../74b229a6e020113e5749099451979c89.asciidoc | 26 +
 .../74da377bccad43da2b0e276c086d26ba.asciidoc | 35 +
 .../75330ec1305d2beb0e2f34d2195464e2.asciidoc | 17 -
 .../7594a9a85c8511701e281974cbc253e1.asciidoc | 21 +
 .../75957a7d1b67e3d47899c5f18b32cb61.asciidoc | 10 +
 .../75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc | 17 +
 .../75c347b181112d2c4538c01ade903afe.asciidoc | 19 +
 .../75e13a00f0909c955031ff62acc14a79.asciidoc | 26 +
 .../75e360d03fb416f0a65ca37c662c2e9c.asciidoc | 49 ++
 .../75e6d66e94e61bd8a555beaaee255c36.asciidoc | 18 +
 .../763ce1377c8dfa1ca6a042d8ee99f4f5.asciidoc | 10 +
 .../76448aaaaa2c352bb6e09d2f83a3fbb3.asciidoc | 11 +
 .../764f9884b370cbdc82a1c5c42ed40ff3.asciidoc | 23 -
 .../7659f2f2b0fbe8584b855a01638b95ed.asciidoc | 25 +
 .../765c9c8b40b67a42121648045dbf10fb.asciidoc | 10 +
 ...766cfc1c9fcd2c186e965761ceb2c07d.asciidoc} | 15 +-
 .../769f75829a8e6670aa4cf83d0d737046.asciidoc | 49 ++
 .../76b279835936ee4b546a171c671c3cd7.asciidoc | 12 +
 .../76bc87c2592864152768687c2963d1d1.asciidoc | 27 +
 .../76c167d8ab305cb43b594f140c902dfe.asciidoc | 19 +
 .../76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc | 11 +
 .../7709a48020a6cefbbe547fb944541cdb.asciidoc | 29 +
 .../77113c65e1755313183a8969233a5a07.asciidoc | 30 +
 .../77243bbf92f2a55e0fca6c2a349a1c15.asciidoc | 35 -
 .../7741a04e7e621c528cd72848d875776d.asciidoc | 10 +
 .../77447e2966708e92f5e219d43ac3f00d.asciidoc | 12 +
 .../774bfde8793dc4927f7cad2dd91c5b5f.asciidoc | 28 +
 .../774d715155cd13713e6e327adf6ce328.asciidoc | 20 -
 .../77518e8c6198acfe77c0934fd2fe65cb.asciidoc | 8 +
 .../7752b677825523bfb0c38ad9325a6d47.asciidoc | 14 +
 .../776b553df0e507c96dbdbaedecaca0cc.asciidoc | 15 +
 .../7777326c6052fee28061e5b82540aedc.asciidoc | 19 +
 .../7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc | 10 +
 .../77828fcaecc3f058c48b955928198ff6.asciidoc | 31 +
 .../77b90f6787195767b6da60d8532714b4.asciidoc | 20 +
 .../77c099c97ea6911e2dd6e996da7dcca0.asciidoc | 13 +
 .../77c50f982906718ecc59aa708aed728f.asciidoc | 18 +
 .../77ca1a3193f75651e0bf9e8fe5227a04.asciidoc | 11 +
 .../77d0780c5faea4c9ec51a322a6811b3b.asciidoc | 68 ++
 .../77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc | 11 +
 .../78176cd6f570e1534bb40b19e6e900b6.asciidoc | 10 +
 .../783c4fa5351a242364210fc32496beb2.asciidoc | 18 +
 ...7841b65a3bb880ed66cec453925a50cf.asciidoc} | 15 +-
 .../7846974b47a3eab1832a475663d23ad9.asciidoc | 29 +
 .../7885ca9d7c61050095288eef6bc6cca9.asciidoc | 27 +
 .../78c4035e4fbf6851140660f6ed2a1fa5.asciidoc | 8 +
 .../78c96113ae4ed0054e581b17542528a7.asciidoc | 29 +-
 .../78e20b4cff470ed7357de1fd74bcfeb7.asciidoc | 23 +
 .../790c49fe2ec638e5e8db51a9236bba35.asciidoc | 31 +
 .../794d9a321b944347d2a8834a07b5eb22.asciidoc | 31 +
 .../7965d4dbafdc7ca9e1ee6759939dd2e8.asciidoc | 80 +++
 .../79b43a1bf02fb5b38f54b8d5aa5dab53.asciidoc | 20 +
 .../79bf91ace935d095d8e44b3ef3fe2efa.asciidoc | 18 +
 .../79cb85efd5e4c435e73b253cb9feabb1.asciidoc | 23 +
 .../79e053326a3a8eec828523a035393f66.asciidoc | 11 +
 .../79e8bbbd6c440a21b0b4260c8cb1a61c.asciidoc | 13 +
 .../79f33e05b203eb46eef7958fbc95ef77.asciidoc | 10 +
 .../79feb4a0c0a21b7015a52f9736cd4683.asciidoc | 69 ++
 .../7a0c633a67244e9703344d036e584d95.asciidoc | 10 +
 .../7a0eb2222fe282d3aab66e12feff2a3b.asciidoc | 54 ++
 .../7a23a385a63c87cab58fd494870450fd.asciidoc | 26 +
 .../7a2b9a7b2b6553a48bd4db60a939c0fc.asciidoc | 22 +
 .../7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc | 23 +
 .../7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc | 10 +
 .../7a8de5606f283f4ef171b015eef6befa.asciidoc | 11 +
 .../7a987cd13383bdc990155d7bd5fb221e.asciidoc | 20 +
 .../7ab968a61bb0783f563dd6d29b253901.asciidoc | 45 ++
 ...7ae434b3667c589a8e70fe560f4ee3f9.asciidoc} | 8 +-
 .../7b3e913368e96eaa6e22e0d03c81310e.asciidoc | 13 +
 .../7b3f255d28ce5b46d111402b96b41351.asciidoc | 17 +
 ...7b5c231526846f2f7b98d78f3656ae6a.asciidoc} | 19 +-
 .../7b7a828c21c856a3cbc41fd2f85108bf.asciidoc | 22 +
 .../7b864d61767ab283cfd5f9b9ba784b1f.asciidoc | 10 +
 .../7b908b1189f076942de8cd497ff1fa59.asciidoc | 25 +-
 .../7b9dfe5857bde1bd8483ea3241656714.asciidoc | 11 +
 .../7c24d4bef3f2045407fbf1b95c5416f9.asciidoc | 41 ++
 .../7c3414279d47e9c29105d061ed316ef8.asciidoc | 15 +
 .../7c4551abbb7a5f3841109f7664bc4aad.asciidoc | 28 +
 .../7c5aed55a2a1dce4b63c18e1ce8146ff.asciidoc | 109 ++++
 .../7c5e41a7c0075d87b8f8348a6efa990c.asciidoc | 23 +
 .../7c8f207e43115ea8f20d2298be5aaebc.asciidoc | 8 +
 .../7c9076f3e93a8f61189783c736bf6082.asciidoc | 19 +
 .../7ca224d1a7de20a15c008e1b9dbda377.asciidoc | 17 +
 .../7cd23457e220c8b64c5b0041d2acc27a.asciidoc | 11 +
 ...7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc} | 15 +-
 .../7cf71671859be7c1ecf673396db377cd.asciidoc | 25 -
 .../7d1cbcb545aa19260073dbb2b7ef5074.asciidoc | 34 +
 .../7d880157a95f64ad339225d4af71c2de.asciidoc | 19 +
 .../7d9eba51a269571ae62fb8b442b373ce.asciidoc | 26 +
 .../7dabae9b37d2cbd724f2a069be9e753b.asciidoc | 11 +
 .../7daff6b7e668ab8a762b8ab5dff7a167.asciidoc | 41 ++
 .../7dc6c0a6386289ac6a34105e839ced55.asciidoc | 26 +
 .../7dc82f7d36686fd57a47e34cbda39a4e.asciidoc | 12 +
 .../7dd481337e40f16185f3baa3fc2cce15.asciidoc | 15 +
 .../7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc | 16 +
 .../7dedb148ff74912de81b8f8275f0d7f3.asciidoc | 17 +
 .../7df191cc7f814e410a4ac7261065e6ef.asciidoc | 9 +-
 .../7e126e2751311db60cfcbb22c9c41caa.asciidoc | 8 +
 .../7e16d21cba51eb8960835b63a1a7266a.asciidoc | 22 +
 .../7e20b6e15e409b02a5e452ceddf1e1e0.asciidoc | 35 +
 .../7e2b9bf4ab353c377b761101775edf93.asciidoc | 44 ++
 ...7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc} | 19 +-
 .../7e48648ca27024831c60b455e836c496.asciidoc | 27 +
 .../7e49705769c42895fb7b1e2ca028ff47.asciidoc | 8 +
 .../7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc | 10 +
 .../7e5bee18e61d950e823782da1b733903.asciidoc | 16 +
 .../7e5faa551f2c95ffd627da352563d450.asciidoc | 17 +
 .../7e74d1a54e816e8f40cfdaa01b070788.asciidoc | 43 ++
 .../7e77509ab646276ff78f58bb38bec8dd.asciidoc | 11 +
 .../7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc | 52 ++
 .../7ebfb30b3ece855c1b783d9210939469.asciidoc | 11 +
 .../7ed26b34ce90192a1563dcddf0e45dc0.asciidoc | 31 +
 .../7f28f8ae8fcdbd807dadde0b5b007a6d.asciidoc | 28 -
 .../7f37031fb40b68a61255b7c71d7eed0b.asciidoc | 14 +
 .../7f465b7e8ed42df6c42251b4481e699e.asciidoc | 21 -
 .../7f514e9e785e4323d16396359cb184f2.asciidoc | 24 +
 .../7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc | 23 +-
 .../7f697eb436dfa3c30dfe610d8c32d132.asciidoc | 28 -
 .../7f92ddd4e940a37d6227c43fd279c8f5.asciidoc | 17 +
 .../7fb921376cbf66bf9f381bcdd62030ba.asciidoc | 8 +
 .../7fbebf0fc9b4a402917a4723ad547c6a.asciidoc | 18 +
 .../7fd2532f4e12e3efbc58af195060b31e.asciidoc | 30 +
 .../7fd5883564d183603e60b37d286ac7e2.asciidoc | 10 +
 .../7fde3ff91c4a2e7080444af37d5cd287.asciidoc | 22 +
 .../7fe2179705304af5e87eb382dca6235a.asciidoc | 10 +
 .../7fe9f0a583e079f7fc6fd64d12b6e9e5.asciidoc | 34 +
 ...7fef68840761c6982c14ad7af96caf37.asciidoc} | 27 +-
 .../7ff4124df0541ee2496034004f4146d4.asciidoc | 16 +
 .../800861c15bb33ca01a46fb97dde7537a.asciidoc | 10 +
 .../803bbc14fbec0e49dfed9fab49c8a7f8.asciidoc | 17 +
 .../804a97ff4d0613e6568e4efb19c52021.asciidoc | 33 -
 .../804cdf477ec829740e3d045140400c3b.asciidoc | 22 +
 .../8051766cadded0892290bc2cc06e145c.asciidoc | 11 +
 .../805f5550b90e75aa5cc82b90d8c6c242.asciidoc | 30 +
 .../807c0c9763f8c1114b3c8278c2a0cb56.asciidoc | 43 ++
 .../808f4db1e2361be77dd6816c1f818139.asciidoc | 10 +
 .../80dbaf28d1976dc00de3fe2018067e81.asciidoc | 10 +
 .../80dd7f5882c59b9c1c90e8351937441f.asciidoc | 8 +
 .../80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc | 11 +
 .../812a3d7ab461d74efd9136aaf4bcf11c.asciidoc | 19 +
 .../812deb6b7668c7444f3b99d843d2adc1.asciidoc | 48 ++
 .../8141b60ad245ece2ff5e8d0817400ee5.asciidoc | 12 +
 .../8141cdaddbe7d794f09f9ee84e46194c.asciidoc | 11 +
 .../81612c2537386e031b7eb604f6756a71.asciidoc | 17 +
 .../8194f1fae6aa72ab91ea559daad932d4.asciidoc | 16 +
 .../819e00cc6547d925d80090b94e0650d7.asciidoc | 17 +
 .../81c7a392efd505b686eed978fb7d9d17.asciidoc | 44 ++
 .../81c9aa2678d6166a9662ddf2c011a6a5.asciidoc | 15 -
 .../81ee2ad368208c4c78098292547b0577.asciidoc | 17 +
 .../81ef5774355180fc44d2a52b5182d24a.asciidoc | 18 +
 .../81f1b1e1d5c81683b6bf471c469e6046.asciidoc | 33 +
 .../8206a7cc615ad93fec322513b8fdd4fd.asciidoc | 17 +
 .../820f689eaaef15fc07abd1073fa880f8.asciidoc | 16 +
 .../821422f8a03dc98d024a15fc737fe9eb.asciidoc | 11 +
 .../821ac598f5f4a795a13f8dd0c0c4d8d6.asciidoc | 10 +
 .../824fded1f9db28906ae7e85ae8de9bd0.asciidoc | 20 +
 .../827b7e9308ea288f18aea00a5accc38e.asciidoc | 10 +
 .../82844ef45e11c0eece100d3109db3182.asciidoc | 21 +
 .../829a40d484c778a8c58340c7bf09e1d8.asciidoc | 32 +
 .../82d6de3081de7b0664f44adf2942675a.asciidoc | 10 +
 .../82e94b6cdf65e324575f916b3776b779.asciidoc | 20 +
 .../83062a543163370328cf2e21a68c1bd3.asciidoc | 21 +
 .../831f65d700577e11112c711236110f61.asciidoc | 28 +
 .../8330b2ea6317769e52d0647ba434b354.asciidoc | 21 +
 .../8345d2615f43a934fe1871a5120eca1d.asciidoc | 52 ++
 .../834764b2fba6cbb41eaabd740be75656.asciidoc | 20 +
 .../8357aa6099089940589ae3e97e7bcffa.asciidoc | 8 +
 ...83780c8f5f17eb21064c1ba6e0a7aa10.asciidoc} | 17 +-
 .../838a4eabebba4c06100fb37dc30c7722.asciidoc | 34 +
 .../839710129a165cf93c6e329abedf9089.asciidoc | 24 +
 .../839a4b2930856790e34cc9dfeb983284.asciidoc | 21 +
 .../83b94f9e7b3a9abca8e165ea56927714.asciidoc | 13 +
 .../83cd4eb89818b4c32f654d370eafa920.asciidoc | 17 +
 .../83d712b9ffb2e703212b762eba3c521a.asciidoc | 11 +
 .../83d8c920460a12f87b9d5bf65515c367.asciidoc | 33 +
 .../83dd715e45a5da097123c6d10f22f8f4.asciidoc | 36 ++
 .../83dfd0852101eca3ba8174c9c38b4e73.asciidoc | 10 +
 .../83f95657beca9bf5d8264c80c7fb463f.asciidoc | 17 -
 .../840b6c5c3d9c56aed854cfab8da04486.asciidoc | 95 +++
 .../84108653e9e03b4edacd878ec870df77.asciidoc | 39 ++
 .../841ad0a70f4271f61f0bac0b467b59c5.asciidoc | 20 +
 .../841d8b766902c8e3ae85c228a31383ac.asciidoc | 11 +
 .../84243213614fe64930b1d430704afb29.asciidoc | 22 +
 ...84465de841fe5c6099a0382f786f2cb8.asciidoc} | 23 +-
 .../84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc | 18 +
 .../844928da2ff9a1394af5347a5e2e4f78.asciidoc | 17 +
 .../8478c39c71bbb559ef6ab919f918f22b.asciidoc | 20 +
 .../8494d09c39e109a012094eb9d6ec52ac.asciidoc | 19 +
 .../84c61160ca815e29e9973ba1380219dd.asciidoc | 10 +
 .../84c69fb07050f0e89720007a6507a221.asciidoc | 10 +
 .../84d6a777a51963629272b1be5698b091.asciidoc | 18 -
 .../84e2cf7417c9e0c9e6f3c23031001440.asciidoc | 8 +
 .../84edb44c5b74426f448b2baa101092d6.asciidoc | 17 +
 .../84f2f0cea90340bdd041421afdb58ec3.asciidoc | 24 +
 .../84f3e8524f6ff80e870c03ab71551538.asciidoc | 16 +
 .../850bfd0a00d32475a54ac7f87fb4cc4d.asciidoc | 25 +
 .../851f9754dbefc099c54c5423ca4565c0.asciidoc | 20 +
 .../852b394d78b8c79ee0055b5501981a4b.asciidoc | 27 +
 .../85479e02af00681210e17e3d0ff51e21.asciidoc | 18 +
 .../85519a614ae18c998986d46bbad82b76.asciidoc | 19 +
 .../8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc | 18 +
 .../856c10ad554c26b70f1121454caff40a.asciidoc | 17 +
 .../8575c966b004fb124c7afd6bb5827b50.asciidoc | 19 +-
 .../8582e918a6275472d2eba2e95f1dbe77.asciidoc | 29 +
 .../858fde15fb0a0340873b123043f8c3b4.asciidoc | 31 +
 .../8593715fcc70315a0816b435551258e0.asciidoc | 22 +
 .../85ae90b63ecba9d2bad16144b054c0a1.asciidoc | 19 +
 .../85d2e33791f1a74a69dfb04a60e69306.asciidoc | 58 ++
 .../85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc | 8 +
 .../85f0e5e8ab91ceab63c21dbedd9f4037.asciidoc | 39 ++
 .../85f2839beeb71edb66988e5c82188be0.asciidoc | 21 +
 .../85f6667f148d16d075493fddf07e2932.asciidoc | 16 +
 .../8619bd17bbfe33490b1f277007f654db.asciidoc | 22 +
 .../861f5f61409dc87f3671293b87839ff7.asciidoc | 11 +
 .../86280dcb49aa89083be4b2644daf1b7c.asciidoc | 10 +
 .../862907653d1c18d2e80eff7f421200e2.asciidoc | 17 +
 .../863253bf0ab7d227ff72a0a384f4de8c.asciidoc | 12 +
 .../8634c9993485d622fb12d24f4f242264.asciidoc | 23 +
 .../867f7d43a78066731ead2e223960fc07.asciidoc | 12 +
 .../8684589e31d96ab229e8c4feb4d704bb.asciidoc | 10 +
 .../86926bcebf213ac182d4373027554858.asciidoc | 17 +
 .../8696ba08ca6cc4992110c331732e5f47.asciidoc | 19 +
 .../8699d35269a47ba867fa8cc766287413.asciidoc | 8 +
 .../86c5594c4ec551391096c1abcd652b50.asciidoc | 20 +
 .../8703f3b1b3895543abc36e2a7a0013d3.asciidoc | 31 +
 .../871154d08efd7251cf3272e758f06acf.asciidoc | 26 +
 .../8731188553e14134b0a533010318f91a.asciidoc | 21 +
 .../8739fad1fb2323950b673acf0c9f2ff5.asciidoc | 10 +
 .../873e2333734b1cf5ed066596e5f74b0a.asciidoc | 98 +++
 .../873fbbc6ab81409058591385fd602736.asciidoc | 61 +-
 .../87416e6a1ca2da324dbed6deb05303eb.asciidoc | 31 +
 .../8743887d9b89ea1a2d5e780c349972cf.asciidoc | 25 +
 .../87457bb3467484bec3e9df4e25942ba6.asciidoc | 10 +
 .../87469f8b7e9b965408479d276c3ce8aa.asciidoc | 10 +
 .../87733deeea4b441b595d19a0f97346f0.asciidoc | 10 +
 .../877ea90c663b5df9efe95717646a666f.asciidoc | 55 ++
 .../87846c3ddacab1da4af626ae8099e4be.asciidoc | 17 +
 .../87b0b496747ad6c1e4ab4b462128fa1c.asciidoc | 11 +
 .../87c3e9963400a3e4b296ef8d1c86fae3.asciidoc | 10 +
 .../87c42ef733a50954e4d757fc0a08decc.asciidoc | 13 +
 .../87d970b4944b6d742c484d7184996c8a.asciidoc | 13 +
 .../87f854393d715aabf4d45e90a8eb74ce.asciidoc | 19 +
 .../88195d87a350e7fff200131f410c3e88.asciidoc | 30 +
 .../88341b4eba71ec722f3e38fa1696fe87.asciidoc | 56 ++
 .../88554b79dba8fd79991855a692b69ff9.asciidoc | 55 ++
 .../8871b8fcb6de4f0c7dff22798fb10fb7.asciidoc | 23 -
 .../88a08d0b15ef41324f5c23db533d47d1.asciidoc | 11 +
 .../88a283dfccc481f1afba79d9b3c61f51.asciidoc | 8 +
 .../88b19973b970adf9b73fca82017d4951.asciidoc | 15 +
 .../88cecae3f0363fc186d955dd8616b5d4.asciidoc | 11 +
 .../88cf60d3310a56d8ae12704abc05b565.asciidoc | 8 +
 .../88ec7fa6768a7e13cd2158667a69e97f.asciidoc | 32 +
 .../8963fb1e3d0900ba3b68be212e8972ee.asciidoc | 37 ++
 .../897668edcbb0785fa5229aeb2dfc963e.asciidoc | 19 +
 .../899eef71a67a1b2aa11a2166ec7f48f1.asciidoc | 24 -
 .../89a6b24618cafd60de1702a5b9f28a8d.asciidoc | 39 ++
 .../89aed93f641a5e243bdc3ee5cdc2acc6.asciidoc | 56 ++
.../89b72dd7f747f6297c2b089e8bc807be.asciidoc | 16 + .../89c57917bc7bd2e6387b5eb54ece37b1.asciidoc | 15 + .../89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc | 11 + .../89dee10a24ea2727af5b00039a4271bd.asciidoc | 161 +++++ .../89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc | 14 + .../8a12cd824404d74f098d854716a26899.asciidoc | 10 + .../8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc | 10 + .../8a1f6cffa653800282c0ae160ee375bc.asciidoc | 19 + .../8a355eb25d2a01ba62dc1a22dd46f46f.asciidoc | 23 - .../8a4941cae0b32d68b22bec2d12c82860.asciidoc | 12 + .../8a617dbfe5887f8ecc8815de132b6eb0.asciidoc | 12 + .../8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc | 17 + .../8aa74aee3dcf4b34028e4c5e1c1ed27b.asciidoc | 37 ++ .../8ab11a25e017124a70484781ca11fb52.asciidoc | 13 + .../8acc1d67b152e7027e0f0e1a8b4b2431.asciidoc | 28 - .../8b07372a21a10a16b52e70fc0c87ad4e.asciidoc | 21 + .../8b301122cbf42be6eafeda714a36559e.asciidoc | 28 + .../8b38eeb41eb388ee6d92f26b5c0cc48d.asciidoc | 30 + .../8b3a94495127efd9d56b2cd7f3eecdca.asciidoc | 10 + .../8b5bc6e217b0d33e4c88d84f5c1a0712.asciidoc | 18 + .../8b652e3205a5e9e0187f56ce3c36ae4e.asciidoc | 18 + .../8b7956a2b88fd798a895d3466d671b58.asciidoc | 13 + .../8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc | 14 + .../8bf51fd50195b46bacbf872f460ebec2.asciidoc | 29 + .../8c2060b0272556457f4871c5d7a589fd.asciidoc | 16 + .../8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc | 10 + .../8c619666488927dac6ecb7dcebca44c2.asciidoc | 22 + .../8c693e057f6e85fbf2b56ca442719362.asciidoc | 38 ++ .../8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc | 39 ++ .../8c8b5224befab7804461c7e7b6086d9a.asciidoc | 34 + .../8c9081dc738d1290fd76071b283fcaec.asciidoc | 12 + .../8c92c5e87facbae8dc4f58376ec21815.asciidoc | 12 + .../8cbf9b46ce3ccc966c4902d2e0c56317.asciidoc | 14 + .../8cd00a3aba7c3c158277bc032aac2830.asciidoc | 77 ++- .../8cef2b98f3fe3a85874f1b48ebe6ec63.asciidoc | 27 + .../8d064eda2199de52e5be9ee68a5b7c68.asciidoc | 24 + .../8d421c5bec38eecce4679b219cacc9db.asciidoc | 30 + .../8d4ca17349e7e82c329cdd854cc670a1.asciidoc | 10 + .../8d4dda5d988d568f4f4210a6387e026f.asciidoc | 11 + .../8d6631b622f9bfb8fa70154f6fb8b153.asciidoc | 11 + .../8d7193902a353872740a3324c60c5001.asciidoc | 23 + .../8d9a63d7c31f08bd27d92ece3de1649c.asciidoc | 40 -- .../8d9b04f2a97f4229dec9e620126de049.asciidoc | 12 + .../8db799543eb084ec71547980863d60b9.asciidoc | 31 + .../8de3206f80e18185a5ad6481f4c2ee07.asciidoc | 23 - .../8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc | 10 + .../8e06d8b2b737c43806018eae2ca061c1.asciidoc | 19 + .../8e0f43829df9af20547ea6896f4c0124.asciidoc | 27 + .../8e208098a0156c4c92afe0a06960b230.asciidoc | 12 + .../8e286a205a1f84f888a6d99f2620c80e.asciidoc | 12 + .../8e2bbef535fef688d397e60e09aefa7f.asciidoc | 13 + .../8e42a17edace2bc6e42c6a1532779937.asciidoc | 18 + .../8e43bb5b7946143e69d397bb81d87df0.asciidoc | 10 + .../8e68cdfad45e7e6dff254d931eea29d4.asciidoc | 101 +++ .../8e89fee0be6a436c4e3d7c152659c47e.asciidoc | 27 + .../8e92b10ebcfedc76562ab52d0e46b916.asciidoc | 10 + .../8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc | 11 + .../8e9f7261af6264c92d0eb4d586a176f9.asciidoc | 20 + .../8eac28d2e9b6482b413d61817456a14f.asciidoc | 26 + .../8eaf4d5dd4ab1335deefa7749fdbbcc3.asciidoc | 22 - .../8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc | 16 + .../8ed31628081db2b6e9106d61d1e142be.asciidoc | 15 + .../8edcd80d9b545a222dcc2f25ca4c6d5f.asciidoc | 20 + .../8ee9521f57661a050efb614f02b4a090.asciidoc | 16 + .../8f0511f8a5cb176ff2afdd4311799a33.asciidoc | 34 - .../8f0a3d7b5fbdf5351750a23c493cc078.asciidoc | 24 + 
.../8f0c5c81cdb902c136db821947ee70a1.asciidoc | 25 + .../8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc | 11 + .../8f6f7ea5abf56152b4a5639ddf40848f.asciidoc | 27 + .../8f7936f219500305e5b2518dbbf949ea.asciidoc | 10 + .../8f9a3fcd17a111f63caa3bef6e5f00f2.asciidoc | 17 + .../8f9f88cf9a27c1138226efb94ac09e73.asciidoc | 15 + .../8fdf2344c4fb3de6902ad7c5735270df.asciidoc | 14 - .../8fe128323a944765f525c76d85af7a2f.asciidoc | 26 + .../8fec06a98d0151c1d717a01491d0b8f0.asciidoc | 15 + .../90083d93e46fad2524755b8d4d1306fc.asciidoc | 18 + .../901d66919e584515717bf78ab5ca2cbb.asciidoc | 31 + .../902cfd5aeec2f65b3adf55f5e38b21f0.asciidoc | 13 + .../9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc | 8 + .../90631797c7fbda43902abf2cc0ea8304.asciidoc | 12 + .../908326e14ad76c2ff04a9b6d8365751f.asciidoc | 23 + .../909a032a9c1f7095b798444705b09ad6.asciidoc | 14 + .../90c087560ea6c0b7405f710971c86ef0.asciidoc | 26 + .../90e06d5ec5e454832d8fbd2e73ec2248.asciidoc | 10 + .../90f1f5304922fb6d097846dd1444c075.asciidoc | 31 + .../9116ee8a5b00cc877291ed5559563f24.asciidoc | 37 ++ .../911c56114e50ce7440eb83efc91d28b8.asciidoc | 20 + .../9120b6a49ec39a1571339fddf8e1a26f.asciidoc | 18 + .../91270cef57ac455547ffd47839420887.asciidoc | 43 ++ .../9129dec88d35571b3166c6677297f03b.asciidoc | 10 + .../913770050ebbf3b9b549a899bc11060a.asciidoc | 25 - .../9138550002cb26ab64918cce427963b8.asciidoc | 18 + .../913c163c197802078a8af72150178061.asciidoc | 36 ++ .../9143be4f137574271953a7a8107e175b.asciidoc | 10 + .../91750571c195718f0ff246e058e4bc63.asciidoc | 23 + .../91c01fcad9bf341d039a15dfc593dcd7.asciidoc | 18 + .../91c925fc71abe0ddfe52457e9130363b.asciidoc | 16 + .../91cbeeda86b4e4e393fc79d4e3a4a781.asciidoc | 25 + .../91ed08faaed54cb5ace9a295af937439.asciidoc | 25 + .../9200ed8d5f798a158def4c526e41269e.asciidoc | 11 + .../92035a2a62d01a511662af65606d5fc6.asciidoc | 27 + .../9216e8e544e6d193eda1f59e9160a225.asciidoc | 32 + .../922529276f87cb9d116be2468d108466.asciidoc | 19 + .../9225841fdcddaf83ebdb90c2b0399e20.asciidoc | 8 + .../92284d24bbb80ce6943f2ddcbf74b833.asciidoc | 36 ++ .../923aee95078219ee6eb321a252e1121b.asciidoc | 20 + .../926c0134aeaad53bd0f3bdad9c430217.asciidoc | 16 + .../9270964d35d172ea5b193c5fc7a473dd.asciidoc | 12 + .../927b20a221f975b75d1227b67d0eb7e2.asciidoc | 12 + .../9298aaf8232a819e79b3bf8471245e98.asciidoc | 10 + .../92d0c12d53a900308150d572c3f2f82f.asciidoc | 22 + .../92d343eb755971c44a939d0660bf5ac2.asciidoc | 25 + .../92f073762634a4b2274f71002494192e.asciidoc | 10 + .../92fa6608673cec5a2ed568a07e80d36b.asciidoc | 17 + .../92fe53019958ba466d1272da0834cf53.asciidoc | 10 + .../930a3c5667e3bf47b4e8cc28e7bf8d5f.asciidoc | 29 + .../930ba37af73dd5ff0342ecfe6c60a4e9.asciidoc | 18 + .../931da02a06953a768f4ad3fecfd7b2df.asciidoc | 12 + .../9334ccd09548b585cd637d7c66c5ae65.asciidoc | 49 ++ .../93429d2bfbc0a9b7a4854b27e34658cf.asciidoc | 20 + .../93444b445446c1a6033347d6267253d6.asciidoc | 16 + .../934aa38c3adcc4cf74ea40cd8736876c.asciidoc | 20 + .../934ced0998552cc95a28e48554147e8b.asciidoc | 13 + .../935566d5426d44ade486a49ec5289741.asciidoc | 17 + .../935ee7c1b86ba9592604834bb673c7a3.asciidoc | 93 +++ .../936d809c848f8b77d5b55f57f0aab89a.asciidoc | 22 + .../937089157fc82cf08b68a954d0e6d52c.asciidoc | 12 + .../9370e4935ab6678571d3227973b8c830.asciidoc | 11 + .../937ffc65cbb20505a8aba25b37a796a5.asciidoc | 37 ++ .../9382f022086c692ba05efb0acae65946.asciidoc | 21 + .../9399cbbd133ec2b7aad2820fa617ae3a.asciidoc | 20 + .../93bd651aff81daa2b86f9f2089e6d088.asciidoc | 33 + .../93cd0fdd5ca22838db06aa1cabdbe8bd.asciidoc | 22 
+ .../93d7ba4130722cae04f9690e52a8f54f.asciidoc | 19 + .../93f1bdd72e79827dcf9a34efa02fd977.asciidoc | 22 - .../93fb59d3204f37af952198b331fb6bb7.asciidoc | 12 + .../9403764e6eccad7b321b65e9a10c5727.asciidoc | 18 + .../940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc | 10 + .../9410af79177dd1df9b7b16229a581e18.asciidoc | 11 + .../941c8d05486200e835d97642e4ee05d5.asciidoc | 40 ++ .../94246f45025ed394cd6415ed8d7a0588.asciidoc | 10 + .../944806221eb89f5af2298ccdf2902277.asciidoc | 10 + .../944a2dc22dae2a8503299926326a9c18.asciidoc | 36 ++ ...946522c26d02bebf5c527ba28e55c724.asciidoc} | 9 +- .../9467e52087a13b63b02d78c35ff6f798.asciidoc | 14 + .../94cd66bf93f99881c1bda547283a0357.asciidoc | 40 ++ .../9501e6c8e95c21838653ea15b9b7ed5f.asciidoc | 14 + .../950f1230536422567f99a205ff4165ec.asciidoc | 16 + .../9524a9b7373fa4eb2905183b0e806962.asciidoc | 24 - .../95414139c7b1203e3c2d99a354415801.asciidoc | 10 + .../9559de0c2190f99fcc344887fc7b232a.asciidoc | 36 ++ .../956cb470258024af964cd2dabbaf7c7c.asciidoc | 14 + .../957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc | 15 + .../9584b042223982e0bfde8d12d42c9705.asciidoc | 17 + .../95b3f53f2065737bbeba6199e8a12df3.asciidoc | 14 + .../95c03bdef4faf6bef039c986f4cb3aba.asciidoc | 16 + .../9606c271921cb800d5ea395b16d6ceaf.asciidoc | 39 ++ .../9608820dbeac261ba53fb89bb9400560.asciidoc | 10 + .../962e6187bbd71c5749376efed04b65ba.asciidoc | 20 + .../966ff3a4c5b61ed1a36d44c17ce06157.asciidoc | 35 + .../9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc | 27 + .../96b9289c3c4c6b135ab3386562c4ee8d.asciidoc | 12 + .../96de5703ba0bd43fd4ac239ec5408542.asciidoc | 21 - .../96e137e42d12c180e2c702db30714a9e.asciidoc | 17 + .../96ea0e80323d6d2d99964625c004a44d.asciidoc | 12 + .../971c7a36ee79f2b3aa82c64ea338de70.asciidoc | 18 + .../973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc | 28 +- .../975b4b92464d52068516aa2f0f955cc1.asciidoc | 10 + .../976e5f9baf81bd6ca0e9f80916a0a4f9.asciidoc | 19 + .../97916243f245478b735471a9e37f33d1.asciidoc | 26 + .../979d25dff2d8987119410291ad47b0d1.asciidoc | 29 - .../97a3216af3d4b4d805d467d9c715cb3e.asciidoc | 11 + .../97ae2b62aa372a955278be6f660356ba.asciidoc | 15 + .../97babc8d19ef0866774576716eb6d19e.asciidoc | 33 +- .../97da68c09c9f1a97a21780fd404e213a.asciidoc | 20 + .../97ea5ab17213cb1faaf6f3ea13607098.asciidoc | 8 + .../97f260817b60f3deb7f7034d7dee7e12.asciidoc | 36 ++ .../97f5df84efec655f479fad78bc392d4d.asciidoc | 40 ++ .../983a867c90e63e070518f2f709f659ee.asciidoc | 29 + .../983fbb78e57e8fe98db38cf2d217e943.asciidoc | 56 ++ .../9851f5225150bc032fb3b195cd447f4f.asciidoc | 40 ++ .../98574a419b6be603a0af8f7f22a92d23.asciidoc | 8 + .../98621bea4765b1b838cc9daa914bf5c5.asciidoc | 12 + .../98855f4bda8726d5d123aeebf7869e47.asciidoc | 10 + .../9887f65af249bbf09190b1153ea2597b.asciidoc | 10 + .../98aeb275f829b5f7b8eb2147701565ff.asciidoc | 21 - .../98b121bf47cebd85671a2cb519688d28.asciidoc | 32 - .../98b403c356a9b14544e9b9f646845e9f.asciidoc | 22 + .../98c1080d8630d3a18d564312300d020f.asciidoc | 30 + .../98f43710cedd28a464e8abf4b09bcc9a.asciidoc | 25 + .../98f7525ec0bc8945eafa008a5a9c50c0.asciidoc | 12 + .../990c0d794ed6f05d1620b5d49f7aff6e.asciidoc | 10 + .../99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc | 44 ++ .../991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc | 22 + .../99474a7e7979816c874aeac4403be5d0.asciidoc | 27 + .../996521cef7803ef363a49ac6321ea1de.asciidoc | 12 + .../996f320a0f537c24b9cd0d71b5f7c1f8.asciidoc | 27 + .../99803d7b111b862c0c82e9908e549b16.asciidoc | 18 + .../998651b98e152add530084a631a4ab5a.asciidoc | 12 + 
.../99a56f423df3a0e57b7f20146f0d33b5.asciidoc | 20 + .../99b617a0a83fcfbe5755ccc724a4ce62.asciidoc | 15 + .../99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc | 14 + .../9a02bd47c000a3d9a8911233c37c890f.asciidoc | 39 ++ .../9a036a792be1d39af9fd0d1adb5f3402.asciidoc | 17 + .../9a05cc10eea1251e23b82a4549913536.asciidoc | 12 + .../9a09d33ec11e20b6081cae882282ca60.asciidoc | 10 + .../9a203aae3e1412d919546276fb52a5ca.asciidoc | 19 + .../9a49b7572d571e00e20dbebdd30f9368.asciidoc | 50 ++ .../9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc | 35 +- .../9a743b6575c6fe5acdf46024a7fda8a1.asciidoc | 22 + .../9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc | 29 + .../9ab351893dae65ec97fd8cb6832950fb.asciidoc | 31 + .../9ad14a9d7bf2699e2d86b6a607d410c0.asciidoc | 10 + .../9ad38ab4d9c3983e97e8c38fec611f10.asciidoc | 18 + .../9ae268058c0ea32ef8926568e011c728.asciidoc | 18 + .../9af44592fb2e78fb17ad3e834bbef7a7.asciidoc | 8 + .../9afa0844883b7471883aa378a8dd10b4.asciidoc | 11 + .../9b0f34d122a4b348dc86df7410d6ebb6.asciidoc | 11 + .../9b30a69fec54cf01f7af1b04a6e15239.asciidoc | 8 + .../9b345e0bfd45f3a37194585ec9193478.asciidoc | 10 + .../9b68748c061b768c0153c1f2508ce207.asciidoc | 22 + .../9b92266d87170e93a84f9700596d9035.asciidoc | 30 + .../9ba6f1e64c1dfff5aac26eaa1d093f48.asciidoc | 26 + .../9ba868784f417a8d3679b3c8ed5939ad.asciidoc | 21 + ...9bae72e974bdeb56007d9104e73eff92.asciidoc} | 15 +- .../9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc | 10 + .../9beb260834f8cfb240f6308950dbb9c2.asciidoc | 23 + .../9bfdda207b701028a3439e495e800c02.asciidoc | 20 + .../9c01db07c9ac395b6370e3b33965c21f.asciidoc | 8 + .../9c021836acf7c0370e289f611325868d.asciidoc | 20 + .../9c4ac64e73141f6cbf2fb6da0743d9b7.asciidoc | 18 + .../9c5cbbdbe0075ab9c2611627fe4748fb.asciidoc | 20 + .../9c6ea5fe2339d6c7e5e4bf1b98990248.asciidoc | 17 + .../9c7c8051592b6af3adb5d7c490849068.asciidoc | 22 + .../9cb150d67dfa0947f29aa809bcc93c6e.asciidoc | 11 + .../9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc | 21 + .../9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc | 11 + .../9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc | 44 ++ .../9cf6c7012a4f2bb562bc256aa28c3409.asciidoc | 13 + ...9cfbc41bb7b6fbdb26550dd2789c274e.asciidoc} | 27 +- .../9d1fb129ac783355a20097effded1845.asciidoc | 52 ++ .../9d31c7eaf8c6b56cee2fdfdde8a442bb.asciidoc | 21 + .../9d461ae140ddc018efd2650559800cd1.asciidoc | 24 + .../9d47f02a063444da9f098858a1830d28.asciidoc | 12 + .../9d5855075e7008270459cc88c189043d.asciidoc | 12 + .../9d662fc9f943c287b7144f5e4e2ae358.asciidoc | 19 + .../9d67db8370a98854812d38ae73ee2a12.asciidoc | 25 + .../9d79645ab3a9da3f63c54a1516214a5a.asciidoc | 8 + .../9d9c8d715b72ce336e604c2c8a2b540e.asciidoc | 38 ++ .../9de10a59a5f56dd0906be627896cc789.asciidoc | 15 + .../9de4704d2f047dae1259249112488697.asciidoc | 16 + .../9de4ea9d5f3d427a71ee07d998cb5611.asciidoc | 11 + .../9de4edafd22a8b9cb557632b2c8779cd.asciidoc | 12 + .../9e0e3ce27967f164f4585c5231ba9c75.asciidoc | 15 + .../9e3c28d5820c38ea117eb2e9a5061089.asciidoc | 19 + .../9e563b8d5a7845f644db8d5bbf453eb6.asciidoc | 23 + .../9e56d79ad9a02b642c361f0b85dd95d7.asciidoc | 20 - .../9e5ae957fd0663662bfbed9d1effe99e.asciidoc | 19 + .../9e962baf1fb407c21d6c47dcd37cec29.asciidoc | 21 + .../9e9717d9108ae1425bfacf71c7c44539.asciidoc | 12 + .../9eda9c39428b0c2c53cbd8ee7ae0f888.asciidoc | 12 + .../9eef31d85ebaf6c27054d7375715dbe0.asciidoc | 45 ++ .../9f04cc1a0c6cdb3ed2247f1399713767.asciidoc | 17 + .../9f0a0029982d9b3423a2a3de1f1b5136.asciidoc | 98 +++ .../9f16fca9813304e398ee052aa857dbcd.asciidoc | 18 + .../9f22a0920cc763eefa233ced963d9624.asciidoc | 
17 + .../9f286416f1b18940f13cb27ab5c8458e.asciidoc | 28 + .../9f3341489fefd38c4e439c29f6dcb86c.asciidoc | 21 + .../9f66b5243050f71ed51bc787a7ac1218.asciidoc | 29 + .../9f7671119236423e0e40801ef6485af1.asciidoc | 12 + .../9f99be2d58c48a6bf8e892aa24604197.asciidoc | 11 + .../9fa55fc76ec4bd81f372e9389f1da851.asciidoc | 15 + .../9fda516a5dc60ba477b970eaad4429db.asciidoc | 10 + .../9feff356f302ea4915347ab71cc4887a.asciidoc | 17 + .../9ff9b2a73419a6c82f17a358b4991499.asciidoc | 10 + .../9ffe41322c095af1b6ea45a79b640a6f.asciidoc | 36 ++ .../a00311843b5f8f3e9f7d511334a828b1.asciidoc | 10 + .../a008f42379930edc354b4074e0a33344.asciidoc | 15 + .../a01753fa7b4ba6dc19054f4f42d91cd9.asciidoc | 15 + .../a037beb3d02296e1d36dd43ef5c935dd.asciidoc | 14 + .../a0497157fdefecd04e597edb800a1a95.asciidoc | 15 + .../a04a8d90f8245ff5f30a9983909faa1d.asciidoc | 45 ++ .../a0871be90badeecd2f8d8ec90230e248.asciidoc | 43 ++ .../a0a7557bb7e2aff7918557cd648f41af.asciidoc | 29 + .../a0c64894f14d28b7e0c902add71d2e9a.asciidoc | 12 + ...a0c868282c0514a342ad04998cdc2175.asciidoc} | 18 +- .../a0d53dcb3df938fc0a01d248571a41e4.asciidoc | 33 + .../a0f4e902d18460337684d74ea932fbe9.asciidoc | 14 + .../a1070cf2f5969d42d71cda057223f152.asciidoc | 10 + .../a116949e446f34dc25ae57d4b703d0c1.asciidoc | 21 - .../a1377b32d7fe3680079ae0df73009b0e.asciidoc | 35 + .../a1490f71d705053951870fd2d3bceb39.asciidoc | 27 + .../a159143bb578403bb9c7ff37d635d7ad.asciidoc | 19 + .../a159e1ce0cba7a35ce44db9bebad22f3.asciidoc | 8 + .../a162eb50853331c80596f5994e9d1c38.asciidoc | 10 + .../a180c97f8298fb2388fdcaf7b2e1b81e.asciidoc | 23 + .../a1879930c1dac36a57d7f094a680420b.asciidoc | 31 + .../a197076e0e74951ea88f20309ec257e2.asciidoc | 29 + .../a1acf454bd6477183ce27ace872deb46.asciidoc | 35 + .../a1ccd51eef37e43c935a047b0ee15daa.asciidoc | 10 + .../a1d0603b24a5b048f0959975d8057534.asciidoc | 18 + .../a1db5c822745fe167e9ef854dca3d129.asciidoc | 26 - .../a1dcc6668d13271c8207ff5ff1d35492.asciidoc | 10 + .../a1e5884051755b5a5f4d7549f319f4c7.asciidoc | 25 + .../a1e5f3956f9a697e79478fc9a6e30e1f.asciidoc | 11 + .../a1f70bc71b763b58206814c40a7440e7.asciidoc | 14 + .../a21319c9eff1ac47d7fe7490f1ef2efa.asciidoc | 12 + .../a21a7bf052b41f5b996dc58f7b69770f.asciidoc | 10 + .../a253a1712953f7292bdd646c48ec7fd2.asciidoc | 12 + .../a28111cdd9b5aaea96c779cbfbf38780.asciidoc | 39 ++ .../a2a25aad1fea9a541b52ac613c78fb64.asciidoc | 22 - .../a2abd6b6b6b6df7c574a557b5468b5e1.asciidoc | 26 + ...a2b2ce031120dac49b5120b26eea8758.asciidoc} | 11 +- .../a2bab367f0e598ae27a2f4ec82e778e9.asciidoc | 33 + .../a2bd0782aadfd0a902d7f590ee7f49fe.asciidoc | 20 + .../a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc | 8 + .../a2dabdcbb661e7690166ae6d0de27e46.asciidoc | 11 + .../a322c8c73d6f2f5e1e375588ed20b636.asciidoc | 16 + .../a325f31e94fb1e8739258910593504a8.asciidoc | 11 + .../a3464bd6f0a61623562162859566b078.asciidoc | 13 + .../a34d70d7022eb4ba48909d440c80390f.asciidoc | 19 +- .../a34e758e019f563d323ca90ad9fd6e3e.asciidoc | 10 + .../a38f29375eabd0103f8d7c00b17bb0ab.asciidoc | 8 + .../a3a14f7f0e80725f695a901a7e1d579d.asciidoc | 12 + .../a3a2856ac2338a624a1fa5f31aec4db4.asciidoc | 14 + .../a3a64d568fe93a22b042a8b31b9905b0.asciidoc | 43 ++ .../a3c8f474b0700711a356682f37e62b39.asciidoc | 23 + .../a3ce0cfe2176f3d8a36959a5916995f0.asciidoc | 10 + .../a3cfd350c73a104b99a998c6be931408.asciidoc | 10 + .../a3d13833714f9bb918e5e0f62a49bd0e.asciidoc | 27 + .../a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc | 11 + ...a3e79d6c626a490341c5b731acbb4a5d.asciidoc} | 9 +- .../a3f19f3787cb331f230cdac67ff578e8.asciidoc 
| 17 + ...a3f3c1f3f31dbd225da5fd14633bc4a0.asciidoc} | 9 +- .../a3f56fa16c6cc67c2db31a4ba9ca11a7.asciidoc | 15 + .../a3f66deb467df86edbf66e1dca31da51.asciidoc | 20 + .../a412fe22a74900c72434391ed75139dc.asciidoc | 31 + .../a425fcab60f603504becee7d001f0a4b.asciidoc | 13 + .../a428d518162918733d49261ffd65cfc1.asciidoc | 20 + .../a42f33e15b0995bb4b6058659bfdea85.asciidoc | 22 - .../a43954d055f042d625a905513821f5f0.asciidoc | 17 + .../a45244aa3adbf3c793fede100786d1f5.asciidoc | 19 + .../a45605347d6438e7aecdf3b37198616d.asciidoc | 20 + .../a45810722dc4f468f81b1e8a451d21be.asciidoc | 12 + .../a45d80a3fdba70c1b1ba493e51652c8a.asciidoc | 19 + .../a45eb0cdd138d9c894ca2de9352549a1.asciidoc | 29 + .../a49169b4622918992411fab4ec48191b.asciidoc | 23 - .../a49acb27f56fe799a9b1342f85cba0f3.asciidoc | 20 + .../a4a3c3cd09efa75168dab90105afb2e9.asciidoc | 11 + .../a4bae4d956bc0a663f42cfec36bf8e0b.asciidoc | 28 + .../a4bd9bf52b4f098838d12bcb8dfc3482.asciidoc | 31 + .../a4dbd52004f3ab1580eb73997f77dcab.asciidoc | 62 ++ .../a4e510aa9145ccedae151c4a6634f0a4.asciidoc | 12 + .../a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc | 34 + .../a4ee2214d621bcfaf768c46d21325958.asciidoc | 18 + .../a4f259522b4dc10a0323aff58236c2c2.asciidoc | 18 + .../a512e4dd8880ce0395937db1bab1d205.asciidoc | 11 + .../a520168c1c8b454a8f102d6a13027c73.asciidoc | 10 + .../a5217a93efabceee9be19949e484f930.asciidoc | 24 + .../a53ff77d83222c0e76453e630d64787e.asciidoc | 21 + .../a547bb926c25f670078b98fbe67de3cc.asciidoc | 11 + .../a56c20a733a350673d41829c8daaafbe.asciidoc | 21 + .../a594f05459d9eecc8050c73fc8da336f.asciidoc | 20 + .../a5a58e8ad66afe831bc295500e3e8739.asciidoc | 19 + .../a5a5fb129de2f492e8fd33043a73439c.asciidoc | 27 + .../a5b59f0170a2feaa39e40243fd7ae359.asciidoc | 24 + .../a5dfcfd1cfb3558e7912456669c92eee.asciidoc | 10 + .../a5e2b3588258430f2e595abda98e3943.asciidoc | 10 + .../a5e6ad9e65615f6f92ae6a19674dd742.asciidoc | 29 + .../a5e6ccfb6019238e6db602373b9af147.asciidoc | 10 + .../a5e793d82a4455cf4105dac82a156617.asciidoc | 19 + .../a5ebcd70c34d1ece77a4fb27cc050917.asciidoc | 20 + .../a5f9eb40087921e67d820775acf71522.asciidoc | 17 + .../a6169bc057ce8654bd306ff4b062081b.asciidoc | 20 + .../a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc | 11 + .../a62833baf15f2c9ac094a9289e56a012.asciidoc | 14 + .../a63e0d0504e0c9313814b7f4e2641353.asciidoc | 82 +++ .../a669e9d56e34c95ef4c780e92ed307f1.asciidoc | 10 + .../a692b4c0ca7825c467880b346841f5a5.asciidoc | 21 + .../a699189c8d1a7573beeaea768f2fc618.asciidoc | 13 + ...a69b1ce5cc9528fb3639185eaf241ae3.asciidoc} | 9 +- .../a6b2815d54df34b6b8d00226e9a1af0c.asciidoc | 21 + ...a6bb306ca250cf651f19cae808b97012.asciidoc} | 7 +- .../a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc | 10 + .../a6ccac9f80c5e5efdaab992f3a32d919.asciidoc | 10 + .../a6ef8cd8c8218d547727ffc5485bfbd7.asciidoc | 30 + ...a6fdd0100cd362df54af6c95d1055c96.asciidoc} | 7 +- .../a71154ea11a5214f409ecfd118e9b5e3.asciidoc | 12 + .../a71c438cc4df1cafe3109ccff475afdb.asciidoc | 27 - .../a72613de3774571ba24def4b495161b5.asciidoc | 16 + .../a735081e715d385b4d471eea0f2b57da.asciidoc | 12 + .../a73a9a6f19516b8ead63182a9ae5b540.asciidoc | 14 + .../a75765e3fb130421dde6c3c2f12e8acb.asciidoc | 14 + .../a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc | 12 + .../a799477dff04578b200788a63f9cff71.asciidoc | 29 + .../a7cf31f4b907e4c00132aca75f55790c.asciidoc | 10 + .../a7e58d4dc477a84c1306fd5749aafd8b.asciidoc | 23 + .../a7fb1c0d0827d66bfa66016f2564b10c.asciidoc | 12 + .../a8019280dab5b04211ae3b21e5e08223.asciidoc | 16 + .../a80f5db4357bb25b8704d374c18318ed.asciidoc 
| 19 - .../a810da963d3b28d79dcd17be829bb271.asciidoc | 23 + .../a811b82ba4632bdd9065829085188bc9.asciidoc | 11 + .../a84bc239eb2f607e8bed1fdb70d63823.asciidoc | 18 + .../a861a89f52008610e813b9f073951c58.asciidoc | 10 + .../a89052bcdfe40e604a98d12be6ae59d2.asciidoc | 13 + .../a8add749c3f41ad1308a45308df14103.asciidoc | 29 + .../a9280b55a7284952f604ec7bece712f6.asciidoc | 20 + .../a941fd568f2e20e13df909ab24506073.asciidoc | 15 + .../a9541c64512ebc5fcff2dc48487dc0b7.asciidoc | 12 + .../a9554396506888e392a1aee0ca28e6fc.asciidoc | 36 ++ .../a95a123b9f862e52ab1e8f875961c852.asciidoc | 17 + .../a960b43e720b4934edb74ab4b085ca77.asciidoc | 11 + .../a97aace57c6442bbb90e1e14effbcda3.asciidoc | 12 + .../a97f984c01fa1d96e6d33a0e8e2cb90f.asciidoc | 20 + .../a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc | 10 + ...a98692a565904ec0783884d81a7b71fc.asciidoc} | 7 +- .../a999b5661bebb802bbbfe04faacf1971.asciidoc | 15 + .../a99bc141066ef673e35f306157750ec9.asciidoc | 11 + .../a99bf70ae38bdf1c6f350140b25e0422.asciidoc | 16 + .../a9c08023354aa9b9023807962df71d13.asciidoc | 10 + .../a9d44463dcea3cb0ea4c8f8460cea524.asciidoc | 23 + .../a9dd5cd3f2b31e7c8129ea63bab868b4.asciidoc | 32 + .../a9dd9595e96c307b8c798beaeb571521.asciidoc | 13 + .../a9fe70387d9c96a07830e1859c57efbb.asciidoc | 14 + .../aa1771b702f4b771491ba4ab743a9197.asciidoc | 11 + .../aa3284717241ed79d3d1d3bdbbdce598.asciidoc | 12 + .../aa5c0fa51a3553ce7caa763c3832120d.asciidoc | 22 + .../aa5fbb68d3a8e0d0c894791cb6cf0b13.asciidoc | 20 + .../aa6282d4bc92c753c4bd7a5b166abece.asciidoc | 12 + .../aa699ff3234f54d091575a38e859a627.asciidoc | 19 + .../aa6bfe54e2436eb668091fe31c2fbf4d.asciidoc | 43 -- .../aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc | 23 + .../aa7f62279b487989440d423c1ed4a1c0.asciidoc | 12 + .../aaa7a61b07861235fb6e489b946c705c.asciidoc | 18 + .../aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc | 8 + .../aaba346e0becdf12db13658296e0b8a1.asciidoc | 14 + .../aac5996a8398cc8f7701a063df0b2346.asciidoc | 26 + .../aadf36ae37460a735e06b953b4cee494.asciidoc | 39 ++ .../ab0fd1908c9957cc7f63165c156e48cd.asciidoc | 56 ++ .../ab1372270c11bcd6f36d1a13e6c69276.asciidoc | 18 + .../ab1a989958c1d345a9dc3dd36ad90c27.asciidoc | 14 + .../ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc | 31 + .../ab29bfbd35ee482cf54052b03d62cd31.asciidoc | 32 + .../ab317aa09c4bd44abbf02517141e37ef.asciidoc | 42 ++ .../ab3c36b70459093beafbfd3a7ae75b9b.asciidoc | 49 ++ .../ab8b4537fad80107bc88f633d4039a52.asciidoc | 13 + .../ab8de34fcfc0277901cb39618ecfc9d5.asciidoc | 12 + .../abb4a58089574211d434946a923e5725.asciidoc | 89 +++ .../abc280775734daa6cf2c28868e155d10.asciidoc | 33 + .../abc496de5fd013099a134db369b34a8b.asciidoc | 28 + .../abc7a670a47516b58b6b07d7497b140c.asciidoc | 44 ++ .../abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc | 21 +- .../abdbc81e799e28c833556b1c29f03ba6.asciidoc | 8 + .../abf329ebefaf58acd4ee30e685731499.asciidoc | 20 - .../ac366b9dda7040e743dee85335354094.asciidoc | 18 + .../ac483996d479946d57c374c3a86b2621.asciidoc | 17 + .../ac497917ef707538198a8458ae3d5c6b.asciidoc | 14 + .../ac544eb247a29ca42aab13826ca88561.asciidoc | 21 - .../ac73895ca1882cd1ac65b1facfbb5c63.asciidoc | 15 + .../ac8328bc51fd396b3ce5f7ef3e1e73df.asciidoc | 8 + .../ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc | 13 + .../ac9fe9b64891095bcf84066f719b3dc4.asciidoc | 17 + .../acb10091ad335ddd15d71021aaf23c62.asciidoc | 28 + .../acb850c08f51226eadb75be09e336076.asciidoc | 10 + .../acc52da725a996ae696b00d9f818dfde.asciidoc | 19 + .../acc6cd860032167e34fa5e0c043ab3b0.asciidoc | 14 + 
.../ad0dcbc7fc619e952c8825b8f307b7b2.asciidoc | 26 +- .../ad2416ca0581316cee6c63129685bca5.asciidoc | 16 + .../ad2b8aed84c67cdc295917b47a12d3dc.asciidoc | 43 ++ .../ad3b159657d4bcb373623fdc61acc3bf.asciidoc | 11 + .../ad57ccba0a060da4f5313692fa26a235.asciidoc | 72 +++ .../ad63eca6829a25293c9be589c1870547.asciidoc | 32 + .../ad6d81be5fad4bad87486b699454dce5.asciidoc | 24 + .../ad6ea0c1e46712aa1fd6d3bfa0ec979e.asciidoc | 18 - .../ad79228630684d950fe9792a768d24c5.asciidoc | 28 - .../ad88e46bb06739991498dee248850223.asciidoc | 8 + .../ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc | 15 + .../ada2675a9c631da2bfe627fc2618f5ed.asciidoc | 21 + .../adc18ca0c344d81d68ec3b9422b54ff5.asciidoc | 34 + ...add240aa149d8b11139947502b279ee0.asciidoc} | 11 +- .../adf36e2d8fc05c3719c91912481c4e19.asciidoc | 10 + .../adf728b0c11c5c309c730205609a379d.asciidoc | 19 + .../ae0d20c2ebb59278e08a26c9634d90c9.asciidoc | 11 + .../ae398a6b6494e7982ef2549fc2cd2d8e.asciidoc | 52 ++ .../ae4aa368617637a390074535df86e64b.asciidoc | 11 + .../ae591d49e54b838c15cdcf64a8dee9c2.asciidoc | 21 + .../ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc | 23 + .../ae9b5fbd42af2386ffbf56ad4a697e51.asciidoc | 33 - .../ae9ccfaa146731ab9176df90670db1c2.asciidoc | 43 +- .../aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc | 16 + .../aee26dd62fbb6d614a0798f3344c0598.asciidoc | 39 ++ .../aee4734ee63dbbbd12a21ee886f7a829.asciidoc | 23 + .../af00a58d9171d32f6efe52d94e51e526.asciidoc | 42 ++ .../af18f5c5fb2364ae23c6a14431820aba.asciidoc | 10 + .../af3fb9fa5691a7b37a6dc2a69ff66e64.asciidoc | 26 - .../af44cc7fb0c435d4497c77baf904bf5e.asciidoc | 19 + .../af517b6936fa41d124d68b107b2efdc3.asciidoc | 10 + .../af607715d0693587dd12962266359a96.asciidoc | 17 + .../af746266a49a693ff6170c88da8a8c04.asciidoc | 27 + .../af7c5add165b005aefb552d79130fed6.asciidoc | 17 + .../af84b3995564a7ca84360a526a4ac896.asciidoc | 26 + .../af85ad2551d1cc6742c6521d71c889cc.asciidoc | 18 + .../af91019991bee136df5460e2fd4ac72a.asciidoc | 11 + .../af970eb8b93cdea52209e1256eba9d8c.asciidoc | 10 + .../afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc | 43 ++ .../afa24b7d72c2d9f586023a49bd655ec7.asciidoc | 49 ++ .../afadb6bb7d0fa5a4531708af1ea8f9f8.asciidoc | 16 + .../afbea723c4ba0d50c67d04ebb73a4101.asciidoc | 10 + .../afc0a9cffc0100797a3f093094394763.asciidoc | 12 + .../afc29b61c532cf683f749baf013e7bfe.asciidoc | 19 - .../afcacd742d18bf220e02f0bc6891526d.asciidoc | 20 + .../afd90d268187f995dc002abc189f818d.asciidoc | 27 + .../afdb19ad1ebb4f64e235528b640817b6.asciidoc | 18 + .../afe30f159937b38d74c869570cfcd369.asciidoc | 18 + .../afe5aeb9317f0ae470b28e85a8d98274.asciidoc | 46 ++ .../afe87a2850326e0328fbebbefec2e839.asciidoc | 11 + .../afef5cac988592b97ae289ab39c2f437.asciidoc | 22 + .../affc7ff234dc3acccb2bf7dc51f54813.asciidoc | 12 + .../b00ac39faf96785e89be8d4205fb984d.asciidoc | 29 + .../b00d74eed431a272c829c0f798e3a539.asciidoc | 84 +++ .../b00f3bc0e47905aaa2124d6a025c75d4.asciidoc | 11 + .../b02e4907c9936c1adc16ccce9d49900d.asciidoc | 5 +- .../b0b1ae9582599f501f3b3ed8a42ea2af.asciidoc | 21 + .../b0bddf2ffaa83049b195829c06b875cd.asciidoc | 10 + .../b0ce54ff4fec0b0c712506eb81e633f4.asciidoc | 28 + .../b0d3f839237fabf8cdc2221734c668ad.asciidoc | 41 ++ .../b0eaf67e5cce24ef8889bf20951ccec1.asciidoc | 39 +- ...b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc} | 17 +- .../b0fe9a7c8e519995258786be4bef36c4.asciidoc | 10 + .../b109d0141ec8a0aed5d3805abc349a20.asciidoc | 32 + .../b11a0675e49df0709be693297ca73a2c.asciidoc | 10 + .../b14122481ae1f158f1a9a1bfbc4a41b1.asciidoc | 10 + 
.../b17143780e9904bfc1e1c53436497fa1.asciidoc | 13 + .../b176e0d428726705298184ef39ad5cb2.asciidoc | 17 + .../b195068563b1dc0f721f5f8c8d172312.asciidoc | 13 + .../b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc | 10 + .../b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc | 21 +- .../b1f7cb4157b13368373383abd7d2b8cb.asciidoc | 22 + .../b22559a7c319f90bc63a41cac1c39b4c.asciidoc | 11 + .../b23ed357dce8ec0014708b7b2850a8fb.asciidoc | 10 + .../b2440b492149b705ef107137fdccb0c2.asciidoc | 10 + .../b24a374c0ad264abbcacb5686f5ed61c.asciidoc | 13 + .../b25256ed615cd837461b0bfa590526b7.asciidoc | 10 + .../b2652b1763a5fd31e95c983869b433bd.asciidoc | 44 ++ .../b26b5574438e4eaf146b2428bf537c51.asciidoc | 50 ++ .../b2b26f8568c5dba7649e79f09b859272.asciidoc | 12 + .../b2dec193082462c775169db438308bc3.asciidoc | 17 + .../b2e1e802fc3c5fbeb4190af7d598c23e.asciidoc | 17 + .../b2e20bca1846d7d584626b12eae9f6dc.asciidoc | 11 + .../b2e4f3257c0e0aa3311f7270034bbc42.asciidoc | 14 + .../b3623b8c7f3e7650f52b6fb8b050f583.asciidoc | 8 + .../b3685560cb328f179d96ffe7c2668f72.asciidoc | 33 + .../b3756e700d0f6c7e8919003bdf26bc8f.asciidoc | 11 + .../b37919cc438b47477343833b4e522408.asciidoc | 22 + .../b3a1c4220617ded67ed43fff2051d324.asciidoc | 16 + ...b3a711c3deddcdb8a3f6623184a8b794.asciidoc} | 20 +- .../b3ed567d2c0915a280b6b15f7a37539b.asciidoc | 32 + .../b3fffd96fdb118cd059b5f1d67d928de.asciidoc | 19 + .../b42e7d627cd79e4c5e7a4a3cd8b19ce0.asciidoc | 36 ++ .../b430122345d560bbd2a77826f5c475f7.asciidoc | 35 + .../b45a8c6fc746e9c90fd181e69a605fad.asciidoc | 14 + .../b45c60f908b329835ab40609423f378e.asciidoc | 10 + .../b468d0124dc485385a34504d5b7af82a.asciidoc | 18 + .../b4693f2aa9fa65db04ab2499355c54fc.asciidoc | 22 + .../b47945c7db8868dd36ba079b742f2a90.asciidoc | 14 + .../b4946ecc9101b97102a1c5bcb19e5607.asciidoc | 15 + .../b4a0d0ed512dffc10ee53bca2feca49b.asciidoc | 43 -- .../b4aec2a1d353852507c091bdb629b765.asciidoc | 12 + ...b4d1fc887e40885cdf6ac2d01487cb76.asciidoc} | 29 +- .../b4d9d5017d42f27281e734e969949623.asciidoc | 10 + .../b4da132cb934c33d61e2b60988c6d4a3.asciidoc | 31 + .../b4f3165e873f551fbaa03945877eb370.asciidoc | 22 + .../b4f4c9ad3301c97fb3c38d108a3bc453.asciidoc | 28 + .../b504119238b44cddd3b5944da20a498d.asciidoc | 14 + .../b515427f8685ca7d79176def672d19fa.asciidoc | 16 + .../b52951b78cd5fb2f9353d1c7e6d37070.asciidoc | 18 + .../b557f114e21dbc6f531d4e7621a08e8f.asciidoc | 46 ++ .../b573e893de0d5f92d67f4f5eb7f0c353.asciidoc | 31 + .../b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc | 34 + .../b583bf8d3a2f49d633aa2cfed5606418.asciidoc | 27 + .../b58b17975bbce307b2ccce5051a449e8.asciidoc | 19 + .../b5bc1bb7278f2f95bc54790c78c928e0.asciidoc | 39 ++ .../b5e5cd4eccc40d7c5f2a1fcb654bd4a4.asciidoc | 32 + .../b5f95bc097a201b29c7200fc8d3d31c1.asciidoc | 44 -- .../b601bc78fb69e15a42e0783219ddc38d.asciidoc | 31 + .../b607eea422295a3e9acd75f9ed1c8cb7.asciidoc | 21 + .../b61afb7ca29a11243232ffcc8b5a43cf.asciidoc | 11 + .../b620ef4400d2f660fe2c67835938442c.asciidoc | 10 + .../b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc | 216 +++++++ .../b638e11d6a8a084290f8934d224abd52.asciidoc | 12 + .../b63ce79ce4fa1bb9b99a789f4dcfef4e.asciidoc | 13 + .../b65dbb51ddd496189c65a9326a53480c.asciidoc | 16 + .../b66be1daf6c220eb66d94e708b2fae39.asciidoc | 11 + .../b67fa8c560dd10a8e6f226048cd21562.asciidoc | 24 + .../b68c85fe1b0d2f264dc0d1cbf530f319.asciidoc | 30 - .../b68ed7037042719945a2452d23e64c78.asciidoc | 19 + .../b691d41f84b5b46e9051b51db22a46af.asciidoc | 18 + .../b6a6aa9ba20e9a019371ae268488833f.asciidoc | 10 + 
.../b6a7ffd2003c38f4aa321f067d162be5.asciidoc | 31 + .../b6c872d04eabb39d1947cde6b29d4ae1.asciidoc | 17 + .../b6e29a0e14b611d4aaafb3051220ea56.asciidoc | 19 + .../b6e385760e036e36827f719b540d9c11.asciidoc | 21 + .../b6f690896001f8f9ad5bf24e1304a552.asciidoc | 22 + .../b717a583b5165e5c6caafc42fdfd9086.asciidoc | 67 ++ .../b724f547c5d67e95bbc0a9920e47033c.asciidoc | 15 + .../b728d6ba226dba719aadcd8b8099cc74.asciidoc | 11 + .../b789292f9cf63ce912e058c46d90ce20.asciidoc | 22 - .../b7a4f5b9a93eff44268a1ee38ee1c6d3.asciidoc | 16 + .../b7a9f60b3646efe3834ca8381f8aa560.asciidoc | 12 + .../b7ad394975863a8f5ee29627c3ab738b.asciidoc | 20 + .../b7bb5503e64bd869b2ac1c46c434a079.asciidoc | 26 + .../b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc | 14 + .../b7df0848b2dc3093f931976db5b8cfff.asciidoc | 10 + .../b7f8bd33c22f3c93336ab57c2e091f73.asciidoc | 11 + .../b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc | 26 + .../b81a7b5f5ef19553f9cd49196f31018c.asciidoc | 23 + .../b82b156c7b9d1d78054577a6947a6cdd.asciidoc | 21 + .../b839f79a5d58506baed5714f1876ab55.asciidoc | 11 + .../b84932030e60a2cd58884b9dc6d3147f.asciidoc | 10 + .../b85716ba42a57096452665c38995da7d.asciidoc | 19 + .../b857abedc64e367def172bd07075e5c7.asciidoc | 27 + .../b87438263ccd68624b1d69d8750f9432.asciidoc | 21 + .../b87bc8a521995051c7e7395f9c047e1c.asciidoc | 41 ++ .../b88a2d96da1401d548a4540cca223d27.asciidoc | 39 ++ .../b8c03bbd917d0cf5474a3e46ebdd7aad.asciidoc | 12 + .../b8cc74a92bac837bfd8ba6d5935350ed.asciidoc | 41 ++ .../b8dc3764c4467922474b2cdec74bb86b.asciidoc | 10 + .../b8e6e320a19936f6edfc242ccb5cde43.asciidoc | 39 ++ .../b918d6b798da673a33e49b94f61dcdc0.asciidoc | 18 - .../b919f88e6f47a40d5793479440a90ba6.asciidoc | 102 --- .../b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc | 15 + .../b93ed4ef309819734f0eeea82e8b0f1f.asciidoc | 30 - .../b94cee0f74f57742b3948f9b784dfdd4.asciidoc | 8 +- .../b968853454b4416f7baa3209eb335957.asciidoc | 25 + .../b96f465abb658fe32889c3d183f159a3.asciidoc | 20 + .../b997885974522ef439d5e345924cc5ba.asciidoc | 39 -- .../b9a8f39ab9b1ed18c6c1db61ac4e6a9e.asciidoc | 11 + .../b9f716219359a6c973dafc50b348de33.asciidoc | 15 + .../ba07330ed3291b3970f4eb01dacd8086.asciidoc | 105 +++ .../ba0b4081c98f3387f76b77847c52ee9a.asciidoc | 31 - .../ba10b644a4e9a2e7d78744ca607355d0.asciidoc | 13 + .../ba21a7fbb74180ff138d97032f28ace7.asciidoc | 18 + .../ba3b9783aa188c6841e1926c5ab1472d.asciidoc | 13 + .../ba5dc6fb9bbe1406714da5d641462a23.asciidoc | 23 + .../ba6040de55afb2c8fb9e5b24bb038820.asciidoc | 10 + .../ba66768ed04f7b87906badff40ff40ed.asciidoc | 21 + .../ba8c3578613ae0bf890f6a05706ce776.asciidoc | 20 + .../ba9a5f66a6148612de0ad2491fd6c90d.asciidoc | 32 + .../baadbfffcd0c16f51eb3537f516dc3ed.asciidoc | 10 + .../bab4c3b22c1768fcc7153345e4096dfb.asciidoc | 12 + .../bb067c049331cc850a77b18bdfff81b5.asciidoc | 39 ++ .../bb143628fd04070683eeeadc9406d9cc.asciidoc | 17 - .../bb28d1f7f3f09f5061d7f4351aee89fc.asciidoc | 19 + .../bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc | 10 + .../bb5a1319c496acc862c670cc7224e59a.asciidoc | 37 ++ .../bb64a7228a479f6aeeaccaf7560e11ee.asciidoc | 36 ++ .../bb792e64a4c1f872296073b457aa03c8.asciidoc | 11 + .../bb975b342de7e838ebf6a36aaa1a8749.asciidoc | 20 + .../bb9e268ec62d19ca2a6366cbb48fae68.asciidoc | 10 + .../bc1ad5cc6d3eab98e3ce01f209ba7094.asciidoc | 21 - .../bc4d308069af23929a49d856f6bc3008.asciidoc | 33 + .../bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc | 27 + .../bcb572658986d69ae17c28ddd7e4bfd8.asciidoc | 10 + .../bcbd4d4749126837723438ff4faeb0f6.asciidoc | 20 + 
.../bcc75fc01b45e482638c65b8fbdf09fa.asciidoc | 11 +- .../bcdfaa4487747249699a86a0dcd22f5e.asciidoc | 8 + .../bd0d30a7683037e1ebadd163514765d4.asciidoc | 26 + .../bd1e55b8cb2ca9e496e223e717d76640.asciidoc | 23 + ...bd23c3a03907b1238dcb07ab9eecae7b.asciidoc} | 9 +- .../bd298b11933605c641626750c981d70b.asciidoc | 43 ++ .../bd2a387e8c21bf01a1039e81d7602921.asciidoc | 21 + .../bd3d710ec50a151453e141691163af72.asciidoc | 10 + .../bd458073196a19ecdeb24a8016488c20.asciidoc | 10 + .../bd57976bc93ca64b2d3e001df9f06c82.asciidoc | 11 + .../bd5918ab903c0889bb1f09c8c2466e43.asciidoc | 20 - .../bd5bd5d8b3d81241335fe1e5747080ac.asciidoc | 22 + .../bd68666ca2e0be12f7624016317a62bc.asciidoc | 16 + .../bd6f30e3caa3632260da42d9ff82c98c.asciidoc | 10 + .../bd7330af2609bdd8aa10958f5e640b93.asciidoc | 19 + .../bd767ea03171fe71c73f58f16d5da92f.asciidoc | 15 + .../bd7a1417fc27b5a801334ec44462b376.asciidoc | 10 + .../bd7fa2f122ab861cd00e0b9154d120b3.asciidoc | 21 + .../bdb30dd52d32f50994008f4f9c0da5f0.asciidoc | 9 +- .../bdb671866e2f0195f8dfbdb7f20bf591.asciidoc | 18 + .../bdc1afd2181154bb78797360f9dbb1a0.asciidoc | 16 + .../bdc68012c121062628d6d73468bf4866.asciidoc | 10 + .../bde74dbbcef8ebf8541cae2c1711255f.asciidoc | 10 + .../bdfb86cdfffb9d2ee6e3d399f00a57b0.asciidoc | 39 ++ .../be1bd47393646ac6bbee177d1cdb7738.asciidoc | 22 - .../be285eef1d2df0dfcf876e2d4b361f1e.asciidoc | 28 + .../be30ea12f605fd61acba689b68e00bbe.asciidoc | 21 + .../be3a6431d01846950dc1a39a7a6a1faa.asciidoc | 7 +- ...be5b415d7f33d6f0397ac2f8b5c10521.asciidoc} | 20 +- .../be5c5a9c25901737585e4fff9195da3c.asciidoc | 17 + .../be5d62e7c8f63687c585305fbe70d7d0.asciidoc | 21 + .../be5fef0640c3a650ee96f84e3376a1be.asciidoc | 20 + .../be6b0bfcdce1ef100af89f74da5d4748.asciidoc | 14 + .../be9376b1e354ad9c6bdad83f6a0ce5ad.asciidoc | 61 ++ .../be9836fe55c5fada404a2adc1663d832.asciidoc | 27 + .../beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc | 13 + .../beb0b9ff4f68672273fcff1b7bae706b.asciidoc | 17 + .../beba2a9795c8a13653e1edf64eec4357.asciidoc | 14 + .../bed14cc152522ca0726ac3746ebc31db.asciidoc | 45 ++ .../bf17440ac178d2ef5f5be643d033920b.asciidoc | 18 + .../bf1de9fa1b825fa875d27fa08821a6d1.asciidoc | 12 + .../bf2e6ea2bae621b9b2fee7003e891f86.asciidoc | 16 + ...bf3f520b47581d861e802730aaf2a519.asciidoc} | 23 +- .../bf448c3889c18266e2e6d3af4f614da2.asciidoc | 20 + .../bf639275d0818be04317ee5ab6075da6.asciidoc | 22 + .../bf8680d940c84e43a9483a25548dea57.asciidoc | 59 ++ .../bf9f13dc6c24cc225a72e32177e9ee02.asciidoc | 73 +++ .../bfb1aa83da8e3f414d50b5ed7894ed33.asciidoc | 20 + .../bfb8a15cd05b43094ffbce8078bad3e1.asciidoc | 11 + .../bfcd65ab85d684d36a8550080032958d.asciidoc | 13 - .../bfd6fa3f44e6165f8999102f5a8e24d6.asciidoc | 22 + .../bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc | 43 +- .../c00c9412609832ebceb9e786dd9542df.asciidoc | 15 + .../c012f42b26eb8dd9b197644c3ed954cf.asciidoc | 21 + .../c02c2916b97b6fa7db82dbc7f0378310.asciidoc | 27 + .../c03ce952de42eae4b522cedc9fd3d14a.asciidoc | 14 + .../c065a200c00e2005d88ec2f0c10c908a.asciidoc | 12 + .../c067182d385f59ce5952fb9a716fbf05.asciidoc | 27 + .../c088ce5291ae28650b6091cdec489398.asciidoc | 19 + .../c0a4b0c1c6eff14da8b152ceb19c1c31.asciidoc | 11 + .../c0c638e3d218b0ecbe5c4d77c964ae9e.asciidoc | 17 + .../c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc | 16 + .../c0ebaa33e750b87555dc352073f692e8.asciidoc | 30 + ...c0ff8b3db994c4736f7579dde18097d2.asciidoc} | 13 +- .../c10a486a28cbc5b2f15c3474ae31a431.asciidoc | 22 + .../c11c4d6b30e882871bf0074f407149bd.asciidoc | 16 + .../c12d6e962f083c728f9397932f05202e.asciidoc 
| 14 + .../c1409f591a01589638d9b00436ce42c0.asciidoc | 11 + .../c147de68fd6da032ad4a3c1bf626f5d6.asciidoc | 21 + .../c155d2670ff82b135c7dcec0fc8a3f23.asciidoc | 10 + .../c18100d62ed31bc9e05f62900156e6a8.asciidoc | 14 + .../c186ecf6f799ddff7add1abdecea5821.asciidoc | 47 ++ .../c187b52646cedeebe0716327add65642.asciidoc | 11 + .../c1a39c2628ada04c3ddd61a303b65d44.asciidoc | 30 + .../c1a895497066a3dac674d4b1a119048d.asciidoc | 16 + .../c1ac9e53b04f7acee4b4933969d6b574.asciidoc | 29 + .../c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc | 27 + .../c1bb395546102279296534522061829f.asciidoc | 38 ++ .../c1efc5cfcb3c29711bfe118f1baa28b0.asciidoc | 20 + .../c208a06212dc0cf6ac413d4f2c154296.asciidoc | 10 + .../c208de54369379e8d78ab201be18b6be.asciidoc | 34 + .../c21aaedb5752a83489476fa3b5e2e9ff.asciidoc | 29 + .../c21eb4bc30087188241cbba6b6b89999.asciidoc | 14 + .../c23e32775340d7bc6f46820313014d8a.asciidoc | 17 + .../c267e90b7873a7c8c8af06f01e958e69.asciidoc | 11 + .../c27b7d9836aa4ea756f59e9c42911721.asciidoc | 10 + .../c28f0b0dd3246cb91d6facb3295a61d7.asciidoc | 10 + .../c2c21e2824fbf6b7198ede30419da82b.asciidoc | 7 +- .../c2d7c36daac8608d2515c549b2c82436.asciidoc | 30 + .../c318fde926842722825a51e5c9c326a9.asciidoc | 11 + .../c32a3f8071d87f0a3f5a78e07fe7a669.asciidoc | 21 - .../c38c882c642dd412e8fa4c3eed49d12f.asciidoc | 15 + .../c4272ad0309ffbcbe9ce96bf9fb4352a.asciidoc | 23 + .../c42bc6e74afc3d43cd032ec2bfd77385.asciidoc | 12 + .../c4607ca79b2bcde39305d6f4f21cad37.asciidoc | 12 + .../c464ed2001d66a1446f37659dc9efc2a.asciidoc | 27 + .../c47f030216a3c89f92f31787fc4d5df5.asciidoc | 12 + .../c48b8bcd6f41e0d12b58e854e09ea893.asciidoc | 14 + .../c4a1d03dcfb82913d0724a42b0a89f20.asciidoc | 8 + .../c4b278ba293abd0d02a0b5ad1a99f84a.asciidoc | 28 - .../c4b727723b57052b6504bb74fe09abc6.asciidoc | 17 + .../c4c1a87414741a678f6cb91804daf095.asciidoc | 16 + .../c4fadbb7f61e5f83ab3fc9cd4b82b5e5.asciidoc | 14 + .../c526fca1609b4c3c1d12dfd218d69a50.asciidoc | 15 + .../c54597143ac86540726f6422fd98b22e.asciidoc | 22 + .../c554a1791f29bbbcddda84c64deaba6f.asciidoc | 12 + .../c580092fd3d36c32b09d63921708a67b.asciidoc | 26 + .../c5802e9f3f4068fcecb6937b867b270d.asciidoc | 19 + .../c580990a70028bb49cca8a6bde86bbf6.asciidoc | 8 + .../c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc | 12 + .../c5bc577ff92f889225b0d2617adcb48c.asciidoc | 11 + .../c5cc19e48549fbc5327a9d46874bbeee.asciidoc | 17 + .../c5ed7d83ade97a417aef28b9e2871e5d.asciidoc | 11 + .../c612d93e7f682a0d731e385edf9f5d56.asciidoc | 20 - .../c6151a0788a10a7f40da684d72c3255c.asciidoc | 55 ++ .../c630a1f891aa9aa651f9982b832a42e1.asciidoc | 18 + .../c6339d09f85000a6432304b0ec63b8f6.asciidoc | 16 + .../c639036b87d02fb864e27c4ca29ef833.asciidoc | 39 ++ .../c64b61bedb21b9def8fce5092e677af9.asciidoc | 23 + .../c654b09be981be12fc7be0ba33f8652b.asciidoc | 35 + .../c65b00a285f510dcd2865aa3539b4e03.asciidoc | 10 + .../c66dab0b114fa3e228e1c0e0e5a99b60.asciidoc | 12 + .../c67b0f00c2e690303c0e5af2f51e0fea.asciidoc | 23 + .../c6abe91b5527870face2b826f37ba1da.asciidoc | 26 + .../c6b365c7da97d7e50f36820a7d36f548.asciidoc | 13 + .../c6b5c695a9b757b5e7325345b206bde5.asciidoc | 10 + .../c6b8713bd49661d69d6b868f5b991d17.asciidoc | 17 + .../c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc | 17 + .../c6d39d22188dc7bbfdad811a94cbcc2b.asciidoc | 11 + .../c6d5e3b6ff9c665ec5344a4bfa7add80.asciidoc | 13 + .../c6f07c53eda4db77305bb14751b3263f.asciidoc | 50 ++ .../c733f20641b20e124f26198534755d6d.asciidoc | 22 + .../c765ce78f3605c0e70d213f22aac8a53.asciidoc | 16 + .../c76cb6a080959b0d87afd780cf814be2.asciidoc 
| 30 + .../c79b284fa7a5d7421c6daae62bc697f9.asciidoc | 10 + .../c79e8ee86b332302b25c5c1f5f4f89d7.asciidoc | 21 + .../c8210f23c10d0642f24c1e43faa4deda.asciidoc | 41 ++ .../c849c6c8f8659dbb93e1c14356f74e37.asciidoc | 20 - ...c87038b96ab06d9a741a130f94de4f02.asciidoc} | 11 +- .../c873f9cd093e26515148f052e28c7805.asciidoc | 11 + .../c8bbf362f06a0d8dab33ec0d99743343.asciidoc | 12 + .../c8e2109b19d50467ab83a40006462e9f.asciidoc | 11 + .../c92b761c18d8e1c3df75c04a21503e16.asciidoc | 25 + .../c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc | 16 + .../c95d5317525c2ff625e6971c277247af.asciidoc | 12 + .../c96669604d0e66a097ddf3093b025ccd.asciidoc | 18 + .../c96e5740b79f703c5b77e3ddc9fdf3a0.asciidoc | 18 + .../c97fd95ebdcf56cc973582e37f732ed2.asciidoc | 8 + .../c9a6ab0a56bb0177f158277185f68302.asciidoc | 58 ++ .../c9afa715021f2e6450e72ac73271960c.asciidoc | 20 + .../c9b6cbe93c8bd23e3f658c3af4e70092.asciidoc | 67 ++ .../c9c396b94bb88098477e2b08b55a12ee.asciidoc | 54 ++ .../c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc | 19 + .../c9d9a1d751f20f6197c825cb4378fe9f.asciidoc | 15 + .../ca06db2aa4747910278f96315f7be94b.asciidoc | 27 + .../ca08e511e5907d258081b10a1a9f0072.asciidoc | 26 + .../ca1cc4bcef22fdf9153833bfe6a55294.asciidoc | 27 + .../ca3bcd6278510ebced5f74484033cb36.asciidoc | 8 + .../ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc | 8 + .../ca5dda98e977125d40a7fe1e178e213f.asciidoc | 17 + .../ca98afbd6a90f63e02f62239d225313b.asciidoc | 11 + .../caaafef1a76c2bec677704c2dc233218.asciidoc | 10 + .../caab99520d3fe41f6154d74a7f696057.asciidoc | 10 + .../cac74a85c6b352a6e23d8673abae126f.asciidoc | 32 + .../cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc | 14 + ...cb0c3223fd45148497df73adfba2e9ce.asciidoc} | 25 +- .../cb16f1ff85399ddaa418834be580c9de.asciidoc | 17 + .../cb1d2a787bbe88974cfc5f132556a51c.asciidoc | 11 + .../cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc | 11 + .../cb3c483816b6ea150ff6c559fa144d32.asciidoc | 28 + .../cb4388b72d41c431ec9ca8255b2f65fb.asciidoc | 31 + .../cb71332115c92cfb89375abd30b8bbbb.asciidoc | 10 + .../cb71c6ecfb8b19725c374572444e5d32.asciidoc | 22 + .../cba3462a307e2483c14e3e198f6960e3.asciidoc | 35 + .../cbc2b5595890f87165aab1a741b1d22c.asciidoc | 26 + .../cbfd6f23f8283e64ec3157c65bb722c4.asciidoc | 11 + .../cc0cca5556ec6224c7134c233734beed.asciidoc | 8 + .../cc28a3dafcd5056f2a3ec07f6fda5091.asciidoc | 26 + .../cc56be758d5d75febbd975786187c861.asciidoc | 11 + .../cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc | 44 ++ .../cc7f1c74ede6810e2c9db19256d6b653.asciidoc | 16 + .../cc90639f2e65bd89cb73296cac6135cf.asciidoc | 10 + .../cc9dac8db7a1482e2fbe3235197c3de1.asciidoc | 18 + .../ccec66fb20d5ede6c691e0890cfe402a.asciidoc | 11 + .../ccf84c1e5e5602a9e841cb8f7e3bb29f.asciidoc | 20 + .../cd16538654e0f834ff19fe6cf329c398.asciidoc | 22 + .../cd247f267968aa0927bfdad56852f8f5.asciidoc | 18 - .../cd373a6eb1ef4748616500b26fab3006.asciidoc | 26 + .../cd38c601ab293a6ec0e2df71d0c96b58.asciidoc | 32 + .../cd5bc5bf7cd58d7b1492c9c298b345f6.asciidoc | 29 - .../cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc | 11 + .../cd6eee201a233b989ac1f2794fa6d640.asciidoc | 19 + .../cd6fa7f63c93bb04824acd3a7d1f8de3.asciidoc | 36 ++ .../cd7da0c3769682f546cc1888e569382e.asciidoc | 25 + .../cd8006165ac64f1ef99af48e5a35a25b.asciidoc | 11 + .../cd93919e13f656ad2e6629f45c579b93.asciidoc | 10 + .../cda045dfd79acd160ed8668f2ee17ea7.asciidoc | 16 + .../cdb68b3f565df7c85e52a55864b37d40.asciidoc | 17 + .../cdc04e6d3d37f036c7045ee4a582ef06.asciidoc | 29 + .../cdc38c98320a0df705ec8d173c725375.asciidoc | 19 + .../cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc 
| 11 + .../cdd29b01e730b3996de68a2788050021.asciidoc | 10 + .../cdd7127681254f4d614cc075f9e6fbcf.asciidoc | 16 + .../cde19d110a58317610033ea3dcb0eb80.asciidoc | 23 + .../cde4104a29dfe942d55863cdd8718627.asciidoc | 8 + .../cdedd5f33f7e5f7acde561e97bff61de.asciidoc | 19 - .../cdf400299acd1c7b1b7bb42e284e3d08.asciidoc | 18 + .../cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc | 12 + .../ce0a1aba713b0448b0c6a504af7b3a08.asciidoc | 8 + .../ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc | 10 + .../ce13afc0c976c5e1f424b58e0c97fd64.asciidoc | 18 + .../ce247fc08371e1b30cb52195e521c076.asciidoc | 25 + .../ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc | 19 + .../ce3c391c2b1915cfc44a2917bca71d19.asciidoc | 24 + .../ce725697f93b3eebb3a266314568565a.asciidoc | 20 + .../ce8471d31e5d60309e142feb040fd2f8.asciidoc | 8 + .../ce899fcf55da72fc32e623d1ad88b301.asciidoc | 19 + .../ce8eebfb810335803630abe83278bee7.asciidoc | 10 + .../cedb56a71cc743d80263ce352bb21720.asciidoc | 18 + .../cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc | 25 + .../cee591c1fc70d4f180c623a3a6d07755.asciidoc | 10 + .../cf23f18761df33f08bc6f6d1875496fd.asciidoc | 15 + ...cf47cd4a39cd62a3ecad919e54a67bca.asciidoc} | 13 +- .../cf5dab4334783ca9b8942eab68fb7174.asciidoc | 45 ++ .../cf75a880c749a2f2010a8ec3f348e5c3.asciidoc | 13 + .../cf8ca470156698dbf47fdc822d0a714f.asciidoc | 11 + .../cf9f51d719a2e90ffe36ed6fe56a4a69.asciidoc | 17 + .../cfad3631be0634ee49c424f9ccec62d9.asciidoc | 10 + .../cfbaea6f0df045c5d940bbb6a9c69cd8.asciidoc | 28 - .../cfd4b34f35e531a20739a3b308d57134.asciidoc | 21 + .../cffce059425d3d21e7f9571500d63524.asciidoc | 10 + .../d003ee256d24aa6000bd9dbf1d608dc5.asciidoc | 23 + .../d003f9110e5a474230abe11f36da9297.asciidoc | 27 + .../d01a590fa9ea8a0cb34ed8dda502296c.asciidoc | 11 + .../d01d309b0257d6fbca6d0941adeb3256.asciidoc | 45 ++ .../d03139a851888db53f8b7affd85eb495.asciidoc | 11 + .../d0378fe5e3aad05a2fd2e6e81213374f.asciidoc | 39 ++ .../d03b0e2f0f3f5ac8d53287c445007a89.asciidoc | 21 + .../d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc | 73 ++- .../d050c6fa7d806457a5f32d30b07e9521.asciidoc | 25 + .../d0546f047359b85a7e98207dc8de896a.asciidoc | 42 ++ .../d05b2a37106fce0ebbd41e2fd6bd26c2.asciidoc | 59 ++ .../d06a649bc38aa9a6433b64efa78d8cb5.asciidoc | 68 ++ .../d095b422d9803c02b62c01adffc85376.asciidoc | 10 + .../d0a8a938a2fa913b6fdbc871079a59dd.asciidoc | 20 - .../d0dee031197214b59ff9ac7540527d2c.asciidoc | 32 + .../d0fad375f6e074e9067ed93d3faa07bd.asciidoc | 103 +++ .../d0fde00ef381e61b8a9e99f18cb5970a.asciidoc | 15 + .../d11ea753a5d86f7e630fd69a069948b1.asciidoc | 12 + .../d1299b9ae1e621d2fdd0b8644c142ace.asciidoc | 36 ++ .../d12df43ffcdcd937bae9b26fb475e239.asciidoc | 11 + .../d133b5d82238f7d4778c341cbe0bc969.asciidoc | 23 + .../d13c7cdfc976e0c7b70737cd6a7becb8.asciidoc | 37 ++ .../d14fe5838fc02224f4b5ade2626d6026.asciidoc | 11 + .../d17269bb80fb63ec0bf37d219e003dcb.asciidoc | 32 - .../d1a285aa244ec461d68f13e7078a33c0.asciidoc | 39 ++ .../d1b3b7d2bb2ab90d15fd10318abd24db.asciidoc | 29 - .../d1b53bc9794e8609bd6f2245624bf977.asciidoc | 29 + .../d1bcf2eb63a462bfdcf01a68e68d5b4a.asciidoc | 23 - .../d1ce66957f8bd84bf01c4bfaee3ba0c3.asciidoc | 12 + .../d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc | 52 ++ .../d1e0fee64389e7c8d4c092030626b61f.asciidoc | 10 + .../d1ecce3632ae338b5e329b0e5ff3bed7.asciidoc | 21 + .../d1fde25de1980b7e84fa878289fd0bcb.asciidoc | 13 + .../d23452f333b77bf5b463310e2a665560.asciidoc | 22 + .../d260225cf97e068ead2a8a6bb5aefd90.asciidoc | 39 ++ .../d268aec16bb1eb909b634e856175094c.asciidoc | 27 + 
.../d2f52c106685bd8eab47e11d644d7a70.asciidoc | 52 ++ .../d2f6040c058a9555dfa62bb42d896a8f.asciidoc | 18 + .../d2f6fb271e97fde8685d7744e6718cc7.asciidoc | 14 + .../d305110a8cabfbebd1e38d85559d1023.asciidoc | 62 ++ .../d3088d5fa59b3ab110f64fb4f9b0065c.asciidoc | 18 - .../d31062ff8c015387889fed4ad86fd914.asciidoc | 21 - .../d3263afc69b6f969b9bbd8738cd07b97.asciidoc | 10 + .../d34946f59b6f938b141a37cb0b729308.asciidoc | 15 + .../d35a4d78a8b70c9e4d636efb0a92be9d.asciidoc | 24 + .../d35c8cf7a98b3f112e1de8797ec6689d.asciidoc | 8 + .../d37b065a94b3ff65a2a8a204fc3b097c.asciidoc | 10 + .../d37b0bda2bd24ab310e6b26708c7c6fb.asciidoc | 32 + .../d3a558ef226e9dccc1c7c61e1167547f.asciidoc | 34 + .../d3a5b70d493e0bd77b3f2b586341c83c.asciidoc | 17 + .../d3d117fec34301520ccdb26332e7c98a.asciidoc | 26 + .../d3dccdb15822e971ededb9f6f7d8ada1.asciidoc | 15 + .../d3e5edac5b461020017fd9d8ec7a91fa.asciidoc | 22 + .../d3e9e1169c3514fd46e253cd8b5ae3cb.asciidoc | 29 + .../d4323be84152fa91abd76e966d4751dc.asciidoc | 16 + .../d443db2755fde3b49ca3a9d296c4a96f.asciidoc | 20 + .../d44ecc69090c0b2bc08a6cbc2e3467c5.asciidoc | 21 + .../d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc | 12 + .../d48b274a4b6098ffef0c016c6c945fb9.asciidoc | 11 + .../d49318764244113ad2ac4cc0f06d77ec.asciidoc | 24 + .../d4a41fb74b41b41a0ee114a2311f2815.asciidoc | 21 + .../d4b405ef0302227e050ac8f0e39068e1.asciidoc | 16 + .../d4b50ae96e541c0031264a10f6afccbf.asciidoc | 10 + .../d4cdcf01014c75693b080c778071c1b5.asciidoc | 19 + .../d4d450f536d747d5ef5050d2d8c66f09.asciidoc | 37 ++ .../d4ef6ac034c4d42cb75d830ec69146e6.asciidoc | 10 + .../d4fb482a51d67a1af48e429af6019a46.asciidoc | 27 + .../d50a3c64890f88af32c6d4ef4899d82a.asciidoc | 26 - .../d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc | 12 + .../d5132d34ae922fa8e898889b627a1405.asciidoc | 34 + .../d524db57be9f16abac5396895b9a2a59.asciidoc | 10 + .../d547d55efbf75374f6de1f224323bc73.asciidoc | 46 ++ .../d5533f08f5cc0479f07a46c761f0786b.asciidoc | 22 + .../d56a9d89282df56adbbc34b91390ac17.asciidoc | 10 + .../d595b40bf1ea71923f9824d0f9c99c49.asciidoc | 57 ++ .../d59e9cc75814575aa5e275dbe262918c.asciidoc | 17 + .../d5abaf1fd26f0abf410dd8827d077bbf.asciidoc | 14 + .../d5bf9bc08f622ece98632a14a3982e27.asciidoc | 17 + .../d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc | 16 + .../d5dcddc6398b473b6ad9bce5c6adf986.asciidoc | 13 +- .../d5ead6aacbfbedc8396f87bb34acc880.asciidoc | 10 + .../d603e76ab70131f7ec6b08758f95a0e3.asciidoc | 11 + .../d64679f8a53928fe9958dbe5ee5d9d13.asciidoc | 36 ++ .../d64d509440afbed7cefd04b6898962eb.asciidoc | 26 + .../d66e2b4d1931bf88c72e74670156e43f.asciidoc | 16 + .../d681508a745b2bc777d47ba606d24224.asciidoc | 10 + .../d681b643da0d7f0a384f627b6d56111b.asciidoc | 17 + .../d690a6af462c70a783625a323e11c72c.asciidoc | 15 + .../d69bd36335774c8ae1286cee21310241.asciidoc | 17 + .../d69cf7c82602431d9e339583e7dfb988.asciidoc | 48 ++ .../d6a21afa4a94b9baa734eac430940bcf.asciidoc | 15 + .../d70f55cd29cdb2dcd775ffa9e23ff393.asciidoc | 25 + .../d7141bd4d0db964f5cc4a872ad79dce9.asciidoc | 8 + .../d718b63cf1b6591a1d59a0cf4fd995eb.asciidoc | 18 - .../d7348119df9f89a556a7b767d5298c7e.asciidoc | 30 + .../d7717318d93d0a1f3ad049f9c6604417.asciidoc | 32 + .../d775836a0d7abecc6637aa988f204c30.asciidoc | 26 + .../d7898526d239d2aea83727fb982f8f77.asciidoc | 8 + .../d7919fb6f4d02dde1390775eb8365b79.asciidoc | 16 + .../d7a55a7c491e97079e429483085f1d58.asciidoc | 18 + .../d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc | 37 ++ .../d7ae456f119246e95f2f4c37e7544b8c.asciidoc | 11 + .../d7b61bfb6adb22986a43388b823894cc.asciidoc | 19 
+ .../d7d92816cac64b7c70d72b0000eeeeea.asciidoc | 19 + .../d7f42d1b906dc406be1819d17c625d5f.asciidoc | 28 + .../d7fe687201ac87b307cd06ed015dd317.asciidoc | 16 + .../d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc | 24 + .../d80ac403d8d936ca9dec185c7da13f2f.asciidoc | 14 + .../d8310e5606c61e7a6e64a90838b1a830.asciidoc | 46 ++ .../d8496fa0e5a394fd758617ed6a6c956f.asciidoc | 23 + .../d84a861ce563508aeaaf30a9dd84b5cf.asciidoc | 23 + .../d851282dba548251d10db5954a339307.asciidoc | 24 + .../d870d5bd1f97fc75872a298fcddec513.asciidoc | 155 +++++ ...d87175daed2327565d4325528c6d8b38.asciidoc} | 9 +- .../d87cfcc0a297f75ffe646b2e61940d14.asciidoc | 20 + .../d880630b6f7dc634c4078293f9cd3d80.asciidoc | 40 ++ .../d88f883ed2fb8be35cd3e72ddffcf4ef.asciidoc | 27 + .../d89d36741d906a71eca6c144e8d83889.asciidoc | 10 + .../d8a82511cb94f49b4fe4828fee3ba074.asciidoc | 11 + .../d8b2a88b5eca99d3691ad3cd40266736.asciidoc | 26 - .../d8c401a5b7359ec65947b9f35ecf6927.asciidoc | 34 + .../d8ea6a1a1c546bf29f65f8c65439b156.asciidoc | 22 + .../d8fa7ca2ec8dbfa034603ea566e33f5b.asciidoc | 45 ++ .../d93d52b6057a7aff3d0766ca44c505e0.asciidoc | 27 + .../d9474f66970c6955e24b17c7447e7b5f.asciidoc | 24 - .../d94f666616dea141dcb7aaf08a35bc10.asciidoc | 18 + .../d952ac7c73219d8cabc080679e035514.asciidoc | 21 + .../d979f934af0992fb8c8596beff80b638.asciidoc | 15 + .../d983c1ea730eeabac9e914656d7c9be2.asciidoc | 39 ++ .../d98fb2ff2cdd154dff4a576430755d98.asciidoc | 37 ++ .../d9a1ad1c5746b75972c74dd4d3a3d623.asciidoc | 21 + .../d9de409a4a197ce7cbe3714e07155d34.asciidoc | 32 + .../d9e0cba8e150681d861f5fd1545514e2.asciidoc | 13 + .../da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc | 26 + .../da18bae37cda566c0254b30c15221b01.asciidoc | 12 + .../da24c13eee8c9aeae9a23faf80489e31.asciidoc | 36 ++ .../da3cecc36a7313385d32c7f52ccfb7e3.asciidoc | 25 + .../da3f280bc65b581fb3097be768061bee.asciidoc | 10 + .../da8db0769dff7305f178c12b1111bc99.asciidoc | 15 + .../da90e457e2a34fe47dd82a0a2f336095.asciidoc | 17 + .../daae2e6acebc84e537764f4ba07f2e6e.asciidoc | 12 + .../dabb159e0b3456024889fb9754a10655.asciidoc | 17 + .../dabcf0bead37cae1d3e5d2813fd3ccfe.asciidoc | 15 + .../dac8ec8547bc446637fd97d9fa872f4f.asciidoc | 82 +++ .../dad2d4add751fde5c39475ca709cc14b.asciidoc | 13 + .../dadb69a225778ecd6528924c0aa029bb.asciidoc | 30 + .../dae57cf7df18adb4dc64426eb159733a.asciidoc | 22 + .../daf5631eba5285f1b929d5d8d8dc0d50.asciidoc | 32 + .../db19cc7a26ca80106d86d688f4be67a8.asciidoc | 10 + .../db6cba451ba562abe953d09ad80cc15c.asciidoc | 17 - .../db773f690edf659ac9b044dc854c77eb.asciidoc | 78 +++ .../db8710a9793ae0817a45892d33468160.asciidoc | 11 + .../db879dcf70abc4a9a14063a9a2d8d6f5.asciidoc | 93 +++ .../db9a8e3edee7c9a96ea0875fd4bbaa69.asciidoc | 8 + .../dbc50b8c934171e94604575a8b36f349.asciidoc | 11 + .../dbcd8892dd01c43d5a60c94173574faf.asciidoc | 41 ++ .../dbd1b930782d34d7396fdb2db1216c0d.asciidoc | 14 + .../dbdd58cdeac9ef20b42ff73e4864e697.asciidoc | 11 + .../dbf93d02ab86a09929a21232b19709cc.asciidoc | 10 + .../dbf9abc37899352751dab0ede62af2fd.asciidoc | 11 + .../dc15e2373e5ecbe09b4ea0858eb63d47.asciidoc | 38 -- .../dc33160f4087443f867080a8f5b2cfbd.asciidoc | 13 + .../dc3b7603e7d688106acb804059af7834.asciidoc | 15 + .../dc468865da947b4a9136a5b92878d918.asciidoc | 18 + .../dc4dcfeae8a5f248639335c2c9809549.asciidoc | 11 + .../dc8c94c9bef1f879282caea5c406f36e.asciidoc | 13 + .../dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc | 19 + .../dcee24dba43050e4b01b6e3a3211ce09.asciidoc | 21 + .../dcfa7f479a33f459a2d222a92e651451.asciidoc | 34 + 
.../dd0b196a099e1cca08c5ce4dd74e935a.asciidoc | 15 + .../dd1a25d821d0c8deaeaa9c8083152a54.asciidoc | 10 + .../dd3b263e9fa4226e59bedfc957d399d2.asciidoc | 11 + .../dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc | 8 + .../dd71b0c9f9197684ff29c61062c55660.asciidoc | 8 + .../dd792bb53703a57f9207e36d16e26255.asciidoc | 67 ++ .../dda949d20d07a9edbe64cefc623df945.asciidoc | 15 + .../ddcfa47381d47078dbec651e31b69949.asciidoc | 12 + .../dddb6a6ebd145f8411c5b4910d332f87.asciidoc | 10 + .../dde283eab92608e7bfbfa09c6482a12e.asciidoc | 10 + .../ddf375e4b6175d830fa4097ea0b41536.asciidoc | 11 + .../ddf56782ecc7eaeb3115e150c4830013.asciidoc | 29 + .../de139866a220124360e5e27d1a736ea4.asciidoc | 47 +- .../de176bc4788ea286fff9e92418a43ea8.asciidoc | 35 - .../de2f59887737de3a27716177b60393a2.asciidoc | 12 + .../de876505acc75d371d1f6f484c449197.asciidoc | 13 + .../de90249caeac6f1601a7e7e9f98f1bec.asciidoc | 15 + .../dea22bb4997e368950f0fc80f2a5f304.asciidoc | 11 + .../dea4ac54c63a10c62eccd7b7f6543b86.asciidoc | 19 + .../dead0682932ea6ec33c1197017bcb209.asciidoc | 25 + .../dec2af498a7e5892e8fcd09ae779c8f0.asciidoc | 26 + .../dee3023098d9e63aa9e113beea5686da.asciidoc | 27 + .../df04e2e9af66d5e30b1bfdbd458cab13.asciidoc | 11 + .../df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc | 38 ++ .../df103a3df9b353357e72f9180ef421a1.asciidoc | 18 + .../df1336e768fb6fc1826a5afa30a57285.asciidoc | 17 + .../df17f920b0deab3529b98df88b781f55.asciidoc | 40 -- .../df34c8ebaaa59a3ee0e3f28e2443bc30.asciidoc | 83 +++ .../df7dbac966b67404b8bfa9cdda5ef480.asciidoc | 10 + .../df7ed126d8c92ddd3655c59ce4f305c9.asciidoc | 12 + .../df82a9cb21a7557f3ddba2509f76f608.asciidoc | 12 + .../dfa16b7300d225e013f23625f44c087b.asciidoc | 65 ++ .../dfa75000edf4b960ed9002595a051871.asciidoc | 8 + .../dfb20907cfc5ac520ea3b1dba5f00811.asciidoc | 15 + .../dfb641d2d3155669ad6fb5a424dabf4f.asciidoc | 8 + .../dfbf53781adc6640493d49931a352167.asciidoc | 45 ++ .../dfcc83efefaddccfe5dce0695c2266ef.asciidoc | 17 + .../dfcdadcf91529d3a399e05684195028e.asciidoc | 18 + .../dfcdcd3ea6753dcc391a4a52cf640527.asciidoc | 30 + .../dfdf82b8d99436582f150117695190b3.asciidoc | 20 + .../dfef545b1e2c247bafd1347e8e807ac1.asciidoc | 23 - .../dff61a76d5ef9ca8cbe59a416269a84b.asciidoc | 10 + .../dffbbdc4025e5777c647d8818847b960.asciidoc | 11 + .../e0734215054e1ff5df712ce3a826cdba.asciidoc | 15 + .../e08fb1435dc659c24badf25b676efb68.asciidoc | 18 + .../e095fc96504efecc588f97673912e3d3.asciidoc | 54 ++ .../e09d30195108bd6a1f6857394a6123ea.asciidoc | 12 + .../e09ee13ce253c7892dd5ef076fbfbba5.asciidoc | 20 + .../e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc | 33 + .../e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc | 10 + .../e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc | 11 + .../e0d4a800de2d8f4062e69433586c38db.asciidoc | 13 + .../e0d6e02b998bdea99c9c08dcc3630c5e.asciidoc | 19 - .../e0fcef99656799de6b88117d56f131e2.asciidoc | 16 + .../e1220f2c28db6ef0233e26e6bd3866fa.asciidoc | 44 ++ .../e12f2d2ddca387630e7855a6db952da2.asciidoc | 41 ++ .../e1337c6b76defd5a46d05220f9d9c9fc.asciidoc | 12 + .../e14a5a5a1c880031486bfff43031fa3a.asciidoc | 10 + .../e16a353e619b935c5c70769b1b9fa100.asciidoc | 26 + .../e1874cc7cd22b6860ca8b11bde3c70c1.asciidoc | 24 + .../e194e9cbe3eb2305f4f7cdda0cf529bd.asciidoc | 23 + .../e19f5e3724d9f3f36a817b9a811ca42e.asciidoc | 29 + .../e1c08f5774e81da31cd75aa1bdc2c548.asciidoc | 34 + ...e1d6ecab4148b09f4c605474157e7dbd.asciidoc} | 8 +- ...e1f20ee96ce80edcc35b647cef731e15.asciidoc} | 17 +- .../e1f6ea7c0937cf7e6ea7e8209e52e8bb.asciidoc | 17 + .../e20037f66bf54bcac7d10f536f031f34.asciidoc | 
13 + .../e26c96978096ccc592849cca9db67ffc.asciidoc | 13 + .../e26e8bfa68aa4ab265b22304c38c3aef.asciidoc | 90 +++ .../e270f3f721a5712cd11a5ca03554f5b0.asciidoc | 26 +- .../e273060a675c959fd5f3cde27c8aff07.asciidoc | 18 + .../e2750d69bcb6d4c7e16e704cd0fb3530.asciidoc | 24 + .../e2883c88b5ceca9fce1e70e716d80025.asciidoc | 17 + .../e2a042c629429855c3bcaefffb26b7fa.asciidoc | 21 - .../e2a22c6fd58cc0becf4c383134a08f8b.asciidoc | 25 + .../e2a753029b450942a3228e3003a55a7d.asciidoc | 20 + .../e2a7d127b82ddebb690a959dcd0cbc09.asciidoc | 20 + .../e2b4867a9f72bda87ebaa3608d3fba4c.asciidoc | 28 + .../e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc | 8 + .../e2d8cf24a12053eb09fec7087cdab43a.asciidoc | 33 + .../e2ec9e867f7141b304b53ebc59098f2a.asciidoc | 10 + .../e3019fd5f23458ae49ad9854c97d321c.asciidoc | 8 + .../e30ea6e3823a139d7693d8cce1920a06.asciidoc | 22 +- .../e316271f668c9889bf548311fb421f1e.asciidoc | 18 + .../e317a8380dfbc76c4e7f23d0997b3518.asciidoc | 12 + .../e324ea1547635180c31c1adf77870ba2.asciidoc | 41 ++ .../e35abc9403e4aef7d538ab29ccc363b3.asciidoc | 14 + .../e3678142aec988e2ff0ae5d934dc39e9.asciidoc | 102 +++ .../e3a6462ca79c101314da0680c97678cd.asciidoc | 29 + .../e3b3a8ae12ab947ad3ba96eb228402ca.asciidoc | 13 + .../e3f2f6ee3e312b8a90634827ae954d70.asciidoc | 28 + .../e4193867485595c9c92f909a052d2a90.asciidoc | 23 + .../e41a9bac42d0c1cb103674ae9039b7af.asciidoc | 23 + .../e441cb3be3c2f007621ee1f8c9a2e0ef.asciidoc | 16 + .../e451900efbd8be50c2b8347a83816aa6.asciidoc | 31 + .../e46c83db1580e14be844079cd008f518.asciidoc | 15 + .../e47a71a2e314dbbee5db8142a23957ce.asciidoc | 19 + .../e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc | 12 + .../e494162e83ce041c56b2e2bc29d33474.asciidoc | 12 + .../e4b2b5e0aaedf3cbbcde3d61eb1f13fc.asciidoc | 15 + .../e4b64b8277af259a52c8d3940157b5fa.asciidoc | 51 ++ .../e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc | 10 + .../e4be53736bcc02b03068fd72fdbfe271.asciidoc | 19 +- .../e4d1f01c025fb797a1d87f372760eabf.asciidoc | 11 + .../e4de6035653e8202c43631f02d244661.asciidoc | 17 + .../e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc | 8 + .../e51a86b666f447cda5f634547a8e1a4a.asciidoc | 10 + .../e551ea38a2d8f8deac110b33304200cc.asciidoc | 25 + .../e566e898902e432bc7ea0568400f0c50.asciidoc | 35 + .../e567e6dbf86300142573c73789c8fce4.asciidoc | 13 - .../e586d1d2a997133e039fd352a42a72b3.asciidoc | 18 + .../e58833449d01379df20ad06dc28144d8.asciidoc | 16 + .../e58b7965c3a314c34bc444c6db3b1b79.asciidoc | 12 + .../e5901f48eb8a419b878fc2cb815d8691.asciidoc | 12 + .../e5c710b08a545522d50b4ce35503bc46.asciidoc | 37 ++ .../e5d2172b524332196cac0f031c043659.asciidoc | 19 - .../e5f50b31f165462d883ecbff45f74985.asciidoc | 44 +- .../e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc | 13 + .../e5f8f83df37ab2296dc4bfed95d7aba7.asciidoc | 12 + .../e608cd0c034f6c245ea87f425e09ce2f.asciidoc | 14 + .../e60b7f75ca806f2c74927c3d9409a986.asciidoc | 17 + .../e60c2bf89fdf38187709d04dd1c55330.asciidoc | 17 + .../e60ded7becfd5b2ccaef5bad2aaa93f5.asciidoc | 18 + .../e619e896ce3dad9dcfc6f8700438be98.asciidoc | 57 ++ .../e61b5abe85000cc954a42e2cd74f3a26.asciidoc | 10 + .../e6369e7cef82d881af593d5526bf79bd.asciidoc | 17 + .../e63775a2ff22b945ab9d5f630b80c506.asciidoc | 11 + .../e63cf08350e9381f519c2835843be7cd.asciidoc | 22 + .../e642be44a62a89cf4afb2db28220c9a9.asciidoc | 13 + .../e650d73c57ab313e686fec01e3b0c90f.asciidoc | 21 + .../e697ef947f3fb7835f7fadb9125b1043.asciidoc | 20 + .../e6b972611c0ec8ab4c240f33f323d85b.asciidoc | 20 + .../e6ccd979c34ba03007e625c6ec3e71a9.asciidoc | 8 + 
.../e6dcc2911d2416a65eaec9846b956e15.asciidoc | 10 + .../e6e47da87079a8b67f767a2a01878cf2.asciidoc | 21 + .../e6faae2e272ee57727f38e55a3de5bb2.asciidoc | 19 + .../e71d300cd87f09a9527cf45395dd7eb1.asciidoc | 8 + .../e7811867397b305efbbe8925d8a01c1a.asciidoc | 33 + .../e784fc00894635470adfd78a0c46b427.asciidoc | 29 + .../e7d819634d765cde269e2669e2dc677f.asciidoc | 10 + .../e7e95022867c72a6563137f066dd2973.asciidoc | 24 + .../e7eca57a5bf5a53cbbe2463bce11495b.asciidoc | 18 + .../e8211247c280a3fbbbdd32850b743b7b.asciidoc | 21 + .../e821d27a8b810821707ba860e31f8b78.asciidoc | 20 + .../e827a9040e137410d62d10bb3b3cbb71.asciidoc | 10 + .../e82c33def91faddcfeed7b02cd258605.asciidoc | 25 + .../e84e23232c7ecc8d6377ec2c16a60269.asciidoc | 21 + .../e88a057a13e191e4d5faa22edf2ae8ed.asciidoc | 11 + .../e891e1d4805172da45a81f62b6b44aca.asciidoc | 24 + .../e89bf0d893b7bf43c2d9b44db6cfe21b.asciidoc | 18 + .../e8a2726eea5545355d1d0835d4599f55.asciidoc | 15 + .../e8bb5c57bdeff22be8e5f39a99dfe70e.asciidoc | 31 + .../e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc | 10 + .../e8cbe2269f3dff6b231e73119e81511d.asciidoc | 14 + .../e8e451bc8c45bcf16df43804c4fc8329.asciidoc | 31 - .../e8ea65153d7775f25b08dfdfe6954498.asciidoc | 15 + .../e8f1c9ee003d115ec8f55e57990df6e4.asciidoc | 13 + .../e905543b281e9c41395304da76ed2ea3.asciidoc | 10 + .../e930a572e8ddfdecc13498c04007b9e3.asciidoc | 23 + .../e93ff228ab3e63738e1c83fdfb7424b9.asciidoc | 21 + .../e944653610f311fa06148d5b0afdf697.asciidoc | 37 ++ .../e95ba581b298cd7bb598374afbfed315.asciidoc | 10 + .../e95e61988dc3073a007f7b7445dd233b.asciidoc | 22 + .../e9738fe09a99080506a07945795e8eda.asciidoc | 12 + .../e99c45a47dc0ba7440aea8a9a99c84fa.asciidoc | 29 + .../e9a0b450af6219772631703d602c7092.asciidoc | 41 ++ .../e9c2e15b36372d5281c879d336322b6c.asciidoc | 22 - .../e9f9e184499a793828233e536fac0487.asciidoc | 16 + .../e9fc47015922d51c2b05e502ce9c622e.asciidoc | 18 + .../e9fe3b53b5b6e1ff9566b5237c0fa513.asciidoc | 46 ++ .../e9fe608f105d7e3268a15e409e2cb9ab.asciidoc | 71 -- .../ea020ea32d5cd35e577c61a120f92451.asciidoc | 38 ++ .../ea02de2dbe05091fcb0dac72c8ba5f83.asciidoc | 18 - .../ea29029884a5fd9a8d8830d25884bf07.asciidoc | 16 + .../ea313059c18d6edbd28c3f743a5e7c1c.asciidoc | 26 + ...ea5391267ced860c00214c096e08c8d4.asciidoc} | 15 +- .../ea5b4d2d87fd4e040afad18903c44869.asciidoc | 31 + .../ea61aa2531ea73ccc0acd2d41f0518eb.asciidoc | 41 ++ .../ea66a620c23337545e409c120c4ed5d9.asciidoc | 10 + .../ea68e3428cc2ca3455bf312d09451489.asciidoc | 22 + .../ea690283f301c6ce957efad93d7d5c5d.asciidoc | 20 + .../ea92390651e8ecad0c890658985343c5.asciidoc | 22 + .../eab3cad0257c539c5efd2689aa52f242.asciidoc | 11 + .../eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc | 61 ++ .../ead4d875877d618594d0cdbdd9b7998b.asciidoc | 13 + .../eada8af6588584ac88f1e5b15f4a5c2a.asciidoc | 44 ++ .../eae8931d01b3b878dd0c45214121e662.asciidoc | 18 + .../eaf53b05959cc6b7fb09579baf34de68.asciidoc | 40 ++ .../eaf6a846ded090fd6ac48269ad2b328b.asciidoc | 19 + .../eafdabe80b21b90495555fa6d9089412.asciidoc | 12 + .../eb09235533a1c65a0627ba05f7d4ad4d.asciidoc | 28 + .../eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc | 12 + .../eb33a7e5a0fe83fdaa0f79354f659428.asciidoc | 19 + .../eb4e43b47867b54214a8630172dd0e21.asciidoc | 11 + .../eb54506fbc71a7d250e86b22d0600114.asciidoc | 14 + .../eb5486d2fe4283475bf9e0e09280be16.asciidoc | 21 + .../eb5987b58dae90c3a8a1609410be0570.asciidoc | 39 ++ .../eb6d62f1d855a8e8fe9eab2656d47504.asciidoc | 31 + .../eb964d8d7f27c057a4542448ba5b74e4.asciidoc | 14 + .../eb96d7dd5f3116a50f7a86b729f1a934.asciidoc 
| 19 + .../ebb1c7554e91adb4552599f3e5de1865.asciidoc | 15 + .../ebd76a45e153c4656c5871e23b7b5508.asciidoc | 11 + .../ebef3dc8ed1766d433a5cffc40fde7ae.asciidoc | 10 + .../ec0e50f78390b8622cef4e0b0cd45967.asciidoc | 12 + .../ec195297eb804cba1cb19c9926773059.asciidoc | 17 + .../ec27afee074001b0e4e393611010842b.asciidoc | 24 - .../ec420b28e327f332c9e99d6040c4eb3f.asciidoc | 17 + .../ec44999b6618ac6bbacb23eb08c0fa88.asciidoc | 27 + .../ec473de07fe89bcbac1f8e278617fe46.asciidoc | 26 - .../ec5a2ce156c36aaa267fa31dd9367307.asciidoc | 19 + .../ec69543e39c1f6afb5aff6fb9adc400d.asciidoc | 29 + .../ec736c31f49c54e5424efa2e53b22906.asciidoc | 35 + .../ec8f176ebf436d5719bdeca4a9ea8220.asciidoc | 30 + .../ecc57597f6b791d1151ad79d9f4ce67b.asciidoc | 21 + .../ece01f9382e450f669c0e0925e5b30e5.asciidoc | 28 + .../ecfd0d94dd14ef05dfa861f22544b388.asciidoc | 14 + .../ed01b542bb56b1521ea8d5a3c67aa891.asciidoc | 17 + .../ed01d27b8f80bb4ea54bf4e32b8d6258.asciidoc | 35 + .../ed09432c6069e41409f0a5e0d1d3842a.asciidoc | 16 + .../ed12eeadb4e530b53c4975dadaa06054.asciidoc | 10 + .../ed250b74bc77c15bb794f55a12d762c3.asciidoc | 10 + .../ed27843eff311f3011b679e97e6fda50.asciidoc | 15 + .../ed3bdf4d6799b43526851e92b6a60c55.asciidoc | 11 + .../ed5bfa68d01e079aac94de78dc5caddf.asciidoc | 10 + .../ed5c3b45e8de912faba44507d827eb93.asciidoc | 23 + .../ed60daeaec351fc8b3f39a3dfad6fc4e.asciidoc | 23 + .../ed688d86eeaa4d7969acb0f574eb917f.asciidoc | 19 + .../ed6b996ea389e0955a01c2e67f4c8339.asciidoc | 11 + .../ed7fa1971ac322aeccd6391ab32d0490.asciidoc | 11 + .../ed85ed833bec7286a0dfbe64077c5715.asciidoc | 39 ++ .../edae616e1244babf6032aecc6aaaf836.asciidoc | 26 + .../edb25dc0162b039d477cb06aed2d6275.asciidoc | 39 ++ .../edb5cad890208014ecd91f3f739ce193.asciidoc | 10 + .../edcfadbfb14d97a2f5e6e21ef7039818.asciidoc | 40 ++ .../ee08328cd157d547de19b4abe867b23e.asciidoc | 10 + .../ee0fd67acc807f1bddf5e9807c06e7eb.asciidoc | 95 +++ .../ee223e604bb695cad2517d28ae63ac34.asciidoc | 35 + .../ee2d97090d617ed8aa2a87ea33556dd7.asciidoc | 12 + .../ee577c4c7cc723e99569ea2d1137adba.asciidoc | 10 + .../ee634d59def6302134d24fa90e18b609.asciidoc | 20 + .../ee90d1fb22b59d30da339d825303b912.asciidoc | 24 + .../eeb35b759bd239bb773c8ebd5fe63d05.asciidoc | 25 + .../eec051555c8050d017d3fe38ea59e3a0.asciidoc | 17 + .../eed37703cfe8fec093ed5a42210a6ffd.asciidoc | 33 + .../eee6110831c08b9c1b3f56b24656e95b.asciidoc | 18 + .../eef9deff7f9799d1f7657bb7e2afb7f1.asciidoc | 11 + .../ef0f4fa4272c47ff62fb7b422cf975e7.asciidoc | 24 - .../ef10e8d07d9fae945e035d5dee1e9754.asciidoc | 18 + .../ef22234b97cc06d7dd620b4ce7c97b31.asciidoc | 16 + .../ef33b3b373f7040b874146599db5d557.asciidoc | 12 + ...ef3666b5d288faefbcbc4a25e8f506da.asciidoc} | 8 +- .../ef779b87b3b0fb6e6bae9c8875e3a1cf.asciidoc | 26 + .../ef867e563cbffe7866769a096b5d7a92.asciidoc | 31 + .../ef8f30e85e12e9a5a8817d28977598e4.asciidoc | 29 + .../ef9111c1648d7820925f12e07d1346c5.asciidoc | 25 - .../ef9c29759459904fef162acd223462c4.asciidoc | 11 + .../efa146bf81a9351ba42b92a6decbcfee.asciidoc | 17 + .../efa924638043f3a6b23ccb824d757eba.asciidoc | 32 + .../eff2fc92d46eb3c8f4d424eed18f54a2.asciidoc | 19 + .../eff8ecaed1ed084909c64450fc363a20.asciidoc | 12 + .../f04e1284d09ceb4443d67b2ef9c7f476.asciidoc | 11 + .../f0816beb8ac21cb0940858b72f6b1946.asciidoc | 11 + .../f085fb032dae56a3b104ab874eaea2ad.asciidoc | 20 - .../f097c02541056f3c0fc855e7bbeef8a8.asciidoc | 39 ++ .../f09817fd13ff3dce52eb79d0722409c3.asciidoc | 49 ++ .../f0bfc8d7ab4eb94ea5fdf2e087d8cf5b.asciidoc | 29 + 
.../f0c3235d8fce641d6ff8ce90ab7b7b8b.asciidoc | 15 + .../f0e21e03a07c8fa0209b0aafdb3791e6.asciidoc | 26 - .../f10ab582387b2c157917a60205c993f7.asciidoc | 20 + .../f128a9dff5051b47efe2c53c4454a68f.asciidoc | 16 + .../f14d0e4a280fee540e8e5f0fc4d0e9f1.asciidoc | 19 + .../f1508a2221152842894819e762e63491.asciidoc | 14 + .../f160561efab38e40c2feebf5a2542ab5.asciidoc | 11 + .../f18248c181690b81d090275b072f0070.asciidoc | 13 + .../f187ac2dc35425cb0ef48f328cc7e435.asciidoc | 12 + .../f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc | 15 + .../f1bf3edbd9e6c7e01b00c74c99a58b61.asciidoc | 21 + .../f1d2b8169160adfd27f32988113f0f9f.asciidoc | 20 + .../f1dc6f69453867ffafe86e998dd464d9.asciidoc | 17 + .../f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc | 10 + .../f2175feadc2abe545899889e6d4ffcad.asciidoc | 11 + .../f235544a883fd04bed2dc369b0c450f3.asciidoc | 16 + .../f2359acfb6eaa919125463cc1d3a7cd1.asciidoc | 17 + .../f268416813befd13c604642c6fe6eda9.asciidoc | 27 + .../f27c28ddbf4c266b5f42d14da837b8de.asciidoc | 8 + .../f281ff50b2cdb67ac0ece93f1594fa95.asciidoc | 31 + .../f298c4eb50ea97b34c57f8756eb350d3.asciidoc | 10 + .../f29a28fffa7ec604a33a838f48f7ea79.asciidoc | 63 +- .../f29b2674299ddf51a25ed87619025ede.asciidoc | 18 + .../f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc | 11 + .../f2b2d62bc0a44940ad14fca57d6d008a.asciidoc | 106 +++ .../f2c9afd052878b2ec00908739b0d0f74.asciidoc | 40 ++ .../f2d68493abd3ca430bd03a7f7f8d18f9.asciidoc | 21 - .../f2e854b6c99659ccc1824e86c096e433.asciidoc | 10 + .../f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc | 27 + .../f2f1cae094855a45fd8f73478bec8e70.asciidoc | 17 + .../f329242d7c8406297eff9bf609870c37.asciidoc | 22 + .../f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc | 21 +- .../f342465c65ba76383dedbb334b57b616.asciidoc | 42 ++ .../f34c02351662481dd61a5c2a3e206c60.asciidoc | 18 + .../f3574cfee3971d98417b8dc574a91be0.asciidoc | 44 ++ .../f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc | 15 + .../f3697682a886ab129530f3e5c1b30632.asciidoc | 11 + .../f37173a75cd1b0d683c6f67819dd1de3.asciidoc | 11 + .../f38262ef72f73816ec35fa4c9c85760d.asciidoc | 46 ++ .../f388e571224dd6850f8c9f9f08fca3da.asciidoc | 10 + .../f3942d9b34138dfca79dff707af270b7.asciidoc | 13 + .../f39512478cae2db8f4566a1e4af9e8f5.asciidoc | 38 ++ .../f3ab820e1f2f54ea718017aeae865742.asciidoc | 26 + .../f3b185131f40687c25d2f85e1231d8bd.asciidoc | 11 + .../f3b4ddce8ff21fc1a76a7c0d9c36650e.asciidoc | 21 + .../f3c696cd63a3f042e62cbb94b75c2427.asciidoc | 11 + .../f3e1dfe1c440e3590be26f265e19425d.asciidoc | 29 + .../f3fb3cba44988b6e9fee93316138b2cf.asciidoc | 10 + .../f3fb52680482925c202c2e2f8af6f044.asciidoc | 11 + .../f3fe2012557ebbce1ebad4fc997c092d.asciidoc | 16 + .../f43d551aaaad73d979adf1b86533e6a3.asciidoc | 19 + .../f43ec4041e3b72bbde063452990bfc4b.asciidoc | 10 + .../f44d287c6937785eb09b91353c1deb1e.asciidoc | 10 + .../f453e14bcf30853e57618bf12f83e148.asciidoc | 26 + .../f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc | 49 ++ .../f45990264f8755b96b11c69c12c90ff4.asciidoc | 10 + .../f495f9c99916a05e3b28166d31955fad.asciidoc | 26 + .../f49ac80f0130cae8d0ea6f4472a149dd.asciidoc | 26 + .../f4a1008b3f9baa67bb03ce9ef5ab4cb4.asciidoc | 20 - .../f4ae3f3fbf07a7d39122ac5ac20b9c03.asciidoc | 26 + .../f4b9baed3c6a82be3672cbc8999c2368.asciidoc | 12 + .../f4c194628761a4cf2a01453a96bbcc3c.asciidoc | 42 ++ .../f4d0ef2e0f76babee83d999fe35127f2.asciidoc | 38 ++ .../f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc | 20 + .../f4f557716049b23f8840d58d71e748f0.asciidoc | 15 + .../f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc | 14 + 
.../f5013174f77868da4dc40cdd745d4ea4.asciidoc | 17 + .../f5140f08f56c64b5789357539f8b9ba8.asciidoc | 11 + .../f545bb95214769aca993c1632a71ad2c.asciidoc | 59 ++ .../f54f6d06163221f2c7aff6e8db942be3.asciidoc | 22 + .../f5569945024b9d664828693705c27c1a.asciidoc | 12 - .../f57ce7de0946e9416ddb9150e95f4b74.asciidoc | 20 + .../f5815d573cee0447910c9668003887b8.asciidoc | 19 + .../f58969ac405db85f439c5940d014964b.asciidoc | 24 + .../f58fd031597e2c3df78bf0efd07206e3.asciidoc | 10 + .../f5bf2526af19d964f8c4c59d4795cffc.asciidoc | 31 + .../f5cbbb60ca26867a5d2da625a68a6e65.asciidoc | 38 ++ .../f5e50fe8a60467adb2c5ee9e0f2d88da.asciidoc | 11 + .../f5e6378cc41ddf5326fe4084396c59b2.asciidoc | 22 + .../f5eed3f2e3558a238487bc85305b7a71.asciidoc | 14 + .../f5ef80dd92c67059ca353a833e6b7b5e.asciidoc | 27 + .../f63f6343e74bd5c844854272e746de14.asciidoc | 10 + .../f642b64e592131f37209a5100fe161cc.asciidoc | 43 ++ .../f6566395f85d3afe917228643d7318d6.asciidoc | 10 + .../f656c1e64268293ecc8ebd8065628faa.asciidoc | 12 + .../f65abb38dd0cfedeb06e0cef206fbdab.asciidoc | 12 + .../f66643c54999426c5afa6d5a87435d4e.asciidoc | 10 + .../f67d8aab9106ad24b1d2c771d3840ed1.asciidoc | 70 ++ .../f6911b0f2f56523ccbd8027f276981b3.asciidoc | 16 + .../f6982ff80b9a64cd5fcac5b20908c906.asciidoc | 11 + .../f6b5032bf27c2445d28845be0d413970.asciidoc | 20 - .../f6c9d72fa26cbedd0c3f9fa64a88c38a.asciidoc | 13 + .../f6d493650b4344f17297b568016fb445.asciidoc | 10 + .../f6d6889667f56b8f49d2858070571a6b.asciidoc | 27 - .../f6de702c3d097af0b0bd391c4f947233.asciidoc | 12 + .../f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc | 10 + .../f6ead39c5505045543b9225deca7367d.asciidoc | 10 + .../f6edbed2b5b2709bbc13866a4780e27a.asciidoc | 23 + .../f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc | 22 + .../f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc | 25 +- .../f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc | 16 + .../f7139b3c0e066be832b9100ae17157cc.asciidoc | 12 + .../f733b25cd4c448b226bb76862974eef2.asciidoc | 27 + .../f749efe8f11ebd43ef83db91922c736e.asciidoc | 22 + .../f7726cc2c60dea26b88bf0df99fb0813.asciidoc | 17 + .../f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc | 11 + .../f7d3d367a3d8e8ff0eca426b6ea85252.asciidoc | 16 + .../f7dc2fed08e57abda2c3e8a14f8eb098.asciidoc | 39 ++ .../f7ec9062b3a7578fed55f119d7c22b74.asciidoc | 12 + .../f823e4b87ed181b27f73ebc51351f0ee.asciidoc | 10 + .../f83eb6605c7c56e297a494b318400ef0.asciidoc | 26 + .../f8525c2460a577edfef156c13f55b8a7.asciidoc | 34 + .../f8651356ce2e7e93fa306c30f57ed588.asciidoc | 20 + .../f8833488041f3d318435b60917fa877c.asciidoc | 29 + .../f8a0010753b1ff563dc42d703902d2fa.asciidoc | 45 ++ .../f8cafb1a08bc9b2dd5239f99d4e93f4c.asciidoc | 14 + .../f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc | 10 + .../f92d2f5018a8843ffbb56ade15f84406.asciidoc | 8 + .../f9636d7ef1a45be4f36418c875cf6bef.asciidoc | 26 - .../f96d4614f2fc294339fef325b794355f.asciidoc | 12 + .../f96d8131e8a592fbf6dfd686173940a9.asciidoc | 27 + .../f9732ce07960134ea7156e118c2da8a6.asciidoc | 20 + .../f978088f5117d4addd55c11ee3777312.asciidoc | 12 + .../f97aa2efabbf11a534073041eb2658c9.asciidoc | 10 + ...f98687271e1bec031cc34d05d8f4b60b.asciidoc} | 22 +- .../f9a315ea99bed0cf2f36be1d74eb3e4a.asciidoc | 14 + ...f9c8245cc13770dff052b6759a749efa.asciidoc} | 9 +- .../f9cb2547ab04461a12bfd25a35be5f96.asciidoc | 38 ++ .../f9ee5d55a73f4c1fe7d507609047aefd.asciidoc | 17 + .../f9f541ae23a184301913f07e62d1afd3.asciidoc | 14 + .../fa0f4485cd48f986b7ae8cbb24e331c4.asciidoc | 22 - .../fa2fe60f570bd930d2891778c6efbfe6.asciidoc | 17 - .../fa42ae3bf6a300420cd0f77ba006458a.asciidoc | 
11 + .../fa5dcd1c7fadc473a791daf0d7ceec36.asciidoc | 25 + .../fa61e3481b1f889b3bd4253866bb1c6b.asciidoc | 86 +++ .../fa82d86a046d67366cfe9ce65535e433.asciidoc | 24 + .../fa88f6f5a7d728ec4f1d05244228cb09.asciidoc | 29 +- .../fa946228e946da256d40264c8b070a1a.asciidoc | 20 + .../fa9a3ef94470f3d9bd6500b65bf993d1.asciidoc | 12 + .../fab4b811ba968aa4df92fb1ac059ea31.asciidoc | 17 + .../fab702851e90e945c1b62dec0bb6a205.asciidoc | 10 + .../fabe14480624a99e8ee42c7338672058.asciidoc | 9 +- .../fad26f4fb5a1bc9c38db33394e877d94.asciidoc | 10 + .../fad524db23eb5718ff310956e590b00d.asciidoc | 17 + .../faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc | 28 + .../fb1180992b2087dfb36576b44c4261e4.asciidoc | 21 + .../fb1263cfdcbb6a89b20b57004d7e0dfc.asciidoc | 28 + .../fb2b91206cfa8b86b4c7117ac1b5193b.asciidoc | 36 ++ .../fb3505d976283fb7c7b9705a761e0dc2.asciidoc | 25 + .../fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc | 10 + .../fb955375a202f66133af009c04cb77ad.asciidoc | 23 + .../fbb38243221c8fb311660616e3add9ce.asciidoc | 26 + .../fbc5ab85b908480bf944b55da0a43488.asciidoc | 16 + .../fbdad6620eb645f5f1f02e3673604d01.asciidoc | 23 + .../fc1907515f6a913884a9f86451e90ee8.asciidoc | 23 + .../fc190fbbf71949331266dcb3f46a1198.asciidoc | 10 + .../fc26f51bb22c0b5270a66b4722f18aa7.asciidoc | 22 + .../fc3f5f40fa283559ca615cd0eb0a1755.asciidoc | 20 + .../fc49437ce2e7916facf58128308c2aa3.asciidoc | 18 + .../fc51fbc60b0e20aac83300a43ad90252.asciidoc | 28 + .../fc5a81f34d416e4b45ca8a859dd3b8f1.asciidoc | 20 + .../fc75ea748e5f49b8ab292e453ab641a6.asciidoc | 30 + .../fc8097bdfb6f3a4017bf4186ccca8a84.asciidoc | 61 -- .../fc8a426f8a5112e61e2acb913982a8d9.asciidoc | 16 + .../fc9a1b1173690a911725cff3912e9755.asciidoc | 19 + .../fccbddfba9f975de7e321732874dfb78.asciidoc | 11 + .../fce5c03a388c893cb11a6696e068543f.asciidoc | 34 + .../fce7a35a737fc9e54ac1225e310dd561.asciidoc | 30 + .../fd04289c54493e19c1d3ac70d0b489c4.asciidoc | 18 + .../fd0cd8ecd03468726b59a605eea06d75.asciidoc | 40 ++ .../fd26bfdbe95b2d2db374385d12849f77.asciidoc | 20 + .../fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc | 8 + .../fd352b472d44d197022a46fce90b6ecb.asciidoc | 29 + .../fd60b4092c6552164862cec287359676.asciidoc | 11 + .../fd620f09dbce62c6f0f603a366623607.asciidoc | 22 + .../fd6fdc8fa994dd02cf1177077325304f.asciidoc | 14 + .../fd738a9af7b5d21da31a7722f03aade8.asciidoc | 13 + .../fd7eeadab6251d9113c4380a7fbe2572.asciidoc | 18 + .../fd9b668eeb1f117950bd4991c7c03fb1.asciidoc | 11 + .../fdada036a875d7995d5d7aba9c06361e.asciidoc | 19 + .../fdc8e090293e78e9a6b283650b682517.asciidoc | 10 + .../fdd38f0d248385a444c777e7acd97846.asciidoc | 22 - .../fde3463ddf136fdfff1306a60986515e.asciidoc | 12 + .../fdf7cfdf1c92d21ee710675596eac6fd.asciidoc | 38 ++ .../fe208d94ec93eabf3bd06139fa70701e.asciidoc | 44 ++ .../fe3a927d868cbc530e08e05964d5174a.asciidoc | 33 + .../fe54f3e53dbe7dee40ec3108a461d19a.asciidoc | 30 + .../fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc | 29 + .../fe6e35839f7d7381f8ec535c8f21959b.asciidoc | 22 + .../fe7169bab8e626f582c9ea87585d0f35.asciidoc | 20 + .../fe806011466e7cdc1590da186297edb6.asciidoc | 10 + .../fe825c05e13e8163073166572c7ac97d.asciidoc | 21 + .../fe8c3e2632f5057bfbd1898a8fe4d0d2.asciidoc | 36 ++ .../fe96ca3b2a559d8411aca7ed5f3854bd.asciidoc | 11 + .../febb71d774e0a1fc67454213d7448c53.asciidoc | 12 + .../fece7c0fe1f7d113aa05ff5346a18aff.asciidoc | 43 ++ .../feda4b996ea7004f8b2c5f5007fb717b.asciidoc | 21 + .../feefeb68144002fd1fff57b77b95b85e.asciidoc | 21 - .../fef520cbc9b0656e6aac7b3dd3da9984.asciidoc | 12 + 
 .../ff05842419968a2141bde0371ac2f6f4.asciidoc | 19 +
 .../ff09e13391cecb2e8b9dd440b37e065f.asciidoc | 12 +
 .../ff1b96d2fdcf628bd938bff9e939943c.asciidoc | 26 +
 .../ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc | 29 +
 .../ff56ded50c65998c70f3c5691ddc6f86.asciidoc | 10 +
 .../ff63ae39c34925dbfa54282ec9989124.asciidoc | 26 +
 .../ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc | 15 +
 .../ff7b81fa96c3b994efa3dee230512291.asciidoc | 27 +
 .../ff945f5db7d8a9b0d9f6a2f2fcf849e3.asciidoc | 34 +
 .../ffcf80e1094aa2d774f56f6b0bc54827.asciidoc | 12 +
 .../ffd63dd186ab81b893faec3b3358fa09.asciidoc | 10 +
 .../ffe45a7c70071730c2078cabb8cbdf95.asciidoc | 31 +
 3326 files changed, 67822 insertions(+), 6730 deletions(-)
 create mode 100644 docs/doc_examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc
 create mode 100644 docs/doc_examples/004743b9c9f61588926ccf734696b713.asciidoc
 rename docs/doc_examples/{4b90feb9d5d3dbfce424dac0341320b7.asciidoc => 004a17b42ab5155bb61da797a006fa9f.asciidoc} (51%)
 create mode 100644 docs/doc_examples/006e0e16c9f1da58c0bfe57377f7fc38.asciidoc
 create mode 100644 docs/doc_examples/007179b5e241da650562a5f0a5007823.asciidoc
 create mode 100644 docs/doc_examples/008ed823c89e703c447ac89c6b689833.asciidoc
 rename docs/doc_examples/{506844befdc5691d835771bcbb1c1a60.asciidoc => 0091fc75271b1fbbd4269622a4881e8b.asciidoc} (55%)
 create mode 100644 docs/doc_examples/00b3b6d76a368ae71277ea24af318693.asciidoc
 create mode 100644 docs/doc_examples/00c05aa931fc985985e3e21c93cf43ff.asciidoc
 create mode 100644 docs/doc_examples/00d65f7b9daa1c6b18eedd8ace206bae.asciidoc
 create mode 100644 docs/doc_examples/00e0c964c79fcc1876ab957da2ffce82.asciidoc
 create mode 100644 docs/doc_examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc
 create mode 100644 docs/doc_examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc
 create mode 100644 docs/doc_examples/0163af36c8472ac0c5160c8b716f5b26.asciidoc
 rename docs/doc_examples/{b4392116f2cc57ce8064ccbad30318d5.asciidoc => 016f3147dae9ff2c3e831257ae470361.asciidoc} (56%)
 create mode 100644 docs/doc_examples/019e329ed5a930aef825266822e7377a.asciidoc
 create mode 100644 docs/doc_examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc
 rename docs/doc_examples/{0c4ad860a485fe53d8140ad3ccd11dcf.asciidoc => 01da9e0620e48270617fc248e6415cac.asciidoc} (55%)
 create mode 100644 docs/doc_examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc
 create mode 100644 docs/doc_examples/01f50acf7998b24969f451e922d145eb.asciidoc
 create mode 100644 docs/doc_examples/020c95db88ef356093f03be84893ddf9.asciidoc
 create mode 100644 docs/doc_examples/020de6b6cb960a76297452725a38889f.asciidoc
 create mode 100644 docs/doc_examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc
 create mode 100644 docs/doc_examples/025155da86802ebf4c3aeee5aab692f9.asciidoc
 create mode 100644 docs/doc_examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc
 create mode 100644 docs/doc_examples/0264e994a7e68561e2ca6be0f0d90ee9.asciidoc
 create mode 100644 docs/doc_examples/0280247e0cf2e561c548f22c9fb31163.asciidoc
 create mode 100644 docs/doc_examples/02853293a5b7cd9cc7a886eb413bbeb6.asciidoc
 delete mode 100644 docs/doc_examples/028f6d6ac2594e20b78b8a8f8cbad49d.asciidoc
 create mode 100644 docs/doc_examples/029de2f5383a42e1ac4ca1565bd2a130.asciidoc
 create mode 100644 docs/doc_examples/02b00f21e9d23d82276ace0dd154d779.asciidoc
 rename docs/doc_examples/{43682666e1abcb14770c99f02eb26a0d.asciidoc => 02b6aa3e5652839f03de3a655854b897.asciidoc} (75%)
 create mode 100644 docs/doc_examples/02c48d461536709c3fc8a0e8147c3787.asciidoc
 create mode 100644
docs/doc_examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc create mode 100644 docs/doc_examples/02fad6b80bb29c2a7e6840db2fc67b18.asciidoc rename docs/doc_examples/{1d65cb6d055c46a1bde809687d835b71.asciidoc => 0308cbd85281f95fc458042afe3f587d.asciidoc} (68%) create mode 100644 docs/doc_examples/032eac56b798bea29390e102538f4a26.asciidoc delete mode 100644 docs/doc_examples/033778305d52746f5ce0a2a922c8e521.asciidoc create mode 100644 docs/doc_examples/033838729cfb5d1a28d04f69ee78d924.asciidoc create mode 100644 docs/doc_examples/0350410d11579f4e876c798ce1eaef5b.asciidoc create mode 100644 docs/doc_examples/0350ff5ebb8207c004eb771088339cb4.asciidoc create mode 100644 docs/doc_examples/03582fc93683e573062bcfda45e01d69.asciidoc create mode 100644 docs/doc_examples/035a7a919eb6513b4769a3727b7d6447.asciidoc create mode 100644 docs/doc_examples/03891265df2111a38e0b6b24c1b967e1.asciidoc create mode 100644 docs/doc_examples/03b1d76fa0b773d5b7d74ecb7e1e1a80.asciidoc create mode 100644 docs/doc_examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc create mode 100644 docs/doc_examples/04412d11783dac25b5fd2ec5407078a3.asciidoc create mode 100644 docs/doc_examples/044b2f99e7438e408685b258db17f863.asciidoc create mode 100644 docs/doc_examples/046b2249bbc49e77848c114cee940f17.asciidoc create mode 100644 docs/doc_examples/0470d7101637568b9d3d1239f06325a7.asciidoc create mode 100644 docs/doc_examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc delete mode 100644 docs/doc_examples/048d8abd42d094bbdcf4452a58ccb35b.asciidoc create mode 100644 docs/doc_examples/04d586a536061ec1045d0bb2dc3d1a5f.asciidoc create mode 100644 docs/doc_examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc create mode 100644 docs/doc_examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc delete mode 100644 docs/doc_examples/04fe1e3a0047b0cdb10987b79fc3f3f3.asciidoc create mode 100644 docs/doc_examples/0502284d4685c478eb68761f979f4303.asciidoc create mode 100644 docs/doc_examples/050b3947025fee403232b8e6e9112dab.asciidoc create mode 100644 docs/doc_examples/05148cc541f447486d9daf15ab77292b.asciidoc create mode 100644 docs/doc_examples/0518c673094fb18ecb491a3b78af4695.asciidoc create mode 100644 docs/doc_examples/05284c8ea91769c09c8db47db8a6629a.asciidoc create mode 100644 docs/doc_examples/053497b6960f80fd7b005b7c6d54358f.asciidoc create mode 100644 docs/doc_examples/05500e77aef581d92f6c605f7a48f7df.asciidoc create mode 100644 docs/doc_examples/059e04aaf093379401f665c33ac796dc.asciidoc create mode 100644 docs/doc_examples/05a09078fe1016e900e445ad4039cf97.asciidoc create mode 100644 docs/doc_examples/05ba0fdd0215e313ecea8a2f8f5a43b4.asciidoc create mode 100644 docs/doc_examples/05bee3adf46b9d6a2fef96c51bf958da.asciidoc create mode 100644 docs/doc_examples/05f4a4b284f68f7fb13603d7cd854083.asciidoc create mode 100644 docs/doc_examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc create mode 100644 docs/doc_examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc create mode 100644 docs/doc_examples/060a56477e39f272fc5a9cfe47443cf1.asciidoc rename docs/doc_examples/{a6f8636b03cc5f677b7d89e750328612.asciidoc => 0620a10ff15a2bb3eb489afc24ff0131.asciidoc} (70%) create mode 100644 docs/doc_examples/06454a8e85e2d3479c90390bb955eb39.asciidoc create mode 100644 docs/doc_examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc create mode 100644 docs/doc_examples/069030e5f43d8f8ce3e3eca40205027e.asciidoc create mode 100644 docs/doc_examples/06a761823a694850a6efe5d5bf61478c.asciidoc delete mode 100644 docs/doc_examples/06afce2955f9094d96d27067ebca32e8.asciidoc create mode 
100644 docs/doc_examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc create mode 100644 docs/doc_examples/06c0db0f42223761e32fa418066b275f.asciidoc rename docs/doc_examples/{8e6bfb4441ffa15c86d5dc20fa083571.asciidoc => 06d65e3505dcb306977185e8545cf4a8.asciidoc} (63%) create mode 100644 docs/doc_examples/070cf72783cfe534a04f2f64e4016052.asciidoc create mode 100644 docs/doc_examples/0718a0b4f4905a8c90c1ff93de557e56.asciidoc create mode 100644 docs/doc_examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc create mode 100644 docs/doc_examples/0737ebaea33631f001fb3f4226948492.asciidoc create mode 100644 docs/doc_examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc create mode 100644 docs/doc_examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc create mode 100644 docs/doc_examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc create mode 100644 docs/doc_examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc create mode 100644 docs/doc_examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc create mode 100644 docs/doc_examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc create mode 100644 docs/doc_examples/07de76cb0e7f11c7533788faf8c093c3.asciidoc create mode 100644 docs/doc_examples/07ec38b97601286ec106986a84e1e5f7.asciidoc create mode 100644 docs/doc_examples/080c34d8151d02b760571e3a2899fa97.asciidoc create mode 100644 docs/doc_examples/083e514297c09e91211f0d168aef1b0b.asciidoc create mode 100644 docs/doc_examples/086ec4c5d86bbf80fb80162e94037689.asciidoc create mode 100644 docs/doc_examples/0881397074d261ccc2db514daf116c31.asciidoc create mode 100644 docs/doc_examples/08a76b3f5a8394d8f9084113334a260a.asciidoc create mode 100644 docs/doc_examples/08c9af9dd519c011deedd406f3061836.asciidoc create mode 100644 docs/doc_examples/08e08feb514b24006e13f258d617d873.asciidoc create mode 100644 docs/doc_examples/08e79ca9fdcdfebb2c6a79e6837e649d.asciidoc create mode 100644 docs/doc_examples/08f20902821a4f7a73ce7b959c5bdbdc.asciidoc create mode 100644 docs/doc_examples/091200b658023db31dffc2f08a85a9cc.asciidoc rename docs/doc_examples/{4d46dbb96125b27f46299547de9d8709.asciidoc => 0957bbd535f58c97b12ffba90813d64c.asciidoc} (60%) create mode 100644 docs/doc_examples/095d60b2cfc5004c97efc49f27287262.asciidoc create mode 100644 docs/doc_examples/095e3f21941a9cc75f398389a075152d.asciidoc create mode 100644 docs/doc_examples/09769561f082b50558fb7d8707719963.asciidoc delete mode 100644 docs/doc_examples/0989cc65d8924f666ce3eb0820d2d244.asciidoc create mode 100644 docs/doc_examples/099006ab11b52ea99693401dceee8bad.asciidoc create mode 100644 docs/doc_examples/09944369863fd8666d5301d717317276.asciidoc create mode 100644 docs/doc_examples/09a44b619a99f6bf3f01bd5e258fd22d.asciidoc create mode 100644 docs/doc_examples/09a478fe32a7b7d814083ffa5297bcdf.asciidoc create mode 100644 docs/doc_examples/09bdf9a7e22733d668476724042a406c.asciidoc create mode 100644 docs/doc_examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc delete mode 100644 docs/doc_examples/09cdd5ae8114c49886026fef8d00a19c.asciidoc create mode 100644 docs/doc_examples/09ce0ec993c494ac01f01ef9815fcc4b.asciidoc create mode 100644 docs/doc_examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc delete mode 100644 docs/doc_examples/09ecba5814d71e4c44468575eada9878.asciidoc create mode 100644 docs/doc_examples/0a3003fa5af850e415634b50b1029859.asciidoc create mode 100644 docs/doc_examples/0a3186bf20b5359393406fc0cb433313.asciidoc create mode 100644 docs/doc_examples/0a46ac2968a574ce145f197f10d30152.asciidoc rename docs/doc_examples/{23ab0f1023b1b2cd5cdf2a8f9ccfd57b.asciidoc => 
0a46cc8fe93e372909660a63dc52ae3b.asciidoc} (54%) create mode 100644 docs/doc_examples/0a650401134f07e40216f0d0d1a66a32.asciidoc create mode 100644 docs/doc_examples/0a6d56a66a2652ac6de68f8bd544a175.asciidoc create mode 100644 docs/doc_examples/0a701bdc7b6786026f40c0be8ebfc753.asciidoc rename docs/doc_examples/{5eabcdbf61bfcb484dc694f25c2bba36.asciidoc => 0a758d9dec74d9e942cf41a06499234f.asciidoc} (66%) create mode 100644 docs/doc_examples/0a84c5b7c0793be745b13eaf13e94422.asciidoc create mode 100644 docs/doc_examples/0a9173f3b22716c78653976dc4799eae.asciidoc delete mode 100644 docs/doc_examples/0a958e486ede3f519d48431ab689eded.asciidoc create mode 100644 docs/doc_examples/0ac295efdabd59e7b1f1a4577535d942.asciidoc delete mode 100644 docs/doc_examples/0ac9916f47a2483b89c1416684af322a.asciidoc create mode 100644 docs/doc_examples/0ac9e7dd7e4acba51888256326ed5ffe.asciidoc create mode 100644 docs/doc_examples/0ad86b582aff1235f37ccb2cc90adad5.asciidoc create mode 100644 docs/doc_examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc create mode 100644 docs/doc_examples/0adbce828234ca221e3d03b184296407.asciidoc create mode 100644 docs/doc_examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc delete mode 100644 docs/doc_examples/0afaf1cad692e6201aa574c8feb6e622.asciidoc create mode 100644 docs/doc_examples/0aff04881be21eea45375ec4f4f50e66.asciidoc create mode 100644 docs/doc_examples/0b1c5486f96bfa5db8db854c0178dbe5.asciidoc create mode 100644 docs/doc_examples/0b47b0bef81b9b5eecfb3775695bd6ad.asciidoc create mode 100644 docs/doc_examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc create mode 100644 docs/doc_examples/0b8fa90bc9aeeadb420ad785bd0b9953.asciidoc create mode 100644 docs/doc_examples/0b913fb9e010d877c0be015519cfddc6.asciidoc create mode 100644 docs/doc_examples/0b987b4101e016653a32d7b092d47e4c.asciidoc create mode 100644 docs/doc_examples/0ba5acede9d43af424e85428e7d35420.asciidoc delete mode 100644 docs/doc_examples/0bbd30b9be3e54ff3028b9f4459634d2.asciidoc create mode 100644 docs/doc_examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc delete mode 100644 docs/doc_examples/0bd3923424a20a4ba860b0774b9991b1.asciidoc create mode 100644 docs/doc_examples/0bef1fdefeb2956d60d52d3f38397cad.asciidoc create mode 100644 docs/doc_examples/0c05c66cfe3a2169b1ec1aba77e26db2.asciidoc create mode 100644 docs/doc_examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc create mode 100644 docs/doc_examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc create mode 100644 docs/doc_examples/0c464965126cc09e6812716a145991d4.asciidoc create mode 100644 docs/doc_examples/0c688eecf4ebdffdbe1deae0983c3ed8.asciidoc create mode 100644 docs/doc_examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc create mode 100644 docs/doc_examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc create mode 100644 docs/doc_examples/0c7c40cd17985c3dd32aeaadbafc4fce.asciidoc rename docs/doc_examples/{d90a84a24a407731dfc1929ac8327746.asciidoc => 0c892d328b73d38396aaef6d9cbcd36b.asciidoc} (68%) create mode 100644 docs/doc_examples/0ca6aae1ab2f0be6127beea8a245374e.asciidoc create mode 100644 docs/doc_examples/0cee58617e75f493c5049d77be1c49f3.asciidoc create mode 100644 docs/doc_examples/0cf29da4b9f0503bd1a79bdc883aadbc.asciidoc create mode 100644 docs/doc_examples/0d0f7ece06f21e624d21b09804732f61.asciidoc create mode 100644 docs/doc_examples/0d49474511b236bc89e768c8ee91adf1.asciidoc create mode 100644 docs/doc_examples/0d54ddad2bf6f76aa5c35f53ba77748a.asciidoc create mode 100644 docs/doc_examples/0d59af9dc556dc526b9394051efa800a.asciidoc delete mode 100644 
docs/doc_examples/0d664883151008b1051ef2c9ab2d0373.asciidoc create mode 100644 docs/doc_examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc create mode 100644 docs/doc_examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc create mode 100644 docs/doc_examples/0da477cb8a7883539ce3ae7ac1e9c5cb.asciidoc create mode 100644 docs/doc_examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc create mode 100644 docs/doc_examples/0db06c3cba57cf442ac7fab89966e1e1.asciidoc create mode 100644 docs/doc_examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc create mode 100644 docs/doc_examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc create mode 100644 docs/doc_examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc create mode 100644 docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc create mode 100644 docs/doc_examples/0e0d8f652d7d29371b5ea7c7544385eb.asciidoc create mode 100644 docs/doc_examples/0e3abd15dde97a2334621190c4ad4f96.asciidoc create mode 100644 docs/doc_examples/0e3b4a48a3450cd99c95ec46d4701b58.asciidoc create mode 100644 docs/doc_examples/0e5d25c7bb738c42d471020d678e2966.asciidoc create mode 100644 docs/doc_examples/0e5db64154a722a5cbdb84b588ce2ce8.asciidoc create mode 100644 docs/doc_examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc create mode 100644 docs/doc_examples/0e83f140237d75469a428ff403564bb5.asciidoc create mode 100644 docs/doc_examples/0e84bb54b8a9a5387f252eeffeb1098e.asciidoc create mode 100644 docs/doc_examples/0ea146b178561bc8b9002bed8a35641f.asciidoc create mode 100644 docs/doc_examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc create mode 100644 docs/doc_examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc create mode 100644 docs/doc_examples/0eb2c1284a9829224913a860190580d8.asciidoc create mode 100644 docs/doc_examples/0ec2178fb0103862b47cc20bc5885972.asciidoc create mode 100644 docs/doc_examples/0eccea755bd4f6dd47579a9022690546.asciidoc create mode 100644 docs/doc_examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc rename docs/doc_examples/{014b788c879e4aaa1020672e45e25473.asciidoc => 0f3a78296825d507dda6771f7ceb9d61.asciidoc} (61%) create mode 100644 docs/doc_examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc create mode 100644 docs/doc_examples/0f547926ebf092e19fc5fb433e9ac8c1.asciidoc create mode 100644 docs/doc_examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc create mode 100644 docs/doc_examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc create mode 100644 docs/doc_examples/0fb472645116d58ddef89ca976d15a01.asciidoc create mode 100644 docs/doc_examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc create mode 100644 docs/doc_examples/0fc4b589df5388da784c6d981e769e31.asciidoc rename docs/doc_examples/{025b54db0edc50c24ea48a2bd94366ad.asciidoc => 0fd08e14ad651827be53897a6bdaf0b8.asciidoc} (63%) create mode 100644 docs/doc_examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc create mode 100644 docs/doc_examples/100d4e33158069f3caa32e8bfa0eb3d0.asciidoc delete mode 100644 docs/doc_examples/1027ab1ca767ac1428176ef4f84bfbcf.asciidoc create mode 100644 docs/doc_examples/102c7de25d13c87cf28839ada9f63c95.asciidoc create mode 100644 docs/doc_examples/103296e16b4233926ad1f07360385606.asciidoc create mode 100644 docs/doc_examples/10535507a9735fcf06600444b9067d4c.asciidoc create mode 100644 docs/doc_examples/1070e59ba144cdf309fd9b2591612b95.asciidoc create mode 100644 docs/doc_examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc create mode 100644 docs/doc_examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc create mode 100644 docs/doc_examples/10a16abe990288253ea25a1b1712fe3d.asciidoc create mode 
100644 docs/doc_examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc create mode 100644 docs/doc_examples/10d8b17e73d31dcd907de67327ed78a2.asciidoc create mode 100644 docs/doc_examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc create mode 100644 docs/doc_examples/10de9fd4a38755020a07c4ec964d44c9.asciidoc create mode 100644 docs/doc_examples/10e4c1f246ada8c6b500d8ea6c1e335f.asciidoc create mode 100644 docs/doc_examples/10f0c8fed98455c460c374b50ffbb204.asciidoc create mode 100644 docs/doc_examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc create mode 100644 docs/doc_examples/111c31db1fd29baeaa9964eafaea6789.asciidoc create mode 100644 docs/doc_examples/111c69ca94162c1523b799a5c14723dd.asciidoc create mode 100644 docs/doc_examples/113ac8466084ee6ac4ed272e342dc468.asciidoc rename docs/doc_examples/{168bfdde773570cfc6dd3ab3574e413b.asciidoc => 1147a02afa087278e51fa365fb9e06b7.asciidoc} (80%) create mode 100644 docs/doc_examples/114d470e752efa9672ca68d7290fada8.asciidoc create mode 100644 docs/doc_examples/1153bd92ca18356db927054958cd95c6.asciidoc create mode 100644 docs/doc_examples/115529722ba30b0b0d51a7ff87e59198.asciidoc create mode 100644 docs/doc_examples/118f249a3b26c33416f641b33f2b74f8.asciidoc create mode 100644 docs/doc_examples/11c395d1649733bcab853fe31ec393b2.asciidoc create mode 100644 docs/doc_examples/11c43c4aa5435f8a99dcc0d1f03c648f.asciidoc rename docs/doc_examples/{d4b4cefba4318caeba7480187faf2b13.asciidoc => 11d9043d3050a7175069dec7e0adc963.asciidoc} (62%) create mode 100644 docs/doc_examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc create mode 100644 docs/doc_examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc delete mode 100644 docs/doc_examples/1216f8f7367df3aa823012cef310c08a.asciidoc create mode 100644 docs/doc_examples/1233be1d4c9c7ca54126f1a0693b26de.asciidoc create mode 100644 docs/doc_examples/123693835b3b85b9a2fa6fd1d3ad89c7.asciidoc create mode 100644 docs/doc_examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc create mode 100644 docs/doc_examples/128283698535116931dca9d16a16dca2.asciidoc create mode 100644 docs/doc_examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc create mode 100644 docs/doc_examples/12cb446446211f95f651e196a1f059b4.asciidoc create mode 100644 docs/doc_examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc create mode 100644 docs/doc_examples/12ec704d62ffedcb03787e6aba69d382.asciidoc create mode 100644 docs/doc_examples/12facf3617a41551ce2f0c4d005cb1c7.asciidoc create mode 100644 docs/doc_examples/1302e24b0476e0e9af7a2c890edf9f62.asciidoc create mode 100644 docs/doc_examples/1313c540fef7e7c18a066f07789673fc.asciidoc create mode 100644 docs/doc_examples/132ea3d5a0ffb6b5203e356e8329f679.asciidoc create mode 100644 docs/doc_examples/134384b8c63cfbd8d762fb01757bb3f9.asciidoc create mode 100644 docs/doc_examples/135819da3a4bde684357c57a49ad8e85.asciidoc create mode 100644 docs/doc_examples/13670d1534125831c2059eebd86d840c.asciidoc create mode 100644 docs/doc_examples/136ae86b8d497dda799cf1cb583df929.asciidoc rename docs/doc_examples/{0ba0b2db24852abccb7c0fc1098d566e.asciidoc => 137709a0a0dc38d6094291c9fc75b804.asciidoc} (61%) create mode 100644 docs/doc_examples/137c62a4443bdd7d5b95a15022a9dc30.asciidoc create mode 100644 docs/doc_examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc create mode 100644 docs/doc_examples/13917f7cfb6a382c293275ff71134ec4.asciidoc create mode 100644 docs/doc_examples/13b02da42d3afe7f0b649e1c98ac9549.asciidoc create mode 100644 docs/doc_examples/13cc51ca3a783cdbb1f1d353eaedbf23.asciidoc create mode 100644 
docs/doc_examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc create mode 100644 docs/doc_examples/13df08eefc9ba98e311793bbca74133b.asciidoc create mode 100644 docs/doc_examples/13e3fefbf55f672926aa389d76fc8bea.asciidoc create mode 100644 docs/doc_examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc create mode 100644 docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc create mode 100644 docs/doc_examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc create mode 100644 docs/doc_examples/14254a0e725044faedf9370ead76f6ce.asciidoc create mode 100644 docs/doc_examples/142de21c40e84e2e2d8d832e5b3b36db.asciidoc create mode 100644 docs/doc_examples/1445ca2e813ed1c25504107b4b11760e.asciidoc create mode 100644 docs/doc_examples/1452829804551d2d6acedd4e73b29637.asciidoc create mode 100644 docs/doc_examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc delete mode 100644 docs/doc_examples/14701dcc0cca9665fce2aace0cb62af7.asciidoc create mode 100644 docs/doc_examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc create mode 100644 docs/doc_examples/148edc235fcfbc263561f87f5533e688.asciidoc create mode 100644 docs/doc_examples/14936b96cfb8ff999a833f615ba75495.asciidoc create mode 100644 docs/doc_examples/149a0eea54cdf6ea3052af6dba2d2a63.asciidoc create mode 100644 docs/doc_examples/14a1db30e13eb1d03cfd9710ca847ebb.asciidoc create mode 100644 docs/doc_examples/14a49c13c399840e64c00b487aa820c9.asciidoc create mode 100644 docs/doc_examples/14af7e2899e64f231068bded6aaf9ec5.asciidoc create mode 100644 docs/doc_examples/14afe65afee3d43f27aaaa5b37f26a31.asciidoc create mode 100644 docs/doc_examples/14b81f96297952970b78a3216e059596.asciidoc create mode 100644 docs/doc_examples/14f124294a4a0e3a657d1468c36161cd.asciidoc rename docs/doc_examples/{be49260e1b3496c4feac38c56ebb0669.asciidoc => 14f2dab0583c5a9fcc39931d33194872.asciidoc} (73%) create mode 100644 docs/doc_examples/150b5fee5678bf8cdf0932da73eada80.asciidoc create mode 100644 docs/doc_examples/151d2b11807ec684b0c01aa89189a801.asciidoc create mode 100644 docs/doc_examples/154d703732daf5c5fcd0122e6a50213f.asciidoc create mode 100644 docs/doc_examples/156bc64c94f9f3334fbce25165d2286a.asciidoc create mode 100644 docs/doc_examples/1570976f7807b88dc8a046b833be057b.asciidoc create mode 100644 docs/doc_examples/1572696b97822d3332be51700e09672f.asciidoc create mode 100644 docs/doc_examples/1598a0fec6b1ca78cadbaba65f465196.asciidoc create mode 100644 docs/doc_examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc create mode 100644 docs/doc_examples/15c76cc8a038f686395053a240262929.asciidoc create mode 100644 docs/doc_examples/15d4be58359542775f4aff88e6d8adb5.asciidoc create mode 100644 docs/doc_examples/15d948d593d2624ac5e2b155052048f0.asciidoc delete mode 100644 docs/doc_examples/15dad5338065baaaa7d475abe85f4c22.asciidoc create mode 100644 docs/doc_examples/15e90b82827c8512670820cf856a9c71.asciidoc create mode 100644 docs/doc_examples/1605be45a5711d1929d6ad2d1ae0f797.asciidoc create mode 100644 docs/doc_examples/160986f49758f4e8345d183a842f6351.asciidoc create mode 100644 docs/doc_examples/160de80948e0c7db49b1c311848a66a2.asciidoc create mode 100644 docs/doc_examples/160f39a50847bad0be4be1529a95e4ce.asciidoc create mode 100644 docs/doc_examples/16239fe9f0b0dcfd5ea64c08c6fed21d.asciidoc create mode 100644 docs/doc_examples/16351d99d0608789d04a0bb11a537098.asciidoc create mode 100644 docs/doc_examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc create mode 100644 docs/doc_examples/1648dd31d0fef01e7504ebeb687f4f30.asciidoc create mode 100644 
docs/doc_examples/16535685833419f0033545ffce4fdf00.asciidoc create mode 100644 docs/doc_examples/1659420311d907d9fc024b96f4150216.asciidoc create mode 100644 docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc create mode 100644 docs/doc_examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc create mode 100644 docs/doc_examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc create mode 100644 docs/doc_examples/169b39bb889ecd47541bed3e48725488.asciidoc create mode 100644 docs/doc_examples/170c8a3fb81a4e93cd3034a3b5a43ac9.asciidoc create mode 100644 docs/doc_examples/172155ca4bf6dfcbd489453f50739396.asciidoc create mode 100644 docs/doc_examples/17266cee5eaaddf08e5534bf580a1910.asciidoc create mode 100644 docs/doc_examples/172b18e435c400bed85227624de3acfd.asciidoc create mode 100644 docs/doc_examples/172d150e56a225155a62c7b18bf8da67.asciidoc create mode 100644 docs/doc_examples/1736545c8b5674f6d311f3277eb387f1.asciidoc create mode 100644 docs/doc_examples/173b190078621415a80e851eaf794e8a.asciidoc create mode 100644 docs/doc_examples/1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc create mode 100644 docs/doc_examples/17566e23c191f1004a2719f2c4242307.asciidoc create mode 100644 docs/doc_examples/178be73b74ba9f297429e32267084ac7.asciidoc rename docs/doc_examples/{8653e76676de5d327201b77512afa3a0.asciidoc => 178c920d5e8ec0071f77290fa059802c.asciidoc} (65%) create mode 100644 docs/doc_examples/17a1e308761afd3282f13d44d7be008a.asciidoc create mode 100644 docs/doc_examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc create mode 100644 docs/doc_examples/17dd67a66c49f7eb618dd17430e48dfa.asciidoc create mode 100644 docs/doc_examples/17e6f3fac556f08a78f7a876e71acb89.asciidoc create mode 100644 docs/doc_examples/17f8a8990b0166befa3bc2b10fd28134.asciidoc rename docs/doc_examples/{e21e1c26dc8687e7bf7bd2bf019a6698.asciidoc => 17fb298fb1e47f7d946a772d68f4e2df.asciidoc} (60%) create mode 100644 docs/doc_examples/182df084f028479ecbe8d7648ddad892.asciidoc create mode 100644 docs/doc_examples/186a7143d50e8c3ee01094e1a9ff0c0c.asciidoc create mode 100644 docs/doc_examples/187733e50c60350f3f75921bea3b72c2.asciidoc create mode 100644 docs/doc_examples/187e8786e0a90f1f6278cf89b670de0a.asciidoc create mode 100644 docs/doc_examples/188e6208cccb13027a5c1c95440841ee.asciidoc create mode 100644 docs/doc_examples/189f0cd1ee2485cf11a2968f01d54e5b.asciidoc create mode 100644 docs/doc_examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc create mode 100644 docs/doc_examples/190a21e32db2125ddaea0f634e126a84.asciidoc create mode 100644 docs/doc_examples/19174d872fd1e43cbfb7a96a33d13c96.asciidoc create mode 100644 docs/doc_examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc create mode 100644 docs/doc_examples/193234bb5dc6451fd15b584fbefd2446.asciidoc create mode 100644 docs/doc_examples/193d86b6cc34e12c2be806d27816a35c.asciidoc create mode 100644 docs/doc_examples/194bbac15e709174ac85b681f3a3d137.asciidoc create mode 100644 docs/doc_examples/196aed02b11def364bab84e455c1a073.asciidoc create mode 100644 docs/doc_examples/199f5165d876267080046c907e93483f.asciidoc create mode 100644 docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc create mode 100644 docs/doc_examples/19ee488226d357d1576e7d3ae7a4693f.asciidoc create mode 100644 docs/doc_examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc create mode 100644 docs/doc_examples/1a1f3421717ff744ed83232729289bb0.asciidoc create mode 100644 docs/doc_examples/1a2890b90f3699fc2a4f27f94b145be9.asciidoc create mode 100644 docs/doc_examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc create mode 
100644 docs/doc_examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc
create mode 100644 docs/doc_examples/1a4f8beb6847678880ca113ee6fb75ca.asciidoc
create mode 100644 docs/doc_examples/1a56df055b94466ca76818e0858752c6.asciidoc
create mode 100644 docs/doc_examples/1a6dbe5df488c4a16e2f1101ba8a25d9.asciidoc
create mode 100644 docs/doc_examples/1a81fe0186369838531e116e85aa4ccd.asciidoc
create mode 100644 docs/doc_examples/1a8d92e93481c432a91f7c213099800a.asciidoc
create mode 100644 docs/doc_examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc
create mode 100644 docs/doc_examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc
create mode 100644 docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc
create mode 100644 docs/doc_examples/1adee74383e5594e45c937177d75aa2a.asciidoc
create mode 100644 docs/doc_examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc
create mode 100644 docs/doc_examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc
create mode 100644 docs/doc_examples/1b0b29e5cd7550c648d0892378e93804.asciidoc
create mode 100644 docs/doc_examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc
create mode 100644 docs/doc_examples/1b0f40959a7a4d124372f2bd3f7eac85.asciidoc
create mode 100644 docs/doc_examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc
create mode 100644 docs/doc_examples/1b3762712c14a19e8c2956b4f530d327.asciidoc
create mode 100644 docs/doc_examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc
create mode 100644 docs/doc_examples/1b47d988b218ee595430ec91eba91d80.asciidoc
create mode 100644 docs/doc_examples/1b5c8d6e61930a308008b5b1ace2aa07.asciidoc
delete mode 100644 docs/doc_examples/1b8655e6ba99fe39933c6eafe78728b7.asciidoc
delete mode 100644 docs/doc_examples/1b8caf0a6741126c6d0ad83b56fce290.asciidoc
create mode 100644 docs/doc_examples/1b98b60d8e558fcccf9c550bdbf5b5c9.asciidoc
create mode 100644 docs/doc_examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc
delete mode 100644 docs/doc_examples/1bc731a4df952228af6dfa6b48627332.asciidoc
create mode 100644 docs/doc_examples/1bceb160ed2bcd51ee040caf21acf780.asciidoc
create mode 100644 docs/doc_examples/1c142bc8cac8d9dcb4f60e22902d434f.asciidoc
create mode 100644 docs/doc_examples/1c1f2a6a193d9e64c37242b2824b3031.asciidoc
create mode 100644 docs/doc_examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc
create mode 100644 docs/doc_examples/1c87b5bf682bc1e8809a657529e14b07.asciidoc
create mode 100644 docs/doc_examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc
create mode 100644 docs/doc_examples/1cab9da122778a95061831265c250cc1.asciidoc
create mode 100644 docs/doc_examples/1cadbcf2cfeb312f73b7f098291356ac.asciidoc
create mode 100644 docs/doc_examples/1cb3b45335ab1b9697c358104d44ea39.asciidoc
create mode 100644 docs/doc_examples/1cca4bb2f0ea7e43181be8bd965149d4.asciidoc
create mode 100644 docs/doc_examples/1cd3b9d65576a9212eef898eb3105758.asciidoc
create mode 100644 docs/doc_examples/1cea60c47d5c0e150b4c8fff4cd75ffe.asciidoc
create mode 100644 docs/doc_examples/1ceaa211756e2db3d48c6bc4b1a861b0.asciidoc
create mode 100644 docs/doc_examples/1cecd4d87a92427175157d41859df2af.asciidoc
create mode 100644 docs/doc_examples/1cfa04e9654c1484e3d4c75bf439400a.asciidoc
create mode 100644 docs/doc_examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc
create mode 100644 docs/doc_examples/1d746272a7511bf91302a15b5c58ca0e.asciidoc
create mode 100644 docs/doc_examples/1d827ae674970692643ea81991e5396e.asciidoc
create mode 100644 docs/doc_examples/1d918e206ad8dab916e59183da24d9ec.asciidoc
create mode 100644 docs/doc_examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc
delete mode 100644 docs/doc_examples/1da77e114459e0b77d78a3dcc8fae429.asciidoc
create mode 100644 docs/doc_examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc
create mode 100644 docs/doc_examples/1db086021e83205b6eab3b7765911cc2.asciidoc
create mode 100644 docs/doc_examples/1db715eb00832686ecddb6603684fc26.asciidoc
create mode 100644 docs/doc_examples/1e08e054c761353f99211cd18e8ca47b.asciidoc
create mode 100644 docs/doc_examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc
create mode 100644 docs/doc_examples/1e0f203aced9344382081ab095c44dde.asciidoc
create mode 100644 docs/doc_examples/1e26353d546d733634187b8c3a7837a7.asciidoc
create mode 100644 docs/doc_examples/1e2c5cef7a3f254c71a33865eb4d7569.asciidoc
create mode 100644 docs/doc_examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc
create mode 100644 docs/doc_examples/1e3553a73da487017f7a95088b6aa957.asciidoc
delete mode 100644 docs/doc_examples/1e49eba5b9042c1900a608fe5105ba43.asciidoc
create mode 100644 docs/doc_examples/1e4b17b830ead15087ccd96151a5ebde.asciidoc
delete mode 100644 docs/doc_examples/1e50d993bd6517e6c381e82d09f0389e.asciidoc
create mode 100644 docs/doc_examples/1e547696f54582840040b1aa6661760c.asciidoc
create mode 100644 docs/doc_examples/1e871f060dbe1a5c316ed205278804a8.asciidoc
create mode 100644 docs/doc_examples/1e94a2bb95bc245bcfb87ac7d611cf49.asciidoc
rename docs/doc_examples/{c48264ec5d9b9679fddd72e5c44425b9.asciidoc => 1e9cab0b2727624e22e8cf4e7ca498ac.asciidoc} (75%)
create mode 100644 docs/doc_examples/1ea24f67fbbb6293d53caf2fe0c4b984.asciidoc
create mode 100644 docs/doc_examples/1eb9c6ecb827ca69f7b17f7d2a26eae9.asciidoc
create mode 100644 docs/doc_examples/1ec66f188f681598cb5d7df700b214e3.asciidoc
create mode 100644 docs/doc_examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc
create mode 100644 docs/doc_examples/1ed77bf308fa4ab328b36060e412f500.asciidoc
create mode 100644 docs/doc_examples/1eea46b08610972b79fdc4649748455d.asciidoc
create mode 100644 docs/doc_examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc
create mode 100644 docs/doc_examples/1f00e73c144603e97f6c14ab15fa1913.asciidoc
create mode 100644 docs/doc_examples/1f13c7caef9c2fe0f73fce8795bbc9b0.asciidoc
delete mode 100644 docs/doc_examples/1f336ecc62480c1d56351cc2f82d0d08.asciidoc
create mode 100644 docs/doc_examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc
create mode 100644 docs/doc_examples/1f507659757e2844cefced25848540a0.asciidoc
rename docs/doc_examples/{3ae03ba3b56e5e287953094050766738.asciidoc => 1f673e1a0de2970dc648618d5425a994.asciidoc} (51%)
create mode 100644 docs/doc_examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc
delete mode 100644 docs/doc_examples/1f6fe6833686e38c3711c6f2aa00a078.asciidoc
create mode 100644 docs/doc_examples/1f8a6d2cc57ed8997a52354aca371aac.asciidoc
rename docs/doc_examples/{1577e6e806b3283c9e99f1596d310754.asciidoc => 1f900f7178e80051e75d4fd04467cf49.asciidoc} (58%)
create mode 100644 docs/doc_examples/1fcc4a3280be399753dcfd5c489ff682.asciidoc
create mode 100644 docs/doc_examples/1fddbd602a6acf896a393cdb500a2831.asciidoc
create mode 100644 docs/doc_examples/1fe2ed1d65c4774755de44c9b9d6ed67.asciidoc
create mode 100644 docs/doc_examples/1ff12523efbd59c213c676937757c460.asciidoc
create mode 100644 docs/doc_examples/1ff296e868635fd102239871a331331b.asciidoc
create mode 100644 docs/doc_examples/1ff9b263b7c3e83278bb6a776a51590a.asciidoc
create mode 100644 docs/doc_examples/20005d8a6555b259b299d862cd218701.asciidoc
create mode 100644 docs/doc_examples/2006f577a113bda40905cf7b405bf1cf.asciidoc
create mode 100644 docs/doc_examples/2009f2d1ba0780a799a0fdce889c9739.asciidoc
create mode 100644 docs/doc_examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc
create mode 100644 docs/doc_examples/20162e1dac807a7604f58dad814d1bc5.asciidoc
create mode 100644 docs/doc_examples/203c3bb334384bdfb11ff1101ccfba25.asciidoc
create mode 100644 docs/doc_examples/20407c847adb8393ce41dc656384afc4.asciidoc
create mode 100644 docs/doc_examples/2051ffe025550ab6645bfd525eaed3c4.asciidoc
create mode 100644 docs/doc_examples/2063713516847eef5d1dbf4ca1e877b0.asciidoc
create mode 100644 docs/doc_examples/206c723296be8ef8d58aef3ee01f5ba2.asciidoc
create mode 100644 docs/doc_examples/206d57bf0cb022c8229894e7753eca83.asciidoc
create mode 100644 docs/doc_examples/2081739da0c69de8af6f5bf9e94433e6.asciidoc
create mode 100644 docs/doc_examples/208c2b41bd1659aae8f02fa3e3b7378a.asciidoc
rename docs/doc_examples/{a7c15fe6b5779c84ce9a34bf4b2a7ab7.asciidoc => 209a9190082498f0b7daa26f8834846b.asciidoc} (55%)
create mode 100644 docs/doc_examples/20bc71cc5bbe04184e27827f3777a406.asciidoc
create mode 100644 docs/doc_examples/20c595907b4afbf26bd60e816a6ddf6a.asciidoc
create mode 100644 docs/doc_examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc
create mode 100644 docs/doc_examples/20f62d0540bf6261549bd286416eae28.asciidoc
create mode 100644 docs/doc_examples/2105f2d1d81977054a93163a175793ce.asciidoc
delete mode 100644 docs/doc_examples/213ab768f1b6a895e09403a0880e259a.asciidoc
create mode 100644 docs/doc_examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc
create mode 100644 docs/doc_examples/21565b72da426776e445b1a166f6e104.asciidoc
create mode 100644 docs/doc_examples/216a6573ab4ab023e5dcac4eaa08c3c8.asciidoc
rename docs/doc_examples/{a8fba09a46b2c3524428aa3259b7124f.asciidoc => 21715c32c140feeab04b38ff6d6de111.asciidoc} (78%)
create mode 100644 docs/doc_examples/2185c9dfc62a59313df1702ec1c3513e.asciidoc
create mode 100644 docs/doc_examples/218b9009f120e8ad33f710e019179562.asciidoc
create mode 100644 docs/doc_examples/21a226d91d8edd209f6a821064e83918.asciidoc
create mode 100644 docs/doc_examples/21bb03ca9123de3237c1c76934f9f172.asciidoc
create mode 100644 docs/doc_examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc
create mode 100644 docs/doc_examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc
create mode 100644 docs/doc_examples/21d0ab6e420bfe7a1639db6af5b2e9c0.asciidoc
create mode 100644 docs/doc_examples/21d5fe55ca32b10b118224ea1a8a2e04.asciidoc
create mode 100644 docs/doc_examples/21e95d29bc37deb5689a654aa323b4ba.asciidoc
create mode 100644 docs/doc_examples/221e9b14567f950008459af77757750e.asciidoc
create mode 100644 docs/doc_examples/2224143c45dfc83a2d10b98cd4f94bb5.asciidoc
create mode 100644 docs/doc_examples/222e49c924ca8bac7b41bc952a39261c.asciidoc
create mode 100644 docs/doc_examples/2238ac4170275f6cfc2af49c3f014cbc.asciidoc
create mode 100644 docs/doc_examples/22619a4111f66e1b7231693b8f8d069a.asciidoc
create mode 100644 docs/doc_examples/22882d4eb8b99f44c8e0d3a2c893fc4b.asciidoc
create mode 100644 docs/doc_examples/229b83cbcd8efa1b0288a728a2abacb4.asciidoc
create mode 100644 docs/doc_examples/22cb99d4e6ba3101a2d9f59764a90877.asciidoc
create mode 100644 docs/doc_examples/22d8e92b4100f8e4f52260ef8d3aa2b2.asciidoc
create mode 100644 docs/doc_examples/22dd833336fa22c8a8f67bb754ffba9a.asciidoc
create mode 100644 docs/doc_examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc
create mode 100644 docs/doc_examples/22ef90a7fb057728d2115f0c6f551819.asciidoc
create mode 100644 docs/doc_examples/23074748d6c978176df5b04265e88938.asciidoc
create mode 100644 docs/doc_examples/2308c9948cbebd2092eec03b11281005.asciidoc
create mode 100644 docs/doc_examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc
delete mode 100644 docs/doc_examples/231aa0bb39c35fe199d28fe0e4a62b2e.asciidoc
create mode 100644 docs/doc_examples/2342a56279106ea643026df657bf7f88.asciidoc
create mode 100644 docs/doc_examples/234cec3ead32d7ed71afbe1edfea23df.asciidoc
create mode 100644 docs/doc_examples/236f50d89a07b83119af72e367e685da.asciidoc
create mode 100644 docs/doc_examples/239f615e0009c5cb1dc4e82ec4c0dab5.asciidoc
create mode 100644 docs/doc_examples/23af230e824f48b9cd56a4cf973d788c.asciidoc
create mode 100644 docs/doc_examples/23b062c157235246d7c347b9047b2435.asciidoc
create mode 100644 docs/doc_examples/23c4ae62f7035f2796e0ac3c7c4c20a9.asciidoc
create mode 100644 docs/doc_examples/2408020186af569a76a30eccadaed0d5.asciidoc
create mode 100644 docs/doc_examples/24275847128b68da6e14233aa1259fb9.asciidoc
create mode 100644 docs/doc_examples/242a26ced0e5706e48dcda19a4003094.asciidoc
create mode 100644 docs/doc_examples/2493c25e1ef944bc4de0f726470bcdec.asciidoc
create mode 100644 docs/doc_examples/249bf48252c8cea47ef872541c8a884c.asciidoc
create mode 100644 docs/doc_examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc
create mode 100644 docs/doc_examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc
create mode 100644 docs/doc_examples/24bdccb07bba7e7e6ff45d3d4cd83064.asciidoc
create mode 100644 docs/doc_examples/24d66b2ebdf662d8b03e17214e65c825.asciidoc
create mode 100644 docs/doc_examples/24d806d1803158dacd4dda73c4204d3e.asciidoc
create mode 100644 docs/doc_examples/24f4dfdf9922d5aa79151675b7767742.asciidoc
delete mode 100644 docs/doc_examples/251ea12c1248385ab409906ac64d9ee9.asciidoc
create mode 100644 docs/doc_examples/253140cb1e270e5ee23e15dbaeaaa0ea.asciidoc
delete mode 100644 docs/doc_examples/2533e4b36ae837eaecda08407ecb6383.asciidoc
create mode 100644 docs/doc_examples/25576b6773322f0929d4c635a940dba0.asciidoc
create mode 100644 docs/doc_examples/256eba7a77c8890a43afeda8ce8a3225.asciidoc
create mode 100644 docs/doc_examples/25737fd456fd317cc4cc2db76b6cf28e.asciidoc
create mode 100644 docs/doc_examples/2577acb462b95bd4394523cf2f8a661f.asciidoc
create mode 100644 docs/doc_examples/2592e5361f7ea3b3dd1840f63d760dae.asciidoc
create mode 100644 docs/doc_examples/25981b7b3d55b87e1484586d57b695b1.asciidoc
create mode 100644 docs/doc_examples/25a0dad6547d432f5a3d394528f1c138.asciidoc
create mode 100644 docs/doc_examples/25ae1a698f867ba5139605cc952436c0.asciidoc
create mode 100644 docs/doc_examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc
create mode 100644 docs/doc_examples/25cb9e1da00dfd971065ce182467434d.asciidoc
create mode 100644 docs/doc_examples/25d40d3049e57e2bb70c2c5b88bd7b87.asciidoc
create mode 100644 docs/doc_examples/25ecfe423548ac1d7cc86de4a18c48c6.asciidoc
create mode 100644 docs/doc_examples/25ed47fcb890fcf8d8518ae067362d18.asciidoc
create mode 100644 docs/doc_examples/261480571394632db40e88fbb6c59c2f.asciidoc
create mode 100644 docs/doc_examples/26168987f799cdc4ee4151c85ba7afc5.asciidoc
create mode 100644 docs/doc_examples/262196e4323dfc1f8e6daf77d7ba3b6a.asciidoc
create mode 100644 docs/doc_examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc
create mode 100644 docs/doc_examples/262a778d754add491fbc9c721ac25bf0.asciidoc
create mode 100644 docs/doc_examples/26419320085434680142567d5fda9c35.asciidoc
create mode 100644 docs/doc_examples/2643b8c512cb3f3449259cdf498c6ab5.asciidoc
create mode 100644 docs/doc_examples/2646710ece0c4c843aebeacd370d0396.asciidoc
create mode 100644 docs/doc_examples/268151ed1f0e12586e66e614b61d7981.asciidoc
create mode 100644 docs/doc_examples/26abfc49c238c2b5d259983ac38dbcee.asciidoc
create mode 100644 docs/doc_examples/26bd8c027c82cd72c007c10fa66dc97f.asciidoc
create mode 100644 docs/doc_examples/26d3ab748a855eb383e992eb1ff79662.asciidoc
create mode 100644 docs/doc_examples/26f237f9bf14e8b972cc33ff6aebefa2.asciidoc
create mode 100644 docs/doc_examples/270549e6b062228312c4e7a54a2c2209.asciidoc
create mode 100644 docs/doc_examples/2716453454dbf9c6dde2ea6850a62214.asciidoc
create mode 100644 docs/doc_examples/271fe0b452b62189505ce4a1d6f8bde1.asciidoc
create mode 100644 docs/doc_examples/2720e613d520ce352b62e990c2d283f7.asciidoc
create mode 100644 docs/doc_examples/2731a8577ad734a732d784c5dcb1225d.asciidoc
create mode 100644 docs/doc_examples/27384266370152add76471dd0332a2f1.asciidoc
create mode 100644 docs/doc_examples/2740b69e7246ac6d1ad249382f21d534.asciidoc
create mode 100644 docs/doc_examples/274feaaa727e0ddf61b3c0f093182839.asciidoc
create mode 100644 docs/doc_examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc
create mode 100644 docs/doc_examples/27600d6a78623b69689d4218618e4278.asciidoc
create mode 100644 docs/doc_examples/276e5b71ff5c6879a9b819076ad82301.asciidoc
create mode 100644 docs/doc_examples/277fefe2b623af61f8274f73efc97aed.asciidoc
rename docs/doc_examples/{fbcf5078a6a9e09790553804054c36b3.asciidoc => 278d5bfa1a01f91d5c84679ef1bca390.asciidoc} (75%)
create mode 100644 docs/doc_examples/2793fa53b7d269852aa74f6bf57e34dc.asciidoc
create mode 100644 docs/doc_examples/279e2b29261971999923fdc658bba8ff.asciidoc
create mode 100644 docs/doc_examples/27f9f604e7a48799fa30529cbc0ff619.asciidoc
create mode 100644 docs/doc_examples/2826510e4aeb1c0d8dc43d317ed7624a.asciidoc
create mode 100644 docs/doc_examples/282e9e845b606f29a5bba174ae4c4c4d.asciidoc
create mode 100644 docs/doc_examples/28415647fced5f983b42f8435332a625.asciidoc
create mode 100644 docs/doc_examples/28543836b62b5622a402e6f7731d68f0.asciidoc
rename docs/doc_examples/{609260ad1d5998be2ca09ff1fe237efa.asciidoc => 2856a5ceff1861aa9a78099f1c517fe7.asciidoc} (80%)
create mode 100644 docs/doc_examples/2864a24608b3ac59d21f604f8a31d131.asciidoc
create mode 100644 docs/doc_examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc
create mode 100644 docs/doc_examples/2879d7bf4167194b102bf97117327164.asciidoc
create mode 100644 docs/doc_examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc
create mode 100644 docs/doc_examples/2897ccc2a3bf3d0cd89328ee4413fae5.asciidoc
create mode 100644 docs/doc_examples/2898cf033b5bdefdbe3723af850b25c5.asciidoc
delete mode 100644 docs/doc_examples/28aad2c5942bfb221c2bf1bbdc01658e.asciidoc
create mode 100644 docs/doc_examples/28ac880057135e46b3b00c7f3976538c.asciidoc
create mode 100644 docs/doc_examples/291110f4cac02f4610d0853f5800a70d.asciidoc
rename docs/doc_examples/{cb01106bf524df5e0501d4c655c1aa7b.asciidoc => 2932e6f71e247cf52e11d2f38f114ddf.asciidoc} (53%)
create mode 100644 docs/doc_examples/295b3aaeb223612afdd991744dc9c873.asciidoc
create mode 100644 docs/doc_examples/29783e5de3a5f3c985cbf11094cf49a0.asciidoc
create mode 100644 docs/doc_examples/29824032d7d64512d17458fdd687b1f6.asciidoc
create mode 100644 docs/doc_examples/29953082744b7a36e437b392a6391c81.asciidoc
rename docs/doc_examples/{99a52be903945b17e734a1d02a57e958.asciidoc => 299900fb08da80fe455cf3f1bb7d62ee.asciidoc} (73%)
create mode 100644 docs/doc_examples/29d9df958de292cec50daaf31844b573.asciidoc
create mode 100644 docs/doc_examples/29e002ab596bae58712eb048ac1768d1.asciidoc
create mode 100644 docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc
create mode 100644 docs/doc_examples/2a247e36a86a373bcbf478ac9a588f44.asciidoc
create mode 100644 docs/doc_examples/2a287d213a812b98d8353c563a058cfc.asciidoc
create mode 100644 docs/doc_examples/2a44d254e6e32abe97515fd2eb34705d.asciidoc
create mode 100644 docs/doc_examples/2a47d11c6e19c9da5104e738359ea8a8.asciidoc
create mode 100644 docs/doc_examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc
create mode 100644 docs/doc_examples/2a70194ebd2f01a3229a5092513676b3.asciidoc
create mode 100644 docs/doc_examples/2a71e2d7f7179dd76183d30789046808.asciidoc
create mode 100644 docs/doc_examples/2a91e1fb8ad93a188fa9d77ec01bc431.asciidoc
create mode 100644 docs/doc_examples/2a9747bcfaf1f9491ebd410b3fcb6798.asciidoc
create mode 100644 docs/doc_examples/2a9d3119a9e26e29220be436b9382955.asciidoc
create mode 100644 docs/doc_examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc
create mode 100644 docs/doc_examples/2abfe0d3f5593d23d2dfa608b1e2532a.asciidoc
create mode 100644 docs/doc_examples/2ac37c3c572170ded67f1d5a0c8151ab.asciidoc
create mode 100644 docs/doc_examples/2ac7efe3919ee0c7971f5d502f482662.asciidoc
rename docs/doc_examples/{3f3b3e207f79303ce6f86e03e928e062.asciidoc => 2ad35a13262f98574a48f88b4a838512.asciidoc} (74%)
create mode 100644 docs/doc_examples/2ade05fb3fb06a67df25e097dfadb045.asciidoc
create mode 100644 docs/doc_examples/2aec92bc31bc24bce58d983738f9e0fe.asciidoc
create mode 100644 docs/doc_examples/2afc1231679898bd864d06679d9e951b.asciidoc
create mode 100644 docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc
create mode 100644 docs/doc_examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc
create mode 100644 docs/doc_examples/2b47be4b712147a429102aef386470ee.asciidoc
create mode 100644 docs/doc_examples/2b59b014349d45bf894aca90b2b1fbe0.asciidoc
create mode 100644 docs/doc_examples/2b5a5f8689f04d095fa86570130ee4d4.asciidoc
create mode 100644 docs/doc_examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc
create mode 100644 docs/doc_examples/2b7687e3d7c06824950e00618c297864.asciidoc
create mode 100644 docs/doc_examples/2ba15c066d55a9b26d49b09471151cb4.asciidoc
create mode 100644 docs/doc_examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc
delete mode 100644 docs/doc_examples/2bb2339ac055337abf753bddb7771659.asciidoc
create mode 100644 docs/doc_examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc
create mode 100644 docs/doc_examples/2bc1d52efec2076dc9fc2a3a2d90e8ab.asciidoc
create mode 100644 docs/doc_examples/2bc57cd3f32b59b0b44ca63b19cdfcc0.asciidoc
create mode 100644 docs/doc_examples/2c090fe7ec7b66b3f5c178d71c46323b.asciidoc
create mode 100644 docs/doc_examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc
create mode 100644 docs/doc_examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc
create mode 100644 docs/doc_examples/2c27a8eb6528126f37a843d434cd88b6.asciidoc
create mode 100644 docs/doc_examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc
create mode 100644 docs/doc_examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc
create mode 100644 docs/doc_examples/2c44657adf550b8ade5cf5334106d38b.asciidoc
create mode 100644 docs/doc_examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc
create mode 100644 docs/doc_examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc
rename docs/doc_examples/{e17e8852ec3f31781e1364f4dffeb6d0.asciidoc => 2ceded6ee764adf1aaaac0a1cd25ed5f.asciidoc} (54%)
create mode 100644 docs/doc_examples/2d01a9e5550b525496757f1bd7f0e706.asciidoc
create mode 100644 docs/doc_examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc
create mode 100644 docs/doc_examples/2d2f5ec97aa34ff7822a6a1ed08ef335.asciidoc
create mode 100644 docs/doc_examples/2d37b02cbf6d30ae11bf239a54ec9423.asciidoc
create mode 100644 docs/doc_examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc
create mode 100644 docs/doc_examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc
create mode 100644 docs/doc_examples/2d9b30acd6b5683f39d53494c0dd779c.asciidoc
create mode 100644 docs/doc_examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc
create mode 100644 docs/doc_examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc
create mode 100644 docs/doc_examples/2e364833626c9790c042c8f006fcc999.asciidoc
create mode 100644 docs/doc_examples/2e36fe22051a47e052e349854d9948b9.asciidoc
create mode 100644 docs/doc_examples/2e3d1b293da93f2a9ecfc26786ec28d6.asciidoc
create mode 100644 docs/doc_examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc
create mode 100644 docs/doc_examples/2e7f4b9be999422a12abb680572b13c8.asciidoc
create mode 100644 docs/doc_examples/2e847378ba26aa64d40186b6e3e6a1da.asciidoc
create mode 100644 docs/doc_examples/2e93eaaebf75fa4a2451e8a76ffa9f20.asciidoc
create mode 100644 docs/doc_examples/2ebcdd00ccbf26b4c8e6d9c80dfb3d55.asciidoc
create mode 100644 docs/doc_examples/2ec8d757188349a4630e120ba2c98c3b.asciidoc
create mode 100644 docs/doc_examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc
create mode 100644 docs/doc_examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc
create mode 100644 docs/doc_examples/2eebaeb3983a04ef7a9201c1f4d40dc1.asciidoc
create mode 100644 docs/doc_examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc
create mode 100644 docs/doc_examples/2f0b2181c434a879a23b4643bdd92575.asciidoc
create mode 100644 docs/doc_examples/2f195eeb93229e40c4d8f1a6ab4a358c.asciidoc
create mode 100644 docs/doc_examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc
rename docs/doc_examples/{cf02e3d8b371bd59f0224967c36330da.asciidoc => 2f2fd35905feef0b561c05d70c7064c1.asciidoc} (78%)
create mode 100644 docs/doc_examples/2f4a55dfeba8851b306ef9c1b216ef54.asciidoc
create mode 100644 docs/doc_examples/2f4e28c81db47547ad39d0926babab12.asciidoc
create mode 100644 docs/doc_examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc
create mode 100644 docs/doc_examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc
rename docs/doc_examples/{fe5763d32955e8b65eb3048e97b1580c.asciidoc => 2f98924c3d593ea2b60edb9cef5bee22.asciidoc} (73%)
create mode 100644 docs/doc_examples/2fa45d74ba9933188c4728f8a9e5372c.asciidoc
create mode 100644 docs/doc_examples/2fa7ded8515b32f26c54394ea598f573.asciidoc
create mode 100644 docs/doc_examples/2fc2c790a85be29bbcba50bdde1493f4.asciidoc
create mode 100644 docs/doc_examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc
create mode 100644 docs/doc_examples/2fd0b3c132b46aa34cc9d92dd2d4bc85.asciidoc
create mode 100644 docs/doc_examples/2fd458d37aab509fe2d970c0b6e2a10f.asciidoc
create mode 100644 docs/doc_examples/2fea3e324939cc7e9c396964aeee7111.asciidoc
create mode 100644 docs/doc_examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc
create mode 100644 docs/doc_examples/2ffa953b29ed0156c9e610daf66b8e48.asciidoc
create mode 100644 docs/doc_examples/300576666769b78fa6fa26b232837f81.asciidoc
create mode 100644 docs/doc_examples/305c4cfb2ad4b58b4c319ffbf32336cc.asciidoc
create mode 100644 docs/doc_examples/3082ae0c3ecdc61808103214631b40c6.asciidoc
create mode 100644 docs/doc_examples/309f0721145b5c656338a02459c3ff1e.asciidoc
create mode 100644 docs/doc_examples/30abc76a39e551f4b52c65002bb6405d.asciidoc
create mode 100644 docs/doc_examples/30bd3c0785f3df4795684754adeb5ecb.asciidoc
create mode 100644 docs/doc_examples/30db2702dd0071c72a090b8311d0db09.asciidoc
create mode 100644 docs/doc_examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc
create mode 100644 docs/doc_examples/3166455372f2d96622caff076e91ebe7.asciidoc
create mode 100644 docs/doc_examples/316cd43feb3b86396483903af1a048b1.asciidoc
create mode 100644 docs/doc_examples/3182f26c61fbe5cf89400804533d5ed2.asciidoc
create mode 100644 docs/doc_examples/318e209cc4d6f306e65cb2f5598a50b1.asciidoc
create mode 100644 docs/doc_examples/31a79a57b242713edec6795599ba0d5d.asciidoc
create mode 100644 docs/doc_examples/31ab4ec26176857280af630bf84a2823.asciidoc
create mode 100644 docs/doc_examples/31ac1b68dc7c26a1d37350be47ae9381.asciidoc
create mode 100644 docs/doc_examples/31aed390c30bd4f42a5c56253695e53f.asciidoc
create mode 100644 docs/doc_examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc
create mode 100644 docs/doc_examples/31f4400716500149cccbc19aa06bff66.asciidoc
create mode 100644 docs/doc_examples/320645d771e952af2a67bb7445c3688d.asciidoc
create mode 100644 docs/doc_examples/32123981430e5a8b34fe14314fc48429.asciidoc
create mode 100644 docs/doc_examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc
create mode 100644 docs/doc_examples/3250a8d2d2a9619035040e55a03620b9.asciidoc
create mode 100644 docs/doc_examples/327466380bcd55361973b4a96c6dccb2.asciidoc
create mode 100644 docs/doc_examples/32a7acdfb7046966b28f394476c99126.asciidoc
create mode 100644 docs/doc_examples/32af23a4b0fea6c81c4688ce5fe4ac35.asciidoc
create mode 100644 docs/doc_examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc
create mode 100644 docs/doc_examples/32b8a5152b47930f2e16c40c8615c7bb.asciidoc
create mode 100644 docs/doc_examples/32cd57666bc80b8cf793d06fa1086669.asciidoc
create mode 100644 docs/doc_examples/32ce26b8af95f7ccc2a7bd5e77a39d6c.asciidoc
create mode 100644 docs/doc_examples/32de5dd306bd014d67053d2f175defcd.asciidoc
create mode 100644 docs/doc_examples/331caebf810a923644eb6de26e5a97f4.asciidoc
create mode 100644 docs/doc_examples/3337c817ebd438254505a31e91c91724.asciidoc
rename docs/doc_examples/{eb30ba547e4a7b8f54f33ab259aca523.asciidoc => 3341d3bbb53052447a37c92a04c14b70.asciidoc} (59%)
create mode 100644 docs/doc_examples/3343a4cf559060c422d86c786a95e535.asciidoc
create mode 100644 docs/doc_examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc
create mode 100644 docs/doc_examples/336613f48dd95ea993dd3bcce264fd0e.asciidoc
create mode 100644 docs/doc_examples/33732208fc6e6fe1e8d278299681932e.asciidoc
create mode 100644 docs/doc_examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc
create mode 100644 docs/doc_examples/33b732bb301e99d2161bd2246494f487.asciidoc
create mode 100644 docs/doc_examples/33d480fc6812ada75756cf5337bc9092.asciidoc
create mode 100644 docs/doc_examples/342ddf9121aeddd82fea2464665e25da.asciidoc
create mode 100644 docs/doc_examples/343dd09a8c76987e586858be3bdc51eb.asciidoc
create mode 100644 docs/doc_examples/344b4144244d57f87c6aa4652b100b25.asciidoc
create mode 100644 docs/doc_examples/346f28d82acb5427c304aa574fea0008.asciidoc
create mode 100644 docs/doc_examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc
create mode 100644 docs/doc_examples/3487e60e1ae9d4925ce540cd63574385.asciidoc
create mode 100644 docs/doc_examples/349823d86980d40ac45248c19a59e339.asciidoc
create mode 100644 docs/doc_examples/34be27141e3a476c138546190101c8bc.asciidoc
create mode 100644 docs/doc_examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc
rename docs/doc_examples/{7e52bec09624cf6c0de5d13f2bfad5a5.asciidoc => 34d51c54b62e9a160c0ddacc10134bb0.asciidoc} (56%)
create mode 100644 docs/doc_examples/34d63740b58209a3d031212909743925.asciidoc
delete mode 100644 docs/doc_examples/34efeade38445b2834749ced59782e25.asciidoc
create mode 100644 docs/doc_examples/35260b615d0b5628c95d7cc814c39bd3.asciidoc
rename docs/doc_examples/{427f6b5c5376cbf0f71f242a60ca3d9e.asciidoc => 353020cb30a885ee7f5ce2b141ba574a.asciidoc} (69%)
create mode 100644 docs/doc_examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc
create mode 100644 docs/doc_examples/3544f17cb97b613a2f733707c676f759.asciidoc
create mode 100644 docs/doc_examples/3545261682af72f4bee57f2bac0a9590.asciidoc
create mode 100644 docs/doc_examples/35563ef92dddef9d83906d9c43c60d0f.asciidoc
rename docs/doc_examples/{52b2bfbdd78f8283b6f4891c48013237.asciidoc => 355d0ee2fcb6c1fc403c6267f710e25a.asciidoc} (55%)
create mode 100644 docs/doc_examples/357edc9d10e98ed776401c7a439a1a55.asciidoc
create mode 100644 docs/doc_examples/35a272df8c919a12d7c3106a18245748.asciidoc
create mode 100644 docs/doc_examples/35b686d9d9e915d0dea7a4251781767d.asciidoc
create mode 100644 docs/doc_examples/35be136ba9df7474a5521631e2a385b1.asciidoc
create mode 100644 docs/doc_examples/35c33ef48cf8a4ee368874141622f9d5.asciidoc
create mode 100644 docs/doc_examples/35c664285f2e8b7d5d50ca37ae3ba794.asciidoc
delete mode 100644 docs/doc_examples/35e8da9410b8432cf4095f2541ad7b1d.asciidoc
create mode 100644 docs/doc_examples/35eef1765e9a5991d77592a0c7490fe0.asciidoc
create mode 100644 docs/doc_examples/35f892b475a1770f18328158be7039fd.asciidoc
create mode 100644 docs/doc_examples/35fc63cbefce7bc131ad467b5ba209ef.asciidoc
create mode 100644 docs/doc_examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc
create mode 100644 docs/doc_examples/36063ff9a318dba7bb0be3a230655dc8.asciidoc
create mode 100644 docs/doc_examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc
rename docs/doc_examples/{17de0020b228df961ad3c6b06233c948.asciidoc => 360b3cef34bbddc5d9579ca95f0cb061.asciidoc} (54%)
create mode 100644 docs/doc_examples/360c4f373e72ba861584ee85bd218124.asciidoc
create mode 100644 docs/doc_examples/3613f402ee63f0efb6b8d9c6a919b410.asciidoc
create mode 100644 docs/doc_examples/362dfccdb6f7933b22c909542e0b4e0a.asciidoc
create mode 100644 docs/doc_examples/365256ebdfa47b449780771d9beba8d9.asciidoc
rename docs/doc_examples/{71ba9033107882f61cdc3b32fc73568d.asciidoc => 36962727b806315b221e8a63e05caddc.asciidoc} (54%)
create mode 100644 docs/doc_examples/36b26905c5f96d0b785c3267fb63838d.asciidoc
delete mode 100644 docs/doc_examples/36b2778f23d0955255f52c075c4d213d.asciidoc
create mode 100644 docs/doc_examples/36d229f734adcdab00be266a7ce038b1.asciidoc
create mode 100644 docs/doc_examples/36da9668fef56910370f16bfb772cc40.asciidoc
create mode 100644 docs/doc_examples/36e09bbd5896498ede0f5d37a18eae2c.asciidoc
rename docs/doc_examples/{9a8995fd31351045d99c78e40444c8ea.asciidoc => 36fae9dfc0b815546b45745bac054b67.asciidoc} (63%)
create mode 100644 docs/doc_examples/370b297ed3433577adf53e64f572d89d.asciidoc
create mode 100644 docs/doc_examples/371962cf63e65c10026177c6a1bad0b6.asciidoc
delete mode 100644 docs/doc_examples/3722cb3705b6bc7f486969deace3dd83.asciidoc
create mode 100644 docs/doc_examples/37530f35f315b9f35e3e6a13cf2a1ccd.asciidoc
create mode 100644 docs/doc_examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc
create mode 100644 docs/doc_examples/3759ca688c4bd3c838780a9aad63258b.asciidoc
create mode 100644 docs/doc_examples/375bf2c51ce6cc386f9d4d635d5e84a7.asciidoc
create mode 100644 docs/doc_examples/376fbc965e1b093f6dbc198a94c83aa9.asciidoc
create mode 100644 docs/doc_examples/376ff4b2b5f657481af78a778aaab57f.asciidoc
create mode 100644 docs/doc_examples/377af0ea9b19c113f224d8150890b41b.asciidoc
create mode 100644 docs/doc_examples/378e55f78fa13578a1302bae8d479765.asciidoc
create mode 100644 docs/doc_examples/37983daac3d9c8582583a507b3adb7f2.asciidoc
create mode 100644 docs/doc_examples/37ae7c3e4d6d954487ec4185fe7d9ec8.asciidoc
create mode 100644 docs/doc_examples/37b84f2ab7c2f6b4fe0e14cc7e018b1f.asciidoc
create mode 100644 docs/doc_examples/37c73410bf13429279cbc61a413957d8.asciidoc
create mode 100644 docs/doc_examples/37eaab0630976d3dee90a52011342883.asciidoc
create mode 100644 docs/doc_examples/37f1f2e75ed95308ae436bbbb8d5645e.asciidoc
create mode 100644 docs/doc_examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc
create mode 100644 docs/doc_examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc
create mode 100644 docs/doc_examples/388d3eda4f792d3fce044777739217e6.asciidoc
create mode 100644 docs/doc_examples/388ec2b038d3ad69378f4c2e5bc36dce.asciidoc
create mode 100644 docs/doc_examples/38af4a55c1ea0f908dc7b06d680d2789.asciidoc
create mode 100644 docs/doc_examples/38b20fe981605e80a41517e9aa13134a.asciidoc
create mode 100644 docs/doc_examples/38ba93890494bfa7beece58dffa44f98.asciidoc
create mode 100644 docs/doc_examples/38eed000de433b540116928681c520d3.asciidoc
create mode 100644 docs/doc_examples/38f7739f750f1411bccf511a0abaaea3.asciidoc
create mode 100644 docs/doc_examples/38ffa96674b5fd4042589af0ebb0437b.asciidoc
create mode 100644 docs/doc_examples/3924ee252581ebb96ac0e60046125ae8.asciidoc
create mode 100644 docs/doc_examples/3951d7fcd7f849fa278daf342872125a.asciidoc
create mode 100644 docs/doc_examples/39760996f94ad34aaceaa16a5cc97993.asciidoc
create mode 100644 docs/doc_examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc
create mode 100644 docs/doc_examples/397bdb40d0146102f1f4c6a35675e16a.asciidoc
create mode 100644 docs/doc_examples/39963032d423e2f20f53c4621b6ca3c6.asciidoc
delete mode 100644 docs/doc_examples/39a6a038c4b551022afe83de0523634e.asciidoc
create mode 100644 docs/doc_examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc
create mode 100644 docs/doc_examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc
create mode 100644 docs/doc_examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc
create mode 100644 docs/doc_examples/3a2953fd81d65118a776c87a81530e15.asciidoc
create mode 100644 docs/doc_examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc
create mode 100644 docs/doc_examples/3a3adae6dbb2c0316a7d98d0a6c1d4f8.asciidoc
create mode 100644 docs/doc_examples/3a3e6e2627cafa08e4402a0de95785cc.asciidoc
create mode 100644 docs/doc_examples/3a5f2e2313614ea9693545edee22ac43.asciidoc
create mode 100644 docs/doc_examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc
create mode 100644 docs/doc_examples/3a64ae799cc03fadbb802794730c23da.asciidoc
delete mode 100644 docs/doc_examples/3a700f836d8d5da1b656a876554028aa.asciidoc
create mode 100644 docs/doc_examples/3a7a6ab88a49b484fafb10c8eb09b562.asciidoc
create mode 100644 docs/doc_examples/3aa0e2d25a51bf5f3f0bda7fd8403bf2.asciidoc
create mode 100644 docs/doc_examples/3abedc1d68fe1d20621157406b2b1de0.asciidoc
create mode 100644 docs/doc_examples/3ac075c5b5bbe648d40d06cce3061367.asciidoc
create mode 100644 docs/doc_examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc
create mode 100644 docs/doc_examples/3af10fde8138d9d95df127d39d9a0ed2.asciidoc
create mode 100644 docs/doc_examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc
create mode 100644 docs/doc_examples/3b0475515ee692a2d9850c2bd7cdb895.asciidoc
rename docs/doc_examples/{09dbd90c5e22ea4a17b4cf9aa72e08ae.asciidoc => 3b05128cba6852e79a905bcdd5a8ebc0.asciidoc} (64%)
create mode 100644 docs/doc_examples/3b162509ed14eda44a9681cd1108fa39.asciidoc
create mode 100644 docs/doc_examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc
create mode 100644 docs/doc_examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc
create mode 100644 docs/doc_examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc
create mode 100644 docs/doc_examples/3b606631284877f9bca15051630995ad.asciidoc
create mode 100644 docs/doc_examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc
create mode 100644 docs/doc_examples/3b8ab7027e0d616fb432acd8813e086c.asciidoc
create mode 100644 docs/doc_examples/3b9c54604535d97e8368d47148aecc6f.asciidoc
create mode 100644 docs/doc_examples/3ba2896bcc724c27be8f0decf6f81813.asciidoc
create mode 100644 docs/doc_examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc
create mode 100644 docs/doc_examples/3bb5951a9e1186af5d154f56ffc13502.asciidoc
create mode 100644 docs/doc_examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc
create mode 100644 docs/doc_examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc
create mode 100644 docs/doc_examples/3c04f75bcbb07125d51b21b9b2c9f6f0.asciidoc
create mode 100644 docs/doc_examples/3c09ca91057216125ed0e3856a91ff95.asciidoc
create mode 100644 docs/doc_examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc
create mode 100644 docs/doc_examples/3c36dc17359c6b6b6a40d04da9293fa7.asciidoc
create mode 100644 docs/doc_examples/3c5d5a5c34a62724942329658c688f5e.asciidoc
create mode 100644 docs/doc_examples/3c65cb58e131ef46f4dd081683b970ac.asciidoc
create mode 100644 docs/doc_examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc
create mode 100644 docs/doc_examples/3c7621a81fa982b79f040a6d2611530e.asciidoc
rename docs/doc_examples/{b214942b938e47f2c486e523546cb574.asciidoc => 3cd2f7f9096a8e8180f27b6c30e71840.asciidoc} (51%)
delete mode 100644 docs/doc_examples/3cd50a789b8e1f0ebbbc53a8d7ecf656.asciidoc
create mode 100644 docs/doc_examples/3cd93a48906069709b76420c66930c01.asciidoc
create mode 100644 docs/doc_examples/3d05fa99ba8e1f2c3f3dfe59e4ee60f6.asciidoc
create mode 100644 docs/doc_examples/3d1a0e1dc5310544d032108ae0b3f099.asciidoc
create mode 100644 docs/doc_examples/3d316bddd8503a6cc10566630a4155d3.asciidoc
create mode 100644 docs/doc_examples/3d48d1ba49f680aac32177d653944623.asciidoc
create mode 100644 docs/doc_examples/3d6935e04de21ab2f103e5b61cfd7a5b.asciidoc
create mode 100644 docs/doc_examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc
create mode 100644 docs/doc_examples/3d82257167e8a14a7f474848b32da128.asciidoc
create mode 100644 docs/doc_examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc
rename docs/doc_examples/{c5e5873783246c7b1c01d8464fed72c4.asciidoc => 3db2b5a6424aa92ecab7a8640c38685a.asciidoc} (74%)
create mode 100644 docs/doc_examples/3dd45f65e7bfe207e8d796118f25613c.asciidoc
create mode 100644 docs/doc_examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc
create mode 100644 docs/doc_examples/3e13c8a81f40a537eddc0b57633b45f8.asciidoc
create mode 100644 docs/doc_examples/3e1cb34fd6e510c79c2fff2126ac1c61.asciidoc
create mode 100644 docs/doc_examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc
create mode 100644 docs/doc_examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc
rename docs/doc_examples/{5be23858b35043fcb7b50fe36b873e6e.asciidoc => 3e4227250d49e81df48773f8ba803ea7.asciidoc} (59%)
delete mode 100644 docs/doc_examples/3e573bfabe00f8bfb8bb69aa5820768e.asciidoc
create mode 100644 docs/doc_examples/3e6db3d80439c2c176dbd1bb1296b6cf.asciidoc
rename docs/doc_examples/{8022e6a690344035b6472a43a9d122e0.asciidoc => 3e8ed6ae016eb823cb00d9035b8ac459.asciidoc} (74%)
create mode 100644 docs/doc_examples/3ea33023474e77d73ac0540e3a02b0b2.asciidoc
create mode 100644 docs/doc_examples/3eb4cdd4a799a117ac1ff5f02b18a512.asciidoc
create mode 100644 docs/doc_examples/3ec95ba697ff97ee2d1a721a393b5926.asciidoc
create mode 100644 docs/doc_examples/3eca58ef7592b3a857ea3a9898de5997.asciidoc
create mode 100644 docs/doc_examples/3ed39eb60fbfafb70f7825b8d103bf17.asciidoc
create mode 100644 docs/doc_examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc
create mode 100644 docs/doc_examples/3ee232bcb2281a12b33cd9764ee4081a.asciidoc
create mode 100644 docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc
create mode 100644 docs/doc_examples/3f20459d358611793272f63dc596e889.asciidoc
create mode 100644 docs/doc_examples/3f292a5f67e20f91bf18f5c2412a07bf.asciidoc
create mode 100644 docs/doc_examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc
create mode 100644 docs/doc_examples/3f30310cc6d0adae6b0f61705624a695.asciidoc
create mode 100644 docs/doc_examples/3f5b5bee692e7d4b0992dc0a64e95a60.asciidoc
create mode 100644 docs/doc_examples/3f60a892bed18151b7baac6cc712576a.asciidoc
create mode 100644 docs/doc_examples/3f669878713a14dfba251c7ce74dd5c4.asciidoc
create mode 100644 docs/doc_examples/3f8dc309b63fa0437898107b0d964217.asciidoc
create mode 100644 docs/doc_examples/3f94ed945ae6416a0eb372c2db14d7e0.asciidoc
create mode 100644 docs/doc_examples/3fab530a2e43807929c0ef3ebf7d268c.asciidoc
create mode 100644 docs/doc_examples/3faec4ca15d8c2fbbd16781b1c8693d6.asciidoc
create mode 100644 docs/doc_examples/3faf5e2873de340acfe0a617017db784.asciidoc
create mode 100644 docs/doc_examples/3fb1289c80a354da66693bfb25d7b412.asciidoc
create mode 100644 docs/doc_examples/3fb2f41ad229a31ad3ae408cc50cbed5.asciidoc
create mode 100644 docs/doc_examples/3fe0fb38f75d2a34fb1e6ac9bedbcdbc.asciidoc
create mode 100644 docs/doc_examples/3fe4264ace04405989141c43aadfff81.asciidoc
create mode 100644 docs/doc_examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc
create mode 100644 docs/doc_examples/3fe79ed63195c5f8018648a5a6d645f6.asciidoc
create mode 100644 docs/doc_examples/3fe9006f6c7faea162e43fb250f4da38.asciidoc
create mode 100644 docs/doc_examples/3fecd5c6d0c172566da4a54320e1cff3.asciidoc
create mode 100644 docs/doc_examples/3ff634a50e2e4556bad7ea8553576992.asciidoc
create mode 100644 docs/doc_examples/3ffe9952786ab258bb6ab928b03148a2.asciidoc
create mode 100644 docs/doc_examples/402092585940953420404c2884a47e59.asciidoc
create mode 100644 docs/doc_examples/4029af36cb3f8202549017f7378803b4.asciidoc
create mode 100644 docs/doc_examples/4053de806dfd9172167999ce098107c4.asciidoc
create mode 100644 docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc
create mode 100644 docs/doc_examples/405ac843a9156d3cab374e199cac87fb.asciidoc
create mode 100644 docs/doc_examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc
create mode 100644 docs/doc_examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc
create mode 100644 docs/doc_examples/406a0f1c1aac947bcee58f86b6d036c1.asciidoc
create mode 100644 docs/doc_examples/408060f0c52300588a6dee774f4fd6a5.asciidoc
create mode 100644 docs/doc_examples/40a42f005144cfed3dd1dcf2638e8211.asciidoc
create mode 100644 docs/doc_examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc
create mode 100644 docs/doc_examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc
create mode 100644 docs/doc_examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc
create mode 100644 docs/doc_examples/40d88d4f53343ef663c89ba488ab8001.asciidoc
create mode 100644 docs/doc_examples/40d90d9dc6f4942bf92d88bfc5a34672.asciidoc
create mode 100644 docs/doc_examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc
rename docs/doc_examples/{138ccd89f72aa7502dd9578403dcc589.asciidoc => 4113c57384aa37c58d11579e20c00760.asciidoc} (68%)
create mode 100644 docs/doc_examples/41175d304e660da2931764f9a4418fd3.asciidoc
create mode 100644 docs/doc_examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc
create mode 100644 docs/doc_examples/412f8238ab5182678f1d8f6383031b11.asciidoc
create mode 100644 docs/doc_examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc
create mode 100644 docs/doc_examples/415b46bc2b7a7b4dcf9a73ac67ea20e9.asciidoc
create mode 100644 docs/doc_examples/416a3ba11232d3c078c1c31340cf356f.asciidoc
create mode 100644 docs/doc_examples/41ad6077f9c1b8d8fefab6ea1660edcd.asciidoc
rename docs/doc_examples/{58df61acbfb15b8ef0aaa18b81ae98a6.asciidoc => 41dbd79f624b998d01c10921e9a35c4b.asciidoc} (61%)
create mode 100644 docs/doc_examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc
create mode 100644 docs/doc_examples/4207219a892339e8f3abe0df8723dd27.asciidoc
create mode 100644 docs/doc_examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc
create mode 100644 docs/doc_examples/424fbf082cd4affb84439abfc916b597.asciidoc
create mode 100644 docs/doc_examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc
create mode 100644 docs/doc_examples/4275ecbe4aa68d43a8a7139866610a27.asciidoc
create mode 100644 docs/doc_examples/42ba7c1d13aee91fe6f0a8a42c30eb74.asciidoc
create mode 100644 docs/doc_examples/42bc7608bb675dd6238e2fecbb758d06.asciidoc
create mode 100644 docs/doc_examples/42d02087f1c8ab0452ef373079a76843.asciidoc
create mode 100644 docs/doc_examples/42deb4fe32afbe0f94185e256a79c447.asciidoc
create mode 100644 docs/doc_examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc
create mode 100644 docs/doc_examples/430705509f8367aef92be413f702520b.asciidoc
create mode 100644 docs/doc_examples/4310869b97d4224acaa6d66b1e196048.asciidoc
create mode 100644 docs/doc_examples/4323f6d224847eccdce59c23e33fda0a.asciidoc
create mode 100644 docs/doc_examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc
create mode 100644 docs/doc_examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc
create mode 100644 docs/doc_examples/435e0d6a7d86e074d572d9671b7b9676.asciidoc
create mode 100644 docs/doc_examples/436d50b85fc8f0977d02059eec00719b.asciidoc
create mode 100644 docs/doc_examples/43854be6aae61edbea5f9ab988cb4ce5.asciidoc
delete mode 100644 docs/doc_examples/43af86de5e49aa06070092fffc138208.asciidoc
create mode 100644 docs/doc_examples/43e86fbaeed068dcc981214338559b5a.asciidoc
create mode 100644 docs/doc_examples/43f77ddf1ed8106d4f47a12d39df8e3b.asciidoc
create mode 100644 docs/doc_examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc
create mode 100644 docs/doc_examples/441be98c597698bb2809372abf086c3e.asciidoc
rename docs/doc_examples/{3342c69b2c2303247217532956fcce85.asciidoc => 441f330f6872f995769db1ce2b9627e2.asciidoc} (63%)
create mode 100644 docs/doc_examples/44231f7cdd5c3a21025861cdef31e355.asciidoc
create mode 100644 docs/doc_examples/4427517dcd8ec9997541150cdc11a0de.asciidoc
create mode 100644 docs/doc_examples/4435b654994b575ba181ea679871c78c.asciidoc
create mode 100644 docs/doc_examples/443dd902f64b3217505c9595839c3b2d.asciidoc
create mode 100644 docs/doc_examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc
create mode 100644 docs/doc_examples/443f0e8fbba83777b2df624879d188d5.asciidoc
create mode 100644 docs/doc_examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc
create mode 100644 docs/doc_examples/446e8fc8ccfb13bb5ec64e32a5676d18.asciidoc
create mode 100644 docs/doc_examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc
create mode 100644 docs/doc_examples/44939997b0f2601f82a93585a879f65a.asciidoc
create mode 100644 docs/doc_examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc
create mode 100644 docs/doc_examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc
create mode 100644 docs/doc_examples/44bca3f17d403517af3616754dc795bb.asciidoc
rename docs/doc_examples/{2fd69fb0538e4f36ac69a8b8f8bf5ae8.asciidoc => 44da736ce0e1587c1e7c45eee606ead7.asciidoc} (50%)
create mode 100644 docs/doc_examples/44db41b8465af951e366da97ade63bc1.asciidoc
create mode 100644 docs/doc_examples/44dd65d69267017fa2fb2cffadef40bb.asciidoc
create mode 100644 docs/doc_examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc
create mode 100644 docs/doc_examples/451b441c3311103d0d2bdbab771b26d2.asciidoc
create mode 100644 docs/doc_examples/451e7c29b2cf738cfc822f7c175bef56.asciidoc
create mode 100644 docs/doc_examples/4527d9bb12cf738111a188af235d5d4c.asciidoc
create mode 100644 docs/doc_examples/45499ed1824d1d7cb59972580d2344cb.asciidoc
create mode 100644 docs/doc_examples/455029c3d66306ad5d48f6dbddaf7324.asciidoc
create mode 100644 docs/doc_examples/4553e0acb6336687d61eaecc73f517b7.asciidoc
create mode 100644 docs/doc_examples/45813d971bfa890ffa2f51f3f480cce5.asciidoc
create mode 100644 docs/doc_examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc
create mode 100644 docs/doc_examples/45b74f1904533fdb37a5a6f3c8f4ec9b.asciidoc
create mode 100644 docs/doc_examples/45c6e54a9c9e08623af96752b4bde346.asciidoc
create mode 100644 docs/doc_examples/45ef5156dbd2d3fd4fd22b8d99f7aad4.asciidoc
create mode 100644 docs/doc_examples/46025fc47dfbfa410790df0dd6bdad8d.asciidoc
create mode 100644 docs/doc_examples/46064e81620162a23e75002a7eeb8b10.asciidoc
create mode 100644 docs/doc_examples/46103fee3cd5f53dc75123def82d52ad.asciidoc
delete mode 100644 docs/doc_examples/4646764bf09911fee7d58630c72d3137.asciidoc
create mode 100644 docs/doc_examples/464dffb6a6e24a860223d1c32b232f95.asciidoc
create mode 100644 docs/doc_examples/4655c3dea0c61935b7ecf1e57441df66.asciidoc
create mode 100644 docs/doc_examples/4659f639d71a54df571260ee5798dbb3.asciidoc
create mode 100644 docs/doc_examples/4670dd81a9865e07ae74ae8b0266e384.asciidoc
create mode 100644 docs/doc_examples/467833bd44b35a89a7fe0d7df5f253f1.asciidoc
create mode 100644 docs/doc_examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc
create mode 100644 docs/doc_examples/46a0eaaf5c881f1ba716d1812b36c724.asciidoc
create mode 100644 docs/doc_examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc
create mode 100644 docs/doc_examples/46c5c14f20118dcf519ff6ef21360209.asciidoc
create mode 100644 docs/doc_examples/46ce40227fa60aa6ba435f366b3a1f5f.asciidoc
create mode 100644 docs/doc_examples/46ebd468c3f132a4978088964466c5cd.asciidoc
create mode 100644 docs/doc_examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc
create mode 100644 docs/doc_examples/473c8ddd4e4b7814a64e5fe40d9d6dca.asciidoc
create mode 100644 docs/doc_examples/4752f82fec8b46e5a4b3788b76e3041f.asciidoc
create mode 100644 docs/doc_examples/47909e194d10743093f4a22c27a85925.asciidoc
delete mode 100644 docs/doc_examples/47bb632c6091ad0cd94bc660bdd309a5.asciidoc
create mode 100644 docs/doc_examples/47e6dfb5b09d954c9c0c33fda2b6c66d.asciidoc
create mode 100644 docs/doc_examples/47fde7874e15a37242993fd69c62063b.asciidoc
create mode 100644 docs/doc_examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc
create mode 100644 docs/doc_examples/4818a1288ac24a56d6d6a4130ee70202.asciidoc
create mode 100644 docs/doc_examples/4824a823a830a2a5d990eacfd783ac22.asciidoc
create mode 100644 docs/doc_examples/48313f620c2871b6f4019b66be730109.asciidoc
create mode 100644 docs/doc_examples/483d669ec0768bc4e275a568c6164704.asciidoc
create mode 100644 docs/doc_examples/484e24d1ed1a154ba9753e6090d38d78.asciidoc
create mode 100644 docs/doc_examples/487f0e07fd83c05f9763e0795c525e2e.asciidoc
rename docs/doc_examples/{9166cf38427d5cde5d2ec12a2012b669.asciidoc => 488f6df1df71972392b670ce557f7ff3.asciidoc} (52%)
create mode 100644 docs/doc_examples/48d9697a14dfe131325521f48a7adc84.asciidoc
create mode 100644 docs/doc_examples/48de51de87a8ad9fd8b8db1ca25b85c1.asciidoc
create mode 100644 docs/doc_examples/49100a4f53c0ba345fadacdc4f2f86e4.asciidoc
create mode 100644 docs/doc_examples/4955bae30f265b9e436f82b015de6d7e.asciidoc
create mode 100644 docs/doc_examples/496d35c89dc991a1509f7e8fb93ade45.asciidoc
create mode 100644 docs/doc_examples/4980d6fcb369692b0b29ddc6767d4324.asciidoc
create mode 100644 docs/doc_examples/4989cc97ce1c8fff634a10d343031bd0.asciidoc
create mode 100644 docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc
create mode 100644 docs/doc_examples/49c052a748c943180db78fee8e144239.asciidoc
create mode 100644 docs/doc_examples/49c40b51da2469a6e00fea8fa6fbf56e.asciidoc
create mode 100644 docs/doc_examples/49cb3f48a0097bfc597c52fa51c6d379.asciidoc
create mode 100644 docs/doc_examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc
create mode 100644 docs/doc_examples/49e8773a34fcbf825de38426cff5509c.asciidoc
create mode 100644 docs/doc_examples/49f4d2a461536d150e16b1e0a3148678.asciidoc
create mode 100644 docs/doc_examples/4a1951844bd39f26961bfc965f3432b1.asciidoc
create mode 100644 docs/doc_examples/4a2080ae55d931eb0643cc3eb91ec524.asciidoc
create mode 100644 docs/doc_examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc
create mode 100644 docs/doc_examples/4a7510a9c0468303658383c00796dad2.asciidoc
create mode 100644 docs/doc_examples/4aa81a694266fb634904224d14cd9a87.asciidoc
delete mode 100644 docs/doc_examples/4acf902c2598b2558f34f20c1744c433.asciidoc
create mode 100644 docs/doc_examples/4ae494d1e62231e832fc0436b04e2014.asciidoc
create mode 100644 docs/doc_examples/4af15c4f26ddefb9c350e7a246a66a15.asciidoc
create mode 100644 docs/doc_examples/4b1044259a6d777d87529eae25675005.asciidoc
create mode 100644 docs/doc_examples/4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc
create mode 100644 docs/doc_examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc
create mode 100644 docs/doc_examples/4b5110a21676cc0e26e050a4b4552235.asciidoc
create mode 100644 docs/doc_examples/4ba86373e13e106de044f190343be328.asciidoc
create mode 100644 docs/doc_examples/4bb4a64cf04e3feb133b0221d29beaa9.asciidoc
create mode 100644 docs/doc_examples/4bb7bcfebca682fb9c9e3e47bfd5ef6f.asciidoc
create mode 100644 docs/doc_examples/4bba59cf745ac7b996bf90308bc26957.asciidoc
create mode 100644 docs/doc_examples/4bc4db44b8c74610b73f21a421099a13.asciidoc
create mode 100644 docs/doc_examples/4bc744b0f33b322741a8caf6d8d7d765.asciidoc
create mode 100644 docs/doc_examples/4bd42e31ac4a5cf237777f1a0e97aba8.asciidoc
create mode 100644 docs/doc_examples/4be07b34db282044c88d5021c7ea08ee.asciidoc
create mode 100644 docs/doc_examples/4be20da16d2b58216e8b307218c7bf3a.asciidoc
create mode 100644 docs/doc_examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc
create mode 100644 docs/doc_examples/4bf6bb703a52267379ae2b1e1308cf8b.asciidoc
create mode 100644 docs/doc_examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc
create mode 100644 docs/doc_examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc
create mode 100644 docs/doc_examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc
create mode 100644 docs/doc_examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc
create mode 100644 docs/doc_examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc
create mode 100644 docs/doc_examples/4c777b8360ef6c7671ae2e3803c0b0f6.asciidoc
create mode 100644 docs/doc_examples/4c77d12039fe2445c9251e33979071ac.asciidoc
create mode 100644 docs/doc_examples/4c803b088c1915a7b0634d5cafabe606.asciidoc
create mode 100644 docs/doc_examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc
create mode 100644 docs/doc_examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc
create mode 100644 docs/doc_examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc
create mode 100644 docs/doc_examples/4ca5bc2c2b2f64d15b9c16370ae97a39.asciidoc
create mode 100644 docs/doc_examples/4cb44556b8c699f43489b17b42ddd475.asciidoc
delete mode 100644 docs/doc_examples/4cd246e5c4c035a2cd4081ae9a3d54e5.asciidoc
create mode 100644 docs/doc_examples/4cd40113e0fc90c37976f28d7e4a2327.asciidoc
create mode 100644 docs/doc_examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc
create mode 100644 docs/doc_examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc
create mode 100644 docs/doc_examples/4ce4563e207233c48ffe849728052dca.asciidoc
create mode 100644 docs/doc_examples/4d21725453955582ff12b4a1104aa7b6.asciidoc
create mode 100644 docs/doc_examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc
create mode 100644 docs/doc_examples/4d46e2160784bdf7cce948e9f0d31fc8.asciidoc
delete mode 100644 docs/doc_examples/4d56b179242fed59e3d6476f817b6055.asciidoc
delete mode 100644 docs/doc_examples/4d6997c70a1851f9151443c0d38b532e.asciidoc
create mode 100644 docs/doc_examples/4d7c0b52d3c0a084157428624c543c90.asciidoc
create mode 100644 docs/doc_examples/4dab4c5168047ba596af1beb0e55b845.asciidoc
create mode 100644 docs/doc_examples/4ded8ad815ac0e83b1c21a6c18fd0763.asciidoc
create mode 100644 docs/doc_examples/4e1f02928ef243bf07fd425754b7642b.asciidoc
create mode 100644 docs/doc_examples/4e2317aa45e87922d07c8ddc67a82d32.asciidoc
create mode 100644 docs/doc_examples/4e3414fc712b16311f9e433dd366f49d.asciidoc
create mode 100644 docs/doc_examples/4e4608ae4ce93c27bd174a9ea078cab2.asciidoc
create mode 100644 docs/doc_examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc
create mode 100644 docs/doc_examples/4e5f7a97efdbf517f7a2ed6ef7ff469c.asciidoc
create mode 100644 docs/doc_examples/4e6b78ac991ed2d5f9a2e7c89f4fc471.asciidoc
create mode 100644 docs/doc_examples/4e926063a9494b563387617b08c4f232.asciidoc
create mode 100644 docs/doc_examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc
create mode 100644 docs/doc_examples/4ed946065faa92f9950f04e402676a97.asciidoc
create mode 100644 docs/doc_examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc
create mode 100644 docs/doc_examples/4f08d9e21d9f199acc77abfb83287878.asciidoc
create mode 100644 docs/doc_examples/4f140d8922efdf3420e41b1cb669a289.asciidoc
create mode 100644 docs/doc_examples/4f1e1205154d280db21fbd2754ed5398.asciidoc
create mode 100644 docs/doc_examples/4f3366fc26e7ea4de446dfa5cdec9683.asciidoc
create mode 100644 docs/doc_examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc
create mode 100644 docs/doc_examples/4f666d710758578e2582850dac3ad144.asciidoc
create mode 100644 docs/doc_examples/4f67b5f5c040f611bd2560a5d38ea6f5.asciidoc
create mode 100644 docs/doc_examples/4f792d86ff79dcfe4643cd95505f8d5f.asciidoc
create mode 100644 docs/doc_examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc
create mode 100644 docs/doc_examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc
create mode 100644 docs/doc_examples/4fb0629146ca78b85e823edd405497bb.asciidoc
create mode 100644 docs/doc_examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc
create mode 100644 docs/doc_examples/4fe78a4dfb747fd5dc34145ec6b76183.asciidoc
create mode 100644 docs/doc_examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc
create mode 100644 docs/doc_examples/50096ee0ca53fe8a88450ebb2a50f285.asciidoc
create mode 100644 docs/doc_examples/5024c524a7db0d6bb44c1820007cc5f4.asciidoc
delete mode 100644 docs/doc_examples/5043b83a89091fa00edb341ddf7ba370.asciidoc
create mode 100644 docs/doc_examples/50522d3d5b3d055f712ad737e3d1707a.asciidoc
create mode 100644 docs/doc_examples/505a6c21a4cb608d3662fab1a35eb6df.asciidoc
create mode 100644 docs/doc_examples/50764f4ea88079156b0aff2835bcdc45.asciidoc
create mode 100644 docs/doc_examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc
create mode 100644 docs/doc_examples/50a9623c153cabe64101efb633e10e6c.asciidoc
create mode 100644 docs/doc_examples/50b5c0332949d2154c72b629b5fa6222.asciidoc
create mode 100644 docs/doc_examples/50c2b06ecddb5a4aebd8b78e38af5f1f.asciidoc
create mode 100644 docs/doc_examples/50c2cea2adbe9523458c2686ab11df54.asciidoc
create mode 100644 docs/doc_examples/50d5c5b7e8ed9a95b8d9a25a32a77425.asciidoc
rename docs/doc_examples/{8c5977410335d58217e0626618ce6641.asciidoc => 50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc} (66%)
create mode 100644 docs/doc_examples/50dc35d3d8705bd62aed20a15209476c.asciidoc
create mode 100644 docs/doc_examples/50ddf374cfa8128538ea092ee98b723d.asciidoc
create mode 100644 docs/doc_examples/50f922e9f002d8ac570953be59414b7b.asciidoc
rename docs/doc_examples/{d2153f3100bf12c2de98f14eb86ab061.asciidoc => 511e5bb8ab881171b7e8629095e30b85.asciidoc} (78%)
create mode 100644 docs/doc_examples/51390ca10aa22d7104e8970f09ea4512.asciidoc
create mode 100644 docs/doc_examples/515e1104d136082e826d1b32af011759.asciidoc
create mode 100644 docs/doc_examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc
create mode 100644 docs/doc_examples/518fcf1dc1edd7dba0864accf71b49f4.asciidoc
create mode 100644 docs/doc_examples/5195a88194f7a139c635a84398d76205.asciidoc
create mode 100644 docs/doc_examples/51b40610ae05730b4c6afd25647d7ae0.asciidoc
create mode 100644 docs/doc_examples/51b44224feee6e2e5974824334474c77.asciidoc
create mode 100644 docs/doc_examples/51f1a0930362594b231a5bcc17673768.asciidoc
create mode 100644 docs/doc_examples/51f6cb682424e110f289af79c106f4c7.asciidoc
delete mode 100644 docs/doc_examples/5271f4ff29bb48838396e5a674664ee0.asciidoc
delete mode 100644 docs/doc_examples/527324766814561b75aaee853ede49a7.asciidoc
create mode 100644 docs/doc_examples/5276a831513623e43ed567eb52b6dba9.asciidoc
create mode 100644 docs/doc_examples/528e5f1c345c3769248cc6889e8cf552.asciidoc
create mode 100644 docs/doc_examples/529b975b7cedaac58dce9821956adc37.asciidoc
create mode 100644 docs/doc_examples/52a2d119addb15366a935115518335fd.asciidoc
create mode 100644 docs/doc_examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc
create mode 100644 docs/doc_examples/52bc577a0d0cd42b46f33e0ef5124df8.asciidoc
create mode 100644 docs/doc_examples/52be795b68e6ef3f396f35fea52d0481.asciidoc
create mode 100644 docs/doc_examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc
create mode 100644 docs/doc_examples/52cdb5526ce69d0223d1dd198308bfea.asciidoc
create mode 100644 docs/doc_examples/52f1c1689ab35353858cdeaab7597546.asciidoc
create mode 100644 docs/doc_examples/52fd112e970882c4d7cc4b0cca8e2c6f.asciidoc
create mode 100644 docs/doc_examples/5302f4f2bcc0f400ff71c791e6f68d7b.asciidoc
create mode 100644 docs/doc_examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc
create mode 100644 docs/doc_examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc
create mode 100644 docs/doc_examples/532f371934b61fb4992d37bedcc085de.asciidoc
rename docs/doc_examples/{dfb1fe96d806a644214d06f9b4b87878.asciidoc => 5330191ec9f11281ebf6867bf11c58ae.asciidoc} (56%)
create mode 100644 docs/doc_examples/5332c4cca5fbb45cc700dcd34f37bc38.asciidoc
rename docs/doc_examples/{0ce3606f1dba490eef83c4317b315b62.asciidoc => 53aa8b21e2b1c4d48960343711296704.asciidoc} (58%)
create mode 100644 docs/doc_examples/53bb7f0e3429861aadb8dd3d588085cd.asciidoc
create mode 100644 docs/doc_examples/53c6256295111524d5ff2885bdcb99a9.asciidoc
create mode 100644 docs/doc_examples/53e4ac5a4009fd21024f4b31e54aa83f.asciidoc
create mode 100644 docs/doc_examples/54059961f05904368ced52c894a50e23.asciidoc
delete mode 100644 docs/doc_examples/54092c8c646133f5dbbc047990dd458d.asciidoc
create mode 100644 docs/doc_examples/540aefc39303c925a4efff71ebe2f002.asciidoc
create mode 100644 docs/doc_examples/5433bb83628cc91d81fbe53c533b2a09.asciidoc
create mode 100644 docs/doc_examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc
create mode 100644 docs/doc_examples/548b85bd9e6e7d33e36133953869449b.asciidoc
create mode 100644 docs/doc_examples/54a215d242ab65123b09e9dfb71bcbbf.asciidoc
create mode 100644 docs/doc_examples/55096381f811388fafd8e244dd2402c8.asciidoc
create mode 100644 docs/doc_examples/553904c175a76d5ba83bc5d46fff7373.asciidoc
rename docs/doc_examples/{2a1de18774f9c68cafa169847832b2bc.asciidoc => 553d79817bb1333970e99507c37a159a.asciidoc} (53%)
create mode 100644 docs/doc_examples/5553cf7a02c22f616cd994747f2dd5a5.asciidoc
create mode 100644 docs/doc_examples/5566cff431570f522e1fc5475b2ed875.asciidoc
create mode 100644 docs/doc_examples/55838e0b21c4f4da2dc8aaec045a6d5f.asciidoc
create mode 100644 docs/doc_examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc
create mode 100644 docs/doc_examples/5597eeb8f43b5d47bd07f27122c24194.asciidoc
create mode 100644 docs/doc_examples/55d349ccb0efd5e1c06c6dd383a593cf.asciidoc
create mode 100644 docs/doc_examples/55e8ddf643726dec51531ada0bec7143.asciidoc
create mode 100644 docs/doc_examples/55f0fec6342f677af74de2124b801aa2.asciidoc
create mode 100644 docs/doc_examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc
create mode 100644 docs/doc_examples/5619103306878d58a058bce87c5bd82b.asciidoc
create mode 100644 docs/doc_examples/5632c3b947062d3a5fc0e4f3413b3308.asciidoc
create mode 100644 docs/doc_examples/563dfbf421422c837ee6929ae2ede876.asciidoc
create mode 100644 docs/doc_examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc
create mode 100644 docs/doc_examples/565908b03edff1d6e6e7cdfb92177faf.asciidoc
create mode 100644 docs/doc_examples/568979150ce18739f8d3ea859355aaa3.asciidoc
create mode 100644 docs/doc_examples/569f10fee671632017c722fd983009d4.asciidoc
create mode 100644 docs/doc_examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc
create mode 100644 docs/doc_examples/56a903530990313b753b1be33578997a.asciidoc
create mode 100644 docs/doc_examples/56b6b50b174a935d368301ebd717231d.asciidoc
create mode 100644 docs/doc_examples/56da9c55774f4c2e8eadde0579bdc60c.asciidoc
create mode 100644 docs/doc_examples/56db76c987106a870357854d3068ad98.asciidoc
create mode 100644 docs/doc_examples/56e90a63f94eeb882fe8acbcd74229c2.asciidoc
create mode 100644 docs/doc_examples/56f3a6bec7be5a90fb43144c331a5b5a.asciidoc
create mode 100644 docs/doc_examples/56fa6c9e08258157d445e2f92274962b.asciidoc
create mode 100644 docs/doc_examples/571314a948e49f1f9614d36fcf79392a.asciidoc
create mode 100644 docs/doc_examples/578808065fee8691355b8f25c35782cd.asciidoc
create mode 100644 docs/doc_examples/5797df4b8e71d821a1488cbb63481104.asciidoc
create mode 100644 docs/doc_examples/57a3e8d2ca64e37e90d658c4cd935399.asciidoc
create mode 100644 docs/doc_examples/57c690f8fa95bacf4b250803be7467e4.asciidoc
create mode 100644 docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc
create mode 100644 docs/doc_examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc
create mode 100644 docs/doc_examples/582c4b05401dbc190b19411282d85310.asciidoc
create mode 100644 docs/doc_examples/582da02c09e0597b4396c87e33571e7b.asciidoc
create mode 100644 docs/doc_examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc
create mode 100644 docs/doc_examples/584f502cf840134f2db5f39e2483ced1.asciidoc
create mode 100644 docs/doc_examples/585a34ad79aee16678b37da785933ac8.asciidoc
create mode 100644 docs/doc_examples/585b19369cb9b9763a7e8d405f009a47.asciidoc
create mode 100644 docs/doc_examples/5865ca8d2bcd087ed5dbee33fafee57f.asciidoc
create mode 100644 docs/doc_examples/586cfa0e5fd695b7d451e854f9fb4a9c.asciidoc
delete mode 100644 docs/doc_examples/58b5003c0a53a39bf509aa3797aad471.asciidoc
create mode 100644 docs/doc_examples/58ca855be30049f8f0879e532db51ee2.asciidoc
create mode 100644 docs/doc_examples/58e684e0b771b4646662fe12d3060c05.asciidoc
create mode 100644 docs/doc_examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc
create mode 100644 docs/doc_examples/591c7fb7451069829a14bba593136f1f.asciidoc
create mode 100644 docs/doc_examples/5969c446688c8b326acc80276573e9d2.asciidoc
create mode 100644 docs/doc_examples/59726e3c90e1218487a781508788c243.asciidoc
create mode 100644 docs/doc_examples/597d456edfcb3d410954a3e9b5babf9a.asciidoc
create mode 100644 docs/doc_examples/5987afb2c17c73fe3d860937565ef115.asciidoc
create mode 100644 docs/doc_examples/599454613ac699d447537e79e65ae35a.asciidoc
create mode 100644 docs/doc_examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc
create mode 100644 docs/doc_examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc
create mode 100644 docs/doc_examples/59d015f7bd0eeab40d0885010a62fa70.asciidoc
create mode 100644 docs/doc_examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc
create mode 100644 docs/doc_examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc
create mode 100644 docs/doc_examples/5a006feed86309b547bbaa1baca1c496.asciidoc
create mode 100644 docs/doc_examples/5a3855f1b3e37d89ab7cbcc4f7ae1dd3.asciidoc
create mode 100644 docs/doc_examples/5a3fe9584d203d1fd6c96981ba34e0de.asciidoc
create mode 100644 docs/doc_examples/5a6bb9ac6830668ecc00550c1aa8f2f1.asciidoc
create mode 100644 docs/doc_examples/5a754dcc854b9154296550a0b581cb9d.asciidoc
create mode 100644 docs/doc_examples/5a7f05ab1d05b4eef5ff327168517165.asciidoc
create mode 100644 docs/doc_examples/5ab9b44939fb30f5b4adbdcc4bcc0733.asciidoc
create mode 100644 docs/doc_examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc
create mode 100644 docs/doc_examples/5afbd9caed88c32f8a2968c07054f096.asciidoc
create mode 100644 docs/doc_examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc
create mode 100644 docs/doc_examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc
create mode 100644 docs/doc_examples/5b1ae98ad03e2819fc7c3468840ef448.asciidoc
create mode 100644 docs/doc_examples/5b266deba5396c7810af1b8315c23596.asciidoc
create mode 100644 docs/doc_examples/5b281956e35a26e734c482b42b356c0d.asciidoc
create mode 100644 docs/doc_examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc
create mode 100644 docs/doc_examples/5b3384992c398ea8a3064d2e08725e2b.asciidoc
create mode 100644 docs/doc_examples/5b58007f10700ec7934580f034404652.asciidoc
create mode 100644 docs/doc_examples/5b6bc085943e9189236d98b3c05ed62c.asciidoc
create mode 100644 docs/doc_examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc
create mode 100644 docs/doc_examples/5b8119b4d9a09f4643be5a5b40875c8f.asciidoc
create mode 100644 docs/doc_examples/5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc
create mode 100644 docs/doc_examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc
create mode 100644 docs/doc_examples/5bb0d84185df2f276f01bb2fba709e1a.asciidoc
create mode 100644 docs/doc_examples/5bbccf103107e505c17ae59863753efd.asciidoc
create mode 100644 docs/doc_examples/5c187ba92dd1678fda86b5eec8cc7421.asciidoc
create mode 100644 docs/doc_examples/5c22172a944864a7d138decdc08558b4.asciidoc
create mode 100644 docs/doc_examples/5c249eaeb99e6aee07162128288ac1b1.asciidoc
create mode 100644 docs/doc_examples/5c24a9a0ddbfa50628dacdb9d25f7ab0.asciidoc
create mode 100644 docs/doc_examples/5c6fbeac20dc23b613847f35d431ecab.asciidoc
create mode 100644 docs/doc_examples/5c7ece1f30267adabdb832424871900a.asciidoc
create mode 100644 docs/doc_examples/5c8ac24dd56e85d8f3f6705ec3c6dc32.asciidoc
create mode 100644 docs/doc_examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc
create mode 100644 docs/doc_examples/5cd792dff7d5891c33bef098d9338ce1.asciidoc
create mode 100644 docs/doc_examples/5cfab507e50d8c5182939412a9dbcdc8.asciidoc
rename docs/doc_examples/{d3016e4e8025362ad9a05ee86bb2061f.asciidoc => 5d03bb385904d20c5323885706738459.asciidoc} (56%)
delete mode 100644 docs/doc_examples/5d32279dcd52b22d9e1178a02a3ad957.asciidoc
create mode 100644 docs/doc_examples/5d428ea66252fd252b6a8d6f47605c86.asciidoc
create mode 100644 docs/doc_examples/5d5b06468c54308f52c212cca5d58fef.asciidoc
create mode 100644 docs/doc_examples/5d5cdbd4c5c62a90ff2a39cba4a59368.asciidoc
create mode 100644 docs/doc_examples/5d689d74062cddd01a0711a2fa7f23fd.asciidoc
create mode 100644 docs/doc_examples/5d7980d8c745abf7ea0fa573e818bd5b.asciidoc
create mode 100644 docs/doc_examples/5db5349162a4fbe74bffb646926a2495.asciidoc
create mode 100644 docs/doc_examples/5dbf06ca9058843f572676fcaf587f75.asciidoc
delete mode 100644 docs/doc_examples/5dd695679b5141d9142d3d30ba8d300a.asciidoc
create mode 100644 docs/doc_examples/5ddc26da6e163fda54f52d33b5157051.asciidoc
rename docs/doc_examples/{311c4b632a29b9ead63b02d01f10096b.asciidoc => 5df3226fdc8f1f66ae92ba2f527af8c0.asciidoc} (64%)
create mode 100644 docs/doc_examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc
create mode 100644 docs/doc_examples/5dfe24287bb930ad33345caf092a004b.asciidoc
create mode 100644 docs/doc_examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc
create mode 100644 docs/doc_examples/5e124875d97c27362ae858160ae1c6d5.asciidoc
create mode 100644 docs/doc_examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc
create mode 100644 docs/doc_examples/5e2f7097eb299de553d0fa0087d70a59.asciidoc
create mode 100644 docs/doc_examples/5e3673bcbef5731746e400c4f3fe134d.asciidoc
create mode 100644 docs/doc_examples/5e415c490a46358643ee2aab554b4876.asciidoc
create mode 100644 docs/doc_examples/5e47a407b6ca29dadf6eac5ab1d71163.asciidoc
create mode 100644 docs/doc_examples/5e6419bc3e2db0d0f05bce58d8cc9215.asciidoc
create mode 100644 docs/doc_examples/5e87dd38ac3a0fd59ad794005b16d13e.asciidoc
create mode 100644 docs/doc_examples/5e9a7845e60b79685aab59877c5fbd1a.asciidoc
rename docs/doc_examples/{46c4b0dfb674825f9579203d41e7f404.asciidoc => 5ea9da129ca70a5fe534f27a82d80b29.asciidoc} (52%)
create mode 100644 docs/doc_examples/5f031b7bd2b7d98d2d10df7420d269ff.asciidoc
create mode 100644 docs/doc_examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc
delete mode 100644 docs/doc_examples/5f210f74725ea0c9265190346edfa246.asciidoc
create mode 100644 docs/doc_examples/5f3373887e8d3dc31239b687a5151449.asciidoc
delete mode 100644 docs/doc_examples/5f3a3eefeefe6fa85ec49d499212d245.asciidoc
create mode 100644 docs/doc_examples/5f72ab800c3db9d118df95e2a378d411.asciidoc
create mode 100644 docs/doc_examples/5f79c42b0f74fdf71359cef82843fad3.asciidoc
create mode 100644 docs/doc_examples/5f7b59d4fad0bdce6b09abb520ddb51d.asciidoc
create mode 100644 docs/doc_examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc
create mode 100644 docs/doc_examples/5f8d90515995a5eee189d722abe3b111.asciidoc
create mode 100644 docs/doc_examples/5f8fb5513d4f725434db2f517ad4298f.asciidoc
rename docs/doc_examples/{5925c23a173a63bdb30b458248d1df76.asciidoc => 5faa121e00a0582160b2adb2b72fed67.asciidoc} (71%)
create mode 100644 docs/doc_examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc
create mode 100644 docs/doc_examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc
create mode 100644 docs/doc_examples/5ffe6fd303400e8678fa1ead291e237f.asciidoc
create mode 100644 docs/doc_examples/600d33c80f8872dda85c87ed41da95fd.asciidoc
create mode 100644 docs/doc_examples/6013ed65d2058da5ce704b47a504b60a.asciidoc
create mode 100644 docs/doc_examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc
create mode 100644 docs/doc_examples/60299454aa19fec15a604a0dd06fe522.asciidoc
create mode 100644 docs/doc_examples/602e04051c092cf77de2f75a563661b8.asciidoc
create mode 100644 docs/doc_examples/604da59fe41160efa10a846a9dacc07a.asciidoc
create mode 100644 docs/doc_examples/6061aadb3b870791278212d1e8f52b39.asciidoc
create mode 100644 docs/doc_examples/608cadc6b8a3f194612b69279ccc96de.asciidoc
create mode 100644 docs/doc_examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc
create mode 100644 docs/doc_examples/60a9aa5dcde9023901f6ff27231a10c4.asciidoc
create mode 100644 docs/doc_examples/60b0fc1b6ae418621ff1b31591fa1fce.asciidoc
create mode 100644 docs/doc_examples/60cab62af1540db2ad3b696b0ee1d7a8.asciidoc
create mode 100644 docs/doc_examples/60d689aae3f8de1e6830329dfd69a6a6.asciidoc
delete mode 100644 docs/doc_examples/60ee33f3acfdd0fe6f288ac77312c780.asciidoc
rename docs/doc_examples/{1c23507edd7a3c18538b68223378e4ab.asciidoc => 60f889fbed5df3185444f7015b48ed76.asciidoc} (77%)
create mode 100644 docs/doc_examples/610f629d0486a64546d62402a0a5e00f.asciidoc
create mode 100644 docs/doc_examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc
create mode 100644 docs/doc_examples/612c2e975f833de9815651135735eae5.asciidoc
delete mode 100644 docs/doc_examples/6138d6919f3cbaaf61e1092f817d295c.asciidoc
create mode 100644 docs/doc_examples/618c9d42284c067891fb57034a4fd834.asciidoc
delete mode 100644 docs/doc_examples/618d5f3d35921d8cb7e9ccfbe9a4c3e3.asciidoc
create mode 100644 docs/doc_examples/61bf6ac15ae3e22323454a9a2872a2fa.asciidoc
rename docs/doc_examples/{b0ec418bf416c62bed602b0a32a6d5f5.asciidoc => 61c49cee90c6aa0eafbdd5cc03936e7d.asciidoc} (68%)
create mode 100644 docs/doc_examples/61d6b9503459914c436930c3ae87d454.asciidoc create mode 100644 docs/doc_examples/61e38e95191f4dde791070c6fce8a092.asciidoc create mode 100644 docs/doc_examples/621f4553e24592d40c8cdbbdfaeb027e.asciidoc create mode 100644 docs/doc_examples/6220087321e6d288024a70c6b09bd720.asciidoc create mode 100644 docs/doc_examples/6244204213f60edf2f23295f9059f2c9.asciidoc create mode 100644 docs/doc_examples/624e69dedf42c4877234b87ec1d00068.asciidoc delete mode 100644 docs/doc_examples/625dc94df1f9affb49a082fd99d41620.asciidoc delete mode 100644 docs/doc_examples/626f8c4b3e2cd3d9beaa63a7f5799d7a.asciidoc create mode 100644 docs/doc_examples/62c311e7ab4de8b79e532929a5069975.asciidoc create mode 100644 docs/doc_examples/62ccee6ad356428c2d625742f961ceb7.asciidoc create mode 100644 docs/doc_examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc create mode 100644 docs/doc_examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc create mode 100644 docs/doc_examples/62f1ec1bb5cc5a9c2efd536a7474f549.asciidoc create mode 100644 docs/doc_examples/630d127ccedd25a6cff31ea098ac2847.asciidoc create mode 100644 docs/doc_examples/6326f5c6fd2a6e6b1aff9a643b94f455.asciidoc create mode 100644 docs/doc_examples/633c8a9fc57268979d8735c557705809.asciidoc create mode 100644 docs/doc_examples/634ecacf14b83c5f0bb8b6273cf6418e.asciidoc create mode 100644 docs/doc_examples/63521e0089c631d6668c44a0a9d7fdcc.asciidoc create mode 100644 docs/doc_examples/6352e846bb83725ae6d853aa64d8697d.asciidoc create mode 100644 docs/doc_examples/6365312d470426cab1b77e9ffde49170.asciidoc create mode 100644 docs/doc_examples/636ee2066450605247ec1f68d04b8ee4.asciidoc create mode 100644 docs/doc_examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc rename docs/doc_examples/{b9c5d7ca6ca9c6f747201f45337a4abf.asciidoc => 63cc960215ae83b359c12df3c0993bfa.asciidoc} (64%) create mode 100644 docs/doc_examples/63d1c07d22a3ca3b0ec6d950547c011c.asciidoc create mode 100644 docs/doc_examples/63e20883732ec30b5400046be2efb0f1.asciidoc create mode 100644 docs/doc_examples/63ecdab34940af053acc409164914c32.asciidoc create mode 100644 docs/doc_examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc create mode 100644 docs/doc_examples/640a89d0b39630269433425ff476faf3.asciidoc create mode 100644 docs/doc_examples/640da6dd719a34975b5627dfa5fcdd55.asciidoc create mode 100644 docs/doc_examples/640e4f2c2d29f9851320a70927bd7a6c.asciidoc create mode 100644 docs/doc_examples/641009f2147e1ca56215c701f45c970b.asciidoc create mode 100644 docs/doc_examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc create mode 100644 docs/doc_examples/641f75862c70e25e79d249d9e0a79f03.asciidoc create mode 100644 docs/doc_examples/642161d70dacf7d153767d37d3726838.asciidoc create mode 100644 docs/doc_examples/642c0c1c76e9bf226cd216ebae9ab958.asciidoc create mode 100644 docs/doc_examples/643b9506d1129d5215f9a1bb0b509aba.asciidoc create mode 100644 docs/doc_examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc delete mode 100644 docs/doc_examples/645136747d37368a14ab34de8bd046c6.asciidoc create mode 100644 docs/doc_examples/645433e8e479e5d71c100f66dd2de5d0.asciidoc delete mode 100644 docs/doc_examples/645796e8047967ca4a7635a22a876f4c.asciidoc delete mode 100644 docs/doc_examples/645c4c6e209719d3a4d25b1a629cb23b.asciidoc create mode 100644 docs/doc_examples/64622409407316d2d47094e692d9b516.asciidoc create mode 100644 docs/doc_examples/646d71869f1a18c5bede7759559bfc47.asciidoc create mode 100644 docs/doc_examples/6490d89a4e43cac5e6b9bc19840d5478.asciidoc create mode 100644 
docs/doc_examples/64a6fb4bcb8cfea139a0e5d3765c063a.asciidoc create mode 100644 docs/doc_examples/64a79861225553799b26e118d7851dcc.asciidoc create mode 100644 docs/doc_examples/64aff98cf477555e7411714c17006572.asciidoc delete mode 100644 docs/doc_examples/64b9baa6d7556b960b29698f3383aa31.asciidoc create mode 100644 docs/doc_examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc create mode 100644 docs/doc_examples/64c804869ddfbcb9075817d0bbf71b5c.asciidoc create mode 100644 docs/doc_examples/64ca2ccb79a8f4add5b8fe2d3322ae92.asciidoc create mode 100644 docs/doc_examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc create mode 100644 docs/doc_examples/64ffaa6814ec1ec4f59b8f33b47cffb4.asciidoc create mode 100644 docs/doc_examples/650a0fb27c66a790c4687267423af1da.asciidoc create mode 100644 docs/doc_examples/6521c3578dc4ad4a6db697700986e78e.asciidoc create mode 100644 docs/doc_examples/653c0d0ef146c997ef6bc6450d4f5f94.asciidoc create mode 100644 docs/doc_examples/654882f545eca8d7047695f867c63072.asciidoc create mode 100644 docs/doc_examples/65578c390837cb4c0fcc77fb17857714.asciidoc create mode 100644 docs/doc_examples/657cf67bbc48f3b8c7fa15e275a5ef72.asciidoc create mode 100644 docs/doc_examples/658842bf41e0fcb7969937155946a0ff.asciidoc create mode 100644 docs/doc_examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc create mode 100644 docs/doc_examples/65c671fbecdb5b0d75c13d63f87e36f0.asciidoc create mode 100644 docs/doc_examples/65e892a362d940e4a74965f21c15ca09.asciidoc create mode 100644 docs/doc_examples/6606d46685d10377b996b5f20f1229b5.asciidoc create mode 100644 docs/doc_examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc create mode 100644 docs/doc_examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc create mode 100644 docs/doc_examples/666c420fe61fa122386da3c356a64943.asciidoc create mode 100644 docs/doc_examples/6689aa213884196b47a6f482d4993749.asciidoc create mode 100644 docs/doc_examples/6693f0ffa0de3229b5dedda197810e70.asciidoc create mode 100644 docs/doc_examples/669773766b041be768003055ad523038.asciidoc create mode 100644 docs/doc_examples/6705eca2095ade294548cfb25bf2dd86.asciidoc rename docs/doc_examples/{53d938c754f36a912fcbe6473abb463f.asciidoc => 67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc} (59%) create mode 100644 docs/doc_examples/672d30eb3af573140d966e88b14814f8.asciidoc create mode 100644 docs/doc_examples/6742a8cd0b7b4c1c325ce2f22faf6cb4.asciidoc create mode 100644 docs/doc_examples/674bb755111c6fbaa4c5ac759395c122.asciidoc create mode 100644 docs/doc_examples/67967388db610dcb9d24fb59ede348d8.asciidoc create mode 100644 docs/doc_examples/67a1f31cf60773a2378c2c30723c4b96.asciidoc create mode 100644 docs/doc_examples/67a490d749a0c3bb16a266663423893d.asciidoc create mode 100644 docs/doc_examples/67a55ac3aaee09f4aeeb7d2763da3335.asciidoc create mode 100644 docs/doc_examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc create mode 100644 docs/doc_examples/67bab07fda27ef77e3bc948211051a33.asciidoc create mode 100644 docs/doc_examples/67c3808751223eef69a57e6fd02ddf4f.asciidoc delete mode 100644 docs/doc_examples/67ceac4bf2d9ac7cc500390544cdcb41.asciidoc create mode 100644 docs/doc_examples/67ffa135c50c43d6788636c88078c7d1.asciidoc create mode 100644 docs/doc_examples/682336e5232c9ad3d866cb203d1c58c1.asciidoc create mode 100644 docs/doc_examples/6843d859e2965d17cad4f033c81db83f.asciidoc create mode 100644 docs/doc_examples/6856f7c6a732ab55ca71c1ee2ec2bbad.asciidoc create mode 100644 docs/doc_examples/6859530dd9d85e59bd33a53ec96a3836.asciidoc create mode 100644 
docs/doc_examples/686bc640b877de845c46bef372a9866c.asciidoc create mode 100644 docs/doc_examples/6884454f57c3a41059037ea762f48d77.asciidoc create mode 100644 docs/doc_examples/68a891f609ca3a379d2d64e4914f3067.asciidoc create mode 100644 docs/doc_examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc create mode 100644 docs/doc_examples/68cb8a452e780ca78b0cb761be3629af.asciidoc create mode 100644 docs/doc_examples/691fe20d467324ed43a36fd15852c492.asciidoc create mode 100644 docs/doc_examples/692606cc6d6462becc321d92961a3bac.asciidoc create mode 100644 docs/doc_examples/69582847099ee62ed34feddfaba83ef6.asciidoc create mode 100644 docs/doc_examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc create mode 100644 docs/doc_examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc delete mode 100644 docs/doc_examples/69a7be47f85138b10437113ab2f0d72d.asciidoc create mode 100644 docs/doc_examples/69ab708fe65a75f870223d2289c3d171.asciidoc rename docs/doc_examples/{54a770f053f3225ea0d1e34334232411.asciidoc => 69c07cfdf8054c301cd6186c5d71aa02.asciidoc} (72%) create mode 100644 docs/doc_examples/69d5710bdec73041c66f21d5f96637e8.asciidoc create mode 100644 docs/doc_examples/69d9b8fd364596aa37eae6864d8a6d89.asciidoc create mode 100644 docs/doc_examples/69daf5ec2a9bc07096e1833286c36076.asciidoc create mode 100644 docs/doc_examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc create mode 100644 docs/doc_examples/6a350a17701e8c8158407191f2718b66.asciidoc create mode 100644 docs/doc_examples/6a3a578ce37fb2c63ccfab7f75db9bae.asciidoc create mode 100644 docs/doc_examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc create mode 100644 docs/doc_examples/6a3f06962cceb3dfd3cd4fb5c679fa75.asciidoc delete mode 100644 docs/doc_examples/6a4679531e64c492fce16dc12de6dcb0.asciidoc create mode 100644 docs/doc_examples/6a50c1c53673fe9cc3cbda38a2853cdd.asciidoc create mode 100644 docs/doc_examples/6a55dbba114c6c1408474f7e9cfdbb94.asciidoc delete mode 100644 docs/doc_examples/6a81d00f0d73bc5985e76b3cadab645e.asciidoc create mode 100644 docs/doc_examples/6a9655fe22fa5db7a540c145bcf1fb31.asciidoc create mode 100644 docs/doc_examples/6a969ebe7490d93d35be895b14e5a42a.asciidoc create mode 100644 docs/doc_examples/6aa2941855d13f365f70aa8767ecb137.asciidoc create mode 100644 docs/doc_examples/6aca241c0361d26f134712821e2d09a9.asciidoc create mode 100644 docs/doc_examples/6af9dc1c3240aa8e623ff3622bcb1b48.asciidoc create mode 100644 docs/doc_examples/6b0288acb739c4667d41339e5100c327.asciidoc create mode 100644 docs/doc_examples/6b0d492c0f50103fefeab385a7bebd01.asciidoc create mode 100644 docs/doc_examples/6b104a66ab47fc1e1f24a5738f82feb4.asciidoc create mode 100644 docs/doc_examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc create mode 100644 docs/doc_examples/6b1e837a8469eca2d03d5c36f5910f34.asciidoc create mode 100644 docs/doc_examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc create mode 100644 docs/doc_examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc create mode 100644 docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc create mode 100644 docs/doc_examples/6b77795e9249c8d9865f7a49fd86a863.asciidoc create mode 100644 docs/doc_examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc create mode 100644 docs/doc_examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc delete mode 100644 docs/doc_examples/6be70810d6ebd6f09d8a49f9df847765.asciidoc delete mode 100644 docs/doc_examples/6bf63f2ec6ba55fcaf1092f48212bf25.asciidoc create mode 100644 docs/doc_examples/6bfa0a9a50c4e94276c7d63af1c31d9e.asciidoc create mode 100644 
docs/doc_examples/6c00dae1a456ae5e854e98e895dca2ab.asciidoc create mode 100644 docs/doc_examples/6c0acbff2df9003ccaf4350c9e2e186e.asciidoc create mode 100644 docs/doc_examples/6c3f7c8601e8cc13d36eef98a5e2cb34.asciidoc create mode 100644 docs/doc_examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc create mode 100644 docs/doc_examples/6c72460570307f23478100db04a84c8e.asciidoc create mode 100644 docs/doc_examples/6c72f6791ba9223943f7556c5bfaa728.asciidoc create mode 100644 docs/doc_examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc create mode 100644 docs/doc_examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc create mode 100644 docs/doc_examples/6cd083045bf06e80b83889a939a18451.asciidoc create mode 100644 docs/doc_examples/6ce8334def48552ba7d44025580d9105.asciidoc create mode 100644 docs/doc_examples/6cf3307c00f464c46475e352e067d714.asciidoc create mode 100644 docs/doc_examples/6d48f83c4a36d0544d876d3eff48dcef.asciidoc create mode 100644 docs/doc_examples/6d81c749ff9554044ee5f3ad92dcb89a.asciidoc create mode 100644 docs/doc_examples/6db118771354792646229e7a3c30c7e9.asciidoc create mode 100644 docs/doc_examples/6dbfe5565a95508e65d304131847f9fc.asciidoc create mode 100644 docs/doc_examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc create mode 100644 docs/doc_examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc create mode 100644 docs/doc_examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc create mode 100644 docs/doc_examples/6ddd4e657efbf45def430a6419825796.asciidoc create mode 100644 docs/doc_examples/6e000496a1fa8b57148518eaad692f35.asciidoc create mode 100644 docs/doc_examples/6e0b675eff7ed73c09a76a415930a486.asciidoc create mode 100644 docs/doc_examples/6e1157f3184fa192d47a3d0e3ea17a6c.asciidoc create mode 100644 docs/doc_examples/6e1ae8d6103e0b77f14fb0ea1bfb7ffa.asciidoc create mode 100644 docs/doc_examples/6e86225ed4a6e3be8078b83ef301f731.asciidoc create mode 100644 docs/doc_examples/6ea062455229151e311869a81ee40252.asciidoc create mode 100644 docs/doc_examples/6edfc35a66afd9b884431fccf48fdbf5.asciidoc create mode 100644 docs/doc_examples/6eead05dd3b04722ef0ea5644c2e047d.asciidoc create mode 100644 docs/doc_examples/6f0389ac52808df23bb6081a1acd4eed.asciidoc create mode 100644 docs/doc_examples/6f07152055e99416deb10e95b428b847.asciidoc delete mode 100644 docs/doc_examples/6f21a878fee3b43c5332b81aaddbeac7.asciidoc create mode 100644 docs/doc_examples/6f34e27481460a95e59ffbacb76bd637.asciidoc create mode 100644 docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc create mode 100644 docs/doc_examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc create mode 100644 docs/doc_examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc create mode 100644 docs/doc_examples/6f5adbd55a3a2760e7fe9d32df18b1a1.asciidoc create mode 100644 docs/doc_examples/6f6d5a4a90e1265822628d4ced963639.asciidoc create mode 100644 docs/doc_examples/6f842819c50e8490080dd085e0c6aca3.asciidoc create mode 100644 docs/doc_examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc create mode 100644 docs/doc_examples/6f8a682c908b826ca90cadd9d2f582b4.asciidoc create mode 100644 docs/doc_examples/6fa570ae7039171e2ab722344ec1063f.asciidoc delete mode 100644 docs/doc_examples/6faf10a73f7d5fffbcb037bdb2cbaff8.asciidoc create mode 100644 docs/doc_examples/6fbb88f399618e1b47412082062ce2bd.asciidoc create mode 100644 docs/doc_examples/6fbbf40cab0187f544ff7bca31d18d57.asciidoc create mode 100644 docs/doc_examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc create mode 100644 docs/doc_examples/6fd82baa17a48e09e3d2eed514af7f46.asciidoc create mode 100644 
docs/doc_examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc create mode 100644 docs/doc_examples/6febf0e6883b23b15ac213abc4bac326.asciidoc create mode 100644 docs/doc_examples/7011fcdd231804f9c3894154ae2c3fbc.asciidoc create mode 100644 docs/doc_examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc create mode 100644 docs/doc_examples/7021ddb273a3a00847324d2f670c4c04.asciidoc create mode 100644 docs/doc_examples/7067a498bb6c788854a26443a64b843a.asciidoc create mode 100644 docs/doc_examples/708e7ec681be41791f232817a07cda82.asciidoc create mode 100644 docs/doc_examples/70bbe14bc4d5a5d58e81ab2b02408817.asciidoc create mode 100644 docs/doc_examples/70c736ecb3746dbe839af0e468712805.asciidoc create mode 100644 docs/doc_examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc delete mode 100644 docs/doc_examples/70f0aa5853697e265ef3b1df72940951.asciidoc create mode 100644 docs/doc_examples/70f89dd6b71ea890ad3cf47d83e43344.asciidoc create mode 100644 docs/doc_examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc delete mode 100644 docs/doc_examples/710c7871f20f176d51209b1574b0d61b.asciidoc create mode 100644 docs/doc_examples/711443504b69d0d296e717c716a223e2.asciidoc create mode 100644 docs/doc_examples/7148c8512079d378af70302e65502dd2.asciidoc create mode 100644 docs/doc_examples/719141517d83b7e8e929b347a8d67c9f.asciidoc delete mode 100644 docs/doc_examples/71b5b2ba9557d0f296ff2de91727d2f6.asciidoc create mode 100644 docs/doc_examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc create mode 100644 docs/doc_examples/71de08a2d962c66f0c60677eff23f8d1.asciidoc create mode 100644 docs/doc_examples/71e47a83f632ef159956287bbfe4ca12.asciidoc create mode 100644 docs/doc_examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc create mode 100644 docs/doc_examples/722238b4e7b78cdb3c6a986780e7e286.asciidoc delete mode 100644 docs/doc_examples/72231b7debac60c95b9869a97dafda3a.asciidoc create mode 100644 docs/doc_examples/726994d8f3793b86628255a797155a52.asciidoc create mode 100644 docs/doc_examples/72a3668ddc95d9aec47cc679d1e7afc5.asciidoc create mode 100644 docs/doc_examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc create mode 100644 docs/doc_examples/72b999120785dfba2827268482e9be0a.asciidoc create mode 100644 docs/doc_examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc rename docs/doc_examples/{98f14fddddea54a7d6149ab7b92e099d.asciidoc => 72d33fbd72b0766b2f14ea27d9ccf0fa.asciidoc} (80%) create mode 100644 docs/doc_examples/73250f845738c428246a3ade66a8f54c.asciidoc delete mode 100644 docs/doc_examples/734c2e2a1e45b84f1e4e65b51356fcd7.asciidoc create mode 100644 docs/doc_examples/734e2b1d1ca84a305240a449738f0eba.asciidoc create mode 100644 docs/doc_examples/73646c12ad33a813ab2280f1dc83500e.asciidoc create mode 100644 docs/doc_examples/738db420e3ad2a127ea75fb8e5051926.asciidoc create mode 100644 docs/doc_examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc create mode 100644 docs/doc_examples/73be1f93d789264e5b972ddb5991bc66.asciidoc create mode 100644 docs/doc_examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc create mode 100644 docs/doc_examples/73df03be6ee78b10106581dbd7cb39ef.asciidoc delete mode 100644 docs/doc_examples/73e5c88ad1488b213fb278ee1cb42289.asciidoc create mode 100644 docs/doc_examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc create mode 100644 docs/doc_examples/73f9271dee9b8539b6aa7e17f323c623.asciidoc create mode 100644 docs/doc_examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc create mode 100644 docs/doc_examples/7404c6e809fee5d7eb9678a82a872806.asciidoc create mode 100644 
docs/doc_examples/741180473ba526219578ad0422f4fe81.asciidoc create mode 100644 docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc create mode 100644 docs/doc_examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc create mode 100644 docs/doc_examples/7456ef459d510d66ba4492cc9fbdc6c6.asciidoc delete mode 100644 docs/doc_examples/745f9b8cdb8e91073f6e520e1d9f8c05.asciidoc create mode 100644 docs/doc_examples/74678f8bbc7e4fc1885719d1cf63ac67.asciidoc create mode 100644 docs/doc_examples/746e0a1cb5984f2672963b363505c7b3.asciidoc create mode 100644 docs/doc_examples/746e87db7e1e8b5e6b40d8b5b188de42.asciidoc create mode 100644 docs/doc_examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc delete mode 100644 docs/doc_examples/7477671958734843dd67cf0b8e6c7515.asciidoc create mode 100644 docs/doc_examples/747a4b5001423938d7d05399d28f1995.asciidoc rename docs/doc_examples/{98234499cfec70487cec5d013e976a84.asciidoc => 74a80c28737a0648db0dfe7f049d12f2.asciidoc} (74%) create mode 100644 docs/doc_examples/74b13ceb6cda3acaa9e9f58c9e5e2431.asciidoc create mode 100644 docs/doc_examples/74b229a6e020113e5749099451979c89.asciidoc create mode 100644 docs/doc_examples/74da377bccad43da2b0e276c086d26ba.asciidoc delete mode 100644 docs/doc_examples/75330ec1305d2beb0e2f34d2195464e2.asciidoc create mode 100644 docs/doc_examples/7594a9a85c8511701e281974cbc253e1.asciidoc create mode 100644 docs/doc_examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc create mode 100644 docs/doc_examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc create mode 100644 docs/doc_examples/75c347b181112d2c4538c01ade903afe.asciidoc create mode 100644 docs/doc_examples/75e13a00f0909c955031ff62acc14a79.asciidoc create mode 100644 docs/doc_examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc create mode 100644 docs/doc_examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc create mode 100644 docs/doc_examples/763ce1377c8dfa1ca6a042d8ee99f4f5.asciidoc create mode 100644 docs/doc_examples/76448aaaaa2c352bb6e09d2f83a3fbb3.asciidoc delete mode 100644 docs/doc_examples/764f9884b370cbdc82a1c5c42ed40ff3.asciidoc create mode 100644 docs/doc_examples/7659f2f2b0fbe8584b855a01638b95ed.asciidoc create mode 100644 docs/doc_examples/765c9c8b40b67a42121648045dbf10fb.asciidoc rename docs/doc_examples/{0be2c28ee65384774b1e479b47dc3d92.asciidoc => 766cfc1c9fcd2c186e965761ceb2c07d.asciidoc} (65%) create mode 100644 docs/doc_examples/769f75829a8e6670aa4cf83d0d737046.asciidoc create mode 100644 docs/doc_examples/76b279835936ee4b546a171c671c3cd7.asciidoc create mode 100644 docs/doc_examples/76bc87c2592864152768687c2963d1d1.asciidoc create mode 100644 docs/doc_examples/76c167d8ab305cb43b594f140c902dfe.asciidoc create mode 100644 docs/doc_examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc create mode 100644 docs/doc_examples/7709a48020a6cefbbe547fb944541cdb.asciidoc create mode 100644 docs/doc_examples/77113c65e1755313183a8969233a5a07.asciidoc delete mode 100644 docs/doc_examples/77243bbf92f2a55e0fca6c2a349a1c15.asciidoc create mode 100644 docs/doc_examples/7741a04e7e621c528cd72848d875776d.asciidoc create mode 100644 docs/doc_examples/77447e2966708e92f5e219d43ac3f00d.asciidoc create mode 100644 docs/doc_examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc delete mode 100644 docs/doc_examples/774d715155cd13713e6e327adf6ce328.asciidoc create mode 100644 docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc create mode 100644 docs/doc_examples/7752b677825523bfb0c38ad9325a6d47.asciidoc create mode 100644 docs/doc_examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc create mode 
100644 docs/doc_examples/7777326c6052fee28061e5b82540aedc.asciidoc create mode 100644 docs/doc_examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc create mode 100644 docs/doc_examples/77828fcaecc3f058c48b955928198ff6.asciidoc create mode 100644 docs/doc_examples/77b90f6787195767b6da60d8532714b4.asciidoc create mode 100644 docs/doc_examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc create mode 100644 docs/doc_examples/77c50f982906718ecc59aa708aed728f.asciidoc create mode 100644 docs/doc_examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc create mode 100644 docs/doc_examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc create mode 100644 docs/doc_examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc create mode 100644 docs/doc_examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc create mode 100644 docs/doc_examples/783c4fa5351a242364210fc32496beb2.asciidoc rename docs/doc_examples/{c22b72c4a52ee098331b3f252c22860d.asciidoc => 7841b65a3bb880ed66cec453925a50cf.asciidoc} (64%) create mode 100644 docs/doc_examples/7846974b47a3eab1832a475663d23ad9.asciidoc create mode 100644 docs/doc_examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc create mode 100644 docs/doc_examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc create mode 100644 docs/doc_examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc create mode 100644 docs/doc_examples/790c49fe2ec638e5e8db51a9236bba35.asciidoc create mode 100644 docs/doc_examples/794d9a321b944347d2a8834a07b5eb22.asciidoc create mode 100644 docs/doc_examples/7965d4dbafdc7ca9e1ee6759939dd2e8.asciidoc create mode 100644 docs/doc_examples/79b43a1bf02fb5b38f54b8d5aa5dab53.asciidoc create mode 100644 docs/doc_examples/79bf91ace935d095d8e44b3ef3fe2efa.asciidoc create mode 100644 docs/doc_examples/79cb85efd5e4c435e73b253cb9feabb1.asciidoc create mode 100644 docs/doc_examples/79e053326a3a8eec828523a035393f66.asciidoc create mode 100644 docs/doc_examples/79e8bbbd6c440a21b0b4260c8cb1a61c.asciidoc create mode 100644 docs/doc_examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc create mode 100644 docs/doc_examples/79feb4a0c0a21b7015a52f9736cd4683.asciidoc create mode 100644 docs/doc_examples/7a0c633a67244e9703344d036e584d95.asciidoc create mode 100644 docs/doc_examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc create mode 100644 docs/doc_examples/7a23a385a63c87cab58fd494870450fd.asciidoc create mode 100644 docs/doc_examples/7a2b9a7b2b6553a48bd4db60a939c0fc.asciidoc create mode 100644 docs/doc_examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc create mode 100644 docs/doc_examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc create mode 100644 docs/doc_examples/7a8de5606f283f4ef171b015eef6befa.asciidoc create mode 100644 docs/doc_examples/7a987cd13383bdc990155d7bd5fb221e.asciidoc create mode 100644 docs/doc_examples/7ab968a61bb0783f563dd6d29b253901.asciidoc rename docs/doc_examples/{cde4dddae5c06e7f1d38c9d933dbc7ac.asciidoc => 7ae434b3667c589a8e70fe560f4ee3f9.asciidoc} (70%) create mode 100644 docs/doc_examples/7b3e913368e96eaa6e22e0d03c81310e.asciidoc create mode 100644 docs/doc_examples/7b3f255d28ce5b46d111402b96b41351.asciidoc rename docs/doc_examples/{015294a400986295039e52ebc62033be.asciidoc => 7b5c231526846f2f7b98d78f3656ae6a.asciidoc} (58%) create mode 100644 docs/doc_examples/7b7a828c21c856a3cbc41fd2f85108bf.asciidoc create mode 100644 docs/doc_examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc create mode 100644 docs/doc_examples/7b9dfe5857bde1bd8483ea3241656714.asciidoc create mode 100644 docs/doc_examples/7c24d4bef3f2045407fbf1b95c5416f9.asciidoc create mode 100644 
docs/doc_examples/7c3414279d47e9c29105d061ed316ef8.asciidoc create mode 100644 docs/doc_examples/7c4551abbb7a5f3841109f7664bc4aad.asciidoc create mode 100644 docs/doc_examples/7c5aed55a2a1dce4b63c18e1ce8146ff.asciidoc create mode 100644 docs/doc_examples/7c5e41a7c0075d87b8f8348a6efa990c.asciidoc create mode 100644 docs/doc_examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc create mode 100644 docs/doc_examples/7c9076f3e93a8f61189783c736bf6082.asciidoc create mode 100644 docs/doc_examples/7ca224d1a7de20a15c008e1b9dbda377.asciidoc create mode 100644 docs/doc_examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc rename docs/doc_examples/{dfac8d098b50aa0181161bcd17b38ef4.asciidoc => 7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc} (65%) delete mode 100644 docs/doc_examples/7cf71671859be7c1ecf673396db377cd.asciidoc create mode 100644 docs/doc_examples/7d1cbcb545aa19260073dbb2b7ef5074.asciidoc create mode 100644 docs/doc_examples/7d880157a95f64ad339225d4af71c2de.asciidoc create mode 100644 docs/doc_examples/7d9eba51a269571ae62fb8b442b373ce.asciidoc create mode 100644 docs/doc_examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc create mode 100644 docs/doc_examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc create mode 100644 docs/doc_examples/7dc6c0a6386289ac6a34105e839ced55.asciidoc create mode 100644 docs/doc_examples/7dc82f7d36686fd57a47e34cbda39a4e.asciidoc create mode 100644 docs/doc_examples/7dd481337e40f16185f3baa3fc2cce15.asciidoc create mode 100644 docs/doc_examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc create mode 100644 docs/doc_examples/7dedb148ff74912de81b8f8275f0d7f3.asciidoc create mode 100644 docs/doc_examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc create mode 100644 docs/doc_examples/7e16d21cba51eb8960835b63a1a7266a.asciidoc create mode 100644 docs/doc_examples/7e20b6e15e409b02a5e452ceddf1e1e0.asciidoc create mode 100644 docs/doc_examples/7e2b9bf4ab353c377b761101775edf93.asciidoc rename docs/doc_examples/{0cc991e3f7f8511a34730e154b3c5edc.asciidoc => 7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc} (58%) create mode 100644 docs/doc_examples/7e48648ca27024831c60b455e836c496.asciidoc create mode 100644 docs/doc_examples/7e49705769c42895fb7b1e2ca028ff47.asciidoc create mode 100644 docs/doc_examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc create mode 100644 docs/doc_examples/7e5bee18e61d950e823782da1b733903.asciidoc create mode 100644 docs/doc_examples/7e5faa551f2c95ffd627da352563d450.asciidoc create mode 100644 docs/doc_examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc create mode 100644 docs/doc_examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc create mode 100644 docs/doc_examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc create mode 100644 docs/doc_examples/7ebfb30b3ece855c1b783d9210939469.asciidoc create mode 100644 docs/doc_examples/7ed26b34ce90192a1563dcddf0e45dc0.asciidoc delete mode 100644 docs/doc_examples/7f28f8ae8fcdbd807dadde0b5b007a6d.asciidoc create mode 100644 docs/doc_examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc delete mode 100644 docs/doc_examples/7f465b7e8ed42df6c42251b4481e699e.asciidoc create mode 100644 docs/doc_examples/7f514e9e785e4323d16396359cb184f2.asciidoc delete mode 100644 docs/doc_examples/7f697eb436dfa3c30dfe610d8c32d132.asciidoc create mode 100644 docs/doc_examples/7f92ddd4e940a37d6227c43fd279c8f5.asciidoc create mode 100644 docs/doc_examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc create mode 100644 docs/doc_examples/7fbebf0fc9b4a402917a4723ad547c6a.asciidoc create mode 100644 docs/doc_examples/7fd2532f4e12e3efbc58af195060b31e.asciidoc create mode 
100644 docs/doc_examples/7fd5883564d183603e60b37d286ac7e2.asciidoc create mode 100644 docs/doc_examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc create mode 100644 docs/doc_examples/7fe2179705304af5e87eb382dca6235a.asciidoc create mode 100644 docs/doc_examples/7fe9f0a583e079f7fc6fd64d12b6e9e5.asciidoc rename docs/doc_examples/{8baccd8688a6bad1749b8935f9601ea4.asciidoc => 7fef68840761c6982c14ad7af96caf37.asciidoc} (51%) create mode 100644 docs/doc_examples/7ff4124df0541ee2496034004f4146d4.asciidoc create mode 100644 docs/doc_examples/800861c15bb33ca01a46fb97dde7537a.asciidoc create mode 100644 docs/doc_examples/803bbc14fbec0e49dfed9fab49c8a7f8.asciidoc delete mode 100644 docs/doc_examples/804a97ff4d0613e6568e4efb19c52021.asciidoc create mode 100644 docs/doc_examples/804cdf477ec829740e3d045140400c3b.asciidoc create mode 100644 docs/doc_examples/8051766cadded0892290bc2cc06e145c.asciidoc create mode 100644 docs/doc_examples/805f5550b90e75aa5cc82b90d8c6c242.asciidoc create mode 100644 docs/doc_examples/807c0c9763f8c1114b3c8278c2a0cb56.asciidoc create mode 100644 docs/doc_examples/808f4db1e2361be77dd6816c1f818139.asciidoc create mode 100644 docs/doc_examples/80dbaf28d1976dc00de3fe2018067e81.asciidoc create mode 100644 docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc create mode 100644 docs/doc_examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc create mode 100644 docs/doc_examples/812a3d7ab461d74efd9136aaf4bcf11c.asciidoc create mode 100644 docs/doc_examples/812deb6b7668c7444f3b99d843d2adc1.asciidoc create mode 100644 docs/doc_examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc create mode 100644 docs/doc_examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc create mode 100644 docs/doc_examples/81612c2537386e031b7eb604f6756a71.asciidoc create mode 100644 docs/doc_examples/8194f1fae6aa72ab91ea559daad932d4.asciidoc create mode 100644 docs/doc_examples/819e00cc6547d925d80090b94e0650d7.asciidoc create mode 100644 docs/doc_examples/81c7a392efd505b686eed978fb7d9d17.asciidoc delete mode 100644 docs/doc_examples/81c9aa2678d6166a9662ddf2c011a6a5.asciidoc create mode 100644 docs/doc_examples/81ee2ad368208c4c78098292547b0577.asciidoc create mode 100644 docs/doc_examples/81ef5774355180fc44d2a52b5182d24a.asciidoc create mode 100644 docs/doc_examples/81f1b1e1d5c81683b6bf471c469e6046.asciidoc create mode 100644 docs/doc_examples/8206a7cc615ad93fec322513b8fdd4fd.asciidoc create mode 100644 docs/doc_examples/820f689eaaef15fc07abd1073fa880f8.asciidoc create mode 100644 docs/doc_examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc create mode 100644 docs/doc_examples/821ac598f5f4a795a13f8dd0c0c4d8d6.asciidoc create mode 100644 docs/doc_examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc create mode 100644 docs/doc_examples/827b7e9308ea288f18aea00a5accc38e.asciidoc create mode 100644 docs/doc_examples/82844ef45e11c0eece100d3109db3182.asciidoc create mode 100644 docs/doc_examples/829a40d484c778a8c58340c7bf09e1d8.asciidoc create mode 100644 docs/doc_examples/82d6de3081de7b0664f44adf2942675a.asciidoc create mode 100644 docs/doc_examples/82e94b6cdf65e324575f916b3776b779.asciidoc create mode 100644 docs/doc_examples/83062a543163370328cf2e21a68c1bd3.asciidoc create mode 100644 docs/doc_examples/831f65d700577e11112c711236110f61.asciidoc create mode 100644 docs/doc_examples/8330b2ea6317769e52d0647ba434b354.asciidoc create mode 100644 docs/doc_examples/8345d2615f43a934fe1871a5120eca1d.asciidoc create mode 100644 docs/doc_examples/834764b2fba6cbb41eaabd740be75656.asciidoc create mode 100644 
docs/doc_examples/8357aa6099089940589ae3e97e7bcffa.asciidoc rename docs/doc_examples/{189a921df2f5b1fe580937210ce9c1c2.asciidoc => 83780c8f5f17eb21064c1ba6e0a7aa10.asciidoc} (61%) create mode 100644 docs/doc_examples/838a4eabebba4c06100fb37dc30c7722.asciidoc create mode 100644 docs/doc_examples/839710129a165cf93c6e329abedf9089.asciidoc create mode 100644 docs/doc_examples/839a4b2930856790e34cc9dfeb983284.asciidoc create mode 100644 docs/doc_examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc create mode 100644 docs/doc_examples/83cd4eb89818b4c32f654d370eafa920.asciidoc create mode 100644 docs/doc_examples/83d712b9ffb2e703212b762eba3c521a.asciidoc create mode 100644 docs/doc_examples/83d8c920460a12f87b9d5bf65515c367.asciidoc create mode 100644 docs/doc_examples/83dd715e45a5da097123c6d10f22f8f4.asciidoc create mode 100644 docs/doc_examples/83dfd0852101eca3ba8174c9c38b4e73.asciidoc delete mode 100644 docs/doc_examples/83f95657beca9bf5d8264c80c7fb463f.asciidoc create mode 100644 docs/doc_examples/840b6c5c3d9c56aed854cfab8da04486.asciidoc create mode 100644 docs/doc_examples/84108653e9e03b4edacd878ec870df77.asciidoc create mode 100644 docs/doc_examples/841ad0a70f4271f61f0bac0b467b59c5.asciidoc create mode 100644 docs/doc_examples/841d8b766902c8e3ae85c228a31383ac.asciidoc create mode 100644 docs/doc_examples/84243213614fe64930b1d430704afb29.asciidoc rename docs/doc_examples/{3653567181f43a5f64c74f934aa821c2.asciidoc => 84465de841fe5c6099a0382f786f2cb8.asciidoc} (55%) create mode 100644 docs/doc_examples/84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc create mode 100644 docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc create mode 100644 docs/doc_examples/8478c39c71bbb559ef6ab919f918f22b.asciidoc create mode 100644 docs/doc_examples/8494d09c39e109a012094eb9d6ec52ac.asciidoc create mode 100644 docs/doc_examples/84c61160ca815e29e9973ba1380219dd.asciidoc create mode 100644 docs/doc_examples/84c69fb07050f0e89720007a6507a221.asciidoc delete mode 100644 docs/doc_examples/84d6a777a51963629272b1be5698b091.asciidoc create mode 100644 docs/doc_examples/84e2cf7417c9e0c9e6f3c23031001440.asciidoc create mode 100644 docs/doc_examples/84edb44c5b74426f448b2baa101092d6.asciidoc create mode 100644 docs/doc_examples/84f2f0cea90340bdd041421afdb58ec3.asciidoc create mode 100644 docs/doc_examples/84f3e8524f6ff80e870c03ab71551538.asciidoc create mode 100644 docs/doc_examples/850bfd0a00d32475a54ac7f87fb4cc4d.asciidoc create mode 100644 docs/doc_examples/851f9754dbefc099c54c5423ca4565c0.asciidoc create mode 100644 docs/doc_examples/852b394d78b8c79ee0055b5501981a4b.asciidoc create mode 100644 docs/doc_examples/85479e02af00681210e17e3d0ff51e21.asciidoc create mode 100644 docs/doc_examples/85519a614ae18c998986d46bbad82b76.asciidoc create mode 100644 docs/doc_examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc create mode 100644 docs/doc_examples/856c10ad554c26b70f1121454caff40a.asciidoc create mode 100644 docs/doc_examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc create mode 100644 docs/doc_examples/858fde15fb0a0340873b123043f8c3b4.asciidoc create mode 100644 docs/doc_examples/8593715fcc70315a0816b435551258e0.asciidoc create mode 100644 docs/doc_examples/85ae90b63ecba9d2bad16144b054c0a1.asciidoc create mode 100644 docs/doc_examples/85d2e33791f1a74a69dfb04a60e69306.asciidoc create mode 100644 docs/doc_examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc create mode 100644 docs/doc_examples/85f0e5e8ab91ceab63c21dbedd9f4037.asciidoc create mode 100644 docs/doc_examples/85f2839beeb71edb66988e5c82188be0.asciidoc create mode 
100644 docs/doc_examples/85f6667f148d16d075493fddf07e2932.asciidoc create mode 100644 docs/doc_examples/8619bd17bbfe33490b1f277007f654db.asciidoc create mode 100644 docs/doc_examples/861f5f61409dc87f3671293b87839ff7.asciidoc create mode 100644 docs/doc_examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc create mode 100644 docs/doc_examples/862907653d1c18d2e80eff7f421200e2.asciidoc create mode 100644 docs/doc_examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc create mode 100644 docs/doc_examples/8634c9993485d622fb12d24f4f242264.asciidoc create mode 100644 docs/doc_examples/867f7d43a78066731ead2e223960fc07.asciidoc create mode 100644 docs/doc_examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc create mode 100644 docs/doc_examples/86926bcebf213ac182d4373027554858.asciidoc create mode 100644 docs/doc_examples/8696ba08ca6cc4992110c331732e5f47.asciidoc create mode 100644 docs/doc_examples/8699d35269a47ba867fa8cc766287413.asciidoc create mode 100644 docs/doc_examples/86c5594c4ec551391096c1abcd652b50.asciidoc create mode 100644 docs/doc_examples/8703f3b1b3895543abc36e2a7a0013d3.asciidoc create mode 100644 docs/doc_examples/871154d08efd7251cf3272e758f06acf.asciidoc create mode 100644 docs/doc_examples/8731188553e14134b0a533010318f91a.asciidoc create mode 100644 docs/doc_examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc create mode 100644 docs/doc_examples/873e2333734b1cf5ed066596e5f74b0a.asciidoc create mode 100644 docs/doc_examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc create mode 100644 docs/doc_examples/8743887d9b89ea1a2d5e780c349972cf.asciidoc create mode 100644 docs/doc_examples/87457bb3467484bec3e9df4e25942ba6.asciidoc create mode 100644 docs/doc_examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc create mode 100644 docs/doc_examples/87733deeea4b441b595d19a0f97346f0.asciidoc create mode 100644 docs/doc_examples/877ea90c663b5df9efe95717646a666f.asciidoc create mode 100644 docs/doc_examples/87846c3ddacab1da4af626ae8099e4be.asciidoc create mode 100644 docs/doc_examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc create mode 100644 docs/doc_examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc create mode 100644 docs/doc_examples/87c42ef733a50954e4d757fc0a08decc.asciidoc create mode 100644 docs/doc_examples/87d970b4944b6d742c484d7184996c8a.asciidoc create mode 100644 docs/doc_examples/87f854393d715aabf4d45e90a8eb74ce.asciidoc create mode 100644 docs/doc_examples/88195d87a350e7fff200131f410c3e88.asciidoc create mode 100644 docs/doc_examples/88341b4eba71ec722f3e38fa1696fe87.asciidoc create mode 100644 docs/doc_examples/88554b79dba8fd79991855a692b69ff9.asciidoc delete mode 100644 docs/doc_examples/8871b8fcb6de4f0c7dff22798fb10fb7.asciidoc create mode 100644 docs/doc_examples/88a08d0b15ef41324f5c23db533d47d1.asciidoc create mode 100644 docs/doc_examples/88a283dfccc481f1afba79d9b3c61f51.asciidoc create mode 100644 docs/doc_examples/88b19973b970adf9b73fca82017d4951.asciidoc create mode 100644 docs/doc_examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc create mode 100644 docs/doc_examples/88cf60d3310a56d8ae12704abc05b565.asciidoc create mode 100644 docs/doc_examples/88ec7fa6768a7e13cd2158667a69e97f.asciidoc create mode 100644 docs/doc_examples/8963fb1e3d0900ba3b68be212e8972ee.asciidoc create mode 100644 docs/doc_examples/897668edcbb0785fa5229aeb2dfc963e.asciidoc delete mode 100644 docs/doc_examples/899eef71a67a1b2aa11a2166ec7f48f1.asciidoc create mode 100644 docs/doc_examples/89a6b24618cafd60de1702a5b9f28a8d.asciidoc create mode 100644 docs/doc_examples/89aed93f641a5e243bdc3ee5cdc2acc6.asciidoc create mode 
100644 docs/doc_examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc create mode 100644 docs/doc_examples/89c57917bc7bd2e6387b5eb54ece37b1.asciidoc create mode 100644 docs/doc_examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc create mode 100644 docs/doc_examples/89dee10a24ea2727af5b00039a4271bd.asciidoc create mode 100644 docs/doc_examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc create mode 100644 docs/doc_examples/8a12cd824404d74f098d854716a26899.asciidoc create mode 100644 docs/doc_examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc create mode 100644 docs/doc_examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc delete mode 100644 docs/doc_examples/8a355eb25d2a01ba62dc1a22dd46f46f.asciidoc create mode 100644 docs/doc_examples/8a4941cae0b32d68b22bec2d12c82860.asciidoc create mode 100644 docs/doc_examples/8a617dbfe5887f8ecc8815de132b6eb0.asciidoc create mode 100644 docs/doc_examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc create mode 100644 docs/doc_examples/8aa74aee3dcf4b34028e4c5e1c1ed27b.asciidoc create mode 100644 docs/doc_examples/8ab11a25e017124a70484781ca11fb52.asciidoc delete mode 100644 docs/doc_examples/8acc1d67b152e7027e0f0e1a8b4b2431.asciidoc create mode 100644 docs/doc_examples/8b07372a21a10a16b52e70fc0c87ad4e.asciidoc create mode 100644 docs/doc_examples/8b301122cbf42be6eafeda714a36559e.asciidoc create mode 100644 docs/doc_examples/8b38eeb41eb388ee6d92f26b5c0cc48d.asciidoc create mode 100644 docs/doc_examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc create mode 100644 docs/doc_examples/8b5bc6e217b0d33e4c88d84f5c1a0712.asciidoc create mode 100644 docs/doc_examples/8b652e3205a5e9e0187f56ce3c36ae4e.asciidoc create mode 100644 docs/doc_examples/8b7956a2b88fd798a895d3466d671b58.asciidoc create mode 100644 docs/doc_examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc create mode 100644 docs/doc_examples/8bf51fd50195b46bacbf872f460ebec2.asciidoc create mode 100644 docs/doc_examples/8c2060b0272556457f4871c5d7a589fd.asciidoc create mode 100644 docs/doc_examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc create mode 100644 docs/doc_examples/8c619666488927dac6ecb7dcebca44c2.asciidoc create mode 100644 docs/doc_examples/8c693e057f6e85fbf2b56ca442719362.asciidoc create mode 100644 docs/doc_examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc create mode 100644 docs/doc_examples/8c8b5224befab7804461c7e7b6086d9a.asciidoc create mode 100644 docs/doc_examples/8c9081dc738d1290fd76071b283fcaec.asciidoc create mode 100644 docs/doc_examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc create mode 100644 docs/doc_examples/8cbf9b46ce3ccc966c4902d2e0c56317.asciidoc create mode 100644 docs/doc_examples/8cef2b98f3fe3a85874f1b48ebe6ec63.asciidoc create mode 100644 docs/doc_examples/8d064eda2199de52e5be9ee68a5b7c68.asciidoc create mode 100644 docs/doc_examples/8d421c5bec38eecce4679b219cacc9db.asciidoc create mode 100644 docs/doc_examples/8d4ca17349e7e82c329cdd854cc670a1.asciidoc create mode 100644 docs/doc_examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc create mode 100644 docs/doc_examples/8d6631b622f9bfb8fa70154f6fb8b153.asciidoc create mode 100644 docs/doc_examples/8d7193902a353872740a3324c60c5001.asciidoc delete mode 100644 docs/doc_examples/8d9a63d7c31f08bd27d92ece3de1649c.asciidoc create mode 100644 docs/doc_examples/8d9b04f2a97f4229dec9e620126de049.asciidoc create mode 100644 docs/doc_examples/8db799543eb084ec71547980863d60b9.asciidoc delete mode 100644 docs/doc_examples/8de3206f80e18185a5ad6481f4c2ee07.asciidoc create mode 100644 docs/doc_examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc create mode 
100644 docs/doc_examples/8e06d8b2b737c43806018eae2ca061c1.asciidoc create mode 100644 docs/doc_examples/8e0f43829df9af20547ea6896f4c0124.asciidoc create mode 100644 docs/doc_examples/8e208098a0156c4c92afe0a06960b230.asciidoc create mode 100644 docs/doc_examples/8e286a205a1f84f888a6d99f2620c80e.asciidoc create mode 100644 docs/doc_examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc create mode 100644 docs/doc_examples/8e42a17edace2bc6e42c6a1532779937.asciidoc create mode 100644 docs/doc_examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc create mode 100644 docs/doc_examples/8e68cdfad45e7e6dff254d931eea29d4.asciidoc create mode 100644 docs/doc_examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc create mode 100644 docs/doc_examples/8e92b10ebcfedc76562ab52d0e46b916.asciidoc create mode 100644 docs/doc_examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc create mode 100644 docs/doc_examples/8e9f7261af6264c92d0eb4d586a176f9.asciidoc create mode 100644 docs/doc_examples/8eac28d2e9b6482b413d61817456a14f.asciidoc delete mode 100644 docs/doc_examples/8eaf4d5dd4ab1335deefa7749fdbbcc3.asciidoc create mode 100644 docs/doc_examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc create mode 100644 docs/doc_examples/8ed31628081db2b6e9106d61d1e142be.asciidoc create mode 100644 docs/doc_examples/8edcd80d9b545a222dcc2f25ca4c6d5f.asciidoc create mode 100644 docs/doc_examples/8ee9521f57661a050efb614f02b4a090.asciidoc delete mode 100644 docs/doc_examples/8f0511f8a5cb176ff2afdd4311799a33.asciidoc create mode 100644 docs/doc_examples/8f0a3d7b5fbdf5351750a23c493cc078.asciidoc create mode 100644 docs/doc_examples/8f0c5c81cdb902c136db821947ee70a1.asciidoc create mode 100644 docs/doc_examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc create mode 100644 docs/doc_examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc create mode 100644 docs/doc_examples/8f7936f219500305e5b2518dbbf949ea.asciidoc create mode 100644 docs/doc_examples/8f9a3fcd17a111f63caa3bef6e5f00f2.asciidoc create mode 100644 docs/doc_examples/8f9f88cf9a27c1138226efb94ac09e73.asciidoc delete mode 100644 docs/doc_examples/8fdf2344c4fb3de6902ad7c5735270df.asciidoc create mode 100644 docs/doc_examples/8fe128323a944765f525c76d85af7a2f.asciidoc create mode 100644 docs/doc_examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc create mode 100644 docs/doc_examples/90083d93e46fad2524755b8d4d1306fc.asciidoc create mode 100644 docs/doc_examples/901d66919e584515717bf78ab5ca2cbb.asciidoc create mode 100644 docs/doc_examples/902cfd5aeec2f65b3adf55f5e38b21f0.asciidoc create mode 100644 docs/doc_examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc create mode 100644 docs/doc_examples/90631797c7fbda43902abf2cc0ea8304.asciidoc create mode 100644 docs/doc_examples/908326e14ad76c2ff04a9b6d8365751f.asciidoc create mode 100644 docs/doc_examples/909a032a9c1f7095b798444705b09ad6.asciidoc create mode 100644 docs/doc_examples/90c087560ea6c0b7405f710971c86ef0.asciidoc create mode 100644 docs/doc_examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc create mode 100644 docs/doc_examples/90f1f5304922fb6d097846dd1444c075.asciidoc create mode 100644 docs/doc_examples/9116ee8a5b00cc877291ed5559563f24.asciidoc create mode 100644 docs/doc_examples/911c56114e50ce7440eb83efc91d28b8.asciidoc create mode 100644 docs/doc_examples/9120b6a49ec39a1571339fddf8e1a26f.asciidoc create mode 100644 docs/doc_examples/91270cef57ac455547ffd47839420887.asciidoc create mode 100644 docs/doc_examples/9129dec88d35571b3166c6677297f03b.asciidoc delete mode 100644 docs/doc_examples/913770050ebbf3b9b549a899bc11060a.asciidoc create mode 
100644 docs/doc_examples/9138550002cb26ab64918cce427963b8.asciidoc create mode 100644 docs/doc_examples/913c163c197802078a8af72150178061.asciidoc create mode 100644 docs/doc_examples/9143be4f137574271953a7a8107e175b.asciidoc create mode 100644 docs/doc_examples/91750571c195718f0ff246e058e4bc63.asciidoc create mode 100644 docs/doc_examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc create mode 100644 docs/doc_examples/91c925fc71abe0ddfe52457e9130363b.asciidoc create mode 100644 docs/doc_examples/91cbeeda86b4e4e393fc79d4e3a4a781.asciidoc create mode 100644 docs/doc_examples/91ed08faaed54cb5ace9a295af937439.asciidoc create mode 100644 docs/doc_examples/9200ed8d5f798a158def4c526e41269e.asciidoc create mode 100644 docs/doc_examples/92035a2a62d01a511662af65606d5fc6.asciidoc create mode 100644 docs/doc_examples/9216e8e544e6d193eda1f59e9160a225.asciidoc create mode 100644 docs/doc_examples/922529276f87cb9d116be2468d108466.asciidoc create mode 100644 docs/doc_examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc create mode 100644 docs/doc_examples/92284d24bbb80ce6943f2ddcbf74b833.asciidoc create mode 100644 docs/doc_examples/923aee95078219ee6eb321a252e1121b.asciidoc create mode 100644 docs/doc_examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc create mode 100644 docs/doc_examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc create mode 100644 docs/doc_examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc create mode 100644 docs/doc_examples/9298aaf8232a819e79b3bf8471245e98.asciidoc create mode 100644 docs/doc_examples/92d0c12d53a900308150d572c3f2f82f.asciidoc create mode 100644 docs/doc_examples/92d343eb755971c44a939d0660bf5ac2.asciidoc create mode 100644 docs/doc_examples/92f073762634a4b2274f71002494192e.asciidoc create mode 100644 docs/doc_examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc create mode 100644 docs/doc_examples/92fe53019958ba466d1272da0834cf53.asciidoc create mode 100644 docs/doc_examples/930a3c5667e3bf47b4e8cc28e7bf8d5f.asciidoc create mode 100644 docs/doc_examples/930ba37af73dd5ff0342ecfe6c60a4e9.asciidoc create mode 100644 docs/doc_examples/931da02a06953a768f4ad3fecfd7b2df.asciidoc create mode 100644 docs/doc_examples/9334ccd09548b585cd637d7c66c5ae65.asciidoc create mode 100644 docs/doc_examples/93429d2bfbc0a9b7a4854b27e34658cf.asciidoc create mode 100644 docs/doc_examples/93444b445446c1a6033347d6267253d6.asciidoc create mode 100644 docs/doc_examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc create mode 100644 docs/doc_examples/934ced0998552cc95a28e48554147e8b.asciidoc create mode 100644 docs/doc_examples/935566d5426d44ade486a49ec5289741.asciidoc create mode 100644 docs/doc_examples/935ee7c1b86ba9592604834bb673c7a3.asciidoc create mode 100644 docs/doc_examples/936d809c848f8b77d5b55f57f0aab89a.asciidoc create mode 100644 docs/doc_examples/937089157fc82cf08b68a954d0e6d52c.asciidoc create mode 100644 docs/doc_examples/9370e4935ab6678571d3227973b8c830.asciidoc create mode 100644 docs/doc_examples/937ffc65cbb20505a8aba25b37a796a5.asciidoc create mode 100644 docs/doc_examples/9382f022086c692ba05efb0acae65946.asciidoc create mode 100644 docs/doc_examples/9399cbbd133ec2b7aad2820fa617ae3a.asciidoc create mode 100644 docs/doc_examples/93bd651aff81daa2b86f9f2089e6d088.asciidoc create mode 100644 docs/doc_examples/93cd0fdd5ca22838db06aa1cabdbe8bd.asciidoc create mode 100644 docs/doc_examples/93d7ba4130722cae04f9690e52a8f54f.asciidoc delete mode 100644 docs/doc_examples/93f1bdd72e79827dcf9a34efa02fd977.asciidoc create mode 100644 docs/doc_examples/93fb59d3204f37af952198b331fb6bb7.asciidoc create mode 
100644 docs/doc_examples/9403764e6eccad7b321b65e9a10c5727.asciidoc create mode 100644 docs/doc_examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc create mode 100644 docs/doc_examples/9410af79177dd1df9b7b16229a581e18.asciidoc create mode 100644 docs/doc_examples/941c8d05486200e835d97642e4ee05d5.asciidoc create mode 100644 docs/doc_examples/94246f45025ed394cd6415ed8d7a0588.asciidoc create mode 100644 docs/doc_examples/944806221eb89f5af2298ccdf2902277.asciidoc create mode 100644 docs/doc_examples/944a2dc22dae2a8503299926326a9c18.asciidoc rename docs/doc_examples/{d8b115341da772a628a024e7d1644e73.asciidoc => 946522c26d02bebf5c527ba28e55c724.asciidoc} (73%) create mode 100644 docs/doc_examples/9467e52087a13b63b02d78c35ff6f798.asciidoc create mode 100644 docs/doc_examples/94cd66bf93f99881c1bda547283a0357.asciidoc create mode 100644 docs/doc_examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc create mode 100644 docs/doc_examples/950f1230536422567f99a205ff4165ec.asciidoc delete mode 100644 docs/doc_examples/9524a9b7373fa4eb2905183b0e806962.asciidoc create mode 100644 docs/doc_examples/95414139c7b1203e3c2d99a354415801.asciidoc create mode 100644 docs/doc_examples/9559de0c2190f99fcc344887fc7b232a.asciidoc create mode 100644 docs/doc_examples/956cb470258024af964cd2dabbaf7c7c.asciidoc create mode 100644 docs/doc_examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc create mode 100644 docs/doc_examples/9584b042223982e0bfde8d12d42c9705.asciidoc create mode 100644 docs/doc_examples/95b3f53f2065737bbeba6199e8a12df3.asciidoc create mode 100644 docs/doc_examples/95c03bdef4faf6bef039c986f4cb3aba.asciidoc create mode 100644 docs/doc_examples/9606c271921cb800d5ea395b16d6ceaf.asciidoc create mode 100644 docs/doc_examples/9608820dbeac261ba53fb89bb9400560.asciidoc create mode 100644 docs/doc_examples/962e6187bbd71c5749376efed04b65ba.asciidoc create mode 100644 docs/doc_examples/966ff3a4c5b61ed1a36d44c17ce06157.asciidoc create mode 100644 docs/doc_examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc create mode 100644 docs/doc_examples/96b9289c3c4c6b135ab3386562c4ee8d.asciidoc delete mode 100644 docs/doc_examples/96de5703ba0bd43fd4ac239ec5408542.asciidoc create mode 100644 docs/doc_examples/96e137e42d12c180e2c702db30714a9e.asciidoc create mode 100644 docs/doc_examples/96ea0e80323d6d2d99964625c004a44d.asciidoc create mode 100644 docs/doc_examples/971c7a36ee79f2b3aa82c64ea338de70.asciidoc create mode 100644 docs/doc_examples/975b4b92464d52068516aa2f0f955cc1.asciidoc create mode 100644 docs/doc_examples/976e5f9baf81bd6ca0e9f80916a0a4f9.asciidoc create mode 100644 docs/doc_examples/97916243f245478b735471a9e37f33d1.asciidoc delete mode 100644 docs/doc_examples/979d25dff2d8987119410291ad47b0d1.asciidoc create mode 100644 docs/doc_examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc create mode 100644 docs/doc_examples/97ae2b62aa372a955278be6f660356ba.asciidoc create mode 100644 docs/doc_examples/97da68c09c9f1a97a21780fd404e213a.asciidoc create mode 100644 docs/doc_examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc create mode 100644 docs/doc_examples/97f260817b60f3deb7f7034d7dee7e12.asciidoc create mode 100644 docs/doc_examples/97f5df84efec655f479fad78bc392d4d.asciidoc create mode 100644 docs/doc_examples/983a867c90e63e070518f2f709f659ee.asciidoc create mode 100644 docs/doc_examples/983fbb78e57e8fe98db38cf2d217e943.asciidoc create mode 100644 docs/doc_examples/9851f5225150bc032fb3b195cd447f4f.asciidoc create mode 100644 docs/doc_examples/98574a419b6be603a0af8f7f22a92d23.asciidoc create mode 100644 
docs/doc_examples/98621bea4765b1b838cc9daa914bf5c5.asciidoc create mode 100644 docs/doc_examples/98855f4bda8726d5d123aeebf7869e47.asciidoc create mode 100644 docs/doc_examples/9887f65af249bbf09190b1153ea2597b.asciidoc delete mode 100644 docs/doc_examples/98aeb275f829b5f7b8eb2147701565ff.asciidoc delete mode 100644 docs/doc_examples/98b121bf47cebd85671a2cb519688d28.asciidoc create mode 100644 docs/doc_examples/98b403c356a9b14544e9b9f646845e9f.asciidoc create mode 100644 docs/doc_examples/98c1080d8630d3a18d564312300d020f.asciidoc create mode 100644 docs/doc_examples/98f43710cedd28a464e8abf4b09bcc9a.asciidoc create mode 100644 docs/doc_examples/98f7525ec0bc8945eafa008a5a9c50c0.asciidoc create mode 100644 docs/doc_examples/990c0d794ed6f05d1620b5d49f7aff6e.asciidoc create mode 100644 docs/doc_examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc create mode 100644 docs/doc_examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc create mode 100644 docs/doc_examples/99474a7e7979816c874aeac4403be5d0.asciidoc create mode 100644 docs/doc_examples/996521cef7803ef363a49ac6321ea1de.asciidoc create mode 100644 docs/doc_examples/996f320a0f537c24b9cd0d71b5f7c1f8.asciidoc create mode 100644 docs/doc_examples/99803d7b111b862c0c82e9908e549b16.asciidoc create mode 100644 docs/doc_examples/998651b98e152add530084a631a4ab5a.asciidoc create mode 100644 docs/doc_examples/99a56f423df3a0e57b7f20146f0d33b5.asciidoc create mode 100644 docs/doc_examples/99b617a0a83fcfbe5755ccc724a4ce62.asciidoc create mode 100644 docs/doc_examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc create mode 100644 docs/doc_examples/9a02bd47c000a3d9a8911233c37c890f.asciidoc create mode 100644 docs/doc_examples/9a036a792be1d39af9fd0d1adb5f3402.asciidoc create mode 100644 docs/doc_examples/9a05cc10eea1251e23b82a4549913536.asciidoc create mode 100644 docs/doc_examples/9a09d33ec11e20b6081cae882282ca60.asciidoc create mode 100644 docs/doc_examples/9a203aae3e1412d919546276fb52a5ca.asciidoc create mode 100644 docs/doc_examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc create mode 100644 docs/doc_examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc create mode 100644 docs/doc_examples/9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc create mode 100644 docs/doc_examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc create mode 100644 docs/doc_examples/9ad14a9d7bf2699e2d86b6a607d410c0.asciidoc create mode 100644 docs/doc_examples/9ad38ab4d9c3983e97e8c38fec611f10.asciidoc create mode 100644 docs/doc_examples/9ae268058c0ea32ef8926568e011c728.asciidoc create mode 100644 docs/doc_examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc create mode 100644 docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc create mode 100644 docs/doc_examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc create mode 100644 docs/doc_examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc create mode 100644 docs/doc_examples/9b345e0bfd45f3a37194585ec9193478.asciidoc create mode 100644 docs/doc_examples/9b68748c061b768c0153c1f2508ce207.asciidoc create mode 100644 docs/doc_examples/9b92266d87170e93a84f9700596d9035.asciidoc create mode 100644 docs/doc_examples/9ba6f1e64c1dfff5aac26eaa1d093f48.asciidoc create mode 100644 docs/doc_examples/9ba868784f417a8d3679b3c8ed5939ad.asciidoc rename docs/doc_examples/{38c1d0f6668e9563c0827f839f9fa505.asciidoc => 9bae72e974bdeb56007d9104e73eff92.asciidoc} (63%) create mode 100644 docs/doc_examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc create mode 100644 docs/doc_examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc create mode 100644 
docs/doc_examples/9bfdda207b701028a3439e495e800c02.asciidoc create mode 100644 docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc create mode 100644 docs/doc_examples/9c021836acf7c0370e289f611325868d.asciidoc create mode 100644 docs/doc_examples/9c4ac64e73141f6cbf2fb6da0743d9b7.asciidoc create mode 100644 docs/doc_examples/9c5cbbdbe0075ab9c2611627fe4748fb.asciidoc create mode 100644 docs/doc_examples/9c6ea5fe2339d6c7e5e4bf1b98990248.asciidoc create mode 100644 docs/doc_examples/9c7c8051592b6af3adb5d7c490849068.asciidoc create mode 100644 docs/doc_examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc create mode 100644 docs/doc_examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc create mode 100644 docs/doc_examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc create mode 100644 docs/doc_examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc create mode 100644 docs/doc_examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc rename docs/doc_examples/{a5a7050fb9dcb9574e081957ade28617.asciidoc => 9cfbc41bb7b6fbdb26550dd2789c274e.asciidoc} (50%) create mode 100644 docs/doc_examples/9d1fb129ac783355a20097effded1845.asciidoc create mode 100644 docs/doc_examples/9d31c7eaf8c6b56cee2fdfdde8a442bb.asciidoc create mode 100644 docs/doc_examples/9d461ae140ddc018efd2650559800cd1.asciidoc create mode 100644 docs/doc_examples/9d47f02a063444da9f098858a1830d28.asciidoc create mode 100644 docs/doc_examples/9d5855075e7008270459cc88c189043d.asciidoc create mode 100644 docs/doc_examples/9d662fc9f943c287b7144f5e4e2ae358.asciidoc create mode 100644 docs/doc_examples/9d67db8370a98854812d38ae73ee2a12.asciidoc create mode 100644 docs/doc_examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc create mode 100644 docs/doc_examples/9d9c8d715b72ce336e604c2c8a2b540e.asciidoc create mode 100644 docs/doc_examples/9de10a59a5f56dd0906be627896cc789.asciidoc create mode 100644 docs/doc_examples/9de4704d2f047dae1259249112488697.asciidoc create mode 100644 docs/doc_examples/9de4ea9d5f3d427a71ee07d998cb5611.asciidoc create mode 100644 docs/doc_examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc create mode 100644 docs/doc_examples/9e0e3ce27967f164f4585c5231ba9c75.asciidoc create mode 100644 docs/doc_examples/9e3c28d5820c38ea117eb2e9a5061089.asciidoc create mode 100644 docs/doc_examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc delete mode 100644 docs/doc_examples/9e56d79ad9a02b642c361f0b85dd95d7.asciidoc create mode 100644 docs/doc_examples/9e5ae957fd0663662bfbed9d1effe99e.asciidoc create mode 100644 docs/doc_examples/9e962baf1fb407c21d6c47dcd37cec29.asciidoc create mode 100644 docs/doc_examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc create mode 100644 docs/doc_examples/9eda9c39428b0c2c53cbd8ee7ae0f888.asciidoc create mode 100644 docs/doc_examples/9eef31d85ebaf6c27054d7375715dbe0.asciidoc create mode 100644 docs/doc_examples/9f04cc1a0c6cdb3ed2247f1399713767.asciidoc create mode 100644 docs/doc_examples/9f0a0029982d9b3423a2a3de1f1b5136.asciidoc create mode 100644 docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc create mode 100644 docs/doc_examples/9f22a0920cc763eefa233ced963d9624.asciidoc create mode 100644 docs/doc_examples/9f286416f1b18940f13cb27ab5c8458e.asciidoc create mode 100644 docs/doc_examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc create mode 100644 docs/doc_examples/9f66b5243050f71ed51bc787a7ac1218.asciidoc create mode 100644 docs/doc_examples/9f7671119236423e0e40801ef6485af1.asciidoc create mode 100644 docs/doc_examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc create mode 100644 
docs/doc_examples/9fa55fc76ec4bd81f372e9389f1da851.asciidoc create mode 100644 docs/doc_examples/9fda516a5dc60ba477b970eaad4429db.asciidoc create mode 100644 docs/doc_examples/9feff356f302ea4915347ab71cc4887a.asciidoc create mode 100644 docs/doc_examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc create mode 100644 docs/doc_examples/9ffe41322c095af1b6ea45a79b640a6f.asciidoc create mode 100644 docs/doc_examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc create mode 100644 docs/doc_examples/a008f42379930edc354b4074e0a33344.asciidoc create mode 100644 docs/doc_examples/a01753fa7b4ba6dc19054f4f42d91cd9.asciidoc create mode 100644 docs/doc_examples/a037beb3d02296e1d36dd43ef5c935dd.asciidoc create mode 100644 docs/doc_examples/a0497157fdefecd04e597edb800a1a95.asciidoc create mode 100644 docs/doc_examples/a04a8d90f8245ff5f30a9983909faa1d.asciidoc create mode 100644 docs/doc_examples/a0871be90badeecd2f8d8ec90230e248.asciidoc create mode 100644 docs/doc_examples/a0a7557bb7e2aff7918557cd648f41af.asciidoc create mode 100644 docs/doc_examples/a0c64894f14d28b7e0c902add71d2e9a.asciidoc rename docs/doc_examples/{ebb6b59fbc9325c17e45f524602d6be2.asciidoc => a0c868282c0514a342ad04998cdc2175.asciidoc} (58%) create mode 100644 docs/doc_examples/a0d53dcb3df938fc0a01d248571a41e4.asciidoc create mode 100644 docs/doc_examples/a0f4e902d18460337684d74ea932fbe9.asciidoc create mode 100644 docs/doc_examples/a1070cf2f5969d42d71cda057223f152.asciidoc delete mode 100644 docs/doc_examples/a116949e446f34dc25ae57d4b703d0c1.asciidoc create mode 100644 docs/doc_examples/a1377b32d7fe3680079ae0df73009b0e.asciidoc create mode 100644 docs/doc_examples/a1490f71d705053951870fd2d3bceb39.asciidoc create mode 100644 docs/doc_examples/a159143bb578403bb9c7ff37d635d7ad.asciidoc create mode 100644 docs/doc_examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc create mode 100644 docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc create mode 100644 docs/doc_examples/a180c97f8298fb2388fdcaf7b2e1b81e.asciidoc create mode 100644 docs/doc_examples/a1879930c1dac36a57d7f094a680420b.asciidoc create mode 100644 docs/doc_examples/a197076e0e74951ea88f20309ec257e2.asciidoc create mode 100644 docs/doc_examples/a1acf454bd6477183ce27ace872deb46.asciidoc create mode 100644 docs/doc_examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc create mode 100644 docs/doc_examples/a1d0603b24a5b048f0959975d8057534.asciidoc delete mode 100644 docs/doc_examples/a1db5c822745fe167e9ef854dca3d129.asciidoc create mode 100644 docs/doc_examples/a1dcc6668d13271c8207ff5ff1d35492.asciidoc create mode 100644 docs/doc_examples/a1e5884051755b5a5f4d7549f319f4c7.asciidoc create mode 100644 docs/doc_examples/a1e5f3956f9a697e79478fc9a6e30e1f.asciidoc create mode 100644 docs/doc_examples/a1f70bc71b763b58206814c40a7440e7.asciidoc create mode 100644 docs/doc_examples/a21319c9eff1ac47d7fe7490f1ef2efa.asciidoc create mode 100644 docs/doc_examples/a21a7bf052b41f5b996dc58f7b69770f.asciidoc create mode 100644 docs/doc_examples/a253a1712953f7292bdd646c48ec7fd2.asciidoc create mode 100644 docs/doc_examples/a28111cdd9b5aaea96c779cbfbf38780.asciidoc delete mode 100644 docs/doc_examples/a2a25aad1fea9a541b52ac613c78fb64.asciidoc create mode 100644 docs/doc_examples/a2abd6b6b6b6df7c574a557b5468b5e1.asciidoc rename docs/doc_examples/{073539a7e38be3cdf13008330b6a536a.asciidoc => a2b2ce031120dac49b5120b26eea8758.asciidoc} (71%) create mode 100644 docs/doc_examples/a2bab367f0e598ae27a2f4ec82e778e9.asciidoc create mode 100644 docs/doc_examples/a2bd0782aadfd0a902d7f590ee7f49fe.asciidoc create mode 
100644 docs/doc_examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc create mode 100644 docs/doc_examples/a2dabdcbb661e7690166ae6d0de27e46.asciidoc create mode 100644 docs/doc_examples/a322c8c73d6f2f5e1e375588ed20b636.asciidoc create mode 100644 docs/doc_examples/a325f31e94fb1e8739258910593504a8.asciidoc create mode 100644 docs/doc_examples/a3464bd6f0a61623562162859566b078.asciidoc create mode 100644 docs/doc_examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc create mode 100644 docs/doc_examples/a38f29375eabd0103f8d7c00b17bb0ab.asciidoc create mode 100644 docs/doc_examples/a3a14f7f0e80725f695a901a7e1d579d.asciidoc create mode 100644 docs/doc_examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc create mode 100644 docs/doc_examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc create mode 100644 docs/doc_examples/a3c8f474b0700711a356682f37e62b39.asciidoc create mode 100644 docs/doc_examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc create mode 100644 docs/doc_examples/a3cfd350c73a104b99a998c6be931408.asciidoc create mode 100644 docs/doc_examples/a3d13833714f9bb918e5e0f62a49bd0e.asciidoc create mode 100644 docs/doc_examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc rename docs/doc_examples/{2468ab381257d759d8a88af1141f6f9c.asciidoc => a3e79d6c626a490341c5b731acbb4a5d.asciidoc} (74%) create mode 100644 docs/doc_examples/a3f19f3787cb331f230cdac67ff578e8.asciidoc rename docs/doc_examples/{cfc37446bd892d1ac42a3c8e8b204e6c.asciidoc => a3f3c1f3f31dbd225da5fd14633bc4a0.asciidoc} (77%) create mode 100644 docs/doc_examples/a3f56fa16c6cc67c2db31a4ba9ca11a7.asciidoc create mode 100644 docs/doc_examples/a3f66deb467df86edbf66e1dca31da51.asciidoc create mode 100644 docs/doc_examples/a412fe22a74900c72434391ed75139dc.asciidoc create mode 100644 docs/doc_examples/a425fcab60f603504becee7d001f0a4b.asciidoc create mode 100644 docs/doc_examples/a428d518162918733d49261ffd65cfc1.asciidoc delete mode 100644 docs/doc_examples/a42f33e15b0995bb4b6058659bfdea85.asciidoc create mode 100644 docs/doc_examples/a43954d055f042d625a905513821f5f0.asciidoc create mode 100644 docs/doc_examples/a45244aa3adbf3c793fede100786d1f5.asciidoc create mode 100644 docs/doc_examples/a45605347d6438e7aecdf3b37198616d.asciidoc create mode 100644 docs/doc_examples/a45810722dc4f468f81b1e8a451d21be.asciidoc create mode 100644 docs/doc_examples/a45d80a3fdba70c1b1ba493e51652c8a.asciidoc create mode 100644 docs/doc_examples/a45eb0cdd138d9c894ca2de9352549a1.asciidoc delete mode 100644 docs/doc_examples/a49169b4622918992411fab4ec48191b.asciidoc create mode 100644 docs/doc_examples/a49acb27f56fe799a9b1342f85cba0f3.asciidoc create mode 100644 docs/doc_examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc create mode 100644 docs/doc_examples/a4bae4d956bc0a663f42cfec36bf8e0b.asciidoc create mode 100644 docs/doc_examples/a4bd9bf52b4f098838d12bcb8dfc3482.asciidoc create mode 100644 docs/doc_examples/a4dbd52004f3ab1580eb73997f77dcab.asciidoc create mode 100644 docs/doc_examples/a4e510aa9145ccedae151c4a6634f0a4.asciidoc create mode 100644 docs/doc_examples/a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc create mode 100644 docs/doc_examples/a4ee2214d621bcfaf768c46d21325958.asciidoc create mode 100644 docs/doc_examples/a4f259522b4dc10a0323aff58236c2c2.asciidoc create mode 100644 docs/doc_examples/a512e4dd8880ce0395937db1bab1d205.asciidoc create mode 100644 docs/doc_examples/a520168c1c8b454a8f102d6a13027c73.asciidoc create mode 100644 docs/doc_examples/a5217a93efabceee9be19949e484f930.asciidoc create mode 100644 docs/doc_examples/a53ff77d83222c0e76453e630d64787e.asciidoc create 
mode 100644 docs/doc_examples/a547bb926c25f670078b98fbe67de3cc.asciidoc create mode 100644 docs/doc_examples/a56c20a733a350673d41829c8daaafbe.asciidoc create mode 100644 docs/doc_examples/a594f05459d9eecc8050c73fc8da336f.asciidoc create mode 100644 docs/doc_examples/a5a58e8ad66afe831bc295500e3e8739.asciidoc create mode 100644 docs/doc_examples/a5a5fb129de2f492e8fd33043a73439c.asciidoc create mode 100644 docs/doc_examples/a5b59f0170a2feaa39e40243fd7ae359.asciidoc create mode 100644 docs/doc_examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc create mode 100644 docs/doc_examples/a5e2b3588258430f2e595abda98e3943.asciidoc create mode 100644 docs/doc_examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc create mode 100644 docs/doc_examples/a5e6ccfb6019238e6db602373b9af147.asciidoc create mode 100644 docs/doc_examples/a5e793d82a4455cf4105dac82a156617.asciidoc create mode 100644 docs/doc_examples/a5ebcd70c34d1ece77a4fb27cc050917.asciidoc create mode 100644 docs/doc_examples/a5f9eb40087921e67d820775acf71522.asciidoc create mode 100644 docs/doc_examples/a6169bc057ce8654bd306ff4b062081b.asciidoc create mode 100644 docs/doc_examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc create mode 100644 docs/doc_examples/a62833baf15f2c9ac094a9289e56a012.asciidoc create mode 100644 docs/doc_examples/a63e0d0504e0c9313814b7f4e2641353.asciidoc create mode 100644 docs/doc_examples/a669e9d56e34c95ef4c780e92ed307f1.asciidoc create mode 100644 docs/doc_examples/a692b4c0ca7825c467880b346841f5a5.asciidoc create mode 100644 docs/doc_examples/a699189c8d1a7573beeaea768f2fc618.asciidoc rename docs/doc_examples/{b0d64d0a554549e5b2808002a0725493.asciidoc => a69b1ce5cc9528fb3639185eaf241ae3.asciidoc} (60%) create mode 100644 docs/doc_examples/a6b2815d54df34b6b8d00226e9a1af0c.asciidoc rename docs/doc_examples/{be8f28f31207b173de61be032fcf239c.asciidoc => a6bb306ca250cf651f19cae808b97012.asciidoc} (77%) create mode 100644 docs/doc_examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc create mode 100644 docs/doc_examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc create mode 100644 docs/doc_examples/a6ef8cd8c8218d547727ffc5485bfbd7.asciidoc rename docs/doc_examples/{210cf5c76bff517f48e80fa1c2d63907.asciidoc => a6fdd0100cd362df54af6c95d1055c96.asciidoc} (78%) create mode 100644 docs/doc_examples/a71154ea11a5214f409ecfd118e9b5e3.asciidoc delete mode 100644 docs/doc_examples/a71c438cc4df1cafe3109ccff475afdb.asciidoc create mode 100644 docs/doc_examples/a72613de3774571ba24def4b495161b5.asciidoc create mode 100644 docs/doc_examples/a735081e715d385b4d471eea0f2b57da.asciidoc create mode 100644 docs/doc_examples/a73a9a6f19516b8ead63182a9ae5b540.asciidoc create mode 100644 docs/doc_examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc create mode 100644 docs/doc_examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc create mode 100644 docs/doc_examples/a799477dff04578b200788a63f9cff71.asciidoc create mode 100644 docs/doc_examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc create mode 100644 docs/doc_examples/a7e58d4dc477a84c1306fd5749aafd8b.asciidoc create mode 100644 docs/doc_examples/a7fb1c0d0827d66bfa66016f2564b10c.asciidoc create mode 100644 docs/doc_examples/a8019280dab5b04211ae3b21e5e08223.asciidoc delete mode 100644 docs/doc_examples/a80f5db4357bb25b8704d374c18318ed.asciidoc create mode 100644 docs/doc_examples/a810da963d3b28d79dcd17be829bb271.asciidoc create mode 100644 docs/doc_examples/a811b82ba4632bdd9065829085188bc9.asciidoc create mode 100644 docs/doc_examples/a84bc239eb2f607e8bed1fdb70d63823.asciidoc create mode 100644 
docs/doc_examples/a861a89f52008610e813b9f073951c58.asciidoc create mode 100644 docs/doc_examples/a89052bcdfe40e604a98d12be6ae59d2.asciidoc create mode 100644 docs/doc_examples/a8add749c3f41ad1308a45308df14103.asciidoc create mode 100644 docs/doc_examples/a9280b55a7284952f604ec7bece712f6.asciidoc create mode 100644 docs/doc_examples/a941fd568f2e20e13df909ab24506073.asciidoc create mode 100644 docs/doc_examples/a9541c64512ebc5fcff2dc48487dc0b7.asciidoc create mode 100644 docs/doc_examples/a9554396506888e392a1aee0ca28e6fc.asciidoc create mode 100644 docs/doc_examples/a95a123b9f862e52ab1e8f875961c852.asciidoc create mode 100644 docs/doc_examples/a960b43e720b4934edb74ab4b085ca77.asciidoc create mode 100644 docs/doc_examples/a97aace57c6442bbb90e1e14effbcda3.asciidoc create mode 100644 docs/doc_examples/a97f984c01fa1d96e6d33a0e8e2cb90f.asciidoc create mode 100644 docs/doc_examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc rename docs/doc_examples/{f8cc4b331a19ff4df8e4a490f906ee69.asciidoc => a98692a565904ec0783884d81a7b71fc.asciidoc} (82%) create mode 100644 docs/doc_examples/a999b5661bebb802bbbfe04faacf1971.asciidoc create mode 100644 docs/doc_examples/a99bc141066ef673e35f306157750ec9.asciidoc create mode 100644 docs/doc_examples/a99bf70ae38bdf1c6f350140b25e0422.asciidoc create mode 100644 docs/doc_examples/a9c08023354aa9b9023807962df71d13.asciidoc create mode 100644 docs/doc_examples/a9d44463dcea3cb0ea4c8f8460cea524.asciidoc create mode 100644 docs/doc_examples/a9dd5cd3f2b31e7c8129ea63bab868b4.asciidoc create mode 100644 docs/doc_examples/a9dd9595e96c307b8c798beaeb571521.asciidoc create mode 100644 docs/doc_examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc create mode 100644 docs/doc_examples/aa1771b702f4b771491ba4ab743a9197.asciidoc create mode 100644 docs/doc_examples/aa3284717241ed79d3d1d3bdbbdce598.asciidoc create mode 100644 docs/doc_examples/aa5c0fa51a3553ce7caa763c3832120d.asciidoc create mode 100644 docs/doc_examples/aa5fbb68d3a8e0d0c894791cb6cf0b13.asciidoc create mode 100644 docs/doc_examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc create mode 100644 docs/doc_examples/aa699ff3234f54d091575a38e859a627.asciidoc delete mode 100644 docs/doc_examples/aa6bfe54e2436eb668091fe31c2fbf4d.asciidoc create mode 100644 docs/doc_examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc create mode 100644 docs/doc_examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc create mode 100644 docs/doc_examples/aaa7a61b07861235fb6e489b946c705c.asciidoc create mode 100644 docs/doc_examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc create mode 100644 docs/doc_examples/aaba346e0becdf12db13658296e0b8a1.asciidoc create mode 100644 docs/doc_examples/aac5996a8398cc8f7701a063df0b2346.asciidoc create mode 100644 docs/doc_examples/aadf36ae37460a735e06b953b4cee494.asciidoc create mode 100644 docs/doc_examples/ab0fd1908c9957cc7f63165c156e48cd.asciidoc create mode 100644 docs/doc_examples/ab1372270c11bcd6f36d1a13e6c69276.asciidoc create mode 100644 docs/doc_examples/ab1a989958c1d345a9dc3dd36ad90c27.asciidoc create mode 100644 docs/doc_examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc create mode 100644 docs/doc_examples/ab29bfbd35ee482cf54052b03d62cd31.asciidoc create mode 100644 docs/doc_examples/ab317aa09c4bd44abbf02517141e37ef.asciidoc create mode 100644 docs/doc_examples/ab3c36b70459093beafbfd3a7ae75b9b.asciidoc create mode 100644 docs/doc_examples/ab8b4537fad80107bc88f633d4039a52.asciidoc create mode 100644 docs/doc_examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc create mode 100644 
docs/doc_examples/abb4a58089574211d434946a923e5725.asciidoc create mode 100644 docs/doc_examples/abc280775734daa6cf2c28868e155d10.asciidoc create mode 100644 docs/doc_examples/abc496de5fd013099a134db369b34a8b.asciidoc create mode 100644 docs/doc_examples/abc7a670a47516b58b6b07d7497b140c.asciidoc create mode 100644 docs/doc_examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc delete mode 100644 docs/doc_examples/abf329ebefaf58acd4ee30e685731499.asciidoc create mode 100644 docs/doc_examples/ac366b9dda7040e743dee85335354094.asciidoc create mode 100644 docs/doc_examples/ac483996d479946d57c374c3a86b2621.asciidoc create mode 100644 docs/doc_examples/ac497917ef707538198a8458ae3d5c6b.asciidoc delete mode 100644 docs/doc_examples/ac544eb247a29ca42aab13826ca88561.asciidoc create mode 100644 docs/doc_examples/ac73895ca1882cd1ac65b1facfbb5c63.asciidoc create mode 100644 docs/doc_examples/ac8328bc51fd396b3ce5f7ef3e1e73df.asciidoc create mode 100644 docs/doc_examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc create mode 100644 docs/doc_examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc create mode 100644 docs/doc_examples/acb10091ad335ddd15d71021aaf23c62.asciidoc create mode 100644 docs/doc_examples/acb850c08f51226eadb75be09e336076.asciidoc create mode 100644 docs/doc_examples/acc52da725a996ae696b00d9f818dfde.asciidoc create mode 100644 docs/doc_examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc create mode 100644 docs/doc_examples/ad2416ca0581316cee6c63129685bca5.asciidoc create mode 100644 docs/doc_examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc create mode 100644 docs/doc_examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc create mode 100644 docs/doc_examples/ad57ccba0a060da4f5313692fa26a235.asciidoc create mode 100644 docs/doc_examples/ad63eca6829a25293c9be589c1870547.asciidoc create mode 100644 docs/doc_examples/ad6d81be5fad4bad87486b699454dce5.asciidoc delete mode 100644 docs/doc_examples/ad6ea0c1e46712aa1fd6d3bfa0ec979e.asciidoc delete mode 100644 docs/doc_examples/ad79228630684d950fe9792a768d24c5.asciidoc create mode 100644 docs/doc_examples/ad88e46bb06739991498dee248850223.asciidoc create mode 100644 docs/doc_examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc create mode 100644 docs/doc_examples/ada2675a9c631da2bfe627fc2618f5ed.asciidoc create mode 100644 docs/doc_examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc rename docs/doc_examples/{b41dce56b0e640d32b1cf452f87cec17.asciidoc => add240aa149d8b11139947502b279ee0.asciidoc} (56%) create mode 100644 docs/doc_examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc create mode 100644 docs/doc_examples/adf728b0c11c5c309c730205609a379d.asciidoc create mode 100644 docs/doc_examples/ae0d20c2ebb59278e08a26c9634d90c9.asciidoc create mode 100644 docs/doc_examples/ae398a6b6494e7982ef2549fc2cd2d8e.asciidoc create mode 100644 docs/doc_examples/ae4aa368617637a390074535df86e64b.asciidoc create mode 100644 docs/doc_examples/ae591d49e54b838c15cdcf64a8dee9c2.asciidoc create mode 100644 docs/doc_examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc delete mode 100644 docs/doc_examples/ae9b5fbd42af2386ffbf56ad4a697e51.asciidoc create mode 100644 docs/doc_examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc create mode 100644 docs/doc_examples/aee26dd62fbb6d614a0798f3344c0598.asciidoc create mode 100644 docs/doc_examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc create mode 100644 docs/doc_examples/af00a58d9171d32f6efe52d94e51e526.asciidoc create mode 100644 docs/doc_examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc delete mode 100644 
docs/doc_examples/af3fb9fa5691a7b37a6dc2a69ff66e64.asciidoc create mode 100644 docs/doc_examples/af44cc7fb0c435d4497c77baf904bf5e.asciidoc create mode 100644 docs/doc_examples/af517b6936fa41d124d68b107b2efdc3.asciidoc create mode 100644 docs/doc_examples/af607715d0693587dd12962266359a96.asciidoc create mode 100644 docs/doc_examples/af746266a49a693ff6170c88da8a8c04.asciidoc create mode 100644 docs/doc_examples/af7c5add165b005aefb552d79130fed6.asciidoc create mode 100644 docs/doc_examples/af84b3995564a7ca84360a526a4ac896.asciidoc create mode 100644 docs/doc_examples/af85ad2551d1cc6742c6521d71c889cc.asciidoc create mode 100644 docs/doc_examples/af91019991bee136df5460e2fd4ac72a.asciidoc create mode 100644 docs/doc_examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc create mode 100644 docs/doc_examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc create mode 100644 docs/doc_examples/afa24b7d72c2d9f586023a49bd655ec7.asciidoc create mode 100644 docs/doc_examples/afadb6bb7d0fa5a4531708af1ea8f9f8.asciidoc create mode 100644 docs/doc_examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc create mode 100644 docs/doc_examples/afc0a9cffc0100797a3f093094394763.asciidoc delete mode 100644 docs/doc_examples/afc29b61c532cf683f749baf013e7bfe.asciidoc create mode 100644 docs/doc_examples/afcacd742d18bf220e02f0bc6891526d.asciidoc create mode 100644 docs/doc_examples/afd90d268187f995dc002abc189f818d.asciidoc create mode 100644 docs/doc_examples/afdb19ad1ebb4f64e235528b640817b6.asciidoc create mode 100644 docs/doc_examples/afe30f159937b38d74c869570cfcd369.asciidoc create mode 100644 docs/doc_examples/afe5aeb9317f0ae470b28e85a8d98274.asciidoc create mode 100644 docs/doc_examples/afe87a2850326e0328fbebbefec2e839.asciidoc create mode 100644 docs/doc_examples/afef5cac988592b97ae289ab39c2f437.asciidoc create mode 100644 docs/doc_examples/affc7ff234dc3acccb2bf7dc51f54813.asciidoc create mode 100644 docs/doc_examples/b00ac39faf96785e89be8d4205fb984d.asciidoc create mode 100644 docs/doc_examples/b00d74eed431a272c829c0f798e3a539.asciidoc create mode 100644 docs/doc_examples/b00f3bc0e47905aaa2124d6a025c75d4.asciidoc create mode 100644 docs/doc_examples/b0b1ae9582599f501f3b3ed8a42ea2af.asciidoc create mode 100644 docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc create mode 100644 docs/doc_examples/b0ce54ff4fec0b0c712506eb81e633f4.asciidoc create mode 100644 docs/doc_examples/b0d3f839237fabf8cdc2221734c668ad.asciidoc rename docs/doc_examples/{381fced1882ca8337143e6bb180a5715.asciidoc => b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc} (67%) create mode 100644 docs/doc_examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc create mode 100644 docs/doc_examples/b109d0141ec8a0aed5d3805abc349a20.asciidoc create mode 100644 docs/doc_examples/b11a0675e49df0709be693297ca73a2c.asciidoc create mode 100644 docs/doc_examples/b14122481ae1f158f1a9a1bfbc4a41b1.asciidoc create mode 100644 docs/doc_examples/b17143780e9904bfc1e1c53436497fa1.asciidoc create mode 100644 docs/doc_examples/b176e0d428726705298184ef39ad5cb2.asciidoc create mode 100644 docs/doc_examples/b195068563b1dc0f721f5f8c8d172312.asciidoc create mode 100644 docs/doc_examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc create mode 100644 docs/doc_examples/b1f7cb4157b13368373383abd7d2b8cb.asciidoc create mode 100644 docs/doc_examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc create mode 100644 docs/doc_examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc create mode 100644 docs/doc_examples/b2440b492149b705ef107137fdccb0c2.asciidoc create mode 100644 
docs/doc_examples/b24a374c0ad264abbcacb5686f5ed61c.asciidoc create mode 100644 docs/doc_examples/b25256ed615cd837461b0bfa590526b7.asciidoc create mode 100644 docs/doc_examples/b2652b1763a5fd31e95c983869b433bd.asciidoc create mode 100644 docs/doc_examples/b26b5574438e4eaf146b2428bf537c51.asciidoc create mode 100644 docs/doc_examples/b2b26f8568c5dba7649e79f09b859272.asciidoc create mode 100644 docs/doc_examples/b2dec193082462c775169db438308bc3.asciidoc create mode 100644 docs/doc_examples/b2e1e802fc3c5fbeb4190af7d598c23e.asciidoc create mode 100644 docs/doc_examples/b2e20bca1846d7d584626b12eae9f6dc.asciidoc create mode 100644 docs/doc_examples/b2e4f3257c0e0aa3311f7270034bbc42.asciidoc create mode 100644 docs/doc_examples/b3623b8c7f3e7650f52b6fb8b050f583.asciidoc create mode 100644 docs/doc_examples/b3685560cb328f179d96ffe7c2668f72.asciidoc create mode 100644 docs/doc_examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc create mode 100644 docs/doc_examples/b37919cc438b47477343833b4e522408.asciidoc create mode 100644 docs/doc_examples/b3a1c4220617ded67ed43fff2051d324.asciidoc rename docs/doc_examples/{7cac05cb589f1614fd5b8589153bef06.asciidoc => b3a711c3deddcdb8a3f6623184a8b794.asciidoc} (52%) create mode 100644 docs/doc_examples/b3ed567d2c0915a280b6b15f7a37539b.asciidoc create mode 100644 docs/doc_examples/b3fffd96fdb118cd059b5f1d67d928de.asciidoc create mode 100644 docs/doc_examples/b42e7d627cd79e4c5e7a4a3cd8b19ce0.asciidoc create mode 100644 docs/doc_examples/b430122345d560bbd2a77826f5c475f7.asciidoc create mode 100644 docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc create mode 100644 docs/doc_examples/b45c60f908b329835ab40609423f378e.asciidoc create mode 100644 docs/doc_examples/b468d0124dc485385a34504d5b7af82a.asciidoc create mode 100644 docs/doc_examples/b4693f2aa9fa65db04ab2499355c54fc.asciidoc create mode 100644 docs/doc_examples/b47945c7db8868dd36ba079b742f2a90.asciidoc create mode 100644 docs/doc_examples/b4946ecc9101b97102a1c5bcb19e5607.asciidoc delete mode 100644 docs/doc_examples/b4a0d0ed512dffc10ee53bca2feca49b.asciidoc create mode 100644 docs/doc_examples/b4aec2a1d353852507c091bdb629b765.asciidoc rename docs/doc_examples/{fdcaba9547180439ff4b6275034a5170.asciidoc => b4d1fc887e40885cdf6ac2d01487cb76.asciidoc} (51%) create mode 100644 docs/doc_examples/b4d9d5017d42f27281e734e969949623.asciidoc create mode 100644 docs/doc_examples/b4da132cb934c33d61e2b60988c6d4a3.asciidoc create mode 100644 docs/doc_examples/b4f3165e873f551fbaa03945877eb370.asciidoc create mode 100644 docs/doc_examples/b4f4c9ad3301c97fb3c38d108a3bc453.asciidoc create mode 100644 docs/doc_examples/b504119238b44cddd3b5944da20a498d.asciidoc create mode 100644 docs/doc_examples/b515427f8685ca7d79176def672d19fa.asciidoc create mode 100644 docs/doc_examples/b52951b78cd5fb2f9353d1c7e6d37070.asciidoc create mode 100644 docs/doc_examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc create mode 100644 docs/doc_examples/b573e893de0d5f92d67f4f5eb7f0c353.asciidoc create mode 100644 docs/doc_examples/b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc create mode 100644 docs/doc_examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc create mode 100644 docs/doc_examples/b58b17975bbce307b2ccce5051a449e8.asciidoc create mode 100644 docs/doc_examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc create mode 100644 docs/doc_examples/b5e5cd4eccc40d7c5f2a1fcb654bd4a4.asciidoc delete mode 100644 docs/doc_examples/b5f95bc097a201b29c7200fc8d3d31c1.asciidoc create mode 100644 docs/doc_examples/b601bc78fb69e15a42e0783219ddc38d.asciidoc create mode 
100644 docs/doc_examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc create mode 100644 docs/doc_examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc create mode 100644 docs/doc_examples/b620ef4400d2f660fe2c67835938442c.asciidoc create mode 100644 docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc create mode 100644 docs/doc_examples/b638e11d6a8a084290f8934d224abd52.asciidoc create mode 100644 docs/doc_examples/b63ce79ce4fa1bb9b99a789f4dcfef4e.asciidoc create mode 100644 docs/doc_examples/b65dbb51ddd496189c65a9326a53480c.asciidoc create mode 100644 docs/doc_examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc create mode 100644 docs/doc_examples/b67fa8c560dd10a8e6f226048cd21562.asciidoc delete mode 100644 docs/doc_examples/b68c85fe1b0d2f264dc0d1cbf530f319.asciidoc create mode 100644 docs/doc_examples/b68ed7037042719945a2452d23e64c78.asciidoc create mode 100644 docs/doc_examples/b691d41f84b5b46e9051b51db22a46af.asciidoc create mode 100644 docs/doc_examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc create mode 100644 docs/doc_examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc create mode 100644 docs/doc_examples/b6c872d04eabb39d1947cde6b29d4ae1.asciidoc create mode 100644 docs/doc_examples/b6e29a0e14b611d4aaafb3051220ea56.asciidoc create mode 100644 docs/doc_examples/b6e385760e036e36827f719b540d9c11.asciidoc create mode 100644 docs/doc_examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc create mode 100644 docs/doc_examples/b717a583b5165e5c6caafc42fdfd9086.asciidoc create mode 100644 docs/doc_examples/b724f547c5d67e95bbc0a9920e47033c.asciidoc create mode 100644 docs/doc_examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc delete mode 100644 docs/doc_examples/b789292f9cf63ce912e058c46d90ce20.asciidoc create mode 100644 docs/doc_examples/b7a4f5b9a93eff44268a1ee38ee1c6d3.asciidoc create mode 100644 docs/doc_examples/b7a9f60b3646efe3834ca8381f8aa560.asciidoc create mode 100644 docs/doc_examples/b7ad394975863a8f5ee29627c3ab738b.asciidoc create mode 100644 docs/doc_examples/b7bb5503e64bd869b2ac1c46c434a079.asciidoc create mode 100644 docs/doc_examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc create mode 100644 docs/doc_examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc create mode 100644 docs/doc_examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc create mode 100644 docs/doc_examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc create mode 100644 docs/doc_examples/b81a7b5f5ef19553f9cd49196f31018c.asciidoc create mode 100644 docs/doc_examples/b82b156c7b9d1d78054577a6947a6cdd.asciidoc create mode 100644 docs/doc_examples/b839f79a5d58506baed5714f1876ab55.asciidoc create mode 100644 docs/doc_examples/b84932030e60a2cd58884b9dc6d3147f.asciidoc create mode 100644 docs/doc_examples/b85716ba42a57096452665c38995da7d.asciidoc create mode 100644 docs/doc_examples/b857abedc64e367def172bd07075e5c7.asciidoc create mode 100644 docs/doc_examples/b87438263ccd68624b1d69d8750f9432.asciidoc create mode 100644 docs/doc_examples/b87bc8a521995051c7e7395f9c047e1c.asciidoc create mode 100644 docs/doc_examples/b88a2d96da1401d548a4540cca223d27.asciidoc create mode 100644 docs/doc_examples/b8c03bbd917d0cf5474a3e46ebdd7aad.asciidoc create mode 100644 docs/doc_examples/b8cc74a92bac837bfd8ba6d5935350ed.asciidoc create mode 100644 docs/doc_examples/b8dc3764c4467922474b2cdec74bb86b.asciidoc create mode 100644 docs/doc_examples/b8e6e320a19936f6edfc242ccb5cde43.asciidoc delete mode 100644 docs/doc_examples/b918d6b798da673a33e49b94f61dcdc0.asciidoc delete mode 100644 docs/doc_examples/b919f88e6f47a40d5793479440a90ba6.asciidoc create mode 
100644 docs/doc_examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc delete mode 100644 docs/doc_examples/b93ed4ef309819734f0eeea82e8b0f1f.asciidoc create mode 100644 docs/doc_examples/b968853454b4416f7baa3209eb335957.asciidoc create mode 100644 docs/doc_examples/b96f465abb658fe32889c3d183f159a3.asciidoc delete mode 100644 docs/doc_examples/b997885974522ef439d5e345924cc5ba.asciidoc create mode 100644 docs/doc_examples/b9a8f39ab9b1ed18c6c1db61ac4e6a9e.asciidoc create mode 100644 docs/doc_examples/b9f716219359a6c973dafc50b348de33.asciidoc create mode 100644 docs/doc_examples/ba07330ed3291b3970f4eb01dacd8086.asciidoc delete mode 100644 docs/doc_examples/ba0b4081c98f3387f76b77847c52ee9a.asciidoc create mode 100644 docs/doc_examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc create mode 100644 docs/doc_examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc create mode 100644 docs/doc_examples/ba3b9783aa188c6841e1926c5ab1472d.asciidoc create mode 100644 docs/doc_examples/ba5dc6fb9bbe1406714da5d641462a23.asciidoc create mode 100644 docs/doc_examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc create mode 100644 docs/doc_examples/ba66768ed04f7b87906badff40ff40ed.asciidoc create mode 100644 docs/doc_examples/ba8c3578613ae0bf890f6a05706ce776.asciidoc create mode 100644 docs/doc_examples/ba9a5f66a6148612de0ad2491fd6c90d.asciidoc create mode 100644 docs/doc_examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc create mode 100644 docs/doc_examples/bab4c3b22c1768fcc7153345e4096dfb.asciidoc create mode 100644 docs/doc_examples/bb067c049331cc850a77b18bdfff81b5.asciidoc delete mode 100644 docs/doc_examples/bb143628fd04070683eeeadc9406d9cc.asciidoc create mode 100644 docs/doc_examples/bb28d1f7f3f09f5061d7f4351aee89fc.asciidoc create mode 100644 docs/doc_examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc create mode 100644 docs/doc_examples/bb5a1319c496acc862c670cc7224e59a.asciidoc create mode 100644 docs/doc_examples/bb64a7228a479f6aeeaccaf7560e11ee.asciidoc create mode 100644 docs/doc_examples/bb792e64a4c1f872296073b457aa03c8.asciidoc create mode 100644 docs/doc_examples/bb975b342de7e838ebf6a36aaa1a8749.asciidoc create mode 100644 docs/doc_examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc delete mode 100644 docs/doc_examples/bc1ad5cc6d3eab98e3ce01f209ba7094.asciidoc create mode 100644 docs/doc_examples/bc4d308069af23929a49d856f6bc3008.asciidoc create mode 100644 docs/doc_examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc create mode 100644 docs/doc_examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc create mode 100644 docs/doc_examples/bcbd4d4749126837723438ff4faeb0f6.asciidoc create mode 100644 docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc create mode 100644 docs/doc_examples/bd0d30a7683037e1ebadd163514765d4.asciidoc create mode 100644 docs/doc_examples/bd1e55b8cb2ca9e496e223e717d76640.asciidoc rename docs/doc_examples/{a4a396cd07657b3977713fb3a742c41b.asciidoc => bd23c3a03907b1238dcb07ab9eecae7b.asciidoc} (71%) create mode 100644 docs/doc_examples/bd298b11933605c641626750c981d70b.asciidoc create mode 100644 docs/doc_examples/bd2a387e8c21bf01a1039e81d7602921.asciidoc create mode 100644 docs/doc_examples/bd3d710ec50a151453e141691163af72.asciidoc create mode 100644 docs/doc_examples/bd458073196a19ecdeb24a8016488c20.asciidoc create mode 100644 docs/doc_examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc delete mode 100644 docs/doc_examples/bd5918ab903c0889bb1f09c8c2466e43.asciidoc create mode 100644 docs/doc_examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc create mode 100644 
docs/doc_examples/bd68666ca2e0be12f7624016317a62bc.asciidoc create mode 100644 docs/doc_examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc create mode 100644 docs/doc_examples/bd7330af2609bdd8aa10958f5e640b93.asciidoc create mode 100644 docs/doc_examples/bd767ea03171fe71c73f58f16d5da92f.asciidoc create mode 100644 docs/doc_examples/bd7a1417fc27b5a801334ec44462b376.asciidoc create mode 100644 docs/doc_examples/bd7fa2f122ab861cd00e0b9154d120b3.asciidoc create mode 100644 docs/doc_examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc create mode 100644 docs/doc_examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc create mode 100644 docs/doc_examples/bdc68012c121062628d6d73468bf4866.asciidoc create mode 100644 docs/doc_examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc create mode 100644 docs/doc_examples/bdfb86cdfffb9d2ee6e3d399f00a57b0.asciidoc delete mode 100644 docs/doc_examples/be1bd47393646ac6bbee177d1cdb7738.asciidoc create mode 100644 docs/doc_examples/be285eef1d2df0dfcf876e2d4b361f1e.asciidoc create mode 100644 docs/doc_examples/be30ea12f605fd61acba689b68e00bbe.asciidoc rename docs/doc_examples/{52a87b81e4e0b6b11e23e85db1602a63.asciidoc => be5b415d7f33d6f0397ac2f8b5c10521.asciidoc} (56%) create mode 100644 docs/doc_examples/be5c5a9c25901737585e4fff9195da3c.asciidoc create mode 100644 docs/doc_examples/be5d62e7c8f63687c585305fbe70d7d0.asciidoc create mode 100644 docs/doc_examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc create mode 100644 docs/doc_examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc create mode 100644 docs/doc_examples/be9376b1e354ad9c6bdad83f6a0ce5ad.asciidoc create mode 100644 docs/doc_examples/be9836fe55c5fada404a2adc1663d832.asciidoc create mode 100644 docs/doc_examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc create mode 100644 docs/doc_examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc create mode 100644 docs/doc_examples/beba2a9795c8a13653e1edf64eec4357.asciidoc create mode 100644 docs/doc_examples/bed14cc152522ca0726ac3746ebc31db.asciidoc create mode 100644 docs/doc_examples/bf17440ac178d2ef5f5be643d033920b.asciidoc create mode 100644 docs/doc_examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc create mode 100644 docs/doc_examples/bf2e6ea2bae621b9b2fee7003e891f86.asciidoc rename docs/doc_examples/{6799d132c1c7ca3970763acde2337ef9.asciidoc => bf3f520b47581d861e802730aaf2a519.asciidoc} (54%) create mode 100644 docs/doc_examples/bf448c3889c18266e2e6d3af4f614da2.asciidoc create mode 100644 docs/doc_examples/bf639275d0818be04317ee5ab6075da6.asciidoc create mode 100644 docs/doc_examples/bf8680d940c84e43a9483a25548dea57.asciidoc create mode 100644 docs/doc_examples/bf9f13dc6c24cc225a72e32177e9ee02.asciidoc create mode 100644 docs/doc_examples/bfb1aa83da8e3f414d50b5ed7894ed33.asciidoc create mode 100644 docs/doc_examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc delete mode 100644 docs/doc_examples/bfcd65ab85d684d36a8550080032958d.asciidoc create mode 100644 docs/doc_examples/bfd6fa3f44e6165f8999102f5a8e24d6.asciidoc create mode 100644 docs/doc_examples/c00c9412609832ebceb9e786dd9542df.asciidoc create mode 100644 docs/doc_examples/c012f42b26eb8dd9b197644c3ed954cf.asciidoc create mode 100644 docs/doc_examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc create mode 100644 docs/doc_examples/c03ce952de42eae4b522cedc9fd3d14a.asciidoc create mode 100644 docs/doc_examples/c065a200c00e2005d88ec2f0c10c908a.asciidoc create mode 100644 docs/doc_examples/c067182d385f59ce5952fb9a716fbf05.asciidoc create mode 100644 docs/doc_examples/c088ce5291ae28650b6091cdec489398.asciidoc create mode 
100644 docs/doc_examples/c0a4b0c1c6eff14da8b152ceb19c1c31.asciidoc create mode 100644 docs/doc_examples/c0c638e3d218b0ecbe5c4d77c964ae9e.asciidoc create mode 100644 docs/doc_examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc create mode 100644 docs/doc_examples/c0ebaa33e750b87555dc352073f692e8.asciidoc rename docs/doc_examples/{d222c6a6ec7a3beca6c97011b0874512.asciidoc => c0ff8b3db994c4736f7579dde18097d2.asciidoc} (60%) create mode 100644 docs/doc_examples/c10a486a28cbc5b2f15c3474ae31a431.asciidoc create mode 100644 docs/doc_examples/c11c4d6b30e882871bf0074f407149bd.asciidoc create mode 100644 docs/doc_examples/c12d6e962f083c728f9397932f05202e.asciidoc create mode 100644 docs/doc_examples/c1409f591a01589638d9b00436ce42c0.asciidoc create mode 100644 docs/doc_examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc create mode 100644 docs/doc_examples/c155d2670ff82b135c7dcec0fc8a3f23.asciidoc create mode 100644 docs/doc_examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc create mode 100644 docs/doc_examples/c186ecf6f799ddff7add1abdecea5821.asciidoc create mode 100644 docs/doc_examples/c187b52646cedeebe0716327add65642.asciidoc create mode 100644 docs/doc_examples/c1a39c2628ada04c3ddd61a303b65d44.asciidoc create mode 100644 docs/doc_examples/c1a895497066a3dac674d4b1a119048d.asciidoc create mode 100644 docs/doc_examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc create mode 100644 docs/doc_examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc create mode 100644 docs/doc_examples/c1bb395546102279296534522061829f.asciidoc create mode 100644 docs/doc_examples/c1efc5cfcb3c29711bfe118f1baa28b0.asciidoc create mode 100644 docs/doc_examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc create mode 100644 docs/doc_examples/c208de54369379e8d78ab201be18b6be.asciidoc create mode 100644 docs/doc_examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc create mode 100644 docs/doc_examples/c21eb4bc30087188241cbba6b6b89999.asciidoc create mode 100644 docs/doc_examples/c23e32775340d7bc6f46820313014d8a.asciidoc create mode 100644 docs/doc_examples/c267e90b7873a7c8c8af06f01e958e69.asciidoc create mode 100644 docs/doc_examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc create mode 100644 docs/doc_examples/c28f0b0dd3246cb91d6facb3295a61d7.asciidoc create mode 100644 docs/doc_examples/c2d7c36daac8608d2515c549b2c82436.asciidoc create mode 100644 docs/doc_examples/c318fde926842722825a51e5c9c326a9.asciidoc delete mode 100644 docs/doc_examples/c32a3f8071d87f0a3f5a78e07fe7a669.asciidoc create mode 100644 docs/doc_examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc create mode 100644 docs/doc_examples/c4272ad0309ffbcbe9ce96bf9fb4352a.asciidoc create mode 100644 docs/doc_examples/c42bc6e74afc3d43cd032ec2bfd77385.asciidoc create mode 100644 docs/doc_examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc create mode 100644 docs/doc_examples/c464ed2001d66a1446f37659dc9efc2a.asciidoc create mode 100644 docs/doc_examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc create mode 100644 docs/doc_examples/c48b8bcd6f41e0d12b58e854e09ea893.asciidoc create mode 100644 docs/doc_examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc delete mode 100644 docs/doc_examples/c4b278ba293abd0d02a0b5ad1a99f84a.asciidoc create mode 100644 docs/doc_examples/c4b727723b57052b6504bb74fe09abc6.asciidoc create mode 100644 docs/doc_examples/c4c1a87414741a678f6cb91804daf095.asciidoc create mode 100644 docs/doc_examples/c4fadbb7f61e5f83ab3fc9cd4b82b5e5.asciidoc create mode 100644 docs/doc_examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc create mode 100644 
docs/doc_examples/c54597143ac86540726f6422fd98b22e.asciidoc create mode 100644 docs/doc_examples/c554a1791f29bbbcddda84c64deaba6f.asciidoc create mode 100644 docs/doc_examples/c580092fd3d36c32b09d63921708a67b.asciidoc create mode 100644 docs/doc_examples/c5802e9f3f4068fcecb6937b867b270d.asciidoc create mode 100644 docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc create mode 100644 docs/doc_examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc create mode 100644 docs/doc_examples/c5bc577ff92f889225b0d2617adcb48c.asciidoc create mode 100644 docs/doc_examples/c5cc19e48549fbc5327a9d46874bbeee.asciidoc create mode 100644 docs/doc_examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc delete mode 100644 docs/doc_examples/c612d93e7f682a0d731e385edf9f5d56.asciidoc create mode 100644 docs/doc_examples/c6151a0788a10a7f40da684d72c3255c.asciidoc create mode 100644 docs/doc_examples/c630a1f891aa9aa651f9982b832a42e1.asciidoc create mode 100644 docs/doc_examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc create mode 100644 docs/doc_examples/c639036b87d02fb864e27c4ca29ef833.asciidoc create mode 100644 docs/doc_examples/c64b61bedb21b9def8fce5092e677af9.asciidoc create mode 100644 docs/doc_examples/c654b09be981be12fc7be0ba33f8652b.asciidoc create mode 100644 docs/doc_examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc create mode 100644 docs/doc_examples/c66dab0b114fa3e228e1c0e0e5a99b60.asciidoc create mode 100644 docs/doc_examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc create mode 100644 docs/doc_examples/c6abe91b5527870face2b826f37ba1da.asciidoc create mode 100644 docs/doc_examples/c6b365c7da97d7e50f36820a7d36f548.asciidoc create mode 100644 docs/doc_examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc create mode 100644 docs/doc_examples/c6b8713bd49661d69d6b868f5b991d17.asciidoc create mode 100644 docs/doc_examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc create mode 100644 docs/doc_examples/c6d39d22188dc7bbfdad811a94cbcc2b.asciidoc create mode 100644 docs/doc_examples/c6d5e3b6ff9c665ec5344a4bfa7add80.asciidoc create mode 100644 docs/doc_examples/c6f07c53eda4db77305bb14751b3263f.asciidoc create mode 100644 docs/doc_examples/c733f20641b20e124f26198534755d6d.asciidoc create mode 100644 docs/doc_examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc create mode 100644 docs/doc_examples/c76cb6a080959b0d87afd780cf814be2.asciidoc create mode 100644 docs/doc_examples/c79b284fa7a5d7421c6daae62bc697f9.asciidoc create mode 100644 docs/doc_examples/c79e8ee86b332302b25c5c1f5f4f89d7.asciidoc create mode 100644 docs/doc_examples/c8210f23c10d0642f24c1e43faa4deda.asciidoc delete mode 100644 docs/doc_examples/c849c6c8f8659dbb93e1c14356f74e37.asciidoc rename docs/doc_examples/{47b5ff897f26e9c943cee5c06034181d.asciidoc => c87038b96ab06d9a741a130f94de4f02.asciidoc} (69%) create mode 100644 docs/doc_examples/c873f9cd093e26515148f052e28c7805.asciidoc create mode 100644 docs/doc_examples/c8bbf362f06a0d8dab33ec0d99743343.asciidoc create mode 100644 docs/doc_examples/c8e2109b19d50467ab83a40006462e9f.asciidoc create mode 100644 docs/doc_examples/c92b761c18d8e1c3df75c04a21503e16.asciidoc create mode 100644 docs/doc_examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc create mode 100644 docs/doc_examples/c95d5317525c2ff625e6971c277247af.asciidoc create mode 100644 docs/doc_examples/c96669604d0e66a097ddf3093b025ccd.asciidoc create mode 100644 docs/doc_examples/c96e5740b79f703c5b77e3ddc9fdf3a0.asciidoc create mode 100644 docs/doc_examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc create mode 100644 
docs/doc_examples/c9a6ab0a56bb0177f158277185f68302.asciidoc create mode 100644 docs/doc_examples/c9afa715021f2e6450e72ac73271960c.asciidoc create mode 100644 docs/doc_examples/c9b6cbe93c8bd23e3f658c3af4e70092.asciidoc create mode 100644 docs/doc_examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc create mode 100644 docs/doc_examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc create mode 100644 docs/doc_examples/c9d9a1d751f20f6197c825cb4378fe9f.asciidoc create mode 100644 docs/doc_examples/ca06db2aa4747910278f96315f7be94b.asciidoc create mode 100644 docs/doc_examples/ca08e511e5907d258081b10a1a9f0072.asciidoc create mode 100644 docs/doc_examples/ca1cc4bcef22fdf9153833bfe6a55294.asciidoc create mode 100644 docs/doc_examples/ca3bcd6278510ebced5f74484033cb36.asciidoc create mode 100644 docs/doc_examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc create mode 100644 docs/doc_examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc create mode 100644 docs/doc_examples/ca98afbd6a90f63e02f62239d225313b.asciidoc create mode 100644 docs/doc_examples/caaafef1a76c2bec677704c2dc233218.asciidoc create mode 100644 docs/doc_examples/caab99520d3fe41f6154d74a7f696057.asciidoc create mode 100644 docs/doc_examples/cac74a85c6b352a6e23d8673abae126f.asciidoc create mode 100644 docs/doc_examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc rename docs/doc_examples/{6f097c298a7abf4c032c4314920c49c8.asciidoc => cb0c3223fd45148497df73adfba2e9ce.asciidoc} (50%) create mode 100644 docs/doc_examples/cb16f1ff85399ddaa418834be580c9de.asciidoc create mode 100644 docs/doc_examples/cb1d2a787bbe88974cfc5f132556a51c.asciidoc create mode 100644 docs/doc_examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc create mode 100644 docs/doc_examples/cb3c483816b6ea150ff6c559fa144d32.asciidoc create mode 100644 docs/doc_examples/cb4388b72d41c431ec9ca8255b2f65fb.asciidoc create mode 100644 docs/doc_examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc create mode 100644 docs/doc_examples/cb71c6ecfb8b19725c374572444e5d32.asciidoc create mode 100644 docs/doc_examples/cba3462a307e2483c14e3e198f6960e3.asciidoc create mode 100644 docs/doc_examples/cbc2b5595890f87165aab1a741b1d22c.asciidoc create mode 100644 docs/doc_examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc create mode 100644 docs/doc_examples/cc0cca5556ec6224c7134c233734beed.asciidoc create mode 100644 docs/doc_examples/cc28a3dafcd5056f2a3ec07f6fda5091.asciidoc create mode 100644 docs/doc_examples/cc56be758d5d75febbd975786187c861.asciidoc create mode 100644 docs/doc_examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc create mode 100644 docs/doc_examples/cc7f1c74ede6810e2c9db19256d6b653.asciidoc create mode 100644 docs/doc_examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc create mode 100644 docs/doc_examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc create mode 100644 docs/doc_examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc create mode 100644 docs/doc_examples/ccf84c1e5e5602a9e841cb8f7e3bb29f.asciidoc create mode 100644 docs/doc_examples/cd16538654e0f834ff19fe6cf329c398.asciidoc delete mode 100644 docs/doc_examples/cd247f267968aa0927bfdad56852f8f5.asciidoc create mode 100644 docs/doc_examples/cd373a6eb1ef4748616500b26fab3006.asciidoc create mode 100644 docs/doc_examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc delete mode 100644 docs/doc_examples/cd5bc5bf7cd58d7b1492c9c298b345f6.asciidoc create mode 100644 docs/doc_examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc create mode 100644 docs/doc_examples/cd6eee201a233b989ac1f2794fa6d640.asciidoc create mode 100644 
docs/doc_examples/cd6fa7f63c93bb04824acd3a7d1f8de3.asciidoc create mode 100644 docs/doc_examples/cd7da0c3769682f546cc1888e569382e.asciidoc create mode 100644 docs/doc_examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc create mode 100644 docs/doc_examples/cd93919e13f656ad2e6629f45c579b93.asciidoc create mode 100644 docs/doc_examples/cda045dfd79acd160ed8668f2ee17ea7.asciidoc create mode 100644 docs/doc_examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc create mode 100644 docs/doc_examples/cdc04e6d3d37f036c7045ee4a582ef06.asciidoc create mode 100644 docs/doc_examples/cdc38c98320a0df705ec8d173c725375.asciidoc create mode 100644 docs/doc_examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc create mode 100644 docs/doc_examples/cdd29b01e730b3996de68a2788050021.asciidoc create mode 100644 docs/doc_examples/cdd7127681254f4d614cc075f9e6fbcf.asciidoc create mode 100644 docs/doc_examples/cde19d110a58317610033ea3dcb0eb80.asciidoc create mode 100644 docs/doc_examples/cde4104a29dfe942d55863cdd8718627.asciidoc delete mode 100644 docs/doc_examples/cdedd5f33f7e5f7acde561e97bff61de.asciidoc create mode 100644 docs/doc_examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc create mode 100644 docs/doc_examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc create mode 100644 docs/doc_examples/ce0a1aba713b0448b0c6a504af7b3a08.asciidoc create mode 100644 docs/doc_examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc create mode 100644 docs/doc_examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc create mode 100644 docs/doc_examples/ce247fc08371e1b30cb52195e521c076.asciidoc create mode 100644 docs/doc_examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc create mode 100644 docs/doc_examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc create mode 100644 docs/doc_examples/ce725697f93b3eebb3a266314568565a.asciidoc create mode 100644 docs/doc_examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc create mode 100644 docs/doc_examples/ce899fcf55da72fc32e623d1ad88b301.asciidoc create mode 100644 docs/doc_examples/ce8eebfb810335803630abe83278bee7.asciidoc create mode 100644 docs/doc_examples/cedb56a71cc743d80263ce352bb21720.asciidoc create mode 100644 docs/doc_examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc create mode 100644 docs/doc_examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc create mode 100644 docs/doc_examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc rename docs/doc_examples/{6d1e75312a28a5ba23837abf768f2510.asciidoc => cf47cd4a39cd62a3ecad919e54a67bca.asciidoc} (66%) create mode 100644 docs/doc_examples/cf5dab4334783ca9b8942eab68fb7174.asciidoc create mode 100644 docs/doc_examples/cf75a880c749a2f2010a8ec3f348e5c3.asciidoc create mode 100644 docs/doc_examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc create mode 100644 docs/doc_examples/cf9f51d719a2e90ffe36ed6fe56a4a69.asciidoc create mode 100644 docs/doc_examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc delete mode 100644 docs/doc_examples/cfbaea6f0df045c5d940bbb6a9c69cd8.asciidoc create mode 100644 docs/doc_examples/cfd4b34f35e531a20739a3b308d57134.asciidoc create mode 100644 docs/doc_examples/cffce059425d3d21e7f9571500d63524.asciidoc create mode 100644 docs/doc_examples/d003ee256d24aa6000bd9dbf1d608dc5.asciidoc create mode 100644 docs/doc_examples/d003f9110e5a474230abe11f36da9297.asciidoc create mode 100644 docs/doc_examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc create mode 100644 docs/doc_examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc create mode 100644 docs/doc_examples/d03139a851888db53f8b7affd85eb495.asciidoc create mode 100644 
docs/doc_examples/d0378fe5e3aad05a2fd2e6e81213374f.asciidoc create mode 100644 docs/doc_examples/d03b0e2f0f3f5ac8d53287c445007a89.asciidoc create mode 100644 docs/doc_examples/d050c6fa7d806457a5f32d30b07e9521.asciidoc create mode 100644 docs/doc_examples/d0546f047359b85a7e98207dc8de896a.asciidoc create mode 100644 docs/doc_examples/d05b2a37106fce0ebbd41e2fd6bd26c2.asciidoc create mode 100644 docs/doc_examples/d06a649bc38aa9a6433b64efa78d8cb5.asciidoc create mode 100644 docs/doc_examples/d095b422d9803c02b62c01adffc85376.asciidoc delete mode 100644 docs/doc_examples/d0a8a938a2fa913b6fdbc871079a59dd.asciidoc create mode 100644 docs/doc_examples/d0dee031197214b59ff9ac7540527d2c.asciidoc create mode 100644 docs/doc_examples/d0fad375f6e074e9067ed93d3faa07bd.asciidoc create mode 100644 docs/doc_examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc create mode 100644 docs/doc_examples/d11ea753a5d86f7e630fd69a069948b1.asciidoc create mode 100644 docs/doc_examples/d1299b9ae1e621d2fdd0b8644c142ace.asciidoc create mode 100644 docs/doc_examples/d12df43ffcdcd937bae9b26fb475e239.asciidoc create mode 100644 docs/doc_examples/d133b5d82238f7d4778c341cbe0bc969.asciidoc create mode 100644 docs/doc_examples/d13c7cdfc976e0c7b70737cd6a7becb8.asciidoc create mode 100644 docs/doc_examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc delete mode 100644 docs/doc_examples/d17269bb80fb63ec0bf37d219e003dcb.asciidoc create mode 100644 docs/doc_examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc delete mode 100644 docs/doc_examples/d1b3b7d2bb2ab90d15fd10318abd24db.asciidoc create mode 100644 docs/doc_examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc delete mode 100644 docs/doc_examples/d1bcf2eb63a462bfdcf01a68e68d5b4a.asciidoc create mode 100644 docs/doc_examples/d1ce66957f8bd84bf01c4bfaee3ba0c3.asciidoc create mode 100644 docs/doc_examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc create mode 100644 docs/doc_examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc create mode 100644 docs/doc_examples/d1ecce3632ae338b5e329b0e5ff3bed7.asciidoc create mode 100644 docs/doc_examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc create mode 100644 docs/doc_examples/d23452f333b77bf5b463310e2a665560.asciidoc create mode 100644 docs/doc_examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc create mode 100644 docs/doc_examples/d268aec16bb1eb909b634e856175094c.asciidoc create mode 100644 docs/doc_examples/d2f52c106685bd8eab47e11d644d7a70.asciidoc create mode 100644 docs/doc_examples/d2f6040c058a9555dfa62bb42d896a8f.asciidoc create mode 100644 docs/doc_examples/d2f6fb271e97fde8685d7744e6718cc7.asciidoc create mode 100644 docs/doc_examples/d305110a8cabfbebd1e38d85559d1023.asciidoc delete mode 100644 docs/doc_examples/d3088d5fa59b3ab110f64fb4f9b0065c.asciidoc delete mode 100644 docs/doc_examples/d31062ff8c015387889fed4ad86fd914.asciidoc create mode 100644 docs/doc_examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc create mode 100644 docs/doc_examples/d34946f59b6f938b141a37cb0b729308.asciidoc create mode 100644 docs/doc_examples/d35a4d78a8b70c9e4d636efb0a92be9d.asciidoc create mode 100644 docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc create mode 100644 docs/doc_examples/d37b065a94b3ff65a2a8a204fc3b097c.asciidoc create mode 100644 docs/doc_examples/d37b0bda2bd24ab310e6b26708c7c6fb.asciidoc create mode 100644 docs/doc_examples/d3a558ef226e9dccc1c7c61e1167547f.asciidoc create mode 100644 docs/doc_examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc create mode 100644 docs/doc_examples/d3d117fec34301520ccdb26332e7c98a.asciidoc create mode 100644 
docs/doc_examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc create mode 100644 docs/doc_examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc create mode 100644 docs/doc_examples/d3e9e1169c3514fd46e253cd8b5ae3cb.asciidoc create mode 100644 docs/doc_examples/d4323be84152fa91abd76e966d4751dc.asciidoc create mode 100644 docs/doc_examples/d443db2755fde3b49ca3a9d296c4a96f.asciidoc create mode 100644 docs/doc_examples/d44ecc69090c0b2bc08a6cbc2e3467c5.asciidoc create mode 100644 docs/doc_examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc create mode 100644 docs/doc_examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc create mode 100644 docs/doc_examples/d49318764244113ad2ac4cc0f06d77ec.asciidoc create mode 100644 docs/doc_examples/d4a41fb74b41b41a0ee114a2311f2815.asciidoc create mode 100644 docs/doc_examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc create mode 100644 docs/doc_examples/d4b50ae96e541c0031264a10f6afccbf.asciidoc create mode 100644 docs/doc_examples/d4cdcf01014c75693b080c778071c1b5.asciidoc create mode 100644 docs/doc_examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc create mode 100644 docs/doc_examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc create mode 100644 docs/doc_examples/d4fb482a51d67a1af48e429af6019a46.asciidoc delete mode 100644 docs/doc_examples/d50a3c64890f88af32c6d4ef4899d82a.asciidoc create mode 100644 docs/doc_examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc create mode 100644 docs/doc_examples/d5132d34ae922fa8e898889b627a1405.asciidoc create mode 100644 docs/doc_examples/d524db57be9f16abac5396895b9a2a59.asciidoc create mode 100644 docs/doc_examples/d547d55efbf75374f6de1f224323bc73.asciidoc create mode 100644 docs/doc_examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc create mode 100644 docs/doc_examples/d56a9d89282df56adbbc34b91390ac17.asciidoc create mode 100644 docs/doc_examples/d595b40bf1ea71923f9824d0f9c99c49.asciidoc create mode 100644 docs/doc_examples/d59e9cc75814575aa5e275dbe262918c.asciidoc create mode 100644 docs/doc_examples/d5abaf1fd26f0abf410dd8827d077bbf.asciidoc create mode 100644 docs/doc_examples/d5bf9bc08f622ece98632a14a3982e27.asciidoc create mode 100644 docs/doc_examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc create mode 100644 docs/doc_examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc create mode 100644 docs/doc_examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc create mode 100644 docs/doc_examples/d64679f8a53928fe9958dbe5ee5d9d13.asciidoc create mode 100644 docs/doc_examples/d64d509440afbed7cefd04b6898962eb.asciidoc create mode 100644 docs/doc_examples/d66e2b4d1931bf88c72e74670156e43f.asciidoc create mode 100644 docs/doc_examples/d681508a745b2bc777d47ba606d24224.asciidoc create mode 100644 docs/doc_examples/d681b643da0d7f0a384f627b6d56111b.asciidoc create mode 100644 docs/doc_examples/d690a6af462c70a783625a323e11c72c.asciidoc create mode 100644 docs/doc_examples/d69bd36335774c8ae1286cee21310241.asciidoc create mode 100644 docs/doc_examples/d69cf7c82602431d9e339583e7dfb988.asciidoc create mode 100644 docs/doc_examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc create mode 100644 docs/doc_examples/d70f55cd29cdb2dcd775ffa9e23ff393.asciidoc create mode 100644 docs/doc_examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc delete mode 100644 docs/doc_examples/d718b63cf1b6591a1d59a0cf4fd995eb.asciidoc create mode 100644 docs/doc_examples/d7348119df9f89a556a7b767d5298c7e.asciidoc create mode 100644 docs/doc_examples/d7717318d93d0a1f3ad049f9c6604417.asciidoc create mode 100644 docs/doc_examples/d775836a0d7abecc6637aa988f204c30.asciidoc create mode 100644 
docs/doc_examples/d7898526d239d2aea83727fb982f8f77.asciidoc create mode 100644 docs/doc_examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc create mode 100644 docs/doc_examples/d7a55a7c491e97079e429483085f1d58.asciidoc create mode 100644 docs/doc_examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc create mode 100644 docs/doc_examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc create mode 100644 docs/doc_examples/d7b61bfb6adb22986a43388b823894cc.asciidoc create mode 100644 docs/doc_examples/d7d92816cac64b7c70d72b0000eeeeea.asciidoc create mode 100644 docs/doc_examples/d7f42d1b906dc406be1819d17c625d5f.asciidoc create mode 100644 docs/doc_examples/d7fe687201ac87b307cd06ed015dd317.asciidoc create mode 100644 docs/doc_examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc create mode 100644 docs/doc_examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc create mode 100644 docs/doc_examples/d8310e5606c61e7a6e64a90838b1a830.asciidoc create mode 100644 docs/doc_examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc create mode 100644 docs/doc_examples/d84a861ce563508aeaaf30a9dd84b5cf.asciidoc create mode 100644 docs/doc_examples/d851282dba548251d10db5954a339307.asciidoc create mode 100644 docs/doc_examples/d870d5bd1f97fc75872a298fcddec513.asciidoc rename docs/doc_examples/{67bba546d835bca8f31df13e3587c348.asciidoc => d87175daed2327565d4325528c6d8b38.asciidoc} (73%) create mode 100644 docs/doc_examples/d87cfcc0a297f75ffe646b2e61940d14.asciidoc create mode 100644 docs/doc_examples/d880630b6f7dc634c4078293f9cd3d80.asciidoc create mode 100644 docs/doc_examples/d88f883ed2fb8be35cd3e72ddffcf4ef.asciidoc create mode 100644 docs/doc_examples/d89d36741d906a71eca6c144e8d83889.asciidoc create mode 100644 docs/doc_examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc delete mode 100644 docs/doc_examples/d8b2a88b5eca99d3691ad3cd40266736.asciidoc create mode 100644 docs/doc_examples/d8c401a5b7359ec65947b9f35ecf6927.asciidoc create mode 100644 docs/doc_examples/d8ea6a1a1c546bf29f65f8c65439b156.asciidoc create mode 100644 docs/doc_examples/d8fa7ca2ec8dbfa034603ea566e33f5b.asciidoc create mode 100644 docs/doc_examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc delete mode 100644 docs/doc_examples/d9474f66970c6955e24b17c7447e7b5f.asciidoc create mode 100644 docs/doc_examples/d94f666616dea141dcb7aaf08a35bc10.asciidoc create mode 100644 docs/doc_examples/d952ac7c73219d8cabc080679e035514.asciidoc create mode 100644 docs/doc_examples/d979f934af0992fb8c8596beff80b638.asciidoc create mode 100644 docs/doc_examples/d983c1ea730eeabac9e914656d7c9be2.asciidoc create mode 100644 docs/doc_examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc create mode 100644 docs/doc_examples/d9a1ad1c5746b75972c74dd4d3a3d623.asciidoc create mode 100644 docs/doc_examples/d9de409a4a197ce7cbe3714e07155d34.asciidoc create mode 100644 docs/doc_examples/d9e0cba8e150681d861f5fd1545514e2.asciidoc create mode 100644 docs/doc_examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc create mode 100644 docs/doc_examples/da18bae37cda566c0254b30c15221b01.asciidoc create mode 100644 docs/doc_examples/da24c13eee8c9aeae9a23faf80489e31.asciidoc create mode 100644 docs/doc_examples/da3cecc36a7313385d32c7f52ccfb7e3.asciidoc create mode 100644 docs/doc_examples/da3f280bc65b581fb3097be768061bee.asciidoc create mode 100644 docs/doc_examples/da8db0769dff7305f178c12b1111bc99.asciidoc create mode 100644 docs/doc_examples/da90e457e2a34fe47dd82a0a2f336095.asciidoc create mode 100644 docs/doc_examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc create mode 100644 
docs/doc_examples/dabb159e0b3456024889fb9754a10655.asciidoc create mode 100644 docs/doc_examples/dabcf0bead37cae1d3e5d2813fd3ccfe.asciidoc create mode 100644 docs/doc_examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc create mode 100644 docs/doc_examples/dad2d4add751fde5c39475ca709cc14b.asciidoc create mode 100644 docs/doc_examples/dadb69a225778ecd6528924c0aa029bb.asciidoc create mode 100644 docs/doc_examples/dae57cf7df18adb4dc64426eb159733a.asciidoc create mode 100644 docs/doc_examples/daf5631eba5285f1b929d5d8d8dc0d50.asciidoc create mode 100644 docs/doc_examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc delete mode 100644 docs/doc_examples/db6cba451ba562abe953d09ad80cc15c.asciidoc create mode 100644 docs/doc_examples/db773f690edf659ac9b044dc854c77eb.asciidoc create mode 100644 docs/doc_examples/db8710a9793ae0817a45892d33468160.asciidoc create mode 100644 docs/doc_examples/db879dcf70abc4a9a14063a9a2d8d6f5.asciidoc create mode 100644 docs/doc_examples/db9a8e3edee7c9a96ea0875fd4bbaa69.asciidoc create mode 100644 docs/doc_examples/dbc50b8c934171e94604575a8b36f349.asciidoc create mode 100644 docs/doc_examples/dbcd8892dd01c43d5a60c94173574faf.asciidoc create mode 100644 docs/doc_examples/dbd1b930782d34d7396fdb2db1216c0d.asciidoc create mode 100644 docs/doc_examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc create mode 100644 docs/doc_examples/dbf93d02ab86a09929a21232b19709cc.asciidoc create mode 100644 docs/doc_examples/dbf9abc37899352751dab0ede62af2fd.asciidoc delete mode 100644 docs/doc_examples/dc15e2373e5ecbe09b4ea0858eb63d47.asciidoc create mode 100644 docs/doc_examples/dc33160f4087443f867080a8f5b2cfbd.asciidoc create mode 100644 docs/doc_examples/dc3b7603e7d688106acb804059af7834.asciidoc create mode 100644 docs/doc_examples/dc468865da947b4a9136a5b92878d918.asciidoc create mode 100644 docs/doc_examples/dc4dcfeae8a5f248639335c2c9809549.asciidoc create mode 100644 docs/doc_examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc create mode 100644 docs/doc_examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc create mode 100644 docs/doc_examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc create mode 100644 docs/doc_examples/dcfa7f479a33f459a2d222a92e651451.asciidoc create mode 100644 docs/doc_examples/dd0b196a099e1cca08c5ce4dd74e935a.asciidoc create mode 100644 docs/doc_examples/dd1a25d821d0c8deaeaa9c8083152a54.asciidoc create mode 100644 docs/doc_examples/dd3b263e9fa4226e59bedfc957d399d2.asciidoc create mode 100644 docs/doc_examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc create mode 100644 docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc create mode 100644 docs/doc_examples/dd792bb53703a57f9207e36d16e26255.asciidoc create mode 100644 docs/doc_examples/dda949d20d07a9edbe64cefc623df945.asciidoc create mode 100644 docs/doc_examples/ddcfa47381d47078dbec651e31b69949.asciidoc create mode 100644 docs/doc_examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc create mode 100644 docs/doc_examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc create mode 100644 docs/doc_examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc create mode 100644 docs/doc_examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc delete mode 100644 docs/doc_examples/de176bc4788ea286fff9e92418a43ea8.asciidoc create mode 100644 docs/doc_examples/de2f59887737de3a27716177b60393a2.asciidoc create mode 100644 docs/doc_examples/de876505acc75d371d1f6f484c449197.asciidoc create mode 100644 docs/doc_examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc create mode 100644 docs/doc_examples/dea22bb4997e368950f0fc80f2a5f304.asciidoc create mode 100644 
docs/doc_examples/dea4ac54c63a10c62eccd7b7f6543b86.asciidoc create mode 100644 docs/doc_examples/dead0682932ea6ec33c1197017bcb209.asciidoc create mode 100644 docs/doc_examples/dec2af498a7e5892e8fcd09ae779c8f0.asciidoc create mode 100644 docs/doc_examples/dee3023098d9e63aa9e113beea5686da.asciidoc create mode 100644 docs/doc_examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc create mode 100644 docs/doc_examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc create mode 100644 docs/doc_examples/df103a3df9b353357e72f9180ef421a1.asciidoc create mode 100644 docs/doc_examples/df1336e768fb6fc1826a5afa30a57285.asciidoc delete mode 100644 docs/doc_examples/df17f920b0deab3529b98df88b781f55.asciidoc create mode 100644 docs/doc_examples/df34c8ebaaa59a3ee0e3f28e2443bc30.asciidoc create mode 100644 docs/doc_examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc create mode 100644 docs/doc_examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc create mode 100644 docs/doc_examples/df82a9cb21a7557f3ddba2509f76f608.asciidoc create mode 100644 docs/doc_examples/dfa16b7300d225e013f23625f44c087b.asciidoc create mode 100644 docs/doc_examples/dfa75000edf4b960ed9002595a051871.asciidoc create mode 100644 docs/doc_examples/dfb20907cfc5ac520ea3b1dba5f00811.asciidoc create mode 100644 docs/doc_examples/dfb641d2d3155669ad6fb5a424dabf4f.asciidoc create mode 100644 docs/doc_examples/dfbf53781adc6640493d49931a352167.asciidoc create mode 100644 docs/doc_examples/dfcc83efefaddccfe5dce0695c2266ef.asciidoc create mode 100644 docs/doc_examples/dfcdadcf91529d3a399e05684195028e.asciidoc create mode 100644 docs/doc_examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc create mode 100644 docs/doc_examples/dfdf82b8d99436582f150117695190b3.asciidoc delete mode 100644 docs/doc_examples/dfef545b1e2c247bafd1347e8e807ac1.asciidoc create mode 100644 docs/doc_examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc create mode 100644 docs/doc_examples/dffbbdc4025e5777c647d8818847b960.asciidoc create mode 100644 docs/doc_examples/e0734215054e1ff5df712ce3a826cdba.asciidoc create mode 100644 docs/doc_examples/e08fb1435dc659c24badf25b676efb68.asciidoc create mode 100644 docs/doc_examples/e095fc96504efecc588f97673912e3d3.asciidoc create mode 100644 docs/doc_examples/e09d30195108bd6a1f6857394a6123ea.asciidoc create mode 100644 docs/doc_examples/e09ee13ce253c7892dd5ef076fbfbba5.asciidoc create mode 100644 docs/doc_examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc create mode 100644 docs/doc_examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc create mode 100644 docs/doc_examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc create mode 100644 docs/doc_examples/e0d4a800de2d8f4062e69433586c38db.asciidoc delete mode 100644 docs/doc_examples/e0d6e02b998bdea99c9c08dcc3630c5e.asciidoc create mode 100644 docs/doc_examples/e0fcef99656799de6b88117d56f131e2.asciidoc create mode 100644 docs/doc_examples/e1220f2c28db6ef0233e26e6bd3866fa.asciidoc create mode 100644 docs/doc_examples/e12f2d2ddca387630e7855a6db952da2.asciidoc create mode 100644 docs/doc_examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc create mode 100644 docs/doc_examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc create mode 100644 docs/doc_examples/e16a353e619b935c5c70769b1b9fa100.asciidoc create mode 100644 docs/doc_examples/e1874cc7cd22b6860ca8b11bde3c70c1.asciidoc create mode 100644 docs/doc_examples/e194e9cbe3eb2305f4f7cdda0cf529bd.asciidoc create mode 100644 docs/doc_examples/e19f5e3724d9f3f36a817b9a811ca42e.asciidoc create mode 100644 docs/doc_examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc rename 
docs/doc_examples/{b9a153725b28fdd0a5aabd7f17a8c2d7.asciidoc => e1d6ecab4148b09f4c605474157e7dbd.asciidoc} (73%) rename docs/doc_examples/{36818c6d9f434d387819c30bd9addb14.asciidoc => e1f20ee96ce80edcc35b647cef731e15.asciidoc} (53%) create mode 100644 docs/doc_examples/e1f6ea7c0937cf7e6ea7e8209e52e8bb.asciidoc create mode 100644 docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc create mode 100644 docs/doc_examples/e26c96978096ccc592849cca9db67ffc.asciidoc create mode 100644 docs/doc_examples/e26e8bfa68aa4ab265b22304c38c3aef.asciidoc create mode 100644 docs/doc_examples/e273060a675c959fd5f3cde27c8aff07.asciidoc create mode 100644 docs/doc_examples/e2750d69bcb6d4c7e16e704cd0fb3530.asciidoc create mode 100644 docs/doc_examples/e2883c88b5ceca9fce1e70e716d80025.asciidoc delete mode 100644 docs/doc_examples/e2a042c629429855c3bcaefffb26b7fa.asciidoc create mode 100644 docs/doc_examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc create mode 100644 docs/doc_examples/e2a753029b450942a3228e3003a55a7d.asciidoc create mode 100644 docs/doc_examples/e2a7d127b82ddebb690a959dcd0cbc09.asciidoc create mode 100644 docs/doc_examples/e2b4867a9f72bda87ebaa3608d3fba4c.asciidoc create mode 100644 docs/doc_examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc create mode 100644 docs/doc_examples/e2d8cf24a12053eb09fec7087cdab43a.asciidoc create mode 100644 docs/doc_examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc create mode 100644 docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc create mode 100644 docs/doc_examples/e316271f668c9889bf548311fb421f1e.asciidoc create mode 100644 docs/doc_examples/e317a8380dfbc76c4e7f23d0997b3518.asciidoc create mode 100644 docs/doc_examples/e324ea1547635180c31c1adf77870ba2.asciidoc create mode 100644 docs/doc_examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc create mode 100644 docs/doc_examples/e3678142aec988e2ff0ae5d934dc39e9.asciidoc create mode 100644 docs/doc_examples/e3a6462ca79c101314da0680c97678cd.asciidoc create mode 100644 docs/doc_examples/e3b3a8ae12ab947ad3ba96eb228402ca.asciidoc create mode 100644 docs/doc_examples/e3f2f6ee3e312b8a90634827ae954d70.asciidoc create mode 100644 docs/doc_examples/e4193867485595c9c92f909a052d2a90.asciidoc create mode 100644 docs/doc_examples/e41a9bac42d0c1cb103674ae9039b7af.asciidoc create mode 100644 docs/doc_examples/e441cb3be3c2f007621ee1f8c9a2e0ef.asciidoc create mode 100644 docs/doc_examples/e451900efbd8be50c2b8347a83816aa6.asciidoc create mode 100644 docs/doc_examples/e46c83db1580e14be844079cd008f518.asciidoc create mode 100644 docs/doc_examples/e47a71a2e314dbbee5db8142a23957ce.asciidoc create mode 100644 docs/doc_examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc create mode 100644 docs/doc_examples/e494162e83ce041c56b2e2bc29d33474.asciidoc create mode 100644 docs/doc_examples/e4b2b5e0aaedf3cbbcde3d61eb1f13fc.asciidoc create mode 100644 docs/doc_examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc create mode 100644 docs/doc_examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc create mode 100644 docs/doc_examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc create mode 100644 docs/doc_examples/e4de6035653e8202c43631f02d244661.asciidoc create mode 100644 docs/doc_examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc create mode 100644 docs/doc_examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc create mode 100644 docs/doc_examples/e551ea38a2d8f8deac110b33304200cc.asciidoc create mode 100644 docs/doc_examples/e566e898902e432bc7ea0568400f0c50.asciidoc delete mode 100644 docs/doc_examples/e567e6dbf86300142573c73789c8fce4.asciidoc 
create mode 100644 docs/doc_examples/e586d1d2a997133e039fd352a42a72b3.asciidoc create mode 100644 docs/doc_examples/e58833449d01379df20ad06dc28144d8.asciidoc create mode 100644 docs/doc_examples/e58b7965c3a314c34bc444c6db3b1b79.asciidoc create mode 100644 docs/doc_examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc create mode 100644 docs/doc_examples/e5c710b08a545522d50b4ce35503bc46.asciidoc delete mode 100644 docs/doc_examples/e5d2172b524332196cac0f031c043659.asciidoc create mode 100644 docs/doc_examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc create mode 100644 docs/doc_examples/e5f8f83df37ab2296dc4bfed95d7aba7.asciidoc create mode 100644 docs/doc_examples/e608cd0c034f6c245ea87f425e09ce2f.asciidoc create mode 100644 docs/doc_examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc create mode 100644 docs/doc_examples/e60c2bf89fdf38187709d04dd1c55330.asciidoc create mode 100644 docs/doc_examples/e60ded7becfd5b2ccaef5bad2aaa93f5.asciidoc create mode 100644 docs/doc_examples/e619e896ce3dad9dcfc6f8700438be98.asciidoc create mode 100644 docs/doc_examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc create mode 100644 docs/doc_examples/e6369e7cef82d881af593d5526bf79bd.asciidoc create mode 100644 docs/doc_examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc create mode 100644 docs/doc_examples/e63cf08350e9381f519c2835843be7cd.asciidoc create mode 100644 docs/doc_examples/e642be44a62a89cf4afb2db28220c9a9.asciidoc create mode 100644 docs/doc_examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc create mode 100644 docs/doc_examples/e697ef947f3fb7835f7fadb9125b1043.asciidoc create mode 100644 docs/doc_examples/e6b972611c0ec8ab4c240f33f323d85b.asciidoc create mode 100644 docs/doc_examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc create mode 100644 docs/doc_examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc create mode 100644 docs/doc_examples/e6e47da87079a8b67f767a2a01878cf2.asciidoc create mode 100644 docs/doc_examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc create mode 100644 docs/doc_examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc create mode 100644 docs/doc_examples/e7811867397b305efbbe8925d8a01c1a.asciidoc create mode 100644 docs/doc_examples/e784fc00894635470adfd78a0c46b427.asciidoc create mode 100644 docs/doc_examples/e7d819634d765cde269e2669e2dc677f.asciidoc create mode 100644 docs/doc_examples/e7e95022867c72a6563137f066dd2973.asciidoc create mode 100644 docs/doc_examples/e7eca57a5bf5a53cbbe2463bce11495b.asciidoc create mode 100644 docs/doc_examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc create mode 100644 docs/doc_examples/e821d27a8b810821707ba860e31f8b78.asciidoc create mode 100644 docs/doc_examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc create mode 100644 docs/doc_examples/e82c33def91faddcfeed7b02cd258605.asciidoc create mode 100644 docs/doc_examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc create mode 100644 docs/doc_examples/e88a057a13e191e4d5faa22edf2ae8ed.asciidoc create mode 100644 docs/doc_examples/e891e1d4805172da45a81f62b6b44aca.asciidoc create mode 100644 docs/doc_examples/e89bf0d893b7bf43c2d9b44db6cfe21b.asciidoc create mode 100644 docs/doc_examples/e8a2726eea5545355d1d0835d4599f55.asciidoc create mode 100644 docs/doc_examples/e8bb5c57bdeff22be8e5f39a99dfe70e.asciidoc create mode 100644 docs/doc_examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc create mode 100644 docs/doc_examples/e8cbe2269f3dff6b231e73119e81511d.asciidoc delete mode 100644 docs/doc_examples/e8e451bc8c45bcf16df43804c4fc8329.asciidoc create mode 100644 docs/doc_examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc 
create mode 100644 docs/doc_examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc create mode 100644 docs/doc_examples/e905543b281e9c41395304da76ed2ea3.asciidoc create mode 100644 docs/doc_examples/e930a572e8ddfdecc13498c04007b9e3.asciidoc create mode 100644 docs/doc_examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc create mode 100644 docs/doc_examples/e944653610f311fa06148d5b0afdf697.asciidoc create mode 100644 docs/doc_examples/e95ba581b298cd7bb598374afbfed315.asciidoc create mode 100644 docs/doc_examples/e95e61988dc3073a007f7b7445dd233b.asciidoc create mode 100644 docs/doc_examples/e9738fe09a99080506a07945795e8eda.asciidoc create mode 100644 docs/doc_examples/e99c45a47dc0ba7440aea8a9a99c84fa.asciidoc create mode 100644 docs/doc_examples/e9a0b450af6219772631703d602c7092.asciidoc delete mode 100644 docs/doc_examples/e9c2e15b36372d5281c879d336322b6c.asciidoc create mode 100644 docs/doc_examples/e9f9e184499a793828233e536fac0487.asciidoc create mode 100644 docs/doc_examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc create mode 100644 docs/doc_examples/e9fe3b53b5b6e1ff9566b5237c0fa513.asciidoc delete mode 100644 docs/doc_examples/e9fe608f105d7e3268a15e409e2cb9ab.asciidoc create mode 100644 docs/doc_examples/ea020ea32d5cd35e577c61a120f92451.asciidoc delete mode 100644 docs/doc_examples/ea02de2dbe05091fcb0dac72c8ba5f83.asciidoc create mode 100644 docs/doc_examples/ea29029884a5fd9a8d8830d25884bf07.asciidoc create mode 100644 docs/doc_examples/ea313059c18d6edbd28c3f743a5e7c1c.asciidoc rename docs/doc_examples/{42744a175125df5be0ef77413bf8f608.asciidoc => ea5391267ced860c00214c096e08c8d4.asciidoc} (65%) create mode 100644 docs/doc_examples/ea5b4d2d87fd4e040afad18903c44869.asciidoc create mode 100644 docs/doc_examples/ea61aa2531ea73ccc0acd2d41f0518eb.asciidoc create mode 100644 docs/doc_examples/ea66a620c23337545e409c120c4ed5d9.asciidoc create mode 100644 docs/doc_examples/ea68e3428cc2ca3455bf312d09451489.asciidoc create mode 100644 docs/doc_examples/ea690283f301c6ce957efad93d7d5c5d.asciidoc create mode 100644 docs/doc_examples/ea92390651e8ecad0c890658985343c5.asciidoc create mode 100644 docs/doc_examples/eab3cad0257c539c5efd2689aa52f242.asciidoc create mode 100644 docs/doc_examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc create mode 100644 docs/doc_examples/ead4d875877d618594d0cdbdd9b7998b.asciidoc create mode 100644 docs/doc_examples/eada8af6588584ac88f1e5b15f4a5c2a.asciidoc create mode 100644 docs/doc_examples/eae8931d01b3b878dd0c45214121e662.asciidoc create mode 100644 docs/doc_examples/eaf53b05959cc6b7fb09579baf34de68.asciidoc create mode 100644 docs/doc_examples/eaf6a846ded090fd6ac48269ad2b328b.asciidoc create mode 100644 docs/doc_examples/eafdabe80b21b90495555fa6d9089412.asciidoc create mode 100644 docs/doc_examples/eb09235533a1c65a0627ba05f7d4ad4d.asciidoc create mode 100644 docs/doc_examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc create mode 100644 docs/doc_examples/eb33a7e5a0fe83fdaa0f79354f659428.asciidoc create mode 100644 docs/doc_examples/eb4e43b47867b54214a8630172dd0e21.asciidoc create mode 100644 docs/doc_examples/eb54506fbc71a7d250e86b22d0600114.asciidoc create mode 100644 docs/doc_examples/eb5486d2fe4283475bf9e0e09280be16.asciidoc create mode 100644 docs/doc_examples/eb5987b58dae90c3a8a1609410be0570.asciidoc create mode 100644 docs/doc_examples/eb6d62f1d855a8e8fe9eab2656d47504.asciidoc create mode 100644 docs/doc_examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc create mode 100644 docs/doc_examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc create mode 100644 
docs/doc_examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc create mode 100644 docs/doc_examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc create mode 100644 docs/doc_examples/ebef3dc8ed1766d433a5cffc40fde7ae.asciidoc create mode 100644 docs/doc_examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc create mode 100644 docs/doc_examples/ec195297eb804cba1cb19c9926773059.asciidoc delete mode 100644 docs/doc_examples/ec27afee074001b0e4e393611010842b.asciidoc create mode 100644 docs/doc_examples/ec420b28e327f332c9e99d6040c4eb3f.asciidoc create mode 100644 docs/doc_examples/ec44999b6618ac6bbacb23eb08c0fa88.asciidoc delete mode 100644 docs/doc_examples/ec473de07fe89bcbac1f8e278617fe46.asciidoc create mode 100644 docs/doc_examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc create mode 100644 docs/doc_examples/ec69543e39c1f6afb5aff6fb9adc400d.asciidoc create mode 100644 docs/doc_examples/ec736c31f49c54e5424efa2e53b22906.asciidoc create mode 100644 docs/doc_examples/ec8f176ebf436d5719bdeca4a9ea8220.asciidoc create mode 100644 docs/doc_examples/ecc57597f6b791d1151ad79d9f4ce67b.asciidoc create mode 100644 docs/doc_examples/ece01f9382e450f669c0e0925e5b30e5.asciidoc create mode 100644 docs/doc_examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc create mode 100644 docs/doc_examples/ed01b542bb56b1521ea8d5a3c67aa891.asciidoc create mode 100644 docs/doc_examples/ed01d27b8f80bb4ea54bf4e32b8d6258.asciidoc create mode 100644 docs/doc_examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc create mode 100644 docs/doc_examples/ed12eeadb4e530b53c4975dadaa06054.asciidoc create mode 100644 docs/doc_examples/ed250b74bc77c15bb794f55a12d762c3.asciidoc create mode 100644 docs/doc_examples/ed27843eff311f3011b679e97e6fda50.asciidoc create mode 100644 docs/doc_examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc create mode 100644 docs/doc_examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc create mode 100644 docs/doc_examples/ed5c3b45e8de912faba44507d827eb93.asciidoc create mode 100644 docs/doc_examples/ed60daeaec351fc8b3f39a3dfad6fc4e.asciidoc create mode 100644 docs/doc_examples/ed688d86eeaa4d7969acb0f574eb917f.asciidoc create mode 100644 docs/doc_examples/ed6b996ea389e0955a01c2e67f4c8339.asciidoc create mode 100644 docs/doc_examples/ed7fa1971ac322aeccd6391ab32d0490.asciidoc create mode 100644 docs/doc_examples/ed85ed833bec7286a0dfbe64077c5715.asciidoc create mode 100644 docs/doc_examples/edae616e1244babf6032aecc6aaaf836.asciidoc create mode 100644 docs/doc_examples/edb25dc0162b039d477cb06aed2d6275.asciidoc create mode 100644 docs/doc_examples/edb5cad890208014ecd91f3f739ce193.asciidoc create mode 100644 docs/doc_examples/edcfadbfb14d97a2f5e6e21ef7039818.asciidoc create mode 100644 docs/doc_examples/ee08328cd157d547de19b4abe867b23e.asciidoc create mode 100644 docs/doc_examples/ee0fd67acc807f1bddf5e9807c06e7eb.asciidoc create mode 100644 docs/doc_examples/ee223e604bb695cad2517d28ae63ac34.asciidoc create mode 100644 docs/doc_examples/ee2d97090d617ed8aa2a87ea33556dd7.asciidoc create mode 100644 docs/doc_examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc create mode 100644 docs/doc_examples/ee634d59def6302134d24fa90e18b609.asciidoc create mode 100644 docs/doc_examples/ee90d1fb22b59d30da339d825303b912.asciidoc create mode 100644 docs/doc_examples/eeb35b759bd239bb773c8ebd5fe63d05.asciidoc create mode 100644 docs/doc_examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc create mode 100644 docs/doc_examples/eed37703cfe8fec093ed5a42210a6ffd.asciidoc create mode 100644 docs/doc_examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc create mode 100644 
docs/doc_examples/eef9deff7f9799d1f7657bb7e2afb7f1.asciidoc delete mode 100644 docs/doc_examples/ef0f4fa4272c47ff62fb7b422cf975e7.asciidoc create mode 100644 docs/doc_examples/ef10e8d07d9fae945e035d5dee1e9754.asciidoc create mode 100644 docs/doc_examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc create mode 100644 docs/doc_examples/ef33b3b373f7040b874146599db5d557.asciidoc rename docs/doc_examples/{1b542e3ea87a742f95641d64dcfb1bdb.asciidoc => ef3666b5d288faefbcbc4a25e8f506da.asciidoc} (73%) create mode 100644 docs/doc_examples/ef779b87b3b0fb6e6bae9c8875e3a1cf.asciidoc create mode 100644 docs/doc_examples/ef867e563cbffe7866769a096b5d7a92.asciidoc create mode 100644 docs/doc_examples/ef8f30e85e12e9a5a8817d28977598e4.asciidoc delete mode 100644 docs/doc_examples/ef9111c1648d7820925f12e07d1346c5.asciidoc create mode 100644 docs/doc_examples/ef9c29759459904fef162acd223462c4.asciidoc create mode 100644 docs/doc_examples/efa146bf81a9351ba42b92a6decbcfee.asciidoc create mode 100644 docs/doc_examples/efa924638043f3a6b23ccb824d757eba.asciidoc create mode 100644 docs/doc_examples/eff2fc92d46eb3c8f4d424eed18f54a2.asciidoc create mode 100644 docs/doc_examples/eff8ecaed1ed084909c64450fc363a20.asciidoc create mode 100644 docs/doc_examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc create mode 100644 docs/doc_examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc delete mode 100644 docs/doc_examples/f085fb032dae56a3b104ab874eaea2ad.asciidoc create mode 100644 docs/doc_examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc create mode 100644 docs/doc_examples/f09817fd13ff3dce52eb79d0722409c3.asciidoc create mode 100644 docs/doc_examples/f0bfc8d7ab4eb94ea5fdf2e087d8cf5b.asciidoc create mode 100644 docs/doc_examples/f0c3235d8fce641d6ff8ce90ab7b7b8b.asciidoc delete mode 100644 docs/doc_examples/f0e21e03a07c8fa0209b0aafdb3791e6.asciidoc create mode 100644 docs/doc_examples/f10ab582387b2c157917a60205c993f7.asciidoc create mode 100644 docs/doc_examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc create mode 100644 docs/doc_examples/f14d0e4a280fee540e8e5f0fc4d0e9f1.asciidoc create mode 100644 docs/doc_examples/f1508a2221152842894819e762e63491.asciidoc create mode 100644 docs/doc_examples/f160561efab38e40c2feebf5a2542ab5.asciidoc create mode 100644 docs/doc_examples/f18248c181690b81d090275b072f0070.asciidoc create mode 100644 docs/doc_examples/f187ac2dc35425cb0ef48f328cc7e435.asciidoc create mode 100644 docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc create mode 100644 docs/doc_examples/f1bf3edbd9e6c7e01b00c74c99a58b61.asciidoc create mode 100644 docs/doc_examples/f1d2b8169160adfd27f32988113f0f9f.asciidoc create mode 100644 docs/doc_examples/f1dc6f69453867ffafe86e998dd464d9.asciidoc create mode 100644 docs/doc_examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc create mode 100644 docs/doc_examples/f2175feadc2abe545899889e6d4ffcad.asciidoc create mode 100644 docs/doc_examples/f235544a883fd04bed2dc369b0c450f3.asciidoc create mode 100644 docs/doc_examples/f2359acfb6eaa919125463cc1d3a7cd1.asciidoc create mode 100644 docs/doc_examples/f268416813befd13c604642c6fe6eda9.asciidoc create mode 100644 docs/doc_examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc create mode 100644 docs/doc_examples/f281ff50b2cdb67ac0ece93f1594fa95.asciidoc create mode 100644 docs/doc_examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc create mode 100644 docs/doc_examples/f29b2674299ddf51a25ed87619025ede.asciidoc create mode 100644 docs/doc_examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc create mode 100644 
docs/doc_examples/f2b2d62bc0a44940ad14fca57d6d008a.asciidoc create mode 100644 docs/doc_examples/f2c9afd052878b2ec00908739b0d0f74.asciidoc delete mode 100644 docs/doc_examples/f2d68493abd3ca430bd03a7f7f8d18f9.asciidoc create mode 100644 docs/doc_examples/f2e854b6c99659ccc1824e86c096e433.asciidoc create mode 100644 docs/doc_examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc create mode 100644 docs/doc_examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc create mode 100644 docs/doc_examples/f329242d7c8406297eff9bf609870c37.asciidoc create mode 100644 docs/doc_examples/f342465c65ba76383dedbb334b57b616.asciidoc create mode 100644 docs/doc_examples/f34c02351662481dd61a5c2a3e206c60.asciidoc create mode 100644 docs/doc_examples/f3574cfee3971d98417b8dc574a91be0.asciidoc create mode 100644 docs/doc_examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc create mode 100644 docs/doc_examples/f3697682a886ab129530f3e5c1b30632.asciidoc create mode 100644 docs/doc_examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc create mode 100644 docs/doc_examples/f38262ef72f73816ec35fa4c9c85760d.asciidoc create mode 100644 docs/doc_examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc create mode 100644 docs/doc_examples/f3942d9b34138dfca79dff707af270b7.asciidoc create mode 100644 docs/doc_examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc create mode 100644 docs/doc_examples/f3ab820e1f2f54ea718017aeae865742.asciidoc create mode 100644 docs/doc_examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc create mode 100644 docs/doc_examples/f3b4ddce8ff21fc1a76a7c0d9c36650e.asciidoc create mode 100644 docs/doc_examples/f3c696cd63a3f042e62cbb94b75c2427.asciidoc create mode 100644 docs/doc_examples/f3e1dfe1c440e3590be26f265e19425d.asciidoc create mode 100644 docs/doc_examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc create mode 100644 docs/doc_examples/f3fb52680482925c202c2e2f8af6f044.asciidoc create mode 100644 docs/doc_examples/f3fe2012557ebbce1ebad4fc997c092d.asciidoc create mode 100644 docs/doc_examples/f43d551aaaad73d979adf1b86533e6a3.asciidoc create mode 100644 docs/doc_examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc create mode 100644 docs/doc_examples/f44d287c6937785eb09b91353c1deb1e.asciidoc create mode 100644 docs/doc_examples/f453e14bcf30853e57618bf12f83e148.asciidoc create mode 100644 docs/doc_examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc create mode 100644 docs/doc_examples/f45990264f8755b96b11c69c12c90ff4.asciidoc create mode 100644 docs/doc_examples/f495f9c99916a05e3b28166d31955fad.asciidoc create mode 100644 docs/doc_examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc delete mode 100644 docs/doc_examples/f4a1008b3f9baa67bb03ce9ef5ab4cb4.asciidoc create mode 100644 docs/doc_examples/f4ae3f3fbf07a7d39122ac5ac20b9c03.asciidoc create mode 100644 docs/doc_examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc create mode 100644 docs/doc_examples/f4c194628761a4cf2a01453a96bbcc3c.asciidoc create mode 100644 docs/doc_examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc create mode 100644 docs/doc_examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc create mode 100644 docs/doc_examples/f4f557716049b23f8840d58d71e748f0.asciidoc create mode 100644 docs/doc_examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc create mode 100644 docs/doc_examples/f5013174f77868da4dc40cdd745d4ea4.asciidoc create mode 100644 docs/doc_examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc create mode 100644 docs/doc_examples/f545bb95214769aca993c1632a71ad2c.asciidoc create mode 100644 docs/doc_examples/f54f6d06163221f2c7aff6e8db942be3.asciidoc delete mode 100644 
docs/doc_examples/f5569945024b9d664828693705c27c1a.asciidoc create mode 100644 docs/doc_examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc create mode 100644 docs/doc_examples/f5815d573cee0447910c9668003887b8.asciidoc create mode 100644 docs/doc_examples/f58969ac405db85f439c5940d014964b.asciidoc create mode 100644 docs/doc_examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc create mode 100644 docs/doc_examples/f5bf2526af19d964f8c4c59d4795cffc.asciidoc create mode 100644 docs/doc_examples/f5cbbb60ca26867a5d2da625a68a6e65.asciidoc create mode 100644 docs/doc_examples/f5e50fe8a60467adb2c5ee9e0f2d88da.asciidoc create mode 100644 docs/doc_examples/f5e6378cc41ddf5326fe4084396c59b2.asciidoc create mode 100644 docs/doc_examples/f5eed3f2e3558a238487bc85305b7a71.asciidoc create mode 100644 docs/doc_examples/f5ef80dd92c67059ca353a833e6b7b5e.asciidoc create mode 100644 docs/doc_examples/f63f6343e74bd5c844854272e746de14.asciidoc create mode 100644 docs/doc_examples/f642b64e592131f37209a5100fe161cc.asciidoc create mode 100644 docs/doc_examples/f6566395f85d3afe917228643d7318d6.asciidoc create mode 100644 docs/doc_examples/f656c1e64268293ecc8ebd8065628faa.asciidoc create mode 100644 docs/doc_examples/f65abb38dd0cfedeb06e0cef206fbdab.asciidoc create mode 100644 docs/doc_examples/f66643c54999426c5afa6d5a87435d4e.asciidoc create mode 100644 docs/doc_examples/f67d8aab9106ad24b1d2c771d3840ed1.asciidoc create mode 100644 docs/doc_examples/f6911b0f2f56523ccbd8027f276981b3.asciidoc create mode 100644 docs/doc_examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc delete mode 100644 docs/doc_examples/f6b5032bf27c2445d28845be0d413970.asciidoc create mode 100644 docs/doc_examples/f6c9d72fa26cbedd0c3f9fa64a88c38a.asciidoc create mode 100644 docs/doc_examples/f6d493650b4344f17297b568016fb445.asciidoc delete mode 100644 docs/doc_examples/f6d6889667f56b8f49d2858070571a6b.asciidoc create mode 100644 docs/doc_examples/f6de702c3d097af0b0bd391c4f947233.asciidoc create mode 100644 docs/doc_examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc create mode 100644 docs/doc_examples/f6ead39c5505045543b9225deca7367d.asciidoc create mode 100644 docs/doc_examples/f6edbed2b5b2709bbc13866a4780e27a.asciidoc create mode 100644 docs/doc_examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc create mode 100644 docs/doc_examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc create mode 100644 docs/doc_examples/f7139b3c0e066be832b9100ae17157cc.asciidoc create mode 100644 docs/doc_examples/f733b25cd4c448b226bb76862974eef2.asciidoc create mode 100644 docs/doc_examples/f749efe8f11ebd43ef83db91922c736e.asciidoc create mode 100644 docs/doc_examples/f7726cc2c60dea26b88bf0df99fb0813.asciidoc create mode 100644 docs/doc_examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc create mode 100644 docs/doc_examples/f7d3d367a3d8e8ff0eca426b6ea85252.asciidoc create mode 100644 docs/doc_examples/f7dc2fed08e57abda2c3e8a14f8eb098.asciidoc create mode 100644 docs/doc_examples/f7ec9062b3a7578fed55f119d7c22b74.asciidoc create mode 100644 docs/doc_examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc create mode 100644 docs/doc_examples/f83eb6605c7c56e297a494b318400ef0.asciidoc create mode 100644 docs/doc_examples/f8525c2460a577edfef156c13f55b8a7.asciidoc create mode 100644 docs/doc_examples/f8651356ce2e7e93fa306c30f57ed588.asciidoc create mode 100644 docs/doc_examples/f8833488041f3d318435b60917fa877c.asciidoc create mode 100644 docs/doc_examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc create mode 100644 docs/doc_examples/f8cafb1a08bc9b2dd5239f99d4e93f4c.asciidoc create mode 100644 
docs/doc_examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc create mode 100644 docs/doc_examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc delete mode 100644 docs/doc_examples/f9636d7ef1a45be4f36418c875cf6bef.asciidoc create mode 100644 docs/doc_examples/f96d4614f2fc294339fef325b794355f.asciidoc create mode 100644 docs/doc_examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc create mode 100644 docs/doc_examples/f9732ce07960134ea7156e118c2da8a6.asciidoc create mode 100644 docs/doc_examples/f978088f5117d4addd55c11ee3777312.asciidoc create mode 100644 docs/doc_examples/f97aa2efabbf11a534073041eb2658c9.asciidoc rename docs/doc_examples/{978088f989d45dd09339582e9cbc60e0.asciidoc => f98687271e1bec031cc34d05d8f4b60b.asciidoc} (54%) create mode 100644 docs/doc_examples/f9a315ea99bed0cf2f36be1d74eb3e4a.asciidoc rename docs/doc_examples/{89a8ac1509936acc272fc2d72907bc45.asciidoc => f9c8245cc13770dff052b6759a749efa.asciidoc} (74%) create mode 100644 docs/doc_examples/f9cb2547ab04461a12bfd25a35be5f96.asciidoc create mode 100644 docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc create mode 100644 docs/doc_examples/f9f541ae23a184301913f07e62d1afd3.asciidoc delete mode 100644 docs/doc_examples/fa0f4485cd48f986b7ae8cbb24e331c4.asciidoc delete mode 100644 docs/doc_examples/fa2fe60f570bd930d2891778c6efbfe6.asciidoc create mode 100644 docs/doc_examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc create mode 100644 docs/doc_examples/fa5dcd1c7fadc473a791daf0d7ceec36.asciidoc create mode 100644 docs/doc_examples/fa61e3481b1f889b3bd4253866bb1c6b.asciidoc create mode 100644 docs/doc_examples/fa82d86a046d67366cfe9ce65535e433.asciidoc create mode 100644 docs/doc_examples/fa946228e946da256d40264c8b070a1a.asciidoc create mode 100644 docs/doc_examples/fa9a3ef94470f3d9bd6500b65bf993d1.asciidoc create mode 100644 docs/doc_examples/fab4b811ba968aa4df92fb1ac059ea31.asciidoc create mode 100644 docs/doc_examples/fab702851e90e945c1b62dec0bb6a205.asciidoc create mode 100644 docs/doc_examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc create mode 100644 docs/doc_examples/fad524db23eb5718ff310956e590b00d.asciidoc create mode 100644 docs/doc_examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc create mode 100644 docs/doc_examples/fb1180992b2087dfb36576b44c4261e4.asciidoc create mode 100644 docs/doc_examples/fb1263cfdcbb6a89b20b57004d7e0dfc.asciidoc create mode 100644 docs/doc_examples/fb2b91206cfa8b86b4c7117ac1b5193b.asciidoc create mode 100644 docs/doc_examples/fb3505d976283fb7c7b9705a761e0dc2.asciidoc create mode 100644 docs/doc_examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc create mode 100644 docs/doc_examples/fb955375a202f66133af009c04cb77ad.asciidoc create mode 100644 docs/doc_examples/fbb38243221c8fb311660616e3add9ce.asciidoc create mode 100644 docs/doc_examples/fbc5ab85b908480bf944b55da0a43488.asciidoc create mode 100644 docs/doc_examples/fbdad6620eb645f5f1f02e3673604d01.asciidoc create mode 100644 docs/doc_examples/fc1907515f6a913884a9f86451e90ee8.asciidoc create mode 100644 docs/doc_examples/fc190fbbf71949331266dcb3f46a1198.asciidoc create mode 100644 docs/doc_examples/fc26f51bb22c0b5270a66b4722f18aa7.asciidoc create mode 100644 docs/doc_examples/fc3f5f40fa283559ca615cd0eb0a1755.asciidoc create mode 100644 docs/doc_examples/fc49437ce2e7916facf58128308c2aa3.asciidoc create mode 100644 docs/doc_examples/fc51fbc60b0e20aac83300a43ad90252.asciidoc create mode 100644 docs/doc_examples/fc5a81f34d416e4b45ca8a859dd3b8f1.asciidoc create mode 100644 docs/doc_examples/fc75ea748e5f49b8ab292e453ab641a6.asciidoc delete mode 
100644 docs/doc_examples/fc8097bdfb6f3a4017bf4186ccca8a84.asciidoc create mode 100644 docs/doc_examples/fc8a426f8a5112e61e2acb913982a8d9.asciidoc create mode 100644 docs/doc_examples/fc9a1b1173690a911725cff3912e9755.asciidoc create mode 100644 docs/doc_examples/fccbddfba9f975de7e321732874dfb78.asciidoc create mode 100644 docs/doc_examples/fce5c03a388c893cb11a6696e068543f.asciidoc create mode 100644 docs/doc_examples/fce7a35a737fc9e54ac1225e310dd561.asciidoc create mode 100644 docs/doc_examples/fd04289c54493e19c1d3ac70d0b489c4.asciidoc create mode 100644 docs/doc_examples/fd0cd8ecd03468726b59a605eea06d75.asciidoc create mode 100644 docs/doc_examples/fd26bfdbe95b2d2db374385d12849f77.asciidoc create mode 100644 docs/doc_examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc create mode 100644 docs/doc_examples/fd352b472d44d197022a46fce90b6ecb.asciidoc create mode 100644 docs/doc_examples/fd60b4092c6552164862cec287359676.asciidoc create mode 100644 docs/doc_examples/fd620f09dbce62c6f0f603a366623607.asciidoc create mode 100644 docs/doc_examples/fd6fdc8fa994dd02cf1177077325304f.asciidoc create mode 100644 docs/doc_examples/fd738a9af7b5d21da31a7722f03aade8.asciidoc create mode 100644 docs/doc_examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc create mode 100644 docs/doc_examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc create mode 100644 docs/doc_examples/fdada036a875d7995d5d7aba9c06361e.asciidoc create mode 100644 docs/doc_examples/fdc8e090293e78e9a6b283650b682517.asciidoc delete mode 100644 docs/doc_examples/fdd38f0d248385a444c777e7acd97846.asciidoc create mode 100644 docs/doc_examples/fde3463ddf136fdfff1306a60986515e.asciidoc create mode 100644 docs/doc_examples/fdf7cfdf1c92d21ee710675596eac6fd.asciidoc create mode 100644 docs/doc_examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc create mode 100644 docs/doc_examples/fe3a927d868cbc530e08e05964d5174a.asciidoc create mode 100644 docs/doc_examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc create mode 100644 docs/doc_examples/fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc create mode 100644 docs/doc_examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc create mode 100644 docs/doc_examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc create mode 100644 docs/doc_examples/fe806011466e7cdc1590da186297edb6.asciidoc create mode 100644 docs/doc_examples/fe825c05e13e8163073166572c7ac97d.asciidoc create mode 100644 docs/doc_examples/fe8c3e2632f5057bfbd1898a8fe4d0d2.asciidoc create mode 100644 docs/doc_examples/fe96ca3b2a559d8411aca7ed5f3854bd.asciidoc create mode 100644 docs/doc_examples/febb71d774e0a1fc67454213d7448c53.asciidoc create mode 100644 docs/doc_examples/fece7c0fe1f7d113aa05ff5346a18aff.asciidoc create mode 100644 docs/doc_examples/feda4b996ea7004f8b2c5f5007fb717b.asciidoc delete mode 100644 docs/doc_examples/feefeb68144002fd1fff57b77b95b85e.asciidoc create mode 100644 docs/doc_examples/fef520cbc9b0656e6aac7b3dd3da9984.asciidoc create mode 100644 docs/doc_examples/ff05842419968a2141bde0371ac2f6f4.asciidoc create mode 100644 docs/doc_examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc create mode 100644 docs/doc_examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc create mode 100644 docs/doc_examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc create mode 100644 docs/doc_examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc create mode 100644 docs/doc_examples/ff63ae39c34925dbfa54282ec9989124.asciidoc create mode 100644 docs/doc_examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc create mode 100644 docs/doc_examples/ff7b81fa96c3b994efa3dee230512291.asciidoc create mode 
100644 docs/doc_examples/ff945f5db7d8a9b0d9f6a2f2fcf849e3.asciidoc create mode 100644 docs/doc_examples/ffcf80e1094aa2d774f56f6b0bc54827.asciidoc create mode 100644 docs/doc_examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc create mode 100644 docs/doc_examples/ffe45a7c70071730c2078cabb8cbdf95.asciidoc diff --git a/docs/doc_examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc b/docs/doc_examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc new file mode 100644 index 000000000..2bd8aa7df --- /dev/null +++ b/docs/doc_examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRealms({ + realms: "default_file,ldap1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/004743b9c9f61588926ccf734696b713.asciidoc b/docs/doc_examples/004743b9c9f61588926ccf734696b713.asciidoc new file mode 100644 index 000000000..146e97702 --- /dev/null +++ b/docs/doc_examples/004743b9c9f61588926ccf734696b713.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: ".ds-my-data-stream-2099.03.07-000001", + max_num_segments: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4b90feb9d5d3dbfce424dac0341320b7.asciidoc b/docs/doc_examples/004a17b42ab5155bb61da797a006fa9f.asciidoc similarity index 51% rename from docs/doc_examples/4b90feb9d5d3dbfce424dac0341320b7.asciidoc rename to docs/doc_examples/004a17b42ab5155bb61da797a006fa9f.asciidoc index 74010bbda..4c8ea77d9 100644 --- a/docs/doc_examples/4b90feb9d5d3dbfce424dac0341320b7.asciidoc +++ b/docs/doc_examples/004a17b42ab5155bb61da797a006fa9f.asciidoc @@ -4,20 +4,16 @@ [source, js] ---- const response = await client.search({ - index: 'bank', - body: { - query: { - match_all: {} + query: { + pinned: { + ids: ["1", "4", "100"], + organic: { + match: { + description: "iphone", + }, + }, }, - sort: [ - { - account_number: 'asc' - } - ], - from: 10, - size: 10 - } -}) -console.log(response) + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/006e0e16c9f1da58c0bfe57377f7fc38.asciidoc b/docs/doc_examples/006e0e16c9f1da58c0bfe57377f7fc38.asciidoc new file mode 100644 index 000000000..f1d0b2224 --- /dev/null +++ b/docs/doc_examples/006e0e16c9f1da58c0bfe57377f7fc38.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "whitespace", + filter: ["stemmer"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/007179b5e241da650562a5f0a5007823.asciidoc b/docs/doc_examples/007179b5e241da650562a5f0a5007823.asciidoc new file mode 100644 index 000000000..7ef9728b5 --- /dev/null +++ b/docs/doc_examples/007179b5e241da650562a5f0a5007823.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "cluster_health_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + http: { + request: { + host: 
"localhost", + port: 9200, + path: "/_cluster/health", + }, + }, + }, + condition: { + compare: { + "ctx.payload.status": { + eq: "red", + }, + }, + }, + actions: { + send_email: { + email: { + to: "username@example.org", + subject: "Cluster Status Warning", + body: "Cluster status is RED", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/008ed823c89e703c447ac89c6b689833.asciidoc b/docs/doc_examples/008ed823c89e703c447ac89c6b689833.asciidoc new file mode 100644 index 000000000..9498fb4aa --- /dev/null +++ b/docs/doc_examples/008ed823c89e703c447ac89c6b689833.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.migration.postFeatureUpgrade(); +console.log(response); +---- diff --git a/docs/doc_examples/506844befdc5691d835771bcbb1c1a60.asciidoc b/docs/doc_examples/0091fc75271b1fbbd4269622a4881e8b.asciidoc similarity index 55% rename from docs/doc_examples/506844befdc5691d835771bcbb1c1a60.asciidoc rename to docs/doc_examples/0091fc75271b1fbbd4269622a4881e8b.asciidoc index 2c64f07b3..af23c1cc6 100644 --- a/docs/doc_examples/506844befdc5691d835771bcbb1c1a60.asciidoc +++ b/docs/doc_examples/0091fc75271b1fbbd4269622a4881e8b.asciidoc @@ -4,18 +4,13 @@ [source, js] ---- const response = await client.search({ - index: 'bank', - body: { - query: { - match_all: {} + index: "my-index", + query: { + match: { + "http.clientip": "40.135.0.0", }, - sort: [ - { - account_number: 'asc' - } - ] - } -}) -console.log(response) + }, + fields: ["http.clientip"], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/00b3b6d76a368ae71277ea24af318693.asciidoc b/docs/doc_examples/00b3b6d76a368ae71277ea24af318693.asciidoc new file mode 100644 index 000000000..7f1e2c035 --- /dev/null +++ b/docs/doc_examples/00b3b6d76a368ae71277ea24af318693.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shardStores(); +console.log(response); +---- diff --git a/docs/doc_examples/00c05aa931fc985985e3e21c93cf43ff.asciidoc b/docs/doc_examples/00c05aa931fc985985e3e21c93cf43ff.asciidoc new file mode 100644 index 000000000..63bfc32fe --- /dev/null +++ b/docs/doc_examples/00c05aa931fc985985e3e21c93cf43ff.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: '{ "query": {{#toJson}}my_query{{/toJson}} }', + params: { + my_query: { + match_all: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/00d65f7b9daa1c6b18eedd8ace206bae.asciidoc b/docs/doc_examples/00d65f7b9daa1c6b18eedd8ace206bae.asciidoc new file mode 100644 index 000000000..36a79d081 --- /dev/null +++ b/docs/doc_examples/00d65f7b9daa1c6b18eedd8ace206bae.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["asciifolding"], + text: "açaí à la carte", +}); +console.log(response); +---- diff --git a/docs/doc_examples/00e0c964c79fcc1876ab957da2ffce82.asciidoc b/docs/doc_examples/00e0c964c79fcc1876ab957da2ffce82.asciidoc new file mode 100644 
index 000000000..92160eef7 --- /dev/null +++ b/docs/doc_examples/00e0c964c79fcc1876ab957da2ffce82.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "italian_example", + settings: { + analysis: { + filter: { + italian_elision: { + type: "elision", + articles: [ + "c", + "l", + "all", + "dall", + "dell", + "nell", + "sull", + "coll", + "pell", + "gl", + "agl", + "dagl", + "degl", + "negl", + "sugl", + "un", + "m", + "t", + "s", + "v", + "d", + ], + articles_case: true, + }, + italian_stop: { + type: "stop", + stopwords: "_italian_", + }, + italian_keywords: { + type: "keyword_marker", + keywords: ["esempio"], + }, + italian_stemmer: { + type: "stemmer", + language: "light_italian", + }, + }, + analyzer: { + rebuilt_italian: { + tokenizer: "standard", + filter: [ + "italian_elision", + "lowercase", + "italian_stop", + "italian_keywords", + "italian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc b/docs/doc_examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc new file mode 100644 index 000000000..d2c53fbf1 --- /dev/null +++ b/docs/doc_examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/my-e5-model", + body: { + service: "elasticsearch", + service_settings: { + num_allocations: 1, + num_threads: 1, + model_id: ".multilingual-e5-small", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc b/docs/doc_examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc new file mode 100644 index 000000000..de83fe2e1 --- /dev/null +++ b/docs/doc_examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0163af36c8472ac0c5160c8b716f5b26.asciidoc b/docs/doc_examples/0163af36c8472ac0c5160c8b716f5b26.asciidoc new file mode 100644 index 000000000..184d88118 --- /dev/null +++ b/docs/doc_examples/0163af36c8472ac0c5160c8b716f5b26.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + filter_path: "aggregations", + query: { + term: { + type: "t-shirt", + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4392116f2cc57ce8064ccbad30318d5.asciidoc b/docs/doc_examples/016f3147dae9ff2c3e831257ae470361.asciidoc similarity index 56% rename from docs/doc_examples/b4392116f2cc57ce8064ccbad30318d5.asciidoc rename to docs/doc_examples/016f3147dae9ff2c3e831257ae470361.asciidoc index f4b8f3ffc..0985ba535 100644 --- a/docs/doc_examples/b4392116f2cc57ce8064ccbad30318d5.asciidoc +++ 
b/docs/doc_examples/016f3147dae9ff2c3e831257ae470361.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test1', - alias: 'alias1' - } - } - ] - } -}) -console.log(response) + actions: [ + { + add: { + index: "logs-*", + alias: "logs", + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/019e329ed5a930aef825266822e7377a.asciidoc b/docs/doc_examples/019e329ed5a930aef825266822e7377a.asciidoc new file mode 100644 index 000000000..1e0efb671 --- /dev/null +++ b/docs/doc_examples/019e329ed5a930aef825266822e7377a.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "asciifold_example", + settings: { + analysis: { + analyzer: { + standard_asciifolding: { + tokenizer: "standard", + filter: ["my_ascii_folding"], + }, + }, + filter: { + my_ascii_folding: { + type: "asciifolding", + preserve_original: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc b/docs/doc_examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc new file mode 100644 index 000000000..0a9693fe3 --- /dev/null +++ b/docs/doc_examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.startTransform({ + transform_id: "ecommerce_transform", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c4ad860a485fe53d8140ad3ccd11dcf.asciidoc b/docs/doc_examples/01da9e0620e48270617fc248e6415cac.asciidoc similarity index 55% rename from docs/doc_examples/0c4ad860a485fe53d8140ad3ccd11dcf.asciidoc rename to docs/doc_examples/01da9e0620e48270617fc248e6415cac.asciidoc index 441a025db..e243fb4f9 100644 --- a/docs/doc_examples/0c4ad860a485fe53d8140ad3ccd11dcf.asciidoc +++ b/docs/doc_examples/01da9e0620e48270617fc248e6415cac.asciidoc @@ -4,18 +4,14 @@ [source, js] ---- const response = await client.search({ - body: { - query: { + index: "my-index-000001", + aggs: { + "my-agg-name": { terms: { - user: [ - 'kimchy', - 'elasticsearch' - ], - boost: 1 - } - } - } -}) -console.log(response) + field: "my-field", + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc b/docs/doc_examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc new file mode 100644 index 000000000..89f99c966 --- /dev/null +++ b/docs/doc_examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/01f50acf7998b24969f451e922d145eb.asciidoc b/docs/doc_examples/01f50acf7998b24969f451e922d145eb.asciidoc new file mode 100644 index 000000000..d92c8bdfe --- /dev/null +++ b/docs/doc_examples/01f50acf7998b24969f451e922d145eb.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "basque_example", + settings: { 
+    analysis: {
+      filter: {
+        basque_stop: {
+          type: "stop",
+          stopwords: "_basque_",
+        },
+        basque_keywords: {
+          type: "keyword_marker",
+          keywords: ["Adibidez"],
+        },
+        basque_stemmer: {
+          type: "stemmer",
+          language: "basque",
+        },
+      },
+      analyzer: {
+        rebuilt_basque: {
+          tokenizer: "standard",
+          filter: [
+            "lowercase",
+            "basque_stop",
+            "basque_keywords",
+            "basque_stemmer",
+          ],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/020c95db88ef356093f03be84893ddf9.asciidoc b/docs/doc_examples/020c95db88ef356093f03be84893ddf9.asciidoc
new file mode 100644
index 000000000..0dc8775df
--- /dev/null
+++ b/docs/doc_examples/020c95db88ef356093f03be84893ddf9.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ccr.followStats({
+  index: "<index>",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/020de6b6cb960a76297452725a38889f.asciidoc b/docs/doc_examples/020de6b6cb960a76297452725a38889f.asciidoc
new file mode 100644
index 000000000..7fb6cecdf
--- /dev/null
+++ b/docs/doc_examples/020de6b6cb960a76297452725a38889f.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    has_child: {
+      type: "child",
+      query: {
+        match_all: {},
+      },
+      max_children: 10,
+      min_children: 2,
+      score_mode: "min",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc b/docs/doc_examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc
new file mode 100644
index 000000000..78f7c8a6c
--- /dev/null
+++ b/docs/doc_examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putMapping({
+  index: "my-index-000001",
+  properties: {
+    name: {
+      properties: {
+        last: {
+          type: "text",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/025155da86802ebf4c3aeee5aab692f9.asciidoc b/docs/doc_examples/025155da86802ebf4c3aeee5aab692f9.asciidoc
new file mode 100644
index 000000000..f41e9eb7a
--- /dev/null
+++ b/docs/doc_examples/025155da86802ebf4c3aeee5aab692f9.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "sales",
+  mappings: {
+    properties: {
+      tags: {
+        type: "keyword",
+      },
+      comments: {
+        type: "nested",
+        properties: {
+          username: {
+            type: "keyword",
+          },
+          comment: {
+            type: "text",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc b/docs/doc_examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc
new file mode 100644
index 000000000..6349c1f6e
--- /dev/null
+++ b/docs/doc_examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.flushJob({
+  job_id: "low_request_rate",
+  calc_interim: true,
+});
+console.log(response);
+----
diff --git
a/docs/doc_examples/0264e994a7e68561e2ca6be0f0d90ee9.asciidoc b/docs/doc_examples/0264e994a7e68561e2ca6be0f0d90ee9.asciidoc new file mode 100644 index 000000000..6be1d4da8 --- /dev/null +++ b/docs/doc_examples/0264e994a7e68561e2ca6be0f0d90ee9.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + JapaneseCars: { + terms: { + field: "make", + include: ["mazda", "honda"], + }, + }, + ActiveCarManufacturers: { + terms: { + field: "make", + exclude: ["rover", "jensen"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0280247e0cf2e561c548f22c9fb31163.asciidoc b/docs/doc_examples/0280247e0cf2e561c548f22c9fb31163.asciidoc new file mode 100644 index 000000000..b2677e6bc --- /dev/null +++ b/docs/doc_examples/0280247e0cf2e561c548f22c9fb31163.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateToken({ + username: "myuser", +}); +console.log(response); +---- diff --git a/docs/doc_examples/02853293a5b7cd9cc7a886eb413bbeb6.asciidoc b/docs/doc_examples/02853293a5b7cd9cc7a886eb413bbeb6.asciidoc new file mode 100644 index 000000000..f6abfb4c5 --- /dev/null +++ b/docs/doc_examples/02853293a5b7cd9cc7a886eb413bbeb6.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + char_filter: [ + { + type: "mapping", + mappings: [ + "٠ => 0", + "١ => 1", + "٢ => 2", + "٣ => 3", + "٤ => 4", + "٥ => 5", + "٦ => 6", + "٧ => 7", + "٨ => 8", + "٩ => 9", + ], + }, + ], + text: "My license plate is ٢٥٠١٥", +}); +console.log(response); +---- diff --git a/docs/doc_examples/028f6d6ac2594e20b78b8a8f8cbad49d.asciidoc b/docs/doc_examples/028f6d6ac2594e20b78b8a8f8cbad49d.asciidoc deleted file mode 100644 index d5e9d1d71..000000000 --- a/docs/doc_examples/028f6d6ac2594e20b78b8a8f8cbad49d.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - countries: { - terms: { - field: 'artist.country', - order: [ - { - 'rock>playback_stats.avg': 'desc' - }, - { - _count: 'desc' - } - ] - }, - aggs: { - rock: { - filter: { - term: { - genre: 'rock' - } - }, - aggs: { - playback_stats: { - stats: { - field: 'play_count' - } - } - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/029de2f5383a42e1ac4ca1565bd2a130.asciidoc b/docs/doc_examples/029de2f5383a42e1ac4ca1565bd2a130.asciidoc new file mode 100644 index 000000000..18aa9dece --- /dev/null +++ b/docs/doc_examples/029de2f5383a42e1ac4ca1565bd2a130.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + full_name: { + type: "text", + index_prefixes: { + min_chars: 1, + max_chars: 10, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/02b00f21e9d23d82276ace0dd154d779.asciidoc b/docs/doc_examples/02b00f21e9d23d82276ace0dd154d779.asciidoc new file mode 100644 index 000000000..3619352ce --- /dev/null +++ b/docs/doc_examples/02b00f21e9d23d82276ace0dd154d779.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + routing: "user1,user2", + query: { + match: { + title: "document", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/43682666e1abcb14770c99f02eb26a0d.asciidoc b/docs/doc_examples/02b6aa3e5652839f03de3a655854b897.asciidoc similarity index 75% rename from docs/doc_examples/43682666e1abcb14770c99f02eb26a0d.asciidoc rename to docs/doc_examples/02b6aa3e5652839f03de3a655854b897.asciidoc index 3fccfdec7..fad61ac26 100644 --- a/docs/doc_examples/43682666e1abcb14770c99f02eb26a0d.asciidoc +++ b/docs/doc_examples/02b6aa3e5652839f03de3a655854b897.asciidoc @@ -4,9 +4,7 @@ [source, js] ---- const response = await client.search({ - index: '*', - q: 'user:kimchy' -}) -console.log(response) + index: "my-data-stream", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/02c48d461536709c3fc8a0e8147c3787.asciidoc b/docs/doc_examples/02c48d461536709c3fc8a0e8147c3787.asciidoc new file mode 100644 index 000000000..dec8449bb --- /dev/null +++ b/docs/doc_examples/02c48d461536709c3fc8a0e8147c3787.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "pipelineB", + description: "outer pipeline", + processors: [ + { + pipeline: { + name: "pipelineA", + }, + }, + { + set: { + field: "outer_pipeline_set", + value: "outer", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc b/docs/doc_examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc new file mode 100644 index 000000000..ab8ea60ad --- /dev/null +++ b/docs/doc_examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getTemplate({ + name: "template_1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/02fad6b80bb29c2a7e6840db2fc67b18.asciidoc b/docs/doc_examples/02fad6b80bb29c2a7e6840db2fc67b18.asciidoc new file mode 100644 index 000000000..e5501afe9 --- /dev/null +++ b/docs/doc_examples/02fad6b80bb29c2a7e6840db2fc67b18.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_wildcard: { + type: "wildcard", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + my_wildcard: "This string can be quite lengthy", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + wildcard: { + my_wildcard: { + value: "*quite*lengthy", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/1d65cb6d055c46a1bde809687d835b71.asciidoc 
b/docs/doc_examples/0308cbd85281f95fc458042afe3f587d.asciidoc similarity index 68% rename from docs/doc_examples/1d65cb6d055c46a1bde809687d835b71.asciidoc rename to docs/doc_examples/0308cbd85281f95fc458042afe3f587d.asciidoc index 122c24c08..07249ec05 100644 --- a/docs/doc_examples/1d65cb6d055c46a1bde809687d835b71.asciidoc +++ b/docs/doc_examples/0308cbd85281f95fc458042afe3f587d.asciidoc @@ -4,10 +4,9 @@ [source, js] ---- const response = await client.get({ - index: 'twitter', - id: '2', - routing: 'user1' -}) -console.log(response) + index: "my-index-000001", + id: 0, + _source: "*.id", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/032eac56b798bea29390e102538f4a26.asciidoc b/docs/doc_examples/032eac56b798bea29390e102538f4a26.asciidoc new file mode 100644 index 000000000..936e558a2 --- /dev/null +++ b/docs/doc_examples/032eac56b798bea29390e102538f4a26.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.refresh({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/033778305d52746f5ce0a2a922c8e521.asciidoc b/docs/doc_examples/033778305d52746f5ce0a2a922c8e521.asciidoc deleted file mode 100644 index 0bfc9f6e1..000000000 --- a/docs/doc_examples/033778305d52746f5ce0a2a922c8e521.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - script: { - source: "doc['genre'].value", - lang: 'painless' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/033838729cfb5d1a28d04f69ee78d924.asciidoc b/docs/doc_examples/033838729cfb5d1a28d04f69ee78d924.asciidoc new file mode 100644 index 000000000..1f4149800 --- /dev/null +++ b/docs/doc_examples/033838729cfb5d1a28d04f69ee78d924.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "Polygon", + orientation: "LEFT", + coordinates: [ + [ + [-177, 10], + [176, 15], + [172, 0], + [176, -15], + [-177, -10], + [-177, 10], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0350410d11579f4e876c798ce1eaef5b.asciidoc b/docs/doc_examples/0350410d11579f4e876c798ce1eaef5b.asciidoc new file mode 100644 index 000000000..39af7ac7a --- /dev/null +++ b/docs/doc_examples/0350410d11579f4e876c798ce1eaef5b.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 5, + refresh: "true", + document: { + query: { + bool: { + should: [ + { + match: { + message: { + query: "Japanese art", + _name: "query1", + }, + }, + }, + { + match: { + message: { + query: "Holand culture", + _name: "query2", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0350ff5ebb8207c004eb771088339cb4.asciidoc b/docs/doc_examples/0350ff5ebb8207c004eb771088339cb4.asciidoc new file mode 100644 index 000000000..4962104be --- /dev/null +++ 
b/docs/doc_examples/0350ff5ebb8207c004eb771088339cb4.asciidoc
@@ -0,0 +1,38 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "example-index",
+  retriever: {
+    rrf: {
+      retrievers: [
+        {
+          standard: {
+            query: {
+              term: {
+                text: "blue shoes sale",
+              },
+            },
+          },
+        },
+        {
+          standard: {
+            query: {
+              sparse_vector: {
+                field: "ml.tokens",
+                inference_id: "my_elser_model",
+                query: "What blue shoes are on sale?",
+              },
+            },
+          },
+        },
+      ],
+      rank_window_size: 50,
+      rank_constant: 20,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/03582fc93683e573062bcfda45e01d69.asciidoc b/docs/doc_examples/03582fc93683e573062bcfda45e01d69.asciidoc
new file mode 100644
index 000000000..acf03f1b5
--- /dev/null
+++ b/docs/doc_examples/03582fc93683e573062bcfda45e01d69.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        my_custom_analyzer: {
+          type: "custom",
+          tokenizer: "standard",
+          char_filter: ["html_strip"],
+          filter: ["lowercase", "asciifolding"],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.analyze({
+  index: "my-index-000001",
+  analyzer: "my_custom_analyzer",
+  text: "Is this <b>déjà vu</b>?",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/035a7a919eb6513b4769a3727b7d6447.asciidoc b/docs/doc_examples/035a7a919eb6513b4769a3727b7d6447.asciidoc
new file mode 100644
index 000000000..4c4a3671a
--- /dev/null
+++ b/docs/doc_examples/035a7a919eb6513b4769a3727b7d6447.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  analyzer: "whitespace",
+  text: "The quick brown fox.",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/03891265df2111a38e0b6b24c1b967e1.asciidoc b/docs/doc_examples/03891265df2111a38e0b6b24c1b967e1.asciidoc
new file mode 100644
index 000000000..d0da136f7
--- /dev/null
+++ b/docs/doc_examples/03891265df2111a38e0b6b24c1b967e1.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.getServiceAccounts();
+console.log(response);
+----
diff --git a/docs/doc_examples/03b1d76fa0b773d5b7d74ecb7e1e1a80.asciidoc b/docs/doc_examples/03b1d76fa0b773d5b7d74ecb7e1e1a80.asciidoc
new file mode 100644
index 000000000..23fad0184
--- /dev/null
+++ b/docs/doc_examples/03b1d76fa0b773d5b7d74ecb7e1e1a80.asciidoc
@@ -0,0 +1,14 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.restore({
+  repository: "my_repository",
+  snapshot: "my_snapshot_2099.05.06",
+  indices: "my-index,logs-my_app-default",
+  rename_pattern: "(.+)",
+  rename_replacement: "restored-$1",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc b/docs/doc_examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc
new file mode 100644
index 000000000..7e6d9a983
--- /dev/null
+++ b/docs/doc_examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.sql.query({
+  format: "txt",
+  query: "SELECT * FROM library ORDER BY page_count DESC LIMIT 5",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/04412d11783dac25b5fd2ec5407078a3.asciidoc b/docs/doc_examples/04412d11783dac25b5fd2ec5407078a3.asciidoc
new file mode 100644
index 000000000..dbf3b4c90
--- /dev/null
+++ b/docs/doc_examples/04412d11783dac25b5fd2ec5407078a3.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_connector/my-connector/_api_key_id",
+  body: {
+    api_key_id: "my-api-key-id",
+    api_key_secret_id: "my-connector-secret-id",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/044b2f99e7438e408685b258db17f863.asciidoc b/docs/doc_examples/044b2f99e7438e408685b258db17f863.asciidoc
new file mode 100644
index 000000000..577720dbb
--- /dev/null
+++ b/docs/doc_examples/044b2f99e7438e408685b258db17f863.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.eql.search({
+  index:
"my-data-stream", + query: '\n process where process.name == "regsvr32.exe"\n ', + size: 50, +}); +console.log(response); +---- diff --git a/docs/doc_examples/046b2249bbc49e77848c114cee940f17.asciidoc b/docs/doc_examples/046b2249bbc49e77848c114cee940f17.asciidoc new file mode 100644 index 000000000..7541cfe0d --- /dev/null +++ b/docs/doc_examples/046b2249bbc49e77848c114cee940f17.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + multi_match: { + query: "How is the weather in Jamaica?", + fields: ["title", "description"], + }, + }, + }, + }, + { + standard: { + query: { + text_expansion: { + "ml.inference.title_expanded.predicted_value": { + model_id: ".elser_model_2", + model_text: "How is the weather in Jamaica?", + }, + }, + }, + }, + }, + { + standard: { + query: { + text_expansion: { + "ml.inference.description_expanded.predicted_value": { + model_id: ".elser_model_2", + model_text: "How is the weather in Jamaica?", + }, + }, + }, + }, + }, + ], + window_size: 10, + rank_constant: 20, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0470d7101637568b9d3d1239f06325a7.asciidoc b/docs/doc_examples/0470d7101637568b9d3d1239f06325a7.asciidoc new file mode 100644 index 000000000..0e0773a12 --- /dev/null +++ b/docs/doc_examples/0470d7101637568b9d3d1239f06325a7.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_internal/desired_nodes/<history_id>/<version>", + body: { + nodes: [ + { + settings: { + "node.name": "instance-000187", + "node.external_id": "instance-000187", + "node.roles": ["data_hot", "master"], + "node.attr.data": "hot", + "node.attr.logical_availability_zone": "zone-0", + }, + processors: 8, + memory: "58gb", + storage: "2tb", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/047266b0d20fdb62ebc72d51952c8f6d.asciidoc b/docs/doc_examples/047266b0d20fdb62ebc72d51952c8f6d.asciidoc index ae2d02e7e..6358e9f97 100644 --- a/docs/doc_examples/047266b0d20fdb62ebc72d51952c8f6d.asciidoc +++ b/docs/doc_examples/047266b0d20fdb62ebc72d51952c8f6d.asciidoc @@ -4,20 +4,14 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'Will Smith', - type: 'cross_fields', - fields: [ - 'first_name', - 'last_name' - ], - operator: 'and' - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "Will Smith", + type: "cross_fields", + fields: ["first_name", "last_name"], + operator: "and", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc b/docs/doc_examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc new file mode 100644 index 000000000..9ab89027c --- /dev/null +++ b/docs/doc_examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.resetTransform({ + transform_id: "ecommerce_transform", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/048d8abd42d094bbdcf4452a58ccb35b.asciidoc b/docs/doc_examples/048d8abd42d094bbdcf4452a58ccb35b.asciidoc
deleted file mode 100644
index 42b87e0bc..000000000
--- a/docs/doc_examples/048d8abd42d094bbdcf4452a58ccb35b.asciidoc
+++ /dev/null
@@ -1,17 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.create({
-  index: 'twitter',
-  id: '1',
-  body: {
-    user: 'kimchy',
-    post_date: '2009-11-15T14:12:12',
-    message: 'trying out Elasticsearch'
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/04d586a536061ec1045d0bb2dc3d1a5f.asciidoc b/docs/doc_examples/04d586a536061ec1045d0bb2dc3d1a5f.asciidoc
new file mode 100644
index 000000000..ffb79faa8
--- /dev/null
+++ b/docs/doc_examples/04d586a536061ec1045d0bb2dc3d1a5f.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "set_os",
+  description: "sets the value of host.os.name from the field os",
+  processors: [
+    {
+      set: {
+        field: "host.os.name",
+        value: "{{{os}}}",
+      },
+    },
+  ],
+});
+console.log(response);
+
+const response1 = await client.ingest.simulate({
+  id: "set_os",
+  docs: [
+    {
+      _source: {
+        os: "Ubuntu",
+      },
+    },
+  ],
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc b/docs/doc_examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc
new file mode 100644
index 000000000..5496c5a44
--- /dev/null
+++ b/docs/doc_examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "my-pipeline-id",
+  description: "My optional pipeline description",
+  processors: [
+    {
+      set: {
+        description: "My optional processor description",
+        field: "my-keyword-field",
+        value: "foo",
+      },
+    },
+  ],
+  _meta: {
+    reason: "set my-keyword-field to foo",
+    serialization: {
+      class: "MyPipeline",
+      id: 10,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc b/docs/doc_examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc
new file mode 100644
index 000000000..d78024183
--- /dev/null
+++ b/docs/doc_examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_inference/text_embedding/google_vertex_ai_embeddings",
+  body: {
+    service: "googlevertexai",
+    service_settings: {
+      service_account_json: "<service_account_json>",
+      model_id: "<model_id>",
+      location: "<location>",
+      project_id: "<project_id>",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc b/docs/doc_examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc
index d7a06bce5..2622f6cef 100644
--- a/docs/doc_examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc
+++ b/docs/doc_examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc
@@ -4,9 +4,8 @@
 [source, js]
 ----
 const response = await client.cluster.health({
-  wait_for_status: 'yellow',
-  timeout: '50s'
-})
-console.log(response)
+  wait_for_status: "yellow",
+  timeout:
"50s", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/04fe1e3a0047b0cdb10987b79fc3f3f3.asciidoc b/docs/doc_examples/04fe1e3a0047b0cdb10987b79fc3f3f3.asciidoc deleted file mode 100644 index 12bf2c8fa..000000000 --- a/docs/doc_examples/04fe1e3a0047b0cdb10987b79fc3f3f3.asciidoc +++ /dev/null @@ -1,30 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - term: { - user: 'kimchy' - } - }, - sort: { - _script: { - type: 'number', - script: { - lang: 'painless', - source: "doc['field_name'].value * params.factor", - params: { - factor: 1.1 - } - }, - order: 'asc' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0502284d4685c478eb68761f979f4303.asciidoc b/docs/doc_examples/0502284d4685c478eb68761f979f4303.asciidoc new file mode 100644 index 000000000..35597e672 --- /dev/null +++ b/docs/doc_examples/0502284d4685c478eb68761f979f4303.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.evaluateDataFrame({ + index: "house_price_predictions", + query: { + bool: { + filter: [ + { + term: { + "ml.is_training": false, + }, + }, + ], + }, + }, + evaluation: { + regression: { + actual_field: "price", + predicted_field: "ml.price_prediction", + metrics: { + r_squared: {}, + mse: {}, + msle: { + offset: 10, + }, + huber: { + delta: 1.5, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/050b3947025fee403232b8e6e9112dab.asciidoc b/docs/doc_examples/050b3947025fee403232b8e6e9112dab.asciidoc new file mode 100644 index 000000000..7e17a91ea --- /dev/null +++ b/docs/doc_examples/050b3947025fee403232b8e6e9112dab.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "yaml", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/05148cc541f447486d9daf15ab77292b.asciidoc b/docs/doc_examples/05148cc541f447486d9daf15ab77292b.asciidoc new file mode 100644 index 000000000..cebe6e3bd --- /dev/null +++ b/docs/doc_examples/05148cc541f447486d9daf15ab77292b.asciidoc @@ -0,0 +1,54 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "logs", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50gb", + }, + }, + }, + warm: { + min_age: "30d", + actions: { + shrink: { + number_of_shards: 1, + }, + forcemerge: { + max_num_segments: 1, + }, + }, + }, + cold: { + min_age: "60d", + actions: { + searchable_snapshot: { + snapshot_repository: "found-snapshots", + }, + }, + }, + frozen: { + min_age: "90d", + actions: { + searchable_snapshot: { + snapshot_repository: "found-snapshots", + }, + }, + }, + delete: { + min_age: "735d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0518c673094fb18ecb491a3b78af4695.asciidoc b/docs/doc_examples/0518c673094fb18ecb491a3b78af4695.asciidoc new file mode 100644 index 000000000..1b3f88827 --- /dev/null +++ 
b/docs/doc_examples/0518c673094fb18ecb491a3b78af4695.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + allocate: { + include: { + box_type: "hot,warm", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/05284c8ea91769c09c8db47db8a6629a.asciidoc b/docs/doc_examples/05284c8ea91769c09c8db47db8a6629a.asciidoc new file mode 100644 index 000000000..1017e8824 --- /dev/null +++ b/docs/doc_examples/05284c8ea91769c09c8db47db8a6629a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.repositories({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/053497b6960f80fd7b005b7c6d54358f.asciidoc b/docs/doc_examples/053497b6960f80fd7b005b7c6d54358f.asciidoc new file mode 100644 index 000000000..5a4d4ee7a --- /dev/null +++ b/docs/doc_examples/053497b6960f80fd7b005b7c6d54358f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + delete: { + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/05500e77aef581d92f6c605f7a48f7df.asciidoc b/docs/doc_examples/05500e77aef581d92f6c605f7a48f7df.asciidoc new file mode 100644 index 000000000..80f74e73e --- /dev/null +++ b/docs/doc_examples/05500e77aef581d92f6c605f7a48f7df.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "polygon", + coordinates: [ + [ + [1000, -1001], + [1001, -1001], + [1001, -1000], + [1000, -1000], + [1000, -1001], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/059e04aaf093379401f665c33ac796dc.asciidoc b/docs/doc_examples/059e04aaf093379401f665c33ac796dc.asciidoc new file mode 100644 index 000000000..44a0e488d --- /dev/null +++ b/docs/doc_examples/059e04aaf093379401f665c33ac796dc.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "keyword_marker", + keywords: ["jumping"], + }, + "stemmer", + ], + text: "fox running and jumping", + explain: true, + attributes: "keyword", +}); +console.log(response); +---- diff --git a/docs/doc_examples/05a09078fe1016e900e445ad4039cf97.asciidoc b/docs/doc_examples/05a09078fe1016e900e445ad4039cf97.asciidoc new file mode 100644 index 000000000..791b13f13 --- /dev/null +++ b/docs/doc_examples/05a09078fe1016e900e445ad4039cf97.asciidoc @@ -0,0 +1,78 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "clientips", + mappings: { + properties: { + client_ip: { + type: "keyword", + }, 
+ env: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "clientips", + operations: [ + { + index: {}, + }, + { + client_ip: "172.21.0.5", + env: "Development", + }, + { + index: {}, + }, + { + client_ip: "172.21.2.113", + env: "QA", + }, + { + index: {}, + }, + { + client_ip: "172.21.2.162", + env: "QA", + }, + { + index: {}, + }, + { + client_ip: "172.21.3.15", + env: "Production", + }, + { + index: {}, + }, + { + client_ip: "172.21.3.16", + env: "Production", + }, + ], +}); +console.log(response1); + +const response2 = await client.enrich.putPolicy({ + name: "clientip_policy", + match: { + indices: "clientips", + match_field: "client_ip", + enrich_fields: ["env"], + }, +}); +console.log(response2); + +const response3 = await client.enrich.executePolicy({ + name: "clientip_policy", + wait_for_completion: "false", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/05ba0fdd0215e313ecea8a2f8f5a43b4.asciidoc b/docs/doc_examples/05ba0fdd0215e313ecea8a2f8f5a43b4.asciidoc new file mode 100644 index 000000000..cdd215727 --- /dev/null +++ b/docs/doc_examples/05ba0fdd0215e313ecea8a2f8f5a43b4.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/05bee3adf46b9d6a2fef96c51bf958da.asciidoc b/docs/doc_examples/05bee3adf46b9d6a2fef96c51bf958da.asciidoc new file mode 100644 index 000000000..39c360520 --- /dev/null +++ b/docs/doc_examples/05bee3adf46b9d6a2fef96c51bf958da.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "click_role", + indices: [ + { + names: ["events-*"], + privileges: ["read"], + query: { + match: { + category: "click", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/05f4a4b284f68f7fb13603d7cd854083.asciidoc b/docs/doc_examples/05f4a4b284f68f7fb13603d7cd854083.asciidoc new file mode 100644 index 000000000..d80e2c6ab --- /dev/null +++ b/docs/doc_examples/05f4a4b284f68f7fb13603d7cd854083.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "logs-my_app-default", + settings: { + index: { + lifecycle: { + name: "new-lifecycle-policy", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc b/docs/doc_examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc new file mode 100644 index 000000000..ceb726560 --- /dev/null +++ b/docs/doc_examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ssl.certificates(); +console.log(response); +---- diff --git a/docs/doc_examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc b/docs/doc_examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc new file mode 100644 index 000000000..709f0fadb --- /dev/null +++ b/docs/doc_examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc @@ -0,0 +1,11 @@ +// This 
file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.status({ + repository: "my_repository", + snapshot: "snapshot_2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/060a56477e39f272fc5a9cfe47443cf1.asciidoc b/docs/doc_examples/060a56477e39f272fc5a9cfe47443cf1.asciidoc new file mode 100644 index 000000000..7f41907ea --- /dev/null +++ b/docs/doc_examples/060a56477e39f272fc5a9cfe47443cf1.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "simple_pattern", + pattern: "[0123456789]{3}", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "fd-786-335-514-x", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a6f8636b03cc5f677b7d89e750328612.asciidoc b/docs/doc_examples/0620a10ff15a2bb3eb489afc24ff0131.asciidoc similarity index 70% rename from docs/doc_examples/a6f8636b03cc5f677b7d89e750328612.asciidoc rename to docs/doc_examples/0620a10ff15a2bb3eb489afc24ff0131.asciidoc index e5b33232a..6d337053c 100644 --- a/docs/doc_examples/a6f8636b03cc5f677b7d89e750328612.asciidoc +++ b/docs/doc_examples/0620a10ff15a2bb3eb489afc24ff0131.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.search({ - index: 'twitter', - size: 'surprise_me' -}) -console.log(response) + index: "my-index-000001", + size: "surprise_me", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/06454a8e85e2d3479c90390bb955eb39.asciidoc b/docs/doc_examples/06454a8e85e2d3479c90390bb955eb39.asciidoc new file mode 100644 index 000000000..5c103b7f9 --- /dev/null +++ b/docs/doc_examples/06454a8e85e2d3479c90390bb955eb39.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "snapshot*,-snapshot_3", + sort: "name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc b/docs/doc_examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc new file mode 100644 index 000000000..ebf376463 --- /dev/null +++ b/docs/doc_examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-alias", + conditions: { + max_age: "7d", + max_docs: 1000, + max_primary_shard_size: "50gb", + max_primary_shard_docs: "2000", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/069030e5f43d8f8ce3e3eca40205027e.asciidoc b/docs/doc_examples/069030e5f43d8f8ce3e3eca40205027e.asciidoc new file mode 100644 index 000000000..083a9bb5a --- /dev/null +++ b/docs/doc_examples/069030e5f43d8f8ce3e3eca40205027e.asciidoc @@ -0,0 +1,58 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + manager: { + properties: { + age: { + type: "integer", + }, + name: { + type: "text", + }, + }, + }, + employees: { + type: "nested", + properties: { + age: { + type: "integer", + }, + name: { + type: "text", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + region: "US", + manager: { + name: "Alice White", + age: 30, + }, + employees: [ + { + name: "John Smith", + age: 34, + }, + { + name: "Peter Brown", + age: 26, + }, + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/06a761823a694850a6efe5d5bf61478c.asciidoc b/docs/doc_examples/06a761823a694850a6efe5d5bf61478c.asciidoc new file mode 100644 index 000000000..e5aaf6495 --- /dev/null +++ b/docs/doc_examples/06a761823a694850a6efe5d5bf61478c.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.putPolicy({ + name: "users-policy", + match: { + indices: "users", + match_field: "email", + enrich_fields: ["first_name", "last_name", "city", "zip", "state"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/06afce2955f9094d96d27067ebca32e8.asciidoc b/docs/doc_examples/06afce2955f9094d96d27067ebca32e8.asciidoc deleted file mode 100644 index b60b6820d..000000000 --- a/docs/doc_examples/06afce2955f9094d96d27067ebca32e8.asciidoc +++ /dev/null @@ -1,48 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - bool: { - must: { - term: { - user: 'kimchy' - } - }, - filter: { - term: { - tag: 'tech' - } - }, - must_not: { - range: { - age: { - gte: 10, - lte: 20 - } - } - }, - should: [ - { - term: { - tag: 'wow' - } - }, - { - term: { - tag: 'elasticsearch' - } - } - ], - minimum_should_match: 1, - boost: 1 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc b/docs/doc_examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc new file mode 100644 index 000000000..57334617a --- /dev/null +++ b/docs/doc_examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.msearch({ + index: "my-index-000001", + searches: [ + {}, + { + query: { + match: { + message: "this is a test", + }, + }, + }, + { + index: "my-index-000002", + }, + { + query: { + match_all: {}, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/06c0db0f42223761e32fa418066b275f.asciidoc b/docs/doc_examples/06c0db0f42223761e32fa418066b275f.asciidoc new file mode 100644 index 000000000..2ee03c83d --- /dev/null +++ b/docs/doc_examples/06c0db0f42223761e32fa418066b275f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my-repo", + repository: { + type: "s3", + settings: { + bucket: "repo-bucket", + client: "elastic-internal-71bcd3", + base_path: "myrepo", + readonly: true, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/8e6bfb4441ffa15c86d5dc20fa083571.asciidoc b/docs/doc_examples/06d65e3505dcb306977185e8545cf4a8.asciidoc similarity index 63% rename from docs/doc_examples/8e6bfb4441ffa15c86d5dc20fa083571.asciidoc rename to docs/doc_examples/06d65e3505dcb306977185e8545cf4a8.asciidoc index 4f098a530..c4044aad9 100644 --- a/docs/doc_examples/8e6bfb4441ffa15c86d5dc20fa083571.asciidoc +++ b/docs/doc_examples/06d65e3505dcb306977185e8545cf4a8.asciidoc @@ -4,12 +4,9 @@ [source, js] ---- const response = await client.cluster.putSettings({ - body: { - transient: { - 'logger.org.elasticsearch.transport': 'trace' - } - } -}) -console.log(response) + persistent: { + "cluster.routing.allocation.total_shards_per_node": 400, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/070cf72783cfe534a04f2f64e4016052.asciidoc b/docs/doc_examples/070cf72783cfe534a04f2f64e4016052.asciidoc new file mode 100644 index 000000000..9bde2346e --- /dev/null +++ b/docs/doc_examples/070cf72783cfe534a04f2f64e4016052.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + subobjects: false, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "metric_1", + document: { + time: "100ms", + "time.min": "10ms", + "time.max": "900ms", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/0718a0b4f4905a8c90c1ff93de557e56.asciidoc b/docs/doc_examples/0718a0b4f4905a8c90c1ff93de557e56.asciidoc new file mode 100644 index 000000000..260c6a967 --- /dev/null +++ b/docs/doc_examples/0718a0b4f4905a8c90c1ff93de557e56.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + grades_stats: { + extended_stats: { + field: "grade", + sigma: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc b/docs/doc_examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc new file mode 100644 index 000000000..c292cfd78 --- /dev/null +++ b/docs/doc_examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.activateUserProfile({ + grant_type: "password", + username: "jacknich", + password: "l0ng-r4nd0m-p@ssw0rd", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0737ebaea33631f001fb3f4226948492.asciidoc b/docs/doc_examples/0737ebaea33631f001fb3f4226948492.asciidoc new file mode 100644 index 000000000..99f666660 --- /dev/null +++ b/docs/doc_examples/0737ebaea33631f001fb3f4226948492.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_ip_locations", + mappings: { + properties: { + geoip: { + properties: { + location: { + type: "geo_point", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc 
b/docs/doc_examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc new file mode 100644 index 000000000..e1d337be8 --- /dev/null +++ b/docs/doc_examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRealms({ + realms: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc b/docs/doc_examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc new file mode 100644 index 000000000..23b0f56fe --- /dev/null +++ b/docs/doc_examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.getTransformStats({ + transform_id: "ecommerce-customer-transform", +}); +console.log(response); +---- diff --git a/docs/doc_examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc b/docs/doc_examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc new file mode 100644 index 000000000..3580265c3 --- /dev/null +++ b/docs/doc_examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.stopJob({ + id: "sensor", + wait_for_completion: "true", + timeout: "10s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc b/docs/doc_examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc new file mode 100644 index 000000000..eb7e9575d --- /dev/null +++ b/docs/doc_examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + max_docs: 10, + source: { + index: "my-index-000001", + query: { + function_score: { + random_score: {}, + min_score: 0.9, + }, + }, + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc b/docs/doc_examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc new file mode 100644 index 000000000..e70d7e96c --- /dev/null +++ b/docs/doc_examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.forgetFollower({ + index: "leader_index", + follower_cluster: "follower_cluster", + follower_index: "follower_index", + follower_index_uuid: "vYpnaWPRQB6mNspmoCeYyA", + leader_remote_cluster: "leader_cluster", +}); +console.log(response); +---- diff --git a/docs/doc_examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc b/docs/doc_examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc new file mode 100644 index 000000000..e69d25470 --- /dev/null +++ b/docs/doc_examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + semantic: { + field: "inference_field", + query: 
"Best surfing places", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/07de76cb0e7f11c7533788faf8c093c3.asciidoc b/docs/doc_examples/07de76cb0e7f11c7533788faf8c093c3.asciidoc new file mode 100644 index 000000000..66dacc74e --- /dev/null +++ b/docs/doc_examples/07de76cb0e7f11c7533788faf8c093c3.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + title: { + type: "text", + }, + labels: { + type: "flattened", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/07ec38b97601286ec106986a84e1e5f7.asciidoc b/docs/doc_examples/07ec38b97601286ec106986a84e1e5f7.asciidoc new file mode 100644 index 000000000..b473ddcdc --- /dev/null +++ b/docs/doc_examples/07ec38b97601286ec106986a84e1e5f7.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "job-candidates", + mappings: { + properties: { + name: { + type: "keyword", + }, + programming_languages: { + type: "keyword", + }, + required_matches: { + type: "long", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/080c34d8151d02b760571e3a2899fa97.asciidoc b/docs/doc_examples/080c34d8151d02b760571e3a2899fa97.asciidoc new file mode 100644 index 000000000..0c931b62f --- /dev/null +++ b/docs/doc_examples/080c34d8151d02b760571e3a2899fa97.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + settings: { + analysis: { + filter: { + email: { + type: "pattern_capture", + preserve_original: true, + patterns: ["([^@]+)", "(\\p{L}+)", "(\\d+)", "@(.+)"], + }, + }, + analyzer: { + email: { + tokenizer: "uax_url_email", + filter: ["email", "lowercase", "unique"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/083e514297c09e91211f0d168aef1b0b.asciidoc b/docs/doc_examples/083e514297c09e91211f0d168aef1b0b.asciidoc new file mode 100644 index 000000000..744d438b3 --- /dev/null +++ b/docs/doc_examples/083e514297c09e91211f0d168aef1b0b.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "logs-generic-default", + query: { + match: { + "event.sequence": "97", + }, + }, + script: { + source: "ctx._source.event.original = params.new_event", + lang: "painless", + params: { + new_event: "FOOBAR", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/086ec4c5d86bbf80fb80162e94037689.asciidoc b/docs/doc_examples/086ec4c5d86bbf80fb80162e94037689.asciidoc new file mode 100644 index 000000000..7508b3af3 --- /dev/null +++ b/docs/doc_examples/086ec4c5d86bbf80fb80162e94037689.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + weighted_tokens: { + query_expansion_field: { + tokens: { + "2161": 0.4679, + "2621": 0.307, + "2782": 
0.1299, + "2851": 0.1056, + "3088": 0.3041, + "3376": 0.1038, + "3467": 0.4873, + "3684": 0.8958, + "4380": 0.334, + "4542": 0.4636, + "4633": 2.2805, + "4785": 1.2628, + "4860": 1.0655, + "5133": 1.0709, + "7139": 1.0016, + "7224": 0.2486, + "7387": 0.0985, + "7394": 0.0542, + "8915": 0.369, + "9156": 2.8947, + "10505": 0.2771, + "11464": 0.3996, + "13525": 0.0088, + "14178": 0.8161, + "16893": 0.1376, + "17851": 1.5348, + "19939": 0.6012, + }, + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: false, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0881397074d261ccc2db514daf116c31.asciidoc b/docs/doc_examples/0881397074d261ccc2db514daf116c31.asciidoc new file mode 100644 index 000000000..c15090e30 --- /dev/null +++ b/docs/doc_examples/0881397074d261ccc2db514daf116c31.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + id: "VuaCfGcBCdbkQm-e5aOx", + with_limited_by: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/08a76b3f5a8394d8f9084113334a260a.asciidoc b/docs/doc_examples/08a76b3f5a8394d8f9084113334a260a.asciidoc new file mode 100644 index 000000000..d10141ebc --- /dev/null +++ b/docs/doc_examples/08a76b3f5a8394d8f9084113334a260a.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_boxplot: { + boxplot: { + field: "load_time", + compression: 200, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/08c9af9dd519c011deedd406f3061836.asciidoc b/docs/doc_examples/08c9af9dd519c011deedd406f3061836.asciidoc new file mode 100644 index 000000000..0707e105c --- /dev/null +++ b/docs/doc_examples/08c9af9dd519c011deedd406f3061836.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.previewDatafeed({ + datafeed_config: { + indices: ["kibana_sample_data_ecommerce"], + query: { + bool: { + filter: [ + { + term: { + _index: "kibana_sample_data_ecommerce", + }, + }, + ], + }, + }, + scroll_size: 1000, + }, + job_config: { + description: "Find customers spending an unusually high amount in an hour", + analysis_config: { + bucket_span: "1h", + detectors: [ + { + detector_description: "High total sales", + function: "high_sum", + field_name: "taxful_total_price", + over_field_name: "customer_full_name.keyword", + }, + ], + influencers: ["customer_full_name.keyword", "category.keyword"], + }, + analysis_limits: { + model_memory_limit: "10mb", + }, + data_description: { + time_field: "order_date", + time_format: "epoch_ms", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/08e08feb514b24006e13f258d617d873.asciidoc b/docs/doc_examples/08e08feb514b24006e13f258d617d873.asciidoc new file mode 100644 index 000000000..ad2134f5b --- /dev/null +++ b/docs/doc_examples/08e08feb514b24006e13f258d617d873.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.getScript({ + id: 
"calculate-score", +}); +console.log(response); +---- diff --git a/docs/doc_examples/08e79ca9fdcdfebb2c6a79e6837e649d.asciidoc b/docs/doc_examples/08e79ca9fdcdfebb2c6a79e6837e649d.asciidoc new file mode 100644 index 000000000..ab12f4a4c --- /dev/null +++ b/docs/doc_examples/08e79ca9fdcdfebb2c6a79e6837e649d.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + tag_cardinality: { + cardinality: { + field: "tag", + missing: "N/A", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/08f20902821a4f7a73ce7b959c5bdbdc.asciidoc b/docs/doc_examples/08f20902821a4f7a73ce7b959c5bdbdc.asciidoc new file mode 100644 index 000000000..5c797b641 --- /dev/null +++ b/docs/doc_examples/08f20902821a4f7a73ce7b959c5bdbdc.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + regexp: { + "user.id": { + value: "k.*y", + flags: "ALL", + case_insensitive: true, + max_determinized_states: 10000, + rewrite: "constant_score_blended", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/091200b658023db31dffc2f08a85a9cc.asciidoc b/docs/doc_examples/091200b658023db31dffc2f08a85a9cc.asciidoc new file mode 100644 index 000000000..6c0f24be4 --- /dev/null +++ b/docs/doc_examples/091200b658023db31dffc2f08a85a9cc.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + "routing.allocation.total_shards_per_node": -1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4d46dbb96125b27f46299547de9d8709.asciidoc b/docs/doc_examples/0957bbd535f58c97b12ffba90813d64c.asciidoc similarity index 60% rename from docs/doc_examples/4d46dbb96125b27f46299547de9d8709.asciidoc rename to docs/doc_examples/0957bbd535f58c97b12ffba90813d64c.asciidoc index f37274336..f21197ae4 100644 --- a/docs/doc_examples/4d46dbb96125b27f46299547de9d8709.asciidoc +++ b/docs/doc_examples/0957bbd535f58c97b12ffba90813d64c.asciidoc @@ -4,13 +4,10 @@ [source, js] ---- const response = await client.indices.create({ - index: 'test', - body: { - settings: { - 'index.write.wait_for_active_shards': '2' - } - } -}) -console.log(response) + index: "analyze_sample", + settings: { + "index.analyze.max_token_count": 20000, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/095d60b2cfc5004c97efc49f27287262.asciidoc b/docs/doc_examples/095d60b2cfc5004c97efc49f27287262.asciidoc new file mode 100644 index 000000000..2b125347a --- /dev/null +++ b/docs/doc_examples/095d60b2cfc5004c97efc49f27287262.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + fixed_interval: "30d", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/095e3f21941a9cc75f398389a075152d.asciidoc b/docs/doc_examples/095e3f21941a9cc75f398389a075152d.asciidoc 
new file mode 100644 index 000000000..f88e0adc9 --- /dev/null +++ b/docs/doc_examples/095e3f21941a9cc75f398389a075152d.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.inferTrainedModel({ + model_id: "cross-encoder__ms-marco-tinybert-l-2-v2", + docs: [ + { + text_field: + "Berlin has a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers.", + }, + { + text_field: "New York City is famous for the Metropolitan Museum of Art.", + }, + ], + inference_config: { + text_similarity: { + text: "How many people live in Berlin?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09769561f082b50558fb7d8707719963.asciidoc b/docs/doc_examples/09769561f082b50558fb7d8707719963.asciidoc new file mode 100644 index 000000000..674ca8a30 --- /dev/null +++ b/docs/doc_examples/09769561f082b50558fb7d8707719963.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "ingest", + filter_path: "nodes.*.ingest", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0989cc65d8924f666ce3eb0820d2d244.asciidoc b/docs/doc_examples/0989cc65d8924f666ce3eb0820d2d244.asciidoc deleted file mode 100644 index b7f079096..000000000 --- a/docs/doc_examples/0989cc65d8924f666ce3eb0820d2d244.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.index({ - index: 'users', - refresh: 'wait_for', - body: { - user_id: 12345 - } -}) -console.log(response0) - -const response1 = await client.index({ - index: 'users', - refresh: 'wait_for', - body: { - user_id: 12346 - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/099006ab11b52ea99693401dceee8bad.asciidoc b/docs/doc_examples/099006ab11b52ea99693401dceee8bad.asciidoc new file mode 100644 index 000000000..e9a963852 --- /dev/null +++ b/docs/doc_examples/099006ab11b52ea99693401dceee8bad.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "calculate-score", + script: { + lang: "painless", + source: "Math.log(_score * 2) + params['my_modifier']", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09944369863fd8666d5301d717317276.asciidoc b/docs/doc_examples/09944369863fd8666d5301d717317276.asciidoc new file mode 100644 index 000000000..9a6bf1366 --- /dev/null +++ b/docs/doc_examples/09944369863fd8666d5301d717317276.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "condition", + filter: ["lowercase"], + script: { + source: "token.getTerm().length() < 5", + }, + }, + ], + text: "THE QUICK BROWN FOX", +}); +console.log(response); +---- diff --git a/docs/doc_examples/09a44b619a99f6bf3f01bd5e258fd22d.asciidoc b/docs/doc_examples/09a44b619a99f6bf3f01bd5e258fd22d.asciidoc new file mode 100644 index 000000000..40a384942 --- /dev/null 
+++ b/docs/doc_examples/09a44b619a99f6bf3f01bd5e258fd22d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + text: "New York", +}); +console.log(response); +---- diff --git a/docs/doc_examples/09a478fe32a7b7d814083ffa5297bcdf.asciidoc b/docs/doc_examples/09a478fe32a7b7d814083ffa5297bcdf.asciidoc new file mode 100644 index 000000000..31140522e --- /dev/null +++ b/docs/doc_examples/09a478fe32a7b7d814083ffa5297bcdf.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + fuzzy: { + "user.id": { + value: "ki", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09bdf9a7e22733d668476724042a406c.asciidoc b/docs/doc_examples/09bdf9a7e22733d668476724042a406c.asciidoc new file mode 100644 index 000000000..74ff0e94b --- /dev/null +++ b/docs/doc_examples/09bdf9a7e22733d668476724042a406c.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "timeseries_template", + index_patterns: ["timeseries"], + data_stream: {}, + template: { + settings: { + number_of_shards: 1, + number_of_replicas: 1, + "index.lifecycle.name": "timeseries_policy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc b/docs/doc_examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc new file mode 100644 index 000000000..c5eca11d6 --- /dev/null +++ b/docs/doc_examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + rule: { + match_criteria: { + user_query: "pugs", + }, + ruleset_ids: ["my-ruleset"], + organic: { + match: { + description: "puggles", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09cdd5ae8114c49886026fef8d00a19c.asciidoc b/docs/doc_examples/09cdd5ae8114c49886026fef8d00a19c.asciidoc deleted file mode 100644 index 99d0f6539..000000000 --- a/docs/doc_examples/09cdd5ae8114c49886026fef8d00a19c.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.getMapping({ - index: '_all' -}) -console.log(response0) - -const response1 = await client.indices.getMapping() -console.log(response1) ----- - diff --git a/docs/doc_examples/09ce0ec993c494ac01f01ef9815fcc4b.asciidoc b/docs/doc_examples/09ce0ec993c494ac01f01ef9815fcc4b.asciidoc new file mode 100644 index 000000000..2d45c039f --- /dev/null +++ b/docs/doc_examples/09ce0ec993c494ac01f01ef9815fcc4b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index", + runtime: { + "http.clientip": { + type: "ip", + script: + "\n String 
clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip);\n ", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09d617863a103c82fb4101e6165ea7fe.asciidoc b/docs/doc_examples/09d617863a103c82fb4101e6165ea7fe.asciidoc index 26a1900e0..f8e0b44af 100644 --- a/docs/doc_examples/09d617863a103c82fb4101e6165ea7fe.asciidoc +++ b/docs/doc_examples/09d617863a103c82fb4101e6165ea7fe.asciidoc @@ -4,12 +4,9 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - match_all: {} - } - } -}) -console.log(response) + query: { + match_all: {}, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc b/docs/doc_examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc new file mode 100644 index 000000000..2c768dba6 --- /dev/null +++ b/docs/doc_examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.evaluateDataFrame({ + index: "animal_classification", + evaluation: { + classification: { + actual_field: "animal_class", + metrics: { + auc_roc: { + class_name: "dog", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09ecba5814d71e4c44468575eada9878.asciidoc b/docs/doc_examples/09ecba5814d71e4c44468575eada9878.asciidoc deleted file mode 100644 index ce6746fb6..000000000 --- a/docs/doc_examples/09ecba5814d71e4c44468575eada9878.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'sales', - size: '0', - body: { - aggs: { - sales_over_time: { - date_histogram: { - field: 'date', - fixed_interval: '30d' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0a3003fa5af850e415634b50b1029859.asciidoc b/docs/doc_examples/0a3003fa5af850e415634b50b1029859.asciidoc new file mode 100644 index 000000000..9d6394951 --- /dev/null +++ b/docs/doc_examples/0a3003fa5af850e415634b50b1029859.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "logs-generic-default*", + filter_path: "hits.hits._index", + query: { + match: { + "event.sequence": "97", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a3186bf20b5359393406fc0cb433313.asciidoc b/docs/doc_examples/0a3186bf20b5359393406fc0cb433313.asciidoc new file mode 100644 index 000000000..e534e77b4 --- /dev/null +++ b/docs/doc_examples/0a3186bf20b5359393406fc0cb433313.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, + columnar: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a46ac2968a574ce145f197f10d30152.asciidoc b/docs/doc_examples/0a46ac2968a574ce145f197f10d30152.asciidoc new file mode 100644 index 000000000..ebec6756f --- /dev/null +++ b/docs/doc_examples/0a46ac2968a574ce145f197f10d30152.asciidoc @@ -0,0 +1,46 @@ +// This 
file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "library", + refresh: "true", + operations: [ + { + index: { + _id: "Leviathan Wakes", + }, + }, + { + name: "Leviathan Wakes", + author: "James S.A. Corey", + release_date: "2011-06-02", + page_count: 561, + }, + { + index: { + _id: "Hyperion", + }, + }, + { + name: "Hyperion", + author: "Dan Simmons", + release_date: "1989-05-26", + page_count: 482, + }, + { + index: { + _id: "Dune", + }, + }, + { + name: "Dune", + author: "Frank Herbert", + release_date: "1965-06-01", + page_count: 604, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/23ab0f1023b1b2cd5cdf2a8f9ccfd57b.asciidoc b/docs/doc_examples/0a46cc8fe93e372909660a63dc52ae3b.asciidoc similarity index 54% rename from docs/doc_examples/23ab0f1023b1b2cd5cdf2a8f9ccfd57b.asciidoc rename to docs/doc_examples/0a46cc8fe93e372909660a63dc52ae3b.asciidoc index 439bf360b..497a5e10b 100644 --- a/docs/doc_examples/23ab0f1023b1b2cd5cdf2a8f9ccfd57b.asciidoc +++ b/docs/doc_examples/0a46cc8fe93e372909660a63dc52ae3b.asciidoc @@ -4,17 +4,12 @@ [source, js] ---- const response = await client.indices.create({ - index: 'test1', - body: { - mappings: { - properties: { - user: { - type: 'keyword' - } - } - } - } -}) -console.log(response) + index: "", + aliases: { + "my-alias": { + is_write_index: true, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/0a650401134f07e40216f0d0d1a66a32.asciidoc b/docs/doc_examples/0a650401134f07e40216f0d0d1a66a32.asciidoc new file mode 100644 index 000000000..68956e123 --- /dev/null +++ b/docs/doc_examples/0a650401134f07e40216f0d0d1a66a32.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.allocation({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a6d56a66a2652ac6de68f8bd544a175.asciidoc b/docs/doc_examples/0a6d56a66a2652ac6de68f8bd544a175.asciidoc new file mode 100644 index 000000000..994e1c6be --- /dev/null +++ b/docs/doc_examples/0a6d56a66a2652ac6de68f8bd544a175.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index1", + query: { + query_string: { + query: "running with scissors", + fields: ["comment", "comment.english"], + }, + }, + highlight: { + order: "score", + fields: { + comment: { + matched_fields: ["comment.english"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a701bdc7b6786026f40c0be8ebfc753.asciidoc b/docs/doc_examples/0a701bdc7b6786026f40c0be8ebfc753.asciidoc new file mode 100644 index 000000000..187671a16 --- /dev/null +++ b/docs/doc_examples/0a701bdc7b6786026f40c0be8ebfc753.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_ecommerce", + query: { + bool: { + filter: { + term: { + currency: "EUR", + }, + }, + }, + }, + }, + latest: { + unique_key: ["geoip.country_iso_code", "geoip.region_name"], + sort: "order_date", + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/5eabcdbf61bfcb484dc694f25c2bba36.asciidoc b/docs/doc_examples/0a758d9dec74d9e942cf41a06499234f.asciidoc similarity index 66% rename from docs/doc_examples/5eabcdbf61bfcb484dc694f25c2bba36.asciidoc rename to docs/doc_examples/0a758d9dec74d9e942cf41a06499234f.asciidoc index cbd1a9889..c33a0b1dd 100644 --- a/docs/doc_examples/5eabcdbf61bfcb484dc694f25c2bba36.asciidoc +++ b/docs/doc_examples/0a758d9dec74d9e942cf41a06499234f.asciidoc @@ -4,15 +4,12 @@ [source, js] ---- const response = await client.index({ - index: 'twitter', - id: '1', - body: { + index: "my-index-000001", + id: 1, + document: { counter: 1, - tags: [ - 'red' - ] - } -}) -console.log(response) + tags: ["red"], + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/0a84c5b7c0793be745b13eaf13e94422.asciidoc b/docs/doc_examples/0a84c5b7c0793be745b13eaf13e94422.asciidoc new file mode 100644 index 000000000..cf13f17c8 --- /dev/null +++ b/docs/doc_examples/0a84c5b7c0793be745b13eaf13e94422.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + "routing.allocation.total_shards_per_node": "2", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a9173f3b22716c78653976dc4799eae.asciidoc b/docs/doc_examples/0a9173f3b22716c78653976dc4799eae.asciidoc new file mode 100644 index 000000000..dd6c9eac3 --- /dev/null +++ b/docs/doc_examples/0a9173f3b22716c78653976dc4799eae.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + product: { + terms: { + field: "product", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a958e486ede3f519d48431ab689eded.asciidoc b/docs/doc_examples/0a958e486ede3f519d48431ab689eded.asciidoc deleted file mode 100644 index 9945b33ee..000000000 --- a/docs/doc_examples/0a958e486ede3f519d48431ab689eded.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - script: { - source: 'ctx._source.counter += params.count', - lang: 'painless', - params: { - count: 4 - } - }, - upsert: { - counter: 1 - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0ac295efdabd59e7b1f1a4577535d942.asciidoc b/docs/doc_examples/0ac295efdabd59e7b1f1a4577535d942.asciidoc new file mode 100644 index 000000000..cbc8b66d8 --- /dev/null +++ b/docs/doc_examples/0ac295efdabd59e7b1f1a4577535d942.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence\n [ process where process.name == "regsvr32.exe" ]\n [ file where stringContains(file.name, "scrobj.dll") ]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ac9916f47a2483b89c1416684af322a.asciidoc b/docs/doc_examples/0ac9916f47a2483b89c1416684af322a.asciidoc deleted file mode 100644 index 
f10420a38..000000000 --- a/docs/doc_examples/0ac9916f47a2483b89c1416684af322a.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match: { - message: { - query: 'to be or not to be', - operator: 'and', - zero_terms_query: 'all' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0ac9e7dd7e4acba51888256326ed5ffe.asciidoc b/docs/doc_examples/0ac9e7dd7e4acba51888256326ed5ffe.asciidoc new file mode 100644 index 000000000..e6c8eca1b --- /dev/null +++ b/docs/doc_examples/0ac9e7dd7e4acba51888256326ed5ffe.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + track_total_hits: true, + query: { + match: { + "user.id": "elkbee", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ad86b582aff1235f37ccb2cc90adad5.asciidoc b/docs/doc_examples/0ad86b582aff1235f37ccb2cc90adad5.asciidoc new file mode 100644 index 000000000..2d94108dd --- /dev/null +++ b/docs/doc_examples/0ad86b582aff1235f37ccb2cc90adad5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.open({ + index: ".ds-my-data-stream-2099.03.07-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc b/docs/doc_examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc new file mode 100644 index 000000000..0fb258f3f --- /dev/null +++ b/docs/doc_examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/amazon_bedrock_embeddings", + body: { + service: "amazonbedrock", + service_settings: { + access_key: "", + secret_key: "", + region: "us-east-1", + provider: "amazontitan", + model: "amazon.titan-embed-text-v2:0", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0adbce828234ca221e3d03b184296407.asciidoc b/docs/doc_examples/0adbce828234ca221e3d03b184296407.asciidoc new file mode 100644 index 000000000..65d80a5f0 --- /dev/null +++ b/docs/doc_examples/0adbce828234ca221e3d03b184296407.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index", + runtime: { + "http.clientip": { + type: "ip", + script: + "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip); \n ", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc b/docs/doc_examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc new file mode 100644 index 000000000..5df1863c2 --- /dev/null +++ b/docs/doc_examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate 
the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-g-drive-connector/_filtering", + body: { + rules: [ + { + field: "file_extension", + id: "exclude-txt-files", + order: 0, + policy: "exclude", + rule: "equals", + value: "txt", + }, + { + field: "_", + id: "DEFAULT", + order: 1, + policy: "include", + rule: "regex", + value: ".*", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0afaf1cad692e6201aa574c8feb6e622.asciidoc b/docs/doc_examples/0afaf1cad692e6201aa574c8feb6e622.asciidoc deleted file mode 100644 index ba2ee8cb4..000000000 --- a/docs/doc_examples/0afaf1cad692e6201aa574c8feb6e622.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - tags: { - terms: { - field: 'tags', - include: '.*sport.*', - exclude: 'water_.*' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0aff04881be21eea45375ec4f4f50e66.asciidoc b/docs/doc_examples/0aff04881be21eea45375ec4f4f50e66.asciidoc new file mode 100644 index 000000000..3aa94fd9f --- /dev/null +++ b/docs/doc_examples/0aff04881be21eea45375ec4f4f50e66.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-api-key", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0b1c5486f96bfa5db8db854c0178dbe5.asciidoc b/docs/doc_examples/0b1c5486f96bfa5db8db854c0178dbe5.asciidoc new file mode 100644 index 000000000..ce0112e17 --- /dev/null +++ b/docs/doc_examples/0b1c5486f96bfa5db8db854c0178dbe5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + cluster_one: { + seeds: ["127.0.0.1:{remote-interface-default-port}"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0b47b0bef81b9b5eecfb3775695bd6ad.asciidoc b/docs/doc_examples/0b47b0bef81b9b5eecfb3775695bd6ad.asciidoc new file mode 100644 index 000000000..ac32302d7 --- /dev/null +++ b/docs/doc_examples/0b47b0bef81b9b5eecfb3775695bd6ad.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "remote_monitor", + password: "changeme", + roles: ["remote_monitoring_agent"], + full_name: "Internal Agent For Remote Monitoring", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc b/docs/doc_examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc new file mode 100644 index 000000000..ccd775ed1 --- /dev/null +++ b/docs/doc_examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + match: { + message: "some message", + }, + }, + script: { + id: "calculate-score", + params: 
{ + my_modifier: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0b8fa90bc9aeeadb420ad785bd0b9953.asciidoc b/docs/doc_examples/0b8fa90bc9aeeadb420ad785bd0b9953.asciidoc new file mode 100644 index 000000000..0ebd8a2a4 --- /dev/null +++ b/docs/doc_examples/0b8fa90bc9aeeadb420ad785bd0b9953.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote1", + cluster: ["cross_cluster_search"], + indices: [ + { + names: [""], + privileges: ["read"], + }, + ], + remote_indices: [ + { + names: ["logs-*"], + privileges: ["read", "read_cross_cluster"], + clusters: ["my_remote_cluster"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0b913fb9e010d877c0be015519cfddc6.asciidoc b/docs/doc_examples/0b913fb9e010d877c0be015519cfddc6.asciidoc new file mode 100644 index 000000000..9717c0327 --- /dev/null +++ b/docs/doc_examples/0b913fb9e010d877c0be015519cfddc6.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + document: { + "@timestamp": "2019-05-18T15:57:27.541Z", + ip: "225.44.217.191", + extension: "jpg", + response: "200", + geo: { + coordinates: { + lat: 38.53146222, + lon: -121.7864906, + }, + }, + url: "https://media-for-the-masses.theacademyofperformingartsandscience.org/uploads/charles-fullerton.jpg", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000002", + document: { + "@timestamp": "2019-05-20T03:44:20.844Z", + ip: "198.247.165.49", + extension: "php", + response: "200", + geo: { + coordinates: { + lat: 37.13189556, + lon: -76.4929875, + }, + }, + memory: 241720, + url: "https://theacademyofperformingartsandscience.org/people/type:astronauts/name:laurel-b-clark/profile", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/0b987b4101e016653a32d7b092d47e4c.asciidoc b/docs/doc_examples/0b987b4101e016653a32d7b092d47e4c.asciidoc new file mode 100644 index 000000000..a2a3e2f36 --- /dev/null +++ b/docs/doc_examples/0b987b4101e016653a32d7b092d47e4c.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + region: { + type: "keyword", + }, + manager: { + properties: { + age: { + type: "integer", + }, + name: { + properties: { + first: { + type: "text", + }, + last: { + type: "text", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ba5acede9d43af424e85428e7d35420.asciidoc b/docs/doc_examples/0ba5acede9d43af424e85428e7d35420.asciidoc new file mode 100644 index 000000000..a279b534c --- /dev/null +++ b/docs/doc_examples/0ba5acede9d43af424e85428e7d35420.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "azure_openai_embeddings", + processors: [ + { + inference: { + model_id: "azure_openai_embeddings", + input_output: { + input_field: "content", + 
output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0bbd30b9be3e54ff3028b9f4459634d2.asciidoc b/docs/doc_examples/0bbd30b9be3e54ff3028b9f4459634d2.asciidoc deleted file mode 100644 index f531241da..000000000 --- a/docs/doc_examples/0bbd30b9be3e54ff3028b9f4459634d2.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putMapping({ - index: 'my_index', - body: { - properties: { - name: { - properties: { - last: { - type: 'text' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc b/docs/doc_examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc new file mode 100644 index 000000000..395132389 --- /dev/null +++ b/docs/doc_examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + price: { + unmapped_type: "long", + }, + }, + ], + query: { + term: { + product: "chocolate", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0bd3923424a20a4ba860b0774b9991b1.asciidoc b/docs/doc_examples/0bd3923424a20a4ba860b0774b9991b1.asciidoc deleted file mode 100644 index 55e11a522..000000000 --- a/docs/doc_examples/0bd3923424a20a4ba860b0774b9991b1.asciidoc +++ /dev/null @@ -1,39 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'drivers', - body: { - query: { - nested: { - path: 'driver', - query: { - nested: { - path: 'driver.vehicle', - query: { - bool: { - must: [ - { - match: { - 'driver.vehicle.make': 'Powell Motors' - } - }, - { - match: { - 'driver.vehicle.model': 'Canyonero' - } - } - ] - } - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0bef1fdefeb2956d60d52d3f38397cad.asciidoc b/docs/doc_examples/0bef1fdefeb2956d60d52d3f38397cad.asciidoc new file mode 100644 index 000000000..950e6fd01 --- /dev/null +++ b/docs/doc_examples/0bef1fdefeb2956d60d52d3f38397cad.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c05c66cfe3a2169b1ec1aba77e26db2.asciidoc b/docs/doc_examples/0c05c66cfe3a2169b1ec1aba77e26db2.asciidoc new file mode 100644 index 000000000..a53e1145d --- /dev/null +++ b/docs/doc_examples/0c05c66cfe3a2169b1ec1aba77e26db2.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + rank_feature: { + field: "pagerank", + saturation: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc b/docs/doc_examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc new file mode 100644 index 000000000..4b95c61f7 --- /dev/null +++ 
b/docs/doc_examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + "http.client_ip": { + type: "ip", + script: + "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip); \n ", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc b/docs/doc_examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc new file mode 100644 index 000000000..9d967d750 --- /dev/null +++ b/docs/doc_examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.recovery({ + v: "true", + h: "i,s,t,ty,st,shost,thost,f,fp,b,bp", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c464965126cc09e6812716a145991d4.asciidoc b/docs/doc_examples/0c464965126cc09e6812716a145991d4.asciidoc new file mode 100644 index 000000000..179e814a0 --- /dev/null +++ b/docs/doc_examples/0c464965126cc09e6812716a145991d4.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.info({ + node_id: "ingest", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c688eecf4ebdffdbe1deae0983c3ed8.asciidoc b/docs/doc_examples/0c688eecf4ebdffdbe1deae0983c3ed8.asciidoc new file mode 100644 index 000000000..5ee262246 --- /dev/null +++ b/docs/doc_examples/0c688eecf4ebdffdbe1deae0983c3ed8.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "user_hits", + size: 0, + aggs: { + users_per_day: { + date_histogram: { + field: "timestamp", + calendar_interval: "day", + }, + aggs: { + distinct_users: { + cardinality: { + field: "user_id", + }, + }, + total_new_users: { + cumulative_cardinality: { + buckets_path: "distinct_users", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc b/docs/doc_examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc new file mode 100644 index 000000000..a22aa8928 --- /dev/null +++ b/docs/doc_examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateToken({ + refresh_token: "vLBPvmAB6KvwvJZr27cS", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc b/docs/doc_examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc new file mode 100644 index 000000000..1ade11a6c --- /dev/null +++ b/docs/doc_examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping4", + roles: ["superuser"], + enabled: 
true, + rules: { + any: [ + { + field: { + username: "esadmin", + }, + }, + { + field: { + groups: ["cn=admins,dc=example,dc=com", "cn=other,dc=example,dc=com"], + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c7c40cd17985c3dd32aeaadbafc4fce.asciidoc b/docs/doc_examples/0c7c40cd17985c3dd32aeaadbafc4fce.asciidoc new file mode 100644 index 000000000..c89f06982 --- /dev/null +++ b/docs/doc_examples/0c7c40cd17985c3dd32aeaadbafc4fce.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + match: { + message: "{{^name_exists}}Hello World{{/name_exists}}", + }, + }, + }, + params: { + name_exists: false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d90a84a24a407731dfc1929ac8327746.asciidoc b/docs/doc_examples/0c892d328b73d38396aaef6d9cbcd36b.asciidoc similarity index 68% rename from docs/doc_examples/d90a84a24a407731dfc1929ac8327746.asciidoc rename to docs/doc_examples/0c892d328b73d38396aaef6d9cbcd36b.asciidoc index 35a5315ac..4b46d7ad7 100644 --- a/docs/doc_examples/d90a84a24a407731dfc1929ac8327746.asciidoc +++ b/docs/doc_examples/0c892d328b73d38396aaef6d9cbcd36b.asciidoc @@ -4,10 +4,9 @@ [source, js] ---- const response = await client.delete({ - index: 'twitter', - id: '1', - timeout: '5m' -}) -console.log(response) + index: "my-index-000001", + id: 1, + routing: "shard-1", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/0ca6aae1ab2f0be6127beea8a245374e.asciidoc b/docs/doc_examples/0ca6aae1ab2f0be6127beea8a245374e.asciidoc new file mode 100644 index 000000000..a2c6d6af8 --- /dev/null +++ b/docs/doc_examples/0ca6aae1ab2f0be6127beea8a245374e.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: "my-index-000001,cluster*:my-index-000001,-cluster_three:*", + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0cee58617e75f493c5049d77be1c49f3.asciidoc b/docs/doc_examples/0cee58617e75f493c5049d77be1c49f3.asciidoc new file mode 100644 index 000000000..8d23a9a95 --- /dev/null +++ b/docs/doc_examples/0cee58617e75f493c5049d77be1c49f3.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + fuzzy: { + "user.id": { + value: "ki", + fuzziness: "AUTO", + max_expansions: 50, + prefix_length: 0, + transpositions: true, + rewrite: "constant_score_blended", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0cf29da4b9f0503bd1a79bdc883aadbc.asciidoc b/docs/doc_examples/0cf29da4b9f0503bd1a79bdc883aadbc.asciidoc new file mode 100644 index 000000000..5f8112567 --- /dev/null +++ b/docs/doc_examples/0cf29da4b9f0503bd1a79bdc883aadbc.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + runtime_mappings: { + "grade.corrected": { + type: "double", + script: { + source: 
"emit(Math.min(100, doc['grade'].value * params.correction))", + params: { + correction: 1.2, + }, + }, + }, + }, + aggs: { + avg_corrected_grade: { + avg: { + field: "grade.corrected", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d0f7ece06f21e624d21b09804732f61.asciidoc b/docs/doc_examples/0d0f7ece06f21e624d21b09804732f61.asciidoc new file mode 100644 index 000000000..700c152f7 --- /dev/null +++ b/docs/doc_examples/0d0f7ece06f21e624d21b09804732f61.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + grade_avg: { + avg: { + field: "grade", + missing: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d49474511b236bc89e768c8ee91adf1.asciidoc b/docs/doc_examples/0d49474511b236bc89e768c8ee91adf1.asciidoc new file mode 100644 index 000000000..1cfc2e11d --- /dev/null +++ b/docs/doc_examples/0d49474511b236bc89e768c8ee91adf1.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + simple_query_string: { + query: '"fried eggs" +(eggplant | potato) -frittata', + fields: ["title^5", "body"], + default_operator: "and", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d54ddad2bf6f76aa5c35f53ba77748a.asciidoc b/docs/doc_examples/0d54ddad2bf6f76aa5c35f53ba77748a.asciidoc new file mode 100644 index 000000000..ef4ef2bff --- /dev/null +++ b/docs/doc_examples/0d54ddad2bf6f76aa5c35f53ba77748a.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["porter_stem"], + text: "the foxes jumping quickly", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d59af9dc556dc526b9394051efa800a.asciidoc b/docs/doc_examples/0d59af9dc556dc526b9394051efa800a.asciidoc new file mode 100644 index 000000000..19e14ecd5 --- /dev/null +++ b/docs/doc_examples/0d59af9dc556dc526b9394051efa800a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "logs-foo-bar", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d664883151008b1051ef2c9ab2d0373.asciidoc b/docs/doc_examples/0d664883151008b1051ef2c9ab2d0373.asciidoc deleted file mode 100644 index 5df839e4b..000000000 --- a/docs/doc_examples/0d664883151008b1051ef2c9ab2d0373.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.updateByQuery({ - index: 'twitter', - body: { - slice: { - id: 0, - max: 2 - }, - script: { - source: "ctx._source['extra'] = 'test'" - } - } -}) -console.log(response0) - -const response1 = await client.updateByQuery({ - index: 'twitter', - body: { - slice: { - id: 1, - max: 2 - }, - script: { - source: "ctx._source['extra'] = 'test'" - } - } -}) -console.log(response1) ----- - diff --git 
a/docs/doc_examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc b/docs/doc_examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc new file mode 100644 index 000000000..47243afad --- /dev/null +++ b/docs/doc_examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["foo", "bar"], + template: { + settings: { + number_of_shards: 3, + }, + }, + _meta: { + description: "set number of shards to three", + serialization: { + class: "MyIndexTemplate", + id: 17, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc b/docs/doc_examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc new file mode 100644 index 000000000..85243323d --- /dev/null +++ b/docs/doc_examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping8", + roles: ["superuser"], + enabled: true, + rules: { + all: [ + { + any: [ + { + field: { + dn: "*,ou=admin,dc=example,dc=com", + }, + }, + { + field: { + username: ["es-admin", "es-system"], + }, + }, + ], + }, + { + field: { + groups: "cn=people,dc=example,dc=com", + }, + }, + { + except: { + field: { + "metadata.terminated_date": null, + }, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0da477cb8a7883539ce3ae7ac1e9c5cb.asciidoc b/docs/doc_examples/0da477cb8a7883539ce3ae7ac1e9c5cb.asciidoc new file mode 100644 index 000000000..f0fba00e2 --- /dev/null +++ b/docs/doc_examples/0da477cb8a7883539ce3ae7ac1e9c5cb.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + prices: { + histogram: { + field: "price", + interval: 50, + min_doc_count: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc b/docs/doc_examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc new file mode 100644 index 000000000..841b610e1 --- /dev/null +++ b/docs/doc_examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_s3_repository", + repository: { + type: "s3", + settings: { + bucket: "my-bucket", + client: "my-alternate-client", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0db06c3cba57cf442ac7fab89966e1e1.asciidoc b/docs/doc_examples/0db06c3cba57cf442ac7fab89966e1e1.asciidoc new file mode 100644 index 000000000..5530f7f7c --- /dev/null +++ b/docs/doc_examples/0db06c3cba57cf442ac7fab89966e1e1.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + my_id: "1", + text: "This is a question", + my_join_field: "question", + 
}, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + my_id: "2", + text: "This is another question", + my_join_field: "question", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc b/docs/doc_examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc new file mode 100644 index 000000000..0161abb42 --- /dev/null +++ b/docs/doc_examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + h: "id,ip,port,v,m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc b/docs/doc_examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc new file mode 100644 index 000000000..ba6d119b8 --- /dev/null +++ b/docs/doc_examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getPrivileges(); +console.log(response); +---- diff --git a/docs/doc_examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc b/docs/doc_examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc new file mode 100644 index 000000000..2724e3a6e --- /dev/null +++ b/docs/doc_examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.componentTemplates({ + name: "my-template-*", + v: "true", + s: "name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc new file mode 100644 index 000000000..627ba004d --- /dev/null +++ b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.renderQuery({ + name: "my-app", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e0d8f652d7d29371b5ea7c7544385eb.asciidoc b/docs/doc_examples/0e0d8f652d7d29371b5ea7c7544385eb.asciidoc new file mode 100644 index 000000000..f81e78aaf --- /dev/null +++ b/docs/doc_examples/0e0d8f652d7d29371b5ea7c7544385eb.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "amazon-bedrock-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "amazon_bedrock_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e118857b815b62118a30c042f079db1.asciidoc b/docs/doc_examples/0e118857b815b62118a30c042f079db1.asciidoc index 45acb2f61..fbc3b97e9 100644 --- a/docs/doc_examples/0e118857b815b62118a30c042f079db1.asciidoc +++ b/docs/doc_examples/0e118857b815b62118a30c042f079db1.asciidoc @@ -4,19 +4,13 @@ [source, js] ---- const response = 
await client.search({ - body: { - query: { - multi_match: { - query: 'quick brown f', - type: 'phrase_prefix', - fields: [ - 'subject', - 'message' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "quick brown f", + type: "phrase_prefix", + fields: ["subject", "message"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/0e3abd15dde97a2334621190c4ad4f96.asciidoc b/docs/doc_examples/0e3abd15dde97a2334621190c4ad4f96.asciidoc new file mode 100644 index 000000000..17e833805 --- /dev/null +++ b/docs/doc_examples/0e3abd15dde97a2334621190c4ad4f96.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "mistral_embeddings", + processors: [ + { + inference: { + model_id: "mistral_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e3b4a48a3450cd99c95ec46d4701b58.asciidoc b/docs/doc_examples/0e3b4a48a3450cd99c95ec46d4701b58.asciidoc new file mode 100644 index 000000000..71e7fd206 --- /dev/null +++ b/docs/doc_examples/0e3b4a48a3450cd99c95ec46d4701b58.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + filter_path: "aggregations", + aggs: { + hats: { + filter: { + term: { + type: "hat", + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + }, + }, + t_shirts: { + filter: { + term: { + type: "t-shirt", + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e5d25c7bb738c42d471020d678e2966.asciidoc b/docs/doc_examples/0e5d25c7bb738c42d471020d678e2966.asciidoc new file mode 100644 index 000000000..c75941ffe --- /dev/null +++ b/docs/doc_examples/0e5d25c7bb738c42d471020d678e2966.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.startTrainedModelDeployment({ + model_id: "my_model", + deployment_id: "my_model_for_ingest", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e5db64154a722a5cbdb84b588ce2ce8.asciidoc b/docs/doc_examples/0e5db64154a722a5cbdb84b588ce2ce8.asciidoc new file mode 100644 index 000000000..05cd6d517 --- /dev/null +++ b/docs/doc_examples/0e5db64154a722a5cbdb84b588ce2ce8.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + f: { + type: "scaled_float", + scaling_factor: 0.01, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + f: 123, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc b/docs/doc_examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc new file mode 100644 index 000000000..064744d5c --- /dev/null +++ b/docs/doc_examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc @@ -0,0 +1,15 
@@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + simple_query_string: { + fields: ["content"], + query: "foo bar -baz", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e83f140237d75469a428ff403564bb5.asciidoc b/docs/doc_examples/0e83f140237d75469a428ff403564bb5.asciidoc new file mode 100644 index 000000000..aac173f77 --- /dev/null +++ b/docs/doc_examples/0e83f140237d75469a428ff403564bb5.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.disk.watermark.low": "100gb", + "cluster.routing.allocation.disk.watermark.high": "50gb", + "cluster.routing.allocation.disk.watermark.flood_stage": "10gb", + "cluster.info.update.interval": "1m", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e84bb54b8a9a5387f252eeffeb1098e.asciidoc b/docs/doc_examples/0e84bb54b8a9a5387f252eeffeb1098e.asciidoc new file mode 100644 index 000000000..5570248b6 --- /dev/null +++ b/docs/doc_examples/0e84bb54b8a9a5387f252eeffeb1098e.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_error_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + search: { + request: { + indices: ["logs"], + body: { + query: { + match: { + message: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ea146b178561bc8b9002bed8a35641f.asciidoc b/docs/doc_examples/0ea146b178561bc8b9002bed8a35641f.asciidoc new file mode 100644 index 000000000..a5321db01 --- /dev/null +++ b/docs/doc_examples/0ea146b178561bc8b9002bed8a35641f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.getAutoscalingPolicy({ + name: "my_autoscaling_policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc b/docs/doc_examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc new file mode 100644 index 000000000..c55280692 --- /dev/null +++ b/docs/doc_examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + slice: { + id: 0, + max: 2, + }, + query: { + match: { + message: "foo", + }, + }, + pit: { + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + }, +}); +console.log(response); + +const response1 = await client.search({ + slice: { + id: 1, + max: 2, + }, + pit: { + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + }, + query: { + match: { + message: "foo", + }, + }, +}); 
+console.log(response1); +---- diff --git a/docs/doc_examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc b/docs/doc_examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc new file mode 100644 index 000000000..542a15de3 --- /dev/null +++ b/docs/doc_examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.migrateToDataTiers({ + legacy_template_to_delete: "global-template", + node_attribute: "custom_attribute_name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0eb2c1284a9829224913a860190580d8.asciidoc b/docs/doc_examples/0eb2c1284a9829224913a860190580d8.asciidoc new file mode 100644 index 000000000..6406bb3d7 --- /dev/null +++ b/docs/doc_examples/0eb2c1284a9829224913a860190580d8.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "fingerprint_example", + settings: { + analysis: { + analyzer: { + whitespace_fingerprint: { + tokenizer: "whitespace", + filter: ["fingerprint"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ec2178fb0103862b47cc20bc5885972.asciidoc b/docs/doc_examples/0ec2178fb0103862b47cc20bc5885972.asciidoc new file mode 100644 index 000000000..0bcba66a8 --- /dev/null +++ b/docs/doc_examples/0ec2178fb0103862b47cc20bc5885972.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_fs_backup", + repository: { + type: "fs", + settings: { + location: "my_fs_backup_location", + readonly: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0eccea755bd4f6dd47579a9022690546.asciidoc b/docs/doc_examples/0eccea755bd4f6dd47579a9022690546.asciidoc new file mode 100644 index 000000000..bbe82e2f4 --- /dev/null +++ b/docs/doc_examples/0eccea755bd4f6dd47579a9022690546.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + my_remote: { + mode: "proxy", + proxy_address: "my.remote.cluster.com:9443", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc b/docs/doc_examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc new file mode 100644 index 000000000..f40987611 --- /dev/null +++ b/docs/doc_examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": [ + [-70, 40], + [-71, 42], + ], + order: "asc", + unit: "km", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/014b788c879e4aaa1020672e45e25473.asciidoc b/docs/doc_examples/0f3a78296825d507dda6771f7ceb9d61.asciidoc similarity index 61% rename from docs/doc_examples/014b788c879e4aaa1020672e45e25473.asciidoc rename to 
docs/doc_examples/0f3a78296825d507dda6771f7ceb9d61.asciidoc index 593668bb6..89d44803c 100644 --- a/docs/doc_examples/014b788c879e4aaa1020672e45e25473.asciidoc +++ b/docs/doc_examples/0f3a78296825d507dda6771f7ceb9d61.asciidoc @@ -4,12 +4,9 @@ [source, js] ---- const response = await client.cluster.putSettings({ - body: { - transient: { - 'cluster.routing.use_adaptive_replica_selection': false - } - } -}) -console.log(response) + persistent: { + "cluster.routing.allocation.exclude._ip": "10.0.0.1", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc b/docs/doc_examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc new file mode 100644 index 000000000..87f3c4188 --- /dev/null +++ b/docs/doc_examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rankEval({ + index: "my-index-000001", + requests: [ + { + id: "JFK query", + request: { + query: { + match_all: {}, + }, + }, + ratings: [], + }, + ], + metric: { + recall: { + k: 20, + relevant_rating_threshold: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0f547926ebf092e19fc5fb433e9ac8c1.asciidoc b/docs/doc_examples/0f547926ebf092e19fc5fb433e9ac8c1.asciidoc new file mode 100644 index 000000000..7cfd78e19 --- /dev/null +++ b/docs/doc_examples/0f547926ebf092e19fc5fb433e9ac8c1.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "whitespace", + filter: ["lowercase", "porter_stem"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc b/docs/doc_examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc new file mode 100644 index 000000000..4d208c97c --- /dev/null +++ b/docs/doc_examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.simulateTemplate({ + name: "template_1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc b/docs/doc_examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc new file mode 100644 index 000000000..4b29c0392 --- /dev/null +++ b/docs/doc_examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + metric: "_all", + index: "foo,bar", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0fb472645116d58ddef89ca976d15a01.asciidoc b/docs/doc_examples/0fb472645116d58ddef89ca976d15a01.asciidoc new file mode 100644 index 000000000..921548536 --- /dev/null +++ b/docs/doc_examples/0fb472645116d58ddef89ca976d15a01.asciidoc @@ -0,0 +1,73 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ 
+ { + index: {}, + }, + { + "@timestamp": 1516729294000, + model_number: "QVKC92Q", + measures: { + voltage: 5.2, + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516642894000, + model_number: "QVKC92Q", + measures: { + voltage: 5.8, + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516556494000, + model_number: "QVKC92Q", + measures: { + voltage: 5.1, + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516470094000, + model_number: "QVKC92Q", + measures: { + voltage: 5.6, + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516383694000, + model_number: "HG537PU", + measures: { + voltage: 4.2, + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516297294000, + model_number: "HG537PU", + measures: { + voltage: 4, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc b/docs/doc_examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc new file mode 100644 index 000000000..77565b1fd --- /dev/null +++ b/docs/doc_examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "ledger", + size: 0, + aggs: { + profit: { + scripted_metric: { + init_script: { + id: "my_init_script", + }, + map_script: { + id: "my_map_script", + }, + combine_script: { + id: "my_combine_script", + }, + params: { + field: "amount", + }, + reduce_script: { + id: "my_reduce_script", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0fc4b589df5388da784c6d981e769e31.asciidoc b/docs/doc_examples/0fc4b589df5388da784c6d981e769e31.asciidoc new file mode 100644 index 000000000..a052f882c --- /dev/null +++ b/docs/doc_examples/0fc4b589df5388da784c6d981e769e31.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putTemplate({ + name: "template_1", + index_patterns: ["te*"], + settings: { + number_of_shards: 1, + }, + aliases: { + alias1: {}, + alias2: { + filter: { + term: { + "user.id": "kimchy", + }, + }, + routing: "shard-1", + }, + "{index}-alias": {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/025b54db0edc50c24ea48a2bd94366ad.asciidoc b/docs/doc_examples/0fd08e14ad651827be53897a6bdaf0b8.asciidoc similarity index 63% rename from docs/doc_examples/025b54db0edc50c24ea48a2bd94366ad.asciidoc rename to docs/doc_examples/0fd08e14ad651827be53897a6bdaf0b8.asciidoc index 6115b5526..ba1afbe36 100644 --- a/docs/doc_examples/025b54db0edc50c24ea48a2bd94366ad.asciidoc +++ b/docs/doc_examples/0fd08e14ad651827be53897a6bdaf0b8.asciidoc @@ -4,11 +4,11 @@ [source, js] ---- const response = await client.search({ - index: 'twitter', - size: '0', - q: 'extra:test', - filter_path: 'hits.total' -}) -console.log(response) + query: { + match_bool_prefix: { + message: "quick brown f", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc b/docs/doc_examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc new file mode 100644 index 000000000..4b426ac41 --- /dev/null +++ b/docs/doc_examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.transform.scheduleNowTransform({ + transform_id: "ecommerce_transform", +}); +console.log(response); +---- diff --git a/docs/doc_examples/100d4e33158069f3caa32e8bfa0eb3d0.asciidoc b/docs/doc_examples/100d4e33158069f3caa32e8bfa0eb3d0.asciidoc new file mode 100644 index 000000000..0aa38abad --- /dev/null +++ b/docs/doc_examples/100d4e33158069f3caa32e8bfa0eb3d0.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic: "runtime", + properties: { + "@timestamp": { + type: "date", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1027ab1ca767ac1428176ef4f84bfbcf.asciidoc b/docs/doc_examples/1027ab1ca767ac1428176ef4f84bfbcf.asciidoc deleted file mode 100644 index 81e46f517..000000000 --- a/docs/doc_examples/1027ab1ca767ac1428176ef4f84bfbcf.asciidoc +++ /dev/null @@ -1,40 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.search({ - index: 'twitter', - scroll: '1m', - body: { - slice: { - id: 0, - max: 2 - }, - query: { - match: { - title: 'elasticsearch' - } - } - } -}) -console.log(response0) - -const response1 = await client.search({ - index: 'twitter', - scroll: '1m', - body: { - slice: { - id: 1, - max: 2 - }, - query: { - match: { - title: 'elasticsearch' - } - } - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/102c7de25d13c87cf28839ada9f63c95.asciidoc b/docs/doc_examples/102c7de25d13c87cf28839ada9f63c95.asciidoc new file mode 100644 index 000000000..dd86be71b --- /dev/null +++ b/docs/doc_examples/102c7de25d13c87cf28839ada9f63c95.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "index", + id: 1, + document: { + my_date: "2016-05-11T16:30:55.328Z", + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "index", + query: { + constant_score: { + filter: { + range: { + my_date: { + gte: "now-1h", + lte: "now", + }, + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/103296e16b4233926ad1f07360385606.asciidoc b/docs/doc_examples/103296e16b4233926ad1f07360385606.asciidoc new file mode 100644 index 000000000..06f7bd53e --- /dev/null +++ b/docs/doc_examples/103296e16b4233926ad1f07360385606.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "turkish_example", + settings: { + analysis: { + filter: { + turkish_stop: { + type: "stop", + stopwords: "_turkish_", + }, + turkish_lowercase: { + type: "lowercase", + language: "turkish", + }, + turkish_keywords: { + type: "keyword_marker", + keywords: ["örnek"], + }, + turkish_stemmer: { + type: "stemmer", + language: "turkish", + }, + }, + analyzer: { + rebuilt_turkish: { + tokenizer: "standard", + filter: [ + "apostrophe", + "turkish_lowercase", + "turkish_stop", + "turkish_keywords", + "turkish_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/10535507a9735fcf06600444b9067d4c.asciidoc 
b/docs/doc_examples/10535507a9735fcf06600444b9067d4c.asciidoc new file mode 100644 index 000000000..c7bc3b58e --- /dev/null +++ b/docs/doc_examples/10535507a9735fcf06600444b9067d4c.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + text: { + type: "text", + fields: { + raw: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + text: [ + "the quick brown fox", + "the quick brown fox", + "jumped over the lazy dog", + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/1070e59ba144cdf309fd9b2591612b95.asciidoc b/docs/doc_examples/1070e59ba144cdf309fd9b2591612b95.asciidoc new file mode 100644 index 000000000..a2b53a448 --- /dev/null +++ b/docs/doc_examples/1070e59ba144cdf309fd9b2591612b95.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "test", + id: 3, + document: { + test: "test", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + id: 4, + refresh: "false", + document: { + test: "test", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc b/docs/doc_examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc new file mode 100644 index 000000000..7377cbb91 --- /dev/null +++ b/docs/doc_examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_internal/desired_nodes/Ywkh3INLQcuPT49f6kcppA/100", + body: { + nodes: [ + { + settings: { + "node.name": "instance-000187", + "node.external_id": "instance-000187", + "node.roles": ["data_hot", "master"], + "node.attr.data": "hot", + "node.attr.logical_availability_zone": "zone-0", + }, + processors: 8, + memory: "58gb", + storage: "2tb", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc b/docs/doc_examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc new file mode 100644 index 000000000..a77dfed0d --- /dev/null +++ b/docs/doc_examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.resumeFollow({ + index: "<follower_index>", +}); +console.log(response); +---- diff --git a/docs/doc_examples/10a16abe990288253ea25a1b1712fe3d.asciidoc b/docs/doc_examples/10a16abe990288253ea25a1b1712fe3d.asciidoc new file mode 100644 index 000000000..d8a89c5f8 --- /dev/null +++ b/docs/doc_examples/10a16abe990288253ea25a1b1712fe3d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryUser({ + with_profile_uid: "true", +}); +console.log(response); +---- diff --git
a/docs/doc_examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc b/docs/doc_examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc new file mode 100644 index 000000000..4cbd44f94 --- /dev/null +++ b/docs/doc_examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + ignore_condition: true, + watch: { + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + search: { + request: { + indices: ["logs"], + body: { + query: { + match: { + message: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 0, + }, + }, + }, + actions: { + log_error: { + logging: { + text: "Found {{ctx.payload.hits.total}} errors in the logs", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/10d8b17e73d31dcd907de67327ed78a2.asciidoc b/docs/doc_examples/10d8b17e73d31dcd907de67327ed78a2.asciidoc new file mode 100644 index 000000000..3cae02031 --- /dev/null +++ b/docs/doc_examples/10d8b17e73d31dcd907de67327ed78a2.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "dutch_example", + settings: { + analysis: { + filter: { + dutch_stop: { + type: "stop", + stopwords: "_dutch_", + }, + dutch_keywords: { + type: "keyword_marker", + keywords: ["voorbeeld"], + }, + dutch_stemmer: { + type: "stemmer", + language: "dutch", + }, + dutch_override: { + type: "stemmer_override", + rules: [ + "fiets=>fiets", + "bromfiets=>bromfiets", + "ei=>eier", + "kind=>kinder", + ], + }, + }, + analyzer: { + rebuilt_dutch: { + tokenizer: "standard", + filter: [ + "lowercase", + "dutch_stop", + "dutch_keywords", + "dutch_override", + "dutch_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc b/docs/doc_examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc new file mode 100644 index 000000000..ec93e4938 --- /dev/null +++ b/docs/doc_examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + realm_name: "native1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/10de9fd4a38755020a07c4ec964d44c9.asciidoc b/docs/doc_examples/10de9fd4a38755020a07c4ec964d44c9.asciidoc new file mode 100644 index 000000000..1803166ae --- /dev/null +++ b/docs/doc_examples/10de9fd4a38755020a07c4ec964d44c9.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "oidc-example", + roles: ["example_role"], + enabled: true, + rules: { + field: { + "realm.name": "oidc1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/10e4c1f246ada8c6b500d8ea6c1e335f.asciidoc b/docs/doc_examples/10e4c1f246ada8c6b500d8ea6c1e335f.asciidoc new file mode 100644 index 000000000..bcb703c37 --- /dev/null +++ b/docs/doc_examples/10e4c1f246ada8c6b500d8ea6c1e335f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT 
EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + standard_shingle: { + tokenizer: "standard", + filter: ["shingle"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/10f0c8fed98455c460c374b50ffbb204.asciidoc b/docs/doc_examples/10f0c8fed98455c460c374b50ffbb204.asciidoc new file mode 100644 index 000000000..9fecf5000 --- /dev/null +++ b/docs/doc_examples/10f0c8fed98455c460c374b50ffbb204.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "dsl-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc b/docs/doc_examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc new file mode 100644 index 000000000..d43215527 --- /dev/null +++ b/docs/doc_examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.list(); +console.log(response); +---- diff --git a/docs/doc_examples/111c31db1fd29baeaa9964eafaea6789.asciidoc b/docs/doc_examples/111c31db1fd29baeaa9964eafaea6789.asciidoc new file mode 100644 index 000000000..50c998961 --- /dev/null +++ b/docs/doc_examples/111c31db1fd29baeaa9964eafaea6789.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "analyst_user", + refresh: "true", + password: "l0nger-r4nd0mer-p@ssw0rd", + roles: ["my_analyst_role"], + full_name: "Monday Jaffe", + metadata: { + innovation: 8, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/111c69ca94162c1523b799a5c14723dd.asciidoc b/docs/doc_examples/111c69ca94162c1523b799a5c14723dd.asciidoc new file mode 100644 index 000000000..d7a3bc0f3 --- /dev/null +++ b/docs/doc_examples/111c69ca94162c1523b799a5c14723dd.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + full_text: "Quick Brown Foxes!", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/113ac8466084ee6ac4ed272e342dc468.asciidoc b/docs/doc_examples/113ac8466084ee6ac4ed272e342dc468.asciidoc new file mode 100644 index 000000000..406ed089e --- /dev/null +++ b/docs/doc_examples/113ac8466084ee6ac4ed272e342dc468.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "mistral-embeddings", + pipeline: "mistral_embeddings", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/168bfdde773570cfc6dd3ab3574e413b.asciidoc b/docs/doc_examples/1147a02afa087278e51fa365fb9e06b7.asciidoc similarity index 80% rename from 
docs/doc_examples/168bfdde773570cfc6dd3ab3574e413b.asciidoc rename to docs/doc_examples/1147a02afa087278e51fa365fb9e06b7.asciidoc index 66af05e4f..267bf1a6a 100644 --- a/docs/doc_examples/168bfdde773570cfc6dd3ab3574e413b.asciidoc +++ b/docs/doc_examples/1147a02afa087278e51fa365fb9e06b7.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.search({ - q: 'user:kimchy' -}) -console.log(response) + size: 1000, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/114d470e752efa9672ca68d7290fada8.asciidoc b/docs/doc_examples/114d470e752efa9672ca68d7290fada8.asciidoc new file mode 100644 index 000000000..71741889f --- /dev/null +++ b/docs/doc_examples/114d470e752efa9672ca68d7290fada8.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putAlias({ + index: "my-data-stream", + name: "my-alias", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1153bd92ca18356db927054958cd95c6.asciidoc b/docs/doc_examples/1153bd92ca18356db927054958cd95c6.asciidoc new file mode 100644 index 000000000..9b8284bc2 --- /dev/null +++ b/docs/doc_examples/1153bd92ca18356db927054958cd95c6.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + field_value_factor: { + field: "my-int", + factor: 1.2, + modifier: "sqrt", + missing: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/115529722ba30b0b0d51a7ff87e59198.asciidoc b/docs/doc_examples/115529722ba30b0b0d51a7ff87e59198.asciidoc new file mode 100644 index 000000000..73710c751 --- /dev/null +++ b/docs/doc_examples/115529722ba30b0b0d51a7ff87e59198.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getRole({ + name: "my_admin_role", +}); +console.log(response); +---- diff --git a/docs/doc_examples/118f249a3b26c33416f641b33f2b74f8.asciidoc b/docs/doc_examples/118f249a3b26c33416f641b33f2b74f8.asciidoc new file mode 100644 index 000000000..0ee01c6b5 --- /dev/null +++ b/docs/doc_examples/118f249a3b26c33416f641b33f2b74f8.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "pattern", + pattern: ",", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "comma,separated,values", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/11c395d1649733bcab853fe31ec393b2.asciidoc b/docs/doc_examples/11c395d1649733bcab853fe31ec393b2.asciidoc new file mode 100644 index 000000000..07cbcf1e4 --- /dev/null +++ b/docs/doc_examples/11c395d1649733bcab853fe31ec393b2.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.license.get(); +console.log(response); +---- diff --git a/docs/doc_examples/11c43c4aa5435f8a99dcc0d1f03c648f.asciidoc b/docs/doc_examples/11c43c4aa5435f8a99dcc0d1f03c648f.asciidoc new file mode 100644 index 000000000..bdc4978ac --- /dev/null +++ b/docs/doc_examples/11c43c4aa5435f8a99dcc0d1f03c648f.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + aggs: { + grade_max: { + max: { + field: "grade", + missing: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4b4cefba4318caeba7480187faf2b13.asciidoc b/docs/doc_examples/11d9043d3050a7175069dec7e0adc963.asciidoc similarity index 62% rename from docs/doc_examples/d4b4cefba4318caeba7480187faf2b13.asciidoc rename to docs/doc_examples/11d9043d3050a7175069dec7e0adc963.asciidoc index c005254ad..6c0308b64 100644 --- a/docs/doc_examples/d4b4cefba4318caeba7480187faf2b13.asciidoc +++ b/docs/doc_examples/11d9043d3050a7175069dec7e0adc963.asciidoc @@ -4,12 +4,11 @@ [source, js] ---- const response = await client.index({ - index: 'my_index', - id: '1', - body: { - full_text: 'Quick Brown Foxes!' - } -}) -console.log(response) + index: "my-index-000001", + id: 1, + document: { + my_field: "a\\b", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc b/docs/doc_examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc new file mode 100644 index 000000000..10a9f5129 --- /dev/null +++ b/docs/doc_examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deletePipeline({ + id: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc b/docs/doc_examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc new file mode 100644 index 000000000..7af802e6f --- /dev/null +++ b/docs/doc_examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + pre_tags: ["<tag1>", "<tag2>"], + post_tags: ["</tag1>", "</tag2>"], + fields: { + body: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1216f8f7367df3aa823012cef310c08a.asciidoc b/docs/doc_examples/1216f8f7367df3aa823012cef310c08a.asciidoc deleted file mode 100644 index d31d44fa2..000000000 --- a/docs/doc_examples/1216f8f7367df3aa823012cef310c08a.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - index: 'test' - }, - dest: { - index: 'test2' - }, - script: { - source: 'ctx._source.tag = ctx._source.remove("flag")' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1233be1d4c9c7ca54126f1a0693b26de.asciidoc b/docs/doc_examples/1233be1d4c9c7ca54126f1a0693b26de.asciidoc new file mode 100644 index 000000000..47b7b293c --- /dev/null +++ b/docs/doc_examples/1233be1d4c9c7ca54126f1a0693b26de.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated,
DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 3, + routing: 1, + refresh: "true", + document: { + my_id: "3", + text: "This is an answer", + my_join_field: { + name: "answer", + parent: "1", + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 4, + routing: 1, + refresh: "true", + document: { + my_id: "4", + text: "This is another answer", + my_join_field: { + name: "answer", + parent: "1", + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/123693835b3b85b9a2fa6fd1d3ad89c7.asciidoc b/docs/doc_examples/123693835b3b85b9a2fa6fd1d3ad89c7.asciidoc new file mode 100644 index 000000000..3f96a5c0c --- /dev/null +++ b/docs/doc_examples/123693835b3b85b9a2fa6fd1d3ad89c7.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + routing: "user1", + refresh: "true", + document: { + title: "This is a document", + }, +}); +console.log(response); + +const response1 = await client.get({ + index: "my-index-000001", + id: 1, + routing: "user1", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc b/docs/doc_examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc index b5c874cd0..4471f3a3c 100644 --- a/docs/doc_examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc +++ b/docs/doc_examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.indices.create({ - index: 'publications' -}) -console.log(response) + index: "publications", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/1252fa45847edba5ec2b2f33da70ec5b.asciidoc b/docs/doc_examples/1252fa45847edba5ec2b2f33da70ec5b.asciidoc index d17c17ec9..70c2bd9ae 100644 --- a/docs/doc_examples/1252fa45847edba5ec2b2f33da70ec5b.asciidoc +++ b/docs/doc_examples/1252fa45847edba5ec2b2f33da70ec5b.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.cluster.state({ - filter_path: 'routing_table.indices.**.state' -}) -console.log(response) + filter_path: "routing_table.indices.**.state", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc b/docs/doc_examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc new file mode 100644 index 000000000..bd4b4f7ae --- /dev/null +++ b/docs/doc_examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/128283698535116931dca9d16a16dca2.asciidoc b/docs/doc_examples/128283698535116931dca9d16a16dca2.asciidoc new file mode 100644 index 000000000..5f8f9eb6e --- /dev/null +++ b/docs/doc_examples/128283698535116931dca9d16a16dca2.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getRole(); +console.log(response); +---- diff --git 
a/docs/doc_examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc b/docs/doc_examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc new file mode 100644 index 000000000..ac47ac1be --- /dev/null +++ b/docs/doc_examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + settings: { + "index.max_result_window": 5000, + }, +}); +console.log(response); + +const response1 = await client.cluster.putSettings({ + persistent: { + "search.max_buckets": 20000, + "search.allow_expensive_queries": false, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/12cb446446211f95f651e196a1f059b4.asciidoc b/docs/doc_examples/12cb446446211f95f651e196a1f059b4.asciidoc new file mode 100644 index 000000000..b35ca9af9 --- /dev/null +++ b/docs/doc_examples/12cb446446211f95f651e196a1f059b4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.create({ + repository: "my_repository", + snapshot: "my_snapshot", + wait_for_completion: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc b/docs/doc_examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc new file mode 100644 index 000000000..2cd793b99 --- /dev/null +++ b/docs/doc_examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getCalendarEvents({ + calendar_id: "planned-outages", + start: 1635638400000, + end: 1635724800000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/12ec704d62ffedcb03787e6aba69d382.asciidoc b/docs/doc_examples/12ec704d62ffedcb03787e6aba69d382.asciidoc new file mode 100644 index 000000000..46613023e --- /dev/null +++ b/docs/doc_examples/12ec704d62ffedcb03787e6aba69d382.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "stop", + stopwords: ["a"], + }, + { + type: "shingle", + filler_token: "+", + }, + ], + text: "fox jumps a lazy dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/12facf3617a41551ce2f0c4d005cb1c7.asciidoc b/docs/doc_examples/12facf3617a41551ce2f0c4d005cb1c7.asciidoc new file mode 100644 index 000000000..625db81d8 --- /dev/null +++ b/docs/doc_examples/12facf3617a41551ce2f0c4d005cb1c7.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "movies", + mappings: { + properties: { + name_and_plot: { + type: "text", + }, + name: { + type: "text", + copy_to: "name_and_plot", + }, + plot: { + type: "text", + copy_to: "name_and_plot", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1302e24b0476e0e9af7a2c890edf9f62.asciidoc b/docs/doc_examples/1302e24b0476e0e9af7a2c890edf9f62.asciidoc new file mode 100644 index 000000000..457716ba1 --- /dev/null +++ 
b/docs/doc_examples/1302e24b0476e0e9af7a2c890edf9f62.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + track_total_hits: false, + query: { + match: { + "user.id": "elkbee", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1313c540fef7e7c18a066f07789673fc.asciidoc b/docs/doc_examples/1313c540fef7e7c18a066f07789673fc.asciidoc new file mode 100644 index 000000000..8e9ede09f --- /dev/null +++ b/docs/doc_examples/1313c540fef7e7c18a066f07789673fc.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.getAsync({ + id: "FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", + keep_alive: "5d", + wait_for_completion_timeout: "2s", + format: "json", +}); +console.log(response); +---- diff --git a/docs/doc_examples/132ea3d5a0ffb6b5203e356e8329f679.asciidoc b/docs/doc_examples/132ea3d5a0ffb6b5203e356e8329f679.asciidoc new file mode 100644 index 000000000..0f5963660 --- /dev/null +++ b/docs/doc_examples/132ea3d5a0ffb6b5203e356e8329f679.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/134384b8c63cfbd8d762fb01757bb3f9.asciidoc b/docs/doc_examples/134384b8c63cfbd8d762fb01757bb3f9.asciidoc new file mode 100644 index 000000000..222bc95f8 --- /dev/null +++ b/docs/doc_examples/134384b8c63cfbd8d762fb01757bb3f9.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "logs-debug", + document: { + date: "2019-12-12", + message: "Starting up Elasticsearch", + level: "debug", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "logs-debug", + document: { + date: "2019-12-12", + message: "Starting up Elasticsearch", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/135819da3a4bde684357c57a49ad8e85.asciidoc b/docs/doc_examples/135819da3a4bde684357c57a49ad8e85.asciidoc new file mode 100644 index 000000000..c5583614b --- /dev/null +++ b/docs/doc_examples/135819da3a4bde684357c57a49ad8e85.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.migration.deprecations(); +console.log(response); +---- diff --git a/docs/doc_examples/13670d1534125831c2059eebd86d840c.asciidoc b/docs/doc_examples/13670d1534125831c2059eebd86d840c.asciidoc new file mode 100644 index 000000000..080608e5e --- /dev/null +++ b/docs/doc_examples/13670d1534125831c2059eebd86d840c.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: 
"brazilian_example", + settings: { + analysis: { + filter: { + brazilian_stop: { + type: "stop", + stopwords: "_brazilian_", + }, + brazilian_keywords: { + type: "keyword_marker", + keywords: ["exemplo"], + }, + brazilian_stemmer: { + type: "stemmer", + language: "brazilian", + }, + }, + analyzer: { + rebuilt_brazilian: { + tokenizer: "standard", + filter: [ + "lowercase", + "brazilian_stop", + "brazilian_keywords", + "brazilian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/136ae86b8d497dda799cf1cb583df929.asciidoc b/docs/doc_examples/136ae86b8d497dda799cf1cb583df929.asciidoc new file mode 100644 index 000000000..0e065a362 --- /dev/null +++ b/docs/doc_examples/136ae86b8d497dda799cf1cb583df929.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "publications", + mappings: { + properties: { + id: { + type: "text", + }, + title: { + type: "text", + }, + abstract: { + type: "text", + }, + author: { + properties: { + id: { + type: "text", + }, + name: { + type: "text", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ba0b2db24852abccb7c0fc1098d566e.asciidoc b/docs/doc_examples/137709a0a0dc38d6094291c9fc75b804.asciidoc similarity index 61% rename from docs/doc_examples/0ba0b2db24852abccb7c0fc1098d566e.asciidoc rename to docs/doc_examples/137709a0a0dc38d6094291c9fc75b804.asciidoc index 7e23f1bce..47b2e1b27 100644 --- a/docs/doc_examples/0ba0b2db24852abccb7c0fc1098d566e.asciidoc +++ b/docs/doc_examples/137709a0a0dc38d6094291c9fc75b804.asciidoc @@ -4,16 +4,12 @@ [source, js] ---- const response = await client.index({ - index: 'twitter', - id: '2', - routing: 'user1', - body: { + index: "my-index-000001", + id: 1, + document: { counter: 1, - tags: [ - 'white' - ] - } -}) -console.log(response) + tags: ["production"], + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/137c62a4443bdd7d5b95a15022a9dc30.asciidoc b/docs/doc_examples/137c62a4443bdd7d5b95a15022a9dc30.asciidoc new file mode 100644 index 000000000..3c4e2ec94 --- /dev/null +++ b/docs/doc_examples/137c62a4443bdd7d5b95a15022a9dc30.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "arabic_example", + settings: { + analysis: { + filter: { + arabic_stop: { + type: "stop", + stopwords: "_arabic_", + }, + arabic_keywords: { + type: "keyword_marker", + keywords: ["مثال"], + }, + arabic_stemmer: { + type: "stemmer", + language: "arabic", + }, + }, + analyzer: { + rebuilt_arabic: { + tokenizer: "standard", + filter: [ + "lowercase", + "decimal_digit", + "arabic_stop", + "arabic_normalization", + "arabic_keywords", + "arabic_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc b/docs/doc_examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc new file mode 100644 index 000000000..bb8e802e8 --- /dev/null +++ b/docs/doc_examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 2, + 
routing: "user1", + document: { + counter: 1, + tags: ["env2"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13917f7cfb6a382c293275ff71134ec4.asciidoc b/docs/doc_examples/13917f7cfb6a382c293275ff71134ec4.asciidoc new file mode 100644 index 000000000..73c11f5ef --- /dev/null +++ b/docs/doc_examples/13917f7cfb6a382c293275ff71134ec4.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + match: { + message: + "Hello {{#name_exists}}{{query_string}}{{/name_exists}}{{^name_exists}}World{{/name_exists}}", + }, + }, + }, + params: { + query_string: "Kimchy", + name_exists: true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13b02da42d3afe7f0b649e1c98ac9549.asciidoc b/docs/doc_examples/13b02da42d3afe7f0b649e1c98ac9549.asciidoc new file mode 100644 index 000000000..2418f9714 --- /dev/null +++ b/docs/doc_examples/13b02da42d3afe7f0b649e1c98ac9549.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "keep_types_example", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["extract_alpha"], + }, + }, + filter: { + extract_alpha: { + type: "keep_types", + types: ["<ALPHANUM>"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13cc51ca3a783cdbb1f1d353eaedbf23.asciidoc b/docs/doc_examples/13cc51ca3a783cdbb1f1d353eaedbf23.asciidoc new file mode 100644 index 000000000..86959a78e --- /dev/null +++ b/docs/doc_examples/13cc51ca3a783cdbb1f1d353eaedbf23.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.xpack.security.authc": "debug", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc b/docs/doc_examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc new file mode 100644 index 000000000..e7f6c16c0 --- /dev/null +++ b/docs/doc_examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + should: [ + { + match: { + "name.first": { + query: "shay", + _name: "first", + }, + }, + }, + { + match: { + "name.last": { + query: "banon", + _name: "last", + }, + }, + }, + ], + filter: { + terms: { + "name.last": ["banon", "kimchy"], + _name: "test", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13df08eefc9ba98e311793bbca74133b.asciidoc b/docs/doc_examples/13df08eefc9ba98e311793bbca74133b.asciidoc new file mode 100644 index 000000000..0e818c24a --- /dev/null +++ b/docs/doc_examples/13df08eefc9ba98e311793bbca74133b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getUserProfile({ + uid: "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", + data: "app1.key1", +});
+console.log(response); +---- diff --git a/docs/doc_examples/13e3fefbf55f672926aa389d76fc8bea.asciidoc b/docs/doc_examples/13e3fefbf55f672926aa389d76fc8bea.asciidoc new file mode 100644 index 000000000..eacd1ceab --- /dev/null +++ b/docs/doc_examples/13e3fefbf55f672926aa389d76fc8bea.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.changePassword({ + username: "user1", + password: "new-test-password", +}); +console.log(response); +---- diff --git a/docs/doc_examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc b/docs/doc_examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc new file mode 100644 index 000000000..0d8d85d36 --- /dev/null +++ b/docs/doc_examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.recovery({ + index: "index1,index2", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc b/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc new file mode 100644 index 000000000..52c6688ac --- /dev/null +++ b/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_inference/sparse_embedding/my-elser-model", + body: { + input: + "The sky above the port was the color of television tuned to a dead channel.", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc b/docs/doc_examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc new file mode 100644 index 000000000..ce7709b43 --- /dev/null +++ b/docs/doc_examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test-index", + query: { + nested: { + path: "inference_field.inference.chunks", + query: { + sparse_vector: { + field: "inference_field.inference.chunks.embeddings", + inference_id: "my-inference-id", + query: "mountain lake", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/14254a0e725044faedf9370ead76f6ce.asciidoc b/docs/doc_examples/14254a0e725044faedf9370ead76f6ce.asciidoc new file mode 100644 index 000000000..88f3d5fcc --- /dev/null +++ b/docs/doc_examples/14254a0e725044faedf9370ead76f6ce.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + q: "user.id:elkbee", + size: 0, + terminate_after: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/142de21c40e84e2e2d8d832e5b3b36db.asciidoc b/docs/doc_examples/142de21c40e84e2e2d8d832e5b3b36db.asciidoc new file mode 100644 index 000000000..0dfc47570 --- /dev/null +++ b/docs/doc_examples/142de21c40e84e2e2d8d832e5b3b36db.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples
+
+[source, js]
+----
+const response = await client.ilm.migrateToDataTiers();
+console.log(response);
+----
diff --git a/docs/doc_examples/1445ca2e813ed1c25504107b4b11760e.asciidoc b/docs/doc_examples/1445ca2e813ed1c25504107b4b11760e.asciidoc
new file mode 100644
index 000000000..f4e6078f1
--- /dev/null
+++ b/docs/doc_examples/1445ca2e813ed1c25504107b4b11760e.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ccr.follow({
+  index: "server-metrics-follower",
+  wait_for_active_shards: 1,
+  remote_cluster: "leader",
+  leader_index: "server-metrics",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1452829804551d2d6acedd4e73b29637.asciidoc b/docs/doc_examples/1452829804551d2d6acedd4e73b29637.asciidoc
new file mode 100644
index 000000000..782e6ffea
--- /dev/null
+++ b/docs/doc_examples/1452829804551d2d6acedd4e73b29637.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.createDataStream({
+  name: "logs-foo-bar",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc b/docs/doc_examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc
new file mode 100644
index 000000000..9924e6b33
--- /dev/null
+++ b/docs/doc_examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cat.tasks({
+  v: "true",
+  s: "time:desc",
+  h: "type,action,running_time,node,cancellable",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/14701dcc0cca9665fce2aace0cb62af7.asciidoc b/docs/doc_examples/14701dcc0cca9665fce2aace0cb62af7.asciidoc
deleted file mode 100644
index 7bd77883a..000000000
--- a/docs/doc_examples/14701dcc0cca9665fce2aace0cb62af7.asciidoc
+++ /dev/null
@@ -1,22 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  index: 'twitter',
-  size: '0',
-  filter_path: 'hits.total',
-  body: {
-    query: {
-      range: {
-        likes: {
-          lt: 10
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc b/docs/doc_examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc
new file mode 100644
index 000000000..8c45eebd2
--- /dev/null
+++ b/docs/doc_examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.putTrainedModelAlias({
+  model_id: "flight-delay-prediction-1574775339910",
+  model_alias: "flight_delay_model",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/148edc235fcfbc263561f87f5533e688.asciidoc b/docs/doc_examples/148edc235fcfbc263561f87f5533e688.asciidoc
new file mode 100644
index 000000000..a19fe9d8f
--- /dev/null
+++ b/docs/doc_examples/148edc235fcfbc263561f87f5533e688.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  query: {
+    percolate: {
+      field: "query",
+      documents: [
+        {
+          message: "bonsai tree",
+        },
+        {
+          message: "new tree",
+        },
+        {
+          message: "the office",
+        },
+        {
+          message: "office tree",
+        },
+      ],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/14936b96cfb8ff999a833f615ba75495.asciidoc b/docs/doc_examples/14936b96cfb8ff999a833f615ba75495.asciidoc
new file mode 100644
index 000000000..3bf2abb00
--- /dev/null
+++ b/docs/doc_examples/14936b96cfb8ff999a833f615ba75495.asciidoc
@@ -0,0 +1,24 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "bicycles,other_cycles",
+  query: {
+    bool: {
+      must: {
+        match: {
+          description: "dutch",
+        },
+      },
+      filter: {
+        term: {
+          cycle_type: "bicycle",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/149a0eea54cdf6ea3052af6dba2d2a63.asciidoc b/docs/doc_examples/149a0eea54cdf6ea3052af6dba2d2a63.asciidoc
new file mode 100644
index 000000000..3624fa48e
--- /dev/null
+++ b/docs/doc_examples/149a0eea54cdf6ea3052af6dba2d2a63.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ilm.putLifecycle({
+  name: "my_policy",
+  policy: {
+    phases: {
+      warm: {
+        actions: {
+          set_priority: {
+            priority: 50,
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/14a1db30e13eb1d03cfd9710ca847ebb.asciidoc b/docs/doc_examples/14a1db30e13eb1d03cfd9710ca847ebb.asciidoc
new file mode 100644
index 000000000..eb061fc6a
--- /dev/null
+++ b/docs/doc_examples/14a1db30e13eb1d03cfd9710ca847ebb.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.bulk({
+  index: "my-data-stream",
+  operations: [
+    {
+      create: {},
+    },
+    {
+      "@timestamp": "2099-05-06T16:21:15.000Z",
+      message:
+        '192.0.2.42 - - [06/May/2099:16:21:15 +0000] "GET /images/bg.jpg HTTP/1.0" 200 24736',
+    },
+    {
+      create: {},
+    },
+    {
+      "@timestamp": "2099-05-06T16:25:42.000Z",
+      message:
+        '192.0.2.255 - - [06/May/2099:16:25:42 +0000] "GET /favicon.ico HTTP/1.0" 200 3638',
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/14a49c13c399840e64c00b487aa820c9.asciidoc b/docs/doc_examples/14a49c13c399840e64c00b487aa820c9.asciidoc
new file mode 100644
index 000000000..0ad43b3d4
--- /dev/null
+++ b/docs/doc_examples/14a49c13c399840e64c00b487aa820c9.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "idx",
+  mappings: {
+    _source: {
+      mode: "synthetic",
+    },
+    properties: {
+      date: {
+        type: "date_nanos",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "idx",
+  id: 1,
+  document: {
+    date: ["2015-01-01T12:10:30.000Z", "2014-01-01T12:10:30.000Z"],
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/14af7e2899e64f231068bded6aaf9ec5.asciidoc b/docs/doc_examples/14af7e2899e64f231068bded6aaf9ec5.asciidoc
new file mode 100644
index 000000000..a9bb73e81
--- /dev/null
+++ b/docs/doc_examples/14af7e2899e64f231068bded6aaf9ec5.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "my-index-000001",
+  id: 2,
+  document: {
+    username: "marywhite",
+    email: "mary@white.com",
+    name: {
+      first: "Mary",
+      middle: "Alice",
+      last: "White",
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.getMapping({
+  index: "my-index-000001",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/14afe65afee3d43f27aaaa5b37f26a31.asciidoc b/docs/doc_examples/14afe65afee3d43f27aaaa5b37f26a31.asciidoc
new file mode 100644
index 000000000..c2989f48c
--- /dev/null
+++ b/docs/doc_examples/14afe65afee3d43f27aaaa5b37f26a31.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "example",
+  document: {
+    location: {
+      type: "Point",
+      coordinates: [-77.03653, 38.897676],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/14b81f96297952970b78a3216e059596.asciidoc b/docs/doc_examples/14b81f96297952970b78a3216e059596.asciidoc
new file mode 100644
index 000000000..ad822c276
--- /dev/null
+++ b/docs/doc_examples/14b81f96297952970b78a3216e059596.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.asyncSearch.get({
+  id: "FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/14f124294a4a0e3a657d1468c36161cd.asciidoc b/docs/doc_examples/14f124294a4a0e3a657d1468c36161cd.asciidoc
new file mode 100644
index 000000000..57cb958e6
--- /dev/null
+++ b/docs/doc_examples/14f124294a4a0e3a657d1468c36161cd.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "stats-index",
+  query: {
+    term: {
+      agg_metric: {
+        value: 702.3,
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/be49260e1b3496c4feac38c56ebb0669.asciidoc b/docs/doc_examples/14f2dab0583c5a9fcc39931d33194872.asciidoc
similarity index 73%
rename from docs/doc_examples/be49260e1b3496c4feac38c56ebb0669.asciidoc
rename to docs/doc_examples/14f2dab0583c5a9fcc39931d33194872.asciidoc
index 613044417..2c76f8696 100644
--- a/docs/doc_examples/be49260e1b3496c4feac38c56ebb0669.asciidoc
+++ b/docs/doc_examples/14f2dab0583c5a9fcc39931d33194872.asciidoc
@@ -4,9 +4,7 @@
 [source, js]
 ----
 const response = await client.search({
-  index: 'twitter',
-  q: 'user:kimchy'
-})
-console.log(response)
+  index: "sample_weblogs_by_clientip",
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/150b5fee5678bf8cdf0932da73eada80.asciidoc b/docs/doc_examples/150b5fee5678bf8cdf0932da73eada80.asciidoc
new file mode 100644
index 000000000..f87ba72dc
--- /dev/null
+++ b/docs/doc_examples/150b5fee5678bf8cdf0932da73eada80.asciidoc
@@ -0,0 +1,35 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.nodes.stats({
+  metric: "indices",
+  index_metric: "fielddata",
+  fields: "field1,field2",
+});
+console.log(response);
+
+const response1 = await client.nodes.stats({
+  metric: "indices",
+  index_metric: "fielddata",
+  level: "indices",
+  fields: "field1,field2",
+});
+console.log(response1);
+
+const response2 = await client.nodes.stats({
+  metric: "indices",
+  index_metric: "fielddata",
+  level: "shards",
+  fields: "field1,field2",
+});
+console.log(response2);
+
+const response3 = await client.nodes.stats({
+  metric: "indices",
+  index_metric: "fielddata",
+  fields: "field*",
+});
+console.log(response3);
+----
diff --git a/docs/doc_examples/151d2b11807ec684b0c01aa89189a801.asciidoc b/docs/doc_examples/151d2b11807ec684b0c01aa89189a801.asciidoc
new file mode 100644
index 000000000..90c13827f
--- /dev/null
+++ b/docs/doc_examples/151d2b11807ec684b0c01aa89189a801.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    query_string: {
+      fields: ["title", "content"],
+      query: "this that thus",
+      minimum_should_match: 2,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/154d703732daf5c5fcd0122e6a50213f.asciidoc b/docs/doc_examples/154d703732daf5c5fcd0122e6a50213f.asciidoc
new file mode 100644
index 000000000..5f79c3ef0
--- /dev/null
+++ b/docs/doc_examples/154d703732daf5c5fcd0122e6a50213f.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putMapping({
+  index: "my-index-000001",
+  runtime: {
+    "measures.start": {
+      type: "long",
+    },
+    "measures.end": {
+      type: "long",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/156bc64c94f9f3334fbce25165d2286a.asciidoc b/docs/doc_examples/156bc64c94f9f3334fbce25165d2286a.asciidoc
new file mode 100644
index 000000000..84f57ba66
--- /dev/null
+++ b/docs/doc_examples/156bc64c94f9f3334fbce25165d2286a.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    index: {
+      "sort.field": "date",
+      "sort.order": "desc",
+    },
+  },
+  mappings: {
+    properties: {
+      date: {
+        type: "date",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1570976f7807b88dc8a046b833be057b.asciidoc b/docs/doc_examples/1570976f7807b88dc8a046b833be057b.asciidoc
new file mode 100644
index 000000000..64b22d4b2
--- /dev/null
+++ b/docs/doc_examples/1570976f7807b88dc8a046b833be057b.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cat.nodes({
+  v: "true",
+  s: "master,name",
+  h: "name,master,node.role,heap.percent,disk.used_percent,cpu",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1572696b97822d3332be51700e09672f.asciidoc b/docs/doc_examples/1572696b97822d3332be51700e09672f.asciidoc
new file mode 100644
index 000000000..45ae77442
--- /dev/null
+++ b/docs/doc_examples/1572696b97822d3332be51700e09672f.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "range_index",
+  query: {
+    range: {
+      time_frame: {
+        gte: "2015-10-31",
+        lte: "2015-11-01",
+        relation: "within",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1598a0fec6b1ca78cadbaba65f465196.asciidoc b/docs/doc_examples/1598a0fec6b1ca78cadbaba65f465196.asciidoc
new file mode 100644
index 000000000..739e06130
--- /dev/null
+++ b/docs/doc_examples/1598a0fec6b1ca78cadbaba65f465196.asciidoc
@@ -0,0 +1,33 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        my_analyzer: {
+          tokenizer: "my_tokenizer",
+        },
+      },
+      tokenizer: {
+        my_tokenizer: {
+          type: "pattern",
+          pattern: '"((?:\\\\"|[^"]|\\\\")+)"',
+          group: 1,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.analyze({
+  index: "my-index-000001",
+  analyzer: "my_analyzer",
+  text: '"value", "value with embedded \\" quote"',
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc b/docs/doc_examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc
new file mode 100644
index 000000000..66c81a93b
--- /dev/null
+++ b/docs/doc_examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  filter: ["lowercase"],
+  text: "BaR",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/15c76cc8a038f686395053a240262929.asciidoc b/docs/doc_examples/15c76cc8a038f686395053a240262929.asciidoc
new file mode 100644
index 000000000..8e3ec71ae
--- /dev/null
+++ b/docs/doc_examples/15c76cc8a038f686395053a240262929.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "classic_example",
+  settings: {
+    analysis: {
+      analyzer: {
+        classic_analyzer: {
+          tokenizer: "classic",
+          filter: ["classic"],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/15d4be58359542775f4aff88e6d8adb5.asciidoc b/docs/doc_examples/15d4be58359542775f4aff88e6d8adb5.asciidoc
new file mode 100644
index 000000000..4cc3244aa
--- /dev/null
+++ b/docs/doc_examples/15d4be58359542775f4aff88e6d8adb5.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.simulate({
+  id: "my-pipeline",
+  docs: [
+    {
+      _source: {
+        "my-keyword-field": "FOO",
+      },
+    },
+    {
+      _source: {
+        "my-keyword-field": "BAR",
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/15d948d593d2624ac5e2b155052048f0.asciidoc b/docs/doc_examples/15d948d593d2624ac5e2b155052048f0.asciidoc
new file mode 100644
index 000000000..882a10ec9
--- /dev/null
+++ b/docs/doc_examples/15d948d593d2624ac5e2b155052048f0.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  tokenizer: "whitespace",
+  filter: ["keyword_repeat", "stemmer"],
+  text: "jumping dog",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/15dad5338065baaaa7d475abe85f4c22.asciidoc b/docs/doc_examples/15dad5338065baaaa7d475abe85f4c22.asciidoc
deleted file mode 100644
index 4e604ffd1..000000000
--- a/docs/doc_examples/15dad5338065baaaa7d475abe85f4c22.asciidoc
+++ /dev/null
@@ -1,29 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  body: {
-    sort: [
-      {
-        _geo_distance: {
-          'pin.location': [
-            -70,
-            40
-          ],
-          order: 'asc',
-          unit: 'km'
-        }
-      }
-    ],
-    query: {
-      term: {
-        user: 'kimchy'
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/15e90b82827c8512670820cf856a9c71.asciidoc b/docs/doc_examples/15e90b82827c8512670820cf856a9c71.asciidoc
new file mode 100644
index 000000000..3cfb23d78
--- /dev/null
+++ b/docs/doc_examples/15e90b82827c8512670820cf856a9c71.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "monthlyindex",
+  description: "monthly date-time index naming",
+  processors: [
+    {
+      date_index_name: {
+        field: "date1",
+        index_name_prefix: "my-index-",
+        date_rounding: "M",
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1605be45a5711d1929d6ad2d1ae0f797.asciidoc b/docs/doc_examples/1605be45a5711d1929d6ad2d1ae0f797.asciidoc
new file mode 100644
index 000000000..cd712f029
--- /dev/null
+++ b/docs/doc_examples/1605be45a5711d1929d6ad2d1ae0f797.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.state({
+  filter_path: "metadata.cluster_coordination.last_committed_config",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/160986f49758f4e8345d183a842f6351.asciidoc b/docs/doc_examples/160986f49758f4e8345d183a842f6351.asciidoc
new file mode 100644
index 000000000..3aeb9d6f4
--- /dev/null
+++ b/docs/doc_examples/160986f49758f4e8345d183a842f6351.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "cbor-attachment",
+  description: "Extract attachment information",
+  processors: [
+    {
+      attachment: {
+        field: "data",
+        remove_binary: false,
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/160de80948e0c7db49b1c311848a66a2.asciidoc b/docs/doc_examples/160de80948e0c7db49b1c311848a66a2.asciidoc
new file mode 100644
index 000000000..467ff2e89
--- /dev/null
+++ b/docs/doc_examples/160de80948e0c7db49b1c311848a66a2.asciidoc
@@ -0,0 +1,43 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.watcher.putWatch({
+  id: "log_error_watch",
+  trigger: {
+    schedule: {
+      interval: "10s",
+    },
+  },
+  input: {
+    search: {
+      request: {
+        indices: ["logs"],
+        body: {
+          query: {
+            match: {
+              message: "error",
+            },
+          },
+        },
+      },
+    },
+  },
+  condition: {
+    compare: {
+      "ctx.payload.hits.total": {
+        gt: 0,
+      },
+    },
+  },
+  actions: {
+    log_error: {
+      logging: {
+        text: "Found {{ctx.payload.hits.total}} errors in the logs",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/160f39a50847bad0be4be1529a95e4ce.asciidoc b/docs/doc_examples/160f39a50847bad0be4be1529a95e4ce.asciidoc
new file mode 100644
index 000000000..39f9d4449
--- /dev/null
+++ b/docs/doc_examples/160f39a50847bad0be4be1529a95e4ce.asciidoc
@@ -0,0 +1,55 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "irish_example",
+  settings: {
+    analysis: {
+      filter: {
+        irish_hyphenation: {
+          type: "stop",
+          stopwords: ["h", "n", "t"],
+          ignore_case: true,
+        },
+        irish_elision: {
+          type: "elision",
+          articles: ["d", "m", "b"],
+          articles_case: true,
+        },
+        irish_stop: {
+          type: "stop",
+          stopwords: "_irish_",
+        },
+        irish_lowercase: {
+          type: "lowercase",
+          language: "irish",
+        },
+        irish_keywords: {
+          type: "keyword_marker",
+          keywords: ["sampla"],
+        },
+        irish_stemmer: {
+          type: "stemmer",
+          language: "irish",
+        },
+      },
+      analyzer: {
+        rebuilt_irish: {
+          tokenizer: "standard",
+          filter: [
+            "irish_hyphenation",
+            "irish_elision",
+            "irish_lowercase",
+            "irish_stop",
+            "irish_keywords",
+            "irish_stemmer",
+          ],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/16239fe9f0b0dcfd5ea64c08c6fed21d.asciidoc b/docs/doc_examples/16239fe9f0b0dcfd5ea64c08c6fed21d.asciidoc
new file mode 100644
index 000000000..f045eced9
--- /dev/null
+++ b/docs/doc_examples/16239fe9f0b0dcfd5ea64c08c6fed21d.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "issues",
+  mappings: {
+    properties: {
+      tags: {
+        type: "keyword",
+      },
+      comments: {
+        type: "nested",
+        properties: {
+          username: {
+            type: "keyword",
+          },
+          comment: {
+            type: "text",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/162b5b693b713f0bfab1209d59443c46.asciidoc b/docs/doc_examples/162b5b693b713f0bfab1209d59443c46.asciidoc
index e110fd622..be7b89c0b 100644
--- a/docs/doc_examples/162b5b693b713f0bfab1209d59443c46.asciidoc
+++ b/docs/doc_examples/162b5b693b713f0bfab1209d59443c46.asciidoc
@@ -4,18 +4,15 @@
 [source, js]
 ----
 const response = await client.search({
-  body: {
-    query: {
-      constant_score: {
-        filter: {
-          term: {
-            status: 'active'
-          }
-        }
-      }
-    }
-  }
-})
-console.log(response)
+  query: {
+    constant_score: {
+      filter: {
+        term: {
+          status: "active",
+        },
+      },
+    },
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/16351d99d0608789d04a0bb11a537098.asciidoc b/docs/doc_examples/16351d99d0608789d04a0bb11a537098.asciidoc
new file mode 100644
index 000000000..88500c080
--- /dev/null
+++ b/docs/doc_examples/16351d99d0608789d04a0bb11a537098.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "edge_ngram_example",
+  settings: {
+    analysis: {
+      analyzer: {
+        standard_edge_ngram: {
+          tokenizer: "standard",
+          filter: ["edge_ngram"],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc b/docs/doc_examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc
new file mode 100644
index 000000000..da530b1a9
--- /dev/null
+++ b/docs/doc_examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cat.nodes({
+  v: "true",
+  s: "cpu:desc",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1648dd31d0fef01e7504ebeb687f4f30.asciidoc b/docs/doc_examples/1648dd31d0fef01e7504ebeb687f4f30.asciidoc
new file mode 100644
index 000000000..0176b01af
--- /dev/null
+++ b/docs/doc_examples/1648dd31d0fef01e7504ebeb687f4f30.asciidoc
@@ -0,0 +1,57 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "test",
+  id: 1,
+  refresh: "true",
+  document: {
+    url: "https://en.wikipedia.org/wiki/2016_Summer_Olympics",
+    content: "Rio 2016",
+    pagerank: 50.3,
+    url_length: 42,
+    topics: {
+      sports: 50,
+      brazil: 30,
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "test",
+  id: 2,
+  refresh: "true",
+  document: {
+    url: "https://en.wikipedia.org/wiki/2016_Brazilian_Grand_Prix",
+    content: "Formula One motor race held on 13 November 2016",
+    pagerank: 50.3,
+    url_length: 47,
+    topics: {
+      sports: 35,
+      "formula one": 65,
+      brazil: 20,
+    },
+  },
+});
+console.log(response1);
+
+const response2 = await client.index({
+  index: "test",
+  id: 3,
+  refresh: "true",
+  document: {
+    url: "https://en.wikipedia.org/wiki/Deadpool_(film)",
+    content: "Deadpool is a 2016 American superhero film",
+    pagerank: 50.3,
+    url_length: 37,
+    topics: {
+      movies: 60,
+      "super hero": 65,
+    },
+  },
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/16535685833419f0033545ffce4fdf00.asciidoc b/docs/doc_examples/16535685833419f0033545ffce4fdf00.asciidoc
new file mode 100644
index 000000000..450c4fb80
--- /dev/null
+++ b/docs/doc_examples/16535685833419f0033545ffce4fdf00.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "index2",
+  query: {
+    query_string: {
+      query: "running with scissors",
+      fields: ["comment", "comment.english"],
+    },
+  },
+  highlight: {
+    order: "score",
+    fields: {
+      "comment.english": {
+        type: "fvh",
+        matched_fields: ["comment"],
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1659420311d907d9fc024b96f4150216.asciidoc b/docs/doc_examples/1659420311d907d9fc024b96f4150216.asciidoc
new file mode 100644
index 000000000..01be05459
--- /dev/null
+++ b/docs/doc_examples/1659420311d907d9fc024b96f4150216.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  tokenizer: "whitespace",
+  filter: [
+    {
+      type: "length",
+      min: 0,
+      max: 4,
+    },
+  ],
+  text: "the quick brown fox jumps over the lazy dog",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc b/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc
new file mode 100644
index 000000000..64aa8e2d1
--- /dev/null
+++ b/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.searchApplication.put({
+  name: "my-example-app",
+  search_application: {
+    indices: ["example-index"],
+    template: {
+      script: {
+        lang: "mustache",
+        source:
+          '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n \n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "highlight": {\n "fields": {\n "title": { "fragment_size": 0 },\n "plot": { "fragment_size": 200 }\n }\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ',
+        params: {
+          query: "",
+          _es_filters: {},
+          _es_aggs: {},
+          _es_sort_fields: {},
+          size: 10,
+          from: 0,
+        },
+        dictionary: {},
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc b/docs/doc_examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc
new file mode 100644
index 000000000..d8c815fd5
--- /dev/null
+++ b/docs/doc_examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.tasks.list();
+console.log(response);
+
+const response1 = await client.tasks.list({
+  nodes: "nodeId1,nodeId2",
+});
+console.log(response1);
+
+const response2 = await client.tasks.list({
+  nodes: "nodeId1,nodeId2",
+  actions: "cluster:*",
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc b/docs/doc_examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc
new file mode 100644
index 000000000..5b89ae747
--- /dev/null
+++ b/docs/doc_examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.termvectors({
+  index: "my-index-000001",
+  id: 1,
+  fields: ["text"],
+  offsets: true,
+  payloads: true,
+  positions: true,
+  term_statistics: true,
+  field_statistics: true,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/169b39bb889ecd47541bed3e48725488.asciidoc b/docs/doc_examples/169b39bb889ecd47541bed3e48725488.asciidoc
new file mode 100644
index 000000000..2f62a2605
--- /dev/null
+++ b/docs/doc_examples/169b39bb889ecd47541bed3e48725488.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "bug_reports",
+  query: {
+    term: {
+      labels: "urgent",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/170c8a3fb81a4e93cd3034a3b5a43ac9.asciidoc b/docs/doc_examples/170c8a3fb81a4e93cd3034a3b5a43ac9.asciidoc
new file mode 100644
index 000000000..e9bbbd34f
--- /dev/null
+++ b/docs/doc_examples/170c8a3fb81a4e93cd3034a3b5a43ac9.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "test",
+  id: 1,
+  document: {
+    location: {
+      coordinates: [
+        [46.25, 20.14],
+        [47.49, 19.04],
+      ],
+      type: "multipoint",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/172155ca4bf6dfcbd489453f50739396.asciidoc b/docs/doc_examples/172155ca4bf6dfcbd489453f50739396.asciidoc
new file mode 100644
index 000000000..e380107dc
--- /dev/null
+++ b/docs/doc_examples/172155ca4bf6dfcbd489453f50739396.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.get({
+  repository: "my_repository",
+  snapshot: "snapshot*",
+  size: 2,
+  sort: "name",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/17266cee5eaaddf08e5534bf580a1910.asciidoc b/docs/doc_examples/17266cee5eaaddf08e5534bf580a1910.asciidoc
new file mode 100644
index 000000000..cf5d15d02
--- /dev/null
+++ b/docs/doc_examples/17266cee5eaaddf08e5534bf580a1910.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.watcher.stats();
+console.log(response);
+----
diff --git a/docs/doc_examples/172b18e435c400bed85227624de3acfd.asciidoc b/docs/doc_examples/172b18e435c400bed85227624de3acfd.asciidoc
new file mode 100644
index 000000000..a13d0a955
--- /dev/null
+++ b/docs/doc_examples/172b18e435c400bed85227624de3acfd.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRole({
+  name: "my_analyst_role",
+  refresh: "true",
+  cluster: ["monitor"],
+  indices: [
+    {
+      names: ["index1", "index2"],
+      privileges: ["manage"],
+    },
+  ],
+  applications: [
+    {
+      application: "myapp",
+      privileges: ["read"],
+      resources: ["*"],
+    },
+  ],
+  metadata: {
+    version: 1,
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/172d150e56a225155a62c7b18bf8da67.asciidoc b/docs/doc_examples/172d150e56a225155a62c7b18bf8da67.asciidoc
new file mode 100644
index 000000000..a4d21847f
--- /dev/null
+++ b/docs/doc_examples/172d150e56a225155a62c7b18bf8da67.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.sql.query({
+  format: "txt",
+  query:
+    "SELECT YEAR(release_date) AS year FROM library WHERE page_count > 300 AND author = 'Frank Herbert' GROUP BY year HAVING COUNT(*) > 0",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1736545c8b5674f6d311f3277eb387f1.asciidoc b/docs/doc_examples/1736545c8b5674f6d311f3277eb387f1.asciidoc
new file mode 100644
index 000000000..de3df9694
--- /dev/null
+++ b/docs/doc_examples/1736545c8b5674f6d311f3277eb387f1.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putDataLifecycle({
+  name: "my-data-stream",
+  data_retention: "30d",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/173b190078621415a80e851eaf794e8a.asciidoc b/docs/doc_examples/173b190078621415a80e851eaf794e8a.asciidoc
new file mode 100644
index 000000000..f09c6d4ed
--- /dev/null
+++ b/docs/doc_examples/173b190078621415a80e851eaf794e8a.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        my_english_analyzer: {
+          type: "standard",
+          max_token_length: 5,
+          stopwords: "_english_",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.analyze({
+  index: "my-index-000001",
+  analyzer: "my_english_analyzer",
+  text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc b/docs/doc_examples/1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc
new file mode 100644
index 000000000..5c00b0b5c
--- /dev/null
+++ b/docs/doc_examples/1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.nodes.hotThreads({
+  node_id: "my-node,my-other-node",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/17566e23c191f1004a2719f2c4242307.asciidoc b/docs/doc_examples/17566e23c191f1004a2719f2c4242307.asciidoc
new file mode 100644
index 000000000..1cd460618
--- /dev/null
+++ b/docs/doc_examples/17566e23c191f1004a2719f2c4242307.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.autoscaling.getAutoscalingCapacity();
+console.log(response);
+----
diff --git a/docs/doc_examples/178be73b74ba9f297429e32267084ac7.asciidoc b/docs/doc_examples/178be73b74ba9f297429e32267084ac7.asciidoc
new file mode 100644
index 000000000..2d564eeb6
--- /dev/null
+++ b/docs/doc_examples/178be73b74ba9f297429e32267084ac7.asciidoc
@@ -0,0 +1,30 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    span_or: {
+      clauses: [
+        {
+          span_term: {
+            field: "value1",
+          },
+        },
+        {
+          span_term: {
+            field: "value2",
+          },
+        },
+        {
+          span_term: {
+            field: "value3",
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8653e76676de5d327201b77512afa3a0.asciidoc b/docs/doc_examples/178c920d5e8ec0071f77290fa059802c.asciidoc
similarity index 65%
rename from docs/doc_examples/8653e76676de5d327201b77512afa3a0.asciidoc
rename to docs/doc_examples/178c920d5e8ec0071f77290fa059802c.asciidoc
index 5ef368ade..8df29abb5 100644
--- a/docs/doc_examples/8653e76676de5d327201b77512afa3a0.asciidoc
+++ b/docs/doc_examples/178c920d5e8ec0071f77290fa059802c.asciidoc
@@ -4,13 +4,12 @@
 [source, js]
 ----
 const response = await client.indices.putSettings({
-  index: 'twitter',
-  body: {
+  index: "my-index-000001",
+  settings: {
     index: {
-      number_of_replicas: 2
-    }
-  }
-})
-console.log(response)
+      refresh_interval: "1s",
+    },
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/179f0a3e84ff4bbac18787a018eabf89.asciidoc b/docs/doc_examples/179f0a3e84ff4bbac18787a018eabf89.asciidoc
index 7229d6d0a..f6c66f74b 100644
--- a/docs/doc_examples/179f0a3e84ff4bbac18787a018eabf89.asciidoc
+++ b/docs/doc_examples/179f0a3e84ff4bbac18787a018eabf89.asciidoc
@@ -4,21 +4,14 @@
 [source, js]
 ----
 const response = await client.search({
-  body: {
-    query: {
-      multi_match: {
-        query: 'Jon',
-        type: 'cross_fields',
-        analyzer: 'standard',
-        fields: [
-          'first',
-          'last',
-          '*.edge'
-        ]
-      }
-    }
-  }
-})
-console.log(response)
+  query: {
+    multi_match: {
+      query: "Jon",
+      type: "cross_fields",
+      analyzer: "standard",
+      fields: ["first", "last", "*.edge"],
+    },
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/17a1e308761afd3282f13d44d7be008a.asciidoc b/docs/doc_examples/17a1e308761afd3282f13d44d7be008a.asciidoc
new file mode 100644
index 000000000..5ca1e8e58
--- /dev/null
+++ b/docs/doc_examples/17a1e308761afd3282f13d44d7be008a.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "example",
+  mappings: {
+    properties: {
+      comment: {
+        type: "text",
+        term_vector: "with_positions_offsets",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc b/docs/doc_examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc
new file mode 100644
index 000000000..2f461ea4c
--- /dev/null
+++ b/docs/doc_examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc
@@ -0,0 +1,36 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.simulate({
+  pipeline: {
+    description: "_description",
+    processors: [
+      {
+        set: {
+          field: "field2",
+          value: "_value",
+        },
+      },
+    ],
+  },
+  docs: [
+    {
+      _index: "index",
+      _id: "id",
+      _source: {
+        foo: "bar",
+      },
+    },
+    {
+      _index: "index",
+      _id: "id",
+      _source: {
+        foo: "rab",
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/17dd67a66c49f7eb618dd17430e48dfa.asciidoc b/docs/doc_examples/17dd67a66c49f7eb618dd17430e48dfa.asciidoc
new file mode 100644
index 000000000..69f249727
--- /dev/null
+++ b/docs/doc_examples/17dd67a66c49f7eb618dd17430e48dfa.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "index",
+  query: {
+    constant_score: {
+      filter: {
+        range: {
+          my_date: {
+            gte: "now-1h/m",
+            lte: "now/m",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/17e6f3fac556f08a78f7a876e71acb89.asciidoc b/docs/doc_examples/17e6f3fac556f08a78f7a876e71acb89.asciidoc
new file mode 100644
index 000000000..8661cf147
--- /dev/null
+++ b/docs/doc_examples/17e6f3fac556f08a78f7a876e71acb89.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putSettings({
+  index: "_all",
+  settings: {
+    settings: {
+      "index.unassigned.node_left.delayed_timeout": "5m",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/17f8a8990b0166befa3bc2b10fd28134.asciidoc b/docs/doc_examples/17f8a8990b0166befa3bc2b10fd28134.asciidoc
new file mode 100644
index 000000000..8c4710ec7
--- /dev/null
+++ b/docs/doc_examples/17f8a8990b0166befa3bc2b10fd28134.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "my-index-000001",
+  id: "match_value",
+  document: {
+    query: {
+      match: {
+        field: "value",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/e21e1c26dc8687e7bf7bd2bf019a6698.asciidoc b/docs/doc_examples/17fb298fb1e47f7d946a772d68f4e2df.asciidoc
similarity index 60%
rename from docs/doc_examples/e21e1c26dc8687e7bf7bd2bf019a6698.asciidoc
rename to docs/doc_examples/17fb298fb1e47f7d946a772d68f4e2df.asciidoc
index 8517fa362..064b8e0d0 100644
--- a/docs/doc_examples/e21e1c26dc8687e7bf7bd2bf019a6698.asciidoc
+++ b/docs/doc_examples/17fb298fb1e47f7d946a772d68f4e2df.asciidoc
@@ -4,14 +4,12 @@
 [source, js]
 ----
 const response = await client.deleteByQuery({
-  index: 'twitter',
-  conflicts: 'proceed',
-  body: {
-    query: {
-      match_all: {}
-    }
-  }
-})
-console.log(response)
+  index: "my-data-stream",
+  query: {
+    match: {
+      "user.id": "vlb44hny",
+    },
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/182df084f028479ecbe8d7648ddad892.asciidoc b/docs/doc_examples/182df084f028479ecbe8d7648ddad892.asciidoc
new file mode 100644
index 000000000..aca2d7530
--- /dev/null
+++ b/docs/doc_examples/182df084f028479ecbe8d7648ddad892.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ilm.getStatus();
+console.log(response);
+----
diff --git a/docs/doc_examples/186a7143d50e8c3ee01094e1a9ff0c0c.asciidoc b/docs/doc_examples/186a7143d50e8c3ee01094e1a9ff0c0c.asciidoc
new file mode 100644
index 000000000..1bf5255a3
--- /dev/null
+++ b/docs/doc_examples/186a7143d50e8c3ee01094e1a9ff0c0c.asciidoc
@@ -0,0 +1,36 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "passage_vectors",
+  mappings: {
+    properties: {
+      full_text: {
+        type: "text",
+      },
+      creation_time: {
+        type: "date",
+      },
+      paragraph: {
+        type: "nested",
+        properties: {
+          vector: {
+            type: "dense_vector",
+            dims: 2,
+            index_options: {
+              type: "hnsw",
+            },
+          },
+          text: {
+            type: "text",
+            index: false,
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/187733e50c60350f3f75921bea3b72c2.asciidoc b/docs/doc_examples/187733e50c60350f3f75921bea3b72c2.asciidoc
new file mode 100644
index 000000000..803cd40e1
--- /dev/null
+++ b/docs/doc_examples/187733e50c60350f3f75921bea3b72c2.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  scroll: "1m",
+  slice: {
+    field: "@timestamp",
+    id: 0,
+    max: 10,
+  },
+  query: {
+    match: {
+      message: "foo",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/187e8786e0a90f1f6278cf89b670de0a.asciidoc b/docs/doc_examples/187e8786e0a90f1f6278cf89b670de0a.asciidoc
new file mode 100644
index 000000000..7351ff9ee
--- /dev/null
+++ b/docs/doc_examples/187e8786e0a90f1f6278cf89b670de0a.asciidoc
@@ -0,0 +1,40 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "german_example",
+  settings: {
+    analysis: {
+      filter: {
+        german_stop: {
+          type: "stop",
+          stopwords: "_german_",
+        },
+        german_keywords: {
+          type: "keyword_marker",
+          keywords: ["Beispiel"],
+        },
+        german_stemmer: {
+          type: "stemmer",
+          language: "light_german",
+        },
+      },
+      analyzer: {
+        rebuilt_german: {
+          tokenizer: "standard",
+          filter: [
+            "lowercase",
+            "german_stop",
+            "german_keywords",
+            "german_normalization",
+            "german_stemmer",
+          ],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/188e6208cccb13027a5c1c95440841ee.asciidoc b/docs/doc_examples/188e6208cccb13027a5c1c95440841ee.asciidoc
new file mode 100644
index 000000000..8a3fcd943
--- /dev/null
+++ b/docs/doc_examples/188e6208cccb13027a5c1c95440841ee.asciidoc
@@ -0,0 +1,61 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.bulk({
+  index: "logs",
+  refresh: "true",
+  operations: [
+    {
+      index: {
+        _id: 1,
+      },
+    },
+    {
+      body: "warning: page could not be rendered",
+    },
+    {
+      index: {
+        _id: 2,
+      },
+    },
+    {
+      body: "authentication error",
+    },
+    {
+      index: {
+        _id: 3,
+      },
+    },
+    {
+      body: "warning: connection timed out",
+    },
+  ],
+});
+console.log(response);
+
+const response1 = await client.search({
+  index: "logs",
+  size: 0,
+  aggs: {
+    messages: {
+      filters: {
+        filters: {
+          errors: {
+            match: {
+              body: "error",
+            },
+          },
+          warnings: {
+            match: {
+              body: "warning",
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/189f0cd1ee2485cf11a2968f01d54e5b.asciidoc b/docs/doc_examples/189f0cd1ee2485cf11a2968f01d54e5b.asciidoc
new file mode 100644
index 000000000..e085fe57c
--- /dev/null
+++ b/docs/doc_examples/189f0cd1ee2485cf11a2968f01d54e5b.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  aggs: {
+    sales_per_month: {
+      date_histogram: {
+        field: "date",
+        calendar_interval: "month",
+      },
+      aggs: {
+        sales: {
+          sum: {
+            field: "price",
+          },
+        },
+        sales_deriv: {
+          derivative: {
+            buckets_path: "sales",
+            unit: "day",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc b/docs/doc_examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc
index 7172806a0..9d0141379 100644
--- a/docs/doc_examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc
+++ b/docs/doc_examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc
@@ -4,8 +4,7 @@
 [source, js]
 ----
 const response = await client.tasks.cancel({
-  task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619'
-})
-console.log(response)
+  task_id: "r1A2WoRbTwKZ516z6NEs5A:36619",
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc b/docs/doc_examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc
new file mode 100644
index 000000000..d89eb07d1
--- /dev/null
+++ b/docs/doc_examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000002",
+  mappings: {
+    properties: {
+      datetime: {
+        type: "date",
+        format: "uuuu/MM/dd HH:mm:ss||uuuu/MM/dd||epoch_millis",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/190a21e32db2125ddaea0f634e126a84.asciidoc b/docs/doc_examples/190a21e32db2125ddaea0f634e126a84.asciidoc
new file mode 100644
index 000000000..f4a25c0ec
--- /dev/null
+++ b/docs/doc_examples/190a21e32db2125ddaea0f634e126a84.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.clone({
+  index: "my_source_index",
+  target: "my_target_index",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/19174d872fd1e43cbfb7a96a33d13c96.asciidoc b/docs/doc_examples/19174d872fd1e43cbfb7a96a33d13c96.asciidoc
new file mode 100644
index 000000000..b35d51a2c
--- /dev/null
+++ b/docs/doc_examples/19174d872fd1e43cbfb7a96a33d13c96.asciidoc
@@ -0,0 +1,67 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "places",
+  mappings: {
+    properties: {
+      geometry: {
+        type: "shape",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.bulk({
+  index: "places",
+  refresh: "true",
+  operations: [
+    {
+      index: {
+        _id: 1,
+      },
+    },
+    {
+      name: "NEMO Science Museum",
+      geometry: "POINT(491.2350 5237.4081)",
+    },
+    {
+      index: {
+        _id: 2,
+      },
+    },
+    {
+      name: "Sportpark De Weeren",
+      geometry: {
+        type: "Polygon",
+        coordinates: [
+          [
+            [496.5305328369141, 5239.347642069457],
+            [496.6979026794433, 5239.172175893484],
+            [496.9425201416015, 5239.238958618537],
+            [496.7944622039794, 5239.420969150824],
+            [496.5305328369141, 5239.347642069457],
+          ],
+        ],
+      },
+    },
+  ],
+});
+console.log(response1);
+
+const response2 = await client.search({
+  index: "places",
+  size: 0,
+  aggs: {
+    centroid: {
+      cartesian_centroid: {
+        field: "geometry",
+      },
+    },
+  },
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc b/docs/doc_examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc
new file mode 100644
index 000000000..1df059288
--- /dev/null
+++ b/docs/doc_examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ilm.retry({
+  index: "my-index-000001",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/193234bb5dc6451fd15b584fbefd2446.asciidoc b/docs/doc_examples/193234bb5dc6451fd15b584fbefd2446.asciidoc
new file mode 100644
index 000000000..4adf954e1
--- /dev/null
+++ b/docs/doc_examples/193234bb5dc6451fd15b584fbefd2446.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRole({
+  name: "example1",
+  indices: [
+    {
+      names: ["my-index-000001"],
+      privileges: ["read"],
+      query: {
+        template: {
+          source: {
+            term: {
+              "acl.username": "{{_user.username}}",
+            },
+          },
+        },
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/193d86b6cc34e12c2be806d27816a35c.asciidoc b/docs/doc_examples/193d86b6cc34e12c2be806d27816a35c.asciidoc
new file mode 100644
index 000000000..3dae701d0
--- /dev/null
+++ b/docs/doc_examples/193d86b6cc34e12c2be806d27816a35c.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.searchApplication.search({
+  name: "my_search_application",
+  params: {
+    size: 5,
+    query_string: "mountain climbing",
+    text_fields: [
+      {
+        name: "title",
+        boost: 10,
+      },
+      {
+        name: "description",
+        boost: 2,
+      },
+      {
+        name: "state",
+        boost: 1,
+      },
+    ],
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/194bbac15e709174ac85b681f3a3d137.asciidoc b/docs/doc_examples/194bbac15e709174ac85b681f3a3d137.asciidoc
new file mode 100644
index 000000000..2015c6183
--- /dev/null
+++ b/docs/doc_examples/194bbac15e709174ac85b681f3a3d137.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putIndexTemplate({
+  name: "template_1",
+  index_patterns: ["template*"],
+  template: {
+    settings: {
+      number_of_shards: 1,
+    },
+    aliases: {
+      alias1: {},
+      alias2: {
+        filter: {
+          term: {
+            "user.id": "kimchy",
+          },
+        },
+        routing: "shard-1",
+      },
+      "{index}-alias": {},
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/196aed02b11def364bab84e455c1a073.asciidoc b/docs/doc_examples/196aed02b11def364bab84e455c1a073.asciidoc
new file mode 100644
index 000000000..4492ff082
--- /dev/null
+++ b/docs/doc_examples/196aed02b11def364bab84e455c1a073.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putIndexTemplate({
+  name: "template_1",
+  index_patterns: ["logs-*"],
+  data_stream: {},
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/199f5165d876267080046c907e93483f.asciidoc b/docs/doc_examples/199f5165d876267080046c907e93483f.asciidoc
new file mode 100644
index 000000000..d5e492b5a
--- /dev/null
+++ b/docs/doc_examples/199f5165d876267080046c907e93483f.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  index: "my-index-000001",
+  field: "my-field",
+  text: "this is a test",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc
new file mode 100644
index 000000000..f9344050e
--- /dev/null
+++ b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.simulate.ingest({});
+console.log(response);
+----
diff --git a/docs/doc_examples/19ee488226d357d1576e7d3ae7a4693f.asciidoc b/docs/doc_examples/19ee488226d357d1576e7d3ae7a4693f.asciidoc
new file mode 100644
index 000000000..aa4955985
--- /dev/null
+++ b/docs/doc_examples/19ee488226d357d1576e7d3ae7a4693f.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  analyzer: "keyword",
+  text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc b/docs/doc_examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc
new file mode 100644
index 000000000..1d9708b3c
--- /dev/null
+++ b/docs/doc_examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      inference_field: {
+        type: "semantic_text",
+        inference_id: "my-elser-endpoint",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1a1f3421717ff744ed83232729289bb0.asciidoc b/docs/doc_examples/1a1f3421717ff744ed83232729289bb0.asciidoc
new file mode 100644
index 000000000..3aff383cc
--- /dev/null
+++ b/docs/doc_examples/1a1f3421717ff744ed83232729289bb0.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.slm.deleteLifecycle({
+  policy_id: "daily-snapshots",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1a2890b90f3699fc2a4f27f94b145be9.asciidoc b/docs/doc_examples/1a2890b90f3699fc2a4f27f94b145be9.asciidoc
new file mode 100644
index 000000000..f07d50ef1
--- /dev/null
+++ b/docs/doc_examples/1a2890b90f3699fc2a4f27f94b145be9.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.slm.putLifecycle({
+  policy_id: "nightly-cluster-state-snapshots",
+  schedule: "0 30 2 * * ?",
+  name: "",
+  repository: "my_secure_repository",
+  config: {
+    include_global_state: true,
+    indices: "-*",
+  },
+  retention: {
+    expire_after: "30d",
+    min_count: 5,
+    max_count: 50,
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc b/docs/doc_examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc
new file mode 100644
index 000000000..8ca806885
--- /dev/null
+++ b/docs/doc_examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.stats({
+  level: "shards",
+  human: "true",
+  expand_wildcards: "all",
+  filter_path: "indices.*.total.indexing.index_total",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc b/docs/doc_examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc
new file mode 100644
index 000000000..9ede96eef
--- /dev/null
+++ b/docs/doc_examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.startDataFrameAnalytics({
+  id: "loganalytics",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1a4f8beb6847678880ca113ee6fb75ca.asciidoc b/docs/doc_examples/1a4f8beb6847678880ca113ee6fb75ca.asciidoc
new file mode 100644
index 000000000..cf2015322
--- /dev/null
+++ b/docs/doc_examples/1a4f8beb6847678880ca113ee6fb75ca.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "music",
+  pretty: "true",
+  suggest: {
+    "song-suggest": {
+      regex: "n[ever|i]r",
+      completion: {
+        field: "suggest",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1a56df055b94466ca76818e0858752c6.asciidoc b/docs/doc_examples/1a56df055b94466ca76818e0858752c6.asciidoc
new file mode 100644
index 000000000..dfb2fc9c9
--- /dev/null
+++ b/docs/doc_examples/1a56df055b94466ca76818e0858752c6.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_inference/text_embedding/openai_embeddings",
+  body: {
+    service: "openai",
+    service_settings: {
+      api_key: "",
+      model_id: "text-embedding-ada-002",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1a6dbe5df488c4a16e2f1101ba8a25d9.asciidoc b/docs/doc_examples/1a6dbe5df488c4a16e2f1101ba8a25d9.asciidoc
new file mode 100644
index 000000000..8a3b8bda4
--- /dev/null
+++ b/docs/doc_examples/1a6dbe5df488c4a16e2f1101ba8a25d9.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  tokenizer: "pattern",
+  text: "The foo_bar_size's default is 5.",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1a81fe0186369838531e116e85aa4ccd.asciidoc b/docs/doc_examples/1a81fe0186369838531e116e85aa4ccd.asciidoc
new file mode 100644
index 000000000..ec6bd5524
--- /dev/null
+++ b/docs/doc_examples/1a81fe0186369838531e116e85aa4ccd.asciidoc
@@ -0,0 +1,35 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "shirts",
+  mappings: {
+    properties: {
+      brand: {
+        type: "keyword",
+      },
+      color: {
+        type: "keyword",
+      },
+      model: {
+        type: "keyword",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "shirts",
+  id: 1,
+  refresh: "true",
+  document: {
+    brand: "gucci",
+    color: "red",
+    model: "slim",
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/1a8d92e93481c432a91f7c213099800a.asciidoc b/docs/doc_examples/1a8d92e93481c432a91f7c213099800a.asciidoc
new file mode 100644
index 000000000..6e87b1d33
--- /dev/null
+++ b/docs/doc_examples/1a8d92e93481c432a91f7c213099800a.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.queryApiKeys();
+console.log(response);
+----
diff --git a/docs/doc_examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc b/docs/doc_examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc
new file mode 100644
index 000000000..f29d19695
--- /dev/null
+++ b/docs/doc_examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_inference/rerank/google_vertex_ai_rerank",
+  body: {
+    service: "googlevertexai",
+    service_settings: {
+      service_account_json: "",
+      project_id: "",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc b/docs/doc_examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc
new file mode 100644
index 000000000..1d19dc630
--- /dev/null
+++ b/docs/doc_examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.rollup.rollupSearch({
+  index: "sensor-1,sensor_rollup",
+  size: 0,
+  aggregations: {
+    max_temperature: {
+      max: {
+        field: "temperature",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc b/docs/doc_examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc
index 8881b660d..dec1346f8 100644
--- a/docs/doc_examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc
+++ b/docs/doc_examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc
@@ -4,40 +4,39 @@
 [source, js]
 ----
 const response = await client.bulk({
-  body: [
+  operations: [
     {
       update: {
-        _id: '5',
-        _index: 'index1'
-      }
+        _id: "5",
+        _index: "index1",
+      },
     },
     {
       doc: {
-        my_field: 'foo'
-      }
+        my_field: "foo",
+      },
     },
     {
      update: {
-        _id: '6',
-        _index: 'index1'
-      }
+        _id: "6",
+        _index: "index1",
+      },
     },
     {
      doc: {
-        my_field: 'foo'
-      }
+        my_field: "foo",
+      },
    },
    {
      create: {
-        _id: '7',
-        _index: 'index1'
-      }
+        _id: "7",
+        _index: "index1",
+      },
     },
     {
-      my_field: 'foo'
-    }
-  ]
-})
-console.log(response)
+      my_field: "foo",
+    },
+  ],
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc b/docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc
new file mode 100644
index 000000000..84fdefbae
--- /dev/null
+++ b/docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc
@@ -0,0 +1,35 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000002",
+  mappings: {
+    properties: {
+      metrics: {
+        subobjects: false,
+        properties: {
+          time: {
+            type: "object",
+            properties: {
+              min: {
+                type: "long",
+              },
+              max: {
+                type: "long",
+              },
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.getMapping({
+  index: "my-index-000002",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/1adee74383e5594e45c937177d75aa2a.asciidoc b/docs/doc_examples/1adee74383e5594e45c937177d75aa2a.asciidoc
new file mode 100644
index 000000000..a3408bfb4
--- /dev/null
+++ b/docs/doc_examples/1adee74383e5594e45c937177d75aa2a.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my_index",
+  query: {
+    match_all: {},
+  },
+  sort: {
+    my_counter: "desc",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc b/docs/doc_examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc
new file mode 100644
index 000000000..5294ec4fa
--- /dev/null
+++ b/docs/doc_examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cat.mlTrainedModels({
+  h: "c,o,l,ct,v",
+  v: "ture",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc b/docs/doc_examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc
new file mode 100644
index 000000000..49d0b45e5
--- /dev/null
+++ b/docs/doc_examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.createApiKey({
+  name: "my-restricted-api-key",
+  role_descriptors: {
+    "my-restricted-role-descriptor": {
+      indices: [
+        {
+          names: ["my-search-app"],
+          privileges: ["read"],
+        },
+      ],
+      restriction: {
+        workflows: ["search_application_query"],
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1b0b29e5cd7550c648d0892378e93804.asciidoc b/docs/doc_examples/1b0b29e5cd7550c648d0892378e93804.asciidoc
new file mode 100644
index 000000000..d8d5f1e85
--- /dev/null
+++ b/docs/doc_examples/1b0b29e5cd7550c648d0892378e93804.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.deleteCalendarJob({
+  calendar_id: "planned-outages",
+  job_id: "total-requests",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc b/docs/doc_examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc
new file mode 100644
index 000000000..e490ad820
--- /dev/null
+++ b/docs/doc_examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.watcher.ackWatch({
+  watch_id: "my_watch",
+  action_id: "test_index",
+});
+console.log(response);
+
+const response1 = await client.watcher.getWatch({
+  id: "my_watch",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/1b0f40959a7a4d124372f2bd3f7eac85.asciidoc b/docs/doc_examples/1b0f40959a7a4d124372f2bd3f7eac85.asciidoc
new file mode 100644
index 000000000..fd884d183
--- /dev/null
+++ b/docs/doc_examples/1b0f40959a7a4d124372f2bd3f7eac85.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "custom_fingerprint_example",
+  settings: {
+    analysis: {
+      analyzer: {
+        whitespace_: {
+          tokenizer: "whitespace",
+          filter: ["fingerprint_plus_concat"],
+        },
+      },
+      filter: {
+        fingerprint_plus_concat: {
+          type: "fingerprint",
+          max_output_size: 100,
+          separator: "+",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc b/docs/doc_examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc
new file mode 100644
index 000000000..1e7e3f102
--- /dev/null
+++ b/docs/doc_examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.putCalendarJob({
+  calendar_id: "planned-outages",
+  job_id: "total-requests",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1b3762712c14a19e8c2956b4f530d327.asciidoc b/docs/doc_examples/1b3762712c14a19e8c2956b4f530d327.asciidoc
new file mode 100644
index 000000000..b790c8e30
--- /dev/null
+++ b/docs/doc_examples/1b3762712c14a19e8c2956b4f530d327.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ccr.follow({
+  index: "follower_index",
+  wait_for_active_shards: 1,
+  remote_cluster: "remote_cluster",
+  leader_index: "leader_index",
+  settings: {
+    "index.number_of_replicas": 0,
+  },
+  max_read_request_operation_count: 1024,
+  max_outstanding_read_requests: 16,
+  max_read_request_size: "1024k",
+  max_write_request_operation_count: 32768,
+  max_write_request_size: "16k",
+  max_outstanding_write_requests: 8,
+  max_write_buffer_count: 512,
+  max_write_buffer_size: "512k",
+  max_retry_delay: "10s",
+  read_poll_timeout: "30s",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc b/docs/doc_examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc
new file mode 100644
index 000000000..908e50f66
--- /dev/null
+++ b/docs/doc_examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ilm.putLifecycle({
+  name: "shrink-index",
+  policy: {
+    phases: {
+      warm: {
+        min_age: "5d",
+        actions: {
+          shrink: {
+            number_of_shards: 4,
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1b47d988b218ee595430ec91eba91d80.asciidoc b/docs/doc_examples/1b47d988b218ee595430ec91eba91d80.asciidoc
new file mode 100644
index 000000000..d139c002b
--- /dev/null
+++ b/docs/doc_examples/1b47d988b218ee595430ec91eba91d80.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putIndexTemplate({
+  name: "logs-foo",
+  index_patterns: ["logs-foo-*"],
+  data_stream: {},
+  composed_of: ["logs-foo_component1", "logs-foo_component2"],
+  ignore_missing_component_templates: ["logs-foo_component2"],
+  priority: 500,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1b5c8d6e61930a308008b5b1ace2aa07.asciidoc b/docs/doc_examples/1b5c8d6e61930a308008b5b1ace2aa07.asciidoc
new file mode 100644
index 000000000..fcb26b045
--- /dev/null
+++ b/docs/doc_examples/1b5c8d6e61930a308008b5b1ace2aa07.asciidoc
@@ -0,0 +1,30 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  query: {
+    match: {
+      "manager.name": "Alice White",
+    },
+  },
+  aggs: {
+    Employees: {
+      nested: {
+        path: "employees",
+      },
+      aggs: {
+        "Employee Ages": {
+          histogram: {
+            field: "employees.age",
+            interval: 5,
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1b8655e6ba99fe39933c6eafe78728b7.asciidoc b/docs/doc_examples/1b8655e6ba99fe39933c6eafe78728b7.asciidoc
deleted file mode 100644
index 8901d4edc..000000000
--- a/docs/doc_examples/1b8655e6ba99fe39933c6eafe78728b7.asciidoc
+++ /dev/null
@@ -1,38 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response0 = await client.reindex({
-  body: {
-    source: {
-      index: 'twitter',
-      slice: {
-        id: 0,
-        max: 2
-      }
-    },
-    dest: {
-      index: 'new_twitter'
-    }
-  }
-})
-console.log(response0)
-
-const response1 = await client.reindex({
-  body: {
-    source: {
-      index:
'twitter', - slice: { - id: 1, - max: 2 - } - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/1b8caf0a6741126c6d0ad83b56fce290.asciidoc b/docs/doc_examples/1b8caf0a6741126c6d0ad83b56fce290.asciidoc deleted file mode 100644 index 1eb89aaca..000000000 --- a/docs/doc_examples/1b8caf0a6741126c6d0ad83b56fce290.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putTemplate({ - name: 'template_1', - body: { - index_patterns: [ - 'te*' - ], - settings: { - number_of_shards: 1 - }, - aliases: { - alias1: {}, - alias2: { - filter: { - term: { - user: 'kimchy' - } - }, - routing: 'kimchy' - }, - '{index}-alias': {} - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1b98b60d8e558fcccf9c550bdbf5b5c9.asciidoc b/docs/doc_examples/1b98b60d8e558fcccf9c550bdbf5b5c9.asciidoc new file mode 100644 index 000000000..8e56b7f06 --- /dev/null +++ b/docs/doc_examples/1b98b60d8e558fcccf9c550bdbf5b5c9.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "example3", + indices: [ + { + names: ["my-index-000001"], + privileges: ["read"], + query: { + template: { + source: + '{ "terms": { "group.statuses": {{#toJson}}_user.metadata.statuses{{/toJson}} }}', + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc b/docs/doc_examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc new file mode 100644 index 000000000..44005a3f3 --- /dev/null +++ b/docs/doc_examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "romanian_example", + settings: { + analysis: { + filter: { + romanian_stop: { + type: "stop", + stopwords: "_romanian_", + }, + romanian_keywords: { + type: "keyword_marker", + keywords: ["exemplu"], + }, + romanian_stemmer: { + type: "stemmer", + language: "romanian", + }, + }, + analyzer: { + rebuilt_romanian: { + tokenizer: "standard", + filter: [ + "lowercase", + "romanian_stop", + "romanian_keywords", + "romanian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1bc731a4df952228af6dfa6b48627332.asciidoc b/docs/doc_examples/1bc731a4df952228af6dfa6b48627332.asciidoc deleted file mode 100644 index 0e51e68ad..000000000 --- a/docs/doc_examples/1bc731a4df952228af6dfa6b48627332.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - max_docs: 10, - source: { - index: 'twitter', - query: { - function_score: { - random_score: {}, - min_score: 0.9 - } - } - }, - dest: { - index: 'random_twitter' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1bceb160ed2bcd51ee040caf21acf780.asciidoc b/docs/doc_examples/1bceb160ed2bcd51ee040caf21acf780.asciidoc new file mode 100644 index 000000000..25474a42e --- /dev/null +++ 
b/docs/doc_examples/1bceb160ed2bcd51ee040caf21acf780.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my-search-app", + search_application: { + indices: ["index1"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "retriever": {\n "rrf": {\n "retrievers": [\n {{#text_fields}}\n {\n "standard": {\n "query": {\n "match": {\n "{{.}}": "{{query_string}}"\n }\n }\n }\n },\n {{/text_fields}}\n {{#elser_fields}}\n {\n "standard": {\n "query": {\n "sparse_vector": {\n "field": "ml.inference.{{.}}_expanded.predicted_value",\n "inference_id": "",\n "query": "{{query_string}}"\n }\n }\n }\n },\n {{/elser_fields}}\n ],\n "rank_window_size": {{rrf.rank_window_size}},\n "rank_constant": {{rrf.rank_constant}}\n }\n }\n }\n ', + params: { + elser_fields: ["title", "meta_description"], + text_fields: ["title", "meta_description"], + query_string: "", + rrf: { + rank_window_size: 100, + rank_constant: 60, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1c142bc8cac8d9dcb4f60e22902d434f.asciidoc b/docs/doc_examples/1c142bc8cac8d9dcb4f60e22902d434f.asciidoc new file mode 100644 index 000000000..76ee6237d --- /dev/null +++ b/docs/doc_examples/1c142bc8cac8d9dcb4f60e22902d434f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + message_stats: { + string_stats: { + field: "message.keyword", + show_distribution: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1c1f2a6a193d9e64c37242b2824b3031.asciidoc b/docs/doc_examples/1c1f2a6a193d9e64c37242b2824b3031.asciidoc new file mode 100644 index 000000000..e9198e540 --- /dev/null +++ b/docs/doc_examples/1c1f2a6a193d9e64c37242b2824b3031.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "source_template", + template: { + settings: { + index: { + number_of_replicas: 2, + number_of_shards: 2, + mode: "time_series", + routing_path: ["metricset"], + }, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + metricset: { + type: "keyword", + time_series_dimension: true, + }, + k8s: { + properties: { + tx: { + type: "long", + }, + rx: { + type: "long", + }, + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.putIndexTemplate({ + name: 1, + index_patterns: ["k8s*"], + composed_of: ["source_template"], + data_stream: {}, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc b/docs/doc_examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc new file mode 100644 index 000000000..c7c693460 --- /dev/null +++ b/docs/doc_examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "my-index-000001", + id: 1, + script: { + source: "ctx._source.tags.add(params['tag'])", + lang: "painless", + params: { + tag: 
"blue", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1c87b5bf682bc1e8809a657529e14b07.asciidoc b/docs/doc_examples/1c87b5bf682bc1e8809a657529e14b07.asciidoc new file mode 100644 index 000000000..f7d5b04ed --- /dev/null +++ b/docs/doc_examples/1c87b5bf682bc1e8809a657529e14b07.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "shapes", + mappings: { + properties: { + location: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "shapes", + id: "deu", + document: { + location: { + type: "envelope", + coordinates: [ + [13, 53], + [14, 52], + ], + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "example", + query: { + bool: { + filter: { + geo_shape: { + location: { + indexed_shape: { + index: "shapes", + id: "deu", + path: "location", + }, + }, + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc b/docs/doc_examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc new file mode 100644 index 000000000..f7132ca37 --- /dev/null +++ b/docs/doc_examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteDataFrameAnalytics({ + id: "loganalytics", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cab9da122778a95061831265c250cc1.asciidoc b/docs/doc_examples/1cab9da122778a95061831265c250cc1.asciidoc new file mode 100644 index 000000000..2feadffb8 --- /dev/null +++ b/docs/doc_examples/1cab9da122778a95061831265c250cc1.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + tags: { + type: "keyword", + script: + "\n emit(doc['type'].value);\n if (doc['promoted'].value) {\n emit('hot');\n }\n ", + }, + }, + aggs: { + tags_count: { + value_count: { + field: "tags", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cadbcf2cfeb312f73b7f098291356ac.asciidoc b/docs/doc_examples/1cadbcf2cfeb312f73b7f098291356ac.asciidoc new file mode 100644 index 000000000..591ffd22e --- /dev/null +++ b/docs/doc_examples/1cadbcf2cfeb312f73b7f098291356ac.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "MULTIPOINT (102.0 2.0, 103.0 2.0)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cb3b45335ab1b9697c358104d44ea39.asciidoc b/docs/doc_examples/1cb3b45335ab1b9697c358104d44ea39.asciidoc new file mode 100644 index 000000000..90dc0b24f --- /dev/null +++ b/docs/doc_examples/1cb3b45335ab1b9697c358104d44ea39.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "xpack.security.transport.filter.enabled": 
false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cca4bb2f0ea7e43181be8bd965149d4.asciidoc b/docs/doc_examples/1cca4bb2f0ea7e43181be8bd965149d4.asciidoc new file mode 100644 index 000000000..6703e293f --- /dev/null +++ b/docs/doc_examples/1cca4bb2f0ea7e43181be8bd965149d4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.get({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + wait_for_completion_timeout: "2s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cd3b9d65576a9212eef898eb3105758.asciidoc b/docs/doc_examples/1cd3b9d65576a9212eef898eb3105758.asciidoc new file mode 100644 index 000000000..3aaca987e --- /dev/null +++ b/docs/doc_examples/1cd3b9d65576a9212eef898eb3105758.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.enable": "primaries", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cea60c47d5c0e150b4c8fff4cd75ffe.asciidoc b/docs/doc_examples/1cea60c47d5c0e150b4c8fff4cd75ffe.asciidoc new file mode 100644 index 000000000..b73c6cce1 --- /dev/null +++ b/docs/doc_examples/1cea60c47d5c0e150b4c8fff4cd75ffe.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + script: { + description: "Set index based on `lang` field and `dataset` param", + lang: "painless", + source: + "\n ctx['_index'] = ctx['lang'] + '-' + params['dataset'];\n ", + params: { + dataset: "catalog", + }, + }, + }, + ], + }, + docs: [ + { + _index: "generic-index", + _source: { + lang: "fr", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ceaa211756e2db3d48c6bc4b1a861b0.asciidoc b/docs/doc_examples/1ceaa211756e2db3d48c6bc4b1a861b0.asciidoc new file mode 100644 index 000000000..c7e7c914b --- /dev/null +++ b/docs/doc_examples/1ceaa211756e2db3d48c6bc4b1a861b0.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-index*", + max_samples_per_key: 2, + size: 20, + query: + "\n sample\n [any where uptime > 0] by host,os\n [any where port > 100] by host,op_sys\n [any where bool == true] by host,os\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cecd4d87a92427175157d41859df2af.asciidoc b/docs/doc_examples/1cecd4d87a92427175157d41859df2af.asciidoc new file mode 100644 index 000000000..c49cfc24d --- /dev/null +++ b/docs/doc_examples/1cecd4d87a92427175157d41859df2af.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.allocationExplain({ + index: "my-index-000001", + shard: 0, + primary: false, + current_node: "my-node", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cfa04e9654c1484e3d4c75bf439400a.asciidoc 
b/docs/doc_examples/1cfa04e9654c1484e3d4c75bf439400a.asciidoc new file mode 100644 index 000000000..2990d47a9 --- /dev/null +++ b/docs/doc_examples/1cfa04e9654c1484e3d4c75bf439400a.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "polygon", + coordinates: [ + [ + [1000, -1001], + [1001, -1001], + [1001, -1000], + [1000, -1000], + [1000, -1001], + ], + [ + [1000.2, -1001.2], + [1000.8, -1001.2], + [1000.8, -1001.8], + [1000.2, -1001.8], + [1000.2, -1001.2], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc b/docs/doc_examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc new file mode 100644 index 000000000..0963fa75c --- /dev/null +++ b/docs/doc_examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc @@ -0,0 +1,71 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryApiKeys({ + size: 0, + query: { + bool: { + must: { + term: { + invalidated: false, + }, + }, + should: [ + { + range: { + expiration: { + gte: "now", + }, + }, + }, + { + bool: { + must_not: { + exists: { + field: "expiration", + }, + }, + }, + }, + ], + minimum_should_match: 1, + }, + }, + aggs: { + keys_by_username: { + composite: { + sources: [ + { + usernames: { + terms: { + field: "username", + }, + }, + }, + ], + }, + aggs: { + expires_soon: { + filter: { + range: { + expiration: { + lte: "now+30d/d", + }, + }, + }, + aggs: { + key_names: { + terms: { + field: "name", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1d746272a7511bf91302a15b5c58ca0e.asciidoc b/docs/doc_examples/1d746272a7511bf91302a15b5c58ca0e.asciidoc new file mode 100644 index 000000000..0e8581267 --- /dev/null +++ b/docs/doc_examples/1d746272a7511bf91302a15b5c58ca0e.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "passage_vectors", + fields: ["full_text", "creation_time"], + _source: false, + knn: { + query_vector: [0.45, 45], + field: "paragraph.vector", + k: 2, + num_candidates: 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1d827ae674970692643ea81991e5396e.asciidoc b/docs/doc_examples/1d827ae674970692643ea81991e5396e.asciidoc new file mode 100644 index 000000000..ac92b9868 --- /dev/null +++ b/docs/doc_examples/1d827ae674970692643ea81991e5396e.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + flattened: { + type: "flattened", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + flattened: { + field: [ + "apple", + "apple", + "banana", + "avocado", + "10", + "200", + "AVOCADO", + "Banana", + "Tangerine", + ], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/1d918e206ad8dab916e59183da24d9ec.asciidoc 
b/docs/doc_examples/1d918e206ad8dab916e59183da24d9ec.asciidoc new file mode 100644 index 000000000..6e0a57866 --- /dev/null +++ b/docs/doc_examples/1d918e206ad8dab916e59183da24d9ec.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: ".watches", + settings: { + "index.routing.allocation.include.role": "watcher", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc b/docs/doc_examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc new file mode 100644 index 000000000..4a91620c5 --- /dev/null +++ b/docs/doc_examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "pre-dsl-ilm-policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50gb", + }, + }, + }, + delete: { + min_age: "7d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1da77e114459e0b77d78a3dcc8fae429.asciidoc b/docs/doc_examples/1da77e114459e0b77d78a3dcc8fae429.asciidoc deleted file mode 100644 index 69394b727..000000000 --- a/docs/doc_examples/1da77e114459e0b77d78a3dcc8fae429.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.create({ - index: 'twitter-1' -}) -console.log(response0) - -const response1 = await client.indices.create({ - index: 'twitter-2' -}) -console.log(response1) - -const response2 = await client.indices.putMapping({ - index: 'twitter-1,twitter-2', - body: { - properties: { - user_name: { - type: 'text' - } - } - } -}) -console.log(response2) ----- - diff --git a/docs/doc_examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc b/docs/doc_examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc new file mode 100644 index 000000000..9319faa39 --- /dev/null +++ b/docs/doc_examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/azure_ai_studio_embeddings", + body: { + service: "azureaistudio", + service_settings: { + api_key: "", + target: "", + provider: "", + endpoint_type: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1db086021e83205b6eab3b7765911cc2.asciidoc b/docs/doc_examples/1db086021e83205b6eab3b7765911cc2.asciidoc new file mode 100644 index 000000000..aa9237175 --- /dev/null +++ b/docs/doc_examples/1db086021e83205b6eab3b7765911cc2.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "parent_example", + mappings: { + properties: { + join: { + type: "join", + relations: { + question: "answer", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1db715eb00832686ecddb6603684fc26.asciidoc 
b/docs/doc_examples/1db715eb00832686ecddb6603684fc26.asciidoc new file mode 100644 index 000000000..d1849d8d6 --- /dev/null +++ b/docs/doc_examples/1db715eb00832686ecddb6603684fc26.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.enrollKibana(); +console.log(response); +---- diff --git a/docs/doc_examples/1dbb8cf17fbc45c87c7d2f75f15f9778.asciidoc b/docs/doc_examples/1dbb8cf17fbc45c87c7d2f75f15f9778.asciidoc index 5f5bce6dc..c1a1fab23 100644 --- a/docs/doc_examples/1dbb8cf17fbc45c87c7d2f75f15f9778.asciidoc +++ b/docs/doc_examples/1dbb8cf17fbc45c87c7d2f75f15f9778.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.cluster.state({ - filter_path: 'metadata.indices.*.stat*' -}) -console.log(response) + filter_path: "metadata.indices.*.stat*", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/1e08e054c761353f99211cd18e8ca47b.asciidoc b/docs/doc_examples/1e08e054c761353f99211cd18e8ca47b.asciidoc new file mode 100644 index 000000000..5f1410788 --- /dev/null +++ b/docs/doc_examples/1e08e054c761353f99211cd18e8ca47b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteModelSnapshot({ + job_id: "farequote", + snapshot_id: 1491948163, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc b/docs/doc_examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc new file mode 100644 index 000000000..5afe39c96 --- /dev/null +++ b/docs/doc_examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: "my-index-000001", + only_expunge_deletes: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e0f203aced9344382081ab095c44dde.asciidoc b/docs/doc_examples/1e0f203aced9344382081ab095c44dde.asciidoc new file mode 100644 index 000000000..bd8639ace --- /dev/null +++ b/docs/doc_examples/1e0f203aced9344382081ab095c44dde.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + text: { + type: "text", + store: true, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + text: [ + "the quick brown fox", + "the quick brown fox", + "jumped over the lazy dog", + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/1e18a67caf8f06ff2710ec4a8b30f625.asciidoc b/docs/doc_examples/1e18a67caf8f06ff2710ec4a8b30f625.asciidoc index 072141bc9..3e50dd22f 100644 --- a/docs/doc_examples/1e18a67caf8f06ff2710ec4a8b30f625.asciidoc +++ b/docs/doc_examples/1e18a67caf8f06ff2710ec4a8b30f625.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.cluster.state({ - filter_path: 'metadata.indices.*.state,-metadata.indices.logstash-*' -}) -console.log(response) + filter_path: "metadata.indices.*.state,-metadata.indices.logstash-*", +}); +console.log(response); ---- - 
diff --git a/docs/doc_examples/1e26353d546d733634187b8c3a7837a7.asciidoc b/docs/doc_examples/1e26353d546d733634187b8c3a7837a7.asciidoc new file mode 100644 index 000000000..2f307bbf3 --- /dev/null +++ b/docs/doc_examples/1e26353d546d733634187b8c3a7837a7.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector", + querystring: { + service_type: "sharepoint_online", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e2c5cef7a3f254c71a33865eb4d7569.asciidoc b/docs/doc_examples/1e2c5cef7a3f254c71a33865eb4d7569.asciidoc new file mode 100644 index 000000000..12ec8e928 --- /dev/null +++ b/docs/doc_examples/1e2c5cef7a3f254c71a33865eb4d7569.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "items", + query: { + bool: { + must: { + match: { + name: "chocolate", + }, + }, + should: { + distance_feature: { + field: "production_date", + pivot: "7d", + origin: "now", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc b/docs/doc_examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc new file mode 100644 index 000000000..702ce3c67 --- /dev/null +++ b/docs/doc_examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.segments({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e3553a73da487017f7a95088b6aa957.asciidoc b/docs/doc_examples/1e3553a73da487017f7a95088b6aa957.asciidoc new file mode 100644 index 000000000..d142fea11 --- /dev/null +++ b/docs/doc_examples/1e3553a73da487017f7a95088b6aa957.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRoles({ + name: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e49eba5b9042c1900a608fe5105ba43.asciidoc b/docs/doc_examples/1e49eba5b9042c1900a608fe5105ba43.asciidoc deleted file mode 100644 index 81a58db85..000000000 --- a/docs/doc_examples/1e49eba5b9042c1900a608fe5105ba43.asciidoc +++ /dev/null @@ -1,42 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.deleteByQuery({ - index: 'twitter', - body: { - slice: { - id: 0, - max: 2 - }, - query: { - range: { - likes: { - lt: 10 - } - } - } - } -}) -console.log(response0) - -const response1 = await client.deleteByQuery({ - index: 'twitter', - body: { - slice: { - id: 1, - max: 2 - }, - query: { - range: { - likes: { - lt: 10 - } - } - } - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/1e4b17b830ead15087ccd96151a5ebde.asciidoc b/docs/doc_examples/1e4b17b830ead15087ccd96151a5ebde.asciidoc new file mode 100644 index 000000000..bb1d3322e --- /dev/null +++ b/docs/doc_examples/1e4b17b830ead15087ccd96151a5ebde.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use 
`node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + runtime_mappings: { + message_and_context: { + type: "keyword", + script: + "\n emit(doc['message.keyword'].value + ' ' + doc['context.keyword'].value)\n ", + }, + }, + aggs: { + message_stats: { + string_stats: { + field: "message_and_context", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e50d993bd6517e6c381e82d09f0389e.asciidoc b/docs/doc_examples/1e50d993bd6517e6c381e82d09f0389e.asciidoc deleted file mode 100644 index 0486b4284..000000000 --- a/docs/doc_examples/1e50d993bd6517e6c381e82d09f0389e.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - from: 5, - size: 20, - query: { - term: { - 'user.id': '8a4f500d' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1e547696f54582840040b1aa6661760c.asciidoc b/docs/doc_examples/1e547696f54582840040b1aa6661760c.asciidoc new file mode 100644 index 000000000..4d6375dbb --- /dev/null +++ b/docs/doc_examples/1e547696f54582840040b1aa6661760c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e871f060dbe1a5c316ed205278804a8.asciidoc b/docs/doc_examples/1e871f060dbe1a5c316ed205278804a8.asciidoc new file mode 100644 index 000000000..25aa22960 --- /dev/null +++ b/docs/doc_examples/1e871f060dbe1a5c316ed205278804a8.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + countries: { + terms: { + field: "artist.country", + order: { + "rock>playback_stats.avg": "desc", + }, + }, + aggs: { + rock: { + filter: { + term: { + genre: "rock", + }, + }, + aggs: { + playback_stats: { + stats: { + field: "play_count", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e94a2bb95bc245bcfb87ac7d611cf49.asciidoc b/docs/doc_examples/1e94a2bb95bc245bcfb87ac7d611cf49.asciidoc new file mode 100644 index 000000000..218edbd2c --- /dev/null +++ b/docs/doc_examples/1e94a2bb95bc245bcfb87ac7d611cf49.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time", + tdigest: { + execution_hint: "high_accuracy", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c48264ec5d9b9679fddd72e5c44425b9.asciidoc b/docs/doc_examples/1e9cab0b2727624e22e8cf4e7ca498ac.asciidoc similarity index 75% rename from docs/doc_examples/c48264ec5d9b9679fddd72e5c44425b9.asciidoc rename to docs/doc_examples/1e9cab0b2727624e22e8cf4e7ca498ac.asciidoc index 646050e2b..acfb94c3a 100644 --- a/docs/doc_examples/c48264ec5d9b9679fddd72e5c44425b9.asciidoc +++ b/docs/doc_examples/1e9cab0b2727624e22e8cf4e7ca498ac.asciidoc @@ -4,9 +4,7 @@ [source, js] 
---- const response = await client.cluster.health({ - index: 'twitter', - level: 'shards' -}) -console.log(response) + pretty: "true", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/1ea24f67fbbb6293d53caf2fe0c4b984.asciidoc b/docs/doc_examples/1ea24f67fbbb6293d53caf2fe0c4b984.asciidoc new file mode 100644 index 000000000..70528bb7e --- /dev/null +++ b/docs/doc_examples/1ea24f67fbbb6293d53caf2fe0c4b984.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "simple", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1eb9c6ecb827ca69f7b17f7d2a26eae9.asciidoc b/docs/doc_examples/1eb9c6ecb827ca69f7b17f7d2a26eae9.asciidoc new file mode 100644 index 000000000..979a1f33d --- /dev/null +++ b/docs/doc_examples/1eb9c6ecb827ca69f7b17f7d2a26eae9.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + term: { + "url.full": "{{#url}}{{host}}/{{page}}{{/url}}", + }, + }, + }, + params: { + host: "http://example.com/", + page: "hello-world", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ec66f188f681598cb5d7df700b214e3.asciidoc b/docs/doc_examples/1ec66f188f681598cb5d7df700b214e3.asciidoc new file mode 100644 index 000000000..e44de075a --- /dev/null +++ b/docs/doc_examples/1ec66f188f681598cb5d7df700b214e3.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_analyzer: { + type: "custom", + tokenizer: "standard", + filter: ["my_custom_keyword_marker_filter", "porter_stem"], + }, + }, + filter: { + my_custom_keyword_marker_filter: { + type: "keyword_marker", + keywords_path: "analysis/example_word_list.txt", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc b/docs/doc_examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc new file mode 100644 index 000000000..68fc1d5ff --- /dev/null +++ b/docs/doc_examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.deleteAsync({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ed77bf308fa4ab328b36060e412f500.asciidoc b/docs/doc_examples/1ed77bf308fa4ab328b36060e412f500.asciidoc new file mode 100644 index 000000000..bba203b58 --- /dev/null +++ b/docs/doc_examples/1ed77bf308fa4ab328b36060e412f500.asciidoc @@ -0,0 +1,66 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "metrics_index", + mappings: { + properties: { + network: { + properties: { + name: { + type: "keyword", + }, + }, + }, + latency_histo: { + type: "histogram",
+ }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 1, + refresh: "true", + document: { + "network.name": "net-1", + latency_histo: { + values: [1, 3, 8, 12, 15], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "metrics_index", + id: 2, + refresh: "true", + document: { + "network.name": "net-2", + latency_histo: { + values: [1, 6, 8, 12, 14], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "metrics_index", + size: 0, + aggs: { + latency_buckets: { + histogram: { + field: "latency_histo", + interval: 5, + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/1eea46b08610972b79fdc4649748455d.asciidoc b/docs/doc_examples/1eea46b08610972b79fdc4649748455d.asciidoc new file mode 100644 index 000000000..06268c2dd --- /dev/null +++ b/docs/doc_examples/1eea46b08610972b79fdc4649748455d.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: + "cosineSimilarity(params.query_vector, 'my_dense_vector') + 1.0", + params: { + query_vector: [4, 3.4, -0.2], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc b/docs/doc_examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc new file mode 100644 index 000000000..3a1fca850 --- /dev/null +++ b/docs/doc_examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + sort: [ + { + post_date: { + format: "strict_date_optional_time_nanos", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1f00e73c144603e97f6c14ab15fa1913.asciidoc b/docs/doc_examples/1f00e73c144603e97f6c14ab15fa1913.asciidoc new file mode 100644 index 000000000..09187a87e --- /dev/null +++ b/docs/doc_examples/1f00e73c144603e97f6c14ab15fa1913.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "greek_example", + settings: { + analysis: { + filter: { + greek_stop: { + type: "stop", + stopwords: "_greek_", + }, + greek_lowercase: { + type: "lowercase", + language: "greek", + }, + greek_keywords: { + type: "keyword_marker", + keywords: ["παράδειγμα"], + }, + greek_stemmer: { + type: "stemmer", + language: "greek", + }, + }, + analyzer: { + rebuilt_greek: { + tokenizer: "standard", + filter: [ + "greek_lowercase", + "greek_stop", + "greek_keywords", + "greek_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1f13c7caef9c2fe0f73fce8795bbc9b0.asciidoc b/docs/doc_examples/1f13c7caef9c2fe0f73fce8795bbc9b0.asciidoc new file mode 100644 index 000000000..303c74cb4 --- /dev/null +++ b/docs/doc_examples/1f13c7caef9c2fe0f73fce8795bbc9b0.asciidoc @@ -0,0 +1,43 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + std_folded: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "asciifolding"], + }, + }, + }, + }, + mappings: { + properties: { + my_text: { + type: "text", + analyzer: "std_folded", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "std_folded", + text: "Is this déjà vu?", +}); +console.log(response1); + +const response2 = await client.indices.analyze({ + index: "my-index-000001", + field: "my_text", + text: "Is this déjà vu?", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/1f336ecc62480c1d56351cc2f82d0d08.asciidoc b/docs/doc_examples/1f336ecc62480c1d56351cc2f82d0d08.asciidoc deleted file mode 100644 index 0de94149b..000000000 --- a/docs/doc_examples/1f336ecc62480c1d56351cc2f82d0d08.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - id: '1', - version: '2', - version_type: 'external', - body: { - message: 'elasticsearch now has versioning support, double cool!' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc b/docs/doc_examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc new file mode 100644 index 000000000..93dc5492a --- /dev/null +++ b/docs/doc_examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.create({ + repository: "my_repository", + snapshot: "my_snapshot", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1f507659757e2844cefced25848540a0.asciidoc b/docs/doc_examples/1f507659757e2844cefced25848540a0.asciidoc new file mode 100644 index 000000000..2ff2c4ff7 --- /dev/null +++ b/docs/doc_examples/1f507659757e2844cefced25848540a0.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "12km", + "pin.location": [-70, 40], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ae03ba3b56e5e287953094050766738.asciidoc b/docs/doc_examples/1f673e1a0de2970dc648618d5425a994.asciidoc similarity index 51% rename from docs/doc_examples/3ae03ba3b56e5e287953094050766738.asciidoc rename to docs/doc_examples/1f673e1a0de2970dc648618d5425a994.asciidoc index cb67a6047..ee01d35bf 100644 --- a/docs/doc_examples/3ae03ba3b56e5e287953094050766738.asciidoc +++ b/docs/doc_examples/1f673e1a0de2970dc648618d5425a994.asciidoc @@ -3,14 +3,13 @@ [source, js] ---- -const response0 = await client.indices.refresh() -console.log(response0) +const response = await client.indices.refresh(); +console.log(response); const response1 = await client.search({ - index: 'new_twitter', - size: '0', - filter_path: 'hits.total' -}) -console.log(response1) + index: "my-new-index-000001", + size: 0, + 
filter_path: "hits.total", +}); +console.log(response1); ---- - diff --git a/docs/doc_examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc b/docs/doc_examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc new file mode 100644 index 000000000..d58aa464e --- /dev/null +++ b/docs/doc_examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.validateQuery({ + index: "my-index-000001", + rewrite: "true", + all_shards: "true", + query: { + match: { + "user.id": { + query: "kimchy", + fuzziness: "auto", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1f6fe6833686e38c3711c6f2aa00a078.asciidoc b/docs/doc_examples/1f6fe6833686e38c3711c6f2aa00a078.asciidoc deleted file mode 100644 index 8d1852655..000000000 --- a/docs/doc_examples/1f6fe6833686e38c3711c6f2aa00a078.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - user_id: { - type: 'keyword', - ignore_above: 20 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1f8a6d2cc57ed8997a52354aca371aac.asciidoc b/docs/doc_examples/1f8a6d2cc57ed8997a52354aca371aac.asciidoc new file mode 100644 index 000000000..65d03d532 --- /dev/null +++ b/docs/doc_examples/1f8a6d2cc57ed8997a52354aca371aac.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "direct_pki_only", + roles: ["role_for_pki1_direct"], + rules: { + all: [ + { + field: { + "realm.name": "pki1", + }, + }, + { + field: { + "metadata.pki_delegated_by_user": null, + }, + }, + ], + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1577e6e806b3283c9e99f1596d310754.asciidoc b/docs/doc_examples/1f900f7178e80051e75d4fd04467cf49.asciidoc similarity index 58% rename from docs/doc_examples/1577e6e806b3283c9e99f1596d310754.asciidoc rename to docs/doc_examples/1f900f7178e80051e75d4fd04467cf49.asciidoc index cf35f86a2..79c533e29 100644 --- a/docs/doc_examples/1577e6e806b3283c9e99f1596d310754.asciidoc +++ b/docs/doc_examples/1f900f7178e80051e75d4fd04467cf49.asciidoc @@ -4,14 +4,12 @@ [source, js] ---- const response = await client.index({ - index: 'test', - id: '1', - refresh: true, - body: { - text: 'words words', - flag: 'foo' - } -}) -console.log(response) + index: "my-index-000001", + id: 1, + pipeline: "pipelineB", + document: { + field: "value", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/1fcc4a3280be399753dcfd5c489ff682.asciidoc b/docs/doc_examples/1fcc4a3280be399753dcfd5c489ff682.asciidoc new file mode 100644 index 000000000..5bc970622 --- /dev/null +++ b/docs/doc_examples/1fcc4a3280be399753dcfd5c489ff682.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + my_range: { + type: "ip_range", + }, + }, + }, +}); +console.log(response); 
+ +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + my_range: [ + "10.0.0.0/24", + { + gte: "10.0.0.0", + lte: "10.0.0.255", + }, + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/1fddbd602a6acf896a393cdb500a2831.asciidoc b/docs/doc_examples/1fddbd602a6acf896a393cdb500a2831.asciidoc new file mode 100644 index 000000000..79f5ecce5 --- /dev/null +++ b/docs/doc_examples/1fddbd602a6acf896a393cdb500a2831.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + by_date: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + avg_number_of_sales_per_year: { + rate: { + field: "price", + unit: "year", + mode: "value_count", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1fe2ed1d65c4774755de44c9b9d6ed67.asciidoc b/docs/doc_examples/1fe2ed1d65c4774755de44c9b9d6ed67.asciidoc new file mode 100644 index 000000000..674ca8a30 --- /dev/null +++ b/docs/doc_examples/1fe2ed1d65c4774755de44c9b9d6ed67.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "ingest", + filter_path: "nodes.*.ingest", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ff12523efbd59c213c676937757c460.asciidoc b/docs/doc_examples/1ff12523efbd59c213c676937757c460.asciidoc new file mode 100644 index 000000000..3673a4528 --- /dev/null +++ b/docs/doc_examples/1ff12523efbd59c213c676937757c460.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + ids: ["VuaCfGcBCdbkQm-e5aOx"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ff296e868635fd102239871a331331b.asciidoc b/docs/doc_examples/1ff296e868635fd102239871a331331b.asciidoc new file mode 100644 index 000000000..831f54389 --- /dev/null +++ b/docs/doc_examples/1ff296e868635fd102239871a331331b.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + type_count: { + cardinality: { + field: "type", + precision_threshold: 100, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ff9b263b7c3e83278bb6a776a51590a.asciidoc b/docs/doc_examples/1ff9b263b7c3e83278bb6a776a51590a.asciidoc new file mode 100644 index 000000000..d795b84eb --- /dev/null +++ b/docs/doc_examples/1ff9b263b7c3e83278bb6a776a51590a.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + prices: { + histogram: { + field: "price", + interval: 50, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/20005d8a6555b259b299d862cd218701.asciidoc b/docs/doc_examples/20005d8a6555b259b299d862cd218701.asciidoc new file mode 100644 index 000000000..c6a7579d9 --- /dev/null +++ 
b/docs/doc_examples/20005d8a6555b259b299d862cd218701.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: { + query: "this is a test", + operator: "and", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2006f577a113bda40905cf7b405bf1cf.asciidoc b/docs/doc_examples/2006f577a113bda40905cf7b405bf1cf.asciidoc new file mode 100644 index 000000000..cb8115039 --- /dev/null +++ b/docs/doc_examples/2006f577a113bda40905cf7b405bf1cf.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + description: "If 'url.scheme' is 'http', set 'url.insecure' to true", + if: "ctx.url?.scheme =~ /^http[^s]/", + field: "url.insecure", + value: true, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2009f2d1ba0780a799a0fdce889c9739.asciidoc b/docs/doc_examples/2009f2d1ba0780a799a0fdce889c9739.asciidoc new file mode 100644 index 000000000..2bf348f71 --- /dev/null +++ b/docs/doc_examples/2009f2d1ba0780a799a0fdce889c9739.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "passage_vectors", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + full_text: "first paragraph another paragraph", + creation_time: "2019-05-04", + paragraph: [ + { + vector: [0.45, 45], + text: "first paragraph", + paragraph_id: "1", + }, + { + vector: [0.8, 0.6], + text: "another paragraph", + paragraph_id: "2", + }, + ], + }, + { + index: { + _id: "2", + }, + }, + { + full_text: "number one paragraph number two paragraph", + creation_time: "2020-05-04", + paragraph: [ + { + vector: [1.2, 4.5], + text: "number one paragraph", + paragraph_id: "1", + }, + { + vector: [-1, 42], + text: "number two paragraph", + paragraph_id: "2", + }, + ], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc b/docs/doc_examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc new file mode 100644 index 000000000..536cf6cba --- /dev/null +++ b/docs/doc_examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream({ + name: "my-data-stream*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/20162e1dac807a7604f58dad814d1bc5.asciidoc b/docs/doc_examples/20162e1dac807a7604f58dad814d1bc5.asciidoc new file mode 100644 index 000000000..3b4eb8d29 --- /dev/null +++ b/docs/doc_examples/20162e1dac807a7604f58dad814d1bc5.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + en: { + tokenizer: "standard", + filter: ["my_en_US_dict_stemmer"], + }, + }, + filter: { + my_en_US_dict_stemmer: { + type: "hunspell", + locale: 
"en_US", + dedup: false, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/203c3bb334384bdfb11ff1101ccfba25.asciidoc b/docs/doc_examples/203c3bb334384bdfb11ff1101ccfba25.asciidoc new file mode 100644 index 000000000..45ad3be18 --- /dev/null +++ b/docs/doc_examples/203c3bb334384bdfb11ff1101ccfba25.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + suggest: { + text: "obel prize", + simple_phrase: { + phrase: { + field: "title.trigram", + size: 1, + smoothing: { + laplace: { + alpha: 0.7, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/20407c847adb8393ce41dc656384afc4.asciidoc b/docs/doc_examples/20407c847adb8393ce41dc656384afc4.asciidoc new file mode 100644 index 000000000..ce827a730 --- /dev/null +++ b/docs/doc_examples/20407c847adb8393ce41dc656384afc4.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "passage_vectors", + fields: ["creation_time", "full_text"], + _source: false, + knn: { + query_vector: [0.45, 45], + field: "paragraph.vector", + k: 2, + num_candidates: 2, + filter: { + bool: { + filter: [ + { + range: { + creation_time: { + gte: "2019-05-01", + lte: "2019-05-05", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2051ffe025550ab6645bfd525eaed3c4.asciidoc b/docs/doc_examples/2051ffe025550ab6645bfd525eaed3c4.asciidoc new file mode 100644 index 000000000..1b42fb168 --- /dev/null +++ b/docs/doc_examples/2051ffe025550ab6645bfd525eaed3c4.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top_left: "POINT (-74.1 40.73)", + bottom_right: "POINT (-71.12 40.01)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2063713516847eef5d1dbf4ca1e877b0.asciidoc b/docs/doc_examples/2063713516847eef5d1dbf4ca1e877b0.asciidoc new file mode 100644 index 000000000..c9405986c --- /dev/null +++ b/docs/doc_examples/2063713516847eef5d1dbf4ca1e877b0.asciidoc @@ -0,0 +1,93 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (4.912350 52.374081)", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (4.901618 52.369219)", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (4.405200 51.222900)", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: 
"POINT (2.336389 48.861111)", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (2.327000 48.860000)", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + aggregations: { + "large-grid": { + geohex_grid: { + field: "location", + precision: 4, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/206c723296be8ef8d58aef3ee01f5ba2.asciidoc b/docs/doc_examples/206c723296be8ef8d58aef3ee01f5ba2.asciidoc new file mode 100644 index 000000000..f33acb605 --- /dev/null +++ b/docs/doc_examples/206c723296be8ef8d58aef3ee01f5ba2.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + my_date_histo: { + date_histogram: { + field: "timestamp", + calendar_interval: "day", + }, + aggs: { + the_deriv: { + derivative: { + buckets_path: "_count", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/206d57bf0cb022c8229894e7753eca83.asciidoc b/docs/doc_examples/206d57bf0cb022c8229894e7753eca83.asciidoc new file mode 100644 index 000000000..f739baea2 --- /dev/null +++ b/docs/doc_examples/206d57bf0cb022c8229894e7753eca83.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "example", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_shape: { + location: { + shape: { + type: "envelope", + coordinates: [ + [13, 53], + [14, 52], + ], + }, + relation: "within", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2081739da0c69de8af6f5bf9e94433e6.asciidoc b/docs/doc_examples/2081739da0c69de8af6f5bf9e94433e6.asciidoc new file mode 100644 index 000000000..2730d46b0 --- /dev/null +++ b/docs/doc_examples/2081739da0c69de8af6f5bf9e94433e6.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "MULTILINESTRING ((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0), (100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/208c2b41bd1659aae8f02fa3e3b7378a.asciidoc b/docs/doc_examples/208c2b41bd1659aae8f02fa3e3b7378a.asciidoc new file mode 100644 index 000000000..e782bce3f --- /dev/null +++ b/docs/doc_examples/208c2b41bd1659aae8f02fa3e3b7378a.asciidoc @@ -0,0 +1,48 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + first_name: { + type: "text", + copy_to: "full_name", + }, + last_name: { + type: "text", + copy_to: "full_name", + }, + full_name: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + first_name: "John", + last_name: "Smith", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + 
+    match: {
+      full_name: {
+        query: "John Smith",
+        operator: "and",
+      },
+    },
+  },
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/a7c15fe6b5779c84ce9a34bf4b2a7ab7.asciidoc b/docs/doc_examples/209a9190082498f0b7daa26f8834846b.asciidoc
similarity index 55%
rename from docs/doc_examples/a7c15fe6b5779c84ce9a34bf4b2a7ab7.asciidoc
rename to docs/doc_examples/209a9190082498f0b7daa26f8834846b.asciidoc
index fb7990dd8..4b01230ea 100644
--- a/docs/doc_examples/a7c15fe6b5779c84ce9a34bf4b2a7ab7.asciidoc
+++ b/docs/doc_examples/209a9190082498f0b7daa26f8834846b.asciidoc
@@ -4,16 +4,13 @@
 [source, js]
 ----
 const response = await client.indices.putMapping({
-  index: 'my_index',
-  body: {
-    properties: {
-      my_field: {
-        type: 'text',
-        fielddata: true
-      }
-    }
-  }
-})
-console.log(response)
+  index: "my-index-000001",
+  properties: {
+    title: {
+      type: "text",
+      norms: false,
+    },
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/20bc71cc5bbe04184e27827f3777a406.asciidoc b/docs/doc_examples/20bc71cc5bbe04184e27827f3777a406.asciidoc
new file mode 100644
index 000000000..d94a2c5aa
--- /dev/null
+++ b/docs/doc_examples/20bc71cc5bbe04184e27827f3777a406.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  fields: ["@timestamp", "day_of_week"],
+  _source: false,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/20c595907b4afbf26bd60e816a6ddf6a.asciidoc b/docs/doc_examples/20c595907b4afbf26bd60e816a6ddf6a.asciidoc
new file mode 100644
index 000000000..0ffd26450
--- /dev/null
+++ b/docs/doc_examples/20c595907b4afbf26bd60e816a6ddf6a.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.searchApplication.search({
+  name: "my_search_application",
+  params: {
+    query_string: "kayaking",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc b/docs/doc_examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc
new file mode 100644
index 000000000..970d578eb
--- /dev/null
+++ b/docs/doc_examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.getRecords({
+  job_id: "low_request_rate",
+  sort: "record_score",
+  desc: true,
+  start: 1454944100000,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/20f62d0540bf6261549bd286416eae28.asciidoc b/docs/doc_examples/20f62d0540bf6261549bd286416eae28.asciidoc
new file mode 100644
index 000000000..2f0f9c3a6
--- /dev/null
+++ b/docs/doc_examples/20f62d0540bf6261549bd286416eae28.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.enrich.putPolicy({
+  name: "my-policy",
+  match: {
+    indices: "users",
+    match_field: "email",
+    enrich_fields: ["first_name", "last_name", "city", "zip", "state"],
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/2105f2d1d81977054a93163a175793ce.asciidoc b/docs/doc_examples/2105f2d1d81977054a93163a175793ce.asciidoc
new file mode 100644
index 000000000..8f1c83690
--- /dev/null
+++ b/docs/doc_examples/2105f2d1d81977054a93163a175793ce.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.status();
+console.log(response);
+----
diff --git a/docs/doc_examples/213ab768f1b6a895e09403a0880e259a.asciidoc b/docs/doc_examples/213ab768f1b6a895e09403a0880e259a.asciidoc
deleted file mode 100644
index e0c0f2487..000000000
--- a/docs/doc_examples/213ab768f1b6a895e09403a0880e259a.asciidoc
+++ /dev/null
@@ -1,26 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  index: 'sales',
-  size: '0',
-  body: {
-    aggs: {
-      types_count: {
-        value_count: {
-          script: {
-            id: 'my_script',
-            params: {
-              field: 'type'
-            }
-          }
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc b/docs/doc_examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc
new file mode 100644
index 000000000..516c7c90c
--- /dev/null
+++ b/docs/doc_examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    text_expansion: {
+      "": {
+        model_id: "the model to produce the token weights",
+        model_text: "the query string",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/21565b72da426776e445b1a166f6e104.asciidoc b/docs/doc_examples/21565b72da426776e445b1a166f6e104.asciidoc
new file mode 100644
index 000000000..04fba14ee
--- /dev/null
+++ b/docs/doc_examples/21565b72da426776e445b1a166f6e104.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      "my-join-field": {
+        type: "join",
+        relations: {
+          parent: "child",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/216848930c2d344fe0bed0daa70c35b9.asciidoc b/docs/doc_examples/216848930c2d344fe0bed0daa70c35b9.asciidoc
index 841913ded..3e6ff2d49 100644
--- a/docs/doc_examples/216848930c2d344fe0bed0daa70c35b9.asciidoc
+++ b/docs/doc_examples/216848930c2d344fe0bed0daa70c35b9.asciidoc
@@ -4,9 +4,8 @@
 [source, js]
 ----
 const response = await client.tasks.list({
-  detailed: 'true',
-  actions: '*/delete/byquery'
-})
-console.log(response)
+  detailed: "true",
+  actions: "*/delete/byquery",
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/216a6573ab4ab023e5dcac4eaa08c3c8.asciidoc b/docs/doc_examples/216a6573ab4ab023e5dcac4eaa08c3c8.asciidoc
new file mode 100644
index 000000000..2b66cb86d
--- /dev/null
+++ b/docs/doc_examples/216a6573ab4ab023e5dcac4eaa08c3c8.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.verifyRepository({
+  name: "my_unverified_backup",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/a8fba09a46b2c3524428aa3259b7124f.asciidoc b/docs/doc_examples/21715c32c140feeab04b38ff6d6de111.asciidoc
similarity index 78%
rename from docs/doc_examples/a8fba09a46b2c3524428aa3259b7124f.asciidoc
rename to docs/doc_examples/21715c32c140feeab04b38ff6d6de111.asciidoc
index fdefde9b0..c37e742e2 100644
--- a/docs/doc_examples/a8fba09a46b2c3524428aa3259b7124f.asciidoc
+++ b/docs/doc_examples/21715c32c140feeab04b38ff6d6de111.asciidoc
@@ -4,8 +4,7 @@
 [source, js]
 ----
 const response = await client.indices.getMapping({
-  index: 'twitter'
-})
-console.log(response)
+  index: "my-index-000001",
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/2185c9dfc62a59313df1702ec1c3513e.asciidoc b/docs/doc_examples/2185c9dfc62a59313df1702ec1c3513e.asciidoc
new file mode 100644
index 000000000..c268c702d
--- /dev/null
+++ b/docs/doc_examples/2185c9dfc62a59313df1702ec1c3513e.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "latency",
+  size: 0,
+  aggs: {
+    load_time_outlier: {
+      percentiles: {
+        field: "load_time",
+        percents: [95, 99, 99.9],
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/218b9009f120e8ad33f710e019179562.asciidoc b/docs/doc_examples/218b9009f120e8ad33f710e019179562.asciidoc
new file mode 100644
index 000000000..0ac6636b6
--- /dev/null
+++ b/docs/doc_examples/218b9009f120e8ad33f710e019179562.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.getRepository({
+  name: "my_repository",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/21a226d91d8edd209f6a821064e83918.asciidoc b/docs/doc_examples/21a226d91d8edd209f6a821064e83918.asciidoc
new file mode 100644
index 000000000..44d76f5ca
--- /dev/null
+++ b/docs/doc_examples/21a226d91d8edd209f6a821064e83918.asciidoc
@@ -0,0 +1,33 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  query: {
+    match: {
+      type: "t-shirt",
+    },
+  },
+  aggs: {
+    all_products: {
+      global: {},
+      aggs: {
+        avg_price: {
+          avg: {
+            field: "price",
+          },
+        },
+      },
+    },
+    t_shirts: {
+      avg: {
+        field: "price",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/21bb03ca9123de3237c1c76934f9f172.asciidoc b/docs/doc_examples/21bb03ca9123de3237c1c76934f9f172.asciidoc
new file mode 100644
index 000000000..b791b1658
--- /dev/null
+++ b/docs/doc_examples/21bb03ca9123de3237c1c76934f9f172.asciidoc
@@ -0,0 +1,40 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "logs",
+  id: 4,
+  refresh: "true",
+  document: {
+    body: "info: user Bob logged out",
+  },
+});
+console.log(response);
+
+const response1 = await client.search({
+  index: "logs",
+  size: 0,
+  aggs: {
+    messages: {
+      filters: {
+        other_bucket_key: "other_messages",
+        filters: {
+          errors: {
+            match: {
+              body: "error",
+            },
+          },
+          warnings: {
+            match: {
+              body: "warning",
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc b/docs/doc_examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc
new file mode 100644
index 000000000..eb6345b43
--- /dev/null
+++ b/docs/doc_examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.danglingIndices.listDanglingIndices();
+console.log(response);
+----
diff --git a/docs/doc_examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc b/docs/doc_examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc
new file mode 100644
index 000000000..2cf11a621
--- /dev/null
+++ b/docs/doc_examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_inference/text_embedding/azure_ai_studio_embeddings",
+  body: {
+    service: "azureaistudio",
+    service_settings: {
+      api_key: "",
+      target: "",
+      provider: "",
+      endpoint_type: "",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/21d0ab6e420bfe7a1639db6af5b2e9c0.asciidoc b/docs/doc_examples/21d0ab6e420bfe7a1639db6af5b2e9c0.asciidoc
new file mode 100644
index 000000000..1e800bb8e
--- /dev/null
+++ b/docs/doc_examples/21d0ab6e420bfe7a1639db6af5b2e9c0.asciidoc
@@ -0,0 +1,35 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "reviews",
+  filter_path: "aggregations",
+  size: 0,
+  runtime_mappings: {
+    "rating.out_of_ten": {
+      type: "long",
+      script: {
+        source: "emit(doc['rating'].value * params.scaleFactor)",
+        params: {
+          scaleFactor: 2,
+        },
+      },
+    },
+  },
+  aggs: {
+    review_average: {
+      avg: {
+        field: "rating.out_of_ten",
+      },
+    },
+    review_variability: {
+      median_absolute_deviation: {
+        field: "rating.out_of_ten",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/21d5fe55ca32b10b118224ea1a8a2e04.asciidoc b/docs/doc_examples/21d5fe55ca32b10b118224ea1a8a2e04.asciidoc
new file mode 100644
index 000000000..2540558a2
--- /dev/null
+++ b/docs/doc_examples/21d5fe55ca32b10b118224ea1a8a2e04.asciidoc
@@ -0,0 +1,77 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "correlate_latency",
+  size: 0,
+  filter_path: "aggregations",
+  aggs: {
+    buckets: {
+      terms: {
+        field: "version",
+        size: 2,
+      },
+      aggs: {
+        latency_ranges: {
+          range: {
+            field: "latency",
+            ranges: [
+              {
+                to: 0,
+              },
+              {
+                from: 0,
+                to: 105,
+              },
+              {
+                from: 105,
+                to: 225,
+              },
+              {
+                from: 225,
+                to: 445,
+              },
+              {
+                from: 445,
+                to: 665,
+              },
+              {
+                from: 665,
+                to: 885,
+              },
+              {
+                from: 885,
+                to: 1115,
+              },
+              {
+                from: 1115,
+                to: 1335,
+              },
+              {
+                from: 1335,
+                to: 1555,
+              },
+              {
+                from: 1555,
+                to: 1775,
+              },
+              {
+                from: 1775,
+              },
+            ],
+          },
+        },
+        ks_test: {
+          bucket_count_ks_test: {
+            buckets_path: "latency_ranges>_count",
+            alternative: ["less", "greater", "two_sided"],
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/21e95d29bc37deb5689a654aa323b4ba.asciidoc b/docs/doc_examples/21e95d29bc37deb5689a654aa323b4ba.asciidoc
new file mode 100644
index 000000000..48dedaf2a
--- /dev/null
+++ b/docs/doc_examples/21e95d29bc37deb5689a654aa323b4ba.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRoleMapping({
+  name: "admins",
+  roles: ["monitoring", "user"],
+  rules: {
+    field: {
+      groups: "cn=admins,dc=example,dc=com",
+    },
+  },
+  enabled: true,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/221e9b14567f950008459af77757750e.asciidoc b/docs/doc_examples/221e9b14567f950008459af77757750e.asciidoc
new file mode 100644
index 000000000..9fe26fa18
--- /dev/null
+++ b/docs/doc_examples/221e9b14567f950008459af77757750e.asciidoc
@@ -0,0 +1,24 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.watcher.putWatch({
+  id: "cluster_health_watch",
+  trigger: {
+    schedule: {
+      interval: "10s",
+    },
+  },
+  input: {
+    http: {
+      request: {
+        host: "localhost",
+        port: 9200,
+        path: "/_cluster/health",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/2224143c45dfc83a2d10b98cd4f94bb5.asciidoc b/docs/doc_examples/2224143c45dfc83a2d10b98cd4f94bb5.asciidoc
new file mode 100644
index 000000000..7fdf65e5f
--- /dev/null
+++ b/docs/doc_examples/2224143c45dfc83a2d10b98cd4f94bb5.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index",
+  query: {
+    bool: {
+      must_not: [
+        {
+          nested: {
+            path: "comments",
+            query: {
+              term: {
+                "comments.author": "nik9000",
+              },
+            },
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/222e49c924ca8bac7b41bc952a39261c.asciidoc b/docs/doc_examples/222e49c924ca8bac7b41bc952a39261c.asciidoc
new file mode 100644
index 000000000..b4bcd4bbb
--- /dev/null
+++ b/docs/doc_examples/222e49c924ca8bac7b41bc952a39261c.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index",
+  size: 3,
+  query: {
+    bool: {
+      should: [
+        {
+          match: {
+            title: {
+              query: "mountain lake",
+              boost: 1,
+            },
+          },
+        },
+        {
+          semantic: {
+            field: "title_semantic",
+            query: "mountain lake",
+            boost: 2,
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc b/docs/doc_examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc
index 246f3de57..36c63a01a 100644
--- a/docs/doc_examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc
+++ b/docs/doc_examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc
@@ -4,61 +4,58 @@
 [source, js]
 ----
 const response = await client.search({
-  body: {
-    query: {
-      nested: {
-        path: 'parent',
-        query: {
-          bool: {
-            must: {
-              range: {
-                'parent.age': {
-                  gte: 21
-                }
-              }
+  query: {
+    nested: {
+      path: "parent",
+      query: {
+        bool: {
+          must: {
+            range: {
+              "parent.age": {
+                gte: 21,
+              },
             },
-            filter: {
-              nested: {
-                path: 'parent.child',
-                query: {
-                  match: {
-                    'parent.child.name': 'matt'
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
+          },
+          filter: {
+            nested: {
+              path: "parent.child",
+              query: {
+                match: {
+                  "parent.child.name": "matt",
+                },
+              },
+            },
+          },
+        },
+      },
     },
-    sort: [
-      {
-        'parent.child.age': {
-          mode: 'min',
-          order: 'asc',
+  },
+  sort: [
+    {
+      "parent.child.age": {
+        mode: "min",
+        order: "asc",
+        nested: {
+          path: "parent",
+          filter: {
+            range: {
+ "parent.age": { + gte: 21, + }, + }, + }, nested: { - path: 'parent', + path: "parent.child", filter: { - range: { - 'parent.age': { - gte: 21 - } - } + match: { + "parent.child.name": "matt", + }, }, - nested: { - path: 'parent.child', - filter: { - match: { - 'parent.child.name': 'matt' - } - } - } - } - } - } - ] - } -}) -console.log(response) + }, + }, + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/2238ac4170275f6cfc2af49c3f014cbc.asciidoc b/docs/doc_examples/2238ac4170275f6cfc2af49c3f014cbc.asciidoc new file mode 100644 index 000000000..4955d26be --- /dev/null +++ b/docs/doc_examples/2238ac4170275f6cfc2af49c3f014cbc.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + runtime_mappings: { + "grade.corrected": { + type: "double", + script: { + source: "emit(Math.min(100, doc['grade'].value * params.correction))", + params: { + correction: 1.2, + }, + }, + }, + }, + aggs: { + grades_stats: { + extended_stats: { + field: "grade.corrected", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/22619a4111f66e1b7231693b8f8d069a.asciidoc b/docs/doc_examples/22619a4111f66e1b7231693b8f8d069a.asciidoc new file mode 100644 index 000000000..99cef9cd2 --- /dev/null +++ b/docs/doc_examples/22619a4111f66e1b7231693b8f8d069a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.queryWatches({ + size: 100, +}); +console.log(response); +---- diff --git a/docs/doc_examples/22882d4eb8b99f44c8e0d3a2c893fc4b.asciidoc b/docs/doc_examples/22882d4eb8b99f44c8e0d3a2c893fc4b.asciidoc new file mode 100644 index 000000000..9ec51573d --- /dev/null +++ b/docs/doc_examples/22882d4eb8b99f44c8e0d3a2c893fc4b.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + "my-small": { + type: "keyword", + ignore_above: 2, + }, + "my-large": { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + "my-small": ["ok", "bad"], + "my-large": "ok content", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + fields: ["my-*"], + _source: false, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/229b83cbcd8efa1b0288a728a2abacb4.asciidoc b/docs/doc_examples/229b83cbcd8efa1b0288a728a2abacb4.asciidoc new file mode 100644 index 000000000..43d8b3d9a --- /dev/null +++ b/docs/doc_examples/229b83cbcd8efa1b0288a728a2abacb4.asciidoc @@ -0,0 +1,73 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + location: { + type: "point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Point as an object using GeoJSON format", + location: { + type: 
"Point", + coordinates: [-71.34, 41.12], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + text: "Point as a WKT POINT primitive", + location: "POINT (-71.34 41.12)", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my-index-000001", + id: 3, + document: { + text: "Point as an object with 'x' and 'y' keys", + location: { + x: -71.34, + y: 41.12, + }, + }, +}); +console.log(response3); + +const response4 = await client.index({ + index: "my-index-000001", + id: 4, + document: { + text: "Point as an array", + location: [-71.34, 41.12], + }, +}); +console.log(response4); + +const response5 = await client.index({ + index: "my-index-000001", + id: 5, + document: { + text: "Point as a string", + location: "-71.34,41.12", + }, +}); +console.log(response5); +---- diff --git a/docs/doc_examples/22cb99d4e6ba3101a2d9f59764a90877.asciidoc b/docs/doc_examples/22cb99d4e6ba3101a2d9f59764a90877.asciidoc new file mode 100644 index 000000000..27b5e33c7 --- /dev/null +++ b/docs/doc_examples/22cb99d4e6ba3101a2d9f59764a90877.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "POINT (-77.03653 38.897676)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/22d8e92b4100f8e4f52260ef8d3aa2b2.asciidoc b/docs/doc_examples/22d8e92b4100f8e4f52260ef8d3aa2b2.asciidoc new file mode 100644 index 000000000..064052311 --- /dev/null +++ b/docs/doc_examples/22d8e92b4100f8e4f52260ef8d3aa2b2.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + name: { + type: "text", + }, + blob: { + type: "binary", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + name: "Some binary blob", + blob: "U29tZSBiaW5hcnkgYmxvYg==", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/22dd833336fa22c8a8f67bb754ffba9a.asciidoc b/docs/doc_examples/22dd833336fa22c8a8f67bb754ffba9a.asciidoc new file mode 100644 index 000000000..852e26c4a --- /dev/null +++ b/docs/doc_examples/22dd833336fa22c8a8f67bb754ffba9a.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "azure-openai-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "azure_openai_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc b/docs/doc_examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc new file mode 100644 index 000000000..64ba4366e --- /dev/null +++ b/docs/doc_examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + 
+  tokenizer: "whitespace",
+  filter: [
+    "lowercase",
+    {
+      type: "stop",
+      stopwords: ["a", "is", "this"],
+    },
+  ],
+  text: "this is a test",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/22ef90a7fb057728d2115f0c6f551819.asciidoc b/docs/doc_examples/22ef90a7fb057728d2115f0c6f551819.asciidoc
new file mode 100644
index 000000000..bb51df733
--- /dev/null
+++ b/docs/doc_examples/22ef90a7fb057728d2115f0c6f551819.asciidoc
@@ -0,0 +1,36 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  aggs: {
+    price_ranges: {
+      range: {
+        field: "price",
+        ranges: [
+          {
+            to: 100,
+          },
+          {
+            from: 100,
+            to: 200,
+          },
+          {
+            from: 200,
+          },
+        ],
+      },
+      aggs: {
+        price_stats: {
+          stats: {
+            field: "price",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/23074748d6c978176df5b04265e88938.asciidoc b/docs/doc_examples/23074748d6c978176df5b04265e88938.asciidoc
new file mode 100644
index 000000000..a7648d1a2
--- /dev/null
+++ b/docs/doc_examples/23074748d6c978176df5b04265e88938.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.getSettings({
+  index: "my-index-000001",
+  name: "index.routing.allocation.include._tier_preference",
+  flat_settings: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/2308c9948cbebd2092eec03b11281005.asciidoc b/docs/doc_examples/2308c9948cbebd2092eec03b11281005.asciidoc
new file mode 100644
index 000000000..84f81083d
--- /dev/null
+++ b/docs/doc_examples/2308c9948cbebd2092eec03b11281005.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.createRepository({
+  name: "my_fs_backup",
+  repository: {
+    type: "fs",
+    settings: {
+      location: "E:\\Mount\\Backups\\My_fs_backup_location",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc b/docs/doc_examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc
new file mode 100644
index 000000000..60e82f591
--- /dev/null
+++ b/docs/doc_examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index",
+  query: {
+    text_expansion: {
+      "ml.tokens": {
+        model_id: ".elser_model_2",
+        model_text: "How is the weather in Jamaica?",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/231aa0bb39c35fe199d28fe0e4a62b2e.asciidoc b/docs/doc_examples/231aa0bb39c35fe199d28fe0e4a62b2e.asciidoc
deleted file mode 100644
index ab3c92126..000000000
--- a/docs/doc_examples/231aa0bb39c35fe199d28fe0e4a62b2e.asciidoc
+++ /dev/null
@@ -1,18 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  index: 'bank',
-  body: {
-    query: {
-      match_phrase: {
-        address: 'mill lane'
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/2342a56279106ea643026df657bf7f88.asciidoc b/docs/doc_examples/2342a56279106ea643026df657bf7f88.asciidoc
new file mode 100644
index 000000000..2369e4df7
--- /dev/null
+++ b/docs/doc_examples/2342a56279106ea643026df657bf7f88.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "index",
+  settings: {
+    index: {
+      similarity: {
+        my_similarity: {
+          type: "DFR",
+          basic_model: "g",
+          after_effect: "l",
+          normalization: "h2",
+          "normalization.h2.c": "3.0",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/234cec3ead32d7ed71afbe1edfea23df.asciidoc b/docs/doc_examples/234cec3ead32d7ed71afbe1edfea23df.asciidoc
new file mode 100644
index 000000000..8997d01a6
--- /dev/null
+++ b/docs/doc_examples/234cec3ead32d7ed71afbe1edfea23df.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    has_parent: {
+      parent_type: "parent",
+      score: true,
+      query: {
+        function_score: {
+          script_score: {
+            script: "_score * doc['view_count'].value",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/236f50d89a07b83119af72e367e685da.asciidoc b/docs/doc_examples/236f50d89a07b83119af72e367e685da.asciidoc
new file mode 100644
index 000000000..b79fd83ff
--- /dev/null
+++ b/docs/doc_examples/236f50d89a07b83119af72e367e685da.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ilm.putLifecycle({
+  name: "my_policy",
+  policy: {
+    phases: {
+      hot: {
+        actions: {
+          rollover: {
+            max_primary_shard_size: "50gb",
+            max_age: "30d",
+            min_primary_shard_size: "1gb",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/239f615e0009c5cb1dc4e82ec4c0dab5.asciidoc b/docs/doc_examples/239f615e0009c5cb1dc4e82ec4c0dab5.asciidoc
new file mode 100644
index 000000000..abeca2dfd
--- /dev/null
+++ b/docs/doc_examples/239f615e0009c5cb1dc4e82ec4c0dab5.asciidoc
@@ -0,0 +1,30 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.watcher.putWatch({
+  id: "cluster_health_watch",
+  trigger: {
+    schedule: {
+      interval: "10s",
+    },
+  },
+  input: {
+    http: {
+      request: {
+        host: "localhost",
+        port: 9200,
+        path: "/_cluster/health",
+        auth: {
+          basic: {
+            username: "elastic",
+            password: "x-pack-test-password",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/23af230e824f48b9cd56a4cf973d788c.asciidoc b/docs/doc_examples/23af230e824f48b9cd56a4cf973d788c.asciidoc
new file mode 100644
index 000000000..693b70a4f
--- /dev/null
+++ b/docs/doc_examples/23af230e824f48b9cd56a4cf973d788c.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putSettings({
+  index: "my-index-000001",
+  settings: {
+    "index.search.slowlog.threshold.query.warn": "10s",
+    "index.search.slowlog.threshold.query.info": "5s",
+    "index.search.slowlog.threshold.query.debug": "2s",
"index.search.slowlog.threshold.query.trace": "500ms", + "index.search.slowlog.threshold.fetch.warn": "1s", + "index.search.slowlog.threshold.fetch.info": "800ms", + "index.search.slowlog.threshold.fetch.debug": "500ms", + "index.search.slowlog.threshold.fetch.trace": "200ms", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/23b062c157235246d7c347b9047b2435.asciidoc b/docs/doc_examples/23b062c157235246d7c347b9047b2435.asciidoc new file mode 100644 index 000000000..93cb410f7 --- /dev/null +++ b/docs/doc_examples/23b062c157235246d7c347b9047b2435.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping1", + roles: ["user"], + enabled: true, + rules: { + field: { + username: "*", + }, + }, + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/23c4ae62f7035f2796e0ac3c7c4c20a9.asciidoc b/docs/doc_examples/23c4ae62f7035f2796e0ac3c7c4c20a9.asciidoc new file mode 100644 index 000000000..1709625f7 --- /dev/null +++ b/docs/doc_examples/23c4ae62f7035f2796e0ac3c7c4c20a9.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + migrate: {}, + allocate: { + number_of_replicas: 1, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2408020186af569a76a30eccadaed0d5.asciidoc b/docs/doc_examples/2408020186af569a76a30eccadaed0d5.asciidoc new file mode 100644 index 000000000..3b2bfd706 --- /dev/null +++ b/docs/doc_examples/2408020186af569a76a30eccadaed0d5.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + script: { + description: "Extract 'tags' from 'env' field", + lang: "painless", + source: + "\n String[] envSplit = ctx['env'].splitOnToken(params['delimiter']);\n ArrayList tags = new ArrayList();\n tags.add(envSplit[params['position']].trim());\n ctx['tags'] = tags;\n ", + params: { + delimiter: "-", + position: 1, + }, + }, + }, + ], + }, + docs: [ + { + _source: { + env: "es01-prod", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/24275847128b68da6e14233aa1259fb9.asciidoc b/docs/doc_examples/24275847128b68da6e14233aa1259fb9.asciidoc new file mode 100644 index 000000000..b556c6936 --- /dev/null +++ b/docs/doc_examples/24275847128b68da6e14233aa1259fb9.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "GET /search", + }, + }, + collapse: { + field: "user.id", + inner_hits: [ + { + name: "largest_responses", + size: 3, + sort: [ + { + "http.response.bytes": { + order: "desc", + }, + }, + ], + }, + { + name: "most_recent", + size: 3, + sort: [ + { + "@timestamp": { + order: "desc", + }, + }, + ], + }, + ], + }, + sort: ["http.response.bytes"], +}); +console.log(response); +---- diff --git 
diff --git a/docs/doc_examples/242a26ced0e5706e48dcda19a4003094.asciidoc b/docs/doc_examples/242a26ced0e5706e48dcda19a4003094.asciidoc
new file mode 100644
index 000000000..4542b8481
--- /dev/null
+++ b/docs/doc_examples/242a26ced0e5706e48dcda19a4003094.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.reindex({
+  source: {
+    remote: {
+      host: "http://otherhost:9200/",
+      username: "user",
+      password: "pass",
+    },
+    index: "my-index-000001",
+    query: {
+      match: {
+        test: "data",
+      },
+    },
+  },
+  dest: {
+    index: "my-new-index-000001",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/2493c25e1ef944bc4de0f726470bcdec.asciidoc b/docs/doc_examples/2493c25e1ef944bc4de0f726470bcdec.asciidoc
new file mode 100644
index 000000000..3496eecab
--- /dev/null
+++ b/docs/doc_examples/2493c25e1ef944bc4de0f726470bcdec.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.asyncSearch.submit({
+  index: "kibana_sample_data_ecommerce",
+  size: 0,
+  aggs: {
+    my_agg: {
+      frequent_item_sets: {
+        minimum_set_size: 3,
+        fields: [
+          {
+            field: "category.keyword",
+          },
+          {
+            field: "geoip.city_name",
+            exclude: "other",
+          },
+        ],
+        size: 3,
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/249bf48252c8cea47ef872541c8a884c.asciidoc b/docs/doc_examples/249bf48252c8cea47ef872541c8a884c.asciidoc
new file mode 100644
index 000000000..53430547f
--- /dev/null
+++ b/docs/doc_examples/249bf48252c8cea47ef872541c8a884c.asciidoc
@@ -0,0 +1,44 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.grantApiKey({
+  grant_type: "password",
+  username: "test_admin",
+  password: "x-pack-test-password",
+  api_key: {
+    name: "my-api-key",
+    expiration: "1d",
+    role_descriptors: {
+      "role-a": {
+        cluster: ["all"],
+        indices: [
+          {
+            names: ["index-a*"],
+            privileges: ["read"],
+          },
+        ],
+      },
+      "role-b": {
+        cluster: ["all"],
+        indices: [
+          {
+            names: ["index-b*"],
+            privileges: ["all"],
+          },
+        ],
+      },
+    },
+    metadata: {
+      application: "my-application",
+      environment: {
+        level: 1,
+        trusted: true,
+        tags: ["dev", "staging"],
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc b/docs/doc_examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc
new file mode 100644
index 000000000..a18387f52
--- /dev/null
+++ b/docs/doc_examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.evaluateDataFrame({
+  index: "student_performance_mathematics_reg",
+  query: {
+    term: {
+      "ml.is_training": {
+        value: true,
+      },
+    },
+  },
+  evaluation: {
+    regression: {
+      actual_field: "G3",
+      predicted_field: "ml.G3_prediction",
+      metrics: {
+        r_squared: {},
+        mse: {},
+        msle: {},
+        huber: {},
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc b/docs/doc_examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc
new file mode 100644
index 000000000..d00bf24fb
--- /dev/null
+++ b/docs/doc_examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.getTemplate({
+  name: "template_1,template_2",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/24bdccb07bba7e7e6ff45d3d4cd83064.asciidoc b/docs/doc_examples/24bdccb07bba7e7e6ff45d3d4cd83064.asciidoc
new file mode 100644
index 000000000..2dfb3e83e
--- /dev/null
+++ b/docs/doc_examples/24bdccb07bba7e7e6ff45d3d4cd83064.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.updateByQuery({
+  index: "my-data-stream",
+  pipeline: "my-pipeline",
+});
+console.log(response);
+
+const response1 = await client.reindex({
+  source: {
+    index: "my-data-stream",
+  },
+  dest: {
+    index: "my-new-data-stream",
+    op_type: "create",
+    pipeline: "my-pipeline",
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/24d66b2ebdf662d8b03e17214e65c825.asciidoc b/docs/doc_examples/24d66b2ebdf662d8b03e17214e65c825.asciidoc
new file mode 100644
index 000000000..a20bfd230
--- /dev/null
+++ b/docs/doc_examples/24d66b2ebdf662d8b03e17214e65c825.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putSettings({
+  persistent: {
+    "xpack.profiling.templates.enabled": false,
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/24d806d1803158dacd4dda73c4204d3e.asciidoc b/docs/doc_examples/24d806d1803158dacd4dda73c4204d3e.asciidoc
new file mode 100644
index 000000000..741ebc94c
--- /dev/null
+++ b/docs/doc_examples/24d806d1803158dacd4dda73c4204d3e.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "GET",
+  path: "/_query_rules/my-ruleset/_rule/my-rule1",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/24f4dfdf9922d5aa79151675b7767742.asciidoc b/docs/doc_examples/24f4dfdf9922d5aa79151675b7767742.asciidoc
new file mode 100644
index 000000000..a340d8afe
--- /dev/null
+++ b/docs/doc_examples/24f4dfdf9922d5aa79151675b7767742.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  scroll: "1m",
+  size: 100,
+  query: {
+    match: {
+      message: "foo",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/251ea12c1248385ab409906ac64d9ee9.asciidoc b/docs/doc_examples/251ea12c1248385ab409906ac64d9ee9.asciidoc
deleted file mode 100644
index 839a8e655..000000000
--- a/docs/doc_examples/251ea12c1248385ab409906ac64d9ee9.asciidoc
+++ /dev/null
@@ -1,28 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  index: 'bank',
-  body: {
-    query: {
-      bool: {
-        must: {
-          match_all: {}
-        },
-        filter: {
-          range: {
-            balance: {
-              gte: 20000,
-              lte: 30000
-            }
-          }
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/253140cb1e270e5ee23e15dbaeaaa0ea.asciidoc b/docs/doc_examples/253140cb1e270e5ee23e15dbaeaaa0ea.asciidoc
new file mode 100644
index 000000000..651e42711
--- /dev/null
+++ b/docs/doc_examples/253140cb1e270e5ee23e15dbaeaaa0ea.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.dataStreamsStats({
+  human: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/2533e4b36ae837eaecda08407ecb6383.asciidoc b/docs/doc_examples/2533e4b36ae837eaecda08407ecb6383.asciidoc
deleted file mode 100644
index 562215833..000000000
--- a/docs/doc_examples/2533e4b36ae837eaecda08407ecb6383.asciidoc
+++ /dev/null
@@ -1,26 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  body: {
-    suggest: {
-      'my-suggest-1': {
-        text: 'tring out Elasticsearch',
-        term: {
-          field: 'message'
-        }
-      },
-      'my-suggest-2': {
-        text: 'kmichy',
-        term: {
-          field: 'user'
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/25576b6773322f0929d4c635a940dba0.asciidoc b/docs/doc_examples/25576b6773322f0929d4c635a940dba0.asciidoc
new file mode 100644
index 000000000..5752db18f
--- /dev/null
+++ b/docs/doc_examples/25576b6773322f0929d4c635a940dba0.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    query_string: {
+      fields: ["title", "content"],
+      query: "this OR that OR thus",
+      type: "cross_fields",
+      minimum_should_match: 2,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/256eba7a77c8890a43afeda8ce8a3225.asciidoc b/docs/doc_examples/256eba7a77c8890a43afeda8ce8a3225.asciidoc
new file mode 100644
index 000000000..4a308455d
--- /dev/null
+++ b/docs/doc_examples/256eba7a77c8890a43afeda8ce8a3225.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "my-text-embeddings-pipeline",
+  description: "Text embedding pipeline",
+  processors: [
+    {
+      inference: {
+        model_id: "sentence-transformers__msmarco-minilm-l-12-v3",
+        target_field: "my_embeddings",
+        field_map: {
+          my_text_field: "text_field",
+        },
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/25737fd456fd317cc4cc2db76b6cf28e.asciidoc b/docs/doc_examples/25737fd456fd317cc4cc2db76b6cf28e.asciidoc
new file mode 100644
index 000000000..f621c76df
--- /dev/null
+++ b/docs/doc_examples/25737fd456fd317cc4cc2db76b6cf28e.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "test-000001",
+  aliases: {
+    "test-alias": {
+      is_write_index: true,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/2577acb462b95bd4394523cf2f8a661f.asciidoc b/docs/doc_examples/2577acb462b95bd4394523cf2f8a661f.asciidoc
new file mode 100644
index 000000000..9c0aff110
--- /dev/null
+++ b/docs/doc_examples/2577acb462b95bd4394523cf2f8a661f.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.esql.query({
+  format: "txt",
+  query:
+    "\n FROM library\n | SORT page_count DESC\n | KEEP name, author\n | LOOKUP era ON author\n | LIMIT 5\n ",
+  tables: {
+    era: {
+      author: {
+        keyword: [
+          "Frank Herbert",
+          "Peter F. Hamilton",
+          "Vernor Vinge",
+          "Alastair Reynolds",
+          "James S.A. Corey",
+        ],
+      },
+      era: {
+        keyword: ["The New Wave", "Diamond", "Diamond", "Diamond", "Hadron"],
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/2592e5361f7ea3b3dd1840f63d760dae.asciidoc b/docs/doc_examples/2592e5361f7ea3b3dd1840f63d760dae.asciidoc
new file mode 100644
index 000000000..2803a9d85
--- /dev/null
+++ b/docs/doc_examples/2592e5361f7ea3b3dd1840f63d760dae.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    more_like_this: {
+      fields: ["name.first", "name.last"],
+      like: [
+        {
+          _index: "marvel",
+          doc: {
+            name: {
+              first: "Ben",
+              last: "Grimm",
+            },
+            _doc: "You got no idea what I'd... what I'd give to be invisible.",
+          },
+        },
+        {
+          _index: "marvel",
+          _id: "2",
+        },
+      ],
+      min_term_freq: 1,
+      max_query_terms: 12,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/25981b7b3d55b87e1484586d57b695b1.asciidoc b/docs/doc_examples/25981b7b3d55b87e1484586d57b695b1.asciidoc
new file mode 100644
index 000000000..53895bd8a
--- /dev/null
+++ b/docs/doc_examples/25981b7b3d55b87e1484586d57b695b1.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "products",
+  id: 1567,
+  document: {
+    product: "r2d2",
+    details: "A resourceful astromech droid",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/25a0dad6547d432f5a3d394528f1c138.asciidoc b/docs/doc_examples/25a0dad6547d432f5a3d394528f1c138.asciidoc
new file mode 100644
index 000000000..2c1e36514
--- /dev/null
+++ b/docs/doc_examples/25a0dad6547d432f5a3d394528f1c138.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.get({
+  index: "my-index-000001",
+  id: 2,
+  routing: "user1",
+  stored_fields: "tags,counter",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/25ae1a698f867ba5139605cc952436c0.asciidoc b/docs/doc_examples/25ae1a698f867ba5139605cc952436c0.asciidoc
new file mode 100644
index 000000000..69d44a527
--- /dev/null
+++ b/docs/doc_examples/25ae1a698f867ba5139605cc952436c0.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "place",
+  pretty: "true",
+  suggest: {
+    place_suggestion: {
+      prefix: "tim",
+      completion: {
+        field: "suggest",
+        size: 10,
+        contexts: {
+          place_type: [
+            {
+              context: "cafe",
+            },
+            {
+              context: "restaurants",
+              boost: 2,
+            },
+          ],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc b/docs/doc_examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc
new file mode 100644
index 000000000..455f478b2
--- /dev/null
+++ b/docs/doc_examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cat.shards({
+  h: "index,shard,prirep,state,unassigned.reason",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/25cb9e1da00dfd971065ce182467434d.asciidoc b/docs/doc_examples/25cb9e1da00dfd971065ce182467434d.asciidoc
new file mode 100644
index 000000000..d3c98e64c
--- /dev/null
+++ b/docs/doc_examples/25cb9e1da00dfd971065ce182467434d.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.deleteVotingConfigExclusions();
+console.log(response);
+----
diff --git a/docs/doc_examples/25d40d3049e57e2bb70c2c5b88bd7b87.asciidoc b/docs/doc_examples/25d40d3049e57e2bb70c2c5b88bd7b87.asciidoc
new file mode 100644
index 000000000..03cd13b25
--- /dev/null
+++ b/docs/doc_examples/25d40d3049e57e2bb70c2c5b88bd7b87.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putSettings({
+  index: "_all",
+  settings: {
+    settings: {
+      "index.unassigned.node_left.delayed_timeout": "0",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/25ecfe423548ac1d7cc86de4a18c48c6.asciidoc b/docs/doc_examples/25ecfe423548ac1d7cc86de4a18c48c6.asciidoc
new file mode 100644
index 000000000..ed4157ca2
--- /dev/null
+++ b/docs/doc_examples/25ecfe423548ac1d7cc86de4a18c48c6.asciidoc
@@ -0,0 +1,34 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        my_analyzer: {
+          tokenizer: "standard",
+          char_filter: ["my_char_filter"],
+        },
+      },
+      char_filter: {
+        my_char_filter: {
+          type: "pattern_replace",
+          pattern: "(\\d+)-(?=\\d)",
+          replacement: "$1_",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.analyze({
+  index: "my-index-000001",
+  analyzer: "my_analyzer",
+  text: "My credit card is 123-456-789",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/25ed47fcb890fcf8d8518ae067362d18.asciidoc b/docs/doc_examples/25ed47fcb890fcf8d8518ae067362d18.asciidoc
new file mode 100644
index 000000000..cb23e8e5b
--- /dev/null
+++ b/docs/doc_examples/25ed47fcb890fcf8d8518ae067362d18.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "reviews",
+  size: 0,
+  aggs: {
+    review_average: {
+      avg: {
+        field: "rating",
+      },
+    },
+    review_variability: {
+      median_absolute_deviation: {
+        field: "rating",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/261480571394632db40e88fbb6c59c2f.asciidoc b/docs/doc_examples/261480571394632db40e88fbb6c59c2f.asciidoc
new file mode 100644
index 000000000..003b37404
--- /dev/null
b/docs/doc_examples/261480571394632db40e88fbb6c59c2f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.deleteRoleMapping({ + name: "mapping1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/26168987f799cdc4ee4151c85ba7afc5.asciidoc b/docs/doc_examples/26168987f799cdc4ee4151c85ba7afc5.asciidoc new file mode 100644 index 000000000..c6e55f91d --- /dev/null +++ b/docs/doc_examples/26168987f799cdc4ee4151c85ba7afc5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + filter_path: "aggregations", + aggs: { + "my-num-field-stats": { + stats: { + field: "my-num-field", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/262196e4323dfc1f8e6daf77d7ba3b6a.asciidoc b/docs/doc_examples/262196e4323dfc1f8e6daf77d7ba3b6a.asciidoc new file mode 100644 index 000000000..2dab9854b --- /dev/null +++ b/docs/doc_examples/262196e4323dfc1f8e6daf77d7ba3b6a.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_gcs_repository", + repository: { + type: "gcs", + settings: { + bucket: "my_other_bucket", + base_path: "dev", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc b/docs/doc_examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc new file mode 100644 index 000000000..240f9faa0 --- /dev/null +++ b/docs/doc_examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getBuiltinPrivileges(); +console.log(response); +---- diff --git a/docs/doc_examples/262a778d754add491fbc9c721ac25bf0.asciidoc b/docs/doc_examples/262a778d754add491fbc9c721ac25bf0.asciidoc new file mode 100644 index 000000000..225854c1a --- /dev/null +++ b/docs/doc_examples/262a778d754add491fbc9c721ac25bf0.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "whitespace", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/26419320085434680142567d5fda9c35.asciidoc b/docs/doc_examples/26419320085434680142567d5fda9c35.asciidoc new file mode 100644 index 000000000..533c239b2 --- /dev/null +++ b/docs/doc_examples/26419320085434680142567d5fda9c35.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "network-traffic", + size: 0, + aggs: { + "ipv4-subnets": { + ip_prefix: { + field: "ipv4", + prefix_length: 24, + min_doc_count: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2643b8c512cb3f3449259cdf498c6ab5.asciidoc b/docs/doc_examples/2643b8c512cb3f3449259cdf498c6ab5.asciidoc new 
file mode 100644 index 000000000..b01e6d6b0 --- /dev/null +++ b/docs/doc_examples/2643b8c512cb3f3449259cdf498c6ab5.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + }, + }, + }, + { + product: { + terms: { + field: "product", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2646710ece0c4c843aebeacd370d0396.asciidoc b/docs/doc_examples/2646710ece0c4c843aebeacd370d0396.asciidoc new file mode 100644 index 000000000..0c73b9820 --- /dev/null +++ b/docs/doc_examples/2646710ece0c4c843aebeacd370d0396.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-byte-quantized-index", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 3, + index: true, + index_options: { + type: "int8_hnsw", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/268151ed1f0e12586e66e614b61d7981.asciidoc b/docs/doc_examples/268151ed1f0e12586e66e614b61d7981.asciidoc new file mode 100644 index 000000000..e80c1d982 --- /dev/null +++ b/docs/doc_examples/268151ed1f0e12586e66e614b61d7981.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_polygon: { + "person.location": { + points: ["drn5x1g8cu2y", "30, -80", "20, -90"], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/26abfc49c238c2b5d259983ac38dbcee.asciidoc b/docs/doc_examples/26abfc49c238c2b5d259983ac38dbcee.asciidoc new file mode 100644 index 000000000..d6c0ddfef --- /dev/null +++ b/docs/doc_examples/26abfc49c238c2b5d259983ac38dbcee.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + query: { + simple_query_string: { + fields: ["body"], + quote_field_suffix: ".exact", + query: '"ski"', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/26bd8c027c82cd72c007c10fa66dc97f.asciidoc b/docs/doc_examples/26bd8c027c82cd72c007c10fa66dc97f.asciidoc new file mode 100644 index 000000000..79dc2c50d --- /dev/null +++ b/docs/doc_examples/26bd8c027c82cd72c007c10fa66dc97f.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", + indices: "*", + include_global_state: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/26d3ab748a855eb383e992eb1ff79662.asciidoc b/docs/doc_examples/26d3ab748a855eb383e992eb1ff79662.asciidoc new file mode 100644 index 000000000..151eb6f26 --- /dev/null +++ b/docs/doc_examples/26d3ab748a855eb383e992eb1ff79662.asciidoc @@ -0,0 +1,10 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.delete({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/26f237f9bf14e8b972cc33ff6aebefa2.asciidoc b/docs/doc_examples/26f237f9bf14e8b972cc33ff6aebefa2.asciidoc new file mode 100644 index 000000000..5982da74d --- /dev/null +++ b/docs/doc_examples/26f237f9bf14e8b972cc33ff6aebefa2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["stemmer"], + text: "fox running and jumping", +}); +console.log(response); +---- diff --git a/docs/doc_examples/270549e6b062228312c4e7a54a2c2209.asciidoc b/docs/doc_examples/270549e6b062228312c4e7a54a2c2209.asciidoc new file mode 100644 index 000000000..5aec6e804 --- /dev/null +++ b/docs/doc_examples/270549e6b062228312c4e7a54a2c2209.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.hotThreads(); +console.log(response); +---- diff --git a/docs/doc_examples/2716453454dbf9c6dde2ea6850a62214.asciidoc b/docs/doc_examples/2716453454dbf9c6dde2ea6850a62214.asciidoc new file mode 100644 index 000000000..925eec77b --- /dev/null +++ b/docs/doc_examples/2716453454dbf9c6dde2ea6850a62214.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "trips", + mappings: { + properties: { + distance: { + type: "long", + }, + route_length_miles: { + type: "alias", + path: "distance", + }, + transit_mode: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.search({ + query: { + range: { + route_length_miles: { + gte: 39, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/271fe0b452b62189505ce4a1d6f8bde1.asciidoc b/docs/doc_examples/271fe0b452b62189505ce4a1d6f8bde1.asciidoc new file mode 100644 index 000000000..449a9a930 --- /dev/null +++ b/docs/doc_examples/271fe0b452b62189505ce4a1d6f8bde1.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time", + keyed: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2720e613d520ce352b62e990c2d283f7.asciidoc b/docs/doc_examples/2720e613d520ce352b62e990c2d283f7.asciidoc new file mode 100644 index 000000000..0f57c9f94 --- /dev/null +++ b/docs/doc_examples/2720e613d520ce352b62e990c2d283f7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.removePolicy({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2731a8577ad734a732d784c5dcb1225d.asciidoc b/docs/doc_examples/2731a8577ad734a732d784c5dcb1225d.asciidoc new file 
mode 100644 index 000000000..db47f4d61 --- /dev/null +++ b/docs/doc_examples/2731a8577ad734a732d784c5dcb1225d.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "norwegian_example", + settings: { + analysis: { + filter: { + norwegian_stop: { + type: "stop", + stopwords: "_norwegian_", + }, + norwegian_keywords: { + type: "keyword_marker", + keywords: ["eksempel"], + }, + norwegian_stemmer: { + type: "stemmer", + language: "norwegian", + }, + }, + analyzer: { + rebuilt_norwegian: { + tokenizer: "standard", + filter: [ + "lowercase", + "norwegian_stop", + "norwegian_keywords", + "norwegian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/27384266370152add76471dd0332a2f1.asciidoc b/docs/doc_examples/27384266370152add76471dd0332a2f1.asciidoc new file mode 100644 index 000000000..4f87cfbc8 --- /dev/null +++ b/docs/doc_examples/27384266370152add76471dd0332a2f1.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.updateTransform({ + transform_id: "simple-kibana-ecomm-pivot", + source: { + index: "kibana_sample_data_ecommerce", + query: { + term: { + "geoip.continent_name": { + value: "Asia", + }, + }, + }, + }, + description: "Maximum priced ecommerce data by customer_id in Asia", + dest: { + index: "kibana_sample_data_ecommerce_transform_v2", + pipeline: "add_timestamp_pipeline", + }, + frequency: "15m", + sync: { + time: { + field: "order_date", + delay: "120s", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2740b69e7246ac6d1ad249382f21d534.asciidoc b/docs/doc_examples/2740b69e7246ac6d1ad249382f21d534.asciidoc new file mode 100644 index 000000000..4c09df81b --- /dev/null +++ b/docs/doc_examples/2740b69e7246ac6d1ad249382f21d534.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + "my-agg-metric-field": { + type: "aggregate_metric_double", + metrics: ["min", "max", "sum", "value_count"], + default_metric: "max", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/274feaaa727e0ddf61b3c0f093182839.asciidoc b/docs/doc_examples/274feaaa727e0ddf61b3c0f093182839.asciidoc new file mode 100644 index 000000000..322ec637b --- /dev/null +++ b/docs/doc_examples/274feaaa727e0ddf61b3c0f093182839.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + runtime_mappings: { + duration: { + type: "long", + script: { + source: + "\n emit(doc['measures.end'].value - doc['measures.start'].value);\n ", + }, + }, + }, + aggs: { + duration_stats: { + stats: { + field: "duration", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc b/docs/doc_examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc new file mode 100644 index 000000000..f6b5783c4 --- /dev/null +++ b/docs/doc_examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc 
@@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getIndexTemplate({ + name: "temp*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/27600d6a78623b69689d4218618e4278.asciidoc b/docs/doc_examples/27600d6a78623b69689d4218618e4278.asciidoc new file mode 100644 index 000000000..7565a7fce --- /dev/null +++ b/docs/doc_examples/27600d6a78623b69689d4218618e4278.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_index", + query: { + term: { + my_counter: 18446744073709552000, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/276e5b71ff5c6879a9b819076ad82301.asciidoc b/docs/doc_examples/276e5b71ff5c6879a9b819076ad82301.asciidoc new file mode 100644 index 000000000..40494f10d --- /dev/null +++ b/docs/doc_examples/276e5b71ff5c6879a9b819076ad82301.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_dense_vector: { + type: "dense_vector", + index: false, + dims: 3, + }, + my_byte_dense_vector: { + type: "dense_vector", + index: false, + dims: 3, + element_type: "byte", + }, + status: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + my_dense_vector: [0.5, 10, 6], + my_byte_dense_vector: [0, 10, 6], + status: "published", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + my_dense_vector: [-0.5, 10, 10], + my_byte_dense_vector: [0, 10, 10], + status: "published", + }, +}); +console.log(response2); + +const response3 = await client.indices.refresh({ + index: "my-index-000001", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/277fefe2b623af61f8274f73efc97aed.asciidoc b/docs/doc_examples/277fefe2b623af61f8274f73efc97aed.asciidoc new file mode 100644 index 000000000..93632b514 --- /dev/null +++ b/docs/doc_examples/277fefe2b623af61f8274f73efc97aed.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.scriptsPainlessExecute({ + script: { + source: + '\n String response=dissect(\'%{clientip} %{ident} %{auth} [%{@timestamp}] "%{verb} %{request} HTTP/%{httpversion}" %{response} %{size}\').extract(doc["message"].value)?.response;\n if (response != null) emit(Integer.parseInt(response)); \n ', + }, + context: "long_field", + context_setup: { + index: "my-index", + document: { + message: + '247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] "GET /images/hm_nbg.jpg HTTP/1.0" 304 0', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fbcf5078a6a9e09790553804054c36b3.asciidoc b/docs/doc_examples/278d5bfa1a01f91d5c84679ef1bca390.asciidoc similarity index 75% rename from docs/doc_examples/fbcf5078a6a9e09790553804054c36b3.asciidoc rename to docs/doc_examples/278d5bfa1a01f91d5c84679ef1bca390.asciidoc index 37c29d110..a8cdb26df 100644 --- 
a/docs/doc_examples/fbcf5078a6a9e09790553804054c36b3.asciidoc +++ b/docs/doc_examples/278d5bfa1a01f91d5c84679ef1bca390.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.get({ - index: 'twitter', - id: '0' -}) -console.log(response) + index: "products", + id: 1567, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/2793fa53b7d269852aa74f6bf57e34dc.asciidoc b/docs/doc_examples/2793fa53b7d269852aa74f6bf57e34dc.asciidoc new file mode 100644 index 000000000..bce452553 --- /dev/null +++ b/docs/doc_examples/2793fa53b7d269852aa74f6bf57e34dc.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "ngram_custom_example", + settings: { + index: { + max_ngram_diff: 2, + }, + analysis: { + analyzer: { + default: { + tokenizer: "whitespace", + filter: ["3_5_grams"], + }, + }, + filter: { + "3_5_grams": { + type: "ngram", + min_gram: 3, + max_gram: 5, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/279e2b29261971999923fdc658bba8ff.asciidoc b/docs/doc_examples/279e2b29261971999923fdc658bba8ff.asciidoc new file mode 100644 index 000000000..830bae7cd --- /dev/null +++ b/docs/doc_examples/279e2b29261971999923fdc658bba8ff.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + _source: { + includes: ["obj1.*", "obj2.*"], + excludes: ["*.description"], + }, + query: { + term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/27f9f604e7a48799fa30529cbc0ff619.asciidoc b/docs/doc_examples/27f9f604e7a48799fa30529cbc0ff619.asciidoc new file mode 100644 index 000000000..74236de4d --- /dev/null +++ b/docs/doc_examples/27f9f604e7a48799fa30529cbc0ff619.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "delimited_payload_example", + settings: { + analysis: { + analyzer: { + whitespace_plus_delimited: { + tokenizer: "whitespace", + filter: ["plus_delimited"], + }, + }, + filter: { + plus_delimited: { + type: "delimited_payload", + delimiter: "+", + encoding: "int", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2826510e4aeb1c0d8dc43d317ed7624a.asciidoc b/docs/doc_examples/2826510e4aeb1c0d8dc43d317ed7624a.asciidoc new file mode 100644 index 000000000..722ee1661 --- /dev/null +++ b/docs/doc_examples/2826510e4aeb1c0d8dc43d317ed7624a.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + bool: { + type: "boolean", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + bool: [true, false, true, false], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/282e9e845b606f29a5bba174ae4c4c4d.asciidoc b/docs/doc_examples/282e9e845b606f29a5bba174ae4c4c4d.asciidoc new file mode 100644 index 000000000..749b74ca4 --- /dev/null +++ 
b/docs/doc_examples/282e9e845b606f29a5bba174ae4c4c4d.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-restricted-api-key", + expiration: "7d", + role_descriptors: { + "my-restricted-role-descriptor": { + indices: [ + { + names: ["website-product-search"], + privileges: ["read"], + }, + ], + restriction: { + workflows: ["search_application_query"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/28415647fced5f983b42f8435332a625.asciidoc b/docs/doc_examples/28415647fced5f983b42f8435332a625.asciidoc new file mode 100644 index 000000000..bc11f9997 --- /dev/null +++ b/docs/doc_examples/28415647fced5f983b42f8435332a625.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + lowercase: { + field: "my-keyword-field", + }, + }, + ], + }, + docs: [ + { + _source: { + "my-keyword-field": "FOO", + }, + }, + { + _source: { + "my-keyword-field": "BAR", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/28543836b62b5622a402e6f7731d68f0.asciidoc b/docs/doc_examples/28543836b62b5622a402e6f7731d68f0.asciidoc new file mode 100644 index 000000000..c8cd51511 --- /dev/null +++ b/docs/doc_examples/28543836b62b5622a402e6f7731d68f0.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.downsample({ + index: ".ds-my-data-stream-2023.07.26-000001", + target_index: ".ds-my-data-stream-2023.07.26-000001-downsample", + config: { + fixed_interval: "1h", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/609260ad1d5998be2ca09ff1fe237efa.asciidoc b/docs/doc_examples/2856a5ceff1861aa9a78099f1c517fe7.asciidoc similarity index 80% rename from docs/doc_examples/609260ad1d5998be2ca09ff1fe237efa.asciidoc rename to docs/doc_examples/2856a5ceff1861aa9a78099f1c517fe7.asciidoc index f3aa65766..85471c3a9 100644 --- a/docs/doc_examples/609260ad1d5998be2ca09ff1fe237efa.asciidoc +++ b/docs/doc_examples/2856a5ceff1861aa9a78099f1c517fe7.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.indices.getMapping({ - index: 'my-index' -}) -console.log(response) + index: ".watches", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/2864a24608b3ac59d21f604f8a31d131.asciidoc b/docs/doc_examples/2864a24608b3ac59d21f604f8a31d131.asciidoc new file mode 100644 index 000000000..7e2ac94ce --- /dev/null +++ b/docs/doc_examples/2864a24608b3ac59d21f604f8a31d131.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "jwt_role1", + refresh: "true", + cluster: ["manage"], + indices: [ + { + names: ["*"], + privileges: ["read"], + }, + ], + run_as: ["user123_runas"], + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc b/docs/doc_examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc new file mode 100644 index 000000000..4463693d5 --- /dev/null +++ 
b/docs/doc_examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.putAutoscalingPolicy({ + name: "", + policy: { + roles: [], + deciders: { + fixed: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2879d7bf4167194b102bf97117327164.asciidoc b/docs/doc_examples/2879d7bf4167194b102bf97117327164.asciidoc new file mode 100644 index 000000000..c26a84b6d --- /dev/null +++ b/docs/doc_examples/2879d7bf4167194b102bf97117327164.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + char_filter: ["html_strip"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc b/docs/doc_examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc new file mode 100644 index 000000000..a9a3ee25f --- /dev/null +++ b/docs/doc_examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-image-index", + size: 3, + query: { + knn: { + field: "image-vector", + query_vector: [-5, 9, -12], + k: 10, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2891aa10ee9d474780adf94d5607f2db.asciidoc b/docs/doc_examples/2891aa10ee9d474780adf94d5607f2db.asciidoc index 755d0b234..ccbd72aca 100644 --- a/docs/doc_examples/2891aa10ee9d474780adf94d5607f2db.asciidoc +++ b/docs/doc_examples/2891aa10ee9d474780adf94d5607f2db.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.search({ - index: 'index_long,index_double', - body: { - sort: [ - { - field: { - numeric_type: 'double' - } - } - ] - } -}) -console.log(response) + index: "index_long,index_double", + sort: [ + { + field: { + numeric_type: "double", + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/2897ccc2a3bf3d0cd89328ee4413fae5.asciidoc b/docs/doc_examples/2897ccc2a3bf3d0cd89328ee4413fae5.asciidoc new file mode 100644 index 000000000..d1ba94ce5 --- /dev/null +++ b/docs/doc_examples/2897ccc2a3bf3d0cd89328ee4413fae5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.get({ + id: "FklQYndoTDJ2VEFlMEVBTzFJMGhJVFEaLVlKYndBWWZSMUdicUc4WVlEaFl4ZzoxNTU=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2898cf033b5bdefdbe3723af850b25c5.asciidoc b/docs/doc_examples/2898cf033b5bdefdbe3723af850b25c5.asciidoc new file mode 100644 index 000000000..fdf8b3f45 --- /dev/null +++ b/docs/doc_examples/2898cf033b5bdefdbe3723af850b25c5.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "GET /search", + }, + }, + collapse: { + field: "user.id", + inner_hits: { + name: 
"most_recent", + size: 5, + sort: [ + { + "@timestamp": "desc", + }, + ], + }, + max_concurrent_group_searches: 4, + }, + sort: [ + { + "http.response.bytes": { + order: "desc", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/28aad2c5942bfb221c2bf1bbdc01658e.asciidoc b/docs/doc_examples/28aad2c5942bfb221c2bf1bbdc01658e.asciidoc deleted file mode 100644 index 5068af57e..000000000 --- a/docs/doc_examples/28aad2c5942bfb221c2bf1bbdc01658e.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'city.*' - ], - query: 'this AND that OR thus' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/28ac880057135e46b3b00c7f3976538c.asciidoc b/docs/doc_examples/28ac880057135e46b3b00c7f3976538c.asciidoc new file mode 100644 index 000000000..326307582 --- /dev/null +++ b/docs/doc_examples/28ac880057135e46b3b00c7f3976538c.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "test", + settings: { + "index.routing.allocation.include._ip": "192.168.2.*", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/291110f4cac02f4610d0853f5800a70d.asciidoc b/docs/doc_examples/291110f4cac02f4610d0853f5800a70d.asciidoc new file mode 100644 index 000000000..bf8fdb708 --- /dev/null +++ b/docs/doc_examples/291110f4cac02f4610d0853f5800a70d.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + weighted_grade: { + weighted_avg: { + value: { + field: "grade", + missing: 2, + }, + weight: { + field: "weight", + missing: 3, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb01106bf524df5e0501d4c655c1aa7b.asciidoc b/docs/doc_examples/2932e6f71e247cf52e11d2f38f114ddf.asciidoc similarity index 53% rename from docs/doc_examples/cb01106bf524df5e0501d4c655c1aa7b.asciidoc rename to docs/doc_examples/2932e6f71e247cf52e11d2f38f114ddf.asciidoc index ed57b73c3..e8724553d 100644 --- a/docs/doc_examples/cb01106bf524df5e0501d4c655c1aa7b.asciidoc +++ b/docs/doc_examples/2932e6f71e247cf52e11d2f38f114ddf.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.reindex({ - slices: '5', - refresh: true, - body: { - source: { - index: 'twitter' - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response) + slices: 5, + refresh: "true", + source: { + index: "my-index-000001", + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/295b3aaeb223612afdd991744dc9c873.asciidoc b/docs/doc_examples/295b3aaeb223612afdd991744dc9c873.asciidoc new file mode 100644 index 000000000..efdcdd845 --- /dev/null +++ b/docs/doc_examples/295b3aaeb223612afdd991744dc9c873.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my_test_scores_pipeline", + description: "Calculates the total test score", + 
processors: [ + { + script: { + source: "ctx.total_score = (ctx.math_score + ctx.verbal_score)", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/29783e5de3a5f3c985cbf11094cf49a0.asciidoc b/docs/doc_examples/29783e5de3a5f3c985cbf11094cf49a0.asciidoc new file mode 100644 index 000000000..5a876f211 --- /dev/null +++ b/docs/doc_examples/29783e5de3a5f3c985cbf11094cf49a0.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["keyword_repeat", "stemmer", "remove_duplicates"], + text: "fox running and jumping", + explain: true, + attributes: "keyword", +}); +console.log(response); +---- diff --git a/docs/doc_examples/29824032d7d64512d17458fdd687b1f6.asciidoc b/docs/doc_examples/29824032d7d64512d17458fdd687b1f6.asciidoc new file mode 100644 index 000000000..2baa04ac3 --- /dev/null +++ b/docs/doc_examples/29824032d7d64512d17458fdd687b1f6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + parent_task_id: "oTUltX4IQMOUUVeiohTt8A:123", +}); +console.log(response); +---- diff --git a/docs/doc_examples/29953082744b7a36e437b392a6391c81.asciidoc b/docs/doc_examples/29953082744b7a36e437b392a6391c81.asciidoc new file mode 100644 index 000000000..42b500db9 --- /dev/null +++ b/docs/doc_examples/29953082744b7a36e437b392a6391c81.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + id: "my-search-template", + params: { + from: 20, + size: 10, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99a52be903945b17e734a1d02a57e958.asciidoc b/docs/doc_examples/299900fb08da80fe455cf3f1bb7d62ee.asciidoc similarity index 73% rename from docs/doc_examples/99a52be903945b17e734a1d02a57e958.asciidoc rename to docs/doc_examples/299900fb08da80fe455cf3f1bb7d62ee.asciidoc index 0eae9d3c7..2ace94737 100644 --- a/docs/doc_examples/99a52be903945b17e734a1d02a57e958.asciidoc +++ b/docs/doc_examples/299900fb08da80fe455cf3f1bb7d62ee.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.indices.getFieldMapping({ - index: 'my-index', - fields: 'employee-id' -}) -console.log(response) + index: "publications", + fields: "title", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/29d9df958de292cec50daaf31844b573.asciidoc b/docs/doc_examples/29d9df958de292cec50daaf31844b573.asciidoc new file mode 100644 index 000000000..d0e279e5b --- /dev/null +++ b/docs/doc_examples/29d9df958de292cec50daaf31844b573.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "my-index-000001,my-index-000002", + fields: "message", +}); +console.log(response); +---- diff --git a/docs/doc_examples/29e002ab596bae58712eb048ac1768d1.asciidoc b/docs/doc_examples/29e002ab596bae58712eb048ac1768d1.asciidoc new file mode 100644 index 000000000..351d18c99 --- /dev/null +++ b/docs/doc_examples/29e002ab596bae58712eb048ac1768d1.asciidoc @@ -0,0 +1,16 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + routing: "xyz", + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "You know for search!", + "user.id": "xyz", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc new file mode 100644 index 000000000..a0a9b3805 --- /dev/null +++ b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.oidcLogout({}); +console.log(response); +---- diff --git a/docs/doc_examples/2a247e36a86a373bcbf478ac9a588f44.asciidoc b/docs/doc_examples/2a247e36a86a373bcbf478ac9a588f44.asciidoc new file mode 100644 index 000000000..c4260ab6c --- /dev/null +++ b/docs/doc_examples/2a247e36a86a373bcbf478ac9a588f44.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + routing: "kimchy", + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "GET /search HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a287d213a812b98d8353c563a058cfc.asciidoc b/docs/doc_examples/2a287d213a812b98d8353c563a058cfc.asciidoc new file mode 100644 index 000000000..cf586f9f0 --- /dev/null +++ b/docs/doc_examples/2a287d213a812b98d8353c563a058cfc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_boxplot: { + boxplot: { + field: "load_time", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a44d254e6e32abe97515fd2eb34705d.asciidoc b/docs/doc_examples/2a44d254e6e32abe97515fd2eb34705d.asciidoc new file mode 100644 index 000000000..a7fd770cf --- /dev/null +++ b/docs/doc_examples/2a44d254e6e32abe97515fd2eb34705d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.getAsync({ + id: "FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=", + wait_for_completion_timeout: "2s", + format: "json", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a47d11c6e19c9da5104e738359ea8a8.asciidoc b/docs/doc_examples/2a47d11c6e19c9da5104e738359ea8a8.asciidoc new file mode 100644 index 000000000..186d8540d --- /dev/null +++ b/docs/doc_examples/2a47d11c6e19c9da5104e738359ea8a8.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.start(); +console.log(response); +---- diff --git a/docs/doc_examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc b/docs/doc_examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc new file mode 100644 index 000000000..c51f168fc --- /dev/null +++ 
b/docs/doc_examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + id: "index_compare", + source: { + index: ["index1", "index2"], + query: { + match_all: {}, + }, + }, + dest: { + index: "compare", + }, + pivot: { + group_by: { + "unique-id": { + terms: { + field: "", + }, + }, + }, + aggregations: { + compare: { + scripted_metric: { + map_script: "state.doc = new HashMap(params['_source'])", + combine_script: "return state", + reduce_script: + ' \n if (states.size() != 2) {\n return "count_mismatch"\n }\n if (states.get(0).equals(states.get(1))) {\n return "match"\n } else {\n return "mismatch"\n }\n ', + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a70194ebd2f01a3229a5092513676b3.asciidoc b/docs/doc_examples/2a70194ebd2f01a3229a5092513676b3.asciidoc new file mode 100644 index 000000000..27d8630e1 --- /dev/null +++ b/docs/doc_examples/2a70194ebd2f01a3229a5092513676b3.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + char_filter: ["my_custom_html_strip_char_filter"], + }, + }, + char_filter: { + my_custom_html_strip_char_filter: { + type: "html_strip", + escaped_tags: ["b"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a71e2d7f7179dd76183d30789046808.asciidoc b/docs/doc_examples/2a71e2d7f7179dd76183d30789046808.asciidoc new file mode 100644 index 000000000..d83750efc --- /dev/null +++ b/docs/doc_examples/2a71e2d7f7179dd76183d30789046808.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "mv", + refresh: "true", + operations: [ + { + index: {}, + }, + { + a: 1, + b: [2, 1], + }, + { + index: {}, + }, + { + a: 2, + b: 3, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a91e1fb8ad93a188fa9d77ec01bc431.asciidoc b/docs/doc_examples/2a91e1fb8ad93a188fa9d77ec01bc431.asciidoc new file mode 100644 index 000000000..4a364d79c --- /dev/null +++ b/docs/doc_examples/2a91e1fb8ad93a188fa9d77ec01bc431.asciidoc @@ -0,0 +1,53 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + comments: { + type: "nested", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + id: 1, + refresh: "true", + document: { + title: "Test title", + comments: [ + { + author: "kimchy", + number: 1, + }, + { + author: "nik9000", + number: 2, + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "test", + query: { + nested: { + path: "comments", + query: { + match: { + "comments.number": 2, + }, + }, + inner_hits: {}, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/2a9747bcfaf1f9491ebd410b3fcb6798.asciidoc 
b/docs/doc_examples/2a9747bcfaf1f9491ebd410b3fcb6798.asciidoc new file mode 100644 index 000000000..bf26b94bf --- /dev/null +++ b/docs/doc_examples/2a9747bcfaf1f9491ebd410b3fcb6798.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + query: "(new york city) OR (big apple)", + default_field: "content", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a9d3119a9e26e29220be436b9382955.asciidoc b/docs/doc_examples/2a9d3119a9e26e29220be436b9382955.asciidoc new file mode 100644 index 000000000..e32079ad0 --- /dev/null +++ b/docs/doc_examples/2a9d3119a9e26e29220be436b9382955.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "mistral-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1024, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc b/docs/doc_examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc new file mode 100644 index 000000000..649f56d45 --- /dev/null +++ b/docs/doc_examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.deleteWatch({ + id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2abfe0d3f5593d23d2dfa608b1e2532a.asciidoc b/docs/doc_examples/2abfe0d3f5593d23d2dfa608b1e2532a.asciidoc new file mode 100644 index 000000000..3c45e67f9 --- /dev/null +++ b/docs/doc_examples/2abfe0d3f5593d23d2dfa608b1e2532a.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + user_name: { + terms: { + field: "user_name", + }, + }, + }, + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + order: "desc", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ac37c3c572170ded67f1d5a0c8151ab.asciidoc b/docs/doc_examples/2ac37c3c572170ded67f1d5a0c8151ab.asciidoc new file mode 100644 index 000000000..f3d8511aa --- /dev/null +++ b/docs/doc_examples/2ac37c3c572170ded67f1d5a0c8151ab.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + tiebreaker_field: "event.sequence", + query: + '\n process where process.name == "cmd.exe" and stringContains(process.executable, "System32")\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ac7efe3919ee0c7971f5d502f482662.asciidoc b/docs/doc_examples/2ac7efe3919ee0c7971f5d502f482662.asciidoc new file mode 100644 index 000000000..22b4c7525 --- /dev/null +++ b/docs/doc_examples/2ac7efe3919ee0c7971f5d502f482662.asciidoc @@ -0,0 +1,29 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: "1 / (1 + l1norm(params.queryVector, 'my_dense_vector'))", + params: { + queryVector: [4, 3.4, -0.2], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f3b3e207f79303ce6f86e03e928e062.asciidoc b/docs/doc_examples/2ad35a13262f98574a48f88b4a838512.asciidoc similarity index 74% rename from docs/doc_examples/3f3b3e207f79303ce6f86e03e928e062.asciidoc rename to docs/doc_examples/2ad35a13262f98574a48f88b4a838512.asciidoc index 828854a64..b774c6457 100644 --- a/docs/doc_examples/3f3b3e207f79303ce6f86e03e928e062.asciidoc +++ b/docs/doc_examples/2ad35a13262f98574a48f88b4a838512.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.get({ - index: 'customer', - id: '1' -}) -console.log(response) + index: "current_year", + id: 1, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/2ade05fb3fb06a67df25e097dfadb045.asciidoc b/docs/doc_examples/2ade05fb3fb06a67df25e097dfadb045.asciidoc new file mode 100644 index 000000000..02eb444d9 --- /dev/null +++ b/docs/doc_examples/2ade05fb3fb06a67df25e097dfadb045.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2aec92bc31bc24bce58d983738f9e0fe.asciidoc b/docs/doc_examples/2aec92bc31bc24bce58d983738f9e0fe.asciidoc new file mode 100644 index 000000000..a2acce444 --- /dev/null +++ b/docs/doc_examples/2aec92bc31bc24bce58d983738f9e0fe.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + matrixstats: { + matrix_stats: { + fields: ["poverty", "income"], + missing: { + income: 50000, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2afc1231679898bd864d06679d9e951b.asciidoc b/docs/doc_examples/2afc1231679898bd864d06679d9e951b.asciidoc new file mode 100644 index 000000000..61ad3f6d7 --- /dev/null +++ b/docs/doc_examples/2afc1231679898bd864d06679d9e951b.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + histo: { + date_histogram: { + field: "date", + calendar_interval: "day", + }, + aggs: { + categories: { + terms: { + field: "category", + }, + }, + min_bucket_selector: { + bucket_selector: { + buckets_path: { + count: "categories._bucket_count", + }, + script: { + source: "params.count != 0", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc new file mode 100644 index 000000000..32a8ae35c --- /dev/null +++ b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQueryGet({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + wait_for_completion_timeout: "30s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc b/docs/doc_examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc new file mode 100644 index 000000000..3e9c8d773 --- /dev/null +++ b/docs/doc_examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job", + querystring: { + job_type: "full,incremental", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2b47be4b712147a429102aef386470ee.asciidoc b/docs/doc_examples/2b47be4b712147a429102aef386470ee.asciidoc new file mode 100644 index 000000000..87f348ef1 --- /dev/null +++ b/docs/doc_examples/2b47be4b712147a429102aef386470ee.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence by process.pid\n [process where process.name == "regsvr32.exe"]\n [library where dll.name == "scrobj.dll"]\n [network where true]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/2b59b014349d45bf894aca90b2b1fbe0.asciidoc b/docs/doc_examples/2b59b014349d45bf894aca90b2b1fbe0.asciidoc new file mode 100644 index 000000000..e5501b1a8 --- /dev/null +++ b/docs/doc_examples/2b59b014349d45bf894aca90b2b1fbe0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2b5a5f8689f04d095fa86570130ee4d4.asciidoc b/docs/doc_examples/2b5a5f8689f04d095fa86570130ee4d4.asciidoc new file mode 100644 index 000000000..94216bc5f --- /dev/null +++ b/docs/doc_examples/2b5a5f8689f04d095fa86570130ee4d4.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_id: { + type: "keyword", + }, + my_join_field: { + type: "join", + relations: { + question: "answer", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc b/docs/doc_examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc new file mode 100644 index 000000000..bb81adede --- /dev/null +++ b/docs/doc_examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryApiKeys({ + size: 0, + query: { + bool: { + filter: { + term: { + invalidated: true, + }, + }, + }, + }, + aggs: { + invalidated_keys: { + composite: { + sources: [ + { + username: { + terms: { + field: "username", + }, + }, + }, + { + key_name: { + 
terms: { + field: "name", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2b7687e3d7c06824950e00618c297864.asciidoc b/docs/doc_examples/2b7687e3d7c06824950e00618c297864.asciidoc new file mode 100644 index 000000000..10757782b --- /dev/null +++ b/docs/doc_examples/2b7687e3d7c06824950e00618c297864.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveCluster({ + name: "my-index*,clust*:my-index*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ba15c066d55a9b26d49b09471151cb4.asciidoc b/docs/doc_examples/2ba15c066d55a9b26d49b09471151cb4.asciidoc new file mode 100644 index 000000000..792e02cbe --- /dev/null +++ b/docs/doc_examples/2ba15c066d55a9b26d49b09471151cb4.asciidoc @@ -0,0 +1,66 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "emails", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + accounts: ["hillary", "sidney"], + }, + { + index: { + _id: 2, + }, + }, + { + accounts: ["hillary", "donald"], + }, + { + index: { + _id: 3, + }, + }, + { + accounts: ["vladimir", "donald"], + }, + ], +}); +console.log(response); + +const response1 = await client.search({ + index: "emails", + size: 0, + aggs: { + interactions: { + adjacency_matrix: { + filters: { + grpA: { + terms: { + accounts: ["hillary", "sidney"], + }, + }, + grpB: { + terms: { + accounts: ["donald", "mitt"], + }, + }, + grpC: { + terms: { + accounts: ["vladimir", "nigel"], + }, + }, + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc b/docs/doc_examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc new file mode 100644 index 000000000..ae2742968 --- /dev/null +++ b/docs/doc_examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + sort: [ + { + post_date: { + order: "asc", + format: "strict_date_optional_time_nanos", + }, + }, + "user", + { + name: "desc", + }, + { + age: "desc", + }, + "_score", + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2bb2339ac055337abf753bddb7771659.asciidoc b/docs/doc_examples/2bb2339ac055337abf753bddb7771659.asciidoc deleted file mode 100644 index db8dd504e..000000000 --- a/docs/doc_examples/2bb2339ac055337abf753bddb7771659.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'sales', - size: '0', - body: { - aggs: { - sales_over_time: { - date_histogram: { - field: 'date', - fixed_interval: '2w' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc b/docs/doc_examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc new file mode 100644 index 000000000..5317e039e --- /dev/null +++ b/docs/doc_examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT 
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.indices.close.enable": false, + "indices.recovery.max_bytes_per_sec": "50mb", + }, + transient: { + "*": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2bc1d52efec2076dc9fc2a3a2d90e8ab.asciidoc b/docs/doc_examples/2bc1d52efec2076dc9fc2a3a2d90e8ab.asciidoc new file mode 100644 index 000000000..18da0ad1c --- /dev/null +++ b/docs/doc_examples/2bc1d52efec2076dc9fc2a3a2d90e8ab.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_boxplot: { + boxplot: { + field: "load_time", + execution_hint: "high_accuracy", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2bc57cd3f32b59b0b44ca63b19cdfcc0.asciidoc b/docs/doc_examples/2bc57cd3f32b59b0b44ca63b19cdfcc0.asciidoc new file mode 100644 index 000000000..96f64fa4d --- /dev/null +++ b/docs/doc_examples/2bc57cd3f32b59b0b44ca63b19cdfcc0.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", + knn: { + field: "image-vector", + query_vector: [1, 5, -20], + k: 5, + num_candidates: 50, + similarity: 36, + filter: { + term: { + "file-type": "png", + }, + }, + }, + fields: ["title"], + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c090fe7ec7b66b3f5c178d71c46323b.asciidoc b/docs/doc_examples/2c090fe7ec7b66b3f5c178d71c46323b.asciidoc new file mode 100644 index 000000000..edd0fc3cd --- /dev/null +++ b/docs/doc_examples/2c090fe7ec7b66b3f5c178d71c46323b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + metric: "fielddata", + human: "true", + fields: "my_join_field", +}); +console.log(response); + +const response1 = await client.nodes.stats({ + metric: "indices", + index_metric: "fielddata", + human: "true", + fields: "my_join_field", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc b/docs/doc_examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc new file mode 100644 index 000000000..02df5c874 --- /dev/null +++ b/docs/doc_examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.health({ + v: "true", + ts: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc b/docs/doc_examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc new file mode 100644 index 000000000..be41e40e4 --- /dev/null +++ b/docs/doc_examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.putSynonymRule({ + set_id: "my-synonyms-set", + rule_id: "test-1", + synonyms: "hello, hi, 
howdy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c27a8eb6528126f37a843d434cd88b6.asciidoc b/docs/doc_examples/2c27a8eb6528126f37a843d434cd88b6.asciidoc new file mode 100644 index 000000000..a3ecc0a0d --- /dev/null +++ b/docs/doc_examples/2c27a8eb6528126f37a843d434cd88b6.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "synonym_graph", + synonyms: ["dns, domain name system"], + }, + ], + text: "domain name system is fragile", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc b/docs/doc_examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc new file mode 100644 index 000000000..914a06153 --- /dev/null +++ b/docs/doc_examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: ".ds-my-data-stream-2023.07.26-000001", + block: "write", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc b/docs/doc_examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc new file mode 100644 index 000000000..b4a13111d --- /dev/null +++ b/docs/doc_examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001", + query: { + term: { + "user.id": "kimchy", + }, + }, + max_docs: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c44657adf550b8ade5cf5334106d38b.asciidoc b/docs/doc_examples/2c44657adf550b8ade5cf5334106d38b.asciidoc new file mode 100644 index 000000000..60ba299c3 --- /dev/null +++ b/docs/doc_examples/2c44657adf550b8ade5cf5334106d38b.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + runtime_mappings: { + "http.clientip": { + type: "ip", + script: + "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip);\n ", + }, + }, + query: { + match: { + "http.clientip": "40.135.0.0", + }, + }, + fields: ["http.clientip"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc b/docs/doc_examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc new file mode 100644 index 000000000..8289c7259 --- /dev/null +++ b/docs/doc_examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc @@ -0,0 +1,78 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.info(); +console.log(response); + +const response1 = await client.nodes.info({ + node_id: "_all", +}); +console.log(response1); + +const response2 = await client.nodes.info({ + node_id: "_local", +}); +console.log(response2); + +const response3 = await client.nodes.info({ + node_id: "_master", +}); +console.log(response3); + +const response4 = await 
client.nodes.info({ + node_id: "node_name_goes_here", +}); +console.log(response4); + +const response5 = await client.nodes.info({ + node_id: "node_name_goes_*", +}); +console.log(response5); + +const response6 = await client.nodes.info({ + node_id: "10.0.0.3,10.0.0.4", +}); +console.log(response6); + +const response7 = await client.nodes.info({ + node_id: "10.0.0.*", +}); +console.log(response7); + +const response8 = await client.nodes.info({ + node_id: "_all,master:false", +}); +console.log(response8); + +const response9 = await client.nodes.info({ + node_id: "data:true,ingest:true", +}); +console.log(response9); + +const response10 = await client.nodes.info({ + node_id: "coordinating_only:true", +}); +console.log(response10); + +const response11 = await client.nodes.info({ + node_id: "master:true,voting_only:false", +}); +console.log(response11); + +const response12 = await client.nodes.info({ + node_id: "rack:2", +}); +console.log(response12); + +const response13 = await client.nodes.info({ + node_id: "ra*:2", +}); +console.log(response13); + +const response14 = await client.nodes.info({ + node_id: "ra*:2*", +}); +console.log(response14); +---- diff --git a/docs/doc_examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc b/docs/doc_examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc new file mode 100644 index 000000000..fb87919cd --- /dev/null +++ b/docs/doc_examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.reroute({ + metric: "none", + commands: [ + { + move: { + index: "test", + shard: 0, + from_node: "node1", + to_node: "node2", + }, + }, + { + allocate_replica: { + index: "test", + shard: 1, + node: "node3", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/e17e8852ec3f31781e1364f4dffeb6d0.asciidoc b/docs/doc_examples/2ceded6ee764adf1aaaac0a1cd25ed5f.asciidoc similarity index 54% rename from docs/doc_examples/e17e8852ec3f31781e1364f4dffeb6d0.asciidoc rename to docs/doc_examples/2ceded6ee764adf1aaaac0a1cd25ed5f.asciidoc index 6d97c3961..dd5e94075 100644 --- a/docs/doc_examples/e17e8852ec3f31781e1364f4dffeb6d0.asciidoc +++ b/docs/doc_examples/2ceded6ee764adf1aaaac0a1cd25ed5f.asciidoc @@ -3,15 +3,10 @@ [source, js] ---- -const response = await client.search({ - body: { - query: { - query_string: { - query: '(content:this OR name:this) AND (content:that OR name:that)' - } - } - } -}) -console.log(response) +const response = await client.cat.indices({ + v: "true", + health: "red", + h: "index,status,health", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/2d01a9e5550b525496757f1bd7f0e706.asciidoc b/docs/doc_examples/2d01a9e5550b525496757f1bd7f0e706.asciidoc new file mode 100644 index 000000000..865aa0e5d --- /dev/null +++ b/docs/doc_examples/2d01a9e5550b525496757f1bd7f0e706.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + timeout: "5m", + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "GET /search HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc b/docs/doc_examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc new file 
mode 100644 index 000000000..4deb98ab1 --- /dev/null +++ b/docs/doc_examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_phrase: { + message: { + query: "this is a test", + analyzer: "my_analyzer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d2f5ec97aa34ff7822a6a1ed08ef335.asciidoc b/docs/doc_examples/2d2f5ec97aa34ff7822a6a1ed08ef335.asciidoc new file mode 100644 index 000000000..4a0b8c958 --- /dev/null +++ b/docs/doc_examples/2d2f5ec97aa34ff7822a6a1ed08ef335.asciidoc @@ -0,0 +1,58 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "test", + refresh: "true", + operations: [ + { + index: { + _index: "test1", + }, + }, + { + s: 1, + m: 3.1415, + }, + { + index: { + _index: "test1", + }, + }, + { + s: 2, + m: 1, + }, + { + index: { + _index: "test2", + }, + }, + { + s: 3.1, + m: 2.71828, + }, + ], +}); +console.log(response); + +const response1 = await client.search({ + index: "test*", + filter_path: "aggregations", + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + s: "asc", + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/2d37b02cbf6d30ae11bf239a54ec9423.asciidoc b/docs/doc_examples/2d37b02cbf6d30ae11bf239a54ec9423.asciidoc new file mode 100644 index 000000000..3c73b9c1a --- /dev/null +++ b/docs/doc_examples/2d37b02cbf6d30ae11bf239a54ec9423.asciidoc @@ -0,0 +1,85 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: {}, + }, + { + "@timestamp": 1516729294000, + model_number: "QVKC92Q", + measures: { + voltage: "5.2", + start: "300", + end: "8675309", + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516642894000, + model_number: "QVKC92Q", + measures: { + voltage: "5.8", + start: "300", + end: "8675309", + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516556494000, + model_number: "QVKC92Q", + measures: { + voltage: "5.1", + start: "300", + end: "8675309", + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516470094000, + model_number: "QVKC92Q", + measures: { + voltage: "5.6", + start: "300", + end: "8675309", + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516383694000, + model_number: "HG537PU", + measures: { + voltage: "4.2", + start: "400", + end: "8625309", + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516297294000, + model_number: "HG537PU", + measures: { + voltage: "4.0", + start: "400", + end: "8625309", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc b/docs/doc_examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc new file mode 100644 index 000000000..6beaa8b94 --- /dev/null +++ b/docs/doc_examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.validateQuery({ + index: "my-index-000001", + query: { + query_string: { + query: 
"@timestamp:foo", + lenient: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc b/docs/doc_examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc new file mode 100644 index 000000000..fbb1460ce --- /dev/null +++ b/docs/doc_examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.transforms({ + v: "true", + format: "json", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d9b30acd6b5683f39d53494c0dd779c.asciidoc b/docs/doc_examples/2d9b30acd6b5683f39d53494c0dd779c.asciidoc new file mode 100644 index 000000000..df4dd09ce --- /dev/null +++ b/docs/doc_examples/2d9b30acd6b5683f39d53494c0dd779c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.health(); +console.log(response); + +const response1 = await client.cat.recovery(); +console.log(response1); +---- diff --git a/docs/doc_examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc b/docs/doc_examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc new file mode 100644 index 000000000..48f3e7445 --- /dev/null +++ b/docs/doc_examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putDataLifecycle({ + name: "dsl-data-stream", + data_retention: "7d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc b/docs/doc_examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc new file mode 100644 index 000000000..fdb1ef5bf --- /dev/null +++ b/docs/doc_examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + match: { + query: "hot porridge", + filter: { + script: { + source: + "interval.start > 10 && interval.end < 20 && interval.gaps == 0", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e364833626c9790c042c8f006fcc999.asciidoc b/docs/doc_examples/2e364833626c9790c042c8f006fcc999.asciidoc new file mode 100644 index 000000000..b791428b4 --- /dev/null +++ b/docs/doc_examples/2e364833626c9790c042c8f006fcc999.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "multiplexer_example", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["my_multiplexer"], + }, + }, + filter: { + my_multiplexer: { + type: "multiplexer", + filters: ["lowercase", "lowercase, porter_stem"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e36fe22051a47e052e349854d9948b9.asciidoc b/docs/doc_examples/2e36fe22051a47e052e349854d9948b9.asciidoc new file mode 100644 index 000000000..e29c93a48 --- /dev/null +++ b/docs/doc_examples/2e36fe22051a47e052e349854d9948b9.asciidoc @@ -0,0 +1,12 @@ +// This 
file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.explain({ + index: "my-index-000001", + id: 0, + q: "message:search", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e3d1b293da93f2a9ecfc26786ec28d6.asciidoc b/docs/doc_examples/2e3d1b293da93f2a9ecfc26786ec28d6.asciidoc new file mode 100644 index 000000000..b4e87a4e8 --- /dev/null +++ b/docs/doc_examples/2e3d1b293da93f2a9ecfc26786ec28d6.asciidoc @@ -0,0 +1,154 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + template: { + settings: { + index: { + mode: "time_series", + routing_path: [ + "kubernetes.namespace", + "kubernetes.host", + "kubernetes.node", + "kubernetes.pod", + ], + number_of_replicas: 0, + number_of_shards: 2, + }, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + kubernetes: { + properties: { + container: { + properties: { + cpu: { + properties: { + usage: { + properties: { + core: { + properties: { + ns: { + type: "long", + }, + }, + }, + limit: { + properties: { + pct: { + type: "float", + }, + }, + }, + nanocores: { + type: "long", + time_series_metric: "gauge", + }, + node: { + properties: { + pct: { + type: "float", + }, + }, + }, + }, + }, + }, + }, + memory: { + properties: { + available: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + majorpagefaults: { + type: "long", + }, + pagefaults: { + type: "long", + time_series_metric: "gauge", + }, + rss: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + usage: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + limit: { + properties: { + pct: { + type: "float", + }, + }, + }, + node: { + properties: { + pct: { + type: "float", + }, + }, + }, + }, + }, + workingset: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + }, + }, + name: { + type: "keyword", + }, + start_time: { + type: "date", + }, + }, + }, + host: { + type: "keyword", + time_series_dimension: true, + }, + namespace: { + type: "keyword", + time_series_dimension: true, + }, + node: { + type: "keyword", + time_series_dimension: true, + }, + pod: { + type: "keyword", + time_series_dimension: true, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc b/docs/doc_examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc new file mode 100644 index 000000000..ab5a2cc20 --- /dev/null +++ b/docs/doc_examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.split({ + index: "my_source_index", + target: "my_target_index", + settings: { + "index.number_of_shards": 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e7f4b9be999422a12abb680572b13c8.asciidoc b/docs/doc_examples/2e7f4b9be999422a12abb680572b13c8.asciidoc new file mode 100644 index 000000000..c7cafa2e2 --- /dev/null +++ b/docs/doc_examples/2e7f4b9be999422a12abb680572b13c8.asciidoc @@ -0,0 +1,10 @@ +// This file 
is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.getLifecycle({ + name: "my_policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e847378ba26aa64d40186b6e3e6a1da.asciidoc b/docs/doc_examples/2e847378ba26aa64d40186b6e3e6a1da.asciidoc new file mode 100644 index 000000000..4109e8fef --- /dev/null +++ b/docs/doc_examples/2e847378ba26aa64d40186b6e3e6a1da.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_index", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: + "field('my_counter').asBigInteger(BigInteger.ZERO).floatValue()", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e93eaaebf75fa4a2451e8a76ffa9f20.asciidoc b/docs/doc_examples/2e93eaaebf75fa4a2451e8a76ffa9f20.asciidoc new file mode 100644 index 000000000..b75698f7e --- /dev/null +++ b/docs/doc_examples/2e93eaaebf75fa4a2451e8a76ffa9f20.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, + template: { + mappings: { + properties: { + message: { + type: "text", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ebcdd00ccbf26b4c8e6d9c80dfb3d55.asciidoc b/docs/doc_examples/2ebcdd00ccbf26b4c8e6d9c80dfb3d55.asciidoc new file mode 100644 index 000000000..6689c3417 --- /dev/null +++ b/docs/doc_examples/2ebcdd00ccbf26b4c8e6d9c80dfb3d55.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "linestring", + coordinates: [ + [-377.03653, 389.897676], + [-377.009051, 389.889939], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ec8d757188349a4630e120ba2c98c3b.asciidoc b/docs/doc_examples/2ec8d757188349a4630e120ba2c98c3b.asciidoc new file mode 100644 index 000000000..a11d42c1f --- /dev/null +++ b/docs/doc_examples/2ec8d757188349a4630e120ba2c98c3b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "pattern_replace", + pattern: "(dog)", + replacement: "watch$1", + }, + ], + text: "foxes jump lazy dogs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc b/docs/doc_examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc new file mode 100644 index 000000000..26c954c49 --- /dev/null +++ b/docs/doc_examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: "my-index-2099.05.06-000001", + alias: "my-alias", + routing: "1", + 
}, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc b/docs/doc_examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc new file mode 100644 index 000000000..1ac2d8ecb --- /dev/null +++ b/docs/doc_examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_index_analyzer: { + type: "custom", + tokenizer: "standard", + filter: ["my_custom_word_delimiter_graph_filter", "flatten_graph"], + }, + }, + filter: { + my_custom_word_delimiter_graph_filter: { + type: "word_delimiter_graph", + catenate_all: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2eebaeb3983a04ef7a9201c1f4d40dc1.asciidoc b/docs/doc_examples/2eebaeb3983a04ef7a9201c1f4d40dc1.asciidoc new file mode 100644 index 000000000..380d5aa17 --- /dev/null +++ b/docs/doc_examples/2eebaeb3983a04ef7a9201c1f4d40dc1.asciidoc @@ -0,0 +1,68 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index", + refresh: "true", + operations: [ + { + index: {}, + }, + { + timestamp: "2020-04-30T14:30:17-05:00", + message: + '40.135.0.0 - - [30/Apr/2020:14:30:17 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:30:53-05:00", + message: + '232.0.0.0 - - [30/Apr/2020:14:30:53 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:12-05:00", + message: + '26.1.0.0 - - [30/Apr/2020:14:31:12 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:19-05:00", + message: + '247.37.0.0 - - [30/Apr/2020:14:31:19 -0500] "GET /french/splash_inet.html HTTP/1.0" 200 3781', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:22-05:00", + message: + '247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] "GET /images/hm_nbg.jpg HTTP/1.0" 304 0', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:27-05:00", + message: + '252.0.0.0 - - [30/Apr/2020:14:31:27 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:28-05:00", + message: "not a valid apache log", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc b/docs/doc_examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc new file mode 100644 index 000000000..d2f122662 --- /dev/null +++ b/docs/doc_examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + "index.indexing.slowlog.include.user": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f0b2181c434a879a23b4643bdd92575.asciidoc b/docs/doc_examples/2f0b2181c434a879a23b4643bdd92575.asciidoc new file mode 100644 index 000000000..a3d808bf4 --- /dev/null +++ b/docs/doc_examples/2f0b2181c434a879a23b4643bdd92575.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO 
NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); + +const response1 = await client.indices.getSettings({ + index: "_all", +}); +console.log(response1); + +const response2 = await client.indices.getSettings({ + index: "log_2099_*", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/2f195eeb93229e40c4d8f1a6ab4a358c.asciidoc b/docs/doc_examples/2f195eeb93229e40c4d8f1a6ab4a358c.asciidoc new file mode 100644 index 000000000..dbc53e814 --- /dev/null +++ b/docs/doc_examples/2f195eeb93229e40c4d8f1a6ab4a358c.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + fingerprint: { + fields: ["user"], + }, + }, + ], + }, + docs: [ + { + _source: { + user: { + last_name: "Smith", + first_name: "John", + date_of_birth: "1980-01-15", + is_active: true, + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc b/docs/doc_examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc new file mode 100644 index 000000000..3fa52a230 --- /dev/null +++ b/docs/doc_examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.deleteAutoFollowPattern({ + name: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cf02e3d8b371bd59f0224967c36330da.asciidoc b/docs/doc_examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc similarity index 78% rename from docs/doc_examples/cf02e3d8b371bd59f0224967c36330da.asciidoc rename to docs/doc_examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc index 7dcfc67aa..c37e742e2 100644 --- a/docs/doc_examples/cf02e3d8b371bd59f0224967c36330da.asciidoc +++ b/docs/doc_examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.indices.getMapping({ - index: 'twitter,kimchy' -}) -console.log(response) + index: "my-index-000001", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/2f4a55dfeba8851b306ef9c1b216ef54.asciidoc b/docs/doc_examples/2f4a55dfeba8851b306ef9c1b216ef54.asciidoc new file mode 100644 index 000000000..f5fb24488 --- /dev/null +++ b/docs/doc_examples/2f4a55dfeba8851b306ef9c1b216ef54.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "bug_reports", + query: { + term: { + "labels.release": "v1.3.0", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f4e28c81db47547ad39d0926babab12.asciidoc b/docs/doc_examples/2f4e28c81db47547ad39d0926babab12.asciidoc new file mode 100644 index 000000000..6313ca3ea --- /dev/null +++ b/docs/doc_examples/2f4e28c81db47547ad39d0926babab12.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "estonian_example", + settings: { + analysis: { + filter: { + estonian_stop: { + 
type: "stop", + stopwords: "_estonian_", + }, + estonian_keywords: { + type: "keyword_marker", + keywords: ["näide"], + }, + estonian_stemmer: { + type: "stemmer", + language: "estonian", + }, + }, + analyzer: { + rebuilt_estonian: { + tokenizer: "standard", + filter: [ + "lowercase", + "estonian_stop", + "estonian_keywords", + "estonian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc b/docs/doc_examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc new file mode 100644 index 000000000..839a4d359 --- /dev/null +++ b/docs/doc_examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + "index.merge.policy.max_merge_at_once_explicit": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc b/docs/doc_examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc new file mode 100644 index 000000000..e2201ac00 --- /dev/null +++ b/docs/doc_examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + status_code: { + type: "keyword", + }, + session_id: { + type: "keyword", + doc_values: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe5763d32955e8b65eb3048e97b1580c.asciidoc b/docs/doc_examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc similarity index 73% rename from docs/doc_examples/fe5763d32955e8b65eb3048e97b1580c.asciidoc rename to docs/doc_examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc index aee2245fc..79d2f713b 100644 --- a/docs/doc_examples/fe5763d32955e8b65eb3048e97b1580c.asciidoc +++ b/docs/doc_examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc @@ -4,9 +4,7 @@ [source, js] ---- const response = await client.indices.forcemerge({ - index: 'twitter', - max_num_segments: '5' -}) -console.log(response) + index: "my-index-000001", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/2fa45d74ba9933188c4728f8a9e5372c.asciidoc b/docs/doc_examples/2fa45d74ba9933188c4728f8a9e5372c.asciidoc new file mode 100644 index 000000000..c9f316a8c --- /dev/null +++ b/docs/doc_examples/2fa45d74ba9933188c4728f8a9e5372c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "action.auto_create_index": "my-index-000001,index10,-index1*,+ind*", + }, +}); +console.log(response); + +const response1 = await client.cluster.putSettings({ + persistent: { + "action.auto_create_index": "false", + }, +}); +console.log(response1); + +const response2 = await client.cluster.putSettings({ + persistent: { + "action.auto_create_index": "true", + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/2fa7ded8515b32f26c54394ea598f573.asciidoc b/docs/doc_examples/2fa7ded8515b32f26c54394ea598f573.asciidoc new file mode 100644 index 000000000..de58c2dc4 --- /dev/null +++ b/docs/doc_examples/2fa7ded8515b32f26c54394ea598f573.asciidoc @@ 
-0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["te*", "bar*"], + template: { + settings: { + number_of_shards: 1, + }, + mappings: { + _source: { + enabled: true, + }, + properties: { + host_name: { + type: "keyword", + }, + created_at: { + type: "date", + format: "EEE MMM dd HH:mm:ss Z yyyy", + }, + }, + }, + aliases: { + mydata: {}, + }, + }, + priority: 500, + composed_of: ["component_template1", "runtime_component_template"], + version: 3, + _meta: { + description: "my custom", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fc2c790a85be29bbcba50bdde1493f4.asciidoc b/docs/doc_examples/2fc2c790a85be29bbcba50bdde1493f4.asciidoc new file mode 100644 index 000000000..e46f4517b --- /dev/null +++ b/docs/doc_examples/2fc2c790a85be29bbcba50bdde1493f4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc b/docs/doc_examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc new file mode 100644 index 000000000..9235b4fab --- /dev/null +++ b/docs/doc_examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + transient: { + "indices.recovery.*": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fd0b3c132b46aa34cc9d92dd2d4bc85.asciidoc b/docs/doc_examples/2fd0b3c132b46aa34cc9d92dd2d4bc85.asciidoc new file mode 100644 index 000000000..b952b6ef8 --- /dev/null +++ b/docs/doc_examples/2fd0b3c132b46aa34cc9d92dd2d4bc85.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "common_grams", + common_words: ["is", "the"], + }, + ], + text: "the quick fox is brown", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fd458d37aab509fe2d970c0b6e2a10f.asciidoc b/docs/doc_examples/2fd458d37aab509fe2d970c0b6e2a10f.asciidoc new file mode 100644 index 000000000..2f15a5eb4 --- /dev/null +++ b/docs/doc_examples/2fd458d37aab509fe2d970c0b6e2a10f.asciidoc @@ -0,0 +1,53 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "destination_template", + template: { + settings: { + index: { + number_of_replicas: 0, + number_of_shards: 4, + mode: "time_series", + routing_path: ["metricset"], + time_series: { + end_time: "2023-09-01T14:00:00.000Z", + start_time: "2023-09-01T06:00:00.000Z", + }, + }, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + metricset: { + type: "keyword", + time_series_dimension: true, + }, + k8s: { + properties: { + tx: { + type: "long", + }, + rx: { + type: "long", + }, + }, + }, + }, + }, + }, +}); 
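+// The component template above describes a time-series (TSDB) destination
+// index: `mode: "time_series"` with fixed `time_series` start/end bounds,
+// `routing_path: ["metricset"]`, and `metricset` mapped as a
+// time_series_dimension keyword.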
+console.log(response); + +const response1 = await client.indices.putIndexTemplate({ + name: 2, + index_patterns: ["k8s*"], + composed_of: ["destination_template"], + data_stream: {}, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc b/docs/doc_examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc index 9663d1bdf..b0e232648 100644 --- a/docs/doc_examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc +++ b/docs/doc_examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc @@ -3,55 +3,50 @@ [source, js] ---- -const response0 = await client.indices.create({ - index: 'test', - body: { - mappings: { - dynamic: false, - properties: { - text: { - type: 'text' - } - } - } - } -}) -console.log(response0) +const response = await client.indices.create({ + index: "test", + mappings: { + dynamic: false, + properties: { + text: { + type: "text", + }, + }, + }, +}); +console.log(response); const response1 = await client.index({ - index: 'test', - refresh: true, - body: { - text: 'words words', - flag: 'bar' - } -}) -console.log(response1) + index: "test", + refresh: "true", + document: { + text: "words words", + flag: "bar", + }, +}); +console.log(response1); const response2 = await client.index({ - index: 'test', - refresh: true, - body: { - text: 'words words', - flag: 'foo' - } -}) -console.log(response2) + index: "test", + refresh: "true", + document: { + text: "words words", + flag: "foo", + }, +}); +console.log(response2); const response3 = await client.indices.putMapping({ - index: 'test', - body: { - properties: { - text: { - type: 'text' - }, - flag: { - type: 'text', - analyzer: 'keyword' - } - } - } -}) -console.log(response3) + index: "test", + properties: { + text: { + type: "text", + }, + flag: { + type: "text", + analyzer: "keyword", + }, + }, +}); +console.log(response3); ---- - diff --git a/docs/doc_examples/2fea3e324939cc7e9c396964aeee7111.asciidoc b/docs/doc_examples/2fea3e324939cc7e9c396964aeee7111.asciidoc new file mode 100644 index 000000000..f92d041ea --- /dev/null +++ b/docs/doc_examples/2fea3e324939cc7e9c396964aeee7111.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: { + query: "to be or not to be", + operator: "and", + zero_terms_query: "all", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc b/docs/doc_examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc new file mode 100644 index 000000000..83ce4807d --- /dev/null +++ b/docs/doc_examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ffa953b29ed0156c9e610daf66b8e48.asciidoc b/docs/doc_examples/2ffa953b29ed0156c9e610daf66b8e48.asciidoc new file mode 100644 index 000000000..2f048f0a9 --- /dev/null +++ b/docs/doc_examples/2ffa953b29ed0156c9e610daf66b8e48.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.explainLifecycle({ + index: "timeseries-*", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/300576666769b78fa6fa26b232837f81.asciidoc b/docs/doc_examples/300576666769b78fa6fa26b232837f81.asciidoc new file mode 100644 index 000000000..1cd460618 --- /dev/null +++ b/docs/doc_examples/300576666769b78fa6fa26b232837f81.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.getAutoscalingCapacity(); +console.log(response); +---- diff --git a/docs/doc_examples/305c4cfb2ad4b58b4c319ffbf32336cc.asciidoc b/docs/doc_examples/305c4cfb2ad4b58b4c319ffbf32336cc.asciidoc new file mode 100644 index 000000000..ab2ae2bdc --- /dev/null +++ b/docs/doc_examples/305c4cfb2ad4b58b4c319ffbf32336cc.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + script_fields: { + my_doubled_field: { + script: { + lang: "painless", + source: "doc['my_field'].value * params.get('multiplier');", + params: { + multiplier: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3082ae0c3ecdc61808103214631b40c6.asciidoc b/docs/doc_examples/3082ae0c3ecdc61808103214631b40c6.asciidoc new file mode 100644 index 000000000..22c1eb540 --- /dev/null +++ b/docs/doc_examples/3082ae0c3ecdc61808103214631b40c6.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + avg_monthly_sales: { + avg_bucket: { + buckets_path: "sales_per_month>sales", + gap_policy: "skip", + format: "#,##0.00;(#,##0.00)", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/309f0721145b5c656338a02459c3ff1e.asciidoc b/docs/doc_examples/309f0721145b5c656338a02459c3ff1e.asciidoc new file mode 100644 index 000000000..1c5d0dca3 --- /dev/null +++ b/docs/doc_examples/309f0721145b5c656338a02459c3ff1e.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + rank_feature: { + field: "pagerank", + saturation: { + pivot: 8, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/30abc76a39e551f4b52c65002bb6405d.asciidoc b/docs/doc_examples/30abc76a39e551f4b52c65002bb6405d.asciidoc new file mode 100644 index 000000000..0b8fe7b56 --- /dev/null +++ b/docs/doc_examples/30abc76a39e551f4b52c65002bb6405d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + username: "myuser", + realm_name: "native1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/30bd3c0785f3df4795684754adeb5ecb.asciidoc b/docs/doc_examples/30bd3c0785f3df4795684754adeb5ecb.asciidoc new file mode 100644 index 000000000..96086e079 --- /dev/null +++ b/docs/doc_examples/30bd3c0785f3df4795684754adeb5ecb.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// 
Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + match: { + message: "{{query_string}}", + }, + }, + from: "{{from}}", + size: "{{size}}", + }, + params: { + query_string: "hello world", + from: 20, + size: 10, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/30db2702dd0071c72a090b8311d0db09.asciidoc b/docs/doc_examples/30db2702dd0071c72a090b8311d0db09.asciidoc new file mode 100644 index 000000000..153beae37 --- /dev/null +++ b/docs/doc_examples/30db2702dd0071c72a090b8311d0db09.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + query: { + match: { + body: "elections", + }, + }, + aggs: { + top_sites: { + terms: { + field: "domain", + order: { + top_hit: "desc", + }, + }, + aggs: { + top_tags_hits: { + top_hits: {}, + }, + top_hit: { + max: { + script: { + source: "_score", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc b/docs/doc_examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc new file mode 100644 index 000000000..12da5e39e --- /dev/null +++ b/docs/doc_examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", +}); +console.log(response); + +const response1 = await client.indices.create({ + index: "my-index-000002", +}); +console.log(response1); + +const response2 = await client.indices.putMapping({ + index: "my-index-000001,my-index-000002", + properties: { + user: { + properties: { + name: { + type: "keyword", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/3166455372f2d96622caff076e91ebe7.asciidoc b/docs/doc_examples/3166455372f2d96622caff076e91ebe7.asciidoc new file mode 100644 index 000000000..01c9127eb --- /dev/null +++ b/docs/doc_examples/3166455372f2d96622caff076e91ebe7.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + percolate: { + field: "query", + index: "my-index-000001", + id: "2", + version: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/316cd43feb3b86396483903af1a048b1.asciidoc b/docs/doc_examples/316cd43feb3b86396483903af1a048b1.asciidoc new file mode 100644 index 000000000..524e00979 --- /dev/null +++ b/docs/doc_examples/316cd43feb3b86396483903af1a048b1.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sale_date: { + date_histogram: { + field: "date", + calendar_interval: "year", + missing: "2000/01/01", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3182f26c61fbe5cf89400804533d5ed2.asciidoc b/docs/doc_examples/3182f26c61fbe5cf89400804533d5ed2.asciidoc new file mode 100644 index 000000000..321f477e4 --- /dev/null +++ 
b/docs/doc_examples/3182f26c61fbe5cf89400804533d5ed2.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + id: "my-search-template", + params: { + query_string: "My string", + text_fields: [ + { + user_name: "John", + }, + { + user_name: "kimchy", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/318e209cc4d6f306e65cb2f5598a50b1.asciidoc b/docs/doc_examples/318e209cc4d6f306e65cb2f5598a50b1.asciidoc new file mode 100644 index 000000000..0e2f4d81e --- /dev/null +++ b/docs/doc_examples/318e209cc4d6f306e65cb2f5598a50b1.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "LineString", + coordinates: [ + [-77.03653, 38.897676], + [-77.009051, 38.889939], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/31a79a57b242713edec6795599ba0d5d.asciidoc b/docs/doc_examples/31a79a57b242713edec6795599ba0d5d.asciidoc new file mode 100644 index 000000000..7d8fdd9e4 --- /dev/null +++ b/docs/doc_examples/31a79a57b242713edec6795599ba0d5d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + my_tokens: { + type: "sparse_vector", + }, + my_text_field: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/31ab4ec26176857280af630bf84a2823.asciidoc b/docs/doc_examples/31ab4ec26176857280af630bf84a2823.asciidoc new file mode 100644 index 000000000..77c3e3ee3 --- /dev/null +++ b/docs/doc_examples/31ab4ec26176857280af630bf84a2823.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlServiceProviderMetadata({ + realm_name: "saml1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/31ac1b68dc7c26a1d37350be47ae9381.asciidoc b/docs/doc_examples/31ac1b68dc7c26a1d37350be47ae9381.asciidoc new file mode 100644 index 000000000..cf057e9c6 --- /dev/null +++ b/docs/doc_examples/31ac1b68dc7c26a1d37350be47ae9381.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "music", + mappings: { + properties: { + suggest: { + type: "completion", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/31aed390c30bd4f42a5c56253695e53f.asciidoc b/docs/doc_examples/31aed390c30bd4f42a5c56253695e53f.asciidoc new file mode 100644 index 000000000..4d03f9146 --- /dev/null +++ b/docs/doc_examples/31aed390c30bd4f42a5c56253695e53f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "whitespace_example", + settings: { + analysis: { + analyzer: { + rebuilt_whitespace: { + tokenizer: "whitespace", + filter: [], + }, 
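+        // rebuilt_whitespace mirrors the built-in `whitespace` analyzer:
+        // the whitespace tokenizer with a deliberately empty filter chain.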
+ }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc b/docs/doc_examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc new file mode 100644 index 000000000..36c1c5a79 --- /dev/null +++ b/docs/doc_examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.unfreeze({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/31f4400716500149cccbc19aa06bff66.asciidoc b/docs/doc_examples/31f4400716500149cccbc19aa06bff66.asciidoc new file mode 100644 index 000000000..4cd289777 --- /dev/null +++ b/docs/doc_examples/31f4400716500149cccbc19aa06bff66.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.danglingIndices.deleteDanglingIndex({ + index_uuid: "", + accept_data_loss: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/320645d771e952af2a67bb7445c3688d.asciidoc b/docs/doc_examples/320645d771e952af2a67bb7445c3688d.asciidoc new file mode 100644 index 000000000..3dbe450ee --- /dev/null +++ b/docs/doc_examples/320645d771e952af2a67bb7445c3688d.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "sorani_example", + settings: { + analysis: { + filter: { + sorani_stop: { + type: "stop", + stopwords: "_sorani_", + }, + sorani_keywords: { + type: "keyword_marker", + keywords: ["mînak"], + }, + sorani_stemmer: { + type: "stemmer", + language: "sorani", + }, + }, + analyzer: { + rebuilt_sorani: { + tokenizer: "standard", + filter: [ + "sorani_normalization", + "lowercase", + "decimal_digit", + "sorani_stop", + "sorani_keywords", + "sorani_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32123981430e5a8b34fe14314fc48429.asciidoc b/docs/doc_examples/32123981430e5a8b34fe14314fc48429.asciidoc new file mode 100644 index 000000000..0e3687fe2 --- /dev/null +++ b/docs/doc_examples/32123981430e5a8b34fe14314fc48429.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001,my-index-000002", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc b/docs/doc_examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc new file mode 100644 index 000000000..2725863d8 --- /dev/null +++ b/docs/doc_examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache({ + fielddata: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3250a8d2d2a9619035040e55a03620b9.asciidoc b/docs/doc_examples/3250a8d2d2a9619035040e55a03620b9.asciidoc new file mode 100644 index 000000000..a90140c98 --- /dev/null +++ 
b/docs/doc_examples/3250a8d2d2a9619035040e55a03620b9.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.http.HttpTracer": "TRACE", + "logger.org.elasticsearch.http.HttpBodyTracer": "TRACE", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/327466380bcd55361973b4a96c6dccb2.asciidoc b/docs/doc_examples/327466380bcd55361973b4a96c6dccb2.asciidoc new file mode 100644 index 000000000..b8aa8a2e9 --- /dev/null +++ b/docs/doc_examples/327466380bcd55361973b4a96c6dccb2.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "spanish_example", + settings: { + analysis: { + filter: { + spanish_stop: { + type: "stop", + stopwords: "_spanish_", + }, + spanish_keywords: { + type: "keyword_marker", + keywords: ["ejemplo"], + }, + spanish_stemmer: { + type: "stemmer", + language: "light_spanish", + }, + }, + analyzer: { + rebuilt_spanish: { + tokenizer: "standard", + filter: [ + "lowercase", + "spanish_stop", + "spanish_keywords", + "spanish_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32a7acdfb7046966b28f394476c99126.asciidoc b/docs/doc_examples/32a7acdfb7046966b28f394476c99126.asciidoc new file mode 100644 index 000000000..faffd3f29 --- /dev/null +++ b/docs/doc_examples/32a7acdfb7046966b28f394476c99126.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "POINT (-377.03653 389.897676)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32af23a4b0fea6c81c4688ce5fe4ac35.asciidoc b/docs/doc_examples/32af23a4b0fea6c81c4688ce5fe4ac35.asciidoc new file mode 100644 index 000000000..d304a1bdc --- /dev/null +++ b/docs/doc_examples/32af23a4b0fea6c81c4688ce5fe4ac35.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_ranks: { + percentile_ranks: { + field: "load_time", + values: [500, 600], + hdr: { + number_of_significant_value_digits: 3, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc b/docs/doc_examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc new file mode 100644 index 000000000..b0fd0f6e4 --- /dev/null +++ b/docs/doc_examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.updateUserProfileData({ + uid: "u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0", + labels: { + direction: "west", + }, + data: { + app1: { + font: "large", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32b8a5152b47930f2e16c40c8615c7bb.asciidoc b/docs/doc_examples/32b8a5152b47930f2e16c40c8615c7bb.asciidoc new file mode 100644 index 000000000..86a4d3e28 --- 
/dev/null +++ b/docs/doc_examples/32b8a5152b47930f2e16c40c8615c7bb.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my-example-app", + search_application: { + indices: ["example-index"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {\n "multi_match" : {\n "query": "{{query}}",\n "fields": [ "title^4", "plot", "actors", "directors" ]\n }\n },\n {\n "multi_match" : {\n "query": "{{query}}",\n "type": "phrase_prefix",\n "fields": [ "title^4", "plot"]\n }\n },\n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', + params: { + query: "", + _es_filters: {}, + _es_aggs: {}, + _es_sort_fields: {}, + size: 10, + from: 0, + }, + dictionary: {}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32cd57666bc80b8cf793d06fa1086669.asciidoc b/docs/doc_examples/32cd57666bc80b8cf793d06fa1086669.asciidoc new file mode 100644 index 000000000..91db8be03 --- /dev/null +++ b/docs/doc_examples/32cd57666bc80b8cf793d06fa1086669.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "tsv", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32ce26b8af95f7ccc2a7bd5e77a39d6c.asciidoc b/docs/doc_examples/32ce26b8af95f7ccc2a7bd5e77a39d6c.asciidoc new file mode 100644 index 000000000..948af1175 --- /dev/null +++ b/docs/doc_examples/32ce26b8af95f7ccc2a7bd5e77a39d6c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.recovery({ + index: "my-index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/32de5dd306bd014d67053d2f175defcd.asciidoc b/docs/doc_examples/32de5dd306bd014d67053d2f175defcd.asciidoc new file mode 100644 index 000000000..0e47d6710 --- /dev/null +++ b/docs/doc_examples/32de5dd306bd014d67053d2f175defcd.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.xpack.security.authc.saml": "debug", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/331caebf810a923644eb6de26e5a97f4.asciidoc b/docs/doc_examples/331caebf810a923644eb6de26e5a97f4.asciidoc new file mode 100644 index 000000000..b98c10d2e --- /dev/null +++ b/docs/doc_examples/331caebf810a923644eb6de26e5a97f4.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_join_field: { + type: "join", + relations: { + question: ["answer", "comment"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/3337c817ebd438254505a31e91c91724.asciidoc b/docs/doc_examples/3337c817ebd438254505a31e91c91724.asciidoc new file mode 100644 index 000000000..cdd215727 --- /dev/null +++ b/docs/doc_examples/3337c817ebd438254505a31e91c91724.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb30ba547e4a7b8f54f33ab259aca523.asciidoc b/docs/doc_examples/3341d3bbb53052447a37c92a04c14b70.asciidoc similarity index 59% rename from docs/doc_examples/eb30ba547e4a7b8f54f33ab259aca523.asciidoc rename to docs/doc_examples/3341d3bbb53052447a37c92a04c14b70.asciidoc index 08353b44f..dac17132e 100644 --- a/docs/doc_examples/eb30ba547e4a7b8f54f33ab259aca523.asciidoc +++ b/docs/doc_examples/3341d3bbb53052447a37c92a04c14b70.asciidoc @@ -4,12 +4,9 @@ [source, js] ---- const response = await client.update({ - index: 'test', - id: '1', - body: { - script: "ctx._source.new_field = 'value_of_new_field'" - } -}) -console.log(response) + index: "my-index-000001", + id: 1, + script: "ctx._source.new_field = 'value_of_new_field'", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/3343a4cf559060c422d86c786a95e535.asciidoc b/docs/doc_examples/3343a4cf559060c422d86c786a95e535.asciidoc new file mode 100644 index 000000000..7f260292c --- /dev/null +++ b/docs/doc_examples/3343a4cf559060c422d86c786a95e535.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["apostrophe"], + text: "Istanbul'a veya Istanbul'dan", +}); +console.log(response); +---- diff --git a/docs/doc_examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc b/docs/doc_examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc new file mode 100644 index 000000000..9c31f8a68 --- /dev/null +++ b/docs/doc_examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.get({ + task_id: "oTUltX4IQMOUUVeiohTt8A:124", +}); +console.log(response); +---- diff --git a/docs/doc_examples/336613f48dd95ea993dd3bcce264fd0e.asciidoc b/docs/doc_examples/336613f48dd95ea993dd3bcce264fd0e.asciidoc new file mode 100644 index 000000000..fe4d11b4c --- /dev/null +++ b/docs/doc_examples/336613f48dd95ea993dd3bcce264fd0e.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + cold: { + actions: { + allocate: { + require: { + box_type: "cold", + storage: "high", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/33732208fc6e6fe1e8d278299681932e.asciidoc b/docs/doc_examples/33732208fc6e6fe1e8d278299681932e.asciidoc new file mode 100644 index 000000000..f864a2ef1 --- /dev/null +++ b/docs/doc_examples/33732208fc6e6fe1e8d278299681932e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "LINESTRING (-377.03653 389.897676, -377.009051 389.889939)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc b/docs/doc_examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc new file mode 100644 index 000000000..03dd504ab --- /dev/null +++ b/docs/doc_examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.deleteSynonym({ + id: "my-synonyms-set", +}); +console.log(response); +---- diff --git a/docs/doc_examples/33b732bb301e99d2161bd2246494f487.asciidoc b/docs/doc_examples/33b732bb301e99d2161bd2246494f487.asciidoc new file mode 100644 index 000000000..6a2a72cc9 --- /dev/null +++ b/docs/doc_examples/33b732bb301e99d2161bd2246494f487.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "postal_lookup", + processors: [ + { + enrich: { + description: "Add 'geo_data' based on 'geo_location'", + policy_name: "postal_policy", + field: "geo_location", + target_field: "geo_data", + shape_relation: "INTERSECTS", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/33d480fc6812ada75756cf5337bc9092.asciidoc b/docs/doc_examples/33d480fc6812ada75756cf5337bc9092.asciidoc new file mode 100644 index 000000000..88753c964 --- /dev/null +++ b/docs/doc_examples/33d480fc6812ada75756cf5337bc9092.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job", + querystring: { + from: "0", + size: "2", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/33f148e3d8676de6cc52f58749898a13.asciidoc b/docs/doc_examples/33f148e3d8676de6cc52f58749898a13.asciidoc index 35ca65dd2..75a1ad5bb 100644 --- a/docs/doc_examples/33f148e3d8676de6cc52f58749898a13.asciidoc +++ b/docs/doc_examples/33f148e3d8676de6cc52f58749898a13.asciidoc @@ -4,25 +4,22 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - dis_max: { - queries: [ - { - match_phrase_prefix: { - subject: 'quick brown f' - } + query: { + dis_max: { + queries: [ + { + match_phrase_prefix: { + subject: "quick brown f", }, - { - match_phrase_prefix: { - message: 'quick brown f' - } - } - ] - } - } - } -}) -console.log(response) + }, + { + match_phrase_prefix: { + message: "quick brown f", + }, + }, + ], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/342ddf9121aeddd82fea2464665e25da.asciidoc b/docs/doc_examples/342ddf9121aeddd82fea2464665e25da.asciidoc new file mode 100644 index 000000000..00d2c0234 --- /dev/null +++ b/docs/doc_examples/342ddf9121aeddd82fea2464665e25da.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector", + body: { + index_name: "search-google-drive", + name: "My Connector", + service_type: "google_drive", 
+ }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/343dd09a8c76987e586858be3bdc51eb.asciidoc b/docs/doc_examples/343dd09a8c76987e586858be3bdc51eb.asciidoc new file mode 100644 index 000000000..816461ead --- /dev/null +++ b/docs/doc_examples/343dd09a8c76987e586858be3bdc51eb.asciidoc @@ -0,0 +1,50 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_queries2", + settings: { + analysis: { + analyzer: { + wildcard_suffix: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "reverse", "wildcard_edge_ngram"], + }, + wildcard_suffix_search_time: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "reverse"], + }, + }, + filter: { + wildcard_edge_ngram: { + type: "edge_ngram", + min_gram: 1, + max_gram: 32, + }, + }, + }, + }, + mappings: { + properties: { + query: { + type: "percolator", + }, + my_field: { + type: "text", + fields: { + suffix: { + type: "text", + analyzer: "wildcard_suffix", + search_analyzer: "wildcard_suffix_search_time", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/344b4144244d57f87c6aa4652b100b25.asciidoc b/docs/doc_examples/344b4144244d57f87c6aa4652b100b25.asciidoc new file mode 100644 index 000000000..04b448afc --- /dev/null +++ b/docs/doc_examples/344b4144244d57f87c6aa4652b100b25.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 2, + document: { + color: "blue", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/346f28d82acb5427c304aa574fea0008.asciidoc b/docs/doc_examples/346f28d82acb5427c304aa574fea0008.asciidoc new file mode 100644 index 000000000..84f239166 --- /dev/null +++ b/docs/doc_examples/346f28d82acb5427c304aa574fea0008.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "thai_example", + settings: { + analysis: { + filter: { + thai_stop: { + type: "stop", + stopwords: "_thai_", + }, + }, + analyzer: { + rebuilt_thai: { + tokenizer: "thai", + filter: ["lowercase", "decimal_digit", "thai_stop"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc b/docs/doc_examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc new file mode 100644 index 000000000..acb89c3e8 --- /dev/null +++ b/docs/doc_examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.activateWatch({ + watch_id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3487e60e1ae9d4925ce540cd63574385.asciidoc b/docs/doc_examples/3487e60e1ae9d4925ce540cd63574385.asciidoc new file mode 100644 index 000000000..0e0e4838b --- /dev/null +++ b/docs/doc_examples/3487e60e1ae9d4925ce540cd63574385.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.search({ + query: { + boosting: { + positive: { + term: { + text: "apple", + }, + }, + negative: { + term: { + text: "pie tart fruit crumble tree", + }, + }, + negative_boost: 0.5, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/349823d86980d40ac45248c19a59e339.asciidoc b/docs/doc_examples/349823d86980d40ac45248c19a59e339.asciidoc new file mode 100644 index 000000000..93a193897 --- /dev/null +++ b/docs/doc_examples/349823d86980d40ac45248c19a59e339.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "amazon_bedrock_embeddings", + processors: [ + { + inference: { + model_id: "amazon_bedrock_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/34be27141e3a476c138546190101c8bc.asciidoc b/docs/doc_examples/34be27141e3a476c138546190101c8bc.asciidoc new file mode 100644 index 000000000..62b8de4d2 --- /dev/null +++ b/docs/doc_examples/34be27141e3a476c138546190101c8bc.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchMvt({ + index: "my-index", + field: "my-geo-field", + zoom: 15, + x: 5271, + y: 12710, +}); +console.log(response); +---- diff --git a/docs/doc_examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc b/docs/doc_examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc new file mode 100644 index 000000000..9537a8386 --- /dev/null +++ b/docs/doc_examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + "index.search.slowlog.include.user": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e52bec09624cf6c0de5d13f2bfad5a5.asciidoc b/docs/doc_examples/34d51c54b62e9a160c0ddacc10134bb0.asciidoc similarity index 56% rename from docs/doc_examples/7e52bec09624cf6c0de5d13f2bfad5a5.asciidoc rename to docs/doc_examples/34d51c54b62e9a160c0ddacc10134bb0.asciidoc index 031000eec..a0aef5244 100644 --- a/docs/doc_examples/7e52bec09624cf6c0de5d13f2bfad5a5.asciidoc +++ b/docs/doc_examples/34d51c54b62e9a160c0ddacc10134bb0.asciidoc @@ -4,17 +4,16 @@ [source, js] ---- const response = await client.search({ - index: 'twitter', - scroll: '1m', - body: { - size: 100, - query: { + query: { + span_first: { match: { - title: 'elasticsearch' - } - } - } -}) -console.log(response) + span_term: { + "user.id": "kimchy", + }, + }, + end: 3, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/34d63740b58209a3d031212909743925.asciidoc b/docs/doc_examples/34d63740b58209a3d031212909743925.asciidoc new file mode 100644 index 000000000..134309249 --- /dev/null +++ b/docs/doc_examples/34d63740b58209a3d031212909743925.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "openai-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: 
"openai_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/34efeade38445b2834749ced59782e25.asciidoc b/docs/doc_examples/34efeade38445b2834749ced59782e25.asciidoc deleted file mode 100644 index be97bbd98..000000000 --- a/docs/doc_examples/34efeade38445b2834749ced59782e25.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre', - order: { - 'playback_stats.max': 'desc' - } - }, - aggs: { - playback_stats: { - stats: { - field: 'play_count' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/35260b615d0b5628c95d7cc814c39bd3.asciidoc b/docs/doc_examples/35260b615d0b5628c95d7cc814c39bd3.asciidoc new file mode 100644 index 000000000..da118f577 --- /dev/null +++ b/docs/doc_examples/35260b615d0b5628c95d7cc814c39bd3.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + has_child: { + type: "child", + query: { + function_score: { + script_score: { + script: "_score * doc['click_count'].value", + }, + }, + }, + score_mode: "max", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/427f6b5c5376cbf0f71f242a60ca3d9e.asciidoc b/docs/doc_examples/353020cb30a885ee7f5ce2b141ba574a.asciidoc similarity index 69% rename from docs/doc_examples/427f6b5c5376cbf0f71f242a60ca3d9e.asciidoc rename to docs/doc_examples/353020cb30a885ee7f5ce2b141ba574a.asciidoc index a95439de4..73a931778 100644 --- a/docs/doc_examples/427f6b5c5376cbf0f71f242a60ca3d9e.asciidoc +++ b/docs/doc_examples/353020cb30a885ee7f5ce2b141ba574a.asciidoc @@ -4,10 +4,11 @@ [source, js] ---- const response = await client.search({ - index: 'alias2', - q: 'user:kimchy', - routing: '2,3' -}) -console.log(response) + query: { + prefix: { + user: "ki", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc b/docs/doc_examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc new file mode 100644 index 000000000..d5f33f225 --- /dev/null +++ b/docs/doc_examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.healthReport({ + verbose: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3544f17cb97b613a2f733707c676f759.asciidoc b/docs/doc_examples/3544f17cb97b613a2f733707c676f759.asciidoc new file mode 100644 index 000000000..0d38c0dcc --- /dev/null +++ b/docs/doc_examples/3544f17cb97b613a2f733707c676f759.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + filter_path: "aggregations", + aggs: { + f: { + filters: { + filters: { + hats: { + term: { + type: "hat", + }, + }, + t_shirts: { + term: { + type: "t-shirt", + }, + }, + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + }, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/3545261682af72f4bee57f2bac0a9590.asciidoc b/docs/doc_examples/3545261682af72f4bee57f2bac0a9590.asciidoc new file mode 100644 index 000000000..0cfae998e --- /dev/null +++ b/docs/doc_examples/3545261682af72f4bee57f2bac0a9590.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shardStores({ + status: "green", +}); +console.log(response); +---- diff --git a/docs/doc_examples/35563ef92dddef9d83906d9c43c60d0f.asciidoc b/docs/doc_examples/35563ef92dddef9d83906d9c43c60d0f.asciidoc new file mode 100644 index 000000000..9bb6c2042 --- /dev/null +++ b/docs/doc_examples/35563ef92dddef9d83906d9c43c60d0f.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mtermvectors({ + docs: [ + { + _index: "my-index-000001", + _id: "2", + term_statistics: true, + }, + { + _index: "my-index-000001", + _id: "1", + fields: ["message"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/52b2bfbdd78f8283b6f4891c48013237.asciidoc b/docs/doc_examples/355d0ee2fcb6c1fc403c6267f710e25a.asciidoc similarity index 55% rename from docs/doc_examples/52b2bfbdd78f8283b6f4891c48013237.asciidoc rename to docs/doc_examples/355d0ee2fcb6c1fc403c6267f710e25a.asciidoc index a87ac5363..8f37b6bd1 100644 --- a/docs/doc_examples/52b2bfbdd78f8283b6f4891c48013237.asciidoc +++ b/docs/doc_examples/355d0ee2fcb6c1fc403c6267f710e25a.asciidoc @@ -4,16 +4,12 @@ [source, js] ---- const response = await client.reindex({ - body: { - max_docs: 1, - source: { - index: 'twitter' - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response) + source: { + index: ["my-index-000001", "my-index-000002"], + }, + dest: { + index: "my-new-index-000002", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/357edc9d10e98ed776401c7a439a1a55.asciidoc b/docs/doc_examples/357edc9d10e98ed776401c7a439a1a55.asciidoc new file mode 100644 index 000000000..088bda3bc --- /dev/null +++ b/docs/doc_examples/357edc9d10e98ed776401c7a439a1a55.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveCluster({ + name: "not-present,clust*:my-index*,oldcluster:*", + ignore_unavailable: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/35a272df8c919a12d7c3106a18245748.asciidoc b/docs/doc_examples/35a272df8c919a12d7c3106a18245748.asciidoc new file mode 100644 index 000000000..40e01fb68 --- /dev/null +++ b/docs/doc_examples/35a272df8c919a12d7c3106a18245748.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.inferTrainedModel({ + model_id: "lang_ident_model_1", + docs: [ + { + text: "The fool doth think he is wise, but the wise man knows himself to be a fool.", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/35b686d9d9e915d0dea7a4251781767d.asciidoc b/docs/doc_examples/35b686d9d9e915d0dea7a4251781767d.asciidoc new file mode 100644 index 000000000..918d625f6 --- /dev/null +++ 
b/docs/doc_examples/35b686d9d9e915d0dea7a4251781767d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.reroute({ + metric: "none", + commands: [ + { + allocate_empty_primary: { + index: "my-index", + shard: 0, + node: "my-node", + accept_data_loss: "true", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/35be136ba9df7474a5521631e2a385b1.asciidoc b/docs/doc_examples/35be136ba9df7474a5521631e2a385b1.asciidoc new file mode 100644 index 000000000..ddd6e37f7 --- /dev/null +++ b/docs/doc_examples/35be136ba9df7474a5521631e2a385b1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.explainDataLifecycle({ + index: ".ds-metrics-2023.03.22-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/35c33ef48cf8a4ee368874141622f9d5.asciidoc b/docs/doc_examples/35c33ef48cf8a4ee368874141622f9d5.asciidoc new file mode 100644 index 000000000..efd76c619 --- /dev/null +++ b/docs/doc_examples/35c33ef48cf8a4ee368874141622f9d5.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + strings_as_text: { + match_mapping_type: "string", + mapping: { + type: "text", + }, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/35c664285f2e8b7d5d50ca37ae3ba794.asciidoc b/docs/doc_examples/35c664285f2e8b7d5d50ca37ae3ba794.asciidoc new file mode 100644 index 000000000..dc4c0d608 --- /dev/null +++ b/docs/doc_examples/35c664285f2e8b7d5d50ca37ae3ba794.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "GET /search", + }, + }, + collapse: { + field: "user.id", + }, + sort: ["user.id"], + search_after: ["dd5ce1ad"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/35e8da9410b8432cf4095f2541ad7b1d.asciidoc b/docs/doc_examples/35e8da9410b8432cf4095f2541ad7b1d.asciidoc deleted file mode 100644 index e84e5e35b..000000000 --- a/docs/doc_examples/35e8da9410b8432cf4095f2541ad7b1d.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - products: { - terms: { - field: 'product', - size: 5, - show_term_doc_count_error: true - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/35eef1765e9a5991d77592a0c7490fe0.asciidoc b/docs/doc_examples/35eef1765e9a5991d77592a0c7490fe0.asciidoc new file mode 100644 index 000000000..2fe9e17b4 --- /dev/null +++ b/docs/doc_examples/35eef1765e9a5991d77592a0c7490fe0.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + aggs: { + grade_min: { + min: { + field: 
"grade", + missing: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/35f892b475a1770f18328158be7039fd.asciidoc b/docs/doc_examples/35f892b475a1770f18328158be7039fd.asciidoc new file mode 100644 index 000000000..49bcef2d6 --- /dev/null +++ b/docs/doc_examples/35f892b475a1770f18328158be7039fd.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-2", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 3, + similarity: "dot_product", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/35fc63cbefce7bc131ad467b5ba209ef.asciidoc b/docs/doc_examples/35fc63cbefce7bc131ad467b5ba209ef.asciidoc new file mode 100644 index 000000000..037812101 --- /dev/null +++ b/docs/doc_examples/35fc63cbefce7bc131ad467b5ba209ef.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.allocation({ + v: "true", + s: "disk.avail", + h: "node,disk.percent,disk.avail,disk.total,disk.used,disk.indices,shards", +}); +console.log(response); +---- diff --git a/docs/doc_examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc b/docs/doc_examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc new file mode 100644 index 000000000..3302992dc --- /dev/null +++ b/docs/doc_examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information", + processors: [ + { + attachment: { + field: "data", + remove_binary: false, + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "attachment", + document: { + data: "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/36063ff9a318dba7bb0be3a230655dc8.asciidoc b/docs/doc_examples/36063ff9a318dba7bb0be3a230655dc8.asciidoc new file mode 100644 index 000000000..af0f597cc --- /dev/null +++ b/docs/doc_examples/36063ff9a318dba7bb0be3a230655dc8.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + long: { + type: "long", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + long: [0, 0, -123466, 87612], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc b/docs/doc_examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc new file mode 100644 index 000000000..1e2416525 --- /dev/null +++ b/docs/doc_examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples 
+ +[source, js] +---- +const response = await client.synonyms.putSynonym({ + id: "my-synonyms-set", + synonyms_set: [ + { + synonyms: "hello => hi => howdy", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/17de0020b228df961ad3c6b06233c948.asciidoc b/docs/doc_examples/360b3cef34bbddc5d9579ca95f0cb061.asciidoc similarity index 54% rename from docs/doc_examples/17de0020b228df961ad3c6b06233c948.asciidoc rename to docs/doc_examples/360b3cef34bbddc5d9579ca95f0cb061.asciidoc index b7cbc2f04..271b3580b 100644 --- a/docs/doc_examples/17de0020b228df961ad3c6b06233c948.asciidoc +++ b/docs/doc_examples/360b3cef34bbddc5d9579ca95f0cb061.asciidoc @@ -4,16 +4,13 @@ [source, js] ---- const response = await client.indices.putMapping({ - index: 'my_index', - body: { - properties: { - user_id: { - type: 'keyword', - ignore_above: 100 - } - } - } -}) -console.log(response) + index: "my-data-stream", + write_index_only: "true", + properties: { + message: { + type: "text", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/360c4f373e72ba861584ee85bd218124.asciidoc b/docs/doc_examples/360c4f373e72ba861584ee85bd218124.asciidoc new file mode 100644 index 000000000..0c46ff5ce --- /dev/null +++ b/docs/doc_examples/360c4f373e72ba861584ee85bd218124.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test_index", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["lowercase", "porter_stem"], + }, + }, + }, + }, + mappings: { + properties: { + query: { + type: "percolator", + }, + body: { + type: "text", + analyzer: "my_analyzer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3613f402ee63f0efb6b8d9c6a919b410.asciidoc b/docs/doc_examples/3613f402ee63f0efb6b8d9c6a919b410.asciidoc new file mode 100644 index 000000000..6b6a75b25 --- /dev/null +++ b/docs/doc_examples/3613f402ee63f0efb6b8d9c6a919b410.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + format: "txt", + query: + "\n FROM library\n | KEEP author, name, page_count, release_date\n | SORT page_count DESC\n | LIMIT 5\n ", + filter: { + range: { + page_count: { + gte: 100, + lte: 200, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/362dfccdb6f7933b22c909542e0b4e0a.asciidoc b/docs/doc_examples/362dfccdb6f7933b22c909542e0b4e0a.asciidoc new file mode 100644 index 000000000..0b499febe --- /dev/null +++ b/docs/doc_examples/362dfccdb6f7933b22c909542e0b4e0a.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-data-stream", + query: { + match: { + "user.id": "l7gk7f82", + }, + }, + script: { + source: "ctx._source.user.id = params.new_id", + params: { + new_id: "XgdX0NoX", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/365256ebdfa47b449780771d9beba8d9.asciidoc b/docs/doc_examples/365256ebdfa47b449780771d9beba8d9.asciidoc new file mode 100644 index 000000000..d2e864a8d --- /dev/null +++ b/docs/doc_examples/365256ebdfa47b449780771d9beba8d9.asciidoc @@ -0,0 +1,11 @@ +// This 
file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/_sync_job/my-connector-sync-job/_check_in", +}); +console.log(response); +---- diff --git a/docs/doc_examples/71ba9033107882f61cdc3b32fc73568d.asciidoc b/docs/doc_examples/36962727b806315b221e8a63e05caddc.asciidoc similarity index 54% rename from docs/doc_examples/71ba9033107882f61cdc3b32fc73568d.asciidoc rename to docs/doc_examples/36962727b806315b221e8a63e05caddc.asciidoc index b2e8f8a5c..f50a6c52c 100644 --- a/docs/doc_examples/71ba9033107882f61cdc3b32fc73568d.asciidoc +++ b/docs/doc_examples/36962727b806315b221e8a63e05caddc.asciidoc @@ -4,16 +4,13 @@ [source, js] ---- const response = await client.indices.putMapping({ - index: 'my-index', - body: { - properties: { - 'employee-id': { - type: 'keyword', - index: false - } - } - } -}) -console.log(response) + index: "my-index-000001", + properties: { + "employee-id": { + type: "keyword", + index: false, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/36b26905c5f96d0b785c3267fb63838d.asciidoc b/docs/doc_examples/36b26905c5f96d0b785c3267fb63838d.asciidoc new file mode 100644 index 000000000..8cea96414 --- /dev/null +++ b/docs/doc_examples/36b26905c5f96d0b785c3267fb63838d.asciidoc @@ -0,0 +1,609 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + ip: { + type: "ip", + }, + version: { + type: "version", + }, + missing_keyword: { + type: "keyword", + }, + "@timestamp": { + type: "date", + }, + type_test: { + type: "keyword", + }, + "@timestamp_pretty": { + type: "date", + format: "dd-MM-yyyy", + }, + event_type: { + type: "keyword", + }, + event: { + properties: { + category: { + type: "alias", + path: "event_type", + }, + }, + }, + host: { + type: "keyword", + }, + os: { + type: "keyword", + }, + bool: { + type: "boolean", + }, + uptime: { + type: "long", + }, + port: { + type: "long", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.create({ + index: "my-index-000002", + mappings: { + properties: { + ip: { + type: "ip", + }, + "@timestamp": { + type: "date", + }, + "@timestamp_pretty": { + type: "date", + format: "yyyy-MM-dd", + }, + type_test: { + type: "keyword", + }, + event_type: { + type: "keyword", + }, + event: { + properties: { + category: { + type: "alias", + path: "event_type", + }, + }, + }, + host: { + type: "keyword", + }, + op_sys: { + type: "keyword", + }, + bool: { + type: "boolean", + }, + uptime: { + type: "long", + }, + port: { + type: "long", + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.create({ + index: "my-index-000003", + mappings: { + properties: { + host_ip: { + type: "ip", + }, + "@timestamp": { + type: "date", + }, + date: { + type: "date", + }, + event_type: { + type: "keyword", + }, + event: { + properties: { + category: { + type: "alias", + path: "event_type", + }, + }, + }, + missing_keyword: { + type: "keyword", + }, + host: { + type: "keyword", + }, + os: { + type: "keyword", + }, + bool: { + type: "boolean", + }, + uptime: { + type: "long", + }, + port: { + type: "long", + }, + }, + }, +}); +console.log(response2); + +const response3 = await client.bulk({ + index: 
"my-index-000001", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + "@timestamp": "1234567891", + "@timestamp_pretty": "12-12-2022", + missing_keyword: "test", + type_test: "abc", + ip: "10.0.0.1", + event_type: "alert", + host: "doom", + uptime: 0, + port: 1234, + os: "win10", + version: "1.0.0", + id: 11, + }, + { + index: { + _id: 2, + }, + }, + { + "@timestamp": "1234567892", + "@timestamp_pretty": "13-12-2022", + event_type: "alert", + type_test: "abc", + host: "CS", + uptime: 5, + port: 1, + os: "win10", + version: "1.2.0", + id: 12, + }, + { + index: { + _id: 3, + }, + }, + { + "@timestamp": "1234567893", + "@timestamp_pretty": "12-12-2022", + event_type: "alert", + type_test: "abc", + host: "farcry", + uptime: 1, + port: 1234, + bool: false, + os: "win10", + version: "2.0.0", + id: 13, + }, + { + index: { + _id: 4, + }, + }, + { + "@timestamp": "1234567894", + "@timestamp_pretty": "13-12-2022", + event_type: "alert", + type_test: "abc", + host: "GTA", + uptime: 3, + port: 12, + os: "slack", + version: "10.0.0", + id: 14, + }, + { + index: { + _id: 5, + }, + }, + { + "@timestamp": "1234567895", + "@timestamp_pretty": "17-12-2022", + event_type: "alert", + host: "sniper 3d", + uptime: 6, + port: 1234, + os: "fedora", + version: "20.1.0", + id: 15, + }, + { + index: { + _id: 6, + }, + }, + { + "@timestamp": "1234568896", + "@timestamp_pretty": "17-12-2022", + event_type: "alert", + host: "doom", + port: 65123, + bool: true, + os: "redhat", + version: "20.10.0", + id: 16, + }, + { + index: { + _id: 7, + }, + }, + { + "@timestamp": "1234567897", + "@timestamp_pretty": "17-12-2022", + missing_keyword: "yyy", + event_type: "failure", + host: "doom", + uptime: 15, + port: 1234, + bool: true, + os: "redhat", + version: "20.2.0", + id: 17, + }, + { + index: { + _id: 8, + }, + }, + { + "@timestamp": "1234567898", + "@timestamp_pretty": "12-12-2022", + missing_keyword: "test", + event_type: "success", + host: "doom", + uptime: 16, + port: 512, + os: "win10", + version: "1.2.3", + id: 18, + }, + { + index: { + _id: 9, + }, + }, + { + "@timestamp": "1234567899", + "@timestamp_pretty": "15-12-2022", + missing_keyword: "test", + event_type: "success", + host: "GTA", + port: 12, + bool: true, + os: "win10", + version: "1.2.3", + id: 19, + }, + { + index: { + _id: 10, + }, + }, + { + "@timestamp": "1234567893", + missing_keyword: null, + ip: "10.0.0.5", + event_type: "alert", + host: "farcry", + uptime: 1, + port: 1234, + bool: true, + os: "win10", + version: "1.2.3", + id: 110, + }, + ], +}); +console.log(response3); + +const response4 = await client.bulk({ + index: "my-index-000002", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + "@timestamp": "1234567991", + type_test: "abc", + ip: "10.0.0.1", + event_type: "alert", + host: "doom", + uptime: 0, + port: 1234, + op_sys: "win10", + id: 21, + }, + { + index: { + _id: 2, + }, + }, + { + "@timestamp": "1234567992", + type_test: "abc", + event_type: "alert", + host: "CS", + uptime: 5, + port: 1, + op_sys: "win10", + id: 22, + }, + { + index: { + _id: 3, + }, + }, + { + "@timestamp": "1234567993", + type_test: "abc", + "@timestamp_pretty": "2022-12-17", + event_type: "alert", + host: "farcry", + uptime: 1, + port: 1234, + bool: false, + op_sys: "win10", + id: 23, + }, + { + index: { + _id: 4, + }, + }, + { + "@timestamp": "1234567994", + event_type: "alert", + host: "GTA", + uptime: 3, + port: 12, + op_sys: "slack", + id: 24, + }, + { + index: { + _id: 5, + }, + }, + { + "@timestamp": "1234567995", 
+ event_type: "alert", + host: "sniper 3d", + uptime: 6, + port: 1234, + op_sys: "fedora", + id: 25, + }, + { + index: { + _id: 6, + }, + }, + { + "@timestamp": "1234568996", + "@timestamp_pretty": "2022-12-17", + ip: "10.0.0.5", + event_type: "alert", + host: "doom", + port: 65123, + bool: true, + op_sys: "redhat", + id: 26, + }, + { + index: { + _id: 7, + }, + }, + { + "@timestamp": "1234567997", + "@timestamp_pretty": "2022-12-17", + event_type: "failure", + host: "doom", + uptime: 15, + port: 1234, + bool: true, + op_sys: "redhat", + id: 27, + }, + { + index: { + _id: 8, + }, + }, + { + "@timestamp": "1234567998", + ip: "10.0.0.1", + event_type: "success", + host: "doom", + uptime: 16, + port: 512, + op_sys: "win10", + id: 28, + }, + { + index: { + _id: 9, + }, + }, + { + "@timestamp": "1234567999", + ip: "10.0.0.1", + event_type: "success", + host: "GTA", + port: 12, + bool: false, + op_sys: "win10", + id: 29, + }, + ], +}); +console.log(response4); + +const response5 = await client.bulk({ + index: "my-index-000003", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + "@timestamp": "1334567891", + host_ip: "10.0.0.1", + event_type: "alert", + host: "doom", + uptime: 0, + port: 12, + os: "win10", + id: 31, + }, + { + index: { + _id: 2, + }, + }, + { + "@timestamp": "1334567892", + event_type: "alert", + host: "CS", + os: "win10", + id: 32, + }, + { + index: { + _id: 3, + }, + }, + { + "@timestamp": "1334567893", + event_type: "alert", + host: "farcry", + bool: true, + os: "win10", + id: 33, + }, + { + index: { + _id: 4, + }, + }, + { + "@timestamp": "1334567894", + event_type: "alert", + host: "GTA", + os: "slack", + bool: true, + id: 34, + }, + { + index: { + _id: 5, + }, + }, + { + "@timestamp": "1234567895", + event_type: "alert", + host: "sniper 3d", + os: "fedora", + id: 35, + }, + { + index: { + _id: 6, + }, + }, + { + "@timestamp": "1234578896", + host_ip: "10.0.0.1", + event_type: "alert", + host: "doom", + bool: true, + os: "redhat", + id: 36, + }, + { + index: { + _id: 7, + }, + }, + { + "@timestamp": "1234567897", + event_type: "failure", + missing_keyword: "test", + host: "doom", + bool: true, + os: "redhat", + id: 37, + }, + { + index: { + _id: 8, + }, + }, + { + "@timestamp": "1234577898", + event_type: "success", + host: "doom", + os: "win10", + id: 38, + date: "1671235200000", + }, + { + index: { + _id: 9, + }, + }, + { + "@timestamp": "1234577899", + host_ip: "10.0.0.5", + event_type: "success", + host: "GTA", + bool: true, + os: "win10", + id: 39, + }, + ], +}); +console.log(response5); +---- diff --git a/docs/doc_examples/36b2778f23d0955255f52c075c4d213d.asciidoc b/docs/doc_examples/36b2778f23d0955255f52c075c4d213d.asciidoc deleted file mode 100644 index 41c977ec1..000000000 --- a/docs/doc_examples/36b2778f23d0955255f52c075c4d213d.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - remote: { - host: 'http://otherhost:9200/', - username: 'user', - password: 'pass' - }, - index: 'source', - query: { - match: { - test: 'data' - } - } - }, - dest: { - index: 'dest' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/36b86b97feedcf5632824eefc251d6ed.asciidoc b/docs/doc_examples/36b86b97feedcf5632824eefc251d6ed.asciidoc index 408ce2f71..c55cad6ae 100644 --- a/docs/doc_examples/36b86b97feedcf5632824eefc251d6ed.asciidoc +++ 
b/docs/doc_examples/36b86b97feedcf5632824eefc251d6ed.asciidoc @@ -1,12 +1,15 @@ -[source,js] +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] ---- const response = await client.search({ - index: 'books', + index: "books", query: { match: { - name: 'brave' - } - } -}) -console.log(response) + name: "brave", + }, + }, +}); +console.log(response); ---- diff --git a/docs/doc_examples/36d229f734adcdab00be266a7ce038b1.asciidoc b/docs/doc_examples/36d229f734adcdab00be266a7ce038b1.asciidoc new file mode 100644 index 000000000..771d1a09d --- /dev/null +++ b/docs/doc_examples/36d229f734adcdab00be266a7ce038b1.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-bit-vectors", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 40, + element_type: "bit", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/36da9668fef56910370f16bfb772cc40.asciidoc b/docs/doc_examples/36da9668fef56910370f16bfb772cc40.asciidoc new file mode 100644 index 000000000..910f8a458 --- /dev/null +++ b/docs/doc_examples/36da9668fef56910370f16bfb772cc40.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + metric: "request_cache", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/36e09bbd5896498ede0f5d37a18eae2c.asciidoc b/docs/doc_examples/36e09bbd5896498ede0f5d37a18eae2c.asciidoc new file mode 100644 index 000000000..c8fd4c771 --- /dev/null +++ b/docs/doc_examples/36e09bbd5896498ede0f5d37a18eae2c.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 2, + routing: 1, + refresh: "true", + document: { + text: "This is a child document.", + "my-join-field": { + name: "my-child", + parent: "1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a8995fd31351045d99c78e40444c8ea.asciidoc b/docs/doc_examples/36fae9dfc0b815546b45745bac054b67.asciidoc similarity index 63% rename from docs/doc_examples/9a8995fd31351045d99c78e40444c8ea.asciidoc rename to docs/doc_examples/36fae9dfc0b815546b45745bac054b67.asciidoc index b736725fb..782d9bf5e 100644 --- a/docs/doc_examples/9a8995fd31351045d99c78e40444c8ea.asciidoc +++ b/docs/doc_examples/36fae9dfc0b815546b45745bac054b67.asciidoc @@ -4,16 +4,12 @@ [source, js] ---- const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre' - } - } - } - } -}) -console.log(response) + index: "my-index-000001", + query: { + match: { + model_number: "HG537PU", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/370b297ed3433577adf53e64f572d89d.asciidoc b/docs/doc_examples/370b297ed3433577adf53e64f572d89d.asciidoc new file mode 100644 index 000000000..4e76cc905 --- /dev/null +++ b/docs/doc_examples/370b297ed3433577adf53e64f572d89d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.transport.request({ + method: "DELETE", + path: "/_connector/_sync_job/my-connector-sync-job-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/371962cf63e65c10026177c6a1bad0b6.asciidoc b/docs/doc_examples/371962cf63e65c10026177c6a1bad0b6.asciidoc new file mode 100644 index 000000000..633073dea --- /dev/null +++ b/docs/doc_examples/371962cf63e65c10026177c6a1bad0b6.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.start(); +console.log(response); +---- diff --git a/docs/doc_examples/3722cb3705b6bc7f486969deace3dd83.asciidoc b/docs/doc_examples/3722cb3705b6bc7f486969deace3dd83.asciidoc deleted file mode 100644 index 4b90869e9..000000000 --- a/docs/doc_examples/3722cb3705b6bc7f486969deace3dd83.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'sales', - size: '0', - body: { - aggs: { - type_count: { - value_count: { - script: { - source: "doc['type'].value" - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/37530f35f315b9f35e3e6a13cf2a1ccd.asciidoc b/docs/doc_examples/37530f35f315b9f35e3e6a13cf2a1ccd.asciidoc new file mode 100644 index 000000000..98875ead3 --- /dev/null +++ b/docs/doc_examples/37530f35f315b9f35e3e6a13cf2a1ccd.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + actors: { + terms: { + field: "actors", + size: 10, + collect_mode: "breadth_first", + }, + aggs: { + costars: { + terms: { + field: "actors", + size: 5, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc b/docs/doc_examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc new file mode 100644 index 000000000..7e3581418 --- /dev/null +++ b/docs/doc_examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + scroll: "1m", + slice: { + id: 0, + max: 2, + }, + query: { + match: { + message: "foo", + }, + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + scroll: "1m", + slice: { + id: 1, + max: 2, + }, + query: { + match: { + message: "foo", + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/3759ca688c4bd3c838780a9aad63258b.asciidoc b/docs/doc_examples/3759ca688c4bd3c838780a9aad63258b.asciidoc new file mode 100644 index 000000000..1b4c9b4a6 --- /dev/null +++ b/docs/doc_examples/3759ca688c4bd3c838780a9aad63258b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getIndexTemplate({ + name: "template_1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/375bf2c51ce6cc386f9d4d635d5e84a7.asciidoc b/docs/doc_examples/375bf2c51ce6cc386f9d4d635d5e84a7.asciidoc new file mode 100644 index 000000000..4da50d7f2 --- /dev/null +++ 
b/docs/doc_examples/375bf2c51ce6cc386f9d4d635d5e84a7.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + geo_grid: { + location: { + geohex: "811fbffffffffff", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/376fbc965e1b093f6dbc198a94c83aa9.asciidoc b/docs/doc_examples/376fbc965e1b093f6dbc198a94c83aa9.asciidoc new file mode 100644 index 000000000..201aa0674 --- /dev/null +++ b/docs/doc_examples/376fbc965e1b093f6dbc198a94c83aa9.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index", + refresh: "true", + operations: [ + { + index: {}, + }, + { + gc: "[2021-04-27T16:16:34.699+0000][82460][gc,heap,exit] class space used 266K, capacity 384K, committed 384K, reserved 1048576K", + }, + { + index: {}, + }, + { + gc: "[2021-03-24T20:27:24.184+0000][90239][gc,heap,exit] class space used 15255K, capacity 16726K, committed 16844K, reserved 1048576K", + }, + { + index: {}, + }, + { + gc: "[2021-03-24T20:27:24.184+0000][90239][gc,heap,exit] Metaspace used 115409K, capacity 119541K, committed 120248K, reserved 1153024K", + }, + { + index: {}, + }, + { + gc: "[2021-04-19T15:03:21.735+0000][84408][gc,heap,exit] class space used 14503K, capacity 15894K, committed 15948K, reserved 1048576K", + }, + { + index: {}, + }, + { + gc: "[2021-04-19T15:03:21.735+0000][84408][gc,heap,exit] Metaspace used 107719K, capacity 111775K, committed 112724K, reserved 1146880K", + }, + { + index: {}, + }, + { + gc: "[2021-04-27T16:16:34.699+0000][82460][gc,heap,exit] class space used 266K, capacity 367K, committed 384K, reserved 1048576K", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/376ff4b2b5f657481af78a778aaab57f.asciidoc b/docs/doc_examples/376ff4b2b5f657481af78a778aaab57f.asciidoc new file mode 100644 index 000000000..8f9731735 --- /dev/null +++ b/docs/doc_examples/376ff4b2b5f657481af78a778aaab57f.asciidoc @@ -0,0 +1,74 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + nr: { + type: "integer", + }, + state: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-index", + refresh: "true", + operations: [ + { + index: {}, + }, + { + nr: 1, + state: "started", + }, + { + index: {}, + }, + { + nr: 2, + state: "stopped", + }, + { + index: {}, + }, + { + nr: 3, + state: "N/A", + }, + { + index: {}, + }, + { + nr: 4, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index", + filter_path: "aggregations", + aggs: { + my_top_metrics: { + top_metrics: { + metrics: { + field: "state", + missing: "N/A", + }, + sort: { + nr: "desc", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/377af0ea9b19c113f224d8150890b41b.asciidoc b/docs/doc_examples/377af0ea9b19c113f224d8150890b41b.asciidoc new file mode 100644 index 000000000..9b5a9864f --- /dev/null +++ b/docs/doc_examples/377af0ea9b19c113f224d8150890b41b.asciidoc @@ -0,0 +1,74 @@ +// This file is autogenerated, DO NOT EDIT 
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    bool: {
+      filter: [
+        {
+          term: {
+            "event.outcome": "failure",
+          },
+        },
+        {
+          range: {
+            "@timestamp": {
+              gte: "2021-02-01",
+              lt: "2021-02-04",
+            },
+          },
+        },
+        {
+          term: {
+            "service.name": {
+              value: "frontend-node",
+            },
+          },
+        },
+      ],
+    },
+  },
+  aggs: {
+    failure_p_value: {
+      significant_terms: {
+        field: "user_agent.version",
+        background_filter: {
+          bool: {
+            must_not: [
+              {
+                term: {
+                  "event.outcome": "failure",
+                },
+              },
+            ],
+            filter: [
+              {
+                range: {
+                  "@timestamp": {
+                    gte: "2021-02-01",
+                    lt: "2021-02-04",
+                  },
+                },
+              },
+              {
+                term: {
+                  "service.name": {
+                    value: "frontend-node",
+                  },
+                },
+              },
+            ],
+          },
+        },
+        p_value: {
+          background_is_superset: false,
+          normalize_above: 1000,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/378e55f78fa13578a1302bae8d479765.asciidoc b/docs/doc_examples/378e55f78fa13578a1302bae8d479765.asciidoc
new file mode 100644
index 000000000..dc69a09f6
--- /dev/null
+++ b/docs/doc_examples/378e55f78fa13578a1302bae8d479765.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      color: {
+        type: "keyword",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/37983daac3d9c8582583a507b3adb7f2.asciidoc b/docs/doc_examples/37983daac3d9c8582583a507b3adb7f2.asciidoc
new file mode 100644
index 000000000..9eee60e5d
--- /dev/null
+++ b/docs/doc_examples/37983daac3d9c8582583a507b3adb7f2.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.shutdown.putNode({
+  node_id: "USpTGYaBSIKbgSUJR2Z9lg",
+  type: "restart",
+  reason: "Demonstrating how the node shutdown API works",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/37ae7c3e4d6d954487ec4185fe7d9ec8.asciidoc b/docs/doc_examples/37ae7c3e4d6d954487ec4185fe7d9ec8.asciidoc
new file mode 100644
index 000000000..523c79f6d
--- /dev/null
+++ b/docs/doc_examples/37ae7c3e4d6d954487ec4185fe7d9ec8.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  aggregations: {
+    forces: {
+      terms: {
+        field: "force",
+      },
+      aggregations: {
+        significant_crime_types: {
+          significant_terms: {
+            field: "crime_type",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/37b84f2ab7c2f6b4fe0e14cc7e018b1f.asciidoc b/docs/doc_examples/37b84f2ab7c2f6b4fe0e14cc7e018b1f.asciidoc
new file mode 100644
index 000000000..7d06f09f6
--- /dev/null
+++ b/docs/doc_examples/37b84f2ab7c2f6b4fe0e14cc7e018b1f.asciidoc
@@ -0,0 +1,39 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putSettings({
+  persistent: {
+    cluster: {
+      remote: {
+        clusterB: {
+          mode: "proxy",
+          skip_unavailable: true,
+          server_name: "clusterb.es.region-b.gcp.elastic-cloud.com",
+          proxy_socket_connections: 18,
+          proxy_address: "clusterb.es.region-b.gcp.elastic-cloud.com:9400",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.cluster.putSettings({
+  persistent: {
+    cluster: {
+      remote: {
+        clusterA: {
+          mode: "proxy",
+          skip_unavailable: true,
+          server_name: "clustera.es.region-a.gcp.elastic-cloud.com",
+          proxy_socket_connections: 18,
+          proxy_address: "clustera.es.region-a.gcp.elastic-cloud.com:9400",
+        },
+      },
+    },
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/37c73410bf13429279cbc61a413957d8.asciidoc b/docs/doc_examples/37c73410bf13429279cbc61a413957d8.asciidoc
new file mode 100644
index 000000000..62a84cd43
--- /dev/null
+++ b/docs/doc_examples/37c73410bf13429279cbc61a413957d8.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.stats({
+  filter_path: "indices.shards.total",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/37eaab0630976d3dee90a52011342883.asciidoc b/docs/doc_examples/37eaab0630976d3dee90a52011342883.asciidoc
new file mode 100644
index 000000000..722466b4d
--- /dev/null
+++ b/docs/doc_examples/37eaab0630976d3dee90a52011342883.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        my_analyzer: {
+          tokenizer: "whitespace",
+          filter: ["stop"],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/37f1f2e75ed95308ae436bbbb8d5645e.asciidoc b/docs/doc_examples/37f1f2e75ed95308ae436bbbb8d5645e.asciidoc
new file mode 100644
index 000000000..40d543a28
--- /dev/null
+++ b/docs/doc_examples/37f1f2e75ed95308ae436bbbb8d5645e.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.license.postStartTrial({
+  acknowledge: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc b/docs/doc_examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc
new file mode 100644
index 000000000..456a47a30
--- /dev/null
+++ b/docs/doc_examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.revertModelSnapshot({
+  job_id: "low_request_rate",
+  snapshot_id: 1637092688,
+  delete_intervening_results: true,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc b/docs/doc_examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc
new file mode 100644
index 000000000..4f60f6d2d
--- /dev/null
+++ b/docs/doc_examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.createApiKey({
+  name: "application-key-1",
+  metadata: {
+    application: "my-application",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/388d3eda4f792d3fce044777739217e6.asciidoc b/docs/doc_examples/388d3eda4f792d3fce044777739217e6.asciidoc
new file mode 100644
index 000000000..5ec9ee4b3
--- /dev/null
+++ b/docs/doc_examples/388d3eda4f792d3fce044777739217e6.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.evaluateDataFrame({
+  index: "animal_classification",
+  evaluation: {
+    classification: {
+      actual_field: "animal_class",
+      predicted_field: "ml.animal_class_prediction",
+      metrics: {
+        multiclass_confusion_matrix: {},
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/388ec2b038d3ad69378f4c2e5bc36dce.asciidoc b/docs/doc_examples/388ec2b038d3ad69378f4c2e5bc36dce.asciidoc
new file mode 100644
index 000000000..4052bc3ed
--- /dev/null
+++ b/docs/doc_examples/388ec2b038d3ad69378f4c2e5bc36dce.asciidoc
@@ -0,0 +1,38 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    span_near: {
+      clauses: [
+        {
+          span_term: {
+            text: "quick brown",
+          },
+        },
+        {
+          span_field_masking: {
+            query: {
+              span_term: {
+                "text.stems": "fox",
+              },
+            },
+            field: "text",
+          },
+        },
+      ],
+      slop: 5,
+      in_order: false,
+    },
+  },
+  highlight: {
+    require_field_match: false,
+    fields: {
+      "*": {},
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/38af4a55c1ea0f908dc7b06d680d2789.asciidoc b/docs/doc_examples/38af4a55c1ea0f908dc7b06d680d2789.asciidoc
new file mode 100644
index 000000000..95db11a5b
--- /dev/null
+++ b/docs/doc_examples/38af4a55c1ea0f908dc7b06d680d2789.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.createDataStream({
+  name: "new-data-stream",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/38b20fe981605e80a41517e9aa13134a.asciidoc b/docs/doc_examples/38b20fe981605e80a41517e9aa13134a.asciidoc
new file mode 100644
index 000000000..0260866f4
--- /dev/null
+++ b/docs/doc_examples/38b20fe981605e80a41517e9aa13134a.asciidoc
@@ -0,0 +1,34 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  aggs: {
+    sales_per_month: {
+      date_histogram: {
+        field: "date",
+        calendar_interval: "month",
+      },
+      aggs: {
+        total_sales: {
+          sum: {
+            field: "price",
+          },
+        },
+        sales_bucket_filter: {
+          bucket_selector: {
+            buckets_path: {
+              totalSales: "total_sales",
+            },
+            script: "params.totalSales > 200",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/38ba93890494bfa7beece58dffa44f98.asciidoc b/docs/doc_examples/38ba93890494bfa7beece58dffa44f98.asciidoc
new file mode 100644
index 000000000..f9dca64fb
--- /dev/null
+++ b/docs/doc_examples/38ba93890494bfa7beece58dffa44f98.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.bulk({
+  index: "test-index",
+  operations: [
+    {
+      update: {
+        _id: "1",
+      },
+    },
+    {
+      doc: {
+        infer_field: "updated inference field",
+        source_field: "updated source field",
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/38eed000de433b540116928681c520d3.asciidoc b/docs/doc_examples/38eed000de433b540116928681c520d3.asciidoc
new file mode 100644
index 000000000..4807c0f30
--- /dev/null
+++ b/docs/doc_examples/38eed000de433b540116928681c520d3.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.previewDatafeed({
+  datafeed_id: "datafeed-high_sum_total_sales",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/38f7739f750f1411bccf511a0abaaea3.asciidoc b/docs/doc_examples/38f7739f750f1411bccf511a0abaaea3.asciidoc
new file mode 100644
index 000000000..671550520
--- /dev/null
+++ b/docs/doc_examples/38f7739f750f1411bccf511a0abaaea3.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.fieldCaps({
+  fields: "rating",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/38ffa96674b5fd4042589af0ebb0437b.asciidoc b/docs/doc_examples/38ffa96674b5fd4042589af0ebb0437b.asciidoc
new file mode 100644
index 000000000..4a6e0dbd6
--- /dev/null
+++ b/docs/doc_examples/38ffa96674b5fd4042589af0ebb0437b.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRoleMapping({
+  name: "basic_users",
+  roles: ["user"],
+  rules: {
+    field: {
+      groups: "cn=users,dc=example,dc=com",
+    },
+  },
+  enabled: true,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3924ee252581ebb96ac0e60046125ae8.asciidoc b/docs/doc_examples/3924ee252581ebb96ac0e60046125ae8.asciidoc
new file mode 100644
index 000000000..a440d8210
--- /dev/null
+++ b/docs/doc_examples/3924ee252581ebb96ac0e60046125ae8.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.getUser({
+  username: "jacknich",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3951d7fcd7f849fa278daf342872125a.asciidoc b/docs/doc_examples/3951d7fcd7f849fa278daf342872125a.asciidoc
new file mode 100644
index 000000000..6357296cf
--- /dev/null
+++ b/docs/doc_examples/3951d7fcd7f849fa278daf342872125a.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  index: "analyze_sample",
+  text: "this is a test",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/39760996f94ad34aaceaa16a5cc97993.asciidoc b/docs/doc_examples/39760996f94ad34aaceaa16a5cc97993.asciidoc
new file mode 100644
index 000000000..77a80d74b
--- /dev/null
+++ b/docs/doc_examples/39760996f94ad34aaceaa16a5cc97993.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.shutdown.getNode({
+  node_id: "USpTGYaBSIKbgSUJR2Z9lg",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc b/docs/doc_examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc
new file mode 100644
index 000000000..38335e2c1
--- /dev/null
+++ b/docs/doc_examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.dataStreamsStats({
+  name: "datastream",
+  human: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/397bdb40d0146102f1f4c6a35675e16a.asciidoc b/docs/doc_examples/397bdb40d0146102f1f4c6a35675e16a.asciidoc
new file mode 100644
index 000000000..cb5ab8825
--- /dev/null
+++ b/docs/doc_examples/397bdb40d0146102f1f4c6a35675e16a.asciidoc
@@ -0,0 +1,57 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "index",
+  settings: {
+    analysis: {
+      analyzer: {
+        english_exact: {
+          tokenizer: "standard",
+          filter: ["lowercase"],
+        },
+      },
+    },
+  },
+  mappings: {
+    properties: {
+      body: {
+        type: "text",
+        analyzer: "english",
+        fields: {
+          exact: {
+            type: "text",
+            analyzer: "english_exact",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "index",
+  id: 1,
+  document: {
+    body: "Ski resort",
+  },
+});
+console.log(response1);
+
+const response2 = await client.index({
+  index: "index",
+  id: 2,
+  document: {
+    body: "A pair of skis",
+  },
+});
+console.log(response2);
+
+const response3 = await client.indices.refresh({
+  index: "index",
+});
+console.log(response3);
+----
diff --git a/docs/doc_examples/39963032d423e2f20f53c4621b6ca3c6.asciidoc b/docs/doc_examples/39963032d423e2f20f53c4621b6ca3c6.asciidoc
new file mode 100644
index 000000000..233d15c49
--- /dev/null
+++ b/docs/doc_examples/39963032d423e2f20f53c4621b6ca3c6.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  tokenizer: "ngram",
+  text: "Quick Fox",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/39a6a038c4b551022afe83de0523634e.asciidoc b/docs/doc_examples/39a6a038c4b551022afe83de0523634e.asciidoc
deleted file mode 100644
index 0cc19bd7f..000000000
--- a/docs/doc_examples/39a6a038c4b551022afe83de0523634e.asciidoc
+++ /dev/null
@@ -1,23 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  index: 'sales',
-  size: '0',
-  body: {
-    aggs: {
-      sale_date: {
-        date_histogram: {
-          field: 'date',
-          calendar_interval: 'year',
-          missing: '2000/01/01'
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc b/docs/doc_examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc
new file mode 100644
index 000000000..3ab16d3e4
--- /dev/null
+++ b/docs/doc_examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc
@@ -0,0 +1,38 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  include_named_queries_score: "true",
+  query: {
+    bool: {
+      should: [
+        {
+          match: {
+            "name.first": {
+              query: "shay",
+              _name: "first",
+            },
+          },
+        },
+        {
+          match: {
+            "name.last": {
+              query: "banon",
+              _name: "last",
+            },
+          },
+        },
+      ],
+      filter: {
+        terms: {
+          "name.last": ["banon", "kimchy"],
+          _name: "test",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc b/docs/doc_examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc
new file mode 100644
index 000000000..2152c1f8e
--- /dev/null
+++ b/docs/doc_examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.getCalendarEvents({
+  calendar_id: "planned-outages",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc b/docs/doc_examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc
new file mode 100644
index 000000000..ab9097f4d
--- /dev/null
+++ b/docs/doc_examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc
@@ -0,0 +1,56 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.watcher.putWatch({
+  id: "my-watch",
+  trigger: {
+    schedule: {
+      cron: "0 0/1 * * * ?",
+    },
+  },
+  input: {
+    search: {
+      request: {
+        indices: ["logstash*"],
+        body: {
+          query: {
+            bool: {
+              must: {
+                match: {
+                  response: 404,
+                },
+              },
+              filter: {
+                range: {
+                  "@timestamp": {
+                    from: "{{ctx.trigger.scheduled_time}}||-5m",
+                    to: "{{ctx.trigger.triggered_time}}",
+                  },
+                },
+              },
+            },
+          },
+        },
+      },
+    },
+  },
+  condition: {
+    compare: {
+      "ctx.payload.hits.total": {
+        gt: 0,
+      },
+    },
+  },
+  actions: {
+    email_admin: {
+      email: {
+        to: "admin@domain.host.com",
+        subject: "404 recently encountered",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3a2953fd81d65118a776c87a81530e15.asciidoc b/docs/doc_examples/3a2953fd81d65118a776c87a81530e15.asciidoc
new file mode 100644
index 000000000..cf4bd65a7
--- /dev/null
+++ b/docs/doc_examples/3a2953fd81d65118a776c87a81530e15.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    match: {
+      "user.id": "kimchy",
+    },
+  },
+  highlight: {
+    order: "score",
+    fields: {
+      comment: {
+        fragment_size: 150,
+        number_of_fragments: 3,
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc b/docs/doc_examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc
new file mode 100644
index 000000000..597e9e13d
--- /dev/null
+++ b/docs/doc_examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    index: {
+      mode: "standard",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3a3adae6dbb2c0316a7d98d0a6c1d4f8.asciidoc b/docs/doc_examples/3a3adae6dbb2c0316a7d98d0a6c1d4f8.asciidoc
new file mode 100644
index 000000000..46049d94d
--- /dev/null
+++ b/docs/doc_examples/3a3adae6dbb2c0316a7d98d0a6c1d4f8.asciidoc
@@ -0,0 +1,36 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "quantized-image-index",
+  knn: {
+    field: "image-vector",
+    query_vector: [0.1, -2],
+    k: 15,
+    num_candidates: 100,
+  },
+  fields: ["title"],
+  rescore: {
+    window_size: 10,
+    query: {
+      rescore_query: {
+        script_score: {
+          query: {
+            match_all: {},
+          },
+          script: {
+            source:
+              "cosineSimilarity(params.query_vector, 'image-vector') + 1.0",
+            params: {
+              query_vector: [0.1, -2],
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3a3e6e2627cafa08e4402a0de95785cc.asciidoc b/docs/doc_examples/3a3e6e2627cafa08e4402a0de95785cc.asciidoc
new file mode 100644
index 000000000..493323793
--- /dev/null
+++ b/docs/doc_examples/3a3e6e2627cafa08e4402a0de95785cc.asciidoc
@@ -0,0 +1,30 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  query: {
+    match: {
+      message: "you know for search",
+    },
+  },
+  collapse: {
+    field: "user.id",
+  },
+  rescore: {
+    window_size: 50,
+    query: {
+      rescore_query: {
+        match_phrase: {
+          message: "you know for search",
+        },
+      },
+      query_weight: 0.3,
+      rescore_query_weight: 1.4,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3a5f2e2313614ea9693545edee22ac43.asciidoc b/docs/doc_examples/3a5f2e2313614ea9693545edee22ac43.asciidoc
new file mode 100644
index 000000000..1ed8c9d64
--- /dev/null
+++ b/docs/doc_examples/3a5f2e2313614ea9693545edee22ac43.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.deleteServiceToken({
+  namespace: "elastic",
+  service: "fleet-server",
+  name: "token42",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc b/docs/doc_examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc
new file mode 100644
index 000000000..d68abb0d9
--- /dev/null
+++ b/docs/doc_examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      post_date: {
+        type: "date",
+      },
+      user: {
+        type: "keyword",
+      },
+      name: {
+        type: "keyword",
+      },
+      age: {
+        type: "integer",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3a64ae799cc03fadbb802794730c23da.asciidoc b/docs/doc_examples/3a64ae799cc03fadbb802794730c23da.asciidoc
new file mode 100644
index 000000000..4869ca8f6
--- /dev/null
+++ b/docs/doc_examples/3a64ae799cc03fadbb802794730c23da.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "example_points",
+  mappings: {
+    properties: {
+      location: {
+        type: "geo_point",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "example_points",
+  id: 1,
+  refresh: "true",
+  document: {
+    name: "Wind & Wetter, Berlin, Germany",
+    location: [13.400544, 52.530286],
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/3a700f836d8d5da1b656a876554028aa.asciidoc b/docs/doc_examples/3a700f836d8d5da1b656a876554028aa.asciidoc
deleted file mode 100644
index 509b1f360..000000000
--- a/docs/doc_examples/3a700f836d8d5da1b656a876554028aa.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.clearScroll({
-  body: {
-    scroll_id: [
-      'DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==',
-      'DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB'
-    ]
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/3a7a6ab88a49b484fafb10c8eb09b562.asciidoc b/docs/doc_examples/3a7a6ab88a49b484fafb10c8eb09b562.asciidoc
new file mode 100644
index 000000000..1362f5945
--- /dev/null
+++ b/docs/doc_examples/3a7a6ab88a49b484fafb10c8eb09b562.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "cohere_embeddings",
+  processors: [
+    {
+      inference: {
+        model_id: "cohere_embeddings",
+        input_output: {
+          input_field: "content",
+          output_field: "content_embedding",
+        },
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3aa0e2d25a51bf5f3f0bda7fd8403bf2.asciidoc b/docs/doc_examples/3aa0e2d25a51bf5f3f0bda7fd8403bf2.asciidoc
new file mode 100644
index 000000000..1c6af827d
--- /dev/null
+++ b/docs/doc_examples/3aa0e2d25a51bf5f3f0bda7fd8403bf2.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        default: {
+          tokenizer: "whitespace",
+          filter: ["my_custom_stop_words_filter"],
+        },
+      },
+      filter: {
+        my_custom_stop_words_filter: {
+          type: "stop",
+          ignore_case: true,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3abedc1d68fe1d20621157406b2b1de0.asciidoc b/docs/doc_examples/3abedc1d68fe1d20621157406b2b1de0.asciidoc
new file mode 100644
index 000000000..df7a51c73
--- /dev/null
+++ b/docs/doc_examples/3abedc1d68fe1d20621157406b2b1de0.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        my_analyzer: {
+          tokenizer: "keyword",
+          filter: ["my_custom_word_delimiter_filter"],
+        },
+      },
+      filter: {
+        my_custom_word_delimiter_filter: {
+          type: "word_delimiter",
+          type_table: ["- => ALPHA"],
+          split_on_case_change: false,
+          split_on_numerics: false,
+          stem_english_possessive: true,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3ac075c5b5bbe648d40d06cce3061367.asciidoc b/docs/doc_examples/3ac075c5b5bbe648d40d06cce3061367.asciidoc
new file mode 100644
index 000000000..c8b4bd9b1
--- /dev/null
+++ b/docs/doc_examples/3ac075c5b5bbe648d40d06cce3061367.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.renderSearchTemplate({
+  source:
+    '{ "query": { "bool": { "filter": [ {{#year_scope}} { "range": { "@timestamp": { "gte": "now-1y/d", "lt": "now/d" } } }, {{/year_scope}} { "term": { "user.id": "{{user_id}}" }}]}}}',
+  params: {
+    year_scope: false,
+    user_id: "kimchy",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc b/docs/doc_examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc
new file mode 100644
index 000000000..790356725
--- /dev/null
+++ b/docs/doc_examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.deleteJob({
+  job_id: "total-requests",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3af10fde8138d9d95df127d39d9a0ed2.asciidoc b/docs/doc_examples/3af10fde8138d9d95df127d39d9a0ed2.asciidoc
new file mode 100644
index 000000000..423dd5ba8
--- /dev/null
+++ b/docs/doc_examples/3af10fde8138d9d95df127d39d9a0ed2.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putSettings({
+  persistent: {
+    "cluster.max_shards_per_node": null,
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc b/docs/doc_examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc
new file mode 100644
index 000000000..de00013ae
--- /dev/null
+++ b/docs/doc_examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc
@@ -0,0 +1,40 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "serbian_example",
+  settings: {
+    analysis: {
+      filter: {
+        serbian_stop: {
+          type: "stop",
+          stopwords: "_serbian_",
+        },
+        serbian_keywords: {
+          type: "keyword_marker",
+          keywords: ["пример"],
+        },
+        serbian_stemmer: {
+          type: "stemmer",
+          language: "serbian",
+        },
+      },
+      analyzer: {
+        rebuilt_serbian: {
+          tokenizer: "standard",
+          filter: [
+            "lowercase",
+            "serbian_stop",
+            "serbian_keywords",
+            "serbian_stemmer",
+            "serbian_normalization",
+          ],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3b0475515ee692a2d9850c2bd7cdb895.asciidoc b/docs/doc_examples/3b0475515ee692a2d9850c2bd7cdb895.asciidoc
new file mode 100644
index 000000000..0db0a8821
--- /dev/null
+++ b/docs/doc_examples/3b0475515ee692a2d9850c2bd7cdb895.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    dynamic_templates: [
+      {
+        unindexed_longs: {
+          match_mapping_type: "long",
+          mapping: {
+            type: "long",
+            index: false,
+          },
+        },
+      },
+      {
+        unindexed_doubles: {
+          match_mapping_type: "double",
+          mapping: {
+            type: "float",
+            index: false,
+          },
+        },
+      },
+    ],
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3b04cc894e6a47d57983484010feac0c.asciidoc b/docs/doc_examples/3b04cc894e6a47d57983484010feac0c.asciidoc
index f9bc52491..fbb6caf31 100644
--- a/docs/doc_examples/3b04cc894e6a47d57983484010feac0c.asciidoc
+++ b/docs/doc_examples/3b04cc894e6a47d57983484010feac0c.asciidoc
@@ -3,16 +3,15 @@
 
 [source, js]
 ----
-const response0 = await client.get({
-  index: 'metricbeat-2016.05.30-1',
-  id: '1'
-})
-console.log(response0)
+const response = await client.get({
+  index: "metricbeat-2016.05.30-1",
+  id: 1,
+});
+console.log(response);
 
 const response1 = await client.get({
-  index: 'metricbeat-2016.05.31-1',
-  id: '1'
-})
-console.log(response1)
+  index: "metricbeat-2016.05.31-1",
+  id: 1,
+});
+console.log(response1);
 ----
-
diff --git a/docs/doc_examples/09dbd90c5e22ea4a17b4cf9aa72e08ae.asciidoc b/docs/doc_examples/3b05128cba6852e79a905bcdd5a8ebc0.asciidoc
similarity index 64%
rename from docs/doc_examples/09dbd90c5e22ea4a17b4cf9aa72e08ae.asciidoc
rename to docs/doc_examples/3b05128cba6852e79a905bcdd5a8ebc0.asciidoc
index ba374d199..55b52a825 100644
--- a/docs/doc_examples/09dbd90c5e22ea4a17b4cf9aa72e08ae.asciidoc
+++ b/docs/doc_examples/3b05128cba6852e79a905bcdd5a8ebc0.asciidoc
@@ -4,9 +4,9 @@
 [source, js]
 ----
 const response = await client.search({
-  q: 'elasticsearch',
-  filter_path: 'took,hits.hits._id,hits.hits._score'
-})
-console.log(response)
+  index: "my-index-000001",
+  size: "surprise_me",
+  error_trace: "true",
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/3b162509ed14eda44a9681cd1108fa39.asciidoc b/docs/doc_examples/3b162509ed14eda44a9681cd1108fa39.asciidoc
new file mode 100644
index 000000000..e6170cfad
--- /dev/null
+++ b/docs/doc_examples/3b162509ed14eda44a9681cd1108fa39.asciidoc
@@ -0,0 +1,30 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "test",
+  suggest: {
+    text: "noble prize",
+    simple_phrase: {
+      phrase: {
+        field: "title.trigram",
+        size: 1,
+        gram_size: 3,
+        direct_generator: [
+          {
+            field: "title.trigram",
+            suggest_mode: "always",
+          },
+        ],
+        highlight: {
+          pre_tag: "<em>",
+          post_tag: "</em>",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc b/docs/doc_examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc
new file mode 100644
index 000000000..1902fe423
--- /dev/null
+++ b/docs/doc_examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.getPrivileges({
+  application: "myapp",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc b/docs/doc_examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc
new file mode 100644
index 000000000..d24d0f706
--- /dev/null
+++ b/docs/doc_examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc
@@ -0,0 +1,87 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "example-index",
+  mappings: {
+    properties: {
+      text: {
+        type: "text",
+      },
+      vector: {
+        type: "dense_vector",
+        dims: 1,
+        index: true,
+        similarity: "l2_norm",
+        index_options: {
+          type: "hnsw",
+        },
+      },
+      integer: {
+        type: "integer",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "example-index",
+  id: 1,
+  document: {
+    text: "rrf",
+    vector: [5],
+    integer: 1,
+  },
+});
+console.log(response1);
+
+const response2 = await client.index({
+  index: "example-index",
+  id: 2,
+  document: {
+    text: "rrf rrf",
+    vector: [4],
+    integer: 2,
+  },
+});
+console.log(response2);
+
+const response3 = await client.index({
+  index: "example-index",
+  id: 3,
+  document: {
+    text: "rrf rrf rrf",
+    vector: [3],
+    integer: 1,
+  },
+});
+console.log(response3);
+
+const response4 = await client.index({
+  index: "example-index",
+  id: 4,
+  document: {
+    text: "rrf rrf rrf rrf",
+    integer: 2,
+  },
+});
+console.log(response4);
+
+const response5 = await client.index({
+  index: "example-index",
+  id: 5,
+  document: {
+    vector: [0],
+    integer: 1,
+  },
+});
+console.log(response5);
+
+const response6 = await client.indices.refresh({
+  index: "example-index",
+});
+console.log(response6);
+----
diff --git a/docs/doc_examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc b/docs/doc_examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc
new file mode 100644
index 000000000..3760c9f46
--- /dev/null
+++ b/docs/doc_examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.getIndexTemplate();
+console.log(response);
+----
diff --git a/docs/doc_examples/3b606631284877f9bca15051630995ad.asciidoc b/docs/doc_examples/3b606631284877f9bca15051630995ad.asciidoc
new file mode 100644
index 000000000..e355ac371
--- /dev/null
+++ b/docs/doc_examples/3b606631284877f9bca15051630995ad.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my_test_scores",
+  query: {
+    term: {
+      grad_year: "2099",
+    },
+  },
+  sort: [
+    {
+      _script: {
+        type: "number",
+        script: {
+          source: "doc['math_score'].value + doc['verbal_score'].value",
+        },
+        order: "desc",
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc b/docs/doc_examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc
new file mode 100644
index 000000000..e27e26bb2
--- /dev/null
+++ b/docs/doc_examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_security/cross_cluster/api_key/VuaCfGcBCdbkQm-e5aOx",
+  body: {
+    access: {
+      replication: [
+        {
+          names: ["archive"],
+        },
+      ],
+    },
+    metadata: {
+      application: "replication",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3b8ab7027e0d616fb432acd8813e086c.asciidoc b/docs/doc_examples/3b8ab7027e0d616fb432acd8813e086c.asciidoc
new file mode 100644
index 000000000..2170911ae
--- /dev/null
+++ b/docs/doc_examples/3b8ab7027e0d616fb432acd8813e086c.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  document: {
+    "@timestamp": "2099-11-15T13:12:00",
+    message: "GET /search HTTP/1.1 200 1070000",
+    user: {
+      id: "kimchy",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3b9c54604535d97e8368d47148aecc6f.asciidoc b/docs/doc_examples/3b9c54604535d97e8368d47148aecc6f.asciidoc
new file mode 100644
index 000000000..819faa5c7
--- /dev/null
+++ b/docs/doc_examples/3b9c54604535d97e8368d47148aecc6f.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.updateModelSnapshot({
+  job_id: "it_ops_new_logs",
+  snapshot_id: 1491852978,
+  description: "Snapshot 1",
+  retain: true,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3ba2896bcc724c27be8f0decf6f81813.asciidoc b/docs/doc_examples/3ba2896bcc724c27be8f0decf6f81813.asciidoc
new file mode 100644
index 000000000..453fb5d54
--- /dev/null
+++ b/docs/doc_examples/3ba2896bcc724c27be8f0decf6f81813.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putTemplate({
+  name: "custom_monitoring",
+  index_patterns: [
+    ".monitoring-beats-7-*",
+    ".monitoring-es-7-*",
+    ".monitoring-kibana-7-*",
+    ".monitoring-logstash-7-*",
+  ],
+  order: 1,
+  settings: {
+    number_of_shards: 5,
+    number_of_replicas: 2,
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc b/docs/doc_examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc
new file mode 100644
index 000000000..93e5127ef
--- /dev/null
+++ b/docs/doc_examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.reindex({
+  source: {
+    index: "my-index-000001",
+  },
+  dest: {
+    index: "my-new-index-000001",
+  },
+  script: {
+    source: 'ctx._source.tag = ctx._source.remove("flag")',
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3bb5951a9e1186af5d154f56ffc13502.asciidoc b/docs/doc_examples/3bb5951a9e1186af5d154f56ffc13502.asciidoc
new file mode 100644
index 000000000..75cd3cb43
--- /dev/null
+++ b/docs/doc_examples/3bb5951a9e1186af5d154f56ffc13502.asciidoc
@@ -0,0 +1,48 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      message: {
+        type: "keyword",
+        ignore_above: 20,
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  document: {
+    message: "Syntax error",
+  },
+});
+console.log(response1);
+
+const response2 = await client.index({
+  index: "my-index-000001",
+  id: 2,
+  document: {
+    message: "Syntax error with some long stacktrace",
+  },
+});
+console.log(response2);
+
+const response3 = await client.search({
+  index: "my-index-000001",
+  aggs: {
+    messages: {
+      terms: {
+        field: "message",
+      },
+    },
+  },
+});
+console.log(response3);
+----
diff --git a/docs/doc_examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc b/docs/doc_examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc
new file mode 100644
index 000000000..d1794a5b7
--- /dev/null
+++ b/docs/doc_examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "index_double",
+  mappings: {
+    properties: {
+      field: {
+        type: "date",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc b/docs/doc_examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc
new file mode 100644
index 000000000..d31648cf2
--- /dev/null
+++ b/docs/doc_examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.clone({
+  repository: "my_repository",
+  snapshot: "source_snapshot",
+  target_snapshot: "target_snapshot",
+  indices: "index_a,index_b",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3c04f75bcbb07125d51b21b9b2c9f6f0.asciidoc b/docs/doc_examples/3c04f75bcbb07125d51b21b9b2c9f6f0.asciidoc
new file mode 100644
index 000000000..0a22cb911
--- /dev/null
+++ b/docs/doc_examples/3c04f75bcbb07125d51b21b9b2c9f6f0.asciidoc
@@ -0,0 +1,57 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "index_1",
+  id: 1,
+  document: {
+    text: "Document in index 1",
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "index_2",
+  id: 2,
+  refresh: "true",
+  document: {
+    text: "Document in index 2",
+  },
+});
+console.log(response1);
+
+const response2 = await client.search({
+  index: "index_1,index_2",
+  query: {
+    terms: {
+      _index: ["index_1", "index_2"],
+    },
+  },
+  aggs: {
+    indices: {
+      terms: {
+        field: "_index",
+        size: 10,
+      },
+    },
+  },
+  sort: [
+    {
+      _index: {
+        order: "asc",
+      },
+    },
+  ],
+  script_fields: {
+    index_name: {
+      script: {
+        lang: "painless",
+        source: "doc['_index']",
+      },
+    },
+  },
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/3c09ca91057216125ed0e3856a91ff95.asciidoc b/docs/doc_examples/3c09ca91057216125ed0e3856a91ff95.asciidoc
new file mode 100644
index 000000000..1d1a318d3
--- /dev/null
+++ b/docs/doc_examples/3c09ca91057216125ed0e3856a91ff95.asciidoc
@@ -0,0 +1,149 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putIndexTemplate({
+  name: "datastream_template",
+  index_patterns: ["datastream*"],
+  data_stream: {},
+  template: {
+    settings: {
+      index: {
+        mode: "time_series",
+        number_of_replicas: 0,
+        number_of_shards: 2,
+      },
+      "index.lifecycle.name": "datastream_policy",
+    },
+    mappings: {
+      properties: {
+        "@timestamp": {
+          type: "date",
+        },
+        kubernetes: {
+          properties: {
+            container: {
+              properties: {
+                cpu: {
+                  properties: {
+                    usage: {
+                      properties: {
+                        core: {
+                          properties: {
+                            ns: {
+                              type: "long",
+                            },
+                          },
+                        },
+                        limit: {
+                          properties: {
+                            pct: {
+                              type: "float",
+                            },
+                          },
+                        },
+                        nanocores: {
+                          type: "long",
+                          time_series_metric: "gauge",
+                        },
+                        node: {
+                          properties: {
+                            pct: {
+                              type: "float",
+                            },
+                          },
+                        },
+                      },
+                    },
+                  },
+                },
+                memory: {
+                  properties: {
+                    available: {
+                      properties: {
+                        bytes: {
+                          type: "long",
+                          time_series_metric: "gauge",
+                        },
+                      },
+                    },
+                    majorpagefaults: {
+                      type: "long",
+                    },
+                    pagefaults: {
+                      type: "long",
+                      time_series_metric: "gauge",
+                    },
+                    rss: {
+                      properties: {
+                        bytes: {
+                          type: "long",
+                          time_series_metric: "gauge",
+                        },
+                      },
+                    },
+                    usage: {
+                      properties: {
+                        bytes: {
+                          type: "long",
+                          time_series_metric: "gauge",
+                        },
+                        limit: {
+                          properties: {
+                            pct: {
+                              type: "float",
+                            },
+                          },
+                        },
+                        node: {
+                          properties: {
+                            pct: {
+                              type: "float",
+                            },
+                          },
+                        },
+                      },
+                    },
+                    workingset: {
+                      properties: {
+                        bytes: {
+                          type: "long",
+                          time_series_metric: "gauge",
+                        },
+                      },
+                    },
+                  },
+                },
+                name: {
+                  type: "keyword",
+                },
+                start_time: {
+                  type: "date",
+                },
+              },
+            },
+            host: {
+              type: "keyword",
+              time_series_dimension: true,
+            },
+            namespace: {
+              type: "keyword",
+              time_series_dimension: true,
+            },
+            node: {
+              type: "keyword",
+              time_series_dimension: true,
+            },
+            pod: {
+              type: "keyword",
+              time_series_dimension: true,
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc b/docs/doc_examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc
new file mode 100644
index 000000000..0a5371ecb
--- /dev/null
+++ b/docs/doc_examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.inferTrainedModel({
+  model_id: "model2",
+  docs: [
+    {
+      text_field: "",
+    },
+  ],
+  inference_config: {
+    question_answering: {
+      question: "",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3c36dc17359c6b6b6a40d04da9293fa7.asciidoc b/docs/doc_examples/3c36dc17359c6b6b6a40d04da9293fa7.asciidoc
new file mode 100644
index 000000000..e10137efc
--- /dev/null
+++ b/docs/doc_examples/3c36dc17359c6b6b6a40d04da9293fa7.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  size: 0,
+  aggs: {
+    my_date_histo: {
+      date_histogram: {
+        field: "date",
+        calendar_interval: "1M",
+      },
+      aggs: {
+        the_sum: {
+          sum: {
+            field: "price",
+          },
+        },
+        the_movavg: {
+          moving_fn: {
+            buckets_path: "the_sum",
+            window: 10,
+            script: "MovingFunctions.unweightedAvg(values)",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3c5d5a5c34a62724942329658c688f5e.asciidoc b/docs/doc_examples/3c5d5a5c34a62724942329658c688f5e.asciidoc
new file mode 100644
index 000000000..8c8e80a33
--- /dev/null
+++ b/docs/doc_examples/3c5d5a5c34a62724942329658c688f5e.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.setUpgradeMode({
+  enabled: "false",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3c65cb58e131ef46f4dd081683b970ac.asciidoc b/docs/doc_examples/3c65cb58e131ef46f4dd081683b970ac.asciidoc
new file mode 100644
index 000000000..49abf03b9
--- /dev/null
+++ b/docs/doc_examples/3c65cb58e131ef46f4dd081683b970ac.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my_locations,my_geoshapes",
+  query: {
+    bool: {
+      must: {
+        match_all: {},
+      },
+      filter: {
+        geo_distance: {
+          distance: "200km",
+          "pin.location": {
+            lat: 40,
+            lon: -70,
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc b/docs/doc_examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc
new file mode 100644
index 000000000..5ee49acac
--- /dev/null
+++ b/docs/doc_examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.shrink({
+  index: "my-index-000001",
+  target: "shrunk-my-index-000001",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3c7621a81fa982b79f040a6d2611530e.asciidoc b/docs/doc_examples/3c7621a81fa982b79f040a6d2611530e.asciidoc
new file mode 100644
index 000000000..b299375ef
--- /dev/null
+++ b/docs/doc_examples/3c7621a81fa982b79f040a6d2611530e.asciidoc
@@ -0,0 +1,45 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putComponentTemplate({
+  name: "ct1",
+  template: {
+    settings: {
+      "index.number_of_shards": 2,
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.cluster.putComponentTemplate({
+  name: "ct2",
+  template: {
+    settings: {
+      "index.number_of_replicas": 0,
+    },
+    mappings: {
+      properties: {
+        "@timestamp": {
+          type: "date",
+        },
+      },
+    },
+  },
+});
+console.log(response1);
+
+const response2 = await client.indices.putIndexTemplate({
+  name: "final-template",
+  index_patterns: ["my-index-*"],
+  composed_of: ["ct1", "ct2"],
+  priority: 5,
+});
+console.log(response2);
+
+const response3 = await client.indices.simulateTemplate({
+  name: "final-template",
+});
+console.log(response3);
+----
diff --git a/docs/doc_examples/b214942b938e47f2c486e523546cb574.asciidoc b/docs/doc_examples/3cd2f7f9096a8e8180f27b6c30e71840.asciidoc
similarity index 51%
rename from docs/doc_examples/b214942b938e47f2c486e523546cb574.asciidoc
rename to docs/doc_examples/3cd2f7f9096a8e8180f27b6c30e71840.asciidoc
index 009fd0fcc..a6a7c4806 100644
--- a/docs/doc_examples/b214942b938e47f2c486e523546cb574.asciidoc
+++ b/docs/doc_examples/3cd2f7f9096a8e8180f27b6c30e71840.asciidoc
@@ -4,26 +4,26 @@
 [source, js]
 ----
 const response = await client.search({
-  index: 'my_index',
-  body: {
-    query: {
-      bool: {
-        must: [
+  index: "logs",
+  size: 0,
+  aggs: {
+    messages: {
+      filters: {
+        filters: [
           {
             match: {
-              'user.first': 'Alice'
-            }
+              body: "error",
+            },
           },
           {
             match: {
-              'user.last': 'Smith'
-            }
-          }
-        ]
-      }
-    }
-  }
-})
-console.log(response)
+              body: "warning",
+            },
+          },
+        ],
+      },
+    },
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/3cd50a789b8e1f0ebbbc53a8d7ecf656.asciidoc b/docs/doc_examples/3cd50a789b8e1f0ebbbc53a8d7ecf656.asciidoc
deleted file mode 100644
index dd63318fe..000000000
--- a/docs/doc_examples/3cd50a789b8e1f0ebbbc53a8d7ecf656.asciidoc
+++ /dev/null
@@ -1,38 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  body: {
-    query: {
-      bool: {
-        should: [
-          {
-            multi_match: {
-              query: 'Will Smith',
-              type: 'cross_fields',
-              fields: [
-                'first',
-                'last'
-              ],
-              minimum_should_match: '50%'
-            }
-          },
-          {
-            multi_match: {
-              query: 'Will Smith',
-              type: 'cross_fields',
-              fields: [
-                '*.edge'
-              ]
-            }
-          }
-        ]
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/3cd93a48906069709b76420c66930c01.asciidoc b/docs/doc_examples/3cd93a48906069709b76420c66930c01.asciidoc
new file mode 100644
index 000000000..ab7d04c7b
--- /dev/null
+++ b/docs/doc_examples/3cd93a48906069709b76420c66930c01.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        my_analyzer: {
+          tokenizer: "standard",
+          filter: ["lowercase", "my_stemmer"],
+        },
+      },
+      filter: {
+        my_stemmer: {
+          type: "stemmer",
+          language: "light_german",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3d05fa99ba8e1f2c3f3dfe59e4ee60f6.asciidoc b/docs/doc_examples/3d05fa99ba8e1f2c3f3dfe59e4ee60f6.asciidoc
new file mode 100644
index 000000000..c538c58f4
--- /dev/null
+++ b/docs/doc_examples/3d05fa99ba8e1f2c3f3dfe59e4ee60f6.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    match: {
+      content: "kimchy",
+    },
+  },
+  highlight: {
+    fields: {
+      content: {},
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3d1a0e1dc5310544d032108ae0b3f099.asciidoc b/docs/doc_examples/3d1a0e1dc5310544d032108ae0b3f099.asciidoc
new file mode 100644
index 000000000..4d3535af2
--- /dev/null
+++ b/docs/doc_examples/3d1a0e1dc5310544d032108ae0b3f099.asciidoc
@@ -0,0 +1,14 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    match_all: {
+      boost: 1.2,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc b/docs/doc_examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc
index d770d86e8..db267ffc5 100644
--- a/docs/doc_examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc
+++ b/docs/doc_examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc
@@ -3,7 +3,6 @@
 
 [source, js]
 ----
-const response = await client.info()
-console.log(response)
+const response = await client.info();
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/3d316bddd8503a6cc10566630a4155d3.asciidoc b/docs/doc_examples/3d316bddd8503a6cc10566630a4155d3.asciidoc
new file mode 100644
index 000000000..44e3eb04e
--- /dev/null
+++ b/docs/doc_examples/3d316bddd8503a6cc10566630a4155d3.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "GET",
+  path: "/_watcher/settings",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3d48d1ba49f680aac32177d653944623.asciidoc b/docs/doc_examples/3d48d1ba49f680aac32177d653944623.asciidoc
new file mode 100644
index 000000000..c3efc69f2
--- /dev/null
+++ b/docs/doc_examples/3d48d1ba49f680aac32177d653944623.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.watcher.ackWatch({
+  watch_id: "<watch_id>",
+  action_id: "<action_id>",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3d6935e04de21ab2f103e5b61cfd7a5b.asciidoc b/docs/doc_examples/3d6935e04de21ab2f103e5b61cfd7a5b.asciidoc
new file mode 100644
index 000000000..df578de4c
--- /dev/null
+++ b/docs/doc_examples/3d6935e04de21ab2f103e5b61cfd7a5b.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "my-pipeline",
+  processors: [
+    {
+      rename: {
+        description: "Rename 'provider' to 'cloud.provider'",
+        field: "provider",
+        target_field: "cloud.provider",
+        ignore_failure: true,
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc b/docs/doc_examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc
new file mode 100644
index 000000000..1984895ae
--- /dev/null
+++ b/docs/doc_examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.nodes.usage();
+console.log(response);
+----
diff --git a/docs/doc_examples/3d82257167e8a14a7f474848b32da128.asciidoc b/docs/doc_examples/3d82257167e8a14a7f474848b32da128.asciidoc
new file mode 100644
index 000000000..c350ee339
--- /dev/null
+++ b/docs/doc_examples/3d82257167e8a14a7f474848b32da128.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "set_bar",
+  description: "sets the value of bar from the field foo",
+  processors: [
+    {
+      set: {
+        field: "bar",
+        copy_from: "foo",
+      },
+    },
+  ],
+});
+console.log(response);
+
+const response1 = await client.ingest.simulate({
+  id: "set_bar",
+  docs: [
+    {
+      _source: {
+        foo: ["foo1", "foo2"],
+      },
+    },
+  ],
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc b/docs/doc_examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc
new file mode 100644
index 000000000..bc5509871
--- /dev/null
+++ b/docs/doc_examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putSettings({
+  persistent: {
+    "cluster.routing.allocation.exclude._name": "target-node-name",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/c5e5873783246c7b1c01d8464fed72c4.asciidoc b/docs/doc_examples/3db2b5a6424aa92ecab7a8640c38685a.asciidoc
similarity index 74%
rename from docs/doc_examples/c5e5873783246c7b1c01d8464fed72c4.asciidoc
rename to docs/doc_examples/3db2b5a6424aa92ecab7a8640c38685a.asciidoc
index 2778add40..d0263a6ed 100644
--- a/docs/doc_examples/c5e5873783246c7b1c01d8464fed72c4.asciidoc
+++ b/docs/doc_examples/3db2b5a6424aa92ecab7a8640c38685a.asciidoc
@@ -4,9 +4,8 @@
 [source, js]
 ----
 const response = await client.delete({
-  index: 'twitter',
-  id: '1'
-})
-console.log(response)
+  index: "my-index-000001",
+  id: 1,
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/3dd45f65e7bfe207e8d796118f25613c.asciidoc b/docs/doc_examples/3dd45f65e7bfe207e8d796118f25613c.asciidoc
new file mode 100644
index 000000000..0ba906074
--- /dev/null
+++ b/docs/doc_examples/3dd45f65e7bfe207e8d796118f25613c.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.getSettings({
+  flat_settings: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc b/docs/doc_examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc
new file mode 100644
index 000000000..507bb9271
--- /dev/null
+++ b/docs/doc_examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.createApiKey({
+  name: "my-api-key",
+  role_descriptors: {
+    "role-a": {
+      cluster: ["all"],
+      indices: [
+        {
+          names: ["index-a*"],
+          privileges: ["read"],
+        },
+      ],
+    },
+  },
+  metadata: {
+    application: "my-application",
+    environment: {
+      level: 1,
+      trusted: true,
+      tags: ["dev", "staging"],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3e13c8a81f40a537eddc0b57633b45f8.asciidoc b/docs/doc_examples/3e13c8a81f40a537eddc0b57633b45f8.asciidoc
new file mode 100644
index 000000000..b200ad760
--- /dev/null
+++ b/docs/doc_examples/3e13c8a81f40a537eddc0b57633b45f8.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  index: "test_index",
+  analyzer: "my_analyzer",
+  text: "missing bicycles",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3e1cb34fd6e510c79c2fff2126ac1c61.asciidoc b/docs/doc_examples/3e1cb34fd6e510c79c2fff2126ac1c61.asciidoc
new file mode 100644
index 000000000..c8090df78
--- /dev/null
+++ b/docs/doc_examples/3e1cb34fd6e510c79c2fff2126ac1c61.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    _meta: {
+      class: "MyApp::User",
+      version: {
+        min: "1.0",
+        max: "1.3",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc b/docs/doc_examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc
new file mode 100644
index 000000000..efcd814b2
--- /dev/null
+++ b/docs/doc_examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    match: {
+      "user.id": "kimchy",
+    },
+  },
+  highlight: {
+    fields: {
+      comment: {
+        fragment_size: 150,
+        number_of_fragments: 3,
+        no_match_size: 150,
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc b/docs/doc_examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc
new file mode 100644
index 000000000..0c54623e0
--- /dev/null
+++ b/docs/doc_examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc
@@ -0,0 +1,14 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.termvectors({
+  index: "my-index-000001",
+  doc: {
+    fullname: "John Doe",
+    text: "test test test",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/5be23858b35043fcb7b50fe36b873e6e.asciidoc b/docs/doc_examples/3e4227250d49e81df48773f8ba803ea7.asciidoc
similarity index 59%
rename from docs/doc_examples/5be23858b35043fcb7b50fe36b873e6e.asciidoc
rename to docs/doc_examples/3e4227250d49e81df48773f8ba803ea7.asciidoc
index 9fc13cadc..1cc732f90 100644
--- a/docs/doc_examples/5be23858b35043fcb7b50fe36b873e6e.asciidoc
+++ b/docs/doc_examples/3e4227250d49e81df48773f8ba803ea7.asciidoc
@@ -4,15 +4,12 @@
 [source, js]
 ----
 const response = await client.indices.putMapping({
-  index: 'twitter',
-  body: {
-    properties: {
-      email: {
-        type: 'keyword'
-      }
-    }
-  }
-})
-console.log(response)
+  index: "my-data-stream",
+  properties: {
+    message: {
+      type: "text",
+    },
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/3e573bfabe00f8bfb8bb69aa5820768e.asciidoc b/docs/doc_examples/3e573bfabe00f8bfb8bb69aa5820768e.asciidoc
deleted file mode 100644
index a7e633b13..000000000
--- a/docs/doc_examples/3e573bfabe00f8bfb8bb69aa5820768e.asciidoc
+++ /dev/null
@@ -1,25 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response0 = await client.indices.refresh()
-console.log(response0)
-
-const response1 = await client.search({
-  index: 'twitter',
-  size: '0',
-  filter_path: 'hits.total',
-  body: {
-    query: {
-      range: {
-        likes: {
-          lt: 10
-        }
-      }
-    }
-  }
-})
-console.log(response1)
-----
-
diff --git a/docs/doc_examples/3e6db3d80439c2c176dbd1bb1296b6cf.asciidoc b/docs/doc_examples/3e6db3d80439c2c176dbd1bb1296b6cf.asciidoc
new file mode 100644
index 000000000..d1a11e0fb
--- /dev/null
+++ b/docs/doc_examples/3e6db3d80439c2c176dbd1bb1296b6cf.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.renderSearchTemplate({
+  id: "my-search-template",
+  params: {
+    query_string: "hello world",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8022e6a690344035b6472a43a9d122e0.asciidoc b/docs/doc_examples/3e8ed6ae016eb823cb00d9035b8ac459.asciidoc
similarity index 74%
rename from docs/doc_examples/8022e6a690344035b6472a43a9d122e0.asciidoc
rename to docs/doc_examples/3e8ed6ae016eb823cb00d9035b8ac459.asciidoc
index eb8025f4e..2d1bd3666 100644
--- a/docs/doc_examples/8022e6a690344035b6472a43a9d122e0.asciidoc
+++ b/docs/doc_examples/3e8ed6ae016eb823cb00d9035b8ac459.asciidoc
@@ -4,9 +4,7 @@
 [source, js]
 ----
 const response = await client.search({
-  index: '_all',
-  q: 'user:kimchy'
-})
-console.log(response)
+  index: "my-index-000001",
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/3ea33023474e77d73ac0540e3a02b0b2.asciidoc b/docs/doc_examples/3ea33023474e77d73ac0540e3a02b0b2.asciidoc
new file mode 100644
index 000000000..6b705d888
--- /dev/null
+++ b/docs/doc_examples/3ea33023474e77d73ac0540e3a02b0b2.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRoleMapping({
+  name: "basic_users",
+  roles: ["user"],
+  rules: {
+    any: [
+      {
+        field: {
+          dn: "cn=John Doe,cn=contractors,dc=example,dc=com",
+        },
+      },
+      {
+        field: {
+          groups: "cn=users,dc=example,dc=com",
+        },
+      },
+    ],
+  },
+  enabled: true,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3eb4cdd4a799a117ac1ff5f02b18a512.asciidoc b/docs/doc_examples/3eb4cdd4a799a117ac1ff5f02b18a512.asciidoc
new file mode 100644
index 000000000..b934334fb
--- /dev/null
+++ b/docs/doc_examples/3eb4cdd4a799a117ac1ff5f02b18a512.asciidoc
@@ -0,0 +1,46 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "index",
+  mappings: {
+    properties: {
+      query: {
+        type: "percolator",
+      },
+      body: {
+        type: "text",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.updateAliases({
+  actions: [
+    {
+      add: {
+        index: "index",
+        alias: "queries",
+      },
+    },
+  ],
+});
+console.log(response1);
+
+const response2 = await client.index({
+  index: "queries",
+  id: 1,
+  refresh: "true",
+  document: {
+    query: {
+      match: {
+        body: "quick brown fox",
+      },
+    },
+  },
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/3ec95ba697ff97ee2d1a721a393b5926.asciidoc b/docs/doc_examples/3ec95ba697ff97ee2d1a721a393b5926.asciidoc
new file mode 100644
index 000000000..136b2aead
--- /dev/null
+++ b/docs/doc_examples/3ec95ba697ff97ee2d1a721a393b5926.asciidoc
@@ -0,0 +1,70 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        my_analyzer: {
+          type: "custom",
+          tokenizer: "standard",
+          filter: ["lowercase"],
+        },
+        my_stop_analyzer: {
+          type: "custom",
+          tokenizer: "standard",
+          filter: ["lowercase", "english_stop"],
+        },
+      },
+      filter: {
+        english_stop: {
+          type: "stop",
+          stopwords: "_english_",
+        },
+      },
+    },
+  },
+  mappings: {
+    properties: {
+      title: {
+        type: "text",
+        analyzer: "my_analyzer",
+        search_analyzer: "my_stop_analyzer",
+        search_quote_analyzer: "my_analyzer",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  document: {
+    title: "The Quick Brown Fox",
+  },
+});
+console.log(response1);
+
+const response2 = await client.index({
+  index: "my-index-000001",
+  id: 2,
+  document: {
+    title: "A Quick Brown Fox",
+  },
+});
+console.log(response2);
+
+const response3 = await client.search({
+  index: "my-index-000001",
+  query: {
+    query_string: {
+      query: '"the quick brown fox"',
+    },
+  },
+});
+console.log(response3);
+----
diff --git a/docs/doc_examples/3eca58ef7592b3a857ea3a9898de5997.asciidoc b/docs/doc_examples/3eca58ef7592b3a857ea3a9898de5997.asciidoc
new file mode 100644
index 000000000..59b02055a
--- /dev/null
+++ b/docs/doc_examples/3eca58ef7592b3a857ea3a9898de5997.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "museums",
+  size: 0,
+  aggregations: {
+    "zoomed-in": {
+      filter: {
+        geo_bounding_box: {
+          location: {
+            top_left: "POINT (4.9 52.4)",
+            bottom_right: "POINT (5.0 52.3)",
+          },
+        },
+      },
+      aggregations: {
+        zoom1: {
+          geohash_grid: {
+            field: "location",
+            precision: 8,
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3ed39eb60fbfafb70f7825b8d103bf17.asciidoc b/docs/doc_examples/3ed39eb60fbfafb70f7825b8d103bf17.asciidoc
new file mode 100644
index 000000000..df4254b01
--- /dev/null
+++ b/docs/doc_examples/3ed39eb60fbfafb70f7825b8d103bf17.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my_locations",
+  query: {
+    bool: {
+      must: {
+        match_all: {},
+      },
+      filter: {
+        geo_distance: {
+          distance: "200km",
+          "pin.location": {
+            lat: 40,
+            lon: -70,
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc b/docs/doc_examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc
new file mode 100644
index 000000000..710fd7bb1
--- /dev/null
+++ b/docs/doc_examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.watcher.stats({
+  metric: "current_watches",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3ee232bcb2281a12b33cd9764ee4081a.asciidoc b/docs/doc_examples/3ee232bcb2281a12b33cd9764ee4081a.asciidoc
new file mode 100644
index 000000000..cca5fd082
--- /dev/null
+++ b/docs/doc_examples/3ee232bcb2281a12b33cd9764ee4081a.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "geohex2shape",
+  description: "translate H3 cell to polygon with enriched fields",
+  processors: [
+    {
+      geo_grid: {
+        description:
+          "Ingest H3 cells like '811fbffffffffff' and create polygons",
+        field: "geocell",
+        tile_type: "geohex",
+        target_format: "wkt",
+        target_field: "shape",
+        parent_field: "parent",
+        children_field: "children",
+        non_children_field: "nonChildren",
+        precision_field: "precision",
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc
new file mode 100644
index 000000000..be4d5355c
--- /dev/null
+++ b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.esql.asyncQuery({});
+console.log(response);
+----
diff --git a/docs/doc_examples/3f20459d358611793272f63dc596e889.asciidoc b/docs/doc_examples/3f20459d358611793272f63dc596e889.asciidoc
new file mode 100644
index 000000000..c952456cb
--- /dev/null
+++ b/docs/doc_examples/3f20459d358611793272f63dc596e889.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "news",
+  query: {
+    match: {
+      custom_all: "elasticsearch",
+    },
+  },
+  aggs: {
+    tags: {
+      significant_text: {
+        field: "custom_all",
+        source_fields: ["content", "title"],
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3f292a5f67e20f91bf18f5c2412a07bf.asciidoc b/docs/doc_examples/3f292a5f67e20f91bf18f5c2412a07bf.asciidoc
new file mode 100644
index 000000000..7b90f06cb
--- /dev/null
+++ b/docs/doc_examples/3f292a5f67e20f91bf18f5c2412a07bf.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "user_lookup",
+  processors: [
+    {
+      enrich: {
+        description: "Add 'user' data based on 'email'",
+        policy_name: "users-policy",
+        field: "email",
+        target_field: "user",
+
max_matches: "1", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc b/docs/doc_examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc new file mode 100644 index 000000000..cf6730b6b --- /dev/null +++ b/docs/doc_examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + cold: { + actions: { + searchable_snapshot: { + snapshot_repository: "backing_repo", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f30310cc6d0adae6b0f61705624a695.asciidoc b/docs/doc_examples/3f30310cc6d0adae6b0f61705624a695.asciidoc new file mode 100644 index 000000000..3852f54b1 --- /dev/null +++ b/docs/doc_examples/3f30310cc6d0adae6b0f61705624a695.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.create({ + repository: "my_repository", + snapshot: "snapshot_2", + wait_for_completion: "true", + indices: "index_1,index_2", + ignore_unavailable: true, + include_global_state: false, + metadata: { + taken_by: "user123", + taken_because: "backup before upgrading", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f5b5bee692e7d4b0992dc0a64e95a60.asciidoc b/docs/doc_examples/3f5b5bee692e7d4b0992dc0a64e95a60.asciidoc new file mode 100644 index 000000000..5ba9a4ede --- /dev/null +++ b/docs/doc_examples/3f5b5bee692e7d4b0992dc0a64e95a60.asciidoc @@ -0,0 +1,62 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + my_join_field: { + type: "join", + relations: { + my_parent: "my_child", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + id: 1, + refresh: "true", + document: { + number: 1, + my_join_field: "my_parent", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "test", + id: 2, + routing: 1, + refresh: "true", + document: { + number: 1, + my_join_field: { + name: "my_child", + parent: "1", + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "test", + query: { + has_child: { + type: "my_child", + query: { + match: { + number: 1, + }, + }, + inner_hits: {}, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/3f60a892bed18151b7baac6cc712576a.asciidoc b/docs/doc_examples/3f60a892bed18151b7baac6cc712576a.asciidoc new file mode 100644 index 000000000..09d1caed1 --- /dev/null +++ b/docs/doc_examples/3f60a892bed18151b7baac6cc712576a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "whitespace", + filter: ["lowercase", "kstem"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f669878713a14dfba251c7ce74dd5c4.asciidoc 
b/docs/doc_examples/3f669878713a14dfba251c7ce74dd5c4.asciidoc new file mode 100644 index 000000000..0644c5101 --- /dev/null +++ b/docs/doc_examples/3f669878713a14dfba251c7ce74dd5c4.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_ecommerce", + }, + pivot: { + group_by: { + customer_id: { + terms: { + field: "customer_id", + }, + }, + }, + aggregations: { + last: { + top_metrics: { + metrics: [ + { + field: "email", + }, + { + field: "customer_first_name.keyword", + }, + { + field: "customer_last_name.keyword", + }, + ], + sort: { + order_date: "desc", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f8dc309b63fa0437898107b0d964217.asciidoc b/docs/doc_examples/3f8dc309b63fa0437898107b0d964217.asciidoc new file mode 100644 index 000000000..8fe654c50 --- /dev/null +++ b/docs/doc_examples/3f8dc309b63fa0437898107b0d964217.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.mlJobs({ + h: "id,s,dpr,mb", + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f94ed945ae6416a0eb372c2db14d7e0.asciidoc b/docs/doc_examples/3f94ed945ae6416a0eb372c2db14d7e0.asciidoc new file mode 100644 index 000000000..319fc75e4 --- /dev/null +++ b/docs/doc_examples/3f94ed945ae6416a0eb372c2db14d7e0.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + query: { + simple_query_string: { + fields: ["body.exact"], + query: "ski", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fab530a2e43807929c0ef3ebf7d268c.asciidoc b/docs/doc_examples/3fab530a2e43807929c0ef3ebf7d268c.asciidoc new file mode 100644 index 000000000..e7b6ae812 --- /dev/null +++ b/docs/doc_examples/3fab530a2e43807929c0ef3ebf7d268c.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "geoip", + description: "Add geoip info", + processors: [ + { + geoip: { + field: "ip", + target_field: "geo", + database_file: "GeoLite2-Country.mmdb", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "geoip", + document: { + ip: "89.160.20.128", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/3faec4ca15d8c2fbbd16781b1c8693d6.asciidoc b/docs/doc_examples/3faec4ca15d8c2fbbd16781b1c8693d6.asciidoc new file mode 100644 index 000000000..5399ba1e1 --- /dev/null +++ b/docs/doc_examples/3faec4ca15d8c2fbbd16781b1c8693d6.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "mistral-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: 
"mistral_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3faf5e2873de340acfe0a617017db784.asciidoc b/docs/doc_examples/3faf5e2873de340acfe0a617017db784.asciidoc new file mode 100644 index 000000000..682d5836d --- /dev/null +++ b/docs/doc_examples/3faf5e2873de340acfe0a617017db784.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + query: "(content:this OR name:this) AND (content:that OR name:that)", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fb1289c80a354da66693bfb25d7b412.asciidoc b/docs/doc_examples/3fb1289c80a354da66693bfb25d7b412.asciidoc new file mode 100644 index 000000000..d79d59283 --- /dev/null +++ b/docs/doc_examples/3fb1289c80a354da66693bfb25d7b412.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "nightly-snapshots", + schedule: "0 30 2 * * ?", + name: "", + repository: "my_repository", + config: { + include_global_state: false, + indices: "*", + }, + retention: { + expire_after: "30d", + min_count: 5, + max_count: 50, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fb2f41ad229a31ad3ae408cc50cbed5.asciidoc b/docs/doc_examples/3fb2f41ad229a31ad3ae408cc50cbed5.asciidoc new file mode 100644 index 000000000..46d564862 --- /dev/null +++ b/docs/doc_examples/3fb2f41ad229a31ad3ae408cc50cbed5.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + timeout: "2s", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fe0fb38f75d2a34fb1e6ac9bedbcdbc.asciidoc b/docs/doc_examples/3fe0fb38f75d2a34fb1e6ac9bedbcdbc.asciidoc new file mode 100644 index 000000000..19244f327 --- /dev/null +++ b/docs/doc_examples/3fe0fb38f75d2a34fb1e6ac9bedbcdbc.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + exists: { + field: "_ignored", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fe4264ace04405989141c43aadfff81.asciidoc b/docs/doc_examples/3fe4264ace04405989141c43aadfff81.asciidoc new file mode 100644 index 000000000..f98271d62 --- /dev/null +++ b/docs/doc_examples/3fe4264ace04405989141c43aadfff81.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "cli_or_drivers_minimal", + cluster: ["cluster:monitor/main"], + indices: [ + { + names: ["test"], + privileges: ["read", "indices:admin/get"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc b/docs/doc_examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc new file mode 100644 index 000000000..c554ea9ec --- 
/dev/null +++ b/docs/doc_examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.verifyRepository({ + name: "my_repository", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fe79ed63195c5f8018648a5a6d645f6.asciidoc b/docs/doc_examples/3fe79ed63195c5f8018648a5a6d645f6.asciidoc new file mode 100644 index 000000000..9636d7cba --- /dev/null +++ b/docs/doc_examples/3fe79ed63195c5f8018648a5a6d645f6.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000002", + mappings: { + _routing: { + required: true, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000002", + id: 1, + document: { + text: "No routing value provided", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/3fe9006f6c7faea162e43fb250f4da38.asciidoc b/docs/doc_examples/3fe9006f6c7faea162e43fb250f4da38.asciidoc new file mode 100644 index 000000000..b37e62340 --- /dev/null +++ b/docs/doc_examples/3fe9006f6c7faea162e43fb250f4da38.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + field: "_source.my-long-field", + value: 10, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fecd5c6d0c172566da4a54320e1cff3.asciidoc b/docs/doc_examples/3fecd5c6d0c172566da4a54320e1cff3.asciidoc new file mode 100644 index 000000000..64898ae51 --- /dev/null +++ b/docs/doc_examples/3fecd5c6d0c172566da4a54320e1cff3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "dictionary_decompounder", + word_list: ["Donau", "dampf", "meer", "schiff"], + }, + ], + text: "Donaudampfschiff", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ff634a50e2e4556bad7ea8553576992.asciidoc b/docs/doc_examples/3ff634a50e2e4556bad7ea8553576992.asciidoc new file mode 100644 index 000000000..48e3ffcda --- /dev/null +++ b/docs/doc_examples/3ff634a50e2e4556bad7ea8553576992.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["lowercase", "my_snow"], + }, + }, + filter: { + my_snow: { + type: "snowball", + language: "Lovins", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ffe9952786ab258bb6ab928b03148a2.asciidoc b/docs/doc_examples/3ffe9952786ab258bb6ab928b03148a2.asciidoc new file mode 100644 index 000000000..85740bdd2 --- /dev/null +++ b/docs/doc_examples/3ffe9952786ab258bb6ab928b03148a2.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the 
docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + rare_terms: { + field: "genre", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc b/docs/doc_examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc index 5c7696ec7..22dea0b19 100644 --- a/docs/doc_examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc +++ b/docs/doc_examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.reindex({ - body: { - source: { - index: 'source', - size: 100 - }, - dest: { - index: 'dest', - routing: '=cat' - } - } -}) -console.log(response) + source: { + index: "source", + size: 100, + }, + dest: { + index: "dest", + routing: "=cat", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/402092585940953420404c2884a47e59.asciidoc b/docs/doc_examples/402092585940953420404c2884a47e59.asciidoc new file mode 100644 index 000000000..3fb2ef01d --- /dev/null +++ b/docs/doc_examples/402092585940953420404c2884a47e59.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + order: "desc", + }, + }, + }, + { + product: { + terms: { + field: "product", + }, + }, + }, + ], + }, + aggregations: { + the_avg: { + avg: { + field: "price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4029af36cb3f8202549017f7378803b4.asciidoc b/docs/doc_examples/4029af36cb3f8202549017f7378803b4.asciidoc new file mode 100644 index 000000000..8f8803735 --- /dev/null +++ b/docs/doc_examples/4029af36cb3f8202549017f7378803b4.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings(); +console.log(response); +---- diff --git a/docs/doc_examples/4053de806dfd9172167999ce098107c4.asciidoc b/docs/doc_examples/4053de806dfd9172167999ce098107c4.asciidoc new file mode 100644 index 000000000..5ee22552e --- /dev/null +++ b/docs/doc_examples/4053de806dfd9172167999ce098107c4.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + constant_score: { + filter: { + term: { + "user.id": "kimchy", + }, + }, + boost: 1.2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc new file mode 100644 index 000000000..c63439d9c --- /dev/null +++ b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQueryGet({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/405ac843a9156d3cab374e199cac87fb.asciidoc b/docs/doc_examples/405ac843a9156d3cab374e199cac87fb.asciidoc new file mode 100644 index 
000000000..dc52c27a2 --- /dev/null +++ b/docs/doc_examples/405ac843a9156d3cab374e199cac87fb.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_connector/_sync_job", + body: { + id: "connector-id", + job_type: "full", + trigger_method: "on_demand", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc b/docs/doc_examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc new file mode 100644 index 000000000..fd7235c82 --- /dev/null +++ b/docs/doc_examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getOverallBuckets({ + job_id: "job-*", + top_n: 2, + overall_score: 50, + start: 1403532000000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc b/docs/doc_examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc new file mode 100644 index 000000000..e8aad642f --- /dev/null +++ b/docs/doc_examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteScript({ + id: "calculate-score", +}); +console.log(response); +---- diff --git a/docs/doc_examples/406a0f1c1aac947bcee58f86b6d036c1.asciidoc b/docs/doc_examples/406a0f1c1aac947bcee58f86b6d036c1.asciidoc new file mode 100644 index 000000000..5edb2eea3 --- /dev/null +++ b/docs/doc_examples/406a0f1c1aac947bcee58f86b6d036c1.asciidoc @@ -0,0 +1,64 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_event_watch", + trigger: { + schedule: { + interval: "5m", + }, + }, + input: { + search: { + request: { + indices: "log-events", + body: { + size: 0, + query: { + match: { + status: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 5, + }, + }, + }, + throttle_period: "15m", + actions: { + email_administrator: { + email: { + to: "sys.admino@host.domain", + subject: "Encountered {{ctx.payload.hits.total}} errors", + body: "Too many error in the system, see attached data", + attachments: { + attached_data: { + data: { + format: "json", + }, + }, + }, + priority: "high", + }, + }, + notify_pager: { + webhook: { + method: "POST", + host: "pager.service.domain", + port: 1234, + path: "/{{watch_id}}", + body: "Encountered {{ctx.payload.hits.total}} errors", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/408060f0c52300588a6dee774f4fd6a5.asciidoc b/docs/doc_examples/408060f0c52300588a6dee774f4fd6a5.asciidoc new file mode 100644 index 000000000..0b94860f6 --- /dev/null +++ b/docs/doc_examples/408060f0c52300588a6dee774f4fd6a5.asciidoc @@ -0,0 +1,533 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "datastream", + refresh: "true", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:49:00Z", + 
kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 91153, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 463314616, + }, + usage: { + bytes: 307007078, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 585236, + }, + rss: { + bytes: 102728, + }, + pagefaults: 120901, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:45:50Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 124501, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 982546514, + }, + usage: { + bytes: 360035574, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1339884, + }, + rss: { + bytes: 381174, + }, + pagefaults: 178473, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:50Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 38907, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 862723768, + }, + usage: { + bytes: 379572388, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 431227, + }, + rss: { + bytes: 386580, + }, + pagefaults: 233166, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:40Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 86706, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 567160996, + }, + usage: { + bytes: 103266017, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1724908, + }, + rss: { + bytes: 105431, + }, + pagefaults: 233166, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:00Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 150069, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 639054643, + }, + usage: { + bytes: 265142477, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1786511, + }, + rss: { + bytes: 189235, + }, + pagefaults: 138172, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + 
}, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:42:40Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 82260, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 854735585, + }, + usage: { + bytes: 309798052, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 924058, + }, + rss: { + bytes: 110838, + }, + pagefaults: 259073, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:42:10Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 153404, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 279586406, + }, + usage: { + bytes: 214904955, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1047265, + }, + rss: { + bytes: 91914, + }, + pagefaults: 302252, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:40:20Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 125613, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 822782853, + }, + usage: { + bytes: 100475044, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 2109932, + }, + rss: { + bytes: 278446, + }, + pagefaults: 74843, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:40:10Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 100046, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 567160996, + }, + usage: { + bytes: 362826547, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1986724, + }, + rss: { + bytes: 402801, + }, + pagefaults: 296495, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:38:30Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 40018, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 1062428344, + }, + usage: { + bytes: 265142477, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 2294743, + }, + rss: { + bytes: 340623, + }, + 
pagefaults: 224530, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/40a42f005144cfed3dd1dcf2638e8211.asciidoc b/docs/doc_examples/40a42f005144cfed3dd1dcf2638e8211.asciidoc new file mode 100644 index 000000000..d74f6fdc4 --- /dev/null +++ b/docs/doc_examples/40a42f005144cfed3dd1dcf2638e8211.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + field: "price", + operator: "gte", + value: 500, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc b/docs/doc_examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc new file mode 100644 index 000000000..9d9b27d2f --- /dev/null +++ b/docs/doc_examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + constant_score: { + filter: { + percolate: { + field: "query", + document: { + message: "A new bonsai tree in the office", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc b/docs/doc_examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc new file mode 100644 index 000000000..739a8c008 --- /dev/null +++ b/docs/doc_examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.stats({ + human: "true", + filter_path: "indices.mappings.total_deduplicated_mapping_size*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc b/docs/doc_examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc new file mode 100644 index 000000000..c40e72a18 --- /dev/null +++ b/docs/doc_examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "mv", + mappings: { + properties: { + b: { + type: "long", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "mv", + refresh: "true", + operations: [ + { + index: {}, + }, + { + a: 1, + b: [2, 2, 1], + }, + { + index: {}, + }, + { + a: 2, + b: [1, 1], + }, + ], +}); +console.log(response1); + +const response2 = await client.esql.query({ + query: "FROM mv | EVAL b=TO_STRING(b) | LIMIT 2", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/40d88d4f53343ef663c89ba488ab8001.asciidoc b/docs/doc_examples/40d88d4f53343ef663c89ba488ab8001.asciidoc new file mode 100644 index 000000000..3cc09a3de --- /dev/null +++ b/docs/doc_examples/40d88d4f53343ef663c89ba488ab8001.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + 
location: { + type: "envelope", + coordinates: [ + [1000, 100], + [1001, 100], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/40d90d9dc6f4942bf92d88bfc5a34672.asciidoc b/docs/doc_examples/40d90d9dc6f4942bf92d88bfc5a34672.asciidoc new file mode 100644 index 000000000..3cf2017ee --- /dev/null +++ b/docs/doc_examples/40d90d9dc6f4942bf92d88bfc5a34672.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_bool_prefix: { + message: { + query: "quick brown f", + analyzer: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc b/docs/doc_examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc new file mode 100644 index 000000000..b1664b187 --- /dev/null +++ b/docs/doc_examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + human: "true", + filter_path: + "nodes.*.name,nodes.*.indices.mappings.total_estimated_overhead*,nodes.*.jvm.mem.heap_max*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/138ccd89f72aa7502dd9578403dcc589.asciidoc b/docs/doc_examples/4113c57384aa37c58d11579e20c00760.asciidoc similarity index 68% rename from docs/doc_examples/138ccd89f72aa7502dd9578403dcc589.asciidoc rename to docs/doc_examples/4113c57384aa37c58d11579e20c00760.asciidoc index d6812ed4b..349bd2277 100644 --- a/docs/doc_examples/138ccd89f72aa7502dd9578403dcc589.asciidoc +++ b/docs/doc_examples/4113c57384aa37c58d11579e20c00760.asciidoc @@ -4,10 +4,9 @@ [source, js] ---- const response = await client.get({ - index: 'twitter', - id: '0', - _source: 'false' -}) -console.log(response) + index: "my-index-000001", + id: 0, + _source: "false", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/41175d304e660da2931764f9a4418fd3.asciidoc b/docs/doc_examples/41175d304e660da2931764f9a4418fd3.asciidoc new file mode 100644 index 000000000..10c2ec536 --- /dev/null +++ b/docs/doc_examples/41175d304e660da2931764f9a4418fd3.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_pipeline", + body: { + pipeline: { + extract_binary_content: true, + name: "my-connector-pipeline", + reduce_whitespace: true, + run_ml_inference: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc b/docs/doc_examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc new file mode 100644 index 000000000..3d9e94a58 --- /dev/null +++ b/docs/doc_examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.stop(); +console.log(response); +---- diff --git a/docs/doc_examples/412f8238ab5182678f1d8f6383031b11.asciidoc b/docs/doc_examples/412f8238ab5182678f1d8f6383031b11.asciidoc new file mode 100644 index 000000000..242c7984d --- /dev/null +++ 
b/docs/doc_examples/412f8238ab5182678f1d8f6383031b11.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getAlias({ + index: "my-data-stream", + name: "my-alias", +}); +console.log(response); +---- diff --git a/docs/doc_examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc b/docs/doc_examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc new file mode 100644 index 000000000..0d0213521 --- /dev/null +++ b/docs/doc_examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + "http.client.ip": { + type: "ip", + script: + '\n String clientip=dissect(\'%{clientip} %{ident} %{auth} [%{@timestamp}] "%{verb} %{request} HTTP/%{httpversion}" %{status} %{size}\').extract(doc["message"].value)?.clientip;\n if (clientip != null) emit(clientip);\n ', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/415b46bc2b7a7b4dcf9a73ac67ea20e9.asciidoc b/docs/doc_examples/415b46bc2b7a7b4dcf9a73ac67ea20e9.asciidoc new file mode 100644 index 000000000..687bf0527 --- /dev/null +++ b/docs/doc_examples/415b46bc2b7a7b4dcf9a73ac67ea20e9.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "circles", + id: 2, + pipeline: "polygonize_circles", + document: { + circle: { + type: "circle", + radius: "40m", + coordinates: [30, 10], + }, + }, +}); +console.log(response); + +const response1 = await client.get({ + index: "circles", + id: 2, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/416a3ba11232d3c078c1c31340cf356f.asciidoc b/docs/doc_examples/416a3ba11232d3c078c1c31340cf356f.asciidoc new file mode 100644 index 000000000..eb2c98ce7 --- /dev/null +++ b/docs/doc_examples/416a3ba11232d3c078c1c31340cf356f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + tags_schema: "styled", + fields: { + comment: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/41ad6077f9c1b8d8fefab6ea1660edcd.asciidoc b/docs/doc_examples/41ad6077f9c1b8d8fefab6ea1660edcd.asciidoc new file mode 100644 index 000000000..f6aaab8ed --- /dev/null +++ b/docs/doc_examples/41ad6077f9c1b8d8fefab6ea1660edcd.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + date: { + type: "date", + format: "yyyy-MM-dd", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/58df61acbfb15b8ef0aaa18b81ae98a6.asciidoc b/docs/doc_examples/41dbd79f624b998d01c10921e9a35c4b.asciidoc similarity index 61% rename from docs/doc_examples/58df61acbfb15b8ef0aaa18b81ae98a6.asciidoc rename to docs/doc_examples/41dbd79f624b998d01c10921e9a35c4b.asciidoc index b4d71e244..401ace1c2 100644 --- 
a/docs/doc_examples/58df61acbfb15b8ef0aaa18b81ae98a6.asciidoc +++ b/docs/doc_examples/41dbd79f624b998d01c10921e9a35c4b.asciidoc @@ -4,12 +4,12 @@ [source, js] ---- const response = await client.update({ - index: 'test', - id: '1', - body: { - script: "ctx._source.remove('new_field')" - } -}) -console.log(response) + index: "test", + id: 1, + doc: { + name: "new_name", + }, + detect_noop: false, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc b/docs/doc_examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc new file mode 100644 index 000000000..b1a830762 --- /dev/null +++ b/docs/doc_examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my-app", + search_application: { + indices: ["index1", "index2"], + template: { + script: { + source: { + query: { + query_string: { + query: "{{query_string}}", + default_field: "{{default_field}}", + }, + }, + }, + params: { + query_string: "*", + default_field: "*", + }, + }, + dictionary: { + properties: { + query_string: { + type: "string", + }, + default_field: { + type: "string", + enum: ["title", "description"], + }, + additionalProperties: false, + }, + required: ["query_string"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4207219a892339e8f3abe0df8723dd27.asciidoc b/docs/doc_examples/4207219a892339e8f3abe0df8723dd27.asciidoc new file mode 100644 index 000000000..210aca690 --- /dev/null +++ b/docs/doc_examples/4207219a892339e8f3abe0df8723dd27.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.metadata.administrator": "sysadmin@example.com", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc b/docs/doc_examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc new file mode 100644 index 000000000..4ae947494 --- /dev/null +++ b/docs/doc_examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.updateJob({ + job_id: "low_request_rate", + description: "An updated job", + detectors: { + detector_index: 0, + description: "An updated detector description", + }, + groups: ["kibana_sample_data", "kibana_sample_web_logs"], + model_plot_config: { + enabled: true, + }, + renormalization_window_days: 30, + background_persist_interval: "2h", + model_snapshot_retention_days: 7, + results_retention_days: 60, +}); +console.log(response); +---- diff --git a/docs/doc_examples/424fbf082cd4affb84439abfc916b597.asciidoc b/docs/doc_examples/424fbf082cd4affb84439abfc916b597.asciidoc new file mode 100644 index 000000000..e5488b55b --- /dev/null +++ b/docs/doc_examples/424fbf082cd4affb84439abfc916b597.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.downsample({ + index: "my-time-series-index", + target_index: "my-downsampled-time-series-index", + config: { + 
fixed_interval: "1d", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc b/docs/doc_examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc new file mode 100644 index 000000000..4987afa62 --- /dev/null +++ b/docs/doc_examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.threadPool({ + v: "true", + s: "t,n", + h: "type,name,node_name,active,queue,rejected,completed", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4275ecbe4aa68d43a8a7139866610a27.asciidoc b/docs/doc_examples/4275ecbe4aa68d43a8a7139866610a27.asciidoc new file mode 100644 index 000000000..27f37bc36 --- /dev/null +++ b/docs/doc_examples/4275ecbe4aa68d43a8a7139866610a27.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + weighted_grade: { + weighted_avg: { + value: { + field: "grade", + }, + weight: { + field: "weight", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/42ba7c1d13aee91fe6f0a8a42c30eb74.asciidoc b/docs/doc_examples/42ba7c1d13aee91fe6f0a8a42c30eb74.asciidoc new file mode 100644 index 000000000..c45e9cde5 --- /dev/null +++ b/docs/doc_examples/42ba7c1d13aee91fe6f0a8a42c30eb74.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-data-stream", + lazy: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/42bc7608bb675dd6238e2fecbb758d06.asciidoc b/docs/doc_examples/42bc7608bb675dd6238e2fecbb758d06.asciidoc new file mode 100644 index 000000000..25619920d --- /dev/null +++ b/docs/doc_examples/42bc7608bb675dd6238e2fecbb758d06.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "postal_codes", + id: 1, + refresh: "wait_for", + document: { + location: { + type: "envelope", + coordinates: [ + [13, 53], + [14, 52], + ], + }, + postal_code: "96598", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/42d02087f1c8ab0452ef373079a76843.asciidoc b/docs/doc_examples/42d02087f1c8ab0452ef373079a76843.asciidoc new file mode 100644 index 000000000..5290c3b81 --- /dev/null +++ b/docs/doc_examples/42d02087f1c8ab0452ef373079a76843.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "stop", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/42deb4fe32afbe0f94185e256a79c447.asciidoc b/docs/doc_examples/42deb4fe32afbe0f94185e256a79c447.asciidoc new file mode 100644 index 000000000..5c7b26d50 --- /dev/null +++ b/docs/doc_examples/42deb4fe32afbe0f94185e256a79c447.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples 
+ +[source, js] +---- +const response = await client.indices.create({ + index: "stop_example", + settings: { + analysis: { + filter: { + english_stop: { + type: "stop", + stopwords: "_english_", + }, + }, + analyzer: { + rebuilt_stop: { + tokenizer: "lowercase", + filter: ["english_stop"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc b/docs/doc_examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc new file mode 100644 index 000000000..141b7eb25 --- /dev/null +++ b/docs/doc_examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: "logs-nginx.access-prod", + alias: "logs", + }, + }, + { + add: { + index: "logs-my_app-default", + alias: "logs", + is_write_index: true, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/430705509f8367aef92be413f702520b.asciidoc b/docs/doc_examples/430705509f8367aef92be413f702520b.asciidoc new file mode 100644 index 000000000..3fe855a63 --- /dev/null +++ b/docs/doc_examples/430705509f8367aef92be413f702520b.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_status", + body: { + status: "needs_configuration", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4310869b97d4224acaa6d66b1e196048.asciidoc b/docs/doc_examples/4310869b97d4224acaa6d66b1e196048.asciidoc new file mode 100644 index 000000000..6eb4a744d --- /dev/null +++ b/docs/doc_examples/4310869b97d4224acaa6d66b1e196048.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + sparse_vector: { + field: "content_embedding", + inference_id: "my-elser-endpoint", + query: "How to avoid muscle soreness after running?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4323f6d224847eccdce59c23e33fda0a.asciidoc b/docs/doc_examples/4323f6d224847eccdce59c23e33fda0a.asciidoc new file mode 100644 index 000000000..e5c3d639c --- /dev/null +++ b/docs/doc_examples/4323f6d224847eccdce59c23e33fda0a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "cjk_bigram_example", + settings: { + analysis: { + analyzer: { + standard_cjk_bigram: { + tokenizer: "standard", + filter: ["cjk_bigram"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc b/docs/doc_examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc new file mode 100644 index 000000000..a7ccd0173 --- /dev/null +++ b/docs/doc_examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: 
"my-index-2099.05.06-000001", + alias: "my-alias", + search_routing: "1", + index_routing: "2", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc b/docs/doc_examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc new file mode 100644 index 000000000..318e76153 --- /dev/null +++ b/docs/doc_examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.clearScroll({ + scroll_id: [ + "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", + "DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB", + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/435e0d6a7d86e074d572d9671b7b9676.asciidoc b/docs/doc_examples/435e0d6a7d86e074d572d9671b7b9676.asciidoc new file mode 100644 index 000000000..30012027c --- /dev/null +++ b/docs/doc_examples/435e0d6a7d86e074d572d9671b7b9676.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "Polygon", + coordinates: [ + [ + [100, 0], + [101, 0], + [101, 1], + [100, 1], + [100, 0], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/436d50b85fc8f0977d02059eec00719b.asciidoc b/docs/doc_examples/436d50b85fc8f0977d02059eec00719b.asciidoc new file mode 100644 index 000000000..d1a2f84de --- /dev/null +++ b/docs/doc_examples/436d50b85fc8f0977d02059eec00719b.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: { + source: "ctx._source.counter += params.count", + lang: "painless", + params: { + count: 4, + }, + }, + upsert: { + counter: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/43854be6aae61edbea5f9ab988cb4ce5.asciidoc b/docs/doc_examples/43854be6aae61edbea5f9ab988cb4ce5.asciidoc new file mode 100644 index 000000000..b76a70ba1 --- /dev/null +++ b/docs/doc_examples/43854be6aae61edbea5f9ab988cb4ce5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "xpack.security.transport.filter.allow": "172.16.0.0/24", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/43af86de5e49aa06070092fffc138208.asciidoc b/docs/doc_examples/43af86de5e49aa06070092fffc138208.asciidoc deleted file mode 100644 index 85af5a8ba..000000000 --- a/docs/doc_examples/43af86de5e49aa06070092fffc138208.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - bool: { - must_not: { - exists: { - field: 'user' - } - } - } - } - } -}) -console.log(response) ----- - diff --git 
a/docs/doc_examples/43e86fbaeed068dcc981214338559b5a.asciidoc b/docs/doc_examples/43e86fbaeed068dcc981214338559b5a.asciidoc new file mode 100644 index 000000000..e4327570f --- /dev/null +++ b/docs/doc_examples/43e86fbaeed068dcc981214338559b5a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveCluster({ + name: "my-index-*,cluster*:my-index-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/43f77ddf1ed8106d4f47a12d39df8e3b.asciidoc b/docs/doc_examples/43f77ddf1ed8106d4f47a12d39df8e3b.asciidoc new file mode 100644 index 000000000..e3d7b4c0a --- /dev/null +++ b/docs/doc_examples/43f77ddf1ed8106d4f47a12d39df8e3b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "networks_lookup", + document: { + ip: "10.100.34.1", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc b/docs/doc_examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc new file mode 100644 index 000000000..c8b3ac2eb --- /dev/null +++ b/docs/doc_examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.xpack.usage(); +console.log(response); +---- diff --git a/docs/doc_examples/441be98c597698bb2809372abf086c3e.asciidoc b/docs/doc_examples/441be98c597698bb2809372abf086c3e.asciidoc new file mode 100644 index 000000000..07491e972 --- /dev/null +++ b/docs/doc_examples/441be98c597698bb2809372abf086c3e.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + histogram_titles: { + terms: { + field: "my_text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3342c69b2c2303247217532956fcce85.asciidoc b/docs/doc_examples/441f330f6872f995769db1ce2b9627e2.asciidoc similarity index 63% rename from docs/doc_examples/3342c69b2c2303247217532956fcce85.asciidoc rename to docs/doc_examples/441f330f6872f995769db1ce2b9627e2.asciidoc index 4b9941a85..7c0193b54 100644 --- a/docs/doc_examples/3342c69b2c2303247217532956fcce85.asciidoc +++ b/docs/doc_examples/441f330f6872f995769db1ce2b9627e2.asciidoc @@ -4,14 +4,12 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - exists: { - field: 'user' - } - } - } -}) -console.log(response) + stored_fields: [], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/44231f7cdd5c3a21025861cdef31e355.asciidoc b/docs/doc_examples/44231f7cdd5c3a21025861cdef31e355.asciidoc new file mode 100644 index 000000000..694e5ee8e --- /dev/null +++ b/docs/doc_examples/44231f7cdd5c3a21025861cdef31e355.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shrink({ + index: "my-index", + target: "my-shrunken-index", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/4427517dcd8ec9997541150cdc11a0de.asciidoc b/docs/doc_examples/4427517dcd8ec9997541150cdc11a0de.asciidoc new file mode 100644 index 000000000..c98321130 --- /dev/null +++ b/docs/doc_examples/4427517dcd8ec9997541150cdc11a0de.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.deleteRepository({ + name: "my-repo", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4435b654994b575ba181ea679871c78c.asciidoc b/docs/doc_examples/4435b654994b575ba181ea679871c78c.asciidoc new file mode 100644 index 000000000..54a3e4134 --- /dev/null +++ b/docs/doc_examples/4435b654994b575ba181ea679871c78c.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/443dd902f64b3217505c9595839c3b2d.asciidoc b/docs/doc_examples/443dd902f64b3217505c9595839c3b2d.asciidoc new file mode 100644 index 000000000..eb32eb4b5 --- /dev/null +++ b/docs/doc_examples/443dd902f64b3217505c9595839c3b2d.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + indices_boost: [ + { + "my-alias": 1.4, + }, + { + "my-index*": 1.3, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc b/docs/doc_examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc new file mode 100644 index 000000000..057b4dad6 --- /dev/null +++ b/docs/doc_examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-weather-sensor-index-template", + index_patterns: ["metrics-weather_sensors-*"], + data_stream: {}, + template: { + settings: { + "index.mode": "time_series", + "index.lifecycle.name": "my-lifecycle-policy", + }, + mappings: { + properties: { + sensor_id: { + type: "keyword", + time_series_dimension: true, + }, + location: { + type: "keyword", + time_series_dimension: true, + }, + temperature: { + type: "half_float", + time_series_metric: "gauge", + }, + humidity: { + type: "half_float", + time_series_metric: "gauge", + }, + "@timestamp": { + type: "date", + }, + }, + }, + }, + priority: 500, + _meta: { + description: "Template for my weather sensor data", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/443f0e8fbba83777b2df624879d188d5.asciidoc b/docs/doc_examples/443f0e8fbba83777b2df624879d188d5.asciidoc new file mode 100644 index 000000000..30dfcbd4f --- /dev/null +++ b/docs/doc_examples/443f0e8fbba83777b2df624879d188d5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "azure-openai-embeddings", + pipeline: "azure_openai_embeddings", + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc b/docs/doc_examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc new file mode 100644 index 000000000..c582b4fd4 --- /dev/null +++ b/docs/doc_examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + "http.responses": "304", + }, + }, + fields: ["http.client_ip", "timestamp", "http.verb"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/446e8fc8ccfb13bb5ec64e32a5676d18.asciidoc b/docs/doc_examples/446e8fc8ccfb13bb5ec64e32a5676d18.asciidoc new file mode 100644 index 000000000..706cf1e73 --- /dev/null +++ b/docs/doc_examples/446e8fc8ccfb13bb5ec64e32a5676d18.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["elision"], + text: "j’examine près du wharf", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc b/docs/doc_examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc new file mode 100644 index 000000000..11cb7a2ee --- /dev/null +++ b/docs/doc_examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.awareness.attributes": "rack_id", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/44939997b0f2601f82a93585a879f65a.asciidoc b/docs/doc_examples/44939997b0f2601f82a93585a879f65a.asciidoc new file mode 100644 index 000000000..71cbc6b0b --- /dev/null +++ b/docs/doc_examples/44939997b0f2601f82a93585a879f65a.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "simple_pattern_split", + pattern: "_", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "an_underscored_phrase", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc b/docs/doc_examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc new file mode 100644 index 000000000..36dcdadb5 --- /dev/null +++ b/docs/doc_examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.requests.cache.enable": false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc b/docs/doc_examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc new file mode 100644 index 000000000..f6a9a9d5c --- 
/dev/null +++ b/docs/doc_examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + profile: true, + query: { + match: { + message: "GET /search", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/44bca3f17d403517af3616754dc795bb.asciidoc b/docs/doc_examples/44bca3f17d403517af3616754dc795bb.asciidoc new file mode 100644 index 000000000..16f58b881 --- /dev/null +++ b/docs/doc_examples/44bca3f17d403517af3616754dc795bb.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.explain({ + index: "my-index-000001", + id: 0, + query: { + script_score: { + query: { + match: { + message: "elasticsearch", + }, + }, + script: { + source: + "\n long count = doc['count'].value;\n double normalizedCount = count / 10;\n if (explanation != null) {\n explanation.set('normalized count = count / 10 = ' + count + ' / 10 = ' + normalizedCount);\n }\n return normalizedCount;\n ", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fd69fb0538e4f36ac69a8b8f8bf5ae8.asciidoc b/docs/doc_examples/44da736ce0e1587c1e7c45eee606ead7.asciidoc similarity index 50% rename from docs/doc_examples/2fd69fb0538e4f36ac69a8b8f8bf5ae8.asciidoc rename to docs/doc_examples/44da736ce0e1587c1e7c45eee606ead7.asciidoc index ad0dd15c6..19c75356f 100644 --- a/docs/doc_examples/2fd69fb0538e4f36ac69a8b8f8bf5ae8.asciidoc +++ b/docs/doc_examples/44da736ce0e1587c1e7c45eee606ead7.asciidoc @@ -4,19 +4,16 @@ [source, js] ---- const response = await client.updateByQuery({ - index: 'twitter', - body: { - script: { - source: 'ctx._source.likes++', - lang: 'painless' + index: "my-index-000001", + script: { + source: "ctx._source.count++", + lang: "painless", + }, + query: { + term: { + "user.id": "kimchy", }, - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/44db41b8465af951e366da97ade63bc1.asciidoc b/docs/doc_examples/44db41b8465af951e366da97ade63bc1.asciidoc new file mode 100644 index 000000000..7dbba3532 --- /dev/null +++ b/docs/doc_examples/44db41b8465af951e366da97ade63bc1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.reloadSearchAnalyzers({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/44dd65d69267017fa2fb2cffadef40bb.asciidoc b/docs/doc_examples/44dd65d69267017fa2fb2cffadef40bb.asciidoc new file mode 100644 index 000000000..3236cc23f --- /dev/null +++ b/docs/doc_examples/44dd65d69267017fa2fb2cffadef40bb.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + type_and_promoted: { + type: "keyword", + script: "emit(doc['type'].value + ' ' + doc['promoted'].value)", + }, + }, + aggs: { + type_promoted_count: { + cardinality: { + field: "type_and_promoted", + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc b/docs/doc_examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc new file mode 100644 index 000000000..22116d55c --- /dev/null +++ b/docs/doc_examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index_double", + mappings: { + properties: { + field: { + type: "double", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/451b441c3311103d0d2bdbab771b26d2.asciidoc b/docs/doc_examples/451b441c3311103d0d2bdbab771b26d2.asciidoc new file mode 100644 index 000000000..7d5446de3 --- /dev/null +++ b/docs/doc_examples/451b441c3311103d0d2bdbab771b26d2.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-search-template", + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "match": {\n {{=( )=}}\n "message": "(query_string)"\n (={{ }}=)\n }\n }\n }\n ', + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/451e7c29b2cf738cfc822f7c175bef56.asciidoc b/docs/doc_examples/451e7c29b2cf738cfc822f7c175bef56.asciidoc new file mode 100644 index 000000000..9e46b4e15 --- /dev/null +++ b/docs/doc_examples/451e7c29b2cf738cfc822f7c175bef56.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-index-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, + template: { + lifecycle: { + data_retention: "7d", + }, + }, + _meta: { + description: "Template with data stream lifecycle", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4527d9bb12cf738111a188af235d5d4c.asciidoc b/docs/doc_examples/4527d9bb12cf738111a188af235d5d4c.asciidoc new file mode 100644 index 000000000..16cc82185 --- /dev/null +++ b/docs/doc_examples/4527d9bb12cf738111a188af235d5d4c.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + runtime_mappings: { + "http.clientip": { + type: "ip", + script: + "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip);\n ", + }, + }, + query: { + match: { + "http.clientip": "40.135.0.0", + }, + }, + fields: ["http.clientip"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/45499ed1824d1d7cb59972580d2344cb.asciidoc b/docs/doc_examples/45499ed1824d1d7cb59972580d2344cb.asciidoc new file mode 100644 index 000000000..d81463bae --- /dev/null +++ b/docs/doc_examples/45499ed1824d1d7cb59972580d2344cb.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_index", + query: { + range: { + my_counter: { + gte: "9223372036854775808", + lte: "18446744073709551615", + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/455029c3d66306ad5d48f6dbddaf7324.asciidoc b/docs/doc_examples/455029c3d66306ad5d48f6dbddaf7324.asciidoc new file mode 100644 index 000000000..ead1639ea --- /dev/null +++ b/docs/doc_examples/455029c3d66306ad5d48f6dbddaf7324.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "metrics_index", + mappings: { + properties: { + latency_histo: { + type: "histogram", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 1, + refresh: "true", + document: { + "network.name": "net-1", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "metrics_index", + id: 2, + refresh: "true", + document: { + "network.name": "net-2", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "metrics_index", + size: 0, + filter_path: "aggregations", + aggs: { + total_latency: { + sum: { + field: "latency_histo", + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/4553e0acb6336687d61eaecc73f517b7.asciidoc b/docs/doc_examples/4553e0acb6336687d61eaecc73f517b7.asciidoc new file mode 100644 index 000000000..803f24081 --- /dev/null +++ b/docs/doc_examples/4553e0acb6336687d61eaecc73f517b7.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + char_filter: ["my_mappings_char_filter"], + }, + }, + char_filter: { + my_mappings_char_filter: { + type: "mapping", + mappings: [":) => _happy_", ":( => _sad_"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/45813d971bfa890ffa2f51f3f480cce5.asciidoc b/docs/doc_examples/45813d971bfa890ffa2f51f3f480cce5.asciidoc new file mode 100644 index 000000000..892f6073f --- /dev/null +++ b/docs/doc_examples/45813d971bfa890ffa2f51f3f480cce5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test_index", + query: { + percolate: { + field: "query", + document: { + body: "Bycicles are missing", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc b/docs/doc_examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc new file mode 100644 index 000000000..1773a1bdc --- /dev/null +++ b/docs/doc_examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.snapshots({ + repository: "repo1", + v: "true", + s: "id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/45b74f1904533fdb37a5a6f3c8f4ec9b.asciidoc b/docs/doc_examples/45b74f1904533fdb37a5a6f3c8f4ec9b.asciidoc new file mode 100644 index 000000000..f992abc2d --- /dev/null +++ 
b/docs/doc_examples/45b74f1904533fdb37a5a6f3c8f4ec9b.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "edge_ngram", + min_gram: 2, + max_gram: 10, + token_chars: ["letter", "digit"], + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "2 Quick Foxes.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/45c6e54a9c9e08623af96752b4bde346.asciidoc b/docs/doc_examples/45c6e54a9c9e08623af96752b4bde346.asciidoc new file mode 100644 index 000000000..2ab19e442 --- /dev/null +++ b/docs/doc_examples/45c6e54a9c9e08623af96752b4bde346.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "12km", + "pin.location": "POINT (-70 40)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/45ef5156dbd2d3fd4fd22b8d99f7aad4.asciidoc b/docs/doc_examples/45ef5156dbd2d3fd4fd22b8d99f7aad4.asciidoc new file mode 100644 index 000000000..b7156b497 --- /dev/null +++ b/docs/doc_examples/45ef5156dbd2d3fd4fd22b8d99f7aad4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.enable": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46025fc47dfbfa410790df0dd6bdad8d.asciidoc b/docs/doc_examples/46025fc47dfbfa410790df0dd6bdad8d.asciidoc new file mode 100644 index 000000000..40e7af427 --- /dev/null +++ b/docs/doc_examples/46025fc47dfbfa410790df0dd6bdad8d.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + runtime_mappings: { + day_of_week: { + type: "keyword", + script: { + source: + "emit(doc['@timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ROOT))", + }, + }, + }, + aggs: { + day_of_week: { + terms: { + field: "day_of_week", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46064e81620162a23e75002a7eeb8b10.asciidoc b/docs/doc_examples/46064e81620162a23e75002a7eeb8b10.asciidoc new file mode 100644 index 000000000..3f6243b38 --- /dev/null +++ b/docs/doc_examples/46064e81620162a23e75002a7eeb8b10.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.moveToStep({ + index: "my-index-000001", + current_step: { + phase: "hot", + action: "complete", + name: "complete", + }, + next_step: { + phase: "warm", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46103fee3cd5f53dc75123def82d52ad.asciidoc 
b/docs/doc_examples/46103fee3cd5f53dc75123def82d52ad.asciidoc new file mode 100644 index 000000000..9830c6bf9 --- /dev/null +++ b/docs/doc_examples/46103fee3cd5f53dc75123def82d52ad.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, + template: { + settings: { + "index.refresh_interval": "30s", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4646764bf09911fee7d58630c72d3137.asciidoc b/docs/doc_examples/4646764bf09911fee7d58630c72d3137.asciidoc deleted file mode 100644 index 89dd77fe0..000000000 --- a/docs/doc_examples/4646764bf09911fee7d58630c72d3137.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - script: { - id: 'my_script', - params: { - field: 'genre' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/464dffb6a6e24a860223d1c32b232f95.asciidoc b/docs/doc_examples/464dffb6a6e24a860223d1c32b232f95.asciidoc new file mode 100644 index 000000000..617c0423c --- /dev/null +++ b/docs/doc_examples/464dffb6a6e24a860223d1c32b232f95.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + filter: { + my_shingle_filter: { + type: "shingle", + min_shingle_size: 5, + max_shingle_size: 5, + output_unigrams: false, + }, + my_minhash_filter: { + type: "min_hash", + hash_count: 1, + bucket_count: 512, + hash_set_size: 1, + with_rotation: true, + }, + }, + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["my_shingle_filter", "my_minhash_filter"], + }, + }, + }, + }, + mappings: { + properties: { + fingerprint: { + type: "text", + analyzer: "my_analyzer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4655c3dea0c61935b7ecf1e57441df66.asciidoc b/docs/doc_examples/4655c3dea0c61935b7ecf1e57441df66.asciidoc new file mode 100644 index 000000000..c9f7520c1 --- /dev/null +++ b/docs/doc_examples/4655c3dea0c61935b7ecf1e57441df66.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.threadPool({ + v: "true", + h: "id,name,active,rejected,completed", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4659f639d71a54df571260ee5798dbb3.asciidoc b/docs/doc_examples/4659f639d71a54df571260ee5798dbb3.asciidoc new file mode 100644 index 000000000..a5bcaeefe --- /dev/null +++ b/docs/doc_examples/4659f639d71a54df571260ee5798dbb3.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "zoomed-in": { + filter: { + geo_bounding_box: { + location: { + top_left: "POINT (4.9 52.4)", + bottom_right: "POINT (5.0 52.3)", + }, + }, + }, + 
aggregations: { + zoom1: { + geotile_grid: { + field: "location", + precision: 22, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46658f00edc4865dfe472a392374cd0f.asciidoc b/docs/doc_examples/46658f00edc4865dfe472a392374cd0f.asciidoc index f341798d4..5c8453711 100644 --- a/docs/doc_examples/46658f00edc4865dfe472a392374cd0f.asciidoc +++ b/docs/doc_examples/46658f00edc4865dfe472a392374cd0f.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.indices.getTemplate({ - name: 'template_1', - filter_path: '*.version' -}) -console.log(response) + name: "template_1", + filter_path: "*.version", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/4670dd81a9865e07ae74ae8b0266e384.asciidoc b/docs/doc_examples/4670dd81a9865e07ae74ae8b0266e384.asciidoc new file mode 100644 index 000000000..399823687 --- /dev/null +++ b/docs/doc_examples/4670dd81a9865e07ae74ae8b0266e384.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "node_upgrade", + size: 0, + runtime_mappings: { + "startup_time_before.adjusted": { + type: "long", + script: { + source: "emit(doc['startup_time_before'].value - params.adjustment)", + params: { + adjustment: 10, + }, + }, + }, + }, + aggs: { + startup_time_ttest: { + t_test: { + a: { + field: "startup_time_before.adjusted", + }, + b: { + field: "startup_time_after", + }, + type: "paired", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/467833bd44b35a89a7fe0d7df5f253f1.asciidoc b/docs/doc_examples/467833bd44b35a89a7fe0d7df5f253f1.asciidoc new file mode 100644 index 000000000..d2335b434 --- /dev/null +++ b/docs/doc_examples/467833bd44b35a89a7fe0d7df5f253f1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "pattern", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc b/docs/doc_examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc new file mode 100644 index 000000000..f62702896 --- /dev/null +++ b/docs/doc_examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "my-index-000001", + id: 1, + script: { + source: + "if (ctx._source.tags.contains(params['tag'])) { ctx._source.tags.remove(ctx._source.tags.indexOf(params['tag'])) }", + lang: "painless", + params: { + tag: "blue", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46a0eaaf5c881f1ba716d1812b36c724.asciidoc b/docs/doc_examples/46a0eaaf5c881f1ba716d1812b36c724.asciidoc new file mode 100644 index 000000000..f830179d0 --- /dev/null +++ b/docs/doc_examples/46a0eaaf5c881f1ba716d1812b36c724.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.putAutoFollowPattern({ + name: "logs-generic-default", + remote_cluster: "clusterB", + leader_index_patterns: [".ds-logs-generic-default-20*"], + 
leader_index_exclusion_patterns: "*-replicated_from_clustera", + follow_index_pattern: "{{leader_index}}-replicated_from_clusterb", +}); +console.log(response); + +const response1 = await client.ccr.putAutoFollowPattern({ + name: "logs-generic-default", + remote_cluster: "clusterA", + leader_index_patterns: [".ds-logs-generic-default-20*"], + leader_index_exclusion_patterns: "*-replicated_from_clusterb", + follow_index_pattern: "{{leader_index}}-replicated_from_clustera", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc b/docs/doc_examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc new file mode 100644 index 000000000..56d7ce9fd --- /dev/null +++ b/docs/doc_examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.post({ + acknowledge: "true", + licenses: [ + { + uid: "893361dc-9749-4997-93cb-802e3d7fa4xx", + type: "basic", + issue_date_in_millis: 1411948800000, + expiry_date_in_millis: 1914278399999, + max_nodes: 1, + issued_to: "issuedTo", + issuer: "issuer", + signature: "xx", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/46c5c14f20118dcf519ff6ef21360209.asciidoc b/docs/doc_examples/46c5c14f20118dcf519ff6ef21360209.asciidoc new file mode 100644 index 000000000..af98ccc6c --- /dev/null +++ b/docs/doc_examples/46c5c14f20118dcf519ff6ef21360209.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "datastream_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_docs: 1, + }, + downsample: { + fixed_interval: "1h", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46ce40227fa60aa6ba435f366b3a1f5f.asciidoc b/docs/doc_examples/46ce40227fa60aa6ba435f366b3a1f5f.asciidoc new file mode 100644 index 000000000..7b9aa0f1b --- /dev/null +++ b/docs/doc_examples/46ce40227fa60aa6ba435f366b3a1f5f.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.pauseFollow({ + index: "kibana_sample_data_ecommerce2", +}); +console.log(response); + +const response1 = await client.indices.close({ + index: "kibana_sample_data_ecommerce2", +}); +console.log(response1); + +const response2 = await client.ccr.unfollow({ + index: "kibana_sample_data_ecommerce2", +}); +console.log(response2); + +const response3 = await client.indices.open({ + index: "kibana_sample_data_ecommerce2", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/46ebd468c3f132a4978088964466c5cd.asciidoc b/docs/doc_examples/46ebd468c3f132a4978088964466c5cd.asciidoc new file mode 100644 index 000000000..731bf1009 --- /dev/null +++ b/docs/doc_examples/46ebd468c3f132a4978088964466c5cd.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "apostrophe_example", + settings: { + analysis: { + analyzer: { + standard_apostrophe: { + tokenizer: "standard", + filter: ["apostrophe"], + }, + }, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc b/docs/doc_examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc new file mode 100644 index 000000000..6d000e16c --- /dev/null +++ b/docs/doc_examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["title"], + query: "this that thus", + minimum_should_match: 2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/473c8ddd4e4b7814a64e5fe40d9d6dca.asciidoc b/docs/doc_examples/473c8ddd4e4b7814a64e5fe40d9d6dca.asciidoc new file mode 100644 index 000000000..e294a84c8 --- /dev/null +++ b/docs/doc_examples/473c8ddd4e4b7814a64e5fe40d9d6dca.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.cancel({ + task_id: "2j8UKw1bRO283PMwDugNNg:5326", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4752f82fec8b46e5a4b3788b76e3041f.asciidoc b/docs/doc_examples/4752f82fec8b46e5a4b3788b76e3041f.asciidoc new file mode 100644 index 000000000..35aa486ba --- /dev/null +++ b/docs/doc_examples/4752f82fec8b46e5a4b3788b76e3041f.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + migrate: { + enabled: false, + }, + allocate: { + include: { + rack_id: "one,two", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/47909e194d10743093f4a22c27a85925.asciidoc b/docs/doc_examples/47909e194d10743093f4a22c27a85925.asciidoc new file mode 100644 index 000000000..2a78ec314 --- /dev/null +++ b/docs/doc_examples/47909e194d10743093f4a22c27a85925.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 10000, + query: { + match: { + "user.id": "elkbee", + }, + }, + pit: { + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + keep_alive: "1m", + }, + sort: [ + { + "@timestamp": { + order: "asc", + format: "strict_date_optional_time_nanos", + numeric_type: "date_nanos", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/47bb632c6091ad0cd94bc660bdd309a5.asciidoc b/docs/doc_examples/47bb632c6091ad0cd94bc660bdd309a5.asciidoc deleted file mode 100644 index 7f7456e59..000000000 --- a/docs/doc_examples/47bb632c6091ad0cd94bc660bdd309a5.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - query: { - bool: { - must: [ - { - match: { - age: '40' - } - } - ], - must_not: [ - { - match: { - state: 'ID' - } - } - ] - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/47e6dfb5b09d954c9c0c33fda2b6c66d.asciidoc 
b/docs/doc_examples/47e6dfb5b09d954c9c0c33fda2b6c66d.asciidoc new file mode 100644 index 000000000..dd6d26ed1 --- /dev/null +++ b/docs/doc_examples/47e6dfb5b09d954c9c0c33fda2b6c66d.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "jacknich", + password: "l0ng-r4nd0m-p@ssw0rd", + roles: ["admin", "other_role1"], + full_name: "Jack Nicholson", + email: "jacknich@example.com", + metadata: { + intelligence: 7, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/47fde7874e15a37242993fd69c62063b.asciidoc b/docs/doc_examples/47fde7874e15a37242993fd69c62063b.asciidoc new file mode 100644 index 000000000..4dba02a58 --- /dev/null +++ b/docs/doc_examples/47fde7874e15a37242993fd69c62063b.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_ranks: { + percentile_ranks: { + field: "load_time", + values: [500, 600], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc b/docs/doc_examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc new file mode 100644 index 000000000..f3ae3f5f4 --- /dev/null +++ b/docs/doc_examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge(); +console.log(response); +---- diff --git a/docs/doc_examples/4818a1288ac24a56d6d6a4130ee70202.asciidoc b/docs/doc_examples/4818a1288ac24a56d6d6a4130ee70202.asciidoc new file mode 100644 index 000000000..492d8778a --- /dev/null +++ b/docs/doc_examples/4818a1288ac24a56d6d6a4130ee70202.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.getScript({ + id: "my-search-template", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4824a823a830a2a5d990eacfd783ac22.asciidoc b/docs/doc_examples/4824a823a830a2a5d990eacfd783ac22.asciidoc new file mode 100644 index 000000000..e74b54883 --- /dev/null +++ b/docs/doc_examples/4824a823a830a2a5d990eacfd783ac22.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001", + slice: { + id: 0, + max: 2, + }, + query: { + range: { + "http.response.bytes": { + lt: 2000000, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.deleteByQuery({ + index: "my-index-000001", + slice: { + id: 1, + max: 2, + }, + query: { + range: { + "http.response.bytes": { + lt: 2000000, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/48313f620c2871b6f4019b66be730109.asciidoc b/docs/doc_examples/48313f620c2871b6f4019b66be730109.asciidoc new file mode 100644 index 000000000..0d8ee0190 --- /dev/null +++ b/docs/doc_examples/48313f620c2871b6f4019b66be730109.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "shirts", + query: { + bool: { + filter: { + term: { + brand: "gucci", + }, + }, + }, + }, + aggs: { + colors: { + terms: { + field: "color", + }, + }, + color_red: { + filter: { + term: { + color: "red", + }, + }, + aggs: { + models: { + terms: { + field: "model", + }, + }, + }, + }, + }, + post_filter: { + term: { + color: "red", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/483d669ec0768bc4e275a568c6164704.asciidoc b/docs/doc_examples/483d669ec0768bc4e275a568c6164704.asciidoc new file mode 100644 index 000000000..44e105ea9 --- /dev/null +++ b/docs/doc_examples/483d669ec0768bc4e275a568c6164704.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.pauseFollow({ + index: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/484e24d1ed1a154ba9753e6090d38d78.asciidoc b/docs/doc_examples/484e24d1ed1a154ba9753e6090d38d78.asciidoc new file mode 100644 index 000000000..ce94875dd --- /dev/null +++ b/docs/doc_examples/484e24d1ed1a154ba9753e6090d38d78.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "point", + coordinates: [-377.03653, 389.897676], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/487f0e07fd83c05f9763e0795c525e2e.asciidoc b/docs/doc_examples/487f0e07fd83c05f9763e0795c525e2e.asciidoc new file mode 100644 index 000000000..4d59de169 --- /dev/null +++ b/docs/doc_examples/487f0e07fd83c05f9763e0795c525e2e.asciidoc @@ -0,0 +1,99 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + my_location: { + type: "geo_point", + }, + group: { + type: "keyword", + }, + "@timestamp": { + type: "date", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "test", + refresh: "true", + operations: [ + { + index: {}, + }, + { + my_location: { + lat: 52.373184, + lon: 4.889187, + }, + "@timestamp": "2023-01-02T09:00:00Z", + }, + { + index: {}, + }, + { + my_location: { + lat: 52.370159, + lon: 4.885057, + }, + "@timestamp": "2023-01-02T10:00:00Z", + }, + { + index: {}, + }, + { + my_location: { + lat: 52.369219, + lon: 4.901618, + }, + "@timestamp": "2023-01-02T13:00:00Z", + }, + { + index: {}, + }, + { + my_location: { + lat: 52.374081, + lon: 4.91235, + }, + "@timestamp": "2023-01-02T16:00:00Z", + }, + { + index: {}, + }, + { + my_location: { + lat: 52.371667, + lon: 4.914722, + }, + "@timestamp": "2023-01-03T12:00:00Z", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "test", + filter_path: "aggregations", + aggs: { + line: { + geo_line: { + point: { + field: "my_location", + }, + sort: { + field: "@timestamp", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/9166cf38427d5cde5d2ec12a2012b669.asciidoc b/docs/doc_examples/488f6df1df71972392b670ce557f7ff3.asciidoc similarity index 52% rename from 
docs/doc_examples/9166cf38427d5cde5d2ec12a2012b669.asciidoc rename to docs/doc_examples/488f6df1df71972392b670ce557f7ff3.asciidoc index b8fdd1d28..0aabaf773 100644 --- a/docs/doc_examples/9166cf38427d5cde5d2ec12a2012b669.asciidoc +++ b/docs/doc_examples/488f6df1df71972392b670ce557f7ff3.asciidoc @@ -4,18 +4,13 @@ [source, js] ---- const response = await client.indices.putTemplate({ - name: 'template_1', - body: { - index_patterns: [ - '*' - ], - order: 0, - settings: { - number_of_shards: 1 - }, - version: 123 - } -}) -console.log(response) + name: "template_1", + index_patterns: ["my-index-*"], + order: 0, + settings: { + number_of_shards: 1, + }, + version: 123, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/48d9697a14dfe131325521f48a7adc84.asciidoc b/docs/doc_examples/48d9697a14dfe131325521f48a7adc84.asciidoc new file mode 100644 index 000000000..1798f2fb9 --- /dev/null +++ b/docs/doc_examples/48d9697a14dfe131325521f48a7adc84.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + id: "my-search-template", + params: { + query_string: "My string", + text_fields: [ + { + user_name: "John", + last: false, + }, + { + user_name: "kimchy", + last: true, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/48de51de87a8ad9fd8b8db1ca25b85c1.asciidoc b/docs/doc_examples/48de51de87a8ad9fd8b8db1ca25b85c1.asciidoc new file mode 100644 index 000000000..f367f00be --- /dev/null +++ b/docs/doc_examples/48de51de87a8ad9fd8b8db1ca25b85c1.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.close({ + index: "index", +}); +console.log(response); + +const response1 = await client.indices.putSettings({ + index: "index", + settings: { + index: { + similarity: { + default: { + type: "boolean", + }, + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.open({ + index: "index", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/49100a4f53c0ba345fadacdc4f2f86e4.asciidoc b/docs/doc_examples/49100a4f53c0ba345fadacdc4f2f86e4.asciidoc new file mode 100644 index 000000000..f72955f38 --- /dev/null +++ b/docs/doc_examples/49100a4f53c0ba345fadacdc4f2f86e4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + q: "kimchy", + filter_path: "took,hits.hits._id,hits.hits._score", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4955bae30f265b9e436f82b015de6d7e.asciidoc b/docs/doc_examples/4955bae30f265b9e436f82b015de6d7e.asciidoc new file mode 100644 index 000000000..400ff437c --- /dev/null +++ b/docs/doc_examples/4955bae30f265b9e436f82b015de6d7e.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + pretty: "true", + query: { + terms: { + color: { + index: "my-index-000001", + id: "2", + path: "color", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/496d35c89dc991a1509f7e8fb93ade45.asciidoc 
b/docs/doc_examples/496d35c89dc991a1509f7e8fb93ade45.asciidoc new file mode 100644 index 000000000..04eff1a93 --- /dev/null +++ b/docs/doc_examples/496d35c89dc991a1509f7e8fb93ade45.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "bengali_example", + settings: { + analysis: { + filter: { + bengali_stop: { + type: "stop", + stopwords: "_bengali_", + }, + bengali_keywords: { + type: "keyword_marker", + keywords: ["উদাহরণ"], + }, + bengali_stemmer: { + type: "stemmer", + language: "bengali", + }, + }, + analyzer: { + rebuilt_bengali: { + tokenizer: "standard", + filter: [ + "lowercase", + "decimal_digit", + "bengali_keywords", + "indic_normalization", + "bengali_normalization", + "bengali_stop", + "bengali_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4980d6fcb369692b0b29ddc6767d4324.asciidoc b/docs/doc_examples/4980d6fcb369692b0b29ddc6767d4324.asciidoc new file mode 100644 index 000000000..520c175f2 --- /dev/null +++ b/docs/doc_examples/4980d6fcb369692b0b29ddc6767d4324.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.allocationExplain({ + index: "my-index-000001", + shard: 0, + primary: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4989cc97ce1c8fff634a10d343031bd0.asciidoc b/docs/doc_examples/4989cc97ce1c8fff634a10d343031bd0.asciidoc new file mode 100644 index 000000000..6b446681f --- /dev/null +++ b/docs/doc_examples/4989cc97ce1c8fff634a10d343031bd0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + v: "true", + h: "state,node", + s: "state", +}); +console.log(response); +---- diff --git a/docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc b/docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc new file mode 100644 index 000000000..d66581f09 --- /dev/null +++ b/docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.knnSearch({ + index: "my-index", + knn: { + field: "image_vector", + query_vector: [0.3, 0.1, 1.2], + k: 10, + num_candidates: 100, + }, + _source: ["name", "file_type"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/49c052a748c943180db78fee8e144239.asciidoc b/docs/doc_examples/49c052a748c943180db78fee8e144239.asciidoc new file mode 100644 index 000000000..f4bcb9fdb --- /dev/null +++ b/docs/doc_examples/49c052a748c943180db78fee8e144239.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearApiKeyCache({ + ids: "yVGMr3QByxdh1MSaicYx,YoiMaqREw0YVpjn40iMg", +}); +console.log(response); +---- diff --git a/docs/doc_examples/49c40b51da2469a6e00fea8fa6fbf56e.asciidoc b/docs/doc_examples/49c40b51da2469a6e00fea8fa6fbf56e.asciidoc new file mode 100644 index 000000000..c1278b3f1 --- /dev/null +++ 
b/docs/doc_examples/49c40b51da2469a6e00fea8fa6fbf56e.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + pretty: "true", + detailed: "true", + group_by: "parents", + human: "true", + actions: "*data/read/esql", +}); +console.log(response); +---- diff --git a/docs/doc_examples/49cb3f48a0097bfc597c52fa51c6d379.asciidoc b/docs/doc_examples/49cb3f48a0097bfc597c52fa51c6d379.asciidoc new file mode 100644 index 000000000..c7d0ad054 --- /dev/null +++ b/docs/doc_examples/49cb3f48a0097bfc597c52fa51c6d379.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "saml-service-role", + cluster: ["manage_saml", "manage_token"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc b/docs/doc_examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc new file mode 100644 index 000000000..8c7811081 --- /dev/null +++ b/docs/doc_examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "analyze_sample", + normalizer: "my_normalizer", + text: "BaR", +}); +console.log(response); +---- diff --git a/docs/doc_examples/49e8773a34fcbf825de38426cff5509c.asciidoc b/docs/doc_examples/49e8773a34fcbf825de38426cff5509c.asciidoc new file mode 100644 index 000000000..db9a6bca1 --- /dev/null +++ b/docs/doc_examples/49e8773a34fcbf825de38426cff5509c.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-knn-index", + profile: true, + knn: { + field: "my-vector", + query_vector: [-5, 9, -12], + k: 3, + num_candidates: 100, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/49f4d2a461536d150e16b1e0a3148678.asciidoc b/docs/doc_examples/49f4d2a461536d150e16b1e0a3148678.asciidoc new file mode 100644 index 000000000..ddfea5d4b --- /dev/null +++ b/docs/doc_examples/49f4d2a461536d150e16b1e0a3148678.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache({ + index: "my-index-000001", + fielddata: "true", +}); +console.log(response); + +const response1 = await client.indices.clearCache({ + index: "my-index-000001", + query: "true", +}); +console.log(response1); + +const response2 = await client.indices.clearCache({ + index: "my-index-000001", + request: "true", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/4a1951844bd39f26961bfc965f3432b1.asciidoc b/docs/doc_examples/4a1951844bd39f26961bfc965f3432b1.asciidoc new file mode 100644 index 000000000..bfdaca2d6 --- /dev/null +++ b/docs/doc_examples/4a1951844bd39f26961bfc965f3432b1.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + index: "my-index-000001", + docs: [ + { + _id: "1", 
+ }, + { + _id: "2", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4a2080ae55d931eb0643cc3eb91ec524.asciidoc b/docs/doc_examples/4a2080ae55d931eb0643cc3eb91ec524.asciidoc new file mode 100644 index 000000000..8b68c5966 --- /dev/null +++ b/docs/doc_examples/4a2080ae55d931eb0643cc3eb91ec524.asciidoc @@ -0,0 +1,53 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + text: { + type: "text", + fields: { + english: { + type: "text", + analyzer: "english", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "quick brown fox", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + text: "quick brown foxes", + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + multi_match: { + query: "quick brown foxes", + fields: ["text", "text.english"], + type: "most_fields", + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc b/docs/doc_examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc new file mode 100644 index 000000000..55a3cfae9 --- /dev/null +++ b/docs/doc_examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-api-key", + expiration: "1d", + role_descriptors: { + "role-a": { + cluster: ["all"], + indices: [ + { + names: ["index-a*"], + privileges: ["read"], + }, + ], + }, + "role-b": { + cluster: ["all"], + indices: [ + { + names: ["index-b*"], + privileges: ["all"], + }, + ], + }, + }, + metadata: { + application: "my-application", + environment: { + level: 1, + trusted: true, + tags: ["dev", "staging"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4a7510a9c0468303658383c00796dad2.asciidoc b/docs/doc_examples/4a7510a9c0468303658383c00796dad2.asciidoc new file mode 100644 index 000000000..f71abdb74 --- /dev/null +++ b/docs/doc_examples/4a7510a9c0468303658383c00796dad2.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.mapping.ignore_malformed": true, + }, + mappings: { + properties: { + number_one: { + type: "byte", + }, + number_two: { + type: "integer", + ignore_malformed: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4aa81a694266fb634904224d14cd9a87.asciidoc b/docs/doc_examples/4aa81a694266fb634904224d14cd9a87.asciidoc new file mode 100644 index 000000000..54bc674f6 --- /dev/null +++ b/docs/doc_examples/4aa81a694266fb634904224d14cd9a87.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_queries2", + query: { + percolate: { + field: "query", + document: { + my_field: "wxyz", + }, + }, + }, +}); 
+console.log(response);
+----
diff --git a/docs/doc_examples/4acf902c2598b2558f34f20c1744c433.asciidoc b/docs/doc_examples/4acf902c2598b2558f34f20c1744c433.asciidoc
deleted file mode 100644
index 6af47f2ce..000000000
--- a/docs/doc_examples/4acf902c2598b2558f34f20c1744c433.asciidoc
+++ /dev/null
@@ -1,17 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response0 = await client.indices.refresh()
-console.log(response0)
-
-const response1 = await client.search({
-  index: 'twitter',
-  size: '0',
-  q: 'extra:test',
-  filter_path: 'hits.total'
-})
-console.log(response1)
-----
-
diff --git a/docs/doc_examples/4ae494d1e62231e832fc0436b04e2014.asciidoc b/docs/doc_examples/4ae494d1e62231e832fc0436b04e2014.asciidoc
new file mode 100644
index 000000000..2593e595a
--- /dev/null
+++ b/docs/doc_examples/4ae494d1e62231e832fc0436b04e2014.asciidoc
@@ -0,0 +1,24 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.validateQuery({
+  index: "my-index-000001",
+  query: {
+    bool: {
+      must: {
+        query_string: {
+          query: "*:*",
+        },
+      },
+      filter: {
+        term: {
+          "user.id": "kimchy",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4af15c4f26ddefb9c350e7a246a66a15.asciidoc b/docs/doc_examples/4af15c4f26ddefb9c350e7a246a66a15.asciidoc
new file mode 100644
index 000000000..9b0f9cbc8
--- /dev/null
+++ b/docs/doc_examples/4af15c4f26ddefb9c350e7a246a66a15.asciidoc
@@ -0,0 +1,33 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "node",
+  filter_path: "aggregations",
+  aggs: {
+    ip: {
+      terms: {
+        field: "ip",
+        order: {
+          "tm.m": "desc",
+        },
+      },
+      aggs: {
+        tm: {
+          top_metrics: {
+            metrics: {
+              field: "m",
+            },
+            sort: {
+              date: "desc",
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4b1044259a6d777d87529eae25675005.asciidoc b/docs/doc_examples/4b1044259a6d777d87529eae25675005.asciidoc
new file mode 100644
index 000000000..de6386e18
--- /dev/null
+++ b/docs/doc_examples/4b1044259a6d777d87529eae25675005.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "set-foo",
+  description: "sets foo",
+  processors: [
+    {
+      set: {
+        field: "foo",
+        value: "bar",
+      },
+    },
+  ],
+});
+console.log(response);
+
+const response1 = await client.updateByQuery({
+  index: "my-index-000001",
+  pipeline: "set-foo",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc b/docs/doc_examples/4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc
new file mode 100644
index 000000000..949a81873
--- /dev/null
+++ b/docs/doc_examples/4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRole({
+  name: "role_with_remote_indices",
+  remote_indices: [
+    {
+      clusters: ["my_remote"],
+      names: ["logs*"],
+      privileges: ["read", "read_cross_cluster", "view_index_metadata"],
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc b/docs/doc_examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc
new file mode 100644
index 000000000..d64246fd4
--- /dev/null
+++ b/docs/doc_examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc
@@ -0,0 +1,34 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  query: {
+    percolate: {
+      field: "query",
+      documents: [
+        {
+          message: "bonsai tree",
+        },
+        {
+          message: "new tree",
+        },
+        {
+          message: "the office",
+        },
+        {
+          message: "office tree",
+        },
+      ],
+    },
+  },
+  highlight: {
+    fields: {
+      message: {},
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4b5110a21676cc0e26e050a4b4552235.asciidoc b/docs/doc_examples/4b5110a21676cc0e26e050a4b4552235.asciidoc
new file mode 100644
index 000000000..4366d9a73
--- /dev/null
+++ b/docs/doc_examples/4b5110a21676cc0e26e050a4b4552235.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.synonyms.getSynonym({
+  id: "my-synonyms-set",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4ba86373e13e106de044f190343be328.asciidoc b/docs/doc_examples/4ba86373e13e106de044f190343be328.asciidoc
new file mode 100644
index 000000000..cc51c372c
--- /dev/null
+++ b/docs/doc_examples/4ba86373e13e106de044f190343be328.asciidoc
@@ -0,0 +1,40 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  aggs: {
+    countries: {
+      terms: {
+        field: "artist.country",
+        order: [
+          {
+            "rock>playback_stats.avg": "desc",
+          },
+          {
+            _count: "desc",
+          },
+        ],
+      },
+      aggs: {
+        rock: {
+          filter: {
+            term: {
+              genre: "rock",
+            },
+          },
+          aggs: {
+            playback_stats: {
+              stats: {
+                field: "play_count",
+              },
+            },
+          },
+        },
+      },
+    },
+  },
});
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4bb4a64cf04e3feb133b0221d29beaa9.asciidoc b/docs/doc_examples/4bb4a64cf04e3feb133b0221d29beaa9.asciidoc
new file mode 100644
index 000000000..1030165fb
--- /dev/null
+++ b/docs/doc_examples/4bb4a64cf04e3feb133b0221d29beaa9.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.restore({
+  repository: "my_repository",
+  snapshot: "my_snapshot_2099.05.06",
+  indices: "my-index,logs-my_app-default",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4bb7bcfebca682fb9c9e3e47bfd5ef6f.asciidoc b/docs/doc_examples/4bb7bcfebca682fb9c9e3e47bfd5ef6f.asciidoc
new file mode 100644
index 000000000..6ca27ec56
--- /dev/null
+++ b/docs/doc_examples/4bb7bcfebca682fb9c9e3e47bfd5ef6f.asciidoc
@@ -0,0 +1,35 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  size: 0,
+  track_total_hits: false,
+  aggs: {
+    my_buckets: {
+      composite: {
+        sources: [
+          {
+            user_name: {
+              terms: {
+                field: "user_name",
+              },
+            },
+          },
+          {
+            date: {
+              date_histogram: {
+                field: "timestamp",
+                calendar_interval: "1d",
+                order: "desc",
+              },
+            },
+          },
+        ],
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4bba59cf745ac7b996bf90308bc26957.asciidoc b/docs/doc_examples/4bba59cf745ac7b996bf90308bc26957.asciidoc
new file mode 100644
index 000000000..da7cbe01c
--- /dev/null
+++ b/docs/doc_examples/4bba59cf745ac7b996bf90308bc26957.asciidoc
@@ -0,0 +1,24 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "file-path-test",
+  query: {
+    bool: {
+      must: {
+        match: {
+          file_path: "16",
+        },
+      },
+      filter: {
+        term: {
+          "file_path.tree": "/User/alice",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4bc4db44b8c74610b73f21a421099a13.asciidoc b/docs/doc_examples/4bc4db44b8c74610b73f21a421099a13.asciidoc
new file mode 100644
index 000000000..9b8c6cf65
--- /dev/null
+++ b/docs/doc_examples/4bc4db44b8c74610b73f21a421099a13.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.invalidateToken({
+  realm_name: "saml1",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4bc744b0f33b322741a8caf6d8d7d765.asciidoc b/docs/doc_examples/4bc744b0f33b322741a8caf6d8d7d765.asciidoc
new file mode 100644
index 000000000..2e11299ce
--- /dev/null
+++ b/docs/doc_examples/4bc744b0f33b322741a8caf6d8d7d765.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  op_type: "create",
+  document: {
+    "@timestamp": "2099-11-15T13:12:00",
+    message: "GET /search HTTP/1.1 200 1070000",
+    user: {
+      id: "kimchy",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4bd42e31ac4a5cf237777f1a0e97aba8.asciidoc b/docs/doc_examples/4bd42e31ac4a5cf237777f1a0e97aba8.asciidoc
new file mode 100644
index 000000000..4bc587d0b
--- /dev/null
+++ b/docs/doc_examples/4bd42e31ac4a5cf237777f1a0e97aba8.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transform.startTransform({
+  transform_id: "suspicious_client_ips",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4be07b34db282044c88d5021c7ea08ee.asciidoc b/docs/doc_examples/4be07b34db282044c88d5021c7ea08ee.asciidoc
new file mode 100644
index 000000000..f48a5f5fb
--- /dev/null
+++ b/docs/doc_examples/4be07b34db282044c88d5021c7ea08ee.asciidoc
@@ -0,0 +1,41 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index",
+  mappings: {
+    properties: {
+      my_vector: {
+        type: "dense_vector",
+        dims: 3,
+      },
+      my_text: {
+        type: "keyword",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index",
+  id: 1,
+  document: {
+    my_text: "text1",
+    my_vector: [0.5, 10, 6],
+  },
+});
+console.log(response1);
+
+const response2 = await client.index({
+  index: "my-index",
+  id: 2,
+  document: {
+    my_text: "text2",
+    my_vector: [-0.5, 10, 10],
+  },
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/4be20da16d2b58216e8b307218c7bf3a.asciidoc b/docs/doc_examples/4be20da16d2b58216e8b307218c7bf3a.asciidoc
new file mode 100644
index 000000000..c1d20522b
--- /dev/null
+++ b/docs/doc_examples/4be20da16d2b58216e8b307218c7bf3a.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putIndexTemplate({
+  name: "my-data-stream-template",
+  index_patterns: ["my-data-stream*"],
+  data_stream: {},
+  priority: 500,
+  template: {
+    mappings: {
+      properties: {
+        host: {
+          properties: {
+            ip: {
+              type: "ip",
+              ignore_malformed: true,
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc b/docs/doc_examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc
new file mode 100644
index 000000000..23c616763
--- /dev/null
+++ b/docs/doc_examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      text_embedding: {
+        type: "dense_vector",
+        dims: 384,
+        index_options: {
+          type: "flat",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4bf6bb703a52267379ae2b1e1308cf8b.asciidoc b/docs/doc_examples/4bf6bb703a52267379ae2b1e1308cf8b.asciidoc
new file mode 100644
index 000000000..365539ce5
--- /dev/null
+++ b/docs/doc_examples/4bf6bb703a52267379ae2b1e1308cf8b.asciidoc
@@ -0,0 +1,24 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    bool: {
+      filter: {
+        script: {
+          script: {
+            source: "doc['num1'].value > params.param1",
+            lang: "painless",
+            params: {
+              param1: 5,
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc b/docs/doc_examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc
new file mode 100644
index 000000000..9bacd56b7
--- /dev/null
+++ b/docs/doc_examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.updateDatafeed({
+  datafeed_id: "datafeed-test-job",
+  query: {
+    term: {
+      "geo.src": "US",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc b/docs/doc_examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc
new file mode 100644
index 000000000..8315e4886
--- /dev/null
+++ b/docs/doc_examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.getTrainedModels();
+console.log(response);
+----
diff --git a/docs/doc_examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc b/docs/doc_examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc
new file mode 100644
index 000000000..cccd5f36f
--- /dev/null
+++ b/docs/doc_examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    match: {
+      message: {
+        query: "this is a test",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc b/docs/doc_examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc
new file mode 100644
index 000000000..704b7dd74
--- /dev/null
+++ b/docs/doc_examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.forcemerge({
+  index: "my-index",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc b/docs/doc_examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc
new file mode 100644
index 000000000..aae92e25f
--- /dev/null
+++ b/docs/doc_examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cat.mlDataFrameAnalytics({
+  v: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4c777b8360ef6c7671ae2e3803c0b0f6.asciidoc b/docs/doc_examples/4c777b8360ef6c7671ae2e3803c0b0f6.asciidoc
new file mode 100644
index 000000000..7c466f200
--- /dev/null
+++ b/docs/doc_examples/4c777b8360ef6c7671ae2e3803c0b0f6.asciidoc
@@ -0,0 +1,36 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  aggs: {
+    top_tags: {
+      terms: {
+        field: "type",
+        size: 3,
+      },
+      aggs: {
+        top_sales_hits: {
+          top_hits: {
+            sort: [
+              {
+                date: {
+                  order: "desc",
+                },
+              },
+            ],
+            _source: {
+              includes: ["date", "price"],
+            },
+            size: 1,
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4c77d12039fe2445c9251e33979071ac.asciidoc b/docs/doc_examples/4c77d12039fe2445c9251e33979071ac.asciidoc
new file mode 100644
index 000000000..925f2fd98
--- /dev/null
+++ b/docs/doc_examples/4c77d12039fe2445c9251e33979071ac.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "log-messages",
+  filter_path: "aggregations",
+  aggs: {
+    categories: {
+      categorize_text: {
+        field: "message",
+        categorization_filters: ["\\w+\\_\\d{3}"],
+        similarity_threshold: 11,
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4c803b088c1915a7b0634d5cafabe606.asciidoc b/docs/doc_examples/4c803b088c1915a7b0634d5cafabe606.asciidoc
new file mode 100644
index 000000000..420a3ae92
--- /dev/null
+++ b/docs/doc_examples/4c803b088c1915a7b0634d5cafabe606.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "network-traffic",
+  size: 0,
+  aggs: {
+    "ipv4-subnets": {
+      ip_prefix: {
+        field: "ipv4",
+        prefix_length: 24,
+        keyed: true,
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc b/docs/doc_examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc
new file mode 100644
index 000000000..af5ca2ccc
--- /dev/null
+++ b/docs/doc_examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_inference/text_embedding/my-msmarco-minilm-model",
+  body: {
+    service: "elasticsearch",
+    service_settings: {
+      num_allocations: 1,
+      num_threads: 1,
+      model_id: "msmarco-MiniLM-L12-cos-v5",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc b/docs/doc_examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc
new file mode 100644
index 000000000..bff837e2d
--- /dev/null
+++ b/docs/doc_examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      tag: {
+        type: "text",
+        fielddata: true,
+        fielddata_frequency_filter: {
+          min: 0.001,
+          max: 0.1,
+          min_segment_size: 500,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc b/docs/doc_examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc
new file mode 100644
index 000000000..50706a679
--- /dev/null
+++ b/docs/doc_examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.allocationExplain();
+console.log(response);
+----
diff --git a/docs/doc_examples/4ca5bc2c2b2f64d15b9c16370ae97a39.asciidoc b/docs/doc_examples/4ca5bc2c2b2f64d15b9c16370ae97a39.asciidoc
new file mode 100644
index 000000000..4f7a524a1
--- /dev/null
+++ b/docs/doc_examples/4ca5bc2c2b2f64d15b9c16370ae97a39.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "museums",
+  size: 0,
+  aggregations: {
+    "tiles-in-bounds": {
+      geohash_grid: {
+        field: "location",
+        precision: 8,
+        bounds: {
+          top_left: "POINT (4.21875 53.4375)",
+          bottom_right: "POINT (5.625 52.03125)",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4cb44556b8c699f43489b17b42ddd475.asciidoc b/docs/doc_examples/4cb44556b8c699f43489b17b42ddd475.asciidoc
new file mode 100644
index 000000000..9d45d4014
--- /dev/null
+++ b/docs/doc_examples/4cb44556b8c699f43489b17b42ddd475.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.mget({
+  docs: [
+    {
+      _index: "test",
+      _id: "1",
+      stored_fields: ["field1", "field2"],
+    },
+    {
+      _index: "test",
+      _id: "2",
+      stored_fields: ["field3", "field4"],
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4cd246e5c4c035a2cd4081ae9a3d54e5.asciidoc b/docs/doc_examples/4cd246e5c4c035a2cd4081ae9a3d54e5.asciidoc
deleted file mode 100644
index 6ad69bca6..000000000
--- a/docs/doc_examples/4cd246e5c4c035a2cd4081ae9a3d54e5.asciidoc
+++ /dev/null
@@ -1,21 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.update({
-  index: 'test',
-  id: '1',
-  body: {
-    script: {
-      source: 'ctx._source.tags.add(params.tag)',
-      lang: 'painless',
-      params: {
-        tag: 'blue'
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/4cd40113e0fc90c37976f28d7e4a2327.asciidoc b/docs/doc_examples/4cd40113e0fc90c37976f28d7e4a2327.asciidoc
new file mode 100644
index 000000000..4ee26b047
--- /dev/null
+++ b/docs/doc_examples/4cd40113e0fc90c37976f28d7e4a2327.asciidoc
@@ -0,0 +1,81 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "index",
+  settings: {
+    analysis: {
+      normalizer: {
+        my_normalizer: {
+          type: "custom",
+          char_filter: [],
+          filter: ["lowercase", "asciifolding"],
+        },
+      },
+    },
+  },
+  mappings: {
+    properties: {
+      foo: {
+        type: "keyword",
+        normalizer: "my_normalizer",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "index",
+  id: 1,
+  document: {
+    foo: "BÀR",
+  },
+});
+console.log(response1);
+
+const response2 = await client.index({
+  index: "index",
+  id: 2,
+  document: {
+    foo: "bar",
+  },
+});
+console.log(response2);
+
+const response3 = await client.index({
+  index: "index",
+  id: 3,
+  document: {
+    foo: "baz",
+  },
+});
+console.log(response3);
+
+const response4 = await client.indices.refresh({
+  index: "index",
+});
+console.log(response4);
+
+const response5 = await client.search({
+  index: "index",
+  query: {
+    term: {
+      foo: "BAR",
+    },
+  },
+});
+console.log(response5);
+
+const response6 = await client.search({
+  index: "index",
+  query: {
+    match: {
+      foo: "BAR",
+    },
+  },
+});
+console.log(response6);
+----
diff --git a/docs/doc_examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc b/docs/doc_examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc
new file mode 100644
index 000000000..f61623475
--- /dev/null
+++ b/docs/doc_examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.clearCache({
+  index: "my-index-000001",
+  fields: "foo,bar",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc b/docs/doc_examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc
new file mode 100644
index 000000000..21cbecbe4
--- /dev/null
+++ b/docs/doc_examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc
@@ -0,0 +1,64 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.synonyms.putSynonym({
+  id: "my-synonyms-set",
+  synonyms_set: [
+    {
+      id: "test-1",
+      synonyms: "hello, hi",
+    },
+  ],
+});
+console.log(response);
+
+const response1 = await client.indices.create({
+  index: "test-index",
+  settings: {
+    analysis: {
+      filter: {
+        synonyms_filter: {
+          type: "synonym_graph",
+          synonyms_set: "my-synonyms-set",
+          updateable: true,
+        },
+      },
+      analyzer: {
+        my_index_analyzer: {
+          type: "custom",
+          tokenizer: "standard",
+          filter: ["lowercase"],
+        },
+        my_search_analyzer: {
type: "custom", + tokenizer: "standard", + filter: ["lowercase", "synonyms_filter"], + }, + }, + }, + }, + mappings: { + properties: { + title: { + type: "text", + analyzer: "my_index_analyzer", + search_analyzer: "my_search_analyzer", + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.synonyms.putSynonym({ + id: "my-synonyms-set", + synonyms_set: [ + { + id: "test-1", + synonyms: "hello, hi, howdy", + }, + ], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/4ce4563e207233c48ffe849728052dca.asciidoc b/docs/doc_examples/4ce4563e207233c48ffe849728052dca.asciidoc new file mode 100644 index 000000000..02cf01978 --- /dev/null +++ b/docs/doc_examples/4ce4563e207233c48ffe849728052dca.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "logs-my_app-default", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4d21725453955582ff12b4a1104aa7b6.asciidoc b/docs/doc_examples/4d21725453955582ff12b4a1104aa7b6.asciidoc new file mode 100644 index 000000000..f220f30fb --- /dev/null +++ b/docs/doc_examples/4d21725453955582ff12b4a1104aa7b6.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.updateFilter({ + filter_id: "safe_domains", + description: "Updated list of domains", + add_items: ["*.myorg.com"], + remove_items: ["wikipedia.org"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc b/docs/doc_examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc new file mode 100644 index 000000000..6ff988112 --- /dev/null +++ b/docs/doc_examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.putJob({ + id: "sensor", + index_pattern: "sensor-*", + rollup_index: "sensor_rollup", + cron: "*/30 * * * * ?", + page_size: 1000, + groups: { + date_histogram: { + field: "timestamp", + fixed_interval: "1h", + delay: "7d", + }, + terms: { + fields: ["node"], + }, + }, + metrics: [ + { + field: "temperature", + metrics: ["min", "max", "sum"], + }, + { + field: "voltage", + metrics: ["avg"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4d46e2160784bdf7cce948e9f0d31fc8.asciidoc b/docs/doc_examples/4d46e2160784bdf7cce948e9f0d31fc8.asciidoc new file mode 100644 index 000000000..df784c1c8 --- /dev/null +++ b/docs/doc_examples/4d46e2160784bdf7cce948e9f0d31fc8.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + filter: ["my_custom_word_delimiter_graph_filter"], + }, + }, + filter: { + my_custom_word_delimiter_graph_filter: { + type: "word_delimiter_graph", + type_table: ["- => ALPHA"], + split_on_case_change: false, + split_on_numerics: false, + stem_english_possessive: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4d56b179242fed59e3d6476f817b6055.asciidoc 
diff --git a/docs/doc_examples/4d56b179242fed59e3d6476f817b6055.asciidoc b/docs/doc_examples/4d56b179242fed59e3d6476f817b6055.asciidoc
deleted file mode 100644
index 88b67c658..000000000
--- a/docs/doc_examples/4d56b179242fed59e3d6476f817b6055.asciidoc
+++ /dev/null
@@ -1,24 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.indices.create({
-  index: 'test',
-  body: {
-    aliases: {
-      alias_1: {},
-      alias_2: {
-        filter: {
-          term: {
-            user: 'kimchy'
-          }
-        },
-        routing: 'kimchy'
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/4d6997c70a1851f9151443c0d38b532e.asciidoc b/docs/doc_examples/4d6997c70a1851f9151443c0d38b532e.asciidoc
deleted file mode 100644
index e14cc77bd..000000000
--- a/docs/doc_examples/4d6997c70a1851f9151443c0d38b532e.asciidoc
+++ /dev/null
@@ -1,55 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response0 = await client.index({
-  index: 'my_index',
-  id: '1',
-  body: {
-    message: 'some arrays in this document...',
-    tags: [
-      'elasticsearch',
-      'wow'
-    ],
-    lists: [
-      {
-        name: 'prog_list',
-        description: 'programming list'
-      },
-      {
-        name: 'cool_list',
-        description: 'cool stuff list'
-      }
-    ]
-  }
-})
-console.log(response0)
-
-const response1 = await client.index({
-  index: 'my_index',
-  id: '2',
-  body: {
-    message: 'no arrays in this document...',
-    tags: 'elasticsearch',
-    lists: {
-      name: 'prog_list',
-      description: 'programming list'
-    }
-  }
-})
-console.log(response1)
-
-const response2 = await client.search({
-  index: 'my_index',
-  body: {
-    query: {
-      match: {
-        tags: 'elasticsearch'
-      }
-    }
-  }
-})
-console.log(response2)
-----
-
diff --git a/docs/doc_examples/4d7c0b52d3c0a084157428624c543c90.asciidoc b/docs/doc_examples/4d7c0b52d3c0a084157428624c543c90.asciidoc
new file mode 100644
index 000000000..e54879982
--- /dev/null
+++ b/docs/doc_examples/4d7c0b52d3c0a084157428624c543c90.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.info();
+console.log(response);
+----
diff --git a/docs/doc_examples/4dab4c5168047ba596af1beb0e55b845.asciidoc b/docs/doc_examples/4dab4c5168047ba596af1beb0e55b845.asciidoc
new file mode 100644
index 000000000..0ba906074
--- /dev/null
+++ b/docs/doc_examples/4dab4c5168047ba596af1beb0e55b845.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.getSettings({
+  flat_settings: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4ded8ad815ac0e83b1c21a6c18fd0763.asciidoc b/docs/doc_examples/4ded8ad815ac0e83b1c21a6c18fd0763.asciidoc
new file mode 100644
index 000000000..df0d19f50
--- /dev/null
+++ b/docs/doc_examples/4ded8ad815ac0e83b1c21a6c18fd0763.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transform.startTransform({
+  transform_id: "ecommerce-customer-transform",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4e1f02928ef243bf07fd425754b7642b.asciidoc b/docs/doc_examples/4e1f02928ef243bf07fd425754b7642b.asciidoc
new file mode 100644
index 000000000..c49a47833
--- /dev/null
+++ b/docs/doc_examples/4e1f02928ef243bf07fd425754b7642b.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.postVotingConfigExclusions({
+  node_names: "node_name",
+});
+console.log(response);
+
+const response1 = await client.cluster.postVotingConfigExclusions({
+  node_names: "node_name",
+  timeout: "1m",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/4e2317aa45e87922d07c8ddc67a82d32.asciidoc b/docs/doc_examples/4e2317aa45e87922d07c8ddc67a82d32.asciidoc
new file mode 100644
index 000000000..fe78fcfc0
--- /dev/null
+++ b/docs/doc_examples/4e2317aa45e87922d07c8ddc67a82d32.asciidoc
@@ -0,0 +1,34 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        my_analyzer: {
+          tokenizer: "my_tokenizer",
+        },
+      },
+      tokenizer: {
+        my_tokenizer: {
+          type: "path_hierarchy",
+          delimiter: "-",
+          replacement: "/",
+          skip: 2,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.analyze({
+  index: "my-index-000001",
+  analyzer: "my_analyzer",
+  text: "one-two-three-four-five",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/4e3414fc712b16311f9e433dd366f49d.asciidoc b/docs/doc_examples/4e3414fc712b16311f9e433dd366f49d.asciidoc
new file mode 100644
index 000000000..64930ca4f
--- /dev/null
+++ b/docs/doc_examples/4e3414fc712b16311f9e433dd366f49d.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "DELETE",
+  path: "/_inference/sparse_embedding/my-elser-model",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4e4608ae4ce93c27bd174a9ea078cab2.asciidoc b/docs/doc_examples/4e4608ae4ce93c27bd174a9ea078cab2.asciidoc
new file mode 100644
index 000000000..72581c41b
--- /dev/null
+++ b/docs/doc_examples/4e4608ae4ce93c27bd174a9ea078cab2.asciidoc
@@ -0,0 +1,36 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index",
+  retriever: {
+    rrf: {
+      retrievers: [
+        {
+          standard: {
+            query: {
+              match: {
+                my_text_field: "the query string",
+              },
+            },
+          },
+        },
+        {
+          standard: {
+            query: {
+              sparse_vector: {
+                field: "my_tokens",
+                inference_id: "my-elser-endpoint",
+                query: "the query string",
+              },
+            },
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc b/docs/doc_examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc
new file mode 100644
index 000000000..782e49383
--- /dev/null
+++ b/docs/doc_examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc
@@ -0,0 +1,30 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  size: 10000,
+  query: {
+    match: {
+      "user.id": "elkbee",
+    },
+  },
+  pit: {
"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + keep_alive: "1m", + }, + sort: [ + { + "@timestamp": { + order: "asc", + format: "strict_date_optional_time_nanos", + }, + }, + { + _shard_doc: "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4e5f7a97efdbf517f7a2ed6ef7ff469c.asciidoc b/docs/doc_examples/4e5f7a97efdbf517f7a2ed6ef7ff469c.asciidoc new file mode 100644 index 000000000..99ec5cffe --- /dev/null +++ b/docs/doc_examples/4e5f7a97efdbf517f7a2ed6ef7ff469c.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: '{ "query": { "terms": { "tags": {{#toJson}}tags{{/toJson}} }}}', + params: { + tags: ["prod", "es01"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4e6b78ac991ed2d5f9a2e7c89f4fc471.asciidoc b/docs/doc_examples/4e6b78ac991ed2d5f9a2e7c89f4fc471.asciidoc new file mode 100644 index 000000000..38d7d2790 --- /dev/null +++ b/docs/doc_examples/4e6b78ac991ed2d5f9a2e7c89f4fc471.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "music", + pretty: "true", + suggest: { + "song-suggest": { + prefix: "nir", + completion: { + field: "suggest", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4e926063a9494b563387617b08c4f232.asciidoc b/docs/doc_examples/4e926063a9494b563387617b08c4f232.asciidoc new file mode 100644 index 000000000..9aca6a082 --- /dev/null +++ b/docs/doc_examples/4e926063a9494b563387617b08c4f232.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "*", + verbose: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc b/docs/doc_examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc new file mode 100644 index 000000000..38c12bda0 --- /dev/null +++ b/docs/doc_examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.fieldCaps({ + fields: "rating,title", + include_unmapped: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4ed946065faa92f9950f04e402676a97.asciidoc b/docs/doc_examples/4ed946065faa92f9950f04e402676a97.asciidoc new file mode 100644 index 000000000..be96e1741 --- /dev/null +++ b/docs/doc_examples/4ed946065faa92f9950f04e402676a97.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.xpack.info({ + human: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc b/docs/doc_examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc new file mode 100644 index 000000000..a98b7cbf1 --- /dev/null +++ 
+++ b/docs/doc_examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putPrivileges({
+  privileges: {
+    myapp: {
+      read: {
+        actions: ["data:read/*", "action:login"],
+        metadata: {
+          description: "Read access to myapp",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4f08d9e21d9f199acc77abfb83287878.asciidoc b/docs/doc_examples/4f08d9e21d9f199acc77abfb83287878.asciidoc
new file mode 100644
index 000000000..d3563e31a
--- /dev/null
+++ b/docs/doc_examples/4f08d9e21d9f199acc77abfb83287878.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.searchApplication.search({
+  name: "my-app",
+  params: {
+    query_string: "my first query",
+    text_fields: [
+      {
+        name: "title",
+        boost: 5,
+      },
+      {
+        name: "description",
+        boost: 1,
+      },
+    ],
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4f140d8922efdf3420e41b1cb669a289.asciidoc b/docs/doc_examples/4f140d8922efdf3420e41b1cb669a289.asciidoc
new file mode 100644
index 000000000..2f545ce1a
--- /dev/null
+++ b/docs/doc_examples/4f140d8922efdf3420e41b1cb669a289.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.deleteComponentTemplate({
+  name: "template_1",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4f1e1205154d280db21fbd2754ed5398.asciidoc b/docs/doc_examples/4f1e1205154d280db21fbd2754ed5398.asciidoc
new file mode 100644
index 000000000..a944b2044
--- /dev/null
+++ b/docs/doc_examples/4f1e1205154d280db21fbd2754ed5398.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "stats-index",
+  mappings: {
+    properties: {
+      agg_metric: {
+        type: "aggregate_metric_double",
+        metrics: ["min", "max", "sum", "value_count"],
+        default_metric: "max",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4f3366fc26e7ea4de446dfa5cdec9683.asciidoc b/docs/doc_examples/4f3366fc26e7ea4de446dfa5cdec9683.asciidoc
new file mode 100644
index 000000000..7fc5cd674
--- /dev/null
+++ b/docs/doc_examples/4f3366fc26e7ea4de446dfa5cdec9683.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    function_score: {
+      gauss: {
+        "@timestamp": {
+          origin: "2013-09-17",
+          scale: "10d",
+          offset: "5d",
+          decay: 0.5,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc b/docs/doc_examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc
new file mode 100644
index 000000000..7effcb6e3
--- /dev/null
+++ b/docs/doc_examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    match: {
"user.id": "kimchy", + }, + }, + highlight: { + fields: { + comment: { + fragment_size: 150, + number_of_fragments: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f666d710758578e2582850dac3ad144.asciidoc b/docs/doc_examples/4f666d710758578e2582850dac3ad144.asciidoc new file mode 100644 index 000000000..1b9fc2667 --- /dev/null +++ b/docs/doc_examples/4f666d710758578e2582850dac3ad144.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getUserProfile({ + uid: "u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0", + data: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f67b5f5c040f611bd2560a5d38ea6f5.asciidoc b/docs/doc_examples/4f67b5f5c040f611bd2560a5d38ea6f5.asciidoc new file mode 100644 index 000000000..38e0e91b1 --- /dev/null +++ b/docs/doc_examples/4f67b5f5c040f611bd2560a5d38ea6f5.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + rare_terms: { + field: "genre", + missing: "N/A", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f792d86ff79dcfe4643cd95505f8d5f.asciidoc b/docs/doc_examples/4f792d86ff79dcfe4643cd95505f8d5f.asciidoc new file mode 100644 index 000000000..8d47d41ed --- /dev/null +++ b/docs/doc_examples/4f792d86ff79dcfe4643cd95505f8d5f.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic: "runtime", + runtime: { + day_of_week: { + type: "keyword", + script: { + source: + "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))", + }, + }, + }, + properties: { + "@timestamp": { + type: "date", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc b/docs/doc_examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc new file mode 100644 index 000000000..4031829ae --- /dev/null +++ b/docs/doc_examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.delete(); +console.log(response); +---- diff --git a/docs/doc_examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc b/docs/doc_examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc new file mode 100644 index 000000000..cec0a6911 --- /dev/null +++ b/docs/doc_examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getDatafeeds({ + datafeed_id: "datafeed-high_sum_total_sales", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4fb0629146ca78b85e823edd405497bb.asciidoc b/docs/doc_examples/4fb0629146ca78b85e823edd405497bb.asciidoc new file mode 100644 index 000000000..92b9fe5f2 --- /dev/null +++ b/docs/doc_examples/4fb0629146ca78b85e823edd405497bb.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use 
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.putDataFrameAnalytics({
+  id: "loan_classification",
+  source: {
+    index: "loan-applicants",
+  },
+  dest: {
+    index: "loan-applicants-classified",
+  },
+  analysis: {
+    classification: {
+      dependent_variable: "label",
+      training_percent: 75,
+      num_top_classes: 2,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc b/docs/doc_examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc
new file mode 100644
index 000000000..a1fd27e13
--- /dev/null
+++ b/docs/doc_examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc
@@ -0,0 +1,41 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index",
+  query: {
+    bool: {
+      should: [
+        {
+          text_expansion: {
+            "ml.inference.title_expanded.predicted_value": {
+              model_id: ".elser_model_2",
+              model_text: "How is the weather in Jamaica?",
+              boost: 1,
+            },
+          },
+        },
+        {
+          text_expansion: {
+            "ml.inference.description_expanded.predicted_value": {
+              model_id: ".elser_model_2",
+              model_text: "How is the weather in Jamaica?",
+              boost: 1,
+            },
+          },
+        },
+        {
+          multi_match: {
+            query: "How is the weather in Jamaica?",
+            fields: ["title", "description"],
+            boost: 4,
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4fe78a4dfb747fd5dc34145ec6b76183.asciidoc b/docs/doc_examples/4fe78a4dfb747fd5dc34145ec6b76183.asciidoc
new file mode 100644
index 000000000..87df83fca
--- /dev/null
+++ b/docs/doc_examples/4fe78a4dfb747fd5dc34145ec6b76183.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    runtime: {
+      day_of_week: {
+        type: "keyword",
+        script: {
+          source:
+            "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
+        },
+      },
+    },
+    properties: {
+      "@timestamp": {
+        type: "date",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc b/docs/doc_examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc
new file mode 100644
index 000000000..edee66d52
--- /dev/null
+++ b/docs/doc_examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  query: {
+    match_phrase: {
+      message: "number 1",
+    },
+  },
+  highlight: {
+    fields: {
+      message: {
+        type: "plain",
+        fragment_size: 15,
+        number_of_fragments: 3,
+        fragmenter: "simple",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/50096ee0ca53fe8a88450ebb2a50f285.asciidoc b/docs/doc_examples/50096ee0ca53fe8a88450ebb2a50f285.asciidoc
new file mode 100644
index 000000000..ee1e99bc4
--- /dev/null
+++ b/docs/doc_examples/50096ee0ca53fe8a88450ebb2a50f285.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.sql.query({
+  format: "csv",
+  delimiter: ";",
ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5024c524a7db0d6bb44c1820007cc5f4.asciidoc b/docs/doc_examples/5024c524a7db0d6bb44c1820007cc5f4.asciidoc new file mode 100644 index 000000000..dfb8e219d --- /dev/null +++ b/docs/doc_examples/5024c524a7db0d6bb44c1820007cc5f4.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + description: "...", + processors: [ + { + grok: { + field: "message", + patterns: [ + "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes:int} %{NUMBER:duration:double}", + ], + }, + }, + ], + }, + docs: [ + { + _source: { + message: "55.3.244.1 GET /index.html 15824 0.043", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5043b83a89091fa00edb341ddf7ba370.asciidoc b/docs/doc_examples/5043b83a89091fa00edb341ddf7ba370.asciidoc deleted file mode 100644 index a94b20183..000000000 --- a/docs/doc_examples/5043b83a89091fa00edb341ddf7ba370.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match: { - message: { - query: 'this is a testt', - fuzziness: 'AUTO' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/50522d3d5b3d055f712ad737e3d1707a.asciidoc b/docs/doc_examples/50522d3d5b3d055f712ad737e3d1707a.asciidoc new file mode 100644 index 000000000..c0ef20fe2 --- /dev/null +++ b/docs/doc_examples/50522d3d5b3d055f712ad737e3d1707a.asciidoc @@ -0,0 +1,51 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + name: { + type: "text", + fields: { + length: { + type: "token_count", + analyzer: "standard", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + name: "John Smith", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + name: "Rachel Alice Williams", + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + term: { + "name.length": 3, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/505a6c21a4cb608d3662fab1a35eb6df.asciidoc b/docs/doc_examples/505a6c21a4cb608d3662fab1a35eb6df.asciidoc new file mode 100644 index 000000000..ed78e8d92 --- /dev/null +++ b/docs/doc_examples/505a6c21a4cb608d3662fab1a35eb6df.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my_index", + id: 1, + document: { + my_text: "histogram_1", + my_histogram: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + _doc_count: 45, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my_index", + id: 2, + document: { + my_text: "histogram_2", + my_histogram: { + values: [0.1, 0.25, 0.35, 0.4, 0.45, 0.5], + counts: [8, 17, 8, 7, 6, 
+      counts: [8, 17, 8, 7, 6, 2],
+    },
+    _doc_count: 62,
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/50764f4ea88079156b0aff2835bcdc45.asciidoc b/docs/doc_examples/50764f4ea88079156b0aff2835bcdc45.asciidoc
new file mode 100644
index 000000000..790614490
--- /dev/null
+++ b/docs/doc_examples/50764f4ea88079156b0aff2835bcdc45.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.state({
+  metric: "metadata",
+  pretty: "true",
+  filter_path: "metadata.stored_scripts",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc b/docs/doc_examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc
new file mode 100644
index 000000000..c5ebc07bd
--- /dev/null
+++ b/docs/doc_examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.getScript({
+  id: "my-stored-script",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/50a9623c153cabe64101efb633e10e6c.asciidoc b/docs/doc_examples/50a9623c153cabe64101efb633e10e6c.asciidoc
new file mode 100644
index 000000000..3df88c27c
--- /dev/null
+++ b/docs/doc_examples/50a9623c153cabe64101efb633e10e6c.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.autoscaling.deleteAutoscalingPolicy({
+  name: "",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/50b5c0332949d2154c72b629b5fa6222.asciidoc b/docs/doc_examples/50b5c0332949d2154c72b629b5fa6222.asciidoc
new file mode 100644
index 000000000..9b53504df
--- /dev/null
+++ b/docs/doc_examples/50b5c0332949d2154c72b629b5fa6222.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "my-index-000001",
+  refresh: "wait_for",
+  document: {
+    user_id: 12345,
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index-000001",
+  refresh: "wait_for",
+  document: {
+    user_id: 12346,
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/50c2b06ecddb5a4aebd8b78e38af5f1f.asciidoc b/docs/doc_examples/50c2b06ecddb5a4aebd8b78e38af5f1f.asciidoc
new file mode 100644
index 000000000..d15963c65
--- /dev/null
+++ b/docs/doc_examples/50c2b06ecddb5a4aebd8b78e38af5f1f.asciidoc
@@ -0,0 +1,54 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ilm.putLifecycle({
+  name: "my-lifecycle-policy",
+  policy: {
+    phases: {
+      hot: {
+        actions: {
+          rollover: {
+            max_primary_shard_size: "50gb",
+          },
+        },
+      },
+      warm: {
+        min_age: "30d",
+        actions: {
+          shrink: {
+            number_of_shards: 1,
+          },
+          forcemerge: {
+            max_num_segments: 1,
+          },
+        },
+      },
+      cold: {
+        min_age: "60d",
+        actions: {
+          searchable_snapshot: {
+            snapshot_repository: "found-snapshots",
+          },
+        },
+      },
+      frozen: {
+        min_age: "90d",
+        actions: {
+          searchable_snapshot: {
+            snapshot_repository: "found-snapshots",
+          },
+        },
+      },
+      delete: {
+        min_age: "735d",
+        actions: {
+          delete: {},
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/50c2cea2adbe9523458c2686ab11df54.asciidoc b/docs/doc_examples/50c2cea2adbe9523458c2686ab11df54.asciidoc
new file mode 100644
index 000000000..a049dc794
--- /dev/null
+++ b/docs/doc_examples/50c2cea2adbe9523458c2686ab11df54.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "text_payloads",
+  mappings: {
+    properties: {
+      text: {
+        type: "text",
+        term_vector: "with_positions_payloads",
+        analyzer: "payload_delimiter",
+      },
+    },
+  },
+  settings: {
+    analysis: {
+      analyzer: {
+        payload_delimiter: {
+          tokenizer: "whitespace",
+          filter: ["delimited_payload"],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/50d5c5b7e8ed9a95b8d9a25a32a77425.asciidoc b/docs/doc_examples/50d5c5b7e8ed9a95b8d9a25a32a77425.asciidoc
new file mode 100644
index 000000000..5c66601f8
--- /dev/null
+++ b/docs/doc_examples/50d5c5b7e8ed9a95b8d9a25a32a77425.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  tokenizer: "whitespace",
+  filter: ["unique"],
+  text: "the quick fox jumps the lazy fox",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8c5977410335d58217e0626618ce6641.asciidoc b/docs/doc_examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc
similarity index 66%
rename from docs/doc_examples/8c5977410335d58217e0626618ce6641.asciidoc
rename to docs/doc_examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc
index 83e2c51bd..4cee63e13 100644
--- a/docs/doc_examples/8c5977410335d58217e0626618ce6641.asciidoc
+++ b/docs/doc_examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc
@@ -4,12 +4,11 @@
 [source, js]
 ----
 const response = await client.index({
-  index: 'my_index',
-  id: '2',
-  body: {
-    color: 'blue'
-  }
-})
-console.log(response)
+  index: "idx",
+  id: 1,
+  document: {
+    "foo.bar.baz": 1,
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/50dc35d3d8705bd62aed20a15209476c.asciidoc b/docs/doc_examples/50dc35d3d8705bd62aed20a15209476c.asciidoc
new file mode 100644
index 000000000..7d0323216
--- /dev/null
+++ b/docs/doc_examples/50dc35d3d8705bd62aed20a15209476c.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRoleMapping({
+  name: "mapping9",
+  rules: {
+    field: {
+      "realm.name": "cloud-saml",
+    },
+  },
+  role_templates: [
+    {
+      template: {
+        source: "saml_user",
+      },
+    },
+    {
+      template: {
+        source: "_user_{{username}}",
+      },
+    },
+  ],
+  enabled: true,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/50ddf374cfa8128538ea092ee98b723d.asciidoc b/docs/doc_examples/50ddf374cfa8128538ea092ee98b723d.asciidoc
new file mode 100644
index 000000000..ff7c02793
--- /dev/null
+++ b/docs/doc_examples/50ddf374cfa8128538ea092ee98b723d.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.tasks.list({
+  filter_path: "nodes.*.tasks",
+});
+console.log(response);
+----
b/docs/doc_examples/50f922e9f002d8ac570953be59414b7b.asciidoc new file mode 100644 index 000000000..8f508f633 --- /dev/null +++ b/docs/doc_examples/50f922e9f002d8ac570953be59414b7b.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + combined_fields: { + query: "database systems", + fields: ["title", "abstract"], + operator: "and", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d2153f3100bf12c2de98f14eb86ab061.asciidoc b/docs/doc_examples/511e5bb8ab881171b7e8629095e30b85.asciidoc similarity index 78% rename from docs/doc_examples/d2153f3100bf12c2de98f14eb86ab061.asciidoc rename to docs/doc_examples/511e5bb8ab881171b7e8629095e30b85.asciidoc index 8d4335ffa..0871eea9c 100644 --- a/docs/doc_examples/d2153f3100bf12c2de98f14eb86ab061.asciidoc +++ b/docs/doc_examples/511e5bb8ab881171b7e8629095e30b85.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.search({ - index: 'twitter' -}) -console.log(response) + index: "datastream", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/51390ca10aa22d7104e8970f09ea4512.asciidoc b/docs/doc_examples/51390ca10aa22d7104e8970f09ea4512.asciidoc new file mode 100644 index 000000000..a717a783b --- /dev/null +++ b/docs/doc_examples/51390ca10aa22d7104e8970f09ea4512.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + binary: { + type: "binary", + doc_values: true, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + binary: ["IAA=", "EAA="], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/515e1104d136082e826d1b32af011759.asciidoc b/docs/doc_examples/515e1104d136082e826d1b32af011759.asciidoc new file mode 100644 index 000000000..af042e8f2 --- /dev/null +++ b/docs/doc_examples/515e1104d136082e826d1b32af011759.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "products", + id: 0, + refresh: "true", + document: { + name: "LED TV", + resellers: [ + { + reseller: "companyA", + price: 350, + }, + { + reseller: "companyB", + price: 500, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc b/docs/doc_examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc new file mode 100644 index 000000000..115cd3757 --- /dev/null +++ b/docs/doc_examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.clearCursor({ + cursor: + "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/518fcf1dc1edd7dba0864accf71b49f4.asciidoc b/docs/doc_examples/518fcf1dc1edd7dba0864accf71b49f4.asciidoc new file mode 100644 index 000000000..fc8e655af --- /dev/null +++ 
b/docs/doc_examples/518fcf1dc1edd7dba0864accf71b49f4.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + preference: "_local", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5195a88194f7a139c635a84398d76205.asciidoc b/docs/doc_examples/5195a88194f7a139c635a84398d76205.asciidoc new file mode 100644 index 000000000..567ff6501 --- /dev/null +++ b/docs/doc_examples/5195a88194f7a139c635a84398d76205.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "my_snapshot", +}); +console.log(response); +---- diff --git a/docs/doc_examples/51b40610ae05730b4c6afd25647d7ae0.asciidoc b/docs/doc_examples/51b40610ae05730b4c6afd25647d7ae0.asciidoc new file mode 100644 index 000000000..e17edea6d --- /dev/null +++ b/docs/doc_examples/51b40610ae05730b4c6afd25647d7ae0.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + date: "2015-10-01T05:30:00Z", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + date: "2015-10-01T06:30:00Z", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + by_day: { + date_histogram: { + field: "date", + calendar_interval: "day", + offset: "+6h", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/51b44224feee6e2e5974824334474c77.asciidoc b/docs/doc_examples/51b44224feee6e2e5974824334474c77.asciidoc new file mode 100644 index 000000000..ac951d7c0 --- /dev/null +++ b/docs/doc_examples/51b44224feee6e2e5974824334474c77.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_s3_repository", + repository: { + type: "s3", + settings: { + client: "my-client", + bucket: "my-bucket", + endpoint: "my.s3.endpoint", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/51f1a0930362594b231a5bcc17673768.asciidoc b/docs/doc_examples/51f1a0930362594b231a5bcc17673768.asciidoc new file mode 100644 index 000000000..126095322 --- /dev/null +++ b/docs/doc_examples/51f1a0930362594b231a5bcc17673768.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.modifyDataStream({ + actions: [ + { + remove_backing_index: { + data_stream: "my-logs", + index: ".ds-my-logs-2099.01.01-000001", + }, + }, + { + add_backing_index: { + data_stream: "my-logs", + index: "index-to-add", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/51f6cb682424e110f289af79c106f4c7.asciidoc b/docs/doc_examples/51f6cb682424e110f289af79c106f4c7.asciidoc new file mode 100644 
index 000000000..236694db7 --- /dev/null +++ b/docs/doc_examples/51f6cb682424e110f289af79c106f4c7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.max_shards_per_node.frozen": 3200, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5271f4ff29bb48838396e5a674664ee0.asciidoc b/docs/doc_examples/5271f4ff29bb48838396e5a674664ee0.asciidoc deleted file mode 100644 index 0727dc5a2..000000000 --- a/docs/doc_examples/5271f4ff29bb48838396e5a674664ee0.asciidoc +++ /dev/null @@ -1,65 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - city: { - type: 'text', - fields: { - raw: { - type: 'keyword' - } - } - } - } - } - } -}) -console.log(response0) - -const response1 = await client.index({ - index: 'my_index', - id: '1', - body: { - city: 'New York' - } -}) -console.log(response1) - -const response2 = await client.index({ - index: 'my_index', - id: '2', - body: { - city: 'York' - } -}) -console.log(response2) - -const response3 = await client.search({ - index: 'my_index', - body: { - query: { - match: { - city: 'york' - } - }, - sort: { - 'city.raw': 'asc' - }, - aggs: { - Cities: { - terms: { - field: 'city.raw' - } - } - } - } -}) -console.log(response3) ----- - diff --git a/docs/doc_examples/527324766814561b75aaee853ede49a7.asciidoc b/docs/doc_examples/527324766814561b75aaee853ede49a7.asciidoc deleted file mode 100644 index e3fc030b2..000000000 --- a/docs/doc_examples/527324766814561b75aaee853ede49a7.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - tags: { - terms: { - field: 'tags', - min_doc_count: 10 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/5275842787967b6db876025f4a1c6942.asciidoc b/docs/doc_examples/5275842787967b6db876025f4a1c6942.asciidoc index ac7e9cd69..d2ccfd01b 100644 --- a/docs/doc_examples/5275842787967b6db876025f4a1c6942.asciidoc +++ b/docs/doc_examples/5275842787967b6db876025f4a1c6942.asciidoc @@ -4,22 +4,19 @@ [source, js] ---- const response = await client.search({ - body: { - suggest: { - text: 'tring out Elasticsearch', - 'my-suggest-1': { - term: { - field: 'message' - } + suggest: { + text: "tring out Elasticsearch", + "my-suggest-1": { + term: { + field: "message", }, - 'my-suggest-2': { - term: { - field: 'user' - } - } - } - } -}) -console.log(response) + }, + "my-suggest-2": { + term: { + field: "user", + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5276a831513623e43ed567eb52b6dba9.asciidoc b/docs/doc_examples/5276a831513623e43ed567eb52b6dba9.asciidoc new file mode 100644 index 000000000..8b4199dbd --- /dev/null +++ b/docs/doc_examples/5276a831513623e43ed567eb52b6dba9.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + routing: "my-routing-value", + document: { + "@timestamp": 
"2099-11-15T13:12:00", + message: "GET /search HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/528e5f1c345c3769248cc6889e8cf552.asciidoc b/docs/doc_examples/528e5f1c345c3769248cc6889e8cf552.asciidoc new file mode 100644 index 000000000..7b49f32ca --- /dev/null +++ b/docs/doc_examples/528e5f1c345c3769248cc6889e8cf552.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "index", + properties: { + title: { + type: "text", + similarity: "my_similarity", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/529b975b7cedaac58dce9821956adc37.asciidoc b/docs/doc_examples/529b975b7cedaac58dce9821956adc37.asciidoc new file mode 100644 index 000000000..91d0a569e --- /dev/null +++ b/docs/doc_examples/529b975b7cedaac58dce9821956adc37.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "MultiPolygon", + coordinates: [ + [ + [ + [102, 2], + [103, 2], + [103, 3], + [102, 3], + [102, 2], + ], + ], + [ + [ + [100, 0], + [101, 0], + [101, 1], + [100, 1], + [100, 0], + ], + [ + [100.2, 0.2], + [100.8, 0.2], + [100.8, 0.8], + [100.2, 0.8], + [100.2, 0.2], + ], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52a2d119addb15366a935115518335fd.asciidoc b/docs/doc_examples/52a2d119addb15366a935115518335fd.asciidoc new file mode 100644 index 000000000..45074d6c2 --- /dev/null +++ b/docs/doc_examples/52a2d119addb15366a935115518335fd.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my_source_index", + settings: { + settings: { + "index.number_of_replicas": 0, + "index.routing.allocation.require._name": "shrink_node_name", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc b/docs/doc_examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc new file mode 100644 index 000000000..7038db1a4 --- /dev/null +++ b/docs/doc_examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + human: "true", + filter_path: "nodes.*.name,nodes.*.indices.indexing", +}); +console.log(response); +---- diff --git a/docs/doc_examples/52bc577a0d0cd42b46f33e0ef5124df8.asciidoc b/docs/doc_examples/52bc577a0d0cd42b46f33e0ef5124df8.asciidoc new file mode 100644 index 000000000..72faf47d1 --- /dev/null +++ b/docs/doc_examples/52bc577a0d0cd42b46f33e0ef5124df8.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-search-template", + script: { + lang: "mustache", + source: { + query: { + match: { + message: "{{query_string}}", + }, + }, + from: "{{from}}", + size: "{{size}}", + }, + }, +}); +console.log(response); +---- diff 
--git a/docs/doc_examples/52be795b68e6ef3f396f35fea52d0481.asciidoc b/docs/doc_examples/52be795b68e6ef3f396f35fea52d0481.asciidoc new file mode 100644 index 000000000..de83fe2e1 --- /dev/null +++ b/docs/doc_examples/52be795b68e6ef3f396f35fea52d0481.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc b/docs/doc_examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc new file mode 100644 index 000000000..b1ed824fb --- /dev/null +++ b/docs/doc_examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-image-index", + size: 10, + query: { + bool: { + must: { + knn: { + field: "image-vector", + query_vector: [-5, 9, -12], + k: 3, + }, + }, + filter: { + term: { + "file-type": "png", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52c7e4172a446c394210a07c464c57d2.asciidoc b/docs/doc_examples/52c7e4172a446c394210a07c464c57d2.asciidoc index c15b0db56..0b1f25a10 100644 --- a/docs/doc_examples/52c7e4172a446c394210a07c464c57d2.asciidoc +++ b/docs/doc_examples/52c7e4172a446c394210a07c464c57d2.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.deleteByQueryRethrottle({ - task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619', - requests_per_second: '-1' -}) -console.log(response) + task_id: "r1A2WoRbTwKZ516z6NEs5A:36619", + requests_per_second: "-1", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/52cdb5526ce69d0223d1dd198308bfea.asciidoc b/docs/doc_examples/52cdb5526ce69d0223d1dd198308bfea.asciidoc new file mode 100644 index 000000000..a15862221 --- /dev/null +++ b/docs/doc_examples/52cdb5526ce69d0223d1dd198308bfea.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic: false, + properties: { + user: { + properties: { + name: { + type: "text", + }, + social_networks: { + dynamic: true, + properties: {}, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52f1c1689ab35353858cdeaab7597546.asciidoc b/docs/doc_examples/52f1c1689ab35353858cdeaab7597546.asciidoc new file mode 100644 index 000000000..0a3fbbf4f --- /dev/null +++ b/docs/doc_examples/52f1c1689ab35353858cdeaab7597546.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-data-stream", + pipeline: "my-pipeline", + document: { + message: + '89.160.20.128 - - [05/May/2099:16:21:15 +0000] "GET /favicon.ico HTTP/1.1" 200 3638 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36"', + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52fd112e970882c4d7cc4b0cca8e2c6f.asciidoc 
b/docs/doc_examples/52fd112e970882c4d7cc4b0cca8e2c6f.asciidoc new file mode 100644 index 000000000..3044bb476 --- /dev/null +++ b/docs/doc_examples/52fd112e970882c4d7cc4b0cca8e2c6f.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + number_of_bytes: { + type: "integer", + }, + time_in_seconds: { + type: "float", + }, + price: { + type: "scaled_float", + scaling_factor: 100, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5302f4f2bcc0f400ff71c791e6f68d7b.asciidoc b/docs/doc_examples/5302f4f2bcc0f400ff71c791e6f68d7b.asciidoc new file mode 100644 index 000000000..96d2153be --- /dev/null +++ b/docs/doc_examples/5302f4f2bcc0f400ff71c791e6f68d7b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "keyword_marker", + keywords: ["jumping"], + }, + "stemmer", + ], + text: "fox running and jumping", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc b/docs/doc_examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc new file mode 100644 index 000000000..fbb8023e0 --- /dev/null +++ b/docs/doc_examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.shutdown.putNode({ + node_id: "USpTGYaBSIKbgSUJR2Z9lg", + type: "restart", + reason: "Demonstrating how the node shutdown API works", + allocation_delay: "20m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc b/docs/doc_examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc new file mode 100644 index 000000000..019795cfe --- /dev/null +++ b/docs/doc_examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index_long", + mappings: { + properties: { + field: { + type: "long", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/532f371934b61fb4992d37bedcc085de.asciidoc b/docs/doc_examples/532f371934b61fb4992d37bedcc085de.asciidoc new file mode 100644 index 000000000..9cd381107 --- /dev/null +++ b/docs/doc_examples/532f371934b61fb4992d37bedcc085de.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.shutdown.putNode({ + node_id: "USpTGYaBSIKbgSUJR2Z9lg", + type: "restart", + reason: "Demonstrating how the node shutdown API works", + allocation_delay: "10m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfb1fe96d806a644214d06f9b4b87878.asciidoc b/docs/doc_examples/5330191ec9f11281ebf6867bf11c58ae.asciidoc similarity index 56% rename from docs/doc_examples/dfb1fe96d806a644214d06f9b4b87878.asciidoc rename to docs/doc_examples/5330191ec9f11281ebf6867bf11c58ae.asciidoc index 3cc9e1ed8..43089e612 100644 
--- a/docs/doc_examples/dfb1fe96d806a644214d06f9b4b87878.asciidoc +++ b/docs/doc_examples/5330191ec9f11281ebf6867bf11c58ae.asciidoc @@ -4,16 +4,15 @@ [source, js] ---- const response = await client.deleteByQuery({ - index: 'twitter', - scroll_size: '5000', - body: { - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) + index: "my-index-000001", + routing: 1, + query: { + range: { + age: { + gte: 10, + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5332c4cca5fbb45cc700dcd34f37bc38.asciidoc b/docs/doc_examples/5332c4cca5fbb45cc700dcd34f37bc38.asciidoc new file mode 100644 index 000000000..04bd5892f --- /dev/null +++ b/docs/doc_examples/5332c4cca5fbb45cc700dcd34f37bc38.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + english: "Some English text", + count: 5, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ce3606f1dba490eef83c4317b315b62.asciidoc b/docs/doc_examples/53aa8b21e2b1c4d48960343711296704.asciidoc similarity index 58% rename from docs/doc_examples/0ce3606f1dba490eef83c4317b315b62.asciidoc rename to docs/doc_examples/53aa8b21e2b1c4d48960343711296704.asciidoc index c89a1ffb3..17dd79085 100644 --- a/docs/doc_examples/0ce3606f1dba490eef83c4317b315b62.asciidoc +++ b/docs/doc_examples/53aa8b21e2b1c4d48960343711296704.asciidoc @@ -4,15 +4,12 @@ [source, js] ---- const response = await client.search({ - index: 'twitter', - body: { - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) + index: "my-index-000001", + query: { + regexp: { + "my_field.keyword": "a\\\\.*", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/53b908c3432118c5a6e460f74d32006b.asciidoc b/docs/doc_examples/53b908c3432118c5a6e460f74d32006b.asciidoc index 519bfd36b..6ec1db62e 100644 --- a/docs/doc_examples/53b908c3432118c5a6e460f74d32006b.asciidoc +++ b/docs/doc_examples/53b908c3432118c5a6e460f74d32006b.asciidoc @@ -4,18 +4,12 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'this is a test', - fields: [ - 'subject', - 'message' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "this is a test", + fields: ["subject", "message"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/53bb7f0e3429861aadb8dd3d588085cd.asciidoc b/docs/doc_examples/53bb7f0e3429861aadb8dd3d588085cd.asciidoc new file mode 100644 index 000000000..061f2096c --- /dev/null +++ b/docs/doc_examples/53bb7f0e3429861aadb8dd3d588085cd.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-data-stream", + seq_no_primary_term: true, + query: { + match: { + "user.id": "yWIumJd7", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/53c6256295111524d5ff2885bdcb99a9.asciidoc b/docs/doc_examples/53c6256295111524d5ff2885bdcb99a9.asciidoc new file mode 100644 index 000000000..9822c80fa --- /dev/null +++ b/docs/doc_examples/53c6256295111524d5ff2885bdcb99a9.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.transform.getTransform({ + transform_id: "_stats", + from: 5, + size: 10, +}); +console.log(response); +---- diff --git a/docs/doc_examples/53e4ac5a4009fd21024f4b31e54aa83f.asciidoc b/docs/doc_examples/53e4ac5a4009fd21024f4b31e54aa83f.asciidoc new file mode 100644 index 000000000..c8cc6b7df --- /dev/null +++ b/docs/doc_examples/53e4ac5a4009fd21024f4b31e54aa83f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "facilitator", + password: "", + roles: ["facilitator-role"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/54059961f05904368ced52c894a50e23.asciidoc b/docs/doc_examples/54059961f05904368ced52c894a50e23.asciidoc new file mode 100644 index 000000000..c6040d699 --- /dev/null +++ b/docs/doc_examples/54059961f05904368ced52c894a50e23.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_moving_max: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.max(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/54092c8c646133f5dbbc047990dd458d.asciidoc b/docs/doc_examples/54092c8c646133f5dbbc047990dd458d.asciidoc deleted file mode 100644 index 420836eac..000000000 --- a/docs/doc_examples/54092c8c646133f5dbbc047990dd458d.asciidoc +++ /dev/null @@ -1,36 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'drivers', - body: { - mappings: { - properties: { - driver: { - type: 'nested', - properties: { - last_name: { - type: 'text' - }, - vehicle: { - type: 'nested', - properties: { - make: { - type: 'text' - }, - model: { - type: 'text' - } - } - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/540aefc39303c925a4efff71ebe2f002.asciidoc b/docs/doc_examples/540aefc39303c925a4efff71ebe2f002.asciidoc new file mode 100644 index 000000000..3d74dc98c --- /dev/null +++ b/docs/doc_examples/540aefc39303c925a4efff71ebe2f002.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + tags: { + significant_terms: { + field: "tag", + min_doc_count: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5433bb83628cc91d81fbe53c533b2a09.asciidoc b/docs/doc_examples/5433bb83628cc91d81fbe53c533b2a09.asciidoc new file mode 100644 index 000000000..9b1d74392 --- /dev/null +++ b/docs/doc_examples/5433bb83628cc91d81fbe53c533b2a09.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "asciifold_example", + settings: { + analysis: { + analyzer: { + standard_asciifolding: { + tokenizer: "standard", + filter: ["asciifolding"], + }, + }, + }, + 
}, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc b/docs/doc_examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc new file mode 100644 index 000000000..1dfbbd2b4 --- /dev/null +++ b/docs/doc_examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "indices", +}); +console.log(response); + +const response1 = await client.nodes.stats({ + metric: "os,process", +}); +console.log(response1); + +const response2 = await client.nodes.stats({ + node_id: "10.0.0.1", + metric: "process", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/548b85bd9e6e7d33e36133953869449b.asciidoc b/docs/doc_examples/548b85bd9e6e7d33e36133953869449b.asciidoc new file mode 100644 index 000000000..719ae08ac --- /dev/null +++ b/docs/doc_examples/548b85bd9e6e7d33e36133953869449b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "xpack.monitoring.collection.enabled": false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/54a215d242ab65123b09e9dfb71bcbbf.asciidoc b/docs/doc_examples/54a215d242ab65123b09e9dfb71bcbbf.asciidoc new file mode 100644 index 000000000..3c2251e1a --- /dev/null +++ b/docs/doc_examples/54a215d242ab65123b09e9dfb71bcbbf.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + terms: { + field: "genre", + order: { + _key: "asc", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/55096381f811388fafd8e244dd2402c8.asciidoc b/docs/doc_examples/55096381f811388fafd8e244dd2402c8.asciidoc new file mode 100644 index 000000000..044e53645 --- /dev/null +++ b/docs/doc_examples/55096381f811388fafd8e244dd2402c8.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-alias", + settings: { + "index.number_of_shards": 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/553904c175a76d5ba83bc5d46fff7373.asciidoc b/docs/doc_examples/553904c175a76d5ba83bc5d46fff7373.asciidoc new file mode 100644 index 000000000..e4289442f --- /dev/null +++ b/docs/doc_examples/553904c175a76d5ba83bc5d46fff7373.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlLogout({ + token: "46ToAxZVaXVVZTVKOVF5YU04ZFJVUDVSZlV3", + refresh_token: "mJdXLtmvTUSpoLwMvdBt_w", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a1de18774f9c68cafa169847832b2bc.asciidoc b/docs/doc_examples/553d79817bb1333970e99507c37a159a.asciidoc similarity index 53% rename from docs/doc_examples/2a1de18774f9c68cafa169847832b2bc.asciidoc rename to docs/doc_examples/553d79817bb1333970e99507c37a159a.asciidoc index c0ae902fa..1097587d1 100644 --- a/docs/doc_examples/2a1de18774f9c68cafa169847832b2bc.asciidoc +++ 
b/docs/doc_examples/553d79817bb1333970e99507c37a159a.asciidoc @@ -4,17 +4,16 @@ [source, js] ---- const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - full_text: { - type: 'text' - } - } - } - } -}) -console.log(response) + index: "index", + settings: { + index: { + similarity: { + default: { + type: "boolean", + }, + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5553cf7a02c22f616cd994747f2dd5a5.asciidoc b/docs/doc_examples/5553cf7a02c22f616cd994747f2dd5a5.asciidoc new file mode 100644 index 000000000..a3365ef4b --- /dev/null +++ b/docs/doc_examples/5553cf7a02c22f616cd994747f2dd5a5.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + bool: { + must: [ + { + match: { + "user.first": "Alice", + }, + }, + { + match: { + "user.last": "Smith", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5566cff431570f522e1fc5475b2ed875.asciidoc b/docs/doc_examples/5566cff431570f522e1fc5475b2ed875.asciidoc new file mode 100644 index 000000000..ac151dd64 --- /dev/null +++ b/docs/doc_examples/5566cff431570f522e1fc5475b2ed875.asciidoc @@ -0,0 +1,71 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + settings: { + index: { + number_of_shards: 1, + analysis: { + analyzer: { + trigram: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "shingle"], + }, + reverse: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "reverse"], + }, + }, + filter: { + shingle: { + type: "shingle", + min_shingle_size: 2, + max_shingle_size: 3, + }, + }, + }, + }, + }, + mappings: { + properties: { + title: { + type: "text", + fields: { + trigram: { + type: "text", + analyzer: "trigram", + }, + reverse: { + type: "text", + analyzer: "reverse", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + refresh: "true", + document: { + title: "noble warriors", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "test", + refresh: "true", + document: { + title: "nobel prize", + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/55838e0b21c4f4da2dc8aaec045a6d5f.asciidoc b/docs/doc_examples/55838e0b21c4f4da2dc8aaec045a6d5f.asciidoc new file mode 100644 index 000000000..35b5bc77a --- /dev/null +++ b/docs/doc_examples/55838e0b21c4f4da2dc8aaec045a6d5f.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + runtime_mappings: { + "load_time.seconds": { + type: "long", + script: { + source: "emit(doc['load_time'].value / params.timeUnit)", + params: { + timeUnit: 1000, + }, + }, + }, + }, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time.seconds", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc b/docs/doc_examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc new file mode 100644 index 000000000..1e6422794 --- /dev/null +++ 
b/docs/doc_examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-dfs-index", + settings: { + number_of_shards: 2, + number_of_replicas: 1, + }, + mappings: { + properties: { + "my-keyword": { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-dfs-index", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + "my-keyword": "a", + }, + { + index: { + _id: "2", + }, + }, + { + "my-keyword": "b", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/5597eeb8f43b5d47bd07f27122c24194.asciidoc b/docs/doc_examples/5597eeb8f43b5d47bd07f27122c24194.asciidoc new file mode 100644 index 000000000..6afde064f --- /dev/null +++ b/docs/doc_examples/5597eeb8f43b5d47bd07f27122c24194.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: + "my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001", + ccs_minimize_roundtrips: "false", + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/55d349ccb0efd5e1c06c6dd383a593cf.asciidoc b/docs/doc_examples/55d349ccb0efd5e1c06c6dd383a593cf.asciidoc new file mode 100644 index 000000000..5e71a8116 --- /dev/null +++ b/docs/doc_examples/55d349ccb0efd5e1c06c6dd383a593cf.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: "my-index-000001,cluster*:my-index-*,cluster_three:-my-index-000001", + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/55e8ddf643726dec51531ada0bec7143.asciidoc b/docs/doc_examples/55e8ddf643726dec51531ada0bec7143.asciidoc new file mode 100644 index 000000000..7e7984e2e --- /dev/null +++ b/docs/doc_examples/55e8ddf643726dec51531ada0bec7143.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getStats(); +console.log(response); +---- diff --git a/docs/doc_examples/55f0fec6342f677af74de2124b801aa2.asciidoc b/docs/doc_examples/55f0fec6342f677af74de2124b801aa2.asciidoc new file mode 100644 index 000000000..1f7148276 --- /dev/null +++ b/docs/doc_examples/55f0fec6342f677af74de2124b801aa2.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "byte-image-index", + knn: { + field: "byte-image-vector", + query_vector: [-5, 9], + k: 10, + num_candidates: 100, + }, + fields: ["title"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc b/docs/doc_examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc new file mode 100644 index 000000000..baa6dae78 --- 
/dev/null +++ b/docs/doc_examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.authenticate(); +console.log(response); +---- diff --git a/docs/doc_examples/5619103306878d58a058bce87c5bd82b.asciidoc b/docs/doc_examples/5619103306878d58a058bce87c5bd82b.asciidoc new file mode 100644 index 000000000..5f2179a7b --- /dev/null +++ b/docs/doc_examples/5619103306878d58a058bce87c5bd82b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.recovery({ + human: "true", + detailed: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5632c3b947062d3a5fc0e4f3413b3308.asciidoc b/docs/doc_examples/5632c3b947062d3a5fc0e4f3413b3308.asciidoc new file mode 100644 index 000000000..bf8e62960 --- /dev/null +++ b/docs/doc_examples/5632c3b947062d3a5fc0e4f3413b3308.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_fs_backup", + repository: { + type: "fs", + settings: { + location: "/mount/backups/my_fs_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/563dfbf421422c837ee6929ae2ede876.asciidoc b/docs/doc_examples/563dfbf421422c837ee6929ae2ede876.asciidoc new file mode 100644 index 000000000..f60a1d8d5 --- /dev/null +++ b/docs/doc_examples/563dfbf421422c837ee6929ae2ede876.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.migrateToDataStream({ + name: "my-logs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc b/docs/doc_examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc new file mode 100644 index 000000000..91ca422d8 --- /dev/null +++ b/docs/doc_examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_security/cross_cluster/api_key", + body: { + name: "my-cross-cluster-api-key", + expiration: "1d", + access: { + search: [ + { + names: ["logs*"], + }, + ], + replication: [ + { + names: ["archive*"], + }, + ], + }, + metadata: { + description: "phase one", + environment: { + level: 1, + trusted: true, + tags: ["dev", "staging"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/565908b03edff1d6e6e7cdfb92177faf.asciidoc b/docs/doc_examples/565908b03edff1d6e6e7cdfb92177faf.asciidoc new file mode 100644 index 000000000..6815ee37a --- /dev/null +++ b/docs/doc_examples/565908b03edff1d6e6e7cdfb92177faf.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + runtime_mappings: { + "grade.weighted": { + type: "double", + script: + "\n emit(doc['grade'].value * 
doc['weight'].value)\n ", + }, + }, + aggs: { + grades_stats: { + stats: { + field: "grade.weighted", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/568979150ce18739f8d3ea859355aaa3.asciidoc b/docs/doc_examples/568979150ce18739f8d3ea859355aaa3.asciidoc new file mode 100644 index 000000000..c421d5bd2 --- /dev/null +++ b/docs/doc_examples/568979150ce18739f8d3ea859355aaa3.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getUser({ + username: "jacknich", + with_profile_uid: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/569f10fee671632017c722fd983009d4.asciidoc b/docs/doc_examples/569f10fee671632017c722fd983009d4.asciidoc new file mode 100644 index 000000000..cc388a8c9 --- /dev/null +++ b/docs/doc_examples/569f10fee671632017c722fd983009d4.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + shop: { + terms: { + field: "shop", + }, + }, + }, + { + product: { + terms: { + field: "product", + }, + }, + }, + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc b/docs/doc_examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc new file mode 100644 index 000000000..256dc7ce2 --- /dev/null +++ b/docs/doc_examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + email: { + type: "keyword", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/56a903530990313b753b1be33578997a.asciidoc b/docs/doc_examples/56a903530990313b753b1be33578997a.asciidoc new file mode 100644 index 000000000..789290384 --- /dev/null +++ b/docs/doc_examples/56a903530990313b753b1be33578997a.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + dis_max: { + queries: [ + { + multi_match: { + query: "Will Smith", + type: "cross_fields", + fields: ["first", "last"], + minimum_should_match: "50%", + }, + }, + { + multi_match: { + query: "Will Smith", + type: "cross_fields", + fields: ["*.edge"], + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/56b6b50b174a935d368301ebd717231d.asciidoc b/docs/doc_examples/56b6b50b174a935d368301ebd717231d.asciidoc new file mode 100644 index 000000000..710fd7bb1 --- /dev/null +++ b/docs/doc_examples/56b6b50b174a935d368301ebd717231d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.stats({ + metric: "current_watches", +}); +console.log(response); +---- diff --git a/docs/doc_examples/56da9c55774f4c2e8eadde0579bdc60c.asciidoc 
b/docs/doc_examples/56da9c55774f4c2e8eadde0579bdc60c.asciidoc new file mode 100644 index 000000000..aeffe4d01 --- /dev/null +++ b/docs/doc_examples/56da9c55774f4c2e8eadde0579bdc60c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test*", + filter_path: "aggregations", + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + s: { + order: "asc", + numeric_type: "double", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/56db76c987106a870357854d3068ad98.asciidoc b/docs/doc_examples/56db76c987106a870357854d3068ad98.asciidoc new file mode 100644 index 000000000..3ae4090ca --- /dev/null +++ b/docs/doc_examples/56db76c987106a870357854d3068ad98.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_query_rules", +}); +console.log(response); +---- diff --git a/docs/doc_examples/56e90a63f94eeb882fe8acbcd74229c2.asciidoc b/docs/doc_examples/56e90a63f94eeb882fe8acbcd74229c2.asciidoc new file mode 100644 index 000000000..608ba2f7f --- /dev/null +++ b/docs/doc_examples/56e90a63f94eeb882fe8acbcd74229c2.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_moving_min: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.min(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/56f3a6bec7be5a90fb43144c331a5b5a.asciidoc b/docs/doc_examples/56f3a6bec7be5a90fb43144c331a5b5a.asciidoc new file mode 100644 index 000000000..ecc6eaa60 --- /dev/null +++ b/docs/doc_examples/56f3a6bec7be5a90fb43144c331a5b5a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + flat_settings: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/56fa6c9e08258157d445e2f92274962b.asciidoc b/docs/doc_examples/56fa6c9e08258157d445e2f92274962b.asciidoc new file mode 100644 index 000000000..47651fd9a --- /dev/null +++ b/docs/doc_examples/56fa6c9e08258157d445e2f92274962b.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "shingle", + min_shingle_size: 2, + max_shingle_size: 3, + output_unigrams: false, + }, + ], + text: "quick brown fox jumps", +}); +console.log(response); +---- diff --git a/docs/doc_examples/571314a948e49f1f9614d36fcf79392a.asciidoc b/docs/doc_examples/571314a948e49f1f9614d36fcf79392a.asciidoc new file mode 100644 index 000000000..e70d505ba --- /dev/null +++ b/docs/doc_examples/571314a948e49f1f9614d36fcf79392a.asciidoc @@ -0,0 +1,10 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.get({ + id: "FjktRGJ1Y2w1U0phLTRhZnVyeUZ2MVEbWEJyeVBPQldTV3FGZGdIeUVabXBldzo5NzA4", +}); +console.log(response); +---- diff --git a/docs/doc_examples/578808065fee8691355b8f25c35782cd.asciidoc b/docs/doc_examples/578808065fee8691355b8f25c35782cd.asciidoc new file mode 100644 index 000000000..ad2b22c3e --- /dev/null +++ b/docs/doc_examples/578808065fee8691355b8f25c35782cd.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + filter_path: "profile.shards.fetch", + profile: true, + query: { + term: { + "user.id": { + value: "elkbee", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5797df4b8e71d821a1488cbb63481104.asciidoc b/docs/doc_examples/5797df4b8e71d821a1488cbb63481104.asciidoc new file mode 100644 index 000000000..00b4a3abb --- /dev/null +++ b/docs/doc_examples/5797df4b8e71d821a1488cbb63481104.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.healthReport({ + feature: "shards_capacity", +}); +console.log(response); +---- diff --git a/docs/doc_examples/57a3e8d2ca64e37e90d658c4cd935399.asciidoc b/docs/doc_examples/57a3e8d2ca64e37e90d658c4cd935399.asciidoc new file mode 100644 index 000000000..22059e18c --- /dev/null +++ b/docs/doc_examples/57a3e8d2ca64e37e90d658c4cd935399.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "items", + query: { + bool: { + must: { + match: { + name: "chocolate", + }, + }, + should: { + distance_feature: { + field: "location", + pivot: "1000m", + origin: [-71.3, 41.15], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/57c690f8fa95bacf4b250803be7467e4.asciidoc b/docs/doc_examples/57c690f8fa95bacf4b250803be7467e4.asciidoc new file mode 100644 index 000000000..a09a17ef2 --- /dev/null +++ b/docs/doc_examples/57c690f8fa95bacf4b250803be7467e4.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "BBOX (1000.0, 1002.0, 2000.0, 1000.0)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc new file mode 100644 index 000000000..39d1ae3a1 --- /dev/null +++ b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.oidcPrepareAuthentication({}); +console.log(response); +---- diff --git a/docs/doc_examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc b/docs/doc_examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc new file mode 100644 index 000000000..9babe8e35 --- /dev/null +++ 
b/docs/doc_examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["temp*"], + priority: 0, + template: { + settings: { + number_of_shards: 1, + number_of_replicas: 0, + }, + mappings: { + _source: { + enabled: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.putIndexTemplate({ + name: "template_2", + index_patterns: ["template*"], + priority: 1, + template: { + settings: { + number_of_shards: 2, + }, + mappings: { + _source: { + enabled: true, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/582c4b05401dbc190b19411282d85310.asciidoc b/docs/doc_examples/582c4b05401dbc190b19411282d85310.asciidoc new file mode 100644 index 000000000..09b9fa2b8 --- /dev/null +++ b/docs/doc_examples/582c4b05401dbc190b19411282d85310.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "my-index-000001", + id: 1, + script: { + source: + "if (ctx._source.tags.contains(params['tag'])) { ctx.op = 'delete' } else { ctx.op = 'none' }", + lang: "painless", + params: { + tag: "green", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/582da02c09e0597b4396c87e33571e7b.asciidoc b/docs/doc_examples/582da02c09e0597b4396c87e33571e7b.asciidoc new file mode 100644 index 000000000..e5be2eaef --- /dev/null +++ b/docs/doc_examples/582da02c09e0597b4396c87e33571e7b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + cursor: + "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc b/docs/doc_examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc new file mode 100644 index 000000000..b5d5d91f8 --- /dev/null +++ b/docs/doc_examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.startTrainedModelDeployment({ + model_id: "my_model", + deployment_id: "my_model_for_search", +}); +console.log(response); +---- diff --git a/docs/doc_examples/584f502cf840134f2db5f39e2483ced1.asciidoc b/docs/doc_examples/584f502cf840134f2db5f39e2483ced1.asciidoc new file mode 100644 index 000000000..15ffff9c6 --- /dev/null +++ b/docs/doc_examples/584f502cf840134f2db5f39e2483ced1.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "portuguese_example", + settings: { + analysis: { + filter: { + portuguese_stop: { + type: "stop", + stopwords: "_portuguese_", + }, + portuguese_keywords: { + type: "keyword_marker", + keywords: ["exemplo"], + }, + portuguese_stemmer: { + type: "stemmer", + language: "light_portuguese", + }, + }, + analyzer: { + 
rebuilt_portuguese: { + tokenizer: "standard", + filter: [ + "lowercase", + "portuguese_stop", + "portuguese_keywords", + "portuguese_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/585a34ad79aee16678b37da785933ac8.asciidoc b/docs/doc_examples/585a34ad79aee16678b37da785933ac8.asciidoc new file mode 100644 index 000000000..8e62c4d26 --- /dev/null +++ b/docs/doc_examples/585a34ad79aee16678b37da785933ac8.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.stop(); +console.log(response); +---- diff --git a/docs/doc_examples/585b19369cb9b9763a7e8d405f009a47.asciidoc b/docs/doc_examples/585b19369cb9b9763a7e8d405f009a47.asciidoc new file mode 100644 index 000000000..02e8855dc --- /dev/null +++ b/docs/doc_examples/585b19369cb9b9763a7e8d405f009a47.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + day_of_week: null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5865ca8d2bcd087ed5dbee33fafee57f.asciidoc b/docs/doc_examples/5865ca8d2bcd087ed5dbee33fafee57f.asciidoc new file mode 100644 index 000000000..9063935bf --- /dev/null +++ b/docs/doc_examples/5865ca8d2bcd087ed5dbee33fafee57f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.explainDataLifecycle({ + index: ".ds-my-data-stream-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/586cfa0e5fd695b7d451e854f9fb4a9c.asciidoc b/docs/doc_examples/586cfa0e5fd695b7d451e854f9fb4a9c.asciidoc new file mode 100644 index 000000000..0b83eb8c3 --- /dev/null +++ b/docs/doc_examples/586cfa0e5fd695b7d451e854f9fb4a9c.asciidoc @@ -0,0 +1,53 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_locations", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my_locations", + id: 1, + refresh: "true", + document: { + location: "POINT(4.912350 52.374081)", + city: "Amsterdam", + name: "NEMO Science Museum", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my_locations", + id: 2, + refresh: "true", + document: { + location: "POINT(4.405200 51.222900)", + city: "Antwerp", + name: "Letterenhuis", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my_locations", + id: 3, + refresh: "true", + document: { + location: "POINT(2.336389 48.861111)", + city: "Paris", + name: "Musée du Louvre", + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/58b5003c0a53a39bf509aa3797aad471.asciidoc b/docs/doc_examples/58b5003c0a53a39bf509aa3797aad471.asciidoc deleted file mode 100644 index 259d93687..000000000 --- a/docs/doc_examples/58b5003c0a53a39bf509aa3797aad471.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - 
-[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'content', - 'name.*^5' - ], - query: 'this AND that OR thus' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/58ca855be30049f8f0879e532db51ee2.asciidoc b/docs/doc_examples/58ca855be30049f8f0879e532db51ee2.asciidoc new file mode 100644 index 000000000..4212f2f60 --- /dev/null +++ b/docs/doc_examples/58ca855be30049f8f0879e532db51ee2.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.putTransform({ + transform_id: "ecommerce_transform1", + source: { + index: "kibana_sample_data_ecommerce", + query: { + term: { + "geoip.continent_name": { + value: "Asia", + }, + }, + }, + }, + pivot: { + group_by: { + customer_id: { + terms: { + field: "customer_id", + missing_bucket: true, + }, + }, + }, + aggregations: { + max_price: { + max: { + field: "taxful_total_price", + }, + }, + }, + }, + description: "Maximum priced ecommerce data by customer_id in Asia", + dest: { + index: "kibana_sample_data_ecommerce_transform1", + pipeline: "add_timestamp_pipeline", + }, + frequency: "5m", + sync: { + time: { + field: "order_date", + delay: "60s", + }, + }, + retention_policy: { + time: { + field: "order_date", + max_age: "30d", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/58e684e0b771b4646662fe12d3060c05.asciidoc b/docs/doc_examples/58e684e0b771b4646662fe12d3060c05.asciidoc new file mode 100644 index 000000000..22c848908 --- /dev/null +++ b/docs/doc_examples/58e684e0b771b4646662fe12d3060c05.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "cjk_width_example", + settings: { + analysis: { + analyzer: { + standard_cjk_width: { + tokenizer: "standard", + filter: ["cjk_width"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc b/docs/doc_examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc new file mode 100644 index 000000000..2fc9fec45 --- /dev/null +++ b/docs/doc_examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.indices.recovery": "DEBUG", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/591c7fb7451069829a14bba593136f1f.asciidoc b/docs/doc_examples/591c7fb7451069829a14bba593136f1f.asciidoc new file mode 100644 index 000000000..0b78c8eec --- /dev/null +++ b/docs/doc_examples/591c7fb7451069829a14bba593136f1f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.forecast({ + job_id: "low_request_rate", + duration: "10d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5969c446688c8b326acc80276573e9d2.asciidoc b/docs/doc_examples/5969c446688c8b326acc80276573e9d2.asciidoc new file mode 100644 index 000000000..7acc40c12 --- /dev/null +++ b/docs/doc_examples/5969c446688c8b326acc80276573e9d2.asciidoc 
@@ -0,0 +1,34 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    match: {
+      "user.id": "kimchy",
+    },
+  },
+  highlight: {
+    number_of_fragments: 3,
+    fragment_size: 150,
+    fields: {
+      body: {
+        pre_tags: ["<em>"],
+        post_tags: ["</em>"],
+      },
+      "blog.title": {
+        number_of_fragments: 0,
+      },
+      "blog.author": {
+        number_of_fragments: 0,
+      },
+      "blog.comment": {
+        number_of_fragments: 5,
+        order: "score",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/59726e3c90e1218487a781508788c243.asciidoc b/docs/doc_examples/59726e3c90e1218487a781508788c243.asciidoc
new file mode 100644
index 000000000..b895ddd4d
--- /dev/null
+++ b/docs/doc_examples/59726e3c90e1218487a781508788c243.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  aggs: {
+    sale_date: {
+      auto_date_histogram: {
+        field: "date",
+        buckets: 10,
+        missing: "2000/01/01",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/597d456edfcb3d410954a3e9b5babf9a.asciidoc b/docs/doc_examples/597d456edfcb3d410954a3e9b5babf9a.asciidoc
new file mode 100644
index 000000000..a57d2e49f
--- /dev/null
+++ b/docs/doc_examples/597d456edfcb3d410954a3e9b5babf9a.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "index",
+  mappings: {
+    dynamic_templates: [
+      {
+        strings: {
+          match_mapping_type: "string",
+          mapping: {
+            type: "keyword",
+          },
+        },
+      },
+    ],
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/5987afb2c17c73fe3d860937565ef115.asciidoc b/docs/doc_examples/5987afb2c17c73fe3d860937565ef115.asciidoc
new file mode 100644
index 000000000..0107606a5
--- /dev/null
+++ b/docs/doc_examples/5987afb2c17c73fe3d860937565ef115.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.openPointInTime({
+  index: "my-index-000001",
+  keep_alive: "1m",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/599454613ac699d447537e79e65ae35a.asciidoc b/docs/doc_examples/599454613ac699d447537e79e65ae35a.asciidoc
new file mode 100644
index 000000000..3f7bd9397
--- /dev/null
+++ b/docs/doc_examples/599454613ac699d447537e79e65ae35a.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  script_fields: {
+    my_doubled_field: {
+      script: {
+        source: "doc['my_field'].value * params['multiplier']",
+        params: {
+          multiplier: 2,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc b/docs/doc_examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc
new file mode 100644
index 000000000..b478fbc11
--- /dev/null
+++ b/docs/doc_examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the 
docs examples
+
+[source, js]
+----
+const response = await client.indices.deleteTemplate({
+  name: "my-legacy-index-template",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc b/docs/doc_examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc
new file mode 100644
index 000000000..56b04dfe7
--- /dev/null
+++ b/docs/doc_examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.close({
+  index: "my-index-000001",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/59d015f7bd0eeab40d0885010a62fa70.asciidoc b/docs/doc_examples/59d015f7bd0eeab40d0885010a62fa70.asciidoc
new file mode 100644
index 000000000..9915745fb
--- /dev/null
+++ b/docs/doc_examples/59d015f7bd0eeab40d0885010a62fa70.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRole({
+  name: "example2",
+  indices: [
+    {
+      names: ["my-index-000001"],
+      privileges: ["read"],
+      query: {
+        template: {
+          source: {
+            term: {
+              "group.id": "{{_user.metadata.group_id}}",
+            },
+          },
+        },
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc b/docs/doc_examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc
new file mode 100644
index 000000000..f1aea891e
--- /dev/null
+++ b/docs/doc_examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_inference/completion/openai-completion",
+  body: {
+    service: "openai",
+    service_settings: {
+      api_key: "<api_key>",
+      model_id: "gpt-3.5-turbo",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc b/docs/doc_examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc
new file mode 100644
index 000000000..695b35e41
--- /dev/null
+++ b/docs/doc_examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.mget({
+  index: "my-index-000001",
+  ids: ["1", "2"],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/5a006feed86309b547bbaa1baca1c496.asciidoc b/docs/doc_examples/5a006feed86309b547bbaa1baca1c496.asciidoc
new file mode 100644
index 000000000..5ab3acc0b
--- /dev/null
+++ b/docs/doc_examples/5a006feed86309b547bbaa1baca1c496.asciidoc
@@ -0,0 +1,69 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    dynamic_templates: [
+      {
+        numeric_counts: {
+          match_mapping_type: ["long", "double"],
+          match: "count",
+          mapping: {
+            type: "{dynamic_type}",
+            index: false,
+          },
+        },
+      },
+      {
+        integers: {
+          match_mapping_type: "long",
+          mapping: {
+            type: "integer",
+          },
+        },
+      },
+      {
+        strings: {
+          match_mapping_type: "string",
+          mapping: {
+            type: "text",
+            fields: {
+              raw: {
+                type: 
"keyword", + ignore_above: 256, + }, + }, + }, + }, + }, + { + non_objects_keyword: { + match_mapping_type: "*", + unmatch_mapping_type: "object", + mapping: { + type: "keyword", + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + my_integer: 5, + my_string: "Some string", + my_boolean: "false", + field: { + count: 4, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/5a3855f1b3e37d89ab7cbcc4f7ae1dd3.asciidoc b/docs/doc_examples/5a3855f1b3e37d89ab7cbcc4f7ae1dd3.asciidoc new file mode 100644 index 000000000..a0d7ddaaf --- /dev/null +++ b/docs/doc_examples/5a3855f1b3e37d89ab7cbcc4f7ae1dd3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "limit", + max_token_count: 2, + }, + ], + text: "quick fox jumps over lazy dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5a3fe9584d203d1fd6c96981ba34e0de.asciidoc b/docs/doc_examples/5a3fe9584d203d1fd6c96981ba34e0de.asciidoc new file mode 100644 index 000000000..bb83361b8 --- /dev/null +++ b/docs/doc_examples/5a3fe9584d203d1fd6c96981ba34e0de.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "postal_codes", + mappings: { + properties: { + location: { + type: "geo_shape", + }, + postal_code: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5a6bb9ac6830668ecc00550c1aa8f2f1.asciidoc b/docs/doc_examples/5a6bb9ac6830668ecc00550c1aa8f2f1.asciidoc new file mode 100644 index 000000000..ab820fab7 --- /dev/null +++ b/docs/doc_examples/5a6bb9ac6830668ecc00550c1aa8f2f1.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "logstash-reader", + indices: [ + { + names: ["logstash-*"], + privileges: ["read_cross_cluster", "read", "view_index_metadata"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5a754dcc854b9154296550a0b581cb9d.asciidoc b/docs/doc_examples/5a754dcc854b9154296550a0b581cb9d.asciidoc new file mode 100644 index 000000000..02bc82b96 --- /dev/null +++ b/docs/doc_examples/5a754dcc854b9154296550a0b581cb9d.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "network-traffic", + size: 0, + aggs: { + "ipv4-subnets": { + ip_prefix: { + field: "ipv4", + prefix_length: 24, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5a7f05ab1d05b4eef5ff327168517165.asciidoc b/docs/doc_examples/5a7f05ab1d05b4eef5ff327168517165.asciidoc new file mode 100644 index 000000000..e163c01b6 --- /dev/null +++ b/docs/doc_examples/5a7f05ab1d05b4eef5ff327168517165.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + 
routing: "my-routing-value,my-routing-value-2", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5ab9b44939fb30f5b4adbdcc4bcc0733.asciidoc b/docs/doc_examples/5ab9b44939fb30f5b4adbdcc4bcc0733.asciidoc new file mode 100644 index 000000000..090b5022a --- /dev/null +++ b/docs/doc_examples/5ab9b44939fb30f5b4adbdcc4bcc0733.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "datastream_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_age: "5m", + }, + downsample: { + fixed_interval: "1h", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc b/docs/doc_examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc new file mode 100644 index 000000000..cd5c996c6 --- /dev/null +++ b/docs/doc_examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping5", + role_templates: [ + { + template: { + source: "{{#tojson}}groups{{/tojson}}", + }, + format: "json", + }, + ], + rules: { + field: { + "realm.name": "saml1", + }, + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5afbd9caed88c32f8a2968c07054f096.asciidoc b/docs/doc_examples/5afbd9caed88c32f8a2968c07054f096.asciidoc new file mode 100644 index 000000000..9b7bf7d52 --- /dev/null +++ b/docs/doc_examples/5afbd9caed88c32f8a2968c07054f096.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.logstash.deletePipeline({ + id: "my_pipeline", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc b/docs/doc_examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc new file mode 100644 index 000000000..e110d8fa3 --- /dev/null +++ b/docs/doc_examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.list({ + from: 0, + size: 3, + q: "app*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc b/docs/doc_examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc new file mode 100644 index 000000000..91150c314 --- /dev/null +++ b/docs/doc_examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getUserPrivileges(); +console.log(response); +---- diff --git a/docs/doc_examples/5b1ae98ad03e2819fc7c3468840ef448.asciidoc b/docs/doc_examples/5b1ae98ad03e2819fc7c3468840ef448.asciidoc new file mode 100644 index 000000000..4caea0a08 --- /dev/null +++ b/docs/doc_examples/5b1ae98ad03e2819fc7c3468840ef448.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples 
+ +[source, js] +---- +const response = await client.eql.search({ + index: "my-index*", + query: + "\n sample by host\n [any where uptime > 0]\n [any where port > 100]\n [any where bool == true]\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b266deba5396c7810af1b8315c23596.asciidoc b/docs/doc_examples/5b266deba5396c7810af1b8315c23596.asciidoc new file mode 100644 index 000000000..76ad3f0dc --- /dev/null +++ b/docs/doc_examples/5b266deba5396c7810af1b8315c23596.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + size: 0, + aggs: { + grouped: { + geohash_grid: { + field: "location", + precision: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b281956e35a26e734c482b42b356c0d.asciidoc b/docs/doc_examples/5b281956e35a26e734c482b42b356c0d.asciidoc new file mode 100644 index 000000000..54aa6736a --- /dev/null +++ b/docs/doc_examples/5b281956e35a26e734c482b42b356c0d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.existsAlias({ + name: "my-alias", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc b/docs/doc_examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc new file mode 100644 index 000000000..ffa909eaf --- /dev/null +++ b/docs/doc_examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "template_1", + template: { + settings: { + number_of_shards: 1, + }, + }, + _meta: { + description: "set number of shards to one", + serialization: { + class: "MyComponentTemplate", + id: 10, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b3384992c398ea8a3064d2e08725e2b.asciidoc b/docs/doc_examples/5b3384992c398ea8a3064d2e08725e2b.asciidoc new file mode 100644 index 000000000..b427412d8 --- /dev/null +++ b/docs/doc_examples/5b3384992c398ea8a3064d2e08725e2b.asciidoc @@ -0,0 +1,77 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "node", + mappings: { + properties: { + ip: { + type: "ip", + }, + date: { + type: "date", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "node", + refresh: "true", + operations: [ + { + index: {}, + }, + { + ip: "192.168.0.1", + date: "2020-01-01T01:01:01", + m: 1, + }, + { + index: {}, + }, + { + ip: "192.168.0.1", + date: "2020-01-01T02:01:01", + m: 2, + }, + { + index: {}, + }, + { + ip: "192.168.0.2", + date: "2020-01-01T02:01:01", + m: 3, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "node", + filter_path: "aggregations", + aggs: { + ip: { + terms: { + field: "ip", + }, + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + date: "desc", + }, + }, + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/5b58007f10700ec7934580f034404652.asciidoc 
b/docs/doc_examples/5b58007f10700ec7934580f034404652.asciidoc new file mode 100644 index 000000000..0385b42c4 --- /dev/null +++ b/docs/doc_examples/5b58007f10700ec7934580f034404652.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.create({ + index: "my-index-000001", + id: 1, + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "GET /search HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b6bc085943e9189236d98b3c05ed62c.asciidoc b/docs/doc_examples/5b6bc085943e9189236d98b3c05ed62c.asciidoc new file mode 100644 index 000000000..3cb27b100 --- /dev/null +++ b/docs/doc_examples/5b6bc085943e9189236d98b3c05ed62c.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "25GB", + }, + }, + }, + delete: { + min_age: "30d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc b/docs/doc_examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc new file mode 100644 index 000000000..8a1350c9b --- /dev/null +++ b/docs/doc_examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: "*", +}); +console.log(response); + +const response1 = await client.indices.getMapping({ + index: "_all", +}); +console.log(response1); + +const response2 = await client.indices.getMapping(); +console.log(response2); +---- diff --git a/docs/doc_examples/5b8119b4d9a09f4643be5a5b40875c8f.asciidoc b/docs/doc_examples/5b8119b4d9a09f4643be5a5b40875c8f.asciidoc new file mode 100644 index 000000000..f7dbd3f5e --- /dev/null +++ b/docs/doc_examples/5b8119b4d9a09f4643be5a5b40875c8f.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + is_published: true, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + is_published: false, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + aggs: { + publish_state: { + terms: { + field: "is_published", + }, + }, + }, + sort: ["is_published"], + fields: [ + { + field: "weight", + }, + ], + runtime_mappings: { + weight: { + type: "long", + script: "emit(doc['is_published'].value ? 
10 : 0)", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc b/docs/doc_examples/5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc new file mode 100644 index 000000000..9e0654221 --- /dev/null +++ b/docs/doc_examples/5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + flattened: { + type: "flattened", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + flattened: { + field: ["foo"], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc b/docs/doc_examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc new file mode 100644 index 000000000..d17ba0b28 --- /dev/null +++ b/docs/doc_examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information", + processors: [ + { + attachment: { + field: "data", + properties: ["content", "title"], + remove_binary: false, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5bb0d84185df2f276f01bb2fba709e1a.asciidoc b/docs/doc_examples/5bb0d84185df2f276f01bb2fba709e1a.asciidoc new file mode 100644 index 000000000..8e695a1e8 --- /dev/null +++ b/docs/doc_examples/5bb0d84185df2f276f01bb2fba709e1a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "cluster_one:my-data-stream,cluster_two:my-data-stream", + query: '\n process where process.name == "regsvr32.exe"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/5bbccf103107e505c17ae59863753efd.asciidoc b/docs/doc_examples/5bbccf103107e505c17ae59863753efd.asciidoc new file mode 100644 index 000000000..160980a8a --- /dev/null +++ b/docs/doc_examples/5bbccf103107e505c17ae59863753efd.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getInfluencers({ + job_id: "high_sum_total_sales", + sort: "influencer_score", + desc: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5c187ba92dd1678fda86b5eec8cc7421.asciidoc b/docs/doc_examples/5c187ba92dd1678fda86b5eec8cc7421.asciidoc new file mode 100644 index 000000000..c077c6112 --- /dev/null +++ b/docs/doc_examples/5c187ba92dd1678fda86b5eec8cc7421.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + filter: { + script: { + script: + "\n double amount = doc['amount'].value;\n if (doc['type'].value == 'expense') {\n amount *= -1;\n }\n return amount < 10;\n ", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/5c22172a944864a7d138decdc08558b4.asciidoc b/docs/doc_examples/5c22172a944864a7d138decdc08558b4.asciidoc new file mode 100644 index 000000000..8c0f5c121 --- /dev/null +++ b/docs/doc_examples/5c22172a944864a7d138decdc08558b4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + index: "my-data-stream", + v: "true", + h: "health,status,index,docs.count", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5c249eaeb99e6aee07162128288ac1b1.asciidoc b/docs/doc_examples/5c249eaeb99e6aee07162128288ac1b1.asciidoc new file mode 100644 index 000000000..a1c117415 --- /dev/null +++ b/docs/doc_examples/5c249eaeb99e6aee07162128288ac1b1.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_percentile: { + percentiles: { + field: "price", + percents: [1, 99], + }, + }, + the_movperc: { + moving_percentiles: { + buckets_path: "the_percentile", + window: 10, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5c24a9a0ddbfa50628dacdb9d25f7ab0.asciidoc b/docs/doc_examples/5c24a9a0ddbfa50628dacdb9d25f7ab0.asciidoc new file mode 100644 index 000000000..40a5a3b59 --- /dev/null +++ b/docs/doc_examples/5c24a9a0ddbfa50628dacdb9d25f7ab0.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + grades_stats: { + extended_stats: { + field: "grade", + missing: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5c2f486c27bd5346e512265f93375d16.asciidoc b/docs/doc_examples/5c2f486c27bd5346e512265f93375d16.asciidoc index 8455b50c9..cad6dce99 100644 --- a/docs/doc_examples/5c2f486c27bd5346e512265f93375d16.asciidoc +++ b/docs/doc_examples/5c2f486c27bd5346e512265f93375d16.asciidoc @@ -4,18 +4,15 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - range: { - timestamp: { - time_zone: '+01:00', - gte: '2020-01-01T00:00:00', - lte: 'now' - } - } - } - } -}) -console.log(response) + query: { + range: { + timestamp: { + time_zone: "+01:00", + gte: "2020-01-01T00:00:00", + lte: "now", + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5c6fbeac20dc23b613847f35d431ecab.asciidoc b/docs/doc_examples/5c6fbeac20dc23b613847f35d431ecab.asciidoc new file mode 100644 index 000000000..3d20e9ceb --- /dev/null +++ b/docs/doc_examples/5c6fbeac20dc23b613847f35d431ecab.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + functions: [ + { + gauss: { + price: { + origin: "0", + scale: "20", + }, + }, + }, + { + gauss: { + location: { + origin: "11, 12", + scale: "2km", + }, + }, + }, + ], + query: { + match: { + properties: "balcony", + }, + }, + score_mode: "multiply", + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/5c7ece1f30267adabdb832424871900a.asciidoc b/docs/doc_examples/5c7ece1f30267adabdb832424871900a.asciidoc new file mode 100644 index 000000000..68956e123 --- /dev/null +++ b/docs/doc_examples/5c7ece1f30267adabdb832424871900a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.allocation({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5c8ac24dd56e85d8f3f6705ec3c6dc32.asciidoc b/docs/doc_examples/5c8ac24dd56e85d8f3f6705ec3c6dc32.asciidoc new file mode 100644 index 000000000..ce39cc7bb --- /dev/null +++ b/docs/doc_examples/5c8ac24dd56e85d8f3f6705ec3c6dc32.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "circles", + mappings: { + properties: { + circle: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.ingest.putPipeline({ + id: "polygonize_circles", + description: "translate circle to polygon", + processors: [ + { + circle: { + field: "circle", + error_distance: 28, + shape_type: "geo_shape", + }, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc b/docs/doc_examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc new file mode 100644 index 000000000..cb0182201 --- /dev/null +++ b/docs/doc_examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getDataFrameAnalytics({ + id: "loganalytics", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5cd792dff7d5891c33bef098d9338ce1.asciidoc b/docs/doc_examples/5cd792dff7d5891c33bef098d9338ce1.asciidoc new file mode 100644 index 000000000..8570b8d2d --- /dev/null +++ b/docs/doc_examples/5cd792dff7d5891c33bef098d9338ce1.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + title: { + type: "text", + store: true, + }, + date: { + type: "date", + store: true, + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + title: "Some short title", + date: "2015-01-01", + content: "A very long content field...", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + stored_fields: ["title", "date"], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/5cfab507e50d8c5182939412a9dbcdc8.asciidoc b/docs/doc_examples/5cfab507e50d8c5182939412a9dbcdc8.asciidoc new file mode 100644 index 000000000..44f1922f2 --- /dev/null +++ b/docs/doc_examples/5cfab507e50d8c5182939412a9dbcdc8.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "places", + mappings: { + properties: { + geometry: { + type: "geo_shape", + }, 
+ }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "places", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + name: "NEMO Science Museum", + geometry: "POINT(4.912350 52.374081)", + }, + { + index: { + _id: 2, + }, + }, + { + name: "Sportpark De Weeren", + geometry: { + type: "Polygon", + coordinates: [ + [ + [4.965305328369141, 52.39347642069457], + [4.966979026794433, 52.391721758934835], + [4.969425201416015, 52.39238958618537], + [4.967944622039794, 52.39420969150824], + [4.965305328369141, 52.39347642069457], + ], + ], + }, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "places", + size: 0, + aggs: { + centroid: { + geo_centroid: { + field: "geometry", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d3016e4e8025362ad9a05ee86bb2061f.asciidoc b/docs/doc_examples/5d03bb385904d20c5323885706738459.asciidoc similarity index 56% rename from docs/doc_examples/d3016e4e8025362ad9a05ee86bb2061f.asciidoc rename to docs/doc_examples/5d03bb385904d20c5323885706738459.asciidoc index 17fdfe181..f2bc5d0b8 100644 --- a/docs/doc_examples/d3016e4e8025362ad9a05ee86bb2061f.asciidoc +++ b/docs/doc_examples/5d03bb385904d20c5323885706738459.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'twitter', - alias: 'alias1' - } - } - ] - } -}) -console.log(response) + actions: [ + { + add: { + index: "my-data-stream", + alias: "my-alias", + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5d32279dcd52b22d9e1178a02a3ad957.asciidoc b/docs/doc_examples/5d32279dcd52b22d9e1178a02a3ad957.asciidoc deleted file mode 100644 index 1e5878a4f..000000000 --- a/docs/doc_examples/5d32279dcd52b22d9e1178a02a3ad957.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - routing: 'kimchy', - body: { - user: 'kimchy', - post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/5d428ea66252fd252b6a8d6f47605c86.asciidoc b/docs/doc_examples/5d428ea66252fd252b6a8d6f47605c86.asciidoc new file mode 100644 index 000000000..29e5b0bf4 --- /dev/null +++ b/docs/doc_examples/5d428ea66252fd252b6a8d6f47605c86.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "cjk_bigram_example", + settings: { + analysis: { + analyzer: { + han_bigrams: { + tokenizer: "standard", + filter: ["han_bigrams_filter"], + }, + }, + filter: { + han_bigrams_filter: { + type: "cjk_bigram", + ignored_scripts: ["hangul", "hiragana", "katakana"], + output_unigrams: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d5b06468c54308f52c212cca5d58fef.asciidoc b/docs/doc_examples/5d5b06468c54308f52c212cca5d58fef.asciidoc new file mode 100644 index 000000000..5c8ee2a05 --- /dev/null +++ b/docs/doc_examples/5d5b06468c54308f52c212cca5d58fef.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.sql.query({ + format: "json", + cursor: + "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWWWdrRlVfSS1TbDYtcW9lc1FJNmlYdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl+v///w8=", + columnar: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d5cdbd4c5c62a90ff2a39cba4a59368.asciidoc b/docs/doc_examples/5d5cdbd4c5c62a90ff2a39cba4a59368.asciidoc new file mode 100644 index 000000000..5b625185e --- /dev/null +++ b/docs/doc_examples/5d5cdbd4c5c62a90ff2a39cba4a59368.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + elser: true, + text: true, + query_string: "where is the best mountain climbing?", + elser_fields: [ + { + name: "title", + boost: 1, + }, + { + name: "description", + boost: 1, + }, + ], + text_query_boost: 4, + min_score: 10, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d689d74062cddd01a0711a2fa7f23fd.asciidoc b/docs/doc_examples/5d689d74062cddd01a0711a2fa7f23fd.asciidoc new file mode 100644 index 000000000..ca39dd250 --- /dev/null +++ b/docs/doc_examples/5d689d74062cddd01a0711a2fa7f23fd.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.transport.TransportService.tracer": "TRACE", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d7980d8c745abf7ea0fa573e818bd5b.asciidoc b/docs/doc_examples/5d7980d8c745abf7ea0fa573e818bd5b.asciidoc new file mode 100644 index 000000000..5d0a781cd --- /dev/null +++ b/docs/doc_examples/5d7980d8c745abf7ea0fa573e818bd5b.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + en: { + tokenizer: "standard", + filter: ["my_shingle_filter"], + }, + }, + filter: { + my_shingle_filter: { + type: "shingle", + min_shingle_size: 2, + max_shingle_size: 5, + output_unigrams: false, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d9d7b84e2fec7ecd832145cbb951cf1.asciidoc b/docs/doc_examples/5d9d7b84e2fec7ecd832145cbb951cf1.asciidoc index 78aaba5e1..3f404df64 100644 --- a/docs/doc_examples/5d9d7b84e2fec7ecd832145cbb951cf1.asciidoc +++ b/docs/doc_examples/5d9d7b84e2fec7ecd832145cbb951cf1.asciidoc @@ -4,32 +4,29 @@ [source, js] ---- const response = await client.search({ - body: { - size: 0, - aggs: { - expired_sessions: { - terms: { - field: 'account_id', - include: { - partition: 0, - num_partitions: 20 + size: 0, + aggs: { + expired_sessions: { + terms: { + field: "account_id", + include: { + partition: 0, + num_partitions: 20, + }, + size: 10000, + order: { + last_access: "asc", + }, + }, + aggs: { + last_access: { + max: { + field: "access_date", }, - size: 10000, - order: { - last_access: 'asc' - } }, - aggs: { - last_access: { - max: { - field: 'access_date' - } - } - } - } - } - } -}) -console.log(response) + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5da6efd5b038ada64c9e853c88c1ec47.asciidoc 
b/docs/doc_examples/5da6efd5b038ada64c9e853c88c1ec47.asciidoc index 6157dc7f0..e82ef5dc8 100644 --- a/docs/doc_examples/5da6efd5b038ada64c9e853c88c1ec47.asciidoc +++ b/docs/doc_examples/5da6efd5b038ada64c9e853c88c1ec47.asciidoc @@ -4,20 +4,14 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'brown fox', - type: 'best_fields', - fields: [ - 'subject', - 'message' - ], - tie_breaker: 0.3 - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "brown fox", + type: "best_fields", + fields: ["subject", "message"], + tie_breaker: 0.3, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5db5349162a4fbe74bffb646926a2495.asciidoc b/docs/doc_examples/5db5349162a4fbe74bffb646926a2495.asciidoc new file mode 100644 index 000000000..2f634da70 --- /dev/null +++ b/docs/doc_examples/5db5349162a4fbe74bffb646926a2495.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + my_range: { + type: "long_range", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + my_range: { + gt: 200, + lt: 300, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/5dbf06ca9058843f572676fcaf587f75.asciidoc b/docs/doc_examples/5dbf06ca9058843f572676fcaf587f75.asciidoc new file mode 100644 index 000000000..0b9e73b16 --- /dev/null +++ b/docs/doc_examples/5dbf06ca9058843f572676fcaf587f75.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + prices: { + variable_width_histogram: { + field: "price", + buckets: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5dd695679b5141d9142d3d30ba8d300a.asciidoc b/docs/doc_examples/5dd695679b5141d9142d3d30ba8d300a.asciidoc deleted file mode 100644 index 5a9f90953..000000000 --- a/docs/doc_examples/5dd695679b5141d9142d3d30ba8d300a.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'sales', - size: '0', - body: { - aggs: { - types_count: { - value_count: { - field: 'type' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/5ddc26da6e163fda54f52d33b5157051.asciidoc b/docs/doc_examples/5ddc26da6e163fda54f52d33b5157051.asciidoc new file mode 100644 index 000000000..30bdd7f18 --- /dev/null +++ b/docs/doc_examples/5ddc26da6e163fda54f52d33b5157051.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + sparse_vector: { + field: "my_tokens", + inference_id: "my-elser-endpoint", + query: "the query string", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/311c4b632a29b9ead63b02d01f10096b.asciidoc b/docs/doc_examples/5df3226fdc8f1f66ae92ba2f527af8c0.asciidoc similarity index 64% rename from 
docs/doc_examples/311c4b632a29b9ead63b02d01f10096b.asciidoc rename to docs/doc_examples/5df3226fdc8f1f66ae92ba2f527af8c0.asciidoc index cecf117ec..35334cd9c 100644 --- a/docs/doc_examples/311c4b632a29b9ead63b02d01f10096b.asciidoc +++ b/docs/doc_examples/5df3226fdc8f1f66ae92ba2f527af8c0.asciidoc @@ -4,12 +4,11 @@ [source, js] ---- const response = await client.index({ - index: 'customer', - id: '1', - body: { - name: 'John Doe' - } -}) -console.log(response) + index: "my-index-000001", + id: 1, + document: { + my_field: 5, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc b/docs/doc_examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc new file mode 100644 index 000000000..db5e254b7 --- /dev/null +++ b/docs/doc_examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.recovery({ + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5dfe24287bb930ad33345caf092a004b.asciidoc b/docs/doc_examples/5dfe24287bb930ad33345caf092a004b.asciidoc new file mode 100644 index 000000000..c35b3e1f3 --- /dev/null +++ b/docs/doc_examples/5dfe24287bb930ad33345caf092a004b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must_not: { + exists: { + field: "user.id", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc b/docs/doc_examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc new file mode 100644 index 000000000..217e7631a --- /dev/null +++ b/docs/doc_examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + request_cache: "true", + size: 0, + aggs: { + popular_colors: { + terms: { + field: "colors", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e124875d97c27362ae858160ae1c6d5.asciidoc b/docs/doc_examples/5e124875d97c27362ae858160ae1c6d5.asciidoc new file mode 100644 index 000000000..4f049addd --- /dev/null +++ b/docs/doc_examples/5e124875d97c27362ae858160ae1c6d5.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.getAutoFollowPattern(); +console.log(response); +---- diff --git a/docs/doc_examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc b/docs/doc_examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc new file mode 100644 index 000000000..cee356963 --- /dev/null +++ b/docs/doc_examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "jwt1_users", + refresh: "true", + roles: ["user"], + rules: { + all: [ + { + field: { + "realm.name": "jwt1", + }, + }, + { + field: { + username: "principalname1", + }, + }, + { + field: { + dn: "CN=Principal Name 
1,DC=example.com", + }, + }, + { + field: { + groups: "group1", + }, + }, + { + field: { + "metadata.jwt_claim_other": "other1", + }, + }, + ], + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e2f7097eb299de553d0fa0087d70a59.asciidoc b/docs/doc_examples/5e2f7097eb299de553d0fa0087d70a59.asciidoc new file mode 100644 index 000000000..0f146561b --- /dev/null +++ b/docs/doc_examples/5e2f7097eb299de553d0fa0087d70a59.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + index: { + "sort.field": ["username", "timestamp"], + "sort.order": ["asc", "desc"], + }, + }, + mappings: { + properties: { + username: { + type: "keyword", + doc_values: true, + }, + timestamp: { + type: "date", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e3673bcbef5731746e400c4f3fe134d.asciidoc b/docs/doc_examples/5e3673bcbef5731746e400c4f3fe134d.asciidoc new file mode 100644 index 000000000..b68e17a38 --- /dev/null +++ b/docs/doc_examples/5e3673bcbef5731746e400c4f3fe134d.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "test", + id: 1, + document: { + location: [ + { + coordinates: [46.25, 20.14], + type: "point", + }, + { + coordinates: [47.49, 19.04], + type: "point", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e415c490a46358643ee2aab554b4876.asciidoc b/docs/doc_examples/5e415c490a46358643ee2aab554b4876.asciidoc new file mode 100644 index 000000000..9b9aeab08 --- /dev/null +++ b/docs/doc_examples/5e415c490a46358643ee2aab554b4876.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.allocationExplain({ + filter_path: + "index,node_allocation_decisions.node_name,node_allocation_decisions.deciders.*", + index: "my-index", + shard: 0, + primary: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e47a407b6ca29dadf6eac5ab1d71163.asciidoc b/docs/doc_examples/5e47a407b6ca29dadf6eac5ab1d71163.asciidoc new file mode 100644 index 000000000..403e117a9 --- /dev/null +++ b/docs/doc_examples/5e47a407b6ca29dadf6eac5ab1d71163.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_polygon: { + "person.location": { + points: [ + { + lat: 40, + lon: -70, + }, + { + lat: 30, + lon: -80, + }, + { + lat: 20, + lon: -90, + }, + ], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e6419bc3e2db0d0f05bce58d8cc9215.asciidoc b/docs/doc_examples/5e6419bc3e2db0d0f05bce58d8cc9215.asciidoc new file mode 100644 index 000000000..2527dc592 --- /dev/null +++ b/docs/doc_examples/5e6419bc3e2db0d0f05bce58d8cc9215.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: 
"my-pipeline", + processors: [ + { + rename: { + description: "Rename 'provider' to 'cloud.provider'", + field: "provider", + target_field: "cloud.provider", + on_failure: [ + { + set: { + description: "Set 'error.message'", + field: "error.message", + value: + "Field 'provider' does not exist. Cannot rename to 'cloud.provider'", + override: false, + }, + }, + ], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e87dd38ac3a0fd59ad794005b16d13e.asciidoc b/docs/doc_examples/5e87dd38ac3a0fd59ad794005b16d13e.asciidoc new file mode 100644 index 000000000..5f9ace6d7 --- /dev/null +++ b/docs/doc_examples/5e87dd38ac3a0fd59ad794005b16d13e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getLifecycle({ + policy_id: "nightly-snapshots", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e9a7845e60b79685aab59877c5fbd1a.asciidoc b/docs/doc_examples/5e9a7845e60b79685aab59877c5fbd1a.asciidoc new file mode 100644 index 000000000..2639ad24b --- /dev/null +++ b/docs/doc_examples/5e9a7845e60b79685aab59877c5fbd1a.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + ignored_fields: { + terms: { + field: "_ignored", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46c4b0dfb674825f9579203d41e7f404.asciidoc b/docs/doc_examples/5ea9da129ca70a5fe534f27a82d80b29.asciidoc similarity index 52% rename from docs/doc_examples/46c4b0dfb674825f9579203d41e7f404.asciidoc rename to docs/doc_examples/5ea9da129ca70a5fe534f27a82d80b29.asciidoc index af100b302..edead60ff 100644 --- a/docs/doc_examples/46c4b0dfb674825f9579203d41e7f404.asciidoc +++ b/docs/doc_examples/5ea9da129ca70a5fe534f27a82d80b29.asciidoc @@ -4,17 +4,15 @@ [source, js] ---- const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - tags: { - type: 'keyword' - } - } - } - } -}) -console.log(response) + index: "example", + mappings: { + properties: { + comment: { + type: "text", + index_options: "offsets", + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5f031b7bd2b7d98d2d10df7420d269ff.asciidoc b/docs/doc_examples/5f031b7bd2b7d98d2d10df7420d269ff.asciidoc new file mode 100644 index 000000000..e364f163d --- /dev/null +++ b/docs/doc_examples/5f031b7bd2b7d98d2d10df7420d269ff.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveIndex({ + name: "new-data-stream*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc b/docs/doc_examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc new file mode 100644 index 000000000..34cc9739c --- /dev/null +++ b/docs/doc_examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + user_id: { + type: "keyword", + ignore_above: 20, + }, + }, + }, +}); +console.log(response); +---- diff 
--git a/docs/doc_examples/5f210f74725ea0c9265190346edfa246.asciidoc b/docs/doc_examples/5f210f74725ea0c9265190346edfa246.asciidoc deleted file mode 100644 index 9f05c2947..000000000 --- a/docs/doc_examples/5f210f74725ea0c9265190346edfa246.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - indices: [ - 'test1', - 'test2' - ], - alias: 'alias1' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/5f3373887e8d3dc31239b687a5151449.asciidoc b/docs/doc_examples/5f3373887e8d3dc31239b687a5151449.asciidoc new file mode 100644 index 000000000..92ac2ee4b --- /dev/null +++ b/docs/doc_examples/5f3373887e8d3dc31239b687a5151449.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + number_one: { + type: "integer", + }, + number_two: { + type: "integer", + coerce: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + number_one: "10", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + number_two: "10", + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc b/docs/doc_examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc index 5a623c1ef..64e8fd61e 100644 --- a/docs/doc_examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc +++ b/docs/doc_examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.search({ - index: 'index_long,index_double', - body: { - sort: [ - { - field: { - numeric_type: 'date_nanos' - } - } - ] - } -}) -console.log(response) + index: "index_long,index_double", + sort: [ + { + field: { + numeric_type: "date_nanos", + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5f3a3eefeefe6fa85ec49d499212d245.asciidoc b/docs/doc_examples/5f3a3eefeefe6fa85ec49d499212d245.asciidoc deleted file mode 100644 index 2e6c1e650..000000000 --- a/docs/doc_examples/5f3a3eefeefe6fa85ec49d499212d245.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putMapping({ - index: 'my_index', - body: { - properties: { - city: { - type: 'text', - fields: { - raw: { - type: 'keyword' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/5f72ab800c3db9d118df95e2a378d411.asciidoc b/docs/doc_examples/5f72ab800c3db9d118df95e2a378d411.asciidoc new file mode 100644 index 000000000..caf925227 --- /dev/null +++ b/docs/doc_examples/5f72ab800c3db9d118df95e2a378d411.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: ".ds-my-data-stream-2099.03.09-000003", + id: 2, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f79c42b0f74fdf71359cef82843fad3.asciidoc 
b/docs/doc_examples/5f79c42b0f74fdf71359cef82843fad3.asciidoc new file mode 100644 index 000000000..08f2e3dc9 --- /dev/null +++ b/docs/doc_examples/5f79c42b0f74fdf71359cef82843fad3.asciidoc @@ -0,0 +1,47 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + all_of: { + intervals: [ + { + match: { + query: "the", + }, + }, + { + any_of: { + intervals: [ + { + match: { + query: "big", + }, + }, + { + match: { + query: "big bad", + }, + }, + ], + }, + }, + { + match: { + query: "wolf", + }, + }, + ], + max_gaps: 0, + ordered: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f7b59d4fad0bdce6b09abb520ddb51d.asciidoc b/docs/doc_examples/5f7b59d4fad0bdce6b09abb520ddb51d.asciidoc new file mode 100644 index 000000000..997f41a37 --- /dev/null +++ b/docs/doc_examples/5f7b59d4fad0bdce6b09abb520ddb51d.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-data-stream", + runtime_mappings: { + "source.ip": { + type: "ip", + script: + "\n String sourceip=grok('%{IPORHOST:sourceip} .*').extract(doc[ \"message\" ].value)?.sourceip;\n if (sourceip != null) emit(sourceip);\n ", + }, + }, + query: { + bool: { + filter: [ + { + range: { + "@timestamp": { + gte: "now-1d/d", + lt: "now/d", + }, + }, + }, + { + range: { + "source.ip": { + gte: "192.0.2.0", + lte: "192.0.2.255", + }, + }, + }, + ], + }, + }, + fields: ["*"], + _source: false, + sort: [ + { + "@timestamp": "desc", + }, + { + "source.ip": "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc b/docs/doc_examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc new file mode 100644 index 000000000..c04d41ee5 --- /dev/null +++ b/docs/doc_examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "hyphenation_decompound_example", + settings: { + analysis: { + analyzer: { + standard_hyphenation_decompound: { + tokenizer: "standard", + filter: ["22_char_hyphenation_decompound"], + }, + }, + filter: { + "22_char_hyphenation_decompound": { + type: "hyphenation_decompounder", + word_list_path: "analysis/example_word_list.txt", + hyphenation_patterns_path: "analysis/hyphenation_patterns.xml", + max_subword_size: 22, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f8d90515995a5eee189d722abe3b111.asciidoc b/docs/doc_examples/5f8d90515995a5eee189d722abe3b111.asciidoc new file mode 100644 index 000000000..028813b2e --- /dev/null +++ b/docs/doc_examples/5f8d90515995a5eee189d722abe3b111.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "azure_ai_studio_embeddings", + processors: [ + { + inference: { + model_id: "azure_ai_studio_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/5f8fb5513d4f725434db2f517ad4298f.asciidoc b/docs/doc_examples/5f8fb5513d4f725434db2f517ad4298f.asciidoc new file mode 100644 index 000000000..fb2dc80e0 --- /dev/null +++ b/docs/doc_examples/5f8fb5513d4f725434db2f517ad4298f.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + settings: { + number_of_shards: 1, + similarity: { + scripted_tfidf: { + type: "scripted", + weight_script: { + source: + "double idf = Math.log((field.docCount+1.0)/(term.docFreq+1.0)) + 1.0; return query.boost * idf;", + }, + script: { + source: + "double tf = Math.sqrt(doc.freq); double norm = 1/Math.sqrt(doc.length); return weight * tf * norm;", + }, + }, + }, + }, + mappings: { + properties: { + field: { + type: "text", + similarity: "scripted_tfidf", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5925c23a173a63bdb30b458248d1df76.asciidoc b/docs/doc_examples/5faa121e00a0582160b2adb2b72fed67.asciidoc similarity index 71% rename from docs/doc_examples/5925c23a173a63bdb30b458248d1df76.asciidoc rename to docs/doc_examples/5faa121e00a0582160b2adb2b72fed67.asciidoc index 6cb4a5a1e..6ef255e94 100644 --- a/docs/doc_examples/5925c23a173a63bdb30b458248d1df76.asciidoc +++ b/docs/doc_examples/5faa121e00a0582160b2adb2b72fed67.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.indices.getSettings({ - index: 'twitter', - flat_settings: 'false' -}) -console.log(response) + index: "log_2099_-*", + name: "index.number_*", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc b/docs/doc_examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc new file mode 100644 index 000000000..d3f030697 --- /dev/null +++ b/docs/doc_examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getCalendars({ + calendar_id: "planned-outages", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc b/docs/doc_examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc new file mode 100644 index 000000000..f3f9d875c --- /dev/null +++ b/docs/doc_examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_query_rules/my-ruleset", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5ffe6fd303400e8678fa1ead291e237f.asciidoc b/docs/doc_examples/5ffe6fd303400e8678fa1ead291e237f.asciidoc new file mode 100644 index 000000000..064493f98 --- /dev/null +++ b/docs/doc_examples/5ffe6fd303400e8678fa1ead291e237f.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/600d33c80f8872dda85c87ed41da95fd.asciidoc 
b/docs/doc_examples/600d33c80f8872dda85c87ed41da95fd.asciidoc new file mode 100644 index 000000000..f2a3f5ba3 --- /dev/null +++ b/docs/doc_examples/600d33c80f8872dda85c87ed41da95fd.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "azure-ai-studio-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "azure_ai_studio_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/6013ed65d2058da5ce704b47a504b60a.asciidoc b/docs/doc_examples/6013ed65d2058da5ce704b47a504b60a.asciidoc new file mode 100644 index 000000000..90a5764ce --- /dev/null +++ b/docs/doc_examples/6013ed65d2058da5ce704b47a504b60a.asciidoc @@ -0,0 +1,53 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "test", + refresh: "true", + operations: [ + { + index: {}, + }, + { + s: 1, + m: 3.1415, + }, + { + index: {}, + }, + { + s: 2, + m: 1, + }, + { + index: {}, + }, + { + s: 3, + m: 2.71828, + }, + ], +}); +console.log(response); + +const response1 = await client.search({ + index: "test", + filter_path: "aggregations", + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + s: "desc", + }, + size: 3, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc b/docs/doc_examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc new file mode 100644 index 000000000..a5afd1dfd --- /dev/null +++ b/docs/doc_examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getServiceCredentials({ + namespace: "elastic", + service: "fleet-server", +}); +console.log(response); +---- diff --git a/docs/doc_examples/60299454aa19fec15a604a0dd06fe522.asciidoc b/docs/doc_examples/60299454aa19fec15a604a0dd06fe522.asciidoc new file mode 100644 index 000000000..471a73692 --- /dev/null +++ b/docs/doc_examples/60299454aa19fec15a604a0dd06fe522.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings({ + include_defaults: "true", + filter_path: "*.cluster.routing.allocation.disk.watermark.high*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/602e04051c092cf77de2f75a563661b8.asciidoc b/docs/doc_examples/602e04051c092cf77de2f75a563661b8.asciidoc new file mode 100644 index 000000000..20697b82b --- /dev/null +++ b/docs/doc_examples/602e04051c092cf77de2f75a563661b8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.master({ + help: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/604da59fe41160efa10a846a9dacc07a.asciidoc b/docs/doc_examples/604da59fe41160efa10a846a9dacc07a.asciidoc new file mode 100644 index 000000000..162c16e15 --- 
/dev/null +++ b/docs/doc_examples/604da59fe41160efa10a846a9dacc07a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.getStatus({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6061aadb3b870791278212d1e8f52b39.asciidoc b/docs/doc_examples/6061aadb3b870791278212d1e8f52b39.asciidoc new file mode 100644 index 000000000..525e27c32 --- /dev/null +++ b/docs/doc_examples/6061aadb3b870791278212d1e8f52b39.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getMemoryStats({ + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/608cadc6b8a3f194612b69279ccc96de.asciidoc b/docs/doc_examples/608cadc6b8a3f194612b69279ccc96de.asciidoc new file mode 100644 index 000000000..a3e925620 --- /dev/null +++ b/docs/doc_examples/608cadc6b8a3f194612b69279ccc96de.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "script_score": {\n "query": {\n "bool": {\n "filter": {\n "range": {\n "{{field}}": {\n "{{operator}}": {{value}}\n }\n }\n }\n }\n },\n "script": {\n "source": "cosineSimilarity({{#toJson}}query_vector{{/toJson}}, \'{{dense_vector_field}}\') + 1.0"\n }\n }\n }\n }\n ', + params: { + field: "price", + operator: "gte", + value: 1000, + dense_vector_field: "product-vector", + query_vector: [], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc b/docs/doc_examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc new file mode 100644 index 000000000..7dbdf3f08 --- /dev/null +++ b/docs/doc_examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.putSynonymRule({ + set_id: "my-synonyms-set", + rule_id: "test-1", + synonyms: "hello => hi => howdy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/60a9aa5dcde9023901f6ff27231a10c4.asciidoc b/docs/doc_examples/60a9aa5dcde9023901f6ff27231a10c4.asciidoc new file mode 100644 index 000000000..1b1a9227e --- /dev/null +++ b/docs/doc_examples/60a9aa5dcde9023901f6ff27231a10c4.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "news", + query: { + match: { + content: "madrid", + }, + }, + aggs: { + tags: { + significant_text: { + field: "content", + background_filter: { + term: { + content: "spain", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/60b0fc1b6ae418621ff1b31591fa1fce.asciidoc b/docs/doc_examples/60b0fc1b6ae418621ff1b31591fa1fce.asciidoc new file mode 100644 index 000000000..fbf950f93 --- /dev/null +++ 
b/docs/doc_examples/60b0fc1b6ae418621ff1b31591fa1fce.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.deleteWatch({ + id: "cluster_health_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/60cab62af1540db2ad3b696b0ee1d7a8.asciidoc b/docs/doc_examples/60cab62af1540db2ad3b696b0ee1d7a8.asciidoc new file mode 100644 index 000000000..6f4065839 --- /dev/null +++ b/docs/doc_examples/60cab62af1540db2ad3b696b0ee1d7a8.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "queries", + query: { + percolate: { + field: "query", + document: { + body: "fox jumps over the lazy dog", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/60d689aae3f8de1e6830329dfd69a6a6.asciidoc b/docs/doc_examples/60d689aae3f8de1e6830329dfd69a6a6.asciidoc new file mode 100644 index 000000000..dabaf1110 --- /dev/null +++ b/docs/doc_examples/60d689aae3f8de1e6830329dfd69a6a6.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "amazon-bedrock-embeddings", + pipeline: "amazon_bedrock_embeddings", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/60ee33f3acfdd0fe6f288ac77312c780.asciidoc b/docs/doc_examples/60ee33f3acfdd0fe6f288ac77312c780.asciidoc deleted file mode 100644 index 0bb1ce32a..000000000 --- a/docs/doc_examples/60ee33f3acfdd0fe6f288ac77312c780.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'title' - ], - query: 'this that thus', - minimum_should_match: 2 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1c23507edd7a3c18538b68223378e4ab.asciidoc b/docs/doc_examples/60f889fbed5df3185444f7015b48ed76.asciidoc similarity index 77% rename from docs/doc_examples/1c23507edd7a3c18538b68223378e4ab.asciidoc rename to docs/doc_examples/60f889fbed5df3185444f7015b48ed76.asciidoc index eacb07fb1..5f4a5a357 100644 --- a/docs/doc_examples/1c23507edd7a3c18538b68223378e4ab.asciidoc +++ b/docs/doc_examples/60f889fbed5df3185444f7015b48ed76.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.indices.create({ - index: 'twitter' -}) -console.log(response) + index: "my-index-000001", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/610f629d0486a64546d62402a0a5e00f.asciidoc b/docs/doc_examples/610f629d0486a64546d62402a0a5e00f.asciidoc new file mode 100644 index 000000000..d728d2ac9 --- /dev/null +++ b/docs/doc_examples/610f629d0486a64546d62402a0a5e00f.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + query_string: { + query: "kimchy\\!", + fields: ["user.id"], + }, + }, +}); +console.log(response); +---- 
diff --git a/docs/doc_examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc b/docs/doc_examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc new file mode 100644 index 000000000..8303a5630 --- /dev/null +++ b/docs/doc_examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.reroute({ + metric: "none", +}); +console.log(response); +---- diff --git a/docs/doc_examples/612c2e975f833de9815651135735eae5.asciidoc b/docs/doc_examples/612c2e975f833de9815651135735eae5.asciidoc new file mode 100644 index 000000000..0dc50cad8 --- /dev/null +++ b/docs/doc_examples/612c2e975f833de9815651135735eae5.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.cancel({ + nodes: "nodeId1,nodeId2", + actions: "*reindex", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6138d6919f3cbaaf61e1092f817d295c.asciidoc b/docs/doc_examples/6138d6919f3cbaaf61e1092f817d295c.asciidoc deleted file mode 100644 index 10fbf5e19..000000000 --- a/docs/doc_examples/6138d6919f3cbaaf61e1092f817d295c.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match: { - message: { - query: 'this is a test', - operator: 'and' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/618c9d42284c067891fb57034a4fd834.asciidoc b/docs/doc_examples/618c9d42284c067891fb57034a4fd834.asciidoc new file mode 100644 index 000000000..59be85f0f --- /dev/null +++ b/docs/doc_examples/618c9d42284c067891fb57034a4fd834.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.startJob({ + id: "sensor", +}); +console.log(response); +---- diff --git a/docs/doc_examples/618d5f3d35921d8cb7e9ccfbe9a4c3e3.asciidoc b/docs/doc_examples/618d5f3d35921d8cb7e9ccfbe9a4c3e3.asciidoc deleted file mode 100644 index 988069e6d..000000000 --- a/docs/doc_examples/618d5f3d35921d8cb7e9ccfbe9a4c3e3.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - regexp: { - user: { - value: 'k.*y', - flags: 'ALL', - max_determinized_states: 10000, - rewrite: 'constant_score' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/61bf6ac15ae3e22323454a9a2872a2fa.asciidoc b/docs/doc_examples/61bf6ac15ae3e22323454a9a2872a2fa.asciidoc new file mode 100644 index 000000000..7dd5f988e --- /dev/null +++ b/docs/doc_examples/61bf6ac15ae3e22323454a9a2872a2fa.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + type_count: { + cardinality: { + field: "type", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b0ec418bf416c62bed602b0a32a6d5f5.asciidoc 
b/docs/doc_examples/61c49cee90c6aa0eafbdd5cc03936e7d.asciidoc similarity index 68% rename from docs/doc_examples/b0ec418bf416c62bed602b0a32a6d5f5.asciidoc rename to docs/doc_examples/61c49cee90c6aa0eafbdd5cc03936e7d.asciidoc index 61ba1d017..f7b40662e 100644 --- a/docs/doc_examples/b0ec418bf416c62bed602b0a32a6d5f5.asciidoc +++ b/docs/doc_examples/61c49cee90c6aa0eafbdd5cc03936e7d.asciidoc @@ -4,12 +4,11 @@ [source, js] ---- const response = await client.index({ - index: 'alias1', - id: '1', - body: { - foo: 'bar' - } -}) -console.log(response) + index: "data", + id: 1, + document: { + count: 5, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/61d6b9503459914c436930c3ae87d454.asciidoc b/docs/doc_examples/61d6b9503459914c436930c3ae87d454.asciidoc new file mode 100644 index 000000000..f6356f14a --- /dev/null +++ b/docs/doc_examples/61d6b9503459914c436930c3ae87d454.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_query_rules", + querystring: { + from: "0", + size: "3", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/61e38e95191f4dde791070c6fce8a092.asciidoc b/docs/doc_examples/61e38e95191f4dde791070c6fce8a092.asciidoc new file mode 100644 index 000000000..f29cf90ad --- /dev/null +++ b/docs/doc_examples/61e38e95191f4dde791070c6fce8a092.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movavg: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.holt(values, 0.3, 0.1)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/621665fdbd7fc103c09bfeed28b67b1a.asciidoc b/docs/doc_examples/621665fdbd7fc103c09bfeed28b67b1a.asciidoc index 3fcf439cc..3dfc8f729 100644 --- a/docs/doc_examples/621665fdbd7fc103c09bfeed28b67b1a.asciidoc +++ b/docs/doc_examples/621665fdbd7fc103c09bfeed28b67b1a.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.count({ - filter_path: '-_shards' -}) -console.log(response) + filter_path: "-_shards", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/621f4553e24592d40c8cdbbdfaeb027e.asciidoc b/docs/doc_examples/621f4553e24592d40c8cdbbdfaeb027e.asciidoc new file mode 100644 index 000000000..164decad8 --- /dev/null +++ b/docs/doc_examples/621f4553e24592d40c8cdbbdfaeb027e.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", + knn: { + field: "image-vector", + query_vector: [54, 10, -2], + k: 5, + num_candidates: 50, + filter: { + term: { + "file-type": "png", + }, + }, + }, + fields: ["title"], + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6220087321e6d288024a70c6b09bd720.asciidoc b/docs/doc_examples/6220087321e6d288024a70c6b09bd720.asciidoc new file mode 100644 index 000000000..b2213798e --- /dev/null +++ b/docs/doc_examples/6220087321e6d288024a70c6b09bd720.asciidoc @@ -0,0 +1,19 @@ +// This 
file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 4, + refresh: "true", + document: { + query: { + match: { + message: "lazy dog", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6244204213f60edf2f23295f9059f2c9.asciidoc b/docs/doc_examples/6244204213f60edf2f23295f9059f2c9.asciidoc new file mode 100644 index 000000000..8c011d9f8 --- /dev/null +++ b/docs/doc_examples/6244204213f60edf2f23295f9059f2c9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.stats({ + metric: "queued_watches", +}); +console.log(response); +---- diff --git a/docs/doc_examples/624e69dedf42c4877234b87ec1d00068.asciidoc b/docs/doc_examples/624e69dedf42c4877234b87ec1d00068.asciidoc new file mode 100644 index 000000000..80c5388ca --- /dev/null +++ b/docs/doc_examples/624e69dedf42c4877234b87ec1d00068.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getLifecycle({ + policy_id: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/625dc94df1f9affb49a082fd99d41620.asciidoc b/docs/doc_examples/625dc94df1f9affb49a082fd99d41620.asciidoc deleted file mode 100644 index 1e5878a4f..000000000 --- a/docs/doc_examples/625dc94df1f9affb49a082fd99d41620.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - routing: 'kimchy', - body: { - user: 'kimchy', - post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/626f8c4b3e2cd3d9beaa63a7f5799d7a.asciidoc b/docs/doc_examples/626f8c4b3e2cd3d9beaa63a7f5799d7a.asciidoc deleted file mode 100644 index c3be30fcc..000000000 --- a/docs/doc_examples/626f8c4b3e2cd3d9beaa63a7f5799d7a.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'twitter', - body: { - query: { - match: { - message: 'tring out Elasticsearch' - } - }, - suggest: { - 'my-suggestion': { - text: 'tring out Elasticsearch', - term: { - field: 'message' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/62c311e7ab4de8b79e532929a5069975.asciidoc b/docs/doc_examples/62c311e7ab4de8b79e532929a5069975.asciidoc new file mode 100644 index 000000000..f1fbcf539 --- /dev/null +++ b/docs/doc_examples/62c311e7ab4de8b79e532929a5069975.asciidoc @@ -0,0 +1,83 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + topics: { + type: "rank_features", + }, + negative_reviews: { + type: "rank_features", + positive_score_impact: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: 
{ + topics: { + politics: 20, + economics: 50.8, + }, + negative_reviews: { + "1star": 10, + "2star": 100, + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + topics: { + politics: 5.2, + sports: 80.1, + }, + negative_reviews: { + "1star": 1, + "2star": 10, + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + rank_feature: { + field: "topics.politics", + }, + }, +}); +console.log(response3); + +const response4 = await client.search({ + index: "my-index-000001", + query: { + rank_feature: { + field: "negative_reviews.1star", + }, + }, +}); +console.log(response4); + +const response5 = await client.search({ + index: "my-index-000001", + query: { + term: { + topics: "economics", + }, + }, +}); +console.log(response5); +---- diff --git a/docs/doc_examples/62ccee6ad356428c2d625742f961ceb7.asciidoc b/docs/doc_examples/62ccee6ad356428c2d625742f961ceb7.asciidoc new file mode 100644 index 000000000..eb4ca18cf --- /dev/null +++ b/docs/doc_examples/62ccee6ad356428c2d625742f961ceb7.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.updateApiKey({ + id: "VuaCfGcBCdbkQm-e5aOx", + role_descriptors: {}, +}); +console.log(response); +---- diff --git a/docs/doc_examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc b/docs/doc_examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc new file mode 100644 index 000000000..f26be6d36 --- /dev/null +++ b/docs/doc_examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "idx", + id: 1, + document: { + foo: [ + { + bar: 1, + }, + { + baz: 2, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc b/docs/doc_examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc new file mode 100644 index 000000000..49b4bbe2c --- /dev/null +++ b/docs/doc_examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + username: "myuser", +}); +console.log(response); +---- diff --git a/docs/doc_examples/62f1ec1bb5cc5a9c2efd536a7474f549.asciidoc b/docs/doc_examples/62f1ec1bb5cc5a9c2efd536a7474f549.asciidoc new file mode 100644 index 000000000..e03e20e45 --- /dev/null +++ b/docs/doc_examples/62f1ec1bb5cc5a9c2efd536a7474f549.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "hunspell", + locale: "en_US", + }, + ], + text: "the foxes jumping quickly", +}); +console.log(response); +---- diff --git a/docs/doc_examples/630d127ccedd25a6cff31ea098ac2847.asciidoc b/docs/doc_examples/630d127ccedd25a6cff31ea098ac2847.asciidoc new file mode 100644 index 000000000..e728af9fe --- /dev/null +++ b/docs/doc_examples/630d127ccedd25a6cff31ea098ac2847.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use 
`node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "node_upgrade", + size: 0, + aggs: { + startup_time_ttest: { + t_test: { + a: { + field: "startup_time_before", + filter: { + term: { + group: "A", + }, + }, + }, + b: { + field: "startup_time_before", + filter: { + term: { + group: "B", + }, + }, + }, + type: "heteroscedastic", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6326f5c6fd2a6e6b1aff9a643b94f455.asciidoc b/docs/doc_examples/6326f5c6fd2a6e6b1aff9a643b94f455.asciidoc new file mode 100644 index 000000000..ce4495d70 --- /dev/null +++ b/docs/doc_examples/6326f5c6fd2a6e6b1aff9a643b94f455.asciidoc @@ -0,0 +1,47 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + text: "quick brown fox", + popularity: 1, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + text: "quick fox", + popularity: 5, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + function_score: { + query: { + match: { + text: "quick brown fox", + }, + }, + script_score: { + script: { + lang: "expression", + source: "_score * doc['popularity']", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/633c8a9fc57268979d8735c557705809.asciidoc b/docs/doc_examples/633c8a9fc57268979d8735c557705809.asciidoc new file mode 100644 index 000000000..7b7b0f2b8 --- /dev/null +++ b/docs/doc_examples/633c8a9fc57268979d8735c557705809.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + kwd: { + type: "keyword", + store: true, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + kwd: ["foo", "foo", "bar", "baz"], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/634ecacf14b83c5f0bb8b6273cf6418e.asciidoc b/docs/doc_examples/634ecacf14b83c5f0bb8b6273cf6418e.asciidoc new file mode 100644 index 000000000..039b6cb0b --- /dev/null +++ b/docs/doc_examples/634ecacf14b83c5f0bb8b6273cf6418e.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "website-product-search", + search_application: { + indices: ["website-products"], + template: { + script: { + source: { + query: { + term: { + "{{field_name}}": "{{field_value}}", + }, + }, + aggs: { + color_facet: { + terms: { + field: "color", + size: "{{agg_size}}", + }, + }, + }, + }, + params: { + field_name: "product_name", + field_value: "hello world", + agg_size: 5, + }, + }, + dictionary: { + properties: { + field_name: { + type: "string", + enum: ["name", "color", "description"], + }, + field_value: { + type: "string", + }, + agg_size: { + type: "integer", + minimum: 1, + maximum: 10, + }, + }, + required: ["field_name"], + additionalProperties: false, + }, + }, + 
}, +}); +console.log(response); +---- diff --git a/docs/doc_examples/63521e0089c631d6668c44a0a9d7fdcc.asciidoc b/docs/doc_examples/63521e0089c631d6668c44a0a9d7fdcc.asciidoc new file mode 100644 index 000000000..ffee3ec5d --- /dev/null +++ b/docs/doc_examples/63521e0089c631d6668c44a0a9d7fdcc.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "custom_limit_example", + settings: { + analysis: { + analyzer: { + whitespace_five_token_limit: { + tokenizer: "whitespace", + filter: ["five_token_limit"], + }, + }, + filter: { + five_token_limit: { + type: "limit", + max_token_count: 5, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6352e846bb83725ae6d853aa64d8697d.asciidoc b/docs/doc_examples/6352e846bb83725ae6d853aa64d8697d.asciidoc new file mode 100644 index 000000000..d5b4d57bf --- /dev/null +++ b/docs/doc_examples/6352e846bb83725ae6d853aa64d8697d.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "12km", + "pin.location": { + lat: 40, + lon: -70, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6365312d470426cab1b77e9ffde49170.asciidoc b/docs/doc_examples/6365312d470426cab1b77e9ffde49170.asciidoc new file mode 100644 index 000000000..b8033119e --- /dev/null +++ b/docs/doc_examples/6365312d470426cab1b77e9ffde49170.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "click_role", + indices: [ + { + names: ["events-*"], + privileges: ["read"], + query: '{"match": {"category": "click"}}', + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/636ee2066450605247ec1f68d04b8ee4.asciidoc b/docs/doc_examples/636ee2066450605247ec1f68d04b8ee4.asciidoc new file mode 100644 index 000000000..91d90ea7a --- /dev/null +++ b/docs/doc_examples/636ee2066450605247ec1f68d04b8ee4.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + "http.clientip": "40.135.0.0", + }, + }, + fields: ["*"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc b/docs/doc_examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc new file mode 100644 index 000000000..d8ded8387 --- /dev/null +++ b/docs/doc_examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteCalendar({ + calendar_id: "planned-outages", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b9c5d7ca6ca9c6f747201f45337a4abf.asciidoc b/docs/doc_examples/63cc960215ae83b359c12df3c0993bfa.asciidoc similarity index 64% rename from docs/doc_examples/b9c5d7ca6ca9c6f747201f45337a4abf.asciidoc rename 
to docs/doc_examples/63cc960215ae83b359c12df3c0993bfa.asciidoc index d4ad0cff8..3ad9daa12 100644 --- a/docs/doc_examples/b9c5d7ca6ca9c6f747201f45337a4abf.asciidoc +++ b/docs/doc_examples/63cc960215ae83b359c12df3c0993bfa.asciidoc @@ -4,14 +4,13 @@ [source, js] ---- const response = await client.indices.create({ - index: 'twitter', - body: { - settings: { + index: "my-index-000001", + settings: { + index: { number_of_shards: 3, - number_of_replicas: 2 - } - } -}) -console.log(response) + number_of_replicas: 2, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/63d1c07d22a3ca3b0ec6d950547c011c.asciidoc b/docs/doc_examples/63d1c07d22a3ca3b0ec6d950547c011c.asciidoc new file mode 100644 index 000000000..6d46c2baa --- /dev/null +++ b/docs/doc_examples/63d1c07d22a3ca3b0ec6d950547c011c.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + kwd: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + kwd: ["foo", "foo", "bar", "baz"], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/63e20883732ec30b5400046be2efb0f1.asciidoc b/docs/doc_examples/63e20883732ec30b5400046be2efb0f1.asciidoc new file mode 100644 index 000000000..57d1e690f --- /dev/null +++ b/docs/doc_examples/63e20883732ec30b5400046be2efb0f1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.flush({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/63ecdab34940af053acc409164914c32.asciidoc b/docs/doc_examples/63ecdab34940af053acc409164914c32.asciidoc new file mode 100644 index 000000000..b19b74623 --- /dev/null +++ b/docs/doc_examples/63ecdab34940af053acc409164914c32.asciidoc @@ -0,0 +1,81 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + text: { + type: "text", + analyzer: "standard", + }, + impact: { + type: "sparse_vector", + }, + positive: { + type: "sparse_vector", + }, + negative: { + type: "sparse_vector", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + document: { + text: "I had some terribly delicious carrots.", + impact: [ + { + I: 0.55, + had: 0.4, + some: 0.28, + terribly: 0.01, + delicious: 1.2, + carrots: 0.8, + }, + { + I: 0.54, + had: 0.4, + some: 0.28, + terribly: 2.01, + delicious: 0.02, + carrots: 0.4, + }, + ], + positive: { + I: 0.55, + had: 0.4, + some: 0.28, + terribly: 0.01, + delicious: 1.2, + carrots: 0.8, + }, + negative: { + I: 0.54, + had: 0.4, + some: 0.28, + terribly: 2.01, + delicious: 0.02, + carrots: 0.4, + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + term: { + impact: { + value: "delicious", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc b/docs/doc_examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc new 
file mode 100644 index 000000000..a9ee715a4 --- /dev/null +++ b/docs/doc_examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_last_sync", + body: { + last_access_control_sync_error: "Houston, we have a problem!", + last_access_control_sync_scheduled_at: "2023-11-09T15:13:08.231Z", + last_access_control_sync_status: "pending", + last_deleted_document_count: 42, + last_incremental_sync_scheduled_at: "2023-11-09T15:13:08.231Z", + last_indexed_document_count: 42, + last_sync_error: "Houston, we have a problem!", + last_sync_scheduled_at: "2024-11-09T15:13:08.231Z", + last_sync_status: "completed", + last_synced: "2024-11-09T15:13:08.231Z", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/640a89d0b39630269433425ff476faf3.asciidoc b/docs/doc_examples/640a89d0b39630269433425ff476faf3.asciidoc new file mode 100644 index 000000000..a64753ac7 --- /dev/null +++ b/docs/doc_examples/640a89d0b39630269433425ff476faf3.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "archived.*": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/640da6dd719a34975b5627dfa5fcdd55.asciidoc b/docs/doc_examples/640da6dd719a34975b5627dfa5fcdd55.asciidoc new file mode 100644 index 000000000..a61d31c9e --- /dev/null +++ b/docs/doc_examples/640da6dd719a34975b5627dfa5fcdd55.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "xpack.monitoring.collection.enabled": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/640e4f2c2d29f9851320a70927bd7a6c.asciidoc b/docs/doc_examples/640e4f2c2d29f9851320a70927bd7a6c.asciidoc new file mode 100644 index 000000000..e3b853969 --- /dev/null +++ b/docs/doc_examples/640e4f2c2d29f9851320a70927bd7a6c.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "indices.lifecycle.poll_interval": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/641009f2147e1ca56215c701f45c970b.asciidoc b/docs/doc_examples/641009f2147e1ca56215c701f45c970b.asciidoc new file mode 100644 index 000000000..a07dc7981 --- /dev/null +++ b/docs/doc_examples/641009f2147e1ca56215c701f45c970b.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "tiles-in-bounds": { + geotile_grid: { + field: "location", + precision: 22, + bounds: { + top_left: "POINT (4.9 52.4)", + bottom_right: "POINT (5.0 52.3)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc b/docs/doc_examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc new file mode 100644 index 
000000000..c6274ed1e --- /dev/null +++ b/docs/doc_examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.segments(); +console.log(response); +---- diff --git a/docs/doc_examples/641f75862c70e25e79d249d9e0a79f03.asciidoc b/docs/doc_examples/641f75862c70e25e79d249d9e0a79f03.asciidoc new file mode 100644 index 000000000..002fbb288 --- /dev/null +++ b/docs/doc_examples/641f75862c70e25e79d249d9e0a79f03.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + nested: { + path: "obj1", + query: { + bool: { + must: [ + { + match: { + "obj1.name": "blue", + }, + }, + { + range: { + "obj1.count": { + gt: 5, + }, + }, + }, + ], + }, + }, + score_mode: "avg", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/642161d70dacf7d153767d37d3726838.asciidoc b/docs/doc_examples/642161d70dacf7d153767d37d3726838.asciidoc new file mode 100644 index 000000000..d3906eec1 --- /dev/null +++ b/docs/doc_examples/642161d70dacf7d153767d37d3726838.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.getRollupIndexCaps({ + index: "*_rollup", +}); +console.log(response); +---- diff --git a/docs/doc_examples/642c0c1c76e9bf226cd216ebae9ab958.asciidoc b/docs/doc_examples/642c0c1c76e9bf226cd216ebae9ab958.asciidoc new file mode 100644 index 000000000..832c25a7a --- /dev/null +++ b/docs/doc_examples/642c0c1c76e9bf226cd216ebae9ab958.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "keep_words_example", + settings: { + analysis: { + analyzer: { + standard_keep_word_array: { + tokenizer: "standard", + filter: ["keep_word_array"], + }, + standard_keep_word_file: { + tokenizer: "standard", + filter: ["keep_word_file"], + }, + }, + filter: { + keep_word_array: { + type: "keep", + keep_words: ["one", "two", "three"], + }, + keep_word_file: { + type: "keep", + keep_words_path: "analysis/example_word_list.txt", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/643b9506d1129d5215f9a1bb0b509aba.asciidoc b/docs/doc_examples/643b9506d1129d5215f9a1bb0b509aba.asciidoc new file mode 100644 index 000000000..1d7d9df89 --- /dev/null +++ b/docs/doc_examples/643b9506d1129d5215f9a1bb0b509aba.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + full_name: { + path_match: "name.*", + path_unmatch: "*.middle", + mapping: { + type: "text", + copy_to: "full_name", + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + name: { + first: "John", + middle: "Winston", + last: "Lennon", + }, + }, +}); +console.log(response1); +---- diff --git 
a/docs/doc_examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc b/docs/doc_examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc new file mode 100644 index 000000000..0bc357bb7 --- /dev/null +++ b/docs/doc_examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-index-template", + index_patterns: ["logs-*"], + data_stream: {}, + template: { + settings: { + "index.mode": "logsdb", + }, + }, + priority: 101, +}); +console.log(response); +---- diff --git a/docs/doc_examples/645136747d37368a14ab34de8bd046c6.asciidoc b/docs/doc_examples/645136747d37368a14ab34de8bd046c6.asciidoc deleted file mode 100644 index ef507a7b1..000000000 --- a/docs/doc_examples/645136747d37368a14ab34de8bd046c6.asciidoc +++ /dev/null @@ -1,57 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - date: { - type: 'date' - } - } - } - } -}) -console.log(response0) - -const response1 = await client.index({ - index: 'my_index', - id: '1', - body: { - date: '2015-01-01' - } -}) -console.log(response1) - -const response2 = await client.index({ - index: 'my_index', - id: '2', - body: { - date: '2015-01-01T12:10:30Z' - } -}) -console.log(response2) - -const response3 = await client.index({ - index: 'my_index', - id: '3', - body: { - date: 1420070400001 - } -}) -console.log(response3) - -const response4 = await client.search({ - index: 'my_index', - body: { - sort: { - date: 'asc' - } - } -}) -console.log(response4) ----- - diff --git a/docs/doc_examples/645433e8e479e5d71c100f66dd2de5d0.asciidoc b/docs/doc_examples/645433e8e479e5d71c100f66dd2de5d0.asciidoc new file mode 100644 index 000000000..ed93f04b5 --- /dev/null +++ b/docs/doc_examples/645433e8e479e5d71c100f66dd2de5d0.asciidoc @@ -0,0 +1,534 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-data-stream", + refresh: "true", + pipeline: "my-timestamp-pipeline", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:49:00Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 91153, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 463314616, + }, + usage: { + bytes: 307007078, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 585236, + }, + rss: { + bytes: 102728, + }, + pagefaults: 120901, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:45:50Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 124501, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 982546514, + }, + usage: { + bytes: 360035574, + node: { + 
pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1339884, + }, + rss: { + bytes: 381174, + }, + pagefaults: 178473, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:50Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 38907, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 862723768, + }, + usage: { + bytes: 379572388, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 431227, + }, + rss: { + bytes: 386580, + }, + pagefaults: 233166, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:40Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 86706, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 567160996, + }, + usage: { + bytes: 103266017, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1724908, + }, + rss: { + bytes: 105431, + }, + pagefaults: 233166, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:00Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 150069, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 639054643, + }, + usage: { + bytes: 265142477, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1786511, + }, + rss: { + bytes: 189235, + }, + pagefaults: 138172, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:42:40Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 82260, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 854735585, + }, + usage: { + bytes: 309798052, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 924058, + }, + rss: { + bytes: 110838, + }, + pagefaults: 259073, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:42:10Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 153404, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, 
+ }, + }, + }, + memory: { + available: { + bytes: 279586406, + }, + usage: { + bytes: 214904955, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1047265, + }, + rss: { + bytes: 91914, + }, + pagefaults: 302252, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:40:20Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 125613, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 822782853, + }, + usage: { + bytes: 100475044, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 2109932, + }, + rss: { + bytes: 278446, + }, + pagefaults: 74843, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:40:10Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 100046, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 567160996, + }, + usage: { + bytes: 362826547, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1986724, + }, + rss: { + bytes: 402801, + }, + pagefaults: 296495, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:38:30Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 40018, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 1062428344, + }, + usage: { + bytes: 265142477, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 2294743, + }, + rss: { + bytes: 340623, + }, + pagefaults: 224530, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/645796e8047967ca4a7635a22a876f4c.asciidoc b/docs/doc_examples/645796e8047967ca4a7635a22a876f4c.asciidoc deleted file mode 100644 index 76b389764..000000000 --- a/docs/doc_examples/645796e8047967ca4a7635a22a876f4c.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - size: 0, - aggs: { - group_by_state: { - terms: { - field: 'state.keyword', - order: { - average_balance: 'desc' - } - }, - aggs: { - average_balance: { - avg: { - field: 'balance' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/645c4c6e209719d3a4d25b1a629cb23b.asciidoc b/docs/doc_examples/645c4c6e209719d3a4d25b1a629cb23b.asciidoc deleted file mode 
100644 index 89ed665b3..000000000 --- a/docs/doc_examples/645c4c6e209719d3a4d25b1a629cb23b.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - random_score: { - seed: 10, - field: '_seq_no' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/64622409407316d2d47094e692d9b516.asciidoc b/docs/doc_examples/64622409407316d2d47094e692d9b516.asciidoc new file mode 100644 index 000000000..19d1e10d5 --- /dev/null +++ b/docs/doc_examples/64622409407316d2d47094e692d9b516.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.evaluateDataFrame({ + index: "student_performance_mathematics_reg", + query: { + term: { + "ml.is_training": { + value: false, + }, + }, + }, + evaluation: { + regression: { + actual_field: "G3", + predicted_field: "ml.G3_prediction", + metrics: { + r_squared: {}, + mse: {}, + msle: {}, + huber: {}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6464124d1677f4552ddddd95a340ca3a.asciidoc b/docs/doc_examples/6464124d1677f4552ddddd95a340ca3a.asciidoc index 91d70dc40..81f43eb4f 100644 --- a/docs/doc_examples/6464124d1677f4552ddddd95a340ca3a.asciidoc +++ b/docs/doc_examples/6464124d1677f4552ddddd95a340ca3a.asciidoc @@ -3,41 +3,40 @@ [source, js] ---- -const response0 = await client.index({ - index: 'library', - refresh: true, - body: { - title: 'Book #1', - rating: 200.1 - } -}) -console.log(response0) +const response = await client.index({ + index: "library", + refresh: "true", + document: { + title: "Book #1", + rating: 200.1, + }, +}); +console.log(response); const response1 = await client.index({ - index: 'library', - refresh: true, - body: { - title: 'Book #2', - rating: 1.7 - } -}) -console.log(response1) + index: "library", + refresh: "true", + document: { + title: "Book #2", + rating: 1.7, + }, +}); +console.log(response1); const response2 = await client.index({ - index: 'library', - refresh: true, - body: { - title: 'Book #3', - rating: 0.1 - } -}) -console.log(response2) + index: "library", + refresh: "true", + document: { + title: "Book #3", + rating: 0.1, + }, +}); +console.log(response2); const response3 = await client.search({ - filter_path: 'hits.hits._source', - _source: 'title', - sort: 'rating:desc' -}) -console.log(response3) + filter_path: "hits.hits._source", + _source: "title", + sort: "rating:desc", +}); +console.log(response3); ---- - diff --git a/docs/doc_examples/646d71869f1a18c5bede7759559bfc47.asciidoc b/docs/doc_examples/646d71869f1a18c5bede7759559bfc47.asciidoc new file mode 100644 index 000000000..9f9b95638 --- /dev/null +++ b/docs/doc_examples/646d71869f1a18c5bede7759559bfc47.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "_all", + fields: "message", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6490d89a4e43cac5e6b9bc19840d5478.asciidoc b/docs/doc_examples/6490d89a4e43cac5e6b9bc19840d5478.asciidoc new file mode 100644 index 000000000..6406a2c30 --- /dev/null +++ b/docs/doc_examples/6490d89a4e43cac5e6b9bc19840d5478.asciidoc @@ 
-0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "fingerprint", + text: "Yes yes, Gödel said this sentence is consistent and.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/64a6fb4bcb8cfea139a0e5d3765c063a.asciidoc b/docs/doc_examples/64a6fb4bcb8cfea139a0e5d3765c063a.asciidoc new file mode 100644 index 000000000..64d437f6f --- /dev/null +++ b/docs/doc_examples/64a6fb4bcb8cfea139a0e5d3765c063a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.translate({ + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 10, +}); +console.log(response); +---- diff --git a/docs/doc_examples/64a79861225553799b26e118d7851dcc.asciidoc b/docs/doc_examples/64a79861225553799b26e118d7851dcc.asciidoc new file mode 100644 index 000000000..b093d850f --- /dev/null +++ b/docs/doc_examples/64a79861225553799b26e118d7851dcc.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.explainLifecycle({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/64aff98cf477555e7411714c17006572.asciidoc b/docs/doc_examples/64aff98cf477555e7411714c17006572.asciidoc new file mode 100644 index 000000000..906360d56 --- /dev/null +++ b/docs/doc_examples/64aff98cf477555e7411714c17006572.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + range: { + timestamp: { + gte: "now-1d/d", + lte: "now/d", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/64b9baa6d7556b960b29698f3383aa31.asciidoc b/docs/doc_examples/64b9baa6d7556b960b29698f3383aa31.asciidoc deleted file mode 100644 index 4c2382017..000000000 --- a/docs/doc_examples/64b9baa6d7556b960b29698f3383aa31.asciidoc +++ /dev/null @@ -1,27 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - remote: { - host: 'http://otherhost:9200/' - }, - index: 'source', - size: 10, - query: { - match: { - test: 'data' - } - } - }, - dest: { - index: 'dest' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc b/docs/doc_examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc new file mode 100644 index 000000000..2f0e298e1 --- /dev/null +++ b/docs/doc_examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.features.getFeatures(); +console.log(response); +---- diff --git a/docs/doc_examples/64c804869ddfbcb9075817d0bbf71b5c.asciidoc b/docs/doc_examples/64c804869ddfbcb9075817d0bbf71b5c.asciidoc new file mode 100644 index 000000000..71c886d2a --- /dev/null +++ b/docs/doc_examples/64c804869ddfbcb9075817d0bbf71b5c.asciidoc @@ -0,0 +1,24 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + elser: true, + query_string: "where is the best mountain climbing?", + elser_fields: [ + { + name: "title", + boost: 1, + }, + { + name: "description", + boost: 1, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/64ca2ccb79a8f4add5b8fe2d3322ae92.asciidoc b/docs/doc_examples/64ca2ccb79a8f4add5b8fe2d3322ae92.asciidoc new file mode 100644 index 000000000..c82166c2e --- /dev/null +++ b/docs/doc_examples/64ca2ccb79a8f4add5b8fe2d3322ae92.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + avg_grade: { + avg: { + field: "grade", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc b/docs/doc_examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc new file mode 100644 index 000000000..b5f8b138e --- /dev/null +++ b/docs/doc_examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + index: "test", + stored_fields: "field1,field2", + docs: [ + { + _id: "1", + }, + { + _id: "2", + stored_fields: ["field3", "field4"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/64ffaa6814ec1ec4f59b8f33b47cffb4.asciidoc b/docs/doc_examples/64ffaa6814ec1ec4f59b8f33b47cffb4.asciidoc new file mode 100644 index 000000000..cac6f7e52 --- /dev/null +++ b/docs/doc_examples/64ffaa6814ec1ec4f59b8f33b47cffb4.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index", + settings: { + "archived.*": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/650a0fb27c66a790c4687267423af1da.asciidoc b/docs/doc_examples/650a0fb27c66a790c4687267423af1da.asciidoc new file mode 100644 index 000000000..a82a5ec0e --- /dev/null +++ b/docs/doc_examples/650a0fb27c66a790c4687267423af1da.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + remove: { + index: "logs-nginx.access-prod", + alias: "logs", + }, + }, + { + add: { + index: "logs-my_app-default", + alias: "logs", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/6521c3578dc4ad4a6db697700986e78e.asciidoc b/docs/doc_examples/6521c3578dc4ad4a6db697700986e78e.asciidoc new file mode 100644 index 000000000..f0438bf28 --- /dev/null +++ b/docs/doc_examples/6521c3578dc4ad4a6db697700986e78e.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "place", + pretty: "true", + suggest: { + place_suggestion: { + prefix: "tim", + completion: { + field: "suggest", + size: 10, + 
contexts: { + location: [ + { + lat: 43.6624803, + lon: -79.3863353, + precision: 2, + }, + { + context: { + lat: 43.6624803, + lon: -79.3863353, + }, + boost: 2, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/653c0d0ef146c997ef6bc6450d4f5f94.asciidoc b/docs/doc_examples/653c0d0ef146c997ef6bc6450d4f5f94.asciidoc new file mode 100644 index 000000000..40435f67a --- /dev/null +++ b/docs/doc_examples/653c0d0ef146c997ef6bc6450d4f5f94.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + actors: { + terms: { + field: "actors", + size: 10, + }, + aggs: { + costars: { + terms: { + field: "actors", + size: 5, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/654882f545eca8d7047695f867c63072.asciidoc b/docs/doc_examples/654882f545eca8d7047695f867c63072.asciidoc new file mode 100644 index 000000000..678ccc1fe --- /dev/null +++ b/docs/doc_examples/654882f545eca8d7047695f867c63072.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.stopTransform({ + transform_id: "ecommerce_transform", +}); +console.log(response); +---- diff --git a/docs/doc_examples/65578c390837cb4c0fcc77fb17857714.asciidoc b/docs/doc_examples/65578c390837cb4c0fcc77fb17857714.asciidoc new file mode 100644 index 000000000..8030d9270 --- /dev/null +++ b/docs/doc_examples/65578c390837cb4c0fcc77fb17857714.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + max_monthly_sales: { + max_bucket: { + buckets_path: "sales_per_month>sales", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/657cf67bbc48f3b8c7fa15e275a5ef72.asciidoc b/docs/doc_examples/657cf67bbc48f3b8c7fa15e275a5ef72.asciidoc new file mode 100644 index 000000000..85ccdf9af --- /dev/null +++ b/docs/doc_examples/657cf67bbc48f3b8c7fa15e275a5ef72.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "logs-foo_component1", + template: { + mappings: { + properties: { + "host.name": { + type: "keyword", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/658842bf41e0fcb7969937155946a0ff.asciidoc b/docs/doc_examples/658842bf41e0fcb7969937155946a0ff.asciidoc new file mode 100644 index 000000000..46cdd17c1 --- /dev/null +++ b/docs/doc_examples/658842bf41e0fcb7969937155946a0ff.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "slm-read-only", + cluster: ["read_slm"], + indices: [ + { + names: [".slm-history-*"], + privileges: ["read"], + }, + ], +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc b/docs/doc_examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc new file mode 100644 index 000000000..3fe957756 --- /dev/null +++ b/docs/doc_examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + sparse_vector: { + field: "ml.tokens", + inference_id: "the inference ID to produce the token weights", + query: "the query string", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/65c671fbecdb5b0d75c13d63f87e36f0.asciidoc b/docs/doc_examples/65c671fbecdb5b0d75c13d63f87e36f0.asciidoc new file mode 100644 index 000000000..5644ea387 --- /dev/null +++ b/docs/doc_examples/65c671fbecdb5b0d75c13d63f87e36f0.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggs: { + rings_around_amsterdam: { + geo_distance: { + field: "location", + origin: "POINT (4.894 52.3760)", + ranges: [ + { + to: 100000, + }, + { + from: 100000, + to: 300000, + }, + { + from: 300000, + }, + ], + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/65e892a362d940e4a74965f21c15ca09.asciidoc b/docs/doc_examples/65e892a362d940e4a74965f21c15ca09.asciidoc new file mode 100644 index 000000000..fdab99f78 --- /dev/null +++ b/docs/doc_examples/65e892a362d940e4a74965f21c15ca09.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "openai-embeddings", + pipeline: "openai_embeddings", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6606d46685d10377b996b5f20f1229b5.asciidoc b/docs/doc_examples/6606d46685d10377b996b5f20f1229b5.asciidoc new file mode 100644 index 000000000..4a0655e33 --- /dev/null +++ b/docs/doc_examples/6606d46685d10377b996b5f20f1229b5.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_index_name", + body: { + index_name: "data-from-my-google-drive", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc b/docs/doc_examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc new file mode 100644 index 000000000..9856c1923 --- /dev/null +++ b/docs/doc_examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "ledger", + size: 0, + query: { + match_all: {}, + }, + aggs: { + profit: { + scripted_metric: { + init_script: "state.transactions = []", + map_script: + "state.transactions.add(doc.type.value == 'sale' ? 
doc.amount.value : -1 * doc.amount.value)", + combine_script: + "double profit = 0; for (t in state.transactions) { profit += t } return profit", + reduce_script: + "double profit = 0; for (a in states) { profit += a } return profit", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc b/docs/doc_examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc new file mode 100644 index 000000000..a1e318f4e --- /dev/null +++ b/docs/doc_examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + h: "ip,port,heapPercent,name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/666c420fe61fa122386da3c356a64943.asciidoc b/docs/doc_examples/666c420fe61fa122386da3c356a64943.asciidoc new file mode 100644 index 000000000..1da863638 --- /dev/null +++ b/docs/doc_examples/666c420fe61fa122386da3c356a64943.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + term: { + user: "kimchy", + }, + }, + sort: { + _script: { + type: "number", + script: { + lang: "painless", + source: "doc['field_name'].value * params.factor", + params: { + factor: 1.1, + }, + }, + order: "asc", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6689aa213884196b47a6f482d4993749.asciidoc b/docs/doc_examples/6689aa213884196b47a6f482d4993749.asciidoc new file mode 100644 index 000000000..437216410 --- /dev/null +++ b/docs/doc_examples/6689aa213884196b47a6f482d4993749.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline-id", + description: "My optional pipeline description", + processors: [ + { + set: { + description: "My optional processor description", + field: "my-keyword-field", + value: "foo", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/6693f0ffa0de3229b5dedda197810e70.asciidoc b/docs/doc_examples/6693f0ffa0de3229b5dedda197810e70.asciidoc new file mode 100644 index 000000000..6e92e6268 --- /dev/null +++ b/docs/doc_examples/6693f0ffa0de3229b5dedda197810e70.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.get({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + keep_alive: "5d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/669773766b041be768003055ad523038.asciidoc b/docs/doc_examples/669773766b041be768003055ad523038.asciidoc new file mode 100644 index 000000000..0934ce2fc --- /dev/null +++ b/docs/doc_examples/669773766b041be768003055ad523038.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: ".ds-my-data-stream-2099.03.08-000002", + id: 2, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6705eca2095ade294548cfb25bf2dd86.asciidoc 
b/docs/doc_examples/6705eca2095ade294548cfb25bf2dd86.asciidoc new file mode 100644 index 000000000..7b190a4c5 --- /dev/null +++ b/docs/doc_examples/6705eca2095ade294548cfb25bf2dd86.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + v: "true", + h: "index,shard,prirep,state,node,unassigned.reason", + s: "state", +}); +console.log(response); +---- diff --git a/docs/doc_examples/53d938c754f36a912fcbe6473abb463f.asciidoc b/docs/doc_examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc similarity index 59% rename from docs/doc_examples/53d938c754f36a912fcbe6473abb463f.asciidoc rename to docs/doc_examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc index 313a18ae4..c60551fda 100644 --- a/docs/doc_examples/53d938c754f36a912fcbe6473abb463f.asciidoc +++ b/docs/doc_examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc @@ -4,15 +4,12 @@ [source, js] ---- const response = await client.reindex({ - body: { - source: { - index: 'users' - }, - dest: { - index: 'new_users' - } - } -}) -console.log(response) + source: { + index: "my-index-000001", + }, + dest: { + index: "my-index-000002", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/672d30eb3af573140d966e88b14814f8.asciidoc b/docs/doc_examples/672d30eb3af573140d966e88b14814f8.asciidoc new file mode 100644 index 000000000..02dd6b739 --- /dev/null +++ b/docs/doc_examples/672d30eb3af573140d966e88b14814f8.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index", + id: 1, + pipeline: "monthlyindex", + document: { + date1: "2016-04-25T12:02:01.789Z", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6742a8cd0b7b4c1c325ce2f22faf6cb4.asciidoc b/docs/doc_examples/6742a8cd0b7b4c1c325ce2f22faf6cb4.asciidoc new file mode 100644 index 000000000..7d1c236b7 --- /dev/null +++ b/docs/doc_examples/6742a8cd0b7b4c1c325ce2f22faf6cb4.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "log-messages", + filter_path: "aggregations", + aggs: { + categories: { + categorize_text: { + field: "message", + categorization_filters: ["\\w+\\_\\d{3}"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/674bb755111c6fbaa4c5ac759395c122.asciidoc b/docs/doc_examples/674bb755111c6fbaa4c5ac759395c122.asciidoc new file mode 100644 index 000000000..21120aeb3 --- /dev/null +++ b/docs/doc_examples/674bb755111c6fbaa4c5ac759395c122.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index", + flat_settings: "true", + include_defaults: "true", +}); +console.log(response); + +const response1 = await client.cluster.getSettings({ + flat_settings: "true", + include_defaults: "true", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/67967388db610dcb9d24fb59ede348d8.asciidoc b/docs/doc_examples/67967388db610dcb9d24fb59ede348d8.asciidoc new file mode 100644 index 000000000..577b3ea88 --- /dev/null +++ 
b/docs/doc_examples/67967388db610dcb9d24fb59ede348d8.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + min_price: { + min: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/67a1f31cf60773a2378c2c30723c4b96.asciidoc b/docs/doc_examples/67a1f31cf60773a2378c2c30723c4b96.asciidoc new file mode 100644 index 000000000..a64330970 --- /dev/null +++ b/docs/doc_examples/67a1f31cf60773a2378c2c30723c4b96.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_ranks: { + percentile_ranks: { + field: "load_time", + values: [500, 600], + missing: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/67a490d749a0c3bb16a266663423893d.asciidoc b/docs/doc_examples/67a490d749a0c3bb16a266663423893d.asciidoc new file mode 100644 index 000000000..f650cc07a --- /dev/null +++ b/docs/doc_examples/67a490d749a0c3bb16a266663423893d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.deleteWatch({ + id: "log_error_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/67a55ac3aaee09f4aeeb7d2763da3335.asciidoc b/docs/doc_examples/67a55ac3aaee09f4aeeb7d2763da3335.asciidoc new file mode 100644 index 000000000..13c543473 --- /dev/null +++ b/docs/doc_examples/67a55ac3aaee09f4aeeb7d2763da3335.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "places", + mappings: { + properties: { + geometry: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "places", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + name: "NEMO Science Museum", + geometry: "POINT(4.912350 52.374081)", + }, + { + index: { + _id: 2, + }, + }, + { + name: "Sportpark De Weeren", + geometry: { + type: "Polygon", + coordinates: [ + [ + [4.965305328369141, 52.39347642069457], + [4.966979026794433, 52.391721758934835], + [4.969425201416015, 52.39238958618537], + [4.967944622039794, 52.39420969150824], + [4.965305328369141, 52.39347642069457], + ], + ], + }, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "places", + size: 0, + aggs: { + viewport: { + geo_bounds: { + field: "geometry", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc b/docs/doc_examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc new file mode 100644 index 000000000..3dc53206e --- /dev/null +++ b/docs/doc_examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_snapshot/my_repository/_analyze", + querystring: { + blob_count: "10", 
+ max_blob_size: "1mb", + timeout: "120s", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/67bab07fda27ef77e3bc948211051a33.asciidoc b/docs/doc_examples/67bab07fda27ef77e3bc948211051a33.asciidoc new file mode 100644 index 000000000..5bc95aa00 --- /dev/null +++ b/docs/doc_examples/67bab07fda27ef77e3bc948211051a33.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.threadPool({ + thread_pool_patterns: "write,search", + v: "true", + s: "n,nn", + h: "n,nn,q,a,r,c", +}); +console.log(response); +---- diff --git a/docs/doc_examples/67c3808751223eef69a57e6fd02ddf4f.asciidoc b/docs/doc_examples/67c3808751223eef69a57e6fd02ddf4f.asciidoc new file mode 100644 index 000000000..37ef597f7 --- /dev/null +++ b/docs/doc_examples/67c3808751223eef69a57e6fd02ddf4f.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + more_like_this: { + fields: ["title", "description"], + like: [ + { + _index: "imdb", + _id: "1", + }, + { + _index: "imdb", + _id: "2", + }, + "and potentially some more text here as well", + ], + min_term_freq: 1, + max_query_terms: 12, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/67ceac4bf2d9ac7cc500390544cdcb41.asciidoc b/docs/doc_examples/67ceac4bf2d9ac7cc500390544cdcb41.asciidoc deleted file mode 100644 index b847c0c06..000000000 --- a/docs/doc_examples/67ceac4bf2d9ac7cc500390544cdcb41.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - range: { - timestamp: { - gte: 'now-1d/d', - lt: 'now/d' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/67ffa135c50c43d6788636c88078c7d1.asciidoc b/docs/doc_examples/67ffa135c50c43d6788636c88078c7d1.asciidoc new file mode 100644 index 000000000..16ae9d9b2 --- /dev/null +++ b/docs/doc_examples/67ffa135c50c43d6788636c88078c7d1.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + id: "my-pipeline-id", + docs: [ + { + _index: "index", + _id: "id", + _source: { + foo: "bar", + }, + }, + { + _index: "index", + _id: "id", + _source: { + foo: "rab", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/682336e5232c9ad3d866cb203d1c58c1.asciidoc b/docs/doc_examples/682336e5232c9ad3d866cb203d1c58c1.asciidoc new file mode 100644 index 000000000..6da7fb4fa --- /dev/null +++ b/docs/doc_examples/682336e5232c9ad3d866cb203d1c58c1.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "azure-openai-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1536, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6843d859e2965d17cad4f033c81db83f.asciidoc 
b/docs/doc_examples/6843d859e2965d17cad4f033c81db83f.asciidoc new file mode 100644 index 000000000..440cd6741 --- /dev/null +++ b/docs/doc_examples/6843d859e2965d17cad4f033c81db83f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, + template: { + settings: { + "sort.field": ["@timestamp"], + "sort.order": ["desc"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6856f7c6a732ab55ca71c1ee2ec2bbad.asciidoc b/docs/doc_examples/6856f7c6a732ab55ca71c1ee2ec2bbad.asciidoc new file mode 100644 index 000000000..5184e4c0a --- /dev/null +++ b/docs/doc_examples/6856f7c6a732ab55ca71c1ee2ec2bbad.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "metrics_index", + mappings: { + properties: { + latency_histo: { + type: "histogram", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 1, + refresh: "true", + document: { + "network.name": "net-1", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "metrics_index", + id: 2, + refresh: "true", + document: { + "network.name": "net-2", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "metrics_index", + size: 0, + filter_path: "aggregations", + aggs: { + max_latency: { + max: { + field: "latency_histo", + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/6859530dd9d85e59bd33a53ec96a3836.asciidoc b/docs/doc_examples/6859530dd9d85e59bd33a53ec96a3836.asciidoc new file mode 100644 index 000000000..0558f50d4 --- /dev/null +++ b/docs/doc_examples/6859530dd9d85e59bd33a53ec96a3836.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "users", + id: 1, + refresh: "wait_for", + document: { + email: "mardy.brown@asciidocsmith.com", + first_name: "Mardy", + last_name: "Brown", + city: "New Orleans", + county: "Orleans", + state: "LA", + zip: 70116, + web: "mardy.asciidocsmith.com", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/686bc640b877de845c46bef372a9866c.asciidoc b/docs/doc_examples/686bc640b877de845c46bef372a9866c.asciidoc new file mode 100644 index 000000000..e081be4f1 --- /dev/null +++ b/docs/doc_examples/686bc640b877de845c46bef372a9866c.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "parent_example", + size: 0, + aggs: { + "top-names": { + terms: { + field: "owner.display_name.keyword", + size: 10, + }, + aggs: { + "to-questions": { + parent: { + type: "answer", + }, + aggs: { + "top-tags": { + terms: { + field: "tags.keyword", + size: 10, + }, + }, + }, + }, + }, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/68721288dc9ad8aa1b55099b4d303051.asciidoc b/docs/doc_examples/68721288dc9ad8aa1b55099b4d303051.asciidoc index 7dc6e69b6..6b3caa8d2 100644 --- a/docs/doc_examples/68721288dc9ad8aa1b55099b4d303051.asciidoc +++ b/docs/doc_examples/68721288dc9ad8aa1b55099b4d303051.asciidoc @@ -4,19 +4,13 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'quick brown f', - type: 'bool_prefix', - fields: [ - 'subject', - 'message' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "quick brown f", + type: "bool_prefix", + fields: ["subject", "message"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/68738b4fd0dda177022be45be95b4c84.asciidoc b/docs/doc_examples/68738b4fd0dda177022be45be95b4c84.asciidoc index f98801129..950fc0229 100644 --- a/docs/doc_examples/68738b4fd0dda177022be45be95b4c84.asciidoc +++ b/docs/doc_examples/68738b4fd0dda177022be45be95b4c84.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.reindexRethrottle({ - task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619', - requests_per_second: '-1' -}) -console.log(response) + task_id: "r1A2WoRbTwKZ516z6NEs5A:36619", + requests_per_second: "-1", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/6884454f57c3a41059037ea762f48d77.asciidoc b/docs/doc_examples/6884454f57c3a41059037ea762f48d77.asciidoc new file mode 100644 index 000000000..c870eedf0 --- /dev/null +++ b/docs/doc_examples/6884454f57c3a41059037ea762f48d77.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "standard", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/68a891f609ca3a379d2d64e4914f3067.asciidoc b/docs/doc_examples/68a891f609ca3a379d2d64e4914f3067.asciidoc new file mode 100644 index 000000000..324247ffd --- /dev/null +++ b/docs/doc_examples/68a891f609ca3a379d2d64e4914f3067.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["kstem"], + text: "the foxes jumping quickly", +}); +console.log(response); +---- diff --git a/docs/doc_examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc b/docs/doc_examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc new file mode 100644 index 000000000..ed5756abf --- /dev/null +++ b/docs/doc_examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.info({ + node_id: "plugins", +}); +console.log(response); +---- diff --git a/docs/doc_examples/68cb8a452e780ca78b0cb761be3629af.asciidoc b/docs/doc_examples/68cb8a452e780ca78b0cb761be3629af.asciidoc new file mode 100644 index 000000000..df5f75e1f --- /dev/null +++ b/docs/doc_examples/68cb8a452e780ca78b0cb761be3629af.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + stored_fields: "_none_", + query: { + term: { + 
user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/691fe20d467324ed43a36fd15852c492.asciidoc b/docs/doc_examples/691fe20d467324ed43a36fd15852c492.asciidoc new file mode 100644 index 000000000..e97c3e203 --- /dev/null +++ b/docs/doc_examples/691fe20d467324ed43a36fd15852c492.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.follow({ + index: "kibana_sample_data_ecommerce", + wait_for_active_shards: 1, + remote_cluster: "clusterB", + leader_index: "kibana_sample_data_ecommerce2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/692606cc6d6462becc321d92961a3bac.asciidoc b/docs/doc_examples/692606cc6d6462becc321d92961a3bac.asciidoc new file mode 100644 index 000000000..368ac4df2 --- /dev/null +++ b/docs/doc_examples/692606cc6d6462becc321d92961a3bac.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.textStructure.testGrokPattern({ + grok_pattern: "Hello %{WORD:first_name} %{WORD:last_name}", + text: ["Hello John Doe", "this does not match"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/69582847099ee62ed34feddfaba83ef6.asciidoc b/docs/doc_examples/69582847099ee62ed34feddfaba83ef6.asciidoc new file mode 100644 index 000000000..836a958ed --- /dev/null +++ b/docs/doc_examples/69582847099ee62ed34feddfaba83ef6.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + quantity: { + histogram: { + field: "quantity", + interval: 10, + missing: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc b/docs/doc_examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc new file mode 100644 index 000000000..55aba4454 --- /dev/null +++ b/docs/doc_examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + require_field_match: false, + fields: { + body: { + pre_tags: [""], + post_tags: [""], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc b/docs/doc_examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc new file mode 100644 index 000000000..07b79978d --- /dev/null +++ b/docs/doc_examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-index-000001", + id: 1, + stored_fields: "tags,counter", +}); +console.log(response); +---- diff --git a/docs/doc_examples/69a7be47f85138b10437113ab2f0d72d.asciidoc b/docs/doc_examples/69a7be47f85138b10437113ab2f0d72d.asciidoc deleted file mode 100644 index c5f90d612..000000000 --- a/docs/doc_examples/69a7be47f85138b10437113ab2f0d72d.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -// This file is autogenerated, DO NOT EDIT 
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.get({ - index: 'twitter', - id: '2', - routing: 'user1', - stored_fields: 'tags,counter' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/69ab708fe65a75f870223d2289c3d171.asciidoc b/docs/doc_examples/69ab708fe65a75f870223d2289c3d171.asciidoc new file mode 100644 index 000000000..58f053630 --- /dev/null +++ b/docs/doc_examples/69ab708fe65a75f870223d2289c3d171.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + description: "Hide my IP", + processors: [ + { + redact: { + field: "message", + patterns: ["%{IP:REDACTED}", "%{EMAILADDRESS:REDACTED}"], + prefix: "*", + suffix: "*", + }, + }, + ], + }, + docs: [ + { + _source: { + message: "55.3.244.1 GET /index.html 15824 0.043 test@elastic.co", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/54a770f053f3225ea0d1e34334232411.asciidoc b/docs/doc_examples/69c07cfdf8054c301cd6186c5d71aa02.asciidoc similarity index 72% rename from docs/doc_examples/54a770f053f3225ea0d1e34334232411.asciidoc rename to docs/doc_examples/69c07cfdf8054c301cd6186c5d71aa02.asciidoc index e82c50e6a..a367d97aa 100644 --- a/docs/doc_examples/54a770f053f3225ea0d1e34334232411.asciidoc +++ b/docs/doc_examples/69c07cfdf8054c301cd6186c5d71aa02.asciidoc @@ -4,9 +4,7 @@ [source, js] ---- const response = await client.updateByQuery({ - index: 'twitter', - scroll_size: '100' -}) -console.log(response) + index: "my-index-000001,my-index-000002", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/69d5710bdec73041c66f21d5f96637e8.asciidoc b/docs/doc_examples/69d5710bdec73041c66f21d5f96637e8.asciidoc new file mode 100644 index 000000000..a4e5f6f09 --- /dev/null +++ b/docs/doc_examples/69d5710bdec73041c66f21d5f96637e8.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index_long", + mappings: { + properties: { + field: { + type: "date_nanos", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/69d9b8fd364596aa37eae6864d8a6d89.asciidoc b/docs/doc_examples/69d9b8fd364596aa37eae6864d8a6d89.asciidoc new file mode 100644 index 000000000..04aafb486 --- /dev/null +++ b/docs/doc_examples/69d9b8fd364596aa37eae6864d8a6d89.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: ".watcher-history*", + pretty: "true", + sort: [ + { + "result.execution_time": "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/69daf5ec2a9bc07096e1833286c36076.asciidoc b/docs/doc_examples/69daf5ec2a9bc07096e1833286c36076.asciidoc new file mode 100644 index 000000000..426c6447d --- /dev/null +++ b/docs/doc_examples/69daf5ec2a9bc07096e1833286c36076.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "timeseries_template", + index_patterns: ["timeseries-*"], + template: { + 
settings: { + number_of_shards: 1, + number_of_replicas: 1, + "index.lifecycle.name": "timeseries_policy", + "index.lifecycle.rollover_alias": "timeseries", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc b/docs/doc_examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc new file mode 100644 index 000000000..a17ce6569 --- /dev/null +++ b/docs/doc_examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.migration.deprecations({ + index: "logstash-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a1702dd50690cae833572e48a0ddf25.asciidoc b/docs/doc_examples/6a1702dd50690cae833572e48a0ddf25.asciidoc index f4959e703..16ed57458 100644 --- a/docs/doc_examples/6a1702dd50690cae833572e48a0ddf25.asciidoc +++ b/docs/doc_examples/6a1702dd50690cae833572e48a0ddf25.asciidoc @@ -4,18 +4,12 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'Will Smith', - fields: [ - 'title', - '*_name' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "Will Smith", + fields: ["title", "*_name"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/6a350a17701e8c8158407191f2718b66.asciidoc b/docs/doc_examples/6a350a17701e8c8158407191f2718b66.asciidoc new file mode 100644 index 000000000..e9a2ea7cb --- /dev/null +++ b/docs/doc_examples/6a350a17701e8c8158407191f2718b66.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.unfollow({ + index: "follower_index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a3a578ce37fb2c63ccfab7f75db9bae.asciidoc b/docs/doc_examples/6a3a578ce37fb2c63ccfab7f75db9bae.asciidoc new file mode 100644 index 000000000..da66cc8b8 --- /dev/null +++ b/docs/doc_examples/6a3a578ce37fb2c63ccfab7f75db9bae.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "ingest.geoip.downloader.enabled": false, + "indices.lifecycle.history_index_enabled": false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc b/docs/doc_examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc new file mode 100644 index 000000000..34eea3284 --- /dev/null +++ b/docs/doc_examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.getPipeline({ + id: "my-pipeline-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a3f06962cceb3dfd3cd4fb5c679fa75.asciidoc b/docs/doc_examples/6a3f06962cceb3dfd3cd4fb5c679fa75.asciidoc new file mode 100644 index 000000000..174de7224 --- /dev/null +++ b/docs/doc_examples/6a3f06962cceb3dfd3cd4fb5c679fa75.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.indices.analyze({ + index: "my-index-000001", + tokenizer: "keyword", + char_filter: ["my_mappings_char_filter"], + text: "I'm delighted about it :(", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a4679531e64c492fce16dc12de6dcb0.asciidoc b/docs/doc_examples/6a4679531e64c492fce16dc12de6dcb0.asciidoc deleted file mode 100644 index 344a431a2..000000000 --- a/docs/doc_examples/6a4679531e64c492fce16dc12de6dcb0.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre', - order: { - _count: 'asc' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/6a50c1c53673fe9cc3cbda38a2853cdd.asciidoc b/docs/doc_examples/6a50c1c53673fe9cc3cbda38a2853cdd.asciidoc new file mode 100644 index 000000000..fe9c85ee0 --- /dev/null +++ b/docs/doc_examples/6a50c1c53673fe9cc3cbda38a2853cdd.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.deleteAsync({ + id: "FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a55dbba114c6c1408474f7e9cfdbb94.asciidoc b/docs/doc_examples/6a55dbba114c6c1408474f7e9cfdbb94.asciidoc new file mode 100644 index 000000000..70459caa9 --- /dev/null +++ b/docs/doc_examples/6a55dbba114c6c1408474f7e9cfdbb94.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_unverified_backup", + verify: "false", + repository: { + type: "fs", + settings: { + location: "my_unverified_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a81d00f0d73bc5985e76b3cadab645e.asciidoc b/docs/doc_examples/6a81d00f0d73bc5985e76b3cadab645e.asciidoc deleted file mode 100644 index 9a500641a..000000000 --- a/docs/doc_examples/6a81d00f0d73bc5985e76b3cadab645e.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - tag: { - type: 'text', - fielddata: true, - fielddata_frequency_filter: { - min: 0.001, - max: 0.1, - min_segment_size: 500 - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/6a9655fe22fa5db7a540c145bcf1fb31.asciidoc b/docs/doc_examples/6a9655fe22fa5db7a540c145bcf1fb31.asciidoc new file mode 100644 index 000000000..f5f3bbd42 --- /dev/null +++ b/docs/doc_examples/6a9655fe22fa5db7a540c145bcf1fb31.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "stats-index", + id: 1, + document: { + agg_metric: { + min: -302.5, + max: 702.3, + sum: 200, + value_count: 25, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "stats-index", + id: 2, + document: { + agg_metric: { + min: -93, + max: 1702.3, + sum: 300, + 
value_count: 25, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/6a969ebe7490d93d35be895b14e5a42a.asciidoc b/docs/doc_examples/6a969ebe7490d93d35be895b14e5a42a.asciidoc new file mode 100644 index 000000000..fed00c64b --- /dev/null +++ b/docs/doc_examples/6a969ebe7490d93d35be895b14e5a42a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.get({ + index: "logs-my_app-default", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6aa2941855d13f365f70aa8767ecb137.asciidoc b/docs/doc_examples/6aa2941855d13f365f70aa8767ecb137.asciidoc new file mode 100644 index 000000000..ff7934808 --- /dev/null +++ b/docs/doc_examples/6aa2941855d13f365f70aa8767ecb137.asciidoc @@ -0,0 +1,60 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + city: { + type: "text", + fields: { + raw: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + city: "New York", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + city: "York", + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + match: { + city: "york", + }, + }, + sort: { + "city.raw": "asc", + }, + aggs: { + Cities: { + terms: { + field: "city.raw", + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/6aca241c0361d26f134712821e2d09a9.asciidoc b/docs/doc_examples/6aca241c0361d26f134712821e2d09a9.asciidoc new file mode 100644 index 000000000..63445d9ea --- /dev/null +++ b/docs/doc_examples/6aca241c0361d26f134712821e2d09a9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.cleanupRepository({ + name: "my_repository", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6af9dc1c3240aa8e623ff3622bcb1b48.asciidoc b/docs/doc_examples/6af9dc1c3240aa8e623ff3622bcb1b48.asciidoc new file mode 100644 index 000000000..41332985c --- /dev/null +++ b/docs/doc_examples/6af9dc1c3240aa8e623ff3622bcb1b48.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.exclude._ip": "192.168.2.*", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b0288acb739c4667d41339e5100c327.asciidoc b/docs/doc_examples/6b0288acb739c4667d41339e5100c327.asciidoc new file mode 100644 index 000000000..bc220ac1f --- /dev/null +++ b/docs/doc_examples/6b0288acb739c4667d41339e5100c327.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: { + query: "this is a testt", + fuzziness: "AUTO", + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/6b0d492c0f50103fefeab385a7bebd01.asciidoc b/docs/doc_examples/6b0d492c0f50103fefeab385a7bebd01.asciidoc new file mode 100644 index 000000000..494d76385 --- /dev/null +++ b/docs/doc_examples/6b0d492c0f50103fefeab385a7bebd01.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "logs-debug", + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + message: { + type: "text", + }, + level: { + type: "constant_keyword", + value: "debug", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b104a66ab47fc1e1f24a5738f82feb4.asciidoc b/docs/doc_examples/6b104a66ab47fc1e1f24a5738f82feb4.asciidoc new file mode 100644 index 000000000..6565c34d0 --- /dev/null +++ b/docs/doc_examples/6b104a66ab47fc1e1f24a5738f82feb4.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.putAutoFollowPattern({ + name: "beats", + remote_cluster: "leader", + leader_index_patterns: ["metricbeat-*", "packetbeat-*"], + follow_index_pattern: "{{leader_index}}-copy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc b/docs/doc_examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc new file mode 100644 index 000000000..dc348caaf --- /dev/null +++ b/docs/doc_examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.stop(); +console.log(response); +---- diff --git a/docs/doc_examples/6b1e837a8469eca2d03d5c36f5910f34.asciidoc b/docs/doc_examples/6b1e837a8469eca2d03d5c36f5910f34.asciidoc new file mode 100644 index 000000000..496a684a3 --- /dev/null +++ b/docs/doc_examples/6b1e837a8469eca2d03d5c36f5910f34.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + filter_path: "aggregations", + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + t_shirts: { + filter: { + term: { + type: "t-shirt", + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc b/docs/doc_examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc new file mode 100644 index 000000000..a892432a6 --- /dev/null +++ b/docs/doc_examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.shutdown.deleteNode({ + node_id: "USpTGYaBSIKbgSUJR2Z9lg", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc b/docs/doc_examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc new file mode 100644 index 000000000..b7f2cb320 --- /dev/null +++ b/docs/doc_examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` 
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    sparse_vector: {
+      field: "ml.tokens",
+      query_vector: {
+        token1: 0.5,
+        token2: 0.3,
+        token3: 0.2,
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc b/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc
new file mode 100644
index 000000000..3dff97a73
--- /dev/null
+++ b/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.searchApplication.put({
+  name: "my-example-app",
+  search_application: {
+    indices: ["example-index"],
+    template: {
+      script: {
+        lang: "mustache",
+        source:
+          '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n \n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ',
+        params: {
+          query: "",
+          _es_filters: {},
+          _es_aggs: {},
+          _es_sort_fields: {},
+          size: 10,
+          from: 0,
+        },
+        dictionary: {},
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6b77795e9249c8d9865f7a49fd86a863.asciidoc b/docs/doc_examples/6b77795e9249c8d9865f7a49fd86a863.asciidoc
new file mode 100644
index 000000000..fa4b12cc5
--- /dev/null
+++ b/docs/doc_examples/6b77795e9249c8d9865f7a49fd86a863.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    range: {
+      age: {
+        gte: 10,
+        lte: 20,
+        boost: 2,
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc b/docs/doc_examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc
new file mode 100644
index 000000000..ae22a8b4d
--- /dev/null
+++ b/docs/doc_examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc
@@ -0,0 +1,14 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "GET",
+  path: "/_connector/_sync_job",
+  querystring: {
+    status: "pending",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc b/docs/doc_examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc
new file mode 100644
index 000000000..9250d0925
--- /dev/null
+++ b/docs/doc_examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc
@@ -0,0 +1,35 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putTemplate({
+  name: "template_1",
+  index_patterns: ["te*"],
+  order: 0,
+  settings: {
+    number_of_shards: 1,
+  },
+  mappings: {
+    _source: {
+      enabled: false,
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.putTemplate({
+  name: "template_2",
+  index_patterns: ["tes*"],
+  order: 1,
+  settings: {
+    number_of_shards: 1,
+  },
+  mappings: {
+    _source: {
+      enabled: true,
+    },
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/6bbc613bd4f9aec1bbdbabf5db021d28.asciidoc b/docs/doc_examples/6bbc613bd4f9aec1bbdbabf5db021d28.asciidoc
index 9d44450c3..51a32d243 100644
--- a/docs/doc_examples/6bbc613bd4f9aec1bbdbabf5db021d28.asciidoc
+++ b/docs/doc_examples/6bbc613bd4f9aec1bbdbabf5db021d28.asciidoc
@@ -4,30 +4,27 @@
 [source, js]
 ----
 const response = await client.search({
-  body: {
-    query: {
-      bool: {
-        should: [
-          {
-            match: {
-              title: 'quick brown fox'
-            }
+  query: {
+    bool: {
+      should: [
+        {
+          match: {
+            title: "quick brown fox",
           },
-          {
-            match: {
-              'title.original': 'quick brown fox'
-            }
+        },
+        {
+          match: {
+            "title.original": "quick brown fox",
           },
-          {
-            match: {
-              'title.shingles': 'quick brown fox'
-            }
-          }
-        ]
-      }
-    }
-  }
-})
-console.log(response)
+        },
+        {
+          match: {
+            "title.shingles": "quick brown fox",
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/6be70810d6ebd6f09d8a49f9df847765.asciidoc b/docs/doc_examples/6be70810d6ebd6f09d8a49f9df847765.asciidoc
deleted file mode 100644
index 66307ee3c..000000000
--- a/docs/doc_examples/6be70810d6ebd6f09d8a49f9df847765.asciidoc
+++ /dev/null
@@ -1,37 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  index: 'my_index',
-  body: {
-    query: {
-      nested: {
-        path: 'obj1',
-        query: {
-          bool: {
-            must: [
-              {
-                match: {
-                  'obj1.name': 'blue'
-                }
-              },
-              {
-                range: {
-                  'obj1.count': {
-                    gt: 5
-                  }
-                }
-              }
-            ]
-          }
-        },
-        score_mode: 'avg'
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/6bf63f2ec6ba55fcaf1092f48212bf25.asciidoc b/docs/doc_examples/6bf63f2ec6ba55fcaf1092f48212bf25.asciidoc
deleted file mode 100644
index a252f320f..000000000
--- a/docs/doc_examples/6bf63f2ec6ba55fcaf1092f48212bf25.asciidoc
+++ /dev/null
@@ -1,20 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.indices.create({
-  index: 'my_index',
-  body: {
-    mappings: {
-      properties: {
-        user_identifier: {
-          type: 'keyword'
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/6bfa0a9a50c4e94276c7d63af1c31d9e.asciidoc b/docs/doc_examples/6bfa0a9a50c4e94276c7d63af1c31d9e.asciidoc
new file mode 100644
index 000000000..0c0ea6b3e
--- /dev/null
+++ b/docs/doc_examples/6bfa0a9a50c4e94276c7d63af1c31d9e.asciidoc
@@ -0,0 +1,56 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "place",
+  mappings: {
+    properties: {
+      suggest: {
+        type: "completion",
+        contexts: [
+          {
+            name: "place_type",
+            type: "category",
+          },
+          {
+            name: "location",
+            type: "geo",
+            precision: 4,
+          },
+        ],
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.create({
+  index: "place_path_category",
+  mappings: {
+    properties: {
+      suggest: {
+        type: "completion",
+        contexts: [
+          {
+            name: "place_type",
+            type: "category",
+            path: "cat",
+          },
+          {
+            name: "location",
+            type: "geo",
+            precision: 4,
+            path: "loc",
+          },
+        ],
+      },
+      loc: {
+        type: "geo_point",
+      },
+    },
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/6c00dae1a456ae5e854e98e895dca2ab.asciidoc b/docs/doc_examples/6c00dae1a456ae5e854e98e895dca2ab.asciidoc
new file mode 100644
index 000000000..e80bc72a3
--- /dev/null
+++ b/docs/doc_examples/6c00dae1a456ae5e854e98e895dca2ab.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    function_score: {
+      query: {
+        match: {
+          message: "elasticsearch",
+        },
+      },
+      script_score: {
+        script: {
+          source: "Math.log(2 + doc['my-int'].value)",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6c0acbff2df9003ccaf4350c9e2e186e.asciidoc b/docs/doc_examples/6c0acbff2df9003ccaf4350c9e2e186e.asciidoc
new file mode 100644
index 000000000..73adb18c2
--- /dev/null
+++ b/docs/doc_examples/6c0acbff2df9003ccaf4350c9e2e186e.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    bool: {
+      must: {
+        match_all: {},
+      },
+      filter: {
+        geo_polygon: {
+          "person.location": {
+            points: [
+              [-70, 40],
+              [-80, 30],
+              [-90, 20],
+            ],
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6c3f7c8601e8cc13d36eef98a5e2cb34.asciidoc b/docs/doc_examples/6c3f7c8601e8cc13d36eef98a5e2cb34.asciidoc
new file mode 100644
index 000000000..38e73da9a
--- /dev/null
+++ b/docs/doc_examples/6c3f7c8601e8cc13d36eef98a5e2cb34.asciidoc
@@ -0,0 +1,33 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "drivers",
+  mappings: {
+    properties: {
+      driver: {
+        type: "nested",
+        properties: {
+          last_name: {
+            type: "text",
+          },
+          vehicle: {
+            type: "nested",
+            properties: {
+              make: {
+                type: "text",
+              },
+              model: {
+                type: "text",
+              },
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc b/docs/doc_examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc
new file mode 100644
index 000000000..f9d6b2ee4
--- /dev/null
+++ b/docs/doc_examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.recovery({
+  index: "my-index-000001",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6c72460570307f23478100db04a84c8e.asciidoc b/docs/doc_examples/6c72460570307f23478100db04a84c8e.asciidoc
new file mode 100644
index 000000000..b7c25f62e
--- /dev/null
+++ b/docs/doc_examples/6c72460570307f23478100db04a84c8e.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.getComponentTemplate({
+  name: "temp*",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6c72f6791ba9223943f7556c5bfaa728.asciidoc b/docs/doc_examples/6c72f6791ba9223943f7556c5bfaa728.asciidoc
new file mode 100644
index 000000000..93e7af9bf
--- /dev/null
+++ b/docs/doc_examples/6c72f6791ba9223943f7556c5bfaa728.asciidoc
@@ -0,0 +1,24 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  query: {
+    match: {
"user.id": "kimchy", + }, + }, + fields: [ + "user.id", + "http.response.*", + { + field: "@timestamp", + format: "epoch_millis", + }, + ], + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc b/docs/doc_examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc new file mode 100644 index 000000000..624074238 --- /dev/null +++ b/docs/doc_examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.reloadSecureSettings({ + secure_settings_password: "keystore-password", +}); +console.log(response); + +const response1 = await client.nodes.reloadSecureSettings({ + node_id: "nodeId1,nodeId2", + secure_settings_password: "keystore-password", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc b/docs/doc_examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc new file mode 100644 index 000000000..49ffbd241 --- /dev/null +++ b/docs/doc_examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + username: "myuser", + realm_name: "native1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6cd083045bf06e80b83889a939a18451.asciidoc b/docs/doc_examples/6cd083045bf06e80b83889a939a18451.asciidoc new file mode 100644 index 000000000..9fb10298a --- /dev/null +++ b/docs/doc_examples/6cd083045bf06e80b83889a939a18451.asciidoc @@ -0,0 +1,95 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + user: { + type: "nested", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + group: "fans", + user: [ + { + first: "John", + last: "Smith", + }, + { + first: "Alice", + last: "White", + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + nested: { + path: "user", + query: { + bool: { + must: [ + { + match: { + "user.first": "Alice", + }, + }, + { + match: { + "user.last": "Smith", + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + nested: { + path: "user", + query: { + bool: { + must: [ + { + match: { + "user.first": "Alice", + }, + }, + { + match: { + "user.last": "White", + }, + }, + ], + }, + }, + inner_hits: { + highlight: { + fields: { + "user.first": {}, + }, + }, + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/6ce8334def48552ba7d44025580d9105.asciidoc b/docs/doc_examples/6ce8334def48552ba7d44025580d9105.asciidoc new file mode 100644 index 000000000..2333655e3 --- /dev/null +++ b/docs/doc_examples/6ce8334def48552ba7d44025580d9105.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "", + aliases: { + "my-alias": {}, + }, +}); 
+console.log(response);
+----
diff --git a/docs/doc_examples/6cf3307c00f464c46475e352e067d714.asciidoc b/docs/doc_examples/6cf3307c00f464c46475e352e067d714.asciidoc
new file mode 100644
index 000000000..489e7448a
--- /dev/null
+++ b/docs/doc_examples/6cf3307c00f464c46475e352e067d714.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my_geoshapes",
+  query: {
+    bool: {
+      must: {
+        match_all: {},
+      },
+      filter: {
+        geo_bounding_box: {
+          "pin.location": {
+            top_left: {
+              lat: 40.73,
+              lon: -74.1,
+            },
+            bottom_right: {
+              lat: 40.01,
+              lon: -71.12,
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6d48f83c4a36d0544d876d3eff48dcef.asciidoc b/docs/doc_examples/6d48f83c4a36d0544d876d3eff48dcef.asciidoc
new file mode 100644
index 000000000..3dd4b3ef8
--- /dev/null
+++ b/docs/doc_examples/6d48f83c4a36d0544d876d3eff48dcef.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.slm.executeRetention();
+console.log(response);
+----
diff --git a/docs/doc_examples/6d81c749ff9554044ee5f3ad92dcb89a.asciidoc b/docs/doc_examples/6d81c749ff9554044ee5f3ad92dcb89a.asciidoc
new file mode 100644
index 000000000..2b2033138
--- /dev/null
+++ b/docs/doc_examples/6d81c749ff9554044ee5f3ad92dcb89a.asciidoc
@@ -0,0 +1,55 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ilm.putLifecycle({
+  name: "my-weather-sensor-lifecycle-policy",
+  policy: {
+    phases: {
+      hot: {
+        actions: {
+          rollover: {
+            max_age: "1d",
+            max_primary_shard_size: "50gb",
+          },
+        },
+      },
+      warm: {
+        min_age: "30d",
+        actions: {
+          shrink: {
+            number_of_shards: 1,
+          },
+          forcemerge: {
+            max_num_segments: 1,
+          },
+        },
+      },
+      cold: {
+        min_age: "60d",
+        actions: {
+          searchable_snapshot: {
+            snapshot_repository: "found-snapshots",
+          },
+        },
+      },
+      frozen: {
+        min_age: "90d",
+        actions: {
+          searchable_snapshot: {
+            snapshot_repository: "found-snapshots",
+          },
+        },
+      },
+      delete: {
+        min_age: "735d",
+        actions: {
+          delete: {},
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6db118771354792646229e7a3c30c7e9.asciidoc b/docs/doc_examples/6db118771354792646229e7a3c30c7e9.asciidoc
new file mode 100644
index 000000000..9623daabc
--- /dev/null
+++ b/docs/doc_examples/6db118771354792646229e7a3c30c7e9.asciidoc
@@ -0,0 +1,67 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.bulk({
+  index: "my-index-000001",
+  refresh: "true",
+  operations: [
+    {
+      index: {},
+    },
+    {
+      timestamp: 1516729294000,
+      temperature: 200,
+      voltage: 5.2,
+      node: "a",
+    },
+    {
+      index: {},
+    },
+    {
+      timestamp: 1516642894000,
+      temperature: 201,
+      voltage: 5.8,
+      node: "b",
+    },
+    {
+      index: {},
+    },
+    {
+      timestamp: 1516556494000,
+      temperature: 202,
+      voltage: 5.1,
+      node: "a",
+    },
+    {
+      index: {},
+    },
+    {
+      timestamp: 1516470094000,
+      temperature: 198,
+      voltage: 5.6,
+      node: "b",
+    },
+    {
+      index: {},
+    },
+    {
+      timestamp: 1516383694000,
+      temperature: 200,
+      voltage: 4.2,
+      node: "c",
+    },
+    {
+      index: {},
+    },
+    {
+      timestamp: 1516297294000,
+      temperature: 202,
+      voltage: 4,
+      node: "c",
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6dbfe5565a95508e65d304131847f9fc.asciidoc b/docs/doc_examples/6dbfe5565a95508e65d304131847f9fc.asciidoc
new file mode 100644
index 000000000..f308c6785
--- /dev/null
+++ b/docs/doc_examples/6dbfe5565a95508e65d304131847f9fc.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  tokenizer: "standard",
+  filter: [
+    {
+      type: "edge_ngram",
+      min_gram: 1,
+      max_gram: 2,
+    },
+  ],
+  text: "the quick brown fox jumps",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc b/docs/doc_examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc
new file mode 100644
index 000000000..fdbb82230
--- /dev/null
+++ b/docs/doc_examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.esql.query({
+  query:
+    '\n FROM library\n | EVAL year = DATE_EXTRACT("year", release_date)\n | WHERE page_count > 300 AND author == "Frank Herbert"\n | STATS count = COUNT(*) by year\n | WHERE count > 0\n | LIMIT 5\n ',
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc b/docs/doc_examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc
new file mode 100644
index 000000000..76f1aa692
--- /dev/null
+++ b/docs/doc_examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.invalidateToken({
+  username: "myuser",
+  realm_name: "saml1",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc b/docs/doc_examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc
new file mode 100644
index 000000000..018089d85
--- /dev/null
+++ b/docs/doc_examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.delete({
+  repository: "my_repository",
+  snapshot: "snapshot_2,snapshot_3",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6ddd4e657efbf45def430a6419825796.asciidoc b/docs/doc_examples/6ddd4e657efbf45def430a6419825796.asciidoc
new file mode 100644
index 000000000..cb9376459
--- /dev/null
+++ b/docs/doc_examples/6ddd4e657efbf45def430a6419825796.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_inference/completion/azure_ai_studio_completion",
+  body: {
+    service: "azureaistudio",
+    service_settings: {
+      api_key: "",
+      target: "",
+      provider: "",
+      endpoint_type: "",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6e000496a1fa8b57148518eaad692f35.asciidoc b/docs/doc_examples/6e000496a1fa8b57148518eaad692f35.asciidoc
new file mode 100644
index 000000000..c909cdf1a
--- /dev/null
+++ b/docs/doc_examples/6e000496a1fa8b57148518eaad692f35.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    match_none: {},
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6e0b675eff7ed73c09a76a415930a486.asciidoc b/docs/doc_examples/6e0b675eff7ed73c09a76a415930a486.asciidoc
new file mode 100644
index 000000000..9ddf0e410
--- /dev/null
+++ b/docs/doc_examples/6e0b675eff7ed73c09a76a415930a486.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      "my-join-field": {
+        type: "join",
+        relations: {
+          "my-parent": "my-child",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6e1157f3184fa192d47a3d0e3ea17a6c.asciidoc b/docs/doc_examples/6e1157f3184fa192d47a3d0e3ea17a6c.asciidoc
new file mode 100644
index 000000000..97695fc48
--- /dev/null
+++ b/docs/doc_examples/6e1157f3184fa192d47a3d0e3ea17a6c.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "letter_unique_pos_example",
+  settings: {
+    analysis: {
+      analyzer: {
+        letter_unique_pos: {
+          tokenizer: "letter",
+          filter: ["unique_pos"],
+        },
+      },
+      filter: {
+        unique_pos: {
+          type: "unique",
+          only_on_same_position: true,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6e1ae8d6103e0b77f14fb0ea1bfb7ffa.asciidoc b/docs/doc_examples/6e1ae8d6103e0b77f14fb0ea1bfb7ffa.asciidoc
new file mode 100644
index 000000000..2b2e6635e
--- /dev/null
+++ b/docs/doc_examples/6e1ae8d6103e0b77f14fb0ea1bfb7ffa.asciidoc
@@ -0,0 +1,14 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "example",
+  document: {
+    location:
+      "GEOMETRYCOLLECTION (POINT (1000.0 100.0), LINESTRING (1001.0 100.0, 1002.0 100.0))",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6e86225ed4a6e3be8078b83ef301f731.asciidoc b/docs/doc_examples/6e86225ed4a6e3be8078b83ef301f731.asciidoc
new file mode 100644
index 000000000..13215e509
--- /dev/null
+++ b/docs/doc_examples/6e86225ed4a6e3be8078b83ef301f731.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  query: {
+    percolate: {
+      field: "query",
+      document: {
+        message: "A new bonsai tree in the office",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6ea062455229151e311869a81ee40252.asciidoc b/docs/doc_examples/6ea062455229151e311869a81ee40252.asciidoc
new file mode 100644
index 000000000..b8f766fc4
--- /dev/null
+++ b/docs/doc_examples/6ea062455229151e311869a81ee40252.asciidoc
@@ -0,0 +1,34 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    match: {
"kimchy", + }, + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "_all", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "*", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/6edfc35a66afd9b884431fccf48fdbf5.asciidoc b/docs/doc_examples/6edfc35a66afd9b884431fccf48fdbf5.asciidoc new file mode 100644 index 000000000..ffbcb2402 --- /dev/null +++ b/docs/doc_examples/6edfc35a66afd9b884431fccf48fdbf5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + "lowercase", + { + type: "synonym_graph", + synonyms: ["pc => personal computer", "computer, pc, laptop"], + }, + ], + text: "Check how PC synonyms work", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6eead05dd3b04722ef0ea5644c2e047d.asciidoc b/docs/doc_examples/6eead05dd3b04722ef0ea5644c2e047d.asciidoc new file mode 100644 index 000000000..8c778d24d --- /dev/null +++ b/docs/doc_examples/6eead05dd3b04722ef0ea5644c2e047d.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + total_sales: { + sum: { + field: "price", + }, + }, + "t-shirts": { + filter: { + term: { + type: "t-shirt", + }, + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + "t-shirt-percentage": { + bucket_script: { + buckets_path: { + tShirtSales: "t-shirts>sales", + totalSales: "total_sales", + }, + script: "params.tShirtSales / params.totalSales * 100", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f0389ac52808df23bb6081a1acd4eed.asciidoc b/docs/doc_examples/6f0389ac52808df23bb6081a1acd4eed.asciidoc new file mode 100644 index 000000000..7ca6b0211 --- /dev/null +++ b/docs/doc_examples/6f0389ac52808df23bb6081a1acd4eed.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.enableUser({ + username: "logstash_system", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f07152055e99416deb10e95b428b847.asciidoc b/docs/doc_examples/6f07152055e99416deb10e95b428b847.asciidoc new file mode 100644 index 000000000..5257c944a --- /dev/null +++ b/docs/doc_examples/6f07152055e99416deb10e95b428b847.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "edge_ngram_custom_example", + settings: { + analysis: { + analyzer: { + default: { + tokenizer: "whitespace", + filter: ["3_5_edgegrams"], + }, + }, + filter: { + "3_5_edgegrams": { + type: "edge_ngram", + min_gram: 3, + max_gram: 5, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f21a878fee3b43c5332b81aaddbeac7.asciidoc b/docs/doc_examples/6f21a878fee3b43c5332b81aaddbeac7.asciidoc deleted file 
index 08d4db379..000000000
--- a/docs/doc_examples/6f21a878fee3b43c5332b81aaddbeac7.asciidoc
+++ /dev/null
@@ -1,23 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  body: {
-    query: {
-      query_string: {
-        fields: [
-          'title',
-          'content'
-        ],
-        query: 'this OR that OR thus',
-        type: 'cross_fields',
-        minimum_should_match: 2
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/6f34e27481460a95e59ffbacb76bd637.asciidoc b/docs/doc_examples/6f34e27481460a95e59ffbacb76bd637.asciidoc
new file mode 100644
index 000000000..719ffc1b4
--- /dev/null
+++ b/docs/doc_examples/6f34e27481460a95e59ffbacb76bd637.asciidoc
@@ -0,0 +1,46 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        my_custom_analyzer: {
+          char_filter: ["emoticons"],
+          tokenizer: "punctuation",
+          filter: ["lowercase", "english_stop"],
+        },
+      },
+      tokenizer: {
+        punctuation: {
+          type: "pattern",
+          pattern: "[ .,!?]",
+        },
+      },
+      char_filter: {
+        emoticons: {
+          type: "mapping",
+          mappings: [":) => _happy_", ":( => _sad_"],
+        },
+      },
+      filter: {
+        english_stop: {
+          type: "stop",
+          stopwords: "_english_",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.analyze({
+  index: "my-index-000001",
+  analyzer: "my_custom_analyzer",
+  text: "I'm a :) person, and you?",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc
new file mode 100644
index 000000000..a9e30c9b3
--- /dev/null
+++ b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.bulkUpdateApiKeys({});
+console.log(response);
+----
diff --git a/docs/doc_examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc b/docs/doc_examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc
new file mode 100644
index 000000000..64d437f6f
--- /dev/null
+++ b/docs/doc_examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.sql.translate({
+  query: "SELECT * FROM library ORDER BY page_count DESC",
+  fetch_size: 10,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc b/docs/doc_examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc
new file mode 100644
index 000000000..f105ce8e3
--- /dev/null
+++ b/docs/doc_examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc
@@ -0,0 +1,33 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-image-index",
+  size: 3,
+  query: {
+    bool: {
+      should: [
+        {
+          match: {
+            title: {
+              query: "mountain lake",
+              boost: 1,
+            },
+          },
+        },
+        {
+          knn: {
+            field: "image-vector",
+            query_vector: [-5, 9, -12],
+            k: 10,
+            boost: 2,
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6f5adbd55a3a2760e7fe9d32df18b1a1.asciidoc b/docs/doc_examples/6f5adbd55a3a2760e7fe9d32df18b1a1.asciidoc
new file mode 100644
index 000000000..3287480a9
--- /dev/null
+++ b/docs/doc_examples/6f5adbd55a3a2760e7fe9d32df18b1a1.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "logs",
+  document: {
+    timestamp: "2015-05-17T18:12:07.613Z",
+    request: "GET index.html",
+    status_code: 404,
+    message: "Error: File not found",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6f6d5a4a90e1265822628d4ced963639.asciidoc b/docs/doc_examples/6f6d5a4a90e1265822628d4ced963639.asciidoc
new file mode 100644
index 000000000..674318811
--- /dev/null
+++ b/docs/doc_examples/6f6d5a4a90e1265822628d4ced963639.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  document: {
+    create_date: "2015/09/02",
+  },
+});
+console.log(response);
+
+const response1 = await client.indices.getMapping({
+  index: "my-index-000001",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/6f842819c50e8490080dd085e0c6aca3.asciidoc b/docs/doc_examples/6f842819c50e8490080dd085e0c6aca3.asciidoc
new file mode 100644
index 000000000..c9537fe0f
--- /dev/null
+++ b/docs/doc_examples/6f842819c50e8490080dd085e0c6aca3.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "index",
+  size: 0,
+  aggs: {
+    foo_terms: {
+      terms: {
+        field: "foo",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc b/docs/doc_examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc
new file mode 100644
index 000000000..bbf47db33
--- /dev/null
+++ b/docs/doc_examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.logstash.getPipeline({
+  id: "my_pipeline",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6f8a682c908b826ca90cadd9d2f582b4.asciidoc b/docs/doc_examples/6f8a682c908b826ca90cadd9d2f582b4.asciidoc
new file mode 100644
index 000000000..61c814366
--- /dev/null
+++ b/docs/doc_examples/6f8a682c908b826ca90cadd9d2f582b4.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  stored_fields: ["user", "postDate"],
+  query: {
+    term: {
+      user: "kimchy",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6fa570ae7039171e2ab722344ec1063f.asciidoc b/docs/doc_examples/6fa570ae7039171e2ab722344ec1063f.asciidoc
new file mode 100644
index 000000000..0e259c87b
--- /dev/null
+++ b/docs/doc_examples/6fa570ae7039171e2ab722344ec1063f.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.getFieldMapping({
+  index: "my-index-000001",
+  fields: "user",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6faf10a73f7d5fffbcb037bdb2cbaff8.asciidoc b/docs/doc_examples/6faf10a73f7d5fffbcb037bdb2cbaff8.asciidoc
deleted file mode 100644
index a39044e28..000000000
--- a/docs/doc_examples/6faf10a73f7d5fffbcb037bdb2cbaff8.asciidoc
+++ /dev/null
@@ -1,24 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  index: 'sales',
-  size: '0',
-  body: {
-    aggs: {
-      dayOfWeek: {
-        terms: {
-          script: {
-            lang: 'painless',
-            source: "doc['date'].value.dayOfWeekEnum.value"
-          }
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/6fbb88f399618e1b47412082062ce2bd.asciidoc b/docs/doc_examples/6fbb88f399618e1b47412082062ce2bd.asciidoc
new file mode 100644
index 000000000..ca9fca0c6
--- /dev/null
+++ b/docs/doc_examples/6fbb88f399618e1b47412082062ce2bd.asciidoc
@@ -0,0 +1,44 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transform.previewTransform({
+  source: {
+    index: "kibana_sample_data_logs",
+  },
+  pivot: {
+    group_by: {
+      timestamp: {
+        date_histogram: {
+          field: "timestamp",
+          fixed_interval: "1h",
+        },
+      },
+    },
+    aggregations: {
+      "bytes.max": {
+        max: {
+          field: "bytes",
+        },
+      },
+      top: {
+        top_metrics: {
+          metrics: [
+            {
+              field: "clientip",
+            },
+            {
+              field: "geo.src",
+            },
+          ],
+          sort: {
+            bytes: "desc",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6fbbf40cab0187f544ff7bca31d18d57.asciidoc b/docs/doc_examples/6fbbf40cab0187f544ff7bca31d18d57.asciidoc
new file mode 100644
index 000000000..a8101d857
--- /dev/null
+++ b/docs/doc_examples/6fbbf40cab0187f544ff7bca31d18d57.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "example",
+  document: {
+    location: {
+      type: "Polygon",
+      coordinates: [
+        [
+          [100, 0],
+          [101, 0],
+          [101, 1],
+          [100, 1],
+          [100, 0],
+        ],
+        [
+          [100.2, 0.2],
+          [100.8, 0.2],
+          [100.8, 0.8],
+          [100.2, 0.8],
+          [100.2, 0.2],
+        ],
+      ],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc b/docs/doc_examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc
new file mode 100644
index 000000000..f55316e25
--- /dev/null
+++ b/docs/doc_examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.searchableSnapshots.clearCache({
+  index: "my-index",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6fd82baa17a48e09e3d2eed514af7f46.asciidoc b/docs/doc_examples/6fd82baa17a48e09e3d2eed514af7f46.asciidoc
new file mode 100644
index 000000000..42f8f8f12
--- /dev/null
+++ b/docs/doc_examples/6fd82baa17a48e09e3d2eed514af7f46.asciidoc
@@ -0,0 +1,35 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "example",
+  document: {
+    location: {
"MultiLineString", + coordinates: [ + [ + [102, 2], + [103, 2], + [103, 3], + [102, 3], + ], + [ + [100, 0], + [101, 0], + [101, 1], + [100, 1], + ], + [ + [100.2, 0.2], + [100.8, 0.2], + [100.8, 0.8], + [100.2, 0.8], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc b/docs/doc_examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc new file mode 100644 index 000000000..324d41871 --- /dev/null +++ b/docs/doc_examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteDataLifecycle({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6febf0e6883b23b15ac213abc4bac326.asciidoc b/docs/doc_examples/6febf0e6883b23b15ac213abc4bac326.asciidoc new file mode 100644 index 000000000..b44743e74 --- /dev/null +++ b/docs/doc_examples/6febf0e6883b23b15ac213abc4bac326.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "place", + suggest: { + place_suggestion: { + prefix: "tim", + completion: { + field: "suggest", + size: 10, + contexts: { + location: { + lat: 43.662, + lon: -79.38, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7011fcdd231804f9c3894154ae2c3fbc.asciidoc b/docs/doc_examples/7011fcdd231804f9c3894154ae2c3fbc.asciidoc new file mode 100644 index 000000000..ccbea663c --- /dev/null +++ b/docs/doc_examples/7011fcdd231804f9c3894154ae2c3fbc.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + "text.tokens": { + type: "sparse_vector", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc b/docs/doc_examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc new file mode 100644 index 000000000..64ab745d0 --- /dev/null +++ b/docs/doc_examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + id: "VuaCfGcBCdbkQm-e5aOx", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7021ddb273a3a00847324d2f670c4c04.asciidoc b/docs/doc_examples/7021ddb273a3a00847324d2f670c4c04.asciidoc new file mode 100644 index 000000000..c1cc5cbae --- /dev/null +++ b/docs/doc_examples/7021ddb273a3a00847324d2f670c4c04.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", + query: { + match: { + title: { + query: "mountain lake", + boost: 0.9, + }, + }, + }, + knn: [ + { + field: "image-vector", + query_vector: [54, 10, -2], + k: 5, + num_candidates: 50, + boost: 0.1, + }, + { + field: "title-vector", + query_vector: [1, 20, -52, 23, 10], + k: 10, + num_candidates: 10, + boost: 0.5, + }, + ], + size: 10, +}); +console.log(response); +---- diff --git 
diff --git a/docs/doc_examples/7067a498bb6c788854a26443a64b843a.asciidoc b/docs/doc_examples/7067a498bb6c788854a26443a64b843a.asciidoc
new file mode 100644
index 000000000..40a3c3ca0
--- /dev/null
+++ b/docs/doc_examples/7067a498bb6c788854a26443a64b843a.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  runtime_mappings: {
+    "amount.signed": {
+      type: "double",
+      script:
+        "\n double amount = doc['amount'].value;\n if (doc['type'].value == 'expense') {\n amount *= -1;\n }\n emit(amount);\n ",
+    },
+  },
+  query: {
+    bool: {
+      filter: {
+        range: {
+          "amount.signed": {
+            lt: 10,
+          },
+        },
+      },
+    },
+  },
+  fields: [
+    {
+      field: "amount.signed",
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/708e7ec681be41791f232817a07cda82.asciidoc b/docs/doc_examples/708e7ec681be41791f232817a07cda82.asciidoc
new file mode 100644
index 000000000..3533d112b
--- /dev/null
+++ b/docs/doc_examples/708e7ec681be41791f232817a07cda82.asciidoc
@@ -0,0 +1,14 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.get({
+  repository: "my_repository",
+  snapshot: "snapshot*",
+  size: 2,
+  sort: "name",
+  offset: 2,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/70bbe14bc4d5a5d58e81ab2b02408817.asciidoc b/docs/doc_examples/70bbe14bc4d5a5d58e81ab2b02408817.asciidoc
new file mode 100644
index 000000000..cac8da9a9
--- /dev/null
+++ b/docs/doc_examples/70bbe14bc4d5a5d58e81ab2b02408817.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRoleMapping({
+  name: "users",
+  roles: ["user"],
+  rules: {
+    field: {
+      dn: "cn=John Doe,ou=example,o=com",
+    },
+  },
+  enabled: true,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/70c736ecb3746dbe839af0e468712805.asciidoc b/docs/doc_examples/70c736ecb3746dbe839af0e468712805.asciidoc
new file mode 100644
index 000000000..7691586b6
--- /dev/null
+++ b/docs/doc_examples/70c736ecb3746dbe839af0e468712805.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transform.deleteTransform({
+  transform_id: "ecommerce_transform",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc b/docs/doc_examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc
new file mode 100644
index 000000000..ee695268c
--- /dev/null
+++ b/docs/doc_examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.updateTrainedModelDeployment({
+  model_id: "elastic__distilbert-base-uncased-finetuned-conll03-english",
+  number_of_allocations: 4,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/70f0aa5853697e265ef3b1df72940951.asciidoc b/docs/doc_examples/70f0aa5853697e265ef3b1df72940951.asciidoc
deleted file mode 100644
index 626a72206..000000000
--- a/docs/doc_examples/70f0aa5853697e265ef3b1df72940951.asciidoc
+++ /dev/null
@@ -1,42 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response0 = await client.index({
-  index: 'my_index',
-  id: '1',
-  refresh: true,
-  body: {
-    date: '2015-10-01T00:30:00Z'
-  }
-})
-console.log(response0)
-
-const response1 = await client.index({
-  index: 'my_index',
-  id: '2',
-  refresh: true,
-  body: {
-    date: '2015-10-01T01:30:00Z'
-  }
-})
-console.log(response1)
-
-const response2 = await client.search({
-  index: 'my_index',
-  size: '0',
-  body: {
-    aggs: {
-      by_day: {
-        date_histogram: {
-          field: 'date',
-          calendar_interval: 'day'
-        }
-      }
-    }
-  }
-})
-console.log(response2)
-----
-
diff --git a/docs/doc_examples/70f89dd6b71ea890ad3cf47d83e43344.asciidoc b/docs/doc_examples/70f89dd6b71ea890ad3cf47d83e43344.asciidoc
new file mode 100644
index 000000000..efaf4c5c5
--- /dev/null
+++ b/docs/doc_examples/70f89dd6b71ea890ad3cf47d83e43344.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "my-pipeline",
+  description: "My optional pipeline description",
+  processors: [
+    {
+      set: {
+        description: "My optional processor description",
+        field: "my-long-field",
+        value: 10,
+      },
+    },
+    {
+      set: {
+        description: "Set 'my-boolean-field' to true",
+        field: "my-boolean-field",
+        value: true,
+      },
+    },
+    {
+      lowercase: {
+        field: "my-keyword-field",
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc b/docs/doc_examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc
new file mode 100644
index 000000000..5802bc29b
--- /dev/null
+++ b/docs/doc_examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transform.putTransform({
+  transform_id: "ecommerce_transform2",
+  source: {
+    index: "kibana_sample_data_ecommerce",
+  },
+  latest: {
+    unique_key: ["customer_id"],
+    sort: "order_date",
+  },
+  description: "Latest order for each customer",
+  dest: {
+    index: "kibana_sample_data_ecommerce_transform2",
+  },
+  frequency: "5m",
+  sync: {
+    time: {
+      field: "order_date",
+      delay: "60s",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/710c7871f20f176d51209b1574b0d61b.asciidoc b/docs/doc_examples/710c7871f20f176d51209b1574b0d61b.asciidoc
deleted file mode 100644
index 677e6bdfd..000000000
--- a/docs/doc_examples/710c7871f20f176d51209b1574b0d61b.asciidoc
+++ /dev/null
@@ -1,13 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.get({
-  index: 'twitter',
-  id: '1',
-  stored_fields: 'tags,counter'
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/711443504b69d0d296e717c716a223e2.asciidoc b/docs/doc_examples/711443504b69d0d296e717c716a223e2.asciidoc
new file mode 100644
index 000000000..773424205
--- /dev/null
+++ b/docs/doc_examples/711443504b69d0d296e717c716a223e2.asciidoc
@@ -0,0 +1,36 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "drivers",
+  query: {
+    nested: {
path: "driver", + query: { + nested: { + path: "driver.vehicle", + query: { + bool: { + must: [ + { + match: { + "driver.vehicle.make": "Powell Motors", + }, + }, + { + match: { + "driver.vehicle.model": "Canyonero", + }, + }, + ], + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7148c8512079d378af70302e65502dd2.asciidoc b/docs/doc_examples/7148c8512079d378af70302e65502dd2.asciidoc new file mode 100644 index 000000000..e2d513793 --- /dev/null +++ b/docs/doc_examples/7148c8512079d378af70302e65502dd2.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "timeseries-000001", + aliases: { + timeseries: { + is_write_index: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/719141517d83b7e8e929b347a8d67c9f.asciidoc b/docs/doc_examples/719141517d83b7e8e929b347a8d67c9f.asciidoc new file mode 100644 index 000000000..358afa030 --- /dev/null +++ b/docs/doc_examples/719141517d83b7e8e929b347a8d67c9f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.get({ + index: "kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", + features: "settings", + flat_settings: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/71b5b2ba9557d0f296ff2de91727d2f6.asciidoc b/docs/doc_examples/71b5b2ba9557d0f296ff2de91727d2f6.asciidoc deleted file mode 100644 index 8018c6c2c..000000000 --- a/docs/doc_examples/71b5b2ba9557d0f296ff2de91727d2f6.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre', - order: { - max_play_count: 'desc' - } - }, - aggs: { - max_play_count: { - max: { - field: 'play_count' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc b/docs/doc_examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc new file mode 100644 index 000000000..ccc77f823 --- /dev/null +++ b/docs/doc_examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.stats({ + node_id: "node1,node*,master:false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/71de08a2d962c66f0c60677eff23f8d1.asciidoc b/docs/doc_examples/71de08a2d962c66f0c60677eff23f8d1.asciidoc new file mode 100644 index 000000000..87fdcbfec --- /dev/null +++ b/docs/doc_examples/71de08a2d962c66f0c60677eff23f8d1.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + aggs: { + price_ranges: { + range: { + field: "price", + keyed: true, + ranges: [ + { + key: "cheap", + to: 100, + }, + { + key: "average", + from: 100, + to: 200, + }, + { + key: "expensive", + from: 200, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git 
diff --git a/docs/doc_examples/71e47a83f632ef159956287bbfe4ca12.asciidoc b/docs/doc_examples/71e47a83f632ef159956287bbfe4ca12.asciidoc
new file mode 100644
index 000000000..c76b1d751
--- /dev/null
+++ b/docs/doc_examples/71e47a83f632ef159956287bbfe4ca12.asciidoc
@@ -0,0 +1,24 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "example",
+  query: {
+    shape: {
+      geometry: {
+        shape: {
+          type: "envelope",
+          coordinates: [
+            [1355, 5355],
+            [1400, 5200],
+          ],
+        },
+        relation: "within",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc b/docs/doc_examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc
new file mode 100644
index 000000000..559eb3d85
--- /dev/null
+++ b/docs/doc_examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  index: "analyze_sample",
+  analyzer: "whitespace",
+  text: "this is a test",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/722238b4e7b78cdb3c6a986780e7e286.asciidoc b/docs/doc_examples/722238b4e7b78cdb3c6a986780e7e286.asciidoc
new file mode 100644
index 000000000..f2001fb6e
--- /dev/null
+++ b/docs/doc_examples/722238b4e7b78cdb3c6a986780e7e286.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "range_index",
+  size: 0,
+  query: {
+    range: {
+      time_frame: {
+        gte: "2019-11-01",
+        format: "yyyy-MM-dd",
+      },
+    },
+  },
+  aggs: {
+    november_data: {
+      date_histogram: {
+        field: "time_frame",
+        calendar_interval: "day",
+        format: "yyyy-MM-dd",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/72231b7debac60c95b9869a97dafda3a.asciidoc b/docs/doc_examples/72231b7debac60c95b9869a97dafda3a.asciidoc
deleted file mode 100644
index 346397da6..000000000
--- a/docs/doc_examples/72231b7debac60c95b9869a97dafda3a.asciidoc
+++ /dev/null
@@ -1,20 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  body: {
-    query: {
-      match_phrase: {
-        message: {
-          query: 'this is a test',
-          analyzer: 'my_analyzer'
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/726994d8f3793b86628255a797155a52.asciidoc b/docs/doc_examples/726994d8f3793b86628255a797155a52.asciidoc
new file mode 100644
index 000000000..c85f3c379
--- /dev/null
+++ b/docs/doc_examples/726994d8f3793b86628255a797155a52.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.nodes.info({
+  node_id: "ingest",
+  filter_path: "nodes.*.ingest.processors",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/72a3668ddc95d9aec47cc679d1e7afc5.asciidoc b/docs/doc_examples/72a3668ddc95d9aec47cc679d1e7afc5.asciidoc
new file mode 100644
index 000000000..3fe8f1d65
--- /dev/null
+++ b/docs/doc_examples/72a3668ddc95d9aec47cc679d1e7afc5.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putSettings({
+  persistent: {
+    cluster: {
+      remote: {
+        cluster_one: {
+          seeds: ["35.238.149.1:9300"],
+          skip_unavailable: true,
+        },
+        cluster_two: {
+          seeds: ["35.238.149.2:9300"],
+          skip_unavailable: false,
+        },
+        cluster_three: {
+          seeds: ["35.238.149.3:9300"],
+        },
+      },
+    },
+  },
});
+console.log(response);
+----
diff --git a/docs/doc_examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc b/docs/doc_examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc
new file mode 100644
index 000000000..186d8540d
--- /dev/null
+++ b/docs/doc_examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ilm.start();
+console.log(response);
+----
diff --git a/docs/doc_examples/72b999120785dfba2827268482e9be0a.asciidoc b/docs/doc_examples/72b999120785dfba2827268482e9be0a.asciidoc
new file mode 100644
index 000000000..042a2e3b2
--- /dev/null
+++ b/docs/doc_examples/72b999120785dfba2827268482e9be0a.asciidoc
@@ -0,0 +1,98 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "museums",
+  mappings: {
+    properties: {
+      location: {
+        type: "geo_point",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.bulk({
+  index: "museums",
+  refresh: "true",
+  operations: [
+    {
+      index: {
+        _id: 1,
+      },
+    },
+    {
+      location: "POINT (4.912350 52.374081)",
+      name: "NEMO Science Museum",
+    },
+    {
+      index: {
+        _id: 2,
+      },
+    },
+    {
+      location: "POINT (4.901618 52.369219)",
+      name: "Museum Het Rembrandthuis",
+    },
+    {
+      index: {
+        _id: 3,
+      },
+    },
+    {
+      location: "POINT (4.914722 52.371667)",
+      name: "Nederlands Scheepvaartmuseum",
+    },
+    {
+      index: {
+        _id: 4,
+      },
+    },
+    {
+      location: "POINT (4.405200 51.222900)",
+      name: "Letterenhuis",
+    },
+    {
+      index: {
+        _id: 5,
+      },
+    },
+    {
+      location: "POINT (2.336389 48.861111)",
+      name: "Musée du Louvre",
+    },
+    {
+      index: {
+        _id: 6,
+      },
+    },
+    {
+      location: "POINT (2.327000 48.860000)",
+      name: "Musée d'Orsay",
+    },
+  ],
+});
+console.log(response1);
+
+const response2 = await client.search({
+  index: "museums",
+  size: 0,
+  query: {
+    match: {
+      name: "musée",
+    },
+  },
+  aggs: {
+    viewport: {
+      geo_bounds: {
+        field: "location",
+        wrap_longitude: true,
+      },
+    },
+  },
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc b/docs/doc_examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc
new file mode 100644
index 000000000..d46bcd8d4
--- /dev/null
+++ b/docs/doc_examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.get({
+  repository: "my_repository",
+  snapshot: "*",
+  sort: "name",
+  from_sort_value: "snapshot_2",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/72beebe779a258c225dee7b023e60c52.asciidoc b/docs/doc_examples/72beebe779a258c225dee7b023e60c52.asciidoc
index 15da6144c..261b4ebbb 100644
--- a/docs/doc_examples/72beebe779a258c225dee7b023e60c52.asciidoc
+++ b/docs/doc_examples/72beebe779a258c225dee7b023e60c52.asciidoc
@@ -4,9 +4,8 @@
+----
diff --git a/docs/doc_examples/72beebe779a258c225dee7b023e60c52.asciidoc b/docs/doc_examples/72beebe779a258c225dee7b023e60c52.asciidoc
index 15da6144c..261b4ebbb 100644
--- a/docs/doc_examples/72beebe779a258c225dee7b023e60c52.asciidoc
+++ b/docs/doc_examples/72beebe779a258c225dee7b023e60c52.asciidoc
@@ -4,9 +4,8 @@
 [source, js]
 ----
 const response = await client.nodes.stats({
-  metric: 'indices',
-  index_metric: 'search'
-})
-console.log(response)
+  metric: "indices",
+  index_metric: "search",
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/98f14fddddea54a7d6149ab7b92e099d.asciidoc b/docs/doc_examples/72d33fbd72b0766b2f14ea27d9ccf0fa.asciidoc
similarity index 80%
rename from docs/doc_examples/98f14fddddea54a7d6149ab7b92e099d.asciidoc
rename to docs/doc_examples/72d33fbd72b0766b2f14ea27d9ccf0fa.asciidoc
index 97b01621e..cb34ca584 100644
--- a/docs/doc_examples/98f14fddddea54a7d6149ab7b92e099d.asciidoc
+++ b/docs/doc_examples/72d33fbd72b0766b2f14ea27d9ccf0fa.asciidoc
@@ -4,8 +4,7 @@
 [source, js]
 ----
 const response = await client.indices.delete({
-  index: 'twitter'
-})
-console.log(response)
+  index: "my-index",
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/73250f845738c428246a3ade66a8f54c.asciidoc b/docs/doc_examples/73250f845738c428246a3ade66a8f54c.asciidoc
new file mode 100644
index 000000000..e49b85234
--- /dev/null
+++ b/docs/doc_examples/73250f845738c428246a3ade66a8f54c.asciidoc
@@ -0,0 +1,51 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "exams",
+  refresh: "true",
+  document: {
+    grade: 100,
+    weight: [2, 3],
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "exams",
+  refresh: "true",
+  document: {
+    grade: 80,
+    weight: 3,
+  },
+});
+console.log(response1);
+
+const response2 = await client.search({
+  index: "exams",
+  filter_path: "aggregations",
+  size: 0,
+  runtime_mappings: {
+    "weight.combined": {
+      type: "double",
+      script:
+        "\n double s = 0;\n for (double w : doc['weight']) {\n s += w;\n }\n emit(s);\n ",
+    },
+  },
+  aggs: {
+    weighted_grade: {
+      weighted_avg: {
+        value: {
+          script: "doc.grade.value + 1",
+        },
+        weight: {
+          field: "weight.combined",
+        },
+      },
+    },
+  },
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/734c2e2a1e45b84f1e4e65b51356fcd7.asciidoc b/docs/doc_examples/734c2e2a1e45b84f1e4e65b51356fcd7.asciidoc
deleted file mode 100644
index af1817642..000000000
--- a/docs/doc_examples/734c2e2a1e45b84f1e4e65b51356fcd7.asciidoc
+++ /dev/null
@@ -1,20 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.indices.create({
-  index: 'new_users',
-  body: {
-    mappings: {
-      properties: {
-        user_id: {
-          type: 'keyword'
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/734e2b1d1ca84a305240a449738f0eba.asciidoc b/docs/doc_examples/734e2b1d1ca84a305240a449738f0eba.asciidoc
new file mode 100644
index 000000000..0d616a95b
--- /dev/null
+++ b/docs/doc_examples/734e2b1d1ca84a305240a449738f0eba.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cat.indices({
+  v: "true",
+  index: ".ds-my-data-stream-2022.06.17-000001,kibana_sample_data_flights",
+  h: "index,status,health",
+});
+console.log(response);
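+// Illustrative note (not part of the generated example): `h` selects the
+// columns the cat API returns, here index, status, and health.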
+----
diff --git a/docs/doc_examples/73646c12ad33a813ab2280f1dc83500e.asciidoc b/docs/doc_examples/73646c12ad33a813ab2280f1dc83500e.asciidoc
new file mode 100644
index 000000000..e5c2d87bb
--- /dev/null
+++ b/docs/doc_examples/73646c12ad33a813ab2280f1dc83500e.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ccr.follow({
+  index: "",
+  wait_for_active_shards: 1,
+  remote_cluster: "",
+  leader_index: "",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/738db420e3ad2a127ea75fb8e5051926.asciidoc b/docs/doc_examples/738db420e3ad2a127ea75fb8e5051926.asciidoc
new file mode 100644
index 000000000..a3947f0ec
--- /dev/null
+++ b/docs/doc_examples/738db420e3ad2a127ea75fb8e5051926.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "last-log-from-clientip",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc b/docs/doc_examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc
new file mode 100644
index 000000000..7f0ee2b39
--- /dev/null
+++ b/docs/doc_examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.searchApplication.getBehavioralAnalytics();
+console.log(response);
+----
diff --git a/docs/doc_examples/73be1f93d789264e5b972ddb5991bc66.asciidoc b/docs/doc_examples/73be1f93d789264e5b972ddb5991bc66.asciidoc
new file mode 100644
index 000000000..d5aaaf737
--- /dev/null
+++ b/docs/doc_examples/73be1f93d789264e5b972ddb5991bc66.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putSettings({
+  persistent: {
+    "logger.org.elasticsearch.discovery": "DEBUG",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc b/docs/doc_examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc
new file mode 100644
index 000000000..f8d2781a1
--- /dev/null
+++ b/docs/doc_examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.rollup.getRollupIndexCaps({
+  index: "sensor_rollup",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/73df03be6ee78b10106581dbd7cb39ef.asciidoc b/docs/doc_examples/73df03be6ee78b10106581dbd7cb39ef.asciidoc
new file mode 100644
index 000000000..7b799e7e0
--- /dev/null
+++ b/docs/doc_examples/73df03be6ee78b10106581dbd7cb39ef.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  size: 0,
+  aggs: {
+    my_date_histo: {
+      date_histogram: {
+        field: "date",
+        calendar_interval: "1M",
+      },
+      aggs: {
+        the_sum: {
+          sum: {
+            field: "price",
+          },
+        },
+        the_movavg: {
+          moving_fn: {
+            buckets_path: "the_sum",
+            window: 10,
+            script: "MovingFunctions.ewma(values, 0.3)",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/73e5c88ad1488b213fb278ee1cb42289.asciidoc b/docs/doc_examples/73e5c88ad1488b213fb278ee1cb42289.asciidoc
deleted file mode 100644
index 6e9524527..000000000
--- a/docs/doc_examples/73e5c88ad1488b213fb278ee1cb42289.asciidoc
+++ /dev/null
@@ -1,22 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  index: 'sales',
-  size: '0',
-  body: {
-    aggs: {
-      sales_over_time: {
-        date_histogram: {
-          field: 'date',
-          calendar_interval: '2d'
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc b/docs/doc_examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc
new file mode 100644
index 000000000..b7156b497
--- /dev/null
+++ b/docs/doc_examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putSettings({
+  persistent: {
+    "cluster.routing.allocation.enable": null,
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/73f9271dee9b8539b6aa7e17f323c623.asciidoc b/docs/doc_examples/73f9271dee9b8539b6aa7e17f323c623.asciidoc
new file mode 100644
index 000000000..387c13543
--- /dev/null
+++ b/docs/doc_examples/73f9271dee9b8539b6aa7e17f323c623.asciidoc
@@ -0,0 +1,34 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "products",
+  aggs: {
+    genres_and_products: {
+      multi_terms: {
+        terms: [
+          {
+            field: "genre",
+          },
+          {
+            field: "product",
+          },
+        ],
+        order: {
+          total_quantity: "desc",
+        },
+      },
+      aggs: {
+        total_quantity: {
+          sum: {
+            field: "quantity",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc b/docs/doc_examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc
new file mode 100644
index 000000000..05336a24a
--- /dev/null
+++ b/docs/doc_examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.getServiceAccounts({
+  namespace: "elastic",
+  service: "fleet-server",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/7404c6e809fee5d7eb9678a82a872806.asciidoc b/docs/doc_examples/7404c6e809fee5d7eb9678a82a872806.asciidoc
new file mode 100644
index 000000000..a9a9de6b8
--- /dev/null
+++ b/docs/doc_examples/7404c6e809fee5d7eb9678a82a872806.asciidoc
@@ -0,0 +1,24 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  aggs: {
+    "my-agg-name": {
+      terms: {
+        field: "my-field",
+      },
+      aggs: {
+        "my-sub-agg-name": {
+          avg: {
+            field: "my-other-field",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/741180473ba526219578ad0422f4fe81.asciidoc b/docs/doc_examples/741180473ba526219578ad0422f4fe81.asciidoc
new file mode 100644
index 000000000..8a5484224
--- /dev/null
+++ b/docs/doc_examples/741180473ba526219578ad0422f4fe81.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_connector/my-connector/_features",
+  body: {
+    features: {
+      document_level_security: {
+        enabled: true,
+      },
+      incremental_sync: {
+        enabled: true,
+      },
+      sync_rules: {
+        advanced: {
+          enabled: false,
+        },
+        basic: {
+          enabled: true,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc b/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc
new file mode 100644
index 000000000..fe849a80d
--- /dev/null
+++ b/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "POST",
+  path: "/_inference/text_embedding/my-cohere-endpoint",
+  body: {
+    input:
+      "The sky above the port was the color of television tuned to a dead channel.",
+    task_settings: {
+      input_type: "ingest",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc b/docs/doc_examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc
new file mode 100644
index 000000000..c3d68dfcc
--- /dev/null
+++ b/docs/doc_examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc
@@ -0,0 +1,44 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "mv",
+  mappings: {
+    properties: {
+      b: {
+        type: "long",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.bulk({
+  index: "mv",
+  refresh: "true",
+  operations: [
+    {
+      index: {},
+    },
+    {
+      a: 1,
+      b: [2, 2, 1],
+    },
+    {
+      index: {},
+    },
+    {
+      a: 2,
+      b: [1, 1],
+    },
+  ],
+});
+console.log(response1);
+
+const response2 = await client.esql.query({
+  query: "FROM mv | LIMIT 2",
+});
+console.log(response2);
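+// Illustrative note (not part of the generated example): ES|QL rows arrive in
+// response2.values, and multivalued columns come back as arrays, e.g. a row
+// may look like [1, [1, 2, 2]] depending on the server version.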
+----
diff --git a/docs/doc_examples/7456ef459d510d66ba4492cc9fbdc6c6.asciidoc b/docs/doc_examples/7456ef459d510d66ba4492cc9fbdc6c6.asciidoc
new file mode 100644
index 000000000..c7336a48f
--- /dev/null
+++ b/docs/doc_examples/7456ef459d510d66ba4492cc9fbdc6c6.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putSettings({
+  persistent: {
+    cluster: {
+      remote: {
+        cluster_two: {
+          mode: null,
+          seeds: null,
+          skip_unavailable: null,
+          "transport.compress": null,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/745f9b8cdb8e91073f6e520e1d9f8c05.asciidoc b/docs/doc_examples/745f9b8cdb8e91073f6e520e1d9f8c05.asciidoc
deleted file mode 100644
index 35ee07f1c..000000000
--- a/docs/doc_examples/745f9b8cdb8e91073f6e520e1d9f8c05.asciidoc
+++ /dev/null
@@ -1,13 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.get({
-  index: 'twitter',
-  id: '0',
-  _source: '*.id,retweeted'
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/74678f8bbc7e4fc1885719d1cf63ac67.asciidoc b/docs/doc_examples/74678f8bbc7e4fc1885719d1cf63ac67.asciidoc
new file mode 100644
index 000000000..2c9c1ee30
--- /dev/null
+++ b/docs/doc_examples/74678f8bbc7e4fc1885719d1cf63ac67.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  aggs: {
+    range: {
+      date_range: {
+        field: "date",
+        format: "MM-yyy",
+        ranges: [
+          {
+            from: "01-2015",
+            to: "03-2015",
+            key: "quarter_01",
+          },
+          {
+            from: "03-2015",
+            to: "06-2015",
+            key: "quarter_02",
+          },
+        ],
+        keyed: true,
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/746e0a1cb5984f2672963b363505c7b3.asciidoc b/docs/doc_examples/746e0a1cb5984f2672963b363505c7b3.asciidoc
new file mode 100644
index 000000000..4a55b9ef8
--- /dev/null
+++ b/docs/doc_examples/746e0a1cb5984f2672963b363505c7b3.asciidoc
@@ -0,0 +1,39 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      date: {
+        type: "date",
+        format: "strict_date_optional_time||epoch_second",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index-000001",
+  id: "example",
+  refresh: "true",
+  document: {
+    date: 1618321898,
+  },
+});
+console.log(response1);
+
+const response2 = await client.search({
+  index: "my-index-000001",
+  fields: [
+    {
+      field: "date",
+    },
+  ],
+  _source: false,
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/746e87db7e1e8b5e6b40d8b5b188de42.asciidoc b/docs/doc_examples/746e87db7e1e8b5e6b40d8b5b188de42.asciidoc
new file mode 100644
index 000000000..9dce10243
--- /dev/null
+++ b/docs/doc_examples/746e87db7e1e8b5e6b40d8b5b188de42.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "exams",
+  size: 0,
+  aggs: {
+    grades_stats: {
+      stats: {
+        field: "grade",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc b/docs/doc_examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc
new file mode 100644
index 000000000..f48892684
--- /dev/null
+++ b/docs/doc_examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    intervals: {
+      my_text: {
+        match: {
+          query: "hot porridge",
+          max_gaps: 10,
+          filter: {
+            not_containing: {
+              match: {
+                query: "salty",
+              },
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/7477671958734843dd67cf0b8e6c7515.asciidoc b/docs/doc_examples/7477671958734843dd67cf0b8e6c7515.asciidoc
deleted file mode 100644
index 42a9afd21..000000000
--- a/docs/doc_examples/7477671958734843dd67cf0b8e6c7515.asciidoc
+++ /dev/null
@@ -1,20 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.indices.create({
-  index: 'index_long',
-  body: {
-    mappings: {
-      properties: {
-        field: {
-          type: 'date_nanos'
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/747a4b5001423938d7d05399d28f1995.asciidoc b/docs/doc_examples/747a4b5001423938d7d05399d28f1995.asciidoc
new file mode 100644
index 000000000..9b0378abb
--- /dev/null
+++ b/docs/doc_examples/747a4b5001423938d7d05399d28f1995.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putSettings({
+  persistent: {
+    "indices.lifecycle.poll_interval": "1m",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/98234499cfec70487cec5d013e976a84.asciidoc b/docs/doc_examples/74a80c28737a0648db0dfe7f049d12f2.asciidoc
similarity index 74%
rename from docs/doc_examples/98234499cfec70487cec5d013e976a84.asciidoc
rename to docs/doc_examples/74a80c28737a0648db0dfe7f049d12f2.asciidoc
index 669ec3eb3..335953144 100644
--- a/docs/doc_examples/98234499cfec70487cec5d013e976a84.asciidoc
+++ b/docs/doc_examples/74a80c28737a0648db0dfe7f049d12f2.asciidoc
@@ -4,9 +4,8 @@
 [source, js]
 ----
 const response = await client.exists({
-  index: 'twitter',
-  id: '0'
-})
-console.log(response)
+  index: "my-index-000001",
+  id: 0,
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/74b13ceb6cda3acaa9e9f58c9e5e2431.asciidoc b/docs/doc_examples/74b13ceb6cda3acaa9e9f58c9e5e2431.asciidoc
new file mode 100644
index 000000000..ef9d24928
--- /dev/null
+++ b/docs/doc_examples/74b13ceb6cda3acaa9e9f58c9e5e2431.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putMapping({
+  index: "my-index-000001",
+  _meta: {
+    class: "MyApp2::User3",
+    version: {
+      min: "1.3",
+      max: "1.5",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/74b229a6e020113e5749099451979c89.asciidoc b/docs/doc_examples/74b229a6e020113e5749099451979c89.asciidoc
new file mode 100644
index 000000000..b99aa857f
--- /dev/null
+++ b/docs/doc_examples/74b229a6e020113e5749099451979c89.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "test-index",
+  query: {
+    nested: {
+      path: "inference_field.inference.chunks",
+      query: {
+        knn: {
+          field: "inference_field.inference.chunks.embeddings",
+          query_vector_builder: {
+            text_embedding: {
+              model_id: "my_inference_id",
+              model_text: "mountain lake",
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/74da377bccad43da2b0e276c086d26ba.asciidoc b/docs/doc_examples/74da377bccad43da2b0e276c086d26ba.asciidoc
new file mode 100644
index 000000000..6e3a95345
--- /dev/null
+++ b/docs/doc_examples/74da377bccad43da2b0e276c086d26ba.asciidoc
@@ -0,0 +1,35 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.info({
+  target: "_all",
+});
+console.log(response);
+
+const response1 = await client.cluster.info({
+  target: "http",
+});
+console.log(response1);
+
+const response2 = await client.cluster.info({
+  target: "ingest",
+});
+console.log(response2);
+
+const response3 = await client.cluster.info({
+  target: "thread_pool",
+});
+console.log(response3);
+
+const response4 = await client.cluster.info({
+  target: "script",
+});
+console.log(response4);
+
+const response5 = await client.cluster.info({
+  target: "http,ingest",
+});
+console.log(response5);
+----
diff --git a/docs/doc_examples/75330ec1305d2beb0e2f34d2195464e2.asciidoc b/docs/doc_examples/75330ec1305d2beb0e2f34d2195464e2.asciidoc
deleted file mode 100644
index 3db659111..000000000
--- a/docs/doc_examples/75330ec1305d2beb0e2f34d2195464e2.asciidoc
+++ /dev/null
@@ -1,17 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  body: {
-    query: {
-      match_all: {
-        boost: 1.2
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/7594a9a85c8511701e281974cbc253e1.asciidoc b/docs/doc_examples/7594a9a85c8511701e281974cbc253e1.asciidoc
new file mode 100644
index 000000000..e98728bf2
--- /dev/null
+++ b/docs/doc_examples/7594a9a85c8511701e281974cbc253e1.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_inference/text_embedding/amazon_bedrock_embeddings",
+  body: {
+    service: "amazonbedrock",
+    service_settings: {
+      access_key: "",
+      secret_key: "",
+      region: "",
+      provider: "",
+      model: "",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc b/docs/doc_examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc
new file mode 100644
index 000000000..0ec4dd0eb
--- /dev/null
+++ b/docs/doc_examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.closeJob({
+  job_id: "low_request_rate",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc b/docs/doc_examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc
new file mode 100644
index 000000000..d60a21fe9
--- /dev/null
+++ b/docs/doc_examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.validateQuery({
+  index: "my-index-000001",
+  explain: "true",
+  query: {
+    query_string: {
+      query: "@timestamp:foo",
+      lenient: false,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/75c347b181112d2c4538c01ade903afe.asciidoc b/docs/doc_examples/75c347b181112d2c4538c01ade903afe.asciidoc
new file mode 100644
index 000000000..2fbf4fba5
--- /dev/null
+++ b/docs/doc_examples/75c347b181112d2c4538c01ade903afe.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.validateQuery({
+  index: "my-index-000001",
+  rewrite: "true",
+  query: {
+    match: {
+      "user.id": {
+        query: "kimchy",
+        fuzziness: "auto",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/75e13a00f0909c955031ff62acc14a79.asciidoc b/docs/doc_examples/75e13a00f0909c955031ff62acc14a79.asciidoc
new file mode 100644
index 000000000..2f6f4225f
--- /dev/null
+++ b/docs/doc_examples/75e13a00f0909c955031ff62acc14a79.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  query: {
+    match: {
+      message: "GET /search",
+    },
+  },
+  collapse: {
+    field: "user.id",
+  },
+  sort: [
+    {
+      "http.response.bytes": {
+        order: "desc",
+      },
+    },
+  ],
+  from: 0,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc b/docs/doc_examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc
new file mode 100644
index 000000000..5b9aad331
--- /dev/null
+++ b/docs/doc_examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc
@@ -0,0 +1,49 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.bulk({
+  index: "transactions",
+  refresh: "true",
+  operations: [
+    {
+      index: {
+        _id: 1,
+      },
+    },
+    {
+      type: "sale",
+      amount: 80,
+    },
+    {
+      index: {
+        _id: 2,
+      },
+    },
+    {
+      type: "cost",
+      amount: 10,
+    },
+    {
+      index: {
+        _id: 3,
+      },
+    },
+    {
+      type: "cost",
+      amount: 30,
+    },
+    {
+      index: {
+        _id: 4,
+      },
+    },
+    {
+      type: "sale",
+      amount: 130,
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc b/docs/doc_examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc
new file mode 100644
index 000000000..fe5240085
--- /dev/null
+++ b/docs/doc_examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.rollup.rollupSearch({
+  index: "sensor_rollup",
+  size: 0,
+  aggregations: {
+    avg_temperature: {
+      avg: {
+        field: "temperature",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/763ce1377c8dfa1ca6a042d8ee99f4f5.asciidoc b/docs/doc_examples/763ce1377c8dfa1ca6a042d8ee99f4f5.asciidoc
new file mode 100644
index 000000000..0b3e61686
--- /dev/null
+++ b/docs/doc_examples/763ce1377c8dfa1ca6a042d8ee99f4f5.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.rollover({
+  alias: "k9s",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/76448aaaaa2c352bb6e09d2f83a3fbb3.asciidoc b/docs/doc_examples/76448aaaaa2c352bb6e09d2f83a3fbb3.asciidoc
new file mode 100644
index 000000000..de1d5548b
--- /dev/null
+++ b/docs/doc_examples/76448aaaaa2c352bb6e09d2f83a3fbb3.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  tokenizer: "letter",
+  text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/764f9884b370cbdc82a1c5c42ed40ff3.asciidoc b/docs/doc_examples/764f9884b370cbdc82a1c5c42ed40ff3.asciidoc
deleted file mode 100644
index 6944419e8..000000000
--- a/docs/doc_examples/764f9884b370cbdc82a1c5c42ed40ff3.asciidoc
+++ /dev/null
@@ -1,23 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.reindex({
-  body: {
-    source: {
-      index: 'twitter',
-      query: {
-        term: {
-          user: 'kimchy'
-        }
-      }
-    },
-    dest: {
-      index: 'new_twitter'
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/7659f2f2b0fbe8584b855a01638b95ed.asciidoc b/docs/doc_examples/7659f2f2b0fbe8584b855a01638b95ed.asciidoc
new file mode 100644
index 000000000..b0ad2e9ce
--- /dev/null
+++ b/docs/doc_examples/7659f2f2b0fbe8584b855a01638b95ed.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  size: 0,
+  aggs: {
+    my_buckets: {
+      composite: {
+        sources: [
+          {
+            user_name: {
+              terms: {
+                field: "user_name",
+              },
+            },
+          },
+        ],
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/765c9c8b40b67a42121648045dbf10fb.asciidoc b/docs/doc_examples/765c9c8b40b67a42121648045dbf10fb.asciidoc
new file mode 100644
index 000000000..4a4cc035f
--- /dev/null
+++ b/docs/doc_examples/765c9c8b40b67a42121648045dbf10fb.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.nodes.stats({
+  filter_path: "nodes.*.jvm.mem.pools.old",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/0be2c28ee65384774b1e479b47dc3d92.asciidoc b/docs/doc_examples/766cfc1c9fcd2c186e965761ceb2c07d.asciidoc
similarity index 65%
rename from docs/doc_examples/0be2c28ee65384774b1e479b47dc3d92.asciidoc
rename to docs/doc_examples/766cfc1c9fcd2c186e965761ceb2c07d.asciidoc
index 4a8775f7e..a5adb3692 100644
--- a/docs/doc_examples/0be2c28ee65384774b1e479b47dc3d92.asciidoc
+++ b/docs/doc_examples/766cfc1c9fcd2c186e965761ceb2c07d.asciidoc
@@ -4,13 +4,12 @@
 [source, js]
 ----
 const response = await client.indices.putSettings({
-  index: 'twitter',
-  body: {
+  index: "my-index-000001",
+  settings: {
     index: {
-      refresh_interval: '1s'
-    }
-  }
-})
-console.log(response)
+      number_of_replicas: 1,
+    },
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/769f75829a8e6670aa4cf83d0d737046.asciidoc b/docs/doc_examples/769f75829a8e6670aa4cf83d0d737046.asciidoc
new file mode 100644
index 000000000..4434f1c64
--- /dev/null
+++ b/docs/doc_examples/769f75829a8e6670aa4cf83d0d737046.asciidoc
@@ -0,0 +1,49 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  refresh: "true",
+  document: {
+    date: "2015-10-01T00:30:00Z",
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index-000001",
+  id: 2,
+  refresh: "true",
+  document: {
+    date: "2015-10-01T01:30:00Z",
+  },
+});
+console.log(response1);
+
+const response2 = await client.index({
+  index: "my-index-000001",
+  id: 3,
+  refresh: "true",
+  document: {
+    date: "2015-10-01T02:30:00Z",
+  },
+});
+console.log(response2);
+
+const response3 = await client.search({
+  index: "my-index-000001",
+  size: 0,
+  aggs: {
+    by_day: {
+      auto_date_histogram: {
+        field: "date",
+        buckets: 3,
+      },
+    },
+  },
+});
+console.log(response3);
+----
diff --git a/docs/doc_examples/76b279835936ee4b546a171c671c3cd7.asciidoc b/docs/doc_examples/76b279835936ee4b546a171c671c3cd7.asciidoc
new file mode 100644
index 000000000..f957fef1c
--- /dev/null
+++ b/docs/doc_examples/76b279835936ee4b546a171c671c3cd7.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  tokenizer: "standard",
+  filter: ["cjk_width"],
+  text: "シーサイドライナー",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/76bc87c2592864152768687c2963d1d1.asciidoc b/docs/doc_examples/76bc87c2592864152768687c2963d1d1.asciidoc
new file mode 100644
index 000000000..0fb78415d
--- /dev/null
+++ b/docs/doc_examples/76bc87c2592864152768687c2963d1d1.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.updateApiKey({
+  id: "VuaCfGcBCdbkQm-e5aOx",
+  role_descriptors: {
+    "role-a": {
+      indices: [
+        {
+          names: ["*"],
+          privileges: ["write"],
+        },
+      ],
+    },
+  },
+  metadata: {
+    environment: {
+      level: 2,
+      trusted: true,
+      tags: ["production"],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/76c167d8ab305cb43b594f140c902dfe.asciidoc b/docs/doc_examples/76c167d8ab305cb43b594f140c902dfe.asciidoc
new file mode 100644
index 000000000..91593ace6
--- /dev/null
+++ b/docs/doc_examples/76c167d8ab305cb43b594f140c902dfe.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.shrink({
+  index: "my_source_index",
+  target: "my_target_index",
+  settings: {
+    "index.number_of_replicas": 1,
+    "index.number_of_shards": 1,
+    "index.codec": "best_compression",
+  },
+  aliases: {
+    my_search_indices: {},
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc b/docs/doc_examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc
new file mode 100644
index 000000000..8d065bde7
--- /dev/null
+++ b/docs/doc_examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.analyze({
+  analyzer: "standard",
+  text: "this is a test",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/7709a48020a6cefbbe547fb944541cdb.asciidoc b/docs/doc_examples/7709a48020a6cefbbe547fb944541cdb.asciidoc
new file mode 100644
index 000000000..d657a9e99
--- /dev/null
+++ b/docs/doc_examples/7709a48020a6cefbbe547fb944541cdb.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.bulk({
+  index: "my-bit-vectors",
+  refresh: "true",
+  operations: [
+    {
+      index: {
+        _id: "1",
+      },
+    },
+    {
+      my_vector: [127, -127, 0, 1, 42],
+    },
+    {
+      index: {
+        _id: "2",
+      },
+    },
+    {
+      my_vector: "8100012a7f",
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/77113c65e1755313183a8969233a5a07.asciidoc b/docs/doc_examples/77113c65e1755313183a8969233a5a07.asciidoc
new file mode 100644
index 000000000..b9dea4e56
--- /dev/null
+++ b/docs/doc_examples/77113c65e1755313183a8969233a5a07.asciidoc
@@ -0,0 +1,30 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "idx",
+  mappings: {
+    _source: {
+      mode: "synthetic",
+    },
+    properties: {
+      kwd: {
+        type: "keyword",
+        ignore_above: 3,
+      },
+    },
+  },
+});
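+// Illustrative note (not part of the generated example): values longer than
+// ignore_above (3 characters here) are not indexed, but synthetic _source
+// still reports them when the document is fetched.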
+console.log(response);
+
+const response1 = await client.index({
+  index: "idx",
+  id: 1,
+  document: {
+    kwd: ["foo", "foo", "bang", "bar", "baz"],
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/77243bbf92f2a55e0fca6c2a349a1c15.asciidoc b/docs/doc_examples/77243bbf92f2a55e0fca6c2a349a1c15.asciidoc
deleted file mode 100644
index 1a12ff2cc..000000000
--- a/docs/doc_examples/77243bbf92f2a55e0fca6c2a349a1c15.asciidoc
+++ /dev/null
@@ -1,35 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  body: {
-    sort: [
-      {
-        _geo_distance: {
-          'pin.location': [
-            [
-              -70,
-              40
-            ],
-            [
-              -71,
-              42
-            ]
-          ],
-          order: 'asc',
-          unit: 'km'
-        }
-      }
-    ],
-    query: {
-      term: {
-        user: 'kimchy'
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/7741a04e7e621c528cd72848d875776d.asciidoc b/docs/doc_examples/7741a04e7e621c528cd72848d875776d.asciidoc
new file mode 100644
index 000000000..d09d2747f
--- /dev/null
+++ b/docs/doc_examples/7741a04e7e621c528cd72848d875776d.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.createDataStream({
+  name: "my-data-stream",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/77447e2966708e92f5e219d43ac3f00d.asciidoc b/docs/doc_examples/77447e2966708e92f5e219d43ac3f00d.asciidoc
new file mode 100644
index 000000000..74f0318e5
--- /dev/null
+++ b/docs/doc_examples/77447e2966708e92f5e219d43ac3f00d.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.tasks.list({
+  actions: "*reindex",
+  wait_for_completion: "true",
+  timeout: "10s",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc b/docs/doc_examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc
new file mode 100644
index 000000000..6334c10b3
--- /dev/null
+++ b/docs/doc_examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc
@@ -0,0 +1,28 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.msearchTemplate({
+  index: "my-index",
+  search_templates: [
+    {},
+    {
+      id: "my-search-template",
+      params: {
+        query_string: "hello world",
+        from: 0,
+        size: 10,
+      },
+    },
+    {},
+    {
+      id: "my-other-search-template",
+      params: {
+        query_type: "match_all",
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/774d715155cd13713e6e327adf6ce328.asciidoc b/docs/doc_examples/774d715155cd13713e6e327adf6ce328.asciidoc
deleted file mode 100644
index 2b21e7c28..000000000
--- a/docs/doc_examples/774d715155cd13713e6e327adf6ce328.asciidoc
+++ /dev/null
@@ -1,20 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  body: {
-    aggs: {
-      tags: {
-        terms: {
-          field: 'tags',
-          execution_hint: 'map'
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc
new file mode 100644
index 000000000..d5d893637
--- /dev/null
+++ b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.textStructure.findMessageStructure({});
+console.log(response);
+----
diff --git a/docs/doc_examples/7752b677825523bfb0c38ad9325a6d47.asciidoc b/docs/doc_examples/7752b677825523bfb0c38ad9325a6d47.asciidoc
new file mode 100644
index 000000000..03b51a131
--- /dev/null
+++ b/docs/doc_examples/7752b677825523bfb0c38ad9325a6d47.asciidoc
@@ -0,0 +1,14 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "DELETE",
+  path: "/_connector/another-connector",
+  querystring: {
+    delete_sync_jobs: "true",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc b/docs/doc_examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc
new file mode 100644
index 000000000..4cb267864
--- /dev/null
+++ b/docs/doc_examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.inferTrainedModel({
+  model_id: "model2",
+  docs: [
+    {
+      text_field: "The movie was awesome!!",
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/7777326c6052fee28061e5b82540aedc.asciidoc b/docs/doc_examples/7777326c6052fee28061e5b82540aedc.asciidoc
new file mode 100644
index 000000000..52190b90d
--- /dev/null
+++ b/docs/doc_examples/7777326c6052fee28061e5b82540aedc.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "latency",
+  size: 0,
+  aggs: {
+    grade_percentiles: {
+      percentiles: {
+        field: "grade",
+        missing: 10,
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc b/docs/doc_examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc
new file mode 100644
index 000000000..730b79191
--- /dev/null
+++ b/docs/doc_examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.searchApplication.putBehavioralAnalytics({
+  name: "my_analytics_collection",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/77828fcaecc3f058c48b955928198ff6.asciidoc b/docs/doc_examples/77828fcaecc3f058c48b955928198ff6.asciidoc
new file mode 100644
index 000000000..6e2fce72a
--- /dev/null
+++ b/docs/doc_examples/77828fcaecc3f058c48b955928198ff6.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.simulate({
+  pipeline: {
+    description: "parse multiple patterns",
+    processors: [
+      {
+        grok: {
+          field: "message",
+          patterns: ["%{FAVORITE_DOG:pet}", "%{FAVORITE_CAT:pet}"],
+          pattern_definitions: {
+            FAVORITE_DOG: "beagle",
+            FAVORITE_CAT: "burmese",
+          },
+        },
+      },
+    ],
+  },
+  docs: [
+    {
+      _source: {
+        message: "I love burmese cats!",
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/77b90f6787195767b6da60d8532714b4.asciidoc b/docs/doc_examples/77b90f6787195767b6da60d8532714b4.asciidoc
new file mode 100644
index 000000000..7ab2d290f
--- /dev/null
+++ b/docs/doc_examples/77b90f6787195767b6da60d8532714b4.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_inference/text_embedding/azure_openai_embeddings",
+  body: {
+    service: "azureopenai",
+    service_settings: {
+      api_key: "",
+      resource_name: "",
+      deployment_id: "",
+      api_version: "2024-02-01",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc b/docs/doc_examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc
new file mode 100644
index 000000000..921f2ca66
--- /dev/null
+++ b/docs/doc_examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.nodes.hotThreads();
+console.log(response);
+
+const response1 = await client.nodes.hotThreads({
+  node_id: "nodeId1,nodeId2",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/77c50f982906718ecc59aa708aed728f.asciidoc b/docs/doc_examples/77c50f982906718ecc59aa708aed728f.asciidoc
new file mode 100644
index 000000000..30b9ef4e8
--- /dev/null
+++ b/docs/doc_examples/77c50f982906718ecc59aa708aed728f.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.update({
+  index: "my-index-000001",
+  id: 1,
+  script: {
+    source: "ctx._source.counter += params.count",
+    lang: "painless",
+    params: {
+      count: 4,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc b/docs/doc_examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc
new file mode 100644
index 000000000..1a9c3ccc7
--- /dev/null
+++ b/docs/doc_examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.getModelSnapshotUpgradeStats({
+  job_id: "low_request_rate",
+  snapshot_id: "_all",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc b/docs/doc_examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc
new file mode 100644
index 000000000..ce23498c5
--- /dev/null
+++ b/docs/doc_examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc
@@ -0,0 +1,68 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.bulk({
+  index: "my-index-000001",
+  refresh: "true",
+  operations: [
+    {
+      index: {},
+    },
+    {
+      timestamp: "2020-04-30T14:30:17-05:00",
+      message:
+        '40.135.0.0 - - [30/Apr/2020:14:30:17 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736',
+    },
+    {
+      index: {},
+    },
+    {
+      timestamp: "2020-04-30T14:30:53-05:00",
+      message:
+        '232.0.0.0 - - [30/Apr/2020:14:30:53 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736',
+    },
+    {
+      index: {},
+    },
+    {
+      timestamp: "2020-04-30T14:31:12-05:00",
+      message:
+        '26.1.0.0 - - [30/Apr/2020:14:31:12 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736',
+    },
+    {
+      index: {},
+    },
+    {
+      timestamp: "2020-04-30T14:31:19-05:00",
+      message:
+        '247.37.0.0 - - [30/Apr/2020:14:31:19 -0500] "GET /french/splash_inet.html HTTP/1.0" 200 3781',
+    },
+    {
+      index: {},
+    },
+    {
+      timestamp: "2020-04-30T14:31:22-05:00",
+      message:
+        '247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] "GET /images/hm_nbg.jpg HTTP/1.0" 304 0',
+    },
+    {
+      index: {},
+    },
+    {
+      timestamp: "2020-04-30T14:31:27-05:00",
+      message:
+        '252.0.0.0 - - [30/Apr/2020:14:31:27 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736',
+    },
+    {
+      index: {},
+    },
+    {
+      timestamp: "2020-04-30T14:31:28-05:00",
+      message: "not a valid apache log",
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc b/docs/doc_examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc
new file mode 100644
index 000000000..12eb887d4
--- /dev/null
+++ b/docs/doc_examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.clone({
+  index: "my-index-000001",
+  target: "cloned-my-index-000001",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc b/docs/doc_examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc
new file mode 100644
index 000000000..c20499d5f
--- /dev/null
+++ b/docs/doc_examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cat.aliases({
+  v: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/783c4fa5351a242364210fc32496beb2.asciidoc b/docs/doc_examples/783c4fa5351a242364210fc32496beb2.asciidoc
new file mode 100644
index 000000000..932715a9c
--- /dev/null
+++ b/docs/doc_examples/783c4fa5351a242364210fc32496beb2.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "products",
+  id: 1567,
+  if_seq_no: 362,
+  if_primary_term: 2,
+  document: {
+    product: "r2d2",
+    details: "A resourceful astromech droid",
+    tags: ["droid"],
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/c22b72c4a52ee098331b3f252c22860d.asciidoc b/docs/doc_examples/7841b65a3bb880ed66cec453925a50cf.asciidoc
similarity index 64%
rename from docs/doc_examples/c22b72c4a52ee098331b3f252c22860d.asciidoc
rename to docs/doc_examples/7841b65a3bb880ed66cec453925a50cf.asciidoc
index b4af4528c..672d5b18e 100644
--- a/docs/doc_examples/c22b72c4a52ee098331b3f252c22860d.asciidoc
+++ b/docs/doc_examples/7841b65a3bb880ed66cec453925a50cf.asciidoc
@@ -4,13 +4,10 @@
 [source, js]
 ----
 const response = await client.deleteByQuery({
-  index: 'twitter,blog',
-  body: {
-    query: {
-      match_all: {}
-    }
-  }
-})
-console.log(response)
+  index: "my-index-000001,my-index-000002",
+  query: {
+    match_all: {},
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/7846974b47a3eab1832a475663d23ad9.asciidoc b/docs/doc_examples/7846974b47a3eab1832a475663d23ad9.asciidoc
new file mode 100644
index 000000000..c9eeee3af
--- /dev/null
+++ b/docs/doc_examples/7846974b47a3eab1832a475663d23ad9.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  size: 10000,
+  query: {
+    match: {
+      "user.id": "elkbee",
+    },
+  },
+  pit: {
+    id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==",
+    keep_alive: "1m",
+  },
+  sort: [
+    {
+      "@timestamp": {
+        order: "asc",
+        format: "strict_date_optional_time_nanos",
+      },
+    },
+  ],
+  search_after: ["2021-05-20T05:30:04.832Z", 4294967298],
+  track_total_hits: false,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc b/docs/doc_examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc
new file mode 100644
index 000000000..08547635f
--- /dev/null
+++ b/docs/doc_examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRoleMapping({
+  name: "jwt8_users",
+  refresh: "true",
+  roles: ["user"],
+  rules: {
+    all: [
+      {
+        field: {
+          "realm.name": "jwt8",
+        },
+      },
+      {
+        field: {
+          username: "principalname1",
+        },
+      },
+    ],
+  },
+  enabled: true,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc b/docs/doc_examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc
new file mode 100644
index 000000000..2838ef1fb
--- /dev/null
+++ b/docs/doc_examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.stats();
+console.log(response);
+----
diff --git a/docs/doc_examples/78c96113ae4ed0054e581b17542528a7.asciidoc b/docs/doc_examples/78c96113ae4ed0054e581b17542528a7.asciidoc
index 1257d122a..8cb7ac484 100644
--- a/docs/doc_examples/78c96113ae4ed0054e581b17542528a7.asciidoc
+++ b/docs/doc_examples/78c96113ae4ed0054e581b17542528a7.asciidoc
@@ -4,21 +4,18 @@
 [source, js]
 ----
 const response = await client.reindex({
-  body: {
-    source: {
-      index: 'source',
-      query: {
-        match: {
-          company: 'cat'
-        }
-      }
+  source: {
+    index: "source",
+    query: {
+      match: {
+        company: "cat",
+      },
     },
-    dest: {
-      index: 'dest',
-      routing: '=cat'
-    }
-  }
-})
-console.log(response)
+  },
+  dest: {
+    index: "dest",
+    routing: "=cat",
+  },
+});
+console.log(response);
 ----
-
diff --git a/docs/doc_examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc b/docs/doc_examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc
new file mode 100644
index 000000000..3c6cc46c4
--- /dev/null
+++ b/docs/doc_examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.updateAliases({
+  actions: [
+    {
+      remove: {
+        index: "index1",
+        alias: "logs-non-existing",
+      },
+    },
+    {
+      add: {
+        index: "index2",
+        alias: "logs-non-existing",
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/790c49fe2ec638e5e8db51a9236bba35.asciidoc b/docs/doc_examples/790c49fe2ec638e5e8db51a9236bba35.asciidoc
new file mode 100644
index 000000000..47e4a08cd
--- /dev/null
+++ b/docs/doc_examples/790c49fe2ec638e5e8db51a9236bba35.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my_locations,my_geoshapes",
+  query: {
+    bool: {
+      must: {
+        match_all: {},
+      },
+      filter: {
+        geo_bounding_box: {
+          "pin.location": {
+            top_left: {
+              lat: 40.73,
+              lon: -74.1,
+            },
+            bottom_right: {
+              lat: 40.01,
+              lon: -71.12,
+            },
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/794d9a321b944347d2a8834a07b5eb22.asciidoc b/docs/doc_examples/794d9a321b944347d2a8834a07b5eb22.asciidoc
new file mode 100644
index 000000000..b72e43b07
--- /dev/null
+++ b/docs/doc_examples/794d9a321b944347d2a8834a07b5eb22.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "idx",
+  mappings: {
+    _source: {
+      mode: "synthetic",
+    },
+    properties: {
+      my_range: {
+        type: "integer_range",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "idx",
+  id: 1,
+  document: {
+    my_range: {
+      lte: 2147483647,
+    },
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/7965d4dbafdc7ca9e1ee6759939dd2e8.asciidoc b/docs/doc_examples/7965d4dbafdc7ca9e1ee6759939dd2e8.asciidoc
new file mode 100644
index 000000000..639870304
--- /dev/null
+++ b/docs/doc_examples/7965d4dbafdc7ca9e1ee6759939dd2e8.asciidoc
@@ -0,0 +1,80 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.watcher.putWatch({
+  id: "log_errors",
+  metadata: {
+    color: "red",
+  },
+  trigger: {
+    schedule: {
+      interval: "5m",
+    },
+  },
+  input: {
+    search: {
+      request: {
+        indices: "log-events",
+        body: {
+          size: 0,
+          query: {
+            match: {
+              status: "error",
+            },
+          },
+        },
+      },
+    },
+  },
+  condition: {
+    compare: {
+      "ctx.payload.hits.total": {
+        gt: 5,
+      },
+    },
+  },
+  transform: {
+    search: {
+      request: {
+        indices: "log-events",
+        body: {
+          query: {
+            match: {
+              status: "error",
+            },
+          },
+        },
+      },
+    },
+  },
+  actions: {
+    my_webhook: {
+      webhook: {
+        method: "POST",
+        host: "mylisteninghost",
+        port: 9200,
+        path: "/{{watch_id}}",
+        body: "Encountered {{ctx.payload.hits.total}} errors",
+      },
+    },
+    email_administrator: {
+      email: {
+        to: "sys.admino@host.domain",
+        subject: "Encountered {{ctx.payload.hits.total}} errors",
+        body: "Too many error in the system, see attached data",
+        attachments: {
+          attached_data: {
+            data: {
+              format: "json",
+            },
+          },
+        },
+        priority: "high",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/79b43a1bf02fb5b38f54b8d5aa5dab53.asciidoc b/docs/doc_examples/79b43a1bf02fb5b38f54b8d5aa5dab53.asciidoc
new file mode 100644
index 000000000..db825c446
--- /dev/null
+++ b/docs/doc_examples/79b43a1bf02fb5b38f54b8d5aa5dab53.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  aggs: {
+    sales_over_time: {
+      auto_date_histogram: {
"date", + buckets: 5, + format: "yyyy-MM-dd", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/79bf91ace935d095d8e44b3ef3fe2efa.asciidoc b/docs/doc_examples/79bf91ace935d095d8e44b3ef3fe2efa.asciidoc new file mode 100644 index 000000000..91e3d5e2d --- /dev/null +++ b/docs/doc_examples/79bf91ace935d095d8e44b3ef3fe2efa.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + flat_settings: "true", + include_defaults: "true", +}); +console.log(response); + +const response1 = await client.cluster.getSettings({ + flat_settings: "true", + include_defaults: "true", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/79cb85efd5e4c435e73b253cb9feabb1.asciidoc b/docs/doc_examples/79cb85efd5e4c435e73b253cb9feabb1.asciidoc new file mode 100644 index 000000000..359f35a94 --- /dev/null +++ b/docs/doc_examples/79cb85efd5e4c435e73b253cb9feabb1.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + runtime_mappings: { + "http.response": { + type: "long", + script: + '\n String response=dissect(\'%{clientip} %{ident} %{auth} [%{@timestamp}] "%{verb} %{request} HTTP/%{httpversion}" %{response} %{size}\').extract(doc["message"].value)?.response;\n if (response != null) emit(Integer.parseInt(response));\n ', + }, + }, + query: { + match: { + "http.response": "304", + }, + }, + fields: ["http.response"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/79e053326a3a8eec828523a035393f66.asciidoc b/docs/doc_examples/79e053326a3a8eec828523a035393f66.asciidoc new file mode 100644 index 000000000..8795c99d4 --- /dev/null +++ b/docs/doc_examples/79e053326a3a8eec828523a035393f66.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.delete({ + index: ".ds-my-data-stream-2099.03.08-000003", + id: "bfspvnIBr7VVZlfp2lqX", +}); +console.log(response); +---- diff --git a/docs/doc_examples/79e8bbbd6c440a21b0b4260c8cb1a61c.asciidoc b/docs/doc_examples/79e8bbbd6c440a21b0b4260c8cb1a61c.asciidoc new file mode 100644 index 000000000..65de39045 --- /dev/null +++ b/docs/doc_examples/79e8bbbd6c440a21b0b4260c8cb1a61c.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "LINESTRING (-77.03653 38.897676, -77.009051 38.889939)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc b/docs/doc_examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc new file mode 100644 index 000000000..ad764d327 --- /dev/null +++ b/docs/doc_examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.getAutoFollowPattern({ + name: "my_auto_follow_pattern", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/79feb4a0c0a21b7015a52f9736cd4683.asciidoc b/docs/doc_examples/79feb4a0c0a21b7015a52f9736cd4683.asciidoc new file mode 100644 index 000000000..8f6ef79f5 --- /dev/null +++ b/docs/doc_examples/79feb4a0c0a21b7015a52f9736cd4683.asciidoc @@ -0,0 +1,69 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + comments: { + type: "nested", + properties: { + votes: { + type: "nested", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + id: 1, + refresh: "true", + document: { + title: "Test title", + comments: [ + { + author: "kimchy", + text: "comment text", + votes: [], + }, + { + author: "nik9000", + text: "words words words", + votes: [ + { + value: 1, + voter: "kimchy", + }, + { + value: -1, + voter: "other", + }, + ], + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "test", + query: { + nested: { + path: "comments.votes", + query: { + match: { + "comments.votes.voter": "kimchy", + }, + }, + inner_hits: {}, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/7a0c633a67244e9703344d036e584d95.asciidoc b/docs/doc_examples/7a0c633a67244e9703344d036e584d95.asciidoc new file mode 100644 index 000000000..e117ad484 --- /dev/null +++ b/docs/doc_examples/7a0c633a67244e9703344d036e584d95.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.enableUserProfile({ + uid: "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc b/docs/doc_examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc new file mode 100644 index 000000000..2da76ddb9 --- /dev/null +++ b/docs/doc_examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc @@ -0,0 +1,54 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "ip_location", + refresh: "true", + document: { + ip: "192.168.1.1", + country: "Canada", + city: "Montreal", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "logs", + id: 1, + refresh: "true", + document: { + host: "192.168.1.1", + message: "the first message", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "logs", + id: 2, + refresh: "true", + document: { + host: "192.168.1.2", + message: "the second message", + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "logs", + runtime_mappings: { + location: { + type: "lookup", + target_index: "ip_location", + input_field: "host", + target_field: "ip", + fetch_fields: ["country", "city"], + }, + }, + fields: ["host", "message", "location"], + _source: false, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/7a23a385a63c87cab58fd494870450fd.asciidoc b/docs/doc_examples/7a23a385a63c87cab58fd494870450fd.asciidoc new file mode 100644 index 000000000..3b2ace07c --- /dev/null +++ b/docs/doc_examples/7a23a385a63c87cab58fd494870450fd.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping4", + roles: ["superuser"], + enabled: true, + rules: { + any: [ + { + field: { + username: "esadmin", + }, + }, + { + field: { + groups: "cn=admins,dc=example,dc=com", + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a2b9a7b2b6553a48bd4db60a939c0fc.asciidoc b/docs/doc_examples/7a2b9a7b2b6553a48bd4db60a939c0fc.asciidoc new file mode 100644 index 000000000..d368a4e1d --- /dev/null +++ b/docs/doc_examples/7a2b9a7b2b6553a48bd4db60a939c0fc.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "test_index", + id: 1, + refresh: "true", + document: { + query: { + match: { + body: { + query: "miss bicycl", + analyzer: "whitespace", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc b/docs/doc_examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc new file mode 100644 index 000000000..ab73f60fd --- /dev/null +++ b/docs/doc_examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + remove: { + index: "my-index-000001", + alias: "my-index", + }, + }, + { + add: { + index: "my-index-000002", + alias: "my-index", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc b/docs/doc_examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc new file mode 100644 index 000000000..7470631a7 --- /dev/null +++ b/docs/doc_examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.delete({ + id: "FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a8de5606f283f4ef171b015eef6befa.asciidoc b/docs/doc_examples/7a8de5606f283f4ef171b015eef6befa.asciidoc new file mode 100644 index 000000000..bd6e331e5 --- /dev/null +++ b/docs/doc_examples/7a8de5606f283f4ef171b015eef6befa.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + metric: "search", + groups: "group1,group2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a987cd13383bdc990155d7bd5fb221e.asciidoc b/docs/doc_examples/7a987cd13383bdc990155d7bd5fb221e.asciidoc new file mode 100644 index 000000000..7c80d271b --- /dev/null +++ b/docs/doc_examples/7a987cd13383bdc990155d7bd5fb221e.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role5", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + grant: ["*"], + except: ["customer.handle"], + }, + }, + ], +}); +console.log(response); +---- diff 
--git a/docs/doc_examples/7ab968a61bb0783f563dd6d29b253901.asciidoc b/docs/doc_examples/7ab968a61bb0783f563dd6d29b253901.asciidoc new file mode 100644 index 000000000..fbb338d57 --- /dev/null +++ b/docs/doc_examples/7ab968a61bb0783f563dd6d29b253901.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "catalan_example", + settings: { + analysis: { + filter: { + catalan_elision: { + type: "elision", + articles: ["d", "l", "m", "n", "s", "t"], + articles_case: true, + }, + catalan_stop: { + type: "stop", + stopwords: "_catalan_", + }, + catalan_keywords: { + type: "keyword_marker", + keywords: ["example"], + }, + catalan_stemmer: { + type: "stemmer", + language: "catalan", + }, + }, + analyzer: { + rebuilt_catalan: { + tokenizer: "standard", + filter: [ + "catalan_elision", + "lowercase", + "catalan_stop", + "catalan_keywords", + "catalan_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cde4dddae5c06e7f1d38c9d933dbc7ac.asciidoc b/docs/doc_examples/7ae434b3667c589a8e70fe560f4ee3f9.asciidoc similarity index 70% rename from docs/doc_examples/cde4dddae5c06e7f1d38c9d933dbc7ac.asciidoc rename to docs/doc_examples/7ae434b3667c589a8e70fe560f4ee3f9.asciidoc index 006195f99..613941fde 100644 --- a/docs/doc_examples/cde4dddae5c06e7f1d38c9d933dbc7ac.asciidoc +++ b/docs/doc_examples/7ae434b3667c589a8e70fe560f4ee3f9.asciidoc @@ -4,8 +4,8 @@ [source, js] ---- const response = await client.updateByQuery({ - index: 'twitter,blog' -}) -console.log(response) + index: "my-index-000001", + conflicts: "proceed", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/7b3e913368e96eaa6e22e0d03c81310e.asciidoc b/docs/doc_examples/7b3e913368e96eaa6e22e0d03c81310e.asciidoc new file mode 100644 index 000000000..098cad713 --- /dev/null +++ b/docs/doc_examples/7b3e913368e96eaa6e22e0d03c81310e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.store.type": "hybridfs", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7b3f255d28ce5b46d111402b96b41351.asciidoc b/docs/doc_examples/7b3f255d28ce5b46d111402b96b41351.asciidoc new file mode 100644 index 000000000..e563de4f6 --- /dev/null +++ b/docs/doc_examples/7b3f255d28ce5b46d111402b96b41351.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "admin_user", + refresh: "true", + password: "l0ng-r4nd0m-p@ssw0rd", + roles: ["my_admin_role"], + full_name: "Eirian Zola", + metadata: { + intelligence: 7, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/015294a400986295039e52ebc62033be.asciidoc b/docs/doc_examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc similarity index 58% rename from docs/doc_examples/015294a400986295039e52ebc62033be.asciidoc rename to docs/doc_examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc index 324425149..04acf4a95 100644 --- a/docs/doc_examples/015294a400986295039e52ebc62033be.asciidoc +++ b/docs/doc_examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc @@ -4,15 +4,12 @@ [source, js] ---- 
const response = await client.update({ - index: 'test', - id: '1', - body: { - doc: { - name: 'new_name' - }, - detect_noop: false - } -}) -console.log(response) + index: "test", + id: 1, + doc: { + name: "new_name", + }, + doc_as_upsert: true, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/7b7a828c21c856a3cbc41fd2f85108bf.asciidoc b/docs/doc_examples/7b7a828c21c856a3cbc41fd2f85108bf.asciidoc new file mode 100644 index 000000000..ad9af9cfe --- /dev/null +++ b/docs/doc_examples/7b7a828c21c856a3cbc41fd2f85108bf.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.refresh(); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + size: 0, + filter_path: "hits.total", + query: { + range: { + "http.response.bytes": { + lt: 2000000, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc b/docs/doc_examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc new file mode 100644 index 000000000..04eba86cc --- /dev/null +++ b/docs/doc_examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + name: "my-api-key", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7b908b1189f076942de8cd497ff1fa59.asciidoc b/docs/doc_examples/7b908b1189f076942de8cd497ff1fa59.asciidoc index 2107c116d..6c4197dee 100644 --- a/docs/doc_examples/7b908b1189f076942de8cd497ff1fa59.asciidoc +++ b/docs/doc_examples/7b908b1189f076942de8cd497ff1fa59.asciidoc @@ -4,20 +4,13 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'quick brown fox', - type: 'most_fields', - fields: [ - 'title', - 'title.original', - 'title.shingles' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "quick brown fox", + type: "most_fields", + fields: ["title", "title.original", "title.shingles"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/7b9dfe5857bde1bd8483ea3241656714.asciidoc b/docs/doc_examples/7b9dfe5857bde1bd8483ea3241656714.asciidoc new file mode 100644 index 000000000..6fca4f796 --- /dev/null +++ b/docs/doc_examples/7b9dfe5857bde1bd8483ea3241656714.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7c24d4bef3f2045407fbf1b95c5416f9.asciidoc b/docs/doc_examples/7c24d4bef3f2045407fbf1b95c5416f9.asciidoc new file mode 100644 index 000000000..127d9e0d9 --- /dev/null +++ b/docs/doc_examples/7c24d4bef3f2045407fbf1b95c5416f9.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "range_index", + settings: { + number_of_shards: 2, + }, + mappings: { + properties: { + expected_attendees: { + type: "integer_range", + }, + time_frame: { + type: 
"date_range", + format: "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "range_index", + id: 1, + refresh: "true", + document: { + expected_attendees: { + gte: 10, + lt: 20, + }, + time_frame: { + gte: "2015-10-31 12:00:00", + lte: "2015-11-01", + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7c3414279d47e9c29105d061ed316ef8.asciidoc b/docs/doc_examples/7c3414279d47e9c29105d061ed316ef8.asciidoc new file mode 100644 index 000000000..0c0bc6bf3 --- /dev/null +++ b/docs/doc_examples/7c3414279d47e9c29105d061ed316ef8.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "music", + id: 1, + refresh: "true", + document: { + suggest: ["Nevermind", "Nirvana"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7c4551abbb7a5f3841109f7664bc4aad.asciidoc b/docs/doc_examples/7c4551abbb7a5f3841109f7664bc4aad.asciidoc new file mode 100644 index 000000000..36a0b7806 --- /dev/null +++ b/docs/doc_examples/7c4551abbb7a5f3841109f7664bc4aad.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + camel: { + type: "pattern", + pattern: + "([^\\p{L}\\d]+)|(?<=\\D)(?=\\d)|(?<=\\d)(?=\\D)|(?<=[\\p{L}&&[^\\p{Lu}]])(?=\\p{Lu})|(?<=\\p{Lu})(?=\\p{Lu}[\\p{L}&&[^\\p{Lu}]])", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "camel", + text: "MooseX::FTPClass2_beta", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7c5aed55a2a1dce4b63c18e1ce8146ff.asciidoc b/docs/doc_examples/7c5aed55a2a1dce4b63c18e1ce8146ff.asciidoc new file mode 100644 index 000000000..ab7abc2bb --- /dev/null +++ b/docs/doc_examples/7c5aed55a2a1dce4b63c18e1ce8146ff.asciidoc @@ -0,0 +1,109 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "network-traffic", + mappings: { + properties: { + ipv4: { + type: "ip", + }, + ipv6: { + type: "ip", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "network-traffic", + refresh: "true", + operations: [ + { + index: { + _id: 0, + }, + }, + { + ipv4: "192.168.1.10", + ipv6: "2001:db8:a4f8:112a:6001:0:12:7f10", + }, + { + index: { + _id: 1, + }, + }, + { + ipv4: "192.168.1.12", + ipv6: "2001:db8:a4f8:112a:6001:0:12:7f12", + }, + { + index: { + _id: 2, + }, + }, + { + ipv4: "192.168.1.33", + ipv6: "2001:db8:a4f8:112a:6001:0:12:7f33", + }, + { + index: { + _id: 3, + }, + }, + { + ipv4: "192.168.1.10", + ipv6: "2001:db8:a4f8:112a:6001:0:12:7f10", + }, + { + index: { + _id: 4, + }, + }, + { + ipv4: "192.168.2.41", + ipv6: "2001:db8:a4f8:112c:6001:0:12:7f41", + }, + { + index: { + _id: 5, + }, + }, + { + ipv4: "192.168.2.10", + ipv6: "2001:db8:a4f8:112c:6001:0:12:7f10", + }, + { + index: { + _id: 6, + }, + }, + { + ipv4: "192.168.2.23", + ipv6: "2001:db8:a4f8:112c:6001:0:12:7f23", + }, + { + index: { + _id: 7, + }, + }, + { + ipv4: "192.168.3.201", + ipv6: 
"2001:db8:a4f8:114f:6001:0:12:7201", + }, + { + index: { + _id: 8, + }, + }, + { + ipv4: "192.168.3.107", + ipv6: "2001:db8:a4f8:114f:6001:0:12:7307", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7c5e41a7c0075d87b8f8348a6efa990c.asciidoc b/docs/doc_examples/7c5e41a7c0075d87b8f8348a6efa990c.asciidoc new file mode 100644 index 000000000..650854bef --- /dev/null +++ b/docs/doc_examples/7c5e41a7c0075d87b8f8348a6efa990c.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.pauseFollow({ + index: "follower_index", +}); +console.log(response); + +const response1 = await client.indices.close({ + index: "follower_index", +}); +console.log(response1); + +const response2 = await client.ccr.follow({ + index: "follower_index", + wait_for_active_shards: 1, + remote_cluster: "remote_cluster", + leader_index: "leader_index", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc b/docs/doc_examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc new file mode 100644 index 000000000..f9344050e --- /dev/null +++ b/docs/doc_examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.simulate.ingest({}); +console.log(response); +---- diff --git a/docs/doc_examples/7c9076f3e93a8f61189783c736bf6082.asciidoc b/docs/doc_examples/7c9076f3e93a8f61189783c736bf6082.asciidoc new file mode 100644 index 000000000..47e925855 --- /dev/null +++ b/docs/doc_examples/7c9076f3e93a8f61189783c736bf6082.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role2", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + grant: ["event_*"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7ca224d1a7de20a15c008e1b9dbda377.asciidoc b/docs/doc_examples/7ca224d1a7de20a15c008e1b9dbda377.asciidoc new file mode 100644 index 000000000..dd36cf357 --- /dev/null +++ b/docs/doc_examples/7ca224d1a7de20a15c008e1b9dbda377.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + tags: { + terms: { + field: "tags", + missing: "N/A", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc b/docs/doc_examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc new file mode 100644 index 000000000..0719c3219 --- /dev/null +++ b/docs/doc_examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.info({ + node_id: "_all", + metric: "jvm", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfac8d098b50aa0181161bcd17b38ef4.asciidoc b/docs/doc_examples/7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc similarity index 65% rename from docs/doc_examples/dfac8d098b50aa0181161bcd17b38ef4.asciidoc rename to 
docs/doc_examples/7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc index 614fea4b9..837ad5aaf 100644 --- a/docs/doc_examples/dfac8d098b50aa0181161bcd17b38ef4.asciidoc +++ b/docs/doc_examples/7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc @@ -4,13 +4,12 @@ [source, js] ---- const response = await client.indices.putSettings({ - index: 'twitter', - body: { + index: "my-index-000001", + settings: { index: { - refresh_interval: '-1' - } - } -}) -console.log(response) + refresh_interval: null, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/7cf71671859be7c1ecf673396db377cd.asciidoc b/docs/doc_examples/7cf71671859be7c1ecf673396db377cd.asciidoc deleted file mode 100644 index 1b52d5faa..000000000 --- a/docs/doc_examples/7cf71671859be7c1ecf673396db377cd.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test1', - alias: 'alias2', - filter: { - term: { - user: 'kimchy' - } - } - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7d1cbcb545aa19260073dbb2b7ef5074.asciidoc b/docs/doc_examples/7d1cbcb545aa19260073dbb2b7ef5074.asciidoc new file mode 100644 index 000000000..bd1a07cfe --- /dev/null +++ b/docs/doc_examples/7d1cbcb545aa19260073dbb2b7ef5074.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + size: 2, + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + }, + }, + }, + { + product: { + terms: { + field: "product", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7d880157a95f64ad339225d4af71c2de.asciidoc b/docs/doc_examples/7d880157a95f64ad339225d4af71c2de.asciidoc new file mode 100644 index 000000000..35848c635 --- /dev/null +++ b/docs/doc_examples/7d880157a95f64ad339225d4af71c2de.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.suggestUserProfiles({ + name: "jack", + hint: { + uids: [ + "u_8RKO7AKfEbSiIHZkZZ2LJy2MUSDPWDr3tMI_CkIGApU_0", + "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", + ], + labels: { + direction: ["north", "east"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7d9eba51a269571ae62fb8b442b373ce.asciidoc b/docs/doc_examples/7d9eba51a269571ae62fb8b442b373ce.asciidoc new file mode 100644 index 000000000..687099666 --- /dev/null +++ b/docs/doc_examples/7d9eba51a269571ae62fb8b442b373ce.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["lowercase", "custom_stems", "porter_stem"], + }, + }, + filter: { + custom_stems: { + type: "stemmer_override", + rules_path: "analysis/stemmer_override.txt", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc 
b/docs/doc_examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc new file mode 100644 index 000000000..ae1c0059d --- /dev/null +++ b/docs/doc_examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.resetJob({ + job_id: "total-requests", + wait_for_completion: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc b/docs/doc_examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc new file mode 100644 index 000000000..32a9da209 --- /dev/null +++ b/docs/doc_examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + sparse_vector: { + field: "ml.tokens", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + prune: true, + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: false, + }, + }, + }, + rescore: { + window_size: 100, + query: { + rescore_query: { + sparse_vector: { + field: "ml.tokens", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + prune: true, + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: true, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dc6c0a6386289ac6a34105e839ced55.asciidoc b/docs/doc_examples/7dc6c0a6386289ac6a34105e839ced55.asciidoc new file mode 100644 index 000000000..55bc7f4c7 --- /dev/null +++ b/docs/doc_examples/7dc6c0a6386289ac6a34105e839ced55.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + by_date: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + my_rate: { + rate: { + unit: "year", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dc82f7d36686fd57a47e34cbda39a4e.asciidoc b/docs/doc_examples/7dc82f7d36686fd57a47e34cbda39a4e.asciidoc new file mode 100644 index 000000000..d5c2807fa --- /dev/null +++ b/docs/doc_examples/7dc82f7d36686fd57a47e34cbda39a4e.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["delimited_payload"], + text: "the|0 brown|10 fox|5 is|0 quick|10", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dd481337e40f16185f3baa3fc2cce15.asciidoc b/docs/doc_examples/7dd481337e40f16185f3baa3fc2cce15.asciidoc new file mode 100644 index 000000000..4733b1f3d --- /dev/null +++ b/docs/doc_examples/7dd481337e40f16185f3baa3fc2cce15.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + terms: { + _routing: ["user1"], + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc b/docs/doc_examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc new file mode 100644 index 000000000..9d1e63bbd --- /dev/null +++ b/docs/doc_examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_s3_repository", + repository: { + type: "s3", + settings: { + bucket: "my-bucket", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dedb148ff74912de81b8f8275f0d7f3.asciidoc b/docs/doc_examples/7dedb148ff74912de81b8f8275f0d7f3.asciidoc new file mode 100644 index 000000000..a5cf11623 --- /dev/null +++ b/docs/doc_examples/7dedb148ff74912de81b8f8275f0d7f3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + aggs: { + price_ranges: { + terms: { + field: "price_range", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc b/docs/doc_examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc index e331f3326..fd2f71849 100644 --- a/docs/doc_examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc +++ b/docs/doc_examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.tasks.list({ - detailed: 'true', - actions: '*byquery' -}) -console.log(response) + detailed: "true", + actions: "*byquery", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc b/docs/doc_examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc new file mode 100644 index 000000000..62d725868 --- /dev/null +++ b/docs/doc_examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards(); +console.log(response); +---- diff --git a/docs/doc_examples/7e16d21cba51eb8960835b63a1a7266a.asciidoc b/docs/doc_examples/7e16d21cba51eb8960835b63a1a7266a.asciidoc new file mode 100644 index 000000000..de928127b --- /dev/null +++ b/docs/doc_examples/7e16d21cba51eb8960835b63a1a7266a.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_date_formats: ["MM/dd/yyyy"], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + create_date: "09/25/2015", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7e20b6e15e409b02a5e452ceddf1e1e0.asciidoc b/docs/doc_examples/7e20b6e15e409b02a5e452ceddf1e1e0.asciidoc new file mode 100644 index 000000000..4862dab30 --- /dev/null +++ b/docs/doc_examples/7e20b6e15e409b02a5e452ceddf1e1e0.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + 
field: "timestamp", + calendar_interval: "1d", + order: "desc", + }, + }, + }, + { + product: { + terms: { + field: "product", + order: "asc", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e2b9bf4ab353c377b761101775edf93.asciidoc b/docs/doc_examples/7e2b9bf4ab353c377b761101775edf93.asciidoc new file mode 100644 index 000000000..a42e937f1 --- /dev/null +++ b/docs/doc_examples/7e2b9bf4ab353c377b761101775edf93.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "metrics-weather_sensors-dev", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2099-05-06T16:21:15.000Z", + sensor_id: "HAL-000001", + location: "plains", + temperature: 26.7, + humidity: 49.9, + }, + { + create: {}, + }, + { + "@timestamp": "2099-05-06T16:25:42.000Z", + sensor_id: "SYKENET-000001", + location: "swamp", + temperature: 32.4, + humidity: 88.9, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics-weather_sensors-dev", + document: { + "@timestamp": "2099-05-06T16:21:15.000Z", + sensor_id: "SYKENET-000001", + location: "swamp", + temperature: 32.4, + humidity: 88.9, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/0cc991e3f7f8511a34730e154b3c5edc.asciidoc b/docs/doc_examples/7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc similarity index 58% rename from docs/doc_examples/0cc991e3f7f8511a34730e154b3c5edc.asciidoc rename to docs/doc_examples/7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc index b76b0655e..198997e4a 100644 --- a/docs/doc_examples/0cc991e3f7f8511a34730e154b3c5edc.asciidoc +++ b/docs/doc_examples/7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc @@ -4,15 +4,12 @@ [source, js] ---- const response = await client.reindex({ - body: { - source: { - index: 'twitter' - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response) + source: { + index: "my-index-000001", + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/7e48648ca27024831c60b455e836c496.asciidoc b/docs/doc_examples/7e48648ca27024831c60b455e836c496.asciidoc new file mode 100644 index 000000000..8968a05ff --- /dev/null +++ b/docs/doc_examples/7e48648ca27024831c60b455e836c496.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + pinned: { + docs: [ + { + _index: "my-index-000001", + _id: "1", + }, + { + _id: "4", + }, + ], + organic: { + match: { + description: "iphone", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e49705769c42895fb7b1e2ca028ff47.asciidoc b/docs/doc_examples/7e49705769c42895fb7b1e2ca028ff47.asciidoc new file mode 100644 index 000000000..72f26c0a7 --- /dev/null +++ b/docs/doc_examples/7e49705769c42895fb7b1e2ca028ff47.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes(); +console.log(response); +---- diff --git a/docs/doc_examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc b/docs/doc_examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc new file mode 100644 index 000000000..fa38b85db --- /dev/null +++ 
b/docs/doc_examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.clearTrainedModelDeploymentCache({ + model_id: "elastic__distilbert-base-uncased-finetuned-conll03-english", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e5bee18e61d950e823782da1b733903.asciidoc b/docs/doc_examples/7e5bee18e61d950e823782da1b733903.asciidoc new file mode 100644 index 000000000..f63ee943b --- /dev/null +++ b/docs/doc_examples/7e5bee18e61d950e823782da1b733903.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "semantic-embeddings", + query: { + semantic: { + field: "semantic_text", + query: "How to avoid muscle soreness while running?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e5faa551f2c95ffd627da352563d450.asciidoc b/docs/doc_examples/7e5faa551f2c95ffd627da352563d450.asciidoc new file mode 100644 index 000000000..27447be85 --- /dev/null +++ b/docs/doc_examples/7e5faa551f2c95ffd627da352563d450.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping6", + roles: ["example-user"], + enabled: true, + rules: { + field: { + dn: "*,ou=subtree,dc=example,dc=com", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc b/docs/doc_examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc new file mode 100644 index 000000000..629e908bd --- /dev/null +++ b/docs/doc_examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "example-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + term: { + text: "rrf", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [3], + k: 5, + num_candidates: 5, + }, + }, + ], + rank_window_size: 5, + rank_constant: 1, + }, + }, + size: 3, + aggs: { + int_count: { + terms: { + field: "integer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc b/docs/doc_examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc new file mode 100644 index 000000000..5fda2a862 --- /dev/null +++ b/docs/doc_examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_query_rules/my-ruleset", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc b/docs/doc_examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc new file mode 100644 index 000000000..0750d7989 --- /dev/null +++ b/docs/doc_examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.indices.create({ + index: "my-knn-index", + mappings: { + properties: { + "my-vector": { + type: "dense_vector", + dims: 3, + index: true, + similarity: "l2_norm", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-knn-index", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + "my-vector": [1, 5, -20], + }, + { + index: { + _id: "2", + }, + }, + { + "my-vector": [42, 8, -15], + }, + { + index: { + _id: "3", + }, + }, + { + "my-vector": [15, 11, 23], + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7ebfb30b3ece855c1b783d9210939469.asciidoc b/docs/doc_examples/7ebfb30b3ece855c1b783d9210939469.asciidoc new file mode 100644 index 000000000..7207b9cc1 --- /dev/null +++ b/docs/doc_examples/7ebfb30b3ece855c1b783d9210939469.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.flushJob({ + job_id: "total-requests", + advance_time: 1514804400000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7ed26b34ce90192a1563dcddf0e45dc0.asciidoc b/docs/doc_examples/7ed26b34ce90192a1563dcddf0e45dc0.asciidoc new file mode 100644 index 000000000..d61869f16 --- /dev/null +++ b/docs/doc_examples/7ed26b34ce90192a1563dcddf0e45dc0.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + sales_deriv: { + derivative: { + buckets_path: "sales", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7f28f8ae8fcdbd807dadde0b5b007a6d.asciidoc b/docs/doc_examples/7f28f8ae8fcdbd807dadde0b5b007a6d.asciidoc deleted file mode 100644 index 874123199..000000000 --- a/docs/doc_examples/7f28f8ae8fcdbd807dadde0b5b007a6d.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - actors: { - terms: { - field: 'actors', - size: 10 - }, - aggs: { - costars: { - terms: { - field: 'actors', - size: 5 - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc b/docs/doc_examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc new file mode 100644 index 000000000..09db83420 --- /dev/null +++ b/docs/doc_examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + id: "my_watch", + action_modes: { + action1: "force_simulate", + action2: "skip", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7f465b7e8ed42df6c42251b4481e699e.asciidoc b/docs/doc_examples/7f465b7e8ed42df6c42251b4481e699e.asciidoc deleted file mode 100644 index 36b2e4f68..000000000 --- a/docs/doc_examples/7f465b7e8ed42df6c42251b4481e699e.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO 
NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - date: { - type: 'date', - format: 'yyyy-MM-dd' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7f514e9e785e4323d16396359cb184f2.asciidoc b/docs/doc_examples/7f514e9e785e4323d16396359cb184f2.asciidoc new file mode 100644 index 000000000..9303413af --- /dev/null +++ b/docs/doc_examples/7f514e9e785e4323d16396359cb184f2.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "range_index", + properties: { + ip_allowlist: { + type: "ip_range", + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "range_index", + id: 2, + document: { + ip_allowlist: "192.168.0.0/16", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc b/docs/doc_examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc index 1941d7597..c452de4e5 100644 --- a/docs/doc_examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc +++ b/docs/doc_examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - match: { - message: { - query: 'ny city', - auto_generate_synonyms_phrase_query: false - } - } - } - } -}) -console.log(response) + query: { + match: { + message: { + query: "ny city", + auto_generate_synonyms_phrase_query: false, + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/7f697eb436dfa3c30dfe610d8c32d132.asciidoc b/docs/doc_examples/7f697eb436dfa3c30dfe610d8c32d132.asciidoc deleted file mode 100644 index 83143ed69..000000000 --- a/docs/doc_examples/7f697eb436dfa3c30dfe610d8c32d132.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - remote: { - host: 'http://otherhost:9200/', - socket_timeout: '1m', - connect_timeout: '10s' - }, - index: 'source', - query: { - match: { - test: 'data' - } - } - }, - dest: { - index: 'dest' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7f92ddd4e940a37d6227c43fd279c8f5.asciidoc b/docs/doc_examples/7f92ddd4e940a37d6227c43fd279c8f5.asciidoc new file mode 100644 index 000000000..61ec2bb09 --- /dev/null +++ b/docs/doc_examples/7f92ddd4e940a37d6227c43fd279c8f5.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 1, + query: { + match: { + client_ip: "211.11.9.0", + }, + }, + fields: ["*"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc b/docs/doc_examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc new file mode 100644 index 000000000..3abdbb52f --- /dev/null +++ b/docs/doc_examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = 
await client.getScriptContext(); +console.log(response); +---- diff --git a/docs/doc_examples/7fbebf0fc9b4a402917a4723ad547c6a.asciidoc b/docs/doc_examples/7fbebf0fc9b4a402917a4723ad547c6a.asciidoc new file mode 100644 index 000000000..cd01ba6fc --- /dev/null +++ b/docs/doc_examples/7fbebf0fc9b4a402917a4723ad547c6a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my-repo", + repository: { + type: "s3", + settings: { + bucket: "repo-bucket", + client: "elastic-internal-71bcd3", + base_path: "myrepo", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7fd2532f4e12e3efbc58af195060b31e.asciidoc b/docs/doc_examples/7fd2532f4e12e3efbc58af195060b31e.asciidoc new file mode 100644 index 000000000..8306c83a4 --- /dev/null +++ b/docs/doc_examples/7fd2532f4e12e3efbc58af195060b31e.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + text: "The fooBarBaz method", + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + query: { + match: { + text: "bar", + }, + }, + highlight: { + fields: { + text: {}, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7fd5883564d183603e60b37d286ac7e2.asciidoc b/docs/doc_examples/7fd5883564d183603e60b37d286ac7e2.asciidoc new file mode 100644 index 000000000..6fa8a7c88 --- /dev/null +++ b/docs/doc_examples/7fd5883564d183603e60b37d286ac7e2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteExpiredData({ + timeout: "1h", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc b/docs/doc_examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc new file mode 100644 index 000000000..9fdb8dc45 --- /dev/null +++ b/docs/doc_examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\n FROM library\n | EVAL year = DATE_EXTRACT("year", release_date)\n | WHERE page_count > ?page_count AND author == ?author\n | STATS count = COUNT(*) by year\n | WHERE count > ?count\n | LIMIT 5\n ', + params: [ + { + page_count: 300, + }, + { + author: "Frank Herbert", + }, + { + count: 0, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7fe2179705304af5e87eb382dca6235a.asciidoc b/docs/doc_examples/7fe2179705304af5e87eb382dca6235a.asciidoc new file mode 100644 index 000000000..7aa2b01be --- /dev/null +++ b/docs/doc_examples/7fe2179705304af5e87eb382dca6235a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.open({ + index: "logs-my_app-default", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7fe9f0a583e079f7fc6fd64d12b6e9e5.asciidoc 
b/docs/doc_examples/7fe9f0a583e079f7fc6fd64d12b6e9e5.asciidoc new file mode 100644 index 000000000..6e49e53e5 --- /dev/null +++ b/docs/doc_examples/7fe9f0a583e079f7fc6fd64d12b6e9e5.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + "price.weighted": { + type: "double", + script: + "\n double price = doc['price'].value;\n if (doc['promoted'].value) {\n price *= 0.8;\n }\n emit(price);\n ", + }, + }, + query: { + constant_score: { + filter: { + match: { + type: "hat", + }, + }, + }, + }, + aggs: { + hat_prices: { + sum: { + field: "price.weighted", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8baccd8688a6bad1749b8935f9601ea4.asciidoc b/docs/doc_examples/7fef68840761c6982c14ad7af96caf37.asciidoc similarity index 51% rename from docs/doc_examples/8baccd8688a6bad1749b8935f9601ea4.asciidoc rename to docs/doc_examples/7fef68840761c6982c14ad7af96caf37.asciidoc index f83ba6405..88a14a284 100644 --- a/docs/doc_examples/8baccd8688a6bad1749b8935f9601ea4.asciidoc +++ b/docs/doc_examples/7fef68840761c6982c14ad7af96caf37.asciidoc @@ -4,22 +4,21 @@ [source, js] ---- const response = await client.index({ - index: 'my_index', - id: '1', - body: { - group: 'fans', + index: "my-index-000001", + id: 1, + document: { + group: "fans", user: [ { - first: 'John', - last: 'Smith' + first: "John", + last: "Smith", }, { - first: 'Alice', - last: 'White' - } - ] - } -}) -console.log(response) + first: "Alice", + last: "White", + }, + ], + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/7ff4124df0541ee2496034004f4146d4.asciidoc b/docs/doc_examples/7ff4124df0541ee2496034004f4146d4.asciidoc new file mode 100644 index 000000000..cc4fcf208 --- /dev/null +++ b/docs/doc_examples/7ff4124df0541ee2496034004f4146d4.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + tags: { + type: "keyword", + eager_global_ordinals: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/800861c15bb33ca01a46fb97dde7537a.asciidoc b/docs/doc_examples/800861c15bb33ca01a46fb97dde7537a.asciidoc new file mode 100644 index 000000000..30819bdc6 --- /dev/null +++ b/docs/doc_examples/800861c15bb33ca01a46fb97dde7537a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getFilters({ + filter_id: "safe_domains", +}); +console.log(response); +---- diff --git a/docs/doc_examples/803bbc14fbec0e49dfed9fab49c8a7f8.asciidoc b/docs/doc_examples/803bbc14fbec0e49dfed9fab49c8a7f8.asciidoc new file mode 100644 index 000000000..1821e0de4 --- /dev/null +++ b/docs/doc_examples/803bbc14fbec0e49dfed9fab49c8a7f8.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + full_text: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/804a97ff4d0613e6568e4efb19c52021.asciidoc b/docs/doc_examples/804a97ff4d0613e6568e4efb19c52021.asciidoc deleted file mode 100644 index fed037571..000000000 --- a/docs/doc_examples/804a97ff4d0613e6568e4efb19c52021.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.cluster.putSettings({ - body: { - persistent: { - 'action.auto_create_index': 'twitter,index10,-index1*,+ind*' - } - } -}) -console.log(response0) - -const response1 = await client.cluster.putSettings({ - body: { - persistent: { - 'action.auto_create_index': 'false' - } - } -}) -console.log(response1) - -const response2 = await client.cluster.putSettings({ - body: { - persistent: { - 'action.auto_create_index': 'true' - } - } -}) -console.log(response2) ----- - diff --git a/docs/doc_examples/804cdf477ec829740e3d045140400c3b.asciidoc b/docs/doc_examples/804cdf477ec829740e3d045140400c3b.asciidoc new file mode 100644 index 000000000..255b2df23 --- /dev/null +++ b/docs/doc_examples/804cdf477ec829740e3d045140400c3b.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "semantic-embeddings", + mappings: { + properties: { + semantic_text: { + type: "semantic_text", + inference_id: "my-elser-endpoint", + }, + content: { + type: "text", + copy_to: "semantic_text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8051766cadded0892290bc2cc06e145c.asciidoc b/docs/doc_examples/8051766cadded0892290bc2cc06e145c.asciidoc new file mode 100644 index 000000000..c4522e245 --- /dev/null +++ b/docs/doc_examples/8051766cadded0892290bc2cc06e145c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.ackWatch({ + watch_id: "my_watch", + action_id: "action1,action2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/805f5550b90e75aa5cc82b90d8c6c242.asciidoc b/docs/doc_examples/805f5550b90e75aa5cc82b90d8c6c242.asciidoc new file mode 100644 index 000000000..409f1ce03 --- /dev/null +++ b/docs/doc_examples/805f5550b90e75aa5cc82b90d8c6c242.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "news", + query: { + match: { + content: "elasticsearch", + }, + }, + aggs: { + sample: { + sampler: { + shard_size: 100, + }, + aggs: { + keywords: { + significant_text: { + field: "content", + filter_duplicate_text: true, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/807c0c9763f8c1114b3c8278c2a0cb56.asciidoc b/docs/doc_examples/807c0c9763f8c1114b3c8278c2a0cb56.asciidoc new file mode 100644 index 000000000..b10488464 --- /dev/null +++ b/docs/doc_examples/807c0c9763f8c1114b3c8278c2a0cb56.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + all_of: { + ordered: true, + intervals: [ + { + match: { + query: "my favorite 
food", + max_gaps: 0, + ordered: true, + }, + }, + { + any_of: { + intervals: [ + { + match: { + query: "hot water", + }, + }, + { + match: { + query: "cold porridge", + }, + }, + ], + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/808f4db1e2361be77dd6816c1f818139.asciidoc b/docs/doc_examples/808f4db1e2361be77dd6816c1f818139.asciidoc new file mode 100644 index 000000000..83b3a9af5 --- /dev/null +++ b/docs/doc_examples/808f4db1e2361be77dd6816c1f818139.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shardStores({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/80dbaf28d1976dc00de3fe2018067e81.asciidoc b/docs/doc_examples/80dbaf28d1976dc00de3fe2018067e81.asciidoc new file mode 100644 index 000000000..c74de8b5f --- /dev/null +++ b/docs/doc_examples/80dbaf28d1976dc00de3fe2018067e81.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteTemplate({ + name: ".cloud-hot-warm-allocation-0", +}); +console.log(response); +---- diff --git a/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc new file mode 100644 index 000000000..a9e30c9b3 --- /dev/null +++ b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkUpdateApiKeys({}); +console.log(response); +---- diff --git a/docs/doc_examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc b/docs/doc_examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc new file mode 100644 index 000000000..ad1cec287 --- /dev/null +++ b/docs/doc_examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.getSynonymRule({ + set_id: "my-synonyms-set", + rule_id: "test-1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/812a3d7ab461d74efd9136aaf4bcf11c.asciidoc b/docs/doc_examples/812a3d7ab461d74efd9136aaf4bcf11c.asciidoc new file mode 100644 index 000000000..4bea906fc --- /dev/null +++ b/docs/doc_examples/812a3d7ab461d74efd9136aaf4bcf11c.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "range_index", + size: 0, + aggs: { + range_histo: { + histogram: { + field: "expected_attendees", + interval: 5, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/812deb6b7668c7444f3b99d843d2adc1.asciidoc b/docs/doc_examples/812deb6b7668c7444f3b99d843d2adc1.asciidoc new file mode 100644 index 000000000..62f019cdf --- /dev/null +++ b/docs/doc_examples/812deb6b7668c7444f3b99d843d2adc1.asciidoc @@ -0,0 +1,48 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: 
"shapes", + mappings: { + properties: { + geometry: { + type: "shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "shapes", + id: "footprint", + document: { + geometry: { + type: "envelope", + coordinates: [ + [1355, 5355], + [1400, 5200], + ], + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "example", + query: { + shape: { + geometry: { + indexed_shape: { + index: "shapes", + id: "footprint", + path: "geometry", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc b/docs/doc_examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc new file mode 100644 index 000000000..796fe255d --- /dev/null +++ b/docs/doc_examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence by process.pid\n [ file where file.name == "cmd.exe" and process.pid != 2013 ]\n [ process where stringContains(process.executable, "regsvr32") ]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc b/docs/doc_examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc new file mode 100644 index 000000000..814d8e865 --- /dev/null +++ b/docs/doc_examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.count({ + index: "my-index-000001", + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/81612c2537386e031b7eb604f6756a71.asciidoc b/docs/doc_examples/81612c2537386e031b7eb604f6756a71.asciidoc new file mode 100644 index 000000000..ae7191527 --- /dev/null +++ b/docs/doc_examples/81612c2537386e031b7eb604f6756a71.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clone({ + index: "my_source_index", + target: "my_target_index", + settings: { + "index.number_of_shards": 5, + }, + aliases: { + my_search_indices: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8194f1fae6aa72ab91ea559daad932d4.asciidoc b/docs/doc_examples/8194f1fae6aa72ab91ea559daad932d4.asciidoc new file mode 100644 index 000000000..377f726ec --- /dev/null +++ b/docs/doc_examples/8194f1fae6aa72ab91ea559daad932d4.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + max_concurrent_shard_requests: 3, + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/819e00cc6547d925d80090b94e0650d7.asciidoc b/docs/doc_examples/819e00cc6547d925d80090b94e0650d7.asciidoc new file mode 100644 index 000000000..9e33adbad --- /dev/null +++ b/docs/doc_examples/819e00cc6547d925d80090b94e0650d7.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.search({ + index: + "my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001", + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/81c7a392efd505b686eed978fb7d9d17.asciidoc b/docs/doc_examples/81c7a392efd505b686eed978fb7d9d17.asciidoc new file mode 100644 index 000000000..d516dced4 --- /dev/null +++ b/docs/doc_examples/81c7a392efd505b686eed978fb7d9d17.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "english_example", + settings: { + analysis: { + filter: { + english_stop: { + type: "stop", + stopwords: "_english_", + }, + english_keywords: { + type: "keyword_marker", + keywords: ["example"], + }, + english_stemmer: { + type: "stemmer", + language: "english", + }, + english_possessive_stemmer: { + type: "stemmer", + language: "possessive_english", + }, + }, + analyzer: { + rebuilt_english: { + tokenizer: "standard", + filter: [ + "english_possessive_stemmer", + "lowercase", + "english_stop", + "english_keywords", + "english_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/81c9aa2678d6166a9662ddf2c011a6a5.asciidoc b/docs/doc_examples/81c9aa2678d6166a9662ddf2c011a6a5.asciidoc deleted file mode 100644 index de99a4a47..000000000 --- a/docs/doc_examples/81c9aa2678d6166a9662ddf2c011a6a5.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match_none: {} - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/81ee2ad368208c4c78098292547b0577.asciidoc b/docs/doc_examples/81ee2ad368208c4c78098292547b0577.asciidoc new file mode 100644 index 000000000..f7aa01327 --- /dev/null +++ b/docs/doc_examples/81ee2ad368208c4c78098292547b0577.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "admin_user", + roles: ["monitoring"], + rules: { + field: { + dn: "cn=Admin,ou=example,o=com", + }, + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/81ef5774355180fc44d2a52b5182d24a.asciidoc b/docs/doc_examples/81ef5774355180fc44d2a52b5182d24a.asciidoc new file mode 100644 index 000000000..2a175137a --- /dev/null +++ b/docs/doc_examples/81ef5774355180fc44d2a52b5182d24a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + message_stats: { + string_stats: { + field: "message.keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/81f1b1e1d5c81683b6bf471c469e6046.asciidoc b/docs/doc_examples/81f1b1e1d5c81683b6bf471c469e6046.asciidoc new file mode 100644 index 000000000..a16d759b9 --- /dev/null +++ b/docs/doc_examples/81f1b1e1d5c81683b6bf471c469e6046.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate 
the docs examples + +[source, js] +---- +const response = await client.search({ + index: "shirts", + query: { + bool: { + filter: [ + { + term: { + color: "red", + }, + }, + { + term: { + brand: "gucci", + }, + }, + ], + }, + }, + aggs: { + models: { + terms: { + field: "model", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8206a7cc615ad93fec322513b8fdd4fd.asciidoc b/docs/doc_examples/8206a7cc615ad93fec322513b8fdd4fd.asciidoc new file mode 100644 index 000000000..7b2517ea2 --- /dev/null +++ b/docs/doc_examples/8206a7cc615ad93fec322513b8fdd4fd.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "job-candidates", + id: 2, + refresh: "true", + document: { + name: "Jason Response", + programming_languages: ["java", "php"], + required_matches: 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/820f689eaaef15fc07abd1073fa880f8.asciidoc b/docs/doc_examples/820f689eaaef15fc07abd1073fa880f8.asciidoc new file mode 100644 index 000000000..7882ecb00 --- /dev/null +++ b/docs/doc_examples/820f689eaaef15fc07abd1073fa880f8.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + from: 5, + size: 20, + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc b/docs/doc_examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc new file mode 100644 index 000000000..3e052ea88 --- /dev/null +++ b/docs/doc_examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteTrainedModelAlias({ + model_id: "flight-delay-prediction-1574775339910", + model_alias: "flight_delay_model", +}); +console.log(response); +---- diff --git a/docs/doc_examples/821ac598f5f4a795a13f8dd0c0c4d8d6.asciidoc b/docs/doc_examples/821ac598f5f4a795a13f8dd0c0c4d8d6.asciidoc new file mode 100644 index 000000000..463b34a82 --- /dev/null +++ b/docs/doc_examples/821ac598f5f4a795a13f8dd0c0c4d8d6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createDataStream({ + name: "metrics-weather_sensors-dev", +}); +console.log(response); +---- diff --git a/docs/doc_examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc b/docs/doc_examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc new file mode 100644 index 000000000..e01c1c4ce --- /dev/null +++ b/docs/doc_examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.resumeFollow({ + index: "follower_index", + max_read_request_operation_count: 1024, + max_outstanding_read_requests: 16, + max_read_request_size: "1024k", + max_write_request_operation_count: 32768, + max_write_request_size: "16k", + max_outstanding_write_requests: 8, + max_write_buffer_count: 512, + max_write_buffer_size: "512k", + max_retry_delay: 
"10s", + read_poll_timeout: "30s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/827b7e9308ea288f18aea00a5accc38e.asciidoc b/docs/doc_examples/827b7e9308ea288f18aea00a5accc38e.asciidoc new file mode 100644 index 000000000..dd4323962 --- /dev/null +++ b/docs/doc_examples/827b7e9308ea288f18aea00a5accc38e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getComponentTemplate({ + name: "template_1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/82844ef45e11c0eece100d3109db3182.asciidoc b/docs/doc_examples/82844ef45e11c0eece100d3109db3182.asciidoc new file mode 100644 index 000000000..f3f816377 --- /dev/null +++ b/docs/doc_examples/82844ef45e11c0eece100d3109db3182.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/completion/amazon_bedrock_completion", + body: { + service: "amazonbedrock", + service_settings: { + access_key: "", + secret_key: "", + region: "us-east-1", + provider: "amazontitan", + model: "amazon.titan-text-premier-v1:0", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/829a40d484c778a8c58340c7bf09e1d8.asciidoc b/docs/doc_examples/829a40d484c778a8c58340c7bf09e1d8.asciidoc new file mode 100644 index 000000000..8d6320e7a --- /dev/null +++ b/docs/doc_examples/829a40d484c778a8c58340c7bf09e1d8.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: { + operator: "or", + query: "the quick brown", + }, + }, + }, + rescore: { + window_size: 50, + query: { + rescore_query: { + match_phrase: { + message: { + query: "the quick brown", + slop: 2, + }, + }, + }, + query_weight: 0.7, + rescore_query_weight: 1.2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/82d6de3081de7b0664f44adf2942675a.asciidoc b/docs/doc_examples/82d6de3081de7b0664f44adf2942675a.asciidoc new file mode 100644 index 000000000..429967543 --- /dev/null +++ b/docs/doc_examples/82d6de3081de7b0664f44adf2942675a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.getBehavioralAnalytics({ + name: "my_analytics_collection", +}); +console.log(response); +---- diff --git a/docs/doc_examples/82e94b6cdf65e324575f916b3776b779.asciidoc b/docs/doc_examples/82e94b6cdf65e324575f916b3776b779.asciidoc new file mode 100644 index 000000000..a6c307c15 --- /dev/null +++ b/docs/doc_examples/82e94b6cdf65e324575f916b3776b779.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + strings_as_keywords: { + match_mapping_type: "string", + runtime: {}, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/83062a543163370328cf2e21a68c1bd3.asciidoc 
b/docs/doc_examples/83062a543163370328cf2e21a68c1bd3.asciidoc new file mode 100644 index 000000000..71645d3b6 --- /dev/null +++ b/docs/doc_examples/83062a543163370328cf2e21a68c1bd3.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + delete: { + actions: { + wait_for_snapshot: { + policy: "slm-policy-name", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/831f65d700577e11112c711236110f61.asciidoc b/docs/doc_examples/831f65d700577e11112c711236110f61.asciidoc new file mode 100644 index 000000000..701142ed9 --- /dev/null +++ b/docs/doc_examples/831f65d700577e11112c711236110f61.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_email_analyzer: { + type: "pattern", + pattern: "\\W|_", + lowercase: true, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_email_analyzer", + text: "John_Smith@foo-bar.com", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/8330b2ea6317769e52d0647ba434b354.asciidoc b/docs/doc_examples/8330b2ea6317769e52d0647ba434b354.asciidoc new file mode 100644 index 000000000..3692a9350 --- /dev/null +++ b/docs/doc_examples/8330b2ea6317769e52d0647ba434b354.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + routing: "key1", + docs: [ + { + _index: "test", + _id: "1", + routing: "key2", + }, + { + _index: "test", + _id: "2", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8345d2615f43a934fe1871a5120eca1d.asciidoc b/docs/doc_examples/8345d2615f43a934fe1871a5120eca1d.asciidoc new file mode 100644 index 000000000..e9fab2a15 --- /dev/null +++ b/docs/doc_examples/8345d2615f43a934fe1871a5120eca1d.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_ecommerce", + query: { + bool: { + filter: { + term: { + currency: "EUR", + }, + }, + }, + }, + }, + pivot: { + group_by: { + customer_id: { + terms: { + field: "customer_id", + }, + }, + }, + aggregations: { + "total_quantity.sum": { + sum: { + field: "total_quantity", + }, + }, + "taxless_total_price.sum": { + sum: { + field: "taxless_total_price", + }, + }, + "total_quantity.max": { + max: { + field: "total_quantity", + }, + }, + "order_id.cardinality": { + cardinality: { + field: "order_id", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/834764b2fba6cbb41eaabd740be75656.asciidoc b/docs/doc_examples/834764b2fba6cbb41eaabd740be75656.asciidoc new file mode 100644 index 000000000..2b286e62b --- /dev/null +++ b/docs/doc_examples/834764b2fba6cbb41eaabd740be75656.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples 
+ +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_analyzer: { + tokenizer: "standard", + filter: ["keyword_repeat", "porter_stem", "remove_duplicates"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8357aa6099089940589ae3e97e7bcffa.asciidoc b/docs/doc_examples/8357aa6099089940589ae3e97e7bcffa.asciidoc new file mode 100644 index 000000000..aa9ff97c5 --- /dev/null +++ b/docs/doc_examples/8357aa6099089940589ae3e97e7bcffa.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream(); +console.log(response); +---- diff --git a/docs/doc_examples/189a921df2f5b1fe580937210ce9c1c2.asciidoc b/docs/doc_examples/83780c8f5f17eb21064c1ba6e0a7aa10.asciidoc similarity index 61% rename from docs/doc_examples/189a921df2f5b1fe580937210ce9c1c2.asciidoc rename to docs/doc_examples/83780c8f5f17eb21064c1ba6e0a7aa10.asciidoc index 5c38430cc..b31cf25db 100644 --- a/docs/doc_examples/189a921df2f5b1fe580937210ce9c1c2.asciidoc +++ b/docs/doc_examples/83780c8f5f17eb21064c1ba6e0a7aa10.asciidoc @@ -4,16 +4,11 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - match_all: {} + query: { + wrapper: { + query: "eyJ0ZXJtIiA6IHsgInVzZXIuaWQiIDogImtpbWNoeSIgfX0=", }, - stats: [ - 'group1', - 'group2' - ] - } -}) -console.log(response) + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/838a4eabebba4c06100fb37dc30c7722.asciidoc b/docs/doc_examples/838a4eabebba4c06100fb37dc30c7722.asciidoc new file mode 100644 index 000000000..6ff988112 --- /dev/null +++ b/docs/doc_examples/838a4eabebba4c06100fb37dc30c7722.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.putJob({ + id: "sensor", + index_pattern: "sensor-*", + rollup_index: "sensor_rollup", + cron: "*/30 * * * * ?", + page_size: 1000, + groups: { + date_histogram: { + field: "timestamp", + fixed_interval: "1h", + delay: "7d", + }, + terms: { + fields: ["node"], + }, + }, + metrics: [ + { + field: "temperature", + metrics: ["min", "max", "sum"], + }, + { + field: "voltage", + metrics: ["avg"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/839710129a165cf93c6e329abedf9089.asciidoc b/docs/doc_examples/839710129a165cf93c6e329abedf9089.asciidoc new file mode 100644 index 000000000..e21691205 --- /dev/null +++ b/docs/doc_examples/839710129a165cf93c6e329abedf9089.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_security/cross_cluster/api_key", + body: { + name: "my-cross-cluster-api-key", + access: { + search: [ + { + names: ["logs*"], + }, + ], + }, + metadata: { + application: "search", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/839a4b2930856790e34cc9dfeb983284.asciidoc b/docs/doc_examples/839a4b2930856790e34cc9dfeb983284.asciidoc new file mode 100644 index 000000000..4c5a97c46 --- /dev/null +++ b/docs/doc_examples/839a4b2930856790e34cc9dfeb983284.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT 
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + downsample: { + fixed_interval: "1h", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc b/docs/doc_examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc new file mode 100644 index 000000000..7cfc7762b --- /dev/null +++ b/docs/doc_examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "", + aliases: { + "my-write-alias": {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/83cd4eb89818b4c32f654d370eafa920.asciidoc b/docs/doc_examples/83cd4eb89818b4c32f654d370eafa920.asciidoc new file mode 100644 index 000000000..d06f00c1d --- /dev/null +++ b/docs/doc_examples/83cd4eb89818b4c32f654d370eafa920.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "keep_types", + types: [""], + }, + ], + text: "1 quick fox 2 lazy dogs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/83d712b9ffb2e703212b762eba3c521a.asciidoc b/docs/doc_examples/83d712b9ffb2e703212b762eba3c521a.asciidoc new file mode 100644 index 000000000..e386b454b --- /dev/null +++ b/docs/doc_examples/83d712b9ffb2e703212b762eba3c521a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-alias", + ignore_unavailable: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/83d8c920460a12f87b9d5bf65515c367.asciidoc b/docs/doc_examples/83d8c920460a12f87b9d5bf65515c367.asciidoc new file mode 100644 index 000000000..3c1edaaca --- /dev/null +++ b/docs/doc_examples/83d8c920460a12f87b9d5bf65515c367.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_moving_sum: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: + "MovingFunctions.stdDev(values, MovingFunctions.unweightedAvg(values))", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/83dd715e45a5da097123c6d10f22f8f4.asciidoc b/docs/doc_examples/83dd715e45a5da097123c6d10f22f8f4.asciidoc new file mode 100644 index 000000000..ee691348e --- /dev/null +++ b/docs/doc_examples/83dd715e45a5da097123c6d10f22f8f4.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_containing: { + little: { + span_term: { + field1: "foo", + }, + }, + big: { + span_near: { + clauses: [ + { + span_term: { + field1: "bar", + }, + }, 
+ { + span_term: { + field1: "baz", + }, + }, + ], + slop: 5, + in_order: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/83dfd0852101eca3ba8174c9c38b4e73.asciidoc b/docs/doc_examples/83dfd0852101eca3ba8174c9c38b4e73.asciidoc new file mode 100644 index 000000000..e7f4dc8b5 --- /dev/null +++ b/docs/doc_examples/83dfd0852101eca3ba8174c9c38b4e73.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getTemplate({ + name: ".monitoring-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/83f95657beca9bf5d8264c80c7fb463f.asciidoc b/docs/doc_examples/83f95657beca9bf5d8264c80c7fb463f.asciidoc deleted file mode 100644 index f03c37724..000000000 --- a/docs/doc_examples/83f95657beca9bf5d8264c80c7fb463f.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match_phrase: { - message: 'this is a test' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/840b6c5c3d9c56aed854cfab8da04486.asciidoc b/docs/doc_examples/840b6c5c3d9c56aed854cfab8da04486.asciidoc new file mode 100644 index 000000000..cda364d7e --- /dev/null +++ b/docs/doc_examples/840b6c5c3d9c56aed854cfab8da04486.asciidoc @@ -0,0 +1,95 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "file-path-test", + settings: { + analysis: { + analyzer: { + custom_path_tree: { + tokenizer: "custom_hierarchy", + }, + custom_path_tree_reversed: { + tokenizer: "custom_hierarchy_reversed", + }, + }, + tokenizer: { + custom_hierarchy: { + type: "path_hierarchy", + delimiter: "/", + }, + custom_hierarchy_reversed: { + type: "path_hierarchy", + delimiter: "/", + reverse: "true", + }, + }, + }, + }, + mappings: { + properties: { + file_path: { + type: "text", + fields: { + tree: { + type: "text", + analyzer: "custom_path_tree", + }, + tree_reversed: { + type: "text", + analyzer: "custom_path_tree_reversed", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "file-path-test", + id: 1, + document: { + file_path: "/User/alice/photos/2017/05/16/my_photo1.jpg", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "file-path-test", + id: 2, + document: { + file_path: "/User/alice/photos/2017/05/16/my_photo2.jpg", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "file-path-test", + id: 3, + document: { + file_path: "/User/alice/photos/2017/05/16/my_photo3.jpg", + }, +}); +console.log(response3); + +const response4 = await client.index({ + index: "file-path-test", + id: 4, + document: { + file_path: "/User/alice/photos/2017/05/15/my_photo1.jpg", + }, +}); +console.log(response4); + +const response5 = await client.index({ + index: "file-path-test", + id: 5, + document: { + file_path: "/User/bob/photos/2017/05/16/my_photo1.jpg", + }, +}); +console.log(response5); +---- diff --git a/docs/doc_examples/84108653e9e03b4edacd878ec870df77.asciidoc b/docs/doc_examples/84108653e9e03b4edacd878ec870df77.asciidoc new file mode 100644 index 000000000..23119d33f --- /dev/null +++ 
b/docs/doc_examples/84108653e9e03b4edacd878ec870df77.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "hungarian_example", + settings: { + analysis: { + filter: { + hungarian_stop: { + type: "stop", + stopwords: "_hungarian_", + }, + hungarian_keywords: { + type: "keyword_marker", + keywords: ["példa"], + }, + hungarian_stemmer: { + type: "stemmer", + language: "hungarian", + }, + }, + analyzer: { + rebuilt_hungarian: { + tokenizer: "standard", + filter: [ + "lowercase", + "hungarian_stop", + "hungarian_keywords", + "hungarian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/841ad0a70f4271f61f0bac0b467b59c5.asciidoc b/docs/doc_examples/841ad0a70f4271f61f0bac0b467b59c5.asciidoc new file mode 100644 index 000000000..7848732d8 --- /dev/null +++ b/docs/doc_examples/841ad0a70f4271f61f0bac0b467b59c5.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mtermvectors({ + index: "my-index-000001", + docs: [ + { + _id: "2", + fields: ["message"], + term_statistics: true, + }, + { + _id: "1", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/841d8b766902c8e3ae85c228a31383ac.asciidoc b/docs/doc_examples/841d8b766902c8e3ae85c228a31383ac.asciidoc new file mode 100644 index 000000000..bf5b9ae06 --- /dev/null +++ b/docs/doc_examples/841d8b766902c8e3ae85c228a31383ac.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.getAsyncStatus({ + id: "FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", + format: "json", +}); +console.log(response); +---- diff --git a/docs/doc_examples/84243213614fe64930b1d430704afb29.asciidoc b/docs/doc_examples/84243213614fe64930b1d430704afb29.asciidoc new file mode 100644 index 000000000..3e58c1614 --- /dev/null +++ b/docs/doc_examples/84243213614fe64930b1d430704afb29.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + voltage_corrected: { + type: "double", + script: { + source: + "\n emit(doc['voltage'].value * params['multiplier'])\n ", + params: { + multiplier: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3653567181f43a5f64c74f934aa821c2.asciidoc b/docs/doc_examples/84465de841fe5c6099a0382f786f2cb8.asciidoc similarity index 55% rename from docs/doc_examples/3653567181f43a5f64c74f934aa821c2.asciidoc rename to docs/doc_examples/84465de841fe5c6099a0382f786f2cb8.asciidoc index cba7c881c..230633e38 100644 --- a/docs/doc_examples/3653567181f43a5f64c74f934aa821c2.asciidoc +++ b/docs/doc_examples/84465de841fe5c6099a0382f786f2cb8.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.indices.updateAliases({ - body: { - actions: [ - { - remove: { - index: 'test1', - alias: 'alias1' - } - } - ] - } -}) -console.log(response) + actions: [ + { + remove: { + index: "logs-nginx.access-prod", + alias: "logs", + }, + }, + ], +}); +console.log(response); 
---- - diff --git a/docs/doc_examples/84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc b/docs/doc_examples/84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc new file mode 100644 index 000000000..572e30e93 --- /dev/null +++ b/docs/doc_examples/84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "cohere-embeddings", + pipeline: "cohere_embeddings", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc b/docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc new file mode 100644 index 000000000..b0acfaa1d --- /dev/null +++ b/docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + "index.indexing.slowlog.threshold.index.warn": "10s", + "index.indexing.slowlog.threshold.index.info": "5s", + "index.indexing.slowlog.threshold.index.debug": "2s", + "index.indexing.slowlog.threshold.index.trace": "500ms", + "index.indexing.slowlog.source": "1000", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8478c39c71bbb559ef6ab919f918f22b.asciidoc b/docs/doc_examples/8478c39c71bbb559ef6ab919f918f22b.asciidoc new file mode 100644 index 000000000..4bd021e11 --- /dev/null +++ b/docs/doc_examples/8478c39c71bbb559ef6ab919f918f22b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + filter: { + range: { + "@timestamp": { + gte: "now-1d/d", + lt: "now/d", + }, + }, + }, + query: + '\n file where (file.type == "file" and file.name == "cmd.exe")\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/8494d09c39e109a012094eb9d6ec52ac.asciidoc b/docs/doc_examples/8494d09c39e109a012094eb9d6ec52ac.asciidoc new file mode 100644 index 000000000..66286fa8f --- /dev/null +++ b/docs/doc_examples/8494d09c39e109a012094eb9d6ec52ac.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "pipelineA", + description: "inner pipeline", + processors: [ + { + set: { + field: "inner_pipeline_set", + value: "inner", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/84c61160ca815e29e9973ba1380219dd.asciidoc b/docs/doc_examples/84c61160ca815e29e9973ba1380219dd.asciidoc new file mode 100644 index 000000000..ef2135f43 --- /dev/null +++ b/docs/doc_examples/84c61160ca815e29e9973ba1380219dd.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchableSnapshots.stats({ + index: "my-index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/84c69fb07050f0e89720007a6507a221.asciidoc b/docs/doc_examples/84c69fb07050f0e89720007a6507a221.asciidoc new file mode 100644 
index 000000000..902a23594 --- /dev/null +++ b/docs/doc_examples/84c69fb07050f0e89720007a6507a221.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.cancel({ + task_id: "oTUltX4IQMOUUVeiohTt8A:464", +}); +console.log(response); +---- diff --git a/docs/doc_examples/84d6a777a51963629272b1be5698b091.asciidoc b/docs/doc_examples/84d6a777a51963629272b1be5698b091.asciidoc deleted file mode 100644 index c89a1ffb3..000000000 --- a/docs/doc_examples/84d6a777a51963629272b1be5698b091.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'twitter', - body: { - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/84e2cf7417c9e0c9e6f3c23031001440.asciidoc b/docs/doc_examples/84e2cf7417c9e0c9e6f3c23031001440.asciidoc new file mode 100644 index 000000000..06916ca58 --- /dev/null +++ b/docs/doc_examples/84e2cf7417c9e0c9e6f3c23031001440.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.stats(); +console.log(response); +---- diff --git a/docs/doc_examples/84edb44c5b74426f448b2baa101092d6.asciidoc b/docs/doc_examples/84edb44c5b74426f448b2baa101092d6.asciidoc new file mode 100644 index 000000000..0de7ea182 --- /dev/null +++ b/docs/doc_examples/84edb44c5b74426f448b2baa101092d6.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "range_index", + query: { + term: { + expected_attendees: { + value: 12, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/84f2f0cea90340bdd041421afdb58ec3.asciidoc b/docs/doc_examples/84f2f0cea90340bdd041421afdb58ec3.asciidoc new file mode 100644 index 000000000..13c112515 --- /dev/null +++ b/docs/doc_examples/84f2f0cea90340bdd041421afdb58ec3.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index1", + mappings: { + properties: { + comment: { + type: "text", + analyzer: "standard", + fields: { + english: { + type: "text", + analyzer: "english", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/84f3e8524f6ff80e870c03ab71551538.asciidoc b/docs/doc_examples/84f3e8524f6ff80e870c03ab71551538.asciidoc new file mode 100644 index 000000000..4859db54a --- /dev/null +++ b/docs/doc_examples/84f3e8524f6ff80e870c03ab71551538.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + preference: "my-custom-shard-string", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/850bfd0a00d32475a54ac7f87fb4cc4d.asciidoc b/docs/doc_examples/850bfd0a00d32475a54ac7f87fb4cc4d.asciidoc new file mode 100644 
index 000000000..bf886d598 --- /dev/null +++ b/docs/doc_examples/850bfd0a00d32475a54ac7f87fb4cc4d.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + runtime_mappings: { + "measures.voltage": { + type: "double", + script: { + source: + "if (doc['model_number.keyword'].value.equals('HG537PU'))\n {emit(1.7 * params._source['measures']['voltage']);}\n else{emit(params._source['measures']['voltage']);}", + }, + }, + }, + query: { + match: { + model_number: "HG537PU", + }, + }, + fields: ["measures.voltage"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/851f9754dbefc099c54c5423ca4565c0.asciidoc b/docs/doc_examples/851f9754dbefc099c54c5423ca4565c0.asciidoc new file mode 100644 index 000000000..9682a809d --- /dev/null +++ b/docs/doc_examples/851f9754dbefc099c54c5423ca4565c0.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "network-traffic", + size: 0, + aggs: { + "ipv6-subnets": { + ip_prefix: { + field: "ipv6", + prefix_length: 64, + is_ipv6: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/852b394d78b8c79ee0055b5501981a4b.asciidoc b/docs/doc_examples/852b394d78b8c79ee0055b5501981a4b.asciidoc new file mode 100644 index 000000000..9dbde19b3 --- /dev/null +++ b/docs/doc_examples/852b394d78b8c79ee0055b5501981a4b.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + product_name: { + terms: { + field: "product", + missing_bucket: true, + missing_order: "last", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/85479e02af00681210e17e3d0ff51e21.asciidoc b/docs/doc_examples/85479e02af00681210e17e3d0ff51e21.asciidoc new file mode 100644 index 000000000..cbe5e40f7 --- /dev/null +++ b/docs/doc_examples/85479e02af00681210e17e3d0ff51e21.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + date: { + type: "date", + format: "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/85519a614ae18c998986d46bbad82b76.asciidoc b/docs/doc_examples/85519a614ae18c998986d46bbad82b76.asciidoc new file mode 100644 index 000000000..bd8e5bc57 --- /dev/null +++ b/docs/doc_examples/85519a614ae18c998986d46bbad82b76.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my_template", + index_patterns: ["test-*"], + template: { + settings: { + number_of_shards: 1, + number_of_replicas: 1, + "index.lifecycle.name": "my_policy", + "index.lifecycle.rollover_alias": "test-alias", + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc b/docs/doc_examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc new file mode 100644 index 000000000..f6ab408a7 --- /dev/null +++ b/docs/doc_examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/mistral_embeddings", + body: { + service: "mistral", + service_settings: { + api_key: "", + model: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/856c10ad554c26b70f1121454caff40a.asciidoc b/docs/doc_examples/856c10ad554c26b70f1121454caff40a.asciidoc new file mode 100644 index 000000000..561da085c --- /dev/null +++ b/docs/doc_examples/856c10ad554c26b70f1121454caff40a.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "byte-image-index", + knn: { + field: "byte-image-vector", + query_vector: "fb09", + k: 10, + num_candidates: 100, + }, + fields: ["title"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8575c966b004fb124c7afd6bb5827b50.asciidoc b/docs/doc_examples/8575c966b004fb124c7afd6bb5827b50.asciidoc index d99bd96dc..b37a39ab7 100644 --- a/docs/doc_examples/8575c966b004fb124c7afd6bb5827b50.asciidoc +++ b/docs/doc_examples/8575c966b004fb124c7afd6bb5827b50.asciidoc @@ -1,13 +1,16 @@ -[source,js] +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] ---- const response = await client.index({ - index: 'books', + index: "books", document: { - name: 'Snow Crash', - author: 'Neal Stephenson', - release_date: '1992-06-01', + name: "Snow Crash", + author: "Neal Stephenson", + release_date: "1992-06-01", page_count: 470, - } -}) -console.log(response) + }, +}); +console.log(response); ---- diff --git a/docs/doc_examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc b/docs/doc_examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc new file mode 100644 index 000000000..ec8e3ffd4 --- /dev/null +++ b/docs/doc_examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.disk.watermark.low": "90%", + "cluster.routing.allocation.disk.watermark.low.max_headroom": "100GB", + "cluster.routing.allocation.disk.watermark.high": "95%", + "cluster.routing.allocation.disk.watermark.high.max_headroom": "20GB", + "cluster.routing.allocation.disk.watermark.flood_stage": "97%", + "cluster.routing.allocation.disk.watermark.flood_stage.max_headroom": "5GB", + "cluster.routing.allocation.disk.watermark.flood_stage.frozen": "97%", + "cluster.routing.allocation.disk.watermark.flood_stage.frozen.max_headroom": + "5GB", + }, +}); +console.log(response); + +const response1 = await client.indices.putSettings({ + index: "*", + expand_wildcards: "all", + settings: { + "index.blocks.read_only_allow_delete": null, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/858fde15fb0a0340873b123043f8c3b4.asciidoc 
b/docs/doc_examples/858fde15fb0a0340873b123043f8c3b4.asciidoc new file mode 100644 index 000000000..249008681 --- /dev/null +++ b/docs/doc_examples/858fde15fb0a0340873b123043f8c3b4.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + my_text: "histogram_1", + my_histogram: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + my_text: "histogram_2", + my_histogram: { + values: [0.1, 0.25, 0.35, 0.4, 0.45, 0.5], + counts: [8, 17, 8, 7, 6, 2], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/8593715fcc70315a0816b435551258e0.asciidoc b/docs/doc_examples/8593715fcc70315a0816b435551258e0.asciidoc new file mode 100644 index 000000000..aae698a6f --- /dev/null +++ b/docs/doc_examples/8593715fcc70315a0816b435551258e0.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test-index", + mappings: { + properties: { + infer_field: { + type: "semantic_text", + inference_id: "my-elser-endpoint", + }, + source_field: { + type: "text", + copy_to: "infer_field", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/85ae90b63ecba9d2bad16144b054c0a1.asciidoc b/docs/doc_examples/85ae90b63ecba9d2bad16144b054c0a1.asciidoc new file mode 100644 index 000000000..f546521a4 --- /dev/null +++ b/docs/doc_examples/85ae90b63ecba9d2bad16144b054c0a1.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + runtime_mappings: { + release_day_of_week: { + type: "keyword", + script: + "\n emit(doc['release_date'].value.dayOfWeekEnum.toString())\n ", + }, + }, + query: + "\n SELECT * FROM library WHERE page_count > 300 AND author = 'Frank Herbert'\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/85d2e33791f1a74a69dfb04a60e69306.asciidoc b/docs/doc_examples/85d2e33791f1a74a69dfb04a60e69306.asciidoc new file mode 100644 index 000000000..3670bfbae --- /dev/null +++ b/docs/doc_examples/85d2e33791f1a74a69dfb04a60e69306.asciidoc @@ -0,0 +1,58 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "error_logs_alert", + metadata: { + color: "red", + }, + trigger: { + schedule: { + interval: "5m", + }, + }, + input: { + search: { + request: { + indices: "log-events", + body: { + size: 0, + query: { + match: { + status: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 5, + }, + }, + }, + actions: { + email_administrator: { + throttle_period: "15m", + email: { + to: "sys.admino@host.domain", + subject: "Encountered {{ctx.payload.hits.total}} errors", + body: "Too many error in the system, see attached data", + attachments: { + attached_data: { + data: { + format: "json", + }, + }, + }, + priority: "high", + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc b/docs/doc_examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc new file mode 100644 index 000000000..dda69bb3e --- /dev/null +++ b/docs/doc_examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.migration.getFeatureUpgradeStatus(); +console.log(response); +---- diff --git a/docs/doc_examples/85f0e5e8ab91ceab63c21dbedd9f4037.asciidoc b/docs/doc_examples/85f0e5e8ab91ceab63c21dbedd9f4037.asciidoc new file mode 100644 index 000000000..563f66b85 --- /dev/null +++ b/docs/doc_examples/85f0e5e8ab91ceab63c21dbedd9f4037.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "finnish_example", + settings: { + analysis: { + filter: { + finnish_stop: { + type: "stop", + stopwords: "_finnish_", + }, + finnish_keywords: { + type: "keyword_marker", + keywords: ["esimerkki"], + }, + finnish_stemmer: { + type: "stemmer", + language: "finnish", + }, + }, + analyzer: { + rebuilt_finnish: { + tokenizer: "standard", + filter: [ + "lowercase", + "finnish_stop", + "finnish_keywords", + "finnish_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/85f2839beeb71edb66988e5c82188be0.asciidoc b/docs/doc_examples/85f2839beeb71edb66988e5c82188be0.asciidoc new file mode 100644 index 000000000..70e3a8299 --- /dev/null +++ b/docs/doc_examples/85f2839beeb71edb66988e5c82188be0.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.post({ + licenses: [ + { + uid: "893361dc-9749-4997-93cb-802e3d7fa4xx", + type: "basic", + issue_date_in_millis: 1411948800000, + expiry_date_in_millis: 1914278399999, + max_nodes: 1, + issued_to: "issuedTo", + issuer: "issuer", + signature: "xx", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/85f6667f148d16d075493fddf07e2932.asciidoc b/docs/doc_examples/85f6667f148d16d075493fddf07e2932.asciidoc new file mode 100644 index 000000000..4b8133125 --- /dev/null +++ b/docs/doc_examples/85f6667f148d16d075493fddf07e2932.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: ".ds-my-data-stream-2099.03.07-000001", + }, + dest: { + index: "new-data-stream", + op_type: "create", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8619bd17bbfe33490b1f277007f654db.asciidoc b/docs/doc_examples/8619bd17bbfe33490b1f277007f654db.asciidoc new file mode 100644 index 000000000..64b3d669e --- /dev/null +++ b/docs/doc_examples/8619bd17bbfe33490b1f277007f654db.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/rerank/cohere-rerank", + body: { + service: "cohere", + service_settings: { + api_key: "", + model_id: "rerank-english-v3.0", + }, + task_settings: { + top_n: 10, + 
return_documents: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/861f5f61409dc87f3671293b87839ff7.asciidoc b/docs/doc_examples/861f5f61409dc87f3671293b87839ff7.asciidoc new file mode 100644 index 000000000..c9fcd3320 --- /dev/null +++ b/docs/doc_examples/861f5f61409dc87f3671293b87839ff7.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.stats({ + human: "true", + pretty: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc b/docs/doc_examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc new file mode 100644 index 000000000..2d6d44e58 --- /dev/null +++ b/docs/doc_examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getJobs({ + job_id: "high_sum_total_sales", +}); +console.log(response); +---- diff --git a/docs/doc_examples/862907653d1c18d2e80eff7f421200e2.asciidoc b/docs/doc_examples/862907653d1c18d2e80eff7f421200e2.asciidoc new file mode 100644 index 000000000..f6b386605 --- /dev/null +++ b/docs/doc_examples/862907653d1c18d2e80eff7f421200e2.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "saml-example", + roles: ["example_role"], + enabled: true, + rules: { + field: { + "realm.name": "saml1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc b/docs/doc_examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc new file mode 100644 index 000000000..e3b853969 --- /dev/null +++ b/docs/doc_examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "indices.lifecycle.poll_interval": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8634c9993485d622fb12d24f4f242264.asciidoc b/docs/doc_examples/8634c9993485d622fb12d24f4f242264.asciidoc new file mode 100644 index 000000000..7a25d33e8 --- /dev/null +++ b/docs/doc_examples/8634c9993485d622fb12d24f4f242264.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.modifyDataStream({ + actions: [ + { + remove_backing_index: { + data_stream: "my-data-stream", + index: ".ds-my-data-stream-2023.07.26-000001", + }, + }, + { + add_backing_index: { + data_stream: "my-data-stream", + index: ".ds-my-data-stream-2023.07.26-000001-downsample", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/867f7d43a78066731ead2e223960fc07.asciidoc b/docs/doc_examples/867f7d43a78066731ead2e223960fc07.asciidoc new file mode 100644 index 000000000..331002c0c --- /dev/null +++ b/docs/doc_examples/867f7d43a78066731ead2e223960fc07.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "action.destructive_requires_name": false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc b/docs/doc_examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc new file mode 100644 index 000000000..8d712c7ba --- /dev/null +++ b/docs/doc_examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.getPolicy({ + name: "my-policy,other-policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/86926bcebf213ac182d4373027554858.asciidoc b/docs/doc_examples/86926bcebf213ac182d4373027554858.asciidoc new file mode 100644 index 000000000..8baadb02c --- /dev/null +++ b/docs/doc_examples/86926bcebf213ac182d4373027554858.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_index", + mappings: { + properties: { + my_counter: { + type: "unsigned_long", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8696ba08ca6cc4992110c331732e5f47.asciidoc b/docs/doc_examples/8696ba08ca6cc4992110c331732e5f47.asciidoc new file mode 100644 index 000000000..44ced54be --- /dev/null +++ b/docs/doc_examples/8696ba08ca6cc4992110c331732e5f47.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + grade_boxplot: { + boxplot: { + field: "grade", + missing: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8699d35269a47ba867fa8cc766287413.asciidoc b/docs/doc_examples/8699d35269a47ba867fa8cc766287413.asciidoc new file mode 100644 index 000000000..d3af85d46 --- /dev/null +++ b/docs/doc_examples/8699d35269a47ba867fa8cc766287413.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.postStartBasic(); +console.log(response); +---- diff --git a/docs/doc_examples/86c5594c4ec551391096c1abcd652b50.asciidoc b/docs/doc_examples/86c5594c4ec551391096c1abcd652b50.asciidoc new file mode 100644 index 000000000..ec1a73d6b --- /dev/null +++ b/docs/doc_examples/86c5594c4ec551391096c1abcd652b50.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_index", + query: { + match_all: {}, + }, + script_fields: { + count10: { + script: { + source: "Long.divideUnsigned(doc['my_counter'].value, 10)", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8703f3b1b3895543abc36e2a7a0013d3.asciidoc b/docs/doc_examples/8703f3b1b3895543abc36e2a7a0013d3.asciidoc new file mode 100644 index 000000000..102d6de69 --- /dev/null +++ b/docs/doc_examples/8703f3b1b3895543abc36e2a7a0013d3.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples 
+ +[source, js] +---- +const response = await client.indices.create({ + index: "index_1", +}); +console.log(response); + +const response1 = await client.indices.create({ + index: "index_2", +}); +console.log(response1); + +const response2 = await client.indices.create({ + index: "index_3", + settings: { + "index.priority": 10, + }, +}); +console.log(response2); + +const response3 = await client.indices.create({ + index: "index_4", + settings: { + "index.priority": 5, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/871154d08efd7251cf3272e758f06acf.asciidoc b/docs/doc_examples/871154d08efd7251cf3272e758f06acf.asciidoc new file mode 100644 index 000000000..9d1b127fc --- /dev/null +++ b/docs/doc_examples/871154d08efd7251cf3272e758f06acf.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "common_grams_example", + settings: { + analysis: { + analyzer: { + index_grams: { + tokenizer: "whitespace", + filter: ["common_grams"], + }, + }, + filter: { + common_grams: { + type: "common_grams", + common_words: ["a", "is", "the"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8731188553e14134b0a533010318f91a.asciidoc b/docs/doc_examples/8731188553e14134b0a533010318f91a.asciidoc new file mode 100644 index 000000000..476b8e9b5 --- /dev/null +++ b/docs/doc_examples/8731188553e14134b0a533010318f91a.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + terms: { + force: ["British Transport Police"], + }, + }, + aggregations: { + significant_crime_types: { + significant_terms: { + field: "crime_type", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc b/docs/doc_examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc new file mode 100644 index 000000000..88e4bf500 --- /dev/null +++ b/docs/doc_examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.open({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/873e2333734b1cf5ed066596e5f74b0a.asciidoc b/docs/doc_examples/873e2333734b1cf5ed066596e5f74b0a.asciidoc new file mode 100644 index 000000000..966f0118a --- /dev/null +++ b/docs/doc_examples/873e2333734b1cf5ed066596e5f74b0a.asciidoc @@ -0,0 +1,98 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (4.912350 52.374081)", + city: "Amsterdam", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (4.901618 52.369219)", + city: "Amsterdam", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (4.914722 
52.371667)", + city: "Amsterdam", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (4.405200 51.222900)", + city: "Antwerp", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (2.336389 48.861111)", + city: "Paris", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (2.327000 48.860000)", + city: "Paris", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + aggs: { + centroid: { + geo_centroid: { + field: "location", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/873fbbc6ab81409058591385fd602736.asciidoc b/docs/doc_examples/873fbbc6ab81409058591385fd602736.asciidoc index 6722b4fb3..73e8ab273 100644 --- a/docs/doc_examples/873fbbc6ab81409058591385fd602736.asciidoc +++ b/docs/doc_examples/873fbbc6ab81409058591385fd602736.asciidoc @@ -3,47 +3,46 @@ [source, js] ---- -const response0 = await client.index({ - index: 'drivers', - id: '1', - body: { +const response = await client.index({ + index: "drivers", + id: 1, + document: { driver: { - last_name: 'McQueen', + last_name: "McQueen", vehicle: [ { - make: 'Powell Motors', - model: 'Canyonero' + make: "Powell Motors", + model: "Canyonero", }, { - make: 'Miller-Meteor', - model: 'Ecto-1' - } - ] - } - } -}) -console.log(response0) + make: "Miller-Meteor", + model: "Ecto-1", + }, + ], + }, + }, +}); +console.log(response); const response1 = await client.index({ - index: 'drivers', - id: '2', - refresh: true, - body: { + index: "drivers", + id: 2, + refresh: "true", + document: { driver: { - last_name: 'Hudson', + last_name: "Hudson", vehicle: [ { - make: 'Mifune', - model: 'Mach Five' + make: "Mifune", + model: "Mach Five", }, { - make: 'Miller-Meteor', - model: 'Ecto-1' - } - ] - } - } -}) -console.log(response1) + make: "Miller-Meteor", + model: "Ecto-1", + }, + ], + }, + }, +}); +console.log(response1); ---- - diff --git a/docs/doc_examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc b/docs/doc_examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc new file mode 100644 index 000000000..7a998c4c7 --- /dev/null +++ b/docs/doc_examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + "user.id": "kimchy", + }, +}); +console.log(response); + +const response1 = await client.count({ + index: "my-index-000001", + q: "user:kimchy", +}); +console.log(response1); + +const response2 = await client.count({ + index: "my-index-000001", + query: { + term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/8743887d9b89ea1a2d5e780c349972cf.asciidoc b/docs/doc_examples/8743887d9b89ea1a2d5e780c349972cf.asciidoc new file mode 100644 index 000000000..b6f07251e --- /dev/null +++ b/docs/doc_examples/8743887d9b89ea1a2d5e780c349972cf.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "GET /search", + }, + }, + collapse: { + field: "geo.country_name", + inner_hits: { + name: "by_location", + collapse: 
{ + field: "user.id", + }, + size: 3, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/87457bb3467484bec3e9df4e25942ba6.asciidoc b/docs/doc_examples/87457bb3467484bec3e9df4e25942ba6.asciidoc new file mode 100644 index 000000000..238cd5ff2 --- /dev/null +++ b/docs/doc_examples/87457bb3467484bec3e9df4e25942ba6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: "FROM mv | EVAL b=MV_MIN(b) | EVAL b + 2, a + b | LIMIT 4", +}); +console.log(response); +---- diff --git a/docs/doc_examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc b/docs/doc_examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc new file mode 100644 index 000000000..ddbbf086a --- /dev/null +++ b/docs/doc_examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.getBehavioralAnalytics({ + name: "my*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/87733deeea4b441b595d19a0f97346f0.asciidoc b/docs/doc_examples/87733deeea4b441b595d19a0f97346f0.asciidoc new file mode 100644 index 000000000..e2c5d71b0 --- /dev/null +++ b/docs/doc_examples/87733deeea4b441b595d19a0f97346f0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.healthReport({ + feature: "shards_availability", +}); +console.log(response); +---- diff --git a/docs/doc_examples/877ea90c663b5df9efe95717646a666f.asciidoc b/docs/doc_examples/877ea90c663b5df9efe95717646a666f.asciidoc new file mode 100644 index 000000000..7579e5be5 --- /dev/null +++ b/docs/doc_examples/877ea90c663b5df9efe95717646a666f.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + group: { + type: "keyword", + }, + user: { + type: "nested", + properties: { + first: { + type: "keyword", + }, + last: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + group: "fans", + user: [ + { + first: "John", + last: "Smith", + }, + { + first: "Alice", + last: "White", + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + fields: ["*"], + _source: false, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/87846c3ddacab1da4af626ae8099e4be.asciidoc b/docs/doc_examples/87846c3ddacab1da4af626ae8099e4be.asciidoc new file mode 100644 index 000000000..6b632a678 --- /dev/null +++ b/docs/doc_examples/87846c3ddacab1da4af626ae8099e4be.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "basic_user", + roles: ["user"], + rules: { + field: { + dn: "cn=John Doe,ou=example,o=com", + }, + }, + enabled: true, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc b/docs/doc_examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc new file mode 100644 index 000000000..c199db648 --- /dev/null +++ b/docs/doc_examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodeattrs({ + v: "true", + h: "name,pid,attr,value", +}); +console.log(response); +---- diff --git a/docs/doc_examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc b/docs/doc_examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc new file mode 100644 index 000000000..5d65f861a --- /dev/null +++ b/docs/doc_examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRoles({ + name: "my_admin_role,my_test_role", +}); +console.log(response); +---- diff --git a/docs/doc_examples/87c42ef733a50954e4d757fc0a08decc.asciidoc b/docs/doc_examples/87c42ef733a50954e4d757fc0a08decc.asciidoc new file mode 100644 index 000000000..a5245804c --- /dev/null +++ b/docs/doc_examples/87c42ef733a50954e4d757fc0a08decc.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-api-key-1", + metadata: { + application: "my-application", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/87d970b4944b6d742c484d7184996c8a.asciidoc b/docs/doc_examples/87d970b4944b6d742c484d7184996c8a.asciidoc new file mode 100644 index 000000000..f258702ac --- /dev/null +++ b/docs/doc_examples/87d970b4944b6d742c484d7184996c8a.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + query_string: "Where is the best place for mountain climbing?", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/87f854393d715aabf4d45e90a8eb74ce.asciidoc b/docs/doc_examples/87f854393d715aabf4d45e90a8eb74ce.asciidoc new file mode 100644 index 000000000..fd8b35e40 --- /dev/null +++ b/docs/doc_examples/87f854393d715aabf4d45e90a8eb74ce.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "reviews", + size: 0, + aggs: { + review_variability: { + median_absolute_deviation: { + field: "rating", + missing: 5, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/88195d87a350e7fff200131f410c3e88.asciidoc b/docs/doc_examples/88195d87a350e7fff200131f410c3e88.asciidoc new file mode 100644 index 000000000..7078a7afc --- /dev/null +++ b/docs/doc_examples/88195d87a350e7fff200131f410c3e88.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + aggs: { + price_ranges: { + range: { + field: "price", + keyed: true, + ranges: [ + { + to: 100, + }, + { 
+ from: 100, + to: 200, + }, + { + from: 200, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/88341b4eba71ec722f3e38fa1696fe87.asciidoc b/docs/doc_examples/88341b4eba71ec722f3e38fa1696fe87.asciidoc new file mode 100644 index 000000000..5933f7fad --- /dev/null +++ b/docs/doc_examples/88341b4eba71ec722f3e38fa1696fe87.asciidoc @@ -0,0 +1,56 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_ecommerce", + }, + dest: { + index: "sample_ecommerce_orders_by_customer", + }, + pivot: { + group_by: { + user: { + terms: { + field: "user", + }, + }, + customer_id: { + terms: { + field: "customer_id", + }, + }, + }, + aggregations: { + order_count: { + value_count: { + field: "order_id", + }, + }, + total_order_amt: { + sum: { + field: "taxful_total_price", + }, + }, + avg_amt_per_order: { + avg: { + field: "taxful_total_price", + }, + }, + avg_unique_products_per_order: { + avg: { + field: "total_unique_products", + }, + }, + total_unique_products: { + cardinality: { + field: "products.product_id", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/88554b79dba8fd79991855a692b69ff9.asciidoc b/docs/doc_examples/88554b79dba8fd79991855a692b69ff9.asciidoc new file mode 100644 index 000000000..0904a2fe0 --- /dev/null +++ b/docs/doc_examples/88554b79dba8fd79991855a692b69ff9.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.graph.explore({ + index: "clicklogs", + query: { + match: { + "query.raw": "midi", + }, + }, + controls: { + use_significance: false, + sample_size: 2000, + timeout: 2000, + sample_diversity: { + field: "category.raw", + max_docs_per_value: 500, + }, + }, + vertices: [ + { + field: "product", + size: 5, + min_doc_count: 10, + shard_min_doc_count: 3, + }, + ], + connections: { + query: { + bool: { + filter: [ + { + range: { + query_time: { + gte: "2015-10-01 00:00:00", + }, + }, + }, + ], + }, + }, + vertices: [ + { + field: "query.raw", + size: 5, + min_doc_count: 10, + shard_min_doc_count: 3, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8871b8fcb6de4f0c7dff22798fb10fb7.asciidoc b/docs/doc_examples/8871b8fcb6de4f0c7dff22798fb10fb7.asciidoc deleted file mode 100644 index 4ab63e7e5..000000000 --- a/docs/doc_examples/8871b8fcb6de4f0c7dff22798fb10fb7.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - index: 'twitter' - }, - dest: { - index: 'new_twitter', - version_type: 'external' - }, - script: { - source: "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}", - lang: 'painless' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/88a08d0b15ef41324f5c23db533d47d1.asciidoc b/docs/doc_examples/88a08d0b15ef41324f5c23db533d47d1.asciidoc new file mode 100644 index 000000000..667429e4b --- /dev/null +++ b/docs/doc_examples/88a08d0b15ef41324f5c23db533d47d1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/88a283dfccc481f1afba79d9b3c61f51.asciidoc b/docs/doc_examples/88a283dfccc481f1afba79d9b3c61f51.asciidoc new file mode 100644 index 000000000..fe244468b --- /dev/null +++ b/docs/doc_examples/88a283dfccc481f1afba79d9b3c61f51.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryUser(); +console.log(response); +---- diff --git a/docs/doc_examples/88b19973b970adf9b73fca82017d4951.asciidoc b/docs/doc_examples/88b19973b970adf9b73fca82017d4951.asciidoc new file mode 100644 index 000000000..6fb12eca1 --- /dev/null +++ b/docs/doc_examples/88b19973b970adf9b73fca82017d4951.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-*", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc b/docs/doc_examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc new file mode 100644 index 000000000..c37e3b663 --- /dev/null +++ b/docs/doc_examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.getStatus({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + keep_alive: "5d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/88cf60d3310a56d8ae12704abc05b565.asciidoc b/docs/doc_examples/88cf60d3310a56d8ae12704abc05b565.asciidoc new file mode 100644 index 000000000..0c4c71725 --- /dev/null +++ b/docs/doc_examples/88cf60d3310a56d8ae12704abc05b565.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.getTrialStatus(); +console.log(response); +---- diff --git a/docs/doc_examples/88ec7fa6768a7e13cd2158667a69e97f.asciidoc b/docs/doc_examples/88ec7fa6768a7e13cd2158667a69e97f.asciidoc new file mode 100644 index 000000000..517644e96 --- /dev/null +++ b/docs/doc_examples/88ec7fa6768a7e13cd2158667a69e97f.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + runtime_mappings: { + day_of_week: { + type: "keyword", + script: + "\n emit(doc['timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ROOT))\n ", + }, + }, + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + dow: { + terms: { + field: "day_of_week", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8963fb1e3d0900ba3b68be212e8972ee.asciidoc b/docs/doc_examples/8963fb1e3d0900ba3b68be212e8972ee.asciidoc new file mode 100644 index 000000000..800021f23 --- /dev/null +++ b/docs/doc_examples/8963fb1e3d0900ba3b68be212e8972ee.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// 
Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + names: { + type: "text", + position_increment_gap: 0, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + names: ["John Abraham", "Lincoln Smith"], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match_phrase: { + names: "Abraham Lincoln", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/897668edcbb0785fa5229aeb2dfc963e.asciidoc b/docs/doc_examples/897668edcbb0785fa5229aeb2dfc963e.asciidoc new file mode 100644 index 000000000..3a2727d45 --- /dev/null +++ b/docs/doc_examples/897668edcbb0785fa5229aeb2dfc963e.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + query: { + match: { + message: "bonsai tree", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/899eef71a67a1b2aa11a2166ec7f48f1.asciidoc b/docs/doc_examples/899eef71a67a1b2aa11a2166ec7f48f1.asciidoc deleted file mode 100644 index 5c91f566b..000000000 --- a/docs/doc_examples/899eef71a67a1b2aa11a2166ec7f48f1.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - price: { - unmapped_type: 'long' - } - } - ], - query: { - term: { - product: 'chocolate' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/89a6b24618cafd60de1702a5b9f28a8d.asciidoc b/docs/doc_examples/89a6b24618cafd60de1702a5b9f28a8d.asciidoc new file mode 100644 index 000000000..13d14c876 --- /dev/null +++ b/docs/doc_examples/89a6b24618cafd60de1702a5b9f28a8d.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + suggest: { + text: "noble prize", + simple_phrase: { + phrase: { + field: "title.trigram", + size: 1, + direct_generator: [ + { + field: "title.trigram", + suggest_mode: "always", + min_word_length: 1, + }, + ], + collate: { + query: { + source: { + match: { + "{{field_name}}": "{{suggestion}}", + }, + }, + }, + params: { + field_name: "title", + }, + prune: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/89aed93f641a5e243bdc3ee5cdc2acc6.asciidoc b/docs/doc_examples/89aed93f641a5e243bdc3ee5cdc2acc6.asciidoc new file mode 100644 index 000000000..88aba54b9 --- /dev/null +++ b/docs/doc_examples/89aed93f641a5e243bdc3ee5cdc2acc6.asciidoc @@ -0,0 +1,56 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1", "index2"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "bool": {\n "should": [\n {{#text}}\n {\n "multi_match": {\n "query": 
"{{query_string}}",\n "fields": [{{#text_fields}}"{{name}}^{{boost}}",{{/text_fields}}],\n "boost": "{{text_query_boost}}"\n }\n },\n {{/text}}\n {{#elser}}\n {{#elser_fields}}\n {\n "sparse_vector": {\n "field": "ml.inference.{{.}}_expanded.predicted_value",\n "inference_id": "",\n "query": "{{query_string}}"\n }\n },\n {{/elser_fields}}\n { "bool": { "must": [] } },\n {{/elser}}\n {{^text}}\n {{^elser}}\n {\n "query_string": {\n "query": "{{query_string}}",\n "default_field": "{{default_field}}",\n "default_operator": "{{default_operator}}",\n "boost": "{{text_query_boost}}"\n }\n },\n {{/elser}}\n {{/text}}\n { "bool": { "must": [] } }\n ],\n "minimum_should_match": 1\n }\n },\n "min_score": "{{min_score}}",\n "explain": "{{explain}}",\n "from": "{{from}}",\n "size": "{{size}}"\n }\n ', + params: { + text: false, + elser: false, + elser_fields: [ + { + name: "title", + boost: 1, + }, + { + name: "description", + boost: 1, + }, + ], + text_fields: [ + { + name: "title", + boost: 10, + }, + { + name: "description", + boost: 5, + }, + { + name: "state", + boost: 1, + }, + ], + query_string: "*", + text_query_boost: 4, + default_field: "*", + default_operator: "OR", + explain: false, + from: 0, + size: 10, + min_score: 0, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc b/docs/doc_examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc new file mode 100644 index 000000000..d0d409240 --- /dev/null +++ b/docs/doc_examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_repository", + repository: { + type: "fs", + settings: { + location: "my_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/89c57917bc7bd2e6387b5eb54ece37b1.asciidoc b/docs/doc_examples/89c57917bc7bd2e6387b5eb54ece37b1.asciidoc new file mode 100644 index 000000000..fe5711134 --- /dev/null +++ b/docs/doc_examples/89c57917bc7bd2e6387b5eb54ece37b1.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.count({ + index: "my-index-000001", + query: { + exists: { + field: "my-field", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc b/docs/doc_examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc new file mode 100644 index 000000000..700b24426 --- /dev/null +++ b/docs/doc_examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: "my_source_index", + block: "write", +}); +console.log(response); +---- diff --git a/docs/doc_examples/89dee10a24ea2727af5b00039a4271bd.asciidoc b/docs/doc_examples/89dee10a24ea2727af5b00039a4271bd.asciidoc new file mode 100644 index 000000000..4b84700b5 --- /dev/null +++ b/docs/doc_examples/89dee10a24ea2727af5b00039a4271bd.asciidoc @@ -0,0 +1,161 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: 
"tour", + mappings: { + properties: { + city: { + type: "keyword", + time_series_dimension: true, + }, + category: { + type: "keyword", + }, + route: { + type: "long", + }, + name: { + type: "keyword", + }, + location: { + type: "geo_point", + }, + "@timestamp": { + type: "date", + }, + }, + }, + settings: { + index: { + mode: "time_series", + routing_path: ["city"], + time_series: { + start_time: "2023-01-01T00:00:00Z", + end_time: "2024-01-01T00:00:00Z", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "tour", + refresh: "true", + operations: [ + { + index: {}, + }, + { + "@timestamp": "2023-01-02T09:00:00Z", + route: 0, + location: "POINT(4.889187 52.373184)", + city: "Amsterdam", + category: "Attraction", + name: "Royal Palace Amsterdam", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-02T10:00:00Z", + route: 1, + location: "POINT(4.885057 52.370159)", + city: "Amsterdam", + category: "Attraction", + name: "The Amsterdam Dungeon", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-02T13:00:00Z", + route: 2, + location: "POINT(4.901618 52.369219)", + city: "Amsterdam", + category: "Museum", + name: "Museum Het Rembrandthuis", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-02T16:00:00Z", + route: 3, + location: "POINT(4.912350 52.374081)", + city: "Amsterdam", + category: "Museum", + name: "NEMO Science Museum", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-03T12:00:00Z", + route: 4, + location: "POINT(4.914722 52.371667)", + city: "Amsterdam", + category: "Museum", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-04T09:00:00Z", + route: 5, + location: "POINT(4.401384 51.220292)", + city: "Antwerp", + category: "Attraction", + name: "Cathedral of Our Lady", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-04T12:00:00Z", + route: 6, + location: "POINT(4.405819 51.221758)", + city: "Antwerp", + category: "Museum", + name: "Snijders&Rockoxhuis", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-04T15:00:00Z", + route: 7, + location: "POINT(4.405200 51.222900)", + city: "Antwerp", + category: "Museum", + name: "Letterenhuis", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-05T10:00:00Z", + route: 8, + location: "POINT(2.336389 48.861111)", + city: "Paris", + category: "Museum", + name: "Musée du Louvre", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-05T14:00:00Z", + route: 9, + location: "POINT(2.327000 48.860000)", + city: "Paris", + category: "Museum", + name: "Musée dOrsay", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc b/docs/doc_examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc new file mode 100644 index 000000000..03db68d26 --- /dev/null +++ b/docs/doc_examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["snowball"], + text: "detailed output", + explain: true, + attributes: ["keyword"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8a12cd824404d74f098d854716a26899.asciidoc b/docs/doc_examples/8a12cd824404d74f098d854716a26899.asciidoc new file mode 100644 index 000000000..0de5dd4f6 --- /dev/null +++ b/docs/doc_examples/8a12cd824404d74f098d854716a26899.asciidoc @@ -0,0 +1,10 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteDatafeed({ + datafeed_id: "datafeed-total-requests", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc b/docs/doc_examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc new file mode 100644 index 000000000..d88b6d247 --- /dev/null +++ b/docs/doc_examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.get({ + task_id: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc b/docs/doc_examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc new file mode 100644 index 000000000..75db49a3d --- /dev/null +++ b/docs/doc_examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: { + source: + "if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }", + lang: "painless", + params: { + tag: "blue", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8a355eb25d2a01ba62dc1a22dd46f46f.asciidoc b/docs/doc_examples/8a355eb25d2a01ba62dc1a22dd46f46f.asciidoc deleted file mode 100644 index 9dcb0ef46..000000000 --- a/docs/doc_examples/8a355eb25d2a01ba62dc1a22dd46f46f.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'sales', - size: '0', - body: { - aggs: { - sales_over_time: { - date_histogram: { - field: 'date', - calendar_interval: '1M', - format: 'yyyy-MM-dd' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/8a4941cae0b32d68b22bec2d12c82860.asciidoc b/docs/doc_examples/8a4941cae0b32d68b22bec2d12c82860.asciidoc new file mode 100644 index 000000000..120f4e523 --- /dev/null +++ b/docs/doc_examples/8a4941cae0b32d68b22bec2d12c82860.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence by process.pid with maxspan=1h\n [ process where process.name == "regsvr32.exe" ]\n [ file where stringContains(file.name, "scrobj.dll") ]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/8a617dbfe5887f8ecc8815de132b6eb0.asciidoc b/docs/doc_examples/8a617dbfe5887f8ecc8815de132b6eb0.asciidoc new file mode 100644 index 000000000..b481e842e --- /dev/null +++ b/docs/doc_examples/8a617dbfe5887f8ecc8815de132b6eb0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "cross-cluster-kibana", + password: "l0ng-r4nd0m-p@ssw0rd", + roles: ["logstash-reader", "kibana-access"], +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc b/docs/doc_examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc new file mode 100644 index 000000000..c6532bbde --- /dev/null +++ b/docs/doc_examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.explainDataFrameAnalytics({ + source: { + index: "houses_sold_last_10_yrs", + }, + analysis: { + regression: { + dependent_variable: "price", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8aa74aee3dcf4b34028e4c5e1c1ed27b.asciidoc b/docs/doc_examples/8aa74aee3dcf4b34028e4c5e1c1ed27b.asciidoc new file mode 100644 index 000000000..2146d1c0b --- /dev/null +++ b/docs/doc_examples/8aa74aee3dcf4b34028e4c5e1c1ed27b.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "bug_reports", + mappings: { + properties: { + title: { + type: "text", + }, + labels: { + type: "flattened", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "bug_reports", + id: 1, + document: { + title: "Results are not sorted correctly.", + labels: { + priority: "urgent", + release: ["v1.2.5", "v1.3.0"], + timestamp: { + created: 1541458026, + closed: 1541457010, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/8ab11a25e017124a70484781ca11fb52.asciidoc b/docs/doc_examples/8ab11a25e017124a70484781ca11fb52.asciidoc new file mode 100644 index 000000000..abbd63ece --- /dev/null +++ b/docs/doc_examples/8ab11a25e017124a70484781ca11fb52.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + filter_path: "-hits.events", + query: '\n any where process.name == "regsvr32.exe" \n ', + size: 200, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8acc1d67b152e7027e0f0e1a8b4b2431.asciidoc b/docs/doc_examples/8acc1d67b152e7027e0f0e1a8b4b2431.asciidoc deleted file mode 100644 index 5f830bd88..000000000 --- a/docs/doc_examples/8acc1d67b152e7027e0f0e1a8b4b2431.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'twitter', - routing: 'kimchy', - body: { - query: { - bool: { - must: { - query_string: { - query: 'some query string here' - } - }, - filter: { - term: { - user: 'kimchy' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/8b07372a21a10a16b52e70fc0c87ad4e.asciidoc b/docs/doc_examples/8b07372a21a10a16b52e70fc0c87ad4e.asciidoc new file mode 100644 index 000000000..f93c9a6ad --- /dev/null +++ b/docs/doc_examples/8b07372a21a10a16b52e70fc0c87ad4e.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + region: "US", + manager: { + age: 30, + name: { + first: "John", + last: "Smith", + }, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/8b301122cbf42be6eafeda714a36559e.asciidoc b/docs/doc_examples/8b301122cbf42be6eafeda714a36559e.asciidoc new file mode 100644 index 000000000..fe63d2690 --- /dev/null +++ b/docs/doc_examples/8b301122cbf42be6eafeda714a36559e.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.logstash.putPipeline({ + id: "my_pipeline", + pipeline: { + description: "Sample pipeline for illustration purposes", + last_modified: "2021-01-02T02:50:51.250Z", + pipeline_metadata: { + type: "logstash_pipeline", + version: "1", + }, + username: "elastic", + pipeline: "input {}\n filter { grok {} }\n output {}", + pipeline_settings: { + "pipeline.workers": 1, + "pipeline.batch.size": 125, + "pipeline.batch.delay": 50, + "queue.type": "memory", + "queue.max_bytes": "1gb", + "queue.checkpoint.writes": 1024, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b38eeb41eb388ee6d92f26b5c0cc48d.asciidoc b/docs/doc_examples/8b38eeb41eb388ee6d92f26b5c0cc48d.asciidoc new file mode 100644 index 000000000..9d2e6845a --- /dev/null +++ b/docs/doc_examples/8b38eeb41eb388ee6d92f26b5c0cc48d.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-prod-tag-script", + script: { + lang: "painless", + source: + "\n Collection tags = ctx.tags;\n if(tags != null){\n for (String tag : tags) {\n if (tag.toLowerCase().contains('prod')) {\n return false;\n }\n }\n }\n return true;\n ", + }, +}); +console.log(response); + +const response1 = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + drop: { + description: "Drop documents that don't contain 'prod' tag", + if: { + id: "my-prod-tag-script", + }, + }, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc b/docs/doc_examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc new file mode 100644 index 000000000..6a76e6bcd --- /dev/null +++ b/docs/doc_examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getRoleMapping({ + name: "mapping1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b5bc6e217b0d33e4c88d84f5c1a0712.asciidoc b/docs/doc_examples/8b5bc6e217b0d33e4c88d84f5c1a0712.asciidoc new file mode 100644 index 000000000..7f67c6988 --- /dev/null +++ b/docs/doc_examples/8b5bc6e217b0d33e4c88d84f5c1a0712.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + products_without_a_price: { + missing: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b652e3205a5e9e0187f56ce3c36ae4e.asciidoc b/docs/doc_examples/8b652e3205a5e9e0187f56ce3c36ae4e.asciidoc new file mode 100644 index 000000000..fad8ef639 --- /dev/null +++ b/docs/doc_examples/8b652e3205a5e9e0187f56ce3c36ae4e.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "log-messages", + filter_path: "aggregations", + aggs: { + categories: { + categorize_text: { + field: "message", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b7956a2b88fd798a895d3466d671b58.asciidoc b/docs/doc_examples/8b7956a2b88fd798a895d3466d671b58.asciidoc new file mode 100644 index 000000000..6216793f1 --- /dev/null +++ b/docs/doc_examples/8b7956a2b88fd798a895d3466d671b58.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "http.tracer.include": "*", + "http.tracer.exclude": "", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc b/docs/doc_examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc new file mode 100644 index 000000000..c92fe8266 --- /dev/null +++ b/docs/doc_examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/_sync_job/my-connector-sync-job/_error", + body: { + error: "some-error", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8bf51fd50195b46bacbf872f460ebec2.asciidoc b/docs/doc_examples/8bf51fd50195b46bacbf872f460ebec2.asciidoc new file mode 100644 index 000000000..84bfecb7b --- /dev/null +++ b/docs/doc_examples/8bf51fd50195b46bacbf872f460ebec2.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + versions: { + type: "version", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + versions: ["8.0.0-beta1", "8.5.0", "0.90.12", "2.6.1", "1.3.4", "1.3.4"], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/8c2060b0272556457f4871c5d7a589fd.asciidoc b/docs/doc_examples/8c2060b0272556457f4871c5d7a589fd.asciidoc new file mode 100644 index 000000000..b2e2c8b0f --- /dev/null +++ b/docs/doc_examples/8c2060b0272556457f4871c5d7a589fd.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "logstash-reader", + indices: [ + { + names: ["logstash-*"], + privileges: ["read", "view_index_metadata"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc b/docs/doc_examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc new file mode 100644 index 000000000..226992d54 --- /dev/null +++ b/docs/doc_examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteFilter({ + filter_id: "safe_domains", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/8c619666488927dac6ecb7dcebca44c2.asciidoc b/docs/doc_examples/8c619666488927dac6ecb7dcebca44c2.asciidoc new file mode 100644 index 000000000..d637e9ff4 --- /dev/null +++ b/docs/doc_examples/8c619666488927dac6ecb7dcebca44c2.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "cohere-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1024, + element_type: "byte", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c693e057f6e85fbf2b56ca442719362.asciidoc b/docs/doc_examples/8c693e057f6e85fbf2b56ca442719362.asciidoc new file mode 100644 index 000000000..da0b3217f --- /dev/null +++ b/docs/doc_examples/8c693e057f6e85fbf2b56ca442719362.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "stats-index", + size: 0, + aggs: { + metric_min: { + min: { + field: "agg_metric", + }, + }, + metric_max: { + max: { + field: "agg_metric", + }, + }, + metric_value_count: { + value_count: { + field: "agg_metric", + }, + }, + metric_sum: { + sum: { + field: "agg_metric", + }, + }, + metric_avg: { + avg: { + field: "agg_metric", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc b/docs/doc_examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc new file mode 100644 index 000000000..756573630 --- /dev/null +++ b/docs/doc_examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDataFrameAnalytics({ + id: "model-flight-delays-pre", + source: { + index: ["kibana_sample_data_flights"], + query: { + range: { + DistanceKilometers: { + gt: 0, + }, + }, + }, + _source: { + includes: [], + excludes: ["FlightDelay", "FlightDelayType"], + }, + }, + dest: { + index: "df-flight-delays", + results_field: "ml-results", + }, + analysis: { + regression: { + dependent_variable: "FlightDelayMin", + training_percent: 90, + }, + }, + analyzed_fields: { + includes: [], + excludes: ["FlightNum"], + }, + model_memory_limit: "100mb", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c8b5224befab7804461c7e7b6086d9a.asciidoc b/docs/doc_examples/8c8b5224befab7804461c7e7b6086d9a.asciidoc new file mode 100644 index 000000000..05fa659e7 --- /dev/null +++ b/docs/doc_examples/8c8b5224befab7804461c7e7b6086d9a.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Document with ID 1", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + text: "Document with ID 2", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + terms: { + _id: ["1", "2"], + }, + }, +}); +console.log(response2); +---- diff --git 
a/docs/doc_examples/8c9081dc738d1290fd76071b283fcaec.asciidoc b/docs/doc_examples/8c9081dc738d1290fd76071b283fcaec.asciidoc new file mode 100644 index 000000000..19241e817 --- /dev/null +++ b/docs/doc_examples/8c9081dc738d1290fd76071b283fcaec.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-index-000001", + id: 2, + routing: "user1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc b/docs/doc_examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc new file mode 100644 index 000000000..bbd7c9edc --- /dev/null +++ b/docs/doc_examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + fields: ["voltage_corrected", "node"], + size: 2, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8cbf9b46ce3ccc966c4902d2e0c56317.asciidoc b/docs/doc_examples/8cbf9b46ce3ccc966c4902d2e0c56317.asciidoc new file mode 100644 index 000000000..117a672d0 --- /dev/null +++ b/docs/doc_examples/8cbf9b46ce3ccc966c4902d2e0c56317.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["keyword_repeat", "stemmer"], + text: "fox running and jumping", + explain: true, + attributes: "keyword", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc b/docs/doc_examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc index bc1eb2c9b..0b2932f62 100644 --- a/docs/doc_examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc +++ b/docs/doc_examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc @@ -4,77 +4,76 @@ [source, js] ---- const response = await client.bulk({ - body: [ + operations: [ { update: { - _id: '1', - _index: 'index1', - retry_on_conflict: 3 - } + _id: "1", + _index: "index1", + retry_on_conflict: 3, + }, }, { doc: { - field: 'value' - } + field: "value", + }, }, { update: { - _id: '0', - _index: 'index1', - retry_on_conflict: 3 - } + _id: "0", + _index: "index1", + retry_on_conflict: 3, + }, }, { script: { - source: 'ctx._source.counter += params.param1', - lang: 'painless', + source: "ctx._source.counter += params.param1", + lang: "painless", params: { - param1: 1 - } + param1: 1, + }, }, upsert: { - counter: 1 - } + counter: 1, + }, }, { update: { - _id: '2', - _index: 'index1', - retry_on_conflict: 3 - } + _id: "2", + _index: "index1", + retry_on_conflict: 3, + }, }, { doc: { - field: 'value' + field: "value", }, - doc_as_upsert: true + doc_as_upsert: true, }, { update: { - _id: '3', - _index: 'index1', - _source: true - } + _id: "3", + _index: "index1", + _source: true, + }, }, { doc: { - field: 'value' - } + field: "value", + }, }, { update: { - _id: '4', - _index: 'index1' - } + _id: "4", + _index: "index1", + }, }, { doc: { - field: 'value' + field: "value", }, - _source: true - } - ] -}) -console.log(response) + _source: true, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/8cef2b98f3fe3a85874f1b48ebe6ec63.asciidoc b/docs/doc_examples/8cef2b98f3fe3a85874f1b48ebe6ec63.asciidoc new file mode 
100644
index 000000000..a2825d9b1
--- /dev/null
+++ b/docs/doc_examples/8cef2b98f3fe3a85874f1b48ebe6ec63.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "elision_case_insensitive_example",
+  settings: {
+    analysis: {
+      analyzer: {
+        default: {
+          tokenizer: "whitespace",
+          filter: ["elision_case_insensitive"],
+        },
+      },
+      filter: {
+        elision_case_insensitive: {
+          type: "elision",
+          articles: ["l", "m", "t", "qu", "n", "s", "j"],
+          articles_case: true,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8d064eda2199de52e5be9ee68a5b7c68.asciidoc b/docs/doc_examples/8d064eda2199de52e5be9ee68a5b7c68.asciidoc
new file mode 100644
index 000000000..929bb2357
--- /dev/null
+++ b/docs/doc_examples/8d064eda2199de52e5be9ee68a5b7c68.asciidoc
@@ -0,0 +1,24 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "my-text-embeddings-pipeline",
+  description: "Text embedding pipeline",
+  processors: [
+    {
+      inference: {
+        model_id: ".elser_model_2",
+        input_output: [
+          {
+            input_field: "my_text_field",
+            output_field: "my_tokens",
+          },
+        ],
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8d421c5bec38eecce4679b219cacc9db.asciidoc b/docs/doc_examples/8d421c5bec38eecce4679b219cacc9db.asciidoc
new file mode 100644
index 000000000..ddc9bab51
--- /dev/null
+++ b/docs/doc_examples/8d421c5bec38eecce4679b219cacc9db.asciidoc
@@ -0,0 +1,30 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "latency",
+  size: 0,
+  runtime_mappings: {
+    "load_time.seconds": {
+      type: "long",
+      script: {
+        source: "emit(doc['load_time'].value / params.timeUnit)",
+        params: {
+          timeUnit: 1000,
+        },
+      },
+    },
+  },
+  aggs: {
+    load_time_ranks: {
+      percentile_ranks: {
+        values: [500, 600],
+        field: "load_time.seconds",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8d4ca17349e7e82c329cdd854cc670a1.asciidoc b/docs/doc_examples/8d4ca17349e7e82c329cdd854cc670a1.asciidoc
new file mode 100644
index 000000000..709910bb1
--- /dev/null
+++ b/docs/doc_examples/8d4ca17349e7e82c329cdd854cc670a1.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRole({
+  name: "remote-search",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc b/docs/doc_examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc
new file mode 100644
index 000000000..e4289442f
--- /dev/null
+++ b/docs/doc_examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.samlLogout({
+  token: "46ToAxZVaXVVZTVKOVF5YU04ZFJVUDVSZlV3",
+  refresh_token: "mJdXLtmvTUSpoLwMvdBt_w",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8d6631b622f9bfb8fa70154f6fb8b153.asciidoc b/docs/doc_examples/8d6631b622f9bfb8fa70154f6fb8b153.asciidoc
new file mode 100644
index 000000000..4e6209177
--- /dev/null
+++ b/docs/doc_examples/8d6631b622f9bfb8fa70154f6fb8b153.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "kibana_sample_data_ecommerce",
+  q: "kimchy",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8d7193902a353872740a3324c60c5001.asciidoc b/docs/doc_examples/8d7193902a353872740a3324c60c5001.asciidoc
new file mode 100644
index 000000000..fe8e691c7
--- /dev/null
+++ b/docs/doc_examples/8d7193902a353872740a3324c60c5001.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "events",
+  settings: {
+    index: {
+      "sort.field": "timestamp",
+      "sort.order": "desc",
+    },
+  },
+  mappings: {
+    properties: {
+      timestamp: {
+        type: "date",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8d9a63d7c31f08bd27d92ece3de1649c.asciidoc b/docs/doc_examples/8d9a63d7c31f08bd27d92ece3de1649c.asciidoc
deleted file mode 100644
index 1c26cd619..000000000
--- a/docs/doc_examples/8d9a63d7c31f08bd27d92ece3de1649c.asciidoc
+++ /dev/null
@@ -1,40 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response0 = await client.index({
-  index: 'my_index',
-  id: '1',
-  body: {
-    text: 'Document with ID 1'
-  }
-})
-console.log(response0)
-
-const response1 = await client.index({
-  index: 'my_index',
-  id: '2',
-  refresh: 'true',
-  body: {
-    text: 'Document with ID 2'
-  }
-})
-console.log(response1)
-
-const response2 = await client.search({
-  index: 'my_index',
-  body: {
-    query: {
-      terms: {
-        _id: [
-          '1',
-          '2'
-        ]
-      }
-    }
-  }
-})
-console.log(response2)
-----
-
diff --git a/docs/doc_examples/8d9b04f2a97f4229dec9e620126de049.asciidoc b/docs/doc_examples/8d9b04f2a97f4229dec9e620126de049.asciidoc
new file mode 100644
index 000000000..22ae3fa2d
--- /dev/null
+++ b/docs/doc_examples/8d9b04f2a97f4229dec9e620126de049.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putSettings({
+  persistent: {
+    "logger.com.amazonaws.request": "DEBUG",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8db799543eb084ec71547980863d60b9.asciidoc b/docs/doc_examples/8db799543eb084ec71547980863d60b9.asciidoc
new file mode 100644
index 000000000..fb3df27c8
--- /dev/null
+++ b/docs/doc_examples/8db799543eb084ec71547980863d60b9.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  aggs: {
+    sales_per_month: {
+      date_histogram: {
+        field: "date",
+        calendar_interval: "month",
+      },
+      aggs: {
+        sales: {
+          sum: {
+            field: "price",
+          },
+        },
+      },
+    },
+    sum_monthly_sales: {
+      sum_bucket: {
+        buckets_path: "sales_per_month>sales",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8de3206f80e18185a5ad6481f4c2ee07.asciidoc b/docs/doc_examples/8de3206f80e18185a5ad6481f4c2ee07.asciidoc
deleted file mode 100644
index 8dd0509a3..000000000
--- a/docs/doc_examples/8de3206f80e18185a5ad6481f4c2ee07.asciidoc
+++ /dev/null
@@ -1,23 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  index: 'my_index',
-  size: '0',
-  body: {
-    aggs: {
-      by_day: {
-        date_histogram: {
-          field: 'date',
-          calendar_interval: 'day',
-          time_zone: '-01:00'
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc b/docs/doc_examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc
new file mode 100644
index 000000000..6ff015fdb
--- /dev/null
+++ b/docs/doc_examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cat.segments({
+  v: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e06d8b2b737c43806018eae2ca061c1.asciidoc b/docs/doc_examples/8e06d8b2b737c43806018eae2ca061c1.asciidoc
new file mode 100644
index 000000000..5c43e7f8f
--- /dev/null
+++ b/docs/doc_examples/8e06d8b2b737c43806018eae2ca061c1.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  size: 0,
+  aggs: {
+    message_stats: {
+      string_stats: {
+        field: "message.keyword",
+        missing: "[empty message]",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e0f43829df9af20547ea6896f4c0124.asciidoc b/docs/doc_examples/8e0f43829df9af20547ea6896f4c0124.asciidoc
new file mode 100644
index 000000000..738638e26
--- /dev/null
+++ b/docs/doc_examples/8e0f43829df9af20547ea6896f4c0124.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ilm.putLifecycle({
+  name: "rollover_policy",
+  policy: {
+    phases: {
+      hot: {
+        actions: {
+          rollover: {
+            max_size: "50gb",
+          },
+        },
+      },
+      delete: {
+        min_age: "1d",
+        actions: {
+          delete: {},
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e208098a0156c4c92afe0a06960b230.asciidoc b/docs/doc_examples/8e208098a0156c4c92afe0a06960b230.asciidoc
new file mode 100644
index 000000000..cf5a5c2b6
--- /dev/null
+++ b/docs/doc_examples/8e208098a0156c4c92afe0a06960b230.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.samlAuthenticate({
+  content:
+    "PHNhbWxwOlJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6cHJvdG9jb2wiIHhtbG5zOnNhbWw9InVybjpvYXNpczpuYW1lczp0YzpTQU1MOjIuMD.....",
+  ids: ["4fee3b046395c4e751011e97f8900b5273d56685"],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e286a205a1f84f888a6d99f2620c80e.asciidoc b/docs/doc_examples/8e286a205a1f84f888a6d99f2620c80e.asciidoc
new file mode 100644
index 000000000..fab31728c
--- /dev/null
+++ b/docs/doc_examples/8e286a205a1f84f888a6d99f2620c80e.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.putSettings({
+  persistent: {
+    "logger.org.elasticsearch.deprecation": "OFF",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc b/docs/doc_examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc
new file mode 100644
index 000000000..9a6f60e85
--- /dev/null
+++ b/docs/doc_examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.stats({
+  metric: "indexing,search",
+  level: "shards",
+  human: "true",
+  expand_wildcards: "all",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e42a17edace2bc6e42c6a1532779937.asciidoc b/docs/doc_examples/8e42a17edace2bc6e42c6a1532779937.asciidoc
new file mode 100644
index 000000000..f65a76375
--- /dev/null
+++ b/docs/doc_examples/8e42a17edace2bc6e42c6a1532779937.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  aggs: {
+    max_price: {
+      max: {
+        field: "price",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc b/docs/doc_examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc
new file mode 100644
index 000000000..f94192bd1
--- /dev/null
+++ b/docs/doc_examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ccr.followStats({
+  index: "follower_index",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e68cdfad45e7e6dff254d931eea29d4.asciidoc b/docs/doc_examples/8e68cdfad45e7e6dff254d931eea29d4.asciidoc
new file mode 100644
index 000000000..6f990c6e7
--- /dev/null
+++ b/docs/doc_examples/8e68cdfad45e7e6dff254d931eea29d4.asciidoc
@@ -0,0 +1,101 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.bulk({
+  index: "my-index-000001",
+  refresh: "true",
+  operations: [
+    {
+      index: {},
+    },
+    {
+      "@timestamp": "2020-06-21T15:00:01-05:00",
+      message:
+        '211.11.9.0 - - [2020-06-21T15:00:01-05:00] "GET /english/index.html HTTP/1.0" 304 0',
+    },
+    {
+      index: {},
+    },
+    {
+      "@timestamp": "2020-06-21T15:00:01-05:00",
+      message:
+        '211.11.9.0 - - [2020-06-21T15:00:01-05:00] "GET /english/index.html HTTP/1.0" 304 0',
+    },
+    {
+      index: {},
+    },
+    {
+      "@timestamp": "2020-04-30T14:30:17-05:00",
+      message:
+        '40.135.0.0 - - [2020-04-30T14:30:17-05:00] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736',
+    },
+    {
+      index: {},
+    },
+    {
+      "@timestamp": "2020-04-30T14:30:53-05:00",
+      message:
+        '232.0.0.0 - - [2020-04-30T14:30:53-05:00] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736',
+    },
+    {
+      index: {},
+    },
+    {
+      "@timestamp": "2020-04-30T14:31:12-05:00",
+      message:
+        '26.1.0.0 - - [2020-04-30T14:31:12-05:00] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736',
+    },
+    {
+      index: {},
+    },
+    {
+      "@timestamp": "2020-04-30T14:31:19-05:00",
+      message:
+        '247.37.0.0 - - [2020-04-30T14:31:19-05:00] "GET /french/splash_inet.html HTTP/1.0" 200 3781',
+    },
+    {
+      index: {},
+    },
+    {
+      "@timestamp": "2020-04-30T14:31:27-05:00",
+      message:
        '252.0.0.0 - - [2020-04-30T14:31:27-05:00] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736',
+    },
+    {
+      index: {},
+    },
+    {
+      "@timestamp": "2020-04-30T14:31:29-05:00",
+      message:
+        '247.37.0.0 - - [2020-04-30T14:31:29-05:00] "GET /images/hm_brdl.gif HTTP/1.0" 304 0',
+    },
+    {
+      index: {},
+    },
+    {
+      "@timestamp": "2020-04-30T14:31:29-05:00",
+      message:
+        '247.37.0.0 - - [2020-04-30T14:31:29-05:00] "GET /images/hm_arw.gif HTTP/1.0" 304 0',
+    },
+    {
+      index: {},
+    },
+    {
+      "@timestamp": "2020-04-30T14:31:32-05:00",
+      message:
+        '247.37.0.0 - - [2020-04-30T14:31:32-05:00] "GET /images/nav_bg_top.gif HTTP/1.0" 200 929',
+    },
+    {
+      index: {},
+    },
+    {
+      "@timestamp": "2020-04-30T14:31:43-05:00",
+      message:
+        '247.37.0.0 - - [2020-04-30T14:31:43-05:00] "GET /french/images/nav_venue_off.gif HTTP/1.0" 304 0',
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc b/docs/doc_examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc
new file mode 100644
index 000000000..ddab55399
--- /dev/null
+++ b/docs/doc_examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_connector/my-connector/_scheduling",
+  body: {
+    scheduling: {
+      access_control: {
+        enabled: true,
+        interval: "0 10 0 * * ?",
+      },
+      full: {
+        enabled: true,
+        interval: "0 20 0 * * ?",
+      },
+      incremental: {
+        enabled: false,
+        interval: "0 30 0 * * ?",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e92b10ebcfedc76562ab52d0e46b916.asciidoc b/docs/doc_examples/8e92b10ebcfedc76562ab52d0e46b916.asciidoc
new file mode 100644
index 000000000..7fdd02e82
--- /dev/null
+++ b/docs/doc_examples/8e92b10ebcfedc76562ab52d0e46b916.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.deleteScript({
+  id: "my-search-template",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc b/docs/doc_examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc
new file mode 100644
index 000000000..f05ef2cfe
--- /dev/null
+++ b/docs/doc_examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.setUpgradeMode({
+  enabled: "false",
+  timeout: "10m",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8e9f7261af6264c92d0eb4d586a176f9.asciidoc b/docs/doc_examples/8e9f7261af6264c92d0eb4d586a176f9.asciidoc
new file mode 100644
index 000000000..89704ff94
--- /dev/null
+++ b/docs/doc_examples/8e9f7261af6264c92d0eb4d586a176f9.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "lowercase_example",
+  settings: {
+    analysis: {
+      analyzer: {
+        whitespace_lowercase: {
+          tokenizer: "whitespace",
+          filter: ["lowercase"],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8eac28d2e9b6482b413d61817456a14f.asciidoc b/docs/doc_examples/8eac28d2e9b6482b413d61817456a14f.asciidoc
new file mode 100644
index 000000000..ef8fbee12
--- /dev/null
+++ b/docs/doc_examples/8eac28d2e9b6482b413d61817456a14f.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  aggs: {
+    genres: {
+      terms: {
+        field: "genre",
+        order: {
+          max_play_count: "desc",
+        },
+      },
+      aggs: {
+        max_play_count: {
+          max: {
+            field: "play_count",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8eaf4d5dd4ab1335deefa7749fdbbcc3.asciidoc b/docs/doc_examples/8eaf4d5dd4ab1335deefa7749fdbbcc3.asciidoc
deleted file mode 100644
index 54bc01d70..000000000
--- a/docs/doc_examples/8eaf4d5dd4ab1335deefa7749fdbbcc3.asciidoc
+++ /dev/null
@@ -1,22 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.search({
-  body: {
-    query: {
-      function_score: {
-        field_value_factor: {
-          field: 'likes',
-          factor: 1.2,
-          modifier: 'sqrt',
-          missing: 1
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc b/docs/doc_examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc
new file mode 100644
index 000000000..e8e466065
--- /dev/null
+++ b/docs/doc_examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    query_string: {
+      fields: ["content", "name^5"],
+      query: "this AND that OR thus",
+      tie_breaker: 0,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8ed31628081db2b6e9106d61d1e142be.asciidoc b/docs/doc_examples/8ed31628081db2b6e9106d61d1e142be.asciidoc
new file mode 100644
index 000000000..7940e4418
--- /dev/null
+++ b/docs/doc_examples/8ed31628081db2b6e9106d61d1e142be.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    simple_query_string: {
+      query: "ny city",
+      auto_generate_synonyms_phrase_query: false,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8edcd80d9b545a222dcc2f25ca4c6d5f.asciidoc b/docs/doc_examples/8edcd80d9b545a222dcc2f25ca4c6d5f.asciidoc
new file mode 100644
index 000000000..59801baf0
--- /dev/null
+++ b/docs/doc_examples/8edcd80d9b545a222dcc2f25ca4c6d5f.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.searchApplication.search({
+  name: "my-search-app",
+  params: {
+    query_string:
+      "What is the most popular brand of coffee sold in the United States?",
+    elser_fields: ["title", "meta_description"],
+    text_fields: ["title", "meta_description"],
+    rrf: {
+      rank_window_size: 50,
+      rank_constant: 25,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8ee9521f57661a050efb614f02b4a090.asciidoc b/docs/doc_examples/8ee9521f57661a050efb614f02b4a090.asciidoc
new file mode 100644
index 000000000..e74958326
--- /dev/null
+++ b/docs/doc_examples/8ee9521f57661a050efb614f02b4a090.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  aggs: {
+    genres: {
+      terms: {
+        field: "genre",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8f0511f8a5cb176ff2afdd4311799a33.asciidoc b/docs/doc_examples/8f0511f8a5cb176ff2afdd4311799a33.asciidoc
deleted file mode 100644
index 15040fe9b..000000000
--- a/docs/doc_examples/8f0511f8a5cb176ff2afdd4311799a33.asciidoc
+++ /dev/null
@@ -1,34 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response0 = await client.index({
-  index: 'twitter',
-  id: '1',
-  refresh: true,
-  body: {
-    user: 'kimchy'
-  }
-})
-console.log(response0)
-
-const response1 = await client.count({
-  index: 'twitter',
-  q: 'user:kimchy'
-})
-console.log(response1)
-
-const response2 = await client.count({
-  index: 'twitter',
-  body: {
-    query: {
-      term: {
-        user: 'kimchy'
-      }
-    }
-  }
-})
-console.log(response2)
-----
-
diff --git a/docs/doc_examples/8f0a3d7b5fbdf5351750a23c493cc078.asciidoc b/docs/doc_examples/8f0a3d7b5fbdf5351750a23c493cc078.asciidoc
new file mode 100644
index 000000000..8f14d2f77
--- /dev/null
+++ b/docs/doc_examples/8f0a3d7b5fbdf5351750a23c493cc078.asciidoc
@@ -0,0 +1,24 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  query: {
+    rule: {
+      organic: {
+        query_string: {
+          query: "puggles",
+        },
+      },
+      match_criteria: {
+        query_string: "puggles",
+        user_country: "us",
+      },
+      ruleset_ids: ["my-ruleset"],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8f0c5c81cdb902c136db821947ee70a1.asciidoc b/docs/doc_examples/8f0c5c81cdb902c136db821947ee70a1.asciidoc
new file mode 100644
index 000000000..6d592d602
--- /dev/null
+++ b/docs/doc_examples/8f0c5c81cdb902c136db821947ee70a1.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  runtime_mappings: {
+    "price.adjusted": {
+      type: "double",
+      script:
+        "\n double price = doc['price'].value;\n if (doc['promoted'].value) {\n price *= 0.8;\n }\n emit(price);\n ",
+    },
+  },
+  aggs: {
+    min_price: {
+      min: {
+        field: "price.adjusted",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc b/docs/doc_examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc
new file mode 100644
index 000000000..4e03b85dd
--- /dev/null
+++ b/docs/doc_examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.tasks.list({
+  actions: "*search",
+  detailed: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc b/docs/doc_examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc
new file mode 100644
index 000000000..8d4db5856
--- /dev/null
+++ b/docs/doc_examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRoleMapping({
+  name: "native1_users",
+  refresh: "true",
+  roles: ["user"],
+  rules: {
+    all: [
+      {
+        field: {
+          "realm.name": "native1",
+        },
+      },
+      {
+        field: {
+          username: "principalname1",
+        },
+      },
+    ],
+  },
+  enabled: true,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8f7936f219500305e5b2518dbbf949ea.asciidoc b/docs/doc_examples/8f7936f219500305e5b2518dbbf949ea.asciidoc
new file mode 100644
index 000000000..51580581f
--- /dev/null
+++ b/docs/doc_examples/8f7936f219500305e5b2518dbbf949ea.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.asyncSearch.status({
+  id: "FmpwbThueVB4UkRDeUxqb1l4akIza3cbWEJyeVBPQldTV3FGZGdIeUVabXBldzoyMDIw",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8f9a3fcd17a111f63caa3bef6e5f00f2.asciidoc b/docs/doc_examples/8f9a3fcd17a111f63caa3bef6e5f00f2.asciidoc
new file mode 100644
index 000000000..939e09227
--- /dev/null
+++ b/docs/doc_examples/8f9a3fcd17a111f63caa3bef6e5f00f2.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  aggs: {
+    tags: {
+      terms: {
+        field: "tags",
+        execution_hint: "map",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8f9f88cf9a27c1138226efb94ac09e73.asciidoc b/docs/doc_examples/8f9f88cf9a27c1138226efb94ac09e73.asciidoc
new file mode 100644
index 000000000..c3470454d
--- /dev/null
+++ b/docs/doc_examples/8f9f88cf9a27c1138226efb94ac09e73.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  query: {
+    term: {
+      ip_addr: "192.168.0.0/16",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8fdf2344c4fb3de6902ad7c5735270df.asciidoc b/docs/doc_examples/8fdf2344c4fb3de6902ad7c5735270df.asciidoc
deleted file mode 100644
index 6bb55708a..000000000
--- a/docs/doc_examples/8fdf2344c4fb3de6902ad7c5735270df.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.get({
-  index: 'twitter',
-  id: '0',
-  _source_includes: '*.id',
-  _source_excludes: 'entities'
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/8fe128323a944765f525c76d85af7a2f.asciidoc b/docs/doc_examples/8fe128323a944765f525c76d85af7a2f.asciidoc
new file mode 100644
index 000000000..3d82c170f
--- /dev/null
+++ b/docs/doc_examples/8fe128323a944765f525c76d85af7a2f.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "kibana_sample_data_ecommerce",
+  size: 0,
+  track_total_hits: "false",
+  aggregations: {
+    sampling: {
+      random_sampler: {
+        probability: 0.1,
+      },
+      aggs: {
+        price_percentiles: {
+          percentiles: {
+            field: "taxful_total_price",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc b/docs/doc_examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc
new file mode 100644
index 000000000..2d7c738ce
--- /dev/null
+++ b/docs/doc_examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "dsl-data-stream",
+  document: {
+    "@timestamp": "2023-10-18T16:21:15.000Z",
+    message:
+      '192.0.2.42 - - [06/May/2099:16:21:15 +0000] "GET /images/bg.jpg HTTP/1.0" 200 24736',
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/90083d93e46fad2524755b8d4d1306fc.asciidoc b/docs/doc_examples/90083d93e46fad2524755b8d4d1306fc.asciidoc
new file mode 100644
index 000000000..d1c80a37d
--- /dev/null
+++ b/docs/doc_examples/90083d93e46fad2524755b8d4d1306fc.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_connector/_sync_job/my-connector-sync-job/_stats",
+  body: {
+    deleted_document_count: 10,
+    indexed_document_count: 20,
+    indexed_document_volume: 1000,
+    total_document_count: 2000,
+    last_seen: "2023-01-02T10:00:00Z",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/901d66919e584515717bf78ab5ca2cbb.asciidoc b/docs/doc_examples/901d66919e584515717bf78ab5ca2cbb.asciidoc
new file mode 100644
index 000000000..696802ab2
--- /dev/null
+++ b/docs/doc_examples/901d66919e584515717bf78ab5ca2cbb.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  aggs: {
+    range: {
+      date_range: {
+        field: "date",
+        time_zone: "CET",
+        ranges: [
+          {
+            to: "2016/02/01",
+          },
+          {
+            from: "2016/02/01",
+            to: "now/d",
+          },
+          {
+            from: "now/d",
+          },
+        ],
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/902cfd5aeec2f65b3adf55f5e38b21f0.asciidoc b/docs/doc_examples/902cfd5aeec2f65b3adf55f5e38b21f0.asciidoc
new file mode 100644
index 000000000..66053e31e
--- /dev/null
+++ b/docs/doc_examples/902cfd5aeec2f65b3adf55f5e38b21f0.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "kibana_sample_data_ecommerce2",
+  document: {
+    user: "kimchy",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc b/docs/doc_examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc
new file mode 100644
index 000000000..da884497d
--- /dev/null
+++ b/docs/doc_examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.xpack.info();
+console.log(response);
+----
diff --git a/docs/doc_examples/90631797c7fbda43902abf2cc0ea8304.asciidoc b/docs/doc_examples/90631797c7fbda43902abf2cc0ea8304.asciidoc
new file mode 100644
index 000000000..5e3c8c70b
--- /dev/null
+++ b/docs/doc_examples/90631797c7fbda43902abf2cc0ea8304.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.nodes.stats({
+  metric: "indices",
+  index_metric: "request_cache",
+  human: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/908326e14ad76c2ff04a9b6d8365751f.asciidoc b/docs/doc_examples/908326e14ad76c2ff04a9b6d8365751f.asciidoc
new file mode 100644
index 000000000..d6ab00301
--- /dev/null
+++ b/docs/doc_examples/908326e14ad76c2ff04a9b6d8365751f.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "passage_vectors",
+  fields: ["creation_time", "full_text"],
+  _source: false,
+  knn: {
+    query_vector: [0.45, 45],
+    field: "paragraph.vector",
+    k: 2,
+    num_candidates: 2,
+    inner_hits: {
+      _source: false,
+      fields: ["paragraph.text"],
+      size: 1,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/909a032a9c1f7095b798444705b09ad6.asciidoc b/docs/doc_examples/909a032a9c1f7095b798444705b09ad6.asciidoc
new file mode 100644
index 000000000..e72223aab
--- /dev/null
+++ b/docs/doc_examples/909a032a9c1f7095b798444705b09ad6.asciidoc
@@ -0,0 +1,14 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "example",
+  document: {
+    location:
+      "GEOMETRYCOLLECTION (POINT (100.0 0.0), LINESTRING (101.0 0.0, 102.0 1.0))",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/90c087560ea6c0b7405f710971c86ef0.asciidoc b/docs/doc_examples/90c087560ea6c0b7405f710971c86ef0.asciidoc
new file mode 100644
index 000000000..ed42ce39f
--- /dev/null
+++ b/docs/doc_examples/90c087560ea6c0b7405f710971c86ef0.asciidoc
@@ -0,0 +1,26 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ccr.putAutoFollowPattern({
+  name: "my_auto_follow_pattern",
+  remote_cluster: "remote_cluster",
+  leader_index_patterns: ["leader_index*"],
+  follow_index_pattern: "{{leader_index}}-follower",
+  settings: {
+    "index.number_of_replicas": 0,
+  },
+  max_read_request_operation_count: 1024,
+  max_outstanding_read_requests: 16,
+  max_read_request_size: "1024k",
+  max_write_request_operation_count: 32768,
+  max_write_request_size: "16k",
+  max_outstanding_write_requests: 8,
+  max_write_buffer_count: 512,
+  max_write_buffer_size: "512k",
+  max_retry_delay: "10s",
+  read_poll_timeout: "30s",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc b/docs/doc_examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc
new file mode 100644
index 000000000..59859c078
--- /dev/null
+++ b/docs/doc_examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.autoscaling.deleteAutoscalingPolicy({
+  name: "*",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/90f1f5304922fb6d097846dd1444c075.asciidoc b/docs/doc_examples/90f1f5304922fb6d097846dd1444c075.asciidoc
new file mode 100644
index 000000000..9e800a478
--- /dev/null
+++ b/docs/doc_examples/90f1f5304922fb6d097846dd1444c075.asciidoc
@@ -0,0 +1,31 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.watcher.putWatch({
+  id: "cluster_health_watch",
+  trigger: {
+    schedule: {
+      interval: "10s",
+    },
+  },
+  input: {
+    http: {
+      request: {
+        host: "localhost",
+        port: 9200,
+        path: "/_cluster/health",
+      },
+    },
+  },
+  condition: {
+    compare: {
+      "ctx.payload.status": {
+        eq: "red",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9116ee8a5b00cc877291ed5559563f24.asciidoc b/docs/doc_examples/9116ee8a5b00cc877291ed5559563f24.asciidoc
new file mode 100644
index 000000000..9732b73ad
--- /dev/null
+++ b/docs/doc_examples/9116ee8a5b00cc877291ed5559563f24.asciidoc
@@ -0,0 +1,37 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.watcher.putWatch({
+  id: "my_watch",
+  trigger: {
+    schedule: {
+      yearly: {
+        in: "february",
+        on: 29,
+        at: "noon",
+      },
+    },
+  },
+  input: {
+    simple: {
+      payload: {
+        send: "yes",
+      },
+    },
+  },
+  condition: {
+    always: {},
+  },
+  actions: {
+    test_index: {
+      throttle_period: "15m",
+      index: {
+        index: "test",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/911c56114e50ce7440eb83efc91d28b8.asciidoc b/docs/doc_examples/911c56114e50ce7440eb83efc91d28b8.asciidoc
new file mode 100644
index 000000000..352dda611
--- /dev/null
+++ b/docs/doc_examples/911c56114e50ce7440eb83efc91d28b8.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putMapping({
+  index: "my-data-stream",
+  properties: {
+    host: {
+      properties: {
+        ip: {
+          type: "ip",
+          ignore_malformed: true,
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9120b6a49ec39a1571339fddf8e1a26f.asciidoc b/docs/doc_examples/9120b6a49ec39a1571339fddf8e1a26f.asciidoc
new file mode 100644
index 000000000..00f36997e
--- /dev/null
+++ b/docs/doc_examples/9120b6a49ec39a1571339fddf8e1a26f.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "my-pipeline",
+  processors: [
+    {
+      set: {
+        field: "my-long-field",
+        value: 10,
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/91270cef57ac455547ffd47839420887.asciidoc b/docs/doc_examples/91270cef57ac455547ffd47839420887.asciidoc
new file mode 100644
index 000000000..84f7381a4
--- /dev/null
+++ b/docs/doc_examples/91270cef57ac455547ffd47839420887.asciidoc
@@ -0,0 +1,43 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  filter_path: "aggregations",
+  size: 0,
+  aggs: {
+    buckets: {
+      composite: {
+        sources: [
+          {
+            month: {
+              date_histogram: {
+                field: "date",
+                calendar_interval: "month",
+              },
+            },
+          },
+          {
+            type: {
+              terms: {
+                field: "type",
+              },
+            },
+          },
+        ],
+      },
+      aggs: {
+        avg_price: {
+          rate: {
+            field: "price",
+            unit: "day",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9129dec88d35571b3166c6677297f03b.asciidoc b/docs/doc_examples/9129dec88d35571b3166c6677297f03b.asciidoc
new file mode 100644
index 000000000..ac213dce2
--- /dev/null
+++ b/docs/doc_examples/9129dec88d35571b3166c6677297f03b.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.transform.getTransform({
+  transform_id: "ecommerce_transform1",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/913770050ebbf3b9b549a899bc11060a.asciidoc b/docs/doc_examples/913770050ebbf3b9b549a899bc11060a.asciidoc
deleted file mode 100644
index 464593801..000000000
--- a/docs/doc_examples/913770050ebbf3b9b549a899bc11060a.asciidoc
+++ /dev/null
@@ -1,25 +0,0 @@
-// This file is autogenerated, DO NOT EDIT
-// Use `node scripts/generate-docs-examples.js` to generate the docs examples
-
-[source, js]
-----
-const response = await client.indices.create({
-  index: 'twitter',
-  body: {
-    mappings: {
-      properties: {
-        counter: {
-          type: 'integer',
-          store: false
-        },
-        tags: {
-          type: 'keyword',
-          store: true
-        }
-      }
-    }
-  }
-})
-console.log(response)
-----
-
diff --git a/docs/doc_examples/9138550002cb26ab64918cce427963b8.asciidoc b/docs/doc_examples/9138550002cb26ab64918cce427963b8.asciidoc
new file mode 100644
index 000000000..41cdf5cb4
--- /dev/null
+++ b/docs/doc_examples/9138550002cb26ab64918cce427963b8.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putIndexTemplate({
+  name: "template_1",
+  index_patterns: ["foo", "bar"],
+  priority: 0,
+  template: {
+    settings: {
+      number_of_shards: 1,
+    },
+  },
+  version: 123,
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/913c163c197802078a8af72150178061.asciidoc b/docs/doc_examples/913c163c197802078a8af72150178061.asciidoc
new file mode 100644
index 000000000..bfc33ec56
--- /dev/null
+++ b/docs/doc_examples/913c163c197802078a8af72150178061.asciidoc
@@ -0,0 +1,36 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  aggs: {
+    sales_per_month: {
+      date_histogram: {
+        field: "date",
+        calendar_interval: "month",
+      },
+      aggs: {
+        sales: {
+          sum: {
+            field: "price",
+          },
+        },
+        sales_deriv: {
+          derivative: {
+            buckets_path: "sales",
+          },
+        },
+        sales_2nd_deriv: {
+          derivative: {
+            buckets_path: "sales_deriv",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9143be4f137574271953a7a8107e175b.asciidoc b/docs/doc_examples/9143be4f137574271953a7a8107e175b.asciidoc
new file mode 100644
index 000000000..17b21295f
--- /dev/null
+++ b/docs/doc_examples/9143be4f137574271953a7a8107e175b.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.getUserProfile({
+  uid: "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/91750571c195718f0ff246e058e4bc63.asciidoc b/docs/doc_examples/91750571c195718f0ff246e058e4bc63.asciidoc
new file mode 100644
index 000000000..4e86f61b8
--- /dev/null
+++ b/docs/doc_examples/91750571c195718f0ff246e058e4bc63.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "twitter",
+  query: {
+    match: {
+      title: "elasticsearch",
+    },
+  },
+  sort: [
+    {
+      date: "asc",
+    },
+    {
+      tie_breaker_id: "asc",
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc b/docs/doc_examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc
new file mode 100644
index 000000000..17cd435b2
--- /dev/null
+++ b/docs/doc_examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.fieldCaps({
+  index: "my-index-*",
+  fields: "rating",
+  index_filter: {
+    range: {
+      "@timestamp": {
+        gte: "2018",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/91c925fc71abe0ddfe52457e9130363b.asciidoc b/docs/doc_examples/91c925fc71abe0ddfe52457e9130363b.asciidoc
new file mode 100644
index 000000000..ad067f7f4
--- /dev/null
+++ b/docs/doc_examples/91c925fc71abe0ddfe52457e9130363b.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.grantApiKey({
+  grant_type: "password",
+  username: "test_admin",
+  password: "x-pack-test-password",
+  run_as: "test_user",
+  api_key: {
+    name: "another-api-key",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/91cbeeda86b4e4e393fc79d4e3a4a781.asciidoc b/docs/doc_examples/91cbeeda86b4e4e393fc79d4e3a4a781.asciidoc
new file mode 100644
index 000000000..90c51d019
--- /dev/null
+++ b/docs/doc_examples/91cbeeda86b4e4e393fc79d4e3a4a781.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "stackoverflow",
+  size: 0,
+  query: {
+    query_string: {
+      query: "tags:kibana OR tags:javascript",
+    },
+  },
+  aggs: {
+    low_quality_keywords: {
+      significant_terms: {
+        field: "tags",
+        size: 3,
+        exclude: ["kibana", "javascript"],
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/91ed08faaed54cb5ace9a295af937439.asciidoc b/docs/doc_examples/91ed08faaed54cb5ace9a295af937439.asciidoc
new file mode 100644
index 000000000..8e51a524e
--- /dev/null
+++ b/docs/doc_examples/91ed08faaed54cb5ace9a295af937439.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  size: 0,
+  runtime_mappings: {
+    "message.length": {
+      type: "long",
+      script: "emit(doc['message.keyword'].value.length())",
+    },
+  },
+  aggs: {
+    message_length: {
+      histogram: {
+        interval: 10,
+        field: "message.length",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9200ed8d5f798a158def4c526e41269e.asciidoc b/docs/doc_examples/9200ed8d5f798a158def4c526e41269e.asciidoc
new file mode 100644
index 000000000..71d3f7824
--- /dev/null
+++ b/docs/doc_examples/9200ed8d5f798a158def4c526e41269e.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.fieldCaps({
+  index: "my-index-000001",
+  fields: "rating",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/92035a2a62d01a511662af65606d5fc6.asciidoc b/docs/doc_examples/92035a2a62d01a511662af65606d5fc6.asciidoc
new file mode 100644
index 000000000..f5102d8e7
--- /dev/null
+++ b/docs/doc_examples/92035a2a62d01a511662af65606d5fc6.asciidoc
@@ -0,0 +1,27 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "sales",
+  size: 0,
+  aggs: {
+    sales_per_month: {
+      date_histogram: {
+        field: "date",
+        calendar_interval: "month",
+      },
+      aggs: {
+        bucket_truncate: {
+          bucket_sort: {
+            from: 1,
+            size: 1,
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9216e8e544e6d193eda1f59e9160a225.asciidoc b/docs/doc_examples/9216e8e544e6d193eda1f59e9160a225.asciidoc
new file mode 100644
index 000000000..e28ae08d9
--- /dev/null
+++ b/docs/doc_examples/9216e8e544e6d193eda1f59e9160a225.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    span_near: {
+      clauses: [
+        {
+          span_term: {
+            field: "value1",
+          },
+        },
+        {
+          span_term: {
+            field: "value2",
+          },
+        },
+        {
+          span_term: {
+            field: "value3",
+          },
+        },
+      ],
+      slop: 12,
+      in_order: false,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/922529276f87cb9d116be2468d108466.asciidoc b/docs/doc_examples/922529276f87cb9d116be2468d108466.asciidoc
new file mode 100644
index 000000000..1a1b6c4c6
--- /dev/null
+++ b/docs/doc_examples/922529276f87cb9d116be2468d108466.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        default: {
+          type: "simple",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc b/docs/doc_examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc
new file mode 100644
index 000000000..b3d7c123d
--- /dev/null
+++ b/docs/doc_examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.getTrainedModelsStats();
+console.log(response);
+----
diff --git a/docs/doc_examples/92284d24bbb80ce6943f2ddcbf74b833.asciidoc b/docs/doc_examples/92284d24bbb80ce6943f2ddcbf74b833.asciidoc
new file mode 100644
index 000000000..dd0d0fa0a
--- /dev/null
+++ b/docs/doc_examples/92284d24bbb80ce6943f2ddcbf74b833.asciidoc
@@ -0,0 +1,36 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      flattened_field: {
+        type: "flattened",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  refresh: "true",
+  document: {
+    flattened_field: {
+      subfield: "value",
+    },
+  },
+});
+console.log(response1);
+
+const response2 = await client.search({
+  index: "my-index-000001",
+  fields: ["flattened_field.subfield"],
+  _source: false,
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/923aee95078219ee6eb321a252e1121b.asciidoc b/docs/doc_examples/923aee95078219ee6eb321a252e1121b.asciidoc
new file mode 100644
index 000000000..93158444f
--- /dev/null
+++ b/docs/doc_examples/923aee95078219ee6eb321a252e1121b.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "ngram_example",
+  settings: {
+    analysis: {
+      analyzer: {
+        standard_ngram: {
+          tokenizer: "standard",
+          filter: ["ngram"],
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc b/docs/doc_examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc
new file mode 100644
index 000000000..d680c008d
--- /dev/null
+++ b/docs/doc_examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  refresh: "true",
+  document: {
+    text: "words words",
+    flag: "foo",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc b/docs/doc_examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc
new file mode 100644
index 000000000..aba08bf33
--- /dev/null
+++ b/docs/doc_examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cat.templates({
+  name: "my-template-*",
+  v: "true",
+  s: "name",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc b/docs/doc_examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc
new file mode 100644
index 000000000..446a2b57b
--- /dev/null
+++ b/docs/doc_examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.esql.query({
+  query:
+    '\n FROM library\n | EVAL year = DATE_EXTRACT("year", release_date)\n | WHERE page_count > ? AND author == ?\n | STATS count = COUNT(*) by year\n | WHERE count > ?\n | LIMIT 5\n ',
+  params: [300, "Frank Herbert", 0],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9298aaf8232a819e79b3bf8471245e98.asciidoc b/docs/doc_examples/9298aaf8232a819e79b3bf8471245e98.asciidoc
new file mode 100644
index 000000000..df7ed68c4
--- /dev/null
+++ b/docs/doc_examples/9298aaf8232a819e79b3bf8471245e98.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ml.getJobStats({
+  job_id: "low_request_rate",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/92d0c12d53a900308150d572c3f2f82f.asciidoc b/docs/doc_examples/92d0c12d53a900308150d572c3f2f82f.asciidoc
new file mode 100644
index 000000000..975c72f37
--- /dev/null
+++ b/docs/doc_examples/92d0c12d53a900308150d572c3f2f82f.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    dynamic_templates: [
+      {
+        strings_as_keywords: {
+          match_mapping_type: "string",
+          mapping: {
+            type: "keyword",
+          },
+        },
+      },
+    ],
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/92d343eb755971c44a939d0660bf5ac2.asciidoc b/docs/doc_examples/92d343eb755971c44a939d0660bf5ac2.asciidoc
new file mode 100644
index 000000000..3680cbd11
--- /dev/null
+++ b/docs/doc_examples/92d343eb755971c44a939d0660bf5ac2.asciidoc
@@ -0,0 +1,25 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "test",
+  id: 1,
+  refresh: "true",
+  document: {
+    test: "test",
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "test",
+  id: 2,
+  refresh: "true",
+  document: {
+    test: "test",
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/92f073762634a4b2274f71002494192e.asciidoc b/docs/doc_examples/92f073762634a4b2274f71002494192e.asciidoc
new file mode 100644
index 000000000..5f45e9c3b
--- /dev/null
+++ b/docs/doc_examples/92f073762634a4b2274f71002494192e.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.state({
+  filter_path: "metadata.cluster_coordination.voting_config_exclusions",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc b/docs/doc_examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc
new file mode 100644
index 000000000..6f5ec0f73
--- /dev/null
+++ b/docs/doc_examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "my-index-000001",
+  query: {
+    range: {
+      timestamp: {
+        gte: "2020-04-30T14:31:27-05:00",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/92fe53019958ba466d1272da0834cf53.asciidoc b/docs/doc_examples/92fe53019958ba466d1272da0834cf53.asciidoc
new file mode 100644
index 000000000..0d9bdfb80
--- /dev/null
+++ b/docs/doc_examples/92fe53019958ba466d1272da0834cf53.asciidoc
@@ -0,0 +1,10 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.stats({
+  index: "my-index-000001",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/930a3c5667e3bf47b4e8cc28e7bf8d5f.asciidoc b/docs/doc_examples/930a3c5667e3bf47b4e8cc28e7bf8d5f.asciidoc
new file mode 100644
index 000000000..7091b8d7b
--- /dev/null
+++ b/docs/doc_examples/930a3c5667e3bf47b4e8cc28e7bf8d5f.asciidoc
@@ -0,0 +1,29 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.security.putRole({
+  name: "my_admin_role",
+  refresh: "true",
+  cluster: ["manage"],
+  indices: [
+    {
+      names: ["index1", "index2"],
+      privileges: ["manage"],
+    },
+  ],
+  applications: [
+    {
+      application: "myapp",
+      privileges: ["admin", "read"],
+      resources: ["*"],
+    },
+  ],
+  run_as: ["analyst_user"],
+  metadata: {
+    version: 1,
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/930ba37af73dd5ff0342ecfe6c60a4e9.asciidoc b/docs/doc_examples/930ba37af73dd5ff0342ecfe6c60a4e9.asciidoc
new file mode 100644
index 000000000..1f6f7cebc
--- /dev/null
+++ b/docs/doc_examples/930ba37af73dd5ff0342ecfe6c60a4e9.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "exams",
+  size: 0,
+  aggs: {
+    grades_stats: {
+      extended_stats: {
+        field: "grade",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/931da02a06953a768f4ad3fecfd7b2df.asciidoc b/docs/doc_examples/931da02a06953a768f4ad3fecfd7b2df.asciidoc
new file mode 100644
index 000000000..1401e6fff
--- /dev/null
+++ b/docs/doc_examples/931da02a06953a768f4ad3fecfd7b2df.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.getSettings({
+  index: "my-index-000001",
+  name: "index.routing.allocation.total_shards_per_node",
+  flat_settings: "true",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9334ccd09548b585cd637d7c66c5ae65.asciidoc b/docs/doc_examples/9334ccd09548b585cd637d7c66c5ae65.asciidoc
new file mode 100644
index 000000000..914fa5b5b
--- /dev/null
+++ b/docs/doc_examples/9334ccd09548b585cd637d7c66c5ae65.asciidoc
@@ -0,0 +1,49 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    match: {
+      message: {
+        operator: "or",
+        query: "the quick brown",
+      },
+    },
+  },
+  rescore: [
+    {
+      window_size: 100,
+      query: {
+        rescore_query: {
+          match_phrase: {
+            message: {
+              query: "the quick brown",
+              slop: 2,
+            },
+          },
+        },
+        query_weight: 0.7,
+        rescore_query_weight: 1.2,
+      },
+    },
+    {
+      window_size: 10,
+      query: {
+        score_mode: "multiply",
+        rescore_query: {
+          function_score: {
+            script_score: {
+              script: {
+                source: "Math.log10(doc.count.value + 2)",
+              },
+            },
+          },
+        },
+      },
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/93429d2bfbc0a9b7a4854b27e34658cf.asciidoc b/docs/doc_examples/93429d2bfbc0a9b7a4854b27e34658cf.asciidoc
new file mode 100644
index 000000000..90418dd1a
--- /dev/null
+++ b/docs/doc_examples/93429d2bfbc0a9b7a4854b27e34658cf.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      message: {
+        type: "text",
+      },
+      query: {
+        type: "percolator",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/93444b445446c1a6033347d6267253d6.asciidoc b/docs/doc_examples/93444b445446c1a6033347d6267253d6.asciidoc
new file mode 100644
index 000000000..c06ee69df
--- /dev/null
+++ b/docs/doc_examples/93444b445446c1a6033347d6267253d6.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  query: {
+    match_phrase_prefix: {
+      message: {
+        query: "quick brown f",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc b/docs/doc_examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc
new file mode 100644
index 000000000..929aab1a1
--- /dev/null
+++ b/docs/doc_examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "test",
+  settings: {
+    number_of_shards: 1,
+  },
+  mappings: {
+    properties: {
+      field1: {
+        type: "text",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/934ced0998552cc95a28e48554147e8b.asciidoc b/docs/doc_examples/934ced0998552cc95a28e48554147e8b.asciidoc
new file mode 100644
index 000000000..7b219da91
--- /dev/null
+++ b/docs/doc_examples/934ced0998552cc95a28e48554147e8b.asciidoc
@@ -0,0 +1,13 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.allocationExplain({
+  index: "my-index",
+  shard: 0,
+  primary: false,
+  current_node: "my-node",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/935566d5426d44ade486a49ec5289741.asciidoc b/docs/doc_examples/935566d5426d44ade486a49ec5289741.asciidoc
new file mode 100644
index 000000000..197ace01b
--- /dev/null
+++ b/docs/doc_examples/935566d5426d44ade486a49ec5289741.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.reindex({
+  wait_for_completion: "false",
+  source: {
+    index: "test-data",
+    size: 10,
+  },
+  dest: {
+    index: "semantic-embeddings",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/935ee7c1b86ba9592604834bb673c7a3.asciidoc b/docs/doc_examples/935ee7c1b86ba9592604834bb673c7a3.asciidoc
new file mode 100644
index 000000000..c18b0521e
--- /dev/null
+++ b/docs/doc_examples/935ee7c1b86ba9592604834bb673c7a3.asciidoc
@@ -0,0 +1,93 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "museums",
+  mappings: {
+    properties: {
+      location: {
+        type: "geo_point",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.bulk({
+  index: "museums",
+  refresh: "true",
+  operations: [
+    {
+      index: {
+        _id: 1,
+      },
+    },
+    {
+      location: "POINT (4.912350 52.374081)",
+      name: "NEMO Science Museum",
+    },
+    {
+      index: {
+        _id: 2,
+      },
+    },
+    {
+      location: "POINT (4.901618 52.369219)",
+      name: "Museum Het Rembrandthuis",
+    },
+    {
+      index: {
+        _id: 3,
+      },
+    },
+    {
+      location: "POINT (4.914722 52.371667)",
+      name: "Nederlands Scheepvaartmuseum",
+    },
+    {
+      index: {
+        _id: 4,
+      },
+    },
+    {
+      location: "POINT (4.405200 51.222900)",
+      name: "Letterenhuis",
+    },
+    {
+      index: {
+        _id: 5,
+      },
+    },
+    {
+      location: "POINT (2.336389 48.861111)",
+      name: "Musée du Louvre",
+    },
+    {
+      index: {
+        _id: 6,
+      },
+    },
+    {
+      location: "POINT (2.327000 48.860000)",
+      name: "Musée d'Orsay",
+    },
+  ],
+});
+console.log(response1);
+
+const response2 = await client.search({
+  index: "museums",
+  size: 0,
+  aggregations: {
+    "large-grid": {
+      geotile_grid: {
+        field: "location",
+        precision: 8,
+      },
+    },
+  },
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/936d809c848f8b77d5b55f57f0aab89a.asciidoc b/docs/doc_examples/936d809c848f8b77d5b55f57f0aab89a.asciidoc
new file mode 100644
index 000000000..d05a21ad8
--- /dev/null
+++ b/docs/doc_examples/936d809c848f8b77d5b55f57f0aab89a.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    date_detection: false,
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  document: {
+    create_date: "2015/09/02",
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/937089157fc82cf08b68a954d0e6d52c.asciidoc b/docs/doc_examples/937089157fc82cf08b68a954d0e6d52c.asciidoc
new file mode 100644
index 000000000..e8709d3b8
--- /dev/null
+++ b/docs/doc_examples/937089157fc82cf08b68a954d0e6d52c.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.eql.search({
+  index: "my-data-stream",
+  query:
+    '\n sequence with maxspan=1h\n [ process where process.name == "regsvr32.exe" ]\n [ file where stringContains(file.name, "scrobj.dll") ]\n ',
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9370e4935ab6678571d3227973b8c830.asciidoc b/docs/doc_examples/9370e4935ab6678571d3227973b8c830.asciidoc
new file mode 100644
index 000000000..22384fd94
--- /dev/null
+++ b/docs/doc_examples/9370e4935ab6678571d3227973b8c830.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.get({
+  index: "_all",
+  filter_path: "*.aliases",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/937ffc65cbb20505a8aba25b37a796a5.asciidoc b/docs/doc_examples/937ffc65cbb20505a8aba25b37a796a5.asciidoc
new file mode 100644
index 000000000..a0bfc9691
--- /dev/null
+++ b/docs/doc_examples/937ffc65cbb20505a8aba25b37a796a5.asciidoc
@@ -0,0 +1,37 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      is_published: {
+        type: "boolean",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  refresh: "true",
+  document: {
+    is_published: "true",
+  },
+});
+console.log(response1);
+
+const response2 = await client.search({
+  index: "my-index-000001",
+  query: {
+    term: {
+      is_published: true,
+    },
+  },
+});
+console.log(response2);
+----
diff --git a/docs/doc_examples/9382f022086c692ba05efb0acae65946.asciidoc b/docs/doc_examples/9382f022086c692ba05efb0acae65946.asciidoc
new file mode 100644
index 000000000..5b5333497
--- /dev/null
+++ b/docs/doc_examples/9382f022086c692ba05efb0acae65946.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "idx",
+  id: 1,
+  document: {
+    foo: [
+      {
+        bar: 1,
+      },
+      {
+        bar: 2,
+      },
+    ],
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9399cbbd133ec2b7aad2820fa617ae3a.asciidoc b/docs/doc_examples/9399cbbd133ec2b7aad2820fa617ae3a.asciidoc
new file mode 100644
index 000000000..c0d1061cf
--- /dev/null
+++ b/docs/doc_examples/9399cbbd133ec2b7aad2820fa617ae3a.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "child_example",
+  mappings: {
+    properties: {
+      join: {
+        type: "join",
+        relations: {
+          question: "answer",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/93bd651aff81daa2b86f9f2089e6d088.asciidoc b/docs/doc_examples/93bd651aff81daa2b86f9f2089e6d088.asciidoc
new file mode 100644
index 000000000..ca7e7083d
--- /dev/null
+++ b/docs/doc_examples/93bd651aff81daa2b86f9f2089e6d088.asciidoc
@@ -0,0 +1,33 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  refresh: "true",
+  document: {
+    my_id: "1",
+    text: "This is a question",
+    my_join_field: {
+      name: "question",
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index-000001",
+  id: 2,
+  refresh: "true",
+  document: {
+    my_id: "2",
+    text: "This is another question",
+    my_join_field: {
+      name: "question",
+    },
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/93cd0fdd5ca22838db06aa1cabdbe8bd.asciidoc b/docs/doc_examples/93cd0fdd5ca22838db06aa1cabdbe8bd.asciidoc
new file mode 100644
index 000000000..bb5e1dd88
--- /dev/null
+++ b/docs/doc_examples/93cd0fdd5ca22838db06aa1cabdbe8bd.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "hugging-face-embeddings",
+  knn: {
+    field: "content_embedding",
+    query_vector_builder: {
+      text_embedding: {
+        model_id: "hugging_face_embeddings",
+        model_text: "What's margin of error?",
+      },
+    },
+    k: 10,
+    num_candidates: 100,
+  },
+  _source: ["id", "content"],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/93d7ba4130722cae04f9690e52a8f54f.asciidoc b/docs/doc_examples/93d7ba4130722cae04f9690e52a8f54f.asciidoc
new file mode 100644
index 000000000..94645c842
--- /dev/null
+++
b/docs/doc_examples/93d7ba4130722cae04f9690e52a8f54f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "envelope", + coordinates: [ + [100, 1], + [101, 0], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/93f1bdd72e79827dcf9a34efa02fd977.asciidoc b/docs/doc_examples/93f1bdd72e79827dcf9a34efa02fd977.asciidoc deleted file mode 100644 index f01780a88..000000000 --- a/docs/doc_examples/93f1bdd72e79827dcf9a34efa02fd977.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre', - order: { - _key: 'asc' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/93fb59d3204f37af952198b331fb6bb7.asciidoc b/docs/doc_examples/93fb59d3204f37af952198b331fb6bb7.asciidoc new file mode 100644 index 000000000..71a90d24f --- /dev/null +++ b/docs/doc_examples/93fb59d3204f37af952198b331fb6bb7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.get({ + task_id: "oTUltX4IQMOUUVeiohTt8A:12345", + wait_for_completion: "true", + timeout: "10s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9403764e6eccad7b321b65e9a10c5727.asciidoc b/docs/doc_examples/9403764e6eccad7b321b65e9a10c5727.asciidoc new file mode 100644 index 000000000..aabf664ea --- /dev/null +++ b/docs/doc_examples/9403764e6eccad7b321b65e9a10c5727.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + tags: { + terms: { + field: "tags", + include: ".*sport.*", + exclude: "water_.*", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc b/docs/doc_examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc new file mode 100644 index 000000000..c96246fe5 --- /dev/null +++ b/docs/doc_examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.segments({ + index: "test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9410af79177dd1df9b7b16229a581e18.asciidoc b/docs/doc_examples/9410af79177dd1df9b7b16229a581e18.asciidoc new file mode 100644 index 000000000..41587cc87 --- /dev/null +++ b/docs/doc_examples/9410af79177dd1df9b7b16229a581e18.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.changePassword({ + username: "jacknich", + password: "new-test-password", +}); +console.log(response); +---- diff --git a/docs/doc_examples/941c8d05486200e835d97642e4ee05d5.asciidoc b/docs/doc_examples/941c8d05486200e835d97642e4ee05d5.asciidoc new file mode 100644 index 000000000..a185efe9b --- /dev/null +++ 
b/docs/doc_examples/941c8d05486200e835d97642e4ee05d5.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + text: { + type: "text", + term_vector: "with_positions_offsets_payloads", + store: true, + analyzer: "fulltext_analyzer", + }, + fullname: { + type: "text", + term_vector: "with_positions_offsets_payloads", + analyzer: "fulltext_analyzer", + }, + }, + }, + settings: { + index: { + number_of_shards: 1, + number_of_replicas: 0, + }, + analysis: { + analyzer: { + fulltext_analyzer: { + type: "custom", + tokenizer: "whitespace", + filter: ["lowercase", "type_as_payload"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/94246f45025ed394cd6415ed8d7a0588.asciidoc b/docs/doc_examples/94246f45025ed394cd6415ed8d7a0588.asciidoc new file mode 100644 index 000000000..44902fb96 --- /dev/null +++ b/docs/doc_examples/94246f45025ed394cd6415ed8d7a0588.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.deleteJob({ + id: "sensor", +}); +console.log(response); +---- diff --git a/docs/doc_examples/944806221eb89f5af2298ccdf2902277.asciidoc b/docs/doc_examples/944806221eb89f5af2298ccdf2902277.asciidoc new file mode 100644 index 000000000..3770b06fb --- /dev/null +++ b/docs/doc_examples/944806221eb89f5af2298ccdf2902277.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.getRollupCaps({ + id: "_all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/944a2dc22dae2a8503299926326a9c18.asciidoc b/docs/doc_examples/944a2dc22dae2a8503299926326a9c18.asciidoc new file mode 100644 index 000000000..24ff68e5a --- /dev/null +++ b/docs/doc_examples/944a2dc22dae2a8503299926326a9c18.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + ip_addr: { + type: "ip", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + ip_addr: "192.168.1.1", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + term: { + ip_addr: "192.168.0.0/16", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d8b115341da772a628a024e7d1644e73.asciidoc b/docs/doc_examples/946522c26d02bebf5c527ba28e55c724.asciidoc similarity index 73% rename from docs/doc_examples/d8b115341da772a628a024e7d1644e73.asciidoc rename to docs/doc_examples/946522c26d02bebf5c527ba28e55c724.asciidoc index afc639f0b..69e043fad 100644 --- a/docs/doc_examples/d8b115341da772a628a024e7d1644e73.asciidoc +++ b/docs/doc_examples/946522c26d02bebf5c527ba28e55c724.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.updateByQuery({ - index: 'twitter', - routing: '1' -}) -console.log(response) + index: "my-index-000001", + routing: 1, +}); +console.log(response); ---- - diff --git 
a/docs/doc_examples/9467e52087a13b63b02d78c35ff6f798.asciidoc b/docs/doc_examples/9467e52087a13b63b02d78c35ff6f798.asciidoc new file mode 100644 index 000000000..9c2ef9bd8 --- /dev/null +++ b/docs/doc_examples/9467e52087a13b63b02d78c35ff6f798.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_phrase: { + message: "this is a test", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/94cd66bf93f99881c1bda547283a0357.asciidoc b/docs/doc_examples/94cd66bf93f99881c1bda547283a0357.asciidoc new file mode 100644 index 000000000..4007f1b3d --- /dev/null +++ b/docs/doc_examples/94cd66bf93f99881c1bda547283a0357.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "quantized-image-index", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + "image-vector": [0.1, -2], + title: "moose family", + }, + { + index: { + _id: "2", + }, + }, + { + "image-vector": [0.75, -1], + title: "alpine lake", + }, + { + index: { + _id: "3", + }, + }, + { + "image-vector": [1.2, 0.1], + title: "full moon", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc b/docs/doc_examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc new file mode 100644 index 000000000..bc6ad4ec1 --- /dev/null +++ b/docs/doc_examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + term: { + "query.extraction_result": "failed", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/950f1230536422567f99a205ff4165ec.asciidoc b/docs/doc_examples/950f1230536422567f99a205ff4165ec.asciidoc new file mode 100644 index 000000000..ff8b26755 --- /dev/null +++ b/docs/doc_examples/950f1230536422567f99a205ff4165ec.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-write-alias", + conditions: { + max_age: "7d", + max_docs: 1000, + max_primary_shard_size: "50gb", + max_primary_shard_docs: "2000", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9524a9b7373fa4eb2905183b0e806962.asciidoc b/docs/doc_examples/9524a9b7373fa4eb2905183b0e806962.asciidoc deleted file mode 100644 index a3e252a35..000000000 --- a/docs/doc_examples/9524a9b7373fa4eb2905183b0e806962.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'sales', - size: '0', - body: { - aggs: { - sales_over_time: { - date_histogram: { - field: 'date', - calendar_interval: '1M', - format: 'yyyy-MM-dd', - keyed: true - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/95414139c7b1203e3c2d99a354415801.asciidoc b/docs/doc_examples/95414139c7b1203e3c2d99a354415801.asciidoc new file mode 100644 index 000000000..dd746d18a --- /dev/null +++ 
b/docs/doc_examples/95414139c7b1203e3c2d99a354415801.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.recovery({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9559de0c2190f99fcc344887fc7b232a.asciidoc b/docs/doc_examples/9559de0c2190f99fcc344887fc7b232a.asciidoc new file mode 100644 index 000000000..4b9fedd1f --- /dev/null +++ b/docs/doc_examples/9559de0c2190f99fcc344887fc7b232a.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "bicycles", + mappings: { + properties: { + cycle_type: { + type: "constant_keyword", + value: "bicycle", + }, + name: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.create({ + index: "other_cycles", + mappings: { + properties: { + cycle_type: { + type: "keyword", + }, + name: { + type: "text", + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/956cb470258024af964cd2dabbaf7c7c.asciidoc b/docs/doc_examples/956cb470258024af964cd2dabbaf7c7c.asciidoc new file mode 100644 index 000000000..f70c4bed1 --- /dev/null +++ b/docs/doc_examples/956cb470258024af964cd2dabbaf7c7c.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index", + settings: { + "index.routing.allocation.require.data": null, + "index.routing.allocation.include._tier_preference": "data_warm,data_hot", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc b/docs/doc_examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc new file mode 100644 index 000000000..53a2b96c5 --- /dev/null +++ b/docs/doc_examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["content", "name"], + query: "this AND that", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9584b042223982e0bfde8d12d42c9705.asciidoc b/docs/doc_examples/9584b042223982e0bfde8d12d42c9705.asciidoc new file mode 100644 index 000000000..9583fffb8 --- /dev/null +++ b/docs/doc_examples/9584b042223982e0bfde8d12d42c9705.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "kerbrolemapping", + roles: ["monitoring_user"], + enabled: true, + rules: { + field: { + username: "user@REALM", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/95b3f53f2065737bbeba6199e8a12df3.asciidoc b/docs/doc_examples/95b3f53f2065737bbeba6199e8a12df3.asciidoc new file mode 100644 index 000000000..de34c8968 --- /dev/null +++ b/docs/doc_examples/95b3f53f2065737bbeba6199e8a12df3.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- 
+const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + color: ["blue", "green"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/95c03bdef4faf6bef039c986f4cb3aba.asciidoc b/docs/doc_examples/95c03bdef4faf6bef039c986f4cb3aba.asciidoc new file mode 100644 index 000000000..aab4afc2b --- /dev/null +++ b/docs/doc_examples/95c03bdef4faf6bef039c986f4cb3aba.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: ".watcher-history*", + pretty: "true", + query: { + match: { + "result.condition.met": true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9606c271921cb800d5ea395b16d6ceaf.asciidoc b/docs/doc_examples/9606c271921cb800d5ea395b16d6ceaf.asciidoc new file mode 100644 index 000000000..226313ea3 --- /dev/null +++ b/docs/doc_examples/9606c271921cb800d5ea395b16d6ceaf.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "galician_example", + settings: { + analysis: { + filter: { + galician_stop: { + type: "stop", + stopwords: "_galician_", + }, + galician_keywords: { + type: "keyword_marker", + keywords: ["exemplo"], + }, + galician_stemmer: { + type: "stemmer", + language: "galician", + }, + }, + analyzer: { + rebuilt_galician: { + tokenizer: "standard", + filter: [ + "lowercase", + "galician_stop", + "galician_keywords", + "galician_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9608820dbeac261ba53fb89bb9400560.asciidoc b/docs/doc_examples/9608820dbeac261ba53fb89bb9400560.asciidoc new file mode 100644 index 000000000..116032d11 --- /dev/null +++ b/docs/doc_examples/9608820dbeac261ba53fb89bb9400560.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + owner: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/962e6187bbd71c5749376efed04b65ba.asciidoc b/docs/doc_examples/962e6187bbd71c5749376efed04b65ba.asciidoc new file mode 100644 index 000000000..eb3734014 --- /dev/null +++ b/docs/doc_examples/962e6187bbd71c5749376efed04b65ba.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role6", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + except: ["customer.handle"], + grant: ["customer.*"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/966ff3a4c5b61ed1a36d44c17ce06157.asciidoc b/docs/doc_examples/966ff3a4c5b61ed1a36d44c17ce06157.asciidoc new file mode 100644 index 000000000..bcb11e41e --- /dev/null +++ b/docs/doc_examples/966ff3a4c5b61ed1a36d44c17ce06157.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + settings: { + analysis: { + char_filter: { + quote: { + type: "mapping", + mappings: ['« => "', '» => "'], 
+ }, + }, + normalizer: { + my_normalizer: { + type: "custom", + char_filter: ["quote"], + filter: ["lowercase", "asciifolding"], + }, + }, + }, + }, + mappings: { + properties: { + foo: { + type: "keyword", + normalizer: "my_normalizer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc b/docs/doc_examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc new file mode 100644 index 000000000..20185078b --- /dev/null +++ b/docs/doc_examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.hasPrivileges({ + cluster: ["monitor", "manage"], + index: [ + { + names: ["suppliers", "products"], + privileges: ["read"], + }, + { + names: ["inventory"], + privileges: ["read", "write"], + }, + ], + application: [ + { + application: "inventory_manager", + privileges: ["read", "data:write/inventory"], + resources: ["product/1852563"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/96b9289c3c4c6b135ab3386562c4ee8d.asciidoc b/docs/doc_examples/96b9289c3c4c6b135ab3386562c4ee8d.asciidoc new file mode 100644 index 000000000..1be0187a6 --- /dev/null +++ b/docs/doc_examples/96b9289c3c4c6b135ab3386562c4ee8d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.max_shards_per_node": 1200, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/96de5703ba0bd43fd4ac239ec5408542.asciidoc b/docs/doc_examples/96de5703ba0bd43fd4ac239ec5408542.asciidoc deleted file mode 100644 index 7de06d9cd..000000000 --- a/docs/doc_examples/96de5703ba0bd43fd4ac239ec5408542.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - script: { - source: 'ctx._source.counter += params.count', - lang: 'painless', - params: { - count: 4 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/96e137e42d12c180e2c702db30714a9e.asciidoc b/docs/doc_examples/96e137e42d12c180e2c702db30714a9e.asciidoc new file mode 100644 index 000000000..1f0fb8c86 --- /dev/null +++ b/docs/doc_examples/96e137e42d12c180e2c702db30714a9e.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + full_name: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/96ea0e80323d6d2d99964625c004a44d.asciidoc b/docs/doc_examples/96ea0e80323d6d2d99964625c004a44d.asciidoc new file mode 100644 index 000000000..0ce997004 --- /dev/null +++ b/docs/doc_examples/96ea0e80323d6d2d99964625c004a44d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putDataLifecycle({ + name: "dsl-data-stream", + data_retention: "7d", + enabled: false, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/971c7a36ee79f2b3aa82c64ea338de70.asciidoc b/docs/doc_examples/971c7a36ee79f2b3aa82c64ea338de70.asciidoc new file mode 100644 index 000000000..f3aa57240 --- /dev/null +++ b/docs/doc_examples/971c7a36ee79f2b3aa82c64ea338de70.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + mappings: { + properties: { + foo: { + type: "keyword", + eager_global_ordinals: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc b/docs/doc_examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc index 3cd422a08..2620184e7 100644 --- a/docs/doc_examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc +++ b/docs/doc_examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc @@ -4,19 +4,17 @@ [source, js] ---- const response = await client.reindex({ - body: { - source: { - index: 'metricbeat-*' - }, - dest: { - index: 'metricbeat' - }, - script: { - lang: 'painless', - source: "ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'" - } - } -}) -console.log(response) + source: { + index: "metricbeat-*", + }, + dest: { + index: "metricbeat", + }, + script: { + lang: "painless", + source: + "ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/975b4b92464d52068516aa2f0f955cc1.asciidoc b/docs/doc_examples/975b4b92464d52068516aa2f0f955cc1.asciidoc new file mode 100644 index 000000000..0dc310f84 --- /dev/null +++ b/docs/doc_examples/975b4b92464d52068516aa2f0f955cc1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.segments({ + index: "test1,test2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/976e5f9baf81bd6ca0e9f80916a0a4f9.asciidoc b/docs/doc_examples/976e5f9baf81bd6ca0e9f80916a0a4f9.asciidoc new file mode 100644 index 000000000..f47d5d772 --- /dev/null +++ b/docs/doc_examples/976e5f9baf81bd6ca0e9f80916a0a4f9.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role1", + indices: [ + { + names: ["events-*"], + privileges: ["read"], + field_security: { + grant: ["category", "@timestamp", "message"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/97916243f245478b735471a9e37f33d1.asciidoc b/docs/doc_examples/97916243f245478b735471a9e37f33d1.asciidoc new file mode 100644 index 000000000..967dc2118 --- /dev/null +++ b/docs/doc_examples/97916243f245478b735471a9e37f33d1.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "ip_addresses", + size: 10, + aggs: { + ip_ranges: { + ip_range: { + field: "ip", + ranges: [ + { + to: "10.0.0.5", + }, + { + from: "10.0.0.5", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/979d25dff2d8987119410291ad47b0d1.asciidoc 
b/docs/doc_examples/979d25dff2d8987119410291ad47b0d1.asciidoc deleted file mode 100644 index 69b52ce27..000000000 --- a/docs/doc_examples/979d25dff2d8987119410291ad47b0d1.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - _geo_distance: { - 'pin.location': { - lat: 40, - lon: -70 - }, - order: 'asc', - unit: 'km' - } - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc b/docs/doc_examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc new file mode 100644 index 000000000..be8ac4caf --- /dev/null +++ b/docs/doc_examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_internal/desired_balance", +}); +console.log(response); +---- diff --git a/docs/doc_examples/97ae2b62aa372a955278be6f660356ba.asciidoc b/docs/doc_examples/97ae2b62aa372a955278be6f660356ba.asciidoc new file mode 100644 index 000000000..b423d4033 --- /dev/null +++ b/docs/doc_examples/97ae2b62aa372a955278be6f660356ba.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + combined_fields: { + query: "distributed consensus", + fields: ["title^2", "body"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/97babc8d19ef0866774576716eb6d19e.asciidoc b/docs/doc_examples/97babc8d19ef0866774576716eb6d19e.asciidoc index 45f1ef2fd..bc8757667 100644 --- a/docs/doc_examples/97babc8d19ef0866774576716eb6d19e.asciidoc +++ b/docs/doc_examples/97babc8d19ef0866774576716eb6d19e.asciidoc @@ -3,24 +3,21 @@ [source, js] ---- -const response0 = await client.updateByQuery({ - index: 'test', - refresh: true, - conflicts: 'proceed' -}) -console.log(response0) +const response = await client.updateByQuery({ + index: "test", + refresh: "true", + conflicts: "proceed", +}); +console.log(response); const response1 = await client.search({ - index: 'test', - filter_path: 'hits.total', - body: { - query: { - match: { - flag: 'foo' - } - } - } -}) -console.log(response1) + index: "test", + filter_path: "hits.total", + query: { + match: { + flag: "foo", + }, + }, +}); +console.log(response1); ---- - diff --git a/docs/doc_examples/97da68c09c9f1a97a21780fd404e213a.asciidoc b/docs/doc_examples/97da68c09c9f1a97a21780fd404e213a.asciidoc new file mode 100644 index 000000000..3ea9ccfc3 --- /dev/null +++ b/docs/doc_examples/97da68c09c9f1a97a21780fd404e213a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "network-traffic", + size: 0, + aggs: { + "ipv4-subnets": { + ip_prefix: { + field: "ipv4", + prefix_length: 24, + append_prefix_length: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc b/docs/doc_examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc new file mode 100644 index 000000000..76e056902 --- 
/dev/null +++ b/docs/doc_examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.start(); +console.log(response); +---- diff --git a/docs/doc_examples/97f260817b60f3deb7f7034d7dee7e12.asciidoc b/docs/doc_examples/97f260817b60f3deb7f7034d7dee7e12.asciidoc new file mode 100644 index 000000000..de62cbe9d --- /dev/null +++ b/docs/doc_examples/97f260817b60f3deb7f7034d7dee7e12.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + agg_metric: { + type: "aggregate_metric_double", + metrics: ["min", "max", "sum", "value_count"], + default_metric: "max", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + agg_metric: { + min: -302.5, + max: 702.3, + sum: 200, + value_count: 25, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/97f5df84efec655f479fad78bc392d4d.asciidoc b/docs/doc_examples/97f5df84efec655f479fad78bc392d4d.asciidoc new file mode 100644 index 000000000..62eeaf223 --- /dev/null +++ b/docs/doc_examples/97f5df84efec655f479fad78bc392d4d.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + profile: true, + query: { + term: { + "user.id": { + value: "elkbee", + }, + }, + }, + aggs: { + my_scoped_agg: { + terms: { + field: "http.response.status_code", + }, + }, + my_global_agg: { + global: {}, + aggs: { + my_level_agg: { + terms: { + field: "http.response.status_code", + }, + }, + }, + }, + }, + post_filter: { + match: { + message: "search", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/983a867c90e63e070518f2f709f659ee.asciidoc b/docs/doc_examples/983a867c90e63e070518f2f709f659ee.asciidoc new file mode 100644 index 000000000..117b811b7 --- /dev/null +++ b/docs/doc_examples/983a867c90e63e070518f2f709f659ee.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + card: { + type: "wildcard", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + card: ["king", "ace", "ace", "jack"], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/983fbb78e57e8fe98db38cf2d217e943.asciidoc b/docs/doc_examples/983fbb78e57e8fe98db38cf2d217e943.asciidoc new file mode 100644 index 000000000..375c8c058 --- /dev/null +++ b/docs/doc_examples/983fbb78e57e8fe98db38cf2d217e943.asciidoc @@ -0,0 +1,56 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + comments: { + type: "nested", + }, + }, + }, +}); +console.log(response); + +const response1 = 
await client.index({ + index: "test", + id: 1, + refresh: "true", + document: { + title: "Test title", + comments: [ + { + author: "kimchy", + text: "comment text", + }, + { + author: "nik9000", + text: "words words words", + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "test", + query: { + nested: { + path: "comments", + query: { + match: { + "comments.text": "words", + }, + }, + inner_hits: { + _source: false, + docvalue_fields: ["comments.text.keyword"], + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/9851f5225150bc032fb3b195cd447f4f.asciidoc b/docs/doc_examples/9851f5225150bc032fb3b195cd447f4f.asciidoc new file mode 100644 index 000000000..1adf874ce --- /dev/null +++ b/docs/doc_examples/9851f5225150bc032fb3b195cd447f4f.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "byte-image-index", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + "byte-image-vector": [5, -20], + title: "moose family", + }, + { + index: { + _id: "2", + }, + }, + { + "byte-image-vector": [8, -15], + title: "alpine lake", + }, + { + index: { + _id: "3", + }, + }, + { + "byte-image-vector": [11, 23], + title: "full moon", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/98574a419b6be603a0af8f7f22a92d23.asciidoc b/docs/doc_examples/98574a419b6be603a0af8f7f22a92d23.asciidoc new file mode 100644 index 000000000..bbfc2a28b --- /dev/null +++ b/docs/doc_examples/98574a419b6be603a0af8f7f22a92d23.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.processorGrok(); +console.log(response); +---- diff --git a/docs/doc_examples/98621bea4765b1b838cc9daa914bf5c5.asciidoc b/docs/doc_examples/98621bea4765b1b838cc9daa914bf5c5.asciidoc new file mode 100644 index 000000000..f7c866698 --- /dev/null +++ b/docs/doc_examples/98621bea4765b1b838cc9daa914bf5c5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence with maxspan=1h\n [ process where process.name == "regsvr32.exe" ] by process.pid\n [ file where stringContains(file.name, "scrobj.dll") ] by process.pid\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/98855f4bda8726d5d123aeebf7869e47.asciidoc b/docs/doc_examples/98855f4bda8726d5d123aeebf7869e47.asciidoc new file mode 100644 index 000000000..bc24781f2 --- /dev/null +++ b/docs/doc_examples/98855f4bda8726d5d123aeebf7869e47.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodeattrs({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9887f65af249bbf09190b1153ea2597b.asciidoc b/docs/doc_examples/9887f65af249bbf09190b1153ea2597b.asciidoc new file mode 100644 index 000000000..a37868f52 --- /dev/null +++ b/docs/doc_examples/9887f65af249bbf09190b1153ea2597b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` 
to generate the docs examples + +[source, js] +---- +const response = await client.sql.getAsyncStatus({ + id: "FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/98aeb275f829b5f7b8eb2147701565ff.asciidoc b/docs/doc_examples/98aeb275f829b5f7b8eb2147701565ff.asciidoc deleted file mode 100644 index 15a6eef31..000000000 --- a/docs/doc_examples/98aeb275f829b5f7b8eb2147701565ff.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - script: { - source: "if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'none' }", - lang: 'painless', - params: { - tag: 'green' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/98b121bf47cebd85671a2cb519688d28.asciidoc b/docs/doc_examples/98b121bf47cebd85671a2cb519688d28.asciidoc deleted file mode 100644 index 37593d0bb..000000000 --- a/docs/doc_examples/98b121bf47cebd85671a2cb519688d28.asciidoc +++ /dev/null @@ -1,32 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - JapaneseCars: { - terms: { - field: 'make', - include: [ - 'mazda', - 'honda' - ] - } - }, - ActiveCarManufacturers: { - terms: { - field: 'make', - exclude: [ - 'rover', - 'jensen' - ] - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/98b403c356a9b14544e9b9f646845e9f.asciidoc b/docs/doc_examples/98b403c356a9b14544e9b9f646845e9f.asciidoc new file mode 100644 index 000000000..2054692fc --- /dev/null +++ b/docs/doc_examples/98b403c356a9b14544e9b9f646845e9f.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-search-template", + script: { + lang: "mustache", + source: { + query: { + multi_match: { + query: "{{query_string}}", + fields: + "[{{#text_fields}}{{user_name}}{{^last}},{{/last}}{{/text_fields}}]", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/98c1080d8630d3a18d564312300d020f.asciidoc b/docs/doc_examples/98c1080d8630d3a18d564312300d020f.asciidoc new file mode 100644 index 000000000..962c58754 --- /dev/null +++ b/docs/doc_examples/98c1080d8630d3a18d564312300d020f.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + network_direction: { + internal_networks: ["private"], + }, + }, + ], + }, + docs: [ + { + _source: { + source: { + ip: "128.232.110.120", + }, + destination: { + ip: "192.168.1.1", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/98f43710cedd28a464e8abf4b09bcc9a.asciidoc b/docs/doc_examples/98f43710cedd28a464e8abf4b09bcc9a.asciidoc new file mode 100644 index 000000000..01c0b0b60 --- /dev/null +++ b/docs/doc_examples/98f43710cedd28a464e8abf4b09bcc9a.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + range: { + "@timestamp": { + gte: "now-1d/d", + lt: "now/d", + }, + }, + }, + aggs: { + "my-agg-name": { + terms: { + field: "my-field", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/98f7525ec0bc8945eafa008a5a9c50c0.asciidoc b/docs/doc_examples/98f7525ec0bc8945eafa008a5a9c50c0.asciidoc new file mode 100644 index 000000000..c64fae279 --- /dev/null +++ b/docs/doc_examples/98f7525ec0bc8945eafa008a5a9c50c0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + wait_for_completion_timeout: "2s", + query: '\n process where process.name == "cmd.exe"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/990c0d794ed6f05d1620b5d49f7aff6e.asciidoc b/docs/doc_examples/990c0d794ed6f05d1620b5d49f7aff6e.asciidoc new file mode 100644 index 000000000..ac79f2a9d --- /dev/null +++ b/docs/doc_examples/990c0d794ed6f05d1620b5d49f7aff6e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataLifecycle({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc b/docs/doc_examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc new file mode 100644 index 000000000..cfffd248f --- /dev/null +++ b/docs/doc_examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + first_name: { + type: "text", + }, + last_name: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + first_name: "Barry", + last_name: "White", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + script_fields: { + full_name: { + script: { + lang: "painless", + source: "params._source.first_name + ' ' + params._source.last_name", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc b/docs/doc_examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc new file mode 100644 index 000000000..6d805dd85 --- /dev/null +++ b/docs/doc_examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + fields: { + body: {}, + "blog.title": { + number_of_fragments: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99474a7e7979816c874aeac4403be5d0.asciidoc b/docs/doc_examples/99474a7e7979816c874aeac4403be5d0.asciidoc new file mode 100644 index 000000000..eebad0741 --- /dev/null +++ b/docs/doc_examples/99474a7e7979816c874aeac4403be5d0.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + by_date: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + avg_price: { + rate: { + field: "price", + unit: "day", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/996521cef7803ef363a49ac6321ea1de.asciidoc b/docs/doc_examples/996521cef7803ef363a49ac6321ea1de.asciidoc new file mode 100644 index 000000000..673a49d35 --- /dev/null +++ b/docs/doc_examples/996521cef7803ef363a49ac6321ea1de.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence with maxspan=1d\n [ process where process.name == "cmd.exe" ]\n ![ process where stringContains(process.command_line, "ocx") ]\n [ file where stringContains(file.name, "scrobj.dll") ]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/996f320a0f537c24b9cd0d71b5f7c1f8.asciidoc b/docs/doc_examples/996f320a0f537c24b9cd0d71b5f7c1f8.asciidoc new file mode 100644 index 000000000..41212a2e2 --- /dev/null +++ b/docs/doc_examples/996f320a0f537c24b9cd0d71b5f7c1f8.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + query: { + match: { + message: "elasticsearch", + }, + }, + script_score: { + script: { + params: { + a: 5, + b: 1.2, + }, + source: "params.a / Math.pow(params.b, doc['my-int'].value)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99803d7b111b862c0c82e9908e549b16.asciidoc b/docs/doc_examples/99803d7b111b862c0c82e9908e549b16.asciidoc new file mode 100644 index 000000000..3989008d1 --- /dev/null +++ b/docs/doc_examples/99803d7b111b862c0c82e9908e549b16.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/mistral-embeddings-test", + body: { + service: "mistral", + service_settings: { + api_key: "", + model: "mistral-embed", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/998651b98e152add530084a631a4ab5a.asciidoc b/docs/doc_examples/998651b98e152add530084a631a4ab5a.asciidoc new file mode 100644 index 000000000..9b0378abb --- /dev/null +++ b/docs/doc_examples/998651b98e152add530084a631a4ab5a.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "indices.lifecycle.poll_interval": "1m", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99a56f423df3a0e57b7f20146f0d33b5.asciidoc b/docs/doc_examples/99a56f423df3a0e57b7f20146f0d33b5.asciidoc new file mode 100644 index 000000000..24b0adfe1 --- /dev/null +++ b/docs/doc_examples/99a56f423df3a0e57b7f20146f0d33b5.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] 
+---- +const response = await client.indices.create({ + index: "logs", + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + message: { + type: "match_only_text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99b617a0a83fcfbe5755ccc724a4ce62.asciidoc b/docs/doc_examples/99b617a0a83fcfbe5755ccc724a4ce62.asciidoc new file mode 100644 index 000000000..74a851858 --- /dev/null +++ b/docs/doc_examples/99b617a0a83fcfbe5755ccc724a4ce62.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "place_path_category", + id: 1, + document: { + suggest: ["timmy's", "starbucks", "dunkin donuts"], + cat: ["cafe", "food"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc b/docs/doc_examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc new file mode 100644 index 000000000..e6e7c97af --- /dev/null +++ b/docs/doc_examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.putAutoFollowPattern({ + name: "", + remote_cluster: "", + leader_index_patterns: [""], + leader_index_exclusion_patterns: [""], + follow_index_pattern: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a02bd47c000a3d9a8911233c37c890f.asciidoc b/docs/doc_examples/9a02bd47c000a3d9a8911233c37c890f.asciidoc new file mode 100644 index 000000000..4a993773e --- /dev/null +++ b/docs/doc_examples/9a02bd47c000a3d9a8911233c37c890f.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + date: "2015-10-01T00:30:00Z", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + date: "2015-10-01T01:30:00Z", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + by_day: { + date_histogram: { + field: "date", + calendar_interval: "day", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/9a036a792be1d39af9fd0d1adb5f3402.asciidoc b/docs/doc_examples/9a036a792be1d39af9fd0d1adb5f3402.asciidoc new file mode 100644 index 000000000..fda0186bc --- /dev/null +++ b/docs/doc_examples/9a036a792be1d39af9fd0d1adb5f3402.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "keep", + keep_words: ["dog", "elephant", "fox"], + }, + ], + text: "the quick fox jumps over the lazy dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a05cc10eea1251e23b82a4549913536.asciidoc b/docs/doc_examples/9a05cc10eea1251e23b82a4549913536.asciidoc new file mode 100644 index 000000000..e86a646f7 --- /dev/null +++ b/docs/doc_examples/9a05cc10eea1251e23b82a4549913536.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.allocation({ + v: "true", + s: "node", + h: "node,shards,disk.percent,disk.indices,disk.used", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a09d33ec11e20b6081cae882282ca60.asciidoc b/docs/doc_examples/9a09d33ec11e20b6081cae882282ca60.asciidoc new file mode 100644 index 000000000..64d3dfd53 --- /dev/null +++ b/docs/doc_examples/9a09d33ec11e20b6081cae882282ca60.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedPrivileges({ + application: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a203aae3e1412d919546276fb52a5ca.asciidoc b/docs/doc_examples/9a203aae3e1412d919546276fb52a5ca.asciidoc new file mode 100644 index 000000000..411649c25 --- /dev/null +++ b/docs/doc_examples/9a203aae3e1412d919546276fb52a5ca.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/cohere-embeddings", + body: { + service: "cohere", + service_settings: { + api_key: "", + model_id: "embed-english-light-v3.0", + embedding_type: "byte", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc b/docs/doc_examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc new file mode 100644 index 000000000..d0eb6f8e0 --- /dev/null +++ b/docs/doc_examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc @@ -0,0 +1,50 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + size: 10000, + query: { + geo_bounding_box: { + "my-geo-field": { + top_left: { + lat: -40.979898069620134, + lon: -45, + }, + bottom_right: { + lat: -66.51326044311186, + lon: 0, + }, + }, + }, + }, + aggregations: { + grid: { + geotile_grid: { + field: "my-geo-field", + precision: 11, + size: 65536, + bounds: { + top_left: { + lat: -40.979898069620134, + lon: -45, + }, + bottom_right: { + lat: -66.51326044311186, + lon: 0, + }, + }, + }, + }, + bounds: { + geo_bounds: { + field: "my-geo-field", + wrap_longitude: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc b/docs/doc_examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc index db81c549c..cd2f894b2 100644 --- a/docs/doc_examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc +++ b/docs/doc_examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc @@ -3,24 +3,23 @@ [source, js] ---- -const response0 = await client.index({ - index: 'metricbeat-2016.05.30', - id: '1', - refresh: true, - body: { - 'system.cpu.idle.pct': 0.908 - } -}) -console.log(response0) +const response = await client.index({ + index: "metricbeat-2016.05.30", + id: 1, + refresh: "true", + document: { + "system.cpu.idle.pct": 0.908, + }, +}); +console.log(response); const response1 = await client.index({ - index: 'metricbeat-2016.05.31', - id: '1', - refresh: true, - body: { - 'system.cpu.idle.pct': 0.105 - } -}) -console.log(response1) + index: "metricbeat-2016.05.31", + id: 1, + refresh: "true", + document: { + 
"system.cpu.idle.pct": 0.105, + }, +}); +console.log(response1); ---- - diff --git a/docs/doc_examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc b/docs/doc_examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc new file mode 100644 index 000000000..30ecd2bf5 --- /dev/null +++ b/docs/doc_examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_test_scores_2", + query: { + term: { + grad_year: "2099", + }, + }, + sort: [ + { + total_score: { + order: "desc", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc b/docs/doc_examples/9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc new file mode 100644 index 000000000..b6c3a9585 --- /dev/null +++ b/docs/doc_examples/9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: ".watcher-history*", + pretty: "true", + query: { + bool: { + must: [ + { + match: { + "result.condition.met": true, + }, + }, + { + range: { + "result.execution_time": { + from: "now-10s", + }, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc b/docs/doc_examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc new file mode 100644 index 000000000..531a8e414 --- /dev/null +++ b/docs/doc_examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "product-index", + query: { + script_score: { + query: { + bool: { + filter: { + range: { + price: { + gte: 1000, + }, + }, + }, + }, + }, + script: { + source: "cosineSimilarity(params.queryVector, 'product-vector') + 1.0", + params: { + queryVector: [-0.5, 90, -10, 14.8, -156], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ad14a9d7bf2699e2d86b6a607d410c0.asciidoc b/docs/doc_examples/9ad14a9d7bf2699e2d86b6a607d410c0.asciidoc new file mode 100644 index 000000000..aa641b73c --- /dev/null +++ b/docs/doc_examples/9ad14a9d7bf2699e2d86b6a607d410c0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.get({ + name: "my_search_application", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ad38ab4d9c3983e97e8c38fec611f10.asciidoc b/docs/doc_examples/9ad38ab4d9c3983e97e8c38fec611f10.asciidoc new file mode 100644 index 000000000..f2f4ae872 --- /dev/null +++ b/docs/doc_examples/9ad38ab4d9c3983e97e8c38fec611f10.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + leader: { + seeds: ["127.0.0.1:9300"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ae268058c0ea32ef8926568e011c728.asciidoc b/docs/doc_examples/9ae268058c0ea32ef8926568e011c728.asciidoc 
new file mode 100644 index 000000000..185a6a226 --- /dev/null +++ b/docs/doc_examples/9ae268058c0ea32ef8926568e011c728.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_features", + body: { + features: { + document_level_security: { + enabled: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc b/docs/doc_examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc new file mode 100644 index 000000000..5e9aae7b4 --- /dev/null +++ b/docs/doc_examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.geoIpStats(); +console.log(response); +---- diff --git a/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc new file mode 100644 index 000000000..bb5d71a23 --- /dev/null +++ b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.postBehavioralAnalyticsEvent({ + collection_name: "my_analytics_collection", + event_type: "search_click", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc b/docs/doc_examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc new file mode 100644 index 000000000..2f79ad0e7 --- /dev/null +++ b/docs/doc_examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/_sync_job/my-connector-sync-job-id/_cancel", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc b/docs/doc_examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc new file mode 100644 index 000000000..54a531e1a --- /dev/null +++ b/docs/doc_examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.stats(); +console.log(response); +---- diff --git a/docs/doc_examples/9b345e0bfd45f3a37194585ec9193478.asciidoc b/docs/doc_examples/9b345e0bfd45f3a37194585ec9193478.asciidoc new file mode 100644 index 000000000..79d2f713b --- /dev/null +++ b/docs/doc_examples/9b345e0bfd45f3a37194585ec9193478.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9b68748c061b768c0153c1f2508ce207.asciidoc b/docs/doc_examples/9b68748c061b768c0153c1f2508ce207.asciidoc new file mode 100644 index 000000000..889534c39 --- /dev/null +++ b/docs/doc_examples/9b68748c061b768c0153c1f2508ce207.asciidoc @@ -0,0 +1,22 
@@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + clusterA: { + mode: "proxy", + skip_unavailable: "true", + server_name: "clustera.es.region-a.gcp.elastic-cloud.com", + proxy_socket_connections: "18", + proxy_address: "clustera.es.region-a.gcp.elastic-cloud.com:9400", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9b92266d87170e93a84f9700596d9035.asciidoc b/docs/doc_examples/9b92266d87170e93a84f9700596d9035.asciidoc new file mode 100644 index 000000000..b9c2e1dfe --- /dev/null +++ b/docs/doc_examples/9b92266d87170e93a84f9700596d9035.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example", + mappings: { + properties: { + location: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "example", + refresh: "true", + document: { + name: "Wind & Wetter, Berlin, Germany", + location: { + type: "point", + coordinates: [13.400544, 52.530286], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/9ba6f1e64c1dfff5aac26eaa1d093f48.asciidoc b/docs/doc_examples/9ba6f1e64c1dfff5aac26eaa1d093f48.asciidoc new file mode 100644 index 000000000..c124140d0 --- /dev/null +++ b/docs/doc_examples/9ba6f1e64c1dfff5aac26eaa1d093f48.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["lowercase", "custom_stems", "porter_stem"], + }, + }, + filter: { + custom_stems: { + type: "stemmer_override", + rules: ["running, runs => run", "stemmer => stemmer"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ba868784f417a8d3679b3c8ed5939ad.asciidoc b/docs/doc_examples/9ba868784f417a8d3679b3c8ed5939ad.asciidoc new file mode 100644 index 000000000..2a5963d6d --- /dev/null +++ b/docs/doc_examples/9ba868784f417a8d3679b3c8ed5939ad.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_size: "100gb", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/38c1d0f6668e9563c0827f839f9fa505.asciidoc b/docs/doc_examples/9bae72e974bdeb56007d9104e73eff92.asciidoc similarity index 63% rename from docs/doc_examples/38c1d0f6668e9563c0827f839f9fa505.asciidoc rename to docs/doc_examples/9bae72e974bdeb56007d9104e73eff92.asciidoc index ecd233312..9e58839bd 100644 --- a/docs/doc_examples/38c1d0f6668e9563c0827f839f9fa505.asciidoc +++ b/docs/doc_examples/9bae72e974bdeb56007d9104e73eff92.asciidoc @@ -4,14 +4,9 @@ [source, js] ---- const response = await client.update({ - index: 'test', - id: '1', - body: { - doc: { - name: 'new_name' - } - } -}) -console.log(response) + index: "test", + id: 1, + script: "ctx._source.remove('new_field')", +}); 
+console.log(response); ---- - diff --git a/docs/doc_examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc b/docs/doc_examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc new file mode 100644 index 000000000..5486c20ed --- /dev/null +++ b/docs/doc_examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + index: "my-index-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc b/docs/doc_examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc new file mode 100644 index 000000000..87d69814b --- /dev/null +++ b/docs/doc_examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": "drm3btev3e86", + order: "asc", + unit: "km", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9bfdda207b701028a3439e495e800c02.asciidoc b/docs/doc_examples/9bfdda207b701028a3439e495e800c02.asciidoc new file mode 100644 index 000000000..287a69f92 --- /dev/null +++ b/docs/doc_examples/9bfdda207b701028a3439e495e800c02.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + calendar_interval: "1M", + format: "yyyy-MM-dd", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc new file mode 100644 index 000000000..395ba9b0f --- /dev/null +++ b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.oidcAuthenticate({}); +console.log(response); +---- diff --git a/docs/doc_examples/9c021836acf7c0370e289f611325868d.asciidoc b/docs/doc_examples/9c021836acf7c0370e289f611325868d.asciidoc new file mode 100644 index 000000000..f50a76d1b --- /dev/null +++ b/docs/doc_examples/9c021836acf7c0370e289f611325868d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-spo-connector/_configuration", + body: { + values: { + tenant_id: "my-tenant-id", + tenant_name: "my-sharepoint-site", + client_id: "foo", + secret_value: "bar", + site_collections: "*", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9c4ac64e73141f6cbf2fb6da0743d9b7.asciidoc b/docs/doc_examples/9c4ac64e73141f6cbf2fb6da0743d9b7.asciidoc new file mode 100644 index 000000000..6de7fa524 --- /dev/null +++ b/docs/doc_examples/9c4ac64e73141f6cbf2fb6da0743d9b7.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: { + query: "Quick foxes", + analyzer: "stop", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9c5cbbdbe0075ab9c2611627fe4748fb.asciidoc b/docs/doc_examples/9c5cbbdbe0075ab9c2611627fe4748fb.asciidoc new file mode 100644 index 000000000..221cdeaa1 --- /dev/null +++ b/docs/doc_examples/9c5cbbdbe0075ab9c2611627fe4748fb.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "decimal_digit_example", + settings: { + analysis: { + analyzer: { + whitespace_decimal_digit: { + tokenizer: "whitespace", + filter: ["decimal_digit"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9c6ea5fe2339d6c7e5e4bf1b98990248.asciidoc b/docs/doc_examples/9c6ea5fe2339d6c7e5e4bf1b98990248.asciidoc new file mode 100644 index 000000000..f1d5306fe --- /dev/null +++ b/docs/doc_examples/9c6ea5fe2339d6c7e5e4bf1b98990248.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", + knn: { + field: "image-vector", + query_vector: [-5, 9, -12], + k: 10, + num_candidates: 100, + }, + fields: ["title", "file-type"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9c7c8051592b6af3adb5d7c490849068.asciidoc b/docs/doc_examples/9c7c8051592b6af3adb5d7c490849068.asciidoc new file mode 100644 index 000000000..38e4630c1 --- /dev/null +++ b/docs/doc_examples/9c7c8051592b6af3adb5d7c490849068.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDatafeed({ + datafeed_id: "datafeed-test-job", + pretty: "true", + indices: ["kibana_sample_data_logs"], + query: { + bool: { + must: [ + { + match_all: {}, + }, + ], + }, + }, + job_id: "test-job", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc b/docs/doc_examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc new file mode 100644 index 000000000..d842bf3e9 --- /dev/null +++ b/docs/doc_examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + filter_path: "*.settings.index.routing.allocation.include._tier_preference", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc b/docs/doc_examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc new file mode 100644 index 000000000..2f7852343 --- /dev/null +++ b/docs/doc_examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.inferTrainedModel({ + model_id: "model2", + docs: [ + { + text_field: "This is a very happy person", + }, + ], + inference_config: { + zero_shot_classification: { + labels: ["glad", "sad", "bad", "rad"], + 
multi_label: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc b/docs/doc_examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc new file mode 100644 index 000000000..25b3a01c8 --- /dev/null +++ b/docs/doc_examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + "\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc b/docs/doc_examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc new file mode 100644 index 000000000..4c9ea62c8 --- /dev/null +++ b/docs/doc_examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + watch: { + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + search: { + request: { + indices: ["logs"], + body: { + query: { + match: { + message: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 0, + }, + }, + }, + actions: { + log_error: { + logging: { + text: "Found {{ctx.payload.hits.total}} errors in the logs", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc b/docs/doc_examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc new file mode 100644 index 000000000..9944c2a27 --- /dev/null +++ b/docs/doc_examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + id: "my_watch", + action_modes: { + _all: "force_execute", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5a7050fb9dcb9574e081957ade28617.asciidoc b/docs/doc_examples/9cfbc41bb7b6fbdb26550dd2789c274e.asciidoc similarity index 50% rename from docs/doc_examples/a5a7050fb9dcb9574e081957ade28617.asciidoc rename to docs/doc_examples/9cfbc41bb7b6fbdb26550dd2789c274e.asciidoc index b9cb57e59..72fdaf837 100644 --- a/docs/doc_examples/a5a7050fb9dcb9574e081957ade28617.asciidoc +++ b/docs/doc_examples/9cfbc41bb7b6fbdb26550dd2789c274e.asciidoc @@ -4,19 +4,16 @@ [source, js] ---- const response = await client.deleteByQuery({ - index: 'twitter', - refresh: true, - slices: '5', - body: { - query: { - range: { - likes: { - lt: 10 - } - } - } - } -}) -console.log(response) + index: "my-index-000001", + refresh: "true", + slices: 5, + query: { + range: { + "http.response.bytes": { + lt: 2000000, + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/9d1fb129ac783355a20097effded1845.asciidoc b/docs/doc_examples/9d1fb129ac783355a20097effded1845.asciidoc new file mode 100644 index 000000000..edbed6a51 --- /dev/null +++ b/docs/doc_examples/9d1fb129ac783355a20097effded1845.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + 
index: "test", + refresh: "true", + operations: [ + { + index: {}, + }, + { + s: 1, + m: 3.1415, + }, + { + index: {}, + }, + { + s: 2, + m: 1, + }, + { + index: {}, + }, + { + s: 3, + m: 2.71828, + }, + ], +}); +console.log(response); + +const response1 = await client.search({ + index: "test", + filter_path: "aggregations", + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + s: "desc", + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/9d31c7eaf8c6b56cee2fdfdde8a442bb.asciidoc b/docs/doc_examples/9d31c7eaf8c6b56cee2fdfdde8a442bb.asciidoc new file mode 100644 index 000000000..047504f75 --- /dev/null +++ b/docs/doc_examples/9d31c7eaf8c6b56cee2fdfdde8a442bb.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + shrink: { + max_primary_shard_size: "50gb", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d461ae140ddc018efd2650559800cd1.asciidoc b/docs/doc_examples/9d461ae140ddc018efd2650559800cd1.asciidoc new file mode 100644 index 000000000..13f8b9c99 --- /dev/null +++ b/docs/doc_examples/9d461ae140ddc018efd2650559800cd1.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + allocate: { + number_of_replicas: 1, + require: { + box_type: "cold", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d47f02a063444da9f098858a1830d28.asciidoc b/docs/doc_examples/9d47f02a063444da9f098858a1830d28.asciidoc new file mode 100644 index 000000000..b10da9a05 --- /dev/null +++ b/docs/doc_examples/9d47f02a063444da9f098858a1830d28.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.disk.watermark.low": "30gb", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d5855075e7008270459cc88c189043d.asciidoc b/docs/doc_examples/9d5855075e7008270459cc88c189043d.asciidoc new file mode 100644 index 000000000..bd11081bd --- /dev/null +++ b/docs/doc_examples/9d5855075e7008270459cc88c189043d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "cross-cluster-user", + password: "l0ng-r4nd0m-p@ssw0rd", + roles: ["remote-replication"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d662fc9f943c287b7144f5e4e2ae358.asciidoc b/docs/doc_examples/9d662fc9f943c287b7144f5e4e2ae358.asciidoc new file mode 100644 index 000000000..01c5e240c --- /dev/null +++ b/docs/doc_examples/9d662fc9f943c287b7144f5e4e2ae358.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "reviews", + size: 0, + aggs: { + review_variability: { + 
median_absolute_deviation: { + field: "rating", + compression: 100, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d67db8370a98854812d38ae73ee2a12.asciidoc b/docs/doc_examples/9d67db8370a98854812d38ae73ee2a12.asciidoc new file mode 100644 index 000000000..357b432b0 --- /dev/null +++ b/docs/doc_examples/9d67db8370a98854812d38ae73ee2a12.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index2", + query: { + query_string: { + query: "running with scissors", + fields: ["comment", "comment.english"], + }, + }, + highlight: { + order: "score", + fields: { + comment: { + type: "fvh", + matched_fields: ["comment", "comment.english"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc b/docs/doc_examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc new file mode 100644 index 000000000..08ce23ec8 --- /dev/null +++ b/docs/doc_examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.healthReport(); +console.log(response); +---- diff --git a/docs/doc_examples/9d9c8d715b72ce336e604c2c8a2b540e.asciidoc b/docs/doc_examples/9d9c8d715b72ce336e604c2c8a2b540e.asciidoc new file mode 100644 index 000000000..1cc661947 --- /dev/null +++ b/docs/doc_examples/9d9c8d715b72ce336e604c2c8a2b540e.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + total_sales: { + sum: { + field: "price", + }, + }, + sales_bucket_sort: { + bucket_sort: { + sort: [ + { + total_sales: { + order: "desc", + }, + }, + ], + size: 3, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9de10a59a5f56dd0906be627896cc789.asciidoc b/docs/doc_examples/9de10a59a5f56dd0906be627896cc789.asciidoc new file mode 100644 index 000000000..7e05930ac --- /dev/null +++ b/docs/doc_examples/9de10a59a5f56dd0906be627896cc789.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "bicycles,other_cycles", + query: { + match: { + description: "dutch", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9de4704d2f047dae1259249112488697.asciidoc b/docs/doc_examples/9de4704d2f047dae1259249112488697.asciidoc new file mode 100644 index 000000000..68b58c208 --- /dev/null +++ b/docs/doc_examples/9de4704d2f047dae1259249112488697.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_backup", + repository: { + type: "azure", + settings: { + client: "secondary", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9de4ea9d5f3d427a71ee07d998cb5611.asciidoc 
b/docs/doc_examples/9de4ea9d5f3d427a71ee07d998cb5611.asciidoc new file mode 100644 index 000000000..f22be9ab8 --- /dev/null +++ b/docs/doc_examples/9de4ea9d5f3d427a71ee07d998cb5611.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: "my-index-000001", + block: "write", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc b/docs/doc_examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc new file mode 100644 index 000000000..f5015c526 --- /dev/null +++ b/docs/doc_examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\n FROM library\n | EVAL year = DATE_EXTRACT("year", release_date)\n | WHERE page_count > ?1 AND author == ?2\n | STATS count = COUNT(*) by year\n | WHERE count > ?3\n | LIMIT 5\n ', + params: [300, "Frank Herbert", 0], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9e0e3ce27967f164f4585c5231ba9c75.asciidoc b/docs/doc_examples/9e0e3ce27967f164f4585c5231ba9c75.asciidoc new file mode 100644 index 000000000..cbe2f22fa --- /dev/null +++ b/docs/doc_examples/9e0e3ce27967f164f4585c5231ba9c75.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + my_field: "quick brown fox jump lazy dog", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9e3c28d5820c38ea117eb2e9a5061089.asciidoc b/docs/doc_examples/9e3c28d5820c38ea117eb2e9a5061089.asciidoc new file mode 100644 index 000000000..8b5d51237 --- /dev/null +++ b/docs/doc_examples/9e3c28d5820c38ea117eb2e9a5061089.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + rank_feature: { + field: "pagerank", + sigmoid: { + pivot: 7, + exponent: 0.6, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc b/docs/doc_examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc new file mode 100644 index 000000000..0baae0b73 --- /dev/null +++ b/docs/doc_examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.putSynonym({ + id: "my-synonyms-set", + synonyms_set: [ + { + id: "test-1", + synonyms: "hello, hi", + }, + { + synonyms: "bye, goodbye", + }, + { + id: "test-2", + synonyms: "test => check", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9e56d79ad9a02b642c361f0b85dd95d7.asciidoc b/docs/doc_examples/9e56d79ad9a02b642c361f0b85dd95d7.asciidoc deleted file mode 100644 index ca673d252..000000000 --- a/docs/doc_examples/9e56d79ad9a02b642c361f0b85dd95d7.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate 
the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - color: { - type: 'keyword' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/9e5ae957fd0663662bfbed9d1effe99e.asciidoc b/docs/doc_examples/9e5ae957fd0663662bfbed9d1effe99e.asciidoc new file mode 100644 index 000000000..51c64f7e8 --- /dev/null +++ b/docs/doc_examples/9e5ae957fd0663662bfbed9d1effe99e.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + description: "Set '_routing' to 'geoip.country_iso_code' value", + field: "_routing", + value: "{{{geoip.country_iso_code}}}", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9e962baf1fb407c21d6c47dcd37cec29.asciidoc b/docs/doc_examples/9e962baf1fb407c21d6c47dcd37cec29.asciidoc new file mode 100644 index 000000000..891d78d79 --- /dev/null +++ b/docs/doc_examples/9e962baf1fb407c21d6c47dcd37cec29.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + match: { + message: "{{query_string}}", + }, + }, + from: "{{from}}{{^from}}0{{/from}}", + size: "{{size}}{{^size}}10{{/size}}", + }, + params: { + query_string: "hello world", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc b/docs/doc_examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc new file mode 100644 index 000000000..5b700034d --- /dev/null +++ b/docs/doc_examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + bytes: "b", + s: "store.size:desc,index:asc", + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9eda9c39428b0c2c53cbd8ee7ae0f888.asciidoc b/docs/doc_examples/9eda9c39428b0c2c53cbd8ee7ae0f888.asciidoc new file mode 100644 index 000000000..960e07320 --- /dev/null +++ b/docs/doc_examples/9eda9c39428b0c2c53cbd8ee7ae0f888.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlAuthenticate({ + content: + "PHNhbWxwOlJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6cHJvdG9jb2wiIHhtbG5zOnNhbWw9InVybjpvYXNpczpuYW1lczp0YzpTQU1MOjIuMD.....", + ids: [], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9eef31d85ebaf6c27054d7375715dbe0.asciidoc b/docs/doc_examples/9eef31d85ebaf6c27054d7375715dbe0.asciidoc new file mode 100644 index 000000000..3855ad705 --- /dev/null +++ b/docs/doc_examples/9eef31d85ebaf6c27054d7375715dbe0.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_event_watch", + trigger: { + schedule: { + interval: "5m", + }, + }, + input: { + search: { + request: { + indices: "log-events", + body: { + query: { + 
match: { + status: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 0, + }, + }, + }, + actions: { + log_hits: { + foreach: "ctx.payload.hits.hits", + max_iterations: 500, + logging: { + text: "Found id {{ctx.payload._id}} with field {{ctx.payload._source.my_field}}", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f04cc1a0c6cdb3ed2247f1399713767.asciidoc b/docs/doc_examples/9f04cc1a0c6cdb3ed2247f1399713767.asciidoc new file mode 100644 index 000000000..98a85f208 --- /dev/null +++ b/docs/doc_examples/9f04cc1a0c6cdb3ed2247f1399713767.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + tags: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f0a0029982d9b3423a2a3de1f1b5136.asciidoc b/docs/doc_examples/9f0a0029982d9b3423a2a3de1f1b5136.asciidoc new file mode 100644 index 000000000..8beac4d25 --- /dev/null +++ b/docs/doc_examples/9f0a0029982d9b3423a2a3de1f1b5136.asciidoc @@ -0,0 +1,98 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (491.2350 5237.4081)", + city: "Amsterdam", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (490.1618 5236.9219)", + city: "Amsterdam", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (491.4722 5237.1667)", + city: "Amsterdam", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (440.5200 5122.2900)", + city: "Antwerp", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (233.6389 4886.1111)", + city: "Paris", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (232.7000 4886.0000)", + city: "Paris", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + aggs: { + centroid: { + cartesian_centroid: { + field: "location", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc b/docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc new file mode 100644 index 000000000..0da3acf87 --- /dev/null +++ b/docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/openai-embeddings", + body: { + service: "openai", + service_settings: { + api_key: "", + model_id: "text-embedding-ada-002", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f22a0920cc763eefa233ced963d9624.asciidoc 
b/docs/doc_examples/9f22a0920cc763eefa233ced963d9624.asciidoc new file mode 100644 index 000000000..873439e01 --- /dev/null +++ b/docs/doc_examples/9f22a0920cc763eefa233ced963d9624.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_term: { + "user.id": { + term: "kimchy", + boost: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f286416f1b18940f13cb27ab5c8458e.asciidoc b/docs/doc_examples/9f286416f1b18940f13cb27ab5c8458e.asciidoc new file mode 100644 index 000000000..b3b8507ef --- /dev/null +++ b/docs/doc_examples/9f286416f1b18940f13cb27ab5c8458e.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + filter: ["my_pattern_replace_filter"], + }, + }, + filter: { + my_pattern_replace_filter: { + type: "pattern_replace", + pattern: "[£|€]", + replacement: "", + all: false, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc b/docs/doc_examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc new file mode 100644 index 000000000..173a6c38c --- /dev/null +++ b/docs/doc_examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "job-candidates", + query: { + terms_set: { + programming_languages: { + terms: ["c++", "java", "php"], + minimum_should_match_script: { + source: "Math.min(params.num_terms, doc['required_matches'].value)", + }, + boost: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f66b5243050f71ed51bc787a7ac1218.asciidoc b/docs/doc_examples/9f66b5243050f71ed51bc787a7ac1218.asciidoc new file mode 100644 index 000000000..7570912eb --- /dev/null +++ b/docs/doc_examples/9f66b5243050f71ed51bc787a7ac1218.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "index2", + refresh: "true", + operations: [ + { + index: { + _id: "doc1", + }, + }, + { + comment: "run with scissors", + }, + { + index: { + _id: "doc2", + }, + }, + { + comment: "running with scissors", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f7671119236423e0e40801ef6485af1.asciidoc b/docs/doc_examples/9f7671119236423e0e40801ef6485af1.asciidoc new file mode 100644 index 000000000..0a1a681c9 --- /dev/null +++ b/docs/doc_examples/9f7671119236423e0e40801ef6485af1.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["uppercase"], + text: "the Quick FoX JUMPs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc b/docs/doc_examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc new file mode 100644 index 
000000000..7d840f8ed --- /dev/null +++ b/docs/doc_examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.updateDataFrameAnalytics({ + id: "loganalytics", + model_memory_limit: "200mb", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9fa55fc76ec4bd81f372e9389f1da851.asciidoc b/docs/doc_examples/9fa55fc76ec4bd81f372e9389f1da851.asciidoc new file mode 100644 index 000000000..96753281c --- /dev/null +++ b/docs/doc_examples/9fa55fc76ec4bd81f372e9389f1da851.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-data-stream", + settings: { + index: { + refresh_interval: "30s", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9fda516a5dc60ba477b970eaad4429db.asciidoc b/docs/doc_examples/9fda516a5dc60ba477b970eaad4429db.asciidoc new file mode 100644 index 000000000..00673e725 --- /dev/null +++ b/docs/doc_examples/9fda516a5dc60ba477b970eaad4429db.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataLifecycle({ + name: "my-data-stream*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9feff356f302ea4915347ab71cc4887a.asciidoc b/docs/doc_examples/9feff356f302ea4915347ab71cc4887a.asciidoc new file mode 100644 index 000000000..28f39bf5a --- /dev/null +++ b/docs/doc_examples/9feff356f302ea4915347ab71cc4887a.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.simulateTemplate({ + index_patterns: ["my-index-*"], + composed_of: ["ct2"], + priority: 10, + template: { + settings: { + "index.number_of_replicas": 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc b/docs/doc_examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc new file mode 100644 index 000000000..972ad5d5e --- /dev/null +++ b/docs/doc_examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.closePointInTime({ + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ffe41322c095af1b6ea45a79b640a6f.asciidoc b/docs/doc_examples/9ffe41322c095af1b6ea45a79b640a6f.asciidoc new file mode 100644 index 000000000..7742b99dd --- /dev/null +++ b/docs/doc_examples/9ffe41322c095af1b6ea45a79b640a6f.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_within: { + little: { + span_term: { + field1: "foo", + }, + }, + big: { + span_near: { + clauses: [ + { + span_term: { + field1: "bar", + }, + }, + { + 
span_term: { + field1: "baz", + }, + }, + ], + slop: 5, + in_order: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc b/docs/doc_examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc new file mode 100644 index 000000000..df7650ac2 --- /dev/null +++ b/docs/doc_examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.getRollupCaps({ + id: "sensor-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a008f42379930edc354b4074e0a33344.asciidoc b/docs/doc_examples/a008f42379930edc354b4074e0a33344.asciidoc new file mode 100644 index 000000000..3f2438447 --- /dev/null +++ b/docs/doc_examples/a008f42379930edc354b4074e0a33344.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "index", + id: 1, + document: { + designation: "spoon", + price: 13, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a01753fa7b4ba6dc19054f4f42d91cd9.asciidoc b/docs/doc_examples/a01753fa7b4ba6dc19054f4f42d91cd9.asciidoc new file mode 100644 index 000000000..9461dd6d9 --- /dev/null +++ b/docs/doc_examples/a01753fa7b4ba6dc19054f4f42d91cd9.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: + '{ "query": { "bool": { "filter": [ { "range": { "@timestamp": { "gte": {{#year_scope}} "now-1y/d" {{/year_scope}} {{^year_scope}} "now-1d/d" {{/year_scope}} , "lt": "now/d" }}}, { "term": { "user.id": "{{user_id}}" }}]}}}', + params: { + year_scope: true, + user_id: "kimchy", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a037beb3d02296e1d36dd43ef5c935dd.asciidoc b/docs/doc_examples/a037beb3d02296e1d36dd43ef5c935dd.asciidoc new file mode 100644 index 000000000..c584944c7 --- /dev/null +++ b/docs/doc_examples/a037beb3d02296e1d36dd43ef5c935dd.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["keyword_repeat"], + text: "fox running and jumping", + explain: true, + attributes: "keyword", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a0497157fdefecd04e597edb800a1a95.asciidoc b/docs/doc_examples/a0497157fdefecd04e597edb800a1a95.asciidoc new file mode 100644 index 000000000..2c2bb2bdd --- /dev/null +++ b/docs/doc_examples/a0497157fdefecd04e597edb800a1a95.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + _source: "obj.*", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a04a8d90f8245ff5f30a9983909faa1d.asciidoc b/docs/doc_examples/a04a8d90f8245ff5f30a9983909faa1d.asciidoc new file mode 100644 index 000000000..a035737b6 --- /dev/null +++ b/docs/doc_examples/a04a8d90f8245ff5f30a9983909faa1d.asciidoc @@ -0,0 +1,45 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_queries1", + settings: { + analysis: { + analyzer: { + wildcard_prefix: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "wildcard_edge_ngram"], + }, + }, + filter: { + wildcard_edge_ngram: { + type: "edge_ngram", + min_gram: 1, + max_gram: 32, + }, + }, + }, + }, + mappings: { + properties: { + query: { + type: "percolator", + }, + my_field: { + type: "text", + fields: { + prefix: { + type: "text", + analyzer: "wildcard_prefix", + search_analyzer: "standard", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a0871be90badeecd2f8d8ec90230e248.asciidoc b/docs/doc_examples/a0871be90badeecd2f8d8ec90230e248.asciidoc new file mode 100644 index 000000000..288ddcba7 --- /dev/null +++ b/docs/doc_examples/a0871be90badeecd2f8d8ec90230e248.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + char_filter: ["my_char_filter"], + filter: ["lowercase"], + }, + }, + char_filter: { + my_char_filter: { + type: "pattern_replace", + pattern: "(?<=\\p{Lower})(?=\\p{Upper})", + replacement: " ", + }, + }, + }, + }, + mappings: { + properties: { + text: { + type: "text", + analyzer: "my_analyzer", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "The fooBarBaz method", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a0a7557bb7e2aff7918557cd648f41af.asciidoc b/docs/doc_examples/a0a7557bb7e2aff7918557cd648f41af.asciidoc new file mode 100644 index 000000000..036309628 --- /dev/null +++ b/docs/doc_examples/a0a7557bb7e2aff7918557cd648f41af.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + aggs: { + price_ranges: { + range: { + field: "price", + ranges: [ + { + to: 10, + }, + { + from: 10, + to: 100, + }, + { + from: 100, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a0c64894f14d28b7e0c902add71d2e9a.asciidoc b/docs/doc_examples/a0c64894f14d28b7e0c902add71d2e9a.asciidoc new file mode 100644 index 000000000..de2db6d50 --- /dev/null +++ b/docs/doc_examples/a0c64894f14d28b7e0c902add71d2e9a.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "xpack.profiling.templates.enabled": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ebb6b59fbc9325c17e45f524602d6be2.asciidoc b/docs/doc_examples/a0c868282c0514a342ad04998cdc2175.asciidoc similarity index 58% rename from docs/doc_examples/ebb6b59fbc9325c17e45f524602d6be2.asciidoc rename to docs/doc_examples/a0c868282c0514a342ad04998cdc2175.asciidoc index 061466a85..500a4aa86 100644 --- a/docs/doc_examples/ebb6b59fbc9325c17e45f524602d6be2.asciidoc +++ 
b/docs/doc_examples/a0c868282c0514a342ad04998cdc2175.asciidoc @@ -4,15 +4,11 @@ [source, js] ---- const response = await client.deleteByQuery({ - index: 'twitter', - body: { - query: { - match: { - message: 'some message' - } - } - } -}) -console.log(response) + index: "my-index-000001", + conflicts: "proceed", + query: { + match_all: {}, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/a0d53dcb3df938fc0a01d248571a41e4.asciidoc b/docs/doc_examples/a0d53dcb3df938fc0a01d248571a41e4.asciidoc new file mode 100644 index 000000000..d8c564f74 --- /dev/null +++ b/docs/doc_examples/a0d53dcb3df938fc0a01d248571a41e4.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + runtime_mappings: { + "price.discounted": { + type: "double", + script: + "\n double price = doc['price'].value;\n if (doc['product'].value == 'mad max') {\n price *= 0.8;\n }\n emit(price);\n ", + }, + }, + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + price: { + histogram: { + interval: 5, + field: "price.discounted", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a0f4e902d18460337684d74ea932fbe9.asciidoc b/docs/doc_examples/a0f4e902d18460337684d74ea932fbe9.asciidoc new file mode 100644 index 000000000..8e8910255 --- /dev/null +++ b/docs/doc_examples/a0f4e902d18460337684d74ea932fbe9.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + doc: { + name: "new_name", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1070cf2f5969d42d71cda057223f152.asciidoc b/docs/doc_examples/a1070cf2f5969d42d71cda057223f152.asciidoc new file mode 100644 index 000000000..945dcb537 --- /dev/null +++ b/docs/doc_examples/a1070cf2f5969d42d71cda057223f152.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a116949e446f34dc25ae57d4b703d0c1.asciidoc b/docs/doc_examples/a116949e446f34dc25ae57d4b703d0c1.asciidoc deleted file mode 100644 index ddb66dfb9..000000000 --- a/docs/doc_examples/a116949e446f34dc25ae57d4b703d0c1.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - range: { - age: { - gte: 10, - lte: 20, - boost: 2 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a1377b32d7fe3680079ae0df73009b0e.asciidoc b/docs/doc_examples/a1377b32d7fe3680079ae0df73009b0e.asciidoc new file mode 100644 index 000000000..619608a67 --- /dev/null +++ b/docs/doc_examples/a1377b32d7fe3680079ae0df73009b0e.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + query: { + term: { + tags: "car", + }, + }, + aggs: { + by_sale: { + nested: { + path: "comments", + }, + aggs: { + by_user: { + 
terms: { + field: "comments.username", + size: 1, + }, + aggs: { + by_nested: { + top_hits: {}, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1490f71d705053951870fd2d3bceb39.asciidoc b/docs/doc_examples/a1490f71d705053951870fd2d3bceb39.asciidoc new file mode 100644 index 000000000..19c475da7 --- /dev/null +++ b/docs/doc_examples/a1490f71d705053951870fd2d3bceb39.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + session_data: { + type: "object", + enabled: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "session_1", + document: { + session_data: "foo bar", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a159143bb578403bb9c7ff37d635d7ad.asciidoc b/docs/doc_examples/a159143bb578403bb9c7ff37d635d7ad.asciidoc new file mode 100644 index 000000000..2f4c0b3b7 --- /dev/null +++ b/docs/doc_examples/a159143bb578403bb9c7ff37d635d7ad.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "predicate_token_filter", + script: { + source: "\n token.term.length() > 3\n ", + }, + }, + ], + text: "the fox jumps the lazy dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc b/docs/doc_examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc new file mode 100644 index 000000000..668b41dfa --- /dev/null +++ b/docs/doc_examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getLifecycle(); +console.log(response); +---- diff --git a/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc new file mode 100644 index 000000000..82a81bced --- /dev/null +++ b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.renderQuery({ + name: "my_search_application", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a180c97f8298fb2388fdcaf7b2e1b81e.asciidoc b/docs/doc_examples/a180c97f8298fb2388fdcaf7b2e1b81e.asciidoc new file mode 100644 index 000000000..d2545c390 --- /dev/null +++ b/docs/doc_examples/a180c97f8298fb2388fdcaf7b2e1b81e.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "nightly-snapshots", + schedule: "0 30 2 * * ?", + name: "", + repository: "my_repository", + config: { + indices: "*", + include_global_state: true, + feature_states: ["kibana", "security"], + }, + retention: { + expire_after: "30d", + min_count: 5, + max_count: 50, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/a1879930c1dac36a57d7f094a680420b.asciidoc b/docs/doc_examples/a1879930c1dac36a57d7f094a680420b.asciidoc new file mode 100644 index 000000000..f7f63fea4 --- /dev/null +++ b/docs/doc_examples/a1879930c1dac36a57d7f094a680420b.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "zoomed-in": { + filter: { + geo_bounding_box: { + location: { + top_left: "u17", + bottom_right: "u17", + }, + }, + }, + aggregations: { + zoom1: { + geohash_grid: { + field: "location", + precision: 8, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a197076e0e74951ea88f20309ec257e2.asciidoc b/docs/doc_examples/a197076e0e74951ea88f20309ec257e2.asciidoc new file mode 100644 index 000000000..9b7a0457a --- /dev/null +++ b/docs/doc_examples/a197076e0e74951ea88f20309ec257e2.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "palindrome_list", + settings: { + analysis: { + analyzer: { + whitespace_reverse_first_token: { + tokenizer: "whitespace", + filter: ["reverse_first_token"], + }, + }, + filter: { + reverse_first_token: { + type: "condition", + filter: ["reverse"], + script: { + source: "token.getPosition() === 0", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1acf454bd6477183ce27ace872deb46.asciidoc b/docs/doc_examples/a1acf454bd6477183ce27ace872deb46.asciidoc new file mode 100644 index 000000000..7bae40744 --- /dev/null +++ b/docs/doc_examples/a1acf454bd6477183ce27ace872deb46.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role7", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + grant: ["a.*"], + except: ["a.b*"], + }, + }, + ], +}); +console.log(response); + +const response1 = await client.security.putRole({ + name: "test_role8", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + grant: ["a.b*"], + except: ["a.b.c*"], + }, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc b/docs/doc_examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc new file mode 100644 index 000000000..4d6375dbb --- /dev/null +++ b/docs/doc_examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1d0603b24a5b048f0959975d8057534.asciidoc b/docs/doc_examples/a1d0603b24a5b048f0959975d8057534.asciidoc new file mode 100644 index 000000000..0d951cbb3 --- /dev/null +++ b/docs/doc_examples/a1d0603b24a5b048f0959975d8057534.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "my-index-000001", + doc: 
{ + fullname: "John Doe", + text: "test test test", + }, + fields: ["fullname"], + per_field_analyzer: { + fullname: "keyword", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1db5c822745fe167e9ef854dca3d129.asciidoc b/docs/doc_examples/a1db5c822745fe167e9ef854dca3d129.asciidoc deleted file mode 100644 index f15cf7bf7..000000000 --- a/docs/doc_examples/a1db5c822745fe167e9ef854dca3d129.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - _geo_distance: { - 'pin.location': 'drm3btev3e86', - order: 'asc', - unit: 'km' - } - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a1dcc6668d13271c8207ff5ff1d35492.asciidoc b/docs/doc_examples/a1dcc6668d13271c8207ff5ff1d35492.asciidoc new file mode 100644 index 000000000..47f0b8b89 --- /dev/null +++ b/docs/doc_examples/a1dcc6668d13271c8207ff5ff1d35492.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.get({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1e5884051755b5a5f4d7549f319f4c7.asciidoc b/docs/doc_examples/a1e5884051755b5a5f4d7549f319f4c7.asciidoc new file mode 100644 index 000000000..1442efa7b --- /dev/null +++ b/docs/doc_examples/a1e5884051755b5a5f4d7549f319f4c7.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "products", + mappings: { + properties: { + resellers: { + type: "nested", + properties: { + reseller: { + type: "keyword", + }, + price: { + type: "double", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1e5f3956f9a697e79478fc9a6e30e1f.asciidoc b/docs/doc_examples/a1e5f3956f9a697e79478fc9a6e30e1f.asciidoc new file mode 100644 index 000000000..3ffa26e73 --- /dev/null +++ b/docs/doc_examples/a1e5f3956f9a697e79478fc9a6e30e1f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "thai", + text: "การที่ได้ต้องแสดงว่างานดี", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1f70bc71b763b58206814c40a7440e7.asciidoc b/docs/doc_examples/a1f70bc71b763b58206814c40a7440e7.asciidoc new file mode 100644 index 000000000..e8b0d8042 --- /dev/null +++ b/docs/doc_examples/a1f70bc71b763b58206814c40a7440e7.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_watcher/settings", + body: { + "index.auto_expand_replicas": "0-4", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a21319c9eff1ac47d7fe7490f1ef2efa.asciidoc b/docs/doc_examples/a21319c9eff1ac47d7fe7490f1ef2efa.asciidoc new file mode 100644 index 000000000..51e111829 --- /dev/null +++ b/docs/doc_examples/a21319c9eff1ac47d7fe7490f1ef2efa.asciidoc @@ -0,0 +1,12 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["decimal_digit"], + text: "१-one two-२ ३", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a21a7bf052b41f5b996dc58f7b69770f.asciidoc b/docs/doc_examples/a21a7bf052b41f5b996dc58f7b69770f.asciidoc new file mode 100644 index 000000000..3d5c6a62d --- /dev/null +++ b/docs/doc_examples/a21a7bf052b41f5b996dc58f7b69770f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.setUpgradeMode({ + enabled: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a253a1712953f7292bdd646c48ec7fd2.asciidoc b/docs/doc_examples/a253a1712953f7292bdd646c48ec7fd2.asciidoc new file mode 100644 index 000000000..7ef15ed42 --- /dev/null +++ b/docs/doc_examples/a253a1712953f7292bdd646c48ec7fd2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + sort: "@timestamp:desc", + size: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a28111cdd9b5aaea96c779cbfbf38780.asciidoc b/docs/doc_examples/a28111cdd9b5aaea96c779cbfbf38780.asciidoc new file mode 100644 index 000000000..9a5a812a7 --- /dev/null +++ b/docs/doc_examples/a28111cdd9b5aaea96c779cbfbf38780.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "czech_example", + settings: { + analysis: { + filter: { + czech_stop: { + type: "stop", + stopwords: "_czech_", + }, + czech_keywords: { + type: "keyword_marker", + keywords: ["příklad"], + }, + czech_stemmer: { + type: "stemmer", + language: "czech", + }, + }, + analyzer: { + rebuilt_czech: { + tokenizer: "standard", + filter: [ + "lowercase", + "czech_stop", + "czech_keywords", + "czech_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a2a25aad1fea9a541b52ac613c78fb64.asciidoc b/docs/doc_examples/a2a25aad1fea9a541b52ac613c78fb64.asciidoc deleted file mode 100644 index af5b9dea4..000000000 --- a/docs/doc_examples/a2a25aad1fea9a541b52ac613c78fb64.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'content', - 'name^5' - ], - query: 'this AND that OR thus', - tie_breaker: 0 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a2abd6b6b6b6df7c574a557b5468b5e1.asciidoc b/docs/doc_examples/a2abd6b6b6b6df7c574a557b5468b5e1.asciidoc new file mode 100644 index 000000000..721f3efb9 --- /dev/null +++ b/docs/doc_examples/a2abd6b6b6b6df7c574a557b5468b5e1.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index2", + mappings: { + properties: { + comment: { + type: "text", + 
analyzer: "standard", + term_vector: "with_positions_offsets", + fields: { + english: { + type: "text", + analyzer: "english", + term_vector: "with_positions_offsets", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/073539a7e38be3cdf13008330b6a536a.asciidoc b/docs/doc_examples/a2b2ce031120dac49b5120b26eea8758.asciidoc similarity index 71% rename from docs/doc_examples/073539a7e38be3cdf13008330b6a536a.asciidoc rename to docs/doc_examples/a2b2ce031120dac49b5120b26eea8758.asciidoc index ed4d72b64..ce99834e7 100644 --- a/docs/doc_examples/073539a7e38be3cdf13008330b6a536a.asciidoc +++ b/docs/doc_examples/a2b2ce031120dac49b5120b26eea8758.asciidoc @@ -4,10 +4,9 @@ [source, js] ---- const response = await client.cat.indices({ - index: 'twi*', - v: true, - s: 'index' -}) -console.log(response) + index: "my-index-*", + v: "true", + s: "index", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/a2bab367f0e598ae27a2f4ec82e778e9.asciidoc b/docs/doc_examples/a2bab367f0e598ae27a2f4ec82e778e9.asciidoc new file mode 100644 index 000000000..c88eacc13 --- /dev/null +++ b/docs/doc_examples/a2bab367f0e598ae27a2f4ec82e778e9.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.putJob({ + id: "sensor", + index_pattern: "sensor-*", + rollup_index: "sensor_rollup", + cron: "0 0 * * * *", + page_size: 1000, + groups: { + date_histogram: { + field: "timestamp", + fixed_interval: "60m", + }, + terms: { + fields: ["node"], + }, + }, + metrics: [ + { + field: "temperature", + metrics: ["min", "max", "sum"], + }, + { + field: "voltage", + metrics: ["avg"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a2bd0782aadfd0a902d7f590ee7f49fe.asciidoc b/docs/doc_examples/a2bd0782aadfd0a902d7f590ee7f49fe.asciidoc new file mode 100644 index 000000000..1432ddcbb --- /dev/null +++ b/docs/doc_examples/a2bd0782aadfd0a902d7f590ee7f49fe.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + content_embedding: { + type: "sparse_vector", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc b/docs/doc_examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc new file mode 100644 index 000000000..98b41d8fb --- /dev/null +++ b/docs/doc_examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.upgradeTransforms(); +console.log(response); +---- diff --git a/docs/doc_examples/a2dabdcbb661e7690166ae6d0de27e46.asciidoc b/docs/doc_examples/a2dabdcbb661e7690166ae6d0de27e46.asciidoc new file mode 100644 index 000000000..5aa8a5c4d --- /dev/null +++ b/docs/doc_examples/a2dabdcbb661e7690166ae6d0de27e46.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.fieldCaps({ + index: "trips", + fields: "route_*,transit_mode", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/a322c8c73d6f2f5e1e375588ed20b636.asciidoc b/docs/doc_examples/a322c8c73d6f2f5e1e375588ed20b636.asciidoc new file mode 100644 index 000000000..58862e58f --- /dev/null +++ b/docs/doc_examples/a322c8c73d6f2f5e1e375588ed20b636.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote-search", + indices: [ + { + names: ["target-indices"], + privileges: ["read", "read_cross_cluster"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a325f31e94fb1e8739258910593504a8.asciidoc b/docs/doc_examples/a325f31e94fb1e8739258910593504a8.asciidoc new file mode 100644 index 000000000..47b557307 --- /dev/null +++ b/docs/doc_examples/a325f31e94fb1e8739258910593504a8.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "facilitator-role", + cluster: ["manage_oidc", "manage_token"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3464bd6f0a61623562162859566b078.asciidoc b/docs/doc_examples/a3464bd6f0a61623562162859566b078.asciidoc new file mode 100644 index 000000000..088a60324 --- /dev/null +++ b/docs/doc_examples/a3464bd6f0a61623562162859566b078.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.follow({ + index: "kibana_sample_data_ecommerce2", + wait_for_active_shards: 1, + remote_cluster: "clusterA", + leader_index: "kibana_sample_data_ecommerce", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a34d70d7022eb4ba48909d440c80390f.asciidoc b/docs/doc_examples/a34d70d7022eb4ba48909d440c80390f.asciidoc index 6f4fc8434..6202751e6 100644 --- a/docs/doc_examples/a34d70d7022eb4ba48909d440c80390f.asciidoc +++ b/docs/doc_examples/a34d70d7022eb4ba48909d440c80390f.asciidoc @@ -4,15 +4,12 @@ [source, js] ---- const response = await client.search({ - index: '%3Clogstash-%7Bnow%2Fd-2d%7D%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogstash-%7Bnow%2Fd%7D%3E', - body: { - query: { - match: { - test: 'data' - } - } - } -}) -console.log(response) + index: "<logstash-{now/d-2d}>,<logstash-{now/d-1d}>,<logstash-{now/d}>", + query: { + match: { + test: "data", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc b/docs/doc_examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc new file mode 100644 index 000000000..fff9d8917 --- /dev/null +++ b/docs/doc_examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getAlias({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a38f29375eabd0103f8d7c00b17bb0ab.asciidoc b/docs/doc_examples/a38f29375eabd0103f8d7c00b17bb0ab.asciidoc new file mode 100644 index 000000000..f3456d0d4 --- /dev/null +++ b/docs/doc_examples/a38f29375eabd0103f8d7c00b17bb0ab.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.health();
+console.log(response); +---- diff --git a/docs/doc_examples/a3a14f7f0e80725f695a901a7e1d579d.asciidoc b/docs/doc_examples/a3a14f7f0e80725f695a901a7e1d579d.asciidoc new file mode 100644 index 000000000..c0eaf8801 --- /dev/null +++ b/docs/doc_examples/a3a14f7f0e80725f695a901a7e1d579d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["trim"], + text: " fox ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc b/docs/doc_examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc new file mode 100644 index 000000000..31dec62e6 --- /dev/null +++ b/docs/doc_examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-api-key", + role_descriptors: {}, + metadata: { + application: "myapp", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc b/docs/doc_examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc new file mode 100644 index 000000000..932cd80ac --- /dev/null +++ b/docs/doc_examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + verbose: "true", + pipeline: { + description: "_description", + processors: [ + { + set: { + field: "field2", + value: "_value2", + }, + }, + { + set: { + field: "field3", + value: "_value3", + }, + }, + ], + }, + docs: [ + { + _index: "index", + _id: "id", + _source: { + foo: "bar", + }, + }, + { + _index: "index", + _id: "id", + _source: { + foo: "rab", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3c8f474b0700711a356682f37e62b39.asciidoc b/docs/doc_examples/a3c8f474b0700711a356682f37e62b39.asciidoc new file mode 100644 index 000000000..b99d50485 --- /dev/null +++ b/docs/doc_examples/a3c8f474b0700711a356682f37e62b39.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "azure-ai-studio-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1536, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc b/docs/doc_examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc new file mode 100644 index 000000000..82ca0c679 --- /dev/null +++ b/docs/doc_examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + group_by: "none", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3cfd350c73a104b99a998c6be931408.asciidoc b/docs/doc_examples/a3cfd350c73a104b99a998c6be931408.asciidoc new file mode 100644 index 000000000..5c3d07b1c --- /dev/null +++ 
b/docs/doc_examples/a3cfd350c73a104b99a998c6be931408.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + metric: "blocks", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3d13833714f9bb918e5e0f62a49bd0e.asciidoc b/docs/doc_examples/a3d13833714f9bb918e5e0f62a49bd0e.asciidoc new file mode 100644 index 000000000..43d3aadd4 --- /dev/null +++ b/docs/doc_examples/a3d13833714f9bb918e5e0f62a49bd0e.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "ip_addresses", + size: 0, + aggs: { + ip_ranges: { + ip_range: { + field: "ip", + ranges: [ + { + to: "10.0.0.5", + }, + { + from: "10.0.0.5", + }, + ], + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc b/docs/doc_examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc new file mode 100644 index 000000000..2a294fe3f --- /dev/null +++ b/docs/doc_examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.danglingIndices.importDanglingIndex({ + index_uuid: "<index-uuid>", + accept_data_loss: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2468ab381257d759d8a88af1141f6f9c.asciidoc b/docs/doc_examples/a3e79d6c626a490341c5b731acbb4a5d.asciidoc similarity index 74% rename from docs/doc_examples/2468ab381257d759d8a88af1141f6f9c.asciidoc rename to docs/doc_examples/a3e79d6c626a490341c5b731acbb4a5d.asciidoc index 61aef030b..869dc012e 100644 --- a/docs/doc_examples/2468ab381257d759d8a88af1141f6f9c.asciidoc +++ b/docs/doc_examples/a3e79d6c626a490341c5b731acbb4a5d.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.existsSource({ - index: 'twitter', - id: '1' -}) -console.log(response) + index: "my-index-000001", + id: 1, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/a3f19f3787cb331f230cdac67ff578e8.asciidoc b/docs/doc_examples/a3f19f3787cb331f230cdac67ff578e8.asciidoc new file mode 100644 index 000000000..457176790 --- /dev/null +++ b/docs/doc_examples/a3f19f3787cb331f230cdac67ff578e8.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + tags: { + significant_terms: { + field: "tags", + execution_hint: "map", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cfc37446bd892d1ac42a3c8e8b204e6c.asciidoc b/docs/doc_examples/a3f3c1f3f31dbd225da5fd14633bc4a0.asciidoc similarity index 77% rename from docs/doc_examples/cfc37446bd892d1ac42a3c8e8b204e6c.asciidoc rename to docs/doc_examples/a3f3c1f3f31dbd225da5fd14633bc4a0.asciidoc index b7d494527..3d954b8f4 100644 --- a/docs/doc_examples/cfc37446bd892d1ac42a3c8e8b204e6c.asciidoc +++ b/docs/doc_examples/a3f3c1f3f31dbd225da5fd14633bc4a0.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.get({ - index: 'test2', - id: '1' -}) -console.log(response) + index: "users", + id: 0, +}); +console.log(response); ---- - diff --git
a/docs/doc_examples/a3f56fa16c6cc67c2db31a4ba9ca11a7.asciidoc b/docs/doc_examples/a3f56fa16c6cc67c2db31a4ba9ca11a7.asciidoc new file mode 100644 index 000000000..07806845c --- /dev/null +++ b/docs/doc_examples/a3f56fa16c6cc67c2db31a4ba9ca11a7.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.putPolicy({ + name: "networks-policy", + range: { + indices: "networks", + match_field: "range", + enrich_fields: ["name", "department"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3f66deb467df86edbf66e1dca31da51.asciidoc b/docs/doc_examples/a3f66deb467df86edbf66e1dca31da51.asciidoc new file mode 100644 index 000000000..56afe492c --- /dev/null +++ b/docs/doc_examples/a3f66deb467df86edbf66e1dca31da51.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "music", + _source: "suggest", + suggest: { + "song-suggest": { + prefix: "nir", + completion: { + field: "suggest", + size: 5, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a412fe22a74900c72434391ed75139dc.asciidoc b/docs/doc_examples/a412fe22a74900c72434391ed75139dc.asciidoc new file mode 100644 index 000000000..868cabb01 --- /dev/null +++ b/docs/doc_examples/a412fe22a74900c72434391ed75139dc.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "zoomed-in": { + filter: { + geo_bounding_box: { + location: { + top_left: "POINT (4.9 52.4)", + bottom_right: "POINT (5.0 52.3)", + }, + }, + }, + aggregations: { + zoom1: { + geohex_grid: { + field: "location", + precision: 12, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a425fcab60f603504becee7d001f0a4b.asciidoc b/docs/doc_examples/a425fcab60f603504becee7d001f0a4b.asciidoc new file mode 100644 index 000000000..c2c279354 --- /dev/null +++ b/docs/doc_examples/a425fcab60f603504becee7d001f0a4b.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "index_4", + settings: { + "index.priority": 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a428d518162918733d49261ffd65cfc1.asciidoc b/docs/doc_examples/a428d518162918733d49261ffd65cfc1.asciidoc new file mode 100644 index 000000000..8d45447a2 --- /dev/null +++ b/docs/doc_examples/a428d518162918733d49261ffd65cfc1.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "custom_unique_example", + settings: { + analysis: { + analyzer: { + standard_truncate: { + tokenizer: "standard", + filter: ["unique"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a42f33e15b0995bb4b6058659bfdea85.asciidoc b/docs/doc_examples/a42f33e15b0995bb4b6058659bfdea85.asciidoc deleted file mode 100644 index 34bd3c81a..000000000 --- 
a/docs/doc_examples/a42f33e15b0995bb4b6058659bfdea85.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - query: { - match_all: {} - }, - boost: '5', - random_score: {}, - boost_mode: 'multiply' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a43954d055f042d625a905513821f5f0.asciidoc b/docs/doc_examples/a43954d055f042d625a905513821f5f0.asciidoc new file mode 100644 index 000000000..0ecb3df45 --- /dev/null +++ b/docs/doc_examples/a43954d055f042d625a905513821f5f0.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + knn_field: "image-vector", + query_vector: [-5, 9, -12], + k: 10, + num_candidates: 100, + fields: ["title", "file-type"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a45244aa3adbf3c793fede100786d1f5.asciidoc b/docs/doc_examples/a45244aa3adbf3c793fede100786d1f5.asciidoc new file mode 100644 index 000000000..7d7c8a339 --- /dev/null +++ b/docs/doc_examples/a45244aa3adbf3c793fede100786d1f5.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + auto_date_histogram: { + field: "date", + buckets: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a45605347d6438e7aecdf3b37198616d.asciidoc b/docs/doc_examples/a45605347d6438e7aecdf3b37198616d.asciidoc new file mode 100644 index 000000000..802f482f4 --- /dev/null +++ b/docs/doc_examples/a45605347d6438e7aecdf3b37198616d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.moveToStep({ + index: "my-index-000001", + current_step: { + phase: "new", + action: "complete", + name: "complete", + }, + next_step: { + phase: "warm", + action: "forcemerge", + name: "forcemerge", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a45810722dc4f468f81b1e8a451d21be.asciidoc b/docs/doc_examples/a45810722dc4f468f81b1e8a451d21be.asciidoc new file mode 100644 index 000000000..faefa6933 --- /dev/null +++ b/docs/doc_examples/a45810722dc4f468f81b1e8a451d21be.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.http.HttpTracer": "TRACE", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a45d80a3fdba70c1b1ba493e51652c8a.asciidoc b/docs/doc_examples/a45d80a3fdba70c1b1ba493e51652c8a.asciidoc new file mode 100644 index 000000000..4ef252dfd --- /dev/null +++ b/docs/doc_examples/a45d80a3fdba70c1b1ba493e51652c8a.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", 
+ document: { + location: { + type: "multipoint", + coordinates: [ + [1002, 1002], + [1003, 2000], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a45eb0cdd138d9c894ca2de9352549a1.asciidoc b/docs/doc_examples/a45eb0cdd138d9c894ca2de9352549a1.asciidoc new file mode 100644 index 000000000..61cfb9a95 --- /dev/null +++ b/docs/doc_examples/a45eb0cdd138d9c894ca2de9352549a1.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_error_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + search: { + request: { + indices: ["logs"], + body: { + query: { + match: { + message: "error", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a49169b4622918992411fab4ec48191b.asciidoc b/docs/doc_examples/a49169b4622918992411fab4ec48191b.asciidoc deleted file mode 100644 index d47b04c96..000000000 --- a/docs/doc_examples/a49169b4622918992411fab4ec48191b.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre', - script: { - source: "'Genre: ' +_value", - lang: 'painless' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a49acb27f56fe799a9b1342f85cba0f3.asciidoc b/docs/doc_examples/a49acb27f56fe799a9b1342f85cba0f3.asciidoc new file mode 100644 index 000000000..cda76bd30 --- /dev/null +++ b/docs/doc_examples/a49acb27f56fe799a9b1342f85cba0f3.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + filter: ["word_delimiter_graph"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc b/docs/doc_examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc new file mode 100644 index 000000000..a340791bd --- /dev/null +++ b/docs/doc_examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_inference/sparse_embedding/my-elser-model", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4bae4d956bc0a663f42cfec36bf8e0b.asciidoc b/docs/doc_examples/a4bae4d956bc0a663f42cfec36bf8e0b.asciidoc new file mode 100644 index 000000000..3c2b0c7bf --- /dev/null +++ b/docs/doc_examples/a4bae4d956bc0a663f42cfec36bf8e0b.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + mappings: { + properties: { + price_range: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "index", + id: 1, + document: { + designation: "spoon", + price: 13, + price_range: "10-100", + }, +}); 
+console.log(response1); +---- diff --git a/docs/doc_examples/a4bd9bf52b4f098838d12bcb8dfc3482.asciidoc b/docs/doc_examples/a4bd9bf52b4f098838d12bcb8dfc3482.asciidoc new file mode 100644 index 000000000..c58479347 --- /dev/null +++ b/docs/doc_examples/a4bd9bf52b4f098838d12bcb8dfc3482.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + min_monthly_sales: { + min_bucket: { + buckets_path: "sales_per_month>sales", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4dbd52004f3ab1580eb73997f77dcab.asciidoc b/docs/doc_examples/a4dbd52004f3ab1580eb73997f77dcab.asciidoc new file mode 100644 index 000000000..b7eab397b --- /dev/null +++ b/docs/doc_examples/a4dbd52004f3ab1580eb73997f77dcab.asciidoc @@ -0,0 +1,62 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.putTransform({ + transform_id: "ecommerce-customer-transform", + source: { + index: ["kibana_sample_data_ecommerce"], + query: { + bool: { + filter: { + term: { + currency: "EUR", + }, + }, + }, + }, + }, + pivot: { + group_by: { + customer_id: { + terms: { + field: "customer_id", + }, + }, + }, + aggregations: { + "total_quantity.sum": { + sum: { + field: "total_quantity", + }, + }, + "taxless_total_price.sum": { + sum: { + field: "taxless_total_price", + }, + }, + "total_quantity.max": { + max: { + field: "total_quantity", + }, + }, + "order_id.cardinality": { + cardinality: { + field: "order_id", + }, + }, + }, + }, + dest: { + index: "ecommerce-customers", + }, + retention_policy: { + time: { + field: "order_date", + max_age: "60d", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4e510aa9145ccedae151c4a6634f0a4.asciidoc b/docs/doc_examples/a4e510aa9145ccedae151c4a6634f0a4.asciidoc new file mode 100644 index 000000000..e51f6e91e --- /dev/null +++ b/docs/doc_examples/a4e510aa9145ccedae151c4a6634f0a4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["stemmer"], + text: "the foxes jumping quickly", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc b/docs/doc_examples/a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc new file mode 100644 index 000000000..2f5f16ec0 --- /dev/null +++ b/docs/doc_examples/a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + query: { + constant_score: { + filter: { + range: { + price: { + to: "500", + }, + }, + }, + }, + }, + aggs: { + prices: { + histogram: { + field: "price", + interval: 50, + extended_bounds: { + min: 0, + max: 500, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4ee2214d621bcfaf768c46d21325958.asciidoc 
b/docs/doc_examples/a4ee2214d621bcfaf768c46d21325958.asciidoc new file mode 100644 index 000000000..7179d8152 --- /dev/null +++ b/docs/doc_examples/a4ee2214d621bcfaf768c46d21325958.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/hugging_face_embeddings", + body: { + service: "hugging_face", + service_settings: { + api_key: "<access_token>", + url: "<url_endpoint>", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4f259522b4dc10a0323aff58236c2c2.asciidoc b/docs/doc_examples/a4f259522b4dc10a0323aff58236c2c2.asciidoc new file mode 100644 index 000000000..bcfc419b0 --- /dev/null +++ b/docs/doc_examples/a4f259522b4dc10a0323aff58236c2c2.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "music", + id: 1, + refresh: "true", + document: { + suggest: { + input: ["Nevermind", "Nirvana"], + weight: 34, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a512e4dd8880ce0395937db1bab1d205.asciidoc b/docs/doc_examples/a512e4dd8880ce0395937db1bab1d205.asciidoc new file mode 100644 index 000000000..40db80a05 --- /dev/null +++ b/docs/doc_examples/a512e4dd8880ce0395937db1bab1d205.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "edge_ngram", + text: "Quick Fox", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a520168c1c8b454a8f102d6a13027c73.asciidoc b/docs/doc_examples/a520168c1c8b454a8f102d6a13027c73.asciidoc new file mode 100644 index 000000000..b99501640 --- /dev/null +++ b/docs/doc_examples/a520168c1c8b454a8f102d6a13027c73.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.followInfo({ + index: "follower_index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5217a93efabceee9be19949e484f930.asciidoc b/docs/doc_examples/a5217a93efabceee9be19949e484f930.asciidoc new file mode 100644 index 000000000..8566229cb --- /dev/null +++ b/docs/doc_examples/a5217a93efabceee9be19949e484f930.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "music", + id: 1, + refresh: "true", + document: { + suggest: [ + { + input: "Nevermind", + weight: 10, + }, + { + input: "Nirvana", + weight: 3, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a53ff77d83222c0e76453e630d64787e.asciidoc b/docs/doc_examples/a53ff77d83222c0e76453e630d64787e.asciidoc new file mode 100644 index 000000000..2113b36ee --- /dev/null +++ b/docs/doc_examples/a53ff77d83222c0e76453e630d64787e.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "openai_embeddings", + processors: [ + { + inference: { + model_id:
"openai_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a547bb926c25f670078b98fbe67de3cc.asciidoc b/docs/doc_examples/a547bb926c25f670078b98fbe67de3cc.asciidoc new file mode 100644 index 000000000..0a9165a6e --- /dev/null +++ b/docs/doc_examples/a547bb926c25f670078b98fbe67de3cc.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.deleteSynonymRule({ + set_id: "my-synonyms-set", + rule_id: "test-1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a56c20a733a350673d41829c8daaafbe.asciidoc b/docs/doc_examples/a56c20a733a350673d41829c8daaafbe.asciidoc new file mode 100644 index 000000000..c577e3576 --- /dev/null +++ b/docs/doc_examples/a56c20a733a350673d41829c8daaafbe.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.putAutoscalingPolicy({ + name: "my_autoscaling_policy", + policy: { + roles: ["data_hot"], + deciders: { + fixed: { + storage: "1tb", + memory: "32gb", + processors: 2.3, + nodes: 8, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a594f05459d9eecc8050c73fc8da336f.asciidoc b/docs/doc_examples/a594f05459d9eecc8050c73fc8da336f.asciidoc new file mode 100644 index 000000000..7ab2d290f --- /dev/null +++ b/docs/doc_examples/a594f05459d9eecc8050c73fc8da336f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/azure_openai_embeddings", + body: { + service: "azureopenai", + service_settings: { + api_key: "", + resource_name: "", + deployment_id: "", + api_version: "2024-02-01", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5a58e8ad66afe831bc295500e3e8739.asciidoc b/docs/doc_examples/a5a58e8ad66afe831bc295500e3e8739.asciidoc new file mode 100644 index 000000000..8a9680f82 --- /dev/null +++ b/docs/doc_examples/a5a58e8ad66afe831bc295500e3e8739.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + unfollow: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5a5fb129de2f492e8fd33043a73439c.asciidoc b/docs/doc_examples/a5a5fb129de2f492e8fd33043a73439c.asciidoc new file mode 100644 index 000000000..74eaa913d --- /dev/null +++ b/docs/doc_examples/a5a5fb129de2f492e8fd33043a73439c.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "dictionary_decompound_example", + settings: { + analysis: { + analyzer: { + standard_dictionary_decompound: { + tokenizer: "standard", + filter: ["22_char_dictionary_decompound"], + }, + }, + filter: { + "22_char_dictionary_decompound": { + type: "dictionary_decompounder", + 
word_list_path: "analysis/example_word_list.txt", + max_subword_size: 22, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5b59f0170a2feaa39e40243fd7ae359.asciidoc b/docs/doc_examples/a5b59f0170a2feaa39e40243fd7ae359.asciidoc new file mode 100644 index 000000000..5ed800291 --- /dev/null +++ b/docs/doc_examples/a5b59f0170a2feaa39e40243fd7ae359.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my-example-app", + search_application: { + indices: ["my-example-app"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {\n "query_string": {\n "query": "{{query}}",\n "search_fields": {{#toJson}}search_fields{{/toJson}}\n }\n }\n {{/query}}\n ]\n }\n }\n }\n ', + params: { + query: "", + search_fields: "", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc b/docs/doc_examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc new file mode 100644 index 000000000..c2645790d --- /dev/null +++ b/docs/doc_examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlPrepareAuthentication({ + realm: "saml1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5e2b3588258430f2e595abda98e3943.asciidoc b/docs/doc_examples/a5e2b3588258430f2e595abda98e3943.asciidoc new file mode 100644 index 000000000..5dc12ff3d --- /dev/null +++ b/docs/doc_examples/a5e2b3588258430f2e595abda98e3943.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRealms({ + realms: "default_file", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc b/docs/doc_examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc new file mode 100644 index 000000000..312d22490 --- /dev/null +++ b/docs/doc_examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + percolate: { + field: "query", + documents: [ + { + message: "Japanse art", + }, + { + message: "Holand culture", + }, + { + message: "Japanese art and Holand culture", + }, + { + message: "no-match", + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5e6ccfb6019238e6db602373b9af147.asciidoc b/docs/doc_examples/a5e6ccfb6019238e6db602373b9af147.asciidoc new file mode 100644 index 000000000..8ef447ce8 --- /dev/null +++ b/docs/doc_examples/a5e6ccfb6019238e6db602373b9af147.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putDataLifecycle({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5e793d82a4455cf4105dac82a156617.asciidoc 
b/docs/doc_examples/a5e793d82a4455cf4105dac82a156617.asciidoc new file mode 100644 index 000000000..5ffed26d1 --- /dev/null +++ b/docs/doc_examples/a5e793d82a4455cf4105dac82a156617.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.validateQuery({ + index: "my-index-000001", + rewrite: "true", + query: { + more_like_this: { + like: { + _id: "2", + }, + boost_terms: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5ebcd70c34d1ece77a4fb27cc050917.asciidoc b/docs/doc_examples/a5ebcd70c34d1ece77a4fb27cc050917.asciidoc new file mode 100644 index 000000000..634516cec --- /dev/null +++ b/docs/doc_examples/a5ebcd70c34d1ece77a4fb27cc050917.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_ranks: { + percentile_ranks: { + field: "load_time", + values: [500, 600], + keyed: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5f9eb40087921e67d820775acf71522.asciidoc b/docs/doc_examples/a5f9eb40087921e67d820775acf71522.asciidoc new file mode 100644 index 000000000..041de0d6c --- /dev/null +++ b/docs/doc_examples/a5f9eb40087921e67d820775acf71522.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + city: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a6169bc057ce8654bd306ff4b062081b.asciidoc b/docs/doc_examples/a6169bc057ce8654bd306ff4b062081b.asciidoc new file mode 100644 index 000000000..da8f00888 --- /dev/null +++ b/docs/doc_examples/a6169bc057ce8654bd306ff4b062081b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "music", + pretty: "true", + suggest: { + "song-suggest": { + prefix: "nor", + completion: { + field: "suggest", + skip_duplicates: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc b/docs/doc_examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc new file mode 100644 index 000000000..5fafb3e42 --- /dev/null +++ b/docs/doc_examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.openJob({ + job_id: "low_request_rate", + timeout: "35m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a62833baf15f2c9ac094a9289e56a012.asciidoc b/docs/doc_examples/a62833baf15f2c9ac094a9289e56a012.asciidoc new file mode 100644 index 000000000..38c0b3ec4 --- /dev/null +++ b/docs/doc_examples/a62833baf15f2c9ac094a9289e56a012.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "timeseries", + document: 
{ + message: "logged the request", + "@timestamp": "1591890611", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a63e0d0504e0c9313814b7f4e2641353.asciidoc b/docs/doc_examples/a63e0d0504e0c9313814b7f4e2641353.asciidoc new file mode 100644 index 000000000..be5a9a106 --- /dev/null +++ b/docs/doc_examples/a63e0d0504e0c9313814b7f4e2641353.asciidoc @@ -0,0 +1,82 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "metrics_index", + mappings: { + properties: { + network: { + properties: { + name: { + type: "keyword", + }, + }, + }, + latency_histo: { + type: "histogram", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 1, + refresh: "true", + document: { + "network.name": "net-1", + latency_histo: { + values: [1, 3, 8, 12, 15], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "metrics_index", + id: 2, + refresh: "true", + document: { + "network.name": "net-2", + latency_histo: { + values: [1, 6, 8, 12, 14], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "metrics_index", + size: 0, + filter_path: "aggregations", + aggs: { + latency_ranges: { + range: { + field: "latency_histo", + ranges: [ + { + to: 2, + }, + { + from: 2, + to: 3, + }, + { + from: 3, + to: 10, + }, + { + from: 10, + }, + ], + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/a669e9d56e34c95ef4c780e92ed307f1.asciidoc b/docs/doc_examples/a669e9d56e34c95ef4c780e92ed307f1.asciidoc new file mode 100644 index 000000000..2b4f275e8 --- /dev/null +++ b/docs/doc_examples/a669e9d56e34c95ef4c780e92ed307f1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.get({ + id: "FjlmbndxNmJjU0RPdExBTGg0elNOOEEaQk9xSjJBQzBRMldZa1VVQ2pPa01YUToxMDY=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a692b4c0ca7825c467880b346841f5a5.asciidoc b/docs/doc_examples/a692b4c0ca7825c467880b346841f5a5.asciidoc new file mode 100644 index 000000000..04148381f --- /dev/null +++ b/docs/doc_examples/a692b4c0ca7825c467880b346841f5a5.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + name: { + properties: { + first: { + type: "text", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a699189c8d1a7573beeaea768f2fc618.asciidoc b/docs/doc_examples/a699189c8d1a7573beeaea768f2fc618.asciidoc new file mode 100644 index 000000000..c1836d6f8 --- /dev/null +++ b/docs/doc_examples/a699189c8d1a7573beeaea768f2fc618.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "snapshot-20200617", + indices: "kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", + include_aliases: true, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/b0d64d0a554549e5b2808002a0725493.asciidoc b/docs/doc_examples/a69b1ce5cc9528fb3639185eaf241ae3.asciidoc similarity index 60% rename from docs/doc_examples/b0d64d0a554549e5b2808002a0725493.asciidoc rename to docs/doc_examples/a69b1ce5cc9528fb3639185eaf241ae3.asciidoc index 76de92f22..44f85408c 100644 --- a/docs/doc_examples/b0d64d0a554549e5b2808002a0725493.asciidoc +++ b/docs/doc_examples/a69b1ce5cc9528fb3639185eaf241ae3.asciidoc @@ -4,10 +4,7 @@ [source, js] ---- const response = await client.clearScroll({ - body: { - scroll_id: 'DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==' - } -}) -console.log(response) + scroll_id: "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/a6b2815d54df34b6b8d00226e9a1af0c.asciidoc b/docs/doc_examples/a6b2815d54df34b6b8d00226e9a1af0c.asciidoc new file mode 100644 index 000000000..3781991c0 --- /dev/null +++ b/docs/doc_examples/a6b2815d54df34b6b8d00226e9a1af0c.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + "my_embeddings.predicted_value": { + type: "dense_vector", + dims: 384, + }, + my_text_field: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be8f28f31207b173de61be032fcf239c.asciidoc b/docs/doc_examples/a6bb306ca250cf651f19cae808b97012.asciidoc similarity index 77% rename from docs/doc_examples/be8f28f31207b173de61be032fcf239c.asciidoc rename to docs/doc_examples/a6bb306ca250cf651f19cae808b97012.asciidoc index ba09bad7d..ab06f18be 100644 --- a/docs/doc_examples/be8f28f31207b173de61be032fcf239c.asciidoc +++ b/docs/doc_examples/a6bb306ca250cf651f19cae808b97012.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.indices.get({ - index: 'twitter' -}) -console.log(response) + index: "my-index-000001", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc b/docs/doc_examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc new file mode 100644 index 000000000..c088b4ada --- /dev/null +++ b/docs/doc_examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.existsTemplate({ + name: "template_1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc b/docs/doc_examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc new file mode 100644 index 000000000..830812cdf --- /dev/null +++ b/docs/doc_examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream({ + name: "dsl-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a6ef8cd8c8218d547727ffc5485bfbd7.asciidoc b/docs/doc_examples/a6ef8cd8c8218d547727ffc5485bfbd7.asciidoc new file mode 100644 index 000000000..5be7e17bc --- /dev/null +++ b/docs/doc_examples/a6ef8cd8c8218d547727ffc5485bfbd7.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + range: { + date_range: { + field: "date", + missing: "1976/11/30", + ranges: [ + { + key: "Older", + to: "2016/02/01", + }, + { + key: "Newer", + from: "2016/02/01", + to: "now/d", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/210cf5c76bff517f48e80fa1c2d63907.asciidoc b/docs/doc_examples/a6fdd0100cd362df54af6c95d1055c96.asciidoc similarity index 78% rename from docs/doc_examples/210cf5c76bff517f48e80fa1c2d63907.asciidoc rename to docs/doc_examples/a6fdd0100cd362df54af6c95d1055c96.asciidoc index 54576e75d..c37e742e2 100644 --- a/docs/doc_examples/210cf5c76bff517f48e80fa1c2d63907.asciidoc +++ b/docs/doc_examples/a6fdd0100cd362df54af6c95d1055c96.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.indices.getMapping({ - index: 'my_index' -}) -console.log(response) + index: "my-index-000001", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/a71154ea11a5214f409ecfd118e9b5e3.asciidoc b/docs/doc_examples/a71154ea11a5214f409ecfd118e9b5e3.asciidoc new file mode 100644 index 000000000..176ffa5d0 --- /dev/null +++ b/docs/doc_examples/a71154ea11a5214f409ecfd118e9b5e3.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlInvalidate({ + query: + "SAMLRequest=nZFda4MwFIb%2FiuS%2BmviRpqFaClKQdbvo2g12M2KMraCJ9cRR9utnW4Wyi13sMie873MeznJ1aWrnS3VQGR0j4mLkKC1NUeljjA77zYyhVbIE0dR%2By7fmaHq7U%2BdegXWGpAZ%2B%2F4pR32luBFTAtWgUcCv56%2Fp5y30X87Yz1khTIycdgpUW9kY7WdsC9zxoXTvMvWuVV98YyMnSGH2SYE5pwALBIr9QKiwDGpW0oGVUznGeMyJZKFkQ4jBf5HnhUymjIhzCAL3KNFihbYx8TBYzzGaY7EnIyZwHzCWMfiDnbRIftkSjJr%2BFu0e9v%2B0EgOquRiiZjKpiVFp6j50T4WXoyNJ%2FEWC9fdqc1t%2F1%2B2F3aUpjzhPiXpqMz1%2FHSn4A&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=MsAYz2NFdovMG2mXf6TSpu5vlQQyEJAg%2B4KCwBqJTmrb3yGXKUtIgvjqf88eCAK32v3eN8vupjPC8LglYmke1ZnjK0%2FKxzkvSjTVA7mMQe2AQdKbkyC038zzRq%2FYHcjFDE%2Bz0qISwSHZY2NyLePmwU7SexEXnIz37jKC6NMEhus%3D", + realm: "saml1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a71c438cc4df1cafe3109ccff475afdb.asciidoc b/docs/doc_examples/a71c438cc4df1cafe3109ccff475afdb.asciidoc deleted file mode 100644 index bf8fb9c2c..000000000 --- a/docs/doc_examples/a71c438cc4df1cafe3109ccff475afdb.asciidoc +++ /dev/null @@ -1,27 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - number_of_bytes: { - type: 'integer' - }, - time_in_seconds: { - type: 'float' - }, - price: { - type: 'scaled_float', - scaling_factor: 100 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a72613de3774571ba24def4b495161b5.asciidoc b/docs/doc_examples/a72613de3774571ba24def4b495161b5.asciidoc new file mode 100644 index 000000000..453cf9649 --- /dev/null +++ b/docs/doc_examples/a72613de3774571ba24def4b495161b5.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { 
+ user_id: { + type: "alias", + path: "user_identifier", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a735081e715d385b4d471eea0f2b57da.asciidoc b/docs/doc_examples/a735081e715d385b4d471eea0f2b57da.asciidoc new file mode 100644 index 000000000..7f1c054ed --- /dev/null +++ b/docs/doc_examples/a735081e715d385b4d471eea0f2b57da.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "slm.retention_schedule": "0 30 1 * * ?", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a73a9a6f19516b8ead63182a9ae5b540.asciidoc b/docs/doc_examples/a73a9a6f19516b8ead63182a9ae5b540.asciidoc new file mode 100644 index 000000000..7ffc8de28 --- /dev/null +++ b/docs/doc_examples/a73a9a6f19516b8ead63182a9ae5b540.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "MULTILINESTRING ((1002.0 200.0, 1003.0 200.0, 1003.0 300.0, 1002.0 300.0), (1000.0 100.0, 1001.0 100.0, 1001.0 100.0, 1000.0 100.0), (1000.2 0.2, 1000.8 100.2, 1000.8 100.8, 1000.2 100.8))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc b/docs/doc_examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc new file mode 100644 index 000000000..7faea78a5 --- /dev/null +++ b/docs/doc_examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/_sync_job/my-connector-sync-job-id/_claim", + body: { + worker_hostname: "some-machine", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc b/docs/doc_examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc new file mode 100644 index 000000000..998774ae8 --- /dev/null +++ b/docs/doc_examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: "ctx._source['my-object'].remove('my-subfield')", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a799477dff04578b200788a63f9cff71.asciidoc b/docs/doc_examples/a799477dff04578b200788a63f9cff71.asciidoc new file mode 100644 index 000000000..23f5982b1 --- /dev/null +++ b/docs/doc_examples/a799477dff04578b200788a63f9cff71.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "ip_addresses", + size: 0, + aggs: { + ip_ranges: { + ip_range: { + field: "ip", + ranges: [ + { + key: "infinity", + to: "10.0.0.5", + }, + { + key: "and-beyond", + from: "10.0.0.5", + }, + ], + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc b/docs/doc_examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc new file mode 100644 index 
000000000..6fbda91b6 --- /dev/null +++ b/docs/doc_examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deletePipeline({ + id: "pipeline-one", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a7e58d4dc477a84c1306fd5749aafd8b.asciidoc b/docs/doc_examples/a7e58d4dc477a84c1306fd5749aafd8b.asciidoc new file mode 100644 index 000000000..b7b0aec7a --- /dev/null +++ b/docs/doc_examples/a7e58d4dc477a84c1306fd5749aafd8b.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + age: { + type: "integer", + }, + email: { + type: "keyword", + }, + name: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a7fb1c0d0827d66bfa66016f2564b10c.asciidoc b/docs/doc_examples/a7fb1c0d0827d66bfa66016f2564b10c.asciidoc new file mode 100644 index 000000000..7a38525fe --- /dev/null +++ b/docs/doc_examples/a7fb1c0d0827d66bfa66016f2564b10c.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n process where process.name == "regsvr32.exe" and process.command_line.keyword != null\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/a8019280dab5b04211ae3b21e5e08223.asciidoc b/docs/doc_examples/a8019280dab5b04211ae3b21e5e08223.asciidoc new file mode 100644 index 000000000..8c7d9b215 --- /dev/null +++ b/docs/doc_examples/a8019280dab5b04211ae3b21e5e08223.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_fs_backup", + repository: { + type: "fs", + settings: { + location: "My_fs_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a80f5db4357bb25b8704d374c18318ed.asciidoc b/docs/doc_examples/a80f5db4357bb25b8704d374c18318ed.asciidoc deleted file mode 100644 index 01c0eda5a..000000000 --- a/docs/doc_examples/a80f5db4357bb25b8704d374c18318ed.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'my_index', - pretty: true, - body: { - query: { - match: { - full_text: 'Quick Brown Foxes!' 
- } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a810da963d3b28d79dcd17be829bb271.asciidoc b/docs/doc_examples/a810da963d3b28d79dcd17be829bb271.asciidoc new file mode 100644 index 000000000..d242bfba4 --- /dev/null +++ b/docs/doc_examples/a810da963d3b28d79dcd17be829bb271.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + "user.id": "kimchy", + }, + }, + docvalue_fields: [ + "user.id", + "http.response.*", + { + field: "date", + format: "epoch_millis", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a811b82ba4632bdd9065829085188bc9.asciidoc b/docs/doc_examples/a811b82ba4632bdd9065829085188bc9.asciidoc new file mode 100644 index 000000000..175e7cf13 --- /dev/null +++ b/docs/doc_examples/a811b82ba4632bdd9065829085188bc9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "my_snapshot", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a84bc239eb2f607e8bed1fdb70d63823.asciidoc b/docs/doc_examples/a84bc239eb2f607e8bed1fdb70d63823.asciidoc new file mode 100644 index 000000000..6074505da --- /dev/null +++ b/docs/doc_examples/a84bc239eb2f607e8bed1fdb70d63823.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.putAutoscalingPolicy({ + name: "my_autoscaling_policy", + policy: { + roles: ["data_hot"], + deciders: { + proactive_storage: { + forecast_window: "10m", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a861a89f52008610e813b9f073951c58.asciidoc b/docs/doc_examples/a861a89f52008610e813b9f073951c58.asciidoc new file mode 100644 index 000000000..8199b57eb --- /dev/null +++ b/docs/doc_examples/a861a89f52008610e813b9f073951c58.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + metric: "merge,refresh", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a89052bcdfe40e604a98d12be6ae59d2.asciidoc b/docs/doc_examples/a89052bcdfe40e604a98d12be6ae59d2.asciidoc new file mode 100644 index 000000000..a7161f96f --- /dev/null +++ b/docs/doc_examples/a89052bcdfe40e604a98d12be6ae59d2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "BBOX (100.0, 102.0, 2.0, 0.0)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a8add749c3f41ad1308a45308df14103.asciidoc b/docs/doc_examples/a8add749c3f41ad1308a45308df14103.asciidoc new file mode 100644 index 000000000..4246c611a --- /dev/null +++ b/docs/doc_examples/a8add749c3f41ad1308a45308df14103.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.index({ + index: "sales", + id: 1, + refresh: "true", + document: { + tags: ["car", "auto"], + comments: [ + { + username: "baddriver007", + comment: "This car could have better brakes", + }, + { + username: "dr_who", + comment: "Where's the autopilot? Can't find it", + }, + { + username: "ilovemotorbikes", + comment: "This car has two extra wheels", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9280b55a7284952f604ec7bece712f6.asciidoc b/docs/doc_examples/a9280b55a7284952f604ec7bece712f6.asciidoc new file mode 100644 index 000000000..9dde4fa2f --- /dev/null +++ b/docs/doc_examples/a9280b55a7284952f604ec7bece712f6.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + range: { + voltage_corrected: { + gte: 16, + lte: 20, + boost: 1, + }, + }, + }, + fields: ["voltage_corrected", "node"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a941fd568f2e20e13df909ab24506073.asciidoc b/docs/doc_examples/a941fd568f2e20e13df909ab24506073.asciidoc new file mode 100644 index 000000000..9803d3ef9 --- /dev/null +++ b/docs/doc_examples/a941fd568f2e20e13df909ab24506073.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings(); +console.log(response); + +const response1 = await client.cluster.putSettings({ + persistent: { + "xpack.monitoring.collection.enabled": false, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a9541c64512ebc5fcff2dc48487dc0b7.asciidoc b/docs/doc_examples/a9541c64512ebc5fcff2dc48487dc0b7.asciidoc new file mode 100644 index 000000000..52fedcb9b --- /dev/null +++ b/docs/doc_examples/a9541c64512ebc5fcff2dc48487dc0b7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + format: "txt", + query: + "FROM library | KEEP author, name, page_count, release_date | SORT page_count DESC | LIMIT 5", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9554396506888e392a1aee0ca28e6fc.asciidoc b/docs/doc_examples/a9554396506888e392a1aee0ca28e6fc.asciidoc new file mode 100644 index 000000000..bf07ef824 --- /dev/null +++ b/docs/doc_examples/a9554396506888e392a1aee0ca28e6fc.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: "my-index-2099.05.06-000001", + alias: "my-alias", + filter: { + bool: { + filter: [ + { + range: { + "@timestamp": { + gte: "now-1d/d", + lt: "now/d", + }, + }, + }, + { + term: { + "user.id": "kimchy", + }, + }, + ], + }, + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a95a123b9f862e52ab1e8f875961c852.asciidoc b/docs/doc_examples/a95a123b9f862e52ab1e8f875961c852.asciidoc new file mode 100644 index 000000000..2a6be34f4 --- /dev/null +++ b/docs/doc_examples/a95a123b9f862e52ab1e8f875961c852.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.search({ + indices_boost: [ + { + "my-index-000001": 1.4, + }, + { + "my-index-000002": 1.3, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a960b43e720b4934edb74ab4b085ca77.asciidoc b/docs/doc_examples/a960b43e720b4934edb74ab4b085ca77.asciidoc new file mode 100644 index 000000000..758df0a4d --- /dev/null +++ b/docs/doc_examples/a960b43e720b4934edb74ab4b085ca77.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a97aace57c6442bbb90e1e14effbcda3.asciidoc b/docs/doc_examples/a97aace57c6442bbb90e1e14effbcda3.asciidoc new file mode 100644 index 000000000..330a2fead --- /dev/null +++ b/docs/doc_examples/a97aace57c6442bbb90e1e14effbcda3.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "csv", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a97f984c01fa1d96e6d33a0e8e2cb90f.asciidoc b/docs/doc_examples/a97f984c01fa1d96e6d33a0e8e2cb90f.asciidoc new file mode 100644 index 000000000..b9768bb41 --- /dev/null +++ b/docs/doc_examples/a97f984c01fa1d96e6d33a0e8e2cb90f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + query: { + type: "percolator", + }, + field: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc b/docs/doc_examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc new file mode 100644 index 000000000..dc93cfbe7 --- /dev/null +++ b/docs/doc_examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.fieldCaps({ + fields: "rating,title", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8cc4b331a19ff4df8e4a490f906ee69.asciidoc b/docs/doc_examples/a98692a565904ec0783884d81a7b71fc.asciidoc similarity index 82% rename from docs/doc_examples/f8cc4b331a19ff4df8e4a490f906ee69.asciidoc rename to docs/doc_examples/a98692a565904ec0783884d81a7b71fc.asciidoc index 0843c75f3..d8bba8dc5 100644 --- a/docs/doc_examples/f8cc4b331a19ff4df8e4a490f906ee69.asciidoc +++ b/docs/doc_examples/a98692a565904ec0783884d81a7b71fc.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.cat.health({ - v: true -}) -console.log(response) + v: "true", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/a999b5661bebb802bbbfe04faacf1971.asciidoc b/docs/doc_examples/a999b5661bebb802bbbfe04faacf1971.asciidoc new file mode 100644 index 000000000..1102705e5 --- /dev/null +++ b/docs/doc_examples/a999b5661bebb802bbbfe04faacf1971.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-2099.10.*", + }, + dest: { + index: "my-index-2099.10", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a99bc141066ef673e35f306157750ec9.asciidoc b/docs/doc_examples/a99bc141066ef673e35f306157750ec9.asciidoc new file mode 100644 index 000000000..1d26fbdef --- /dev/null +++ b/docs/doc_examples/a99bc141066ef673e35f306157750ec9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "lowercase", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a99bf70ae38bdf1c6f350140b25e0422.asciidoc b/docs/doc_examples/a99bf70ae38bdf1c6f350140b25e0422.asciidoc new file mode 100644 index 000000000..42c77b7ae --- /dev/null +++ b/docs/doc_examples/a99bf70ae38bdf1c6f350140b25e0422.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + routing: "my-routing-value", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9c08023354aa9b9023807962df71d13.asciidoc b/docs/doc_examples/a9c08023354aa9b9023807962df71d13.asciidoc new file mode 100644 index 000000000..1a765c0b6 --- /dev/null +++ b/docs/doc_examples/a9c08023354aa9b9023807962df71d13.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9d44463dcea3cb0ea4c8f8460cea524.asciidoc b/docs/doc_examples/a9d44463dcea3cb0ea4c8f8460cea524.asciidoc new file mode 100644 index 000000000..c5129e611 --- /dev/null +++ b/docs/doc_examples/a9d44463dcea3cb0ea4c8f8460cea524.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "tiles-in-bounds": { + geohex_grid: { + field: "location", + precision: 12, + bounds: { + top_left: "POINT (4.9 52.4)", + bottom_right: "POINT (5.0 52.3)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9dd5cd3f2b31e7c8129ea63bab868b4.asciidoc b/docs/doc_examples/a9dd5cd3f2b31e7c8129ea63bab868b4.asciidoc new file mode 100644 index 000000000..da7520e11 --- /dev/null +++ b/docs/doc_examples/a9dd5cd3f2b31e7c8129ea63bab868b4.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1", "index2"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "bool": {\n "should": [\n {{#elser_fields}}\n {\n "sparse_vector": {\n "field": "ml.inference.{{.}}_expanded.predicted_value",\n "inference_id": "",\n "query": "{{query_string}}"\n }\n },\n {{/elser_fields}}\n ]\n }\n 
},\n "min_score": "{{min_score}}"\n }\n ', + params: { + query_string: "*", + min_score: "10", + elser_fields: [ + { + name: "title", + }, + { + name: "description", + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9dd9595e96c307b8c798beaeb571521.asciidoc b/docs/doc_examples/a9dd9595e96c307b8c798beaeb571521.asciidoc new file mode 100644 index 000000000..c7a9de6be --- /dev/null +++ b/docs/doc_examples/a9dd9595e96c307b8c798beaeb571521.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.upgradeJobSnapshot({ + job_id: "low_request_rate", + snapshot_id: 1828371, + timeout: "45m", + wait_for_completion: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc b/docs/doc_examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc new file mode 100644 index 000000000..3f53a8934 --- /dev/null +++ b/docs/doc_examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + number_of_shards: 3, + number_of_replicas: 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa1771b702f4b771491ba4ab743a9197.asciidoc b/docs/doc_examples/aa1771b702f4b771491ba4ab743a9197.asciidoc new file mode 100644 index 000000000..3ef343b39 --- /dev/null +++ b/docs/doc_examples/aa1771b702f4b771491ba4ab743a9197.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + name: "index.number_of_replicas", +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa3284717241ed79d3d1d3bdbbdce598.asciidoc b/docs/doc_examples/aa3284717241ed79d3d1d3bdbbdce598.asciidoc new file mode 100644 index 000000000..7f33df8f8 --- /dev/null +++ b/docs/doc_examples/aa3284717241ed79d3d1d3bdbbdce598.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["lowercase"], + text: "THE Quick FoX JUMPs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa5c0fa51a3553ce7caa763c3832120d.asciidoc b/docs/doc_examples/aa5c0fa51a3553ce7caa763c3832120d.asciidoc new file mode 100644 index 000000000..753eecfad --- /dev/null +++ b/docs/doc_examples/aa5c0fa51a3553ce7caa763c3832120d.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "monthly-snapshots", + name: "", + schedule: "0 56 23 1 * ?", + repository: "my_repository", + config: { + indices: "*", + include_global_state: true, + }, + retention: { + expire_after: "366d", + min_count: 1, + max_count: 12, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa5fbb68d3a8e0d0c894791cb6cf0b13.asciidoc b/docs/doc_examples/aa5fbb68d3a8e0d0c894791cb6cf0b13.asciidoc new file mode 100644 index 
000000000..09182d892 --- /dev/null +++ b/docs/doc_examples/aa5fbb68d3a8e0d0c894791cb6cf0b13.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "reverse_example", + settings: { + analysis: { + analyzer: { + whitespace_reverse: { + tokenizer: "whitespace", + filter: ["reverse"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc b/docs/doc_examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc new file mode 100644 index 000000000..ee105f278 --- /dev/null +++ b/docs/doc_examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.startTrainedModelDeployment({ + model_id: "elastic__distilbert-base-uncased-finetuned-conll03-english", + wait_for: "started", + timeout: "1m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa699ff3234f54d091575a38e859a627.asciidoc b/docs/doc_examples/aa699ff3234f54d091575a38e859a627.asciidoc new file mode 100644 index 000000000..f05fd0730 --- /dev/null +++ b/docs/doc_examples/aa699ff3234f54d091575a38e859a627.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + typed_keys: "true", + aggs: { + "my-agg-name": { + histogram: { + field: "my-field", + interval: 1000, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa6bfe54e2436eb668091fe31c2fbf4d.asciidoc b/docs/doc_examples/aa6bfe54e2436eb668091fe31c2fbf4d.asciidoc deleted file mode 100644 index 4f34a65e2..000000000 --- a/docs/doc_examples/aa6bfe54e2436eb668091fe31c2fbf4d.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.index({ - index: 'my_index', - id: '1', - refresh: true, - body: { - date: '2015-10-01T05:30:00Z' - } -}) -console.log(response0) - -const response1 = await client.index({ - index: 'my_index', - id: '2', - refresh: true, - body: { - date: '2015-10-01T06:30:00Z' - } -}) -console.log(response1) - -const response2 = await client.search({ - index: 'my_index', - size: '0', - body: { - aggs: { - by_day: { - date_histogram: { - field: 'date', - calendar_interval: 'day', - offset: '+6h' - } - } - } - } -}) -console.log(response2) ----- - diff --git a/docs/doc_examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc b/docs/doc_examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc new file mode 100644 index 000000000..d6cff373f --- /dev/null +++ b/docs/doc_examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "daily-snapshots", + schedule: "0 30 1 * * ?", + name: "", + repository: "my_repository", + config: { + indices: ["data-*", "important"], + ignore_unavailable: false, + include_global_state: false, + }, + retention: { + expire_after: "30d", + min_count: 5, + max_count: 50, 
+ }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc b/docs/doc_examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc new file mode 100644 index 000000000..d9523d9b3 --- /dev/null +++ b/docs/doc_examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getIndexTemplate({ + name: "*", + filter_path: + "index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/aaa7a61b07861235fb6e489b946c705c.asciidoc b/docs/doc_examples/aaa7a61b07861235fb6e489b946c705c.asciidoc new file mode 100644 index 000000000..e391c4895 --- /dev/null +++ b/docs/doc_examples/aaa7a61b07861235fb6e489b946c705c.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + version: 2, + version_type: "external", + document: { + user: { + id: "elkbee", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc b/docs/doc_examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc new file mode 100644 index 000000000..3d38f131d --- /dev/null +++ b/docs/doc_examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchableSnapshots.cacheStats(); +console.log(response); +---- diff --git a/docs/doc_examples/aaba346e0becdf12db13658296e0b8a1.asciidoc b/docs/doc_examples/aaba346e0becdf12db13658296e0b8a1.asciidoc new file mode 100644 index 000000000..528068982 --- /dev/null +++ b/docs/doc_examples/aaba346e0becdf12db13658296e0b8a1.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.number_of_shards": 2, + "index.lifecycle.name": "shrink-index", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aac5996a8398cc8f7701a063df0b2346.asciidoc b/docs/doc_examples/aac5996a8398cc8f7701a063df0b2346.asciidoc new file mode 100644 index 000000000..f5d21c1c6 --- /dev/null +++ b/docs/doc_examples/aac5996a8398cc8f7701a063df0b2346.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "saml-finance", + roles: ["finance_data"], + enabled: true, + rules: { + all: [ + { + field: { + "realm.name": "saml1", + }, + }, + { + field: { + groups: "finance-team", + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aadf36ae37460a735e06b953b4cee494.asciidoc b/docs/doc_examples/aadf36ae37460a735e06b953b4cee494.asciidoc new file mode 100644 index 000000000..3b1270415 --- /dev/null +++ b/docs/doc_examples/aadf36ae37460a735e06b953b4cee494.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + runtime_mappings: { + price_range: { + type: "keyword", + script: { + source: + "\n def bucket_start = (long) Math.floor(doc['taxful_total_price'].value / 50) * 50;\n def bucket_end = bucket_start + 50;\n emit(bucket_start.toString() + \"-\" + bucket_end.toString());\n ", + }, + }, + }, + size: 0, + aggs: { + my_agg: { + frequent_item_sets: { + minimum_set_size: 4, + fields: [ + { + field: "category.keyword", + }, + { + field: "price_range", + }, + { + field: "geoip.city_name", + }, + ], + size: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ab0fd1908c9957cc7f63165c156e48cd.asciidoc b/docs/doc_examples/ab0fd1908c9957cc7f63165c156e48cd.asciidoc new file mode 100644 index 000000000..6cedc3adc --- /dev/null +++ b/docs/doc_examples/ab0fd1908c9957cc7f63165c156e48cd.asciidoc @@ -0,0 +1,56 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + user_id: { + type: "keyword", + }, + last_updated: { + type: "date", + }, + session_data: { + type: "object", + enabled: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "session_1", + document: { + user_id: "kimchy", + session_data: { + arbitrary_object: { + some_array: [ + "foo", + "bar", + { + baz: 2, + }, + ], + }, + }, + last_updated: "2015-12-06T18:20:22", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: "session_2", + document: { + user_id: "jpountz", + session_data: "none", + last_updated: "2015-12-06T18:22:13", + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ab1372270c11bcd6f36d1a13e6c69276.asciidoc b/docs/doc_examples/ab1372270c11bcd6f36d1a13e6c69276.asciidoc new file mode 100644 index 000000000..bf78c663c --- /dev/null +++ b/docs/doc_examples/ab1372270c11bcd6f36d1a13e6c69276.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: + "my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001", + ccs_minimize_roundtrips: "true", + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ab1a989958c1d345a9dc3dd36ad90c27.asciidoc b/docs/doc_examples/ab1a989958c1d345a9dc3dd36ad90c27.asciidoc new file mode 100644 index 000000000..08cd07a68 --- /dev/null +++ b/docs/doc_examples/ab1a989958c1d345a9dc3dd36ad90c27.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "POLYGON ((1000.0 1000.0, 1001.0 1000.0, 1001.0 1001.0, 1000.0 1001.0, 1000.0 1000.0), (1000.2 1000.2, 1000.8 1000.2, 1000.8 1000.8, 1000.2 1000.8, 1000.2 1000.2))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc 
b/docs/doc_examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc new file mode 100644 index 000000000..6a968f8e5 --- /dev/null +++ b/docs/doc_examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + cost_price: 100, + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + script_fields: { + sales_price: { + script: { + lang: "expression", + source: "doc['cost_price'] * markup", + params: { + markup: 0.2, + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/ab29bfbd35ee482cf54052b03d62cd31.asciidoc b/docs/doc_examples/ab29bfbd35ee482cf54052b03d62cd31.asciidoc new file mode 100644 index 000000000..338d6cf21 --- /dev/null +++ b/docs/doc_examples/ab29bfbd35ee482cf54052b03d62cd31.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggs: { + rings: { + geo_distance: { + field: "location", + origin: "POINT (4.894 52.3760)", + unit: "km", + ranges: [ + { + to: 100, + }, + { + from: 100, + to: 300, + }, + { + from: 300, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ab317aa09c4bd44abbf02517141e37ef.asciidoc b/docs/doc_examples/ab317aa09c4bd44abbf02517141e37ef.asciidoc new file mode 100644 index 000000000..fa9f0fb4c --- /dev/null +++ b/docs/doc_examples/ab317aa09c4bd44abbf02517141e37ef.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + text: { + type: "text", + term_vector: "with_positions_offsets", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Quick brown fox", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match: { + text: "brown fox", + }, + }, + highlight: { + fields: { + text: {}, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ab3c36b70459093beafbfd3a7ae75b9b.asciidoc b/docs/doc_examples/ab3c36b70459093beafbfd3a7ae75b9b.asciidoc new file mode 100644 index 000000000..2e5783745 --- /dev/null +++ b/docs/doc_examples/ab3c36b70459093beafbfd3a7ae75b9b.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + date: "2015-10-01T05:30:00Z", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + date: "2015-10-01T06:30:00Z", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + field: "date", + calendar_interval: "day", + offset: "+6h", + format: 
"iso8601", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ab8b4537fad80107bc88f633d4039a52.asciidoc b/docs/doc_examples/ab8b4537fad80107bc88f633d4039a52.asciidoc new file mode 100644 index 000000000..54ed0255c --- /dev/null +++ b/docs/doc_examples/ab8b4537fad80107bc88f633d4039a52.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "logs", + aliases: { + "": {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc b/docs/doc_examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc new file mode 100644 index 000000000..520c175f2 --- /dev/null +++ b/docs/doc_examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.allocationExplain({ + index: "my-index-000001", + shard: 0, + primary: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/abb4a58089574211d434946a923e5725.asciidoc b/docs/doc_examples/abb4a58089574211d434946a923e5725.asciidoc new file mode 100644 index 000000000..705d8bb1f --- /dev/null +++ b/docs/doc_examples/abb4a58089574211d434946a923e5725.asciidoc @@ -0,0 +1,89 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_logs", + size: 0, + aggs: { + client_ip: { + composite: { + sources: [ + { + client_ip: { + terms: { + field: "clientip", + }, + }, + }, + ], + }, + aggs: { + url_dc: { + cardinality: { + field: "url.keyword", + }, + }, + bytes_sum: { + sum: { + field: "bytes", + }, + }, + geo_src_dc: { + cardinality: { + field: "geo.src", + }, + }, + geo_dest_dc: { + cardinality: { + field: "geo.dest", + }, + }, + responses_total: { + value_count: { + field: "timestamp", + }, + }, + success: { + filter: { + term: { + response: "200", + }, + }, + }, + error404: { + filter: { + term: { + response: "404", + }, + }, + }, + error503: { + filter: { + term: { + response: "503", + }, + }, + }, + malicious_client_ip: { + inference: { + model_id: "malicious_clients_model", + buckets_path: { + response_count: "responses_total", + url_dc: "url_dc", + bytes_sum: "bytes_sum", + geo_src_dc: "geo_src_dc", + geo_dest_dc: "geo_dest_dc", + success: "success._count", + error404: "error404._count", + error503: "error503._count", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/abc280775734daa6cf2c28868e155d10.asciidoc b/docs/doc_examples/abc280775734daa6cf2c28868e155d10.asciidoc new file mode 100644 index 000000000..3ea4cf6b5 --- /dev/null +++ b/docs/doc_examples/abc280775734daa6cf2c28868e155d10.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "exams", + refresh: "true", + document: { + grade: [1, 2, 3], + weight: 2, + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "exams", + size: 0, + aggs: { + weighted_grade: { + weighted_avg: { + value: { + field: "grade", + }, + weight: { + field: "weight", + }, + }, 
+ }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/abc496de5fd013099a134db369b34a8b.asciidoc b/docs/doc_examples/abc496de5fd013099a134db369b34a8b.asciidoc new file mode 100644 index 000000000..014375370 --- /dev/null +++ b/docs/doc_examples/abc496de5fd013099a134db369b34a8b.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + query: { + constant_score: { + filter: { + match: { + type: "hat", + }, + }, + }, + }, + aggs: { + hat_prices: { + sum: { + field: "price", + missing: 100, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/abc7a670a47516b58b6b07d7497b140c.asciidoc b/docs/doc_examples/abc7a670a47516b58b6b07d7497b140c.asciidoc new file mode 100644 index 000000000..061bd7255 --- /dev/null +++ b/docs/doc_examples/abc7a670a47516b58b6b07d7497b140c.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + query: { + constant_score: { + filter: { + bool: { + should: [ + { + range: { + my_date: { + gte: "now-1h", + lte: "now-1h/m", + }, + }, + }, + { + range: { + my_date: { + gt: "now-1h/m", + lt: "now/m", + }, + }, + }, + { + range: { + my_date: { + gte: "now/m", + lte: "now", + }, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc b/docs/doc_examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc index 67681a279..f8ef5375f 100644 --- a/docs/doc_examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc +++ b/docs/doc_examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc @@ -4,16 +4,13 @@ [source, js] ---- const response = await client.search({ - index: 'test', - filter_path: 'hits.total', - body: { - query: { - match: { - flag: 'foo' - } - } - } -}) -console.log(response) + index: "test", + filter_path: "hits.total", + query: { + match: { + flag: "foo", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc b/docs/doc_examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc new file mode 100644 index 000000000..6963bfe68 --- /dev/null +++ b/docs/doc_examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getUser(); +console.log(response); +---- diff --git a/docs/doc_examples/abf329ebefaf58acd4ee30e685731499.asciidoc b/docs/doc_examples/abf329ebefaf58acd4ee30e685731499.asciidoc deleted file mode 100644 index f319e125b..000000000 --- a/docs/doc_examples/abf329ebefaf58acd4ee30e685731499.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'index_double', - body: { - mappings: { - properties: { - field: { - type: 'double' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ac366b9dda7040e743dee85335354094.asciidoc b/docs/doc_examples/ac366b9dda7040e743dee85335354094.asciidoc new file mode 100644 index 000000000..67c8ca406 --- /dev/null +++ 
b/docs/doc_examples/ac366b9dda7040e743dee85335354094.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "shingle", + min_shingle_size: 2, + max_shingle_size: 3, + }, + ], + text: "quick brown fox jumps", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac483996d479946d57c374c3a86b2621.asciidoc b/docs/doc_examples/ac483996d479946d57c374c3a86b2621.asciidoc new file mode 100644 index 000000000..efaf6b96b --- /dev/null +++ b/docs/doc_examples/ac483996d479946d57c374c3a86b2621.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_field: { + type: "search_as_you_type", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac497917ef707538198a8458ae3d5c6b.asciidoc b/docs/doc_examples/ac497917ef707538198a8458ae3d5c6b.asciidoc new file mode 100644 index 000000000..f89e8cf0d --- /dev/null +++ b/docs/doc_examples/ac497917ef707538198a8458ae3d5c6b.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: "this is a test", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac544eb247a29ca42aab13826ca88561.asciidoc b/docs/doc_examples/ac544eb247a29ca42aab13826ca88561.asciidoc deleted file mode 100644 index 6ea5217a4..000000000 --- a/docs/doc_examples/ac544eb247a29ca42aab13826ca88561.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - script: { - source: 'if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }', - lang: 'painless', - params: { - tag: 'blue' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ac73895ca1882cd1ac65b1facfbb5c63.asciidoc b/docs/doc_examples/ac73895ca1882cd1ac65b1facfbb5c63.asciidoc new file mode 100644 index 000000000..7edc35273 --- /dev/null +++ b/docs/doc_examples/ac73895ca1882cd1ac65b1facfbb5c63.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001", + query: { + match: { + "user.id": "elkbee", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac8328bc51fd396b3ce5f7ef3e1e73df.asciidoc b/docs/doc_examples/ac8328bc51fd396b3ce5f7ef3e1e73df.asciidoc new file mode 100644 index 000000000..b0ca64df0 --- /dev/null +++ b/docs/doc_examples/ac8328bc51fd396b3ce5f7ef3e1e73df.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.getRepository(); +console.log(response); +---- diff --git a/docs/doc_examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc 
b/docs/doc_examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc new file mode 100644 index 000000000..d3ec042d7 --- /dev/null +++ b/docs/doc_examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + flat_settings: "true", + transient: { + "indices.recovery.max_bytes_per_sec": "20mb", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc b/docs/doc_examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc new file mode 100644 index 000000000..38ee3b575 --- /dev/null +++ b/docs/doc_examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_src_only_repository", + repository: { + type: "source", + settings: { + delegate_type: "fs", + location: "my_backup_repository", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/acb10091ad335ddd15d71021aaf23c62.asciidoc b/docs/doc_examples/acb10091ad335ddd15d71021aaf23c62.asciidoc new file mode 100644 index 000000000..af0bba0f8 --- /dev/null +++ b/docs/doc_examples/acb10091ad335ddd15d71021aaf23c62.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + track_scores: true, + sort: [ + { + post_date: { + order: "desc", + }, + }, + { + name: "desc", + }, + { + age: "desc", + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/acb850c08f51226eadb75be09e336076.asciidoc b/docs/doc_examples/acb850c08f51226eadb75be09e336076.asciidoc new file mode 100644 index 000000000..9c0903785 --- /dev/null +++ b/docs/doc_examples/acb850c08f51226eadb75be09e336076.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.status({ + id: "FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/acc52da725a996ae696b00d9f818dfde.asciidoc b/docs/doc_examples/acc52da725a996ae696b00d9f818dfde.asciidoc new file mode 100644 index 000000000..17e1e0a8a --- /dev/null +++ b/docs/doc_examples/acc52da725a996ae696b00d9f818dfde.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "file-path-test", + analyzer: "custom_path_tree", + text: "/User/alice/photos/2017/05/16/my_photo1.jpg", +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "file-path-test", + analyzer: "custom_path_tree_reversed", + text: "/User/alice/photos/2017/05/16/my_photo1.jpg", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc b/docs/doc_examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc new file mode 100644 index 000000000..3224e514b --- /dev/null +++ 
b/docs/doc_examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + query: "city.\\*:(this AND that OR thus)", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad0dcbc7fc619e952c8825b8f307b7b2.asciidoc b/docs/doc_examples/ad0dcbc7fc619e952c8825b8f307b7b2.asciidoc index ceac02596..36fb42740 100644 --- a/docs/doc_examples/ad0dcbc7fc619e952c8825b8f307b7b2.asciidoc +++ b/docs/doc_examples/ad0dcbc7fc619e952c8825b8f307b7b2.asciidoc @@ -4,21 +4,13 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'Jon', - type: 'cross_fields', - fields: [ - 'first', - 'first.edge', - 'last', - 'last.edge' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "Jon", + type: "cross_fields", + fields: ["first", "first.edge", "last", "last.edge"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/ad2416ca0581316cee6c63129685bca5.asciidoc b/docs/doc_examples/ad2416ca0581316cee6c63129685bca5.asciidoc new file mode 100644 index 000000000..8b68bcfdb --- /dev/null +++ b/docs/doc_examples/ad2416ca0581316cee6c63129685bca5.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["title", "content"], + query: "this OR that OR thus", + minimum_should_match: 2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc b/docs/doc_examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc new file mode 100644 index 000000000..3768cd346 --- /dev/null +++ b/docs/doc_examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-image-index", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + "image-vector": [1, 5, -20], + "file-type": "jpg", + title: "mountain lake", + }, + { + index: { + _id: "2", + }, + }, + { + "image-vector": [42, 8, -15], + "file-type": "png", + title: "frozen lake", + }, + { + index: { + _id: "3", + }, + }, + { + "image-vector": [15, 11, 23], + "file-type": "jpg", + title: "mountain lake lodge", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc b/docs/doc_examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc new file mode 100644 index 000000000..d855f164e --- /dev/null +++ b/docs/doc_examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.count({ + index: "my-index-000001", + q: "user:kimchy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad57ccba0a060da4f5313692fa26a235.asciidoc b/docs/doc_examples/ad57ccba0a060da4f5313692fa26a235.asciidoc new file mode 100644 index 000000000..9a83c4ccd --- /dev/null +++ b/docs/doc_examples/ad57ccba0a060da4f5313692fa26a235.asciidoc @@ -0,0 +1,72 @@ +// This file is autogenerated, DO 
NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + date: { + type: "date_nanos", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + date: "2015-01-01", + }, + { + index: { + _id: "2", + }, + }, + { + date: "2015-01-01T12:10:30.123456789Z", + }, + { + index: { + _id: "3", + }, + }, + { + date: 1420070400000, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + sort: { + date: "asc", + }, + runtime_mappings: { + date_has_nanos: { + type: "boolean", + script: "emit(doc['date'].value.nano != 0)", + }, + }, + fields: [ + { + field: "date", + format: "strict_date_optional_time_nanos", + }, + { + field: "date_has_nanos", + }, + ], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ad63eca6829a25293c9be589c1870547.asciidoc b/docs/doc_examples/ad63eca6829a25293c9be589c1870547.asciidoc new file mode 100644 index 000000000..c3d0e495e --- /dev/null +++ b/docs/doc_examples/ad63eca6829a25293c9be589c1870547.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_moving_sum: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.sum(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad6d81be5fad4bad87486b699454dce5.asciidoc b/docs/doc_examples/ad6d81be5fad4bad87486b699454dce5.asciidoc new file mode 100644 index 000000000..6abd80ac6 --- /dev/null +++ b/docs/doc_examples/ad6d81be5fad4bad87486b699454dce5.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "node_upgrade", + size: 0, + aggs: { + startup_time_ttest: { + t_test: { + a: { + field: "startup_time_before", + }, + b: { + field: "startup_time_after", + }, + type: "paired", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad6ea0c1e46712aa1fd6d3bfa0ec979e.asciidoc b/docs/doc_examples/ad6ea0c1e46712aa1fd6d3bfa0ec979e.asciidoc deleted file mode 100644 index 79e77abd8..000000000 --- a/docs/doc_examples/ad6ea0c1e46712aa1fd6d3bfa0ec979e.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - query: '(new york city) OR (big apple)', - default_field: 'content' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ad79228630684d950fe9792a768d24c5.asciidoc b/docs/doc_examples/ad79228630684d950fe9792a768d24c5.asciidoc deleted file mode 100644 index 7911e5d56..000000000 --- a/docs/doc_examples/ad79228630684d950fe9792a768d24c5.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test', - alias: 'alias1', - is_write_index: false - } - }, - { - add: { - index: 'test2', - alias: 'alias1', - is_write_index: true - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ad88e46bb06739991498dee248850223.asciidoc b/docs/doc_examples/ad88e46bb06739991498dee248850223.asciidoc new file mode 100644 index 000000000..e9db96801 --- /dev/null +++ b/docs/doc_examples/ad88e46bb06739991498dee248850223.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.threadPool(); +console.log(response); +---- diff --git a/docs/doc_examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc b/docs/doc_examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc new file mode 100644 index 000000000..03f452f53 --- /dev/null +++ b/docs/doc_examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + id: "my-search-template", + params: { + query_string: "hello world", + from: 20, + size: 10, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ada2675a9c631da2bfe627fc2618f5ed.asciidoc b/docs/doc_examples/ada2675a9c631da2bfe627fc2618f5ed.asciidoc new file mode 100644 index 000000000..a7f2f8f65 --- /dev/null +++ b/docs/doc_examples/ada2675a9c631da2bfe627fc2618f5ed.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + script_score: { + query: { + match: { + message: "elasticsearch", + }, + }, + script: { + source: "doc['my-int'].value / 10 ", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc b/docs/doc_examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc new file mode 100644 index 000000000..27998d47c --- /dev/null +++ b/docs/doc_examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.msearch({ + index: "my-index-000001", + searches: [ + {}, + { + query: { + match_all: {}, + }, + from: 0, + size: 10, + }, + {}, + { + query: { + match_all: {}, + }, + }, + { + index: "my-index-000002", + }, + { + query: { + match_all: {}, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/b41dce56b0e640d32b1cf452f87cec17.asciidoc b/docs/doc_examples/add240aa149d8b11139947502b279ee0.asciidoc similarity index 56% rename from docs/doc_examples/b41dce56b0e640d32b1cf452f87cec17.asciidoc rename to docs/doc_examples/add240aa149d8b11139947502b279ee0.asciidoc index 9d92cfeba..c77295769 100644 --- a/docs/doc_examples/b41dce56b0e640d32b1cf452f87cec17.asciidoc +++ b/docs/doc_examples/add240aa149d8b11139947502b279ee0.asciidoc @@ -4,11 +4,8 @@ [source, js] ---- const response = await client.scroll({ - body: { - scroll: '1m', - scroll_id: 'DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==' - } -}) -console.log(response) + scroll: "1m", + scroll_id: 
"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc b/docs/doc_examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc new file mode 100644 index 000000000..a5724dbaa --- /dev/null +++ b/docs/doc_examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.enableUser({ + username: "jacknich", +}); +console.log(response); +---- diff --git a/docs/doc_examples/adf728b0c11c5c309c730205609a379d.asciidoc b/docs/doc_examples/adf728b0c11c5c309c730205609a379d.asciidoc new file mode 100644 index 000000000..dcdc44c12 --- /dev/null +++ b/docs/doc_examples/adf728b0c11c5c309c730205609a379d.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + description: "Set dynamic '' field to 'code' value", + field: "{{{service}}}", + value: "{{{code}}}", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ae0d20c2ebb59278e08a26c9634d90c9.asciidoc b/docs/doc_examples/ae0d20c2ebb59278e08a26c9634d90c9.asciidoc new file mode 100644 index 000000000..da6f2a257 --- /dev/null +++ b/docs/doc_examples/ae0d20c2ebb59278e08a26c9634d90c9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.create({ + repository: "my_repository", + snapshot: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ae398a6b6494e7982ef2549fc2cd2d8e.asciidoc b/docs/doc_examples/ae398a6b6494e7982ef2549fc2cd2d8e.asciidoc new file mode 100644 index 000000000..b3ba17b23 --- /dev/null +++ b/docs/doc_examples/ae398a6b6494e7982ef2549fc2cd2d8e.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + full_name: { + path_match: ["name.*", "user.name.*"], + path_unmatch: ["*.middle", "*.midinitial"], + mapping: { + type: "text", + copy_to: "full_name", + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + name: { + first: "John", + middle: "Winston", + last: "Lennon", + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + user: { + name: { + first: "Jane", + midinitial: "M", + last: "Salazar", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ae4aa368617637a390074535df86e64b.asciidoc b/docs/doc_examples/ae4aa368617637a390074535df86e64b.asciidoc new file mode 100644 index 000000000..4c1c1e246 --- /dev/null +++ b/docs/doc_examples/ae4aa368617637a390074535df86e64b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.setUpgradeMode({ + enabled: "true", + timeout: 
"10m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ae591d49e54b838c15cdcf64a8dee9c2.asciidoc b/docs/doc_examples/ae591d49e54b838c15cdcf64a8dee9c2.asciidoc new file mode 100644 index 000000000..0f73de942 --- /dev/null +++ b/docs/doc_examples/ae591d49e54b838c15cdcf64a8dee9c2.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_docs: 10000000, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc b/docs/doc_examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc new file mode 100644 index 000000000..4404c4b26 --- /dev/null +++ b/docs/doc_examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDataFrameAnalytics({ + id: "student_performance_mathematics_0.3", + source: { + index: "student_performance_mathematics", + }, + dest: { + index: "student_performance_mathematics_reg", + }, + analysis: { + regression: { + dependent_variable: "G3", + training_percent: 70, + randomize_seed: 19673948271, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ae9b5fbd42af2386ffbf56ad4a697e51.asciidoc b/docs/doc_examples/ae9b5fbd42af2386ffbf56ad4a697e51.asciidoc deleted file mode 100644 index 44685f225..000000000 --- a/docs/doc_examples/ae9b5fbd42af2386ffbf56ad4a697e51.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'my_index', - body: { - sort: [ - { - post_date: { - order: 'asc' - } - }, - 'user', - { - name: 'desc' - }, - { - age: 'desc' - }, - '_score' - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc b/docs/doc_examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc index fd3601872..5b92e389a 100644 --- a/docs/doc_examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc +++ b/docs/doc_examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc @@ -4,44 +4,43 @@ [source, js] ---- const response = await client.bulk({ - body: [ + operations: [ { index: { - _index: 'test', - _id: '1' - } + _index: "test", + _id: "1", + }, }, { - field1: 'value1' + field1: "value1", }, { delete: { - _index: 'test', - _id: '2' - } + _index: "test", + _id: "2", + }, }, { create: { - _index: 'test', - _id: '3' - } + _index: "test", + _id: "3", + }, }, { - field1: 'value3' + field1: "value3", }, { update: { - _id: '1', - _index: 'test' - } + _id: "1", + _index: "test", + }, }, { doc: { - field2: 'value2' - } - } - ] -}) -console.log(response) + field2: "value2", + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc b/docs/doc_examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc new file mode 100644 index 000000000..0c43a303d --- /dev/null +++ b/docs/doc_examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "my-index-000001", + id: 1, + fields: ["text", "some_field_without_term_vectors"], + offsets: true, + positions: true, + term_statistics: true, + field_statistics: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aee26dd62fbb6d614a0798f3344c0598.asciidoc b/docs/doc_examples/aee26dd62fbb6d614a0798f3344c0598.asciidoc new file mode 100644 index 000000000..8b4ea58cc --- /dev/null +++ b/docs/doc_examples/aee26dd62fbb6d614a0798f3344c0598.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "issues", + query: { + match_all: {}, + }, + aggs: { + comments: { + nested: { + path: "comments", + }, + aggs: { + top_usernames: { + terms: { + field: "comments.username", + }, + aggs: { + comment_to_issue: { + reverse_nested: {}, + aggs: { + top_tags_per_comment: { + terms: { + field: "tags", + }, + }, + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc b/docs/doc_examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc new file mode 100644 index 000000000..36cbdb71c --- /dev/null +++ b/docs/doc_examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": [-70, 40], + order: "asc", + unit: "km", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af00a58d9171d32f6efe52d94e51e526.asciidoc b/docs/doc_examples/af00a58d9171d32f6efe52d94e51e526.asciidoc new file mode 100644 index 000000000..bc40fbb31 --- /dev/null +++ b/docs/doc_examples/af00a58d9171d32f6efe52d94e51e526.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "hindi_example", + settings: { + analysis: { + filter: { + hindi_stop: { + type: "stop", + stopwords: "_hindi_", + }, + hindi_keywords: { + type: "keyword_marker", + keywords: ["उदाहरण"], + }, + hindi_stemmer: { + type: "stemmer", + language: "hindi", + }, + }, + analyzer: { + rebuilt_hindi: { + tokenizer: "standard", + filter: [ + "lowercase", + "decimal_digit", + "hindi_keywords", + "indic_normalization", + "hindi_normalization", + "hindi_stop", + "hindi_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc b/docs/doc_examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc new file mode 100644 index 000000000..73aaca9ec --- /dev/null +++ b/docs/doc_examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.getPolicy({ + name: "my-policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/af3fb9fa5691a7b37a6dc2a69ff66e64.asciidoc b/docs/doc_examples/af3fb9fa5691a7b37a6dc2a69ff66e64.asciidoc deleted file mode 100644 index e35a78b95..000000000 --- 
a/docs/doc_examples/af3fb9fa5691a7b37a6dc2a69ff66e64.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - remove: { - index: 'test1', - alias: 'alias1' - } - }, - { - add: { - index: 'test1', - alias: 'alias2' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/af44cc7fb0c435d4497c77baf904bf5e.asciidoc b/docs/doc_examples/af44cc7fb0c435d4497c77baf904bf5e.asciidoc new file mode 100644 index 000000000..b56c54770 --- /dev/null +++ b/docs/doc_examples/af44cc7fb0c435d4497c77baf904bf5e.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af517b6936fa41d124d68b107b2efdc3.asciidoc b/docs/doc_examples/af517b6936fa41d124d68b107b2efdc3.asciidoc new file mode 100644 index 000000000..e43a41a04 --- /dev/null +++ b/docs/doc_examples/af517b6936fa41d124d68b107b2efdc3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.deleteLifecycle({ + name: "my_policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/af607715d0693587dd12962266359a96.asciidoc b/docs/doc_examples/af607715d0693587dd12962266359a96.asciidoc new file mode 100644 index 000000000..cb25ed36d --- /dev/null +++ b/docs/doc_examples/af607715d0693587dd12962266359a96.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_s3_repository", + repository: { + type: "s3", + settings: { + bucket: "my-bucket", + another_setting: "setting-value", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af746266a49a693ff6170c88da8a8c04.asciidoc b/docs/doc_examples/af746266a49a693ff6170c88da8a8c04.asciidoc new file mode 100644 index 000000000..0cb4cb18f --- /dev/null +++ b/docs/doc_examples/af746266a49a693ff6170c88da8a8c04.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + default: { + tokenizer: "whitespace", + filter: ["my_custom_stop_words_filter"], + }, + }, + filter: { + my_custom_stop_words_filter: { + type: "stop", + ignore_case: true, + stopwords: ["and", "is", "the"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af7c5add165b005aefb552d79130fed6.asciidoc b/docs/doc_examples/af7c5add165b005aefb552d79130fed6.asciidoc new file mode 100644 index 000000000..b1d4507ce --- /dev/null +++ b/docs/doc_examples/af7c5add165b005aefb552d79130fed6.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.search({ + index: "my_locations", + query: { + geo_grid: { + location: { + geotile: "6/32/22", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af84b3995564a7ca84360a526a4ac896.asciidoc b/docs/doc_examples/af84b3995564a7ca84360a526a4ac896.asciidoc new file mode 100644 index 000000000..ff6a42618 --- /dev/null +++ b/docs/doc_examples/af84b3995564a7ca84360a526a4ac896.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "5_char_words_example", + settings: { + analysis: { + analyzer: { + lowercase_5_char: { + tokenizer: "lowercase", + filter: ["5_char_trunc"], + }, + }, + filter: { + "5_char_trunc": { + type: "truncate", + length: 5, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af85ad2551d1cc6742c6521d71c889cc.asciidoc b/docs/doc_examples/af85ad2551d1cc6742c6521d71c889cc.asciidoc new file mode 100644 index 000000000..ef4dfe564 --- /dev/null +++ b/docs/doc_examples/af85ad2551d1cc6742c6521d71c889cc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + title: { + type: "text", + analyzer: "whitespace", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af91019991bee136df5460e2fd4ac72a.asciidoc b/docs/doc_examples/af91019991bee136df5460e2fd4ac72a.asciidoc new file mode 100644 index 000000000..c45e9cde5 --- /dev/null +++ b/docs/doc_examples/af91019991bee136df5460e2fd4ac72a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-data-stream", + lazy: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc b/docs/doc_examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc new file mode 100644 index 000000000..a107b8955 --- /dev/null +++ b/docs/doc_examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shardStores({ + index: "test1,test2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc b/docs/doc_examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc new file mode 100644 index 000000000..663d97eb6 --- /dev/null +++ b/docs/doc_examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-data-stream", + size: 0, + aggs: { + tsid: { + terms: { + field: "_tsid", + }, + aggs: { + over_time: { + date_histogram: { + field: "@timestamp", + fixed_interval: "1d", + }, + aggs: { + min: { + min: { + field: "kubernetes.container.memory.usage.bytes", + }, + }, + max: { + max: { + field: "kubernetes.container.memory.usage.bytes", + }, + }, + avg: { + avg: { + field: "kubernetes.container.memory.usage.bytes", + }, + }, + 
}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/afa24b7d72c2d9f586023a49bd655ec7.asciidoc b/docs/doc_examples/afa24b7d72c2d9f586023a49bd655ec7.asciidoc new file mode 100644 index 000000000..0ed57946a --- /dev/null +++ b/docs/doc_examples/afa24b7d72c2d9f586023a49bd655ec7.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: "my-data-stream", + runtime_mappings: { + "source.ip": { + type: "ip", + script: + "\n String sourceip=grok('%{IPORHOST:sourceip} .*').extract(doc[ \"message\" ].value)?.sourceip;\n if (sourceip != null) emit(sourceip);\n ", + }, + }, + query: { + bool: { + filter: [ + { + range: { + "@timestamp": { + gte: "now-2y/d", + lt: "now/d", + }, + }, + }, + { + range: { + "source.ip": { + gte: "192.0.2.0", + lte: "192.0.2.255", + }, + }, + }, + ], + }, + }, + fields: ["*"], + _source: false, + sort: [ + { + "@timestamp": "desc", + }, + { + "source.ip": "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/afadb6bb7d0fa5a4531708af1ea8f9f8.asciidoc b/docs/doc_examples/afadb6bb7d0fa5a4531708af1ea8f9f8.asciidoc new file mode 100644 index 000000000..3ee21ca53 --- /dev/null +++ b/docs/doc_examples/afadb6bb7d0fa5a4531708af1ea8f9f8.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "mylogs-*", + }, + dest: { + index: "mylogs", + op_type: "create", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc b/docs/doc_examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc new file mode 100644 index 000000000..6e0d97dd7 --- /dev/null +++ b/docs/doc_examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.delete({ + name: "my-app", +}); +console.log(response); +---- diff --git a/docs/doc_examples/afc0a9cffc0100797a3f093094394763.asciidoc b/docs/doc_examples/afc0a9cffc0100797a3f093094394763.asciidoc new file mode 100644 index 000000000..9129b70e3 --- /dev/null +++ b/docs/doc_examples/afc0a9cffc0100797a3f093094394763.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlInvalidate({ + query_string: + "SAMLRequest=nZFda4MwFIb%2FiuS%2BmviRpqFaClKQdbvo2g12M2KMraCJ9cRR9utnW4Wyi13sMie873MeznJ1aWrnS3VQGR0j4mLkKC1NUeljjA77zYyhVbIE0dR%2By7fmaHq7U%2BdegXWGpAZ%2B%2F4pR32luBFTAtWgUcCv56%2Fp5y30X87Yz1khTIycdgpUW9kY7WdsC9zxoXTvMvWuVV98YyMnSGH2SYE5pwALBIr9QKiwDGpW0oGVUznGeMyJZKFkQ4jBf5HnhUymjIhzCAL3KNFihbYx8TBYzzGaY7EnIyZwHzCWMfiDnbRIftkSjJr%2BFu0e9v%2B0EgOquRiiZjKpiVFp6j50T4WXoyNJ%2FEWC9fdqc1t%2F1%2B2F3aUpjzhPiXpqMz1%2FHSn4A&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=MsAYz2NFdovMG2mXf6TSpu5vlQQyEJAg%2B4KCwBqJTmrb3yGXKUtIgvjqf88eCAK32v3eN8vupjPC8LglYmke1ZnjK0%2FKxzkvSjTVA7mMQe2AQdKbkyC038zzRq%2FYHcjFDE%2Bz0qISwSHZY2NyLePmwU7SexEXnIz37jKC6NMEhus%3D", + realm: "saml1", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/afc29b61c532cf683f749baf013e7bfe.asciidoc b/docs/doc_examples/afc29b61c532cf683f749baf013e7bfe.asciidoc deleted file mode 100644 index 44d30f594..000000000 --- a/docs/doc_examples/afc29b61c532cf683f749baf013e7bfe.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putMapping({ - index: 'my_index', - body: { - properties: { - user_id: { - type: 'alias', - path: 'user_identifier' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/afcacd742d18bf220e02f0bc6891526d.asciidoc b/docs/doc_examples/afcacd742d18bf220e02f0bc6891526d.asciidoc new file mode 100644 index 000000000..2fb8b47c3 --- /dev/null +++ b/docs/doc_examples/afcacd742d18bf220e02f0bc6891526d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sale_date: { + auto_date_histogram: { + field: "date", + buckets: 10, + minimum_interval: "minute", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/afd90d268187f995dc002abc189f818d.asciidoc b/docs/doc_examples/afd90d268187f995dc002abc189f818d.asciidoc new file mode 100644 index 000000000..ce066008c --- /dev/null +++ b/docs/doc_examples/afd90d268187f995dc002abc189f818d.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + format: "yyyy-MM-dd", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/afdb19ad1ebb4f64e235528b640817b6.asciidoc b/docs/doc_examples/afdb19ad1ebb4f64e235528b640817b6.asciidoc new file mode 100644 index 000000000..8ed6c634a --- /dev/null +++ b/docs/doc_examples/afdb19ad1ebb4f64e235528b640817b6.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + drop: { + description: "Drop documents with 'network.name' of 'Guest'", + if: "ctx?.network?.name == 'Guest'", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/afe30f159937b38d74c869570cfcd369.asciidoc b/docs/doc_examples/afe30f159937b38d74c869570cfcd369.asciidoc new file mode 100644 index 000000000..699718a56 --- /dev/null +++ b/docs/doc_examples/afe30f159937b38d74c869570cfcd369.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.close({ + index: "index_1", +}); +console.log(response); + +const response1 = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "snapshot_2", + wait_for_completion: "true", + indices: "index_1", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/afe5aeb9317f0ae470b28e85a8d98274.asciidoc b/docs/doc_examples/afe5aeb9317f0ae470b28e85a8d98274.asciidoc new file mode 100644 index 
000000000..e5cb83536 --- /dev/null +++ b/docs/doc_examples/afe5aeb9317f0ae470b28e85a8d98274.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + status_code: { + type: "keyword", + null_value: "NULL", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + status_code: null, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + status_code: [], + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + term: { + status_code: "NULL", + }, + }, +}); +console.log(response3); +----
diff --git a/docs/doc_examples/afe87a2850326e0328fbebbefec2e839.asciidoc b/docs/doc_examples/afe87a2850326e0328fbebbefec2e839.asciidoc new file mode 100644 index 000000000..4cdf9bf3e --- /dev/null +++ b/docs/doc_examples/afe87a2850326e0328fbebbefec2e839.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchShards({ + index: "my-index-000001", + routing: "foo,bar", +}); +console.log(response); +----
diff --git a/docs/doc_examples/afef5cac988592b97ae289ab39c2f437.asciidoc b/docs/doc_examples/afef5cac988592b97ae289ab39c2f437.asciidoc new file mode 100644 index 000000000..95eab5366 --- /dev/null +++ b/docs/doc_examples/afef5cac988592b97ae289ab39c2f437.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_field: { + type: "text", + fields: { + keyword: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); +----
diff --git a/docs/doc_examples/affc7ff234dc3acccb2bf7dc51f54813.asciidoc b/docs/doc_examples/affc7ff234dc3acccb2bf7dc51f54813.asciidoc new file mode 100644 index 000000000..e7f29a690 --- /dev/null +++ b/docs/doc_examples/affc7ff234dc3acccb2bf7dc51f54813.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + char_filter: ["html_strip"], + text: "<p>I&apos;m so <b>happy</b>!</p>", +}); +console.log(response); +----
diff --git a/docs/doc_examples/b00ac39faf96785e89be8d4205fb984d.asciidoc b/docs/doc_examples/b00ac39faf96785e89be8d4205fb984d.asciidoc new file mode 100644 index 000000000..3aa692211 --- /dev/null +++ b/docs/doc_examples/b00ac39faf96785e89be8d4205fb984d.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + text: true, + size: 5, + query_string: "mountain climbing", + text_fields: [ + { + name: "title", + boost: 10, + }, + { + name: "description", + boost: 5, + }, + { + name: "state", + boost: 1, + }, + ], + }, +}); +console.log(response); +----
diff --git a/docs/doc_examples/b00d74eed431a272c829c0f798e3a539.asciidoc b/docs/doc_examples/b00d74eed431a272c829c0f798e3a539.asciidoc new file mode 100644 index 000000000..01b12cb2d --- /dev/null +++ b/docs/doc_examples/b00d74eed431a272c829c0f798e3a539.asciidoc @@ -0,0 +1,84 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + d: { + type: "date", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "test", + refresh: "true", + operations: [ + { + index: {}, + }, + { + s: 1, + m: 3.1415, + i: 1, + d: "2020-01-01T00:12:12Z", + t: "cat", + }, + { + index: {}, + }, + { + s: 2, + m: 1, + i: 6, + d: "2020-01-02T00:12:12Z", + t: "dog", + }, + { + index: {}, + }, + { + s: 3, + m: 2.71828, + i: -12, + d: "2019-12-31T00:12:12Z", + t: "chicken", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "test", + filter_path: "aggregations", + aggs: { + tm: { + top_metrics: { + metrics: [ + { + field: "m", + }, + { + field: "i", + }, + { + field: "d", + }, + { + field: "t.keyword", + }, + ], + sort: { + s: "desc", + }, + }, + }, + }, +}); +console.log(response2); +----
diff --git a/docs/doc_examples/b00f3bc0e47905aaa2124d6a025c75d4.asciidoc b/docs/doc_examples/b00f3bc0e47905aaa2124d6a025c75d4.asciidoc new file mode 100644 index 000000000..7e6d9a983 --- /dev/null +++ b/docs/doc_examples/b00f3bc0e47905aaa2124d6a025c75d4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: "SELECT * FROM library ORDER BY page_count DESC LIMIT 5", +}); +console.log(response); +----
diff --git a/docs/doc_examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc b/docs/doc_examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc index f3eb9c277..f3456d0d4 100644 --- a/docs/doc_examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc +++ b/docs/doc_examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc @@ -3,7 +3,6 @@ [source, js] ---- -const response = await client.cluster.health() -console.log(response) +const response = await client.cluster.health(); +console.log(response); ---- -
diff --git a/docs/doc_examples/b0b1ae9582599f501f3b3ed8a42ea2af.asciidoc b/docs/doc_examples/b0b1ae9582599f501f3b3ed8a42ea2af.asciidoc new file mode 100644 index 000000000..98f315c75 --- /dev/null +++ b/docs/doc_examples/b0b1ae9582599f501f3b3ed8a42ea2af.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "circles", + id: 1, + pipeline: "polygonize_circles", + document: { + circle: "CIRCLE (30 10 40)", + }, +}); +console.log(response); + +const response1 = await client.get({ + index: "circles", + id: 1, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc new file mode 100644 index 000000000..82a81bced --- /dev/null +++ b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.renderQuery({ + name: "my_search_application", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b0ce54ff4fec0b0c712506eb81e633f4.asciidoc b/docs/doc_examples/b0ce54ff4fec0b0c712506eb81e633f4.asciidoc new file mode 100644 index 000000000..ec64702c5 --- /dev/null +++ b/docs/doc_examples/b0ce54ff4fec0b0c712506eb81e633f4.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + description: "monthly date-time index naming", + processors: [ + { + date_index_name: { + field: "date1", + index_name_prefix: "my-index-", + date_rounding: "M", + }, + }, + ], + }, + docs: [ + { + _source: { + date1: "2016-04-25T12:02:01.789Z", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/b0d3f839237fabf8cdc2221734c668ad.asciidoc b/docs/doc_examples/b0d3f839237fabf8cdc2221734c668ad.asciidoc new file mode 100644 index 000000000..58041fb5e --- /dev/null +++ b/docs/doc_examples/b0d3f839237fabf8cdc2221734c668ad.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "items", + id: 1, + refresh: "true", + document: { + name: "chocolate", + production_date: "2018-02-01", + location: [-71.34, 41.12], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "items", + id: 2, + refresh: "true", + document: { + name: "chocolate", + production_date: "2018-01-01", + location: [-71.3, 41.15], + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "items", + id: 3, + refresh: "true", + document: { + name: "chocolate", + production_date: "2017-12-01", + location: [-71.3, 41.12], + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b0eaf67e5cce24ef8889bf20951ccec1.asciidoc b/docs/doc_examples/b0eaf67e5cce24ef8889bf20951ccec1.asciidoc index 02f53cb9a..a3b9fd534 100644 --- a/docs/doc_examples/b0eaf67e5cce24ef8889bf20951ccec1.asciidoc +++ b/docs/doc_examples/b0eaf67e5cce24ef8889bf20951ccec1.asciidoc @@ -4,26 +4,23 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - dis_max: { - queries: [ - { - match: { - subject: 'brown fox' - } + query: { + dis_max: { + queries: [ + { + match: { + subject: "brown fox", }, - { - match: { - message: 'brown fox' - } - } - ], - tie_breaker: 0.3 - } - } - } -}) -console.log(response) + }, + { + match: { + message: "brown fox", + }, + }, + ], + tie_breaker: 0.3, + }, + 
}, +}); +console.log(response); ---- -
diff --git a/docs/doc_examples/381fced1882ca8337143e6bb180a5715.asciidoc b/docs/doc_examples/b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc similarity index 67% rename from docs/doc_examples/381fced1882ca8337143e6bb180a5715.asciidoc rename to docs/doc_examples/b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc index b5c93dd1b..8c4ef9868 100644 --- a/docs/doc_examples/381fced1882ca8337143e6bb180a5715.asciidoc +++ b/docs/doc_examples/b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc @@ -4,15 +4,12 @@ [source, js] ---- const response = await client.index({ - index: 'test', - id: '1', - body: { + index: "test", + id: 1, + document: { counter: 1, - tags: [ - 'red' - ] - } -}) -console.log(response) + tags: ["red"], + }, +}); +console.log(response); ---- -
diff --git a/docs/doc_examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc b/docs/doc_examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc new file mode 100644 index 000000000..4137f344f --- /dev/null +++ b/docs/doc_examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.cancel({ + task_id: "<task_id>", +}); +console.log(response); +----
diff --git a/docs/doc_examples/b109d0141ec8a0aed5d3805abc349a20.asciidoc b/docs/doc_examples/b109d0141ec8a0aed5d3805abc349a20.asciidoc new file mode 100644 index 000000000..5a3bb37ba --- /dev/null +++ b/docs/doc_examples/b109d0141ec8a0aed5d3805abc349a20.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movavg: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.linearWeightedAvg(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +----
diff --git a/docs/doc_examples/b11a0675e49df0709be693297ca73a2c.asciidoc b/docs/doc_examples/b11a0675e49df0709be693297ca73a2c.asciidoc new file mode 100644 index 000000000..85c89bc6f --- /dev/null +++ b/docs/doc_examples/b11a0675e49df0709be693297ca73a2c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.xpack.info({ + categories: "build,features", +}); +console.log(response); +----
diff --git a/docs/doc_examples/b14122481ae1f158f1a9a1bfbc4a41b1.asciidoc b/docs/doc_examples/b14122481ae1f158f1a9a1bfbc4a41b1.asciidoc new file mode 100644 index 000000000..09bd844ad --- /dev/null +++ b/docs/doc_examples/b14122481ae1f158f1a9a1bfbc4a41b1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.reloadSecureSettings({ + secure_settings_password: "keystore-password", +}); +console.log(response); +----
diff --git a/docs/doc_examples/b17143780e9904bfc1e1c53436497fa1.asciidoc b/docs/doc_examples/b17143780e9904bfc1e1c53436497fa1.asciidoc new file mode 100644 index 000000000..91bbd6972 --- /dev/null +++ b/docs/doc_examples/b17143780e9904bfc1e1c53436497fa1.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +//
Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + wait_for_completion_timeout: "2s", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +----
diff --git a/docs/doc_examples/b176e0d428726705298184ef39ad5cb2.asciidoc b/docs/doc_examples/b176e0d428726705298184ef39ad5cb2.asciidoc new file mode 100644 index 000000000..06c244807 --- /dev/null +++ b/docs/doc_examples/b176e0d428726705298184ef39ad5cb2.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping2", + roles: ["user", "admin"], + enabled: true, + rules: { + field: { + username: ["esadmin01", "esadmin02"], + }, + }, +}); +console.log(response); +----
diff --git a/docs/doc_examples/b195068563b1dc0f721f5f8c8d172312.asciidoc b/docs/doc_examples/b195068563b1dc0f721f5f8c8d172312.asciidoc new file mode 100644 index 000000000..894dc8c79 --- /dev/null +++ b/docs/doc_examples/b195068563b1dc0f721f5f8c8d172312.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "MULTIPOINT (1002.0 2000.0, 1003.0 2000.0)", + }, +}); +console.log(response); +----
diff --git a/docs/doc_examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc b/docs/doc_examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc new file mode 100644 index 000000000..58a34e6ca --- /dev/null +++ b/docs/doc_examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "<logstash-{now/d-2d}>,<logstash-{now/d-1d}>,<logstash-{now/d}>", +}); +console.log(response); +----
diff --git a/docs/doc_examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc b/docs/doc_examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc index 135a0872e..096b0c769 100644 --- a/docs/doc_examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc +++ b/docs/doc_examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc @@ -4,16 +4,13 @@ [source, js] ---- const response = await client.reindex({ - body: { - source: { - index: 'source' - }, - dest: { - index: 'dest', - pipeline: 'some_ingest_pipeline' - } - } -}) -console.log(response) + source: { + index: "source", + }, + dest: { + index: "dest", + pipeline: "some_ingest_pipeline", + }, +}); +console.log(response); ---- -
diff --git a/docs/doc_examples/b1f7cb4157b13368373383abd7d2b8cb.asciidoc b/docs/doc_examples/b1f7cb4157b13368373383abd7d2b8cb.asciidoc new file mode 100644 index 000000000..0243ebab2 --- /dev/null +++ b/docs/doc_examples/b1f7cb4157b13368373383abd7d2b8cb.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + cluster_two: { + "transport.compress": false, + }, + cluster_three: { + "transport.compress": true, + "transport.ping_schedule": "60s", + }, + }, + }, + }, +}); +console.log(response); +----
diff --git a/docs/doc_examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc
b/docs/doc_examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc new file mode 100644 index 000000000..2ec43a9b9 --- /dev/null +++ b/docs/doc_examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + ids: ["VuaCfGcBCdbkQm-e5aOx"], + owner: "true", +}); +console.log(response); +----
diff --git a/docs/doc_examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc b/docs/doc_examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc new file mode 100644 index 000000000..cbb851c0f --- /dev/null +++ b/docs/doc_examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.tasks({ + v: "true", +}); +console.log(response); +----
diff --git a/docs/doc_examples/b2440b492149b705ef107137fdccb0c2.asciidoc b/docs/doc_examples/b2440b492149b705ef107137fdccb0c2.asciidoc new file mode 100644 index 000000000..86f16baba --- /dev/null +++ b/docs/doc_examples/b2440b492149b705ef107137fdccb0c2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.followInfo({ + index: "<index>", +}); +console.log(response); +----
diff --git a/docs/doc_examples/b24a374c0ad264abbcacb5686f5ed61c.asciidoc b/docs/doc_examples/b24a374c0ad264abbcacb5686f5ed61c.asciidoc new file mode 100644 index 000000000..4d4945469 --- /dev/null +++ b/docs/doc_examples/b24a374c0ad264abbcacb5686f5ed61c.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "text_payloads", + id: 1, + fields: ["text"], + payloads: true, +}); +console.log(response); +----
diff --git a/docs/doc_examples/b25256ed615cd837461b0bfa590526b7.asciidoc b/docs/doc_examples/b25256ed615cd837461b0bfa590526b7.asciidoc new file mode 100644 index 000000000..9def5bea5 --- /dev/null +++ b/docs/doc_examples/b25256ed615cd837461b0bfa590526b7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.pauseAutoFollowPattern({ + name: "my_auto_follow_pattern", +}); +console.log(response); +----
diff --git a/docs/doc_examples/b2652b1763a5fd31e95c983869b433bd.asciidoc b/docs/doc_examples/b2652b1763a5fd31e95c983869b433bd.asciidoc new file mode 100644 index 000000000..5e97578d6 --- /dev/null +++ b/docs/doc_examples/b2652b1763a5fd31e95c983869b433bd.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "metrics_index", + id: 1, + document: { + "network.name": "net-1", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 2, + document: { + "network.name": "net-2", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [8, 17, 8, 7, 6], + }, + }, +});
+console.log(response1); + +const response2 = await client.search({ + index: "metrics_index", + size: 0, + aggs: { + avg_latency: { + avg: { + field: "latency_histo", + }, + }, + }, +}); +console.log(response2); +----
diff --git a/docs/doc_examples/b26b5574438e4eaf146b2428bf537c51.asciidoc b/docs/doc_examples/b26b5574438e4eaf146b2428bf537c51.asciidoc new file mode 100644 index 000000000..8945d602e --- /dev/null +++ b/docs/doc_examples/b26b5574438e4eaf146b2428bf537c51.asciidoc @@ -0,0 +1,50 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information from arrays", + processors: [ + { + foreach: { + field: "attachments", + processor: { + attachment: { + target_field: "_ingest._value.attachment", + field: "_ingest._value.data", + remove_binary: false, + }, + }, + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "attachment", + document: { + attachments: [ + { + filename: "ipsum.txt", + data: "dGhpcyBpcwpqdXN0IHNvbWUgdGV4dAo=", + }, + { + filename: "test.txt", + data: "VGhpcyBpcyBhIHRlc3QK", + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +----
diff --git a/docs/doc_examples/b2b26f8568c5dba7649e79f09b859272.asciidoc b/docs/doc_examples/b2b26f8568c5dba7649e79f09b859272.asciidoc new file mode 100644 index 000000000..461b239ec --- /dev/null +++ b/docs/doc_examples/b2b26f8568c5dba7649e79f09b859272.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "saml-service-user", + password: "<somePasswordHere>", + roles: ["saml-service-role"], +}); +console.log(response); +----
diff --git a/docs/doc_examples/b2dec193082462c775169db438308bc3.asciidoc b/docs/doc_examples/b2dec193082462c775169db438308bc3.asciidoc new file mode 100644 index 000000000..99396dabc --- /dev/null +++ b/docs/doc_examples/b2dec193082462c775169db438308bc3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote-replication", + cluster: ["read_ccr"], + indices: [ + { + names: ["leader-index-name"], + privileges: ["monitor", "read"], + }, + ], +}); +console.log(response); +----
diff --git a/docs/doc_examples/b2e1e802fc3c5fbeb4190af7d598c23e.asciidoc b/docs/doc_examples/b2e1e802fc3c5fbeb4190af7d598c23e.asciidoc new file mode 100644 index 000000000..3b6862fdc --- /dev/null +++ b/docs/doc_examples/b2e1e802fc3c5fbeb4190af7d598c23e.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "GET /search HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +----
diff --git a/docs/doc_examples/b2e20bca1846d7d584626b12eae9f6dc.asciidoc b/docs/doc_examples/b2e20bca1846d7d584626b12eae9f6dc.asciidoc new file mode 100644 index
000000000..c48370b26 --- /dev/null +++ b/docs/doc_examples/b2e20bca1846d7d584626b12eae9f6dc.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + h: "name,node.role,disk.used_percent,disk.used,disk.avail,disk.total", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b2e4f3257c0e0aa3311f7270034bbc42.asciidoc b/docs/doc_examples/b2e4f3257c0e0aa3311f7270034bbc42.asciidoc new file mode 100644 index 000000000..afabda5ab --- /dev/null +++ b/docs/doc_examples/b2e4f3257c0e0aa3311f7270034bbc42.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index", + settings: { + "index.routing.allocation.require.data": null, + "index.routing.allocation.include._tier_preference": "data_hot", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b3623b8c7f3e7650f52b6fb8b050f583.asciidoc b/docs/doc_examples/b3623b8c7f3e7650f52b6fb8b050f583.asciidoc new file mode 100644 index 000000000..2f0e298e1 --- /dev/null +++ b/docs/doc_examples/b3623b8c7f3e7650f52b6fb8b050f583.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.features.getFeatures(); +console.log(response); +---- diff --git a/docs/doc_examples/b3685560cb328f179d96ffe7c2668f72.asciidoc b/docs/doc_examples/b3685560cb328f179d96ffe7c2668f72.asciidoc new file mode 100644 index 000000000..1e269e68b --- /dev/null +++ b/docs/doc_examples/b3685560cb328f179d96ffe7c2668f72.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movavg: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: + "if (values.length > 5*2) {MovingFunctions.holtWinters(values, 0.3, 0.1, 0.1, 5, false)}", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc b/docs/doc_examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc new file mode 100644 index 000000000..8d51c8bb6 --- /dev/null +++ b/docs/doc_examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_internal/desired_balance", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b37919cc438b47477343833b4e522408.asciidoc b/docs/doc_examples/b37919cc438b47477343833b4e522408.asciidoc new file mode 100644 index 000000000..111b4f7a6 --- /dev/null +++ b/docs/doc_examples/b37919cc438b47477343833b4e522408.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "imdb", + doc: { + plot: "When 
wealthy industrialist Tony Stark is forced to build an armored suit after a life-threatening incident, he ultimately decides to use its technology to fight against evil.", + }, + term_statistics: true, + field_statistics: true, + positions: false, + offsets: false, + filter: { + max_num_terms: 3, + min_term_freq: 1, + min_doc_freq: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b3a1c4220617ded67ed43fff2051d324.asciidoc b/docs/doc_examples/b3a1c4220617ded67ed43fff2051d324.asciidoc new file mode 100644 index 000000000..b0861175a --- /dev/null +++ b/docs/doc_examples/b3a1c4220617ded67ed43fff2051d324.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + tags: { + type: "keyword", + eager_global_ordinals: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7cac05cb589f1614fd5b8589153bef06.asciidoc b/docs/doc_examples/b3a711c3deddcdb8a3f6623184a8b794.asciidoc similarity index 52% rename from docs/doc_examples/7cac05cb589f1614fd5b8589153bef06.asciidoc rename to docs/doc_examples/b3a711c3deddcdb8a3f6623184a8b794.asciidoc index 143a43ce2..89cd2af13 100644 --- a/docs/doc_examples/7cac05cb589f1614fd5b8589153bef06.asciidoc +++ b/docs/doc_examples/b3a711c3deddcdb8a3f6623184a8b794.asciidoc @@ -4,15 +4,15 @@ [source, js] ---- const response = await client.update({ - index: 'test', - id: '1', - body: { - doc: { - name: 'new_name' + index: "test", + id: 1, + script: { + source: "ctx._source.counter += params.count", + lang: "painless", + params: { + count: 4, }, - doc_as_upsert: true - } -}) -console.log(response) + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/b3ed567d2c0915a280b6b15f7a37539b.asciidoc b/docs/doc_examples/b3ed567d2c0915a280b6b15f7a37539b.asciidoc new file mode 100644 index 000000000..184318927 --- /dev/null +++ b/docs/doc_examples/b3ed567d2c0915a280b6b15f7a37539b.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + percentiles_monthly_sales: { + percentiles_bucket: { + buckets_path: "sales_per_month>sales", + percents: [25, 50, 75], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b3fffd96fdb118cd059b5f1d67d928de.asciidoc b/docs/doc_examples/b3fffd96fdb118cd059b5f1d67d928de.asciidoc new file mode 100644 index 000000000..02f50769a --- /dev/null +++ b/docs/doc_examples/b3fffd96fdb118cd059b5f1d67d928de.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "MultiPoint", + coordinates: [ + [102, 2], + [103, 2], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b42e7d627cd79e4c5e7a4a3cd8b19ce0.asciidoc b/docs/doc_examples/b42e7d627cd79e4c5e7a4a3cd8b19ce0.asciidoc new file mode 100644 index 000000000..2709e0881 --- /dev/null +++ 
b/docs/doc_examples/b42e7d627cd79e4c5e7a4a3cd8b19ce0.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "one-pipeline-to-rule-them-all", + processors: [ + { + pipeline: { + description: + "If 'service.name' is 'apache_httpd', use 'httpd_pipeline'", + if: "ctx.service?.name == 'apache_httpd'", + name: "httpd_pipeline", + }, + }, + { + pipeline: { + description: "If 'service.name' is 'syslog', use 'syslog_pipeline'", + if: "ctx.service?.name == 'syslog'", + name: "syslog_pipeline", + }, + }, + { + fail: { + description: + "If 'service.name' is not 'apache_httpd' or 'syslog', return a failure message", + if: "ctx.service?.name != 'apache_httpd' && ctx.service?.name != 'syslog'", + message: + "This pipeline requires service.name to be either `syslog` or `apache_httpd`", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/b430122345d560bbd2a77826f5c475f7.asciidoc b/docs/doc_examples/b430122345d560bbd2a77826f5c475f7.asciidoc new file mode 100644 index 000000000..3d767067b --- /dev/null +++ b/docs/doc_examples/b430122345d560bbd2a77826f5c475f7.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + ip_fields: { + match: ["ip_*", "*_ip"], + unmatch: ["one*", "*two"], + mapping: { + type: "ip", + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + one_ip: "will not match", + ip_two: "will not match", + three_ip: "12.12.12.12", + ip_four: "13.13.13.13", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc b/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc new file mode 100644 index 000000000..d51f2451f --- /dev/null +++ b/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_inference/completion/openai_chat_completions", + body: { + input: "What is Elastic?", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b45c60f908b329835ab40609423f378e.asciidoc b/docs/doc_examples/b45c60f908b329835ab40609423f378e.asciidoc new file mode 100644 index 000000000..d97a8f4a6 --- /dev/null +++ b/docs/doc_examples/b45c60f908b329835ab40609423f378e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + h: "node.role", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b468d0124dc485385a34504d5b7af82a.asciidoc b/docs/doc_examples/b468d0124dc485385a34504d5b7af82a.asciidoc new file mode 100644 index 000000000..cafdc3e63 --- /dev/null +++ b/docs/doc_examples/b468d0124dc485385a34504d5b7af82a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + 
wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "hugging-face-embeddings", + pipeline: "hugging_face_embeddings", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4693f2aa9fa65db04ab2499355c54fc.asciidoc b/docs/doc_examples/b4693f2aa9fa65db04ab2499355c54fc.asciidoc new file mode 100644 index 000000000..aef48d3ba --- /dev/null +++ b/docs/doc_examples/b4693f2aa9fa65db04ab2499355c54fc.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cohere-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "cohere_embeddings", + model_text: "Muscles in human body", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/b47945c7db8868dd36ba079b742f2a90.asciidoc b/docs/doc_examples/b47945c7db8868dd36ba079b742f2a90.asciidoc new file mode 100644 index 000000000..5ab2dad91 --- /dev/null +++ b/docs/doc_examples/b47945c7db8868dd36ba079b742f2a90.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my-app", + params: { + default_field: "author", + query_string: "Jane", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4946ecc9101b97102a1c5bcb19e5607.asciidoc b/docs/doc_examples/b4946ecc9101b97102a1c5bcb19e5607.asciidoc new file mode 100644 index 000000000..2bda8fc41 --- /dev/null +++ b/docs/doc_examples/b4946ecc9101b97102a1c5bcb19e5607.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: + '{ "query": { "bool": { "filter": [ {{#year_scope}} { "range": { "@timestamp": { "gte": "now-1y/d", "lt": "now/d" } } }, {{/year_scope}} { "term": { "user.id": "{{user_id}}" }}]}}}', + params: { + year_scope: true, + user_id: "kimchy", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4a0d0ed512dffc10ee53bca2feca49b.asciidoc b/docs/doc_examples/b4a0d0ed512dffc10ee53bca2feca49b.asciidoc deleted file mode 100644 index 86335844e..000000000 --- a/docs/doc_examples/b4a0d0ed512dffc10ee53bca2feca49b.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - query: { - match_all: {} - }, - boost: '5', - functions: [ - { - filter: { - match: { - test: 'bar' - } - }, - random_score: {}, - weight: 23 - }, - { - filter: { - match: { - test: 'cat' - } - }, - weight: 42 - } - ], - max_boost: 42, - score_mode: 'max', - boost_mode: 'multiply', - min_score: 42 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b4aec2a1d353852507c091bdb629b765.asciidoc b/docs/doc_examples/b4aec2a1d353852507c091bdb629b765.asciidoc new file mode 100644 index 000000000..0840b621e --- /dev/null +++ b/docs/doc_examples/b4aec2a1d353852507c091bdb629b765.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putFilter({ + filter_id: "safe_domains", + description: "A list of safe domains", + items: ["*.google.com", "wikipedia.org"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fdcaba9547180439ff4b6275034a5170.asciidoc b/docs/doc_examples/b4d1fc887e40885cdf6ac2d01487cb76.asciidoc similarity index 51% rename from docs/doc_examples/fdcaba9547180439ff4b6275034a5170.asciidoc rename to docs/doc_examples/b4d1fc887e40885cdf6ac2d01487cb76.asciidoc index 276bdcdb1..77651c802 100644 --- a/docs/doc_examples/fdcaba9547180439ff4b6275034a5170.asciidoc +++ b/docs/doc_examples/b4d1fc887e40885cdf6ac2d01487cb76.asciidoc @@ -4,21 +4,18 @@ [source, js] ---- const response = await client.search({ - index: 'twitter', - scroll: '1m', - body: { - slice: { - field: 'date', - id: 0, - max: 10 - }, - query: { + query: { + span_multi: { match: { - title: 'elasticsearch' - } - } - } -}) -console.log(response) + prefix: { + "user.id": { + value: "ki", + boost: 1.08, + }, + }, + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/b4d9d5017d42f27281e734e969949623.asciidoc b/docs/doc_examples/b4d9d5017d42f27281e734e969949623.asciidoc new file mode 100644 index 000000000..6e8f104ef --- /dev/null +++ b/docs/doc_examples/b4d9d5017d42f27281e734e969949623.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.getRepository({ + name: "my-repo", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4da132cb934c33d61e2b60988c6d4a3.asciidoc b/docs/doc_examples/b4da132cb934c33d61e2b60988c6d4a3.asciidoc new file mode 100644 index 000000000..6e6b62677 --- /dev/null +++ b/docs/doc_examples/b4da132cb934c33d61e2b60988c6d4a3.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "timestamp", + calendar_interval: "day", + }, + aggs: { + the_sum: { + sum: { + field: "lemmings", + }, + }, + thirtieth_difference: { + serial_diff: { + buckets_path: "the_sum", + lag: 30, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4f3165e873f551fbaa03945877eb370.asciidoc b/docs/doc_examples/b4f3165e873f551fbaa03945877eb370.asciidoc new file mode 100644 index 000000000..604cc5327 --- /dev/null +++ b/docs/doc_examples/b4f3165e873f551fbaa03945877eb370.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_date_formats: ["yyyy/MM", "MM/dd/yyyy"], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + create_date: "09/25/2015", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b4f4c9ad3301c97fb3c38d108a3bc453.asciidoc b/docs/doc_examples/b4f4c9ad3301c97fb3c38d108a3bc453.asciidoc new file mode 100644 index 000000000..60c5ddca6 --- /dev/null +++ b/docs/doc_examples/b4f4c9ad3301c97fb3c38d108a3bc453.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT 
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + cluster_one: { + seeds: ["127.0.0.1:{remote-interface-default-port}"], + }, + cluster_two: { + mode: "sniff", + seeds: ["127.0.0.1:{remote-interface-default-port-plus1}"], + "transport.compress": true, + skip_unavailable: true, + }, + cluster_three: { + mode: "proxy", + proxy_address: "127.0.0.1:{remote-interface-default-port-plus2}", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b504119238b44cddd3b5944da20a498d.asciidoc b/docs/doc_examples/b504119238b44cddd3b5944da20a498d.asciidoc new file mode 100644 index 000000000..858440978 --- /dev/null +++ b/docs/doc_examples/b504119238b44cddd3b5944da20a498d.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "POLYGON ((1000.0 -1001.0, 1001.0 -1001.0, 1001.0 -1000.0, 1000.0 -1000.0, 1000.0 -1001.0))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b515427f8685ca7d79176def672d19fa.asciidoc b/docs/doc_examples/b515427f8685ca7d79176def672d19fa.asciidoc new file mode 100644 index 000000000..6d63883b0 --- /dev/null +++ b/docs/doc_examples/b515427f8685ca7d79176def672d19fa.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.refresh(); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + size: 0, + q: "extra:test", + filter_path: "hits.total", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b52951b78cd5fb2f9353d1c7e6d37070.asciidoc b/docs/doc_examples/b52951b78cd5fb2f9353d1c7e6d37070.asciidoc new file mode 100644 index 000000000..869f73861 --- /dev/null +++ b/docs/doc_examples/b52951b78cd5fb2f9353d1c7e6d37070.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + wildcard: { + "user.id": { + value: "ki*y", + boost: 1, + rewrite: "constant_score_blended", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc b/docs/doc_examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc new file mode 100644 index 000000000..052d3a53f --- /dev/null +++ b/docs/doc_examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "logs", + mappings: { + _source: { + includes: ["*.count", "meta.*"], + excludes: ["meta.description", "meta.other.*"], + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "logs", + id: 1, + document: { + requests: { + count: 10, + foo: "bar", + }, + meta: { + name: "Some metric", + description: "Some metric description", + other: { + foo: "one", + baz: "two", + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "logs", + query: { + match: { + "meta.other.foo": 
"one", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b573e893de0d5f92d67f4f5eb7f0c353.asciidoc b/docs/doc_examples/b573e893de0d5f92d67f4f5eb7f0c353.asciidoc new file mode 100644 index 000000000..64bf78362 --- /dev/null +++ b/docs/doc_examples/b573e893de0d5f92d67f4f5eb7f0c353.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + stats_monthly_sales: { + stats_bucket: { + buckets_path: "sales_per_month>sales", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc b/docs/doc_examples/b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc new file mode 100644 index 000000000..2bad68c5e --- /dev/null +++ b/docs/doc_examples/b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "geoip", + description: "Add geoip info", + processors: [ + { + geoip: { + field: "ip", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "geoip", + document: { + ip: "89.160.20.128", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc b/docs/doc_examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc new file mode 100644 index 000000000..17ee6fcc0 --- /dev/null +++ b/docs/doc_examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "template_1", + template: { + settings: { + number_of_shards: 1, + }, + aliases: { + alias1: {}, + alias2: { + filter: { + term: { + "user.id": "kimchy", + }, + }, + routing: "shard-1", + }, + "{index}-alias": {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b58b17975bbce307b2ccce5051a449e8.asciidoc b/docs/doc_examples/b58b17975bbce307b2ccce5051a449e8.asciidoc new file mode 100644 index 000000000..6276393a4 --- /dev/null +++ b/docs/doc_examples/b58b17975bbce307b2ccce5051a449e8.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + filter_path: "hits.total", + query: { + range: { + "http.response.bytes": { + lt: 2000000, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc b/docs/doc_examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc new file mode 100644 index 000000000..1e2a0ed25 --- /dev/null +++ b/docs/doc_examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the 
docs examples + +[source, js] +---- +const response = await client.rollup.putJob({ + id: "sensor2", + index_pattern: "sensor-*", + rollup_index: "sensor_rollup", + cron: "*/30 * * * * ?", + page_size: 1000, + groups: { + date_histogram: { + field: "timestamp", + fixed_interval: "1h", + delay: "7d", + }, + terms: { + fields: ["node"], + }, + }, + metrics: [ + { + field: "temperature", + metrics: ["min", "max", "sum"], + }, + { + field: "voltage", + metrics: ["avg"], + }, + ], +}); +console.log(response); + +const response1 = await client.rollup.getJobs({ + id: "_all", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b5e5cd4eccc40d7c5f2a1fcb654bd4a4.asciidoc b/docs/doc_examples/b5e5cd4eccc40d7c5f2a1fcb654bd4a4.asciidoc new file mode 100644 index 000000000..b346dff5b --- /dev/null +++ b/docs/doc_examples/b5e5cd4eccc40d7c5f2a1fcb654bd4a4.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "stackoverflow", + size: 0, + query: { + query_string: { + query: "tags:elasticsearch", + }, + }, + aggs: { + my_unbiased_sample: { + diversified_sampler: { + shard_size: 200, + field: "author", + }, + aggs: { + keywords: { + significant_terms: { + field: "tags", + exclude: ["elasticsearch"], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b5f95bc097a201b29c7200fc8d3d31c1.asciidoc b/docs/doc_examples/b5f95bc097a201b29c7200fc8d3d31c1.asciidoc deleted file mode 100644 index be1a10ea3..000000000 --- a/docs/doc_examples/b5f95bc097a201b29c7200fc8d3d31c1.asciidoc +++ /dev/null @@ -1,44 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.putTemplate({ - name: 'template_1', - body: { - index_patterns: [ - '*' - ], - order: 0, - settings: { - number_of_shards: 1 - }, - mappings: { - _source: { - enabled: false - } - } - } -}) -console.log(response0) - -const response1 = await client.indices.putTemplate({ - name: 'template_2', - body: { - index_patterns: [ - 'te*' - ], - order: 1, - settings: { - number_of_shards: 1 - }, - mappings: { - _source: { - enabled: true - } - } - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/b601bc78fb69e15a42e0783219ddc38d.asciidoc b/docs/doc_examples/b601bc78fb69e15a42e0783219ddc38d.asciidoc new file mode 100644 index 000000000..4f33f0da1 --- /dev/null +++ b/docs/doc_examples/b601bc78fb69e15a42e0783219ddc38d.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + max_monthly_sales: { + max_bucket: { + buckets_path: "sales_per_month>sales", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc b/docs/doc_examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc new file mode 100644 index 000000000..34e45f1b9 --- /dev/null +++ b/docs/doc_examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` 
to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + price: { + missing: "_last", + }, + }, + ], + query: { + term: { + product: "chocolate", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc b/docs/doc_examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc new file mode 100644 index 000000000..c872c534b --- /dev/null +++ b/docs/doc_examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "publications", + fields: "a*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b620ef4400d2f660fe2c67835938442c.asciidoc b/docs/doc_examples/b620ef4400d2f660fe2c67835938442c.asciidoc new file mode 100644 index 000000000..d756cff94 --- /dev/null +++ b/docs/doc_examples/b620ef4400d2f660fe2c67835938442c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.deleteAutoscalingPolicy({ + name: "my_autoscaling_policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc new file mode 100644 index 000000000..7ffe922db --- /dev/null +++ b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc @@ -0,0 +1,216 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + refresh: "true", + operations: [ + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] 
loaded module [x-pack-ent-search]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ...", + }, + ], +}); +console.log(response); + +const response1 = await client.textStructure.findFieldStructure({ + index: "test-logs", + field: "message", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b638e11d6a8a084290f8934d224abd52.asciidoc b/docs/doc_examples/b638e11d6a8a084290f8934d224abd52.asciidoc new file mode 100644 index 000000000..9e193f94e --- /dev/null +++ b/docs/doc_examples/b638e11d6a8a084290f8934d224abd52.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.max_shards_per_node.frozen": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b63ce79ce4fa1bb9b99a789f4dcfef4e.asciidoc b/docs/doc_examples/b63ce79ce4fa1bb9b99a789f4dcfef4e.asciidoc new file mode 100644 index 000000000..d787e9a7f --- /dev/null +++ b/docs/doc_examples/b63ce79ce4fa1bb9b99a789f4dcfef4e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "test", + settings: { + top_metrics_max_size: 100, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b65dbb51ddd496189c65a9326a53480c.asciidoc 
b/docs/doc_examples/b65dbb51ddd496189c65a9326a53480c.asciidoc new file mode 100644 index 000000000..cd9fd0281 --- /dev/null +++ b/docs/doc_examples/b65dbb51ddd496189c65a9326a53480c.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_read_only_url_repository", + repository: { + type: "url", + settings: { + url: "file:/mount/backups/my_fs_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc b/docs/doc_examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc new file mode 100644 index 000000000..47fff6913 --- /dev/null +++ b/docs/doc_examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + metric: "metadata,routing_table", + index: "foo,bar", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b67fa8c560dd10a8e6f226048cd21562.asciidoc b/docs/doc_examples/b67fa8c560dd10a8e6f226048cd21562.asciidoc new file mode 100644 index 000000000..3bd3fc327 --- /dev/null +++ b/docs/doc_examples/b67fa8c560dd10a8e6f226048cd21562.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: '{ "query": { "bool": { "must": {{#toJson}}clauses{{/toJson}} }}}', + params: { + clauses: [ + { + term: { + "user.id": "kimchy", + }, + }, + { + term: { + "url.domain": "example.com", + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b68c85fe1b0d2f264dc0d1cbf530f319.asciidoc b/docs/doc_examples/b68c85fe1b0d2f264dc0d1cbf530f319.asciidoc deleted file mode 100644 index f428e4fb7..000000000 --- a/docs/doc_examples/b68c85fe1b0d2f264dc0d1cbf530f319.asciidoc +++ /dev/null @@ -1,30 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - query: { - match: { - message: 'elasticsearch' - } - }, - script_score: { - script: { - params: { - a: 5, - b: 1.2 - }, - source: "params.a / Math.pow(params.b, doc['likes'].value)" - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b68ed7037042719945a2452d23e64c78.asciidoc b/docs/doc_examples/b68ed7037042719945a2452d23e64c78.asciidoc new file mode 100644 index 000000000..8ec894868 --- /dev/null +++ b/docs/doc_examples/b68ed7037042719945a2452d23e64c78.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 3, + refresh: "true", + document: { + query: { + match: { + message: "brown fox", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b691d41f84b5b46e9051b51db22a46af.asciidoc b/docs/doc_examples/b691d41f84b5b46e9051b51db22a46af.asciidoc new file mode 100644 index 000000000..e026fbdb7 --- /dev/null +++ b/docs/doc_examples/b691d41f84b5b46e9051b51db22a46af.asciidoc @@ -0,0 +1,18 @@ +// This 
file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + rare_terms: { + field: "genre", + include: ["swing", "rock"], + exclude: ["jazz"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc b/docs/doc_examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc new file mode 100644 index 000000000..363f0407f --- /dev/null +++ b/docs/doc_examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings({ + filter_path: "persistent.cluster.remote", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc b/docs/doc_examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc new file mode 100644 index 000000000..c0ceba90e --- /dev/null +++ b/docs/doc_examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + bool: { + should: [ + { + sparse_vector: { + field: "content_embedding", + inference_id: "my-elser-endpoint", + query: "How to avoid muscle soreness after running?", + boost: 1, + }, + }, + { + query_string: { + query: "toxins", + boost: 4, + }, + }, + ], + }, + }, + min_score: 10, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6c872d04eabb39d1947cde6b29d4ae1.asciidoc b/docs/doc_examples/b6c872d04eabb39d1947cde6b29d4ae1.asciidoc new file mode 100644 index 000000000..0d1ab73b0 --- /dev/null +++ b/docs/doc_examples/b6c872d04eabb39d1947cde6b29d4ae1.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + tags: { + terms: { + field: "tags", + min_doc_count: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6e29a0e14b611d4aaafb3051220ea56.asciidoc b/docs/doc_examples/b6e29a0e14b611d4aaafb3051220ea56.asciidoc new file mode 100644 index 000000000..e7faa2968 --- /dev/null +++ b/docs/doc_examples/b6e29a0e14b611d4aaafb3051220ea56.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + title: { + type: "text", + analyzer: "whitespace", + search_analyzer: "simple", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6e385760e036e36827f719b540d9c11.asciidoc b/docs/doc_examples/b6e385760e036e36827f719b540d9c11.asciidoc new file mode 100644 index 000000000..55fd2549a --- /dev/null +++ b/docs/doc_examples/b6e385760e036e36827f719b540d9c11.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-dfs-index", + search_type: "dfs_query_then_fetch", + pretty: "true", + size: 0, + profile: true, + query: { + term: { + 
"my-keyword": { + value: "a", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc b/docs/doc_examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc new file mode 100644 index 000000000..bddd8cdb6 --- /dev/null +++ b/docs/doc_examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-byte-quantized-index", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 4, + index: true, + index_options: { + type: "int4_hnsw", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b717a583b5165e5c6caafc42fdfd9086.asciidoc b/docs/doc_examples/b717a583b5165e5c6caafc42fdfd9086.asciidoc new file mode 100644 index 000000000..7f2287489 --- /dev/null +++ b/docs/doc_examples/b717a583b5165e5c6caafc42fdfd9086.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "places", + mappings: { + properties: { + geometry: { + type: "shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "places", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + name: "NEMO Science Museum", + geometry: "POINT(491.2350 5237.4081)", + }, + { + index: { + _id: 2, + }, + }, + { + name: "Sportpark De Weeren", + geometry: { + type: "Polygon", + coordinates: [ + [ + [496.5305328369141, 5239.347642069457], + [496.6979026794433, 5239.172175893484], + [496.9425201416015, 5239.238958618537], + [496.7944622039794, 5239.420969150824], + [496.5305328369141, 5239.347642069457], + ], + ], + }, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "places", + size: 0, + aggs: { + viewport: { + cartesian_bounds: { + field: "geometry", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b724f547c5d67e95bbc0a9920e47033c.asciidoc b/docs/doc_examples/b724f547c5d67e95bbc0a9920e47033c.asciidoc new file mode 100644 index 000000000..d283de54e --- /dev/null +++ b/docs/doc_examples/b724f547c5d67e95bbc0a9920e47033c.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "file-path-test", + query: { + term: { + "file_path.tree": "/User/alice/photos/2017/05/16", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc b/docs/doc_examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc new file mode 100644 index 000000000..9138df541 --- /dev/null +++ b/docs/doc_examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.allocation({ + v: "true", + h: "node,shards,disk.*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b789292f9cf63ce912e058c46d90ce20.asciidoc b/docs/doc_examples/b789292f9cf63ce912e058c46d90ce20.asciidoc deleted file mode 100644 index c2f132cd0..000000000 --- 
a/docs/doc_examples/b789292f9cf63ce912e058c46d90ce20.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'sales', - size: '0', - body: { - aggs: { - sales_over_time: { - date_histogram: { - field: 'date', - calendar_interval: 'month' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b7a4f5b9a93eff44268a1ee38ee1c6d3.asciidoc b/docs/doc_examples/b7a4f5b9a93eff44268a1ee38ee1c6d3.asciidoc new file mode 100644 index 000000000..e911c937e --- /dev/null +++ b/docs/doc_examples/b7a4f5b9a93eff44268a1ee38ee1c6d3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "archive", + }, + dest: { + index: "my-data-stream", + op_type: "create", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7a9f60b3646efe3834ca8381f8aa560.asciidoc b/docs/doc_examples/b7a9f60b3646efe3834ca8381f8aa560.asciidoc new file mode 100644 index 000000000..4674047c2 --- /dev/null +++ b/docs/doc_examples/b7a9f60b3646efe3834ca8381f8aa560.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.discovery": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7ad394975863a8f5ee29627c3ab738b.asciidoc b/docs/doc_examples/b7ad394975863a8f5ee29627c3ab738b.asciidoc new file mode 100644 index 000000000..9f14e1098 --- /dev/null +++ b/docs/doc_examples/b7ad394975863a8f5ee29627c3ab738b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + prices: { + histogram: { + field: "price", + interval: 50, + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7bb5503e64bd869b2ac1c46c434a079.asciidoc b/docs/doc_examples/b7bb5503e64bd869b2ac1c46c434a079.asciidoc new file mode 100644 index 000000000..b560ec837 --- /dev/null +++ b/docs/doc_examples/b7bb5503e64bd869b2ac1c46c434a079.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + histo: { + histogram: { + field: "price", + interval: 5, + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc b/docs/doc_examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc new file mode 100644 index 000000000..d82a47006 --- /dev/null +++ b/docs/doc_examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 2, + document: { + message: "A new bonsai tree in the office", + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc b/docs/doc_examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc new file mode 100644 index 000000000..0bb694919 --- /dev/null +++ b/docs/doc_examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.health({ + filter_path: "status,*_shards", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc b/docs/doc_examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc new file mode 100644 index 000000000..a89aac295 --- /dev/null +++ b/docs/doc_examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_query_rules/my-ruleset/_rule/my-rule1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc b/docs/doc_examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc new file mode 100644 index 000000000..55e88724b --- /dev/null +++ b/docs/doc_examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping7", + roles: ["ldap-example-user"], + enabled: true, + rules: { + all: [ + { + field: { + dn: "*,ou=subtree,dc=example,dc=com", + }, + }, + { + field: { + "realm.name": "ldap1", + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b81a7b5f5ef19553f9cd49196f31018c.asciidoc b/docs/doc_examples/b81a7b5f5ef19553f9cd49196f31018c.asciidoc new file mode 100644 index 000000000..e7be5898e --- /dev/null +++ b/docs/doc_examples/b81a7b5f5ef19553f9cd49196f31018c.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "items", + mappings: { + properties: { + name: { + type: "keyword", + }, + production_date: { + type: "date", + }, + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b82b156c7b9d1d78054577a6947a6cdd.asciidoc b/docs/doc_examples/b82b156c7b9d1d78054577a6947a6cdd.asciidoc new file mode 100644 index 000000000..8eb0ebc1a --- /dev/null +++ b/docs/doc_examples/b82b156c7b9d1d78054577a6947a6cdd.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "geocells", + id: 1, + pipeline: "geotile2shape", + document: { + geocell: "4/8/5", + }, +}); +console.log(response); + +const response1 = await client.get({ + index: "geocells", + id: 1, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b839f79a5d58506baed5714f1876ab55.asciidoc b/docs/doc_examples/b839f79a5d58506baed5714f1876ab55.asciidoc new file mode 100644 index 000000000..6a88cfba7 --- /dev/null +++ b/docs/doc_examples/b839f79a5d58506baed5714f1876ab55.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use 
`node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: '\n process where process.name == "regsvr32.exe"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/b84932030e60a2cd58884b9dc6d3147f.asciidoc b/docs/doc_examples/b84932030e60a2cd58884b9dc6d3147f.asciidoc new file mode 100644 index 000000000..51159b730 --- /dev/null +++ b/docs/doc_examples/b84932030e60a2cd58884b9dc6d3147f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b85716ba42a57096452665c38995da7d.asciidoc b/docs/doc_examples/b85716ba42a57096452665c38995da7d.asciidoc new file mode 100644 index 000000000..71f3584f3 --- /dev/null +++ b/docs/doc_examples/b85716ba42a57096452665c38995da7d.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.previewDataFrameAnalytics({ + config: { + source: { + index: "houses_sold_last_10_yrs", + }, + analysis: { + regression: { + dependent_variable: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b857abedc64e367def172bd07075e5c7.asciidoc b/docs/doc_examples/b857abedc64e367def172bd07075e5c7.asciidoc new file mode 100644 index 000000000..b4250ac7a --- /dev/null +++ b/docs/doc_examples/b857abedc64e367def172bd07075e5c7.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_fingerprint_analyzer: { + type: "fingerprint", + stopwords: "_english_", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_fingerprint_analyzer", + text: "Yes yes, Gödel said this sentence is consistent and.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b87438263ccd68624b1d69d8750f9432.asciidoc b/docs/doc_examples/b87438263ccd68624b1d69d8750f9432.asciidoc new file mode 100644 index 000000000..ded6c67e6 --- /dev/null +++ b/docs/doc_examples/b87438263ccd68624b1d69d8750f9432.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + status_code: { + type: "long", + }, + session_id: { + type: "long", + index: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b87bc8a521995051c7e7395f9c047e1c.asciidoc b/docs/doc_examples/b87bc8a521995051c7e7395f9c047e1c.asciidoc new file mode 100644 index 000000000..51f2cdbe2 --- /dev/null +++ b/docs/doc_examples/b87bc8a521995051c7e7395f9c047e1c.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + 
properties: { + number_one: { + type: "integer", + ignore_malformed: true, + }, + number_two: { + type: "integer", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Some text value", + number_one: "foo", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + text: "Some text value", + number_two: "foo", + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b88a2d96da1401d548a4540cca223d27.asciidoc b/docs/doc_examples/b88a2d96da1401d548a4540cca223d27.asciidoc new file mode 100644 index 000000000..dd71f035a --- /dev/null +++ b/docs/doc_examples/b88a2d96da1401d548a4540cca223d27.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchMvt({ + index: "museums", + field: "location", + zoom: 13, + x: 4207, + y: 2692, + grid_agg: "geotile", + grid_precision: 2, + fields: ["name", "price"], + query: { + term: { + included: true, + }, + }, + aggs: { + min_price: { + min: { + field: "price", + }, + }, + max_price: { + max: { + field: "price", + }, + }, + avg_price: { + avg: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b8c03bbd917d0cf5474a3e46ebdd7aad.asciidoc b/docs/doc_examples/b8c03bbd917d0cf5474a3e46ebdd7aad.asciidoc new file mode 100644 index 000000000..752be7fd7 --- /dev/null +++ b/docs/doc_examples/b8c03bbd917d0cf5474a3e46ebdd7aad.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["cjk_bigram"], + text: "東京都は、日本の首都であり", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b8cc74a92bac837bfd8ba6d5935350ed.asciidoc b/docs/doc_examples/b8cc74a92bac837bfd8ba6d5935350ed.asciidoc new file mode 100644 index 000000000..d818285db --- /dev/null +++ b/docs/doc_examples/b8cc74a92bac837bfd8ba6d5935350ed.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + enabled: false, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + user_id: "kimchy", + session_data: { + object: { + some_field: "some_value", + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + fields: [ + "user_id", + { + field: "session_data.object.*", + include_unmapped: true, + }, + ], + _source: false, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b8dc3764c4467922474b2cdec74bb86b.asciidoc b/docs/doc_examples/b8dc3764c4467922474b2cdec74bb86b.asciidoc new file mode 100644 index 000000000..c04dca75e --- /dev/null +++ b/docs/doc_examples/b8dc3764c4467922474b2cdec74bb86b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.startTransform({ + transform_id: "last-log-from-clientip", +}); +console.log(response); 
+---- diff --git a/docs/doc_examples/b8e6e320a19936f6edfc242ccb5cde43.asciidoc b/docs/doc_examples/b8e6e320a19936f6edfc242ccb5cde43.asciidoc new file mode 100644 index 000000000..fe98fe20a --- /dev/null +++ b/docs/doc_examples/b8e6e320a19936f6edfc242ccb5cde43.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + names: ["John Abraham", "Lincoln Smith"], + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + query: { + match_phrase: { + names: { + query: "Abraham Lincoln", + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match_phrase: { + names: { + query: "Abraham Lincoln", + slop: 101, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b918d6b798da673a33e49b94f61dcdc0.asciidoc b/docs/doc_examples/b918d6b798da673a33e49b94f61dcdc0.asciidoc deleted file mode 100644 index 688866783..000000000 --- a/docs/doc_examples/b918d6b798da673a33e49b94f61dcdc0.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - id: '1', - timeout: '5m', - body: { - user: 'kimchy', - post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b919f88e6f47a40d5793479440a90ba6.asciidoc b/docs/doc_examples/b919f88e6f47a40d5793479440a90ba6.asciidoc deleted file mode 100644 index a05989f32..000000000 --- a/docs/doc_examples/b919f88e6f47a40d5793479440a90ba6.asciidoc +++ /dev/null @@ -1,102 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - user: { - type: 'nested' - } - } - } - } -}) -console.log(response0) - -const response1 = await client.index({ - index: 'my_index', - id: '1', - body: { - group: 'fans', - user: [ - { - first: 'John', - last: 'Smith' - }, - { - first: 'Alice', - last: 'White' - } - ] - } -}) -console.log(response1) - -const response2 = await client.search({ - index: 'my_index', - body: { - query: { - nested: { - path: 'user', - query: { - bool: { - must: [ - { - match: { - 'user.first': 'Alice' - } - }, - { - match: { - 'user.last': 'Smith' - } - } - ] - } - } - } - } - } -}) -console.log(response2) - -const response3 = await client.search({ - index: 'my_index', - body: { - query: { - nested: { - path: 'user', - query: { - bool: { - must: [ - { - match: { - 'user.first': 'Alice' - } - }, - { - match: { - 'user.last': 'White' - } - } - ] - } - }, - inner_hits: { - highlight: { - fields: { - 'user.first': {} - } - } - } - } - } - } -}) -console.log(response3) ----- - diff --git a/docs/doc_examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc b/docs/doc_examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc new file mode 100644 index 000000000..25f68fae7 --- /dev/null +++ b/docs/doc_examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["city.*"], + query: "this AND that OR thus", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b93ed4ef309819734f0eeea82e8b0f1f.asciidoc b/docs/doc_examples/b93ed4ef309819734f0eeea82e8b0f1f.asciidoc deleted file mode 100644 index 89988fe85..000000000 --- a/docs/doc_examples/b93ed4ef309819734f0eeea82e8b0f1f.asciidoc +++ /dev/null @@ -1,30 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'sales', - size: '0', - body: { - aggs: { - t_shirts: { - filter: { - term: { - type: 't-shirt' - } - }, - aggs: { - avg_price: { - avg: { - field: 'price' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc b/docs/doc_examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc index 119b1e410..3445735bc 100644 --- a/docs/doc_examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc +++ b/docs/doc_examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc @@ -4,8 +4,8 @@ [source, js] ---- const response = await client.clearScroll({ - scroll_id: 'DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==,DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB' -}) -console.log(response) + scroll_id: + "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==,DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/b968853454b4416f7baa3209eb335957.asciidoc b/docs/doc_examples/b968853454b4416f7baa3209eb335957.asciidoc new file mode 100644 index 000000000..fbd141a5c --- /dev/null +++ b/docs/doc_examples/b968853454b4416f7baa3209eb335957.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggs: { + cities: { + terms: { + field: "city.keyword", + }, + aggs: { + centroid: { + cartesian_centroid: { + field: "location", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b96f465abb658fe32889c3d183f159a3.asciidoc b/docs/doc_examples/b96f465abb658fe32889c3d183f159a3.asciidoc new file mode 100644 index 000000000..8d6be0bbe --- /dev/null +++ b/docs/doc_examples/b96f465abb658fe32889c3d183f159a3.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "limit_example", + settings: { + analysis: { + analyzer: { + standard_one_token_limit: { + tokenizer: "standard", + filter: ["limit"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b997885974522ef439d5e345924cc5ba.asciidoc b/docs/doc_examples/b997885974522ef439d5e345924cc5ba.asciidoc deleted file mode 100644 index b7d1582c0..000000000 --- 
a/docs/doc_examples/b997885974522ef439d5e345924cc5ba.asciidoc +++ /dev/null @@ -1,39 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.index({ - index: 'my_index', - id: '1', - refresh: true, - body: { - product: 'chocolate', - price: [ - 20, - 4 - ] - } -}) -console.log(response0) - -const response1 = await client.search({ - body: { - query: { - term: { - product: 'chocolate' - } - }, - sort: [ - { - price: { - order: 'asc', - mode: 'avg' - } - } - ] - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/b9a8f39ab9b1ed18c6c1db61ac4e6a9e.asciidoc b/docs/doc_examples/b9a8f39ab9b1ed18c6c1db61ac4e6a9e.asciidoc new file mode 100644 index 000000000..b68c2dadb --- /dev/null +++ b/docs/doc_examples/b9a8f39ab9b1ed18c6c1db61ac4e6a9e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "_current", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b9f716219359a6c973dafc50b348de33.asciidoc b/docs/doc_examples/b9f716219359a6c973dafc50b348de33.asciidoc new file mode 100644 index 000000000..4e8a2557e --- /dev/null +++ b/docs/doc_examples/b9f716219359a6c973dafc50b348de33.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + _source: { + enabled: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba07330ed3291b3970f4eb01dacd8086.asciidoc b/docs/doc_examples/ba07330ed3291b3970f4eb01dacd8086.asciidoc new file mode 100644 index 000000000..f9c560d5f --- /dev/null +++ b/docs/doc_examples/ba07330ed3291b3970f4eb01dacd8086.asciidoc @@ -0,0 +1,105 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (4.912350 52.374081)", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (4.901618 52.369219)", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (4.405200 51.222900)", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (2.336389 48.861111)", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (2.327000 48.860000)", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + aggs: { + rings_around_amsterdam: { + geo_distance: { + field: "location", + origin: "POINT (4.894 52.3760)", + ranges: [ + { + to: 100000, + }, + { + from: 100000, + to: 300000, + }, + { + from: 300000, + }, + ], + }, + }, + }, +}); +console.log(response2); 
+---- diff --git a/docs/doc_examples/ba0b4081c98f3387f76b77847c52ee9a.asciidoc b/docs/doc_examples/ba0b4081c98f3387f76b77847c52ee9a.asciidoc deleted file mode 100644 index b68015bb2..000000000 --- a/docs/doc_examples/ba0b4081c98f3387f76b77847c52ee9a.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.close({ - index: 'twitter' -}) -console.log(response0) - -const response1 = await client.indices.putSettings({ - index: 'twitter', - body: { - analysis: { - analyzer: { - content: { - type: 'custom', - tokenizer: 'whitespace' - } - } - } - } -}) -console.log(response1) - -const response2 = await client.indices.open({ - index: 'twitter' -}) -console.log(response2) ----- - diff --git a/docs/doc_examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc b/docs/doc_examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc new file mode 100644 index 000000000..647beec9d --- /dev/null +++ b/docs/doc_examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.follow({ + index: ".ds-logs-mysql-default_copy-2022-01-01-000001", + remote_cluster: "remote_cluster", + leader_index: ".ds-logs-mysql-default-2022-01-01-000001", + data_stream_name: "logs-mysql-default_copy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc b/docs/doc_examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc new file mode 100644 index 000000000..e5f5724e2 --- /dev/null +++ b/docs/doc_examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.updateUserProfileData({ + uid: "u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0", + labels: { + direction: "east", + }, + data: { + app1: { + theme: "default", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba3b9783aa188c6841e1926c5ab1472d.asciidoc b/docs/doc_examples/ba3b9783aa188c6841e1926c5ab1472d.asciidoc new file mode 100644 index 000000000..3fbe7f25e --- /dev/null +++ b/docs/doc_examples/ba3b9783aa188c6841e1926c5ab1472d.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1", "index2"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba5dc6fb9bbe1406714da5d641462a23.asciidoc b/docs/doc_examples/ba5dc6fb9bbe1406714da5d641462a23.asciidoc new file mode 100644 index 000000000..4287d506e --- /dev/null +++ b/docs/doc_examples/ba5dc6fb9bbe1406714da5d641462a23.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + strings_as_ip: { + match_mapping_type: "string", + match: "ip*", + runtime: { + type: "ip", + }, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc b/docs/doc_examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc new file mode 100644 index 000000000..3fd0c946f --- /dev/null +++ b/docs/doc_examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getTemplate({ + name: "temp*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba66768ed04f7b87906badff40ff40ed.asciidoc b/docs/doc_examples/ba66768ed04f7b87906badff40ff40ed.asciidoc new file mode 100644 index 000000000..5e98e21e6 --- /dev/null +++ b/docs/doc_examples/ba66768ed04f7b87906badff40ff40ed.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50gb", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba8c3578613ae0bf890f6a05706ce776.asciidoc b/docs/doc_examples/ba8c3578613ae0bf890f6a05706ce776.asciidoc new file mode 100644 index 000000000..46ae56d49 --- /dev/null +++ b/docs/doc_examples/ba8c3578613ae0bf890f6a05706ce776.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + filter_path: "-hits.events._source", + query: '\n process where process.name == "regsvr32.exe"\n ', + fields: [ + "event.type", + "process.*", + { + field: "@timestamp", + format: "epoch_millis", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba9a5f66a6148612de0ad2491fd6c90d.asciidoc b/docs/doc_examples/ba9a5f66a6148612de0ad2491fd6c90d.asciidoc new file mode 100644 index 000000000..c56554773 --- /dev/null +++ b/docs/doc_examples/ba9a5f66a6148612de0ad2491fd6c90d.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "classic", + max_token_length: 5, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc b/docs/doc_examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc new file mode 100644 index 000000000..6dc62aec5 --- /dev/null +++ b/docs/doc_examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.disableUserProfile({ + uid: "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bab4c3b22c1768fcc7153345e4096dfb.asciidoc 
b/docs/doc_examples/bab4c3b22c1768fcc7153345e4096dfb.asciidoc new file mode 100644 index 000000000..0cdc5da85 --- /dev/null +++ b/docs/doc_examples/bab4c3b22c1768fcc7153345e4096dfb.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["keyword_repeat", "stemmer", "remove_duplicates"], + text: "jumping dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb067c049331cc850a77b18bdfff81b5.asciidoc b/docs/doc_examples/bb067c049331cc850a77b18bdfff81b5.asciidoc new file mode 100644 index 000000000..a2845c63c --- /dev/null +++ b/docs/doc_examples/bb067c049331cc850a77b18bdfff81b5.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "lithuanian_example", + settings: { + analysis: { + filter: { + lithuanian_stop: { + type: "stop", + stopwords: "_lithuanian_", + }, + lithuanian_keywords: { + type: "keyword_marker", + keywords: ["pavyzdys"], + }, + lithuanian_stemmer: { + type: "stemmer", + language: "lithuanian", + }, + }, + analyzer: { + rebuilt_lithuanian: { + tokenizer: "standard", + filter: [ + "lowercase", + "lithuanian_stop", + "lithuanian_keywords", + "lithuanian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb143628fd04070683eeeadc9406d9cc.asciidoc b/docs/doc_examples/bb143628fd04070683eeeadc9406d9cc.asciidoc deleted file mode 100644 index 04d174d4f..000000000 --- a/docs/doc_examples/bb143628fd04070683eeeadc9406d9cc.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - id: '1', - body: { - user: 'kimchy', - post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/bb28d1f7f3f09f5061d7f4351aee89fc.asciidoc b/docs/doc_examples/bb28d1f7f3f09f5061d7f4351aee89fc.asciidoc new file mode 100644 index 000000000..8fb900dc0 --- /dev/null +++ b/docs/doc_examples/bb28d1f7f3f09f5061d7f4351aee89fc.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role4", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + grant: ["customer.*"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc b/docs/doc_examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc new file mode 100644 index 000000000..a8bb92321 --- /dev/null +++ b/docs/doc_examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.disableUser({ + username: "jacknich", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb5a1319c496acc862c670cc7224e59a.asciidoc b/docs/doc_examples/bb5a1319c496acc862c670cc7224e59a.asciidoc new file mode 100644 index 000000000..d3998b385 
--- /dev/null +++ b/docs/doc_examples/bb5a1319c496acc862c670cc7224e59a.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information", + processors: [ + { + attachment: { + field: "data", + indexed_chars: 11, + indexed_chars_field: "max_size", + remove_binary: false, + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "attachment", + document: { + data: "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/bb64a7228a479f6aeeaccaf7560e11ee.asciidoc b/docs/doc_examples/bb64a7228a479f6aeeaccaf7560e11ee.asciidoc new file mode 100644 index 000000000..ddbfb6569 --- /dev/null +++ b/docs/doc_examples/bb64a7228a479f6aeeaccaf7560e11ee.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.putTransform({ + transform_id: "last-log-from-clientip", + source: { + index: ["kibana_sample_data_logs"], + }, + latest: { + unique_key: ["clientip"], + sort: "timestamp", + }, + frequency: "1m", + dest: { + index: "last-log-from-clientip", + }, + sync: { + time: { + field: "timestamp", + delay: "60s", + }, + }, + retention_policy: { + time: { + field: "timestamp", + max_age: "30d", + }, + }, + settings: { + max_page_search_size: 500, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb792e64a4c1f872296073b457aa03c8.asciidoc b/docs/doc_examples/bb792e64a4c1f872296073b457aa03c8.asciidoc new file mode 100644 index 000000000..0c98668fa --- /dev/null +++ b/docs/doc_examples/bb792e64a4c1f872296073b457aa03c8.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.delete({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb975b342de7e838ebf6a36aaa1a8749.asciidoc b/docs/doc_examples/bb975b342de7e838ebf6a36aaa1a8749.asciidoc new file mode 100644 index 000000000..6d510961d --- /dev/null +++ b/docs/doc_examples/bb975b342de7e838ebf6a36aaa1a8749.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 3, + routing: 1, + refresh: "true", + document: { + text: "This is a vote", + my_join_field: { + name: "vote", + parent: "2", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc b/docs/doc_examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc new file mode 100644 index 000000000..f733254ab --- /dev/null +++ b/docs/doc_examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.cat.count({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bc1ad5cc6d3eab98e3ce01f209ba7094.asciidoc b/docs/doc_examples/bc1ad5cc6d3eab98e3ce01f209ba7094.asciidoc deleted file mode 100644 index d44288214..000000000 --- a/docs/doc_examples/bc1ad5cc6d3eab98e3ce01f209ba7094.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test', - alias: 'alias1', - routing: '1' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/bc4d308069af23929a49d856f6bc3008.asciidoc b/docs/doc_examples/bc4d308069af23929a49d856f6bc3008.asciidoc new file mode 100644 index 000000000..0f34216b6 --- /dev/null +++ b/docs/doc_examples/bc4d308069af23929a49d856f6bc3008.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggs: { + rings: { + geo_distance: { + field: "location", + origin: "POINT (4.894 52.3760)", + unit: "km", + distance_type: "plane", + ranges: [ + { + to: 100, + }, + { + from: 100, + to: 300, + }, + { + from: 300, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc b/docs/doc_examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc new file mode 100644 index 000000000..6463ea56c --- /dev/null +++ b/docs/doc_examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rankEval({ + index: "my-index-000001", + requests: [ + { + id: "JFK query", + request: { + query: { + match_all: {}, + }, + }, + ratings: [], + }, + ], + metric: { + dcg: { + k: 20, + normalize: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc b/docs/doc_examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc new file mode 100644 index 000000000..e0bcddedd --- /dev/null +++ b/docs/doc_examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.fieldUsageStats({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bcbd4d4749126837723438ff4faeb0f6.asciidoc b/docs/doc_examples/bcbd4d4749126837723438ff4faeb0f6.asciidoc new file mode 100644 index 000000000..d4b74e787 --- /dev/null +++ b/docs/doc_examples/bcbd4d4749126837723438ff4faeb0f6.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + filter_path: "aggregations", + size: 0, + aggs: { + top_values: { + terms: { + field: "my-field", + size: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc b/docs/doc_examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc index 1708d0956..22c9c01d9 100644 --- 
a/docs/doc_examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc +++ b/docs/doc_examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc @@ -1,7 +1,10 @@ -[source,js] +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] ---- const response = await client.search({ - index: 'books' -}) -console.log(response) + index: "books", +}); +console.log(response); ---- diff --git a/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc new file mode 100644 index 000000000..f9344050e --- /dev/null +++ b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.simulate.ingest({}); +console.log(response); +---- diff --git a/docs/doc_examples/bd0d30a7683037e1ebadd163514765d4.asciidoc b/docs/doc_examples/bd0d30a7683037e1ebadd163514765d4.asciidoc new file mode 100644 index 000000000..dfc485635 --- /dev/null +++ b/docs/doc_examples/bd0d30a7683037e1ebadd163514765d4.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "basic_users", + roles: ["user"], + rules: { + any: [ + { + field: { + groups: "cn=users,dc=example,dc=com", + }, + }, + { + field: { + dn: "cn=John Doe,cn=contractors,dc=example,dc=com", + }, + }, + ], + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd1e55b8cb2ca9e496e223e717d76640.asciidoc b/docs/doc_examples/bd1e55b8cb2ca9e496e223e717d76640.asciidoc new file mode 100644 index 000000000..a175b8350 --- /dev/null +++ b/docs/doc_examples/bd1e55b8cb2ca9e496e223e717d76640.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_polygon: { + "person.location": { + points: ["40, -70", "30, -80", "20, -90"], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4a396cd07657b3977713fb3a742c41b.asciidoc b/docs/doc_examples/bd23c3a03907b1238dcb07ab9eecae7b.asciidoc similarity index 71% rename from docs/doc_examples/a4a396cd07657b3977713fb3a742c41b.asciidoc rename to docs/doc_examples/bd23c3a03907b1238dcb07ab9eecae7b.asciidoc index 7b99e9855..05746a7fc 100644 --- a/docs/doc_examples/a4a396cd07657b3977713fb3a742c41b.asciidoc +++ b/docs/doc_examples/bd23c3a03907b1238dcb07ab9eecae7b.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.updateByQuery({ - index: 'twitter', - conflicts: 'proceed' -}) -console.log(response) + index: "my-index-000001", + scroll_size: 100, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/bd298b11933605c641626750c981d70b.asciidoc b/docs/doc_examples/bd298b11933605c641626750c981d70b.asciidoc new file mode 100644 index 000000000..c2ca58040 --- /dev/null +++ b/docs/doc_examples/bd298b11933605c641626750c981d70b.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + 
name: "ct1", + template: { + settings: { + "index.number_of_shards": 2, + }, + }, +}); +console.log(response); + +const response1 = await client.cluster.putComponentTemplate({ + name: "ct2", + template: { + settings: { + "index.number_of_replicas": 0, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.simulateTemplate({ + index_patterns: ["my*"], + template: { + settings: { + "index.number_of_shards": 3, + }, + }, + composed_of: ["ct1", "ct2"], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/bd2a387e8c21bf01a1039e81d7602921.asciidoc b/docs/doc_examples/bd2a387e8c21bf01a1039e81d7602921.asciidoc new file mode 100644 index 000000000..f1f4feed3 --- /dev/null +++ b/docs/doc_examples/bd2a387e8c21bf01a1039e81d7602921.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-search-template", + script: { + lang: "mustache", + source: { + query: { + multi_match: { + query: "{{query_string}}", + fields: "[{{#text_fields}}{{user_name}},{{/text_fields}}]", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd3d710ec50a151453e141691163af72.asciidoc b/docs/doc_examples/bd3d710ec50a151453e141691163af72.asciidoc new file mode 100644 index 000000000..fad53bf5d --- /dev/null +++ b/docs/doc_examples/bd3d710ec50a151453e141691163af72.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + group_by: "parents", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd458073196a19ecdeb24a8016488c20.asciidoc b/docs/doc_examples/bd458073196a19ecdeb24a8016488c20.asciidoc new file mode 100644 index 000000000..e41d391d0 --- /dev/null +++ b/docs/doc_examples/bd458073196a19ecdeb24a8016488c20.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteIndexTemplate({ + name: "my-index-template", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc b/docs/doc_examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc new file mode 100644 index 000000000..920798532 --- /dev/null +++ b/docs/doc_examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveIndex({ + name: "f*,remoteCluster1:bar*", + expand_wildcards: "all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd5918ab903c0889bb1f09c8c2466e43.asciidoc b/docs/doc_examples/bd5918ab903c0889bb1f09c8c2466e43.asciidoc deleted file mode 100644 index b8e035f6f..000000000 --- a/docs/doc_examples/bd5918ab903c0889bb1f09c8c2466e43.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'users', - body: { - mappings: { - properties: { - user_id: { - type: 'long' - } - } - } - } -}) 
-console.log(response) ----- - diff --git a/docs/doc_examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc b/docs/doc_examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc new file mode 100644 index 000000000..fa54a3549 --- /dev/null +++ b/docs/doc_examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "shrink-index", + policy: { + phases: { + warm: { + min_age: "5d", + actions: { + shrink: { + number_of_shards: 1, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd68666ca2e0be12f7624016317a62bc.asciidoc b/docs/doc_examples/bd68666ca2e0be12f7624016317a62bc.asciidoc new file mode 100644 index 000000000..821f169b6 --- /dev/null +++ b/docs/doc_examples/bd68666ca2e0be12f7624016317a62bc.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + groups: "_all", +}); +console.log(response); + +const response1 = await client.nodes.stats({ + metric: "indices", + groups: "foo,bar", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc b/docs/doc_examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc new file mode 100644 index 000000000..d40a96760 --- /dev/null +++ b/docs/doc_examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearApiKeyCache({ + ids: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd7330af2609bdd8aa10958f5e640b93.asciidoc b/docs/doc_examples/bd7330af2609bdd8aa10958f5e640b93.asciidoc new file mode 100644 index 000000000..a063913c0 --- /dev/null +++ b/docs/doc_examples/bd7330af2609bdd8aa10958f5e640b93.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my_queries2", + id: 2, + refresh: "true", + document: { + query: { + match: { + "my_field.suffix": "xyz", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd767ea03171fe71c73f58f16d5da92f.asciidoc b/docs/doc_examples/bd767ea03171fe71c73f58f16d5da92f.asciidoc new file mode 100644 index 000000000..810d4a7cd --- /dev/null +++ b/docs/doc_examples/bd767ea03171fe71c73f58f16d5da92f.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "file-path-test", + query: { + match: { + file_path: "/User/bob/photos/2017/05", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd7a1417fc27b5a801334ec44462b376.asciidoc b/docs/doc_examples/bd7a1417fc27b5a801334ec44462b376.asciidoc new file mode 100644 index 000000000..d1fd5b125 --- /dev/null +++ b/docs/doc_examples/bd7a1417fc27b5a801334ec44462b376.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.cat.mlDatafeeds({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd7fa2f122ab861cd00e0b9154d120b3.asciidoc b/docs/doc_examples/bd7fa2f122ab861cd00e0b9154d120b3.asciidoc new file mode 100644 index 000000000..47d5b8d52 --- /dev/null +++ b/docs/doc_examples/bd7fa2f122ab861cd00e0b9154d120b3.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + "@timestamp": { + format: "strict_date_optional_time||epoch_second", + type: "date", + }, + message: { + type: "wildcard", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc b/docs/doc_examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc index c664a5c31..961cd52e4 100644 --- a/docs/doc_examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc +++ b/docs/doc_examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.updateByQueryRethrottle({ - task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619', - requests_per_second: '-1' -}) -console.log(response) + task_id: "r1A2WoRbTwKZ516z6NEs5A:36619", + requests_per_second: "-1", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc b/docs/doc_examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc new file mode 100644 index 000000000..b53c375b8 --- /dev/null +++ b/docs/doc_examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/sparse_embedding/my-elser-endpoint", + body: { + service: "elser", + service_settings: { + num_allocations: 1, + num_threads: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc b/docs/doc_examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc new file mode 100644 index 000000000..5d422bbb9 --- /dev/null +++ b/docs/doc_examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + id: "my_watch", + record_execution: true, +}); +console.log(response); + +const response1 = await client.watcher.getWatch({ + id: "my_watch", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/bdc68012c121062628d6d73468bf4866.asciidoc b/docs/doc_examples/bdc68012c121062628d6d73468bf4866.asciidoc new file mode 100644 index 000000000..63445d9ea --- /dev/null +++ b/docs/doc_examples/bdc68012c121062628d6d73468bf4866.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.cleanupRepository({ + name: "my_repository", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc b/docs/doc_examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc new file mode 100644 index 000000000..4da9dfa63 --- /dev/null +++ b/docs/doc_examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc @@ -0,0 +1,10 @@ +// This file 
is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.get({ + name: "my-app", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdfb86cdfffb9d2ee6e3d399f00a57b0.asciidoc b/docs/doc_examples/bdfb86cdfffb9d2ee6e3d399f00a57b0.asciidoc new file mode 100644 index 000000000..fd79781de --- /dev/null +++ b/docs/doc_examples/bdfb86cdfffb9d2ee6e3d399f00a57b0.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test*", + filter_path: "aggregations", + aggs: { + ip: { + terms: { + field: "ip", + }, + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + s: "desc", + }, + size: 1, + }, + }, + having_tm: { + bucket_selector: { + buckets_path: { + top_m: "tm[m]", + }, + script: "params.top_m < 1000", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be1bd47393646ac6bbee177d1cdb7738.asciidoc b/docs/doc_examples/be1bd47393646ac6bbee177d1cdb7738.asciidoc deleted file mode 100644 index e6082026e..000000000 --- a/docs/doc_examples/be1bd47393646ac6bbee177d1cdb7738.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'title', - 'content' - ], - query: 'this that thus', - minimum_should_match: 2 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/be285eef1d2df0dfcf876e2d4b361f1e.asciidoc b/docs/doc_examples/be285eef1d2df0dfcf876e2d4b361f1e.asciidoc new file mode 100644 index 000000000..78b727a26 --- /dev/null +++ b/docs/doc_examples/be285eef1d2df0dfcf876e2d4b361f1e.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "common_grams_example", + settings: { + analysis: { + analyzer: { + index_grams: { + tokenizer: "whitespace", + filter: ["common_grams_query"], + }, + }, + filter: { + common_grams_query: { + type: "common_grams", + common_words: ["a", "is", "the"], + ignore_case: true, + query_mode: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be30ea12f605fd61acba689b68e00bbe.asciidoc b/docs/doc_examples/be30ea12f605fd61acba689b68e00bbe.asciidoc new file mode 100644 index 000000000..b3f5d0d2b --- /dev/null +++ b/docs/doc_examples/be30ea12f605fd61acba689b68e00bbe.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "hugging_face_embeddings", + processors: [ + { + inference: { + model_id: "hugging_face_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc b/docs/doc_examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc index 9bfabc15d..61e1776a5 100644 --- a/docs/doc_examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc +++ 
b/docs/doc_examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.tasks.get({ - task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619' -}) -console.log(response) + task_id: "r1A2WoRbTwKZ516z6NEs5A:36619", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/52a87b81e4e0b6b11e23e85db1602a63.asciidoc b/docs/doc_examples/be5b415d7f33d6f0397ac2f8b5c10521.asciidoc similarity index 56% rename from docs/doc_examples/52a87b81e4e0b6b11e23e85db1602a63.asciidoc rename to docs/doc_examples/be5b415d7f33d6f0397ac2f8b5c10521.asciidoc index ab0bba967..27b13645c 100644 --- a/docs/doc_examples/52a87b81e4e0b6b11e23e85db1602a63.asciidoc +++ b/docs/doc_examples/be5b415d7f33d6f0397ac2f8b5c10521.asciidoc @@ -4,16 +4,12 @@ [source, js] ---- const response = await client.updateByQuery({ - index: 'twitter', - conflicts: 'proceed', - body: { - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) + index: "my-index-000001", + refresh: "true", + slices: 5, + script: { + source: "ctx._source['extra'] = 'test'", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/be5c5a9c25901737585e4fff9195da3c.asciidoc b/docs/doc_examples/be5c5a9c25901737585e4fff9195da3c.asciidoc new file mode 100644 index 000000000..7cbd2aa06 --- /dev/null +++ b/docs/doc_examples/be5c5a9c25901737585e4fff9195da3c.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-bit-vectors", + filter_path: "hits.hits", + query: { + knn: { + query_vector: [127, -127, 0, 1, 42], + field: "my_vector", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be5d62e7c8f63687c585305fbe70d7d0.asciidoc b/docs/doc_examples/be5d62e7c8f63687c585305fbe70d7d0.asciidoc new file mode 100644 index 000000000..818de22d5 --- /dev/null +++ b/docs/doc_examples/be5d62e7c8f63687c585305fbe70d7d0.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time", + tdigest: { + compression: 200, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc b/docs/doc_examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc new file mode 100644 index 000000000..a988440a3 --- /dev/null +++ b/docs/doc_examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + scripted_upsert: true, + script: { + source: + "\n if ( ctx.op == 'create' ) {\n ctx._source.counter = params.count\n } else {\n ctx._source.counter += params.count\n }\n ", + params: { + count: 4, + }, + }, + upsert: {}, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc b/docs/doc_examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc new file mode 100644 index 000000000..42e3fb761 --- /dev/null +++ b/docs/doc_examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.ml.putTrainedModelDefinitionPart({ + model_id: "elastic__distilbert-base-uncased-finetuned-conll03-english", + part: 0, + definition: "...", + total_definition_length: 265632637, + total_parts: 64, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be9376b1e354ad9c6bdad83f6a0ce5ad.asciidoc b/docs/doc_examples/be9376b1e354ad9c6bdad83f6a0ce5ad.asciidoc new file mode 100644 index 000000000..b76dc5c90 --- /dev/null +++ b/docs/doc_examples/be9376b1e354ad9c6bdad83f6a0ce5ad.asciidoc @@ -0,0 +1,61 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_flights", + query: { + bool: { + filter: [ + { + term: { + Cancelled: false, + }, + }, + ], + }, + }, + }, + dest: { + index: "sample_flight_delays_by_carrier", + }, + pivot: { + group_by: { + carrier: { + terms: { + field: "Carrier", + }, + }, + }, + aggregations: { + flights_count: { + value_count: { + field: "FlightNum", + }, + }, + delay_mins_total: { + sum: { + field: "FlightDelayMin", + }, + }, + flight_mins_total: { + sum: { + field: "FlightTimeMin", + }, + }, + delay_time_percentage: { + bucket_script: { + buckets_path: { + delay_time: "delay_mins_total.value", + flight_time: "flight_mins_total.value", + }, + script: "(params.delay_time / params.flight_time) * 100", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be9836fe55c5fada404a2adc1663d832.asciidoc b/docs/doc_examples/be9836fe55c5fada404a2adc1663d832.asciidoc new file mode 100644 index 000000000..e8b24cc01 --- /dev/null +++ b/docs/doc_examples/be9836fe55c5fada404a2adc1663d832.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + http: { + type: "composite", + script: 'emit(grok("%{COMMONAPACHELOG}").extract(doc["message"].value))', + fields: { + clientip: { + type: "ip", + }, + verb: { + type: "keyword", + }, + response: { + type: "long", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc b/docs/doc_examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc new file mode 100644 index 000000000..5a0da457f --- /dev/null +++ b/docs/doc_examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "snapshot_*", + sort: "start_time", + from_sort_value: 1577833200000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc b/docs/doc_examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc new file mode 100644 index 000000000..ae9b3f6f0 --- /dev/null +++ b/docs/doc_examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + user_identifier: { + type: 
"keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/beba2a9795c8a13653e1edf64eec4357.asciidoc b/docs/doc_examples/beba2a9795c8a13653e1edf64eec4357.asciidoc new file mode 100644 index 000000000..dcf66fbd5 --- /dev/null +++ b/docs/doc_examples/beba2a9795c8a13653e1edf64eec4357.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "test", + settings: { + "index.routing.allocation.require.size": "big", + "index.routing.allocation.require.rack": "rack1", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bed14cc152522ca0726ac3746ebc31db.asciidoc b/docs/doc_examples/bed14cc152522ca0726ac3746ebc31db.asciidoc new file mode 100644 index 000000000..6b1fa92ed --- /dev/null +++ b/docs/doc_examples/bed14cc152522ca0726ac3746ebc31db.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my_index", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + my_counter: 0, + }, + { + index: { + _id: 2, + }, + }, + { + my_counter: 9223372036854776000, + }, + { + index: { + _id: 3, + }, + }, + { + my_counter: 18446744073709552000, + }, + { + index: { + _id: 4, + }, + }, + { + my_counter: 18446744073709552000, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf17440ac178d2ef5f5be643d033920b.asciidoc b/docs/doc_examples/bf17440ac178d2ef5f5be643d033920b.asciidoc new file mode 100644 index 000000000..240f711b1 --- /dev/null +++ b/docs/doc_examples/bf17440ac178d2ef5f5be643d033920b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "my-index", + pipeline: "elser-v2-test", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc b/docs/doc_examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc new file mode 100644 index 000000000..5af1d209c --- /dev/null +++ b/docs/doc_examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "remote_user", + password: "", + roles: ["remote1"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf2e6ea2bae621b9b2fee7003e891f86.asciidoc b/docs/doc_examples/bf2e6ea2bae621b9b2fee7003e891f86.asciidoc new file mode 100644 index 000000000..bfc119d2c --- /dev/null +++ b/docs/doc_examples/bf2e6ea2bae621b9b2fee7003e891f86.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + query: { + simple_query_string: { + fields: ["body"], + query: "ski", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6799d132c1c7ca3970763acde2337ef9.asciidoc b/docs/doc_examples/bf3f520b47581d861e802730aaf2a519.asciidoc similarity index 54% 
rename from docs/doc_examples/6799d132c1c7ca3970763acde2337ef9.asciidoc rename to docs/doc_examples/bf3f520b47581d861e802730aaf2a519.asciidoc index e22d0dadc..a2f8a7bfd 100644 --- a/docs/doc_examples/6799d132c1c7ca3970763acde2337ef9.asciidoc +++ b/docs/doc_examples/bf3f520b47581d861e802730aaf2a519.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test*', - alias: 'all_test_indices' - } - } - ] - } -}) -console.log(response) + actions: [ + { + add: { + index: "logs-nginx.access-prod", + alias: "logs", + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/bf448c3889c18266e2e6d3af4f614da2.asciidoc b/docs/doc_examples/bf448c3889c18266e2e6d3af4f614da2.asciidoc new file mode 100644 index 000000000..98dc289b4 --- /dev/null +++ b/docs/doc_examples/bf448c3889c18266e2e6d3af4f614da2.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: ".ds-my-data-stream-2099-03-08-000003", + id: "bfspvnIBr7VVZlfp2lqX", + if_seq_no: 0, + if_primary_term: 1, + document: { + "@timestamp": "2099-03-08T11:06:07.000Z", + user: { + id: "8a4f500d", + }, + message: "Login successful", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf639275d0818be04317ee5ab6075da6.asciidoc b/docs/doc_examples/bf639275d0818be04317ee5ab6075da6.asciidoc new file mode 100644 index 000000000..b6894f8b7 --- /dev/null +++ b/docs/doc_examples/bf639275d0818be04317ee5ab6075da6.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + has_parent: { + parent_type: "parent", + query: { + term: { + tag: { + value: "Elasticsearch", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf8680d940c84e43a9483a25548dea57.asciidoc b/docs/doc_examples/bf8680d940c84e43a9483a25548dea57.asciidoc new file mode 100644 index 000000000..312348bb9 --- /dev/null +++ b/docs/doc_examples/bf8680d940c84e43a9483a25548dea57.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + filter: { + autocomplete_filter: { + type: "edge_ngram", + min_gram: 1, + max_gram: 20, + }, + }, + analyzer: { + autocomplete: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "autocomplete_filter"], + }, + }, + }, + }, + mappings: { + properties: { + text: { + type: "text", + analyzer: "autocomplete", + search_analyzer: "standard", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Quick Brown Fox", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match: { + text: { + query: "Quick Br", + operator: "and", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/bf9f13dc6c24cc225a72e32177e9ee02.asciidoc b/docs/doc_examples/bf9f13dc6c24cc225a72e32177e9ee02.asciidoc new file mode 100644 index 000000000..822b94914 --- /dev/null +++ 
b/docs/doc_examples/bf9f13dc6c24cc225a72e32177e9ee02.asciidoc @@ -0,0 +1,73 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_locations", + mappings: { + properties: { + pin: { + properties: { + location: { + type: "geo_point", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my_locations", + id: 1, + document: { + pin: { + location: { + lat: 40.12, + lon: -71.34, + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.create({ + index: "my_geoshapes", + mappings: { + properties: { + pin: { + properties: { + location: { + type: "geo_shape", + }, + }, + }, + }, + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my_geoshapes", + id: 1, + document: { + pin: { + location: { + type: "polygon", + coordinates: [ + [ + [13, 51.5], + [15, 51.5], + [15, 54], + [13, 54], + [13, 51.5], + ], + ], + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/bfb1aa83da8e3f414d50b5ed7894ed33.asciidoc b/docs/doc_examples/bfb1aa83da8e3f414d50b5ed7894ed33.asciidoc new file mode 100644 index 000000000..1d21a7fb7 --- /dev/null +++ b/docs/doc_examples/bfb1aa83da8e3f414d50b5ed7894ed33.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + script_fields: { + my_doubled_field: { + script: { + source: "field('my_field').get(null) * params['multiplier']", + params: { + multiplier: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc b/docs/doc_examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc new file mode 100644 index 000000000..2a993db0b --- /dev/null +++ b/docs/doc_examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "snapshot_2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bfcd65ab85d684d36a8550080032958d.asciidoc b/docs/doc_examples/bfcd65ab85d684d36a8550080032958d.asciidoc deleted file mode 100644 index 618f5153b..000000000 --- a/docs/doc_examples/bfcd65ab85d684d36a8550080032958d.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - q: 'message:number', - size: '0', - terminate_after: '1' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/bfd6fa3f44e6165f8999102f5a8e24d6.asciidoc b/docs/doc_examples/bfd6fa3f44e6165f8999102f5a8e24d6.asciidoc new file mode 100644 index 000000000..77ceb7753 --- /dev/null +++ b/docs/doc_examples/bfd6fa3f44e6165f8999102f5a8e24d6.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index1", + query: { + query_string: { + query: "running with scissors", + fields: ["comment", "comment.english"], + }, + }, + 
highlight: { + order: "score", + fields: { + comment: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc b/docs/doc_examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc index f57620210..518894b96 100644 --- a/docs/doc_examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc +++ b/docs/doc_examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc @@ -4,43 +4,42 @@ [source, js] ---- const response = await client.bulk({ - filter_path: 'items.*.error', - body: [ + filter_path: "items.*.error", + operations: [ { update: { - _id: '5', - _index: 'index1' - } + _id: "5", + _index: "index1", + }, }, { doc: { - my_field: 'baz' - } + my_field: "baz", + }, }, { update: { - _id: '6', - _index: 'index1' - } + _id: "6", + _index: "index1", + }, }, { doc: { - my_field: 'baz' - } + my_field: "baz", + }, }, { update: { - _id: '7', - _index: 'index1' - } + _id: "7", + _index: "index1", + }, }, { doc: { - my_field: 'baz' - } - } - ] -}) -console.log(response) + my_field: "baz", + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/c00c9412609832ebceb9e786dd9542df.asciidoc b/docs/doc_examples/c00c9412609832ebceb9e786dd9542df.asciidoc new file mode 100644 index 000000000..c5c37e0b5 --- /dev/null +++ b/docs/doc_examples/c00c9412609832ebceb9e786dd9542df.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_name", + body: { + name: "Custom connector", + description: "This is my customized connector", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c012f42b26eb8dd9b197644c3ed954cf.asciidoc b/docs/doc_examples/c012f42b26eb8dd9b197644c3ed954cf.asciidoc new file mode 100644 index 000000000..74becb406 --- /dev/null +++ b/docs/doc_examples/c012f42b26eb8dd9b197644c3ed954cf.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 2, + document: { + name: { + first: "Paul", + last: "McCartney", + title: { + value: "Sir", + category: "order of chivalry", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc b/docs/doc_examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc new file mode 100644 index 000000000..df440d14e --- /dev/null +++ b/docs/doc_examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + retriever: { + text_similarity_reranker: { + retriever: { + standard: { + query: { + match: { + text: "How often does the moon hide the sun?", + }, + }, + }, + }, + field: "text", + inference_id: "my-cohere-rerank-model", + inference_text: "How often does the moon hide the sun?", + rank_window_size: 100, + min_score: 0.5, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c03ce952de42eae4b522cedc9fd3d14a.asciidoc b/docs/doc_examples/c03ce952de42eae4b522cedc9fd3d14a.asciidoc new file mode 100644 index 000000000..7ddfea214 --- /dev/null +++ b/docs/doc_examples/c03ce952de42eae4b522cedc9fd3d14a.asciidoc @@ -0,0 +1,14 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c065a200c00e2005d88ec2f0c10c908a.asciidoc b/docs/doc_examples/c065a200c00e2005d88ec2f0c10c908a.asciidoc new file mode 100644 index 000000000..37c356419 --- /dev/null +++ b/docs/doc_examples/c065a200c00e2005d88ec2f0c10c908a.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["shingle"], + text: "quick brown fox jumps", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c067182d385f59ce5952fb9a716fbf05.asciidoc b/docs/doc_examples/c067182d385f59ce5952fb9a716fbf05.asciidoc new file mode 100644 index 000000000..45c34f74e --- /dev/null +++ b/docs/doc_examples/c067182d385f59ce5952fb9a716fbf05.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.postCalendarEvents({ + calendar_id: "planned-outages", + events: [ + { + description: "event 1", + start_time: 1513641600000, + end_time: 1513728000000, + }, + { + description: "event 2", + start_time: 1513814400000, + end_time: 1513900800000, + }, + { + description: "event 3", + start_time: 1514160000000, + end_time: 1514246400000, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/c088ce5291ae28650b6091cdec489398.asciidoc b/docs/doc_examples/c088ce5291ae28650b6091cdec489398.asciidoc new file mode 100644 index 000000000..c555add24 --- /dev/null +++ b/docs/doc_examples/c088ce5291ae28650b6091cdec489398.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 100, + query: { + match: { + title: "elasticsearch", + }, + }, + pit: { + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + keep_alive: "1m", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c0a4b0c1c6eff14da8b152ceb19c1c31.asciidoc b/docs/doc_examples/c0a4b0c1c6eff14da8b152ceb19c1c31.asciidoc new file mode 100644 index 000000000..49f017323 --- /dev/null +++ b/docs/doc_examples/c0a4b0c1c6eff14da8b152ceb19c1c31.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.health(); +console.log(response); + +const response1 = await client.cat.nodes(); +console.log(response1); +---- diff --git a/docs/doc_examples/c0c638e3d218b0ecbe5c4d77c964ae9e.asciidoc b/docs/doc_examples/c0c638e3d218b0ecbe5c4d77c964ae9e.asciidoc new file mode 100644 index 000000000..6ade03c9c --- /dev/null +++ b/docs/doc_examples/c0c638e3d218b0ecbe5c4d77c964ae9e.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + term: { + "user.id": { + value: "kimchy", + boost: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc b/docs/doc_examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc new file mode 100644 index 000000000..ad3b4d462 --- /dev/null +++ b/docs/doc_examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-spo-connector/_configuration", + body: { + values: { + secret_value: "foo-bar", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c0ebaa33e750b87555dc352073f692e8.asciidoc b/docs/doc_examples/c0ebaa33e750b87555dc352073f692e8.asciidoc new file mode 100644 index 000000000..2adf4c42e --- /dev/null +++ b/docs/doc_examples/c0ebaa33e750b87555dc352073f692e8.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.close({ + index: "my-index-000001", +}); +console.log(response); + +const response1 = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + content: { + type: "custom", + tokenizer: "whitespace", + }, + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.open({ + index: "my-index-000001", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d222c6a6ec7a3beca6c97011b0874512.asciidoc b/docs/doc_examples/c0ff8b3db994c4736f7579dde18097d2.asciidoc similarity index 60% rename from docs/doc_examples/d222c6a6ec7a3beca6c97011b0874512.asciidoc rename to docs/doc_examples/c0ff8b3db994c4736f7579dde18097d2.asciidoc index d5a16f507..8227fad3c 100644 --- a/docs/doc_examples/d222c6a6ec7a3beca6c97011b0874512.asciidoc +++ b/docs/doc_examples/c0ff8b3db994c4736f7579dde18097d2.asciidoc @@ -4,11 +4,10 @@ [source, js] ---- const response = await client.getSource({ - index: 'twitter', - id: '1', - _source_includes: '*.id', - _source_excludes: 'entities' -}) -console.log(response) + index: "my-index-000001", + id: 1, + _source_includes: "*.id", + _source_excludes: "entities", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/c10a486a28cbc5b2f15c3474ae31a431.asciidoc b/docs/doc_examples/c10a486a28cbc5b2f15c3474ae31a431.asciidoc new file mode 100644 index 000000000..d2009383f --- /dev/null +++ b/docs/doc_examples/c10a486a28cbc5b2f15c3474ae31a431.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "nightly-snapshots", + schedule: "0 30 1 * * ?", + name: "<nightly-snap-{now/d}>", + repository: "my_repository", + config: { + indices: "*", + include_global_state: true, + }, + retention: { + expire_after: "30d", + min_count: 5, + max_count: 50, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c11c4d6b30e882871bf0074f407149bd.asciidoc b/docs/doc_examples/c11c4d6b30e882871bf0074f407149bd.asciidoc new file mode 100644 index 000000000..7067915ee --- /dev/null +++ b/docs/doc_examples/c11c4d6b30e882871bf0074f407149bd.asciidoc @@ -0,0 +1,16 @@ +//
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + text: "This is a parent document.", + "my-join-field": "my-parent", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c12d6e962f083c728f9397932f05202e.asciidoc b/docs/doc_examples/c12d6e962f083c728f9397932f05202e.asciidoc new file mode 100644 index 000000000..8dc0ab1c1 --- /dev/null +++ b/docs/doc_examples/c12d6e962f083c728f9397932f05202e.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job", + querystring: { + connector_id: "connector-1", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1409f591a01589638d9b00436ce42c0.asciidoc b/docs/doc_examples/c1409f591a01589638d9b00436ce42c0.asciidoc new file mode 100644 index 000000000..e33cc36cb --- /dev/null +++ b/docs/doc_examples/c1409f591a01589638d9b00436ce42c0.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRealms({ + realms: "default_file", + usernames: "rdeniro,alpacino", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc b/docs/doc_examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc new file mode 100644 index 000000000..f823fda2b --- /dev/null +++ b/docs/doc_examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + fields: { + comment: { + type: "plain", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c155d2670ff82b135c7dcec0fc8a3f23.asciidoc b/docs/doc_examples/c155d2670ff82b135c7dcec0fc8a3f23.asciidoc new file mode 100644 index 000000000..b214d0ee9 --- /dev/null +++ b/docs/doc_examples/c155d2670ff82b135c7dcec0fc8a3f23.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.delete({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc b/docs/doc_examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc new file mode 100644 index 000000000..3c99976f3 --- /dev/null +++ b/docs/doc_examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector", + querystring: { + index_name: "search-google-drive", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c186ecf6f799ddff7add1abdecea5821.asciidoc b/docs/doc_examples/c186ecf6f799ddff7add1abdecea5821.asciidoc new file mode 100644 index 000000000..9d4fb8338 
--- /dev/null +++ b/docs/doc_examples/c186ecf6f799ddff7add1abdecea5821.asciidoc @@ -0,0 +1,47 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + full_name: { + type: "text", + store: true, + }, + title: { + type: "text", + store: true, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + full_name: "Alice Ball", + title: "Professor", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + script_fields: { + name_with_title: { + script: { + lang: "painless", + source: + "params._fields['title'].value + ' ' + params._fields['full_name'].value", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/c187b52646cedeebe0716327add65642.asciidoc b/docs/doc_examples/c187b52646cedeebe0716327add65642.asciidoc new file mode 100644 index 000000000..f599f4825 --- /dev/null +++ b/docs/doc_examples/c187b52646cedeebe0716327add65642.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.getAsync({ + id: "FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", + format: "json", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1a39c2628ada04c3ddd61a303b65d44.asciidoc b/docs/doc_examples/c1a39c2628ada04c3ddd61a303b65d44.asciidoc new file mode 100644 index 000000000..b4564c6f3 --- /dev/null +++ b/docs/doc_examples/c1a39c2628ada04c3ddd61a303b65d44.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: + "(24 - hamming(params.queryVector, 'my_byte_dense_vector')) / 24", + params: { + queryVector: [4, 3, 0], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1a895497066a3dac674d4b1a119048d.asciidoc b/docs/doc_examples/c1a895497066a3dac674d4b1a119048d.asciidoc new file mode 100644 index 000000000..08d37316d --- /dev/null +++ b/docs/doc_examples/c1a895497066a3dac674d4b1a119048d.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + pretty: "true", + query: { + term: { + full_text: "Quick Brown Foxes!", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc b/docs/doc_examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc new file mode 100644 index 000000000..dc95c58bc --- /dev/null +++ b/docs/doc_examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_ecommerce", + }, + pivot: { + group_by: { + customer_id: { + 
terms: { + field: "customer_id", + missing_bucket: true, + }, + }, + }, + aggregations: { + max_price: { + max: { + field: "taxful_total_price", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc b/docs/doc_examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc new file mode 100644 index 000000000..7f6259118 --- /dev/null +++ b/docs/doc_examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rankEval({ + index: "my-index-000001", + requests: [ + { + id: "JFK query", + request: { + query: { + match_all: {}, + }, + }, + ratings: [], + }, + ], + metric: { + expected_reciprocal_rank: { + maximum_relevance: 3, + k: 20, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1bb395546102279296534522061829f.asciidoc b/docs/doc_examples/c1bb395546102279296534522061829f.asciidoc new file mode 100644 index 000000000..791890046 --- /dev/null +++ b/docs/doc_examples/c1bb395546102279296534522061829f.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + point: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + point: [ + { + lat: -90, + lon: -80, + }, + { + lat: 10, + lon: 30, + }, + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/c1efc5cfcb3c29711bfe118f1baa28b0.asciidoc b/docs/doc_examples/c1efc5cfcb3c29711bfe118f1baa28b0.asciidoc new file mode 100644 index 000000000..7d4d534cd --- /dev/null +++ b/docs/doc_examples/c1efc5cfcb3c29711bfe118f1baa28b0.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "keyword_example", + settings: { + analysis: { + analyzer: { + rebuilt_keyword: { + tokenizer: "keyword", + filter: [], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc b/docs/doc_examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc new file mode 100644 index 000000000..e9e3a968d --- /dev/null +++ b/docs/doc_examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.flush({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c208de54369379e8d78ab201be18b6be.asciidoc b/docs/doc_examples/c208de54369379e8d78ab201be18b6be.asciidoc new file mode 100644 index 000000000..79835e938 --- /dev/null +++ b/docs/doc_examples/c208de54369379e8d78ab201be18b6be.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + longs_as_strings: { + 
match_mapping_type: "string", + match: "long_*", + unmatch: "*_text", + mapping: { + type: "long", + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + long_num: "5", + long_text: "foo", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc b/docs/doc_examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc new file mode 100644 index 000000000..cdde2a100 --- /dev/null +++ b/docs/doc_examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_query_rules/my-ruleset/_rule/my-rule1", + body: { + type: "pinned", + criteria: [ + { + type: "contains", + metadata: "user_query", + values: ["pugs", "puggles"], + }, + { + type: "exact", + metadata: "user_country", + values: ["us"], + }, + ], + actions: { + ids: ["id1", "id2"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c21eb4bc30087188241cbba6b6b89999.asciidoc b/docs/doc_examples/c21eb4bc30087188241cbba6b6b89999.asciidoc new file mode 100644 index 000000000..8c6ac6d07 --- /dev/null +++ b/docs/doc_examples/c21eb4bc30087188241cbba6b6b89999.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_service_type", + body: { + service_type: "sharepoint_online", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c23e32775340d7bc6f46820313014d8a.asciidoc b/docs/doc_examples/c23e32775340d7bc6f46820313014d8a.asciidoc new file mode 100644 index 000000000..4b72af338 --- /dev/null +++ b/docs/doc_examples/c23e32775340d7bc6f46820313014d8a.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my_test_scores_2", + pipeline: "my_test_scores_pipeline", + document: { + student: "kimchy", + grad_year: "2099", + math_score: 1200, + verbal_score: 800, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c267e90b7873a7c8c8af06f01e958e69.asciidoc b/docs/doc_examples/c267e90b7873a7c8c8af06f01e958e69.asciidoc new file mode 100644 index 000000000..ede289d19 --- /dev/null +++ b/docs/doc_examples/c267e90b7873a7c8c8af06f01e958e69.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "logs*", + size: 0, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc b/docs/doc_examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc new file mode 100644 index 000000000..1b989add3 --- /dev/null +++ b/docs/doc_examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.scroll({ + scroll_id: "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/c28f0b0dd3246cb91d6facb3295a61d7.asciidoc b/docs/doc_examples/c28f0b0dd3246cb91d6facb3295a61d7.asciidoc new file mode 100644 index 000000000..d47342e95 --- /dev/null +++ b/docs/doc_examples/c28f0b0dd3246cb91d6facb3295a61d7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.close({ + index: "kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc b/docs/doc_examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc index 3934b5aa6..125fe122d 100644 --- a/docs/doc_examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc +++ b/docs/doc_examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.clearScroll({ - scroll_id: '_all' -}) -console.log(response) + scroll_id: "_all", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/c2d7c36daac8608d2515c549b2c82436.asciidoc b/docs/doc_examples/c2d7c36daac8608d2515c549b2c82436.asciidoc new file mode 100644 index 000000000..b8646e655 --- /dev/null +++ b/docs/doc_examples/c2d7c36daac8608d2515c549b2c82436.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + tile: { + geotile_grid: { + field: "location", + precision: 22, + bounds: { + top_left: "POINT (4.9 52.4)", + bottom_right: "POINT (5.0 52.3)", + }, + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c318fde926842722825a51e5c9c326a9.asciidoc b/docs/doc_examples/c318fde926842722825a51e5c9c326a9.asciidoc new file mode 100644 index 000000000..20508d4c1 --- /dev/null +++ b/docs/doc_examples/c318fde926842722825a51e5c9c326a9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + text: " fox ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c32a3f8071d87f0a3f5a78e07fe7a669.asciidoc b/docs/doc_examples/c32a3f8071d87f0a3f5a78e07fe7a669.asciidoc deleted file mode 100644 index 219f4e552..000000000 --- a/docs/doc_examples/c32a3f8071d87f0a3f5a78e07fe7a669.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.deleteByQuery({ - index: 'twitter', - routing: '1', - body: { - query: { - range: { - age: { - gte: 10 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc b/docs/doc_examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc new file mode 100644 index 000000000..7c02bc9da --- /dev/null +++ b/docs/doc_examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match_phrase_prefix: { + 
my_field: "brown f", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c4272ad0309ffbcbe9ce96bf9fb4352a.asciidoc b/docs/doc_examples/c4272ad0309ffbcbe9ce96bf9fb4352a.asciidoc new file mode 100644 index 000000000..368f20c7d --- /dev/null +++ b/docs/doc_examples/c4272ad0309ffbcbe9ce96bf9fb4352a.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "place", + pretty: "true", + suggest: { + place_suggestion: { + prefix: "tim", + completion: { + field: "suggest", + size: 10, + contexts: { + place_type: ["cafe", "restaurants"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c42bc6e74afc3d43cd032ec2bfd77385.asciidoc b/docs/doc_examples/c42bc6e74afc3d43cd032ec2bfd77385.asciidoc new file mode 100644 index 000000000..78ec45236 --- /dev/null +++ b/docs/doc_examples/c42bc6e74afc3d43cd032ec2bfd77385.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["word_delimiter"], + text: "Neil's-Super-Duper-XL500--42+AutoCoder", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc b/docs/doc_examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc new file mode 100644 index 000000000..f5e4eeae0 --- /dev/null +++ b/docs/doc_examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + locale: "fr-FR", + query: + '\n ROW birth_date_string = "2023-01-15T00:00:00.000Z"\n | EVAL birth_date = date_parse(birth_date_string)\n | EVAL month_of_birth = DATE_FORMAT("MMMM",birth_date)\n | LIMIT 5\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/c464ed2001d66a1446f37659dc9efc2a.asciidoc b/docs/doc_examples/c464ed2001d66a1446f37659dc9efc2a.asciidoc new file mode 100644 index 000000000..de2646cd4 --- /dev/null +++ b/docs/doc_examples/c464ed2001d66a1446f37659dc9efc2a.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + range: { + date_range: { + field: "date", + format: "MM-yyyy", + ranges: [ + { + to: "now-10M/M", + }, + { + from: "now-10M/M", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc b/docs/doc_examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc new file mode 100644 index 000000000..a698efa88 --- /dev/null +++ b/docs/doc_examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.plugins({ + v: "true", + s: "component", + h: "name,component,version,description", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c48b8bcd6f41e0d12b58e854e09ea893.asciidoc b/docs/doc_examples/c48b8bcd6f41e0d12b58e854e09ea893.asciidoc new file mode 100644 index 
000000000..9f1c29078 --- /dev/null +++ b/docs/doc_examples/c48b8bcd6f41e0d12b58e854e09ea893.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "MULTIPOLYGON (((1002.0 200.0, 1003.0 200.0, 1003.0 300.0, 1002.0 300.0, 1002.0 200.0)), ((1000.0 100.0, 1001.0 100.0, 1001.0 101.0, 1000.0 101.0, 1000.0 100.0), (1000.2 100.2, 1000.8 100.2, 1000.8 100.8, 1000.2 100.8, 1000.2 100.2)))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc b/docs/doc_examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc new file mode 100644 index 000000000..41ee348ba --- /dev/null +++ b/docs/doc_examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache(); +console.log(response); +---- diff --git a/docs/doc_examples/c4b278ba293abd0d02a0b5ad1a99f84a.asciidoc b/docs/doc_examples/c4b278ba293abd0d02a0b5ad1a99f84a.asciidoc deleted file mode 100644 index 625c007e3..000000000 --- a/docs/doc_examples/c4b278ba293abd0d02a0b5ad1a99f84a.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.ingest.putPipeline({ - id: 'set-foo', - body: { - description: 'sets foo', - processors: [ - { - set: { - field: 'foo', - value: 'bar' - } - } - ] - } -}) -console.log(response0) - -const response1 = await client.updateByQuery({ - index: 'twitter', - pipeline: 'set-foo' -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/c4b727723b57052b6504bb74fe09abc6.asciidoc b/docs/doc_examples/c4b727723b57052b6504bb74fe09abc6.asciidoc new file mode 100644 index 000000000..19ee3e92b --- /dev/null +++ b/docs/doc_examples/c4b727723b57052b6504bb74fe09abc6.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["template*"], + priority: 1, + template: { + settings: { + number_of_shards: 2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c4c1a87414741a678f6cb91804daf095.asciidoc b/docs/doc_examples/c4c1a87414741a678f6cb91804daf095.asciidoc new file mode 100644 index 000000000..693a16ba0 --- /dev/null +++ b/docs/doc_examples/c4c1a87414741a678f6cb91804daf095.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + rank_feature: { + field: "pagerank", + linear: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c4fadbb7f61e5f83ab3fc9cd4b82b5e5.asciidoc b/docs/doc_examples/c4fadbb7f61e5f83ab3fc9cd4b82b5e5.asciidoc new file mode 100644 index 000000000..745d088b2 --- /dev/null +++ b/docs/doc_examples/c4fadbb7f61e5f83ab3fc9cd4b82b5e5.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples +
+[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", + feature_states: ["geoip"], + include_global_state: false, + indices: "-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc b/docs/doc_examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc new file mode 100644 index 000000000..198997e4a --- /dev/null +++ b/docs/doc_examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-000001", + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c54597143ac86540726f6422fd98b22e.asciidoc b/docs/doc_examples/c54597143ac86540726f6422fd98b22e.asciidoc new file mode 100644 index 000000000..8de8f5661 --- /dev/null +++ b/docs/doc_examples/c54597143ac86540726f6422fd98b22e.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_security/settings", + body: { + security: { + "index.auto_expand_replicas": "0-all", + }, + "security-tokens": { + "index.auto_expand_replicas": "0-all", + }, + "security-profile": { + "index.auto_expand_replicas": "0-all", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c554a1791f29bbbcddda84c64deaba6f.asciidoc b/docs/doc_examples/c554a1791f29bbbcddda84c64deaba6f.asciidoc new file mode 100644 index 000000000..93d08c3c2 --- /dev/null +++ b/docs/doc_examples/c554a1791f29bbbcddda84c64deaba6f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c580092fd3d36c32b09d63921708a67b.asciidoc b/docs/doc_examples/c580092fd3d36c32b09d63921708a67b.asciidoc new file mode 100644 index 000000000..91be3581e --- /dev/null +++ b/docs/doc_examples/c580092fd3d36c32b09d63921708a67b.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + dis_max: { + queries: [ + { + term: { + title: "Quick pets", + }, + }, + { + term: { + body: "Quick pets", + }, + }, + ], + tie_breaker: 0.7, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c5802e9f3f4068fcecb6937b867b270d.asciidoc b/docs/doc_examples/c5802e9f3f4068fcecb6937b867b270d.asciidoc new file mode 100644 index 000000000..a55b94c8b --- /dev/null +++ b/docs/doc_examples/c5802e9f3f4068fcecb6937b867b270d.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + terms: { + field: "genre", + order: { + _count: "asc", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc 
b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc new file mode 100644 index 000000000..a9e30c9b3 --- /dev/null +++ b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkUpdateApiKeys({}); +console.log(response); +---- diff --git a/docs/doc_examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc b/docs/doc_examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc new file mode 100644 index 000000000..18b3c84e7 --- /dev/null +++ b/docs/doc_examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "my-index-000001", + id: 1, + fields: "message", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c5bc577ff92f889225b0d2617adcb48c.asciidoc b/docs/doc_examples/c5bc577ff92f889225b0d2617adcb48c.asciidoc new file mode 100644 index 000000000..9144b19a6 --- /dev/null +++ b/docs/doc_examples/c5bc577ff92f889225b0d2617adcb48c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "process", + filter_path: "**.max_file_descriptors", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c5cc19e48549fbc5327a9d46874bbeee.asciidoc b/docs/doc_examples/c5cc19e48549fbc5327a9d46874bbeee.asciidoc new file mode 100644 index 000000000..fc07f2f81 --- /dev/null +++ b/docs/doc_examples/c5cc19e48549fbc5327a9d46874bbeee.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "quantized-image-index", + knn: { + field: "image-vector", + query_vector: [0.1, -2], + k: 10, + num_candidates: 100, + }, + fields: ["title"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc b/docs/doc_examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc new file mode 100644 index 000000000..31134e4a0 --- /dev/null +++ b/docs/doc_examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-data-stream", + filter_path: "hits.hits._source", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c612d93e7f682a0d731e385edf9f5d56.asciidoc b/docs/doc_examples/c612d93e7f682a0d731e385edf9f5d56.asciidoc deleted file mode 100644 index 7abc351c7..000000000 --- a/docs/doc_examples/c612d93e7f682a0d731e385edf9f5d56.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - obj1: { - type: 'nested' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/c6151a0788a10a7f40da684d72c3255c.asciidoc b/docs/doc_examples/c6151a0788a10a7f40da684d72c3255c.asciidoc new file 
mode 100644 index 000000000..e865fbce5 --- /dev/null +++ b/docs/doc_examples/c6151a0788a10a7f40da684d72c3255c.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: {}, + }, + { + title: "Something really urgent", + labels: { + priority: "urgent", + release: ["v1.2.5", "v1.3.0"], + timestamp: { + created: 1541458026, + closed: 1541457010, + }, + }, + }, + { + index: {}, + }, + { + title: "Somewhat less urgent", + labels: { + priority: "high", + release: ["v1.3.0"], + timestamp: { + created: 1541458026, + closed: 1541457010, + }, + }, + }, + { + index: {}, + }, + { + title: "Not urgent", + labels: { + priority: "low", + release: ["v1.2.0"], + timestamp: { + created: 1541458026, + closed: 1541457010, + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/c630a1f891aa9aa651f9982b832a42e1.asciidoc b/docs/doc_examples/c630a1f891aa9aa651f9982b832a42e1.asciidoc new file mode 100644 index 000000000..bb7f521f2 --- /dev/null +++ b/docs/doc_examples/c630a1f891aa9aa651f9982b832a42e1.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + drop: { + description: "Drop documents that contain 'network.name' of 'Guest'", + if: "ctx.network?.name != null && ctx.network.name.contains('Guest')", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc b/docs/doc_examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc new file mode 100644 index 000000000..f24162ad8 --- /dev/null +++ b/docs/doc_examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "template_1", + template: { + settings: { + number_of_shards: 1, + }, + }, + version: 123, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c639036b87d02fb864e27c4ca29ef833.asciidoc b/docs/doc_examples/c639036b87d02fb864e27c4ca29ef833.asciidoc new file mode 100644 index 000000000..8bd9633cf --- /dev/null +++ b/docs/doc_examples/c639036b87d02fb864e27c4ca29ef833.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "stackoverflow", + size: 0, + query: { + query_string: { + query: "tags:kibana", + }, + }, + runtime_mappings: { + "tags.hash": { + type: "long", + script: "emit(doc['tags'].hashCode())", + }, + }, + aggs: { + my_unbiased_sample: { + diversified_sampler: { + shard_size: 200, + max_docs_per_value: 3, + field: "tags.hash", + }, + aggs: { + keywords: { + significant_terms: { + field: "tags", + exclude: ["kibana"], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c64b61bedb21b9def8fce5092e677af9.asciidoc b/docs/doc_examples/c64b61bedb21b9def8fce5092e677af9.asciidoc new file mode 100644 index 000000000..4e2637039 --- /dev/null +++ b/docs/doc_examples/c64b61bedb21b9def8fce5092e677af9.asciidoc @@ 
-0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + suggest: { + "my-suggest-1": { + text: "tring out Elasticsearch", + term: { + field: "message", + }, + }, + "my-suggest-2": { + text: "kmichy", + term: { + field: "user.id", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c654b09be981be12fc7be0ba33f8652b.asciidoc b/docs/doc_examples/c654b09be981be12fc7be0ba33f8652b.asciidoc new file mode 100644 index 000000000..ed2a6cc50 --- /dev/null +++ b/docs/doc_examples/c654b09be981be12fc7be0ba33f8652b.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "multilinestring", + coordinates: [ + [ + [1002, 200], + [1003, 200], + [1003, 300], + [1002, 300], + ], + [ + [1000, 100], + [1001, 100], + [1001, 100], + [1000, 100], + ], + [ + [1000.2, 100.2], + [1000.8, 100.2], + [1000.8, 100.8], + [1000.2, 100.8], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc b/docs/doc_examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc new file mode 100644 index 000000000..236a07a74 --- /dev/null +++ b/docs/doc_examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.getTransform({ + size: 10, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c66dab0b114fa3e228e1c0e0e5a99b60.asciidoc b/docs/doc_examples/c66dab0b114fa3e228e1c0e0e5a99b60.asciidoc new file mode 100644 index 000000000..4ba09eeaf --- /dev/null +++ b/docs/doc_examples/c66dab0b114fa3e228e1c0e0e5a99b60.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + fields: ["user.first"], + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc b/docs/doc_examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc new file mode 100644 index 000000000..3dc3bcdf4 --- /dev/null +++ b/docs/doc_examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "tring out Elasticsearch", + }, + }, + suggest: { + "my-suggestion": { + text: "tring out Elasticsearch", + term: { + field: "message", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6abe91b5527870face2b826f37ba1da.asciidoc b/docs/doc_examples/c6abe91b5527870face2b826f37ba1da.asciidoc new file mode 100644 index 000000000..926646068 --- /dev/null +++ b/docs/doc_examples/c6abe91b5527870face2b826f37ba1da.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", 
+ query: { + match: { + title: { + query: "mountain lake", + boost: 0.9, + }, + }, + }, + knn: { + field: "image-vector", + query_vector: [54, 10, -2], + k: 5, + num_candidates: 50, + boost: 0.1, + }, + size: 10, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6b365c7da97d7e50f36820a7d36f548.asciidoc b/docs/doc_examples/c6b365c7da97d7e50f36820a7d36f548.asciidoc new file mode 100644 index 000000000..cfd0cc222 --- /dev/null +++ b/docs/doc_examples/c6b365c7da97d7e50f36820a7d36f548.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my_index,my_other_index", + settings: { + "index.number_of_replicas": 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc b/docs/doc_examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc new file mode 100644 index 000000000..788794cca --- /dev/null +++ b/docs/doc_examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deletePipeline({ + id: "pipeline-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6b8713bd49661d69d6b868f5b991d17.asciidoc b/docs/doc_examples/c6b8713bd49661d69d6b868f5b991d17.asciidoc new file mode 100644 index 000000000..7bdb10fe5 --- /dev/null +++ b/docs/doc_examples/c6b8713bd49661d69d6b868f5b991d17.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "job-candidates", + id: 1, + refresh: "true", + document: { + name: "Jane Smith", + programming_languages: ["c++", "java"], + required_matches: 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc b/docs/doc_examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc new file mode 100644 index 000000000..51e78e61c --- /dev/null +++ b/docs/doc_examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + user_id: { + type: "long", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6d39d22188dc7bbfdad811a94cbcc2b.asciidoc b/docs/doc_examples/c6d39d22188dc7bbfdad811a94cbcc2b.asciidoc new file mode 100644 index 000000000..7f81cd4b5 --- /dev/null +++ b/docs/doc_examples/c6d39d22188dc7bbfdad811a94cbcc2b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "classic", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6d5e3b6ff9c665ec5344a4bfa7add80.asciidoc b/docs/doc_examples/c6d5e3b6ff9c665ec5344a4bfa7add80.asciidoc new file mode 100644 index 000000000..06f36c1fe --- /dev/null +++ b/docs/doc_examples/c6d5e3b6ff9c665ec5344a4bfa7add80.asciidoc @@ -0,0 +1,13 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "transport.tracer.include": "*", + "transport.tracer.exclude": "internal:coordination/fault_detection/*", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6f07c53eda4db77305bb14751b3263f.asciidoc b/docs/doc_examples/c6f07c53eda4db77305bb14751b3263f.asciidoc new file mode 100644 index 000000000..10a1ab91e --- /dev/null +++ b/docs/doc_examples/c6f07c53eda4db77305bb14751b3263f.asciidoc @@ -0,0 +1,50 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_backup1", + repository: { + type: "azure", + }, +}); +console.log(response); + +const response1 = await client.snapshot.createRepository({ + name: "my_backup2", + repository: { + type: "azure", + settings: { + container: "backup-container", + base_path: "backups", + chunk_size: "32MB", + compress: true, + }, + }, +}); +console.log(response1); + +const response2 = await client.snapshot.createRepository({ + name: "my_backup3", + repository: { + type: "azure", + settings: { + client: "secondary", + }, + }, +}); +console.log(response2); + +const response3 = await client.snapshot.createRepository({ + name: "my_backup4", + repository: { + type: "azure", + settings: { + client: "secondary", + location_mode: "primary_only", + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/c733f20641b20e124f26198534755d6d.asciidoc b/docs/doc_examples/c733f20641b20e124f26198534755d6d.asciidoc new file mode 100644 index 000000000..4c0759c44 --- /dev/null +++ b/docs/doc_examples/c733f20641b20e124f26198534755d6d.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + aggs: { + "my-first-agg-name": { + terms: { + field: "my-field", + }, + }, + "my-second-agg-name": { + avg: { + field: "my-other-field", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc b/docs/doc_examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc new file mode 100644 index 000000000..ec92cc77c --- /dev/null +++ b/docs/doc_examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.putAutoscalingPolicy({ + name: "my_autoscaling_policy", + policy: { + roles: ["data_hot"], + deciders: { + fixed: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c76cb6a080959b0d87afd780cf814be2.asciidoc b/docs/doc_examples/c76cb6a080959b0d87afd780cf814be2.asciidoc new file mode 100644 index 000000000..6226ba7c2 --- /dev/null +++ b/docs/doc_examples/c76cb6a080959b0d87afd780cf814be2.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + should: [ + { + term: { + message: "quick", + }, + }, + { + term: { + message: "brown", + }, + }, + { + prefix: { + message: "f", + }, + }, 
+ ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c79b284fa7a5d7421c6daae62bc697f9.asciidoc b/docs/doc_examples/c79b284fa7a5d7421c6daae62bc697f9.asciidoc new file mode 100644 index 000000000..2cf87b5b5 --- /dev/null +++ b/docs/doc_examples/c79b284fa7a5d7421c6daae62bc697f9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "kibana_sample_data_ecommerce", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c79e8ee86b332302b25c5c1f5f4f89d7.asciidoc b/docs/doc_examples/c79e8ee86b332302b25c5c1f5f4f89d7.asciidoc new file mode 100644 index 000000000..a8aabae5a --- /dev/null +++ b/docs/doc_examples/c79e8ee86b332302b25c5c1f5f4f89d7.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "dept_role", + indices: [ + { + names: ["*"], + privileges: ["read"], + query: { + term: { + department_id: 12, + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/c8210f23c10d0642f24c1e43faa4deda.asciidoc b/docs/doc_examples/c8210f23c10d0642f24c1e43faa4deda.asciidoc new file mode 100644 index 000000000..4de7de316 --- /dev/null +++ b/docs/doc_examples/c8210f23c10d0642f24c1e43faa4deda.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "my-mappings", + template: { + mappings: { + properties: { + "@timestamp": { + type: "date", + format: "date_optional_time||epoch_millis", + }, + message: { + type: "wildcard", + }, + }, + }, + }, + _meta: { + description: "Mappings for @timestamp and message fields", + "my-custom-meta-field": "More arbitrary metadata", + }, +}); +console.log(response); + +const response1 = await client.cluster.putComponentTemplate({ + name: "my-settings", + template: { + settings: { + "index.lifecycle.name": "my-lifecycle-policy", + }, + }, + _meta: { + description: "Settings for ILM", + "my-custom-meta-field": "More arbitrary metadata", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/c849c6c8f8659dbb93e1c14356f74e37.asciidoc b/docs/doc_examples/c849c6c8f8659dbb93e1c14356f74e37.asciidoc deleted file mode 100644 index 3b868e8eb..000000000 --- a/docs/doc_examples/c849c6c8f8659dbb93e1c14356f74e37.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - city: { - type: 'text' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/47b5ff897f26e9c943cee5c06034181d.asciidoc b/docs/doc_examples/c87038b96ab06d9a741a130f94de4f02.asciidoc similarity index 69% rename from docs/doc_examples/47b5ff897f26e9c943cee5c06034181d.asciidoc rename to docs/doc_examples/c87038b96ab06d9a741a130f94de4f02.asciidoc index d2a05c4fe..b3691d8b5 100644 --- a/docs/doc_examples/47b5ff897f26e9c943cee5c06034181d.asciidoc +++ b/docs/doc_examples/c87038b96ab06d9a741a130f94de4f02.asciidoc @@ -4,10 +4,9 @@ [source, js] ---- const response = await 
client.delete({ - index: 'twitter', - id: '1', - routing: 'kimchy' -}) -console.log(response) + index: "my-index-000001", + id: 1, + timeout: "5m", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/c873f9cd093e26515148f052e28c7805.asciidoc b/docs/doc_examples/c873f9cd093e26515148f052e28c7805.asciidoc new file mode 100644 index 000000000..56f09eb18 --- /dev/null +++ b/docs/doc_examples/c873f9cd093e26515148f052e28c7805.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getModelSnapshots({ + job_id: "high_sum_total_sales", + start: 1575402236000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c8bbf362f06a0d8dab33ec0d99743343.asciidoc b/docs/doc_examples/c8bbf362f06a0d8dab33ec0d99743343.asciidoc new file mode 100644 index 000000000..333d095ab --- /dev/null +++ b/docs/doc_examples/c8bbf362f06a0d8dab33ec0d99743343.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "classic", + filter: ["classic"], + text: "The 2 Q.U.I.C.K. Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c8e2109b19d50467ab83a40006462e9f.asciidoc b/docs/doc_examples/c8e2109b19d50467ab83a40006462e9f.asciidoc new file mode 100644 index 000000000..e83d550e1 --- /dev/null +++ b/docs/doc_examples/c8e2109b19d50467ab83a40006462e9f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.executePolicy({ + name: "my-policy", + wait_for_completion: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c92b761c18d8e1c3df75c04a21503e16.asciidoc b/docs/doc_examples/c92b761c18d8e1c3df75c04a21503e16.asciidoc new file mode 100644 index 000000000..ae890cd27 --- /dev/null +++ b/docs/doc_examples/c92b761c18d8e1c3df75c04a21503e16.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "logs-my_app-settings", + template: { + settings: { + "index.default_pipeline": "logs-my_app-default", + "index.lifecycle.name": "logs", + }, + }, +}); +console.log(response); + +const response1 = await client.indices.putIndexTemplate({ + name: "logs-my_app-template", + index_patterns: ["logs-my_app-*"], + data_stream: {}, + priority: 500, + composed_of: ["logs-my_app-settings", "logs-my_app-mappings"], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc b/docs/doc_examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc new file mode 100644 index 000000000..89b64a70b --- /dev/null +++ b/docs/doc_examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchTemplate({ + index: "my-index", + id: "my-search-template", + params: { + query_string: "hello world", + from: 0, + size: 10, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/c95d5317525c2ff625e6971c277247af.asciidoc b/docs/doc_examples/c95d5317525c2ff625e6971c277247af.asciidoc new file mode 100644 index 000000000..655ad864e --- /dev/null +++ b/docs/doc_examples/c95d5317525c2ff625e6971c277247af.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["lowercase"], + text: "john.SMITH@example.COM", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c96669604d0e66a097ddf3093b025ccd.asciidoc b/docs/doc_examples/c96669604d0e66a097ddf3093b025ccd.asciidoc new file mode 100644 index 000000000..37d2b65d7 --- /dev/null +++ b/docs/doc_examples/c96669604d0e66a097ddf3093b025ccd.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + "my-agg-name": { + terms: { + field: "my-field", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c96e5740b79f703c5b77e3ddc9fdf3a0.asciidoc b/docs/doc_examples/c96e5740b79f703c5b77e3ddc9fdf3a0.asciidoc new file mode 100644 index 000000000..bbc28e291 --- /dev/null +++ b/docs/doc_examples/c96e5740b79f703c5b77e3ddc9fdf3a0.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-index-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + composed_of: ["my-mappings", "my-settings"], + priority: 500, + _meta: { + description: "Template for my time series data", + "my-custom-meta-field": "More arbitrary metadata", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc b/docs/doc_examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc new file mode 100644 index 000000000..673c7aac1 --- /dev/null +++ b/docs/doc_examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.getPolicy(); +console.log(response); +---- diff --git a/docs/doc_examples/c9a6ab0a56bb0177f158277185f68302.asciidoc b/docs/doc_examples/c9a6ab0a56bb0177f158277185f68302.asciidoc new file mode 100644 index 000000000..ad55541b1 --- /dev/null +++ b/docs/doc_examples/c9a6ab0a56bb0177f158277185f68302.asciidoc @@ -0,0 +1,58 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + metrics: { + type: "object", + subobjects: false, + properties: { + time: { + type: "long", + }, + "time.min": { + type: "long", + }, + "time.max": { + type: "long", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "metric_1", + document: { + "metrics.time": 100, + "metrics.time.min": 10, + "metrics.time.max": 900, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: "metric_2", + 
document: {
+    metrics: {
+      time: 100,
+      "time.min": 10,
+      "time.max": 900,
+    },
+  },
+});
+console.log(response2);
+
+const response3 = await client.indices.getMapping({
+  index: "my-index-000001",
+});
+console.log(response3);
+----
diff --git a/docs/doc_examples/c9afa715021f2e6450e72ac73271960c.asciidoc b/docs/doc_examples/c9afa715021f2e6450e72ac73271960c.asciidoc
new file mode 100644
index 000000000..1d51cd87e
--- /dev/null
+++ b/docs/doc_examples/c9afa715021f2e6450e72ac73271960c.asciidoc
@@ -0,0 +1,20 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "parent_example",
+  id: 1,
+  document: {
+    join: {
+      name: "question",
+    },
+    body: "I have Windows 2003 server and i bought a new Windows 2008 server...",
+    title:
+      "Whats the best way to file transfer my site from server to a newer one?",
+    tags: ["windows-server-2003", "windows-server-2008", "file-transfer"],
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/c9b6cbe93c8bd23e3f658c3af4e70092.asciidoc b/docs/doc_examples/c9b6cbe93c8bd23e3f658c3af4e70092.asciidoc
new file mode 100644
index 000000000..4d40dcc30
--- /dev/null
+++ b/docs/doc_examples/c9b6cbe93c8bd23e3f658c3af4e70092.asciidoc
@@ -0,0 +1,67 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  settings: {
+    analysis: {
+      analyzer: {
+        autocomplete: {
+          tokenizer: "autocomplete",
+          filter: ["lowercase"],
+        },
+        autocomplete_search: {
+          tokenizer: "lowercase",
+        },
+      },
+      tokenizer: {
+        autocomplete: {
+          type: "edge_ngram",
+          min_gram: 2,
+          max_gram: 10,
+          token_chars: ["letter"],
+        },
+      },
+    },
+  },
+  mappings: {
+    properties: {
+      title: {
+        type: "text",
+        analyzer: "autocomplete",
+        search_analyzer: "autocomplete_search",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  document: {
+    title: "Quick Foxes",
+  },
+});
+console.log(response1);
+
+const response2 = await client.indices.refresh({
+  index: "my-index-000001",
+});
+console.log(response2);
+
+const response3 = await client.search({
+  index: "my-index-000001",
+  query: {
+    match: {
+      title: {
+        query: "Quick Fo",
+        operator: "and",
+      },
+    },
+  },
+});
+console.log(response3);
+----
diff --git a/docs/doc_examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc b/docs/doc_examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc
new file mode 100644
index 000000000..819634ad8
--- /dev/null
+++ b/docs/doc_examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc
@@ -0,0 +1,54 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index",
+  mappings: {
+    dynamic_templates: [
+      {
+        geo_point: {
+          mapping: {
+            type: "geo_point",
+          },
+        },
+      },
+    ],
+  },
+});
+console.log(response);
+
+const response1 = await client.bulk({
+  operations: [
+    {
+      index: {
+        _index: "my-index",
+        _id: "1",
+        dynamic_templates: {
+          work_location: "geo_point",
+        },
+      },
+    },
+    {
+      field: "value1",
+      work_location: "41.12,-71.34",
+      raw_location: "41.12,-71.34",
+    },
+    {
+      create: {
+        _index: "my-index",
+        _id: "2",
+        dynamic_templates: {
+          home_location: "geo_point",
+        },
+      },
+    },
+    {
+      field: "value2",
+      
home_location: "41.12,-71.34", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc b/docs/doc_examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc new file mode 100644 index 000000000..d4f5c37d2 --- /dev/null +++ b/docs/doc_examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: { + source: + "if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'noop' }", + lang: "painless", + params: { + tag: "green", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c9d9a1d751f20f6197c825cb4378fe9f.asciidoc b/docs/doc_examples/c9d9a1d751f20f6197c825cb4378fe9f.asciidoc new file mode 100644 index 000000000..52d6e5890 --- /dev/null +++ b/docs/doc_examples/c9d9a1d751f20f6197c825cb4378fe9f.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + terms: { + "user.id": ["kimchy", "elkbee"], + boost: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ca06db2aa4747910278f96315f7be94b.asciidoc b/docs/doc_examples/ca06db2aa4747910278f96315f7be94b.asciidoc new file mode 100644 index 000000000..4b2d56fb3 --- /dev/null +++ b/docs/doc_examples/ca06db2aa4747910278f96315f7be94b.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top: 40.73, + left: -74.1, + bottom: 40.01, + right: -71.12, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ca08e511e5907d258081b10a1a9f0072.asciidoc b/docs/doc_examples/ca08e511e5907d258081b10a1a9f0072.asciidoc new file mode 100644 index 000000000..f8cc1818e --- /dev/null +++ b/docs/doc_examples/ca08e511e5907d258081b10a1a9f0072.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "new-data-stream-template", + index_patterns: ["new-data-stream*"], + data_stream: {}, + priority: 500, + template: { + mappings: { + properties: { + "@timestamp": { + type: "date_nanos", + }, + }, + }, + settings: { + "sort.field": ["@timestamp"], + "sort.order": ["desc"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ca1cc4bcef22fdf9153833bfe6a55294.asciidoc b/docs/doc_examples/ca1cc4bcef22fdf9153833bfe6a55294.asciidoc new file mode 100644 index 000000000..ce21a8a26 --- /dev/null +++ b/docs/doc_examples/ca1cc4bcef22fdf9153833bfe6a55294.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + refresh: "true", + operations: [ + { + index: { + _index: ".ds-my-data-stream-2099.03.08-000003", + _id: "bfspvnIBr7VVZlfp2lqX", + if_seq_no: 0, + if_primary_term: 1, + }, + }, + { + "@timestamp": 
"2099-03-08T11:06:07.000Z", + user: { + id: "8a4f500d", + }, + message: "Login successful", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ca3bcd6278510ebced5f74484033cb36.asciidoc b/docs/doc_examples/ca3bcd6278510ebced5f74484033cb36.asciidoc new file mode 100644 index 000000000..79282a804 --- /dev/null +++ b/docs/doc_examples/ca3bcd6278510ebced5f74484033cb36.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.getScriptLanguages(); +console.log(response); +---- diff --git a/docs/doc_examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc b/docs/doc_examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc new file mode 100644 index 000000000..6fa3241c9 --- /dev/null +++ b/docs/doc_examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey(); +console.log(response); +---- diff --git a/docs/doc_examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc b/docs/doc_examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc new file mode 100644 index 000000000..6b66ad338 --- /dev/null +++ b/docs/doc_examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + sparse_vector: { + field: "ml.tokens", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ca98afbd6a90f63e02f62239d225313b.asciidoc b/docs/doc_examples/ca98afbd6a90f63e02f62239d225313b.asciidoc new file mode 100644 index 000000000..9f401ce83 --- /dev/null +++ b/docs/doc_examples/ca98afbd6a90f63e02f62239d225313b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.danglingIndices.importDanglingIndex({ + index_uuid: "zmM4e0JtBkeUjiHD-MihPQ", + accept_data_loss: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/caaafef1a76c2bec677704c2dc233218.asciidoc b/docs/doc_examples/caaafef1a76c2bec677704c2dc233218.asciidoc new file mode 100644 index 000000000..d19897921 --- /dev/null +++ b/docs/doc_examples/caaafef1a76c2bec677704c2dc233218.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.simulateIndexTemplate({ + name: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/caab99520d3fe41f6154d74a7f696057.asciidoc b/docs/doc_examples/caab99520d3fe41f6154d74a7f696057.asciidoc new file mode 100644 index 000000000..d4a98b60b --- /dev/null +++ b/docs/doc_examples/caab99520d3fe41f6154d74a7f696057.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/cac74a85c6b352a6e23d8673abae126f.asciidoc b/docs/doc_examples/cac74a85c6b352a6e23d8673abae126f.asciidoc new file mode 100644 index 000000000..3741af149 --- /dev/null +++ b/docs/doc_examples/cac74a85c6b352a6e23d8673abae126f.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + my_agg: { + frequent_item_sets: { + minimum_set_size: 3, + fields: [ + { + field: "category.keyword", + }, + { + field: "geoip.city_name", + }, + ], + size: 3, + filter: { + term: { + "geoip.continent_name": "Europe", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc b/docs/doc_examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc new file mode 100644 index 000000000..366ef7e6a --- /dev/null +++ b/docs/doc_examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.split({ + index: "my-index-000001", + target: "split-my-index-000001", + settings: { + "index.number_of_shards": 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f097c298a7abf4c032c4314920c49c8.asciidoc b/docs/doc_examples/cb0c3223fd45148497df73adfba2e9ce.asciidoc similarity index 50% rename from docs/doc_examples/6f097c298a7abf4c032c4314920c49c8.asciidoc rename to docs/doc_examples/cb0c3223fd45148497df73adfba2e9ce.asciidoc index 670e86ca3..62d570c80 100644 --- a/docs/doc_examples/6f097c298a7abf4c032c4314920c49c8.asciidoc +++ b/docs/doc_examples/cb0c3223fd45148497df73adfba2e9ce.asciidoc @@ -4,18 +4,17 @@ [source, js] ---- const response = await client.reindex({ - body: { - source: { - index: [ - 'twitter', - 'blog' - ] + source: { + index: "my-index-000001", + query: { + term: { + "user.id": "kimchy", + }, }, - dest: { - index: 'all_together' - } - } -}) -console.log(response) + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/cb16f1ff85399ddaa418834be580c9de.asciidoc b/docs/doc_examples/cb16f1ff85399ddaa418834be580c9de.asciidoc new file mode 100644 index 000000000..15951883c --- /dev/null +++ b/docs/doc_examples/cb16f1ff85399ddaa418834be580c9de.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "slm-admin", + cluster: ["manage_slm", "cluster:admin/snapshot/*"], + indices: [ + { + names: [".slm-history-*"], + privileges: ["all"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb1d2a787bbe88974cfc5f132556a51c.asciidoc b/docs/doc_examples/cb1d2a787bbe88974cfc5f132556a51c.asciidoc new file mode 100644 index 000000000..9d4169eb7 --- /dev/null +++ b/docs/doc_examples/cb1d2a787bbe88974cfc5f132556a51c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteDataStream({ + name: "*", + expand_wildcards: "all", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc b/docs/doc_examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc new file mode 100644 index 000000000..43c87dda3 --- /dev/null +++ b/docs/doc_examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache({ + index: "my-index-000001,my-index-000002", + request: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb3c483816b6ea150ff6c559fa144d32.asciidoc b/docs/doc_examples/cb3c483816b6ea150ff6c559fa144d32.asciidoc new file mode 100644 index 000000000..93090861b --- /dev/null +++ b/docs/doc_examples/cb3c483816b6ea150ff6c559fa144d32.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "timeseries_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50GB", + max_age: "30d", + }, + }, + }, + delete: { + min_age: "90d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb4388b72d41c431ec9ca8255b2f65fb.asciidoc b/docs/doc_examples/cb4388b72d41c431ec9ca8255b2f65fb.asciidoc new file mode 100644 index 000000000..4c0f31631 --- /dev/null +++ b/docs/doc_examples/cb4388b72d41c431ec9ca8255b2f65fb.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example", + mappings: { + properties: { + geometry: { + type: "shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "example", + id: 1, + refresh: "wait_for", + document: { + name: "Lucky Landing", + geometry: { + type: "point", + coordinates: [1355.400544, 5255.530286], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc b/docs/doc_examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc new file mode 100644 index 000000000..dfa1e1323 --- /dev/null +++ b/docs/doc_examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.master({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb71c6ecfb8b19725c374572444e5d32.asciidoc b/docs/doc_examples/cb71c6ecfb8b19725c374572444e5d32.asciidoc new file mode 100644 index 000000000..ca22d0ffd --- /dev/null +++ b/docs/doc_examples/cb71c6ecfb8b19725c374572444e5d32.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + aggs: { + avg_start: { + avg: { + field: "measures.start", + }, + }, + avg_end: { + avg: { + field: "measures.end", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cba3462a307e2483c14e3e198f6960e3.asciidoc b/docs/doc_examples/cba3462a307e2483c14e3e198f6960e3.asciidoc new file mode 100644 index 000000000..70b810179 --- /dev/null +++ 
b/docs/doc_examples/cba3462a307e2483c14e3e198f6960e3.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + _meta: { + description: "used for nginx log", + project: { + name: "myProject", + department: "myDepartment", + }, + }, + phases: { + warm: { + min_age: "10d", + actions: { + forcemerge: { + max_num_segments: 1, + }, + }, + }, + delete: { + min_age: "30d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cbc2b5595890f87165aab1a741b1d22c.asciidoc b/docs/doc_examples/cbc2b5595890f87165aab1a741b1d22c.asciidoc new file mode 100644 index 000000000..f58997869 --- /dev/null +++ b/docs/doc_examples/cbc2b5595890f87165aab1a741b1d22c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-timestamp-pipeline", + description: "Shifts the @timestamp to the last 15 minutes", + processors: [ + { + set: { + field: "ingest_time", + value: "{{_ingest.timestamp}}", + }, + }, + { + script: { + lang: "painless", + source: + '\n def delta = ChronoUnit.SECONDS.between(\n ZonedDateTime.parse("2022-06-21T15:49:00Z"),\n ZonedDateTime.parse(ctx["ingest_time"])\n );\n ctx["@timestamp"] = ZonedDateTime.parse(ctx["@timestamp"]).plus(delta,ChronoUnit.SECONDS).toString();\n ', + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc b/docs/doc_examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc new file mode 100644 index 000000000..310baf041 --- /dev/null +++ b/docs/doc_examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.templates({ + v: "true", + s: "order:desc,index_patterns", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cc0cca5556ec6224c7134c233734beed.asciidoc b/docs/doc_examples/cc0cca5556ec6224c7134c233734beed.asciidoc new file mode 100644 index 000000000..0985386a9 --- /dev/null +++ b/docs/doc_examples/cc0cca5556ec6224c7134c233734beed.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.remoteInfo(); +console.log(response); +---- diff --git a/docs/doc_examples/cc28a3dafcd5056f2a3ec07f6fda5091.asciidoc b/docs/doc_examples/cc28a3dafcd5056f2a3ec07f6fda5091.asciidoc new file mode 100644 index 000000000..4f6f802d9 --- /dev/null +++ b/docs/doc_examples/cc28a3dafcd5056f2a3ec07f6fda5091.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + runtime_mappings: { + day_of_week: { + type: "keyword", + script: { + source: + "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))", + }, + }, + }, + aggs: { + day_of_week: { + terms: { + field: "day_of_week", + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/cc56be758d5d75febbd975786187c861.asciidoc b/docs/doc_examples/cc56be758d5d75febbd975786187c861.asciidoc new file mode 100644 index 000000000..b453c2cd4 --- /dev/null +++ b/docs/doc_examples/cc56be758d5d75febbd975786187c861.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createServiceToken({ + namespace: "elastic", + service: "fleet-server", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc b/docs/doc_examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc new file mode 100644 index 000000000..b9bbc5172 --- /dev/null +++ b/docs/doc_examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "mv", + mappings: { + properties: { + b: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "mv", + refresh: "true", + operations: [ + { + index: {}, + }, + { + a: 1, + b: ["foo", "foo", "bar"], + }, + { + index: {}, + }, + { + a: 2, + b: ["bar", "bar"], + }, + ], +}); +console.log(response1); + +const response2 = await client.esql.query({ + query: "FROM mv | LIMIT 2", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/cc7f1c74ede6810e2c9db19256d6b653.asciidoc b/docs/doc_examples/cc7f1c74ede6810e2c9db19256d6b653.asciidoc new file mode 100644 index 000000000..d193cb365 --- /dev/null +++ b/docs/doc_examples/cc7f1c74ede6810e2c9db19256d6b653.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + match: { + "http.response": "304", + }, + }, + fields: ["http.response"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc b/docs/doc_examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc new file mode 100644 index 000000000..c5bd1a7ef --- /dev/null +++ b/docs/doc_examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteTrainedModel({ + model_id: "regression-job-one-1574775307356", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc b/docs/doc_examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc new file mode 100644 index 000000000..b90fcd491 --- /dev/null +++ b/docs/doc_examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "snapshot_2", + wait_for_completion: "true", + indices: "index_1,index_2", + ignore_unavailable: true, + include_global_state: false, + rename_pattern: "index_(.+)", + rename_replacement: "restored_index_$1", + include_aliases: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc 
b/docs/doc_examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc new file mode 100644 index 000000000..a606b3cd3 --- /dev/null +++ b/docs/doc_examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteJob({ + job_id: "total-requests", + wait_for_completion: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ccf84c1e5e5602a9e841cb8f7e3bb29f.asciidoc b/docs/doc_examples/ccf84c1e5e5602a9e841cb8f7e3bb29f.asciidoc new file mode 100644 index 000000000..1bd50f8c4 --- /dev/null +++ b/docs/doc_examples/ccf84c1e5e5602a9e841cb8f7e3bb29f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "standard_example", + settings: { + analysis: { + analyzer: { + rebuilt_standard: { + tokenizer: "standard", + filter: ["lowercase"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd16538654e0f834ff19fe6cf329c398.asciidoc b/docs/doc_examples/cd16538654e0f834ff19fe6cf329c398.asciidoc new file mode 100644 index 000000000..2b2407cee --- /dev/null +++ b/docs/doc_examples/cd16538654e0f834ff19fe6cf329c398.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "hugging-face-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 768, + element_type: "float", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd247f267968aa0927bfdad56852f8f5.asciidoc b/docs/doc_examples/cd247f267968aa0927bfdad56852f8f5.asciidoc deleted file mode 100644 index a9e7678d6..000000000 --- a/docs/doc_examples/cd247f267968aa0927bfdad56852f8f5.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - query: { - match: { - address: 'mill lane' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/cd373a6eb1ef4748616500b26fab3006.asciidoc b/docs/doc_examples/cd373a6eb1ef4748616500b26fab3006.asciidoc new file mode 100644 index 000000000..68187df30 --- /dev/null +++ b/docs/doc_examples/cd373a6eb1ef4748616500b26fab3006.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: "sales*", + size: 0, + sort: [ + { + date: { + order: "asc", + }, + }, + ], + aggs: { + sale_date: { + date_histogram: { + field: "date", + calendar_interval: "1d", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc b/docs/doc_examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc new file mode 100644 index 000000000..26a5401c2 --- /dev/null +++ b/docs/doc_examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "template_with_2_shards", + template: { + settings: { + "index.number_of_shards": 2, + }, + }, +}); +console.log(response); + +const response1 = await client.cluster.putComponentTemplate({ + name: "template_with_3_shards", + template: { + settings: { + "index.number_of_shards": 3, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["t*"], + composed_of: ["template_with_2_shards", "template_with_3_shards"], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/cd5bc5bf7cd58d7b1492c9c298b345f6.asciidoc b/docs/doc_examples/cd5bc5bf7cd58d7b1492c9c298b345f6.asciidoc deleted file mode 100644 index 7646900a9..000000000 --- a/docs/doc_examples/cd5bc5bf7cd58d7b1492c9c298b345f6.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - actors: { - terms: { - field: 'actors', - size: 10, - collect_mode: 'breadth_first' - }, - aggs: { - costars: { - terms: { - field: 'actors', - size: 5 - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc b/docs/doc_examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc new file mode 100644 index 000000000..14d8971a1 --- /dev/null +++ b/docs/doc_examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putDataLifecycle({ + name: "my-data-stream", + data_retention: "7d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd6eee201a233b989ac1f2794fa6d640.asciidoc b/docs/doc_examples/cd6eee201a233b989ac1f2794fa6d640.asciidoc new file mode 100644 index 000000000..d6447e49e --- /dev/null +++ b/docs/doc_examples/cd6eee201a233b989ac1f2794fa6d640.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + filter_path: "-hits.events._source", + runtime_mappings: { + day_of_week: { + type: "keyword", + script: "emit(doc['@timestamp'].value.dayOfWeekEnum.toString())", + }, + }, + query: '\n process where process.name == "regsvr32.exe"\n ', + fields: ["@timestamp", "day_of_week"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd6fa7f63c93bb04824acd3a7d1f8de3.asciidoc b/docs/doc_examples/cd6fa7f63c93bb04824acd3a7d1f8de3.asciidoc new file mode 100644 index 000000000..0cd77f88f --- /dev/null +++ b/docs/doc_examples/cd6fa7f63c93bb04824acd3a7d1f8de3.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_not: { + include: { + span_term: { + field1: "hoya", + }, + }, + exclude: { + span_near: { + clauses: [ + { + span_term: { + field1: "la", + }, + }, + { + span_term: { + field1: "hoya", + }, + }, + ], + slop: 0, + in_order: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/cd7da0c3769682f546cc1888e569382e.asciidoc b/docs/doc_examples/cd7da0c3769682f546cc1888e569382e.asciidoc new file mode 100644 index 000000000..7505626c3 --- /dev/null +++ b/docs/doc_examples/cd7da0c3769682f546cc1888e569382e.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match_phrase: { + message: "number 1", + }, + }, + highlight: { + fields: { + message: { + type: "plain", + fragment_size: 15, + number_of_fragments: 3, + fragmenter: "span", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc b/docs/doc_examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc new file mode 100644 index 000000000..329ea0d48 --- /dev/null +++ b/docs/doc_examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getPrivileges({ + application: "myapp", + name: "read", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd93919e13f656ad2e6629f45c579b93.asciidoc b/docs/doc_examples/cd93919e13f656ad2e6629f45c579b93.asciidoc new file mode 100644 index 000000000..85109f792 --- /dev/null +++ b/docs/doc_examples/cd93919e13f656ad2e6629f45c579b93.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shardStores({ + index: "test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cda045dfd79acd160ed8668f2ee17ea7.asciidoc b/docs/doc_examples/cda045dfd79acd160ed8668f2ee17ea7.asciidoc new file mode 100644 index 000000000..393db177b --- /dev/null +++ b/docs/doc_examples/cda045dfd79acd160ed8668f2ee17ea7.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + pretty: "true", + query: { + match: { + full_text: "Quick Brown Foxes!", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc b/docs/doc_examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc new file mode 100644 index 000000000..75ecf1530 --- /dev/null +++ b/docs/doc_examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-new-index-000001", + mappings: { + properties: { + user_id: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdc04e6d3d37f036c7045ee4a582ef06.asciidoc b/docs/doc_examples/cdc04e6d3d37f036c7045ee4a582ef06.asciidoc new file mode 100644 index 000000000..db4342fe6 --- /dev/null +++ b/docs/doc_examples/cdc04e6d3d37f036c7045ee4a582ef06.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + 
dynamic_templates: [ + { + strings_as_keywords: { + match_mapping_type: "string", + mapping: { + type: "text", + norms: false, + fields: { + keyword: { + type: "keyword", + ignore_above: 256, + }, + }, + }, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdc38c98320a0df705ec8d173c725375.asciidoc b/docs/doc_examples/cdc38c98320a0df705ec8d173c725375.asciidoc new file mode 100644 index 000000000..b17259566 --- /dev/null +++ b/docs/doc_examples/cdc38c98320a0df705ec8d173c725375.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + size: 0, + aggs: { + grouped: { + geohex_grid: { + field: "location", + precision: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc b/docs/doc_examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc new file mode 100644 index 000000000..1eabbfa9a --- /dev/null +++ b/docs/doc_examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdd29b01e730b3996de68a2788050021.asciidoc b/docs/doc_examples/cdd29b01e730b3996de68a2788050021.asciidoc new file mode 100644 index 000000000..f1425e5c3 --- /dev/null +++ b/docs/doc_examples/cdd29b01e730b3996de68a2788050021.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.deletePolicy({ + name: "my-policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdd7127681254f4d614cc075f9e6fbcf.asciidoc b/docs/doc_examples/cdd7127681254f4d614cc075f9e6fbcf.asciidoc new file mode 100644 index 000000000..8f3de891a --- /dev/null +++ b/docs/doc_examples/cdd7127681254f4d614cc075f9e6fbcf.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001", + query: { + term: { + "user.id": "kimchy", + }, + }, + max_docs: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cde19d110a58317610033ea3dcb0eb80.asciidoc b/docs/doc_examples/cde19d110a58317610033ea3dcb0eb80.asciidoc new file mode 100644 index 000000000..9d5aacd3a --- /dev/null +++ b/docs/doc_examples/cde19d110a58317610033ea3dcb0eb80.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: + '\n {\n "query": {\n "match": {\n {{#query_message}}\n {{#query_string}}\n "message": "Hello {{#first_name_section}}{{first_name}}{{/first_name_section}} {{#last_name_section}}{{last_name}}{{/last_name_section}}"\n {{/query_string}}\n {{/query_message}}\n }\n }\n }\n ', + params: { + query_message: { + query_string: { + first_name_section: { + first_name: "John", + }, + last_name_section: { + last_name: "kimchy", + }, + }, + }, + }, +}); +console.log(response); +---- 
diff --git a/docs/doc_examples/cde4104a29dfe942d55863cdd8718627.asciidoc b/docs/doc_examples/cde4104a29dfe942d55863cdd8718627.asciidoc new file mode 100644 index 000000000..baab28e94 --- /dev/null +++ b/docs/doc_examples/cde4104a29dfe942d55863cdd8718627.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getStatus(); +console.log(response); +---- diff --git a/docs/doc_examples/cdedd5f33f7e5f7acde561e97bff61de.asciidoc b/docs/doc_examples/cdedd5f33f7e5f7acde561e97bff61de.asciidoc deleted file mode 100644 index 29285ab2b..000000000 --- a/docs/doc_examples/cdedd5f33f7e5f7acde561e97bff61de.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'my_index', - pretty: true, - body: { - query: { - term: { - full_text: 'Quick Brown Foxes!' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc b/docs/doc_examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc new file mode 100644 index 000000000..044a77056 --- /dev/null +++ b/docs/doc_examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: { + source: "ctx._source.tags.add(params.tag)", + lang: "painless", + params: { + tag: "blue", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc b/docs/doc_examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc new file mode 100644 index 000000000..235c96478 --- /dev/null +++ b/docs/doc_examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + settings: { + "index.number_of_replicas": 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce0a1aba713b0448b0c6a504af7b3a08.asciidoc b/docs/doc_examples/ce0a1aba713b0448b0c6a504af7b3a08.asciidoc new file mode 100644 index 000000000..7e7984e2e --- /dev/null +++ b/docs/doc_examples/ce0a1aba713b0448b0c6a504af7b3a08.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getStats(); +console.log(response); +---- diff --git a/docs/doc_examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc b/docs/doc_examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc new file mode 100644 index 000000000..11032c855 --- /dev/null +++ b/docs/doc_examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc b/docs/doc_examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc new file mode 100644 index 000000000..f33bbe266 --- 
/dev/null +++ b/docs/doc_examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector", + body: { + index_name: "search-google-drive", + name: "My Connector", + description: "My Connector to sync data to Elastic index from Google Drive", + service_type: "google_drive", + language: "english", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce247fc08371e1b30cb52195e521c076.asciidoc b/docs/doc_examples/ce247fc08371e1b30cb52195e521c076.asciidoc new file mode 100644 index 000000000..da117c77f --- /dev/null +++ b/docs/doc_examples/ce247fc08371e1b30cb52195e521c076.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top_left: [-74.1, 40.73], + bottom_right: [-71.12, 40.01], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc b/docs/doc_examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc new file mode 100644 index 000000000..b59f93b68 --- /dev/null +++ b/docs/doc_examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + text_embedding: { + type: "dense_vector", + dims: 384, + index_options: { + type: "int4_hnsw", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc b/docs/doc_examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc new file mode 100644 index 000000000..b9f5d4012 --- /dev/null +++ b/docs/doc_examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDataFrameAnalytics({ + id: "loganalytics", + description: "Outlier detection on log data", + source: { + index: "logdata", + }, + dest: { + index: "logdata_out", + }, + analysis: { + outlier_detection: { + compute_feature_influence: true, + outlier_fraction: 0.05, + standardization_enabled: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce725697f93b3eebb3a266314568565a.asciidoc b/docs/doc_examples/ce725697f93b3eebb3a266314568565a.asciidoc new file mode 100644 index 000000000..c47955a54 --- /dev/null +++ b/docs/doc_examples/ce725697f93b3eebb3a266314568565a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "fingerprint_example", + settings: { + analysis: { + analyzer: { + rebuilt_fingerprint: { + tokenizer: "standard", + filter: ["lowercase", "asciifolding", "fingerprint"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc b/docs/doc_examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc new file mode 100644 index 000000000..d5a91c659 --- /dev/null +++ b/docs/doc_examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.queryWatches(); +console.log(response); +---- diff --git a/docs/doc_examples/ce899fcf55da72fc32e623d1ad88b301.asciidoc b/docs/doc_examples/ce899fcf55da72fc32e623d1ad88b301.asciidoc new file mode 100644 index 000000000..9a217bfec --- /dev/null +++ b/docs/doc_examples/ce899fcf55da72fc32e623d1ad88b301.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "logs-foo_component2", + template: { + mappings: { + properties: { + "host.ip": { + type: "ip", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce8eebfb810335803630abe83278bee7.asciidoc b/docs/doc_examples/ce8eebfb810335803630abe83278bee7.asciidoc new file mode 100644 index 000000000..2ae4c207b --- /dev/null +++ b/docs/doc_examples/ce8eebfb810335803630abe83278bee7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + active_only: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cedb56a71cc743d80263ce352bb21720.asciidoc b/docs/doc_examples/cedb56a71cc743d80263ce352bb21720.asciidoc new file mode 100644 index 000000000..1b8303420 --- /dev/null +++ b/docs/doc_examples/cedb56a71cc743d80263ce352bb21720.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/sparse_embedding/my-elser-model", + body: { + service: "elser", + service_settings: { + num_allocations: 1, + num_threads: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc b/docs/doc_examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc new file mode 100644 index 000000000..6a20452e4 --- /dev/null +++ b/docs/doc_examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.inferTrainedModel({ + model_id: "model2", + docs: [ + { + text_field: + "The Amazon rainforest covers most of the Amazon basin in South America", + }, + ], + inference_config: { + ner: { + tokenization: { + bert: { + truncate: "first", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc b/docs/doc_examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc new file mode 100644 index 000000000..0a7361e50 --- /dev/null +++ b/docs/doc_examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response 
= await client.security.getToken({ + grant_type: "client_credentials", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc b/docs/doc_examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc new file mode 100644 index 000000000..35fad32dd --- /dev/null +++ b/docs/doc_examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + "routing.allocation.total_shards_per_node": 5, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6d1e75312a28a5ba23837abf768f2510.asciidoc b/docs/doc_examples/cf47cd4a39cd62a3ecad919e54a67bca.asciidoc similarity index 66% rename from docs/doc_examples/6d1e75312a28a5ba23837abf768f2510.asciidoc rename to docs/doc_examples/cf47cd4a39cd62a3ecad919e54a67bca.asciidoc index e6f30ade4..455ef41c7 100644 --- a/docs/doc_examples/6d1e75312a28a5ba23837abf768f2510.asciidoc +++ b/docs/doc_examples/cf47cd4a39cd62a3ecad919e54a67bca.asciidoc @@ -4,10 +4,11 @@ [source, js] ---- const response = await client.search({ - index: 'twitter', - size: 'surprise_me', - error_trace: 'true' -}) -console.log(response) + query: { + term: { + _ignored: "@timestamp", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/cf5dab4334783ca9b8942eab68fb7174.asciidoc b/docs/doc_examples/cf5dab4334783ca9b8942eab68fb7174.asciidoc new file mode 100644 index 000000000..51fa92abb --- /dev/null +++ b/docs/doc_examples/cf5dab4334783ca9b8942eab68fb7174.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "products", + size: 0, + query: { + match: { + name: "led tv", + }, + }, + aggs: { + resellers: { + nested: { + path: "resellers", + }, + aggs: { + filter_reseller: { + filter: { + bool: { + filter: [ + { + term: { + "resellers.reseller": "companyB", + }, + }, + ], + }, + }, + aggs: { + min_price: { + min: { + field: "resellers.price", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cf75a880c749a2f2010a8ec3f348e5c3.asciidoc b/docs/doc_examples/cf75a880c749a2f2010a8ec3f348e5c3.asciidoc new file mode 100644 index 000000000..379cc61c9 --- /dev/null +++ b/docs/doc_examples/cf75a880c749a2f2010a8ec3f348e5c3.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + keep_on_completion: true, + wait_for_completion_timeout: "2s", + query: '\n process where process.name == "cmd.exe"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc b/docs/doc_examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc new file mode 100644 index 000000000..ec92536ff --- /dev/null +++ b/docs/doc_examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_internal/desired_nodes/_latest", +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/cf9f51d719a2e90ffe36ed6fe56a4a69.asciidoc b/docs/doc_examples/cf9f51d719a2e90ffe36ed6fe56a4a69.asciidoc new file mode 100644 index 000000000..24bc313b4 --- /dev/null +++ b/docs/doc_examples/cf9f51d719a2e90ffe36ed6fe56a4a69.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote-replication", + cluster: ["manage_ccr"], + indices: [ + { + names: ["follower-index-name"], + privileges: ["monitor", "read", "write", "manage_follow_index"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc b/docs/doc_examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc new file mode 100644 index 000000000..24848abdd --- /dev/null +++ b/docs/doc_examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + owner: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cfbaea6f0df045c5d940bbb6a9c69cd8.asciidoc b/docs/doc_examples/cfbaea6f0df045c5d940bbb6a9c69cd8.asciidoc deleted file mode 100644 index e21d4bca8..000000000 --- a/docs/doc_examples/cfbaea6f0df045c5d940bbb6a9c69cd8.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - size: 0, - aggs: { - group_by_state: { - terms: { - field: 'state.keyword' - }, - aggs: { - average_balance: { - avg: { - field: 'balance' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/cfd4b34f35e531a20739a3b308d57134.asciidoc b/docs/doc_examples/cfd4b34f35e531a20739a3b308d57134.asciidoc new file mode 100644 index 000000000..b7d51f35c --- /dev/null +++ b/docs/doc_examples/cfd4b34f35e531a20739a3b308d57134.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_docs: 100000000, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cffce059425d3d21e7f9571500d63524.asciidoc b/docs/doc_examples/cffce059425d3d21e7f9571500d63524.asciidoc new file mode 100644 index 000000000..341274e75 --- /dev/null +++ b/docs/doc_examples/cffce059425d3d21e7f9571500d63524.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.deleteRole({ + name: "my_admin_role", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d003ee256d24aa6000bd9dbf1d608dc5.asciidoc b/docs/doc_examples/d003ee256d24aa6000bd9dbf1d608dc5.asciidoc new file mode 100644 index 000000000..7af2430a5 --- /dev/null +++ b/docs/doc_examples/d003ee256d24aa6000bd9dbf1d608dc5.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] 
+---- +const response = await client.ingest.putPipeline({ + id: "elser-v2-test", + processors: [ + { + inference: { + model_id: ".elser_model_2", + input_output: [ + { + input_field: "content", + output_field: "content_embedding", + }, + ], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d003f9110e5a474230abe11f36da9297.asciidoc b/docs/doc_examples/d003f9110e5a474230abe11f36da9297.asciidoc new file mode 100644 index 000000000..3684f13dc --- /dev/null +++ b/docs/doc_examples/d003f9110e5a474230abe11f36da9297.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + description: "Hide my IP", + processors: [ + { + redact: { + field: "message", + patterns: ["%{IP:client}"], + }, + }, + ], + }, + docs: [ + { + _source: { + message: "55.3.244.1 GET /index.html 15824 0.043", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc b/docs/doc_examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc new file mode 100644 index 000000000..ec1e35374 --- /dev/null +++ b/docs/doc_examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings({ + flat_settings: "true", + filter_path: "transient", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc b/docs/doc_examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc new file mode 100644 index 000000000..597ee3209 --- /dev/null +++ b/docs/doc_examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "ct1", + template: { + settings: { + "index.number_of_shards": 2, + }, + }, +}); +console.log(response); + +const response1 = await client.cluster.putComponentTemplate({ + name: "ct2", + template: { + settings: { + "index.number_of_replicas": 0, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.putIndexTemplate({ + name: "final-template", + index_patterns: ["my-index-*"], + composed_of: ["ct1", "ct2"], + priority: 5, +}); +console.log(response2); + +const response3 = await client.indices.simulateIndexTemplate({ + name: "my-index-000001", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/d03139a851888db53f8b7affd85eb495.asciidoc b/docs/doc_examples/d03139a851888db53f8b7affd85eb495.asciidoc new file mode 100644 index 000000000..6c02dd8e5 --- /dev/null +++ b/docs/doc_examples/d03139a851888db53f8b7affd85eb495.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_check_in", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d0378fe5e3aad05a2fd2e6e81213374f.asciidoc b/docs/doc_examples/d0378fe5e3aad05a2fd2e6e81213374f.asciidoc new file mode 100644 index 000000000..1b2d7df64 --- 
/dev/null +++ b/docs/doc_examples/d0378fe5e3aad05a2fd2e6e81213374f.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "bulgarian_example", + settings: { + analysis: { + filter: { + bulgarian_stop: { + type: "stop", + stopwords: "_bulgarian_", + }, + bulgarian_keywords: { + type: "keyword_marker", + keywords: ["пример"], + }, + bulgarian_stemmer: { + type: "stemmer", + language: "bulgarian", + }, + }, + analyzer: { + rebuilt_bulgarian: { + tokenizer: "standard", + filter: [ + "lowercase", + "bulgarian_stop", + "bulgarian_keywords", + "bulgarian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d03b0e2f0f3f5ac8d53287c445007a89.asciidoc b/docs/doc_examples/d03b0e2f0f3f5ac8d53287c445007a89.asciidoc new file mode 100644 index 000000000..fac12de59 --- /dev/null +++ b/docs/doc_examples/d03b0e2f0f3f5ac8d53287c445007a89.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + default_field: { + type: "text", + }, + boolean_sim_field: { + type: "text", + similarity: "boolean", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc b/docs/doc_examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc index e5ce437b2..23de5f5d8 100644 --- a/docs/doc_examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc +++ b/docs/doc_examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc @@ -1,43 +1,66 @@ -[source,js] +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] ---- const response = await client.bulk({ operations: [ - { index: { _index: 'books' } }, { - name: 'Revelation Space', - author: 'Alastair Reynolds', - release_date: '2000-03-15', + index: { + _index: "books", + }, + }, + { + name: "Revelation Space", + author: "Alastair Reynolds", + release_date: "2000-03-15", page_count: 585, }, - { index: { _index: 'books' } }, { - name: '1984', - author: 'George Orwell', - release_date: '1985-06-01', + index: { + _index: "books", + }, + }, + { + name: "1984", + author: "George Orwell", + release_date: "1985-06-01", page_count: 328, }, - { index: { _index: 'books' } }, { - name: 'Fahrenheit 451', - author: 'Ray Bradbury', - release_date: '1953-10-15', + index: { + _index: "books", + }, + }, + { + name: "Fahrenheit 451", + author: "Ray Bradbury", + release_date: "1953-10-15", page_count: 227, }, - { index: { _index: 'books' } }, { - name: 'Brave New World', - author: 'Aldous Huxley', - release_date: '1932-06-01', + index: { + _index: "books", + }, + }, + { + name: "Brave New World", + author: "Aldous Huxley", + release_date: "1932-06-01", page_count: 268, }, - { index: { _index: 'books' } }, { - name: 'The Handmaids Tale', - author: 'Margaret Atwood', - release_date: '1985-06-01', + index: { + _index: "books", + }, + }, + { + name: "The Handmaids Tale", + author: "Margaret Atwood", + release_date: "1985-06-01", page_count: 311, - } - ] -}) -console.log(response) + }, + ], +}); +console.log(response); ---- diff --git a/docs/doc_examples/d050c6fa7d806457a5f32d30b07e9521.asciidoc b/docs/doc_examples/d050c6fa7d806457a5f32d30b07e9521.asciidoc 
new file mode 100644 index 000000000..cd4a3d736 --- /dev/null +++ b/docs/doc_examples/d050c6fa7d806457a5f32d30b07e9521.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + dot_expander: { + description: "Expand 'my-object-field.my-property'", + field: "my-object-field.my-property", + }, + }, + { + set: { + description: "Set 'my-object-field.my-property' to 10", + field: "my-object-field.my-property", + value: 10, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d0546f047359b85a7e98207dc8de896a.asciidoc b/docs/doc_examples/d0546f047359b85a7e98207dc8de896a.asciidoc new file mode 100644 index 000000000..3375c612b --- /dev/null +++ b/docs/doc_examples/d0546f047359b85a7e98207dc8de896a.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.mapping.coerce": false, + }, + mappings: { + properties: { + number_one: { + type: "integer", + coerce: true, + }, + number_two: { + type: "integer", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + number_one: "10", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + number_two: "10", + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d05b2a37106fce0ebbd41e2fd6bd26c2.asciidoc b/docs/doc_examples/d05b2a37106fce0ebbd41e2fd6bd26c2.asciidoc new file mode 100644 index 000000000..e865f4e45 --- /dev/null +++ b/docs/doc_examples/d05b2a37106fce0ebbd41e2fd6bd26c2.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "metrics_index", + mappings: { + properties: { + latency_histo: { + type: "histogram", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 1, + refresh: "true", + document: { + "network.name": "net-1", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "metrics_index", + id: 2, + refresh: "true", + document: { + "network.name": "net-2", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "metrics_index", + size: 0, + filter_path: "aggregations", + aggs: { + min_latency: { + min: { + field: "latency_histo", + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/d06a649bc38aa9a6433b64efa78d8cb5.asciidoc b/docs/doc_examples/d06a649bc38aa9a6433b64efa78d8cb5.asciidoc new file mode 100644 index 000000000..380d5aa17 --- /dev/null +++ b/docs/doc_examples/d06a649bc38aa9a6433b64efa78d8cb5.asciidoc @@ -0,0 +1,68 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index", + refresh: 
"true", + operations: [ + { + index: {}, + }, + { + timestamp: "2020-04-30T14:30:17-05:00", + message: + '40.135.0.0 - - [30/Apr/2020:14:30:17 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:30:53-05:00", + message: + '232.0.0.0 - - [30/Apr/2020:14:30:53 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:12-05:00", + message: + '26.1.0.0 - - [30/Apr/2020:14:31:12 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:19-05:00", + message: + '247.37.0.0 - - [30/Apr/2020:14:31:19 -0500] "GET /french/splash_inet.html HTTP/1.0" 200 3781', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:22-05:00", + message: + '247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] "GET /images/hm_nbg.jpg HTTP/1.0" 304 0', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:27-05:00", + message: + '252.0.0.0 - - [30/Apr/2020:14:31:27 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:28-05:00", + message: "not a valid apache log", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d095b422d9803c02b62c01adffc85376.asciidoc b/docs/doc_examples/d095b422d9803c02b62c01adffc85376.asciidoc new file mode 100644 index 000000000..5177e0ab0 --- /dev/null +++ b/docs/doc_examples/d095b422d9803c02b62c01adffc85376.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.getJobs({ + id: "sensor", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d0a8a938a2fa913b6fdbc871079a59dd.asciidoc b/docs/doc_examples/d0a8a938a2fa913b6fdbc871079a59dd.asciidoc deleted file mode 100644 index 62a4d9455..000000000 --- a/docs/doc_examples/d0a8a938a2fa913b6fdbc871079a59dd.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - term: { - user: { - value: 'Kimchy', - boost: 1 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d0dee031197214b59ff9ac7540527d2c.asciidoc b/docs/doc_examples/d0dee031197214b59ff9ac7540527d2c.asciidoc new file mode 100644 index 000000000..de8e1dc6e --- /dev/null +++ b/docs/doc_examples/d0dee031197214b59ff9ac7540527d2c.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movfn: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.unweightedAvg(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d0fad375f6e074e9067ed93d3faa07bd.asciidoc b/docs/doc_examples/d0fad375f6e074e9067ed93d3faa07bd.asciidoc new file mode 100644 index 000000000..309e1a7ba --- /dev/null +++ b/docs/doc_examples/d0fad375f6e074e9067ed93d3faa07bd.asciidoc @@ -0,0 +1,103 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (491.2350 5237.4081)", + city: "Amsterdam", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (490.1618 5236.9219)", + city: "Amsterdam", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (491.4722 5237.1667)", + city: "Amsterdam", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (440.5200 5122.2900)", + city: "Antwerp", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (233.6389 4886.1111)", + city: "Paris", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (232.7000 4886.0000)", + city: "Paris", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + query: { + match: { + name: "musée", + }, + }, + aggs: { + viewport: { + cartesian_bounds: { + field: "location", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc b/docs/doc_examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc new file mode 100644 index 000000000..1ad80af1d --- /dev/null +++ b/docs/doc_examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + simple_query_string: { + query: "foo | bar + baz*", + flags: "OR|AND|PREFIX", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d11ea753a5d86f7e630fd69a069948b1.asciidoc b/docs/doc_examples/d11ea753a5d86f7e630fd69a069948b1.asciidoc new file mode 100644 index 000000000..8c3fb1686 --- /dev/null +++ b/docs/doc_examples/d11ea753a5d86f7e630fd69a069948b1.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1299b9ae1e621d2fdd0b8644c142ace.asciidoc b/docs/doc_examples/d1299b9ae1e621d2fdd0b8644c142ace.asciidoc new file mode 100644 index 000000000..cb85bad2c --- /dev/null +++ b/docs/doc_examples/d1299b9ae1e621d2fdd0b8644c142ace.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "log-messages", + filter_path: "aggregations", + aggs: { + daily: { + date_histogram: { + field: "time", + fixed_interval: "1d", + }, + aggs: { + categories: { + categorize_text: { + field: "message", + categorization_filters: ["\\w+\\_\\d{3}"], + }, + aggs: { + hit: { + top_hits: { + size: 1, + sort: ["time"], + _source: "message", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d12df43ffcdcd937bae9b26fb475e239.asciidoc 
b/docs/doc_examples/d12df43ffcdcd937bae9b26fb475e239.asciidoc new file mode 100644 index 000000000..bf51e32ce --- /dev/null +++ b/docs/doc_examples/d12df43ffcdcd937bae9b26fb475e239.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "uax_url_email", + text: "Email me at john.smith@global-international.com", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d133b5d82238f7d4778c341cbe0bc969.asciidoc b/docs/doc_examples/d133b5d82238f7d4778c341cbe0bc969.asciidoc new file mode 100644 index 000000000..a42b122e1 --- /dev/null +++ b/docs/doc_examples/d133b5d82238f7d4778c341cbe0bc969.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mtermvectors({ + docs: [ + { + _index: "my-index-000001", + doc: { + message: "test test test", + }, + }, + { + _index: "my-index-000001", + doc: { + message: "Another test ...", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d13c7cdfc976e0c7b70737cd6a7becb8.asciidoc b/docs/doc_examples/d13c7cdfc976e0c7b70737cd6a7becb8.asciidoc new file mode 100644 index 000000000..85d920867 --- /dev/null +++ b/docs/doc_examples/d13c7cdfc976e0c7b70737cd6a7becb8.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + "price.adjusted": { + type: "double", + script: { + source: "emit(doc['price'].value * params.adjustment)", + params: { + adjustment: 0.9, + }, + }, + }, + }, + aggs: { + by_date: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + avg_price: { + rate: { + field: "price.adjusted", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc b/docs/doc_examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc new file mode 100644 index 000000000..9eafbf38d --- /dev/null +++ b/docs/doc_examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.explainLifecycle({ + index: "my-index-000001", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d17269bb80fb63ec0bf37d219e003dcb.asciidoc b/docs/doc_examples/d17269bb80fb63ec0bf37d219e003dcb.asciidoc deleted file mode 100644 index b4973123c..000000000 --- a/docs/doc_examples/d17269bb80fb63ec0bf37d219e003dcb.asciidoc +++ /dev/null @@ -1,32 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - _geo_distance: { - 'pin.location': [ - -70, - 40 - ], - order: 'asc', - unit: 'km', - mode: 'min', - distance_type: 'arc', - ignore_unmapped: true - } - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc b/docs/doc_examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc new file mode 
100644 index 000000000..e21b336f5 --- /dev/null +++ b/docs/doc_examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "persian_example", + settings: { + analysis: { + char_filter: { + zero_width_spaces: { + type: "mapping", + mappings: ["\\u200C=>\\u0020"], + }, + }, + filter: { + persian_stop: { + type: "stop", + stopwords: "_persian_", + }, + }, + analyzer: { + rebuilt_persian: { + tokenizer: "standard", + char_filter: ["zero_width_spaces"], + filter: [ + "lowercase", + "decimal_digit", + "arabic_normalization", + "persian_normalization", + "persian_stop", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1b3b7d2bb2ab90d15fd10318abd24db.asciidoc b/docs/doc_examples/d1b3b7d2bb2ab90d15fd10318abd24db.asciidoc deleted file mode 100644 index c08921115..000000000 --- a/docs/doc_examples/d1b3b7d2bb2ab90d15fd10318abd24db.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - post_date: { - type: 'date' - }, - user: { - type: 'keyword' - }, - name: { - type: 'keyword' - }, - age: { - type: 'integer' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc b/docs/doc_examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc new file mode 100644 index 000000000..8564b2dd5 --- /dev/null +++ b/docs/doc_examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.estimateModelMemory({ + analysis_config: { + bucket_span: "5m", + detectors: [ + { + function: "sum", + field_name: "bytes", + by_field_name: "status", + partition_field_name: "app", + }, + ], + influencers: ["source_ip", "dest_ip"], + }, + overall_cardinality: { + status: 10, + app: 50, + }, + max_bucket_cardinality: { + source_ip: 300, + dest_ip: 30, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1bcf2eb63a462bfdcf01a68e68d5b4a.asciidoc b/docs/doc_examples/d1bcf2eb63a462bfdcf01a68e68d5b4a.asciidoc deleted file mode 100644 index 279780d17..000000000 --- a/docs/doc_examples/d1bcf2eb63a462bfdcf01a68e68d5b4a.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'my_index', - pretty: true, - body: { - query: { - terms: { - color: { - index: 'my_index', - id: '2', - path: 'color' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d1ce66957f8bd84bf01c4bfaee3ba0c3.asciidoc b/docs/doc_examples/d1ce66957f8bd84bf01c4bfaee3ba0c3.asciidoc new file mode 100644 index 000000000..ea46327a9 --- /dev/null +++ b/docs/doc_examples/d1ce66957f8bd84bf01c4bfaee3ba0c3.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", 
+ filter_path: "hits.events._source.@timestamp,hits.events._source.process.pid", + query: '\n process where process.name == "regsvr32.exe"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc b/docs/doc_examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc new file mode 100644 index 000000000..93df61ff0 --- /dev/null +++ b/docs/doc_examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + multi_match: { + query: "How is the weather in Jamaica?", + fields: ["title", "description"], + }, + }, + }, + }, + { + standard: { + query: { + sparse_vector: { + field: "ml.inference.title_expanded.predicted_value", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + boost: 1, + }, + }, + }, + }, + { + standard: { + query: { + sparse_vector: { + field: "ml.inference.description_expanded.predicted_value", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + boost: 1, + }, + }, + }, + }, + ], + window_size: 10, + rank_constant: 20, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc b/docs/doc_examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc new file mode 100644 index 000000000..e19f93f40 --- /dev/null +++ b/docs/doc_examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + name: "my-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1ecce3632ae338b5e329b0e5ff3bed7.asciidoc b/docs/doc_examples/d1ecce3632ae338b5e329b0e5ff3bed7.asciidoc new file mode 100644 index 000000000..1d5b2b25e --- /dev/null +++ b/docs/doc_examples/d1ecce3632ae338b5e329b0e5ff3bed7.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_join_field: { + type: "join", + relations: { + question: "answer", + }, + eager_global_ordinals: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc b/docs/doc_examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc new file mode 100644 index 000000000..623cf32f0 --- /dev/null +++ b/docs/doc_examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + q: "extra:test", + filter_path: "hits.total", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d23452f333b77bf5b463310e2a665560.asciidoc b/docs/doc_examples/d23452f333b77bf5b463310e2a665560.asciidoc new file mode 100644 index 000000000..711ada3a1 --- /dev/null +++ b/docs/doc_examples/d23452f333b77bf5b463310e2a665560.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "my_director", + refresh: "true", + cluster: ["manage"], + indices: [ + { + names: ["index1", "index2"], + privileges: ["manage"], + }, + ], + run_as: ["jacknich", "rdeniro"], + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc b/docs/doc_examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc new file mode 100644 index 000000000..9a5f60d47 --- /dev/null +++ b/docs/doc_examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "russian_example", + settings: { + analysis: { + filter: { + russian_stop: { + type: "stop", + stopwords: "_russian_", + }, + russian_keywords: { + type: "keyword_marker", + keywords: ["пример"], + }, + russian_stemmer: { + type: "stemmer", + language: "russian", + }, + }, + analyzer: { + rebuilt_russian: { + tokenizer: "standard", + filter: [ + "lowercase", + "russian_stop", + "russian_keywords", + "russian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d268aec16bb1eb909b634e856175094c.asciidoc b/docs/doc_examples/d268aec16bb1eb909b634e856175094c.asciidoc new file mode 100644 index 000000000..1e4498566 --- /dev/null +++ b/docs/doc_examples/d268aec16bb1eb909b634e856175094c.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_stop_analyzer: { + type: "stop", + stopwords: ["the", "over"], + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_stop_analyzer", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/d2f52c106685bd8eab47e11d644d7a70.asciidoc b/docs/doc_examples/d2f52c106685bd8eab47e11d644d7a70.asciidoc new file mode 100644 index 000000000..8ebe00905 --- /dev/null +++ b/docs/doc_examples/d2f52c106685bd8eab47e11d644d7a70.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + date: { + type: "date", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + date: "2015-01-01", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + date: "2015-01-01T12:10:30Z", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my-index-000001", + id: 3, + document: { + date: 1420070400001, + }, +}); +console.log(response3); + +const response4 = await client.search({ + index: "my-index-000001", + sort: { + date: "asc", + }, +}); +console.log(response4); +---- diff --git a/docs/doc_examples/d2f6040c058a9555dfa62bb42d896a8f.asciidoc b/docs/doc_examples/d2f6040c058a9555dfa62bb42d896a8f.asciidoc new file mode 100644 index 
000000000..c9196570f --- /dev/null +++ b/docs/doc_examples/d2f6040c058a9555dfa62bb42d896a8f.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_queries1", + query: { + percolate: { + field: "query", + document: { + my_field: "abcd", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d2f6fb271e97fde8685d7744e6718cc7.asciidoc b/docs/doc_examples/d2f6fb271e97fde8685d7744e6718cc7.asciidoc new file mode 100644 index 000000000..93e7ac887 --- /dev/null +++ b/docs/doc_examples/d2f6fb271e97fde8685d7744e6718cc7.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "text_payloads", + id: 1, + document: { + text: "the|0 brown|3 fox|4 is|0 quick|10", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d305110a8cabfbebd1e38d85559d1023.asciidoc b/docs/doc_examples/d305110a8cabfbebd1e38d85559d1023.asciidoc new file mode 100644 index 000000000..42e37aee7 --- /dev/null +++ b/docs/doc_examples/d305110a8cabfbebd1e38d85559d1023.asciidoc @@ -0,0 +1,62 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "cjk_example", + settings: { + analysis: { + filter: { + english_stop: { + type: "stop", + stopwords: [ + "a", + "and", + "are", + "as", + "at", + "be", + "but", + "by", + "for", + "if", + "in", + "into", + "is", + "it", + "no", + "not", + "of", + "on", + "or", + "s", + "such", + "t", + "that", + "the", + "their", + "then", + "there", + "these", + "they", + "this", + "to", + "was", + "will", + "with", + "www", + ], + }, + }, + analyzer: { + rebuilt_cjk: { + tokenizer: "standard", + filter: ["cjk_width", "lowercase", "cjk_bigram", "english_stop"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3088d5fa59b3ab110f64fb4f9b0065c.asciidoc b/docs/doc_examples/d3088d5fa59b3ab110f64fb4f9b0065c.asciidoc deleted file mode 100644 index aed022fbe..000000000 --- a/docs/doc_examples/d3088d5fa59b3ab110f64fb4f9b0065c.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'my_index', - id: '1', - body: { - color: [ - 'blue', - 'green' - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d31062ff8c015387889fed4ad86fd914.asciidoc b/docs/doc_examples/d31062ff8c015387889fed4ad86fd914.asciidoc deleted file mode 100644 index a4c4076b7..000000000 --- a/docs/doc_examples/d31062ff8c015387889fed4ad86fd914.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - wildcard: { - user: { - value: 'ki*y', - boost: 1, - rewrite: 'constant_score' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc b/docs/doc_examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc new file mode 100644 index 000000000..0fc058e58 --- /dev/null +++ 
b/docs/doc_examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.pauseFollow({ + index: "follower_index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d34946f59b6f938b141a37cb0b729308.asciidoc b/docs/doc_examples/d34946f59b6f938b141a37cb0b729308.asciidoc new file mode 100644 index 000000000..522b79829 --- /dev/null +++ b/docs/doc_examples/d34946f59b6f938b141a37cb0b729308.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.putPolicy({ + name: "postal_policy", + geo_match: { + indices: "postal_codes", + match_field: "location", + enrich_fields: ["location", "postal_code"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d35a4d78a8b70c9e4d636efb0a92be9d.asciidoc b/docs/doc_examples/d35a4d78a8b70c9e4d636efb0a92be9d.asciidoc new file mode 100644 index 000000000..80591c90a --- /dev/null +++ b/docs/doc_examples/d35a4d78a8b70c9e4d636efb0a92be9d.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "products", + aggs: { + genres_and_products: { + multi_terms: { + terms: [ + { + field: "genre", + }, + { + field: "product", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc new file mode 100644 index 000000000..39d1ae3a1 --- /dev/null +++ b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.oidcPrepareAuthentication({}); +console.log(response); +---- diff --git a/docs/doc_examples/d37b065a94b3ff65a2a8a204fc3b097c.asciidoc b/docs/doc_examples/d37b065a94b3ff65a2a8a204fc3b097c.asciidoc new file mode 100644 index 000000000..4e26625de --- /dev/null +++ b/docs/doc_examples/d37b065a94b3ff65a2a8a204fc3b097c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.getStatus({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d37b0bda2bd24ab310e6b26708c7c6fb.asciidoc b/docs/doc_examples/d37b0bda2bd24ab310e6b26708c7c6fb.asciidoc new file mode 100644 index 000000000..6967389d1 --- /dev/null +++ b/docs/doc_examples/d37b0bda2bd24ab310e6b26708c7c6fb.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movavg: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "return values.length > 0 ? 
values[0] : Double.NaN", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3a558ef226e9dccc1c7c61e1167547f.asciidoc b/docs/doc_examples/d3a558ef226e9dccc1c7c61e1167547f.asciidoc new file mode 100644 index 000000000..4c5e7872e --- /dev/null +++ b/docs/doc_examples/d3a558ef226e9dccc1c7c61e1167547f.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "geoip", + description: "Add geoip info", + processors: [ + { + geoip: { + field: "ip", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "geoip", + document: { + ip: "80.231.5.0", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc b/docs/doc_examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc new file mode 100644 index 000000000..3a490c68d --- /dev/null +++ b/docs/doc_examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + "http.responses": { + type: "long", + script: + '\n String response=dissect(\'%{clientip} %{ident} %{auth} [%{@timestamp}] "%{verb} %{request} HTTP/%{httpversion}" %{response} %{size}\').extract(doc["message"].value)?.response;\n if (response != null) emit(Integer.parseInt(response));\n ', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3d117fec34301520ccdb26332e7c98a.asciidoc b/docs/doc_examples/d3d117fec34301520ccdb26332e7c98a.asciidoc new file mode 100644 index 000000000..96279479b --- /dev/null +++ b/docs/doc_examples/d3d117fec34301520ccdb26332e7c98a.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + registered_domain: { + field: "fqdn", + target_field: "url", + }, + }, + ], + }, + docs: [ + { + _source: { + fqdn: "www.example.ac.uk", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc b/docs/doc_examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc new file mode 100644 index 000000000..91c379871 --- /dev/null +++ b/docs/doc_examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["content", "name.*^5"], + query: "this AND that OR thus", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc b/docs/doc_examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc new file mode 100644 index 000000000..716555e1c --- /dev/null +++ b/docs/doc_examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "clicks_admin", + run_as: ["clicks_watcher_1"], + cluster: ["monitor"], + indices: [ + { + names: ["events-*"], + privileges: ["read"], + field_security: { + grant: ["category", "@timestamp", "message"], + }, + query: '{"match": {"category": "click"}}', + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3e9e1169c3514fd46e253cd8b5ae3cb.asciidoc b/docs/doc_examples/d3e9e1169c3514fd46e253cd8b5ae3cb.asciidoc new file mode 100644 index 000000000..7d704155a --- /dev/null +++ b/docs/doc_examples/d3e9e1169c3514fd46e253cd8b5ae3cb.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["my_script_filter"], + }, + }, + filter: { + my_script_filter: { + type: "predicate_token_filter", + script: { + source: + '\n token.type.contains("ALPHANUM")\n ', + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4323be84152fa91abd76e966d4751dc.asciidoc b/docs/doc_examples/d4323be84152fa91abd76e966d4751dc.asciidoc new file mode 100644 index 000000000..de913f758 --- /dev/null +++ b/docs/doc_examples/d4323be84152fa91abd76e966d4751dc.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryApiKeys({ + query: { + term: { + name: { + value: "application-key-1", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d443db2755fde3b49ca3a9d296c4a96f.asciidoc b/docs/doc_examples/d443db2755fde3b49ca3a9d296c4a96f.asciidoc new file mode 100644 index 000000000..138641329 --- /dev/null +++ b/docs/doc_examples/d443db2755fde3b49ca3a9d296c4a96f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "delimited_payload", + settings: { + analysis: { + analyzer: { + whitespace_delimited_payload: { + tokenizer: "whitespace", + filter: ["delimited_payload"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d44ecc69090c0b2bc08a6cbc2e3467c5.asciidoc b/docs/doc_examples/d44ecc69090c0b2bc08a6cbc2e3467c5.asciidoc new file mode 100644 index 000000000..c90396ab3 --- /dev/null +++ b/docs/doc_examples/d44ecc69090c0b2bc08a6cbc2e3467c5.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "news", + query: { + simple_query_string: { + query: "+elasticsearch +pozmantier", + }, + }, + _source: ["title", "source"], + highlight: { + fields: { + content: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc b/docs/doc_examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc new file mode 100644 index 000000000..63458b1c0 --- /dev/null +++ b/docs/doc_examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.security.samlCompleteLogout({ + realm: "saml1", + ids: ["_1c368075e0b3..."], + content: "PHNhbWxwOkxvZ291dFJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46...", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc b/docs/doc_examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc new file mode 100644 index 000000000..a1e4b7603 --- /dev/null +++ b/docs/doc_examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getToken({ + grant_type: "refresh_token", + refresh_token: "vLBPvmAB6KvwvJZr27cS", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d49318764244113ad2ac4cc0f06d77ec.asciidoc b/docs/doc_examples/d49318764244113ad2ac4cc0f06d77ec.asciidoc new file mode 100644 index 000000000..3c6a63893 --- /dev/null +++ b/docs/doc_examples/d49318764244113ad2ac4cc0f06d77ec.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "image-index", + mappings: { + properties: { + "image-vector": { + type: "dense_vector", + dims: 3, + similarity: "l2_norm", + index_options: { + type: "hnsw", + m: 32, + ef_construction: 100, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4a41fb74b41b41a0ee114a2311f2815.asciidoc b/docs/doc_examples/d4a41fb74b41b41a0ee114a2311f2815.asciidoc new file mode 100644 index 000000000..beea387be --- /dev/null +++ b/docs/doc_examples/d4a41fb74b41b41a0ee114a2311f2815.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_age: "7d", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc b/docs/doc_examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc new file mode 100644 index 000000000..eaa534005 --- /dev/null +++ b/docs/doc_examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.evaluateDataFrame({ + index: "my_analytics_dest_index", + evaluation: { + outlier_detection: { + actual_field: "is_outlier", + predicted_probability_field: "ml.outlier_score", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4b50ae96e541c0031264a10f6afccbf.asciidoc b/docs/doc_examples/d4b50ae96e541c0031264a10f6afccbf.asciidoc new file mode 100644 index 000000000..2669eba06 --- /dev/null +++ b/docs/doc_examples/d4b50ae96e541c0031264a10f6afccbf.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.migrateToDataStream({ + name: "my-time-series-data", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4cdcf01014c75693b080c778071c1b5.asciidoc 
b/docs/doc_examples/d4cdcf01014c75693b080c778071c1b5.asciidoc new file mode 100644 index 000000000..a7d60de27 --- /dev/null +++ b/docs/doc_examples/d4cdcf01014c75693b080c778071c1b5.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + grades_stats: { + stats: { + field: "grade", + missing: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc b/docs/doc_examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc new file mode 100644 index 000000000..551748bea --- /dev/null +++ b/docs/doc_examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + user: { + id: "kimchy", + }, + "@timestamp": "2099-11-15T14:12:12", + message: "trying out Elasticsearch", + }, + { + index: { + _id: 2, + }, + }, + { + user: { + id: "kimchi", + }, + "@timestamp": "2099-11-15T14:12:13", + message: "My user ID is similar to kimchy!", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc b/docs/doc_examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc new file mode 100644 index 000000000..9f176704b --- /dev/null +++ b/docs/doc_examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.deleteAutoFollowPattern({ + name: "my_auto_follow_pattern", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4fb482a51d67a1af48e429af6019a46.asciidoc b/docs/doc_examples/d4fb482a51d67a1af48e429af6019a46.asciidoc new file mode 100644 index 000000000..2ab3635f9 --- /dev/null +++ b/docs/doc_examples/d4fb482a51d67a1af48e429af6019a46.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + index: { + "sort.field": ["username", "date"], + "sort.order": ["asc", "desc"], + }, + }, + mappings: { + properties: { + username: { + type: "keyword", + doc_values: true, + }, + date: { + type: "date", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d50a3c64890f88af32c6d4ef4899d82a.asciidoc b/docs/doc_examples/d50a3c64890f88af32c6d4ef4899d82a.asciidoc deleted file mode 100644 index 754985636..000000000 --- a/docs/doc_examples/d50a3c64890f88af32c6d4ef4899d82a.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - _geo_distance: { - 'pin.location': '40,-70', - order: 'asc', - unit: 'km' - } - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc b/docs/doc_examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc new 
file mode 100644 index 000000000..4494a29ac --- /dev/null +++ b/docs/doc_examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putTrainedModelAlias({ + model_id: "flight-delay-prediction-1580004349800", + model_alias: "flight_delay_model", + reassign: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d5132d34ae922fa8e898889b627a1405.asciidoc b/docs/doc_examples/d5132d34ae922fa8e898889b627a1405.asciidoc new file mode 100644 index 000000000..663f0c9b5 --- /dev/null +++ b/docs/doc_examples/d5132d34ae922fa8e898889b627a1405.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "child_example", + size: 0, + aggs: { + "top-tags": { + terms: { + field: "tags.keyword", + size: 10, + }, + aggs: { + "to-answers": { + children: { + type: "answer", + }, + aggs: { + "top-names": { + terms: { + field: "owner.display_name.keyword", + size: 10, + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d524db57be9f16abac5396895b9a2a59.asciidoc b/docs/doc_examples/d524db57be9f16abac5396895b9a2a59.asciidoc new file mode 100644 index 000000000..2c534e759 --- /dev/null +++ b/docs/doc_examples/d524db57be9f16abac5396895b9a2a59.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveIndex({ + name: "my-index-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d547d55efbf75374f6de1f224323bc73.asciidoc b/docs/doc_examples/d547d55efbf75374f6de1f224323bc73.asciidoc new file mode 100644 index 000000000..06b300ec2 --- /dev/null +++ b/docs/doc_examples/d547d55efbf75374f6de1f224323bc73.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "geocells", + mappings: { + properties: { + geocell: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.ingest.putPipeline({ + id: "geotile2shape", + description: "translate rectangular z/x/y geotile to bounding box", + processors: [ + { + geo_grid: { + field: "geocell", + tile_type: "geotile", + }, + }, + ], +}); +console.log(response1); + +const response2 = await client.ingest.putPipeline({ + id: "geohex2shape", + description: "translate H3 cell to polygon", + processors: [ + { + geo_grid: { + field: "geocell", + tile_type: "geohex", + target_format: "wkt", + }, + }, + ], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc b/docs/doc_examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc new file mode 100644 index 000000000..1a2273a4e --- /dev/null +++ b/docs/doc_examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + counter: { + type: "integer", + store: 
false, + }, + tags: { + type: "keyword", + store: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d56a9d89282df56adbbc34b91390ac17.asciidoc b/docs/doc_examples/d56a9d89282df56adbbc34b91390ac17.asciidoc new file mode 100644 index 000000000..e987bbba2 --- /dev/null +++ b/docs/doc_examples/d56a9d89282df56adbbc34b91390ac17.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.getAutoFollowPattern({ + name: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d595b40bf1ea71923f9824d0f9c99c49.asciidoc b/docs/doc_examples/d595b40bf1ea71923f9824d0f9c99c49.asciidoc new file mode 100644 index 000000000..6c2200014 --- /dev/null +++ b/docs/doc_examples/d595b40bf1ea71923f9824d0f9c99c49.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_query_rules/my-ruleset", + body: { + rules: [ + { + rule_id: "rule1", + type: "pinned", + criteria: [ + { + type: "fuzzy", + metadata: "query_string", + values: ["puggles", "pugs"], + }, + { + type: "exact", + metadata: "user_country", + values: ["us"], + }, + ], + actions: { + ids: ["id1", "id2"], + }, + }, + { + rule_id: "rule2", + type: "pinned", + criteria: [ + { + type: "contains", + metadata: "query_string", + values: ["beagles"], + }, + ], + actions: { + docs: [ + { + _index: "my-index-000001", + _id: "id3", + }, + { + _index: "my-index-000002", + _id: "id4", + }, + ], + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d59e9cc75814575aa5e275dbe262918c.asciidoc b/docs/doc_examples/d59e9cc75814575aa5e275dbe262918c.asciidoc new file mode 100644 index 000000000..1e76b4c4b --- /dev/null +++ b/docs/doc_examples/d59e9cc75814575aa5e275dbe262918c.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + geo_grid: { + location: { + geohash: "u0", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d5abaf1fd26f0abf410dd8827d077bbf.asciidoc b/docs/doc_examples/d5abaf1fd26f0abf410dd8827d077bbf.asciidoc new file mode 100644 index 000000000..20d959497 --- /dev/null +++ b/docs/doc_examples/d5abaf1fd26f0abf410dd8827d077bbf.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match_all: {}, + }, + sort: ["my_id"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d5bf9bc08f622ece98632a14a3982e27.asciidoc b/docs/doc_examples/d5bf9bc08f622ece98632a14a3982e27.asciidoc new file mode 100644 index 000000000..a6bdb28e8 --- /dev/null +++ b/docs/doc_examples/d5bf9bc08f622ece98632a14a3982e27.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_all: {}, + }, + script_fields: { + test1: { + script: "params['_source']['message']", + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc b/docs/doc_examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc new file mode 100644 index 000000000..1d3f9bd7e --- /dev/null +++ b/docs/doc_examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-000001", + _source: ["user.id", "_doc"], + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc b/docs/doc_examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc index a0b22affe..37aa7ee9b 100644 --- a/docs/doc_examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc +++ b/docs/doc_examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc @@ -4,13 +4,8 @@ [source, js] ---- const response = await client.search({ - scroll: '1m', - body: { - sort: [ - '_doc' - ] - } -}) -console.log(response) + scroll: "1m", + sort: ["_doc"], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc b/docs/doc_examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc new file mode 100644 index 000000000..631a13ebf --- /dev/null +++ b/docs/doc_examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.get({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc b/docs/doc_examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc new file mode 100644 index 000000000..8f4d19539 --- /dev/null +++ b/docs/doc_examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.recovery({ + v: "true", + h: "i,s,t,ty,st,rep,snap,f,fp,b,bp", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d64679f8a53928fe9958dbe5ee5d9d13.asciidoc b/docs/doc_examples/d64679f8a53928fe9958dbe5ee5d9d13.asciidoc new file mode 100644 index 000000000..f00794963 --- /dev/null +++ b/docs/doc_examples/d64679f8a53928fe9958dbe5ee5d9d13.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + parent_id: { + type: "answer", + id: "1", + }, + }, + aggs: { + parents: { + terms: { + field: "my_join_field#question", + size: 10, + }, + }, + }, + runtime_mappings: { + parent: { + type: "long", + script: + "\n emit(Integer.parseInt(doc['my_join_field#question'].value)) \n ", + }, + }, + fields: [ + { + field: "parent", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d64d509440afbed7cefd04b6898962eb.asciidoc b/docs/doc_examples/d64d509440afbed7cefd04b6898962eb.asciidoc new file mode 100644 index 000000000..8643f368a --- /dev/null +++ b/docs/doc_examples/d64d509440afbed7cefd04b6898962eb.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_geoshapes", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "200km", + "pin.location": { + lat: 40, + lon: -70, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d66e2b4d1931bf88c72e74670156e43f.asciidoc b/docs/doc_examples/d66e2b4d1931bf88c72e74670156e43f.asciidoc new file mode 100644 index 000000000..f3794bd47 --- /dev/null +++ b/docs/doc_examples/d66e2b4d1931bf88c72e74670156e43f.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + track_total_hits: 100, + query: { + match: { + "user.id": "elkbee", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d681508a745b2bc777d47ba606d24224.asciidoc b/docs/doc_examples/d681508a745b2bc777d47ba606d24224.asciidoc new file mode 100644 index 000000000..9a01deced --- /dev/null +++ b/docs/doc_examples/d681508a745b2bc777d47ba606d24224.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.fielddata({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d681b643da0d7f0a384f627b6d56111b.asciidoc b/docs/doc_examples/d681b643da0d7f0a384f627b6d56111b.asciidoc new file mode 100644 index 000000000..12af9491f --- /dev/null +++ b/docs/doc_examples/d681b643da0d7f0a384f627b6d56111b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + message: { + type: "wildcard", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d690a6af462c70a783625a323e11c72c.asciidoc b/docs/doc_examples/d690a6af462c70a783625a323e11c72c.asciidoc new file mode 100644 index 000000000..b6c413d2d --- /dev/null +++ b/docs/doc_examples/d690a6af462c70a783625a323e11c72c.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test-index", + settings: { + number_of_shards: 1, + number_of_replicas: 1, + "index.lifecycle.name": "my_policy", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d69bd36335774c8ae1286cee21310241.asciidoc b/docs/doc_examples/d69bd36335774c8ae1286cee21310241.asciidoc new file mode 100644 index 000000000..d74ae75be --- /dev/null +++ b/docs/doc_examples/d69bd36335774c8ae1286cee21310241.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote-search", + remote_indices: [ + { + clusters: ["my_remote_cluster"], + names: ["target-index"], + privileges: ["read", "read_cross_cluster", "view_index_metadata"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d69cf7c82602431d9e339583e7dfb988.asciidoc 
b/docs/doc_examples/d69cf7c82602431d9e339583e7dfb988.asciidoc new file mode 100644 index 000000000..935a516fc --- /dev/null +++ b/docs/doc_examples/d69cf7c82602431d9e339583e7dfb988.asciidoc @@ -0,0 +1,48 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + std_english: { + type: "standard", + stopwords: "_english_", + }, + }, + }, + }, + mappings: { + properties: { + my_text: { + type: "text", + analyzer: "standard", + fields: { + english: { + type: "text", + analyzer: "std_english", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + field: "my_text", + text: "The old brown cow", +}); +console.log(response1); + +const response2 = await client.indices.analyze({ + index: "my-index-000001", + field: "my_text.english", + text: "The old brown cow", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc b/docs/doc_examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc new file mode 100644 index 000000000..328c10112 --- /dev/null +++ b/docs/doc_examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector", + querystring: { + from: "0", + size: "2", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d70f55cd29cdb2dcd775ffa9e23ff393.asciidoc b/docs/doc_examples/d70f55cd29cdb2dcd775ffa9e23ff393.asciidoc new file mode 100644 index 000000000..812d3b354 --- /dev/null +++ b/docs/doc_examples/d70f55cd29cdb2dcd775ffa9e23ff393.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + "price.adjusted": { + type: "double", + script: + "\n double price = doc['price'].value;\n if (doc['promoted'].value) {\n price *= 0.8;\n }\n emit(price);\n ", + }, + }, + aggs: { + max_price: { + max: { + field: "price.adjusted", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc b/docs/doc_examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc new file mode 100644 index 000000000..e06b1bd91 --- /dev/null +++ b/docs/doc_examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.features.resetFeatures(); +console.log(response); +---- diff --git a/docs/doc_examples/d718b63cf1b6591a1d59a0cf4fd995eb.asciidoc b/docs/doc_examples/d718b63cf1b6591a1d59a0cf4fd995eb.asciidoc deleted file mode 100644 index 318bce61b..000000000 --- a/docs/doc_examples/d718b63cf1b6591a1d59a0cf4fd995eb.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - id: '1', - op_type: 'create', - body: { - user: 'kimchy', - 
post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d7348119df9f89a556a7b767d5298c7e.asciidoc b/docs/doc_examples/d7348119df9f89a556a7b767d5298c7e.asciidoc new file mode 100644 index 000000000..01a333e19 --- /dev/null +++ b/docs/doc_examples/d7348119df9f89a556a7b767d5298c7e.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "tour", + filter_path: "aggregations", + aggregations: { + path: { + terms: { + field: "city", + }, + aggregations: { + museum_tour: { + geo_line: { + point: { + field: "location", + }, + sort: { + field: "@timestamp", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7717318d93d0a1f3ad049f9c6604417.asciidoc b/docs/doc_examples/d7717318d93d0a1f3ad049f9c6604417.asciidoc new file mode 100644 index 000000000..8cf16de62 --- /dev/null +++ b/docs/doc_examples/d7717318d93d0a1f3ad049f9c6604417.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "standard", + max_token_length: 5, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/d775836a0d7abecc6637aa988f204c30.asciidoc b/docs/doc_examples/d775836a0d7abecc6637aa988f204c30.asciidoc new file mode 100644 index 000000000..5e976e1de --- /dev/null +++ b/docs/doc_examples/d775836a0d7abecc6637aa988f204c30.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + fullname: "John Doe", + text: "test test test ", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "wait_for", + document: { + fullname: "Jane Doe", + text: "Another test ...", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/d7898526d239d2aea83727fb982f8f77.asciidoc b/docs/doc_examples/d7898526d239d2aea83727fb982f8f77.asciidoc new file mode 100644 index 000000000..de62a08de --- /dev/null +++ b/docs/doc_examples/d7898526d239d2aea83727fb982f8f77.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.refresh(); +console.log(response); +---- diff --git a/docs/doc_examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc b/docs/doc_examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc new file mode 100644 index 000000000..649a0757e --- /dev/null +++ b/docs/doc_examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + my_field: { + type: "text", + fielddata: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7a55a7c491e97079e429483085f1d58.asciidoc b/docs/doc_examples/d7a55a7c491e97079e429483085f1d58.asciidoc new file mode 100644 index 000000000..7eef74763 --- /dev/null +++ b/docs/doc_examples/d7a55a7c491e97079e429483085f1d58.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "dsl-data-stream-template", + index_patterns: ["dsl-data-stream*"], + data_stream: {}, + priority: 500, + template: { + settings: { + "index.lifecycle.name": "pre-dsl-ilm-policy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc b/docs/doc_examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc new file mode 100644 index 000000000..8bfce16e0 --- /dev/null +++ b/docs/doc_examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + term: { + text: "shoes", + }, + }, + }, + }, + { + standard: { + query: { + semantic: { + field: "semantic_field", + query: "shoes", + }, + }, + }, + }, + ], + rank_window_size: 50, + rank_constant: 20, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc b/docs/doc_examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc new file mode 100644 index 000000000..afe5e6046 --- /dev/null +++ b/docs/doc_examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.startDatafeed({ + datafeed_id: "datafeed-low_request_rate", + start: "2019-04-07T18:22:16Z", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7b61bfb6adb22986a43388b823894cc.asciidoc b/docs/doc_examples/d7b61bfb6adb22986a43388b823894cc.asciidoc new file mode 100644 index 000000000..2a932f6a2 --- /dev/null +++ b/docs/doc_examples/d7b61bfb6adb22986a43388b823894cc.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/cohere_embeddings", + body: { + service: "cohere", + service_settings: { + api_key: "<api_key>", + model_id: "embed-english-v3.0", + embedding_type: "byte", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7d92816cac64b7c70d72b0000eeeeea.asciidoc b/docs/doc_examples/d7d92816cac64b7c70d72b0000eeeeea.asciidoc new file mode 100644 index 000000000..e854a2e49 --- /dev/null +++ b/docs/doc_examples/d7d92816cac64b7c70d72b0000eeeeea.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role3", + indices: [ + { + names: ["*"], +
privileges: ["read"], + field_security: { + grant: ["customer.handle"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7f42d1b906dc406be1819d17c625d5f.asciidoc b/docs/doc_examples/d7f42d1b906dc406be1819d17c625d5f.asciidoc new file mode 100644 index 000000000..6dd63ecb0 --- /dev/null +++ b/docs/doc_examples/d7f42d1b906dc406be1819d17c625d5f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + filter_path: "aggregations", + aggs: { + t_shirts: { + filter: { + term: { + type: "t-shirt", + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7fe687201ac87b307cd06ed015dd317.asciidoc b/docs/doc_examples/d7fe687201ac87b307cd06ed015dd317.asciidoc new file mode 100644 index 000000000..c0b8ab7d9 --- /dev/null +++ b/docs/doc_examples/d7fe687201ac87b307cd06ed015dd317.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + user_id: { + type: "keyword", + ignore_above: 100, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc b/docs/doc_examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc new file mode 100644 index 000000000..76fcaf558 --- /dev/null +++ b/docs/doc_examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-data-stream", + query: { + range: { + "@timestamp": { + gte: "now-7d/d", + lte: "now/d", + }, + }, + }, + }, + dest: { + index: "new-data-stream", + op_type: "create", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc b/docs/doc_examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc new file mode 100644 index 000000000..efb173b6b --- /dev/null +++ b/docs/doc_examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-stored-script", + script: { + lang: "painless", + source: "Math.log(_score * 2) + params['my_modifier']", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d8310e5606c61e7a6e64a90838b1a830.asciidoc b/docs/doc_examples/d8310e5606c61e7a6e64a90838b1a830.asciidoc new file mode 100644 index 000000000..2ebf0acd5 --- /dev/null +++ b/docs/doc_examples/d8310e5606c61e7a6e64a90838b1a830.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "parent_example", + id: 2, + routing: 1, + document: { + join: { + name: "answer", + parent: "1", + }, + owner: { + location: "Norfolk, United Kingdom", + display_name: "Sam", + id: 48, + }, + body: "Unfortunately you're pretty much limited to FTP...", + creation_date: "2009-05-04T13:45:37.030", + }, 
+}); +console.log(response); + +const response1 = await client.index({ + index: "parent_example", + id: 3, + routing: 1, + refresh: "true", + document: { + join: { + name: "answer", + parent: "1", + }, + owner: { + location: "Norfolk, United Kingdom", + display_name: "Troll", + id: 49, + }, + body: "Use Linux...", + creation_date: "2009-05-05T13:45:37.030", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc b/docs/doc_examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc new file mode 100644 index 000000000..ea74db82f --- /dev/null +++ b/docs/doc_examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + percolate: { + field: "query", + document: { + message: "The quick brown fox jumps over the lazy dog", + }, + }, + }, + highlight: { + fields: { + message: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d84a861ce563508aeaaf30a9dd84b5cf.asciidoc b/docs/doc_examples/d84a861ce563508aeaaf30a9dd84b5cf.asciidoc new file mode 100644 index 000000000..6deb4fde6 --- /dev/null +++ b/docs/doc_examples/d84a861ce563508aeaaf30a9dd84b5cf.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_age: "7d", + max_size: "100gb", + min_docs: 1000, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d851282dba548251d10db5954a339307.asciidoc b/docs/doc_examples/d851282dba548251d10db5954a339307.asciidoc new file mode 100644 index 000000000..0dcc1cb4e --- /dev/null +++ b/docs/doc_examples/d851282dba548251d10db5954a339307.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "twitter", + query: { + match: { + title: "elasticsearch", + }, + }, + search_after: [1463538857, "654323"], + sort: [ + { + date: "asc", + }, + { + tie_breaker_id: "asc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d870d5bd1f97fc75872a298fcddec513.asciidoc b/docs/doc_examples/d870d5bd1f97fc75872a298fcddec513.asciidoc new file mode 100644 index 000000000..0d7463275 --- /dev/null +++ b/docs/doc_examples/d870d5bd1f97fc75872a298fcddec513.asciidoc @@ -0,0 +1,155 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.textStructure.findStructure({ + text_files: [ + { + name: "Leviathan Wakes", + author: "James S.A. 
Corey", + release_date: "2011-06-02", + page_count: 561, + }, + { + name: "Hyperion", + author: "Dan Simmons", + release_date: "1989-05-26", + page_count: 482, + }, + { + name: "Dune", + author: "Frank Herbert", + release_date: "1965-06-01", + page_count: 604, + }, + { + name: "Dune Messiah", + author: "Frank Herbert", + release_date: "1969-10-15", + page_count: 331, + }, + { + name: "Children of Dune", + author: "Frank Herbert", + release_date: "1976-04-21", + page_count: 408, + }, + { + name: "God Emperor of Dune", + author: "Frank Herbert", + release_date: "1981-05-28", + page_count: 454, + }, + { + name: "Consider Phlebas", + author: "Iain M. Banks", + release_date: "1987-04-23", + page_count: 471, + }, + { + name: "Pandora's Star", + author: "Peter F. Hamilton", + release_date: "2004-03-02", + page_count: 768, + }, + { + name: "Revelation Space", + author: "Alastair Reynolds", + release_date: "2000-03-15", + page_count: 585, + }, + { + name: "A Fire Upon the Deep", + author: "Vernor Vinge", + release_date: "1992-06-01", + page_count: 613, + }, + { + name: "Ender's Game", + author: "Orson Scott Card", + release_date: "1985-06-01", + page_count: 324, + }, + { + name: "1984", + author: "George Orwell", + release_date: "1985-06-01", + page_count: 328, + }, + { + name: "Fahrenheit 451", + author: "Ray Bradbury", + release_date: "1953-10-15", + page_count: 227, + }, + { + name: "Brave New World", + author: "Aldous Huxley", + release_date: "1932-06-01", + page_count: 268, + }, + { + name: "Foundation", + author: "Isaac Asimov", + release_date: "1951-06-01", + page_count: 224, + }, + { + name: "The Giver", + author: "Lois Lowry", + release_date: "1993-04-26", + page_count: 208, + }, + { + name: "Slaughterhouse-Five", + author: "Kurt Vonnegut", + release_date: "1969-06-01", + page_count: 275, + }, + { + name: "The Hitchhiker's Guide to the Galaxy", + author: "Douglas Adams", + release_date: "1979-10-12", + page_count: 180, + }, + { + name: "Snow Crash", + author: "Neal Stephenson", + release_date: "1992-06-01", + page_count: 470, + }, + { + name: "Neuromancer", + author: "William Gibson", + release_date: "1984-07-01", + page_count: 271, + }, + { + name: "The Handmaid's Tale", + author: "Margaret Atwood", + release_date: "1985-06-01", + page_count: 311, + }, + { + name: "Starship Troopers", + author: "Robert A. Heinlein", + release_date: "1959-12-01", + page_count: 335, + }, + { + name: "The Left Hand of Darkness", + author: "Ursula K. Le Guin", + release_date: "1969-06-01", + page_count: 304, + }, + { + name: "The Moon is a Harsh Mistress", + author: "Robert A. 
Heinlein", + release_date: "1966-04-01", + page_count: 288, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/67bba546d835bca8f31df13e3587c348.asciidoc b/docs/doc_examples/d87175daed2327565d4325528c6d8b38.asciidoc similarity index 73% rename from docs/doc_examples/67bba546d835bca8f31df13e3587c348.asciidoc rename to docs/doc_examples/d87175daed2327565d4325528c6d8b38.asciidoc index 9e6ddc036..60b8237df 100644 --- a/docs/doc_examples/67bba546d835bca8f31df13e3587c348.asciidoc +++ b/docs/doc_examples/d87175daed2327565d4325528c6d8b38.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.get({ - index: 'test', - id: '1' -}) -console.log(response) + index: "my-index-000001", + id: 0, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/d87cfcc0a297f75ffe646b2e61940d14.asciidoc b/docs/doc_examples/d87cfcc0a297f75ffe646b2e61940d14.asciidoc new file mode 100644 index 000000000..4fe177b8f --- /dev/null +++ b/docs/doc_examples/d87cfcc0a297f75ffe646b2e61940d14.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "uppercase_example", + settings: { + analysis: { + analyzer: { + whitespace_uppercase: { + tokenizer: "whitespace", + filter: ["uppercase"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d880630b6f7dc634c4078293f9cd3d80.asciidoc b/docs/doc_examples/d880630b6f7dc634c4078293f9cd3d80.asciidoc new file mode 100644 index 000000000..25e54fdb2 --- /dev/null +++ b/docs/doc_examples/d880630b6f7dc634c4078293f9cd3d80.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + size: 2, + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + order: "desc", + }, + }, + }, + { + product: { + terms: { + field: "product", + order: "asc", + }, + }, + }, + ], + after: { + date: 1494288000000, + product: "mad max", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d88f883ed2fb8be35cd3e72ddffcf4ef.asciidoc b/docs/doc_examples/d88f883ed2fb8be35cd3e72ddffcf4ef.asciidoc new file mode 100644 index 000000000..2c104ebce --- /dev/null +++ b/docs/doc_examples/d88f883ed2fb8be35cd3e72ddffcf4ef.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "length_custom_example", + settings: { + analysis: { + analyzer: { + whitespace_length_2_to_10_char: { + tokenizer: "whitespace", + filter: ["length_2_to_10_char"], + }, + }, + filter: { + length_2_to_10_char: { + type: "length", + min: 2, + max: 10, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d89d36741d906a71eca6c144e8d83889.asciidoc b/docs/doc_examples/d89d36741d906a71eca6c144e8d83889.asciidoc new file mode 100644 index 000000000..d45a6b65d --- /dev/null +++ b/docs/doc_examples/d89d36741d906a71eca6c144e8d83889.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.cancel({ 
+ task_id: "oTUltX4IQMOUUVeiohTt8A:12345", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc b/docs/doc_examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc new file mode 100644 index 000000000..60093c26e --- /dev/null +++ b/docs/doc_examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + h: "name,node*,heap*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d8b2a88b5eca99d3691ad3cd40266736.asciidoc b/docs/doc_examples/d8b2a88b5eca99d3691ad3cd40266736.asciidoc deleted file mode 100644 index 3526a8624..000000000 --- a/docs/doc_examples/d8b2a88b5eca99d3691ad3cd40266736.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my-index', - body: { - mappings: { - properties: { - age: { - type: 'integer' - }, - email: { - type: 'keyword' - }, - name: { - type: 'text' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d8c401a5b7359ec65947b9f35ecf6927.asciidoc b/docs/doc_examples/d8c401a5b7359ec65947b9f35ecf6927.asciidoc new file mode 100644 index 000000000..402a1ea30 --- /dev/null +++ b/docs/doc_examples/d8c401a5b7359ec65947b9f35ecf6927.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "ngram", + min_gram: 3, + max_gram: 3, + token_chars: ["letter", "digit"], + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "2 Quick Foxes.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/d8ea6a1a1c546bf29f65f8c65439b156.asciidoc b/docs/doc_examples/d8ea6a1a1c546bf29f65f8c65439b156.asciidoc new file mode 100644 index 000000000..f037ca9a4 --- /dev/null +++ b/docs/doc_examples/d8ea6a1a1c546bf29f65f8c65439b156.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "byte-image-index", + mappings: { + properties: { + "byte-image-vector": { + type: "dense_vector", + element_type: "byte", + dims: 2, + }, + title: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d8fa7ca2ec8dbfa034603ea566e33f5b.asciidoc b/docs/doc_examples/d8fa7ca2ec8dbfa034603ea566e33f5b.asciidoc new file mode 100644 index 000000000..cc29b6058 --- /dev/null +++ b/docs/doc_examples/d8fa7ca2ec8dbfa034603ea566e33f5b.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + filter_path: "aggregations", + aggs: { + the_filter: { + filters: { + keyed: false, + filters: { + "t-shirt": { + term: { + 
type: "t-shirt", + }, + }, + hat: { + term: { + type: "hat", + }, + }, + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + sort_by_avg_price: { + bucket_sort: { + sort: { + avg_price: "asc", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc b/docs/doc_examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc new file mode 100644 index 000000000..d6c59ff90 --- /dev/null +++ b/docs/doc_examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "my-aliases", + template: { + aliases: { + "my-alias": {}, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.putIndexTemplate({ + name: "my-index-template", + index_patterns: ["my-index-*"], + composed_of: ["my-aliases", "my-mappings", "my-settings"], + template: { + aliases: { + "yet-another-alias": {}, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/d9474f66970c6955e24b17c7447e7b5f.asciidoc b/docs/doc_examples/d9474f66970c6955e24b17c7447e7b5f.asciidoc deleted file mode 100644 index 789b778b3..000000000 --- a/docs/doc_examples/d9474f66970c6955e24b17c7447e7b5f.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - name: { - properties: { - first: { - type: 'text' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d94f666616dea141dcb7aaf08a35bc10.asciidoc b/docs/doc_examples/d94f666616dea141dcb7aaf08a35bc10.asciidoc new file mode 100644 index 000000000..617d7bfe4 --- /dev/null +++ b/docs/doc_examples/d94f666616dea141dcb7aaf08a35bc10.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "keep_types", + types: [""], + mode: "exclude", + }, + ], + text: "1 quick fox 2 lazy dogs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d952ac7c73219d8cabc080679e035514.asciidoc b/docs/doc_examples/d952ac7c73219d8cabc080679e035514.asciidoc new file mode 100644 index 000000000..7e7400f38 --- /dev/null +++ b/docs/doc_examples/d952ac7c73219d8cabc080679e035514.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + knn: { + field: "my_embeddings.predicted_value", + k: 10, + num_candidates: 100, + query_vector_builder: { + text_embedding: { + model_id: "sentence-transformers__msmarco-minilm-l-12-v3", + model_text: "the query string", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d979f934af0992fb8c8596beff80b638.asciidoc b/docs/doc_examples/d979f934af0992fb8c8596beff80b638.asciidoc new file mode 100644 index 000000000..77cbe0562 --- /dev/null +++ b/docs/doc_examples/d979f934af0992fb8c8596beff80b638.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + _source: ["obj1.*", "obj2.*"], + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d983c1ea730eeabac9e914656d7c9be2.asciidoc b/docs/doc_examples/d983c1ea730eeabac9e914656d7c9be2.asciidoc new file mode 100644 index 000000000..dd82fb061 --- /dev/null +++ b/docs/doc_examples/d983c1ea730eeabac9e914656d7c9be2.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "latvian_example", + settings: { + analysis: { + filter: { + latvian_stop: { + type: "stop", + stopwords: "_latvian_", + }, + latvian_keywords: { + type: "keyword_marker", + keywords: ["piemērs"], + }, + latvian_stemmer: { + type: "stemmer", + language: "latvian", + }, + }, + analyzer: { + rebuilt_latvian: { + tokenizer: "standard", + filter: [ + "lowercase", + "latvian_stop", + "latvian_keywords", + "latvian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc b/docs/doc_examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc new file mode 100644 index 000000000..e8c8e1c1e --- /dev/null +++ b/docs/doc_examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + timestamp: { + type: "date", + }, + temperature: { + type: "long", + }, + voltage: { + type: "double", + }, + node: { + type: "keyword", + }, + voltage_corrected: { + type: "double", + on_script_error: "fail", + script: { + source: + "\n emit(doc['voltage'].value * params['multiplier'])\n ", + params: { + multiplier: 4, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d9a1ad1c5746b75972c74dd4d3a3d623.asciidoc b/docs/doc_examples/d9a1ad1c5746b75972c74dd4d3a3d623.asciidoc new file mode 100644 index 000000000..a010647b8 --- /dev/null +++ b/docs/doc_examples/d9a1ad1c5746b75972c74dd4d3a3d623.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_join_field: { + type: "join", + relations: { + question: ["answer", "comment"], + answer: "vote", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d9de409a4a197ce7cbe3714e07155d34.asciidoc b/docs/doc_examples/d9de409a4a197ce7cbe3714e07155d34.asciidoc new file mode 100644 index 000000000..59f9b118c --- /dev/null +++ b/docs/doc_examples/d9de409a4a197ce7cbe3714e07155d34.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + query: { + match: { + body: "foo", + }, + }, + functions: [ + { + script_score: { + script: { + source: "pure_df", + lang: "expert_scripts", + params: { + field: "body", + term: "foo", + }, + }, + }, + }, + ], + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/d9e0cba8e150681d861f5fd1545514e2.asciidoc b/docs/doc_examples/d9e0cba8e150681d861f5fd1545514e2.asciidoc new file mode 100644 index 000000000..9c9f58277 --- /dev/null +++ b/docs/doc_examples/d9e0cba8e150681d861f5fd1545514e2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: + "SELECT YEAR(release_date) AS year FROM library WHERE page_count > ? AND author = ? GROUP BY year HAVING COUNT(*) > ?", + params: [300, "Frank Herbert", 0], +}); +console.log(response); +---- diff --git a/docs/doc_examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc b/docs/doc_examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc new file mode 100644 index 000000000..ed9394a53 --- /dev/null +++ b/docs/doc_examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must: { + match: { + body: "elasticsearch", + }, + }, + should: { + rank_feature: { + field: "pagerank", + saturation: { + pivot: 10, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/da18bae37cda566c0254b30c15221b01.asciidoc b/docs/doc_examples/da18bae37cda566c0254b30c15221b01.asciidoc new file mode 100644 index 000000000..c120c31c8 --- /dev/null +++ b/docs/doc_examples/da18bae37cda566c0254b30c15221b01.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedServiceTokens({ + namespace: "elastic", + service: "fleet-server", + name: "token1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/da24c13eee8c9aeae9a23faf80489e31.asciidoc b/docs/doc_examples/da24c13eee8c9aeae9a23faf80489e31.asciidoc new file mode 100644 index 000000000..a1f77cba3 --- /dev/null +++ b/docs/doc_examples/da24c13eee8c9aeae9a23faf80489e31.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "my-index", +}); +console.log(response); + +const response1 = await client.reindex({ + source: { + index: "restored-my-index", + }, + dest: { + index: "my-index", + }, +}); +console.log(response1); + +const response2 = await client.indices.deleteDataStream({ + name: "logs-my_app-default", +}); +console.log(response2); + +const response3 = await client.reindex({ + source: { + index: "restored-logs-my_app-default", + }, + dest: { + index: "logs-my_app-default", + op_type: "create", + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/da3cecc36a7313385d32c7f52ccfb7e3.asciidoc b/docs/doc_examples/da3cecc36a7313385d32c7f52ccfb7e3.asciidoc new file mode 100644 index 000000000..4439e3423 --- /dev/null +++ b/docs/doc_examples/da3cecc36a7313385d32c7f52ccfb7e3.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + "date.day_of_week": { + type: 
"keyword", + script: + "emit(doc['date'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))", + }, + }, + aggs: { + day_of_week: { + terms: { + field: "date.day_of_week", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/da3f280bc65b581fb3097be768061bee.asciidoc b/docs/doc_examples/da3f280bc65b581fb3097be768061bee.asciidoc new file mode 100644 index 000000000..8297d5687 --- /dev/null +++ b/docs/doc_examples/da3f280bc65b581fb3097be768061bee.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlPrepareAuthentication({ + acs: "https://kibana.org/api/security/saml/callback", +}); +console.log(response); +---- diff --git a/docs/doc_examples/da8db0769dff7305f178c12b1111bc99.asciidoc b/docs/doc_examples/da8db0769dff7305f178c12b1111bc99.asciidoc new file mode 100644 index 000000000..1e6dc21fe --- /dev/null +++ b/docs/doc_examples/da8db0769dff7305f178c12b1111bc99.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + simple_query_string: { + query: "this is a test", + fields: ["subject^3", "message"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/da90e457e2a34fe47dd82a0a2f336095.asciidoc b/docs/doc_examples/da90e457e2a34fe47dd82a0a2f336095.asciidoc new file mode 100644 index 000000000..908eb903c --- /dev/null +++ b/docs/doc_examples/da90e457e2a34fe47dd82a0a2f336095.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "networks", + id: 1, + refresh: "wait_for", + document: { + range: "10.100.0.0/16", + name: "production", + department: "OPS", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc b/docs/doc_examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc new file mode 100644 index 000000000..4e1d675ae --- /dev/null +++ b/docs/doc_examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.exclude._name": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dabb159e0b3456024889fb9754a10655.asciidoc b/docs/doc_examples/dabb159e0b3456024889fb9754a10655.asciidoc new file mode 100644 index 000000000..1323fde55 --- /dev/null +++ b/docs/doc_examples/dabb159e0b3456024889fb9754a10655.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example", + mappings: { + properties: { + geometry: { + type: "shape", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dabcf0bead37cae1d3e5d2813fd3ccfe.asciidoc b/docs/doc_examples/dabcf0bead37cae1d3e5d2813fd3ccfe.asciidoc new file mode 100644 index 000000000..f96585338 --- /dev/null +++ b/docs/doc_examples/dabcf0bead37cae1d3e5d2813fd3ccfe.asciidoc @@ -0,0 +1,15 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + query_string: { + query: 'ip_addr:"2001:db8::/48"', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc b/docs/doc_examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc new file mode 100644 index 000000000..724f4c75b --- /dev/null +++ b/docs/doc_examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc @@ -0,0 +1,82 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDataFrameAnalytics({ + id: "flight_prices", + source: { + index: ["kibana_sample_data_flights"], + }, + dest: { + index: "kibana_sample_flight_prices", + }, + analysis: { + regression: { + dependent_variable: "AvgTicketPrice", + num_top_feature_importance_values: 2, + feature_processors: [ + { + frequency_encoding: { + field: "DestWeather", + feature_name: "DestWeather_frequency", + frequency_map: { + Rain: 0.14604811155570188, + "Heavy Fog": 0.14604811155570188, + "Thunder & Lightning": 0.14604811155570188, + Cloudy: 0.14604811155570188, + "Damaging Wind": 0.14604811155570188, + Hail: 0.14604811155570188, + Sunny: 0.14604811155570188, + Clear: 0.14604811155570188, + }, + }, + }, + { + target_mean_encoding: { + field: "DestWeather", + feature_name: "DestWeather_targetmean", + target_map: { + Rain: 626.5588814585794, + "Heavy Fog": 626.5588814585794, + "Thunder & Lightning": 626.5588814585794, + Hail: 626.5588814585794, + "Damaging Wind": 626.5588814585794, + Cloudy: 626.5588814585794, + Clear: 626.5588814585794, + Sunny: 626.5588814585794, + }, + default_value: 624.0249512020454, + }, + }, + { + one_hot_encoding: { + field: "DestWeather", + hot_map: { + Rain: "DestWeather_Rain", + "Heavy Fog": "DestWeather_Heavy Fog", + "Thunder & Lightning": "DestWeather_Thunder & Lightning", + Cloudy: "DestWeather_Cloudy", + "Damaging Wind": "DestWeather_Damaging Wind", + Hail: "DestWeather_Hail", + Clear: "DestWeather_Clear", + Sunny: "DestWeather_Sunny", + }, + }, + }, + ], + }, + }, + analyzed_fields: { + includes: [ + "AvgTicketPrice", + "Cancelled", + "DestWeather", + "FlightDelayMin", + "DistanceMiles", + ], + }, + model_memory_limit: "30mb", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dad2d4add751fde5c39475ca709cc14b.asciidoc b/docs/doc_examples/dad2d4add751fde5c39475ca709cc14b.asciidoc new file mode 100644 index 000000000..25d579fbc --- /dev/null +++ b/docs/doc_examples/dad2d4add751fde5c39475ca709cc14b.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "test", + settings: { + "index.routing.allocation.include.size": "big,medium", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dadb69a225778ecd6528924c0aa029bb.asciidoc b/docs/doc_examples/dadb69a225778ecd6528924c0aa029bb.asciidoc new file mode 100644 index 000000000..fc4d72b96 --- /dev/null +++ b/docs/doc_examples/dadb69a225778ecd6528924c0aa029bb.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.indices.create({ + index: "image-index", + mappings: { + properties: { + "image-vector": { + type: "dense_vector", + dims: 3, + similarity: "l2_norm", + }, + "title-vector": { + type: "dense_vector", + dims: 5, + similarity: "l2_norm", + }, + title: { + type: "text", + }, + "file-type": { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dae57cf7df18adb4dc64426eb159733a.asciidoc b/docs/doc_examples/dae57cf7df18adb4dc64426eb159733a.asciidoc new file mode 100644 index 000000000..43ab168fe --- /dev/null +++ b/docs/doc_examples/dae57cf7df18adb4dc64426eb159733a.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time", + percents: [95, 99, 99.9], + hdr: { + number_of_significant_value_digits: 3, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/daf5631eba5285f1b929d5d8d8dc0d50.asciidoc b/docs/doc_examples/daf5631eba5285f1b929d5d8d8dc0d50.asciidoc new file mode 100644 index 000000000..228ba4197 --- /dev/null +++ b/docs/doc_examples/daf5631eba5285f1b929d5d8d8dc0d50.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "uax_url_email", + max_token_length: 5, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "john.smith@global-international.com", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc b/docs/doc_examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc new file mode 100644 index 000000000..8f0e29b61 --- /dev/null +++ b/docs/doc_examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.stopDataFrameAnalytics({ + id: "loganalytics", +}); +console.log(response); +---- diff --git a/docs/doc_examples/db6cba451ba562abe953d09ad80cc15c.asciidoc b/docs/doc_examples/db6cba451ba562abe953d09ad80cc15c.asciidoc deleted file mode 100644 index 364ce69b0..000000000 --- a/docs/doc_examples/db6cba451ba562abe953d09ad80cc15c.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - query: 'city.\\*:(this AND that OR thus)' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/db773f690edf659ac9b044dc854c77eb.asciidoc b/docs/doc_examples/db773f690edf659ac9b044dc854c77eb.asciidoc new file mode 100644 index 000000000..5a4fd2320 --- /dev/null +++ b/docs/doc_examples/db773f690edf659ac9b044dc854c77eb.asciidoc @@ -0,0 +1,78 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- 
+const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + name: { + type: "keyword", + }, + price: { + type: "long", + }, + included: { + type: "boolean", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + location: "POINT (4.912350 52.374081)", + name: "NEMO Science Museum", + price: 1750, + included: true, + }, + { + index: { + _id: "2", + }, + }, + { + location: "POINT (4.901618 52.369219)", + name: "Museum Het Rembrandthuis", + price: 1500, + included: false, + }, + { + index: { + _id: "3", + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Nederlands Scheepvaartmuseum", + price: 1650, + included: true, + }, + { + index: { + _id: "4", + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Amsterdam Centre for Architecture", + price: 0, + included: true, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/db8710a9793ae0817a45892d33468160.asciidoc b/docs/doc_examples/db8710a9793ae0817a45892d33468160.asciidoc new file mode 100644 index 000000000..28e18c33d --- /dev/null +++ b/docs/doc_examples/db8710a9793ae0817a45892d33468160.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.diskUsage({ + index: "my-index-000001", + run_expensive_tasks: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/db879dcf70abc4a9a14063a9a2d8d6f5.asciidoc b/docs/doc_examples/db879dcf70abc4a9a14063a9a2d8d6f5.asciidoc new file mode 100644 index 000000000..2bf5d8591 --- /dev/null +++ b/docs/doc_examples/db879dcf70abc4a9a14063a9a2d8d6f5.asciidoc @@ -0,0 +1,93 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (4.912350 52.374081)", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (4.901618 52.369219)", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (4.405200 51.222900)", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (2.336389 48.861111)", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (2.327000 48.860000)", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + aggregations: { + "large-grid": { + geohash_grid: { + field: "location", + precision: 3, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/db9a8e3edee7c9a96ea0875fd4bbaa69.asciidoc b/docs/doc_examples/db9a8e3edee7c9a96ea0875fd4bbaa69.asciidoc new file mode 100644 index 000000000..8f8803735 --- /dev/null +++ b/docs/doc_examples/db9a8e3edee7c9a96ea0875fd4bbaa69.asciidoc @@ -0,0 +1,8 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings(); +console.log(response); +---- diff --git a/docs/doc_examples/dbc50b8c934171e94604575a8b36f349.asciidoc b/docs/doc_examples/dbc50b8c934171e94604575a8b36f349.asciidoc new file mode 100644 index 000000000..b69c9c9f2 --- /dev/null +++ b/docs/doc_examples/dbc50b8c934171e94604575a8b36f349.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: "my-index-000001", + max_num_segments: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dbcd8892dd01c43d5a60c94173574faf.asciidoc b/docs/doc_examples/dbcd8892dd01c43d5a60c94173574faf.asciidoc new file mode 100644 index 000000000..f4c200d3c --- /dev/null +++ b/docs/doc_examples/dbcd8892dd01c43d5a60c94173574faf.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "range_index", + settings: { + number_of_shards: 2, + }, + mappings: { + properties: { + expected_attendees: { + type: "integer_range", + }, + time_frame: { + type: "date_range", + format: "yyyy-MM-dd||epoch_millis", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "range_index", + id: 1, + refresh: "true", + document: { + expected_attendees: { + gte: 10, + lte: 20, + }, + time_frame: { + gte: "2019-10-28", + lte: "2019-11-04", + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/dbd1b930782d34d7396fdb2db1216c0d.asciidoc b/docs/doc_examples/dbd1b930782d34d7396fdb2db1216c0d.asciidoc new file mode 100644 index 000000000..956826f86 --- /dev/null +++ b/docs/doc_examples/dbd1b930782d34d7396fdb2db1216c0d.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + ids: { + values: ["1", "4", "100"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc b/docs/doc_examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc new file mode 100644 index 000000000..2b96106b8 --- /dev/null +++ b/docs/doc_examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "_all", + fields: "*.id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dbf93d02ab86a09929a21232b19709cc.asciidoc b/docs/doc_examples/dbf93d02ab86a09929a21232b19709cc.asciidoc new file mode 100644 index 000000000..b7f768ac8 --- /dev/null +++ b/docs/doc_examples/dbf93d02ab86a09929a21232b19709cc.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.stopTrainedModelDeployment({ + model_id: "my_model_for_search", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dbf9abc37899352751dab0ede62af2fd.asciidoc 
b/docs/doc_examples/dbf9abc37899352751dab0ede62af2fd.asciidoc new file mode 100644 index 000000000..9b11a1f56 --- /dev/null +++ b/docs/doc_examples/dbf9abc37899352751dab0ede62af2fd.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateToken({ + token: + "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dc15e2373e5ecbe09b4ea0858eb63d47.asciidoc b/docs/doc_examples/dc15e2373e5ecbe09b4ea0858eb63d47.asciidoc deleted file mode 100644 index f2d26605b..000000000 --- a/docs/doc_examples/dc15e2373e5ecbe09b4ea0858eb63d47.asciidoc +++ /dev/null @@ -1,38 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - countries: { - terms: { - field: 'artist.country', - order: { - 'rock>playback_stats.avg': 'desc' - } - }, - aggs: { - rock: { - filter: { - term: { - genre: 'rock' - } - }, - aggs: { - playback_stats: { - stats: { - field: 'play_count' - } - } - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/dc33160f4087443f867080a8f5b2cfbd.asciidoc b/docs/doc_examples/dc33160f4087443f867080a8f5b2cfbd.asciidoc new file mode 100644 index 000000000..cb0e48671 --- /dev/null +++ b/docs/doc_examples/dc33160f4087443f867080a8f5b2cfbd.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + format: "json", + query: + "\n FROM library\n | KEEP author, name, page_count, release_date\n | SORT page_count DESC\n | LIMIT 5\n ", + columnar: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dc3b7603e7d688106acb804059af7834.asciidoc b/docs/doc_examples/dc3b7603e7d688106acb804059af7834.asciidoc new file mode 100644 index 000000000..9d2cf81d9 --- /dev/null +++ b/docs/doc_examples/dc3b7603e7d688106acb804059af7834.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + _source: false, + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dc468865da947b4a9136a5b92878d918.asciidoc b/docs/doc_examples/dc468865da947b4a9136a5b92878d918.asciidoc new file mode 100644 index 000000000..30cdb1f6b --- /dev/null +++ b/docs/doc_examples/dc468865da947b4a9136a5b92878d918.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-other-api-key", + metadata: { + application: "my-application", + environment: { + level: 2, + trusted: true, + tags: ["dev", "staging"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dc4dcfeae8a5f248639335c2c9809549.asciidoc b/docs/doc_examples/dc4dcfeae8a5f248639335c2c9809549.asciidoc new file mode 100644 index 000000000..32844b898 --- /dev/null +++ b/docs/doc_examples/dc4dcfeae8a5f248639335c2c9809549.asciidoc @@ -0,0 +1,11 @@ +// This 
file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "path_hierarchy", + text: "/one/two/three", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc b/docs/doc_examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc new file mode 100644 index 000000000..dc8a7c3f0 --- /dev/null +++ b/docs/doc_examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["lowercase"], + char_filter: ["html_strip"], + text: "this is a test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc b/docs/doc_examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc new file mode 100644 index 000000000..ff1616147 --- /dev/null +++ b/docs/doc_examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + docs: [ + { + _index: "my-index-000001", + _id: "1", + }, + { + _index: "my-index-000001", + _id: "2", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc b/docs/doc_examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc new file mode 100644 index 000000000..6e3b6b17d --- /dev/null +++ b/docs/doc_examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + "@timestamp": { + format: "strict_date_optional_time||epoch_second", + type: "date", + }, + message: { + type: "wildcard", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dcfa7f479a33f459a2d222a92e651451.asciidoc b/docs/doc_examples/dcfa7f479a33f459a2d222a92e651451.asciidoc new file mode 100644 index 000000000..1287912d5 --- /dev/null +++ b/docs/doc_examples/dcfa7f479a33f459a2d222a92e651451.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "my_admin_role", + description: + "Grants full access to all management features within the cluster.", + cluster: ["all"], + indices: [ + { + names: ["index1", "index2"], + privileges: ["all"], + field_security: { + grant: ["title", "body"], + }, + query: '{"match": {"title": "foo"}}', + }, + ], + applications: [ + { + application: "myapp", + privileges: ["admin", "read"], + resources: ["*"], + }, + ], + run_as: ["other_user"], + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd0b196a099e1cca08c5ce4dd74e935a.asciidoc b/docs/doc_examples/dd0b196a099e1cca08c5ce4dd74e935a.asciidoc new file mode 100644 index 000000000..45ed76e36 --- /dev/null +++ b/docs/doc_examples/dd0b196a099e1cca08c5ce4dd74e935a.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "cluster_health_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd1a25d821d0c8deaeaa9c8083152a54.asciidoc b/docs/doc_examples/dd1a25d821d0c8deaeaa9c8083152a54.asciidoc new file mode 100644 index 000000000..4114ec4f6 --- /dev/null +++ b/docs/doc_examples/dd1a25d821d0c8deaeaa9c8083152a54.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.processorGrok({ + s: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd3b263e9fa4226e59bedfc957d399d2.asciidoc b/docs/doc_examples/dd3b263e9fa4226e59bedfc957d399d2.asciidoc new file mode 100644 index 000000000..9b43b5804 --- /dev/null +++ b/docs/doc_examples/dd3b263e9fa4226e59bedfc957d399d2.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: "SELECT * FROM library WHERE release_date < '2000-01-01'", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc b/docs/doc_examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc new file mode 100644 index 000000000..0ff322366 --- /dev/null +++ b/docs/doc_examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getComponentTemplate(); +console.log(response); +---- diff --git a/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc b/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc new file mode 100644 index 000000000..ff630da8a --- /dev/null +++ b/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getSettings(); +console.log(response); +---- diff --git a/docs/doc_examples/dd792bb53703a57f9207e36d16e26255.asciidoc b/docs/doc_examples/dd792bb53703a57f9207e36d16e26255.asciidoc new file mode 100644 index 000000000..9623daabc --- /dev/null +++ b/docs/doc_examples/dd792bb53703a57f9207e36d16e26255.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: {}, + }, + { + timestamp: 1516729294000, + temperature: 200, + voltage: 5.2, + node: "a", + }, + { + index: {}, + }, + { + timestamp: 1516642894000, + temperature: 201, + voltage: 5.8, + node: "b", + }, + { + index: {}, + }, + { + timestamp: 1516556494000, + temperature: 202, + voltage: 5.1, + node: "a", + }, + { + index: {}, + }, + { + timestamp: 1516470094000, + temperature: 198, + voltage: 5.6, + node: "b", + }, + { + index: {}, + }, + { + timestamp: 1516383694000, + temperature: 200, + voltage: 4.2, + node: "c", + }, + { + index: {}, + }, + { + timestamp: 1516297294000, + temperature: 202, + voltage: 4, + 
node: "c", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/dda949d20d07a9edbe64cefc623df945.asciidoc b/docs/doc_examples/dda949d20d07a9edbe64cefc623df945.asciidoc new file mode 100644 index 000000000..695caaf60 --- /dev/null +++ b/docs/doc_examples/dda949d20d07a9edbe64cefc623df945.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my_test_scores", + properties: { + total_score: { + type: "long", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ddcfa47381d47078dbec651e31b69949.asciidoc b/docs/doc_examples/ddcfa47381d47078dbec651e31b69949.asciidoc new file mode 100644 index 000000000..984c274da --- /dev/null +++ b/docs/doc_examples/ddcfa47381d47078dbec651e31b69949.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n library where process.name == "regsvr32.exe" and dll.name == "scrobj.dll"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc b/docs/doc_examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc new file mode 100644 index 000000000..e8049b2cb --- /dev/null +++ b/docs/doc_examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: "FROM mv | EVAL b + 2, a + b | LIMIT 4", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc b/docs/doc_examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc new file mode 100644 index 000000000..eb773d5b0 --- /dev/null +++ b/docs/doc_examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + realm_name: "native1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc b/docs/doc_examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc new file mode 100644 index 000000000..0209a5c00 --- /dev/null +++ b/docs/doc_examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_internal/desired_nodes", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc b/docs/doc_examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc new file mode 100644 index 000000000..261dbdf37 --- /dev/null +++ b/docs/doc_examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001", + slice: { + id: 0, + max: 2, + }, + script: { + source: "ctx._source['extra'] = 'test'", + }, +}); 
+console.log(response); + +const response1 = await client.updateByQuery({ + index: "my-index-000001", + slice: { + id: 1, + max: 2, + }, + script: { + source: "ctx._source['extra'] = 'test'", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/de139866a220124360e5e27d1a736ea4.asciidoc b/docs/doc_examples/de139866a220124360e5e27d1a736ea4.asciidoc index 3d0a76dd4..1318fe886 100644 --- a/docs/doc_examples/de139866a220124360e5e27d1a736ea4.asciidoc +++ b/docs/doc_examples/de139866a220124360e5e27d1a736ea4.asciidoc @@ -4,30 +4,27 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - term: { - product: 'chocolate' - } + query: { + term: { + product: "chocolate", }, - sort: [ - { - 'offer.price': { - mode: 'avg', - order: 'asc', - nested: { - path: 'offer', - filter: { - term: { - 'offer.color': 'blue' - } - } - } - } - } - ] - } -}) -console.log(response) + }, + sort: [ + { + "offer.price": { + mode: "avg", + order: "asc", + nested: { + path: "offer", + filter: { + term: { + "offer.color": "blue", + }, + }, + }, + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/de176bc4788ea286fff9e92418a43ea8.asciidoc b/docs/doc_examples/de176bc4788ea286fff9e92418a43ea8.asciidoc deleted file mode 100644 index 995280ef7..000000000 --- a/docs/doc_examples/de176bc4788ea286fff9e92418a43ea8.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.create({ - index: 'test' -}) -console.log(response0) - -const response1 = await client.indices.create({ - index: 'test_2' -}) -console.log(response1) - -const response2 = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test_2', - alias: 'test' - } - }, - { - remove_index: { - index: 'test' - } - } - ] - } -}) -console.log(response2) ----- - diff --git a/docs/doc_examples/de2f59887737de3a27716177b60393a2.asciidoc b/docs/doc_examples/de2f59887737de3a27716177b60393a2.asciidoc new file mode 100644 index 000000000..e93b1c272 --- /dev/null +++ b/docs/doc_examples/de2f59887737de3a27716177b60393a2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "analyze_sample", + field: "obj1.field1", + text: "this is a test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/de876505acc75d371d1f6f484c449197.asciidoc b/docs/doc_examples/de876505acc75d371d1f6f484c449197.asciidoc new file mode 100644 index 000000000..373b13fdb --- /dev/null +++ b/docs/doc_examples/de876505acc75d371d1f6f484c449197.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + settings: { + "index.write.wait_for_active_shards": "2", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc b/docs/doc_examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc new file mode 100644 index 000000000..6cb361f3e --- /dev/null +++ b/docs/doc_examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.security.queryApiKeys({ + with_limited_by: "true", + query: { + ids: { + values: ["VuaCfGcBCdbkQm-e5aOx"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dea22bb4997e368950f0fc80f2a5f304.asciidoc b/docs/doc_examples/dea22bb4997e368950f0fc80f2a5f304.asciidoc new file mode 100644 index 000000000..789ac12d6 --- /dev/null +++ b/docs/doc_examples/dea22bb4997e368950f0fc80f2a5f304.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "my-index-000001", + fields: "employee-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dea4ac54c63a10c62eccd7b7f6543b86.asciidoc b/docs/doc_examples/dea4ac54c63a10c62eccd7b7f6543b86.asciidoc new file mode 100644 index 000000000..3ae915d08 --- /dev/null +++ b/docs/doc_examples/dea4ac54c63a10c62eccd7b7f6543b86.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "place", + id: 1, + document: { + suggest: { + input: ["timmy's", "starbucks", "dunkin donuts"], + contexts: { + place_type: ["cafe", "food"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dead0682932ea6ec33c1197017bcb209.asciidoc b/docs/doc_examples/dead0682932ea6ec33c1197017bcb209.asciidoc new file mode 100644 index 000000000..0cfd8d93c --- /dev/null +++ b/docs/doc_examples/dead0682932ea6ec33c1197017bcb209.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top_left: "dr5r9ydj2y73", + bottom_right: "drj7teegpus6", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dec2af498a7e5892e8fcd09ae779c8f0.asciidoc b/docs/doc_examples/dec2af498a7e5892e8fcd09ae779c8f0.asciidoc new file mode 100644 index 000000000..e5d6dd96c --- /dev/null +++ b/docs/doc_examples/dec2af498a7e5892e8fcd09ae779c8f0.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "ip_addresses", + size: 0, + aggs: { + ip_ranges: { + ip_range: { + field: "ip", + ranges: [ + { + mask: "10.0.0.0/25", + }, + { + mask: "10.0.0.127/25", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dee3023098d9e63aa9e113beea5686da.asciidoc b/docs/doc_examples/dee3023098d9e63aa9e113beea5686da.asciidoc new file mode 100644 index 000000000..cf5b294bf --- /dev/null +++ b/docs/doc_examples/dee3023098d9e63aa9e113beea5686da.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "knn": {\n "field": "{{knn_field}}",\n "query_vector": 
{{#toJson}}query_vector{{/toJson}},\n "k": "{{k}}",\n "num_candidates": {{num_candidates}}\n },\n "fields": {{#toJson}}fields{{/toJson}}\n }\n ', + params: { + knn_field: "image-vector", + query_vector: [], + k: 10, + num_candidates: 100, + fields: ["title", "file-type"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc b/docs/doc_examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc new file mode 100644 index 000000000..3d7b78f05 --- /dev/null +++ b/docs/doc_examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + h: "heap.max", +}); +console.log(response); +---- diff --git a/docs/doc_examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc b/docs/doc_examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc new file mode 100644 index 000000000..fcf3cf69f --- /dev/null +++ b/docs/doc_examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + index: { + analysis: { + analyzer: { + my_synonyms: { + tokenizer: "whitespace", + filter: ["synonym"], + }, + }, + filter: { + synonym: { + type: "synonym_graph", + synonyms_path: "analysis/synonym.txt", + updateable: true, + }, + }, + }, + }, + }, + mappings: { + properties: { + text: { + type: "text", + analyzer: "standard", + search_analyzer: "my_synonyms", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/df103a3df9b353357e72f9180ef421a1.asciidoc b/docs/doc_examples/df103a3df9b353357e72f9180ef421a1.asciidoc new file mode 100644 index 000000000..af7aa30b7 --- /dev/null +++ b/docs/doc_examples/df103a3df9b353357e72f9180ef421a1.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + rare_terms: { + field: "genre", + include: "swi*", + exclude: "electro*", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/df1336e768fb6fc1826a5afa30a57285.asciidoc b/docs/doc_examples/df1336e768fb6fc1826a5afa30a57285.asciidoc new file mode 100644 index 000000000..7d322eefb --- /dev/null +++ b/docs/doc_examples/df1336e768fb6fc1826a5afa30a57285.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-data-stream", + document: { + "@timestamp": "2099-03-08T11:06:07.000Z", + user: { + id: "8a4f500d", + }, + message: "Login successful", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/df17f920b0deab3529b98df88b781f55.asciidoc b/docs/doc_examples/df17f920b0deab3529b98df88b781f55.asciidoc deleted file mode 100644 index b3e06489a..000000000 --- a/docs/doc_examples/df17f920b0deab3529b98df88b781f55.asciidoc +++ /dev/null @@ -1,40 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - 
function_score: { - functions: [ - { - gauss: { - price: { - origin: '0', - scale: '20' - } - } - }, - { - gauss: { - location: { - origin: '11, 12', - scale: '2km' - } - } - } - ], - query: { - match: { - properties: 'balcony' - } - }, - score_mode: 'multiply' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/df34c8ebaaa59a3ee0e3f28e2443bc30.asciidoc b/docs/doc_examples/df34c8ebaaa59a3ee0e3f28e2443bc30.asciidoc new file mode 100644 index 000000000..0c820eea2 --- /dev/null +++ b/docs/doc_examples/df34c8ebaaa59a3ee0e3f28e2443bc30.asciidoc @@ -0,0 +1,83 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + comments: { + type: "nested", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index", + id: 1, + refresh: "true", + document: { + comments: [ + { + author: "kimchy", + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index", + id: 2, + refresh: "true", + document: { + comments: [ + { + author: "kimchy", + }, + { + author: "nik9000", + }, + ], + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my-index", + id: 3, + refresh: "true", + document: { + comments: [ + { + author: "nik9000", + }, + ], + }, +}); +console.log(response3); + +const response4 = await client.search({ + index: "my-index", + query: { + nested: { + path: "comments", + query: { + bool: { + must_not: [ + { + term: { + "comments.author": "nik9000", + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response4); +---- diff --git a/docs/doc_examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc b/docs/doc_examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc new file mode 100644 index 000000000..d6506f70e --- /dev/null +++ b/docs/doc_examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.ackWatch({ + watch_id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc b/docs/doc_examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc new file mode 100644 index 000000000..924b9da1c --- /dev/null +++ b/docs/doc_examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.threadPool({ + thread_pool_patterns: "generic", + v: "true", + h: "id,name,active,rejected,completed", +}); +console.log(response); +---- diff --git a/docs/doc_examples/df82a9cb21a7557f3ddba2509f76f608.asciidoc b/docs/doc_examples/df82a9cb21a7557f3ddba2509f76f608.asciidoc new file mode 100644 index 000000000..de3179aa1 --- /dev/null +++ b/docs/doc_examples/df82a9cb21a7557f3ddba2509f76f608.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["fingerprint"], + text: "zebra jumps over resting resting dog", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/dfa16b7300d225e013f23625f44c087b.asciidoc b/docs/doc_examples/dfa16b7300d225e013f23625f44c087b.asciidoc new file mode 100644 index 000000000..90ef0a673 --- /dev/null +++ b/docs/doc_examples/dfa16b7300d225e013f23625f44c087b.asciidoc @@ -0,0 +1,65 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + settings: { + number_of_shards: 1, + similarity: { + scripted_tfidf: { + type: "scripted", + script: { + source: + "double tf = Math.sqrt(doc.freq); double idf = Math.log((field.docCount+1.0)/(term.docFreq+1.0)) + 1.0; double norm = 1/Math.sqrt(doc.length); return query.boost * tf * idf * norm;", + }, + }, + }, + }, + mappings: { + properties: { + field: { + type: "text", + similarity: "scripted_tfidf", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "index", + id: 1, + document: { + field: "foo bar foo", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "index", + id: 2, + document: { + field: "bar baz", + }, +}); +console.log(response2); + +const response3 = await client.indices.refresh({ + index: "index", +}); +console.log(response3); + +const response4 = await client.search({ + index: "index", + explain: "true", + query: { + query_string: { + query: "foo^1.7", + default_field: "field", + }, + }, +}); +console.log(response4); +---- diff --git a/docs/doc_examples/dfa75000edf4b960ed9002595a051871.asciidoc b/docs/doc_examples/dfa75000edf4b960ed9002595a051871.asciidoc new file mode 100644 index 000000000..8e62c4d26 --- /dev/null +++ b/docs/doc_examples/dfa75000edf4b960ed9002595a051871.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.stop(); +console.log(response); +---- diff --git a/docs/doc_examples/dfb20907cfc5ac520ea3b1dba5f00811.asciidoc b/docs/doc_examples/dfb20907cfc5ac520ea3b1dba5f00811.asciidoc new file mode 100644 index 000000000..04facbcb5 --- /dev/null +++ b/docs/doc_examples/dfb20907cfc5ac520ea3b1dba5f00811.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: ".watcher-history*", + sort: [ + { + "result.execution_time": "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfb641d2d3155669ad6fb5a424dabf4f.asciidoc b/docs/doc_examples/dfb641d2d3155669ad6fb5a424dabf4f.asciidoc new file mode 100644 index 000000000..aca2d7530 --- /dev/null +++ b/docs/doc_examples/dfb641d2d3155669ad6fb5a424dabf4f.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.getStatus(); +console.log(response); +---- diff --git a/docs/doc_examples/dfbf53781adc6640493d49931a352167.asciidoc b/docs/doc_examples/dfbf53781adc6640493d49931a352167.asciidoc new file mode 100644 index 000000000..94e9cf226 --- /dev/null +++ b/docs/doc_examples/dfbf53781adc6640493d49931a352167.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.indices.create({ + index: "my-index-000001", + mappings: { + enabled: false, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "session_1", + document: { + user_id: "kimchy", + session_data: { + arbitrary_object: { + some_array: [ + "foo", + "bar", + { + baz: 2, + }, + ], + }, + }, + last_updated: "2015-12-06T18:20:22", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "session_1", +}); +console.log(response2); + +const response3 = await client.indices.getMapping({ + index: "my-index-000001", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/dfcc83efefaddccfe5dce0695c2266ef.asciidoc b/docs/doc_examples/dfcc83efefaddccfe5dce0695c2266ef.asciidoc new file mode 100644 index 000000000..1486e6f12 --- /dev/null +++ b/docs/doc_examples/dfcc83efefaddccfe5dce0695c2266ef.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + obj1: { + type: "nested", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfcdadcf91529d3a399e05684195028e.asciidoc b/docs/doc_examples/dfcdadcf91529d3a399e05684195028e.asciidoc new file mode 100644 index 000000000..a9c2986d4 --- /dev/null +++ b/docs/doc_examples/dfcdadcf91529d3a399e05684195028e.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "azure-ai-studio-embeddings", + pipeline: "azure_ai_studio_embeddings", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc b/docs/doc_examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc new file mode 100644 index 000000000..fd702383f --- /dev/null +++ b/docs/doc_examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_internal/desired_nodes/Ywkh3INLQcuPT49f6kcppA/101", + body: { + nodes: [ + { + settings: { + "node.name": "instance-000187", + "node.external_id": "instance-000187", + "node.roles": ["data_hot", "master"], + "node.attr.data": "hot", + "node.attr.logical_availability_zone": "zone-0", + }, + processors_range: { + min: 8, + max: 10, + }, + memory: "58gb", + storage: "2tb", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfdf82b8d99436582f150117695190b3.asciidoc b/docs/doc_examples/dfdf82b8d99436582f150117695190b3.asciidoc new file mode 100644 index 000000000..8d4d07d06 --- /dev/null +++ b/docs/doc_examples/dfdf82b8d99436582f150117695190b3.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "child_example", + id: 1, + document: { + join: { + name: "question", + }, + body: "I have Windows 2003 server and i bought a new Windows 2008 server...", + title: + "Whats the best way to file transfer my 
site from server to a newer one?", + tags: ["windows-server-2003", "windows-server-2008", "file-transfer"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfef545b1e2c247bafd1347e8e807ac1.asciidoc b/docs/doc_examples/dfef545b1e2c247bafd1347e8e807ac1.asciidoc deleted file mode 100644 index e05ce2602..000000000 --- a/docs/doc_examples/dfef545b1e2c247bafd1347e8e807ac1.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'test', - body: { - settings: { - number_of_shards: 1 - }, - mappings: { - properties: { - field1: { - type: 'text' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc b/docs/doc_examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc new file mode 100644 index 000000000..f3a92dcab --- /dev/null +++ b/docs/doc_examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deletePipeline({ + id: "my-pipeline-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dffbbdc4025e5777c647d8818847b960.asciidoc b/docs/doc_examples/dffbbdc4025e5777c647d8818847b960.asciidoc new file mode 100644 index 000000000..d6249f4cf --- /dev/null +++ b/docs/doc_examples/dffbbdc4025e5777c647d8818847b960.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + id: "VuaCfGcBCdbkQm-e5aOx", + owner: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e0734215054e1ff5df712ce3a826cdba.asciidoc b/docs/doc_examples/e0734215054e1ff5df712ce3a826cdba.asciidoc new file mode 100644 index 000000000..926699d94 --- /dev/null +++ b/docs/doc_examples/e0734215054e1ff5df712ce3a826cdba.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "my-index", +}); +console.log(response); + +const response1 = await client.indices.deleteDataStream({ + name: "logs-my_app-default", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e08fb1435dc659c24badf25b676efb68.asciidoc b/docs/doc_examples/e08fb1435dc659c24badf25b676efb68.asciidoc new file mode 100644 index 000000000..0a05a4f32 --- /dev/null +++ b/docs/doc_examples/e08fb1435dc659c24badf25b676efb68.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + body_text: { + type: "text", + index_prefixes: {}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e095fc96504efecc588f97673912e3d3.asciidoc b/docs/doc_examples/e095fc96504efecc588f97673912e3d3.asciidoc new file mode 100644 index 000000000..a4c8438a0 --- /dev/null +++ b/docs/doc_examples/e095fc96504efecc588f97673912e3d3.asciidoc @@ -0,0 +1,54 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putJob({ + job_id: "test-job1", + pretty: "true", + analysis_config: { + bucket_span: "15m", + detectors: [ + { + detector_description: "Sum of bytes", + function: "sum", + field_name: "bytes", + }, + ], + }, + data_description: { + time_field: "timestamp", + time_format: "epoch_ms", + }, + analysis_limits: { + model_memory_limit: "11MB", + }, + model_plot_config: { + enabled: true, + annotations_enabled: true, + }, + results_index_name: "test-job1", + datafeed_config: { + indices: ["kibana_sample_data_logs"], + query: { + bool: { + must: [ + { + match_all: {}, + }, + ], + }, + }, + runtime_mappings: { + hour_of_day: { + type: "long", + script: { + source: "emit(doc['timestamp'].value.getHour());", + }, + }, + }, + datafeed_id: "datafeed-test-job1", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e09d30195108bd6a1f6857394a6123ea.asciidoc b/docs/doc_examples/e09d30195108bd6a1f6857394a6123ea.asciidoc new file mode 100644 index 000000000..b6d3ff1e0 --- /dev/null +++ b/docs/doc_examples/e09d30195108bd6a1f6857394a6123ea.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["reverse"], + text: "quick fox jumps", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e09ee13ce253c7892dd5ef076fbfbba5.asciidoc b/docs/doc_examples/e09ee13ce253c7892dd5ef076fbfbba5.asciidoc new file mode 100644 index 000000000..69b1d067e --- /dev/null +++ b/docs/doc_examples/e09ee13ce253c7892dd5ef076fbfbba5.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_analyzer: { + tokenizer: "standard", + filter: ["keyword_repeat", "stemmer", "remove_duplicates"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc b/docs/doc_examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc new file mode 100644 index 000000000..fa26514b1 --- /dev/null +++ b/docs/doc_examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-000001", + slice: { + id: 0, + max: 2, + }, + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); + +const response1 = await client.reindex({ + source: { + index: "my-index-000001", + slice: { + id: 1, + max: 2, + }, + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc b/docs/doc_examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc new file mode 100644 index 000000000..cc38f9896 --- /dev/null +++ b/docs/doc_examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + index: "index1,index2", +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc b/docs/doc_examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc new file mode 100644 index 000000000..552adb14c --- /dev/null +++ b/docs/doc_examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.fielddata({ + v: "true", + fields: "body", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e0d4a800de2d8f4062e69433586c38db.asciidoc b/docs/doc_examples/e0d4a800de2d8f4062e69433586c38db.asciidoc new file mode 100644 index 000000000..ea1945982 --- /dev/null +++ b/docs/doc_examples/e0d4a800de2d8f4062e69433586c38db.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlCompleteLogout({ + realm: "saml1", + ids: ["_1c368075e0b3..."], + query_string: + "SAMLResponse=fZHLasMwEEVbfb1bf...&SigAlg=http%3A%2F%2Fwww.w3.org%2F2000%2F09%2Fxmldsig%23rsa-sha1&Signature=CuCmFn%2BLqnaZGZJqK...", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e0d6e02b998bdea99c9c08dcc3630c5e.asciidoc b/docs/doc_examples/e0d6e02b998bdea99c9c08dcc3630c5e.asciidoc deleted file mode 100644 index f3f0bb5cc..000000000 --- a/docs/doc_examples/e0d6e02b998bdea99c9c08dcc3630c5e.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match: { - message: { - query: 'this is a test' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e0fcef99656799de6b88117d56f131e2.asciidoc b/docs/doc_examples/e0fcef99656799de6b88117d56f131e2.asciidoc new file mode 100644 index 000000000..03c15eafc --- /dev/null +++ b/docs/doc_examples/e0fcef99656799de6b88117d56f131e2.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.explain({ + index: "my-index-000001", + id: 0, + query: { + match: { + message: "elasticsearch", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e1220f2c28db6ef0233e26e6bd3866fa.asciidoc b/docs/doc_examples/e1220f2c28db6ef0233e26e6bd3866fa.asciidoc new file mode 100644 index 000000000..6f338681d --- /dev/null +++ b/docs/doc_examples/e1220f2c28db6ef0233e26e6bd3866fa.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + top_tags: { + terms: { + field: "type", + size: 3, + }, + aggs: { + top_sales_hits: { + top_hits: { + sort: [ + { + date: { + order: "desc", + }, + }, + ], + _source: { + includes: ["date", "price"], + }, + size: 1, + }, + }, + "having.top_salary": { + bucket_selector: { + buckets_path: { + tp: "top_sales_hits[_source.price]", + }, + script: "params.tp < 180", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e12f2d2ddca387630e7855a6db952da2.asciidoc b/docs/doc_examples/e12f2d2ddca387630e7855a6db952da2.asciidoc new file mode 
100644 index 000000000..5672d4c40 --- /dev/null +++ b/docs/doc_examples/e12f2d2ddca387630e7855a6db952da2.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + runtime_mappings: { + "price.euros": { + type: "double", + script: { + source: + "\n emit(doc['price'].value * params.conversion_rate)\n ", + params: { + conversion_rate: 0.835526591, + }, + }, + }, + }, + aggs: { + price_ranges: { + range: { + field: "price.euros", + ranges: [ + { + to: 100, + }, + { + from: 100, + to: 200, + }, + { + from: 200, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc b/docs/doc_examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc new file mode 100644 index 000000000..44211f46b --- /dev/null +++ b/docs/doc_examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getToken({ + grant_type: "password", + username: "test_admin", + password: "x-pack-test-password", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc b/docs/doc_examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc new file mode 100644 index 000000000..627606897 --- /dev/null +++ b/docs/doc_examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "breaker", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e16a353e619b935c5c70769b1b9fa100.asciidoc b/docs/doc_examples/e16a353e619b935c5c70769b1b9fa100.asciidoc new file mode 100644 index 000000000..2e4b4802e --- /dev/null +++ b/docs/doc_examples/e16a353e619b935c5c70769b1b9fa100.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + tile: { + geotile_grid: { + field: "location", + precision: 8, + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e1874cc7cd22b6860ca8b11bde3c70c1.asciidoc b/docs/doc_examples/e1874cc7cd22b6860ca8b11bde3c70c1.asciidoc new file mode 100644 index 000000000..ae53bc087 --- /dev/null +++ b/docs/doc_examples/e1874cc7cd22b6860ca8b11bde3c70c1.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index2", + query: { + query_string: { + query: "running with scissors", + fields: ["comment", "comment.english"], + }, + }, + highlight: { + order: "score", + fields: { + comment: { + type: "fvh", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e194e9cbe3eb2305f4f7cdda0cf529bd.asciidoc b/docs/doc_examples/e194e9cbe3eb2305f4f7cdda0cf529bd.asciidoc new file mode 100644 index 000000000..75b53db1d --- /dev/null +++ b/docs/doc_examples/e194e9cbe3eb2305f4f7cdda0cf529bd.asciidoc @@ -0,0 +1,23 @@ +// This file 
is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + typed_keys: "true", + suggest: { + text: "some test mssage", + "my-first-suggester": { + term: { + field: "message", + }, + }, + "my-second-suggester": { + phrase: { + field: "message", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e19f5e3724d9f3f36a817b9a811ca42e.asciidoc b/docs/doc_examples/e19f5e3724d9f3f36a817b9a811ca42e.asciidoc new file mode 100644 index 000000000..26bc23dc8 --- /dev/null +++ b/docs/doc_examples/e19f5e3724d9f3f36a817b9a811ca42e.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + my_date_histo: { + date_histogram: { + field: "timestamp", + calendar_interval: "day", + }, + aggs: { + the_sum: { + sum: { + field: "lemmings", + }, + }, + the_deriv: { + derivative: { + buckets_path: "the_sum", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc b/docs/doc_examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc new file mode 100644 index 000000000..75701ade6 --- /dev/null +++ b/docs/doc_examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + bool: { + should: [ + { + percolate: { + field: "query", + document: { + message: "bonsai tree", + }, + name: "query1", + }, + }, + { + percolate: { + field: "query", + document: { + message: "tulip flower", + }, + name: "query2", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b9a153725b28fdd0a5aabd7f17a8c2d7.asciidoc b/docs/doc_examples/e1d6ecab4148b09f4c605474157e7dbd.asciidoc similarity index 73% rename from docs/doc_examples/b9a153725b28fdd0a5aabd7f17a8c2d7.asciidoc rename to docs/doc_examples/e1d6ecab4148b09f4c605474157e7dbd.asciidoc index 06989d62f..2d21ab128 100644 --- a/docs/doc_examples/b9a153725b28fdd0a5aabd7f17a8c2d7.asciidoc +++ b/docs/doc_examples/e1d6ecab4148b09f4c605474157e7dbd.asciidoc @@ -4,9 +4,7 @@ [source, js] ---- const response = await client.indices.getSettings({ - index: 'twitter', - flat_settings: 'true' -}) -console.log(response) + index: "my-index-000001", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/36818c6d9f434d387819c30bd9addb14.asciidoc b/docs/doc_examples/e1f20ee96ce80edcc35b647cef731e15.asciidoc similarity index 53% rename from docs/doc_examples/36818c6d9f434d387819c30bd9addb14.asciidoc rename to docs/doc_examples/e1f20ee96ce80edcc35b647cef731e15.asciidoc index 5ee620974..89f63d5b9 100644 --- a/docs/doc_examples/36818c6d9f434d387819c30bd9addb14.asciidoc +++ b/docs/doc_examples/e1f20ee96ce80edcc35b647cef731e15.asciidoc @@ -4,13 +4,12 @@ [source, js] ---- const response = await client.index({ - index: 'twitter', - body: { - user: 'kimchy', - post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) + index: "my-index-000001", + id: "my_id", + pipeline: "user_lookup", + document: { + email: "mardy.brown@asciidocsmith.com", + }, +}); +console.log(response); ---- - diff --git 
a/docs/doc_examples/e1f6ea7c0937cf7e6ea7e8209e52e8bb.asciidoc b/docs/doc_examples/e1f6ea7c0937cf7e6ea7e8209e52e8bb.asciidoc new file mode 100644 index 000000000..29722ee03 --- /dev/null +++ b/docs/doc_examples/e1f6ea7c0937cf7e6ea7e8209e52e8bb.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "events", + size: 10, + sort: [ + { + timestamp: "desc", + }, + ], + track_total_hits: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc b/docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc new file mode 100644 index 000000000..3b4f9251b --- /dev/null +++ b/docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + "index.blocks.read_only_allow_delete": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e26c96978096ccc592849cca9db67ffc.asciidoc b/docs/doc_examples/e26c96978096ccc592849cca9db67ffc.asciidoc new file mode 100644 index 000000000..f01887bd3 --- /dev/null +++ b/docs/doc_examples/e26c96978096ccc592849cca9db67ffc.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + "index.requests.cache.enable": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e26e8bfa68aa4ab265b22304c38c3aef.asciidoc b/docs/doc_examples/e26e8bfa68aa4ab265b22304c38c3aef.asciidoc new file mode 100644 index 000000000..ab6b687b0 --- /dev/null +++ b/docs/doc_examples/e26e8bfa68aa4ab265b22304c38c3aef.asciidoc @@ -0,0 +1,90 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "sample_data", + mappings: { + properties: { + client_ip: { + type: "ip", + }, + message: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "sample_data", + operations: [ + { + index: {}, + }, + { + "@timestamp": "2023-10-23T12:15:03.360Z", + client_ip: "172.21.2.162", + message: "Connected to 10.1.0.3", + event_duration: 3450233, + }, + { + index: {}, + }, + { + "@timestamp": "2023-10-23T12:27:28.948Z", + client_ip: "172.21.2.113", + message: "Connected to 10.1.0.2", + event_duration: 2764889, + }, + { + index: {}, + }, + { + "@timestamp": "2023-10-23T13:33:34.937Z", + client_ip: "172.21.0.5", + message: "Disconnected", + event_duration: 1232382, + }, + { + index: {}, + }, + { + "@timestamp": "2023-10-23T13:51:54.732Z", + client_ip: "172.21.3.15", + message: "Connection error", + event_duration: 725448, + }, + { + index: {}, + }, + { + "@timestamp": "2023-10-23T13:52:55.015Z", + client_ip: "172.21.3.15", + message: "Connection error", + event_duration: 8268153, + }, + { + index: {}, + }, + { + "@timestamp": "2023-10-23T13:53:55.832Z", + client_ip: "172.21.3.15", + message: "Connection error", + event_duration: 5033755, + }, + { + index: {}, + }, + { + "@timestamp": 
"2023-10-23T13:55:01.543Z", + client_ip: "172.21.3.15", + message: "Connected to 10.1.0.1", + event_duration: 1756467, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e270f3f721a5712cd11a5ca03554f5b0.asciidoc b/docs/doc_examples/e270f3f721a5712cd11a5ca03554f5b0.asciidoc index 74cdf053b..c5ce59e19 100644 --- a/docs/doc_examples/e270f3f721a5712cd11a5ca03554f5b0.asciidoc +++ b/docs/doc_examples/e270f3f721a5712cd11a5ca03554f5b0.asciidoc @@ -4,20 +4,14 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'Will Smith', - type: 'best_fields', - fields: [ - 'first_name', - 'last_name' - ], - operator: 'and' - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "Will Smith", + type: "best_fields", + fields: ["first_name", "last_name"], + operator: "and", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/e273060a675c959fd5f3cde27c8aff07.asciidoc b/docs/doc_examples/e273060a675c959fd5f3cde27c8aff07.asciidoc new file mode 100644 index 000000000..fb7327c66 --- /dev/null +++ b/docs/doc_examples/e273060a675c959fd5f3cde27c8aff07.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + mappings: { + properties: { + foo: { + type: "integer", + index: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2750d69bcb6d4c7e16e704cd0fb3530.asciidoc b/docs/doc_examples/e2750d69bcb6d4c7e16e704cd0fb3530.asciidoc new file mode 100644 index 000000000..22ace2fd1 --- /dev/null +++ b/docs/doc_examples/e2750d69bcb6d4c7e16e704cd0fb3530.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + pagerank: { + type: "rank_feature", + }, + url_length: { + type: "rank_feature", + positive_score_impact: false, + }, + topics: { + type: "rank_features", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2883c88b5ceca9fce1e70e716d80025.asciidoc b/docs/doc_examples/e2883c88b5ceca9fce1e70e716d80025.asciidoc new file mode 100644 index 000000000..d08785c3f --- /dev/null +++ b/docs/doc_examples/e2883c88b5ceca9fce1e70e716d80025.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_version: { + type: "version", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2a042c629429855c3bcaefffb26b7fa.asciidoc b/docs/doc_examples/e2a042c629429855c3bcaefffb26b7fa.asciidoc deleted file mode 100644 index 9021864be..000000000 --- a/docs/doc_examples/e2a042c629429855c3bcaefffb26b7fa.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - date: { - type: 'date', - format: 'yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis' - } - } - } - } -}) -console.log(response) ----- - diff --git 
a/docs/doc_examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc b/docs/doc_examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc new file mode 100644 index 000000000..7ed084f76 --- /dev/null +++ b/docs/doc_examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + match: { + query: "salty", + filter: { + contained_by: { + match: { + query: "hot porridge", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2a753029b450942a3228e3003a55a7d.asciidoc b/docs/doc_examples/e2a753029b450942a3228e3003a55a7d.asciidoc new file mode 100644 index 000000000..e8feda761 --- /dev/null +++ b/docs/doc_examples/e2a753029b450942a3228e3003a55a7d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putDataLifecycle({ + name: "my-weather-sensor-data-stream", + downsampling: [ + { + after: "1d", + fixed_interval: "10m", + }, + { + after: "7d", + fixed_interval: "1d", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2a7d127b82ddebb690a959dcd0cbc09.asciidoc b/docs/doc_examples/e2a7d127b82ddebb690a959dcd0cbc09.asciidoc new file mode 100644 index 000000000..498327da9 --- /dev/null +++ b/docs/doc_examples/e2a7d127b82ddebb690a959dcd0cbc09.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "elision_example", + settings: { + analysis: { + analyzer: { + whitespace_elision: { + tokenizer: "whitespace", + filter: ["elision"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2b4867a9f72bda87ebaa3608d3fba4c.asciidoc b/docs/doc_examples/e2b4867a9f72bda87ebaa3608d3fba4c.asciidoc new file mode 100644 index 000000000..fb0c3c9ac --- /dev/null +++ b/docs/doc_examples/e2b4867a9f72bda87ebaa3608d3fba4c.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + range: { + "user.effective.date": { + gte: "{{date.min}}", + lte: "{{date.max}}", + format: + "{{#join delimiter='||'}}date.formats{{/join delimiter='||'}}", + }, + }, + }, + }, + params: { + date: { + min: "2098", + max: "06/05/2099", + formats: ["dd/MM/yyyy", "yyyy"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc b/docs/doc_examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc new file mode 100644 index 000000000..4f8760f0e --- /dev/null +++ b/docs/doc_examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.getSynonymsSets(); +console.log(response); +---- diff --git a/docs/doc_examples/e2d8cf24a12053eb09fec7087cdab43a.asciidoc b/docs/doc_examples/e2d8cf24a12053eb09fec7087cdab43a.asciidoc new file mode 100644 index 000000000..3da189a80 --- /dev/null +++ 
b/docs/doc_examples/e2d8cf24a12053eb09fec7087cdab43a.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + percent_of_total_sales: { + normalize: { + buckets_path: "sales", + method: "percent_of_sum", + format: "00.00%", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc b/docs/doc_examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc new file mode 100644 index 000000000..12f8bfcc0 --- /dev/null +++ b/docs/doc_examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.updateApiKey({ + id: "VuaCfGcBCdbkQm-e5aOx", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc new file mode 100644 index 000000000..39d1ae3a1 --- /dev/null +++ b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.oidcPrepareAuthentication({}); +console.log(response); +---- diff --git a/docs/doc_examples/e30ea6e3823a139d7693d8cce1920a06.asciidoc b/docs/doc_examples/e30ea6e3823a139d7693d8cce1920a06.asciidoc index 6317ce7bb..28c54a237 100644 --- a/docs/doc_examples/e30ea6e3823a139d7693d8cce1920a06.asciidoc +++ b/docs/doc_examples/e30ea6e3823a139d7693d8cce1920a06.asciidoc @@ -4,18 +4,12 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'this is a test', - fields: [ - 'subject^3', - 'message' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "this is a test", + fields: ["subject^3", "message"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/e316271f668c9889bf548311fb421f1e.asciidoc b/docs/doc_examples/e316271f668c9889bf548311fb421f1e.asciidoc new file mode 100644 index 000000000..331097c1f --- /dev/null +++ b/docs/doc_examples/e316271f668c9889bf548311fb421f1e.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + ip_addresses: { + terms: { + field: "destination_ip", + missing: "0.0.0.0", + value_type: "ip", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e317a8380dfbc76c4e7f23d0997b3518.asciidoc b/docs/doc_examples/e317a8380dfbc76c4e7f23d0997b3518.asciidoc new file mode 100644 index 000000000..13b20ac31 --- /dev/null +++ b/docs/doc_examples/e317a8380dfbc76c4e7f23d0997b3518.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "action.destructive_requires_name": null, + }, +}); +console.log(response); 
+---- diff --git a/docs/doc_examples/e324ea1547635180c31c1adf77870ba2.asciidoc b/docs/doc_examples/e324ea1547635180c31c1adf77870ba2.asciidoc new file mode 100644 index 000000000..6db19aaad --- /dev/null +++ b/docs/doc_examples/e324ea1547635180c31c1adf77870ba2.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "destination_template", + template: { + settings: { + index: { + number_of_replicas: 2, + number_of_shards: 2, + mode: "time_series", + routing_path: ["metricset"], + }, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + metricset: { + type: "keyword", + time_series_dimension: true, + }, + k8s: { + properties: { + tx: { + type: "long", + }, + rx: { + type: "long", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc b/docs/doc_examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc new file mode 100644 index 000000000..812f46fca --- /dev/null +++ b/docs/doc_examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_internal/prevalidate_node_removal", + querystring: { + names: "node1,node2", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e3678142aec988e2ff0ae5d934dc39e9.asciidoc b/docs/doc_examples/e3678142aec988e2ff0ae5d934dc39e9.asciidoc new file mode 100644 index 000000000..ccdf39576 --- /dev/null +++ b/docs/doc_examples/e3678142aec988e2ff0ae5d934dc39e9.asciidoc @@ -0,0 +1,102 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Geopoint as an object using GeoJSON format", + location: { + type: "Point", + coordinates: [-71.34, 41.12], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + text: "Geopoint as a WKT POINT primitive", + location: "POINT (-71.34 41.12)", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my-index-000001", + id: 3, + document: { + text: "Geopoint as an object with 'lat' and 'lon' keys", + location: { + lat: 41.12, + lon: -71.34, + }, + }, +}); +console.log(response3); + +const response4 = await client.index({ + index: "my-index-000001", + id: 4, + document: { + text: "Geopoint as an array", + location: [-71.34, 41.12], + }, +}); +console.log(response4); + +const response5 = await client.index({ + index: "my-index-000001", + id: 5, + document: { + text: "Geopoint as a string", + location: "41.12,-71.34", + }, +}); +console.log(response5); + +const response6 = await client.index({ + index: "my-index-000001", + id: 6, + document: { + text: "Geopoint as a geohash", + location: "drm3btev3e86", + }, +}); +console.log(response6); + +const response7 = await client.search({ + index: "my-index-000001", + query: { + geo_bounding_box: { 
+ location: { + top_left: { + lat: 42, + lon: -72, + }, + bottom_right: { + lat: 40, + lon: -74, + }, + }, + }, + }, +}); +console.log(response7); +---- diff --git a/docs/doc_examples/e3a6462ca79c101314da0680c97678cd.asciidoc b/docs/doc_examples/e3a6462ca79c101314da0680c97678cd.asciidoc new file mode 100644 index 000000000..5634f8005 --- /dev/null +++ b/docs/doc_examples/e3a6462ca79c101314da0680c97678cd.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_all: {}, + }, + script_fields: { + test1: { + script: { + lang: "painless", + source: "doc['price'].value * 2", + }, + }, + test2: { + script: { + lang: "painless", + source: "doc['price'].value * params.factor", + params: { + factor: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e3b3a8ae12ab947ad3ba96eb228402ca.asciidoc b/docs/doc_examples/e3b3a8ae12ab947ad3ba96eb228402ca.asciidoc new file mode 100644 index 000000000..4e8f1c68e --- /dev/null +++ b/docs/doc_examples/e3b3a8ae12ab947ad3ba96eb228402ca.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.store.preload": ["nvd", "dvd"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e3f2f6ee3e312b8a90634827ae954d70.asciidoc b/docs/doc_examples/e3f2f6ee3e312b8a90634827ae954d70.asciidoc new file mode 100644 index 000000000..955bb8caf --- /dev/null +++ b/docs/doc_examples/e3f2f6ee3e312b8a90634827ae954d70.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "GeometryCollection", + geometries: [ + { + type: "Point", + coordinates: [100, 0], + }, + { + type: "LineString", + coordinates: [ + [101, 0], + [102, 1], + ], + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4193867485595c9c92f909a052d2a90.asciidoc b/docs/doc_examples/e4193867485595c9c92f909a052d2a90.asciidoc new file mode 100644 index 000000000..ab10339a7 --- /dev/null +++ b/docs/doc_examples/e4193867485595c9c92f909a052d2a90.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + "my-join-field": { + type: "join", + relations: { + parent: "child", + }, + }, + tag: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e41a9bac42d0c1cb103674ae9039b7af.asciidoc b/docs/doc_examples/e41a9bac42d0c1cb103674ae9039b7af.asciidoc new file mode 100644 index 000000000..533bee458 --- /dev/null +++ b/docs/doc_examples/e41a9bac42d0c1cb103674ae9039b7af.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + numeric_detection: true, + }, +}); +console.log(response); + +const response1 = await 
client.index({ + index: "my-index-000001", + id: 1, + document: { + my_float: "1.0", + my_integer: "1", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e441cb3be3c2f007621ee1f8c9a2e0ef.asciidoc b/docs/doc_examples/e441cb3be3c2f007621ee1f8c9a2e0ef.asciidoc new file mode 100644 index 000000000..d8168a881 --- /dev/null +++ b/docs/doc_examples/e441cb3be3c2f007621ee1f8c9a2e0ef.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + statistics: { + matrix_stats: { + fields: ["poverty", "income"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e451900efbd8be50c2b8347a83816aa6.asciidoc b/docs/doc_examples/e451900efbd8be50c2b8347a83816aa6.asciidoc new file mode 100644 index 000000000..d4aa9c554 --- /dev/null +++ b/docs/doc_examples/e451900efbd8be50c2b8347a83816aa6.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + stats_monthly_sales: { + extended_stats_bucket: { + buckets_path: "sales_per_month>sales", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e46c83db1580e14be844079cd008f518.asciidoc b/docs/doc_examples/e46c83db1580e14be844079cd008f518.asciidoc new file mode 100644 index 000000000..9e529c789 --- /dev/null +++ b/docs/doc_examples/e46c83db1580e14be844079cd008f518.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + "routing.allocation.enable": "all", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e47a71a2e314dbbee5db8142a23957ce.asciidoc b/docs/doc_examples/e47a71a2e314dbbee5db8142a23957ce.asciidoc new file mode 100644 index 000000000..a75f8d567 --- /dev/null +++ b/docs/doc_examples/e47a71a2e314dbbee5db8142a23957ce.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + description: "Index the ingest timestamp as 'event.ingested'", + field: "event.ingested", + value: "{{{_ingest.timestamp}}}", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc b/docs/doc_examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc new file mode 100644 index 000000000..41a817618 --- /dev/null +++ b/docs/doc_examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getOverallBuckets({ + job_id: "job-*", + overall_score: 80, + start: 1403532000000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e494162e83ce041c56b2e2bc29d33474.asciidoc 
b/docs/doc_examples/e494162e83ce041c56b2e2bc29d33474.asciidoc new file mode 100644 index 000000000..c6ab0c547 --- /dev/null +++ b/docs/doc_examples/e494162e83ce041c56b2e2bc29d33474.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence by process.pid with maxspan=1h\n [ process where process.name == "regsvr32.exe" ]\n [ file where stringContains(file.name, "scrobj.dll") ]\n until [ process where event.type == "termination" ]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4b2b5e0aaedf3cbbcde3d61eb1f13fc.asciidoc b/docs/doc_examples/e4b2b5e0aaedf3cbbcde3d61eb1f13fc.asciidoc new file mode 100644 index 000000000..7b6bdf9f7 --- /dev/null +++ b/docs/doc_examples/e4b2b5e0aaedf3cbbcde3d61eb1f13fc.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "test", + id: 4, + refresh: "wait_for", + document: { + test: "test", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc b/docs/doc_examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc new file mode 100644 index 000000000..75685cab4 --- /dev/null +++ b/docs/doc_examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc @@ -0,0 +1,51 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.putTransform({ + transform_id: "data_log", + source: { + index: "kibana_sample_data_logs", + }, + dest: { + index: "data-logs-by-client", + }, + pivot: { + group_by: { + "machine.os": { + terms: { + field: "machine.os.keyword", + }, + }, + "machine.ip": { + terms: { + field: "clientip", + }, + }, + }, + aggregations: { + "time_frame.lte": { + max: { + field: "timestamp", + }, + }, + "time_frame.gte": { + min: { + field: "timestamp", + }, + }, + time_length: { + bucket_script: { + buckets_path: { + min: "time_frame.gte.value", + max: "time_frame.lte.value", + }, + script: "params.max - params.min", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc b/docs/doc_examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc new file mode 100644 index 000000000..bdbb31513 --- /dev/null +++ b/docs/doc_examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.promoteDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc b/docs/doc_examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc index bb1a7d6bb..3fdbb2aa8 100644 --- a/docs/doc_examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc +++ b/docs/doc_examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc @@ -4,15 +4,12 @@ [source, js] ---- const response = await client.indices.putMapping({ - index: 'publications', - body: { - properties: { - title: { - type: 'text' - } - } - } -}) -console.log(response) + index: "publications", + properties: { + title: { + type: "text", + }, + }, +}); +console.log(response); ---- - 
diff --git a/docs/doc_examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc b/docs/doc_examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc new file mode 100644 index 000000000..246793183 --- /dev/null +++ b/docs/doc_examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + human: "true", + detailed: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4de6035653e8202c43631f02d244661.asciidoc b/docs/doc_examples/e4de6035653e8202c43631f02d244661.asciidoc new file mode 100644 index 000000000..23f0dd981 --- /dev/null +++ b/docs/doc_examples/e4de6035653e8202c43631f02d244661.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cluster_one:my-index-000001", + size: 1, + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc b/docs/doc_examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc new file mode 100644 index 000000000..a73d17637 --- /dev/null +++ b/docs/doc_examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryUser({}); +console.log(response); +---- diff --git a/docs/doc_examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc b/docs/doc_examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc new file mode 100644 index 000000000..d09d2747f --- /dev/null +++ b/docs/doc_examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e551ea38a2d8f8deac110b33304200cc.asciidoc b/docs/doc_examples/e551ea38a2d8f8deac110b33304200cc.asciidoc new file mode 100644 index 000000000..362c609dc --- /dev/null +++ b/docs/doc_examples/e551ea38a2d8f8deac110b33304200cc.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + multi_match: { + fields: ["title", "content"], + query: "the quick brown fox", + }, + }, + rescore: { + learning_to_rank: { + model_id: "ltr-model", + params: { + query_text: "the quick brown fox", + }, + }, + window_size: 100, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e566e898902e432bc7ea0568400f0c50.asciidoc b/docs/doc_examples/e566e898902e432bc7ea0568400f0c50.asciidoc new file mode 100644 index 000000000..8a135621a --- /dev/null +++ b/docs/doc_examples/e566e898902e432bc7ea0568400f0c50.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: 
"synthetic", + }, + properties: { + ip: { + type: "ip", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + ip: [ + "192.168.0.1", + "192.168.0.1", + "10.10.12.123", + "2001:db8::1:0:0:1", + "::afff:4567:890a", + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e567e6dbf86300142573c73789c8fce4.asciidoc b/docs/doc_examples/e567e6dbf86300142573c73789c8fce4.asciidoc deleted file mode 100644 index a59112030..000000000 --- a/docs/doc_examples/e567e6dbf86300142573c73789c8fce4.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'new_twitter', - size: '0', - filter_path: 'hits.total' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e586d1d2a997133e039fd352a42a72b3.asciidoc b/docs/doc_examples/e586d1d2a997133e039fd352a42a72b3.asciidoc new file mode 100644 index 000000000..9f0f29ea4 --- /dev/null +++ b/docs/doc_examples/e586d1d2a997133e039fd352a42a72b3.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "job-candidates", + query: { + terms_set: { + programming_languages: { + terms: ["c++", "java", "php"], + minimum_should_match_field: "required_matches", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e58833449d01379df20ad06dc28144d8.asciidoc b/docs/doc_examples/e58833449d01379df20ad06dc28144d8.asciidoc new file mode 100644 index 000000000..93af1e480 --- /dev/null +++ b/docs/doc_examples/e58833449d01379df20ad06dc28144d8.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001", + conflicts: "proceed", + query: { + term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e58b7965c3a314c34bc444c6db3b1b79.asciidoc b/docs/doc_examples/e58b7965c3a314c34bc444c6db3b1b79.asciidoc new file mode 100644 index 000000000..ded25932b --- /dev/null +++ b/docs/doc_examples/e58b7965c3a314c34bc444c6db3b1b79.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + name: "index.routing.allocation.enable", + flat_settings: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc b/docs/doc_examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc new file mode 100644 index 000000000..15f694447 --- /dev/null +++ b/docs/doc_examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "indices.recovery.max_bytes_per_sec": "50mb", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e5c710b08a545522d50b4ce35503bc46.asciidoc b/docs/doc_examples/e5c710b08a545522d50b4ce35503bc46.asciidoc new file mode 100644 index 
000000000..fa2981ef5 --- /dev/null +++ b/docs/doc_examples/e5c710b08a545522d50b4ce35503bc46.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-data-stream", + pipeline: "my-pipeline", + document: { + "@timestamp": "2099-03-07T11:04:05.000Z", + "my-keyword-field": "foo", + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-data-stream", + pipeline: "my-pipeline", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2099-03-07T11:04:06.000Z", + "my-keyword-field": "foo", + }, + { + create: {}, + }, + { + "@timestamp": "2099-03-07T11:04:07.000Z", + "my-keyword-field": "bar", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e5d2172b524332196cac0f031c043659.asciidoc b/docs/doc_examples/e5d2172b524332196cac0f031c043659.asciidoc deleted file mode 100644 index eae6cccc9..000000000 --- a/docs/doc_examples/e5d2172b524332196cac0f031c043659.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'twitter', - body: { - settings: { - index: { - number_of_shards: 3, - number_of_replicas: 2 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e5f50b31f165462d883ecbff45f74985.asciidoc b/docs/doc_examples/e5f50b31f165462d883ecbff45f74985.asciidoc index 1fa83ccb6..cf38e87ae 100644 --- a/docs/doc_examples/e5f50b31f165462d883ecbff45f74985.asciidoc +++ b/docs/doc_examples/e5f50b31f165462d883ecbff45f74985.asciidoc @@ -4,31 +4,25 @@ [source, js] ---- const response = await client.indices.putTemplate({ - name: 'template_1', - body: { - index_patterns: [ - 'te*', - 'bar*' - ], - settings: { - number_of_shards: 1 + name: "template_1", + index_patterns: ["te*", "bar*"], + settings: { + number_of_shards: 1, + }, + mappings: { + _source: { + enabled: false, }, - mappings: { - _source: { - enabled: false + properties: { + host_name: { + type: "keyword", }, - properties: { - host_name: { - type: 'keyword' - }, - created_at: { - type: 'date', - format: 'EEE MMM dd HH:mm:ss Z yyyy' - } - } - } - } -}) -console.log(response) + created_at: { + type: "date", + format: "EEE MMM dd HH:mm:ss Z yyyy", + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc b/docs/doc_examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc new file mode 100644 index 000000000..74dcb596e --- /dev/null +++ b/docs/doc_examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-index-000001", + id: 0, + _source_includes: "*.id", + _source_excludes: "entities", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e5f8f83df37ab2296dc4bfed95d7aba7.asciidoc b/docs/doc_examples/e5f8f83df37ab2296dc4bfed95d7aba7.asciidoc new file mode 100644 index 000000000..48076f9c2 --- /dev/null +++ b/docs/doc_examples/e5f8f83df37ab2296dc4bfed95d7aba7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.enable": "all", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e608cd0c034f6c245ea87f425e09ce2f.asciidoc b/docs/doc_examples/e608cd0c034f6c245ea87f425e09ce2f.asciidoc new file mode 100644 index 000000000..08f05042e --- /dev/null +++ b/docs/doc_examples/e608cd0c034f6c245ea87f425e09ce2f.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc b/docs/doc_examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc new file mode 100644 index 000000000..e6c658b67 --- /dev/null +++ b/docs/doc_examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping3", + roles: ["ldap-user"], + enabled: true, + rules: { + field: { + "realm.name": "ldap1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e60c2bf89fdf38187709d04dd1c55330.asciidoc b/docs/doc_examples/e60c2bf89fdf38187709d04dd1c55330.asciidoc new file mode 100644 index 000000000..2ff61b02f --- /dev/null +++ b/docs/doc_examples/e60c2bf89fdf38187709d04dd1c55330.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + more_like_this: { + fields: ["title", "description"], + like: "Once upon a time", + min_term_freq: 1, + max_query_terms: 12, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e60ded7becfd5b2ccaef5bad2aaa93f5.asciidoc b/docs/doc_examples/e60ded7becfd5b2ccaef5bad2aaa93f5.asciidoc new file mode 100644 index 000000000..eb8c44fbb --- /dev/null +++ b/docs/doc_examples/e60ded7becfd5b2ccaef5bad2aaa93f5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + products: { + terms: { + field: "product", + size: 5, + show_term_doc_count_error: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e619e896ce3dad9dcfc6f8700438be98.asciidoc b/docs/doc_examples/e619e896ce3dad9dcfc6f8700438be98.asciidoc new file mode 100644 index 000000000..7ca935056 --- /dev/null +++ b/docs/doc_examples/e619e896ce3dad9dcfc6f8700438be98.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_query_rules/my-ruleset", + body: { + rules: [ + { + rule_id: "my-rule1", + type: "pinned", + criteria: [ + { + type: "contains", + metadata: "user_query", + values: ["pugs", "puggles"], + }, + { + type: "exact", + metadata: "user_country", + values: ["us"], + }, + ], + actions: { + ids: ["id1", "id2"], + }, + }, + { + rule_id: "my-rule2", + type: "pinned", + criteria: [ + { + type: "fuzzy", + metadata: "user_query", + values: ["rescue dogs"], + }, + ], + 
actions: { + docs: [ + { + _index: "index1", + _id: "id3", + }, + { + _index: "index2", + _id: "id4", + }, + ], + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc b/docs/doc_examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc new file mode 100644 index 000000000..272176613 --- /dev/null +++ b/docs/doc_examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putCalendar({ + calendar_id: "planned-outages", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e6369e7cef82d881af593d5526bf79bd.asciidoc b/docs/doc_examples/e6369e7cef82d881af593d5526bf79bd.asciidoc new file mode 100644 index 000000000..38d057377 --- /dev/null +++ b/docs/doc_examples/e6369e7cef82d881af593d5526bf79bd.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_term: { + "user.id": { + value: "kimchy", + boost: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc b/docs/doc_examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc new file mode 100644 index 000000000..27156eddb --- /dev/null +++ b/docs/doc_examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.health({ + index: "my-index-000001", + level: "shards", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e63cf08350e9381f519c2835843be7cd.asciidoc b/docs/doc_examples/e63cf08350e9381f519c2835843be7cd.asciidoc new file mode 100644 index 000000000..492ee50b1 --- /dev/null +++ b/docs/doc_examples/e63cf08350e9381f519c2835843be7cd.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_date_formats: ["yyyy/MM||MM/dd/yyyy"], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + create_date: "09/25/2015", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e642be44a62a89cf4afb2db28220c9a9.asciidoc b/docs/doc_examples/e642be44a62a89cf4afb2db28220c9a9.asciidoc new file mode 100644 index 000000000..3bdd9984b --- /dev/null +++ b/docs/doc_examples/e642be44a62a89cf4afb2db28220c9a9.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "ingest.geoip.downloader.enabled": true, + "indices.lifecycle.history_index_enabled": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc b/docs/doc_examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc new file mode 100644 index 000000000..33c7ef397 --- /dev/null +++ b/docs/doc_examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO 
NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-000001", + }, + dest: { + index: "my-new-index-000001", + version_type: "external", + }, + script: { + source: + "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}", + lang: "painless", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e697ef947f3fb7835f7fadb9125b1043.asciidoc b/docs/doc_examples/e697ef947f3fb7835f7fadb9125b1043.asciidoc new file mode 100644 index 000000000..aee888618 --- /dev/null +++ b/docs/doc_examples/e697ef947f3fb7835f7fadb9125b1043.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: "SELECT * FROM library ORDER BY page_count DESC", + filter: { + range: { + page_count: { + gte: 100, + lte: 200, + }, + }, + }, + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e6b972611c0ec8ab4c240f33f323d85b.asciidoc b/docs/doc_examples/e6b972611c0ec8ab4c240f33f323d85b.asciidoc new file mode 100644 index 000000000..0e09b23e2 --- /dev/null +++ b/docs/doc_examples/e6b972611c0ec8ab4c240f33f323d85b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + by_day: { + date_histogram: { + field: "date", + calendar_interval: "day", + time_zone: "-01:00", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc b/docs/doc_examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc new file mode 100644 index 000000000..0a8f5ebdc --- /dev/null +++ b/docs/doc_examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getAlias(); +console.log(response); +---- diff --git a/docs/doc_examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc b/docs/doc_examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc new file mode 100644 index 000000000..673ac434a --- /dev/null +++ b/docs/doc_examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.refresh({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e6e47da87079a8b67f767a2a01878cf2.asciidoc b/docs/doc_examples/e6e47da87079a8b67f767a2a01878cf2.asciidoc new file mode 100644 index 000000000..4868fe215 --- /dev/null +++ b/docs/doc_examples/e6e47da87079a8b67f767a2a01878cf2.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + description: "Use geo_point dynamic template for address field", + field: "_dynamic_templates", + value: { + address: "geo_point", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc b/docs/doc_examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc new file mode 100644 index 000000000..5099b0b3e --- /dev/null +++ b/docs/doc_examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + highlight: { + fields: [ + { + title: {}, + }, + { + text: {}, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc b/docs/doc_examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc new file mode 100644 index 000000000..3dd4b3ef8 --- /dev/null +++ b/docs/doc_examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.executeRetention(); +console.log(response); +---- diff --git a/docs/doc_examples/e7811867397b305efbbe8925d8a01c1a.asciidoc b/docs/doc_examples/e7811867397b305efbbe8925d8a01c1a.asciidoc new file mode 100644 index 000000000..97615c4ce --- /dev/null +++ b/docs/doc_examples/e7811867397b305efbbe8925d8a01c1a.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + any_of: { + intervals: [ + { + match: { + query: "the big bad wolf", + ordered: true, + max_gaps: 0, + }, + }, + { + match: { + query: "the big wolf", + ordered: true, + max_gaps: 0, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e784fc00894635470adfd78a0c46b427.asciidoc b/docs/doc_examples/e784fc00894635470adfd78a0c46b427.asciidoc new file mode 100644 index 000000000..6572b4a1f --- /dev/null +++ b/docs/doc_examples/e784fc00894635470adfd78a0c46b427.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "template_1", + template: { + settings: { + number_of_shards: 1, + }, + mappings: { + _source: { + enabled: false, + }, + properties: { + host_name: { + type: "keyword", + }, + created_at: { + type: "date", + format: "EEE MMM dd HH:mm:ss Z yyyy", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e7d819634d765cde269e2669e2dc677f.asciidoc b/docs/doc_examples/e7d819634d765cde269e2669e2dc677f.asciidoc new file mode 100644 index 000000000..8dbb9e88a --- /dev/null +++ b/docs/doc_examples/e7d819634d765cde269e2669e2dc677f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + username: "myuser", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e7e95022867c72a6563137f066dd2973.asciidoc b/docs/doc_examples/e7e95022867c72a6563137f066dd2973.asciidoc new file mode 100644 index 000000000..3ff6b6cac --- /dev/null +++ b/docs/doc_examples/e7e95022867c72a6563137f066dd2973.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + hotspots: { + geohash_grid: { + field: "location", + precision: 5, + }, + aggs: { + significant_crime_types: { + significant_terms: { + field: "crime_type", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e7eca57a5bf5a53cbbe2463bce11495b.asciidoc b/docs/doc_examples/e7eca57a5bf5a53cbbe2463bce11495b.asciidoc new file mode 100644 index 000000000..9ce8d6c4b --- /dev/null +++ b/docs/doc_examples/e7eca57a5bf5a53cbbe2463bce11495b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + types_count: { + value_count: { + field: "type", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc b/docs/doc_examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc new file mode 100644 index 000000000..eb43ead35 --- /dev/null +++ b/docs/doc_examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDataFrameAnalytics({ + id: "house_price_regression_analysis", + source: { + index: "houses_sold_last_10_yrs", + }, + dest: { + index: "house_price_predictions", + }, + analysis: { + regression: { + dependent_variable: "price", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e821d27a8b810821707ba860e31f8b78.asciidoc b/docs/doc_examples/e821d27a8b810821707ba860e31f8b78.asciidoc new file mode 100644 index 000000000..941d4af41 --- /dev/null +++ b/docs/doc_examples/e821d27a8b810821707ba860e31f8b78.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + city: { + type: "text", + fields: { + raw: { + type: "keyword", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc b/docs/doc_examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc new file mode 100644 index 000000000..b4e52ceb7 --- /dev/null +++ b/docs/doc_examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.getWatch({ + id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e82c33def91faddcfeed7b02cd258605.asciidoc b/docs/doc_examples/e82c33def91faddcfeed7b02cd258605.asciidoc new file mode 100644 index 000000000..610783b30 --- /dev/null +++ b/docs/doc_examples/e82c33def91faddcfeed7b02cd258605.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "products", + aggs: { + genres_and_products: { + multi_terms: { + terms: [ + { + field: "genre", + }, + { + field: "product", + missing: "Product Z", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc b/docs/doc_examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc new file mode 100644 index 000000000..219d478d6 --- /dev/null +++ b/docs/doc_examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + aliases: { + alias_1: {}, + alias_2: { + filter: { + term: { + "user.id": "kimchy", + }, + }, + routing: "shard-1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e88a057a13e191e4d5faa22edf2ae8ed.asciidoc b/docs/doc_examples/e88a057a13e191e4d5faa22edf2ae8ed.asciidoc new file mode 100644 index 000000000..b5fa22aa5 --- /dev/null +++ b/docs/doc_examples/e88a057a13e191e4d5faa22edf2ae8ed.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings({ + filter_path: "**.xpack.profiling.templates.enabled", + include_defaults: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e891e1d4805172da45a81f62b6b44aca.asciidoc b/docs/doc_examples/e891e1d4805172da45a81f62b6b44aca.asciidoc new file mode 100644 index 000000000..d35dec2fa --- /dev/null +++ b/docs/doc_examples/e891e1d4805172da45a81f62b6b44aca.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + runtime_mappings: { + normalized_genre: { + type: "keyword", + script: + "\n String genre = doc['genre'].value;\n if (doc['product'].value.startsWith('Anthology')) {\n emit(genre + ' anthology');\n } else {\n emit(genre);\n }\n ", + }, + }, + aggs: { + genres: { + terms: { + field: "normalized_genre", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e89bf0d893b7bf43c2d9b44db6cfe21b.asciidoc b/docs/doc_examples/e89bf0d893b7bf43c2d9b44db6cfe21b.asciidoc new file mode 100644 index 000000000..fac95eaa4 --- /dev/null +++ b/docs/doc_examples/e89bf0d893b7bf43c2d9b44db6cfe21b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + rank_feature: { + field: "pagerank", + log: { + scaling_factor: 4, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8a2726eea5545355d1d0835d4599f55.asciidoc b/docs/doc_examples/e8a2726eea5545355d1d0835d4599f55.asciidoc new file mode 100644 index 000000000..19dd77247 --- /dev/null +++ b/docs/doc_examples/e8a2726eea5545355d1d0835d4599f55.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + term: { + ip_addr: "2001:db8::/48", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8bb5c57bdeff22be8e5f39a99dfe70e.asciidoc b/docs/doc_examples/e8bb5c57bdeff22be8e5f39a99dfe70e.asciidoc new file mode 100644 index 000000000..0cebca89a --- /dev/null +++ b/docs/doc_examples/e8bb5c57bdeff22be8e5f39a99dfe70e.asciidoc @@ -0,0 +1,31 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "stackoverflow", + size: 0, + query: { + query_string: { + query: "tags:kibana OR tags:javascript", + }, + }, + aggs: { + sample: { + sampler: { + shard_size: 200, + }, + aggs: { + keywords: { + significant_terms: { + field: "tags", + exclude: ["kibana", "javascript"], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc b/docs/doc_examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc new file mode 100644 index 000000000..50aa4b1e8 --- /dev/null +++ b/docs/doc_examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchShards({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8cbe2269f3dff6b231e73119e81511d.asciidoc b/docs/doc_examples/e8cbe2269f3dff6b231e73119e81511d.asciidoc new file mode 100644 index 000000000..9bb62d070 --- /dev/null +++ b/docs/doc_examples/e8cbe2269f3dff6b231e73119e81511d.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + exists: { + field: "user", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8e451bc8c45bcf16df43804c4fc8329.asciidoc b/docs/doc_examples/e8e451bc8c45bcf16df43804c4fc8329.asciidoc deleted file mode 100644 index 84fe964c2..000000000 --- a/docs/doc_examples/e8e451bc8c45bcf16df43804c4fc8329.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - track_scores: true, - sort: [ - { - post_date: { - order: 'desc' - } - }, - { - name: 'desc' - }, - { - age: 'desc' - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc b/docs/doc_examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc new file mode 100644 index 000000000..271527227 --- /dev/null +++ b/docs/doc_examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + simple_query_string: { + query: "Will Smith", + fields: ["title", "*_name"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc b/docs/doc_examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc new file mode 100644 index 000000000..0ec3637d4 --- /dev/null +++ b/docs/doc_examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getCategories({ + job_id: "esxi_log", + page: { + size: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e905543b281e9c41395304da76ed2ea3.asciidoc 
b/docs/doc_examples/e905543b281e9c41395304da76ed2ea3.asciidoc new file mode 100644 index 000000000..188e1d435 --- /dev/null +++ b/docs/doc_examples/e905543b281e9c41395304da76ed2ea3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: ".watches", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e930a572e8ddfdecc13498c04007b9e3.asciidoc b/docs/doc_examples/e930a572e8ddfdecc13498c04007b9e3.asciidoc new file mode 100644 index 000000000..6b81643ec --- /dev/null +++ b/docs/doc_examples/e930a572e8ddfdecc13498c04007b9e3.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "openai-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1536, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc b/docs/doc_examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc new file mode 100644 index 000000000..0e82192f7 --- /dev/null +++ b/docs/doc_examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + pre_tags: ["<tag1>"], + post_tags: ["</tag1>"], + fields: { + body: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e944653610f311fa06148d5b0afdf697.asciidoc b/docs/doc_examples/e944653610f311fa06148d5b0afdf697.asciidoc new file mode 100644 index 000000000..4982ab849 --- /dev/null +++ b/docs/doc_examples/e944653610f311fa06148d5b0afdf697.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "component_template1", + template: { + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.cluster.putComponentTemplate({ + name: "runtime_component_template", + template: { + mappings: { + runtime: { + day_of_week: { + type: "keyword", + script: { + source: + "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))", + }, + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e95ba581b298cd7bb598374afbfed315.asciidoc b/docs/doc_examples/e95ba581b298cd7bb598374afbfed315.asciidoc new file mode 100644 index 000000000..99b9c40c9 --- /dev/null +++ b/docs/doc_examples/e95ba581b298cd7bb598374afbfed315.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.get({ + id: "<id>", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e95e61988dc3073a007f7b7445dd233b.asciidoc b/docs/doc_examples/e95e61988dc3073a007f7b7445dd233b.asciidoc new file mode 100644 index 000000000..c9aaddee0 --- /dev/null +++ 
b/docs/doc_examples/e95e61988dc3073a007f7b7445dd233b.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "dsl-data-stream-template", + index_patterns: ["dsl-data-stream*"], + data_stream: {}, + priority: 500, + template: { + settings: { + "index.lifecycle.name": "pre-dsl-ilm-policy", + "index.lifecycle.prefer_ilm": false, + }, + lifecycle: { + data_retention: "7d", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e9738fe09a99080506a07945795e8eda.asciidoc b/docs/doc_examples/e9738fe09a99080506a07945795e8eda.asciidoc new file mode 100644 index 000000000..9b7a8182a --- /dev/null +++ b/docs/doc_examples/e9738fe09a99080506a07945795e8eda.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["stop"], + text: "a quick fox jumps over the lazy dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e99c45a47dc0ba7440aea8a9a99c84fa.asciidoc b/docs/doc_examples/e99c45a47dc0ba7440aea8a9a99c84fa.asciidoc new file mode 100644 index 000000000..1f5b22c33 --- /dev/null +++ b/docs/doc_examples/e99c45a47dc0ba7440aea8a9a99c84fa.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "news", + query: { + match: { + content: "Bird flu", + }, + }, + aggregations: { + my_sample: { + sampler: { + shard_size: 100, + }, + aggregations: { + keywords: { + significant_text: { + field: "content", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e9a0b450af6219772631703d602c7092.asciidoc b/docs/doc_examples/e9a0b450af6219772631703d602c7092.asciidoc new file mode 100644 index 000000000..4073cd41b --- /dev/null +++ b/docs/doc_examples/e9a0b450af6219772631703d602c7092.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + text_expansion: { + "ml.tokens": { + model_id: ".elser_model_2", + model_text: "How is the weather in Jamaica?", + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: false, + }, + }, + }, + }, + rescore: { + window_size: 100, + query: { + rescore_query: { + text_expansion: { + "ml.tokens": { + model_id: ".elser_model_2", + model_text: "How is the weather in Jamaica?", + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: true, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e9c2e15b36372d5281c879d336322b6c.asciidoc b/docs/doc_examples/e9c2e15b36372d5281c879d336322b6c.asciidoc deleted file mode 100644 index 5c3a8c543..000000000 --- a/docs/doc_examples/e9c2e15b36372d5281c879d336322b6c.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - 
index: 'twitter', - _source: [ - 'user', - '_doc' - ] - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response) ---- - diff --git a/docs/doc_examples/e9f9e184499a793828233e536fac0487.asciidoc b/docs/doc_examples/e9f9e184499a793828233e536fac0487.asciidoc new file mode 100644 index 000000000..a8dbb9fcf --- /dev/null +++ b/docs/doc_examples/e9f9e184499a793828233e536fac0487.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001", + scroll_size: 5000, + query: { + term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc b/docs/doc_examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc new file mode 100644 index 000000000..3638bed6c --- /dev/null +++ b/docs/doc_examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/completion/google_ai_studio_completion", + body: { + service: "googleaistudio", + service_settings: { + api_key: "<api_key>", + model_id: "<model_id>", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e9fe3b53b5b6e1ff9566b5237c0fa513.asciidoc b/docs/doc_examples/e9fe3b53b5b6e1ff9566b5237c0fa513.asciidoc new file mode 100644 index 000000000..62c0a1363 --- /dev/null +++ b/docs/doc_examples/e9fe3b53b5b6e1ff9566b5237c0fa513.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "child_example", + id: 2, + routing: 1, + document: { + join: { + name: "answer", + parent: "1", + }, + owner: { + location: "Norfolk, United Kingdom", + display_name: "Sam", + id: 48, + }, + body: "Unfortunately you're pretty much limited to FTP...", + creation_date: "2009-05-04T13:45:37.030", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "child_example", + id: 3, + routing: 1, + refresh: "true", + document: { + join: { + name: "answer", + parent: "1", + }, + owner: { + location: "Norfolk, United Kingdom", + display_name: "Troll", + id: 49, + }, + body: "Use Linux...", + creation_date: "2009-05-05T13:45:37.030", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e9fe608f105d7e3268a15e409e2cb9ab.asciidoc b/docs/doc_examples/e9fe608f105d7e3268a15e409e2cb9ab.asciidoc deleted file mode 100644 index fc175a567..000000000 --- a/docs/doc_examples/e9fe608f105d7e3268a15e409e2cb9ab.asciidoc +++ /dev/null @@ -1,71 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.index({ - index: 'metrics_index', - id: '1', - body: { - 'network.name': 'net-1', - latency_histo: { - values: [ - 0.1, - 0.2, - 0.3, - 0.4, - 0.5 - ], - counts: [ - 3, - 7, - 23, - 12, - 6 - ] - } - } -}) -console.log(response0) - -const response1 = await client.index({ - index: 'metrics_index', - id: '2', - body: { - 'network.name': 'net-2', - latency_histo: { - values: [ - 0.1, - 0.2, - 0.3, - 0.4, - 0.5 - ], - counts: [ - 8, - 17, - 8, - 7, - 6 - ] - } - } -})
-console.log(response1) - -const response2 = await client.search({ - index: 'metrics_index', - size: '0', - body: { - aggs: { - total_requests: { - value_count: { - field: 'latency_histo' - } - } - } - } -}) -console.log(response2) ----- - diff --git a/docs/doc_examples/ea020ea32d5cd35e577c61a120f92451.asciidoc b/docs/doc_examples/ea020ea32d5cd35e577c61a120f92451.asciidoc new file mode 100644 index 000000000..bfd1e2c92 --- /dev/null +++ b/docs/doc_examples/ea020ea32d5cd35e577c61a120f92451.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-data-stream", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2099-05-06T16:21:15.000Z", + message: + '192.0.2.42 - - [06/May/2099:16:21:15 +0000] "GET /images/bg.jpg HTTP/1.0" 200 24736', + }, + { + create: {}, + }, + { + "@timestamp": "2099-05-06T16:25:42.000Z", + message: + '192.0.2.255 - - [06/May/2099:16:25:42 +0000] "GET /favicon.ico HTTP/1.0" 200 3638', + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-data-stream", + document: { + "@timestamp": "2099-05-06T16:21:15.000Z", + message: + '192.0.2.42 - - [06/May/2099:16:21:15 +0000] "GET /images/bg.jpg HTTP/1.0" 200 24736', + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/ea02de2dbe05091fcb0dac72c8ba5f83.asciidoc b/docs/doc_examples/ea02de2dbe05091fcb0dac72c8ba5f83.asciidoc deleted file mode 100644 index e3ba35c84..000000000 --- a/docs/doc_examples/ea02de2dbe05091fcb0dac72c8ba5f83.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.updateByQuery({ - index: 'twitter', - refresh: true, - slices: '5', - body: { - script: { - source: "ctx._source['extra'] = 'test'" - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ea29029884a5fd9a8d8830d25884bf07.asciidoc b/docs/doc_examples/ea29029884a5fd9a8d8830d25884bf07.asciidoc new file mode 100644 index 000000000..b00988659 --- /dev/null +++ b/docs/doc_examples/ea29029884a5fd9a8d8830d25884bf07.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + parent_id: { + type: "my-child", + id: "1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ea313059c18d6edbd28c3f743a5e7c1c.asciidoc b/docs/doc_examples/ea313059c18d6edbd28c3f743a5e7c1c.asciidoc new file mode 100644 index 000000000..d7a847278 --- /dev/null +++ b/docs/doc_examples/ea313059c18d6edbd28c3f743a5e7c1c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + city: "madrid", + }, + }, + aggs: { + tags: { + significant_terms: { + field: "tag", + background_filter: { + term: { + text: "spain", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/42744a175125df5be0ef77413bf8f608.asciidoc b/docs/doc_examples/ea5391267ced860c00214c096e08c8d4.asciidoc similarity index 65% rename from docs/doc_examples/42744a175125df5be0ef77413bf8f608.asciidoc rename to 
docs/doc_examples/ea5391267ced860c00214c096e08c8d4.asciidoc index 6fc780ecc..fda39b10d 100644 --- a/docs/doc_examples/42744a175125df5be0ef77413bf8f608.asciidoc +++ b/docs/doc_examples/ea5391267ced860c00214c096e08c8d4.asciidoc @@ -4,13 +4,12 @@ [source, js] ---- const response = await client.indices.putSettings({ - index: 'twitter', - body: { + index: "my-index-000001", + settings: { index: { - refresh_interval: null - } - } -}) -console.log(response) + number_of_replicas: 2, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/ea5b4d2d87fd4e040afad18903c44869.asciidoc b/docs/doc_examples/ea5b4d2d87fd4e040afad18903c44869.asciidoc new file mode 100644 index 000000000..e7e8a4292 --- /dev/null +++ b/docs/doc_examples/ea5b4d2d87fd4e040afad18903c44869.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top_left: { + lat: 40.73, + lon: -74.1, + }, + bottom_right: { + lat: 40.01, + lon: -71.12, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ea61aa2531ea73ccc0acd2d41f0518eb.asciidoc b/docs/doc_examples/ea61aa2531ea73ccc0acd2d41f0518eb.asciidoc new file mode 100644 index 000000000..83955e4ec --- /dev/null +++ b/docs/doc_examples/ea61aa2531ea73ccc0acd2d41f0518eb.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + pagerank: { + type: "rank_feature", + }, + url_length: { + type: "rank_feature", + positive_score_impact: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + pagerank: 8, + url_length: 22, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + rank_feature: { + field: "pagerank", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ea66a620c23337545e409c120c4ed5d9.asciidoc b/docs/doc_examples/ea66a620c23337545e409c120c4ed5d9.asciidoc new file mode 100644 index 000000000..115c615f8 --- /dev/null +++ b/docs/doc_examples/ea66a620c23337545e409c120c4ed5d9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.explainLifecycle({ + index: ".ds-timeseries-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ea68e3428cc2ca3455bf312d09451489.asciidoc b/docs/doc_examples/ea68e3428cc2ca3455bf312d09451489.asciidoc new file mode 100644 index 000000000..aa41e2ba4 --- /dev/null +++ b/docs/doc_examples/ea68e3428cc2ca3455bf312d09451489.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "product-index", + mappings: { + properties: { + "product-vector": { + type: "dense_vector", + dims: 5, + index: false, + }, + price: { + type: "long", + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/ea690283f301c6ce957efad93d7d5c5d.asciidoc b/docs/doc_examples/ea690283f301c6ce957efad93d7d5c5d.asciidoc new file mode 100644 index 000000000..115a9adf0 --- /dev/null +++ b/docs/doc_examples/ea690283f301c6ce957efad93d7d5c5d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "length_example", + settings: { + analysis: { + analyzer: { + standard_length: { + tokenizer: "standard", + filter: ["length"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ea92390651e8ecad0c890658985343c5.asciidoc b/docs/doc_examples/ea92390651e8ecad0c890658985343c5.asciidoc new file mode 100644 index 000000000..e71ac87a0 --- /dev/null +++ b/docs/doc_examples/ea92390651e8ecad0c890658985343c5.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "hourly-snapshots", + name: "<hourly-snap-{now/d}>", + schedule: "0 0 * * * ?", + repository: "my_repository", + config: { + indices: "*", + include_global_state: true, + }, + retention: { + expire_after: "1d", + min_count: 1, + max_count: 24, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eab3cad0257c539c5efd2689aa52f242.asciidoc b/docs/doc_examples/eab3cad0257c539c5efd2689aa52f242.asciidoc new file mode 100644 index 000000000..32089e3ef --- /dev/null +++ b/docs/doc_examples/eab3cad0257c539c5efd2689aa52f242.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.dataStreamsStats({ + name: "my-data-stream", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc b/docs/doc_examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc new file mode 100644 index 000000000..f5decb0b8 --- /dev/null +++ b/docs/doc_examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc @@ -0,0 +1,61 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + comment: { + query: "foo bar", + }, + }, + }, + rescore: { + window_size: 50, + query: { + rescore_query: { + match_phrase: { + comment: { + query: "foo bar", + slop: 1, + }, + }, + }, + rescore_query_weight: 10, + }, + }, + _source: false, + highlight: { + order: "score", + fields: { + comment: { + fragment_size: 150, + number_of_fragments: 3, + highlight_query: { + bool: { + must: { + match: { + comment: { + query: "foo bar", + }, + }, + }, + should: { + match_phrase: { + comment: { + query: "foo bar", + slop: 1, + boost: 10, + }, + }, + }, + minimum_should_match: 0, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ead4d875877d618594d0cdbdd9b7998b.asciidoc b/docs/doc_examples/ead4d875877d618594d0cdbdd9b7998b.asciidoc new file mode 100644 index 000000000..2a73cee7d --- /dev/null +++ b/docs/doc_examples/ead4d875877d618594d0cdbdd9b7998b.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.cluster.deleteVotingConfigExclusions(); +console.log(response); + +const response1 = await client.cluster.deleteVotingConfigExclusions({ + wait_for_removal: "false", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/eada8af6588584ac88f1e5b15f4a5c2a.asciidoc b/docs/doc_examples/eada8af6588584ac88f1e5b15f4a5c2a.asciidoc new file mode 100644 index 000000000..bc3b41783 --- /dev/null +++ b/docs/doc_examples/eada8af6588584ac88f1e5b15f4a5c2a.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "metrics_index", + id: 1, + document: { + "network.name": "net-1", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 2, + document: { + "network.name": "net-2", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "metrics_index", + size: 0, + aggs: { + total_requests: { + value_count: { + field: "latency_histo", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/eae8931d01b3b878dd0c45214121e662.asciidoc b/docs/doc_examples/eae8931d01b3b878dd0c45214121e662.asciidoc new file mode 100644 index 000000000..f36471ccb --- /dev/null +++ b/docs/doc_examples/eae8931d01b3b878dd0c45214121e662.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + geo_bounding_box: { + "pin.location": { + top_left: "dr", + bottom_right: "dr", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eaf53b05959cc6b7fb09579baf34de68.asciidoc b/docs/doc_examples/eaf53b05959cc6b7fb09579baf34de68.asciidoc new file mode 100644 index 000000000..400427410 --- /dev/null +++ b/docs/doc_examples/eaf53b05959cc6b7fb09579baf34de68.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sale_type: { + terms: { + field: "type", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + hat_vs_bag_ratio: { + bucket_script: { + buckets_path: { + hats: "sale_type['hat']>sales", + bags: "sale_type['bag']>sales", + }, + script: "params.hats / params.bags", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eaf6a846ded090fd6ac48269ad2b328b.asciidoc b/docs/doc_examples/eaf6a846ded090fd6ac48269ad2b328b.asciidoc new file mode 100644 index 000000000..8f88b4f8d --- /dev/null +++ b/docs/doc_examples/eaf6a846ded090fd6ac48269ad2b328b.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.lifecycle.name": "my_policy", + "index.lifecycle.rollover_alias": "my_data", + }, + aliases: { + my_data: { + is_write_index: true, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/eafdabe80b21b90495555fa6d9089412.asciidoc b/docs/doc_examples/eafdabe80b21b90495555fa6d9089412.asciidoc new file mode 100644 index 000000000..fd4633709 --- /dev/null +++ b/docs/doc_examples/eafdabe80b21b90495555fa6d9089412.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedServiceTokens({ + namespace: "elastic", + service: "fleet-server", + name: "token1,token2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb09235533a1c65a0627ba05f7d4ad4d.asciidoc b/docs/doc_examples/eb09235533a1c65a0627ba05f7d4ad4d.asciidoc new file mode 100644 index 000000000..56ae5af9d --- /dev/null +++ b/docs/doc_examples/eb09235533a1c65a0627ba05f7d4ad4d.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "place", + id: 1, + document: { + suggest: { + input: "timmy's", + contexts: { + location: [ + { + lat: 43.6624803, + lon: -79.3863353, + }, + { + lat: 43.6624718, + lon: -79.3873227, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc b/docs/doc_examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc new file mode 100644 index 000000000..fea94f9a6 --- /dev/null +++ b/docs/doc_examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: "ctx._source.new_field = 'value_of_new_field'", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb33a7e5a0fe83fdaa0f79354f659428.asciidoc b/docs/doc_examples/eb33a7e5a0fe83fdaa0f79354f659428.asciidoc new file mode 100644 index 000000000..954f2ae31 --- /dev/null +++ b/docs/doc_examples/eb33a7e5a0fe83fdaa0f79354f659428.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + client_ip: { + type: "ip", + script: { + source: + 'String m = doc["message"].value; int end = m.indexOf(" "); emit(m.substring(0, end));', + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb4e43b47867b54214a8630172dd0e21.asciidoc b/docs/doc_examples/eb4e43b47867b54214a8630172dd0e21.asciidoc new file mode 100644 index 000000000..3caf88096 --- /dev/null +++ b/docs/doc_examples/eb4e43b47867b54214a8630172dd0e21.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteForecast({ + job_id: "total-requests", + forecast_id: "_all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb54506fbc71a7d250e86b22d0600114.asciidoc b/docs/doc_examples/eb54506fbc71a7d250e86b22d0600114.asciidoc new file mode 100644 index 000000000..87276bbd2 --- /dev/null +++ b/docs/doc_examples/eb54506fbc71a7d250e86b22d0600114.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector", + querystring: { + service_type: "sharepoint_online,google_drive", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb5486d2fe4283475bf9e0e09280be16.asciidoc b/docs/doc_examples/eb5486d2fe4283475bf9e0e09280be16.asciidoc new file mode 100644 index 000000000..adf2ca795 --- /dev/null +++ b/docs/doc_examples/eb5486d2fe4283475bf9e0e09280be16.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + forcemerge: { + max_num_segments: 1, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb5987b58dae90c3a8a1609410be0570.asciidoc b/docs/doc_examples/eb5987b58dae90c3a8a1609410be0570.asciidoc new file mode 100644 index 000000000..3d37bbfbb --- /dev/null +++ b/docs/doc_examples/eb5987b58dae90c3a8a1609410be0570.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "indonesian_example", + settings: { + analysis: { + filter: { + indonesian_stop: { + type: "stop", + stopwords: "_indonesian_", + }, + indonesian_keywords: { + type: "keyword_marker", + keywords: ["contoh"], + }, + indonesian_stemmer: { + type: "stemmer", + language: "indonesian", + }, + }, + analyzer: { + rebuilt_indonesian: { + tokenizer: "standard", + filter: [ + "lowercase", + "indonesian_stop", + "indonesian_keywords", + "indonesian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb6d62f1d855a8e8fe9eab2656d47504.asciidoc b/docs/doc_examples/eb6d62f1d855a8e8fe9eab2656d47504.asciidoc new file mode 100644 index 000000000..45a7c10da --- /dev/null +++ b/docs/doc_examples/eb6d62f1d855a8e8fe9eab2656d47504.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + suggest: { + text: "obel prize", + simple_phrase: { + phrase: { + field: "title.trigram", + size: 1, + direct_generator: [ + { + field: "title.trigram", + suggest_mode: "always", + }, + { + field: "title.reverse", + suggest_mode: "always", + pre_filter: "reverse", + post_filter: "reverse", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc b/docs/doc_examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc new file mode 100644 index 000000000..7a8337ffa --- /dev/null +++ b/docs/doc_examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "snapshot*", + size: 2, + sort: "name", + after: "c25hcHNob3RfMixteV9yZXBvc2l0b3J5LHNuYXBzaG90XzI=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc b/docs/doc_examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc new file mode 
100644 index 000000000..236d76185 --- /dev/null +++ b/docs/doc_examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_scheduling", + body: { + scheduling: { + full: { + enabled: true, + interval: "0 10 0 * * ?", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc b/docs/doc_examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc new file mode 100644 index 000000000..9c53844b2 --- /dev/null +++ b/docs/doc_examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + index: { + number_of_routing_shards: 30, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc b/docs/doc_examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc new file mode 100644 index 000000000..71d69b8bd --- /dev/null +++ b/docs/doc_examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.deletePrivileges({ + application: "myapp", + name: "read", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ebef3dc8ed1766d433a5cffc40fde7ae.asciidoc b/docs/doc_examples/ebef3dc8ed1766d433a5cffc40fde7ae.asciidoc new file mode 100644 index 000000000..5a6237882 --- /dev/null +++ b/docs/doc_examples/ebef3dc8ed1766d433a5cffc40fde7ae.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.removePolicy({ + index: "logs-my_app-default", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc b/docs/doc_examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc new file mode 100644 index 000000000..39544c478 --- /dev/null +++ b/docs/doc_examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n process where (process.name == "cmd.exe" and process.pid != 2013)\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec195297eb804cba1cb19c9926773059.asciidoc b/docs/doc_examples/ec195297eb804cba1cb19c9926773059.asciidoc new file mode 100644 index 000000000..4fcadbc73 --- /dev/null +++ b/docs/doc_examples/ec195297eb804cba1cb19c9926773059.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "mylogs-pre-ilm*", + settings: { + index: { + lifecycle: { + name: "mylogs_policy_existing", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec27afee074001b0e4e393611010842b.asciidoc 
b/docs/doc_examples/ec27afee074001b0e4e393611010842b.asciidoc deleted file mode 100644 index 8fe4fee7b..000000000 --- a/docs/doc_examples/ec27afee074001b0e4e393611010842b.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - gauss: { - date: { - origin: '2013-09-17', - scale: '10d', - offset: '5d', - decay: 0.5 - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ec420b28e327f332c9e99d6040c4eb3f.asciidoc b/docs/doc_examples/ec420b28e327f332c9e99d6040c4eb3f.asciidoc new file mode 100644 index 000000000..80f12e99a --- /dev/null +++ b/docs/doc_examples/ec420b28e327f332c9e99d6040c4eb3f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "users", + id: 0, + pipeline: "postal_lookup", + document: { + first_name: "Mardy", + last_name: "Brown", + geo_location: "POINT (13.5 52.5)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec44999b6618ac6bbacb23eb08c0fa88.asciidoc b/docs/doc_examples/ec44999b6618ac6bbacb23eb08c0fa88.asciidoc new file mode 100644 index 000000000..b4eb794c2 --- /dev/null +++ b/docs/doc_examples/ec44999b6618ac6bbacb23eb08c0fa88.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + runtime_mappings: { + gc_size: { + type: "keyword", + script: + "\n Map gc=dissect('[%{@timestamp}][%{code}][%{desc}] %{ident} used %{usize}, capacity %{csize}, committed %{comsize}, reserved %{rsize}').extract(doc[\"gc.keyword\"].value);\n if (gc != null) emit(\"used\" + ' ' + gc.usize + ', ' + \"capacity\" + ' ' + gc.csize + ', ' + \"committed\" + ' ' + gc.comsize);\n ", + }, + }, + size: 1, + aggs: { + sizes: { + terms: { + field: "gc_size", + size: 10, + }, + }, + }, + fields: ["gc_size"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec473de07fe89bcbac1f8e278617fe46.asciidoc b/docs/doc_examples/ec473de07fe89bcbac1f8e278617fe46.asciidoc deleted file mode 100644 index 9d15fe856..000000000 --- a/docs/doc_examples/ec473de07fe89bcbac1f8e278617fe46.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - query: { - match: { - message: 'elasticsearch' - } - }, - script_score: { - script: { - source: "Math.log(2 + doc['likes'].value)" - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc b/docs/doc_examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc new file mode 100644 index 000000000..311487347 --- /dev/null +++ b/docs/doc_examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "set_ingest_time", + description: "Set ingest timestamp.", + processors: [ + { + set: { + field: 
"event.ingested", + value: "{{{_ingest.timestamp}}}", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec69543e39c1f6afb5aff6fb9adc400d.asciidoc b/docs/doc_examples/ec69543e39c1f6afb5aff6fb9adc400d.asciidoc new file mode 100644 index 000000000..8a6eb97c8 --- /dev/null +++ b/docs/doc_examples/ec69543e39c1f6afb5aff6fb9adc400d.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "index1", + refresh: "true", + operations: [ + { + index: { + _id: "doc1", + }, + }, + { + comment: "run with scissors", + }, + { + index: { + _id: "doc2", + }, + }, + { + comment: "running with scissors", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec736c31f49c54e5424efa2e53b22906.asciidoc b/docs/doc_examples/ec736c31f49c54e5424efa2e53b22906.asciidoc new file mode 100644 index 000000000..9dd498c1a --- /dev/null +++ b/docs/doc_examples/ec736c31f49c54e5424efa2e53b22906.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "user_agent", + description: "Add user agent information", + processors: [ + { + user_agent: { + field: "agent", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "user_agent", + document: { + agent: + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ec8f176ebf436d5719bdeca4a9ea8220.asciidoc b/docs/doc_examples/ec8f176ebf436d5719bdeca4a9ea8220.asciidoc new file mode 100644 index 000000000..454c2c49c --- /dev/null +++ b/docs/doc_examples/ec8f176ebf436d5719bdeca4a9ea8220.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "products", + runtime_mappings: { + "genre.length": { + type: "long", + script: "emit(doc['genre'].value.length())", + }, + }, + aggs: { + genres_and_products: { + multi_terms: { + terms: [ + { + field: "genre.length", + }, + { + field: "product", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ecc57597f6b791d1151ad79d9f4ce67b.asciidoc b/docs/doc_examples/ecc57597f6b791d1151ad79d9f4ce67b.asciidoc new file mode 100644 index 000000000..57ec2e036 --- /dev/null +++ b/docs/doc_examples/ecc57597f6b791d1151ad79d9f4ce67b.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + calendar_interval: "1M", + format: "yyyy-MM-dd", + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ece01f9382e450f669c0e0925e5b30e5.asciidoc b/docs/doc_examples/ece01f9382e450f669c0e0925e5b30e5.asciidoc new file mode 100644 index 000000000..d33db1d05 --- /dev/null +++ 
b/docs/doc_examples/ece01f9382e450f669c0e0925e5b30e5.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + range: { + date_range: { + field: "date", + format: "MM-yyy", + ranges: [ + { + to: "now-10M/M", + }, + { + from: "now-10M/M", + }, + ], + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc b/docs/doc_examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc new file mode 100644 index 000000000..2c69648f6 --- /dev/null +++ b/docs/doc_examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_error", + body: { + error: "Houston, we have a problem!", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed01b542bb56b1521ea8d5a3c67aa891.asciidoc b/docs/doc_examples/ed01b542bb56b1521ea8d5a3c67aa891.asciidoc new file mode 100644 index 000000000..4fccf4f6e --- /dev/null +++ b/docs/doc_examples/ed01b542bb56b1521ea8d5a3c67aa891.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_gcs_repository", + repository: { + type: "gcs", + settings: { + bucket: "my_bucket", + client: "my_alternate_client", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed01d27b8f80bb4ea54bf4e32b8d6258.asciidoc b/docs/doc_examples/ed01d27b8f80bb4ea54bf4e32b8d6258.asciidoc new file mode 100644 index 000000000..a5283d696 --- /dev/null +++ b/docs/doc_examples/ed01d27b8f80bb4ea54bf4e32b8d6258.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggs: { + rings_around_amsterdam: { + geo_distance: { + field: "location", + origin: "POINT (4.894 52.3760)", + ranges: [ + { + to: 100000, + key: "first_ring", + }, + { + from: 100000, + to: 300000, + key: "second_ring", + }, + { + from: 300000, + key: "third_ring", + }, + ], + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc b/docs/doc_examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc new file mode 100644 index 000000000..6e565f33e --- /dev/null +++ b/docs/doc_examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.reloadSearchAnalyzers({ + index: "my-index-000001", +}); +console.log(response); + +const response1 = await client.indices.clearCache({ + index: "my-index-000001", + request: "true", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/ed12eeadb4e530b53c4975dadaa06054.asciidoc b/docs/doc_examples/ed12eeadb4e530b53c4975dadaa06054.asciidoc new file mode 100644 index 000000000..2fe91ad53 --- /dev/null +++ 
b/docs/doc_examples/ed12eeadb4e530b53c4975dadaa06054.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.processorGrok({ + ecs_compatibility: "v1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed250b74bc77c15bb794f55a12d762c3.asciidoc b/docs/doc_examples/ed250b74bc77c15bb794f55a12d762c3.asciidoc new file mode 100644 index 000000000..18ac24524 --- /dev/null +++ b/docs/doc_examples/ed250b74bc77c15bb794f55a12d762c3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.info({ + filter_path: "**.mlockall", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed27843eff311f3011b679e97e6fda50.asciidoc b/docs/doc_examples/ed27843eff311f3011b679e97e6fda50.asciidoc new file mode 100644 index 000000000..c6ecce6fa --- /dev/null +++ b/docs/doc_examples/ed27843eff311f3011b679e97e6fda50.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", + indices: "my-index,logs-my_app-default", + index_settings: { + "index.number_of_replicas": 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc b/docs/doc_examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc new file mode 100644 index 000000000..ed11c2ffd --- /dev/null +++ b/docs/doc_examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "publications", + fields: "author.id,abstract,name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc b/docs/doc_examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc new file mode 100644 index 000000000..dfa1e1323 --- /dev/null +++ b/docs/doc_examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.master({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed5c3b45e8de912faba44507d827eb93.asciidoc b/docs/doc_examples/ed5c3b45e8de912faba44507d827eb93.asciidoc new file mode 100644 index 000000000..0ce9c3072 --- /dev/null +++ b/docs/doc_examples/ed5c3b45e8de912faba44507d827eb93.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": "POINT (-70 40)", + order: "asc", + unit: "km", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed60daeaec351fc8b3f39a3dfad6fc4e.asciidoc b/docs/doc_examples/ed60daeaec351fc8b3f39a3dfad6fc4e.asciidoc new file mode 100644 index 000000000..724c1f0c0 --- /dev/null +++ 
b/docs/doc_examples/ed60daeaec351fc8b3f39a3dfad6fc4e.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "amazon-bedrock-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1024, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed688d86eeaa4d7969acb0f574eb917f.asciidoc b/docs/doc_examples/ed688d86eeaa4d7969acb0f574eb917f.asciidoc new file mode 100644 index 000000000..1dce69225 --- /dev/null +++ b/docs/doc_examples/ed688d86eeaa4d7969acb0f574eb917f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my_queries1", + id: 1, + refresh: "true", + document: { + query: { + term: { + "my_field.prefix": "abc", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed6b996ea389e0955a01c2e67f4c8339.asciidoc b/docs/doc_examples/ed6b996ea389e0955a01c2e67f4c8339.asciidoc new file mode 100644 index 000000000..5e627dec3 --- /dev/null +++ b/docs/doc_examples/ed6b996ea389e0955a01c2e67f4c8339.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.fieldCaps({ + index: "my-index-000001", + fields: "my-field", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed7fa1971ac322aeccd6391ab32d0490.asciidoc b/docs/doc_examples/ed7fa1971ac322aeccd6391ab32d0490.asciidoc new file mode 100644 index 000000000..da6e6d943 --- /dev/null +++ b/docs/doc_examples/ed7fa1971ac322aeccd6391ab32d0490.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + h: "name,master,node.role,disk.used_percent,disk.used,disk.avail,disk.total", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed85ed833bec7286a0dfbe64077c5715.asciidoc b/docs/doc_examples/ed85ed833bec7286a0dfbe64077c5715.asciidoc new file mode 100644 index 000000000..08ac02a73 --- /dev/null +++ b/docs/doc_examples/ed85ed833bec7286a0dfbe64077c5715.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "danish_example", + settings: { + analysis: { + filter: { + danish_stop: { + type: "stop", + stopwords: "_danish_", + }, + danish_keywords: { + type: "keyword_marker", + keywords: ["eksempel"], + }, + danish_stemmer: { + type: "stemmer", + language: "danish", + }, + }, + analyzer: { + rebuilt_danish: { + tokenizer: "standard", + filter: [ + "lowercase", + "danish_stop", + "danish_keywords", + "danish_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/edae616e1244babf6032aecc6aaaf836.asciidoc b/docs/doc_examples/edae616e1244babf6032aecc6aaaf836.asciidoc new file mode 100644 index 000000000..72c6aa669 --- /dev/null +++ b/docs/doc_examples/edae616e1244babf6032aecc6aaaf836.asciidoc @@ -0,0 +1,26 @@ 
+// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": { + lat: 40, + lon: -70, + }, + order: "asc", + unit: "km", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/edb25dc0162b039d477cb06aed2d6275.asciidoc b/docs/doc_examples/edb25dc0162b039d477cb06aed2d6275.asciidoc new file mode 100644 index 000000000..31c711aa2 --- /dev/null +++ b/docs/doc_examples/edb25dc0162b039d477cb06aed2d6275.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + bool: { + should: [ + { + sparse_vector: { + field: "ml.inference.title_expanded.predicted_value", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + boost: 1, + }, + }, + { + sparse_vector: { + field: "ml.inference.description_expanded.predicted_value", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + boost: 1, + }, + }, + { + multi_match: { + query: "How is the weather in Jamaica?", + fields: ["title", "description"], + boost: 4, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/edb5cad890208014ecd91f3f739ce193.asciidoc b/docs/doc_examples/edb5cad890208014ecd91f3f739ce193.asciidoc new file mode 100644 index 000000000..2bb41773d --- /dev/null +++ b/docs/doc_examples/edb5cad890208014ecd91f3f739ce193.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "metrics-weather_sensors-dev", +}); +console.log(response); +---- diff --git a/docs/doc_examples/edcfadbfb14d97a2f5e6e21ef7039818.asciidoc b/docs/doc_examples/edcfadbfb14d97a2f5e6e21ef7039818.asciidoc new file mode 100644 index 000000000..f3065ff0d --- /dev/null +++ b/docs/doc_examples/edcfadbfb14d97a2f5e6e21ef7039818.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + query: { + match_all: {}, + }, + boost: "5", + functions: [ + { + filter: { + match: { + test: "bar", + }, + }, + random_score: {}, + weight: 23, + }, + { + filter: { + match: { + test: "cat", + }, + }, + weight: 42, + }, + ], + max_boost: 42, + score_mode: "max", + boost_mode: "multiply", + min_score: 42, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee08328cd157d547de19b4abe867b23e.asciidoc b/docs/doc_examples/ee08328cd157d547de19b4abe867b23e.asciidoc new file mode 100644 index 000000000..a2bbbd116 --- /dev/null +++ b/docs/doc_examples/ee08328cd157d547de19b4abe867b23e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getAlias({ + name: "logs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee0fd67acc807f1bddf5e9807c06e7eb.asciidoc b/docs/doc_examples/ee0fd67acc807f1bddf5e9807c06e7eb.asciidoc new file mode 100644 index 
000000000..9e94a56b4 --- /dev/null +++ b/docs/doc_examples/ee0fd67acc807f1bddf5e9807c06e7eb.asciidoc @@ -0,0 +1,95 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + weighted_tokens: { + query_expansion_field: { + tokens: { + "2161": 0.4679, + "2621": 0.307, + "2782": 0.1299, + "2851": 0.1056, + "3088": 0.3041, + "3376": 0.1038, + "3467": 0.4873, + "3684": 0.8958, + "4380": 0.334, + "4542": 0.4636, + "4633": 2.2805, + "4785": 1.2628, + "4860": 1.0655, + "5133": 1.0709, + "7139": 1.0016, + "7224": 0.2486, + "7387": 0.0985, + "7394": 0.0542, + "8915": 0.369, + "9156": 2.8947, + "10505": 0.2771, + "11464": 0.3996, + "13525": 0.0088, + "14178": 0.8161, + "16893": 0.1376, + "17851": 1.5348, + "19939": 0.6012, + }, + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: false, + }, + }, + }, + }, + rescore: { + window_size: 100, + query: { + rescore_query: { + weighted_tokens: { + query_expansion_field: { + tokens: { + "2161": 0.4679, + "2621": 0.307, + "2782": 0.1299, + "2851": 0.1056, + "3088": 0.3041, + "3376": 0.1038, + "3467": 0.4873, + "3684": 0.8958, + "4380": 0.334, + "4542": 0.4636, + "4633": 2.2805, + "4785": 1.2628, + "4860": 1.0655, + "5133": 1.0709, + "7139": 1.0016, + "7224": 0.2486, + "7387": 0.0985, + "7394": 0.0542, + "8915": 0.369, + "9156": 2.8947, + "10505": 0.2771, + "11464": 0.3996, + "13525": 0.0088, + "14178": 0.8161, + "16893": 0.1376, + "17851": 1.5348, + "19939": 0.6012, + }, + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: true, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee223e604bb695cad2517d28ae63ac34.asciidoc b/docs/doc_examples/ee223e604bb695cad2517d28ae63ac34.asciidoc new file mode 100644 index 000000000..907093605 --- /dev/null +++ b/docs/doc_examples/ee223e604bb695cad2517d28ae63ac34.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "example-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + term: { + text: "shoes", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [1.25, 2, 3.5], + k: 50, + num_candidates: 100, + }, + }, + ], + rank_window_size: 50, + rank_constant: 20, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee2d97090d617ed8aa2a87ea33556dd7.asciidoc b/docs/doc_examples/ee2d97090d617ed8aa2a87ea33556dd7.asciidoc new file mode 100644 index 000000000..6b15589c5 --- /dev/null +++ b/docs/doc_examples/ee2d97090d617ed8aa2a87ea33556dd7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["truncate"], + text: "the quinquennial extravaganza carried on", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc b/docs/doc_examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc new file mode 100644 index 000000000..f643646c4 --- /dev/null +++ b/docs/doc_examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc @@ -0,0 +1,10 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRoles({ + name: "my_admin_role", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee634d59def6302134d24fa90e18b609.asciidoc b/docs/doc_examples/ee634d59def6302134d24fa90e18b609.asciidoc new file mode 100644 index 000000000..7c7070cf7 --- /dev/null +++ b/docs/doc_examples/ee634d59def6302134d24fa90e18b609.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.putAutoscalingPolicy({ + name: "my_autoscaling_policy", + policy: { + roles: ["ml"], + deciders: { + ml: { + num_anomaly_jobs_in_queue: 5, + num_analytics_jobs_in_queue: 3, + down_scale_delay: "30m", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee90d1fb22b59d30da339d825303b912.asciidoc b/docs/doc_examples/ee90d1fb22b59d30da339d825303b912.asciidoc new file mode 100644 index 000000000..40f37b043 --- /dev/null +++ b/docs/doc_examples/ee90d1fb22b59d30da339d825303b912.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putPrivileges({ + privileges: { + app01: { + read: { + actions: ["action:login", "data:read/*"], + }, + write: { + actions: ["action:login", "data:write/*"], + }, + }, + app02: { + all: { + actions: ["*"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eeb35b759bd239bb773c8ebd5fe63d05.asciidoc b/docs/doc_examples/eeb35b759bd239bb773c8ebd5fe63d05.asciidoc new file mode 100644 index 000000000..429756a85 --- /dev/null +++ b/docs/doc_examples/eeb35b759bd239bb773c8ebd5fe63d05.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggs: { + cities: { + terms: { + field: "city.keyword", + }, + aggs: { + centroid: { + geo_centroid: { + field: "location", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc b/docs/doc_examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc new file mode 100644 index 000000000..e6563395a --- /dev/null +++ b/docs/doc_examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + from: 40, + size: 20, + query: { + term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eed37703cfe8fec093ed5a42210a6ffd.asciidoc b/docs/doc_examples/eed37703cfe8fec093ed5a42210a6ffd.asciidoc new file mode 100644 index 000000000..4ca308a64 --- /dev/null +++ b/docs/doc_examples/eed37703cfe8fec093ed5a42210a6ffd.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.putJob({ + id: "sensor", + index_pattern: "sensor-*", + rollup_index: "sensor_rollup", + cron: "*/30 * * * * ?", + 
page_size: 1000, + groups: { + date_histogram: { + field: "timestamp", + fixed_interval: "60m", + }, + terms: { + fields: ["node"], + }, + }, + metrics: [ + { + field: "temperature", + metrics: ["min", "max", "sum"], + }, + { + field: "voltage", + metrics: ["avg"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc b/docs/doc_examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc new file mode 100644 index 000000000..2c82d4a70 --- /dev/null +++ b/docs/doc_examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/hugging-face-embeddings", + body: { + service: "hugging_face", + service_settings: { + api_key: "", + url: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eef9deff7f9799d1f7657bb7e2afb7f1.asciidoc b/docs/doc_examples/eef9deff7f9799d1f7657bb7e2afb7f1.asciidoc new file mode 100644 index 000000000..535a66e39 --- /dev/null +++ b/docs/doc_examples/eef9deff7f9799d1f7657bb7e2afb7f1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "*", + expand_wildcards: "all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef0f4fa4272c47ff62fb7b422cf975e7.asciidoc b/docs/doc_examples/ef0f4fa4272c47ff62fb7b422cf975e7.asciidoc deleted file mode 100644 index 7f7ab93ab..000000000 --- a/docs/doc_examples/ef0f4fa4272c47ff62fb7b422cf975e7.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - price: { - missing: '_last' - } - } - ], - query: { - term: { - product: 'chocolate' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ef10e8d07d9fae945e035d5dee1e9754.asciidoc b/docs/doc_examples/ef10e8d07d9fae945e035d5dee1e9754.asciidoc new file mode 100644 index 000000000..ebdd4ee45 --- /dev/null +++ b/docs/doc_examples/ef10e8d07d9fae945e035d5dee1e9754.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "synonym_graph", + synonyms: ["dns, domain name system"], + }, + "flatten_graph", + ], + text: "domain name system is fragile", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc b/docs/doc_examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc new file mode 100644 index 000000000..5e496f0cf --- /dev/null +++ b/docs/doc_examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + max_docs: 1, + source: { + index: "my-index-000001", + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef33b3b373f7040b874146599db5d557.asciidoc 
b/docs/doc_examples/ef33b3b373f7040b874146599db5d557.asciidoc new file mode 100644 index 000000000..cce76bc0b --- /dev/null +++ b/docs/doc_examples/ef33b3b373f7040b874146599db5d557.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["lowercase"], + text: "this is a test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1b542e3ea87a742f95641d64dcfb1bdb.asciidoc b/docs/doc_examples/ef3666b5d288faefbcbc4a25e8f506da.asciidoc similarity index 73% rename from docs/doc_examples/1b542e3ea87a742f95641d64dcfb1bdb.asciidoc rename to docs/doc_examples/ef3666b5d288faefbcbc4a25e8f506da.asciidoc index 29dddce33..39568eb32 100644 --- a/docs/doc_examples/1b542e3ea87a742f95641d64dcfb1bdb.asciidoc +++ b/docs/doc_examples/ef3666b5d288faefbcbc4a25e8f506da.asciidoc @@ -4,9 +4,7 @@ [source, js] ---- const response = await client.count({ - index: 'twitter', - q: 'user:kimchy' -}) -console.log(response) + index: "my-index-000001", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/ef779b87b3b0fb6e6bae9c8875e3a1cf.asciidoc b/docs/doc_examples/ef779b87b3b0fb6e6bae9c8875e3a1cf.asciidoc new file mode 100644 index 000000000..7578ffc46 --- /dev/null +++ b/docs/doc_examples/ef779b87b3b0fb6e6bae9c8875e3a1cf.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + "date.promoted_is_tomorrow": { + type: "date", + script: + "\n long date = doc['date'].value.toInstant().toEpochMilli();\n if (doc['promoted'].value) {\n date += 86400;\n }\n emit(date);\n ", + }, + }, + aggs: { + sales_over_time: { + date_histogram: { + field: "date.promoted_is_tomorrow", + calendar_interval: "1M", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef867e563cbffe7866769a096b5d7a92.asciidoc b/docs/doc_examples/ef867e563cbffe7866769a096b5d7a92.asciidoc new file mode 100644 index 000000000..5f0cb29b7 --- /dev/null +++ b/docs/doc_examples/ef867e563cbffe7866769a096b5d7a92.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + cumulative_sales: { + cumulative_sum: { + buckets_path: "sales", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef8f30e85e12e9a5a8817d28977598e4.asciidoc b/docs/doc_examples/ef8f30e85e12e9a5a8817d28977598e4.asciidoc new file mode 100644 index 000000000..5971b624b --- /dev/null +++ b/docs/doc_examples/ef8f30e85e12e9a5a8817d28977598e4.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + aggs: { + price_ranges: { + range: { + field: "price", + ranges: [ + { + to: 100, + }, + { + from: 100, + to: 200, + }, + { + from: 200, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/ef9111c1648d7820925f12e07d1346c5.asciidoc b/docs/doc_examples/ef9111c1648d7820925f12e07d1346c5.asciidoc deleted file mode 100644 index 384df2da6..000000000 --- a/docs/doc_examples/ef9111c1648d7820925f12e07d1346c5.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - my_field: { - type: 'text', - fields: { - keyword: { - type: 'keyword' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ef9c29759459904fef162acd223462c4.asciidoc b/docs/doc_examples/ef9c29759459904fef162acd223462c4.asciidoc new file mode 100644 index 000000000..674ca8a30 --- /dev/null +++ b/docs/doc_examples/ef9c29759459904fef162acd223462c4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "ingest", + filter_path: "nodes.*.ingest", +}); +console.log(response); +---- diff --git a/docs/doc_examples/efa146bf81a9351ba42b92a6decbcfee.asciidoc b/docs/doc_examples/efa146bf81a9351ba42b92a6decbcfee.asciidoc new file mode 100644 index 000000000..78f36d9c1 --- /dev/null +++ b/docs/doc_examples/efa146bf81a9351ba42b92a6decbcfee.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index", + runtime: { + "http.response": { + type: "long", + script: + '\n String response=dissect(\'%{clientip} %{ident} %{auth} [%{@timestamp}] "%{verb} %{request} HTTP/%{httpversion}" %{response} %{size}\').extract(doc["message"].value)?.response;\n if (response != null) emit(Integer.parseInt(response));\n ', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/efa924638043f3a6b23ccb824d757eba.asciidoc b/docs/doc_examples/efa924638043f3a6b23ccb824d757eba.asciidoc new file mode 100644 index 000000000..70c4d55ae --- /dev/null +++ b/docs/doc_examples/efa924638043f3a6b23ccb824d757eba.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "mv", + refresh: "true", + operations: [ + { + index: {}, + }, + { + a: 1, + b: [2, 1], + }, + { + index: {}, + }, + { + a: 2, + b: 3, + }, + ], +}); +console.log(response); + +const response1 = await client.esql.query({ + query: "FROM mv | LIMIT 2", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/eff2fc92d46eb3c8f4d424eed18f54a2.asciidoc b/docs/doc_examples/eff2fc92d46eb3c8f4d424eed18f54a2.asciidoc new file mode 100644 index 000000000..8ea87e2e1 --- /dev/null +++ b/docs/doc_examples/eff2fc92d46eb3c8f4d424eed18f54a2.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + query: { + match_all: {}, + }, + boost: "5", + random_score: {}, + boost_mode: "multiply", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eff8ecaed1ed084909c64450fc363a20.asciidoc 
b/docs/doc_examples/eff8ecaed1ed084909c64450fc363a20.asciidoc new file mode 100644 index 000000000..e612615ea --- /dev/null +++ b/docs/doc_examples/eff8ecaed1ed084909c64450fc363a20.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + transient: { + "indices.recovery.max_bytes_per_sec": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc b/docs/doc_examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc new file mode 100644 index 000000000..983f0e8c1 --- /dev/null +++ b/docs/doc_examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.delete({ + repository: "my_repository", + snapshot: "my_snapshot", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc b/docs/doc_examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc new file mode 100644 index 000000000..1ae0987ba --- /dev/null +++ b/docs/doc_examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.fielddata({ + fields: "body,soul", + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f085fb032dae56a3b104ab874eaea2ad.asciidoc b/docs/doc_examples/f085fb032dae56a3b104ab874eaea2ad.asciidoc deleted file mode 100644 index e3f1b5d1a..000000000 --- a/docs/doc_examples/f085fb032dae56a3b104ab874eaea2ad.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - tags: { - terms: { - field: 'tags', - missing: 'N/A' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc b/docs/doc_examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc new file mode 100644 index 000000000..af615dc9b --- /dev/null +++ b/docs/doc_examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "swedish_example", + settings: { + analysis: { + filter: { + swedish_stop: { + type: "stop", + stopwords: "_swedish_", + }, + swedish_keywords: { + type: "keyword_marker", + keywords: ["exempel"], + }, + swedish_stemmer: { + type: "stemmer", + language: "swedish", + }, + }, + analyzer: { + rebuilt_swedish: { + tokenizer: "standard", + filter: [ + "lowercase", + "swedish_stop", + "swedish_keywords", + "swedish_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f09817fd13ff3dce52eb79d0722409c3.asciidoc b/docs/doc_examples/f09817fd13ff3dce52eb79d0722409c3.asciidoc new file mode 100644 index 000000000..d844fc1ee --- /dev/null +++ b/docs/doc_examples/f09817fd13ff3dce52eb79d0722409c3.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "new_index", + mappings: { + properties: { + query: { + type: "percolator", + }, + body: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.reindex({ + refresh: "true", + source: { + index: "index", + }, + dest: { + index: "new_index", + }, +}); +console.log(response1); + +const response2 = await client.indices.updateAliases({ + actions: [ + { + remove: { + index: "index", + alias: "queries", + }, + }, + { + add: { + index: "new_index", + alias: "queries", + }, + }, + ], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/f0bfc8d7ab4eb94ea5fdf2e087d8cf5b.asciidoc b/docs/doc_examples/f0bfc8d7ab4eb94ea5fdf2e087d8cf5b.asciidoc new file mode 100644 index 000000000..14f0cc66e --- /dev/null +++ b/docs/doc_examples/f0bfc8d7ab4eb94ea5fdf2e087d8cf5b.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + runtime_mappings: { + "load_time.seconds": { + type: "long", + script: { + source: "emit(doc['load_time'].value / params.timeUnit)", + params: { + timeUnit: 1000, + }, + }, + }, + }, + aggs: { + load_time_boxplot: { + boxplot: { + field: "load_time.seconds", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f0c3235d8fce641d6ff8ce90ab7b7b8b.asciidoc b/docs/doc_examples/f0c3235d8fce641d6ff8ce90ab7b7b8b.asciidoc new file mode 100644 index 000000000..abc598b1e --- /dev/null +++ b/docs/doc_examples/f0c3235d8fce641d6ff8ce90ab7b7b8b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mtermvectors({ + index: "my-index-000001", + ids: ["1", "2"], + parameters: { + fields: ["message"], + term_statistics: true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f0e21e03a07c8fa0209b0aafdb3791e6.asciidoc b/docs/doc_examples/f0e21e03a07c8fa0209b0aafdb3791e6.asciidoc deleted file mode 100644 index 054b26dd6..000000000 --- a/docs/doc_examples/f0e21e03a07c8fa0209b0aafdb3791e6.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test1', - alias: 'alias1' - } - }, - { - add: { - index: 'test2', - alias: 'alias1' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f10ab582387b2c157917a60205c993f7.asciidoc b/docs/doc_examples/f10ab582387b2c157917a60205c993f7.asciidoc new file mode 100644 index 000000000..26a38e4e8 --- /dev/null +++ b/docs/doc_examples/f10ab582387b2c157917a60205c993f7.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + latency: { + type: "long", + meta: { + unit: "ms", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc b/docs/doc_examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc new file mode 100644 
index 000000000..c0c85c658 --- /dev/null +++ b/docs/doc_examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-data-stream", + conditions: { + max_age: "7d", + max_docs: 1000, + max_primary_shard_size: "50gb", + max_primary_shard_docs: "2000", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f14d0e4a280fee540e8e5f0fc4d0e9f1.asciidoc b/docs/doc_examples/f14d0e4a280fee540e8e5f0fc4d0e9f1.asciidoc new file mode 100644 index 000000000..71dd1a095 --- /dev/null +++ b/docs/doc_examples/f14d0e4a280fee540e8e5f0fc4d0e9f1.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + size: 0, + aggs: { + grouped: { + geotile_grid: { + field: "location", + precision: 6, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1508a2221152842894819e762e63491.asciidoc b/docs/doc_examples/f1508a2221152842894819e762e63491.asciidoc new file mode 100644 index 000000000..61e99fb3d --- /dev/null +++ b/docs/doc_examples/f1508a2221152842894819e762e63491.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + keep_on_completion: true, + wait_for_completion_timeout: "2s", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f160561efab38e40c2feebf5a2542ab5.asciidoc b/docs/doc_examples/f160561efab38e40c2feebf5a2542ab5.asciidoc new file mode 100644 index 000000000..313a79df9 --- /dev/null +++ b/docs/doc_examples/f160561efab38e40c2feebf5a2542ab5.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "ingest", + filter_path: "nodes.*.ingest.pipelines", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f18248c181690b81d090275b072f0070.asciidoc b/docs/doc_examples/f18248c181690b81d090275b072f0070.asciidoc new file mode 100644 index 000000000..7753354ba --- /dev/null +++ b/docs/doc_examples/f18248c181690b81d090275b072f0070.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + keep_alive: "2d", + wait_for_completion_timeout: "2s", + query: '\n process where process.name == "cmd.exe"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/f187ac2dc35425cb0ef48f328cc7e435.asciidoc b/docs/doc_examples/f187ac2dc35425cb0ef48f328cc7e435.asciidoc new file mode 100644 index 000000000..0498b4759 --- /dev/null +++ b/docs/doc_examples/f187ac2dc35425cb0ef48f328cc7e435.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "cross-search-user", + password: "l0ng-r4nd0m-p@ssw0rd", 
+ roles: ["remote-search"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc b/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc new file mode 100644 index 000000000..a73c47467 --- /dev/null +++ b/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_inference/rerank/cohere_rerank", + body: { + input: ["luke", "like", "leia", "chewy", "r2d2", "star", "wars"], + query: "star wars main character", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1bf3edbd9e6c7e01b00c74c99a58b61.asciidoc b/docs/doc_examples/f1bf3edbd9e6c7e01b00c74c99a58b61.asciidoc new file mode 100644 index 000000000..9a4eac016 --- /dev/null +++ b/docs/doc_examples/f1bf3edbd9e6c7e01b00c74c99a58b61.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + cluster_one: { + seeds: ["127.0.0.1:9300"], + }, + cluster_two: { + seeds: ["127.0.0.1:9301"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1d2b8169160adfd27f32988113f0f9f.asciidoc b/docs/doc_examples/f1d2b8169160adfd27f32988113f0f9f.asciidoc new file mode 100644 index 000000000..c51090492 --- /dev/null +++ b/docs/doc_examples/f1d2b8169160adfd27f32988113f0f9f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + filter: ["word_delimiter"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1dc6f69453867ffafe86e998dd464d9.asciidoc b/docs/doc_examples/f1dc6f69453867ffafe86e998dd464d9.asciidoc new file mode 100644 index 000000000..5ce51ecf1 --- /dev/null +++ b/docs/doc_examples/f1dc6f69453867ffafe86e998dd464d9.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "file-path-test", + query: { + term: { + "file_path.tree_reversed": { + value: "my_photo1.jpg", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc b/docs/doc_examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc new file mode 100644 index 000000000..ebf51b9ca --- /dev/null +++ b/docs/doc_examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.resetJob({ + job_id: "total-requests", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2175feadc2abe545899889e6d4ffcad.asciidoc b/docs/doc_examples/f2175feadc2abe545899889e6d4ffcad.asciidoc new file mode 100644 index 000000000..c8e0b3205 --- /dev/null +++ b/docs/doc_examples/f2175feadc2abe545899889e6d4ffcad.asciidoc @@ -0,0 +1,11 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getLifecycle({ + policy_id: "daily-snapshots", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f235544a883fd04bed2dc369b0c450f3.asciidoc b/docs/doc_examples/f235544a883fd04bed2dc369b0c450f3.asciidoc new file mode 100644 index 000000000..11b7ad26f --- /dev/null +++ b/docs/doc_examples/f235544a883fd04bed2dc369b0c450f3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: "SELECT * FROM library", + filter: { + terms: { + _routing: ["abc"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2359acfb6eaa919125463cc1d3a7cd1.asciidoc b/docs/doc_examples/f2359acfb6eaa919125463cc1d3a7cd1.asciidoc new file mode 100644 index 000000000..48dedaf2a --- /dev/null +++ b/docs/doc_examples/f2359acfb6eaa919125463cc1d3a7cd1.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "admins", + roles: ["monitoring", "user"], + rules: { + field: { + groups: "cn=admins,dc=example,dc=com", + }, + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f268416813befd13c604642c6fe6eda9.asciidoc b/docs/doc_examples/f268416813befd13c604642c6fe6eda9.asciidoc new file mode 100644 index 000000000..45015b079 --- /dev/null +++ b/docs/doc_examples/f268416813befd13c604642c6fe6eda9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "custom_lowercase_example", + settings: { + analysis: { + analyzer: { + greek_lowercase_example: { + type: "custom", + tokenizer: "standard", + filter: ["greek_lowercase"], + }, + }, + filter: { + greek_lowercase: { + type: "lowercase", + language: "greek", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc b/docs/doc_examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc new file mode 100644 index 000000000..fc520e33d --- /dev/null +++ b/docs/doc_examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.flush(); +console.log(response); +---- diff --git a/docs/doc_examples/f281ff50b2cdb67ac0ece93f1594fa95.asciidoc b/docs/doc_examples/f281ff50b2cdb67ac0ece93f1594fa95.asciidoc new file mode 100644 index 000000000..d280260a8 --- /dev/null +++ b/docs/doc_examples/f281ff50b2cdb67ac0ece93f1594fa95.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "example_points", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_shape: { + location: { + shape: { + type: "envelope", + coordinates: [ + [13, 53], + [14, 52], + ], + }, + relation: "intersects", + }, + }, + }, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc b/docs/doc_examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc new file mode 100644 index 000000000..4f46cf2e9 --- /dev/null +++ b/docs/doc_examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.pendingTasks({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc b/docs/doc_examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc index 6cd9afe16..29b095fb2 100644 --- a/docs/doc_examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc +++ b/docs/doc_examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc @@ -4,39 +4,36 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - bool: { - must: [ - { - match: { - title: 'Search' - } + query: { + bool: { + must: [ + { + match: { + title: "Search", }, - { - match: { - content: 'Elasticsearch' - } - } - ], - filter: [ - { - term: { - status: 'published' - } + }, + { + match: { + content: "Elasticsearch", }, - { - range: { - publish_date: { - gte: '2015-01-01' - } - } - } - ] - } - } - } -}) -console.log(response) + }, + ], + filter: [ + { + term: { + status: "published", + }, + }, + { + range: { + publish_date: { + gte: "2015-01-01", + }, + }, + }, + ], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/f29b2674299ddf51a25ed87619025ede.asciidoc b/docs/doc_examples/f29b2674299ddf51a25ed87619025ede.asciidoc new file mode 100644 index 000000000..3400481d1 --- /dev/null +++ b/docs/doc_examples/f29b2674299ddf51a25ed87619025ede.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.rollupSearch({ + index: "sensor_rollup", + size: 0, + aggregations: { + max_temperature: { + max: { + field: "temperature", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc b/docs/doc_examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc new file mode 100644 index 000000000..a65ee1fe2 --- /dev/null +++ b/docs/doc_examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/my-connector", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2b2d62bc0a44940ad14fca57d6d008a.asciidoc b/docs/doc_examples/f2b2d62bc0a44940ad14fca57d6d008a.asciidoc new file mode 100644 index 000000000..b318357eb --- /dev/null +++ b/docs/doc_examples/f2b2d62bc0a44940ad14fca57d6d008a.asciidoc @@ -0,0 +1,106 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.putTransform({ + transform_id: "suspicious_client_ips", + source: { + index: "kibana_sample_data_logs", + }, + dest: { + index: "sample_weblogs_by_clientip", + }, + sync: { + time: { + field: "timestamp", + delay: "60s", + }, + }, + pivot: { + group_by: { + clientip: { + terms: { + field: "clientip", + }, + }, + }, + aggregations: { + url_dc: { + 
cardinality: { + field: "url.keyword", + }, + }, + bytes_sum: { + sum: { + field: "bytes", + }, + }, + "geo.src_dc": { + cardinality: { + field: "geo.src", + }, + }, + agent_dc: { + cardinality: { + field: "agent.keyword", + }, + }, + "geo.dest_dc": { + cardinality: { + field: "geo.dest", + }, + }, + "responses.total": { + value_count: { + field: "timestamp", + }, + }, + success: { + filter: { + term: { + response: "200", + }, + }, + }, + error404: { + filter: { + term: { + response: "404", + }, + }, + }, + error5xx: { + filter: { + range: { + response: { + gte: 500, + lt: 600, + }, + }, + }, + }, + "timestamp.min": { + min: { + field: "timestamp", + }, + }, + "timestamp.max": { + max: { + field: "timestamp", + }, + }, + "timestamp.duration_ms": { + bucket_script: { + buckets_path: { + min_time: "timestamp.min.value", + max_time: "timestamp.max.value", + }, + script: "(params.max_time - params.min_time)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2c9afd052878b2ec00908739b0d0f74.asciidoc b/docs/doc_examples/f2c9afd052878b2ec00908739b0d0f74.asciidoc new file mode 100644 index 000000000..1aa9b2a81 --- /dev/null +++ b/docs/doc_examples/f2c9afd052878b2ec00908739b0d0f74.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + rename: { + description: "Rename 'provider' to 'cloud.provider'", + field: "provider", + target_field: "cloud.provider", + on_failure: [ + { + set: { + description: "Set 'error.message'", + field: "error.message", + value: + "Field 'provider' does not exist. Cannot rename to 'cloud.provider'", + override: false, + on_failure: [ + { + set: { + description: "Set 'error.message.multi'", + field: "error.message.multi", + value: "Document encountered multiple ingest errors", + override: true, + }, + }, + ], + }, + }, + ], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2d68493abd3ca430bd03a7f7f8d18f9.asciidoc b/docs/doc_examples/f2d68493abd3ca430bd03a7f7f8d18f9.asciidoc deleted file mode 100644 index f8bcb94ba..000000000 --- a/docs/doc_examples/f2d68493abd3ca430bd03a7f7f8d18f9.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'content', - 'name' - ], - query: 'this AND that' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f2e854b6c99659ccc1824e86c096e433.asciidoc b/docs/doc_examples/f2e854b6c99659ccc1824e86c096e433.asciidoc new file mode 100644 index 000000000..b5bc56dfb --- /dev/null +++ b/docs/doc_examples/f2e854b6c99659ccc1824e86c096e433.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.resumeAutoFollowPattern({ + name: "my_auto_follow_pattern", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc b/docs/doc_examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc new file mode 100644 index 000000000..3571da97b --- /dev/null +++ b/docs/doc_examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, 
DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rankEval({ + index: "my-index-000001", + requests: [ + { + id: "JFK query", + request: { + query: { + match_all: {}, + }, + }, + ratings: [], + }, + ], + metric: { + mean_reciprocal_rank: { + k: 20, + relevant_rating_threshold: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc b/docs/doc_examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc new file mode 100644 index 000000000..838a4da46 --- /dev/null +++ b/docs/doc_examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.split({ + index: "my_source_index", + target: "my_target_index", + settings: { + "index.number_of_shards": 5, + }, + aliases: { + my_search_indices: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f329242d7c8406297eff9bf609870c37.asciidoc b/docs/doc_examples/f329242d7c8406297eff9bf609870c37.asciidoc new file mode 100644 index 000000000..875cd32a3 --- /dev/null +++ b/docs/doc_examples/f329242d7c8406297eff9bf609870c37.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "music", + pretty: "true", + suggest: { + "song-suggest": { + prefix: "nor", + completion: { + field: "suggest", + fuzzy: { + fuzziness: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc b/docs/doc_examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc index d6baad2f7..bffd0b207 100644 --- a/docs/doc_examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc +++ b/docs/doc_examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc @@ -4,16 +4,13 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - query_string: { - default_field: 'title', - query: 'ny city', - auto_generate_synonyms_phrase_query: false - } - } - } -}) -console.log(response) + query: { + query_string: { + default_field: "title", + query: "ny city", + auto_generate_synonyms_phrase_query: false, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/f342465c65ba76383dedbb334b57b616.asciidoc b/docs/doc_examples/f342465c65ba76383dedbb334b57b616.asciidoc new file mode 100644 index 000000000..4a8ae5885 --- /dev/null +++ b/docs/doc_examples/f342465c65ba76383dedbb334b57b616.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + text: { + type: "text", + index_options: "offsets", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Quick brown fox", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match: { + text: "brown fox", + }, + }, + highlight: { + fields: { + text: {}, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/f34c02351662481dd61a5c2a3e206c60.asciidoc 
b/docs/doc_examples/f34c02351662481dd61a5c2a3e206c60.asciidoc new file mode 100644 index 000000000..ace60f481 --- /dev/null +++ b/docs/doc_examples/f34c02351662481dd61a5c2a3e206c60.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "hyphenation_decompounder", + hyphenation_patterns_path: "analysis/hyphenation_patterns.xml", + word_list: ["Kaffee", "zucker", "tasse"], + }, + ], + text: "Kaffeetasse", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3574cfee3971d98417b8dc574a91be0.asciidoc b/docs/doc_examples/f3574cfee3971d98417b8dc574a91be0.asciidoc new file mode 100644 index 000000000..0ae64f8ad --- /dev/null +++ b/docs/doc_examples/f3574cfee3971d98417b8dc574a91be0.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + flattened: { + type: "flattened", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + flattened: { + field: [ + { + id: 1, + name: "foo", + }, + { + id: 2, + name: "bar", + }, + { + id: 3, + name: "baz", + }, + ], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc b/docs/doc_examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc new file mode 100644 index 000000000..f78c80064 --- /dev/null +++ b/docs/doc_examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shrink({ + index: "my_source_index", + target: "my_target_index", + settings: { + "index.routing.allocation.require._name": null, + "index.blocks.write": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3697682a886ab129530f3e5c1b30632.asciidoc b/docs/doc_examples/f3697682a886ab129530f3e5c1b30632.asciidoc new file mode 100644 index 000000000..dfdaa28a7 --- /dev/null +++ b/docs/doc_examples/f3697682a886ab129530f3e5c1b30632.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "my-index-000001", + id: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc b/docs/doc_examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc new file mode 100644 index 000000000..d63f990e3 --- /dev/null +++ b/docs/doc_examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-new-index-000001", + id: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f38262ef72f73816ec35fa4c9c85760d.asciidoc b/docs/doc_examples/f38262ef72f73816ec35fa4c9c85760d.asciidoc new file mode 100644 index 000000000..ec74a3eb1 --- /dev/null +++ 
b/docs/doc_examples/f38262ef72f73816ec35fa4c9c85760d.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + my_range: { + type: "long_range", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + my_range: [ + { + gte: 200, + lte: 300, + }, + { + gte: 1, + lte: 100, + }, + { + gte: 200, + lte: 300, + }, + { + gte: 200, + lte: 500, + }, + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc b/docs/doc_examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc new file mode 100644 index 000000000..ce1095fd2 --- /dev/null +++ b/docs/doc_examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + name: "my-api-key", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3942d9b34138dfca79dff707af270b7.asciidoc b/docs/doc_examples/f3942d9b34138dfca79dff707af270b7.asciidoc new file mode 100644 index 000000000..02d7258d2 --- /dev/null +++ b/docs/doc_examples/f3942d9b34138dfca79dff707af270b7.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + timestamp_field: "file.accessed", + event_category_field: "file.type", + query: '\n file where (file.size > 1 and file.type == "file")\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc b/docs/doc_examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc new file mode 100644 index 000000000..ee8421977 --- /dev/null +++ b/docs/doc_examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.rollupSearch({ + index: "sensor_rollup", + size: 0, + aggregations: { + timeline: { + date_histogram: { + field: "timestamp", + fixed_interval: "7d", + }, + aggs: { + nodes: { + terms: { + field: "node", + }, + aggs: { + max_temperature: { + max: { + field: "temperature", + }, + }, + avg_voltage: { + avg: { + field: "voltage", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3ab820e1f2f54ea718017aeae865742.asciidoc b/docs/doc_examples/f3ab820e1f2f54ea718017aeae865742.asciidoc new file mode 100644 index 000000000..4e1adad3d --- /dev/null +++ b/docs/doc_examples/f3ab820e1f2f54ea718017aeae865742.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "oidc-finance", + roles: ["finance_data"], + enabled: true, + rules: { + all: [ + { + field: { + "realm.name": "oidc1", + }, + }, + { + field: { + groups: "finance-team", + }, + }, + ], + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc b/docs/doc_examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc new file mode 100644 index 000000000..8db77c5b0 --- /dev/null +++ b/docs/doc_examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.validateQuery({ + index: "my-index-000001", + q: "user.id:kimchy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3b4ddce8ff21fc1a76a7c0d9c36650e.asciidoc b/docs/doc_examples/f3b4ddce8ff21fc1a76a7c0d9c36650e.asciidoc new file mode 100644 index 000000000..5a7ab8f36 --- /dev/null +++ b/docs/doc_examples/f3b4ddce8ff21fc1a76a7c0d9c36650e.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + shrink: { + number_of_shards: 1, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3c696cd63a3f042e62cbb94b75c2427.asciidoc b/docs/doc_examples/f3c696cd63a3f042e62cbb94b75c2427.asciidoc new file mode 100644 index 000000000..a6e06c5e2 --- /dev/null +++ b/docs/doc_examples/f3c696cd63a3f042e62cbb94b75c2427.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings({ + flat_settings: "true", + filter_path: "persistent.archived*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3e1dfe1c440e3590be26f265e19425d.asciidoc b/docs/doc_examples/f3e1dfe1c440e3590be26f265e19425d.asciidoc new file mode 100644 index 000000000..e9b09015b --- /dev/null +++ b/docs/doc_examples/f3e1dfe1c440e3590be26f265e19425d.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: "1 / (1 + l2norm(params.queryVector, 'my_dense_vector'))", + params: { + queryVector: [4, 3.4, -0.2], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc b/docs/doc_examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc new file mode 100644 index 000000000..5369161be --- /dev/null +++ b/docs/doc_examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedPrivileges({ + application: "myapp,my-other-app", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3fb52680482925c202c2e2f8af6f044.asciidoc b/docs/doc_examples/f3fb52680482925c202c2e2f8af6f044.asciidoc new file mode 100644 index 000000000..814d8e865 --- /dev/null +++ b/docs/doc_examples/f3fb52680482925c202c2e2f8af6f044.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.cat.count({ + index: "my-index-000001", + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3fe2012557ebbce1ebad4fc997c092d.asciidoc b/docs/doc_examples/f3fe2012557ebbce1ebad4fc997c092d.asciidoc new file mode 100644 index 000000000..4a3c06e4e --- /dev/null +++ b/docs/doc_examples/f3fe2012557ebbce1ebad4fc997c092d.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_fs_backup", + repository: { + type: "fs", + settings: { + location: "my_fs_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f43d551aaaad73d979adf1b86533e6a3.asciidoc b/docs/doc_examples/f43d551aaaad73d979adf1b86533e6a3.asciidoc new file mode 100644 index 000000000..b1a46928e --- /dev/null +++ b/docs/doc_examples/f43d551aaaad73d979adf1b86533e6a3.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + fixed_interval: "2w", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc b/docs/doc_examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc new file mode 100644 index 000000000..5d7b46190 --- /dev/null +++ b/docs/doc_examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f44d287c6937785eb09b91353c1deb1e.asciidoc b/docs/doc_examples/f44d287c6937785eb09b91353c1deb1e.asciidoc new file mode 100644 index 000000000..8e6ddc8cb --- /dev/null +++ b/docs/doc_examples/f44d287c6937785eb09b91353c1deb1e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getDatafeedStats({ + datafeed_id: "datafeed-high_sum_total_sales", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f453e14bcf30853e57618bf12f83e148.asciidoc b/docs/doc_examples/f453e14bcf30853e57618bf12f83e148.asciidoc new file mode 100644 index 000000000..afa69bc17 --- /dev/null +++ b/docs/doc_examples/f453e14bcf30853e57618bf12f83e148.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "pattern_example", + settings: { + analysis: { + tokenizer: { + split_on_non_word: { + type: "pattern", + pattern: "\\W+", + }, + }, + analyzer: { + rebuilt_pattern: { + tokenizer: "split_on_non_word", + filter: ["lowercase"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc b/docs/doc_examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc new file mode 100644 index 000000000..5c0f37868 --- /dev/null +++ b/docs/doc_examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc @@ -0,0 +1,49 @@ +// This 
file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + message: "some arrays in this document...", + tags: ["elasticsearch", "wow"], + lists: [ + { + name: "prog_list", + description: "programming list", + }, + { + name: "cool_list", + description: "cool stuff list", + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + message: "no arrays in this document...", + tags: "elasticsearch", + lists: { + name: "prog_list", + description: "programming list", + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match: { + tags: "elasticsearch", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/f45990264f8755b96b11c69c12c90ff4.asciidoc b/docs/doc_examples/f45990264f8755b96b11c69c12c90ff4.asciidoc new file mode 100644 index 000000000..0852c2e70 --- /dev/null +++ b/docs/doc_examples/f45990264f8755b96b11c69c12c90ff4.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.exists({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f495f9c99916a05e3b28166d31955fad.asciidoc b/docs/doc_examples/f495f9c99916a05e3b28166d31955fad.asciidoc new file mode 100644 index 000000000..5522bffda --- /dev/null +++ b/docs/doc_examples/f495f9c99916a05e3b28166d31955fad.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + terms: { + field: "genre", + order: { + "playback_stats.max": "desc", + }, + }, + aggs: { + playback_stats: { + stats: { + field: "play_count", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc b/docs/doc_examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc new file mode 100644 index 000000000..e40f14b9d --- /dev/null +++ b/docs/doc_examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-image-index", + mappings: { + properties: { + "image-vector": { + type: "dense_vector", + dims: 3, + index: true, + similarity: "l2_norm", + }, + "file-type": { + type: "keyword", + }, + title: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4a1008b3f9baa67bb03ce9ef5ab4cb4.asciidoc b/docs/doc_examples/f4a1008b3f9baa67bb03ce9ef5ab4cb4.asciidoc deleted file mode 100644 index 12bf7d016..000000000 --- a/docs/doc_examples/f4a1008b3f9baa67bb03ce9ef5ab4cb4.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'index_double', - body: { - mappings: { - properties: { - field: { - type: 'date' - } - } - } - } -}) -console.log(response) ----- - diff --git 
a/docs/doc_examples/f4ae3f3fbf07a7d39122ac5ac20b9c03.asciidoc b/docs/doc_examples/f4ae3f3fbf07a7d39122ac5ac20b9c03.asciidoc new file mode 100644 index 000000000..15cfea68e --- /dev/null +++ b/docs/doc_examples/f4ae3f3fbf07a7d39122ac5ac20b9c03.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "quantized-image-index", + mappings: { + properties: { + "image-vector": { + type: "dense_vector", + element_type: "float", + dims: 2, + index: true, + index_options: { + type: "int8_hnsw", + }, + }, + title: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc b/docs/doc_examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc new file mode 100644 index 000000000..1c73cea43 --- /dev/null +++ b/docs/doc_examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termsEnum({ + index: "stackoverflow", + field: "tags", + string: "kiba", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4c194628761a4cf2a01453a96bbcc3c.asciidoc b/docs/doc_examples/f4c194628761a4cf2a01453a96bbcc3c.asciidoc new file mode 100644 index 000000000..06a8284a3 --- /dev/null +++ b/docs/doc_examples/f4c194628761a4cf2a01453a96bbcc3c.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "multipolygon", + coordinates: [ + [ + [ + [1002, 200], + [1003, 200], + [1003, 300], + [1002, 300], + [1002, 200], + ], + ], + [ + [ + [1000, 200], + [1001, 100], + [1001, 100], + [1000, 100], + [1000, 100], + ], + [ + [1000.2, 200.2], + [1000.8, 100.2], + [1000.8, 100.8], + [1000.2, 100.8], + [1000.2, 100.2], + ], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc b/docs/doc_examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc new file mode 100644 index 000000000..0bf72b678 --- /dev/null +++ b/docs/doc_examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information", + processors: [ + { + attachment: { + field: "data", + indexed_chars: 11, + indexed_chars_field: "max_size", + remove_binary: false, + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id_2", + pipeline: "attachment", + document: { + data: "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + max_size: 5, + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id_2", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc b/docs/doc_examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc new file mode 100644 index 000000000..e41ea896e --- /dev/null +++ 
b/docs/doc_examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.disk.watermark.low": null, + "cluster.routing.allocation.disk.watermark.low.max_headroom": null, + "cluster.routing.allocation.disk.watermark.high": null, + "cluster.routing.allocation.disk.watermark.high.max_headroom": null, + "cluster.routing.allocation.disk.watermark.flood_stage": null, + "cluster.routing.allocation.disk.watermark.flood_stage.max_headroom": null, + "cluster.routing.allocation.disk.watermark.flood_stage.frozen": null, + "cluster.routing.allocation.disk.watermark.flood_stage.frozen.max_headroom": + null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4f557716049b23f8840d58d71e748f0.asciidoc b/docs/doc_examples/f4f557716049b23f8840d58d71e748f0.asciidoc new file mode 100644 index 000000000..a7503c9a3 --- /dev/null +++ b/docs/doc_examples/f4f557716049b23f8840d58d71e748f0.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + refresh_interval: "-1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc b/docs/doc_examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc new file mode 100644 index 000000000..cd19c6359 --- /dev/null +++ b/docs/doc_examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.forgetFollower({ + index: "<leader_index>", + follower_cluster: "<follower_cluster>", + follower_index: "<follower_index>", + follower_index_uuid: "<follower_index_uuid>", + leader_remote_cluster: "<leader_remote_cluster>", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5013174f77868da4dc40cdd745d4ea4.asciidoc b/docs/doc_examples/f5013174f77868da4dc40cdd745d4ea4.asciidoc new file mode 100644 index 000000000..f8876e0f8 --- /dev/null +++ b/docs/doc_examples/f5013174f77868da4dc40cdd745d4ea4.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + rare_terms: { + field: "genre", + max_doc_count: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc b/docs/doc_examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc new file mode 100644 index 000000000..b6609f84d --- /dev/null +++ b/docs/doc_examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteAlias({ + index: "my-data-stream", + name: "my-alias", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f545bb95214769aca993c1632a71ad2c.asciidoc b/docs/doc_examples/f545bb95214769aca993c1632a71ad2c.asciidoc new file mode 100644 index 000000000..a54c8c9de --- /dev/null +++ b/docs/doc_examples/f545bb95214769aca993c1632a71ad2c.asciidoc @@ -0,0 +1,59 @@ +//
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "french_example", + settings: { + analysis: { + filter: { + french_elision: { + type: "elision", + articles_case: true, + articles: [ + "l", + "m", + "t", + "qu", + "n", + "s", + "j", + "d", + "c", + "jusqu", + "quoiqu", + "lorsqu", + "puisqu", + ], + }, + french_stop: { + type: "stop", + stopwords: "_french_", + }, + french_keywords: { + type: "keyword_marker", + keywords: ["Example"], + }, + french_stemmer: { + type: "stemmer", + language: "light_french", + }, + }, + analyzer: { + rebuilt_french: { + tokenizer: "standard", + filter: [ + "french_elision", + "lowercase", + "french_stop", + "french_keywords", + "french_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f54f6d06163221f2c7aff6e8db942be3.asciidoc b/docs/doc_examples/f54f6d06163221f2c7aff6e8db942be3.asciidoc new file mode 100644 index 000000000..97adcb0dc --- /dev/null +++ b/docs/doc_examples/f54f6d06163221f2c7aff6e8db942be3.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "daily-snapshots", + name: "<daily-snap-{now/d}>", + schedule: "0 45 23 * * ?", + repository: "my_repository", + config: { + indices: "*", + include_global_state: true, + }, + retention: { + expire_after: "30d", + min_count: 1, + max_count: 31, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5569945024b9d664828693705c27c1a.asciidoc b/docs/doc_examples/f5569945024b9d664828693705c27c1a.asciidoc deleted file mode 100644 index 5218065d0..000000000 --- a/docs/doc_examples/f5569945024b9d664828693705c27c1a.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'kimchy,elasticsearch', - q: 'user:kimchy' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc b/docs/doc_examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc new file mode 100644 index 000000000..1fce2d137 --- /dev/null +++ b/docs/doc_examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/completion/azure_openai_completion", + body: { + service: "azureopenai", + service_settings: { + api_key: "<api_key>", + resource_name: "<resource_name>", + deployment_id: "<deployment_id>", + api_version: "2024-02-01", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5815d573cee0447910c9668003887b8.asciidoc b/docs/doc_examples/f5815d573cee0447910c9668003887b8.asciidoc new file mode 100644 index 000000000..82e48529d --- /dev/null +++ b/docs/doc_examples/f5815d573cee0447910c9668003887b8.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + calendar_interval: "2d", + }, + }, + }, +});
+console.log(response); +---- diff --git a/docs/doc_examples/f58969ac405db85f439c5940d014964b.asciidoc b/docs/doc_examples/f58969ac405db85f439c5940d014964b.asciidoc new file mode 100644 index 000000000..3406c8013 --- /dev/null +++ b/docs/doc_examples/f58969ac405db85f439c5940d014964b.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + wkt: "BBOX (-74.1, -71.12, 40.73, 40.01)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc b/docs/doc_examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc new file mode 100644 index 000000000..eb4cc2eaf --- /dev/null +++ b/docs/doc_examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.postStartBasic({ + acknowledge: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5bf2526af19d964f8c4c59d4795cffc.asciidoc b/docs/doc_examples/f5bf2526af19d964f8c4c59d4795cffc.asciidoc new file mode 100644 index 000000000..46fe5e0e8 --- /dev/null +++ b/docs/doc_examples/f5bf2526af19d964f8c4c59d4795cffc.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "imdb", + mappings: { + properties: { + title: { + type: "text", + term_vector: "yes", + }, + description: { + type: "text", + }, + tags: { + type: "text", + fields: { + raw: { + type: "text", + analyzer: "keyword", + term_vector: "yes", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5cbbb60ca26867a5d2da625a68a6e65.asciidoc b/docs/doc_examples/f5cbbb60ca26867a5d2da625a68a6e65.asciidoc new file mode 100644 index 000000000..b9617e400 --- /dev/null +++ b/docs/doc_examples/f5cbbb60ca26867a5d2da625a68a6e65.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "ecommerce-customers", + mappings: { + properties: { + "total_quantity.sum": { + type: "double", + }, + total_quantity: { + type: "object", + }, + taxless_total_price: { + type: "object", + }, + "taxless_total_price.sum": { + type: "double", + }, + "order_id.cardinality": { + type: "long", + }, + customer_id: { + type: "keyword", + }, + "total_quantity.max": { + type: "integer", + }, + order_id: { + type: "object", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5e50fe8a60467adb2c5ee9e0f2d88da.asciidoc b/docs/doc_examples/f5e50fe8a60467adb2c5ee9e0f2d88da.asciidoc new file mode 100644 index 000000000..115cd3757 --- /dev/null +++ b/docs/doc_examples/f5e50fe8a60467adb2c5ee9e0f2d88da.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.clearCursor({ + cursor: + 
"sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5e6378cc41ddf5326fe4084396c59b2.asciidoc b/docs/doc_examples/f5e6378cc41ddf5326fe4084396c59b2.asciidoc new file mode 100644 index 000000000..bc7cd7b1a --- /dev/null +++ b/docs/doc_examples/f5e6378cc41ddf5326fe4084396c59b2.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + default: { + type: "simple", + }, + default_search: { + type: "whitespace", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5eed3f2e3558a238487bc85305b7a71.asciidoc b/docs/doc_examples/f5eed3f2e3558a238487bc85305b7a71.asciidoc new file mode 100644 index 000000000..1f5c5accb --- /dev/null +++ b/docs/doc_examples/f5eed3f2e3558a238487bc85305b7a71.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5ef80dd92c67059ca353a833e6b7b5e.asciidoc b/docs/doc_examples/f5ef80dd92c67059ca353a833e6b7b5e.asciidoc new file mode 100644 index 000000000..b36c0c85e --- /dev/null +++ b/docs/doc_examples/f5ef80dd92c67059ca353a833e6b7b5e.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + query: { + constant_score: { + filter: { + match: { + type: "hat", + }, + }, + }, + }, + aggs: { + hat_prices: { + sum: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f63f6343e74bd5c844854272e746de14.asciidoc b/docs/doc_examples/f63f6343e74bd5c844854272e746de14.asciidoc new file mode 100644 index 000000000..a1a92a95d --- /dev/null +++ b/docs/doc_examples/f63f6343e74bd5c844854272e746de14.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.deactivateWatch({ + watch_id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f642b64e592131f37209a5100fe161cc.asciidoc b/docs/doc_examples/f642b64e592131f37209a5100fe161cc.asciidoc new file mode 100644 index 000000000..df08d6beb --- /dev/null +++ b/docs/doc_examples/f642b64e592131f37209a5100fe161cc.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + named_analyzers: { + match_mapping_type: "string", + match: "*", + mapping: { + type: "text", + analyzer: "{name}", + }, + }, + }, + { + no_doc_values: { + match_mapping_type: "*", + mapping: { + type: "{dynamic_type}", + doc_values: false, + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await 
client.index({ + index: "my-index-000001", + id: 1, + document: { + english: "Some English text", + count: 5, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/f6566395f85d3afe917228643d7318d6.asciidoc b/docs/doc_examples/f6566395f85d3afe917228643d7318d6.asciidoc new file mode 100644 index 000000000..d4a98b60b --- /dev/null +++ b/docs/doc_examples/f6566395f85d3afe917228643d7318d6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f656c1e64268293ecc8ebd8065628faa.asciidoc b/docs/doc_examples/f656c1e64268293ecc8ebd8065628faa.asciidoc new file mode 100644 index 000000000..0c9f9c376 --- /dev/null +++ b/docs/doc_examples/f656c1e64268293ecc8ebd8065628faa.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedServiceTokens({ + namespace: "elastic", + service: "fleet-server", + name: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f65abb38dd0cfedeb06e0cef206fbdab.asciidoc b/docs/doc_examples/f65abb38dd0cfedeb06e0cef206fbdab.asciidoc new file mode 100644 index 000000000..724fcf566 --- /dev/null +++ b/docs/doc_examples/f65abb38dd0cfedeb06e0cef206fbdab.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["ngram"], + text: "Quick fox", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f66643c54999426c5afa6d5a87435d4e.asciidoc b/docs/doc_examples/f66643c54999426c5afa6d5a87435d4e.asciidoc new file mode 100644 index 000000000..982d55165 --- /dev/null +++ b/docs/doc_examples/f66643c54999426c5afa6d5a87435d4e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearApiKeyCache({ + ids: "yVGMr3QByxdh1MSaicYx", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f67d8aab9106ad24b1d2c771d3840ed1.asciidoc b/docs/doc_examples/f67d8aab9106ad24b1d2c771d3840ed1.asciidoc new file mode 100644 index 000000000..4cf7e3016 --- /dev/null +++ b/docs/doc_examples/f67d8aab9106ad24b1d2c771d3840ed1.asciidoc @@ -0,0 +1,70 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_event_watch", + trigger: { + schedule: { + interval: "5m", + }, + }, + input: { + search: { + request: { + indices: "log-events", + body: { + size: 0, + query: { + match: { + status: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 0, + }, + }, + }, + actions: { + email_administrator: { + email: { + to: "sys.admino@host.domain", + subject: "Encountered {{ctx.payload.hits.total}} errors", + body: "Too many errors in the system, see attached data", + attachments: { + attached_data: { + data: { + format: "json", + }, + }, + }, + priority: "high", + }, + }, + notify_pager: { + condition: { + compare: {
"ctx.payload.hits.total": { + gt: 5, + }, + }, + }, + webhook: { + method: "POST", + host: "pager.service.domain", + port: 1234, + path: "/{{watch_id}}", + body: "Encountered {{ctx.payload.hits.total}} errors", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6911b0f2f56523ccbd8027f276981b3.asciidoc b/docs/doc_examples/f6911b0f2f56523ccbd8027f276981b3.asciidoc new file mode 100644 index 000000000..95355dc17 --- /dev/null +++ b/docs/doc_examples/f6911b0f2f56523ccbd8027f276981b3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + combined_fields: { + query: "database systems", + fields: ["title", "abstract", "body"], + operator: "and", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc b/docs/doc_examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc new file mode 100644 index 000000000..33a796a1f --- /dev/null +++ b/docs/doc_examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteCalendarEvent({ + calendar_id: "planned-outages", + event_id: "LS8LJGEBMTCMA-qz49st", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6b5032bf27c2445d28845be0d413970.asciidoc b/docs/doc_examples/f6b5032bf27c2445d28845be0d413970.asciidoc deleted file mode 100644 index 654779b1a..000000000 --- a/docs/doc_examples/f6b5032bf27c2445d28845be0d413970.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'index_long', - body: { - mappings: { - properties: { - field: { - type: 'long' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f6c9d72fa26cbedd0c3f9fa64a88c38a.asciidoc b/docs/doc_examples/f6c9d72fa26cbedd0c3f9fa64a88c38a.asciidoc new file mode 100644 index 000000000..2b2e4c2b3 --- /dev/null +++ b/docs/doc_examples/f6c9d72fa26cbedd0c3f9fa64a88c38a.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_all: {}, + }, + _source: "route_length_miles", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6d493650b4344f17297b568016fb445.asciidoc b/docs/doc_examples/f6d493650b4344f17297b568016fb445.asciidoc new file mode 100644 index 000000000..2ca1d4a9c --- /dev/null +++ b/docs/doc_examples/f6d493650b4344f17297b568016fb445.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.unfollow({ + index: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6d6889667f56b8f49d2858070571a6b.asciidoc b/docs/doc_examples/f6d6889667f56b8f49d2858070571a6b.asciidoc deleted file mode 100644 index 19ad9c0a6..000000000 --- a/docs/doc_examples/f6d6889667f56b8f49d2858070571a6b.asciidoc +++ /dev/null @@ -1,27 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test', - alias: 'alias1', - is_write_index: true - } - }, - { - add: { - index: 'test2', - alias: 'alias1' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f6de702c3d097af0b0bd391c4f947233.asciidoc b/docs/doc_examples/f6de702c3d097af0b0bd391c4f947233.asciidoc new file mode 100644 index 000000000..f7a8647ae --- /dev/null +++ b/docs/doc_examples/f6de702c3d097af0b0bd391c4f947233.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + v: "true", + s: "rep:desc,pri.store.size:desc", + h: "health,index,pri,rep,store.size,pri.store.size", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc b/docs/doc_examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc new file mode 100644 index 000000000..5cbc31432 --- /dev/null +++ b/docs/doc_examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedPrivileges({ + application: "myapp", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6ead39c5505045543b9225deca7367d.asciidoc b/docs/doc_examples/f6ead39c5505045543b9225deca7367d.asciidoc new file mode 100644 index 000000000..9e329ed3e --- /dev/null +++ b/docs/doc_examples/f6ead39c5505045543b9225deca7367d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.postVotingConfigExclusions({ + node_names: "nodeName1,nodeName2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6edbed2b5b2709bbc13866a4780e27a.asciidoc b/docs/doc_examples/f6edbed2b5b2709bbc13866a4780e27a.asciidoc new file mode 100644 index 000000000..34e9a18d1 --- /dev/null +++ b/docs/doc_examples/f6edbed2b5b2709bbc13866a4780e27a.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + username: "johnsmith", + name: { + first: "John", + last: "Smith", + }, + }, +}); +console.log(response); + +const response1 = await client.indices.getMapping({ + index: "my-index-000001", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc b/docs/doc_examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc new file mode 100644 index 000000000..6d21aa440 --- /dev/null +++ b/docs/doc_examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + id: "my_watch", + trigger_data: { + triggered_time: "now", + scheduled_time: "now", + }, + alternative_input: { + foo: "bar", + }, + ignore_condition: true, + action_modes: { + "my-action": "force_simulate", + }, + record_execution: true, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc b/docs/doc_examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc index dd8d7e4a8..df0215037 100644 --- a/docs/doc_examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc +++ b/docs/doc_examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc @@ -4,18 +4,15 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - bool: { - filter: { - term: { - status: 'active' - } - } - } - } - } -}) -console.log(response) + query: { + bool: { + filter: { + term: { + status: "active", + }, + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc b/docs/doc_examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc new file mode 100644 index 000000000..4506b25ca --- /dev/null +++ b/docs/doc_examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my_test_scores", + }, + dest: { + index: "my_test_scores_2", + pipeline: "my_test_scores_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f7139b3c0e066be832b9100ae17157cc.asciidoc b/docs/doc_examples/f7139b3c0e066be832b9100ae17157cc.asciidoc new file mode 100644 index 000000000..8635b5ce1 --- /dev/null +++ b/docs/doc_examples/f7139b3c0e066be832b9100ae17157cc.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + format: "txt", + query: + "\n FROM library\n | KEEP author, name, page_count, release_date\n | SORT page_count DESC\n | LIMIT 5\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f733b25cd4c448b226bb76862974eef2.asciidoc b/docs/doc_examples/f733b25cd4c448b226bb76862974eef2.asciidoc new file mode 100644 index 000000000..390ed34ff --- /dev/null +++ b/docs/doc_examples/f733b25cd4c448b226bb76862974eef2.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + settings: { + analysis: { + filter: { + code: { + type: "pattern_capture", + preserve_original: true, + patterns: ["(\\p{Ll}+|\\p{Lu}\\p{Ll}+|\\p{Lu}+)", "(\\d+)"], + }, + }, + analyzer: { + code: { + tokenizer: "pattern", + filter: ["code", "lowercase"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f749efe8f11ebd43ef83db91922c736e.asciidoc b/docs/doc_examples/f749efe8f11ebd43ef83db91922c736e.asciidoc new file mode 100644 index 000000000..630fed95c --- /dev/null +++ b/docs/doc_examples/f749efe8f11ebd43ef83db91922c736e.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + clusterB: { + mode: "proxy", + skip_unavailable: "true", + server_name: "clusterb.es.region-b.gcp.elastic-cloud.com", + proxy_socket_connections: "18", + proxy_address: "clusterb.es.region-b.gcp.elastic-cloud.com:9400", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/f7726cc2c60dea26b88bf0df99fb0813.asciidoc b/docs/doc_examples/f7726cc2c60dea26b88bf0df99fb0813.asciidoc new file mode 100644 index 000000000..355dc8d8a --- /dev/null +++ b/docs/doc_examples/f7726cc2c60dea26b88bf0df99fb0813.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + runtime: { + day_of_week: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc b/docs/doc_examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc new file mode 100644 index 000000000..b6d0265a7 --- /dev/null +++ b/docs/doc_examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job/my-connector-sync-job", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f7d3d367a3d8e8ff0eca426b6ea85252.asciidoc b/docs/doc_examples/f7d3d367a3d8e8ff0eca426b6ea85252.asciidoc new file mode 100644 index 000000000..1a905fa5f --- /dev/null +++ b/docs/doc_examples/f7d3d367a3d8e8ff0eca426b6ea85252.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "k8s", + }, + dest: { + index: "k9s", + op_type: "create", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f7dc2fed08e57abda2c3e8a14f8eb098.asciidoc b/docs/doc_examples/f7dc2fed08e57abda2c3e8a14f8eb098.asciidoc new file mode 100644 index 000000000..2e19d6a11 --- /dev/null +++ b/docs/doc_examples/f7dc2fed08e57abda2c3e8a14f8eb098.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "armenian_example", + settings: { + analysis: { + filter: { + armenian_stop: { + type: "stop", + stopwords: "_armenian_", + }, + armenian_keywords: { + type: "keyword_marker", + keywords: ["օրինակ"], + }, + armenian_stemmer: { + type: "stemmer", + language: "armenian", + }, + }, + analyzer: { + rebuilt_armenian: { + tokenizer: "standard", + filter: [ + "lowercase", + "armenian_stop", + "armenian_keywords", + "armenian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f7ec9062b3a7578fed55f119d7c22b74.asciidoc b/docs/doc_examples/f7ec9062b3a7578fed55f119d7c22b74.asciidoc new file mode 100644 index 000000000..e942d1c0c --- /dev/null +++ b/docs/doc_examples/f7ec9062b3a7578fed55f119d7c22b74.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["lowercase", "asciifolding"], + text: "Is this déja vu?", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc b/docs/doc_examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc new file mode 100644 index 000000000..e5501b1a8 --- /dev/null +++ 
b/docs/doc_examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f83eb6605c7c56e297a494b318400ef0.asciidoc b/docs/doc_examples/f83eb6605c7c56e297a494b318400ef0.asciidoc new file mode 100644 index 000000000..4f336dd82 --- /dev/null +++ b/docs/doc_examples/f83eb6605c7c56e297a494b318400ef0.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "shirts", + query: { + bool: { + filter: [ + { + term: { + color: "red", + }, + }, + { + term: { + brand: "gucci", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8525c2460a577edfef156c13f55b8a7.asciidoc b/docs/doc_examples/f8525c2460a577edfef156c13f55b8a7.asciidoc new file mode 100644 index 000000000..96afb8772 --- /dev/null +++ b/docs/doc_examples/f8525c2460a577edfef156c13f55b8a7.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + query: { + constant_score: { + filter: { + range: { + price: { + to: "500", + }, + }, + }, + }, + }, + aggs: { + prices: { + histogram: { + field: "price", + interval: 50, + hard_bounds: { + min: 100, + max: 200, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8651356ce2e7e93fa306c30f57ed588.asciidoc b/docs/doc_examples/f8651356ce2e7e93fa306c30f57ed588.asciidoc new file mode 100644 index 000000000..60451049e --- /dev/null +++ b/docs/doc_examples/f8651356ce2e7e93fa306c30f57ed588.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "custom_truncate_example", + settings: { + analysis: { + analyzer: { + standard_truncate: { + tokenizer: "standard", + filter: ["truncate"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8833488041f3d318435b60917fa877c.asciidoc b/docs/doc_examples/f8833488041f3d318435b60917fa877c.asciidoc new file mode 100644 index 000000000..2c6c95db2 --- /dev/null +++ b/docs/doc_examples/f8833488041f3d318435b60917fa877c.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["my_search_index1", "my_search_index2"], + template: { + script: { + source: { + query: { + query_string: { + query: "{{query_string}}", + default_field: "{{default_field}}", + }, + }, + }, + params: { + query_string: "*", + default_field: "*", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc b/docs/doc_examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc new file mode 100644 index 000000000..156c3a585 --- /dev/null +++ b/docs/doc_examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc @@ -0,0 
+1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must: { + term: { + "user.id": "kimchy", + }, + }, + filter: { + term: { + tags: "production", + }, + }, + must_not: { + range: { + age: { + gte: 10, + lte: 20, + }, + }, + }, + should: [ + { + term: { + tags: "env1", + }, + }, + { + term: { + tags: "deployed", + }, + }, + ], + minimum_should_match: 1, + boost: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8cafb1a08bc9b2dd5239f99d4e93f4c.asciidoc b/docs/doc_examples/f8cafb1a08bc9b2dd5239f99d4e93f4c.asciidoc new file mode 100644 index 000000000..85f1392c5 --- /dev/null +++ b/docs/doc_examples/f8cafb1a08bc9b2dd5239f99d4e93f4c.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: { + type: "char_group", + tokenize_on_chars: ["whitespace", "-", "\n"], + }, + text: "The QUICK brown-fox", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc b/docs/doc_examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc new file mode 100644 index 000000000..c02fd9b75 --- /dev/null +++ b/docs/doc_examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.getRollupCaps({ + id: "sensor-1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc b/docs/doc_examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc new file mode 100644 index 000000000..f74bd1fbc --- /dev/null +++ b/docs/doc_examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.getBasicStatus(); +console.log(response); +---- diff --git a/docs/doc_examples/f9636d7ef1a45be4f36418c875cf6bef.asciidoc b/docs/doc_examples/f9636d7ef1a45be4f36418c875cf6bef.asciidoc deleted file mode 100644 index 7ecc68bf5..000000000 --- a/docs/doc_examples/f9636d7ef1a45be4f36418c875cf6bef.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'sessions', - id: 'dh3sgudg8gsrgl', - body: { - scripted_upsert: true, - script: { - id: 'my_web_session_summariser', - params: { - pageViewEvent: { - url: 'foo.com/bar', - response: 404, - time: '2014-01-01 12:32' - } - } - }, - upsert: {} - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f96d4614f2fc294339fef325b794355f.asciidoc b/docs/doc_examples/f96d4614f2fc294339fef325b794355f.asciidoc new file mode 100644 index 000000000..7e97b2c84 --- /dev/null +++ b/docs/doc_examples/f96d4614f2fc294339fef325b794355f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getBuckets({ + job_id: "low_request_rate", + anomaly_score: 80, + start: 
1454530200001, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc b/docs/doc_examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc new file mode 100644 index 000000000..5dfedbed7 --- /dev/null +++ b/docs/doc_examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "test_watch", + trigger: { + schedule: { + hourly: { + minute: [0, 5], + }, + }, + }, + input: { + simple: { + payload: { + send: "yes", + }, + }, + }, + condition: { + always: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f9732ce07960134ea7156e118c2da8a6.asciidoc b/docs/doc_examples/f9732ce07960134ea7156e118c2da8a6.asciidoc new file mode 100644 index 000000000..f7a5e21cc --- /dev/null +++ b/docs/doc_examples/f9732ce07960134ea7156e118c2da8a6.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_simple_analyzer: { + tokenizer: "lowercase", + filter: [], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f978088f5117d4addd55c11ee3777312.asciidoc b/docs/doc_examples/f978088f5117d4addd55c11ee3777312.asciidoc new file mode 100644 index 000000000..43ef49020 --- /dev/null +++ b/docs/doc_examples/f978088f5117d4addd55c11ee3777312.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createServiceToken({ + namespace: "elastic", + service: "fleet-server", + name: "token1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f97aa2efabbf11a534073041eb2658c9.asciidoc b/docs/doc_examples/f97aa2efabbf11a534073041eb2658c9.asciidoc new file mode 100644 index 000000000..a46773eb2 --- /dev/null +++ b/docs/doc_examples/f97aa2efabbf11a534073041eb2658c9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteScript({ + id: "my-stored-script", +}); +console.log(response); +---- diff --git a/docs/doc_examples/978088f989d45dd09339582e9cbc60e0.asciidoc b/docs/doc_examples/f98687271e1bec031cc34d05d8f4b60b.asciidoc similarity index 54% rename from docs/doc_examples/978088f989d45dd09339582e9cbc60e0.asciidoc rename to docs/doc_examples/f98687271e1bec031cc34d05d8f4b60b.asciidoc index 6f2b5c2aa..50dcca406 100644 --- a/docs/doc_examples/978088f989d45dd09339582e9cbc60e0.asciidoc +++ b/docs/doc_examples/f98687271e1bec031cc34d05d8f4b60b.asciidoc @@ -4,15 +4,17 @@ [source, js] ---- const response = await client.search({ - index: '%3Clogstash-%7Bnow%2Fd%7D%3E', - body: { - query: { + query: { + span_multi: { match: { - test: 'data' - } - } - } -}) -console.log(response) + prefix: { + "user.id": { + value: "ki", + }, + }, + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/f9a315ea99bed0cf2f36be1d74eb3e4a.asciidoc b/docs/doc_examples/f9a315ea99bed0cf2f36be1d74eb3e4a.asciidoc new file mode 100644 index 000000000..4ffc48a55 --- /dev/null +++ 
b/docs/doc_examples/f9a315ea99bed0cf2f36be1d74eb3e4a.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "MULTIPOLYGON (((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0, 102.0 2.0)), ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2)))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/89a8ac1509936acc272fc2d72907bc45.asciidoc b/docs/doc_examples/f9c8245cc13770dff052b6759a749efa.asciidoc similarity index 74% rename from docs/doc_examples/89a8ac1509936acc272fc2d72907bc45.asciidoc rename to docs/doc_examples/f9c8245cc13770dff052b6759a749efa.asciidoc index f7af4f7b0..717368649 100644 --- a/docs/doc_examples/89a8ac1509936acc272fc2d72907bc45.asciidoc +++ b/docs/doc_examples/f9c8245cc13770dff052b6759a749efa.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.getSource({ - index: 'twitter', - id: '1' -}) -console.log(response) + index: "my-index-000001", + id: 1, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/f9cb2547ab04461a12bfd25a35be5f96.asciidoc b/docs/doc_examples/f9cb2547ab04461a12bfd25a35be5f96.asciidoc new file mode 100644 index 000000000..75d369723 --- /dev/null +++ b/docs/doc_examples/f9cb2547ab04461a12bfd25a35be5f96.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + my_range: { + type: "date_range", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + my_range: [ + { + gte: 1504224000000, + lte: 1504569600000, + }, + { + gte: "2017-09-01", + lte: "2017-09-10", + }, + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc b/docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc new file mode 100644 index 000000000..0c7b48ea7 --- /dev/null +++ b/docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + multi_match: { + query: "brown f", + type: "bool_prefix", + fields: ["my_field", "my_field._2gram", "my_field._3gram"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f9f541ae23a184301913f07e62d1afd3.asciidoc b/docs/doc_examples/f9f541ae23a184301913f07e62d1afd3.asciidoc new file mode 100644 index 000000000..78b1ad12c --- /dev/null +++ b/docs/doc_examples/f9f541ae23a184301913f07e62d1afd3.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + keep_alive: "2d", + wait_for_completion_timeout: "2s", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fa0f4485cd48f986b7ae8cbb24e331c4.asciidoc 
b/docs/doc_examples/fa0f4485cd48f986b7ae8cbb24e331c4.asciidoc deleted file mode 100644 index 2b631649e..000000000 --- a/docs/doc_examples/fa0f4485cd48f986b7ae8cbb24e331c4.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test', - alias: 'alias2', - search_routing: '1,2', - index_routing: '2' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/fa2fe60f570bd930d2891778c6efbfe6.asciidoc b/docs/doc_examples/fa2fe60f570bd930d2891778c6efbfe6.asciidoc deleted file mode 100644 index e7e37ece7..000000000 --- a/docs/doc_examples/fa2fe60f570bd930d2891778c6efbfe6.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match: { - message: 'this is a test' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc b/docs/doc_examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc new file mode 100644 index 000000000..73cdcbcd1 --- /dev/null +++ b/docs/doc_examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "standard", + text: "Quick Brown Foxes!", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fa5dcd1c7fadc473a791daf0d7ceec36.asciidoc b/docs/doc_examples/fa5dcd1c7fadc473a791daf0d7ceec36.asciidoc new file mode 100644 index 000000000..ada16f058 --- /dev/null +++ b/docs/doc_examples/fa5dcd1c7fadc473a791daf0d7ceec36.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "tour", + filter_path: "aggregations", + aggregations: { + path: { + time_series: {}, + aggregations: { + museum_tour: { + geo_line: { + point: { + field: "location", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fa61e3481b1f889b3bd4253866bb1c6b.asciidoc b/docs/doc_examples/fa61e3481b1f889b3bd4253866bb1c6b.asciidoc new file mode 100644 index 000000000..77efc1c63 --- /dev/null +++ b/docs/doc_examples/fa61e3481b1f889b3bd4253866bb1c6b.asciidoc @@ -0,0 +1,86 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "correlate_latency", + size: 0, + filter_path: "aggregations", + aggs: { + buckets: { + terms: { + field: "version", + size: 2, + }, + aggs: { + latency_ranges: { + range: { + field: "latency", + ranges: [ + { + to: 0, + }, + { + from: 0, + to: 105, + }, + { + from: 105, + to: 225, + }, + { + from: 225, + to: 445, + }, + { + from: 445, + to: 665, + }, + { + from: 665, + to: 885, + }, + { + from: 885, + to: 1115, + }, + { + from: 1115, + to: 1335, + }, + { + from: 1335, + to: 1555, + }, + { + from: 1555, + to: 1775, + }, + { + from: 1775, + }, + ], + }, + }, + bucket_correlation: { + bucket_correlation: { + buckets_path: "latency_ranges>_count", + 
function: { + count_correlation: { + indicator: { + expectations: [ + 0, 52.5, 165, 335, 555, 775, 1000, 1225, 1445, 1665, 1775, + ], + doc_count: 200, + }, + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fa82d86a046d67366cfe9ce65535e433.asciidoc b/docs/doc_examples/fa82d86a046d67366cfe9ce65535e433.asciidoc new file mode 100644 index 000000000..0120aa38c --- /dev/null +++ b/docs/doc_examples/fa82d86a046d67366cfe9ce65535e433.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.graph.explore({ + index: "clicklogs", + vertices: [ + { + field: "product", + include: ["1854873"], + }, + ], + connections: { + vertices: [ + { + field: "query.raw", + exclude: ["midi keyboard", "midi", "synth"], + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc b/docs/doc_examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc index ac1a67fd2..b7e200d3f 100644 --- a/docs/doc_examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc +++ b/docs/doc_examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc @@ -4,21 +4,18 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - bool: { - must: { - match_all: {} + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + term: { + status: "active", }, - filter: { - term: { - status: 'active' - } - } - } - } - } -}) -console.log(response) + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/fa946228e946da256d40264c8b070a1a.asciidoc b/docs/doc_examples/fa946228e946da256d40264c8b070a1a.asciidoc new file mode 100644 index 000000000..0cce8f2e4 --- /dev/null +++ b/docs/doc_examples/fa946228e946da256d40264c8b070a1a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + aggs: { + "my-agg-name": { + terms: { + field: "my-field", + }, + meta: { + "my-metadata-field": "foo", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fa9a3ef94470f3d9bd6500b65bf993d1.asciidoc b/docs/doc_examples/fa9a3ef94470f3d9bd6500b65bf993d1.asciidoc new file mode 100644 index 000000000..7dc54746d --- /dev/null +++ b/docs/doc_examples/fa9a3ef94470f3d9bd6500b65bf993d1.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "multiplexer_example", + analyzer: "my_analyzer", + text: "Going HOME", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fab4b811ba968aa4df92fb1ac059ea31.asciidoc b/docs/doc_examples/fab4b811ba968aa4df92fb1ac059ea31.asciidoc new file mode 100644 index 000000000..a84fa74d6 --- /dev/null +++ b/docs/doc_examples/fab4b811ba968aa4df92fb1ac059ea31.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example", + mappings: { + properties: { + location: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fab702851e90e945c1b62dec0bb6a205.asciidoc 
b/docs/doc_examples/fab702851e90e945c1b62dec0bb6a205.asciidoc new file mode 100644 index 000000000..cf74a61dd --- /dev/null +++ b/docs/doc_examples/fab702851e90e945c1b62dec0bb6a205.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.deleteBehavioralAnalytics({ + name: "my_analytics_collection", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fabe14480624a99e8ee42c7338672058.asciidoc b/docs/doc_examples/fabe14480624a99e8ee42c7338672058.asciidoc index 02ddabcee..36202bf45 100644 --- a/docs/doc_examples/fabe14480624a99e8ee42c7338672058.asciidoc +++ b/docs/doc_examples/fabe14480624a99e8ee42c7338672058.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.indices.create({ - index: 'test', - wait_for_active_shards: '2' -}) -console.log(response) + index: "test", + wait_for_active_shards: 2, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc b/docs/doc_examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc new file mode 100644 index 000000000..b6fb34952 --- /dev/null +++ b/docs/doc_examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getDataFrameAnalyticsStats({ + id: "weblog-outliers", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fad524db23eb5718ff310956e590b00d.asciidoc b/docs/doc_examples/fad524db23eb5718ff310956e590b00d.asciidoc new file mode 100644 index 000000000..73022d70d --- /dev/null +++ b/docs/doc_examples/fad524db23eb5718ff310956e590b00d.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + random_score: { + seed: 10, + field: "_seq_no", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc b/docs/doc_examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc new file mode 100644 index 000000000..c6e1be29f --- /dev/null +++ b/docs/doc_examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rankEval({ + index: "my-index-000001", + requests: [ + { + id: "JFK query", + request: { + query: { + match_all: {}, + }, + }, + ratings: [], + }, + ], + metric: { + precision: { + k: 20, + relevant_rating_threshold: 1, + ignore_unlabeled: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb1180992b2087dfb36576b44c4261e4.asciidoc b/docs/doc_examples/fb1180992b2087dfb36576b44c4261e4.asciidoc new file mode 100644 index 000000000..edb1ad6c8 --- /dev/null +++ b/docs/doc_examples/fb1180992b2087dfb36576b44c4261e4.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-data-stream", + write_index_only: "true", + properties: { + host: { + properties: { + ip: { + type: "ip", + ignore_malformed: true, + 
}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb1263cfdcbb6a89b20b57004d7e0dfc.asciidoc b/docs/doc_examples/fb1263cfdcbb6a89b20b57004d7e0dfc.asciidoc new file mode 100644 index 000000000..db4b69406 --- /dev/null +++ b/docs/doc_examples/fb1263cfdcbb6a89b20b57004d7e0dfc.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + set: { + field: "my_field", + value: "{{{input_field.1}}}", + }, + }, + ], + }, + docs: [ + { + _index: "index", + _id: "id", + _source: { + input_field: ["Ubuntu", "Windows", "Ventura"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb2b91206cfa8b86b4c7117ac1b5193b.asciidoc b/docs/doc_examples/fb2b91206cfa8b86b4c7117ac1b5193b.asciidoc new file mode 100644 index 000000000..e3465e741 --- /dev/null +++ b/docs/doc_examples/fb2b91206cfa8b86b4c7117ac1b5193b.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "user_hits", + size: 0, + aggs: { + users_per_day: { + date_histogram: { + field: "timestamp", + calendar_interval: "day", + }, + aggs: { + distinct_users: { + cardinality: { + field: "user_id", + }, + }, + total_new_users: { + cumulative_cardinality: { + buckets_path: "distinct_users", + }, + }, + incremental_new_users: { + derivative: { + buckets_path: "total_new_users", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb3505d976283fb7c7b9705a761e0dc2.asciidoc b/docs/doc_examples/fb3505d976283fb7c7b9705a761e0dc2.asciidoc new file mode 100644 index 000000000..5841a4fe8 --- /dev/null +++ b/docs/doc_examples/fb3505d976283fb7c7b9705a761e0dc2.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "polygon", + orientation: "clockwise", + coordinates: [ + [ + [1000, 1000], + [1000, 1001], + [1001, 1001], + [1001, 1000], + [1000, 1000], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc b/docs/doc_examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc new file mode 100644 index 000000000..f659ab628 --- /dev/null +++ b/docs/doc_examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.getAutoscalingPolicy({ + name: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb955375a202f66133af009c04cb77ad.asciidoc b/docs/doc_examples/fb955375a202f66133af009c04cb77ad.asciidoc new file mode 100644 index 000000000..5e3b5b376 --- /dev/null +++ b/docs/doc_examples/fb955375a202f66133af009c04cb77ad.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "networks", + mappings: { + properties: { + range: { + type: "ip_range", + }, + name: { + type: "keyword", + 
}, + department: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fbb38243221c8fb311660616e3add9ce.asciidoc b/docs/doc_examples/fbb38243221c8fb311660616e3add9ce.asciidoc new file mode 100644 index 000000000..543a8d989 --- /dev/null +++ b/docs/doc_examples/fbb38243221c8fb311660616e3add9ce.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": [-70, 40], + order: "asc", + unit: "km", + mode: "min", + distance_type: "arc", + ignore_unmapped: true, + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fbc5ab85b908480bf944b55da0a43488.asciidoc b/docs/doc_examples/fbc5ab85b908480bf944b55da0a43488.asciidoc new file mode 100644 index 000000000..2f9356831 --- /dev/null +++ b/docs/doc_examples/fbc5ab85b908480bf944b55da0a43488.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + prefix: { + "user.id": { + value: "ki", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fbdad6620eb645f5f1f02e3673604d01.asciidoc b/docs/doc_examples/fbdad6620eb645f5f1f02e3673604d01.asciidoc new file mode 100644 index 000000000..62822d80b --- /dev/null +++ b/docs/doc_examples/fbdad6620eb645f5f1f02e3673604d01.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "12km", + "pin.location": "drm3btev3e86", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc1907515f6a913884a9f86451e90ee8.asciidoc b/docs/doc_examples/fc1907515f6a913884a9f86451e90ee8.asciidoc new file mode 100644 index 000000000..cd09758c7 --- /dev/null +++ b/docs/doc_examples/fc1907515f6a913884a9f86451e90ee8.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + _source: { + excludes: ["content_embedding"], + }, + properties: { + content_embedding: { + type: "sparse_vector", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc190fbbf71949331266dcb3f46a1198.asciidoc b/docs/doc_examples/fc190fbbf71949331266dcb3f46a1198.asciidoc new file mode 100644 index 000000000..679b55598 --- /dev/null +++ b/docs/doc_examples/fc190fbbf71949331266dcb3f46a1198.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.dataStreamsStats({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc26f51bb22c0b5270a66b4722f18aa7.asciidoc b/docs/doc_examples/fc26f51bb22c0b5270a66b4722f18aa7.asciidoc new file mode 100644 index 000000000..990b731c8 --- /dev/null +++ 
b/docs/doc_examples/fc26f51bb22c0b5270a66b4722f18aa7.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + allocate: { + number_of_replicas: 2, + total_shards_per_node: 200, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc3f5f40fa283559ca615cd0eb0a1755.asciidoc b/docs/doc_examples/fc3f5f40fa283559ca615cd0eb0a1755.asciidoc new file mode 100644 index 000000000..eede615f7 --- /dev/null +++ b/docs/doc_examples/fc3f5f40fa283559ca615cd0eb0a1755.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_index", + mappings: { + properties: { + my_histogram: { + type: "histogram", + }, + my_text: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc49437ce2e7916facf58128308c2aa3.asciidoc b/docs/doc_examples/fc49437ce2e7916facf58128308c2aa3.asciidoc new file mode 100644 index 000000000..fed89c0fb --- /dev/null +++ b/docs/doc_examples/fc49437ce2e7916facf58128308c2aa3.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchableSnapshots.mount({ + repository: "my_repository", + snapshot: "my_snapshot", + wait_for_completion: "true", + index: "my_docs", + renamed_index: "docs", + index_settings: { + "index.number_of_replicas": 0, + }, + ignore_index_settings: ["index.refresh_interval"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc51fbc60b0e20aac83300a43ad90252.asciidoc b/docs/doc_examples/fc51fbc60b0e20aac83300a43ad90252.asciidoc new file mode 100644 index 000000000..d6db2d338 --- /dev/null +++ b/docs/doc_examples/fc51fbc60b0e20aac83300a43ad90252.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "geometrycollection", + geometries: [ + { + type: "point", + coordinates: [1000, 100], + }, + { + type: "linestring", + coordinates: [ + [1001, 100], + [1002, 100], + ], + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc5a81f34d416e4b45ca8a859dd3b8f1.asciidoc b/docs/doc_examples/fc5a81f34d416e4b45ca8a859dd3b8f1.asciidoc new file mode 100644 index 000000000..1498d7d30 --- /dev/null +++ b/docs/doc_examples/fc5a81f34d416e4b45ca8a859dd3b8f1.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + by_day: { + auto_date_histogram: { + field: "date", + buckets: 3, + time_zone: "-01:00", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc75ea748e5f49b8ab292e453ab641a6.asciidoc b/docs/doc_examples/fc75ea748e5f49b8ab292e453ab641a6.asciidoc new file mode 100644 index 000000000..76340b434 --- /dev/null +++ b/docs/doc_examples/fc75ea748e5f49b8ab292e453ab641a6.asciidoc @@ 
-0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "products", + size: 0, + query: { + match: { + name: "led tv", + }, + }, + aggs: { + resellers: { + nested: { + path: "resellers", + }, + aggs: { + min_price: { + min: { + field: "resellers.price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc8097bdfb6f3a4017bf4186ccca8a84.asciidoc b/docs/doc_examples/fc8097bdfb6f3a4017bf4186ccca8a84.asciidoc deleted file mode 100644 index 9e70bd7e1..000000000 --- a/docs/doc_examples/fc8097bdfb6f3a4017bf4186ccca8a84.asciidoc +++ /dev/null @@ -1,61 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - text: { - type: 'text', - fields: { - english: { - type: 'text', - analyzer: 'english' - } - } - } - } - } - } -}) -console.log(response0) - -const response1 = await client.index({ - index: 'my_index', - id: '1', - body: { - text: 'quick brown fox' - } -}) -console.log(response1) - -const response2 = await client.index({ - index: 'my_index', - id: '2', - body: { - text: 'quick brown foxes' - } -}) -console.log(response2) - -const response3 = await client.search({ - index: 'my_index', - body: { - query: { - multi_match: { - query: 'quick brown foxes', - fields: [ - 'text', - 'text.english' - ], - type: 'most_fields' - } - } - } -}) -console.log(response3) ----- - diff --git a/docs/doc_examples/fc8a426f8a5112e61e2acb913982a8d9.asciidoc b/docs/doc_examples/fc8a426f8a5112e61e2acb913982a8d9.asciidoc new file mode 100644 index 000000000..0342ca2e6 --- /dev/null +++ b/docs/doc_examples/fc8a426f8a5112e61e2acb913982a8d9.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "events", + size: 10, + sort: [ + { + timestamp: "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc9a1b1173690a911725cff3912e9755.asciidoc b/docs/doc_examples/fc9a1b1173690a911725cff3912e9755.asciidoc new file mode 100644 index 000000000..e97eff174 --- /dev/null +++ b/docs/doc_examples/fc9a1b1173690a911725cff3912e9755.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + readonly: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fccbddfba9f975de7e321732874dfb78.asciidoc b/docs/doc_examples/fccbddfba9f975de7e321732874dfb78.asciidoc new file mode 100644 index 000000000..48de444fe --- /dev/null +++ b/docs/doc_examples/fccbddfba9f975de7e321732874dfb78.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.dataStreamsStats({ + name: "my-data-stream*", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fce5c03a388c893cb11a6696e068543f.asciidoc 
b/docs/doc_examples/fce5c03a388c893cb11a6696e068543f.asciidoc new file mode 100644 index 000000000..eede0ecb8 --- /dev/null +++ b/docs/doc_examples/fce5c03a388c893cb11a6696e068543f.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.hasPrivilegesUserProfile({ + uids: [ + "u_LQPnxDxEjIH0GOUoFkZr5Y57YUwSkL9Joiq-g4OCbPc_0", + "u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1", + "u_does-not-exist_0", + ], + privileges: { + cluster: ["monitor", "create_snapshot", "manage_ml"], + index: [ + { + names: ["suppliers", "products"], + privileges: ["create_doc"], + }, + { + names: ["inventory"], + privileges: ["read", "write"], + }, + ], + application: [ + { + application: "inventory_manager", + privileges: ["read", "data:write/inventory"], + resources: ["product/1852563"], + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fce7a35a737fc9e54ac1225e310dd561.asciidoc b/docs/doc_examples/fce7a35a737fc9e54ac1225e310dd561.asciidoc new file mode 100644 index 000000000..0a773ef35 --- /dev/null +++ b/docs/doc_examples/fce7a35a737fc9e54ac1225e310dd561.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: + "\n double value = dotProduct(params.query_vector, 'my_dense_vector');\n return sigmoid(1, Math.E, -value); \n ", + params: { + query_vector: [4, 3.4, -0.2], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd04289c54493e19c1d3ac70d0b489c4.asciidoc b/docs/doc_examples/fd04289c54493e19c1d3ac70d0b489c4.asciidoc new file mode 100644 index 000000000..7805fd227 --- /dev/null +++ b/docs/doc_examples/fd04289c54493e19c1d3ac70d0b489c4.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + drop: { + description: "Drop documents that don't contain 'prod' tag", + if: "\n Collection tags = ctx.tags;\n if(tags != null){\n for (String tag : tags) {\n if (tag.toLowerCase().contains('prod')) {\n return false;\n }\n }\n }\n return true;\n ", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd0cd8ecd03468726b59a605eea06d75.asciidoc b/docs/doc_examples/fd0cd8ecd03468726b59a605eea06d75.asciidoc new file mode 100644 index 000000000..2c97c321d --- /dev/null +++ b/docs/doc_examples/fd0cd8ecd03468726b59a605eea06d75.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + bool: { + must: [ + { + match: { + content: "2016", + }, + }, + ], + should: [ + { + rank_feature: { + field: "pagerank", + }, + }, + { + rank_feature: { + field: "url_length", + boost: 0.1, + }, + }, + { + rank_feature: { + field: "topics.sports", + boost: 0.4, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd26bfdbe95b2d2db374385d12849f77.asciidoc 
b/docs/doc_examples/fd26bfdbe95b2d2db374385d12849f77.asciidoc new file mode 100644 index 000000000..7209a78f7 --- /dev/null +++ b/docs/doc_examples/fd26bfdbe95b2d2db374385d12849f77.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "trim_example", + settings: { + analysis: { + analyzer: { + keyword_trim: { + tokenizer: "keyword", + filter: ["trim"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc b/docs/doc_examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc new file mode 100644 index 000000000..ec8571bec --- /dev/null +++ b/docs/doc_examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getTemplate(); +console.log(response); +---- diff --git a/docs/doc_examples/fd352b472d44d197022a46fce90b6ecb.asciidoc b/docs/doc_examples/fd352b472d44d197022a46fce90b6ecb.asciidoc new file mode 100644 index 000000000..fcd562fa2 --- /dev/null +++ b/docs/doc_examples/fd352b472d44d197022a46fce90b6ecb.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + docs: [ + { + _index: "test", + _id: "1", + _source: false, + }, + { + _index: "test", + _id: "2", + _source: ["field3", "field4"], + }, + { + _index: "test", + _id: "3", + _source: { + include: ["user"], + exclude: ["user.location"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd60b4092c6552164862cec287359676.asciidoc b/docs/doc_examples/fd60b4092c6552164862cec287359676.asciidoc new file mode 100644 index 000000000..3418d2f7b --- /dev/null +++ b/docs/doc_examples/fd60b4092c6552164862cec287359676.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.stopDatafeed({ + datafeed_id: "datafeed-low_request_rate", + timeout: "30s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd620f09dbce62c6f0f603a366623607.asciidoc b/docs/doc_examples/fd620f09dbce62c6f0f603a366623607.asciidoc new file mode 100644 index 000000000..1131a4d95 --- /dev/null +++ b/docs/doc_examples/fd620f09dbce62c6f0f603a366623607.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-sql-connector/_filtering", + body: { + advanced_snippet: { + value: [ + { + tables: ["users", "orders"], + query: + "SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id", + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd6fdc8fa994dd02cf1177077325304f.asciidoc b/docs/doc_examples/fd6fdc8fa994dd02cf1177077325304f.asciidoc new file mode 100644 index 000000000..13c5bfeb8 --- /dev/null +++ b/docs/doc_examples/fd6fdc8fa994dd02cf1177077325304f.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "snapshot-20200617", + feature_states: ["geoip"], + indices: "kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", + include_aliases: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd738a9af7b5d21da31a7722f03aade8.asciidoc b/docs/doc_examples/fd738a9af7b5d21da31a7722f03aade8.asciidoc new file mode 100644 index 000000000..c7bd58f9e --- /dev/null +++ b/docs/doc_examples/fd738a9af7b5d21da31a7722f03aade8.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + v: "true", + h: "index,prirep,shard,store", + s: "prirep,store", + bytes: "gb", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc b/docs/doc_examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc new file mode 100644 index 000000000..b5b6b35b5 --- /dev/null +++ b/docs/doc_examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote-replication", + cluster: ["manage_ccr"], + remote_indices: [ + { + clusters: ["my_remote_cluster"], + names: ["leader-index"], + privileges: ["cross_cluster_replication"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc b/docs/doc_examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc new file mode 100644 index 000000000..202324d8c --- /dev/null +++ b/docs/doc_examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "standard", + text: ["this is a test", "the second text"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fdada036a875d7995d5d7aba9c06361e.asciidoc b/docs/doc_examples/fdada036a875d7995d5d7aba9c06361e.asciidoc new file mode 100644 index 000000000..245c36566 --- /dev/null +++ b/docs/doc_examples/fdada036a875d7995d5d7aba9c06361e.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-2", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 3, + index: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fdc8e090293e78e9a6b283650b682517.asciidoc b/docs/doc_examples/fdc8e090293e78e9a6b283650b682517.asciidoc new file mode 100644 index 000000000..8fd30d6cf --- /dev/null +++ b/docs/doc_examples/fdc8e090293e78e9a6b283650b682517.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.open({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fdd38f0d248385a444c777e7acd97846.asciidoc b/docs/doc_examples/fdd38f0d248385a444c777e7acd97846.asciidoc 
deleted file mode 100644 index 4b58cb9e3..000000000 --- a/docs/doc_examples/fdd38f0d248385a444c777e7acd97846.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'title', - 'content' - ], - query: 'this OR that OR thus', - minimum_should_match: 2 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/fde3463ddf136fdfff1306a60986515e.asciidoc b/docs/doc_examples/fde3463ddf136fdfff1306a60986515e.asciidoc new file mode 100644 index 000000000..1296b55b0 --- /dev/null +++ b/docs/doc_examples/fde3463ddf136fdfff1306a60986515e.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "*", + flat_settings: "true", + filter_path: "**.settings.archived*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fdf7cfdf1c92d21ee710675596eac6fd.asciidoc b/docs/doc_examples/fdf7cfdf1c92d21ee710675596eac6fd.asciidoc new file mode 100644 index 000000000..869b00ef4 --- /dev/null +++ b/docs/doc_examples/fdf7cfdf1c92d21ee710675596eac6fd.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + match: { + my_text_field: "the query string", + }, + }, + }, + }, + { + knn: { + field: "text_embedding.predicted_value", + k: 10, + num_candidates: 100, + query_vector_builder: { + text_embedding: { + model_id: "sentence-transformers__msmarco-minilm-l-12-v3", + model_text: "the query string", + }, + }, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc b/docs/doc_examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc new file mode 100644 index 000000000..256eb6384 --- /dev/null +++ b/docs/doc_examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "sensor-template", + index_patterns: ["sensor-*"], + data_stream: {}, + template: { + lifecycle: { + downsampling: [ + { + after: "1d", + fixed_interval: "1h", + }, + ], + }, + settings: { + "index.mode": "time_series", + }, + mappings: { + properties: { + node: { + type: "keyword", + time_series_dimension: true, + }, + temperature: { + type: "half_float", + time_series_metric: "gauge", + }, + voltage: { + type: "half_float", + time_series_metric: "gauge", + }, + "@timestamp": { + type: "date", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe3a927d868cbc530e08e05964d5174a.asciidoc b/docs/doc_examples/fe3a927d868cbc530e08e05964d5174a.asciidoc new file mode 100644 index 000000000..8988080e4 --- /dev/null +++ b/docs/doc_examples/fe3a927d868cbc530e08e05964d5174a.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: 
"my-index-000001", + id: 1, + refresh: "true", + document: { + product: "chocolate", + price: [20, 4], + }, +}); +console.log(response); + +const response1 = await client.search({ + query: { + term: { + product: "chocolate", + }, + }, + sort: [ + { + price: { + order: "asc", + mode: "avg", + }, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc b/docs/doc_examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc new file mode 100644 index 000000000..c879695f1 --- /dev/null +++ b/docs/doc_examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "jwt_user1", + refresh: "true", + roles: ["jwt_role1"], + rules: { + all: [ + { + field: { + "realm.name": "jwt2", + }, + }, + { + field: { + username: "user2", + }, + }, + ], + }, + enabled: true, + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc b/docs/doc_examples/fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc new file mode 100644 index 000000000..dadf119aa --- /dev/null +++ b/docs/doc_examples/fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + mappings: { + _source: { + mode: "synthetic", + }, + properties: { + date: { + type: "date", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + date: ["2015-01-01T12:10:30Z", "2014-01-01T12:10:30Z"], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc b/docs/doc_examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc new file mode 100644 index 000000000..cd8c15f98 --- /dev/null +++ b/docs/doc_examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + query: { + script_score: { + query: { + match: { + body: "elasticsearch", + }, + }, + script: { + source: "_score * saturation(doc['pagerank'].value, 10)", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc b/docs/doc_examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc new file mode 100644 index 000000000..4a08144cf --- /dev/null +++ b/docs/doc_examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_histogram: { + type: "histogram", + }, + my_text: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe806011466e7cdc1590da186297edb6.asciidoc b/docs/doc_examples/fe806011466e7cdc1590da186297edb6.asciidoc new file mode 100644 index 000000000..d7d6148cc --- /dev/null +++ b/docs/doc_examples/fe806011466e7cdc1590da186297edb6.asciidoc @@ -0,0 +1,10 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe825c05e13e8163073166572c7ac97d.asciidoc b/docs/doc_examples/fe825c05e13e8163073166572c7ac97d.asciidoc new file mode 100644 index 000000000..47072cba1 --- /dev/null +++ b/docs/doc_examples/fe825c05e13e8163073166572c7ac97d.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "geocells", + id: 1, + pipeline: "geohex2shape", + document: { + geocell: "811fbffffffffff", + }, +}); +console.log(response); + +const response1 = await client.get({ + index: "geocells", + id: 1, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/fe8c3e2632f5057bfbd1898a8fe4d0d2.asciidoc b/docs/doc_examples/fe8c3e2632f5057bfbd1898a8fe4d0d2.asciidoc new file mode 100644 index 000000000..9c1b8804e --- /dev/null +++ b/docs/doc_examples/fe8c3e2632f5057bfbd1898a8fe4d0d2.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1", "index2"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "multi_match": {\n "query": "{{query_string}}",\n "fields": [{{#text_fields}}"{{name}}^{{boost}}",{{/text_fields}}]\n }\n },\n "explain": "{{explain}}",\n "from": "{{from}}",\n "size": "{{size}}"\n }\n ', + params: { + query_string: "*", + text_fields: [ + { + name: "title", + boost: 10, + }, + { + name: "description", + boost: 5, + }, + ], + explain: false, + from: 0, + size: 10, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe96ca3b2a559d8411aca7ed5f3854bd.asciidoc b/docs/doc_examples/fe96ca3b2a559d8411aca7ed5f3854bd.asciidoc new file mode 100644 index 000000000..a8701212d --- /dev/null +++ b/docs/doc_examples/fe96ca3b2a559d8411aca7ed5f3854bd.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + flat_settings: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/febb71d774e0a1fc67454213d7448c53.asciidoc b/docs/doc_examples/febb71d774e0a1fc67454213d7448c53.asciidoc new file mode 100644 index 000000000..791a84783 --- /dev/null +++ b/docs/doc_examples/febb71d774e0a1fc67454213d7448c53.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "my-index-000001", + id: 1, + script: "ctx._source.remove('new_field')", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fece7c0fe1f7d113aa05ff5346a18aff.asciidoc b/docs/doc_examples/fece7c0fe1f7d113aa05ff5346a18aff.asciidoc new file mode 100644 index 000000000..a820ab4eb --- /dev/null +++ b/docs/doc_examples/fece7c0fe1f7d113aa05ff5346a18aff.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the 
docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-data-stream", + refresh: "true", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2099-03-08T11:04:05.000Z", + user: { + id: "vlb44hny", + }, + message: "Login attempt failed", + }, + { + create: {}, + }, + { + "@timestamp": "2099-03-08T11:06:07.000Z", + user: { + id: "8a4f500d", + }, + message: "Login successful", + }, + { + create: {}, + }, + { + "@timestamp": "2099-03-09T11:07:08.000Z", + user: { + id: "l7gk7f82", + }, + message: "Logout successful", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/feda4b996ea7004f8b2c5f5007fb717b.asciidoc b/docs/doc_examples/feda4b996ea7004f8b2c5f5007fb717b.asciidoc new file mode 100644 index 000000000..550ae710f --- /dev/null +++ b/docs/doc_examples/feda4b996ea7004f8b2c5f5007fb717b.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "networks_lookup", + processors: [ + { + enrich: { + description: "Add 'network' data based on 'ip'", + policy_name: "networks-policy", + field: "ip", + target_field: "network", + max_matches: "10", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/feefeb68144002fd1fff57b77b95b85e.asciidoc b/docs/doc_examples/feefeb68144002fd1fff57b77b95b85e.asciidoc deleted file mode 100644 index 6c9393e31..000000000 --- a/docs/doc_examples/feefeb68144002fd1fff57b77b95b85e.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - size: 0, - aggs: { - group_by_state: { - terms: { - field: 'state.keyword' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/fef520cbc9b0656e6aac7b3dd3da9984.asciidoc b/docs/doc_examples/fef520cbc9b0656e6aac7b3dd3da9984.asciidoc new file mode 100644 index 000000000..39457fccc --- /dev/null +++ b/docs/doc_examples/fef520cbc9b0656e6aac7b3dd3da9984.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-index*", + query: + "\n sample by host\n [any where uptime > 0] by os\n [any where port > 100] by op_sys\n [any where bool == true] by os\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff05842419968a2141bde0371ac2f6f4.asciidoc b/docs/doc_examples/ff05842419968a2141bde0371ac2f6f4.asciidoc new file mode 100644 index 000000000..f64626281 --- /dev/null +++ b/docs/doc_examples/ff05842419968a2141bde0371ac2f6f4.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + match: { + "user.group.emails": "{{#join}}emails{{/join}}", + }, + }, + }, + params: { + emails: ["user1@example.com", "user_one@example.com"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc b/docs/doc_examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc new file mode 100644 index 000000000..c0b5cd35e --- /dev/null +++ 
b/docs/doc_examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-new-index-000001", + size: 0, + filter_path: "hits.total", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc b/docs/doc_examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc new file mode 100644 index 000000000..1357bcc36 --- /dev/null +++ b/docs/doc_examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + timestamp: { + type: "date", + }, + temperature: { + type: "long", + }, + voltage: { + type: "double", + }, + node: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc b/docs/doc_examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc new file mode 100644 index 000000000..08c23aa74 --- /dev/null +++ b/docs/doc_examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + redact: { + field: "message", + patterns: ["%{GITHUB_NAME:GITHUB_NAME}"], + pattern_definitions: { + GITHUB_NAME: "@%{USERNAME}", + }, + }, + }, + ], + }, + docs: [ + { + _source: { + message: "@elastic-data-management the PR is ready for review", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc b/docs/doc_examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc new file mode 100644 index 000000000..0db142db9 --- /dev/null +++ b/docs/doc_examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.deleteRepository({ + name: "my_repository", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff63ae39c34925dbfa54282ec9989124.asciidoc b/docs/doc_examples/ff63ae39c34925dbfa54282ec9989124.asciidoc new file mode 100644 index 000000000..6eec21a63 --- /dev/null +++ b/docs/doc_examples/ff63ae39c34925dbfa54282ec9989124.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + remote: { + host: "http://otherhost:9200/", + headers: { + Authorization: "ApiKey API_KEY_VALUE", + }, + }, + index: "my-index-000001", + query: { + match: { + test: "data", + }, + }, + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc b/docs/doc_examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc new file mode 100644 index 000000000..89bd3a53c --- /dev/null +++ b/docs/doc_examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.inferTrainedModel({ + model_id: "model2", + docs: [ + { + text_field: "Hi my name is Josh and I live in Berlin", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff7b81fa96c3b994efa3dee230512291.asciidoc b/docs/doc_examples/ff7b81fa96c3b994efa3dee230512291.asciidoc new file mode 100644 index 000000000..0fdc9278b --- /dev/null +++ b/docs/doc_examples/ff7b81fa96c3b994efa3dee230512291.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.graph.explore({ + index: "clicklogs", + query: { + match: { + "query.raw": "midi", + }, + }, + vertices: [ + { + field: "product", + }, + ], + connections: { + vertices: [ + { + field: "query.raw", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff945f5db7d8a9b0d9f6a2f2fcf849e3.asciidoc b/docs/doc_examples/ff945f5db7d8a9b0d9f6a2f2fcf849e3.asciidoc new file mode 100644 index 000000000..e8bf6e810 --- /dev/null +++ b/docs/doc_examples/ff945f5db7d8a9b0d9f6a2f2fcf849e3.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "index_1", + id: 1, + document: { + text: "Document in index 1", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "index_2", + id: 2, + refresh: "true", + document: { + text: "Document in index 2", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "index_1,index_2", + query: { + terms: { + _tier: ["data_hot", "data_warm"], + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ffcf80e1094aa2d774f56f6b0bc54827.asciidoc b/docs/doc_examples/ffcf80e1094aa2d774f56f6b0bc54827.asciidoc new file mode 100644 index 000000000..785b77d90 --- /dev/null +++ b/docs/doc_examples/ffcf80e1094aa2d774f56f6b0bc54827.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["word_delimiter_graph"], + text: "Neil's-Super-Duper-XL500--42+AutoCoder", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc b/docs/doc_examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc new file mode 100644 index 000000000..79abaeb97 --- /dev/null +++ b/docs/doc_examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.deleteUser({ + username: "jacknich", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ffe45a7c70071730c2078cabb8cbdf95.asciidoc b/docs/doc_examples/ffe45a7c70071730c2078cabb8cbdf95.asciidoc new file mode 100644 index 000000000..876bb701d --- /dev/null +++ b/docs/doc_examples/ffe45a7c70071730c2078cabb8cbdf95.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + 
query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: + "\n float[] v = doc['my_dense_vector'].vectorValue;\n float vm = doc['my_dense_vector'].magnitude;\n float dotProduct = 0;\n for (int i = 0; i < v.length; i++) {\n dotProduct += v[i] * params.queryVector[i];\n }\n return dotProduct / (vm * (float) params.queryVectorMag);\n ", + params: { + queryVector: [4, 3.4, -0.2], + queryVectorMag: 5.25357, + }, + }, + }, + }, +}); +console.log(response); +---- From 84ab2a787d0922ae3d317c9f87c987339595b18c Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 5 Aug 2024 13:12:20 -0500 Subject: [PATCH 369/647] Auto-approve codegen PRs (#2323) --- .github/workflows/auto-approve.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .github/workflows/auto-approve.yml diff --git a/.github/workflows/auto-approve.yml b/.github/workflows/auto-approve.yml new file mode 100644 index 000000000..0af84acb3 --- /dev/null +++ b/.github/workflows/auto-approve.yml @@ -0,0 +1,12 @@ +--- +name: "Auto-approve codegen PRs" +on: pull_request_target + +jobs: + auto-approve: + runs-on: ubuntu-latest + permissions: + pull-requests: write + if: github.actor == 'elasticmachine' + steps: + - uses: hmarr/auto-approve-action@v4 From 99cefe8b19461ecb054109ec7f952e20e33bb0fb Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 7 Aug 2024 02:31:39 +1000 Subject: [PATCH 370/647] Auto-generated code for main (#2320) Co-authored-by: Josh Mock --- docs/reference.asciidoc | 559 +++++++++++++-- src/api/api/cat.ts | 18 +- src/api/api/connector.ts | 1318 +++++++++++++++++++++++++++++++++++ src/api/api/ml.ts | 30 +- src/api/index.ts | 8 + src/api/types.ts | 81 ++- src/api/typesWithBodyKey.ts | 81 ++- 7 files changed, 2004 insertions(+), 91 deletions(-) create mode 100644 src/api/api/connector.ts diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 237af9280..880c16ebb 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -1624,8 +1624,8 @@ client.autoscaling.putAutoscalingPolicy({ name }) Get aliases. Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. -> info -> CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use [the /_alias endpoints](#endpoint-alias). + +CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. {ref}/cat-alias.html[Endpoint documentation] [source,ts] @@ -1663,9 +1663,9 @@ client.cat.allocation({ ... }) Get component templates. Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. -> info -> CAT APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use [the /_component_template endpoints](#endpoint-component-template). + +CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the get component template API. 
{ref}/cat-component-templates.html[Endpoint documentation]
[source,ts]
----
client.cat.componentTemplates({ ... })
----

[discrete]
==== count
Get a document count.
Provides quick access to a document count for a data stream, an index, or an entire cluster.
The document count only includes live documents, not deleted documents which have not yet been removed by the merge process.

CAT APIs are only intended for human consumption using the command line or Kibana console.
They are not intended for use by applications. For application consumption, use the count API.

{ref}/cat-count.html[Endpoint documentation]
[source,ts]
----
client.cat.count({ ... })
----

[discrete]
==== help
Get CAT help.
Returns help for the CAT APIs.

{ref}/cat.html[Endpoint documentation]
[source,ts]
----
client.cat.help()
----

[discrete]
==== indices
Get index information.
Returns high-level information about indices in a cluster, including backing indices for data streams.

Use this request to get the following information for each index in a cluster:
- shard count
- document count
- deleted document count
- primary store size
- total store size of all shards, including shard replicas

These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.
To get an accurate count of Elasticsearch documents, use the cat count or count APIs.

CAT APIs are only intended for human consumption using the command line or Kibana console.
They are not intended for use by applications. For application consumption, use an index endpoint.

{ref}/cat-indices.html[Endpoint documentation]
[source,ts]
----
client.cat.indices({ ... })
----

[discrete]
==== master
Get master node information.

{ref}/cat-master.html[Endpoint documentation]
[source,ts]
----
client.cat.master()
----

[discrete]
==== ml_data_frame_analytics
Get data frame analytics jobs.
Returns configuration and usage information about data frame analytics jobs.

CAT APIs are only intended for human consumption using the Kibana
console or command line. They are not intended for use by applications. For
application consumption, use the get data frame analytics jobs statistics API.

{ref}/cat-dfanalytics.html[Endpoint documentation]
[source,ts]
----
client.cat.mlDataFrameAnalytics({ ... })
----

[discrete]
==== ml_datafeeds
Get datafeeds.
Returns configuration and usage information about datafeeds.
This API returns a maximum of 10,000 datafeeds.
If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`
cluster privileges to use this API.

CAT APIs are only intended for human consumption using the Kibana
console or command line. They are not intended for use by applications. For
application consumption, use the get datafeed statistics API.

{ref}/cat-datafeeds.html[Endpoint documentation]
[source,ts]
----
client.cat.mlDatafeeds({ ... })
----

[discrete]
==== ml_jobs
Get anomaly detection jobs.
Returns configuration and usage information for anomaly detection jobs.
This API returns a maximum of 10,000 jobs.
If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. -> info -> CAT APIs are only intended for human consumption using the Kibana +CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use [the /_ml/anomaly_detectors endpoints](#endpoint-ml). +application consumption, use the get anomaly detection job statistics API. {ref}/cat-anomaly-detectors.html[Endpoint documentation] [source,ts] @@ -1916,10 +1913,9 @@ matches. Get trained models. Returns configuration and usage information about inference trained models. -> info -> CAT APIs are only intended for human consumption using the Kibana +CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use [the /_ml/trained_models endpoints](#endpoint-ml). +application consumption, use the get trained models statistics API. {ref}/cat-trained-model.html[Endpoint documentation] [source,ts] @@ -2159,10 +2155,9 @@ Accepts wildcard expressions. Get transforms. Returns configuration and usage information about transforms. -> info -> CAT APIs are only intended for human consumption using the Kibana +CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use [the /_transform endpoints](#endpoint-transform). +application consumption, use the get transform statistics API. {ref}/cat-transforms.html[Endpoint documentation] [source,ts] @@ -2582,7 +2577,7 @@ client.cluster.health({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or *. +** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. ** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Can be one of cluster, indices or shards. Controls the details level of the health information returned. ** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. @@ -2806,6 +2801,481 @@ client.cluster.stats({ ... }) If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. 
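For example, a quick cluster check-up using the two APIs above might look like this (a minimal sketch; the index name `my-index-000001` is only a placeholder):

[source,ts]
----
// Shard-level health for one index, then cluster-wide statistics.
const health = await client.cluster.health({
  index: "my-index-000001",
  level: "shards",
});
console.log(health.status);

const stats = await client.cluster.stats();
console.log(stats.cluster_name);
----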
+[discrete]
+=== connector
+[discrete]
+==== check_in
+Updates the `last_seen` field in the connector and sets it to the current timestamp.
+
+{ref}/check-in-connector-api.html[Endpoint documentation]
+[source,ts]
+----
+client.connector.checkIn({ connector_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be checked in
+
+[discrete]
+==== delete
+Deletes a connector.
+
+{ref}/delete-connector-api.html[Endpoint documentation]
+[source,ts]
+----
+client.connector.delete({ connector_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be deleted
+** *`delete_sync_jobs` (Optional, boolean)*: A flag indicating if associated sync jobs should be also removed. Defaults to false.
+
+[discrete]
+==== get
+Retrieves a connector.
+
+{ref}/get-connector-api.html[Endpoint documentation]
+[source,ts]
+----
+client.connector.get({ connector_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector
+
+[discrete]
+==== list
+Returns existing connectors.
+
+{ref}/list-connector-api.html[Endpoint documentation]
+[source,ts]
+----
+client.connector.list({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`from` (Optional, number)*: Starting offset (default: 0)
+** *`size` (Optional, number)*: Specifies a max number of results to get
+** *`index_name` (Optional, string | string[])*: A list of connector index names to fetch connector documents for
+** *`connector_name` (Optional, string | string[])*: A list of connector names to fetch connector documents for
+** *`service_type` (Optional, string | string[])*: A list of connector service types to fetch connector documents for
+** *`query` (Optional, string)*: A wildcard query string that filters connectors with matching name, description or index name
+
+[discrete]
+==== post
+Creates a connector.
+
+{ref}/create-connector-api.html[Endpoint documentation]
+[source,ts]
+----
+client.connector.post({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`description` (Optional, string)*
+** *`index_name` (Optional, string)*
+** *`is_native` (Optional, boolean)*
+** *`language` (Optional, string)*
+** *`name` (Optional, string)*
+** *`service_type` (Optional, string)*
+
+[discrete]
+==== put
+Creates or updates a connector.
+
+{ref}/create-connector-api.html[Endpoint documentation]
+[source,ts]
+----
+client.connector.put({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (Optional, string)*: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided.
+** *`description` (Optional, string)*
+** *`index_name` (Optional, string)*
+** *`is_native` (Optional, boolean)*
+** *`language` (Optional, string)*
+** *`name` (Optional, string)*
+** *`service_type` (Optional, string)*
+
+[discrete]
+==== sync_job_cancel
+Cancels a connector sync job.
+
+{ref}/cancel-connector-sync-job-api.html[Endpoint documentation]
+[source,ts]
+----
+client.connector.syncJobCancel({ connector_sync_job_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job
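+
+For example, cancelling a sync job might look like this (a minimal sketch; the id below is a placeholder for a real sync job identifier):
+
+[source,ts]
+----
+const response = await client.connector.syncJobCancel({
+  connector_sync_job_id: "my-connector-sync-job-id",
+});
+console.log(response);
+----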
+ +{ref}/check-in-connector-sync-job-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobCheckIn() +---- + + +[discrete] +==== sync_job_claim +Claims a connector sync job. +[source,ts] +---- +client.connector.syncJobClaim() +---- + + +[discrete] +==== sync_job_delete +Deletes a connector sync job. + +{ref}/delete-connector-sync-job-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobDelete({ connector_sync_job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job to be deleted + +[discrete] +==== sync_job_error +Sets an error for a connector sync job. + +{ref}/set-connector-sync-job-error-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobError() +---- + + +[discrete] +==== sync_job_get +Retrieves a connector sync job. + +{ref}/get-connector-sync-job-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobGet({ connector_sync_job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job + +[discrete] +==== sync_job_list +Lists connector sync jobs. + +{ref}/list-connector-sync-jobs-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobList({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`from` (Optional, number)*: Starting offset (default: 0) +** *`size` (Optional, number)*: Specifies a max number of results to get +** *`status` (Optional, Enum("canceling" | "canceled" | "completed" | "error" | "in_progress" | "pending" | "suspended"))*: A sync job status to fetch connector sync jobs for +** *`connector_id` (Optional, string)*: A connector id to fetch connector sync jobs for +** *`job_type` (Optional, Enum("full" | "incremental" | "access_control") | Enum("full" | "incremental" | "access_control")[])*: A list of job types to fetch the sync jobs for + +[discrete] +==== sync_job_post +Creates a connector sync job. + +{ref}/create-connector-sync-job-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobPost({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The id of the associated connector +** *`job_type` (Optional, Enum("full" | "incremental" | "access_control"))* +** *`trigger_method` (Optional, Enum("on_demand" | "scheduled"))* + +[discrete] +==== sync_job_update_stats +Updates the stats fields in the connector sync job document. + +{ref}/set-connector-sync-job-stats-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobUpdateStats() +---- + + +[discrete] +==== update_active_filtering +Activates the valid draft filtering for a connector. 
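Filtering rules are first staged as a draft; once the draft has been marked valid, this endpoint promotes it to the active filtering configuration. A minimal sketch (the connector id is a placeholder and `client` is assumed to be configured elsewhere):

[source,ts]
----
await client.connector.updateActiveFiltering({ connector_id: 'my-connector-id' })
----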
+
{ref}/update-connector-filtering-api.html[Endpoint documentation]
[source,ts]
----
client.connector.updateActiveFiltering({ connector_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`connector_id` (string)*: The unique identifier of the connector to be updated

[discrete]
==== update_api_key_id
Updates the API key id in the connector document

{ref}/update-connector-api-key-id-api.html[Endpoint documentation]
[source,ts]
----
client.connector.updateApiKeyId({ connector_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`connector_id` (string)*: The unique identifier of the connector to be updated
** *`api_key_id` (Optional, string)*
** *`api_key_secret_id` (Optional, string)*

[discrete]
==== update_configuration
Updates the configuration field in the connector document

{ref}/update-connector-configuration-api.html[Endpoint documentation]
[source,ts]
----
client.connector.updateConfiguration({ connector_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`connector_id` (string)*: The unique identifier of the connector to be updated
** *`configuration` (Optional, Record)*
** *`values` (Optional, Record)*

[discrete]
==== update_error
Updates the error field in the connector document

{ref}/update-connector-error-api.html[Endpoint documentation]
[source,ts]
----
client.connector.updateError({ connector_id, error })
----

[discrete]
==== Arguments

* *Request (object):*
** *`connector_id` (string)*: The unique identifier of the connector to be updated
** *`error` (T | null)*

[discrete]
==== update_features
Updates the connector features in the connector document.

{ref}/update-connector-features-api.html[Endpoint documentation]
[source,ts]
----
client.connector.updateFeatures()
----


[discrete]
==== update_filtering
Updates the filtering field in the connector document

{ref}/update-connector-filtering-api.html[Endpoint documentation]
[source,ts]
----
client.connector.updateFiltering({ connector_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`connector_id` (string)*: The unique identifier of the connector to be updated
** *`filtering` (Optional, { active, domain, draft }[])*
** *`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])*
** *`advanced_snippet` (Optional, { created_at, updated_at, value })*

[discrete]
==== update_filtering_validation
Updates the draft filtering validation info for a connector.
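This endpoint is typically called by the connector service after it has checked a draft against the data source. A hedged sketch of reporting a draft as valid (the connector id and the `state` value are illustrative):

[source,ts]
----
await client.connector.updateFilteringValidation({
  connector_id: 'my-connector-id',
  validation: { errors: [], state: 'valid' }
})
----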
+[source,ts] +---- +client.connector.updateFilteringValidation({ connector_id, validation }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`validation` ({ errors, state })* + +[discrete] +==== update_index_name +Updates the index_name in the connector document + +{ref}/update-connector-index-name-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateIndexName({ connector_id, index_name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`index_name` (T | null)* + +[discrete] +==== update_name +Updates the name and description fields in the connector document + +{ref}/update-connector-name-description-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateName({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`name` (Optional, string)* +** *`description` (Optional, string)* + +[discrete] +==== update_native +Updates the is_native flag in the connector document +[source,ts] +---- +client.connector.updateNative({ connector_id, is_native }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`is_native` (boolean)* + +[discrete] +==== update_pipeline +Updates the pipeline field in the connector document + +{ref}/update-connector-pipeline-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updatePipeline({ connector_id, pipeline }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })* + +[discrete] +==== update_scheduling +Updates the scheduling field in the connector document + +{ref}/update-connector-scheduling-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateScheduling({ connector_id, scheduling }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`scheduling` ({ access_control, full, incremental })* + +[discrete] +==== update_service_type +Updates the service type of the connector + +{ref}/update-connector-service-type-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateServiceType({ connector_id, service_type }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`service_type` (string)* + +[discrete] +==== update_status +Updates the status of the connector + +{ref}/update-connector-status-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateStatus({ connector_id, status }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`status` (Enum("created" | "needs_configuration" | "configured" | "connected" | "error"))* + [discrete] === dangling_indices [discrete] @@ -5517,7 +5987,8 @@ client.migration.postFeatureUpgrade() === ml [discrete] ==== clear_trained_model_deployment_cache -Clears a trained model deployment cache on all nodes where the trained model is assigned. 
+Clear trained model deployment cache. +Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. @@ -5559,6 +6030,7 @@ client.ml.closeJob({ job_id }) [discrete] ==== delete_calendar +Delete a calendar. Removes all scheduled events from a calendar, then deletes it. {ref}/ml-delete-calendar.html[Endpoint documentation] @@ -5575,7 +6047,7 @@ client.ml.deleteCalendar({ calendar_id }) [discrete] ==== delete_calendar_event -Deletes scheduled events from a calendar. +Delete events from a calendar. {ref}/ml-delete-calendar-event.html[Endpoint documentation] [source,ts] @@ -5593,7 +6065,7 @@ You can obtain this identifier by using the get calendar events API. [discrete] ==== delete_calendar_job -Deletes anomaly detection jobs from a calendar. +Delete anomaly jobs from a calendar. {ref}/ml-delete-calendar-job.html[Endpoint documentation] [source,ts] @@ -5611,7 +6083,7 @@ list of jobs or groups. [discrete] ==== delete_data_frame_analytics -Deletes a data frame analytics job. +Delete a data frame analytics job. {ref}/delete-dfanalytics.html[Endpoint documentation] [source,ts] @@ -5629,7 +6101,7 @@ client.ml.deleteDataFrameAnalytics({ id }) [discrete] ==== delete_datafeed -Deletes an existing datafeed. +Delete a datafeed. {ref}/ml-delete-datafeed.html[Endpoint documentation] [source,ts] @@ -5650,7 +6122,7 @@ stopping and deleting the datafeed. [discrete] ==== delete_expired_data -Deletes expired and unused machine learning data. +Delete expired ML data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. @@ -5678,7 +6150,7 @@ behavior is no throttling. [discrete] ==== delete_filter -Deletes a filter. +Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. @@ -5696,7 +6168,7 @@ client.ml.deleteFilter({ filter_id }) [discrete] ==== delete_forecast -Deletes forecasts from a machine learning job. +Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more @@ -5755,7 +6227,7 @@ job deletion completes. [discrete] ==== delete_model_snapshot -Deletes an existing model snapshot. +Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. @@ -5775,8 +6247,8 @@ client.ml.deleteModelSnapshot({ job_id, snapshot_id }) [discrete] ==== delete_trained_model -Deletes an existing trained inference model that is currently not referenced -by an ingest pipeline. +Delete an unreferenced trained model. +The request deletes a trained inference model that is not referenced by an ingest pipeline. {ref}/delete-trained-models.html[Endpoint documentation] [source,ts] @@ -5793,7 +6265,7 @@ client.ml.deleteTrainedModel({ model_id }) [discrete] ==== delete_trained_model_alias -Deletes a trained model alias. +Delete a trained model alias. 
This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. @@ -5813,6 +6285,7 @@ client.ml.deleteTrainedModelAlias({ model_alias, model_id }) [discrete] ==== estimate_model_memory +Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. @@ -5844,7 +6317,7 @@ omitted from the request if no detectors have a `by_field_name`, [discrete] ==== evaluate_data_frame -Evaluates the data frame analytics for an annotated index. +Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth @@ -5866,7 +6339,7 @@ client.ml.evaluateDataFrame({ evaluation, index }) [discrete] ==== explain_data_frame_analytics -Explains a data frame analytics config. +Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: @@ -9096,7 +9569,7 @@ client.security.putPrivileges({ ... }) ==== Arguments * *Request (object):* -** *`privileges` (Optional, Record>)* +** *`privileges` (Optional, Record>)* ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. [discrete] diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index 0a740b584..f157677ca 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -45,7 +45,7 @@ export default class Cat { } /** - * Get aliases. Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. > info > CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use [the /_alias endpoints](#endpoint-alias). + * Get aliases. Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html | Elasticsearch API documentation} */ async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -125,7 +125,7 @@ export default class Cat { } /** - * Get component templates. Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. > info > CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use [the /_component_template endpoints](#endpoint-component-template). + * Get component templates. 
Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html | Elasticsearch API documentation}
   */
  async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatComponentTemplatesResponse>
@@ -165,7 +165,7 @@ export default class Cat {
  }

  /**
- * Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster.n/ The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. > info > CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use [the /_count endpoints](#endpoint-count).
+ * Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html | Elasticsearch API documentation}
   */
  async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatCountResponse>
@@ -305,7 +305,7 @@ export default class Cat {
  }

  /**
- * Get index information. Returns high-level information about indices in a cluster, including backing indices for data streams. > info > CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the [/_cat/count](#operation-cat-count) or [count](#endpoint-count) endpoints.
+ * Get index information. Returns high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas. These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. CAT APIs are only intended for human consumption using the command line or Kibana console.
They are not intended for use by applications. For application consumption, use an index endpoint. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html | Elasticsearch API documentation} */ async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -375,7 +375,7 @@ export default class Cat { } /** - * Get data frame analytics jobs. Returns configuration and usage information about data frame analytics jobs. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/data_frame/analytics endpoints](#endpoint-ml). + * Get data frame analytics jobs. Returns configuration and usage information about data frame analytics jobs. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-dfanalytics.html | Elasticsearch API documentation} */ async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -415,7 +415,7 @@ export default class Cat { } /** - * Get datafeeds. Returns configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/datafeeds endpoints](#endpoint-ml). + * Get datafeeds. Returns configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-datafeeds.html | Elasticsearch API documentation} */ async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -455,7 +455,7 @@ export default class Cat { } /** - * Get anomaly detection jobs. Returns configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/anomaly_detectors endpoints](#endpoint-ml). + * Get anomaly detection jobs. Returns configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. 
If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-anomaly-detectors.html | Elasticsearch API documentation} */ async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -495,7 +495,7 @@ export default class Cat { } /** - * Get trained models. Returns configuration and usage information about inference trained models. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/trained_models endpoints](#endpoint-ml). + * Get trained models. Returns configuration and usage information about inference trained models. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-trained-model.html | Elasticsearch API documentation} */ async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -955,7 +955,7 @@ export default class Cat { } /** - * Get transforms. Returns configuration and usage information about transforms. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_transform endpoints](#endpoint-transform). + * Get transforms. Returns configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-transforms.html | Elasticsearch API documentation} */ async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts new file mode 100644 index 000000000..6a2585bc8 --- /dev/null +++ b/src/api/api/connector.ts @@ -0,0 +1,1318 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+/* eslint-disable import/export */
/* eslint-disable @typescript-eslint/no-misused-new */
/* eslint-disable @typescript-eslint/no-extraneous-class */
/* eslint-disable @typescript-eslint/no-unused-vars */

// This file was automatically generated by elastic/elastic-client-generator-js
// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
// and elastic/elastic-client-generator-js to regenerate this file again.

import {
  Transport,
  TransportRequestMetadata,
  TransportRequestOptions,
  TransportRequestOptionsWithMeta,
  TransportRequestOptionsWithOutMeta,
  TransportResult
} from '@elastic/transport'
import * as T from '../types'
import * as TB from '../typesWithBodyKey'
interface That { transport: Transport }

export default class Connector {
  transport: Transport
  constructor (transport: Transport) {
    this.transport = transport
  }

  /**
   * Updates the last_seen field in the connector and sets it to the current timestamp.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-api.html | Elasticsearch API documentation}
   */
  async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorCheckInResponse>
  async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorCheckInResponse, unknown>>
  async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise<T.ConnectorCheckInResponse>
  async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = ['connector_id']
    const querystring: Record<string, any> = {}
    const body = undefined

    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
      } else if (key !== 'body') {
        // @ts-expect-error
        querystring[key] = params[key]
      }
    }

    const method = 'PUT'
    const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_check_in`
    const meta: TransportRequestMetadata = {
      name: 'connector.check_in',
      pathParts: {
        connector_id: params.connector_id
      }
    }
    return await this.transport.request({ path, method, querystring, body, meta }, options)
  }

  /**
   * Deletes a connector.
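 * A minimal usage sketch (assumes an instantiated `client`; the connector id is a placeholder):
 *
 * @example
 * await client.connector.delete({ connector_id: 'my-connector-id', delete_sync_jobs: true })
 *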
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-api.html | Elasticsearch API documentation} + */ + async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.delete', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retrieves a connector. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-api.html | Elasticsearch API documentation} + */ + async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.get', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates last sync stats in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-last-sync-api.html | Elasticsearch API documentation} + */ + async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithMeta): Promise> + async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise + async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['last_access_control_sync_error', 
'last_access_control_sync_scheduled_at', 'last_access_control_sync_status', 'last_deleted_document_count', 'last_incremental_sync_scheduled_at', 'last_indexed_document_count', 'last_seen', 'last_sync_error', 'last_sync_scheduled_at', 'last_sync_status', 'last_synced', 'sync_cursor'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_last_sync` + const meta: TransportRequestMetadata = { + name: 'connector.last_sync', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Returns existing connectors. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-api.html | Elasticsearch API documentation} + */ + async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptions): Promise + async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_connector' + const meta: TransportRequestMetadata = { + name: 'connector.list' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates a connector. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html | Elasticsearch API documentation} + */ + async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptions): Promise + async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_connector' + const meta: TransportRequestMetadata = { + name: 'connector.post' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates or updates a connector. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html | Elasticsearch API documentation} + */ + async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptions): Promise + async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.connector_id != null) { + method = 'PUT' + path = `/_connector/${encodeURIComponent(params.connector_id.toString())}` + } else { + method = 'PUT' + path = '/_connector' + } + const meta: TransportRequestMetadata = { + name: 'connector.put', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Deletes a connector secret. + */ + async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_connector/_secret/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.secret_delete', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retrieves a secret stored by Connectors. 
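 * Unlike the other connector APIs in this class, the secret endpoints carry no public
 * endpoint documentation link; they appear to be intended for internal use by Elastic
 * managed connectors rather than for direct application use.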
+ */ + async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_connector/_secret/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.secret_get', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates a secret for a Connector. + */ + async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_connector/_secret' + const meta: TransportRequestMetadata = { + name: 'connector.secret_post' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates or updates a secret for a Connector. + */ + async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/_secret/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.secret_put', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Cancels a connector sync job. 
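 * Cancellation is asynchronous: the job status moves to `canceling` and becomes `canceled`
 * once the connector service acknowledges it. A minimal usage sketch (assumes an
 * instantiated `client`; the id is a placeholder):
 *
 * @example
 * await client.connector.syncJobCancel({ connector_sync_job_id: 'my-sync-job-id' })
 *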
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cancel-connector-sync-job-api.html | Elasticsearch API documentation} + */ + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_sync_job_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}/_cancel` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_cancel', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Checks in a connector sync job (refreshes 'last_seen'). + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-sync-job-api.html | Elasticsearch API documentation} + */ + async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_sync_job_id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}/_check_in` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_check_in', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Claims a connector sync job. 
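 * Claiming is part of the connector service protocol: a service instance claims a pending
 * sync job before executing it. The parameters are typed as `T.TODO` because a stable
 * request type has not yet been generated for this endpoint.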
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/claim-connector-sync-job-api.html | Elasticsearch API documentation} + */ + async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_sync_job_id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}/_claim` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_claim', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Deletes a connector sync job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-sync-job-api.html | Elasticsearch API documentation} + */ + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_sync_job_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_delete', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Sets an error for a connector sync job. 
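 * Marks the job as failed: the sync job status becomes `error` and the error message
 * supplied in the request is recorded on the job document.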
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-error-api.html | Elasticsearch API documentation} + */ + async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_sync_job_id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}/_error` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_error', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retrieves a connector sync job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-sync-job-api.html | Elasticsearch API documentation} + */ + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_sync_job_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_get', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Lists connector sync jobs. 
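 * Results can be filtered and paginated through the query string. A minimal usage sketch
 * (assumes an instantiated `client`; the connector id is a placeholder):
 *
 * @example
 * const jobs = await client.connector.syncJobList({ connector_id: 'my-connector-id', status: 'in_progress' })
 *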
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-sync-jobs-api.html | Elasticsearch API documentation} + */ + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_connector/_sync_job' + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_list' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates a connector sync job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-sync-job-api.html | Elasticsearch API documentation} + */ + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['id', 'job_type', 'trigger_method'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_connector/_sync_job' + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_post' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the stats fields in the connector sync job document. 
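 * Intended for connector services reporting progress while a job runs; typical fields
 * include indexed and deleted document counts and the indexed document volume.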
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-stats-api.html | Elasticsearch API documentation} + */ + async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_sync_job_id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}/_stats` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_update_stats', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Activates the valid draft filtering for a connector. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html | Elasticsearch API documentation} + */ + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_filtering/_activate` + const meta: TransportRequestMetadata = { + name: 'connector.update_active_filtering', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the API key id in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-api-key-id-api.html | Elasticsearch API documentation} + */ + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | 
TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['api_key_id', 'api_key_secret_id'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_api_key_id` + const meta: TransportRequestMetadata = { + name: 'connector.update_api_key_id', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the configuration field in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-configuration-api.html | Elasticsearch API documentation} + */ + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['configuration', 'values'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_configuration` + const meta: TransportRequestMetadata = { + name: 'connector.update_configuration', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the filtering field in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-error-api.html | Elasticsearch API documentation} + */ + async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise + async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['error'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_error` + const meta: TransportRequestMetadata = { + name: 'connector.update_error', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the connector features in the connector document. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-features-api.html | Elasticsearch API documentation} + */ + async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_features` + const meta: TransportRequestMetadata = { + name: 'connector.update_features', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the filtering field in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html | Elasticsearch API documentation} + */ + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['filtering', 'rules', 'advanced_snippet'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_filtering` + const meta: TransportRequestMetadata = { + name: 'connector.update_filtering', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the draft filtering validation info for a connector. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-validation-api.html | Elasticsearch API documentation} + */ + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['validation'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_filtering/_validation` + const meta: TransportRequestMetadata = { + name: 'connector.update_filtering_validation', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the index_name in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-index-name-api.html | Elasticsearch API documentation} + */ + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['index_name'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_index_name` + const meta: TransportRequestMetadata = { + name: 'connector.update_index_name', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the name and description fields in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-name-description-api.html | Elasticsearch API documentation} + */ + async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise + async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['name', 'description'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_name` + const meta: TransportRequestMetadata = { + name: 'connector.update_name', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the is_native flag in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-native-api.html | Elasticsearch API documentation} + */ + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['is_native'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? 
{ ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_native` + const meta: TransportRequestMetadata = { + name: 'connector.update_native', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the pipeline field in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-pipeline-api.html | Elasticsearch API documentation} + */ + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['pipeline'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_pipeline` + const meta: TransportRequestMetadata = { + name: 'connector.update_pipeline', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the scheduling field in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-scheduling-api.html | Elasticsearch API documentation} + */ + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['scheduling'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_scheduling` + const meta: TransportRequestMetadata = { + name: 'connector.update_scheduling', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the service type of the connector + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-service-type-api.html | Elasticsearch API documentation} + */ + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['service_type'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_service_type` + const meta: TransportRequestMetadata = { + name: 'connector.update_service_type', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the status of the connector + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-status-api.html | Elasticsearch API documentation} + */ + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['status'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_status` + const meta: TransportRequestMetadata = { + name: 'connector.update_status', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index e0805125a..62ac0df56 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -45,7 +45,7 @@ export default class Ml { } /** - * Clears a trained model deployment cache on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. + * Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. 
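All of the generated methods above funnel into `transport.request`, so from an application they read like ordinary client calls. A minimal usage sketch of the new sync job endpoints — the node URL and connector id are placeholders, and top-level await assumes an ES module context:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// queue a full content sync for an existing connector (id is illustrative)
const created = await client.connector.syncJobPost({ id: 'my-connector', job_type: 'full' })
console.log(created.id)

// list sync jobs across all connectors
const jobs = await client.connector.syncJobList()
console.log(jobs.count, jobs.results)
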
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index e0805125a..62ac0df56 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -45,7 +45,7 @@ export default class Ml {
   }
 
   /**
-   * Clears a trained model deployment cache on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment.
+   * Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-trained-model-deployment-cache.html | Elasticsearch API documentation}
    */
   async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlClearTrainedModelDeploymentCacheResponse>
@@ -121,7 +121,7 @@ export default class Ml {
   }
 
   /**
-   * Removes all scheduled events from a calendar, then deletes it.
+   * Delete a calendar. Removes all scheduled events from a calendar, then deletes it.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar.html | Elasticsearch API documentation}
    */
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarResponse>
@@ -153,7 +153,7 @@ export default class Ml {
   }
 
   /**
-   * Deletes scheduled events from a calendar.
+   * Delete events from a calendar.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-event.html | Elasticsearch API documentation}
    */
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarEventResponse>
@@ -186,7 +186,7 @@ export default class Ml {
   }
 
   /**
-   * Deletes anomaly detection jobs from a calendar.
+   * Delete anomaly jobs from a calendar.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-job.html | Elasticsearch API documentation}
    */
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarJobResponse>
@@ -219,7 +219,7 @@ export default class Ml {
   }
 
   /**
-   * Deletes a data frame analytics job.
+   * Delete a data frame analytics job.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-dfanalytics.html | Elasticsearch API documentation}
    */
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteDataFrameAnalyticsResponse>
@@ -251,7 +251,7 @@ export default class Ml {
   }
 
   /**
-   * Deletes an existing datafeed.
+   * Delete a datafeed.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-datafeed.html | Elasticsearch API documentation}
    */
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteDatafeedResponse>
@@ -283,7 +283,7 @@ export default class Ml {
   }
 
   /**
-   * Deletes expired and unused machine learning data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>.
+   * Delete expired ML data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-expired-data.html | Elasticsearch API documentation}
    */
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteExpiredDataResponse>
@@ -335,7 +335,7 @@ export default class Ml {
   }
 
   /**
-   * Deletes a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.
+   * Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-filter.html | Elasticsearch API documentation}
    */
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteFilterResponse>
@@ -367,7 +367,7 @@ export default class Ml {
   }
 
   /**
-   * Deletes forecasts from a machine learning job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire.
+   * Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-forecast.html | Elasticsearch API documentation}
    */
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteForecastResponse>
@@ -439,7 +439,7 @@ export default class Ml {
   }
 
   /**
-   * Deletes an existing model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API.
+   * Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-snapshot.html | Elasticsearch API documentation}
    */
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteModelSnapshotResponse>
@@ -472,7 +472,7 @@ export default class Ml {
   }
 
   /**
-   * Deletes an existing trained inference model that is currently not referenced by an ingest pipeline.
+   * Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-trained-models.html | Elasticsearch API documentation}
    */
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteTrainedModelResponse>
@@ -504,7 +504,7 @@ export default class Ml {
   }
 
   /**
-   * Deletes a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error.
+   * Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-trained-models-aliases.html | Elasticsearch API documentation}
    */
   async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteTrainedModelAliasResponse>
@@ -537,7 +537,7 @@ export default class Ml {
   }
 
   /**
-   * Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references.
+   * Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-apis.html | Elasticsearch API documentation}
    */
   async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlEstimateModelMemoryResponse>
@@ -579,7 +579,7 @@ export default class Ml {
   }
 
   /**
-   * Evaluates the data frame analytics for an annotated index. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.
+   * Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/evaluate-dfanalytics.html | Elasticsearch API documentation}
    */
   async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlEvaluateDataFrameResponse>
@@ -620,7 +620,7 @@ export default class Ml {
   }
 
   /**
-   * Explains a data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.
+   * Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.
    * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/explain-dfanalytics.html | Elasticsearch API documentation}
    */
   async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlExplainDataFrameAnalyticsResponse>
diff --git a/src/api/index.ts b/src/api/index.ts
index e2ef990bb..f69eb473d 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -35,6 +35,7 @@ import CcrApi from './api/ccr'
 import clearScrollApi from './api/clear_scroll'
 import closePointInTimeApi from './api/close_point_in_time'
 import ClusterApi from './api/cluster'
+import ConnectorApi from './api/connector'
 import countApi from './api/count'
 import createApi from './api/create'
 import DanglingIndicesApi from './api/dangling_indices'
@@ -123,6 +124,7 @@ export default interface API {
   clearScroll: typeof clearScrollApi
   closePointInTime: typeof closePointInTimeApi
   cluster: ClusterApi
+  connector: ConnectorApi
   count: typeof countApi
   create: typeof createApi
   danglingIndices: DanglingIndicesApi
@@ -206,6 +208,7 @@ const kAutoscaling = Symbol('Autoscaling')
 const kCat = Symbol('Cat')
 const kCcr = Symbol('Ccr')
 const kCluster = Symbol('Cluster')
+const kConnector = Symbol('Connector')
 const kDanglingIndices = Symbol('DanglingIndices')
 const kEnrich = Symbol('Enrich')
 const kEql = Symbol('Eql')
@@ -248,6 +251,7 @@ export default class API {
   [kCat]: symbol | null
   [kCcr]: symbol | null
   [kCluster]: symbol | null
+  [kConnector]: symbol | null
   [kDanglingIndices]: symbol | null
   [kEnrich]: symbol | null
   [kEql]: symbol | null
@@ -289,6 +293,7 @@ export default class API {
     this[kCat] = null
     this[kCcr] = null
     this[kCluster] = null
+    this[kConnector] = null
    this[kDanglingIndices] = null
     this[kEnrich] = null
     this[kEql] = null
@@ -389,6 +394,9 @@ Object.defineProperties(API.prototype, {
   cluster: {
     get () { return this[kCluster] === null ? (this[kCluster] = new ClusterApi(this.transport)) : this[kCluster] }
   },
+  connector: {
+    get () { return this[kConnector] === null ? (this[kConnector] = new ConnectorApi(this.transport)) : this[kConnector] }
+  },
   danglingIndices: {
     get () { return this[kDanglingIndices] === null ? (this[kDanglingIndices] = new DanglingIndicesApi(this.transport)) : this[kDanglingIndices] }
   },
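The index.ts hunks above register the new namespace with the client's lazy-instantiation pattern: a symbol-keyed field starts as null, and a prototype getter constructs and caches the API object on first access. A stripped-down sketch of that pattern — FooApi and the transport stub are illustrative stand-ins, not part of the client:

class FooApi {
  constructor (readonly transport: unknown) {}
}

const kFoo = Symbol('Foo')

class API {
  [kFoo]: FooApi | null = null
  transport: unknown = {}
}

Object.defineProperties(API.prototype, {
  foo: {
    // instantiate lazily on first property access, then reuse the cached instance
    get (this: API) { return this[kFoo] === null ? (this[kFoo] = new FooApi(this.transport)) : this[kFoo] }
  }
})

const api = new API() as API & { foo: FooApi }
console.log(api.foo === api.foo) // true: the same cached instance is returned
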
diff --git a/src/api/types.ts b/src/api/types.ts
index d9940f8f3..ce9cb09ab 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -9125,9 +9125,9 @@ export interface ConnectorConnectorConfigProperties {
   required: boolean
   sensitive: boolean
   tooltip?: string | null
-  type: ConnectorConnectorFieldType
-  ui_restrictions: string[]
-  validations: ConnectorValidation[]
+  type?: ConnectorConnectorFieldType
+  ui_restrictions?: string[]
+  validations?: ConnectorValidation[]
   value: any
 }
 
@@ -9989,22 +9989,51 @@ export interface GraphExploreResponse {
   vertices: GraphVertex[]
 }
 
-export type IlmActions = any
+export interface IlmActions {
+  allocate?: IlmAllocateAction
+  delete?: IlmDeleteAction
+  downsample?: IlmDownsampleAction
+  freeze?: EmptyObject
+  forcemerge?: IlmForceMergeAction
+  migrate?: IlmMigrateAction
+  readonly?: EmptyObject
+  rollover?: IlmRolloverAction
+  set_priority?: IlmSetPriorityAction
+  searchable_snapshot?: IlmSearchableSnapshotAction
+  shrink?: IlmShrinkAction
+  unfollow?: EmptyObject
+  wait_for_snapshot?: IlmWaitForSnapshotAction
+}
+
+export interface IlmAllocateAction {
+  number_of_replicas?: integer
+  total_shards_per_node?: integer
+  include?: Record<string, string>
+  exclude?: Record<string, string>
+  require?: Record<string, string>
+}
+
+export interface IlmDeleteAction {
+  delete_searchable_snapshot?: boolean
+}
 
-export interface IlmConfigurations {
-  rollover?: IndicesRolloverRolloverConditions
-  forcemerge?: IlmForceMergeConfiguration
-  shrink?: IlmShrinkConfiguration
+export interface IlmDownsampleAction {
+  fixed_interval: DurationLarge
+  wait_timeout?: Duration
 }
 
-export interface IlmForceMergeConfiguration {
+export interface IlmForceMergeAction {
   max_num_segments: integer
+  index_codec?: string
+}
+
+export interface IlmMigrateAction {
+  enabled?: boolean
 }
 
 export interface IlmPhase {
   actions?: IlmActions
   min_age?: Duration | long
-  configurations?: IlmConfigurations
 }
 
 export interface IlmPhases {
@@ -10020,8 +10049,36 @@ export interface IlmPolicy {
   _meta?: Metadata
 }
 
-export interface IlmShrinkConfiguration {
-  number_of_shards: integer
+export interface IlmRolloverAction {
+  max_size?: ByteSize
+  max_primary_shard_size?: ByteSize
+  max_age?: Duration
+  max_docs?: long
+  max_primary_shard_docs?: long
+  min_size?: ByteSize
+  min_primary_shard_size?: ByteSize
+  min_age?: Duration
+  min_docs?: long
+  min_primary_shard_docs?: long
+}
+
+export interface IlmSearchableSnapshotAction {
+  snapshot_repository: string
+  force_merge_index?: boolean
+}
+
+export interface IlmSetPriorityAction {
+  priority?: integer
+}
+
+export interface IlmShrinkAction {
+  number_of_shards?: integer
+  max_primary_shard_size?: ByteSize
+  allow_write_after_shrink?: boolean
+}
+
+export interface IlmWaitForSnapshotAction {
+  policy: string
 }
 
 export interface IlmDeleteLifecycleRequest extends RequestBase {
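Because `IlmActions` is now a concrete interface rather than `any`, a lifecycle policy literal becomes fully type-checked. A hedged sketch of what that enables — the import path and the threshold values are illustrative, not taken from the patch:

import type * as T from '@elastic/elasticsearch/lib/api/types'

const policy: T.IlmPolicy = {
  phases: {
    hot: {
      actions: {
        // roll the index over once it grows or ages past these limits
        rollover: { max_primary_shard_size: '50gb', max_age: '30d' },
        set_priority: { priority: 100 }
      }
    },
    delete: {
      min_age: '90d',
      actions: {
        delete: { delete_searchable_snapshot: true }
      }
    }
  }
}
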
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index 1e2d44d87..cef483161 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -9226,9 +9226,9 @@ export interface ConnectorConnectorConfigProperties {
   required: boolean
   sensitive: boolean
   tooltip?: string | null
-  type: ConnectorConnectorFieldType
-  ui_restrictions: string[]
-  validations: ConnectorValidation[]
+  type?: ConnectorConnectorFieldType
+  ui_restrictions?: string[]
+  validations?: ConnectorValidation[]
   value: any
 }
 
@@ -10154,22 +10154,51 @@ export interface GraphExploreResponse {
   vertices: GraphVertex[]
 }
 
-export type IlmActions = any
+export interface IlmActions {
+  allocate?: IlmAllocateAction
+  delete?: IlmDeleteAction
+  downsample?: IlmDownsampleAction
+  freeze?: EmptyObject
+  forcemerge?: IlmForceMergeAction
+  migrate?: IlmMigrateAction
+  readonly?: EmptyObject
+  rollover?: IlmRolloverAction
+  set_priority?: IlmSetPriorityAction
+  searchable_snapshot?: IlmSearchableSnapshotAction
+  shrink?: IlmShrinkAction
+  unfollow?: EmptyObject
+  wait_for_snapshot?: IlmWaitForSnapshotAction
+}
+
+export interface IlmAllocateAction {
+  number_of_replicas?: integer
+  total_shards_per_node?: integer
+  include?: Record<string, string>
+  exclude?: Record<string, string>
+  require?: Record<string, string>
+}
+
+export interface IlmDeleteAction {
+  delete_searchable_snapshot?: boolean
+}
 
-export interface IlmConfigurations {
-  rollover?: IndicesRolloverRolloverConditions
-  forcemerge?: IlmForceMergeConfiguration
-  shrink?: IlmShrinkConfiguration
+export interface IlmDownsampleAction {
+  fixed_interval: DurationLarge
+  wait_timeout?: Duration
 }
 
-export interface IlmForceMergeConfiguration {
+export interface IlmForceMergeAction {
   max_num_segments: integer
+  index_codec?: string
+}
+
+export interface IlmMigrateAction {
+  enabled?: boolean
 }
 
 export interface IlmPhase {
   actions?: IlmActions
   min_age?: Duration | long
-  configurations?: IlmConfigurations
 }
 
 export interface IlmPhases {
@@ -10185,8 +10214,36 @@ export interface IlmPolicy {
   _meta?: Metadata
 }
 
-export interface IlmShrinkConfiguration {
-  number_of_shards: integer
+export interface IlmRolloverAction {
+  max_size?: ByteSize
+  max_primary_shard_size?: ByteSize
+  max_age?: Duration
+  max_docs?: long
+  max_primary_shard_docs?: long
+  min_size?: ByteSize
+  min_primary_shard_size?: ByteSize
+  min_age?: Duration
+  min_docs?: long
+  min_primary_shard_docs?: long
+}
+
+export interface IlmSearchableSnapshotAction {
+  snapshot_repository: string
+  force_merge_index?: boolean
+}
+
+export interface IlmSetPriorityAction {
+  priority?: integer
+}
+
+export interface IlmShrinkAction {
+  number_of_shards?: integer
+  max_primary_shard_size?: ByteSize
+  allow_write_after_shrink?: boolean
+}
+
+export interface IlmWaitForSnapshotAction {
+  policy: string
 }
 
 export interface IlmDeleteLifecycleRequest extends RequestBase {

From 37b8a332093bdaddf628d360e465e97ab0558155 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Mon, 12 Aug 2024 12:01:21 -0500
Subject: [PATCH 371/647] Try running auto-approve after other jobs (#2328)

---
 .github/workflows/auto-approve.yml | 12 ------------
 .github/workflows/nodejs.yml       | 12 +++++++++++-
 2 files changed, 11 insertions(+), 13 deletions(-)
 delete mode 100644 .github/workflows/auto-approve.yml

diff --git a/.github/workflows/auto-approve.yml b/.github/workflows/auto-approve.yml
deleted file mode 100644
index 0af84acb3..000000000
--- a/.github/workflows/auto-approve.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-name: "Auto-approve codegen PRs"
-on: pull_request_target
-
-jobs:
-  auto-approve:
-    runs-on: ubuntu-latest
-    permissions:
-      pull-requests: write
-    if: github.actor == 'elasticmachine'
-    steps:
-      - uses: hmarr/auto-approve-action@v4
diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml
index 225b67732..7b61028d4 100644
--- a/.github/workflows/nodejs.yml
+++ b/.github/workflows/nodejs.yml
@@ -2,7 +2,7 @@
 name: Node CI
 
 on:
-  pull_request: {}
+  pull_request_target: {}
 
 jobs:
   paths-filter:
@@ -72,3 +72,13 @@ jobs:
       - name: License checker
         run: |
           npm run license-checker
+
+  auto-approve:
+    name: Auto-approve
+    needs: [test, license]
+    runs-on: ubuntu-latest
+    permissions:
+      pull-requests: write
+    if: github.actor == 'elasticmachine'
+    steps:
+      - uses: hmarr/auto-approve-action@v4
From 69b243171b253cf04cdccc57e1af4eb88bff479b Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Mon, 12 Aug 2024 13:07:43 -0500
Subject: [PATCH 372/647] Update changelog for 8.15 (#2332)

---
 docs/changelog.asciidoc | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc
index d3474617d..508bb8b9d 100644
--- a/docs/changelog.asciidoc
+++ b/docs/changelog.asciidoc
@@ -7,6 +7,12 @@
 [discrete]
 ==== Features
 
+[discrete]
+===== Support for Elasticsearch `v8.15.0`
+
+You can find all the API changes
+https://www.elastic.co/guide/en/elasticsearch/reference/8.15/release-notes-8.15.0.html[here].
+
 [discrete]
 ===== OpenTelemetry zero-code instrumentation support
 
@@ -14,6 +20,17 @@ For those that use an observability service that supports OpenTelemetry spans, t
 See {jsclient}/observability.html#_opentelemetry[the docs] for more information.
 
+[discrete]
+=== 8.14.1
+
+[discrete]
+==== Features
+
+[discrete]
+===== Improved support for Elasticsearch `8.14`
+
+Updated types based on fixes and changes to the Elasticsearch specification.
+
 [discrete]
 === 8.14.0

From 77e2f613f234467f3cfdfe0337406e2968fd4abb Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Mon, 12 Aug 2024 13:20:48 -0500
Subject: [PATCH 373/647] Generate documentation example snippets (#2329)

* Update docs example generation script

* Add docs examples generation to codegen job
---
 .buildkite/make.mjs               |   7 ++
 package.json                      |   1 +
 scripts/generate-docs-examples.js | 203 +++++++++++------------------
 3 files changed, 84 insertions(+), 127 deletions(-)

diff --git a/.buildkite/make.mjs b/.buildkite/make.mjs
index 0f3e724fd..3026b61f3 100644
--- a/.buildkite/make.mjs
+++ b/.buildkite/make.mjs
@@ -125,6 +125,13 @@ async function codegen (args) {
   await $`cp -R ${join(import.meta.url, '..', '..', 'elastic-client-generator-js', 'output')}/* ${join(import.meta.url, '..', 'src', 'api')}`
   await $`mv ${join(import.meta.url, '..', 'src', 'api', 'reference.asciidoc')} ${join(import.meta.url, '..', 'docs', 'reference.asciidoc')}`
   await $`npm run build`
+
+  // run docs example generation
+  if (version === 'main') {
+    await $`node ./scripts/generate-docs-examples.js`
+  } else {
+    await $`node ./scripts/generate-docs-examples.js ${version.split('.').slice(0, 2).join('.')}`
+  }
 }
 
 function onError (err) {
diff --git a/package.json b/package.json
index ed81cc6d4..a2f5293f7 100644
--- a/package.json
+++ b/package.json
@@ -50,6 +50,7 @@
     "node": ">=18"
   },
   "devDependencies": {
+    "@elastic/request-converter": "^8.16.0",
     "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1",
     "@types/debug": "^4.1.7",
     "@types/ms": "^0.7.31",
diff --git a/scripts/generate-docs-examples.js b/scripts/generate-docs-examples.js
index 02a04420a..a9c229095 100644
--- a/scripts/generate-docs-examples.js
+++ b/scripts/generate-docs-examples.js
@@ -17,147 +17,96 @@
  * under the License.
  */
 
-'use strict'
-
-/**
- * To run this generator you must have the
- * `alternatives_report.spec.json` placed in the root of this project.
- * To get the `alternatives_report.spec.json` you must run the script
- * to parse the original `alternatives_report.json`, which is not yet public
- * and lives in github.com/elastic/clients-team/tree/master/scripts/docs-json-generator
- *
- * This script will remove the content of the `docs/doc_examples` folder and generate
- * all the files present in the `enabledFiles` list below.
- * You can run it with the following command: - * - * ```bash - * $ node scripts/generate-docs-examples.js - * ``` - */ - const { join } = require('path') -const { writeFileSync } = require('fs') +const { writeFile } = require('fs/promises') +const fetch = require('node-fetch') const rimraf = require('rimraf') -const standard = require('standard') -const dedent = require('dedent') +const ora = require('ora') +const { convertRequests } = require('@elastic/request-converter') +const minimist = require('minimist') const docsExamplesDir = join('docs', 'doc_examples') -const enabledFiles = [ - 'docs/delete.asciidoc', - 'docs/get.asciidoc', - 'docs/index_.asciidoc', - 'getting-started.asciidoc', - 'query-dsl/query-string-query.asciidoc', - 'query-dsl.asciidoc', - 'search/request-body.asciidoc', - 'setup/install/check-running.asciidoc', - 'mapping.asciidoc', - 'query-dsl/query_filter_context.asciidoc', - 'query-dsl/bool-query.asciidoc', - 'query-dsl/match-query.asciidoc', - 'indices/create-index.asciidoc', - 'docs/index_.asciidoc', - 'aggregations/bucket/terms-aggregation.asciidoc', - 'query-dsl/range-query.asciidoc', - 'search/search.asciidoc', - 'query-dsl/multi-match-query.asciidoc', - 'docs/bulk.asciidoc', - 'indices/delete-index.asciidoc', - 'indices/put-mapping.asciidoc', - 'query-dsl/match-all-query.asciidoc', - 'query-dsl/term-query.asciidoc', - 'docs/update.asciidoc', - 'docs/reindex.asciidoc', - 'indices/templates.asciidoc', - 'query-dsl/exists-query.asciidoc', - 'query-dsl/terms-query.asciidoc', - 'query-dsl/wildcard-query.asciidoc', - 'mapping/types/nested.asciidoc', - 'mapping/params/format.asciidoc', - 'docs/delete-by-query.asciidoc', - 'search/request/sort.asciidoc', - 'query-dsl/function-score-query.asciidoc', - 'query-dsl/nested-query.asciidoc', - 'query-dsl/regexp-query.asciidoc', - 'mapping/types/array.asciidoc', - 'mapping/types/date.asciidoc', - 'mapping/types/keyword.asciidoc', - 'mapping/params/fielddata.asciidoc', - 'cluster/health.asciidoc', - 'docs/bulk.asciidoc', - 'indices/aliases.asciidoc', - 'indices/update-settings.asciidoc', - 'search/request/from-size.asciidoc', - 'search/count.asciidoc', - 'setup/logging-config.asciidoc', - 'search/request/from-size.asciidoc', - 'query-dsl/match-phrase-query.asciidoc', - 'aggregations/metrics/valuecount-aggregation.asciidoc', - 'aggregations/bucket/datehistogram-aggregation.asciidoc', - 'aggregations/bucket/filter-aggregation.asciidoc', - 'mapping/types/numeric.asciidoc', - 'mapping/fields/id-field.asciidoc', - 'mapping/params/multi-fields.asciidoc', - 'api-conventions.asciidoc', - 'cat/indices.asciidoc', - 'docs/update-by-query.asciidoc', - 'indices/get-index.asciidoc', - 'indices/get-mapping.asciidoc', - 'search.asciidoc', - 'search/request/scroll.asciidoc', - 'search/suggesters.asciidoc' -] - -function generate () { - rimraf.sync(join(docsExamplesDir, '*')) - const examples = require(join(__dirname, '..', 'alternatives_report.spec.json')) - for (const example of examples) { - if (example.lang !== 'console') continue - if (!enabledFiles.includes(example.source_location.file)) continue - - const asciidoc = generateAsciidoc(example.parsed_source) - writeFileSync( - join(docsExamplesDir, `${example.digest}.asciidoc`), - asciidoc, - 'utf8' - ) +const log = ora('Generating example snippets') + +const failures = {} + +async function getAlternativesReport (version = 'master') { + const reportUrl = `https://raw.githubusercontent.com/elastic/built-docs/master/raw/en/elasticsearch/reference/${version}/alternatives_report.json` + const 
response = await fetch(reportUrl) + if (!response.ok) { + log.fail(`unexpected response ${response.statusText}`) + process.exit(1) } + return await response.json() } -function generateAsciidoc (source) { - let asciidoc = '// This file is autogenerated, DO NOT EDIT\n' - asciidoc += '// Use `node scripts/generate-docs-examples.js` to generate the docs examples\n\n' - let code = 'async function run (client) {\n// START\n' - - for (let i = 0; i < source.length; i++) { - const { api, query, params, body } = source[i] - const apiArguments = Object.assign({}, params, query, body ? { body } : body) - const serializedApiArguments = Object.keys(apiArguments).length > 0 - ? JSON.stringify(apiArguments, null, 2) - : '' - code += `const response${getResponsePostfix(i)} = await client.${api.replace(/_([a-z])/g, g => g[1].toUpperCase())}(${serializedApiArguments}) -console.log(response${getResponsePostfix(i)}) -\n` +async function makeSnippet (example) { + const { source, digest } = example + const fileName = `${digest}.asciidoc` + const filePath = join(docsExamplesDir, fileName) + + try { + const code = await convertRequests(source, 'javascript', { + complete: false, + printResponse: true + }) + await writeFile(filePath, asciidocWrapper(code), 'utf8') + } catch (err) { + failures[digest] = err.message } +} + +async function generate (version) { + log.start() - code += '// END\n}' - const { results } = standard.lintTextSync(code, { fix: true }) - code = results[0].output - code = code.slice(code.indexOf('// START\n') + 9, code.indexOf('\n\n// END')) + rimraf.sync(join(docsExamplesDir, '*')) - asciidoc += `[source, js] + log.text = `Downloading alternatives report for version ${version}` + const examples = await getAlternativesReport(version) + + let counter = 1 + for (const example of examples) { + log.text = `${counter++}/${examples.length}: ${example.digest}` + + // skip over bad request definitions + if (example.source.startsWith('{') || example.source.endsWith('...')) { + failures[example.digest] = 'Incomplete request syntax' + continue + } + + await makeSnippet(example) + } +} + +function asciidocWrapper (source) { + return `// This file is autogenerated, DO NOT EDIT +// Use \`node scripts/generate-docs-examples.js\` to generate the docs examples + +[source, js] ---- -${dedent(code)} +${source.trim()} ---- - ` - return asciidoc +} - function getResponsePostfix (i) { - if (source.length === 1) return '' - return String(i) +const options = minimist(process.argv.slice(2), { + string: ['version'], + default: { + version: 'master' } -} +}) -generate() +generate(options.version) + .then(() => log.succeed('done!')) + .catch(err => log.fail(err.message)) + .finally(() => { + const keys = Object.keys(failures) + if (keys.length > 0) { + let message = 'Some examples failed to generate:\n\n' + for (const key of keys) { + message += `${key}: ${failures[key]}\n` + } + console.error(message) + } + }) From d62d8c9831868b3cbaa1a83e815d18e37c4c44cd Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 12 Aug 2024 13:29:08 -0500 Subject: [PATCH 374/647] Give actions permission to create releases and tags (#2335) --- .github/workflows/npm-publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index b4be036b1..9a37764eb 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -9,7 +9,7 @@ jobs: build: runs-on: ubuntu-latest permissions: - contents: read + contents: write id-token: write steps: - 
uses: actions/checkout@v4 From 4b8969cc784856bdd4fe79feb7b9c8f0c836ce65 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 12 Aug 2024 14:15:55 -0500 Subject: [PATCH 375/647] Add support for auto-merge (#2336) * Add support for auto-merge * Fix bad request-converter version * Switch back to pull_request pull_request_target is the wrong trigger for this action --- .github/workflows/auto-merge.yml | 17 +++++++++++++++++ .github/workflows/nodejs.yml | 2 +- package.json | 2 +- 3 files changed, 19 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/auto-merge.yml diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml new file mode 100644 index 000000000..53d4557f8 --- /dev/null +++ b/.github/workflows/auto-merge.yml @@ -0,0 +1,17 @@ +name: Automerge + +on: + pull_request_review: + types: + - submitted + +jobs: + automerge: + runs-on: ubuntu-latest + steps: + - uses: reitermarkus/automerge@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + merge-method: squash + do-not-merge-labels: never-merge + pull-request-author-associations: OWNER diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 7b61028d4..aa0ea28be 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -2,7 +2,7 @@ name: Node CI on: - pull_request_target: {} + pull_request: {} jobs: paths-filter: diff --git a/package.json b/package.json index a2f5293f7..0eb974659 100644 --- a/package.json +++ b/package.json @@ -50,7 +50,7 @@ "node": ">=18" }, "devDependencies": { - "@elastic/request-converter": "^8.16.0", + "@elastic/request-converter": "^8.15.2", "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", "@types/debug": "^4.1.7", "@types/ms": "^0.7.31", From e2745b4c7596efa91f812a871c4716abfb550a2f Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 14 Aug 2024 12:06:03 -0500 Subject: [PATCH 376/647] Migrate issue templates to forms (#2340) * Migrate feature issue template to a form * Migrate bug issue template to a form * Migrate question issue template to a form * Add support link * Migrate security issue template to a form --- .github/ISSUE_TEMPLATE/bug.md | 50 --------------------- .github/ISSUE_TEMPLATE/bug.yaml | 66 ++++++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/feature.md | 23 ---------- .github/ISSUE_TEMPLATE/feature.yaml | 33 ++++++++++++++ .github/ISSUE_TEMPLATE/question.md | 11 ----- .github/ISSUE_TEMPLATE/question.yaml | 21 +++++++++ .github/ISSUE_TEMPLATE/security.md | 6 --- .github/ISSUE_TEMPLATE/security.yaml | 8 ++++ 8 files changed, 128 insertions(+), 90 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/bug.md create mode 100644 .github/ISSUE_TEMPLATE/bug.yaml delete mode 100644 .github/ISSUE_TEMPLATE/feature.md create mode 100644 .github/ISSUE_TEMPLATE/feature.yaml delete mode 100644 .github/ISSUE_TEMPLATE/question.md create mode 100644 .github/ISSUE_TEMPLATE/question.yaml delete mode 100644 .github/ISSUE_TEMPLATE/security.md create mode 100644 .github/ISSUE_TEMPLATE/security.yaml diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md deleted file mode 100644 index e93bca168..000000000 --- a/.github/ISSUE_TEMPLATE/bug.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -name: 🐛 Bug report -about: Create a report to help us improve -labels: ["Category: Bug"] ---- - -It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. 
- -**Please read this entire template before posting any issue. If you ignore these instructions -and post an issue here that does not follow the instructions, your issue might be closed, -locked, and assigned the `Category: Not an issue` label.** - -## 🐛 Bug Report - -A clear and concise description of what the bug is. - -## To Reproduce - -Steps to reproduce the behavior: - -Paste your code here: - -```js - -``` - - - -## Expected behavior - -A clear and concise description of what you expected to happen. - -Paste the results here: - -```js - -``` - -## Your Environment - -- *node version*: 6,8,10 -- `@elastic/elasticsearch` *version*: >=7.0.0 -- *os*: Mac, Windows, Linux -- *any other relevant information* diff --git a/.github/ISSUE_TEMPLATE/bug.yaml b/.github/ISSUE_TEMPLATE/bug.yaml new file mode 100644 index 000000000..d4e41efbf --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yaml @@ -0,0 +1,66 @@ +--- +name: 🐛 Bug report +description: Create a report to help us improve +labels: ["Category: Bug"] +body: + - type: markdown + attributes: + value: | + It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. + + **Please read this entire template before posting any issue. If you ignore these instructions + and post an issue here that does not follow the instructions, your issue might be closed, + locked, and assigned the `Category: Not an issue` label.** + + - type: textarea + id: bug-report + attributes: + label: 🐛 Bug report + description: A clear and concise description of what the bug is. + validations: + required: true + + - type: textarea + id: reproduction + attributes: + label: To reproduce + description: Steps to reproduce the behavior + validations: + required: true + + - type: textarea + id: expected + attributes: + label: Expected behavior + description: A clear and concise description of what you expected to happen. + validations: + required: true + + - type: input + id: node-js-version + attributes: + label: Node.js version + placeholder: 18.x, 20.x, etc. + validations: + required: true + + - type: input + id: client-version + attributes: + label: "@elastic/elasticsearch version" + placeholder: 7.17.0, 8.14.1, etc. + validations: + required: true + + - type: input + id: os + attributes: + label: Operating system + placeholder: Ubuntu 22.04, macOS, etc. + validations: + required: true + + - type: input + id: env-info + attributes: + label: Any other relevant environment information diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md deleted file mode 100644 index 4b5f8d648..000000000 --- a/.github/ISSUE_TEMPLATE/feature.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -name: 🚀 Feature Proposal -about: Submit a proposal for a new feature -labels: ["Category: Feature"] ---- - -It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. - -**Please read this entire template before posting any issue. If you ignore these instructions -and post an issue here that does not follow the instructions, your issue might be closed, -locked, and assigned the `Category: Not an issue` label.** - -## 🚀 Feature Proposal - -A clear and concise description of what the feature is. 
- -## Motivation - -Please outline the motivation for the proposal. - -## Example - -Please provide an example for how this feature would be used. diff --git a/.github/ISSUE_TEMPLATE/feature.yaml b/.github/ISSUE_TEMPLATE/feature.yaml new file mode 100644 index 000000000..4a35cf6eb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.yaml @@ -0,0 +1,33 @@ +--- +name: 🚀 Feature Proposal +description: Submit a proposal for a new feature +labels: ["Category: Feature"] +body: + - type: markdown + attributes: + value: | + It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. + + **Please read this entire template before posting any issue. If you ignore these instructions + and post an issue here that does not follow the instructions, your issue might be closed, + locked, and assigned the `Category: Not an issue` label.** + + - type: textarea + id: feature-proposal + attributes: + label: 🚀 Feature Proposal + description: A clear and concise description of what the feature is. + validations: + required: true + + - type: textarea + id: motivation + attributes: + label: Motivation + description: Please outline the motivation for the proposal. + + - type: textarea + id: example + attributes: + label: Example + description: Please provide an example for how this feature would be used. diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md deleted file mode 100644 index fc7ab1490..000000000 --- a/.github/ISSUE_TEMPLATE/question.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -name: 💬 Questions / Help -about: If you have questions, please check our Gitter or Help repo -labels: ["Category: Question"] ---- - -## 💬 Questions and Help - -### Please note that this issue tracker is not a help forum and this issue may be closed. - -It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. diff --git a/.github/ISSUE_TEMPLATE/question.yaml b/.github/ISSUE_TEMPLATE/question.yaml new file mode 100644 index 000000000..083cb7f2d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.yaml @@ -0,0 +1,21 @@ +--- +name: 💬 Questions / Help +description: If you have questions, please check our community forum or support +labels: ["Category: Question"] +body: + - type: markdown + attributes: + value: | + ### Please note that this issue tracker is not a help forum and this issue may be closed. + + Please check our [community forum](https://discuss.elastic.co/) or [contact Elastic support](https://www.elastic.co/support) if your issue is not specifically related to the documented functionality of this client library. + + It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. 
+ + - type: textarea + id: question + attributes: + label: Question + description: Your question or comment + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/security.md b/.github/ISSUE_TEMPLATE/security.md deleted file mode 100644 index 0529296fc..000000000 --- a/.github/ISSUE_TEMPLATE/security.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -name: 👮 Security Issue -about: Responsible Disclosure ---- - -If you want to report a security issue, please take a look at [elastic/security](https://www.elastic.co/community/security). diff --git a/.github/ISSUE_TEMPLATE/security.yaml b/.github/ISSUE_TEMPLATE/security.yaml new file mode 100644 index 000000000..e003a1e6b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/security.yaml @@ -0,0 +1,8 @@ +--- +name: 👮 Security Issue +description: Responsible disclosure +body: + - type: markdown + attributes: + value: | + If you want to report a security issue, please take a look at [elastic/security](https://www.elastic.co/community/security). From 9c959971a561f1cf1a12d18d3ad652afc4f97f1d Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 19 Aug 2024 10:44:24 -0500 Subject: [PATCH 377/647] Adjust author associations for auto-merge (#2346) * Adjust author associations for auto-merge * Upgrade outdated actions --- .github/workflows/auto-merge.yml | 3 ++- .github/workflows/nodejs.yml | 2 +- .github/workflows/npm-publish.yml | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml index 53d4557f8..65efca90b 100644 --- a/.github/workflows/auto-merge.yml +++ b/.github/workflows/auto-merge.yml @@ -14,4 +14,5 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} merge-method: squash do-not-merge-labels: never-merge - pull-request-author-associations: OWNER + pull-request-author-associations: OWNER,MEMBER + review-author-associations: OWNER,MEMBER diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index aa0ea28be..e4765731f 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -12,7 +12,7 @@ jobs: src-only: "${{ steps.changes.outputs.src-only }}" steps: - uses: actions/checkout@v4 - - uses: dorny/paths-filter/@v2.11.1 + - uses: dorny/paths-filter/@v3.0.2 id: changes with: filters: | diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index 9a37764eb..0b6d77877 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -15,9 +15,9 @@ jobs: - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.branch }} - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 with: - node-version: "20.x" + node-version: "22.x" registry-url: "https://registry.npmjs.org/" - run: npm install -g npm - run: npm install From 1042a02733786ba67cd4f674c052c549852d38e3 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 19 Aug 2024 12:11:03 -0500 Subject: [PATCH 378/647] Add collaborator to auto-merge (#2347) Trying to figure out what type of author association @github-actions has. 
--- .github/workflows/auto-merge.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml index 65efca90b..f8fb5c587 100644 --- a/.github/workflows/auto-merge.yml +++ b/.github/workflows/auto-merge.yml @@ -14,5 +14,5 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} merge-method: squash do-not-merge-labels: never-merge - pull-request-author-associations: OWNER,MEMBER + pull-request-author-associations: OWNER,MEMBER,COLLABORATOR review-author-associations: OWNER,MEMBER From 715292b5013c3d215879ea917e17d4730d78064a Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 20 Aug 2024 03:32:21 +1000 Subject: [PATCH 379/647] Auto-generated API code (#2344) Co-authored-by: Josh Mock --- .../0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc | 15 ++ .../0fbca60a487f5f22a4d51d73b2434cc4.asciidoc | 20 +++ .../10a16abe990288253ea25a1b1712fe3d.asciidoc | 5 + .../19c00c6b29bc7dbc5e92b3668da2da93.asciidoc | 21 ++- .../19d60e4890cc57151d596326484d9076.asciidoc | 11 ++ .../1b60ad542abb511cbd926ac8c55b609c.asciidoc | 21 +++ .../2a1eece9a59ac1773edcf0a932c26de0.asciidoc | 8 +- .../2acf75803494fef29f9ca70671aa6be1.asciidoc | 10 ++ .../2afdf0d83724953aa2875b5fb37d60cc.asciidoc | 1 + .../30fa37c9575fe81a0ea7c12cfc08e277.asciidoc | 25 ++++ .../398389933901b572a06a752bc780af7c.asciidoc | 21 +++ .../3b6718257421b5419bf4cd6a7303c57e.asciidoc | 11 ++ .../3f1fe5f5f99b98d0891f38003e10b636.asciidoc | 8 +- .../405511f7c1f12cc0a227b4563fe7b2e2.asciidoc | 1 + .../44385b61342e20ea05f254015b2b04d7.asciidoc | 10 ++ .../4982c547be1ad9455ae836990aea92c5.asciidoc | 11 ++ .../517d291044c3e4448b8804322616ab4a.asciidoc | 21 +++ .../533087d787b48878a0bf3fa8d0851b64.asciidoc | 11 ++ .../57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc | 8 +- .../5d3ee81bcf6ad57f39052c9065963cc3.asciidoc | 28 ++++ .../5deeed427f35cbaee4b8ddc45002a9d7.asciidoc | 10 ++ .../5f79c42b0f74fdf71359cef82843fad3.asciidoc | 47 ------ ...5fd002a018c589eb73fadad25889dbe9.asciidoc} | 2 +- .../63bf3480627a89b4b4ede4150e1d6bc0.asciidoc | 59 ++++++++ ...6ce6cac9df216c52371c2e77e6e07ba1.asciidoc} | 2 +- .../6f3b723bf6179b96c3413597ed7f49e1.asciidoc | 6 +- .../77518e8c6198acfe77c0934fd2fe65cb.asciidoc | 29 +++- .../7c8f207e43115ea8f20d2298be5aaebc.asciidoc | 33 ++++- ...7f2d511cb64743c006225e5933a14bb4.asciidoc} | 6 + .../80dd7f5882c59b9c1c90e8351937441f.asciidoc | 24 ++- .../81aad155ff23b1b396833b1182c9d46b.asciidoc | 16 ++ .../840f8c863c30b04abcf2dd66b846f157.asciidoc | 22 +++ ...8b144b3eb20872595fd7cbc6c245c7c8.asciidoc} | 4 +- .../95c1b376652533c352bbf793c74d1b08.asciidoc | 17 +++ .../981b331db1404b39c1a612a135e4e76d.asciidoc | 16 ++ .../9afa0844883b7471883aa378a8dd10b4.asciidoc | 30 ++++ .../9c01db07c9ac395b6370e3b33965c21f.asciidoc | 10 +- .../a162eb50853331c80596f5994e9d1c38.asciidoc | 5 + .../a225fc8c134cb21a85bc6025dac9368b.asciidoc | 18 +++ .../aebf9cc593fcf0d4ca08f8b61b67bf17.asciidoc | 18 +++ .../b0bddf2ffaa83049b195829c06b875cd.asciidoc | 1 + .../b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc | 1 + .../bcdfaa4487747249699a86a0dcd22f5e.asciidoc | 32 +++- .../bfb0db2a72f22c9c2046119777efbb43.asciidoc | 18 +++ .../c580990a70028bb49cca8a6bde86bbf6.asciidoc | 7 +- .../c6f07c53eda4db77305bb14751b3263f.asciidoc | 50 ------- .../c9373ff5ed6b026173428fbb92ca2d9f.asciidoc | 18 +++ .../d27591881da6f5767523b1beb233adc7.asciidoc | 13 ++ .../d35c8cf7a98b3f112e1de8797ec6689d.asciidoc | 7 +- .../dfce1be1d035aff0b8fdf4a8839f7795.asciidoc | 15 ++ .../e3019fd5f23458ae49ad9854c97d321c.asciidoc | 6 +- 
.../e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc | 8 +- .../e7811867397b305efbbe8925d8a01c1a.asciidoc | 33 ----- .../eb141f8df8ead40ff7440b623ea92267.asciidoc | 24 +++ .../eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc | 59 ++++++++ docs/reference.asciidoc | 139 +++++++++++------- src/api/api/cat.ts | 2 +- src/api/api/ml.ts | 106 ++++++------- src/api/types.ts | 23 ++- src/api/typesWithBodyKey.ts | 23 ++- 60 files changed, 959 insertions(+), 267 deletions(-) create mode 100644 docs/doc_examples/0fbca60a487f5f22a4d51d73b2434cc4.asciidoc create mode 100644 docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc create mode 100644 docs/doc_examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc create mode 100644 docs/doc_examples/2acf75803494fef29f9ca70671aa6be1.asciidoc create mode 100644 docs/doc_examples/30fa37c9575fe81a0ea7c12cfc08e277.asciidoc create mode 100644 docs/doc_examples/398389933901b572a06a752bc780af7c.asciidoc create mode 100644 docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc create mode 100644 docs/doc_examples/44385b61342e20ea05f254015b2b04d7.asciidoc create mode 100644 docs/doc_examples/4982c547be1ad9455ae836990aea92c5.asciidoc create mode 100644 docs/doc_examples/517d291044c3e4448b8804322616ab4a.asciidoc create mode 100644 docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc create mode 100644 docs/doc_examples/5d3ee81bcf6ad57f39052c9065963cc3.asciidoc create mode 100644 docs/doc_examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc delete mode 100644 docs/doc_examples/5f79c42b0f74fdf71359cef82843fad3.asciidoc rename docs/doc_examples/{d595b40bf1ea71923f9824d0f9c99c49.asciidoc => 5fd002a018c589eb73fadad25889dbe9.asciidoc} (97%) create mode 100644 docs/doc_examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc rename docs/doc_examples/{e619e896ce3dad9dcfc6f8700438be98.asciidoc => 6ce6cac9df216c52371c2e77e6e07ba1.asciidoc} (97%) rename docs/doc_examples/{0b8fa90bc9aeeadb420ad785bd0b9953.asciidoc => 7f2d511cb64743c006225e5933a14bb4.asciidoc} (81%) create mode 100644 docs/doc_examples/81aad155ff23b1b396833b1182c9d46b.asciidoc create mode 100644 docs/doc_examples/840f8c863c30b04abcf2dd66b846f157.asciidoc rename docs/doc_examples/{72d33fbd72b0766b2f14ea27d9ccf0fa.asciidoc => 8b144b3eb20872595fd7cbc6c245c7c8.asciidoc} (71%) create mode 100644 docs/doc_examples/95c1b376652533c352bbf793c74d1b08.asciidoc create mode 100644 docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc create mode 100644 docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc create mode 100644 docs/doc_examples/aebf9cc593fcf0d4ca08f8b61b67bf17.asciidoc create mode 100644 docs/doc_examples/bfb0db2a72f22c9c2046119777efbb43.asciidoc delete mode 100644 docs/doc_examples/c6f07c53eda4db77305bb14751b3263f.asciidoc create mode 100644 docs/doc_examples/c9373ff5ed6b026173428fbb92ca2d9f.asciidoc create mode 100644 docs/doc_examples/d27591881da6f5767523b1beb233adc7.asciidoc create mode 100644 docs/doc_examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc delete mode 100644 docs/doc_examples/e7811867397b305efbbe8925d8a01c1a.asciidoc create mode 100644 docs/doc_examples/eb141f8df8ead40ff7440b623ea92267.asciidoc create mode 100644 docs/doc_examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc diff --git a/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc index 627ba004d..3801b625f 100644 --- a/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc +++ b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc @@ -5,6 +5,21 @@ ---- const 
response = await client.searchApplication.renderQuery({ name: "my-app", + body: { + params: { + query_string: "my first query", + text_fields: [ + { + name: "title", + boost: 5, + }, + { + name: "description", + boost: 1, + }, + ], + }, + }, }); console.log(response); ---- diff --git a/docs/doc_examples/0fbca60a487f5f22a4d51d73b2434cc4.asciidoc b/docs/doc_examples/0fbca60a487f5f22a4d51d73b2434cc4.asciidoc new file mode 100644 index 000000000..fadf2814c --- /dev/null +++ b/docs/doc_examples/0fbca60a487f5f22a4d51d73b2434cc4.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "elser-embeddings", + mappings: { + properties: { + content_embedding: { + type: "sparse_vector", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/10a16abe990288253ea25a1b1712fe3d.asciidoc b/docs/doc_examples/10a16abe990288253ea25a1b1712fe3d.asciidoc index d8a89c5f8..2520b36c1 100644 --- a/docs/doc_examples/10a16abe990288253ea25a1b1712fe3d.asciidoc +++ b/docs/doc_examples/10a16abe990288253ea25a1b1712fe3d.asciidoc @@ -5,6 +5,11 @@ ---- const response = await client.security.queryUser({ with_profile_uid: "true", + query: { + prefix: { + roles: "other", + }, + }, }); console.log(response); ---- diff --git a/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc index f9344050e..c5453ffaf 100644 --- a/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc +++ b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc @@ -3,6 +3,25 @@ [source, js] ---- -const response = await client.simulate.ingest({}); +const response = await client.simulate.ingest({ + body: { + docs: [ + { + _index: "my-index", + _id: "123", + _source: { + foo: "bar", + }, + }, + { + _index: "my-index", + _id: "456", + _source: { + foo: "rab", + }, + }, + ], + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc b/docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc new file mode 100644 index 000000000..c5e05aa73 --- /dev/null +++ b/docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deleteGeoipDatabase({ + id: "my-database-id", + body: null, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc b/docs/doc_examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc new file mode 100644 index 000000000..0c7d4b6f1 --- /dev/null +++ b/docs/doc_examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/sparse_embedding/my-elser-model", + body: { + service: "elser", + service_settings: { + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 3, + max_number_of_allocations: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc index a0a9b3805..4853ab9a3 
100644 --- a/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc +++ b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc @@ -3,6 +3,12 @@ [source, js] ---- -const response = await client.security.oidcLogout({}); +const response = await client.security.oidcLogout({ + body: { + token: + "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + refresh_token: "vLBPvmAB6KvwvJZr27cS", + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/2acf75803494fef29f9ca70671aa6be1.asciidoc b/docs/doc_examples/2acf75803494fef29f9ca70671aa6be1.asciidoc new file mode 100644 index 000000000..d5715eeeb --- /dev/null +++ b/docs/doc_examples/2acf75803494fef29f9ca70671aa6be1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkDeleteRole({ + names: ["my_admin_role", "superuser"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc index 32a8ae35c..44648b27c 100644 --- a/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc +++ b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc @@ -6,6 +6,7 @@ const response = await client.esql.asyncQueryGet({ id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", wait_for_completion_timeout: "30s", + body: null, }); console.log(response); ---- diff --git a/docs/doc_examples/30fa37c9575fe81a0ea7c12cfc08e277.asciidoc b/docs/doc_examples/30fa37c9575fe81a0ea7c12cfc08e277.asciidoc new file mode 100644 index 000000000..04f772fe1 --- /dev/null +++ b/docs/doc_examples/30fa37c9575fe81a0ea7c12cfc08e277.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "bad_example_index", + mappings: { + properties: { + field_1: { + type: "text", + copy_to: "field_2", + }, + field_2: { + type: "text", + copy_to: "field_3", + }, + field_3: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/398389933901b572a06a752bc780af7c.asciidoc b/docs/doc_examples/398389933901b572a06a752bc780af7c.asciidoc new file mode 100644 index 000000000..029b478da --- /dev/null +++ b/docs/doc_examples/398389933901b572a06a752bc780af7c.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/completion/anthropic_completion", + body: { + service: "anthropic", + service_settings: { + api_key: "", + model_id: "", + }, + task_settings: { + max_tokens: 1024, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc b/docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc new file mode 100644 index 000000000..a00dcffd3 --- /dev/null +++ b/docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.getGeoipDatabase({ + id: "my-database-id", + body: null, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc index be4d5355c..b8e2ede87 100644 --- a/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc +++ b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc @@ -3,6 +3,12 @@ [source, js] ---- -const response = await client.esql.asyncQuery({}); +const response = await client.esql.asyncQuery({ + body: { + query: + "\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ", + wait_for_completion_timeout: "2s", + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc index c63439d9c..ab0617ea6 100644 --- a/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc +++ b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc @@ -5,6 +5,7 @@ ---- const response = await client.esql.asyncQueryGet({ id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", + body: null, }); console.log(response); ---- diff --git a/docs/doc_examples/44385b61342e20ea05f254015b2b04d7.asciidoc b/docs/doc_examples/44385b61342e20ea05f254015b2b04d7.asciidoc new file mode 100644 index 000000000..fc102c343 --- /dev/null +++ b/docs/doc_examples/44385b61342e20ea05f254015b2b04d7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkDeleteRole({ + names: ["my_admin_role", "my_user_role"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4982c547be1ad9455ae836990aea92c5.asciidoc b/docs/doc_examples/4982c547be1ad9455ae836990aea92c5.asciidoc new file mode 100644 index 000000000..b5d5d91f8 --- /dev/null +++ b/docs/doc_examples/4982c547be1ad9455ae836990aea92c5.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.startTrainedModelDeployment({ + model_id: "my_model", + deployment_id: "my_model_for_search", +}); +console.log(response); +---- diff --git a/docs/doc_examples/517d291044c3e4448b8804322616ab4a.asciidoc b/docs/doc_examples/517d291044c3e4448b8804322616ab4a.asciidoc new file mode 100644 index 000000000..67bb14a80 --- /dev/null +++ b/docs/doc_examples/517d291044c3e4448b8804322616ab4a.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "elser_embeddings", + processors: [ + { + inference: { + model_id: "elser_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc b/docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc new file mode 100644 index 000000000..9dfe27bbc --- /dev/null +++ b/docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deleteGeoipDatabase({ + id: 
"example-database-id", + body: null, +}); +console.log(response); +---- diff --git a/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc index 39d1ae3a1..dcf9e4b2e 100644 --- a/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc +++ b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc @@ -3,6 +3,12 @@ [source, js] ---- -const response = await client.security.oidcPrepareAuthentication({}); +const response = await client.security.oidcPrepareAuthentication({ + body: { + realm: "oidc1", + state: "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", + nonce: "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5", + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/5d3ee81bcf6ad57f39052c9065963cc3.asciidoc b/docs/doc_examples/5d3ee81bcf6ad57f39052c9065963cc3.asciidoc new file mode 100644 index 000000000..6079dac32 --- /dev/null +++ b/docs/doc_examples/5d3ee81bcf6ad57f39052c9065963cc3.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test_index", + mappings: { + dynamic: "strict", + properties: { + description: { + properties: { + notes: { + type: "text", + copy_to: ["description.notes_raw"], + analyzer: "standard", + search_analyzer: "standard", + }, + notes_raw: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc b/docs/doc_examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc new file mode 100644 index 000000000..e5c8b337a --- /dev/null +++ b/docs/doc_examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkDeleteRole({ + names: ["my_admin_role", "not_an_existing_role"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f79c42b0f74fdf71359cef82843fad3.asciidoc b/docs/doc_examples/5f79c42b0f74fdf71359cef82843fad3.asciidoc deleted file mode 100644 index 08f2e3dc9..000000000 --- a/docs/doc_examples/5f79c42b0f74fdf71359cef82843fad3.asciidoc +++ /dev/null @@ -1,47 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - query: { - intervals: { - my_text: { - all_of: { - intervals: [ - { - match: { - query: "the", - }, - }, - { - any_of: { - intervals: [ - { - match: { - query: "big", - }, - }, - { - match: { - query: "big bad", - }, - }, - ], - }, - }, - { - match: { - query: "wolf", - }, - }, - ], - max_gaps: 0, - ordered: true, - }, - }, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/d595b40bf1ea71923f9824d0f9c99c49.asciidoc b/docs/doc_examples/5fd002a018c589eb73fadad25889dbe9.asciidoc similarity index 97% rename from docs/doc_examples/d595b40bf1ea71923f9824d0f9c99c49.asciidoc rename to docs/doc_examples/5fd002a018c589eb73fadad25889dbe9.asciidoc index 6c2200014..d3d537545 100644 --- a/docs/doc_examples/d595b40bf1ea71923f9824d0f9c99c49.asciidoc +++ b/docs/doc_examples/5fd002a018c589eb73fadad25889dbe9.asciidoc @@ -29,7 +29,7 @@ const response = await client.transport.request({ }, { rule_id: "rule2", - type: "pinned", + type: "exclude", criteria: [ { type: "contains", 
diff --git a/docs/doc_examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc b/docs/doc_examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc new file mode 100644 index 000000000..041c60f0a --- /dev/null +++ b/docs/doc_examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkPutRole({ + roles: { + my_admin_role: { + cluster: ["all"], + indices: [ + { + names: ["index1", "index2"], + privileges: ["all"], + field_security: { + grant: ["title", "body"], + }, + query: '{"match": {"title": "foo"}}', + }, + ], + applications: [ + { + application: "myapp", + privileges: ["admin", "read"], + resources: ["*"], + }, + ], + run_as: ["other_user"], + metadata: { + version: 1, + }, + }, + my_user_role: { + cluster: ["all"], + indices: [ + { + names: ["index1"], + privileges: ["read"], + field_security: { + grant: ["title", "body"], + }, + query: '{"match": {"title": "foo"}}', + }, + ], + applications: [ + { + application: "myapp", + privileges: ["admin", "read"], + resources: ["*"], + }, + ], + run_as: ["other_user"], + metadata: { + version: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e619e896ce3dad9dcfc6f8700438be98.asciidoc b/docs/doc_examples/6ce6cac9df216c52371c2e77e6e07ba1.asciidoc similarity index 97% rename from docs/doc_examples/e619e896ce3dad9dcfc6f8700438be98.asciidoc rename to docs/doc_examples/6ce6cac9df216c52371c2e77e6e07ba1.asciidoc index 7ca935056..396d11fe4 100644 --- a/docs/doc_examples/e619e896ce3dad9dcfc6f8700438be98.asciidoc +++ b/docs/doc_examples/6ce6cac9df216c52371c2e77e6e07ba1.asciidoc @@ -29,7 +29,7 @@ const response = await client.transport.request({ }, { rule_id: "my-rule2", - type: "pinned", + type: "exclude", criteria: [ { type: "fuzzy", diff --git a/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc index a9e30c9b3..26bbeb20a 100644 --- a/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc +++ b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc @@ -3,6 +3,10 @@ [source, js] ---- -const response = await client.security.bulkUpdateApiKeys({}); +const response = await client.security.bulkUpdateApiKeys({ + body: { + ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc index d5d893637..107aebead 100644 --- a/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc +++ b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc @@ -3,6 +3,33 @@ [source, js] ---- -const response = await client.textStructure.findMessageStructure({}); +const response = await client.textStructure.findMessageStructure({ + body: { + messages: [ + "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + "[2024-03-05T10:52:41,043][INFO 
][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]", + "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]", + "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled", + "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled", + "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled", + "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]", + "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]", + "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized", + "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ...", + ], + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc b/docs/doc_examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc index f9344050e..eb0ee1488 100644 --- a/docs/doc_examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc +++ b/docs/doc_examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc @@ -3,6 +3,37 @@ [source, js] ---- -const response = await client.simulate.ingest({}); +const response = await client.simulate.ingest({ + body: { + docs: [ + { + _index: "my-index", + _id: "id", + _source: { + foo: "bar", + }, + }, + { + _index: "my-index", + _id: "id", + _source: { + foo: "rab", + }, + }, + ], + pipeline_substitutions: { + "my-pipeline": { + processors: [ + { + set: { + field: "field3", + value: "value3", + }, + }, + ], + }, + }, + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/0b8fa90bc9aeeadb420ad785bd0b9953.asciidoc b/docs/doc_examples/7f2d511cb64743c006225e5933a14bb4.asciidoc similarity index 81% rename from docs/doc_examples/0b8fa90bc9aeeadb420ad785bd0b9953.asciidoc rename to docs/doc_examples/7f2d511cb64743c006225e5933a14bb4.asciidoc index 0ebd8a2a4..c492ea70d 100644 --- a/docs/doc_examples/0b8fa90bc9aeeadb420ad785bd0b9953.asciidoc +++ b/docs/doc_examples/7f2d511cb64743c006225e5933a14bb4.asciidoc @@ -19,6 +19,12 @@ const response = await client.security.putRole({ clusters: ["my_remote_cluster"], }, ], + remote_cluster: [ + { + privileges: ["monitor_enrich"], + clusters: ["my_remote_cluster"], + }, + ], }); console.log(response); ---- diff --git a/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc index a9e30c9b3..659fc0e47 100644 --- a/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc +++ 
b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc @@ -3,6 +3,28 @@ [source, js] ---- -const response = await client.security.bulkUpdateApiKeys({}); +const response = await client.security.bulkUpdateApiKeys({ + body: { + ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], + role_descriptors: { + "role-a": { + indices: [ + { + names: ["*"], + privileges: ["write"], + }, + ], + }, + }, + metadata: { + environment: { + level: 2, + trusted: true, + tags: ["production"], + }, + }, + expiration: "30d", + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/81aad155ff23b1b396833b1182c9d46b.asciidoc b/docs/doc_examples/81aad155ff23b1b396833b1182c9d46b.asciidoc new file mode 100644 index 000000000..a4744544e --- /dev/null +++ b/docs/doc_examples/81aad155ff23b1b396833b1182c9d46b.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + v: "true", +}); +console.log(response); + +const response1 = await client.cat.recovery({ + v: "true", + active_only: "true", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/840f8c863c30b04abcf2dd66b846f157.asciidoc b/docs/doc_examples/840f8c863c30b04abcf2dd66b846f157.asciidoc new file mode 100644 index 000000000..b38af10e2 --- /dev/null +++ b/docs/doc_examples/840f8c863c30b04abcf2dd66b846f157.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/text_embedding/my-e5-model", + body: { + service: "elasticsearch", + service_settings: { + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 3, + max_number_of_allocations: 10, + }, + model_id: ".multilingual-e5-small", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/72d33fbd72b0766b2f14ea27d9ccf0fa.asciidoc b/docs/doc_examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc similarity index 71% rename from docs/doc_examples/72d33fbd72b0766b2f14ea27d9ccf0fa.asciidoc rename to docs/doc_examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc index cb34ca584..3bccba38f 100644 --- a/docs/doc_examples/72d33fbd72b0766b2f14ea27d9ccf0fa.asciidoc +++ b/docs/doc_examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc @@ -3,8 +3,8 @@ [source, js] ---- -const response = await client.indices.delete({ - index: "my-index", +const response = await client.security.queryRole({ + sort: ["name"], }); console.log(response); ---- diff --git a/docs/doc_examples/95c1b376652533c352bbf793c74d1b08.asciidoc b/docs/doc_examples/95c1b376652533c352bbf793c74d1b08.asciidoc new file mode 100644 index 000000000..4dcac513f --- /dev/null +++ b/docs/doc_examples/95c1b376652533c352bbf793c74d1b08.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryRole({ + query: { + match: { + description: { + query: "user access", + }, + }, + }, + size: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc b/docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc new file mode 100644 index 000000000..b3393e3ec --- /dev/null +++ b/docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc @@ -0,0 +1,16 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putGeoipDatabase({ + id: "my-database-id", + body: { + name: "GeoIP2-Domain", + maxmind: { + account_id: "1025402", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc index bb5d71a23..46c6ad610 100644 --- a/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc +++ b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc @@ -6,6 +6,36 @@ const response = await client.searchApplication.postBehavioralAnalyticsEvent({ collection_name: "my_analytics_collection", event_type: "search_click", + body: { + session: { + id: "1797ca95-91c9-4e2e-b1bd-9c38e6f386a9", + }, + user: { + id: "5f26f01a-bbee-4202-9298-81261067abbd", + }, + search: { + query: "search term", + results: { + items: [ + { + document: { + id: "123", + index: "products", + }, + }, + ], + total_results: 10, + }, + sort: { + name: "relevance", + }, + search_application: "website", + }, + document: { + id: "123", + index: "products", + }, + }, }); console.log(response); ---- diff --git a/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc index 395ba9b0f..64e6db48e 100644 --- a/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc +++ b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc @@ -3,6 +3,14 @@ [source, js] ---- -const response = await client.security.oidcAuthenticate({}); +const response = await client.security.oidcAuthenticate({ + body: { + redirect_uri: + "https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", + state: "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", + nonce: "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM", + realm: "oidc1", + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc index 82a81bced..6b9a54625 100644 --- a/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc +++ b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc @@ -5,6 +5,11 @@ ---- const response = await client.searchApplication.renderQuery({ name: "my_search_application", + body: { + params: { + query_string: "rock climbing", + }, + }, }); console.log(response); ---- diff --git a/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc b/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc new file mode 100644 index 000000000..8cfc3b071 --- /dev/null +++ b/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_inference/sparse_embedding/elser_embeddings", + body: { + service: "elser", + service_settings: { + num_allocations: 1, + num_threads: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aebf9cc593fcf0d4ca08f8b61b67bf17.asciidoc b/docs/doc_examples/aebf9cc593fcf0d4ca08f8b61b67bf17.asciidoc new file mode 100644 index 000000000..af5ed0954 --- /dev/null +++ b/docs/doc_examples/aebf9cc593fcf0d4ca08f8b61b67bf17.asciidoc @@ -0,0 +1,18 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_backup", + repository: { + type: "azure", + settings: { + client: "secondary", + container: "my_container", + base_path: "snapshots_prefix", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc index 82a81bced..6ce163623 100644 --- a/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc +++ b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc @@ -5,6 +5,7 @@ ---- const response = await client.searchApplication.renderQuery({ name: "my_search_application", + body: null, }); console.log(response); ---- diff --git a/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc index 7ffe922db..30687161f 100644 --- a/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc +++ b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc @@ -211,6 +211,7 @@ console.log(response); const response1 = await client.textStructure.findFieldStructure({ index: "test-logs", field: "message", + body: null, }); console.log(response1); ---- diff --git a/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc index f9344050e..06b5f58ec 100644 --- a/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc +++ b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc @@ -3,6 +3,36 @@ [source, js] ---- -const response = await client.simulate.ingest({}); +const response = await client.simulate.ingest({ + body: { + docs: [ + { + _index: "my-index", + _id: "123", + _source: { + foo: "bar", + }, + }, + { + _index: "my-index", + _id: "456", + _source: { + foo: "rab", + }, + }, + ], + pipeline_substitutions: { + "my-pipeline": { + processors: [ + { + uppercase: { + field: "foo", + }, + }, + ], + }, + }, + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/bfb0db2a72f22c9c2046119777efbb43.asciidoc b/docs/doc_examples/bfb0db2a72f22c9c2046119777efbb43.asciidoc new file mode 100644 index 000000000..6936bb686 --- /dev/null +++ b/docs/doc_examples/bfb0db2a72f22c9c2046119777efbb43.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "elser-embeddings", + query: { + sparse_vector: { + field: "content_embedding", + inference_id: "elser_embeddings", + query: "How to avoid muscle soreness after running?", + }, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc index a9e30c9b3..2a14bb328 100644 --- a/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc +++ b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc @@ -3,6 +3,11 @@ [source, js] ---- -const response = await client.security.bulkUpdateApiKeys({}); +const response = await client.security.bulkUpdateApiKeys({ + body: { + ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], + role_descriptors: {}, + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/c6f07c53eda4db77305bb14751b3263f.asciidoc 
b/docs/doc_examples/c6f07c53eda4db77305bb14751b3263f.asciidoc deleted file mode 100644 index 10a1ab91e..000000000 --- a/docs/doc_examples/c6f07c53eda4db77305bb14751b3263f.asciidoc +++ /dev/null @@ -1,50 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.snapshot.createRepository({ - name: "my_backup1", - repository: { - type: "azure", - }, -}); -console.log(response); - -const response1 = await client.snapshot.createRepository({ - name: "my_backup2", - repository: { - type: "azure", - settings: { - container: "backup-container", - base_path: "backups", - chunk_size: "32MB", - compress: true, - }, - }, -}); -console.log(response1); - -const response2 = await client.snapshot.createRepository({ - name: "my_backup3", - repository: { - type: "azure", - settings: { - client: "secondary", - }, - }, -}); -console.log(response2); - -const response3 = await client.snapshot.createRepository({ - name: "my_backup4", - repository: { - type: "azure", - settings: { - client: "secondary", - location_mode: "primary_only", - }, - }, -}); -console.log(response3); ----- diff --git a/docs/doc_examples/c9373ff5ed6b026173428fbb92ca2d9f.asciidoc b/docs/doc_examples/c9373ff5ed6b026173428fbb92ca2d9f.asciidoc new file mode 100644 index 000000000..21b881926 --- /dev/null +++ b/docs/doc_examples/c9373ff5ed6b026173428fbb92ca2d9f.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "elser-embeddings", + pipeline: "elser_embeddings", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d27591881da6f5767523b1beb233adc7.asciidoc b/docs/doc_examples/d27591881da6f5767523b1beb233adc7.asciidoc new file mode 100644 index 000000000..907c0a556 --- /dev/null +++ b/docs/doc_examples/d27591881da6f5767523b1beb233adc7.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_backup", + repository: { + type: "azure", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc index 39d1ae3a1..51dea2365 100644 --- a/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc +++ b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc @@ -3,6 +3,11 @@ [source, js] ---- -const response = await client.security.oidcPrepareAuthentication({}); +const response = await client.security.oidcPrepareAuthentication({ + body: { + iss: "http://127.0.0.1:8080/", + login_hint: "this_is_an_opaque_string", + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc b/docs/doc_examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc new file mode 100644 index 000000000..c46ec2537 --- /dev/null +++ b/docs/doc_examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.updateTrainedModelDeployment({ + model_id: 
"elastic__distilbert-base-uncased-finetuned-conll03-english", + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 3, + max_number_of_allocations: 10, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc index 39d1ae3a1..5a02c157a 100644 --- a/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc +++ b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc @@ -3,6 +3,10 @@ [source, js] ---- -const response = await client.security.oidcPrepareAuthentication({}); +const response = await client.security.oidcPrepareAuthentication({ + body: { + realm: "oidc1", + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc b/docs/doc_examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc index a73d17637..7207d8204 100644 --- a/docs/doc_examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc +++ b/docs/doc_examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc @@ -3,6 +3,12 @@ [source, js] ---- -const response = await client.security.queryUser({}); +const response = await client.security.queryUser({ + query: { + prefix: { + roles: "other", + }, + }, +}); console.log(response); ---- diff --git a/docs/doc_examples/e7811867397b305efbbe8925d8a01c1a.asciidoc b/docs/doc_examples/e7811867397b305efbbe8925d8a01c1a.asciidoc deleted file mode 100644 index 97615c4ce..000000000 --- a/docs/doc_examples/e7811867397b305efbbe8925d8a01c1a.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - query: { - intervals: { - my_text: { - any_of: { - intervals: [ - { - match: { - query: "the big bad wolf", - ordered: true, - max_gaps: 0, - }, - }, - { - match: { - query: "the big wolf", - ordered: true, - max_gaps: 0, - }, - }, - ], - }, - }, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/eb141f8df8ead40ff7440b623ea92267.asciidoc b/docs/doc_examples/eb141f8df8ead40ff7440b623ea92267.asciidoc new file mode 100644 index 000000000..55f84f169 --- /dev/null +++ b/docs/doc_examples/eb141f8df8ead40ff7440b623ea92267.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "good_example_index", + mappings: { + properties: { + field_1: { + type: "text", + copy_to: ["field_2", "field_3"], + }, + field_2: { + type: "text", + }, + field_3: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc b/docs/doc_examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc new file mode 100644 index 000000000..a9cdeb962 --- /dev/null +++ b/docs/doc_examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkPutRole({ + roles: { + my_admin_role: { + cluster: ["bad_cluster_privilege"], + indices: [ + { + names: ["index1", "index2"], + privileges: ["all"], + field_security: { + grant: ["title", "body"], + }, + query: '{"match": {"title": "foo"}}', + }, + ], + applications: [ + { + application: "myapp", + privileges: ["admin", 
"read"], + resources: ["*"], + }, + ], + run_as: ["other_user"], + metadata: { + version: 1, + }, + }, + my_user_role: { + cluster: ["all"], + indices: [ + { + names: ["index1"], + privileges: ["read"], + field_security: { + grant: ["title", "body"], + }, + query: '{"match": {"title": "foo"}}', + }, + ], + applications: [ + { + application: "myapp", + privileges: ["admin", "read"], + resources: ["*"], + }, + ], + run_as: ["other_user"], + metadata: { + version: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 880c16ebb..510ccb0d6 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -661,6 +661,12 @@ client.msearch({ ... }) ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. ** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. ** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. +** *`include_named_queries_score` (Optional, boolean)*: Indicates whether hit.matched_queries should be rendered as a map that includes +the name of the matched query associated with its score (true) +or as an array containing the name of the matched queries (false) +This functionality reruns each named query on every hit in a search response. +Typically, this adds a small overhead to a request. +However, using computationally expensive named queries on a large number of hits may add significant overhead. ** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute. ** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node. ** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. @@ -1018,6 +1024,12 @@ If the request can target data streams, this argument determines whether wildcar Supports a list of values, such as `open,hidden`. ** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices will be ignored when frozen. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`include_named_queries_score` (Optional, boolean)*: Indicates whether hit.matched_queries should be rendered as a map that includes +the name of the matched query associated with its score (true) +or as an array containing the name of the matched queries (false) +This functionality reruns each named query on every hit in a search response. +Typically, this adds a small overhead to a request. +However, using computationally expensive named queries on a large number of hits may add significant overhead. 
** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can only be used when the `q` query string parameter is specified. ** *`max_concurrent_shard_requests` (Optional, number)*: Defines the number of concurrent shard requests per node this search executes concurrently. @@ -1682,7 +1694,7 @@ client.cat.componentTemplates({ ... }) [discrete] ==== count Get a document count. -Provides quick access to a document count for a data stream, an index, or an entire cluster.n/ +Provides quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. @@ -2682,7 +2694,7 @@ client.cluster.putComponentTemplate({ name, template }) * *Request (object):* ** *`name` (string)*: Name of the component template to create. -Elasticsearch includes the following built-in component templates: `logs-mappings`; 'logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. +Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. @@ -4155,6 +4167,8 @@ Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== data_streams_stats @@ -4268,6 +4282,7 @@ client.indices.deleteDataStream({ name }) * *Request (object):* ** *`name` (string | string[])*: List of data streams to delete. Wildcard (`*`) expressions are supported. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. Supports a list of values,such as `open,hidden`. [discrete] @@ -4632,6 +4647,7 @@ To target all data streams, omit this parameter or use `*` or `_all`. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. 
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== get_data_stream
@@ -4653,6 +4669,7 @@ Wildcard (`*`) expressions are supported. If omitted, all data streams are retur
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match.
Supports a list of values, such as `open,hidden`.
** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== get_field_mapping
@@ -4820,6 +4837,8 @@ client.indices.migrateToDataStream({ name })

* *Request (object):*
** *`name` (string)*: Name of the index alias to convert to a data stream.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== modify_data_stream
@@ -4887,6 +4906,7 @@ client.indices.promoteDataStream({ name })

* *Request (object):*
** *`name` (string)*: The name of the data stream
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== put_alias
@@ -6387,7 +6407,7 @@ learning node capacity for it to be immediately assigned to a node.

[discrete]
==== flush_job
-Forces any buffered data to be processed by the job.
+Force buffered data to be processed.
The flush jobs API is only applicable when sending data for analysis using
the post data API. Depending on the content of the buffer, then it might
additionally calculate new results. Both flush and close operations are
@@ -6416,12 +6436,12 @@ client.ml.flushJob({ job_id })

[discrete]
==== forecast
-Predicts the future behavior of a time series by using its historical
-behavior.
+Predict future behavior of a time series.

Forecasts are not supported for jobs that perform population analysis; an
error occurs if you try to create a forecast for a job that has an
-`over_field_name` in its configuration.
+`over_field_name` in its configuration. Forecasts predict future behavior
+based on historical data.

{ref}/ml-forecast.html[Endpoint documentation]
[source,ts]
@@ -6441,7 +6461,7 @@ create a forecast; otherwise, an error occurs.

[discrete]
==== get_buckets
-Retrieves anomaly detection job results for one or more buckets.
+Get anomaly detection job results for buckets.
The API presents a chronological view of the records, grouped by bucket.

{ref}/ml-get-bucket.html[Endpoint documentation]
[source,ts]
@@ -6470,7 +6490,7 @@ parameter, the API returns information about all buckets.

[discrete]
==== get_calendar_events
-Retrieves information about the scheduled events in calendars.
+Get info about events in calendars.
{ref}/ml-get-calendar-event.html[Endpoint documentation] [source,ts] @@ -6491,7 +6511,7 @@ client.ml.getCalendarEvents({ calendar_id }) [discrete] ==== get_calendars -Retrieves configuration information for calendars. +Get calendar configuration info. {ref}/ml-get-calendar.html[Endpoint documentation] [source,ts] @@ -6510,7 +6530,7 @@ client.ml.getCalendars({ ... }) [discrete] ==== get_categories -Retrieves anomaly detection job results for one or more categories. +Get anomaly detection job results for categories. {ref}/ml-get-category.html[Endpoint documentation] [source,ts] @@ -6536,7 +6556,7 @@ This parameter has the `from` and `size` properties. [discrete] ==== get_data_frame_analytics -Retrieves configuration information for data frame analytics jobs. +Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. @@ -6573,7 +6593,7 @@ be retrieved and then added to another cluster. [discrete] ==== get_data_frame_analytics_stats -Retrieves usage information for data frame analytics jobs. +Get data frame analytics jobs usage info. {ref}/get-dfanalytics-stats.html[Endpoint documentation] [source,ts] @@ -6605,7 +6625,7 @@ there are no matches or only partial matches. [discrete] ==== get_datafeed_stats -Retrieves usage information for datafeeds. +Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the @@ -6639,7 +6659,7 @@ partial matches. If this parameter is `false`, the request returns a [discrete] ==== get_datafeeds -Retrieves configuration information for datafeeds. +Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the @@ -6675,7 +6695,7 @@ be retrieved and then added to another cluster. [discrete] ==== get_filters -Retrieves filters. +Get filters. You can get a single filter or all filters. {ref}/ml-get-filter.html[Endpoint documentation] @@ -6694,7 +6714,7 @@ client.ml.getFilters({ ... }) [discrete] ==== get_influencers -Retrieves anomaly detection job results for one or more influencers. +Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. @@ -6729,7 +6749,7 @@ means it is unset and results are not limited to specific timestamps. [discrete] ==== get_job_stats -Retrieves usage information for anomaly detection jobs. +Get anomaly detection jobs usage info. {ref}/ml-get-job-stats.html[Endpoint documentation] [source,ts] @@ -6758,7 +6778,7 @@ code when there are no matches or only partial matches. [discrete] ==== get_jobs -Retrieves configuration information for anomaly detection jobs. +Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using @@ -6793,6 +6813,7 @@ be retrieved and then added to another cluster. 
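
As an illustrative aside (a minimal sketch, not generated reference output; the job ID `my-job` is hypothetical), fetching anomaly detection job configuration info with this API might look like:

[source,js]
----
// Hedged sketch: read configuration info for a hypothetical job ID.
// `allow_no_match: true` returns an empty jobs array instead of an
// error when the ID matches nothing.
const response = await client.ml.getJobs({
  job_id: 'my-job',
  allow_no_match: true,
});
console.log(response.count, response.jobs);
----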
[discrete]
==== get_memory_stats
+Get machine learning memory usage info.
Get information about how machine learning jobs and trained models are
using memory, on each node, both within the JVM heap, and natively, outside
of the JVM.
@@ -6817,7 +6838,7 @@ fails and returns an error.

[discrete]
==== get_model_snapshot_upgrade_stats
-Retrieves usage information for anomaly detection job model snapshot upgrades.
+Get anomaly detection job model snapshot upgrade usage info.

{ref}/ml-get-job-model-snapshot-upgrade-stats.html[Endpoint documentation]
[source,ts]
@@ -6845,7 +6866,7 @@ no matches or only partial matches.

[discrete]
==== get_model_snapshots
-Retrieves information about model snapshots.
+Get model snapshots info.

{ref}/ml-get-snapshot.html[Endpoint documentation]
[source,ts]
@@ -6871,7 +6892,9 @@ by specifying `*` as the snapshot ID, or by omitting the snapshot ID.

[discrete]
==== get_overall_buckets
-Retrieves overall bucket results that summarize the bucket results of
+Get overall bucket results.
+
+Retrieves overall bucket results that summarize the bucket results of
multiple anomaly detection jobs.

The `overall_score` is calculated by combining the scores of all the
@@ -6915,7 +6938,7 @@ using `_all` or by specifying `*` as the ``.

[discrete]
==== get_records
-Retrieves anomaly records for an anomaly detection job.
+Get anomaly records for an anomaly detection job.
Records contain the detailed analytical results. They describe the anomalous
activity that has been identified in the input data based on the detector
configuration.
@@ -6950,7 +6973,7 @@ client.ml.getRecords({ job_id })

[discrete]
==== get_trained_models
-Retrieves configuration information for a trained model.
+Get trained model configuration info.

{ref}/get-trained-models.html[Endpoint documentation]
[source,ts]
@@ -6990,7 +7013,7 @@ tags are returned.

[discrete]
==== get_trained_models_stats
-Retrieves usage information for trained models. You can get usage information for multiple trained
+Get trained models usage info.
+You can get usage information for multiple trained
models in a single API request by using a comma-separated list of model IDs or a wildcard expression.

{ref}/get-trained-models-stats.html[Endpoint documentation]
[source,ts]
@@ -7018,7 +7042,7 @@ subset of results when there are partial matches.

[discrete]
==== infer_trained_model
-Evaluates a trained model.
+Evaluate a trained model.

{ref}/infer-trained-model.html[Endpoint documentation]
[source,ts]
@@ -7039,6 +7063,7 @@ Currently, for NLP models, only a single value is allowed.

[discrete]
==== info
+Return ML defaults and limits.
Returns defaults and limits used by machine learning.
This endpoint is designed to be used by a user interface that needs to fully
understand machine learning configurations where some options are not
@@ -7057,9 +7082,8 @@ client.ml.info()

[discrete]
==== open_job
Open anomaly detection jobs.
-An anomaly detection job must be opened in order for it to be ready to
-receive and analyze data. It can be opened and closed multiple times
-throughout its lifecycle.
+An anomaly detection job must be opened to be ready to receive and analyze
+data. It can be opened and closed multiple times throughout its lifecycle.
When you open a new job, it starts with an empty model.
When you open an existing job, the most recent model state is automatically
loaded.
The job is ready to resume its analysis from where it left off, once
new data is received.

{ref}/ml-open-job.html[Endpoint documentation]
[source,ts]
@@ -7080,7 +7104,7 @@ client.ml.openJob({ job_id })

[discrete]
==== post_calendar_events
-Adds scheduled events to a calendar.
+Add scheduled events to the calendar.

{ref}/ml-post-calendar-event.html[Endpoint documentation]
[source,ts]
@@ -7097,7 +7121,7 @@ client.ml.postCalendarEvents({ calendar_id, events })

[discrete]
==== post_data
-Sends data to an anomaly detection job for analysis.
+Send data to an anomaly detection job for analysis.

IMPORTANT: For each job, data can be accepted from only a single connection at a time.
It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.
@@ -7119,6 +7143,7 @@ client.ml.postData({ job_id })

[discrete]
==== preview_data_frame_analytics
+Preview features used by data frame analytics.
Previews the extracted features used by a data frame analytics config.

{ref}/preview-dfanalytics.html[Endpoint documentation]
[source,ts]
@@ -7138,7 +7163,7 @@ this API.

[discrete]
==== preview_datafeed
-Previews a datafeed.
+Preview a datafeed.
This API returns the first "page" of search results from a datafeed.
You can preview an existing datafeed or provide configuration details for a datafeed
and anomaly detection job in the API.
The preview shows the structure of the data
@@ -7172,7 +7197,7 @@ used. You cannot specify a `job_config` object unless you also supply a `datafee

[discrete]
==== put_calendar
-Creates a calendar.
+Create a calendar.

{ref}/ml-put-calendar.html[Endpoint documentation]
[source,ts]
@@ -7190,7 +7215,7 @@ client.ml.putCalendar({ calendar_id })

[discrete]
==== put_calendar_job
-Adds an anomaly detection job to a calendar.
+Add anomaly detection job to calendar.

{ref}/ml-put-calendar-job.html[Endpoint documentation]
[source,ts]
@@ -7207,7 +7232,7 @@ client.ml.putCalendarJob({ calendar_id, job_id })

[discrete]
==== put_data_frame_analytics
-Instantiates a data frame analytics job.
+Create a data frame analytics job.
This API creates a data frame analytics job that performs an analysis on the
source indices and stores the outcome in a destination index.
@@ -7280,7 +7305,7 @@ greater than that setting.

[discrete]
==== put_datafeed
-Instantiates a datafeed.
+Create a datafeed.
Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.
You can associate only one datafeed with each anomaly detection job.
The datafeed contains a query that runs at a defined interval (`frequency`).
@@ -7350,7 +7375,7 @@ whether wildcard expressions match hidden data streams. Supports a list of value

[discrete]
==== put_filter
-Instantiates a filter.
+Create a filter.
A filter contains a list of strings.
It can be used by one or more anomaly detection jobs.
Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.
@@ -7403,7 +7428,7 @@ client.ml.putJob({ job_id, analysis_config, data_description })

[discrete]
==== put_trained_model
-Enables you to supply a trained model that is not created by data frame analytics.
+Create a trained model.
+Enables you to supply a trained model that is not created by data frame analytics.

{ref}/put-trained-models.html[Endpoint documentation]
[source,ts]
@@ -7449,8 +7475,9 @@ to complete.

[discrete]
==== put_trained_model_alias
-Creates or updates a trained model alias. A trained model alias is a logical
-name used to reference a single trained model.
+Create or update a trained model alias.
+A trained model alias is a logical name used to reference a single trained +model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. @@ -7484,7 +7511,7 @@ already assigned and this parameter is false, the API returns an error. [discrete] ==== put_trained_model_definition_part -Creates part of a trained model definition. +Create part of a trained model definition. {ref}/put-trained-model-definition-part.html[Endpoint documentation] [source,ts] @@ -7505,7 +7532,7 @@ order of their part number. The first part must be `0` and the final part must b [discrete] ==== put_trained_model_vocabulary -Creates a trained model vocabulary. +Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. @@ -7526,7 +7553,7 @@ client.ml.putTrainedModelVocabulary({ model_id, vocabulary }) [discrete] ==== reset_job -Resets an anomaly detection job. +Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a @@ -7551,7 +7578,7 @@ reset. [discrete] ==== revert_model_snapshot -Reverts to a specific snapshot. +Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior @@ -7578,6 +7605,7 @@ scratch when it is started. [discrete] ==== set_upgrade_mode +Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your @@ -7608,7 +7636,7 @@ starting. [discrete] ==== start_data_frame_analytics -Starts a data frame analytics job. +Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the @@ -7639,7 +7667,7 @@ starts. [discrete] ==== start_datafeed -Starts one or more datafeeds. +Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. @@ -7672,7 +7700,8 @@ characters. [discrete] ==== start_trained_model_deployment -Starts a trained model deployment, which allocates the model to every machine learning node. +Start a trained model deployment. +It allocates the model to every machine learning node. {ref}/start-trained-model-deployment.html[Endpoint documentation] [source,ts] @@ -7708,7 +7737,7 @@ it will automatically be changed to a value less than the number of hardware thr [discrete] ==== stop_data_frame_analytics -Stops one or more data frame analytics jobs. +Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. @@ -7742,7 +7771,7 @@ stops. Defaults to 20 seconds. [discrete] ==== stop_datafeed -Stops one or more datafeeds. +Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. @@ -7765,7 +7794,7 @@ the identifier. 
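
For illustration only (a minimal sketch; the datafeed ID `my-datafeed` and the 30 second timeout are hypothetical), stopping a datafeed might look like:

[source,js]
----
// Hedged sketch: stop a hypothetical datafeed, waiting up to 30 seconds
// for it to stop before the request returns.
const response = await client.ml.stopDatafeed({
  datafeed_id: 'my-datafeed',
  timeout: '30s',
});
console.log(response.stopped);
----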
[discrete] ==== stop_trained_model_deployment -Stops a trained model deployment. +Stop a trained model deployment. {ref}/stop-trained-model-deployment.html[Endpoint documentation] [source,ts] @@ -7787,7 +7816,7 @@ restart the model deployment. [discrete] ==== update_data_frame_analytics -Updates an existing data frame analytics job. +Update a data frame analytics job. {ref}/update-dfanalytics.html[Endpoint documentation] [source,ts] @@ -7817,7 +7846,7 @@ learning node capacity for it to be immediately assigned to a node. [discrete] ==== update_datafeed -Updates the properties of a datafeed. +Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, @@ -7890,6 +7919,7 @@ whether wildcard expressions match hidden data streams. Supports a list of value [discrete] ==== update_filter +Update a filter. Updates the description of a filter, adds items, or removes items from the list. {ref}/ml-update-filter.html[Endpoint documentation] @@ -7909,6 +7939,7 @@ client.ml.updateFilter({ filter_id }) [discrete] ==== update_job +Update an anomaly detection job. Updates certain properties of an anomaly detection job. {ref}/ml-update-job.html[Endpoint documentation] @@ -7974,6 +8005,7 @@ value is null, which means all results are retained. [discrete] ==== update_model_snapshot +Update a snapshot. Updates certain properties of a snapshot. {ref}/ml-update-snapshot.html[Endpoint documentation] @@ -7995,7 +8027,7 @@ snapshot will be deleted when the job is deleted. [discrete] ==== update_trained_model_deployment -Starts a trained model deployment, which allocates the model to every machine learning node. +Update a trained model deployment. {ref}/update-trained-model-deployment.html[Endpoint documentation] [source,ts] @@ -8017,6 +8049,7 @@ it will automatically be changed to a value less than the number of hardware thr [discrete] ==== upgrade_job_snapshot +Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous @@ -10669,7 +10702,7 @@ client.synonyms.putSynonym({ id, synonyms_set }) * *Request (object):* ** *`id` (string)*: The id of the synonyms set to be created or updated -** *`synonyms_set` ({ id, synonyms }[])*: The synonym set information to update +** *`synonyms_set` ({ id, synonyms } | { id, synonyms }[])*: The synonym set information to update [discrete] ==== put_synonym_rule diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index f157677ca..e06c34728 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -165,7 +165,7 @@ export default class Cat { } /** - * Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster.n/ The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. + * Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster. 
The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html | Elasticsearch API documentation}
*/
async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index 62ac0df56..d45ba30d7 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -672,7 +672,7 @@ export default class Ml {
}

/**
- * Forces any buffered data to be processed by the job. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.
+ * Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-flush-job.html | Elasticsearch API documentation}
*/
async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -716,7 +716,7 @@ export default class Ml {
}

/**
- * Predicts the future behavior of a time series by using its historical behavior. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration.
+ * Predict future behavior of a time series. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-forecast.html | Elasticsearch API documentation}
*/
async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -760,7 +760,7 @@ export default class Ml {
}

/**
- * Retrieves anomaly detection job results for one or more buckets. The API presents a chronological view of the records, grouped by bucket.
+ * Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-bucket.html | Elasticsearch API documentation} */ async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -812,7 +812,7 @@ export default class Ml { } /** - * Retrieves information about the scheduled events in calendars. + * Get info about events in calendars. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar-event.html | Elasticsearch API documentation} */ async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -844,7 +844,7 @@ export default class Ml { } /** - * Retrieves configuration information for calendars. + * Get calendar configuration info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar.html | Elasticsearch API documentation} */ async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -896,7 +896,7 @@ export default class Ml { } /** - * Retrieves anomaly detection job results for one or more categories. + * Get anomaly detection job results for categories. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-category.html | Elasticsearch API documentation} */ async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -948,7 +948,7 @@ export default class Ml { } /** - * Retrieves configuration information for data frame analytics jobs. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. + * Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics.html | Elasticsearch API documentation} */ async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -988,7 +988,7 @@ export default class Ml { } /** - * Retrieves usage information for data frame analytics jobs. + * Get data frame analytics jobs usage info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics-stats.html | Elasticsearch API documentation} */ async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1028,7 +1028,7 @@ export default class Ml { } /** - * Retrieves usage information for datafeeds. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. + * Get datafeeds usage info. 
You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed-stats.html | Elasticsearch API documentation} */ async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1068,7 +1068,7 @@ export default class Ml { } /** - * Retrieves configuration information for datafeeds. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. This API returns a maximum of 10,000 datafeeds. + * Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. This API returns a maximum of 10,000 datafeeds. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed.html | Elasticsearch API documentation} */ async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1108,7 +1108,7 @@ export default class Ml { } /** - * Retrieves filters. You can get a single filter or all filters. + * Get filters. You can get a single filter or all filters. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-filter.html | Elasticsearch API documentation} */ async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1148,7 +1148,7 @@ export default class Ml { } /** - * Retrieves anomaly detection job results for one or more influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. + * Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-influencer.html | Elasticsearch API documentation} */ async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1192,7 +1192,7 @@ export default class Ml { } /** - * Retrieves usage information for anomaly detection jobs. + * Get anomaly detection jobs usage info. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-stats.html | Elasticsearch API documentation} */ async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1232,7 +1232,7 @@ export default class Ml { } /** - * Retrieves configuration information for anomaly detection jobs. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. + * Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job.html | Elasticsearch API documentation} */ async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1272,7 +1272,7 @@ export default class Ml { } /** - * Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM. + * Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-memory.html | Elasticsearch API documentation} */ async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1312,7 +1312,7 @@ export default class Ml { } /** - * Retrieves usage information for anomaly detection job model snapshot upgrades. + * Get anomaly detection job model snapshot upgrade usage info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-model-snapshot-upgrade-stats.html | Elasticsearch API documentation} */ async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1345,7 +1345,7 @@ export default class Ml { } /** - * Retrieves information about model snapshots. + * Get model snapshots info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-snapshot.html | Elasticsearch API documentation} */ async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1397,7 +1397,7 @@ export default class Ml { } /** - * Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. 
This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span.
+ * Get overall bucket results. Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-overall-buckets.html | Elasticsearch API documentation}
*/
async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1441,7 +1441,7 @@ export default class Ml {
}

/**
- * Retrieves anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors.
+ * Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-record.html | Elasticsearch API documentation} */ async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1485,7 +1485,7 @@ export default class Ml { } /** - * Retrieves configuration information for a trained model. + * Get trained model configuration info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models.html | Elasticsearch API documentation} */ async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1525,7 +1525,7 @@ export default class Ml { } /** - * Retrieves usage information for trained models. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. + * Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models-stats.html | Elasticsearch API documentation} */ async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1565,7 +1565,7 @@ export default class Ml { } /** - * Evaluates a trained model. + * Evaluate a trained model. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html | Elasticsearch API documentation} */ async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1609,7 +1609,7 @@ export default class Ml { } /** - * Returns defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. + * Return ML defaults and limits. Returns defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-info.html | Elasticsearch API documentation} */ async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1639,7 +1639,7 @@ export default class Ml { } /** - * Open anomaly detection jobs. An anomaly detection job must be opened in order for it to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. 
The job is ready to resume its analysis from where it left off, once new data is received. + * Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-open-job.html | Elasticsearch API documentation} */ async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1683,7 +1683,7 @@ export default class Ml { } /** - * Adds scheduled events to a calendar. + * Add scheduled events to the calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-calendar-event.html | Elasticsearch API documentation} */ async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1727,7 +1727,7 @@ export default class Ml { } /** - * Sends data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. + * Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-data.html | Elasticsearch API documentation} */ async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1764,7 +1764,7 @@ export default class Ml { } /** - * Previews the extracted features used by a data frame analytics config. + * Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/preview-dfanalytics.html | Elasticsearch API documentation} */ async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1816,7 +1816,7 @@ export default class Ml { } /** - * Previews a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. + * Preview a datafeed. This API returns the first "page" of search results from a datafeed. 
You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-preview-datafeed.html | Elasticsearch API documentation} */ async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -1868,7 +1868,7 @@ export default class Ml { } /** - * Creates a calendar. + * Create a calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar.html | Elasticsearch API documentation} */ async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1912,7 +1912,7 @@ export default class Ml { } /** - * Adds an anomaly detection job to a calendar. + * Add anomaly detection job to calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar-job.html | Elasticsearch API documentation} */ async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1945,7 +1945,7 @@ export default class Ml { } /** - * Instantiates a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. + * Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-dfanalytics.html | Elasticsearch API documentation} */ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1989,7 +1989,7 @@ export default class Ml { } /** - * Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. + * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. 
The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-datafeed.html | Elasticsearch API documentation}
*/
async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -2033,7 +2033,7 @@ export default class Ml {
}

/**
- * Instantiates a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.
+ * Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-filter.html | Elasticsearch API documentation}
*/
async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -2121,7 +2121,7 @@ export default class Ml {
}

/**
- * Enables you to supply a trained model that is not created by data frame analytics.
+ * Create a trained model. Enables you to supply a trained model that is not created by data frame analytics.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models.html | Elasticsearch API documentation}
*/
async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -2165,7 +2165,7 @@ export default class Ml {
}

/**
- * Creates or updates a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning.
+ * Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models.
For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models-aliases.html | Elasticsearch API documentation} */ async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2198,7 +2198,7 @@ export default class Ml { } /** - * Creates part of a trained model definition. + * Create part of a trained model definition. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-definition-part.html | Elasticsearch API documentation} */ async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2243,7 +2243,7 @@ export default class Ml { } /** - * Creates a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. + * Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-vocabulary.html | Elasticsearch API documentation} */ async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2287,7 +2287,7 @@ export default class Ml { } /** - * Resets an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. + * Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-reset-job.html | Elasticsearch API documentation} */ async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2319,7 +2319,7 @@ export default class Ml { } /** - * Reverts to a specific snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. 
In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. + * Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-revert-snapshot.html | Elasticsearch API documentation} */ async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2364,7 +2364,7 @@ export default class Ml { } /** - * Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API. + * Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-set-upgrade-mode.html | Elasticsearch API documentation} */ async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2394,7 +2394,7 @@ export default class Ml { } /** - * Starts a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. 
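For illustration only, starting a data frame analytics job from the JS client might look like the sketch below; the job ID is a hypothetical placeholder:

[source, js]
----
const response = await client.ml.startDataFrameAnalytics({
  id: "my-dfa-job", // hypothetical data frame analytics job ID
  timeout: "1m", // how long to wait for the job to start
});
console.log(response);
----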
If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. + * Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-dfanalytics.html | Elasticsearch API documentation} */ async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2426,7 +2426,7 @@ export default class Ml { } /** - * Starts one or more datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. + * Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. 
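A non-authoritative sketch of starting a datafeed as described above; the datafeed ID and time bounds are hypothetical:

[source, js]
----
const response = await client.ml.startDatafeed({
  datafeed_id: "datafeed-my-job", // hypothetical datafeed ID
  start: "2024-01-01T00:00:00Z", // hypothetical start of the analysis window
  timeout: "30s",
});
console.log(response);
----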
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-start-datafeed.html | Elasticsearch API documentation} */ async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2470,7 +2470,7 @@ export default class Ml { } /** - * Starts a trained model deployment, which allocates the model to every machine learning node. + * Start a trained model deployment. It allocates the model to every machine learning node. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html | Elasticsearch API documentation} */ async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2502,7 +2502,7 @@ export default class Ml { } /** - * Stops one or more data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. + * Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-dfanalytics.html | Elasticsearch API documentation} */ async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2534,7 +2534,7 @@ export default class Ml { } /** - * Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. + * Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-stop-datafeed.html | Elasticsearch API documentation} */ async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2578,7 +2578,7 @@ export default class Ml { } /** - * Stops a trained model deployment. + * Stop a trained model deployment. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html | Elasticsearch API documentation} */ async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2610,7 +2610,7 @@ export default class Ml { } /** - * Updates an existing data frame analytics job. + * Update a data frame analytics job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-dfanalytics.html | Elasticsearch API documentation} */ async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2654,7 +2654,7 @@ export default class Ml { } /** - * Updates the properties of a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. 
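As a hedged example, updating a datafeed's query might look like this; the identifiers and query are hypothetical:

[source, js]
----
// Stop and restart the datafeed for the change to be applied.
const response = await client.ml.updateDatafeed({
  datafeed_id: "datafeed-my-job", // hypothetical datafeed ID
  query: {
    term: {
      "event.dataset": "my-dataset", // hypothetical filter
    },
  },
});
console.log(response);
----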
If you provide secondary authorization headers, those credentials are used instead. + * Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-datafeed.html | Elasticsearch API documentation} */ async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2698,7 +2698,7 @@ export default class Ml { } /** - * Updates the description of a filter, adds items, or removes items from the list. + * Update a filter. Updates the description of a filter, adds items, or removes items from the list. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-filter.html | Elasticsearch API documentation} */ async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2742,7 +2742,7 @@ export default class Ml { } /** - * Updates certain properties of an anomaly detection job. + * Update an anomaly detection job. Updates certain properties of an anomaly detection job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-job.html | Elasticsearch API documentation} */ async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2786,7 +2786,7 @@ export default class Ml { } /** - * Updates certain properties of a snapshot. + * Update a snapshot. Updates certain properties of a snapshot. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-snapshot.html | Elasticsearch API documentation} */ async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2831,7 +2831,7 @@ export default class Ml { } /** - * Starts a trained model deployment, which allocates the model to every machine learning node. + * Update a trained model deployment. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-trained-model-deployment.html | Elasticsearch API documentation} */ async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2875,7 +2875,7 @@ export default class Ml { } /** - * Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. + * Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. 
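An illustrative sketch of the snapshot upgrade described here; the job and snapshot IDs are hypothetical:

[source, js]
----
const response = await client.ml.upgradeJobSnapshot({
  job_id: "my-job", // hypothetical job ID
  snapshot_id: "1575402236", // hypothetical snapshot ID
  wait_for_completion: true, // block until the upgrade finishes
});
console.log(response);
----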
Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-upgrade-job-model-snapshot.html | Elasticsearch API documentation} */ async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index ce9cb09ab..535b8fc47 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -773,6 +773,7 @@ export interface MsearchRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_throttled?: boolean ignore_unavailable?: boolean + include_named_queries_score?: boolean max_concurrent_searches?: long max_concurrent_shard_requests?: long pre_filter_shard_size?: long @@ -1132,6 +1133,7 @@ export interface SearchRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_throttled?: boolean ignore_unavailable?: boolean + include_named_queries_score?: boolean lenient?: boolean max_concurrent_shard_requests?: long min_compatible_shard_node?: VersionString @@ -1421,7 +1423,7 @@ export interface SearchHit { fields?: Record highlight?: Record inner_hits?: Record - matched_queries?: string[] + matched_queries?: string[] | Record _nested?: SearchNestedIdentity _ignored?: string[] ignored_field_values?: Record @@ -2294,6 +2296,7 @@ export interface KnnQuery extends QueryDslQueryBase { query_vector?: QueryVector query_vector_builder?: QueryVectorBuilder num_candidates?: integer + k?: integer filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float } @@ -4254,8 +4257,8 @@ export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisCustomAnalyzer { type: 'custom' - char_filter?: string[] - filter?: string[] + char_filter?: string | string[] + filter?: string | string[] position_increment_gap?: integer position_offset_gap?: integer tokenizer: string @@ -10872,6 +10875,8 @@ export interface IndicesCreateResponse { export interface IndicesCreateDataStreamRequest extends RequestBase { name: DataStreamName + master_timeout?: Duration + timeout?: Duration } export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase @@ -10929,6 +10934,7 @@ export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase export interface IndicesDeleteDataStreamRequest extends RequestBase { name: DataStreamNames + master_timeout?: Duration expand_wildcards?: ExpandWildcards } @@ -11156,6 +11162,7 @@ export interface IndicesGetDataLifecycleRequest extends RequestBase { name: DataStreamNames expand_wildcards?: ExpandWildcards include_defaults?: boolean + master_timeout?: Duration } export interface IndicesGetDataLifecycleResponse { @@ -11166,6 +11173,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames expand_wildcards?: ExpandWildcards include_defaults?: boolean + master_timeout?: Duration } export interface IndicesGetDataStreamResponse { @@ -11246,6 +11254,8 @@ export type IndicesGetTemplateResponse = Record export interface IndicesMigrateToDataStreamRequest extends RequestBase { name: IndexName + master_timeout?: 
Duration + timeout?: Duration } export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase @@ -11283,6 +11293,7 @@ export interface IndicesOpenResponse { export interface IndicesPromoteDataStreamRequest extends RequestBase { name: IndexName + master_timeout?: Duration } export type IndicesPromoteDataStreamResponse = any @@ -13877,7 +13888,7 @@ export interface MlTrainedModelDeploymentStats { error_count: integer inference_count: integer model_id: Id - nodes: MlTrainedModelDeploymentNodesStats + nodes: MlTrainedModelDeploymentNodesStats[] number_of_allocations: integer queue_capacity: integer rejected_execution_count: integer @@ -13912,7 +13923,7 @@ export interface MlTrainedModelInferenceStats { failure_count: integer inference_count: integer missing_all_fields_count: integer - timestamp: DateTime + timestamp: EpochTime } export interface MlTrainedModelLocation { @@ -18299,7 +18310,7 @@ export interface SynonymsGetSynonymsSetsSynonymsSetItem { export interface SynonymsPutSynonymRequest extends RequestBase { id: Id - synonyms_set: SynonymsSynonymRule[] + synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[] } export interface SynonymsPutSynonymResponse { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index cef483161..3f4b7ece2 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -801,6 +801,7 @@ export interface MsearchRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_throttled?: boolean ignore_unavailable?: boolean + include_named_queries_score?: boolean max_concurrent_searches?: long max_concurrent_shard_requests?: long pre_filter_shard_size?: long @@ -1184,6 +1185,7 @@ export interface SearchRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_throttled?: boolean ignore_unavailable?: boolean + include_named_queries_score?: boolean lenient?: boolean max_concurrent_shard_requests?: long min_compatible_shard_node?: VersionString @@ -1476,7 +1478,7 @@ export interface SearchHit { fields?: Record highlight?: Record inner_hits?: Record - matched_queries?: string[] + matched_queries?: string[] | Record _nested?: SearchNestedIdentity _ignored?: string[] ignored_field_values?: Record @@ -2367,6 +2369,7 @@ export interface KnnQuery extends QueryDslQueryBase { query_vector?: QueryVector query_vector_builder?: QueryVectorBuilder num_candidates?: integer + k?: integer filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float } @@ -4327,8 +4330,8 @@ export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisCustomAnalyzer { type: 'custom' - char_filter?: string[] - filter?: string[] + char_filter?: string | string[] + filter?: string | string[] position_increment_gap?: integer position_offset_gap?: integer tokenizer: string @@ -11055,6 +11058,8 @@ export interface IndicesCreateResponse { export interface IndicesCreateDataStreamRequest extends RequestBase { name: DataStreamName + master_timeout?: Duration + timeout?: Duration } export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase @@ -11112,6 +11117,7 @@ export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase export interface IndicesDeleteDataStreamRequest extends RequestBase { name: DataStreamNames + master_timeout?: Duration expand_wildcards?: ExpandWildcards } @@ -11340,6 +11346,7 @@ export interface IndicesGetDataLifecycleRequest extends RequestBase { name: DataStreamNames expand_wildcards?: ExpandWildcards include_defaults?: boolean + 
master_timeout?: Duration } export interface IndicesGetDataLifecycleResponse { @@ -11350,6 +11357,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames expand_wildcards?: ExpandWildcards include_defaults?: boolean + master_timeout?: Duration } export interface IndicesGetDataStreamResponse { @@ -11430,6 +11438,8 @@ export type IndicesGetTemplateResponse = Record export interface IndicesMigrateToDataStreamRequest extends RequestBase { name: IndexName + master_timeout?: Duration + timeout?: Duration } export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase @@ -11470,6 +11480,7 @@ export interface IndicesOpenResponse { export interface IndicesPromoteDataStreamRequest extends RequestBase { name: IndexName + master_timeout?: Duration } export type IndicesPromoteDataStreamResponse = any @@ -14112,7 +14123,7 @@ export interface MlTrainedModelDeploymentStats { error_count: integer inference_count: integer model_id: Id - nodes: MlTrainedModelDeploymentNodesStats + nodes: MlTrainedModelDeploymentNodesStats[] number_of_allocations: integer queue_capacity: integer rejected_execution_count: integer @@ -14147,7 +14158,7 @@ export interface MlTrainedModelInferenceStats { failure_count: integer inference_count: integer missing_all_fields_count: integer - timestamp: DateTime + timestamp: EpochTime } export interface MlTrainedModelLocation { @@ -18774,7 +18785,7 @@ export interface SynonymsPutSynonymRequest extends RequestBase { id: Id /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - synonyms_set: SynonymsSynonymRule[] + synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[] } } From bf4c57f7bc5ab5592c55b0647ba9b7e5fdb86fd4 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 22 Aug 2024 11:18:31 -0500 Subject: [PATCH 380/647] Make client more ESM-friendly (#2348) * Use node: prefix for stdlib imports * Make code more ESM-friendly * Add missing mjs file * Drop mjs file from package.json --- package.json | 8 +++++--- src/client.ts | 15 +++++++++------ src/helpers.ts | 10 +++++----- src/sniffingTransport.ts | 2 +- test/integration/helper.js | 2 +- test/integration/helpers/bulk.test.js | 4 ++-- test/integration/helpers/msearch.test.js | 4 ++-- test/integration/helpers/scroll.test.js | 4 ++-- test/integration/helpers/search.test.js | 4 ++-- test/integration/reporter.js | 2 +- test/unit/helpers/bulk.test.ts | 22 +++++++++++----------- test/unit/helpers/search.test.ts | 3 ++- test/utils/buildCluster.ts | 2 +- 13 files changed, 44 insertions(+), 38 deletions(-) diff --git a/package.json b/package.json index 0eb974659..520ed4b04 100644 --- a/package.json +++ b/package.json @@ -3,9 +3,11 @@ "version": "8.15.0", "versionCanary": "8.15.0-canary.0", "description": "The official Elasticsearch client for Node.js", - "main": "index.js", + "main": "./index.js", "types": "index.d.ts", - "type": "commonjs", + "exports": { + "require": "./index.js" + }, "scripts": { "test": "npm run build && npm run lint && tap test/unit/{*,**/*}.test.ts", "test:unit": "npm run build && tap test/unit/{*,**/*}.test.ts", @@ -17,7 +19,7 @@ "lint:fix": "ts-standard --fix src", "license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause;0BSD'", "prebuild": "npm run clean-build && npm run lint", - "build": "tsc", + "build": "tsc && rm lib/package.json && mv lib/src/* lib/ && rm -rf lib/src", "clean-build": "rimraf ./lib && mkdir lib", "prepublishOnly": "npm run 
build" }, diff --git a/src/client.ts b/src/client.ts index 12a88cc24..068f3573b 100644 --- a/src/client.ts +++ b/src/client.ts @@ -17,10 +17,11 @@ * under the License. */ -import { ConnectionOptions as TlsConnectionOptions } from 'tls' -import { URL } from 'url' -import buffer from 'buffer' -import os from 'os' +import process from 'node:process' +import { ConnectionOptions as TlsConnectionOptions } from 'node:tls' +import { URL } from 'node:url' +import buffer from 'node:buffer' +import os from 'node:os' import { Transport, UndiciConnection, @@ -48,16 +49,18 @@ import BaseConnection, { prepareHeaders, ConnectionOptions } from '@elastic/tran import SniffingTransport from './sniffingTransport' import Helpers from './helpers' import API from './api' +import packageJson from '../package.json' +import transportPackageJson from '@elastic/transport/package.json' const kChild = Symbol('elasticsearchjs-child') const kInitialOptions = Symbol('elasticsearchjs-initial-options') -let clientVersion: string = require('../package.json').version // eslint-disable-line +let clientVersion: string = packageJson.version /* istanbul ignore next */ if (clientVersion.includes('-')) { // clean prerelease clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' } -let transportVersion: string = require('@elastic/transport/package.json').version // eslint-disable-line +let transportVersion: string = transportPackageJson.version // eslint-disable-line /* istanbul ignore next */ if (transportVersion.includes('-')) { // clean prerelease diff --git a/src/helpers.ts b/src/helpers.ts index 39b1d6bba..62040083a 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -21,9 +21,9 @@ /* eslint-disable @typescript-eslint/promise-function-async */ /* eslint-disable @typescript-eslint/no-unnecessary-type-assertion */ -import assert from 'assert' -import { promisify } from 'util' -import { Readable } from 'stream' +import assert from 'node:assert' +import * as timersPromises from 'node:timers/promises' +import { Readable } from 'node:stream' import { errors, TransportResult, TransportRequestOptions, TransportRequestOptionsWithMeta } from '@elastic/transport' import Client from './client' import * as T from './api/types' @@ -163,8 +163,8 @@ export interface EsqlToRecords { } const { ResponseError, ConfigurationError } = errors -const sleep = promisify(setTimeout) -const pImmediate = promisify(setImmediate) +const sleep = timersPromises.setTimeout +const pImmediate = timersPromises.setImmediate /* istanbul ignore next */ const noop = (): void => {} const kClient = Symbol('elasticsearch-client') diff --git a/src/sniffingTransport.ts b/src/sniffingTransport.ts index 4b02038fb..7c9cec43c 100644 --- a/src/sniffingTransport.ts +++ b/src/sniffingTransport.ts @@ -17,7 +17,7 @@ * under the License. 
*/ -import assert from 'assert' +import assert from 'node:assert' import { Transport, SniffOptions } from '@elastic/transport' export default class SniffingTransport extends Transport { diff --git a/test/integration/helper.js b/test/integration/helper.js index d58252580..fe4e0b422 100644 --- a/test/integration/helper.js +++ b/test/integration/helper.js @@ -19,7 +19,7 @@ 'use strict' -const assert = require('assert') +const assert = require('node:assert') const fetch = require('node-fetch') function runInParallel (client, operation, options, clientOptions) { diff --git a/test/integration/helpers/bulk.test.js b/test/integration/helpers/bulk.test.js index 011f524c3..a1b2be118 100644 --- a/test/integration/helpers/bulk.test.js +++ b/test/integration/helpers/bulk.test.js @@ -19,8 +19,8 @@ 'use strict' -const { createReadStream } = require('fs') -const { join } = require('path') +const { createReadStream } = require('node:fs') +const { join } = require('node:path') const split = require('split2') const { test, beforeEach, afterEach } = require('tap') const { waitCluster } = require('../../utils') diff --git a/test/integration/helpers/msearch.test.js b/test/integration/helpers/msearch.test.js index c9c726ecc..fb317b0f7 100644 --- a/test/integration/helpers/msearch.test.js +++ b/test/integration/helpers/msearch.test.js @@ -19,8 +19,8 @@ 'use strict' -const { createReadStream } = require('fs') -const { join } = require('path') +const { createReadStream } = require('node:fs') +const { join } = require('node:path') const split = require('split2') const { test, beforeEach, afterEach } = require('tap') const { waitCluster } = require('../../utils') diff --git a/test/integration/helpers/scroll.test.js b/test/integration/helpers/scroll.test.js index e197ce21a..36f3b8528 100644 --- a/test/integration/helpers/scroll.test.js +++ b/test/integration/helpers/scroll.test.js @@ -19,8 +19,8 @@ 'use strict' -const { createReadStream } = require('fs') -const { join } = require('path') +const { createReadStream } = require('node:fs') +const { join } = require('node:path') const split = require('split2') const { test, beforeEach, afterEach } = require('tap') const { waitCluster } = require('../../utils') diff --git a/test/integration/helpers/search.test.js b/test/integration/helpers/search.test.js index d4aa57c9a..7a6946a9f 100644 --- a/test/integration/helpers/search.test.js +++ b/test/integration/helpers/search.test.js @@ -19,8 +19,8 @@ 'use strict' -const { createReadStream } = require('fs') -const { join } = require('path') +const { createReadStream } = require('node:fs') +const { join } = require('node:path') const split = require('split2') const { test, beforeEach, afterEach } = require('tap') const { waitCluster } = require('../../utils') diff --git a/test/integration/reporter.js b/test/integration/reporter.js index 5db288b8e..d94e09ba3 100644 --- a/test/integration/reporter.js +++ b/test/integration/reporter.js @@ -1,6 +1,6 @@ 'use strict' -const assert = require('assert') +const assert = require('node:assert') const { create } = require('xmlbuilder2') function createJunitReporter () { diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts index 0a15c3fc6..1f2ddf575 100644 --- a/test/unit/helpers/bulk.test.ts +++ b/test/unit/helpers/bulk.test.ts @@ -18,12 +18,12 @@ */ import FakeTimers from '@sinonjs/fake-timers' -import { AssertionError } from 'assert' -import { createReadStream } from 'fs' -import * as http from 'http' -import { join } from 'path' +import { AssertionError } from 
'node:assert' +import { createReadStream } from 'node:fs' +import * as http from 'node:http' +import { join } from 'node:path' import split from 'split2' -import { Readable } from 'stream' +import { Readable } from 'node:stream' import { test } from 'tap' import { Client, errors } from '../../../' import { buildServer, connection } from '../../utils' @@ -936,11 +936,11 @@ test('bulk index', t => { onDocument (doc) { t.type(doc.user, 'string') // testing that doc is type of Document return [ - { - index: { - _index: 'test' - } - }, + { + index: { + _index: 'test' + } + }, { ...doc, updatedAt } ] }, @@ -1042,7 +1042,7 @@ test('bulk create', t => { _index: 'test', _id: String(id++) } - }, + }, { ...doc, updatedAt } ] }, diff --git a/test/unit/helpers/search.test.ts b/test/unit/helpers/search.test.ts index 697237ef3..9ed4605ab 100644 --- a/test/unit/helpers/search.test.ts +++ b/test/unit/helpers/search.test.ts @@ -108,4 +108,5 @@ test('Merge filter paths (snake_case)', async t => { { two: 'two' }, { three: 'three' } ]) -}) \ No newline at end of file +}) + diff --git a/test/utils/buildCluster.ts b/test/utils/buildCluster.ts index 608fcc268..79a8ba71b 100644 --- a/test/utils/buildCluster.ts +++ b/test/utils/buildCluster.ts @@ -18,7 +18,7 @@ */ import Debug from 'debug' -import * as http from 'http' +import * as http from 'node:http' import buildServer, { ServerHandler } from './buildServer' import { StoppableServer } from 'stoppable' From 608b517d644febfcb2455c7e6791a6f71df6516b Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 26 Aug 2024 14:10:57 -0500 Subject: [PATCH 381/647] github-actions user is a contributor (#2352) --- .github/workflows/auto-merge.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml index f8fb5c587..03b49ee21 100644 --- a/.github/workflows/auto-merge.yml +++ b/.github/workflows/auto-merge.yml @@ -15,4 +15,4 @@ jobs: merge-method: squash do-not-merge-labels: never-merge pull-request-author-associations: OWNER,MEMBER,COLLABORATOR - review-author-associations: OWNER,MEMBER + review-author-associations: OWNER,MEMBER,CONTRIBUTOR From 60aa521b7ea064f9043d4fddf39c09948aeb8b27 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 26 Aug 2024 20:31:53 +0100 Subject: [PATCH 382/647] Auto-generated code for main (#2351) Co-authored-by: Josh Mock --- .../14a33c364873c2f930ca83d0a3005389.asciidoc | 12 + .../2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc | 16 + .../8f2875d976332cf5da8fb7764097a307.asciidoc | 21 ++ .../e7cfe670b4177d1011076f845ec2916c.asciidoc | 13 + docs/reference.asciidoc | 3 +- src/api/types.ts | 292 +++++++++++++++++- src/api/typesWithBodyKey.ts | 292 +++++++++++++++++- 7 files changed, 620 insertions(+), 29 deletions(-) create mode 100644 docs/doc_examples/14a33c364873c2f930ca83d0a3005389.asciidoc create mode 100644 docs/doc_examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc create mode 100644 docs/doc_examples/8f2875d976332cf5da8fb7764097a307.asciidoc create mode 100644 docs/doc_examples/e7cfe670b4177d1011076f845ec2916c.asciidoc diff --git a/docs/doc_examples/14a33c364873c2f930ca83d0a3005389.asciidoc b/docs/doc_examples/14a33c364873c2f930ca83d0a3005389.asciidoc new file mode 100644 index 000000000..fa4c966b7 --- /dev/null +++ b/docs/doc_examples/14a33c364873c2f930ca83d0a3005389.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.cluster.allocationExplain({ + index: "my-index", + shard: 0, + primary: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc b/docs/doc_examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc new file mode 100644 index 000000000..eb4f6acad --- /dev/null +++ b/docs/doc_examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "hourly-snapshots", + schedule: "1h", + name: "", + repository: "my_repository", + config: { + indices: ["data-*", "important"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8f2875d976332cf5da8fb7764097a307.asciidoc b/docs/doc_examples/8f2875d976332cf5da8fb7764097a307.asciidoc new file mode 100644 index 000000000..2d5479b4c --- /dev/null +++ b/docs/doc_examples/8f2875d976332cf5da8fb7764097a307.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, + template: { + lifecycle: { + data_retention: "7d", + }, + }, + _meta: { + description: "Template with data stream lifecycle", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e7cfe670b4177d1011076f845ec2916c.asciidoc b/docs/doc_examples/e7cfe670b4177d1011076f845ec2916c.asciidoc new file mode 100644 index 000000000..760160b38 --- /dev/null +++ b/docs/doc_examples/e7cfe670b4177d1011076f845ec2916c.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "data_streams.lifecycle.retention.default": "7d", + "data_streams.lifecycle.retention.max": "90d", + }, +}); +console.log(response); +---- diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 510ccb0d6..722b880da 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -1351,6 +1351,7 @@ If the index has a default ingest pipeline specified, then setting the value to If a final pipeline is configured it will always run, regardless of the value of this parameter. ** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. +** *`q` (Optional, string)*: Query in the Lucene query string syntax. ** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search. ** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. ** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. 
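To make the newly documented `q` parameter concrete, a minimal sketch of an update-by-query call using Lucene query string syntax; the index name, query, and script are hypothetical:

[source, js]
----
const response = await client.updateByQuery({
  index: "my-index", // hypothetical index name
  q: "user.id:kimchy", // Lucene query string syntax
  script: {
    source: "ctx._source.count++", // hypothetical update script
  },
});
console.log(response);
----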
@@ -8355,7 +8356,7 @@ client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) * *Request (object):* ** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to be created or updated ** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to be created or updated -** *`type` (Enum("pinned"))* +** *`type` (Enum("pinned" | "exclude"))* ** *`criteria` ({ type, metadata, values } | { type, metadata, values }[])* ** *`actions` ({ ids, docs })* ** *`priority` (Optional, number)* diff --git a/src/api/types.ts b/src/api/types.ts index 535b8fc47..1c0e9d1a0 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -361,6 +361,7 @@ export interface FieldCapsResponse { export interface GetGetResult { _index: IndexName fields?: Record + _ignored?: string[] found: boolean _id: Id _primary_term?: long @@ -459,6 +460,16 @@ export interface HealthReportBaseIndicator { diagnosis?: HealthReportDiagnosis[] } +export interface HealthReportDataStreamLifecycleDetails { + stagnating_backing_indices_count: integer + total_backing_indices_in_error: integer + stagnating_backing_indices?: HealthReportStagnatingBackingIndices[] +} + +export interface HealthReportDataStreamLifecycleIndicator extends HealthReportBaseIndicator { + details?: HealthReportDataStreamLifecycleDetails +} + export interface HealthReportDiagnosis { id: string action: string @@ -518,6 +529,7 @@ export interface HealthReportIndicators { shards_availability?: HealthReportShardsAvailabilityIndicator disk?: HealthReportDiskIndicator repository_integrity?: HealthReportRepositoryIntegrityIndicator + data_stream_lifecycle?: HealthReportDataStreamLifecycleIndicator ilm?: HealthReportIlmIndicator slm?: HealthReportSlmIndicator shards_capacity?: HealthReportShardsCapacityIndicator @@ -614,6 +626,12 @@ export interface HealthReportSlmIndicatorUnhealthyPolicies { invocations_since_last_success?: Record } +export interface HealthReportStagnatingBackingIndices { + index_name: IndexName + first_occurrence_timestamp: long + retry_count: integer +} + export interface IndexRequest extends RequestBase { id?: Id index: IndexName @@ -1402,7 +1420,6 @@ export interface SearchHighlightBase { export interface SearchHighlightField extends SearchHighlightBase { fragment_offset?: integer matched_fields?: Fields - analyzer?: AnalysisAnalyzer } export type SearchHighlighterEncoder = 'default' | 'html' @@ -1904,6 +1921,7 @@ export interface UpdateByQueryRequest extends RequestBase { lenient?: boolean pipeline?: string preference?: string + q?: string refresh?: boolean request_cache?: boolean requests_per_second?: float @@ -4210,13 +4228,61 @@ export interface AggregationsWeightedAverageValue { export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase { } -export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisDutchAnalyzer +export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | 
AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer + +export interface AnalysisArabicAnalyzer { + type: 'arabic' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisArmenianAnalyzer { + type: 'armenian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'asciifolding' preserve_original?: SpecUtilsStringified } +export interface AnalysisBasqueAnalyzer { + type: 'basque' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisBengaliAnalyzer { + type: 'bengali' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisBrazilianAnalyzer { + type: 'brazilian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisBulgarianAnalyzer { + type: 'bulgarian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisCatalanAnalyzer { + type: 'catalan' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export type AnalysisCharFilter = string | AnalysisCharFilterDefinition export interface AnalysisCharFilterBase { @@ -4231,6 +4297,18 @@ export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase { max_token_length?: integer } +export interface AnalysisChineseAnalyzer { + type: 'chinese' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisCjkAnalyzer { + type: 'cjk' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase { type: 'common_grams' common_words?: string[] @@ -4270,6 +4348,19 @@ export interface AnalysisCustomNormalizer { filter?: string[] } +export interface AnalysisCzechAnalyzer { + type: 'czech' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisDanishAnalyzer { + type: 'danish' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity' export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase { @@ -4285,6 +4376,8 @@ export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompo export interface AnalysisDutchAnalyzer { type: 'dutch' stopwords?: AnalysisStopWords + stopwords_path?: string 
+ stem_exclusion?: string[] } export type AnalysisEdgeNGramSide = 'front' | 'back' @@ -4312,6 +4405,19 @@ export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { articles_case?: SpecUtilsStringified } +export interface AnalysisEnglishAnalyzer { + type: 'english' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisEstonianAnalyzer { + type: 'estonian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export interface AnalysisFingerprintAnalyzer { type: 'fingerprint' version?: VersionString @@ -4328,11 +4434,59 @@ export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase separator?: string } +export interface AnalysisFinnishAnalyzer { + type: 'finnish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisFrenchAnalyzer { + type: 'french' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGalicianAnalyzer { + type: 'galician' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGermanAnalyzer { + type: 'german' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGreekAnalyzer { + type: 'greek' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisHindiAnalyzer { + type: 'hindi' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { type: 'html_strip' escaped_tags?: string[] } +export interface AnalysisHungarianAnalyzer { + type: 'hungarian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { type: 'hunspell' dedup?: boolean @@ -4408,6 +4562,27 @@ export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase id: string } +export interface AnalysisIndonesianAnalyzer { + type: 'indonesian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisIrishAnalyzer { + type: 'irish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisItalianAnalyzer { + type: 'italian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase { type: 'kstem' } @@ -4496,6 +4671,13 @@ export interface AnalysisLanguageAnalyzer { stopwords_path?: string } +export interface AnalysisLatvianAnalyzer { + type: 'latvian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase { type: 'length' max?: integer @@ -4512,6 +4694,13 @@ export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterB max_token_count?: SpecUtilsStringified } +export interface AnalysisLithuanianAnalyzer { + type: 'lithuanian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisLowercaseNormalizer { type: 'lowercase' } @@ -4577,6 +4766,13 @@ export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { export type AnalysisNormalizer = AnalysisLowercaseNormalizer | 
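To show how the language-analyzer types added above are used, a sketch of configuring one at index creation; the index name, analyzer name, and exclusion list are hypothetical:

[source, js]
----
const response = await client.indices.create({
  index: "my-index", // hypothetical index name
  settings: {
    analysis: {
      analyzer: {
        my_hungarian: {
          type: "hungarian",
          stem_exclusion: ["példa"], // hypothetical words excluded from stemming
        },
      },
    },
  },
});
console.log(response);
----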
AnalysisCustomNormalizer +export interface AnalysisNorwegianAnalyzer { + type: 'norwegian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { type: 'path_hierarchy' buffer_size?: SpecUtilsStringified @@ -4623,6 +4819,12 @@ export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase { pattern?: string } +export interface AnalysisPersianAnalyzer { + type: 'persian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish' @@ -4645,6 +4847,13 @@ export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase { type: 'porter_stem' } +export interface AnalysisPortugueseAnalyzer { + type: 'portuguese' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase { type: 'predicate_token_filter' script: Script | string @@ -4658,6 +4867,27 @@ export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase { type: 'reverse' } +export interface AnalysisRomanianAnalyzer { + type: 'romanian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisRussianAnalyzer { + type: 'russian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisSerbianAnalyzer { + type: 'serbian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase { type: 'shingle' filler_token?: string @@ -4687,6 +4917,20 @@ export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { language?: AnalysisSnowballLanguage } +export interface AnalysisSoraniAnalyzer { + type: 'sorani' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisSpanishAnalyzer { + type: 'spanish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisStandardAnalyzer { type: 'standard' max_token_length?: integer @@ -4727,6 +4971,13 @@ export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { export type AnalysisStopWords = string | string[] +export interface AnalysisSwedishAnalyzer { + type: 'swedish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export type AnalysisSynonymFormat = 'solr' | 'wordnet' export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase { @@ -4753,6 +5004,12 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { updateable?: boolean } +export interface AnalysisThaiAnalyzer { + type: 'thai' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition @@ -4780,6 +5037,13 @@ export interface AnalysisTruncateTokenFilter extends 
AnalysisTokenFilterBase { length?: integer } +export interface AnalysisTurkishAnalyzer { + type: 'turkish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase { type: 'uax_url_email' max_token_length?: integer @@ -10257,6 +10521,7 @@ export interface IndicesCacheQueries { export interface IndicesDataStream { _meta?: Metadata allow_custom_routing?: boolean + failure_store?: IndicesFailureStore generation: integer hidden: boolean ilm_policy?: Name @@ -10266,6 +10531,7 @@ export interface IndicesDataStream { lifecycle?: IndicesDataStreamLifecycleWithRollover name: DataStreamName replicated?: boolean + rollover_on_write: boolean status: HealthStatus system?: boolean template: Name @@ -10276,8 +10542,8 @@ export interface IndicesDataStreamIndex { index_name: IndexName index_uuid: Uuid ilm_policy?: Name - managed_by: IndicesManagedBy - prefer_ilm: boolean + managed_by?: IndicesManagedBy + prefer_ilm?: boolean } export interface IndicesDataStreamLifecycle { @@ -10325,6 +10591,12 @@ export interface IndicesDownsamplingRound { config: IndicesDownsampleConfig } +export interface IndicesFailureStore { + enabled: boolean + indices: IndicesDataStreamIndex[] + rollover_on_write: boolean +} + export interface IndicesFielddataFrequencyFilter { max: double min: double @@ -13689,7 +13961,7 @@ export interface MlPerPartitionCategorization { stop_on_warn?: boolean } -export type MlPredictedValue = string | double | boolean | integer +export type MlPredictedValue = ScalarValue | ScalarValue[] export interface MlQuestionAnsweringInferenceOptions { num_top_classes?: integer @@ -15700,13 +15972,6 @@ export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodes nodes: Record } -export interface NodesHotThreadsHotThread { - hosts: Host[] - node_id: Id - node_name: Name - threads: string[] -} - export interface NodesHotThreadsRequest extends RequestBase { node_id?: NodeIds ignore_idle_threads?: boolean @@ -15720,7 +15985,6 @@ export interface NodesHotThreadsRequest extends RequestBase { } export interface NodesHotThreadsResponse { - hot_threads: NodesHotThreadsHotThread[] } export interface NodesInfoDeprecationIndexing { @@ -16158,7 +16422,7 @@ export interface QueryRulesQueryRuleCriteria { export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' | 'always' -export type QueryRulesQueryRuleType = 'pinned' +export type QueryRulesQueryRuleType = 'pinned' | 'exclude' export interface QueryRulesQueryRuleset { ruleset_id: Id diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 3f4b7ece2..79fac3c04 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -382,6 +382,7 @@ export interface FieldCapsResponse { export interface GetGetResult { _index: IndexName fields?: Record + _ignored?: string[] found: boolean _id: Id _primary_term?: long @@ -480,6 +481,16 @@ export interface HealthReportBaseIndicator { diagnosis?: HealthReportDiagnosis[] } +export interface HealthReportDataStreamLifecycleDetails { + stagnating_backing_indices_count: integer + total_backing_indices_in_error: integer + stagnating_backing_indices?: HealthReportStagnatingBackingIndices[] +} + +export interface HealthReportDataStreamLifecycleIndicator extends HealthReportBaseIndicator { + details?: HealthReportDataStreamLifecycleDetails +} + export interface HealthReportDiagnosis { id: 
string action: string @@ -539,6 +550,7 @@ export interface HealthReportIndicators { shards_availability?: HealthReportShardsAvailabilityIndicator disk?: HealthReportDiskIndicator repository_integrity?: HealthReportRepositoryIntegrityIndicator + data_stream_lifecycle?: HealthReportDataStreamLifecycleIndicator ilm?: HealthReportIlmIndicator slm?: HealthReportSlmIndicator shards_capacity?: HealthReportShardsCapacityIndicator @@ -635,6 +647,12 @@ export interface HealthReportSlmIndicatorUnhealthyPolicies { invocations_since_last_success?: Record } +export interface HealthReportStagnatingBackingIndices { + index_name: IndexName + first_occurrence_timestamp: long + retry_count: integer +} + export interface IndexRequest extends RequestBase { id?: Id index: IndexName @@ -1457,7 +1475,6 @@ export interface SearchHighlightBase { export interface SearchHighlightField extends SearchHighlightBase { fragment_offset?: integer matched_fields?: Fields - analyzer?: AnalysisAnalyzer } export type SearchHighlighterEncoder = 'default' | 'html' @@ -1974,6 +1991,7 @@ export interface UpdateByQueryRequest extends RequestBase { lenient?: boolean pipeline?: string preference?: string + q?: string refresh?: boolean request_cache?: boolean requests_per_second?: float @@ -4283,13 +4301,61 @@ export interface AggregationsWeightedAverageValue { export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase { } -export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisDutchAnalyzer +export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer + +export interface AnalysisArabicAnalyzer { + type: 'arabic' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisArmenianAnalyzer { + type: 'armenian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { type: 
'asciifolding' preserve_original?: SpecUtilsStringified } +export interface AnalysisBasqueAnalyzer { + type: 'basque' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisBengaliAnalyzer { + type: 'bengali' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisBrazilianAnalyzer { + type: 'brazilian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisBulgarianAnalyzer { + type: 'bulgarian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisCatalanAnalyzer { + type: 'catalan' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export type AnalysisCharFilter = string | AnalysisCharFilterDefinition export interface AnalysisCharFilterBase { @@ -4304,6 +4370,18 @@ export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase { max_token_length?: integer } +export interface AnalysisChineseAnalyzer { + type: 'chinese' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisCjkAnalyzer { + type: 'cjk' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase { type: 'common_grams' common_words?: string[] @@ -4343,6 +4421,19 @@ export interface AnalysisCustomNormalizer { filter?: string[] } +export interface AnalysisCzechAnalyzer { + type: 'czech' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisDanishAnalyzer { + type: 'danish' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity' export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase { @@ -4358,6 +4449,8 @@ export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompo export interface AnalysisDutchAnalyzer { type: 'dutch' stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] } export type AnalysisEdgeNGramSide = 'front' | 'back' @@ -4385,6 +4478,19 @@ export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { articles_case?: SpecUtilsStringified } +export interface AnalysisEnglishAnalyzer { + type: 'english' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisEstonianAnalyzer { + type: 'estonian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export interface AnalysisFingerprintAnalyzer { type: 'fingerprint' version?: VersionString @@ -4401,11 +4507,59 @@ export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase separator?: string } +export interface AnalysisFinnishAnalyzer { + type: 'finnish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisFrenchAnalyzer { + type: 'french' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGalicianAnalyzer { + type: 'galician' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGermanAnalyzer { + type: 'german' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGreekAnalyzer { + type: 'greek' + stopwords?: 
AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisHindiAnalyzer { + type: 'hindi' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { type: 'html_strip' escaped_tags?: string[] } +export interface AnalysisHungarianAnalyzer { + type: 'hungarian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { type: 'hunspell' dedup?: boolean @@ -4481,6 +4635,27 @@ export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase id: string } +export interface AnalysisIndonesianAnalyzer { + type: 'indonesian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisIrishAnalyzer { + type: 'irish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisItalianAnalyzer { + type: 'italian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase { type: 'kstem' } @@ -4569,6 +4744,13 @@ export interface AnalysisLanguageAnalyzer { stopwords_path?: string } +export interface AnalysisLatvianAnalyzer { + type: 'latvian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase { type: 'length' max?: integer @@ -4585,6 +4767,13 @@ export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterB max_token_count?: SpecUtilsStringified } +export interface AnalysisLithuanianAnalyzer { + type: 'lithuanian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisLowercaseNormalizer { type: 'lowercase' } @@ -4650,6 +4839,13 @@ export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNormalizer +export interface AnalysisNorwegianAnalyzer { + type: 'norwegian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { type: 'path_hierarchy' buffer_size?: SpecUtilsStringified @@ -4696,6 +4892,12 @@ export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase { pattern?: string } +export interface AnalysisPersianAnalyzer { + type: 'persian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish' @@ -4718,6 +4920,13 @@ export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase { type: 'porter_stem' } +export interface AnalysisPortugueseAnalyzer { + type: 'portuguese' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase { type: 'predicate_token_filter' script: Script | string @@ -4731,6 +4940,27 @@ 
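Note on the per-language analyzer interfaces threaded through this file (arabic through turkish): they all share the same optional `stopwords`, `stopwords_path`, and `stem_exclusion` fields, mirroring Elasticsearch's built-in language analyzers. A minimal sketch of supplying one of these typed analyzers at index-creation time; the index name and the exclusion term are illustrative placeholders, not part of this patch:

[source, ts]
----
// Sketch only: assumes a reachable Elasticsearch 8.x node.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.indices.create({
  index: 'my-italian-index', // hypothetical index name
  settings: {
    analysis: {
      analyzer: {
        // This object matches the AnalysisItalianAnalyzer shape above.
        my_italian: {
          type: 'italian',
          stem_exclusion: ['esempio'] // terms exempted from stemming
        }
      }
    }
  }
})
----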
export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase { type: 'reverse' } +export interface AnalysisRomanianAnalyzer { + type: 'romanian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisRussianAnalyzer { + type: 'russian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisSerbianAnalyzer { + type: 'serbian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase { type: 'shingle' filler_token?: string @@ -4760,6 +4990,20 @@ export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { language?: AnalysisSnowballLanguage } +export interface AnalysisSoraniAnalyzer { + type: 'sorani' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisSpanishAnalyzer { + type: 'spanish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisStandardAnalyzer { type: 'standard' max_token_length?: integer @@ -4800,6 +5044,13 @@ export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { export type AnalysisStopWords = string | string[] +export interface AnalysisSwedishAnalyzer { + type: 'swedish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export type AnalysisSynonymFormat = 'solr' | 'wordnet' export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase { @@ -4826,6 +5077,12 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { updateable?: boolean } +export interface AnalysisThaiAnalyzer { + type: 'thai' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition @@ -4853,6 +5110,13 @@ export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase { length?: integer } +export interface AnalysisTurkishAnalyzer { + type: 'turkish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase { type: 'uax_url_email' max_token_length?: integer @@ -10431,6 +10695,7 @@ export interface IndicesCacheQueries { export interface IndicesDataStream { _meta?: Metadata allow_custom_routing?: boolean + failure_store?: IndicesFailureStore generation: integer hidden: boolean ilm_policy?: Name @@ -10440,6 +10705,7 @@ export interface IndicesDataStream { lifecycle?: IndicesDataStreamLifecycleWithRollover name: DataStreamName replicated?: boolean + rollover_on_write: boolean status: HealthStatus system?: boolean template: Name @@ -10450,8 +10716,8 @@ export interface IndicesDataStreamIndex { index_name: IndexName index_uuid: Uuid ilm_policy?: Name - managed_by: IndicesManagedBy - prefer_ilm: boolean + managed_by?: IndicesManagedBy + prefer_ilm?: boolean } export interface IndicesDataStreamLifecycle { @@ -10499,6 +10765,12 @@ export interface IndicesDownsamplingRound { config: IndicesDownsampleConfig } +export interface IndicesFailureStore { + enabled: boolean + indices: IndicesDataStreamIndex[] + rollover_on_write: boolean +} + export interface IndicesFielddataFrequencyFilter { max: double min: double @@ -13924,7 +14196,7 @@ 
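The data stream changes just above surface the failure-store feature in the response types: `IndicesDataStream` gains an optional `failure_store` and a required `rollover_on_write`, while `managed_by` and `prefer_ilm` become optional on backing indices. A hedged sketch of reading the new fields off a get-data-stream response; the data stream name is a placeholder:

[source, ts]
----
// Sketch only: "logs-myapp-default" is a hypothetical data stream.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const { data_streams: streams } = await client.indices.getDataStream({
  name: 'logs-myapp-default'
})

for (const stream of streams) {
  // failure_store stays optional, so guard before dereferencing it.
  if (stream.failure_store?.enabled === true) {
    console.log(`${stream.name} routes failed docs to ${stream.failure_store.indices.length} failure indices`)
  }
  // rollover_on_write is now always present on the data stream itself.
  console.log(`${stream.name} rollover_on_write: ${stream.rollover_on_write}`)
}
----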
export interface MlPerPartitionCategorization { stop_on_warn?: boolean } -export type MlPredictedValue = string | double | boolean | integer +export type MlPredictedValue = ScalarValue | ScalarValue[] export interface MlQuestionAnsweringInferenceOptions { num_top_classes?: integer @@ -16049,13 +16321,6 @@ export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodes nodes: Record } -export interface NodesHotThreadsHotThread { - hosts: Host[] - node_id: Id - node_name: Name - threads: string[] -} - export interface NodesHotThreadsRequest extends RequestBase { node_id?: NodeIds ignore_idle_threads?: boolean @@ -16069,7 +16334,6 @@ export interface NodesHotThreadsRequest extends RequestBase { } export interface NodesHotThreadsResponse { - hot_threads: NodesHotThreadsHotThread[] } export interface NodesInfoDeprecationIndexing { @@ -16510,7 +16774,7 @@ export interface QueryRulesQueryRuleCriteria { export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' | 'always' -export type QueryRulesQueryRuleType = 'pinned' +export type QueryRulesQueryRuleType = 'pinned' | 'exclude' export interface QueryRulesQueryRuleset { ruleset_id: Id From 83b32f7ef4a1f7dd9d83a41e3f9e67b8f639382d Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 27 Aug 2024 13:01:25 -0500 Subject: [PATCH 383/647] Add bun export --- index.ts | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++++ package.json | 3 ++- 2 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 index.ts diff --git a/index.ts b/index.ts new file mode 100644 index 000000000..0dac730c4 --- /dev/null +++ b/index.ts @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import { + Diagnostic, + Transport, + WeightedConnectionPool, + ClusterConnectionPool, + BaseConnectionPool, + CloudConnectionPool, + BaseConnection, + HttpConnection, + UndiciConnection, + Serializer, + errors, + events +} from '@elastic/transport' + +import Client from './src/client' +import SniffingTransport from './src/sniffingTransport' + +export { + Client, + SniffingTransport, + Diagnostic, + Transport, + WeightedConnectionPool, + ClusterConnectionPool, + BaseConnectionPool, + CloudConnectionPool, + BaseConnection, + HttpConnection, + UndiciConnection, + Serializer, + errors, + events +} diff --git a/package.json b/package.json index 520ed4b04..413429b4f 100644 --- a/package.json +++ b/package.json @@ -6,7 +6,8 @@ "main": "./index.js", "types": "index.d.ts", "exports": { - "require": "./index.js" + "require": "./index.js", + "bun": "./index.ts" }, "scripts": { "test": "npm run build && npm run lint && tap test/unit/{*,**/*}.test.ts", From 889fee2316965c69b709f2b9a94f52e58a846b23 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 27 Aug 2024 13:03:50 -0500 Subject: [PATCH 384/647] Revert "Add bun export" (#2354) This reverts commit 83b32f7ef4a1f7dd9d83a41e3f9e67b8f639382d. --- index.ts | 53 ---------------------------------------------------- package.json | 3 +-- 2 files changed, 1 insertion(+), 55 deletions(-) delete mode 100644 index.ts diff --git a/index.ts b/index.ts deleted file mode 100644 index 0dac730c4..000000000 --- a/index.ts +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
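For context on the `"bun": "./index.ts"` condition this patch adds to the `exports` map (and which the next patch reverts): Bun resolves a `bun` export condition ahead of `require`, and because it executes TypeScript natively it can take the `.ts` entry point as-is. A consumer-side sketch, none of which is part of the patch itself:

[source, ts]
----
// Under Bun, resolving '@elastic/elasticsearch' would match the "bun"
// condition and load index.ts directly; under Node.js the "require"
// condition still wins and the compiled index.js is loaded instead.
// Sketch only: assumes the package is installed and a node is running.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })
console.log(await client.info())
----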
- */ - -import { - Diagnostic, - Transport, - WeightedConnectionPool, - ClusterConnectionPool, - BaseConnectionPool, - CloudConnectionPool, - BaseConnection, - HttpConnection, - UndiciConnection, - Serializer, - errors, - events -} from '@elastic/transport' - -import Client from './src/client' -import SniffingTransport from './src/sniffingTransport' - -export { - Client, - SniffingTransport, - Diagnostic, - Transport, - WeightedConnectionPool, - ClusterConnectionPool, - BaseConnectionPool, - CloudConnectionPool, - BaseConnection, - HttpConnection, - UndiciConnection, - Serializer, - errors, - events -} diff --git a/package.json b/package.json index 413429b4f..520ed4b04 100644 --- a/package.json +++ b/package.json @@ -6,8 +6,7 @@ "main": "./index.js", "types": "index.d.ts", "exports": { - "require": "./index.js", - "bun": "./index.ts" + "require": "./index.js" }, "scripts": { "test": "npm run build && npm run lint && tap test/unit/{*,**/*}.test.ts", From 9e08aaebe23b0489539ec57717a3562caf222ea6 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 28 Aug 2024 12:33:46 -0500 Subject: [PATCH 385/647] Add experimental Bun test runner (#2353) * Add experimental Bun test runner * Add TypeScript export for Bun * Clean up tests to prevent TypeScript build warnings * Use Node.js 22 to run codegen * Squash a couple TypeScript errors during tests These are expected errors, to test edge cases for non-TS users * Ignore Bun lockfile * Drop unused index.ts * Move unit test file list to tap config --- .github/make.sh | 186 +++++++++++++++--------------- .github/workflows/nodejs.yml | 30 +++++ .gitignore | 1 + .npmignore | 1 + package.json | 14 ++- test/unit/api.test.ts | 2 +- test/unit/helpers/msearch.test.ts | 36 +++--- test/unit/helpers/scroll.test.ts | 10 +- 8 files changed, 156 insertions(+), 124 deletions(-) diff --git a/.github/make.sh b/.github/make.sh index 4c32a77b4..f02663e97 100755 --- a/.github/make.sh +++ b/.github/make.sh @@ -37,7 +37,7 @@ product="elastic/elasticsearch-js" output_folder=".buildkite/output" codegen_folder=".buildkite/output" OUTPUT_DIR="$repo/${output_folder}" -NODE_JS_VERSION=18 +NODE_JS_VERSION=22 WORKFLOW=${WORKFLOW-staging} mkdir -p "$OUTPUT_DIR" @@ -46,83 +46,83 @@ echo -e "\033[34;1mINFO:\033[0m VERSION ${STACK_VERSION}\033[0m" echo -e "\033[34;1mINFO:\033[0m OUTPUT_DIR ${OUTPUT_DIR}\033[0m" case $CMD in - clean) - echo -e "\033[36;1mTARGET: clean workspace $output_folder\033[0m" - rm -rf "$output_folder" - echo -e "\033[32;1mdone.\033[0m" - exit 0 - ;; - assemble) - if [ -v $VERSION ]; then - echo -e "\033[31;1mTARGET: assemble -> missing version parameter\033[0m" - exit 1 - fi - echo -e "\033[36;1mTARGET: assemble artifact $VERSION\033[0m" - TASK=release - TASK_ARGS=("$VERSION" "$output_folder") - ;; - codegen) - if [ -v "$VERSION" ] || [[ -z "$VERSION" ]]; then - # fall back to branch name or `main` if no VERSION is set - branch_name=$(git rev-parse --abbrev-ref HEAD) - if [[ "$branch_name" =~ ^[0-9]+\.[0-9]+ ]]; then - echo -e "\033[36;1mTARGET: codegen -> No VERSION argument found, using branch name: \`$branch_name\`\033[0m" - VERSION="$branch_name" - else - echo -e "\033[36;1mTARGET: codegen -> No VERSION argument found, using \`main\`\033[0m" - VERSION="main" - fi - fi - if [ "$VERSION" = 'main' ]; then - echo -e "\033[36;1mTARGET: codegen API $VERSION\033[0m" - else - echo -e "\033[36;1mTARGET: codegen API v$VERSION\033[0m" - fi - - TASK=codegen - TASK_ARGS=("$VERSION") - ;; - docsgen) - if [ -v $VERSION ]; then - echo -e "\033[31;1mTARGET: docsgen -> missing 
version parameter\033[0m" - exit 1 - fi - echo -e "\033[36;1mTARGET: generate docs for $VERSION\033[0m" - TASK=codegen - TASK_ARGS=("$VERSION" "$codegen_folder") - ;; - examplesgen) - echo -e "\033[36;1mTARGET: generate examples\033[0m" - TASK=codegen - TASK_ARGS=("$VERSION" "$codegen_folder") - ;; - bump) - if [ -v $VERSION ]; then - echo -e "\033[31;1mTARGET: bump -> missing version parameter\033[0m" - exit 1 - fi - echo -e "\033[36;1mTARGET: bump to version $VERSION\033[0m" - TASK=bump - TASK_ARGS=("$VERSION") - ;; - bumpmatrix) - if [ -v $VERSION ]; then - echo -e "\033[31;1mTARGET: bumpmatrix -> missing version parameter\033[0m" - exit 1 - fi - echo -e "\033[36;1mTARGET: bump stack in test matrix to version $VERSION\033[0m" - TASK=bumpmatrix - TASK_ARGS=("$VERSION") - ;; - *) - echo -e "\n'$CMD' is not supported right now\n" - echo -e "\nUsage:" - echo -e "\t $0 release \$VERSION\n" - echo -e "\t $0 bump \$VERSION" - echo -e "\t $0 codegen \$VERSION" - exit 1 -esac +clean) + echo -e "\033[36;1mTARGET: clean workspace $output_folder\033[0m" + rm -rf "$output_folder" + echo -e "\033[32;1mdone.\033[0m" + exit 0 + ;; +assemble) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: assemble -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: assemble artifact $VERSION\033[0m" + TASK=release + TASK_ARGS=("$VERSION" "$output_folder") + ;; +codegen) + if [ -v "$VERSION" ] || [[ -z "$VERSION" ]]; then + # fall back to branch name or `main` if no VERSION is set + branch_name=$(git rev-parse --abbrev-ref HEAD) + if [[ "$branch_name" =~ ^[0-9]+\.[0-9]+ ]]; then + echo -e "\033[36;1mTARGET: codegen -> No VERSION argument found, using branch name: \`$branch_name\`\033[0m" + VERSION="$branch_name" + else + echo -e "\033[36;1mTARGET: codegen -> No VERSION argument found, using \`main\`\033[0m" + VERSION="main" + fi + fi + if [ "$VERSION" = 'main' ]; then + echo -e "\033[36;1mTARGET: codegen API $VERSION\033[0m" + else + echo -e "\033[36;1mTARGET: codegen API v$VERSION\033[0m" + fi + TASK=codegen + TASK_ARGS=("$VERSION") + ;; +docsgen) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: docsgen -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: generate docs for $VERSION\033[0m" + TASK=codegen + TASK_ARGS=("$VERSION" "$codegen_folder") + ;; +examplesgen) + echo -e "\033[36;1mTARGET: generate examples\033[0m" + TASK=codegen + TASK_ARGS=("$VERSION" "$codegen_folder") + ;; +bump) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: bump -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: bump to version $VERSION\033[0m" + TASK=bump + TASK_ARGS=("$VERSION") + ;; +bumpmatrix) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: bumpmatrix -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: bump stack in test matrix to version $VERSION\033[0m" + TASK=bumpmatrix + TASK_ARGS=("$VERSION") + ;; +*) + echo -e "\n'$CMD' is not supported right now\n" + echo -e "\nUsage:" + echo -e "\t $0 release \$VERSION\n" + echo -e "\t $0 bump \$VERSION" + echo -e "\t $0 codegen \$VERSION" + exit 1 + ;; +esac # ------------------------------------------------------- # # Build Container @@ -179,36 +179,36 @@ fi # ------------------------------------------------------- # if [[ "$CMD" == "assemble" ]]; then - if compgen -G ".buildkite/output/*" > /dev/null; then - echo -e "\033[32;1mTARGET: successfully assembled client v$VERSION\033[0m" - else - echo -e "\033[31;1mTARGET: assemble failed, empty 
workspace!\033[0m" - exit 1 - fi + if compgen -G ".buildkite/output/*" >/dev/null; then + echo -e "\033[32;1mTARGET: successfully assembled client v$VERSION\033[0m" + else + echo -e "\033[31;1mTARGET: assemble failed, empty workspace!\033[0m" + exit 1 + fi fi if [[ "$CMD" == "bump" ]]; then - if [ -n "$(git status --porcelain)" ]; then - echo -e "\033[32;1mTARGET: successfully bumped client v$VERSION\033[0m" - else - echo -e "\033[31;1mTARGET: failed bumped client v$VERSION\033[0m" + if [ -n "$(git status --porcelain)" ]; then + echo -e "\033[32;1mTARGET: successfully bumped client v$VERSION\033[0m" + else + echo -e "\033[31;1mTARGET: failed bumped client v$VERSION\033[0m" exit 1 fi fi if [[ "$CMD" == "codegen" ]]; then - if [ -n "$(git status --porcelain)" ]; then - echo -e "\033[32;1mTARGET: successfully generated client v$VERSION\033[0m" - else - echo -e "\033[31;1mTARGET: failed generating client v$VERSION\033[0m" + if [ -n "$(git status --porcelain)" ]; then + echo -e "\033[32;1mTARGET: successfully generated client v$VERSION\033[0m" + else + echo -e "\033[31;1mTARGET: failed generating client v$VERSION\033[0m" exit 1 fi fi if [[ "$CMD" == "docsgen" ]]; then - echo "TODO" + echo "TODO" fi if [[ "$CMD" == "examplesgen" ]]; then - echo "TODO" + echo "TODO" fi diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index e4765731f..7dbec6fb0 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -73,6 +73,36 @@ jobs: run: | npm run license-checker + test-bun: + name: Test Bun + runs-on: ${{ matrix.os }} + needs: paths-filter + # only run if code relevant to unit tests was changed + if: needs.paths-filter.outputs.src-only == 'true' + + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macOS-latest] + + steps: + - uses: actions/checkout@v4 + + - name: Use Bun + uses: oven-sh/setup-bun@v2 + + - name: Install + run: | + bun install + + - name: Lint + run: | + bun run lint + + - name: Unit test + run: | + bun run test:unit-bun + auto-approve: name: Auto-approve needs: [test, license] diff --git a/.gitignore b/.gitignore index c38ae71df..0dd9106ed 100644 --- a/.gitignore +++ b/.gitignore @@ -64,3 +64,4 @@ test/bundlers/parcel-test/.parcel-cache lib junit-output +bun.lockb diff --git a/.npmignore b/.npmignore index 2e604be0f..08c1043f8 100644 --- a/.npmignore +++ b/.npmignore @@ -72,3 +72,4 @@ CODE_OF_CONDUCT.md CONTRIBUTING.md src +bun.lockb diff --git a/package.json b/package.json index 520ed4b04..31e6da8a4 100644 --- a/package.json +++ b/package.json @@ -9,11 +9,12 @@ "require": "./index.js" }, "scripts": { - "test": "npm run build && npm run lint && tap test/unit/{*,**/*}.test.ts", - "test:unit": "npm run build && tap test/unit/{*,**/*}.test.ts", - "test:coverage-100": "npm run build && tap test/unit/{*,**/*}.test.ts --coverage --100", - "test:coverage-report": "npm run build && tap test/unit/{*,**/*}.test.ts --coverage && nyc report --reporter=text-lcov > coverage.lcov", - "test:coverage-ui": "npm run build && tap test/unit/{*,**/*}.test.ts --coverage --coverage-report=html", + "test": "npm run build && npm run lint && tap", + "test:unit": "npm run build && tap", + "test:unit-bun": "bun run build && bunx tap", + "test:coverage-100": "npm run build && tap --coverage --100", + "test:coverage-report": "npm run build && tap --coverage && nyc report --reporter=text-lcov > coverage.lcov", + "test:coverage-ui": "npm run build && tap --coverage --coverage-report=html", "test:integration": "tsc && node test/integration/index.js", 
"lint": "ts-standard src", "lint:fix": "ts-standard --fix src", @@ -94,6 +95,7 @@ "jsx": false, "flow": false, "coverage": false, - "check-coverage": false + "check-coverage": false, + "files": "test/unit/{*,**/*}.test.ts" } } diff --git a/test/unit/api.test.ts b/test/unit/api.test.ts index 80b3a1f7f..8c9a72cdf 100644 --- a/test/unit/api.test.ts +++ b/test/unit/api.test.ts @@ -205,7 +205,7 @@ test('With generic document', async t => { } const Connection = connection.buildMockConnection({ - onRequest (opts) { + onRequest (_opts) { return { statusCode: 200, body: { diff --git a/test/unit/helpers/msearch.test.ts b/test/unit/helpers/msearch.test.ts index f0290d3b3..e80c5977c 100644 --- a/test/unit/helpers/msearch.test.ts +++ b/test/unit/helpers/msearch.test.ts @@ -24,7 +24,7 @@ import FakeTimers from '@sinonjs/fake-timers' test('Basic', async t => { const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: { responses: [{ @@ -78,7 +78,7 @@ test('Multiple searches (inside async iterator)', t => { t.plan(4) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: { responses: [{ @@ -161,7 +161,7 @@ test('Multiple searches (async iterator exits)', t => { t.plan(4) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: { responses: [{ @@ -242,7 +242,7 @@ test('Multiple searches (async iterator exits)', t => { test('Stop a msearch processor (promises)', async t => { const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: {} } } }) @@ -272,7 +272,7 @@ test('Bad header', t => { t.plan(1) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: {} } } }) @@ -297,7 +297,7 @@ test('Bad body', t => { t.plan(1) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: {} } } }) @@ -321,7 +321,7 @@ test('Bad body', t => { test('Retry on 429', async t => { let count = 0 const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { if (count++ === 0) { return { body: { @@ -384,7 +384,7 @@ test('Retry on 429', async t => { test('Single search errors', async t => { const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: { responses: [{ @@ -419,7 +419,7 @@ test('Entire msearch fails', t => { t.plan(2) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { statusCode: 500, body: { @@ -454,7 +454,7 @@ test('Resolves the msearch helper', t => { t.plan(1) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: {} } } }) @@ -470,17 +470,17 @@ test('Resolves the msearch helper', t => { m.then( () => t.pass('called'), - e => t.fail('Should not fail') + _e => t.fail('Should not fail') ) - m.catch(e => t.fail('Should not fail')) + m.catch(_e => t.fail('Should not fail')) }) test('Stop the msearch helper with an error', t => { t.plan(3) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: {} } } }) @@ -511,7 +511,7 @@ test('Multiple searches (concurrency = 1)', t => { t.plan(4) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { 
return { body: { responses: [{ @@ -587,7 +587,7 @@ test('Flush interval', t => { t.teardown(() => clock.uninstall()) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: { responses: [{ @@ -640,7 +640,7 @@ test('Flush interval - early stop', t => { t.plan(2) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: { responses: [{ @@ -684,7 +684,7 @@ test('Stop should resolve the helper', t => { t.plan(1) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: { responses: [] @@ -709,7 +709,7 @@ test('Stop should resolve the helper (error)', t => { t.plan(3) const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { return { body: { responses: [] diff --git a/test/unit/helpers/scroll.test.ts b/test/unit/helpers/scroll.test.ts index b7ab9f735..88361bd7c 100644 --- a/test/unit/helpers/scroll.test.ts +++ b/test/unit/helpers/scroll.test.ts @@ -196,7 +196,7 @@ test('Scroll search (retry throws and maxRetries)', async t => { const expectedAttempts = maxRetries + 1 let count = 0 const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { count += 1 return { body: {}, statusCode: 429 } } @@ -217,8 +217,7 @@ test('Scroll search (retry throws and maxRetries)', async t => { }) try { - // @ts-expect-error - for await (const result of scrollSearch) { // eslint-disable-line + for await (const _result of scrollSearch) { // eslint-disable-line t.fail('we should not be here') } } catch (err: any) { @@ -344,7 +343,7 @@ test('Should not retry if maxRetries = 0', async t => { const expectedAttempts = 1 let count = 0 const MockConnection = connection.buildMockConnection({ - onRequest (params) { + onRequest (_params) { count += 1 return { body: {}, statusCode: 429 } } @@ -365,8 +364,7 @@ test('Should not retry if maxRetries = 0', async t => { }) try { - // @ts-expect-error - for await (const result of scrollSearch) { // eslint-disable-line + for await (const _result of scrollSearch) { // eslint-disable-line t.fail('we should not be here') } } catch (err: any) { From 132d6d6062461200b33a8922e3d3c28145c266f1 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 3 Sep 2024 15:36:33 +0100 Subject: [PATCH 386/647] Auto-generated code for main (#2357) --- .../00fea15cbca83be9d5f1a024ff2ec708.asciidoc | 8 +- .../04412d11783dac25b5fd2ec5407078a3.asciidoc | 11 +-- .../04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc | 8 +- .../0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc | 8 +- .../0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc | 43 +++++---- .../13ecdf99114098c76b050397d9c3d4e6.asciidoc | 12 ++- .../13fe12cdb73bc89f07a83f1e6b127511.asciidoc | 23 +++++ .../19d60e4890cc57151d596326484d9076.asciidoc | 6 +- .../1a56df055b94466ca76818e0858752c6.asciidoc | 8 +- .../1a9e03ce0355872a7db27fedc783fbec.asciidoc | 8 +- .../1b60ad542abb511cbd926ac8c55b609c.asciidoc | 8 +- .../1dadb7efe27b6c0c231eb6535e413bd9.asciidoc | 8 +- .../1e26353d546d733634187b8c3a7837a7.asciidoc | 8 +- .../20179a8889e949d6a8ee5fbf2ba35c96.asciidoc | 22 +++++ .../21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc | 8 +- .../342ddf9121aeddd82fea2464665e25da.asciidoc | 13 ++- .../398389933901b572a06a752bc780af7c.asciidoc | 8 +- .../3b6718257421b5419bf4cd6a7303c57e.asciidoc | 6 +- .../3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc | 20 +++++ .../41175d304e660da2931764f9a4418fd3.asciidoc | 17 ++-- 
.../430705509f8367aef92be413f702520b.asciidoc | 9 +- .../4c9350ed09b28f00e297ebe73c3b95a2.asciidoc | 8 +- .../4e3414fc712b16311f9e433dd366f49d.asciidoc | 6 +- .../52f4c5eb08d39f98e2e2f5527ece9731.asciidoc | 20 +++++ .../533087d787b48878a0bf3fa8d0851b64.asciidoc | 6 +- .../54c12d5099d7b715c15f5bbf65b386a1.asciidoc | 22 +++++ .../59d736a4d064ed2013c7ead8e32e0998.asciidoc | 8 +- .../640621cea39cdeeb76fbc95bff31a18d.asciidoc | 27 +++--- .../6606d46685d10377b996b5f20f1229b5.asciidoc | 9 +- .../6ddd4e657efbf45def430a6419825796.asciidoc | 8 +- .../7429b16221fe741fd31b0584786dd0b0.asciidoc | 16 ++-- .../7594a9a85c8511701e281974cbc253e1.asciidoc | 8 +- .../7752b677825523bfb0c38ad9325a6d47.asciidoc | 9 +- .../77b90f6787195767b6da60d8532714b4.asciidoc | 8 +- .../7c63a1d2fbec5283e913ff39fafd0604.asciidoc | 21 +++++ .../82844ef45e11c0eece100d3109db3182.asciidoc | 8 +- .../82eff1d681a5d0d1538ef011bb32ab9a.asciidoc | 18 ++++ .../840f8c863c30b04abcf2dd66b846f157.asciidoc | 8 +- .../8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc | 8 +- .../8619bd17bbfe33490b1f277007f654db.asciidoc | 8 +- .../8e89fee0be6a436c4e3d7c152659c47e.asciidoc | 31 +++---- .../9326e323f7ffde678fa04d2d1de3d3bc.asciidoc | 22 +++++ .../981b331db1404b39c1a612a135e4e76d.asciidoc | 5 +- .../9868ce609f4450702934fcbf4c340bf1.asciidoc | 21 +++++ .../986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc | 20 +++++ .../99803d7b111b862c0c82e9908e549b16.asciidoc | 8 +- .../9a203aae3e1412d919546276fb52a5ca.asciidoc | 8 +- .../9c021836acf7c0370e289f611325868d.asciidoc | 19 ++-- .../9d396afad93782699d7a929578c85284.asciidoc | 20 +++++ .../9f16fca9813304e398ee052aa857dbcd.asciidoc | 8 +- .../a225fc8c134cb21a85bc6025dac9368b.asciidoc | 8 +- .../a4a3c3cd09efa75168dab90105afb2e9.asciidoc | 6 +- .../a4ee2214d621bcfaf768c46d21325958.asciidoc | 8 +- .../a594f05459d9eecc8050c73fc8da336f.asciidoc | 8 +- .../a69c7c3412af73758f629e76263063b5.asciidoc | 18 ++++ .../a960b43e720b4934edb74ab4b085ca77.asciidoc | 5 +- .../aa676d54a59dee87ecd28bcc1edce59b.asciidoc | 20 +++++ .../b16700002af3aa70639f3e88c733bf35.asciidoc | 12 +++ .../b45a8c6fc746e9c90fd181e69a605fad.asciidoc | 10 +-- .../bdb671866e2f0195f8dfbdb7f20bf591.asciidoc | 8 +- .../c00c9412609832ebceb9e786dd9542df.asciidoc | 11 +-- .../c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc | 11 +-- .../c18100d62ed31bc9e05f62900156e6a8.asciidoc | 8 +- .../c21eb4bc30087188241cbba6b6b89999.asciidoc | 9 +- .../ce13afc0c976c5e1f424b58e0c97fd64.asciidoc | 17 ++-- .../cedb56a71cc743d80263ce352bb21720.asciidoc | 8 +- .../d03139a851888db53f8b7affd85eb495.asciidoc | 5 +- .../d6a21afa4a94b9baa734eac430940bcf.asciidoc | 10 +-- .../d7b61bfb6adb22986a43388b823894cc.asciidoc | 8 +- .../e9fc47015922d51c2b05e502ce9c622e.asciidoc | 8 +- .../eb54506fbc71a7d250e86b22d0600114.asciidoc | 8 +- .../eb96d7dd5f3116a50f7a86b729f1a934.asciidoc | 15 ++-- .../ecfd0d94dd14ef05dfa861f22544b388.asciidoc | 9 +- .../eee6110831c08b9c1b3f56b24656e95b.asciidoc | 8 +- .../f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc | 12 ++- .../f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc | 5 +- .../f57ce7de0946e9416ddb9150e95f4b74.asciidoc | 8 +- .../fd620f09dbce62c6f0f603a366623607.asciidoc | 23 +++-- docs/reference.asciidoc | 48 +++++++--- src/api/api/ingest.ts | 60 ++++++++----- src/api/types.ts | 84 ++++++++++++++++-- src/api/typesWithBodyKey.ts | 87 +++++++++++++++++-- 82 files changed, 797 insertions(+), 418 deletions(-) create mode 100644 docs/doc_examples/13fe12cdb73bc89f07a83f1e6b127511.asciidoc create mode 100644 docs/doc_examples/20179a8889e949d6a8ee5fbf2ba35c96.asciidoc create mode 
100644 docs/doc_examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc create mode 100644 docs/doc_examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc create mode 100644 docs/doc_examples/54c12d5099d7b715c15f5bbf65b386a1.asciidoc create mode 100644 docs/doc_examples/7c63a1d2fbec5283e913ff39fafd0604.asciidoc create mode 100644 docs/doc_examples/82eff1d681a5d0d1538ef011bb32ab9a.asciidoc create mode 100644 docs/doc_examples/9326e323f7ffde678fa04d2d1de3d3bc.asciidoc create mode 100644 docs/doc_examples/9868ce609f4450702934fcbf4c340bf1.asciidoc create mode 100644 docs/doc_examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc create mode 100644 docs/doc_examples/9d396afad93782699d7a929578c85284.asciidoc create mode 100644 docs/doc_examples/a69c7c3412af73758f629e76263063b5.asciidoc create mode 100644 docs/doc_examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc create mode 100644 docs/doc_examples/b16700002af3aa70639f3e88c733bf35.asciidoc diff --git a/docs/doc_examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc b/docs/doc_examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc index d2c53fbf1..09675e02d 100644 --- a/docs/doc_examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc +++ b/docs/doc_examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/my-e5-model", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "my-e5-model", + inference_config: { service: "elasticsearch", service_settings: { num_allocations: 1, diff --git a/docs/doc_examples/04412d11783dac25b5fd2ec5407078a3.asciidoc b/docs/doc_examples/04412d11783dac25b5fd2ec5407078a3.asciidoc index dbf3b4c90..fbefc580b 100644 --- a/docs/doc_examples/04412d11783dac25b5fd2ec5407078a3.asciidoc +++ b/docs/doc_examples/04412d11783dac25b5fd2ec5407078a3.asciidoc @@ -3,13 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector/_api_key_id", - body: { - api_key_id: "my-api-key-id", - api_key_secret_id: "my-connector-secret-id", - }, +const response = await client.connector.updateApiKeyId({ + connector_id: "my-connector", + api_key_id: "my-api-key-id", + api_key_secret_id: "my-connector-secret-id", }); console.log(response); ---- diff --git a/docs/doc_examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc b/docs/doc_examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc index d78024183..00ef08a92 100644 --- a/docs/doc_examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc +++ b/docs/doc_examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/google_vertex_ai_embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "google_vertex_ai_embeddings", + inference_config: { service: "googlevertexai", service_settings: { service_account_json: "", diff --git a/docs/doc_examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc b/docs/doc_examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc index 0fb258f3f..5c948b3d2 100644 --- a/docs/doc_examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc +++ b/docs/doc_examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/amazon_bedrock_embeddings", - body: { +const response = await 
client.inference.put({ + task_type: "text_embedding", + inference_id: "amazon_bedrock_embeddings", + inference_config: { service: "amazonbedrock", service_settings: { access_key: "", diff --git a/docs/doc_examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc b/docs/doc_examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc index 5df1863c2..2c93643b2 100644 --- a/docs/doc_examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc +++ b/docs/doc_examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc @@ -3,29 +3,26 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-g-drive-connector/_filtering", - body: { - rules: [ - { - field: "file_extension", - id: "exclude-txt-files", - order: 0, - policy: "exclude", - rule: "equals", - value: "txt", - }, - { - field: "_", - id: "DEFAULT", - order: 1, - policy: "include", - rule: "regex", - value: ".*", - }, - ], - }, +const response = await client.connector.updateFiltering({ + connector_id: "my-g-drive-connector", + rules: [ + { + field: "file_extension", + id: "exclude-txt-files", + order: 0, + policy: "exclude", + rule: "equals", + value: "txt", + }, + { + field: "_", + id: "DEFAULT", + order: 1, + policy: "include", + rule: "regex", + value: ".*", + }, + ], }); console.log(response); ---- diff --git a/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc b/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc index 52c6688ac..01baab9cf 100644 --- a/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc +++ b/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc @@ -3,13 +3,11 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_inference/sparse_embedding/my-elser-model", - body: { - input: - "The sky above the port was the color of television tuned to a dead channel.", - }, +const response = await client.inference.inference({ + task_type: "sparse_embedding", + inference_id: "my-elser-model", + input: + "The sky above the port was the color of television tuned to a dead channel.", }); console.log(response); ---- diff --git a/docs/doc_examples/13fe12cdb73bc89f07a83f1e6b127511.asciidoc b/docs/doc_examples/13fe12cdb73bc89f07a83f1e6b127511.asciidoc new file mode 100644 index 000000000..672620810 --- /dev/null +++ b/docs/doc_examples/13fe12cdb73bc89f07a83f1e6b127511.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "google-vertex-ai-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 768, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc b/docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc index c5e05aa73..6f918e3b2 100644 --- a/docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc +++ b/docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.ingest.deleteGeoipDatabase({ - id: "my-database-id", - body: null, +const response = await client.transport.request({ + method: "DELETE", + path: "/_ingest/geoip/database/my-database-id", }); console.log(response); ---- diff --git a/docs/doc_examples/1a56df055b94466ca76818e0858752c6.asciidoc 
b/docs/doc_examples/1a56df055b94466ca76818e0858752c6.asciidoc index dfb2fc9c9..46718769b 100644 --- a/docs/doc_examples/1a56df055b94466ca76818e0858752c6.asciidoc +++ b/docs/doc_examples/1a56df055b94466ca76818e0858752c6.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/openai_embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "openai_embeddings", + inference_config: { service: "openai", service_settings: { api_key: "", diff --git a/docs/doc_examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc b/docs/doc_examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc index f29d19695..06c636ead 100644 --- a/docs/doc_examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc +++ b/docs/doc_examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/rerank/google_vertex_ai_rerank", - body: { +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "google_vertex_ai_rerank", + inference_config: { service: "googlevertexai", service_settings: { service_account_json: "", diff --git a/docs/doc_examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc b/docs/doc_examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc index 0c7d4b6f1..160884d3b 100644 --- a/docs/doc_examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc +++ b/docs/doc_examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/sparse_embedding/my-elser-model", - body: { +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "my-elser-model", + inference_config: { service: "elser", service_settings: { adaptive_allocations: { diff --git a/docs/doc_examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc b/docs/doc_examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc index 9319faa39..a38e95486 100644 --- a/docs/doc_examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc +++ b/docs/doc_examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/azure_ai_studio_embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "azure_ai_studio_embeddings", + inference_config: { service: "azureaistudio", service_settings: { api_key: "", diff --git a/docs/doc_examples/1e26353d546d733634187b8c3a7837a7.asciidoc b/docs/doc_examples/1e26353d546d733634187b8c3a7837a7.asciidoc index 2f307bbf3..6e8f3033b 100644 --- a/docs/doc_examples/1e26353d546d733634187b8c3a7837a7.asciidoc +++ b/docs/doc_examples/1e26353d546d733634187b8c3a7837a7.asciidoc @@ -3,12 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "GET", - path: "/_connector", - querystring: { - service_type: "sharepoint_online", - }, +const response = await client.connector.list({ + service_type: "sharepoint_online", }); console.log(response); ---- diff --git a/docs/doc_examples/20179a8889e949d6a8ee5fbf2ba35c96.asciidoc b/docs/doc_examples/20179a8889e949d6a8ee5fbf2ba35c96.asciidoc new file mode 100644 index 000000000..3a746ae88 --- /dev/null +++ b/docs/doc_examples/20179a8889e949d6a8ee5fbf2ba35c96.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use 
`node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "google-vertex-ai-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "google_vertex_ai_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc b/docs/doc_examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc index 2cf11a621..295b4ed6c 100644 --- a/docs/doc_examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc +++ b/docs/doc_examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/azure_ai_studio_embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "azure_ai_studio_embeddings", + inference_config: { service: "azureaistudio", service_settings: { api_key: "", diff --git a/docs/doc_examples/342ddf9121aeddd82fea2464665e25da.asciidoc b/docs/doc_examples/342ddf9121aeddd82fea2464665e25da.asciidoc index 00d2c0234..0b2b04c09 100644 --- a/docs/doc_examples/342ddf9121aeddd82fea2464665e25da.asciidoc +++ b/docs/doc_examples/342ddf9121aeddd82fea2464665e25da.asciidoc @@ -3,14 +3,11 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector", - body: { - index_name: "search-google-drive", - name: "My Connector", - service_type: "google_drive", - }, +const response = await client.connector.put({ + connector_id: "my-connector", + index_name: "search-google-drive", + name: "My Connector", + service_type: "google_drive", }); console.log(response); ---- diff --git a/docs/doc_examples/398389933901b572a06a752bc780af7c.asciidoc b/docs/doc_examples/398389933901b572a06a752bc780af7c.asciidoc index 029b478da..cfbe8ea75 100644 --- a/docs/doc_examples/398389933901b572a06a752bc780af7c.asciidoc +++ b/docs/doc_examples/398389933901b572a06a752bc780af7c.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/completion/anthropic_completion", - body: { +const response = await client.inference.put({ + task_type: "completion", + inference_id: "anthropic_completion", + inference_config: { service: "anthropic", service_settings: { api_key: "", diff --git a/docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc b/docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc index a00dcffd3..7ad8dcc20 100644 --- a/docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc +++ b/docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.ingest.getGeoipDatabase({ - id: "my-database-id", - body: null, +const response = await client.transport.request({ + method: "GET", + path: "/_ingest/geoip/database/my-database-id", }); console.log(response); ---- diff --git a/docs/doc_examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc b/docs/doc_examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc new file mode 100644 index 000000000..d2b00a583 --- /dev/null +++ b/docs/doc_examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- 
+const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "alibabacloud_ai_search_embeddings", + inference_config: { + service: "alibabacloud-ai-search", + service_settings: { + api_key: "", + service_id: "", + host: "", + workspace: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/41175d304e660da2931764f9a4418fd3.asciidoc b/docs/doc_examples/41175d304e660da2931764f9a4418fd3.asciidoc index 10c2ec536..81079be09 100644 --- a/docs/doc_examples/41175d304e660da2931764f9a4418fd3.asciidoc +++ b/docs/doc_examples/41175d304e660da2931764f9a4418fd3.asciidoc @@ -3,16 +3,13 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector/_pipeline", - body: { - pipeline: { - extract_binary_content: true, - name: "my-connector-pipeline", - reduce_whitespace: true, - run_ml_inference: true, - }, +const response = await client.connector.updatePipeline({ + connector_id: "my-connector", + pipeline: { + extract_binary_content: true, + name: "my-connector-pipeline", + reduce_whitespace: true, + run_ml_inference: true, }, }); console.log(response); diff --git a/docs/doc_examples/430705509f8367aef92be413f702520b.asciidoc b/docs/doc_examples/430705509f8367aef92be413f702520b.asciidoc index 3fe855a63..c95b05eac 100644 --- a/docs/doc_examples/430705509f8367aef92be413f702520b.asciidoc +++ b/docs/doc_examples/430705509f8367aef92be413f702520b.asciidoc @@ -3,12 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector/_status", - body: { - status: "needs_configuration", - }, +const response = await client.connector.updateStatus({ + connector_id: "my-connector", + status: "needs_configuration", }); console.log(response); ---- diff --git a/docs/doc_examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc b/docs/doc_examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc index af5ca2ccc..2c8c7983b 100644 --- a/docs/doc_examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc +++ b/docs/doc_examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/my-msmarco-minilm-model", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "my-msmarco-minilm-model", + inference_config: { service: "elasticsearch", service_settings: { num_allocations: 1, diff --git a/docs/doc_examples/4e3414fc712b16311f9e433dd366f49d.asciidoc b/docs/doc_examples/4e3414fc712b16311f9e433dd366f49d.asciidoc index 64930ca4f..e9274320d 100644 --- a/docs/doc_examples/4e3414fc712b16311f9e433dd366f49d.asciidoc +++ b/docs/doc_examples/4e3414fc712b16311f9e433dd366f49d.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - method: "DELETE", - path: "/_inference/sparse_embedding/my-elser-model", +const response = await client.inference.delete({ + task_type: "sparse_embedding", + inference_id: "my-elser-model", }); console.log(response); ---- diff --git a/docs/doc_examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc b/docs/doc_examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc new file mode 100644 index 000000000..a6490dd78 --- /dev/null +++ b/docs/doc_examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response 
= await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "alibabacloud_ai_search_sparse", + inference_config: { + service: "alibabacloud-ai-search", + service_settings: { + api_key: "", + service_id: "ops-text-sparse-embedding-001", + host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + workspace: "default", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc b/docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc index 9dfe27bbc..65425b66a 100644 --- a/docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc +++ b/docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.ingest.deleteGeoipDatabase({ - id: "example-database-id", - body: null, +const response = await client.transport.request({ + method: "DELETE", + path: "/_ingest/geoip/database/example-database-id", }); console.log(response); ---- diff --git a/docs/doc_examples/54c12d5099d7b715c15f5bbf65b386a1.asciidoc b/docs/doc_examples/54c12d5099d7b715c15f5bbf65b386a1.asciidoc new file mode 100644 index 000000000..ea16a6a2c --- /dev/null +++ b/docs/doc_examples/54c12d5099d7b715c15f5bbf65b386a1.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "alibabacloud-ai-search-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1024, + element_type: "float", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc b/docs/doc_examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc index f1aea891e..5ea918642 100644 --- a/docs/doc_examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc +++ b/docs/doc_examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/completion/openai-completion", - body: { +const response = await client.inference.put({ + task_type: "completion", + inference_id: "openai-completion", + inference_config: { service: "openai", service_settings: { api_key: "", diff --git a/docs/doc_examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc b/docs/doc_examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc index a9ee715a4..feecc4a39 100644 --- a/docs/doc_examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc +++ b/docs/doc_examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc @@ -3,21 +3,18 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector/_last_sync", - body: { - last_access_control_sync_error: "Houston, we have a problem!", - last_access_control_sync_scheduled_at: "2023-11-09T15:13:08.231Z", - last_access_control_sync_status: "pending", - last_deleted_document_count: 42, - last_incremental_sync_scheduled_at: "2023-11-09T15:13:08.231Z", - last_indexed_document_count: 42, - last_sync_error: "Houston, we have a problem!", - last_sync_scheduled_at: "2024-11-09T15:13:08.231Z", - last_sync_status: "completed", - last_synced: "2024-11-09T15:13:08.231Z", - }, +const response = await client.connector.lastSync({ + connector_id: "my-connector", + last_access_control_sync_error: "Houston, we have a problem!", + last_access_control_sync_scheduled_at: "2023-11-09T15:13:08.231Z", + 
last_access_control_sync_status: "pending", + last_deleted_document_count: 42, + last_incremental_sync_scheduled_at: "2023-11-09T15:13:08.231Z", + last_indexed_document_count: 42, + last_sync_error: "Houston, we have a problem!", + last_sync_scheduled_at: "2024-11-09T15:13:08.231Z", + last_sync_status: "completed", + last_synced: "2024-11-09T15:13:08.231Z", }); console.log(response); ---- diff --git a/docs/doc_examples/6606d46685d10377b996b5f20f1229b5.asciidoc b/docs/doc_examples/6606d46685d10377b996b5f20f1229b5.asciidoc index 4a0655e33..5cbb57477 100644 --- a/docs/doc_examples/6606d46685d10377b996b5f20f1229b5.asciidoc +++ b/docs/doc_examples/6606d46685d10377b996b5f20f1229b5.asciidoc @@ -3,12 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector/_index_name", - body: { - index_name: "data-from-my-google-drive", - }, +const response = await client.connector.updateIndexName({ + connector_id: "my-connector", + index_name: "data-from-my-google-drive", }); console.log(response); ---- diff --git a/docs/doc_examples/6ddd4e657efbf45def430a6419825796.asciidoc b/docs/doc_examples/6ddd4e657efbf45def430a6419825796.asciidoc index cb9376459..5bd1b226c 100644 --- a/docs/doc_examples/6ddd4e657efbf45def430a6419825796.asciidoc +++ b/docs/doc_examples/6ddd4e657efbf45def430a6419825796.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/completion/azure_ai_studio_completion", - body: { +const response = await client.inference.put({ + task_type: "completion", + inference_id: "azure_ai_studio_completion", + inference_config: { service: "azureaistudio", service_settings: { api_key: "", diff --git a/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc b/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc index fe849a80d..8f897c69c 100644 --- a/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc +++ b/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc @@ -3,15 +3,13 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_inference/text_embedding/my-cohere-endpoint", - body: { - input: - "The sky above the port was the color of television tuned to a dead channel.", - task_settings: { - input_type: "ingest", - }, +const response = await client.inference.inference({ + task_type: "text_embedding", + inference_id: "my-cohere-endpoint", + input: + "The sky above the port was the color of television tuned to a dead channel.", + task_settings: { + input_type: "ingest", }, }); console.log(response); diff --git a/docs/doc_examples/7594a9a85c8511701e281974cbc253e1.asciidoc b/docs/doc_examples/7594a9a85c8511701e281974cbc253e1.asciidoc index e98728bf2..3c4dca864 100644 --- a/docs/doc_examples/7594a9a85c8511701e281974cbc253e1.asciidoc +++ b/docs/doc_examples/7594a9a85c8511701e281974cbc253e1.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/amazon_bedrock_embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "amazon_bedrock_embeddings", + inference_config: { service: "amazonbedrock", service_settings: { access_key: "", diff --git a/docs/doc_examples/7752b677825523bfb0c38ad9325a6d47.asciidoc b/docs/doc_examples/7752b677825523bfb0c38ad9325a6d47.asciidoc index 03b51a131..f6a5082a7 100644 --- 
a/docs/doc_examples/7752b677825523bfb0c38ad9325a6d47.asciidoc +++ b/docs/doc_examples/7752b677825523bfb0c38ad9325a6d47.asciidoc @@ -3,12 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - method: "DELETE", - path: "/_connector/another-connector", - querystring: { - delete_sync_jobs: "true", - }, +const response = await client.connector.delete({ + connector_id: "another-connector", + delete_sync_jobs: "true", }); console.log(response); ---- diff --git a/docs/doc_examples/77b90f6787195767b6da60d8532714b4.asciidoc b/docs/doc_examples/77b90f6787195767b6da60d8532714b4.asciidoc index 7ab2d290f..08570d5c6 100644 --- a/docs/doc_examples/77b90f6787195767b6da60d8532714b4.asciidoc +++ b/docs/doc_examples/77b90f6787195767b6da60d8532714b4.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/azure_openai_embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "azure_openai_embeddings", + inference_config: { service: "azureopenai", service_settings: { api_key: "", diff --git a/docs/doc_examples/7c63a1d2fbec5283e913ff39fafd0604.asciidoc b/docs/doc_examples/7c63a1d2fbec5283e913ff39fafd0604.asciidoc new file mode 100644 index 000000000..d83687e37 --- /dev/null +++ b/docs/doc_examples/7c63a1d2fbec5283e913ff39fafd0604.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "google_vertex_ai_embeddings", + processors: [ + { + inference: { + model_id: "google_vertex_ai_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/82844ef45e11c0eece100d3109db3182.asciidoc b/docs/doc_examples/82844ef45e11c0eece100d3109db3182.asciidoc index f3f816377..07d04a9b1 100644 --- a/docs/doc_examples/82844ef45e11c0eece100d3109db3182.asciidoc +++ b/docs/doc_examples/82844ef45e11c0eece100d3109db3182.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/completion/amazon_bedrock_completion", - body: { +const response = await client.inference.put({ + task_type: "completion", + inference_id: "amazon_bedrock_completion", + inference_config: { service: "amazonbedrock", service_settings: { access_key: "", diff --git a/docs/doc_examples/82eff1d681a5d0d1538ef011bb32ab9a.asciidoc b/docs/doc_examples/82eff1d681a5d0d1538ef011bb32ab9a.asciidoc new file mode 100644 index 000000000..c7b1a1209 --- /dev/null +++ b/docs/doc_examples/82eff1d681a5d0d1538ef011bb32ab9a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "alibabacloud-ai-search-embeddings", + pipeline: "alibabacloud_ai_search_embeddings", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/840f8c863c30b04abcf2dd66b846f157.asciidoc b/docs/doc_examples/840f8c863c30b04abcf2dd66b846f157.asciidoc index b38af10e2..e16ea6560 100644 --- a/docs/doc_examples/840f8c863c30b04abcf2dd66b846f157.asciidoc +++ b/docs/doc_examples/840f8c863c30b04abcf2dd66b846f157.asciidoc @@ 
-3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/my-e5-model", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "my-e5-model", + inference_config: { service: "elasticsearch", service_settings: { adaptive_allocations: { diff --git a/docs/doc_examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc b/docs/doc_examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc index f6ab408a7..dad0bf4d5 100644 --- a/docs/doc_examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc +++ b/docs/doc_examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/mistral_embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "mistral_embeddings", + inference_config: { service: "mistral", service_settings: { api_key: "", diff --git a/docs/doc_examples/8619bd17bbfe33490b1f277007f654db.asciidoc b/docs/doc_examples/8619bd17bbfe33490b1f277007f654db.asciidoc index 64b3d669e..48f850d0c 100644 --- a/docs/doc_examples/8619bd17bbfe33490b1f277007f654db.asciidoc +++ b/docs/doc_examples/8619bd17bbfe33490b1f277007f654db.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/rerank/cohere-rerank", - body: { +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "cohere-rerank", + inference_config: { service: "cohere", service_settings: { api_key: "", diff --git a/docs/doc_examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc b/docs/doc_examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc index ddab55399..fb4577692 100644 --- a/docs/doc_examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc +++ b/docs/doc_examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc @@ -3,23 +3,20 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector/_scheduling", - body: { - scheduling: { - access_control: { - enabled: true, - interval: "0 10 0 * * ?", - }, - full: { - enabled: true, - interval: "0 20 0 * * ?", - }, - incremental: { - enabled: false, - interval: "0 30 0 * * ?", - }, +const response = await client.connector.updateScheduling({ + connector_id: "my-connector", + scheduling: { + access_control: { + enabled: true, + interval: "0 10 0 * * ?", + }, + full: { + enabled: true, + interval: "0 20 0 * * ?", + }, + incremental: { + enabled: false, + interval: "0 30 0 * * ?", }, }, }); diff --git a/docs/doc_examples/9326e323f7ffde678fa04d2d1de3d3bc.asciidoc b/docs/doc_examples/9326e323f7ffde678fa04d2d1de3d3bc.asciidoc new file mode 100644 index 000000000..8ca628047 --- /dev/null +++ b/docs/doc_examples/9326e323f7ffde678fa04d2d1de3d3bc.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "alibabacloud-ai-search-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "alibabacloud_ai_search_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc 
b/docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc index b3393e3ec..0d3cdbbe3 100644 --- a/docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc +++ b/docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc @@ -3,8 +3,9 @@ [source, js] ---- -const response = await client.ingest.putGeoipDatabase({ - id: "my-database-id", +const response = await client.transport.request({ + method: "PUT", + path: "/_ingest/geoip/database/my-database-id", body: { name: "GeoIP2-Domain", maxmind: { diff --git a/docs/doc_examples/9868ce609f4450702934fcbf4c340bf1.asciidoc b/docs/doc_examples/9868ce609f4450702934fcbf4c340bf1.asciidoc new file mode 100644 index 000000000..efbc3834d --- /dev/null +++ b/docs/doc_examples/9868ce609f4450702934fcbf4c340bf1.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "alibabacloud_ai_search_embeddings", + processors: [ + { + inference: { + model_id: "alibabacloud_ai_search_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc b/docs/doc_examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc new file mode 100644 index 000000000..570db554b --- /dev/null +++ b/docs/doc_examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "alibabacloud_ai_search_embeddings", + inference_config: { + service: "alibabacloud-ai-search", + service_settings: { + api_key: "", + service_id: "ops-text-embedding-001", + host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + workspace: "default", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99803d7b111b862c0c82e9908e549b16.asciidoc b/docs/doc_examples/99803d7b111b862c0c82e9908e549b16.asciidoc index 3989008d1..620c04c62 100644 --- a/docs/doc_examples/99803d7b111b862c0c82e9908e549b16.asciidoc +++ b/docs/doc_examples/99803d7b111b862c0c82e9908e549b16.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/mistral-embeddings-test", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "mistral-embeddings-test", + inference_config: { service: "mistral", service_settings: { api_key: "", diff --git a/docs/doc_examples/9a203aae3e1412d919546276fb52a5ca.asciidoc b/docs/doc_examples/9a203aae3e1412d919546276fb52a5ca.asciidoc index 411649c25..e1311ca66 100644 --- a/docs/doc_examples/9a203aae3e1412d919546276fb52a5ca.asciidoc +++ b/docs/doc_examples/9a203aae3e1412d919546276fb52a5ca.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/cohere-embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "cohere-embeddings", + inference_config: { service: "cohere", service_settings: { api_key: "", diff --git a/docs/doc_examples/9c021836acf7c0370e289f611325868d.asciidoc b/docs/doc_examples/9c021836acf7c0370e289f611325868d.asciidoc index 
f50a76d1b..ed4557890 100644 --- a/docs/doc_examples/9c021836acf7c0370e289f611325868d.asciidoc +++ b/docs/doc_examples/9c021836acf7c0370e289f611325868d.asciidoc @@ -3,17 +3,14 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-spo-connector/_configuration", - body: { - values: { - tenant_id: "my-tenant-id", - tenant_name: "my-sharepoint-site", - client_id: "foo", - secret_value: "bar", - site_collections: "*", - }, +const response = await client.connector.updateConfiguration({ + connector_id: "my-spo-connector", + values: { + tenant_id: "my-tenant-id", + tenant_name: "my-sharepoint-site", + client_id: "foo", + secret_value: "bar", + site_collections: "*", }, }); console.log(response); diff --git a/docs/doc_examples/9d396afad93782699d7a929578c85284.asciidoc b/docs/doc_examples/9d396afad93782699d7a929578c85284.asciidoc new file mode 100644 index 000000000..2f0a85e90 --- /dev/null +++ b/docs/doc_examples/9d396afad93782699d7a929578c85284.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "google_vertex_ai_embeddings", + inference_config: { + service: "googlevertexai", + service_settings: { + service_account_json: "", + model_id: "text-embedding-004", + location: "", + project_id: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc b/docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc index 0da3acf87..6be472e3b 100644 --- a/docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc +++ b/docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/openai-embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "openai-embeddings", + inference_config: { service: "openai", service_settings: { api_key: "", diff --git a/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc b/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc index 8cfc3b071..da9071e2c 100644 --- a/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc +++ b/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/sparse_embedding/elser_embeddings", - body: { +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "elser_embeddings", + inference_config: { service: "elser", service_settings: { num_allocations: 1, diff --git a/docs/doc_examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc b/docs/doc_examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc index a340791bd..b03688330 100644 --- a/docs/doc_examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc +++ b/docs/doc_examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - method: "GET", - path: "/_inference/sparse_embedding/my-elser-model", +const response = await client.inference.get({ + task_type: "sparse_embedding", + inference_id: "my-elser-model", }); console.log(response); ---- diff --git a/docs/doc_examples/a4ee2214d621bcfaf768c46d21325958.asciidoc 
b/docs/doc_examples/a4ee2214d621bcfaf768c46d21325958.asciidoc index 7179d8152..1b51b6936 100644 --- a/docs/doc_examples/a4ee2214d621bcfaf768c46d21325958.asciidoc +++ b/docs/doc_examples/a4ee2214d621bcfaf768c46d21325958.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/hugging_face_embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "hugging_face_embeddings", + inference_config: { service: "hugging_face", service_settings: { api_key: "", diff --git a/docs/doc_examples/a594f05459d9eecc8050c73fc8da336f.asciidoc b/docs/doc_examples/a594f05459d9eecc8050c73fc8da336f.asciidoc index 7ab2d290f..08570d5c6 100644 --- a/docs/doc_examples/a594f05459d9eecc8050c73fc8da336f.asciidoc +++ b/docs/doc_examples/a594f05459d9eecc8050c73fc8da336f.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/azure_openai_embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "azure_openai_embeddings", + inference_config: { service: "azureopenai", service_settings: { api_key: "", diff --git a/docs/doc_examples/a69c7c3412af73758f629e76263063b5.asciidoc b/docs/doc_examples/a69c7c3412af73758f629e76263063b5.asciidoc new file mode 100644 index 000000000..2312ca864 --- /dev/null +++ b/docs/doc_examples/a69c7c3412af73758f629e76263063b5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "google-vertex-ai-embeddings", + pipeline: "google_vertex_ai_embeddings", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a960b43e720b4934edb74ab4b085ca77.asciidoc b/docs/doc_examples/a960b43e720b4934edb74ab4b085ca77.asciidoc index 758df0a4d..8e7ed5f3d 100644 --- a/docs/doc_examples/a960b43e720b4934edb74ab4b085ca77.asciidoc +++ b/docs/doc_examples/a960b43e720b4934edb74ab4b085ca77.asciidoc @@ -3,9 +3,6 @@ [source, js] ---- -const response = await client.transport.request({ - method: "GET", - path: "/_connector", -}); +const response = await client.connector.list(); console.log(response); ---- diff --git a/docs/doc_examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc b/docs/doc_examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc new file mode 100644 index 000000000..a9bcf22e7 --- /dev/null +++ b/docs/doc_examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "alibabacloud_ai_search_rerank", + inference_config: { + service: "alibabacloud-ai-search", + service_settings: { + api_key: "", + service_id: "ops-bge-reranker-larger", + host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + workspace: "default", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b16700002af3aa70639f3e88c733bf35.asciidoc b/docs/doc_examples/b16700002af3aa70639f3e88c733bf35.asciidoc new file mode 100644 index 000000000..87a17c886 --- /dev/null +++ b/docs/doc_examples/b16700002af3aa70639f3e88c733bf35.asciidoc 
@@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.openPointInTime({ + index: "my-index-000001", + keep_alive: "1m", + allow_partial_search_results: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc b/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc index d51f2451f..876b182d2 100644 --- a/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc +++ b/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc @@ -3,12 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_inference/completion/openai_chat_completions", - body: { - input: "What is Elastic?", - }, +const response = await client.inference.inference({ + task_type: "completion", + inference_id: "openai_chat_completions", + input: "What is Elastic?", }); console.log(response); ---- diff --git a/docs/doc_examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc b/docs/doc_examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc index b53c375b8..f758ada37 100644 --- a/docs/doc_examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc +++ b/docs/doc_examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/sparse_embedding/my-elser-endpoint", - body: { +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "my-elser-endpoint", + inference_config: { service: "elser", service_settings: { num_allocations: 1, diff --git a/docs/doc_examples/c00c9412609832ebceb9e786dd9542df.asciidoc b/docs/doc_examples/c00c9412609832ebceb9e786dd9542df.asciidoc index c5c37e0b5..f2b49ad8b 100644 --- a/docs/doc_examples/c00c9412609832ebceb9e786dd9542df.asciidoc +++ b/docs/doc_examples/c00c9412609832ebceb9e786dd9542df.asciidoc @@ -3,13 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector/_name", - body: { - name: "Custom connector", - description: "This is my customized connector", - }, +const response = await client.connector.updateName({ + connector_id: "my-connector", + name: "Custom connector", + description: "This is my customized connector", }); console.log(response); ---- diff --git a/docs/doc_examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc b/docs/doc_examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc index ad3b4d462..c0190ee1c 100644 --- a/docs/doc_examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc +++ b/docs/doc_examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc @@ -3,13 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-spo-connector/_configuration", - body: { - values: { - secret_value: "foo-bar", - }, +const response = await client.connector.updateConfiguration({ + connector_id: "my-spo-connector", + values: { + secret_value: "foo-bar", }, }); console.log(response); diff --git a/docs/doc_examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc b/docs/doc_examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc index 3c99976f3..54f13ca9d 100644 --- a/docs/doc_examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc +++ b/docs/doc_examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc @@ -3,12 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "GET", - path: "/_connector", - 
querystring: { - index_name: "search-google-drive", - }, +const response = await client.connector.list({ + index_name: "search-google-drive", }); console.log(response); ---- diff --git a/docs/doc_examples/c21eb4bc30087188241cbba6b6b89999.asciidoc b/docs/doc_examples/c21eb4bc30087188241cbba6b6b89999.asciidoc index 8c6ac6d07..c33aa6f65 100644 --- a/docs/doc_examples/c21eb4bc30087188241cbba6b6b89999.asciidoc +++ b/docs/doc_examples/c21eb4bc30087188241cbba6b6b89999.asciidoc @@ -3,12 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector/_service_type", - body: { - service_type: "sharepoint_online", - }, +const response = await client.connector.updateServiceType({ + connector_id: "my-connector", + service_type: "sharepoint_online", }); console.log(response); ---- diff --git a/docs/doc_examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc b/docs/doc_examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc index f33bbe266..e06ff8a73 100644 --- a/docs/doc_examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc +++ b/docs/doc_examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc @@ -3,16 +3,13 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector", - body: { - index_name: "search-google-drive", - name: "My Connector", - description: "My Connector to sync data to Elastic index from Google Drive", - service_type: "google_drive", - language: "english", - }, +const response = await client.connector.put({ + connector_id: "my-connector", + index_name: "search-google-drive", + name: "My Connector", + description: "My Connector to sync data to Elastic index from Google Drive", + service_type: "google_drive", + language: "english", }); console.log(response); ---- diff --git a/docs/doc_examples/cedb56a71cc743d80263ce352bb21720.asciidoc b/docs/doc_examples/cedb56a71cc743d80263ce352bb21720.asciidoc index 1b8303420..c36f080a5 100644 --- a/docs/doc_examples/cedb56a71cc743d80263ce352bb21720.asciidoc +++ b/docs/doc_examples/cedb56a71cc743d80263ce352bb21720.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/sparse_embedding/my-elser-model", - body: { +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "my-elser-model", + inference_config: { service: "elser", service_settings: { num_allocations: 1, diff --git a/docs/doc_examples/d03139a851888db53f8b7affd85eb495.asciidoc b/docs/doc_examples/d03139a851888db53f8b7affd85eb495.asciidoc index 6c02dd8e5..78a5a38bc 100644 --- a/docs/doc_examples/d03139a851888db53f8b7affd85eb495.asciidoc +++ b/docs/doc_examples/d03139a851888db53f8b7affd85eb495.asciidoc @@ -3,9 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector/_check_in", +const response = await client.connector.checkIn({ + connector_id: "my-connector", }); console.log(response); ---- diff --git a/docs/doc_examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc b/docs/doc_examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc index 328c10112..17ba11227 100644 --- a/docs/doc_examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc +++ b/docs/doc_examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc @@ -3,13 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - method: "GET", - path: "/_connector", - querystring: { - from: "0", - size: "2", - }, +const response = await client.connector.list({ + 
from: 0, + size: 2, }); console.log(response); ---- diff --git a/docs/doc_examples/d7b61bfb6adb22986a43388b823894cc.asciidoc b/docs/doc_examples/d7b61bfb6adb22986a43388b823894cc.asciidoc index 2a932f6a2..3e0ce9910 100644 --- a/docs/doc_examples/d7b61bfb6adb22986a43388b823894cc.asciidoc +++ b/docs/doc_examples/d7b61bfb6adb22986a43388b823894cc.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/cohere_embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "cohere_embeddings", + inference_config: { service: "cohere", service_settings: { api_key: "", diff --git a/docs/doc_examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc b/docs/doc_examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc index 3638bed6c..7ec14029d 100644 --- a/docs/doc_examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc +++ b/docs/doc_examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/completion/google_ai_studio_completion", - body: { +const response = await client.inference.put({ + task_type: "completion", + inference_id: "google_ai_studio_completion", + inference_config: { service: "googleaistudio", service_settings: { api_key: "", diff --git a/docs/doc_examples/eb54506fbc71a7d250e86b22d0600114.asciidoc b/docs/doc_examples/eb54506fbc71a7d250e86b22d0600114.asciidoc index 87276bbd2..027c4ff88 100644 --- a/docs/doc_examples/eb54506fbc71a7d250e86b22d0600114.asciidoc +++ b/docs/doc_examples/eb54506fbc71a7d250e86b22d0600114.asciidoc @@ -3,12 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "GET", - path: "/_connector", - querystring: { - service_type: "sharepoint_online,google_drive", - }, +const response = await client.connector.list({ + service_type: "sharepoint_online,google_drive", }); console.log(response); ---- diff --git a/docs/doc_examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc b/docs/doc_examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc index 236d76185..9f2187174 100644 --- a/docs/doc_examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc +++ b/docs/doc_examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc @@ -3,15 +3,12 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector/_scheduling", - body: { - scheduling: { - full: { - enabled: true, - interval: "0 10 0 * * ?", - }, +const response = await client.connector.updateScheduling({ + connector_id: "my-connector", + scheduling: { + full: { + enabled: true, + interval: "0 10 0 * * ?", }, }, }); diff --git a/docs/doc_examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc b/docs/doc_examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc index 2c69648f6..3622004ac 100644 --- a/docs/doc_examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc +++ b/docs/doc_examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc @@ -3,12 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-connector/_error", - body: { - error: "Houston, we have a problem!", - }, +const response = await client.connector.updateError({ + connector_id: "my-connector", + error: "Houston, we have a problem!", }); console.log(response); ---- diff --git a/docs/doc_examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc b/docs/doc_examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc index 
2c82d4a70..d414b928c 100644 --- a/docs/doc_examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc +++ b/docs/doc_examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/text_embedding/hugging-face-embeddings", - body: { +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "hugging-face-embeddings", + inference_config: { service: "hugging_face", service_settings: { api_key: "", diff --git a/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc b/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc index a73c47467..023d009ab 100644 --- a/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc +++ b/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc @@ -3,13 +3,11 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_inference/rerank/cohere_rerank", - body: { - input: ["luke", "like", "leia", "chewy", "r2d2", "star", "wars"], - query: "star wars main character", - }, +const response = await client.inference.inference({ + task_type: "rerank", + inference_id: "cohere_rerank", + input: ["luke", "like", "leia", "chewy", "r2d2", "star", "wars"], + query: "star wars main character", }); console.log(response); ---- diff --git a/docs/doc_examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc b/docs/doc_examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc index a65ee1fe2..ab808a3c4 100644 --- a/docs/doc_examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc +++ b/docs/doc_examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc @@ -3,9 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "GET", - path: "/_connector/my-connector", +const response = await client.connector.get({ + connector_id: "my-connector", }); console.log(response); ---- diff --git a/docs/doc_examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc b/docs/doc_examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc index 1fce2d137..1b2520793 100644 --- a/docs/doc_examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc +++ b/docs/doc_examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_inference/completion/azure_openai_completion", - body: { +const response = await client.inference.put({ + task_type: "completion", + inference_id: "azure_openai_completion", + inference_config: { service: "azureopenai", service_settings: { api_key: "", diff --git a/docs/doc_examples/fd620f09dbce62c6f0f603a366623607.asciidoc b/docs/doc_examples/fd620f09dbce62c6f0f603a366623607.asciidoc index 1131a4d95..5fe2c6fd7 100644 --- a/docs/doc_examples/fd620f09dbce62c6f0f603a366623607.asciidoc +++ b/docs/doc_examples/fd620f09dbce62c6f0f603a366623607.asciidoc @@ -3,19 +3,16 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_connector/my-sql-connector/_filtering", - body: { - advanced_snippet: { - value: [ - { - tables: ["users", "orders"], - query: - "SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id", - }, - ], - }, +const response = await client.connector.updateFiltering({ + connector_id: "my-sql-connector", + advanced_snippet: { + value: [ + { + tables: ["users", "orders"], + query: + "SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id", + }, + ], }, }); 
console.log(response); diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 722b880da..1fdae1564 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -5667,12 +5667,20 @@ === ingest [discrete] ==== delete_geoip_database -Deletes a geoip database configuration +Deletes a geoip database configuration. [source,ts] ---- -client.ingest.deleteGeoipDatabase() +client.ingest.deleteGeoipDatabase({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string | string[])*: A list of geoip database configurations to delete +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== delete_pipeline @@ -5708,12 +5716,21 @@ client.ingest.geoIpStats() [discrete] ==== get_geoip_database -Returns geoip database configuration. +Returns information about one or more geoip database configurations. [source,ts] ---- -client.ingest.getGeoipDatabase() +client.ingest.getGeoipDatabase({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all database configurations, omit this parameter or use `*`. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_pipeline @@ -5752,12 +5769,23 @@ client.ingest.processorGrok() [discrete] ==== put_geoip_database -Puts the configuration for a geoip database to be downloaded +Creates or updates a geoip database configuration. [source,ts] ---- -client.ingest.putGeoipDatabase() +client.ingest.putGeoipDatabase({ id, name, maxmind }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: ID of the database configuration to create or update. +** *`name` (string)*: The provider-assigned name of the IP geolocation database to download. +** *`maxmind` ({ account_id })*: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. +At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== put_pipeline @@ -5777,8 +5805,8 @@ client.ingest.putPipeline({ id }) ** *`id` (string)*: ID of the ingest pipeline to create or update. ** *`_meta` (Optional, Record)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. ** *`description` (Optional, string)*: Description of the ingest pipeline.
-** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. -** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. +** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. +** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. ** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. @@ -5791,16 +5819,16 @@ Executes an ingest pipeline against a set of provided documents. {ref}/simulate-pipeline-api.html[Endpoint documentation] [source,ts] ---- -client.ingest.simulate({ ... }) +client.ingest.simulate({ docs }) ---- [discrete] ==== Arguments * *Request (object):* +** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline. ** *`id` (Optional, string)*: Pipeline to test. If you don’t specify a `pipeline` in the request body, this parameter is required. 
-** *`docs` (Optional, { _id, _index, _source }[])*: Sample documents to test in the pipeline. ** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })*: Pipeline to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index b332c6279..122680b7d 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -45,22 +45,22 @@ export default class Ingest { } /** - * Deletes a geoip database configuration + * Deletes a geoip database configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation} */ - async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -139,13 +139,13 @@ export default class Ingest { } /** - * Returns geoip database configuration. + * Returns information about one or more geoip database configurations. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation} */ - async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined @@ -155,6 +155,7 @@ if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -248,22 +249,34 @@ } /** - * Puts the configuration for a geoip database to be downloaded + * Creates or updates a geoip database configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation} */ - async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['name', 'maxmind'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -327,10 +340,10 @@ export default class Ingest { * Executes an ingest pipeline against a set of provided documents. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html | Elasticsearch API documentation} */ - async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise - async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise { + async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise + async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['docs', 'pipeline'] const querystring: Record = {} @@ -343,7 +356,6 @@ export default class Ingest { body = userBody != null ? { ...userBody } : undefined } - params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { body = body ?? 
{}

diff --git a/src/api/types.ts b/src/api/types.ts
index 1c0e9d1a0..9c656edef 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -1117,7 +1117,7 @@ export interface RenderSearchTemplateResponse {
 export interface ScriptsPainlessExecutePainlessContextSetup {
   document: any
   index: IndexName
-  query: QueryDslQueryContainer
+  query?: QueryDslQueryContainer
 }
 
 export interface ScriptsPainlessExecuteRequest extends RequestBase {
@@ -4836,11 +4836,11 @@ export type AnalysisPhoneticRuleType = 'approx' | 'exact'
 export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase {
   type: 'phonetic'
   encoder: AnalysisPhoneticEncoder
-  languageset: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]
+  languageset?: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]
   max_code_len?: integer
-  name_type: AnalysisPhoneticNameType
+  name_type?: AnalysisPhoneticNameType
   replace?: boolean
-  rule_type: AnalysisPhoneticRuleType
+  rule_type?: AnalysisPhoneticRuleType
 }
 
 export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase {
@@ -12445,6 +12445,11 @@ export interface IngestCsvProcessor extends IngestProcessorBase {
   trim?: boolean
 }
 
+export interface IngestDatabaseConfiguration {
+  name: Name
+  maxmind: IngestMaxmind
+}
+
 export interface IngestDateIndexNameProcessor extends IngestProcessorBase {
   date_formats: string[]
   date_rounding: string
@@ -12523,6 +12528,12 @@ export interface IngestGsubProcessor extends IngestProcessorBase {
   target_field?: Field
 }
 
+export interface IngestHtmlStripProcessor extends IngestProcessorBase {
+  field: Field
+  ignore_missing?: boolean
+  target_field?: Field
+}
+
 export interface IngestInferenceConfig {
   regression?: IngestInferenceConfigRegression
   classification?: IngestInferenceConfigClassification
@@ -12584,6 +12595,10 @@ export interface IngestLowercaseProcessor extends IngestProcessorBase {
   target_field?: Field
 }
 
+export interface IngestMaxmind {
+  account_id: Id
+}
+
 export interface IngestPipeline {
   description?: string
   on_failure?: IngestProcessorContainer[]
@@ -12629,6 +12644,7 @@ export interface IngestProcessorContainer {
   geoip?: IngestGeoIpProcessor
   grok?: IngestGrokProcessor
   gsub?: IngestGsubProcessor
+  html_strip?: IngestHtmlStripProcessor
   inference?: IngestInferenceProcessor
   join?: IngestJoinProcessor
   json?: IngestJsonProcessor
@@ -12646,6 +12662,7 @@ export interface IngestProcessorContainer {
   trim?: IngestTrimProcessor
   uppercase?: IngestUppercaseProcessor
   urldecode?: IngestUrlDecodeProcessor
+  uri_parts?: IngestUriPartsProcessor
   user_agent?: IngestUserAgentProcessor
 }
 
@@ -12716,6 +12733,14 @@ export interface IngestUppercaseProcessor extends IngestProcessorBase {
   target_field?: Field
 }
 
+export interface IngestUriPartsProcessor extends IngestProcessorBase {
+  field: Field
+  ignore_missing?: boolean
+  keep_original?: boolean
+  remove_if_successful?: boolean
+  target_field?: Field
+}
+
 export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
   field: Field
   ignore_missing?: boolean
@@ -12732,6 +12757,14 @@ export interface IngestUserAgentProcessor extends IngestProcessorBase {
 }
 
 export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD'
 
+export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
+  id: Ids
+  master_timeout?: Duration
+  timeout?: Duration
+}
+
+export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase
+
 export interface IngestDeletePipelineRequest extends RequestBase {
   id: Id
   master_timeout?: Duration
@@ -12744,8 +12777,9 @@ export interface IngestGeoIpStatsGeoIpDownloadStatistics {
   successful_downloads: integer
   failed_downloads: integer
   total_download_time: DurationValue<UnitMillis>
-  database_count: integer
+  databases_count: integer
   skipped_updates: integer
+  expired_databases: integer
 }
 
 export interface IngestGeoIpStatsGeoIpNodeDatabaseName {
@@ -12765,6 +12799,22 @@ export interface IngestGeoIpStatsResponse {
   nodes: Record<Id, IngestGeoIpStatsGeoIpNodeDatabases>
 }
 
+export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata {
+  id: Id
+  version: long
+  modified_date_millis: EpochTime<UnitMillis>
+  database: IngestDatabaseConfiguration
+}
+
+export interface IngestGetGeoipDatabaseRequest extends RequestBase {
+  id?: Ids
+  master_timeout?: Duration
+}
+
+export interface IngestGetGeoipDatabaseResponse {
+  databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[]
+}
+
 export interface IngestGetPipelineRequest extends RequestBase {
   id?: Id
   master_timeout?: Duration
@@ -12780,6 +12830,16 @@ export interface IngestProcessorGrokResponse {
   patterns: Record<string, string>
 }
 
+export interface IngestPutGeoipDatabaseRequest extends RequestBase {
+  id: Id
+  master_timeout?: Duration
+  timeout?: Duration
+  name: Name
+  maxmind: IngestMaxmind
+}
+
+export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase
+
 export interface IngestPutPipelineRequest extends RequestBase {
   id: Id
   master_timeout?: Duration
@@ -12819,21 +12879,29 @@ export interface IngestSimulateIngest {
 
 export interface IngestSimulatePipelineSimulation {
   doc?: IngestSimulateDocumentSimulation
-  processor_results?: IngestSimulatePipelineSimulation[]
   tag?: string
   processor_type?: string
   status?: WatcherActionStatusOptions
+  description?: string
+  ignored_error?: ErrorCause
+  error?: ErrorCause
 }
 
 export interface IngestSimulateRequest extends RequestBase {
   id?: Id
   verbose?: boolean
-  docs?: IngestSimulateDocument[]
+  docs: IngestSimulateDocument[]
   pipeline?: IngestPipeline
 }
 
 export interface IngestSimulateResponse {
-  docs: IngestSimulatePipelineSimulation[]
+  docs: IngestSimulateSimulateDocumentResult[]
+}
+
+export interface IngestSimulateSimulateDocumentResult {
+  doc?: IngestSimulateDocumentSimulation
+  error?: ErrorCause
+  processor_results?: IngestSimulatePipelineSimulation[]
 }
 
 export interface LicenseLicense {
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index 79fac3c04..655ba221f 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -1162,7 +1162,7 @@ export interface RenderSearchTemplateResponse {
 export interface ScriptsPainlessExecutePainlessContextSetup {
   document: any
   index: IndexName
-  query: QueryDslQueryContainer
+  query?: QueryDslQueryContainer
 }
 
 export interface ScriptsPainlessExecuteRequest extends RequestBase {
@@ -4909,11 +4909,11 @@ export type AnalysisPhoneticRuleType = 'approx' | 'exact'
 export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase {
   type: 'phonetic'
   encoder: AnalysisPhoneticEncoder
-  languageset: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]
+  languageset?: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]
   max_code_len?: integer
-  name_type: AnalysisPhoneticNameType
+  name_type?: AnalysisPhoneticNameType
   replace?: boolean
-  rule_type: AnalysisPhoneticRuleType
+  rule_type?: AnalysisPhoneticRuleType
 }
 
 export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase {
@@ -12670,6 +12670,11 @@ export interface IngestCsvProcessor extends IngestProcessorBase {
   trim?: boolean
 }
 
+export interface IngestDatabaseConfiguration {
+  name: Name
+  maxmind: IngestMaxmind
+}
+
 export interface IngestDateIndexNameProcessor extends IngestProcessorBase {
   date_formats: string[]
   date_rounding: string
@@ -12748,6 +12753,12 @@ export interface IngestGsubProcessor extends IngestProcessorBase {
   target_field?: Field
 }
 
+export interface IngestHtmlStripProcessor extends IngestProcessorBase {
+  field: Field
+  ignore_missing?: boolean
+  target_field?: Field
+}
+
 export interface IngestInferenceConfig {
   regression?: IngestInferenceConfigRegression
   classification?: IngestInferenceConfigClassification
@@ -12809,6 +12820,10 @@ export interface IngestLowercaseProcessor extends IngestProcessorBase {
   target_field?: Field
 }
 
+export interface IngestMaxmind {
+  account_id: Id
+}
+
 export interface IngestPipeline {
   description?: string
   on_failure?: IngestProcessorContainer[]
@@ -12854,6 +12869,7 @@ export interface IngestProcessorContainer {
   geoip?: IngestGeoIpProcessor
   grok?: IngestGrokProcessor
   gsub?: IngestGsubProcessor
+  html_strip?: IngestHtmlStripProcessor
   inference?: IngestInferenceProcessor
   join?: IngestJoinProcessor
   json?: IngestJsonProcessor
@@ -12871,6 +12887,7 @@ export interface IngestProcessorContainer {
   trim?: IngestTrimProcessor
   uppercase?: IngestUppercaseProcessor
   urldecode?: IngestUrlDecodeProcessor
+  uri_parts?: IngestUriPartsProcessor
   user_agent?: IngestUserAgentProcessor
 }
 
@@ -12941,6 +12958,14 @@ export interface IngestUppercaseProcessor extends IngestProcessorBase {
   target_field?: Field
 }
 
+export interface IngestUriPartsProcessor extends IngestProcessorBase {
+  field: Field
+  ignore_missing?: boolean
+  keep_original?: boolean
+  remove_if_successful?: boolean
+  target_field?: Field
+}
+
 export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
   field: Field
   ignore_missing?: boolean
@@ -12957,6 +12982,14 @@ export interface IngestUserAgentProcessor extends IngestProcessorBase {
 }
 
 export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD'
 
+export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
+  id: Ids
+  master_timeout?: Duration
+  timeout?: Duration
+}
+
+export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase
+
 export interface IngestDeletePipelineRequest extends RequestBase {
   id: Id
   master_timeout?: Duration
@@ -12969,8 +13002,9 @@ export interface IngestGeoIpStatsGeoIpDownloadStatistics {
   successful_downloads: integer
   failed_downloads: integer
   total_download_time: DurationValue<UnitMillis>
-  database_count: integer
+  databases_count: integer
   skipped_updates: integer
+  expired_databases: integer
 }
 
 export interface IngestGeoIpStatsGeoIpNodeDatabaseName {
@@ -12990,6 +13024,22 @@ export interface IngestGeoIpStatsResponse {
   nodes: Record<Id, IngestGeoIpStatsGeoIpNodeDatabases>
 }
 
+export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata {
+  id: Id
+  version: long
+  modified_date_millis: EpochTime<UnitMillis>
+  database: IngestDatabaseConfiguration
+}
+
+export interface IngestGetGeoipDatabaseRequest extends RequestBase {
+  id?: Ids
+  master_timeout?: Duration
+}
+
+export interface IngestGetGeoipDatabaseResponse {
+  databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[]
+}
+
 export interface IngestGetPipelineRequest extends RequestBase {
   id?: Id
   master_timeout?: Duration
@@ -13005,6 +13055,19 @@ export interface IngestProcessorGrokResponse {
   patterns: Record<string, string>
 }
 
+export interface IngestPutGeoipDatabaseRequest extends RequestBase {
+  id: Id
+  master_timeout?: Duration
+  timeout?: Duration
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
+  body?: {
+    name: Name
+    maxmind: IngestMaxmind
+  }
+}
+
+export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase
+
 export interface IngestPutPipelineRequest extends RequestBase {
   id: Id
   master_timeout?: Duration
@@ -13047,10 +13110,12 @@ export interface IngestSimulateIngest {
 
 export interface IngestSimulatePipelineSimulation {
   doc?: IngestSimulateDocumentSimulation
-  processor_results?: IngestSimulatePipelineSimulation[]
   tag?: string
   processor_type?: string
   status?: WatcherActionStatusOptions
+  description?: string
+  ignored_error?: ErrorCause
+  error?: ErrorCause
 }
 
 export interface IngestSimulateRequest extends RequestBase {
@@ -13058,13 +13123,19 @@ export interface IngestSimulateRequest extends RequestBase {
   id?: Id
   verbose?: boolean
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
-    docs?: IngestSimulateDocument[]
+    docs: IngestSimulateDocument[]
     pipeline?: IngestPipeline
   }
 }
 
 export interface IngestSimulateResponse {
-  docs: IngestSimulatePipelineSimulation[]
+  docs: IngestSimulateSimulateDocumentResult[]
+}
+
+export interface IngestSimulateSimulateDocumentResult {
+  doc?: IngestSimulateDocumentSimulation
+  error?: ErrorCause
+  processor_results?: IngestSimulatePipelineSimulation[]
 }
 
 export interface LicenseLicense {
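The generated types above surface the new geoip-database management endpoints in the client's type layer. A minimal sketch of a typed request built against them, assuming the generated types are importable from `@elastic/elasticsearch/lib/api/types` as in earlier releases; the database id, database name, and MaxMind account id below are placeholder values:

[source,ts]
----
import type { IngestPutGeoipDatabaseRequest } from '@elastic/elasticsearch/lib/api/types'

// Sketch only: a typed request for the new PUT geoip-database endpoint.
// 'my-geoip-db', 'GeoIP2-City' and the account id are placeholders.
const request: IngestPutGeoipDatabaseRequest = {
  id: 'my-geoip-db',
  name: 'GeoIP2-City',
  maxmind: { account_id: '1234567' }
}
----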
From 58b457eedc4fd8b5a8045ef73c58a2db22769139 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Tue, 3 Sep 2024 13:06:50 -0500
Subject: [PATCH 387/647] Changes to make auto-merge action work (#2360)

Hopefully.
---
 .github/workflows/auto-merge.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml
index 03b49ee21..f283bd655 100644
--- a/.github/workflows/auto-merge.yml
+++ b/.github/workflows/auto-merge.yml
@@ -8,11 +8,11 @@ on:
 jobs:
   automerge:
     runs-on: ubuntu-latest
+    if: github.event.review.state == 'approved'
     steps:
       - uses: reitermarkus/automerge@v2
         with:
-          token: ${{ secrets.GITHUB_TOKEN }}
+          token: ${{ secrets.GH_TOKEN }}
           merge-method: squash
-          do-not-merge-labels: never-merge
-          pull-request-author-associations: OWNER,MEMBER,COLLABORATOR
-          review-author-associations: OWNER,MEMBER,CONTRIBUTOR
+          pull-request-author-associations: OWNER
+          review-author-associations: OWNER,CONTRIBUTOR
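The auto-generated patch that follows renames a number of doc examples and, among the API changes visible in its reference diff, adds an optional `index_filter` to `openPointInTime` (see the `docs/reference.asciidoc` hunks below). A minimal sketch of a call that uses it, assuming an already-configured `client` instance; the index name and range filter are placeholder values:

[source,ts]
----
// Sketch only: open a point in time while filtering out indices whose
// shards cannot match the query. Index name and filter are placeholders.
const pit = await client.openPointInTime({
  index: 'my-index',
  keep_alive: '1m',
  index_filter: { range: { '@timestamp': { gte: 'now-1d/d' } } }
})
console.log(pit.id)
----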
From 34704b2e5c0d869269832c9e2cf17bf6aa6ae608 Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Mon, 23 Sep 2024 21:54:43 +0200
Subject: [PATCH 388/647] Auto-generated code for main (#2362)

---
 ...01b23f09d2b7f140faf649eadbbf3ac3.asciidoc} |   2 +-
 .../01cd0ea360282a2c591a366679d7187d.asciidoc |  12 +
 ...05e637284bc3bedd46e0b7c26ad983c4.asciidoc} |   2 +-
 ...0b4e50f1b5a0537cbb1a41276bb51c54.asciidoc} |   2 +-
 ...1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc} |   2 +-
 ...1cbecd19be22979aefb45b4f160e77ea.asciidoc} |   2 +-
 ...24a037008e0fc2550ecb6a5d36c04a93.asciidoc} |   2 +-
 ...2968ffb8135f77ba3a9b876dd4918119.asciidoc} |   2 +-
 ...310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc} |   2 +-
 ...36ac0ef9ea63efc431580f7ade8ad53c.asciidoc} |   2 +-
 .../3a489743e49902df38e3368cae00717a.asciidoc |   8 +
 ...3a4953663a5a3809b692c27446e16b7f.asciidoc} |   2 +-
 ...4a72c68b96f44e80463084dfc0449d51.asciidoc} |   2 +-
 ...4dc151eebefd484a28aed1a175743364.asciidoc} |   2 +-
 ...4eeded40f30949e359714a5bb6c88612.asciidoc} |   2 +-
 .../4f6694ef147a73b1163bde3c13779d26.asciidoc |  11 +
 ...548a9b6f447bb820380c1c23e57c18c3.asciidoc} |   2 +-
 ...551467688d8c701315d0a371850a4056.asciidoc} |   2 +-
 ...60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc} |   4 +-
 ...6b6e275efe3d2aafe0fc3443f2c96868.asciidoc} |   2 +-
 ...7888c509774a2abfe82ca370c43d8789.asciidoc} |   2 +-
 .../84237aa9da49ab4b4c4e2b21d2548df2.asciidoc |  11 +
 ...894fce12d8f0d01e4c4083885a0c0077.asciidoc} |   2 +-
 ...968fb5b92aa65af09544f7c002b0953e.asciidoc} |   2 +-
 ...9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc} |   4 +-
 ...a769d696bf12f5e9de4b3250646d250c.asciidoc} |   2 +-
 ...a95ae76fca7c3e273e4bd10323b3caa6.asciidoc} |   2 +-
 .../aa814309ad5f1630886ba75255b444f5.asciidoc |   8 +
 ...b09f155602f9b2a6c40fe7c4a5436b7a.asciidoc} |   2 +-
 ...c8fa8d7e029792d539464fede18ce258.asciidoc} |   6 +-
 ...d3440ec81dde5f1a01c0206cb35e539c.asciidoc} |   2 +-
 ...dd7814258121d3c2e576a7f00469d7e3.asciidoc} |   2 +-
 .../e3fe842951dc873d7d00c8f6a010c53f.asciidoc |  12 +
 ...e9625da419bff6470ffd9927c59ca159.asciidoc} |   2 +-
 ...ef46c42d473b2acc151a6a41272e0f14.asciidoc} |   2 +-
 ...efbd4936cca1a752493d8fa2ba6ad1a3.asciidoc} |   2 +-
 ...f03352bb1129938a89f97e4b650038dd.asciidoc} |   2 +-
 ...f86337e13526c968848cfe29a52d658f.asciidoc} |   2 +-
 docs/reference.asciidoc                       |  54 ++--
 src/api/api/ingest.ts                         |   2 +-
 src/api/api/open_point_in_time.ts             |  16 +-
 src/api/types.ts                              | 259 +++++++++++++----
 src/api/typesWithBodyKey.ts                   | 262 ++++++++++++++----
 43 files changed, 550 insertions(+), 177 deletions(-)
 rename docs/doc_examples/{e944653610f311fa06148d5b0afdf697.asciidoc => 01b23f09d2b7f140faf649eadbbf3ac3.asciidoc} (94%)
 create mode 100644 docs/doc_examples/01cd0ea360282a2c591a366679d7187d.asciidoc
 rename docs/doc_examples/{9868ce609f4450702934fcbf4c340bf1.asciidoc => 05e637284bc3bedd46e0b7c26ad983c4.asciidoc} (89%)
 rename docs/doc_examples/{46025fc47dfbfa410790df0dd6bdad8d.asciidoc => 0b4e50f1b5a0537cbb1a41276bb51c54.asciidoc} (91%)
 rename docs/doc_examples/{be30ea12f605fd61acba689b68e00bbe.asciidoc => 1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc} (91%)
 rename docs/doc_examples/{7c63a1d2fbec5283e913ff39fafd0604.asciidoc => 1cbecd19be22979aefb45b4f160e77ea.asciidoc} (90%)
 rename docs/doc_examples/{da3cecc36a7313385d32c7f52ccfb7e3.asciidoc => 24a037008e0fc2550ecb6a5d36c04a93.asciidoc} (93%)
 rename docs/doc_examples/{dfcdadcf91529d3a399e05684195028e.asciidoc => 2968ffb8135f77ba3a9b876dd4918119.asciidoc} (87%)
 rename docs/doc_examples/{5f8d90515995a5eee189d722abe3b111.asciidoc => 310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc} (90%)
 rename docs/doc_examples/{65e892a362d940e4a74965f21c15ca09.asciidoc => 36ac0ef9ea63efc431580f7ade8ad53c.asciidoc} (88%)
 create mode 100644 docs/doc_examples/3a489743e49902df38e3368cae00717a.asciidoc
 rename docs/doc_examples/{60d689aae3f8de1e6830329dfd69a6a6.asciidoc => 3a4953663a5a3809b692c27446e16b7f.asciidoc} (87%)
 rename docs/doc_examples/{cc28a3dafcd5056f2a3ec07f6fda5091.asciidoc => 4a72c68b96f44e80463084dfc0449d51.asciidoc} (92%)
 rename docs/doc_examples/{a53ff77d83222c0e76453e630d64787e.asciidoc => 4dc151eebefd484a28aed1a175743364.asciidoc} (92%)
 rename docs/doc_examples/{c9373ff5ed6b026173428fbb92ca2d9f.asciidoc => 4eeded40f30949e359714a5bb6c88612.asciidoc} (88%)
 create mode 100644 docs/doc_examples/4f6694ef147a73b1163bde3c13779d26.asciidoc
 rename docs/doc_examples/{3a7a6ab88a49b484fafb10c8eb09b562.asciidoc => 548a9b6f447bb820380c1c23e57c18c3.asciidoc} (92%)
 rename docs/doc_examples/{b468d0124dc485385a34504d5b7af82a.asciidoc => 551467688d8c701315d0a371850a4056.asciidoc} (87%)
 rename docs/doc_examples/{1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc => 60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc} (66%)
 rename docs/doc_examples/{a69c7c3412af73758f629e76263063b5.asciidoc => 6b6e275efe3d2aafe0fc3443f2c96868.asciidoc} (86%)
 rename docs/doc_examples/{84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc => 7888c509774a2abfe82ca370c43d8789.asciidoc} (88%)
 create mode 100644 docs/doc_examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc
 rename docs/doc_examples/{113ac8466084ee6ac4ed272e342dc468.asciidoc => 894fce12d8f0d01e4c4083885a0c0077.asciidoc} (88%)
 rename docs/doc_examples/{7e5bee18e61d950e823782da1b733903.asciidoc => 968fb5b92aa65af09544f7c002b0953e.asciidoc} (91%)
 rename docs/doc_examples/{50ddf374cfa8128538ea092ee98b723d.asciidoc => 9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc} (79%)
 rename docs/doc_examples/{82eff1d681a5d0d1538ef011bb32ab9a.asciidoc => a769d696bf12f5e9de4b3250646d250c.asciidoc} (85%)
 rename docs/doc_examples/{0ba5acede9d43af424e85428e7d35420.asciidoc => a95ae76fca7c3e273e4bd10323b3caa6.asciidoc} (91%)
 create mode 100644 docs/doc_examples/aa814309ad5f1630886ba75255b444f5.asciidoc
 rename docs/doc_examples/{88ec7fa6768a7e13cd2158667a69e97f.asciidoc => b09f155602f9b2a6c40fe7c4a5436b7a.asciidoc} (90%)
 rename docs/doc_examples/{804cdf477ec829740e3d045140400c3b.asciidoc => c8fa8d7e029792d539464fede18ce258.asciidoc} (81%)
 rename docs/doc_examples/{443f0e8fbba83777b2df624879d188d5.asciidoc => d3440ec81dde5f1a01c0206cb35e539c.asciidoc} (87%)
 rename docs/doc_examples/{0e3abd15dde97a2334621190c4ad4f96.asciidoc => dd7814258121d3c2e576a7f00469d7e3.asciidoc} (92%)
 create mode 100644 docs/doc_examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc
 rename docs/doc_examples/{4655c3dea0c61935b7ecf1e57441df66.asciidoc => e9625da419bff6470ffd9927c59ca159.asciidoc} (82%)
 rename docs/doc_examples/{4f792d86ff79dcfe4643cd95505f8d5f.asciidoc => ef46c42d473b2acc151a6a41272e0f14.asciidoc} (92%)
 rename docs/doc_examples/{4fe78a4dfb747fd5dc34145ec6b76183.asciidoc => efbd4936cca1a752493d8fa2ba6ad1a3.asciidoc} (92%)
 rename docs/doc_examples/{349823d86980d40ac45248c19a59e339.asciidoc => f03352bb1129938a89f97e4b650038dd.asciidoc} (91%)
 rename docs/doc_examples/{517d291044c3e4448b8804322616ab4a.asciidoc => f86337e13526c968848cfe29a52d658f.asciidoc} (92%)

diff --git a/docs/doc_examples/e944653610f311fa06148d5b0afdf697.asciidoc b/docs/doc_examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc
similarity index 94%
rename from docs/doc_examples/e944653610f311fa06148d5b0afdf697.asciidoc
rename to docs/doc_examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc
index 4982ab849..2752b8336 100644
--- a/docs/doc_examples/e944653610f311fa06148d5b0afdf697.asciidoc
+++ b/docs/doc_examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc
@@ -26,7 +26,7 @@ const response1 = await client.cluster.putComponentTemplate({
         type: "keyword",
         script: {
           source:
-            "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
+            "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
         },
       },
     },
diff --git a/docs/doc_examples/01cd0ea360282a2c591a366679d7187d.asciidoc b/docs/doc_examples/01cd0ea360282a2c591a366679d7187d.asciidoc
new file mode 100644
index 000000000..71f3092da
--- /dev/null
+++ b/docs/doc_examples/01cd0ea360282a2c591a366679d7187d.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.tasks.list({
+  human: "true",
+  detailed: "true",
+  actions: "indices:data/write/bulk",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9868ce609f4450702934fcbf4c340bf1.asciidoc b/docs/doc_examples/05e637284bc3bedd46e0b7c26ad983c4.asciidoc
similarity index 89%
rename from docs/doc_examples/9868ce609f4450702934fcbf4c340bf1.asciidoc
rename to docs/doc_examples/05e637284bc3bedd46e0b7c26ad983c4.asciidoc
index efbc3834d..1b9967b11 100644
--- a/docs/doc_examples/9868ce609f4450702934fcbf4c340bf1.asciidoc
+++ b/docs/doc_examples/05e637284bc3bedd46e0b7c26ad983c4.asciidoc
@@ -4,7 +4,7 @@
 [source, js]
 ----
 const response = await client.ingest.putPipeline({
-  id: "alibabacloud_ai_search_embeddings",
+  id: "alibabacloud_ai_search_embeddings_pipeline",
   processors: [
     {
       inference: {
diff --git a/docs/doc_examples/46025fc47dfbfa410790df0dd6bdad8d.asciidoc b/docs/doc_examples/0b4e50f1b5a0537cbb1a41276bb51c54.asciidoc
similarity index 91%
rename from docs/doc_examples/46025fc47dfbfa410790df0dd6bdad8d.asciidoc
rename to docs/doc_examples/0b4e50f1b5a0537cbb1a41276bb51c54.asciidoc
index 40e7af427..6f9ec7de2 100644
--- a/docs/doc_examples/46025fc47dfbfa410790df0dd6bdad8d.asciidoc
+++ b/docs/doc_examples/0b4e50f1b5a0537cbb1a41276bb51c54.asciidoc
@@ -10,7 +10,7 @@ const response = await client.search({
       type: "keyword",
       script: {
         source:
-          "emit(doc['@timestamp'].value.dayOfWeekEnum\n            .getDisplayName(TextStyle.FULL, Locale.ROOT))",
+          "emit(doc['@timestamp'].value.dayOfWeekEnum\n            .getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
       },
     },
   },
diff --git a/docs/doc_examples/be30ea12f605fd61acba689b68e00bbe.asciidoc b/docs/doc_examples/1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc
similarity index 91%
rename from docs/doc_examples/be30ea12f605fd61acba689b68e00bbe.asciidoc
rename to docs/doc_examples/1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc
index b3f5d0d2b..0c57e6552 100644
--- a/docs/doc_examples/be30ea12f605fd61acba689b68e00bbe.asciidoc
+++ b/docs/doc_examples/1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc
@@ -4,7 +4,7 @@
 [source, js]
 ----
 const response = await client.ingest.putPipeline({
-  id: "hugging_face_embeddings",
+  id: "hugging_face_embeddings_pipeline",
   processors: [
     {
       inference: {
diff --git a/docs/doc_examples/7c63a1d2fbec5283e913ff39fafd0604.asciidoc b/docs/doc_examples/1cbecd19be22979aefb45b4f160e77ea.asciidoc
similarity index 90%
rename from docs/doc_examples/7c63a1d2fbec5283e913ff39fafd0604.asciidoc
rename to docs/doc_examples/1cbecd19be22979aefb45b4f160e77ea.asciidoc
index d83687e37..91636c2e3 100644
--- a/docs/doc_examples/7c63a1d2fbec5283e913ff39fafd0604.asciidoc
+++ b/docs/doc_examples/1cbecd19be22979aefb45b4f160e77ea.asciidoc
@@ -4,7 +4,7 @@
 [source, js]
 ----
 const response = await client.ingest.putPipeline({
-  id: "google_vertex_ai_embeddings",
+  id: "google_vertex_ai_embeddings_pipeline",
   processors: [
     {
       inference: {
diff --git a/docs/doc_examples/da3cecc36a7313385d32c7f52ccfb7e3.asciidoc b/docs/doc_examples/24a037008e0fc2550ecb6a5d36c04a93.asciidoc
similarity index 93%
rename from docs/doc_examples/da3cecc36a7313385d32c7f52ccfb7e3.asciidoc
rename to docs/doc_examples/24a037008e0fc2550ecb6a5d36c04a93.asciidoc
index 4439e3423..6972b07c9 100644
--- a/docs/doc_examples/da3cecc36a7313385d32c7f52ccfb7e3.asciidoc
+++ b/docs/doc_examples/24a037008e0fc2550ecb6a5d36c04a93.asciidoc
@@ -10,7 +10,7 @@ const response = await client.search({
     "date.day_of_week": {
       type: "keyword",
       script:
-        "emit(doc['date'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
+        "emit(doc['date'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
     },
   },
   aggs: {
diff --git a/docs/doc_examples/dfcdadcf91529d3a399e05684195028e.asciidoc b/docs/doc_examples/2968ffb8135f77ba3a9b876dd4918119.asciidoc
similarity index 87%
rename from docs/doc_examples/dfcdadcf91529d3a399e05684195028e.asciidoc
rename to docs/doc_examples/2968ffb8135f77ba3a9b876dd4918119.asciidoc
index a9c2986d4..7a6220c32 100644
--- a/docs/doc_examples/dfcdadcf91529d3a399e05684195028e.asciidoc
+++ b/docs/doc_examples/2968ffb8135f77ba3a9b876dd4918119.asciidoc
@@ -11,7 +11,7 @@ const response = await client.reindex({
   },
   dest: {
     index: "azure-ai-studio-embeddings",
-    pipeline: "azure_ai_studio_embeddings",
+    pipeline: "azure_ai_studio_embeddings_pipeline",
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/5f8d90515995a5eee189d722abe3b111.asciidoc b/docs/doc_examples/310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc
similarity index 90%
rename from docs/doc_examples/5f8d90515995a5eee189d722abe3b111.asciidoc
rename to docs/doc_examples/310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc
index 028813b2e..34cfe5d2f 100644
--- a/docs/doc_examples/5f8d90515995a5eee189d722abe3b111.asciidoc
+++ b/docs/doc_examples/310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc
@@ -4,7 +4,7 @@
 [source, js]
 ----
 const response = await client.ingest.putPipeline({
-  id: "azure_ai_studio_embeddings",
+  id: "azure_ai_studio_embeddings_pipeline",
   processors: [
     {
       inference: {
diff --git a/docs/doc_examples/65e892a362d940e4a74965f21c15ca09.asciidoc b/docs/doc_examples/36ac0ef9ea63efc431580f7ade8ad53c.asciidoc
similarity index 88%
rename from docs/doc_examples/65e892a362d940e4a74965f21c15ca09.asciidoc
rename to docs/doc_examples/36ac0ef9ea63efc431580f7ade8ad53c.asciidoc
index fdab99f78..eead102c5 100644
--- a/docs/doc_examples/65e892a362d940e4a74965f21c15ca09.asciidoc
+++ b/docs/doc_examples/36ac0ef9ea63efc431580f7ade8ad53c.asciidoc
@@ -11,7 +11,7 @@ const response = await client.reindex({
   },
   dest: {
     index: "openai-embeddings",
-    pipeline: "openai_embeddings",
+    pipeline: "openai_embeddings_pipeline",
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/3a489743e49902df38e3368cae00717a.asciidoc b/docs/doc_examples/3a489743e49902df38e3368cae00717a.asciidoc
new file mode 100644
index 000000000..5aec6e804
--- /dev/null
+++ b/docs/doc_examples/3a489743e49902df38e3368cae00717a.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.nodes.hotThreads();
+console.log(response);
+----
diff --git a/docs/doc_examples/60d689aae3f8de1e6830329dfd69a6a6.asciidoc b/docs/doc_examples/3a4953663a5a3809b692c27446e16b7f.asciidoc
similarity index 87%
rename from docs/doc_examples/60d689aae3f8de1e6830329dfd69a6a6.asciidoc
rename to docs/doc_examples/3a4953663a5a3809b692c27446e16b7f.asciidoc
index dabaf1110..1d6946015 100644
--- a/docs/doc_examples/60d689aae3f8de1e6830329dfd69a6a6.asciidoc
+++ b/docs/doc_examples/3a4953663a5a3809b692c27446e16b7f.asciidoc
@@ -11,7 +11,7 @@ const response = await client.reindex({
   },
   dest: {
     index: "amazon-bedrock-embeddings",
-    pipeline: "amazon_bedrock_embeddings",
+    pipeline: "amazon_bedrock_embeddings_pipeline",
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/cc28a3dafcd5056f2a3ec07f6fda5091.asciidoc b/docs/doc_examples/4a72c68b96f44e80463084dfc0449d51.asciidoc
similarity index 92%
rename from docs/doc_examples/cc28a3dafcd5056f2a3ec07f6fda5091.asciidoc
rename to docs/doc_examples/4a72c68b96f44e80463084dfc0449d51.asciidoc
index 4f6f802d9..002ca35ca 100644
--- a/docs/doc_examples/cc28a3dafcd5056f2a3ec07f6fda5091.asciidoc
+++ b/docs/doc_examples/4a72c68b96f44e80463084dfc0449d51.asciidoc
@@ -10,7 +10,7 @@ const response = await client.search({
       type: "keyword",
       script: {
         source:
-          "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
+          "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
       },
     },
   },
diff --git a/docs/doc_examples/a53ff77d83222c0e76453e630d64787e.asciidoc b/docs/doc_examples/4dc151eebefd484a28aed1a175743364.asciidoc
similarity index 92%
rename from docs/doc_examples/a53ff77d83222c0e76453e630d64787e.asciidoc
rename to docs/doc_examples/4dc151eebefd484a28aed1a175743364.asciidoc
index 2113b36ee..f7f5c27d2 100644
--- a/docs/doc_examples/a53ff77d83222c0e76453e630d64787e.asciidoc
+++ b/docs/doc_examples/4dc151eebefd484a28aed1a175743364.asciidoc
@@ -4,7 +4,7 @@
 [source, js]
 ----
 const response = await client.ingest.putPipeline({
-  id: "openai_embeddings",
+  id: "openai_embeddings_pipeline",
   processors: [
     {
       inference: {
diff --git a/docs/doc_examples/c9373ff5ed6b026173428fbb92ca2d9f.asciidoc b/docs/doc_examples/4eeded40f30949e359714a5bb6c88612.asciidoc
similarity index 88%
rename from docs/doc_examples/c9373ff5ed6b026173428fbb92ca2d9f.asciidoc
rename to docs/doc_examples/4eeded40f30949e359714a5bb6c88612.asciidoc
index 21b881926..435c2eaff 100644
--- a/docs/doc_examples/c9373ff5ed6b026173428fbb92ca2d9f.asciidoc
+++ b/docs/doc_examples/4eeded40f30949e359714a5bb6c88612.asciidoc
@@ -11,7 +11,7 @@ const response = await client.reindex({
   },
   dest: {
     index: "elser-embeddings",
-    pipeline: "elser_embeddings",
+    pipeline: "elser_embeddings_pipeline",
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/4f6694ef147a73b1163bde3c13779d26.asciidoc b/docs/doc_examples/4f6694ef147a73b1163bde3c13779d26.asciidoc
new file mode 100644
index 000000000..0793b4e08
--- /dev/null
+++ b/docs/doc_examples/4f6694ef147a73b1163bde3c13779d26.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.nodes.stats({
+  human: "true",
+  filter_path: "nodes.*.indexing_pressure",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/3a7a6ab88a49b484fafb10c8eb09b562.asciidoc b/docs/doc_examples/548a9b6f447bb820380c1c23e57c18c3.asciidoc
similarity index 92%
rename from docs/doc_examples/3a7a6ab88a49b484fafb10c8eb09b562.asciidoc
rename to docs/doc_examples/548a9b6f447bb820380c1c23e57c18c3.asciidoc
index 1362f5945..376f11be8 100644
--- a/docs/doc_examples/3a7a6ab88a49b484fafb10c8eb09b562.asciidoc
+++ b/docs/doc_examples/548a9b6f447bb820380c1c23e57c18c3.asciidoc
@@ -4,7 +4,7 @@
 [source, js]
 ----
 const response = await client.ingest.putPipeline({
-  id: "cohere_embeddings",
+  id: "cohere_embeddings_pipeline",
   processors: [
     {
       inference: {
diff --git a/docs/doc_examples/b468d0124dc485385a34504d5b7af82a.asciidoc b/docs/doc_examples/551467688d8c701315d0a371850a4056.asciidoc
similarity index 87%
rename from docs/doc_examples/b468d0124dc485385a34504d5b7af82a.asciidoc
rename to docs/doc_examples/551467688d8c701315d0a371850a4056.asciidoc
index cafdc3e63..cc9792da6 100644
--- a/docs/doc_examples/b468d0124dc485385a34504d5b7af82a.asciidoc
+++ b/docs/doc_examples/551467688d8c701315d0a371850a4056.asciidoc
@@ -11,7 +11,7 @@ const response = await client.reindex({
   },
   dest: {
     index: "hugging-face-embeddings",
-    pipeline: "hugging_face_embeddings",
+    pipeline: "hugging_face_embeddings_pipeline",
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc b/docs/doc_examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc
similarity index 66%
rename from docs/doc_examples/1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc
rename to docs/doc_examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc
index 5c00b0b5c..627606897 100644
--- a/docs/doc_examples/1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc
+++ b/docs/doc_examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc
@@ -3,8 +3,8 @@
 
 [source, js]
 ----
-const response = await client.nodes.hotThreads({
-  node_id: "my-node,my-other-node",
+const response = await client.nodes.stats({
+  metric: "breaker",
 });
 console.log(response);
 ----
diff --git a/docs/doc_examples/a69c7c3412af73758f629e76263063b5.asciidoc b/docs/doc_examples/6b6e275efe3d2aafe0fc3443f2c96868.asciidoc
similarity index 86%
rename from docs/doc_examples/a69c7c3412af73758f629e76263063b5.asciidoc
rename to docs/doc_examples/6b6e275efe3d2aafe0fc3443f2c96868.asciidoc
index 2312ca864..ddb9c53f2 100644
--- a/docs/doc_examples/a69c7c3412af73758f629e76263063b5.asciidoc
+++ b/docs/doc_examples/6b6e275efe3d2aafe0fc3443f2c96868.asciidoc
@@ -11,7 +11,7 @@ const response = await client.reindex({
   },
   dest: {
     index: "google-vertex-ai-embeddings",
-    pipeline: "google_vertex_ai_embeddings",
+    pipeline: "google_vertex_ai_embeddings_pipeline",
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc b/docs/doc_examples/7888c509774a2abfe82ca370c43d8789.asciidoc
similarity index 88%
rename from docs/doc_examples/84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc
rename to docs/doc_examples/7888c509774a2abfe82ca370c43d8789.asciidoc
index 572e30e93..d4e0c4db6 100644
--- a/docs/doc_examples/84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc
+++ b/docs/doc_examples/7888c509774a2abfe82ca370c43d8789.asciidoc
@@ -11,7 +11,7 @@ const response = await client.reindex({
   },
   dest: {
     index: "cohere-embeddings",
-    pipeline: "cohere_embeddings",
+    pipeline: "cohere_embeddings_pipeline",
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc b/docs/doc_examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc
new file mode 100644
index 000000000..09a235c98
--- /dev/null
+++ b/docs/doc_examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc
@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.create({
+  repository: "my_repository",
+  snapshot: "_verify_integrity",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/113ac8466084ee6ac4ed272e342dc468.asciidoc b/docs/doc_examples/894fce12d8f0d01e4c4083885a0c0077.asciidoc
similarity index 88%
rename from docs/doc_examples/113ac8466084ee6ac4ed272e342dc468.asciidoc
rename to docs/doc_examples/894fce12d8f0d01e4c4083885a0c0077.asciidoc
index 406ed089e..5a195c6f2 100644
--- a/docs/doc_examples/113ac8466084ee6ac4ed272e342dc468.asciidoc
+++ b/docs/doc_examples/894fce12d8f0d01e4c4083885a0c0077.asciidoc
@@ -11,7 +11,7 @@ const response = await client.reindex({
   },
   dest: {
     index: "mistral-embeddings",
-    pipeline: "mistral_embeddings",
+    pipeline: "mistral_embeddings_pipeline",
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/7e5bee18e61d950e823782da1b733903.asciidoc b/docs/doc_examples/968fb5b92aa65af09544f7c002b0953e.asciidoc
similarity index 91%
rename from docs/doc_examples/7e5bee18e61d950e823782da1b733903.asciidoc
rename to docs/doc_examples/968fb5b92aa65af09544f7c002b0953e.asciidoc
index f63ee943b..410d4fc38 100644
--- a/docs/doc_examples/7e5bee18e61d950e823782da1b733903.asciidoc
+++ b/docs/doc_examples/968fb5b92aa65af09544f7c002b0953e.asciidoc
@@ -7,7 +7,7 @@ const response = await client.search({
   index: "semantic-embeddings",
   query: {
     semantic: {
-      field: "semantic_text",
+      field: "content",
       query: "How to avoid muscle soreness while running?",
     },
   },
diff --git a/docs/doc_examples/50ddf374cfa8128538ea092ee98b723d.asciidoc b/docs/doc_examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc
similarity index 79%
rename from docs/doc_examples/50ddf374cfa8128538ea092ee98b723d.asciidoc
rename to docs/doc_examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc
index ff7c02793..8c361f48c 100644
--- a/docs/doc_examples/50ddf374cfa8128538ea092ee98b723d.asciidoc
+++ b/docs/doc_examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc
@@ -4,7 +4,9 @@
 [source, js]
 ----
 const response = await client.tasks.list({
-  filter_path: "nodes.*.tasks",
+  pretty: "true",
+  human: "true",
+  detailed: "true",
 });
 console.log(response);
 ----
diff --git a/docs/doc_examples/82eff1d681a5d0d1538ef011bb32ab9a.asciidoc b/docs/doc_examples/a769d696bf12f5e9de4b3250646d250c.asciidoc
similarity index 85%
rename from docs/doc_examples/82eff1d681a5d0d1538ef011bb32ab9a.asciidoc
rename to docs/doc_examples/a769d696bf12f5e9de4b3250646d250c.asciidoc
index c7b1a1209..6bce3ee50 100644
--- a/docs/doc_examples/82eff1d681a5d0d1538ef011bb32ab9a.asciidoc
+++ b/docs/doc_examples/a769d696bf12f5e9de4b3250646d250c.asciidoc
@@ -11,7 +11,7 @@ const response = await client.reindex({
   },
   dest: {
     index: "alibabacloud-ai-search-embeddings",
-    pipeline: "alibabacloud_ai_search_embeddings",
+    pipeline: "alibabacloud_ai_search_embeddings_pipeline",
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/0ba5acede9d43af424e85428e7d35420.asciidoc b/docs/doc_examples/a95ae76fca7c3e273e4bd10323b3caa6.asciidoc
similarity index 91%
rename from docs/doc_examples/0ba5acede9d43af424e85428e7d35420.asciidoc
rename to docs/doc_examples/a95ae76fca7c3e273e4bd10323b3caa6.asciidoc
index a279b534c..fbff6cd91 100644
--- a/docs/doc_examples/0ba5acede9d43af424e85428e7d35420.asciidoc
+++ b/docs/doc_examples/a95ae76fca7c3e273e4bd10323b3caa6.asciidoc
@@ -4,7 +4,7 @@
 [source, js]
 ----
 const response = await client.ingest.putPipeline({
-  id: "azure_openai_embeddings",
+  id: "azure_openai_embeddings_pipeline",
   processors: [
     {
       inference: {
diff --git a/docs/doc_examples/aa814309ad5f1630886ba75255b444f5.asciidoc b/docs/doc_examples/aa814309ad5f1630886ba75255b444f5.asciidoc
new file mode 100644
index 000000000..736d58e62
--- /dev/null
+++ b/docs/doc_examples/aa814309ad5f1630886ba75255b444f5.asciidoc
@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.pendingTasks();
+console.log(response);
+----
diff --git a/docs/doc_examples/88ec7fa6768a7e13cd2158667a69e97f.asciidoc b/docs/doc_examples/b09f155602f9b2a6c40fe7c4a5436b7a.asciidoc
similarity index 90%
rename from docs/doc_examples/88ec7fa6768a7e13cd2158667a69e97f.asciidoc
rename to docs/doc_examples/b09f155602f9b2a6c40fe7c4a5436b7a.asciidoc
index 517644e96..ef0a0529b 100644
--- a/docs/doc_examples/88ec7fa6768a7e13cd2158667a69e97f.asciidoc
+++ b/docs/doc_examples/b09f155602f9b2a6c40fe7c4a5436b7a.asciidoc
@@ -8,7 +8,7 @@ const response = await client.search({
     day_of_week: {
       type: "keyword",
       script:
-        "\n        emit(doc['timestamp'].value.dayOfWeekEnum\n          .getDisplayName(TextStyle.FULL, Locale.ROOT))\n      ",
+        "\n        emit(doc['timestamp'].value.dayOfWeekEnum\n          .getDisplayName(TextStyle.FULL, Locale.ENGLISH))\n      ",
     },
   },
   size: 0,
diff --git a/docs/doc_examples/804cdf477ec829740e3d045140400c3b.asciidoc b/docs/doc_examples/c8fa8d7e029792d539464fede18ce258.asciidoc
similarity index 81%
rename from docs/doc_examples/804cdf477ec829740e3d045140400c3b.asciidoc
rename to docs/doc_examples/c8fa8d7e029792d539464fede18ce258.asciidoc
index 255b2df23..598fd1f7b 100644
--- a/docs/doc_examples/804cdf477ec829740e3d045140400c3b.asciidoc
+++ b/docs/doc_examples/c8fa8d7e029792d539464fede18ce258.asciidoc
@@ -7,14 +7,10 @@ const response = await client.indices.create({
   index: "semantic-embeddings",
   mappings: {
     properties: {
-      semantic_text: {
+      content: {
         type: "semantic_text",
         inference_id: "my-elser-endpoint",
       },
-      content: {
-        type: "text",
-        copy_to: "semantic_text",
-      },
     },
   },
 });
diff --git a/docs/doc_examples/443f0e8fbba83777b2df624879d188d5.asciidoc b/docs/doc_examples/d3440ec81dde5f1a01c0206cb35e539c.asciidoc
similarity index 87%
rename from docs/doc_examples/443f0e8fbba83777b2df624879d188d5.asciidoc
rename to docs/doc_examples/d3440ec81dde5f1a01c0206cb35e539c.asciidoc
index 30dfcbd4f..8a5eced01 100644
--- a/docs/doc_examples/443f0e8fbba83777b2df624879d188d5.asciidoc
+++ b/docs/doc_examples/d3440ec81dde5f1a01c0206cb35e539c.asciidoc
@@ -11,7 +11,7 @@ const response = await client.reindex({
   },
   dest: {
     index: "azure-openai-embeddings",
-    pipeline: "azure_openai_embeddings",
+    pipeline: "azure_openai_embeddings_pipeline",
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/0e3abd15dde97a2334621190c4ad4f96.asciidoc b/docs/doc_examples/dd7814258121d3c2e576a7f00469d7e3.asciidoc
similarity index 92%
rename from docs/doc_examples/0e3abd15dde97a2334621190c4ad4f96.asciidoc
rename to docs/doc_examples/dd7814258121d3c2e576a7f00469d7e3.asciidoc
index 17e833805..da4ac42b8 100644
--- a/docs/doc_examples/0e3abd15dde97a2334621190c4ad4f96.asciidoc
+++ b/docs/doc_examples/dd7814258121d3c2e576a7f00469d7e3.asciidoc
@@ -4,7 +4,7 @@
 [source, js]
 ----
 const response = await client.ingest.putPipeline({
-  id: "mistral_embeddings",
+  id: "mistral_embeddings_pipeline",
   processors: [
     {
       inference: {
diff --git a/docs/doc_examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc b/docs/doc_examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc
new file mode 100644
index 000000000..33c2f68ce
--- /dev/null
+++ b/docs/doc_examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc
@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.tasks.list({
+  human: "true",
+  detailed: "true",
+  actions: "indices:data/write/search",
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4655c3dea0c61935b7ecf1e57441df66.asciidoc b/docs/doc_examples/e9625da419bff6470ffd9927c59ca159.asciidoc
similarity index 82%
rename from docs/doc_examples/4655c3dea0c61935b7ecf1e57441df66.asciidoc
rename to docs/doc_examples/e9625da419bff6470ffd9927c59ca159.asciidoc
index c9f7520c1..d3ff499f4 100644
--- a/docs/doc_examples/4655c3dea0c61935b7ecf1e57441df66.asciidoc
+++ b/docs/doc_examples/e9625da419bff6470ffd9927c59ca159.asciidoc
@@ -5,7 +5,7 @@
 ----
 const response = await client.cat.threadPool({
   v: "true",
-  h: "id,name,active,rejected,completed",
+  h: "id,name,queue,active,rejected,completed",
 });
 console.log(response);
 ----
diff --git a/docs/doc_examples/4f792d86ff79dcfe4643cd95505f8d5f.asciidoc b/docs/doc_examples/ef46c42d473b2acc151a6a41272e0f14.asciidoc
b/docs/doc_examples/ef46c42d473b2acc151a6a41272e0f14.asciidoc similarity index 92% rename from docs/doc_examples/4f792d86ff79dcfe4643cd95505f8d5f.asciidoc rename to docs/doc_examples/ef46c42d473b2acc151a6a41272e0f14.asciidoc index 8d47d41ed..5fbb3c8b7 100644 --- a/docs/doc_examples/4f792d86ff79dcfe4643cd95505f8d5f.asciidoc +++ b/docs/doc_examples/ef46c42d473b2acc151a6a41272e0f14.asciidoc @@ -12,7 +12,7 @@ const response = await client.indices.create({ type: "keyword", script: { source: - "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))", + "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))", }, }, }, diff --git a/docs/doc_examples/4fe78a4dfb747fd5dc34145ec6b76183.asciidoc b/docs/doc_examples/efbd4936cca1a752493d8fa2ba6ad1a3.asciidoc similarity index 92% rename from docs/doc_examples/4fe78a4dfb747fd5dc34145ec6b76183.asciidoc rename to docs/doc_examples/efbd4936cca1a752493d8fa2ba6ad1a3.asciidoc index 87df83fca..0dca28ff5 100644 --- a/docs/doc_examples/4fe78a4dfb747fd5dc34145ec6b76183.asciidoc +++ b/docs/doc_examples/efbd4936cca1a752493d8fa2ba6ad1a3.asciidoc @@ -11,7 +11,7 @@ const response = await client.indices.create({ type: "keyword", script: { source: - "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))", + "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))", }, }, }, diff --git a/docs/doc_examples/349823d86980d40ac45248c19a59e339.asciidoc b/docs/doc_examples/f03352bb1129938a89f97e4b650038dd.asciidoc similarity index 91% rename from docs/doc_examples/349823d86980d40ac45248c19a59e339.asciidoc rename to docs/doc_examples/f03352bb1129938a89f97e4b650038dd.asciidoc index 93a193897..f39a62d6f 100644 --- a/docs/doc_examples/349823d86980d40ac45248c19a59e339.asciidoc +++ b/docs/doc_examples/f03352bb1129938a89f97e4b650038dd.asciidoc @@ -4,7 +4,7 @@ [source, js] ---- const response = await client.ingest.putPipeline({ - id: "amazon_bedrock_embeddings", + id: "amazon_bedrock_embeddings_pipeline", processors: [ { inference: { diff --git a/docs/doc_examples/517d291044c3e4448b8804322616ab4a.asciidoc b/docs/doc_examples/f86337e13526c968848cfe29a52d658f.asciidoc similarity index 92% rename from docs/doc_examples/517d291044c3e4448b8804322616ab4a.asciidoc rename to docs/doc_examples/f86337e13526c968848cfe29a52d658f.asciidoc index 67bb14a80..d244f1a1e 100644 --- a/docs/doc_examples/517d291044c3e4448b8804322616ab4a.asciidoc +++ b/docs/doc_examples/f86337e13526c968848cfe29a52d658f.asciidoc @@ -4,7 +4,7 @@ [source, js] ---- const response = await client.ingest.putPipeline({ - id: "elser_embeddings", + id: "elser_embeddings_pipeline", processors: [ { inference: { diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 1fdae1564..ccd0a029c 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -402,7 +402,7 @@ client.fieldCaps({ ... }) ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. ** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. 
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. -** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. +** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. ** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request @@ -750,6 +750,7 @@ client.openPointInTime({ index, keep_alive }) * *Request (object):* ** *`index` (string | string[])*: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices ** *`keep_alive` (string | -1 | 0)*: Extends the time to live of the corresponding point in time. +** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to `match_none` on every shard. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. ** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. @@ -940,7 +941,7 @@ client.search({ ... }) ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. -** *`aggregations` (Optional, Record)*: Defines the aggregations that are run as part of the search request. +** *`aggregations` (Optional, Record)*: Defines the aggregations that are run as part of the search request. ** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*: Collapses search results the values of the specified field. 
** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. ** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. @@ -999,7 +1000,7 @@ If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. ** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. -** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. +** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. ** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. @@ -1098,7 +1099,7 @@ client.searchMvt({ index, field, zoom, x, y }) ** *`zoom` (number)*: Zoom level for the vector tile to search ** *`x` (number)*: X coordinate for the vector tile to search ** *`y` (number)*: Y coordinate for the vector tile to search -** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid. +** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid. Supports the following aggregation types: - avg @@ -1126,7 +1127,7 @@ each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon of the cells bounding box. If 'point' each feature is a Point that is the centroid of the cell. ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search. -** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take +** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. ** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don’t include the hits layer. @@ -1477,7 +1478,7 @@ client.asyncSearch.submit({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices -** *`aggregations` (Optional, Record)* +** *`aggregations` (Optional, Record)* ** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* ** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. ** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. @@ -1527,7 +1528,7 @@ parameter defaults to false. 
You can pass _source: true to return both source fi and stored fields in the search response. ** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. -** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take +** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. ** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using @@ -3511,7 +3512,7 @@ client.eql.search({ index, query }) ** *`size` (Optional, number)*: For basic queries, the maximum number of matching events to return. Defaults to 10 ** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. ** *`result_position` (Optional, Enum("tail" | "head"))* -** *`runtime_mappings` (Optional, Record)* +** *`runtime_mappings` (Optional, Record)* ** *`allow_no_indices` (Optional, boolean)* ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* ** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. @@ -3565,7 +3566,7 @@ and its format can change at any time but it can give some insight into the perf of each part of the query. ** *`tables` (Optional, Record>)*: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. -** *`format` (Optional, string)*: A short version of the Accept header, e.g. json, yaml. +** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, e.g. json, yaml. ** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format. ** *`drop_null_columns` (Optional, boolean)*: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. @@ -3668,7 +3669,7 @@ client.fleet.search({ index }) * *Request (object):* ** *`index` (string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index. -** *`aggregations` (Optional, Record)* +** *`aggregations` (Optional, Record)* ** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* ** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. ** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. @@ -3717,7 +3718,7 @@ parameter defaults to false. You can pass _source: true to return both source fi and stored fields in the search response. ** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. -** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. 
These fields take +** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. ** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using @@ -4030,7 +4031,7 @@ If specified, the `analyzer` parameter overrides this value. ** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token. ** *`text` (Optional, string | string[])*: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. -** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, max_token_length } | { type, max_token_length } | { type, max_token_length } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, flags, group, pattern } | { type, rule_files })*: Tokenizer to use to convert text into tokens. +** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })*: Tokenizer to use to convert text into tokens. [discrete] ==== clear_cache @@ -4671,6 +4672,7 @@ Wildcard (`*`) expressions are supported. If omitted, all data streams are retur Supports a list of values, such as `open,hidden`. ** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`verbose` (Optional, boolean)*: Whether the maximum timestamp for each data stream should be calculated and returned. [discrete] ==== get_field_mapping @@ -5061,7 +5063,7 @@ application-specific metadata. - Mapping parameters ** *`_routing` (Optional, { required })*: Enable making a routing value required on indexed documents. ** *`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })*: Control whether the _source field is enabled on the index. -** *`runtime` (Optional, Record)*: Mapping of runtime fields for the index. +** *`runtime` (Optional, Record)*: Mapping of runtime fields for the index. 
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. @@ -5262,6 +5264,10 @@ Resources on remote clusters can be specified using the ``:`` syn If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. [discrete] ==== rollover @@ -5805,9 +5811,11 @@ client.ingest.putPipeline({ id }) ** *`id` (string)*: ID of the ingest pipeline to create or update. ** *`_meta` (Optional, Record)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. ** *`description` (Optional, string)*: Description of the ingest pipeline. -** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. -** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. +** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, redact, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. 
The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. +** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, redact, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. ** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. +** *`deprecated` (Optional, boolean)*: Marks this ingest pipeline as deprecated. +When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ** *`if_version` (Optional, number)*: Required version for optimistic concurrency control for pipeline updates @@ -5829,7 +5837,7 @@ client.ingest.simulate({ docs }) ** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline. ** *`id` (Optional, string)*: Pipeline to test. If you don’t specify a `pipeline` in the request body, this parameter is required. -** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })*: Pipeline to test. +** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*: Pipeline to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. ** *`verbose` (Optional, boolean)*: If `true`, the response includes output data for each processor in the executed pipeline. @@ -5990,7 +5998,7 @@ client.logstash.putPipeline({ id }) * *Request (object):* ** *`id` (string)*: Identifier for the pipeline. -** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })* +** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })* [discrete] === migration @@ -7358,7 +7366,7 @@ client.ml.putDatafeed({ datafeed_id }) ** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. +** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. ** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might be required to search over long time periods, for several months or years. 
@@ -5829,7 +5837,7 @@ client.ingest.simulate({ docs })
 ** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline.
 ** *`id` (Optional, string)*: Pipeline to test.
 If you don’t specify a `pipeline` in the request body, this parameter is required.
-** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })*: Pipeline to test.
+** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*: Pipeline to test.
 If you don’t specify the `pipeline` request path parameter, this parameter is required.
 If you specify both this and the request path parameter, the API only uses the request path parameter.
 ** *`verbose` (Optional, boolean)*: If `true`, the response includes output data for each processor in the executed pipeline.
@@ -5990,7 +5998,7 @@ client.logstash.putPipeline({ id })
 * *Request (object):*
 ** *`id` (string)*: Identifier for the pipeline.
-** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })*
+** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*
 
 [discrete]
 === migration
@@ -7358,7 +7366,7 @@ client.ml.putDatafeed({ datafeed_id })
 ** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed.
 This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
 It must start and end with alphanumeric characters.
-** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches.
+** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data.
 ** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might be required to search over long time periods, for several months or years.
 This search is split into time chunks in order to ensure the load on Elasticsearch is managed.
@@ -7389,7 +7397,7 @@ object is passed verbatim to Elasticsearch.
 not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default
 value is randomly selected between `60s` and `120s`. This randomness improves the query performance
 when there are multiple jobs running on the same node.
-** *`runtime_mappings` (Optional, Record)*: Specifies runtime fields for the datafeed search.
+** *`runtime_mappings` (Optional, Record)*: Specifies runtime fields for the datafeed search.
 ** *`script_fields` (Optional, Record)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed.
 The detector configuration objects in a job can contain functions that use these script fields.
 ** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.
@@ -7894,7 +7902,7 @@ client.ml.updateDatafeed({ datafeed_id })
 ** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed.
 This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
 It must start and end with alphanumeric characters.
-** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only
+** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only
 with low cardinality data.
 ** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might search over long time periods, for several months or years. This search is split
 into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of
@@ -7928,7 +7936,7 @@ when you are satisfied with the results of the job.
 not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default
 value is randomly selected between `60s` and `120s`. This randomness improves the query performance
 when there are multiple jobs running on the same node.
-** *`runtime_mappings` (Optional, Record)*: Specifies runtime fields for the datafeed search.
+** *`runtime_mappings` (Optional, Record)*: Specifies runtime fields for the datafeed search.
 ** *`script_fields` (Optional, Record)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed.
 The detector configuration objects in a job can contain functions that use these script fields.
 ** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.
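The low-cardinality caveat on `aggregations` above is easiest to see with a concrete shape. A hedged sketch of the documented pattern follows; the job, index, and field names are hypothetical:

[source, js]
----
const response = await client.ml.putDatafeed({
  datafeed_id: "datafeed-my-job", // hypothetical
  job_id: "my-job", // assumes this anomaly detection job already exists
  indices: ["my-metrics"], // hypothetical index
  aggregations: {
    buckets: {
      date_histogram: { field: "@timestamp", fixed_interval: "300s" },
      aggregations: {
        // the datafeed needs the max timestamp within each bucket
        "@timestamp": { max: { field: "@timestamp" } },
        responsetime_avg: { avg: { field: "responsetime" } },
      },
    },
  },
});
console.log(response);
----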
@@ -8531,7 +8539,7 @@ client.rollup.rollupSearch({ index })
 * *Request (object):*
 ** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL.
-** *`aggregations` (Optional, Record)*: Specifies aggregations.
+** *`aggregations` (Optional, Record)*: Specifies aggregations.
 ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query.
 ** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data.
 ** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
@@ -10588,7 +10596,7 @@ It ignores other request body parameters.
 ** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails.
 ** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search.
 ** *`field_multi_value_leniency` (Optional, boolean)*: Throw an exception when encountering multiple values for a field (default) or be lenient and return the first value from the list (without any guarantees of what that will be - typically the first in natural ascending order).
-** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
+** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
 precedence over mapped fields with the same name.
 ** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Period to wait for complete results. Defaults to no timeout, meaning the request waits for complete search results. If the search doesn’t finish within this period, the search becomes async.
 ** *`params` (Optional, Record)*: Values for parameters in the query.
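To ground the `runtime_mappings` and `params` options documented above, a hedged sketch of a parameterized SQL search; the index, fields, and script are hypothetical, and the parameters follow the REST API's positional form:

[source, js]
----
const response = await client.sql.query({
  query:
    "SELECT author, release_year FROM library WHERE page_count > ? ORDER BY release_year DESC",
  params: [100],
  runtime_mappings: {
    // hypothetical runtime field derived from an existing date field
    release_year: {
      type: "long",
      script: { source: "emit(doc['release_date'].value.year)" },
    },
  },
  field_multi_value_leniency: true,
});
console.log(response.columns, response.rows);
----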
diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts
index 122680b7d..31c3fc3dd 100644
--- a/src/api/api/ingest.ts
+++ b/src/api/api/ingest.ts
@@ -301,7 +301,7 @@ export default class Ingest {
   async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise
   async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['id']
-    const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version']
+    const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version', 'deprecated']
     const querystring: Record = {}
     // @ts-expect-error
     const userBody: any = params?.body
diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts
index f0fdd689c..69aabd194 100644
--- a/src/api/api/open_point_in_time.ts
+++ b/src/api/api/open_point_in_time.ts
@@ -47,11 +47,23 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise {
   const acceptedPath: string[] = ['index']
+  const acceptedBody: string[] = ['index_filter']
   const querystring: Record = {}
-  const body = undefined
+  // @ts-expect-error
+  const userBody: any = params?.body
+  let body: Record | string
+  if (typeof userBody === 'string') {
+    body = userBody
+  } else {
+    body = userBody != null ? { ...userBody } : undefined
+  }
 
   for (const key in params) {
-    if (acceptedPath.includes(key)) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body') {
       // @ts-expect-error
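The `index_filter` body key wired up above restricts a point in time to shards matching a query. A hedged sketch of the round trip; the index pattern and filter are hypothetical:

[source, js]
----
const pit = await client.openPointInTime({
  index: "my-logs-*", // hypothetical index pattern
  keep_alive: "1m",
  index_filter: {
    range: { "@timestamp": { gte: "now-1d/d" } },
  },
});
const result = await client.search({
  pit: { id: pit.id, keep_alive: "1m" },
  query: { match_all: {} },
});
console.log(result.hits.hits);
await client.closePointInTime({ id: pit.id });
----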
diff --git a/src/api/types.ts b/src/api/types.ts
index 9c656edef..b1eb67bd2 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -884,9 +884,11 @@ export interface OpenPointInTimeRequest extends RequestBase {
   preference?: string
   routing?: Routing
   expand_wildcards?: ExpandWildcards
+  index_filter?: QueryDslQueryContainer
 }
 
 export interface OpenPointInTimeResponse {
+  _shards: ShardStatistics
   id: Id
 }
 
@@ -1751,11 +1753,23 @@ export interface SearchShardsRequest extends RequestBase {
 }
 
 export interface SearchShardsResponse {
-  nodes: Record
+  nodes: Record
   shards: NodeShard[][]
   indices: Record
 }
 
+export interface SearchShardsSearchShardsNodeAttributes {
+  name: NodeName
+  ephemeral_id: Id
+  transport_address: TransportAddress
+  external_id: string
+  attributes: Record
+  roles: NodeRoles
+  version: VersionString
+  min_index_version: integer
+  max_index_version: integer
+}
+
 export interface SearchShardsShardStoreIndex {
   aliases?: Name[]
   filter?: QueryDslQueryContainer
@@ -2243,6 +2257,8 @@ export interface GetStats {
   total: long
 }
 
+export type GrokPattern = string
+
 export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED'
 
 export type Host = string
@@ -2397,8 +2413,6 @@ export interface NodeAttributes {
   id?: NodeId
   name: NodeName
   transport_address: TransportAddress
-  roles?: NodeRoles
-  external_id?: string
 }
 
 export type NodeId = string
@@ -2848,7 +2862,7 @@ export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMulti
 export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys
 & { [property: string]: AggregationsAggregate | string | long }
 
-export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
+export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
 
 export interface AggregationsAggregateBase {
   meta?: Metadata
@@ -2933,6 +2947,7 @@ export interface AggregationsAggregationContainer {
   sum?: AggregationsSumAggregation
   sum_bucket?: AggregationsSumBucketAggregation
   terms?: AggregationsTermsAggregation
+  time_series?: AggregationsTimeSeriesAggregation
   top_hits?: AggregationsTopHitsAggregation
   t_test?: AggregationsTTestAggregation
   top_metrics?: AggregationsTopMetricsAggregation
@@ -2942,9 +2957,9 @@ export interface AggregationsAggregationContainer {
 }
 
 export interface AggregationsAggregationRange {
-  from?: double
+  from?: double | null
   key?: string
-  to?: double
+  to?: double | null
 }
 
 export interface AggregationsArrayPercentilesItem {
@@ -4129,6 +4144,20 @@ export interface AggregationsTestPopulation {
   filter?: QueryDslQueryContainer
 }
 
+export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
+export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase {
+  size?: integer
+  keyed?: boolean
+}
+
+export interface AggregationsTimeSeriesBucketKeys extends AggregationsMultiBucketBase {
+  key: Record
+}
+export type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys
+& { [property: string]: AggregationsAggregate | Record | long }
+
 export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase {
   hits: SearchHitsMetadata
 }
@@ -4309,6 +4338,11 @@ export interface AnalysisCjkAnalyzer {
   stopwords_path?: string
 }
 
+export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase {
+  type: 'classic'
+  max_token_length?: integer
+}
+
 export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
   type: 'common_grams'
   common_words?: string[]
@@ -4395,7 +4429,7 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
   custom_token_chars?: string
   max_gram: integer
   min_gram: integer
-  token_chars: AnalysisTokenChar[]
+  token_chars?: AnalysisTokenChar[]
 }
 
 export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
@@ -4738,7 +4772,7 @@ export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
   custom_token_chars?: string
   max_gram: integer
   min_gram: integer
-  token_chars: AnalysisTokenChar[]
+  token_chars?: AnalysisTokenChar[]
 }
 
 export interface AnalysisNoriAnalyzer {
@@ -4903,6 +4937,16 @@ export interface AnalysisSimpleAnalyzer {
   version?: VersionString
 }
 
+export interface AnalysisSimplePatternSplitTokenizer extends AnalysisTokenizerBase {
+  type: 'simple_pattern_split'
+  pattern?: string
+}
+
+export interface AnalysisSimplePatternTokenizer extends AnalysisTokenizerBase {
+  type: 'simple_pattern'
+  pattern?: string
+}
+
 export interface AnalysisSnowballAnalyzer {
   type: 'snowball'
   version?: VersionString
@@ -5010,6 +5054,10 @@ export interface AnalysisThaiAnalyzer {
   stopwords_path?: string
 }
 
+export interface AnalysisThaiTokenizer extends AnalysisTokenizerBase {
+  type: 'thai'
+}
+
 export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'
 
 export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition
@@ -5026,7 +5074,7 @@ export interface AnalysisTokenizerBase {
   version?: VersionString
 }
 
-export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer
+export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisClassicTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisPathHierarchyTokenizer | AnalysisPatternTokenizer | AnalysisSimplePatternTokenizer | AnalysisSimplePatternSplitTokenizer | AnalysisStandardTokenizer | AnalysisThaiTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisIcuTokenizer | AnalysisKuromojiTokenizer | AnalysisNoriTokenizer
 
 export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {
   type: 'trim'
@@ -5151,6 +5199,10 @@ export interface MappingCompletionProperty extends MappingDocValuesPropertyBase
   type: 'completion'
 }
 
+export interface MappingCompositeSubField {
+  type: MappingRuntimeFieldType
+}
+
 export interface MappingConstantKeywordProperty extends MappingPropertyBase {
   value?: any
   type: 'constant_keyword'
@@ -5494,6 +5546,7 @@ export interface MappingRoutingField {
 }
 
 export interface MappingRuntimeField {
+  fields?: Record
   fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[]
   format?: string
   input_field?: Field
@@ -6684,39 +6737,39 @@ export interface CatAliasesRequest extends CatCatRequestBase {
 
 export type CatAliasesResponse = CatAliasesAliasesRecord[]
 
 export interface CatAllocationAllocationRecord {
-  shards: string
-  s: string
-  'shards.undesired': string | null
-  'write_load.forecast': double | null
-  wlf: double | null
-  writeLoadForecast: double | null
-  'disk.indices.forecast': ByteSize | null
-  dif: ByteSize | null
-  diskIndicesForecast: ByteSize | null
-  'disk.indices': ByteSize | null
-  di: ByteSize | null
-  diskIndices: ByteSize | null
-  'disk.used': ByteSize | null
-  du: ByteSize | null
-  diskUsed: ByteSize | null
-  'disk.avail': ByteSize | null
-  da: ByteSize | null
-  diskAvail: ByteSize | null
-  'disk.total': ByteSize | null
-  dt: ByteSize | null
-  diskTotal: ByteSize | null
-  'disk.percent': Percentage | null
-  dp: Percentage | null
-  diskPercent: Percentage | null
-  host: Host | null
-  h: Host | null
-  ip: Ip | null
-  node: string
-  n: string
-  'node.role': string | null
-  r: string | null
-  role: string | null
-  nodeRole: string | null
+  shards?: string
+  s?: string
+  'shards.undesired'?: string | null
+  'write_load.forecast'?: SpecUtilsStringified | null
+  wlf?: SpecUtilsStringified | null
+  writeLoadForecast?: SpecUtilsStringified | null
+  'disk.indices.forecast'?: ByteSize | null
+  dif?: ByteSize | null
+  diskIndicesForecast?: ByteSize | null
+  'disk.indices'?: ByteSize | null
+  di?: ByteSize | null
+  diskIndices?: ByteSize | null
+  'disk.used'?: ByteSize | null
+  du?: ByteSize | null
+  diskUsed?: ByteSize | null
+  'disk.avail'?: ByteSize | null
+  da?: ByteSize | null
+  diskAvail?: ByteSize | null
+  'disk.total'?: ByteSize | null
+  dt?: ByteSize | null
+  diskTotal?: ByteSize | null
+  'disk.percent'?: Percentage | null
+  dp?: Percentage | null
+  diskPercent?: Percentage | null
+  host?: Host | null
+  h?: Host | null
+  ip?: Ip | null
+  node?: string
+  n?: string
+  'node.role'?: string | null
+  r?: string | null
+  role?: string | null
+  nodeRole?: string | null
 }
 
 export interface CatAllocationRequest extends CatCatRequestBase {
@@ -6815,6 +6868,10 @@ export interface CatHealthHealthRecord {
   i?: string
   'shards.initializing'?: string
   shardsInitializing?: string
+  'unassign.pri'?: string
+  up?: string
+  'shards.unassigned.primary'?: string
+  shardsUnassignedPrimary?: string
   unassign?: string
   u?: string
   'shards.unassigned'?: string
@@ -6878,6 +6935,7 @@ export interface CatIndicesIndicesRecord {
   ss?: string | null
   storeSize?: string | null
   'pri.store.size'?: string | null
+  'dataset.size'?: string | null
   'completion.size'?: string
   cs?: string
   completionSize?: string
@@ -7995,6 +8053,7 @@ export interface CatShardsShardsRecord {
   dc?: string | null
   store?: string | null
   sto?: string | null
+  dataset?: string | null
   ip?: string | null
   id?: string
   node?: string | null
@@ -8717,6 +8776,7 @@ export interface ClusterAllocationExplainClusterInfo {
 
 export interface ClusterAllocationExplainCurrentNode {
   id: Id
   name: Name
+  roles: NodeRoles
   attributes: Record
   transport_address: TransportAddress
   weight_ranking: integer
@@ -8739,6 +8799,7 @@ export interface ClusterAllocationExplainNodeAllocationExplanation {
   node_decision: ClusterAllocationExplainDecision
   node_id: Id
   node_name: Name
+  roles: NodeRoles
   store?: ClusterAllocationExplainAllocationStore
   transport_address: TransportAddress
   weight_ranking: integer
@@ -8870,6 +8931,7 @@ export interface ClusterHealthHealthResponseBody {
   task_max_waiting_in_queue?: Duration
   task_max_waiting_in_queue_millis: DurationValue
   timed_out: boolean
+  unassigned_primary_shards: integer
   unassigned_shards: integer
 }
 
@@ -8883,6 +8945,7 @@ export interface ClusterHealthIndexHealthStats {
   shards?: Record
   status: HealthStatus
   unassigned_shards: integer
+  unassigned_primary_shards: integer
 }
 
 export interface ClusterHealthRequest extends RequestBase {
@@ -8909,6 +8972,7 @@ export interface ClusterHealthShardHealthStats {
   relocating_shards: integer
   status: HealthStatus
   unassigned_shards: integer
+  unassigned_primary_shards: integer
 }
 
 export interface ClusterInfoRequest extends RequestBase {
@@ -9916,8 +9980,11 @@ export interface EnrichStatsCacheStats {
   node_id: Id
   count: integer
   hits: integer
+  hits_time_in_millis: DurationValue
   misses: integer
+  misses_time_in_millis: DurationValue
   evictions: integer
+  size_in_bytes: long
 }
 
 export interface EnrichStatsCoordinatorStats {
@@ -10037,8 +10104,10 @@ export type EsqlTableValuesLongDouble = double | double[]
 
 export type EsqlTableValuesLongValue = long | long[]
 
+export type EsqlQueryEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow'
+
 export interface EsqlQueryRequest extends RequestBase {
-  format?: string
+  format?: EsqlQueryEsqlFormat
   delimiter?: string
   drop_null_columns?: boolean
   columnar?: boolean
@@ -11446,6 +11515,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase {
   expand_wildcards?: ExpandWildcards
   include_defaults?: boolean
   master_timeout?: Duration
+  verbose?: boolean
 }
 
 export interface IndicesGetDataStreamResponse {
@@ -11830,6 +11900,8 @@ export type IndicesResolveClusterResponse = Record
-  patterns: string[]
+  patterns: GrokPattern[]
   trace_match?: boolean
 }
 
@@ -12604,6 +12694,7 @@ export interface IngestPipeline {
   on_failure?: IngestProcessorContainer[]
   processors?: IngestProcessorContainer[]
   version?: VersionNumber
+  deprecated?: boolean
   _meta?: Metadata
 }
 
@@ -12641,6 +12732,7 @@ export interface IngestProcessorContainer {
   enrich?: IngestEnrichProcessor
   fail?: IngestFailProcessor
   foreach?: IngestForeachProcessor
+  geo_grid?: IngestGeoGridProcessor
   geoip?: IngestGeoIpProcessor
   grok?: IngestGrokProcessor
   gsub?: IngestGsubProcessor
@@ -12651,6 +12743,7 @@ export interface IngestProcessorContainer {
   kv?: IngestKeyValueProcessor
   lowercase?: IngestLowercaseProcessor
   pipeline?: IngestPipelineProcessor
+  redact?: IngestRedactProcessor
   remove?: IngestRemoveProcessor
   rename?: IngestRenameProcessor
   reroute?: IngestRerouteProcessor
@@ -12666,6 +12759,16 @@ export interface IngestProcessorContainer {
   user_agent?: IngestUserAgentProcessor
 }
 
+export interface IngestRedactProcessor extends IngestProcessorBase {
+  field: Field
+  patterns: GrokPattern[]
+  pattern_definitions?: Record
+  prefix?: string
+  suffix?: string
+  ignore_missing?: boolean
+  skip_if_unlicensed?: boolean
+}
+
 export interface IngestRemoveProcessor extends IngestProcessorBase {
   field: Fields
   keep?: Fields
@@ -12750,12 +12853,13 @@ export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
 
 export interface IngestUserAgentProcessor extends IngestProcessorBase {
   field: Field
   ignore_missing?: boolean
-  options?: IngestUserAgentProperty[]
   regex_file?: string
   target_field?: Field
+  properties?: IngestUserAgentProperty[]
+  extract_device_type?: boolean
 }
 
-export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD'
+export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version'
 
 export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
   id: Ids
@@ -12850,6 +12954,7 @@ export interface IngestPutPipelineRequest extends RequestBase {
   on_failure?: IngestProcessorContainer[]
   processors?: IngestProcessorContainer[]
   version?: VersionNumber
+  deprecated?: boolean
 }
 
 export type IngestPutPipelineResponse = AcknowledgedResponseBase
@@ -13253,7 +13358,7 @@ export type MlCategorizationStatus = 'ok' | 'warn'
 export interface MlCategory {
   category_id: ulong
   examples: string[]
-  grok_pattern?: string
+  grok_pattern?: GrokPattern
   job_id: Id
   max_matching_length: ulong
   partition_field_name?: string
@@ -15736,6 +15841,25 @@ export interface NodesHttp {
   current_open?: integer
   total_opened?: long
   clients?: NodesClient[]
+  routes: Record
+}
+
+export interface NodesHttpRoute {
+  requests: NodesHttpRouteRequests
+  responses: NodesHttpRouteResponses
+}
+
+export interface NodesHttpRouteRequests {
+  count: long
+  total_size_in_bytes: long
+  size_histogram: NodesSizeHttpHistogram[]
+}
+
+export interface NodesHttpRouteResponses {
+  count: long
+  total_size_in_bytes: long
+  handling_time_histogram: NodesTimeHttpHistogram[]
+  size_histogram: NodesSizeHttpHistogram[]
 }
 
 export interface NodesIndexingPressure {
@@ -15750,16 +15874,25 @@ export interface NodesIndexingPressureMemory {
 }
 
 export interface NodesIngest {
-  pipelines?: Record
+  pipelines?: Record
   total?: NodesIngestTotal
 }
 
+export interface NodesIngestStats {
+  count: long
+  current: long
+  failed: long
+  processors: Record[]
+  time_in_millis: DurationValue
+  ingested_as_first_pipeline_in_bytes: long
+  produced_as_first_pipeline_in_bytes: long
+}
+
 export interface NodesIngestTotal {
-  count?: long
-  current?: long
-  failed?: long
-  processors?: Record[]
-  time_in_millis?: DurationValue
+  count: long
+  current: long
+  failed: long
+  time_in_millis: DurationValue
 }
 
 export interface NodesIoStatDevice {
@@ -15964,6 +16097,12 @@ export interface NodesSerializedClusterStateDetail {
   compressed_size_in_bytes?: long
 }
 
+export interface NodesSizeHttpHistogram {
+  count: long
+  ge_bytes?: long
+  lt_bytes?: long
+}
+
 export interface NodesStats {
   adaptive_selection?: Record
   breakers?: Record
@@ -15998,6 +16137,12 @@ export interface NodesThreadCount {
   threads?: long
 }
 
+export interface NodesTimeHttpHistogram {
+  count: long
+  ge_millis?: long
+  lt_millis?: long
+}
+
 export interface NodesTransport {
   inbound_handling_time_histogram?: NodesTransportHistogram[]
   outbound_handling_time_histogram?: NodesTransportHistogram[]
@@ -18754,7 +18899,7 @@ export interface TextStructureFindStructureRequest {
   ecs_compatibility?: string
   explain?: boolean
   format?: string
-  grok_pattern?: string
+  grok_pattern?: GrokPattern
   has_header_row?: boolean
   line_merge_size_limit?: uint
   lines_to_sample?: uint
@@ -18781,7 +18926,7 @@ export interface TextStructureFindStructureResponse {
   num_lines_analyzed: integer
   column_names?: string[]
   explanation?: string[]
-  grok_pattern?: string
+  grok_pattern?: GrokPattern
   multiline_start_pattern?: string
   exclude_lines_pattern?: string
   java_timestamp_formats?: string[]
@@ -18809,7 +18954,7 @@ export interface TextStructureTestGrokPatternMatchedText {
 
 export interface TextStructureTestGrokPatternRequest extends RequestBase {
   ecs_compatibility?: string
-  grok_pattern: string
+  grok_pattern: GrokPattern
   text: string[]
 }
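Among the additions above, the `redact` processor (driven by `GrokPattern` strings) is the most self-contained to try. A hedged sketch via `ingest.simulate`; the pattern and sample document are hypothetical, and the processor needs an appropriate license unless `skip_if_unlicensed` is set:

[source, js]
----
const response = await client.ingest.simulate({
  pipeline: {
    description: "Mask IP addresses in log messages",
    processors: [
      {
        redact: {
          field: "message",
          patterns: ["%{IP:client}"],
          prefix: "<",
          suffix: ">",
        },
      },
    ],
  },
  docs: [{ _source: { message: "login from 10.0.0.1 failed" } }],
});
console.log(response.docs[0]);
----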
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index 655ba221f..3d846a344 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -917,9 +917,14 @@ export interface OpenPointInTimeRequest extends RequestBase {
   preference?: string
   routing?: Routing
   expand_wildcards?: ExpandWildcards
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
+  body?: {
+    index_filter?: QueryDslQueryContainer
+  }
 }
 
 export interface OpenPointInTimeResponse {
+  _shards: ShardStatistics
   id: Id
 }
 
@@ -1809,11 +1814,23 @@ export interface SearchShardsRequest extends RequestBase {
 }
 
 export interface SearchShardsResponse {
-  nodes: Record
+  nodes: Record
   shards: NodeShard[][]
   indices: Record
 }
 
+export interface SearchShardsSearchShardsNodeAttributes {
+  name: NodeName
+  ephemeral_id: Id
+  transport_address: TransportAddress
+  external_id: string
+  attributes: Record
+  roles: NodeRoles
+  version: VersionString
+  min_index_version: integer
+  max_index_version: integer
+}
+
 export interface SearchShardsShardStoreIndex {
   aliases?: Name[]
   filter?: QueryDslQueryContainer
@@ -2316,6 +2333,8 @@ export interface GetStats {
   total: long
 }
 
+export type GrokPattern = string
+
 export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED'
 
 export type Host = string
@@ -2470,8 +2489,6 @@ export interface NodeAttributes {
   id?: NodeId
   name: NodeName
   transport_address: TransportAddress
-  roles?: NodeRoles
-  external_id?: string
 }
 
 export type NodeId = string
@@ -2921,7 +2938,7 @@ export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMulti
 export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys
 & { [property: string]: AggregationsAggregate | string | long }
 
-export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
+export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
 
 export interface AggregationsAggregateBase {
   meta?: Metadata
@@ -3006,6 +3023,7 @@ export interface AggregationsAggregationContainer {
   sum?: AggregationsSumAggregation
   sum_bucket?: AggregationsSumBucketAggregation
   terms?: AggregationsTermsAggregation
+  time_series?: AggregationsTimeSeriesAggregation
   top_hits?: AggregationsTopHitsAggregation
   t_test?: AggregationsTTestAggregation
   top_metrics?: AggregationsTopMetricsAggregation
@@ -3015,9 +3033,9 @@ export interface AggregationsAggregationContainer {
 }
 
 export interface AggregationsAggregationRange {
-  from?: double
+  from?: double | null
   key?: string
-  to?: double
+  to?: double | null
 }
 
 export interface AggregationsArrayPercentilesItem {
@@ -4202,6 +4220,20 @@ export interface AggregationsTestPopulation {
   filter?: QueryDslQueryContainer
 }
 
+export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucketAggregateBase {
+}
+
+export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase {
+  size?: integer
+  keyed?: boolean
+}
+
+export interface AggregationsTimeSeriesBucketKeys extends AggregationsMultiBucketBase {
+  key: Record
+}
+export type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys
+& { [property: string]: AggregationsAggregate | Record | long }
+
 export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase {
   hits: SearchHitsMetadata
 }
@@ -4382,6 +4414,11 @@ export interface AnalysisCjkAnalyzer {
   stopwords_path?: string
 }
 
+export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase {
+  type: 'classic'
+  max_token_length?: integer
+}
+
 export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
   type: 'common_grams'
   common_words?: string[]
@@ -4468,7 +4505,7 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
   custom_token_chars?: string
   max_gram: integer
   min_gram: integer
-  token_chars: AnalysisTokenChar[]
+  token_chars?: AnalysisTokenChar[]
 }
 
 export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
@@ -4811,7 +4848,7 @@ export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
   custom_token_chars?: string
   max_gram: integer
   min_gram: integer
-  token_chars: AnalysisTokenChar[]
+  token_chars?: AnalysisTokenChar[]
 }
 
 export interface AnalysisNoriAnalyzer {
@@ -4976,6 +5013,16 @@ export interface AnalysisSimpleAnalyzer {
   version?: VersionString
 }
 
+export interface AnalysisSimplePatternSplitTokenizer extends AnalysisTokenizerBase {
+  type: 'simple_pattern_split'
+  pattern?: string
+}
+
+export interface AnalysisSimplePatternTokenizer extends AnalysisTokenizerBase {
+  type: 'simple_pattern'
+  pattern?: string
+}
+
 export interface AnalysisSnowballAnalyzer {
   type: 'snowball'
   version?: VersionString
@@ -5083,6 +5130,10 @@ export interface AnalysisThaiAnalyzer {
   stopwords_path?: string
 }
 
+export interface AnalysisThaiTokenizer extends AnalysisTokenizerBase {
+  type: 'thai'
+}
+
 export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'
 
 export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition
@@ -5099,7 +5150,7 @@ export interface AnalysisTokenizerBase {
   version?: VersionString
 }
 
-export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer
+export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisClassicTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisPathHierarchyTokenizer | AnalysisPatternTokenizer | AnalysisSimplePatternTokenizer | AnalysisSimplePatternSplitTokenizer | AnalysisStandardTokenizer | AnalysisThaiTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisIcuTokenizer | AnalysisKuromojiTokenizer | AnalysisNoriTokenizer
 
 export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {
   type: 'trim'
@@ -5224,6 +5275,10 @@ export interface MappingCompletionProperty extends MappingDocValuesPropertyBase
   type: 'completion'
 }
 
+export interface MappingCompositeSubField {
+  type: MappingRuntimeFieldType
+}
+
 export interface MappingConstantKeywordProperty extends MappingPropertyBase {
   value?: any
   type: 'constant_keyword'
@@ -5567,6 +5622,7 @@ export interface MappingRoutingField {
 }
 
 export interface MappingRuntimeField {
+  fields?: Record
  fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[]
   format?: string
   input_field?: Field
@@ -6761,39 +6817,39 @@ export interface CatAliasesRequest extends CatCatRequestBase {
 
 export type CatAliasesResponse = CatAliasesAliasesRecord[]
 
 export interface CatAllocationAllocationRecord {
-  shards: string
-  s: string
-  'shards.undesired': string | null
-  'write_load.forecast': double | null
-  wlf: double | null
-  writeLoadForecast: double | null
-  'disk.indices.forecast': ByteSize | null
-  dif: ByteSize | null
-  diskIndicesForecast: ByteSize | null
-  'disk.indices': ByteSize | null
-  di: ByteSize | null
-  diskIndices: ByteSize | null
-  'disk.used': ByteSize | null
-  du: ByteSize | null
-  diskUsed: ByteSize | null
-  'disk.avail': ByteSize | null
-  da: ByteSize | null
-  diskAvail: ByteSize | null
-  'disk.total': ByteSize | null
-  dt: ByteSize | null
-  diskTotal: ByteSize | null
-  'disk.percent': Percentage | null
-  dp: Percentage | null
-  diskPercent: Percentage | null
-  host: Host | null
-  h: Host | null
-  ip: Ip | null
-  node: string
-  n: string
-  'node.role': string | null
-  r: string | null
-  role: string | null
-  nodeRole: string | null
+  shards?: string
+  s?: string
+  'shards.undesired'?: string | null
+  'write_load.forecast'?: SpecUtilsStringified | null
+  wlf?: SpecUtilsStringified | null
+  writeLoadForecast?: SpecUtilsStringified | null
+  'disk.indices.forecast'?: ByteSize | null
+  dif?: ByteSize | null
+  diskIndicesForecast?: ByteSize | null
+  'disk.indices'?: ByteSize | null
+  di?: ByteSize | null
+  diskIndices?: ByteSize | null
+  'disk.used'?: ByteSize | null
+  du?: ByteSize | null
+  diskUsed?: ByteSize | null
+  'disk.avail'?: ByteSize | null
+  da?: ByteSize | null
+  diskAvail?: ByteSize | null
+  'disk.total'?: ByteSize | null
+  dt?: ByteSize | null
+  diskTotal?: ByteSize | null
+  'disk.percent'?: Percentage | null
+  dp?: Percentage | null
+  diskPercent?: Percentage | null
+  host?: Host | null
+  h?: Host | null
+  ip?: Ip | null
+  node?: string
+  n?: string
+  'node.role'?: string | null
+  r?: string | null
+  role?: string | null
+  nodeRole?: string | null
 }
 
 export interface CatAllocationRequest extends CatCatRequestBase {
@@ -6892,6 +6948,10 @@ export interface CatHealthHealthRecord {
   i?: string
   'shards.initializing'?: string
   shardsInitializing?: string
+  'unassign.pri'?: string
+  up?: string
+  'shards.unassigned.primary'?: string
+  shardsUnassignedPrimary?: string
   unassign?: string
   u?: string
   'shards.unassigned'?: string
@@ -6955,6 +7015,7 @@ export interface CatIndicesIndicesRecord {
   ss?: string | null
   storeSize?: string | null
   'pri.store.size'?: string | null
+  'dataset.size'?: string | null
   'completion.size'?: string
   cs?: string
   completionSize?: string
@@ -8072,6 +8133,7 @@ export interface CatShardsShardsRecord {
   dc?: string | null
   store?: string | null
   sto?: string | null
+  dataset?: string | null
   ip?: string | null
   id?: string
   node?: string | null
@@ -8806,6 +8868,7 @@ export interface ClusterAllocationExplainClusterInfo {
 
 export interface ClusterAllocationExplainCurrentNode {
   id: Id
   name: Name
+  roles: NodeRoles
   attributes: Record
   transport_address: TransportAddress
   weight_ranking: integer
@@ -8828,6 +8891,7 @@ export interface ClusterAllocationExplainNodeAllocationExplanation {
   node_decision: ClusterAllocationExplainDecision
   node_id: Id
   node_name: Name
+  roles: NodeRoles
   store?: ClusterAllocationExplainAllocationStore
   transport_address: TransportAddress
   weight_ranking: integer
@@ -8962,6 +9026,7 @@ export interface ClusterHealthHealthResponseBody {
   task_max_waiting_in_queue?: Duration
   task_max_waiting_in_queue_millis: DurationValue
   timed_out: boolean
+  unassigned_primary_shards: integer
   unassigned_shards: integer
 }
 
@@ -8975,6 +9040,7 @@ export interface ClusterHealthIndexHealthStats {
   shards?: Record
   status: HealthStatus
   unassigned_shards: integer
+  unassigned_primary_shards: integer
 }
 
 export interface ClusterHealthRequest extends RequestBase {
@@ -9001,6 +9067,7 @@ export interface ClusterHealthShardHealthStats {
   relocating_shards: integer
   status: HealthStatus
   unassigned_shards: integer
+  unassigned_primary_shards: integer
 }
 
 export interface ClusterInfoRequest extends RequestBase {
@@ -10068,8 +10135,11 @@ export interface EnrichStatsCacheStats {
   node_id: Id
   count: integer
   hits: integer
+  hits_time_in_millis: DurationValue
   misses: integer
+  misses_time_in_millis: DurationValue
   evictions: integer
+  size_in_bytes: long
 }
 
 export interface EnrichStatsCoordinatorStats {
@@ -10192,8 +10262,10 @@ export type EsqlTableValuesLongDouble = double | double[]
 
 export type EsqlTableValuesLongValue = long | long[]
 
+export type EsqlQueryEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow'
+
 export interface EsqlQueryRequest extends RequestBase {
-  format?: string
+  format?: EsqlQueryEsqlFormat
   delimiter?: string
   drop_null_columns?: boolean
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
@@ -11630,6 +11702,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase {
   expand_wildcards?: ExpandWildcards
   include_defaults?: boolean
   master_timeout?: Duration
+  verbose?: boolean
 }
 
 export interface IndicesGetDataStreamResponse {
@@ -12033,6 +12106,8 @@ export type IndicesResolveClusterResponse = Record
-  patterns: string[]
+  patterns: GrokPattern[]
   trace_match?: boolean
 }
 
@@ -12829,6 +12922,7 @@ export interface IngestPipeline {
   on_failure?: IngestProcessorContainer[]
   processors?: IngestProcessorContainer[]
   version?: VersionNumber
+  deprecated?: boolean
   _meta?: Metadata
 }
 
@@ -12866,6 +12960,7 @@ export interface IngestProcessorContainer {
   enrich?: IngestEnrichProcessor
   fail?: IngestFailProcessor
   foreach?: IngestForeachProcessor
+  geo_grid?: IngestGeoGridProcessor
   geoip?: IngestGeoIpProcessor
   grok?: IngestGrokProcessor
   gsub?: IngestGsubProcessor
@@ -12876,6 +12971,7 @@ export interface IngestProcessorContainer {
   kv?: IngestKeyValueProcessor
   lowercase?: IngestLowercaseProcessor
   pipeline?: IngestPipelineProcessor
+  redact?: IngestRedactProcessor
   remove?: IngestRemoveProcessor
   rename?: IngestRenameProcessor
   reroute?: IngestRerouteProcessor
@@ -12891,6 +12987,16 @@ export interface IngestProcessorContainer {
   user_agent?: IngestUserAgentProcessor
 }
 
+export interface IngestRedactProcessor extends IngestProcessorBase {
+  field: Field
+  patterns: GrokPattern[]
+  pattern_definitions?: Record
+  prefix?: string
+  suffix?: string
+  ignore_missing?: boolean
+  skip_if_unlicensed?: boolean
+}
+
 export interface IngestRemoveProcessor extends IngestProcessorBase {
   field: Fields
   keep?: Fields
@@ -12975,12 +13081,13 @@ export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
 
 export interface IngestUserAgentProcessor extends IngestProcessorBase {
   field: Field
   ignore_missing?: boolean
-  options?: IngestUserAgentProperty[]
   regex_file?: string
   target_field?: Field
+  properties?: IngestUserAgentProperty[]
+  extract_device_type?: boolean
 }
 
-export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD'
+export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version'
 
 export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
   id: Ids
@@ -13080,6 +13187,7 @@ export interface IngestPutPipelineRequest extends RequestBase {
     on_failure?: IngestProcessorContainer[]
     processors?: IngestProcessorContainer[]
     version?: VersionNumber
+    deprecated?: boolean
   }
 }
 
@@ -13491,7 +13599,7 @@ export type MlCategorizationStatus = 'ok' | 'warn'
 export interface MlCategory {
   category_id: ulong
   examples: string[]
-  grok_pattern?: string
+  grok_pattern?: GrokPattern
   job_id: Id
   max_matching_length: ulong
   partition_field_name?: string
@@ -16088,6 +16196,25 @@ export interface NodesHttp {
   current_open?: integer
   total_opened?: long
   clients?: NodesClient[]
+  routes: Record
+}
+
+export interface NodesHttpRoute {
+  requests: NodesHttpRouteRequests
+  responses: NodesHttpRouteResponses
+}
+
+export interface NodesHttpRouteRequests {
+  count: long
+  total_size_in_bytes: long
+  size_histogram: NodesSizeHttpHistogram[]
+}
+
+export interface NodesHttpRouteResponses {
+  count: long
+  total_size_in_bytes: long
+  handling_time_histogram: NodesTimeHttpHistogram[]
+  size_histogram: NodesSizeHttpHistogram[]
 }
 
 export interface NodesIndexingPressure {
@@ -16102,16 +16229,25 @@ export interface NodesIndexingPressureMemory {
 }
 
 export interface NodesIngest {
-  pipelines?: Record
+  pipelines?: Record
   total?: NodesIngestTotal
 }
 
+export interface NodesIngestStats {
+  count: long
+  current: long
+  failed: long
+  processors: Record[]
+  time_in_millis: DurationValue
+  ingested_as_first_pipeline_in_bytes: long
+  produced_as_first_pipeline_in_bytes: long
+}
+
 export interface NodesIngestTotal {
-  count?: long
-  current?: long
-  failed?: long
-  processors?: Record[]
-  time_in_millis?: DurationValue
+  count: long
+  current: long
+  failed: long
+  time_in_millis: DurationValue
 }
 
 export interface NodesIoStatDevice {
@@ -16316,6 +16452,12 @@ export interface NodesSerializedClusterStateDetail {
   compressed_size_in_bytes?: long
 }
 
+export interface NodesSizeHttpHistogram {
+  count: long
+  ge_bytes?: long
+  lt_bytes?: long
+}
+
 export interface NodesStats {
   adaptive_selection?: Record
   breakers?: Record
@@ -16350,6 +16492,12 @@ export interface NodesThreadCount {
   threads?: long
 }
 
+export interface NodesTimeHttpHistogram {
+  count: long
+  ge_millis?: long
+  lt_millis?: long
+}
+
 export interface NodesTransport {
   inbound_handling_time_histogram?: NodesTransportHistogram[]
   outbound_handling_time_histogram?: NodesTransportHistogram[]
@@ -19236,7 +19384,7 @@ export interface TextStructureFindStructureRequest {
   ecs_compatibility?: string
   explain?: boolean
   format?: string
-  grok_pattern?: string
+  grok_pattern?: GrokPattern
   has_header_row?: boolean
   line_merge_size_limit?: uint
   lines_to_sample?: uint
@@ -19264,7 +19412,7 @@ export interface TextStructureFindStructureResponse {
   num_lines_analyzed: integer
   column_names?: string[]
   explanation?: string[]
-  grok_pattern?: string
+  grok_pattern?: GrokPattern
   multiline_start_pattern?: string
   exclude_lines_pattern?: string
   java_timestamp_formats?: string[]
@@ -19294,7 +19442,7 @@ export interface TextStructureTestGrokPatternRequest extends RequestBase {
   ecs_compatibility?: string
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
-    grok_pattern: string
+    grok_pattern: GrokPattern
     text: string[]
   }
 }
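The patch above also introduces per-route HTTP metrics (`NodesHttpRoute` with size and handling-time histograms) in both type files. A hedged sketch of reading them from node stats; route names vary by cluster and version:

[source, js]
----
const stats = await client.nodes.stats({ metric: "http" });
for (const [nodeId, node] of Object.entries(stats.nodes)) {
  const routes = node.http?.routes ?? {};
  for (const [route, metrics] of Object.entries(routes)) {
    console.log(nodeId, route, metrics.requests.count, metrics.responses.count);
  }
}
----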
From aad41df2311bde04eb5099279cac06053c78ddf8 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Thu, 26 Sep 2024 13:29:27 -0500
Subject: [PATCH 389/647] Upgrade transport to 8.8.1 (#2366)

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 31e6da8a4..4e7eb5459 100644
--- a/package.json
+++ b/package.json
@@ -87,7 +87,7 @@
     "zx": "^7.2.2"
   },
   "dependencies": {
-    "@elastic/transport": "^8.7.0",
+    "@elastic/transport": "^8.8.1",
     "tslib": "^2.4.0"
   },
   "tap": {

From 428a7b023dda4c877526a98a525d4a6a6ba05d11 Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Mon, 30 Sep 2024 20:37:11 +0200
Subject: [PATCH 390/647] Auto-generated code for main (#2368)

---
 ...0393ca5a2942e1f00ed87546d0d50732.asciidoc} |   4 +-
 ...083b92e8ea264e49bf9fd40fc6a3094b.asciidoc} |   1 +
 .../43d9e314431336a6f084cea76dfd6489.asciidoc |  18 ++
 ...565386eee0951865a684e41fab53b40c.asciidoc} |   1 +
 .../58f6b72009512851843c7b7a20e9504a.asciidoc |  19 +++
 .../69541f0bb81ab3797926bb2a00607cda.asciidoc |  19 +++
 .../6e6b78e6b689a5d6aa637271b6d084e2.asciidoc |  45 +++++
 ...745864ef2427188241a4702b94ea57be.asciidoc} |   2 +-
 .../7b1b947bddd7e78f77da265f7e645a61.asciidoc |  51 ++++++
 ...8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc} |   2 +-
 .../8477e77e4fad19af66f03f81b8f2592b.asciidoc |  20 +++
 ...8cad5d95a0e7c103f08be53d0b172558.asciidoc} |   6 +-
 .../8d750dfc067b1184c32a2423c60e4d06.asciidoc |  17 ++
 .../9169d19a80175ec94f80865d0f9bef4c.asciidoc |  36 ++++
 .../971fd23adb81bb5842c7750e0379336a.asciidoc |  26 +++
 .../9dfe3b02bd15409b4b8b36e9756e8f94.asciidoc |  17 ++
 .../a1dda7e7c01be96a4acf7b725d70385f.asciidoc |  28 ++++
 .../a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc |  32 ++++
 .../dbb8fa2b8af6db66cf75ca4b83c0fb21.asciidoc |  68 ++++++++
 ...dbce6cb1eaf9b2cc36b7f9a13afc63ea.asciidoc} |  19 +++
 .../e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc |  28 ++++
 .../e04267ffc50d916800b919c6cdc9622a.asciidoc |  13 ++
 .../e4b38973c74037335378d8480f1ce894.asciidoc |  44 +++++
 ...f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc} |   2 +-
 docs/reference.asciidoc                       |  45 +++--
 src/api/api/ingest.ts                         |   6 +-
 src/api/api/search_application.ts             |  14 +-
 src/api/api/security.ts                       |   2 +-
 src/api/api/snapshot.ts                       |  32 ++++
 src/api/types.ts                              | 155 ++++++++++++++++--
 src/api/typesWithBodyKey.ts                   | 155 ++++++++++++++++--
 31 files changed, 863 insertions(+), 64 deletions(-)
 rename docs/doc_examples/{1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc => 0393ca5a2942e1f00ed87546d0d50732.asciidoc} (92%)
 rename docs/doc_examples/{840f8c863c30b04abcf2dd66b846f157.asciidoc => 083b92e8ea264e49bf9fd40fc6a3094b.asciidoc} (96%)
 create mode 100644 docs/doc_examples/43d9e314431336a6f084cea76dfd6489.asciidoc
 rename docs/doc_examples/{1b60ad542abb511cbd926ac8c55b609c.asciidoc => 565386eee0951865a684e41fab53b40c.asciidoc} (95%)
 create mode 100644 docs/doc_examples/58f6b72009512851843c7b7a20e9504a.asciidoc
 create mode 100644 docs/doc_examples/69541f0bb81ab3797926bb2a00607cda.asciidoc
 create mode 100644 docs/doc_examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc
 rename docs/doc_examples/{a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc => 745864ef2427188241a4702b94ea57be.asciidoc} (95%)
 create mode 100644 docs/doc_examples/7b1b947bddd7e78f77da265f7e645a61.asciidoc
 rename docs/doc_examples/{9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc => 8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc} (94%)
 create mode 100644 docs/doc_examples/8477e77e4fad19af66f03f81b8f2592b.asciidoc
 rename docs/doc_examples/{bdb671866e2f0195f8dfbdb7f20bf591.asciidoc => 8cad5d95a0e7c103f08be53d0b172558.asciidoc} (73%)
 create mode 100644 docs/doc_examples/8d750dfc067b1184c32a2423c60e4d06.asciidoc
 create mode 100644 docs/doc_examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc
 create mode 100644 docs/doc_examples/971fd23adb81bb5842c7750e0379336a.asciidoc
 create mode 100644 docs/doc_examples/9dfe3b02bd15409b4b8b36e9756e8f94.asciidoc
 create mode 100644 docs/doc_examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc
 create mode 100644 docs/doc_examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc
 create mode 100644 docs/doc_examples/dbb8fa2b8af6db66cf75ca4b83c0fb21.asciidoc
 rename docs/doc_examples/{7c8f207e43115ea8f20d2298be5aaebc.asciidoc => dbce6cb1eaf9b2cc36b7f9a13afc63ea.asciidoc} (62%)
 create mode 100644 docs/doc_examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc
 create mode 100644 docs/doc_examples/e04267ffc50d916800b919c6cdc9622a.asciidoc
 create mode 100644 docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc
 rename docs/doc_examples/{f8525c2460a577edfef156c13f55b8a7.asciidoc => f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc} (95%)

diff --git a/docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc b/docs/doc_examples/0393ca5a2942e1f00ed87546d0d50732.asciidoc
similarity index 92%
rename from docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc
rename to docs/doc_examples/0393ca5a2942e1f00ed87546d0d50732.asciidoc
index 84fdefbae..4709c48bb 100644
--- a/docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc
+++ b/docs/doc_examples/0393ca5a2942e1f00ed87546d0d50732.asciidoc
@@ -4,7 +4,7 @@
 [source, js]
 ----
 const response = await client.indices.create({
-  index: "my-index-000002",
+  index: "my-index-000003",
   mappings: {
     properties: {
       metrics: {
@@ -29,7 +29,7 @@ const response = await client.indices.create({
 console.log(response);
 
 const response1 = await client.indices.getMapping({
-  index: "my-index-000002",
+  index: "my-index-000003",
 });
 console.log(response1);
 ----
diff --git a/docs/doc_examples/840f8c863c30b04abcf2dd66b846f157.asciidoc b/docs/doc_examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc
similarity index 96%
rename from docs/doc_examples/840f8c863c30b04abcf2dd66b846f157.asciidoc
rename to docs/doc_examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc
index e16ea6560..16eda0fcc 100644
--- a/docs/doc_examples/840f8c863c30b04abcf2dd66b846f157.asciidoc
+++ b/docs/doc_examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc
@@ -14,6 +14,7 @@ const response = await client.inference.put({
         min_number_of_allocations: 3,
         max_number_of_allocations: 10,
       },
+      num_threads: 1,
       model_id: ".multilingual-e5-small",
     },
   },
diff --git a/docs/doc_examples/43d9e314431336a6f084cea76dfd6489.asciidoc b/docs/doc_examples/43d9e314431336a6f084cea76dfd6489.asciidoc
new file mode 100644
index 000000000..bccc8d81f
--- /dev/null
+++ b/docs/doc_examples/43d9e314431336a6f084cea76dfd6489.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "restaurants",
+  retriever: {
+    knn: {
+      field: "vector",
+      query_vector: [10, 22, 77],
+      k: 10,
+      num_candidates: 10,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc b/docs/doc_examples/565386eee0951865a684e41fab53b40c.asciidoc
similarity index 95%
rename from docs/doc_examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc
rename to docs/doc_examples/565386eee0951865a684e41fab53b40c.asciidoc
index 160884d3b..149cff486 100644
--- a/docs/doc_examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc
+++ b/docs/doc_examples/565386eee0951865a684e41fab53b40c.asciidoc
@@ -14,6 +14,7 @@ const response = await client.inference.put({
         min_number_of_allocations: 3,
         max_number_of_allocations: 10,
       },
+      num_threads: 1,
     },
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/58f6b72009512851843c7b7a20e9504a.asciidoc b/docs/doc_examples/58f6b72009512851843c7b7a20e9504a.asciidoc
new file mode 100644
index 000000000..ab21c2d80
--- /dev/null
+++ b/docs/doc_examples/58f6b72009512851843c7b7a20e9504a.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000002",
+  mappings: {
+    properties: {
+      inference_field: {
+        type: "semantic_text",
+        inference_id: "my-elser-endpoint-for-ingest",
+        search_inference_id: "my-elser-endpoint-for-search",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/69541f0bb81ab3797926bb2a00607cda.asciidoc b/docs/doc_examples/69541f0bb81ab3797926bb2a00607cda.asciidoc
new file mode 100644
index 000000000..bf62637fc
--- /dev/null
+++ b/docs/doc_examples/69541f0bb81ab3797926bb2a00607cda.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.inference.put({
+  task_type: "rerank",
+  inference_id: "my-msmarco-minilm-model",
+  inference_config: {
+    service: "elasticsearch",
+    service_settings: {
+      num_allocations: 1,
+      num_threads: 1,
+      model_id: "cross-encoder__ms-marco-minilm-l-6-v2",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc b/docs/doc_examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc
new file mode 100644
index 000000000..a541500cb
--- /dev/null
+++ b/docs/doc_examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc
@@ -0,0 +1,45 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "movies",
+  retriever: {
+    rrf: {
+      retrievers: [
+        {
+          standard: {
+            query: {
+              sparse_vector: {
+                field: "plot_embedding",
+                inference_id: "my-elser-model",
+                query: "films that explore psychological depths",
+              },
+            },
+          },
+        },
+        {
+          standard: {
+            query: {
+              multi_match: {
+                query: "crime",
+                fields: ["plot", "title"],
+              },
+            },
+          },
+        },
+        {
+          knn: {
+            field: "vector",
+            query_vector: [10, 22, 77],
+            k: 10,
+            num_candidates: 10,
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc b/docs/doc_examples/745864ef2427188241a4702b94ea57be.asciidoc
similarity index 95%
rename from docs/doc_examples/a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc
rename to docs/doc_examples/745864ef2427188241a4702b94ea57be.asciidoc
index 2f5f16ec0..a33976eea 100644
--- a/docs/doc_examples/a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc
+++ b/docs/doc_examples/745864ef2427188241a4702b94ea57be.asciidoc
@@ -11,7 +11,7 @@ const response = await client.search({
       filter: {
         range: {
           price: {
-            to: "500",
+            lte: "500",
           },
         },
       },
diff --git a/docs/doc_examples/7b1b947bddd7e78f77da265f7e645a61.asciidoc b/docs/doc_examples/7b1b947bddd7e78f77da265f7e645a61.asciidoc
new
file mode 100644 index 000000000..56a48d68d --- /dev/null +++ b/docs/doc_examples/7b1b947bddd7e78f77da265f7e645a61.asciidoc @@ -0,0 +1,51 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000004", + mappings: { + properties: { + metrics: { + subobjects: "auto", + properties: { + time: { + type: "object", + properties: { + min: { + type: "long", + }, + }, + }, + to: { + type: "object", + properties: { + inner_metrics: { + type: "object", + subobjects: "auto", + properties: { + time: { + type: "object", + properties: { + max: { + type: "long", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.getMapping({ + index: "my-index-000004", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc b/docs/doc_examples/8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc similarity index 94% rename from docs/doc_examples/9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc rename to docs/doc_examples/8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc index b6c3a9585..12610d1a5 100644 --- a/docs/doc_examples/9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc +++ b/docs/doc_examples/8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc @@ -17,7 +17,7 @@ const response = await client.search({ { range: { "result.execution_time": { - from: "now-10s", + gte: "now-10s", }, }, }, diff --git a/docs/doc_examples/8477e77e4fad19af66f03f81b8f2592b.asciidoc b/docs/doc_examples/8477e77e4fad19af66f03f81b8f2592b.asciidoc new file mode 100644 index 000000000..246988f49 --- /dev/null +++ b/docs/doc_examples/8477e77e4fad19af66f03f81b8f2592b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + semantic: { + field: "inference_field", + query: "mountain lake", + inner_hits: { + from: 1, + size: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc b/docs/doc_examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc similarity index 73% rename from docs/doc_examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc rename to docs/doc_examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc index f758ada37..b5190e9a8 100644 --- a/docs/doc_examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc +++ b/docs/doc_examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc @@ -9,7 +9,11 @@ const response = await client.inference.put({ inference_config: { service: "elser", service_settings: { - num_allocations: 1, + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 3, + max_number_of_allocations: 10, + }, num_threads: 1, }, }, diff --git a/docs/doc_examples/8d750dfc067b1184c32a2423c60e4d06.asciidoc b/docs/doc_examples/8d750dfc067b1184c32a2423c60e4d06.asciidoc new file mode 100644 index 000000000..1e0682cbf --- /dev/null +++ b/docs/doc_examples/8d750dfc067b1184c32a2423c60e4d06.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + semantic: { + field: "inference_field", + query: "mountain lake", + inner_hits: {}, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc b/docs/doc_examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc new file mode 100644 index 000000000..39f61a37a --- /dev/null +++ b/docs/doc_examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "restaurants", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + multi_match: { + query: "Austria", + fields: ["city", "region"], + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [10, 22, 77], + k: 10, + num_candidates: 10, + }, + }, + ], + rank_constant: 1, + rank_window_size: 50, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/971fd23adb81bb5842c7750e0379336a.asciidoc b/docs/doc_examples/971fd23adb81bb5842c7750e0379336a.asciidoc new file mode 100644 index 000000000..e1b27bc79 --- /dev/null +++ b/docs/doc_examples/971fd23adb81bb5842c7750e0379336a.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "movies", + retriever: { + text_similarity_reranker: { + retriever: { + standard: { + query: { + match: { + genre: "drama", + }, + }, + }, + }, + field: "plot", + inference_id: "my-msmarco-minilm-model", + inference_text: "films that explore psychological depths", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9dfe3b02bd15409b4b8b36e9756e8f94.asciidoc b/docs/doc_examples/9dfe3b02bd15409b4b8b36e9756e8f94.asciidoc new file mode 100644 index 000000000..c55dc60ae --- /dev/null +++ b/docs/doc_examples/9dfe3b02bd15409b4b8b36e9756e8f94.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index", + id: "lake_tahoe", + document: { + inference_field: [ + "Lake Tahoe is the largest alpine lake in North America", + "When hiking in the area, please be on alert for bears", + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc b/docs/doc_examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc new file mode 100644 index 000000000..fffbd6549 --- /dev/null +++ b/docs/doc_examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + retriever: { + text_similarity_reranker: { + retriever: { + standard: { + query: { + match_phrase: { + text: "landmark in Paris", + }, + }, + }, + }, + field: "text", + inference_id: "my-cohere-rerank-model", + inference_text: "Most famous landmark in Paris", + rank_window_size: 100, + min_score: 0.5, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc b/docs/doc_examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc new file mode 100644 index 000000000..abe4e885c --- /dev/null +++ b/docs/doc_examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples 
+ +[source, js] +---- +const response = await client.search({ + index: "restaurants", + retriever: { + standard: { + query: { + bool: { + should: [ + { + match: { + region: "Austria", + }, + }, + ], + filter: [ + { + term: { + year: "2019", + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dbb8fa2b8af6db66cf75ca4b83c0fb21.asciidoc b/docs/doc_examples/dbb8fa2b8af6db66cf75ca4b83c0fb21.asciidoc new file mode 100644 index 000000000..db6a26798 --- /dev/null +++ b/docs/doc_examples/dbb8fa2b8af6db66cf75ca4b83c0fb21.asciidoc @@ -0,0 +1,68 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000002", + mappings: { + properties: { + metrics: { + type: "object", + subobjects: "auto", + properties: { + inner: { + type: "object", + enabled: false, + }, + nested: { + type: "nested", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000002", + id: "metric_1", + document: { + "metrics.time": 100, + "metrics.time.min": 10, + "metrics.time.max": 900, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000002", + id: "metric_2", + document: { + metrics: { + time: 100, + "time.min": 10, + "time.max": 900, + inner: { + foo: "bar", + "path.to.some.field": "baz", + }, + nested: [ + { + id: 10, + }, + { + id: 1, + }, + ], + }, + }, +}); +console.log(response2); + +const response3 = await client.indices.getMapping({ + index: "my-index-000002", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc b/docs/doc_examples/dbce6cb1eaf9b2cc36b7f9a13afc63ea.asciidoc similarity index 62% rename from docs/doc_examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc rename to docs/doc_examples/dbce6cb1eaf9b2cc36b7f9a13afc63ea.asciidoc index eb0ee1488..00f3a3a14 100644 --- a/docs/doc_examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc +++ b/docs/doc_examples/dbce6cb1eaf9b2cc36b7f9a13afc63ea.asciidoc @@ -33,6 +33,25 @@ const response = await client.simulate.ingest({ ], }, }, + component_template_substitutions: { + "my-component-template": { + template: { + mappings: { + dynamic: "true", + properties: { + field3: { + type: "keyword", + }, + }, + }, + settings: { + index: { + default_pipeline: "my-pipeline", + }, + }, + }, + }, + }, }, }); console.log(response); diff --git a/docs/doc_examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc b/docs/doc_examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc new file mode 100644 index 000000000..153d1a4ff --- /dev/null +++ b/docs/doc_examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.postCalendarEvents({ + calendar_id: "dst-germany", + events: [ + { + description: "Fall 2024", + start_time: 1729994400000, + end_time: 1730167200000, + skip_result: false, + skip_model_update: false, + force_time_shift: -3600, + }, + { + description: "Spring 2025", + start_time: 1743296400000, + end_time: 1743469200000, + skip_result: false, + skip_model_update: false, + force_time_shift: 3600, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/e04267ffc50d916800b919c6cdc9622a.asciidoc 
b/docs/doc_examples/e04267ffc50d916800b919c6cdc9622a.asciidoc new file mode 100644 index 000000000..5893b52d1 --- /dev/null +++ b/docs/doc_examples/e04267ffc50d916800b919c6cdc9622a.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.mapping.ignore_above": 256, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc b/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc new file mode 100644 index 000000000..92b9b7363 --- /dev/null +++ b/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.simulate.ingest({ + body: { + docs: [ + { + _index: "my-index", + _id: "123", + _source: { + foo: "foo", + }, + }, + { + _index: "my-index", + _id: "456", + _source: { + bar: "rab", + }, + }, + ], + component_template_substitutions: { + "my-mappings_template": { + template: { + mappings: { + dynamic: "strict", + properties: { + foo: { + type: "keyword", + }, + bar: { + type: "keyword", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8525c2460a577edfef156c13f55b8a7.asciidoc b/docs/doc_examples/f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc similarity index 95% rename from docs/doc_examples/f8525c2460a577edfef156c13f55b8a7.asciidoc rename to docs/doc_examples/f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc index 96afb8772..a673f8da2 100644 --- a/docs/doc_examples/f8525c2460a577edfef156c13f55b8a7.asciidoc +++ b/docs/doc_examples/f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc @@ -11,7 +11,7 @@ const response = await client.search({ filter: { range: { price: { - to: "500", + lte: "500", }, }, }, diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index ccd0a029c..c9fef6ef1 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -941,7 +941,7 @@ client.search({ ... }) ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. -** *`aggregations` (Optional, Record)*: Defines the aggregations that are run as part of the search request. +** *`aggregations` (Optional, Record)*: Defines the aggregations that are run as part of the search request. ** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*: Collapses search results the values of the specified field. ** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. ** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. @@ -1099,7 +1099,7 @@ client.searchMvt({ index, field, zoom, x, y }) ** *`zoom` (number)*: Zoom level for the vector tile to search ** *`x` (number)*: X coordinate for the vector tile to search ** *`y` (number)*: Y coordinate for the vector tile to search -** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid. +** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid. Supports the following aggregation types: - avg @@ -1478,7 +1478,7 @@ client.asyncSearch.submit({ ... 
})

* *Request (object):*
** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices
-** *`aggregations` (Optional, Record)*
+** *`aggregations` (Optional, Record)*
** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*
** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit.
** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins.

@@ -3669,7 +3669,7 @@ client.fleet.search({ index })

* *Request (object):*
** *`index` (string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index.
-** *`aggregations` (Optional, Record)*
+** *`aggregations` (Optional, Record)*
** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*
** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit.
** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins.

@@ -5056,7 +5056,7 @@ a new date field is added instead of string.
not used at all by Elasticsearch, but can be used to store application-specific metadata.
** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields.
-** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include:
+** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include:

- Field name
- Field data type

@@ -5674,6 +5674,8 @@ client.inference.put({ inference_id })

[discrete]
==== delete_geoip_database
Deletes a geoip database configuration.
+
+{ref}/delete-geoip-database-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.deleteGeoipDatabase({ id })

@@ -5723,6 +5725,8 @@ client.ingest.geoIpStats()

[discrete]
==== get_geoip_database
Returns information about one or more geoip database configurations.
+
+{ref}/get-geoip-database-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.getGeoipDatabase({ ... })

@@ -5776,6 +5780,8 @@ client.ingest.processorGrok()

[discrete]
==== put_geoip_database
Returns information about one or more geoip database configurations.
+
+{ref}/put-geoip-database-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.putGeoipDatabase({ id, name, maxmind })
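A minimal call against the new geoip configuration API might look like the following sketch; the configuration ID, database name, and MaxMind account ID are invented illustration values, not part of the patch:

[source,js]
----
const response = await client.ingest.putGeoipDatabase({
  id: "my-geoip-config", // hypothetical configuration ID
  name: "GeoLite2-City", // assumed MaxMind database name
  maxmind: { account_id: "123456" }, // assumed MaxMind account settings
});
console.log(response);
----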
@@ -7154,7 +7160,7 @@ client.ml.postCalendarEvents({ calendar_id, events })

* *Request (object):*
** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
-** *`events` ({ calendar_id, event_id, description, end_time, start_time }[])*: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.
+** *`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])*: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.

[discrete]
==== post_data

@@ -7366,7 +7372,7 @@ client.ml.putDatafeed({ datafeed_id })
** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
-** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches.
+** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches.
Support for aggregations is limited and should be used only with low cardinality data.
** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed.

@@ -7902,7 +7908,7 @@ client.ml.updateDatafeed({ datafeed_id })
** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
-** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only
+** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only
with low cardinality data.
** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of

@@ -8539,7 +8545,7 @@ client.rollup.rollupSearch({ index })

* *Request (object):*
** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL.
-** *`aggregations` (Optional, Record)*: Specifies aggregations.
+** *`aggregations` (Optional, Record)*: Specifies aggregations.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query.
** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data.
** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response

@@ -8585,7 +8591,8 @@ If set to `false`, the API returns immediately and the indexer is stopped asynch
=== search_application
[discrete]
==== delete
-Deletes a search application.
+Delete a search application.
+Remove a search application and its associated alias. Indices attached to the search application are not removed.

{ref}/delete-search-application.html[Endpoint documentation]
[source,ts]
----
client.searchApplication.delete({ name })

@@ -8602,6 +8609,7 @@ client.searchApplication.delete({ name })

[discrete]
==== delete_behavioral_analytics
Delete a behavioral analytics collection.
+The associated data stream is also deleted.

{ref}/delete-analytics-collection.html[Endpoint documentation]
[source,ts]
----
client.searchApplication.deleteBehavioralAnalytics({ name })

[discrete]
==== get
-Returns the details about a search application
+Get search application details.

{ref}/get-search-application.html[Endpoint documentation]
[source,ts]
----
client.searchApplication.get({ name })

[discrete]
==== get_behavioral_analytics
-Returns the existing behavioral analytics collections.
+Get behavioral analytics collections.

{ref}/list-analytics-collection.html[Endpoint documentation]
[source,ts]
----
client.searchApplication.getBehavioralAnalytics({ ... })

@@ -8678,7 +8686,7 @@ client.searchApplication.postBehavioralAnalyticsEvent()

[discrete]
==== put
-Creates or updates a search application.
+Create or update a search application.

{ref}/put-search-application.html[Endpoint documentation]
[source,ts]
----
client.searchApplication.put({ name })

@@ -8696,7 +8704,7 @@ client.searchApplication.put({ name })

[discrete]
==== put_behavioral_analytics
-Creates a behavioral analytics collection.
+Create a behavioral analytics collection.

{ref}/put-analytics-collection.html[Endpoint documentation]
[source,ts]
----
client.searchApplication.putBehavioralAnalytics({ name })

@@ -8723,7 +8731,9 @@ client.searchApplication.renderQuery()

[discrete]
==== search
-Perform a search against a search application.
+Run a search application search.
+Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application or default template.
+Unspecified template parameters are assigned their default values if applicable.

{ref}/search-application-search.html[Endpoint documentation]
[source,ts]
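A concrete invocation of the reworded API might look like the following sketch; the application name and template parameters are invented for illustration:

[source,js]
----
const response = await client.searchApplication.search({
  name: "my-search-app", // hypothetical search application name
  params: { query_string: "wind turbines" }, // assumed template parameters
});
console.log(response);
----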
@@ -9662,6 +9672,7 @@ client.security.putRole({ name })

==== Arguments

* *Request (object):*
** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role.
** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges.
** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries.
+** *`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])*: A list of remote indices permissions entries.
** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use.
** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected.
** *`description` (Optional, string)*: Optional description of the role descriptor
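A sketch of a role that uses the new `remote_indices` entry; the role name, remote cluster alias, and index pattern are invented for illustration:

[source,js]
----
const response = await client.security.putRole({
  name: "cross-cluster-reader", // hypothetical role name
  remote_indices: [
    {
      clusters: ["my-remote-cluster"], // assumed remote cluster alias
      names: ["logs-*"],
      privileges: ["read"],
    },
  ],
});
console.log(response);
----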
@@ -10603,7 +10614,7 @@ precedence over mapped fields with the same name.
** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for an async or saved synchronous search.
** *`keep_on_completion` (Optional, boolean)*: If true, Elasticsearch stores synchronous searches if you also specify the wait_for_completion_timeout parameter. If false, Elasticsearch only stores async searches that don’t finish before the wait_for_completion_timeout.
** *`index_using_frozen` (Optional, boolean)*: If true, the search can run on frozen indices. Defaults to false.
-** *`format` (Optional, string)*: Format for the response.
+** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))*: Format for the response.

[discrete]
==== translate

@@ -11500,7 +11511,7 @@ client.xpack.info({ ... })

==== Arguments

* *Request (object):*
-** *`categories` (Optional, string[])*: A list of the information categories to include in the response. For example, `build,license,features`.
+** *`categories` (Optional, Enum("build" | "features" | "license")[])*: A list of the information categories to include in the response. For example, `build,license,features`.
** *`accept_enterprise` (Optional, boolean)*: If this param is used it must be set to true
** *`human` (Optional, boolean)*: Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line.

diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts
index 31c3fc3dd..442dcbcfe 100644
--- a/src/api/api/ingest.ts
+++ b/src/api/api/ingest.ts
@@ -46,7 +46,7 @@ export default class Ingest {

  /**
   * Deletes a geoip database configuration.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation}
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-geoip-database-api.html | Elasticsearch API documentation}
   */
  async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise
  async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise>

@@ -140,7 +140,7 @@ export default class Ingest {

  /**
   * Returns information about one or more geoip database configurations.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-geoip-database-api.html | Elasticsearch API documentation} */ async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -250,7 +250,7 @@ export default class Ingest { /** * Returns information about one or more geoip database configurations. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-geoip-database-api.html | Elasticsearch API documentation} */ async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts index 52c8e9546..cd22d99c8 100644 --- a/src/api/api/search_application.ts +++ b/src/api/api/search_application.ts @@ -45,7 +45,7 @@ export default class SearchApplication { } /** - * Deletes a search application. + * Delete a search application. Remove a search application and its associated alias. Indices attached to the search application are not removed. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-search-application.html | Elasticsearch API documentation} */ async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class SearchApplication { } /** - * Delete a behavioral analytics collection. + * Delete a behavioral analytics collection. The associated data stream is also deleted. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-analytics-collection.html | Elasticsearch API documentation} */ async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -109,7 +109,7 @@ export default class SearchApplication { } /** - * Returns the details about a search application + * Get search application details. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-search-application.html | Elasticsearch API documentation} */ async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -141,7 +141,7 @@ export default class SearchApplication { } /** - * Returns the existing behavioral analytics collections. + * Get behavioral analytics collections. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-analytics-collection.html | Elasticsearch API documentation}
   */
  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise

@@ -244,7 +244,7 @@ export default class SearchApplication {
  }

  /**
-   * Creates or updates a search application.
+   * Create or update a search application.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html | Elasticsearch API documentation}
   */
  async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise

@@ -281,7 +281,7 @@ export default class SearchApplication {
  }

  /**
-   * Creates a behavioral analytics collection.
+   * Create a behavioral analytics collection.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-analytics-collection.html | Elasticsearch API documentation}
   */
  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise

@@ -345,7 +345,7 @@ export default class SearchApplication {
  }

  /**
-   * Perform a search against a search application.
+   * Run a search application search. Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-search.html | Elasticsearch API documentation}
   */
  async search> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise>

diff --git a/src/api/api/security.ts b/src/api/api/security.ts
index b5e25f38a..a3a602545 100644
--- a/src/api/api/security.ts
+++ b/src/api/api/security.ts
@@ -1685,7 +1685,7 @@ export default class Security {
  async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise
  async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise {
    const acceptedPath: string[] = ['name']
-    const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'metadata', 'run_as', 'description', 'transient_metadata']
+    const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'remote_indices', 'metadata', 'run_as', 'description', 'transient_metadata']
    const querystring: Record = {}
    // @ts-expect-error
    const userBody: any = params?.body

diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts
index cd0f53dcf..9e8419ebf 100644
--- a/src/api/api/snapshot.ts
+++ b/src/api/api/snapshot.ts
@@ -374,6 +374,38 @@ export default class Snapshot {
    return await this.transport.request({ path, method, querystring, body, meta }, options)
  }

+  /**
+   * Verifies the integrity of the contents of a snapshot repository
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
+   */
+  async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise
+  async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise {
+    const acceptedPath: string[] = ['name']
+    const querystring: Record = {}
+    const body = undefined
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_snapshot/${encodeURIComponent(params.name.toString())}/_verify_integrity`
+    const meta: TransportRequestMetadata = {
+      name: 'snapshot.repository_verify_integrity',
+      pathParts: {
+        name: params.name
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
  /**
   * Restores a snapshot.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}

diff --git a/src/api/types.ts b/src/api/types.ts
index b1eb67bd2..3b0eb9061 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -1283,6 +1283,10 @@ export interface SearchAggregationProfileDebug {
  segments_counted?: integer
  segments_collected?: integer
  map_reducer?: string
+  brute_force_used?: integer
+  dynamic_pruning_attempted?: integer
+  dynamic_pruning_used?: integer
+  skipped_due_to_no_data?: integer
}

export interface SearchAggregationProfileDelegateDebugFilter {

@@ -1335,6 +1339,39 @@ export interface SearchCompletionSuggester extends SearchSuggesterBase {

export type SearchContext = string | GeoLocation

+export interface SearchDfsKnnProfile {
+  vector_operations_count?: long
+  query: SearchKnnQueryProfileResult[]
+  rewrite_time: long
+  collector: SearchKnnCollectorResult[]
+}
+
+export interface SearchDfsProfile {
+  statistics?: SearchDfsStatisticsProfile
+  knn?: SearchDfsKnnProfile[]
+}
+
+export interface SearchDfsStatisticsBreakdown {
+  collection_statistics: long
+  collection_statistics_count: long
+  create_weight: long
+  create_weight_count: long
+  rewrite: long
+  rewrite_count: long
+  term_statistics: long
+  term_statistics_count: long
+}
+
+export interface SearchDfsStatisticsProfile {
+  type: string
+  description: string
+  time?: Duration
+  time_in_nanos: DurationValue
+  breakdown: SearchDfsStatisticsBreakdown
+  debug?: Record
+  children?: SearchDfsStatisticsProfile[]
+}
+
export interface SearchDirectGenerator {
  field: Field
  max_edits?: integer

@@ -1442,10 +1479,10 @@ export interface SearchHit {
  fields?: Record
  highlight?: Record
  inner_hits?: Record
-  matched_queries?: string[] | Record
+  matched_queries?: string[] | Record
  _nested?: SearchNestedIdentity
  _ignored?: string[]
-  ignored_field_values?: Record
+  ignored_field_values?: Record
  _shard?: string
  _node?: string
  _routing?: string

@@ -1486,6 +1523,47 @@ export interface SearchInnerHitsResult {
  hits: SearchHitsMetadata
}

+export interface SearchKnnCollectorResult {
+  name: string
+  reason: string
+  time?: Duration
+  time_in_nanos: DurationValue
+  children?:
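A usage sketch for the method added above; the repository name is an invented example, and the optional tuning flags such as `verify_blob_contents` come from the request type defined further below:

[source,js]
----
const response = await client.snapshot.repositoryVerifyIntegrity({
  name: "my_repository", // hypothetical snapshot repository name
});
console.log(response);
----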
SearchKnnCollectorResult[] +} + +export interface SearchKnnQueryProfileBreakdown { + advance: long + advance_count: long + build_scorer: long + build_scorer_count: long + compute_max_score: long + compute_max_score_count: long + count_weight: long + count_weight_count: long + create_weight: long + create_weight_count: long + match: long + match_count: long + next_doc: long + next_doc_count: long + score: long + score_count: long + set_min_competitive_score: long + set_min_competitive_score_count: long + shallow_advance: long + shallow_advance_count: long +} + +export interface SearchKnnQueryProfileResult { + type: string + description: string + time?: Duration + time_in_nanos: DurationValue + breakdown: SearchKnnQueryProfileBreakdown + debug?: Record + children?: SearchKnnQueryProfileResult[] +} + export interface SearchLaplaceSmoothingModel { alpha: double } @@ -1576,6 +1654,8 @@ export interface SearchQueryBreakdown { score_count: long compute_max_score: long compute_max_score_count: long + count_weight: long + count_weight_count: long set_min_competitive_score: long set_min_competitive_score_count: long } @@ -1616,9 +1696,14 @@ export interface SearchSearchProfile { export interface SearchShardProfile { aggregations: SearchAggregationProfile[] + cluster: string + dfs?: SearchDfsProfile + fetch?: SearchFetchProfile id: string + index: IndexName + node_id: NodeId searches: SearchSearchProfile[] - fetch?: SearchFetchProfile + shard_id: long } export interface SearchSmoothingModelContainer { @@ -2936,6 +3021,7 @@ export interface AggregationsAggregationContainer { rare_terms?: AggregationsRareTermsAggregation rate?: AggregationsRateAggregation reverse_nested?: AggregationsReverseNestedAggregation + random_sampler?: AggregationsRandomSamplerAggregation sampler?: AggregationsSamplerAggregation scripted_metric?: AggregationsScriptedMetricAggregation serial_diff?: AggregationsSerialDifferencingAggregation @@ -3823,6 +3909,12 @@ export interface AggregationsPipelineAggregationBase extends AggregationsBucketP gap_policy?: AggregationsGapPolicy } +export interface AggregationsRandomSamplerAggregation extends AggregationsBucketAggregationBase { + probability: double + seed?: integer + shard_seed?: integer +} + export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggregateBase { } @@ -4644,14 +4736,14 @@ export interface AnalysisKeywordAnalyzer { export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase { type: 'keyword_marker' ignore_case?: boolean - keywords?: string[] + keywords?: string | string[] keywords_path?: string keywords_pattern?: string } export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase { type: 'keyword' - buffer_size: integer + buffer_size?: integer } export interface AnalysisKuromojiAnalyzer { @@ -5210,7 +5302,6 @@ export interface MappingConstantKeywordProperty extends MappingPropertyBase { export interface MappingCorePropertyBase extends MappingPropertyBase { copy_to?: Fields - similarity?: string store?: boolean } @@ -5291,7 +5382,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { index?: boolean index_options?: MappingIndexOptions index_phrases?: boolean - index_prefixes?: MappingTextIndexPrefixes + index_prefixes?: MappingTextIndexPrefixes | null norms?: boolean position_increment_gap?: integer search_analyzer?: string @@ -5451,6 +5542,7 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { normalizer?: string norms?: boolean null_value?: string + 
similarity?: string | null split_queries_on_whitespace?: boolean time_series_dimension?: boolean type: 'keyword' @@ -5579,6 +5671,7 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase norms?: boolean search_analyzer?: string search_quote_analyzer?: string + similarity?: string | null term_vector?: MappingTermVectorOption type: 'search_as_you_type' } @@ -5644,11 +5737,12 @@ export interface MappingTextProperty extends MappingCorePropertyBase { index?: boolean index_options?: MappingIndexOptions index_phrases?: boolean - index_prefixes?: MappingTextIndexPrefixes + index_prefixes?: MappingTextIndexPrefixes | null norms?: boolean position_increment_gap?: integer search_analyzer?: string search_quote_analyzer?: string + similarity?: string | null term_vector?: MappingTermVectorOption type: 'text' } @@ -6442,9 +6536,10 @@ export type QueryDslTermsQuery = QueryDslTermsQueryKeys export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup export interface QueryDslTermsSetQuery extends QueryDslQueryBase { + minimum_should_match?: MinimumShouldMatch minimum_should_match_field?: Field minimum_should_match_script?: Script | string - terms: string[] + terms: FieldValue[] } export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { @@ -13343,6 +13438,9 @@ export interface MlCalendarEvent { description: string end_time: DateTime start_time: DateTime + skip_result?: boolean + skip_model_update?: boolean + force_time_shift?: integer } export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition @@ -13747,9 +13845,7 @@ export interface MlDelayedDataCheckConfig { export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated' -export type MlDeploymentAssignmentState = 'starting' | 'started' | 'stopping' | 'failed' - -export type MlDeploymentState = 'started' | 'starting' | 'stopping' +export type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed' export interface MlDetectionRule { actions?: MlRuleAction[] @@ -14339,7 +14435,7 @@ export interface MlTrainedModelDeploymentStats { rejected_execution_count: integer reason: string start_time: EpochTime - state: MlDeploymentState + state: MlDeploymentAssignmentState threads_per_allocation: integer timeout_count: integer } @@ -17131,6 +17227,15 @@ export interface SecurityRealmInfo { type: string } +export interface SecurityRemoteIndicesPrivileges { + clusters: Names + field_security?: SecurityFieldSecurity + names: Indices + privileges: SecurityIndexPrivilege[] + query?: SecurityIndicesPrivilegesQuery + allow_restricted_indices?: boolean +} + export interface SecurityRoleDescriptor { cluster?: SecurityClusterPrivilege[] indices?: SecurityIndicesPrivileges[] @@ -17783,6 +17888,7 @@ export interface SecurityPutRoleRequest extends RequestBase { cluster?: SecurityClusterPrivilege[] global?: Record indices?: SecurityIndicesPrivileges[] + remote_indices?: SecurityRemoteIndicesPrivileges[] metadata?: Metadata run_as?: string[] description?: string @@ -18408,6 +18514,7 @@ export interface SnapshotSnapshotShardFailure { node_id?: Id reason: string shard_id: Id + index_uuid: Id status: string } @@ -18558,6 +18665,20 @@ export interface SnapshotGetRepositoryRequest extends RequestBase { export type SnapshotGetRepositoryResponse = Record +export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase { + name: Names + meta_thread_pool_concurrency?: integer + blob_thread_pool_concurrency?: integer + snapshot_verification_concurrency?: 
integer + index_verification_concurrency?: integer + index_snapshot_verification_concurrency?: integer + max_failed_shard_snapshots?: integer + verify_blob_contents?: boolean + max_bytes_per_sec?: string +} + +export type SnapshotRepositoryVerifyIntegrityResponse = any + export interface SnapshotRestoreRequest extends RequestBase { repository: Name snapshot: Name @@ -18662,7 +18783,7 @@ export interface SqlGetAsyncStatusResponse { } export interface SqlQueryRequest extends RequestBase { - format?: string + format?: SqlQuerySqlFormat catalog?: string columnar?: boolean cursor?: string @@ -18690,6 +18811,8 @@ export interface SqlQueryResponse { rows: SqlRow[] } +export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' + export interface SqlTranslateRequest extends RequestBase { fetch_size?: integer filter?: QueryDslQueryContainer @@ -19977,7 +20100,7 @@ export interface XpackInfoNativeCodeInformation { } export interface XpackInfoRequest extends RequestBase { - categories?: string[] + categories?: XpackInfoXPackCategory[] accept_enterprise?: boolean human?: boolean } @@ -19989,6 +20112,8 @@ export interface XpackInfoResponse { tagline: string } +export type XpackInfoXPackCategory = 'build' | 'features' | 'license' + export interface XpackUsageAnalytics extends XpackUsageBase { stats: XpackUsageAnalyticsStatistics } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 3d846a344..32b8d047e 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -1341,6 +1341,10 @@ export interface SearchAggregationProfileDebug { segments_counted?: integer segments_collected?: integer map_reducer?: string + brute_force_used?: integer + dynamic_pruning_attempted?: integer + dynamic_pruning_used?: integer + skipped_due_to_no_data?: integer } export interface SearchAggregationProfileDelegateDebugFilter { @@ -1393,6 +1397,39 @@ export interface SearchCompletionSuggester extends SearchSuggesterBase { export type SearchContext = string | GeoLocation +export interface SearchDfsKnnProfile { + vector_operations_count?: long + query: SearchKnnQueryProfileResult[] + rewrite_time: long + collector: SearchKnnCollectorResult[] +} + +export interface SearchDfsProfile { + statistics?: SearchDfsStatisticsProfile + knn?: SearchDfsKnnProfile[] +} + +export interface SearchDfsStatisticsBreakdown { + collection_statistics: long + collection_statistics_count: long + create_weight: long + create_weight_count: long + rewrite: long + rewrite_count: long + term_statistics: long + term_statistics_count: long +} + +export interface SearchDfsStatisticsProfile { + type: string + description: string + time?: Duration + time_in_nanos: DurationValue + breakdown: SearchDfsStatisticsBreakdown + debug?: Record + children?: SearchDfsStatisticsProfile[] +} + export interface SearchDirectGenerator { field: Field max_edits?: integer @@ -1500,10 +1537,10 @@ export interface SearchHit { fields?: Record highlight?: Record inner_hits?: Record - matched_queries?: string[] | Record + matched_queries?: string[] | Record _nested?: SearchNestedIdentity _ignored?: string[] - ignored_field_values?: Record + ignored_field_values?: Record _shard?: string _node?: string _routing?: string @@ -1544,6 +1581,47 @@ export interface SearchInnerHitsResult { hits: SearchHitsMetadata } +export interface SearchKnnCollectorResult { + name: string + reason: string + time?: Duration + time_in_nanos: DurationValue + children?: SearchKnnCollectorResult[] +} + +export interface 
SearchKnnQueryProfileBreakdown { + advance: long + advance_count: long + build_scorer: long + build_scorer_count: long + compute_max_score: long + compute_max_score_count: long + count_weight: long + count_weight_count: long + create_weight: long + create_weight_count: long + match: long + match_count: long + next_doc: long + next_doc_count: long + score: long + score_count: long + set_min_competitive_score: long + set_min_competitive_score_count: long + shallow_advance: long + shallow_advance_count: long +} + +export interface SearchKnnQueryProfileResult { + type: string + description: string + time?: Duration + time_in_nanos: DurationValue + breakdown: SearchKnnQueryProfileBreakdown + debug?: Record + children?: SearchKnnQueryProfileResult[] +} + export interface SearchLaplaceSmoothingModel { alpha: double } @@ -1634,6 +1712,8 @@ export interface SearchQueryBreakdown { score_count: long compute_max_score: long compute_max_score_count: long + count_weight: long + count_weight_count: long set_min_competitive_score: long set_min_competitive_score_count: long } @@ -1674,9 +1754,14 @@ export interface SearchSearchProfile { export interface SearchShardProfile { aggregations: SearchAggregationProfile[] + cluster: string + dfs?: SearchDfsProfile + fetch?: SearchFetchProfile id: string + index: IndexName + node_id: NodeId searches: SearchSearchProfile[] - fetch?: SearchFetchProfile + shard_id: long } export interface SearchSmoothingModelContainer { @@ -3012,6 +3097,7 @@ export interface AggregationsAggregationContainer { rare_terms?: AggregationsRareTermsAggregation rate?: AggregationsRateAggregation reverse_nested?: AggregationsReverseNestedAggregation + random_sampler?: AggregationsRandomSamplerAggregation sampler?: AggregationsSamplerAggregation scripted_metric?: AggregationsScriptedMetricAggregation serial_diff?: AggregationsSerialDifferencingAggregation @@ -3899,6 +3985,12 @@ export interface AggregationsPipelineAggregationBase extends AggregationsBucketP gap_policy?: AggregationsGapPolicy } +export interface AggregationsRandomSamplerAggregation extends AggregationsBucketAggregationBase { + probability: double + seed?: integer + shard_seed?: integer +} + export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggregateBase { } @@ -4720,14 +4812,14 @@ export interface AnalysisKeywordAnalyzer { export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase { type: 'keyword_marker' ignore_case?: boolean - keywords?: string[] + keywords?: string | string[] keywords_path?: string keywords_pattern?: string } export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase { type: 'keyword' - buffer_size: integer + buffer_size?: integer } export interface AnalysisKuromojiAnalyzer { @@ -5286,7 +5378,6 @@ export interface MappingConstantKeywordProperty extends MappingPropertyBase { export interface MappingCorePropertyBase extends MappingPropertyBase { copy_to?: Fields - similarity?: string store?: boolean } @@ -5367,7 +5458,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { index?: boolean index_options?: MappingIndexOptions index_phrases?: boolean - index_prefixes?: MappingTextIndexPrefixes + index_prefixes?: MappingTextIndexPrefixes | null norms?: boolean position_increment_gap?: integer search_analyzer?: string @@ -5527,6 +5618,7 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { normalizer?: string norms?: boolean null_value?: string + similarity?: string | null 
split_queries_on_whitespace?: boolean time_series_dimension?: boolean type: 'keyword' @@ -5655,6 +5747,7 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase norms?: boolean search_analyzer?: string search_quote_analyzer?: string + similarity?: string | null term_vector?: MappingTermVectorOption type: 'search_as_you_type' } @@ -5720,11 +5813,12 @@ export interface MappingTextProperty extends MappingCorePropertyBase { index?: boolean index_options?: MappingIndexOptions index_phrases?: boolean - index_prefixes?: MappingTextIndexPrefixes + index_prefixes?: MappingTextIndexPrefixes | null norms?: boolean position_increment_gap?: integer search_analyzer?: string search_quote_analyzer?: string + similarity?: string | null term_vector?: MappingTermVectorOption type: 'text' } @@ -6518,9 +6612,10 @@ export type QueryDslTermsQuery = QueryDslTermsQueryKeys export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup export interface QueryDslTermsSetQuery extends QueryDslQueryBase { + minimum_should_match?: MinimumShouldMatch minimum_should_match_field?: Field minimum_should_match_script?: Script | string - terms: string[] + terms: FieldValue[] } export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { @@ -13584,6 +13679,9 @@ export interface MlCalendarEvent { description: string end_time: DateTime start_time: DateTime + skip_result?: boolean + skip_model_update?: boolean + force_time_shift?: integer } export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition @@ -13988,9 +14086,7 @@ export interface MlDelayedDataCheckConfig { export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated' -export type MlDeploymentAssignmentState = 'starting' | 'started' | 'stopping' | 'failed' - -export type MlDeploymentState = 'started' | 'starting' | 'stopping' +export type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed' export interface MlDetectionRule { actions?: MlRuleAction[] @@ -14580,7 +14676,7 @@ export interface MlTrainedModelDeploymentStats { rejected_execution_count: integer reason: string start_time: EpochTime - state: MlDeploymentState + state: MlDeploymentAssignmentState threads_per_allocation: integer timeout_count: integer } @@ -17508,6 +17604,15 @@ export interface SecurityRealmInfo { type: string } +export interface SecurityRemoteIndicesPrivileges { + clusters: Names + field_security?: SecurityFieldSecurity + names: Indices + privileges: SecurityIndexPrivilege[] + query?: SecurityIndicesPrivilegesQuery + allow_restricted_indices?: boolean +} + export interface SecurityRoleDescriptor { cluster?: SecurityClusterPrivilege[] indices?: SecurityIndicesPrivileges[] @@ -18196,6 +18301,7 @@ export interface SecurityPutRoleRequest extends RequestBase { cluster?: SecurityClusterPrivilege[] global?: Record indices?: SecurityIndicesPrivileges[] + remote_indices?: SecurityRemoteIndicesPrivileges[] metadata?: Metadata run_as?: string[] description?: string @@ -18868,6 +18974,7 @@ export interface SnapshotSnapshotShardFailure { node_id?: Id reason: string shard_id: Id + index_uuid: Id status: string } @@ -19025,6 +19132,20 @@ export interface SnapshotGetRepositoryRequest extends RequestBase { export type SnapshotGetRepositoryResponse = Record +export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase { + name: Names + meta_thread_pool_concurrency?: integer + blob_thread_pool_concurrency?: integer + snapshot_verification_concurrency?: integer + 
index_verification_concurrency?: integer + index_snapshot_verification_concurrency?: integer + max_failed_shard_snapshots?: integer + verify_blob_contents?: boolean + max_bytes_per_sec?: string +} + +export type SnapshotRepositoryVerifyIntegrityResponse = any + export interface SnapshotRestoreRequest extends RequestBase { repository: Name snapshot: Name @@ -19135,7 +19256,7 @@ export interface SqlGetAsyncStatusResponse { } export interface SqlQueryRequest extends RequestBase { - format?: string + format?: SqlQuerySqlFormat /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { catalog?: string @@ -19166,6 +19287,8 @@ export interface SqlQueryResponse { rows: SqlRow[] } +export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' + export interface SqlTranslateRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { @@ -20484,7 +20607,7 @@ export interface XpackInfoNativeCodeInformation { } export interface XpackInfoRequest extends RequestBase { - categories?: string[] + categories?: XpackInfoXPackCategory[] accept_enterprise?: boolean human?: boolean } @@ -20496,6 +20619,8 @@ export interface XpackInfoResponse { tagline: string } +export type XpackInfoXPackCategory = 'build' | 'features' | 'license' + export interface XpackUsageAnalytics extends XpackUsageBase { stats: XpackUsageAnalyticsStatistics } From c274b1b32fff823886df36299b7e9530333bd81e Mon Sep 17 00:00:00 2001 From: Miguel Grinberg Date: Mon, 14 Oct 2024 17:13:55 +0100 Subject: [PATCH 391/647] Upgraded @types/node package to v18 (#2374) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 4e7eb5459..63bd50cea 100644 --- a/package.json +++ b/package.json @@ -57,7 +57,7 @@ "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", "@types/debug": "^4.1.7", "@types/ms": "^0.7.31", - "@types/node": "^17.0.31", + "@types/node": "^18.11.9", "@types/sinonjs__fake-timers": "^8.1.2", "@types/split2": "^3.2.1", "@types/stoppable": "^1.1.1", From 810e009202a6bdcea6a88348a982f6f8ade54c92 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 14 Oct 2024 11:27:22 -0500 Subject: [PATCH 392/647] Update Github actions to reflect security best practices (#2375) * Update Github actions to reflect security best practices * Upgrade @types/node --- .github/workflows/auto-merge.yml | 18 ------------------ .github/workflows/nodejs.yml | 18 ++++++++---------- .github/workflows/npm-publish.yml | 1 + .github/workflows/serverless-patch.yml | 10 ++++++---- package.json | 2 +- 5 files changed, 16 insertions(+), 33 deletions(-) delete mode 100644 .github/workflows/auto-merge.yml diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml deleted file mode 100644 index f283bd655..000000000 --- a/.github/workflows/auto-merge.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: Automerge - -on: - pull_request_review: - types: - - submitted - -jobs: - automerge: - runs-on: ubuntu-latest - if: github.event.review.state == 'approved' - steps: - - uses: reitermarkus/automerge@v2 - with: - token: ${{ secrets.GH_TOKEN }} - merge-method: squash - pull-request-author-associations: OWNER - review-author-associations: OWNER,CONTRIBUTOR diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 7dbec6fb0..dd93e3bdd 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ 
-12,6 +12,8 @@ jobs: src-only: "${{ steps.changes.outputs.src-only }}" steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - uses: dorny/paths-filter/@v3.0.2 id: changes with: @@ -35,6 +37,8 @@ jobs: steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - name: Use Node.js ${{ matrix.node-version }} uses: actions/setup-node@v4 @@ -59,6 +63,8 @@ jobs: steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - name: Use Node.js uses: actions/setup-node@v4 @@ -87,6 +93,8 @@ jobs: steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - name: Use Bun uses: oven-sh/setup-bun@v2 @@ -102,13 +110,3 @@ jobs: - name: Unit test run: | bun run test:unit-bun - - auto-approve: - name: Auto-approve - needs: [test, license] - runs-on: ubuntu-latest - permissions: - pull-requests: write - if: github.actor == 'elasticmachine' - steps: - - uses: hmarr/auto-approve-action@v4 diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index 0b6d77877..956b688ec 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -14,6 +14,7 @@ jobs: steps: - uses: actions/checkout@v4 with: + persist-credentials: false ref: ${{ github.event.inputs.branch }} - uses: actions/setup-node@v4 with: diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 9cf9926a4..f3ae92f9e 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -28,12 +28,14 @@ jobs: steps: - uses: actions/checkout@v4 with: + persist-credentials: false repository: elastic/elasticsearch-js ref: main path: stack fetch-depth: 0 - uses: actions/checkout@v4 with: + persist-credentials: false repository: elastic/elasticsearch-serverless-js ref: main path: serverless @@ -44,8 +46,8 @@ jobs: with: token: ${{ secrets.GH_TOKEN }} path: serverless - title: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' - commit-message: 'Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}' + title: "Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}" + commit-message: "Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}" body-path: /tmp/pr_body - draft: '${{ steps.apply-patch.outputs.PR_DRAFT }}' - add-paths: ':!*.rej' + draft: "${{ steps.apply-patch.outputs.PR_DRAFT }}" + add-paths: ":!*.rej" diff --git a/package.json b/package.json index 63bd50cea..2b4b5820a 100644 --- a/package.json +++ b/package.json @@ -57,7 +57,7 @@ "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", "@types/debug": "^4.1.7", "@types/ms": "^0.7.31", - "@types/node": "^18.11.9", + "@types/node": "^18.19.55", "@types/sinonjs__fake-timers": "^8.1.2", "@types/split2": "^3.2.1", "@types/stoppable": "^1.1.1", From 3430734fe051c9d020fdd8d941efde456867c877 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 14 Oct 2024 19:14:09 +0200 Subject: [PATCH 393/647] Auto-generated code for main (#2371) Co-authored-by: Josh Mock --- .../11be807bdeaeecc8174dec88e0851ea7.asciidoc | 15 ++++ .../12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc | 18 +++++ .../13fd7a99c5cf53279409ecc679084f87.asciidoc | 25 +++++++ .../1522a9297151d7046e6345b9b27539ca.asciidoc | 19 ++++++ .../16a7ce08b4a6b3af269f27eecc71d664.asciidoc | 15 ++++ .../17316a81c9dbdd120b7754116bf0461c.asciidoc | 26 +++++++ .../193704020a19714dec390452a4e75e8d.asciidoc | 10 +++ ...1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc} | 4 +- 
.../1c9dac4183a3532c91dbd1a46907729b.asciidoc | 10 +++ .../1fb2c77c0988bc6545040b20e3afa7e9.asciidoc | 33 +++++++++ .../2c86840a46242a38cf82024a9321be46.asciidoc | 28 ++++++++ .../2d0244c020075595acb625aa5ba8f455.asciidoc | 25 +++++++ .../4da0cb8693e9ceceee2ba3b558014bbf.asciidoc | 38 +++++++++++ .../529671ffaf7cc75fe83a81d729788be4.asciidoc | 31 +++++++++ .../54a47b5d07e7bfbea75c77f35eaae18d.asciidoc | 15 ++++ .../6329fb2840a4373ff6d342f2653247cb.asciidoc | 10 +++ .../63a53fcb0717ae9033a679cbfc932851.asciidoc | 20 ++++++ .../640dbeecb736bd25f6f2b392b76a7531.asciidoc | 10 +++ .../6f8bdca97e43aac75e32de655aa4314a.asciidoc | 10 +++ ...7163346755400594d1dd7e445aa19ff0.asciidoc} | 9 +-- .../7a27336a61284d079f3cc3994cf927d1.asciidoc | 50 ++++++++++++++ .../7af1f62b0cf496cbf593d83d30b472cc.asciidoc | 22 ++++++ .../7b1b947bddd7e78f77da265f7e645a61.asciidoc | 51 -------------- .../8477e77e4fad19af66f03f81b8f2592b.asciidoc | 20 ------ .../998c8479c8704bca0e121d5969859517.asciidoc | 10 +++ .../9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc | 35 ++++++++++ .../b8400dbe39215705060500f0e569f452.asciidoc | 10 +++ ...c26b185952ddf9842e18493aca2de147.asciidoc} | 0 .../c4654a4ca2f4600606dcc5bf37186c0b.asciidoc | 14 ++++ .../d1ea13e1e8372cbf1480a414723ff55a.asciidoc | 26 +++++++ .../dbb8fa2b8af6db66cf75ca4b83c0fb21.asciidoc | 68 ------------------- ...dd3ee00ab2af607b32532180d60a41d4.asciidoc} | 2 +- .../eed968e0d9fa2a4545c36a4e5f47b64b.asciidoc | 14 ++++ ...ef643bab44e7de6ddddde23a2eece5c7.asciidoc} | 12 ++-- .../f321d4e92aa83d573ecf52bf56b0b774.asciidoc | 15 ++++ .../f625fdbbe78c4198d9e40b35f3f008b3.asciidoc | 14 ++++ .../f679e414de48b8fe25e458844be05618.asciidoc | 13 ++++ .../f6f647eb644a2d236637ff05f833cb73.asciidoc | 12 ++++ .../fb56c2ac77d4c308d7702b6b33698382.asciidoc | 12 ++++ docs/reference.asciidoc | 12 ++-- src/api/types.ts | 34 ++++++---- src/api/typesWithBodyKey.ts | 34 ++++++---- 42 files changed, 661 insertions(+), 190 deletions(-) create mode 100644 docs/doc_examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc create mode 100644 docs/doc_examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc create mode 100644 docs/doc_examples/13fd7a99c5cf53279409ecc679084f87.asciidoc create mode 100644 docs/doc_examples/1522a9297151d7046e6345b9b27539ca.asciidoc create mode 100644 docs/doc_examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc create mode 100644 docs/doc_examples/17316a81c9dbdd120b7754116bf0461c.asciidoc create mode 100644 docs/doc_examples/193704020a19714dec390452a4e75e8d.asciidoc rename docs/doc_examples/{0393ca5a2942e1f00ed87546d0d50732.asciidoc => 1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc} (92%) create mode 100644 docs/doc_examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc create mode 100644 docs/doc_examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc create mode 100644 docs/doc_examples/2c86840a46242a38cf82024a9321be46.asciidoc create mode 100644 docs/doc_examples/2d0244c020075595acb625aa5ba8f455.asciidoc create mode 100644 docs/doc_examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc create mode 100644 docs/doc_examples/529671ffaf7cc75fe83a81d729788be4.asciidoc create mode 100644 docs/doc_examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc create mode 100644 docs/doc_examples/6329fb2840a4373ff6d342f2653247cb.asciidoc create mode 100644 docs/doc_examples/63a53fcb0717ae9033a679cbfc932851.asciidoc create mode 100644 docs/doc_examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc create mode 100644 docs/doc_examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc rename 
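+      // "synthetic" rebuilds _source on the fly from indexed values instead of storing the original JSON; the synthetic_source_keep settings just below keep selected fields (path) or their array form (ids) as originally sent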
docs/doc_examples/{8d750dfc067b1184c32a2423c60e4d06.asciidoc => 7163346755400594d1dd7e445aa19ff0.asciidoc} (59%) create mode 100644 docs/doc_examples/7a27336a61284d079f3cc3994cf927d1.asciidoc create mode 100644 docs/doc_examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc delete mode 100644 docs/doc_examples/7b1b947bddd7e78f77da265f7e645a61.asciidoc delete mode 100644 docs/doc_examples/8477e77e4fad19af66f03f81b8f2592b.asciidoc create mode 100644 docs/doc_examples/998c8479c8704bca0e121d5969859517.asciidoc create mode 100644 docs/doc_examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc create mode 100644 docs/doc_examples/b8400dbe39215705060500f0e569f452.asciidoc rename docs/doc_examples/{8575c966b004fb124c7afd6bb5827b50.asciidoc => c26b185952ddf9842e18493aca2de147.asciidoc} (100%) create mode 100644 docs/doc_examples/c4654a4ca2f4600606dcc5bf37186c0b.asciidoc create mode 100644 docs/doc_examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc delete mode 100644 docs/doc_examples/dbb8fa2b8af6db66cf75ca4b83c0fb21.asciidoc rename docs/doc_examples/{3ff634a50e2e4556bad7ea8553576992.asciidoc => dd3ee00ab2af607b32532180d60a41d4.asciidoc} (94%) create mode 100644 docs/doc_examples/eed968e0d9fa2a4545c36a4e5f47b64b.asciidoc rename docs/doc_examples/{9dfe3b02bd15409b4b8b36e9756e8f94.asciidoc => ef643bab44e7de6ddddde23a2eece5c7.asciidoc} (53%) create mode 100644 docs/doc_examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc create mode 100644 docs/doc_examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc create mode 100644 docs/doc_examples/f679e414de48b8fe25e458844be05618.asciidoc create mode 100644 docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc create mode 100644 docs/doc_examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc diff --git a/docs/doc_examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc b/docs/doc_examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc new file mode 100644 index 000000000..9e732ae71 --- /dev/null +++ b/docs/doc_examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job", + querystring: { + connector_id: "my-connector-id", + size: "1", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc b/docs/doc_examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc new file mode 100644 index 000000000..ba95e73e9 --- /dev/null +++ b/docs/doc_examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: ".elastic-connectors", + id: "connector_id", + doc: { + features: { + native_connector_api_keys: { + enabled: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13fd7a99c5cf53279409ecc679084f87.asciidoc b/docs/doc_examples/13fd7a99c5cf53279409ecc679084f87.asciidoc new file mode 100644 index 000000000..5ce2b993b --- /dev/null +++ b/docs/doc_examples/13fd7a99c5cf53279409ecc679084f87.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx_keep", + mappings: { + _source: { + mode: "synthetic", + 
}, + properties: { + path: { + type: "object", + synthetic_source_keep: "all", + }, + ids: { + type: "integer", + synthetic_source_keep: "arrays", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1522a9297151d7046e6345b9b27539ca.asciidoc b/docs/doc_examples/1522a9297151d7046e6345b9b27539ca.asciidoc new file mode 100644 index 000000000..2dab45f96 --- /dev/null +++ b/docs/doc_examples/1522a9297151d7046e6345b9b27539ca.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateConfiguration({ + connector_id: "my-connector-id", + values: { + host: "127.0.0.1", + port: 5432, + username: "myuser", + password: "mypassword", + database: "chinook", + schema: "public", + tables: "album,artist", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc b/docs/doc_examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc new file mode 100644 index 000000000..72589cd8f --- /dev/null +++ b/docs/doc_examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "books", +}); +console.log(response); + +const response1 = await client.indices.delete({ + index: "my-explicit-mappings-books", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/17316a81c9dbdd120b7754116bf0461c.asciidoc b/docs/doc_examples/17316a81c9dbdd120b7754116bf0461c.asciidoc new file mode 100644 index 000000000..13415ca5b --- /dev/null +++ b/docs/doc_examples/17316a81c9dbdd120b7754116bf0461c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-connector-api-key", + role_descriptors: { + "my-connector-connector-role": { + cluster: ["monitor", "manage_connector"], + indices: [ + { + names: [ + "my-index_name", + ".search-acl-filter-my-index_name", + ".elastic-connectors*", + ], + privileges: ["all"], + allow_restricted_indices: false, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/193704020a19714dec390452a4e75e8d.asciidoc b/docs/doc_examples/193704020a19714dec390452a4e75e8d.asciidoc new file mode 100644 index 000000000..1fcacfbd1 --- /dev/null +++ b/docs/doc_examples/193704020a19714dec390452a4e75e8d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "books", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0393ca5a2942e1f00ed87546d0d50732.asciidoc b/docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc similarity index 92% rename from docs/doc_examples/0393ca5a2942e1f00ed87546d0d50732.asciidoc rename to docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc index 4709c48bb..84fdefbae 100644 --- a/docs/doc_examples/0393ca5a2942e1f00ed87546d0d50732.asciidoc +++ b/docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc @@ -4,7 +4,7 @@ [source, js] ---- const response = await client.indices.create({ - index: "my-index-000003", + index: "my-index-000002", mappings: { 
properties: { metrics: { @@ -29,7 +29,7 @@ const response = await client.indices.create({ console.log(response); const response1 = await client.indices.getMapping({ - index: "my-index-000003", + index: "my-index-000002", }); console.log(response1); ---- diff --git a/docs/doc_examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc b/docs/doc_examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc new file mode 100644 index 000000000..b563e314b --- /dev/null +++ b/docs/doc_examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "music", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc b/docs/doc_examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc new file mode 100644 index 000000000..aced9c9d6 --- /dev/null +++ b/docs/doc_examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "john-api-key", + expiration: "1d", + role_descriptors: { + "sharepoint-online-role": { + index: [ + { + names: ["sharepoint-search-application"], + privileges: ["read"], + query: { + template: { + params: { + access_control: ["john@example.co", "Engineering Members"], + }, + source: + '\n {\n "bool": {\n "should": [\n {\n "bool": {\n "must_not": {\n "exists": {\n "field": "_allow_access_control"\n }\n }\n }\n },\n {\n "terms": {\n "_allow_access_control.enum": {{#toJson}}access_control{{/toJson}}\n }\n }\n ]\n }\n }\n ', + }, + }, + }, + ], + restriction: { + workflows: ["search_application_query"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c86840a46242a38cf82024a9321be46.asciidoc b/docs/doc_examples/2c86840a46242a38cf82024a9321be46.asciidoc new file mode 100644 index 000000000..3bda37c4b --- /dev/null +++ b/docs/doc_examples/2c86840a46242a38cf82024a9321be46.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-explicit-mappings-books", + mappings: { + dynamic: false, + properties: { + name: { + type: "text", + }, + author: { + type: "text", + }, + release_date: { + type: "date", + format: "yyyy-MM-dd", + }, + page_count: { + type: "integer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d0244c020075595acb625aa5ba8f455.asciidoc b/docs/doc_examples/2d0244c020075595acb625aa5ba8f455.asciidoc new file mode 100644 index 000000000..e7a2117c4 --- /dev/null +++ b/docs/doc_examples/2d0244c020075595acb625aa5ba8f455.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "idx_keep", + id: 1, + document: { + path: { + to: [ + { + foo: [3, 2, 1], + }, + { + foo: [30, 20, 10], + }, + ], + bar: "baz", + }, + ids: [200, 100, 300, 100], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc b/docs/doc_examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc new file mode 100644 index 
000000000..effb6d63d --- /dev/null +++ b/docs/doc_examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "INDEX_NAME", + conflicts: "proceed", + query: { + bool: { + filter: [ + { + match: { + object_type: "drive_item", + }, + }, + { + exists: { + field: "file", + }, + }, + { + range: { + lastModifiedDateTime: { + lte: "now-180d", + }, + }, + }, + ], + }, + }, + script: { + source: "ctx._source.body = ''", + lang: "painless", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/529671ffaf7cc75fe83a81d729788be4.asciidoc b/docs/doc_examples/529671ffaf7cc75fe83a81d729788be4.asciidoc new file mode 100644 index 000000000..c493ead5a --- /dev/null +++ b/docs/doc_examples/529671ffaf7cc75fe83a81d729788be4.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: ".elastic-connectors", + id: "connector_id", + doc: { + configuration: { + field_a: { + type: "str", + value: "", + }, + field_b: { + type: "bool", + value: false, + }, + field_c: { + type: "int", + value: 1, + }, + field_d: { + type: "list", + value: "a,b", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc b/docs/doc_examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc new file mode 100644 index 000000000..3cd177602 --- /dev/null +++ b/docs/doc_examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: ".elastic-connectors-sync-jobs-v1", + properties: { + job_type: { + type: "keyword", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6329fb2840a4373ff6d342f2653247cb.asciidoc b/docs/doc_examples/6329fb2840a4373ff6d342f2653247cb.asciidoc new file mode 100644 index 000000000..cbb113a2e --- /dev/null +++ b/docs/doc_examples/6329fb2840a4373ff6d342f2653247cb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: "books", +}); +console.log(response); +---- diff --git a/docs/doc_examples/63a53fcb0717ae9033a679cbfc932851.asciidoc b/docs/doc_examples/63a53fcb0717ae9033a679cbfc932851.asciidoc new file mode 100644 index 000000000..742fed240 --- /dev/null +++ b/docs/doc_examples/63a53fcb0717ae9033a679cbfc932851.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "completion", + inference_id: "alibabacloud_ai_search_completion", + inference_config: { + service: "alibabacloud-ai-search", + service_settings: { + host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + api_key: "{{API_KEY}}", + service_id: "ops-qwen-turbo", + workspace: "default", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc 
b/docs/doc_examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc new file mode 100644 index 000000000..54d88b41e --- /dev/null +++ b/docs/doc_examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.stats({ + include_remotes: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc b/docs/doc_examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc new file mode 100644 index 000000000..b1295fe7d --- /dev/null +++ b/docs/doc_examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.delete({ + connector_id: "my-connector-id&delete_sync_jobs=true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8d750dfc067b1184c32a2423c60e4d06.asciidoc b/docs/doc_examples/7163346755400594d1dd7e445aa19ff0.asciidoc similarity index 59% rename from docs/doc_examples/8d750dfc067b1184c32a2423c60e4d06.asciidoc rename to docs/doc_examples/7163346755400594d1dd7e445aa19ff0.asciidoc index 1e0682cbf..cbb6b62da 100644 --- a/docs/doc_examples/8d750dfc067b1184c32a2423c60e4d06.asciidoc +++ b/docs/doc_examples/7163346755400594d1dd7e445aa19ff0.asciidoc @@ -4,14 +4,7 @@ [source, js] ---- const response = await client.search({ - index: "my-index", - query: { - semantic: { - field: "inference_field", - query: "mountain lake", - inner_hits: {}, - }, - }, + index: "music", }); console.log(response); ---- diff --git a/docs/doc_examples/7a27336a61284d079f3cc3994cf927d1.asciidoc b/docs/doc_examples/7a27336a61284d079f3cc3994cf927d1.asciidoc new file mode 100644 index 000000000..a289078cb --- /dev/null +++ b/docs/doc_examples/7a27336a61284d079f3cc3994cf927d1.asciidoc @@ -0,0 +1,50 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-api-key", + role_descriptors: { + "role-source1": { + indices: [ + { + names: ["source1"], + privileges: ["read"], + query: { + template: { + params: { + access_control: [ + "example.user@example.com", + "source1-user-group", + ], + }, + }, + source: "...", + }, + }, + ], + }, + "role-source2": { + indices: [ + { + names: ["source2"], + privileges: ["read"], + query: { + template: { + params: { + access_control: [ + "example.user@example.com", + "source2-user-group", + ], + }, + }, + source: "...", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc b/docs/doc_examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc new file mode 100644 index 000000000..838cdbbbd --- /dev/null +++ b/docs/doc_examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "music-connector", + role_descriptors: { + "music-connector-role": { + cluster: ["monitor", "manage_connector"], + indices: [ + { + names: ["music", ".search-acl-filter-music", ".elastic-connectors*"], + privileges: ["all"], + allow_restricted_indices: false, + }, + 
], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7b1b947bddd7e78f77da265f7e645a61.asciidoc b/docs/doc_examples/7b1b947bddd7e78f77da265f7e645a61.asciidoc deleted file mode 100644 index 56a48d68d..000000000 --- a/docs/doc_examples/7b1b947bddd7e78f77da265f7e645a61.asciidoc +++ /dev/null @@ -1,51 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: "my-index-000004", - mappings: { - properties: { - metrics: { - subobjects: "auto", - properties: { - time: { - type: "object", - properties: { - min: { - type: "long", - }, - }, - }, - to: { - type: "object", - properties: { - inner_metrics: { - type: "object", - subobjects: "auto", - properties: { - time: { - type: "object", - properties: { - max: { - type: "long", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, -}); -console.log(response); - -const response1 = await client.indices.getMapping({ - index: "my-index-000004", -}); -console.log(response1); ----- diff --git a/docs/doc_examples/8477e77e4fad19af66f03f81b8f2592b.asciidoc b/docs/doc_examples/8477e77e4fad19af66f03f81b8f2592b.asciidoc deleted file mode 100644 index 246988f49..000000000 --- a/docs/doc_examples/8477e77e4fad19af66f03f81b8f2592b.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: "my-index", - query: { - semantic: { - field: "inference_field", - query: "mountain lake", - inner_hits: { - from: 1, - size: 1, - }, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/998c8479c8704bca0e121d5969859517.asciidoc b/docs/doc_examples/998c8479c8704bca0e121d5969859517.asciidoc new file mode 100644 index 000000000..130ceb562 --- /dev/null +++ b/docs/doc_examples/998c8479c8704bca0e121d5969859517.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.count({ + index: "music", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc b/docs/doc_examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc new file mode 100644 index 000000000..2a2b518d4 --- /dev/null +++ b/docs/doc_examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: ".elastic-connectors", + id: "connector_id", + doc: { + configuration: { + tables: { + type: "list", + value: "*", + }, + ssl_enabled: { + type: "bool", + value: false, + }, + ssl_ca: { + type: "str", + value: "", + }, + fetch_size: { + type: "int", + value: 50, + }, + retry_count: { + type: "int", + value: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b8400dbe39215705060500f0e569f452.asciidoc b/docs/doc_examples/b8400dbe39215705060500f0e569f452.asciidoc new file mode 100644 index 000000000..efd531967 --- /dev/null +++ b/docs/doc_examples/b8400dbe39215705060500f0e569f452.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.connector.get({ + connector_id: "my-connector-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8575c966b004fb124c7afd6bb5827b50.asciidoc b/docs/doc_examples/c26b185952ddf9842e18493aca2de147.asciidoc similarity index 100% rename from docs/doc_examples/8575c966b004fb124c7afd6bb5827b50.asciidoc rename to docs/doc_examples/c26b185952ddf9842e18493aca2de147.asciidoc diff --git a/docs/doc_examples/c4654a4ca2f4600606dcc5bf37186c0b.asciidoc b/docs/doc_examples/c4654a4ca2f4600606dcc5bf37186c0b.asciidoc new file mode 100644 index 000000000..674854196 --- /dev/null +++ b/docs/doc_examples/c4654a4ca2f4600606dcc5bf37186c0b.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQuery({ + format: "json", + body: { + query: + "\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc b/docs/doc_examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc new file mode 100644 index 000000000..8f098647f --- /dev/null +++ b/docs/doc_examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "connector_name-connector-api-key", + role_descriptors: { + "connector_name-connector-role": { + cluster: ["monitor", "manage_connector"], + indices: [ + { + names: [ + "index_name", + ".search-acl-filter-index_name", + ".elastic-connectors*", + ], + privileges: ["all"], + allow_restricted_indices: false, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dbb8fa2b8af6db66cf75ca4b83c0fb21.asciidoc b/docs/doc_examples/dbb8fa2b8af6db66cf75ca4b83c0fb21.asciidoc deleted file mode 100644 index db6a26798..000000000 --- a/docs/doc_examples/dbb8fa2b8af6db66cf75ca4b83c0fb21.asciidoc +++ /dev/null @@ -1,68 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: "my-index-000002", - mappings: { - properties: { - metrics: { - type: "object", - subobjects: "auto", - properties: { - inner: { - type: "object", - enabled: false, - }, - nested: { - type: "nested", - }, - }, - }, - }, - }, -}); -console.log(response); - -const response1 = await client.index({ - index: "my-index-000002", - id: "metric_1", - document: { - "metrics.time": 100, - "metrics.time.min": 10, - "metrics.time.max": 900, - }, -}); -console.log(response1); - -const response2 = await client.index({ - index: "my-index-000002", - id: "metric_2", - document: { - metrics: { - time: 100, - "time.min": 10, - "time.max": 900, - inner: { - foo: "bar", - "path.to.some.field": "baz", - }, - nested: [ - { - id: 10, - }, - { - id: 1, - }, - ], - }, - }, -}); -console.log(response2); - -const response3 = await client.indices.getMapping({ - index: "my-index-000002", -}); -console.log(response3); ----- diff --git a/docs/doc_examples/3ff634a50e2e4556bad7ea8553576992.asciidoc b/docs/doc_examples/dd3ee00ab2af607b32532180d60a41d4.asciidoc similarity index 94% rename from docs/doc_examples/3ff634a50e2e4556bad7ea8553576992.asciidoc 
rename to docs/doc_examples/dd3ee00ab2af607b32532180d60a41d4.asciidoc index 48e3ffcda..ad777029c 100644 --- a/docs/doc_examples/3ff634a50e2e4556bad7ea8553576992.asciidoc +++ b/docs/doc_examples/dd3ee00ab2af607b32532180d60a41d4.asciidoc @@ -16,7 +16,7 @@ const response = await client.indices.create({ filter: { my_snow: { type: "snowball", - language: "Lovins", + language: "English", }, }, }, diff --git a/docs/doc_examples/eed968e0d9fa2a4545c36a4e5f47b64b.asciidoc b/docs/doc_examples/eed968e0d9fa2a4545c36a4e5f47b64b.asciidoc new file mode 100644 index 000000000..68ca702c1 --- /dev/null +++ b/docs/doc_examples/eed968e0d9fa2a4545c36a4e5f47b64b.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQuery({ + format: "json", + body: { + query: + "\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9dfe3b02bd15409b4b8b36e9756e8f94.asciidoc b/docs/doc_examples/ef643bab44e7de6ddddde23a2eece5c7.asciidoc similarity index 53% rename from docs/doc_examples/9dfe3b02bd15409b4b8b36e9756e8f94.asciidoc rename to docs/doc_examples/ef643bab44e7de6ddddde23a2eece5c7.asciidoc index c55dc60ae..3f692157d 100644 --- a/docs/doc_examples/9dfe3b02bd15409b4b8b36e9756e8f94.asciidoc +++ b/docs/doc_examples/ef643bab44e7de6ddddde23a2eece5c7.asciidoc @@ -4,13 +4,13 @@ [source, js] ---- const response = await client.index({ - index: "my-index", - id: "lake_tahoe", + index: "books", document: { - inference_field: [ - "Lake Tahoe is the largest alpine lake in North America", - "When hiking in the area, please be on alert for bears", - ], + name: "The Great Gatsby", + author: "F. 
Scott Fitzgerald", + release_date: "1925-04-10", + page_count: 180, + language: "EN", }, }); console.log(response); diff --git a/docs/doc_examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc b/docs/doc_examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc new file mode 100644 index 000000000..8e0dd2359 --- /dev/null +++ b/docs/doc_examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_connector/_sync_job", + body: { + id: "my-connector-id", + job_type: "full", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc b/docs/doc_examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc new file mode 100644 index 000000000..c506a9e44 --- /dev/null +++ b/docs/doc_examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: ".elastic-connectors", + id: "connector-id", + doc: { + custom_scheduling: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f679e414de48b8fe25e458844be05618.asciidoc b/docs/doc_examples/f679e414de48b8fe25e458844be05618.asciidoc new file mode 100644 index 000000000..1c0fa96c5 --- /dev/null +++ b/docs/doc_examples/f679e414de48b8fe25e458844be05618.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.put({ + connector_id: "my-connector-id", + name: "Music catalog", + index_name: "music", + service_type: "postgresql", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc b/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc new file mode 100644 index 000000000..81783cf66 --- /dev/null +++ b/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.secretPost({ + body: { + value: "encoded_api_key", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc b/docs/doc_examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc new file mode 100644 index 000000000..1405a070d --- /dev/null +++ b/docs/doc_examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateApiKeyId({ + connector_id: "my_connector_id>", + api_key_id: "API key_id", + api_key_secret_id: "secret_id", +}); +console.log(response); +---- diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index c9fef6ef1..8331d23c6 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -2810,7 +2810,7 @@ client.cluster.stats({ ... }) * *Request (object):* ** *`node_id` (Optional, string | string[])*: List of node filters used to limit returned information. Defaults to all nodes in the cluster. 
-** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
+** *`include_remotes` (Optional, boolean)*: Include remote cluster data into the response
 ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for each node to respond.
 If a node does not respond before its timeout expires, the response does not include its stats.
 However, timed out nodes are included in the response’s `_nodes.failed` property.
 Defaults to no timeout.
@@ -3882,7 +3882,7 @@ Manually moves an index into the specified step and executes that step.
 
 {ref}/ilm-move-to-step.html[Endpoint documentation]
 [source,ts]
 ----
-client.ilm.moveToStep({ index })
+client.ilm.moveToStep({ index, current_step, next_step })
 ----
 
 [discrete]
@@ -3890,8 +3890,8 @@ client.ilm.moveToStep({ index })
 
 * *Request (object):*
 ** *`index` (string)*: The name of the index whose lifecycle step is to change
-** *`current_step` (Optional, { action, name, phase })*
-** *`next_step` (Optional, { action, name, phase })*
+** *`current_step` ({ action, name, phase })*
+** *`next_step` ({ action, name, phase })*
 
 [discrete]
 ==== put_lifecycle
@@ -5003,7 +5003,7 @@ client.indices.putIndexTemplate({ name })
 Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
 ** *`template` (Optional, { aliases, mappings, settings, lifecycle })*: Template to be applied.
 It may optionally include an `aliases`, `mappings`, or `settings` configuration.
-** *`data_stream` (Optional, { hidden })*: If this object is included, the template is used to create data streams and their backing indices.
+** *`data_stream` (Optional, { hidden, allow_custom_routing })*: If this object is included, the template is used to create data streams and their backing indices.
 Supports an empty object.
 Data streams require a matching index template with a `data_stream` object.
 ** *`priority` (Optional, number)*: Priority to determine index template precedence when a new data stream or index is created.
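For context, the `ilm.moveToStep` change above makes `current_step` and `next_step` required in the JS client, while the types diff below makes `action` and `name` optional inside each step key. A minimal sketch of the new call shape, with a placeholder index name and the step values used in the Elasticsearch ILM docs (it assumes the index is still on the `new` phase's `complete` step):

[source,js]
----
const response = await client.ilm.moveToStep({
  index: "my-index-000001",
  current_step: {
    phase: "new",
    action: "complete",
    name: "complete",
  },
  next_step: {
    phase: "warm",
  },
});
console.log(response);
----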
diff --git a/src/api/types.ts b/src/api/types.ts index 3b0eb9061..8497cb915 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2761,7 +2761,7 @@ export interface ShardStatistics { } export interface ShardsOperationResponseBase { - _shards: ShardStatistics + _shards?: ShardStatistics } export interface SlicedScroll { @@ -4519,8 +4519,8 @@ export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { type: 'edge_ngram' custom_token_chars?: string - max_gram: integer - min_gram: integer + max_gram?: integer + min_gram?: integer token_chars?: AnalysisTokenChar[] } @@ -4862,8 +4862,8 @@ export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase { type: 'ngram' custom_token_chars?: string - max_gram: integer - min_gram: integer + max_gram?: integer + min_gram?: integer token_chars?: AnalysisTokenChar[] } @@ -9474,7 +9474,7 @@ export interface ClusterStatsOperatingSystemMemoryInfo { export interface ClusterStatsRequest extends RequestBase { node_id?: NodeIds - flat_settings?: boolean + include_remotes?: boolean timeout?: Duration } @@ -10610,15 +10610,15 @@ export interface IlmMigrateToDataTiersResponse { export interface IlmMoveToStepRequest extends RequestBase { index: IndexName - current_step?: IlmMoveToStepStepKey - next_step?: IlmMoveToStepStepKey + current_step: IlmMoveToStepStepKey + next_step: IlmMoveToStepStepKey } export type IlmMoveToStepResponse = AcknowledgedResponseBase export interface IlmMoveToStepStepKey { - action: string - name: string + action?: string + name?: string phase: string } @@ -10744,6 +10744,7 @@ export interface IndicesDataStreamTimestampField { export interface IndicesDataStreamVisibility { hidden?: boolean + allow_custom_routing?: boolean } export interface IndicesDownsampleConfig { @@ -12862,6 +12863,7 @@ export interface IngestRedactProcessor extends IngestProcessorBase { suffix?: string ignore_missing?: boolean skip_if_unlicensed?: boolean + trace_redact?: boolean } export interface IngestRemoveProcessor extends IngestProcessorBase { @@ -13073,6 +13075,7 @@ export type IngestSimulateDocumentSimulation = IngestSimulateDocumentSimulationK & { [property: string]: string | Id | IndexName | IngestSimulateIngest | Record | SpecUtilsStringified | VersionType } export interface IngestSimulateIngest { + _redact?: IngestSimulateRedact timestamp: DateTime pipeline?: Name } @@ -13087,6 +13090,10 @@ export interface IngestSimulatePipelineSimulation { error?: ErrorCause } +export interface IngestSimulateRedact { + _is_redacted: boolean +} + export interface IngestSimulateRequest extends RequestBase { id?: Id verbose?: boolean @@ -20059,13 +20066,12 @@ export interface XpackInfoFeatures { aggregate_metric: XpackInfoFeature analytics: XpackInfoFeature ccr: XpackInfoFeature - data_frame?: XpackInfoFeature - data_science?: XpackInfoFeature data_streams: XpackInfoFeature data_tiers: XpackInfoFeature enrich: XpackInfoFeature + enterprise_search: XpackInfoFeature eql: XpackInfoFeature - flattened?: XpackInfoFeature + esql: XpackInfoFeature frozen_indices: XpackInfoFeature graph: XpackInfoFeature ilm: XpackInfoFeature @@ -20080,7 +20086,7 @@ export interface XpackInfoFeatures { spatial: XpackInfoFeature sql: XpackInfoFeature transform: XpackInfoFeature - vectors?: XpackInfoFeature + universal_profiling: XpackInfoFeature voting_only: XpackInfoFeature watcher: XpackInfoFeature archive: 
XpackInfoFeature diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 32b8d047e..7b8f18d85 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -2837,7 +2837,7 @@ export interface ShardStatistics { } export interface ShardsOperationResponseBase { - _shards: ShardStatistics + _shards?: ShardStatistics } export interface SlicedScroll { @@ -4595,8 +4595,8 @@ export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { type: 'edge_ngram' custom_token_chars?: string - max_gram: integer - min_gram: integer + max_gram?: integer + min_gram?: integer token_chars?: AnalysisTokenChar[] } @@ -4938,8 +4938,8 @@ export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase { type: 'ngram' custom_token_chars?: string - max_gram: integer - min_gram: integer + max_gram?: integer + min_gram?: integer token_chars?: AnalysisTokenChar[] } @@ -9578,7 +9578,7 @@ export interface ClusterStatsOperatingSystemMemoryInfo { export interface ClusterStatsRequest extends RequestBase { node_id?: NodeIds - flat_settings?: boolean + include_remotes?: boolean timeout?: Duration } @@ -10783,16 +10783,16 @@ export interface IlmMoveToStepRequest extends RequestBase { index: IndexName /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - current_step?: IlmMoveToStepStepKey - next_step?: IlmMoveToStepStepKey + current_step: IlmMoveToStepStepKey + next_step: IlmMoveToStepStepKey } } export type IlmMoveToStepResponse = AcknowledgedResponseBase export interface IlmMoveToStepStepKey { - action: string - name: string + action?: string + name?: string phase: string } @@ -10921,6 +10921,7 @@ export interface IndicesDataStreamTimestampField { export interface IndicesDataStreamVisibility { hidden?: boolean + allow_custom_routing?: boolean } export interface IndicesDownsampleConfig { @@ -13090,6 +13091,7 @@ export interface IngestRedactProcessor extends IngestProcessorBase { suffix?: string ignore_missing?: boolean skip_if_unlicensed?: boolean + trace_redact?: boolean } export interface IngestRemoveProcessor extends IngestProcessorBase { @@ -13307,6 +13309,7 @@ export type IngestSimulateDocumentSimulation = IngestSimulateDocumentSimulationK & { [property: string]: string | Id | IndexName | IngestSimulateIngest | Record | SpecUtilsStringified | VersionType } export interface IngestSimulateIngest { + _redact?: IngestSimulateRedact timestamp: DateTime pipeline?: Name } @@ -13321,6 +13324,10 @@ export interface IngestSimulatePipelineSimulation { error?: ErrorCause } +export interface IngestSimulateRedact { + _is_redacted: boolean +} + export interface IngestSimulateRequest extends RequestBase { id?: Id verbose?: boolean @@ -20566,13 +20573,12 @@ export interface XpackInfoFeatures { aggregate_metric: XpackInfoFeature analytics: XpackInfoFeature ccr: XpackInfoFeature - data_frame?: XpackInfoFeature - data_science?: XpackInfoFeature data_streams: XpackInfoFeature data_tiers: XpackInfoFeature enrich: XpackInfoFeature + enterprise_search: XpackInfoFeature eql: XpackInfoFeature - flattened?: XpackInfoFeature + esql: XpackInfoFeature frozen_indices: XpackInfoFeature graph: XpackInfoFeature ilm: XpackInfoFeature @@ -20587,7 +20593,7 @@ export interface XpackInfoFeatures { spatial: XpackInfoFeature sql: XpackInfoFeature transform: XpackInfoFeature - vectors?: 
XpackInfoFeature + universal_profiling: XpackInfoFeature voting_only: XpackInfoFeature watcher: XpackInfoFeature archive: XpackInfoFeature From 661caf842202312a478a4239b0a0bc6bc17019a0 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 15 Oct 2024 11:36:54 -0500 Subject: [PATCH 394/647] Update changelog for 8.15.1 (#2379) --- docs/changelog.asciidoc | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 508bb8b9d..b36bb13ee 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,17 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.15.1 + +[discrete] +==== Features + +[discrete] +===== Improved support for Elasticsearch `v8.15` + +Updated TypeScript types based on fixes and improvements to the Elasticsearch specification. + [discrete] === 8.15.0 From 82acfc33a9710609760f28f199f0781b5d65acc2 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 16 Oct 2024 14:19:44 -0500 Subject: [PATCH 395/647] Respect disablePrototypePoisoningProtection option (#2380) --- docs/basic-config.asciidoc | 4 ++-- docs/changelog.asciidoc | 20 +++++++++++++++++ src/client.ts | 16 ++++++++++++- test/unit/client.test.ts | 46 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 83 insertions(+), 3 deletions(-) diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index a71269961..799866f93 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -252,8 +252,8 @@ const client = new Client({ ---- |`disablePrototypePoisoningProtection` -|`boolean`, `'proto'`, `'constructor'` - By the default the client will protect you against prototype poisoning attacks. Read https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08[this article] to learn more. If needed you can disable prototype poisoning protection entirely or one of the two checks. Read the `secure-json-parse` https://github.com/fastify/secure-json-parse[documentation] to learn more. + -_Default:_ `false` +|`boolean`, `'proto'`, `'constructor'` - The client can protect you against prototype poisoning attacks. Read https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08[this article] to learn more about this security concern. If needed, you can enable prototype poisoning protection entirely (`false`) or one of the two checks (`'proto'` or `'constructor'`). For performance reasons, it is disabled by default. Read the `secure-json-parse` https://github.com/fastify/secure-json-parse[documentation] to learn more. + +_Default:_ `true` |`caFingerprint` |`string` - If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints. + diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index b36bb13ee..734916a27 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,26 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.16.0 + +[discrete] +==== Features + +[discrete] +===== Support for Elasticsearch `v8.16` + +You can find all the API changes +https://www.elastic.co/guide/en/elasticsearch/reference/8.16/release-notes-8.16.0.html[here]. 
+ +[discrete] +==== Fixes + +[discrete] +===== Pass prototype poisoning options to serializer correctly + +The client's `disablePrototypePoisoningProtection` option was set to `true` by default, but when it was set to any other value it was ignored, making it impossible to enable prototype poisoning protection without providing a custom serializer implementation. + [discrete] === 8.15.1 diff --git a/src/client.ts b/src/client.ts index 068f3573b..df0fa4c1a 100644 --- a/src/client.ts +++ b/src/client.ts @@ -228,7 +228,21 @@ export default class Client extends API { this.diagnostic = opts[kChild].diagnostic } else { this.diagnostic = new Diagnostic() - this.serializer = new options.Serializer() + + let serializerOptions + if (opts.disablePrototypePoisoningProtection != null) { + if (typeof opts.disablePrototypePoisoningProtection === 'boolean') { + serializerOptions = { + enablePrototypePoisoningProtection: !opts.disablePrototypePoisoningProtection + } + } else { + serializerOptions = { + enablePrototypePoisoningProtection: opts.disablePrototypePoisoningProtection + } + } + } + this.serializer = new options.Serializer(serializerOptions) + this.connectionPool = new options.ConnectionPool({ pingTimeout: options.pingTimeout, resurrectStrategy: options.resurrectStrategy, diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts index b896946ad..ed56d76f5 100644 --- a/test/unit/client.test.ts +++ b/test/unit/client.test.ts @@ -482,3 +482,49 @@ test('Ensure new client does not time out at default (30s) when client sets requ t.end() } }) + +test('Pass disablePrototypePoisoningProtection option to serializer', async t => { + let client = new Client({ + node: 'http://localhost:9200/', + disablePrototypePoisoningProtection: false + }) + t.same(client.serializer[symbols.kJsonOptions], { + protoAction: 'error', + constructorAction: 'error' + }) + + client = new Client({ + node: 'http://localhost:9200/', + disablePrototypePoisoningProtection: true + }) + t.same(client.serializer[symbols.kJsonOptions], { + protoAction: 'ignore', + constructorAction: 'ignore' + }) + + client = new Client({ + node: 'http://localhost:9200/', + disablePrototypePoisoningProtection: 'proto' + }) + t.same(client.serializer[symbols.kJsonOptions], { + protoAction: 'error', + constructorAction: 'ignore' + }) + + client = new Client({ + node: 'http://localhost:9200/', + disablePrototypePoisoningProtection: 'constructor' + }) + t.same(client.serializer[symbols.kJsonOptions], { + protoAction: 'ignore', + constructorAction: 'error' + }) +}) + +test('disablePrototypePoisoningProtection is true by default', async t => { + const client = new Client({ node: 'http://localhost:9200/' }) + t.same(client.serializer[symbols.kJsonOptions], { + protoAction: 'ignore', + constructorAction: 'ignore' + }) +}) From e9fdcb064791c8592bf0a5bec7365a02d19f5940 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 18 Oct 2024 11:13:41 -0500 Subject: [PATCH 396/647] Add doc about timeout best practices (#2381) --- docs/configuration.asciidoc | 3 ++- docs/index.asciidoc | 1 + docs/timeout-best-practices.asciidoc | 10 ++++++++++ 3 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 docs/timeout-best-practices.asciidoc diff --git a/docs/configuration.asciidoc b/docs/configuration.asciidoc index e5c4f32f8..402c5e9a3 100644 --- a/docs/configuration.asciidoc +++ b/docs/configuration.asciidoc @@ -2,10 +2,11 @@ == Configuration -The client is designed to be easily configured for your needs. 
From e9fdcb064791c8592bf0a5bec7365a02d19f5940 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Fri, 18 Oct 2024 11:13:41 -0500
Subject: [PATCH 396/647] Add doc about timeout best practices (#2381)

---
 docs/configuration.asciidoc          |  3 ++-
 docs/index.asciidoc                  |  1 +
 docs/timeout-best-practices.asciidoc | 10 ++++++++++
 3 files changed, 13 insertions(+), 1 deletion(-)
 create mode 100644 docs/timeout-best-practices.asciidoc

diff --git a/docs/configuration.asciidoc b/docs/configuration.asciidoc
index e5c4f32f8..402c5e9a3 100644
--- a/docs/configuration.asciidoc
+++ b/docs/configuration.asciidoc
@@ -2,10 +2,11 @@
 == Configuration
 
-The client is designed to be easily configured for your needs. In the following
+The client is designed to be easily configured for your needs. In the following
 section, you can see the possible options that you can use to configure it.
 
 * <>
 * <>
+* <>
 * <>
 * <>
diff --git a/docs/index.asciidoc b/docs/index.asciidoc
index eda790be4..51206f0b0 100644
--- a/docs/index.asciidoc
+++ b/docs/index.asciidoc
@@ -21,3 +21,4 @@ include::reference.asciidoc[]
 include::examples/index.asciidoc[]
 include::helpers.asciidoc[]
 include::redirects.asciidoc[]
+include::timeout-best-practices.asciidoc[]
diff --git a/docs/timeout-best-practices.asciidoc b/docs/timeout-best-practices.asciidoc
new file mode 100644
index 000000000..0d2fb4772
--- /dev/null
+++ b/docs/timeout-best-practices.asciidoc
@@ -0,0 +1,10 @@
+[[timeout-best-practices]]
+=== Timeout best practices
+
+This client is configured by default to operate like many HTTP client libraries do, by using a relatively short (30 second) timeout on all requests sent to {es}, raising a `TimeoutError` when that time period has elapsed without receiving a response. However, {es} will always eventually respond to any request, even if it takes several minutes. The {ref}/modules-network.html#_http_client_configuration[official {es} recommendation] is to disable response timeouts entirely by default.
+
+Since changing this default would be a breaking change, we won't do that until the next major release. In the meantime, here is our recommendation for properly configuring your client:
+
+* Ensure keep-alive is enabled; this is the default, so no settings need to be changed, unless you have set `agent` to `false` or provided an alternate `agent` that disables keep-alive
+* If using the default `UndiciConnection`, disable request timeouts by setting `timeout` to `0`
+* If using the legacy `HttpConnection`, set `timeout` to a very large number (e.g. `86400000`, or one day)
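
A rough sketch of the recommendation from the new doc, assuming the default `UndiciConnection` and the client-level `requestTimeout` option:

[source, js]
----
const { Client } = require('@elastic/elasticsearch')

// Setting requestTimeout to 0 disables the per-request timeout entirely,
// letting Elasticsearch take as long as it needs to respond; keep-alive
// remains enabled by default.
const client = new Client({
  node: 'http://localhost:9200/',
  requestTimeout: 0
})
----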
From c3247d0c663d29b492c9d78d8a107a4a7d483366 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Tue, 22 Oct 2024 15:00:18 -0500
Subject: [PATCH 397/647] Basic helper for ES|QL's Apache Arrow output format (#2391)

---
 package.json                   |  3 +-
 src/helpers.ts                 | 27 ++++++++++++---
 test/unit/helpers/esql.test.ts | 62 ++++++++++++++++++++++++++++++++++
 tsconfig.json                  |  5 +--
 4 files changed, 89 insertions(+), 8 deletions(-)

diff --git a/package.json b/package.json
index 2b4b5820a..87b7fb9bc 100644
--- a/package.json
+++ b/package.json
@@ -87,7 +87,8 @@
     "zx": "^7.2.2"
   },
   "dependencies": {
-    "@elastic/transport": "^8.8.1",
+    "@elastic/transport": "^8.9.0",
+    "@apache-arrow/esnext-cjs": "^17.0.0",
     "tslib": "^2.4.0"
   },
   "tap": {
diff --git a/src/helpers.ts b/src/helpers.ts
--- a/src/helpers.ts
+++ b/src/helpers.ts
@@ -25,6 +25,7 @@
 import assert from 'node:assert'
 import * as timersPromises from 'node:timers/promises'
 import { Readable } from 'node:stream'
 import { errors, TransportResult, TransportRequestOptions, TransportRequestOptionsWithMeta } from '@elastic/transport'
+import { Table, TypeMap, tableFromIPC } from '@apache-arrow/esnext-cjs'
 import Client from './client'
 import * as T from './api/types'
 
@@ -155,6 +156,7 @@ export interface EsqlResponse {
 
 export interface EsqlHelper {
   toRecords: <TDocument>() => Promise<EsqlToRecords<TDocument>>
+  toArrow: () => Promise<Table<TypeMap>>
 }
 
 export interface EsqlToRecords<TDocument> {
@@ -965,11 +967,6 @@ export default class Helpers {
    * @returns {object} EsqlHelper instance
    */
   esql (params: T.EsqlQueryRequest, reqOptions: TransportRequestOptions = {}): EsqlHelper {
-    if (this[kMetaHeader] !== null) {
-      reqOptions.headers = reqOptions.headers ?? {}
-      reqOptions.headers['x-elastic-client-meta'] = `${this[kMetaHeader] as string},h=qo`
-    }
-
     const client = this[kClient]
 
     function toRecords<TDocument> (response: EsqlResponse): TDocument[] {
@@ -985,17 +982,37 @@ export default class Helpers {
       })
     }
 
+    const metaHeader = this[kMetaHeader]
+
     const helper: EsqlHelper = {
       /**
       * Pivots ES|QL query results into an array of row objects, rather than the default format where each row is an array of values.
       */
       async toRecords<TDocument> (): Promise<EsqlToRecords<TDocument>> {
+        if (metaHeader !== null) {
+          reqOptions.headers = reqOptions.headers ?? {}
+          reqOptions.headers['x-elastic-client-meta'] = `${metaHeader as string},h=qo`
+        }
+
+        params.format = 'json'
+        params.columnar = false
 
         // @ts-expect-error it's typed as ArrayBuffer but we know it will be JSON
         const response: EsqlResponse = await client.esql.query(params, reqOptions)
         const records: TDocument[] = toRecords(response)
         const { columns } = response
         return { records, columns }
+      },
+
+      async toArrow (): Promise<Table<TypeMap>> {
+        if (metaHeader !== null) {
+          reqOptions.headers = reqOptions.headers ?? {}
+          reqOptions.headers['x-elastic-client-meta'] = `${metaHeader as string},h=qa`
+        }
+
+        params.format = 'arrow'
+
+        const response = await client.esql.query(params, reqOptions)
+        return tableFromIPC(response)
       }
     }
diff --git a/test/unit/helpers/esql.test.ts b/test/unit/helpers/esql.test.ts
index b029e1323..3685b7c53 100644
--- a/test/unit/helpers/esql.test.ts
+++ b/test/unit/helpers/esql.test.ts
@@ -18,6 +18,7 @@
  */
 
 import { test } from 'tap'
+import { Table } from '@apache-arrow/esnext-cjs'
 import { connection } from '../../utils'
 import { Client } from '../../../'
 
@@ -109,5 +110,66 @@ test('ES|QL helper', t => {
       t.end()
     })
 
+    t.end()
+  })
+
+  test('toArrow', t => {
+    t.test('Parses a binary response into an Arrow table', async t => {
+      const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA='
+
+      const MockConnection = connection.buildMockConnection({
+        onRequest (_params) {
+          return {
+            body: Buffer.from(binaryContent, 'base64'),
+            statusCode: 200,
+            headers: {
+              'content-type': 'application/vnd.elasticsearch+arrow+stream'
+            }
+          }
+        }
+      })
+
+      const client = new Client({
+        node: 'http://localhost:9200/',
+        Connection: MockConnection
+      })
+
+      const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrow()
+      t.ok(result instanceof Table)
+
+      const table = [...result]
+      t.same(table[0], [
+        ["amount", 4.900000095367432],
+        ["date", 1729532586965],
+      ])
+      t.end()
+    })
+
+    t.test('ESQL helper uses correct x-elastic-client-meta helper value', async t => {
+      const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA='
+
+      const MockConnection = connection.buildMockConnection({
+        onRequest (params) {
+          const header = params.headers?.['x-elastic-client-meta'] ?? ''
+          t.ok(header.includes('h=qa'), `Client meta header does not include ESQL helper value: ${header}`)
+          return {
+            body: Buffer.from(binaryContent, 'base64'),
+            statusCode: 200,
+            headers: {
+              'content-type': 'application/vnd.elasticsearch+arrow+stream'
+            }
+          }
+        }
+      })
+
+      const client = new Client({
+        node: 'http://localhost:9200/',
+        Connection: MockConnection
+      })
+
+      await client.helpers.esql({ query: 'FROM sample_data' }).toArrow()
+      t.end()
+    })
+
+    t.end()
+  })
   t.end()
 })
diff --git a/tsconfig.json b/tsconfig.json
index e93828bd8..a7d7a1352 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -1,6 +1,6 @@
 {
   "compilerOptions": {
-    "target": "es2019",
+    "target": "ES2019",
     "module": "commonjs",
     "moduleResolution": "node",
     "declaration": true,
@@ -21,7 +21,8 @@
     "importHelpers": true,
     "outDir": "lib",
     "lib": [
-      "esnext"
+      "ES2019",
+      "dom"
     ]
   },
   "formatCodeOptions": {
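
A hedged usage sketch of the two ES|QL helper methods this commit covers, based on the unit tests above (the index name and query are placeholders):

[source, js]
----
const { Client } = require('@elastic/elasticsearch')

async function run () {
  const client = new Client({ node: 'http://localhost:9200/' })

  // toRecords() pivots each result row into an object keyed by column name
  const { records, columns } = await client.helpers.esql({ query: 'FROM sample_data' }).toRecords()

  // toArrow() returns an Apache Arrow Table decoded with tableFromIPC
  const table = await client.helpers.esql({ query: 'FROM sample_data' }).toArrow()
  const rows = [...table] // each row iterates as [column, value] entries
  console.log(records, columns, rows.length)
}

run().catch(console.error)
----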
From cef328c93dee11212e7ab01e162442609a2dd0a2 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Wed, 23 Oct 2024 08:48:04 -0500
Subject: [PATCH 398/647] Prep 8.16.0 (#2396)

---
 package.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/package.json b/package.json
index 87b7fb9bc..f45fea3b3 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@elastic/elasticsearch",
-  "version": "8.15.0",
-  "versionCanary": "8.15.0-canary.0",
+  "version": "8.16.0",
+  "versionCanary": "8.16.0-canary.0",
   "description": "The official Elasticsearch client for Node.js",
   "main": "./index.js",
   "types": "index.d.ts",

From 8e79bf847a6ba08119f7df1bc6b518ea176c1018 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Wed, 23 Oct 2024 11:38:08 -0500
Subject: [PATCH 399/647] Enable Renovate (#2398)

---
 renovate.json | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
 create mode 100644 renovate.json

diff --git a/renovate.json b/renovate.json
new file mode 100644
index 000000000..604bcf1f3
--- /dev/null
+++ b/renovate.json
@@ -0,0 +1,20 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "local>elastic/renovate-config"
+  ],
+  "schedule": [
+    "* * * * 0"
+  ],
+  "packageRules": [
+    {
+      "matchDepTypes": [
+        "devDependencies"
+      ],
+      "automerge": true,
+      "labels": [
+        "backport 8.x"
+      ]
+    }
+  ]
+}

From 86b4d4e2f902c2947e150e1fadf502ae26ca02ae Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Thu, 24 Oct 2024 11:33:46 -0500
Subject: [PATCH 400/647] Upgrade tap to latest (#2400)

---
 .gitignore               |  2 ++
 package.json             | 11 ++++------
 test/unit/client.test.ts | 14 ++++++--------
 3 files changed, 12 insertions(+), 15 deletions(-)

diff --git a/.gitignore b/.gitignore
index 0dd9106ed..99b15ab2f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -65,3 +65,5 @@ test/bundlers/parcel-test/.parcel-cache
 lib
 junit-output
 bun.lockb
+test-results
+processinfo
diff --git a/package.json b/package.json
index f45fea3b3..c8c28b35c 100644
--- a/package.json
+++ b/package.json
@@ -78,7 +78,7 @@
     "semver": "^7.3.7",
     "split2": "^4.1.0",
     "stoppable": "^1.1.0",
-    "tap": "^16.1.0",
+    "tap": "^21.0.1",
     "ts-node": "^10.7.0",
     "ts-standard": "^11.0.0",
     "typescript": "^4.6.4",
@@ -92,11 +92,8 @@
     "tslib": "^2.4.0"
   },
   "tap": {
-    "ts": true,
-    "jsx": false,
-    "flow": false,
-    "coverage": false,
-    "check-coverage": false,
-    "files": "test/unit/{*,**/*}.test.ts"
+    "files": [
+      "test/unit/{*,**/*}.test.ts"
+    ]
   }
 }
diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts
index ed56d76f5..cc9868cfe 100644
--- a/test/unit/client.test.ts
+++ b/test/unit/client.test.ts
@@ -293,14 +293,12 @@ test('Elastic Cloud config', t => {
   })
 
   t.ok(client.connectionPool instanceof CloudConnectionPool)
-  t.match(client.connectionPool.connections.find(c => c.id === 'https://abcd.localhost/'), {
-    url: new URL('https://elastic:changeme@abcd.localhost/'),
-    id: 'https://abcd.localhost/',
-    headers: {
-      authorization: 'Basic ' + Buffer.from('elastic:changeme').toString('base64')
-    },
-    tls: { secureProtocol: 'TLSv1_2_method' }
-  })
+  const connection = client.connectionPool.connections.find(c => c.id === 'https://abcd.localhost/')
+
+  t.equal(connection?.headers?.authorization, `Basic ${Buffer.from('elastic:changeme').toString('base64')}`)
+  t.same(connection?.tls, { secureProtocol: 'TLSv1_2_method' })
+  t.equal(connection?.url.hostname, 'abcd.localhost')
+  t.equal(connection?.url.protocol, 'https:')
 
   t.end()
 })

From 572927b4f1fbe2734f6678d6a2e2f5fad3f014ee Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Thu, 24 Oct 2024 11:55:11 -0500
Subject: [PATCH 401/647] Don't generate coverage during standard unit test run (#2404)

---
 package.json | 1 +
 1 file changed, 1 insertion(+)

diff --git a/package.json b/package.json
index c8c28b35c..2624e1a78 100644
--- a/package.json
+++ b/package.json
@@ -92,6 +92,7 @@
     "tslib": "^2.4.0"
   },
   "tap": {
+    "disable-coverage": true,
     "files": [
       "test/unit/{*,**/*}.test.ts"
     ]
   }
 }
From 90d43f4f2899d9b3149affa24f05145a6de37e3f Mon Sep 17 00:00:00 2001
From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com>
Date: Mon, 28 Oct 2024 11:03:46 -0500
Subject: [PATCH 402/647] Pin dependencies (#2408)

Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com>
---
 .buildkite/Dockerfile                  |  2 +-
 .buildkite/Dockerfile-make             |  2 +-
 .github/workflows/nodejs.yml           | 16 +++----
 .github/workflows/npm-publish.yml      |  4 +-
 .github/workflows/serverless-patch.yml |  6 +--
 .github/workflows/stale.yml            |  2 +-
 package.json                           | 62 +++++++++++++------------
 7 files changed, 47 insertions(+), 47 deletions(-)

diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile
index 2bf3886dc..b91fe60ad 100644
--- a/.buildkite/Dockerfile
+++ b/.buildkite/Dockerfile
@@ -1,5 +1,5 @@
 ARG NODE_VERSION=${NODE_VERSION:-18}
-FROM node:$NODE_VERSION
+FROM node:latest@sha256:840dad0077213cadd2d734d542ae11cd0f648200be29504eb1b6e2c995d2b75a:$NODE_VERSION
 
 # Install required tools
 RUN apt-get clean -y && \
diff --git a/.buildkite/Dockerfile-make b/.buildkite/Dockerfile-make
index 3805eb0a2..1261b2989 100644
--- a/.buildkite/Dockerfile-make
+++ b/.buildkite/Dockerfile-make
@@ -1,5 +1,5 @@
 ARG NODE_JS_VERSION=${NODE_JS_VERSION:-18}
-FROM node:${NODE_JS_VERSION}
+FROM node:latest@sha256:840dad0077213cadd2d734d542ae11cd0f648200be29504eb1b6e2c995d2b75a:${NODE_JS_VERSION}
 
 ARG BUILDER_UID=1000
 ARG BUILDER_GID=1000
diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml
index dd93e3bdd..e984adb53 100644
--- a/.github/workflows/nodejs.yml
+++ b/.github/workflows/nodejs.yml
@@ -11,10 +11,10 @@ jobs:
     outputs:
       src-only: "${{ steps.changes.outputs.src-only }}"
     steps:
      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
        with:
          persist-credentials: false
      - uses: dorny/paths-filter/@v3.0.2
+      - uses: dorny/paths-filter/@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
        id: changes
        with:
          filters: |
@@ -36,12 +36,12 @@ jobs:
         os: [ubuntu-latest, windows-latest, macOS-latest]
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
         with:
           persist-credentials: false
 
       - name: Use Node.js ${{ matrix.node-version }}
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4
         with:
           node-version: ${{ matrix.node-version }}
 
@@ -62,12 +62,12 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
        with:
          persist-credentials: false
 
      - name: Use Node.js
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4
        with:
          node-version: 22.x
 
@@ -92,12 +92,12 @@ jobs:
         os: [ubuntu-latest, windows-latest, macOS-latest]
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
        with:
          persist-credentials: false
 
      - name: Use Bun
-        uses: oven-sh/setup-bun@v2
+        uses: oven-sh/setup-bun@4bc047ad259df6fc24a6c9b0f9a0cb08cf17fbe5 # v2
 
      - name: Install
        run: |
diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml
index 956b688ec..68bb353d7 100644
--- a/.github/workflows/npm-publish.yml
+++ b/.github/workflows/npm-publish.yml
@@ -12,11 +12,11 @@ jobs:
       contents: write
       id-token: write
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
         with:
           persist-credentials: false
           ref: ${{ github.event.inputs.branch }}
-      - uses: actions/setup-node@v4
+      - uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4
         with:
           node-version: "22.x"
           registry-url: "https://registry.npmjs.org/"
diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml
index f3ae92f9e..2f583a7cd 100644
--- a/.github/workflows/serverless-patch.yml
+++ b/.github/workflows/serverless-patch.yml
@@ -26,14 +26,14 @@ jobs:
           )
         )
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
         with:
           persist-credentials: false
           repository: elastic/elasticsearch-js
           ref: main
           path: stack
           fetch-depth: 0
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
         with:
           persist-credentials: false
           repository: elastic/elasticsearch-serverless-js
      - name: Apply patch from stack to serverless
        id: apply-patch
        run: $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh
-      - uses: peter-evans/create-pull-request@v6
+      - uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v6
        with:
          token: ${{ secrets.GH_TOKEN }}
          path: serverless
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 3970f1d8d..020c87722 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -8,7 +8,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v8
+      - uses: actions/stale@1160a2240286f5da8ec72b1c0816ce2481aabf84 # v8
         with:
           stale-issue-label: stale
           stale-pr-label: stale
diff --git a/package.json b/package.json
index 2624e1a78..4a37f14fe 100644
--- a/package.json
+++ b/package.json
@@ -53,38 +53,38 @@
     "node": ">=18"
   },
   "devDependencies": {
-    "@elastic/request-converter": "^8.15.2",
+    "@elastic/request-converter": "8.16.0",
     "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1",
-    "@types/debug": "^4.1.7",
-    "@types/ms": "^0.7.31",
-    "@types/node": "^18.19.55",
-    "@types/sinonjs__fake-timers": "^8.1.2",
-    "@types/split2": "^3.2.1",
-    "@types/stoppable": "^1.1.1",
-    "@types/tap": "^15.0.7",
-    "chai": "^4.3.7",
-    "cross-zip": "^4.0.0",
-    "desm": "^1.2.0",
-    "into-stream": "^7.0.0",
-    "js-yaml": "^4.1.0",
-    "license-checker": "^25.0.1",
-    "minimist": "^1.2.6",
-    "ms": "^2.1.3",
-    "node-abort-controller": "^3.0.1",
-    "node-fetch": "^2.6.7",
-    "ora": "^5.4.1",
-    "proxy": "^1.0.2",
-    "rimraf": "^3.0.2",
-    "semver": "^7.3.7",
-    "split2": "^4.1.0",
-    "stoppable": "^1.1.0",
-    "tap": "^21.0.1",
-    "ts-node": "^10.7.0",
-    "ts-standard": "^11.0.0",
-    "typescript": "^4.6.4",
-    "workq": "^3.0.0",
-    "xmlbuilder2": "^3.0.2",
-    "zx": "^7.2.2"
+    "@types/debug": "4.1.12",
+    "@types/ms": "0.7.34",
+    "@types/node": "18.19.59",
+    "@types/sinonjs__fake-timers": "8.1.5",
+    "@types/split2": "3.2.1",
+    "@types/stoppable": "1.1.3",
+    "@types/tap": "15.0.12",
+    "chai": "4.5.0",
+    "cross-zip": "4.0.1",
+    "desm": "1.3.1",
+    "into-stream": "7.0.0",
+    "js-yaml": "4.1.0",
+    "license-checker": "25.0.1",
+    "minimist": "1.2.8",
+    "ms": "2.1.3",
+    "node-abort-controller": "3.1.1",
+    "node-fetch": "2.7.0",
+    "ora": "5.4.1",
+    "proxy": "1.0.2",
+    "rimraf": "3.0.2",
+    "semver": "7.6.3",
+    "split2": "4.2.0",
+    "stoppable": "1.1.0",
+    "tap": "21.0.1",
+    "ts-node": "10.9.2",
+    "ts-standard": "11.0.0",
+    "typescript": "4.9.5",
+    "workq": "3.0.0",
+    "xmlbuilder2": "3.1.1",
+    "zx": "7.2.3"
   },
   "dependencies": {
     "@elastic/transport": "^8.9.0",

From e287c1edd9ea2db81dd68c5340ebab2103c3005d Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Mon, 28 Oct 2024 11:19:02 -0500
Subject: [PATCH 403/647] Don't use hash-based Docker image version (#2414)

---
 .buildkite/Dockerfile      | 2 +-
 .buildkite/Dockerfile-make | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile
index b91fe60ad..2bf3886dc 100644
--- a/.buildkite/Dockerfile
+++ b/.buildkite/Dockerfile
@@ -1,5 +1,5 @@
 ARG NODE_VERSION=${NODE_VERSION:-18}
-FROM node:latest@sha256:840dad0077213cadd2d734d542ae11cd0f648200be29504eb1b6e2c995d2b75a:$NODE_VERSION
+FROM node:$NODE_VERSION
 
 # Install required tools
 RUN apt-get clean -y && \
diff --git a/.buildkite/Dockerfile-make b/.buildkite/Dockerfile-make
index 1261b2989..3805eb0a2 100644
--- a/.buildkite/Dockerfile-make
+++ b/.buildkite/Dockerfile-make
@@ -1,5 +1,5 @@
 ARG NODE_JS_VERSION=${NODE_JS_VERSION:-18}
-FROM node:latest@sha256:840dad0077213cadd2d734d542ae11cd0f648200be29504eb1b6e2c995d2b75a:${NODE_JS_VERSION}
+FROM node:${NODE_JS_VERSION}
 
 ARG BUILDER_UID=1000
 ARG BUILDER_GID=1000

From 20ac2a637ec5245f4442b8177e3c86282c9ea1d4 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Mon, 28 Oct 2024 11:46:10 -0500
Subject: [PATCH 404/647] Skip flaky test (#2416)

---
 test/unit/helpers/bulk.test.ts | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts
index 1f2ddf575..94db5c5e4 100644
--- a/test/unit/helpers/bulk.test.ts
+++ b/test/unit/helpers/bulk.test.ts
@@ -1819,6 +1819,7 @@ test('Flush interval', t => {
     })
 
     server.stop()
+    t.end()
   })
 
   test(`flush timeout does not lock process when flushInterval is greater than server timeout`, async t => {
@@ -1868,6 +1869,7 @@ test('Flush interval', t => {
     })
 
     server.stop()
+    t.end()
   })
 
   test(`flush timeout does not lock process when flushInterval is equal to server timeout`, async t => {
@@ -1917,6 +1919,7 @@ test('Flush interval', t => {
     })
 
     server.stop()
+    t.end()
   })
 
   t.end()
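
For context, the `flushInterval` these tests exercise is an option of the bulk helper; a minimal sketch of how it is typically set (the datasource and index name are placeholders):

[source, js]
----
const { Client } = require('@elastic/elasticsearch')

const client = new Client({ node: 'http://localhost:9200/' })

await client.helpers.bulk({
  datasource: [{ user: 'arya' }, { user: 'jon' }],
  // flush buffered operations after 5s even if flushBytes was not reached
  flushInterval: 5000,
  onDocument () {
    return { index: { _index: 'my-index' } }
  }
})
----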
...b3cd07f02059165fd62a2f148be3dc58.asciidoc} | 12 +- ...b3f442a7d9eb391121dcab991787f9d6.asciidoc} | 12 +- .../b9ba66209b7fcc111a7bcef0b3e00052.asciidoc | 17 + ...ba650046f9063f6c43d76f47e0f94403.asciidoc} | 12 +- .../bb5a67e3d2d9cd3016e487e627769fe8.asciidoc | 88 +++ .../bc01aee2ab2ce1690986374bd836e1c7.asciidoc | 16 + ...bdaf00d791706d7fde25fd65d3735b94.asciidoc} | 12 +- .../befa73a8a419fcf3b7798548b54a20bf.asciidoc | 37 ++ ...c793efe7280e9b6e09981c4d4f832348.asciidoc} | 12 +- .../c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc | 23 + ...ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc} | 14 + .../dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc | 28 + .../ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc | 14 + .../f1bf0c03581b79c3324cfa3246a60e4d.asciidoc | 22 + ...f8f960550104c33e00dc78bc8723ccef.asciidoc} | 7 +- ...f95a4d7ab02bf400246c8822f0245f02.asciidoc} | 2 +- ...f9bad6fd369764185e1cb09b89ee39cc.asciidoc} | 12 +- ...fb0152f6c70f647a8b6709969113486d.asciidoc} | 12 +- ...fe6429d0d82174aa5acf95e96e237380.asciidoc} | 12 +- docs/reference.asciidoc | 534 +++++++++++++----- src/api/api/async_search.ts | 8 +- src/api/api/ccr.ts | 2 +- src/api/api/connector.ts | 50 +- src/api/api/dangling_indices.ts | 6 +- src/api/api/delete_by_query_rethrottle.ts | 2 +- src/api/api/eql.ts | 2 +- src/api/api/get_script_context.ts | 2 +- src/api/api/get_script_languages.ts | 2 +- src/api/api/indices.ts | 15 +- src/api/api/inference.ts | 40 ++ src/api/api/mget.ts | 2 +- src/api/api/ml.ts | 2 +- src/api/api/msearch.ts | 2 +- src/api/api/mtermvectors.ts | 2 +- src/api/api/query_rules.ts | 44 ++ src/api/api/reindex_rethrottle.ts | 2 +- src/api/api/scroll.ts | 2 +- src/api/api/security.ts | 104 ++-- src/api/api/ssl.ts | 2 +- src/api/api/terms_enum.ts | 2 +- src/api/api/update_by_query_rethrottle.ts | 2 +- src/api/types.ts | 179 ++++-- src/api/typesWithBodyKey.ts | 186 ++++-- 93 files changed, 2023 insertions(+), 438 deletions(-) rename docs/doc_examples/{77113c65e1755313183a8969233a5a07.asciidoc => 01ae196538fac197eedbbf458a4ef31b.asciidoc} (81%) rename docs/doc_examples/{611c1e05f4ebb48a1a8c8488238ce34d.asciidoc => 0709a38613d2de90d418ce12b36af30e.asciidoc} (70%) rename docs/doc_examples/{eed968e0d9fa2a4545c36a4e5f47b64b.asciidoc => 0bee07a581c5776e068f6f4efad5a399.asciidoc} (93%) create mode 100644 docs/doc_examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc rename docs/doc_examples/{ce13afc0c976c5e1f424b58e0c97fd64.asciidoc => 0d30077cd34e93377a3a86f2ebd69415.asciidoc} (94%) create mode 100644 docs/doc_examples/0e31b8ad176b31028becf9500989bcbd.asciidoc create mode 100644 docs/doc_examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc create mode 100644 docs/doc_examples/1a7483796087053ba55029d0dc2ab356.asciidoc rename docs/doc_examples/{983a867c90e63e070518f2f709f659ee.asciidoc => 21d41e8cbd107fbdf0901f885834dafc.asciidoc} (80%) create mode 100644 docs/doc_examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc rename docs/doc_examples/{d1a285aa244ec461d68f13e7078a33c0.asciidoc => 3ab8f65fcb55a0e3664c55749ec41efd.asciidoc} (96%) create mode 100644 docs/doc_examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc create mode 100644 docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc create mode 100644 docs/doc_examples/55085e6a2891040b6ac696561d0787c8.asciidoc rename docs/doc_examples/{97f260817b60f3deb7f7034d7dee7e12.asciidoc => 551799fef2f86e393db83a967e4a30d1.asciidoc} (84%) rename docs/doc_examples/{5db5349162a4fbe74bffb646926a2495.asciidoc => 56da252798b8e7b006738428aa1a7f4c.asciidoc} (81%) create mode 100644 
docs/doc_examples/58dd26afc919722e21358c91e112b27a.asciidoc create mode 100644 docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc rename docs/doc_examples/{c4654a4ca2f4600606dcc5bf37186c0b.asciidoc => 5bba213a7f543190139d1a69ab2ed076.asciidoc} (92%) create mode 100644 docs/doc_examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc create mode 100644 docs/doc_examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc rename docs/doc_examples/{1d827ae674970692643ea81991e5396e.asciidoc => 5daf8ede198be9b118da5bee9896cb00.asciidoc} (85%) rename docs/doc_examples/{10535507a9735fcf06600444b9067d4c.asciidoc => 6cb1dae368c945ecf7c9ec332a5743a2.asciidoc} (84%) rename docs/doc_examples/{8bf51fd50195b46bacbf872f460ebec2.asciidoc => 71998bb300ac2a58419b0772cdc1c586.asciidoc} (81%) rename docs/doc_examples/{35b686d9d9e915d0dea7a4251781767d.asciidoc => 750ac969f9a05567f5cdf4f93d6244b6.asciidoc} (95%) create mode 100644 docs/doc_examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc create mode 100644 docs/doc_examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc create mode 100644 docs/doc_examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc create mode 100644 docs/doc_examples/7b9691bd34a02dd859562eb927f175e0.asciidoc rename docs/doc_examples/{f3574cfee3971d98417b8dc574a91be0.asciidoc => 7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc} (85%) create mode 100644 docs/doc_examples/7db09cab02d71f3a10d91071216d80fc.asciidoc create mode 100644 docs/doc_examples/7db798942cf2d334456e30ef5fcb801b.asciidoc rename docs/doc_examples/{1d918e206ad8dab916e59183da24d9ec.asciidoc => 8080cd9e24a8785728ce7c372ec4acf1.asciidoc} (68%) create mode 100644 docs/doc_examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc create mode 100644 docs/doc_examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc rename docs/doc_examples/{13fd7a99c5cf53279409ecc679084f87.asciidoc => 8b8b6aac2111b2d8b93758ac737e6543.asciidoc} (80%) create mode 100644 docs/doc_examples/8d05862be1f9e7edaba162b1888b5677.asciidoc create mode 100644 docs/doc_examples/9ad0864bcd665b63551e944653d32423.asciidoc rename docs/doc_examples/{2cd8439db5054c93c49f1bf50433e1bb.asciidoc => 9aedc45f83e022732789e8d796f5a43c.asciidoc} (96%) rename docs/doc_examples/{5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc => 9c2ce0132e4527077443f007d27b1158.asciidoc} (81%) rename docs/doc_examples/{f38262ef72f73816ec35fa4c9c85760d.asciidoc => a1b668795243398f5bc40bcc9bead884.asciidoc} (85%) rename docs/doc_examples/{14a49c13c399840e64c00b487aa820c9.asciidoc => a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc} (81%) rename docs/doc_examples/{2826510e4aeb1c0d8dc43d317ed7624a.asciidoc => a7d814caf2a995d2aeadecc3495011be.asciidoc} (80%) rename docs/doc_examples/{794d9a321b944347d2a8834a07b5eb22.asciidoc => a8dff54362184b2732b9bd248cf6df8a.asciidoc} (81%) create mode 100644 docs/doc_examples/aab810de3314d5e11bd564ea096785b8.asciidoc rename docs/doc_examples/{0e5db64154a722a5cbdb84b588ce2ce8.asciidoc => aad7d80990a6a3c391ff555ce09ae9dc.asciidoc} (80%) rename docs/doc_examples/{f9cb2547ab04461a12bfd25a35be5f96.asciidoc => ac5b91aa75696f9880451c9439fd9eec.asciidoc} (84%) create mode 100644 docs/doc_examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc rename docs/doc_examples/{c1bb395546102279296534522061829f.asciidoc => b3479ee4586c15020549afae58d94d65.asciidoc} (83%) rename docs/doc_examples/{36063ff9a318dba7bb0be3a230655dc8.asciidoc => b3cd07f02059165fd62a2f148be3dc58.asciidoc} (80%) rename docs/doc_examples/{51390ca10aa22d7104e8970f09ea4512.asciidoc => b3f442a7d9eb391121dcab991787f9d6.asciidoc} (81%) create mode 100644 
docs/doc_examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc rename docs/doc_examples/{fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc => ba650046f9063f6c43d76f47e0f94403.asciidoc} (81%) create mode 100644 docs/doc_examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc create mode 100644 docs/doc_examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc rename docs/doc_examples/{63d1c07d22a3ca3b0ec6d950547c011c.asciidoc => bdaf00d791706d7fde25fd65d3735b94.asciidoc} (80%) create mode 100644 docs/doc_examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc rename docs/doc_examples/{e566e898902e432bc7ea0568400f0c50.asciidoc => c793efe7280e9b6e09981c4d4f832348.asciidoc} (83%) create mode 100644 docs/doc_examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc rename docs/doc_examples/{dbce6cb1eaf9b2cc36b7f9a13afc63ea.asciidoc => ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc} (77%) create mode 100644 docs/doc_examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc create mode 100644 docs/doc_examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc create mode 100644 docs/doc_examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc rename docs/doc_examples/{0bef1fdefeb2956d60d52d3f38397cad.asciidoc => f8f960550104c33e00dc78bc8723ccef.asciidoc} (72%) rename docs/doc_examples/{1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc => f95a4d7ab02bf400246c8822f0245f02.asciidoc} (94%) rename docs/doc_examples/{1e0f203aced9344382081ab095c44dde.asciidoc => f9bad6fd369764185e1cb09b89ee39cc.asciidoc} (83%) rename docs/doc_examples/{633c8a9fc57268979d8735c557705809.asciidoc => fb0152f6c70f647a8b6709969113486d.asciidoc} (81%) rename docs/doc_examples/{1fcc4a3280be399753dcfd5c489ff682.asciidoc => fe6429d0d82174aa5acf95e96e237380.asciidoc} (82%) diff --git a/docs/doc_examples/77113c65e1755313183a8969233a5a07.asciidoc b/docs/doc_examples/01ae196538fac197eedbbf458a4ef31b.asciidoc similarity index 81% rename from docs/doc_examples/77113c65e1755313183a8969233a5a07.asciidoc rename to docs/doc_examples/01ae196538fac197eedbbf458a4ef31b.asciidoc index b9dea4e56..487139330 100644 --- a/docs/doc_examples/77113c65e1755313183a8969233a5a07.asciidoc +++ b/docs/doc_examples/01ae196538fac197eedbbf458a4ef31b.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { kwd: { type: "keyword", diff --git a/docs/doc_examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc b/docs/doc_examples/0709a38613d2de90d418ce12b36af30e.asciidoc similarity index 70% rename from docs/doc_examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc rename to docs/doc_examples/0709a38613d2de90d418ce12b36af30e.asciidoc index 8303a5630..dfe9d6be5 100644 --- a/docs/doc_examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc +++ b/docs/doc_examples/0709a38613d2de90d418ce12b36af30e.asciidoc @@ -3,8 +3,6 @@ [source, js] ---- -const response = await client.cluster.reroute({ - metric: "none", -}); +const response = await client.cluster.reroute(); console.log(response); ---- diff --git a/docs/doc_examples/eed968e0d9fa2a4545c36a4e5f47b64b.asciidoc b/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc similarity index 93% rename from docs/doc_examples/eed968e0d9fa2a4545c36a4e5f47b64b.asciidoc rename to docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc index 68ca702c1..5b0c7d4e7 100644 --- a/docs/doc_examples/eed968e0d9fa2a4545c36a4e5f47b64b.asciidoc +++ b/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc @@ -8,6 +8,7 
 const response = await client.esql.asyncQuery({
   body: {
     query: "\n    FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n    | STATS COUNT(http.response.status_code) BY user.id\n    | LIMIT 2\n  ",
+    include_ccs_metadata: true,
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc b/docs/doc_examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc
new file mode 100644
index 000000000..54759101e
--- /dev/null
+++ b/docs/doc_examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "cooking_blog",
+  query: {
+    match: {
+      title: {
+        query: "fluffy pancakes breakfast",
+        minimum_should_match: 2,
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc b/docs/doc_examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc
similarity index 94%
rename from docs/doc_examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc
rename to docs/doc_examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc
index e06ff8a73..fb9189c62 100644
--- a/docs/doc_examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc
+++ b/docs/doc_examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc
@@ -9,7 +9,7 @@ const response = await client.connector.put({
   name: "My Connector",
   description: "My Connector to sync data to Elastic index from Google Drive",
   service_type: "google_drive",
-  language: "english",
+  language: "en",
 });
 console.log(response);
 ----
diff --git a/docs/doc_examples/0e31b8ad176b31028becf9500989bcbd.asciidoc b/docs/doc_examples/0e31b8ad176b31028becf9500989bcbd.asciidoc
new file mode 100644
index 000000000..53fcff58d
--- /dev/null
+++ b/docs/doc_examples/0e31b8ad176b31028becf9500989bcbd.asciidoc
@@ -0,0 +1,21 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "watsonx-embeddings",
+  inference_config: {
+    service: "watsonxai",
+    service_settings: {
+      api_key: "",
+      url: "",
+      model_id: "ibm/slate-30m-english-rtrvr",
+      project_id: "",
+      api_version: "2024-03-14",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc b/docs/doc_examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc
new file mode 100644
index 000000000..9b24f99c0
--- /dev/null
+++ b/docs/doc_examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc
@@ -0,0 +1,55 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "cooking_blog",
+  query: {
+    bool: {
+      must: [
+        {
+          term: {
+            "category.keyword": "Main Course",
+          },
+        },
+        {
+          term: {
+            tags: "vegetarian",
+          },
+        },
+        {
+          range: {
+            rating: {
+              gte: 4.5,
+            },
+          },
+        },
+      ],
+      should: [
+        {
+          multi_match: {
+            query: "curry spicy",
+            fields: ["title^2", "description"],
+          },
+        },
+        {
+          range: {
+            date: {
+              gte: "now-1M/d",
+            },
+          },
+        },
+      ],
+      must_not: [
+        {
+          term: {
+            "category.keyword": "Dessert",
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1a7483796087053ba55029d0dc2ab356.asciidoc b/docs/doc_examples/1a7483796087053ba55029d0dc2ab356.asciidoc
new file mode 100644
index 000000000..a5696747c
--- /dev/null
+++ b/docs/doc_examples/1a7483796087053ba55029d0dc2ab356.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "mv",
+  refresh: "true",
+  document: {
+    a: [2, null, 1],
+  },
+});
+console.log(response);
+
+const response1 = await client.esql.query({
+  query: "FROM mv | LIMIT 1",
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/983a867c90e63e070518f2f709f659ee.asciidoc b/docs/doc_examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc
similarity index 80%
rename from docs/doc_examples/983a867c90e63e070518f2f709f659ee.asciidoc
rename to docs/doc_examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc
index 117b811b7..01d52961a 100644
--- a/docs/doc_examples/983a867c90e63e070518f2f709f659ee.asciidoc
+++ b/docs/doc_examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       card: {
         type: "wildcard",
diff --git a/docs/doc_examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc b/docs/doc_examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc
new file mode 100644
index 000000000..9e9cb47ce
--- /dev/null
+++ b/docs/doc_examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc
@@ -0,0 +1,77 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000001",
+  mappings: {
+    properties: {
+      attributes: {
+        type: "passthrough",
+        priority: 10,
+        properties: {
+          id: {
+            type: "keyword",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "my-index-000001",
+  id: 1,
+  document: {
+    attributes: {
+      id: "foo",
+      zone: 10,
+    },
+  },
+});
+console.log(response1);
+
+const response2 = await client.search({
+  index: "my-index-000001",
+  query: {
+    bool: {
+      must: [
+        {
+          match: {
+            id: "foo",
+          },
+        },
+        {
+          match: {
+            zone: 10,
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response2);
+
+const response3 = await client.search({
+  index: "my-index-000001",
+  query: {
+    bool: {
+      must: [
+        {
+          match: {
+            "attributes.id": "foo",
+          },
+        },
+        {
+          match: {
+            "attributes.zone": 10,
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response3);
+----
diff --git a/docs/doc_examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc b/docs/doc_examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc
similarity index 96%
rename from docs/doc_examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc
rename to docs/doc_examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc
index e21b336f5..e498feff3 100644
--- a/docs/doc_examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc
+++ b/docs/doc_examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc
@@ -29,6 +29,7 @@ const response = await client.indices.create({
             "arabic_normalization",
             "persian_normalization",
             "persian_stop",
+            "persian_stem",
           ],
         },
       },
diff --git a/docs/doc_examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc b/docs/doc_examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc
new file mode 100644
index 000000000..5b94aaeb7
--- /dev/null
+++ b/docs/doc_examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc
@@ -0,0 +1,16 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.index({
+  index: "amazon-reviews",
+  id: 1,
+  document: {
+    review_text:
+      "This product is lifechanging! I'm telling all my friends about it.",
+    review_vector: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc
new file mode 100644
index 000000000..5f31b1de6
--- /dev/null
+++ b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc
@@ -0,0 +1,14 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.inference.streamInference({
+  task_type: "completion",
+  inference_id: "openai-completion",
+  body: {
+    input: "What is Elastic?",
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/55085e6a2891040b6ac696561d0787c8.asciidoc b/docs/doc_examples/55085e6a2891040b6ac696561d0787c8.asciidoc
new file mode 100644
index 000000000..eebda22c2
--- /dev/null
+++ b/docs/doc_examples/55085e6a2891040b6ac696561d0787c8.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-index-000002",
+  mappings: {
+    properties: {
+      attributes: {
+        type: "passthrough",
+        priority: 10,
+        properties: {
+          id: {
+            type: "keyword",
+          },
+        },
+      },
+      "resource.attributes": {
+        type: "passthrough",
+        priority: 20,
+        properties: {
+          id: {
+            type: "keyword",
+          },
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/97f260817b60f3deb7f7034d7dee7e12.asciidoc b/docs/doc_examples/551799fef2f86e393db83a967e4a30d1.asciidoc
similarity index 84%
rename from docs/doc_examples/97f260817b60f3deb7f7034d7dee7e12.asciidoc
rename to docs/doc_examples/551799fef2f86e393db83a967e4a30d1.asciidoc
index de62cbe9d..ee97fb114 100644
--- a/docs/doc_examples/97f260817b60f3deb7f7034d7dee7e12.asciidoc
+++ b/docs/doc_examples/551799fef2f86e393db83a967e4a30d1.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       agg_metric: {
         type: "aggregate_metric_double",
diff --git a/docs/doc_examples/5db5349162a4fbe74bffb646926a2495.asciidoc b/docs/doc_examples/56da252798b8e7b006738428aa1a7f4c.asciidoc
similarity index 81%
rename from docs/doc_examples/5db5349162a4fbe74bffb646926a2495.asciidoc
rename to docs/doc_examples/56da252798b8e7b006738428aa1a7f4c.asciidoc
index 2f634da70..8f272acdb 100644
--- a/docs/doc_examples/5db5349162a4fbe74bffb646926a2495.asciidoc
+++ b/docs/doc_examples/56da252798b8e7b006738428aa1a7f4c.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       my_range: {
         type: "long_range",
diff --git a/docs/doc_examples/58dd26afc919722e21358c91e112b27a.asciidoc b/docs/doc_examples/58dd26afc919722e21358c91e112b27a.asciidoc
new file mode 100644
index 000000000..61938b700
--- /dev/null
+++ b/docs/doc_examples/58dd26afc919722e21358c91e112b27a.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "cooking_blog",
+  query: {
+    range: {
+      date: {
+        gte: "2023-05-01",
+        lte: "2023-05-31",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc b/docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc
new file mode 100644
index 000000000..255b2df23
--- /dev/null
+++ b/docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc
@@ -0,0 +1,22 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "semantic-embeddings",
+  mappings: {
+    properties: {
+      semantic_text: {
+        type: "semantic_text",
+        inference_id: "my-elser-endpoint",
+      },
+      content: {
+        type: "text",
+        copy_to: "semantic_text",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/c4654a4ca2f4600606dcc5bf37186c0b.asciidoc b/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc
similarity index 92%
rename from docs/doc_examples/c4654a4ca2f4600606dcc5bf37186c0b.asciidoc
rename to docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc
index 674854196..c95b379f3 100644
--- a/docs/doc_examples/c4654a4ca2f4600606dcc5bf37186c0b.asciidoc
+++ b/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc
@@ -8,6 +8,7 @@ const response = await client.esql.asyncQuery({
   body: {
     query: "\n    FROM cluster_one:my-index*,cluster_two:logs*\n    | STATS COUNT(http.response.status_code) BY user.id\n    | LIMIT 2\n  ",
+    include_ccs_metadata: true,
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc b/docs/doc_examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc
new file mode 100644
index 000000000..e2a1539c1
--- /dev/null
+++ b/docs/doc_examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.inference.put({
+  task_type: "sparse_embedding",
+  inference_id: "small_chunk_size",
+  inference_config: {
+    service: "elasticsearch",
+    service_settings: {
+      num_allocations: 1,
+      num_threads: 1,
+    },
+    chunking_settings: {
+      strategy: "sentence",
+      max_chunk_size: 100,
+      sentence_overlap: 0,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc b/docs/doc_examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc
new file mode 100644
index 000000000..423a609ea
--- /dev/null
+++ b/docs/doc_examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc
@@ -0,0 +1,19 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "idx",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1d827ae674970692643ea81991e5396e.asciidoc b/docs/doc_examples/5daf8ede198be9b118da5bee9896cb00.asciidoc
similarity index 85%
rename from docs/doc_examples/1d827ae674970692643ea81991e5396e.asciidoc
rename to docs/doc_examples/5daf8ede198be9b118da5bee9896cb00.asciidoc
index ac92b9868..d554caaf0 100644
--- a/docs/doc_examples/1d827ae674970692643ea81991e5396e.asciidoc
+++ b/docs/doc_examples/5daf8ede198be9b118da5bee9896cb00.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       flattened: {
         type: "flattened",
diff --git a/docs/doc_examples/10535507a9735fcf06600444b9067d4c.asciidoc b/docs/doc_examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc
similarity index 84%
rename from docs/doc_examples/10535507a9735fcf06600444b9067d4c.asciidoc
rename to docs/doc_examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc
index c7bc3b58e..fd3ab96e4 100644
--- a/docs/doc_examples/10535507a9735fcf06600444b9067d4c.asciidoc
+++ b/docs/doc_examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       text: {
         type: "text",
diff --git a/docs/doc_examples/8bf51fd50195b46bacbf872f460ebec2.asciidoc b/docs/doc_examples/71998bb300ac2a58419b0772cdc1c586.asciidoc
similarity index 81%
rename from docs/doc_examples/8bf51fd50195b46bacbf872f460ebec2.asciidoc
rename to docs/doc_examples/71998bb300ac2a58419b0772cdc1c586.asciidoc
index 84bfecb7b..2efade53d 100644
--- a/docs/doc_examples/8bf51fd50195b46bacbf872f460ebec2.asciidoc
+++ b/docs/doc_examples/71998bb300ac2a58419b0772cdc1c586.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       versions: {
         type: "version",
diff --git a/docs/doc_examples/35b686d9d9e915d0dea7a4251781767d.asciidoc b/docs/doc_examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc
similarity index 95%
rename from docs/doc_examples/35b686d9d9e915d0dea7a4251781767d.asciidoc
rename to docs/doc_examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc
index 918d625f6..8a82d0135 100644
--- a/docs/doc_examples/35b686d9d9e915d0dea7a4251781767d.asciidoc
+++ b/docs/doc_examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc
@@ -4,7 +4,6 @@
 [source, js]
 ----
 const response = await client.cluster.reroute({
-  metric: "none",
   commands: [
     {
       allocate_empty_primary: {
diff --git a/docs/doc_examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc b/docs/doc_examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc
new file mode 100644
index 000000000..b40a8f8d9
--- /dev/null
+++ b/docs/doc_examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc
@@ -0,0 +1,52 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.bulk({
+  operations: [
+    {
+      index: {
+        _index: "amazon-reviews",
+        _id: "2",
+      },
+    },
+    {
+      review_text: "This product is amazing! I love it.",
+      review_vector: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
+    },
+    {
+      index: {
+        _index: "amazon-reviews",
+        _id: "3",
+      },
+    },
+    {
+      review_text: "This product is terrible. I hate it.",
+      review_vector: [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
+    },
+    {
+      index: {
+        _index: "amazon-reviews",
+        _id: "4",
+      },
+    },
+    {
+      review_text: "This product is great. I can do anything with it.",
+      review_vector: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
+    },
+    {
+      index: {
+        _index: "amazon-reviews",
+        _id: "5",
+      },
+    },
+    {
+      review_text:
+        "This product has ruined my life and the lives of my family and friends.",
+      review_vector: [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
+    },
+  ],
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc b/docs/doc_examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc
new file mode 100644
index 000000000..9bd1c1eea
--- /dev/null
+++ b/docs/doc_examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "cooking_blog",
+  query: {
+    match: {
+      description: {
+        query: "fluffy pancakes",
+        operator: "and",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc b/docs/doc_examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc
new file mode 100644
index 000000000..13de04136
--- /dev/null
+++ b/docs/doc_examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc
@@ -0,0 +1,49 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putIndexTemplate({
+  name: "my-metrics",
+  index_patterns: ["metrics-mymetrics-*"],
+  priority: 200,
+  data_stream: {},
+  template: {
+    settings: {
+      "index.mode": "time_series",
+    },
+    mappings: {
+      properties: {
+        attributes: {
+          type: "passthrough",
+          priority: 10,
+          time_series_dimension: true,
+          properties: {
+            "host.name": {
+              type: "keyword",
+            },
+          },
+        },
+        cpu: {
+          type: "integer",
+          time_series_metric: "counter",
+        },
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.index({
+  index: "metrics-mymetrics-test",
+  document: {
+    "@timestamp": "2020-01-01T00:00:00.000Z",
+    attributes: {
+      "host.name": "foo",
+      zone: "bar",
+    },
+    cpu: 10,
+  },
+});
+console.log(response1);
+----
diff --git a/docs/doc_examples/7b9691bd34a02dd859562eb927f175e0.asciidoc b/docs/doc_examples/7b9691bd34a02dd859562eb927f175e0.asciidoc
new file mode 100644
index 000000000..847cbc4b8
--- /dev/null
+++ b/docs/doc_examples/7b9691bd34a02dd859562eb927f175e0.asciidoc
@@ -0,0 +1,23 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.inference.put({
+  task_type: "sparse_embedding",
+  inference_id: "my-elser-model",
+  inference_config: {
+    service: "elasticsearch",
+    service_settings: {
+      adaptive_allocations: {
+        enabled: true,
+        min_number_of_allocations: 1,
+        max_number_of_allocations: 10,
+      },
+      num_threads: 1,
+      model_id: ".elser_model_2",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/f3574cfee3971d98417b8dc574a91be0.asciidoc b/docs/doc_examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc
similarity index 85%
rename from docs/doc_examples/f3574cfee3971d98417b8dc574a91be0.asciidoc
rename to docs/doc_examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc
index 0ae64f8ad..b319e1c28 100644
--- a/docs/doc_examples/f3574cfee3971d98417b8dc574a91be0.asciidoc
+++ b/docs/doc_examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       flattened: {
         type: "flattened",
diff --git a/docs/doc_examples/7db09cab02d71f3a10d91071216d80fc.asciidoc b/docs/doc_examples/7db09cab02d71f3a10d91071216d80fc.asciidoc
new file mode 100644
index 000000000..94a971289
--- /dev/null
+++ b/docs/doc_examples/7db09cab02d71f3a10d91071216d80fc.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "amazon-reviews",
+  retriever: {
+    knn: {
+      field: "review_vector",
+      query_vector: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
+      k: 2,
+      num_candidates: 5,
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/7db798942cf2d334456e30ef5fcb801b.asciidoc b/docs/doc_examples/7db798942cf2d334456e30ef5fcb801b.asciidoc
new file mode 100644
index 000000000..79ead4d92
--- /dev/null
+++ b/docs/doc_examples/7db798942cf2d334456e30ef5fcb801b.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "cooking_blog",
+  query: {
+    match: {
+      description: {
+        query: "fluffy pancakes",
+      },
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/1d918e206ad8dab916e59183da24d9ec.asciidoc b/docs/doc_examples/8080cd9e24a8785728ce7c372ec4acf1.asciidoc
similarity index 68%
rename from docs/doc_examples/1d918e206ad8dab916e59183da24d9ec.asciidoc
rename to docs/doc_examples/8080cd9e24a8785728ce7c372ec4acf1.asciidoc
index 6e0a57866..2904d2dc7 100644
--- a/docs/doc_examples/1d918e206ad8dab916e59183da24d9ec.asciidoc
+++ b/docs/doc_examples/8080cd9e24a8785728ce7c372ec4acf1.asciidoc
@@ -3,9 +3,10 @@
 
 [source, js]
 ----
-const response = await client.indices.putSettings({
-  index: ".watches",
-  settings: {
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_watcher/settings",
+  body: {
     "index.routing.allocation.include.role": "watcher",
   },
 });
diff --git a/docs/doc_examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc b/docs/doc_examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc
index 09a235c98..ae893a16e 100644
--- a/docs/doc_examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc
+++ b/docs/doc_examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc
@@ -3,9 +3,8 @@
 
 [source, js]
 ----
-const response = await client.snapshot.create({
-  repository: "my_repository",
-  snapshot: "_verify_integrity",
+const response = await client.snapshot.repositoryVerifyIntegrity({
+  name: "my_repository",
 });
 console.log(response);
 ----
diff --git a/docs/doc_examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc b/docs/doc_examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc
new file mode 100644
index 000000000..5d2ef1d8d
--- /dev/null
+++ b/docs/doc_examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc
@@ -0,0 +1,15 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "cooking_blog",
+  query: {
+    term: {
+      "author.keyword": "Maria Rodriguez",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc b/docs/doc_examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc
new file mode 100644
index 000000000..9bc60ea81
--- /dev/null
+++ b/docs/doc_examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc
@@ -0,0 +1,17 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.inference.put({
+  task_type: "sparse_embedding",
+  inference_id: "use_existing_deployment",
+  inference_config: {
+    service: "elasticsearch",
+    service_settings: {
+      deployment_id: ".elser_model_2",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/13fd7a99c5cf53279409ecc679084f87.asciidoc b/docs/doc_examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc
similarity index 80%
rename from docs/doc_examples/13fd7a99c5cf53279409ecc679084f87.asciidoc
rename to docs/doc_examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc
index 5ce2b993b..9bee24620 100644
--- a/docs/doc_examples/13fd7a99c5cf53279409ecc679084f87.asciidoc
+++ b/docs/doc_examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx_keep",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       path: {
         type: "object",
diff --git a/docs/doc_examples/8d05862be1f9e7edaba162b1888b5677.asciidoc b/docs/doc_examples/8d05862be1f9e7edaba162b1888b5677.asciidoc
new file mode 100644
index 000000000..0241e351c
--- /dev/null
+++ b/docs/doc_examples/8d05862be1f9e7edaba162b1888b5677.asciidoc
@@ -0,0 +1,61 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.putMapping({
+  index: "cooking_blog",
+  properties: {
+    title: {
+      type: "text",
+      analyzer: "standard",
+      fields: {
+        keyword: {
+          type: "keyword",
+          ignore_above: 256,
+        },
+      },
+    },
+    description: {
+      type: "text",
+      fields: {
+        keyword: {
+          type: "keyword",
+        },
+      },
+    },
+    author: {
+      type: "text",
+      fields: {
+        keyword: {
+          type: "keyword",
+        },
+      },
+    },
+    date: {
+      type: "date",
+      format: "yyyy-MM-dd",
+    },
+    category: {
+      type: "text",
+      fields: {
+        keyword: {
+          type: "keyword",
+        },
+      },
+    },
+    tags: {
+      type: "text",
+      fields: {
+        keyword: {
+          type: "keyword",
+        },
+      },
+    },
+    rating: {
+      type: "float",
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/9ad0864bcd665b63551e944653d32423.asciidoc b/docs/doc_examples/9ad0864bcd665b63551e944653d32423.asciidoc
new file mode 100644
index 000000000..f553c8706
--- /dev/null
+++ b/docs/doc_examples/9ad0864bcd665b63551e944653d32423.asciidoc
@@ -0,0 +1,35 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.search({
+  index: "semantic-embeddings",
+  retriever: {
+    rrf: {
+      retrievers: [
+        {
+          standard: {
+            query: {
+              match: {
+                content: "How to avoid muscle soreness while running?",
+              },
+            },
+          },
+        },
+        {
+          standard: {
+            query: {
+              semantic: {
+                field: "semantic_text",
+                query: "How to avoid muscle soreness while running?",
+              },
+            },
+          },
+        },
+      ],
+    },
+  },
+});
+console.log(response);
+----
diff --git a/docs/doc_examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc b/docs/doc_examples/9aedc45f83e022732789e8d796f5a43c.asciidoc
similarity index 96%
rename from docs/doc_examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc
rename to docs/doc_examples/9aedc45f83e022732789e8d796f5a43c.asciidoc
index fb87919cd..f4f4c1e5d 100644
--- a/docs/doc_examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc
+++ b/docs/doc_examples/9aedc45f83e022732789e8d796f5a43c.asciidoc
@@ -4,7 +4,6 @@
 [source, js]
 ----
 const response = await client.cluster.reroute({
-  metric: "none",
   commands: [
     {
       move: {
diff --git a/docs/doc_examples/5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc b/docs/doc_examples/9c2ce0132e4527077443f007d27b1158.asciidoc
similarity index 81%
rename from docs/doc_examples/5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc
rename to docs/doc_examples/9c2ce0132e4527077443f007d27b1158.asciidoc
index 9e0654221..a85d946d2 100644
--- a/docs/doc_examples/5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc
+++ b/docs/doc_examples/9c2ce0132e4527077443f007d27b1158.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       flattened: {
         type: "flattened",
diff --git a/docs/doc_examples/f38262ef72f73816ec35fa4c9c85760d.asciidoc b/docs/doc_examples/a1b668795243398f5bc40bcc9bead884.asciidoc
similarity index 85%
rename from docs/doc_examples/f38262ef72f73816ec35fa4c9c85760d.asciidoc
rename to docs/doc_examples/a1b668795243398f5bc40bcc9bead884.asciidoc
index ec74a3eb1..31d504f3c 100644
--- a/docs/doc_examples/f38262ef72f73816ec35fa4c9c85760d.asciidoc
+++ b/docs/doc_examples/a1b668795243398f5bc40bcc9bead884.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       my_range: {
         type: "long_range",
diff --git a/docs/doc_examples/14a49c13c399840e64c00b487aa820c9.asciidoc b/docs/doc_examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc
similarity index 81%
rename from docs/doc_examples/14a49c13c399840e64c00b487aa820c9.asciidoc
rename to docs/doc_examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc
index 0ad43b3d4..43cce7452 100644
--- a/docs/doc_examples/14a49c13c399840e64c00b487aa820c9.asciidoc
+++ b/docs/doc_examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       date: {
         type: "date_nanos",
diff --git a/docs/doc_examples/2826510e4aeb1c0d8dc43d317ed7624a.asciidoc b/docs/doc_examples/a7d814caf2a995d2aeadecc3495011be.asciidoc
similarity index 80%
rename from docs/doc_examples/2826510e4aeb1c0d8dc43d317ed7624a.asciidoc
rename to docs/doc_examples/a7d814caf2a995d2aeadecc3495011be.asciidoc
index 722ee1661..782a1c3cd 100644
--- a/docs/doc_examples/2826510e4aeb1c0d8dc43d317ed7624a.asciidoc
+++ b/docs/doc_examples/a7d814caf2a995d2aeadecc3495011be.asciidoc
@@ -5,10 +5,16 @@
 ----
 const response = await client.indices.create({
   index: "idx",
-  mappings: {
-    _source: {
-      mode: "synthetic",
+  settings: {
+    index: {
+      mapping: {
+        source: {
+          mode: "synthetic",
+        },
+      },
     },
+  },
+  mappings: {
     properties: {
       bool: {
         type: "boolean",
diff --git a/docs/doc_examples/794d9a321b944347d2a8834a07b5eb22.asciidoc b/docs/doc_examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc
similarity index 81%
rename from docs/doc_examples/794d9a321b944347d2a8834a07b5eb22.asciidoc
rename to
docs/doc_examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc index b72e43b07..e9c4ba6ea 100644 --- a/docs/doc_examples/794d9a321b944347d2a8834a07b5eb22.asciidoc +++ b/docs/doc_examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { my_range: { type: "integer_range", diff --git a/docs/doc_examples/aab810de3314d5e11bd564ea096785b8.asciidoc b/docs/doc_examples/aab810de3314d5e11bd564ea096785b8.asciidoc new file mode 100644 index 000000000..5ab6d8d6d --- /dev/null +++ b/docs/doc_examples/aab810de3314d5e11bd564ea096785b8.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + bool: { + filter: [ + { + term: { + "category.keyword": "Breakfast", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e5db64154a722a5cbdb84b588ce2ce8.asciidoc b/docs/doc_examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc similarity index 80% rename from docs/doc_examples/0e5db64154a722a5cbdb84b588ce2ce8.asciidoc rename to docs/doc_examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc index 05cd6d517..bc29bce3b 100644 --- a/docs/doc_examples/0e5db64154a722a5cbdb84b588ce2ce8.asciidoc +++ b/docs/doc_examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { f: { type: "scaled_float", diff --git a/docs/doc_examples/f9cb2547ab04461a12bfd25a35be5f96.asciidoc b/docs/doc_examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc similarity index 84% rename from docs/doc_examples/f9cb2547ab04461a12bfd25a35be5f96.asciidoc rename to docs/doc_examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc index 75d369723..08b6a99c8 100644 --- a/docs/doc_examples/f9cb2547ab04461a12bfd25a35be5f96.asciidoc +++ b/docs/doc_examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { my_range: { type: "date_range", diff --git a/docs/doc_examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc b/docs/doc_examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc new file mode 100644 index 000000000..f924148c0 --- /dev/null +++ b/docs/doc_examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + multi_match: { + query: "vegetarian curry", + fields: ["title^3", "description^2", "tags"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1bb395546102279296534522061829f.asciidoc b/docs/doc_examples/b3479ee4586c15020549afae58d94d65.asciidoc similarity index 83% rename from docs/doc_examples/c1bb395546102279296534522061829f.asciidoc rename to 
docs/doc_examples/b3479ee4586c15020549afae58d94d65.asciidoc index 791890046..68cc5ab9c 100644 --- a/docs/doc_examples/c1bb395546102279296534522061829f.asciidoc +++ b/docs/doc_examples/b3479ee4586c15020549afae58d94d65.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { point: { type: "geo_point", diff --git a/docs/doc_examples/36063ff9a318dba7bb0be3a230655dc8.asciidoc b/docs/doc_examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc similarity index 80% rename from docs/doc_examples/36063ff9a318dba7bb0be3a230655dc8.asciidoc rename to docs/doc_examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc index af0f597cc..98b1807e4 100644 --- a/docs/doc_examples/36063ff9a318dba7bb0be3a230655dc8.asciidoc +++ b/docs/doc_examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { long: { type: "long", diff --git a/docs/doc_examples/51390ca10aa22d7104e8970f09ea4512.asciidoc b/docs/doc_examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc similarity index 81% rename from docs/doc_examples/51390ca10aa22d7104e8970f09ea4512.asciidoc rename to docs/doc_examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc index a717a783b..15dcc7ae9 100644 --- a/docs/doc_examples/51390ca10aa22d7104e8970f09ea4512.asciidoc +++ b/docs/doc_examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { binary: { type: "binary", diff --git a/docs/doc_examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc b/docs/doc_examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc new file mode 100644 index 000000000..61939766d --- /dev/null +++ b/docs/doc_examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + attributes: { + id: "foo", + }, + id: "bar", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc b/docs/doc_examples/ba650046f9063f6c43d76f47e0f94403.asciidoc similarity index 81% rename from docs/doc_examples/fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc rename to docs/doc_examples/ba650046f9063f6c43d76f47e0f94403.asciidoc index dadf119aa..ec556c8dc 100644 --- a/docs/doc_examples/fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc +++ b/docs/doc_examples/ba650046f9063f6c43d76f47e0f94403.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { date: { type: "date", diff --git a/docs/doc_examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc b/docs/doc_examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc new file mode 100644 index 000000000..d73057f5b --- /dev/null +++ 
b/docs/doc_examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc @@ -0,0 +1,88 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "cooking_blog", + refresh: "wait_for", + operations: [ + { + index: { + _id: "1", + }, + }, + { + title: "Perfect Pancakes: A Fluffy Breakfast Delight", + description: + "Learn the secrets to making the fluffiest pancakes, so amazing you won't believe your tastebuds. This recipe uses buttermilk and a special folding technique to create light, airy pancakes that are perfect for lazy Sunday mornings.", + author: "Maria Rodriguez", + date: "2023-05-01", + category: "Breakfast", + tags: ["pancakes", "breakfast", "easy recipes"], + rating: 4.8, + }, + { + index: { + _id: "2", + }, + }, + { + title: "Spicy Thai Green Curry: A Vegetarian Adventure", + description: + "Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. Don't worry about the heat - you can easily adjust the spice level to your liking.", + author: "Liam Chen", + date: "2023-05-05", + category: "Main Course", + tags: ["thai", "vegetarian", "curry", "spicy"], + rating: 4.6, + }, + { + index: { + _id: "3", + }, + }, + { + title: "Classic Beef Stroganoff: A Creamy Comfort Food", + description: + "Indulge in this rich and creamy beef stroganoff. Tender strips of beef in a savory mushroom sauce, served over a bed of egg noodles. It's the ultimate comfort food for chilly evenings.", + author: "Emma Watson", + date: "2023-05-10", + category: "Main Course", + tags: ["beef", "pasta", "comfort food"], + rating: 4.7, + }, + { + index: { + _id: "4", + }, + }, + { + title: "Vegan Chocolate Avocado Mousse", + description: + "Discover the magic of avocado in this rich, vegan chocolate mousse. Creamy, indulgent, and secretly healthy, it's the perfect guilt-free dessert for chocolate lovers.", + author: "Alex Green", + date: "2023-05-15", + category: "Dessert", + tags: ["vegan", "chocolate", "avocado", "healthy dessert"], + rating: 4.5, + }, + { + index: { + _id: "5", + }, + }, + { + title: "Crispy Oven-Fried Chicken", + description: + "Get that perfect crunch without the deep fryer! This oven-fried chicken recipe delivers crispy, juicy results every time. 
A healthier take on the classic comfort food.", + author: "Maria Rodriguez", + date: "2023-05-20", + category: "Main Course", + tags: ["chicken", "oven-fried", "healthy"], + rating: 4.9, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc b/docs/doc_examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc new file mode 100644 index 000000000..59eabf7be --- /dev/null +++ b/docs/doc_examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + multi_match: { + query: "vegetarian curry", + fields: ["title", "description", "tags"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/63d1c07d22a3ca3b0ec6d950547c011c.asciidoc b/docs/doc_examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc similarity index 80% rename from docs/doc_examples/63d1c07d22a3ca3b0ec6d950547c011c.asciidoc rename to docs/doc_examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc index 6d46c2baa..f8485a4c3 100644 --- a/docs/doc_examples/63d1c07d22a3ca3b0ec6d950547c011c.asciidoc +++ b/docs/doc_examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { kwd: { type: "keyword", diff --git a/docs/doc_examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc b/docs/doc_examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc new file mode 100644 index 000000000..2e7e049df --- /dev/null +++ b/docs/doc_examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + size: 10, + knn: { + query_vector: [0.04283529, 0.85670587, -0.51402352, 0], + field: "my_int4_vector", + k: 20, + num_candidates: 50, + }, + rescore: { + window_size: 20, + query: { + rescore_query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "(dotProduct(params.queryVector, 'my_int4_vector') + 1.0)", + params: { + queryVector: [0.04283529, 0.85670587, -0.51402352, 0], + }, + }, + }, + }, + query_weight: 0, + rescore_query_weight: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e566e898902e432bc7ea0568400f0c50.asciidoc b/docs/doc_examples/c793efe7280e9b6e09981c4d4f832348.asciidoc similarity index 83% rename from docs/doc_examples/e566e898902e432bc7ea0568400f0c50.asciidoc rename to docs/doc_examples/c793efe7280e9b6e09981c4d4f832348.asciidoc index 8a135621a..ca162517a 100644 --- a/docs/doc_examples/e566e898902e432bc7ea0568400f0c50.asciidoc +++ b/docs/doc_examples/c793efe7280e9b6e09981c4d4f832348.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { ip: { type: "ip", diff --git a/docs/doc_examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc b/docs/doc_examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc new file mode 100644 index 000000000..1bd714230 --- /dev/null +++ 
b/docs/doc_examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "amazon-reviews", + mappings: { + properties: { + review_vector: { + type: "dense_vector", + dims: 8, + index: true, + similarity: "cosine", + }, + review_text: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dbce6cb1eaf9b2cc36b7f9a13afc63ea.asciidoc b/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc similarity index 77% rename from docs/doc_examples/dbce6cb1eaf9b2cc36b7f9a13afc63ea.asciidoc rename to docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc index 00f3a3a14..c97a5d54f 100644 --- a/docs/doc_examples/dbce6cb1eaf9b2cc36b7f9a13afc63ea.asciidoc +++ b/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc @@ -52,6 +52,20 @@ const response = await client.simulate.ingest({ }, }, }, + index_template_substitutions: { + "my-index-template": { + index_patterns: ["my-index-*"], + composed_of: ["component_template_1", "component_template_2"], + }, + }, + mapping_addition: { + dynamic: "strict", + properties: { + foo: { + type: "keyword", + }, + }, + }, }, }); console.log(response); diff --git a/docs/doc_examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc b/docs/doc_examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc new file mode 100644 index 000000000..384d005bd --- /dev/null +++ b/docs/doc_examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + size: 10, + query: { + script_score: { + query: { + knn: { + query_vector: [0.04283529, 0.85670587, -0.51402352, 0], + field: "my_int4_vector", + num_candidates: 20, + }, + }, + script: { + source: "(dotProduct(params.queryVector, 'my_int4_vector') + 1.0)", + params: { + queryVector: [0.04283529, 0.85670587, -0.51402352, 0], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc b/docs/doc_examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc new file mode 100644 index 000000000..bd9c35360 --- /dev/null +++ b/docs/doc_examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.inference({ + task_type: "my-inference-endpoint", + inference_id: "_update", + service_settings: { + api_key: "", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc b/docs/doc_examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc new file mode 100644 index 000000000..78223bc2b --- /dev/null +++ b/docs/doc_examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-byte-quantized-index", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 64, + index: true, + index_options: { + type: "bbq_hnsw", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/0bef1fdefeb2956d60d52d3f38397cad.asciidoc b/docs/doc_examples/f8f960550104c33e00dc78bc8723ccef.asciidoc similarity index 72% rename from docs/doc_examples/0bef1fdefeb2956d60d52d3f38397cad.asciidoc rename to docs/doc_examples/f8f960550104c33e00dc78bc8723ccef.asciidoc index 950e6fd01..8d3d91597 100644 --- a/docs/doc_examples/0bef1fdefeb2956d60d52d3f38397cad.asciidoc +++ b/docs/doc_examples/f8f960550104c33e00dc78bc8723ccef.asciidoc @@ -4,12 +4,7 @@ [source, js] ---- const response = await client.indices.create({ - index: "idx", - mappings: { - _source: { - mode: "synthetic", - }, - }, + index: "cooking_blog", }); console.log(response); ---- diff --git a/docs/doc_examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc b/docs/doc_examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc similarity index 94% rename from docs/doc_examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc rename to docs/doc_examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc index 5294ec4fa..90668404a 100644 --- a/docs/doc_examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc +++ b/docs/doc_examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc @@ -5,7 +5,7 @@ ---- const response = await client.cat.mlTrainedModels({ h: "c,o,l,ct,v", - v: "ture", + v: "true", }); console.log(response); ---- diff --git a/docs/doc_examples/1e0f203aced9344382081ab095c44dde.asciidoc b/docs/doc_examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc similarity index 83% rename from docs/doc_examples/1e0f203aced9344382081ab095c44dde.asciidoc rename to docs/doc_examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc index bd8639ace..2045d9c7d 100644 --- a/docs/doc_examples/1e0f203aced9344382081ab095c44dde.asciidoc +++ b/docs/doc_examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { text: { type: "text", diff --git a/docs/doc_examples/633c8a9fc57268979d8735c557705809.asciidoc b/docs/doc_examples/fb0152f6c70f647a8b6709969113486d.asciidoc similarity index 81% rename from docs/doc_examples/633c8a9fc57268979d8735c557705809.asciidoc rename to docs/doc_examples/fb0152f6c70f647a8b6709969113486d.asciidoc index 7b7b0f2b8..cd8a962e0 100644 --- a/docs/doc_examples/633c8a9fc57268979d8735c557705809.asciidoc +++ b/docs/doc_examples/fb0152f6c70f647a8b6709969113486d.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { kwd: { type: "keyword", diff --git a/docs/doc_examples/1fcc4a3280be399753dcfd5c489ff682.asciidoc b/docs/doc_examples/fe6429d0d82174aa5acf95e96e237380.asciidoc similarity index 82% rename from docs/doc_examples/1fcc4a3280be399753dcfd5c489ff682.asciidoc rename to docs/doc_examples/fe6429d0d82174aa5acf95e96e237380.asciidoc index 5bc970622..0e51c7393 100644 --- a/docs/doc_examples/1fcc4a3280be399753dcfd5c489ff682.asciidoc +++ b/docs/doc_examples/fe6429d0d82174aa5acf95e96e237380.asciidoc @@ -5,10 +5,16 @@ ---- const response = await client.indices.create({ index: "idx", - mappings: { - _source: { - mode: "synthetic", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, }, + }, + mappings: { properties: { my_range: { type: "ip_range", diff --git a/docs/reference.asciidoc 
b/docs/reference.asciidoc index 8331d23c6..42e6df856 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -256,7 +256,10 @@ Set to all or any positive integer up to the total number of shards in the index [discrete] === delete_by_query_rethrottle -Changes the number of requests per second for a particular Delete By Query operation. +Throttle a delete by query operation. + +Change the number of requests per second for a particular delete by query operation. +Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. {ref}/docs-delete-by-query.html[Endpoint documentation] [source,ts] @@ -465,7 +468,9 @@ client.getScript({ id }) [discrete] === get_script_context -Returns all script contexts. +Get script contexts. + +Get a list of supported script contexts and their methods. {painless}/painless-contexts.html[Endpoint documentation] [source,ts] @@ -475,7 +480,9 @@ client.getScriptContext() [discrete] === get_script_languages -Returns available script types, languages and contexts +Get script languages. + +Get a list of available script types, languages, and contexts. {ref}/modules-scripting.html[Endpoint documentation] [source,ts] @@ -612,7 +619,11 @@ list of queries. If `filter` isn't provided, all documents are allowed to match. [discrete] === mget -Allows to get multiple documents in one request. +Get multiple documents. + +Get multiple JSON documents by ID from one or more indices. +If you specify an index in the request URI, you only need to specify the document IDs in the request body. +To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. {ref}/docs-multi-get.html[Endpoint documentation] [source,ts] @@ -643,7 +654,23 @@ If the `_source` parameter is `false`, this parameter is ignored. [discrete] === msearch -Allows to execute several search operations in one request. +Run multiple searches. + +The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. +The structure is as follows: + +``` +header\n +body\n +header\n +body\n +``` + +This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. + +IMPORTANT: The final line of data must end with a newline character `\n`. +Each newline character may be preceded by a carriage return `\r`. +When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. {ref}/search-multi-search.html[Endpoint documentation] [source,ts] @@ -702,7 +729,12 @@ If `false`, it returns `hits.total` as an object. [discrete] === mtermvectors -Returns multiple termvectors in one request. +Get multiple term vectors. + +You can specify existing documents by index and ID or provide artificial documents in the body of the request. +You can specify the index in the request body or request URI. +The response contains a `docs` array with all the fetched termvectors. +Each element has the structure provided by the termvectors API. {ref}/docs-multi-termvectors.html[Endpoint documentation] [source,ts] @@ -850,7 +882,9 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] === reindex_rethrottle -Copies documents from a source to a destination. +Throttle a reindex operation. + +Change the number of requests per second for a particular reindex operation.
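As a rough sketch of what a rethrottle call can look like with this client (the task ID below is a made-up placeholder; a real one is returned by a reindex request submitted with `wait_for_completion: false`):

[source,js]
----
// Illustrative only: remove the throttle from a running reindex task.
// "r1A2WoRbTwKZ516z6NEs5A:36619" is a placeholder task ID.
const response = await client.reindexRethrottle({
  task_id: "r1A2WoRbTwKZ516z6NEs5A:36619",
  requests_per_second: -1, // -1 disables throttling entirely
});
console.log(response);
----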
{ref}/docs-reindex.html[Endpoint documentation] [source,ts] @@ -908,7 +942,20 @@ client.scriptsPainlessExecute({ ... }) [discrete] === scroll -Allows to retrieve a large numbers of results from a single search request. +Run a scrolling search. + +IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). + +The scroll API gets large sets of results from a single scrolling search request. +To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. +The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. +The search response returns a scroll ID in the `_scroll_id` response body parameter. +You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. +If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. + +You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. + +IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. {ref}/search-request-body.html[Endpoint documentation] [source,ts] @@ -1213,7 +1260,15 @@ should be maintained for scrolled search. [discrete] === terms_enum -The terms enum API can be used to discover terms in the index that begin with the provided string. It is designed for low-latency look-ups used in auto-complete scenarios. +Get terms in an index. + +Discover terms that match a partial string in an index. +This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios. + +If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate. +This can occur due to a few reasons, such as a request timeout or a node error. + +NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. {ref}/search-terms-enum.html[Endpoint documentation] [source,ts] @@ -1380,7 +1435,10 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] === update_by_query_rethrottle -Changes the number of requests per second for a particular Update By Query operation. +Throttle an update by query operation. + +Change the number of requests per second for a particular update by query operation. +Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. {ref}/docs-update-by-query.html[Endpoint documentation] [source,ts] @@ -1398,8 +1456,8 @@ client.updateByQueryRethrottle({ task_id }) ---- [discrete] === async_search [discrete] ==== delete -Deletes an async search by identifier. -If the search is still running, the search request will be cancelled. +Delete an async search. +If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted.
If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. @@ -1417,7 +1475,8 @@ client.asyncSearch.delete({ id }) [discrete] ==== get -Retrieves the results of a previously submitted async search request given its identifier. +Get async search results. +Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. {ref}/async-search.html[Endpoint documentation] @@ -1443,8 +1502,8 @@ By default no timeout is set meaning that the currently available results will b [discrete] ==== status -Get async search status -Retrieves the status of a previously submitted async search request given its identifier, without retrieving search results. +Get async search status. +Retrieve the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. {ref}/async-search.html[Endpoint documentation] @@ -1461,10 +1520,12 @@ client.asyncSearch.status({ id }) [discrete] ==== submit -Runs a search request asynchronously. -When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field, hence partial results become available following the sort criteria that was requested. -Warning: Async search does not support scroll nor search requests that only include the suggest section. -By default, Elasticsearch doesn’t allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. +Run an async search. +When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. + +Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. + +By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. {ref}/async-search.html[Endpoint documentation] @@ -2219,27 +2280,37 @@ Creates a new follower index configured to follow the referenced leader index. 
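A minimal sketch of creating a follower index with this client (all names below are placeholders and assume the remote cluster connection is already configured):

[source,js]
----
// Illustrative only: make "follower_index" follow "leader_index"
// on the remote cluster registered under the alias "remote_cluster".
const response = await client.ccr.follow({
  index: "follower_index",
  remote_cluster: "remote_cluster",
  leader_index: "leader_index",
  wait_for_active_shards: 1, // wait for the primary shard to become active
});
console.log(response);
----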
{ref}/ccr-put-follow.html[Endpoint documentation] [source,ts] ---- -client.ccr.follow({ index }) +client.ccr.follow({ index, leader_index, remote_cluster }) ---- [discrete] ==== Arguments * *Request (object):* -** *`index` (string)*: The name of the follower index -** *`leader_index` (Optional, string)* -** *`max_outstanding_read_requests` (Optional, number)* -** *`max_outstanding_write_requests` (Optional, number)* -** *`max_read_request_operation_count` (Optional, number)* -** *`max_read_request_size` (Optional, string)* -** *`max_retry_delay` (Optional, string | -1 | 0)* -** *`max_write_buffer_count` (Optional, number)* -** *`max_write_buffer_size` (Optional, string)* -** *`max_write_request_operation_count` (Optional, number)* -** *`max_write_request_size` (Optional, string)* -** *`read_poll_timeout` (Optional, string | -1 | 0)* -** *`remote_cluster` (Optional, string)* -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before returning. Defaults to 0. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) +** *`index` (string)*: The name of the follower index. +** *`leader_index` (string)*: The name of the index in the leader cluster to follow. +** *`remote_cluster` (string)*: The remote cluster containing the leader index. +** *`data_stream_name` (Optional, string)*: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. +** *`max_outstanding_read_requests` (Optional, number)*: The maximum number of outstanding reads requests from the remote cluster. +** *`max_outstanding_write_requests` (Optional, number)*: The maximum number of outstanding write requests on the follower. +** *`max_read_request_operation_count` (Optional, number)*: The maximum number of operations to pull per read from the remote cluster. +** *`max_read_request_size` (Optional, number | string)*: The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. +** *`max_retry_delay` (Optional, string | -1 | 0)*: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when +retrying. +** *`max_write_buffer_count` (Optional, number)*: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be +deferred until the number of queued operations goes below the limit. +** *`max_write_buffer_size` (Optional, number | string)*: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will +be deferred until the total bytes of queued operations goes below the limit. +** *`max_write_request_operation_count` (Optional, number)*: The maximum number of operations per bulk write request executed on the follower. +** *`max_write_request_size` (Optional, number | string)*: The maximum total bytes of operations per bulk write request executed on the follower. +** *`read_poll_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. +When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. +Then the follower will immediately attempt to read from the leader again. 
+** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Settings to override from the leader index. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be +active. +A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the +remote Lucene segment files to the follower index. [discrete] ==== follow_info @@ -2819,7 +2890,9 @@ However, timed out nodes are included in the response’s `_nodes.failed` proper === connector [discrete] ==== check_in -Updates the last_seen field in the connector, and sets it to current timestamp +Check in a connector. + +Update the `last_seen` field in the connector and set it to the current timestamp. {ref}/check-in-connector-api.html[Endpoint documentation] [source,ts] @@ -2835,7 +2908,12 @@ client.connector.checkIn({ connector_id }) [discrete] ==== delete -Deletes a connector. +Delete a connector. + +Removes a connector and associated sync jobs. +This is a destructive action that is not recoverable. +NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. +These need to be removed manually. {ref}/delete-connector-api.html[Endpoint documentation] [source,ts] @@ -2852,7 +2930,9 @@ client.connector.delete({ connector_id }) [discrete] ==== get -Retrieves a connector. +Get a connector. + +Get the details about a connector. {ref}/get-connector-api.html[Endpoint documentation] [source,ts] @@ -2868,7 +2948,9 @@ client.connector.get({ connector_id }) [discrete] ==== list -Returns existing connectors. +Get all connectors. + +Get information about all connectors. {ref}/list-connector-api.html[Endpoint documentation] [source,ts] @@ -2889,7 +2971,11 @@ client.connector.list({ ... }) [discrete] ==== post -Creates a connector. +Create a connector. + +Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. +Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. +Self-managed connectors (Connector clients) are self-managed on your infrastructure. {ref}/create-connector-api.html[Endpoint documentation] [source,ts] @@ -2910,7 +2996,7 @@ client.connector.post({ ... }) [discrete] ==== put -Creates or updates a connector. +Create or update a connector. {ref}/create-connector-api.html[Endpoint documentation] [source,ts] @@ -2932,7 +3018,10 @@ client.connector.put({ ... }) [discrete] ==== sync_job_cancel -Cancels a connector sync job. +Cancel a connector sync job. 
+ +Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. +The connector service is then responsible for setting the status of connector sync jobs to cancelled. {ref}/cancel-connector-sync-job-api.html[Endpoint documentation] [source,ts] @@ -2960,6 +3049,8 @@ client.connector.syncJobCheckIn() [discrete] ==== sync_job_claim Claims a connector sync job. + +{ref}/claim-connector-sync-job-api.html[Endpoint documentation] [source,ts] ---- client.connector.syncJobClaim() @@ -2968,7 +3059,10 @@ client.connector.syncJobClaim() [discrete] ==== sync_job_delete -Deletes a connector sync job. +Delete a connector sync job. + +Remove a connector sync job and its associated data. +This is a destructive action that is not recoverable. {ref}/delete-connector-sync-job-api.html[Endpoint documentation] [source,ts] @@ -2995,7 +3089,7 @@ client.connector.syncJobError() [discrete] ==== sync_job_get -Retrieves a connector sync job. +Get a connector sync job. {ref}/get-connector-sync-job-api.html[Endpoint documentation] [source,ts] @@ -3011,7 +3105,9 @@ client.connector.syncJobGet({ connector_sync_job_id }) [discrete] ==== sync_job_list -Lists connector sync jobs. +Get all connector sync jobs. + +Get information about all stored connector sync jobs listed by their creation date in ascending order. {ref}/list-connector-sync-jobs-api.html[Endpoint documentation] [source,ts] @@ -3031,7 +3127,9 @@ client.connector.syncJobList({ ... }) [discrete] ==== sync_job_post -Creates a connector sync job. +Create a connector sync job. + +Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. {ref}/create-connector-sync-job-api.html[Endpoint documentation] [source,ts] @@ -3060,6 +3158,8 @@ client.connector.syncJobUpdateStats() [discrete] ==== update_active_filtering +Activate the connector draft filter. + Activates the valid draft filtering for a connector. {ref}/update-connector-filtering-api.html[Endpoint documentation] @@ -3076,7 +3176,12 @@ client.connector.updateActiveFiltering({ connector_id }) [discrete] ==== update_api_key_id -Updates the API key id in the connector document +Update the connector API key ID. + +Update the `api_key_id` and `api_key_secret_id` fields of a connector. +You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. +The connector secret ID is required only for Elastic managed (native) connectors. +Self-managed connectors (connector clients) do not use this field. {ref}/update-connector-api-key-id-api.html[Endpoint documentation] [source,ts] @@ -3094,7 +3199,9 @@ client.connector.updateApiKeyId({ connector_id }) [discrete] ==== update_configuration -Updates the configuration field in the connector document +Update the connector configuration. + +Update the configuration field in the connector document. {ref}/update-connector-configuration-api.html[Endpoint documentation] [source,ts] @@ -3112,7 +3219,11 @@ client.connector.updateConfiguration({ connector_id }) [discrete] ==== update_error -Updates the filtering field in the connector document +Update the connector error field. + +Set the error field for the connector. +If the error provided in the request body is non-null, the connector’s status is updated to error. +Otherwise, if the error is reset to null, the connector status is updated to connected. 
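A minimal sketch of reporting an error with this client (the connector ID is a placeholder; resetting `error` to `null` would flip the status back to connected instead):

[source,js]
----
// Illustrative only: record a sync error so the connector status becomes "error".
const response = await client.connector.updateError({
  connector_id: "my-connector", // placeholder connector ID
  error: "Third-party service temporarily unavailable",
});
console.log(response);
----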
{ref}/update-connector-error-api.html[Endpoint documentation] [source,ts] @@ -3140,7 +3251,11 @@ client.connector.updateFeatures() [discrete] ==== update_filtering -Updates the filtering field in the connector document +Update the connector filtering. + +Update the draft filtering configuration of a connector and marks the draft validation state as edited. +The filtering draft is activated once validated by the running Elastic connector service. +The filtering property is used to configure sync rules (both basic and advanced) for a connector. {ref}/update-connector-filtering-api.html[Endpoint documentation] [source,ts] @@ -3159,7 +3274,9 @@ client.connector.updateFiltering({ connector_id }) [discrete] ==== update_filtering_validation -Updates the draft filtering validation info for a connector. +Update the connector draft filtering validation. + +Update the draft filtering validation info for a connector. [source,ts] ---- client.connector.updateFilteringValidation({ connector_id, validation }) @@ -3174,7 +3291,9 @@ client.connector.updateFilteringValidation({ connector_id, validation }) [discrete] ==== update_index_name -Updates the index_name in the connector document +Update the connector index name. + +Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. {ref}/update-connector-index-name-api.html[Endpoint documentation] [source,ts] @@ -3191,7 +3310,7 @@ client.connector.updateIndexName({ connector_id, index_name }) [discrete] ==== update_name -Updates the name and description fields in the connector document +Update the connector name and description. {ref}/update-connector-name-description-api.html[Endpoint documentation] [source,ts] @@ -3209,7 +3328,7 @@ client.connector.updateName({ connector_id }) [discrete] ==== update_native -Updates the is_native flag in the connector document +Update the connector is_native flag. [source,ts] ---- client.connector.updateNative({ connector_id, is_native }) @@ -3224,7 +3343,9 @@ client.connector.updateNative({ connector_id, is_native }) [discrete] ==== update_pipeline -Updates the pipeline field in the connector document +Update the connector pipeline. + +When you create a new connector, the configuration of an ingest pipeline is populated with default settings. {ref}/update-connector-pipeline-api.html[Endpoint documentation] [source,ts] @@ -3241,7 +3362,7 @@ client.connector.updatePipeline({ connector_id, pipeline }) [discrete] ==== update_scheduling -Updates the scheduling field in the connector document +Update the connector scheduling. {ref}/update-connector-scheduling-api.html[Endpoint documentation] [source,ts] @@ -3258,7 +3379,7 @@ client.connector.updateScheduling({ connector_id, scheduling }) [discrete] ==== update_service_type -Updates the service type of the connector +Update the connector service type. {ref}/update-connector-service-type-api.html[Endpoint documentation] [source,ts] @@ -3275,7 +3396,7 @@ client.connector.updateServiceType({ connector_id, service_type }) [discrete] ==== update_status -Updates the status of the connector +Update the connector status. {ref}/update-connector-status-api.html[Endpoint documentation] [source,ts] @@ -3294,7 +3415,10 @@ client.connector.updateStatus({ connector_id, status }) === dangling_indices [discrete] ==== delete_dangling_index -Deletes the specified dangling index +Delete a dangling index. + +If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. 
+For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. {ref}/modules-gateway-dangling-indices.html[Endpoint documentation] [source,ts] ---- client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss }) ---- [discrete] ==== Arguments * *Request (object):* -** *`index_uuid` (string)*: The UUID of the dangling index -** *`accept_data_loss` (boolean)*: Must be set to true in order to delete the dangling index +** *`index_uuid` (string)*: The UUID of the index to delete. Use the get dangling indices API to find the UUID. +** *`accept_data_loss` (boolean)*: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index. ** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master ** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout [discrete] ==== import_dangling_index -Imports the specified dangling index +Import a dangling index. + +If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. +For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. {ref}/modules-gateway-dangling-indices.html[Endpoint documentation] [source,ts] ---- client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss }) ---- [discrete] ==== Arguments * *Request (object):* -** *`index_uuid` (string)*: The UUID of the dangling index -** *`accept_data_loss` (boolean)*: Must be set to true in order to import the dangling index +** *`index_uuid` (string)*: The UUID of the index to import. Use the get dangling indices API to locate the UUID. +** *`accept_data_loss` (boolean)*: This parameter must be set to true to import a dangling index. +Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. ** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master ** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout [discrete] ==== list_dangling_indices -Returns all dangling indices. +Get the dangling indices. + +If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. +For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. + +Use this API to list dangling indices, which you can then import or delete. {ref}/modules-gateway-dangling-indices.html[Endpoint documentation] [source,ts] @@ -3513,6 +3646,9 @@ client.eql.search({ index, query }) ** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. ** *`result_position` (Optional, Enum("tail" | "head"))* ** *`runtime_mappings` (Optional, Record)* +** *`max_samples_per_key` (Optional, number)*: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` +parameter to get a smaller or larger set of samples.
To retrieve more than one sample per set of join keys, use the +`max_samples_per_key` parameter. Pipes are not supported for sample queries. ** *`allow_no_indices` (Optional, boolean)* ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* ** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. @@ -4004,7 +4140,8 @@ client.indices.addBlock({ index, block }) [discrete] ==== analyze -Performs analysis on a text string and returns the resulting tokens. +Get tokens from text analysis. +The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens. {ref}/indices-analyze.html[Endpoint documentation] [source,ts] @@ -4968,11 +5105,7 @@ client.indices.putDataLifecycle({ name }) ** *`name` (string | string[])*: List of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. -** *`data_retention` (Optional, string | -1 | 0)*: If defined, every document added to this data stream will be stored at least for this time frame. -Any time after this duration the document could be deleted. -When empty, every document in this data stream will be stored indefinitely. -** *`downsampling` (Optional, { rounds })*: If defined, every backing index will execute the configured downsampling configuration after the backing -index is not the data stream write index anymore. +** *`lifecycle` (Optional, { data_retention, downsampling, enabled })* ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. @@ -5056,7 +5189,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. ** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. -** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: +** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -5669,6 +5802,15 @@ client.inference.put({ inference_id }) ** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type ** *`inference_config` (Optional, { service, service_settings, task_settings })* +[discrete] +==== stream_inference +Perform streaming inference +[source,ts] +---- +client.inference.streamInference() +---- + + [discrete] === ingest [discrete] @@ -5817,8 +5959,8 @@ client.ingest.putPipeline({ id }) ** *`id` (string)*: ID of the ingest pipeline to create or update. ** *`_meta` (Optional, Record)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. ** *`description` (Optional, string)*: Description of the ingest pipeline. 
-** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, redact, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. -** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, redact, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. +** *`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. +** *`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. ** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. ** *`deprecated` (Optional, boolean)*: Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. @@ -7352,7 +7494,7 @@ Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). -If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. 
+If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. @@ -8420,6 +8562,23 @@ client.queryRules.putRuleset({ ruleset_id, rules }) ** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated ** *`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])* +[discrete] +==== test +Creates or updates a query ruleset. + +{ref}/test-query-ruleset.html[Endpoint documentation] +[source,ts] +---- +client.queryRules.test({ ruleset_id, match_criteria }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated +** *`match_criteria` (Record)* + [discrete] === rollup [discrete] @@ -8699,7 +8858,7 @@ client.searchApplication.put({ name }) * *Request (object):* ** *`name` (string)*: The name of the search application to be created or updated. -** *`search_application` (Optional, { name, indices, updated_at_millis, analytics_collection_name, template })* +** *`search_application` (Optional, { indices, analytics_collection_name, template })* ** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing Search Applications. [discrete] @@ -8834,7 +8993,9 @@ client.searchableSnapshots.stats({ ... }) === security [discrete] ==== activate_user_profile -Creates or updates a user profile on behalf of another user. +Activate a user profile. + +Create or update a user profile on behalf of another user. {ref}/security-api-activate-user-profile.html[Endpoint documentation] [source,ts] @@ -8854,6 +9015,7 @@ client.security.activateUserProfile({ grant_type }) [discrete] ==== authenticate Authenticate a user. + Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. @@ -8868,6 +9030,8 @@ client.security.authenticate() [discrete] ==== bulk_delete_role +Bulk delete roles. + The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. @@ -8886,6 +9050,8 @@ client.security.bulkDeleteRole({ names }) [discrete] ==== bulk_put_role +Bulk create or update roles. + The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. @@ -8915,7 +9081,9 @@ client.security.bulkUpdateApiKeys() [discrete] ==== change_password -Changes the passwords of users in the native realm and built-in users. +Change passwords. + +Change the passwords of users in the native realm and built-in users. {ref}/security-api-change-password.html[Endpoint documentation] [source,ts] @@ -8938,7 +9106,9 @@ setting. [discrete] ==== clear_api_key_cache -Evicts a subset of all entries from the API key cache. 
+Clear the API key cache. + +Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. {ref}/security-api-clear-api-key-cache.html[Endpoint documentation] @@ -8957,7 +9127,10 @@ Does not support other wildcard patterns. [discrete] ==== clear_cached_privileges -Evicts application privileges from the native application privileges cache. +Clear the privileges cache. + +Evict privileges from the native application privilege cache. +The cache is also automatically cleared for applications that have their privileges updated. {ref}/security-api-clear-privilege-cache.html[Endpoint documentation] [source,ts] @@ -8973,7 +9146,9 @@ client.security.clearCachedPrivileges({ application }) [discrete] ==== clear_cached_realms -Evicts users from the user cache. Can completely clear the cache or evict specific users. +Clear the user cache. + +Evict users from the user cache. You can completely clear the cache or evict specific users. {ref}/security-api-clear-cache.html[Endpoint documentation] [source,ts] @@ -8990,7 +9165,9 @@ client.security.clearCachedRealms({ realms }) [discrete] ==== clear_cached_roles -Evicts roles from the native role cache. +Clear the roles cache. + +Evict roles from the native role cache. {ref}/security-api-clear-role-cache.html[Endpoint documentation] [source,ts] @@ -9006,7 +9183,9 @@ client.security.clearCachedRoles({ name }) [discrete] ==== clear_cached_service_tokens -Evicts tokens from the service account token caches. +Clear service account token caches. + +Evict a subset of all entries from the service account token caches. {ref}/security-api-clear-service-token-caches.html[Endpoint documentation] [source,ts] @@ -9025,7 +9204,8 @@ client.security.clearCachedServiceTokens({ namespace, service, name }) [discrete] ==== create_api_key Create an API key. -Creates an API key for access without requiring basic authentication. + +Create an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. @@ -9059,7 +9239,9 @@ client.security.createCrossClusterApiKey() [discrete] ==== create_service_token -Creates a service accounts token for access without requiring basic authentication. +Create a service account token. + +Create a service accounts token for access without requiring basic authentication. {ref}/security-api-create-service-token.html[Endpoint documentation] [source,ts] @@ -9078,7 +9260,7 @@ client.security.createServiceToken({ namespace, service }) [discrete] ==== delete_privileges -Removes application privileges. +Delete application privileges. {ref}/security-api-delete-privilege.html[Endpoint documentation] [source,ts] @@ -9096,7 +9278,9 @@ client.security.deletePrivileges({ application, name }) [discrete] ==== delete_role -Removes roles in the native realm. +Delete roles. + +Delete roles in the native realm. {ref}/security-api-delete-role.html[Endpoint documentation] [source,ts] @@ -9113,7 +9297,7 @@ client.security.deleteRole({ name }) [discrete] ==== delete_role_mapping -Removes role mappings. +Delete role mappings. 
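+
+For example, a minimal hedged sketch (the mapping name `mapping1` is a hypothetical placeholder):
+
+[source,ts]
+----
+const response = await client.security.deleteRoleMapping({ name: 'mapping1' })
+console.log(response.found) // true if the mapping existed and was removed
+----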
{ref}/security-api-delete-role-mapping.html[Endpoint documentation] [source,ts] @@ -9130,7 +9314,9 @@ client.security.deleteRoleMapping({ name }) [discrete] ==== delete_service_token -Deletes a service account token. +Delete service account tokens. + +Delete service account tokens for a service in a specified namespace. {ref}/security-api-delete-service-token.html[Endpoint documentation] [source,ts] @@ -9149,7 +9335,9 @@ client.security.deleteServiceToken({ namespace, service, name }) [discrete] ==== delete_user -Deletes users from the native realm. +Delete users. + +Delete users from the native realm. {ref}/security-api-delete-user.html[Endpoint documentation] [source,ts] @@ -9166,7 +9354,9 @@ client.security.deleteUser({ username }) [discrete] ==== disable_user -Disables users in the native realm. +Disable users. + +Disable users in the native realm. {ref}/security-api-disable-user.html[Endpoint documentation] [source,ts] @@ -9183,7 +9373,9 @@ client.security.disableUser({ username }) [discrete] ==== disable_user_profile -Disables a user profile so it's not visible in user profile searches. +Disable a user profile. + +Disable user profiles so that they are not visible in user profile searches. {ref}/security-api-disable-user-profile.html[Endpoint documentation] [source,ts] @@ -9202,7 +9394,9 @@ visible to search, if 'false' do nothing with refreshes. [discrete] ==== enable_user -Enables users in the native realm. +Enable users. + +Enable users in the native realm. {ref}/security-api-enable-user.html[Endpoint documentation] [source,ts] @@ -9219,7 +9413,9 @@ client.security.enableUser({ username }) [discrete] ==== enable_user_profile -Enables a user profile so it's visible in user profile searches. +Enable a user profile. + +Enable user profiles to make them visible in user profile searches. {ref}/security-api-enable-user-profile.html[Endpoint documentation] [source,ts] @@ -9238,7 +9434,9 @@ visible to search, if 'false' do nothing with refreshes. [discrete] ==== enroll_kibana -Enables a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. +Enroll Kibana. + +Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. {ref}/security-api-kibana-enrollment.html[Endpoint documentation] [source,ts] @@ -9249,7 +9447,9 @@ client.security.enrollKibana() [discrete] ==== enroll_node -Allows a new node to join an existing cluster with security features enabled. +Enroll a node. + +Enroll a new node to allow it to join an existing cluster with security features enabled. {ref}/security-api-node-enrollment.html[Endpoint documentation] [source,ts] @@ -9261,6 +9461,7 @@ client.security.enrollNode() [discrete] ==== get_api_key Get API key information. + Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. @@ -9295,7 +9496,9 @@ descriptors and the owner user's role descriptors. [discrete] ==== get_builtin_privileges -Retrieves the list of cluster privileges and index privileges that are available in this version of Elasticsearch. +Get builtin privileges. + +Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. 
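+
+An illustrative sketch (the destructuring assumes the documented response shape, which lists cluster and index privilege names):
+
+[source,ts]
+----
+const { cluster, index } = await client.security.getBuiltinPrivileges()
+console.log(cluster) // built-in cluster privilege names, e.g. 'monitor'
+console.log(index)   // built-in index privilege names, e.g. 'read'
+----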
{ref}/security-api-get-builtin-privileges.html[Endpoint documentation] [source,ts] @@ -9306,7 +9509,7 @@ client.security.getBuiltinPrivileges() [discrete] ==== get_privileges -Retrieves application privileges. +Get application privileges. {ref}/security-api-get-privileges.html[Endpoint documentation] [source,ts] @@ -9323,8 +9526,9 @@ client.security.getPrivileges({ ... }) [discrete] ==== get_role -The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. -The get roles API cannot retrieve roles that are defined in roles files. +Get roles. + +Get roles in the native realm. {ref}/security-api-get-role.html[Endpoint documentation] [source,ts] @@ -9340,7 +9544,11 @@ client.security.getRole({ ... }) [discrete] ==== get_role_mapping -Retrieves role mappings. +Get role mappings. + +Role mappings define which roles are assigned to each user. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. +The get role mappings API cannot retrieve role mappings that are defined in role mapping files. {ref}/security-api-get-role-mapping.html[Endpoint documentation] [source,ts] @@ -9356,7 +9564,9 @@ client.security.getRoleMapping({ ... }) [discrete] ==== get_service_accounts -This API returns a list of service accounts that match the provided path parameter(s). +Get service accounts. + +Get a list of service accounts that match the provided path parameters. {ref}/security-api-get-service-accounts.html[Endpoint documentation] [source,ts] @@ -9373,7 +9583,7 @@ client.security.getServiceAccounts({ ... }) [discrete] ==== get_service_credentials -Retrieves information of all service credentials for a service account. +Get service account credentials. {ref}/security-api-get-service-credentials.html[Endpoint documentation] [source,ts] @@ -9401,7 +9611,9 @@ client.security.getSettings() [discrete] ==== get_token -Creates a bearer token for access without requiring basic authentication. +Get a token. + +Create a bearer token for access without requiring basic authentication. {ref}/security-api-get-token.html[Endpoint documentation] [source,ts] @@ -9422,7 +9634,9 @@ client.security.getToken({ ... }) [discrete] ==== get_user -Retrieves information about users in the native realm and built-in users. +Get users. + +Get information about users in the native realm and built-in users. {ref}/security-api-get-user.html[Endpoint documentation] [source,ts] @@ -9439,7 +9653,7 @@ client.security.getUser({ ... }) [discrete] ==== get_user_privileges -Retrieves security privileges for the logged in user. +Get user privileges. {ref}/security-api-get-user-privileges.html[Endpoint documentation] [source,ts] @@ -9457,7 +9671,9 @@ client.security.getUserPrivileges({ ... }) [discrete] ==== get_user_profile -Retrieves a user's profile using the unique profile ID. +Get a user profile. + +Get a user's profile using the unique profile ID. {ref}/security-api-get-user-profile.html[Endpoint documentation] [source,ts] @@ -9477,8 +9693,10 @@ By default returns no `data` content. [discrete] ==== grant_api_key -Creates an API key on behalf of another user. -This API is similar to Create API keys, however it creates the API key for a user that is different than the user that runs the API. +Grant an API key. + +Create an API key on behalf of another user. +This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. 
The caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user’s credentials. The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user. @@ -9516,7 +9734,8 @@ It is not valid with other grant types. [discrete] ==== has_privileges Check user privileges. -Determines whether the specified user has a specified list of privileges. + +Determine whether the specified user has a specified list of privileges. {ref}/security-api-has-privileges.html[Endpoint documentation] [source,ts] @@ -9535,7 +9754,9 @@ client.security.hasPrivileges({ ... }) [discrete] ==== has_privileges_user_profile -Determines whether the users associated with the specified profile IDs have all the requested privileges. +Check user profile privileges. + +Determine whether the users associated with the specified user profile IDs have all the requested privileges. {ref}/security-api-has-privileges-user-profile.html[Endpoint documentation] [source,ts] @@ -9553,13 +9774,15 @@ client.security.hasPrivilegesUserProfile({ uids, privileges }) [discrete] ==== invalidate_api_key Invalidate API keys. -Invalidates one or more API keys. + +This API invalidates API keys created by the create API key or grant API key APIs. +Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` only allows deleting API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user’s identity. -- Or, if the request is issued by an API key, i.e. an API key invalidates itself, specify its ID in the `ids` field. +- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. {ref}/security-api-invalidate-api-key.html[Endpoint documentation] [source,ts] @@ -9585,7 +9808,14 @@ This parameter cannot be used with either `ids` or `name`, or when `owner` flag [discrete] ==== invalidate_token -Invalidates one or more access tokens or refresh tokens. +Invalidate a token. + +The access tokens returned by the get token API have a finite period of time for which they are valid. +After that time period, they can no longer be used. +The time period is defined by the `xpack.security.authc.token.timeout` setting. + +The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. +If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. {ref}/security-api-invalidate-token.html[Endpoint documentation] [source,ts] @@ -9637,7 +9867,7 @@ client.security.oidcPrepareAuthentication() [discrete] ==== put_privileges -Adds or updates application privileges. +Create or update application privileges. {ref}/security-api-put-privileges.html[Endpoint documentation] [source,ts] @@ -9654,8 +9884,11 @@ client.security.putPrivileges({ ... 
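+  // Illustrative body shape only (the application and privilege names are hypothetical):
+  // the payload maps application name -> privilege name -> { actions: [...] }, e.g.
+  // privileges: { myapp: { read: { actions: ['data:read/*'] } } }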
}) [discrete] ==== put_role -The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +Create or update roles. + +The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. +File-based role management is not available in Elastic Serverless. {ref}/security-api-put-role.html[Endpoint documentation] [source,ts] @@ -9667,7 +9900,7 @@ client.security.putRole({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the role. +** *`name` (string)*: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. ** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries. ** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. ** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. @@ -9681,7 +9914,14 @@ client.security.putRole({ name }) [discrete] ==== put_role_mapping -Creates and updates role mappings. +Create or update role mappings. + +Role mappings define which roles are assigned to each user. +Each mapping has rules that identify users and a list of roles that are granted to those users. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. + +This API does not create roles. Rather, it maps users to existing roles. +Roles can be created by using the create or update roles API or roles files. 
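+
+A hedged sketch of a minimal mapping (the mapping name, role, and rule below are hypothetical):
+
+[source,ts]
+----
+const response = await client.security.putRoleMapping({
+  name: 'mapping1',
+  enabled: true,
+  roles: ['user'],
+  rules: { field: { username: '*' } }
+})
+----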
{ref}/security-api-put-role-mapping.html[Endpoint documentation] [source,ts] @@ -9704,7 +9944,10 @@ client.security.putRoleMapping({ name }) [discrete] ==== put_user -Adds and updates users in the native realm. These users are commonly referred to as native users. +Create or update users. + +A password is required for adding a new user but is optional when updating an existing user. +To change a user’s password without updating any other fields, use the change password API. {ref}/security-api-put-user.html[Endpoint documentation] [source,ts] @@ -9728,8 +9971,9 @@ client.security.putUser({ username }) [discrete] ==== query_api_keys -Query API keys. -Retrieves a paginated list of API keys and their information. You can optionally filter the results with a query. +Find API keys with a query. + +Get a paginated list of API keys and their information. You can optionally filter the results with a query. {ref}/security-api-query-api-key.html[Endpoint documentation] [source,ts] @@ -9768,7 +10012,9 @@ An API key's actual permission is the intersection of its assigned role descript [discrete] ==== query_role -Retrieves roles in a paginated manner. You can optionally filter the results with a query. +Find roles with a query. + +Get roles in a paginated manner. You can optionally filter the results with a query. {ref}/security-api-query-role.html[Endpoint documentation] [source,ts] @@ -9798,7 +10044,10 @@ To page through more hits, use the `search_after` parameter. [discrete] ==== query_user -Retrieves information for Users in a paginated manner. You can optionally filter the results with a query. +Find users with a query. + +Get information for users in a paginated manner. +You can optionally filter the results with a query. {ref}/security-api-query-user.html[Endpoint documentation] [source,ts] @@ -9828,7 +10077,9 @@ To page through more hits, use the `search_after` parameter. [discrete] ==== saml_authenticate -Submits a SAML Response message to Elasticsearch for consumption. +Authenticate SAML. + +Submits a SAML response message to Elasticsearch for consumption. {ref}/security-api-saml-authenticate.html[Endpoint documentation] [source,ts] @@ -9846,6 +10097,8 @@ client.security.samlAuthenticate({ content, ids }) [discrete] ==== saml_complete_logout +Logout of SAML completely. + Verifies the logout response sent from the SAML IdP. {ref}/security-api-saml-complete-logout.html[Endpoint documentation] @@ -9865,6 +10118,8 @@ client.security.samlCompleteLogout({ realm, ids }) [discrete] ==== saml_invalidate +Invalidate SAML. + Submits a SAML LogoutRequest message to Elasticsearch for consumption. {ref}/security-api-saml-invalidate.html[Endpoint documentation] @@ -9887,6 +10142,8 @@ The client application must not attempt to parse or process the string in any wa [discrete] ==== saml_logout +Logout of SAML. + Submits a request to invalidate an access token and refresh token. {ref}/security-api-saml-logout.html[Endpoint documentation] @@ -9906,7 +10163,9 @@ Alternatively, the most recent refresh token that was received after refreshing [discrete] ==== saml_prepare_authentication -Creates a SAML authentication request () as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. +Prepare SAML authentication. + +Creates a SAML authentication request (``) as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. 
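+
+For example, a hedged sketch (the realm name `saml1` is hypothetical):
+
+[source,ts]
+----
+const { id, realm, redirect } = await client.security.samlPrepareAuthentication({ realm: 'saml1' })
+// `redirect` is the URL to which the user's browser should be sent to authenticate
+----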
{ref}/security-api-saml-prepare-authentication.html[Endpoint documentation] [source,ts] @@ -9927,6 +10186,8 @@ If the Authentication Request is signed, this value is used as part of the signa [discrete] ==== saml_service_provider_metadata +Create SAML service provider metadata. + Generate SAML metadata for a SAML 2.0 Service Provider. {ref}/security-api-saml-sp-metadata.html[Endpoint documentation] @@ -9943,6 +10204,8 @@ client.security.samlServiceProviderMetadata({ realm_name }) [discrete] ==== suggest_user_profiles +Suggest a user profile. + Get suggestions for user profiles that match specified search criteria. {ref}/security-api-suggest-user-profile.html[Endpoint documentation] @@ -9970,6 +10233,7 @@ as long as the profile matches the `name` field query. [discrete] ==== update_api_key Update an API key. + Updates attributes of an existing API key. Users can only update API keys that they created or that were granted to them. Use this API to update API keys created by the create API Key or grant API Key APIs. @@ -10023,7 +10287,9 @@ client.security.updateSettings() [discrete] ==== update_user_profile_data -Updates specific data for the user profile that's associated with the specified unique ID. +Update user profile data. + +Update specific data for the user profile that is associated with a unique ID. {ref}/security-api-update-user-profile-data.html[Endpoint documentation] [source,ts] @@ -10639,7 +10905,23 @@ client.sql.translate({ query }) === ssl [discrete] ==== certificates -Retrieves information about the X.509 certificates used to encrypt communications in the cluster. +Get SSL certificates. + +Get information about the X.509 certificates that are used to encrypt communications in the cluster. +The API returns a list that includes certificates from all TLS contexts including: + +- Settings for transport and HTTP interfaces +- TLS settings that are used within authentication realms +- TLS settings for remote monitoring exporters + +The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. +It also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. + +The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch. + +NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration. + +If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. {ref}/security-api-ssl.html[Endpoint documentation] [source,ts] diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index 4a8cef916..f17bb56ce 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -45,7 +45,7 @@ export default class AsyncSearch { } /** - * Deletes an async search by identifier. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. 
If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. + * Delete an async search. If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} */ async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class AsyncSearch { } /** - * Retrieves the results of a previously submitted async search request given its identifier. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. + * Get async search results. Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} */ async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -109,7 +109,7 @@ export default class AsyncSearch { } /** - * Get async search status Retrieves the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. + * Get async search status. Retrieve the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} */ async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -141,7 +141,7 @@ export default class AsyncSearch { } /** - * Runs a search request asynchronously. When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field, hence partial results become available following the sort criteria that was requested. Warning: Async search does not support scroll nor search requests that only include the suggest section. By default, Elasticsearch doesn’t allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. + * Run an async search. When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. 
Partial results become available following the sort criteria that was requested. Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} */ async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index ec3db24c8..0bf2cec5f 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -85,7 +85,7 @@ export default class Ccr { async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptions): Promise async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster'] + const acceptedBody: string[] = ['data_stream_name', 'leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster', 'settings'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index 6a2585bc8..6cceebe5d 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -45,7 +45,7 @@ export default class Connector { } /** - * Updates the last_seen field in the connector, and sets it to current timestamp + * Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-api.html | Elasticsearch API documentation} */ async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class Connector { } /** - * Deletes a connector. + * Delete a connector. Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-api.html | Elasticsearch API documentation} */ async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -109,7 +109,7 @@ export default class Connector { } /** - * Retrieves a connector. + * Get a connector. Get the details about a connector. 
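+ * @remarks The following is an illustrative sketch only; `my-connector` is a hypothetical connector ID.
+ * @example
+ * const connector = await client.connector.get({ connector_id: 'my-connector' })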
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-api.html | Elasticsearch API documentation} */ async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -141,7 +141,7 @@ export default class Connector { } /** - * Updates last sync stats in the connector document + * Update the connector last sync stats. Update the fields related to the last sync of a connector. This action is used for analytics and monitoring. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-last-sync-api.html | Elasticsearch API documentation} */ async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -185,7 +185,7 @@ export default class Connector { } /** - * Returns existing connectors. + * Get all connectors. Get information about all connectors. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-api.html | Elasticsearch API documentation} */ async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -215,7 +215,7 @@ export default class Connector { } /** - * Creates a connector. + * Create a connector. Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html | Elasticsearch API documentation} */ async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -257,7 +257,7 @@ export default class Connector { } /** - * Creates or updates a connector. + * Create or update a connector. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html | Elasticsearch API documentation} */ async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -430,7 +430,7 @@ export default class Connector { } /** - * Cancels a connector sync job. + * Cancel a connector sync job. Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cancel-connector-sync-job-api.html | Elasticsearch API documentation} */ async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -526,7 +526,7 @@ export default class Connector { } /** - * Deletes a connector sync job. + * Delete a connector sync job. Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. 
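+ * @remarks A hedged usage sketch; the sync job ID below is a hypothetical placeholder.
+ * @example
+ * await client.connector.syncJobDelete({ connector_sync_job_id: 'my-sync-job-id' })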
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-sync-job-api.html | Elasticsearch API documentation} */ async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -590,7 +590,7 @@ export default class Connector { } /** - * Retrieves a connector sync job. + * Get a connector sync job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-sync-job-api.html | Elasticsearch API documentation} */ async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -622,7 +622,7 @@ export default class Connector { } /** - * Lists connector sync jobs. + * Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-sync-jobs-api.html | Elasticsearch API documentation} */ async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -652,7 +652,7 @@ export default class Connector { } /** - * Creates a connector sync job. + * Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-sync-job-api.html | Elasticsearch API documentation} */ async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -725,7 +725,7 @@ export default class Connector { } /** - * Activates the valid draft filtering for a connector. + * Activate the connector draft filter. Activates the valid draft filtering for a connector. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html | Elasticsearch API documentation} */ async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -757,7 +757,7 @@ export default class Connector { } /** - * Updates the API key id in the connector document + * Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` fields of a connector. You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-api-key-id-api.html | Elasticsearch API documentation} */ async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -801,7 +801,7 @@ export default class Connector { } /** - * Updates the configuration field in the connector document + * Update the connector configuration. Update the configuration field in the connector document. 
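+ * @remarks An illustrative sketch; the connector ID and `values` payload are hypothetical and depend on the connector's configuration schema.
+ * @example
+ * await client.connector.updateConfiguration({
+ *   connector_id: 'my-connector',
+ *   values: { host: 'https://example.com/' }
+ * })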
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-configuration-api.html | Elasticsearch API documentation} */ async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -845,7 +845,7 @@ export default class Connector { } /** - * Updates the filtering field in the connector document + * Update the connector error field. Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-error-api.html | Elasticsearch API documentation} */ async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -921,7 +921,7 @@ export default class Connector { } /** - * Updates the filtering field in the connector document + * Update the connector filtering. Update the draft filtering configuration of a connector and marks the draft validation state as edited. The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html | Elasticsearch API documentation} */ async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -965,7 +965,7 @@ export default class Connector { } /** - * Updates the draft filtering validation info for a connector. + * Update the connector draft filtering validation. Update the draft filtering validation info for a connector. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-validation-api.html | Elasticsearch API documentation} */ async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1009,7 +1009,7 @@ export default class Connector { } /** - * Updates the index_name in the connector document + * Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-index-name-api.html | Elasticsearch API documentation} */ async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1053,7 +1053,7 @@ export default class Connector { } /** - * Updates the name and description fields in the connector document + * Update the connector name and description. 
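+ * @remarks An illustrative sketch; the ID, name, and description values are hypothetical.
+ * @example
+ * await client.connector.updateName({ connector_id: 'my-connector', name: 'My connector', description: 'Syncs content from my data source' })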
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-name-description-api.html | Elasticsearch API documentation} */ async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1097,7 +1097,7 @@ export default class Connector { } /** - * Updates the is_native flag in the connector document + * Update the connector is_native flag. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-native-api.html | Elasticsearch API documentation} */ async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1141,7 +1141,7 @@ export default class Connector { } /** - * Updates the pipeline field in the connector document + * Update the connector pipeline. When you create a new connector, the configuration of an ingest pipeline is populated with default settings. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-pipeline-api.html | Elasticsearch API documentation} */ async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1185,7 +1185,7 @@ export default class Connector { } /** - * Updates the scheduling field in the connector document + * Update the connector scheduling. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-scheduling-api.html | Elasticsearch API documentation} */ async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1229,7 +1229,7 @@ export default class Connector { } /** - * Updates the service type of the connector + * Update the connector service type. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-service-type-api.html | Elasticsearch API documentation} */ async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1273,7 +1273,7 @@ export default class Connector { } /** - * Updates the status of the connector + * Update the connector status. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-status-api.html | Elasticsearch API documentation} */ async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index e042d801a..f167cc0c8 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -45,7 +45,7 @@ export default class DanglingIndices { } /** - * Deletes the specified dangling index + * Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
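+ * @remarks A hedged sketch; the UUID below is a hypothetical placeholder that would come from the list dangling indices API, and `accept_data_loss` must be explicitly set.
+ * @example
+ * await client.danglingIndices.deleteDanglingIndex({ index_uuid: 'zmM4e0JtBkeUjiHD-MihPQ', accept_data_loss: true })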
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation} */ async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class DanglingIndices { /** - * Imports the specified dangling index + * Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation} */ async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -109,7 +109,7 @@ export default class DanglingIndices { /** - * Returns all dangling indices. + * Get the dangling indices. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation} */ async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts index d7847a1ba..54189a3d2 100644 --- a/src/api/api/delete_by_query_rethrottle.ts +++ b/src/api/api/delete_by_query_rethrottle.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Changes the number of requests per second for a particular Delete By Query operation. + * Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
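+ * @remarks An illustrative sketch; the task ID is a hypothetical placeholder and `requests_per_second: -1` turns throttling off.
+ * @example
+ * await client.deleteByQueryRethrottle({ task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619', requests_per_second: -1 })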
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html | Elasticsearch API documentation} */ export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index 0e0ddd859..d0b9054b3 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -149,7 +149,7 @@ export default class Eql { async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise> async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position', 'runtime_mappings'] + const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position', 'runtime_mappings', 'max_samples_per_key'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts index f242c6870..b8ffcebb6 100644 --- a/src/api/api/get_script_context.ts +++ b/src/api/api/get_script_context.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns all script contexts. + * Get script contexts. Get a list of supported script contexts and their methods. * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html | Elasticsearch API documentation} */ export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts index 1487bedf3..e20ad7ebb 100644 --- a/src/api/api/get_script_languages.ts +++ b/src/api/api/get_script_languages.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns available script types, languages and contexts + * Get script languages. Get a list of available script types, languages, and contexts. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} */ export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index b1cff556b..d4c0708cb 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -78,7 +78,7 @@ export default class Indices { } /** - * Performs analysis on a text string and returns the resulting tokens. + * Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens. 
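+ * @remarks A hedged sketch; the analyzer and text are example inputs.
+ * @example
+ * const { tokens } = await client.indices.analyze({ analyzer: 'standard', text: 'Quick Brown Foxes!' })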
diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts
index 0e0ddd859..d0b9054b3 100644
--- a/src/api/api/eql.ts
+++ b/src/api/api/eql.ts
@@ -149,7 +149,7 @@ export default class Eql {
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise<T.EqlSearchResponse<TEvent>>
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position', 'runtime_mappings']
+    const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position', 'runtime_mappings', 'max_samples_per_key']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
     const userBody: any = params?.body
diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts
index f242c6870..b8ffcebb6 100644
--- a/src/api/api/get_script_context.ts
+++ b/src/api/api/get_script_context.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * Returns all script contexts.
+ * Get script contexts. Get a list of supported script contexts and their methods.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html | Elasticsearch API documentation}
  */
 export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetScriptContextResponse>
diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts
index 1487bedf3..e20ad7ebb 100644
--- a/src/api/api/get_script_languages.ts
+++ b/src/api/api/get_script_languages.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * Returns available script types, languages and contexts
+ * Get script languages. Get a list of available script types, languages, and contexts.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation}
  */
 export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetScriptLanguagesResponse>
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
index b1cff556b..d4c0708cb 100644
--- a/src/api/api/indices.ts
+++ b/src/api/api/indices.ts
@@ -78,7 +78,7 @@ export default class Indices {
   }
 
   /**
-   * Performs analysis on a text string and returns the resulting tokens.
+   * Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html | Elasticsearch API documentation}
    */
   async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAnalyzeResponse>
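A minimal sketch of the analyze call described above (analyzer and text are illustrative):

```ts
const res = await client.indices.analyze({
  analyzer: 'standard',
  text: 'Quick Brown Foxes!'
})

// Each entry carries the token plus offsets, position, and type
console.log(res.tokens?.map(t => t.token)) // [ 'quick', 'brown', 'foxes' ]
```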
@@ -1468,22 +1468,15 @@ export default class Indices {
   async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise<T.IndicesPutDataLifecycleResponse>
   async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const acceptedBody: string[] = ['data_retention', 'downsampling']
+    const acceptedBody: string[] = ['lifecycle']
     const querystring: Record<string, any> = {}
     // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    let body: any = params.body ?? undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
-        body[key] = params[key]
+        body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts
index ad69cb84a..9caa35a70 100644
--- a/src/api/api/inference.ts
+++ b/src/api/api/inference.ts
@@ -224,4 +224,44 @@ export default class Inference {
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
+
+  /**
+   * Perform streaming inference
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/post-stream-inference-api.html | Elasticsearch API documentation}
+   */
+  async streamInference (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async streamInference (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async streamInference (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async streamInference (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['inference_id', 'task_type']
+    const querystring: Record<string, any> = {}
+    const body = undefined
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.task_type != null && params.inference_id != null) {
+      method = 'POST'
+      path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_stream`
+    } else {
+      method = 'POST'
+      path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_stream`
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'inference.stream_inference',
+      pathParts: {
+        inference_id: params.inference_id,
+        task_type: params.task_type
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
 }
diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts
index 75ffcaef0..ae069e696 100644
--- a/src/api/api/mget.ts
+++ b/src/api/api/mget.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * Allows to get multiple documents in one request.
+ * Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.html | Elasticsearch API documentation}
  */
 export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MgetResponse<TDocument>>
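A sketch of a multi get from the client (index name, ids, and the document shape are illustrative assumptions):

```ts
const { docs } = await client.mget<{ title: string }>({
  index: 'my-index',
  ids: ['1', '2', '3']
})

for (const doc of docs) {
  // Results are partial if a shard fails; check each doc individually
  if ('found' in doc && doc.found) {
    console.log(doc._id, doc._source?.title)
  }
}
```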
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index d45ba30d7..3b4268957 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -1989,7 +1989,7 @@ export default class Ml {
   }
 
   /**
-   * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.
+   * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-datafeed.html | Elasticsearch API documentation}
    */
   async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutDatafeedResponse>
diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts
index 5d5fbc912..7d5778210 100644
--- a/src/api/api/msearch.ts
+++ b/src/api/api/msearch.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * Allows to execute several search operations in one request.
+ * Run multiple searches. The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: ``` header\n body\n header\n body\n ``` This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. IMPORTANT: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html | Elasticsearch API documentation}
  */
 export default async function MsearchApi<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MsearchResponse<TDocument, TAggregations>>
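In the client, that header/body pairing maps onto the `searches` array, which alternates header and body objects (index names and queries are illustrative):

```ts
const { responses } = await client.msearch({
  searches: [
    { index: 'my-index' },                      // header
    { query: { match: { message: 'hello' } } }, // body
    { index: 'my-other-index' },                // header
    { query: { match_all: {} } }                // body
  ]
})
```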
diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts
index 5fdb4a43b..8886f5b9d 100644
--- a/src/api/api/mtermvectors.ts
+++ b/src/api/api/mtermvectors.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * Returns multiple termvectors in one request.
+ * Get multiple term vectors. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-termvectors.html | Elasticsearch API documentation}
  */
 export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MtermvectorsResponse>
diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts
index 1a090d5ff..150788940 100644
--- a/src/api/api/query_rules.ts
+++ b/src/api/api/query_rules.ts
@@ -292,4 +292,48 @@ export default class QueryRules {
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
+
+  /**
+   * Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/test-query-ruleset.html | Elasticsearch API documentation}
+   */
+  async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesTestResponse>
+  async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesTestResponse, unknown>>
+  async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<T.QueryRulesTestResponse>
+  async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['ruleset_id']
+    const acceptedBody: string[] = ['match_criteria']
+    const querystring: Record<string, any> = {}
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}/_test`
+    const meta: TransportRequestMetadata = {
+      name: 'query_rules.test',
+      pathParts: {
+        ruleset_id: params.ruleset_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
 }
diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts
index 4fedb4ed3..49d1a1b6c 100644
--- a/src/api/api/reindex_rethrottle.ts
+++ b/src/api/api/reindex_rethrottle.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * Copies documents from a source to a destination.
+ * Throttle a reindex operation. Change the number of requests per second for a particular reindex operation.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html | Elasticsearch API documentation}
  */
 export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ReindexRethrottleResponse>
diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts
index 13b86d8ee..ae356e9d3 100644
--- a/src/api/api/scroll.ts
+++ b/src/api/api/scroll.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * Allows to retrieve a large numbers of results from a single search request.
+ * Run a scrolling search. IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll | Elasticsearch API documentation}
  */
 export default async function ScrollApi<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchResponse<TDocument, TAggregations>>
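A sketch of the scroll loop described above, including releasing the search context at the end (index name, keep-alive, and page size are illustrative):

```ts
let res = await client.search({
  index: 'my-index',
  scroll: '30s',
  size: 1000,
  query: { match_all: {} }
})

while (res.hits.hits.length > 0) {
  for (const hit of res.hits.hits) {
    console.log(hit._id)
  }
  // Each scroll call also resets the retention period to `scroll`
  res = await client.scroll({ scroll_id: res._scroll_id!, scroll: '30s' })
}

await client.clearScroll({ scroll_id: res._scroll_id! })
```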
diff --git a/src/api/api/security.ts b/src/api/api/security.ts
index a3a602545..007e543ed 100644
--- a/src/api/api/security.ts
+++ b/src/api/api/security.ts
@@ -45,7 +45,7 @@ export default class Security {
   }
 
   /**
-   * Creates or updates a user profile on behalf of another user.
+   * Activate a user profile. Create or update a user profile on behalf of another user.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-activate-user-profile.html | Elasticsearch API documentation}
    */
   async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityActivateUserProfileResponse>
@@ -116,7 +116,7 @@ export default class Security {
   }
 
   /**
-   * The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files.
+   * Bulk delete roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-delete-role.html | Elasticsearch API documentation}
    */
   async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityBulkDeleteRoleResponse>
@@ -157,7 +157,7 @@ export default class Security {
   }
 
   /**
-   * The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files.
+   * Bulk create or update roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-put-role.html | Elasticsearch API documentation}
    */
   async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityBulkPutRoleResponse>
@@ -227,7 +227,7 @@ export default class Security {
   }
 
   /**
-   * Changes the passwords of users in the native realm and built-in users.
+   * Change passwords. Change the passwords of users in the native realm and built-in users.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-change-password.html | Elasticsearch API documentation}
    */
   async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityChangePasswordResponse>
@@ -279,7 +279,7 @@ export default class Security {
   }
 
   /**
-   * Evicts a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index.
+   * Clear the API key cache. Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-api-key-cache.html | Elasticsearch API documentation}
    */
   async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearApiKeyCacheResponse>
@@ -311,7 +311,7 @@ export default class Security {
   }
 
   /**
-   * Evicts application privileges from the native application privileges cache.
+   * Clear the privileges cache. Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-privilege-cache.html | Elasticsearch API documentation}
    */
   async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedPrivilegesResponse>
@@ -343,7 +343,7 @@ export default class Security {
   }
 
   /**
-   * Evicts users from the user cache. Can completely clear the cache or evict specific users.
+   * Clear the user cache. Evict users from the user cache. You can completely clear the cache or evict specific users.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-cache.html | Elasticsearch API documentation}
    */
   async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedRealmsResponse>
@@ -375,7 +375,7 @@ export default class Security {
   }
 
   /**
-   * Evicts roles from the native role cache.
+   * Clear the roles cache. Evict roles from the native role cache.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-role-cache.html | Elasticsearch API documentation}
    */
   async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedRolesResponse>
@@ -407,7 +407,7 @@ export default class Security {
   }
 
   /**
-   * Evicts tokens from the service account token caches.
+   * Clear service account token caches. Evict a subset of all entries from the service account token caches.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-service-token-caches.html | Elasticsearch API documentation}
    */
   async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityClearCachedServiceTokensResponse>
@@ -441,7 +441,7 @@ export default class Security {
   }
 
   /**
-   * Create an API key. Creates an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.
+   * Create an API key. Create an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-api-key.html | Elasticsearch API documentation}
    */
   async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateApiKeyResponse>
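A sketch of creating a restricted, expiring key with the call above (key name, expiration, and role are illustrative):

```ts
const key = await client.security.createApiKey({
  name: 'ingest-key',
  expiration: '7d', // omit for a non-expiring key
  role_descriptors: {
    'ingest-only': {
      indices: [{ names: ['logs-*'], privileges: ['create_doc'] }]
    }
  }
})

// `encoded` is ready to use in an `Authorization: ApiKey ...` header
console.log(key.id, key.encoded)
```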
@@ -512,7 +512,7 @@ export default class Security {
   }
 
   /**
-   * Creates a service accounts token for access without requiring basic authentication.
+   * Create a service account token. Create a service account token for access without requiring basic authentication.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-service-token.html | Elasticsearch API documentation}
    */
   async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateServiceTokenResponse>
@@ -553,7 +553,7 @@ export default class Security {
   }
 
   /**
-   * Removes application privileges.
+   * Delete application privileges.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-privilege.html | Elasticsearch API documentation}
    */
   async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeletePrivilegesResponse>
@@ -586,7 +586,7 @@ export default class Security {
   }
 
   /**
-   * Removes roles in the native realm.
+   * Delete roles. Delete roles in the native realm.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-role.html | Elasticsearch API documentation}
    */
   async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleResponse>
@@ -618,7 +618,7 @@ export default class Security {
   }
 
   /**
-   * Removes role mappings.
+   * Delete role mappings.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-role-mapping.html | Elasticsearch API documentation}
    */
   async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleMappingResponse>
@@ -650,7 +650,7 @@ export default class Security {
   }
 
   /**
-   * Deletes a service account token.
+   * Delete service account tokens. Delete service account tokens for a service in a specified namespace.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-service-token.html | Elasticsearch API documentation}
    */
   async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteServiceTokenResponse>
@@ -684,7 +684,7 @@ export default class Security {
   }
 
   /**
-   * Deletes users from the native realm.
+   * Delete users. Delete users from the native realm.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-user.html | Elasticsearch API documentation}
    */
   async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteUserResponse>
@@ -716,7 +716,7 @@ export default class Security {
   }
 
   /**
-   * Disables users in the native realm.
+   * Disable users. Disable users in the native realm.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user.html | Elasticsearch API documentation}
    */
   async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserResponse>
@@ -748,7 +748,7 @@ export default class Security {
   }
 
   /**
-   * Disables a user profile so it's not visible in user profile searches.
+   * Disable a user profile. Disable user profiles so that they are not visible in user profile searches.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user-profile.html | Elasticsearch API documentation}
    */
   async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserProfileResponse>
@@ -780,7 +780,7 @@ export default class Security {
   }
 
   /**
-   * Enables users in the native realm.
+   * Enable users. Enable users in the native realm.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user.html | Elasticsearch API documentation}
    */
   async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserResponse>
@@ -812,7 +812,7 @@ export default class Security {
   }
 
   /**
-   * Enables a user profile so it's visible in user profile searches.
+   * Enable a user profile. Enable user profiles to make them visible in user profile searches.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user-profile.html | Elasticsearch API documentation}
    */
   async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserProfileResponse>
@@ -844,7 +844,7 @@ export default class Security {
   }
 
   /**
-   * Enables a Kibana instance to configure itself for communication with a secured Elasticsearch cluster.
+   * Enroll Kibana. Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-kibana-enrollment.html | Elasticsearch API documentation}
    */
   async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnrollKibanaResponse>
@@ -874,7 +874,7 @@ export default class Security {
   }
 
   /**
-   * Allows a new node to join an existing cluster with security features enabled.
+   * Enroll a node. Enroll a new node to allow it to join an existing cluster with security features enabled.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-node-enrollment.html | Elasticsearch API documentation}
    */
   async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnrollNodeResponse>
@@ -934,7 +934,7 @@ export default class Security {
   }
 
   /**
-   * Retrieves the list of cluster privileges and index privileges that are available in this version of Elasticsearch.
+   * Get builtin privileges. Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-builtin-privileges.html | Elasticsearch API documentation}
    */
   async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetBuiltinPrivilegesResponse>
@@ -964,7 +964,7 @@ export default class Security {
   }
 
   /**
-   * Retrieves application privileges.
+   * Get application privileges.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-privileges.html | Elasticsearch API documentation}
    */
   async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetPrivilegesResponse>
@@ -1008,7 +1008,7 @@ export default class Security {
   }
 
   /**
-   * The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files.
+   * Get roles. Get roles in the native realm.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-role.html | Elasticsearch API documentation}
    */
   async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleResponse>
@@ -1048,7 +1048,7 @@ export default class Security {
   }
 
   /**
-   * Retrieves role mappings.
+   * Get role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-role-mapping.html | Elasticsearch API documentation}
    */
   async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleMappingResponse>
@@ -1088,7 +1088,7 @@ export default class Security {
   }
 
   /**
-   * This API returns a list of service accounts that match the provided path parameter(s).
+   * Get service accounts. Get a list of service accounts that match the provided path parameters.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-service-accounts.html | Elasticsearch API documentation}
    */
   async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceAccountsResponse>
@@ -1132,7 +1132,7 @@ export default class Security {
   }
 
   /**
-   * Retrieves information of all service credentials for a service account.
+   * Get service account credentials.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-service-credentials.html | Elasticsearch API documentation}
    */
   async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceCredentialsResponse>
@@ -1194,7 +1194,7 @@ export default class Security {
   }
 
   /**
-   * Creates a bearer token for access without requiring basic authentication.
+   * Get a token. Create a bearer token for access without requiring basic authentication.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-token.html | Elasticsearch API documentation}
    */
   async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetTokenResponse>
@@ -1236,7 +1236,7 @@ export default class Security {
   }
 
   /**
-   * Retrieves information about users in the native realm and built-in users.
+   * Get users. Get information about users in the native realm and built-in users.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user.html | Elasticsearch API documentation}
    */
   async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserResponse>
@@ -1276,7 +1276,7 @@ export default class Security {
   }
 
   /**
-   * Retrieves security privileges for the logged in user.
+   * Get user privileges.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-privileges.html | Elasticsearch API documentation}
    */
   async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserPrivilegesResponse>
@@ -1306,7 +1306,7 @@ export default class Security {
   }
 
   /**
-   * Retrieves a user's profile using the unique profile ID.
+   * Get a user profile. Get a user's profile using the unique profile ID.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-profile.html | Elasticsearch API documentation}
    */
   async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetUserProfileResponse>
@@ -1338,7 +1338,7 @@ export default class Security {
   }
 
   /**
-   * Creates an API key on behalf of another user. This API is similar to Create API keys, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user’s credentials. The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys.
+   * Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user’s credentials. The user, for whom the authentication credentials are provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-grant-api-key.html | Elasticsearch API documentation}
    */
   async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGrantApiKeyResponse>
@@ -1379,7 +1379,7 @@ export default class Security {
   }
 
   /**
-   * Check user privileges. Determines whether the specified user has a specified list of privileges.
+   * Check user privileges. Determine whether the specified user has a specified list of privileges.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-has-privileges.html | Elasticsearch API documentation}
    */
   async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityHasPrivilegesResponse>
@@ -1431,7 +1431,7 @@ export default class Security {
   }
 
   /**
-   * Determines whether the users associated with the specified profile IDs have all the requested privileges.
+   * Check user profile privileges. Determine whether the users associated with the specified user profile IDs have all the requested privileges.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-has-privileges-user-profile.html | Elasticsearch API documentation}
    */
   async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityHasPrivilegesUserProfileResponse>
@@ -1472,7 +1472,7 @@ export default class Security {
   }
 
   /**
-   * Invalidate API keys. Invalidates one or more API keys. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` only allows deleting API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user’s identity. - Or, if the request is issued by an API key, i.e. an API key invalidates itself, specify its ID in the `ids` field.
+   * Invalidate API keys. This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` only allows deleting API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user’s identity. - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-api-key.html | Elasticsearch API documentation}
    */
   async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityInvalidateApiKeyResponse>
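The three invalidation forms described above look like this from the client (the key id is an illustrative placeholder):

```ts
// By explicit id
await client.security.invalidateApiKey({ ids: ['VuaCfGcBCdbkQm-e5aOx'] })

// Everything owned by the calling user
await client.security.invalidateApiKey({ owner: true })

// By the owner's identity
await client.security.invalidateApiKey({ username: 'myuser', realm_name: 'native1' })
```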
@@ -1514,7 +1514,7 @@ export default class Security {
   }
 
   /**
-   * Invalidates one or more access tokens or refresh tokens.
+   * Invalidate a token. The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-token.html | Elasticsearch API documentation}
    */
   async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityInvalidateTokenResponse>
@@ -1643,7 +1643,7 @@ export default class Security {
   }
 
   /**
-   * Adds or updates application privileges.
+   * Create or update application privileges.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-privileges.html | Elasticsearch API documentation}
    */
   async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutPrivilegesResponse>
@@ -1677,7 +1677,7 @@ export default class Security {
   }
 
   /**
-   * The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files.
+   * Create or update roles. The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-role.html | Elasticsearch API documentation}
    */
   async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutRoleResponse>
@@ -1721,7 +1721,7 @@ export default class Security {
   }
 
   /**
-   * Creates and updates role mappings.
+   * Create or update role mappings. Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-role-mapping.html | Elasticsearch API documentation}
    */
   async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutRoleMappingResponse>
@@ -1765,7 +1765,7 @@ export default class Security {
   }
 
   /**
-   * Adds and updates users in the native realm. These users are commonly referred to as native users.
+   * Create or update users. A password is required for adding a new user but is optional when updating an existing user. To change a user’s password without updating any other fields, use the change password API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-user.html | Elasticsearch API documentation}
    */
   async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityPutUserResponse>
@@ -1809,7 +1809,7 @@ export default class Security {
   }
 
   /**
-   * Query API keys. Retrieves a paginated list of API keys and their information. You can optionally filter the results with a query.
+   * Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-api-key.html | Elasticsearch API documentation}
    */
   async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityQueryApiKeysResponse>
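A sketch of a filtered, sorted key listing with the query API above (query and sort are illustrative):

```ts
const res = await client.security.queryApiKeys({
  query: { term: { invalidated: false } },
  sort: [{ creation: 'desc' }],
  size: 10
})

console.log(res.total, res.api_keys.map(k => k.name))
```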
@@ -1851,7 +1851,7 @@ export default class Security {
   }
 
   /**
-   * Retrieves roles in a paginated manner. You can optionally filter the results with a query.
+   * Find roles with a query. Get roles in a paginated manner. You can optionally filter the results with a query.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-role.html | Elasticsearch API documentation}
    */
   async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityQueryRoleResponse>
@@ -1893,7 +1893,7 @@ export default class Security {
   }
 
   /**
-   * Retrieves information for Users in a paginated manner. You can optionally filter the results with a query.
+   * Find users with a query. Get information for users in a paginated manner. You can optionally filter the results with a query.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-user.html | Elasticsearch API documentation}
    */
   async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityQueryUserResponse>
@@ -1935,7 +1935,7 @@ export default class Security {
   }
 
   /**
-   * Submits a SAML Response message to Elasticsearch for consumption.
+   * Authenticate SAML. Submits a SAML response message to Elasticsearch for consumption.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-authenticate.html | Elasticsearch API documentation}
    */
   async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlAuthenticateResponse>
@@ -1976,7 +1976,7 @@ export default class Security {
   }
 
   /**
-   * Verifies the logout response sent from the SAML IdP.
+   * Logout of SAML completely. Verifies the logout response sent from the SAML IdP.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-complete-logout.html | Elasticsearch API documentation}
    */
   async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlCompleteLogoutResponse>
@@ -2017,7 +2017,7 @@ export default class Security {
   }
 
   /**
-   * Submits a SAML LogoutRequest message to Elasticsearch for consumption.
+   * Invalidate SAML. Submits a SAML LogoutRequest message to Elasticsearch for consumption.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-invalidate.html | Elasticsearch API documentation}
    */
   async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlInvalidateResponse>
@@ -2058,7 +2058,7 @@ export default class Security {
   }
 
   /**
-   * Submits a request to invalidate an access token and refresh token.
+   * Logout of SAML. Submits a request to invalidate an access token and refresh token.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-logout.html | Elasticsearch API documentation}
    */
   async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlLogoutResponse>
@@ -2099,7 +2099,7 @@ export default class Security {
   }
 
   /**
-   * Creates a SAML authentication request (`<AuthnRequest>`) as a URL string, based on the configuration of the respective SAML realm in Elasticsearch.
+   * Prepare SAML authentication. Creates a SAML authentication request (`<AuthnRequest>`) as a URL string, based on the configuration of the respective SAML realm in Elasticsearch.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-prepare-authentication.html | Elasticsearch API documentation}
    */
   async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlPrepareAuthenticationResponse>
@@ -2141,7 +2141,7 @@ export default class Security {
   }
 
   /**
-   * Generate SAML metadata for a SAML 2.0 Service Provider.
+   * Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 Service Provider.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-sp-metadata.html | Elasticsearch API documentation}
    */
   async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlServiceProviderMetadataResponse>
@@ -2173,7 +2173,7 @@ export default class Security {
   }
 
   /**
-   * Get suggestions for user profiles that match specified search criteria.
+   * Suggest a user profile. Get suggestions for user profiles that match specified search criteria.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-suggest-user-profile.html | Elasticsearch API documentation}
    */
   async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySuggestUserProfilesResponse>
@@ -2320,7 +2320,7 @@ export default class Security {
   }
 
   /**
-   * Updates specific data for the user profile that's associated with the specified unique ID.
+   * Update user profile data. Update specific data for the user profile that is associated with a unique ID.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-user-profile-data.html | Elasticsearch API documentation}
    */
   async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityUpdateUserProfileDataResponse>
diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts
index 9ace268c1..08057b0f9 100644
--- a/src/api/api/ssl.ts
+++ b/src/api/api/ssl.ts
@@ -45,7 +45,7 @@ export default class Ssl {
   }
 
   /**
-   * Retrieves information about the X.509 certificates used to encrypt communications in the cluster.
+   * Get SSL certificates. Get information about the X.509 certificates that are used to encrypt communications in the cluster. The API returns a list that includes certificates from all TLS contexts including: - Settings for transport and HTTP interfaces - TLS settings that are used within authentication realms - TLS settings for remote monitoring exporters The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. It also includes certificates that are used for configuring server identity, such as the `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings. The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration. If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-ssl.html | Elasticsearch API documentation}
    */
   async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SslCertificatesResponse>
diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts
index 1dd51ece7..fe04897e9 100644
--- a/src/api/api/terms_enum.ts
+++ b/src/api/api/terms_enum.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * The terms enum API can be used to discover terms in the index that begin with the provided string. It is designed for low-latency look-ups used in auto-complete scenarios.
+ * Get terms in an index. Discover terms that match a partial string in an index. This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios. If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate. This can occur due to a few reasons, such as a request timeout or a node error. NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-terms-enum.html | Elasticsearch API documentation}
  */
 export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TermsEnumResponse>
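An auto-complete style lookup with the terms enum API (index, field, and prefix are illustrative):

```ts
const res = await client.termsEnum({
  index: 'stackoverflow',
  field: 'tags',
  string: 'kib'
})

// Treat the terms as approximate when `complete` is false
console.log(res.terms, res.complete)
```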
diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts
index 68f5cd9ed..75acdc834 100644
--- a/src/api/api/update_by_query_rethrottle.ts
+++ b/src/api/api/update_by_query_rethrottle.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * Changes the number of requests per second for a particular Update By Query operation.
+ * Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html | Elasticsearch API documentation}
  */
 export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.UpdateByQueryRethrottleResponse>
diff --git a/src/api/types.ts b/src/api/types.ts
index 8497cb915..8087c1609 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -5336,22 +5336,28 @@ export interface MappingDateRangeProperty extends MappingRangePropertyBase {
   type: 'date_range'
 }
 
+export type MappingDenseVectorElementType = 'bit' | 'byte' | 'float'
+
 export interface MappingDenseVectorIndexOptions {
-  type: string
-  m?: integer
-  ef_construction?: integer
   confidence_interval?: float
+  ef_construction?: integer
+  m?: integer
+  type: MappingDenseVectorIndexOptionsType
 }
 
+export type MappingDenseVectorIndexOptionsType = 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw'
+
 export interface MappingDenseVectorProperty extends MappingPropertyBase {
   type: 'dense_vector'
-  element_type?: string
   dims?: integer
-  similarity?: string
+  element_type?: MappingDenseVectorElementType
   index?: boolean
   index_options?: MappingDenseVectorIndexOptions
+  similarity?: MappingDenseVectorSimilarity
 }
 
+export type MappingDenseVectorSimilarity = 'cosine' | 'dot_product' | 'l2_norm' | 'max_inner_product'
+
 export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase {
   doc_values?: boolean
 }
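A sketch of what the stricter dense_vector typings accept in a mapping (index name, dimensions, and tuning values are illustrative):

```ts
await client.indices.create({
  index: 'my-vectors',
  mappings: {
    properties: {
      embedding: {
        type: 'dense_vector',
        dims: 384,
        element_type: 'float',  // MappingDenseVectorElementType
        similarity: 'cosine',   // MappingDenseVectorSimilarity
        index_options: {
          type: 'int8_hnsw',    // MappingDenseVectorIndexOptionsType
          m: 16,
          ef_construction: 100
        }
      }
    }
  }
})
```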
@@ -8643,18 +8649,20 @@ export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase
 export interface CcrFollowRequest extends RequestBase {
   index: IndexName
   wait_for_active_shards?: WaitForActiveShards
-  leader_index?: IndexName
+  data_stream_name?: string
+  leader_index: IndexName
   max_outstanding_read_requests?: long
-  max_outstanding_write_requests?: long
-  max_read_request_operation_count?: long
-  max_read_request_size?: string
+  max_outstanding_write_requests?: integer
+  max_read_request_operation_count?: integer
+  max_read_request_size?: ByteSize
   max_retry_delay?: Duration
-  max_write_buffer_count?: long
-  max_write_buffer_size?: string
-  max_write_request_operation_count?: long
-  max_write_request_size?: string
+  max_write_buffer_count?: integer
+  max_write_buffer_size?: ByteSize
+  max_write_request_operation_count?: integer
+  max_write_request_size?: ByteSize
   read_poll_timeout?: Duration
-  remote_cluster?: string
+  remote_cluster: string
+  settings?: IndicesIndexSettings
 }
 
 export interface CcrFollowResponse {
@@ -8672,16 +8680,16 @@ export interface CcrFollowInfoFollowerIndex {
 }
 
 export interface CcrFollowInfoFollowerIndexParameters {
-  max_outstanding_read_requests: integer
-  max_outstanding_write_requests: integer
-  max_read_request_operation_count: integer
-  max_read_request_size: string
-  max_retry_delay: Duration
-  max_write_buffer_count: integer
-  max_write_buffer_size: string
-  max_write_request_operation_count: integer
-  max_write_request_size: string
-  read_poll_timeout: Duration
+  max_outstanding_read_requests?: long
+  max_outstanding_write_requests?: integer
+  max_read_request_operation_count?: integer
+  max_read_request_size?: ByteSize
+  max_retry_delay?: Duration
+  max_write_buffer_count?: integer
+  max_write_buffer_size?: ByteSize
+  max_write_request_operation_count?: integer
+  max_write_request_size?: ByteSize
+  read_poll_timeout?: Duration
 }
 
 export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused'
@@ -9628,7 +9636,7 @@ export interface ConnectorFeatureEnabled {
 export interface ConnectorFilteringAdvancedSnippet {
   created_at?: DateTime
   updated_at?: DateTime
-  value: Record<string, any>
+  value: any
 }
 
 export interface ConnectorFilteringConfig {
@@ -10178,6 +10186,7 @@ export interface EqlSearchRequest extends RequestBase {
   fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[]
   result_position?: EqlSearchResultPosition
   runtime_mappings?: MappingRuntimeFields
+  max_samples_per_key?: integer
 }
 
 export type EqlSearchResponse<TEvent = unknown> = EqlEqlSearchResponseBase<TEvent>
@@ -10713,6 +10722,7 @@ export interface IndicesDataStreamIndex {
 export interface IndicesDataStreamLifecycle {
   data_retention?: Duration
   downsampling?: IndicesDataStreamLifecycleDownsampling
+  enabled?: boolean
 }
 
 export interface IndicesDataStreamLifecycleDownsampling {
@@ -10732,9 +10742,7 @@ export interface IndicesDataStreamLifecycleRolloverConditions {
   max_primary_shard_docs?: long
 }
 
-export interface IndicesDataStreamLifecycleWithRollover {
-  data_retention?: Duration
-  downsampling?: IndicesDataStreamLifecycleDownsampling
+export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStreamLifecycle {
   rollover?: IndicesDataStreamLifecycleRolloverConditions
 }
 
@@ -11592,7 +11600,7 @@ export type IndicesGetAliasResponse = Record
@@ -12785,6 +12818,15 @@ export interface IngestMaxmind {
   account_id: Id
 }
 
+export interface IngestNetworkDirectionProcessor extends IngestProcessorBase {
+  source_ip?: Field
+  destination_ip?: Field
+  target_field?: Field
+  internal_networks?: string[]
+  internal_networks_field?: Field
+  ignore_missing?: boolean
+}
+
 export interface IngestPipeline {
   description?: string
   on_failure?: IngestProcessorContainer[]
@@ -12818,6 +12860,7 @@ export interface IngestProcessorContainer {
   attachment?: IngestAttachmentProcessor
   bytes?: IngestBytesProcessor
   circle?: IngestCircleProcessor
+  community_id?: IngestCommunityIDProcessor
   convert?: IngestConvertProcessor
   csv?: IngestCsvProcessor
   date?: IngestDateProcessor
@@ -12827,6 +12870,7 @@
   drop?: IngestDropProcessor
   enrich?: IngestEnrichProcessor
   fail?: IngestFailProcessor
+  fingerprint?: IngestFingerprintProcessor
   foreach?: IngestForeachProcessor
   geo_grid?: IngestGeoGridProcessor
   geoip?: IngestGeoIpProcessor
@@ -12838,8 +12882,10 @@
   json?: IngestJsonProcessor
   kv?: IngestKeyValueProcessor
   lowercase?: IngestLowercaseProcessor
+  network_direction?: IngestNetworkDirectionProcessor
IngestPipelineProcessor redact?: IngestRedactProcessor + registered_domain?: IngestRegisteredDomainProcessor remove?: IngestRemoveProcessor rename?: IngestRenameProcessor reroute?: IngestRerouteProcessor @@ -12848,6 +12894,7 @@ export interface IngestProcessorContainer { set_security_user?: IngestSetSecurityUserProcessor sort?: IngestSortProcessor split?: IngestSplitProcessor + terminate?: IngestTerminateProcessor trim?: IngestTrimProcessor uppercase?: IngestUppercaseProcessor urldecode?: IngestUrlDecodeProcessor @@ -12866,6 +12913,12 @@ export interface IngestRedactProcessor extends IngestProcessorBase { trace_redact?: boolean } +export interface IngestRegisteredDomainProcessor extends IngestProcessorBase { + field: Field + target_field?: Field + ignore_missing?: boolean +} + export interface IngestRemoveProcessor extends IngestProcessorBase { field: Fields keep?: Fields @@ -12921,6 +12974,9 @@ export interface IngestSplitProcessor extends IngestProcessorBase { target_field?: Field } +export interface IngestTerminateProcessor extends IngestProcessorBase { +} + export interface IngestTrimProcessor extends IngestProcessorBase { field: Field ignore_missing?: boolean @@ -16528,7 +16584,7 @@ export interface NodesInfoNodeInfoSettingsIngest { } export interface NodesInfoNodeInfoSettingsNetwork { - host?: Host + host?: Host | Host[] } export interface NodesInfoNodeInfoSettingsNode { @@ -16561,6 +16617,7 @@ export interface NodesInfoNodeInfoXpack { license?: NodesInfoNodeInfoXpackLicense security: NodesInfoNodeInfoXpackSecurity notification?: Record + ml?: NodesInfoNodeInfoXpackMl } export interface NodesInfoNodeInfoXpackLicense { @@ -16571,16 +16628,20 @@ export interface NodesInfoNodeInfoXpackLicenseType { type: string } +export interface NodesInfoNodeInfoXpackMl { + use_auto_machine_memory_percent?: boolean +} + export interface NodesInfoNodeInfoXpackSecurity { - http: NodesInfoNodeInfoXpackSecuritySsl + http?: NodesInfoNodeInfoXpackSecuritySsl enabled: string transport?: NodesInfoNodeInfoXpackSecuritySsl authc?: NodesInfoNodeInfoXpackSecurityAuthc } export interface NodesInfoNodeInfoXpackSecurityAuthc { - realms: NodesInfoNodeInfoXpackSecurityAuthcRealms - token: NodesInfoNodeInfoXpackSecurityAuthcToken + realms?: NodesInfoNodeInfoXpackSecurityAuthcRealms + token?: NodesInfoNodeInfoXpackSecurityAuthcToken } export interface NodesInfoNodeInfoXpackSecurityAuthcRealms { @@ -16809,6 +16870,21 @@ export interface QueryRulesPutRulesetResponse { result: Result } +export interface QueryRulesTestQueryRulesetMatchedRule { + ruleset_id: Id + rule_id: Id +} + +export interface QueryRulesTestRequest extends RequestBase { + ruleset_id: Id + match_criteria: Record +} + +export interface QueryRulesTestResponse { + total_matched_rules: integer + matched_rules: QueryRulesTestQueryRulesetMatchedRule[] +} + export interface RollupDateHistogramGrouping { delay?: Duration field: Field @@ -17004,10 +17080,13 @@ export interface SearchApplicationEventDataStream { name: IndexName } -export interface SearchApplicationSearchApplication { +export interface SearchApplicationSearchApplication extends SearchApplicationSearchApplicationParameters { name: Name - indices: IndexName[] updated_at_millis: EpochTime +} + +export interface SearchApplicationSearchApplicationParameters { + indices: IndexName[] analytics_collection_name?: Name template?: SearchApplicationSearchApplicationTemplate } @@ -17048,20 +17127,13 @@ export interface SearchApplicationListRequest extends RequestBase { export interface 
SearchApplicationListResponse { count: long - results: SearchApplicationListSearchApplicationListItem[] -} - -export interface SearchApplicationListSearchApplicationListItem { - name: Name - indices: IndexName[] - updated_at_millis: EpochTime - analytics_collection_name?: Name + results: SearchApplicationSearchApplication[] } export interface SearchApplicationPutRequest extends RequestBase { name: Name create?: boolean - search_application?: SearchApplicationSearchApplication + search_application?: SearchApplicationSearchApplicationParameters } export interface SearchApplicationPutResponse { @@ -17217,7 +17289,7 @@ export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'crea export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity - names: Indices + names: IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery allow_restricted_indices?: boolean @@ -17237,7 +17309,7 @@ export interface SecurityRealmInfo { export interface SecurityRemoteIndicesPrivileges { clusters: Names field_security?: SecurityFieldSecurity - names: Indices + names: IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery allow_restricted_indices?: boolean @@ -17315,7 +17387,7 @@ export interface SecurityUser { export interface SecurityUserIndicesPrivileges { field_security?: SecurityFieldSecurity[] - names: Indices + names: IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery[] allow_restricted_indices: boolean @@ -17625,7 +17697,7 @@ export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { export interface SecurityGetBuiltinPrivilegesResponse { cluster: string[] - index: Indices + index: IndexName[] } export interface SecurityGetPrivilegesRequest extends RequestBase { @@ -18704,7 +18776,8 @@ export interface SnapshotRestoreRequest extends RequestBase { } export interface SnapshotRestoreResponse { - snapshot: SnapshotRestoreSnapshotRestore + accepted?: boolean + snapshot?: SnapshotRestoreSnapshotRestore } export interface SnapshotRestoreSnapshotRestore { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 7b8f18d85..385a158b7 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -5412,22 +5412,28 @@ export interface MappingDateRangeProperty extends MappingRangePropertyBase { type: 'date_range' } +export type MappingDenseVectorElementType = 'bit' | 'byte' | 'float' + export interface MappingDenseVectorIndexOptions { - type: string - m?: integer - ef_construction?: integer confidence_interval?: float + ef_construction?: integer + m?: integer + type: MappingDenseVectorIndexOptionsType } +export type MappingDenseVectorIndexOptionsType = 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw' + export interface MappingDenseVectorProperty extends MappingPropertyBase { type: 'dense_vector' - element_type?: string dims?: integer - similarity?: string + element_type?: MappingDenseVectorElementType index?: boolean index_options?: MappingDenseVectorIndexOptions + similarity?: MappingDenseVectorSimilarity } +export type MappingDenseVectorSimilarity = 'cosine' | 'dot_product' | 'l2_norm' | 'max_inner_product' + export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase { doc_values?: boolean } @@ -8725,18 +8731,20 @@ export interface CcrFollowRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards /** @deprecated The use of the 'body' key has been deprecated, move the 
nested keys to the top level object. */ body?: { - leader_index?: IndexName + data_stream_name?: string + leader_index: IndexName max_outstanding_read_requests?: long - max_outstanding_write_requests?: long - max_read_request_operation_count?: long - max_read_request_size?: string + max_outstanding_write_requests?: integer + max_read_request_operation_count?: integer + max_read_request_size?: ByteSize max_retry_delay?: Duration - max_write_buffer_count?: long - max_write_buffer_size?: string - max_write_request_operation_count?: long - max_write_request_size?: string + max_write_buffer_count?: integer + max_write_buffer_size?: ByteSize + max_write_request_operation_count?: integer + max_write_request_size?: ByteSize read_poll_timeout?: Duration - remote_cluster?: string + remote_cluster: string + settings?: IndicesIndexSettings } } @@ -8755,16 +8763,16 @@ export interface CcrFollowInfoFollowerIndex { } export interface CcrFollowInfoFollowerIndexParameters { - max_outstanding_read_requests: integer - max_outstanding_write_requests: integer - max_read_request_operation_count: integer - max_read_request_size: string - max_retry_delay: Duration - max_write_buffer_count: integer - max_write_buffer_size: string - max_write_request_operation_count: integer - max_write_request_size: string - read_poll_timeout: Duration + max_outstanding_read_requests?: long + max_outstanding_write_requests?: integer + max_read_request_operation_count?: integer + max_read_request_size?: ByteSize + max_retry_delay?: Duration + max_write_buffer_count?: integer + max_write_buffer_size?: ByteSize + max_write_request_operation_count?: integer + max_write_request_size?: ByteSize + read_poll_timeout?: Duration } export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused' @@ -9732,7 +9740,7 @@ export interface ConnectorFeatureEnabled { export interface ConnectorFilteringAdvancedSnippet { created_at?: DateTime updated_at?: DateTime - value: Record + value: any } export interface ConnectorFilteringConfig { @@ -10335,6 +10343,7 @@ export interface EqlSearchRequest extends RequestBase { fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[] result_position?: EqlSearchResultPosition runtime_mappings?: MappingRuntimeFields + max_samples_per_key?: integer } } @@ -10890,6 +10899,7 @@ export interface IndicesDataStreamIndex { export interface IndicesDataStreamLifecycle { data_retention?: Duration downsampling?: IndicesDataStreamLifecycleDownsampling + enabled?: boolean } export interface IndicesDataStreamLifecycleDownsampling { @@ -10909,9 +10919,7 @@ export interface IndicesDataStreamLifecycleRolloverConditions { max_primary_shard_docs?: long } -export interface IndicesDataStreamLifecycleWithRollover { - data_retention?: Duration - downsampling?: IndicesDataStreamLifecycleDownsampling +export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStreamLifecycle { rollover?: IndicesDataStreamLifecycleRolloverConditions } @@ -11779,7 +11787,7 @@ export type IndicesGetAliasResponse = Record @@ -13013,6 +13044,15 @@ export interface IngestMaxmind { account_id: Id } +export interface IngestNetworkDirectionProcessor extends IngestProcessorBase { + source_ip?: Field + destination_ip?: Field + target_field?: Field + internal_networks?: string[] + internal_networks_field?: Field + ignore_missing?: boolean +} + export interface IngestPipeline { description?: string on_failure?: IngestProcessorContainer[] @@ -13046,6 +13086,7 @@ export interface IngestProcessorContainer { attachment?: 
IngestAttachmentProcessor bytes?: IngestBytesProcessor circle?: IngestCircleProcessor + community_id?: IngestCommunityIDProcessor convert?: IngestConvertProcessor csv?: IngestCsvProcessor date?: IngestDateProcessor @@ -13055,6 +13096,7 @@ export interface IngestProcessorContainer { drop?: IngestDropProcessor enrich?: IngestEnrichProcessor fail?: IngestFailProcessor + fingerprint?: IngestFingerprintProcessor foreach?: IngestForeachProcessor geo_grid?: IngestGeoGridProcessor geoip?: IngestGeoIpProcessor @@ -13066,8 +13108,10 @@ export interface IngestProcessorContainer { json?: IngestJsonProcessor kv?: IngestKeyValueProcessor lowercase?: IngestLowercaseProcessor + network_direction?: IngestNetworkDirectionProcessor pipeline?: IngestPipelineProcessor redact?: IngestRedactProcessor + registered_domain?: IngestRegisteredDomainProcessor remove?: IngestRemoveProcessor rename?: IngestRenameProcessor reroute?: IngestRerouteProcessor @@ -13076,6 +13120,7 @@ export interface IngestProcessorContainer { set_security_user?: IngestSetSecurityUserProcessor sort?: IngestSortProcessor split?: IngestSplitProcessor + terminate?: IngestTerminateProcessor trim?: IngestTrimProcessor uppercase?: IngestUppercaseProcessor urldecode?: IngestUrlDecodeProcessor @@ -13094,6 +13139,12 @@ export interface IngestRedactProcessor extends IngestProcessorBase { trace_redact?: boolean } +export interface IngestRegisteredDomainProcessor extends IngestProcessorBase { + field: Field + target_field?: Field + ignore_missing?: boolean +} + export interface IngestRemoveProcessor extends IngestProcessorBase { field: Fields keep?: Fields @@ -13149,6 +13200,9 @@ export interface IngestSplitProcessor extends IngestProcessorBase { target_field?: Field } +export interface IngestTerminateProcessor extends IngestProcessorBase { +} + export interface IngestTrimProcessor extends IngestProcessorBase { field: Field ignore_missing?: boolean @@ -16883,7 +16937,7 @@ export interface NodesInfoNodeInfoSettingsIngest { } export interface NodesInfoNodeInfoSettingsNetwork { - host?: Host + host?: Host | Host[] } export interface NodesInfoNodeInfoSettingsNode { @@ -16916,6 +16970,7 @@ export interface NodesInfoNodeInfoXpack { license?: NodesInfoNodeInfoXpackLicense security: NodesInfoNodeInfoXpackSecurity notification?: Record + ml?: NodesInfoNodeInfoXpackMl } export interface NodesInfoNodeInfoXpackLicense { @@ -16926,16 +16981,20 @@ export interface NodesInfoNodeInfoXpackLicenseType { type: string } +export interface NodesInfoNodeInfoXpackMl { + use_auto_machine_memory_percent?: boolean +} + export interface NodesInfoNodeInfoXpackSecurity { - http: NodesInfoNodeInfoXpackSecuritySsl + http?: NodesInfoNodeInfoXpackSecuritySsl enabled: string transport?: NodesInfoNodeInfoXpackSecuritySsl authc?: NodesInfoNodeInfoXpackSecurityAuthc } export interface NodesInfoNodeInfoXpackSecurityAuthc { - realms: NodesInfoNodeInfoXpackSecurityAuthcRealms - token: NodesInfoNodeInfoXpackSecurityAuthcToken + realms?: NodesInfoNodeInfoXpackSecurityAuthcRealms + token?: NodesInfoNodeInfoXpackSecurityAuthcToken } export interface NodesInfoNodeInfoXpackSecurityAuthcRealms { @@ -17173,6 +17232,24 @@ export interface QueryRulesPutRulesetResponse { result: Result } +export interface QueryRulesTestQueryRulesetMatchedRule { + ruleset_id: Id + rule_id: Id +} + +export interface QueryRulesTestRequest extends RequestBase { + ruleset_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + match_criteria: Record + } +} + +export interface QueryRulesTestResponse { + total_matched_rules: integer + matched_rules: QueryRulesTestQueryRulesetMatchedRule[] +} + export interface RollupDateHistogramGrouping { delay?: Duration field: Field @@ -17374,10 +17451,13 @@ export interface SearchApplicationEventDataStream { name: IndexName } -export interface SearchApplicationSearchApplication { +export interface SearchApplicationSearchApplication extends SearchApplicationSearchApplicationParameters { name: Name - indices: IndexName[] updated_at_millis: EpochTime +} + +export interface SearchApplicationSearchApplicationParameters { + indices: IndexName[] analytics_collection_name?: Name template?: SearchApplicationSearchApplicationTemplate } @@ -17418,21 +17498,14 @@ export interface SearchApplicationListRequest extends RequestBase { export interface SearchApplicationListResponse { count: long - results: SearchApplicationListSearchApplicationListItem[] -} - -export interface SearchApplicationListSearchApplicationListItem { - name: Name - indices: IndexName[] - updated_at_millis: EpochTime - analytics_collection_name?: Name + results: SearchApplicationSearchApplication[] } export interface SearchApplicationPutRequest extends RequestBase { name: Name create?: boolean /** @deprecated The use of the 'body' key has been deprecated, use 'search_application' instead. */ - body?: SearchApplicationSearchApplication + body?: SearchApplicationSearchApplicationParameters } export interface SearchApplicationPutResponse { @@ -17594,7 +17667,7 @@ export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'crea export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity - names: Indices + names: IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery allow_restricted_indices?: boolean @@ -17614,7 +17687,7 @@ export interface SecurityRealmInfo { export interface SecurityRemoteIndicesPrivileges { clusters: Names field_security?: SecurityFieldSecurity - names: Indices + names: IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery allow_restricted_indices?: boolean @@ -17692,7 +17765,7 @@ export interface SecurityUser { export interface SecurityUserIndicesPrivileges { field_security?: SecurityFieldSecurity[] - names: Indices + names: IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery[] allow_restricted_indices: boolean @@ -18017,7 +18090,7 @@ export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { export interface SecurityGetBuiltinPrivilegesResponse { cluster: string[] - index: Indices + index: IndexName[] } export interface SecurityGetPrivilegesRequest extends RequestBase { @@ -19174,7 +19247,8 @@ export interface SnapshotRestoreRequest extends RequestBase { } export interface SnapshotRestoreResponse { - snapshot: SnapshotRestoreSnapshotRestore + accepted?: boolean + snapshot?: SnapshotRestoreSnapshotRestore } export interface SnapshotRestoreSnapshotRestore { From 97bdca22d8ad7b10b1d6d32970ffccf86d1dc637 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 10:00:38 -0600 Subject: [PATCH 406/647] Update actions/stale action to v9 (#2423) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 020c87722..9f982123c 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -8,7 +8,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@1160a2240286f5da8ec72b1c0816ce2481aabf84 # v8 + - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9 with: stale-issue-label: stale stale-pr-label: stale From 20f2c740cdef390fffc3ad323d8c1b4e881dfacb Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 10:01:14 -0600 Subject: [PATCH 407/647] Update buildkite plugin junit-annotate to v2.5.0 (#2422) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .buildkite/pipeline.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 95ed83af2..370d1465f 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -25,7 +25,7 @@ steps: provider: "gcp" image: family/core-ubuntu-2204 plugins: - - junit-annotate#v2.4.1: + - junit-annotate#v2.5.0: artifacts: "junit-output/junit-*.xml" job-uuid-file-pattern: "junit-(.*).xml" fail-build-on-error: true From e0c613f898a654f25da4e43b45204686afcd9322 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 4 Nov 2024 17:06:34 +0100 Subject: [PATCH 408/647] Auto-generated code for main (#2425) --- .../18de6782bd18f4a9baec2feec8c02a8b.asciidoc | 18 -------- .../2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc | 13 ------ .../49a19615ebe2c013b8321152163478ab.asciidoc | 42 +++++++++++++++++++ .../67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc | 15 ------- .../7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc | 23 ---------- docs/reference.asciidoc | 2 +- src/api/types.ts | 11 ++++- src/api/typesWithBodyKey.ts | 11 ++++- 8 files changed, 63 insertions(+), 72 deletions(-) delete mode 100644 docs/doc_examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc delete mode 100644 docs/doc_examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc create mode 100644 docs/doc_examples/49a19615ebe2c013b8321152163478ab.asciidoc delete mode 100644 docs/doc_examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc delete mode 100644 docs/doc_examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc diff --git a/docs/doc_examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc b/docs/doc_examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc deleted file mode 100644 index d89eb07d1..000000000 --- a/docs/doc_examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: "my-index-000002", - mappings: { - properties: { - datetime: { - type: "date", - format: "uuuu/MM/dd HH:mm:ss||uuuu/MM/dd||epoch_millis", - }, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc b/docs/doc_examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc deleted file mode 100644 index 839a4d359..000000000 --- a/docs/doc_examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putSettings({ - index: "my-index-000001", - settings: { - 
"index.merge.policy.max_merge_at_once_explicit": null, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/49a19615ebe2c013b8321152163478ab.asciidoc b/docs/doc_examples/49a19615ebe2c013b8321152163478ab.asciidoc new file mode 100644 index 000000000..0c497534a --- /dev/null +++ b/docs/doc_examples/49a19615ebe2c013b8321152163478ab.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + text: "quick brown fox", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + text: "quick fox", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + match: { + text: "quick brown fox", + }, + }, + script: { + source: "_termStats.termFreq().getAverage()", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc b/docs/doc_examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc deleted file mode 100644 index c60551fda..000000000 --- a/docs/doc_examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - source: { - index: "my-index-000001", - }, - dest: { - index: "my-index-000002", - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc b/docs/doc_examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc deleted file mode 100644 index ab73f60fd..000000000 --- a/docs/doc_examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - actions: [ - { - remove: { - index: "my-index-000001", - alias: "my-index", - }, - }, - { - add: { - index: "my-index-000002", - alias: "my-index", - }, - }, - ], -}); -console.log(response); ----- diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 42e6df856..afd85fe33 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -1014,7 +1014,7 @@ A post filter has no impact on the aggregation results. NOTE: This is a debugging tool and adds significant overhead to search execution. ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. 
** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. -** *`retriever` (Optional, { standard, knn, rrf })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as query and knn. +** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as query and knn. ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page. ** *`size` (Optional, number)*: The number of hits to return. diff --git a/src/api/types.ts b/src/api/types.ts index 8087c1609..6f881c4df 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2628,12 +2628,14 @@ export interface Retries { export interface RetrieverBase { filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + min_score?: float } export interface RetrieverContainer { standard?: StandardRetriever knn?: KnnRetriever rrf?: RRFRetriever + text_similarity_reranker?: TextSimilarityReranker } export type Routing = string @@ -2798,7 +2800,6 @@ export interface StandardRetriever extends RetrieverBase { search_after?: SortResults terminate_after?: integer sort?: Sort - min_score?: float collapse?: SearchFieldCollapse } @@ -2835,6 +2836,14 @@ export interface TextEmbedding { model_text: string } +export interface TextSimilarityReranker extends RetrieverBase { + retriever: RetrieverContainer + rank_window_size?: integer + inference_id?: string + inference_text?: string + field?: string +} + export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' export type TimeOfDay = string diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 385a158b7..ed87499a7 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -2704,12 +2704,14 @@ export interface Retries { export interface RetrieverBase { filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + min_score?: float } export interface RetrieverContainer { standard?: StandardRetriever knn?: KnnRetriever rrf?: RRFRetriever + text_similarity_reranker?: TextSimilarityReranker } export type Routing = string @@ -2874,7 +2876,6 @@ export interface StandardRetriever extends RetrieverBase { search_after?: SortResults terminate_after?: integer sort?: Sort - min_score?: float collapse?: SearchFieldCollapse } @@ -2911,6 +2912,14 @@ export interface TextEmbedding { model_text: string } +export interface TextSimilarityReranker extends RetrieverBase { + retriever: RetrieverContainer + rank_window_size?: integer + inference_id?: string + inference_text?: string + field?: string +} + export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' export type TimeOfDay = string From 11939fd22cdecef4649877bf77eb6e27be18b028 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 4 Nov 2024 15:47:53 -0600 Subject: [PATCH 409/647] Add streaming support to Arrow helper (#2407) --- docs/changelog.asciidoc | 5 ++ docs/connecting.asciidoc | 121 
++++++++++++++++++------------- docs/helpers.asciidoc | 109 +++++++++++++++++++--------- docs/transport.asciidoc | 43 +++++++++-- package.json | 4 +- src/helpers.ts | 20 +++++- test/unit/helpers/esql.test.ts | 128 +++++++++++++++++++++++++++++++-- 7 files changed, 328 insertions(+), 102 deletions(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 734916a27..e2fa3d194 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -13,6 +13,11 @@ You can find all the API changes https://www.elastic.co/guide/en/elasticsearch/reference/8.16/release-notes-8.16.0.html[here]. +[discrete] +===== Support Apache Arrow in ES|QL helper + +The ES|QL helper can now return results as an Apache Arrow `Table` or `RecordBatchReader`, which enables high-performance calculations on ES|QL results, even if the response data is larger than the system's available memory. See <> for more information. + [discrete] ==== Fixes diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc index 15007ceb3..4646ee5f1 100644 --- a/docs/connecting.asciidoc +++ b/docs/connecting.asciidoc @@ -1,7 +1,7 @@ [[client-connecting]] -== Connecting +== Connecting -This page contains the information you need to connect and use the Client with +This page contains the information you need to connect and use the Client with {es}. **On this page** @@ -19,7 +19,7 @@ This page contains the information you need to connect and use the Client with [discrete] === Authentication -This document contains code snippets to show you how to connect to various {es} +This document contains code snippets to show you how to connect to various {es} providers. @@ -27,18 +27,18 @@ providers. [[auth-ec]] ==== Elastic Cloud -If you are using https://www.elastic.co/cloud[Elastic Cloud], the client offers -an easy way to connect to it via the `cloud` option. You must pass the Cloud ID -that you can find in the cloud console, then your username and password inside +If you are using https://www.elastic.co/cloud[Elastic Cloud], the client offers +an easy way to connect to it via the `cloud` option. You must pass the Cloud ID +that you can find in the cloud console, then your username and password inside the `auth` option. -NOTE: When connecting to Elastic Cloud, the client will automatically enable -both request and response compression by default, since it yields significant -throughput improvements. Moreover, the client will also set the tls option -`secureProtocol` to `TLSv1_2_method` unless specified otherwise. You can still +NOTE: When connecting to Elastic Cloud, the client will automatically enable +both request and response compression by default, since it yields significant +throughput improvements. Moreover, the client will also set the tls option +`secureProtocol` to `TLSv1_2_method` unless specified otherwise. You can still override this option by configuring them. -IMPORTANT: Do not enable sniffing when using Elastic Cloud, since the nodes are +IMPORTANT: Do not enable sniffing when using Elastic Cloud, since the nodes are behind a load balancer, Elastic Cloud will take care of everything for you. Take a look https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how[here] to know more. @@ -61,18 +61,18 @@ const client = new Client({ [[connect-self-managed-new]] === Connecting to a self-managed cluster -By default {es} will start with security features like authentication and TLS -enabled. 
To connect to the {es} cluster you'll need to configure the Node.js {es} -client to use HTTPS with the generated CA certificate in order to make requests +By default {es} will start with security features like authentication and TLS +enabled. To connect to the {es} cluster you'll need to configure the Node.js {es} +client to use HTTPS with the generated CA certificate in order to make requests successfully. -If you're just getting started with {es} we recommend reading the documentation -on https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html[configuring] -and -https://www.elastic.co/guide/en/elasticsearch/reference/current/starting-elasticsearch.html[starting {es}] +If you're just getting started with {es} we recommend reading the documentation +on https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html[configuring] +and +https://www.elastic.co/guide/en/elasticsearch/reference/current/starting-elasticsearch.html[starting {es}] to ensure your cluster is running as expected. -When you start {es} for the first time you'll see a distinct block like the one +When you start {es} for the first time you'll see a distinct block like the one below in the output from {es} (you may have to scroll up if it's been a while): [source,sh] @@ -90,24 +90,24 @@ below in the output from {es} (you may have to scroll up if it's been a while): ---- -Depending on the circumstances there are two options for verifying the HTTPS -connection, either verifying with the CA certificate itself or via the HTTP CA +Depending on the circumstances there are two options for verifying the HTTPS +connection, either verifying with the CA certificate itself or via the HTTP CA certificate fingerprint. [discrete] [[auth-tls]] ==== TLS configuration -The generated root CA certificate can be found in the `certs` directory in your -{es} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you're running {es} -in Docker there is +The generated root CA certificate can be found in the `certs` directory in your +{es} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you're running {es} +in Docker there is https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html[additional documentation for retrieving the CA certificate]. -Without any additional configuration you can specify `https://` node urls, and -the certificates used to sign these requests will be verified. To turn off -certificate verification, you must specify an `tls` object in the top level -config and set `rejectUnauthorized: false`. The default `tls` values are the -same that Node.js's https://nodejs.org/api/tls.html#tls_tls_connect_options_callback[`tls.connect()`] +Without any additional configuration you can specify `https://` node urls, and +the certificates used to sign these requests will be verified. To turn off +certificate verification, you must specify an `tls` object in the top level +config and set `rejectUnauthorized: false`. The default `tls` values are the +same that Node.js's https://nodejs.org/api/tls.html#tls_tls_connect_options_callback[`tls.connect()`] uses. 
[source,js] @@ -152,7 +152,7 @@ const client = new Client({ }) ---- -The certificate fingerprint can be calculated using `openssl x509` with the +The certificate fingerprint can be calculated using `openssl x509` with the certificate file: [source,sh] @@ -160,8 +160,8 @@ certificate file: openssl x509 -fingerprint -sha256 -noout -in /path/to/http_ca.crt ---- -If you don't have access to the generated CA file from {es} you can use the -following script to output the root CA fingerprint of the {es} instance with +If you don't have access to the generated CA file from {es} you can use the +following script to output the root CA fingerprint of the {es} instance with `openssl s_client`: [source,sh] @@ -186,8 +186,8 @@ SHA256 Fingerprint=A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:2 WARNING: Running {es} without security enabled is not recommended. -If your cluster is configured with -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html[security explicitly disabled] +If your cluster is configured with +https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html[security explicitly disabled] then you can connect via HTTP: [source,js] @@ -208,14 +208,14 @@ Following you can find all the supported authentication strategies. [[auth-apikey]] ==== ApiKey authentication -You can use the -{ref-7x}/security-api-create-api-key.html[ApiKey] -authentication by passing the `apiKey` parameter via the `auth` option. The -`apiKey` parameter can be either a base64 encoded string or an object with the -values that you can obtain from the +You can use the +{ref-7x}/security-api-create-api-key.html[ApiKey] +authentication by passing the `apiKey` parameter via the `auth` option. The +`apiKey` parameter can be either a base64 encoded string or an object with the +values that you can obtain from the {ref-7x}/security-api-create-api-key.html[create api key endpoint]. -NOTE: If you provide both basic authentication credentials and the ApiKey +NOTE: If you provide both basic authentication credentials and the ApiKey configuration, the ApiKey takes precedence. [source,js] @@ -268,10 +268,10 @@ const client = new Client({ [[auth-basic]] ==== Basic authentication -You can provide your credentials by passing the `username` and `password` +You can provide your credentials by passing the `username` and `password` parameters via the `auth` option. -NOTE: If you provide both basic authentication credentials and the Api Key +NOTE: If you provide both basic authentication credentials and the Api Key configuration, the Api Key will take precedence. [source,js] @@ -342,14 +342,14 @@ const result = await client.search({ }, { meta: true }) ---- -In this case, the result will be: +In this case, the result will be: [source,ts] ---- { body: object | boolean statusCode: number headers: object - warnings: [string], + warnings: string[], meta: object } ---- @@ -361,7 +361,7 @@ NOTE: The body is a boolean value when you use `HEAD` APIs. If needed, you can abort a running request by using the `AbortController` standard. -CAUTION: If you abort a request, the request will fail with a +CAUTION: If you abort a request, the request will fail with a `RequestAbortedError`. @@ -410,19 +410,23 @@ The supported request specific options are: [cols=2*] |=== |`ignore` -|`[number]` -  HTTP status codes which should not be considered errors for this request. + +|`number[]` -  HTTP status codes which should not be considered errors for this request. 
+ _Default:_ `null` |`requestTimeout` -|`number` - Max request timeout for the request in milliseconds, it overrides the client default. + +|`number | string` - Max request timeout for the request in milliseconds, it overrides the client default. + _Default:_ `30000` +|`retryOnTimeout` +|`boolean` - Retry requests that have timed out. +_Default:_ `false` + |`maxRetries` |`number` - Max number of retries for the request, it overrides the client default. + _Default:_ `3` |`compression` -|`string, boolean` - Enables body compression for the request. + +|`string | boolean` - Enables body compression for the request. + _Options:_ `false`, `'gzip'` + _Default:_ `false` @@ -446,6 +450,10 @@ _Default:_ `null` |`any` - Custom object per request. _(you can use it to pass data to the clients events)_ + _Default:_ `null` +|`opaqueId` +|`string` - Set the `X-Opaque-Id` HTTP header. See {ref}/api-conventions.html#x-opaque-id +_Default:_ `null` + |`maxResponseSize` |`number` - When configured, it verifies that the uncompressed response size is lower than the configured number, if it's higher it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENTGH + _Default:_ `null` @@ -458,6 +466,17 @@ _Default:_ `null` |`AbortSignal` - The AbortSignal instance to allow request abortion. + _Default:_ `null` +|`meta` +|`boolean` - Rather than returning the body, return an object containing `body`, `statusCode`, `headers` and `meta` keys + +_Default_: `false` + +|`redaction` +|`object` - Options for redacting potentially sensitive data from error metadata. See <>. + +|`retryBackoff` +|`(min: number, max: number, attempt: number) => number;` - A function that calculates how long to sleep, in seconds, before the next request retry + +_Default:_ A built-in function that uses exponential backoff with jitter. + |=== [discrete] @@ -537,8 +556,8 @@ Resources used to assess these recommendations: ~Added~ ~in~ ~`v7.10.0`~ -If you need to pass through an http(s) proxy for connecting to {es}, the client -out of the box offers a handy configuration for helping you with it. Under the +If you need to pass through an http(s) proxy for connecting to {es}, the client +out of the box offers a handy configuration for helping you with it. Under the hood, it uses the https://github.com/delvedor/hpagent[`hpagent`] module. IMPORTANT: In versions 8.0+ of the client, the default `Connection` type is set to `UndiciConnection`, which does not support proxy configurations. @@ -715,5 +734,5 @@ This pre-flight product check allows the client to establish the version of Elas that it is communicating with. The product check requires one additional HTTP request to be sent to the server as part of the request pipeline before the main API call is sent. In most cases, this will succeed during the very first API call that the client sends. -Once the product check completes, no further product check HTTP requests are sent for +Once the product check completes, no further product check HTTP requests are sent for subsequent API calls. diff --git a/docs/helpers.asciidoc b/docs/helpers.asciidoc index fa8394a9d..cb60dbc51 100644 --- a/docs/helpers.asciidoc +++ b/docs/helpers.asciidoc @@ -1,10 +1,10 @@ [[client-helpers]] == Client helpers -The client comes with an handy collection of helpers to give you a more +The client comes with an handy collection of helpers to give you a more comfortable experience with some APIs. 
-CAUTION: The client helpers are experimental, and the API may change in the next +CAUTION: The client helpers are experimental, and the API may change in the next minor releases. The helpers will not work in any Node.js version lower than 10. @@ -14,7 +14,7 @@ minor releases. The helpers will not work in any Node.js version lower than 10. ~Added~ ~in~ ~`v7.7.0`~ -Running bulk requests can be complex due to the shape of the API, this helper +Running bulk requests can be complex due to the shape of the API, this helper aims to provide a nicer developer experience around the Bulk API. @@ -52,7 +52,7 @@ console.log(result) // } ---- -To create a new instance of the Bulk helper, access it as shown in the example +To create a new instance of the Bulk helper, access it as shown in the example above, the configuration options are: [cols=2*] |=== @@ -83,7 +83,7 @@ const b = client.helpers.bulk({ return { index: { _index: 'my-index' } } - } + } }) ---- @@ -94,7 +94,7 @@ a|A function that is called for everytime a document can't be indexed and it has const b = client.helpers.bulk({ onDrop (doc) { console.log(doc) - } + } }) ---- @@ -105,7 +105,7 @@ a|A function that is called for each successful operation in the bulk request, w const b = client.helpers.bulk({ onSuccess ({ result, document }) { console.log(`SUCCESS: Document ${result.index._id} indexed to ${result.index._index}`) - } + } }) ---- @@ -249,11 +249,11 @@ client.helpers.bulk({ [discrete] ==== Abort a bulk operation -If needed, you can abort a bulk operation at any time. The bulk helper returns a +If needed, you can abort a bulk operation at any time. The bulk helper returns a https://promisesaplus.com/[thenable], which has an `abort` method. -NOTE: The abort method stops the execution of the bulk operation, but if you -are using a concurrency higher than one, the operations that are already running +NOTE: The abort method stops the execution of the bulk operation, but if you +are using a concurrency higher than one, the operations that are already running will not be stopped. [source,js] @@ -275,7 +275,7 @@ const b = client.helpers.bulk({ }, onDrop (doc) { b.abort() - } + } }) console.log(await b) @@ -285,8 +285,8 @@ console.log(await b) [discrete] ==== Passing custom options to the Bulk API -You can pass any option supported by the link: -{ref}/docs-bulk.html#docs-bulk-api-query-params[Bulk API] to the helper, and the +You can pass any option supported by the link: +{ref}/docs-bulk.html#docs-bulk-api-query-params[Bulk API] to the helper, and the helper uses those options in conjunction with the Bulk API call. [source,js] @@ -371,10 +371,10 @@ console.log(result) ~Added~ ~in~ ~`v7.8.0`~ -If you send search request at a high rate, this helper might be useful -for you. It uses the multi search API under the hood to batch the requests -and improve the overall performances of your application. The `result` exposes a -`documents` property as well, which allows you to access directly the hits +If you send search request at a high rate, this helper might be useful +for you. It uses the multi search API under the hood to batch the requests +and improve the overall performances of your application. The `result` exposes a +`documents` property as well, which allows you to access directly the hits sources. 
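A minimal sketch of the msearch flow just described — the index name and query are illustrative placeholders, while `client.helpers.msearch()` and `m.search()` are the documented entry points:

[source,js]
----
const { Client } = require('@elastic/elasticsearch')

const client = new Client({ node: 'http://localhost:9200/' })

// A single msearch helper instance batches many search calls
// into multi-search requests under the hood.
const m = client.helpers.msearch()

m.search(
  { index: 'stackoverflow' },
  { query: { match: { title: 'javascript' } } }
)
  .then(result => console.log(result.documents)) // the hits sources
  .catch(err => console.error(err))
----
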
@@ -399,7 +399,7 @@ m.search( .catch(err => console.error(err)) ---- -To create a new instance of the multi search (msearch) helper, you should access +To create a new instance of the multi search (msearch) helper, you should access it as shown in the example above, the configuration options are: [cols=2*] |=== @@ -459,18 +459,18 @@ const m = client.helpers.msearch({ [discrete] ==== Stopping the msearch helper -If needed, you can stop an msearch processor at any time. The msearch helper +If needed, you can stop an msearch processor at any time. The msearch helper returns a https://promisesaplus.com/[thenable], which has an `stop` method. -If you are creating multiple msearch helpers instances and using them for a -limitied period of time, remember to always use the `stop` method once you have +If you are creating multiple msearch helpers instances and using them for a +limitied period of time, remember to always use the `stop` method once you have finished using them, otherwise your application will start leaking memory. -The `stop` method accepts an optional error, that will be dispatched every +The `stop` method accepts an optional error, that will be dispatched every subsequent search request. -NOTE: The stop method stops the execution of the msearch processor, but if -you are using a concurrency higher than one, the operations that are already +NOTE: The stop method stops the execution of the msearch processor, but if +you are using a concurrency higher than one, the operations that are already running will not be stopped. [source,js] @@ -507,9 +507,9 @@ setImmediate(() => m.stop()) ~Added~ ~in~ ~`v7.7.0`~ -A simple wrapper around the search API. Instead of returning the entire `result` -object it returns only the search documents source. For improving the -performances, this helper automatically adds `filter_path=hits.hits._source` to +A simple wrapper around the search API. Instead of returning the entire `result` +object it returns only the search documents source. For improving the +performances, this helper automatically adds `filter_path=hits.hits._source` to the query string. [source,js] @@ -535,10 +535,10 @@ for (const doc of documents) { ~Added~ ~in~ ~`v7.7.0`~ -This helpers offers a simple and intuitive way to use the scroll search API. -Once called, it returns an -https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/async_function[async iterator] -which can be used in conjuction with a for-await...of. It handles automatically +This helpers offers a simple and intuitive way to use the scroll search API. +Once called, it returns an +https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/async_function[async iterator] +which can be used in conjuction with a for-await...of. It handles automatically the `429` error and uses the `maxRetries` option of the client. [source,js] @@ -576,7 +576,7 @@ for await (const result of scrollSearch) { [discrete] ==== Quickly getting the documents -If you only need the documents from the result of a scroll search, you can +If you only need the documents from the result of a scroll search, you can access them via `result.documents`: [source,js] @@ -593,9 +593,9 @@ for await (const result of scrollSearch) { ~Added~ ~in~ ~`v7.7.0`~ -It works in the same way as the scroll search helper, but it returns only the -documents instead. Note, every loop cycle returns a single document, and you -can't use the `clear` method. 
For improving the performances, this helper +It works in the same way as the scroll search helper, but it returns only the +documents instead. Note, every loop cycle returns a single document, and you +can't use the `clear` method. For improving the performances, this helper automatically adds `filter_path=hits.hits._source` to the query string. [source,js] @@ -707,3 +707,42 @@ const result = await client.helpers .esql({ query: 'FROM sample_data | LIMIT 2' }) .toRecords() ---- + +[discrete] +===== `toArrowReader` + +~Added~ ~in~ ~`v8.16.0`~ + +ES|QL can return results in multiple binary formats, including https://arrow.apache.org/[Apache Arrow]'s streaming format. Because it is a very efficient format to read, it can be valuable for performing high-performance in-memory analytics. And, because the response is streamed as batches of records, it can be used to produce aggregations and other calculations on larger-than-memory data sets. + +`toArrowReader` returns a https://arrow.apache.org/docs/js/classes/Arrow_dom.RecordBatchReader.html[`RecordBatchStreamReader`]. + +[source,ts] +---- +const reader = await client.helpers + .esql({ query: 'FROM sample_data' }) + .toArrowReader() + +// print each record as JSON +for (const recordBatch of reader) { + for (const record of recordBatch) { + console.log(record.toJSON()) + } +} +---- + +[discrete] +===== `toArrowTable` + +~Added~ ~in~ ~`v8.16.0`~ + +If you would like to pull the entire data set in Arrow format but without streaming, you can use the `toArrowTable` helper to get a https://arrow.apache.org/docs/js/classes/Arrow_dom.Table.html[Table] back instead. + +[source,ts] +---- +const table = await client.helpers + .esql({ query: 'FROM sample_data' }) + .toArrowTable() + +console.log(table.toArray()) +---- diff --git a/docs/transport.asciidoc b/docs/transport.asciidoc index 5096616ea..d32606b63 100644 --- a/docs/transport.asciidoc +++ b/docs/transport.asciidoc @@ -1,7 +1,7 @@ [[transport]] === Transport -This class is responsible for performing the request to {es} and handling +This class is responsible for performing the request to {es} and handling errors, it also handles sniffing. [source,js] @@ -20,7 +20,7 @@ const client = new Client({ }) ---- -Sometimes you need to inject a small snippet of your code and then continue to +Sometimes you need to inject a small snippet of your code and then continue to use the usual client code. 
In such cases, call `super.method`: [source,js] @@ -35,8 +35,39 @@ class MyTransport extends Transport { ==== Supported content types -- `application/json`, in this case the transport will return a plain JavaScript object -- `text/plain`, in this case the transport will return a plain string -- `application/vnd.mapbox-vector-tile`, in this case the transport will return a Buffer -- `application/vnd.elasticsearch+json`, in this case the transport will return a plain JavaScript object +Depending on the `content-type` of the response, the transport will return the body as different types: +[cols="1,1"] +|=== +|Content-Type |JavaScript type + +|`application/json` +|`object` + +|`text/plain` +|`string` + +|`application/vnd.elasticsearch+json` +|`object` + +|`application/vnd.mapbox-vector-tile` +|`Buffer` + +|`application/vnd.apache.arrow.stream` +|`Buffer` + +|`application/vnd.elasticsearch+arrow+stream` +|`Buffer` + +|`application/smile` +|`Buffer` + +|`application/vnd.elasticsearch+smile` +|`Buffer` + +|`application/cbor` +|`Buffer` + +|`application/vnd.elasticsearch+cbor` +|`Buffer` +|=== diff --git a/package.json b/package.json index 4a37f14fe..741c6316a 100644 --- a/package.json +++ b/package.json @@ -87,8 +87,8 @@ "zx": "7.2.3" }, "dependencies": { - "@elastic/transport": "^8.9.0", - "@apache-arrow/esnext-cjs": "^17.0.0", + "@elastic/transport": "^8.9.1", + "apache-arrow": "^18.0.0", "tslib": "^2.4.0" }, "tap": { diff --git a/src/helpers.ts b/src/helpers.ts index a54ee0964..16af051b6 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -25,7 +25,7 @@ import assert from 'node:assert' import * as timersPromises from 'node:timers/promises' import { Readable } from 'node:stream' import { errors, TransportResult, TransportRequestOptions, TransportRequestOptionsWithMeta } from '@elastic/transport' -import { Table, TypeMap, tableFromIPC } from '@apache-arrow/esnext-cjs' +import { Table, TypeMap, tableFromIPC, RecordBatchStreamReader } from 'apache-arrow/Arrow.node' import Client from './client' import * as T from './api/types' @@ -156,7 +156,8 @@ export interface EsqlResponse { export interface EsqlHelper { toRecords: () => Promise> - toArrow: () => Promise> + toArrowTable: () => Promise> + toArrowReader: () => Promise } export interface EsqlToRecords { @@ -1003,7 +1004,7 @@ export default class Helpers { return { records, columns } }, - async toArrow (): Promise> { + async toArrowTable (): Promise> { if (metaHeader !== null) { reqOptions.headers = reqOptions.headers ?? {} reqOptions.headers['x-elastic-client-meta'] = `${metaHeader as string},h=qa` @@ -1013,6 +1014,19 @@ export default class Helpers { const response = await client.esql.query(params, reqOptions) return tableFromIPC(response) + }, + + async toArrowReader (): Promise { + if (metaHeader !== null) { + reqOptions.headers = reqOptions.headers ?? 
{} + reqOptions.headers['x-elastic-client-meta'] = `${metaHeader as string},h=qa` + reqOptions.asStream = true + } + + params.format = 'arrow' + + const response = await client.esql.query(params, reqOptions) + return RecordBatchStreamReader.from(response) } } diff --git a/test/unit/helpers/esql.test.ts b/test/unit/helpers/esql.test.ts index 3685b7c53..c91e3cb03 100644 --- a/test/unit/helpers/esql.test.ts +++ b/test/unit/helpers/esql.test.ts @@ -18,7 +18,7 @@ */ import { test } from 'tap' -import { Table } from '@apache-arrow/esnext-cjs' +import * as arrow from 'apache-arrow' import { connection } from '../../utils' import { Client } from '../../../' @@ -111,7 +111,7 @@ test('ES|QL helper', t => { t.end() }) - test('toArrow', t => { + test('toArrowTable', t => { t.test('Parses a binary response into an Arrow table', async t => { const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA=' @@ -132,8 +132,8 @@ test('ES|QL helper', t => { Connection: MockConnection }) - const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrow() - t.ok(result instanceof Table) + const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowTable() + t.ok(result instanceof arrow.Table) const table = [...result] t.same(table[0], [ @@ -165,7 +165,125 @@ test('ES|QL helper', t => { Connection: MockConnection }) - await client.helpers.esql({ query: 'FROM sample_data' }).toArrow() + await client.helpers.esql({ query: 'FROM sample_data' }).toArrowTable() + t.end() + }) + + t.end() + }) + + test('toArrowReader', t => { + t.test('Parses a binary response into an Arrow stream reader', async t => { + const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA=' + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: Buffer.from(binaryContent, 'base64'), + statusCode: 200, + headers: { + 'content-type': 'application/vnd.elasticsearch+arrow+stream' + } + } + } + }) + + const client = new Client({ + node: 
'http://localhost:9200',
+        Connection: MockConnection
+      })
+
+      const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader()
+      t.ok(result.isStream())
+
+      const recordBatch = result.next().value
+      t.same(recordBatch.get(0)?.toJSON(), {
+        amount: 4.900000095367432,
+        date: 1729532586965,
+      })
+      t.end()
+    })
+
+    t.test('ESQL helper uses correct x-elastic-client-meta helper value', async t => {
+      const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA='
+
+      const MockConnection = connection.buildMockConnection({
+        onRequest (params) {
+          const header = params.headers?.['x-elastic-client-meta'] ?? ''
+          t.ok(header.includes('h=qa'), `Client meta header does not include ESQL helper value: ${header}`)
+          return {
+            body: Buffer.from(binaryContent, 'base64'),
+            statusCode: 200,
+            headers: {
+              'content-type': 'application/vnd.elasticsearch+arrow+stream'
+            }
+          }
+        }
+      })
+
+      const client = new Client({
+        node: 'http://localhost:9200',
+        Connection: MockConnection
+      })
+
+      await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader()
+      t.end()
+    })
+
+    t.test('multi-batch support', async t => {
+      const intType = new arrow.Uint32
+      const floatType = new arrow.Float32
+      const schema = new arrow.Schema([
+        arrow.Field.new('id', intType),
+        arrow.Field.new('val', floatType)
+      ])
+
+      function getBatch(ids: number[], vals: number[]) {
+        const id = arrow.makeData({ type: intType, data: ids })
+        const val = arrow.makeData({ type: floatType, data: vals })
+        return new arrow.RecordBatch({ id, val })
+      }
+
+      const batch1 = getBatch([1, 2, 3], [0.1, 0.2, 0.3])
+      const batch2 = getBatch([4, 5, 6], [0.4, 0.5, 0.6])
+      const batch3 = getBatch([7, 8, 9], [0.7, 0.8, 0.9])
+
+      const table = new arrow.Table(schema, [
+        new arrow.RecordBatch(schema, batch1.data),
+        new arrow.RecordBatch(schema, batch2.data),
+        new arrow.RecordBatch(schema, batch3.data),
+      ])
+
+      const MockConnection = connection.buildMockConnection({
+        onRequest (_params) {
+          return {
+            body: Buffer.from(arrow.tableToIPC(table, "stream")),
+            statusCode: 200,
+            headers: {
+              'content-type': 'application/vnd.elasticsearch+arrow+stream'
+            }
+          }
+        }
+      })
+
+      const client = new Client({
+        node: 'http://localhost:9200',
+        Connection: MockConnection
+      })
+
+      const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader()
+      t.ok(result.isStream())
+
+      let counter = 0
+      for (const batch of result) {
+        for (const row of batch) {
+          counter++
+          const { id, val } = row.toJSON()
+          t.equal(id, counter)
+          // floating points are hard in JS
+          t.equal((Math.round(val * 10) / 10).toFixed(1), (counter * 0.1).toFixed(1))
+        }
+      }
     t.end()
   })

From edb5563bf813f8a69481a772c554fc066bae3878 Mon Sep 17 00:00:00 2001
From: 
"elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 22:21:49 +0000 Subject: [PATCH 410/647] Update dependency @types/node to v18.19.64 (#2421) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 741c6316a..cddcdfec7 100644 --- a/package.json +++ b/package.json @@ -57,7 +57,7 @@ "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "18.19.59", + "@types/node": "18.19.64", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "3.2.1", "@types/stoppable": "1.1.3", From 2455dac4e5b1730c97d0fe4266612c88fc11e088 Mon Sep 17 00:00:00 2001 From: Rami <72725910+ramikg@users.noreply.github.com> Date: Wed, 6 Nov 2024 20:20:10 +0200 Subject: [PATCH 411/647] Add `_id` to the result of `helpers.search` (#2432) --- src/helpers.ts | 13 ++++++++++--- test/unit/helpers/search.test.ts | 30 +++++++++++++++--------------- 2 files changed, 25 insertions(+), 18 deletions(-) diff --git a/src/helpers.ts b/src/helpers.ts index 16af051b6..10fda499a 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -28,6 +28,7 @@ import { errors, TransportResult, TransportRequestOptions, TransportRequestOptio import { Table, TypeMap, tableFromIPC, RecordBatchStreamReader } from 'apache-arrow/Arrow.node' import Client from './client' import * as T from './api/types' +import { Id } from './api/types' export interface HelpersOptions { client: Client @@ -193,12 +194,18 @@ export default class Helpers { * @param {object} options - The client optional configuration for this request. * @return {array} The documents that matched the request. */ - async search (params: T.SearchRequest, options: TransportRequestOptions = {}): Promise { - appendFilterPath('hits.hits._source', params, true) + async search (params: T.SearchRequest, options: TransportRequestOptions = {}): Promise> { + appendFilterPath('hits.hits._id,hits.hits._source', params, true) options.meta = true const { body: result } = await this[kClient].search(params, options as TransportRequestOptionsWithMeta) if (result.hits?.hits != null) { - return result.hits.hits.map(d => d._source as TDocument) + return result.hits.hits.map(d => ({ + // Starting with version 8.14.0, _id is optional, but in our case it's always present. 
+ // See @es_quirk documentation in elasticsearch-specification/specification/_global/search/_types/hits.ts + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + _id: d._id!, + ...(d._source as TDocument) + })) } return [] } diff --git a/test/unit/helpers/search.test.ts b/test/unit/helpers/search.test.ts index 9ed4605ab..e318571a8 100644 --- a/test/unit/helpers/search.test.ts +++ b/test/unit/helpers/search.test.ts @@ -24,14 +24,14 @@ import { connection } from '../../utils' test('Search should have an additional documents property', async t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { - t.equal(params.querystring, 'filter_path=hits.hits._source') + t.equal(params.querystring, 'filter_path=hits.hits._id%2Chits.hits._source') return { body: { hits: { hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } + { _id: '1', _source: { one: 'one' } }, + { _id: '2', _source: { two: 'two' } }, + { _id: '3', _source: { three: 'three' } } ] } } @@ -49,16 +49,16 @@ test('Search should have an additional documents property', async t => { query: { match_all: {} } }) t.same(result, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } + { _id: '1', one: 'one' }, + { _id: '2', two: 'two' }, + { _id: '3', three: 'three' } ]) }) test('kGetHits fallback', async t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { - t.equal(params.querystring, 'filter_path=hits.hits._source') + t.equal(params.querystring, 'filter_path=hits.hits._id%2Chits.hits._source') return { body: {} } } }) @@ -78,14 +78,14 @@ test('kGetHits fallback', async t => { test('Merge filter paths (snake_case)', async t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { - t.equal(params.querystring, 'filter_path=foo%2Chits.hits._source') + t.equal(params.querystring, 'filter_path=foo%2Chits.hits._id%2Chits.hits._source') return { body: { hits: { hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } + { _id: '1', _source: { one: 'one' } }, + { _id: '2', _source: { two: 'two' } }, + { _id: '3', _source: { three: 'three' } } ] } } @@ -104,9 +104,9 @@ test('Merge filter paths (snake_case)', async t => { query: { match_all: {} } }) t.same(result, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } + { _id: '1', one: 'one' }, + { _id: '2', two: 'two' }, + { _id: '3', three: 'three' } ]) }) From 7bcd75bdb09d562162c1f5d62a8204feff11c2c9 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 11 Nov 2024 09:47:00 -0600 Subject: [PATCH 412/647] Add changelog for 8.15.2 (#2444) --- docs/changelog.asciidoc | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index e2fa3d194..0f45de6c6 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -26,6 +26,17 @@ The ES|QL helper can now return results as an Apache Arrow `Table` or `RecordBat The client's `disablePrototypePoisoningProtection` option was set to `true` by default, but when it was set to any other value it was ignored, making it impossible to enable prototype poisoning protection without providing a custom serializer implementation. +[discrete] +=== 8.15.2 + +[discrete] +==== Features + +[discrete] +===== Improved support for Elasticsearch `v8.15` + +Updated TypeScript types based on fixes and improvements to the Elasticsearch specification. 
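+
+For illustration, a minimal sketch of consuming the new `toArrowReader` helper from the patches above; the client setup is omitted, and the query mirrors the tests in the preceding patch:
+
+[source,ts]
+----
+// Sketch only: stream ES|QL results as Arrow record batches.
+const reader = await client.helpers
+  .esql({ query: 'FROM sample_data' })
+  .toArrowReader()
+
+// A RecordBatchStreamReader is iterable over record batches,
+// and each batch is iterable over its rows.
+for (const batch of reader) {
+  for (const row of batch) {
+    console.log(row.toJSON()) // e.g. { amount: 4.9, date: 1729532586965 }
+  }
+}
+----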
+ [discrete] === 8.15.1 From 2b2a2f03e67acc0c528857bb165206b04c5d57e6 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 11 Nov 2024 18:05:18 +0100 Subject: [PATCH 413/647] Auto-generated code for main (#2439) --- .../15f769bbd7b5fddeb3353ae726b71b14.asciidoc | 28 +++ .../2e7844477b41fcfa9efefee4ec0e7101.asciidoc | 43 ++++ .../339c4e5af9f9069ad9912aa574488b59.asciidoc | 52 ++++ .../44198781d164a15be633d4469485a544.asciidoc | 23 ++ .../828f0045747fde4888a947bb99e190e3.asciidoc | 27 ++ .../853fc710cea79fb4e1a85fb6d149f9c5.asciidoc | 42 ++++ ...acc44366a9908684b2c8c2b119a4fb2b.asciidoc} | 12 +- docs/reference.asciidoc | 237 +++++++++++++++--- src/api/api/async_search.ts | 2 +- src/api/api/autoscaling.ts | 8 +- src/api/api/clear_scroll.ts | 2 +- src/api/api/close_point_in_time.ts | 2 +- src/api/api/field_caps.ts | 2 +- src/api/api/knn_search.ts | 2 +- src/api/api/msearch_template.ts | 2 +- src/api/api/open_point_in_time.ts | 2 +- src/api/api/rank_eval.ts | 2 +- src/api/api/render_search_template.ts | 2 +- src/api/api/search.ts | 2 +- src/api/api/search_mvt.ts | 2 +- src/api/api/search_shards.ts | 2 +- src/api/api/search_template.ts | 2 +- src/api/api/security.ts | 56 +++-- src/api/api/termvectors.ts | 2 +- src/api/types.ts | 78 +++++- src/api/typesWithBodyKey.ts | 84 ++++++- 26 files changed, 625 insertions(+), 93 deletions(-) create mode 100644 docs/doc_examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc create mode 100644 docs/doc_examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc create mode 100644 docs/doc_examples/339c4e5af9f9069ad9912aa574488b59.asciidoc create mode 100644 docs/doc_examples/44198781d164a15be633d4469485a544.asciidoc create mode 100644 docs/doc_examples/828f0045747fde4888a947bb99e190e3.asciidoc create mode 100644 docs/doc_examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc rename docs/doc_examples/{8f0a3d7b5fbdf5351750a23c493cc078.asciidoc => acc44366a9908684b2c8c2b119a4fb2b.asciidoc} (71%) diff --git a/docs/doc_examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc b/docs/doc_examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc new file mode 100644 index 000000000..0db865fe6 --- /dev/null +++ b/docs/doc_examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-bit-vectors", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "dotProduct(params.query_vector, 'my_dense_vector')", + params: { + query_vector: [ + 0.23, 1.45, 3.67, 4.89, -0.56, 2.34, 3.21, 1.78, -2.45, 0.98, -0.12, + 3.45, 4.56, 2.78, 1.23, 0.67, 3.89, 4.12, -2.34, 1.56, 0.78, 3.21, + 4.12, 2.45, -1.67, 0.34, -3.45, 4.56, -2.78, 1.23, -0.67, 3.89, + -4.34, 2.12, -1.56, 0.78, -3.21, 4.45, 2.12, 1.67, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc b/docs/doc_examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc new file mode 100644 index 000000000..e19fb83da --- /dev/null +++ b/docs/doc_examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + retriever: { + rule: { + match_criteria: { + query_string: "puggles", + user_country: "us", + }, + ruleset_ids: ["my-ruleset"], 
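+      // The rule retriever applies the query rules from "my-ruleset" on top of the results of the retriever defined below.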
+ retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: "pugs", + }, + }, + }, + }, + { + standard: { + query: { + query_string: { + query: "puggles", + }, + }, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/339c4e5af9f9069ad9912aa574488b59.asciidoc b/docs/doc_examples/339c4e5af9f9069ad9912aa574488b59.asciidoc new file mode 100644 index 000000000..905e21821 --- /dev/null +++ b/docs/doc_examples/339c4e5af9f9069ad9912aa574488b59.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-bit-vectors", + mappings: { + properties: { + my_dense_vector: { + type: "dense_vector", + index: false, + element_type: "bit", + dims: 40, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-bit-vectors", + id: 1, + document: { + my_dense_vector: [8, 5, -15, 1, -7], + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-bit-vectors", + id: 2, + document: { + my_dense_vector: [-1, 115, -3, 4, -128], + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my-index-bit-vectors", + id: 3, + document: { + my_dense_vector: [2, 18, -5, 0, -124], + }, +}); +console.log(response3); + +const response4 = await client.indices.refresh({ + index: "my-index-bit-vectors", +}); +console.log(response4); +---- diff --git a/docs/doc_examples/44198781d164a15be633d4469485a544.asciidoc b/docs/doc_examples/44198781d164a15be633d4469485a544.asciidoc new file mode 100644 index 000000000..f3125224a --- /dev/null +++ b/docs/doc_examples/44198781d164a15be633d4469485a544.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-bit-vectors", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "dotProduct(params.query_vector, 'my_dense_vector')", + params: { + query_vector: [8, 5, -15, 1, -7], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/828f0045747fde4888a947bb99e190e3.asciidoc b/docs/doc_examples/828f0045747fde4888a947bb99e190e3.asciidoc new file mode 100644 index 000000000..a4e4969f9 --- /dev/null +++ b/docs/doc_examples/828f0045747fde4888a947bb99e190e3.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "movies", + retriever: { + rule: { + match_criteria: { + query_string: "harry potter", + }, + ruleset_ids: ["my-ruleset"], + retriever: { + standard: { + query: { + query_string: { + query: "harry potter", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc b/docs/doc_examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc new file mode 100644 index 000000000..0ca3674f6 --- /dev/null +++ b/docs/doc_examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: 
"movies", + retriever: { + rule: { + match_criteria: { + query_string: "harry potter", + }, + ruleset_ids: ["my-ruleset"], + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: "sorcerer's stone", + }, + }, + }, + }, + { + standard: { + query: { + query_string: { + query: "chamber of secrets", + }, + }, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8f0a3d7b5fbdf5351750a23c493cc078.asciidoc b/docs/doc_examples/acc44366a9908684b2c8c2b119a4fb2b.asciidoc similarity index 71% rename from docs/doc_examples/8f0a3d7b5fbdf5351750a23c493cc078.asciidoc rename to docs/doc_examples/acc44366a9908684b2c8c2b119a4fb2b.asciidoc index 8f14d2f77..2dcd961c6 100644 --- a/docs/doc_examples/8f0a3d7b5fbdf5351750a23c493cc078.asciidoc +++ b/docs/doc_examples/acc44366a9908684b2c8c2b119a4fb2b.asciidoc @@ -5,11 +5,15 @@ ---- const response = await client.search({ index: "my-index-000001", - query: { + retriever: { rule: { - organic: { - query_string: { - query: "puggles", + retriever: { + standard: { + query: { + query_string: { + query: "puggles", + }, + }, }, }, match_criteria: { diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index afd85fe33..403af082d 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -58,7 +58,9 @@ Set to all or any positive integer up to the total number of shards in the index [discrete] === clear_scroll -Clears the search context and results for a scrolling search. +Clear a scrolling search. + +Clear the search context and results for a scrolling search. {ref}/clear-scroll-api.html[Endpoint documentation] [source,ts] @@ -74,7 +76,12 @@ To clear all scroll IDs, use `_all`. [discrete] === close_point_in_time -Closes a point-in-time. +Close a point in time. + +A point in time must be opened explicitly before being used in search requests. +The `keep_alive` parameter tells Elasticsearch how long it should persist. +A point in time is automatically closed when the `keep_alive` period has elapsed. +However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. {ref}/point-in-time-api.html[Endpoint documentation] [source,ts] @@ -389,9 +396,13 @@ Random by default. [discrete] === field_caps -The field capabilities API returns the information about the capabilities of fields among multiple indices. -The field capabilities API returns runtime fields like any other field. For example, a runtime field with a type -of keyword is returned as any other field that belongs to the `keyword` family. +Get the field capabilities. + +Get information about the capabilities of fields among multiple indices. + +For data streams, the API returns field capabilities among the stream’s backing indices. +It returns runtime fields like any other field. +For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. {ref}/search-field-caps.html[Endpoint documentation] [source,ts] @@ -588,7 +599,19 @@ client.info() [discrete] === knn_search -Performs a kNN search. +Run a knn search. + +NOTE: The kNN search API has been replaced by the `knn` option in the search API. + +Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. +Given a query vector, the API finds the k closest vectors and returns those documents as search hits. + +Elasticsearch uses the HNSW algorithm to support efficient kNN search. 
+Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. +This means the results returned are not always the true k closest neighbors. + +The kNN search API supports restricting the search using a filter. +The search will return the top k documents that also match the filter query. {ref}/search-search.html[Endpoint documentation] [source,ts] @@ -704,7 +727,7 @@ However, using computationally expensive named queries on a large number of hits [discrete] === msearch_template -Runs multiple templated searches with a single request. +Run multiple templated searches. {ref}/search-multi-search.html[Endpoint documentation] [source,ts] @@ -764,13 +787,18 @@ Random by default. [discrete] === open_point_in_time -A search request by default executes against the most recent visible data of the target indices, +Open a point in time. + +A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. +A point in time must be opened explicitly before being used in search requests. +The `keep_alive` parameter tells Elasticsearch how long it should persist. + {ref}/point-in-time-api.html[Endpoint documentation] [source,ts] ---- @@ -828,7 +856,9 @@ If no response is received before the timeout expires, the request fails and ret [discrete] === rank_eval -Enables you to evaluate the quality of ranked search results over a set of typical search queries. +Evaluate ranked search results. + +Evaluate the quality of ranked search results over a set of typical search queries. {ref}/search-rank-eval.html[Endpoint documentation] [source,ts] @@ -900,7 +930,9 @@ client.reindexRethrottle({ task_id }) [discrete] === render_search_template -Renders a search template as a search request body. +Render a search template. + +Render a search template as a search request body. {ref}/render-search-template-api.html[Endpoint documentation] [source,ts] @@ -972,7 +1004,9 @@ client.scroll({ scroll_id }) [discrete] === search -Returns search hits that match the query defined in the request. +Run a search. + +Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. @@ -1014,7 +1048,7 @@ A post filter has no impact on the aggregation results. NOTE: This is a debugging tool and adds significant overhead to search execution. 
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. -** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as query and knn. +** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as query and knn. ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page. ** *`size` (Optional, number)*: The number of hits to return. @@ -1082,8 +1116,6 @@ However, using computationally expensive named queries on a large number of hits This parameter can only be used when the `q` query string parameter is specified. ** *`max_concurrent_shard_requests` (Optional, number)*: Defines the number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. -** *`min_compatible_shard_node` (Optional, string)*: The minimum version of the node that can handle the request -Any handling node with a lower version will fail the request. ** *`preference` (Optional, string)*: Nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: `_only_local` to run the search only on shards on the local node; @@ -1130,7 +1162,8 @@ Fetches with this enabled will be slower the enabling synthetic source natively [discrete] === search_mvt Search a vector tile. -Searches a vector tile for geospatial values. + +Search a vector tile for geospatial values. {ref}/search-vector-tile-api.html[Endpoint documentation] [source,ts] @@ -1189,7 +1222,11 @@ suggested label positions for the original features. [discrete] === search_shards -Returns information about the indices and shards that a search request would be executed against. +Get the search shards. + +Get the indices and shards that a search request would be run against. 
+This information can be useful for working out issues or planning optimizations with routing and shard preferences. +When filtered aliases are used, the filter is returned as part of the indices section. {ref}/search-shards.html[Endpoint documentation] [source,ts] @@ -1216,7 +1253,7 @@ Random by default. [discrete] === search_template -Runs a search with a search template. +Run a search with a search template. {ref}/search-template.html[Endpoint documentation] [source,ts] @@ -1291,7 +1328,8 @@ client.termsEnum({ index, field }) [discrete] === termvectors Get term vector information. -Returns information and statistics about terms in the fields of a particular document. + +Get information and statistics about terms in the fields of a particular document. {ref}/docs-termvectors.html[Endpoint documentation] [source,ts] @@ -1457,6 +1495,7 @@ client.updateByQueryRethrottle({ task_id }) [discrete] ==== delete Delete an async search. + If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. @@ -1476,6 +1515,7 @@ client.asyncSearch.delete({ id }) [discrete] ==== get Get async search results. + Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. @@ -1502,8 +1542,9 @@ By default no timeout is set meaning that the currently available results will b [discrete] ==== status -Get async search status. -Retrieve the status of a previously submitted async search request given its identifier, without retrieving search results. +Get the async search status. + +Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. {ref}/async-search.html[Endpoint documentation] @@ -1521,6 +1562,7 @@ client.asyncSearch.status({ id }) [discrete] ==== submit Run an async search. + When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. @@ -1613,7 +1655,6 @@ A partial reduction is performed every time the coordinating node has received a ** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) ** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored ** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node this search executes concurrently. 
This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests -** *`min_compatible_shard_node` (Optional, string)* ** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) ** *`pre_filter_shard_size` (Optional, number)*: The default value cannot be changed, which enforces the execution of a pre-filter roundtrip to retrieve statistics from each shard so that the ones that surely don’t hold any document matching the query get skipped. ** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to true @@ -1634,7 +1675,9 @@ A partial reduction is performed every time the coordinating node has received a === autoscaling [discrete] ==== delete_autoscaling_policy -Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +Delete an autoscaling policy. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. {ref}/autoscaling-delete-autoscaling-policy.html[Endpoint documentation] [source,ts] @@ -1650,7 +1693,20 @@ client.autoscaling.deleteAutoscalingPolicy({ name }) [discrete] ==== get_autoscaling_capacity -Gets the current autoscaling capacity based on the configured autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +Get the autoscaling capacity. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +This API gets the current autoscaling capacity based on the configured autoscaling policy. +It will return information to size the cluster appropriately to the current workload. + +The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. + +The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. + +The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. +This information is provided for diagnosis only. +Do not use this information to make autoscaling decisions. {ref}/autoscaling-get-autoscaling-capacity.html[Endpoint documentation] [source,ts] @@ -1661,7 +1717,9 @@ client.autoscaling.getAutoscalingCapacity() [discrete] ==== get_autoscaling_policy -Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +Get an autoscaling policy. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. {ref}/autoscaling-get-autoscaling-capacity.html[Endpoint documentation] [source,ts] @@ -1677,7 +1735,9 @@ client.autoscaling.getAutoscalingPolicy({ name }) [discrete] ==== put_autoscaling_policy -Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +Create or update an autoscaling policy. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. 
{ref}/autoscaling-put-autoscaling-policy.html[Endpoint documentation] [source,ts] @@ -1732,6 +1792,10 @@ client.cat.allocation({ ... }) * *Request (object):* ** *`node_id` (Optional, string | string[])*: List of node identifiers or names used to limit the returned information. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. [discrete] ==== component_templates @@ -1753,6 +1817,10 @@ client.cat.componentTemplates({ ... }) * *Request (object):* ** *`name` (Optional, string)*: The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. [discrete] ==== count @@ -1879,9 +1947,17 @@ IMPORTANT: cat APIs are only intended for human consumption using the command li {ref}/cat-master.html[Endpoint documentation] [source,ts] ---- -client.cat.master() +client.cat.master({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. [discrete] ==== ml_data_frame_analytics @@ -2020,9 +2096,17 @@ IMPORTANT: cat APIs are only intended for human consumption using the command li {ref}/cat-nodeattrs.html[Endpoint documentation] [source,ts] ---- -client.cat.nodeattrs() +client.cat.nodeattrs({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. [discrete] ==== nodes @@ -2051,9 +2135,17 @@ IMPORTANT: cat APIs are only intended for human consumption using the command li {ref}/cat-pending-tasks.html[Endpoint documentation] [source,ts] ---- -client.cat.pendingTasks() +client.cat.pendingTasks({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. [discrete] ==== plugins @@ -2063,9 +2155,17 @@ IMPORTANT: cat APIs are only intended for human consumption using the command li {ref}/cat-plugins.html[Endpoint documentation] [source,ts] ---- -client.cat.plugins() +client.cat.plugins({ ... 
}) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. [discrete] ==== recovery @@ -2122,6 +2222,10 @@ client.cat.segments({ ... }) Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. [discrete] ==== shards @@ -2204,6 +2308,10 @@ client.cat.templates({ ... }) * *Request (object):* ** *`name` (Optional, string)*: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. [discrete] ==== thread_pool @@ -2224,6 +2332,10 @@ client.cat.threadPool({ ... }) ** *`thread_pool_patterns` (Optional, string | string[])*: A list of thread pool names used to limit the request. Accepts wildcard expressions. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. [discrete] ==== transforms @@ -3049,8 +3161,6 @@ client.connector.syncJobCheckIn() [discrete] ==== sync_job_claim Claims a connector sync job. - -{ref}/claim-connector-sync-job-api.html[Endpoint documentation] [source,ts] ---- client.connector.syncJobClaim() @@ -3871,7 +3981,6 @@ the indices stats API. ** *`ignore_unavailable` (Optional, boolean)* ** *`lenient` (Optional, boolean)* ** *`max_concurrent_shard_requests` (Optional, number)* -** *`min_compatible_shard_node` (Optional, string)* ** *`preference` (Optional, string)* ** *`pre_filter_shard_size` (Optional, number)* ** *`request_cache` (Optional, boolean)* @@ -4564,7 +4673,6 @@ If the request can target data streams, this argument determines whether wildcar Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. [discrete] ==== exists_index_template @@ -4762,7 +4870,6 @@ If the request can target data streams, this argument determines whether wildcar Supports a list of values, such as `open,hidden`. 
Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. [discrete] ==== get_data_lifecycle @@ -5465,7 +5572,6 @@ If the request can target data streams, this argument determines whether wildcar Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`verbose` (Optional, boolean)*: If `true`, the request returns a verbose response. [discrete] ==== shard_stores @@ -5959,8 +6065,8 @@ client.ingest.putPipeline({ id }) ** *`id` (string)*: ID of the ingest pipeline to create or update. ** *`_meta` (Optional, Record)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. ** *`description` (Optional, string)*: Description of the ingest pipeline. -** *`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. -** *`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. +** *`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. 
+** *`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. ** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. ** *`deprecated` (Optional, boolean)*: Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. @@ -9228,14 +9334,46 @@ client.security.createApiKey({ ... }) [discrete] ==== create_cross_cluster_api_key -Creates a cross-cluster API key for API key based remote cluster access. +Create a cross-cluster API key. + +Create an API key of the `cross_cluster` type for the API key based remote cluster access. +A `cross_cluster` API key cannot be used to authenticate through the REST interface. + +IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. + +Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. + +NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. + +A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. + +By default, API keys never expire. You can specify expiration information when you create the API keys. + +Cross-cluster API keys can only be updated with the update cross-cluster API key API. +Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. {ref}/security-api-create-cross-cluster-api-key.html[Endpoint documentation] [source,ts] ---- -client.security.createCrossClusterApiKey() +client.security.createCrossClusterApiKey({ access, name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`access` ({ replication, search })*: The access to be granted to this API key. +The access is composed of permissions for cross-cluster search and cross-cluster replication. +At least one of them must be specified. + +NOTE: No explicit privileges should be specified for either search or replication access. +The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. +** *`name` (string)*: Specifies the name for this API key. +** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. +By default, API keys never expire. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. +It supports nested data structure. 
+Within the metadata object, keys beginning with `_` are reserved for system usage. [discrete] ==== create_service_token @@ -10265,14 +10403,31 @@ client.security.updateApiKey({ id }) [discrete] ==== update_cross_cluster_api_key -Updates attributes of an existing cross-cluster API key. +Update a cross-cluster API key. + +Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. {ref}/security-api-update-cross-cluster-api-key.html[Endpoint documentation] [source,ts] ---- -client.security.updateCrossClusterApiKey() +client.security.updateCrossClusterApiKey({ id, access }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The ID of the cross-cluster API key to update. +** *`access` ({ replication, search })*: The access to be granted to this API key. +The access is composed of permissions for cross cluster search and cross cluster replication. +At least one of them must be specified. +When specified, the new access assignment fully replaces the previously assigned access. +** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. +By default, API keys never expire. This property can be omitted to leave the value unchanged. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. +It supports nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. +When specified, this information fully replaces metadata previously associated with the API key. [discrete] ==== update_settings diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index f17bb56ce..f0c852368 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -109,7 +109,7 @@ export default class AsyncSearch { } /** - * Get async search status. Retrieve the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. + * Get the async search status. Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} */ async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index 04a2ab060..9be9c4d4a 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -45,7 +45,7 @@ export default class Autoscaling { } /** - * Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * Delete an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-delete-autoscaling-policy.html | Elasticsearch API documentation} */ async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class Autoscaling { } /** - * Gets the current autoscaling capacity based on the configured autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * Get the autoscaling capacity. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. This API gets the current autoscaling capacity based on the configured autoscaling policy. It will return information to size the cluster appropriately to the current workload. The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-get-autoscaling-capacity.html | Elasticsearch API documentation} */ async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -107,7 +107,7 @@ export default class Autoscaling { } /** - * Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * Get an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-get-autoscaling-capacity.html | Elasticsearch API documentation} */ async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -139,7 +139,7 @@ export default class Autoscaling { } /** - * Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * Create or update an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-put-autoscaling-policy.html | Elasticsearch API documentation} */ async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts index 7c4848d45..5fa41160e 100644 --- a/src/api/api/clear_scroll.ts +++ b/src/api/api/clear_scroll.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Clears the search context and results for a scrolling search. + * Clear a scrolling search. Clear the search context and results for a scrolling search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-scroll-api.html | Elasticsearch API documentation} */ export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts index c4c779e5e..f14346169 100644 --- a/src/api/api/close_point_in_time.ts +++ b/src/api/api/close_point_in_time.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Closes a point-in-time. + * Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html | Elasticsearch API documentation} */ export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index 7a2c5bc12..082e83a53 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * The field capabilities API returns the information about the capabilities of fields among multiple indices. The field capabilities API returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned as any other field that belongs to the `keyword` family. + * Get the field capabilities. Get information about the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html | Elasticsearch API documentation} */ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index fe30ca47a..227dfb36c 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Performs a kNN search. + * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html | Elasticsearch API documentation} */ export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts index a1897a483..2e33d922f 100644 --- a/src/api/api/msearch_template.ts +++ b/src/api/api/msearch_template.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Runs multiple templated searches with a single request. + * Run multiple templated searches. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html | Elasticsearch API documentation} */ export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts index 69aabd194..0a017637d 100644 --- a/src/api/api/open_point_in_time.ts +++ b/src/api/api/open_point_in_time.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * A search request by default executes against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. + * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. 
For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html | Elasticsearch API documentation} */ export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts index 010a984a6..44052220b 100644 --- a/src/api/api/rank_eval.ts +++ b/src/api/api/rank_eval.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Enables you to evaluate the quality of ranked search results over a set of typical search queries. + * Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html | Elasticsearch API documentation} */ export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index cd31ab4c8..d3d5ad472 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Renders a search template as a search request body. + * Render a search template. Render a search template as a search request body. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/render-search-template-api.html | Elasticsearch API documentation} */ export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/search.ts b/src/api/api/search.ts index 65dbb6c99..fb251b6fa 100644 --- a/src/api/api/search.ts +++ b/src/api/api/search.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. + * Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html | Elasticsearch API documentation} */ export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index 6d2f125b8..3311d222b 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Search a vector tile. 
Searches a vector tile for geospatial values. + * Search a vector tile. Search a vector tile for geospatial values. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-vector-tile-api.html | Elasticsearch API documentation} */ export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts index c9b2b299d..85ff0b79e 100644 --- a/src/api/api/search_shards.ts +++ b/src/api/api/search_shards.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns information about the indices and shards that a search request would be executed against. + * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-shards.html | Elasticsearch API documentation} */ export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index a158ad55a..be504abb3 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Runs a search with a search template. + * Run a search with a search template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-template.html | Elasticsearch API documentation} */ export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 007e543ed..8af60e384 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -483,22 +483,34 @@ export default class Security { } /** - * Creates a cross-cluster API key for API key based remote cluster access. + * Create a cross-cluster API key. Create an API key of the `cross_cluster` type for the API key based remote cluster access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. Cross-cluster API keys can only be updated with the update cross-cluster API key API. 
Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-cross-cluster-api-key.html | Elasticsearch API documentation} */ - async createCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async createCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async createCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async createCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['access', 'expiration', 'metadata', 'name'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -2259,22 +2271,34 @@ export default class Security { } /** - * Updates attributes of an existing cross-cluster API key. + * Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. 
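Taken together, the two security hunks above replace the placeholder `TODO` typings for the cross-cluster API key endpoints with real request and response types. A hedged sketch of how the newly typed methods might be called, using the `access` shape defined later in this patch (the key name and index patterns are invented for illustration):

[source,js]
----
// Create a cross-cluster API key; its effective permission is exactly
// the `access` property, as the new docstring explains.
const created = await client.security.createCrossClusterApiKey({
  name: "my-remote-search-key",
  access: {
    search: [{ names: ["logs-*"] }],
  },
});

// Cross-cluster keys can only be changed through this dedicated update API,
// never through the regular update/bulk-update REST API key endpoints.
const updated = await client.security.updateCrossClusterApiKey({
  id: created.id,
  access: {
    search: [{ names: ["logs-*", "metrics-*"] }],
  },
});
console.log(created.encoded, updated.updated);
----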
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-cross-cluster-api-key.html | Elasticsearch API documentation} */ - async updateCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async updateCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async updateCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async updateCrossClusterApiKey (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['access', 'expiration', 'metadata'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index 3e8c12034..f47ff74c8 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Get term vector information. Returns information and statistics about terms in the fields of a particular document. + * Get term vector information. Get information and statistics about terms in the fields of a particular document. 
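For reference, the `termvectors` endpoint whose summary is retouched above is invoked per document. A brief sketch under assumed names (the index, document ID, and field are hypothetical):

[source,js]
----
// Hedged sketch: fetch term and field statistics for one field of a
// single document, matching the reworded description above.
const response = await client.termvectors({
  index: "my-index-000001",
  id: "1",
  fields: ["text"],
  term_statistics: true,
  field_statistics: true,
});
console.log(response.term_vectors);
----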
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html | Elasticsearch API documentation} */ export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index 6f881c4df..926bf97f6 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -1156,7 +1156,6 @@ export interface SearchRequest extends RequestBase { include_named_queries_score?: boolean lenient?: boolean max_concurrent_shard_requests?: long - min_compatible_shard_node?: VersionString preference?: string pre_filter_shard_size?: long request_cache?: boolean @@ -2636,6 +2635,7 @@ export interface RetrieverContainer { knn?: KnnRetriever rrf?: RRFRetriever text_similarity_reranker?: TextSimilarityReranker + rule?: RuleRetriever } export type Routing = string @@ -2645,6 +2645,13 @@ export interface RrfRank { rank_window_size?: long } +export interface RuleRetriever extends RetrieverBase { + ruleset_ids: Id[] + match_criteria: any + retriever: RetrieverContainer + rank_window_size?: integer +} + export type ScalarValue = long | double | string | boolean | null export interface ScoreSort { @@ -6684,7 +6691,6 @@ export interface AsyncSearchSubmitRequest extends RequestBase { ignore_unavailable?: boolean lenient?: boolean max_concurrent_shard_requests?: long - min_compatible_shard_node?: VersionString preference?: string pre_filter_shard_size?: long request_cache?: boolean @@ -6885,6 +6891,7 @@ export interface CatAllocationAllocationRecord { export interface CatAllocationRequest extends CatCatRequestBase { node_id?: NodeIds bytes?: Bytes + local?: boolean } export type CatAllocationResponse = CatAllocationAllocationRecord[] @@ -6901,6 +6908,7 @@ export interface CatComponentTemplatesComponentTemplate { export interface CatComponentTemplatesRequest extends CatCatRequestBase { name?: string + local?: boolean } export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] @@ -7326,6 +7334,7 @@ export interface CatMasterMasterRecord { } export interface CatMasterRequest extends CatCatRequestBase { + local?: boolean } export type CatMasterResponse = CatMasterMasterRecord[] @@ -7693,6 +7702,7 @@ export interface CatNodeattrsNodeAttributesRecord { } export interface CatNodeattrsRequest extends CatCatRequestBase { + local?: boolean } export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] @@ -7987,6 +7997,7 @@ export interface CatPendingTasksPendingTasksRecord { } export interface CatPendingTasksRequest extends CatCatRequestBase { + local?: boolean } export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] @@ -8006,6 +8017,7 @@ export interface CatPluginsPluginsRecord { } export interface CatPluginsRequest extends CatCatRequestBase { + local?: boolean } export type CatPluginsResponse = CatPluginsPluginsRecord[] @@ -8092,6 +8104,7 @@ export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] export interface CatSegmentsRequest extends CatCatRequestBase { index?: Indices bytes?: Bytes + local?: boolean } export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] @@ -8447,6 +8460,7 @@ export interface CatTasksTasksRecord { export interface CatTemplatesRequest extends CatCatRequestBase { name?: Name + local?: boolean } export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] @@ -8468,6 +8482,7 @@ export interface CatTemplatesTemplatesRecord { export interface 
CatThreadPoolRequest extends CatCatRequestBase { thread_pool_patterns?: Names time?: TimeUnit + local?: boolean } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -10304,7 +10319,6 @@ export interface FleetSearchRequest extends RequestBase { ignore_unavailable?: boolean lenient?: boolean max_concurrent_shard_requests?: long - min_compatible_shard_node?: VersionString preference?: string pre_filter_shard_size?: long request_cache?: boolean @@ -11447,7 +11461,6 @@ export interface IndicesExistsAliasRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - local?: boolean } export type IndicesExistsAliasResponse = boolean @@ -11602,7 +11615,6 @@ export interface IndicesGetAliasRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - local?: boolean } export type IndicesGetAliasResponse = Record @@ -12090,7 +12102,6 @@ export interface IndicesSegmentsRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - verbose?: boolean } export interface IndicesSegmentsResponse { @@ -12787,6 +12798,16 @@ export interface IngestInferenceProcessor extends IngestProcessorBase { inference_config?: IngestInferenceConfig } +export interface IngestIpLocationProcessor extends IngestProcessorBase { + database_file?: string + field: Field + first_only?: boolean + ignore_missing?: boolean + properties?: string[] + target_field?: Field + download_database_on_pipeline_creation?: boolean +} + export interface IngestJoinProcessor extends IngestProcessorBase { field: Field separator: string @@ -12881,6 +12902,7 @@ export interface IngestProcessorContainer { fail?: IngestFailProcessor fingerprint?: IngestFingerprintProcessor foreach?: IngestForeachProcessor + ip_location?: IngestIpLocationProcessor geo_grid?: IngestGeoGridProcessor geoip?: IngestGeoIpProcessor grok?: IngestGrokProcessor @@ -17236,6 +17258,11 @@ export interface SearchableSnapshotsStatsResponse { total: any } +export interface SecurityAccess { + replication?: SecurityReplicationAccess[] + search?: SecuritySearchAccess[] +} + export interface SecurityApiKey { creation?: long expiration?: long @@ -17324,6 +17351,10 @@ export interface SecurityRemoteIndicesPrivileges { allow_restricted_indices?: boolean } +export interface SecurityReplicationAccess { + names: IndexName[] +} + export interface SecurityRoleDescriptor { cluster?: SecurityClusterPrivilege[] indices?: SecurityIndicesPrivileges[] @@ -17382,6 +17413,13 @@ export interface SecurityRoleTemplateScript { options?: Record } +export interface SecuritySearchAccess { + field_security?: SecurityFieldSecurity + names: IndexName[] + query?: SecurityIndicesPrivilegesQuery + allow_restricted_indices?: boolean +} + export type SecurityTemplateFormat = 'string' | 'json' export interface SecurityUser { @@ -17564,6 +17602,21 @@ export interface SecurityCreateApiKeyResponse { encoded: string } +export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase { + access: SecurityAccess + expiration?: Duration + metadata?: Metadata + name: Name +} + +export interface SecurityCreateCrossClusterApiKeyResponse { + api_key: string + expiration?: DurationValue + id: Id + name: Name + encoded: string +} + export interface SecurityCreateServiceTokenRequest extends RequestBase { namespace: Namespace service: Service @@ -18242,6 +18295,17 @@ export interface SecurityUpdateApiKeyResponse { updated: 
boolean } +export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase { + id: Id + access: SecurityAccess + expiration?: Duration + metadata?: Metadata +} + +export interface SecurityUpdateCrossClusterApiKeyResponse { + updated: boolean +} + export interface SecurityUpdateUserProfileDataRequest extends RequestBase { uid: SecurityUserProfileId if_seq_no?: SequenceNumber @@ -20158,6 +20222,7 @@ export interface XpackInfoFeatures { graph: XpackInfoFeature ilm: XpackInfoFeature logstash: XpackInfoFeature + logsdb: XpackInfoFeature ml: XpackInfoFeature monitoring: XpackInfoFeature rollup: XpackInfoFeature @@ -20654,7 +20719,6 @@ export interface SpecUtilsCommonCatQueryParameters { format?: string h?: Names help?: boolean - local?: boolean master_timeout?: Duration s?: Names v?: boolean diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index ed87499a7..af66b0744 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -1211,7 +1211,6 @@ export interface SearchRequest extends RequestBase { include_named_queries_score?: boolean lenient?: boolean max_concurrent_shard_requests?: long - min_compatible_shard_node?: VersionString preference?: string pre_filter_shard_size?: long request_cache?: boolean @@ -2712,6 +2711,7 @@ export interface RetrieverContainer { knn?: KnnRetriever rrf?: RRFRetriever text_similarity_reranker?: TextSimilarityReranker + rule?: RuleRetriever } export type Routing = string @@ -2721,6 +2721,13 @@ export interface RrfRank { rank_window_size?: long } +export interface RuleRetriever extends RetrieverBase { + ruleset_ids: Id[] + match_criteria: any + retriever: RetrieverContainer + rank_window_size?: integer +} + export type ScalarValue = long | double | string | boolean | null export interface ScoreSort { @@ -6760,7 +6767,6 @@ export interface AsyncSearchSubmitRequest extends RequestBase { ignore_unavailable?: boolean lenient?: boolean max_concurrent_shard_requests?: long - min_compatible_shard_node?: VersionString preference?: string pre_filter_shard_size?: long request_cache?: boolean @@ -6965,6 +6971,7 @@ export interface CatAllocationAllocationRecord { export interface CatAllocationRequest extends CatCatRequestBase { node_id?: NodeIds bytes?: Bytes + local?: boolean } export type CatAllocationResponse = CatAllocationAllocationRecord[] @@ -6981,6 +6988,7 @@ export interface CatComponentTemplatesComponentTemplate { export interface CatComponentTemplatesRequest extends CatCatRequestBase { name?: string + local?: boolean } export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] @@ -7406,6 +7414,7 @@ export interface CatMasterMasterRecord { } export interface CatMasterRequest extends CatCatRequestBase { + local?: boolean } export type CatMasterResponse = CatMasterMasterRecord[] @@ -7773,6 +7782,7 @@ export interface CatNodeattrsNodeAttributesRecord { } export interface CatNodeattrsRequest extends CatCatRequestBase { + local?: boolean } export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] @@ -8067,6 +8077,7 @@ export interface CatPendingTasksPendingTasksRecord { } export interface CatPendingTasksRequest extends CatCatRequestBase { + local?: boolean } export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] @@ -8086,6 +8097,7 @@ export interface CatPluginsPluginsRecord { } export interface CatPluginsRequest extends CatCatRequestBase { + local?: boolean } export type CatPluginsResponse = CatPluginsPluginsRecord[] @@ -8172,6 +8184,7 @@ export type 
CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] export interface CatSegmentsRequest extends CatCatRequestBase { index?: Indices bytes?: Bytes + local?: boolean } export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] @@ -8527,6 +8540,7 @@ export interface CatTasksTasksRecord { export interface CatTemplatesRequest extends CatCatRequestBase { name?: Name + local?: boolean } export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] @@ -8548,6 +8562,7 @@ export interface CatTemplatesTemplatesRecord { export interface CatThreadPoolRequest extends CatCatRequestBase { thread_pool_patterns?: Names time?: TimeUnit + local?: boolean } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -10466,7 +10481,6 @@ export interface FleetSearchRequest extends RequestBase { ignore_unavailable?: boolean lenient?: boolean max_concurrent_shard_requests?: long - min_compatible_shard_node?: VersionString preference?: string pre_filter_shard_size?: long request_cache?: boolean @@ -11634,7 +11648,6 @@ export interface IndicesExistsAliasRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - local?: boolean } export type IndicesExistsAliasResponse = boolean @@ -11789,7 +11802,6 @@ export interface IndicesGetAliasRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - local?: boolean } export type IndicesGetAliasResponse = Record @@ -12297,7 +12309,6 @@ export interface IndicesSegmentsRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean - verbose?: boolean } export interface IndicesSegmentsResponse { @@ -13013,6 +13024,16 @@ export interface IngestInferenceProcessor extends IngestProcessorBase { inference_config?: IngestInferenceConfig } +export interface IngestIpLocationProcessor extends IngestProcessorBase { + database_file?: string + field: Field + first_only?: boolean + ignore_missing?: boolean + properties?: string[] + target_field?: Field + download_database_on_pipeline_creation?: boolean +} + export interface IngestJoinProcessor extends IngestProcessorBase { field: Field separator: string @@ -13107,6 +13128,7 @@ export interface IngestProcessorContainer { fail?: IngestFailProcessor fingerprint?: IngestFingerprintProcessor foreach?: IngestForeachProcessor + ip_location?: IngestIpLocationProcessor geo_grid?: IngestGeoGridProcessor geoip?: IngestGeoIpProcessor grok?: IngestGrokProcessor @@ -17614,6 +17636,11 @@ export interface SearchableSnapshotsStatsResponse { total: any } +export interface SecurityAccess { + replication?: SecurityReplicationAccess[] + search?: SecuritySearchAccess[] +} + export interface SecurityApiKey { creation?: long expiration?: long @@ -17702,6 +17729,10 @@ export interface SecurityRemoteIndicesPrivileges { allow_restricted_indices?: boolean } +export interface SecurityReplicationAccess { + names: IndexName[] +} + export interface SecurityRoleDescriptor { cluster?: SecurityClusterPrivilege[] indices?: SecurityIndicesPrivileges[] @@ -17760,6 +17791,13 @@ export interface SecurityRoleTemplateScript { options?: Record } +export interface SecuritySearchAccess { + field_security?: SecurityFieldSecurity + names: IndexName[] + query?: SecurityIndicesPrivilegesQuery + allow_restricted_indices?: boolean +} + export type SecurityTemplateFormat = 'string' | 'json' export interface SecurityUser { @@ -17957,6 +17995,24 @@ export interface 
SecurityCreateApiKeyResponse { encoded: string } +export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + access: SecurityAccess + expiration?: Duration + metadata?: Metadata + name: Name + } +} + +export interface SecurityCreateCrossClusterApiKeyResponse { + api_key: string + expiration?: DurationValue + id: Id + name: Name + encoded: string +} + export interface SecurityCreateServiceTokenRequest extends RequestBase { namespace: Namespace service: Service @@ -18694,6 +18750,20 @@ export interface SecurityUpdateApiKeyResponse { updated: boolean } +export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase { + id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + access: SecurityAccess + expiration?: Duration + metadata?: Metadata + } +} + +export interface SecurityUpdateCrossClusterApiKeyResponse { + updated: boolean +} + export interface SecurityUpdateUserProfileDataRequest extends RequestBase { uid: SecurityUserProfileId if_seq_no?: SequenceNumber @@ -20666,6 +20736,7 @@ export interface XpackInfoFeatures { graph: XpackInfoFeature ilm: XpackInfoFeature logstash: XpackInfoFeature + logsdb: XpackInfoFeature ml: XpackInfoFeature monitoring: XpackInfoFeature rollup: XpackInfoFeature @@ -21162,7 +21233,6 @@ export interface SpecUtilsCommonCatQueryParameters { format?: string h?: Names help?: boolean - local?: boolean master_timeout?: Duration s?: Names v?: boolean From 44d890ec5786eeee5e5b04ee501b9cca2ec2cad7 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 17:16:54 +0000 Subject: [PATCH 414/647] Update dependency @types/node to v22 (#2437) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Co-authored-by: Josh Mock --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index cddcdfec7..70d497577 100644 --- a/package.json +++ b/package.json @@ -57,7 +57,7 @@ "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "18.19.64", + "@types/node": "22.9.0", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "3.2.1", "@types/stoppable": "1.1.3", From 1ad057abcc061251dcc957e9f022925f25a99193 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 11:51:35 -0600 Subject: [PATCH 415/647] Update dependency @types/split2 to v4 (#2438) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 70d497577..400a20770 100644 --- a/package.json +++ b/package.json @@ -59,7 +59,7 @@ "@types/ms": "0.7.34", "@types/node": "22.9.0", "@types/sinonjs__fake-timers": "8.1.5", - "@types/split2": "3.2.1", + "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", "@types/tap": "15.0.12", "chai": "4.5.0", From 0b906136944f29a728cb317386356a3c22b00a40 Mon Sep 17 00:00:00 2001 From: Marci W <333176+marciw@users.noreply.github.com> Date: Tue, 12 Nov 2024 11:51:18 -0500 Subject: [PATCH 416/647] Address feedback and add clarity (#2449) --- 
docs/examples/bulk.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/examples/bulk.asciidoc b/docs/examples/bulk.asciidoc index e35052e0e..74725c9e9 100644 --- a/docs/examples/bulk.asciidoc +++ b/docs/examples/bulk.asciidoc @@ -1,10 +1,10 @@ [[bulk_examples]] === Bulk -The `bulk` API makes it possible to perform many index/delete operations in a -single API call. This can greatly increase the indexing speed. +With the {jsclient}/api-reference.html#_bulk[`bulk` API], you can perform multiple index/delete operations in a +single API call. The `bulk` API significantly increases indexing speed. -NOTE: Did you know that we provide an helper for sending bulk request? You can find it {jsclient}/client-helpers.html[here]. +NOTE: You can also use the {jsclient}/client-helpers.html[bulk helper]. [source,js] ---- From 290639d168e416c1e413b5237890057e27d6deb6 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 11:27:52 -0600 Subject: [PATCH 417/647] Update dependency chai to v5 (#2453) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 400a20770..849aee1af 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", "@types/tap": "15.0.12", - "chai": "4.5.0", + "chai": "5.1.2", "cross-zip": "4.0.1", "desm": "1.3.1", "into-stream": "7.0.0", From 5880c84c1376751d0f8e482d18389d4e6cefb66d Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 17:34:24 +0000 Subject: [PATCH 418/647] Update dependency typescript to v5 (#2455) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 849aee1af..7804201f3 100644 --- a/package.json +++ b/package.json @@ -81,7 +81,7 @@ "tap": "21.0.1", "ts-node": "10.9.2", "ts-standard": "11.0.0", - "typescript": "4.9.5", + "typescript": "5.6.3", "workq": "3.0.0", "xmlbuilder2": "3.1.1", "zx": "7.2.3" From c5e4107181c545e63565059da2bb15385ce52260 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 13:44:49 -0600 Subject: [PATCH 419/647] Update peter-evans/create-pull-request action to v7 (#2456) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .github/workflows/serverless-patch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml index 2f583a7cd..935ef28d5 100644 --- a/.github/workflows/serverless-patch.yml +++ b/.github/workflows/serverless-patch.yml @@ -42,7 +42,7 @@ jobs: - name: Apply patch from stack to serverless id: apply-patch run: $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh - - uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v6 + - uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v7 with: token: ${{ secrets.GH_TOKEN }} path: serverless From 421f953b0081e9342fed86d2734b48f70ae19581 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 
13 Nov 2024 10:42:33 -0600 Subject: [PATCH 420/647] Upgrade ts-standard (#2460) --- package.json | 2 +- src/helpers.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 7804201f3..929f01d52 100644 --- a/package.json +++ b/package.json @@ -80,7 +80,7 @@ "stoppable": "1.1.0", "tap": "21.0.1", "ts-node": "10.9.2", - "ts-standard": "11.0.0", + "ts-standard": "12.0.2", "typescript": "5.6.3", "workq": "3.0.0", "xmlbuilder2": "3.1.1", diff --git a/src/helpers.ts b/src/helpers.ts index 10fda499a..5f4357893 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -194,7 +194,7 @@ export default class Helpers { * @param {object} options - The client optional configuration for this request. * @return {array} The documents that matched the request. */ - async search (params: T.SearchRequest, options: TransportRequestOptions = {}): Promise> { + async search (params: T.SearchRequest, options: TransportRequestOptions = {}): Promise> { appendFilterPath('hits.hits._id,hits.hits._source', params, true) options.meta = true const { body: result } = await this[kClient].search(params, options as TransportRequestOptionsWithMeta) From 2b890af3559334f85c004b422f0b111e332fe32e Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 14 Nov 2024 09:37:36 -0600 Subject: [PATCH 421/647] Update integration test automation branches (#2463) --- .buildkite/pipeline.yml | 2 +- catalog-info.yaml | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 370d1465f..eee6e4387 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -6,7 +6,7 @@ steps: env: NODE_VERSION: "{{ matrix.nodejs }}" TEST_SUITE: "{{ matrix.suite }}" - STACK_VERSION: 8.15.0 + STACK_VERSION: 8.16.0 matrix: setup: suite: diff --git a/catalog-info.yaml b/catalog-info.yaml index 4ce58c0b7..80d0514aa 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -42,6 +42,9 @@ spec: main: branch: "main" cronline: "@daily" + 8_x: + branch: "8.x" + cronline: "@daily" 8_14: - branch: "8.14" + branch: "8.16" cronline: "@daily" From 7c7ce29127f0a0aae7acadbad10aab60089387a7 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 11:13:38 -0600 Subject: [PATCH 422/647] Update dependency @elastic/request-converter to v8.16.1 (#2469) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 929f01d52..133e29116 100644 --- a/package.json +++ b/package.json @@ -53,7 +53,7 @@ "node": ">=18" }, "devDependencies": { - "@elastic/request-converter": "8.16.0", + "@elastic/request-converter": "8.16.1", "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", "@types/debug": "4.1.12", "@types/ms": "0.7.34", From 6dc83cd33e63f30f80dbd384d85be8cb07f39a16 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 18 Nov 2024 18:29:36 +0100 Subject: [PATCH 423/647] Auto-generated code for main (#2473) Co-authored-by: Josh Mock --- .../19d60e4890cc57151d596326484d9076.asciidoc | 11 -- ...216e24f05cbb82c1718713fbab8623d2.asciidoc} | 2 +- ...334811cfceb6858aeec5b3461717dd63.asciidoc} | 2 +- .../3b6718257421b5419bf4cd6a7303c57e.asciidoc | 11 -- .../533087d787b48878a0bf3fa8d0851b64.asciidoc | 11 -- ...5e021307d331a4483a5aa2198168451b.asciidoc} | 8 +- .../7f1fade93225f8cf6000b93334d76ce4.asciidoc | 34 ++++++ 
.../981b331db1404b39c1a612a135e4e76d.asciidoc | 17 --- .../a3779f21f132787c48681bfb50453592.asciidoc | 34 ++++++ ...b0ee6f19875fe5bad8aab02d60e3532c.asciidoc} | 2 +- .../e308899a306e61d1a590868308689955.asciidoc | 36 ++++++ docs/reference.asciidoc | 46 +++++++- src/api/api/ingest.ts | 103 ++++++++++++++++++ src/api/api/security.ts | 2 +- src/api/types.ts | 27 ++++- src/api/typesWithBodyKey.ts | 27 ++++- 16 files changed, 304 insertions(+), 69 deletions(-) delete mode 100644 docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc rename docs/doc_examples/{3fab530a2e43807929c0ef3ebf7d268c.asciidoc => 216e24f05cbb82c1718713fbab8623d2.asciidoc} (94%) rename docs/doc_examples/{d3a558ef226e9dccc1c7c61e1167547f.asciidoc => 334811cfceb6858aeec5b3461717dd63.asciidoc} (93%) delete mode 100644 docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc delete mode 100644 docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc rename docs/doc_examples/{4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc => 5e021307d331a4483a5aa2198168451b.asciidoc} (73%) create mode 100644 docs/doc_examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc delete mode 100644 docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc create mode 100644 docs/doc_examples/a3779f21f132787c48681bfb50453592.asciidoc rename docs/doc_examples/{b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc => b0ee6f19875fe5bad8aab02d60e3532c.asciidoc} (93%) create mode 100644 docs/doc_examples/e308899a306e61d1a590868308689955.asciidoc diff --git a/docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc b/docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc deleted file mode 100644 index 6f918e3b2..000000000 --- a/docs/doc_examples/19d60e4890cc57151d596326484d9076.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.transport.request({ - method: "DELETE", - path: "/_ingest/geoip/database/my-database-id", -}); -console.log(response); ----- diff --git a/docs/doc_examples/3fab530a2e43807929c0ef3ebf7d268c.asciidoc b/docs/doc_examples/216e24f05cbb82c1718713fbab8623d2.asciidoc similarity index 94% rename from docs/doc_examples/3fab530a2e43807929c0ef3ebf7d268c.asciidoc rename to docs/doc_examples/216e24f05cbb82c1718713fbab8623d2.asciidoc index e7b6ae812..dbaac1815 100644 --- a/docs/doc_examples/3fab530a2e43807929c0ef3ebf7d268c.asciidoc +++ b/docs/doc_examples/216e24f05cbb82c1718713fbab8623d2.asciidoc @@ -5,7 +5,7 @@ ---- const response = await client.ingest.putPipeline({ id: "geoip", - description: "Add geoip info", + description: "Add ip geolocation info", processors: [ { geoip: { diff --git a/docs/doc_examples/d3a558ef226e9dccc1c7c61e1167547f.asciidoc b/docs/doc_examples/334811cfceb6858aeec5b3461717dd63.asciidoc similarity index 93% rename from docs/doc_examples/d3a558ef226e9dccc1c7c61e1167547f.asciidoc rename to docs/doc_examples/334811cfceb6858aeec5b3461717dd63.asciidoc index 4c5e7872e..1e3f14f12 100644 --- a/docs/doc_examples/d3a558ef226e9dccc1c7c61e1167547f.asciidoc +++ b/docs/doc_examples/334811cfceb6858aeec5b3461717dd63.asciidoc @@ -5,7 +5,7 @@ ---- const response = await client.ingest.putPipeline({ id: "geoip", - description: "Add geoip info", + description: "Add ip geolocation info", processors: [ { geoip: { diff --git a/docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc b/docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc deleted file mode 100644 index 
7ad8dcc20..000000000 --- a/docs/doc_examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.transport.request({ - method: "GET", - path: "/_ingest/geoip/database/my-database-id", -}); -console.log(response); ----- diff --git a/docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc b/docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc deleted file mode 100644 index 65425b66a..000000000 --- a/docs/doc_examples/533087d787b48878a0bf3fa8d0851b64.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.transport.request({ - method: "DELETE", - path: "/_ingest/geoip/database/example-database-id", -}); -console.log(response); ----- diff --git a/docs/doc_examples/4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc b/docs/doc_examples/5e021307d331a4483a5aa2198168451b.asciidoc similarity index 73% rename from docs/doc_examples/4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc rename to docs/doc_examples/5e021307d331a4483a5aa2198168451b.asciidoc index 949a81873..329574d90 100644 --- a/docs/doc_examples/4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc +++ b/docs/doc_examples/5e021307d331a4483a5aa2198168451b.asciidoc @@ -4,7 +4,7 @@ [source, js] ---- const response = await client.security.putRole({ - name: "role_with_remote_indices", + name: "only_remote_access_role", remote_indices: [ { clusters: ["my_remote"], @@ -12,6 +12,12 @@ const response = await client.security.putRole({ privileges: ["read", "read_cross_cluster", "view_index_metadata"], }, ], + remote_cluster: [ + { + clusters: ["my_remote"], + privileges: ["monitor_stats"], + }, + ], }); console.log(response); ---- diff --git a/docs/doc_examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc b/docs/doc_examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc new file mode 100644 index 000000000..9d18e53eb --- /dev/null +++ b/docs/doc_examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "ip_location", + description: "Add ip geolocation info", + processors: [ + { + ip_location: { + field: "ip", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "ip_location", + document: { + ip: "80.231.5.0", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc b/docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc deleted file mode 100644 index 0d3cdbbe3..000000000 --- a/docs/doc_examples/981b331db1404b39c1a612a135e4e76d.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.transport.request({ - method: "PUT", - path: "/_ingest/geoip/database/my-database-id", - body: { - name: "GeoIP2-Domain", - maxmind: { - account_id: "1025402", - }, - }, -}); -console.log(response); ----- diff --git 
a/docs/doc_examples/a3779f21f132787c48681bfb50453592.asciidoc b/docs/doc_examples/a3779f21f132787c48681bfb50453592.asciidoc new file mode 100644 index 000000000..aa591634b --- /dev/null +++ b/docs/doc_examples/a3779f21f132787c48681bfb50453592.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "ip_location", + description: "Add ip geolocation info", + processors: [ + { + ip_location: { + field: "ip", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "ip_location", + document: { + ip: "89.160.20.128", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc b/docs/doc_examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc similarity index 93% rename from docs/doc_examples/b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc rename to docs/doc_examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc index 2bad68c5e..abe2a362d 100644 --- a/docs/doc_examples/b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc +++ b/docs/doc_examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc @@ -5,7 +5,7 @@ ---- const response = await client.ingest.putPipeline({ id: "geoip", - description: "Add geoip info", + description: "Add ip geolocation info", processors: [ { geoip: { diff --git a/docs/doc_examples/e308899a306e61d1a590868308689955.asciidoc b/docs/doc_examples/e308899a306e61d1a590868308689955.asciidoc new file mode 100644 index 000000000..28a832c3a --- /dev/null +++ b/docs/doc_examples/e308899a306e61d1a590868308689955.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "ip_location", + description: "Add ip geolocation info", + processors: [ + { + ip_location: { + field: "ip", + target_field: "geo", + database_file: "GeoLite2-Country.mmdb", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "ip_location", + document: { + ip: "89.160.20.128", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 403af082d..bab82b7fb 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -5938,6 +5938,17 @@ client.ingest.deleteGeoipDatabase({ id }) If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +[discrete] +==== delete_ip_location_database +Deletes an ip location database configuration + +{ref}/delete-ip-location-database-api.html[Endpoint documentation] +[source,ts] +---- +client.ingest.deleteIpLocationDatabase() +---- + + [discrete] ==== delete_pipeline Deletes one or more existing ingest pipeline. @@ -5990,6 +6001,17 @@ To get all database configurations, omit this parameter or use `*`. 
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +[discrete] +==== get_ip_location_database +Returns the specified ip location database configuration + +{ref}/get-ip-location-database-api.html[Endpoint documentation] +[source,ts] +---- +client.ingest.getIpLocationDatabase() +---- + + [discrete] ==== get_pipeline Returns information about one or more ingest pipelines. @@ -6047,6 +6069,17 @@ At present, the only supported provider is maxmind, and the maxmind provider req If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +[discrete] +==== put_ip_location_database +Puts the configuration for a ip location database to be downloaded + +{ref}/put-ip-location-database-api.html[Endpoint documentation] +[source,ts] +---- +client.ingest.putIpLocationDatabase() +---- + + [discrete] ==== put_pipeline Creates or updates an ingest pipeline. @@ -9171,7 +9204,7 @@ client.security.bulkPutRole({ roles }) ==== Arguments * *Request (object):* -** *`roles` (Record)*: A dictionary of role name to RoleDescriptor objects to add or update +** *`roles` (Record)*: A dictionary of role name to RoleDescriptor objects to add or update ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. [discrete] @@ -9328,7 +9361,7 @@ client.security.createApiKey({ ... }) * *Request (object):* ** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. By default, API keys never expire. ** *`name` (Optional, string)*: Specifies the name for this API key. -** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API. +** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API. ** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. 
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -9887,7 +9920,7 @@ client.security.hasPrivileges({ ... }) * *Request (object):* ** *`user` (Optional, string)*: Username ** *`application` (Optional, { application, privileges, resources }[])* -** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check. +** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check. ** *`index` (Optional, { names, privileges, allow_restricted_indices }[])* [discrete] @@ -10040,10 +10073,11 @@ client.security.putRole({ name }) * *Request (object):* ** *`name` (string)*: The name of the role that is being created or updated. 
On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. ** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries. -** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. +** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. ** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. ** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries. 
** *`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])*: A list of remote indices permissions entries. +** *`remote_cluster` (Optional, { clusters, privileges }[])*: A list of remote cluster permissions entries. ** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. ** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. ** *`description` (Optional, string)*: Optional description of the role descriptor @@ -10397,7 +10431,7 @@ client.security.updateApiKey({ id }) * *Request (object):* ** *`id` (string)*: The ID of the API key to update. -** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API. +** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API. ** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage. ** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. @@ -11266,7 +11300,7 @@ client.tasks.list({ ... }) ** *`actions` (Optional, string | string[])*: List or wildcard expression of actions used to limit the request. ** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. ** *`group_by` (Optional, Enum("nodes" | "parents" | "none"))*: Key used to group tasks in the response. -** *`node_id` (Optional, string[])*: List of node IDs or names used to limit returned information. +** *`nodes` (Optional, string | string[])*: List of node IDs or names used to limit returned information. ** *`parent_task_id` (Optional, string)*: Parent task ID used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
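The hunks above document two API surface changes in this commit: roles gain a `remote_cluster` property (carrying the `monitor_enrich` and `monitor_stats` privileges) and the `tasks.list` parameter `node_id` is renamed to `nodes`. The sketch below is illustrative only and is not part of this patch series; the role name, cluster aliases, and node IDs are invented for the example, while the request shapes follow the updated `SecurityPutRoleRequest` and `TasksListRequest` type definitions that appear later in this commit.

[source, js]
----
// Grant read-only monitoring privileges on two hypothetical linked
// remote clusters via the new `remote_cluster` role property.
const role = await client.security.putRole({
  name: "my-remote-monitoring-role",
  cluster: ["monitor"],
  remote_cluster: [
    {
      clusters: ["cluster-one", "cluster-two"],
      privileges: ["monitor_enrich", "monitor_stats"],
    },
  ],
});
console.log(role);

// List tasks on specific nodes using the renamed `nodes` parameter
// (formerly `node_id`).
const tasks = await client.tasks.list({
  nodes: ["node-1", "node-2"],
  detailed: true,
  group_by: "parents",
});
console.log(tasks);
----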
diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index 442dcbcfe..1c7836950 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -76,6 +76,38 @@ export default class Ingest { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Deletes an ip location database configuration + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-ip-location-database-api.html | Elasticsearch API documentation} + */ + async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ingest/ip_location/database/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ingest.delete_ip_location_database', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Deletes one or more existing ingest pipeline. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html | Elasticsearch API documentation} @@ -178,6 +210,45 @@ export default class Ingest { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Returns the specified ip location database configuration + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ip-location-database-api.html | Elasticsearch API documentation} + */ + async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_ingest/ip_location/database/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_ingest/ip_location/database' + } + const meta: TransportRequestMetadata = { + name: 'ingest.get_ip_location_database', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Returns information about one or more ingest pipelines. This API returns a local reference of the pipeline. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html | Elasticsearch API documentation} @@ -292,6 +363,38 @@ export default class Ingest { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Puts the configuration for a ip location database to be downloaded + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-ip-location-database-api.html | Elasticsearch API documentation} + */ + async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise + async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['id'] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_ingest/ip_location/database/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ingest.put_ip_location_database', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Creates or updates an ingest pipeline. Changes made using this API take effect immediately. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ingest.html | Elasticsearch API documentation} diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 8af60e384..11f58ed03 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -1697,7 +1697,7 @@ export default class Security { async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'remote_indices', 'metadata', 'run_as', 'description', 'transient_metadata'] + const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'remote_indices', 'remote_cluster', 'metadata', 'run_as', 'description', 'transient_metadata'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/types.ts b/src/api/types.ts index 926bf97f6..7a92daa1d 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -10954,6 +10954,8 @@ export interface IndicesIndexTemplate { _meta?: Metadata allow_auto_create?: boolean data_stream?: IndicesIndexTemplateDataStreamConfiguration + deprecated?: boolean + ignore_missing_component_templates?: Names } export interface IndicesIndexTemplateDataStreamConfiguration { @@ -13358,6 +13360,7 @@ export interface MigrationDeprecationsRequest extends RequestBase { export interface MigrationDeprecationsResponse { cluster_settings: MigrationDeprecationsDeprecation[] index_settings: Record + data_streams: Record node_settings: MigrationDeprecationsDeprecation[] ml_settings: MigrationDeprecationsDeprecation[] } @@ -17298,7 +17301,7 @@ 
export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_stats' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string export interface SecurityCreatedStatus { created: boolean @@ -17342,6 +17345,13 @@ export interface SecurityRealmInfo { type: string } +export type SecurityRemoteClusterPrivilege = 'monitor_enrich' | 'monitor_stats' + +export interface SecurityRemoteClusterPrivileges { + clusters: Names + privileges: SecurityRemoteClusterPrivilege[] +} + export interface SecurityRemoteIndicesPrivileges { clusters: Names field_security?: SecurityFieldSecurity @@ -17353,12 +17363,15 @@ export interface SecurityRemoteIndicesPrivileges { export interface SecurityReplicationAccess { names: IndexName[] + allow_restricted_indices?: boolean } export interface SecurityRoleDescriptor { cluster?: SecurityClusterPrivilege[] indices?: SecurityIndicesPrivileges[] index?: SecurityIndicesPrivileges[] + remote_indices?: SecurityRemoteIndicesPrivileges[] + remote_cluster?: SecurityRemoteClusterPrivileges[] 
global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege applications?: SecurityApplicationPrivileges[] metadata?: Metadata @@ -17371,6 +17384,8 @@ export interface SecurityRoleDescriptorRead { cluster: SecurityClusterPrivilege[] indices: SecurityIndicesPrivileges[] index: SecurityIndicesPrivileges[] + remote_indices?: SecurityRemoteIndicesPrivileges[] + remote_cluster?: SecurityRemoteClusterPrivileges[] global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege applications?: SecurityApplicationPrivileges[] metadata?: Metadata @@ -17758,8 +17773,9 @@ export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { } export interface SecurityGetBuiltinPrivilegesResponse { - cluster: string[] + cluster: SecurityClusterPrivilege[] index: IndexName[] + remote_cluster: SecurityRemoteClusterPrivilege[] } export interface SecurityGetPrivilegesRequest extends RequestBase { @@ -17776,8 +17792,10 @@ export interface SecurityGetRoleRequest extends RequestBase { export type SecurityGetRoleResponse = Record export interface SecurityGetRoleRole { - cluster: string[] + cluster: SecurityClusterPrivilege[] indices: SecurityIndicesPrivileges[] + remote_indices?: SecurityRemoteIndicesPrivileges[] + remote_cluster?: SecurityRemoteClusterPrivileges[] metadata: Metadata run_as: string[] transient_metadata?: Record @@ -18030,6 +18048,7 @@ export interface SecurityPutRoleRequest extends RequestBase { global?: Record indices?: SecurityIndicesPrivileges[] remote_indices?: SecurityRemoteIndicesPrivileges[] + remote_cluster?: SecurityRemoteClusterPrivileges[] metadata?: Metadata run_as?: string[] description?: string @@ -19147,7 +19166,7 @@ export interface TasksListRequest extends RequestBase { actions?: string | string[] detailed?: boolean group_by?: TasksGroupBy - node_id?: string[] + nodes?: NodeIds parent_task_id?: Id master_timeout?: Duration timeout?: Duration diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index af66b0744..1782c9bbf 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -11131,6 +11131,8 @@ export interface IndicesIndexTemplate { _meta?: Metadata allow_auto_create?: boolean data_stream?: IndicesIndexTemplateDataStreamConfiguration + deprecated?: boolean + ignore_missing_component_templates?: Names } export interface IndicesIndexTemplateDataStreamConfiguration { @@ -13597,6 +13599,7 @@ export interface MigrationDeprecationsRequest extends RequestBase { export interface MigrationDeprecationsResponse { cluster_settings: MigrationDeprecationsDeprecation[] index_settings: Record + data_streams: Record node_settings: MigrationDeprecationsDeprecation[] ml_settings: MigrationDeprecationsDeprecation[] } @@ -17676,7 +17679,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 
'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_stats' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string export interface SecurityCreatedStatus { created: boolean @@ -17720,6 +17723,13 @@ export interface SecurityRealmInfo { type: string } +export type SecurityRemoteClusterPrivilege = 'monitor_enrich' | 'monitor_stats' + +export interface SecurityRemoteClusterPrivileges { + clusters: Names + privileges: SecurityRemoteClusterPrivilege[] +} + export interface SecurityRemoteIndicesPrivileges { clusters: Names field_security?: SecurityFieldSecurity @@ -17731,12 +17741,15 @@ export interface SecurityRemoteIndicesPrivileges { export interface SecurityReplicationAccess { names: IndexName[] + allow_restricted_indices?: boolean } export interface SecurityRoleDescriptor { cluster?: SecurityClusterPrivilege[] indices?: SecurityIndicesPrivileges[] index?: SecurityIndicesPrivileges[] + remote_indices?: SecurityRemoteIndicesPrivileges[] + remote_cluster?: SecurityRemoteClusterPrivileges[] global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege applications?: SecurityApplicationPrivileges[] metadata?: Metadata @@ -17749,6 +17762,8 @@ export interface SecurityRoleDescriptorRead { cluster: SecurityClusterPrivilege[] indices: SecurityIndicesPrivileges[] index: SecurityIndicesPrivileges[] + remote_indices?: SecurityRemoteIndicesPrivileges[] + remote_cluster?: SecurityRemoteClusterPrivileges[] global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege applications?: SecurityApplicationPrivileges[] metadata?: Metadata @@ -18154,8 +18169,9 @@ export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { } export interface SecurityGetBuiltinPrivilegesResponse { - cluster: string[] + cluster: SecurityClusterPrivilege[] index: IndexName[] + remote_cluster: 
SecurityRemoteClusterPrivilege[] } export interface SecurityGetPrivilegesRequest extends RequestBase { @@ -18172,8 +18188,10 @@ export interface SecurityGetRoleRequest extends RequestBase { export type SecurityGetRoleResponse = Record export interface SecurityGetRoleRole { - cluster: string[] + cluster: SecurityClusterPrivilege[] indices: SecurityIndicesPrivileges[] + remote_indices?: SecurityRemoteIndicesPrivileges[] + remote_cluster?: SecurityRemoteClusterPrivileges[] metadata: Metadata run_as: string[] transient_metadata?: Record @@ -18447,6 +18465,7 @@ export interface SecurityPutRoleRequest extends RequestBase { global?: Record indices?: SecurityIndicesPrivileges[] remote_indices?: SecurityRemoteIndicesPrivileges[] + remote_cluster?: SecurityRemoteClusterPrivileges[] metadata?: Metadata run_as?: string[] description?: string @@ -19639,7 +19658,7 @@ export interface TasksListRequest extends RequestBase { actions?: string | string[] detailed?: boolean group_by?: TasksGroupBy - node_id?: string[] + nodes?: NodeIds parent_task_id?: Id master_timeout?: Duration timeout?: Duration From 36cfacc409153a5d5c32ad65cb03bba5a309f8fe Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 18 Nov 2024 12:14:58 -0600 Subject: [PATCH 424/647] Fix ECMAScript import (#2475) --- .github/workflows/nodejs.yml | 10 +++++++++- package.json | 5 ++++- test/esm/package.json | 7 +++++++ test/esm/test-import.mjs | 28 ++++++++++++++++++++++++++++ 4 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 test/esm/package.json create mode 100644 test/esm/test-import.mjs diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index e984adb53..85f8131d3 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -32,7 +32,7 @@ jobs: strategy: fail-fast: false matrix: - node-version: [18.x, 20.x, 22.x] + node-version: [18.x, 20.x, 22.x, 23.x] os: [ubuntu-latest, windows-latest, macOS-latest] steps: @@ -57,6 +57,10 @@ jobs: run: | npm run test:unit + - name: ECMAScript module test + run: | + npm run test:esm + license: name: License check runs-on: ubuntu-latest @@ -110,3 +114,7 @@ jobs: - name: Unit test run: | bun run test:unit-bun + + - name: ECMAScript module test + run: | + bun run test:esm diff --git a/package.json b/package.json index 133e29116..be1048c4d 100644 --- a/package.json +++ b/package.json @@ -6,12 +6,15 @@ "main": "./index.js", "types": "index.d.ts", "exports": { - "require": "./index.js" + "require": "./index.js", + "import": "./index.js", + "types": "./index.d.ts" }, "scripts": { "test": "npm run build && npm run lint && tap", "test:unit": "npm run build && tap", "test:unit-bun": "bun run build && bunx tap", + "test:esm": "npm run build && cd test/esm/ && npm install && node test-import.mjs", "test:coverage-100": "npm run build && tap --coverage --100", "test:coverage-report": "npm run build && tap --coverage && nyc report --reporter=text-lcov > coverage.lcov", "test:coverage-ui": "npm run build && tap --coverage --coverage-report=html", diff --git a/test/esm/package.json b/test/esm/package.json new file mode 100644 index 000000000..5209563e8 --- /dev/null +++ b/test/esm/package.json @@ -0,0 +1,7 @@ +{ + "name": "esm", + "version": "1.0.0", + "dependencies": { + "@elastic/elasticsearch": "file:../.." + } +} diff --git a/test/esm/test-import.mjs b/test/esm/test-import.mjs new file mode 100644 index 000000000..f7a6f09e6 --- /dev/null +++ b/test/esm/test-import.mjs @@ -0,0 +1,28 @@ +/* + * Licensed to Elasticsearch B.V. 
under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import { Client } from '@elastic/elasticsearch' + +new Client({ + node: 'http://localhost:9200/', + auth: { + username: 'elastic', + password: 'changeme', + } +}) From a30c3dca2d0ab0fc36f2845ac5b7545d3c005f1c Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 18 Nov 2024 13:32:52 -0600 Subject: [PATCH 425/647] Add changelog for 8.16.1 (#2479) --- docs/changelog.asciidoc | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 0f45de6c6..684829494 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,17 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.16.1 + +[discrete] +==== Fixes + +[discrete] +===== Fix ECMAScript imports + +Fixed package configuration to correctly support native ECMAScript `import` syntax. + [discrete] === 8.16.0 From fbbbece711c3b9fb35c0401f9905da554c399788 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 20 Nov 2024 14:43:36 -0600 Subject: [PATCH 426/647] Add docstrings for Client class and related properties (#2484) --- src/client.ts | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 1 deletion(-) diff --git a/src/client.ts b/src/client.ts index df0fa4c1a..f65baf0ad 100644 --- a/src/client.ts +++ b/src/client.ts @@ -69,11 +69,16 @@ if (transportVersion.includes('-')) { const nodeVersion = process.versions.node export interface NodeOptions { + /** @property url Elasticsearch node's location */ url: URL id?: string + /** @property agent Custom HTTP agent options */ agent?: HttpAgentOptions | UndiciAgentOptions + /** @property ssl Overrides default TLS connection settings */ ssl?: TlsConnectionOptions + /** @property headers Custom HTTP headers that should be sent with each request */ headers?: Record + /** @property roles Common Elasticsearch roles that can be assigned to this node. Can be helpful when writing custom nodeFilter or nodeSelector functions. */ roles?: { master: boolean data: boolean @@ -83,40 +88,110 @@ export interface NodeOptions { } export interface ClientOptions { + /** @property node Elasticsearch node settings, if there is only one node. Required if `nodes` or `cloud` is not set. */ node?: string | string[] | NodeOptions | NodeOptions[] + /** @property nodes Elasticsearch node settings, if there are multiple nodes. Required if `node` or `cloud` is not set. 
*/ nodes?: string | string[] | NodeOptions | NodeOptions[] + /** @property Connection HTTP connection class to use + * @defaultValue `UndiciConnection` */ Connection?: typeof BaseConnection + /** @property ConnectionPool HTTP connection pool class to use + * @defaultValue `CloudConnectionPool`, if connecting to Elastic Cloud, otherwise `WeightedConnectionPool` */ ConnectionPool?: typeof BaseConnectionPool + /** @property Transport Elastic transport class to use + * @defaultValue `Transport` */ Transport?: typeof Transport + /** @property Serializer Serialization class to use + * @defaultValue `Serializer` */ Serializer?: typeof Serializer + /** @property maxRetries Max number of retries for each request + * @defaultValue 3 */ maxRetries?: number + /** @property requestTimeout Max request timeout in milliseconds for each request + * @defaultValue 30000 */ requestTimeout?: number + /** @property pingTimeout Max number of milliseconds a `ClusterConnectionPool` will wait when pinging nodes before marking them dead + * @defaultValue 3000 */ pingTimeout?: number + /** @property sniffInterval Perform a sniff operation every `n` milliseconds + * @remarks Sniffing might not be the best solution for you. Read https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how to learn more. + * @defaultValue false */ sniffInterval?: number | boolean + /** @property sniffOnStart Perform a sniff once the client is started + * @remarks Sniffing might not be the best solution for you. Read https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how to learn more. + * @defaultValue false */ sniffOnStart?: boolean + /** @property sniffEndpoint Endpoint to ping during a sniff + * @remarks Sniffing might not be the best solution for you. Read https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how to learn more. + * @defaultValue "_nodes/_all/http" */ sniffEndpoint?: string + /** @property sniffOnConnectionFault Perform a sniff on connection fault + * @remarks Sniffing might not be the best solution for you. Read https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how to learn more. + * @defaultValue false */ sniffOnConnectionFault?: boolean + /** @property resurrectStrategy Strategy for resurrecting dead nodes when using `ClusterConnectionPool`. 'ping' will issue a test request to a node and resurrect it if it responds. 'optimistic' marks a node as alive without testing it. 'none' will never attempt to revive a dead connection. + * @defaultValue 'ping' */ resurrectStrategy?: 'ping' | 'optimistic' | 'none' + /** @property compression Enables gzip request body compression + * @defaultValue `true` if connecting to Elastic Cloud, otherwise `false`. */ compression?: boolean + /** @property tls [TLS configuration](https://nodejs.org/api/tls.html) + * @defaultValue null */ tls?: TlsConnectionOptions + /** @property agent Custom HTTP agent options + * @defaultValue null */ agent?: HttpAgentOptions | UndiciAgentOptions | agentFn | false + /** @property nodeFilter A custom function used by the connection pool to determine which nodes are qualified to receive a request + * @defaultValue () => true */ nodeFilter?: nodeFilterFn + /** @property nodeSelector A custom function used by the connection pool to determine which node should receive the next request + * @defaultValue A "round robin" function that loops sequentially through each node in the pool.
*/ nodeSelector?: nodeSelectorFn + /** @property headers Custom HTTP headers that should be sent with each request + * @defaultValue An object with a custom `user-agent` header */ headers?: Record + /** @property opaqueIdPrefix A string prefix to apply to every generated X-Opaque-Id header + * @defaultValue null */ opaqueIdPrefix?: string + /** @property generateRequestId A custom function for generating unique IDs for each request, to make it easier to associate each API request with a single response + * @defaultValue A function that increments a number counter starting from 1 */ generateRequestId?: generateRequestIdFn + /** @property name A name for this client + * @defaultValue 'elasticsearch-js' */ name?: string | symbol + /** @property auth Authentication options for this Elasticsearch cluster + * @defaultValue null */ auth?: BasicAuth | ApiKeyAuth | BearerAuth + /** @property context A custom object attached to each request that can be used to pass data to client events + * @defaultValue null */ context?: Context + /** @property proxy A proxy URL that, when provided, the client will automatically send all requests through + * @defaultValue null */ proxy?: string | URL + /** @property enableMetaHeader If true, adds a header named `x-elastic-client-meta`, containing a small amount of high-level telemetry data, such as the client and platform version + * @defaultValue true */ enableMetaHeader?: boolean + /** @property cloud Custom configuration for connecting to Elastic Cloud, in lieu of a `node` or `nodes` configuration + * @remarks Read https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#client-usage for more details + * @defaultValue null */ cloud?: { id: string } + /** @property disablePrototypePoisoningProtection Disables safe JSON parsing that protects against prototype poisoning attacks; disabled by default, as it can introduce a performance penalty + * @defaultValue true */ disablePrototypePoisoningProtection?: boolean | 'proto' | 'constructor' + /** @property caFingerprint If configured, verifies that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint; only accepts SHA256 digest fingerprints + * @defaultValue null */ caFingerprint?: string + /** @property maxResponseSize When configured, verifies that the uncompressed response size is lower than the configured number. If it's higher, it will abort the request. It cannot be higher than `buffer.constants.MAX_STRING_LENGTH` + * @defaultValue null */ maxResponseSize?: number + /** @property maxCompressedResponseSize When configured, verifies that the compressed response size is lower than the configured number. If it's higher, it will abort the request.
It cannot be higher than `buffer.constants.MAX_LENGTH` + * @defaultValue null */ maxCompressedResponseSize?: number + /** @property redaction Options for how to redact potentially sensitive data from metadata attached to `Error` objects + * @remarks Read https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/advanced-config.html#redaction for more details + * @defaultValue Configuration that will replace known sources of sensitive data */ redaction?: RedactionOptions } @@ -127,6 +202,7 @@ export default class Client extends API { transport: SniffingTransport serializer: Serializer helpers: Helpers + constructor (opts: ClientOptions) { super() // @ts-expect-error kChild symbol is for internal use only @@ -139,7 +215,7 @@ opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}` - // Cloud has better performances with compression enabled + // Cloud has better performance with compression enabled // see https://github.com/elastic/elasticsearch-py/pull/704. // So unless the user specifies otherwise, we enable compression. if (opts.compression == null) opts.compression = true @@ -323,6 +399,10 @@ }) } + /** + * Creates a child client instance that shares its connection pool with the parent client + * @see {@link https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html} + */ child (opts: ClientOptions): Client { // Merge the new options with the initial ones // @ts-expect-error kChild symbol is for internal use only @@ -344,6 +424,9 @@ return new Client(options) } + /** + * Closes all connections in the connection pool. Connections shared with any parent or child instances will also be closed. + */ async close (): Promise { return await this.connectionPool.empty() } From 26ae2600585d071eb1ba9b1598a441ab02d0295c Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 21 Nov 2024 10:14:17 -0600 Subject: [PATCH 427/647] Ignore tap artifacts (#2487) --- .dockerignore | 1 + .gitignore | 1 + .npmignore | 1 + 3 files changed, 3 insertions(+) diff --git a/.dockerignore b/.dockerignore index e34f9ff27..a448fae9c 100644 --- a/.dockerignore +++ b/.dockerignore @@ -5,3 +5,4 @@ elasticsearch .git lib junit-output +.tap diff --git a/.gitignore b/.gitignore index 99b15ab2f..adec49623 100644 --- a/.gitignore +++ b/.gitignore @@ -67,3 +67,4 @@ junit-output bun.lockb test-results processinfo +.tap diff --git a/.npmignore b/.npmignore index 08c1043f8..8a921bbd6 100644 --- a/.npmignore +++ b/.npmignore @@ -73,3 +73,4 @@ CONTRIBUTING.md src bun.lockb +.tap From 6009fab7fe70d278e1548b7922ff0f2e6b44ca6b Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 21 Nov 2024 10:40:58 -0600 Subject: [PATCH 428/647] Update changelog to include 8.16.2 and 8.15.3 (#2490) --- docs/changelog.asciidoc | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 684829494..86998f309 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -1,6 +1,22 @@ [[changelog-client]] == Release notes +[discrete] +=== 8.16.2 + +[discrete] +==== Fixes + +[discrete] +===== Improved support for Elasticsearch `v8.16` + +Updated TypeScript types based on fixes and improvements to the Elasticsearch specification.
+ +[discrete] +===== Drop testing artifacts from npm package + +Tap, the unit testing tool used by this project, was recently upgraded and started writing to a `.tap` directory. Since tests are run prior to an `npm publish` in CI, this directory was being included in the published package and bloating its size. + [discrete] === 8.16.1 @@ -37,11 +53,27 @@ The ES|QL helper can now return results as an Apache Arrow `Table` or `RecordBat The client's `disablePrototypePoisoningProtection` option was set to `true` by default, but when it was set to any other value it was ignored, making it impossible to enable prototype poisoning protection without providing a custom serializer implementation. +[discrete] +=== 8.15.3 + +[discrete] +==== Fixes + +[discrete] +===== Improved support for Elasticsearch `v8.15` + +Updated TypeScript types based on fixes and improvements to the Elasticsearch specification. + +[discrete] +===== Drop testing artifacts from npm package + +Tap, the unit testing tool, was recently upgraded and started writing to a `.tap` directory. Since tests are run prior to an `npm publish` in CI, this directory was being included in the published package and bloating its size. + [discrete] === 8.15.2 [discrete] -==== Features +==== Fixes [discrete] ===== Improved support for Elasticsearch `v8.15` @@ -52,7 +84,7 @@ Updated TypeScript types based on fixes and improvements to the Elasticsearch sp === 8.15.1 [discrete] -==== Features +==== Fixes [discrete] ===== Improved support for Elasticsearch `v8.15` From 86f488f68f39a8037f83a728be3fc500cf0cb135 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 12:03:41 -0600 Subject: [PATCH 429/647] Update dependency @types/node to v22.10.1 (#2499) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index be1048c4d..268b95600 100644 --- a/package.json +++ b/package.json @@ -60,7 +60,7 @@ "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "22.9.0", + "@types/node": "22.10.1", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 5cb670256ec4daedcc8cbbe41b8046e19495f0cd Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 18:07:44 +0000 Subject: [PATCH 430/647] Update dependency typescript to v5.7.2 (#2506) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Co-authored-by: Josh Mock --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 268b95600..dd3e9f879 100644 --- a/package.json +++ b/package.json @@ -84,7 +84,7 @@ "tap": "21.0.1", "ts-node": "10.9.2", "ts-standard": "12.0.2", - "typescript": "5.6.3", + "typescript": "5.7.2", "workq": "3.0.0", "xmlbuilder2": "3.1.1", "zx": "7.2.3" From c1e90b12f00281645f6c1dc95755b4eca5c3dd91 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 18:12:01 +0000 Subject: [PATCH 431/647] Update dependency @elastic/request-converter to v8.16.2 (#2505) Co-authored-by: elastic-renovate-prod[bot] 
<174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index dd3e9f879..4894fefb3 100644 --- a/package.json +++ b/package.json @@ -56,7 +56,7 @@ "node": ">=18" }, "devDependencies": { - "@elastic/request-converter": "8.16.1", + "@elastic/request-converter": "8.16.2", "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", "@types/debug": "4.1.12", "@types/ms": "0.7.34", From ec0c561e36e392f27aac99b84bfbe3745bb8b03d Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 2 Dec 2024 18:12:27 +0000 Subject: [PATCH 432/647] Auto-generated code for main (#2504) Co-authored-by: Josh Mock --- .../015e6e6132b6d6d44bddb06bc3b316ed.asciidoc | 46 ++++++ .../0165d22da5f2fc7678392b31d8eb5566.asciidoc | 18 ++ .../0bc6155e0c88062a4d8490da49db3aa8.asciidoc | 49 ++++++ .../0d689ac6e78be5d438f9b5d441be2b44.asciidoc | 57 +++++++ ...17b1647c8509543f2388c886f2584a20.asciidoc} | 2 +- ...22b176a184517cf1b5801f5eb4f17f97.asciidoc} | 4 +- .../2577acb462b95bd4394523cf2f8a661f.asciidoc | 28 ---- .../2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc | 16 -- .../30d051f534aeb884176eedb2c11dac85.asciidoc | 23 +++ ...4edfb5934d14ad7655bd7e19a112b5c0.asciidoc} | 10 +- ...5f16358ebb5d14b86f57612d5f92d923.asciidoc} | 1 - ...68d7f7d4d268ee98caead5aef19933d6.asciidoc} | 2 +- .../76e02434835630cb830724beb92df354.asciidoc | 44 +++++ .../78043831fd32004a82930c8ac8a1d809.asciidoc | 46 ++++++ .../79d206a528be704050a437adce2496dd.asciidoc | 23 +++ ...7a2fdfd7b0553d63440af7598f9ad867.asciidoc} | 2 +- ...8a0b5f759de3f27f0801c1176e616117.asciidoc} | 1 - .../9313f534e1aa266cde7d4af74665497f.asciidoc | 13 ++ .../96e88611f99e6834bd64b58dc8a282c1.asciidoc | 18 ++ .../97c6c07f46f4177f0565a04bc50924a3.asciidoc | 37 +++++ .../a9f14efc26fdd3c37a71f06c310163d9.asciidoc | 27 +++ .../ac22cc2b0f4ad659055feed2852a2d59.asciidoc | 37 +++++ ...ad9889fd8a4b5930e312a51f3bc996dc.asciidoc} | 2 +- .../ae3473adaf1515afcf7773f26c018e5c.asciidoc | 14 ++ .../bb2ba5d1885f87506f90dbb002e518f4.asciidoc | 45 +++++ .../bee3fda7bb07086243424b62e5b16ca7.asciidoc | 83 ++++++++++ .../d01a590fa9ea8a0cb34ed8dda502296c.asciidoc | 11 -- .../d4158d486e7fee2702a14068b69e3b33.asciidoc | 154 ++++++++++++++++++ .../e22a1da3c622611be6855e534c0709ae.asciidoc | 16 ++ .../e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc | 44 +++++ .../ee05714a83d75fb6858e3b9fcbeb8f8b.asciidoc | 94 +++++++++++ docs/reference.asciidoc | 29 +++- src/api/types.ts | 65 ++++++-- src/api/typesWithBodyKey.ts | 65 ++++++-- 34 files changed, 1024 insertions(+), 102 deletions(-) create mode 100644 docs/doc_examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc create mode 100644 docs/doc_examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc create mode 100644 docs/doc_examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc create mode 100644 docs/doc_examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc rename docs/doc_examples/{c02c2916b97b6fa7db82dbc7f0378310.asciidoc => 17b1647c8509543f2388c886f2584a20.asciidoc} (92%) rename docs/doc_examples/{4dab4c5168047ba596af1beb0e55b845.asciidoc => 22b176a184517cf1b5801f5eb4f17f97.asciidoc} (69%) delete mode 100644 docs/doc_examples/2577acb462b95bd4394523cf2f8a661f.asciidoc delete mode 100644 docs/doc_examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc create mode 100644 docs/doc_examples/30d051f534aeb884176eedb2c11dac85.asciidoc rename docs/doc_examples/{191074b2eebd5f74e628c2ada4b6d2e4.asciidoc => 4edfb5934d14ad7655bd7e19a112b5c0.asciidoc} (95%) rename 
docs/doc_examples/{19f1f9f25933f8e7aba59a10881c648b.asciidoc => 5f16358ebb5d14b86f57612d5f92d923.asciidoc} (89%) rename docs/doc_examples/{2fd458d37aab509fe2d970c0b6e2a10f.asciidoc => 68d7f7d4d268ee98caead5aef19933d6.asciidoc} (97%) create mode 100644 docs/doc_examples/76e02434835630cb830724beb92df354.asciidoc create mode 100644 docs/doc_examples/78043831fd32004a82930c8ac8a1d809.asciidoc create mode 100644 docs/doc_examples/79d206a528be704050a437adce2496dd.asciidoc rename docs/doc_examples/{58f6b72009512851843c7b7a20e9504a.asciidoc => 7a2fdfd7b0553d63440af7598f9ad867.asciidoc} (94%) rename docs/doc_examples/{c8fa8d7e029792d539464fede18ce258.asciidoc => 8a0b5f759de3f27f0801c1176e616117.asciidoc} (89%) create mode 100644 docs/doc_examples/9313f534e1aa266cde7d4af74665497f.asciidoc create mode 100644 docs/doc_examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc create mode 100644 docs/doc_examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc create mode 100644 docs/doc_examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc create mode 100644 docs/doc_examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc rename docs/doc_examples/{7b9691bd34a02dd859562eb927f175e0.asciidoc => ad9889fd8a4b5930e312a51f3bc996dc.asciidoc} (93%) create mode 100644 docs/doc_examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc create mode 100644 docs/doc_examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc create mode 100644 docs/doc_examples/bee3fda7bb07086243424b62e5b16ca7.asciidoc delete mode 100644 docs/doc_examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc create mode 100644 docs/doc_examples/d4158d486e7fee2702a14068b69e3b33.asciidoc create mode 100644 docs/doc_examples/e22a1da3c622611be6855e534c0709ae.asciidoc create mode 100644 docs/doc_examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc create mode 100644 docs/doc_examples/ee05714a83d75fb6858e3b9fcbeb8f8b.asciidoc diff --git a/docs/doc_examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc b/docs/doc_examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc new file mode 100644 index 000000000..dc90ae673 --- /dev/null +++ b/docs/doc_examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + range: { + year: { + gt: 2023, + }, + }, + }, + }, + }, + { + standard: { + query: { + term: { + topic: "elastic", + }, + }, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + _source: false, + aggs: { + topics: { + terms: { + field: "topic", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc b/docs/doc_examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc new file mode 100644 index 000000000..279e91656 --- /dev/null +++ b/docs/doc_examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "my-rerank-model", + inference_config: { + service: "cohere", + service_settings: { + model_id: "rerank-english-v3.0", + api_key: "{{COHERE_API_KEY}}", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc 
b/docs/doc_examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc new file mode 100644 index 000000000..01200be80 --- /dev/null +++ b/docs/doc_examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example_nested", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + nested: { + path: "nested_field", + inner_hits: { + name: "nested_vector", + _source: false, + fields: ["nested_field.paragraph_id"], + }, + query: { + knn: { + field: "nested_field.nested_vector", + query_vector: [1, 0, 0.5], + k: 10, + }, + }, + }, + }, + }, + }, + { + standard: { + query: { + term: { + topic: "ai", + }, + }, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + _source: ["topic"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc b/docs/doc_examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc new file mode 100644 index 000000000..c95b502ca --- /dev/null +++ b/docs/doc_examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + term: { + topic: "elastic", + }, + }, + }, + }, + { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: + "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + _source: false, + size: 1, + explain: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc b/docs/doc_examples/17b1647c8509543f2388c886f2584a20.asciidoc similarity index 92% rename from docs/doc_examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc rename to docs/doc_examples/17b1647c8509543f2388c886f2584a20.asciidoc index df440d14e..e1f4fbd3c 100644 --- a/docs/doc_examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc +++ b/docs/doc_examples/17b1647c8509543f2388c886f2584a20.asciidoc @@ -16,7 +16,7 @@ const response = await client.search({ }, }, field: "text", - inference_id: "my-cohere-rerank-model", + inference_id: "elastic-rerank", inference_text: "How often does the moon hide the sun?", rank_window_size: 100, min_score: 0.5, diff --git a/docs/doc_examples/4dab4c5168047ba596af1beb0e55b845.asciidoc b/docs/doc_examples/22b176a184517cf1b5801f5eb4f17f97.asciidoc similarity index 69% rename from docs/doc_examples/4dab4c5168047ba596af1beb0e55b845.asciidoc rename to docs/doc_examples/22b176a184517cf1b5801f5eb4f17f97.asciidoc index 0ba906074..a0c450d19 100644 --- a/docs/doc_examples/4dab4c5168047ba596af1beb0e55b845.asciidoc +++ b/docs/doc_examples/22b176a184517cf1b5801f5eb4f17f97.asciidoc @@ -3,8 +3,8 @@ [source, js] ---- -const response = await client.cluster.getSettings({ - flat_settings: "true", +const response = await client.indices.rollover({ + alias: "datastream", }); console.log(response); ---- diff --git a/docs/doc_examples/2577acb462b95bd4394523cf2f8a661f.asciidoc 
b/docs/doc_examples/2577acb462b95bd4394523cf2f8a661f.asciidoc deleted file mode 100644 index 9c0aff110..000000000 --- a/docs/doc_examples/2577acb462b95bd4394523cf2f8a661f.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.esql.query({ - format: "txt", - query: - "\n FROM library\n | SORT page_count DESC\n | KEEP name, author\n | LOOKUP era ON author\n | LIMIT 5\n ", - tables: { - era: { - author: { - keyword: [ - "Frank Herbert", - "Peter F. Hamilton", - "Vernor Vinge", - "Alastair Reynolds", - "James S.A. Corey", - ], - }, - era: { - keyword: ["The New Wave", "Diamond", "Diamond", "Diamond", "Hadron"], - }, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc b/docs/doc_examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc deleted file mode 100644 index 5317e039e..000000000 --- a/docs/doc_examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.cluster.putSettings({ - persistent: { - "cluster.indices.close.enable": false, - "indices.recovery.max_bytes_per_sec": "50mb", - }, - transient: { - "*": null, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/30d051f534aeb884176eedb2c11dac85.asciidoc b/docs/doc_examples/30d051f534aeb884176eedb2c11dac85.asciidoc new file mode 100644 index 000000000..0ea31b07a --- /dev/null +++ b/docs/doc_examples/30d051f534aeb884176eedb2c11dac85.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "my-elastic-rerank", + inference_config: { + service: "elasticsearch", + service_settings: { + model_id: ".rerank-v1", + num_threads: 1, + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 1, + max_number_of_allocations: 4, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc b/docs/doc_examples/4edfb5934d14ad7655bd7e19a112b5c0.asciidoc similarity index 95% rename from docs/doc_examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc rename to docs/doc_examples/4edfb5934d14ad7655bd7e19a112b5c0.asciidoc index 9b24f99c0..3bce99ecc 100644 --- a/docs/doc_examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc +++ b/docs/doc_examples/4edfb5934d14ad7655bd7e19a112b5c0.asciidoc @@ -8,11 +8,6 @@ const response = await client.search({ query: { bool: { must: [ - { - term: { - "category.keyword": "Main Course", - }, - }, { term: { tags: "vegetarian", @@ -27,6 +22,11 @@ const response = await client.search({ }, ], should: [ + { + term: { + category: "Main Course", + }, + }, { multi_match: { query: "curry spicy", diff --git a/docs/doc_examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc b/docs/doc_examples/5f16358ebb5d14b86f57612d5f92d923.asciidoc similarity index 89% rename from docs/doc_examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc rename to docs/doc_examples/5f16358ebb5d14b86f57612d5f92d923.asciidoc index 1d9708b3c..454dd9502 100644 --- a/docs/doc_examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc +++ b/docs/doc_examples/5f16358ebb5d14b86f57612d5f92d923.asciidoc 
@@ -9,7 +9,6 @@ const response = await client.indices.create({ properties: { inference_field: { type: "semantic_text", - inference_id: "my-elser-endpoint", }, }, }, diff --git a/docs/doc_examples/2fd458d37aab509fe2d970c0b6e2a10f.asciidoc b/docs/doc_examples/68d7f7d4d268ee98caead5aef19933d6.asciidoc similarity index 97% rename from docs/doc_examples/2fd458d37aab509fe2d970c0b6e2a10f.asciidoc rename to docs/doc_examples/68d7f7d4d268ee98caead5aef19933d6.asciidoc index 2f15a5eb4..021aa7e19 100644 --- a/docs/doc_examples/2fd458d37aab509fe2d970c0b6e2a10f.asciidoc +++ b/docs/doc_examples/68d7f7d4d268ee98caead5aef19933d6.asciidoc @@ -45,7 +45,7 @@ console.log(response); const response1 = await client.indices.putIndexTemplate({ name: 2, - index_patterns: ["k8s*"], + index_patterns: ["k9s*"], composed_of: ["destination_template"], data_stream: {}, }); diff --git a/docs/doc_examples/76e02434835630cb830724beb92df354.asciidoc b/docs/doc_examples/76e02434835630cb830724beb92df354.asciidoc new file mode 100644 index 000000000..ab4d1fc80 --- /dev/null +++ b/docs/doc_examples/76e02434835630cb830724beb92df354.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + { + text_similarity_reranker: { + retriever: { + standard: { + query: { + term: { + topic: "ai", + }, + }, + }, + }, + field: "text", + inference_id: "my-rerank-model", + inference_text: + "Can I use generative AI to identify user intent and improve search relevance?", + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/78043831fd32004a82930c8ac8a1d809.asciidoc b/docs/doc_examples/78043831fd32004a82930c8ac8a1d809.asciidoc new file mode 100644 index 000000000..5151bb769 --- /dev/null +++ b/docs/doc_examples/78043831fd32004a82930c8ac8a1d809.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + text_similarity_reranker: { + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: + "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + field: "text", + inference_id: "my-rerank-model", + inference_text: + "What are the state of the art applications of AI in information retrieval?", + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/79d206a528be704050a437adce2496dd.asciidoc b/docs/doc_examples/79d206a528be704050a437adce2496dd.asciidoc new file mode 100644 index 000000000..60583c320 --- /dev/null +++ b/docs/doc_examples/79d206a528be704050a437adce2496dd.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "my-elastic-rerank", + 
inference_config: { + service: "elasticsearch", + service_settings: { + model_id: ".rerank-v1", + num_threads: 1, + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 1, + max_number_of_allocations: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/58f6b72009512851843c7b7a20e9504a.asciidoc b/docs/doc_examples/7a2fdfd7b0553d63440af7598f9ad867.asciidoc similarity index 94% rename from docs/doc_examples/58f6b72009512851843c7b7a20e9504a.asciidoc rename to docs/doc_examples/7a2fdfd7b0553d63440af7598f9ad867.asciidoc index ab21c2d80..63fb1f69a 100644 --- a/docs/doc_examples/58f6b72009512851843c7b7a20e9504a.asciidoc +++ b/docs/doc_examples/7a2fdfd7b0553d63440af7598f9ad867.asciidoc @@ -4,7 +4,7 @@ [source, js] ---- const response = await client.indices.create({ - index: "my-index-000002", + index: "my-index-000003", mappings: { properties: { inference_field: { diff --git a/docs/doc_examples/c8fa8d7e029792d539464fede18ce258.asciidoc b/docs/doc_examples/8a0b5f759de3f27f0801c1176e616117.asciidoc similarity index 89% rename from docs/doc_examples/c8fa8d7e029792d539464fede18ce258.asciidoc rename to docs/doc_examples/8a0b5f759de3f27f0801c1176e616117.asciidoc index 598fd1f7b..5ac85568d 100644 --- a/docs/doc_examples/c8fa8d7e029792d539464fede18ce258.asciidoc +++ b/docs/doc_examples/8a0b5f759de3f27f0801c1176e616117.asciidoc @@ -9,7 +9,6 @@ const response = await client.indices.create({ properties: { content: { type: "semantic_text", - inference_id: "my-elser-endpoint", }, }, }, diff --git a/docs/doc_examples/9313f534e1aa266cde7d4af74665497f.asciidoc b/docs/doc_examples/9313f534e1aa266cde7d4af74665497f.asciidoc new file mode 100644 index 000000000..86e737ce5 --- /dev/null +++ b/docs/doc_examples/9313f534e1aa266cde7d4af74665497f.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.put({ + connector_id: "my-{service-name-stub}-connector", + index_name: "my-elasticsearch-index", + name: "Content synced from {service-name}", + service_type: "{service-name-stub}", +}); +console.log(response); +---- diff --git a/docs/doc_examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc b/docs/doc_examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc new file mode 100644 index 000000000..d3786611f --- /dev/null +++ b/docs/doc_examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000002", + mappings: { + properties: { + inference_field: { + type: "semantic_text", + inference_id: "my-openai-endpoint", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc b/docs/doc_examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc new file mode 100644 index 000000000..ae96b5501 --- /dev/null +++ b/docs/doc_examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: "(information retrieval) OR (artificial intelligence)", + default_field: 
"text", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc b/docs/doc_examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc new file mode 100644 index 000000000..488fb5205 --- /dev/null +++ b/docs/doc_examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + retriever: { + text_similarity_reranker: { + retriever: { + standard: { + query: { + match: { + text: "How often does the moon hide the sun?", + }, + }, + }, + }, + field: "text", + inference_id: "my-elastic-rerank", + inference_text: "How often does the moon hide the sun?", + rank_window_size: 100, + min_score: 0.5, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc b/docs/doc_examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc new file mode 100644 index 000000000..0e3921a75 --- /dev/null +++ b/docs/doc_examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + text_similarity_reranker: { + retriever: { + text_similarity_reranker: { + retriever: { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + rank_window_size: 100, + field: "text", + inference_id: "my-rerank-model", + inference_text: + "What are the state of the art applications of AI in information retrieval?", + }, + }, + rank_window_size: 10, + field: "text", + inference_id: "my-other-more-expensive-rerank-model", + inference_text: + "Applications of Large Language Models in technology and their impact on user satisfaction", + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7b9691bd34a02dd859562eb927f175e0.asciidoc b/docs/doc_examples/ad9889fd8a4b5930e312a51f3bc996dc.asciidoc similarity index 93% rename from docs/doc_examples/7b9691bd34a02dd859562eb927f175e0.asciidoc rename to docs/doc_examples/ad9889fd8a4b5930e312a51f3bc996dc.asciidoc index 847cbc4b8..ca15245b8 100644 --- a/docs/doc_examples/7b9691bd34a02dd859562eb927f175e0.asciidoc +++ b/docs/doc_examples/ad9889fd8a4b5930e312a51f3bc996dc.asciidoc @@ -12,7 +12,7 @@ const response = await client.inference.put({ adaptive_allocations: { enabled: true, min_number_of_allocations: 1, - max_number_of_allocations: 10, + max_number_of_allocations: 4, }, num_threads: 1, model_id: ".elser_model_2", diff --git a/docs/doc_examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc b/docs/doc_examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc new file mode 100644 index 000000000..67a3d5d75 --- /dev/null +++ b/docs/doc_examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.put({ + connector_id: "my-{service-name-stub}-connector", + index_name: "my-elasticsearch-index", + name: "Content synced from 
{service-name}", + service_type: "{service-name-stub}", + is_native: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc b/docs/doc_examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc new file mode 100644 index 000000000..ac5f2bf5b --- /dev/null +++ b/docs/doc_examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + highlight: { + fields: { + text: { + fragment_size: 150, + number_of_fragments: 3, + }, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bee3fda7bb07086243424b62e5b16ca7.asciidoc b/docs/doc_examples/bee3fda7bb07086243424b62e5b16ca7.asciidoc new file mode 100644 index 000000000..9ab13275d --- /dev/null +++ b/docs/doc_examples/bee3fda7bb07086243424b62e5b16ca7.asciidoc @@ -0,0 +1,83 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "retrievers_example_nested", + mappings: { + properties: { + nested_field: { + type: "nested", + properties: { + paragraph_id: { + type: "keyword", + }, + nested_vector: { + type: "dense_vector", + dims: 3, + similarity: "l2_norm", + index: true, + }, + }, + }, + topic: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "retrievers_example_nested", + id: 1, + document: { + nested_field: [ + { + paragraph_id: "1a", + nested_vector: [-1.12, -0.59, 0.78], + }, + { + paragraph_id: "1b", + nested_vector: [-0.12, 1.56, 0.42], + }, + { + paragraph_id: "1c", + nested_vector: [1, -1, 0], + }, + ], + topic: ["ai"], + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "retrievers_example_nested", + id: 2, + document: { + nested_field: [ + { + paragraph_id: "2a", + nested_vector: [0.23, 1.24, 0.65], + }, + ], + topic: ["information_retrieval"], + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "retrievers_example_nested", + id: 3, + document: { + topic: ["ai"], + }, +}); +console.log(response3); + +const response4 = await client.indices.refresh({ + index: "retrievers_example_nested", +}); +console.log(response4); +---- diff --git a/docs/doc_examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc b/docs/doc_examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc deleted file mode 100644 index ec1e35374..000000000 --- a/docs/doc_examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.cluster.getSettings({ - flat_settings: "true", - filter_path: "transient", -}); -console.log(response); ----- diff --git a/docs/doc_examples/d4158d486e7fee2702a14068b69e3b33.asciidoc 
b/docs/doc_examples/d4158d486e7fee2702a14068b69e3b33.asciidoc new file mode 100644 index 000000000..ccee5c776 --- /dev/null +++ b/docs/doc_examples/d4158d486e7fee2702a14068b69e3b33.asciidoc @@ -0,0 +1,154 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "datastream_template", + index_patterns: ["datastream*"], + data_stream: {}, + template: { + lifecycle: { + downsampling: [ + { + after: "1m", + fixed_interval: "1h", + }, + ], + }, + settings: { + index: { + mode: "time_series", + }, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + kubernetes: { + properties: { + container: { + properties: { + cpu: { + properties: { + usage: { + properties: { + core: { + properties: { + ns: { + type: "long", + }, + }, + }, + limit: { + properties: { + pct: { + type: "float", + }, + }, + }, + nanocores: { + type: "long", + time_series_metric: "gauge", + }, + node: { + properties: { + pct: { + type: "float", + }, + }, + }, + }, + }, + }, + }, + memory: { + properties: { + available: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + majorpagefaults: { + type: "long", + }, + pagefaults: { + type: "long", + time_series_metric: "gauge", + }, + rss: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + usage: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + limit: { + properties: { + pct: { + type: "float", + }, + }, + }, + node: { + properties: { + pct: { + type: "float", + }, + }, + }, + }, + }, + workingset: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + }, + }, + name: { + type: "keyword", + }, + start_time: { + type: "date", + }, + }, + }, + host: { + type: "keyword", + time_series_dimension: true, + }, + namespace: { + type: "keyword", + time_series_dimension: true, + }, + node: { + type: "keyword", + time_series_dimension: true, + }, + pod: { + type: "keyword", + time_series_dimension: true, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e22a1da3c622611be6855e534c0709ae.asciidoc b/docs/doc_examples/e22a1da3c622611be6855e534c0709ae.asciidoc new file mode 100644 index 000000000..26c4b9b0a --- /dev/null +++ b/docs/doc_examples/e22a1da3c622611be6855e534c0709ae.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_query_rules/my-ruleset/_test", + body: { + match_criteria: { + query_string: "puggles", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc b/docs/doc_examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc new file mode 100644 index 000000000..a0bffb528 --- /dev/null +++ b/docs/doc_examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + 
}, + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + collapse: { + field: "year", + inner_hits: { + name: "topic related documents", + _source: ["year"], + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee05714a83d75fb6858e3b9fcbeb8f8b.asciidoc b/docs/doc_examples/ee05714a83d75fb6858e3b9fcbeb8f8b.asciidoc new file mode 100644 index 000000000..abb8e0b60 --- /dev/null +++ b/docs/doc_examples/ee05714a83d75fb6858e3b9fcbeb8f8b.asciidoc @@ -0,0 +1,94 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "retrievers_example", + mappings: { + properties: { + vector: { + type: "dense_vector", + dims: 3, + similarity: "l2_norm", + index: true, + }, + text: { + type: "text", + }, + year: { + type: "integer", + }, + topic: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "retrievers_example", + id: 1, + document: { + vector: [0.23, 0.67, 0.89], + text: "Large language models are revolutionizing information retrieval by boosting search precision, deepening contextual understanding, and reshaping user experiences in data-rich environments.", + year: 2024, + topic: ["llm", "ai", "information_retrieval"], + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "retrievers_example", + id: 2, + document: { + vector: [0.12, 0.56, 0.78], + text: "Artificial intelligence is transforming medicine, from advancing diagnostics and tailoring treatment plans to empowering predictive patient care for improved health outcomes.", + year: 2023, + topic: ["ai", "medicine"], + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "retrievers_example", + id: 3, + document: { + vector: [0.45, 0.32, 0.91], + text: "AI is redefining security by enabling advanced threat detection, proactive risk analysis, and dynamic defenses against increasingly sophisticated cyber threats.", + year: 2024, + topic: ["ai", "security"], + }, +}); +console.log(response3); + +const response4 = await client.index({ + index: "retrievers_example", + id: 4, + document: { + vector: [0.34, 0.21, 0.98], + text: "Elastic introduces Elastic AI Assistant, the open, generative AI sidekick powered by ESRE to democratize cybersecurity and enable users of every skill level.", + year: 2023, + topic: ["ai", "elastic", "assistant"], + }, +}); +console.log(response4); + +const response5 = await client.index({ + index: "retrievers_example", + id: 5, + document: { + vector: [0.11, 0.65, 0.47], + text: "Learn how to spin up a deployment of our hosted Elasticsearch Service and use Elastic Observability to gain deeper insight into the behavior of your applications and systems.", + year: 2024, + topic: ["documentation", "observability", "elastic"], + }, +}); +console.log(response5); + +const response6 = await client.indices.refresh({ + index: "retrievers_example", +}); +console.log(response6); +---- diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index bab82b7fb..6f315c485 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -818,6 +818,8 @@ Random by default. 
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`allow_partial_search_results` (Optional, boolean)*: If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. +If `true`, the point in time will contain all the shards that are available at the time of the request. [discrete] === ping @@ -1558,6 +1560,8 @@ client.asyncSearch.status({ id }) * *Request (object):* ** *`id` (string)*: A unique identifier for the async search. +** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search needs to be available. +Ongoing async searches and any saved search results are deleted after this period. [discrete] ==== submit @@ -1659,14 +1663,13 @@ A partial reduction is performed every time the coordinating node has received a ** *`pre_filter_shard_size` (Optional, number)*: The default value cannot be changed, which enforces the execution of a pre-filter roundtrip to retrieve statistics from each shard so that the ones that surely don’t hold any document matching the query get skipped. ** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to true ** *`routing` (Optional, string)*: A list of specific routing values -** *`scroll` (Optional, string | -1 | 0)* ** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type ** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions. ** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specify suggest mode ** *`suggest_size` (Optional, number)*: How many suggestions to return in response ** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. ** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response -** *`rest_total_hits_as_int` (Optional, boolean)* +** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response ** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field ** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field ** *`q` (Optional, string)*: Query in the Lucene query string syntax @@ -1690,6 +1693,9 @@ client.autoscaling.deleteAutoscalingPolicy({ name }) * *Request (object):* ** *`name` (string)*: the name of the autoscaling policy +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_autoscaling_capacity @@ -1711,9 +1717,15 @@ Do not use this information to make autoscaling decisions. 
{ref}/autoscaling-get-autoscaling-capacity.html[Endpoint documentation]
[source,ts]
----
-client.autoscaling.getAutoscalingCapacity()
+client.autoscaling.getAutoscalingCapacity({ ... })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== get_autoscaling_policy
@@ -1732,6 +1744,8 @@ client.autoscaling.getAutoscalingPolicy({ name })

* *Request (object):*
** *`name` (string)*: the name of the autoscaling policy
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== put_autoscaling_policy
@@ -1751,6 +1765,9 @@ client.autoscaling.putAutoscalingPolicy({ name })

* *Request (object):*
** *`name` (string)*: the name of the autoscaling policy
** *`policy` (Optional, { roles, deciders })*
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
=== cat
@@ -9204,7 +9221,7 @@ client.security.bulkPutRole({ roles })

==== Arguments

* *Request (object):*
-** *`roles` (Record)*: A dictionary of role name to RoleDescriptor objects to add or update
+** *`roles` (Record)*: A dictionary of role name to RoleDescriptor objects to add or update
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default), then refresh the affected shards to make this operation visible to search; if `wait_for`, then wait for a refresh to make this operation visible to search; if `false`, then do nothing with refreshes.

[discrete]
@@ -9361,7 +9378,7 @@ client.security.createApiKey({ ... })

* *Request (object):*
** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. By default, API keys never expire.
** *`name` (Optional, string)*: Specifies the name for this API key.
-** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API.
+** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, the API key will have a point-in-time snapshot of the permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of the API key’s permissions and the authenticated user’s permissions, thereby limiting the access scope for API keys. The structure of a role descriptor is the same as the request for the create role API. For more details, see the create or update roles API.
** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default), then refresh the affected shards to make this operation visible to search; if `wait_for`, then wait for a refresh to make this operation visible to search; if `false`, then do nothing with refreshes.
@@ -10431,7 +10448,7 @@ client.security.updateApiKey({ id })

* *Request (object):*
** *`id` (string)*: The ID of the API key to update.
-** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API.
+** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, the API key will have a point-in-time snapshot of the permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of the API key’s permissions and the authenticated user’s permissions, thereby limiting the access scope for API keys. The structure of a role descriptor is the same as the request for the create role API. For more details, see the create or update roles API.
** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage.
** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key.
diff --git a/src/api/types.ts b/src/api/types.ts index 7a92daa1d..d576ad3c0 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -498,6 +498,15 @@ export interface HealthReportDiskIndicatorDetails { nodes_with_unknown_disk_status: long } +export interface HealthReportFileSettingsIndicator extends HealthReportBaseIndicator { + details?: HealthReportFileSettingsIndicatorDetails +} + +export interface HealthReportFileSettingsIndicatorDetails { + failure_streak: long + most_recent_failure: string +} + export interface HealthReportIlmIndicator extends HealthReportBaseIndicator { details?: HealthReportIlmIndicatorDetails } @@ -533,6 +542,7 @@ export interface HealthReportIndicators { ilm?: HealthReportIlmIndicator slm?: HealthReportSlmIndicator shards_capacity?: HealthReportShardsCapacityIndicator + file_settings?: HealthReportFileSettingsIndicator } export interface HealthReportMasterIsStableIndicator extends HealthReportBaseIndicator { @@ -884,6 +894,7 @@ export interface OpenPointInTimeRequest extends RequestBase { preference?: string routing?: Routing expand_wildcards?: ExpandWildcards + allow_partial_search_results?: boolean index_filter?: QueryDslQueryContainer } @@ -6663,6 +6674,7 @@ export type AsyncSearchGetResponse + rule_type_counts: Record } export interface QueryRulesListRulesetsRequest extends RequestBase { @@ -17267,21 +17285,26 @@ export interface SecurityAccess { } export interface SecurityApiKey { - creation?: long - expiration?: long id: Id - invalidated?: boolean name: Name - realm?: string + type: SecurityApiKeyType + creation: EpochTime + expiration?: EpochTime + invalidated: boolean + invalidation?: EpochTime + username: Username + realm: string realm_type?: string - username?: Username - profile_uid?: string - metadata?: Metadata + metadata: Metadata role_descriptors?: Record limited_by?: Record[] + access?: SecurityAccess + profile_uid?: string _sort?: SortResults } +export type SecurityApiKeyType = 'rest' | 'cross_cluster' + export interface SecurityApplicationGlobalUserPrivileges { manage: SecurityManageUserPrivileges } @@ -17328,7 +17351,7 @@ export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'crea export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity - names: IndexName[] + names: IndexName | IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery allow_restricted_indices?: boolean @@ -17355,17 +17378,23 @@ export interface SecurityRemoteClusterPrivileges { export interface SecurityRemoteIndicesPrivileges { clusters: Names field_security?: SecurityFieldSecurity - names: IndexName[] + names: IndexName | IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery allow_restricted_indices?: boolean } export interface SecurityReplicationAccess { - names: IndexName[] + names: IndexName | IndexName[] allow_restricted_indices?: boolean } +export interface SecurityRestriction { + workflows: SecurityRestrictionWorkflow[] +} + +export type SecurityRestrictionWorkflow = 'search_application_query' | string + export interface SecurityRoleDescriptor { cluster?: SecurityClusterPrivilege[] indices?: SecurityIndicesPrivileges[] @@ -17377,6 +17406,7 @@ export interface SecurityRoleDescriptor { metadata?: Metadata run_as?: string[] description?: string + restriction?: SecurityRestriction transient_metadata?: Record } @@ -17391,6 +17421,7 @@ export interface SecurityRoleDescriptorRead { metadata?: Metadata run_as?: string[] description?: string + restriction?: 
SecurityRestriction transient_metadata?: Record } @@ -17430,7 +17461,7 @@ export interface SecurityRoleTemplateScript { export interface SecuritySearchAccess { field_security?: SecurityFieldSecurity - names: IndexName[] + names: IndexName | IndexName[] query?: SecurityIndicesPrivilegesQuery allow_restricted_indices?: boolean } @@ -17449,7 +17480,7 @@ export interface SecurityUser { export interface SecurityUserIndicesPrivileges { field_security?: SecurityFieldSecurity[] - names: IndexName[] + names: IndexName | IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery[] allow_restricted_indices: boolean @@ -17493,11 +17524,16 @@ export interface SecurityActivateUserProfileRequest extends RequestBase { export type SecurityActivateUserProfileResponse = SecurityUserProfileWithMetadata +export interface SecurityAuthenticateAuthenticateApiKey { + id: Id + name?: Name +} + export interface SecurityAuthenticateRequest extends RequestBase { } export interface SecurityAuthenticateResponse { - api_key?: SecurityApiKey + api_key?: SecurityAuthenticateAuthenticateApiKey authentication_realm: SecurityRealmInfo email?: string | null full_name?: Name | null @@ -19884,6 +19920,7 @@ export interface WatcherReportingEmailAttachment { export type WatcherResponseContentType = 'json' | 'yaml' | 'text' export interface WatcherScheduleContainer { + timezone?: string cron?: WatcherCronExpression daily?: WatcherDailySchedule hourly?: WatcherHourlySchedule diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 1782c9bbf..516bdefc2 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -519,6 +519,15 @@ export interface HealthReportDiskIndicatorDetails { nodes_with_unknown_disk_status: long } +export interface HealthReportFileSettingsIndicator extends HealthReportBaseIndicator { + details?: HealthReportFileSettingsIndicatorDetails +} + +export interface HealthReportFileSettingsIndicatorDetails { + failure_streak: long + most_recent_failure: string +} + export interface HealthReportIlmIndicator extends HealthReportBaseIndicator { details?: HealthReportIlmIndicatorDetails } @@ -554,6 +563,7 @@ export interface HealthReportIndicators { ilm?: HealthReportIlmIndicator slm?: HealthReportSlmIndicator shards_capacity?: HealthReportShardsCapacityIndicator + file_settings?: HealthReportFileSettingsIndicator } export interface HealthReportMasterIsStableIndicator extends HealthReportBaseIndicator { @@ -917,6 +927,7 @@ export interface OpenPointInTimeRequest extends RequestBase { preference?: string routing?: Routing expand_wildcards?: ExpandWildcards + allow_partial_search_results?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { index_filter?: QueryDslQueryContainer @@ -6739,6 +6750,7 @@ export type AsyncSearchGetResponse + rule_type_counts: Record } export interface QueryRulesListRulesetsRequest extends RequestBase { @@ -17645,21 +17663,26 @@ export interface SecurityAccess { } export interface SecurityApiKey { - creation?: long - expiration?: long id: Id - invalidated?: boolean name: Name - realm?: string + type: SecurityApiKeyType + creation: EpochTime + expiration?: EpochTime + invalidated: boolean + invalidation?: EpochTime + username: Username + realm: string realm_type?: string - username?: Username - profile_uid?: string - metadata?: Metadata + metadata: Metadata role_descriptors?: Record limited_by?: Record[] + access?: SecurityAccess + profile_uid?: string _sort?: SortResults } +export type SecurityApiKeyType = 'rest' | 'cross_cluster' + export interface SecurityApplicationGlobalUserPrivileges { manage: SecurityManageUserPrivileges } @@ -17706,7 +17729,7 @@ export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'crea export interface SecurityIndicesPrivileges { field_security?: SecurityFieldSecurity - names: IndexName[] + names: IndexName | IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery allow_restricted_indices?: boolean @@ -17733,17 +17756,23 @@ export interface SecurityRemoteClusterPrivileges { export interface SecurityRemoteIndicesPrivileges { clusters: Names field_security?: SecurityFieldSecurity - names: IndexName[] + names: IndexName | IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery allow_restricted_indices?: boolean } export interface SecurityReplicationAccess { - names: IndexName[] + names: IndexName | IndexName[] allow_restricted_indices?: boolean } +export interface SecurityRestriction { + workflows: SecurityRestrictionWorkflow[] +} + +export type SecurityRestrictionWorkflow = 'search_application_query' | string + export interface SecurityRoleDescriptor { cluster?: SecurityClusterPrivilege[] indices?: SecurityIndicesPrivileges[] @@ -17755,6 +17784,7 @@ export interface SecurityRoleDescriptor { metadata?: Metadata run_as?: string[] description?: string + restriction?: SecurityRestriction transient_metadata?: Record } @@ -17769,6 +17799,7 @@ export interface SecurityRoleDescriptorRead { metadata?: Metadata run_as?: string[] description?: string + restriction?: SecurityRestriction transient_metadata?: Record } @@ -17808,7 +17839,7 @@ export interface SecurityRoleTemplateScript { export interface SecuritySearchAccess { field_security?: SecurityFieldSecurity - names: IndexName[] + names: IndexName | IndexName[] query?: SecurityIndicesPrivilegesQuery allow_restricted_indices?: boolean } @@ -17827,7 +17858,7 @@ export interface SecurityUser { export interface SecurityUserIndicesPrivileges { field_security?: SecurityFieldSecurity[] - names: IndexName[] + names: IndexName | IndexName[] privileges: SecurityIndexPrivilege[] query?: SecurityIndicesPrivilegesQuery[] allow_restricted_indices: boolean @@ -17874,11 +17905,16 @@ export interface SecurityActivateUserProfileRequest extends RequestBase { export type SecurityActivateUserProfileResponse = SecurityUserProfileWithMetadata +export interface SecurityAuthenticateAuthenticateApiKey { + id: Id + name?: Name +} + export interface SecurityAuthenticateRequest extends RequestBase { } export interface SecurityAuthenticateResponse { - api_key?: SecurityApiKey + api_key?: SecurityAuthenticateAuthenticateApiKey authentication_realm: SecurityRealmInfo 
email?: string | null full_name?: Name | null @@ -20389,6 +20425,7 @@ export interface WatcherReportingEmailAttachment { export type WatcherResponseContentType = 'json' | 'yaml' | 'text' export interface WatcherScheduleContainer { + timezone?: string cron?: WatcherCronExpression daily?: WatcherDailySchedule hourly?: WatcherHourlySchedule From 101f34bd5eda3895ce1fd898860de8d7ff7dcf15 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 12:15:23 -0600 Subject: [PATCH 433/647] Update dependency into-stream to v8 (#2507) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 4894fefb3..121639c39 100644 --- a/package.json +++ b/package.json @@ -68,7 +68,7 @@ "chai": "5.1.2", "cross-zip": "4.0.1", "desm": "1.3.1", - "into-stream": "7.0.0", + "into-stream": "8.0.1", "js-yaml": "4.1.0", "license-checker": "25.0.1", "minimist": "1.2.8", From 0f187f47c46d0e57960e16cb957e5d9e0d5c706f Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 2 Dec 2024 12:17:45 -0600 Subject: [PATCH 434/647] Disable Dockerfile Node.js upgrades by Renovate (#2509) --- renovate.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/renovate.json b/renovate.json index 604bcf1f3..84a098694 100644 --- a/renovate.json +++ b/renovate.json @@ -15,6 +15,15 @@ "labels": [ "backport 8.x" ] + }, + { + "matchPackageNames": [ + "node" + ], + "matchManagers": [ + "dockerfile" + ], + "enabled": false } ] } From e30e9641315a273d4d566eb864308baeddd5330b Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 3 Dec 2024 11:18:16 -0600 Subject: [PATCH 435/647] Update stale rules to keep tracking issues open (#2514) --- .github/stale.yml | 26 -------------------------- .github/workflows/stale.yml | 10 +++++----- 2 files changed, 5 insertions(+), 31 deletions(-) delete mode 100644 .github/stale.yml diff --git a/.github/stale.yml b/.github/stale.yml deleted file mode 100644 index e4c52677c..000000000 --- a/.github/stale.yml +++ /dev/null @@ -1,26 +0,0 @@ -# Number of days of inactivity before an issue becomes stale -daysUntilStale: 15 - -# Number of days of inactivity before a stale issue is closed -daysUntilClose: 7 - -# Issues with these labels will never be considered stale -exemptLabels: - - "discussion" - - "feature request" - - "bug" - - "todo" - - "good first issue" - -# Label to use when marking an issue as stale -staleLabel: stale - -# Comment to post when marking an issue as stale. Set to `false` to disable -markComment: | - We understand that this might be important for you, but this issue has been automatically marked as stale because it has not had recent activity either from our end or yours. - It will be closed if no further activity occurs, please write a comment if you would like to keep this going. - - Note: in the past months we have built a new client, that has just landed in master. If you want to open an issue or a pr for the legacy client, you should do that in https://github.com/elastic/elasticsearch-js-legacy - -# Comment to post when closing a stale issue. 
Set to `false` to disable -closeComment: false diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 9f982123c..611d3c6ce 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,8 +1,8 @@ --- -name: 'Close stale issues and PRs' +name: "Close stale issues and PRs" on: schedule: - - cron: '30 1 * * *' + - cron: "30 1 * * *" jobs: stale: @@ -14,8 +14,8 @@ jobs: stale-pr-label: stale days-before-stale: 90 days-before-close: 14 - exempt-issue-labels: 'good first issue' + exempt-issue-labels: "good first issue,tracking" close-issue-label: closed-stale close-pr-label: closed-stale - stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days.' - stale-pr-message: 'This pull request is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days.' + stale-issue-message: "This issue is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days." + stale-pr-message: "This pull request is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days." From 15b9ee2f06985c6f2b9e8dc0fc29fd17fc2915ea Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 5 Dec 2024 10:37:46 -0600 Subject: [PATCH 436/647] Codegen for 8.x clients should use the 8.x generator branch (#2515) --- .github/make.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/make.sh b/.github/make.sh index f02663e97..c4d455168 100755 --- a/.github/make.sh +++ b/.github/make.sh @@ -150,7 +150,7 @@ if [[ -z "${BUILDKITE+x}" ]] && [[ -z "${CI+x}" ]] && [[ -z "${GITHUB_ACTIONS+x} -u "$(id -u):$(id -g)" \ --volume "$repo:/usr/src/elasticsearch-js" \ --volume /usr/src/elasticsearch-js/node_modules \ - --volume "$(realpath $repo/../elastic-client-generator-js):/usr/src/elastic-client-generator-js" \ + --volume "$(realpath "$repo/../elastic-client-generator-js"):/usr/src/elastic-client-generator-js" \ --env "WORKFLOW=$WORKFLOW" \ --name make-elasticsearch-js \ --rm \ @@ -159,6 +159,14 @@ if [[ -z "${BUILDKITE+x}" ]] && [[ -z "${CI+x}" ]] && [[ -z "${GITHUB_ACTIONS+x} node .buildkite/make.mjs --task $TASK ${TASK_ARGS[*]}" else echo -e "\033[34;1mINFO: Running in CI mode" + + # determine branch to clone + GENERATOR_BRANCH="main" + if [[ "$VERSION" == 8.* ]]; then + GENERATOR_BRANCH="8.x" + fi + echo -e "\033[34;1mINFO: Generator branch: $GENERATOR_BRANCH" + docker run \ --volume "$repo:/usr/src/elasticsearch-js" \ --volume /usr/src/elasticsearch-js/node_modules \ From e9c2f8b0afd09e470a2c550bdb1f1a5e77b1f09a Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 5 Dec 2024 10:40:34 -0600 Subject: [PATCH 437/647] Bump version to 9.0.0-alpha.1 (#2516) * Bump version to 9.0.0-alpha.1 * Update npm publish workflow for 9.0.0 alpha --- .github/workflows/npm-publish.yml | 7 ++++--- package.json | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index 68bb353d7..cd8c2caf9 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -23,13 +23,14 @@ jobs: - run: npm install -g npm - run: npm install - run: npm test - - run: npm publish --provenance --access public + - run: npm publish --provenance --access public --tag alpha env: 
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} - - run: | + - name: Publish version on GitHub + run: | version=$(jq -r .version package.json) gh release create \ - -n "[Changelog](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/$BRANCH_NAME/changelog-client.html)" \ + -n "This is a 9.0.0 pre-release alpha. Changes may not be stable." \ --target "$BRANCH_NAME" \ -t "v$version" \ "v$version" diff --git a/package.json b/package.json index 121639c39..68e183f6c 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "8.16.0", - "versionCanary": "8.16.0-canary.0", + "version": "9.0.0-alpha.1", + "versionCanary": "9.0.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", "types": "index.d.ts", From 6447fc10bfe4563435d5911c7c4dcdc0efe11d1b Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 5 Dec 2024 13:03:19 -0600 Subject: [PATCH 438/647] Drop support for body param in helpers and tests (#2521) * Update tests and bulk helper to stop using body param * Update compatible-with content-type header for 9.0 --- src/client.ts | 6 +- src/helpers.ts | 2 +- test/unit/api.test.ts | 123 +-------------------------------- test/unit/helpers/bulk.test.ts | 50 +++++++------- 4 files changed, 31 insertions(+), 150 deletions(-) diff --git a/src/client.ts b/src/client.ts index f65baf0ad..2549a5b37 100644 --- a/src/client.ts +++ b/src/client.ts @@ -383,9 +383,9 @@ export default class Client extends API { maxResponseSize: options.maxResponseSize, maxCompressedResponseSize: options.maxCompressedResponseSize, vendoredHeaders: { - jsonContentType: 'application/vnd.elasticsearch+json; compatible-with=8', - ndjsonContentType: 'application/vnd.elasticsearch+x-ndjson; compatible-with=8', - accept: 'application/vnd.elasticsearch+json; compatible-with=8,text/plain' + jsonContentType: 'application/vnd.elasticsearch+json; compatible-with=9', + ndjsonContentType: 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', + accept: 'application/vnd.elasticsearch+json; compatible-with=9,text/plain' }, redaction: options.redaction }) diff --git a/src/helpers.ts b/src/helpers.ts index 5f4357893..0043a8ab5 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -910,7 +910,7 @@ export default class Helpers { function tryBulk (bulkBody: string[], callback: (err: Error | null, bulkBody: string[]) => void): void { if (shouldAbort) return callback(null, []) - client.bulk(Object.assign({}, bulkOptions, { body: bulkBody }), reqOptions as TransportRequestOptionsWithMeta) + client.bulk(Object.assign({}, bulkOptions, { operations: bulkBody }), reqOptions as TransportRequestOptionsWithMeta) .then(response => { const result = response.body const results = zipBulkResults(result.items, bulkBody) diff --git a/test/unit/api.test.ts b/test/unit/api.test.ts index 8c9a72cdf..a0f513256 100644 --- a/test/unit/api.test.ts +++ b/test/unit/api.test.ts @@ -22,7 +22,7 @@ import { connection } from '../utils' import { Client } from '../..' 
import * as T from '../../lib/api/types' -test('Api without body key and top level body', async t => { +test('Api with top level body', async t => { t.plan(2) const Connection = connection.buildMockConnection({ @@ -50,37 +50,7 @@ test('Api without body key and top level body', async t => { t.equal(response.took, 42) }) -test('Api with body key and top level body', async t => { - t.plan(2) - - const Connection = connection.buildMockConnection({ - onRequest (opts) { - // @ts-expect-error - t.same(JSON.parse(opts.body), { query: { match_all: {} } }) - return { - statusCode: 200, - body: { took: 42 } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection - }) - - const response = await client.search({ - index: 'test', - allow_no_indices: true, - body: { - query: { match_all: {} } - } - }) - - t.equal(response.took, 42) -}) - -test('Api without body key and keyed body', async t => { +test('Api with keyed body', async t => { t.plan(2) const Connection = connection.buildMockConnection({ @@ -108,95 +78,6 @@ test('Api without body key and keyed body', async t => { t.equal(response.result, 'created') }) -test('Api with body key and keyed body', async t => { - t.plan(2) - - const Connection = connection.buildMockConnection({ - onRequest (opts) { - // @ts-expect-error - t.same(JSON.parse(opts.body), { foo: 'bar' }) - return { - statusCode: 200, - body: { result: 'created' } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection - }) - - const response = await client.create({ - index: 'test', - id: '1', - body: { foo: 'bar' } - }) - - t.equal(response.result, 'created') -}) - -test('Using the body key should not mutate the body', async t => { - t.plan(2) - - const Connection = connection.buildMockConnection({ - onRequest (opts) { - // @ts-expect-error - t.same(JSON.parse(opts.body), { query: { match_all: {} }, sort: 'foo' }) - return { - statusCode: 200, - body: { took: 42 } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection - }) - - const body = { query: { match_all: {} } } - await client.search({ - index: 'test', - sort: 'foo', - body - }) - - t.same(body, { query: { match_all: {} } }) -}) - -test('Using the body key with a string value', async t => { - t.plan(2) - - const Connection = connection.buildMockConnection({ - onRequest (opts) { - // @ts-expect-error - t.same(JSON.parse(opts.body), { query: { match_all: {} } }) - return { - statusCode: 200, - body: { took: 42 } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection - }) - - try { - const body = { query: { match_all: {} } } - await client.search({ - index: 'test', - // @ts-expect-error - body: JSON.stringify(body) - }) - t.pass('ok!') - } catch (err: any) { - t.fail(err) - } -}) - test('With generic document', async t => { t.plan(1) diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts index 94db5c5e4..3871c348f 100644 --- a/test/unit/helpers/bulk.test.ts +++ b/test/unit/helpers/bulk.test.ts @@ -58,7 +58,7 @@ test('bulk index', t => { onRequest (params) { t.equal(params.path, '/_bulk') t.match(params.headers, { - 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8', + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` }) // @ts-expect-error @@ -104,7 +104,7 @@ test('bulk index', 
t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) t.notMatch(params.headers, { 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` }) @@ -150,7 +150,7 @@ test('bulk index', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error t.equal(params.body.split('\n').filter(Boolean).length, 6) return { body: { errors: false, items: new Array(3).fill({}) } } @@ -195,7 +195,7 @@ test('bulk index', t => { return { body: { acknowledged: true } } } else { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) @@ -241,7 +241,7 @@ test('bulk index', t => { return { body: { acknowledged: true } } } else { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) @@ -283,7 +283,7 @@ test('bulk index', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test', _id: count } }) @@ -328,7 +328,7 @@ test('bulk index', t => { t.test('Should perform a bulk request (retry)', async t => { async function handler (req: http.IncomingMessage, res: http.ServerResponse) { t.equal(req.url, '/_bulk') - t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) let body = '' req.setEncoding('utf8') @@ -446,7 +446,7 @@ test('bulk index', t => { t.test('Should perform a bulk request (failure)', async t => { async function handler (req: http.IncomingMessage, res: http.ServerResponse) { t.equal(req.url, '/_bulk') - t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) let body = '' req.setEncoding('utf8') @@ -587,7 +587,7 @@ test('bulk index', t => { t.test('Should abort a bulk request', async t => { async function handler (req: http.IncomingMessage, res: http.ServerResponse) { t.equal(req.url, '/_bulk') - 
t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) let body = '' req.setEncoding('utf8') @@ -724,7 +724,7 @@ test('bulk index', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test', _id: count } }) @@ -815,7 +815,7 @@ test('bulk index', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) @@ -914,7 +914,7 @@ test('bulk index', t => { onRequest (params) { t.equal(params.path, '/_bulk') t.match(params.headers, { - 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8', + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` }) // @ts-expect-error @@ -969,7 +969,7 @@ test('bulk create', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { create: { _index: 'test', _id: count } }) @@ -1017,7 +1017,7 @@ test('bulk create', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { create: { _index: 'test', _id: count } }) @@ -1073,7 +1073,7 @@ test('bulk update', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { update: { _index: 'test', _id: count } }) @@ -1122,7 +1122,7 @@ test('bulk update', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; 
compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { update: { _index: 'test', _id: count } }) @@ -1169,7 +1169,7 @@ test('bulk update', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { update: { _index: 'test', _id: count } }) @@ -1223,7 +1223,7 @@ test('bulk delete', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error t.same(JSON.parse(params.body), { delete: { _index: 'test', _id: count++ } }) return { body: { errors: false, items: [{}] } } @@ -1266,7 +1266,7 @@ test('bulk delete', t => { t.test('Should perform a bulk request (failure)', async t => { async function handler (req: http.IncomingMessage, res: http.ServerResponse) { t.equal(req.url, '/_bulk') - t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) let body = '' req.setEncoding('utf8') @@ -1469,7 +1469,7 @@ test('transport options', t => { if (params.path === '/_bulk') { t.match(params.headers, { - 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8', + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', foo: 'bar' }) return { body: { errors: false, items: [{}] } } @@ -1618,7 +1618,7 @@ test('Flush interval', t => { const MockConnection = connection.buildMockConnection({ onRequest (params) { t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) @@ -1671,7 +1671,7 @@ test('Flush interval', t => { onRequest (params) { t.ok(count < 2) t.equal(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8' }) + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) // @ts-expect-error const [action, payload] = params.body.split('\n') t.same(JSON.parse(action), { index: { _index: 'test' } }) @@ -1730,7 +1730,7 @@ test('Flush interval', t => { onRequest (params) { t.equal(params.path, '/_bulk') t.match(params.headers, { - 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=8', + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` }) // @ts-expect-error @@ -1749,12 +1749,12 @@ test('Flush interval', t => { datasource: dataset.slice(), flushBytes: 1, concurrency: 1, - onDocument (doc) { + onDocument (_doc) { return { index: { 
_index: 'test' } } }, - onDrop (doc) { + onDrop (_doc) { t.fail('This should never be called') } }) From a4315a905e818f1aaed39cd3f72b11c65f343842 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 5 Dec 2024 19:28:38 +0000 Subject: [PATCH 439/647] Auto-generated code for main (#2522) --- .../0bee07a581c5776e068f6f4efad5a399.asciidoc | 8 +- .../0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc | 5 +- .../16634cfa7916cf4e8048a1d70e6240f2.asciidoc | 2 +- .../19c00c6b29bc7dbc5e92b3668da2da93.asciidoc | 4 +- .../2a1eece9a59ac1773edcf0a932c26de0.asciidoc | 4 +- .../2afdf0d83724953aa2875b5fb37d60cc.asciidoc | 10 +- .../3f1fe5f5f99b98d0891f38003e10b636.asciidoc | 4 +- .../405511f7c1f12cc0a227b4563fe7b2e2.asciidoc | 6 +- .../4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc | 6 +- .../57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc | 4 +- .../5bba213a7f543190139d1a69ab2ed076.asciidoc | 8 +- .../6b6fd0a5942dfb9762ad2790cf421a80.asciidoc | 2 +- .../6f3b723bf6179b96c3413597ed7f49e1.asciidoc | 4 +- .../77518e8c6198acfe77c0934fd2fe65cb.asciidoc | 4 +- .../7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc | 11 + .../80dd7f5882c59b9c1c90e8351937441f.asciidoc | 4 +- .../91e106a2affbc8df32cd940684a779ed.asciidoc | 17 + .../99fb82d49ac477e6a9dfdd71f9465374.asciidoc | 11 + .../9afa0844883b7471883aa378a8dd10b4.asciidoc | 6 +- .../9c01db07c9ac395b6370e3b33965c21f.asciidoc | 4 +- .../a162eb50853331c80596f5994e9d1c38.asciidoc | 5 +- .../b0bddf2ffaa83049b195829c06b875cd.asciidoc | 6 +- .../b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc | 11 +- .../bcdfaa4487747249699a86a0dcd22f5e.asciidoc | 4 +- .../c580990a70028bb49cca8a6bde86bbf6.asciidoc | 4 +- .../ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc | 4 +- .../d35c8cf7a98b3f112e1de8797ec6689d.asciidoc | 4 +- .../d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc | 15 + .../d8c053ee26c1533ce936ec81101d8e1b.asciidoc | 11 + .../dd71b0c9f9197684ff29c61062c55660.asciidoc | 5 +- .../e3019fd5f23458ae49ad9854c97d321c.asciidoc | 4 +- .../e4b38973c74037335378d8480f1ce894.asciidoc | 4 +- .../f6f647eb644a2d236637ff05f833cb73.asciidoc | 4 +- docs/reference.asciidoc | 198 +- src/api/api/async_search.ts | 53 +- src/api/api/autoscaling.ts | 44 +- src/api/api/bulk.ts | 14 +- src/api/api/capabilities.ts | 11 +- src/api/api/cat.ts | 295 +- src/api/api/ccr.ts | 171 +- src/api/api/clear_scroll.ts | 21 +- src/api/api/close_point_in_time.ts | 21 +- src/api/api/cluster.ts | 201 +- src/api/api/connector.ts | 501 +- src/api/api/count.ts | 23 +- src/api/api/create.ts | 14 +- src/api/api/dangling_indices.ts | 31 +- src/api/api/delete.ts | 11 +- src/api/api/delete_by_query.ts | 21 +- src/api/api/delete_by_query_rethrottle.ts | 11 +- src/api/api/delete_script.ts | 11 +- src/api/api/enrich.ts | 63 +- src/api/api/eql.ts | 59 +- src/api/api/esql.ts | 43 +- src/api/api/exists.ts | 11 +- src/api/api/exists_source.ts | 11 +- src/api/api/explain.ts | 21 +- src/api/api/features.ts | 21 +- src/api/api/field_caps.ts | 21 +- src/api/api/fleet.ts | 80 +- src/api/api/get.ts | 11 +- src/api/api/get_script.ts | 11 +- src/api/api/get_script_context.ts | 11 +- src/api/api/get_script_languages.ts | 11 +- src/api/api/get_source.ts | 11 +- src/api/api/graph.ts | 23 +- src/api/api/health_report.ts | 11 +- src/api/api/ilm.ts | 141 +- src/api/api/index.ts | 14 +- src/api/api/indices.ts | 744 +- src/api/api/inference.ts | 64 +- src/api/api/info.ts | 11 +- src/api/api/ingest.ts | 169 +- src/api/api/knn_search.ts | 21 +- src/api/api/license.ts | 81 +- src/api/api/logstash.ts | 34 +- src/api/api/mget.ts | 21 +- src/api/api/migration.ts | 31 +- src/api/api/ml.ts | 
1107 +- src/api/api/monitoring.ts | 14 +- src/api/api/msearch.ts | 14 +- src/api/api/msearch_template.ts | 14 +- src/api/api/mtermvectors.ts | 21 +- src/api/api/nodes.ts | 81 +- src/api/api/open_point_in_time.ts | 21 +- src/api/api/ping.ts | 11 +- src/api/api/profiling.ts | 41 +- src/api/api/put_script.ts | 21 +- src/api/api/query_rules.ts | 127 +- src/api/api/rank_eval.ts | 21 +- src/api/api/reindex.ts | 21 +- src/api/api/reindex_rethrottle.ts | 11 +- src/api/api/render_search_template.ts | 21 +- src/api/api/rollup.ts | 101 +- src/api/api/scripts_painless_execute.ts | 21 +- src/api/api/scroll.ts | 21 +- src/api/api/search.ts | 23 +- src/api/api/search_application.ts | 114 +- src/api/api/search_mvt.ts | 21 +- src/api/api/search_shards.ts | 11 +- src/api/api/search_template.ts | 21 +- src/api/api/searchable_snapshots.ts | 51 +- src/api/api/security.ts | 904 +- src/api/api/shutdown.ts | 41 +- src/api/api/simulate.ts | 11 +- src/api/api/slm.ts | 101 +- src/api/api/snapshot.ts | 164 +- src/api/api/sql.ts | 103 +- src/api/api/ssl.ts | 11 +- src/api/api/synonyms.ts | 105 +- src/api/api/tasks.ts | 31 +- src/api/api/terms_enum.ts | 21 +- src/api/api/termvectors.ts | 21 +- src/api/api/text_structure.ts | 54 +- src/api/api/transform.ts | 151 +- src/api/api/update.ts | 21 +- src/api/api/update_by_query.ts | 21 +- src/api/api/update_by_query_rethrottle.ts | 11 +- src/api/api/watcher.ts | 161 +- src/api/api/xpack.ts | 21 +- src/api/types.ts | 14 +- src/api/typesWithBodyKey.ts | 21298 ---------------- 122 files changed, 3130 insertions(+), 25672 deletions(-) create mode 100644 docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc create mode 100644 docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc create mode 100644 docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc create mode 100644 docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc create mode 100644 docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc delete mode 100644 src/api/typesWithBodyKey.ts diff --git a/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc b/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc index 5b0c7d4e7..506e4ff5b 100644 --- a/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc +++ b/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc @@ -3,8 +3,12 @@ [source, js] ---- -const response = await client.esql.asyncQuery({ - format: "json", +const response = await client.transport.request({ + method: "POST", + path: "/_query/async", + querystring: { + format: "json", + }, body: { query: "\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", diff --git a/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc index 3801b625f..045036fa2 100644 --- a/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc +++ b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc @@ -3,8 +3,9 @@ [source, js] ---- -const response = await client.searchApplication.renderQuery({ - name: "my-app", +const response = await client.transport.request({ + method: "POST", + path: "/_application/search_application/my-app/_render_query", body: { params: { query_string: "my first query", diff --git a/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc b/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc index 64aa8e2d1..8771d32f7 100644 --- a/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc +++ 
b/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc @@ -11,7 +11,7 @@ const response = await client.searchApplication.put({ script: { lang: "mustache", source: - '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n \n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "highlight": {\n "fields": {\n "title": { "fragment_size": 0 },\n "plot": { "fragment_size": 200 }\n }\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', + '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "highlight": {\n "fields": {\n "title": { "fragment_size": 0 },\n "plot": { "fragment_size": 200 }\n }\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', params: { query: "", _es_filters: {}, diff --git a/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc index c5453ffaf..724b30762 100644 --- a/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc +++ b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.simulate.ingest({ +const response = await client.transport.request({ + method: "POST", + path: "/_ingest/_simulate", body: { docs: [ { diff --git a/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc index 4853ab9a3..b3545c105 100644 --- a/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc +++ b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.oidcLogout({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/oidc/logout", body: { token: "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", diff --git a/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc index 44648b27c..e05299751 100644 --- a/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc +++ b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc @@ -3,10 +3,12 @@ [source, js] ---- -const response = await client.esql.asyncQueryGet({ - id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", - wait_for_completion_timeout: "30s", - body: null, +const response = await client.transport.request({ + method: "GET", + path: "/_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + querystring: { + wait_for_completion_timeout: "30s", + }, }); console.log(response); ---- diff --git a/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc index b8e2ede87..221e42b58 100644 --- a/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc +++ b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.esql.asyncQuery({ +const response = await client.transport.request({ + method: "POST", + path: "/_query/async", body: { query: "\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY 
year\n | SORT year\n | LIMIT 5\n ", diff --git a/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc index ab0617ea6..40d330c9d 100644 --- a/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc +++ b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.esql.asyncQueryGet({ - id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", - body: null, +const response = await client.transport.request({ + method: "GET", + path: "/_query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", }); console.log(response); ---- diff --git a/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc index 5f31b1de6..823515f74 100644 --- a/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc +++ b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.inference.streamInference({ - task_type: "completion", - inference_id: "openai-completion", +const response = await client.transport.request({ + method: "POST", + path: "/_inference/completion/openai-completion/_stream", body: { input: "What is Elastic?", }, diff --git a/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc index dcf9e4b2e..2598c7bce 100644 --- a/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc +++ b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.oidcPrepareAuthentication({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/oidc/prepare", body: { realm: "oidc1", state: "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", diff --git a/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc b/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc index c95b379f3..46cd0a13e 100644 --- a/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc +++ b/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc @@ -3,8 +3,12 @@ [source, js] ---- -const response = await client.esql.asyncQuery({ - format: "json", +const response = await client.transport.request({ + method: "POST", + path: "/_query/async", + querystring: { + format: "json", + }, body: { query: "\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", diff --git a/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc b/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc index 3dff97a73..30adbe22e 100644 --- a/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc +++ b/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc @@ -11,7 +11,7 @@ const response = await client.searchApplication.put({ script: { lang: "mustache", source: - '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n \n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', + '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n 
"from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', params: { query: "", _es_filters: {}, diff --git a/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc index 26bbeb20a..f5995e6b6 100644 --- a/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc +++ b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.bulkUpdateApiKeys({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/api_key/_bulk_update", body: { ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], }, diff --git a/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc index 107aebead..ebe4fce86 100644 --- a/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc +++ b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.textStructure.findMessageStructure({ +const response = await client.transport.request({ + method: "POST", + path: "/_text_structure/find_message_structure", body: { messages: [ "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", diff --git a/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc b/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc new file mode 100644 index 000000000..d1fcf443c --- /dev/null +++ b/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_ingest/ip_location/database/my-database-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc index 659fc0e47..28fdff4a5 100644 --- a/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc +++ b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.bulkUpdateApiKeys({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/api_key/_bulk_update", body: { ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], role_descriptors: { diff --git a/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc b/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc new file mode 100644 index 000000000..8d9b9da8b --- /dev/null +++ b/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_ingest/ip_location/database/my-database-1", + body: { + name: "GeoIP2-Domain", + maxmind: { + account_id: "1234567", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc b/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc new file mode 100644 index 000000000..3f2ffdf6b --- /dev/null +++ b/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT 
EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_ingest/ip_location/database/example-database-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc index 46c6ad610..0cf3aea4d 100644 --- a/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc +++ b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.searchApplication.postBehavioralAnalyticsEvent({ - collection_name: "my_analytics_collection", - event_type: "search_click", +const response = await client.transport.request({ + method: "POST", + path: "/_application/analytics/my_analytics_collection/event/search_click", body: { session: { id: "1797ca95-91c9-4e2e-b1bd-9c38e6f386a9", diff --git a/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc index 64e6db48e..8e19908d0 100644 --- a/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc +++ b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.oidcAuthenticate({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/oidc/authenticate", body: { redirect_uri: "https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", diff --git a/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc index 6b9a54625..afaf9d7dc 100644 --- a/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc +++ b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc @@ -3,8 +3,9 @@ [source, js] ---- -const response = await client.searchApplication.renderQuery({ - name: "my_search_application", +const response = await client.transport.request({ + method: "POST", + path: "/_application/search_application/my_search_application/_render_query", body: { params: { query_string: "rock climbing", diff --git a/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc index 6ce163623..5186df2ae 100644 --- a/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc +++ b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.searchApplication.renderQuery({ - name: "my_search_application", - body: null, +const response = await client.transport.request({ + method: "POST", + path: "/_application/search_application/my_search_application/_render_query", }); console.log(response); ---- diff --git a/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc index 30687161f..57b7fb69d 100644 --- a/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc +++ b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc @@ -208,10 +208,13 @@ const response = await client.bulk({ }); console.log(response); -const response1 = await client.textStructure.findFieldStructure({ - index: "test-logs", - field: "message", - body: null, +const response1 = await client.transport.request({ + method: "GET", + path: "/_text_structure/find_field_structure", + querystring: { + 
index: "test-logs", + field: "message", + }, }); console.log(response1); ---- diff --git a/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc index 06b5f58ec..80d974285 100644 --- a/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc +++ b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.simulate.ingest({ +const response = await client.transport.request({ + method: "POST", + path: "/_ingest/_simulate", body: { docs: [ { diff --git a/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc index 2a14bb328..abc332dd4 100644 --- a/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc +++ b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.bulkUpdateApiKeys({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/api_key/_bulk_update", body: { ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], role_descriptors: {}, diff --git a/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc b/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc index c97a5d54f..d44a7b669 100644 --- a/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc +++ b/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.simulate.ingest({ +const response = await client.transport.request({ + method: "POST", + path: "/_ingest/_simulate", body: { docs: [ { diff --git a/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc index 51dea2365..21bcc10b8 100644 --- a/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc +++ b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.oidcPrepareAuthentication({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/oidc/prepare", body: { iss: "http://127.0.0.1:8080/", login_hint: "this_is_an_opaque_string", diff --git a/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc b/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc new file mode 100644 index 000000000..592744a30 --- /dev/null +++ b/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_ingest/ip_location/database/my-database-2", + body: { + name: "standard_location", + ipinfo: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc b/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc new file mode 100644 index 000000000..e80e90ffd --- /dev/null +++ b/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_ingest/ip_location/database/my-database-id", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc b/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc index ff630da8a..1fe5e6b4c 100644 --- a/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc +++ b/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc @@ -3,6 +3,9 @@ [source, js] ---- -const response = await client.security.getSettings(); +const response = await client.transport.request({ + method: "GET", + path: "/_security/settings", +}); console.log(response); ---- diff --git a/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc index 5a02c157a..febdc3354 100644 --- a/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc +++ b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.oidcPrepareAuthentication({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/oidc/prepare", body: { realm: "oidc1", }, diff --git a/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc b/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc index 92b9b7363..ba52d081d 100644 --- a/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc +++ b/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.simulate.ingest({ +const response = await client.transport.request({ + method: "POST", + path: "/_ingest/_simulate", body: { docs: [ { diff --git a/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc b/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc index 81783cf66..b7fdbd587 100644 --- a/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc +++ b/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.connector.secretPost({ +const response = await client.transport.request({ + method: "POST", + path: "/_connector/_secret", body: { value: "encoded_api_key", }, diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 6f315c485..ddbff0a0b 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -96,7 +96,8 @@ client.closePointInTime({ id }) [discrete] === count -Returns number of documents matching a query. +Count search results. +Get the number of documents matching a query. {ref}/search-count.html[Endpoint documentation] [source,ts] @@ -1643,8 +1644,6 @@ the indices stats API. ** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. ** *`keep_on_completion` (Optional, boolean)*: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. -** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search needs to be available. -Ongoing async searches and any saved search results are deleted after this period. ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) ** *`allow_partial_search_results` (Optional, boolean)*: Indicate if an error should be returned if there is a partial search failure or timeout ** *`analyzer` (Optional, string)*: The analyzer to use for the query string @@ -1660,7 +1659,6 @@ A partial reduction is performed every time the coordinating node has received a ** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored ** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests ** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`pre_filter_shard_size` (Optional, number)*: The default value cannot be changed, which enforces the execution of a pre-filter roundtrip to retrieve statistics from each shard so that the ones that surely don’t hold any document matching the query get skipped. ** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to true ** *`routing` (Optional, string)*: A list of specific routing values ** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type @@ -1794,7 +1792,8 @@ client.cat.aliases({ ... }) [discrete] ==== allocation -Provides a snapshot of the number of shards allocated to each data node and their disk space. +Get shard allocation information. +Get a snapshot of the number of shards allocated to each data node and their disk space. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. {ref}/cat-allocation.html[Endpoint documentation] @@ -1863,7 +1862,8 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para [discrete] ==== fielddata -Returns the amount of heap memory currently used by the field data cache on every data node in the cluster. +Get field data cache information. +Get the amount of heap memory currently used by the field data cache on every data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. @@ -1883,7 +1883,7 @@ To retrieve all fields, omit this parameter. [discrete] ==== health -Returns the health status of a cluster, similar to the cluster health API. +Get the cluster health status. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. @@ -1958,7 +1958,8 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para [discrete] ==== master -Returns information about the master node, including the ID, bound IP address, and name. +Get master node information. +Get information about the master node, including the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
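The header assertions in the bulk helper tests earlier in this patch all pin the vendored content type to `compatible-with=9`. As a hedged sketch of the request those tests exercise (the node URL, index name, and documents are illustrative, not part of this diff): every NDJSON body produced by the bulk helper goes out with that content type.

[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

// The bulk helper serializes its payload as NDJSON, so the request carries
// the content type asserted throughout the tests above:
//   application/vnd.elasticsearch+x-ndjson; compatible-with=9
const result = await client.helpers.bulk({
  datasource: [{ user: 'arya' }, { user: 'gendry' }],
  onDocument () {
    return { index: { _index: 'test' } }
  }
})
console.log(result)
----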
For application consumption, use the nodes info API. {ref}/cat-master.html[Endpoint documentation] @@ -2107,7 +2108,8 @@ If `false`, the API returns a 404 status code when there are no matches or only [discrete] ==== nodeattrs -Returns information about custom node attributes. +Get node attribute information. +Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. {ref}/cat-nodeattrs.html[Endpoint documentation] @@ -2127,7 +2129,8 @@ node will send requests for further information to each selected node. [discrete] ==== nodes -Returns information about the nodes in a cluster. +Get node information. +Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. {ref}/cat-nodes.html[Endpoint documentation] @@ -2146,7 +2149,8 @@ client.cat.nodes({ ... }) [discrete] ==== pending_tasks -Returns cluster-level changes that have not yet been executed. +Get pending task information. +Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. {ref}/cat-pending-tasks.html[Endpoint documentation] @@ -2166,7 +2170,8 @@ node will send requests for further information to each selected node. [discrete] ==== plugins -Returns a list of plugins running on each node of a cluster. +Get plugin information. +Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. {ref}/cat-plugins.html[Endpoint documentation] @@ -2186,7 +2191,8 @@ node will send requests for further information to each selected node. [discrete] ==== recovery -Returns information about ongoing and completed shard recoveries. +Get shard recovery information. +Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. @@ -2209,7 +2215,8 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para [discrete] ==== repositories -Returns the snapshot repositories for a cluster. +Get snapshot repository information. +Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. 
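Several regenerated doc examples in this commit swap removed typed helpers (such as `client.esql.asyncQuery` and `client.security.getSettings`) for raw transport calls. A minimal sketch of that fallback pattern, with the query and format values as illustrative placeholders: `method`, `path`, `querystring`, and `body` map directly onto the HTTP request.

[source,js]
----
// Raw-transport fallback used by the regenerated examples: anything the
// typed API surface no longer exposes can still be reached this way.
const response = await client.transport.request({
  method: 'POST',
  path: '/_query/async',
  querystring: { format: 'json' },
  body: { query: 'FROM library | LIMIT 5' },
})
console.log(response)
----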
{ref}/cat-repositories.html[Endpoint documentation] @@ -2221,7 +2228,8 @@ client.cat.repositories() [discrete] ==== segments -Returns low-level information about the Lucene segments in index shards. +Get segment information. +Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. @@ -2246,7 +2254,8 @@ node will send requests for further information to each selected node. [discrete] ==== shards -Returns information about the shards in a cluster. +Get shard information. +Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. @@ -2267,7 +2276,8 @@ To target all data streams and indices, omit this parameter or use `*` or `_all` [discrete] ==== snapshots -Returns information about the snapshots stored in one or more repositories. +Get snapshot information. +Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. @@ -2289,7 +2299,8 @@ If any repository fails during the request, Elasticsearch returns an error. [discrete] ==== tasks -Returns information about tasks currently executing in the cluster. +Get task information. +Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. {ref}/tasks.html[Endpoint documentation] @@ -2309,7 +2320,8 @@ client.cat.tasks({ ... }) [discrete] ==== templates -Returns information about index templates in a cluster. +Get index template information. +Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. @@ -2332,7 +2344,8 @@ node will send requests for further information to each selected node. [discrete] ==== thread_pool -Returns thread pool statistics for each node in a cluster. +Get thread pool statistics. +Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. @@ -2356,8 +2369,8 @@ node will send requests for further information to each selected node. [discrete] ==== transforms -Get transforms. -Returns configuration and usage information about transforms. +Get transform information. +Get configuration and usage information about transforms.
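The rewritten cat descriptions in these hunks all stress the same point: cat output is for humans at a terminal, and applications should prefer the structured APIs. A small sketch of both modes, assuming a reachable cluster:

[source,js]
----
// Default cat output is a plain-text table meant for terminals.
const text = await client.cat.health()
console.log(text)

// For scripts, request structured rows instead via the format parameter.
const rows = await client.cat.health({ format: 'json' })
console.log(rows)
----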
CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For @@ -3622,7 +3635,8 @@ client.enrich.deletePolicy({ name }) [discrete] ==== execute_policy -Creates the enrich index for an existing enrich policy. +Run an enrich policy. +Create the enrich index for an existing enrich policy. {ref}/execute-enrich-policy-api.html[Endpoint documentation] [source,ts] @@ -3691,7 +3705,8 @@ client.enrich.stats() === eql [discrete] ==== delete -Deletes an async EQL search or a stored synchronous EQL search. +Delete an async EQL search. +Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. {ref}/eql-search-api.html[Endpoint documentation] @@ -3710,7 +3725,8 @@ A search ID is also provided if the request’s `keep_on_completion` parameter i [discrete] ==== get -Returns the current status and available results for an async EQL search or a stored synchronous EQL search. +Get async EQL search results. +Get the current status and available results for an async EQL search or a stored synchronous EQL search. {ref}/get-async-eql-search-api.html[Endpoint documentation] [source,ts] @@ -3730,7 +3746,8 @@ Defaults to no timeout, meaning the request waits for complete search results. [discrete] ==== get_status -Returns the current status for an async EQL search or a stored synchronous EQL search without returning results. +Get the async EQL status. +Get the current status for an async EQL search or a stored synchronous EQL search without returning results. {ref}/get-async-eql-status-api.html[Endpoint documentation] [source,ts] @@ -3746,7 +3763,9 @@ client.eql.getStatus({ id }) [discrete] ==== search -Returns results matching a query expressed in Event Query Language (EQL) +Get EQL search results. +Returns search results for an Event Query Language (EQL) query. +EQL assumes each document in a data stream or index corresponds to an event. {ref}/eql-search-api.html[Endpoint documentation] [source,ts] @@ -3806,7 +3825,8 @@ client.esql.asyncQueryGet() [discrete] ==== query -Executes an ES|QL request +Run an ES|QL query. +Get search results for an ES|QL (Elasticsearch query language) query. {ref}/esql-rest.html[Endpoint documentation] [source,ts] @@ -3862,7 +3882,9 @@ client.features.resetFeatures() === fleet [discrete] ==== global_checkpoints -Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project. +Get global checkpoints. +Get the current global checkpoints for an index. +This API is designed for internal use by the Fleet server project. {ref}/get-global-checkpoints.html[Endpoint documentation] [source,ts] @@ -3886,9 +3908,10 @@ will cause Elasticsearch to immediately return the current global checkpoints. [discrete] ==== msearch -Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) with a single API request. -The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) API. However, similar to the fleet search API, it -supports the wait_for_checkpoints parameter. +Run multiple Fleet searches. +Run several Fleet searches with a single API request. +The API follows the same structure as the multi search API. +However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter. [source,ts] ---- client.fleet.msearch({ ... 
}) @@ -3920,8 +3943,9 @@ which is true by default. [discrete] ==== search -The purpose of the fleet search api is to provide a search api where the search will only be executed -after provided checkpoint has been processed and is visible for searches inside of Elasticsearch. +Run a Fleet search. +The purpose of the Fleet search API is to provide an API where the search will be run only +after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. [source,ts] ---- client.fleet.search({ index }) @@ -4024,7 +4048,12 @@ which is true by default. === graph [discrete] ==== explore -Extracts and summarizes information about the documents and terms in an Elasticsearch data stream or index. +Explore graph analytics. +Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. +The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. +An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. +Subsequent requests enable you to spider out from one or more vertices of interest. +You can exclude vertices that have already been returned. {ref}/graph-explore-api.html[Endpoint documentation] [source,ts] ---- client.graph.explore({ index }) @@ -4690,10 +4719,13 @@ If the request can target data streams, this argument determines whether wildcar Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== exists_index_template -Returns information about whether a particular index template exists. +Check index templates. +Check whether index templates exist. {ref}/index-templates.html[Endpoint documentation] [source,ts] @@ -4887,6 +4919,8 @@ If the request can target data streams, this argument determines whether wildcar Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_data_lifecycle @@ -5502,7 +5536,8 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== resolve_index -Resolves the specified name(s) and/or index patterns for indices, aliases, and data streams. +Resolve indices. +Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. {ref}/indices-resolve-index-api.html[Endpoint documentation] @@ -5938,7 +5973,8 @@ client.inference.streamInference() === ingest [discrete] ==== delete_geoip_database -Deletes a geoip database configuration. +Delete GeoIP database configurations. +Delete one or more IP geolocation database configurations.
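The expanded graph description above outlines the seed-query-plus-vertices model. A hedged sketch of a first `_explore` request (the index and field names are hypothetical, not taken from this diff):

[source,js]
----
// The seed query picks the documents of interest; vertices and connections
// define which fields become nodes and edges in the returned graph.
const response = await client.graph.explore({
  index: 'clicklogs',
  query: { match: { 'query.raw': 'midi' } },
  vertices: [{ field: 'product' }],
  connections: { vertices: [{ field: 'query.raw' }] },
})
console.log(response)
----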
{ref}/delete-geoip-database-api.html[Endpoint documentation] [source,ts] @@ -5968,7 +6004,8 @@ client.ingest.deleteIpLocationDatabase() [discrete] ==== delete_pipeline -Deletes one or more existing ingest pipeline. +Delete pipelines. +Delete one or more ingest pipelines. {ref}/delete-pipeline-api.html[Endpoint documentation] [source,ts] @@ -5989,7 +6026,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== geo_ip_stats -Gets download statistics for GeoIP2 databases used with the geoip processor. +Get GeoIP statistics. +Get download statistics for GeoIP2 databases that are used with the GeoIP processor. {ref}/geoip-processor.html[Endpoint documentation] [source,ts] @@ -6000,7 +6038,8 @@ client.ingest.geoIpStats() [discrete] ==== get_geoip_database -Returns information about one or more geoip database configurations. +Get GeoIP database configurations. +Get information about one or more IP geolocation database configurations. {ref}/get-geoip-database-api.html[Endpoint documentation] [source,ts] @@ -6031,7 +6070,8 @@ client.ingest.getIpLocationDatabase() [discrete] ==== get_pipeline -Returns information about one or more ingest pipelines. +Get pipelines. +Get information about one or more ingest pipelines. This API returns a local reference of the pipeline. {ref}/get-pipeline-api.html[Endpoint documentation] @@ -6053,8 +6093,9 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== processor_grok -Extracts structured fields out of a single text field within a document. -You choose which field to extract matched fields from, as well as the grok pattern you expect will match. +Run a grok processor. +Extract structured fields out of a single text field within a document. +You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. {ref}/grok-processor.html[Endpoint documentation] @@ -6066,7 +6107,8 @@ client.ingest.processorGrok() [discrete] ==== put_geoip_database -Returns information about one or more geoip database configurations. +Create or update GeoIP database configurations. +Create or update IP geolocation database configurations. {ref}/put-geoip-database-api.html[Endpoint documentation] [source,ts] @@ -6099,7 +6141,7 @@ client.ingest.putIpLocationDatabase() [discrete] ==== put_pipeline -Creates or updates an ingest pipeline. +Create or update a pipeline. Changes made using this API take effect immediately. {ref}/ingest.html[Endpoint documentation] @@ -6126,7 +6168,9 @@ When a deprecated ingest pipeline is referenced as the default or final pipeline [discrete] ==== simulate -Executes an ingest pipeline against a set of provided documents. +Simulate a pipeline. +Run an ingest pipeline against a set of provided documents. +You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. {ref}/simulate-pipeline-api.html[Endpoint documentation] [source,ts] @@ -8599,7 +8643,8 @@ If no response is received before the timeout expires, the request fails and ret === query_rules [discrete] ==== delete_rule -Deletes a query rule within a query ruleset. +Delete a query rule. +Delete a query rule within a query ruleset. 
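Before the query rules hunks continue, the rewritten ingest `simulate` description above ("supply a pipeline definition in the body of the request") is worth a sketch. The pipeline and document here are illustrative only:

[source,js]
----
// Simulate an inline pipeline definition against one document without
// storing the pipeline or indexing anything.
const response = await client.ingest.simulate({
  pipeline: {
    processors: [
      { set: { field: 'greeting', value: 'hello' } }
    ]
  },
  docs: [
    { _source: { user: 'arya' } }
  ]
})
console.log(response)
----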
{ref}/delete-query-rule.html[Endpoint documentation] [source,ts] @@ -8616,7 +8661,7 @@ client.queryRules.deleteRule({ ruleset_id, rule_id }) [discrete] ==== delete_ruleset -Deletes a query ruleset. +Delete a query ruleset. {ref}/delete-query-ruleset.html[Endpoint documentation] [source,ts] @@ -8632,7 +8677,8 @@ client.queryRules.deleteRuleset({ ruleset_id }) [discrete] ==== get_rule -Returns the details about a query rule within a query ruleset +Get a query rule. +Get details about a query rule within a query ruleset. {ref}/get-query-rule.html[Endpoint documentation] [source,ts] @@ -8649,7 +8695,8 @@ client.queryRules.getRule({ ruleset_id, rule_id }) [discrete] ==== get_ruleset -Returns the details about a query ruleset +Get a query ruleset. +Get details about a query ruleset. {ref}/get-query-ruleset.html[Endpoint documentation] [source,ts] @@ -8665,7 +8712,8 @@ client.queryRules.getRuleset({ ruleset_id }) [discrete] ==== list_rulesets -Returns summarized information about existing query rulesets. +Get all query rulesets. +Get summarized information about the query rulesets. {ref}/list-query-rulesets.html[Endpoint documentation] [source,ts] @@ -8682,7 +8730,8 @@ client.queryRules.listRulesets({ ... }) [discrete] ==== put_rule -Creates or updates a query rule within a query ruleset. +Create or update a query rule. +Create or update a query rule within a query ruleset. {ref}/put-query-rule.html[Endpoint documentation] [source,ts] @@ -8703,7 +8752,7 @@ client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) [discrete] ==== put_ruleset -Creates or updates a query ruleset. +Create or update a query ruleset. {ref}/put-query-ruleset.html[Endpoint documentation] [source,ts] @@ -8720,7 +8769,8 @@ client.queryRules.putRuleset({ ruleset_id, rules }) [discrete] ==== test -Creates or updates a query ruleset. +Test a query ruleset. +Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. {ref}/test-query-ruleset.html[Endpoint documentation] [source,ts] @@ -10984,7 +11034,7 @@ client.snapshot.verifyRepository({ repository }) === sql [discrete] ==== clear_cursor -Clears the SQL cursor +Clear an SQL search cursor. {ref}/clear-sql-cursor-api.html[Endpoint documentation] [source,ts] @@ -11000,7 +11050,9 @@ client.sql.clearCursor({ cursor }) [discrete] ==== delete_async -Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. +Delete an async SQL search. +Delete an async SQL search or a stored synchronous SQL search. +If the search is still running, the API cancels it. {ref}/delete-async-sql-search-api.html[Endpoint documentation] [source,ts] @@ -11016,7 +11068,8 @@ client.sql.deleteAsync({ id }) [discrete] ==== get_async -Returns the current status and available results for an async SQL search or stored synchronous SQL search +Get async SQL search results. +Get the current status and available results for an async SQL search or stored synchronous SQL search. {ref}/get-async-sql-search-api.html[Endpoint documentation] [source,ts] @@ -11039,7 +11092,8 @@ meaning the request waits for complete search results. [discrete] ==== get_async_status -Returns the current status of an async SQL search or a stored synchronous SQL search +Get the async SQL search status. +Get the current status of an async SQL search or a stored synchronous SQL search. 
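The async SQL descriptions rewritten above (`delete_async`, `get_async`, `get_async_status`) pair with the submit call documented just below. A hedged sketch of the round trip, with the query and timeout as illustrative values:

[source,js]
----
// Submit the search; if it finishes within the timeout the results come
// back inline, otherwise an id for the stored async search is returned.
const submitted = await client.sql.query({
  query: 'SELECT author, name FROM library ORDER BY page_count DESC',
  wait_for_completion_timeout: '2s',
  keep_on_completion: true,
})

// When the submit call returns an id, poll for results with getAsync.
if (submitted.id != null) {
  const results = await client.sql.getAsync({ id: submitted.id })
  console.log(results)
}
----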
{ref}/get-async-sql-search-status-api.html[Endpoint documentation] [source,ts] @@ -11055,7 +11109,8 @@ client.sql.getAsyncStatus({ id }) [discrete] ==== query -Executes a SQL request +Get SQL search results. +Run an SQL request. {ref}/sql-search-api.html[Endpoint documentation] [source,ts] @@ -11090,7 +11145,8 @@ precedence over mapped fields with the same name. [discrete] ==== translate -Translates SQL into Elasticsearch queries +Translate SQL into Elasticsearch queries. +Translate an SQL search into a search API request containing Query DSL. {ref}/sql-translate-api.html[Endpoint documentation] [source,ts] @@ -11140,7 +11196,7 @@ client.ssl.certificates() === synonyms [discrete] ==== delete_synonym -Deletes a synonym set +Delete a synonym set. {ref}/delete-synonyms-set.html[Endpoint documentation] [source,ts] @@ -11156,7 +11212,8 @@ client.synonyms.deleteSynonym({ id }) [discrete] ==== delete_synonym_rule -Deletes a synonym rule in a synonym set +Delete a synonym rule. +Delete a synonym rule from a synonym set. {ref}/delete-synonym-rule.html[Endpoint documentation] [source,ts] @@ -11173,7 +11230,7 @@ client.synonyms.deleteSynonymRule({ set_id, rule_id }) [discrete] ==== get_synonym -Retrieves a synonym set +Get a synonym set. {ref}/get-synonyms-set.html[Endpoint documentation] [source,ts] @@ -11191,7 +11248,8 @@ client.synonyms.getSynonym({ id }) [discrete] ==== get_synonym_rule -Retrieves a synonym rule from a synonym set +Get a synonym rule. +Get a synonym rule from a synonym set. {ref}/get-synonym-rule.html[Endpoint documentation] [source,ts] @@ -11208,7 +11266,8 @@ client.synonyms.getSynonymRule({ set_id, rule_id }) [discrete] ==== get_synonyms_sets -Retrieves a summary of all defined synonym sets +Get all synonym sets. +Get a summary of all defined synonym sets. {ref}/list-synonyms-sets.html[Endpoint documentation] [source,ts] @@ -11225,7 +11284,9 @@ client.synonyms.getSynonymsSets({ ... }) [discrete] ==== put_synonym -Creates or updates a synonym set. +Create or update a synonym set. +Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +If you need to manage more synonym rules, you can create multiple synonym sets. {ref}/put-synonyms-set.html[Endpoint documentation] [source,ts] @@ -11242,7 +11303,8 @@ client.synonyms.putSynonym({ id, synonyms_set }) [discrete] ==== put_synonym_rule -Creates or updates a synonym rule in a synonym set +Create or update a synonym rule. +Create or update a synonym rule in a synonym set. {ref}/put-synonym-rule.html[Endpoint documentation] [source,ts] diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index f0c852368..88d6104ed 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class AsyncSearch { @@ -48,10 +47,10 @@ export default class AsyncSearch { * Delete an async search. If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. 
diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts
index f0c852368..88d6104ed 100644
--- a/src/api/api/async_search.ts
+++ b/src/api/api/async_search.ts
@@ -35,7 +35,6 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class AsyncSearch {
@@ -48,10 +47,10 @@ export default class AsyncSearch {
    * Delete an async search. If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation}
    */
-  async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchDeleteResponse>
-  async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AsyncSearchDeleteResponse, unknown>>
-  async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise<T.AsyncSearchDeleteResponse>
-  async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+  async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchDeleteResponse>
+  async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AsyncSearchDeleteResponse, unknown>>
+  async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise<T.AsyncSearchDeleteResponse>
+  async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -80,10 +79,10 @@
    * Get async search results. Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation}
    */
-  async get<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchGetResponse<TDocument, TAggregations>>
-  async get<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AsyncSearchGetResponse<TDocument, TAggregations>, unknown>>
-  async get<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise<T.AsyncSearchGetResponse<TDocument, TAggregations>>
-  async get<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise<any> {
+  async get<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchGetResponse<TDocument, TAggregations>>
+  async get<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AsyncSearchGetResponse<TDocument, TAggregations>, unknown>>
+  async get<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise<T.AsyncSearchGetResponse<TDocument, TAggregations>>
+  async get<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -91,7 +90,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
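  // Taken together, the async search lifecycle with the new signatures looks
  // like the following minimal sketch; the index, query, and timeout values
  // are illustrative and `client` is assumed to be a configured instance:
  //
  //   const submitted = await client.asyncSearch.submit({
  //     index: 'my-index',
  //     query: { match_all: {} },
  //     wait_for_completion_timeout: '1s'
  //   })
  //   if (submitted.id != null) { // an id is returned while the search is still running
  //     await client.asyncSearch.status({ id: submitted.id })
  //     const result = await client.asyncSearch.get({ id: submitted.id })
  //     await client.asyncSearch.delete({ id: submitted.id })
  //   }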
@@ -112,10 +111,10 @@
    * Get the async search status. Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation}
    */
-  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchStatusResponse>
-  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AsyncSearchStatusResponse, unknown>>
-  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise<T.AsyncSearchStatusResponse>
-  async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise<any> {
+  async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchStatusResponse>
+  async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AsyncSearchStatusResponse, unknown>>
+  async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise<T.AsyncSearchStatusResponse>
+  async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -123,7 +122,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -144,29 +143,19 @@
    * Run an async search. When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation}
    */
-  async submit<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchSubmitResponse<TDocument, TAggregations>>
-  async submit<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AsyncSearchSubmitResponse<TDocument, TAggregations>, unknown>>
-  async submit<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise<T.AsyncSearchSubmitResponse<TDocument, TAggregations>>
-  async submit<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise<any> {
+  async submit<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchSubmitResponse<TDocument, TAggregations>>
+  async submit<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AsyncSearchSubmitResponse<TDocument, TAggregations>, unknown>>
+  async submit<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise<T.AsyncSearchSubmitResponse<TDocument, TAggregations>>
+  async submit<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
-        // @ts-expect-error
         if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) { // eslint-disable-line
-          // @ts-expect-error
           querystring[key] = params[key]
         } else {
           // @ts-expect-error
@@ -174,7 +163,7 @@
         }
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
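The practical upshot of the `submit` change: the legacy `body` key is gone, request-body fields are now top-level properties of the request object, and a string `sort` containing a colon is routed to the query string rather than the body. A minimal sketch; the index, query, and sort values are illustrative:

[source,ts]
----
const response = await client.asyncSearch.submit({
  index: 'my-index',
  query: { match: { title: 'elasticsearch' } }, // accepted body field, passed top level
  sort: 'release_date:desc',                    // string with ':' goes to the query string
  keep_on_completion: true                      // query-string parameter
})
----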
diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts
index 9be9c4d4a..aec1226d8 100644
--- a/src/api/api/autoscaling.ts
+++ b/src/api/api/autoscaling.ts
@@ -35,7 +35,6 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class Autoscaling {
@@ -48,10 +47,10 @@ export default class Autoscaling {
    * Delete an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-delete-autoscaling-policy.html | Elasticsearch API documentation}
    */
-  async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AutoscalingDeleteAutoscalingPolicyResponse>
-  async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AutoscalingDeleteAutoscalingPolicyResponse, unknown>>
-  async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<T.AutoscalingDeleteAutoscalingPolicyResponse>
-  async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AutoscalingDeleteAutoscalingPolicyResponse>
+  async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AutoscalingDeleteAutoscalingPolicyResponse, unknown>>
+  async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<T.AutoscalingDeleteAutoscalingPolicyResponse>
+  async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -80,10 +79,10 @@
    * Get the autoscaling capacity. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. This API gets the current autoscaling capacity based on the configured autoscaling policy. It will return information to size the cluster appropriately to the current workload. The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-get-autoscaling-capacity.html | Elasticsearch API documentation}
    */
-  async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AutoscalingGetAutoscalingCapacityResponse>
-  async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AutoscalingGetAutoscalingCapacityResponse, unknown>>
-  async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise<T.AutoscalingGetAutoscalingCapacityResponse>
-  async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise<any> {
+  async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AutoscalingGetAutoscalingCapacityResponse>
+  async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AutoscalingGetAutoscalingCapacityResponse, unknown>>
+  async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise<T.AutoscalingGetAutoscalingCapacityResponse>
+  async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -92,7 +91,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -110,10 +109,10 @@
    * Get an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-get-autoscaling-capacity.html | Elasticsearch API documentation}
    */
-  async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AutoscalingGetAutoscalingPolicyResponse>
-  async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AutoscalingGetAutoscalingPolicyResponse, unknown>>
-  async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<T.AutoscalingGetAutoscalingPolicyResponse>
-  async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<any> {
+  async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AutoscalingGetAutoscalingPolicyResponse>
+  async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AutoscalingGetAutoscalingPolicyResponse, unknown>>
+  async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<T.AutoscalingGetAutoscalingPolicyResponse>
+  async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -121,7 +120,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -142,15 +141,14 @@
    * Create or update an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-put-autoscaling-policy.html | Elasticsearch API documentation}
    */
-  async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AutoscalingPutAutoscalingPolicyResponse>
-  async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AutoscalingPutAutoscalingPolicyResponse, unknown>>
-  async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<T.AutoscalingPutAutoscalingPolicyResponse>
-  async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<any> {
+  async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AutoscalingPutAutoscalingPolicyResponse>
+  async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.AutoscalingPutAutoscalingPolicyResponse, unknown>>
+  async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<T.AutoscalingPutAutoscalingPolicyResponse>
+  async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['policy']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: any = params.body ?? undefined
+    let body: any
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -158,7 +156,7 @@
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
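The same body-key flattening applies to `putAutoscalingPolicy`, whose only accepted body field is `policy`. A minimal sketch; the policy name and contents are illustrative, not from this diff:

[source,ts]
----
await client.autoscaling.putAutoscalingPolicy({
  name: 'my-autoscaling-policy', // path parameter
  policy: {                      // body field, passed top level
    roles: ['data_hot'],
    deciders: { fixed: {} }
  }
})
----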
diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts
index b7a5dfa84..ebad1b11f 100644
--- a/src/api/api/bulk.ts
+++ b/src/api/api/bulk.ts
@@ -35,22 +35,20 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Bulk index or delete documents. Performs multiple indexing or delete operations in a single API call. This reduces overhead and can greatly increase indexing speed.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html | Elasticsearch API documentation}
  */
-export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.BulkResponse>
-export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.BulkResponse, unknown>>
-export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise<T.BulkResponse>
-export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.BulkResponse>
+export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.BulkResponse, unknown>>
+export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise<T.BulkResponse>
+export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['operations']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: any = params.body ?? undefined
+  let body: any
 
   for (const key in params) {
     if (acceptedBody.includes(key)) {
@@ -58,7 +56,7 @@ export default async function BulkApi
       body = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
diff --git a/src/api/api/capabilities.ts b/src/api/api/capabilities.ts
--- a/src/api/api/capabilities.ts
+++ b/src/api/api/capabilities.ts
-export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -54,7 +53,7 @@ export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       querystring[key] = params[key]
     }
   }
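With `operations` as the only accepted body field for `bulk`, a request pairs action lines with document lines in a single array. A minimal sketch; the index name and documents are illustrative:

[source,ts]
----
await client.bulk({
  operations: [
    { index: { _index: 'my-index', _id: '1' } },
    { title: 'Document one' },
    { delete: { _index: 'my-index', _id: '2' } }
  ],
  refresh: 'wait_for' // query-string parameter, also passed top level
})
----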
diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts
index e06c34728..b30c5365c 100644
--- a/src/api/api/cat.ts
+++ b/src/api/api/cat.ts
@@ -35,7 +35,6 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class Cat {
@@ -48,10 +47,10 @@ export default class Cat {
    * Get aliases. Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html | Elasticsearch API documentation}
    */
-  async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatAliasesResponse>
-  async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatAliasesResponse, unknown>>
-  async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptions): Promise<T.CatAliasesResponse>
-  async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptions): Promise<any> {
+  async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatAliasesResponse>
+  async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatAliasesResponse, unknown>>
+  async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise<T.CatAliasesResponse>
+  async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -60,7 +59,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -85,13 +84,13 @@
   }
 
   /**
-   * Provides a snapshot of the number of shards allocated to each data node and their disk space. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
+   * Get shard allocation information. Get a snapshot of the number of shards allocated to each data node and their disk space. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html | Elasticsearch API documentation}
    */
-  async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatAllocationResponse>
-  async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatAllocationResponse, unknown>>
-  async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptions): Promise<T.CatAllocationResponse>
-  async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptions): Promise<any> {
+  async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatAllocationResponse>
+  async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatAllocationResponse, unknown>>
+  async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise<T.CatAllocationResponse>
+  async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['node_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -100,7 +99,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -128,10 +127,10 @@
    * Get component templates. Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html | Elasticsearch API documentation}
    */
-  async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatComponentTemplatesResponse>
-  async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatComponentTemplatesResponse, unknown>>
-  async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise<T.CatComponentTemplatesResponse>
-  async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise<any> {
+  async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatComponentTemplatesResponse>
+  async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatComponentTemplatesResponse, unknown>>
+  async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise<T.CatComponentTemplatesResponse>
+  async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -140,7 +139,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -168,10 +167,10 @@
    * Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html | Elasticsearch API documentation}
    */
-  async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatCountResponse>
-  async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatCountResponse, unknown>>
-  async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptions): Promise<T.CatCountResponse>
-  async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptions): Promise<any> {
+  async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatCountResponse>
+  async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatCountResponse, unknown>>
+  async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise<T.CatCountResponse>
+  async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -180,7 +179,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -205,13 +204,13 @@
   }
 
   /**
-   * Returns the amount of heap memory currently used by the field data cache on every data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API.
+   * Get field data cache information. Get the amount of heap memory currently used by the field data cache on every data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html | Elasticsearch API documentation}
    */
-  async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatFielddataResponse>
-  async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatFielddataResponse, unknown>>
-  async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptions): Promise<T.CatFielddataResponse>
-  async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptions): Promise<any> {
+  async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatFielddataResponse>
+  async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatFielddataResponse, unknown>>
+  async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise<T.CatFielddataResponse>
+  async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['fields']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -220,7 +219,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -245,13 +244,13 @@
   }
 
   /**
-   * Returns the health status of a cluster, similar to the cluster health API. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time.
+   * Get the cluster health status. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html | Elasticsearch API documentation}
    */
-  async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatHealthResponse>
-  async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatHealthResponse, unknown>>
-  async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptions): Promise<T.CatHealthResponse>
-  async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptions): Promise<any> {
+  async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatHealthResponse>
+  async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatHealthResponse, unknown>>
+  async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise<T.CatHealthResponse>
+  async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -260,7 +259,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -278,10 +277,10 @@
    * Get CAT help. Returns help for the CAT APIs.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html | Elasticsearch API documentation}
    */
-  async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatHelpResponse>
-  async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatHelpResponse, unknown>>
-  async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptions): Promise<T.CatHelpResponse>
-  async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptions): Promise<any> {
+  async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatHelpResponse>
+  async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatHelpResponse, unknown>>
+  async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise<T.CatHelpResponse>
+  async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -290,7 +289,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -308,10 +307,10 @@
    * Get index information. Returns high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html | Elasticsearch API documentation}
    */
-  async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatIndicesResponse>
-  async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatIndicesResponse, unknown>>
-  async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptions): Promise<T.CatIndicesResponse>
-  async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptions): Promise<any> {
+  async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatIndicesResponse>
+  async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatIndicesResponse, unknown>>
+  async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise<T.CatIndicesResponse>
+  async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -320,7 +319,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -345,13 +344,13 @@
   }
 
   /**
-   * Returns information about the master node, including the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
+   * Get master node information. Get information about the master node, including the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-master.html | Elasticsearch API documentation}
    */
-  async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMasterResponse>
-  async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatMasterResponse, unknown>>
-  async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptions): Promise<T.CatMasterResponse>
-  async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptions): Promise<any> {
+  async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMasterResponse>
+  async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatMasterResponse, unknown>>
+  async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise<T.CatMasterResponse>
+  async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -360,7 +359,7 @@
     for (const key in params) {
      if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -378,10 +377,10 @@
    * Get data frame analytics jobs. Returns configuration and usage information about data frame analytics jobs. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-dfanalytics.html | Elasticsearch API documentation}
    */
-  async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlDataFrameAnalyticsResponse>
-  async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatMlDataFrameAnalyticsResponse, unknown>>
-  async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.CatMlDataFrameAnalyticsResponse>
-  async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+  async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlDataFrameAnalyticsResponse>
+  async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatMlDataFrameAnalyticsResponse, unknown>>
+  async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.CatMlDataFrameAnalyticsResponse>
+  async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -390,7 +389,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -418,10 +417,10 @@
    * Get datafeeds. Returns configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-datafeeds.html | Elasticsearch API documentation}
    */
-  async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlDatafeedsResponse>
-  async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatMlDatafeedsResponse, unknown>>
-  async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise<T.CatMlDatafeedsResponse>
-  async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise<any> {
+  async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlDatafeedsResponse>
+  async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatMlDatafeedsResponse, unknown>>
+  async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise<T.CatMlDatafeedsResponse>
+  async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -430,7 +429,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -458,10 +457,10 @@
    * Get anomaly detection jobs. Returns configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-anomaly-detectors.html | Elasticsearch API documentation}
    */
-  async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlJobsResponse>
-  async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatMlJobsResponse, unknown>>
-  async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptions): Promise<T.CatMlJobsResponse>
-  async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptions): Promise<any> {
+  async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlJobsResponse>
+  async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatMlJobsResponse, unknown>>
+  async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise<T.CatMlJobsResponse>
+  async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -470,7 +469,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -498,10 +497,10 @@
    * Get trained models. Returns configuration and usage information about inference trained models. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-trained-model.html | Elasticsearch API documentation}
    */
-  async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlTrainedModelsResponse>
-  async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatMlTrainedModelsResponse, unknown>>
-  async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise<T.CatMlTrainedModelsResponse>
-  async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise<any> {
+  async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlTrainedModelsResponse>
+  async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatMlTrainedModelsResponse, unknown>>
+  async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise<T.CatMlTrainedModelsResponse>
+  async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -510,7 +509,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -535,13 +534,13 @@
   }
 
   /**
-   * Returns information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
+   * Get node attribute information. Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html | Elasticsearch API documentation}
    */
-  async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatNodeattrsResponse>
-  async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatNodeattrsResponse, unknown>>
-  async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptions): Promise<T.CatNodeattrsResponse>
-  async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptions): Promise<any> {
+  async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatNodeattrsResponse>
+  async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatNodeattrsResponse, unknown>>
+  async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise<T.CatNodeattrsResponse>
+  async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -550,7 +549,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -565,13 +564,13 @@
   }
 
   /**
-   * Returns information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
+   * Get node information. Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html | Elasticsearch API documentation}
    */
-  async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatNodesResponse>
-  async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatNodesResponse, unknown>>
-  async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptions): Promise<T.CatNodesResponse>
-  async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptions): Promise<any> {
+  async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatNodesResponse>
+  async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatNodesResponse, unknown>>
+  async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise<T.CatNodesResponse>
+  async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -580,7 +579,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -595,13 +594,13 @@
   }
 
   /**
-   * Returns cluster-level changes that have not yet been executed. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.
+   * Get pending task information. Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html | Elasticsearch API documentation}
    */
-  async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatPendingTasksResponse>
-  async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatPendingTasksResponse, unknown>>
-  async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptions): Promise<T.CatPendingTasksResponse>
-  async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptions): Promise<any> {
+  async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatPendingTasksResponse>
+  async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatPendingTasksResponse, unknown>>
+  async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise<T.CatPendingTasksResponse>
+  async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -610,7 +609,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -625,13 +624,13 @@
   }
 
   /**
-   * Returns a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
+   * Get plugin information. Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html | Elasticsearch API documentation}
    */
-  async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatPluginsResponse>
-  async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatPluginsResponse, unknown>>
-  async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptions): Promise<T.CatPluginsResponse>
-  async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptions): Promise<any> {
+  async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatPluginsResponse>
+  async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatPluginsResponse, unknown>>
+  async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise<T.CatPluginsResponse>
+  async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -640,7 +639,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -655,13 +654,13 @@
   }
 
   /**
-   * Returns information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.
+   * Get shard recovery information. Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html | Elasticsearch API documentation}
    */
-  async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatRecoveryResponse>
-  async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatRecoveryResponse, unknown>>
-  async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptions): Promise<T.CatRecoveryResponse>
-  async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptions): Promise<any> {
+  async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatRecoveryResponse>
+  async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatRecoveryResponse, unknown>>
+  async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise<T.CatRecoveryResponse>
+  async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -670,7 +669,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -695,13 +694,13 @@
   }
 
   /**
-   * Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.
+   * Get snapshot repository information. Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html | Elasticsearch API documentation}
    */
-  async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatRepositoriesResponse>
-  async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatRepositoriesResponse, unknown>>
-  async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptions): Promise<T.CatRepositoriesResponse>
-  async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptions): Promise<any> {
+  async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatRepositoriesResponse>
+  async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CatRepositoriesResponse, unknown>>
+  async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise<T.CatRepositoriesResponse>
+  async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -710,7 +709,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -725,13 +724,13 @@
   }
 
   /**
-   * Returns low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.
+   * Get segment information. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html | Elasticsearch API documentation} */ - async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptions): Promise - async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptions): Promise { + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -740,7 +739,7 @@ export default class Cat { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -765,13 +764,13 @@ export default class Cat { } /** - * Returns information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. + * Get shard information. Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html | Elasticsearch API documentation} */ - async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptions): Promise - async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptions): Promise { + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -780,7 +779,7 @@ export default class Cat { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -805,13 +804,13 @@ export default class Cat { } /** - * Returns information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. + * Get snapshot information. Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html | Elasticsearch API documentation} */ - async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptions): Promise - async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository'] const querystring: Record = {} const body = undefined @@ -820,7 +819,7 @@ export default class Cat { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -845,13 +844,13 @@ export default class Cat { } /** - * Returns information about tasks currently executing in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. + * Get task information. Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ - async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> - async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptions): Promise - async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptions): Promise { + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -860,7 +859,7 @@ export default class Cat { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -875,13 +874,13 @@ export default class Cat { } /** - * Returns information about index templates in a cluster. 
You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. + * Get index template information. Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html | Elasticsearch API documentation} */ - async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptions): Promise - async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptions): Promise { + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -890,7 +889,7 @@ export default class Cat { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -915,13 +914,13 @@ export default class Cat { } /** - * Returns thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + * Get thread pool statistics. Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html | Elasticsearch API documentation} */ - async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise> - async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptions): Promise - async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptions): Promise { + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise> + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['thread_pool_patterns'] const querystring: Record = {} const body = undefined @@ -930,7 +929,7 @@ export default class Cat { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -955,13 +954,13 @@ export default class Cat { } /** - * Get transforms. Returns configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. + * Get transform information. Get configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-transforms.html | Elasticsearch API documentation} */ - async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptions): Promise - async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptions): Promise { + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const querystring: Record = {} const body = undefined @@ -970,7 +969,7 @@ export default class Cat { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index 0bf2cec5f..7b3d86fd8 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Ccr { @@ -48,10 +47,10 @@ export default class Ccr { * Deletes auto-follow patterns. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-delete-auto-follow-pattern.html | Elasticsearch API documentation} */ - async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise - async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class Ccr { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -80,30 +79,22 @@ export default class Ccr { * Creates a new follower index configured to follow the referenced leader index. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-follow.html | Elasticsearch API documentation} */ - async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> - async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptions): Promise - async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptions): Promise { + async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> + async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise + async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['data_stream_name', 'leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster', 'settings'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -124,10 +115,10 @@ export default class Ccr { * Retrieves information about all follower indices, including parameters and status for each follower index * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-info.html | Elasticsearch API documentation} */ - async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> - async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise - async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise { + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -135,7 +126,7 @@ export default class Ccr { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -156,10 +147,10 @@ export default class Ccr { * Retrieves follower stats. Returns shard-level stats about the following tasks associated with each shard for the specified indices.
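As a usage sketch of the flattened body handling in `follow` above (all index and cluster names here are hypothetical): fields listed in `acceptedBody`, such as `leader_index` and `remote_cluster`, are now passed at the top level of the params object and the client moves them into the request body.

const response = await client.ccr.follow({
  index: 'my-follower-index',       // path parameter
  leader_index: 'my-leader-index',  // body field, passed flat
  remote_cluster: 'my-remote'       // body field, passed flat
})
console.log(response.follow_index_created)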
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-stats.html | Elasticsearch API documentation} */ - async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise - async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise { + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -167,7 +158,7 @@ export default class Ccr { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -188,30 +179,22 @@ export default class Ccr { * Removes the follower retention leases from the leader. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-forget-follower.html | Elasticsearch API documentation} */ - async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise> - async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise - async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise { + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise> + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['follower_cluster', 'follower_index', 'follower_index_uuid', 'leader_remote_cluster'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -232,10 +215,10 @@ export default class Ccr { * Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-auto-follow-pattern.html | Elasticsearch API documentation} */ - async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise - async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -244,7 +227,7 @@ export default class Ccr { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -272,10 +255,10 @@ export default class Ccr { * Pauses an auto-follow pattern * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-pause-auto-follow-pattern.html | Elasticsearch API documentation} */ - async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> - async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise - async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: 
Record = {} const body = undefined @@ -283,7 +266,7 @@ export default class Ccr { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -304,10 +287,10 @@ export default class Ccr { * Pauses a follower index. The follower index will not fetch any additional operations from the leader index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-pause-follow.html | Elasticsearch API documentation} */ - async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> - async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise - async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise { + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -315,7 +298,7 @@ export default class Ccr { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -336,30 +319,22 @@ export default class Ccr { * Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-auto-follow-pattern.html | Elasticsearch API documentation} */ - async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise - async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['remote_cluster', 'follow_index_pattern', 'leader_index_patterns', 'leader_index_exclusion_patterns', 'max_outstanding_read_requests', 'settings', 'max_outstanding_write_requests', 'read_poll_timeout', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -380,10 +355,10 @@ export default class Ccr { * Resumes an auto-follow pattern that has been paused * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-resume-auto-follow-pattern.html | Elasticsearch API documentation} */ - async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> - async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise - async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -391,7 +366,7 @@ export default class Ccr { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -412,30 +387,22 @@ export default class Ccr { * Resumes a follower index that has been paused * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-resume-follow.html | Elasticsearch API documentation} */ - async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> - async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise - async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise { + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 
'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -456,10 +423,10 @@ export default class Ccr { * Gets all stats related to cross-cluster replication. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-stats.html | Elasticsearch API documentation} */ - async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptions): Promise - async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptions): Promise { + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -468,7 +435,7 @@ export default class Ccr { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -486,10 +453,10 @@ export default class Ccr { * Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. 
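The four overloads that appear on every method in this patch differ only in their transport options. As a sketch (assuming the same `client` instance as earlier), passing `meta: true` resolves the promise with the full `TransportResult` rather than the bare response body:

const withMeta = await client.ccr.stats({}, { meta: true })
console.log(withMeta.statusCode, withMeta.body.follow_stats)

// Without the option, only the deserialized body is returned.
const bodyOnly = await client.ccr.stats()
console.log(bodyOnly.auto_follow_stats)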
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-unfollow.html | Elasticsearch API documentation} */ - async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise> - async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptions): Promise - async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptions): Promise { + async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise> + async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise + async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -497,7 +464,7 @@ export default class Ccr { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts index 5fa41160e..59013642d 100644 --- a/src/api/api/clear_scroll.ts +++ b/src/api/api/clear_scroll.ts @@ -35,38 +35,29 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Clear a scrolling search. Clear the search context and results for a scrolling search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-scroll-api.html | Elasticsearch API documentation} */ -export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptions): Promise -export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptions): Promise { +export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise +export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['scroll_id'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts index f14346169..2df8577ec 100644 --- a/src/api/api/close_point_in_time.ts +++ b/src/api/api/close_point_in_time.ts @@ -35,37 +35,28 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html | Elasticsearch API documentation} */ -export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise -export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise { +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['id'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 6795c7f13..da9f656c7 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Cluster { @@ -48,31 +47,23 @@ export default class Cluster { * Provides explanations for shard allocations in the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html | Elasticsearch API documentation} */ - async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise> - async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise - async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise { + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise> + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['current_node', 'index', 'primary', 'shard'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -90,10 +81,10 @@ export default class Cluster { * Delete component templates. Deletes component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
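For APIs whose parameters are all body fields, such as `clearScroll` and `closePointInTime` above, the same flattening applies. A brief sketch (the scroll ID is a placeholder for a value returned by a previous scrolling search):

const { succeeded, num_freed } = await client.clearScroll({
  scroll_id: 'scroll-id-from-previous-search' // body field, passed flat
})
console.log(succeeded, num_freed)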
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ - async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise - async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise { + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -101,7 +92,7 @@ export default class Cluster { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -122,10 +113,10 @@ export default class Cluster { * Clears cluster voting config exclusions. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html | Elasticsearch API documentation} */ - async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise - async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -134,7 +125,7 @@ export default class Cluster { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -152,10 +143,10 @@ export default class Cluster { * Check component templates. Returns information about whether a particular component template exists. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ - async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise - async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise { + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -163,7 +154,7 @@ export default class Cluster { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -184,10 +175,10 @@ export default class Cluster { * Get component templates. Retrieves information about component templates. 
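A sketch for the HEAD-style existence checks in this file, which resolve to a plain boolean (the template name is hypothetical):

const exists = await client.cluster.existsComponentTemplate({ name: 'my-component-template' })
if (!exists) {
  // e.g. create it with client.cluster.putComponentTemplate(...)
}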
@@ -184,10 +175,10 @@ export default class Cluster {
   * Get component templates. Retrieves information about component templates.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation}
   */
-  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterGetComponentTemplateResponse>
-  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterGetComponentTemplateResponse, unknown>>
-  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterGetComponentTemplateResponse>
-  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterGetComponentTemplateResponse>
+  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterGetComponentTemplateResponse, unknown>>
+  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterGetComponentTemplateResponse>
+  async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -196,7 +187,7 @@ export default class Cluster {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -224,10 +215,10 @@ export default class Cluster {
   * Returns cluster-wide settings. By default, it returns only settings that have been explicitly defined.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-get-settings.html | Elasticsearch API documentation}
   */
-  async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterGetSettingsResponse>
-  async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterGetSettingsResponse, unknown>>
-  async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise<T.ClusterGetSettingsResponse>
-  async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterGetSettingsResponse>
+  async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterGetSettingsResponse, unknown>>
+  async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise<T.ClusterGetSettingsResponse>
+  async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -236,7 +227,7 @@ export default class Cluster {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -254,10 +245,10 @@ export default class Cluster {
   * The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html | Elasticsearch API documentation}
   */
-  async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterHealthResponse>
-  async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterHealthResponse, unknown>>
-  async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptions): Promise<T.ClusterHealthResponse>
-  async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptions): Promise<any> {
+  async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterHealthResponse>
+  async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterHealthResponse, unknown>>
+  async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise<T.ClusterHealthResponse>
+  async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -266,7 +257,7 @@ export default class Cluster {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
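A usage sketch for the health API under the new signature, reusing the earlier `client` (the index name is hypothetical):

[source,ts]
----
// `index` goes into the path; everything else into the query string.
const health = await client.cluster.health({
  index: 'my-index',
  wait_for_status: 'yellow',
  timeout: '30s'
})
console.log(health.status) // 'green' | 'yellow' | 'red'
----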
@@ -294,10 +285,10 @@ export default class Cluster {
   * Get cluster info. Returns basic information about the cluster.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-info.html | Elasticsearch API documentation}
   */
-  async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterInfoResponse>
-  async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterInfoResponse, unknown>>
-  async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptions): Promise<T.ClusterInfoResponse>
-  async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptions): Promise<any> {
+  async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterInfoResponse>
+  async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterInfoResponse, unknown>>
+  async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise<T.ClusterInfoResponse>
+  async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['target']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -305,7 +296,7 @@ export default class Cluster {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
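A one-line sketch of the required-params case, where TypeScript now enforces the `target` path parameter directly on the request type:

[source,ts]
----
// '_all' returns every info section; reuses the earlier `client`.
const info = await client.cluster.info({ target: '_all' })
----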
@@ -326,10 +317,10 @@ export default class Cluster {
   * Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.html | Elasticsearch API documentation}
   */
-  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPendingTasksResponse>
-  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPendingTasksResponse, unknown>>
-  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<T.ClusterPendingTasksResponse>
-  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<any> {
+  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPendingTasksResponse>
+  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPendingTasksResponse, unknown>>
+  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<T.ClusterPendingTasksResponse>
+  async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -338,7 +329,7 @@ export default class Cluster {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -356,10 +347,10 @@ export default class Cluster {
   * Updates the cluster voting config exclusions by node ids or node names.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html | Elasticsearch API documentation}
   */
-  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPostVotingConfigExclusionsResponse>
-  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPostVotingConfigExclusionsResponse, unknown>>
-  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<T.ClusterPostVotingConfigExclusionsResponse>
-  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<any> {
+  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPostVotingConfigExclusionsResponse>
+  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPostVotingConfigExclusionsResponse, unknown>>
+  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<T.ClusterPostVotingConfigExclusionsResponse>
+  async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -368,7 +359,7 @@ export default class Cluster {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -386,30 +377,22 @@ export default class Cluster {
   * Create or update a component template. Creates or updates a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation}
   */
-  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPutComponentTemplateResponse>
-  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPutComponentTemplateResponse, unknown>>
-  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterPutComponentTemplateResponse>
-  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPutComponentTemplateResponse>
+  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPutComponentTemplateResponse, unknown>>
+  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterPutComponentTemplateResponse>
+  async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['template', 'version', '_meta', 'deprecated']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
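A sketch of a body-carrying call after this change; properties listed in `acceptedBody` (here `template`) are serialized into the request body while `name` stays in the path (names below are hypothetical):

[source,ts]
----
await client.cluster.putComponentTemplate({
  name: 'my-component-template',
  template: {
    settings: { number_of_shards: 1 }
  }
})
----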
@@ -430,31 +413,23 @@ export default class Cluster {
   * Updates the cluster settings.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html | Elasticsearch API documentation}
   */
-  async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPutSettingsResponse>
-  async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPutSettingsResponse, unknown>>
-  async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise<T.ClusterPutSettingsResponse>
-  async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+  async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPutSettingsResponse>
+  async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPutSettingsResponse, unknown>>
+  async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise<T.ClusterPutSettingsResponse>
+  async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['persistent', 'transient']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     params = params ?? {}
     for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
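A minimal sketch for the settings update, again with flat body fields (`persistent`/`transient` per `acceptedBody`):

[source,ts]
----
await client.cluster.putSettings({
  persistent: { 'action.auto_create_index': 'false' }
})
----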
@@ -472,10 +447,10 @@ export default class Cluster {
   * The cluster remote info API allows you to retrieve all of the configured remote cluster information. It returns connection and endpoint information keyed by the configured remote cluster alias.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html | Elasticsearch API documentation}
   */
-  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterRemoteInfoResponse>
-  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterRemoteInfoResponse, unknown>>
-  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise<T.ClusterRemoteInfoResponse>
-  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise<any> {
+  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterRemoteInfoResponse>
+  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterRemoteInfoResponse, unknown>>
+  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise<T.ClusterRemoteInfoResponse>
+  async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -484,7 +459,7 @@ export default class Cluster {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -502,31 +477,23 @@ export default class Cluster {
   * Allows to manually change the allocation of individual shards in the cluster.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-reroute.html | Elasticsearch API documentation}
   */
-  async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterRerouteResponse>
-  async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterRerouteResponse, unknown>>
-  async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptions): Promise<T.ClusterRerouteResponse>
-  async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptions): Promise<any> {
+  async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterRerouteResponse>
+  async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterRerouteResponse, unknown>>
+  async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise<T.ClusterRerouteResponse>
+  async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['commands']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     params = params ?? {}
     for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
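A no-argument sketch; thanks to `params = params ?? {}`, optional-params methods can be called with nothing at all:

[source,ts]
----
const remotes = await client.cluster.remoteInfo()
console.log(Object.keys(remotes)) // configured remote cluster aliases
----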
@@ -544,10 +511,10 @@ export default class Cluster {
   * Returns a comprehensive information about the state of the cluster.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html | Elasticsearch API documentation}
   */
-  async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterStateResponse>
-  async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterStateResponse, unknown>>
-  async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptions): Promise<T.ClusterStateResponse>
-  async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptions): Promise<any> {
+  async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterStateResponse>
+  async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterStateResponse, unknown>>
+  async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise<T.ClusterStateResponse>
+  async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['metric', 'index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -556,7 +523,7 @@ export default class Cluster {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -588,10 +555,10 @@ export default class Cluster {
   * Returns cluster statistics. It returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html | Elasticsearch API documentation}
   */
-  async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterStatsResponse>
-  async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterStatsResponse, unknown>>
-  async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptions): Promise<T.ClusterStatsResponse>
-  async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptions): Promise<any> {
+  async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterStatsResponse>
+  async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterStatsResponse, unknown>>
+  async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise<T.ClusterStatsResponse>
+  async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['node_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -600,7 +567,7 @@ export default class Cluster {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
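Closing out the cluster namespace, a sketch combining both read-only APIs above (the `metric` value is one of the documented cluster state sections):

[source,ts]
----
// Optional path parts such as `metric` simply widen the request URL.
const state = await client.cluster.state({ metric: 'metadata' })
const stats = await client.cluster.stats()
console.log(stats.nodes.count.total)
----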
diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts
index 6cceebe5d..d2ad09fa9 100644
--- a/src/api/api/connector.ts
+++ b/src/api/api/connector.ts
@@ -35,7 +35,6 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class Connector {
@@ -48,10 +47,10 @@ export default class Connector {
   * Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-api.html | Elasticsearch API documentation}
   */
-  async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorCheckInResponse>
-  async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorCheckInResponse, unknown>>
-  async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise<T.ConnectorCheckInResponse>
-  async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise<any> {
+  async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorCheckInResponse>
+  async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorCheckInResponse, unknown>>
+  async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise<T.ConnectorCheckInResponse>
+  async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -80,10 +79,10 @@ export default class Connector {
   * Delete a connector. Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-api.html | Elasticsearch API documentation}
   */
-  async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorDeleteResponse>
-  async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorDeleteResponse, unknown>>
-  async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise<T.ConnectorDeleteResponse>
-  async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+  async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorDeleteResponse>
+  async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorDeleteResponse, unknown>>
+  async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise<T.ConnectorDeleteResponse>
+  async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -91,7 +90,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
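The connector namespace follows the exact same pattern; a sketch with a hypothetical connector id:

[source,ts]
----
// `connector_id` is consumed as a path parameter via `acceptedPath`.
await client.connector.checkIn({ connector_id: 'my-connector' })
----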
@@ -112,10 +111,10 @@ export default class Connector {
   * Get a connector. Get the details about a connector.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-api.html | Elasticsearch API documentation}
   */
-  async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorGetResponse>
-  async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorGetResponse, unknown>>
-  async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptions): Promise<T.ConnectorGetResponse>
-  async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptions): Promise<any> {
+  async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorGetResponse>
+  async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorGetResponse, unknown>>
+  async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise<T.ConnectorGetResponse>
+  async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -123,7 +122,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -144,30 +143,22 @@ export default class Connector {
   * Update the connector last sync stats. Update the fields related to the last sync of a connector. This action is used for analytics and monitoring.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-last-sync-api.html | Elasticsearch API documentation}
   */
-  async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorLastSyncResponse>
-  async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorLastSyncResponse, unknown>>
-  async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise<T.ConnectorLastSyncResponse>
-  async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise<any> {
+  async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorLastSyncResponse>
+  async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorLastSyncResponse, unknown>>
+  async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise<T.ConnectorLastSyncResponse>
+  async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const acceptedBody: string[] = ['last_access_control_sync_error', 'last_access_control_sync_scheduled_at', 'last_access_control_sync_status', 'last_deleted_document_count', 'last_incremental_sync_scheduled_at', 'last_indexed_document_count', 'last_seen', 'last_sync_error', 'last_sync_scheduled_at', 'last_sync_status', 'last_synced', 'sync_cursor']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -188,10 +179,10 @@ export default class Connector {
   * Get all connectors. Get information about all connectors.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-api.html | Elasticsearch API documentation}
   */
-  async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorListResponse>
-  async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorListResponse, unknown>>
-  async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptions): Promise<T.ConnectorListResponse>
-  async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptions): Promise<any> {
+  async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorListResponse>
+  async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorListResponse, unknown>>
+  async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise<T.ConnectorListResponse>
+  async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -200,7 +191,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -218,31 +209,23 @@ export default class Connector {
   * Create a connector. Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html | Elasticsearch API documentation}
   */
-  async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorPostResponse>
-  async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorPostResponse, unknown>>
-  async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptions): Promise<T.ConnectorPostResponse>
-  async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptions): Promise<any> {
+  async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorPostResponse>
+  async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorPostResponse, unknown>>
+  async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise<T.ConnectorPostResponse>
+  async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     params = params ?? {}
     for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
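A sketch of creating a connector with flat body fields (the index name and service type below are illustrative placeholders, not values this patch prescribes):

[source,ts]
----
const created = await client.connector.post({
  index_name: 'search-my-content',
  name: 'My connector',
  service_type: 'sharepoint_online'
})
console.log(created.id)
----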
@@ -260,31 +243,23 @@ export default class Connector {
   * Create or update a connector.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html | Elasticsearch API documentation}
   */
-  async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorPutResponse>
-  async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorPutResponse, unknown>>
-  async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptions): Promise<T.ConnectorPutResponse>
-  async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptions): Promise<any> {
+  async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorPutResponse>
+  async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorPutResponse, unknown>>
+  async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise<T.ConnectorPutResponse>
+  async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     params = params ?? {}
    for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -311,10 +286,10 @@ export default class Connector {
   /**
    * Deletes a connector secret.
    */
-  async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -323,7 +298,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -342,10 +317,10 @@ export default class Connector {
   /**
    * Retrieves a secret stored by Connectors.
    */
-  async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -354,7 +329,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -373,10 +348,10 @@ export default class Connector {
   /**
    * Creates a secret for a Connector.
    */
-  async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -385,7 +360,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -401,10 +376,10 @@ export default class Connector {
   /**
    * Creates or updates a secret for a Connector.
    */
-  async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -413,7 +388,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -433,10 +408,10 @@ export default class Connector {
   * Cancel a connector sync job. Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cancel-connector-sync-job-api.html | Elasticsearch API documentation}
   */
-  async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorSyncJobCancelResponse>
-  async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobCancelResponse, unknown>>
-  async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobCancelResponse>
-  async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise<any> {
+  async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorSyncJobCancelResponse>
+  async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobCancelResponse, unknown>>
+  async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobCancelResponse>
+  async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = ['connector_sync_job_id']
    const querystring: Record<string, any> = {}
    const body = undefined
@@ -444,7 +419,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -465,10 +440,10 @@ export default class Connector {
   * Checks in a connector sync job (refreshes 'last_seen').
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-sync-job-api.html | Elasticsearch API documentation}
   */
-  async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async syncJobCheckIn (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async syncJobCheckIn (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async syncJobCheckIn (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async syncJobCheckIn (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -477,7 +452,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
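A sketch of the sync-job cancellation call (the job id is hypothetical); note that the server-side cancellation completes asynchronously:

[source,ts]
----
await client.connector.syncJobCancel({
  connector_sync_job_id: 'my-sync-job-id'
})
----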
@@ -497,10 +472,10 @@ export default class Connector {
   * Claims a connector sync job.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/claim-connector-sync-job-api.html | Elasticsearch API documentation}
   */
-  async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async syncJobClaim (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async syncJobClaim (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async syncJobClaim (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async syncJobClaim (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -509,7 +484,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -529,10 +504,10 @@ export default class Connector {
   * Delete a connector sync job. Remove a connector sync job and its associated data. This is a destructive action that is not recoverable.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-sync-job-api.html | Elasticsearch API documentation}
   */
-  async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorSyncJobDeleteResponse>
-  async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobDeleteResponse, unknown>>
-  async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobDeleteResponse>
-  async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+  async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorSyncJobDeleteResponse>
+  async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobDeleteResponse, unknown>>
+  async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobDeleteResponse>
+  async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -540,7 +515,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -561,10 +536,10 @@ export default class Connector {
   * Sets an error for a connector sync job.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-error-api.html | Elasticsearch API documentation}
   */
-  async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async syncJobError (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async syncJobError (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async syncJobError (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async syncJobError (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -573,7 +548,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -593,10 +568,10 @@ export default class Connector {
   * Get a connector sync job.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-sync-job-api.html | Elasticsearch API documentation}
   */
-  async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorSyncJobGetResponse>
-  async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobGetResponse, unknown>>
-  async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobGetResponse>
-  async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise<any> {
+  async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorSyncJobGetResponse>
+  async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobGetResponse, unknown>>
+  async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobGetResponse>
+  async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -604,7 +579,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -625,10 +600,10 @@ export default class Connector {
   * Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-sync-jobs-api.html | Elasticsearch API documentation}
   */
-  async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorSyncJobListResponse>
-  async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobListResponse, unknown>>
-  async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobListResponse>
-  async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise<any> {
+  async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorSyncJobListResponse>
+  async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobListResponse, unknown>>
+  async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobListResponse>
+  async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -637,7 +612,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
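A sketch of listing sync jobs with plain query-string filters (connector id is hypothetical):

[source,ts]
----
const jobs = await client.connector.syncJobList({
  connector_id: 'my-connector',
  size: 10
})
console.log(jobs.count)
----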
@@ -655,30 +630,22 @@ export default class Connector {
   * Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-sync-job-api.html | Elasticsearch API documentation}
   */
-  async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorSyncJobPostResponse>
-  async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobPostResponse, unknown>>
-  async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobPostResponse>
-  async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise<any> {
+  async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorSyncJobPostResponse>
+  async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobPostResponse, unknown>>
+  async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobPostResponse>
+  async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['id', 'job_type', 'trigger_method']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -696,10 +663,10 @@ export default class Connector {
   * Updates the stats fields in the connector sync job document.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-stats-api.html | Elasticsearch API documentation}
   */
-  async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async syncJobUpdateStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async syncJobUpdateStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async syncJobUpdateStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async syncJobUpdateStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -708,7 +675,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -728,7 +695,7 @@ export default class Connector {
   * Activate the connector draft filter. Activates the valid draft filtering for a connector.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html | Elasticsearch API documentation}
   */
-  async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorUpdateActiveFilteringResponse>
-  async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateActiveFilteringResponse, unknown>>
-  async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateActiveFilteringResponse>
-  async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise<any> {
+  async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorUpdateActiveFilteringResponse>
+  async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateActiveFilteringResponse, unknown>>
+  async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateActiveFilteringResponse>
+  async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -739,7 +706,7 @@ export default class Connector {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
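A one-line sketch for activating the draft filter (hypothetical connector id):

[source,ts]
----
await client.connector.updateActiveFiltering({ connector_id: 'my-connector' })
----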
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-api-key-id-api.html | Elasticsearch API documentation} */ - async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise - async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise { + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['api_key_id', 'api_key_secret_id'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -804,30 +763,22 @@ export default class Connector { * Update the connector configuration. Update the configuration field in the connector document. 
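 * For example (illustrative only; the configuration keys shown are assumptions, since the accepted keys depend on the connector's service type): `await client.connector.updateConfiguration({ connector_id: 'my-connector', values: { host: '127.0.0.1', port: 5432 } })`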
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-configuration-api.html | Elasticsearch API documentation} */ - async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise - async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise { + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['configuration', 'values'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -848,30 +799,22 @@ export default class Connector { * Update the connector error field. Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected. 
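 * For example (illustrative only): `await client.connector.updateError({ connector_id: 'my-connector', error: 'sync failed: access denied' })` moves the connector to the error status, while `await client.connector.updateError({ connector_id: 'my-connector', error: null })` resets it to connected.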
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-error-api.html | Elasticsearch API documentation} */ - async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise - async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise { + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['error'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -892,10 +835,10 @@ export default class Connector { * Updates the connector features in the connector document. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-features-api.html | Elasticsearch API documentation} */ - async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async updateFeatures (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFeatures (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async updateFeatures (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async updateFeatures (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const querystring: Record = {} const body = undefined @@ -904,7 +847,7 @@ export default class Connector { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -924,30 +867,22 @@ export default class Connector { * Update the connector filtering. Update the draft filtering configuration of a connector and mark the draft validation state as edited.
The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html | Elasticsearch API documentation} */ - async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise - async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise { + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['filtering', 'rules', 'advanced_snippet'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -968,30 +903,22 @@ export default class Connector { * Update the connector draft filtering validation. Update the draft filtering validation info for a connector. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-validation-api.html | Elasticsearch API documentation} */ - async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise - async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise { + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['validation'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1012,30 +939,22 @@ export default class Connector { * Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. 
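 * For example (illustrative only; the index name is a placeholder): `await client.connector.updateIndexName({ connector_id: 'my-connector', index_name: 'search-my-content' })`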
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-index-name-api.html | Elasticsearch API documentation} */ - async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise - async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise { + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['index_name'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1056,30 +975,22 @@ export default class Connector { * Update the connector name and description. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-name-description-api.html | Elasticsearch API documentation} */ - async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise - async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise { + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['name', 'description'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1100,30 +1011,22 @@ export default class Connector { * Update the connector is_native flag. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-native-api.html | Elasticsearch API documentation} */ - async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise - async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise { + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['is_native'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1144,30 +1047,22 @@ export default class Connector { * Update the connector pipeline. When you create a new connector, the configuration of an ingest pipeline is populated with default settings. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-pipeline-api.html | Elasticsearch API documentation} */ - async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise - async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise { + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['pipeline'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1188,30 +1083,22 @@ export default class Connector { * Update the connector scheduling. 
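 * For example (illustrative only; the `scheduling` shape shown is an assumption based on the connector scheduling configuration, a Quartz cron interval plus an enabled flag): `await client.connector.updateScheduling({ connector_id: 'my-connector', scheduling: { full: { enabled: true, interval: '0 0 0 * * ?' } } })`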
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-scheduling-api.html | Elasticsearch API documentation} */ - async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise - async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise { + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['scheduling'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1232,30 +1119,22 @@ export default class Connector { * Update the connector service type. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-service-type-api.html | Elasticsearch API documentation} */ - async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise - async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise { + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['service_type'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1276,30 +1155,22 @@ export default class Connector { * Update the connector status. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-status-api.html | Elasticsearch API documentation} */ - async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise - async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise { + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['status'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/count.ts b/src/api/api/count.ts index 32e09ad9b..e8f4561dc 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -35,38 +35,29 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Returns number of documents matching a query. + * Count search results. Get the number of documents matching a query. 
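 * For example (illustrative only; the index and query are placeholders, and `client` is an assumed `Client` instance): `const { count } = await client.count({ index: 'my-index', query: { match: { status: 'published' } } })`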
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html | Elasticsearch API documentation} */ -export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptions): Promise -export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptions): Promise { +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['query'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/create.ts b/src/api/api/create.ts index f130eb4ac..1ab42f1f3 100644 --- a/src/api/api/create.ts +++ b/src/api/api/create.ts @@ -35,22 +35,20 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Index a document. Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. 
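 * For example (illustrative only): `await client.create({ index: 'my-index', id: '1', document: { title: 'Hello world' } })`; note that the document body is passed via the top-level `document` property rather than a `body` key.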
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation} */ -export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptions): Promise -export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptions): Promise { +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptions): Promise +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] const acceptedBody: string[] = ['document'] const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + let body: any for (const key in params) { if (acceptedBody.includes(key)) { @@ -58,7 +56,7 @@ export default async function CreateApi (this: That, params body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index f167cc0c8..3b01fcf7e 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class DanglingIndices { @@ -48,10 +47,10 @@ export default class DanglingIndices { * Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation} */ - async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise - async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise { + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index_uuid'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class DanglingIndices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -80,10 +79,10 @@ export default class DanglingIndices { * Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation} */ - async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> - async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise - async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise { + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index_uuid'] const querystring: Record = {} const body = undefined @@ -91,7 +90,7 @@ export default class DanglingIndices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -112,10 +111,10 @@ export default class DanglingIndices { * Get the dangling indices. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. 
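 * A sketch of the workflow (illustrative only; the `dangling_indices` response field and the `accept_data_loss` flag follow the dangling indices API, but the values are placeholders): `const { dangling_indices } = await client.danglingIndices.listDanglingIndices()`, then import one with `await client.danglingIndices.importDanglingIndex({ index_uuid: dangling_indices[0].index_uuid, accept_data_loss: true })`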
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation} */ - async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise - async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise { + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -124,7 +123,7 @@ export default class DanglingIndices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts index 387a22356..356cf7375 100644 --- a/src/api/api/delete.ts +++ b/src/api/api/delete.ts @@ -35,17 +35,16 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Delete a document. Removes a JSON document from the specified index. 
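 * For example (illustrative only): `await client.delete({ index: 'my-index', id: '1' })`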
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html | Elasticsearch API documentation} */ -export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptions): Promise -export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptions): Promise { +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] const querystring: Record = {} const body = undefined @@ -53,7 +52,7 @@ export default async function DeleteApi (this: That, params: T.DeleteRequest | T for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index 68776b013..f11b3a33c 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -35,37 +35,28 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Delete documents. Deletes documents that match the specified query. 
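 * For example (illustrative only; the field name is a placeholder): `await client.deleteByQuery({ index: 'my-index', query: { term: { status: 'stale' } }, max_docs: 1000 })`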
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html | Elasticsearch API documentation} */ -export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptions): Promise -export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptions): Promise { +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['max_docs', 'query', 'slice'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts index 54189a3d2..431e4a7a6 100644 --- a/src/api/api/delete_by_query_rethrottle.ts +++ b/src/api/api/delete_by_query_rethrottle.ts @@ -35,17 +35,16 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
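 * For example (illustrative only; the task ID is a placeholder, and per the delete by query documentation `requests_per_second: -1` disables throttling): `await client.deleteByQueryRethrottle({ task_id: 'node_id:12345', requests_per_second: -1 })`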
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html | Elasticsearch API documentation} */ -export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise -export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_id'] const querystring: Record = {} const body = undefined @@ -53,7 +52,7 @@ export default async function DeleteByQueryRethrottleApi (this: That, params: T. for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts index 801d4aae7..5ef36361f 100644 --- a/src/api/api/delete_script.ts +++ b/src/api/api/delete_script.ts @@ -35,17 +35,16 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Delete a script or search template. Deletes a stored script or search template. 
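 * For example (illustrative only): `await client.deleteScript({ id: 'my-search-template' })`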
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} */ -export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptions): Promise -export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptions): Promise { +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined @@ -53,7 +52,7 @@ export default async function DeleteScriptApi (this: That, params: T.DeleteScrip for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index ada26a215..023ba410d 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Enrich { @@ -48,10 +47,10 @@ export default class Enrich { * Delete an enrich policy. Deletes an existing enrich policy and its enrich index. 
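 * For example (illustrative only): `await client.enrich.deletePolicy({ name: 'my-enrich-policy' })`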
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-enrich-policy-api.html | Elasticsearch API documentation} */ - async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise - async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise { + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class Enrich { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -77,13 +76,13 @@ export default class Enrich { } /** - * Creates the enrich index for an existing enrich policy. + * Run an enrich policy. Create the enrich index for an existing enrich policy. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/execute-enrich-policy-api.html | Elasticsearch API documentation} */ - async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise - async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise { + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -91,7 +90,7 @@ export default class Enrich { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -112,10 +111,10 @@ export default class Enrich { * Get an enrich policy. Returns information about an enrich policy. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-enrich-policy-api.html | Elasticsearch API documentation} */ - async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise - async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise { + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -124,7 +123,7 @@ export default class Enrich { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -152,30 +151,22 @@ export default class Enrich { * Create an enrich policy. Creates an enrich policy. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-enrich-policy-api.html | Elasticsearch API documentation} */ - async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise - async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise { + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['geo_match', 'match', 'range'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -196,10 +187,10 @@ export default class Enrich { * Get enrich stats. 
Returns enrich coordinator statistics and information about enrich policies that are currently executing. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/enrich-stats-api.html | Elasticsearch API documentation} */ - async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptions): Promise - async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptions): Promise { + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -208,7 +199,7 @@ export default class Enrich { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index d0b9054b3..4f1d3020b 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Eql { @@ -45,13 +44,13 @@ export default class Eql { } /** - * Deletes an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. + * Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. 
diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts
index d0b9054b3..4f1d3020b 100644
--- a/src/api/api/eql.ts
+++ b/src/api/api/eql.ts
@@ -35,7 +35,6 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class Eql {
@@ -45,13 +44,13 @@ export default class Eql {
   }
 
   /**
-   * Deletes an async EQL search or a stored synchronous EQL search. The API also deletes results for the search.
+   * Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/eql-search-api.html | Elasticsearch API documentation}
    */
-  async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlDeleteResponse>
-  async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlDeleteResponse, unknown>>
-  async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptions): Promise<T.EqlDeleteResponse>
-  async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+  async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlDeleteResponse>
+  async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlDeleteResponse, unknown>>
+  async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise<T.EqlDeleteResponse>
+  async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@ export default class Eql {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -77,13 +76,13 @@ export default class Eql {
   }
 
   /**
-   * Returns the current status and available results for an async EQL search or a stored synchronous EQL search.
+   * Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-search-api.html | Elasticsearch API documentation}
    */
-  async get <TEvent = unknown> (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlGetResponse<TEvent>>
-  async get <TEvent = unknown> (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlGetResponse<TEvent>, unknown>>
-  async get <TEvent = unknown> (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptions): Promise<T.EqlGetResponse<TEvent>>
-  async get <TEvent = unknown> (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptions): Promise<any> {
+  async get <TEvent = unknown> (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlGetResponse<TEvent>>
+  async get <TEvent = unknown> (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlGetResponse<TEvent>, unknown>>
+  async get <TEvent = unknown> (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise<T.EqlGetResponse<TEvent>>
+  async get <TEvent = unknown> (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -91,7 +90,7 @@ export default class Eql {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -109,13 +108,13 @@ export default class Eql {
   }
 
   /**
-   * Returns the current status for an async EQL search or a stored synchronous EQL search without returning results.
+   * Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-status-api.html | Elasticsearch API documentation}
    */
-  async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlGetStatusResponse>
-  async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlGetStatusResponse, unknown>>
-  async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<T.EqlGetStatusResponse>
-  async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<any> {
+  async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlGetStatusResponse>
+  async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlGetStatusResponse, unknown>>
+  async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<T.EqlGetStatusResponse>
+  async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -123,7 +122,7 @@ export default class Eql {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -141,33 +140,25 @@ export default class Eql {
   }
 
   /**
-   * Returns results matching a query expressed in Event Query Language (EQL)
+   * Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/eql-search-api.html | Elasticsearch API documentation}
    */
-  async search <TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlSearchResponse<TEvent>>
-  async search <TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlSearchResponse<TEvent>, unknown>>
-  async search <TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise<T.EqlSearchResponse<TEvent>>
-  async search <TEvent = unknown> (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise<any> {
+  async search <TEvent = unknown> (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EqlSearchResponse<TEvent>>
+  async search <TEvent = unknown> (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlSearchResponse<TEvent>, unknown>>
+  async search <TEvent = unknown> (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise<T.EqlSearchResponse<TEvent>>
+  async search <TEvent = unknown> (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position', 'runtime_mappings', 'max_samples_per_key']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
      }
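An EQL search call keeps the same shape; only the former `body` wrapper is gone. A minimal sketch (the index name and query are hypothetical):

    // The TEvent type parameter flows into the returned events.
    const result = await client.eql.search({
      index: 'my-logs',
      query: 'process where process.name == "regsvr32.exe"',
      size: 10
    })
    console.log(result.hits.events)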
diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts
index da1570d79..b714d125c 100644
--- a/src/api/api/esql.ts
+++ b/src/api/api/esql.ts
@@ -35,7 +35,6 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class Esql {
@@ -48,10 +47,10 @@ export default class Esql {
    * Executes an ESQL request asynchronously
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-api.html | Elasticsearch API documentation}
    */
-  async asyncQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async asyncQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async asyncQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async asyncQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async asyncQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async asyncQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async asyncQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async asyncQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -60,7 +59,7 @@ export default class Esql {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -77,10 +76,10 @@ export default class Esql {
    * Retrieves the results of a previously submitted async query request given its ID.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-get-api.html | Elasticsearch API documentation}
    */
-  async asyncQueryGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async asyncQueryGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async asyncQueryGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async asyncQueryGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async asyncQueryGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async asyncQueryGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async asyncQueryGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async asyncQueryGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -89,7 +88,7 @@ export default class Esql {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -106,33 +105,25 @@ export default class Esql {
   }
 
   /**
-   * Executes an ES|QL request
+   * Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-rest.html | Elasticsearch API documentation}
    */
-  async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlQueryResponse>
-  async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlQueryResponse, unknown>>
-  async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise<T.EsqlQueryResponse>
-  async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise<any> {
+  async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlQueryResponse>
+  async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlQueryResponse, unknown>>
+  async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise<T.EsqlQueryResponse>
+  async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
      }
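A minimal ES|QL sketch; the index name and query string are hypothetical. The REST response carries `columns` and `values` arrays:

    const response = await client.esql.query({
      query: 'FROM my-logs | STATS count = COUNT(*) BY host.name | LIMIT 5',
      columnar: false   // row-oriented output; now a top-level key
    })
    console.log(response.columns, response.values)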
diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts
index 8f5033eb2..5d6cf0cfa 100644
--- a/src/api/api/exists.ts
+++ b/src/api/api/exists.ts
@@ -35,17 +35,16 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Check a document. Checks if a specified document exists.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation}
  */
-export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ExistsResponse>
-export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ExistsResponse, unknown>>
-export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptions): Promise<T.ExistsResponse>
-export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ExistsResponse>
+export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ExistsResponse, unknown>>
+export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise<T.ExistsResponse>
+export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index']
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -53,7 +52,7 @@ export default async function ExistsApi (this: That, params: T.ExistsRequest | T
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
    }
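The exists helper issues a HEAD request and resolves to a boolean. A minimal sketch with hypothetical index and id:

    const found = await client.exists({ index: 'my-logs', id: '1' })
    if (!found) console.log('document 1 is missing')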
diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts
index 8c6f14496..d97fa8feb 100644
--- a/src/api/api/exists_source.ts
+++ b/src/api/api/exists_source.ts
@@ -35,17 +35,16 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Check for a document source. Checks if a document's `_source` is stored.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation}
  */
-export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ExistsSourceResponse>
-export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ExistsSourceResponse, unknown>>
-export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptions): Promise<T.ExistsSourceResponse>
-export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ExistsSourceResponse>
+export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ExistsSourceResponse, unknown>>
+export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise<T.ExistsSourceResponse>
+export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index']
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -53,7 +52,7 @@ export default async function ExistsSourceApi (this: That, params: T.ExistsSourc
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
    }
diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts
index a65f6dc7a..49859bff6 100644
--- a/src/api/api/explain.ts
+++ b/src/api/api/explain.ts
@@ -35,37 +35,28 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Explain a document match result. Returns information about why a specific document matches, or doesn’t match, a query.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html | Elasticsearch API documentation}
  */
-export default async function ExplainApi <TDocument = unknown> (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ExplainResponse<TDocument>>
-export default async function ExplainApi <TDocument = unknown> (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ExplainResponse<TDocument>, unknown>>
-export default async function ExplainApi <TDocument = unknown> (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptions): Promise<T.ExplainResponse<TDocument>>
-export default async function ExplainApi <TDocument = unknown> (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function ExplainApi <TDocument = unknown> (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ExplainResponse<TDocument>>
+export default async function ExplainApi <TDocument = unknown> (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ExplainResponse<TDocument>, unknown>>
+export default async function ExplainApi <TDocument = unknown> (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise<T.ExplainResponse<TDocument>>
+export default async function ExplainApi <TDocument = unknown> (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index']
   const acceptedBody: string[] = ['query']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
 
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
    }
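With `query` accepted as a top-level key, an explain call looks like the sketch below; the index, id, and field names are hypothetical:

    const explanation = await client.explain({
      index: 'my-logs',
      id: '1',
      query: { match: { 'host.name': 'web-01' } }
    })
    // `matched` is a boolean, `explanation` the scoring breakdown.
    console.log(explanation.matched, explanation.explanation)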
diff --git a/src/api/api/features.ts b/src/api/api/features.ts
index 6ec8b7c75..feab5b5c4 100644
--- a/src/api/api/features.ts
+++ b/src/api/api/features.ts
@@ -35,7 +35,6 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class Features {
@@ -48,10 +47,10 @@ export default class Features {
    * Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-features-api.html | Elasticsearch API documentation}
    */
-  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FeaturesGetFeaturesResponse>
-  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FeaturesGetFeaturesResponse, unknown>>
-  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise<T.FeaturesGetFeaturesResponse>
-  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise<any> {
+  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FeaturesGetFeaturesResponse>
+  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FeaturesGetFeaturesResponse, unknown>>
+  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise<T.FeaturesGetFeaturesResponse>
+  async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -60,7 +59,7 @@ export default class Features {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -78,10 +77,10 @@ export default class Features {
    * Resets the internal state of features, usually by deleting system indices
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
    */
-  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FeaturesResetFeaturesResponse>
-  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FeaturesResetFeaturesResponse, unknown>>
-  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise<T.FeaturesResetFeaturesResponse>
-  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise<any> {
+  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FeaturesResetFeaturesResponse>
+  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FeaturesResetFeaturesResponse, unknown>>
+  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise<T.FeaturesResetFeaturesResponse>
+  async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -90,7 +89,7 @@ export default class Features {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
      }
diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts
index 082e83a53..1d907c54c 100644
--- a/src/api/api/field_caps.ts
+++ b/src/api/api/field_caps.ts
@@ -35,38 +35,29 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Get the field capabilities. Get information about the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html | Elasticsearch API documentation}
  */
-export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FieldCapsResponse>
-export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FieldCapsResponse, unknown>>
-export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise<T.FieldCapsResponse>
-export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FieldCapsResponse>
+export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FieldCapsResponse, unknown>>
+export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise<T.FieldCapsResponse>
+export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['fields', 'index_filter', 'runtime_mappings']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
 
   params = params ?? {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
      // @ts-expect-error
      querystring[key] = params[key]
    }
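Since `fields` is part of the accepted body keys, it is passed at the top level too. A minimal sketch; the index pattern and field names are hypothetical:

    const caps = await client.fieldCaps({
      index: 'my-logs-*',
      fields: ['host.name', '@timestamp']
    })
    // Per-field capabilities keyed by field name, then by type.
    console.log(caps.fields['host.name'])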
diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts
index 4fe0b0ed8..25f83089e 100644
--- a/src/api/api/fleet.ts
+++ b/src/api/api/fleet.ts
@@ -35,7 +35,6 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class Fleet {
@@ -47,10 +46,10 @@ export default class Fleet {
   /**
    * Deletes a secret stored by Fleet.
    */
-  async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@ export default class Fleet {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -78,10 +77,10 @@ export default class Fleet {
   /**
    * Retrieves a secret stored by Fleet.
    */
-  async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -90,7 +89,7 @@ export default class Fleet {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -107,13 +106,13 @@ export default class Fleet {
   }
 
   /**
-   * Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project.
+   * Get global checkpoints. Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-global-checkpoints.html | Elasticsearch API documentation}
    */
-  async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FleetGlobalCheckpointsResponse>
-  async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FleetGlobalCheckpointsResponse, unknown>>
-  async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise<T.FleetGlobalCheckpointsResponse>
-  async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise<any> {
+  async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FleetGlobalCheckpointsResponse>
+  async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FleetGlobalCheckpointsResponse, unknown>>
+  async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise<T.FleetGlobalCheckpointsResponse>
+  async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -121,7 +120,7 @@ export default class Fleet {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -139,17 +138,16 @@ export default class Fleet {
   }
 
   /**
-   * Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) with a single API request. The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) API. However, similar to the fleet search API, it supports the wait_for_checkpoints parameter.
+   * Run multiple Fleet searches. Run several Fleet searches with a single API request. The API follows the same structure as the multi search API. However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.
    */
-  async msearch <TDocument = unknown> (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FleetMsearchResponse<TDocument>>
-  async msearch <TDocument = unknown> (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FleetMsearchResponse<TDocument>, unknown>>
-  async msearch <TDocument = unknown> (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptions): Promise<T.FleetMsearchResponse<TDocument>>
-  async msearch <TDocument = unknown> (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptions): Promise<any> {
+  async msearch <TDocument = unknown> (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FleetMsearchResponse<TDocument>>
+  async msearch <TDocument = unknown> (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FleetMsearchResponse<TDocument>, unknown>>
+  async msearch <TDocument = unknown> (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise<T.FleetMsearchResponse<TDocument>>
+  async msearch <TDocument = unknown> (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['searches']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: any = params.body ?? undefined
+    let body: any
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -157,7 +155,7 @@ export default class Fleet {
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -184,10 +182,10 @@ export default class Fleet {
   /**
    * Creates a secret stored by Fleet.
    */
-  async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -196,7 +194,7 @@ export default class Fleet {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -210,32 +208,24 @@ export default class Fleet {
   }
 
   /**
-   * The purpose of the fleet search api is to provide a search api where the search will only be executed after provided checkpoint has been processed and is visible for searches inside of Elasticsearch.
+   * Run a Fleet search. The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.
    */
-  async search <TDocument = unknown> (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FleetSearchResponse<TDocument>>
-  async search <TDocument = unknown> (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FleetSearchResponse<TDocument>, unknown>>
-  async search <TDocument = unknown> (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptions): Promise<T.FleetSearchResponse<TDocument>>
-  async search <TDocument = unknown> (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptions): Promise<any> {
+  async search <TDocument = unknown> (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FleetSearchResponse<TDocument>>
+  async search <TDocument = unknown> (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FleetSearchResponse<TDocument>, unknown>>
+  async search <TDocument = unknown> (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise<T.FleetSearchResponse<TDocument>>
+  async search <TDocument = unknown> (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
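As the doc comments note, the Fleet APIs are designed for internal use by the Fleet server. Purely for illustration, a checkpoint-gated search might look like the sketch below; the system index and checkpoint value are hypothetical:

    const res = await client.fleet.search({
      index: '.fleet-actions',         // hypothetical Fleet system index
      wait_for_checkpoints: [42],      // wait until checkpoint 42 is visible
      query: { match_all: {} }
    })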
diff --git a/src/api/api/get.ts b/src/api/api/get.ts
index 3a64e8f07..a1be83e3c 100644
--- a/src/api/api/get.ts
+++ b/src/api/api/get.ts
@@ -35,17 +35,16 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Get a document by its ID. Retrieves the document with the specified ID from an index.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation}
  */
-export default async function GetApi <TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetResponse<TDocument>>
-export default async function GetApi <TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetResponse<TDocument>, unknown>>
-export default async function GetApi <TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptions): Promise<T.GetResponse<TDocument>>
-export default async function GetApi <TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function GetApi <TDocument = unknown> (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetResponse<TDocument>>
+export default async function GetApi <TDocument = unknown> (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetResponse<TDocument>, unknown>>
+export default async function GetApi <TDocument = unknown> (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise<T.GetResponse<TDocument>>
+export default async function GetApi <TDocument = unknown> (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index']
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -53,7 +52,7 @@ export default async function GetApi (this: That, params: T
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
    }
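A minimal typed get, showing how the `TDocument` parameter flows into `_source`; the interface, index, and id are hypothetical:

    interface LogDoc { '@timestamp': string, message: string }

    const doc = await client.get<LogDoc>({ index: 'my-logs', id: '1' })
    console.log(doc._source?.message)   // `_source` is typed as LogDoc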
diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts
index e84a69da1..c5cae38cb 100644
--- a/src/api/api/get_script.ts
+++ b/src/api/api/get_script.ts
@@ -35,17 +35,16 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Get a script or search template. Retrieves a stored script or search template.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation}
  */
-export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetScriptResponse>
-export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptResponse, unknown>>
-export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptions): Promise<T.GetScriptResponse>
-export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetScriptResponse>
+export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptResponse, unknown>>
+export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise<T.GetScriptResponse>
+export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id']
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -53,7 +52,7 @@ export default async function GetScriptApi (this: That, params: T.GetScriptReque
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
    }
diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts
index b8ffcebb6..c0a13575c 100644
--- a/src/api/api/get_script_context.ts
+++ b/src/api/api/get_script_context.ts
@@ -35,17 +35,16 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Get script contexts. Get a list of supported script contexts and their methods.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html | Elasticsearch API documentation}
  */
-export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetScriptContextResponse>
-export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptContextResponse, unknown>>
-export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptions): Promise<T.GetScriptContextResponse>
-export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetScriptContextResponse>
+export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptContextResponse, unknown>>
+export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise<T.GetScriptContextResponse>
+export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -54,7 +53,7 @@ export default async function GetScriptContextApi (this: That, params?: T.GetScr
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
    }
diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts
index e20ad7ebb..4a44de5dc 100644
--- a/src/api/api/get_script_languages.ts
+++ b/src/api/api/get_script_languages.ts
@@ -35,17 +35,16 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Get script languages. Get a list of available script types, languages, and contexts.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation}
  */
-export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetScriptLanguagesResponse>
-export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptLanguagesResponse, unknown>>
-export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<T.GetScriptLanguagesResponse>
-export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetScriptLanguagesResponse>
+export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptLanguagesResponse, unknown>>
+export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<T.GetScriptLanguagesResponse>
+export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -54,7 +53,7 @@ export default async function GetScriptLanguagesApi (this: That, params?: T.GetS
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
    }
diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts
index 79abedad1..3ef802718 100644
--- a/src/api/api/get_source.ts
+++ b/src/api/api/get_source.ts
@@ -35,17 +35,16 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Get a document's source. Returns the source of a document.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation}
  */
-export default async function GetSourceApi <TDocument = unknown> (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetSourceResponse<TDocument>>
-export default async function GetSourceApi <TDocument = unknown> (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetSourceResponse<TDocument>, unknown>>
-export default async function GetSourceApi <TDocument = unknown> (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptions): Promise<T.GetSourceResponse<TDocument>>
-export default async function GetSourceApi <TDocument = unknown> (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function GetSourceApi <TDocument = unknown> (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetSourceResponse<TDocument>>
+export default async function GetSourceApi <TDocument = unknown> (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetSourceResponse<TDocument>, unknown>>
+export default async function GetSourceApi <TDocument = unknown> (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise<T.GetSourceResponse<TDocument>>
+export default async function GetSourceApi <TDocument = unknown> (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index']
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -53,7 +52,7 @@ export default async function GetSourceApi (this: That, par
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
    }
diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts
index 01d14aa5d..f72fae89a 100644
--- a/src/api/api/graph.ts
+++ b/src/api/api/graph.ts
@@ -35,7 +35,6 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class Graph {
@@ -45,33 +44,25 @@ export default class Graph {
   }
 
   /**
-   * Extracts and summarizes information about the documents and terms in an Elasticsearch data stream or index.
+   * Explore graph analytics. Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. Subsequent requests enable you to spider out from one more vertices of interest. You can exclude vertices that have already been returned.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/graph-explore-api.html | Elasticsearch API documentation}
    */
-  async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GraphExploreResponse>
-  async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GraphExploreResponse, unknown>>
-  async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptions): Promise<T.GraphExploreResponse>
-  async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptions): Promise<any> {
+  async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GraphExploreResponse>
+  async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GraphExploreResponse, unknown>>
+  async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise<T.GraphExploreResponse>
+  async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['connections', 'controls', 'query', 'vertices']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
      }
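A minimal explore sketch, seeding the graph with a query and spidering on one field; the index and field names are hypothetical:

    const graph = await client.graph.explore({
      index: 'my-logs',
      query: { match: { 'user.id': 'alice' } },       // seed documents
      vertices: [{ field: 'host.name' }],             // vertex field
      connections: { vertices: [{ field: 'user.id' }] }
    })
    console.log(graph.vertices, graph.connections)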
diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts
index 58e098339..0539d0727 100644
--- a/src/api/api/health_report.ts
+++ b/src/api/api/health_report.ts
@@ -35,17 +35,16 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Returns the health of the cluster.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/health-api.html | Elasticsearch API documentation}
  */
-export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.HealthReportResponse>
-export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.HealthReportResponse, unknown>>
-export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptions): Promise<T.HealthReportResponse>
-export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.HealthReportResponse>
+export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.HealthReportResponse, unknown>>
+export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise<T.HealthReportResponse>
+export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['feature']
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -54,7 +53,7 @@ export default async function HealthReportApi (this: That, params?: T.HealthRepo
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
    }
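The health report takes no body; the optional `feature` path parameter scopes it to a single indicator. A minimal sketch:

    const health = await client.healthReport()
    console.log(health.status, Object.keys(health.indicators))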
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-delete-lifecycle.html | Elasticsearch API documentation} */ - async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise - async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class Ilm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -80,10 +79,10 @@ export default class Ilm { * Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-explain-lifecycle.html | Elasticsearch API documentation} */ - async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise - async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise { + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -91,7 +90,7 @@ export default class Ilm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -112,10 +111,10 @@ export default class Ilm { * Retrieves a lifecycle policy. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-get-lifecycle.html | Elasticsearch API documentation} */ - async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise - async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -124,7 +123,7 @@ export default class Ilm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -152,10 +151,10 @@ export default class Ilm { * Retrieves the current index lifecycle management (ILM) status. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-get-status.html | Elasticsearch API documentation} */ - async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptions): Promise - async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptions): Promise { + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -164,7 +163,7 @@ export default class Ilm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -182,31 +181,23 @@ export default class Ilm { * Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+ Using node roles enables ILM to automatically move the indices between data tiers. 
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-migrate-to-data-tiers.html | Elasticsearch API documentation}
    */
-  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMigrateToDataTiersResponse>
-  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmMigrateToDataTiersResponse, unknown>>
-  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise<T.IlmMigrateToDataTiersResponse>
-  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise<any> {
+  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMigrateToDataTiersResponse>
+  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmMigrateToDataTiersResponse, unknown>>
+  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise<T.IlmMigrateToDataTiersResponse>
+  async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['legacy_template_to_delete', 'node_attribute']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -224,30 +215,22 @@ export default class Ilm {
   * Manually moves an index into the specified step and executes that step.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-move-to-step.html | Elasticsearch API documentation}
    */
-  async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMoveToStepResponse>
-  async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmMoveToStepResponse, unknown>>
-  async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<T.IlmMoveToStepResponse>
-  async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<any> {
+  async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMoveToStepResponse>
+  async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmMoveToStepResponse, unknown>>
+  async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<T.IlmMoveToStepResponse>
+  async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
    const acceptedBody: string[] = ['current_step', 'next_step']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -268,30 +251,22 @@ export default class Ilm {
   * Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-put-lifecycle.html | Elasticsearch API documentation}
    */
-  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmPutLifecycleResponse>
-  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmPutLifecycleResponse, unknown>>
-  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<T.IlmPutLifecycleResponse>
-  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
+  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmPutLifecycleResponse>
+  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmPutLifecycleResponse, unknown>>
+  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<T.IlmPutLifecycleResponse>
+  async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['policy']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -312,10 +287,10 @@ export default class Ilm {
   * Removes the assigned lifecycle policy and stops managing the specified index
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-remove-policy.html | Elasticsearch API documentation}
    */
-  async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRemovePolicyResponse>
-  async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmRemovePolicyResponse, unknown>>
-  async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise<T.IlmRemovePolicyResponse>
-  async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise<any> {
+  async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRemovePolicyResponse>
+  async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmRemovePolicyResponse, unknown>>
+  async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise<T.IlmRemovePolicyResponse>
+  async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -323,7 +298,7 @@ export default class Ilm {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -344,10 +319,10 @@ export default class Ilm {
   * Retries executing the policy for an index that is in the ERROR step.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-retry-policy.html | Elasticsearch API documentation}
    */
-  async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRetryResponse>
-  async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmRetryResponse, unknown>>
-  async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptions): Promise<T.IlmRetryResponse>
-  async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptions): Promise<any> {
+  async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRetryResponse>
+  async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmRetryResponse, unknown>>
+  async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise<T.IlmRetryResponse>
+  async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -355,7 +330,7 @@ export default class Ilm {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
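The hunks above flatten what previously lived under a `body` key into top-level request properties: `policy` for `putLifecycle`, and `current_step`/`next_step` for `moveToStep`. A minimal usage sketch of the new call shape, assuming an already-configured `Client` instance (the policy name and phase definitions are illustrative, not part of this patch):

    // `policy` is a top-level property now; there is no `body` wrapper.
    await client.ilm.putLifecycle({
      name: 'my-policy', // hypothetical policy name
      policy: {
        phases: {
          hot: { actions: { rollover: { max_age: '7d' } } },
          delete: { min_age: '30d', actions: { delete: {} } }
        }
      }
    })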
@@ -376,10 +351,10 @@ export default class Ilm {
   * Start the index lifecycle management (ILM) plugin.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-start.html | Elasticsearch API documentation}
    */
-  async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStartResponse>
-  async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmStartResponse, unknown>>
-  async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptions): Promise<T.IlmStartResponse>
-  async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptions): Promise<any> {
+  async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStartResponse>
+  async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmStartResponse, unknown>>
+  async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise<T.IlmStartResponse>
+  async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -388,7 +363,7 @@ export default class Ilm {
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -406,10 +381,10 @@ export default class Ilm {
   * Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-stop.html | Elasticsearch API documentation}
    */
-  async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStopResponse>
-  async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmStopResponse, unknown>>
-  async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptions): Promise<T.IlmStopResponse>
-  async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptions): Promise<any> {
+  async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStopResponse>
+  async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmStopResponse, unknown>>
+  async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise<T.IlmStopResponse>
+  async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -418,7 +393,7 @@ export default class Ilm {
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
diff --git a/src/api/api/index.ts b/src/api/api/index.ts
index 89fba417f..dfdc7c20b 100644
--- a/src/api/api/index.ts
+++ b/src/api/api/index.ts
@@ -35,22 +35,20 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 /**
  * Index a document. Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation}
  */
-export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument> | TB.IndexRequest<TDocument>, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndexResponse>
-export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument> | TB.IndexRequest<TDocument>, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndexResponse, unknown>>
-export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument> | TB.IndexRequest<TDocument>, options?: TransportRequestOptions): Promise<T.IndexResponse>
-export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument> | TB.IndexRequest<TDocument>, options?: TransportRequestOptions): Promise<any> {
+export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument>, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndexResponse>
+export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument>, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndexResponse, unknown>>
+export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument>, options?: TransportRequestOptions): Promise<T.IndexResponse>
+export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument>, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'index']
   const acceptedBody: string[] = ['document']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: any = params.body ?? undefined
+  let body: any
   for (const key in params) {
     if (acceptedBody.includes(key)) {
@@ -58,7 +56,7 @@ export default async function IndexApi<TDocument = unknown> (this: That, params:
       body = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
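`IndexApi` accepts a single body property, `document`, which is forwarded verbatim as the request body; every other non-path property now lands in the querystring. A sketch under the same assumptions as above (the index name and document are hypothetical):

    await client.index({
      index: 'my-index',
      id: '1',
      document: { title: 'Hello world' }, // sent as the request body
      refresh: 'wait_for'                 // serialized into the querystring
    })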
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
index d4c0708cb..82e9227db 100644
--- a/src/api/api/indices.ts
+++ b/src/api/api/indices.ts
@@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 export default class Indices {
@@ -48,10 +47,10 @@ export default class Indices {
   * Add an index block. Limits the operations allowed on an index by blocking specific operation types.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html | Elasticsearch API documentation}
    */
-  async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAddBlockResponse>
-  async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAddBlockResponse, unknown>>
-  async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise<T.IndicesAddBlockResponse>
-  async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise<any> {
+  async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAddBlockResponse>
+  async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAddBlockResponse, unknown>>
+  async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise<T.IndicesAddBlockResponse>
+  async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index', 'block']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -81,31 +80,23 @@ export default class Indices {
   * Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html | Elasticsearch API documentation}
    */
-  async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAnalyzeResponse>
-  async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAnalyzeResponse, unknown>>
-  async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<T.IndicesAnalyzeResponse>
-  async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<any> {
+  async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAnalyzeResponse>
+  async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAnalyzeResponse, unknown>>
+  async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<T.IndicesAnalyzeResponse>
+  async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['analyzer', 'attributes', 'char_filter', 'explain', 'field', 'filter', 'normalizer', 'text', 'tokenizer']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -133,10 +124,10 @@ export default class Indices {
   * Clears the caches of one or more indices. For data streams, the API clears the caches of the stream’s backing indices.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html | Elasticsearch API documentation}
    */
-  async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesClearCacheResponse>
-  async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesClearCacheResponse, unknown>>
-  async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise<T.IndicesClearCacheResponse>
-  async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise<any> {
+  async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesClearCacheResponse>
+  async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesClearCacheResponse, unknown>>
+  async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise<T.IndicesClearCacheResponse>
+  async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -145,7 +136,7 @@ export default class Indices {
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -173,30 +164,22 @@ export default class Indices {
   * Clones an existing index.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html | Elasticsearch API documentation}
    */
-  async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloneResponse>
-  async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloneResponse, unknown>>
-  async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptions): Promise<T.IndicesCloneResponse>
-  async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptions): Promise<any> {
+  async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloneResponse>
+  async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloneResponse, unknown>>
+  async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise<T.IndicesCloneResponse>
+  async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index', 'target']
     const acceptedBody: string[] = ['aliases', 'settings']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -218,10 +201,10 @@ export default class Indices {
   * Closes an index.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-close.html | Elasticsearch API documentation}
    */
-  async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloseResponse>
-  async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloseResponse, unknown>>
-  async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptions): Promise<T.IndicesCloseResponse>
-  async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptions): Promise<any> {
+  async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloseResponse>
+  async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloseResponse, unknown>>
+  async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise<T.IndicesCloseResponse>
+  async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -229,7 +212,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -250,30 +233,22 @@ export default class Indices {
   * Create an index. Creates a new index.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html | Elasticsearch API documentation}
    */
-  async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCreateResponse>
-  async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCreateResponse, unknown>>
-  async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptions): Promise<T.IndicesCreateResponse>
-  async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptions): Promise<any> {
+  async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCreateResponse>
+  async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCreateResponse, unknown>>
+  async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise<T.IndicesCreateResponse>
+  async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['aliases', 'mappings', 'settings']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -294,10 +269,10 @@ export default class Indices {
   * Create a data stream. Creates a data stream. You must have a matching index template with data stream enabled.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation}
    */
-  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCreateDataStreamResponse>
-  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCreateDataStreamResponse, unknown>>
-  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesCreateDataStreamResponse>
-  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
+  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCreateDataStreamResponse>
+  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCreateDataStreamResponse, unknown>>
+  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesCreateDataStreamResponse>
+  async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -305,7 +280,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
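`indices.create` and `indices.analyze` follow the same flattened pattern: their accepted body properties (`aliases`/`mappings`/`settings`, and `analyzer`/`text` among others, respectively) are passed at the top level. A sketch with hypothetical names and values, assuming the same configured client:

    await client.indices.create({
      index: 'my-index',
      settings: { number_of_shards: 1 },
      mappings: { properties: { title: { type: 'text' } } }
    })

    const tokens = await client.indices.analyze({
      analyzer: 'standard',
      text: 'Hello world'
    })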
@@ -326,10 +301,10 @@ export default class Indices {
   * Get data stream stats. Retrieves statistics for one or more data streams.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation}
    */
-  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDataStreamsStatsResponse>
-  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDataStreamsStatsResponse, unknown>>
-  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise<T.IndicesDataStreamsStatsResponse>
-  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise<any> {
+  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDataStreamsStatsResponse>
+  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDataStreamsStatsResponse, unknown>>
+  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise<T.IndicesDataStreamsStatsResponse>
+  async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -338,7 +313,7 @@ export default class Indices {
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -366,10 +341,10 @@ export default class Indices {
   * Delete indices. Deletes one or more indices.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html | Elasticsearch API documentation}
    */
-  async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteResponse>
-  async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteResponse, unknown>>
-  async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteResponse>
-  async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+  async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteResponse>
+  async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteResponse, unknown>>
+  async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteResponse>
+  async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -377,7 +352,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -398,10 +373,10 @@ export default class Indices {
   * Delete an alias. Removes a data stream or index from an alias.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation}
    */
-  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteAliasResponse>
-  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteAliasResponse, unknown>>
-  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteAliasResponse>
-  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteAliasResponse>
+  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteAliasResponse, unknown>>
+  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteAliasResponse>
+  async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index', 'name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -409,7 +384,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -438,10 +413,10 @@ export default class Indices {
   * Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-lifecycle.html | Elasticsearch API documentation}
    */
-  async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteDataLifecycleResponse>
-  async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteDataLifecycleResponse, unknown>>
-  async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteDataLifecycleResponse>
-  async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteDataLifecycleResponse>
+  async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteDataLifecycleResponse, unknown>>
+  async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteDataLifecycleResponse>
+  async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -449,7 +424,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -470,10 +445,10 @@ export default class Indices {
   * Delete data streams. Deletes one or more data streams and their backing indices.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation}
    */
-  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteDataStreamResponse>
-  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteDataStreamResponse, unknown>>
-  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteDataStreamResponse>
-  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteDataStreamResponse>
+  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteDataStreamResponse, unknown>>
+  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteDataStreamResponse>
+  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -481,7 +456,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -502,10 +477,10 @@ export default class Indices {
   * Delete an index template. The provided <index-template> may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template.html | Elasticsearch API documentation}
    */
-  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteIndexTemplateResponse>
-  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteIndexTemplateResponse, unknown>>
-  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteIndexTemplateResponse>
-  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteIndexTemplateResponse>
+  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteIndexTemplateResponse, unknown>>
+  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteIndexTemplateResponse>
+  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -513,7 +488,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -534,10 +509,10 @@ export default class Indices {
   * Deletes a legacy index template.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template-v1.html | Elasticsearch API documentation}
    */
-  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteTemplateResponse>
-  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteTemplateResponse, unknown>>
-  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteTemplateResponse>
-  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteTemplateResponse>
+  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteTemplateResponse, unknown>>
+  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteTemplateResponse>
+  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -545,7 +520,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
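All of the delete-style endpoints above keep `body` as `undefined`, so every request property is either a path parameter or a querystring parameter. A sketch under the same assumptions (names are hypothetical):

    await client.indices.deleteAlias({ index: 'my-index', name: 'my-alias' })
    await client.indices.deleteDataStream({ name: 'my-data-stream' })
    await client.indices.delete({ index: 'my-index', ignore_unavailable: true })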
@@ -566,10 +541,10 @@ export default class Indices {
   * Analyzes the disk usage of each field of an index or data stream.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html | Elasticsearch API documentation}
    */
-  async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDiskUsageResponse>
-  async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDiskUsageResponse, unknown>>
-  async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise<T.IndicesDiskUsageResponse>
-  async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise<any> {
+  async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDiskUsageResponse>
+  async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDiskUsageResponse, unknown>>
+  async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise<T.IndicesDiskUsageResponse>
+  async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -577,7 +552,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -598,15 +573,14 @@ export default class Indices {
   * Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-downsample-data-stream.html | Elasticsearch API documentation}
    */
-  async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDownsampleResponse>
-  async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDownsampleResponse, unknown>>
-  async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise<T.IndicesDownsampleResponse>
-  async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise<any> {
+  async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDownsampleResponse>
+  async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDownsampleResponse, unknown>>
+  async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise<T.IndicesDownsampleResponse>
+  async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index', 'target_index']
     const acceptedBody: string[] = ['config']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: any = params.body ?? undefined
+    let body: any
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -614,7 +588,7 @@ export default class Indices {
         // @ts-expect-error
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
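`downsample` mirrors `IndexApi` in treating a single property (`config`) as the raw request body. A sketch, assuming a time series data stream backing index (the names and the one-hour interval are illustrative):

    await client.indices.downsample({
      index: '.ds-my-tsds-000001',     // hypothetical source backing index
      target_index: 'my-tsds-rollup',  // hypothetical target index
      config: { fixed_interval: '1h' } // sent verbatim as the request body
    })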
@@ -636,10 +610,10 @@ export default class Indices {
   * Check indices. Checks if one or more indices, index aliases, or data streams exist.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html | Elasticsearch API documentation}
    */
-  async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsResponse>
-  async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsResponse, unknown>>
-  async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsResponse>
-  async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptions): Promise<any> {
+  async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsResponse>
+  async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsResponse, unknown>>
+  async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsResponse>
+  async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -647,7 +621,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -668,10 +642,10 @@ export default class Indices {
   * Check aliases. Checks if one or more data stream or index aliases exist.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation}
    */
-  async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsAliasResponse>
-  async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsAliasResponse, unknown>>
-  async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsAliasResponse>
-  async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise<any> {
+  async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsAliasResponse>
+  async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsAliasResponse, unknown>>
+  async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsAliasResponse>
+  async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = ['name', 'index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -679,7 +653,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -705,13 +679,13 @@ export default class Indices {
   }
   /**
-  * Returns information about whether a particular index template exists.
+  * Check index templates. Check whether index templates exist.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index-templates.html | Elasticsearch API documentation}
    */
-  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsIndexTemplateResponse>
-  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsIndexTemplateResponse, unknown>>
-  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsIndexTemplateResponse>
-  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsIndexTemplateResponse>
+  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsIndexTemplateResponse, unknown>>
+  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsIndexTemplateResponse>
+  async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -719,7 +693,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -740,10 +714,10 @@ export default class Indices {
   * Check existence of index templates. Returns information about whether a particular index template exists.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-template-exists-v1.html | Elasticsearch API documentation}
    */
-  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsTemplateResponse>
-  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsTemplateResponse, unknown>>
-  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsTemplateResponse>
-  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsTemplateResponse>
+  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsTemplateResponse, unknown>>
+  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsTemplateResponse>
+  async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -751,7 +725,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
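The exists-style endpoints carry no body either, and their responses resolve to a boolean. A sketch, with hypothetical index and template names:

    const indexPresent = await client.indices.exists({ index: 'my-index' })
    const templatePresent = await client.indices.existsIndexTemplate({ name: 'my-template' })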
@@ -772,10 +746,10 @@ export default class Indices {
   * Get the status for a data stream lifecycle. Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-explain-lifecycle.html | Elasticsearch API documentation}
    */
-  async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExplainDataLifecycleResponse>
-  async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExplainDataLifecycleResponse, unknown>>
-  async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise<T.IndicesExplainDataLifecycleResponse>
-  async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
+  async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExplainDataLifecycleResponse>
+  async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExplainDataLifecycleResponse, unknown>>
+  async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise<T.IndicesExplainDataLifecycleResponse>
+  async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -783,7 +757,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -804,10 +778,10 @@ export default class Indices {
   * Returns field usage information for each shard and field of an index.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html | Elasticsearch API documentation}
    */
-  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesFieldUsageStatsResponse>
-  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesFieldUsageStatsResponse, unknown>>
-  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise<T.IndicesFieldUsageStatsResponse>
-  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise<any> {
+  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesFieldUsageStatsResponse>
+  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesFieldUsageStatsResponse, unknown>>
+  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise<T.IndicesFieldUsageStatsResponse>
+  async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -815,7 +789,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -836,10 +810,10 @@ export default class Indices {
   * Flushes one or more data streams or indices.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html | Elasticsearch API documentation}
    */
-  async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesFlushResponse>
-  async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesFlushResponse, unknown>>
-  async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptions): Promise<T.IndicesFlushResponse>
-  async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptions): Promise<any> {
+  async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesFlushResponse>
+  async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesFlushResponse, unknown>>
+  async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise<T.IndicesFlushResponse>
+  async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -848,7 +822,7 @@ export default class Indices {
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -876,10 +850,10 @@ export default class Indices {
   * Performs the force merge operation on one or more indices.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html | Elasticsearch API documentation}
    */
-  async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesForcemergeResponse>
-  async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesForcemergeResponse, unknown>>
-  async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise<T.IndicesForcemergeResponse>
-  async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise<any> {
+  async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesForcemergeResponse>
+  async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesForcemergeResponse, unknown>>
+  async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise<T.IndicesForcemergeResponse>
+  async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -888,7 +862,7 @@ export default class Indices {
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -916,10 +890,10 @@ export default class Indices {
   * Get index information. Returns information about one or more indices. For data streams, the API returns information about the stream’s backing indices.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html | Elasticsearch API documentation}
    */
-  async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetResponse>
-  async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetResponse, unknown>>
-  async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptions): Promise<T.IndicesGetResponse>
-  async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptions): Promise<any> {
+  async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetResponse>
+  async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetResponse, unknown>>
+  async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise<T.IndicesGetResponse>
+  async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -927,7 +901,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
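`indices.get` and the other read-only endpoints in this stretch send no body; any property that is not a path parameter is forwarded on the querystring. A sketch with a hypothetical index pattern:

    const info = await client.indices.get({
      index: 'my-index-*',
      filter_path: '*.settings' // forwarded as a querystring parameter
    })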
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ - async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise - async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name', 'index'] const querystring: Record = {} const body = undefined @@ -960,7 +934,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -995,10 +969,10 @@ export default class Indices { * Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle.html | Elasticsearch API documentation} */ - async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise - async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -1006,7 +980,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1027,10 +1001,10 @@ export default class Indices { * Get data streams. Retrieves information about one or more data streams. 
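    * @example
    * // Illustrative only; the data stream name is made up.
    * const { data_streams } = await client.indices.getDataStream({ name: 'my-data-stream' })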
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ - async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise - async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -1039,7 +1013,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1067,10 +1041,10 @@ export default class Indices { * Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html | Elasticsearch API documentation} */ - async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise - async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['fields', 'index'] const querystring: Record = {} const body = undefined @@ -1078,7 +1052,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1107,10 +1081,10 @@ export default class Indices { * Get index templates. 
Returns information about one or more index templates. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template.html | Elasticsearch API documentation} */ - async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise - async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -1119,7 +1093,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1147,10 +1121,10 @@ export default class Indices { * Get mapping definitions. Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html | Elasticsearch API documentation} */ - async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise - async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -1159,7 +1133,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1187,10 +1161,10 @@ export default class Indices { * Get index settings. 
Returns setting information for one or more indices. For data streams, returns setting information for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html | Elasticsearch API documentation} */ - async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise - async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'name'] const querystring: Record = {} const body = undefined @@ -1199,7 +1173,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1234,10 +1208,10 @@ export default class Indices { * Get index templates. Retrieves information about one or more index templates. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template-v1.html | Elasticsearch API documentation} */ - async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise - async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -1246,7 +1220,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1274,10 +1248,10 @@ export default class Indices { * Convert an index alias to a data stream. Converts an index alias to a data stream. 
You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; The alias must not have any filters; The alias must not use custom routing. If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ - async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> - async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise - async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise { + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -1285,7 +1259,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1306,30 +1280,22 @@ export default class Indices { * Update data streams. Performs one or more data stream modification actions in a single atomic operation. 
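    * @example
    * // A sketch of an atomic backing-index removal; the stream and index names
    * // are hypothetical, and `actions` is the only body field accepted here.
    * await client.indices.modifyDataStream({
    *   actions: [
    *     { remove_backing_index: { data_stream: 'my-logs', index: '.ds-my-logs-000001' } }
    *   ]
    * })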
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ - async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> - async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise - async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise { + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['actions'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1347,10 +1313,10 @@ export default class Indices { * Opens a closed index. For data streams, the API opens any closed backing indices. 
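    * @example
    * // Minimal sketch with a made-up index name; `client` is assumed connected.
    * await client.indices.open({ index: 'my-index' })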
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html | Elasticsearch API documentation} */ - async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> - async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptions): Promise - async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptions): Promise { + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -1358,7 +1324,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1379,10 +1345,10 @@ export default class Indices { * Promotes a data stream from a replicated data stream managed by CCR to a regular data stream * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ - async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> - async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise - async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise { + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -1390,7 +1356,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1411,30 +1377,22 @@ export default class Indices { * Create or update an alias. Adds a data stream or index to an alias. 
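    * @example
    * // Sketch of the flattened params: `index` and `name` become path parts,
    * // while `is_write_index` is routed into the JSON body. Names are made up.
    * await client.indices.putAlias({ index: 'my-index', name: 'my-alias', is_write_index: true })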
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ - async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise - async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise { + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'name'] const acceptedBody: string[] = ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1463,15 +1421,14 @@ export default class Indices { * Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-lifecycle.html | Elasticsearch API documentation} */ - async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise - async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['lifecycle'] const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? 
undefined + let body: any for (const key in params) { if (acceptedBody.includes(key)) { @@ -1479,7 +1436,7 @@ export default class Indices { body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1500,30 +1457,22 @@ export default class Indices { * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-template.html | Elasticsearch API documentation} */ - async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise - async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise { + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'allow_auto_create', 'ignore_missing_component_templates', 'deprecated'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1544,30 +1493,22 @@ export default class Indices { * Update field mappings. Adds new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields. For data streams, these changes are applied to all backing indices by default. 
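    * @example
    * // Minimal sketch: `index` is a path part and `properties` is serialized
    * // into the body. The index and field names are hypothetical.
    * await client.indices.putMapping({
    *   index: 'my-index',
    *   properties: { title: { type: 'text' } }
    * })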
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html | Elasticsearch API documentation} */ - async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise - async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise { + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['date_detection', 'dynamic', 'dynamic_date_formats', 'dynamic_templates', '_field_names', '_meta', 'numeric_detection', 'properties', '_routing', '_source', 'runtime'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1588,15 +1529,14 @@ export default class Indices { * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. 
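    * @example
    * // Sketch with a made-up index; `settings` is the single body field
    * // accepted here, and any other param goes to the querystring.
    * await client.indices.putSettings({
    *   index: 'my-index',
    *   settings: { index: { number_of_replicas: 1 } }
    * })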
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html | Elasticsearch API documentation} */ - async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise - async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise { + async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise + async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['settings'] const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + let body: any for (const key in params) { if (acceptedBody.includes(key)) { @@ -1604,7 +1544,7 @@ export default class Indices { body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1632,30 +1572,22 @@ export default class Indices { * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates-v1.html | Elasticsearch API documentation} */ - async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise - async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise { + async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise + async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'order', 'settings', 'version'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1676,10 +1608,10 @@ export default class Indices { * Returns information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html | Elasticsearch API documentation} */ - async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise - async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise { + async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise + async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -1688,7 +1620,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1716,10 +1648,10 @@ export default class Indices { * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. 
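    * @example
    * // Minimal sketch; omit `index` to refresh all indices. The name is made up.
    * await client.indices.refresh({ index: 'my-index' })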
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html | Elasticsearch API documentation} */ - async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise> - async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptions): Promise - async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptions): Promise { + async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise> + async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise + async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -1728,7 +1660,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1756,10 +1688,10 @@ export default class Indices { * Reloads an index's search analyzers and their resources. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html | Elasticsearch API documentation} */ - async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise> - async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise - async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise { + async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise> + async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise + async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -1767,7 +1699,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1788,10 +1720,10 @@ export default class Indices { * Resolves the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. 
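    * @example
    * // Illustrative pattern only; the wildcard expression is hypothetical.
    * const info = await client.indices.resolveCluster({ name: 'my-index-*' })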
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html | Elasticsearch API documentation} */ - async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise> - async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise - async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise { + async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise + async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -1799,7 +1731,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1817,13 +1749,13 @@ export default class Indices { } /** - * Resolves the specified name(s) and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. + * Resolve indices. Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. 
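    * @example
    * // Sketch with a made-up wildcard pattern; `name` is the only path part.
    * const resolved = await client.indices.resolveIndex({ name: 'my-index-*' })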
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-index-api.html | Elasticsearch API documentation}
    */
-  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveIndexResponse>
-  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveIndexResponse, unknown>>
-  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveIndexResponse>
-  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<any> {
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveIndexResponse>
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveIndexResponse, unknown>>
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveIndexResponse>
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -1831,7 +1763,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1852,30 +1784,22 @@ export default class Indices {
    * Roll over to a new index. Creates a new index for a data stream or index alias.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html | Elasticsearch API documentation}
    */
-  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRolloverResponse>
-  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRolloverResponse, unknown>>
-  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<T.IndicesRolloverResponse>
-  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<any> {
+  async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRolloverResponse>
+  async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRolloverResponse, unknown>>
+  async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<T.IndicesRolloverResponse>
+  async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['alias', 'new_index']
     const acceptedBody: string[] = ['aliases', 'conditions', 'mappings', 'settings']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1904,10 +1828,10 @@ export default class Indices {
    * Returns low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream’s backing indices.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html | Elasticsearch API documentation}
    */
-  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSegmentsResponse>
-  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSegmentsResponse, unknown>>
-  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<T.IndicesSegmentsResponse>
-  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<any> {
+  async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSegmentsResponse>
+  async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSegmentsResponse, unknown>>
+  async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<T.IndicesSegmentsResponse>
+  async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -1916,7 +1840,7 @@ export default class Indices {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1944,10 +1868,10 @@ export default class Indices {
    * Retrieves store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream’s backing indices.
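    * @example
    * // Minimal sketch; with the flattened signature, `index` is the only path
    * // part and the call sends no body. The index name is hypothetical.
    * const stores = await client.indices.shardStores({ index: 'my-index' })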
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html | Elasticsearch API documentation} */ - async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise> - async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise - async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise { + async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise> + async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise + async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -1956,7 +1880,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1984,30 +1908,22 @@ export default class Indices { * Shrinks an existing index into a new index with fewer primary shards. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html | Elasticsearch API documentation} */ - async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise> - async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptions): Promise - async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptions): Promise { + async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise> + async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise + async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'target'] const acceptedBody: string[] = ['aliases', 'settings'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2029,10 +1945,10 @@ export default class Indices { * Simulate an index. 
Returns the index configuration that would be applied to the specified index from an existing index template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-index.html | Elasticsearch API documentation} */ - async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise - async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise { + async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise + async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -2040,7 +1956,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2061,31 +1977,23 @@ export default class Indices { * Simulate an index template. Returns the index configuration that would be applied by a particular index template. 
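    * @example
    * // Sketch with a hypothetical template name; a full template definition
    * // (`index_patterns`, `template`, and so on) may be passed instead of `name`.
    * const simulated = await client.indices.simulateTemplate({ name: 'my-template' })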
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-template.html | Elasticsearch API documentation} */ - async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise - async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise { + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['allow_auto_create', 'index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'ignore_missing_component_templates', 'deprecated'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2113,30 +2021,22 @@ export default class Indices { * Splits an existing index into a new index with more primary shards. 
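    * @example
    * // Sketch of a split with made-up names; `aliases` and `settings` are the
    * // body fields this method accepts.
    * await client.indices.split({
    *   index: 'my-index',
    *   target: 'my-split-index',
    *   settings: { 'index.number_of_shards': 2 }
    * })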
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html | Elasticsearch API documentation} */ - async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise> - async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptions): Promise - async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptions): Promise { + async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise> + async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise + async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'target'] const acceptedBody: string[] = ['aliases', 'settings'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2158,10 +2058,10 @@ export default class Indices { * Returns statistics for one or more indices. For data streams, the API retrieves statistics for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html | Elasticsearch API documentation} */ - async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptions): Promise - async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptions): Promise { + async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['metric', 'index'] const querystring: Record = {} const body = undefined @@ -2170,7 +2070,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2205,10 +2105,10 @@ export default class Indices { * Unfreezes an index. 
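    * @example
    * // Minimal sketch; the index name is made up.
    * await client.indices.unfreeze({ index: 'my-index' })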
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/unfreeze-index-api.html | Elasticsearch API documentation} */ - async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise - async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise { + async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise + async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -2216,7 +2116,7 @@ export default class Indices { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2237,31 +2137,23 @@ export default class Indices { * Create or update an alias. Adds a data stream or index to an alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ - async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise - async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise { + async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise + async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['actions'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2279,31 +2171,23 @@ export default class Indices { * Validate a query. Validates a query without running it. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html | Elasticsearch API documentation} */ - async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise - async validateQuery (this: That, params?: T.IndicesValidateQueryRequest | TB.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise { + async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise + async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['query'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 9caa35a70..6eaea8d28 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Inference { @@ -48,10 +47,10 @@ export default class Inference { * Delete an inference endpoint * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html | Elasticsearch API documentation} */ - async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptions): Promise - async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_type', 'inference_id'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class Inference { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -88,10 +87,10 @@ export default class Inference { * Get an inference endpoint * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html | Elasticsearch API documentation} */ - async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptions): Promise - async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptions): Promise { + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_type', 'inference_id'] const querystring: Record = {} const body = undefined @@ -100,7 +99,7 @@ export default class Inference { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if 
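
For callers, dropping the `TB.*` (typesWithBodyKey) request variants means the nested `body` object disappears from every indices call. A usage sketch under that assumption, with an already-configured `client` and illustrative index names:

// Old style (removed by this patch): settings nested under a `body` key.
// await client.indices.split({ index: 'logs', target: 'logs-split', body: { settings: { 'index.number_of_shards': 4 } } })

// New style: `settings` is a top-level key and is routed into the request
// body because it appears in the method's acceptedBody list.
await client.indices.split({
  index: 'logs',
  target: 'logs-split',
  settings: { 'index.number_of_shards': 4 }
})
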
diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts
index 9caa35a70..6eaea8d28 100644
--- a/src/api/api/inference.ts
+++ b/src/api/api/inference.ts
@@ -35,7 +35,6 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 export default class Inference {
@@ -48,10 +47,10 @@ export default class Inference {
   * Delete an inference endpoint
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html | Elasticsearch API documentation}
   */
-  async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceDeleteResponse>
-  async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceDeleteResponse, unknown>>
-  async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptions): Promise<T.InferenceDeleteResponse>
-  async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+  async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceDeleteResponse>
+  async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceDeleteResponse, unknown>>
+  async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise<T.InferenceDeleteResponse>
+  async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['task_type', 'inference_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@ export default class Inference {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -88,10 +87,10 @@ export default class Inference {
   * Get an inference endpoint
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html | Elasticsearch API documentation}
   */
-  async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceGetResponse>
-  async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceGetResponse, unknown>>
-  async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptions): Promise<T.InferenceGetResponse>
-  async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptions): Promise<any> {
+  async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceGetResponse>
+  async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceGetResponse, unknown>>
+  async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise<T.InferenceGetResponse>
+  async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['task_type', 'inference_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -100,7 +99,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -132,30 +131,22 @@ export default class Inference {
   * Perform inference on the service
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html | Elasticsearch API documentation}
   */
-  async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceInferenceResponse>
-  async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceInferenceResponse, unknown>>
-  async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<T.InferenceInferenceResponse>
-  async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<any> {
+  async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceInferenceResponse>
+  async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceInferenceResponse, unknown>>
+  async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<T.InferenceInferenceResponse>
+  async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['task_type', 'inference_id']
     const acceptedBody: string[] = ['query', 'input', 'task_settings']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -184,15 +175,14 @@ export default class Inference {
   * Create an inference endpoint
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html | Elasticsearch API documentation}
   */
-  async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutResponse>
-  async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutResponse, unknown>>
-  async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptions): Promise<T.InferencePutResponse>
-  async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptions): Promise<any> {
+  async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutResponse>
+  async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutResponse, unknown>>
+  async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise<T.InferencePutResponse>
+  async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['task_type', 'inference_id']
     const acceptedBody: string[] = ['inference_config']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: any = params.body ?? undefined
+    let body: any
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
         // @ts-expect-error
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -229,10 +219,10 @@ export default class Inference {
   * Perform streaming inference
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/post-stream-inference-api.html | Elasticsearch API documentation}
   */
-  async streamInference (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async streamInference (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async streamInference (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async streamInference (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async streamInference (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async streamInference (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async streamInference (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async streamInference (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['inference_id', 'task_type']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -241,7 +231,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
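
The `inference` method above routes `query`, `input`, and `task_settings` into the request body, while `task_type` and `inference_id` stay in the path. A minimal usage sketch, assuming a configured `client` and a hypothetical endpoint named 'my-embedding-endpoint' that already exists:

// Sketch: run a text-embedding inference against a pre-existing endpoint.
const result = await client.inference.inference({
  task_type: 'text_embedding',
  inference_id: 'my-embedding-endpoint',
  input: 'The quick brown fox'
})
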
diff --git a/src/api/api/info.ts b/src/api/api/info.ts
index 83ce76773..7c1b8a8ab 100644
--- a/src/api/api/info.ts
+++ b/src/api/api/info.ts
@@ -35,17 +35,16 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Get cluster info. Returns basic information about the cluster.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation}
  */
-export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InfoResponse>
-export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InfoResponse, unknown>>
-export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptions): Promise<T.InfoResponse>
-export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InfoResponse>
+export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InfoResponse, unknown>>
+export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise<T.InfoResponse>
+export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -54,7 +53,7 @@ export default async function InfoApi (this: That, params?: T.InfoRequest | TB.I
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
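
`InfoApi` declares no path or body parameters, so the loop above only has query parameters to route and the call itself reduces to `GET /`. A sketch, assuming a configured `client`:

// Sketch: fetch basic cluster metadata.
const info = await client.info()
console.log(info.name, info.version.number)
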
diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts
index 1c7836950..4b7f89de0 100644
--- a/src/api/api/ingest.ts
+++ b/src/api/api/ingest.ts
@@ -35,7 +35,6 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 export default class Ingest {
@@ -45,13 +44,13 @@ export default class Ingest {
   }
 
   /**
-   * Deletes a geoip database configuration.
+   * Delete GeoIP database configurations. Delete one or more IP geolocation database configurations.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-geoip-database-api.html | Elasticsearch API documentation}
    */
-  async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestDeleteGeoipDatabaseResponse>
-  async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestDeleteGeoipDatabaseResponse, unknown>>
-  async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestDeleteGeoipDatabaseResponse>
-  async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestDeleteGeoipDatabaseResponse>
+  async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestDeleteGeoipDatabaseResponse, unknown>>
+  async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestDeleteGeoipDatabaseResponse>
+  async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@ export default class Ingest {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -80,10 +79,10 @@ export default class Ingest {
   * Deletes an ip location database configuration
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-ip-location-database-api.html | Elasticsearch API documentation}
   */
-  async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async deleteIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async deleteIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async deleteIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async deleteIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -92,7 +91,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -109,13 +108,13 @@ export default class Ingest {
   }
 
   /**
-   * Deletes one or more existing ingest pipeline.
+   * Delete pipelines. Delete one or more ingest pipelines.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html | Elasticsearch API documentation}
    */
-  async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestDeletePipelineResponse>
-  async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestDeletePipelineResponse, unknown>>
-  async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise<T.IngestDeletePipelineResponse>
-  async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise<any> {
+  async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestDeletePipelineResponse>
+  async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestDeletePipelineResponse, unknown>>
+  async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise<T.IngestDeletePipelineResponse>
+  async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -123,7 +122,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -141,13 +140,13 @@ export default class Ingest {
   }
 
   /**
-   * Gets download statistics for GeoIP2 databases used with the geoip processor.
+   * Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/geoip-processor.html | Elasticsearch API documentation}
    */
-  async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGeoIpStatsResponse>
-  async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGeoIpStatsResponse, unknown>>
-  async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise<T.IngestGeoIpStatsResponse>
-  async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise<any> {
+  async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGeoIpStatsResponse>
+  async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGeoIpStatsResponse, unknown>>
+  async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise<T.IngestGeoIpStatsResponse>
+  async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -156,7 +155,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -171,13 +170,13 @@ export default class Ingest {
   }
 
   /**
-   * Returns information about one or more geoip database configurations.
+   * Get GeoIP database configurations. Get information about one or more IP geolocation database configurations.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-geoip-database-api.html | Elasticsearch API documentation}
    */
-  async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGetGeoipDatabaseResponse>
-  async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGetGeoipDatabaseResponse, unknown>>
-  async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestGetGeoipDatabaseResponse>
-  async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
+  async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGetGeoipDatabaseResponse>
+  async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGetGeoipDatabaseResponse, unknown>>
+  async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestGetGeoipDatabaseResponse>
+  async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -186,7 +185,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -214,10 +213,10 @@ export default class Ingest {
   * Returns the specified ip location database configuration
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ip-location-database-api.html | Elasticsearch API documentation}
   */
-  async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async getIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async getIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async getIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async getIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -226,7 +225,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -250,13 +249,13 @@ export default class Ingest {
   }
 
   /**
-   * Returns information about one or more ingest pipelines. This API returns a local reference of the pipeline.
+   * Get pipelines. Get information about one or more ingest pipelines. This API returns a local reference of the pipeline.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html | Elasticsearch API documentation}
    */
-  async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGetPipelineResponse>
-  async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGetPipelineResponse, unknown>>
-  async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise<T.IngestGetPipelineResponse>
-  async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise<any> {
+  async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGetPipelineResponse>
+  async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGetPipelineResponse, unknown>>
+  async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise<T.IngestGetPipelineResponse>
+  async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -265,7 +264,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -290,13 +289,13 @@ export default class Ingest {
   }
 
   /**
-   * Extracts structured fields out of a single text field within a document. You choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused.
+   * Run a grok processor. Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html | Elasticsearch API documentation}
    */
-  async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestProcessorGrokResponse>
-  async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestProcessorGrokResponse, unknown>>
-  async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise<T.IngestProcessorGrokResponse>
-  async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise<any> {
+  async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestProcessorGrokResponse>
+  async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestProcessorGrokResponse, unknown>>
+  async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise<T.IngestProcessorGrokResponse>
+  async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -305,7 +304,7 @@
     for (const key in params) {
      if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
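
`processorGrok` is a parameterless GET that exposes the cluster's bank of built-in grok patterns, which is handy when composing the aliased expressions the updated comment describes. A sketch, assuming a configured `client`; `COMBINEDAPACHELOG` is one of the standard built-in pattern names:

// Sketch: list built-in grok patterns (GET _ingest/processor/grok).
const { patterns } = await client.ingest.processorGrok()
console.log(Object.keys(patterns).length, patterns.COMBINEDAPACHELOG)
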
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -367,10 +358,10 @@ export default class Ingest { * Puts the configuration for a ip location database to be downloaded * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-ip-location-database-api.html | Elasticsearch API documentation} */ - async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async putIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async putIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined @@ -379,7 +370,7 @@ export default class Ingest { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -396,33 +387,25 @@ export default class Ingest { } /** - * Creates or updates an ingest pipeline. Changes made using this API take effect immediately. + * Create or update a pipeline. Changes made using this API take effect immediately. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ingest.html | Elasticsearch API documentation} */ - async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise - async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise { + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version', 'deprecated'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -440,33 +423,25 @@ export default class Ingest { } /** - * Executes an ingest pipeline against a set of provided documents. + * Simulate a pipeline. Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html | Elasticsearch API documentation} */ - async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise - async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise { + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['docs', 'pipeline'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index 227dfb36c..6188a31e9 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -35,37 +35,28 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. 
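
`putPipeline` and `simulate` above are the write path and the dry-run path of the same feature. A combined sketch, assuming a configured `client`; the pipeline id, processor, and sample document are all illustrative:

// Sketch: create a pipeline, then test it without indexing anything.
await client.ingest.putPipeline({
  id: 'my-pipeline',
  description: 'rename a field',
  processors: [{ rename: { field: 'src', target_field: 'source' } }]
})
const simulated = await client.ingest.simulate({
  id: 'my-pipeline',
  docs: [{ _source: { src: '10.0.0.1' } }]
})
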
diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts
index 227dfb36c..6188a31e9 100644
--- a/src/api/api/knn_search.ts
+++ b/src/api/api/knn_search.ts
@@ -35,37 +35,28 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html | Elasticsearch API documentation}
  */
-export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.KnnSearchResponse<TDocument>>
-export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.KnnSearchResponse<TDocument>, unknown>>
-export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptions): Promise<T.KnnSearchResponse<TDocument>>
-export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.KnnSearchResponse<TDocument>>
+export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.KnnSearchResponse<TDocument>, unknown>>
+export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise<T.KnnSearchResponse<TDocument>>
+export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['_source', 'docvalue_fields', 'stored_fields', 'fields', 'filter', 'knn']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
 
   for (const key in params) {
    if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
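
As the comment above notes, this endpoint has been superseded by the `knn` option of the regular search API. A sketch of the replacement call, assuming a configured `client` and an illustrative index, field, and vector:

// Sketch: approximate kNN via the search API instead of the deprecated endpoint.
const result = await client.search({
  index: 'my-vectors',
  knn: {
    field: 'embedding',
    query_vector: [0.1, 0.2, 0.3],
    k: 10,
    num_candidates: 100
  }
})
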
diff --git a/src/api/api/license.ts b/src/api/api/license.ts
index 4d12f0806..cd7c5a4a9 100644
--- a/src/api/api/license.ts
+++ b/src/api/api/license.ts
@@ -35,7 +35,6 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 export default class License {
@@ -48,10 +47,10 @@ export default class License {
   * Deletes licensing information for the cluster
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html | Elasticsearch API documentation}
   */
-  async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseDeleteResponse>
-  async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseDeleteResponse, unknown>>
-  async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptions): Promise<T.LicenseDeleteResponse>
-  async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+  async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseDeleteResponse>
+  async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseDeleteResponse, unknown>>
+  async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise<T.LicenseDeleteResponse>
+  async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -60,7 +59,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -78,10 +77,10 @@ export default class License {
   * Get license information. Returns information about your Elastic license, including its type, its status, when it was issued, and when it expires. For more information about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions).
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html | Elasticsearch API documentation}
   */
-  async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseGetResponse>
-  async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetResponse, unknown>>
-  async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptions): Promise<T.LicenseGetResponse>
-  async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptions): Promise<any> {
+  async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseGetResponse>
+  async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetResponse, unknown>>
+  async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise<T.LicenseGetResponse>
+  async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -90,7 +89,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -108,10 +107,10 @@ export default class License {
   * Retrieves information about the status of the basic license.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html | Elasticsearch API documentation}
   */
-  async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseGetBasicStatusResponse>
-  async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetBasicStatusResponse, unknown>>
-  async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise<T.LicenseGetBasicStatusResponse>
-  async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise<any> {
+  async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseGetBasicStatusResponse>
+  async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetBasicStatusResponse, unknown>>
+  async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise<T.LicenseGetBasicStatusResponse>
+  async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -120,7 +119,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -138,10 +137,10 @@ export default class License {
   * Retrieves information about the status of the trial license.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html | Elasticsearch API documentation}
   */
-  async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseGetTrialStatusResponse>
-  async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetTrialStatusResponse, unknown>>
-  async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise<T.LicenseGetTrialStatusResponse>
-  async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise<any> {
+  async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseGetTrialStatusResponse>
+  async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetTrialStatusResponse, unknown>>
+  async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise<T.LicenseGetTrialStatusResponse>
+  async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -150,7 +149,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -168,31 +167,23 @@ export default class License {
   * Updates the license for the cluster.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html | Elasticsearch API documentation}
   */
-  async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicensePostResponse>
-  async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostResponse, unknown>>
-  async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise<T.LicensePostResponse>
-  async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise<any> {
+  async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicensePostResponse>
+  async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostResponse, unknown>>
+  async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise<T.LicensePostResponse>
+  async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['license', 'licenses']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -210,10 +201,10 @@ export default class License {
   * The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html).
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html | Elasticsearch API documentation}
   */
-  async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicensePostStartBasicResponse>
-  async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostStartBasicResponse, unknown>>
-  async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise<T.LicensePostStartBasicResponse>
-  async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise<any> {
+  async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicensePostStartBasicResponse>
+  async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostStartBasicResponse, unknown>>
+  async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise<T.LicensePostStartBasicResponse>
+  async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -222,7 +213,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -240,10 +231,10 @@ export default class License {
   * The start trial API enables you to start a 30-day trial, which gives access to all subscription features.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html | Elasticsearch API documentation}
   */
-  async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicensePostStartTrialResponse>
-  async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostStartTrialResponse, unknown>>
-  async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise<T.LicensePostStartTrialResponse>
-  async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise<any> {
+  async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicensePostStartTrialResponse>
+  async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostStartTrialResponse, unknown>>
+  async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise<T.LicensePostStartTrialResponse>
+  async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -252,7 +243,7 @@
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
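
The license methods above are all body-less GETs except `post`. A small sketch that chains two of them, assuming a configured `client` on a cluster still running a basic license:

// Sketch: inspect the current license and start a trial if still on basic.
const current = await client.license.get()
if (current.license.type === 'basic') {
  await client.license.postStartTrial({ acknowledge: true })
}
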
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-delete-pipeline.html | Elasticsearch API documentation} */ - async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise - async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise { + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class Logstash { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -80,10 +79,10 @@ export default class Logstash { * Retrieves pipelines used for Logstash Central Management. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-get-pipeline.html | Elasticsearch API documentation} */ - async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise - async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise { + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined @@ -92,7 +91,7 @@ export default class Logstash { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -120,15 +119,14 @@ export default class Logstash { * Creates or updates a pipeline used for Logstash Central Management. 
@@ -120,15 +119,14 @@ export default class Logstash {
   * Creates or updates a pipeline used for Logstash Central Management.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-put-pipeline.html | Elasticsearch API documentation}
   */
-  async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LogstashPutPipelineResponse>
-  async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LogstashPutPipelineResponse, unknown>>
-  async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise<T.LogstashPutPipelineResponse>
-  async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise<any> {
+  async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LogstashPutPipelineResponse>
+  async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LogstashPutPipelineResponse, unknown>>
+  async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise<T.LogstashPutPipelineResponse>
+  async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['pipeline']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: any = params.body ?? undefined
+    let body: any

     for (const key in params) {
       if (acceptedBody.includes(key)) {
         // @ts-expect-error
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
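For the body-as-value style that `putPipeline` uses above, the whole request body is now the `pipeline` property itself. A hedged sketch (illustrative values only; the remaining required `LogstashPipeline` fields are elided):

----
await client.logstash.putPipeline({
  id: 'my-pipeline',
  pipeline: {
    description: 'example pipeline',
    pipeline: 'input { stdin {} } output { stdout {} }'
    // remaining LogstashPipeline fields elided for brevity
  }
})
----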
diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts
index ae069e696..514691961 100644
--- a/src/api/api/mget.ts
+++ b/src/api/api/mget.ts
@@ -35,38 +35,29 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

 /**
   * Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.html | Elasticsearch API documentation}
   */
-export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MgetResponse<TDocument>>
-export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MgetResponse<TDocument>, unknown>>
-export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptions): Promise<T.MgetResponse<TDocument>>
-export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MgetResponse<TDocument>>
+export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MgetResponse<TDocument>, unknown>>
+export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise<T.MgetResponse<TDocument>>
+export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['docs', 'ids']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}

   params = params ?? {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
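`mget` keeps its `TDocument` generic, so callers type the returned documents exactly as before; only the flattened request shape changes. A small sketch with hypothetical index and ids (not part of the patch):

----
interface Doc { title: string }
const res = await client.mget<Doc>({ index: 'my-index', ids: ['1', '2'] })
for (const item of res.docs) {
  // each entry is a per-document result; missing ids come back as errors
}
----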
diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts
index 48bb46c23..5fc747195 100644
--- a/src/api/api/migration.ts
+++ b/src/api/api/migration.ts
@@ -35,7 +35,6 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

 export default class Migration {
@@ -48,10 +47,10 @@ export default class Migration {
   * Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-deprecation.html | Elasticsearch API documentation}
   */
-  async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MigrationDeprecationsResponse>
-  async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationDeprecationsResponse, unknown>>
-  async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<T.MigrationDeprecationsResponse>
-  async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<any> {
+  async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MigrationDeprecationsResponse>
+  async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationDeprecationsResponse, unknown>>
+  async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<T.MigrationDeprecationsResponse>
+  async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -60,7 +59,7 @@ export default class Migration {
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -88,10 +87,10 @@ export default class Migration {
   * Find out whether system features need to be upgraded or not
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-feature-upgrade.html | Elasticsearch API documentation}
   */
-  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MigrationGetFeatureUpgradeStatusResponse>
-  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationGetFeatureUpgradeStatusResponse, unknown>>
-  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise<T.MigrationGetFeatureUpgradeStatusResponse>
-  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise<any> {
+  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MigrationGetFeatureUpgradeStatusResponse>
+  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationGetFeatureUpgradeStatusResponse, unknown>>
+  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise<T.MigrationGetFeatureUpgradeStatusResponse>
+  async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -100,7 +99,7 @@ export default class Migration {
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -118,10 +117,10 @@ export default class Migration {
   * Begin upgrades for system features
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-feature-upgrade.html | Elasticsearch API documentation}
   */
-  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MigrationPostFeatureUpgradeResponse>
-  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationPostFeatureUpgradeResponse, unknown>>
-  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise<T.MigrationPostFeatureUpgradeResponse>
-  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise<any> {
+  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MigrationPostFeatureUpgradeResponse>
+  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationPostFeatureUpgradeResponse, unknown>>
+  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise<T.MigrationPostFeatureUpgradeResponse>
+  async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -130,7 +129,7 @@ export default class Migration {
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
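Taken together, the three `Migration` methods above map onto the upgrade workflow: check deprecations, check feature status, then kick off the upgrade. A hedged sketch (not part of the patch; index name is hypothetical):

----
const deprecations = await client.migration.deprecations({ index: 'my-index' })
const status = await client.migration.getFeatureUpgradeStatus()
if (status.migration_status === 'MIGRATION_NEEDED') {
  await client.migration.postFeatureUpgrade()
}
----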
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index 3b4268957..82419d6e1 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -35,7 +35,6 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }

 export default class Ml {
@@ -48,10 +47,10 @@ export default class Ml {
   * Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-trained-model-deployment-cache.html | Elasticsearch API documentation}
   */
-  async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlClearTrainedModelDeploymentCacheResponse>
-  async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlClearTrainedModelDeploymentCacheResponse, unknown>>
-  async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise<T.MlClearTrainedModelDeploymentCacheResponse>
-  async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise<any> {
+  async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlClearTrainedModelDeploymentCacheResponse>
+  async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlClearTrainedModelDeploymentCacheResponse, unknown>>
+  async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise<T.MlClearTrainedModelDeploymentCacheResponse>
+  async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -80,30 +79,22 @@ export default class Ml {
   * Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-close-job.html | Elasticsearch API documentation}
   */
-  async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlCloseJobResponse>
-  async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlCloseJobResponse, unknown>>
-  async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise<T.MlCloseJobResponse>
-  async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise<any> {
+  async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlCloseJobResponse>
+  async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlCloseJobResponse, unknown>>
+  async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise<T.MlCloseJobResponse>
+  async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -124,10 +115,10 @@ export default class Ml {
   * Delete a calendar. Removes all scheduled events from a calendar, then deletes it.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar.html | Elasticsearch API documentation}
   */
-  async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarResponse>
-  async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarResponse, unknown>>
-  async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarResponse>
-  async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarResponse>
+  async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarResponse, unknown>>
+  async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarResponse>
+  async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -135,7 +126,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
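The close-job semantics described above (the datafeed is stopped first, with the same timeout and force values) look like this from the caller's side; the job id and timeout are hypothetical:

----
await client.ml.closeJob({ job_id: 'my-anomaly-job', timeout: '5m' })
// or force-close without waiting for a graceful shutdown:
await client.ml.closeJob({ job_id: 'my-anomaly-job', force: true })
----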
   * Delete events from a calendar.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-event.html | Elasticsearch API documentation}
   */
-  async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarEventResponse>
-  async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarEventResponse, unknown>>
-  async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarEventResponse>
-  async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarEventResponse>
+  async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarEventResponse, unknown>>
+  async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarEventResponse>
+  async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id', 'event_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -167,7 +158,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -189,10 +180,10 @@ export default class Ml {
   * Delete anomaly jobs from a calendar.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-job.html | Elasticsearch API documentation}
   */
-  async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarJobResponse>
-  async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarJobResponse, unknown>>
-  async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarJobResponse>
-  async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarJobResponse>
+  async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarJobResponse, unknown>>
+  async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarJobResponse>
+  async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id', 'job_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -200,7 +191,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -222,10 +213,10 @@ export default class Ml {
   * Delete a data frame analytics job.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-dfanalytics.html | Elasticsearch API documentation}
   */
-  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteDataFrameAnalyticsResponse>
-  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDataFrameAnalyticsResponse, unknown>>
-  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDataFrameAnalyticsResponse>
-  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteDataFrameAnalyticsResponse>
+  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDataFrameAnalyticsResponse, unknown>>
+  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDataFrameAnalyticsResponse>
+  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -233,7 +224,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -254,10 +245,10 @@ export default class Ml {
   * Delete a datafeed.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-datafeed.html | Elasticsearch API documentation}
   */
-  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteDatafeedResponse>
-  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDatafeedResponse, unknown>>
-  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDatafeedResponse>
-  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteDatafeedResponse>
+  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDatafeedResponse, unknown>>
+  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDatafeedResponse>
+  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -265,7 +256,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -286,31 +277,23 @@ export default class Ml {
   * Delete expired ML data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-expired-data.html | Elasticsearch API documentation}
   */
-  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteExpiredDataResponse>
-  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteExpiredDataResponse, unknown>>
-  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<T.MlDeleteExpiredDataResponse>
-  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteExpiredDataResponse>
+  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteExpiredDataResponse, unknown>>
+  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<T.MlDeleteExpiredDataResponse>
+  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['requests_per_second', 'timeout']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
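A hedged usage sketch for the retention clean-up described above (not part of the patch; throttle and timeout values are illustrative):

----
await client.ml.deleteExpiredData({
  job_id: '*',               // all anomaly detection jobs
  requests_per_second: 100,  // throttle the underlying deletes
  timeout: '1h'
})
----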
@@ -338,10 +321,10 @@ export default class Ml {
   * Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-filter.html | Elasticsearch API documentation}
   */
-  async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteFilterResponse>
-  async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteFilterResponse, unknown>>
-  async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<T.MlDeleteFilterResponse>
-  async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteFilterResponse>
+  async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteFilterResponse, unknown>>
+  async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<T.MlDeleteFilterResponse>
+  async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['filter_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -349,7 +332,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -370,10 +353,10 @@ export default class Ml {
   * Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-forecast.html | Elasticsearch API documentation}
   */
-  async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteForecastResponse>
-  async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteForecastResponse, unknown>>
-  async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<T.MlDeleteForecastResponse>
-  async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteForecastResponse>
+  async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteForecastResponse, unknown>>
+  async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<T.MlDeleteForecastResponse>
+  async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'forecast_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -381,7 +364,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -410,10 +393,10 @@ export default class Ml {
   * Delete an anomaly detection job. All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-job.html | Elasticsearch API documentation}
   */
-  async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteJobResponse>
-  async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteJobResponse, unknown>>
-  async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<T.MlDeleteJobResponse>
-  async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteJobResponse>
+  async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteJobResponse, unknown>>
+  async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<T.MlDeleteJobResponse>
+  async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -421,7 +404,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
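Because `deleteJob` accepts a single job identifier (no wildcards), deleting several jobs means one call per job. A hedged sketch with hypothetical job ids:

----
for (const jobId of ['job-1', 'job-2']) {
  await client.ml.deleteJob({ job_id: jobId, force: true })
}
----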
@@ -442,10 +425,10 @@ export default class Ml {
   * Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-snapshot.html | Elasticsearch API documentation}
   */
-  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteModelSnapshotResponse>
-  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteModelSnapshotResponse, unknown>>
-  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlDeleteModelSnapshotResponse>
-  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteModelSnapshotResponse>
+  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteModelSnapshotResponse, unknown>>
+  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlDeleteModelSnapshotResponse>
+  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -453,7 +436,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -475,10 +458,10 @@ export default class Ml {
   * Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-trained-models.html | Elasticsearch API documentation}
   */
-  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteTrainedModelResponse>
-  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelResponse, unknown>>
-  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlDeleteTrainedModelResponse>
-  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteTrainedModelResponse>
+  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelResponse, unknown>>
+  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlDeleteTrainedModelResponse>
+  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -486,7 +469,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -507,10 +490,10 @@ export default class Ml {
   * Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-trained-models-aliases.html | Elasticsearch API documentation}
   */
-  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteTrainedModelAliasResponse>
-  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelAliasResponse, unknown>>
-  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<T.MlDeleteTrainedModelAliasResponse>
-  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteTrainedModelAliasResponse>
+  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelAliasResponse, unknown>>
+  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<T.MlDeleteTrainedModelAliasResponse>
+  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_alias', 'model_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -518,7 +501,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -540,31 +523,23 @@ export default class Ml {
   * Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-apis.html | Elasticsearch API documentation}
   */
-  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlEstimateModelMemoryResponse>
-  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEstimateModelMemoryResponse, unknown>>
-  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise<T.MlEstimateModelMemoryResponse>
-  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise<any> {
+  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlEstimateModelMemoryResponse>
+  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEstimateModelMemoryResponse, unknown>>
+  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise<T.MlEstimateModelMemoryResponse>
+  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
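The estimate is driven by the analysis config plus cardinality hints, as the accepted body fields above suggest. A hedged sketch with made-up field names and cardinalities (not part of the patch):

----
const estimate = await client.ml.estimateModelMemory({
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'bytes', by_field_name: 'host' }]
  },
  overall_cardinality: { host: 300 }
})
console.log(estimate.model_memory_estimate)
----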
@@ -582,30 +557,22 @@ export default class Ml {
   * Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/evaluate-dfanalytics.html | Elasticsearch API documentation}
   */
-  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlEvaluateDataFrameResponse>
-  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEvaluateDataFrameResponse, unknown>>
-  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise<T.MlEvaluateDataFrameResponse>
-  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise<any> {
+  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlEvaluateDataFrameResponse>
+  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEvaluateDataFrameResponse, unknown>>
+  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise<T.MlEvaluateDataFrameResponse>
+  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['evaluation', 'index', 'query']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
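A hedged sketch of the ground-truth/prediction pairing the description above requires (not part of the patch; the results index and field names are hypothetical):

----
const evaluation = await client.ml.evaluateDataFrame({
  index: 'my-dfa-results',
  evaluation: {
    classification: {
      actual_field: 'label',
      predicted_field: 'ml.label_prediction'
    }
  }
})
----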
@@ -623,31 +590,23 @@ export default class Ml {
   * Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.
   * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/explain-dfanalytics.html | Elasticsearch API documentation}
   */
-  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlExplainDataFrameAnalyticsResponse>
-  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlExplainDataFrameAnalyticsResponse, unknown>>
-  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlExplainDataFrameAnalyticsResponse>
-  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlExplainDataFrameAnalyticsResponse>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlExplainDataFrameAnalyticsResponse, unknown>>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlExplainDataFrameAnalyticsResponse>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start']
    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
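As the description says, `explainDataFrameAnalytics` works either for an existing job (pass `id`) or for a config that has not been created yet (pass the config inline). A hedged sketch of the latter, with hypothetical names:

----
const explanation = await client.ml.explainDataFrameAnalytics({
  source: { index: 'my-source-index' },
  analysis: { classification: { dependent_variable: 'label' } }
})
// inspect explanation.field_selection and explanation.memory_estimation
----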
@@ -675,30 +634,22 @@ export default class Ml {
   * Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-flush-job.html | Elasticsearch API documentation}
   */
-  async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlFlushJobResponse>
-  async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlFlushJobResponse, unknown>>
-  async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptions): Promise<T.MlFlushJobResponse>
-  async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptions): Promise<any> {
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlFlushJobResponse>
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlFlushJobResponse, unknown>>
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise<T.MlFlushJobResponse>
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'skip_time', 'start']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
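A minimal usage sketch for the flush-versus-close trade-off described above (not part of the patch; the job id is hypothetical):

----
// Flush and compute interim results, keeping the job open for more data.
await client.ml.flushJob({ job_id: 'my-anomaly-job', calc_interim: true })
----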
@@ -719,30 +670,22 @@ export default class Ml {
   * Predict future behavior of a time series. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-forecast.html | Elasticsearch API documentation}
   */
-  async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlForecastResponse>
-  async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlForecastResponse, unknown>>
-  async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptions): Promise<T.MlForecastResponse>
-  async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptions): Promise<any> {
+  async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlForecastResponse>
+  async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlForecastResponse, unknown>>
+  async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise<T.MlForecastResponse>
+  async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['duration', 'expires_in', 'max_model_memory']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -763,30 +706,22 @@ export default class Ml {
   * Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-bucket.html | Elasticsearch API documentation}
   */
-  async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetBucketsResponse>
-  async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetBucketsResponse, unknown>>
-  async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<T.MlGetBucketsResponse>
-  async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetBucketsResponse>
+  async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetBucketsResponse, unknown>>
+  async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<T.MlGetBucketsResponse>
+  async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'timestamp']
     const acceptedBody: string[] = ['anomaly_score', 'desc', 'end', 'exclude_interim', 'expand', 'page', 'sort', 'start']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
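A hedged sketch of the bucket view described above (not part of the patch; job id, score threshold, and date math are hypothetical):

----
const buckets = await client.ml.getBuckets({
  job_id: 'my-anomaly-job',
  anomaly_score: 75,   // only high-scoring buckets
  start: 'now-2d'
})
----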
@@ -815,10 +750,10 @@ export default class Ml {
   * Get info about events in calendars.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar-event.html | Elasticsearch API documentation}
   */
-  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarEventsResponse>
-  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarEventsResponse, unknown>>
-  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<T.MlGetCalendarEventsResponse>
-  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarEventsResponse>
+  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarEventsResponse, unknown>>
+  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<T.MlGetCalendarEventsResponse>
+  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -826,7 +761,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -847,31 +782,23 @@ export default class Ml {
   * Get calendar configuration info.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar.html | Elasticsearch API documentation}
   */
-  async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarsResponse>
-  async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarsResponse, unknown>>
-  async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise<T.MlGetCalendarsResponse>
-  async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarsResponse>
+  async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarsResponse, unknown>>
+  async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise<T.MlGetCalendarsResponse>
+  async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id']
     const acceptedBody: string[] = ['page']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -899,30 +826,22 @@ export default class Ml {
   * Get anomaly detection job results for categories.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-category.html | Elasticsearch API documentation}
   */
-  async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCategoriesResponse>
-  async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCategoriesResponse, unknown>>
-  async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise<T.MlGetCategoriesResponse>
-  async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise<any> {
+  async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCategoriesResponse>
+  async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCategoriesResponse, unknown>>
+  async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise<T.MlGetCategoriesResponse>
+  async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'category_id']
     const acceptedBody: string[] = ['page']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
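Both `getCalendars` and `getCategories` accept a `page` body field, so result paging looks the same for each. A hedged sketch (not part of the patch; the job id is hypothetical):

----
const cats = await client.ml.getCategories({
  job_id: 'my-anomaly-job',
  page: { from: 0, size: 50 }  // page through categories 50 at a time
})
----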
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics.html | Elasticsearch API documentation}
   */
-  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsResponse>
-  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsResponse, unknown>>
-  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsResponse>
-  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsResponse>
+  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsResponse, unknown>>
+  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsResponse>
+  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -963,7 +882,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -991,10 +910,10 @@ export default class Ml {
   * Get data frame analytics jobs usage info.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics-stats.html | Elasticsearch API documentation}
   */
-  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
-  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsStatsResponse, unknown>>
-  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
-  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsStatsResponse, unknown>>
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -1003,7 +922,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1031,10 +950,10 @@ export default class Ml {
   * Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed-stats.html | Elasticsearch API documentation}
   */
-  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedStatsResponse>
-  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDatafeedStatsResponse, unknown>>
-  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDatafeedStatsResponse>
-  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedStatsResponse>
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDatafeedStatsResponse, unknown>>
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDatafeedStatsResponse>
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -1043,7 +962,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1071,10 +990,10 @@ export default class Ml {
   * Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. This API returns a maximum of 10,000 datafeeds.
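As an illustrative sketch of calling this API with the new top-level params (the datafeed ID here is hypothetical):

    // Fetch the configuration of a single datafeed.
    const { count, datafeeds } = await client.ml.getDatafeeds({ datafeed_id: 'datafeed-low-request-rate' })
    console.log(count, datafeeds)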
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed.html | Elasticsearch API documentation} */ - async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise - async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise { + async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise + async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] const querystring: Record = {} const body = undefined @@ -1083,7 +1002,7 @@ export default class Ml { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1111,10 +1030,10 @@ export default class Ml { * Get filters. You can get a single filter or all filters. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-filter.html | Elasticsearch API documentation} */ - async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptions): Promise - async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptions): Promise { + async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise + async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['filter_id'] const querystring: Record = {} const body = undefined @@ -1123,7 +1042,7 @@ export default class Ml { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1151,30 +1070,22 @@ export default class Ml { * Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. 
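A minimal sketch of the updated call shape, assuming an existing anomaly detection job named `my-job`; note that only `page` travels in the request body here, while `sort` and `desc` go to the querystring:

    // Influencers sorted by score, highest first.
    const res = await client.ml.getInfluencers({ job_id: 'my-job', sort: 'influencer_score', desc: true })
    for (const influencer of res.influencers) console.log(influencer.influencer_field_name, influencer.influencer_score)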
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-influencer.html | Elasticsearch API documentation} */ - async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise - async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise { + async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise + async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['page'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1195,10 +1106,10 @@ export default class Ml { * Get anomaly detection jobs usage info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-stats.html | Elasticsearch API documentation} */ - async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise - async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise { + async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise + async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] const querystring: Record = {} const body = undefined @@ -1207,7 +1118,7 @@ export default class Ml { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1235,10 +1146,10 @@ export default class Ml { * Get anomaly detection jobs configuration info. 
You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job.html | Elasticsearch API documentation}
   */
-  async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobsResponse>
-  async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetJobsResponse, unknown>>
-  async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptions): Promise<T.MlGetJobsResponse>
-  async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobsResponse>
+  async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetJobsResponse, unknown>>
+  async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise<T.MlGetJobsResponse>
+  async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -1247,7 +1158,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1275,10 +1186,10 @@ export default class Ml {
   * Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.
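By way of example, a cluster-wide call could look like this (output handling abbreviated):

    // Memory usage across all ML nodes; omit node_id to query every node.
    const memory = await client.ml.getMemoryStats({ human: true })
    console.log(Object.keys(memory.nodes))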
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-memory.html | Elasticsearch API documentation} */ - async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise - async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise { + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] const querystring: Record = {} const body = undefined @@ -1287,7 +1198,7 @@ export default class Ml { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1315,10 +1226,10 @@ export default class Ml { * Get anomaly detection job model snapshot upgrade usage info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-model-snapshot-upgrade-stats.html | Elasticsearch API documentation} */ - async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise - async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise { + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'snapshot_id'] const querystring: Record = {} const body = undefined @@ -1326,7 +1237,7 @@ export default class Ml { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1348,30 +1259,22 @@ export default class Ml { * Get model 
snapshots info.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-snapshot.html | Elasticsearch API documentation}
   */
-  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetModelSnapshotsResponse>
-  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetModelSnapshotsResponse, unknown>>
-  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<T.MlGetModelSnapshotsResponse>
-  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetModelSnapshotsResponse>
+  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetModelSnapshotsResponse, unknown>>
+  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<T.MlGetModelSnapshotsResponse>
+  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
     const acceptedBody: string[] = ['desc', 'end', 'page', 'sort', 'start']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1400,30 +1303,22 @@ export default class Ml {
   * Get overall bucket results. Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span.
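A hypothetical two-job sketch of the parameters described above (`top_n`, `overall_score`) with the new top-level signature:

    // Overall buckets where the average of the top 2 job scores is at least 50.
    const res = await client.ml.getOverallBuckets({ job_id: 'job-1,job-2', top_n: 2, overall_score: 50 })
    console.log(res.overall_buckets)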
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-overall-buckets.html | Elasticsearch API documentation} */ - async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise - async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['allow_no_match', 'bucket_span', 'end', 'exclude_interim', 'overall_score', 'start', 'top_n'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1444,30 +1339,22 @@ export default class Ml { * Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors. 
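For illustration, a sketch that narrows records to high scores, using assumed job and threshold values:

    // Only records scoring 75 or higher, sorted by record_score descending.
    const res = await client.ml.getRecords({ job_id: 'my-job', record_score: 75, sort: 'record_score', desc: true })
    console.log(res.records.length)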
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-record.html | Elasticsearch API documentation} */ - async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise - async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise { + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['desc', 'end', 'exclude_interim', 'page', 'record_score', 'sort', 'start'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1488,10 +1375,10 @@ export default class Ml { * Get trained model configuration info. 
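A minimal sketch using the built-in language identification model as the example ID:

    // Fetch a single trained model configuration.
    const res = await client.ml.getTrainedModels({ model_id: 'lang_ident_model_1' })
    console.log(res.trained_model_configs[0]?.model_id)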
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models.html | Elasticsearch API documentation} */ - async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise - async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise { + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] const querystring: Record = {} const body = undefined @@ -1500,7 +1387,7 @@ export default class Ml { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1528,10 +1415,10 @@ export default class Ml { * Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. 
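And the matching stats call, sketched with the same illustrative model ID:

    // Usage statistics for one model; a comma-separated list or wildcard also works.
    const stats = await client.ml.getTrainedModelsStats({ model_id: 'lang_ident_model_1' })
    console.log(stats.count, stats.trained_model_stats)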
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models-stats.html | Elasticsearch API documentation} */ - async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise - async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise { + async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise + async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] const querystring: Record = {} const body = undefined @@ -1540,7 +1427,7 @@ export default class Ml { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1568,30 +1455,22 @@ export default class Ml { * Evaluate a trained model. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html | Elasticsearch API documentation} */ - async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> - async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise - async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise { + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] const acceptedBody: string[] = ['docs', 'inference_config'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1612,10 +1491,10 @@ export default class Ml { * Return ML defaults and limits. Returns defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-info.html | Elasticsearch API documentation} */ - async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> - async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptions): Promise - async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptions): Promise { + async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise + async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -1624,7 +1503,7 @@ export default class Ml { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1642,30 +1521,22 @@ export default class Ml { * Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. 
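A short sketch of opening an assumed job, with the `timeout` property now carried in the request body per the `acceptedBody` list above:

    // Open the job and wait up to 30 minutes for it to be ready.
    const res = await client.ml.openJob({ job_id: 'my-job', timeout: '30m' })
    console.log(res.opened)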
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-open-job.html | Elasticsearch API documentation} */ - async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise> - async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptions): Promise - async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptions): Promise { + async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise + async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['timeout'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1686,30 +1557,22 @@ export default class Ml { * Add scheduled events to the calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-calendar-event.html | Elasticsearch API documentation} */ - async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise - async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise { + async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise + async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id'] const acceptedBody: string[] = ['events'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1730,15 +1593,14 @@ export default class Ml { * Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-data.html | Elasticsearch API documentation} */ - async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptionsWithMeta): Promise> - async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptions): Promise - async postData (this: That, params: T.MlPostDataRequest | TB.MlPostDataRequest, options?: TransportRequestOptions): Promise { + async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise + async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['data'] const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + let body: any for (const key in params) { if (acceptedBody.includes(key)) { @@ -1746,7 +1608,7 @@ export default class Ml { body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1767,31 +1629,23 @@ export default class Ml { * Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. 
* @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/preview-dfanalytics.html | Elasticsearch API documentation} */ - async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise - async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['config'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1819,31 +1673,23 @@ export default class Ml { * Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. 
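An illustrative preview of an existing datafeed; the response carries the sample documents the datafeed would deliver to the job:

    // Preview runs with the caller's credentials; see the note above.
    const preview = await client.ml.previewDatafeed({ datafeed_id: 'datafeed-my-job' })
    console.log(preview)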
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-preview-datafeed.html | Elasticsearch API documentation} */ - async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise> - async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise { + async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise> + async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] const acceptedBody: string[] = ['datafeed_config', 'job_config'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1871,30 +1717,22 @@ export default class Ml { * Create a calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar.html | Elasticsearch API documentation} */ - async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptions): Promise - async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptions): Promise { + async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise + async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id'] const acceptedBody: string[] = ['job_ids', 'description'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1915,10 +1753,10 @@ export default class Ml { * Add anomaly detection job to calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar-job.html | Elasticsearch API documentation} */ - async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise - async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise { + async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise + async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['calendar_id', 'job_id'] const querystring: Record = {} const body = undefined @@ -1926,7 +1764,7 @@ export default class Ml { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1948,30 +1786,22 @@ export default class Ml { * Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-dfanalytics.html | Elasticsearch API documentation} */ - async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise - async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', 'model_memory_limit', 'source', 'headers', 'version'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1992,30 +1822,22 @@ export default class Ml { * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. 
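A minimal datafeed creation sketch under those constraints, with hypothetical job, index, and query values:

    await client.ml.putDatafeed({
      datafeed_id: 'datafeed-my-job',
      job_id: 'my-job',
      indices: ['my-index'],
      query: { match_all: {} }
    })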
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-datafeed.html | Elasticsearch API documentation} */ - async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise - async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { + async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise + async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2036,30 +1858,22 @@ export default class Ml { * Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-filter.html | Elasticsearch API documentation} */ - async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptions): Promise - async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptions): Promise { + async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise + async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['filter_id'] const acceptedBody: string[] = ['description', 'items'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2080,30 +1894,22 @@ export default class Ml { * Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. 
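For example, a bare-bones job definition might be created like this (the field names and bucket span are illustrative):

    await client.ml.putJob({
      job_id: 'my-job',
      analysis_config: {
        bucket_span: '15m',
        detectors: [{ function: 'mean', field_name: 'responsetime' }]
      },
      data_description: { time_field: 'timestamp' }
    })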
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-job.html | Elasticsearch API documentation}
   */
-  async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutJobResponse>
-  async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutJobResponse, unknown>>
-  async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise<T.MlPutJobResponse>
-  async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise<any> {
+  async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutJobResponse>
+  async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutJobResponse, unknown>>
+  async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise<T.MlPutJobResponse>
+  async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2124,30 +1930,22 @@ export default class Ml {
   * Create a trained model. Enables you to supply a trained model that is not created by data frame analytics.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models.html | Elasticsearch API documentation}
   */
-  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelResponse>
-  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelResponse, unknown>>
-  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelResponse>
-  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
+  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelResponse>
+  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelResponse, unknown>>
+  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelResponse>
+  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
     const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'platform_architecture', 'tags', 'prefix_strings']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
    for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2168,10 +1966,10 @@ export default class Ml {
   * Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models-aliases.html | Elasticsearch API documentation}
   */
-  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelAliasResponse>
-  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelAliasResponse, unknown>>
-  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelAliasResponse>
-  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
+  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelAliasResponse>
+  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelAliasResponse, unknown>>
+  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelAliasResponse>
+  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_alias', 'model_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -2179,7 +1977,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2201,30 +1999,22 @@ export default class Ml {
   * Create part of a trained model definition.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-definition-part.html | Elasticsearch API documentation}
   */
-  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelDefinitionPartResponse>
-  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelDefinitionPartResponse, unknown>>
-  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelDefinitionPartResponse>
-  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise<any> {
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelDefinitionPartResponse>
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelDefinitionPartResponse, unknown>>
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelDefinitionPartResponse>
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id', 'part']
     const acceptedBody: string[] = ['definition', 'total_definition_length', 'total_parts']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2246,30 +2036,22 @@ export default class Ml {
   * Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-vocabulary.html | Elasticsearch API documentation}
   */
-  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelVocabularyResponse>
-  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelVocabularyResponse, unknown>>
-  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelVocabularyResponse>
-  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise<any> {
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelVocabularyResponse>
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelVocabularyResponse, unknown>>
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelVocabularyResponse>
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
     const acceptedBody: string[] = ['vocabulary', 'merges', 'scores']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2290,10 +2072,10 @@ export default class Ml {
   * Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-reset-job.html | Elasticsearch API documentation}
   */
-  async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlResetJobResponse>
-  async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlResetJobResponse, unknown>>
-  async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptions): Promise<T.MlResetJobResponse>
-  async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptions): Promise<any> {
+  async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlResetJobResponse>
+  async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlResetJobResponse, unknown>>
+  async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise<T.MlResetJobResponse>
+  async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -2301,7 +2083,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2322,30 +2104,22 @@ export default class Ml {
   * Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-revert-snapshot.html | Elasticsearch API documentation}
   */
-  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlRevertModelSnapshotResponse>
-  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlRevertModelSnapshotResponse, unknown>>
-  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlRevertModelSnapshotResponse>
-  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
+  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlRevertModelSnapshotResponse>
+  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlRevertModelSnapshotResponse, unknown>>
+  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlRevertModelSnapshotResponse>
+  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
     const acceptedBody: string[] = ['delete_intervening_results']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2367,10 +2141,10 @@ export default class Ml {
   * Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-set-upgrade-mode.html | Elasticsearch API documentation}
   */
-  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlSetUpgradeModeResponse>
-  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlSetUpgradeModeResponse, unknown>>
-  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<T.MlSetUpgradeModeResponse>
-  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<any> {
+  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlSetUpgradeModeResponse>
+  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlSetUpgradeModeResponse, unknown>>
+  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<T.MlSetUpgradeModeResponse>
+  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -2379,7 +2153,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2397,10 +2171,10 @@ export default class Ml {
   * Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-dfanalytics.html | Elasticsearch API documentation}
   */
-  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDataFrameAnalyticsResponse>
-  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDataFrameAnalyticsResponse, unknown>>
-  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlStartDataFrameAnalyticsResponse>
-  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDataFrameAnalyticsResponse>
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDataFrameAnalyticsResponse, unknown>>
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlStartDataFrameAnalyticsResponse>
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -2408,7 +2182,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2429,30 +2203,22 @@ export default class Ml {
   * Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-start-datafeed.html | Elasticsearch API documentation}
   */
-  async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDatafeedResponse>
-  async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDatafeedResponse, unknown>>
-  async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlStartDatafeedResponse>
-  async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
+  async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDatafeedResponse>
+  async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDatafeedResponse, unknown>>
+  async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlStartDatafeedResponse>
+  async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
     const acceptedBody: string[] = ['end', 'start', 'timeout']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2473,10 +2239,10 @@ export default class Ml {
   * Start a trained model deployment. It allocates the model to every machine learning node.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html | Elasticsearch API documentation}
   */
-  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartTrainedModelDeploymentResponse>
-  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartTrainedModelDeploymentResponse, unknown>>
-  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlStartTrainedModelDeploymentResponse>
-  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
+  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartTrainedModelDeploymentResponse>
+  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartTrainedModelDeploymentResponse, unknown>>
+  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlStartTrainedModelDeploymentResponse>
+  async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -2484,7 +2250,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2505,10 +2271,10 @@ export default class Ml {
   * Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-dfanalytics.html | Elasticsearch API documentation}
   */
-  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStopDataFrameAnalyticsResponse>
-  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStopDataFrameAnalyticsResponse, unknown>>
-  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlStopDataFrameAnalyticsResponse>
-  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStopDataFrameAnalyticsResponse>
+  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStopDataFrameAnalyticsResponse, unknown>>
+  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlStopDataFrameAnalyticsResponse>
+  async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -2516,7 +2282,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2537,30 +2303,22 @@ export default class Ml {
   * Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-stop-datafeed.html | Elasticsearch API documentation}
   */
-  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStopDatafeedResponse>
-  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStopDatafeedResponse, unknown>>
-  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlStopDatafeedResponse>
-  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
+  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStopDatafeedResponse>
+  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStopDatafeedResponse, unknown>>
+  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlStopDatafeedResponse>
+  async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = ['datafeed_id']
     const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
    for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2581,10 +2339,10 @@ export default class Ml {
   * Stop a trained model deployment.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html | Elasticsearch API documentation}
   */
-  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStopTrainedModelDeploymentResponse>
-  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStopTrainedModelDeploymentResponse, unknown>>
-  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlStopTrainedModelDeploymentResponse>
-  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
+  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStopTrainedModelDeploymentResponse>
+  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStopTrainedModelDeploymentResponse, unknown>>
+  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlStopTrainedModelDeploymentResponse>
+  async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -2592,7 +2350,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2613,30 +2371,22 @@ export default class Ml {
   * Update a data frame analytics job.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-dfanalytics.html | Elasticsearch API documentation}
   */
-  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateDataFrameAnalyticsResponse>
-  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateDataFrameAnalyticsResponse, unknown>>
-  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlUpdateDataFrameAnalyticsResponse>
-  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateDataFrameAnalyticsResponse>
+  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateDataFrameAnalyticsResponse, unknown>>
+  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlUpdateDataFrameAnalyticsResponse>
+  async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2657,30 +2407,22 @@ export default class Ml {
   * Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-datafeed.html | Elasticsearch API documentation}
   */
-  async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateDatafeedResponse>
-  async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateDatafeedResponse, unknown>>
-  async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlUpdateDatafeedResponse>
-  async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
+  async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateDatafeedResponse>
+  async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateDatafeedResponse, unknown>>
+  async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlUpdateDatafeedResponse>
+  async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
     const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2701,30 +2443,22 @@ export default class Ml {
   * Update a filter. Updates the description of a filter, adds items, or removes items from the list.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-filter.html | Elasticsearch API documentation}
   */
-  async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateFilterResponse>
-  async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateFilterResponse, unknown>>
-  async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise<T.MlUpdateFilterResponse>
-  async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise<any> {
+  async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateFilterResponse>
+  async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateFilterResponse, unknown>>
+  async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise<T.MlUpdateFilterResponse>
+  async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['filter_id']
     const acceptedBody: string[] = ['add_items', 'description', 'remove_items']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2745,30 +2479,22 @@ export default class Ml {
   * Update an anomaly detection job. Updates certain properties of an anomaly detection job.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-job.html | Elasticsearch API documentation}
   */
-  async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateJobResponse>
-  async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateJobResponse, unknown>>
-  async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptions): Promise<T.MlUpdateJobResponse>
-  async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptions): Promise<any> {
+  async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateJobResponse>
+  async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateJobResponse, unknown>>
+  async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise<T.MlUpdateJobResponse>
+  async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'model_prune_window', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2789,30 +2515,22 @@ export default class Ml {
   * Update a snapshot. Updates certain properties of a snapshot.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-snapshot.html | Elasticsearch API documentation}
   */
-  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateModelSnapshotResponse>
-  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateModelSnapshotResponse, unknown>>
-  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlUpdateModelSnapshotResponse>
-  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
+  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateModelSnapshotResponse>
+  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateModelSnapshotResponse, unknown>>
+  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlUpdateModelSnapshotResponse>
+  async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
     const acceptedBody: string[] = ['description', 'retain']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2834,30 +2552,22 @@ export default class Ml {
   * Update a trained model deployment.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-trained-model-deployment.html | Elasticsearch API documentation}
   */
-  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateTrainedModelDeploymentResponse>
-  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateTrainedModelDeploymentResponse, unknown>>
-  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlUpdateTrainedModelDeploymentResponse>
-  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
+  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateTrainedModelDeploymentResponse>
+  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateTrainedModelDeploymentResponse, unknown>>
+  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlUpdateTrainedModelDeploymentResponse>
+  async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
     const acceptedBody: string[] = ['number_of_allocations']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2878,10 +2588,10 @@ export default class Ml {
   * Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-upgrade-job-model-snapshot.html | Elasticsearch API documentation}
   */
-  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpgradeJobSnapshotResponse>
-  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpgradeJobSnapshotResponse, unknown>>
-  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlUpgradeJobSnapshotResponse>
-  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
+  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpgradeJobSnapshotResponse>
+  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpgradeJobSnapshotResponse, unknown>>
+  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlUpgradeJobSnapshotResponse>
+  async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -2889,7 +2599,7 @@ export default class Ml {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2911,31 +2621,23 @@ export default class Ml {
   * Validates an anomaly detection job.
   * @see {@link https://www.elastic.co/guide/en/machine-learning/master/ml-jobs.html | Elasticsearch API documentation}
   */
-  async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlValidateResponse>
-  async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlValidateResponse, unknown>>
-  async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptions): Promise<T.MlValidateResponse>
-  async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptions): Promise<any> {
+  async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlValidateResponse>
+  async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlValidateResponse, unknown>>
+  async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise<T.MlValidateResponse>
+  async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_id', 'model_snapshot_retention_days', 'results_index_name']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2953,15 +2655,14 @@ export default class Ml {
   * Validates an anomaly detection detector.
   * @see {@link https://www.elastic.co/guide/en/machine-learning/master/ml-jobs.html | Elasticsearch API documentation}
   */
-  async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlValidateDetectorResponse>
-  async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlValidateDetectorResponse, unknown>>
-  async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise<T.MlValidateDetectorResponse>
-  async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise<any> {
+  async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlValidateDetectorResponse>
+  async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlValidateDetectorResponse, unknown>>
+  async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise<T.MlValidateDetectorResponse>
+  async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['detector']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: any = params.body ?? undefined
+    let body: any
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -2969,7 +2670,7 @@ export default class Ml {
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts
index f58cf06af..a33e18e3a 100644
--- a/src/api/api/monitoring.ts
+++ b/src/api/api/monitoring.ts
@@ -35,7 +35,6 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 export default class Monitoring {
@@ -48,15 +47,14 @@
   * Used by the monitoring features to send monitoring data.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/monitor-elasticsearch-cluster.html | Elasticsearch API documentation}
   */
-  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MonitoringBulkResponse>
-  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MonitoringBulkResponse, unknown>>
-  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise<T.MonitoringBulkResponse>
-  async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise<any> {
+  async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MonitoringBulkResponse>
+  async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MonitoringBulkResponse, unknown>>
+  async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise<T.MonitoringBulkResponse>
+  async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['type']
     const acceptedBody: string[] = ['operations']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: any = params.body ?? undefined
+    let body: any
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -64,7 +62,7 @@ export default class Monitoring {
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts
index 7d5778210..7575ba44c 100644
--- a/src/api/api/msearch.ts
+++ b/src/api/api/msearch.ts
@@ -35,22 +35,20 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
  * Run multiple searches. The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: ``` header\n body\n header\n body\n ``` This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. IMPORTANT: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html | Elasticsearch API documentation}
  */
-export default async function MsearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MsearchResponse<TDocument, TAggregations>>
-export default async function MsearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MsearchResponse<TDocument, TAggregations>, unknown>>
-export default async function MsearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise<T.MsearchResponse<TDocument, TAggregations>>
-export default async function MsearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function MsearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MsearchResponse<TDocument, TAggregations>>
+export default async function MsearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MsearchResponse<TDocument, TAggregations>, unknown>>
+export default async function MsearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise<T.MsearchResponse<TDocument, TAggregations>>
+export default async function MsearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['searches']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: any = params.body ?? undefined
+  let body: any
 
   for (const key in params) {
     if (acceptedBody.includes(key)) {
@@ -58,7 +56,7 @@ export default async function MsearchApi
       body = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts
--- a/src/api/api/msearch_template.ts
+++ b/src/api/api/msearch_template.ts
@@ -35,22 +35,20 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
  * Run multiple templated searches.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html | Elasticsearch API documentation}
  */
-export default async function MsearchTemplateApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MsearchTemplateResponse<TDocument, TAggregations>>
-export default async function MsearchTemplateApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MsearchTemplateResponse<TDocument, TAggregations>, unknown>>
-export default async function MsearchTemplateApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise<T.MsearchTemplateResponse<TDocument, TAggregations>>
-export default async function MsearchTemplateApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function MsearchTemplateApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MsearchTemplateResponse<TDocument, TAggregations>>
+export default async function MsearchTemplateApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MsearchTemplateResponse<TDocument, TAggregations>, unknown>>
+export default async function MsearchTemplateApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise<T.MsearchTemplateResponse<TDocument, TAggregations>>
+export default async function MsearchTemplateApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['search_templates']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  let body: any = params.body ?? undefined
+  let body: any
 
   for (const key in params) {
     if (acceptedBody.includes(key)) {
@@ -58,7 +56,7 @@ export default async function MsearchTemplateApi
       body = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts
--- a/src/api/api/mtermvectors.ts
+++ b/src/api/api/mtermvectors.ts
-export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MtermvectorsResponse>
-export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MtermvectorsResponse, unknown>>
-export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptions): Promise<T.MtermvectorsResponse>
-export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MtermvectorsResponse>
+export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MtermvectorsResponse, unknown>>
+export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise<T.MtermvectorsResponse>
+export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['docs', 'ids']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
 
   params = params ?? {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts
index caf750c89..3f6a51e20 100644
--- a/src/api/api/nodes.ts
+++ b/src/api/api/nodes.ts
@@ -35,7 +35,6 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 export default class Nodes {
@@ -48,10 +47,10 @@
   * You can use this API to clear the archived repositories metering information in the cluster.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-repositories-metering-archive-api.html | Elasticsearch API documentation}
   */
-  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
-  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesClearRepositoriesMeteringArchiveResponse, unknown>>
-  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
-  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<any> {
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesClearRepositoriesMeteringArchiveResponse, unknown>>
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['node_id', 'max_archive_version']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@ export default class Nodes {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -81,10 +80,10 @@ export default class Nodes {
   * You can use the cluster repositories metering API to retrieve repositories metering information in a cluster. This API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it won’t be present after node restarts.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-repositories-metering-api.html | Elasticsearch API documentation} */ - async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise - async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise { + async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise + async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] const querystring: Record = {} const body = undefined @@ -92,7 +91,7 @@ export default class Nodes { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -113,10 +112,10 @@ export default class Nodes { * This API yields a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of each node’s top hot threads. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html | Elasticsearch API documentation} */ - async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise - async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise { + async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise + async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] const querystring: Record = {} const body = undefined @@ -125,7 +124,7 @@ export default class Nodes { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -153,10 +152,10 @@ export default class Nodes { * Returns cluster nodes information. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html | Elasticsearch API documentation} */ - async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> - async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptions): Promise - async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptions): Promise { + async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise + async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id', 'metric'] const querystring: Record = {} const body = undefined @@ -165,7 +164,7 @@ export default class Nodes { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -200,31 +199,23 @@ export default class Nodes { * Reloads the keystore on nodes in the cluster. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings | Elasticsearch API documentation} */ - async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise - async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise { + async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise + async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] const acceptedBody: string[] = ['secure_settings_password'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -252,10 +243,10 @@ export default class Nodes { * Returns cluster nodes statistics. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html | Elasticsearch API documentation} */ - async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptions): Promise - async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptions): Promise { + async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id', 'metric', 'index_metric'] const querystring: Record = {} const body = undefined @@ -264,7 +255,7 @@ export default class Nodes { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -306,10 +297,10 @@ export default class Nodes { * Returns information on the usage of features. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html | Elasticsearch API documentation} */ - async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> - async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptions): Promise - async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptions): Promise { + async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> + async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise + async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id', 'metric'] const querystring: Record = {} const body = undefined @@ -318,7 +309,7 @@ export default class Nodes { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts index 0a017637d..cf566cc3c 100644 --- a/src/api/api/open_point_in_time.ts +++ b/src/api/api/open_point_in_time.ts @@ -35,37 +35,28 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. 
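The same convention applies across the Nodes namespace above: path parts (`node_id`, `metric`) and body fields (`secure_settings_password`) are all top-level keys. A hedged sketch of both flavors, reusing the hypothetical `client` from the earlier example:

// body-carrying call: `secure_settings_password` is serialized into the request body
await client.nodes.reloadSecureSettings({
  secure_settings_password: 'keystore-password' // hypothetical keystore password
})

// path/query-only call: `metric` becomes part of the URL, other keys go to the querystring
const jvm = await client.nodes.stats({ metric: 'jvm' })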
diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts
index 0a017637d..cf566cc3c 100644
--- a/src/api/api/open_point_in_time.ts
+++ b/src/api/api/open_point_in_time.ts
@@ -35,37 +35,28 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 /**
  * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html | Elasticsearch API documentation}
  */
-export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.OpenPointInTimeResponse>
-export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.OpenPointInTimeResponse, unknown>>
-export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<T.OpenPointInTimeResponse>
-export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.OpenPointInTimeResponse>
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.OpenPointInTimeResponse, unknown>>
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<T.OpenPointInTimeResponse>
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['index_filter']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
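A usage sketch for the point-in-time flow described in the JSDoc above (not part of the patch; index name and keep-alive value are illustrative):

// open a PIT, search against it, then close it
const pit = await client.openPointInTime({
  index: 'my-index', // hypothetical index
  keep_alive: '1m'
})
const result = await client.search({
  pit: { id: pit.id, keep_alive: '1m' },
  query: { match_all: {} }
})
await client.closePointInTime({ id: pit.id })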
diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts
index 9d07552f0..8552b1b58 100644
--- a/src/api/api/ping.ts
+++ b/src/api/api/ping.ts
@@ -35,17 +35,16 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 /**
  * Ping the cluster. Returns whether the cluster is running.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation}
  */
-export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PingResponse>
-export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PingResponse, unknown>>
-export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptions): Promise<T.PingResponse>
-export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PingResponse>
+export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PingResponse, unknown>>
+export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise<T.PingResponse>
+export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -54,7 +53,7 @@ export default async function PingApi (this: That, params?: T.PingRequest | TB.P
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
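Ping takes no parameters and resolves to a boolean, so a health probe is a one-liner (sketch, assuming the hypothetical `client` above):

const alive = await client.ping() // true if the cluster answered
if (!alive) console.error('Elasticsearch is not reachable')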
diff --git a/src/api/api/profiling.ts b/src/api/api/profiling.ts
index 0d729387c..2abd1c907 100644
--- a/src/api/api/profiling.ts
+++ b/src/api/api/profiling.ts
@@ -35,7 +35,6 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 export default class Profiling {
@@ -48,10 +47,10 @@ export default class Profiling {
    * Extracts a UI-optimized structure to render flamegraphs from Universal Profiling.
    * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation}
    */
-  async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -60,7 +59,7 @@ export default class Profiling {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -77,10 +76,10 @@ export default class Profiling {
    * Extracts raw stacktrace information from Universal Profiling.
    * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation}
    */
-  async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -89,7 +88,7 @@ export default class Profiling {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -106,10 +105,10 @@ export default class Profiling {
    * Returns basic information about the status of Universal Profiling.
    * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation}
    */
-  async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -118,7 +117,7 @@ export default class Profiling {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -135,10 +134,10 @@ export default class Profiling {
    * Extracts a list of topN functions from Universal Profiling.
    * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation}
    */
-  async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -147,7 +146,7 @@ export default class Profiling {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
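The Profiling methods above are still typed as T.TODO, so any plain object type-checks; a status probe is the simplest call (sketch, assuming a deployment with Universal Profiling enabled):

const status = await client.profiling.status() // response shape is not yet typed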
diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts
index 94c3449d9..f42e42c3b 100644
--- a/src/api/api/put_script.ts
+++ b/src/api/api/put_script.ts
@@ -35,37 +35,28 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 /**
  * Create or update a script or search template. Creates or updates a stored script or search template.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation}
  */
-export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PutScriptResponse>
-export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PutScriptResponse, unknown>>
-export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptions): Promise<T.PutScriptResponse>
-export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PutScriptResponse>
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PutScriptResponse, unknown>>
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise<T.PutScriptResponse>
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id', 'context']
   const acceptedBody: string[] = ['script']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
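A sketch of storing a script under the new flat request shape: `id` is a path parameter and `script` a body field, both written at the top level (the id and Painless source here are hypothetical):

await client.putScript({
  id: 'score-by-rating', // hypothetical script id
  script: {
    lang: 'painless',
    source: "doc['rating'].value * params.factor"
  }
})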
diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts
index 150788940..0f462b5a5 100644
--- a/src/api/api/query_rules.ts
+++ b/src/api/api/query_rules.ts
@@ -35,7 +35,6 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 export default class QueryRules {
@@ -45,13 +44,13 @@ export default class QueryRules {
   }
   /**
-   * Deletes a query rule within a query ruleset.
+   * Delete a query rule. Delete a query rule within a query ruleset.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-rule.html | Elasticsearch API documentation}
    */
-  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesDeleteRuleResponse>
-  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRuleResponse, unknown>>
-  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRuleResponse>
-  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesDeleteRuleResponse>
+  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRuleResponse, unknown>>
+  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRuleResponse>
+  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['ruleset_id', 'rule_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@ export default class QueryRules {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -78,13 +77,13 @@ export default class QueryRules {
   }
   /**
-   * Deletes a query ruleset.
+   * Delete a query ruleset.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html | Elasticsearch API documentation}
    */
-  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesDeleteRulesetResponse>
-  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRulesetResponse, unknown>>
-  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRulesetResponse>
-  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesDeleteRulesetResponse>
+  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRulesetResponse, unknown>>
+  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRulesetResponse>
+  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['ruleset_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -92,7 +91,7 @@ export default class QueryRules {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -110,13 +109,13 @@ export default class QueryRules {
   }
   /**
-   * Returns the details about a query rule within a query ruleset
+   * Get a query rule. Get details about a query rule within a query ruleset.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-rule.html | Elasticsearch API documentation}
    */
-  async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesGetRuleResponse>
-  async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRuleResponse, unknown>>
-  async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRuleResponse>
-  async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<any> {
+  async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesGetRuleResponse>
+  async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRuleResponse, unknown>>
+  async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRuleResponse>
+  async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['ruleset_id', 'rule_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -124,7 +123,7 @@ export default class QueryRules {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -143,13 +142,13 @@ export default class QueryRules {
   }
   /**
-   * Returns the details about a query ruleset
+   * Get a query ruleset. Get details about a query ruleset.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-ruleset.html | Elasticsearch API documentation}
    */
-  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesGetRulesetResponse>
-  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRulesetResponse, unknown>>
-  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRulesetResponse>
-  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<any> {
+  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesGetRulesetResponse>
+  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRulesetResponse, unknown>>
+  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRulesetResponse>
+  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['ruleset_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -157,7 +156,7 @@ export default class QueryRules {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -175,13 +174,13 @@ export default class QueryRules {
   }
   /**
-   * Returns summarized information about existing query rulesets.
+   * Get all query rulesets. Get summarized information about the query rulesets.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-query-rulesets.html | Elasticsearch API documentation}
    */
-  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesListRulesetsResponse>
-  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesListRulesetsResponse, unknown>>
-  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<T.QueryRulesListRulesetsResponse>
-  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<any> {
+  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesListRulesetsResponse>
+  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesListRulesetsResponse, unknown>>
+  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<T.QueryRulesListRulesetsResponse>
+  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -190,7 +189,7 @@ export default class QueryRules {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -205,33 +204,25 @@ export default class QueryRules {
   }
   /**
-   * Creates or updates a query rule within a query ruleset.
+   * Create or update a query rule. Create or update a query rule within a query ruleset.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-rule.html | Elasticsearch API documentation}
    */
-  async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesPutRuleResponse>
-  async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRuleResponse, unknown>>
-  async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRuleResponse>
-  async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<any> {
+  async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesPutRuleResponse>
+  async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRuleResponse, unknown>>
+  async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRuleResponse>
+  async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['ruleset_id', 'rule_id']
     const acceptedBody: string[] = ['type', 'criteria', 'actions', 'priority']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -250,33 +241,25 @@ export default class QueryRules {
   }
   /**
-   * Creates or updates a query ruleset.
+   * Create or update a query ruleset.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html | Elasticsearch API documentation}
    */
-  async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesPutRulesetResponse>
-  async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRulesetResponse, unknown>>
-  async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRulesetResponse>
-  async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise<any> {
+  async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesPutRulesetResponse>
+  async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRulesetResponse, unknown>>
+  async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRulesetResponse>
+  async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['ruleset_id']
     const acceptedBody: string[] = ['rules']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -294,33 +277,25 @@ export default class QueryRules {
   }
   /**
-   * Creates or updates a query ruleset.
+   * Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/test-query-ruleset.html | Elasticsearch API documentation}
    */
-  async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesTestResponse>
-  async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesTestResponse, unknown>>
-  async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<T.QueryRulesTestResponse>
-  async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<any> {
+  async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesTestResponse>
+  async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesTestResponse, unknown>>
+  async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<T.QueryRulesTestResponse>
+  async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['ruleset_id']
     const acceptedBody: string[] = ['match_criteria']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
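A hedged sketch of the round trip the QueryRules methods above expose: create a ruleset, then test which rules match a set of criteria (ruleset id, rule contents, and criteria values are all hypothetical):

await client.queryRules.putRuleset({
  ruleset_id: 'my-ruleset', // hypothetical ruleset id
  rules: [{
    rule_id: 'promote-docs',
    type: 'pinned',
    criteria: [{ type: 'exact', metadata: 'user_query', values: ['puggles'] }],
    actions: { ids: ['id1'] }
  }]
})

const matches = await client.queryRules.test({
  ruleset_id: 'my-ruleset',
  match_criteria: { user_query: 'puggles' }
})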
diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts
index 44052220b..bafa83313 100644
--- a/src/api/api/rank_eval.ts
+++ b/src/api/api/rank_eval.ts
@@ -35,37 +35,28 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 /**
  * Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html | Elasticsearch API documentation}
  */
-export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RankEvalResponse>
-export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RankEvalResponse, unknown>>
-export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptions): Promise<T.RankEvalResponse>
-export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RankEvalResponse>
+export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RankEvalResponse, unknown>>
+export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise<T.RankEvalResponse>
+export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['requests', 'metric']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts
index 69d23a4f6..4ef63245c 100644
--- a/src/api/api/reindex.ts
+++ b/src/api/api/reindex.ts
@@ -35,37 +35,28 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 /**
  * Reindex documents. Copies documents from a source to a destination. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html | Elasticsearch API documentation}
  */
-export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ReindexResponse>
-export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ReindexResponse, unknown>>
-export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise<T.ReindexResponse>
-export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ReindexResponse>
+export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ReindexResponse, unknown>>
+export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise<T.ReindexResponse>
+export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
   const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
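Reindex under the new shape: `source` and `dest` are body fields written at the top level, while `wait_for_completion` stays a querystring flag (sketch with hypothetical index names):

const task = await client.reindex({
  wait_for_completion: false, // querystring parameter: run as a background task
  source: { index: 'old-index' },
  dest: { index: 'new-index' }
})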
diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts
index 49d1a1b6c..5cd9e6d88 100644
--- a/src/api/api/reindex_rethrottle.ts
+++ b/src/api/api/reindex_rethrottle.ts
@@ -35,17 +35,16 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 /**
  * Throttle a reindex operation. Change the number of requests per second for a particular reindex operation.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html | Elasticsearch API documentation}
  */
-export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ReindexRethrottleResponse>
-export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ReindexRethrottleResponse, unknown>>
-export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise<T.ReindexRethrottleResponse>
-export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ReindexRethrottleResponse>
+export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ReindexRethrottleResponse, unknown>>
+export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise<T.ReindexRethrottleResponse>
+export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['task_id']
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -53,7 +52,7 @@ export default async function ReindexRethrottleApi (this: That, params: T.Reinde
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
    }
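Throttling that running reindex afterwards only needs the task id and the new rate (sketch; reuses the hypothetical `task` from the previous example):

await client.reindexRethrottle({
  task_id: String(task.task), // id returned by the reindex call above
  requests_per_second: 100
})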
diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts
index d3d5ad472..3a55809e1 100644
--- a/src/api/api/render_search_template.ts
+++ b/src/api/api/render_search_template.ts
@@ -35,38 +35,29 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 /**
  * Render a search template. Render a search template as a search request body.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/render-search-template-api.html | Elasticsearch API documentation}
  */
-export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RenderSearchTemplateResponse>
-export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RenderSearchTemplateResponse, unknown>>
-export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<T.RenderSearchTemplateResponse>
-export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RenderSearchTemplateResponse>
+export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RenderSearchTemplateResponse, unknown>>
+export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<T.RenderSearchTemplateResponse>
+export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['id']
   const acceptedBody: string[] = ['file', 'params', 'source']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
   params = params ?? {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
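Rendering an inline template (sketch; the `source` template and `params` values are hypothetical body fields passed at the top level):

const rendered = await client.renderSearchTemplate({
  source: '{ "query": { "match": { "title": "{{q}}" } } }',
  params: { q: 'search text' }
})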
diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts
index d9aad8fd7..1b2864192 100644
--- a/src/api/api/rollup.ts
+++ b/src/api/api/rollup.ts
@@ -35,7 +35,6 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 export default class Rollup {
@@ -48,10 +47,10 @@ export default class Rollup {
    * Deletes an existing rollup job.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-delete-job.html | Elasticsearch API documentation}
    */
-  async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupDeleteJobResponse>
-  async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupDeleteJobResponse, unknown>>
-  async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise<T.RollupDeleteJobResponse>
-  async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupDeleteJobResponse>
+  async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupDeleteJobResponse, unknown>>
+  async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise<T.RollupDeleteJobResponse>
+  async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@ export default class Rollup {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -80,10 +79,10 @@ export default class Rollup {
    * Retrieves the configuration, stats, and status of rollup jobs.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-job.html | Elasticsearch API documentation}
    */
-  async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupGetJobsResponse>
-  async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupGetJobsResponse, unknown>>
-  async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptions): Promise<T.RollupGetJobsResponse>
-  async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupGetJobsResponse>
+  async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupGetJobsResponse, unknown>>
+  async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise<T.RollupGetJobsResponse>
+  async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -92,7 +91,7 @@ export default class Rollup {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -120,10 +119,10 @@ export default class Rollup {
    * Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-caps.html | Elasticsearch API documentation}
    */
-  async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupGetRollupCapsResponse>
-  async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupGetRollupCapsResponse, unknown>>
-  async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise<T.RollupGetRollupCapsResponse>
-  async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupGetRollupCapsResponse>
+  async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupGetRollupCapsResponse, unknown>>
+  async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise<T.RollupGetRollupCapsResponse>
+  async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -132,7 +131,7 @@ export default class Rollup {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -160,10 +159,10 @@ export default class Rollup {
    * Returns the rollup capabilities of all jobs inside of a rollup index (for example, the index where rollup data is stored).
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html | Elasticsearch API documentation}
    */
-  async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupGetRollupIndexCapsResponse>
-  async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupGetRollupIndexCapsResponse, unknown>>
-  async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise<T.RollupGetRollupIndexCapsResponse>
-  async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupGetRollupIndexCapsResponse>
+  async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupGetRollupIndexCapsResponse, unknown>>
+  async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise<T.RollupGetRollupIndexCapsResponse>
+  async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -171,7 +170,7 @@ export default class Rollup {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -192,30 +191,22 @@ export default class Rollup {
    * Creates a rollup job.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-put-job.html | Elasticsearch API documentation}
    */
-  async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupPutJobResponse>
-  async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupPutJobResponse, unknown>>
-  async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptions): Promise<T.RollupPutJobResponse>
-  async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptions): Promise<any> {
+  async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupPutJobResponse>
+  async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupPutJobResponse, unknown>>
+  async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise<T.RollupPutJobResponse>
+  async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index', 'timeout', 'headers']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -236,30 +227,22 @@ export default class Rollup {
    * Enables searching rolled-up data using the standard Query DSL.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-search.html | Elasticsearch API documentation}
    */
-  async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupRollupSearchResponse<TDocument, TAggregations>>
-  async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupRollupSearchResponse<TDocument, TAggregations>, unknown>>
-  async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise<T.RollupRollupSearchResponse<TDocument, TAggregations>>
-  async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise<any> {
+  async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupRollupSearchResponse<TDocument, TAggregations>>
+  async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupRollupSearchResponse<TDocument, TAggregations>, unknown>>
+  async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise<T.RollupRollupSearchResponse<TDocument, TAggregations>>
+  async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'size']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -280,10 +263,10 @@ export default class Rollup {
    * Starts an existing, stopped rollup job.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-start-job.html | Elasticsearch API documentation}
    */
-  async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupStartJobResponse>
-  async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupStartJobResponse, unknown>>
-  async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptions): Promise<T.RollupStartJobResponse>
-  async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptions): Promise<any> {
+  async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupStartJobResponse>
+  async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupStartJobResponse, unknown>>
+  async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise<T.RollupStartJobResponse>
+  async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -291,7 +274,7 @@ export default class Rollup {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-stop-job.html | Elasticsearch API documentation} */ - async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptions): Promise - async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptions): Promise { + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined @@ -323,7 +306,7 @@ export default class Rollup { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index a1a9fa0b5..25df53762 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -35,38 +35,29 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Run a script. Runs a script and returns a result. 
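The same mechanical change repeats in every generated API file in this patch: the `| TB.XxxRequest` union disappears from the signatures, the eight-line `userBody` copy collapses into a plain `const body: Record<string, any> = {}`, and the `key !== 'body'` guard is dropped from the querystring loop, because a `body` key no longer exists on any request type. For calling code this means a request body is expressed only through flat top-level parameters. A minimal sketch of the resulting call style, assuming a local cluster at http://localhost:9200 (the index name is illustrative):

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })

    // The client splits the flat keys internally: 'index' is a path
    // parameter, while 'size' and 'query' land in the request body
    // because they appear in the method's acceptedBody list.
    await client.rollup.rollupSearch({
      index: 'rollup-index',
      size: 0,
      query: { match_all: {} }
    })
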
diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts
index a1a9fa0b5..25df53762 100644
--- a/src/api/api/scripts_painless_execute.ts
+++ b/src/api/api/scripts_painless_execute.ts
@@ -35,38 +35,29 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Run a script. Runs a script and returns a result.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html | Elasticsearch API documentation}
  */
-export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ScriptsPainlessExecuteResponse<TResult>>
-export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScriptsPainlessExecuteResponse<TResult>, unknown>>
-export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise<T.ScriptsPainlessExecuteResponse<TResult>>
-export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ScriptsPainlessExecuteResponse<TResult>>
+export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScriptsPainlessExecuteResponse<TResult>, unknown>>
+export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise<T.ScriptsPainlessExecuteResponse<TResult>>
+export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
   const acceptedBody: string[] = ['context', 'context_setup', 'script']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
 
   params = params ?? {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts
index ae356e9d3..b2ccbb07a 100644
--- a/src/api/api/scroll.ts
+++ b/src/api/api/scroll.ts
@@ -35,37 +35,28 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Run a scrolling search. IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll | Elasticsearch API documentation}
  */
-export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ScrollResponse<TDocument, TAggregations>>
-export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScrollResponse<TDocument, TAggregations>, unknown>>
-export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise<T.ScrollResponse<TDocument, TAggregations>>
-export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ScrollResponse<TDocument, TAggregations>>
+export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScrollResponse<TDocument, TAggregations>, unknown>>
+export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise<T.ScrollResponse<TDocument, TAggregations>>
+export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
   const acceptedBody: string[] = ['scroll', 'scroll_id']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
 
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
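With the body key gone, the scroll loop described in the doc comment above passes `scroll` and `scroll_id` as top-level properties. A minimal sketch of that pattern, assuming a local cluster (index name and page size are illustrative):

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })

    // Open the search context, then page through it with the scroll API.
    let page = await client.search({
      index: 'my-index',
      scroll: '30s',
      size: 100,
      query: { match_all: {} }
    })

    while (page.hits.hits.length > 0) {
      // ...process page.hits.hits here...
      page = await client.scroll({ scroll_id: page._scroll_id!, scroll: '30s' })
    }
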
diff --git a/src/api/api/search.ts b/src/api/api/search.ts
index fb251b6fa..ec845a47c 100644
--- a/src/api/api/search.ts
+++ b/src/api/api/search.ts
@@ -35,36 +35,25 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html | Elasticsearch API documentation}
  */
-export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchResponse<TDocument, TAggregations>>
-export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchResponse<TDocument, TAggregations>, unknown>>
-export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise<T.SearchResponse<TDocument, TAggregations>>
-export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchResponse<TDocument, TAggregations>>
+export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchResponse<TDocument, TAggregations>, unknown>>
+export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise<T.SearchResponse<TDocument, TAggregations>>
+export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'rank', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'retriever', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
 
   params = params ?? {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
-      // @ts-expect-error
       if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) { // eslint-disable-line
-        // @ts-expect-error
         querystring[key] = params[key]
       } else {
         // @ts-expect-error
@@ -72,7 +61,7 @@ export default async function SearchApi
       }
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
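`search` is the one generated function with extra routing logic: a string `sort` that contains a colon (for example 'timestamp:desc') is still sent in the querystring, because that shorthand is only valid there, while every other `acceptedBody` key goes into the request body. Illustratively, reusing the `client` from the earlier sketches:

    // Sent as ?sort=timestamp:desc in the querystring.
    await client.search({ index: 'my-index', sort: 'timestamp:desc' })

    // Sent in the request body.
    await client.search({
      index: 'my-index',
      sort: [{ timestamp: { order: 'desc' } }],
      query: { match_all: {} }
    })
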
diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts
index 47b1848f8..23e46359e 100644
--- a/src/api/api/search_application.ts
+++ b/src/api/api/search_application.ts
@@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class SearchApplication {
@@ -48,10 +47,10 @@ export default class SearchApplication {
    * Delete a search application. Remove a search application and its associated alias. Indices attached to the search application are not removed.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-search-application.html | Elasticsearch API documentation}
    */
-  async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationDeleteResponse>
-  async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationDeleteResponse, unknown>>
-  async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationDeleteResponse>
-  async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise<any> {
+  async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationDeleteResponse>
+  async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationDeleteResponse, unknown>>
+  async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationDeleteResponse>
+  async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -59,7 +58,7 @@ export default class SearchApplication {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -80,10 +79,10 @@ export default class SearchApplication {
    * Delete a behavioral analytics collection. The associated data stream is also deleted.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-analytics-collection.html | Elasticsearch API documentation}
    */
-  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationDeleteBehavioralAnalyticsResponse>
-  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationDeleteBehavioralAnalyticsResponse, unknown>>
-  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationDeleteBehavioralAnalyticsResponse>
-  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationDeleteBehavioralAnalyticsResponse>
+  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationDeleteBehavioralAnalyticsResponse, unknown>>
+  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationDeleteBehavioralAnalyticsResponse>
+  async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -91,7 +90,7 @@ export default class SearchApplication {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -112,10 +111,10 @@ export default class SearchApplication {
    * Get search application details.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-search-application.html | Elasticsearch API documentation}
    */
-  async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationGetResponse>
-  async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationGetResponse, unknown>>
-  async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationGetResponse>
-  async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise<any> {
+  async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationGetResponse>
+  async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationGetResponse, unknown>>
+  async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationGetResponse>
+  async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -123,7 +122,7 @@ export default class SearchApplication {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -144,10 +143,10 @@ export default class SearchApplication {
    * Get behavioral analytics collections.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-analytics-collection.html | Elasticsearch API documentation}
    */
-  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationGetBehavioralAnalyticsResponse>
-  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationGetBehavioralAnalyticsResponse, unknown>>
-  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationGetBehavioralAnalyticsResponse>
-  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationGetBehavioralAnalyticsResponse>
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationGetBehavioralAnalyticsResponse, unknown>>
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationGetBehavioralAnalyticsResponse>
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -156,7 +155,7 @@ export default class SearchApplication {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -184,10 +183,10 @@ export default class SearchApplication {
    * Returns the existing search applications.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-search-applications.html | Elasticsearch API documentation}
    */
-  async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationListResponse>
-  async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationListResponse, unknown>>
-  async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationListResponse>
-  async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptions): Promise<any> {
+  async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationListResponse>
+  async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationListResponse, unknown>>
+  async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationListResponse>
+  async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -196,7 +195,7 @@ export default class SearchApplication {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -214,10 +213,10 @@ export default class SearchApplication {
    * Creates a behavioral analytics event for existing collection.
    * @see {@link http://todo.com/tbd | Elasticsearch API documentation}
    */
-  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['collection_name', 'event_type']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -226,7 +225,7 @@ export default class SearchApplication {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -247,15 +246,14 @@ export default class SearchApplication {
    * Create or update a search application.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html | Elasticsearch API documentation}
    */
-  async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPutResponse>
-  async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationPutResponse, unknown>>
-  async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationPutResponse>
-  async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise<any> {
+  async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPutResponse>
+  async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationPutResponse, unknown>>
+  async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationPutResponse>
+  async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
    const acceptedBody: string[] = ['search_application']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: any = params.body ?? undefined
+    let body: any
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
@@ -263,7 +261,7 @@ export default class SearchApplication {
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -284,10 +282,10 @@ export default class SearchApplication {
    * Create a behavioral analytics collection.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-analytics-collection.html | Elasticsearch API documentation}
    */
-  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPutBehavioralAnalyticsResponse>
-  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationPutBehavioralAnalyticsResponse, unknown>>
-  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationPutBehavioralAnalyticsResponse>
-  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPutBehavioralAnalyticsResponse>
+  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationPutBehavioralAnalyticsResponse, unknown>>
+  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationPutBehavioralAnalyticsResponse>
+  async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -295,7 +293,7 @@ export default class SearchApplication {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -316,10 +314,10 @@ export default class SearchApplication {
    * Renders a query for given search application search parameters
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-render-query.html | Elasticsearch API documentation}
    */
-  async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async renderQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async renderQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async renderQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async renderQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = ['name']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -328,7 +326,7 @@ export default class SearchApplication {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -348,30 +346,22 @@ export default class SearchApplication {
    * Run a search application search. Generate and run an Elasticsearch query that uses the specified query parameteter and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-search.html | Elasticsearch API documentation}
    */
-  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>>
-  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationSearchResponse<TDocument, TAggregations>, unknown>>
-  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>>
-  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise<any> {
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>>
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationSearchResponse<TDocument, TAggregations>, unknown>>
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>>
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['params']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
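`put` above also shows the second body flavour: endpoints whose body is a single value rather than an object assembled key by key declare `let body: any` and assign the one accepted property (`search_application`) directly. Querying an existing application is the simpler half; a minimal sketch, assuming an application named 'my-app' already exists and reusing the earlier `client`:

    const result = await client.searchApplication.search({
      name: 'my-app',                      // path parameter
      params: { query_string: 'hello' }    // body parameter
    })
    console.log(result.hits.hits)
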
diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts
index 3311d222b..c76751fbb 100644
--- a/src/api/api/search_mvt.ts
+++ b/src/api/api/search_mvt.ts
@@ -35,37 +35,28 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Search a vector tile. Search a vector tile for geospatial values.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-vector-tile-api.html | Elasticsearch API documentation}
  */
-export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchMvtResponse>
-export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchMvtResponse, unknown>>
-export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise<T.SearchMvtResponse>
-export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchMvtResponse>
+export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchMvtResponse, unknown>>
+export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise<T.SearchMvtResponse>
+export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y']
   const acceptedBody: string[] = ['aggs', 'buffer', 'exact_bounds', 'extent', 'fields', 'grid_agg', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort', 'track_total_hits', 'with_labels']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
 
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
    } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
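Unlike the JSON APIs, `searchMvt` answers with a binary Mapbox vector tile addressed by the `zoom`/`x`/`y` path parameters. A hedged sketch (index, field and tile coordinates are illustrative):

    // Returns the protobuf-encoded tile for zoom 13, x 4207, y 2692,
    // computed over the geo_point field `location`.
    const tile = await client.searchMvt({
      index: 'museums',
      field: 'location',
      zoom: 13,
      x: 4207,
      y: 2692
    })
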
diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts
index 85ff0b79e..e5d724e02 100644
--- a/src/api/api/search_shards.ts
+++ b/src/api/api/search_shards.ts
@@ -35,17 +35,16 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-shards.html | Elasticsearch API documentation}
  */
-export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchShardsResponse>
-export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchShardsResponse, unknown>>
-export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptions): Promise<T.SearchShardsResponse>
-export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchShardsResponse>
+export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchShardsResponse, unknown>>
+export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise<T.SearchShardsResponse>
+export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const querystring: Record<string, any> = {}
   const body = undefined
@@ -54,7 +53,7 @@ export default async function SearchShardsApi (this: That, params?: T.SearchShar
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
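`searchShards` is the simplest shape of all: `const body = undefined`, so every property becomes a path or querystring parameter. A sketch (index and routing values are illustrative):

    const plan = await client.searchShards({ index: 'my-index', routing: 'user-1' })
    // plan.shards lists the nodes and shard copies the search would touch.
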
diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts
index be504abb3..30c828f0a 100644
--- a/src/api/api/search_template.ts
+++ b/src/api/api/search_template.ts
@@ -35,38 +35,29 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 /**
  * Run a search with a search template.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-template.html | Elasticsearch API documentation}
  */
-export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchTemplateResponse<TDocument>>
-export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchTemplateResponse<TDocument>, unknown>>
-export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptions): Promise<T.SearchTemplateResponse<TDocument>>
-export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchTemplateResponse<TDocument>>
+export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchTemplateResponse<TDocument>, unknown>>
+export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise<T.SearchTemplateResponse<TDocument>>
+export default async function SearchTemplateApi<TDocument = unknown> (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['explain', 'id', 'params', 'profile', 'source']
   const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
-  }
+  const body: Record<string, any> = {}
 
   params = params ?? {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
-      body = body ?? {}
      // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else {
       // @ts-expect-error
       querystring[key] = params[key]
     }
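A templated search likewise takes `id` (or `source`) and `params` at the top level now. A sketch, assuming a stored mustache template with the id 'my-search-template' already exists:

    const hits = await client.searchTemplate({
      index: 'my-index',
      id: 'my-search-template',
      params: { query_string: 'hello world' }
    })
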
diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts
index 90ac0b9e2..0ddbce496 100644
--- a/src/api/api/searchable_snapshots.ts
+++ b/src/api/api/searchable_snapshots.ts
@@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class SearchableSnapshots {
@@ -48,10 +47,10 @@ export default class SearchableSnapshots {
    * Retrieve node-level cache statistics about searchable snapshots.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation}
    */
-  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsCacheStatsResponse>
-  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsCacheStatsResponse, unknown>>
-  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsCacheStatsResponse>
-  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise<any> {
+  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsCacheStatsResponse>
+  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsCacheStatsResponse, unknown>>
+  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsCacheStatsResponse>
+  async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['node_id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -60,7 +59,7 @@ export default class SearchableSnapshots {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -88,10 +87,10 @@ export default class SearchableSnapshots {
    * Clear the cache of searchable snapshots.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation}
    */
-  async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsClearCacheResponse>
-  async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsClearCacheResponse, unknown>>
-  async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsClearCacheResponse>
-  async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise<any> {
+  async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsClearCacheResponse>
+  async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsClearCacheResponse, unknown>>
+  async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsClearCacheResponse>
+  async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -100,7 +99,7 @@ export default class SearchableSnapshots {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -128,30 +127,22 @@ export default class SearchableSnapshots {
    * Mount a snapshot as a searchable index.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-mount-snapshot.html | Elasticsearch API documentation}
    */
-  async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsMountResponse>
-  async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsMountResponse, unknown>>
-  async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsMountResponse>
-  async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise<any> {
+  async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsMountResponse>
+  async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsMountResponse, unknown>>
+  async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsMountResponse>
+  async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['repository', 'snapshot']
     const acceptedBody: string[] = ['index', 'renamed_index', 'index_settings', 'ignore_index_settings']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -173,10 +164,10 @@ export default class SearchableSnapshots {
    * Retrieve shard-level statistics about searchable snapshots.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation}
    */
-  async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsStatsResponse>
-  async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsStatsResponse, unknown>>
-  async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsStatsResponse>
-  async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise<any> {
+  async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchableSnapshotsStatsResponse>
+  async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchableSnapshotsStatsResponse, unknown>>
+  async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise<T.SearchableSnapshotsStatsResponse>
+  async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -185,7 +176,7 @@ export default class SearchableSnapshots {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
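`mount` mixes path parameters (`repository`, `snapshot`) and body parameters (`index`, `renamed_index`, ...) in one flat object, exactly as its `acceptedPath` and `acceptedBody` lists above describe. A sketch (repository, snapshot and index names are illustrative):

    await client.searchableSnapshots.mount({
      repository: 'my-repository',        // path
      snapshot: 'my-snapshot',            // path
      index: 'my-index',                  // body
      renamed_index: 'my-mounted-index',  // body
      wait_for_completion: true           // querystring
    })
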
diff --git a/src/api/api/security.ts b/src/api/api/security.ts
index 11f58ed03..70b0a80b7 100644
--- a/src/api/api/security.ts
+++ b/src/api/api/security.ts
@@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
 interface That { transport: Transport }
 
 export default class Security {
@@ -48,30 +47,22 @@ export default class Security {
    * Activate a user profile. Create or update a user profile on behalf of another user.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-activate-user-profile.html | Elasticsearch API documentation}
    */
-  async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityActivateUserProfileResponse>
-  async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityActivateUserProfileResponse, unknown>>
-  async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityActivateUserProfileResponse>
-  async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
+  async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityActivateUserProfileResponse>
+  async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityActivateUserProfileResponse, unknown>>
+  async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityActivateUserProfileResponse>
+  async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['access_token', 'grant_type', 'password', 'username']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -89,10 +80,10 @@ export default class Security {
    * Authenticate a user. Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-authenticate.html | Elasticsearch API documentation}
    */
-  async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityAuthenticateResponse>
-  async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityAuthenticateResponse, unknown>>
-  async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise<T.SecurityAuthenticateResponse>
-  async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise<any> {
+  async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityAuthenticateResponse>
+  async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityAuthenticateResponse, unknown>>
+  async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise<T.SecurityAuthenticateResponse>
+  async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -101,7 +92,7 @@ export default class Security {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
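Endpoints without parameters, such as `authenticate`, keep their optional-params shape and can be called with no argument at all. A sketch, reusing the earlier `client`:

    const me = await client.security.authenticate()
    console.log(me.username, me.roles)
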
@@ -119,30 +110,22 @@ export default class Security {
    * Bulk delete roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-delete-role.html | Elasticsearch API documentation}
    */
-  async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityBulkDeleteRoleResponse>
-  async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityBulkDeleteRoleResponse, unknown>>
-  async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityBulkDeleteRoleResponse>
-  async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise<any> {
+  async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityBulkDeleteRoleResponse>
+  async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityBulkDeleteRoleResponse, unknown>>
+  async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityBulkDeleteRoleResponse>
+  async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['names']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -160,30 +143,22 @@ export default class Security {
    * Bulk create or update roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-put-role.html | Elasticsearch API documentation}
    */
-  async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityBulkPutRoleResponse>
-  async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityBulkPutRoleResponse, unknown>>
-  async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityBulkPutRoleResponse>
-  async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise<any> {
+  async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityBulkPutRoleResponse>
+  async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityBulkPutRoleResponse, unknown>>
+  async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityBulkPutRoleResponse>
+  async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['roles']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -201,10 +176,10 @@ export default class Security {
    * Updates the attributes of multiple existing API keys.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-update-api-keys.html | Elasticsearch API documentation}
    */
-  async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async bulkUpdateApiKeys (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async bulkUpdateApiKeys (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async bulkUpdateApiKeys (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async bulkUpdateApiKeys (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -213,7 +188,7 @@ export default class Security {
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         querystring[key] = params[key]
       }
     }
@@ -230,31 +205,23 @@ export default class Security {
    * Change passwords. Change the passwords of users in the native realm and built-in users.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-change-password.html | Elasticsearch API documentation}
    */
-  async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityChangePasswordResponse>
-  async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityChangePasswordResponse, unknown>>
-  async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise<T.SecurityChangePasswordResponse>
-  async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise<any> {
+  async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityChangePasswordResponse>
+  async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityChangePasswordResponse, unknown>>
+  async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise<T.SecurityChangePasswordResponse>
+  async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['username']
     const acceptedBody: string[] = ['password', 'password_hash']
     const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
-        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else {
         // @ts-expect-error
         querystring[key] = params[key]
       }
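`changePassword` illustrates the optional-path variant: the `params = params ?? {}` context line lets the caller omit `username` to change their own password, or name another user. A sketch (the username is illustrative, and changing another user's password requires the appropriate security privilege):

    await client.security.changePassword({
      username: 'jacknich',
      password: 'a-new-longer-password'
    })
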
Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-api-key-cache.html | Elasticsearch API documentation} */ - async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise - async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise { + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['ids'] const querystring: Record = {} const body = undefined @@ -293,7 +260,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -314,10 +281,10 @@ export default class Security { * Clear the privileges cache. Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated. 
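* @example
* // Hypothetical usage (placeholder application name; `client` is a configured Client).
* // `application` also accepts a comma-separated list, or '*' to clear every application.
* await client.security.clearCachedPrivileges({ application: 'my-app' })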
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-privilege-cache.html | Elasticsearch API documentation} */ - async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise - async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise { + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['application'] const querystring: Record = {} const body = undefined @@ -325,7 +292,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -346,10 +313,10 @@ export default class Security { * Clear the user cache. Evict users from the user cache. You can completely clear the cache or evict specific users. 
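* @example
* // Hypothetical usage: evict every cached user of one realm (realm name is a placeholder).
* await client.security.clearCachedRealms({ realms: 'default_native' })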
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-cache.html | Elasticsearch API documentation} */ - async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise - async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise { + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['realms'] const querystring: Record = {} const body = undefined @@ -357,7 +324,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -378,10 +345,10 @@ export default class Security { * Clear the roles cache. Evict roles from the native role cache. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-role-cache.html | Elasticsearch API documentation} */ - async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise - async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise { + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -389,7 +356,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -410,10 +377,10 @@ export default class Security { * Clear service account token caches. 
Evict a subset of all entries from the service account token caches. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-service-token-caches.html | Elasticsearch API documentation} */ - async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise - async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise { + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['namespace', 'service', 'name'] const querystring: Record = {} const body = undefined @@ -421,7 +388,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -444,31 +411,23 @@ export default class Security { * Create an API key. Create an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. 
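* @example
* // Illustrative sketch with placeholder values; `client` is assumed to be a configured Client.
* // Omitting `role_descriptors` gives the key the same privileges as the creating user.
* const created = await client.security.createApiKey({
*   name: 'ingest-key',
*   expiration: '7d', // optional; API keys never expire by default
*   metadata: { team: 'platform' }
* })
* // created.id identifies the key; created.api_key is the secret, returned only once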
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-api-key.html | Elasticsearch API documentation} */ - async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise - async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise { + async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise + async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['expiration', 'name', 'role_descriptors', 'metadata'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -486,30 +445,22 @@ export default class Security { * Create a cross-cluster API key. Create an API key of the `cross_cluster` type for the API key based remote cluster access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. 
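* @example
* // Hedged sketch (placeholder names): grants remote clusters search access to matching indices.
* await client.security.createCrossClusterApiKey({
*   name: 'remote-search-key',
*   access: { search: [{ names: ['logs-*'] }] }
* })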
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-cross-cluster-api-key.html | Elasticsearch API documentation} */ - async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise - async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['access', 'expiration', 'metadata', 'name'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -527,10 +478,10 @@ export default class Security { * Create a service account token. Create a service account token for access without requiring basic authentication.
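* @example
* // Hypothetical usage: mint a token for the built-in fleet-server service account
* // (the token name is a placeholder; omit it to have one generated).
* const res = await client.security.createServiceToken({
*   namespace: 'elastic', service: 'fleet-server', name: 'my-token'
* })
* // res.token.value is the bearer secret and is only returned at creation time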
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-service-token.html | Elasticsearch API documentation} */ - async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> - async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise - async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise { + async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise + async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['namespace', 'service', 'name'] const querystring: Record = {} const body = undefined @@ -538,7 +489,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -568,10 +519,10 @@ export default class Security { * Delete application privileges. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-privilege.html | Elasticsearch API documentation} */ - async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise - async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise { + async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise + async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['application', 'name'] const querystring: Record = {} const body = undefined @@ -579,7 +530,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -601,10 +552,10 @@ export default class Security { * Delete roles. 
Delete roles in the native realm. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-role.html | Elasticsearch API documentation} */ - async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise - async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise { + async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise + async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -612,7 +563,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -633,10 +584,10 @@ export default class Security { * Delete role mappings. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-role-mapping.html | Elasticsearch API documentation} */ - async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise - async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise { + async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise + async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -644,7 +595,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -665,10 +616,10 @@ export default class Security { * Delete service account tokens. Delete service account tokens for a service in a specified namespace. 
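* @example
* // Hypothetical usage (placeholder token name; `client` is a configured Client).
* await client.security.deleteServiceToken({
*   namespace: 'elastic', service: 'fleet-server', name: 'my-token'
* })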
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-service-token.html | Elasticsearch API documentation} */ - async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise - async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise { + async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise + async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['namespace', 'service', 'name'] const querystring: Record = {} const body = undefined @@ -676,7 +627,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -699,10 +650,10 @@ export default class Security { * Delete users. Delete users from the native realm. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-user.html | Elasticsearch API documentation} */ - async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise - async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise { + async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise + async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['username'] const querystring: Record = {} const body = undefined @@ -710,7 +661,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -731,10 +682,10 @@ export default class Security { * Disable users. Disable users in the native realm. 
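* @example
* // Hypothetical usage: block a native-realm user from authenticating (placeholder username).
* await client.security.disableUser({ username: 'jacknich' })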
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user.html | Elasticsearch API documentation} */ - async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> - async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise - async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise { + async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise + async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['username'] const querystring: Record = {} const body = undefined @@ -742,7 +693,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -763,10 +714,10 @@ export default class Security { * Disable a user profile. Disable user profiles so that they are not visible in user profile searches. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user-profile.html | Elasticsearch API documentation} */ - async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> - async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise - async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise { + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['uid'] const querystring: Record = {} const body = undefined @@ -774,7 +725,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -795,10 +746,10 @@ export default class Security { * Enable users. Enable users in the native realm. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user.html | Elasticsearch API documentation} */ - async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> - async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise - async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise { + async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise + async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['username'] const querystring: Record = {} const body = undefined @@ -806,7 +757,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -827,10 +778,10 @@ export default class Security { * Enable a user profile. Enable user profiles to make them visible in user profile searches. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user-profile.html | Elasticsearch API documentation} */ - async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> - async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise - async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise { + async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise + async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['uid'] const querystring: Record = {} const body = undefined @@ -838,7 +789,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -859,10 +810,10 @@ export default class Security { * Enroll Kibana. Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. 
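* @example
* // Illustrative sketch; the call takes no parameters. The exact response fields
* // (service token, CA data) depend on the Elasticsearch version.
* const enrollment = await client.security.enrollKibana()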
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-kibana-enrollment.html | Elasticsearch API documentation} */ - async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise> - async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise - async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise { + async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise> + async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise + async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -871,7 +822,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -889,10 +840,10 @@ export default class Security { * Enroll a node. Enroll a new node to allow it to join an existing cluster with security features enabled. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-node-enrollment.html | Elasticsearch API documentation} */ - async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise - async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise { + async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise + async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -901,7 +852,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -919,10 +870,10 @@ export default class Security { * Get API key information. Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. 
If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-api-key.html | Elasticsearch API documentation} */ - async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise - async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise { + async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise + async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -931,7 +882,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -949,10 +900,10 @@ export default class Security { * Get builtin privileges. Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. 
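* @example
* // Hypothetical usage: list the cluster and index privilege names this cluster understands.
* const { cluster, index } = await client.security.getBuiltinPrivileges()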
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-builtin-privileges.html | Elasticsearch API documentation} */ - async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise - async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise { + async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise + async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -961,7 +912,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -979,10 +930,10 @@ export default class Security { * Get application privileges. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-privileges.html | Elasticsearch API documentation} */ - async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise - async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise { + async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise + async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['application', 'name'] const querystring: Record = {} const body = undefined @@ -991,7 +942,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1023,10 +974,10 @@ export default class Security { * Get roles. Get roles in the native realm. 
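* @example
* // Hypothetical usage (placeholder role name); omit `name` to fetch every role.
* const roles = await client.security.getRole({ name: 'my_admin_role' })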
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-role.html | Elasticsearch API documentation} */ - async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise - async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise { + async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise + async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -1035,7 +986,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1063,10 +1014,10 @@ export default class Security { * Get role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-role-mapping.html | Elasticsearch API documentation} */ - async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise - async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise { + async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise + async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -1075,7 +1026,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1103,10 +1054,10 @@ export default class Security { * Get service accounts. 
Get a list of service accounts that match the provided path parameters. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-service-accounts.html | Elasticsearch API documentation} */ - async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise - async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise { + async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise + async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['namespace', 'service'] const querystring: Record = {} const body = undefined @@ -1115,7 +1066,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1147,10 +1098,10 @@ export default class Security { * Get service account credentials. 
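* @example
* // Hypothetical usage: list the credentials known for one service account.
* await client.security.getServiceCredentials({ namespace: 'elastic', service: 'fleet-server' })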
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-service-credentials.html | Elasticsearch API documentation} */ - async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise - async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise { + async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise + async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['namespace', 'service'] const querystring: Record = {} const body = undefined @@ -1158,7 +1109,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1180,10 +1131,10 @@ export default class Security { * Retrieve settings for the security system indices * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-settings.html | Elasticsearch API documentation} */ - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -1192,7 +1143,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -1209,31 +1160,23 @@ export default class Security { * Get a token. Create a bearer token for access without requiring basic authentication. 
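* @example
* // Illustrative sketch with placeholder credentials, using the password grant type.
* const auth = await client.security.getToken({
*   grant_type: 'password', username: 'elastic', password: 'changeme'
* })
* // auth.access_token is the bearer token; auth.refresh_token can be exchanged later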
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-token.html | Elasticsearch API documentation} */ - async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise - async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise { + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['grant_type', 'scope', 'password', 'kerberos_ticket', 'refresh_token', 'username'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1251,10 +1194,10 @@ export default class Security { * Get users. Get information about users in the native realm and built-in users. 
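* @example
* // Hypothetical usage (placeholder username); omit `username` to list every user.
* const users = await client.security.getUser({ username: ['jacknich'] })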
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user.html | Elasticsearch API documentation} */ - async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptions): Promise - async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptions): Promise { + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['username'] const querystring: Record = {} const body = undefined @@ -1263,7 +1206,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1291,10 +1234,10 @@ export default class Security { * Get user privileges. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-privileges.html | Elasticsearch API documentation} */ - async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise - async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise { + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -1303,7 +1246,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1321,10 +1264,10 @@ export default class Security { * Get a user profile. Get a user's profile using the unique profile ID. 
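* @example
* // Hedged sketch: `uid` is a placeholder profile ID, normally obtained from the
* // activate or suggest user profile APIs.
* await client.security.getUserProfile({ uid: 'u_example_profile_uid_0' })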
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-profile.html | Elasticsearch API documentation} */ - async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise - async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise { + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['uid'] const querystring: Record = {} const body = undefined for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1353,30 +1296,22 @@ export default class Security { * Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different from the user that runs the API. The caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user’s credentials. The user for whom the authentication credentials are provided can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys.
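* @example
* // Illustrative sketch (placeholder credentials): create a key on behalf of an end user.
* await client.security.grantApiKey({
*   grant_type: 'password',
*   username: 'end-user',
*   password: 'end-user-password',
*   api_key: { name: 'on-behalf-key' }
* })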
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-grant-api-key.html | Elasticsearch API documentation} */ - async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise - async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise { + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password', 'run_as'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1394,31 +1329,23 @@ export default class Security { * Check user privileges. Determine whether the specified user has a specified list of privileges. 
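To illustrate the `grantApiKey` hunk above: its `acceptedBody` list ('api_key', 'grant_type', 'access_token', 'username', 'password', 'run_as') is exactly what the method now lifts from `params` into the request body. A sketch with hypothetical credentials and key name:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Each flattened property below lands in the POST body of /_security/api_key/grant
const granted = await client.security.grantApiKey({
  grant_type: 'password',
  username: 'end-user',          // user the key is created for (hypothetical)
  password: 'end-user-password', // that user's credentials (hypothetical)
  api_key: { name: 'my-granted-key' }
})
console.log(granted.id, granted.api_key)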
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-has-privileges.html | Elasticsearch API documentation} */ - async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise - async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise { + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['user'] const acceptedBody: string[] = ['application', 'cluster', 'index'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1446,30 +1373,22 @@ export default class Security { * Check user profile privileges. Determine whether the users associated with the specified user profile IDs have all the requested privileges. 
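The `hasPrivileges` hunk above accepts `application`, `cluster`, and `index` as body fields; a sketch checking hypothetical cluster and index privileges:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const check = await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [{ names: ['my-index'], privileges: ['read'] }] // hypothetical index name
})
console.log(check.has_all_requested)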
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-has-privileges-user-profile.html | Elasticsearch API documentation} */ - async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> - async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise - async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise { + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['uids', 'privileges'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1487,31 +1406,23 @@ export default class Security { * Invalidate API keys. This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` only allows deleting API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user’s identity. - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-api-key.html | Elasticsearch API documentation} */ - async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise - async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise { + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['id', 'ids', 'name', 'owner', 'realm_name', 'username'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1529,31 +1440,23 @@ export default class Security { * Invalidate a token. The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. 
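Matching the `invalidateApiKey` hunk above (body fields `id`, `ids`, `name`, `owner`, `realm_name`, `username`) and the one-of-three-formats rule quoted in its description, a sketch that invalidates the caller's own keys:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// owner=true is one of the three accepted request formats
const result = await client.security.invalidateApiKey({ owner: true })
console.log(result.invalidated_api_keys)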
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-token.html | Elasticsearch API documentation} */ - async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> - async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise - async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise { + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['token', 'refresh_token', 'realm_name', 'username'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1571,10 +1474,10 @@ export default class Security { * Exchanges an OpenID Connect authentication response message for an Elasticsearch access token and refresh token pair * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-authenticate.html | Elasticsearch API documentation} */ - async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async oidcAuthenticate (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcAuthenticate (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async oidcAuthenticate (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async oidcAuthenticate (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -1583,7 +1486,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -1600,10 +1503,10 @@ export default class Security { * Invalidates a refresh token and access token that were generated from the OpenID Connect Authenticate API * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-logout.html | Elasticsearch API documentation} */ - async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async oidcLogout (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcLogout (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async oidcLogout (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async oidcLogout (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -1612,7 +1515,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -1629,10 +1532,10 @@ export default class Security { * Creates an OAuth 2.0 authentication request as a URL string * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-prepare-authentication.html | Elasticsearch API documentation} */ - async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcPrepareAuthentication 
(this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async oidcPrepareAuthentication (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcPrepareAuthentication (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async oidcPrepareAuthentication (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async oidcPrepareAuthentication (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -1641,7 +1544,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -1658,15 +1561,14 @@ export default class Security { * Create or update application privileges. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-privileges.html | Elasticsearch API documentation} */ - async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise - async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise { + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['privileges'] const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + let body: any for (const key in params) { if (acceptedBody.includes(key)) { @@ -1674,7 +1576,7 @@ export default class Security { body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1692,30 +1594,22 @@ export default class Security { * Create or update roles. The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-role.html | Elasticsearch API documentation} */ - async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise - async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'remote_indices', 'remote_cluster', 'metadata', 'run_as', 'description', 'transient_metadata'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1736,30 +1630,22 @@ export default class Security { * Create or update role mappings. Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files. 
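For the `putRole` hunk above, `name` is the path parameter while fields such as `cluster` and `indices` come from the flattened body list; a sketch creating a hypothetical read-only role:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.security.putRole({
  name: 'logs_reader', // path parameter, hypothetical role name
  cluster: ['monitor'],
  indices: [{ names: ['logs-*'], privileges: ['read'] }]
})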
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-role-mapping.html | Elasticsearch API documentation} */ - async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise - async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise { + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'role_templates', 'rules', 'run_as'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1780,30 +1666,22 @@ export default class Security { * Create or update users. A password is required for adding a new user but is optional when updating an existing user. To change a user’s password without updating any other fields, use the change password API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-user.html | Elasticsearch API documentation} */ - async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptions): Promise - async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptions): Promise { + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['username', 'email', 'full_name', 'metadata', 'password', 'password_hash', 'roles', 'enabled'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1824,31 +1702,23 @@ export default class Security { * Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. 
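The `putUser` hunk above lists `username`, `email`, `full_name`, `metadata`, `password`, `password_hash`, `roles`, and `enabled` as body fields; a sketch creating a hypothetical user with the role from the previous sketch:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.security.putUser({
  username: 'jacknich',             // hypothetical
  password: 'l0ng-r4nd0m-p@ssw0rd', // required when creating, optional on update
  roles: ['logs_reader'],
  full_name: 'Jack Nicholson'
})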
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-api-key.html | Elasticsearch API documentation} */ - async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> - async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise - async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise { + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'from', 'sort', 'size', 'search_after'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1866,31 +1736,23 @@ export default class Security { * Find roles with a query. Get roles in a paginated manner. You can optionally filter the results with a query. 
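`queryApiKeys` above takes search-style body fields (`query`, `from`, `sort`, `size`, `search_after`, plus aggregations); a sketch listing up to ten keys that have not been invalidated:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const keys = await client.security.queryApiKeys({
  query: { term: { invalidated: false } },
  size: 10
})
console.log(keys.count, keys.api_keys)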
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-role.html | Elasticsearch API documentation} */ - async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise - async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise { + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1908,31 +1770,23 @@ export default class Security { * Find users with a query. Get information for users in a paginated manner. You can optionally filter the results with a query. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-user.html | Elasticsearch API documentation} */ - async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptionsWithMeta): Promise> - async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise - async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise { + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1950,30 +1804,22 @@ export default class Security { * Authenticate SAML. Submits a SAML response message to Elasticsearch for consumption. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-authenticate.html | Elasticsearch API documentation} */ - async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise - async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise { + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['content', 'ids', 'realm'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -1991,30 +1837,22 @@ export default class Security { * Logout of SAML completely. Verifies the logout response sent from the SAML IdP. 
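For the `samlAuthenticate` hunk above (body fields `content`, `ids`, `realm`), a sketch exchanging an IdP response for tokens; the base64 payload below is a hypothetical stand-in for a real SAMLResponse:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const samlResponse = 'PHNhbWxwOlJlc3BvbnNlPuKApjwvc2FtbHA6UmVzcG9uc2U+' // hypothetical base64 SAMLResponse
const tokens = await client.security.samlAuthenticate({
  content: samlResponse,
  ids: [] // or the request ID(s) returned by samlPrepareAuthentication
})
console.log(tokens.access_token, tokens.refresh_token)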
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-complete-logout.html | Elasticsearch API documentation} */ - async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise - async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise { + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['realm', 'ids', 'query_string', 'content'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2032,30 +1870,22 @@ export default class Security { * Invalidate SAML. Submits a SAML LogoutRequest message to Elasticsearch for consumption. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-invalidate.html | Elasticsearch API documentation} */ - async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise - async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise { + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['acs', 'query_string', 'realm'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2073,30 +1903,22 @@ export default class Security { * Logout of SAML. Submits a request to invalidate an access token and refresh token. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-logout.html | Elasticsearch API documentation} */ - async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise - async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise { + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['token', 'refresh_token'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2114,31 +1936,23 @@ export default class Security { * Prepare SAML authentication. Creates a SAML authentication request (`<saml:AuthnRequest>`) as a URL string, based on the configuration of the respective SAML realm in Elasticsearch.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-prepare-authentication.html | Elasticsearch API documentation} */ - async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise - async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['acs', 'realm', 'relay_state'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2156,10 +1970,10 @@ export default class Security { * Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 Service Provider. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-sp-metadata.html | Elasticsearch API documentation} */ - async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise - async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise { + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['realm_name'] const querystring: Record = {} const body = undefined @@ -2167,7 +1981,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2188,31 +2002,23 @@ export default class Security { * Suggest a user profile. Get suggestions for user profiles that match specified search criteria. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-suggest-user-profile.html | Elasticsearch API documentation} */ - async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise - async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise { + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['name', 'size', 'data', 'hint'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2230,30 +2036,22 @@ export default class Security { * Update an API key. Updates attributes of an existing API key. Users can only update API keys that they created or that were granted to them. Use this API to update API keys created by the create API Key or grant API Key APIs. If you need to apply the same update to many API keys, you can use bulk update API Keys to reduce overhead. It’s not possible to update expired API keys, or API keys that have been invalidated by invalidate API Key. This API supports updates to an API key’s access scope and metadata. The access scope of an API key is derived from the `role_descriptors` you specify in the request, and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call. If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It’s not possible to use an API key as the authentication credential for this API. To update an API key, the owner user’s credentials are required. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-api-key.html | Elasticsearch API documentation} */ - async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise - async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise { + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['role_descriptors', 'metadata', 'expiration'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2274,30 +2072,22 @@ export default class Security { * Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. 
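The `updateApiKey` hunk above takes `id` as the path parameter and `role_descriptors`, `metadata`, `expiration` as body fields; a sketch attaching metadata to a hypothetical key:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const res = await client.security.updateApiKey({
  id: 'VuaCfGcBCdbkQm-e5aOx', // hypothetical API key id
  metadata: { environment: 'staging' }
})
console.log(res.updated) // false when the call changed nothing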
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-cross-cluster-api-key.html | Elasticsearch API documentation} */ - async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise - async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['access', 'expiration', 'metadata'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -2318,10 +2108,10 @@ export default class Security { * Update settings for the security system index * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-settings.html | Elasticsearch API documentation} */ - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -2330,7 +2120,7 @@ export default class Security { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -2347,30 +2137,22 @@ export default class Security { * Update user profile data. Update specific data for the user profile that is associated with a unique ID. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-user-profile-data.html | Elasticsearch API documentation} */ - async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise - async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise { + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['uid'] const acceptedBody: string[] = ['labels', 'data'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index cf83485f3..030c6678b 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Shutdown { @@ -48,10 +47,10 @@ export default class Shutdown { * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} */ - async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise - async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise { + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class Shutdown { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -80,10 +79,10 @@ export default class Shutdown { * Retrieve status of a node or nodes that are currently marked as shutting down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} */ - async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise - async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise { + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] const querystring: Record = {} const body = undefined @@ -92,7 +91,7 @@ export default class Shutdown { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -120,30 +119,22 @@ export default class Shutdown { * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} */ - async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise - async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise { + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] const acceptedBody: string[] = ['type', 'reason', 'allocation_delay', 'target_node_name'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts index a5a76325c..b0c6ab5de 100644 --- a/src/api/api/simulate.ts +++ b/src/api/api/simulate.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Simulate { @@ -48,10 +47,10 @@ export default class Simulate { * Simulates running ingest with example documents. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-ingest-api.html | Elasticsearch API documentation} */ - async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async ingest (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async ingest (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async ingest (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async ingest (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const querystring: Record = {} const body = undefined @@ -60,7 +59,7 @@ export default class Simulate { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index 16f0913c8..c940bafc2 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Slm { @@ -48,10 +47,10 @@ export default class Slm { * Deletes an existing snapshot lifecycle policy. 
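   * @example
   * // A hedged usage sketch, not taken from this patch: `client` and the
   * // policy id are assumptions. The path parameter `policy_id` (listed in
   * // `acceptedPath` below) is passed at the top level of the request.
   * await client.slm.deleteLifecycle({ policy_id: 'nightly-snapshots' })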
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-delete-policy.html | Elasticsearch API documentation} */ - async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise - async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['policy_id'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class Slm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -80,10 +79,10 @@ export default class Slm { * Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-execute-lifecycle.html | Elasticsearch API documentation} */ - async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise - async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise { + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['policy_id'] const querystring: Record = {} const body = undefined @@ -91,7 +90,7 @@ export default class Slm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -112,10 +111,10 @@ export default class Slm { * Deletes any snapshots that are expired according to the policy's retention rules. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-execute-retention.html | Elasticsearch API documentation} */ - async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise> - async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise - async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise { + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -124,7 +123,7 @@ export default class Slm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -142,10 +141,10 @@ export default class Slm { * Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-policy.html | Elasticsearch API documentation} */ - async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise - async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['policy_id'] const querystring: Record = {} const body = undefined @@ -154,7 +153,7 @@ export default class Slm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -182,10 +181,10 @@ export default class Slm { * Returns global and policy-level statistics about actions taken by snapshot lifecycle management. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-stats.html | Elasticsearch API documentation} */ - async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptions): Promise - async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptions): Promise { + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -194,7 +193,7 @@ export default class Slm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -212,10 +211,10 @@ export default class Slm { * Retrieves the status of snapshot lifecycle management (SLM). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-status.html | Elasticsearch API documentation} */ - async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptions): Promise - async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptions): Promise { + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -224,7 +223,7 @@ export default class Slm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -242,30 +241,22 @@ export default class Slm { * Creates or updates a snapshot lifecycle policy. 
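   * @example
   * // A hedged sketch of the flattened request shape this patch series
   * // establishes: the `acceptedBody` fields ('name', 'repository',
   * // 'retention', 'schedule', ...) are accepted at the top level instead
   * // of under a `body` key. The policy id and values are assumptions.
   * await client.slm.putLifecycle({
   *   policy_id: 'nightly-snapshots',
   *   name: '<nightly-snap-{now/d}>',
   *   repository: 'my_repository',
   *   schedule: '0 30 1 * * ?'
   * })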
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-put-policy.html | Elasticsearch API documentation} */ - async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise - async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['policy_id'] const acceptedBody: string[] = ['config', 'name', 'repository', 'retention', 'schedule'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -286,10 +277,10 @@ export default class Slm { * Turns on snapshot lifecycle management (SLM). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-start.html | Elasticsearch API documentation} */ - async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> - async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptions): Promise - async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptions): Promise { + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -298,7 +289,7 @@ export default class Slm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -316,10 +307,10 @@ export default class Slm { * Turns off snapshot lifecycle management (SLM). 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-stop.html | Elasticsearch API documentation} */ - async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptions): Promise - async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptions): Promise { + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -328,7 +319,7 @@ export default class Slm { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index 9e8419ebf..377c1d252 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Snapshot { @@ -48,10 +47,10 @@ export default class Snapshot { * Triggers the review of a snapshot repository’s contents and deletes any stale data not referenced by existing snapshots. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot-repo-api.html | Elasticsearch API documentation} */ - async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise - async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class Snapshot { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -80,30 +79,22 @@ export default class Snapshot { * Clones indices from one snapshot into another snapshot in the same repository. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ - async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptions): Promise - async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptions): Promise { + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot', 'target_snapshot'] const acceptedBody: string[] = ['indices'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -126,30 +117,22 @@ export default class Snapshot { * Creates a snapshot in a repository. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ - async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptions): Promise - async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptions): Promise { + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] const acceptedBody: string[] = ['ignore_unavailable', 'include_global_state', 'indices', 'feature_states', 'metadata', 'partial'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -171,15 +154,14 @@ export default class Snapshot { * Creates a repository. 
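   * @example
   * // A hedged sketch (repository name and settings are assumptions): for
   * // this endpoint the single accepted body field, `repository`, is sent
   * // wholesale as the request body (the `let body: any` path below),
   * // rather than being merged into `body` key by key.
   * await client.snapshot.createRepository({
   *   name: 'my_repository',
   *   repository: { type: 'fs', settings: { location: '/mount/backups' } }
   * })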
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ - async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise - async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['repository'] const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + let body: any for (const key in params) { if (acceptedBody.includes(key)) { @@ -187,7 +169,7 @@ export default class Snapshot { body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -208,10 +190,10 @@ export default class Snapshot { * Deletes one or more snapshots. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ - async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise - async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise { + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] const querystring: Record = {} const body = undefined @@ -219,7 +201,7 @@ export default class Snapshot { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -241,10 +223,10 @@ export default class Snapshot { * Deletes a repository. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ - async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise - async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -252,7 +234,7 @@ export default class Snapshot { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -273,10 +255,10 @@ export default class Snapshot { * Returns information about a snapshot. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ - async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptions): Promise - async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptions): Promise { + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] const querystring: Record = {} const body = undefined @@ -284,7 +266,7 @@ export default class Snapshot { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -306,10 +288,10 @@ export default class Snapshot { * Returns information about a repository. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ - async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise - async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -318,7 +300,7 @@ export default class Snapshot { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -346,10 +328,10 @@ export default class Snapshot { * Analyzes a repository for correctness and performance * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ - async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async repositoryAnalyze (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async repositoryAnalyze (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async repositoryAnalyze (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async repositoryAnalyze (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async repositoryAnalyze (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository'] const querystring: Record = {} const body = undefined @@ -358,7 +340,7 @@ export default class Snapshot { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -378,10 +360,10 @@ export default class Snapshot { * Verifies the integrity of the contents of a snapshot repository * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ - async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async repositoryVerifyIntegrity (this: That, params: 
T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise> - async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise - async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise { + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise> + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const querystring: Record = {} const body = undefined @@ -389,7 +371,7 @@ export default class Snapshot { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -410,30 +392,22 @@ export default class Snapshot { * Restores a snapshot. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ - async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> - async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise - async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise { + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] const acceptedBody: string[] = ['feature_states', 'ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -455,10 +429,10 @@ export default class Snapshot { * Returns information about the status of a snapshot. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ - async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptions): Promise - async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptions): Promise { + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] const querystring: Record = {} const body = undefined @@ -467,7 +441,7 @@ export default class Snapshot { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -499,10 +473,10 @@ export default class Snapshot { * Verifies a repository. 
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
   */
-  async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotVerifyRepositoryResponse>
-  async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotVerifyRepositoryResponse, unknown>>
-  async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotVerifyRepositoryResponse>
-  async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise<any> {
+  async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotVerifyRepositoryResponse>
+  async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotVerifyRepositoryResponse, unknown>>
+  async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotVerifyRepositoryResponse>
+  async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = ['name']
    const querystring: Record<string, any> = {}
    const body = undefined

    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts
index fbd30e803..ce8144cc9 100644
--- a/src/api/api/sql.ts
+++ b/src/api/api/sql.ts
@@ -35,7 +35,6 @@ import {
  TransportResult
} from '@elastic/transport'
import * as T from '../types'
-import * as TB from '../typesWithBodyKey'

interface That { transport: Transport }

export default class Sql {
@@ -45,33 +44,25 @@ export default class Sql {
  }

  /**
-   * Clears the SQL cursor
+   * Clear an SQL search cursor.
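   * @example
   * // A hedged usage sketch; the `client` instance and cursor string are
   * // assumptions. `cursor` is listed in `acceptedBody` below, so it is now
   * // passed at the top level and serialized into the request body.
   * await client.sql.clearCursor({ cursor: 'DXF1ZXJ5QW5kRmV0Y2g=' })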
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-sql-cursor-api.html | Elasticsearch API documentation}
   */
-  async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlClearCursorResponse>
-  async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlClearCursorResponse, unknown>>
-  async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptions): Promise<T.SqlClearCursorResponse>
-  async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptions): Promise<any> {
+  async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlClearCursorResponse>
+  async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlClearCursorResponse, unknown>>
+  async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise<T.SqlClearCursorResponse>
+  async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = []
    const acceptedBody: string[] = ['cursor']
    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

    for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -86,13 +77,13 @@ export default class Sql {
  }

  /**
-   * Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it.
+   * Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-async-sql-search-api.html | Elasticsearch API documentation}
   */
-  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlDeleteAsyncResponse>
-  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlDeleteAsyncResponse, unknown>>
-  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise<T.SqlDeleteAsyncResponse>
-  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise<any> {
+  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlDeleteAsyncResponse>
+  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlDeleteAsyncResponse, unknown>>
+  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise<T.SqlDeleteAsyncResponse>
+  async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = ['id']
    const querystring: Record<string, any> = {}
    const body = undefined

    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -118,13 +109,13 @@ export default class Sql {
  }

  /**
-   * Returns the current status and available results for an async SQL search or stored synchronous SQL search
+   * Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-api.html | Elasticsearch API documentation}
   */
-  async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlGetAsyncResponse>
-  async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlGetAsyncResponse, unknown>>
-  async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise<T.SqlGetAsyncResponse>
-  async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise<any> {
+  async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlGetAsyncResponse>
+  async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlGetAsyncResponse, unknown>>
+  async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise<T.SqlGetAsyncResponse>
+  async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = ['id']
    const querystring: Record<string, any> = {}
    const body = undefined

    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -150,13 +141,13 @@ export default class Sql {
  }

  /**
-   * Returns the current status of an async SQL search or a stored synchronous SQL search
+   * Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-status-api.html | Elasticsearch API documentation}
   */
-  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlGetAsyncStatusResponse>
-  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlGetAsyncStatusResponse, unknown>>
-  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise<T.SqlGetAsyncStatusResponse>
-  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise<any> {
+  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlGetAsyncStatusResponse>
+  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlGetAsyncStatusResponse, unknown>>
+  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise<T.SqlGetAsyncStatusResponse>
+  async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = ['id']
    const querystring: Record<string, any> = {}
    const body = undefined

    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -182,34 +173,26 @@ export default class Sql {
  }

  /**
-   * Executes a SQL request
+   * Get SQL search results. Run an SQL request.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/sql-search-api.html | Elasticsearch API documentation}
   */
-  async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlQueryResponse>
-  async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlQueryResponse, unknown>>
-  async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise<T.SqlQueryResponse>
-  async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise<any> {
+  async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlQueryResponse>
+  async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlQueryResponse, unknown>>
+  async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise<T.SqlQueryResponse>
+  async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = []
    const acceptedBody: string[] = ['catalog', 'columnar', 'cursor', 'fetch_size', 'filter', 'query', 'request_timeout', 'page_timeout', 'time_zone', 'field_multi_value_leniency', 'runtime_mappings', 'wait_for_completion_timeout', 'params', 'keep_alive', 'keep_on_completion', 'index_using_frozen']
    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

    params = params ?? {}
    for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -224,33 +207,25 @@ export default class Sql {
  }

  /**
-   * Translates SQL into Elasticsearch queries
+   * Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/sql-translate-api.html | Elasticsearch API documentation}
   */
-  async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlTranslateResponse>
-  async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlTranslateResponse, unknown>>
-  async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptions): Promise<T.SqlTranslateResponse>
-  async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptions): Promise<any> {
+  async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlTranslateResponse>
+  async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlTranslateResponse, unknown>>
+  async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise<T.SqlTranslateResponse>
+  async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = []
    const acceptedBody: string[] = ['fetch_size', 'filter', 'query', 'time_zone']
    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
+    const body: Record<string, any> = {}

    for (const key in params) {
      if (acceptedBody.includes(key)) {
-        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts
index 08057b0f9..8ebb1a7ac 100644
--- a/src/api/api/ssl.ts
+++ b/src/api/api/ssl.ts
@@ -35,7 +35,6 @@ import {
  TransportResult
} from '@elastic/transport'
import * as T from '../types'
-import * as TB from '../typesWithBodyKey'

interface That { transport: Transport }

export default class Ssl {
@@ -48,10 +47,10 @@ export default class Ssl {
   * Get SSL certificates. Get information about the X.509 certificates that are used to encrypt communications in the cluster. The API returns a list that includes certificates from all TLS contexts including: - Settings for transport and HTTP interfaces - TLS settings that are used within authentication realms - TLS settings for remote monitoring exporters The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. It also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.
   * NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration. If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-ssl.html | Elasticsearch API documentation}
   */
-  async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SslCertificatesResponse>
-  async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SslCertificatesResponse, unknown>>
-  async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptions): Promise<T.SslCertificatesResponse>
-  async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptions): Promise<any> {
+  async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SslCertificatesResponse>
+  async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SslCertificatesResponse, unknown>>
+  async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise<T.SslCertificatesResponse>
+  async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise<any> {
    const acceptedPath: string[] = []
    const querystring: Record<string, any> = {}
    const body = undefined
@@ -60,7 +59,7 @@ export default class Ssl {
    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
-      } else if (key !== 'body') {
+      } else {
        // @ts-expect-error
        querystring[key] = params[key]
      }
diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts
index abbf98749..896bc4d2b 100644
--- a/src/api/api/synonyms.ts
+++ b/src/api/api/synonyms.ts
@@ -35,7 +35,6 @@ import {
  TransportResult
} from '@elastic/transport'
import * as T from '../types'
-import * as TB from '../typesWithBodyKey'

interface That { transport: Transport }

export default class Synonyms {
@@ -45,13 +44,13 @@ export default class Synonyms {
  }

  /**
-   * Deletes a synonym set
+   * Delete a synonym set.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonyms-set.html | Elasticsearch API documentation} */ - async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest | TB.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest | TB.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest | TB.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise - async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest | TB.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class Synonyms { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -77,13 +76,13 @@ export default class Synonyms { } /** - * Deletes a synonym rule in a synonym set + * Delete a synonym rule. Delete a synonym rule from a synonym set. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonym-rule.html | Elasticsearch API documentation} */ - async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise - async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['set_id', 'rule_id'] const querystring: Record = {} const body = undefined @@ -91,7 +90,7 @@ export default class Synonyms { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -110,13 +109,13 @@ export default class Synonyms { } /** - * Retrieves a synonym set + * Get a synonym set. 
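* @example
* // Illustrative sketch: fetch a synonym set by a placeholder id; the
* // `count`/`synonyms_set` response fields are assumed from the client types.
* const set = await client.synonyms.getSynonym({ id: 'my-synonyms-set' })
* console.log(set.count, set.synonyms_set)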
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonyms-set.html | Elasticsearch API documentation} */ - async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise - async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined @@ -124,7 +123,7 @@ export default class Synonyms { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -142,13 +141,13 @@ export default class Synonyms { } /** - * Retrieves a synonym rule from a synonym set + * Get a synonym rule. Get a synonym rule from a synonym set. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonym-rule.html | Elasticsearch API documentation} */ - async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise - async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['set_id', 'rule_id'] const querystring: Record = {} const body = undefined @@ -156,7 +155,7 @@ export default class Synonyms { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -175,13 +174,13 @@ export default class Synonyms { } /** - * Retrieves a summary of all defined synonym sets + * Get all synonym sets. Get a summary of all defined synonym sets. 
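* @example
* // Illustrative sketch: list every synonym set; no parameters are required.
* const sets = await client.synonyms.getSynonymsSets()
* console.log(sets.count)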
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-synonyms-sets.html | Elasticsearch API documentation} */ - async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise - async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -190,7 +189,7 @@ export default class Synonyms { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -205,33 +204,25 @@ export default class Synonyms { } /** - * Creates or updates a synonym set. + * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonyms-set.html | Elasticsearch API documentation} */ - async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise - async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise { + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['synonyms_set'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -249,33 +240,25 @@ export default class Synonyms { } /** - * Creates or updates a synonym rule in a synonym set + * Create or update a synonym rule. Create or update a synonym rule in a synonym set. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonym-rule.html | Elasticsearch API documentation} */ - async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise - async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise { + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['set_id', 'rule_id'] const acceptedBody: string[] = ['synonyms'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index 0cb901686..1c931bc6b 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Tasks { @@ -48,10 +47,10 @@ export default class Tasks { * Cancels a task, if it can be cancelled through an API. 
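* @example
* // Illustrative sketch: the task id (`node_id:task_number`) is a placeholder.
* await client.tasks.cancel({ task_id: 'oTUltX4IQMOUUVeiohTt8A:12345' })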
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ - async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> - async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptions): Promise - async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptions): Promise { + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_id'] const querystring: Record = {} const body = undefined @@ -60,7 +59,7 @@ export default class Tasks { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -88,10 +87,10 @@ export default class Tasks { * Get task information. Returns information about the tasks currently executing in the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ - async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptions): Promise - async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptions): Promise { + async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_id'] const querystring: Record = {} const body = undefined @@ -99,7 +98,7 @@ export default class Tasks { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -120,10 +119,10 @@ export default class Tasks { * The task management API returns information about tasks currently executing on one or more nodes in the cluster. 
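* @example
* // Illustrative sketch: `detailed` is assumed from the tasks API query options.
* const tasks = await client.tasks.list({ detailed: true })
* console.log(tasks.nodes)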
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ - async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise> - async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptions): Promise - async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptions): Promise { + async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise + async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -132,7 +131,7 @@ export default class Tasks { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index fe04897e9..1176ced32 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -35,37 +35,28 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Get terms in an index. Discover terms that match a partial string in an index. This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios. If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate. This can occur due to a few reasons, such as a request timeout or a node error. NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. 
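* @example
* // Illustrative sketch (placeholder index and field): an auto-complete style
* // lookup; `complete` is documented above as the approximation signal.
* const result = await client.termsEnum({ index: 'my-index', field: 'tags', string: 'kib' })
* if (!result.complete) console.log('result set may be approximate')
* console.log(result.terms)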
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-terms-enum.html | Elasticsearch API documentation} */ -export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptions): Promise -export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptions): Promise { +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['field', 'size', 'timeout', 'case_insensitive', 'index_filter', 'string', 'search_after'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index f47ff74c8..f113c2c15 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -35,37 +35,28 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Get term vector information. Get information and statistics about terms in the fields of a particular document. 
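* @example
* // Illustrative sketch (placeholder index/id); `term_statistics` is assumed
* // from the termvectors API query options.
* const tv = await client.termvectors({ index: 'my-index', id: '1', term_statistics: true })
* console.log(tv.term_vectors)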
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html | Elasticsearch API documentation} */ -export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptions): Promise -export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptions): Promise { +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'id'] const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index 972556a6d..6efaaaf27 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class TextStructure { @@ -48,10 +47,10 @@ export default class TextStructure { * Finds the structure of a text field in an index. 
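* @example
* // Illustrative sketch: the client types are still TODO here, so the
* // `index`/`field` parameter names are assumed from the Elasticsearch
* // find_field_structure API itself; the values are placeholders.
* await client.textStructure.findFieldStructure({ index: 'my-logs', field: 'message' })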
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-field-structure.html | Elasticsearch API documentation} */ - async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async findFieldStructure (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async findFieldStructure (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async findFieldStructure (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async findFieldStructure (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -60,7 +59,7 @@ export default class TextStructure { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -77,10 +76,10 @@ export default class TextStructure { * Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-message-structure.html | Elasticsearch API documentation} */ - async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async findMessageStructure (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async findMessageStructure (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async findMessageStructure (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async findMessageStructure (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -89,7 +88,7 @@ export default class TextStructure { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -106,15 +105,14 @@ export default class TextStructure { * Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. 
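* @example
* // Illustrative sketch: `text_files` (the only body field accepted above)
* // carries the sample documents to analyze; the log line is made up.
* await client.textStructure.findStructure({ text_files: [{ message: 'GET /search HTTP/1.1 200 1070000' }] })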
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-structure.html | Elasticsearch API documentation} */ - async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> - async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise - async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise { + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['text_files'] const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + let body: any for (const key in params) { if (acceptedBody.includes(key)) { @@ -122,7 +120,7 @@ export default class TextStructure { body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -140,30 +138,22 @@ export default class TextStructure { * Tests a Grok pattern on some text. 
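* @example
* // Illustrative sketch using the `grok_pattern` and `text` body fields
* // accepted above; the pattern and sample text are placeholders.
* const res = await client.textStructure.testGrokPattern({
*   grok_pattern: '%{WORD:first_name} %{WORD:last_name}',
*   text: ['Ada Lovelace']
* })
* console.log(res.matches)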
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/test-grok-pattern.html | Elasticsearch API documentation} */ - async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> - async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise - async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise { + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['grok_pattern', 'text'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index e1b9dc6bc..9b56e861d 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Transform { @@ -48,10 +47,10 @@ export default class Transform { * Delete a transform. Deletes a transform. 
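* @example
* // Illustrative sketch (placeholder id); `force` is assumed from the delete
* // transform API and removes the transform even if it has not been stopped.
* await client.transform.deleteTransform({ transform_id: 'my-transform', force: true })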
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-transform.html | Elasticsearch API documentation} */ - async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise - async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise { + async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise + async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class Transform { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -80,10 +79,10 @@ export default class Transform { * Retrieves transform usage information for transform nodes. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform-node-stats.html | Elasticsearch API documentation} */ - async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -92,7 +91,7 @@ export default class Transform { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -109,10 +108,10 @@ export default class Transform { * Get transforms. Retrieves configuration information for transforms. 
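* @example
* // Illustrative sketch (placeholder id): omit `transform_id` to retrieve
* // every transform.
* const res = await client.transform.getTransform({ transform_id: 'my-transform' })
* console.log(res.transforms)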
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform.html | Elasticsearch API documentation} */ - async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptions): Promise - async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptions): Promise { + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const querystring: Record = {} const body = undefined @@ -121,7 +120,7 @@ export default class Transform { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -149,10 +148,10 @@ export default class Transform { * Get transform stats. Retrieves usage information for transforms. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform-stats.html | Elasticsearch API documentation} */ - async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise - async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise { + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const querystring: Record = {} const body = undefined @@ -160,7 +159,7 @@ export default class Transform { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -181,31 +180,23 @@ export default class Transform { * Preview a transform. 
Generates a preview of the results that you will get when you create a transform with the same configuration. It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/preview-transform.html | Elasticsearch API documentation} */ - async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise> - async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise { + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise> + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -233,30 +224,22 @@ export default class Transform { * Create a transform. Creates a transform. A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. 
When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-transform.html | Elasticsearch API documentation} */ - async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise - async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise { + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const acceptedBody: string[] = ['dest', 'description', 'frequency', 'latest', '_meta', 'pivot', 'retention_policy', 'settings', 'source', 'sync'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -277,10 +260,10 @@ export default class Transform { * Reset a transform. Resets a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. 
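* @example
* // Illustrative sketch (placeholder id): `force`, mentioned above, resets the
* // transform without requiring it to be stopped first.
* await client.transform.resetTransform({ transform_id: 'my-transform', force: true })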
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/reset-transform.html | Elasticsearch API documentation} */ - async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptions): Promise - async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptions): Promise { + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const querystring: Record = {} const body = undefined @@ -288,7 +271,7 @@ export default class Transform { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -309,10 +292,10 @@ export default class Transform { * Schedule a transform to start now. Instantly runs a transform to process data. If you _schedule_now a transform, it will process the new data instantly, without waiting for the configured frequency interval. After _schedule_now API is called, the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/schedule-now-transform.html | Elasticsearch API documentation} */ - async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise - async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise { + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const querystring: Record = {} const body = undefined @@ -320,7 +303,7 @@ export default class Transform { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -341,10 +324,10 @@ export default class Transform { * Start a transform. Starts a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-transform.html | Elasticsearch API documentation} */ - async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptions): Promise - async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptions): Promise { + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const querystring: Record = {} const body = undefined @@ -352,7 +335,7 @@ export default class Transform { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -373,10 +356,10 @@ export default class Transform { * Stop transforms. Stops one or more transforms. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-transform.html | Elasticsearch API documentation} */ - async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptions): Promise - async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptions): Promise { + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const querystring: Record = {} const body = undefined @@ -384,7 +367,7 @@ export default class Transform { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -405,30 +388,22 @@ export default class Transform { * Update a transform. Updates certain properties of a transform. 
All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-transform.html | Elasticsearch API documentation} */ - async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise - async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const acceptedBody: string[] = ['dest', 'description', 'frequency', '_meta', 'source', 'settings', 'sync', 'retention_policy'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -449,10 +424,10 @@ export default class Transform { * Upgrades all transforms. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. 
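* @example
* // Illustrative sketch: `dry_run` is assumed from the upgrade transforms API
* // and reports what would change without applying the upgrade.
* const res = await client.transform.upgradeTransforms({ dry_run: true })
* console.log(res.needs_update, res.updated, res.no_action)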
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/upgrade-transforms.html | Elasticsearch API documentation} */ - async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise - async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise { + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -461,7 +436,7 @@ export default class Transform { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/update.ts b/src/api/api/update.ts index 0dd6f4220..400f8dae0 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -35,37 +35,28 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Update a document. Updates a document by running a script or passing a partial document. 
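* @example
* // Illustrative sketch (placeholder index/id): a partial-document update via
* // the `doc` field accepted above.
* await client.update({ index: 'my-index', id: '1', doc: { views: 42 } })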
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html | Elasticsearch API documentation} */ -export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptions): Promise> -export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptions): Promise { +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise> +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', '_source', 'upsert'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts index 64d5c95ef..83ea42500 100644 --- a/src/api/api/update_by_query.ts +++ b/src/api/api/update_by_query.ts @@ -35,37 +35,28 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. 
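The same flattening applies here; a sketch using `query`, `script`, and `conflicts` straight from the `acceptedBody` list below (index and script source are hypothetical):

await client.updateByQuery({
  index: 'my-index',
  query: { term: { flagged: true } },
  script: { source: 'ctx._source.flagged = false' },
  conflicts: 'proceed' // skip version conflicts instead of aborting
})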
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html | Elasticsearch API documentation} */ -export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptions): Promise -export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptions): Promise { +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice', 'conflicts'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts index 75acdc834..bd5fa29d7 100644 --- a/src/api/api/update_by_query_rethrottle.ts +++ b/src/api/api/update_by_query_rethrottle.ts @@ -35,17 +35,16 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** * Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. 
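A sketch under the same assumptions; the task id below is a placeholder, and `requests_per_second` follows the rethrottle API:

await client.updateByQueryRethrottle({
  task_id: 'oTUltX4IQMOUUVeiohTt8A:12345', // placeholder task id
  requests_per_second: 500
})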
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html | Elasticsearch API documentation} */ -export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise -export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_id'] const querystring: Record = {} const body = undefined @@ -53,7 +52,7 @@ export default async function UpdateByQueryRethrottleApi (this: That, params: T. for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index 5b98a7593..324ab9edd 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Watcher { @@ -48,10 +47,10 @@ export default class Watcher { * Acknowledges a watch, manually throttling the execution of the watch's actions. 
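Sketch of the flattened call, with hypothetical ids for the two path parameters (`watch_id`, `action_id`) accepted below:

await client.watcher.ackWatch({
  watch_id: 'my_watch',
  action_id: 'email_admin' // optional: acknowledge a single action
})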
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-ack-watch.html | Elasticsearch API documentation} */ - async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise - async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise { + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['watch_id', 'action_id'] const querystring: Record = {} const body = undefined @@ -59,7 +58,7 @@ export default class Watcher { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -88,10 +87,10 @@ export default class Watcher { * Activates a currently inactive watch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-activate-watch.html | Elasticsearch API documentation} */ - async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise - async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise { + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['watch_id'] const querystring: Record = {} const body = undefined @@ -99,7 +98,7 @@ export default class Watcher { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -120,10 +119,10 @@ export default class Watcher { * Deactivates a currently active watch. 
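Both toggles take only the `watch_id` path parameter; a sketch with a hypothetical id:

await client.watcher.deactivateWatch({ watch_id: 'my_watch' })
await client.watcher.activateWatch({ watch_id: 'my_watch' })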
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-deactivate-watch.html | Elasticsearch API documentation} */ - async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise - async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise { + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['watch_id'] const querystring: Record = {} const body = undefined @@ -131,7 +130,7 @@ export default class Watcher { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -152,10 +151,10 @@ export default class Watcher { * Removes a watch from Watcher. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-delete-watch.html | Elasticsearch API documentation} */ - async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise - async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise { + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined @@ -163,7 +162,7 @@ export default class Watcher { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -184,31 +183,23 @@ export default class Watcher { * This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. 
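For the execution-control options this description covers, a hedged sketch (watch id hypothetical; both flags are in the `acceptedBody` list below):

await client.watcher.executeWatch({
  id: 'my_watch',
  ignore_condition: true,  // run the actions even if the condition is false
  record_execution: false  // leave this run out of the watch history
})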
You can execute the watch without executing all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after execution. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-execute-watch.html | Elasticsearch API documentation} */ - async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise - async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise { + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['action_modes', 'alternative_input', 'ignore_condition', 'record_execution', 'simulated_actions', 'trigger_data', 'watch'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -236,10 +227,10 @@ export default class Watcher { * Retrieve settings for the watcher system index * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-get-settings.html | Elasticsearch API documentation} */ - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -248,7 +239,7 @@ export default class Watcher { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } @@ -265,10 +256,10 @@ export default class Watcher { * Retrieves a watch by its ID. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-get-watch.html | Elasticsearch API documentation} */ - async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise - async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise { + async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise + async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const querystring: Record = {} const body = undefined @@ -276,7 +267,7 @@ export default class Watcher { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -297,30 +288,22 @@ export default class Watcher { * Creates a new watch, or updates an existing one. 
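A minimal, hypothetical watch in the flattened shape; `trigger`, `input`, `condition`, and `actions` are exactly the fields this hunk moves out of `body`:

await client.watcher.putWatch({
  id: 'my_watch',
  trigger: { schedule: { interval: '10m' } },
  input: { simple: { note: 'hypothetical payload' } },
  condition: { always: {} },
  actions: {
    log_it: { logging: { text: 'watch {{ctx.watch_id}} fired' } }
  }
})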
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-put-watch.html | Elasticsearch API documentation} */ - async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise - async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise { + async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise + async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'transform', 'trigger'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined - } + const body: Record = {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -341,31 +324,23 @@ export default class Watcher { * Retrieves stored watches. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-query-watches.html | Elasticsearch API documentation} */ - async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise - async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise { + async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise + async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['from', 'size', 'query', 'sort', 'search_after'] const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined - } + const body: Record = {} params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { - body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -383,10 +358,10 @@ export default class Watcher { * Starts Watcher if it is not already running. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-start.html | Elasticsearch API documentation} */ - async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise> - async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptions): Promise - async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptions): Promise { + async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise> + async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise + async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -395,7 +370,7 @@ export default class Watcher { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -413,10 +388,10 @@ export default class Watcher { * Retrieves the current Watcher metrics. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-stats.html | Elasticsearch API documentation} */ - async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptions): Promise - async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptions): Promise { + async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['metric'] const querystring: Record = {} const body = undefined @@ -425,7 +400,7 @@ export default class Watcher { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -453,10 +428,10 @@ export default class Watcher { * Stops Watcher if it is running. 
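Tying these watcher endpoints together, a hedged sketch: the query and page size for `queryWatches` match its `acceptedBody` list above, everything else is assumption:

const found = await client.watcher.queryWatches({
  query: { match_all: {} },
  size: 10
})

await client.watcher.stop()
await client.watcher.start()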
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-stop.html | Elasticsearch API documentation} */ - async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptions): Promise - async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptions): Promise { + async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise + async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -465,7 +440,7 @@ export default class Watcher { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -483,10 +458,10 @@ export default class Watcher { * Update settings for the watcher system index * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-update-settings.html | Elasticsearch API documentation} */ - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -495,7 +470,7 @@ export default class Watcher { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { querystring[key] = params[key] } } diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts index 3472a080c..e8ad16753 100644 --- a/src/api/api/xpack.ts +++ b/src/api/api/xpack.ts @@ -35,7 +35,6 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' interface That { transport: Transport } export default class Xpack { @@ -48,10 +47,10 @@ export default class Xpack { * Provides general information about the installed X-Pack features. 
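Neither X-Pack endpoint requires parameters; a sketch covering both the info call here and the usage call shown below:

const info = await client.xpack.info()
const usage = await client.xpack.usage()
console.log(info.tagline) // e.g. 'You know, for X'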
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/info-api.html | Elasticsearch API documentation} */ - async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> - async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptions): Promise - async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptions): Promise { + async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise + async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -60,7 +59,7 @@ export default class Xpack { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } @@ -78,10 +77,10 @@ export default class Xpack { * This API provides information about which features are currently enabled and available under the current license and some usage statistics. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/usage-api.html | Elasticsearch API documentation} */ - async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> - async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptions): Promise - async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptions): Promise { + async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> + async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise + async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -90,7 +89,7 @@ export default class Xpack { for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/types.ts b/src/api/types.ts index d576ad3c0..fbe66a62f 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -6689,7 +6689,6 @@ export interface AsyncSearchSubmitRequest extends RequestBase { index?: Indices wait_for_completion_timeout?: Duration keep_on_completion?: boolean - keep_alive?: Duration allow_no_indices?: boolean allow_partial_search_results?: boolean analyzer?: string @@ -6704,7 +6703,6 @@ export interface AsyncSearchSubmitRequest extends RequestBase { lenient?: boolean max_concurrent_shard_requests?: long preference?: string - pre_filter_shard_size?: long request_cache?: boolean routing?: 
Routing search_type?: SearchType @@ -11045,8 +11043,8 @@ export interface IndicesMappingLimitSettingsNestedObjects { } export interface IndicesMappingLimitSettingsTotalFields { - limit?: long - ignore_dynamic_beyond_limit?: boolean + limit?: long | string + ignore_dynamic_beyond_limit?: boolean | string } export interface IndicesMerge { @@ -11480,6 +11478,7 @@ export interface IndicesExistsAliasRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean + master_timeout?: Duration } export type IndicesExistsAliasResponse = boolean @@ -11634,6 +11633,7 @@ export interface IndicesGetAliasRequest extends RequestBase { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean + master_timeout?: Duration } export type IndicesGetAliasResponse = Record @@ -13362,10 +13362,12 @@ export interface LogstashPutPipelineRequest extends RequestBase { export type LogstashPutPipelineResponse = boolean export interface MigrationDeprecationsDeprecation { - details: string + details?: string level: MigrationDeprecationsDeprecationLevel message: string url: string + resolve_during_rolling_upgrade: boolean + _meta?: Record } export type MigrationDeprecationsDeprecationLevel = 'none' | 'info' | 'warning' | 'critical' @@ -16532,7 +16534,7 @@ export interface NodesInfoNodeInfoPath { logs?: string home?: string repo?: string[] - data?: string[] + data?: string | string[] } export interface NodesInfoNodeInfoRepositories { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts deleted file mode 100644 index 516bdefc2..000000000 --- a/src/api/typesWithBodyKey.ts +++ /dev/null @@ -1,21298 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* eslint-disable @typescript-eslint/array-type */ -/* eslint-disable @typescript-eslint/no-empty-interface */ -/* eslint-disable @typescript-eslint/no-unused-vars */ - -/** - * We are still working on this type, it will arrive soon. - * If it's critical for you, please open an issue. 
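The request interfaces deleted below are the old body-key shapes; their `@deprecated` notes spell out the migration this whole patch completes. A before/after sketch using `count` (whose deprecated body appears just below); note the 'before' line stops type-checking once this file is gone:

// before: nested under the deprecated `body` key
await client.count({ index: 'my-index', body: { query: { match_all: {} } } })

// after: the nested keys move to the top level
await client.count({ index: 'my-index', query: { match_all: {} } })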
- * https://github.com/elastic/elasticsearch-js - */ -export type TODO = Record - -export interface BulkCreateOperation extends BulkWriteOperation { -} - -export interface BulkDeleteOperation extends BulkOperationBase { -} - -export interface BulkIndexOperation extends BulkWriteOperation { -} - -export interface BulkOperationBase { - _id?: Id - _index?: IndexName - routing?: Routing - if_primary_term?: long - if_seq_no?: SequenceNumber - version?: VersionNumber - version_type?: VersionType -} - -export interface BulkOperationContainer { - index?: BulkIndexOperation - create?: BulkCreateOperation - update?: BulkUpdateOperation - delete?: BulkDeleteOperation -} - -export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' - -export interface BulkRequest extends RequestBase { - index?: IndexName - pipeline?: string - refresh?: Refresh - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - timeout?: Duration - wait_for_active_shards?: WaitForActiveShards - require_alias?: boolean - /** @deprecated The use of the 'body' key has been deprecated, use 'operations' instead. */ - body?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] -} - -export interface BulkResponse { - errors: boolean - items: Partial>[] - took: long - ingest_took?: long -} - -export interface BulkResponseItem { - _id?: string | null - _index: string - status: integer - error?: ErrorCause - _primary_term?: long - result?: string - _seq_no?: SequenceNumber - _shards?: ShardStatistics - _version?: VersionNumber - forced_refresh?: boolean - get?: InlineGet> -} - -export interface BulkUpdateAction { - detect_noop?: boolean - doc?: TPartialDocument - doc_as_upsert?: boolean - script?: Script | string - scripted_upsert?: boolean - _source?: SearchSourceConfig - upsert?: TDocument -} - -export interface BulkUpdateOperation extends BulkOperationBase { - require_alias?: boolean - retry_on_conflict?: integer -} - -export interface BulkWriteOperation extends BulkOperationBase { - dynamic_templates?: Record - pipeline?: string - require_alias?: boolean -} - -export interface ClearScrollRequest extends RequestBase { - scroll_id?: ScrollIds - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - scroll_id?: ScrollIds - } -} - -export interface ClearScrollResponse { - succeeded: boolean - num_freed: integer -} - -export interface ClosePointInTimeRequest extends RequestBase { - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - id: Id - } -} - -export interface ClosePointInTimeResponse { - succeeded: boolean - num_freed: integer -} - -export interface CountRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - analyzer?: string - analyze_wildcard?: boolean - default_operator?: QueryDslOperator - df?: string - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - lenient?: boolean - min_score?: double - preference?: string - routing?: Routing - terminate_after?: long - q?: string - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - query?: QueryDslQueryContainer - } -} - -export interface CountResponse { - count: long - _shards: ShardStatistics -} - -export interface CreateRequest extends RequestBase { - id: Id - index: IndexName - pipeline?: string - refresh?: Refresh - routing?: Routing - timeout?: Duration - version?: VersionNumber - version_type?: VersionType - wait_for_active_shards?: WaitForActiveShards - /** @deprecated The use of the 'body' key has been deprecated, use 'document' instead. */ - body?: TDocument -} - -export type CreateResponse = WriteResponseBase - -export interface DeleteRequest extends RequestBase { - id: Id - index: IndexName - if_primary_term?: long - if_seq_no?: SequenceNumber - refresh?: Refresh - routing?: Routing - timeout?: Duration - version?: VersionNumber - version_type?: VersionType - wait_for_active_shards?: WaitForActiveShards -} - -export type DeleteResponse = WriteResponseBase - -export interface DeleteByQueryRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - analyzer?: string - analyze_wildcard?: boolean - conflicts?: Conflicts - default_operator?: QueryDslOperator - df?: string - expand_wildcards?: ExpandWildcards - from?: long - ignore_unavailable?: boolean - lenient?: boolean - preference?: string - refresh?: boolean - request_cache?: boolean - requests_per_second?: float - routing?: Routing - q?: string - scroll?: Duration - scroll_size?: long - search_timeout?: Duration - search_type?: SearchType - slices?: Slices - sort?: string[] - stats?: string[] - terminate_after?: long - timeout?: Duration - version?: boolean - wait_for_active_shards?: WaitForActiveShards - wait_for_completion?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - max_docs?: long - query?: QueryDslQueryContainer - slice?: SlicedScroll - } -} - -export interface DeleteByQueryResponse { - batches?: long - deleted?: long - failures?: BulkIndexByScrollFailure[] - noops?: long - requests_per_second?: float - retries?: Retries - slice_id?: integer - task?: TaskId - throttled?: Duration - throttled_millis?: DurationValue - throttled_until?: Duration - throttled_until_millis?: DurationValue - timed_out?: boolean - took?: DurationValue - total?: long - version_conflicts?: long -} - -export interface DeleteByQueryRethrottleRequest extends RequestBase { - task_id: TaskId - requests_per_second?: float -} - -export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase - -export interface DeleteScriptRequest extends RequestBase { - id: Id - master_timeout?: Duration - timeout?: Duration -} - -export type DeleteScriptResponse = AcknowledgedResponseBase - -export interface ExistsRequest extends RequestBase { - id: Id - index: IndexName - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - stored_fields?: Fields - version?: VersionNumber - version_type?: VersionType -} - -export type ExistsResponse = boolean - -export interface ExistsSourceRequest extends RequestBase { - id: Id - index: IndexName - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - version?: VersionNumber - version_type?: VersionType -} - -export type ExistsSourceResponse = boolean - -export interface ExplainExplanation { - description: string - details: ExplainExplanationDetail[] - value: float -} - -export interface ExplainExplanationDetail { - description: string - details?: ExplainExplanationDetail[] - value: float -} - -export interface ExplainRequest extends RequestBase { - id: Id - index: IndexName - analyzer?: string - analyze_wildcard?: boolean - default_operator?: QueryDslOperator - df?: string - lenient?: boolean - preference?: string - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - stored_fields?: Fields - q?: string - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - query?: QueryDslQueryContainer - } -} - -export interface ExplainResponse { - _index: IndexName - _id: Id - matched: boolean - explanation?: ExplainExplanationDetail - get?: InlineGet -} - -export interface FieldCapsFieldCapability { - aggregatable: boolean - indices?: Indices - meta?: Metadata - non_aggregatable_indices?: Indices - non_searchable_indices?: Indices - searchable: boolean - type: string - metadata_field?: boolean - time_series_dimension?: boolean - time_series_metric?: MappingTimeSeriesMetricType - non_dimension_indices?: IndexName[] - metric_conflicts_indices?: IndexName[] -} - -export interface FieldCapsRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - include_unmapped?: boolean - filters?: string - types?: string[] - include_empty_fields?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - fields?: Fields - index_filter?: QueryDslQueryContainer - runtime_mappings?: MappingRuntimeFields - } -} - -export interface FieldCapsResponse { - indices: Indices - fields: Record> -} - -export interface GetGetResult { - _index: IndexName - fields?: Record - _ignored?: string[] - found: boolean - _id: Id - _primary_term?: long - _routing?: string - _seq_no?: SequenceNumber - _source?: TDocument - _version?: VersionNumber -} - -export interface GetRequest extends RequestBase { - id: Id - index: IndexName - force_synthetic_source?: boolean - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - stored_fields?: Fields - version?: VersionNumber - version_type?: VersionType -} - -export type GetResponse = GetGetResult - -export interface GetScriptRequest extends RequestBase { - id: Id - master_timeout?: Duration -} - -export interface GetScriptResponse { - _id: Id - found: boolean - script?: StoredScript -} - -export interface GetScriptContextContext { - methods: GetScriptContextContextMethod[] - name: Name -} - -export interface GetScriptContextContextMethod { - name: Name - return_type: string - params: GetScriptContextContextMethodParam[] -} - -export interface GetScriptContextContextMethodParam { - name: Name - type: string -} - -export interface GetScriptContextRequest extends RequestBase { -} - -export interface GetScriptContextResponse { - contexts: GetScriptContextContext[] -} - -export interface GetScriptLanguagesLanguageContext { - contexts: string[] - language: ScriptLanguage -} - -export interface GetScriptLanguagesRequest extends RequestBase { -} - -export interface GetScriptLanguagesResponse { - language_contexts: GetScriptLanguagesLanguageContext[] - types_allowed: string[] -} - -export interface GetSourceRequest extends RequestBase { - id: Id - index: IndexName - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - stored_fields?: Fields - version?: VersionNumber - version_type?: VersionType -} - -export type GetSourceResponse = TDocument - -export interface HealthReportBaseIndicator { - status: HealthReportIndicatorHealthStatus - symptom: string - impacts?: HealthReportImpact[] - diagnosis?: HealthReportDiagnosis[] -} - -export interface HealthReportDataStreamLifecycleDetails { - stagnating_backing_indices_count: integer - total_backing_indices_in_error: integer - stagnating_backing_indices?: HealthReportStagnatingBackingIndices[] -} - -export interface HealthReportDataStreamLifecycleIndicator extends HealthReportBaseIndicator { - details?: HealthReportDataStreamLifecycleDetails -} - -export interface HealthReportDiagnosis { - id: string - action: string - affected_resources: HealthReportDiagnosisAffectedResources - cause: string - help_url: string -} - -export interface HealthReportDiagnosisAffectedResources { - indices?: Indices - nodes?: HealthReportIndicatorNode[] - slm_policies?: string[] - feature_states?: string[] - snapshot_repositories?: string[] -} - -export interface HealthReportDiskIndicator extends HealthReportBaseIndicator { - details?: HealthReportDiskIndicatorDetails -} - -export interface HealthReportDiskIndicatorDetails { - indices_with_readonly_block: long - nodes_with_enough_disk_space: long - nodes_over_high_watermark: long - nodes_over_flood_stage_watermark: long - nodes_with_unknown_disk_status: long 
-} - -export interface HealthReportFileSettingsIndicator extends HealthReportBaseIndicator { - details?: HealthReportFileSettingsIndicatorDetails -} - -export interface HealthReportFileSettingsIndicatorDetails { - failure_streak: long - most_recent_failure: string -} - -export interface HealthReportIlmIndicator extends HealthReportBaseIndicator { - details?: HealthReportIlmIndicatorDetails -} - -export interface HealthReportIlmIndicatorDetails { - ilm_status: LifecycleOperationMode - policies: long - stagnating_indices: integer -} - -export interface HealthReportImpact { - description: string - id: string - impact_areas: HealthReportImpactArea[] - severity: integer -} - -export type HealthReportImpactArea = 'search' | 'ingest' | 'backup' | 'deployment_management' - -export type HealthReportIndicatorHealthStatus = 'green' | 'yellow' | 'red' | 'unknown' - -export interface HealthReportIndicatorNode { - name: string | null - node_id: string | null -} - -export interface HealthReportIndicators { - master_is_stable?: HealthReportMasterIsStableIndicator - shards_availability?: HealthReportShardsAvailabilityIndicator - disk?: HealthReportDiskIndicator - repository_integrity?: HealthReportRepositoryIntegrityIndicator - data_stream_lifecycle?: HealthReportDataStreamLifecycleIndicator - ilm?: HealthReportIlmIndicator - slm?: HealthReportSlmIndicator - shards_capacity?: HealthReportShardsCapacityIndicator - file_settings?: HealthReportFileSettingsIndicator -} - -export interface HealthReportMasterIsStableIndicator extends HealthReportBaseIndicator { - details?: HealthReportMasterIsStableIndicatorDetails -} - -export interface HealthReportMasterIsStableIndicatorClusterFormationNode { - name?: string - node_id: string - cluster_formation_message: string -} - -export interface HealthReportMasterIsStableIndicatorDetails { - current_master: HealthReportIndicatorNode - recent_masters: HealthReportIndicatorNode[] - exception_fetching_history?: HealthReportMasterIsStableIndicatorExceptionFetchingHistory - cluster_formation?: HealthReportMasterIsStableIndicatorClusterFormationNode[] -} - -export interface HealthReportMasterIsStableIndicatorExceptionFetchingHistory { - message: string - stack_trace: string -} - -export interface HealthReportRepositoryIntegrityIndicator extends HealthReportBaseIndicator { - details?: HealthReportRepositoryIntegrityIndicatorDetails -} - -export interface HealthReportRepositoryIntegrityIndicatorDetails { - total_repositories?: long - corrupted_repositories?: long - corrupted?: string[] -} - -export interface HealthReportRequest extends RequestBase { - feature?: string | string[] - timeout?: Duration - verbose?: boolean - size?: integer -} - -export interface HealthReportResponse { - cluster_name: string - indicators: HealthReportIndicators - status?: HealthReportIndicatorHealthStatus -} - -export interface HealthReportShardsAvailabilityIndicator extends HealthReportBaseIndicator { - details?: HealthReportShardsAvailabilityIndicatorDetails -} - -export interface HealthReportShardsAvailabilityIndicatorDetails { - creating_primaries: long - creating_replicas: long - initializing_primaries: long - initializing_replicas: long - restarting_primaries: long - restarting_replicas: long - started_primaries: long - started_replicas: long - unassigned_primaries: long - unassigned_replicas: long -} - -export interface HealthReportShardsCapacityIndicator extends HealthReportBaseIndicator { - details?: HealthReportShardsCapacityIndicatorDetails -} - -export interface 
HealthReportShardsCapacityIndicatorDetails { - data: HealthReportShardsCapacityIndicatorTierDetail - frozen: HealthReportShardsCapacityIndicatorTierDetail -} - -export interface HealthReportShardsCapacityIndicatorTierDetail { - max_shards_in_cluster: integer - current_used_shards?: integer -} - -export interface HealthReportSlmIndicator extends HealthReportBaseIndicator { - details?: HealthReportSlmIndicatorDetails -} - -export interface HealthReportSlmIndicatorDetails { - slm_status: LifecycleOperationMode - policies: long - unhealthy_policies?: HealthReportSlmIndicatorUnhealthyPolicies -} - -export interface HealthReportSlmIndicatorUnhealthyPolicies { - count: long - invocations_since_last_success?: Record -} - -export interface HealthReportStagnatingBackingIndices { - index_name: IndexName - first_occurrence_timestamp: long - retry_count: integer -} - -export interface IndexRequest extends RequestBase { - id?: Id - index: IndexName - if_primary_term?: long - if_seq_no?: SequenceNumber - op_type?: OpType - pipeline?: string - refresh?: Refresh - routing?: Routing - timeout?: Duration - version?: VersionNumber - version_type?: VersionType - wait_for_active_shards?: WaitForActiveShards - require_alias?: boolean - /** @deprecated The use of the 'body' key has been deprecated, use 'document' instead. */ - body?: TDocument -} - -export type IndexResponse = WriteResponseBase - -export interface InfoRequest extends RequestBase { -} - -export interface InfoResponse { - cluster_name: Name - cluster_uuid: Uuid - name: Name - tagline: string - version: ElasticsearchVersionInfo -} - -export interface KnnSearchRequest extends RequestBase { - index: Indices - routing?: Routing - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - _source?: SearchSourceConfig - docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - stored_fields?: Fields - fields?: Fields - filter?: QueryDslQueryContainer | QueryDslQueryContainer[] - knn: KnnSearchQuery - } -} - -export interface KnnSearchResponse { - took: long - timed_out: boolean - _shards: ShardStatistics - hits: SearchHitsMetadata - fields?: Record - max_score?: double -} - -export interface KnnSearchQuery { - field: Field - query_vector: QueryVector - k: integer - num_candidates: integer -} - -export interface MgetMultiGetError { - error: ErrorCause - _id: Id - _index: IndexName -} - -export interface MgetOperation { - _id: Id - _index?: IndexName - routing?: Routing - _source?: SearchSourceConfig - stored_fields?: Fields - version?: VersionNumber - version_type?: VersionType -} - -export interface MgetRequest extends RequestBase { - index?: IndexName - force_synthetic_source?: boolean - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - stored_fields?: Fields - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - docs?: MgetOperation[] - ids?: Ids - } -} - -export interface MgetResponse { - docs: MgetResponseItem[] -} - -export type MgetResponseItem = GetGetResult | MgetMultiGetError - -export interface MsearchMultiSearchItem extends SearchResponseBody { - status?: integer -} - -export interface MsearchMultiSearchResult> { - took: long - responses: MsearchResponseItem[] -} - -export interface MsearchMultisearchBody { - aggregations?: Record - aggs?: Record - collapse?: SearchFieldCollapse - query?: QueryDslQueryContainer - explain?: boolean - ext?: Record - stored_fields?: Fields - docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnSearch | KnnSearch[] - from?: integer - highlight?: SearchHighlight - indices_boost?: Record[] - min_score?: double - post_filter?: QueryDslQueryContainer - profile?: boolean - rescore?: SearchRescore | SearchRescore[] - script_fields?: Record - search_after?: SortResults - size?: integer - sort?: Sort - _source?: SearchSourceConfig - fields?: (QueryDslFieldAndFormat | Field)[] - terminate_after?: long - stats?: string[] - timeout?: string - track_scores?: boolean - track_total_hits?: SearchTrackHits - version?: boolean - runtime_mappings?: MappingRuntimeFields - seq_no_primary_term?: boolean - pit?: SearchPointInTimeReference - suggest?: SearchSuggester -} - -export interface MsearchMultisearchHeader { - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - index?: Indices - preference?: string - request_cache?: boolean - routing?: Routing - search_type?: SearchType - ccs_minimize_roundtrips?: boolean - allow_partial_search_results?: boolean - ignore_throttled?: boolean -} - -export interface MsearchRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - ccs_minimize_roundtrips?: boolean - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - include_named_queries_score?: boolean - max_concurrent_searches?: long - max_concurrent_shard_requests?: long - pre_filter_shard_size?: long - rest_total_hits_as_int?: boolean - routing?: Routing - search_type?: SearchType - typed_keys?: boolean - /** @deprecated The use of the 'body' key has been deprecated, use 'searches' instead. */ - body?: MsearchRequestItem[] -} - -export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody - -export type MsearchResponse> = MsearchMultiSearchResult - -export type MsearchResponseItem = MsearchMultiSearchItem | ErrorResponseBase - -export interface MsearchTemplateRequest extends RequestBase { - index?: Indices - ccs_minimize_roundtrips?: boolean - max_concurrent_searches?: long - search_type?: SearchType - rest_total_hits_as_int?: boolean - typed_keys?: boolean - /** @deprecated The use of the 'body' key has been deprecated, use 'search_templates' instead. 
*/ - body?: MsearchTemplateRequestItem[] -} - -export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig - -export type MsearchTemplateResponse> = MsearchMultiSearchResult - -export interface MsearchTemplateTemplateConfig { - explain?: boolean - id?: Id - params?: Record - profile?: boolean - source?: string -} - -export interface MtermvectorsOperation { - _id?: Id - _index?: IndexName - doc?: any - fields?: Fields - field_statistics?: boolean - filter?: TermvectorsFilter - offsets?: boolean - payloads?: boolean - positions?: boolean - routing?: Routing - term_statistics?: boolean - version?: VersionNumber - version_type?: VersionType -} - -export interface MtermvectorsRequest extends RequestBase { - index?: IndexName - fields?: Fields - field_statistics?: boolean - offsets?: boolean - payloads?: boolean - positions?: boolean - preference?: string - realtime?: boolean - routing?: Routing - term_statistics?: boolean - version?: VersionNumber - version_type?: VersionType - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - docs?: MtermvectorsOperation[] - ids?: Id[] - } -} - -export interface MtermvectorsResponse { - docs: MtermvectorsTermVectorsResult[] -} - -export interface MtermvectorsTermVectorsResult { - _id?: Id - _index: IndexName - _version?: VersionNumber - took?: long - found?: boolean - term_vectors?: Record - error?: ErrorCause -} - -export interface OpenPointInTimeRequest extends RequestBase { - index: Indices - keep_alive: Duration - ignore_unavailable?: boolean - preference?: string - routing?: Routing - expand_wildcards?: ExpandWildcards - allow_partial_search_results?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - index_filter?: QueryDslQueryContainer - } -} - -export interface OpenPointInTimeResponse { - _shards: ShardStatistics - id: Id -} - -export interface PingRequest extends RequestBase { -} - -export type PingResponse = boolean - -export interface PutScriptRequest extends RequestBase { - id: Id - context?: Name - master_timeout?: Duration - timeout?: Duration - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - script: StoredScript - } -} - -export type PutScriptResponse = AcknowledgedResponseBase - -export interface RankEvalDocumentRating { - _id: Id - _index: IndexName - rating: integer -} - -export interface RankEvalRankEvalHit { - _id: Id - _index: IndexName - _score: double -} - -export interface RankEvalRankEvalHitItem { - hit: RankEvalRankEvalHit - rating?: double | null -} - -export interface RankEvalRankEvalMetric { - precision?: RankEvalRankEvalMetricPrecision - recall?: RankEvalRankEvalMetricRecall - mean_reciprocal_rank?: RankEvalRankEvalMetricMeanReciprocalRank - dcg?: RankEvalRankEvalMetricDiscountedCumulativeGain - expected_reciprocal_rank?: RankEvalRankEvalMetricExpectedReciprocalRank -} - -export interface RankEvalRankEvalMetricBase { - k?: integer -} - -export interface RankEvalRankEvalMetricDetail { - metric_score: double - unrated_docs: RankEvalUnratedDocument[] - hits: RankEvalRankEvalHitItem[] - metric_details: Record> -} - -export interface RankEvalRankEvalMetricDiscountedCumulativeGain extends RankEvalRankEvalMetricBase { - normalize?: boolean -} - -export interface RankEvalRankEvalMetricExpectedReciprocalRank extends RankEvalRankEvalMetricBase { - maximum_relevance: integer -} - -export interface RankEvalRankEvalMetricMeanReciprocalRank extends RankEvalRankEvalMetricRatingTreshold { -} - -export interface RankEvalRankEvalMetricPrecision extends RankEvalRankEvalMetricRatingTreshold { - ignore_unlabeled?: boolean -} - -export interface RankEvalRankEvalMetricRatingTreshold extends RankEvalRankEvalMetricBase { - relevant_rating_threshold?: integer -} - -export interface RankEvalRankEvalMetricRecall extends RankEvalRankEvalMetricRatingTreshold { -} - -export interface RankEvalRankEvalQuery { - query: QueryDslQueryContainer - size?: integer -} - -export interface RankEvalRankEvalRequestItem { - id: Id - request?: RankEvalRankEvalQuery | QueryDslQueryContainer - ratings: RankEvalDocumentRating[] - template_id?: Id - params?: Record -} - -export interface RankEvalRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - search_type?: string - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - requests: RankEvalRankEvalRequestItem[] - metric?: RankEvalRankEvalMetric - } -} - -export interface RankEvalResponse { - metric_score: double - details: Record - failures: Record -} - -export interface RankEvalUnratedDocument { - _id: Id - _index: IndexName -} - -export interface ReindexDestination { - index: IndexName - op_type?: OpType - pipeline?: string - routing?: Routing - version_type?: VersionType -} - -export interface ReindexRemoteSource { - connect_timeout?: Duration - headers?: Record - host: Host - username?: Username - password?: Password - socket_timeout?: Duration -} - -export interface ReindexRequest extends RequestBase { - refresh?: boolean - requests_per_second?: float - scroll?: Duration - slices?: Slices - timeout?: Duration - wait_for_active_shards?: WaitForActiveShards - wait_for_completion?: boolean - require_alias?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - conflicts?: Conflicts - dest: ReindexDestination - max_docs?: long - script?: Script | string - size?: long - source: ReindexSource - } -} - -export interface ReindexResponse { - batches?: long - created?: long - deleted?: long - failures?: BulkIndexByScrollFailure[] - noops?: long - retries?: Retries - requests_per_second?: float - slice_id?: integer - task?: TaskId - throttled_millis?: EpochTime - throttled_until_millis?: EpochTime - timed_out?: boolean - took?: DurationValue - total?: long - updated?: long - version_conflicts?: long -} - -export interface ReindexSource { - index: Indices - query?: QueryDslQueryContainer - remote?: ReindexRemoteSource - size?: integer - slice?: SlicedScroll - sort?: Sort - _source?: Fields - runtime_mappings?: MappingRuntimeFields -} - -export interface ReindexRethrottleReindexNode extends SpecUtilsBaseNode { - tasks: Record -} - -export interface ReindexRethrottleReindexStatus { - batches: long - created: long - deleted: long - noops: long - requests_per_second: float - retries: Retries - throttled?: Duration - throttled_millis: DurationValue - throttled_until?: Duration - throttled_until_millis: DurationValue - total: long - updated: long - version_conflicts: long -} - -export interface ReindexRethrottleReindexTask { - action: string - cancellable: boolean - description: string - id: long - node: Name - running_time_in_nanos: DurationValue - start_time_in_millis: EpochTime - status: ReindexRethrottleReindexStatus - type: string - headers: HttpHeaders -} - -export interface ReindexRethrottleRequest extends RequestBase { - task_id: Id - requests_per_second?: float -} - -export interface ReindexRethrottleResponse { - nodes: Record -} - -export interface RenderSearchTemplateRequest extends RequestBase { - id?: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - file?: string - params?: Record - source?: string - } -} - -export interface RenderSearchTemplateResponse { - template_output: Record -} - -export interface ScriptsPainlessExecutePainlessContextSetup { - document: any - index: IndexName - query?: QueryDslQueryContainer -} - -export interface ScriptsPainlessExecuteRequest extends RequestBase { - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - context?: string - context_setup?: ScriptsPainlessExecutePainlessContextSetup - script?: Script | string - } -} - -export interface ScriptsPainlessExecuteResponse { - result: TResult -} - -export interface ScrollRequest extends RequestBase { - scroll_id?: ScrollId - rest_total_hits_as_int?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - scroll?: Duration - scroll_id: ScrollId - } -} - -export type ScrollResponse> = SearchResponseBody - -export interface SearchRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - allow_partial_search_results?: boolean - analyzer?: string - analyze_wildcard?: boolean - batched_reduce_size?: long - ccs_minimize_roundtrips?: boolean - default_operator?: QueryDslOperator - df?: string - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - include_named_queries_score?: boolean - lenient?: boolean - max_concurrent_shard_requests?: long - preference?: string - pre_filter_shard_size?: long - request_cache?: boolean - routing?: Routing - scroll?: Duration - search_type?: SearchType - suggest_field?: Field - suggest_mode?: SuggestMode - suggest_size?: long - suggest_text?: string - typed_keys?: boolean - rest_total_hits_as_int?: boolean - _source_excludes?: Fields - _source_includes?: Fields - q?: string - force_synthetic_source?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - aggregations?: Record - /** @alias aggregations */ - aggs?: Record - collapse?: SearchFieldCollapse - explain?: boolean - ext?: Record - from?: integer - highlight?: SearchHighlight - track_total_hits?: SearchTrackHits - indices_boost?: Record[] - docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnSearch | KnnSearch[] - rank?: RankContainer - min_score?: double - post_filter?: QueryDslQueryContainer - profile?: boolean - query?: QueryDslQueryContainer - rescore?: SearchRescore | SearchRescore[] - retriever?: RetrieverContainer - script_fields?: Record - search_after?: SortResults - size?: integer - slice?: SlicedScroll - sort?: Sort - _source?: SearchSourceConfig - fields?: (QueryDslFieldAndFormat | Field)[] - suggest?: SearchSuggester - terminate_after?: long - timeout?: string - track_scores?: boolean - version?: boolean - seq_no_primary_term?: boolean - stored_fields?: Fields - pit?: SearchPointInTimeReference - runtime_mappings?: MappingRuntimeFields - stats?: string[] - } -} - -export type SearchResponse> = SearchResponseBody - -export interface SearchResponseBody> { - took: long - timed_out: boolean - _shards: ShardStatistics - hits: SearchHitsMetadata - aggregations?: TAggregations - _clusters?: ClusterStatistics - fields?: Record - max_score?: double - num_reduce_phases?: long - profile?: SearchProfile - pit_id?: Id - _scroll_id?: ScrollId - suggest?: Record[]> - terminated_early?: boolean -} - -export interface SearchAggregationBreakdown { - build_aggregation: long - build_aggregation_count: long - build_leaf_collector: long - build_leaf_collector_count: long - collect: long - collect_count: long - initialize: long - initialize_count: long - post_collection?: long - post_collection_count?: long - reduce: long - reduce_count: long -} - -export interface SearchAggregationProfile { - breakdown: SearchAggregationBreakdown - description: string - time_in_nanos: DurationValue - type: string - debug?: SearchAggregationProfileDebug - children?: SearchAggregationProfile[] -} - -export interface SearchAggregationProfileDebug { - segments_with_multi_valued_ords?: integer - collection_strategy?: string - segments_with_single_valued_ords?: integer - total_buckets?: integer - built_buckets?: integer - result_strategy?: string - has_filter?: boolean - delegate?: string - delegate_debug?: SearchAggregationProfileDebug - chars_fetched?: integer - 
extract_count?: integer - extract_ns?: integer - values_fetched?: integer - collect_analyzed_ns?: integer - collect_analyzed_count?: integer - surviving_buckets?: integer - ordinals_collectors_used?: integer - ordinals_collectors_overhead_too_high?: integer - string_hashing_collectors_used?: integer - numeric_collectors_used?: integer - empty_collectors_used?: integer - deferred_aggregators?: string[] - segments_with_doc_count_field?: integer - segments_with_deleted_docs?: integer - filters?: SearchAggregationProfileDelegateDebugFilter[] - segments_counted?: integer - segments_collected?: integer - map_reducer?: string - brute_force_used?: integer - dynamic_pruning_attempted?: integer - dynamic_pruning_used?: integer - skipped_due_to_no_data?: integer -} - -export interface SearchAggregationProfileDelegateDebugFilter { - results_from_metadata?: integer - query?: string - specialized_for?: string - segments_counted_in_constant_time?: integer -} - -export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' - -export interface SearchCollector { - name: string - reason: string - time_in_nanos: DurationValue - children?: SearchCollector[] -} - -export interface SearchCompletionContext { - boost?: double - context: SearchContext - neighbours?: GeoHashPrecision[] - precision?: GeoHashPrecision - prefix?: boolean -} - -export interface SearchCompletionSuggest extends SearchSuggestBase { - options: SearchCompletionSuggestOption | SearchCompletionSuggestOption[] -} - -export interface SearchCompletionSuggestOption { - collate_match?: boolean - contexts?: Record - fields?: Record - _id?: string - _index?: IndexName - _routing?: Routing - _score?: double - _source?: TDocument - text: string - score?: double -} - -export interface SearchCompletionSuggester extends SearchSuggesterBase { - contexts?: Record - fuzzy?: SearchSuggestFuzziness - regex?: SearchRegexOptions - skip_duplicates?: boolean -} - -export type SearchContext = string | GeoLocation - -export interface SearchDfsKnnProfile { - vector_operations_count?: long - query: SearchKnnQueryProfileResult[] - rewrite_time: long - collector: SearchKnnCollectorResult[] -} - -export interface SearchDfsProfile { - statistics?: SearchDfsStatisticsProfile - knn?: SearchDfsKnnProfile[] -} - -export interface SearchDfsStatisticsBreakdown { - collection_statistics: long - collection_statistics_count: long - create_weight: long - create_weight_count: long - rewrite: long - rewrite_count: long - term_statistics: long - term_statistics_count: long -} - -export interface SearchDfsStatisticsProfile { - type: string - description: string - time?: Duration - time_in_nanos: DurationValue - breakdown: SearchDfsStatisticsBreakdown - debug?: Record - children?: SearchDfsStatisticsProfile[] -} - -export interface SearchDirectGenerator { - field: Field - max_edits?: integer - max_inspections?: float - max_term_freq?: float - min_doc_freq?: float - min_word_length?: integer - post_filter?: string - pre_filter?: string - prefix_length?: integer - size?: integer - suggest_mode?: SuggestMode -} - -export interface SearchFetchProfile { - type: string - description: string - time_in_nanos: DurationValue - breakdown: SearchFetchProfileBreakdown - debug?: SearchFetchProfileDebug - children?: SearchFetchProfile[] -} - -export interface SearchFetchProfileBreakdown { - load_source?: integer - load_source_count?: integer - load_stored_fields?: integer - load_stored_fields_count?: integer - next_reader?: integer - next_reader_count?: integer - process_count?: integer - 
process?: integer -} - -export interface SearchFetchProfileDebug { - stored_fields?: string[] - fast_path?: integer -} - -export interface SearchFieldCollapse { - field: Field - inner_hits?: SearchInnerHits | SearchInnerHits[] - max_concurrent_group_searches?: integer - collapse?: SearchFieldCollapse -} - -export interface SearchFieldSuggester { - completion?: SearchCompletionSuggester - phrase?: SearchPhraseSuggester - term?: SearchTermSuggester - prefix?: string - regex?: string - text?: string -} - -export interface SearchHighlight extends SearchHighlightBase { - encoder?: SearchHighlighterEncoder - fields: Record -} - -export interface SearchHighlightBase { - type?: SearchHighlighterType - boundary_chars?: string - boundary_max_scan?: integer - boundary_scanner?: SearchBoundaryScanner - boundary_scanner_locale?: string - force_source?: boolean - fragmenter?: SearchHighlighterFragmenter - fragment_size?: integer - highlight_filter?: boolean - highlight_query?: QueryDslQueryContainer - max_fragment_length?: integer - max_analyzed_offset?: integer - no_match_size?: integer - number_of_fragments?: integer - options?: Record - order?: SearchHighlighterOrder - phrase_limit?: integer - post_tags?: string[] - pre_tags?: string[] - require_field_match?: boolean - tags_schema?: SearchHighlighterTagsSchema -} - -export interface SearchHighlightField extends SearchHighlightBase { - fragment_offset?: integer - matched_fields?: Fields -} - -export type SearchHighlighterEncoder = 'default' | 'html' - -export type SearchHighlighterFragmenter = 'simple' | 'span' - -export type SearchHighlighterOrder = 'score' - -export type SearchHighlighterTagsSchema = 'styled' - -export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string - -export interface SearchHit { - _index: IndexName - _id?: Id - _score?: double | null - _explanation?: ExplainExplanation - fields?: Record - highlight?: Record - inner_hits?: Record - matched_queries?: string[] | Record - _nested?: SearchNestedIdentity - _ignored?: string[] - ignored_field_values?: Record - _shard?: string - _node?: string - _routing?: string - _source?: TDocument - _rank?: integer - _seq_no?: SequenceNumber - _primary_term?: long - _version?: VersionNumber - sort?: SortResults -} - -export interface SearchHitsMetadata { - total?: SearchTotalHits | long - hits: SearchHit[] - max_score?: double | null -} - -export interface SearchInnerHits { - name?: Name - size?: integer - from?: integer - collapse?: SearchFieldCollapse - docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - explain?: boolean - highlight?: SearchHighlight - ignore_unmapped?: boolean - script_fields?: Record - seq_no_primary_term?: boolean - fields?: Fields - sort?: Sort - _source?: SearchSourceConfig - stored_fields?: Fields - track_scores?: boolean - version?: boolean -} - -export interface SearchInnerHitsResult { - hits: SearchHitsMetadata -} - -export interface SearchKnnCollectorResult { - name: string - reason: string - time?: Duration - time_in_nanos: DurationValue - children?: SearchKnnCollectorResult[] -} - -export interface SearchKnnQueryProfileBreakdown { - advance: long - advance_count: long - build_scorer: long - build_scorer_count: long - compute_max_score: long - compute_max_score_count: long - count_weight: long - count_weight_count: long - create_weight: long - create_weight_count: long - match: long - match_count: long - next_doc: long - next_doc_count: long - score: long - score_count: long - set_min_competitive_score: long - set_min_competitive_score_count: long - 
shallow_advance: long - shallow_advance_count: long -} - -export interface SearchKnnQueryProfileResult { - type: string - description: string - time?: Duration - time_in_nanos: DurationValue - breakdown: SearchKnnQueryProfileBreakdown - debug?: Record - children?: SearchKnnQueryProfileResult[] -} - -export interface SearchLaplaceSmoothingModel { - alpha: double -} - -export interface SearchLearningToRank { - model_id: string - params?: Record -} - -export interface SearchLinearInterpolationSmoothingModel { - bigram_lambda: double - trigram_lambda: double - unigram_lambda: double -} - -export interface SearchNestedIdentity { - field: Field - offset: integer - _nested?: SearchNestedIdentity -} - -export interface SearchPhraseSuggest extends SearchSuggestBase { - options: SearchPhraseSuggestOption | SearchPhraseSuggestOption[] -} - -export interface SearchPhraseSuggestCollate { - params?: Record - prune?: boolean - query: SearchPhraseSuggestCollateQuery -} - -export interface SearchPhraseSuggestCollateQuery { - id?: Id - source?: string -} - -export interface SearchPhraseSuggestHighlight { - post_tag: string - pre_tag: string -} - -export interface SearchPhraseSuggestOption { - text: string - score: double - highlighted?: string - collate_match?: boolean -} - -export interface SearchPhraseSuggester extends SearchSuggesterBase { - collate?: SearchPhraseSuggestCollate - confidence?: double - direct_generator?: SearchDirectGenerator[] - force_unigrams?: boolean - gram_size?: integer - highlight?: SearchPhraseSuggestHighlight - max_errors?: double - real_word_error_likelihood?: double - separator?: string - shard_size?: integer - smoothing?: SearchSmoothingModelContainer - text?: string - token_limit?: integer -} - -export interface SearchPointInTimeReference { - id: Id - keep_alive?: Duration -} - -export interface SearchProfile { - shards: SearchShardProfile[] -} - -export interface SearchQueryBreakdown { - advance: long - advance_count: long - build_scorer: long - build_scorer_count: long - create_weight: long - create_weight_count: long - match: long - match_count: long - shallow_advance: long - shallow_advance_count: long - next_doc: long - next_doc_count: long - score: long - score_count: long - compute_max_score: long - compute_max_score_count: long - count_weight: long - count_weight_count: long - set_min_competitive_score: long - set_min_competitive_score_count: long -} - -export interface SearchQueryProfile { - breakdown: SearchQueryBreakdown - description: string - time_in_nanos: DurationValue - type: string - children?: SearchQueryProfile[] -} - -export interface SearchRegexOptions { - flags?: integer | string - max_determinized_states?: integer -} - -export interface SearchRescore { - window_size?: integer - query?: SearchRescoreQuery - learning_to_rank?: SearchLearningToRank -} - -export interface SearchRescoreQuery { - rescore_query: QueryDslQueryContainer - query_weight?: double - rescore_query_weight?: double - score_mode?: SearchScoreMode -} - -export type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total' - -export interface SearchSearchProfile { - collector: SearchCollector[] - query: SearchQueryProfile[] - rewrite_time: long -} - -export interface SearchShardProfile { - aggregations: SearchAggregationProfile[] - cluster: string - dfs?: SearchDfsProfile - fetch?: SearchFetchProfile - id: string - index: IndexName - node_id: NodeId - searches: SearchSearchProfile[] - shard_id: long -} - -export interface SearchSmoothingModelContainer { - laplace?: 
SearchLaplaceSmoothingModel - linear_interpolation?: SearchLinearInterpolationSmoothingModel - stupid_backoff?: SearchStupidBackoffSmoothingModel -} - -export type SearchSourceConfig = boolean | SearchSourceFilter | Fields - -export type SearchSourceConfigParam = boolean | Fields - -export interface SearchSourceFilter { - excludes?: Fields - exclude?: Fields - includes?: Fields - include?: Fields -} - -export type SearchStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram' - -export interface SearchStupidBackoffSmoothingModel { - discount: double -} - -export type SearchSuggest = SearchCompletionSuggest | SearchPhraseSuggest | SearchTermSuggest - -export interface SearchSuggestBase { - length: integer - offset: integer - text: string -} - -export interface SearchSuggestFuzziness { - fuzziness?: Fuzziness - min_length?: integer - prefix_length?: integer - transpositions?: boolean - unicode_aware?: boolean -} - -export type SearchSuggestSort = 'score' | 'frequency' - -export interface SearchSuggesterKeys { - text?: string -} -export type SearchSuggester = SearchSuggesterKeys -& { [property: string]: SearchFieldSuggester | string } - -export interface SearchSuggesterBase { - field: Field - analyzer?: string - size?: integer -} - -export interface SearchTermSuggest extends SearchSuggestBase { - options: SearchTermSuggestOption | SearchTermSuggestOption[] -} - -export interface SearchTermSuggestOption { - text: string - score: double - freq: long - highlighted?: string - collate_match?: boolean -} - -export interface SearchTermSuggester extends SearchSuggesterBase { - lowercase_terms?: boolean - max_edits?: integer - max_inspections?: integer - max_term_freq?: float - min_doc_freq?: float - min_word_length?: integer - prefix_length?: integer - shard_size?: integer - sort?: SearchSuggestSort - string_distance?: SearchStringDistance - suggest_mode?: SuggestMode - text?: string -} - -export interface SearchTotalHits { - relation: SearchTotalHitsRelation - value: long -} - -export type SearchTotalHitsRelation = 'eq' | 'gte' - -export type SearchTrackHits = boolean | integer - -export interface SearchMvtRequest extends RequestBase { - index: Indices - field: Field - zoom: SearchMvtZoomLevel - x: SearchMvtCoordinate - y: SearchMvtCoordinate - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - aggs?: Record - buffer?: integer - exact_bounds?: boolean - extent?: integer - fields?: Fields - grid_agg?: SearchMvtGridAggregationType - grid_precision?: integer - grid_type?: SearchMvtGridType - query?: QueryDslQueryContainer - runtime_mappings?: MappingRuntimeFields - size?: integer - sort?: Sort - track_total_hits?: SearchTrackHits - with_labels?: boolean - } -} - -export type SearchMvtResponse = MapboxVectorTiles - -export type SearchMvtCoordinate = integer - -export type SearchMvtGridAggregationType = 'geotile' | 'geohex' - -export type SearchMvtGridType = 'grid' | 'point' | 'centroid' - -export type SearchMvtZoomLevel = integer - -export interface SearchShardsRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - local?: boolean - preference?: string - routing?: Routing -} - -export interface SearchShardsResponse { - nodes: Record - shards: NodeShard[][] - indices: Record -} - -export interface SearchShardsSearchShardsNodeAttributes { - name: NodeName - ephemeral_id: Id - transport_address: TransportAddress - external_id: string - attributes: Record - roles: NodeRoles - version: VersionString - min_index_version: integer - max_index_version: integer -} - -export interface SearchShardsShardStoreIndex { - aliases?: Name[] - filter?: QueryDslQueryContainer -} - -export interface SearchTemplateRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - ccs_minimize_roundtrips?: boolean - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - preference?: string - routing?: Routing - scroll?: Duration - search_type?: SearchType - rest_total_hits_as_int?: boolean - typed_keys?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - explain?: boolean - id?: Id - params?: Record - profile?: boolean - source?: string - } -} - -export interface SearchTemplateResponse { - took: long - timed_out: boolean - _shards: ShardStatistics - hits: SearchHitsMetadata - aggregations?: Record - _clusters?: ClusterStatistics - fields?: Record - max_score?: double - num_reduce_phases?: long - profile?: SearchProfile - pit_id?: Id - _scroll_id?: ScrollId - suggest?: Record[]> - terminated_early?: boolean -} - -export interface TermsEnumRequest extends RequestBase { - index: IndexName - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - field: Field - size?: integer - timeout?: Duration - case_insensitive?: boolean - index_filter?: QueryDslQueryContainer - string?: string - search_after?: string - } -} - -export interface TermsEnumResponse { - _shards: ShardStatistics - terms: string[] - complete: boolean -} - -export interface TermvectorsFieldStatistics { - doc_count: integer - sum_doc_freq: long - sum_ttf: long -} - -export interface TermvectorsFilter { - max_doc_freq?: integer - max_num_terms?: integer - max_term_freq?: integer - max_word_length?: integer - min_doc_freq?: integer - min_term_freq?: integer - min_word_length?: integer -} - -export interface TermvectorsRequest extends RequestBase { - index: IndexName - id?: Id - fields?: Fields - field_statistics?: boolean - offsets?: boolean - payloads?: boolean - positions?: boolean - preference?: string - realtime?: boolean - routing?: Routing - term_statistics?: boolean - version?: VersionNumber - version_type?: VersionType - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - doc?: TDocument - filter?: TermvectorsFilter - per_field_analyzer?: Record - } -} - -export interface TermvectorsResponse { - found: boolean - _id?: Id - _index: IndexName - term_vectors?: Record - took: long - _version: VersionNumber -} - -export interface TermvectorsTerm { - doc_freq?: integer - score?: double - term_freq: integer - tokens?: TermvectorsToken[] - ttf?: integer -} - -export interface TermvectorsTermVector { - field_statistics?: TermvectorsFieldStatistics - terms: Record -} - -export interface TermvectorsToken { - end_offset?: integer - payload?: string - position: integer - start_offset?: integer -} - -export interface UpdateRequest extends RequestBase { - id: Id - index: IndexName - if_primary_term?: long - if_seq_no?: SequenceNumber - lang?: string - refresh?: Refresh - require_alias?: boolean - retry_on_conflict?: integer - routing?: Routing - timeout?: Duration - wait_for_active_shards?: WaitForActiveShards - _source_excludes?: Fields - _source_includes?: Fields - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - detect_noop?: boolean - doc?: TPartialDocument - doc_as_upsert?: boolean - script?: Script | string - scripted_upsert?: boolean - _source?: SearchSourceConfig - upsert?: TDocument - } -} - -export type UpdateResponse = UpdateUpdateWriteResponseBase - -export interface UpdateUpdateWriteResponseBase extends WriteResponseBase { - get?: InlineGet -} - -export interface UpdateByQueryRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - analyzer?: string - analyze_wildcard?: boolean - default_operator?: QueryDslOperator - df?: string - expand_wildcards?: ExpandWildcards - from?: long - ignore_unavailable?: boolean - lenient?: boolean - pipeline?: string - preference?: string - q?: string - refresh?: boolean - request_cache?: boolean - requests_per_second?: float - routing?: Routing - scroll?: Duration - scroll_size?: long - search_timeout?: Duration - search_type?: SearchType - slices?: Slices - sort?: string[] - stats?: string[] - terminate_after?: long - timeout?: Duration - version?: boolean - version_type?: boolean - wait_for_active_shards?: WaitForActiveShards - wait_for_completion?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - max_docs?: long - query?: QueryDslQueryContainer - script?: Script | string - slice?: SlicedScroll - conflicts?: Conflicts - } -} - -export interface UpdateByQueryResponse { - batches?: long - failures?: BulkIndexByScrollFailure[] - noops?: long - deleted?: long - requests_per_second?: float - retries?: Retries - task?: TaskId - timed_out?: boolean - took?: DurationValue - total?: long - updated?: long - version_conflicts?: long - throttled?: Duration - throttled_millis?: DurationValue - throttled_until?: Duration - throttled_until_millis?: DurationValue -} - -export interface UpdateByQueryRethrottleRequest extends RequestBase { - task_id: Id - requests_per_second?: float -} - -export interface UpdateByQueryRethrottleResponse { - nodes: Record -} - -export interface UpdateByQueryRethrottleUpdateByQueryRethrottleNode extends SpecUtilsBaseNode { - tasks: Record -} - -export interface SpecUtilsBaseNode { - attributes: Record - host: Host - ip: Ip - name: Name - roles?: NodeRoles - transport_address: TransportAddress -} - -export type SpecUtilsNullValue = null - -export type SpecUtilsPipeSeparatedFlags = T | string - -export type SpecUtilsStringified = T | string - -export type SpecUtilsWithNullValue = T | SpecUtilsNullValue - -export interface AcknowledgedResponseBase { - acknowledged: boolean -} - -export type AggregateName = string - -export interface BulkIndexByScrollFailure { - cause: ErrorCause - id: Id - index: IndexName - status: integer - type: string -} - -export interface BulkStats { - total_operations: long - total_time?: Duration - total_time_in_millis: DurationValue - total_size?: ByteSize - total_size_in_bytes: long - avg_time?: Duration - avg_time_in_millis: DurationValue - avg_size?: ByteSize - avg_size_in_bytes: long -} - -export type ByteSize = long | string - -export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb' - -export type CategoryId = string - -export type ClusterAlias = string - -export interface ClusterDetails { - status: ClusterSearchStatus - indices: string - took?: DurationValue - timed_out: boolean - _shards?: ShardStatistics - failures?: ShardFailure[] -} - -export type ClusterInfoTarget = '_all' | 'http' | 'ingest' | 'thread_pool' | 'script' - -export type ClusterInfoTargets = ClusterInfoTarget | ClusterInfoTarget[] - -export type ClusterSearchStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed' - -export interface ClusterStatistics { - skipped: integer - successful: integer - total: integer - running: integer - partial: integer - failed: integer - details?: Record -} - -export interface CompletionStats { - size_in_bytes: long - size?: ByteSize - fields?: Record -} - -export type Conflicts = 'abort' | 'proceed' - -export interface CoordsGeoBounds { - top: double - bottom: double - left: double - right: double -} - -export type DFIIndependenceMeasure = 'standardized' | 'saturated' | 'chisquared' - -export type DFRAfterEffect = 'no' | 'b' | 'l' - -export type DFRBasicModel = 'be' | 'd' | 'g' | 'if' | 'in' | 'ine' | 'p' - -export type DataStreamName = string - -export type DataStreamNames = DataStreamName | DataStreamName[] - -export type DateFormat = string - -export type DateMath = string | Date - -export type DateTime = string | EpochTime | Date - -export type Distance = string - -export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm' - -export interface DocStats { - count: long - deleted?: long -} - -export type Duration = string | -1 | 0 - -export type DurationLarge = string - -export type 
DurationValue = Unit - -export interface ElasticsearchVersionInfo { - build_date: DateTime - build_flavor: string - build_hash: string - build_snapshot: boolean - build_type: string - lucene_version: VersionString - minimum_index_compatibility_version: VersionString - minimum_wire_compatibility_version: VersionString - number: string -} - -export interface ElasticsearchVersionMinInfo { - build_flavor: string - minimum_index_compatibility_version: VersionString - minimum_wire_compatibility_version: VersionString - number: string -} - -export interface EmptyObject { -} - -export type EpochTime = Unit - -export interface ErrorCauseKeys { - type: string - reason?: string - stack_trace?: string - caused_by?: ErrorCause - root_cause?: ErrorCause[] - suppressed?: ErrorCause[] -} -export type ErrorCause = ErrorCauseKeys -& { [property: string]: any } - -export interface ErrorResponseBase { - error: ErrorCause - status: integer -} - -export type EsqlColumns = ArrayBuffer - -export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' - -export type ExpandWildcards = ExpandWildcard | ExpandWildcard[] - -export type Field = string - -export interface FieldMemoryUsage { - memory_size?: ByteSize - memory_size_in_bytes: long -} - -export interface FieldSizeUsage { - size?: ByteSize - size_in_bytes: long -} - -export interface FieldSort { - missing?: AggregationsMissing - mode?: SortMode - nested?: NestedSortValue - order?: SortOrder - unmapped_type?: MappingFieldType - numeric_type?: FieldSortNumericType - format?: string -} - -export type FieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos' - -export type FieldValue = long | double | string | boolean | null | any - -export interface FielddataStats { - evictions?: long - memory_size?: ByteSize - memory_size_in_bytes: long - fields?: Record -} - -export type Fields = Field | Field[] - -export interface FlushStats { - periodic: long - total: long - total_time?: Duration - total_time_in_millis: DurationValue -} - -export type Fuzziness = string | integer - -export type GeoBounds = CoordsGeoBounds | TopLeftBottomRightGeoBounds | TopRightBottomLeftGeoBounds | WktGeoBounds - -export interface GeoDistanceSortKeys { - mode?: SortMode - distance_type?: GeoDistanceType - ignore_unmapped?: boolean - order?: SortOrder - unit?: DistanceUnit - nested?: NestedSortValue -} -export type GeoDistanceSort = GeoDistanceSortKeys -& { [property: string]: GeoLocation | GeoLocation[] | SortMode | GeoDistanceType | boolean | SortOrder | DistanceUnit | NestedSortValue } - -export type GeoDistanceType = 'arc' | 'plane' - -export type GeoHash = string - -export interface GeoHashLocation { - geohash: GeoHash -} - -export type GeoHashPrecision = number | string - -export type GeoHexCell = string - -export interface GeoLine { - type: string - coordinates: double[][] -} - -export type GeoLocation = LatLonGeoLocation | GeoHashLocation | double[] | string - -export type GeoShape = any - -export type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains' - -export type GeoTile = string - -export type GeoTilePrecision = number - -export interface GetStats { - current: long - exists_time?: Duration - exists_time_in_millis: DurationValue - exists_total: long - missing_time?: Duration - missing_time_in_millis: DurationValue - missing_total: long - time?: Duration - time_in_millis: DurationValue - total: long -} - -export type GrokPattern = string - -export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED' - -export type Host = 
string
-
-export type HttpHeaders = Record<string, string | string[]>
-
-export type IBDistribution = 'll' | 'spl'
-
-export type IBLambda = 'df' | 'ttf'
-
-export type Id = string
-
-export type Ids = Id | Id[]
-
-export type IndexAlias = string
-
-export type IndexName = string
-
-export type IndexPattern = string
-
-export type IndexPatterns = IndexPattern[]
-
-export interface IndexingStats {
-  index_current: long
-  delete_current: long
-  delete_time?: Duration
-  delete_time_in_millis: DurationValue<UnitMillis>
-  delete_total: long
-  is_throttled: boolean
-  noop_update_total: long
-  throttle_time?: Duration
-  throttle_time_in_millis: DurationValue<UnitMillis>
-  index_time?: Duration
-  index_time_in_millis: DurationValue<UnitMillis>
-  index_total: long
-  index_failed: long
-  types?: Record<string, IndexingStats>
-  write_load?: double
-}
-
-export type Indices = IndexName | IndexName[]
-
-export interface IndicesOptions {
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  ignore_throttled?: boolean
-}
-
-export interface IndicesResponseBase extends AcknowledgedResponseBase {
-  _shards?: ShardStatistics
-}
-
-export interface InlineGetKeys<TDocument = unknown> {
-  fields?: Record<string, any>
-  found: boolean
-  _seq_no?: SequenceNumber
-  _primary_term?: long
-  _routing?: Routing
-  _source?: TDocument
-}
-export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument>
-& { [property: string]: any }
-
-export type Ip = string
-
-export interface KnnQuery extends QueryDslQueryBase {
-  field: Field
-  query_vector?: QueryVector
-  query_vector_builder?: QueryVectorBuilder
-  num_candidates?: integer
-  k?: integer
-  filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
-  similarity?: float
-}
-
-export interface KnnRetriever extends RetrieverBase {
-  field: string
-  query_vector?: QueryVector
-  query_vector_builder?: QueryVectorBuilder
-  k: integer
-  num_candidates: integer
-  similarity?: float
-}
-
-export interface KnnSearch {
-  field: Field
-  query_vector?: QueryVector
-  query_vector_builder?: QueryVectorBuilder
-  k?: integer
-  num_candidates?: integer
-  boost?: float
-  filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
-  similarity?: float
-  inner_hits?: SearchInnerHits
-}
-
-export interface LatLonGeoLocation {
-  lat: double
-  lon: double
-}
-
-export type Level = 'cluster' | 'indices' | 'shards'
-
-export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED'
-
-export type MapboxVectorTiles = ArrayBuffer
-
-export interface MergesStats {
-  current: long
-  current_docs: long
-  current_size?: string
-  current_size_in_bytes: long
-  total: long
-  total_auto_throttle?: string
-  total_auto_throttle_in_bytes: long
-  total_docs: long
-  total_size?: string
-  total_size_in_bytes: long
-  total_stopped_time?: Duration
-  total_stopped_time_in_millis: DurationValue<UnitMillis>
-  total_throttled_time?: Duration
-  total_throttled_time_in_millis: DurationValue<UnitMillis>
-  total_time?: Duration
-  total_time_in_millis: DurationValue<UnitMillis>
-}
-
-export type Metadata = Record<string, any>
-
-export type Metrics = string | string[]
-
-export type MinimumShouldMatch = integer | string
-
-export type MultiTermQueryRewrite = string
-
-export type Name = string
-
-export type Names = Name | Name[]
-
-export type Namespace = string
-
-export interface NestedSortValue {
-  filter?: QueryDslQueryContainer
-  max_children?: integer
-  nested?: NestedSortValue
-  path: Field
-}
-
-export interface NodeAttributes {
-  attributes: Record<string, string>
-  ephemeral_id: Id
-  id?: NodeId
-  name: NodeName
-  transport_address: TransportAddress
-}
-
-export type NodeId = string
-
-export type NodeIds = NodeId | NodeId[]
-
-export type NodeName = string
-
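// A minimal usage sketch of the KnnSearch shape declared above, assuming the
// 8.x client re-exports these generated types from
// '@elastic/elasticsearch/lib/api/types'. The index name, field names, and
// vector values are hypothetical.
import { Client } from '@elastic/elasticsearch'
import type { KnnSearch } from '@elastic/elasticsearch/lib/api/types'

const client = new Client({ node: 'http://localhost:9200/' })

// k nearest-neighbour clause, filling in the optional members of KnnSearch
const knn: KnnSearch = {
  field: 'title_embedding', // hypothetical dense_vector field
  query_vector: [0.12, 0.57, 0.96], // hypothetical query embedding
  k: 10,
  num_candidates: 100
}

async function run (): Promise<void> {
  // SearchRequest (declared earlier in this file) accepts KnnSearch | KnnSearch[]
  const response = await client.search<{ title: string }>({
    index: 'my-index', // hypothetical index name
    knn
  })
  for (const hit of response.hits.hits) {
    console.log(hit._id, hit._score, hit._source?.title)
  }
}

run().catch(console.log)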
-export type NodeRole = 'master' | 'data' | 'data_cold' | 'data_content' | 'data_frozen' | 'data_hot' | 'data_warm' | 'client' | 'ingest' | 'ml' | 'voting_only' | 'transform' | 'remote_cluster_client' | 'coordinating_only' - -export type NodeRoles = NodeRole[] - -export interface NodeShard { - state: IndicesStatsShardRoutingState - primary: boolean - node?: NodeName - shard: integer - index: IndexName - allocation_id?: Record - recovery_source?: Record - unassigned_info?: ClusterAllocationExplainUnassignedInformation - relocating_node?: NodeId | null - relocation_failure_info?: RelocationFailureInfo -} - -export interface NodeStatistics { - failures?: ErrorCause[] - total: integer - successful: integer - failed: integer -} - -export type Normalization = 'no' | 'h1' | 'h2' | 'h3' | 'z' - -export type OpType = 'index' | 'create' - -export type Password = string - -export type Percentage = string | float - -export type PipelineName = string - -export interface PluginStats { - classname: string - description: string - elasticsearch_version: VersionString - extended_plugins: string[] - has_native_controller: boolean - java_version: VersionString - name: Name - version: VersionString - licensed: boolean -} - -export type PropertyName = string - -export interface QueryCacheStats { - cache_count: long - cache_size: long - evictions: long - hit_count: long - memory_size?: ByteSize - memory_size_in_bytes: long - miss_count: long - total_count: long -} - -export type QueryVector = float[] - -export interface QueryVectorBuilder { - text_embedding?: TextEmbedding -} - -export interface RRFRetriever extends RetrieverBase { - retrievers: RetrieverContainer[] - rank_constant?: integer - rank_window_size?: integer -} - -export interface RankBase { -} - -export interface RankContainer { - rrf?: RrfRank -} - -export interface RecoveryStats { - current_as_source: long - current_as_target: long - throttle_time?: Duration - throttle_time_in_millis: DurationValue -} - -export type Refresh = boolean | 'true' | 'false' | 'wait_for' - -export interface RefreshStats { - external_total: long - external_total_time_in_millis: DurationValue - listeners: long - total: long - total_time?: Duration - total_time_in_millis: DurationValue -} - -export type RelationName = string - -export interface RelocationFailureInfo { - failed_attempts: integer -} - -export interface RequestBase extends SpecUtilsCommonQueryParameters { -} - -export interface RequestCacheStats { - evictions: long - hit_count: long - memory_size?: string - memory_size_in_bytes: long - miss_count: long -} - -export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' - -export interface Retries { - bulk: long - search: long -} - -export interface RetrieverBase { - filter?: QueryDslQueryContainer | QueryDslQueryContainer[] - min_score?: float -} - -export interface RetrieverContainer { - standard?: StandardRetriever - knn?: KnnRetriever - rrf?: RRFRetriever - text_similarity_reranker?: TextSimilarityReranker - rule?: RuleRetriever -} - -export type Routing = string - -export interface RrfRank { - rank_constant?: long - rank_window_size?: long -} - -export interface RuleRetriever extends RetrieverBase { - ruleset_ids: Id[] - match_criteria: any - retriever: RetrieverContainer - rank_window_size?: integer -} - -export type ScalarValue = long | double | string | boolean | null - -export interface ScoreSort { - order?: SortOrder -} - -export interface Script { - source?: string - id?: Id - params?: Record - lang?: ScriptLanguage - options?: 
Record -} - -export interface ScriptField { - script: Script | string - ignore_failure?: boolean -} - -export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | string - -export interface ScriptSort { - order?: SortOrder - script: Script | string - type?: ScriptSortType - mode?: SortMode - nested?: NestedSortValue -} - -export type ScriptSortType = 'string' | 'number' | 'version' - -export interface ScriptTransform { - lang?: string - params?: Record - source?: string - id?: string -} - -export type ScrollId = string - -export type ScrollIds = ScrollId | ScrollId[] - -export interface SearchStats { - fetch_current: long - fetch_time?: Duration - fetch_time_in_millis: DurationValue - fetch_total: long - open_contexts?: long - query_current: long - query_time?: Duration - query_time_in_millis: DurationValue - query_total: long - scroll_current: long - scroll_time?: Duration - scroll_time_in_millis: DurationValue - scroll_total: long - suggest_current: long - suggest_time?: Duration - suggest_time_in_millis: DurationValue - suggest_total: long - groups?: Record -} - -export interface SearchTransform { - request: WatcherSearchInputRequestDefinition - timeout: Duration -} - -export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' - -export interface SegmentsStats { - count: integer - doc_values_memory?: ByteSize - doc_values_memory_in_bytes: long - file_sizes: Record - fixed_bit_set?: ByteSize - fixed_bit_set_memory_in_bytes: long - index_writer_memory?: ByteSize - index_writer_max_memory_in_bytes?: long - index_writer_memory_in_bytes: long - max_unsafe_auto_id_timestamp: long - memory?: ByteSize - memory_in_bytes: long - norms_memory?: ByteSize - norms_memory_in_bytes: long - points_memory?: ByteSize - points_memory_in_bytes: long - stored_memory?: ByteSize - stored_fields_memory_in_bytes: long - terms_memory_in_bytes: long - terms_memory?: ByteSize - term_vectory_memory?: ByteSize - term_vectors_memory_in_bytes: long - version_map_memory?: ByteSize - version_map_memory_in_bytes: long -} - -export type SequenceNumber = long - -export type Service = string - -export interface ShardFailure { - index?: IndexName - node?: string - reason: ErrorCause - shard: integer - status?: string -} - -export interface ShardStatistics { - failed: uint - successful: uint - total: uint - failures?: ShardFailure[] - skipped?: uint -} - -export interface ShardsOperationResponseBase { - _shards?: ShardStatistics -} - -export interface SlicedScroll { - field?: Field - id: Id - max: integer -} - -export type Slices = integer | SlicesCalculation - -export type SlicesCalculation = 'auto' - -export type Sort = SortCombinations | SortCombinations[] - -export type SortCombinations = Field | SortOptions - -export type SortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' - -export interface SortOptionsKeys { - _score?: ScoreSort - _doc?: ScoreSort - _geo_distance?: GeoDistanceSort - _script?: ScriptSort -} -export type SortOptions = SortOptionsKeys -& { [property: string]: FieldSort | SortOrder | ScoreSort | GeoDistanceSort | ScriptSort } - -export type SortOrder = 'asc' | 'desc' - -export type SortResults = FieldValue[] - -export interface StandardRetriever extends RetrieverBase { - query?: QueryDslQueryContainer - search_after?: SortResults - terminate_after?: integer - sort?: Sort - collapse?: SearchFieldCollapse -} - -export interface StoreStats { - size?: ByteSize - size_in_bytes: long - reserved?: ByteSize - reserved_in_bytes: long - total_data_set_size?: ByteSize - 
total_data_set_size_in_bytes?: long -} - -export interface StoredScript { - lang: ScriptLanguage - options?: Record - source: string -} - -export type SuggestMode = 'missing' | 'popular' | 'always' - -export type SuggestionName = string - -export interface TaskFailure { - task_id: long - node_id: NodeId - status: string - reason: ErrorCause -} - -export type TaskId = string | integer - -export interface TextEmbedding { - model_id: string - model_text: string -} - -export interface TextSimilarityReranker extends RetrieverBase { - retriever: RetrieverContainer - rank_window_size?: integer - inference_id?: string - inference_text?: string - field?: string -} - -export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' - -export type TimeOfDay = string - -export type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd' - -export type TimeZone = string - -export interface TopLeftBottomRightGeoBounds { - top_left: GeoLocation - bottom_right: GeoLocation -} - -export interface TopRightBottomLeftGeoBounds { - top_right: GeoLocation - bottom_left: GeoLocation -} - -export interface TransformContainer { - chain?: TransformContainer[] - script?: ScriptTransform - search?: SearchTransform -} - -export interface TranslogStats { - earliest_last_modified_age: long - operations: long - size?: string - size_in_bytes: long - uncommitted_operations: integer - uncommitted_size?: string - uncommitted_size_in_bytes: long -} - -export type TransportAddress = string - -export type UnitFloatMillis = double - -export type UnitMillis = long - -export type UnitNanos = long - -export type UnitSeconds = long - -export type Username = string - -export type Uuid = string - -export type VersionNumber = long - -export type VersionString = string - -export type VersionType = 'internal' | 'external' | 'external_gte' | 'force' - -export type WaitForActiveShardOptions = 'all' | 'index-setting' - -export type WaitForActiveShards = integer | WaitForActiveShardOptions - -export type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid' - -export interface WarmerStats { - current: long - total: long - total_time?: Duration - total_time_in_millis: DurationValue -} - -export interface WktGeoBounds { - wkt: string -} - -export interface WriteResponseBase { - _id: Id - _index: IndexName - _primary_term?: long - result: Result - _seq_no?: SequenceNumber - _shards: ShardStatistics - _version: VersionNumber - forced_refresh?: boolean -} - -export type byte = number - -export type double = number - -export type float = number - -export type integer = number - -export type long = number - -export type short = number - -export type uint = number - -export type ulong = number - -export interface AggregationsAdjacencyMatrixAggregate extends AggregationsMultiBucketAggregateBase { -} - -export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase { - filters?: Record - separator?: string -} - -export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMultiBucketBase { - key: string -} -export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys -& { [property: string]: AggregationsAggregate | string | long } - -export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | 
AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate - -export interface AggregationsAggregateBase { - meta?: Metadata -} - -export type AggregationsAggregateOrder = Partial> | Partial>[] - -export interface AggregationsAggregation { -} - -export interface AggregationsAggregationContainer { - aggregations?: Record - aggs?: Record - meta?: Metadata - adjacency_matrix?: AggregationsAdjacencyMatrixAggregation - auto_date_histogram?: AggregationsAutoDateHistogramAggregation - avg?: AggregationsAverageAggregation - avg_bucket?: AggregationsAverageBucketAggregation - boxplot?: AggregationsBoxplotAggregation - bucket_script?: AggregationsBucketScriptAggregation - bucket_selector?: AggregationsBucketSelectorAggregation - bucket_sort?: AggregationsBucketSortAggregation - bucket_count_ks_test?: AggregationsBucketKsAggregation - bucket_correlation?: AggregationsBucketCorrelationAggregation - cardinality?: AggregationsCardinalityAggregation - categorize_text?: AggregationsCategorizeTextAggregation - children?: AggregationsChildrenAggregation - composite?: AggregationsCompositeAggregation - cumulative_cardinality?: AggregationsCumulativeCardinalityAggregation - cumulative_sum?: AggregationsCumulativeSumAggregation - date_histogram?: AggregationsDateHistogramAggregation - date_range?: AggregationsDateRangeAggregation - derivative?: AggregationsDerivativeAggregation - 
-  diversified_sampler?: AggregationsDiversifiedSamplerAggregation
-  extended_stats?: AggregationsExtendedStatsAggregation
-  extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation
-  frequent_item_sets?: AggregationsFrequentItemSetsAggregation
-  filter?: QueryDslQueryContainer
-  filters?: AggregationsFiltersAggregation
-  geo_bounds?: AggregationsGeoBoundsAggregation
-  geo_centroid?: AggregationsGeoCentroidAggregation
-  geo_distance?: AggregationsGeoDistanceAggregation
-  geohash_grid?: AggregationsGeoHashGridAggregation
-  geo_line?: AggregationsGeoLineAggregation
-  geotile_grid?: AggregationsGeoTileGridAggregation
-  geohex_grid?: AggregationsGeohexGridAggregation
-  global?: AggregationsGlobalAggregation
-  histogram?: AggregationsHistogramAggregation
-  ip_range?: AggregationsIpRangeAggregation
-  ip_prefix?: AggregationsIpPrefixAggregation
-  inference?: AggregationsInferenceAggregation
-  line?: AggregationsGeoLineAggregation
-  matrix_stats?: AggregationsMatrixStatsAggregation
-  max?: AggregationsMaxAggregation
-  max_bucket?: AggregationsMaxBucketAggregation
-  median_absolute_deviation?: AggregationsMedianAbsoluteDeviationAggregation
-  min?: AggregationsMinAggregation
-  min_bucket?: AggregationsMinBucketAggregation
-  missing?: AggregationsMissingAggregation
-  moving_avg?: AggregationsMovingAverageAggregation
-  moving_percentiles?: AggregationsMovingPercentilesAggregation
-  moving_fn?: AggregationsMovingFunctionAggregation
-  multi_terms?: AggregationsMultiTermsAggregation
-  nested?: AggregationsNestedAggregation
-  normalize?: AggregationsNormalizeAggregation
-  parent?: AggregationsParentAggregation
-  percentile_ranks?: AggregationsPercentileRanksAggregation
-  percentiles?: AggregationsPercentilesAggregation
-  percentiles_bucket?: AggregationsPercentilesBucketAggregation
-  range?: AggregationsRangeAggregation
-  rare_terms?: AggregationsRareTermsAggregation
-  rate?: AggregationsRateAggregation
-  reverse_nested?: AggregationsReverseNestedAggregation
-  random_sampler?: AggregationsRandomSamplerAggregation
-  sampler?: AggregationsSamplerAggregation
-  scripted_metric?: AggregationsScriptedMetricAggregation
-  serial_diff?: AggregationsSerialDifferencingAggregation
-  significant_terms?: AggregationsSignificantTermsAggregation
-  significant_text?: AggregationsSignificantTextAggregation
-  stats?: AggregationsStatsAggregation
-  stats_bucket?: AggregationsStatsBucketAggregation
-  string_stats?: AggregationsStringStatsAggregation
-  sum?: AggregationsSumAggregation
-  sum_bucket?: AggregationsSumBucketAggregation
-  terms?: AggregationsTermsAggregation
-  time_series?: AggregationsTimeSeriesAggregation
-  top_hits?: AggregationsTopHitsAggregation
-  t_test?: AggregationsTTestAggregation
-  top_metrics?: AggregationsTopMetricsAggregation
-  value_count?: AggregationsValueCountAggregation
-  weighted_avg?: AggregationsWeightedAverageAggregation
-  variable_width_histogram?: AggregationsVariableWidthHistogramAggregation
-}
-
-export interface AggregationsAggregationRange {
-  from?: double | null
-  key?: string
-  to?: double | null
-}
-
-export interface AggregationsArrayPercentilesItem {
-  key: string
-  value: double | null
-  value_as_string?: string
-}
-
-export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregateBase {
-  interval: DurationLarge
-}
-
-export interface AggregationsAutoDateHistogramAggregation extends AggregationsBucketAggregationBase {
-  buckets?: integer
-  field?: Field
-  format?: string
-  minimum_interval?: AggregationsMinimumInterval
-  missing?: DateTime
-  offset?: string
-  params?: Record
-  script?: Script | string
-  time_zone?: TimeZone
-}
-
-export interface AggregationsAverageAggregation extends AggregationsFormatMetricAggregationBase {
-}
-
-export interface AggregationsAverageBucketAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsAvgAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase {
-  min: double
-  max: double
-  q1: double
-  q2: double
-  q3: double
-  lower: double
-  upper: double
-  min_as_string?: string
-  max_as_string?: string
-  q1_as_string?: string
-  q2_as_string?: string
-  q3_as_string?: string
-  lower_as_string?: string
-  upper_as_string?: string
-}
-
-export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase {
-  compression?: double
-}
-
-export interface AggregationsBucketAggregationBase {
-}
-
-export interface AggregationsBucketCorrelationAggregation extends AggregationsBucketPathAggregation {
-  function: AggregationsBucketCorrelationFunction
-}
-
-export interface AggregationsBucketCorrelationFunction {
-  count_correlation: AggregationsBucketCorrelationFunctionCountCorrelation
-}
-
-export interface AggregationsBucketCorrelationFunctionCountCorrelation {
-  indicator: AggregationsBucketCorrelationFunctionCountCorrelationIndicator
-}
-
-export interface AggregationsBucketCorrelationFunctionCountCorrelationIndicator {
-  doc_count: integer
-  expectations: double[]
-  fractions?: double[]
-}
-
-export interface AggregationsBucketKsAggregation extends AggregationsBucketPathAggregation {
-  alternative?: string[]
-  fractions?: double[]
-  sampling_method?: string
-}
-
-export interface AggregationsBucketMetricValueAggregate extends AggregationsSingleMetricAggregateBase {
-  keys: string[]
-}
-
-export interface AggregationsBucketPathAggregation {
-  buckets_path?: AggregationsBucketsPath
-}
-
-export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase {
-  script?: Script | string
-}
-
-export interface AggregationsBucketSelectorAggregation extends AggregationsPipelineAggregationBase {
-  script?: Script | string
-}
-
-export interface AggregationsBucketSortAggregation {
-  from?: integer
-  gap_policy?: AggregationsGapPolicy
-  size?: integer
-  sort?: Sort
-}
-
-export type AggregationsBuckets = Record | TBucket[]
-
-export type AggregationsBucketsPath = string | string[] | Record
-
-export type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1y'
-
-export interface AggregationsCardinalityAggregate extends AggregationsAggregateBase {
-  value: long
-}
-
-export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase {
-  precision_threshold?: integer
-  rehash?: boolean
-  execution_hint?: AggregationsCardinalityExecutionMode
-}
-
-export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic'
-
-export interface AggregationsCategorizeTextAggregation {
-  field: Field
-  max_unique_tokens?: integer
-  max_matched_tokens?: integer
-  similarity_threshold?: integer
-  categorization_filters?: string[]
-  categorization_analyzer?: AggregationsCategorizeTextAnalyzer
-  shard_size?: integer
-  size?: integer
-  min_doc_count?: integer
-  shard_min_doc_count?: integer
-}
-
-export type AggregationsCategorizeTextAnalyzer = string | AggregationsCustomCategorizeTextAnalyzer
-
-export interface AggregationsChiSquareHeuristic {
-  background_is_superset: boolean
-  include_negatives: boolean
-}
-
-export interface AggregationsChildrenAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsChildrenAggregate = AggregationsChildrenAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase {
-  type?: RelationName
-}
-
-export interface AggregationsCompositeAggregate extends AggregationsMultiBucketAggregateBase {
-  after_key?: AggregationsCompositeAggregateKey
-}
-
-export type AggregationsCompositeAggregateKey = Record
-
-export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase {
-  after?: AggregationsCompositeAggregateKey
-  size?: integer
-  sources?: Record[]
-}
-
-export interface AggregationsCompositeAggregationBase {
-  field?: Field
-  missing_bucket?: boolean
-  missing_order?: AggregationsMissingOrder
-  script?: Script | string
-  value_type?: AggregationsValueType
-  order?: SortOrder
-}
-
-export interface AggregationsCompositeAggregationSource {
-  terms?: AggregationsCompositeTermsAggregation
-  histogram?: AggregationsCompositeHistogramAggregation
-  date_histogram?: AggregationsCompositeDateHistogramAggregation
-  geotile_grid?: AggregationsCompositeGeoTileGridAggregation
-}
-
-export interface AggregationsCompositeBucketKeys extends AggregationsMultiBucketBase {
-  key: AggregationsCompositeAggregateKey
-}
-export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys
-& { [property: string]: AggregationsAggregate | AggregationsCompositeAggregateKey | long }
-
-export interface AggregationsCompositeDateHistogramAggregation extends AggregationsCompositeAggregationBase {
-  format?: string
-  calendar_interval?: DurationLarge
-  fixed_interval?: DurationLarge
-  offset?: Duration
-  time_zone?: TimeZone
-}
-
-export interface AggregationsCompositeGeoTileGridAggregation extends AggregationsCompositeAggregationBase {
-  precision?: integer
-  bounds?: GeoBounds
-}
-
-export interface AggregationsCompositeHistogramAggregation extends AggregationsCompositeAggregationBase {
-  interval: double
-}
-
-export interface AggregationsCompositeTermsAggregation extends AggregationsCompositeAggregationBase {
-}
-
-export interface AggregationsCumulativeCardinalityAggregate extends AggregationsAggregateBase {
-  value: long
-  value_as_string?: string
-}
-
-export interface AggregationsCumulativeCardinalityAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsCumulativeSumAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsCustomCategorizeTextAnalyzer {
-  char_filter?: string[]
-  tokenizer?: string
-  filter?: string[]
-}
-
-export interface AggregationsDateHistogramAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase {
-  calendar_interval?: AggregationsCalendarInterval
-  extended_bounds?: AggregationsExtendedBounds
-  hard_bounds?: AggregationsExtendedBounds
-  field?: Field
-  fixed_interval?: Duration
-  format?: string
-  interval?: Duration
-  min_doc_count?: integer
-  missing?: DateTime
-  offset?: Duration
-  order?: AggregationsAggregateOrder
-  params?: Record
-  script?: Script | string
-  time_zone?: TimeZone
-  keyed?: boolean
-}
-
-export interface AggregationsDateHistogramBucketKeys extends AggregationsMultiBucketBase {
-  key_as_string?: string
-  key: EpochTime
-}
-export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys
-& { [property: string]: AggregationsAggregate | string | EpochTime | long }
-
-export interface AggregationsDateRangeAggregate extends AggregationsRangeAggregate {
-}
-
-export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase {
-  field?: Field
-  format?: string
-  missing?: AggregationsMissing
-  ranges?: AggregationsDateRangeExpression[]
-  time_zone?: TimeZone
-  keyed?: boolean
-}
-
-export interface AggregationsDateRangeExpression {
-  from?: AggregationsFieldDateMath
-  key?: string
-  to?: AggregationsFieldDateMath
-}
-
-export interface AggregationsDerivativeAggregate extends AggregationsSingleMetricAggregateBase {
-  normalized_value?: double
-  normalized_value_as_string?: string
-}
-
-export interface AggregationsDerivativeAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase {
-  execution_hint?: AggregationsSamplerAggregationExecutionHint
-  max_docs_per_value?: integer
-  script?: Script | string
-  shard_size?: integer
-  field?: Field
-}
-
-export interface AggregationsDoubleTermsAggregate extends AggregationsTermsAggregateBase {
-}
-
-export interface AggregationsDoubleTermsBucketKeys extends AggregationsTermsBucketBase {
-  key: double
-  key_as_string?: string
-}
-export type AggregationsDoubleTermsBucket = AggregationsDoubleTermsBucketKeys
-& { [property: string]: AggregationsAggregate | double | string | long }
-
-export interface AggregationsEwmaModelSettings {
-  alpha?: float
-}
-
-export interface AggregationsEwmaMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
-  model: 'ewma'
-  settings: AggregationsEwmaModelSettings
-}
-
-export interface AggregationsExtendedBounds {
-  max?: T
-  min?: T
-}
-
-export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAggregate {
-  sum_of_squares: double | null
-  variance: double | null
-  variance_population: double | null
-  variance_sampling: double | null
-  std_deviation: double | null
-  std_deviation_population: double | null
-  std_deviation_sampling: double | null
-  std_deviation_bounds?: AggregationsStandardDeviationBounds
-  sum_of_squares_as_string?: string
-  variance_as_string?: string
-  variance_population_as_string?: string
-  variance_sampling_as_string?: string
-  std_deviation_as_string?: string
-  std_deviation_bounds_as_string?: AggregationsStandardDeviationBoundsAsString
-}
-
-export interface AggregationsExtendedStatsAggregation extends AggregationsFormatMetricAggregationBase {
-  sigma?: double
-}
-
-export interface AggregationsExtendedStatsBucketAggregate extends AggregationsExtendedStatsAggregate {
-}
-
-export interface AggregationsExtendedStatsBucketAggregation extends AggregationsPipelineAggregationBase {
-  sigma?: double
-}
-
-export type AggregationsFieldDateMath = DateMath | double
-
-export interface AggregationsFilterAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsFilterAggregate = AggregationsFilterAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsFiltersAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsFiltersAggregation extends AggregationsBucketAggregationBase {
-  filters?: AggregationsBuckets
-  other_bucket?: boolean
-  other_bucket_key?: string
-  keyed?: boolean
-}
-
-export interface AggregationsFiltersBucketKeys extends AggregationsMultiBucketBase {
-}
-export type AggregationsFiltersBucket = AggregationsFiltersBucketKeys
-& { [property: string]: AggregationsAggregate | long }
-
-export interface AggregationsFormatMetricAggregationBase extends AggregationsMetricAggregationBase {
-  format?: string
-}
-
-export interface AggregationsFormattableMetricAggregation extends AggregationsMetricAggregationBase {
-  format?: string
-}
-
-export interface AggregationsFrequentItemSetsAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsFrequentItemSetsAggregation {
-  fields: AggregationsFrequentItemSetsField[]
-  minimum_set_size?: integer
-  minimum_support?: double
-  size?: integer
-  filter?: QueryDslQueryContainer
-}
-
-export interface AggregationsFrequentItemSetsBucketKeys extends AggregationsMultiBucketBase {
-  key: Record
-  support: double
-}
-export type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBucketKeys
-& { [property: string]: AggregationsAggregate | Record | double | long }
-
-export interface AggregationsFrequentItemSetsField {
-  field: Field
-  exclude?: AggregationsTermsExclude
-  include?: AggregationsTermsInclude
-}
-
-export type AggregationsGapPolicy = 'skip' | 'insert_zeros' | 'keep_values'
-
-export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase {
-  bounds?: GeoBounds
-}
-
-export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase {
-  wrap_longitude?: boolean
-}
-
-export interface AggregationsGeoCentroidAggregate extends AggregationsAggregateBase {
-  count: long
-  location?: GeoLocation
-}
-
-export interface AggregationsGeoCentroidAggregation extends AggregationsMetricAggregationBase {
-  count?: long
-  location?: GeoLocation
-}
-
-export interface AggregationsGeoDistanceAggregate extends AggregationsRangeAggregate {
-}
-
-export interface AggregationsGeoDistanceAggregation extends AggregationsBucketAggregationBase {
-  distance_type?: GeoDistanceType
-  field?: Field
-  origin?: GeoLocation
-  ranges?: AggregationsAggregationRange[]
-  unit?: DistanceUnit
-}
-
-export interface AggregationsGeoHashGridAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase {
-  bounds?: GeoBounds
-  field?: Field
-  precision?: GeoHashPrecision
-  shard_size?: integer
-  size?: integer
-}
-
-export interface AggregationsGeoHashGridBucketKeys extends AggregationsMultiBucketBase {
-  key: GeoHash
-}
-export type AggregationsGeoHashGridBucket = AggregationsGeoHashGridBucketKeys
-& { [property: string]: AggregationsAggregate | GeoHash | long }
-
-export interface AggregationsGeoHexGridAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsGeoHexGridBucketKeys extends AggregationsMultiBucketBase {
-  key: GeoHexCell
-}
-export type AggregationsGeoHexGridBucket = AggregationsGeoHexGridBucketKeys
-& { [property: string]: AggregationsAggregate | GeoHexCell | long }
-
-export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase {
-  type: string
-  geometry: GeoLine
-  properties: any
-}
-
-export interface AggregationsGeoLineAggregation {
-  point: AggregationsGeoLinePoint
-  sort: AggregationsGeoLineSort
-  include_sort?: boolean
-  sort_order?: SortOrder
-  size?: integer
-}
-
-export interface AggregationsGeoLinePoint {
-  field: Field
-}
-
-export interface AggregationsGeoLineSort {
-  field: Field
-}
-
-export interface AggregationsGeoTileGridAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsGeoTileGridAggregation extends AggregationsBucketAggregationBase {
-  field?: Field
-  precision?: GeoTilePrecision
-  shard_size?: integer
-  size?: integer
-  bounds?: GeoBounds
-}
-
-export interface AggregationsGeoTileGridBucketKeys extends AggregationsMultiBucketBase {
-  key: GeoTile
-}
-export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys
-& { [property: string]: AggregationsAggregate | GeoTile | long }
-
-export interface AggregationsGeohexGridAggregation extends AggregationsBucketAggregationBase {
-  field: Field
-  precision?: integer
-  bounds?: GeoBounds
-  size?: integer
-  shard_size?: integer
-}
-
-export interface AggregationsGlobalAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsGlobalAggregate = AggregationsGlobalAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsGlobalAggregation extends AggregationsBucketAggregationBase {
-}
-
-export interface AggregationsGoogleNormalizedDistanceHeuristic {
-  background_is_superset?: boolean
-}
-
-export interface AggregationsHdrMethod {
-  number_of_significant_value_digits?: integer
-}
-
-export interface AggregationsHdrPercentileRanksAggregate extends AggregationsPercentilesAggregateBase {
-}
-
-export interface AggregationsHdrPercentilesAggregate extends AggregationsPercentilesAggregateBase {
-}
-
-export interface AggregationsHistogramAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsHistogramAggregation extends AggregationsBucketAggregationBase {
-  extended_bounds?: AggregationsExtendedBounds
-  hard_bounds?: AggregationsExtendedBounds
-  field?: Field
-  interval?: double
-  min_doc_count?: integer
-  missing?: double
-  offset?: double
-  order?: AggregationsAggregateOrder
-  script?: Script | string
-  format?: string
-  keyed?: boolean
-}
-
-export interface AggregationsHistogramBucketKeys extends AggregationsMultiBucketBase {
-  key_as_string?: string
-  key: double
-}
-export type AggregationsHistogramBucket = AggregationsHistogramBucketKeys
-& { [property: string]: AggregationsAggregate | string | double | long }
-
-export interface AggregationsHoltLinearModelSettings {
-  alpha?: float
-  beta?: float
-}
-
-export interface AggregationsHoltMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
-  model: 'holt'
-  settings: AggregationsHoltLinearModelSettings
-}
-
-export interface AggregationsHoltWintersModelSettings {
-  alpha?: float
-  beta?: float
-  gamma?: float
-  pad?: boolean
-  period?: integer
-  type?: AggregationsHoltWintersType
-}
-
-export interface AggregationsHoltWintersMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
-  model: 'holt_winters'
-  settings: AggregationsHoltWintersModelSettings
-}
-
-export type AggregationsHoltWintersType = 'add' | 'mult'
-
-export interface AggregationsInferenceAggregateKeys extends AggregationsAggregateBase {
-  value?: FieldValue
-  feature_importance?: AggregationsInferenceFeatureImportance[]
-  top_classes?: AggregationsInferenceTopClassEntry[]
-  warning?: string
-}
-export type AggregationsInferenceAggregate = AggregationsInferenceAggregateKeys
-& { [property: string]: any }
-
-export interface AggregationsInferenceAggregation extends AggregationsPipelineAggregationBase {
-  model_id: Name
-  inference_config?: AggregationsInferenceConfigContainer
-}
-
-export interface AggregationsInferenceClassImportance {
-  class_name: string
-  importance: double
-}
-
-export interface AggregationsInferenceConfigContainer {
-  regression?: MlRegressionInferenceOptions
-  classification?: MlClassificationInferenceOptions
-}
-
-export interface AggregationsInferenceFeatureImportance {
-  feature_name: string
-  importance?: double
-  classes?: AggregationsInferenceClassImportance[]
-}
-
-export interface AggregationsInferenceTopClassEntry {
-  class_name: FieldValue
-  class_probability: double
-  class_score: double
-}
-
-export interface AggregationsIpPrefixAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsIpPrefixAggregation extends AggregationsBucketAggregationBase {
-  field: Field
-  prefix_length: integer
-  is_ipv6?: boolean
-  append_prefix_length?: boolean
-  keyed?: boolean
-  min_doc_count?: long
-}
-
-export interface AggregationsIpPrefixBucketKeys extends AggregationsMultiBucketBase {
-  is_ipv6: boolean
-  key: string
-  prefix_length: integer
-  netmask?: string
-}
-export type AggregationsIpPrefixBucket = AggregationsIpPrefixBucketKeys
-& { [property: string]: AggregationsAggregate | boolean | string | integer | long }
-
-export interface AggregationsIpRangeAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsIpRangeAggregation extends AggregationsBucketAggregationBase {
-  field?: Field
-  ranges?: AggregationsIpRangeAggregationRange[]
-}
-
-export interface AggregationsIpRangeAggregationRange {
-  from?: string | null
-  mask?: string
-  to?: string | null
-}
-
-export interface AggregationsIpRangeBucketKeys extends AggregationsMultiBucketBase {
-  key?: string
-  from?: string
-  to?: string
-}
-export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys
-& { [property: string]: AggregationsAggregate | string | long }
-
-export type AggregationsKeyedPercentiles = Record
-
-export interface AggregationsLinearMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
-  model: 'linear'
-  settings: EmptyObject
-}
-
-export interface AggregationsLongRareTermsAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsLongRareTermsBucketKeys extends AggregationsMultiBucketBase {
-  key: long
-  key_as_string?: string
-}
-export type AggregationsLongRareTermsBucket = AggregationsLongRareTermsBucketKeys
-& { [property: string]: AggregationsAggregate | long | string }
-
-export interface AggregationsLongTermsAggregate extends AggregationsTermsAggregateBase {
-}
-
-export interface AggregationsLongTermsBucketKeys extends AggregationsTermsBucketBase {
-  key: long
-  key_as_string?: string
-}
-export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys
-& { [property: string]: AggregationsAggregate | long | string }
-
-export interface AggregationsMatrixAggregation {
-  fields?: Fields
-  missing?: Record
-}
-
-export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateBase {
-  doc_count: long
-  fields?: AggregationsMatrixStatsFields[]
-}
-
-export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation {
-  mode?: SortMode
-}
-
-export interface AggregationsMatrixStatsFields {
-  name: Field
-  count: long
-  mean: double
-  variance: double
-  skewness: double
-  kurtosis: double
-  covariance: Record
-  correlation: Record
-}
-
-export interface AggregationsMaxAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsMaxAggregation extends AggregationsFormatMetricAggregationBase {
-}
-
-export interface AggregationsMaxBucketAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsMedianAbsoluteDeviationAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase {
-  compression?: double
-}
-
-export interface AggregationsMetricAggregationBase {
-  field?: Field
-  missing?: AggregationsMissing
-  script?: Script | string
-}
-
-export interface AggregationsMinAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsMinAggregation extends AggregationsFormatMetricAggregationBase {
-}
-
-export interface AggregationsMinBucketAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' | 'month' | 'year'
-
-export type AggregationsMissing = string | integer | double | boolean
-
-export interface AggregationsMissingAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsMissingAggregate = AggregationsMissingAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase {
-  field?: Field
-  missing?: AggregationsMissing
-}
-
-export type AggregationsMissingOrder = 'first' | 'last' | 'default'
-
-export type AggregationsMovingAverageAggregation = AggregationsLinearMovingAverageAggregation | AggregationsSimpleMovingAverageAggregation | AggregationsEwmaMovingAverageAggregation | AggregationsHoltMovingAverageAggregation | AggregationsHoltWintersMovingAverageAggregation
-
-export interface AggregationsMovingAverageAggregationBase extends AggregationsPipelineAggregationBase {
-  minimize?: boolean
-  predict?: integer
-  window?: integer
-}
-
-export interface AggregationsMovingFunctionAggregation extends AggregationsPipelineAggregationBase {
-  script?: string
-  shift?: integer
-  window?: integer
-}
-
-export interface AggregationsMovingPercentilesAggregation extends AggregationsPipelineAggregationBase {
-  window?: integer
-  shift?: integer
-  keyed?: boolean
-}
-
-export interface AggregationsMultiBucketAggregateBase extends AggregationsAggregateBase {
-  buckets: AggregationsBuckets
-}
-
-export interface AggregationsMultiBucketBase {
-  doc_count: long
-}
-
-export interface AggregationsMultiTermLookup {
-  field: Field
-  missing?: AggregationsMissing
-}
-
-export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggregateBase {
-}
-
-export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase {
-  collect_mode?: AggregationsTermsAggregationCollectMode
-  order?: AggregationsAggregateOrder
-  min_doc_count?: long
-  shard_min_doc_count?: long
-  shard_size?: integer
-  show_term_doc_count_error?: boolean
-  size?: integer
-  terms: AggregationsMultiTermLookup[]
-}
-
-export interface AggregationsMultiTermsBucketKeys extends AggregationsMultiBucketBase {
-  key: FieldValue[]
-  key_as_string?: string
-  doc_count_error_upper_bound?: long
-}
-export type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys
-& { [property: string]: AggregationsAggregate | FieldValue[] | string | long }
-
-export interface AggregationsMutualInformationHeuristic {
-  background_is_superset?: boolean
-  include_negatives?: boolean
-}
-
-export interface AggregationsNestedAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsNestedAggregate = AggregationsNestedAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase {
-  path?: Field
-}
-
-export interface AggregationsNormalizeAggregation extends AggregationsPipelineAggregationBase {
-  method?: AggregationsNormalizeMethod
-}
-
-export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax'
-
-export interface AggregationsParentAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsParentAggregate = AggregationsParentAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsParentAggregation extends AggregationsBucketAggregationBase {
-  type?: RelationName
-}
-
-export interface AggregationsPercentageScoreHeuristic {
-}
-
-export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase {
-  keyed?: boolean
-  values?: double[] | null
-  hdr?: AggregationsHdrMethod
-  tdigest?: AggregationsTDigest
-}
-
-export type AggregationsPercentiles = AggregationsKeyedPercentiles | AggregationsArrayPercentilesItem[]
-
-export interface AggregationsPercentilesAggregateBase extends AggregationsAggregateBase {
-  values: AggregationsPercentiles
-}
-
-export interface AggregationsPercentilesAggregation extends AggregationsFormatMetricAggregationBase {
-  keyed?: boolean
-  percents?: double[]
-  hdr?: AggregationsHdrMethod
-  tdigest?: AggregationsTDigest
-}
-
-export interface AggregationsPercentilesBucketAggregate extends AggregationsPercentilesAggregateBase {
-}
-
-export interface AggregationsPercentilesBucketAggregation extends AggregationsPipelineAggregationBase {
-  percents?: double[]
-}
-
-export interface AggregationsPipelineAggregationBase extends AggregationsBucketPathAggregation {
-  format?: string
-  gap_policy?: AggregationsGapPolicy
-}
-
-export interface AggregationsRandomSamplerAggregation extends AggregationsBucketAggregationBase {
-  probability: double
-  seed?: integer
-  shard_seed?: integer
-}
-
-export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase {
-  field?: Field
-  missing?: integer
-  ranges?: AggregationsAggregationRange[]
-  script?: Script | string
-  keyed?: boolean
-  format?: string
-}
-
-export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase {
-  from?: double
-  to?: double
-  from_as_string?: string
-  to_as_string?: string
-  key?: string
-}
-export type AggregationsRangeBucket = AggregationsRangeBucketKeys
-& { [property: string]: AggregationsAggregate | double | string | long }
-
-export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase {
-  exclude?: AggregationsTermsExclude
-  field?: Field
-  include?: AggregationsTermsInclude
-  max_doc_count?: long
-  missing?: AggregationsMissing
-  precision?: double
-  value_type?: string
-}
-
-export interface AggregationsRateAggregate extends AggregationsAggregateBase {
-  value: double
-  value_as_string?: string
-}
-
-export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase {
-  unit?: AggregationsCalendarInterval
-  mode?: AggregationsRateMode
-}
-
-export type AggregationsRateMode = 'sum' | 'value_count'
-
-export interface AggregationsReverseNestedAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase {
-  path?: Field
-}
-
-export interface AggregationsSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsSamplerAggregate = AggregationsSamplerAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase {
-  shard_size?: integer
-}
-
-export type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash'
-
-export interface AggregationsScriptedHeuristic {
-  script: Script | string
-}
-
-export interface AggregationsScriptedMetricAggregate extends AggregationsAggregateBase {
-  value: any
-}
-
-export interface AggregationsScriptedMetricAggregation extends AggregationsMetricAggregationBase {
-  combine_script?: Script | string
-  init_script?: Script | string
-  map_script?: Script | string
-  params?: Record
-  reduce_script?: Script | string
-}
-
-export interface AggregationsSerialDifferencingAggregation extends AggregationsPipelineAggregationBase {
-  lag?: integer
-}
-
-export interface AggregationsSignificantLongTermsAggregate extends AggregationsSignificantTermsAggregateBase {
-}
-
-export interface AggregationsSignificantLongTermsBucketKeys extends AggregationsSignificantTermsBucketBase {
-  key: long
-  key_as_string?: string
-}
-export type AggregationsSignificantLongTermsBucket = AggregationsSignificantLongTermsBucketKeys
-& { [property: string]: AggregationsAggregate | long | string | double }
-
-export interface AggregationsSignificantStringTermsAggregate extends AggregationsSignificantTermsAggregateBase {
-}
-
-export interface AggregationsSignificantStringTermsBucketKeys extends AggregationsSignificantTermsBucketBase {
-  key: string
-}
-export type AggregationsSignificantStringTermsBucket = AggregationsSignificantStringTermsBucketKeys
-& { [property: string]: AggregationsAggregate | string | double | long }
-
-export interface AggregationsSignificantTermsAggregateBase extends AggregationsMultiBucketAggregateBase {
-  bg_count?: long
-  doc_count?: long
-}
-
-export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase {
-  background_filter?: QueryDslQueryContainer
-  chi_square?: AggregationsChiSquareHeuristic
-  exclude?: AggregationsTermsExclude
-  execution_hint?: AggregationsTermsAggregationExecutionHint
-  field?: Field
-  gnd?: AggregationsGoogleNormalizedDistanceHeuristic
-  include?: AggregationsTermsInclude
-  jlh?: EmptyObject
-  min_doc_count?: long
-  mutual_information?: AggregationsMutualInformationHeuristic
-  percentage?: AggregationsPercentageScoreHeuristic
-  script_heuristic?: AggregationsScriptedHeuristic
-  shard_min_doc_count?: long
-  shard_size?: integer
-  size?: integer
-}
-
-export interface AggregationsSignificantTermsBucketBase extends AggregationsMultiBucketBase {
-  score: double
-  bg_count: long
-}
-
-export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase {
-  background_filter?: QueryDslQueryContainer
-  chi_square?: AggregationsChiSquareHeuristic
-  exclude?: AggregationsTermsExclude
-  execution_hint?: AggregationsTermsAggregationExecutionHint
-  field?: Field
-  filter_duplicate_text?: boolean
-  gnd?: AggregationsGoogleNormalizedDistanceHeuristic
-  include?: AggregationsTermsInclude
-  jlh?: EmptyObject
-  min_doc_count?: long
-  mutual_information?: AggregationsMutualInformationHeuristic
-  percentage?: AggregationsPercentageScoreHeuristic
-  script_heuristic?: AggregationsScriptedHeuristic
-  shard_min_doc_count?: long
-  shard_size?: integer
-  size?: integer
-  source_fields?: Fields
-}
-
-export interface AggregationsSimpleMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
-  model: 'simple'
-  settings: EmptyObject
-}
-
-export interface AggregationsSimpleValueAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsSingleBucketAggregateBase extends AggregationsAggregateBase {
-  doc_count: long
-}
-
-export interface AggregationsSingleMetricAggregateBase extends AggregationsAggregateBase {
-  value: double | null
-  value_as_string?: string
-}
-
-export interface AggregationsStandardDeviationBounds {
-  upper: double | null
-  lower: double | null
-  upper_population: double | null
-  lower_population: double | null
-  upper_sampling: double | null
-  lower_sampling: double | null
-}
-
-export interface AggregationsStandardDeviationBoundsAsString {
-  upper: string
-  lower: string
-  upper_population: string
-  lower_population: string
-  upper_sampling: string
-  lower_sampling: string
-}
-
-export interface AggregationsStatsAggregate extends AggregationsAggregateBase {
-  count: long
-  min: double | null
-  max: double | null
-  avg: double | null
-  sum: double
-  min_as_string?: string
-  max_as_string?: string
-  avg_as_string?: string
-  sum_as_string?: string
-}
-
-export interface AggregationsStatsAggregation extends AggregationsFormatMetricAggregationBase {
-}
-
-export interface AggregationsStatsBucketAggregate extends AggregationsStatsAggregate {
-}
-
-export interface AggregationsStatsBucketAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsStringRareTermsAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsStringRareTermsBucketKeys extends AggregationsMultiBucketBase {
-  key: string
-}
-export type AggregationsStringRareTermsBucket = AggregationsStringRareTermsBucketKeys
-& { [property: string]: AggregationsAggregate | string | long }
-
-export interface AggregationsStringStatsAggregate extends AggregationsAggregateBase {
-  count: long
-  min_length: integer | null
-  max_length: integer | null
-  avg_length: double | null
-  entropy: double | null
-  distribution?: Record | null
-  min_length_as_string?: string
-  max_length_as_string?: string
-  avg_length_as_string?: string
-}
-
-export interface AggregationsStringStatsAggregation extends AggregationsMetricAggregationBase {
-  show_distribution?: boolean
-}
-
-export interface AggregationsStringTermsAggregate extends AggregationsTermsAggregateBase {
-}
-
-export interface AggregationsStringTermsBucketKeys extends AggregationsTermsBucketBase {
-  key: FieldValue
-}
-export type AggregationsStringTermsBucket = AggregationsStringTermsBucketKeys
-& { [property: string]: AggregationsAggregate | FieldValue | long }
-
-export interface AggregationsSumAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsSumAggregation extends AggregationsFormatMetricAggregationBase {
-}
-
-export interface AggregationsSumBucketAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsTDigest {
-  compression?: integer
-}
-
-export interface AggregationsTDigestPercentileRanksAggregate extends AggregationsPercentilesAggregateBase {
-}
-
-export interface AggregationsTDigestPercentilesAggregate extends AggregationsPercentilesAggregateBase {
-}
-
-export interface AggregationsTTestAggregate extends AggregationsAggregateBase {
-  value: double | null
-  value_as_string?: string
-}
-
-export interface AggregationsTTestAggregation {
-  a?: AggregationsTestPopulation
-  b?: AggregationsTestPopulation
-  type?: AggregationsTTestType
-}
-
-export type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic'
-
-export interface AggregationsTermsAggregateBase extends AggregationsMultiBucketAggregateBase {
-  doc_count_error_upper_bound?: long
-  sum_other_doc_count?: long
-}
-
-export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase {
-  collect_mode?: AggregationsTermsAggregationCollectMode
-  exclude?: AggregationsTermsExclude
-  execution_hint?: AggregationsTermsAggregationExecutionHint
-  field?: Field
-  include?: AggregationsTermsInclude
-  min_doc_count?: integer
-  missing?: AggregationsMissing
-  missing_order?: AggregationsMissingOrder
-  missing_bucket?: boolean
-  value_type?: string
-  order?: AggregationsAggregateOrder
-  script?: Script | string
-  shard_min_doc_count?: long
-  shard_size?: integer
-  show_term_doc_count_error?: boolean
-  size?: integer
-  format?: string
-}
-
-export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_first'
-
-export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality'
-
-export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase {
-  doc_count_error_upper_bound?: long
-}
-
-export type AggregationsTermsExclude = string | string[]
-
-export type AggregationsTermsInclude = string | string[] | AggregationsTermsPartition
-
-export interface AggregationsTermsPartition {
-  num_partitions: long
-  partition: long
-}
-
-export interface AggregationsTestPopulation {
-  field: Field
-  script?: Script | string
-  filter?: QueryDslQueryContainer
-}
-
-export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase {
-  size?: integer
-  keyed?: boolean
-}
-
-export interface AggregationsTimeSeriesBucketKeys extends AggregationsMultiBucketBase {
-  key: Record
-}
-export type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys
-& { [property: string]: AggregationsAggregate | Record | long }
-
-export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase {
-  hits: SearchHitsMetadata
-}
-
-export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase {
-  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
-  explain?: boolean
-  fields?: (QueryDslFieldAndFormat | Field)[]
-  from?: integer
-  highlight?: SearchHighlight
-  script_fields?: Record
-  size?: integer
-  sort?: Sort
-  _source?: SearchSourceConfig
-  stored_fields?: Fields
-  track_scores?: boolean
-  version?: boolean
-  seq_no_primary_term?: boolean
-}
-
-export interface AggregationsTopMetrics {
-  sort: (FieldValue | null)[]
-  metrics: Record
-}
-
-export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBase {
-  top: AggregationsTopMetrics[]
-}
-
-export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase {
-  metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[]
-  size?: integer
-  sort?: Sort
-}
-
-export interface AggregationsTopMetricsValue {
-  field: Field
-}
-
-export interface AggregationsUnmappedRareTermsAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsUnmappedSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsUnmappedSamplerAggregate = AggregationsUnmappedSamplerAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsSignificantTermsAggregateBase {
-}
-
-export interface AggregationsUnmappedTermsAggregate extends AggregationsTermsAggregateBase {
-}
-
-export interface AggregationsValueCountAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsValueCountAggregation extends AggregationsFormattableMetricAggregation {
-}
-
-export type AggregationsValueType = 'string' | 'long' | 'double' | 'number' | 'date' | 'date_nanos' | 'ip' | 'numeric' | 'geo_point' | 'boolean'
-
-export interface AggregationsVariableWidthHistogramAggregate extends AggregationsMultiBucketAggregateBase {
-}
-
-export interface AggregationsVariableWidthHistogramAggregation {
-  field?: Field
-  buckets?: integer
-  shard_size?: integer
-  initial_buffer?: integer
-  script?: Script | string
-}
-
-export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase {
-  min: double
-  key: double
-  max: double
-  min_as_string?: string
-  key_as_string?: string
-  max_as_string?: string
-}
-export type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidthHistogramBucketKeys
-& { [property: string]: AggregationsAggregate | double | string | long }
-
-export interface AggregationsWeightedAverageAggregation {
-  format?: string
-  value?: AggregationsWeightedAverageValue
-  value_type?: AggregationsValueType
-  weight?: AggregationsWeightedAverageValue
-}
-
-export interface AggregationsWeightedAverageValue {
-  field?: Field
-  missing?: double
-  script?: Script | string
-}
-
-export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer
-
-export interface AnalysisArabicAnalyzer {
-  type: 'arabic'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisArmenianAnalyzer {
-  type: 'armenian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase {
-  type: 'asciifolding'
-  preserve_original?: SpecUtilsStringified
-}
-
-export interface AnalysisBasqueAnalyzer {
-  type: 'basque'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisBengaliAnalyzer {
-  type: 'bengali'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisBrazilianAnalyzer {
-  type: 'brazilian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisBulgarianAnalyzer {
-  type: 'bulgarian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisCatalanAnalyzer {
-  type: 'catalan'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export type AnalysisCharFilter = string | AnalysisCharFilterDefinition
-
-export interface AnalysisCharFilterBase {
-  version?: VersionString
-}
-
-export type AnalysisCharFilterDefinition = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceCharFilter | AnalysisIcuNormalizationCharFilter | AnalysisKuromojiIterationMarkCharFilter
-
-export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase {
-  type: 'char_group'
-  tokenize_on_chars: string[]
-  max_token_length?: integer
-}
-
-export interface AnalysisChineseAnalyzer {
-  type: 'chinese'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisCjkAnalyzer {
-  type: 'cjk'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase {
-  type: 'classic'
-  max_token_length?: integer
-}
-
-export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
-  type: 'common_grams'
-  common_words?: string[]
-  common_words_path?: string
-  ignore_case?: boolean
-  query_mode?: boolean
-}
-
-export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase {
-  hyphenation_patterns_path?: string
-  max_subword_size?: integer
-  min_subword_size?: integer
-  min_word_size?: integer
-  only_longest_match?: boolean
-  word_list?: string[]
-  word_list_path?: string
-}
-
-export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase {
-  type: 'condition'
-  filter: string[]
-  script: Script | string
-}
-
-export interface AnalysisCustomAnalyzer {
-  type: 'custom'
-  char_filter?: string | string[]
-  filter?: string | string[]
-  position_increment_gap?: integer
-  position_offset_gap?: integer
-  tokenizer: string
-}
-
-export interface AnalysisCustomNormalizer {
-  type: 'custom'
-  char_filter?: string[]
-  filter?: string[]
-}
-
-export interface AnalysisCzechAnalyzer {
-  type: 'czech'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisDanishAnalyzer {
-  type: 'danish'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity'
-
-export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase {
-  type: 'delimited_payload'
-  delimiter?: string
-  encoding?: AnalysisDelimitedPayloadEncoding
-}
-
-export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
-  type: 'dictionary_decompounder'
-}
-
-export interface AnalysisDutchAnalyzer {
-  type: 'dutch'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export type AnalysisEdgeNGramSide = 'front' | 'back'
-
-export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase {
-  type: 'edge_ngram'
-  max_gram?: integer
-  min_gram?: integer
-  side?: AnalysisEdgeNGramSide
-  preserve_original?: SpecUtilsStringified
-}
-
-export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
-  type: 'edge_ngram'
-  custom_token_chars?: string
-  max_gram?: integer
-  min_gram?: integer
-  token_chars?: AnalysisTokenChar[]
-}
-
-export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
-  type: 'elision'
-  articles?: string[]
-  articles_path?: string
-  articles_case?: SpecUtilsStringified
-}
-
-export interface AnalysisEnglishAnalyzer {
-  type: 'english'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisEstonianAnalyzer {
-  type: 'estonian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisFingerprintAnalyzer {
-  type: 'fingerprint'
-  version?: VersionString
-  max_output_size: integer
-  preserve_original: boolean
-  separator: string
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase {
-  type: 'fingerprint'
-  max_output_size?: integer
-  separator?: string
-}
-
-export interface AnalysisFinnishAnalyzer {
-  type: 'finnish'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisFrenchAnalyzer {
-  type: 'french'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisGalicianAnalyzer {
-  type: 'galician'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisGermanAnalyzer {
-  type: 'german'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisGreekAnalyzer {
-  type: 'greek'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisHindiAnalyzer {
-  type: 'hindi'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase {
-  type: 'html_strip'
-  escaped_tags?: string[]
-}
-
-export interface AnalysisHungarianAnalyzer {
-  type: 'hungarian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase {
-  type: 'hunspell'
-  dedup?: boolean
-  dictionary?: string
-  locale: string
-  longest_only?: boolean
-}
-
-export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
-  type: 'hyphenation_decompounder'
-}
-
-export interface AnalysisIcuAnalyzer {
-  type: 'icu_analyzer'
-  method: AnalysisIcuNormalizationType
-  mode: AnalysisIcuNormalizationMode
-}
-
-export type AnalysisIcuCollationAlternate = 'shifted' | 'non-ignorable'
-
-export type AnalysisIcuCollationCaseFirst = 'lower' | 'upper'
-
-export type AnalysisIcuCollationDecomposition = 'no' | 'identical'
-
-export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' | 'quaternary' | 'identical'
-
-export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase {
-  type: 'icu_collation'
-  alternate?: AnalysisIcuCollationAlternate
-  case_first?: AnalysisIcuCollationCaseFirst
-  case_level?: boolean
-  country?: string
-  decomposition?: AnalysisIcuCollationDecomposition
-  hiragana_quaternary_mode?: boolean
-  language?: string
-  numeric?: boolean
-  rules?: string
-  strength?: AnalysisIcuCollationStrength
-  variable_top?: string
-  variant?: string
-}
-
-export interface AnalysisIcuFoldingTokenFilter extends AnalysisTokenFilterBase {
-  type: 'icu_folding'
-  unicode_set_filter: string
-}
-
-export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBase {
-  type: 'icu_normalizer'
-  mode?: AnalysisIcuNormalizationMode
-  name?: AnalysisIcuNormalizationType
-}
-
-export type AnalysisIcuNormalizationMode = 'decompose' | 'compose'
-
-export interface AnalysisIcuNormalizationTokenFilter extends AnalysisTokenFilterBase {
-  type: 'icu_normalizer'
-  name: AnalysisIcuNormalizationType
-}
-
-export type AnalysisIcuNormalizationType = 'nfc' | 'nfkc' | 'nfkc_cf'
-
-export interface AnalysisIcuTokenizer extends AnalysisTokenizerBase {
-  type: 'icu_tokenizer'
-  rule_files: string
-}
-
-export type AnalysisIcuTransformDirection = 'forward' | 'reverse'
-
-export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase {
-  type: 'icu_transform'
-  dir?: AnalysisIcuTransformDirection
-  id: string
-}
-
-export interface AnalysisIndonesianAnalyzer {
-  type: 'indonesian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisIrishAnalyzer {
-  type: 'irish'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisItalianAnalyzer {
-  type: 'italian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase {
-  type: 'kstem'
-}
-
-export type AnalysisKeepTypesMode = 'include' | 'exclude'
-
-export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase {
-  type: 'keep_types'
-  mode?: AnalysisKeepTypesMode
-  types?: string[]
-}
-
-export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase {
-  type: 'keep'
-  keep_words?: string[]
-  keep_words_case?: boolean
-  keep_words_path?: string
-}
-
-export interface AnalysisKeywordAnalyzer {
-  type: 'keyword'
-  version?: VersionString
-}
-
-export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase {
-  type: 'keyword_marker'
-  ignore_case?: boolean
-  keywords?: string | string[]
-  keywords_path?: string
-  keywords_pattern?: string
-}
-
-export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase {
-  type: 'keyword'
-  buffer_size?: integer
-}
-
-export interface AnalysisKuromojiAnalyzer {
-  type: 'kuromoji'
-  mode: AnalysisKuromojiTokenizationMode
-  user_dictionary?: string
-}
-
-export interface AnalysisKuromojiIterationMarkCharFilter extends AnalysisCharFilterBase {
-  type: 'kuromoji_iteration_mark'
-  normalize_kana: boolean
-  normalize_kanji: boolean
-}
-
-export interface AnalysisKuromojiPartOfSpeechTokenFilter extends AnalysisTokenFilterBase {
-  type: 'kuromoji_part_of_speech'
-  stoptags: string[]
-}
-
-export interface AnalysisKuromojiReadingFormTokenFilter extends AnalysisTokenFilterBase {
-  type: 'kuromoji_readingform'
-  use_romaji: boolean
-}
-
-export interface AnalysisKuromojiStemmerTokenFilter extends AnalysisTokenFilterBase {
-  type: 'kuromoji_stemmer'
-  minimum_length: integer
-}
-
-export type AnalysisKuromojiTokenizationMode = 'normal' | 'search' | 'extended'
-
-export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase {
-  type: 'kuromoji_tokenizer'
-  discard_punctuation?: boolean
-  mode: AnalysisKuromojiTokenizationMode
-  nbest_cost?: integer
-  nbest_examples?: string
-  user_dictionary?: string
-  user_dictionary_rules?: string[]
-  discard_compound_token?: boolean
-}
-
-export type AnalysisLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Brazilian' | 'Bulgarian' | 'Catalan' | 'Chinese' | 'Cjk' | 'Czech' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'Galician' | 'German' | 'Greek' | 'Hindi' | 'Hungarian' | 'Indonesian' | 'Irish' | 'Italian' | 'Latvian' | 'Norwegian' | 'Persian' | 'Portuguese' | 'Romanian' | 'Russian' | 'Sorani' | 'Spanish' | 'Swedish' | 'Turkish' | 'Thai'
-
-export interface AnalysisLanguageAnalyzer {
-  type: 'language'
-  version?: VersionString
-  language: AnalysisLanguage
-  stem_exclusion: string[]
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisLatvianAnalyzer {
-  type: 'latvian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase {
-  type: 'length'
-  max?: integer
-  min?: integer
-}
-
-export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase {
-  type: 'letter'
-}
-
-export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase {
-  type: 'limit'
-  consume_all_tokens?: boolean
-  max_token_count?: SpecUtilsStringified
-}
-
-export interface AnalysisLithuanianAnalyzer {
-  type: 'lithuanian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisLowercaseNormalizer {
-  type: 'lowercase'
-}
-
-export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase {
-  type: 'lowercase'
-  language?: string
-}
-
-export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase {
-  type: 'lowercase'
-}
-
-export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase {
-  type: 'mapping'
-  mappings?: string[]
-  mappings_path?: string
-}
-
-export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase {
-  type: 'multiplexer'
-  filters: string[]
-  preserve_original?: SpecUtilsStringified
-}
-
-export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase {
-  type: 'ngram'
-  max_gram?: integer
-  min_gram?: integer
-  preserve_original?: SpecUtilsStringified
-}
-
-export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
-  type: 'ngram'
-  custom_token_chars?: string
-  max_gram?: integer
-  min_gram?: integer
-  token_chars?: AnalysisTokenChar[]
-}
-
-export interface AnalysisNoriAnalyzer {
-  type: 'nori'
-  version?: VersionString
-  decompound_mode?: AnalysisNoriDecompoundMode
-  stoptags?: string[]
-  user_dictionary?: string
-}
-
-export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed'
-
-export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase {
-  type: 'nori_part_of_speech'
-  stoptags?: string[]
-}
-
-export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase {
-  type: 'nori_tokenizer'
-  decompound_mode?: AnalysisNoriDecompoundMode
-  discard_punctuation?: boolean
-  user_dictionary?: string
-  user_dictionary_rules?: string[]
-}
-
-export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNormalizer
-
-export interface AnalysisNorwegianAnalyzer {
-  type: 'norwegian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase {
-  type: 'path_hierarchy'
-  buffer_size?: SpecUtilsStringified
-  delimiter?: string
-  replacement?: string
-  reverse?: SpecUtilsStringified
-  skip?: SpecUtilsStringified
-}
-
-export interface AnalysisPatternAnalyzer {
-  type: 'pattern'
-  version?: VersionString
-  flags?: string
-  lowercase?: boolean
-  pattern: string
-  stopwords?: AnalysisStopWords
-}
-
-export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase {
-  type: 'pattern_capture'
-  patterns: string[]
-  preserve_original?: SpecUtilsStringified
-}
-
-export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase {
-  type: 'pattern_replace'
-  flags?: string
-  pattern: string
-  replacement?: string
-}
-
-export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase {
-  type: 'pattern_replace'
-  all?: boolean
-  flags?: string
-  pattern: string
-  replacement?: string
-}
-
-export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase {
-  type: 'pattern'
-  flags?: string
-  group?: integer
-  pattern?: string
-}
-
-export interface AnalysisPersianAnalyzer {
-  type: 'persian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff'
-
-export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish'
-
-export type AnalysisPhoneticNameType = 'generic' | 'ashkenazi' | 'sephardic'
-
-export type AnalysisPhoneticRuleType = 'approx' | 'exact'
-
-export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase {
-  type: 'phonetic'
-  encoder: AnalysisPhoneticEncoder
-  languageset?: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]
-  max_code_len?: integer
-  name_type?: AnalysisPhoneticNameType
-  replace?: boolean
-  rule_type?: AnalysisPhoneticRuleType
-}
-
-export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase {
-  type: 'porter_stem'
-}
-
-export interface AnalysisPortugueseAnalyzer {
-  type: 'portuguese'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase {
-  type: 'predicate_token_filter'
-  script: Script | string
-}
-
-export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase {
-  type: 'remove_duplicates'
-}
-
-export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase {
-  type: 'reverse'
-}
-
-export interface AnalysisRomanianAnalyzer {
-  type: 'romanian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisRussianAnalyzer {
-  type: 'russian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisSerbianAnalyzer {
-  type: 'serbian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase {
-  type: 'shingle'
-  filler_token?: string
-  max_shingle_size?: integer | string
-  min_shingle_size?: integer | string
-  output_unigrams?: boolean
-  output_unigrams_if_no_shingles?: boolean
-  token_separator?: string
-}
-
-export interface AnalysisSimpleAnalyzer {
-  type: 'simple'
-  version?: VersionString
-}
-
-export interface AnalysisSimplePatternSplitTokenizer extends AnalysisTokenizerBase {
-  type: 'simple_pattern_split'
-  pattern?: string
-}
-
-export interface AnalysisSimplePatternTokenizer extends AnalysisTokenizerBase {
-  type: 'simple_pattern'
-  pattern?: string
-}
-
-export interface AnalysisSnowballAnalyzer {
-  type: 'snowball'
-  version?: VersionString
-  language: AnalysisSnowballLanguage
-  stopwords?: AnalysisStopWords
-}
-
-export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish'
-
-export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase {
-  type: 'snowball'
-  language?: AnalysisSnowballLanguage
-}
-
-export interface AnalysisSoraniAnalyzer {
-  type: 'sorani'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisSpanishAnalyzer {
-  type: 'spanish'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisStandardAnalyzer {
-  type: 'standard'
-  max_token_length?: integer
-  stopwords?: AnalysisStopWords
-}
-
-export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase {
-  type: 'standard'
-  max_token_length?: integer
-}
-
-export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterBase {
-  type: 'stemmer_override'
-  rules?: string[]
-  rules_path?: string
-}
-
-export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase {
-  type: 'stemmer'
-  language?: string
-  name?: string
-}
-
-export interface AnalysisStopAnalyzer {
-  type: 'stop'
-  version?: VersionString
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase {
-  type: 'stop'
-  ignore_case?: boolean
-  remove_trailing?: boolean
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export type AnalysisStopWords = string | string[]
-
-export interface AnalysisSwedishAnalyzer {
-  type: 'swedish'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export type AnalysisSynonymFormat = 'solr' | 'wordnet'
-
-export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase {
-  type: 'synonym_graph'
-  expand?: boolean
-  format?: AnalysisSynonymFormat
-  lenient?: boolean
-  synonyms?: string[]
-  synonyms_path?: string
-  synonyms_set?: string
-  tokenizer?: string
-  updateable?: boolean
-}
-
-export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
-  type: 'synonym'
-  expand?: boolean
-  format?: AnalysisSynonymFormat
-  lenient?: boolean
-  synonyms?: string[]
-  synonyms_path?: string
-  synonyms_set?: string
-  tokenizer?: string
-  updateable?: boolean
-}
-
-export interface AnalysisThaiAnalyzer {
-  type: 'thai'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisThaiTokenizer extends AnalysisTokenizerBase {
-  type: 'thai'
-}
-
-export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'
-
-export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition
-
-export interface AnalysisTokenFilterBase {
-  version?: VersionString
-}
-
-export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter
-
-export type AnalysisTokenizer = string | AnalysisTokenizerDefinition
-
-export interface AnalysisTokenizerBase {
-  version?: VersionString
-}
-
-export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisClassicTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisPathHierarchyTokenizer | AnalysisPatternTokenizer | AnalysisSimplePatternTokenizer | AnalysisSimplePatternSplitTokenizer | AnalysisStandardTokenizer | AnalysisThaiTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisIcuTokenizer | AnalysisKuromojiTokenizer | AnalysisNoriTokenizer
-
-export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {
-  type: 'trim'
-}
-
-export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase {
-  type: 'truncate'
-  length?: integer
-}
-
-export interface AnalysisTurkishAnalyzer {
-  type: 'turkish'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase {
-  type: 'uax_url_email'
-  max_token_length?: integer
-}
-
-export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase {
-  type: 'unique'
-  only_on_same_position?: boolean
-}
-
-export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase {
-  type: 'uppercase'
-}
-
-export interface
-  type: 'whitespace'
-  version?: VersionString
-}
-
-export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase {
-  type: 'whitespace'
-  max_token_length?: integer
-}
-
-export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilterBase {
-  type: 'word_delimiter_graph'
-  adjust_offsets?: boolean
-  catenate_all?: boolean
-  catenate_numbers?: boolean
-  catenate_words?: boolean
-  generate_number_parts?: boolean
-  generate_word_parts?: boolean
-  ignore_keywords?: boolean
-  preserve_original?: SpecUtilsStringified
-  protected_words?: string[]
-  protected_words_path?: string
-  split_on_case_change?: boolean
-  split_on_numerics?: boolean
-  stem_english_possessive?: boolean
-  type_table?: string[]
-  type_table_path?: string
-}
-
-export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBase {
-  type: 'word_delimiter'
-  catenate_all?: boolean
-  catenate_numbers?: boolean
-  catenate_words?: boolean
-  generate_number_parts?: boolean
-  generate_word_parts?: boolean
-  preserve_original?: SpecUtilsStringified
-  protected_words?: string[]
-  protected_words_path?: string
-  split_on_case_change?: boolean
-  split_on_numerics?: boolean
-  stem_english_possessive?: boolean
-  type_table?: string[]
-  type_table_path?: string
-}
-
-export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase {
-  type: 'aggregate_metric_double'
-  default_metric: string
-  metrics: string[]
-  time_series_metric?: MappingTimeSeriesMetricType
-}
-
-export interface MappingAllField {
-  analyzer: string
-  enabled: boolean
-  omit_norms: boolean
-  search_analyzer: string
-  similarity: string
-  store: boolean
-  store_term_vector_offsets: boolean
-  store_term_vector_payloads: boolean
-  store_term_vector_positions: boolean
-  store_term_vectors: boolean
-}
-
-export interface MappingBinaryProperty extends MappingDocValuesPropertyBase {
-  type: 'binary'
-}
-
-export interface MappingBooleanProperty extends MappingDocValuesPropertyBase {
-  boost?: double
-  fielddata?: IndicesNumericFielddata
-  index?: boolean
-  null_value?: boolean
-  type: 'boolean'
-}
-
-export interface MappingByteNumberProperty extends MappingNumberPropertyBase {
-  type: 'byte'
-  null_value?: byte
-}
-
-export interface MappingCompletionProperty extends MappingDocValuesPropertyBase {
-  analyzer?: string
-  contexts?: MappingSuggestContext[]
-  max_input_length?: integer
-  preserve_position_increments?: boolean
-  preserve_separators?: boolean
-  search_analyzer?: string
-  type: 'completion'
-}
-
-export interface MappingCompositeSubField {
-  type: MappingRuntimeFieldType
-}
-
-export interface MappingConstantKeywordProperty extends MappingPropertyBase {
-  value?: any
-  type: 'constant_keyword'
-}
-
-export interface MappingCorePropertyBase extends MappingPropertyBase {
-  copy_to?: Fields
-  store?: boolean
-}
-
-export interface MappingDataStreamTimestamp {
-  enabled: boolean
-}
-
-export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase {
-  boost?: double
-  format?: string
-  ignore_malformed?: boolean
-  index?: boolean
-  null_value?: DateTime
-  precision_step?: integer
-  type: 'date_nanos'
-}
-
-export interface MappingDateProperty extends MappingDocValuesPropertyBase {
-  boost?: double
-  fielddata?: IndicesNumericFielddata
-  format?: string
-  ignore_malformed?: boolean
-  index?: boolean
-  null_value?: DateTime
-  precision_step?: integer
-  locale?: string
-  type: 'date'
-}
-
-export interface MappingDateRangeProperty extends MappingRangePropertyBase {
-  format?: string
-  type: 'date_range'
-}
-
-export type MappingDenseVectorElementType = 'bit' | 'byte' | 'float'
-
-export interface MappingDenseVectorIndexOptions {
-  confidence_interval?: float
-  ef_construction?: integer
-  m?: integer
-  type: MappingDenseVectorIndexOptionsType
-}
-
-export type MappingDenseVectorIndexOptionsType = 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw'
-
-export interface MappingDenseVectorProperty extends MappingPropertyBase {
-  type: 'dense_vector'
-  dims?: integer
-  element_type?: MappingDenseVectorElementType
-  index?: boolean
-  index_options?: MappingDenseVectorIndexOptions
-  similarity?: MappingDenseVectorSimilarity
-}
-
-export type MappingDenseVectorSimilarity = 'cosine' | 'dot_product' | 'l2_norm' | 'max_inner_product'
-
-export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase {
-  doc_values?: boolean
-}
-
-export interface MappingDoubleNumberProperty extends MappingNumberPropertyBase {
-  type: 'double'
-  null_value?: double
-}
-
-export interface MappingDoubleRangeProperty extends MappingRangePropertyBase {
-  type: 'double_range'
-}
-
-export type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false'
-
-export interface MappingDynamicProperty extends MappingDocValuesPropertyBase {
-  type: '{dynamic_type}'
-  enabled?: boolean
-  null_value?: FieldValue
-  boost?: double
-  coerce?: boolean
-  script?: Script | string
-  on_script_error?: MappingOnScriptError
-  ignore_malformed?: boolean
-  time_series_metric?: MappingTimeSeriesMetricType
-  analyzer?: string
-  eager_global_ordinals?: boolean
-  index?: boolean
-  index_options?: MappingIndexOptions
-  index_phrases?: boolean
-  index_prefixes?: MappingTextIndexPrefixes | null
-  norms?: boolean
-  position_increment_gap?: integer
-  search_analyzer?: string
-  search_quote_analyzer?: string
-  term_vector?: MappingTermVectorOption
-  format?: string
-  precision_step?: integer
-  locale?: string
-}
-
-export interface MappingDynamicTemplate {
-  mapping?: MappingProperty
-  runtime?: MappingProperty
-  match?: string | string[]
-  path_match?: string | string[]
-  unmatch?: string | string[]
-  path_unmatch?: string | string[]
-  match_mapping_type?: string | string[]
-  unmatch_mapping_type?: string | string[]
-  match_pattern?: MappingMatchType
-}
-
-export interface MappingFieldAliasProperty extends MappingPropertyBase {
-  path?: Field
-  type: 'alias'
-}
-
-export interface MappingFieldMapping {
-  full_name: string
-  mapping: Partial>
-}
-
-export interface MappingFieldNamesField {
-  enabled: boolean
-}
-
-export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword'
-
-export interface MappingFlattenedProperty extends MappingPropertyBase {
-  boost?: double
-  depth_limit?: integer
-  doc_values?: boolean
-  eager_global_ordinals?: boolean
-  index?: boolean
-  index_options?: MappingIndexOptions
-  null_value?: string
-  similarity?: string
-  split_queries_on_whitespace?: boolean
-  type: 'flattened'
-}
-
-export interface MappingFloatNumberProperty extends MappingNumberPropertyBase {
-  type: 'float'
-  null_value?: float
-}
-
-export interface MappingFloatRangeProperty extends MappingRangePropertyBase {
-  type: 'float_range'
-}
-
-export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw'
-
-export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase {
-  ignore_malformed?: boolean
-  ignore_z_value?: boolean
-  null_value?: GeoLocation
-  index?: boolean
-  on_script_error?: MappingOnScriptError
-  script?: Script | string
-  type: 'geo_point'
-}
-
-export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase {
-  coerce?: boolean
-  ignore_malformed?: boolean
-  ignore_z_value?: boolean
-  orientation?: MappingGeoOrientation
-  strategy?: MappingGeoStrategy
-  type: 'geo_shape'
-}
-
-export type MappingGeoStrategy = 'recursive' | 'term'
-
-export interface MappingHalfFloatNumberProperty extends MappingNumberPropertyBase {
-  type: 'half_float'
-  null_value?: float
-}
-
-export interface MappingHistogramProperty extends MappingPropertyBase {
-  ignore_malformed?: boolean
-  type: 'histogram'
-}
-
-export interface MappingIcuCollationProperty extends MappingDocValuesPropertyBase {
-  type: 'icu_collation_keyword'
-  norms?: boolean
-  index_options?: MappingIndexOptions
-  index?: boolean
-  null_value?: string
-  rules?: string
-  language?: string
-  country?: string
-  variant?: string
-  strength?: AnalysisIcuCollationStrength
-  decomposition?: AnalysisIcuCollationDecomposition
-  alternate?: AnalysisIcuCollationAlternate
-  case_level?: boolean
-  case_first?: AnalysisIcuCollationCaseFirst
-  numeric?: boolean
-  variable_top?: string
-  hiragana_quaternary_mode?: boolean
-}
-
-export interface MappingIndexField {
-  enabled: boolean
-}
-
-export type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets'
-
-export interface MappingIntegerNumberProperty extends MappingNumberPropertyBase {
-  type: 'integer'
-  null_value?: integer
-}
-
-export interface MappingIntegerRangeProperty extends MappingRangePropertyBase {
-  type: 'integer_range'
-}
-
-export interface MappingIpProperty extends MappingDocValuesPropertyBase {
-  boost?: double
-  index?: boolean
-  ignore_malformed?: boolean
-  null_value?: string
-  on_script_error?: MappingOnScriptError
-  script?: Script | string
-  time_series_dimension?: boolean
-  type: 'ip'
-}
-
-export interface MappingIpRangeProperty extends MappingRangePropertyBase {
-  type: 'ip_range'
-}
-
-export interface MappingJoinProperty extends MappingPropertyBase {
-  relations?: Record
-  eager_global_ordinals?: boolean
-  type: 'join'
-}
-
-export interface MappingKeywordProperty extends MappingDocValuesPropertyBase {
-  boost?: double
-  eager_global_ordinals?: boolean
-  index?: boolean
-  index_options?: MappingIndexOptions
-  script?: Script | string
-  on_script_error?: MappingOnScriptError
-  normalizer?: string
-  norms?: boolean
-  null_value?: string
-  similarity?: string | null
-  split_queries_on_whitespace?: boolean
-  time_series_dimension?: boolean
-  type: 'keyword'
-}
-
-export interface MappingLongNumberProperty extends MappingNumberPropertyBase {
-  type: 'long'
-  null_value?: long
-}
-
-export interface MappingLongRangeProperty extends MappingRangePropertyBase {
-  type: 'long_range'
-}
-
-export interface MappingMatchOnlyTextProperty {
-  type: 'match_only_text'
-  fields?: Record
-  meta?: Record
-  copy_to?: Fields
-}
-
-export type MappingMatchType = 'simple' | 'regex'
-
-export interface MappingMurmur3HashProperty extends MappingDocValuesPropertyBase {
-  type: 'murmur3'
-}
-
-export interface MappingNestedProperty extends MappingCorePropertyBase {
-  enabled?: boolean
-  include_in_parent?: boolean
-  include_in_root?: boolean
-  type: 'nested'
-}
-
-export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase {
-  boost?: double
-  coerce?: boolean
-  ignore_malformed?: boolean
-  index?: boolean
-  on_script_error?: MappingOnScriptError
-  script?: Script | string
-  time_series_metric?: MappingTimeSeriesMetricType
-  time_series_dimension?: boolean
-}
-
-export interface MappingObjectProperty extends MappingCorePropertyBase {
-  enabled?: boolean
-  subobjects?: boolean
-  type?: 'object'
-}
-
-export type MappingOnScriptError = 'fail' | 'continue'
-
-export interface MappingPercolatorProperty extends MappingPropertyBase {
-  type: 'percolator'
-}
-
-export interface MappingPointProperty extends MappingDocValuesPropertyBase {
-  ignore_malformed?: boolean
-  ignore_z_value?: boolean
-  null_value?: string
-  type: 'point'
-}
-
-export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty
-
-export interface MappingPropertyBase {
-  meta?: Record
-  properties?: Record
-  ignore_above?: integer
-  dynamic?: MappingDynamicMapping
-  fields?: Record
-}
-
-export interface MappingRangePropertyBase extends MappingDocValuesPropertyBase {
-  boost?: double
-  coerce?: boolean
-  index?: boolean
-}
-
-export interface MappingRankFeatureProperty extends MappingPropertyBase {
-  positive_score_impact?: boolean
-  type: 'rank_feature'
-}
-
-export interface MappingRankFeaturesProperty extends MappingPropertyBase {
-  positive_score_impact?: boolean
-  type: 'rank_features'
-}
-
-export interface MappingRoutingField {
-  required: boolean
-}
-
-export interface MappingRuntimeField {
-  fields?: Record
-  fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[]
-  format?: string
-  input_field?: Field
-  target_field?: Field
-  target_index?: IndexName
-  script?: Script | string
-  type: MappingRuntimeFieldType
-}
-
-export interface MappingRuntimeFieldFetchFields {
-  field: Field
-  format?: string
-}
-
-export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' | 'lookup'
-
-export type MappingRuntimeFields = Record
-
-export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase {
-  type: 'scaled_float'
-  null_value?: double
-  scaling_factor?: double
-}
-
-export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase {
-  analyzer?: string
-  index?: boolean
-  index_options?: MappingIndexOptions
-  max_shingle_size?: integer
-  norms?: boolean
-  search_analyzer?: string
-  search_quote_analyzer?: string
-  similarity?: string | null
-  term_vector?: MappingTermVectorOption
-  type: 'search_as_you_type'
-}
-
-export interface MappingSemanticTextProperty {
-  type: 'semantic_text'
-  meta?: Record
-  inference_id: Id
-}
-
-export interface MappingShapeProperty extends MappingDocValuesPropertyBase {
-  coerce?: boolean
-  ignore_malformed?: boolean
-  ignore_z_value?: boolean
-  orientation?: MappingGeoOrientation
-  type: 'shape'
-}
-
-export interface MappingShortNumberProperty extends MappingNumberPropertyBase {
-  type: 'short'
-  null_value?: short
-}
-
-export interface MappingSizeField {
-  enabled: boolean
-}
-
-export interface MappingSourceField {
-  compress?: boolean
-  compress_threshold?: string
-  enabled?: boolean
-  excludes?: string[]
-  includes?: string[]
-  mode?: MappingSourceFieldMode
-}
-
-export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic'
-
-export interface MappingSparseVectorProperty extends MappingPropertyBase {
-  type: 'sparse_vector'
-}
-
-export interface MappingSuggestContext {
-  name: Name
-  path?: Field
-  type: string
-  precision?: integer | string
-}
-
-export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' | 'with_positions_payloads'
-
-export interface MappingTextIndexPrefixes {
-  max_chars: integer
-  min_chars: integer
-}
-
-export interface MappingTextProperty extends MappingCorePropertyBase {
-  analyzer?: string
-  boost?: double
-  eager_global_ordinals?: boolean
-  fielddata?: boolean
-  fielddata_frequency_filter?: IndicesFielddataFrequencyFilter
-  index?: boolean
-  index_options?: MappingIndexOptions
-  index_phrases?: boolean
-  index_prefixes?: MappingTextIndexPrefixes | null
-  norms?: boolean
-  position_increment_gap?: integer
-  search_analyzer?: string
-  search_quote_analyzer?: string
-  similarity?: string | null
-  term_vector?: MappingTermVectorOption
-  type: 'text'
-}
-
-export type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram' | 'position'
-
-export interface MappingTokenCountProperty extends MappingDocValuesPropertyBase {
-  analyzer?: string
-  boost?: double
-  index?: boolean
-  null_value?: double
-  enable_position_increments?: boolean
-  type: 'token_count'
-}
-
-export interface MappingTypeMapping {
-  all_field?: MappingAllField
-  date_detection?: boolean
-  dynamic?: MappingDynamicMapping
-  dynamic_date_formats?: string[]
-  dynamic_templates?: Record[]
-  _field_names?: MappingFieldNamesField
-  index_field?: MappingIndexField
-  _meta?: Metadata
-  numeric_detection?: boolean
-  properties?: Record
-  _routing?: MappingRoutingField
-  _size?: MappingSizeField
-  _source?: MappingSourceField
-  runtime?: Record
-  enabled?: boolean
-  subobjects?: boolean
-  _data_stream_timestamp?: MappingDataStreamTimestamp
-}
-
-export interface MappingUnsignedLongNumberProperty extends MappingNumberPropertyBase {
-  type: 'unsigned_long'
-  null_value?: ulong
-}
-
-export interface MappingVersionProperty extends MappingDocValuesPropertyBase {
-  type: 'version'
-}
-
-export interface MappingWildcardProperty extends MappingDocValuesPropertyBase {
-  type: 'wildcard'
-  null_value?: string
-}
-
-export interface QueryDslBoolQuery extends QueryDslQueryBase {
-  filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
-  minimum_should_match?: MinimumShouldMatch
-  must?: QueryDslQueryContainer | QueryDslQueryContainer[]
-  must_not?: QueryDslQueryContainer | QueryDslQueryContainer[]
-  should?: QueryDslQueryContainer | QueryDslQueryContainer[]
-}
-
-export interface QueryDslBoostingQuery extends QueryDslQueryBase {
-  negative_boost: double
-  negative: QueryDslQueryContainer
-  positive: QueryDslQueryContainer
-}
-
-export type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min'
-
-export type QueryDslCombinedFieldsOperator = 'or' | 'and'
-
-export interface QueryDslCombinedFieldsQuery extends QueryDslQueryBase {
-  fields: Field[]
-  query: string
-  auto_generate_synonyms_phrase_query?: boolean
-  operator?: QueryDslCombinedFieldsOperator
-  minimum_should_match?: MinimumShouldMatch
-  zero_terms_query?: QueryDslCombinedFieldsZeroTerms
-}
-
-export type QueryDslCombinedFieldsZeroTerms = 'none' | 'all'
-
-export interface QueryDslCommonTermsQuery extends QueryDslQueryBase {
-  analyzer?: string
-  cutoff_frequency?: double
-  high_freq_operator?: QueryDslOperator
-  low_freq_operator?: QueryDslOperator
-  minimum_should_match?: MinimumShouldMatch
-  query: string
-}
-
-export interface QueryDslConstantScoreQuery extends QueryDslQueryBase {
-  filter: QueryDslQueryContainer
-}
-
-export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase {
-}
-export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode }
-
-export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase {
-}
-
-export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase {
-  format?: DateFormat
-  time_zone?: TimeZone
-}
-
-export type QueryDslDecayFunction = QueryDslUntypedDecayFunction | QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction
-
-export interface QueryDslDecayFunctionBase {
-  multi_value_mode?: QueryDslMultiValueMode
-}
-
-export interface QueryDslDecayPlacement {
-  decay?: double
-  offset?: TScale
-  scale?: TScale
-  origin?: TOrigin
-}
-
-export interface QueryDslDisMaxQuery extends QueryDslQueryBase {
-  queries: QueryDslQueryContainer[]
-  tie_breaker?: double
-}
-
-export type QueryDslDistanceFeatureQuery = QueryDslUntypedDistanceFeatureQuery | QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery
-
-export interface QueryDslDistanceFeatureQueryBase extends QueryDslQueryBase {
-  origin: TOrigin
-  pivot: TDistance
-  field: Field
-}
-
-export interface QueryDslExistsQuery extends QueryDslQueryBase {
-  field: Field
-}
-
-export interface QueryDslFieldAndFormat {
-  field: Field
-  format?: string
-  include_unmapped?: boolean
-}
-
-export interface QueryDslFieldLookup {
-  id: Id
-  index?: IndexName
-  path?: Field
-  routing?: Routing
-}
-
-export type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal'
-
-export interface QueryDslFieldValueFactorScoreFunction {
-  field: Field
-  factor?: double
-  missing?: double
-  modifier?: QueryDslFieldValueFactorModifier
-}
-
-export type QueryDslFunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min'
-
-export interface QueryDslFunctionScoreContainer {
-  exp?: QueryDslDecayFunction
-  gauss?: QueryDslDecayFunction
-  linear?: QueryDslDecayFunction
-  field_value_factor?: QueryDslFieldValueFactorScoreFunction
-  random_score?: QueryDslRandomScoreFunction
-  script_score?: QueryDslScriptScoreFunction
-  filter?: QueryDslQueryContainer
-  weight?: double
-}
-
-export type QueryDslFunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min'
-
-export interface QueryDslFunctionScoreQuery extends QueryDslQueryBase {
-  boost_mode?: QueryDslFunctionBoostMode
-  functions?: QueryDslFunctionScoreContainer[]
-  max_boost?: double
-  min_score?: double
-  query?: QueryDslQueryContainer
-  score_mode?: QueryDslFunctionScoreMode
-}
-
-export interface QueryDslFuzzyQuery extends QueryDslQueryBase {
-  max_expansions?: integer
-  prefix_length?: integer
-  rewrite?: MultiTermQueryRewrite
-  transpositions?: boolean
-  fuzziness?: Fuzziness
-  value: string | double | boolean
-}
-
-export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase {
-  type?: QueryDslGeoExecution
-  validation_method?: QueryDslGeoValidationMethod
-  ignore_unmapped?: boolean
-}
-export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys
-& { [property: string]: GeoBounds | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string }
-
-export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase {
-}
-export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode }
-
-export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase {
-}
-
-export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase {
-  distance: Distance
-  distance_type?: GeoDistanceType
-  validation_method?: QueryDslGeoValidationMethod
-  ignore_unmapped?: boolean
-}
-export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys
-& { [property: string]: GeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | boolean | float | string }
-
-export type QueryDslGeoExecution = 'memory' | 'indexed'
-
-export interface QueryDslGeoPolygonPoints {
-  points: GeoLocation[]
-}
-
-export interface QueryDslGeoPolygonQueryKeys extends QueryDslQueryBase {
-  validation_method?: QueryDslGeoValidationMethod
-  ignore_unmapped?: boolean
-}
-export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys
-& { [property: string]: QueryDslGeoPolygonPoints | QueryDslGeoValidationMethod | boolean | float | string }
-
-export interface QueryDslGeoShapeFieldQuery {
-  shape?: GeoShape
-  indexed_shape?: QueryDslFieldLookup
-  relation?: GeoShapeRelation
-}
-
-export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase {
-  ignore_unmapped?: boolean
-}
-export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys
-& { [property: string]: QueryDslGeoShapeFieldQuery | boolean | float | string }
-
-export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict'
-
-export interface QueryDslHasChildQuery extends QueryDslQueryBase {
-  ignore_unmapped?: boolean
-  inner_hits?: SearchInnerHits
-  max_children?: integer
-  min_children?: integer
-  query: QueryDslQueryContainer
-  score_mode?: QueryDslChildScoreMode
-  type: RelationName
-}
-
-export interface QueryDslHasParentQuery extends QueryDslQueryBase {
-  ignore_unmapped?: boolean
-  inner_hits?: SearchInnerHits
-  parent_type: RelationName
-  query: QueryDslQueryContainer
-  score?: boolean
-}
-
-export interface QueryDslIdsQuery extends QueryDslQueryBase {
-  values?: Ids
-}
-
-export interface QueryDslIntervalsAllOf {
-  intervals: QueryDslIntervalsContainer[]
-  max_gaps?: integer
-  ordered?: boolean
-  filter?: QueryDslIntervalsFilter
-}
-
-export interface QueryDslIntervalsAnyOf {
-  intervals: QueryDslIntervalsContainer[]
-  filter?: QueryDslIntervalsFilter
-}
-
-export interface QueryDslIntervalsContainer {
-  all_of?: QueryDslIntervalsAllOf
-  any_of?: QueryDslIntervalsAnyOf
-  fuzzy?: QueryDslIntervalsFuzzy
-  match?: QueryDslIntervalsMatch
-  prefix?: QueryDslIntervalsPrefix
-  wildcard?: QueryDslIntervalsWildcard
-}
-
-export interface QueryDslIntervalsFilter {
-  after?: QueryDslIntervalsContainer
-  before?: QueryDslIntervalsContainer
-  contained_by?: QueryDslIntervalsContainer
-  containing?: QueryDslIntervalsContainer
-  not_contained_by?: QueryDslIntervalsContainer
-  not_containing?: QueryDslIntervalsContainer
-  not_overlapping?: QueryDslIntervalsContainer
-  overlapping?: QueryDslIntervalsContainer
-  script?: Script | string
-}
-
-export interface QueryDslIntervalsFuzzy {
-  analyzer?: string
-  fuzziness?: Fuzziness
-  prefix_length?: integer
-  term: string
-  transpositions?: boolean
-  use_field?: Field
-}
-
-export interface QueryDslIntervalsMatch {
-  analyzer?: string
-  max_gaps?: integer
-  ordered?: boolean
-  query: string
-  use_field?: Field
-  filter?: QueryDslIntervalsFilter
-}
-
-export interface QueryDslIntervalsPrefix {
-  analyzer?: string
-  prefix: string
-  use_field?: Field
-}
-
-export interface QueryDslIntervalsQuery extends QueryDslQueryBase {
-  all_of?: QueryDslIntervalsAllOf
-  any_of?: QueryDslIntervalsAnyOf
-  fuzzy?: QueryDslIntervalsFuzzy
-  match?: QueryDslIntervalsMatch
-  prefix?: QueryDslIntervalsPrefix
-  wildcard?: QueryDslIntervalsWildcard
-}
-
-export interface QueryDslIntervalsWildcard {
-  analyzer?: string
-  pattern: string
-  use_field?: Field
-}
-
-export type QueryDslLike = string | QueryDslLikeDocument
-
-export interface QueryDslLikeDocument {
-  doc?: any
-  fields?: Field[]
-  _id?: Id
-  _index?: IndexName
-  per_field_analyzer?: Record
-  routing?: Routing
-  version?: VersionNumber
-  version_type?: VersionType
-}
-
-export interface QueryDslMatchAllQuery extends QueryDslQueryBase {
-}
-
-export interface QueryDslMatchBoolPrefixQuery extends QueryDslQueryBase {
-  analyzer?: string
-  fuzziness?: Fuzziness
-  fuzzy_rewrite?: MultiTermQueryRewrite
-  fuzzy_transpositions?: boolean
-  max_expansions?: integer
-  minimum_should_match?: MinimumShouldMatch
-  operator?: QueryDslOperator
-  prefix_length?: integer
-  query: string
-}
-
-export interface QueryDslMatchNoneQuery extends QueryDslQueryBase {
-}
-
-export interface QueryDslMatchPhrasePrefixQuery extends QueryDslQueryBase {
-  analyzer?: string
-  max_expansions?: integer
-  query: string
-  slop?: integer
-  zero_terms_query?: QueryDslZeroTermsQuery
-}
-
-export interface QueryDslMatchPhraseQuery extends QueryDslQueryBase {
-  analyzer?: string
-  query: string
-  slop?: integer
-  zero_terms_query?: QueryDslZeroTermsQuery
-}
-
-export interface QueryDslMatchQuery extends QueryDslQueryBase {
-  analyzer?: string
-  auto_generate_synonyms_phrase_query?: boolean
-  cutoff_frequency?: double
-  fuzziness?: Fuzziness
-  fuzzy_rewrite?: MultiTermQueryRewrite
-  fuzzy_transpositions?: boolean
-  lenient?: boolean
-  max_expansions?: integer
-  minimum_should_match?: MinimumShouldMatch
-  operator?: QueryDslOperator
-  prefix_length?: integer
-  query: string | float | boolean
-  zero_terms_query?: QueryDslZeroTermsQuery
-}
-
-export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase {
-  analyzer?: string
-  boost_terms?: double
-  fail_on_unsupported_field?: boolean
-  fields?: Field[]
-  include?: boolean
-  like: QueryDslLike | QueryDslLike[]
-  max_doc_freq?: integer
-  max_query_terms?: integer
-  max_word_length?: integer
-  min_doc_freq?: integer
-  minimum_should_match?: MinimumShouldMatch
-  min_term_freq?: integer
-  min_word_length?: integer
-  routing?: Routing
-  stop_words?: AnalysisStopWords
-  unlike?: QueryDslLike | QueryDslLike[]
-  version?: VersionNumber
-  version_type?: VersionType
-}
-
-export interface QueryDslMultiMatchQuery extends QueryDslQueryBase {
-  analyzer?: string
-  auto_generate_synonyms_phrase_query?: boolean
-  cutoff_frequency?: double
-  fields?: Fields
-  fuzziness?: Fuzziness
-  fuzzy_rewrite?: MultiTermQueryRewrite
-  fuzzy_transpositions?: boolean
-  lenient?: boolean
-  max_expansions?: integer
-  minimum_should_match?: MinimumShouldMatch
-  operator?: QueryDslOperator
-  prefix_length?: integer
-  query: string
-  slop?: integer
-  tie_breaker?: double
-  type?: QueryDslTextQueryType
-  zero_terms_query?: QueryDslZeroTermsQuery
-}
-
-export type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum'
-
-export interface QueryDslNestedQuery extends QueryDslQueryBase {
-  ignore_unmapped?: boolean
-  inner_hits?: SearchInnerHits
-  path: Field
-  query: QueryDslQueryContainer
-  score_mode?: QueryDslChildScoreMode
-}
-
-export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase {
-}
-
-export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase {
-}
-export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode }
-
-export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR'
-
-export interface QueryDslParentIdQuery extends QueryDslQueryBase {
-  id?: Id
-  ignore_unmapped?: boolean
-  type?: RelationName
-}
-
-export interface QueryDslPercolateQuery extends QueryDslQueryBase {
-  document?: any
-  documents?: any[]
-  field: Field
-  id?: Id
-  index?: IndexName
-  name?: string
-  preference?: string
-  routing?: Routing
-  version?: VersionNumber
-}
-
-export interface QueryDslPinnedDoc {
-  _id: Id
-  _index: IndexName
-}
-
-export interface QueryDslPinnedQuery extends QueryDslQueryBase {
-  organic: QueryDslQueryContainer
-  ids?: Id[]
-  docs?: QueryDslPinnedDoc[]
-}
-
-export interface QueryDslPrefixQuery extends QueryDslQueryBase {
-  rewrite?: MultiTermQueryRewrite
-  value: string
-  case_insensitive?: boolean
-}
-
-export interface QueryDslQueryBase {
-  boost?: float
-  _name?: string
-}
-
-export interface QueryDslQueryContainer {
-  bool?: QueryDslBoolQuery
-  boosting?: QueryDslBoostingQuery
-  common?: Partial>
-  combined_fields?: QueryDslCombinedFieldsQuery
-  constant_score?: QueryDslConstantScoreQuery
-  dis_max?: QueryDslDisMaxQuery
-  distance_feature?: QueryDslDistanceFeatureQuery
-  exists?: QueryDslExistsQuery
-  function_score?: QueryDslFunctionScoreQuery | QueryDslFunctionScoreContainer[]
-  fuzzy?: Partial>
-  geo_bounding_box?: QueryDslGeoBoundingBoxQuery
-  geo_distance?: QueryDslGeoDistanceQuery
-  geo_polygon?: QueryDslGeoPolygonQuery
-  geo_shape?: QueryDslGeoShapeQuery
-  has_child?: QueryDslHasChildQuery
-  has_parent?: QueryDslHasParentQuery
-  ids?: QueryDslIdsQuery
-  intervals?: Partial>
-  knn?: KnnQuery
-  match?: Partial>
-  match_all?: QueryDslMatchAllQuery
-  match_bool_prefix?: Partial>
-  match_none?: QueryDslMatchNoneQuery
-  match_phrase?: Partial>
-  match_phrase_prefix?: Partial>
-  more_like_this?: QueryDslMoreLikeThisQuery
-  multi_match?: QueryDslMultiMatchQuery
-  nested?: QueryDslNestedQuery
-  parent_id?: QueryDslParentIdQuery
-  percolate?: QueryDslPercolateQuery
-  pinned?: QueryDslPinnedQuery
-  prefix?: Partial>
-  query_string?: QueryDslQueryStringQuery
-  range?: Partial>
-  rank_feature?: QueryDslRankFeatureQuery
-  regexp?: Partial>
-  rule?: QueryDslRuleQuery
-  script?: QueryDslScriptQuery
-  script_score?: QueryDslScriptScoreQuery
-  semantic?: QueryDslSemanticQuery
-  shape?: QueryDslShapeQuery
-  simple_query_string?: QueryDslSimpleQueryStringQuery
-  span_containing?: QueryDslSpanContainingQuery
-  span_field_masking?: QueryDslSpanFieldMaskingQuery
-  span_first?: QueryDslSpanFirstQuery
-  span_multi?: QueryDslSpanMultiTermQuery
-  span_near?: QueryDslSpanNearQuery
-  span_not?: QueryDslSpanNotQuery
-  span_or?: QueryDslSpanOrQuery
-  span_term?: Partial>
-  span_within?: QueryDslSpanWithinQuery
-  sparse_vector?: QueryDslSparseVectorQuery
-  term?: Partial>
-  terms?: QueryDslTermsQuery
-  terms_set?: Partial>
-  text_expansion?: Partial>
-  weighted_tokens?: Partial>
-  wildcard?: Partial>
-  wrapper?: QueryDslWrapperQuery
-  type?: QueryDslTypeQuery
-}
-
-export interface QueryDslQueryStringQuery extends QueryDslQueryBase {
-  allow_leading_wildcard?: boolean
-  analyzer?: string
-  analyze_wildcard?: boolean
-  auto_generate_synonyms_phrase_query?: boolean
-  default_field?: Field
-  default_operator?: QueryDslOperator
-  enable_position_increments?: boolean
-  escape?: boolean
-  fields?: Field[]
-  fuzziness?: Fuzziness
-  fuzzy_max_expansions?: integer
-  fuzzy_prefix_length?: integer
-  fuzzy_rewrite?: MultiTermQueryRewrite
-  fuzzy_transpositions?: boolean
-  lenient?: boolean
-  max_determinized_states?: integer
-  minimum_should_match?: MinimumShouldMatch
-  phrase_slop?: double
-  query: string
-  quote_analyzer?: string
-  quote_field_suffix?: string
-  rewrite?: MultiTermQueryRewrite
-  tie_breaker?: double
-  time_zone?: TimeZone
-  type?: QueryDslTextQueryType
-}
-
-export interface QueryDslRandomScoreFunction {
-  field?: Field
-  seed?: long | string
-}
-
-export type QueryDslRangeQuery = QueryDslUntypedRangeQuery | QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermRangeQuery
-
-export interface QueryDslRangeQueryBase extends QueryDslQueryBase {
-  relation?: QueryDslRangeRelation
-  gt?: T
-  gte?: T
-  lt?: T
-  lte?: T
-  from?: T | null
-  to?: T | null
-}
-
-export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects'
-
-export interface QueryDslRankFeatureFunction {
-}
-
-export interface QueryDslRankFeatureFunctionLinear {
-}
-
-export interface QueryDslRankFeatureFunctionLogarithm {
-  scaling_factor: float
-}
-
-export interface QueryDslRankFeatureFunctionSaturation {
-  pivot?: float
-}
-
-export interface QueryDslRankFeatureFunctionSigmoid {
-  pivot: float
-  exponent: float
-}
-
-export interface QueryDslRankFeatureQuery extends QueryDslQueryBase {
-  field: Field
-  saturation?: QueryDslRankFeatureFunctionSaturation
-  log?: QueryDslRankFeatureFunctionLogarithm
-  linear?: QueryDslRankFeatureFunctionLinear
-  sigmoid?: QueryDslRankFeatureFunctionSigmoid
-}
-
-export interface QueryDslRegexpQuery extends QueryDslQueryBase {
-  case_insensitive?: boolean
-  flags?: string
-  max_determinized_states?: integer
-  rewrite?: MultiTermQueryRewrite
-  value: string
-}
-
-export interface QueryDslRuleQuery extends QueryDslQueryBase {
-  organic: QueryDslQueryContainer
-  ruleset_ids: Id[]
-  match_criteria: any
-}
-
-export interface QueryDslScriptQuery extends QueryDslQueryBase {
-  script: Script | string
-}
-
-export interface QueryDslScriptScoreFunction {
-  script: Script | string
-}
-
-export interface QueryDslScriptScoreQuery extends QueryDslQueryBase {
-  min_score?: float
-  query: QueryDslQueryContainer
-  script: Script | string
-}
-
-export interface QueryDslSemanticQuery extends QueryDslQueryBase {
-  field: string
-  query: string
-}
-
-export interface QueryDslShapeFieldQuery {
-  indexed_shape?: QueryDslFieldLookup
-  relation?: GeoShapeRelation
-  shape?: GeoShape
-}
-
-export interface QueryDslShapeQueryKeys extends QueryDslQueryBase {
-  ignore_unmapped?: boolean
-}
-export type QueryDslShapeQuery = QueryDslShapeQueryKeys
-& { [property: string]: QueryDslShapeFieldQuery | boolean | float | string }
-
-export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'NOT' | 'OR' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL'
-
-export type QueryDslSimpleQueryStringFlags = SpecUtilsPipeSeparatedFlags
-
-export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase {
-  analyzer?: string
-  analyze_wildcard?: boolean
-  auto_generate_synonyms_phrase_query?: boolean
-  default_operator?: QueryDslOperator
-  fields?: Field[]
-  flags?: QueryDslSimpleQueryStringFlags
-  fuzzy_max_expansions?: integer
-  fuzzy_prefix_length?: integer
-  fuzzy_transpositions?: boolean
-  lenient?: boolean
-  minimum_should_match?: MinimumShouldMatch
-  query: string
-  quote_field_suffix?: string
-}
-
-export interface QueryDslSpanContainingQuery extends QueryDslQueryBase {
-  big: QueryDslSpanQuery
-  little: QueryDslSpanQuery
-}
-
-export interface QueryDslSpanFieldMaskingQuery extends QueryDslQueryBase {
-  field: Field
-  query: QueryDslSpanQuery
-}
-
-export interface QueryDslSpanFirstQuery extends QueryDslQueryBase {
-  end: integer
-  match: QueryDslSpanQuery
-}
-
-export type QueryDslSpanGapQuery = Partial>
-
-export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase {
-  match: QueryDslQueryContainer
-}
-
-export interface QueryDslSpanNearQuery extends QueryDslQueryBase {
-  clauses: QueryDslSpanQuery[]
-  in_order?: boolean
-  slop?: integer
-}
-
-export interface QueryDslSpanNotQuery extends QueryDslQueryBase {
-  dist?: integer
-  exclude: QueryDslSpanQuery
-  include: QueryDslSpanQuery
-  post?: integer
-  pre?: integer
-}
-
-export interface QueryDslSpanOrQuery extends QueryDslQueryBase {
-  clauses: QueryDslSpanQuery[]
-}
-
-export interface QueryDslSpanQuery {
-  span_containing?: QueryDslSpanContainingQuery
-  span_field_masking?: QueryDslSpanFieldMaskingQuery
-  span_first?: QueryDslSpanFirstQuery
-  span_gap?: QueryDslSpanGapQuery
-  span_multi?: QueryDslSpanMultiTermQuery
-  span_near?: QueryDslSpanNearQuery
-  span_not?: QueryDslSpanNotQuery
-  span_or?: QueryDslSpanOrQuery
-  span_term?: Partial>
-  span_within?: QueryDslSpanWithinQuery
-}
-
-export interface QueryDslSpanTermQuery extends QueryDslQueryBase {
-  value: string
-}
-
-export interface QueryDslSpanWithinQuery extends QueryDslQueryBase {
-  big: QueryDslSpanQuery
-  little: QueryDslSpanQuery
-}
-
-export interface QueryDslSparseVectorQuery extends QueryDslQueryBase {
-  field: Field
-  query_vector?: Record
-  inference_id?: Id
-  query?: string
-  prune?: boolean
-  pruning_config?: QueryDslTokenPruningConfig
-}
-
-export interface QueryDslTermQuery extends QueryDslQueryBase {
-  value: FieldValue
-  case_insensitive?: boolean
-}
-
-export interface QueryDslTermRangeQuery extends QueryDslRangeQueryBase {
-}
-
-export interface QueryDslTermsLookup {
-  index: IndexName
-  id: Id
-  path: Field
-  routing?: Routing
-}
-
-export interface QueryDslTermsQueryKeys extends QueryDslQueryBase {
-}
-export type QueryDslTermsQuery = QueryDslTermsQueryKeys
-& { [property: string]: QueryDslTermsQueryField | float | string }
-
-export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup
-
-export interface QueryDslTermsSetQuery extends QueryDslQueryBase {
-  minimum_should_match?: MinimumShouldMatch
-  minimum_should_match_field?: Field
-  minimum_should_match_script?: Script | string
-  terms: FieldValue[]
-}
-
-export interface QueryDslTextExpansionQuery extends QueryDslQueryBase {
-  model_id: string
-  model_text: string
-  pruning_config?: QueryDslTokenPruningConfig
-}
-
-export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix'
-
-export interface QueryDslTokenPruningConfig {
-  tokens_freq_ratio_threshold?: integer
-  tokens_weight_threshold?: float
-  only_score_pruned_tokens?: boolean
-}
-
-export interface QueryDslTypeQuery extends QueryDslQueryBase {
-  value: string
-}
-
-export interface QueryDslUntypedDecayFunctionKeys extends QueryDslDecayFunctionBase {
-}
-export type QueryDslUntypedDecayFunction = QueryDslUntypedDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode }
-
-export interface QueryDslUntypedDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase {
-}
-
-export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase {
-  format?: DateFormat
-  time_zone?: TimeZone
-}
-
-export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase {
-  tokens: Record
-  pruning_config?: QueryDslTokenPruningConfig
-}
-
-export interface QueryDslWildcardQuery extends QueryDslQueryBase {
-  case_insensitive?: boolean
-  rewrite?: MultiTermQueryRewrite
-  value?: string
-  wildcard?: string
-}
-
-export interface QueryDslWrapperQuery extends QueryDslQueryBase {
-  query: string
-}
-
-export type QueryDslZeroTermsQuery = 'all' | 'none'
-
-export interface AsyncSearchAsyncSearch> {
-  aggregations?: TAggregations
-  _clusters?: ClusterStatistics
-  fields?: Record
-  hits: SearchHitsMetadata
-  max_score?: double
-  num_reduce_phases?: long
-  profile?: SearchProfile
-  pit_id?: Id
-  _scroll_id?: ScrollId
-  _shards: ShardStatistics
-  suggest?: Record[]>
-  terminated_early?: boolean
-  timed_out: boolean
-  took: long
-}
-
-export interface AsyncSearchAsyncSearchDocumentResponseBase> extends AsyncSearchAsyncSearchResponseBase {
-  response: AsyncSearchAsyncSearch
-}
-
-export interface AsyncSearchAsyncSearchResponseBase {
-  id?: Id
-  is_partial: boolean
-  is_running: boolean
-  expiration_time?: DateTime
-  expiration_time_in_millis: EpochTime
-  start_time?: DateTime
-  start_time_in_millis: EpochTime
-  completion_time?: DateTime
-  completion_time_in_millis?: EpochTime
-}
-
-export interface AsyncSearchDeleteRequest extends RequestBase {
-  id: Id
-}
-
-export type AsyncSearchDeleteResponse = AcknowledgedResponseBase
-
-export interface AsyncSearchGetRequest extends RequestBase {
-  id: Id
-  keep_alive?: Duration
-  typed_keys?: boolean
-  wait_for_completion_timeout?: Duration
-}
-
-export type AsyncSearchGetResponse> = AsyncSearchAsyncSearchDocumentResponseBase
-
-export interface AsyncSearchStatusRequest extends RequestBase {
-  id: Id
-  keep_alive?: Duration
-}
-
-export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase
-
-export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSearchResponseBase {
-  _shards: ShardStatistics
-  _clusters?: ClusterStatistics
-  completion_status?: integer
-}
-
-export interface AsyncSearchSubmitRequest extends RequestBase {
-  index?: Indices
-  wait_for_completion_timeout?: Duration
-  keep_on_completion?: boolean
-  keep_alive?: Duration
-  allow_no_indices?: boolean
-  allow_partial_search_results?: boolean
-  analyzer?: string
-  analyze_wildcard?: boolean
-  batched_reduce_size?: long
-  ccs_minimize_roundtrips?: boolean
-  default_operator?: QueryDslOperator
-  df?: string
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-  lenient?: boolean
-  max_concurrent_shard_requests?: long
-  preference?: string
-  pre_filter_shard_size?: long
-  request_cache?: boolean
-  routing?: Routing
-  search_type?: SearchType
-  suggest_field?: Field
-  suggest_mode?: SuggestMode
-  suggest_size?: long
-  suggest_text?: string
-  typed_keys?: boolean
-  rest_total_hits_as_int?: boolean
-  _source_excludes?: Fields
-  _source_includes?: Fields
-  q?: string
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aggregations?: Record
-    /** @alias aggregations */
-    aggs?: Record
-    collapse?: SearchFieldCollapse
-    explain?: boolean
-    ext?: Record
-    from?: integer
-    highlight?: SearchHighlight
-    track_total_hits?: SearchTrackHits
-    indices_boost?: Record[]
-    docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
-    knn?: KnnSearch | KnnSearch[]
-    min_score?: double
-    post_filter?: QueryDslQueryContainer
-    profile?: boolean
-    query?: QueryDslQueryContainer
-    rescore?: SearchRescore | SearchRescore[]
-    script_fields?: Record
-    search_after?: SortResults
-    size?: integer
-    slice?: SlicedScroll
-    sort?: Sort
-    _source?: SearchSourceConfig
-    fields?: (QueryDslFieldAndFormat | Field)[]
-    suggest?: SearchSuggester
-    terminate_after?: long
-    timeout?: string
-    track_scores?: boolean
-    version?: boolean
-    seq_no_primary_term?: boolean
-    stored_fields?: Fields
-    pit?: SearchPointInTimeReference
-    runtime_mappings?: MappingRuntimeFields
-    stats?: string[]
-  }
-}
-
-export type AsyncSearchSubmitResponse> = AsyncSearchAsyncSearchDocumentResponseBase
-
-export interface AutoscalingAutoscalingPolicy {
-  roles: string[]
-  deciders: Record
-}
-
-export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type AutoscalingDeleteAutoscalingPolicyResponse = AcknowledgedResponseBase
-
-export interface AutoscalingGetAutoscalingCapacityAutoscalingCapacity {
-  node: AutoscalingGetAutoscalingCapacityAutoscalingResources
-  total: AutoscalingGetAutoscalingCapacityAutoscalingResources
-}
-
-export interface AutoscalingGetAutoscalingCapacityAutoscalingDecider {
-  required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity
-  reason_summary?: string
-  reason_details?: any
-}
-
-export interface AutoscalingGetAutoscalingCapacityAutoscalingDeciders {
-  required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity
-  current_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity
-  current_nodes: AutoscalingGetAutoscalingCapacityAutoscalingNode[]
-  deciders: Record
-}
-
-export interface AutoscalingGetAutoscalingCapacityAutoscalingNode {
-  name: NodeName
-}
-
-export interface AutoscalingGetAutoscalingCapacityAutoscalingResources {
-  storage: integer
-  memory: integer
-}
-
-export interface AutoscalingGetAutoscalingCapacityRequest extends RequestBase {
-  master_timeout?: Duration
-}
-
-export interface AutoscalingGetAutoscalingCapacityResponse {
-  policies: Record
-}
-
-export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-}
-
-export type AutoscalingGetAutoscalingPolicyResponse = AutoscalingAutoscalingPolicy
-
-export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, use 'policy' instead. */
-  body?: AutoscalingAutoscalingPolicy
-}
-
-export type AutoscalingPutAutoscalingPolicyResponse = AcknowledgedResponseBase
-
-export type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's'
-
-export type CatCatAnonalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[]
-
-export type CatCatDatafeedColumn = 'ae' | 'assignment_explanation' | 'bc' | 'buckets.count' | 'bucketsCount' | 'id' | 'na' | 'node.address' | 'nodeAddress' | 'ne' | 'node.ephemeral_id' | 'nodeEphemeralId' | 'ni' | 'node.id' | 'nodeId' | 'nn' | 'node.name' | 'nodeName' | 'sba' | 'search.bucket_avg' | 'searchBucketAvg' | 'sc' | 'search.count' | 'searchCount' | 'seah' | 'search.exp_avg_hour' | 'searchExpAvgHour' | 'st' | 'search.time' | 'searchTime' | 's' | 'state'
-
-export type CatCatDatafeedColumns = CatCatDatafeedColumn | CatCatDatafeedColumn[]
-
-export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | 'ct' | 'createTime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'failure_reason' | 'fr' | 'failureReason' | 'id' | 'model_memory_limit' | 'mml' | 'modelMemoryLimit' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'progress' | 'p' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'type' | 't' | 'version' | 'v'
-
-export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[]
-
-export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters {
-}
-
-export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v'
-
-export type CatCatTrainedModelsColumns = CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[]
-
-export type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'checkpoint' | 'cp' | 'checkpoint_duration_time_exp_avg' | 'cdtea' | 'checkpointTimeExpAvg' | 'checkpoint_progress' | 'c' | 'checkpointProgress' | 'create_time' | 'ct' | 'createTime' | 'delete_time' | 'dtime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'documents_deleted' | 'docd' | 'documents_indexed' | 'doci' | 'docs_per_second' | 'dps' | 'documents_processed' | 'docp' | 'frequency' | 'f' | 'id' | 'index_failure' | 'if' | 'index_time' | 'itime' | 'index_total' | 'it' | 'indexed_documents_exp_avg' | 'idea' | 'last_search_time' | 'lst' | 'lastSearchTime' | 'max_page_search_size' | 'mpsz' | 'pages_processed' | 'pp' | 'pipeline' | 'p' | 'processed_documents_exp_avg' | 'pdea' | 'processing_time' | 'pt' | 'reason' | 'r' | 'search_failure' | 'sf' | 'search_time' | 'stime' | 'search_total' | 'st' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'transform_type' | 'tt' | 'trigger_count' | 'tc' | 'version' | 'v'
-
-export type CatCatTransformColumns = CatCatTransformColumn | CatCatTransformColumn[]
-
-export interface CatAliasesAliasesRecord {
-  alias?: string
-  a?: string
-  index?: IndexName
-  i?: IndexName
-  idx?: IndexName
-  filter?: string
-  f?: string
-  fi?: string
-  'routing.index'?: string
-  ri?: string
-  routingIndex?: string
-  'routing.search'?: string
-  rs?: string
-  routingSearch?: string
-  is_write_index?: string
-  w?: string
-  isWriteIndex?: string
-}
-
-export interface CatAliasesRequest extends CatCatRequestBase {
-  name?: Names
-  expand_wildcards?: ExpandWildcards
-}
-
-export type CatAliasesResponse = CatAliasesAliasesRecord[]
-
-export interface CatAllocationAllocationRecord {
-  shards?: string
-  s?: string
-  'shards.undesired'?: string | null
-  'write_load.forecast'?: SpecUtilsStringified | null
-  wlf?: SpecUtilsStringified | null
-  writeLoadForecast?: SpecUtilsStringified | null
-  'disk.indices.forecast'?: ByteSize | null
-  dif?: ByteSize | null
-  diskIndicesForecast?: ByteSize | null
-  'disk.indices'?: ByteSize | null
-  di?: ByteSize | null
-  diskIndices?: ByteSize | null
-  'disk.used'?: ByteSize | null
-  du?: ByteSize | null
-  diskUsed?: ByteSize | null
-  'disk.avail'?: ByteSize | null
-  da?: ByteSize | null
-  diskAvail?: ByteSize | null
-  'disk.total'?: ByteSize | null
-  dt?: ByteSize | null
-  diskTotal?: ByteSize | null
-  'disk.percent'?: Percentage | null
-  dp?: Percentage | null
-  diskPercent?: Percentage | null
-  host?: Host | null
-  h?: Host | null
-  ip?: Ip | null
-  node?: string
-  n?: string
-  'node.role'?: string | null
-  r?: string | null
-  role?: string | null
-  nodeRole?: string | null
-}
-
-export interface CatAllocationRequest extends CatCatRequestBase {
-  node_id?: NodeIds
-  bytes?: Bytes
-  local?: boolean
-}
-
-export type CatAllocationResponse = CatAllocationAllocationRecord[]
-
-export interface CatComponentTemplatesComponentTemplate {
-  name: string
-  version: string
-  alias_count: string
-  mapping_count: string
-  settings_count: string
-  metadata_count: string
-  included_in: string
-}
-
-export interface CatComponentTemplatesRequest extends CatCatRequestBase {
-  name?: string
-  local?: boolean
-}
-
-export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[]
-
-export interface CatCountCountRecord {
-  epoch?: SpecUtilsStringified>
-  t?: SpecUtilsStringified>
-  time?: SpecUtilsStringified>
-  timestamp?: TimeOfDay
-  ts?: TimeOfDay
-  hms?: TimeOfDay
-  hhmmss?: TimeOfDay
-  count?: string
-  dc?: string
-  'docs.count'?: string
-  docsCount?: string
-}
-
-export interface CatCountRequest extends CatCatRequestBase {
-  index?: Indices
-}
-
-export type CatCountResponse = CatCountCountRecord[]
-
-export interface CatFielddataFielddataRecord {
-  id?: string
-  host?: string
-  h?: string
-  ip?: string
-  node?: string
-  n?: string
-  field?: string
-  f?: string
-  size?: string
-}
-
-export interface CatFielddataRequest extends CatCatRequestBase {
-  fields?: Fields
-  bytes?: Bytes
-}
-
-export type CatFielddataResponse = CatFielddataFielddataRecord[]
-
-export interface CatHealthHealthRecord {
-  epoch?: SpecUtilsStringified>
-  time?: SpecUtilsStringified>
-  timestamp?: TimeOfDay
-  ts?: TimeOfDay
-  hms?: TimeOfDay
-  hhmmss?: TimeOfDay
-  cluster?: string
-  cl?: string
-  status?: string
-  st?: string
-  'node.total'?: string
-  nt?: string
-  nodeTotal?: string
-  'node.data'?: string
-  nd?: string
-  nodeData?: string
-  shards?: string
-  t?: string
-  sh?: string
-  'shards.total'?: string
-  shardsTotal?: string
-  pri?: string
-  p?: string
-  'shards.primary'?: string
-  shardsPrimary?: string
-  relo?: string
-  r?: string
-  'shards.relocating'?: string
-  shardsRelocating?: string
-  init?: string
-  i?: string
-  'shards.initializing'?: string
-  shardsInitializing?: string
-  'unassign.pri'?: string
-  up?: string
-  'shards.unassigned.primary'?: string
-  shardsUnassignedPrimary?: string
-  unassign?: string
-  u?: string
-  'shards.unassigned'?: string
-  shardsUnassigned?: string
-  pending_tasks?: string
-  pt?: string
-  pendingTasks?: string
-  max_task_wait_time?: string
-  mtwt?: string
-  maxTaskWaitTime?: string
-  active_shards_percent?: string
-  asp?: string
-  activeShardsPercent?: string
-}
-
-export interface CatHealthRequest extends CatCatRequestBase {
-  time?: TimeUnit
-  ts?: boolean
-}
-
-export type CatHealthResponse = CatHealthHealthRecord[]
-
-export interface CatHelpHelpRecord {
-  endpoint: string
-}
-
-export interface CatHelpRequest extends CatCatRequestBase {
-}
-
-export type CatHelpResponse = CatHelpHelpRecord[]
-
-export interface CatIndicesIndicesRecord {
-  health?: string
-  h?: string
-  status?: string
-  s?: string
-  index?: string
-  i?: string
-  idx?: string
-  uuid?: string
-  id?: string
-  pri?: string
-  p?: string
-  'shards.primary'?: string
-  shardsPrimary?: string
-  rep?: string
-  r?: string
-  'shards.replica'?: string
-  shardsReplica?: string
-  'docs.count'?: string | null
-  dc?: string | null
-  docsCount?: string | null
-  'docs.deleted'?: string | null
-  dd?: string | null
-  docsDeleted?: string | null
-  'creation.date'?: string
-  cd?: string
-  'creation.date.string'?: string
-  cds?: string
-  'store.size'?: string | null
-  ss?: string | null
-  storeSize?: string | null
-  'pri.store.size'?: string | null
-  'dataset.size'?: string | null
-  'completion.size'?: string
-  cs?: string
-  completionSize?: string
-  'pri.completion.size'?: string
-  'fielddata.memory_size'?: string
-  fm?: string
-  fielddataMemory?: string
-  'pri.fielddata.memory_size'?: string
-  'fielddata.evictions'?: string
-  fe?: string
-  fielddataEvictions?: string
-  'pri.fielddata.evictions'?: string
-  'query_cache.memory_size'?: string
-  qcm?: string
-  queryCacheMemory?: string
-  'pri.query_cache.memory_size'?: string
-  'query_cache.evictions'?: string
-  qce?: string
-  queryCacheEvictions?: string
-  'pri.query_cache.evictions'?: string
-  'request_cache.memory_size'?: string
-  rcm?: string
-  requestCacheMemory?: string
-  'pri.request_cache.memory_size'?: string
-  'request_cache.evictions'?: string
-  rce?: string
-  requestCacheEvictions?: string
-  'pri.request_cache.evictions'?: string
-  'request_cache.hit_count'?: string
-  rchc?: string
-  requestCacheHitCount?: string
-  'pri.request_cache.hit_count'?: string
-  'request_cache.miss_count'?: string
-  rcmc?: string
-  requestCacheMissCount?: string
-  'pri.request_cache.miss_count'?: string
-  'flush.total'?: string
-  ft?: string
-  flushTotal?: string
-  'pri.flush.total'?: string
-  'flush.total_time'?: string
-  ftt?: string
-  flushTotalTime?: string
-  'pri.flush.total_time'?: string
-  'get.current'?: string
-  gc?: string
-  getCurrent?: string
-  'pri.get.current'?: string
-  'get.time'?: string
-  gti?: string
-  getTime?: string
-  'pri.get.time'?: string
-  'get.total'?: string
-  gto?: string
-  getTotal?: string
-  'pri.get.total'?: string
-  'get.exists_time'?: string
-  geti?: string
-  getExistsTime?: string
-  'pri.get.exists_time'?: string
-  'get.exists_total'?: string
-  geto?: string
-  getExistsTotal?: string
-  'pri.get.exists_total'?: string
-  'get.missing_time'?: string
-  gmti?: string
-  getMissingTime?: string
-  'pri.get.missing_time'?: string
-  'get.missing_total'?: string
-  gmto?: string
-  getMissingTotal?: string
-  'pri.get.missing_total'?: string
-  'indexing.delete_current'?: string
-  idc?: string
-  indexingDeleteCurrent?: string
-  'pri.indexing.delete_current'?: string
-
'indexing.delete_time'?: string - idti?: string - indexingDeleteTime?: string - 'pri.indexing.delete_time'?: string - 'indexing.delete_total'?: string - idto?: string - indexingDeleteTotal?: string - 'pri.indexing.delete_total'?: string - 'indexing.index_current'?: string - iic?: string - indexingIndexCurrent?: string - 'pri.indexing.index_current'?: string - 'indexing.index_time'?: string - iiti?: string - indexingIndexTime?: string - 'pri.indexing.index_time'?: string - 'indexing.index_total'?: string - iito?: string - indexingIndexTotal?: string - 'pri.indexing.index_total'?: string - 'indexing.index_failed'?: string - iif?: string - indexingIndexFailed?: string - 'pri.indexing.index_failed'?: string - 'merges.current'?: string - mc?: string - mergesCurrent?: string - 'pri.merges.current'?: string - 'merges.current_docs'?: string - mcd?: string - mergesCurrentDocs?: string - 'pri.merges.current_docs'?: string - 'merges.current_size'?: string - mcs?: string - mergesCurrentSize?: string - 'pri.merges.current_size'?: string - 'merges.total'?: string - mt?: string - mergesTotal?: string - 'pri.merges.total'?: string - 'merges.total_docs'?: string - mtd?: string - mergesTotalDocs?: string - 'pri.merges.total_docs'?: string - 'merges.total_size'?: string - mts?: string - mergesTotalSize?: string - 'pri.merges.total_size'?: string - 'merges.total_time'?: string - mtt?: string - mergesTotalTime?: string - 'pri.merges.total_time'?: string - 'refresh.total'?: string - rto?: string - refreshTotal?: string - 'pri.refresh.total'?: string - 'refresh.time'?: string - rti?: string - refreshTime?: string - 'pri.refresh.time'?: string - 'refresh.external_total'?: string - reto?: string - 'pri.refresh.external_total'?: string - 'refresh.external_time'?: string - reti?: string - 'pri.refresh.external_time'?: string - 'refresh.listeners'?: string - rli?: string - refreshListeners?: string - 'pri.refresh.listeners'?: string - 'search.fetch_current'?: string - sfc?: string - searchFetchCurrent?: string - 'pri.search.fetch_current'?: string - 'search.fetch_time'?: string - sfti?: string - searchFetchTime?: string - 'pri.search.fetch_time'?: string - 'search.fetch_total'?: string - sfto?: string - searchFetchTotal?: string - 'pri.search.fetch_total'?: string - 'search.open_contexts'?: string - so?: string - searchOpenContexts?: string - 'pri.search.open_contexts'?: string - 'search.query_current'?: string - sqc?: string - searchQueryCurrent?: string - 'pri.search.query_current'?: string - 'search.query_time'?: string - sqti?: string - searchQueryTime?: string - 'pri.search.query_time'?: string - 'search.query_total'?: string - sqto?: string - searchQueryTotal?: string - 'pri.search.query_total'?: string - 'search.scroll_current'?: string - scc?: string - searchScrollCurrent?: string - 'pri.search.scroll_current'?: string - 'search.scroll_time'?: string - scti?: string - searchScrollTime?: string - 'pri.search.scroll_time'?: string - 'search.scroll_total'?: string - scto?: string - searchScrollTotal?: string - 'pri.search.scroll_total'?: string - 'segments.count'?: string - sc?: string - segmentsCount?: string - 'pri.segments.count'?: string - 'segments.memory'?: string - sm?: string - segmentsMemory?: string - 'pri.segments.memory'?: string - 'segments.index_writer_memory'?: string - siwm?: string - segmentsIndexWriterMemory?: string - 'pri.segments.index_writer_memory'?: string - 'segments.version_map_memory'?: string - svmm?: string - segmentsVersionMapMemory?: string - 'pri.segments.version_map_memory'?: 
string - 'segments.fixed_bitset_memory'?: string - sfbm?: string - fixedBitsetMemory?: string - 'pri.segments.fixed_bitset_memory'?: string - 'warmer.current'?: string - wc?: string - warmerCurrent?: string - 'pri.warmer.current'?: string - 'warmer.total'?: string - wto?: string - warmerTotal?: string - 'pri.warmer.total'?: string - 'warmer.total_time'?: string - wtt?: string - warmerTotalTime?: string - 'pri.warmer.total_time'?: string - 'suggest.current'?: string - suc?: string - suggestCurrent?: string - 'pri.suggest.current'?: string - 'suggest.time'?: string - suti?: string - suggestTime?: string - 'pri.suggest.time'?: string - 'suggest.total'?: string - suto?: string - suggestTotal?: string - 'pri.suggest.total'?: string - 'memory.total'?: string - tm?: string - memoryTotal?: string - 'pri.memory.total'?: string - 'search.throttled'?: string - sth?: string - 'bulk.total_operations'?: string - bto?: string - bulkTotalOperation?: string - 'pri.bulk.total_operations'?: string - 'bulk.total_time'?: string - btti?: string - bulkTotalTime?: string - 'pri.bulk.total_time'?: string - 'bulk.total_size_in_bytes'?: string - btsi?: string - bulkTotalSizeInBytes?: string - 'pri.bulk.total_size_in_bytes'?: string - 'bulk.avg_time'?: string - bati?: string - bulkAvgTime?: string - 'pri.bulk.avg_time'?: string - 'bulk.avg_size_in_bytes'?: string - basi?: string - bulkAvgSizeInBytes?: string - 'pri.bulk.avg_size_in_bytes'?: string -} - -export interface CatIndicesRequest extends CatCatRequestBase { - index?: Indices - bytes?: Bytes - expand_wildcards?: ExpandWildcards - health?: HealthStatus - include_unloaded_segments?: boolean - pri?: boolean - time?: TimeUnit -} - -export type CatIndicesResponse = CatIndicesIndicesRecord[] - -export interface CatMasterMasterRecord { - id?: string - host?: string - h?: string - ip?: string - node?: string - n?: string -} - -export interface CatMasterRequest extends CatCatRequestBase { - local?: boolean -} - -export type CatMasterResponse = CatMasterMasterRecord[] - -export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { - id?: Id - type?: string - t?: string - create_time?: string - ct?: string - createTime?: string - version?: VersionString - v?: VersionString - source_index?: IndexName - si?: IndexName - sourceIndex?: IndexName - dest_index?: IndexName - di?: IndexName - destIndex?: IndexName - description?: string - d?: string - model_memory_limit?: string - mml?: string - modelMemoryLimit?: string - state?: string - s?: string - failure_reason?: string - fr?: string - failureReason?: string - progress?: string - p?: string - assignment_explanation?: string - ae?: string - assignmentExplanation?: string - 'node.id'?: Id - ni?: Id - nodeId?: Id - 'node.name'?: Name - nn?: Name - nodeName?: Name - 'node.ephemeral_id'?: Id - ne?: Id - nodeEphemeralId?: Id - 'node.address'?: string - na?: string - nodeAddress?: string -} - -export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { - id?: Id - allow_no_match?: boolean - bytes?: Bytes - h?: CatCatDfaColumns - s?: CatCatDfaColumns - time?: Duration -} - -export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] - -export interface CatMlDatafeedsDatafeedsRecord { - id?: string - state?: MlDatafeedState - s?: MlDatafeedState - assignment_explanation?: string - ae?: string - 'buckets.count'?: string - bc?: string - bucketsCount?: string - 'search.count'?: string - sc?: string - searchCount?: string - 'search.time'?: string - st?: string - searchTime?: 
string - 'search.bucket_avg'?: string - sba?: string - searchBucketAvg?: string - 'search.exp_avg_hour'?: string - seah?: string - searchExpAvgHour?: string - 'node.id'?: string - ni?: string - nodeId?: string - 'node.name'?: string - nn?: string - nodeName?: string - 'node.ephemeral_id'?: string - ne?: string - nodeEphemeralId?: string - 'node.address'?: string - na?: string - nodeAddress?: string -} - -export interface CatMlDatafeedsRequest extends CatCatRequestBase { - datafeed_id?: Id - allow_no_match?: boolean - h?: CatCatDatafeedColumns - s?: CatCatDatafeedColumns - time?: TimeUnit -} - -export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] - -export interface CatMlJobsJobsRecord { - id?: Id - state?: MlJobState - s?: MlJobState - opened_time?: string - ot?: string - assignment_explanation?: string - ae?: string - 'data.processed_records'?: string - dpr?: string - dataProcessedRecords?: string - 'data.processed_fields'?: string - dpf?: string - dataProcessedFields?: string - 'data.input_bytes'?: ByteSize - dib?: ByteSize - dataInputBytes?: ByteSize - 'data.input_records'?: string - dir?: string - dataInputRecords?: string - 'data.input_fields'?: string - dif?: string - dataInputFields?: string - 'data.invalid_dates'?: string - did?: string - dataInvalidDates?: string - 'data.missing_fields'?: string - dmf?: string - dataMissingFields?: string - 'data.out_of_order_timestamps'?: string - doot?: string - dataOutOfOrderTimestamps?: string - 'data.empty_buckets'?: string - deb?: string - dataEmptyBuckets?: string - 'data.sparse_buckets'?: string - dsb?: string - dataSparseBuckets?: string - 'data.buckets'?: string - db?: string - dataBuckets?: string - 'data.earliest_record'?: string - der?: string - dataEarliestRecord?: string - 'data.latest_record'?: string - dlr?: string - dataLatestRecord?: string - 'data.last'?: string - dl?: string - dataLast?: string - 'data.last_empty_bucket'?: string - dleb?: string - dataLastEmptyBucket?: string - 'data.last_sparse_bucket'?: string - dlsb?: string - dataLastSparseBucket?: string - 'model.bytes'?: ByteSize - mb?: ByteSize - modelBytes?: ByteSize - 'model.memory_status'?: MlMemoryStatus - mms?: MlMemoryStatus - modelMemoryStatus?: MlMemoryStatus - 'model.bytes_exceeded'?: ByteSize - mbe?: ByteSize - modelBytesExceeded?: ByteSize - 'model.memory_limit'?: string - mml?: string - modelMemoryLimit?: string - 'model.by_fields'?: string - mbf?: string - modelByFields?: string - 'model.over_fields'?: string - mof?: string - modelOverFields?: string - 'model.partition_fields'?: string - mpf?: string - modelPartitionFields?: string - 'model.bucket_allocation_failures'?: string - mbaf?: string - modelBucketAllocationFailures?: string - 'model.categorization_status'?: MlCategorizationStatus - mcs?: MlCategorizationStatus - modelCategorizationStatus?: MlCategorizationStatus - 'model.categorized_doc_count'?: string - mcdc?: string - modelCategorizedDocCount?: string - 'model.total_category_count'?: string - mtcc?: string - modelTotalCategoryCount?: string - 'model.frequent_category_count'?: string - modelFrequentCategoryCount?: string - 'model.rare_category_count'?: string - mrcc?: string - modelRareCategoryCount?: string - 'model.dead_category_count'?: string - mdcc?: string - modelDeadCategoryCount?: string - 'model.failed_category_count'?: string - mfcc?: string - modelFailedCategoryCount?: string - 'model.log_time'?: string - mlt?: string - modelLogTime?: string - 'model.timestamp'?: string - mt?: string - modelTimestamp?: string - 
'forecasts.total'?: string - ft?: string - forecastsTotal?: string - 'forecasts.memory.min'?: string - fmmin?: string - forecastsMemoryMin?: string - 'forecasts.memory.max'?: string - fmmax?: string - forecastsMemoryMax?: string - 'forecasts.memory.avg'?: string - fmavg?: string - forecastsMemoryAvg?: string - 'forecasts.memory.total'?: string - fmt?: string - forecastsMemoryTotal?: string - 'forecasts.records.min'?: string - frmin?: string - forecastsRecordsMin?: string - 'forecasts.records.max'?: string - frmax?: string - forecastsRecordsMax?: string - 'forecasts.records.avg'?: string - fravg?: string - forecastsRecordsAvg?: string - 'forecasts.records.total'?: string - frt?: string - forecastsRecordsTotal?: string - 'forecasts.time.min'?: string - ftmin?: string - forecastsTimeMin?: string - 'forecasts.time.max'?: string - ftmax?: string - forecastsTimeMax?: string - 'forecasts.time.avg'?: string - ftavg?: string - forecastsTimeAvg?: string - 'forecasts.time.total'?: string - ftt?: string - forecastsTimeTotal?: string - 'node.id'?: NodeId - ni?: NodeId - nodeId?: NodeId - 'node.name'?: string - nn?: string - nodeName?: string - 'node.ephemeral_id'?: NodeId - ne?: NodeId - nodeEphemeralId?: NodeId - 'node.address'?: string - na?: string - nodeAddress?: string - 'buckets.count'?: string - bc?: string - bucketsCount?: string - 'buckets.time.total'?: string - btt?: string - bucketsTimeTotal?: string - 'buckets.time.min'?: string - btmin?: string - bucketsTimeMin?: string - 'buckets.time.max'?: string - btmax?: string - bucketsTimeMax?: string - 'buckets.time.exp_avg'?: string - btea?: string - bucketsTimeExpAvg?: string - 'buckets.time.exp_avg_hour'?: string - bteah?: string - bucketsTimeExpAvgHour?: string -} - -export interface CatMlJobsRequest extends CatCatRequestBase { - job_id?: Id - allow_no_match?: boolean - bytes?: Bytes - h?: CatCatAnonalyDetectorColumns - s?: CatCatAnonalyDetectorColumns - time?: TimeUnit -} - -export type CatMlJobsResponse = CatMlJobsJobsRecord[] - -export interface CatMlTrainedModelsRequest extends CatCatRequestBase { - model_id?: Id - allow_no_match?: boolean - bytes?: Bytes - h?: CatCatTrainedModelsColumns - s?: CatCatTrainedModelsColumns - from?: integer - size?: integer -} - -export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[] - -export interface CatMlTrainedModelsTrainedModelsRecord { - id?: Id - created_by?: string - c?: string - createdBy?: string - heap_size?: ByteSize - hs?: ByteSize - modelHeapSize?: ByteSize - operations?: string - o?: string - modelOperations?: string - license?: string - l?: string - create_time?: DateTime - ct?: DateTime - version?: VersionString - v?: VersionString - description?: string - d?: string - 'ingest.pipelines'?: string - ip?: string - ingestPipelines?: string - 'ingest.count'?: string - ic?: string - ingestCount?: string - 'ingest.time'?: string - it?: string - ingestTime?: string - 'ingest.current'?: string - icurr?: string - ingestCurrent?: string - 'ingest.failed'?: string - if?: string - ingestFailed?: string - 'data_frame.id'?: string - dfid?: string - dataFrameAnalytics?: string - 'data_frame.create_time'?: string - dft?: string - dataFrameAnalyticsTime?: string - 'data_frame.source_index'?: string - dfsi?: string - dataFrameAnalyticsSrcIndex?: string - 'data_frame.analysis'?: string - dfa?: string - dataFrameAnalyticsAnalysis?: string - type?: string -} - -export interface CatNodeattrsNodeAttributesRecord { - node?: string - id?: string - pid?: string - host?: string - h?: string - 
ip?: string - i?: string - port?: string - attr?: string - value?: string -} - -export interface CatNodeattrsRequest extends CatCatRequestBase { - local?: boolean -} - -export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] - -export interface CatNodesNodesRecord { - id?: Id - nodeId?: Id - pid?: string - p?: string - ip?: string - i?: string - port?: string - po?: string - http_address?: string - http?: string - version?: VersionString - v?: VersionString - flavor?: string - f?: string - type?: string - t?: string - build?: string - b?: string - jdk?: string - j?: string - 'disk.total'?: ByteSize - dt?: ByteSize - diskTotal?: ByteSize - 'disk.used'?: ByteSize - du?: ByteSize - diskUsed?: ByteSize - 'disk.avail'?: ByteSize - d?: ByteSize - da?: ByteSize - disk?: ByteSize - diskAvail?: ByteSize - 'disk.used_percent'?: Percentage - dup?: Percentage - diskUsedPercent?: Percentage - 'heap.current'?: string - hc?: string - heapCurrent?: string - 'heap.percent'?: Percentage - hp?: Percentage - heapPercent?: Percentage - 'heap.max'?: string - hm?: string - heapMax?: string - 'ram.current'?: string - rc?: string - ramCurrent?: string - 'ram.percent'?: Percentage - rp?: Percentage - ramPercent?: Percentage - 'ram.max'?: string - rn?: string - ramMax?: string - 'file_desc.current'?: string - fdc?: string - fileDescriptorCurrent?: string - 'file_desc.percent'?: Percentage - fdp?: Percentage - fileDescriptorPercent?: Percentage - 'file_desc.max'?: string - fdm?: string - fileDescriptorMax?: string - cpu?: string - load_1m?: string - load_5m?: string - load_15m?: string - l?: string - uptime?: string - u?: string - 'node.role'?: string - r?: string - role?: string - nodeRole?: string - master?: string - m?: string - name?: Name - n?: Name - 'completion.size'?: string - cs?: string - completionSize?: string - 'fielddata.memory_size'?: string - fm?: string - fielddataMemory?: string - 'fielddata.evictions'?: string - fe?: string - fielddataEvictions?: string - 'query_cache.memory_size'?: string - qcm?: string - queryCacheMemory?: string - 'query_cache.evictions'?: string - qce?: string - queryCacheEvictions?: string - 'query_cache.hit_count'?: string - qchc?: string - queryCacheHitCount?: string - 'query_cache.miss_count'?: string - qcmc?: string - queryCacheMissCount?: string - 'request_cache.memory_size'?: string - rcm?: string - requestCacheMemory?: string - 'request_cache.evictions'?: string - rce?: string - requestCacheEvictions?: string - 'request_cache.hit_count'?: string - rchc?: string - requestCacheHitCount?: string - 'request_cache.miss_count'?: string - rcmc?: string - requestCacheMissCount?: string - 'flush.total'?: string - ft?: string - flushTotal?: string - 'flush.total_time'?: string - ftt?: string - flushTotalTime?: string - 'get.current'?: string - gc?: string - getCurrent?: string - 'get.time'?: string - gti?: string - getTime?: string - 'get.total'?: string - gto?: string - getTotal?: string - 'get.exists_time'?: string - geti?: string - getExistsTime?: string - 'get.exists_total'?: string - geto?: string - getExistsTotal?: string - 'get.missing_time'?: string - gmti?: string - getMissingTime?: string - 'get.missing_total'?: string - gmto?: string - getMissingTotal?: string - 'indexing.delete_current'?: string - idc?: string - indexingDeleteCurrent?: string - 'indexing.delete_time'?: string - idti?: string - indexingDeleteTime?: string - 'indexing.delete_total'?: string - idto?: string - indexingDeleteTotal?: string - 'indexing.index_current'?: string - iic?: string - 
indexingIndexCurrent?: string - 'indexing.index_time'?: string - iiti?: string - indexingIndexTime?: string - 'indexing.index_total'?: string - iito?: string - indexingIndexTotal?: string - 'indexing.index_failed'?: string - iif?: string - indexingIndexFailed?: string - 'merges.current'?: string - mc?: string - mergesCurrent?: string - 'merges.current_docs'?: string - mcd?: string - mergesCurrentDocs?: string - 'merges.current_size'?: string - mcs?: string - mergesCurrentSize?: string - 'merges.total'?: string - mt?: string - mergesTotal?: string - 'merges.total_docs'?: string - mtd?: string - mergesTotalDocs?: string - 'merges.total_size'?: string - mts?: string - mergesTotalSize?: string - 'merges.total_time'?: string - mtt?: string - mergesTotalTime?: string - 'refresh.total'?: string - 'refresh.time'?: string - 'refresh.external_total'?: string - rto?: string - refreshTotal?: string - 'refresh.external_time'?: string - rti?: string - refreshTime?: string - 'refresh.listeners'?: string - rli?: string - refreshListeners?: string - 'script.compilations'?: string - scrcc?: string - scriptCompilations?: string - 'script.cache_evictions'?: string - scrce?: string - scriptCacheEvictions?: string - 'script.compilation_limit_triggered'?: string - scrclt?: string - scriptCacheCompilationLimitTriggered?: string - 'search.fetch_current'?: string - sfc?: string - searchFetchCurrent?: string - 'search.fetch_time'?: string - sfti?: string - searchFetchTime?: string - 'search.fetch_total'?: string - sfto?: string - searchFetchTotal?: string - 'search.open_contexts'?: string - so?: string - searchOpenContexts?: string - 'search.query_current'?: string - sqc?: string - searchQueryCurrent?: string - 'search.query_time'?: string - sqti?: string - searchQueryTime?: string - 'search.query_total'?: string - sqto?: string - searchQueryTotal?: string - 'search.scroll_current'?: string - scc?: string - searchScrollCurrent?: string - 'search.scroll_time'?: string - scti?: string - searchScrollTime?: string - 'search.scroll_total'?: string - scto?: string - searchScrollTotal?: string - 'segments.count'?: string - sc?: string - segmentsCount?: string - 'segments.memory'?: string - sm?: string - segmentsMemory?: string - 'segments.index_writer_memory'?: string - siwm?: string - segmentsIndexWriterMemory?: string - 'segments.version_map_memory'?: string - svmm?: string - segmentsVersionMapMemory?: string - 'segments.fixed_bitset_memory'?: string - sfbm?: string - fixedBitsetMemory?: string - 'suggest.current'?: string - suc?: string - suggestCurrent?: string - 'suggest.time'?: string - suti?: string - suggestTime?: string - 'suggest.total'?: string - suto?: string - suggestTotal?: string - 'bulk.total_operations'?: string - bto?: string - bulkTotalOperations?: string - 'bulk.total_time'?: string - btti?: string - bulkTotalTime?: string - 'bulk.total_size_in_bytes'?: string - btsi?: string - bulkTotalSizeInBytes?: string - 'bulk.avg_time'?: string - bati?: string - bulkAvgTime?: string - 'bulk.avg_size_in_bytes'?: string - basi?: string - bulkAvgSizeInBytes?: string -} - -export interface CatNodesRequest extends CatCatRequestBase { - bytes?: Bytes - full_id?: boolean | string - include_unloaded_segments?: boolean -} - -export type CatNodesResponse = CatNodesNodesRecord[] - -export interface CatPendingTasksPendingTasksRecord { - insertOrder?: string - o?: string - timeInQueue?: string - t?: string - priority?: string - p?: string - source?: string - s?: string -} - -export interface CatPendingTasksRequest extends 
CatCatRequestBase { - local?: boolean -} - -export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] - -export interface CatPluginsPluginsRecord { - id?: NodeId - name?: Name - n?: Name - component?: string - c?: string - version?: VersionString - v?: VersionString - description?: string - d?: string - type?: string - t?: string -} - -export interface CatPluginsRequest extends CatCatRequestBase { - local?: boolean -} - -export type CatPluginsResponse = CatPluginsPluginsRecord[] - -export interface CatRecoveryRecoveryRecord { - index?: IndexName - i?: IndexName - idx?: IndexName - shard?: string - s?: string - sh?: string - start_time?: DateTime - start?: DateTime - start_time_millis?: EpochTime - start_millis?: EpochTime - stop_time?: DateTime - stop?: DateTime - stop_time_millis?: EpochTime - stop_millis?: EpochTime - time?: Duration - t?: Duration - ti?: Duration - type?: string - ty?: string - stage?: string - st?: string - source_host?: string - shost?: string - source_node?: string - snode?: string - target_host?: string - thost?: string - target_node?: string - tnode?: string - repository?: string - rep?: string - snapshot?: string - snap?: string - files?: string - f?: string - files_recovered?: string - fr?: string - files_percent?: Percentage - fp?: Percentage - files_total?: string - tf?: string - bytes?: string - b?: string - bytes_recovered?: string - br?: string - bytes_percent?: Percentage - bp?: Percentage - bytes_total?: string - tb?: string - translog_ops?: string - to?: string - translog_ops_recovered?: string - tor?: string - translog_ops_percent?: Percentage - top?: Percentage -} - -export interface CatRecoveryRequest extends CatCatRequestBase { - index?: Indices - active_only?: boolean - bytes?: Bytes - detailed?: boolean -} - -export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] - -export interface CatRepositoriesRepositoriesRecord { - id?: string - repoId?: string - type?: string - t?: string -} - -export interface CatRepositoriesRequest extends CatCatRequestBase { -} - -export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] - -export interface CatSegmentsRequest extends CatCatRequestBase { - index?: Indices - bytes?: Bytes - local?: boolean -} - -export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] - -export interface CatSegmentsSegmentsRecord { - index?: IndexName - i?: IndexName - idx?: IndexName - shard?: string - s?: string - sh?: string - prirep?: string - p?: string - pr?: string - primaryOrReplica?: string - ip?: string - id?: NodeId - segment?: string - seg?: string - generation?: string - g?: string - gen?: string - 'docs.count'?: string - dc?: string - docsCount?: string - 'docs.deleted'?: string - dd?: string - docsDeleted?: string - size?: ByteSize - si?: ByteSize - 'size.memory'?: ByteSize - sm?: ByteSize - sizeMemory?: ByteSize - committed?: string - ic?: string - isCommitted?: string - searchable?: string - is?: string - isSearchable?: string - version?: VersionString - v?: VersionString - compound?: string - ico?: string - isCompound?: string -} - -export interface CatShardsRequest extends CatCatRequestBase { - index?: Indices - bytes?: Bytes -} - -export type CatShardsResponse = CatShardsShardsRecord[] - -export interface CatShardsShardsRecord { - index?: string - i?: string - idx?: string - shard?: string - s?: string - sh?: string - prirep?: string - p?: string - pr?: string - primaryOrReplica?: string - state?: string - st?: string - docs?: string | null - d?: string | null - dc?: string | 
null - store?: string | null - sto?: string | null - dataset?: string | null - ip?: string | null - id?: string - node?: string | null - n?: string | null - sync_id?: string - 'unassigned.reason'?: string - ur?: string - 'unassigned.at'?: string - ua?: string - 'unassigned.for'?: string - uf?: string - 'unassigned.details'?: string - ud?: string - 'recoverysource.type'?: string - rs?: string - 'completion.size'?: string - cs?: string - completionSize?: string - 'fielddata.memory_size'?: string - fm?: string - fielddataMemory?: string - 'fielddata.evictions'?: string - fe?: string - fielddataEvictions?: string - 'query_cache.memory_size'?: string - qcm?: string - queryCacheMemory?: string - 'query_cache.evictions'?: string - qce?: string - queryCacheEvictions?: string - 'flush.total'?: string - ft?: string - flushTotal?: string - 'flush.total_time'?: string - ftt?: string - flushTotalTime?: string - 'get.current'?: string - gc?: string - getCurrent?: string - 'get.time'?: string - gti?: string - getTime?: string - 'get.total'?: string - gto?: string - getTotal?: string - 'get.exists_time'?: string - geti?: string - getExistsTime?: string - 'get.exists_total'?: string - geto?: string - getExistsTotal?: string - 'get.missing_time'?: string - gmti?: string - getMissingTime?: string - 'get.missing_total'?: string - gmto?: string - getMissingTotal?: string - 'indexing.delete_current'?: string - idc?: string - indexingDeleteCurrent?: string - 'indexing.delete_time'?: string - idti?: string - indexingDeleteTime?: string - 'indexing.delete_total'?: string - idto?: string - indexingDeleteTotal?: string - 'indexing.index_current'?: string - iic?: string - indexingIndexCurrent?: string - 'indexing.index_time'?: string - iiti?: string - indexingIndexTime?: string - 'indexing.index_total'?: string - iito?: string - indexingIndexTotal?: string - 'indexing.index_failed'?: string - iif?: string - indexingIndexFailed?: string - 'merges.current'?: string - mc?: string - mergesCurrent?: string - 'merges.current_docs'?: string - mcd?: string - mergesCurrentDocs?: string - 'merges.current_size'?: string - mcs?: string - mergesCurrentSize?: string - 'merges.total'?: string - mt?: string - mergesTotal?: string - 'merges.total_docs'?: string - mtd?: string - mergesTotalDocs?: string - 'merges.total_size'?: string - mts?: string - mergesTotalSize?: string - 'merges.total_time'?: string - mtt?: string - mergesTotalTime?: string - 'refresh.total'?: string - 'refresh.time'?: string - 'refresh.external_total'?: string - rto?: string - refreshTotal?: string - 'refresh.external_time'?: string - rti?: string - refreshTime?: string - 'refresh.listeners'?: string - rli?: string - refreshListeners?: string - 'search.fetch_current'?: string - sfc?: string - searchFetchCurrent?: string - 'search.fetch_time'?: string - sfti?: string - searchFetchTime?: string - 'search.fetch_total'?: string - sfto?: string - searchFetchTotal?: string - 'search.open_contexts'?: string - so?: string - searchOpenContexts?: string - 'search.query_current'?: string - sqc?: string - searchQueryCurrent?: string - 'search.query_time'?: string - sqti?: string - searchQueryTime?: string - 'search.query_total'?: string - sqto?: string - searchQueryTotal?: string - 'search.scroll_current'?: string - scc?: string - searchScrollCurrent?: string - 'search.scroll_time'?: string - scti?: string - searchScrollTime?: string - 'search.scroll_total'?: string - scto?: string - searchScrollTotal?: string - 'segments.count'?: string - sc?: string - segmentsCount?: 
string - 'segments.memory'?: string - sm?: string - segmentsMemory?: string - 'segments.index_writer_memory'?: string - siwm?: string - segmentsIndexWriterMemory?: string - 'segments.version_map_memory'?: string - svmm?: string - segmentsVersionMapMemory?: string - 'segments.fixed_bitset_memory'?: string - sfbm?: string - fixedBitsetMemory?: string - 'seq_no.max'?: string - sqm?: string - maxSeqNo?: string - 'seq_no.local_checkpoint'?: string - sql?: string - localCheckpoint?: string - 'seq_no.global_checkpoint'?: string - sqg?: string - globalCheckpoint?: string - 'warmer.current'?: string - wc?: string - warmerCurrent?: string - 'warmer.total'?: string - wto?: string - warmerTotal?: string - 'warmer.total_time'?: string - wtt?: string - warmerTotalTime?: string - 'path.data'?: string - pd?: string - dataPath?: string - 'path.state'?: string - ps?: string - statsPath?: string - 'bulk.total_operations'?: string - bto?: string - bulkTotalOperations?: string - 'bulk.total_time'?: string - btti?: string - bulkTotalTime?: string - 'bulk.total_size_in_bytes'?: string - btsi?: string - bulkTotalSizeInBytes?: string - 'bulk.avg_time'?: string - bati?: string - bulkAvgTime?: string - 'bulk.avg_size_in_bytes'?: string - basi?: string - bulkAvgSizeInBytes?: string -} - -export interface CatSnapshotsRequest extends CatCatRequestBase { - repository?: Names - ignore_unavailable?: boolean -} - -export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] - -export interface CatSnapshotsSnapshotsRecord { - id?: string - snapshot?: string - repository?: string - re?: string - repo?: string - status?: string - s?: string - start_epoch?: SpecUtilsStringified> - ste?: SpecUtilsStringified> - startEpoch?: SpecUtilsStringified> - start_time?: WatcherScheduleTimeOfDay - sti?: WatcherScheduleTimeOfDay - startTime?: WatcherScheduleTimeOfDay - end_epoch?: SpecUtilsStringified> - ete?: SpecUtilsStringified> - endEpoch?: SpecUtilsStringified> - end_time?: TimeOfDay - eti?: TimeOfDay - endTime?: TimeOfDay - duration?: Duration - dur?: Duration - indices?: string - i?: string - successful_shards?: string - ss?: string - failed_shards?: string - fs?: string - total_shards?: string - ts?: string - reason?: string - r?: string -} - -export interface CatTasksRequest extends CatCatRequestBase { - actions?: string[] - detailed?: boolean - node_id?: string[] - parent_task_id?: string -} - -export type CatTasksResponse = CatTasksTasksRecord[] - -export interface CatTasksTasksRecord { - id?: Id - action?: string - ac?: string - task_id?: Id - ti?: Id - parent_task_id?: string - pti?: string - type?: string - ty?: string - start_time?: string - start?: string - timestamp?: string - ts?: string - hms?: string - hhmmss?: string - running_time_ns?: string - running_time?: string - time?: string - node_id?: NodeId - ni?: NodeId - ip?: string - i?: string - port?: string - po?: string - node?: string - n?: string - version?: VersionString - v?: VersionString - x_opaque_id?: string - x?: string - description?: string - desc?: string -} - -export interface CatTemplatesRequest extends CatCatRequestBase { - name?: Name - local?: boolean -} - -export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] - -export interface CatTemplatesTemplatesRecord { - name?: Name - n?: Name - index_patterns?: string - t?: string - order?: string - o?: string - p?: string - version?: VersionString | null - v?: VersionString | null - composed_of?: string - c?: string -} - -export interface CatThreadPoolRequest extends CatCatRequestBase { - 
thread_pool_patterns?: Names - time?: TimeUnit - local?: boolean -} - -export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] - -export interface CatThreadPoolThreadPoolRecord { - node_name?: string - nn?: string - node_id?: NodeId - id?: NodeId - ephemeral_node_id?: string - eid?: string - pid?: string - p?: string - host?: string - h?: string - ip?: string - i?: string - port?: string - po?: string - name?: string - n?: string - type?: string - t?: string - active?: string - a?: string - pool_size?: string - psz?: string - queue?: string - q?: string - queue_size?: string - qs?: string - rejected?: string - r?: string - largest?: string - l?: string - completed?: string - c?: string - core?: string | null - cr?: string | null - max?: string | null - mx?: string | null - size?: string | null - sz?: string | null - keep_alive?: string | null - ka?: string | null -} - -export interface CatTransformsRequest extends CatCatRequestBase { - transform_id?: Id - allow_no_match?: boolean - from?: integer - h?: CatCatTransformColumns - s?: CatCatTransformColumns - time?: TimeUnit - size?: integer -} - -export type CatTransformsResponse = CatTransformsTransformsRecord[] - -export interface CatTransformsTransformsRecord { - id?: Id - state?: string - s?: string - checkpoint?: string - c?: string - documents_processed?: string - docp?: string - documentsProcessed?: string - checkpoint_progress?: string | null - cp?: string | null - checkpointProgress?: string | null - last_search_time?: string | null - lst?: string | null - lastSearchTime?: string | null - changes_last_detection_time?: string | null - cldt?: string | null - create_time?: string - ct?: string - createTime?: string - version?: VersionString - v?: VersionString - source_index?: string - si?: string - sourceIndex?: string - dest_index?: string - di?: string - destIndex?: string - pipeline?: string - p?: string - description?: string - d?: string - transform_type?: string - tt?: string - frequency?: string - f?: string - max_page_search_size?: string - mpsz?: string - docs_per_second?: string - dps?: string - reason?: string - r?: string - search_total?: string - st?: string - search_failure?: string - sf?: string - search_time?: string - stime?: string - index_total?: string - it?: string - index_failure?: string - if?: string - index_time?: string - itime?: string - documents_indexed?: string - doci?: string - delete_time?: string - dtime?: string - documents_deleted?: string - docd?: string - trigger_count?: string - tc?: string - pages_processed?: string - pp?: string - processing_time?: string - pt?: string - checkpoint_duration_time_exp_avg?: string - cdtea?: string - checkpointTimeExpAvg?: string - indexed_documents_exp_avg?: string - idea?: string - processed_documents_exp_avg?: string - pdea?: string -} - -export interface CcrFollowIndexStats { - index: IndexName - shards: CcrShardStats[] -} - -export interface CcrReadException { - exception: ErrorCause - from_seq_no: SequenceNumber - retries: integer -} - -export interface CcrShardStats { - bytes_read: long - failed_read_requests: long - failed_write_requests: long - fatal_exception?: ErrorCause - follower_aliases_version: VersionNumber - follower_global_checkpoint: long - follower_index: string - follower_mapping_version: VersionNumber - follower_max_seq_no: SequenceNumber - follower_settings_version: VersionNumber - last_requested_seq_no: SequenceNumber - leader_global_checkpoint: long - leader_index: string - leader_max_seq_no: SequenceNumber - operations_read: 
long - operations_written: long - outstanding_read_requests: integer - outstanding_write_requests: integer - read_exceptions: CcrReadException[] - remote_cluster: string - shard_id: integer - successful_read_requests: long - successful_write_requests: long - time_since_last_read?: Duration - time_since_last_read_millis: DurationValue - total_read_remote_exec_time?: Duration - total_read_remote_exec_time_millis: DurationValue - total_read_time?: Duration - total_read_time_millis: DurationValue - total_write_time?: Duration - total_write_time_millis: DurationValue - write_buffer_operation_count: long - write_buffer_size_in_bytes: ByteSize -} - -export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { - name: Name -} - -export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase - -export interface CcrFollowRequest extends RequestBase { - index: IndexName - wait_for_active_shards?: WaitForActiveShards - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - data_stream_name?: string - leader_index: IndexName - max_outstanding_read_requests?: long - max_outstanding_write_requests?: integer - max_read_request_operation_count?: integer - max_read_request_size?: ByteSize - max_retry_delay?: Duration - max_write_buffer_count?: integer - max_write_buffer_size?: ByteSize - max_write_request_operation_count?: integer - max_write_request_size?: ByteSize - read_poll_timeout?: Duration - remote_cluster: string - settings?: IndicesIndexSettings - } -} - -export interface CcrFollowResponse { - follow_index_created: boolean - follow_index_shards_acked: boolean - index_following_started: boolean -} - -export interface CcrFollowInfoFollowerIndex { - follower_index: IndexName - leader_index: IndexName - parameters?: CcrFollowInfoFollowerIndexParameters - remote_cluster: Name - status: CcrFollowInfoFollowerIndexStatus -} - -export interface CcrFollowInfoFollowerIndexParameters { - max_outstanding_read_requests?: long - max_outstanding_write_requests?: integer - max_read_request_operation_count?: integer - max_read_request_size?: ByteSize - max_retry_delay?: Duration - max_write_buffer_count?: integer - max_write_buffer_size?: ByteSize - max_write_request_operation_count?: integer - max_write_request_size?: ByteSize - read_poll_timeout?: Duration -} - -export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused' - -export interface CcrFollowInfoRequest extends RequestBase { - index: Indices -} - -export interface CcrFollowInfoResponse { - follower_indices: CcrFollowInfoFollowerIndex[] -} - -export interface CcrFollowStatsRequest extends RequestBase { - index: Indices -} - -export interface CcrFollowStatsResponse { - indices: CcrFollowIndexStats[] -} - -export interface CcrForgetFollowerRequest extends RequestBase { - index: IndexName - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - follower_cluster?: string - follower_index?: IndexName - follower_index_uuid?: Uuid - leader_remote_cluster?: string - } -} - -export interface CcrForgetFollowerResponse { - _shards: ShardStatistics -} - -export interface CcrGetAutoFollowPatternAutoFollowPattern { - name: Name - pattern: CcrGetAutoFollowPatternAutoFollowPatternSummary -} - -export interface CcrGetAutoFollowPatternAutoFollowPatternSummary { - active: boolean - remote_cluster: string - follow_index_pattern?: IndexPattern - leader_index_patterns: IndexPatterns - leader_index_exclusion_patterns: IndexPatterns - max_outstanding_read_requests: integer -} - -export interface CcrGetAutoFollowPatternRequest extends RequestBase { - name?: Name -} - -export interface CcrGetAutoFollowPatternResponse { - patterns: CcrGetAutoFollowPatternAutoFollowPattern[] -} - -export interface CcrPauseAutoFollowPatternRequest extends RequestBase { - name: Name -} - -export type CcrPauseAutoFollowPatternResponse = AcknowledgedResponseBase - -export interface CcrPauseFollowRequest extends RequestBase { - index: IndexName -} - -export type CcrPauseFollowResponse = AcknowledgedResponseBase - -export interface CcrPutAutoFollowPatternRequest extends RequestBase { - name: Name - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - remote_cluster: string - follow_index_pattern?: IndexPattern - leader_index_patterns?: IndexPatterns - leader_index_exclusion_patterns?: IndexPatterns - max_outstanding_read_requests?: integer - settings?: Record - max_outstanding_write_requests?: integer - read_poll_timeout?: Duration - max_read_request_operation_count?: integer - max_read_request_size?: ByteSize - max_retry_delay?: Duration - max_write_buffer_count?: integer - max_write_buffer_size?: ByteSize - max_write_request_operation_count?: integer - max_write_request_size?: ByteSize - } -} - -export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase - -export interface CcrResumeAutoFollowPatternRequest extends RequestBase { - name: Name -} - -export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase - -export interface CcrResumeFollowRequest extends RequestBase { - index: IndexName - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - max_outstanding_read_requests?: long - max_outstanding_write_requests?: long - max_read_request_operation_count?: long - max_read_request_size?: string - max_retry_delay?: Duration - max_write_buffer_count?: long - max_write_buffer_size?: string - max_write_request_operation_count?: long - max_write_request_size?: string - read_poll_timeout?: Duration - } -} - -export type CcrResumeFollowResponse = AcknowledgedResponseBase - -export interface CcrStatsAutoFollowStats { - auto_followed_clusters: CcrStatsAutoFollowedCluster[] - number_of_failed_follow_indices: long - number_of_failed_remote_cluster_state_requests: long - number_of_successful_follow_indices: long - recent_auto_follow_errors: ErrorCause[] -} - -export interface CcrStatsAutoFollowedCluster { - cluster_name: Name - last_seen_metadata_version: VersionNumber - time_since_last_check_millis: DurationValue -} - -export interface CcrStatsFollowStats { - indices: CcrFollowIndexStats[] -} - -export interface CcrStatsRequest extends RequestBase { -} - -export interface CcrStatsResponse { - auto_follow_stats: CcrStatsAutoFollowStats - follow_stats: CcrStatsFollowStats -} - -export interface CcrUnfollowRequest extends RequestBase { - index: IndexName -} - -export type CcrUnfollowResponse = AcknowledgedResponseBase - -export interface ClusterComponentTemplate { - name: Name - component_template: ClusterComponentTemplateNode -} - -export interface ClusterComponentTemplateNode { - template: ClusterComponentTemplateSummary - version?: VersionNumber - _meta?: Metadata -} - -export interface ClusterComponentTemplateSummary { - _meta?: Metadata - version?: VersionNumber - settings?: Record - mappings?: MappingTypeMapping - aliases?: Record - lifecycle?: IndicesDataStreamLifecycleWithRollover -} - -export interface ClusterAllocationExplainAllocationDecision { - decider: string - decision: ClusterAllocationExplainAllocationExplainDecision - explanation: string -} - -export type ClusterAllocationExplainAllocationExplainDecision = 'NO' | 'YES' | 'THROTTLE' | 'ALWAYS' - -export interface ClusterAllocationExplainAllocationStore { - allocation_id: string - found: boolean - in_sync: boolean - matching_size_in_bytes: long - matching_sync_id: boolean - store_exception: string -} - -export interface ClusterAllocationExplainClusterInfo { - nodes: Record - shard_sizes: Record - shard_data_set_sizes?: Record - shard_paths: Record - reserved_sizes: ClusterAllocationExplainReservedSize[] -} - -export interface ClusterAllocationExplainCurrentNode { - id: Id - name: Name - roles: NodeRoles - attributes: Record - transport_address: TransportAddress - weight_ranking: integer -} - -export type ClusterAllocationExplainDecision = 'yes' | 'no' | 'worse_balance' | 'throttled' | 'awaiting_info' | 'allocation_delayed' | 'no_valid_shard_copy' | 'no_attempt' - -export interface ClusterAllocationExplainDiskUsage { - path: string - total_bytes: long - used_bytes: long - free_bytes: long - free_disk_percent: double - used_disk_percent: double -} - -export interface ClusterAllocationExplainNodeAllocationExplanation { - deciders: ClusterAllocationExplainAllocationDecision[] - node_attributes: Record - node_decision: ClusterAllocationExplainDecision - node_id: Id - node_name: Name - roles: NodeRoles - store?: ClusterAllocationExplainAllocationStore - transport_address: TransportAddress - weight_ranking: integer -} - -export interface ClusterAllocationExplainNodeDiskUsage { - node_name: Name - least_available: ClusterAllocationExplainDiskUsage - most_available: 
ClusterAllocationExplainDiskUsage -} - -export interface ClusterAllocationExplainRequest extends RequestBase { - include_disk_info?: boolean - include_yes_decisions?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - current_node?: string - index?: IndexName - primary?: boolean - shard?: integer - } -} - -export interface ClusterAllocationExplainReservedSize { - node_id: Id - path: string - total: long - shards: string[] -} - -export interface ClusterAllocationExplainResponse { - allocate_explanation?: string - allocation_delay?: Duration - allocation_delay_in_millis?: DurationValue - can_allocate?: ClusterAllocationExplainDecision - can_move_to_other_node?: ClusterAllocationExplainDecision - can_rebalance_cluster?: ClusterAllocationExplainDecision - can_rebalance_cluster_decisions?: ClusterAllocationExplainAllocationDecision[] - can_rebalance_to_other_node?: ClusterAllocationExplainDecision - can_remain_decisions?: ClusterAllocationExplainAllocationDecision[] - can_remain_on_current_node?: ClusterAllocationExplainDecision - cluster_info?: ClusterAllocationExplainClusterInfo - configured_delay?: Duration - configured_delay_in_millis?: DurationValue - current_node?: ClusterAllocationExplainCurrentNode - current_state: string - index: IndexName - move_explanation?: string - node_allocation_decisions?: ClusterAllocationExplainNodeAllocationExplanation[] - primary: boolean - rebalance_explanation?: string - remaining_delay?: Duration - remaining_delay_in_millis?: DurationValue - shard: integer - unassigned_info?: ClusterAllocationExplainUnassignedInformation - note?: string -} - -export interface ClusterAllocationExplainUnassignedInformation { - at: DateTime - last_allocation_status?: string - reason: ClusterAllocationExplainUnassignedInformationReason - details?: string - failed_allocation_attempts?: integer - delayed?: boolean - allocation_status?: string -} - -export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION' - -export interface ClusterDeleteComponentTemplateRequest extends RequestBase { - name: Names - master_timeout?: Duration - timeout?: Duration -} - -export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase - -export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { - wait_for_removal?: boolean -} - -export type ClusterDeleteVotingConfigExclusionsResponse = boolean - -export interface ClusterExistsComponentTemplateRequest extends RequestBase { - name: Names - master_timeout?: Duration - local?: boolean -} - -export type ClusterExistsComponentTemplateResponse = boolean - -export interface ClusterGetComponentTemplateRequest extends RequestBase { - name?: Name - flat_settings?: boolean - include_defaults?: boolean - local?: boolean - master_timeout?: Duration -} - -export interface ClusterGetComponentTemplateResponse { - component_templates: ClusterComponentTemplate[] -} - -export interface ClusterGetSettingsRequest extends RequestBase { - flat_settings?: boolean - include_defaults?: boolean - master_timeout?: Duration - timeout?: Duration -} - -export interface ClusterGetSettingsResponse { - persistent: Record - transient: Record 
- defaults?: Record -} - -export interface ClusterHealthHealthResponseBody { - active_primary_shards: integer - active_shards: integer - active_shards_percent_as_number: Percentage - cluster_name: Name - delayed_unassigned_shards: integer - indices?: Record - initializing_shards: integer - number_of_data_nodes: integer - number_of_in_flight_fetch: integer - number_of_nodes: integer - number_of_pending_tasks: integer - relocating_shards: integer - status: HealthStatus - task_max_waiting_in_queue?: Duration - task_max_waiting_in_queue_millis: DurationValue - timed_out: boolean - unassigned_primary_shards: integer - unassigned_shards: integer -} - -export interface ClusterHealthIndexHealthStats { - active_primary_shards: integer - active_shards: integer - initializing_shards: integer - number_of_replicas: integer - number_of_shards: integer - relocating_shards: integer - shards?: Record - status: HealthStatus - unassigned_shards: integer - unassigned_primary_shards: integer -} - -export interface ClusterHealthRequest extends RequestBase { - index?: Indices - expand_wildcards?: ExpandWildcards - level?: Level - local?: boolean - master_timeout?: Duration - timeout?: Duration - wait_for_active_shards?: WaitForActiveShards - wait_for_events?: WaitForEvents - wait_for_nodes?: string | integer - wait_for_no_initializing_shards?: boolean - wait_for_no_relocating_shards?: boolean - wait_for_status?: HealthStatus -} - -export type ClusterHealthResponse = ClusterHealthHealthResponseBody - -export interface ClusterHealthShardHealthStats { - active_shards: integer - initializing_shards: integer - primary_active: boolean - relocating_shards: integer - status: HealthStatus - unassigned_shards: integer - unassigned_primary_shards: integer -} - -export interface ClusterInfoRequest extends RequestBase { - target: ClusterInfoTargets -} - -export interface ClusterInfoResponse { - cluster_name: Name - http?: NodesHttp - ingest?: NodesIngest - thread_pool?: Record - script?: NodesScripting -} - -export interface ClusterPendingTasksPendingTask { - executing: boolean - insert_order: integer - priority: string - source: string - time_in_queue?: Duration - time_in_queue_millis: DurationValue -} - -export interface ClusterPendingTasksRequest extends RequestBase { - local?: boolean - master_timeout?: Duration -} - -export interface ClusterPendingTasksResponse { - tasks: ClusterPendingTasksPendingTask[] -} - -export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase { - node_names?: Names - node_ids?: Ids - timeout?: Duration -} - -export type ClusterPostVotingConfigExclusionsResponse = boolean - -export interface ClusterPutComponentTemplateRequest extends RequestBase { - name: Name - create?: boolean - master_timeout?: Duration - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - template: IndicesIndexState - version?: VersionNumber - _meta?: Metadata - deprecated?: boolean - } -} - -export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase - -export interface ClusterPutSettingsRequest extends RequestBase { - flat_settings?: boolean - master_timeout?: Duration - timeout?: Duration - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
-  body?: {
-    persistent?: Record
-    transient?: Record
-  }
-}
-
-export interface ClusterPutSettingsResponse {
-  acknowledged: boolean
-  persistent: Record
-  transient: Record
-}
-
-export type ClusterRemoteInfoClusterRemoteInfo = ClusterRemoteInfoClusterRemoteSniffInfo | ClusterRemoteInfoClusterRemoteProxyInfo
-
-export interface ClusterRemoteInfoClusterRemoteProxyInfo {
-  mode: 'proxy'
-  connected: boolean
-  initial_connect_timeout: Duration
-  skip_unavailable: boolean
-  proxy_address: string
-  server_name: string
-  num_proxy_sockets_connected: integer
-  max_proxy_socket_connections: integer
-}
-
-export interface ClusterRemoteInfoClusterRemoteSniffInfo {
-  mode: 'sniff'
-  connected: boolean
-  max_connections_per_cluster: integer
-  num_nodes_connected: long
-  initial_connect_timeout: Duration
-  skip_unavailable: boolean
-  seeds: string[]
-}
-
-export interface ClusterRemoteInfoRequest extends RequestBase {
-}
-
-export type ClusterRemoteInfoResponse = Record
-
-export interface ClusterRerouteCommand {
-  cancel?: ClusterRerouteCommandCancelAction
-  move?: ClusterRerouteCommandMoveAction
-  allocate_replica?: ClusterRerouteCommandAllocateReplicaAction
-  allocate_stale_primary?: ClusterRerouteCommandAllocatePrimaryAction
-  allocate_empty_primary?: ClusterRerouteCommandAllocatePrimaryAction
-}
-
-export interface ClusterRerouteCommandAllocatePrimaryAction {
-  index: IndexName
-  shard: integer
-  node: string
-  accept_data_loss: boolean
-}
-
-export interface ClusterRerouteCommandAllocateReplicaAction {
-  index: IndexName
-  shard: integer
-  node: string
-}
-
-export interface ClusterRerouteCommandCancelAction {
-  index: IndexName
-  shard: integer
-  node: string
-  allow_primary?: boolean
-}
-
-export interface ClusterRerouteCommandMoveAction {
-  index: IndexName
-  shard: integer
-  from_node: string
-  to_node: string
-}
-
-export interface ClusterRerouteRequest extends RequestBase {
-  dry_run?: boolean
-  explain?: boolean
-  metric?: Metrics
-  retry_failed?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    commands?: ClusterRerouteCommand[]
-  }
-}
-
-export interface ClusterRerouteRerouteDecision {
-  decider: string
-  decision: string
-  explanation: string
-}
-
-export interface ClusterRerouteRerouteExplanation {
-  command: string
-  decisions: ClusterRerouteRerouteDecision[]
-  parameters: ClusterRerouteRerouteParameters
-}
-
-export interface ClusterRerouteRerouteParameters {
-  allow_primary: boolean
-  index: IndexName
-  node: NodeName
-  shard: integer
-  from_node?: NodeName
-  to_node?: NodeName
-}
-
-export interface ClusterRerouteResponse {
-  acknowledged: boolean
-  explanations?: ClusterRerouteRerouteExplanation[]
-  state?: any
-}
-
-export interface ClusterStateRequest extends RequestBase {
-  metric?: Metrics
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  flat_settings?: boolean
-  ignore_unavailable?: boolean
-  local?: boolean
-  master_timeout?: Duration
-  wait_for_metadata_version?: VersionNumber
-  wait_for_timeout?: Duration
-}
-
-export type ClusterStateResponse = any
-
-export interface ClusterStatsCharFilterTypes {
-  analyzer_types: ClusterStatsFieldTypes[]
-  built_in_analyzers: ClusterStatsFieldTypes[]
-  built_in_char_filters: ClusterStatsFieldTypes[]
-  built_in_filters: ClusterStatsFieldTypes[]
-  built_in_tokenizers: ClusterStatsFieldTypes[]
-  char_filter_types: ClusterStatsFieldTypes[]
-  filter_types: ClusterStatsFieldTypes[]
-  tokenizer_types: ClusterStatsFieldTypes[]
-}
-
-export interface ClusterStatsClusterFileSystem {
-  available_in_bytes: long
-  free_in_bytes: long
-  total_in_bytes: long
-}
-
-export interface ClusterStatsClusterIndices {
-  analysis: ClusterStatsCharFilterTypes
-  completion: CompletionStats
-  count: long
-  docs: DocStats
-  fielddata: FielddataStats
-  query_cache: QueryCacheStats
-  segments: SegmentsStats
-  shards: ClusterStatsClusterIndicesShards
-  store: StoreStats
-  mappings: ClusterStatsFieldTypesMappings
-  versions?: ClusterStatsIndicesVersions[]
-}
-
-export interface ClusterStatsClusterIndicesShards {
-  index?: ClusterStatsClusterIndicesShardsIndex
-  primaries?: double
-  replication?: double
-  total?: double
-}
-
-export interface ClusterStatsClusterIndicesShardsIndex {
-  primaries: ClusterStatsClusterShardMetrics
-  replication: ClusterStatsClusterShardMetrics
-  shards: ClusterStatsClusterShardMetrics
-}
-
-export interface ClusterStatsClusterIngest {
-  number_of_pipelines: integer
-  processor_stats: Record
-}
-
-export interface ClusterStatsClusterJvm {
-  max_uptime_in_millis: DurationValue
-  mem: ClusterStatsClusterJvmMemory
-  threads: long
-  versions: ClusterStatsClusterJvmVersion[]
-}
-
-export interface ClusterStatsClusterJvmMemory {
-  heap_max_in_bytes: long
-  heap_used_in_bytes: long
-}
-
-export interface ClusterStatsClusterJvmVersion {
-  bundled_jdk: boolean
-  count: integer
-  using_bundled_jdk: boolean
-  version: VersionString
-  vm_name: string
-  vm_vendor: string
-  vm_version: VersionString
-}
-
-export interface ClusterStatsClusterNetworkTypes {
-  http_types: Record
-  transport_types: Record
-}
-
-export interface ClusterStatsClusterNodeCount {
-  coordinating_only: integer
-  data: integer
-  data_cold: integer
-  data_content: integer
-  data_frozen?: integer
-  data_hot: integer
-  data_warm: integer
-  ingest: integer
-  master: integer
-  ml: integer
-  remote_cluster_client: integer
-  total: integer
-  transform: integer
-  voting_only: integer
-}
-
-export interface ClusterStatsClusterNodes {
-  count: ClusterStatsClusterNodeCount
-  discovery_types: Record
-  fs: ClusterStatsClusterFileSystem
-  indexing_pressure: ClusterStatsIndexingPressure
-  ingest: ClusterStatsClusterIngest
-  jvm: ClusterStatsClusterJvm
-  network_types: ClusterStatsClusterNetworkTypes
-  os: ClusterStatsClusterOperatingSystem
-  packaging_types: ClusterStatsNodePackagingType[]
-  plugins: PluginStats[]
-  process: ClusterStatsClusterProcess
-  versions: VersionString[]
-}
-
-export interface ClusterStatsClusterOperatingSystem {
-  allocated_processors: integer
-  architectures?: ClusterStatsClusterOperatingSystemArchitecture[]
-  available_processors: integer
-  mem: ClusterStatsOperatingSystemMemoryInfo
-  names: ClusterStatsClusterOperatingSystemName[]
-  pretty_names: ClusterStatsClusterOperatingSystemPrettyName[]
-}
-
-export interface ClusterStatsClusterOperatingSystemArchitecture {
-  arch: string
-  count: integer
-}
-
-export interface ClusterStatsClusterOperatingSystemName {
-  count: integer
-  name: Name
-}
-
-export interface ClusterStatsClusterOperatingSystemPrettyName {
-  count: integer
-  pretty_name: Name
-}
-
-export interface ClusterStatsClusterProcess {
-  cpu: ClusterStatsClusterProcessCpu
-  open_file_descriptors: ClusterStatsClusterProcessOpenFileDescriptors
-}
-
-export interface ClusterStatsClusterProcessCpu {
-  percent: integer
-}
-
-export interface ClusterStatsClusterProcessOpenFileDescriptors {
-  avg: long
-  max: long
-  min: long
-}
-
-export interface ClusterStatsClusterProcessor {
-  count: long
-  current: long
-  failed: long
-  time?: Duration
-  time_in_millis: DurationValue
-}
-
-export interface ClusterStatsClusterShardMetrics {
-  avg: double
-  max: double
-  min: double
-}
-
-export interface ClusterStatsFieldTypes {
-  name: Name
-  count: integer
-  index_count: integer
-  indexed_vector_count?: long
-  indexed_vector_dim_max?: long
-  indexed_vector_dim_min?: long
-  script_count?: integer
-}
-
-export interface ClusterStatsFieldTypesMappings {
-  field_types: ClusterStatsFieldTypes[]
-  runtime_field_types?: ClusterStatsRuntimeFieldTypes[]
-  total_field_count?: integer
-  total_deduplicated_field_count?: integer
-  total_deduplicated_mapping_size?: ByteSize
-  total_deduplicated_mapping_size_in_bytes?: long
-}
-
-export interface ClusterStatsIndexingPressure {
-  memory: ClusterStatsIndexingPressureMemory
-}
-
-export interface ClusterStatsIndexingPressureMemory {
-  current: ClusterStatsIndexingPressureMemorySummary
-  limit_in_bytes: long
-  total: ClusterStatsIndexingPressureMemorySummary
-}
-
-export interface ClusterStatsIndexingPressureMemorySummary {
-  all_in_bytes: long
-  combined_coordinating_and_primary_in_bytes: long
-  coordinating_in_bytes: long
-  coordinating_rejections?: long
-  primary_in_bytes: long
-  primary_rejections?: long
-  replica_in_bytes: long
-  replica_rejections?: long
-}
-
-export interface ClusterStatsIndicesVersions {
-  index_count: integer
-  primary_shard_count: integer
-  total_primary_bytes: long
-  version: VersionString
-}
-
-export interface ClusterStatsNodePackagingType {
-  count: integer
-  flavor: string
-  type: string
-}
-
-export interface ClusterStatsOperatingSystemMemoryInfo {
-  adjusted_total_in_bytes?: long
-  free_in_bytes: long
-  free_percent: integer
-  total_in_bytes: long
-  used_in_bytes: long
-  used_percent: integer
-}
-
-export interface ClusterStatsRequest extends RequestBase {
-  node_id?: NodeIds
-  include_remotes?: boolean
-  timeout?: Duration
-}
-
-export type ClusterStatsResponse = ClusterStatsStatsResponseBase
-
-export interface ClusterStatsRuntimeFieldTypes {
-  chars_max: integer
-  chars_total: integer
-  count: integer
-  doc_max: integer
-  doc_total: integer
-  index_count: integer
-  lang: string[]
-  lines_max: integer
-  lines_total: integer
-  name: Name
-  scriptless_count: integer
-  shadowed_count: integer
-  source_max: integer
-  source_total: integer
-}
-
-export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase {
-  cluster_name: Name
-  cluster_uuid: Uuid
-  indices: ClusterStatsClusterIndices
-  nodes: ClusterStatsClusterNodes
-  status: HealthStatus
-  timestamp: long
-}
-
-export interface ConnectorConnector {
-  api_key_id?: string
-  api_key_secret_id?: string
-  configuration: ConnectorConnectorConfiguration
-  custom_scheduling: ConnectorConnectorCustomScheduling
-  description?: string
-  error?: string | null
-  features?: ConnectorConnectorFeatures
-  filtering: ConnectorFilteringConfig[]
-  id?: Id
-  index_name?: IndexName | null
-  is_native: boolean
-  language?: string
-  last_access_control_sync_error?: string
-  last_access_control_sync_scheduled_at?: DateTime
-  last_access_control_sync_status?: ConnectorSyncStatus
-  last_deleted_document_count?: long
-  last_incremental_sync_scheduled_at?: DateTime
-  last_indexed_document_count?: long
-  last_seen?: DateTime
-  last_sync_error?: string
-  last_sync_scheduled_at?: DateTime
-  last_sync_status?: ConnectorSyncStatus
-  last_synced?: DateTime
-  name?: string
-  pipeline?: ConnectorIngestPipelineParams
-  scheduling: ConnectorSchedulingConfiguration
-  service_type?: string
-  status: ConnectorConnectorStatus
-  sync_cursor?: any
-  sync_now: boolean
-}
-
-export interface ConnectorConnectorConfigProperties {
-  category?: string
-  default_value: ScalarValue
-  depends_on: ConnectorDependency[]
-  display: ConnectorDisplayType
-  label: string
-  options: ConnectorSelectOption[]
-  order?: integer
-  placeholder?: string
-  required: boolean
-  sensitive: boolean
-  tooltip?: string | null
-  type?: ConnectorConnectorFieldType
-  ui_restrictions?: string[]
-  validations?: ConnectorValidation[]
-  value: any
-}
-
-export type ConnectorConnectorConfiguration = Record
-
-export type ConnectorConnectorCustomScheduling = Record
-
-export interface ConnectorConnectorFeatures {
-  document_level_security?: ConnectorFeatureEnabled
-  incremental_sync?: ConnectorFeatureEnabled
-  native_connector_api_keys?: ConnectorFeatureEnabled
-  sync_rules?: ConnectorSyncRulesFeature
-}
-
-export type ConnectorConnectorFieldType = 'str' | 'int' | 'list' | 'bool'
-
-export interface ConnectorConnectorScheduling {
-  enabled: boolean
-  interval: string
-}
-
-export type ConnectorConnectorStatus = 'created' | 'needs_configuration' | 'configured' | 'connected' | 'error'
-
-export interface ConnectorConnectorSyncJob {
-  cancelation_requested_at?: DateTime
-  canceled_at?: DateTime
-  completed_at?: DateTime
-  connector: ConnectorSyncJobConnectorReference
-  created_at: DateTime
-  deleted_document_count: long
-  error?: string
-  id: Id
-  indexed_document_count: long
-  indexed_document_volume: long
-  job_type: ConnectorSyncJobType
-  last_seen?: DateTime
-  metadata: Record
-  started_at?: DateTime
-  status: ConnectorSyncStatus
-  total_document_count: long
-  trigger_method: ConnectorSyncJobTriggerMethod
-  worker_hostname?: string
-}
-
-export interface ConnectorCustomScheduling {
-  configuration_overrides: ConnectorCustomSchedulingConfigurationOverrides
-  enabled: boolean
-  interval: string
-  last_synced?: DateTime
-  name: string
-}
-
-export interface ConnectorCustomSchedulingConfigurationOverrides {
-  max_crawl_depth?: integer
-  sitemap_discovery_disabled?: boolean
-  domain_allowlist?: string[]
-  sitemap_urls?: string[]
-  seed_urls?: string[]
-}
-
-export interface ConnectorDependency {
-  field: string
-  value: ScalarValue
-}
-
-export type ConnectorDisplayType = 'textbox' | 'textarea' | 'numeric' | 'toggle' | 'dropdown'
-
-export interface ConnectorFeatureEnabled {
-  enabled: boolean
-}
-
-export interface ConnectorFilteringAdvancedSnippet {
-  created_at?: DateTime
-  updated_at?: DateTime
-  value: any
-}
-
-export interface ConnectorFilteringConfig {
-  active: ConnectorFilteringRules
-  domain?: string
-  draft: ConnectorFilteringRules
-}
-
-export type ConnectorFilteringPolicy = 'exclude' | 'include'
-
-export interface ConnectorFilteringRule {
-  created_at?: DateTime
-  field: Field
-  id: Id
-  order: integer
-  policy: ConnectorFilteringPolicy
-  rule: ConnectorFilteringRuleRule
-  updated_at?: DateTime
-  value: string
-}
-
-export type ConnectorFilteringRuleRule = 'contains' | 'ends_with' | 'equals' | 'regex' | 'starts_with' | '>' | '<'
-
-export interface ConnectorFilteringRules {
-  advanced_snippet: ConnectorFilteringAdvancedSnippet
-  rules: ConnectorFilteringRule[]
-  validation: ConnectorFilteringRulesValidation
-}
-
-export interface ConnectorFilteringRulesValidation {
-  errors: ConnectorFilteringValidation[]
-  state: ConnectorFilteringValidationState
-}
-
-export interface ConnectorFilteringValidation {
-  ids: Id[]
-  messages: string[]
-}
-
-export type ConnectorFilteringValidationState = 'edited' | 'invalid' | 'valid'
-
-export interface ConnectorGreaterThanValidation {
-  type: 'greater_than'
-  constraint: double
-}
-
-export interface ConnectorIncludedInValidation {
-  type: 'included_in'
-  constraint: ScalarValue[]
-}
-
-export interface ConnectorIngestPipelineParams {
-  extract_binary_content: boolean
-  name: string
-  reduce_whitespace: boolean
-  run_ml_inference: boolean
-}
-
-export interface ConnectorLessThanValidation {
-  type: 'less_than'
-  constraint: double
-}
-
-export interface ConnectorListTypeValidation {
-  type: 'list_type'
-  constraint: string
-}
-
-export interface ConnectorRegexValidation {
-  type: 'regex'
-  constraint: string
-}
-
-export interface ConnectorSchedulingConfiguration {
-  access_control?: ConnectorConnectorScheduling
-  full?: ConnectorConnectorScheduling
-  incremental?: ConnectorConnectorScheduling
-}
-
-export interface ConnectorSelectOption {
-  label: string
-  value: ScalarValue
-}
-
-export interface ConnectorSyncJobConnectorReference {
-  configuration: ConnectorConnectorConfiguration
-  filtering: ConnectorFilteringRules
-  id: Id
-  index_name: string
-  language?: string
-  pipeline?: ConnectorIngestPipelineParams
-  service_type: string
-  sync_cursor?: any
-}
-
-export type ConnectorSyncJobTriggerMethod = 'on_demand' | 'scheduled'
-
-export type ConnectorSyncJobType = 'full' | 'incremental' | 'access_control'
-
-export interface ConnectorSyncRulesFeature {
-  advanced?: ConnectorFeatureEnabled
-  basic?: ConnectorFeatureEnabled
-}
-
-export type ConnectorSyncStatus = 'canceling' | 'canceled' | 'completed' | 'error' | 'in_progress' | 'pending' | 'suspended'
-
-export type ConnectorValidation = ConnectorLessThanValidation | ConnectorGreaterThanValidation | ConnectorListTypeValidation | ConnectorIncludedInValidation | ConnectorRegexValidation
-
-export interface ConnectorCheckInRequest extends RequestBase {
-  connector_id: Id
-}
-
-export interface ConnectorCheckInResponse {
-  result: Result
-}
-
-export interface ConnectorDeleteRequest extends RequestBase {
-  connector_id: Id
-  delete_sync_jobs?: boolean
-}
-
-export type ConnectorDeleteResponse = AcknowledgedResponseBase
-
-export interface ConnectorGetRequest extends RequestBase {
-  connector_id: Id
-}
-
-export type ConnectorGetResponse = ConnectorConnector
-
-export interface ConnectorLastSyncRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    last_access_control_sync_error?: string
-    last_access_control_sync_scheduled_at?: DateTime
-    last_access_control_sync_status?: ConnectorSyncStatus
-    last_deleted_document_count?: long
-    last_incremental_sync_scheduled_at?: DateTime
-    last_indexed_document_count?: long
-    last_seen?: DateTime
-    last_sync_error?: string
-    last_sync_scheduled_at?: DateTime
-    last_sync_status?: ConnectorSyncStatus
-    last_synced?: DateTime
-    sync_cursor?: any
-  }
-}
-
-export interface ConnectorLastSyncResponse {
-  result: Result
-}
-
-export interface ConnectorListRequest extends RequestBase {
-  from?: integer
-  size?: integer
-  index_name?: Indices
-  connector_name?: Names
-  service_type?: Names
-  query?: string
-}
-
-export interface ConnectorListResponse {
-  count: long
-  results: ConnectorConnector[]
-}
-
-export interface ConnectorPostRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    description?: string
-    index_name?: IndexName
-    is_native?: boolean
-    language?: string
-    name?: string
-    service_type?: string
-  }
-}
-
-export interface ConnectorPostResponse {
-  result: Result
-  id: Id
-}
-
-export interface ConnectorPutRequest extends RequestBase {
-  connector_id?: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    description?: string
-    index_name?: IndexName
-    is_native?: boolean
-    language?: string
-    name?: string
-    service_type?: string
-  }
-}
-
-export interface ConnectorPutResponse {
-  result: Result
-  id: Id
-}
-
-export interface ConnectorSyncJobCancelRequest extends RequestBase {
-  connector_sync_job_id: Id
-}
-
-export interface ConnectorSyncJobCancelResponse {
-  result: Result
-}
-
-export interface ConnectorSyncJobDeleteRequest extends RequestBase {
-  connector_sync_job_id: Id
-}
-
-export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase
-
-export interface ConnectorSyncJobGetRequest extends RequestBase {
-  connector_sync_job_id: Id
-}
-
-export type ConnectorSyncJobGetResponse = ConnectorConnectorSyncJob
-
-export interface ConnectorSyncJobListRequest extends RequestBase {
-  from?: integer
-  size?: integer
-  status?: ConnectorSyncStatus
-  connector_id?: Id
-  job_type?: ConnectorSyncJobType | ConnectorSyncJobType[]
-}
-
-export interface ConnectorSyncJobListResponse {
-  count: long
-  results: ConnectorConnectorSyncJob[]
-}
-
-export interface ConnectorSyncJobPostRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    id: Id
-    job_type?: ConnectorSyncJobType
-    trigger_method?: ConnectorSyncJobTriggerMethod
-  }
-}
-
-export interface ConnectorSyncJobPostResponse {
-  id: Id
-}
-
-export interface ConnectorUpdateActiveFilteringRequest extends RequestBase {
-  connector_id: Id
-}
-
-export interface ConnectorUpdateActiveFilteringResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateApiKeyIdRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    api_key_id?: string
-    api_key_secret_id?: string
-  }
-}
-
-export interface ConnectorUpdateApiKeyIdResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateConfigurationRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    configuration?: ConnectorConnectorConfiguration
-    values?: Record
-  }
-}
-
-export interface ConnectorUpdateConfigurationResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateErrorRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    error: SpecUtilsWithNullValue
-  }
-}
-
-export interface ConnectorUpdateErrorResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateFilteringRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    filtering?: ConnectorFilteringConfig[]
-    rules?: ConnectorFilteringRule[]
-    advanced_snippet?: ConnectorFilteringAdvancedSnippet
-  }
-}
-
-export interface ConnectorUpdateFilteringResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateFilteringValidationRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    validation: ConnectorFilteringRulesValidation
-  }
-}
-
-export interface ConnectorUpdateFilteringValidationResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateIndexNameRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    index_name: SpecUtilsWithNullValue
-  }
-}
-
-export interface ConnectorUpdateIndexNameResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateNameRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    name?: string
-    description?: string
-  }
-}
-
-export interface ConnectorUpdateNameResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateNativeRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    is_native: boolean
-  }
-}
-
-export interface ConnectorUpdateNativeResponse {
-  result: Result
-}
-
-export interface ConnectorUpdatePipelineRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    pipeline: ConnectorIngestPipelineParams
-  }
-}
-
-export interface ConnectorUpdatePipelineResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateSchedulingRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    scheduling: ConnectorSchedulingConfiguration
-  }
-}
-
-export interface ConnectorUpdateSchedulingResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateServiceTypeRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    service_type: string
-  }
-}
-
-export interface ConnectorUpdateServiceTypeResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateStatusRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    status: ConnectorConnectorStatus
-  }
-}
-
-export interface ConnectorUpdateStatusResponse {
-  result: Result
-}
-
-export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase {
-  index_uuid: Uuid
-  accept_data_loss: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase
-
-export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase {
-  index_uuid: Uuid
-  accept_data_loss: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type DanglingIndicesImportDanglingIndexResponse = AcknowledgedResponseBase
-
-export interface DanglingIndicesListDanglingIndicesDanglingIndex {
-  index_name: string
-  index_uuid: string
-  creation_date_millis: EpochTime
-  node_ids: Ids
-}
-
-export interface DanglingIndicesListDanglingIndicesRequest extends RequestBase {
-}
-
-export interface DanglingIndicesListDanglingIndicesResponse {
-  dangling_indices: DanglingIndicesListDanglingIndicesDanglingIndex[]
-}
-
-export interface EnrichPolicy {
-  enrich_fields: Fields
-  indices: Indices
-  match_field: Field
-  query?: QueryDslQueryContainer
-  name?: Name
-  elasticsearch_version?: string
-}
-
-export type EnrichPolicyType = 'geo_match' | 'match' | 'range'
-
-export interface EnrichSummary {
-  config: Partial>
-}
-
-export interface EnrichDeletePolicyRequest extends RequestBase {
-  name: Name
-}
-
-export type EnrichDeletePolicyResponse = AcknowledgedResponseBase
-
-export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED'
-
-export interface EnrichExecutePolicyExecuteEnrichPolicyStatus {
-  phase: EnrichExecutePolicyEnrichPolicyPhase
-}
-
-export interface EnrichExecutePolicyRequest extends RequestBase {
-  name: Name
-  wait_for_completion?: boolean
-}
-
-export interface EnrichExecutePolicyResponse {
-  status?: EnrichExecutePolicyExecuteEnrichPolicyStatus
-  task_id?: TaskId
-}
-
-export interface EnrichGetPolicyRequest extends RequestBase {
-  name?: Names
-}
-
-export interface EnrichGetPolicyResponse {
-  policies: EnrichSummary[]
-}
-
-export interface EnrichPutPolicyRequest extends RequestBase {
-  name: Name
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    geo_match?: EnrichPolicy
-    match?: EnrichPolicy
-    range?: EnrichPolicy
-  }
-}
-
-export type EnrichPutPolicyResponse = AcknowledgedResponseBase
-
-export interface EnrichStatsCacheStats {
-  node_id: Id
-  count: integer
-  hits: integer
-  hits_time_in_millis: DurationValue
-  misses: integer
-  misses_time_in_millis: DurationValue
-  evictions: integer
-  size_in_bytes: long
-}
-
-export interface EnrichStatsCoordinatorStats {
-  executed_searches_total: long
-  node_id: Id
-  queue_size: integer
-  remote_requests_current: integer
-  remote_requests_total: long
-}
-
-export interface EnrichStatsExecutingPolicy {
-  name: Name
-  task: TasksTaskInfo
-}
-
-export interface EnrichStatsRequest extends RequestBase {
-}
-
-export interface EnrichStatsResponse {
-  coordinator_stats: EnrichStatsCoordinatorStats[]
-  executing_policies: EnrichStatsExecutingPolicy[]
-  cache_stats?: EnrichStatsCacheStats[]
-}
-
-export interface EqlEqlHits {
-  total?: SearchTotalHits
-  events?: EqlHitsEvent[]
-  sequences?: EqlHitsSequence[]
-}
-
-export interface EqlEqlSearchResponseBase {
-  id?: Id
-  is_partial?: boolean
-  is_running?: boolean
-  took?: DurationValue
-  timed_out?: boolean
-  hits: EqlEqlHits
-}
-
-export interface EqlHitsEvent {
-  _index: IndexName
-  _id: Id
-  _source: TEvent
-  missing?: boolean
-  fields?: Record
-}
-
-export interface EqlHitsSequence {
-  events: EqlHitsEvent[]
-  join_keys?: any[]
-}
-
-export interface EqlDeleteRequest extends RequestBase {
-  id: Id
-}
-
-export type EqlDeleteResponse = AcknowledgedResponseBase
-
-export interface EqlGetRequest extends RequestBase {
-  id: Id
-  keep_alive?: Duration
-  wait_for_completion_timeout?: Duration
-}
-
-export type EqlGetResponse = EqlEqlSearchResponseBase
-
-export interface EqlGetStatusRequest extends RequestBase {
-  id: Id
-}
-
-export interface EqlGetStatusResponse {
-  id: Id
-  is_partial: boolean
-  is_running: boolean
-  start_time_in_millis?: EpochTime
-  expiration_time_in_millis?: EpochTime
-  completion_status?: integer
-}
-
-export interface EqlSearchRequest extends RequestBase {
-  index: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    query: string
-    case_sensitive?: boolean
-    event_category_field?: Field
-    tiebreaker_field?: Field
-    timestamp_field?: Field
-    fetch_size?: uint
-    filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
-    keep_alive?: Duration
-    keep_on_completion?: boolean
-    wait_for_completion_timeout?: Duration
-    size?: uint
-    fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[]
-    result_position?: EqlSearchResultPosition
-    runtime_mappings?: MappingRuntimeFields
-    max_samples_per_key?: integer
-  }
-}
-
-export type EqlSearchResponse = EqlEqlSearchResponseBase
-
-export type EqlSearchResultPosition = 'tail' | 'head'
-
-export interface EsqlTableValuesContainer {
-  integer?: EsqlTableValuesIntegerValue[]
-  keyword?: EsqlTableValuesKeywordValue[]
-  long?: EsqlTableValuesLongValue[]
-  double?: EsqlTableValuesLongDouble[]
-}
-
-export type EsqlTableValuesIntegerValue = integer | integer[]
-
-export type EsqlTableValuesKeywordValue = string | string[]
-
-export type EsqlTableValuesLongDouble = double | double[]
-
-export type EsqlTableValuesLongValue = long | long[]
-
-export type EsqlQueryEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow'
-
-export interface EsqlQueryRequest extends RequestBase {
-  format?: EsqlQueryEsqlFormat
-  delimiter?: string
-  drop_null_columns?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    columnar?: boolean
-    filter?: QueryDslQueryContainer
-    locale?: string
-    params?: FieldValue[]
-    profile?: boolean
-    query: string
-    tables?: Record>
-  }
-}
-
-export type EsqlQueryResponse = EsqlColumns
-
-export interface FeaturesFeature {
-  name: string
-  description: string
-}
-
-export interface FeaturesGetFeaturesRequest extends RequestBase {
-}
-
-export interface FeaturesGetFeaturesResponse {
-  features: FeaturesFeature[]
-}
-
-export interface FeaturesResetFeaturesRequest extends RequestBase {
-}
-
-export interface FeaturesResetFeaturesResponse {
-  features: FeaturesFeature[]
-}
-
-export type FleetCheckpoint = long
-
-export interface FleetGlobalCheckpointsRequest extends RequestBase {
-  index: IndexName | IndexAlias
-  wait_for_advance?: boolean
-  wait_for_index?: boolean
-  checkpoints?: FleetCheckpoint[]
-  timeout?: Duration
-}
-
-export interface FleetGlobalCheckpointsResponse {
-  global_checkpoints: FleetCheckpoint[]
-  timed_out: boolean
-}
-
-export interface FleetMsearchRequest extends RequestBase {
-  index?: IndexName | IndexAlias
-  allow_no_indices?: boolean
-  ccs_minimize_roundtrips?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-  max_concurrent_searches?: long
-  max_concurrent_shard_requests?: long
-  pre_filter_shard_size?: long
-  search_type?: SearchType
-  rest_total_hits_as_int?: boolean
-  typed_keys?: boolean
-  wait_for_checkpoints?: FleetCheckpoint[]
-  allow_partial_search_results?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, use 'searches' instead. */
-  body?: MsearchRequestItem[]
-}
-
-export interface FleetMsearchResponse {
-  docs: MsearchResponseItem[]
-}
-
-export interface FleetSearchRequest extends RequestBase {
-  index: IndexName | IndexAlias
-  allow_no_indices?: boolean
-  analyzer?: string
-  analyze_wildcard?: boolean
-  batched_reduce_size?: long
-  ccs_minimize_roundtrips?: boolean
-  default_operator?: QueryDslOperator
-  df?: string
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-  lenient?: boolean
-  max_concurrent_shard_requests?: long
-  preference?: string
-  pre_filter_shard_size?: long
-  request_cache?: boolean
-  routing?: Routing
-  scroll?: Duration
-  search_type?: SearchType
-  suggest_field?: Field
-  suggest_mode?: SuggestMode
-  suggest_size?: long
-  suggest_text?: string
-  typed_keys?: boolean
-  rest_total_hits_as_int?: boolean
-  _source_excludes?: Fields
-  _source_includes?: Fields
-  q?: string
-  wait_for_checkpoints?: FleetCheckpoint[]
-  allow_partial_search_results?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aggregations?: Record
-    /** @alias aggregations */
-    aggs?: Record
-    collapse?: SearchFieldCollapse
-    explain?: boolean
-    ext?: Record
-    from?: integer
-    highlight?: SearchHighlight
-    track_total_hits?: SearchTrackHits
-    indices_boost?: Record[]
-    docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
-    min_score?: double
-    post_filter?: QueryDslQueryContainer
-    profile?: boolean
-    query?: QueryDslQueryContainer
-    rescore?: SearchRescore | SearchRescore[]
-    script_fields?: Record
-    search_after?: SortResults
-    size?: integer
-    slice?: SlicedScroll
-    sort?: Sort
-    _source?: SearchSourceConfig
-    fields?: (QueryDslFieldAndFormat | Field)[]
-    suggest?: SearchSuggester
-    terminate_after?: long
-    timeout?: string
-    track_scores?: boolean
-    version?: boolean
-    seq_no_primary_term?: boolean
-    stored_fields?: Fields
-    pit?: SearchPointInTimeReference
-    runtime_mappings?: MappingRuntimeFields
-    stats?: string[]
-  }
-}
-
-export interface FleetSearchResponse {
-  took: long
-  timed_out: boolean
-  _shards: ShardStatistics
-  hits: SearchHitsMetadata
-  aggregations?: Record
-  _clusters?: ClusterStatistics
-  fields?: Record
-  max_score?: double
-  num_reduce_phases?: long
-  profile?: SearchProfile
-  pit_id?: Id
-  _scroll_id?: ScrollId
-  suggest?: Record[]>
-  terminated_early?: boolean
-}
-
-export interface GraphConnection {
-  doc_count: long
-  source: long
-  target: long
-  weight: double
-}
-
-export interface GraphExploreControls {
-  sample_diversity?: GraphSampleDiversity
-  sample_size?: integer
-  timeout?: Duration
-  use_significance: boolean
-}
-
-export interface GraphHop {
-  connections?: GraphHop
-  query: QueryDslQueryContainer
-  vertices: GraphVertexDefinition[]
-}
-
-export interface GraphSampleDiversity {
-  field: Field
-  max_docs_per_value: integer
-}
-
-export interface GraphVertex {
-  depth: long
-  field: Field
-  term: string
-  weight: double
-}
-
-export interface GraphVertexDefinition {
-  exclude?: string[]
-  field: Field
-  include?: GraphVertexInclude[]
-  min_doc_count?: long
-  shard_min_doc_count?: long
-  size?: integer
-}
-
-export interface GraphVertexInclude {
-  boost: double
-  term: string
-}
-
-export interface GraphExploreRequest extends RequestBase {
-  index: Indices
-  routing?: Routing
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    connections?: GraphHop
-    controls?: GraphExploreControls
-    query?: QueryDslQueryContainer
-    vertices?: GraphVertexDefinition[]
-  }
-}
-
-export interface GraphExploreResponse {
-  connections: GraphConnection[]
-  failures: ShardFailure[]
-  timed_out: boolean
-  took: long
-  vertices: GraphVertex[]
-}
-
-export interface IlmActions {
-  allocate?: IlmAllocateAction
-  delete?: IlmDeleteAction
-  downsample?: IlmDownsampleAction
-  freeze?: EmptyObject
-  forcemerge?: IlmForceMergeAction
-  migrate?: IlmMigrateAction
-  readonly?: EmptyObject
-  rollover?: IlmRolloverAction
-  set_priority?: IlmSetPriorityAction
-  searchable_snapshot?: IlmSearchableSnapshotAction
-  shrink?: IlmShrinkAction
-  unfollow?: EmptyObject
-  wait_for_snapshot?: IlmWaitForSnapshotAction
-}
-
-export interface IlmAllocateAction {
-  number_of_replicas?: integer
-  total_shards_per_node?: integer
-  include?: Record
-  exclude?: Record
-  require?: Record
-}
-
-export interface IlmDeleteAction {
-  delete_searchable_snapshot?: boolean
-}
-
-export interface IlmDownsampleAction {
-  fixed_interval: DurationLarge
-  wait_timeout?: Duration
-}
-
-export interface IlmForceMergeAction {
-  max_num_segments: integer
-  index_codec?: string
-}
-
-export interface IlmMigrateAction {
-  enabled?: boolean
-}
-
-export interface IlmPhase {
-  actions?: IlmActions
-  min_age?: Duration | long
-}
-
-export interface IlmPhases {
-  cold?: IlmPhase
-  delete?: IlmPhase
-  frozen?: IlmPhase
-  hot?: IlmPhase
-  warm?: IlmPhase
-}
-
-export interface IlmPolicy {
-  phases: IlmPhases
-  _meta?: Metadata
-}
-
-export interface IlmRolloverAction {
-  max_size?: ByteSize
-  max_primary_shard_size?: ByteSize
-  max_age?: Duration
-  max_docs?: long
-  max_primary_shard_docs?: long
-  min_size?: ByteSize
-  min_primary_shard_size?: ByteSize
-  min_age?: Duration
-  min_docs?: long
-  min_primary_shard_docs?: long
-}
-
-export interface IlmSearchableSnapshotAction {
-  snapshot_repository: string
-  force_merge_index?: boolean
-}
-
-export interface IlmSetPriorityAction {
-  priority?: integer
-}
-
-export interface IlmShrinkAction {
-  number_of_shards?: integer
-  max_primary_shard_size?: ByteSize
-  allow_write_after_shrink?: boolean
-}
-
-export interface IlmWaitForSnapshotAction {
-  policy: string
-}
-
-export interface IlmDeleteLifecycleRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase
-
-export type IlmExplainLifecycleLifecycleExplain = IlmExplainLifecycleLifecycleExplainManaged | IlmExplainLifecycleLifecycleExplainUnmanaged
-
-export interface IlmExplainLifecycleLifecycleExplainManaged {
-  action?: Name
-  action_time?: DateTime
-  action_time_millis?: EpochTime
-  age?: Duration
-  failed_step?: Name
-  failed_step_retry_count?: integer
-  index?: IndexName
-  index_creation_date?: DateTime
-  index_creation_date_millis?: EpochTime
-  is_auto_retryable_error?: boolean
-  lifecycle_date?: DateTime
-  lifecycle_date_millis?: EpochTime
-  managed: true
-  phase: Name
-  phase_time?: DateTime
-  phase_time_millis?: EpochTime
-  policy: Name
-  step?: Name
-  step_info?: Record
-  step_time?: DateTime
-  step_time_millis?: EpochTime
-  phase_execution?: IlmExplainLifecycleLifecycleExplainPhaseExecution
-  time_since_index_creation?: Duration
-}
-
-export interface IlmExplainLifecycleLifecycleExplainPhaseExecution {
-  policy: Name
-  version: VersionNumber
-  modified_date_in_millis: EpochTime
-}
-
-export interface IlmExplainLifecycleLifecycleExplainUnmanaged {
-  index: IndexName
-  managed: false
-}
-
-export interface IlmExplainLifecycleRequest extends RequestBase {
-  index: IndexName
-  only_errors?: boolean
-  only_managed?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface IlmExplainLifecycleResponse {
-  indices: Record
-}
-
-export interface IlmGetLifecycleLifecycle {
-  modified_date: DateTime
-  policy: IlmPolicy
-  version: VersionNumber
-}
-
-export interface IlmGetLifecycleRequest extends RequestBase {
-  name?: Name
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IlmGetLifecycleResponse = Record
-
-export interface IlmGetStatusRequest extends RequestBase {
-}
-
-export interface IlmGetStatusResponse {
-  operation_mode: LifecycleOperationMode
-}
-
-export interface IlmMigrateToDataTiersRequest extends RequestBase {
-  dry_run?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    legacy_template_to_delete?: string
-    node_attribute?: string
-  }
-}
-
-export interface IlmMigrateToDataTiersResponse {
-  dry_run: boolean
-  removed_legacy_template: string
-  migrated_ilm_policies: string[]
-  migrated_indices: Indices
-  migrated_legacy_templates: string[]
-  migrated_composable_templates: string[]
-  migrated_component_templates: string[]
-}
-
-export interface IlmMoveToStepRequest extends RequestBase {
-  index: IndexName
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    current_step: IlmMoveToStepStepKey
-    next_step: IlmMoveToStepStepKey
-  }
-}
-
-export type IlmMoveToStepResponse = AcknowledgedResponseBase
-
-export interface IlmMoveToStepStepKey {
-  action?: string
-  name?: string
-  phase: string
-}
-
-export interface IlmPutLifecycleRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    policy?: IlmPolicy
-  }
-}
-
-export type IlmPutLifecycleResponse = AcknowledgedResponseBase
-
-export interface IlmRemovePolicyRequest extends RequestBase {
-  index: IndexName
-}
-
-export interface IlmRemovePolicyResponse {
-  failed_indexes: IndexName[]
-  has_failures: boolean
-}
-
-export interface IlmRetryRequest extends RequestBase {
-  index: IndexName
-}
-
-export type IlmRetryResponse = AcknowledgedResponseBase
-
-export interface IlmStartRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IlmStartResponse = AcknowledgedResponseBase
-
-export interface IlmStopRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IlmStopResponse = AcknowledgedResponseBase
-
-export interface IndicesAlias {
-  filter?: QueryDslQueryContainer
-  index_routing?: Routing
-  is_hidden?: boolean
-  is_write_index?: boolean
-  routing?: Routing
-  search_routing?: Routing
-}
-
-export interface IndicesAliasDefinition {
-  filter?: QueryDslQueryContainer
-  index_routing?: string
-  is_write_index?: boolean
-  routing?: string
-  search_routing?: string
-  is_hidden?: boolean
-}
-
-export interface IndicesCacheQueries {
-  enabled: boolean
-}
-
-export interface IndicesDataStream {
-  _meta?: Metadata
-  allow_custom_routing?: boolean
-  failure_store?: IndicesFailureStore
-  generation: integer
-  hidden: boolean
-  ilm_policy?: Name
-  next_generation_managed_by: IndicesManagedBy
-  prefer_ilm: boolean
-  indices: IndicesDataStreamIndex[]
-  lifecycle?: IndicesDataStreamLifecycleWithRollover
-  name: DataStreamName
-  replicated?: boolean
-  rollover_on_write: boolean
-  status: HealthStatus
-  system?: boolean
-  template: Name
-  timestamp_field: IndicesDataStreamTimestampField
-}
-
-export interface IndicesDataStreamIndex {
-  index_name: IndexName
-  index_uuid: Uuid
-  ilm_policy?: Name
-  managed_by?: IndicesManagedBy
-  prefer_ilm?: boolean
-}
-
-export interface IndicesDataStreamLifecycle {
-  data_retention?: Duration
-  downsampling?: IndicesDataStreamLifecycleDownsampling
-  enabled?: boolean
-}
-
-export interface IndicesDataStreamLifecycleDownsampling {
-  rounds: IndicesDownsamplingRound[]
-}
-
-export interface IndicesDataStreamLifecycleRolloverConditions {
-  min_age?: Duration
-  max_age?: string
-  min_docs?: long
-  max_docs?: long
-  min_size?: ByteSize
-  max_size?: ByteSize
-  min_primary_shard_size?: ByteSize
-  max_primary_shard_size?: ByteSize
-  min_primary_shard_docs?: long
-  max_primary_shard_docs?: long
-}
-
-export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStreamLifecycle {
-  rollover?: IndicesDataStreamLifecycleRolloverConditions
-}
-
-export interface IndicesDataStreamTimestampField {
-  name: Field
-}
-
-export interface IndicesDataStreamVisibility {
-  hidden?: boolean
-  allow_custom_routing?: boolean
-}
-
-export interface IndicesDownsampleConfig {
-  fixed_interval: DurationLarge
-}
-
-export interface IndicesDownsamplingRound {
-  after: Duration
-  config: IndicesDownsampleConfig
-}
-
-export interface IndicesFailureStore {
-  enabled: boolean
-  indices: IndicesDataStreamIndex[]
-  rollover_on_write: boolean
-}
-
-export interface IndicesFielddataFrequencyFilter {
-  max: double
-  min: double
-  min_segment_size: integer
-}
-
-export type IndicesIndexCheckOnStartup = boolean | 'true' | 'false' | 'checksum'
-
-export interface IndicesIndexRouting {
-  allocation?: IndicesIndexRoutingAllocation
-  rebalance?: IndicesIndexRoutingRebalance
-}
-
-export interface IndicesIndexRoutingAllocation {
-  enable?: IndicesIndexRoutingAllocationOptions
-  include?: IndicesIndexRoutingAllocationInclude
-  initial_recovery?: IndicesIndexRoutingAllocationInitialRecovery
-  disk?: IndicesIndexRoutingAllocationDisk
-}
-
-export interface IndicesIndexRoutingAllocationDisk {
-  threshold_enabled?: boolean | string
-}
-
-export interface IndicesIndexRoutingAllocationInclude {
-  _tier_preference?: string
-  _id?: Id
-}
-
-export interface IndicesIndexRoutingAllocationInitialRecovery {
-  _id?: Id
-}
-
-export type IndicesIndexRoutingAllocationOptions = 'all' | 'primaries' | 'new_primaries' | 'none'
-
-export interface IndicesIndexRoutingRebalance {
-  enable: IndicesIndexRoutingRebalanceOptions
-}
-
-export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replicas' | 'none'
-
-export interface IndicesIndexSegmentSort {
-  field?: Fields
-  order?: IndicesSegmentSortOrder | IndicesSegmentSortOrder[]
-  mode?: IndicesSegmentSortMode | IndicesSegmentSortMode[]
-  missing?: IndicesSegmentSortMissing | IndicesSegmentSortMissing[]
-}
-
-export interface IndicesIndexSettingBlocks {
-  read_only?: SpecUtilsStringified
-  read_only_allow_delete?: SpecUtilsStringified
-  read?: SpecUtilsStringified
-  write?: SpecUtilsStringified
-  metadata?: SpecUtilsStringified
-}
-
-export interface IndicesIndexSettingsKeys {
-  index?: IndicesIndexSettings
-  mode?: string
-  routing_path?: string | string[]
-  soft_deletes?: IndicesSoftDeletes
-  sort?: IndicesIndexSegmentSort
-  number_of_shards?: integer | string
-  number_of_replicas?: integer | string
-  number_of_routing_shards?: integer
-  check_on_startup?: IndicesIndexCheckOnStartup
-  codec?: string
-  routing_partition_size?: SpecUtilsStringified
-  load_fixed_bitset_filters_eagerly?: boolean
-  hidden?: boolean | string
-  auto_expand_replicas?: string
-  merge?: IndicesMerge
-  search?: IndicesSettingsSearch
-  refresh_interval?: Duration
-  max_result_window?: integer
-  max_inner_result_window?: integer
-  max_rescore_window?: integer
-  max_docvalue_fields_search?: integer
-  max_script_fields?: integer
-  max_ngram_diff?: integer
-  max_shingle_diff?: integer
-  blocks?: IndicesIndexSettingBlocks
-  max_refresh_listeners?: integer
-  analyze?: IndicesSettingsAnalyze
-  highlight?: IndicesSettingsHighlight
-  max_terms_count?: integer
-  max_regex_length?: integer
-  routing?: IndicesIndexRouting
-  gc_deletes?: Duration
-  default_pipeline?: PipelineName
-  final_pipeline?: PipelineName
-  lifecycle?: IndicesIndexSettingsLifecycle
-  provided_name?: Name
-  creation_date?: SpecUtilsStringified>
-  creation_date_string?: DateTime
-  uuid?: Uuid
-  version?: IndicesIndexVersioning
-  verified_before_close?: boolean | string
-  format?: string | integer
-  max_slices_per_scroll?: integer
-  translog?: IndicesTranslog
-  query_string?: IndicesSettingsQueryString
-  priority?: integer | string
-  top_metrics_max_size?: integer
-  analysis?: IndicesIndexSettingsAnalysis
-  settings?: IndicesIndexSettings
-  time_series?: IndicesIndexSettingsTimeSeries
-  queries?: IndicesQueries
-  similarity?: Record
-  mapping?: IndicesMappingLimitSettings
-  'indexing.slowlog'?: IndicesIndexingSlowlogSettings
-  indexing_pressure?: IndicesIndexingPressure
-  store?: IndicesStorage
-}
-export type IndicesIndexSettings = IndicesIndexSettingsKeys
-& { [property: string]: any }
-
-export interface IndicesIndexSettingsAnalysis {
-  analyzer?: Record
-  char_filter?: Record
-  filter?: Record
-  normalizer?: Record
-  tokenizer?: Record
-}
-
-export interface IndicesIndexSettingsLifecycle {
-  name?: Name
-  indexing_complete?: SpecUtilsStringified
-  origination_date?: long
-  parse_origination_date?: boolean
-  step?: IndicesIndexSettingsLifecycleStep
-  rollover_alias?: string
-}
-
-export interface IndicesIndexSettingsLifecycleStep {
-  wait_time_threshold?: Duration
-}
-
-export interface IndicesIndexSettingsTimeSeries {
-  end_time?: DateTime
-  start_time?: DateTime
-}
-
-export interface IndicesIndexState {
-  aliases?: Record
-  mappings?: MappingTypeMapping
-  settings?: IndicesIndexSettings
-  defaults?: IndicesIndexSettings
-  data_stream?: DataStreamName
-  lifecycle?: IndicesDataStreamLifecycle
-}
-
-export interface IndicesIndexTemplate {
-  index_patterns: Names
-  composed_of: Name[]
-  template?: IndicesIndexTemplateSummary
-  version?: VersionNumber
-  priority?: long
-  _meta?: Metadata
-  allow_auto_create?: boolean
-  data_stream?: IndicesIndexTemplateDataStreamConfiguration
-  deprecated?: boolean
-  ignore_missing_component_templates?: Names
-}
-
-export interface IndicesIndexTemplateDataStreamConfiguration {
-  hidden?: boolean
-  allow_custom_routing?: boolean
-}
-
-export interface IndicesIndexTemplateSummary {
-  aliases?: Record
-  mappings?: MappingTypeMapping
-  settings?: IndicesIndexSettings
-  lifecycle?: IndicesDataStreamLifecycleWithRollover
-}
-
-export interface IndicesIndexVersioning {
-  created?: VersionString
-  created_string?: string
-}
-
-export interface IndicesIndexingPressure {
-  memory: IndicesIndexingPressureMemory
-}
-
-export interface IndicesIndexingPressureMemory {
-  limit?: integer
-}
-
-export interface IndicesIndexingSlowlogSettings {
-  level?: string
-  source?: integer
-  reformat?: boolean
-  threshold?: IndicesIndexingSlowlogTresholds
-}
-
-export interface IndicesIndexingSlowlogTresholds {
-  index?: IndicesSlowlogTresholdLevels
-}
-
-export type IndicesManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged'
-
-export interface IndicesMappingLimitSettings {
-  coerce?: boolean
-  total_fields?: IndicesMappingLimitSettingsTotalFields
-  depth?: IndicesMappingLimitSettingsDepth
-  nested_fields?: IndicesMappingLimitSettingsNestedFields
-  nested_objects?: IndicesMappingLimitSettingsNestedObjects
-  field_name_length?: IndicesMappingLimitSettingsFieldNameLength
-  dimension_fields?: IndicesMappingLimitSettingsDimensionFields
-  ignore_malformed?: boolean
-}
-
-export interface IndicesMappingLimitSettingsDepth {
-  limit?: long
-}
-
-export interface IndicesMappingLimitSettingsDimensionFields {
-  limit?: long
-}
-
-export interface IndicesMappingLimitSettingsFieldNameLength {
-  limit?: long
-}
-
-export interface IndicesMappingLimitSettingsNestedFields {
-  limit?: long
-}
-
-export interface IndicesMappingLimitSettingsNestedObjects {
-  limit?: long
-}
-
-export interface IndicesMappingLimitSettingsTotalFields {
-  limit?: long
-  ignore_dynamic_beyond_limit?: boolean
-}
-
-export interface IndicesMerge {
-  scheduler?: IndicesMergeScheduler
-}
-
-export interface IndicesMergeScheduler {
-  max_thread_count?: SpecUtilsStringified
-  max_merge_count?: SpecUtilsStringified
-}
-
-export interface IndicesNumericFielddata {
-  format: IndicesNumericFielddataFormat
-}
-
-export type IndicesNumericFielddataFormat = 'array' | 'disabled'
-
-export interface IndicesQueries {
-  cache?: IndicesCacheQueries
-}
-
-export interface IndicesRetentionLease {
-  period: Duration
-}
-
-export interface IndicesSearchIdle {
-  after?: Duration
-}
-
-export type IndicesSegmentSortMissing = '_last' | '_first'
-
-export type IndicesSegmentSortMode = 'min' | 'MIN' | 'max' | 'MAX'
-
-export type IndicesSegmentSortOrder = 'asc' | 'ASC' | 'desc' | 'DESC'
-
-export interface IndicesSettingsAnalyze {
-  max_token_count?: SpecUtilsStringified
-}
-
-export interface IndicesSettingsHighlight {
-  max_analyzed_offset?: integer
-}
-
-export interface IndicesSettingsQueryString {
-  lenient: SpecUtilsStringified
-}
-
-export interface IndicesSettingsSearch {
-  idle?: IndicesSearchIdle
-  slowlog?: IndicesSlowlogSettings
-}
-
-export type IndicesSettingsSimilarity = IndicesSettingsSimilarityBm25 | IndicesSettingsSimilarityBoolean | IndicesSettingsSimilarityDfi | IndicesSettingsSimilarityDfr | IndicesSettingsSimilarityIb | IndicesSettingsSimilarityLmd | IndicesSettingsSimilarityLmj | IndicesSettingsSimilarityScripted
-
-export interface IndicesSettingsSimilarityBm25 {
-  type: 'BM25'
-  b?: double
-  discount_overlaps?: boolean
-  k1?: double
-}
-
-export interface IndicesSettingsSimilarityBoolean {
-  type: 'boolean'
-}
-
-export interface IndicesSettingsSimilarityDfi {
-  type: 'DFI'
-  independence_measure: DFIIndependenceMeasure
-}
-
-export interface IndicesSettingsSimilarityDfr {
-  type: 'DFR'
-  after_effect: DFRAfterEffect
-  basic_model: DFRBasicModel
-  normalization: Normalization
-}
-
-export interface IndicesSettingsSimilarityIb {
-  type: 'IB'
-  distribution: IBDistribution
-  lambda: IBLambda
-  normalization: Normalization
-}
-
-export interface IndicesSettingsSimilarityLmd {
-  type: 'LMDirichlet'
-  mu?: double
-}
-
-export interface IndicesSettingsSimilarityLmj {
-  type: 'LMJelinekMercer'
-  lambda?: double
-}
-
-export interface IndicesSettingsSimilarityScripted {
-  type: 'scripted'
-  script: Script | string
-  weight_script?: Script | string
-}
-
-export interface IndicesSlowlogSettings {
-  level?: string
-  source?: integer
-  reformat?: boolean
-  threshold?: IndicesSlowlogTresholds
-}
-
-export interface IndicesSlowlogTresholdLevels {
-  warn?: Duration
-  info?: Duration
-  debug?: Duration
-  trace?: Duration
-}
-
-export interface IndicesSlowlogTresholds {
-  query?: IndicesSlowlogTresholdLevels
-  fetch?: IndicesSlowlogTresholdLevels
-}
-
-export interface IndicesSoftDeletes {
-  enabled?: boolean
-  retention_lease?: IndicesRetentionLease
-}
-
-export interface IndicesStorage {
-  type: IndicesStorageType
-  allow_mmap?: boolean
-}
-
-export type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs' | string
-
-export interface IndicesTemplateMapping {
-  aliases: Record
-  index_patterns: Name[]
-  mappings: MappingTypeMapping
-  order: integer
-  settings: Record
-  version?: VersionNumber
-}
-
-export interface IndicesTranslog {
-  sync_interval?: Duration
-  durability?: IndicesTranslogDurability
-  flush_threshold_size?: ByteSize
-  retention?: IndicesTranslogRetention
-}
-
-export type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC'
-
-export interface IndicesTranslogRetention {
-  size?: ByteSize
-  age?: Duration
-}
-
-export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write'
-
-export interface IndicesAddBlockIndicesBlockStatus {
-  name: IndexName
-  blocked: boolean
-}
-
-export interface IndicesAddBlockRequest extends RequestBase {
-  index: IndexName
-  block: IndicesAddBlockIndicesBlockOptions
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface IndicesAddBlockResponse {
-  acknowledged: boolean
-  shards_acknowledged: boolean
-  indices: IndicesAddBlockIndicesBlockStatus[]
-}
-
-export interface IndicesAnalyzeAnalyzeDetail {
-  analyzer?: IndicesAnalyzeAnalyzerDetail
-  charfilters?: IndicesAnalyzeCharFilterDetail[]
-  custom_analyzer: boolean
-  tokenfilters?: IndicesAnalyzeTokenDetail[]
-  tokenizer?: IndicesAnalyzeTokenDetail
-}
-
-export interface IndicesAnalyzeAnalyzeToken {
-  end_offset: long
-  position: long
-  positionLength?: long
-  start_offset: long
-  token: string
-  type: string
-}
-
-export interface IndicesAnalyzeAnalyzerDetail {
-  name: string
-  tokens: IndicesAnalyzeExplainAnalyzeToken[]
-}
-
-export interface IndicesAnalyzeCharFilterDetail {
-  filtered_text: string[]
-  name: string
-}
-
-export interface IndicesAnalyzeExplainAnalyzeTokenKeys {
-  bytes: string
-  end_offset: long
-  keyword?: boolean
-  position: long
-  positionLength: long
-  start_offset: long
-  termFrequency: long
-  token: string
-  type: string
-}
-export type IndicesAnalyzeExplainAnalyzeToken = IndicesAnalyzeExplainAnalyzeTokenKeys
-& { [property: string]: any }
-
-export interface IndicesAnalyzeRequest extends RequestBase {
-  index?: IndexName
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    analyzer?: string
-    attributes?: string[]
-    char_filter?: AnalysisCharFilter[]
-    explain?: boolean
-    field?: Field
-    filter?: AnalysisTokenFilter[]
-    normalizer?: string
-    text?: IndicesAnalyzeTextToAnalyze
-    tokenizer?: AnalysisTokenizer
-  }
-}
-
-export interface IndicesAnalyzeResponse {
-  detail?: IndicesAnalyzeAnalyzeDetail
-  tokens?: IndicesAnalyzeAnalyzeToken[]
-}
-
-export type IndicesAnalyzeTextToAnalyze = string | string[]
-
-export interface IndicesAnalyzeTokenDetail {
-  name: string
-  tokens: IndicesAnalyzeExplainAnalyzeToken[]
-}
-
-export interface IndicesClearCacheRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  fielddata?: boolean
-  fields?: Fields
-  ignore_unavailable?: boolean
-  query?: boolean
-  request?: boolean
-}
-
-export type IndicesClearCacheResponse = ShardsOperationResponseBase
-
-export interface IndicesCloneRequest extends RequestBase {
-  index: IndexName
-  target: Name
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aliases?: Record
-    settings?: Record
-  }
-}
-
-export interface IndicesCloneResponse {
-  acknowledged: boolean
-  index: IndexName
-  shards_acknowledged: boolean
-}
-
-export interface IndicesCloseCloseIndexResult {
-  closed: boolean
-  shards?: Record
-}
-
-export interface IndicesCloseCloseShardResult {
-  failures: ShardFailure[]
-}
-
-export interface IndicesCloseRequest extends RequestBase {
-  index: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-}
-
-export interface IndicesCloseResponse {
-  acknowledged: boolean
-  indices: Record
-  shards_acknowledged: boolean
-}
-
-export interface IndicesCreateRequest extends RequestBase {
-  index: IndexName
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aliases?: Record
-    mappings?: MappingTypeMapping
-    settings?: IndicesIndexSettings
-  }
-}
-
-export interface IndicesCreateResponse {
-  index: IndexName
-  shards_acknowledged: boolean
-  acknowledged: boolean
-}
-
-export interface IndicesCreateDataStreamRequest extends RequestBase {
-  name: DataStreamName
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase
-
-export interface IndicesDataStreamsStatsDataStreamsStatsItem {
-  backing_indices: integer
-  data_stream: Name
-  maximum_timestamp: EpochTime
-  store_size?: ByteSize
-  store_size_bytes: long
-}
-
-export interface IndicesDataStreamsStatsRequest extends RequestBase {
-  name?: IndexName
-  expand_wildcards?: ExpandWildcards
-}
-
-export interface IndicesDataStreamsStatsResponse {
-  _shards: ShardStatistics
-  backing_indices: integer
-  data_stream_count: integer
-  data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[]
-  total_store_sizes?: ByteSize
-  total_store_size_bytes: long
-}
-
-export interface IndicesDeleteRequest extends RequestBase {
-  index: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IndicesDeleteResponse = IndicesResponseBase
-
-export interface IndicesDeleteAliasRequest extends RequestBase {
-  index: Indices
-  name: Names
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IndicesDeleteAliasResponse = AcknowledgedResponseBase
-
-export interface IndicesDeleteDataLifecycleRequest extends RequestBase {
-  name: DataStreamNames
-  expand_wildcards?: ExpandWildcards
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase
-
-export interface IndicesDeleteDataStreamRequest extends RequestBase {
-  name: DataStreamNames
-  master_timeout?: Duration
-  expand_wildcards?: ExpandWildcards
-}
-
-export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase
-
-export interface IndicesDeleteIndexTemplateRequest extends RequestBase {
-  name: Names
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase
-
-export interface IndicesDeleteTemplateRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase
-
-export interface IndicesDiskUsageRequest extends RequestBase {
-  index: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  flush?: boolean
-  ignore_unavailable?: boolean
-  run_expensive_tasks?: boolean
-}
-
-export type IndicesDiskUsageResponse = any
-
-export interface IndicesDownsampleRequest extends RequestBase {
-  index: IndexName
-  target_index: IndexName
-  /** @deprecated The use of the 'body' key has been deprecated, use 'config' instead. */
-  body?: IndicesDownsampleConfig
-}
-
-export type IndicesDownsampleResponse = any
-
-export interface IndicesExistsRequest extends RequestBase {
-  index: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  flat_settings?: boolean
-  ignore_unavailable?: boolean
-  include_defaults?: boolean
-  local?: boolean
-}
-
-export type IndicesExistsResponse = boolean
-
-export interface IndicesExistsAliasRequest extends RequestBase {
-  name: Names
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-}
-
-export type IndicesExistsAliasResponse = boolean
-
-export interface IndicesExistsIndexTemplateRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-}
-
-export type IndicesExistsIndexTemplateResponse = boolean
-
-export interface IndicesExistsTemplateRequest extends RequestBase {
-  name: Names
-  flat_settings?: boolean
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type IndicesExistsTemplateResponse = boolean
-
-export interface IndicesExplainDataLifecycleDataStreamLifecycleExplain {
-  index: IndexName
-  managed_by_lifecycle: boolean
-  index_creation_date_millis?: EpochTime
-  time_since_index_creation?: Duration
-  rollover_date_millis?: EpochTime
-  time_since_rollover?: Duration
-  lifecycle?: IndicesDataStreamLifecycleWithRollover
-  generation_time?: Duration
-  error?: string
-}
-
-export interface IndicesExplainDataLifecycleRequest extends RequestBase {
-  index: Indices
-  include_defaults?: boolean
-  master_timeout?: Duration
-}
-
-export interface IndicesExplainDataLifecycleResponse {
-  indices: Record
-}
-
-export interface IndicesFieldUsageStatsFieldSummary {
-  any: uint
-  stored_fields: uint
-  doc_values: uint
-  points: uint
-  norms: uint
-  term_vectors: uint
-  knn_vectors: uint
-  inverted_index: IndicesFieldUsageStatsInvertedIndex
-}
-
-export interface IndicesFieldUsageStatsFieldsUsageBodyKeys {
-  _shards: ShardStatistics
-}
-export type IndicesFieldUsageStatsFieldsUsageBody = IndicesFieldUsageStatsFieldsUsageBodyKeys
-& { [property: string]: IndicesFieldUsageStatsUsageStatsIndex | ShardStatistics }
-
-export interface IndicesFieldUsageStatsInvertedIndex {
-  terms: uint
-  postings: uint
-  proximity: uint
-  positions: uint
-  term_frequencies: uint
-  offsets: uint
-  payloads: uint
-}
-
-export interface IndicesFieldUsageStatsRequest extends RequestBase {
-  index: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  fields?: Fields
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-}
-
-export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody
-
-export interface IndicesFieldUsageStatsShardsStats {
-  all_fields: IndicesFieldUsageStatsFieldSummary
-  fields: Record
-}
-
-export interface IndicesFieldUsageStatsUsageStatsIndex {
-  shards: IndicesFieldUsageStatsUsageStatsShards[]
-}
-
-export interface IndicesFieldUsageStatsUsageStatsShards {
-  routing: IndicesStatsShardRouting
-  stats: IndicesFieldUsageStatsShardsStats
-  tracking_id: string
-  tracking_started_at_millis: EpochTime
-}
-
-export interface IndicesFlushRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  force?: boolean
-  ignore_unavailable?: boolean
-  wait_if_ongoing?: boolean
-}
-
-export type IndicesFlushResponse = ShardsOperationResponseBase
-
-export interface IndicesForcemergeRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  flush?: boolean
-  ignore_unavailable?: boolean
-  max_num_segments?: long
-  only_expunge_deletes?: boolean
-  wait_for_completion?: boolean
-}
-
-export type IndicesForcemergeResponse = IndicesForcemergeForceMergeResponseBody
-
-export interface IndicesForcemergeForceMergeResponseBody extends ShardsOperationResponseBase {
-  task?: string
-}
-
-export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings'
-
-export type IndicesGetFeatures = IndicesGetFeature | IndicesGetFeature[]
-
-export interface IndicesGetRequest extends RequestBase {
-  index: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  flat_settings?: boolean
-  ignore_unavailable?: boolean
-  include_defaults?: boolean
-  local?: boolean
-  master_timeout?: Duration
-  features?: IndicesGetFeatures
-}
-
-export type IndicesGetResponse = Record
-
-export interface IndicesGetAliasIndexAliases {
-  aliases: Record
-}
-
-export interface IndicesGetAliasRequest extends RequestBase {
-  name?: Names
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-}
-
-export type IndicesGetAliasResponse = Record
-
-export interface IndicesGetDataLifecycleDataStreamWithLifecycle {
-  name: DataStreamName
-  lifecycle?: IndicesDataStreamLifecycleWithRollover
-}
-
-export interface IndicesGetDataLifecycleRequest extends RequestBase {
-  name: DataStreamNames
-  expand_wildcards?: ExpandWildcards
-  include_defaults?: boolean
-  master_timeout?: Duration
-}
-
-export interface IndicesGetDataLifecycleResponse {
-  data_streams: IndicesGetDataLifecycleDataStreamWithLifecycle[]
-}
-
-export interface IndicesGetDataStreamRequest extends RequestBase {
-  name?: DataStreamNames
-  expand_wildcards?: ExpandWildcards
-  include_defaults?: boolean
-  master_timeout?: Duration
-  verbose?: boolean
-}
-
-export interface IndicesGetDataStreamResponse {
-  data_streams: IndicesDataStream[]
-}
-
-export interface IndicesGetFieldMappingRequest extends RequestBase {
-  fields: Fields
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  include_defaults?: boolean
-  local?: boolean
-}
-
-export type IndicesGetFieldMappingResponse = Record
-
-export interface IndicesGetFieldMappingTypeFieldMappings {
-  mappings: Record
-}
-
-export interface IndicesGetIndexTemplateIndexTemplateItem {
-  name: Name
-  index_template: IndicesIndexTemplate
-}
-
-export interface IndicesGetIndexTemplateRequest extends RequestBase {
-  name?: Name
-  local?: boolean
-  flat_settings?: boolean
-  master_timeout?: Duration
-  include_defaults?: boolean
-}
-
-export interface IndicesGetIndexTemplateResponse {
-  index_templates: IndicesGetIndexTemplateIndexTemplateItem[]
-}
-
-export interface IndicesGetMappingIndexMappingRecord {
-  item?: MappingTypeMapping
-  mappings: MappingTypeMapping
-}
-
-export interface IndicesGetMappingRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type IndicesGetMappingResponse = Record
-
-export interface IndicesGetSettingsRequest extends RequestBase {
-  index?: Indices
-  name?: Names
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  flat_settings?: boolean
-  ignore_unavailable?: boolean
-  include_defaults?: boolean
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type IndicesGetSettingsResponse = Record
-
-export interface IndicesGetTemplateRequest extends RequestBase {
-  name?: Names
-  flat_settings?: boolean
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type IndicesGetTemplateResponse = Record
-
-export interface IndicesMigrateToDataStreamRequest extends RequestBase {
-  name: IndexName
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase
-
-export interface IndicesModifyDataStreamAction {
-  add_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction
-  remove_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction
-}
-
-export interface IndicesModifyDataStreamIndexAndDataStreamAction {
-  data_stream: DataStreamName
-  index: IndexName
-}
-
-export interface IndicesModifyDataStreamRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    actions: IndicesModifyDataStreamAction[]
-  }
-}
-
-export type IndicesModifyDataStreamResponse = AcknowledgedResponseBase
-
-export interface IndicesOpenRequest extends RequestBase {
-  index: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-}
-
-export interface IndicesOpenResponse {
-  acknowledged: boolean
-  shards_acknowledged: boolean
-}
-
-export interface IndicesPromoteDataStreamRequest extends RequestBase {
-  name: IndexName
-  master_timeout?: Duration
-}
-
-export type IndicesPromoteDataStreamResponse = any
-
-export interface IndicesPutAliasRequest extends RequestBase {
-  index: Indices
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    filter?: QueryDslQueryContainer
-    index_routing?: Routing
-    is_write_index?: boolean
-    routing?: Routing
-    search_routing?: Routing
-  }
-}
-
-export type IndicesPutAliasResponse = AcknowledgedResponseBase
-
-export interface IndicesPutDataLifecycleRequest extends RequestBase {
-  name: DataStreamNames
-  expand_wildcards?: ExpandWildcards
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, use 'lifecycle' instead. */
-  body?: IndicesDataStreamLifecycle
-}
-
-export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase
-
-export interface IndicesPutIndexTemplateIndexTemplateMapping {
-  aliases?: Record
-  mappings?: MappingTypeMapping
-  settings?: IndicesIndexSettings
-  lifecycle?: IndicesDataStreamLifecycle
-}
-
-export interface IndicesPutIndexTemplateRequest extends RequestBase {
-  name: Name
-  create?: boolean
-  master_timeout?: Duration
-  cause?: string
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    index_patterns?: Indices
-    composed_of?: Name[]
-    template?: IndicesPutIndexTemplateIndexTemplateMapping
-    data_stream?: IndicesDataStreamVisibility
-    priority?: long
-    version?: VersionNumber
-    _meta?: Metadata
-    allow_auto_create?: boolean
-    ignore_missing_component_templates?: string[]
-    deprecated?: boolean
-  }
-}
-
-export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase
-
-export interface IndicesPutMappingRequest extends RequestBase {
-  index: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  write_index_only?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    date_detection?: boolean
-    dynamic?: MappingDynamicMapping
-    dynamic_date_formats?: string[]
-    dynamic_templates?: Record | Record[]
-    _field_names?: MappingFieldNamesField
-    _meta?: Metadata
-    numeric_detection?: boolean
-    properties?: Record
-    _routing?: MappingRoutingField
-    _source?: MappingSourceField
-    runtime?: MappingRuntimeFields
-  }
-}
-
-export type IndicesPutMappingResponse = IndicesResponseBase
-
-export interface IndicesPutSettingsRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  flat_settings?: boolean
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-  preserve_existing?: boolean
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, use 'settings' instead. */
-  body?: IndicesIndexSettings
-}
-
-export type IndicesPutSettingsResponse = AcknowledgedResponseBase
-
-export interface IndicesPutTemplateRequest extends RequestBase {
-  name: Name
-  create?: boolean
-  master_timeout?: Duration
-  cause?: string
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aliases?: Record
-    index_patterns?: string | string[]
-    mappings?: MappingTypeMapping
-    order?: integer
-    settings?: IndicesIndexSettings
-    version?: VersionNumber
-  }
-}
-
-export type IndicesPutTemplateResponse = AcknowledgedResponseBase
-
-export interface IndicesRecoveryFileDetails {
-  length: long
-  name: string
-  recovered: long
-}
-
-export interface IndicesRecoveryRecoveryBytes {
-  percent: Percentage
-  recovered?: ByteSize
-  recovered_in_bytes: ByteSize
-  recovered_from_snapshot?: ByteSize
-  recovered_from_snapshot_in_bytes?: ByteSize
-  reused?: ByteSize
-  reused_in_bytes: ByteSize
-  total?: ByteSize
-  total_in_bytes: ByteSize
-}
-
-export interface IndicesRecoveryRecoveryFiles {
-  details?: IndicesRecoveryFileDetails[]
-  percent: Percentage
-  recovered: long
-  reused: long
-  total: long
-}
-
-export interface IndicesRecoveryRecoveryIndexStatus {
-  bytes?: IndicesRecoveryRecoveryBytes
-  files: IndicesRecoveryRecoveryFiles
-  size: IndicesRecoveryRecoveryBytes
-  source_throttle_time?: Duration
-  source_throttle_time_in_millis: DurationValue
-  target_throttle_time?: Duration
-  target_throttle_time_in_millis: DurationValue
-  total_time?: Duration
-  total_time_in_millis: DurationValue
-}
-
-export interface IndicesRecoveryRecoveryOrigin {
-  hostname?: string
-  host?: Host
-  transport_address?: TransportAddress
-  id?: Id
-  ip?: Ip
-  name?: Name
-  bootstrap_new_history_uuid?: boolean
-  repository?: Name
-  snapshot?: Name
-  version?: VersionString
-  restoreUUID?: Uuid
-  index?: IndexName
-}
-
-export interface IndicesRecoveryRecoveryStartStatus {
-  check_index_time?: Duration
-  check_index_time_in_millis: DurationValue
-  total_time?: Duration
-  total_time_in_millis: DurationValue
-}
-
-export interface IndicesRecoveryRecoveryStatus {
-  shards: IndicesRecoveryShardRecovery[]
-}
-
-export interface IndicesRecoveryRequest extends RequestBase {
-  index?: Indices
-  active_only?: boolean
-  detailed?: boolean
-}
-
-export type IndicesRecoveryResponse = Record
-
-export interface IndicesRecoveryShardRecovery {
-  id: long
-  index: IndicesRecoveryRecoveryIndexStatus
-  primary: boolean
-  source: IndicesRecoveryRecoveryOrigin
-  stage: string
-  start?: IndicesRecoveryRecoveryStartStatus
-  start_time?: DateTime
-  start_time_in_millis: EpochTime
-  stop_time?: DateTime
-  stop_time_in_millis?: EpochTime
-  target: IndicesRecoveryRecoveryOrigin
-  total_time?: Duration
-  total_time_in_millis: DurationValue
-  translog: IndicesRecoveryTranslogStatus
-  type: string
-  verify_index: IndicesRecoveryVerifyIndex
-}
-
-export interface IndicesRecoveryTranslogStatus {
-  percent: Percentage
-  recovered: long
-  total: long
-  total_on_start: long
-  total_time?: Duration
-  total_time_in_millis: DurationValue
-}
-
-export interface IndicesRecoveryVerifyIndex {
-  check_index_time?: Duration
-  check_index_time_in_millis: DurationValue
-  total_time?: Duration
-  total_time_in_millis: DurationValue
-}
-
-export interface IndicesRefreshRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-}
-
-export type IndicesRefreshResponse = ShardsOperationResponseBase
-
-export interface IndicesReloadSearchAnalyzersReloadDetails {
-  index: string
-  reloaded_analyzers: string[]
-  reloaded_node_ids: string[]
-}
-
-export interface IndicesReloadSearchAnalyzersReloadResult {
-  reload_details: IndicesReloadSearchAnalyzersReloadDetails[]
-  _shards: ShardStatistics
-}
-
-export interface IndicesReloadSearchAnalyzersRequest extends RequestBase {
-  index: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-}
-
-export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult
-
-export interface IndicesResolveClusterRequest extends RequestBase {
-  name: Names
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-}
-
-export interface IndicesResolveClusterResolveClusterInfo {
-  connected: boolean
-  skip_unavailable: boolean
-  matching_indices?: boolean
-  error?: string
-  version?: ElasticsearchVersionMinInfo
-}
-
-export type IndicesResolveClusterResponse = Record
-
-export interface IndicesResolveIndexRequest extends RequestBase {
-  name: Names
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  allow_no_indices?: boolean
-}
-
-export interface IndicesResolveIndexResolveIndexAliasItem {
-  name: Name
-  indices: Indices
-}
-
-export interface IndicesResolveIndexResolveIndexDataStreamsItem {
-  name: DataStreamName
-  timestamp_field: Field
-  backing_indices: Indices
-}
-
-export interface IndicesResolveIndexResolveIndexItem {
-  name: Name
-  aliases?: string[]
-  attributes: string[]
-  data_stream?: DataStreamName
-}
-
-export interface IndicesResolveIndexResponse {
-  indices: IndicesResolveIndexResolveIndexItem[]
-  aliases: IndicesResolveIndexResolveIndexAliasItem[]
-  data_streams: IndicesResolveIndexResolveIndexDataStreamsItem[]
-}
-
-export interface IndicesRolloverRequest extends RequestBase {
-  alias: IndexAlias
-  new_index?: IndexName
-  dry_run?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aliases?: Record
-    conditions?: IndicesRolloverRolloverConditions
-    mappings?: MappingTypeMapping
-    settings?: Record
-  }
-}
-
-export interface IndicesRolloverResponse {
-  acknowledged: boolean
-  conditions: Record
-  dry_run: boolean
-  new_index: string
-  old_index: string
-  rolled_over: boolean
-  shards_acknowledged: boolean
-}
-
-export interface IndicesRolloverRolloverConditions {
-  min_age?: Duration
-  max_age?: Duration
-  max_age_millis?: DurationValue
-  min_docs?: long
-  max_docs?: long
-  max_size?: ByteSize
-  max_size_bytes?: long
-  min_size?: ByteSize
-  min_size_bytes?: long
-  max_primary_shard_size?: ByteSize
-  max_primary_shard_size_bytes?: long
-  min_primary_shard_size?: ByteSize
-  min_primary_shard_size_bytes?: long
-  max_primary_shard_docs?: long
-  min_primary_shard_docs?: long
-}
-
-export interface IndicesSegmentsIndexSegment {
-  shards: Record
-}
-
-export interface IndicesSegmentsRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-}
-
-export interface IndicesSegmentsResponse {
-  indices: Record
-  _shards: ShardStatistics
-}
-
-export interface IndicesSegmentsSegment {
-  attributes: Record
-  committed: boolean
-  compound: boolean
-  deleted_docs: long
-  generation: integer
-  search: boolean
-  size_in_bytes: double
-  num_docs: long
-  version: VersionString
-}
-
-export interface IndicesSegmentsShardSegmentRouting {
-  node: string
-  primary: boolean
-  state: string
-}
-
-export interface IndicesSegmentsShardsSegment {
-  num_committed_segments: integer
-  routing: IndicesSegmentsShardSegmentRouting
-  num_search_segments: integer
-  segments: Record
-}
-
-export interface IndicesShardStoresIndicesShardStores {
-  shards: Record
-}
-
-export interface IndicesShardStoresRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  status?: IndicesShardStoresShardStoreStatus | IndicesShardStoresShardStoreStatus[]
-}
-
-export interface IndicesShardStoresResponse {
-  indices: Record
-}
-
-export interface IndicesShardStoresShardStoreKeys {
-  allocation: IndicesShardStoresShardStoreAllocation
-  allocation_id?: Id
-  store_exception?: IndicesShardStoresShardStoreException
-}
-export type IndicesShardStoresShardStore = IndicesShardStoresShardStoreKeys
-& { [property: string]: IndicesShardStoresShardStoreNode | IndicesShardStoresShardStoreAllocation | Id | IndicesShardStoresShardStoreException }
-
-export type IndicesShardStoresShardStoreAllocation = 'primary' | 'replica' | 'unused'
-
-export interface IndicesShardStoresShardStoreException {
-  reason: string
-  type: string
-}
-
-export interface IndicesShardStoresShardStoreNode {
-  attributes: Record
-  ephemeral_id?: string
-  external_id?: string
-  name: Name
-  roles: string[]
-  transport_address: TransportAddress
-}
-
-export type IndicesShardStoresShardStoreStatus = 'green' | 'yellow' | 'red' | 'all'
-
-export interface IndicesShardStoresShardStoreWrapper {
-  stores: IndicesShardStoresShardStore[]
-}
-
-export interface IndicesShrinkRequest extends RequestBase {
-  index: IndexName
-  target: IndexName
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aliases?: Record
-    settings?: Record
-  }
-}
-
-export interface IndicesShrinkResponse {
-  acknowledged: boolean
-  shards_acknowledged: boolean
-  index: IndexName
-}
-
-export interface IndicesSimulateIndexTemplateRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  include_defaults?: boolean
-}
-
-export interface IndicesSimulateIndexTemplateResponse {
-  overlapping?: IndicesSimulateTemplateOverlapping[]
-  template: IndicesSimulateTemplateTemplate
-}
-
-export interface IndicesSimulateTemplateOverlapping {
-  name: Name
-  index_patterns: string[]
-}
-
-export interface IndicesSimulateTemplateRequest extends RequestBase {
-  name?: Name
-  create?: boolean
-  master_timeout?: Duration
-  include_defaults?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    allow_auto_create?: boolean
-    index_patterns?: Indices
-    composed_of?: Name[]
-    template?: IndicesPutIndexTemplateIndexTemplateMapping
-    data_stream?: IndicesDataStreamVisibility
-    priority?: long
-    version?: VersionNumber
-    _meta?: Metadata
-    ignore_missing_component_templates?: string[]
-    deprecated?: boolean
-  }
-}
-
-export interface IndicesSimulateTemplateResponse {
-  overlapping?: IndicesSimulateTemplateOverlapping[]
-  template: IndicesSimulateTemplateTemplate
-}
-
-export interface IndicesSimulateTemplateTemplate {
-  aliases: Record
-  mappings: MappingTypeMapping
-  settings: IndicesIndexSettings
-}
-
-export interface IndicesSplitRequest extends RequestBase {
-  index: IndexName
-  target: IndexName
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aliases?: Record
-    settings?: Record
-  }
-}
-
-export interface IndicesSplitResponse {
-  acknowledged: boolean
-  shards_acknowledged: boolean
-  index: IndexName
-}
-
-export type IndicesStatsIndexMetadataState = 'open' | 'close'
-
-export interface IndicesStatsIndexStats {
-  completion?: CompletionStats
-  docs?: DocStats
-  fielddata?: FielddataStats
-  flush?: FlushStats
-  get?: GetStats
-  indexing?: IndexingStats
-  indices?: IndicesStatsIndicesStats
-  merges?: MergesStats
-  query_cache?: QueryCacheStats
-  recovery?: RecoveryStats
-  refresh?: RefreshStats
-  request_cache?: RequestCacheStats
-  search?: SearchStats
-  segments?: SegmentsStats
-  store?: StoreStats
-  translog?: TranslogStats
-  warmer?: WarmerStats
-  bulk?: BulkStats
-  shard_stats?: IndicesStatsShardsTotalStats
-}
-
-export interface IndicesStatsIndicesStats {
-  primaries?: IndicesStatsIndexStats
-  shards?: Record
-  total?: IndicesStatsIndexStats
-  uuid?: Uuid
-  health?: HealthStatus
-  status?: IndicesStatsIndexMetadataState
-}
-
-export interface IndicesStatsMappingStats {
-  total_count: long
-  total_estimated_overhead?: ByteSize
-  total_estimated_overhead_in_bytes: long
-}
-
-export interface IndicesStatsRequest extends RequestBase {
-  metric?: Metrics
-  index?: Indices
-  completion_fields?: Fields
-  expand_wildcards?: ExpandWildcards
-  fielddata_fields?: Fields
-  fields?: Fields
-  forbid_closed_indices?: boolean
-  groups?: string | string[]
-  include_segment_file_sizes?: boolean
-  include_unloaded_segments?: boolean
-  level?: Level
-}
-
-export interface IndicesStatsResponse {
-  indices?: Record
-  _shards: ShardStatistics
-  _all: IndicesStatsIndicesStats
-}
-
-export interface IndicesStatsShardCommit {
-  generation: integer
-  id: Id
-  num_docs: long
-  user_data: Record
-}
-
-export interface IndicesStatsShardFileSizeInfo {
-  description: string
-  size_in_bytes: long
-  min_size_in_bytes?: long
-  max_size_in_bytes?: long
-  average_size_in_bytes?: long
-  count?: long
-}
-
-export interface IndicesStatsShardLease {
-  id: Id
-  retaining_seq_no: SequenceNumber
-  timestamp: long
-  source: string
-}
-
-export interface IndicesStatsShardPath {
-  data_path: string
-  is_custom_data_path: boolean
-  state_path: string
-}
-
-export interface IndicesStatsShardQueryCache {
-  cache_count: long
-  cache_size: long
-  evictions: long
-  hit_count: long
-  memory_size_in_bytes: long
-  miss_count: long
-  total_count: long
-}
-
-export interface IndicesStatsShardRetentionLeases {
-  primary_term: long
-  version: VersionNumber
-  leases: IndicesStatsShardLease[]
-}
-
-export interface IndicesStatsShardRouting {
-  node: string
-  primary: boolean
-  relocating_node?: string | null
-  state: IndicesStatsShardRoutingState
-}
-
-export type IndicesStatsShardRoutingState = 'UNASSIGNED' | 'INITIALIZING' | 'STARTED' | 'RELOCATING'
-
-export interface IndicesStatsShardSequenceNumber {
-  global_checkpoint: long
-  local_checkpoint: long
-  max_seq_no: SequenceNumber
-}
-
-export interface IndicesStatsShardStats {
-  commit?: IndicesStatsShardCommit
-  completion?: CompletionStats
-  docs?: DocStats
-  fielddata?: FielddataStats
-  flush?: FlushStats
-  get?: GetStats
-  indexing?: IndexingStats
-  mappings?: IndicesStatsMappingStats
-  merges?: MergesStats
-  shard_path?: IndicesStatsShardPath
-  query_cache?: IndicesStatsShardQueryCache
-  recovery?: RecoveryStats
-  refresh?: RefreshStats
-  request_cache?: RequestCacheStats
-  retention_leases?: IndicesStatsShardRetentionLeases
-  routing?: IndicesStatsShardRouting
-  search?: SearchStats
-  segments?: SegmentsStats
-  seq_no?: IndicesStatsShardSequenceNumber
-  store?: StoreStats
-  translog?: TranslogStats
-  warmer?: WarmerStats
-  bulk?: BulkStats
-  shards?: Record
-  shard_stats?: IndicesStatsShardsTotalStats
-  indices?: IndicesStatsIndicesStats
-}
-
-export interface IndicesStatsShardsTotalStats {
-  total_count: long
-}
-
-export interface IndicesUnfreezeRequest extends RequestBase {
-  index: IndexName
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: string
-}
-
-export interface IndicesUnfreezeResponse {
-  acknowledged: boolean
-  shards_acknowledged: boolean
-}
-
-export interface IndicesUpdateAliasesAction {
-  add?: IndicesUpdateAliasesAddAction
-  remove?: IndicesUpdateAliasesRemoveAction
-  remove_index?: IndicesUpdateAliasesRemoveIndexAction
-}
-
-export interface IndicesUpdateAliasesAddAction {
-  alias?: IndexAlias
-  aliases?: IndexAlias | IndexAlias[]
-  filter?: QueryDslQueryContainer
-  index?: IndexName
-  indices?: Indices
-  index_routing?: Routing
-  is_hidden?: boolean
-  is_write_index?: boolean
-  routing?: Routing
-  search_routing?: Routing
-  must_exist?: boolean
-}
-
-export interface IndicesUpdateAliasesRemoveAction {
-  alias?: IndexAlias
-  aliases?: IndexAlias | IndexAlias[]
-  index?: IndexName
-  indices?: Indices
-  must_exist?: boolean
-}
-
-export interface IndicesUpdateAliasesRemoveIndexAction {
-  index?: IndexName
-  indices?: Indices
-  must_exist?: boolean
-}
-
-export interface IndicesUpdateAliasesRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    actions?: IndicesUpdateAliasesAction[]
-  }
-}
-
-export type IndicesUpdateAliasesResponse = AcknowledgedResponseBase
-
-export interface IndicesValidateQueryIndicesValidationExplanation {
-  error?: string
-  explanation?: string
-  index: IndexName
-  valid: boolean
-}
-
-export interface IndicesValidateQueryRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  all_shards?: boolean
-  analyzer?: string
-  analyze_wildcard?: boolean
-  default_operator?: QueryDslOperator
-  df?: string
-  expand_wildcards?: ExpandWildcards
-  explain?: boolean
-  ignore_unavailable?: boolean
-  lenient?: boolean
-  rewrite?: boolean
-  q?: string
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    query?: QueryDslQueryContainer
-  }
-}
-
-export interface IndicesValidateQueryResponse {
-  explanations?: IndicesValidateQueryIndicesValidationExplanation[]
-  _shards?: ShardStatistics
-  valid: boolean
-  error?: string
-}
-
-export interface InferenceCompletionResult {
-  result: string
-}
-
-export interface InferenceDeleteInferenceEndpointResult extends AcknowledgedResponseBase {
-  pipelines: string[]
-}
-
-export type InferenceDenseByteVector = byte[]
-
-export type InferenceDenseVector = float[]
-
-export interface InferenceInferenceEndpoint {
-  service: string
-  service_settings: InferenceServiceSettings
-  task_settings?: InferenceTaskSettings
-}
-
-export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskType
-}
-
-export interface InferenceInferenceResult {
-  text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
-  text_embedding?: InferenceTextEmbeddingResult[]
-  sparse_embedding?: InferenceSparseEmbeddingResult[]
-  completion?: InferenceCompletionResult[]
-  rerank?: InferenceRankedDocument[]
-}
-
-export interface InferenceRankedDocument {
-  index: integer
-  score: float
-  text?: string
-}
-
-export type InferenceServiceSettings = any
-
-export interface InferenceSparseEmbeddingResult {
-  embedding: InferenceSparseVector
-}
-
-export type InferenceSparseVector = Record
-
-export type InferenceTaskSettings = any
-
-export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion'
-
-export interface InferenceTextEmbeddingByteResult {
-  embedding: InferenceDenseByteVector
-}
-
-export interface InferenceTextEmbeddingResult {
-  embedding: InferenceDenseVector
-}
-
-export interface InferenceDeleteRequest extends RequestBase {
-  task_type?: InferenceTaskType
-  inference_id: Id
-  dry_run?: boolean
-  force?: boolean
-}
-
-export type InferenceDeleteResponse = InferenceDeleteInferenceEndpointResult
-
-export interface InferenceGetRequest extends RequestBase {
-  task_type?: InferenceTaskType
-  inference_id?: Id
-}
-
-export interface InferenceGetResponse {
-  endpoints: InferenceInferenceEndpointInfo[]
-}
-
-export interface InferenceInferenceRequest extends RequestBase {
-  task_type?: InferenceTaskType
-  inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    query?: string
-    input: string | string[]
-    task_settings?: InferenceTaskSettings
-  }
-}
-
-export type InferenceInferenceResponse = InferenceInferenceResult
-
-export interface InferencePutRequest extends RequestBase {
-  task_type?: InferenceTaskType
-  inference_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, use 'inference_config' instead. */
-  body?: InferenceInferenceEndpoint
-}
-
-export type InferencePutResponse = InferenceInferenceEndpointInfo
-
-export interface IngestAppendProcessor extends IngestProcessorBase {
-  field: Field
-  value: any | any[]
-  allow_duplicates?: boolean
-}
-
-export interface IngestAttachmentProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  indexed_chars?: long
-  indexed_chars_field?: Field
-  properties?: string[]
-  target_field?: Field
-  remove_binary?: boolean
-  resource_name?: string
-}
-
-export interface IngestBytesProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestCircleProcessor extends IngestProcessorBase {
-  error_distance: double
-  field: Field
-  ignore_missing?: boolean
-  shape_type: IngestShapeType
-  target_field?: Field
-}
-
-export interface IngestCommunityIDProcessor extends IngestProcessorBase {
-  source_ip?: Field
-  source_port?: Field
-  destination_ip?: Field
-  destination_port?: Field
-  iana_number?: Field
-  icmp_type?: Field
-  icmp_code?: Field
-  transport?: Field
-  target_field?: Field
-  seed?: integer
-  ignore_missing?: boolean
-}
-
-export interface IngestConvertProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-  type: IngestConvertType
-}
-
-export type IngestConvertType = 'integer' | 'long' | 'double' | 'float' | 'boolean' | 'ip' | 'string' | 'auto'
-
-export interface IngestCsvProcessor extends IngestProcessorBase {
-  empty_value?: any
-  field: Field
-  ignore_missing?: boolean
-  quote?: string
-  separator?: string
-  target_fields: Fields
-  trim?: boolean
-}
-
-export interface IngestDatabaseConfiguration {
-  name: Name
-  maxmind: IngestMaxmind
-}
-
-export interface IngestDateIndexNameProcessor extends IngestProcessorBase {
-  date_formats: string[]
-  date_rounding: string
-  field: Field
-  index_name_format?: string
-  index_name_prefix?: string
-  locale?: string
-  timezone?: string
-}
-
-export interface IngestDateProcessor extends IngestProcessorBase {
-  field: Field
-  formats: string[]
-  locale?: string
-  target_field?: Field
-  timezone?: string
-  output_format?: string
-}
-
-export interface IngestDissectProcessor extends IngestProcessorBase {
-  append_separator?: string
-  field: Field
-  ignore_missing?: boolean
-  pattern: string
-}
-
-export interface IngestDotExpanderProcessor extends IngestProcessorBase {
-  field: Field
-  override?: boolean
-  path?: string
-}
-
-export interface IngestDropProcessor extends IngestProcessorBase {
-}
-
-export interface IngestEnrichProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  max_matches?: integer
-  override?: boolean
-  policy_name: string
-  shape_relation?: GeoShapeRelation
-  target_field: Field
-}
-
-export interface IngestFailProcessor extends IngestProcessorBase {
-  message: string
-}
-
-export type IngestFingerprintDigest = 'MD5' | 'SHA-1' | 'SHA-256' | 'SHA-512' | 'MurmurHash3'
-
-export interface IngestFingerprintProcessor extends IngestProcessorBase {
-  fields: Fields
-  target_field?: Field
-  salt?: string
-  method?: IngestFingerprintDigest
-  ignore_missing?: boolean
-}
-
-export interface IngestForeachProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  processor: IngestProcessorContainer
-}
-
-export interface IngestGeoGridProcessor extends IngestProcessorBase {
-  field: string
-  tile_type: IngestGeoGridTileType
-  target_field?: Field
-  parent_field?: Field
-  children_field?: Field
-  non_children_field?: Field
-  precision_field?: Field
-  ignore_missing?: boolean
-  target_format?: IngestGeoGridTargetFormat
-}
-
-export type IngestGeoGridTargetFormat = 'geojson' | 'wkt'
-
-export type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash'
-
-export interface IngestGeoIpProcessor extends IngestProcessorBase {
-  database_file?: string
-  field: Field
-  first_only?: boolean
-  ignore_missing?: boolean
-  properties?: string[]
-  target_field?: Field
-  download_database_on_pipeline_creation?: boolean
-}
-
-export interface IngestGrokProcessor extends IngestProcessorBase {
-  ecs_compatibility?: string
-  field: Field
-  ignore_missing?: boolean
-  pattern_definitions?: Record
-  patterns: GrokPattern[]
-  trace_match?: boolean
-}
-
-export interface IngestGsubProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  pattern: string
-  replacement: string
-  target_field?: Field
-}
-
-export interface IngestHtmlStripProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestInferenceConfig {
-  regression?: IngestInferenceConfigRegression
-  classification?: IngestInferenceConfigClassification
-}
-
-export interface IngestInferenceConfigClassification {
-  num_top_classes?: integer
-  num_top_feature_importance_values?: integer
-  results_field?: Field
-  top_classes_results_field?: Field
-  prediction_field_type?: string
-}
-
-export interface IngestInferenceConfigRegression {
-  results_field?: Field
-  num_top_feature_importance_values?: integer
-}
-
-export interface IngestInferenceProcessor extends IngestProcessorBase {
-  model_id: Id
-  target_field?: Field
-  field_map?: Record
-  inference_config?: IngestInferenceConfig
-}
-
-export interface IngestIpLocationProcessor extends IngestProcessorBase {
-  database_file?: string
-  field: Field
-  first_only?: boolean
-  ignore_missing?: boolean
-  properties?: string[]
-  target_field?: Field
-  download_database_on_pipeline_creation?: boolean
-}
-
-export interface IngestJoinProcessor extends IngestProcessorBase {
-  field: Field
-  separator: string
-  target_field?: Field
-}
-
-export interface IngestJsonProcessor extends IngestProcessorBase {
-  add_to_root?: boolean
-  add_to_root_conflict_strategy?: IngestJsonProcessorConflictStrategy
-  allow_duplicate_keys?: boolean
-  field: Field
-  target_field?: Field
-}
-
-export type IngestJsonProcessorConflictStrategy = 'replace' | 'merge'
-
-export interface IngestKeyValueProcessor extends IngestProcessorBase {
-  exclude_keys?: string[]
-  field: Field
-  field_split: string
-  ignore_missing?: boolean
-  include_keys?: string[]
-  prefix?: string
-  strip_brackets?: boolean
-  target_field?: Field
-  trim_key?: string
-  trim_value?: string
-  value_split: string
-}
-
-export interface IngestLowercaseProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestMaxmind {
-  account_id: Id
-}
-
-export interface IngestNetworkDirectionProcessor extends IngestProcessorBase {
-  source_ip?: Field
-  destination_ip?: Field
-  target_field?: Field
-  internal_networks?: string[]
-  internal_networks_field?: Field
-  ignore_missing?: boolean
-}
-
-export interface IngestPipeline {
-  description?: string
-  on_failure?: IngestProcessorContainer[]
-  processors?: IngestProcessorContainer[]
-  version?: VersionNumber
-  deprecated?: boolean
-  _meta?: Metadata
-}
-
-export interface IngestPipelineConfig {
-  description?: string
-  version?: VersionNumber
-  processors: IngestProcessorContainer[]
-}
-
-export interface IngestPipelineProcessor extends IngestProcessorBase {
-  name: Name
-  ignore_missing_pipeline?: boolean
-}
-
-export interface IngestProcessorBase {
-  description?: string
-  if?: string
-  ignore_failure?: boolean
-  on_failure?: IngestProcessorContainer[]
-  tag?: string
-}
-
-export interface IngestProcessorContainer {
-  append?: IngestAppendProcessor
-  attachment?: IngestAttachmentProcessor
-  bytes?: IngestBytesProcessor
-  circle?: IngestCircleProcessor
-  community_id?: IngestCommunityIDProcessor
-  convert?: IngestConvertProcessor
-  csv?: IngestCsvProcessor
-  date?: IngestDateProcessor
-  date_index_name?: IngestDateIndexNameProcessor
-  dissect?: IngestDissectProcessor
-  dot_expander?: IngestDotExpanderProcessor
-  drop?: IngestDropProcessor
-  enrich?: IngestEnrichProcessor
-  fail?: IngestFailProcessor
-  fingerprint?: IngestFingerprintProcessor
-  foreach?: IngestForeachProcessor
-  ip_location?: IngestIpLocationProcessor
-  geo_grid?: IngestGeoGridProcessor
-  geoip?: IngestGeoIpProcessor
-  grok?: IngestGrokProcessor
-  gsub?: IngestGsubProcessor
-  html_strip?: IngestHtmlStripProcessor
-  inference?: IngestInferenceProcessor
-  join?: IngestJoinProcessor
-  json?: IngestJsonProcessor
-  kv?: IngestKeyValueProcessor
-  lowercase?: IngestLowercaseProcessor
-  network_direction?: IngestNetworkDirectionProcessor
-  pipeline?: IngestPipelineProcessor
-  redact?: IngestRedactProcessor
-  registered_domain?: IngestRegisteredDomainProcessor
-  remove?: IngestRemoveProcessor
-  rename?: IngestRenameProcessor
-  reroute?: IngestRerouteProcessor
-  script?: IngestScriptProcessor
-  set?: IngestSetProcessor
-  set_security_user?: IngestSetSecurityUserProcessor
-  sort?: IngestSortProcessor
-  split?: IngestSplitProcessor
-  terminate?: IngestTerminateProcessor
-  trim?: IngestTrimProcessor
-  uppercase?: IngestUppercaseProcessor
-  urldecode?: IngestUrlDecodeProcessor
-  uri_parts?: IngestUriPartsProcessor
-  user_agent?: IngestUserAgentProcessor
-}
-
-export interface IngestRedactProcessor extends IngestProcessorBase {
-  field: Field
-  patterns: GrokPattern[]
-  pattern_definitions?: Record
-  prefix?: string
-  suffix?: string
-  ignore_missing?: boolean
-  skip_if_unlicensed?: boolean
-  trace_redact?: boolean
-}
-
-export interface IngestRegisteredDomainProcessor extends IngestProcessorBase {
-  field: Field
-  target_field?: Field
-  ignore_missing?: boolean
-}
-
-export interface IngestRemoveProcessor extends IngestProcessorBase {
-  field: Fields
-  keep?: Fields
-  ignore_missing?: boolean
-}
-
-export interface IngestRenameProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field: Field
-}
-
-export interface IngestRerouteProcessor extends IngestProcessorBase {
-  destination?: string
-  dataset?: string | string[]
-  namespace?: string | string[]
-}
-
-export interface IngestScriptProcessor extends IngestProcessorBase {
-  id?: Id
-  lang?: string
-  params?: Record
-  source?: string
-}
-
-export interface IngestSetProcessor extends IngestProcessorBase {
-  copy_from?: Field
-  field: Field
-  ignore_empty_value?: boolean
-  media_type?: string
-  override?: boolean
-  value?: any
-}
-
-export interface IngestSetSecurityUserProcessor extends IngestProcessorBase {
-  field: Field
-  properties?: string[]
-}
-
-export type IngestShapeType = 'geo_shape' | 'shape'
-
-export interface IngestSortProcessor extends IngestProcessorBase {
-  field: Field
-  order?: SortOrder
-  target_field?: Field
-}
-
-export interface IngestSplitProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  preserve_trailing?: boolean
-  separator: string
-  target_field?: Field
-}
-
-export interface IngestTerminateProcessor extends IngestProcessorBase {
-}
-
-export interface IngestTrimProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestUppercaseProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestUriPartsProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  keep_original?: boolean
-  remove_if_successful?: boolean
-  target_field?: Field
-}
-
-export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestUserAgentProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  regex_file?: string
-  target_field?: Field
-  properties?: IngestUserAgentProperty[]
-  extract_device_type?: boolean
-}
-
-export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version'
-
-export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
-  id: Ids
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase
-
-export interface IngestDeletePipelineRequest extends RequestBase {
-  id: Id
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IngestDeletePipelineResponse = AcknowledgedResponseBase
-
-export interface IngestGeoIpStatsGeoIpDownloadStatistics {
-  successful_downloads: integer
-  failed_downloads: integer
-  total_download_time: DurationValue
-  databases_count: integer
-  skipped_updates: integer
-  expired_databases: integer
-}
-
-export interface IngestGeoIpStatsGeoIpNodeDatabaseName {
-  name: Name
-}
-
-export interface IngestGeoIpStatsGeoIpNodeDatabases {
-  databases: IngestGeoIpStatsGeoIpNodeDatabaseName[]
-  files_in_temp: string[]
-}
-
-export interface IngestGeoIpStatsRequest extends RequestBase {
-}
-
-export interface IngestGeoIpStatsResponse {
-  stats: IngestGeoIpStatsGeoIpDownloadStatistics
-  nodes: Record
-}
-
-export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata {
-  id: Id
-  version: long
-  modified_date_millis: EpochTime
-  database: IngestDatabaseConfiguration
-}
-
-export interface IngestGetGeoipDatabaseRequest extends RequestBase {
-  id?: Ids
-  master_timeout?: Duration
-}
-
-export interface IngestGetGeoipDatabaseResponse {
-  databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[]
-}
-
-export interface IngestGetPipelineRequest extends RequestBase {
-  id?: Id
-  master_timeout?: Duration
-  summary?: boolean
-}
-
-export type IngestGetPipelineResponse = Record
-
-export interface IngestProcessorGrokRequest extends RequestBase {
-}
-
-export interface IngestProcessorGrokResponse {
-  patterns: Record
-}
-
-export interface IngestPutGeoipDatabaseRequest extends RequestBase {
-  id: Id
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    name: Name
-    maxmind: IngestMaxmind
-  }
-}
-
-export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase
-
-export interface IngestPutPipelineRequest extends RequestBase {
-  id: Id
-  master_timeout?: Duration
-  timeout?: Duration
-  if_version?: VersionNumber
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    _meta?: Metadata
-    description?: string
-    on_failure?: IngestProcessorContainer[]
-    processors?: IngestProcessorContainer[]
-    version?: VersionNumber
-    deprecated?: boolean
-  }
-}
-
-export type IngestPutPipelineResponse = AcknowledgedResponseBase
-
-export interface IngestSimulateDocument {
-  _id?: Id
-  _index?: IndexName
-  _source: any
-}
-
-export interface IngestSimulateDocumentSimulationKeys {
-  _id: Id
-  _index: IndexName
-  _ingest: IngestSimulateIngest
-  _routing?: string
-  _source: Record
-  _version?: SpecUtilsStringified
-  _version_type?: VersionType
-}
-export type IngestSimulateDocumentSimulation = IngestSimulateDocumentSimulationKeys
-& { [property: string]: string | Id | IndexName | IngestSimulateIngest | Record | SpecUtilsStringified | VersionType }
-
-export interface IngestSimulateIngest {
-  _redact?: IngestSimulateRedact
-  timestamp: DateTime
-  pipeline?: Name
-}
-
-export interface IngestSimulatePipelineSimulation {
-  doc?: IngestSimulateDocumentSimulation
-  tag?: string
-  processor_type?: string
-  status?: WatcherActionStatusOptions
-  description?: string
-  ignored_error?: ErrorCause
-  error?: ErrorCause
-}
-
-export interface IngestSimulateRedact {
-  _is_redacted: boolean
-}
-
-export interface IngestSimulateRequest extends RequestBase {
-  id?: Id
-  verbose?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    docs: IngestSimulateDocument[]
-    pipeline?: IngestPipeline
-  }
-}
-
-export interface IngestSimulateResponse {
-  docs: IngestSimulateSimulateDocumentResult[]
-}
-
-export interface IngestSimulateSimulateDocumentResult {
-  doc?: IngestSimulateDocumentSimulation
-  error?: ErrorCause
-  processor_results?: IngestSimulatePipelineSimulation[]
-}
-
-export interface LicenseLicense {
-  expiry_date_in_millis: EpochTime
-  issue_date_in_millis: EpochTime
-  start_date_in_millis?: EpochTime
-  issued_to: string
-  issuer: string
-  max_nodes?: long | null
-  max_resource_units?: long
-  signature: string
-  type: LicenseLicenseType
-  uid: string
-}
-
-export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired'
-
-export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise'
-
-export interface LicenseDeleteRequest extends RequestBase {
-}
-
-export type LicenseDeleteResponse = AcknowledgedResponseBase
-
-export interface LicenseGetLicenseInformation {
-  expiry_date?: DateTime
-  expiry_date_in_millis?: EpochTime
-  issue_date: DateTime
-  issue_date_in_millis: EpochTime
-  issued_to: string
-  issuer: string
-  max_nodes: long | null
-  max_resource_units?: integer | null
-  status: LicenseLicenseStatus
-  type: LicenseLicenseType
-  uid: Uuid
-  start_date_in_millis: EpochTime
-}
-
-export interface LicenseGetRequest extends RequestBase {
-  accept_enterprise?: boolean
-  local?: boolean
-}
-
-export interface LicenseGetResponse {
-  license: LicenseGetLicenseInformation
-}
-
-export interface LicenseGetBasicStatusRequest extends RequestBase {
-}
-
-export interface LicenseGetBasicStatusResponse {
-  eligible_to_start_basic: boolean
-}
-
-export interface LicenseGetTrialStatusRequest extends RequestBase {
-}
-
-export interface LicenseGetTrialStatusResponse {
-  eligible_to_start_trial: boolean
-}
-
-export interface LicensePostAcknowledgement {
-  license: string[]
-  message: string
-}
-
-export interface LicensePostRequest extends RequestBase {
-  acknowledge?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    license?: LicenseLicense
-    licenses?: LicenseLicense[]
-  }
-}
-
-export interface LicensePostResponse {
-  acknowledge?: LicensePostAcknowledgement
-  acknowledged: boolean
-  license_status: LicenseLicenseStatus
-}
-
-export interface LicensePostStartBasicRequest extends RequestBase {
-  acknowledge?: boolean
-}
-
-export interface LicensePostStartBasicResponse {
-  acknowledged: boolean
-  basic_was_started: boolean
-  error_message?: string
-  type?: LicenseLicenseType
-  acknowledge?: Record
-}
-
-export interface LicensePostStartTrialRequest extends RequestBase {
-  acknowledge?: boolean
-  type_query_string?: string
-}
-
-export interface LicensePostStartTrialResponse {
-  acknowledged: boolean
-  error_message?: string
-  trial_was_started: boolean
-  type?: LicenseLicenseType
-}
-
-export interface LogstashPipeline {
-  description: string
-  last_modified: DateTime
-  pipeline_metadata: LogstashPipelineMetadata
-  username: string
-  pipeline: string
-  pipeline_settings: LogstashPipelineSettings
-}
-
-export interface LogstashPipelineMetadata {
-  type: string
-  version: string
-}
-
-export interface LogstashPipelineSettings {
-  'pipeline.workers': integer
-  'pipeline.batch.size': integer
-  'pipeline.batch.delay': integer
-  'queue.type': string
-  'queue.max_bytes.number': integer
-  'queue.max_bytes.units': string
-  'queue.checkpoint.writes': integer
-}
-
-export interface LogstashDeletePipelineRequest extends RequestBase {
-  id: Id
-}
-
-export type LogstashDeletePipelineResponse = boolean
-
-export interface LogstashGetPipelineRequest extends RequestBase {
-  id?: Ids
-}
-
-export type LogstashGetPipelineResponse = Record
-
-export interface LogstashPutPipelineRequest extends RequestBase {
-  id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, use 'pipeline' instead. */
-  body?: LogstashPipeline
-}
-
-export type LogstashPutPipelineResponse = boolean
-
-export interface MigrationDeprecationsDeprecation {
-  details: string
-  level: MigrationDeprecationsDeprecationLevel
-  message: string
-  url: string
-}
-
-export type MigrationDeprecationsDeprecationLevel = 'none' | 'info' | 'warning' | 'critical'
-
-export interface MigrationDeprecationsRequest extends RequestBase {
-  index?: IndexName
-}
-
-export interface MigrationDeprecationsResponse {
-  cluster_settings: MigrationDeprecationsDeprecation[]
-  index_settings: Record
-  data_streams: Record
-  node_settings: MigrationDeprecationsDeprecation[]
-  ml_settings: MigrationDeprecationsDeprecation[]
-}
-
-export interface MigrationGetFeatureUpgradeStatusMigrationFeature {
-  feature_name: string
-  minimum_index_version: VersionString
-  migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus
-  indices: MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo[]
-}
-
-export interface MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo {
-  index: IndexName
-  version: VersionString
-  failure_cause?: ErrorCause
-}
-
-export type MigrationGetFeatureUpgradeStatusMigrationStatus = 'NO_MIGRATION_NEEDED' | 'MIGRATION_NEEDED' | 'IN_PROGRESS' | 'ERROR'
-
-export interface MigrationGetFeatureUpgradeStatusRequest extends RequestBase {
-}
-
-export interface MigrationGetFeatureUpgradeStatusResponse {
-  features: MigrationGetFeatureUpgradeStatusMigrationFeature[]
-  migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus
-}
-
-export interface MigrationPostFeatureUpgradeMigrationFeature {
-  feature_name: string
-}
-
-export interface MigrationPostFeatureUpgradeRequest extends RequestBase {
-}
-
-export interface MigrationPostFeatureUpgradeResponse {
-  accepted: boolean
-  features: MigrationPostFeatureUpgradeMigrationFeature[]
-}
-
-export interface MlAnalysisConfig {
-  bucket_span?: Duration
-  categorization_analyzer?: MlCategorizationAnalyzer
-  categorization_field_name?: Field
-  categorization_filters?: string[]
-  detectors: MlDetector[]
-  influencers?: Field[]
-  latency?: Duration
-  model_prune_window?: Duration
-  multivariate_by_fields?: boolean
-  per_partition_categorization?: MlPerPartitionCategorization
-  summary_count_field_name?: Field
-}
-
-export interface MlAnalysisConfigRead {
-  bucket_span: Duration
-  categorization_analyzer?: MlCategorizationAnalyzer
-  categorization_field_name?: Field
-  categorization_filters?: string[]
-  detectors: MlDetectorRead[]
-  influencers: Field[]
-  model_prune_window?: Duration
-  latency?: Duration
-  multivariate_by_fields?: boolean
-  per_partition_categorization?: MlPerPartitionCategorization
-  summary_count_field_name?: Field
-}
-
-export interface MlAnalysisLimits {
-  categorization_examples_limit?: long
-  model_memory_limit?: string
-}
-
-export interface MlAnalysisMemoryLimit {
-  model_memory_limit: string
-}
-
-export interface MlAnomaly {
-  actual?: double[]
-  anomaly_score_explanation?: MlAnomalyExplanation
-  bucket_span: DurationValue
-  by_field_name?: string
-  by_field_value?: string
-  causes?: MlAnomalyCause[]
-  detector_index: integer
-  field_name?: string
-  function?: string
-  function_description?: string
-  geo_results?: MlGeoResults
-  influencers?: MlInfluence[]
-  initial_record_score: double
-  is_interim: boolean
-  job_id: string
-  over_field_name?: string
-  over_field_value?: string
-  partition_field_name?: string
-  partition_field_value?: string
-  probability: double
-  record_score: double
-  result_type: string
-  timestamp: EpochTime
typical?: double[] -} - -export interface MlAnomalyCause { - actual: double[] - by_field_name: Name - by_field_value: string - correlated_by_field_value: string - field_name: Field - function: string - function_description: string - influencers: MlInfluence[] - over_field_name: Name - over_field_value: string - partition_field_name: string - partition_field_value: string - probability: double - typical: double[] -} - -export interface MlAnomalyExplanation { - anomaly_characteristics_impact?: integer - anomaly_length?: integer - anomaly_type?: string - high_variance_penalty?: boolean - incomplete_bucket_penalty?: boolean - lower_confidence_bound?: double - multi_bucket_impact?: integer - single_bucket_impact?: integer - typical_value?: double - upper_confidence_bound?: double -} - -export interface MlApiKeyAuthorization { - id: string - name: string -} - -export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time' - -export interface MlBucketInfluencer { - anomaly_score: double - bucket_span: DurationValue - influencer_field_name: Field - initial_anomaly_score: double - is_interim: boolean - job_id: Id - probability: double - raw_anomaly_score: double - result_type: string - timestamp: EpochTime - timestamp_string?: DateTime -} - -export interface MlBucketSummary { - anomaly_score: double - bucket_influencers: MlBucketInfluencer[] - bucket_span: DurationValue - event_count: long - initial_anomaly_score: double - is_interim: boolean - job_id: Id - processing_time_ms: DurationValue - result_type: string - timestamp: EpochTime - timestamp_string?: DateTime -} - -export interface MlCalendarEvent { - calendar_id?: Id - event_id?: Id - description: string - end_time: DateTime - start_time: DateTime - skip_result?: boolean - skip_model_update?: boolean - force_time_shift?: integer -} - -export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition - -export interface MlCategorizationAnalyzerDefinition { - char_filter?: AnalysisCharFilter[] - filter?: AnalysisTokenFilter[] - tokenizer?: AnalysisTokenizer -} - -export type MlCategorizationStatus = 'ok' | 'warn' - -export interface MlCategory { - category_id: ulong - examples: string[] - grok_pattern?: GrokPattern - job_id: Id - max_matching_length: ulong - partition_field_name?: string - partition_field_value?: string - regex: string - terms: string - num_matches?: long - preferred_to_categories?: Id[] - p?: string - result_type: string - mlcategory: string -} - -export interface MlChunkingConfig { - mode: MlChunkingMode - time_span?: Duration -} - -export type MlChunkingMode = 'auto' | 'manual' | 'off' - -export interface MlClassificationInferenceOptions { - num_top_classes?: integer - num_top_feature_importance_values?: integer - prediction_field_type?: string - results_field?: string - top_classes_results_field?: string -} - -export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' - -export type MlCustomSettings = any - -export interface MlDataCounts { - bucket_count: long - earliest_record_timestamp?: long - empty_bucket_count: long - input_bytes: long - input_field_count: long - input_record_count: long - invalid_date_count: long - job_id: Id - last_data_time?: long - latest_empty_bucket_timestamp?: long - latest_record_timestamp?: long - latest_sparse_bucket_timestamp?: long - latest_bucket_timestamp?: long - log_time?: long - missing_field_count: long - out_of_order_timestamp_count: long - processed_field_count: long - processed_record_count: long - sparse_bucket_count: long -} - -export interface 
MlDataDescription { - format?: string - time_field?: Field - time_format?: string - field_delimiter?: string -} - -export interface MlDatafeed { - aggregations?: Record - aggs?: Record - authorization?: MlDatafeedAuthorization - chunking_config?: MlChunkingConfig - datafeed_id: Id - frequency?: Duration - indices: string[] - indexes?: string[] - job_id: Id - max_empty_searches?: integer - query: QueryDslQueryContainer - query_delay?: Duration - script_fields?: Record - scroll_size?: integer - delayed_data_check_config: MlDelayedDataCheckConfig - runtime_mappings?: MappingRuntimeFields - indices_options?: IndicesOptions -} - -export interface MlDatafeedAuthorization { - api_key?: MlApiKeyAuthorization - roles?: string[] - service_account?: string -} - -export interface MlDatafeedConfig { - aggregations?: Record - aggs?: Record - chunking_config?: MlChunkingConfig - datafeed_id?: Id - delayed_data_check_config?: MlDelayedDataCheckConfig - frequency?: Duration - indices?: Indices - indexes?: Indices - indices_options?: IndicesOptions - job_id?: Id - max_empty_searches?: integer - query?: QueryDslQueryContainer - query_delay?: Duration - runtime_mappings?: MappingRuntimeFields - script_fields?: Record - scroll_size?: integer -} - -export interface MlDatafeedRunningState { - real_time_configured: boolean - real_time_running: boolean - search_interval?: MlRunningStateSearchInterval -} - -export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' - -export interface MlDatafeedStats { - assignment_explanation?: string - datafeed_id: Id - node?: MlDiscoveryNode - state: MlDatafeedState - timing_stats: MlDatafeedTimingStats - running_state?: MlDatafeedRunningState -} - -export interface MlDatafeedTimingStats { - bucket_count: long - exponential_average_search_time_per_hour_ms: DurationValue - job_id: Id - search_count: long - total_search_time_ms: DurationValue - average_search_time_per_bucket_ms?: DurationValue -} - -export interface MlDataframeAnalysis { - alpha?: double - dependent_variable: string - downsample_factor?: double - early_stopping_enabled?: boolean - eta?: double - eta_growth_rate_per_tree?: double - feature_bag_fraction?: double - feature_processors?: MlDataframeAnalysisFeatureProcessor[] - gamma?: double - lambda?: double - max_optimization_rounds_per_hyperparameter?: integer - max_trees?: integer - maximum_number_trees?: integer - num_top_feature_importance_values?: integer - prediction_field_name?: Field - randomize_seed?: double - soft_tree_depth_limit?: integer - soft_tree_depth_tolerance?: double - training_percent?: Percentage -} - -export interface MlDataframeAnalysisAnalyzedFields { - includes: string[] - excludes: string[] -} - -export interface MlDataframeAnalysisClassification extends MlDataframeAnalysis { - class_assignment_objective?: string - num_top_classes?: integer -} - -export interface MlDataframeAnalysisContainer { - classification?: MlDataframeAnalysisClassification - outlier_detection?: MlDataframeAnalysisOutlierDetection - regression?: MlDataframeAnalysisRegression -} - -export interface MlDataframeAnalysisFeatureProcessor { - frequency_encoding?: MlDataframeAnalysisFeatureProcessorFrequencyEncoding - multi_encoding?: MlDataframeAnalysisFeatureProcessorMultiEncoding - n_gram_encoding?: MlDataframeAnalysisFeatureProcessorNGramEncoding - one_hot_encoding?: MlDataframeAnalysisFeatureProcessorOneHotEncoding - target_mean_encoding?: MlDataframeAnalysisFeatureProcessorTargetMeanEncoding -} - -export interface 
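A sketch of creating a datafeed from the datafeed config types above, assuming the same 8.x Client; the ids, index name, and sizes are illustrative.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// The MlDatafeedConfig fields are accepted as top-level request options.
await client.ml.putDatafeed({
  datafeed_id: 'datafeed-my-job',
  job_id: 'my-job',
  indices: ['server-metrics'],
  query: { match_all: {} },
  scroll_size: 1000,
  chunking_config: { mode: 'auto' } // MlChunkingConfig
})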
MlDataframeAnalysisFeatureProcessorFrequencyEncoding { - feature_name: Name - field: Field - frequency_map: Record -} - -export interface MlDataframeAnalysisFeatureProcessorMultiEncoding { - processors: integer[] -} - -export interface MlDataframeAnalysisFeatureProcessorNGramEncoding { - feature_prefix?: string - field: Field - length?: integer - n_grams: integer[] - start?: integer - custom?: boolean -} - -export interface MlDataframeAnalysisFeatureProcessorOneHotEncoding { - field: Field - hot_map: string -} - -export interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding { - default_value: integer - feature_name: Name - field: Field - target_map: Record -} - -export interface MlDataframeAnalysisOutlierDetection { - compute_feature_influence?: boolean - feature_influence_threshold?: double - method?: string - n_neighbors?: integer - outlier_fraction?: double - standardization_enabled?: boolean -} - -export interface MlDataframeAnalysisRegression extends MlDataframeAnalysis { - loss_function?: string - loss_function_parameter?: double -} - -export interface MlDataframeAnalytics { - analysis_stats?: MlDataframeAnalyticsStatsContainer - assignment_explanation?: string - data_counts: MlDataframeAnalyticsStatsDataCounts - id: Id - memory_usage: MlDataframeAnalyticsStatsMemoryUsage - node?: NodeAttributes - progress: MlDataframeAnalyticsStatsProgress[] - state: MlDataframeState -} - -export interface MlDataframeAnalyticsAuthorization { - api_key?: MlApiKeyAuthorization - roles?: string[] - service_account?: string -} - -export interface MlDataframeAnalyticsDestination { - index: IndexName - results_field?: Field -} - -export interface MlDataframeAnalyticsFieldSelection { - is_included: boolean - is_required: boolean - feature_type?: string - mapping_types: string[] - name: Field - reason?: string -} - -export interface MlDataframeAnalyticsMemoryEstimation { - expected_memory_with_disk: string - expected_memory_without_disk: string -} - -export interface MlDataframeAnalyticsSource { - index: Indices - query?: QueryDslQueryContainer - runtime_mappings?: MappingRuntimeFields - _source?: MlDataframeAnalysisAnalyzedFields | string[] -} - -export interface MlDataframeAnalyticsStatsContainer { - classification_stats?: MlDataframeAnalyticsStatsHyperparameters - outlier_detection_stats?: MlDataframeAnalyticsStatsOutlierDetection - regression_stats?: MlDataframeAnalyticsStatsHyperparameters -} - -export interface MlDataframeAnalyticsStatsDataCounts { - skipped_docs_count: integer - test_docs_count: integer - training_docs_count: integer -} - -export interface MlDataframeAnalyticsStatsHyperparameters { - hyperparameters: MlHyperparameters - iteration: integer - timestamp: EpochTime - timing_stats: MlTimingStats - validation_loss: MlValidationLoss -} - -export interface MlDataframeAnalyticsStatsMemoryUsage { - memory_reestimate_bytes?: long - peak_usage_bytes: long - status: string - timestamp?: EpochTime -} - -export interface MlDataframeAnalyticsStatsOutlierDetection { - parameters: MlOutlierDetectionParameters - timestamp: EpochTime - timing_stats: MlTimingStats -} - -export interface MlDataframeAnalyticsStatsProgress { - phase: string - progress_percent: integer -} - -export interface MlDataframeAnalyticsSummary { - allow_lazy_start?: boolean - analysis: MlDataframeAnalysisContainer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] - authorization?: MlDataframeAnalyticsAuthorization - create_time?: EpochTime - description?: string - dest: MlDataframeAnalyticsDestination - id: 
Id - max_num_threads?: integer - model_memory_limit?: string - source: MlDataframeAnalyticsSource - version?: VersionString -} - -export interface MlDataframeEvaluationClassification { - actual_field: Field - predicted_field?: Field - top_classes_field?: Field - metrics?: MlDataframeEvaluationClassificationMetrics -} - -export interface MlDataframeEvaluationClassificationMetrics extends MlDataframeEvaluationMetrics { - accuracy?: Record - multiclass_confusion_matrix?: Record -} - -export interface MlDataframeEvaluationClassificationMetricsAucRoc { - class_name?: Name - include_curve?: boolean -} - -export interface MlDataframeEvaluationContainer { - classification?: MlDataframeEvaluationClassification - outlier_detection?: MlDataframeEvaluationOutlierDetection - regression?: MlDataframeEvaluationRegression -} - -export interface MlDataframeEvaluationMetrics { - auc_roc?: MlDataframeEvaluationClassificationMetricsAucRoc - precision?: Record - recall?: Record -} - -export interface MlDataframeEvaluationOutlierDetection { - actual_field: Field - predicted_probability_field: Field - metrics?: MlDataframeEvaluationOutlierDetectionMetrics -} - -export interface MlDataframeEvaluationOutlierDetectionMetrics extends MlDataframeEvaluationMetrics { - confusion_matrix?: Record -} - -export interface MlDataframeEvaluationRegression { - actual_field: Field - predicted_field: Field - metrics?: MlDataframeEvaluationRegressionMetrics -} - -export interface MlDataframeEvaluationRegressionMetrics { - mse?: Record - msle?: MlDataframeEvaluationRegressionMetricsMsle - huber?: MlDataframeEvaluationRegressionMetricsHuber - r_squared?: Record -} - -export interface MlDataframeEvaluationRegressionMetricsHuber { - delta?: double -} - -export interface MlDataframeEvaluationRegressionMetricsMsle { - offset?: double -} - -export type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed' - -export interface MlDelayedDataCheckConfig { - check_window?: Duration - enabled: boolean -} - -export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated' - -export type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed' - -export interface MlDetectionRule { - actions?: MlRuleAction[] - conditions?: MlRuleCondition[] - scope?: Record -} - -export interface MlDetector { - by_field_name?: Field - custom_rules?: MlDetectionRule[] - detector_description?: string - detector_index?: integer - exclude_frequent?: MlExcludeFrequent - field_name?: Field - function?: string - over_field_name?: Field - partition_field_name?: Field - use_null?: boolean -} - -export interface MlDetectorRead { - by_field_name?: Field - custom_rules?: MlDetectionRule[] - detector_description?: string - detector_index?: integer - exclude_frequent?: MlExcludeFrequent - field_name?: Field - function: string - over_field_name?: Field - partition_field_name?: Field - use_null?: boolean -} - -export interface MlDiscoveryNode { - attributes: Record - ephemeral_id: Id - id: Id - name: Name - transport_address: TransportAddress -} - -export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over' - -export interface MlFillMaskInferenceOptions { - mask_token?: string - num_top_classes?: integer - tokenization?: MlTokenizationConfigContainer - results_field?: string -} - -export interface MlFillMaskInferenceUpdateOptions { - num_top_classes?: integer - tokenization?: MlNlpTokenizationUpdateOptions - results_field?: string -} - -export interface MlFilter { - description?: string - filter_id: Id - items: 
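A sketch of wiring a custom detection rule into a detector, per the MlDetectionRule, MlRuleCondition, and MlDetector shapes above; it assumes the generated types are importable as `estypes` from the package root (true of released 8.x clients; adjust the import path if building against this canary).

import { estypes } from '@elastic/elasticsearch'

// Skip anomaly results whenever the actual value is below 100.
const rule: estypes.MlDetectionRule = {
  actions: ['skip_result'],
  conditions: [{ applies_to: 'actual', operator: 'lt', value: 100 }]
}

const detector: estypes.MlDetector = {
  function: 'mean',
  field_name: 'responsetime',
  custom_rules: [rule]
}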
string[] -} - -export interface MlFilterRef { - filter_id: Id - filter_type?: MlFilterType -} - -export type MlFilterType = 'include' | 'exclude' - -export interface MlGeoResults { - actual_point: string - typical_point: string -} - -export interface MlHyperparameter { - absolute_importance?: double - name: Name - relative_importance?: double - supplied: boolean - value: double -} - -export interface MlHyperparameters { - alpha?: double - lambda?: double - gamma?: double - eta?: double - eta_growth_rate_per_tree?: double - feature_bag_fraction?: double - downsample_factor?: double - max_attempts_to_add_tree?: integer - max_optimization_rounds_per_hyperparameter?: integer - max_trees?: integer - num_folds?: integer - num_splits_per_feature?: integer - soft_tree_depth_limit?: integer - soft_tree_depth_tolerance?: double -} - -export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' | 'definition_status' - -export interface MlInferenceConfigCreateContainer { - regression?: MlRegressionInferenceOptions - classification?: MlClassificationInferenceOptions - text_classification?: MlTextClassificationInferenceOptions - zero_shot_classification?: MlZeroShotClassificationInferenceOptions - fill_mask?: MlFillMaskInferenceOptions - ner?: MlNerInferenceOptions - pass_through?: MlPassThroughInferenceOptions - text_embedding?: MlTextEmbeddingInferenceOptions - text_expansion?: MlTextExpansionInferenceOptions - question_answering?: MlQuestionAnsweringInferenceOptions -} - -export interface MlInferenceConfigUpdateContainer { - regression?: MlRegressionInferenceOptions - classification?: MlClassificationInferenceOptions - text_classification?: MlTextClassificationInferenceUpdateOptions - zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions - fill_mask?: MlFillMaskInferenceUpdateOptions - ner?: MlNerInferenceUpdateOptions - pass_through?: MlPassThroughInferenceUpdateOptions - text_embedding?: MlTextEmbeddingInferenceUpdateOptions - text_expansion?: MlTextExpansionInferenceUpdateOptions - question_answering?: MlQuestionAnsweringInferenceUpdateOptions -} - -export interface MlInferenceResponseResult { - entities?: MlTrainedModelEntities[] - is_truncated?: boolean - predicted_value?: MlPredictedValue | MlPredictedValue[] - predicted_value_sequence?: string - prediction_probability?: double - prediction_score?: double - top_classes?: MlTopClassEntry[] - warning?: string - feature_importance?: MlTrainedModelInferenceFeatureImportance[] -} - -export interface MlInfluence { - influencer_field_name: string - influencer_field_values: string[] -} - -export interface MlInfluencer { - bucket_span: DurationValue - influencer_score: double - influencer_field_name: Field - influencer_field_value: string - initial_influencer_score: double - is_interim: boolean - job_id: Id - probability: double - result_type: string - timestamp: EpochTime - foo?: string -} - -export interface MlJob { - allow_lazy_open: boolean - analysis_config: MlAnalysisConfig - analysis_limits?: MlAnalysisLimits - background_persist_interval?: Duration - blocked?: MlJobBlocked - create_time?: DateTime - custom_settings?: MlCustomSettings - daily_model_snapshot_retention_after_days?: long - data_description: MlDataDescription - datafeed_config?: MlDatafeed - deleting?: boolean - description?: string - finished_time?: DateTime - groups?: string[] - job_id: Id - job_type?: string - job_version?: VersionString - model_plot_config?: MlModelPlotConfig - model_snapshot_id?: Id - 
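A sketch for the MlFilter/MlFilterRef types above, using the corresponding put-filter endpoint and assuming the same 8.x Client; the filter id and items are illustrative.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Filters can then be referenced from detector custom_rules via MlFilterRef.
await client.ml.putFilter({
  filter_id: 'safe_domains',
  description: 'domains to exclude from anomaly results',
  items: ['elastic.co', 'wikipedia.org']
})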
model_snapshot_retention_days: long - renormalization_window_days?: long - results_index_name: IndexName - results_retention_days?: long -} - -export interface MlJobBlocked { - reason: MlJobBlockedReason - task_id?: TaskId -} - -export type MlJobBlockedReason = 'delete' | 'reset' | 'revert' - -export interface MlJobConfig { - allow_lazy_open?: boolean - analysis_config: MlAnalysisConfig - analysis_limits?: MlAnalysisLimits - background_persist_interval?: Duration - custom_settings?: MlCustomSettings - daily_model_snapshot_retention_after_days?: long - data_description: MlDataDescription - datafeed_config?: MlDatafeedConfig - description?: string - groups?: string[] - job_id?: Id - job_type?: string - model_plot_config?: MlModelPlotConfig - model_snapshot_retention_days?: long - renormalization_window_days?: long - results_index_name?: IndexName - results_retention_days?: long -} - -export interface MlJobForecastStatistics { - memory_bytes?: MlJobStatistics - processing_time_ms?: MlJobStatistics - records?: MlJobStatistics - status?: Record - total: long - forecasted_jobs: integer -} - -export type MlJobState = 'closing' | 'closed' | 'opened' | 'failed' | 'opening' - -export interface MlJobStatistics { - avg: double - max: double - min: double - total: double -} - -export interface MlJobStats { - assignment_explanation?: string - data_counts: MlDataCounts - forecasts_stats: MlJobForecastStatistics - job_id: string - model_size_stats: MlModelSizeStats - node?: MlDiscoveryNode - open_time?: DateTime - state: MlJobState - timing_stats: MlJobTimingStats - deleting?: boolean -} - -export interface MlJobTimingStats { - average_bucket_processing_time_ms?: DurationValue - bucket_count: long - exponential_average_bucket_processing_time_ms?: DurationValue - exponential_average_bucket_processing_time_per_hour_ms: DurationValue - job_id: Id - total_bucket_processing_time_ms: DurationValue - maximum_bucket_processing_time_ms?: DurationValue - minimum_bucket_processing_time_ms?: DurationValue -} - -export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' - -export interface MlModelPlotConfig { - annotations_enabled?: boolean - enabled?: boolean - terms?: Field -} - -export interface MlModelSizeStats { - bucket_allocation_failures_count: long - job_id: Id - log_time: DateTime - memory_status: MlMemoryStatus - model_bytes: ByteSize - model_bytes_exceeded?: ByteSize - model_bytes_memory_limit?: ByteSize - peak_model_bytes?: ByteSize - assignment_memory_basis?: string - result_type: string - total_by_field_count: long - total_over_field_count: long - total_partition_field_count: long - categorization_status: MlCategorizationStatus - categorized_doc_count: integer - dead_category_count: integer - failed_category_count: integer - frequent_category_count: integer - rare_category_count: integer - total_category_count: integer - timestamp?: long -} - -export interface MlModelSnapshot { - description?: string - job_id: Id - latest_record_time_stamp?: integer - latest_result_time_stamp?: integer - min_version: VersionString - model_size_stats?: MlModelSizeStats - retain: boolean - snapshot_doc_count: long - snapshot_id: Id - timestamp: long -} - -export interface MlModelSnapshotUpgrade { - job_id: Id - snapshot_id: Id - state: MlSnapshotUpgradeState - node: MlDiscoveryNode - assignment_explanation: string -} - -export interface MlNerInferenceOptions { - tokenization?: MlTokenizationConfigContainer - results_field?: string - classification_labels?: string[] - vocabulary?: MlVocabulary -} - -export interface 
MlNerInferenceUpdateOptions { - tokenization?: MlNlpTokenizationUpdateOptions - results_field?: string -} - -export interface MlNlpBertTokenizationConfig { - do_lower_case?: boolean - with_special_tokens?: boolean - max_sequence_length?: integer - truncate?: MlTokenizationTruncate - span?: integer -} - -export interface MlNlpRobertaTokenizationConfig { - add_prefix_space?: boolean - with_special_tokens?: boolean - max_sequence_length?: integer - truncate?: MlTokenizationTruncate - span?: integer -} - -export interface MlNlpTokenizationUpdateOptions { - truncate?: MlTokenizationTruncate - span?: integer -} - -export interface MlOutlierDetectionParameters { - compute_feature_influence?: boolean - feature_influence_threshold?: double - method?: string - n_neighbors?: integer - outlier_fraction?: double - standardization_enabled?: boolean -} - -export interface MlOverallBucket { - bucket_span: DurationValue - is_interim: boolean - jobs: MlOverallBucketJob[] - overall_score: double - result_type: string - timestamp: EpochTime - timestamp_string: DateTime -} - -export interface MlOverallBucketJob { - job_id: Id - max_anomaly_score: double -} - -export interface MlPage { - from?: integer - size?: integer -} - -export interface MlPassThroughInferenceOptions { - tokenization?: MlTokenizationConfigContainer - results_field?: string - vocabulary?: MlVocabulary -} - -export interface MlPassThroughInferenceUpdateOptions { - tokenization?: MlNlpTokenizationUpdateOptions - results_field?: string -} - -export interface MlPerPartitionCategorization { - enabled?: boolean - stop_on_warn?: boolean -} - -export type MlPredictedValue = ScalarValue | ScalarValue[] - -export interface MlQuestionAnsweringInferenceOptions { - num_top_classes?: integer - tokenization?: MlTokenizationConfigContainer - results_field?: string - max_answer_length?: integer -} - -export interface MlQuestionAnsweringInferenceUpdateOptions { - question: string - num_top_classes?: integer - tokenization?: MlNlpTokenizationUpdateOptions - results_field?: string - max_answer_length?: integer -} - -export interface MlRegressionInferenceOptions { - results_field?: Field - num_top_feature_importance_values?: integer -} - -export type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'stopping' - -export type MlRuleAction = 'skip_result' | 'skip_model_update' - -export interface MlRuleCondition { - applies_to: MlAppliesTo - operator: MlConditionOperator - value: double -} - -export interface MlRunningStateSearchInterval { - end?: Duration - end_ms: DurationValue - start?: Duration - start_ms: DurationValue -} - -export type MlSnapshotUpgradeState = 'loading_old_state' | 'saving_new_state' | 'stopped' | 'failed' - -export interface MlTextClassificationInferenceOptions { - num_top_classes?: integer - tokenization?: MlTokenizationConfigContainer - results_field?: string - classification_labels?: string[] -} - -export interface MlTextClassificationInferenceUpdateOptions { - num_top_classes?: integer - tokenization?: MlNlpTokenizationUpdateOptions - results_field?: string - classification_labels?: string[] -} - -export interface MlTextEmbeddingInferenceOptions { - embedding_size?: integer - tokenization?: MlTokenizationConfigContainer - results_field?: string -} - -export interface MlTextEmbeddingInferenceUpdateOptions { - tokenization?: MlNlpTokenizationUpdateOptions - results_field?: string -} - -export interface MlTextExpansionInferenceOptions { - tokenization?: MlTokenizationConfigContainer - results_field?: string -} - -export 
interface MlTextExpansionInferenceUpdateOptions { - tokenization?: MlNlpTokenizationUpdateOptions - results_field?: string -} - -export interface MlTimingStats { - elapsed_time: DurationValue - iteration_time?: DurationValue -} - -export interface MlTokenizationConfigContainer { - bert?: MlNlpBertTokenizationConfig - mpnet?: MlNlpBertTokenizationConfig - roberta?: MlNlpRobertaTokenizationConfig -} - -export type MlTokenizationTruncate = 'first' | 'second' | 'none' - -export interface MlTopClassEntry { - class_name: string - class_probability: double - class_score: double -} - -export interface MlTotalFeatureImportance { - feature_name: Name - importance: MlTotalFeatureImportanceStatistics[] - classes: MlTotalFeatureImportanceClass[] -} - -export interface MlTotalFeatureImportanceClass { - class_name: Name - importance: MlTotalFeatureImportanceStatistics[] -} - -export interface MlTotalFeatureImportanceStatistics { - mean_magnitude: double - max: integer - min: integer -} - -export interface MlTrainedModelAssignment { - assignment_state: MlDeploymentAssignmentState - max_assigned_allocations?: integer - routing_table: Record - start_time: DateTime - task_parameters: MlTrainedModelAssignmentTaskParameters -} - -export interface MlTrainedModelAssignmentRoutingTable { - reason: string - routing_state: MlRoutingState - current_allocations: integer - target_allocations: integer -} - -export interface MlTrainedModelAssignmentTaskParameters { - model_bytes: integer - model_id: Id - deployment_id: Id - cache_size: ByteSize - number_of_allocations: integer - priority: MlTrainingPriority - queue_capacity: integer - threads_per_allocation: integer -} - -export interface MlTrainedModelConfig { - model_id: Id - model_type?: MlTrainedModelType - tags: string[] - version?: VersionString - compressed_definition?: string - created_by?: string - create_time?: DateTime - default_field_map?: Record - description?: string - estimated_heap_memory_usage_bytes?: integer - estimated_operations?: integer - fully_defined?: boolean - inference_config?: MlInferenceConfigCreateContainer - input: MlTrainedModelConfigInput - license_level?: string - metadata?: MlTrainedModelConfigMetadata - model_size_bytes?: ByteSize - location?: MlTrainedModelLocation - prefix_strings?: MlTrainedModelPrefixStrings -} - -export interface MlTrainedModelConfigInput { - field_names: Field[] -} - -export interface MlTrainedModelConfigMetadata { - model_aliases?: string[] - feature_importance_baseline?: Record - hyperparameters?: MlHyperparameter[] - total_feature_importance?: MlTotalFeatureImportance[] -} - -export interface MlTrainedModelDeploymentAllocationStatus { - allocation_count: integer - state: MlDeploymentAllocationState - target_allocation_count: integer -} - -export interface MlTrainedModelDeploymentNodesStats { - average_inference_time_ms: DurationValue - error_count: integer - inference_count: integer - last_access: long - node: MlDiscoveryNode - number_of_allocations: integer - number_of_pending_requests: integer - rejection_execution_count: integer - routing_state: MlTrainedModelAssignmentRoutingTable - start_time: EpochTime - threads_per_allocation: integer - timeout_count: integer -} - -export interface MlTrainedModelDeploymentStats { - allocation_status: MlTrainedModelDeploymentAllocationStatus - cache_size?: ByteSize - deployment_id: Id - error_count: integer - inference_count: integer - model_id: Id - nodes: MlTrainedModelDeploymentNodesStats[] - number_of_allocations: integer - queue_capacity: integer - 
rejected_execution_count: integer - reason: string - start_time: EpochTime - state: MlDeploymentAssignmentState - threads_per_allocation: integer - timeout_count: integer -} - -export interface MlTrainedModelEntities { - class_name: string - class_probability: double - entity: string - start_pos: integer - end_pos: integer -} - -export interface MlTrainedModelInferenceClassImportance { - class_name: string - importance: double -} - -export interface MlTrainedModelInferenceFeatureImportance { - feature_name: string - importance?: double - classes?: MlTrainedModelInferenceClassImportance[] -} - -export interface MlTrainedModelInferenceStats { - cache_miss_count: integer - failure_count: integer - inference_count: integer - missing_all_fields_count: integer - timestamp: EpochTime -} - -export interface MlTrainedModelLocation { - index: MlTrainedModelLocationIndex -} - -export interface MlTrainedModelLocationIndex { - name: IndexName -} - -export interface MlTrainedModelPrefixStrings { - ingest?: string - search?: string -} - -export interface MlTrainedModelSizeStats { - model_size_bytes: ByteSize - required_native_memory_bytes: ByteSize -} - -export interface MlTrainedModelStats { - deployment_stats?: MlTrainedModelDeploymentStats - inference_stats?: MlTrainedModelInferenceStats - ingest?: Record - model_id: Id - model_size_stats: MlTrainedModelSizeStats - pipeline_count: integer -} - -export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch' - -export type MlTrainingPriority = 'normal' | 'low' - -export interface MlTransformAuthorization { - api_key?: MlApiKeyAuthorization - roles?: string[] - service_account?: string -} - -export interface MlValidationLoss { - fold_values: string[] - loss_type: string -} - -export interface MlVocabulary { - index: IndexName -} - -export interface MlZeroShotClassificationInferenceOptions { - tokenization?: MlTokenizationConfigContainer - hypothesis_template?: string - classification_labels: string[] - results_field?: string - multi_label?: boolean - labels?: string[] -} - -export interface MlZeroShotClassificationInferenceUpdateOptions { - tokenization?: MlNlpTokenizationUpdateOptions - results_field?: string - multi_label?: boolean - labels: string[] -} - -export interface MlClearTrainedModelDeploymentCacheRequest extends RequestBase { - model_id: Id -} - -export interface MlClearTrainedModelDeploymentCacheResponse { - cleared: boolean -} - -export interface MlCloseJobRequest extends RequestBase { - job_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - allow_no_match?: boolean - force?: boolean - timeout?: Duration - } -} - -export interface MlCloseJobResponse { - closed: boolean -} - -export interface MlDeleteCalendarRequest extends RequestBase { - calendar_id: Id -} - -export type MlDeleteCalendarResponse = AcknowledgedResponseBase - -export interface MlDeleteCalendarEventRequest extends RequestBase { - calendar_id: Id - event_id: Id -} - -export type MlDeleteCalendarEventResponse = AcknowledgedResponseBase - -export interface MlDeleteCalendarJobRequest extends RequestBase { - calendar_id: Id - job_id: Ids -} - -export interface MlDeleteCalendarJobResponse { - calendar_id: Id - description?: string - job_ids: Ids -} - -export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { - id: Id - force?: boolean - timeout?: Duration -} - -export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase - -export interface MlDeleteDatafeedRequest extends RequestBase { - datafeed_id: Id - force?: boolean -} - -export type MlDeleteDatafeedResponse = AcknowledgedResponseBase - -export interface MlDeleteExpiredDataRequest extends RequestBase { - job_id?: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - requests_per_second?: float - timeout?: Duration - } -} - -export interface MlDeleteExpiredDataResponse { - deleted: boolean -} - -export interface MlDeleteFilterRequest extends RequestBase { - filter_id: Id -} - -export type MlDeleteFilterResponse = AcknowledgedResponseBase - -export interface MlDeleteForecastRequest extends RequestBase { - job_id: Id - forecast_id?: Id - allow_no_forecasts?: boolean - timeout?: Duration -} - -export type MlDeleteForecastResponse = AcknowledgedResponseBase - -export interface MlDeleteJobRequest extends RequestBase { - job_id: Id - force?: boolean - delete_user_annotations?: boolean - wait_for_completion?: boolean -} - -export type MlDeleteJobResponse = AcknowledgedResponseBase - -export interface MlDeleteModelSnapshotRequest extends RequestBase { - job_id: Id - snapshot_id: Id -} - -export type MlDeleteModelSnapshotResponse = AcknowledgedResponseBase - -export interface MlDeleteTrainedModelRequest extends RequestBase { - model_id: Id - force?: boolean -} - -export type MlDeleteTrainedModelResponse = AcknowledgedResponseBase - -export interface MlDeleteTrainedModelAliasRequest extends RequestBase { - model_alias: Name - model_id: Id -} - -export type MlDeleteTrainedModelAliasResponse = AcknowledgedResponseBase - -export interface MlEstimateModelMemoryRequest extends RequestBase { - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
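A sketch of the job lifecycle requests above (MlCloseJobRequest, MlDeleteDatafeedRequest, MlDeleteJobRequest), assuming the same 8.x Client; 'my-job' is illustrative. The deprecated body keys (force, timeout) are accepted as top-level options.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.ml.closeJob({ job_id: 'my-job', force: true, timeout: '30s' })
await client.ml.deleteDatafeed({ datafeed_id: 'datafeed-my-job', force: true })
await client.ml.deleteJob({ job_id: 'my-job', wait_for_completion: true })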
*/ - body?: { - analysis_config?: MlAnalysisConfig - max_bucket_cardinality?: Record - overall_cardinality?: Record - } -} - -export interface MlEstimateModelMemoryResponse { - model_memory_estimate: string -} - -export interface MlEvaluateDataFrameConfusionMatrixItem { - actual_class: Name - actual_class_doc_count: integer - predicted_classes: MlEvaluateDataFrameConfusionMatrixPrediction[] - other_predicted_class_doc_count: integer -} - -export interface MlEvaluateDataFrameConfusionMatrixPrediction { - predicted_class: Name - count: integer -} - -export interface MlEvaluateDataFrameConfusionMatrixThreshold { - tp: integer - fp: integer - tn: integer - fn: integer -} - -export interface MlEvaluateDataFrameDataframeClassificationSummary { - auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc - accuracy?: MlEvaluateDataFrameDataframeClassificationSummaryAccuracy - multiclass_confusion_matrix?: MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix - precision?: MlEvaluateDataFrameDataframeClassificationSummaryPrecision - recall?: MlEvaluateDataFrameDataframeClassificationSummaryRecall -} - -export interface MlEvaluateDataFrameDataframeClassificationSummaryAccuracy { - classes: MlEvaluateDataFrameDataframeEvaluationClass[] - overall_accuracy: double -} - -export interface MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix { - confusion_matrix: MlEvaluateDataFrameConfusionMatrixItem[] - other_actual_class_count: integer -} - -export interface MlEvaluateDataFrameDataframeClassificationSummaryPrecision { - classes: MlEvaluateDataFrameDataframeEvaluationClass[] - avg_precision: double -} - -export interface MlEvaluateDataFrameDataframeClassificationSummaryRecall { - classes: MlEvaluateDataFrameDataframeEvaluationClass[] - avg_recall: double -} - -export interface MlEvaluateDataFrameDataframeEvaluationClass extends MlEvaluateDataFrameDataframeEvaluationValue { - class_name: Name -} - -export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc extends MlEvaluateDataFrameDataframeEvaluationValue { - curve?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem[] -} - -export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem { - tpr: double - fpr: double - threshold: double -} - -export interface MlEvaluateDataFrameDataframeEvaluationValue { - value: double -} - -export interface MlEvaluateDataFrameDataframeOutlierDetectionSummary { - auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc - precision?: Record - recall?: Record - confusion_matrix?: Record -} - -export interface MlEvaluateDataFrameDataframeRegressionSummary { - huber?: MlEvaluateDataFrameDataframeEvaluationValue - mse?: MlEvaluateDataFrameDataframeEvaluationValue - msle?: MlEvaluateDataFrameDataframeEvaluationValue - r_squared?: MlEvaluateDataFrameDataframeEvaluationValue -} - -export interface MlEvaluateDataFrameRequest extends RequestBase { - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
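A sketch for MlEstimateModelMemoryRequest above, assuming the same 8.x Client; the analysis config and cardinality figures are illustrative.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const estimate = await client.ml.estimateModelMemory({
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'responsetime', by_field_name: 'airline' }]
  },
  // Expected distinct values of the by/over/partition fields.
  overall_cardinality: { airline: 50 }
})
console.log(estimate.model_memory_estimate)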
*/ - body?: { - evaluation: MlDataframeEvaluationContainer - index: IndexName - query?: QueryDslQueryContainer - } -} - -export interface MlEvaluateDataFrameResponse { - classification?: MlEvaluateDataFrameDataframeClassificationSummary - outlier_detection?: MlEvaluateDataFrameDataframeOutlierDetectionSummary - regression?: MlEvaluateDataFrameDataframeRegressionSummary -} - -export interface MlExplainDataFrameAnalyticsRequest extends RequestBase { - id?: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - source?: MlDataframeAnalyticsSource - dest?: MlDataframeAnalyticsDestination - analysis?: MlDataframeAnalysisContainer - description?: string - model_memory_limit?: string - max_num_threads?: integer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] - allow_lazy_start?: boolean - } -} - -export interface MlExplainDataFrameAnalyticsResponse { - field_selection: MlDataframeAnalyticsFieldSelection[] - memory_estimation: MlDataframeAnalyticsMemoryEstimation -} - -export interface MlFlushJobRequest extends RequestBase { - job_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - advance_time?: DateTime - calc_interim?: boolean - end?: DateTime - skip_time?: DateTime - start?: DateTime - } -} - -export interface MlFlushJobResponse { - flushed: boolean - last_finalized_bucket_end?: integer -} - -export interface MlForecastRequest extends RequestBase { - job_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - duration?: Duration - expires_in?: Duration - max_model_memory?: string - } -} - -export interface MlForecastResponse { - acknowledged: boolean - forecast_id: Id -} - -export interface MlGetBucketsRequest extends RequestBase { - job_id: Id - timestamp?: DateTime - from?: integer - size?: integer - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - anomaly_score?: double - desc?: boolean - end?: DateTime - exclude_interim?: boolean - expand?: boolean - page?: MlPage - sort?: Field - start?: DateTime - } -} - -export interface MlGetBucketsResponse { - buckets: MlBucketSummary[] - count: long -} - -export interface MlGetCalendarEventsRequest extends RequestBase { - calendar_id: Id - end?: DateTime - from?: integer - job_id?: Id - size?: integer - start?: DateTime -} - -export interface MlGetCalendarEventsResponse { - count: long - events: MlCalendarEvent[] -} - -export interface MlGetCalendarsCalendar { - calendar_id: Id - description?: string - job_ids: Id[] -} - -export interface MlGetCalendarsRequest extends RequestBase { - calendar_id?: Id - from?: integer - size?: integer - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - page?: MlPage - } -} - -export interface MlGetCalendarsResponse { - calendars: MlGetCalendarsCalendar[] - count: long -} - -export interface MlGetCategoriesRequest extends RequestBase { - job_id: Id - category_id?: CategoryId - from?: integer - partition_field_value?: string - size?: integer - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
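A sketch for MlEvaluateDataFrameRequest above, assuming the same 8.x Client; the index and field names are illustrative.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Evaluate a regression result set via the MlDataframeEvaluationContainer shape.
const result = await client.ml.evaluateDataFrame({
  index: 'house-prices-predictions',
  evaluation: {
    regression: {
      actual_field: 'price',
      predicted_field: 'ml.price_prediction',
      metrics: { mse: {}, r_squared: {} }
    }
  }
})
console.log(result.regression?.mse?.value)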
*/ - body?: { - page?: MlPage - } -} - -export interface MlGetCategoriesResponse { - categories: MlCategory[] - count: long -} - -export interface MlGetDataFrameAnalyticsRequest extends RequestBase { - id?: Id - allow_no_match?: boolean - from?: integer - size?: integer - exclude_generated?: boolean -} - -export interface MlGetDataFrameAnalyticsResponse { - count: integer - data_frame_analytics: MlDataframeAnalyticsSummary[] -} - -export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase { - id?: Id - allow_no_match?: boolean - from?: integer - size?: integer - verbose?: boolean -} - -export interface MlGetDataFrameAnalyticsStatsResponse { - count: long - data_frame_analytics: MlDataframeAnalytics[] -} - -export interface MlGetDatafeedStatsRequest extends RequestBase { - datafeed_id?: Ids - allow_no_match?: boolean -} - -export interface MlGetDatafeedStatsResponse { - count: long - datafeeds: MlDatafeedStats[] -} - -export interface MlGetDatafeedsRequest extends RequestBase { - datafeed_id?: Ids - allow_no_match?: boolean - exclude_generated?: boolean -} - -export interface MlGetDatafeedsResponse { - count: long - datafeeds: MlDatafeed[] -} - -export interface MlGetFiltersRequest extends RequestBase { - filter_id?: Ids - from?: integer - size?: integer -} - -export interface MlGetFiltersResponse { - count: long - filters: MlFilter[] -} - -export interface MlGetInfluencersRequest extends RequestBase { - job_id: Id - desc?: boolean - end?: DateTime - exclude_interim?: boolean - influencer_score?: double - from?: integer - size?: integer - sort?: Field - start?: DateTime - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - page?: MlPage - } -} - -export interface MlGetInfluencersResponse { - count: long - influencers: MlInfluencer[] -} - -export interface MlGetJobStatsRequest extends RequestBase { - job_id?: Id - allow_no_match?: boolean -} - -export interface MlGetJobStatsResponse { - count: long - jobs: MlJobStats[] -} - -export interface MlGetJobsRequest extends RequestBase { - job_id?: Ids - allow_no_match?: boolean - exclude_generated?: boolean -} - -export interface MlGetJobsResponse { - count: long - jobs: MlJob[] -} - -export interface MlGetMemoryStatsJvmStats { - heap_max?: ByteSize - heap_max_in_bytes: integer - java_inference?: ByteSize - java_inference_in_bytes: integer - java_inference_max?: ByteSize - java_inference_max_in_bytes: integer -} - -export interface MlGetMemoryStatsMemMlStats { - anomaly_detectors?: ByteSize - anomaly_detectors_in_bytes: integer - data_frame_analytics?: ByteSize - data_frame_analytics_in_bytes: integer - max?: ByteSize - max_in_bytes: integer - native_code_overhead?: ByteSize - native_code_overhead_in_bytes: integer - native_inference?: ByteSize - native_inference_in_bytes: integer -} - -export interface MlGetMemoryStatsMemStats { - adjusted_total?: ByteSize - adjusted_total_in_bytes: integer - total?: ByteSize - total_in_bytes: integer - ml: MlGetMemoryStatsMemMlStats -} - -export interface MlGetMemoryStatsMemory { - attributes: Record - jvm: MlGetMemoryStatsJvmStats - mem: MlGetMemoryStatsMemStats - name: Name - roles: string[] - transport_address: TransportAddress - ephemeral_id: Id -} - -export interface MlGetMemoryStatsRequest extends RequestBase { - node_id?: Id - human?: boolean - master_timeout?: Duration - timeout?: Duration -} - -export interface MlGetMemoryStatsResponse { - _nodes: NodeStatistics - cluster_name: Name - nodes: Record -} - -export 
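A sketch for the job listing/statistics requests above (MlGetJobStatsRequest and friends), assuming the same 8.x Client.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const { count, jobs } = await client.ml.getJobStats({ allow_no_match: true })
console.log(`${count} anomaly detection jobs`)
for (const job of jobs) {
  // MlJobStats exposes state, data counts, timing stats, and more per job.
  console.log(job.job_id, job.state, job.data_counts.processed_record_count)
}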
interface MlGetModelSnapshotUpgradeStatsRequest extends RequestBase { - job_id: Id - snapshot_id: Id - allow_no_match?: boolean -} - -export interface MlGetModelSnapshotUpgradeStatsResponse { - count: long - model_snapshot_upgrades: MlModelSnapshotUpgrade[] -} - -export interface MlGetModelSnapshotsRequest extends RequestBase { - job_id: Id - snapshot_id?: Id - from?: integer - size?: integer - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - desc?: boolean - end?: DateTime - page?: MlPage - sort?: Field - start?: DateTime - } -} - -export interface MlGetModelSnapshotsResponse { - count: long - model_snapshots: MlModelSnapshot[] -} - -export interface MlGetOverallBucketsRequest extends RequestBase { - job_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - allow_no_match?: boolean - bucket_span?: Duration - end?: DateTime - exclude_interim?: boolean - overall_score?: double | string - start?: DateTime - top_n?: integer - } -} - -export interface MlGetOverallBucketsResponse { - count: long - overall_buckets: MlOverallBucket[] -} - -export interface MlGetRecordsRequest extends RequestBase { - job_id: Id - from?: integer - size?: integer - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - desc?: boolean - end?: DateTime - exclude_interim?: boolean - page?: MlPage - record_score?: double - sort?: Field - start?: DateTime - } -} - -export interface MlGetRecordsResponse { - count: long - records: MlAnomaly[] -} - -export interface MlGetTrainedModelsRequest extends RequestBase { - model_id?: Ids - allow_no_match?: boolean - decompress_definition?: boolean - exclude_generated?: boolean - from?: integer - include?: MlInclude - size?: integer - tags?: string | string[] -} - -export interface MlGetTrainedModelsResponse { - count: integer - trained_model_configs: MlTrainedModelConfig[] -} - -export interface MlGetTrainedModelsStatsRequest extends RequestBase { - model_id?: Ids - allow_no_match?: boolean - from?: integer - size?: integer -} - -export interface MlGetTrainedModelsStatsResponse { - count: integer - trained_model_stats: MlTrainedModelStats[] -} - -export interface MlInferTrainedModelRequest extends RequestBase { - model_id: Id - timeout?: Duration - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
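A sketch for MlGetOverallBucketsRequest above, assuming the same 8.x Client; the job wildcard and thresholds are illustrative. The deprecated body keys (top_n, overall_score, start) are accepted as top-level options.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const overall = await client.ml.getOverallBuckets({
  job_id: 'job-*',
  top_n: 2,
  overall_score: 50,
  start: 'now-24h'
})
for (const bucket of overall.overall_buckets) {
  console.log(bucket.timestamp, bucket.overall_score)
}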
*/ - body?: { - docs: Record[] - inference_config?: MlInferenceConfigUpdateContainer - } -} - -export interface MlInferTrainedModelResponse { - inference_results: MlInferenceResponseResult[] -} - -export interface MlInfoAnomalyDetectors { - categorization_analyzer: MlCategorizationAnalyzer - categorization_examples_limit: integer - model_memory_limit: string - model_snapshot_retention_days: integer - daily_model_snapshot_retention_after_days: integer -} - -export interface MlInfoDatafeeds { - scroll_size: integer -} - -export interface MlInfoDefaults { - anomaly_detectors: MlInfoAnomalyDetectors - datafeeds: MlInfoDatafeeds -} - -export interface MlInfoLimits { - max_model_memory_limit?: string - effective_max_model_memory_limit: string - total_ml_memory: string -} - -export interface MlInfoNativeCode { - build_hash: string - version: VersionString -} - -export interface MlInfoRequest extends RequestBase { -} - -export interface MlInfoResponse { - defaults: MlInfoDefaults - limits: MlInfoLimits - upgrade_mode: boolean - native_code: MlInfoNativeCode -} - -export interface MlOpenJobRequest extends RequestBase { - job_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - timeout?: Duration - } -} - -export interface MlOpenJobResponse { - opened: boolean - node: NodeId -} - -export interface MlPostCalendarEventsRequest extends RequestBase { - calendar_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - events: MlCalendarEvent[] - } -} - -export interface MlPostCalendarEventsResponse { - events: MlCalendarEvent[] -} - -export interface MlPostDataRequest extends RequestBase { - job_id: Id - reset_end?: DateTime - reset_start?: DateTime - /** @deprecated The use of the 'body' key has been deprecated, use 'data' instead. */ - body?: TData[] -} - -export interface MlPostDataResponse { - bucket_count: long - earliest_record_timestamp: long - empty_bucket_count: long - input_bytes: long - input_field_count: long - input_record_count: long - invalid_date_count: long - job_id: Id - last_data_time: integer - latest_record_timestamp: long - missing_field_count: long - out_of_order_timestamp_count: long - processed_field_count: long - processed_record_count: long - sparse_bucket_count: long -} - -export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig { - source: MlDataframeAnalyticsSource - analysis: MlDataframeAnalysisContainer - model_memory_limit?: string - max_num_threads?: integer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] -} - -export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase { - id?: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig - } -} - -export interface MlPreviewDataFrameAnalyticsResponse { - feature_values: Record[] -} - -export interface MlPreviewDatafeedRequest extends RequestBase { - datafeed_id?: Id - start?: DateTime - end?: DateTime - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
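A sketch for MlInferTrainedModelRequest above, assuming the same 8.x Client, a hypothetical already-deployed NLP model id 'my-nlp-model', and 'text_field' as that model's configured input field.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const inference = await client.ml.inferTrainedModel({
  model_id: 'my-nlp-model',
  docs: [{ text_field: 'this is a test sentence' }],
  timeout: '30s'
})
// MlInferenceResponseResult carries predicted_value, top_classes, etc.
console.log(inference.inference_results[0].predicted_value)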
*/ - body?: { - datafeed_config?: MlDatafeedConfig - job_config?: MlJobConfig - } -} - -export type MlPreviewDatafeedResponse = TDocument[] - -export interface MlPutCalendarRequest extends RequestBase { - calendar_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - job_ids?: Id[] - description?: string - } -} - -export interface MlPutCalendarResponse { - calendar_id: Id - description?: string - job_ids: Ids -} - -export interface MlPutCalendarJobRequest extends RequestBase { - calendar_id: Id - job_id: Ids -} - -export interface MlPutCalendarJobResponse { - calendar_id: Id - description?: string - job_ids: Ids -} - -export interface MlPutDataFrameAnalyticsRequest extends RequestBase { - id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - allow_lazy_start?: boolean - analysis: MlDataframeAnalysisContainer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] - description?: string - dest: MlDataframeAnalyticsDestination - max_num_threads?: integer - model_memory_limit?: string - source: MlDataframeAnalyticsSource - headers?: HttpHeaders - version?: VersionString - } -} - -export interface MlPutDataFrameAnalyticsResponse { - authorization?: MlDataframeAnalyticsAuthorization - allow_lazy_start: boolean - analysis: MlDataframeAnalysisContainer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] - create_time: EpochTime - description?: string - dest: MlDataframeAnalyticsDestination - id: Id - max_num_threads: integer - model_memory_limit: string - source: MlDataframeAnalyticsSource - version: VersionString -} - -export interface MlPutDatafeedRequest extends RequestBase { - datafeed_id: Id - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - aggregations?: Record - chunking_config?: MlChunkingConfig - delayed_data_check_config?: MlDelayedDataCheckConfig - frequency?: Duration - indices?: Indices - /** @alias indices */ - indexes?: Indices - indices_options?: IndicesOptions - job_id?: Id - max_empty_searches?: integer - query?: QueryDslQueryContainer - query_delay?: Duration - runtime_mappings?: MappingRuntimeFields - script_fields?: Record - scroll_size?: integer - headers?: HttpHeaders - } -} - -export interface MlPutDatafeedResponse { - aggregations?: Record - authorization?: MlDatafeedAuthorization - chunking_config: MlChunkingConfig - delayed_data_check_config?: MlDelayedDataCheckConfig - datafeed_id: Id - frequency?: Duration - indices: string[] - job_id: Id - indices_options?: IndicesOptions - max_empty_searches?: integer - query: QueryDslQueryContainer - query_delay: Duration - runtime_mappings?: MappingRuntimeFields - script_fields?: Record - scroll_size: integer -} - -export interface MlPutFilterRequest extends RequestBase { - filter_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - description?: string - items?: string[] - } -} - -export interface MlPutFilterResponse { - description: string - filter_id: Id - items: string[] -} - -export interface MlPutJobRequest extends RequestBase { - job_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
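A sketch for MlPutDataFrameAnalyticsRequest above, assuming the same 8.x Client; the index names and limits are illustrative.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.ml.putDataFrameAnalytics({
  id: 'house-prices',
  source: { index: 'house-prices' },           // MlDataframeAnalyticsSource
  dest: { index: 'house-prices-predictions' }, // MlDataframeAnalyticsDestination
  analysis: {                                  // MlDataframeAnalysisContainer
    regression: { dependent_variable: 'price', training_percent: 90 }
  },
  model_memory_limit: '1gb'
})
await client.ml.startDataFrameAnalytics({ id: 'house-prices' })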
*/ - body?: { - allow_lazy_open?: boolean - analysis_config: MlAnalysisConfig - analysis_limits?: MlAnalysisLimits - background_persist_interval?: Duration - custom_settings?: MlCustomSettings - daily_model_snapshot_retention_after_days?: long - data_description: MlDataDescription - datafeed_config?: MlDatafeedConfig - description?: string - groups?: string[] - model_plot_config?: MlModelPlotConfig - model_snapshot_retention_days?: long - renormalization_window_days?: long - results_index_name?: IndexName - results_retention_days?: long - } -} - -export interface MlPutJobResponse { - allow_lazy_open: boolean - analysis_config: MlAnalysisConfigRead - analysis_limits: MlAnalysisLimits - background_persist_interval?: Duration - create_time: DateTime - custom_settings?: MlCustomSettings - daily_model_snapshot_retention_after_days: long - data_description: MlDataDescription - datafeed_config?: MlDatafeed - description?: string - groups?: string[] - job_id: Id - job_type: string - job_version: string - model_plot_config?: MlModelPlotConfig - model_snapshot_id?: Id - model_snapshot_retention_days: long - renormalization_window_days?: long - results_index_name: string - results_retention_days?: long -} - -export interface MlPutTrainedModelAggregateOutput { - logistic_regression?: MlPutTrainedModelWeights - weighted_sum?: MlPutTrainedModelWeights - weighted_mode?: MlPutTrainedModelWeights - exponent?: MlPutTrainedModelWeights -} - -export interface MlPutTrainedModelDefinition { - preprocessors?: MlPutTrainedModelPreprocessor[] - trained_model: MlPutTrainedModelTrainedModel -} - -export interface MlPutTrainedModelEnsemble { - aggregate_output?: MlPutTrainedModelAggregateOutput - classification_labels?: string[] - feature_names?: string[] - target_type?: string - trained_models: MlPutTrainedModelTrainedModel[] -} - -export interface MlPutTrainedModelFrequencyEncodingPreprocessor { - field: string - feature_name: string - frequency_map: Record -} - -export interface MlPutTrainedModelInput { - field_names: Names -} - -export interface MlPutTrainedModelOneHotEncodingPreprocessor { - field: string - hot_map: Record -} - -export interface MlPutTrainedModelPreprocessor { - frequency_encoding?: MlPutTrainedModelFrequencyEncodingPreprocessor - one_hot_encoding?: MlPutTrainedModelOneHotEncodingPreprocessor - target_mean_encoding?: MlPutTrainedModelTargetMeanEncodingPreprocessor -} - -export interface MlPutTrainedModelRequest extends RequestBase { - model_id: Id - defer_definition_decompression?: boolean - wait_for_completion?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
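A sketch for MlPutJobRequest above, assuming the same 8.x Client; the job id, bucket span, and field names are illustrative.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// analysis_config and data_description are the two required body fields.
await client.ml.putJob({
  job_id: 'my-job',
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'responsetime' }]
  },
  data_description: { time_field: 'timestamp', time_format: 'epoch_ms' },
  results_index_name: 'my-job-results'
})
await client.ml.openJob({ job_id: 'my-job' })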
*/ - body?: { - compressed_definition?: string - definition?: MlPutTrainedModelDefinition - description?: string - inference_config?: MlInferenceConfigCreateContainer - input?: MlPutTrainedModelInput - metadata?: any - model_type?: MlTrainedModelType - model_size_bytes?: long - platform_architecture?: string - tags?: string[] - prefix_strings?: MlTrainedModelPrefixStrings - } -} - -export type MlPutTrainedModelResponse = MlTrainedModelConfig - -export interface MlPutTrainedModelTargetMeanEncodingPreprocessor { - field: string - feature_name: string - target_map: Record - default_value: double -} - -export interface MlPutTrainedModelTrainedModel { - tree?: MlPutTrainedModelTrainedModelTree - tree_node?: MlPutTrainedModelTrainedModelTreeNode - ensemble?: MlPutTrainedModelEnsemble -} - -export interface MlPutTrainedModelTrainedModelTree { - classification_labels?: string[] - feature_names: string[] - target_type?: string - tree_structure: MlPutTrainedModelTrainedModelTreeNode[] -} - -export interface MlPutTrainedModelTrainedModelTreeNode { - decision_type?: string - default_left?: boolean - leaf_value?: double - left_child?: integer - node_index: integer - right_child?: integer - split_feature?: integer - split_gain?: integer - threshold?: double -} - -export interface MlPutTrainedModelWeights { - weights: double -} - -export interface MlPutTrainedModelAliasRequest extends RequestBase { - model_alias: Name - model_id: Id - reassign?: boolean -} - -export type MlPutTrainedModelAliasResponse = AcknowledgedResponseBase - -export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { - model_id: Id - part: integer - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - definition: string - total_definition_length: long - total_parts: integer - } -} - -export type MlPutTrainedModelDefinitionPartResponse = AcknowledgedResponseBase - -export interface MlPutTrainedModelVocabularyRequest extends RequestBase { - model_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - vocabulary: string[] - merges?: string[] - scores?: double[] - } -} - -export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase - -export interface MlResetJobRequest extends RequestBase { - job_id: Id - wait_for_completion?: boolean - delete_user_annotations?: boolean -} - -export type MlResetJobResponse = AcknowledgedResponseBase - -export interface MlRevertModelSnapshotRequest extends RequestBase { - job_id: Id - snapshot_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - delete_intervening_results?: boolean - } -} - -export interface MlRevertModelSnapshotResponse { - model: MlModelSnapshot -} - -export interface MlSetUpgradeModeRequest extends RequestBase { - enabled?: boolean - timeout?: Duration -} - -export type MlSetUpgradeModeResponse = AcknowledgedResponseBase - -export interface MlStartDataFrameAnalyticsRequest extends RequestBase { - id: Id - timeout?: Duration -} - -export interface MlStartDataFrameAnalyticsResponse { - acknowledged: boolean - node: NodeId -} - -export interface MlStartDatafeedRequest extends RequestBase { - datafeed_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
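A sketch for MlPutTrainedModelAliasRequest above, assuming the same 8.x Client; the model and alias names are illustrative.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Repoint an alias at a newer model; `reassign` must be true when the alias
// already points at another model.
await client.ml.putTrainedModelAlias({
  model_alias: 'sentiment',
  model_id: 'sentiment-model-v2',
  reassign: true
})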
*/ - body?: { - end?: DateTime - start?: DateTime - timeout?: Duration - } -} - -export interface MlStartDatafeedResponse { - node: NodeIds - started: boolean -} - -export interface MlStartTrainedModelDeploymentRequest extends RequestBase { - model_id: Id - cache_size?: ByteSize - deployment_id?: string - number_of_allocations?: integer - priority?: MlTrainingPriority - queue_capacity?: integer - threads_per_allocation?: integer - timeout?: Duration - wait_for?: MlDeploymentAllocationState -} - -export interface MlStartTrainedModelDeploymentResponse { - assignment: MlTrainedModelAssignment -} - -export interface MlStopDataFrameAnalyticsRequest extends RequestBase { - id: Id - allow_no_match?: boolean - force?: boolean - timeout?: Duration -} - -export interface MlStopDataFrameAnalyticsResponse { - stopped: boolean -} - -export interface MlStopDatafeedRequest extends RequestBase { - datafeed_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - allow_no_match?: boolean - force?: boolean - timeout?: Duration - } -} - -export interface MlStopDatafeedResponse { - stopped: boolean -} - -export interface MlStopTrainedModelDeploymentRequest extends RequestBase { - model_id: Id - allow_no_match?: boolean - force?: boolean -} - -export interface MlStopTrainedModelDeploymentResponse { - stopped: boolean -} - -export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { - id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - description?: string - model_memory_limit?: string - max_num_threads?: integer - allow_lazy_start?: boolean - } -} - -export interface MlUpdateDataFrameAnalyticsResponse { - authorization?: MlDataframeAnalyticsAuthorization - allow_lazy_start: boolean - analysis: MlDataframeAnalysisContainer - analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] - create_time: long - description?: string - dest: MlDataframeAnalyticsDestination - id: Id - max_num_threads: integer - model_memory_limit: string - source: MlDataframeAnalyticsSource - version: VersionString -} - -export interface MlUpdateDatafeedRequest extends RequestBase { - datafeed_id: Id - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - aggregations?: Record - chunking_config?: MlChunkingConfig - delayed_data_check_config?: MlDelayedDataCheckConfig - frequency?: Duration - indices?: string[] - /** @alias indices */ - indexes?: string[] - indices_options?: IndicesOptions - job_id?: Id - max_empty_searches?: integer - query?: QueryDslQueryContainer - query_delay?: Duration - runtime_mappings?: MappingRuntimeFields - script_fields?: Record - scroll_size?: integer - } -} - -export interface MlUpdateDatafeedResponse { - authorization?: MlDatafeedAuthorization - aggregations?: Record - chunking_config: MlChunkingConfig - delayed_data_check_config?: MlDelayedDataCheckConfig - datafeed_id: Id - frequency?: Duration - indices: string[] - indices_options?: IndicesOptions - job_id: Id - max_empty_searches?: integer - query: QueryDslQueryContainer - query_delay: Duration - runtime_mappings?: MappingRuntimeFields - script_fields?: Record - scroll_size: integer -} - -export interface MlUpdateFilterRequest extends RequestBase { - filter_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - add_items?: string[] - description?: string - remove_items?: string[] - } -} - -export interface MlUpdateFilterResponse { - description: string - filter_id: Id - items: string[] -} - -export interface MlUpdateJobRequest extends RequestBase { - job_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - allow_lazy_open?: boolean - analysis_limits?: MlAnalysisMemoryLimit - background_persist_interval?: Duration - custom_settings?: Record - categorization_filters?: string[] - description?: string - model_plot_config?: MlModelPlotConfig - model_prune_window?: Duration - daily_model_snapshot_retention_after_days?: long - model_snapshot_retention_days?: long - renormalization_window_days?: long - results_retention_days?: long - groups?: string[] - detectors?: MlDetector[] - per_partition_categorization?: MlPerPartitionCategorization - } -} - -export interface MlUpdateJobResponse { - allow_lazy_open: boolean - analysis_config: MlAnalysisConfigRead - analysis_limits: MlAnalysisLimits - background_persist_interval?: Duration - create_time: EpochTime - finished_time?: EpochTime - custom_settings?: Record - daily_model_snapshot_retention_after_days: long - data_description: MlDataDescription - datafeed_config?: MlDatafeed - description?: string - groups?: string[] - job_id: Id - job_type: string - job_version: VersionString - model_plot_config?: MlModelPlotConfig - model_snapshot_id?: Id - model_snapshot_retention_days: long - renormalization_window_days?: long - results_index_name: IndexName - results_retention_days?: long -} - -export interface MlUpdateModelSnapshotRequest extends RequestBase { - job_id: Id - snapshot_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - description?: string - retain?: boolean - } -} - -export interface MlUpdateModelSnapshotResponse { - acknowledged: boolean - model: MlModelSnapshot -} - -export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase { - model_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - number_of_allocations?: integer - } -} - -export interface MlUpdateTrainedModelDeploymentResponse { - assignment: MlTrainedModelAssignment -} - -export interface MlUpgradeJobSnapshotRequest extends RequestBase { - job_id: Id - snapshot_id: Id - wait_for_completion?: boolean - timeout?: Duration -} - -export interface MlUpgradeJobSnapshotResponse { - node: NodeId - completed: boolean -} - -export interface MlValidateRequest extends RequestBase { - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - job_id?: Id - analysis_config?: MlAnalysisConfig - analysis_limits?: MlAnalysisLimits - data_description?: MlDataDescription - description?: string - model_plot?: MlModelPlotConfig - model_snapshot_id?: Id - model_snapshot_retention_days?: long - results_index_name?: IndexName - } -} - -export type MlValidateResponse = AcknowledgedResponseBase - -export interface MlValidateDetectorRequest extends RequestBase { - /** @deprecated The use of the 'body' key has been deprecated, use 'detector' instead. */ - body?: MlDetector -} - -export type MlValidateDetectorResponse = AcknowledgedResponseBase - -export interface MonitoringBulkRequest extends RequestBase { - type?: string - system_id: string - system_api_version: string - interval: Duration - /** @deprecated The use of the 'body' key has been deprecated, use 'operations' instead. */ - body?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] -} - -export interface MonitoringBulkResponse { - error?: ErrorCause - errors: boolean - ignored: boolean - took: long -} - -export interface NodesAdaptiveSelection { - avg_queue_size?: long - avg_response_time?: Duration - avg_response_time_ns?: long - avg_service_time?: Duration - avg_service_time_ns?: long - outgoing_searches?: long - rank?: string -} - -export interface NodesBreaker { - estimated_size?: string - estimated_size_in_bytes?: long - limit_size?: string - limit_size_in_bytes?: long - overhead?: float - tripped?: float -} - -export interface NodesCgroup { - cpuacct?: NodesCpuAcct - cpu?: NodesCgroupCpu - memory?: NodesCgroupMemory -} - -export interface NodesCgroupCpu { - control_group?: string - cfs_period_micros?: integer - cfs_quota_micros?: integer - stat?: NodesCgroupCpuStat -} - -export interface NodesCgroupCpuStat { - number_of_elapsed_periods?: long - number_of_times_throttled?: long - time_throttled_nanos?: DurationValue -} - -export interface NodesCgroupMemory { - control_group?: string - limit_in_bytes?: string - usage_in_bytes?: string -} - -export interface NodesClient { - id?: long - agent?: string - local_address?: string - remote_address?: string - last_uri?: string - opened_time_millis?: long - closed_time_millis?: long - last_request_time_millis?: long - request_count?: long - request_size_bytes?: long - x_opaque_id?: string -} - -export interface NodesClusterAppliedStats { - recordings?: NodesRecording[] -} - -export interface NodesClusterStateQueue { - total?: long - pending?: long - committed?: long -} - -export interface NodesClusterStateUpdate { - count: long - computation_time?: Duration - computation_time_millis?: DurationValue - publication_time?: Duration - publication_time_millis?: DurationValue - context_construction_time?: Duration - context_construction_time_millis?: DurationValue - commit_time?: Duration - commit_time_millis?: DurationValue - completion_time?: Duration - completion_time_millis?: DurationValue - master_apply_time?: Duration - master_apply_time_millis?: 
DurationValue - notification_time?: Duration - notification_time_millis?: DurationValue -} - -export interface NodesContext { - context?: string - compilations?: long - cache_evictions?: long - compilation_limit_triggered?: long -} - -export interface NodesCpu { - percent?: integer - sys?: Duration - sys_in_millis?: DurationValue - total?: Duration - total_in_millis?: DurationValue - user?: Duration - user_in_millis?: DurationValue - load_average?: Record -} - -export interface NodesCpuAcct { - control_group?: string - usage_nanos?: DurationValue -} - -export interface NodesDataPathStats { - available?: string - available_in_bytes?: long - disk_queue?: string - disk_reads?: long - disk_read_size?: string - disk_read_size_in_bytes?: long - disk_writes?: long - disk_write_size?: string - disk_write_size_in_bytes?: long - free?: string - free_in_bytes?: long - mount?: string - path?: string - total?: string - total_in_bytes?: long - type?: string -} - -export interface NodesDiscovery { - cluster_state_queue?: NodesClusterStateQueue - published_cluster_states?: NodesPublishedClusterStates - cluster_state_update?: Record - serialized_cluster_states?: NodesSerializedClusterState - cluster_applier_stats?: NodesClusterAppliedStats -} - -export interface NodesExtendedMemoryStats extends NodesMemoryStats { - free_percent?: integer - used_percent?: integer -} - -export interface NodesFileSystem { - data?: NodesDataPathStats[] - timestamp?: long - total?: NodesFileSystemTotal - io_stats?: NodesIoStats -} - -export interface NodesFileSystemTotal { - available?: string - available_in_bytes?: long - free?: string - free_in_bytes?: long - total?: string - total_in_bytes?: long -} - -export interface NodesGarbageCollector { - collectors?: Record -} - -export interface NodesGarbageCollectorTotal { - collection_count?: long - collection_time?: string - collection_time_in_millis?: long -} - -export interface NodesHttp { - current_open?: integer - total_opened?: long - clients?: NodesClient[] - routes: Record -} - -export interface NodesHttpRoute { - requests: NodesHttpRouteRequests - responses: NodesHttpRouteResponses -} - -export interface NodesHttpRouteRequests { - count: long - total_size_in_bytes: long - size_histogram: NodesSizeHttpHistogram[] -} - -export interface NodesHttpRouteResponses { - count: long - total_size_in_bytes: long - handling_time_histogram: NodesTimeHttpHistogram[] - size_histogram: NodesSizeHttpHistogram[] -} - -export interface NodesIndexingPressure { - memory?: NodesIndexingPressureMemory -} - -export interface NodesIndexingPressureMemory { - limit?: ByteSize - limit_in_bytes?: long - current?: NodesPressureMemory - total?: NodesPressureMemory -} - -export interface NodesIngest { - pipelines?: Record - total?: NodesIngestTotal -} - -export interface NodesIngestStats { - count: long - current: long - failed: long - processors: Record[] - time_in_millis: DurationValue - ingested_as_first_pipeline_in_bytes: long - produced_as_first_pipeline_in_bytes: long -} - -export interface NodesIngestTotal { - count: long - current: long - failed: long - time_in_millis: DurationValue -} - -export interface NodesIoStatDevice { - device_name?: string - operations?: long - read_kilobytes?: long - read_operations?: long - write_kilobytes?: long - write_operations?: long -} - -export interface NodesIoStats { - devices?: NodesIoStatDevice[] - total?: NodesIoStatDevice -} - -export interface NodesJvm { - buffer_pools?: Record - classes?: NodesJvmClasses - gc?: NodesGarbageCollector - mem?: 
NodesJvmMemoryStats - threads?: NodesJvmThreads - timestamp?: long - uptime?: string - uptime_in_millis?: long -} - -export interface NodesJvmClasses { - current_loaded_count?: long - total_loaded_count?: long - total_unloaded_count?: long -} - -export interface NodesJvmMemoryStats { - heap_used_in_bytes?: long - heap_used_percent?: long - heap_committed_in_bytes?: long - heap_max_in_bytes?: long - non_heap_used_in_bytes?: long - non_heap_committed_in_bytes?: long - pools?: Record -} - -export interface NodesJvmThreads { - count?: long - peak_count?: long -} - -export interface NodesKeyedProcessor { - stats?: NodesProcessor - type?: string -} - -export interface NodesMemoryStats { - adjusted_total_in_bytes?: long - resident?: string - resident_in_bytes?: long - share?: string - share_in_bytes?: long - total_virtual?: string - total_virtual_in_bytes?: long - total_in_bytes?: long - free_in_bytes?: long - used_in_bytes?: long -} - -export interface NodesNodeBufferPool { - count?: long - total_capacity?: string - total_capacity_in_bytes?: long - used?: string - used_in_bytes?: long -} - -export interface NodesNodeReloadError { - name: Name - reload_exception?: ErrorCause -} - -export type NodesNodeReloadResult = NodesStats | NodesNodeReloadError - -export interface NodesNodesResponseBase { - _nodes?: NodeStatistics -} - -export interface NodesOperatingSystem { - cpu?: NodesCpu - mem?: NodesExtendedMemoryStats - swap?: NodesMemoryStats - cgroup?: NodesCgroup - timestamp?: long -} - -export interface NodesPool { - used_in_bytes?: long - max_in_bytes?: long - peak_used_in_bytes?: long - peak_max_in_bytes?: long -} - -export interface NodesPressureMemory { - all?: ByteSize - all_in_bytes?: long - combined_coordinating_and_primary?: ByteSize - combined_coordinating_and_primary_in_bytes?: long - coordinating?: ByteSize - coordinating_in_bytes?: long - primary?: ByteSize - primary_in_bytes?: long - replica?: ByteSize - replica_in_bytes?: long - coordinating_rejections?: long - primary_rejections?: long - replica_rejections?: long -} - -export interface NodesProcess { - cpu?: NodesCpu - mem?: NodesMemoryStats - open_file_descriptors?: integer - max_file_descriptors?: integer - timestamp?: long -} - -export interface NodesProcessor { - count?: long - current?: long - failed?: long - time_in_millis?: DurationValue -} - -export interface NodesPublishedClusterStates { - full_states?: long - incompatible_diffs?: long - compatible_diffs?: long -} - -export interface NodesRecording { - name?: string - cumulative_execution_count?: long - cumulative_execution_time?: Duration - cumulative_execution_time_millis?: DurationValue -} - -export interface NodesRepositoryLocation { - base_path: string - container?: string - bucket?: string -} - -export interface NodesRepositoryMeteringInformation { - repository_name: Name - repository_type: string - repository_location: NodesRepositoryLocation - repository_ephemeral_id: Id - repository_started_at: EpochTime - repository_stopped_at?: EpochTime - archived: boolean - cluster_version?: VersionNumber - request_counts: NodesRequestCounts -} - -export interface NodesRequestCounts { - GetBlobProperties?: long - GetBlob?: long - ListBlobs?: long - PutBlob?: long - PutBlock?: long - PutBlockList?: long - GetObject?: long - ListObjects?: long - InsertObject?: long - PutObject?: long - PutMultipartObject?: long -} - -export interface NodesScriptCache { - cache_evictions?: long - compilation_limit_triggered?: long - compilations?: long - context?: string -} - -export interface 
NodesScripting { - cache_evictions?: long - compilations?: long - compilations_history?: Record - compilation_limit_triggered?: long - contexts?: NodesContext[] -} - -export interface NodesSerializedClusterState { - full_states?: NodesSerializedClusterStateDetail - diffs?: NodesSerializedClusterStateDetail -} - -export interface NodesSerializedClusterStateDetail { - count?: long - uncompressed_size?: string - uncompressed_size_in_bytes?: long - compressed_size?: string - compressed_size_in_bytes?: long -} - -export interface NodesSizeHttpHistogram { - count: long - ge_bytes?: long - lt_bytes?: long -} - -export interface NodesStats { - adaptive_selection?: Record - breakers?: Record - fs?: NodesFileSystem - host?: Host - http?: NodesHttp - ingest?: NodesIngest - ip?: Ip | Ip[] - jvm?: NodesJvm - name?: Name - os?: NodesOperatingSystem - process?: NodesProcess - roles?: NodeRoles - script?: NodesScripting - script_cache?: Record - thread_pool?: Record - timestamp?: long - transport?: NodesTransport - transport_address?: TransportAddress - attributes?: Record - discovery?: NodesDiscovery - indexing_pressure?: NodesIndexingPressure - indices?: IndicesStatsShardStats -} - -export interface NodesThreadCount { - active?: long - completed?: long - largest?: long - queue?: long - rejected?: long - threads?: long -} - -export interface NodesTimeHttpHistogram { - count: long - ge_millis?: long - lt_millis?: long -} - -export interface NodesTransport { - inbound_handling_time_histogram?: NodesTransportHistogram[] - outbound_handling_time_histogram?: NodesTransportHistogram[] - rx_count?: long - rx_size?: string - rx_size_in_bytes?: long - server_open?: integer - tx_count?: long - tx_size?: string - tx_size_in_bytes?: long - total_outbound_connections?: long -} - -export interface NodesTransportHistogram { - count?: long - lt_millis?: long - ge_millis?: long -} - -export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBase { - node_id: NodeIds - max_archive_version: long -} - -export type NodesClearRepositoriesMeteringArchiveResponse = NodesClearRepositoriesMeteringArchiveResponseBase - -export interface NodesClearRepositoriesMeteringArchiveResponseBase extends NodesNodesResponseBase { - cluster_name: Name - nodes: Record -} - -export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { - node_id: NodeIds -} - -export type NodesGetRepositoriesMeteringInfoResponse = NodesGetRepositoriesMeteringInfoResponseBase - -export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodesResponseBase { - cluster_name: Name - nodes: Record -} - -export interface NodesHotThreadsRequest extends RequestBase { - node_id?: NodeIds - ignore_idle_threads?: boolean - interval?: Duration - snapshots?: long - master_timeout?: Duration - threads?: long - timeout?: Duration - type?: ThreadType - sort?: ThreadType -} - -export interface NodesHotThreadsResponse { -} - -export interface NodesInfoDeprecationIndexing { - enabled: boolean | string -} - -export interface NodesInfoNodeInfo { - attributes: Record - build_flavor: string - build_hash: string - build_type: string - host: Host - http?: NodesInfoNodeInfoHttp - ip: Ip - jvm?: NodesInfoNodeJvmInfo - name: Name - network?: NodesInfoNodeInfoNetwork - os?: NodesInfoNodeOperatingSystemInfo - plugins?: PluginStats[] - process?: NodesInfoNodeProcessInfo - roles: NodeRoles - settings?: NodesInfoNodeInfoSettings - thread_pool?: Record - total_indexing_buffer?: long - total_indexing_buffer_in_bytes?: ByteSize - transport?: 
NodesInfoNodeInfoTransport - transport_address: TransportAddress - version: VersionString - modules?: PluginStats[] - ingest?: NodesInfoNodeInfoIngest - aggregations?: Record -} - -export interface NodesInfoNodeInfoAction { - destructive_requires_name: string -} - -export interface NodesInfoNodeInfoAggregation { - types: string[] -} - -export interface NodesInfoNodeInfoBootstrap { - memory_lock: string -} - -export interface NodesInfoNodeInfoClient { - type: string -} - -export interface NodesInfoNodeInfoDiscoverKeys { - seed_hosts?: string[] - type?: string - seed_providers?: string[] -} -export type NodesInfoNodeInfoDiscover = NodesInfoNodeInfoDiscoverKeys -& { [property: string]: any } - -export interface NodesInfoNodeInfoHttp { - bound_address: string[] - max_content_length?: ByteSize - max_content_length_in_bytes: long - publish_address: string -} - -export interface NodesInfoNodeInfoIngest { - processors: NodesInfoNodeInfoIngestProcessor[] -} - -export interface NodesInfoNodeInfoIngestDownloader { - enabled: string -} - -export interface NodesInfoNodeInfoIngestInfo { - downloader: NodesInfoNodeInfoIngestDownloader -} - -export interface NodesInfoNodeInfoIngestProcessor { - type: string -} - -export interface NodesInfoNodeInfoJvmMemory { - direct_max?: ByteSize - direct_max_in_bytes: long - heap_init?: ByteSize - heap_init_in_bytes: long - heap_max?: ByteSize - heap_max_in_bytes: long - non_heap_init?: ByteSize - non_heap_init_in_bytes: long - non_heap_max?: ByteSize - non_heap_max_in_bytes: long -} - -export interface NodesInfoNodeInfoMemory { - total: string - total_in_bytes: long -} - -export interface NodesInfoNodeInfoNetwork { - primary_interface: NodesInfoNodeInfoNetworkInterface - refresh_interval: integer -} - -export interface NodesInfoNodeInfoNetworkInterface { - address: string - mac_address: string - name: Name -} - -export interface NodesInfoNodeInfoOSCPU { - cache_size: string - cache_size_in_bytes: integer - cores_per_socket: integer - mhz: integer - model: string - total_cores: integer - total_sockets: integer - vendor: string -} - -export interface NodesInfoNodeInfoPath { - logs?: string - home?: string - repo?: string[] - data?: string[] -} - -export interface NodesInfoNodeInfoRepositories { - url: NodesInfoNodeInfoRepositoriesUrl -} - -export interface NodesInfoNodeInfoRepositoriesUrl { - allowed_urls: string -} - -export interface NodesInfoNodeInfoScript { - allowed_types: string - disable_max_compilations_rate?: string -} - -export interface NodesInfoNodeInfoSearch { - remote: NodesInfoNodeInfoSearchRemote -} - -export interface NodesInfoNodeInfoSearchRemote { - connect: string -} - -export interface NodesInfoNodeInfoSettings { - cluster: NodesInfoNodeInfoSettingsCluster - node: NodesInfoNodeInfoSettingsNode - path?: NodesInfoNodeInfoPath - repositories?: NodesInfoNodeInfoRepositories - discovery?: NodesInfoNodeInfoDiscover - action?: NodesInfoNodeInfoAction - client?: NodesInfoNodeInfoClient - http: NodesInfoNodeInfoSettingsHttp - bootstrap?: NodesInfoNodeInfoBootstrap - transport: NodesInfoNodeInfoSettingsTransport - network?: NodesInfoNodeInfoSettingsNetwork - xpack?: NodesInfoNodeInfoXpack - script?: NodesInfoNodeInfoScript - search?: NodesInfoNodeInfoSearch - ingest?: NodesInfoNodeInfoSettingsIngest -} - -export interface NodesInfoNodeInfoSettingsCluster { - name: Name - routing?: IndicesIndexRouting - election: NodesInfoNodeInfoSettingsClusterElection - initial_master_nodes?: string[] - deprecation_indexing?: NodesInfoDeprecationIndexing -} - -export 
interface NodesInfoNodeInfoSettingsClusterElection { - strategy: Name -} - -export interface NodesInfoNodeInfoSettingsHttp { - type: NodesInfoNodeInfoSettingsHttpType | string - 'type.default'?: string - compression?: boolean | string - port?: integer | string -} - -export interface NodesInfoNodeInfoSettingsHttpType { - default: string -} - -export interface NodesInfoNodeInfoSettingsIngest { - attachment?: NodesInfoNodeInfoIngestInfo - append?: NodesInfoNodeInfoIngestInfo - csv?: NodesInfoNodeInfoIngestInfo - convert?: NodesInfoNodeInfoIngestInfo - date?: NodesInfoNodeInfoIngestInfo - date_index_name?: NodesInfoNodeInfoIngestInfo - dot_expander?: NodesInfoNodeInfoIngestInfo - enrich?: NodesInfoNodeInfoIngestInfo - fail?: NodesInfoNodeInfoIngestInfo - foreach?: NodesInfoNodeInfoIngestInfo - json?: NodesInfoNodeInfoIngestInfo - user_agent?: NodesInfoNodeInfoIngestInfo - kv?: NodesInfoNodeInfoIngestInfo - geoip?: NodesInfoNodeInfoIngestInfo - grok?: NodesInfoNodeInfoIngestInfo - gsub?: NodesInfoNodeInfoIngestInfo - join?: NodesInfoNodeInfoIngestInfo - lowercase?: NodesInfoNodeInfoIngestInfo - remove?: NodesInfoNodeInfoIngestInfo - rename?: NodesInfoNodeInfoIngestInfo - script?: NodesInfoNodeInfoIngestInfo - set?: NodesInfoNodeInfoIngestInfo - sort?: NodesInfoNodeInfoIngestInfo - split?: NodesInfoNodeInfoIngestInfo - trim?: NodesInfoNodeInfoIngestInfo - uppercase?: NodesInfoNodeInfoIngestInfo - urldecode?: NodesInfoNodeInfoIngestInfo - bytes?: NodesInfoNodeInfoIngestInfo - dissect?: NodesInfoNodeInfoIngestInfo - set_security_user?: NodesInfoNodeInfoIngestInfo - pipeline?: NodesInfoNodeInfoIngestInfo - drop?: NodesInfoNodeInfoIngestInfo - circle?: NodesInfoNodeInfoIngestInfo - inference?: NodesInfoNodeInfoIngestInfo -} - -export interface NodesInfoNodeInfoSettingsNetwork { - host?: Host | Host[] -} - -export interface NodesInfoNodeInfoSettingsNode { - name: Name - attr: Record - max_local_storage_nodes?: string -} - -export interface NodesInfoNodeInfoSettingsTransport { - type: NodesInfoNodeInfoSettingsTransportType | string - 'type.default'?: string - features?: NodesInfoNodeInfoSettingsTransportFeatures -} - -export interface NodesInfoNodeInfoSettingsTransportFeatures { - 'x-pack': string -} - -export interface NodesInfoNodeInfoSettingsTransportType { - default: string -} - -export interface NodesInfoNodeInfoTransport { - bound_address: string[] - publish_address: string - profiles: Record -} - -export interface NodesInfoNodeInfoXpack { - license?: NodesInfoNodeInfoXpackLicense - security: NodesInfoNodeInfoXpackSecurity - notification?: Record - ml?: NodesInfoNodeInfoXpackMl -} - -export interface NodesInfoNodeInfoXpackLicense { - self_generated: NodesInfoNodeInfoXpackLicenseType -} - -export interface NodesInfoNodeInfoXpackLicenseType { - type: string -} - -export interface NodesInfoNodeInfoXpackMl { - use_auto_machine_memory_percent?: boolean -} - -export interface NodesInfoNodeInfoXpackSecurity { - http?: NodesInfoNodeInfoXpackSecuritySsl - enabled: string - transport?: NodesInfoNodeInfoXpackSecuritySsl - authc?: NodesInfoNodeInfoXpackSecurityAuthc -} - -export interface NodesInfoNodeInfoXpackSecurityAuthc { - realms?: NodesInfoNodeInfoXpackSecurityAuthcRealms - token?: NodesInfoNodeInfoXpackSecurityAuthcToken -} - -export interface NodesInfoNodeInfoXpackSecurityAuthcRealms { - file?: Record - native?: Record - pki?: Record -} - -export interface NodesInfoNodeInfoXpackSecurityAuthcRealmsStatus { - enabled?: string - order: string -} - -export interface 
NodesInfoNodeInfoXpackSecurityAuthcToken { - enabled: string -} - -export interface NodesInfoNodeInfoXpackSecuritySsl { - ssl: Record -} - -export interface NodesInfoNodeJvmInfo { - gc_collectors: string[] - mem: NodesInfoNodeInfoJvmMemory - memory_pools: string[] - pid: integer - start_time_in_millis: EpochTime - version: VersionString - vm_name: Name - vm_vendor: string - vm_version: VersionString - using_bundled_jdk: boolean - bundled_jdk: boolean - using_compressed_ordinary_object_pointers?: boolean | string - input_arguments: string[] -} - -export interface NodesInfoNodeOperatingSystemInfo { - arch: string - available_processors: integer - allocated_processors?: integer - name: Name - pretty_name: Name - refresh_interval_in_millis: DurationValue - version: VersionString - cpu?: NodesInfoNodeInfoOSCPU - mem?: NodesInfoNodeInfoMemory - swap?: NodesInfoNodeInfoMemory -} - -export interface NodesInfoNodeProcessInfo { - id: long - mlockall: boolean - refresh_interval_in_millis: DurationValue -} - -export interface NodesInfoNodeThreadPoolInfo { - core?: integer - keep_alive?: Duration - max?: integer - queue_size: integer - size?: integer - type: string -} - -export interface NodesInfoRequest extends RequestBase { - node_id?: NodeIds - metric?: Metrics - flat_settings?: boolean - master_timeout?: Duration - timeout?: Duration -} - -export type NodesInfoResponse = NodesInfoResponseBase - -export interface NodesInfoResponseBase extends NodesNodesResponseBase { - cluster_name: Name - nodes: Record -} - -export interface NodesReloadSecureSettingsRequest extends RequestBase { - node_id?: NodeIds - timeout?: Duration - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - secure_settings_password?: Password - } -} - -export type NodesReloadSecureSettingsResponse = NodesReloadSecureSettingsResponseBase - -export interface NodesReloadSecureSettingsResponseBase extends NodesNodesResponseBase { - cluster_name: Name - nodes: Record -} - -export interface NodesStatsRequest extends RequestBase { - node_id?: NodeIds - metric?: Metrics - index_metric?: Metrics - completion_fields?: Fields - fielddata_fields?: Fields - fields?: Fields - groups?: boolean - include_segment_file_sizes?: boolean - level?: Level - master_timeout?: Duration - timeout?: Duration - types?: string[] - include_unloaded_segments?: boolean -} - -export type NodesStatsResponse = NodesStatsResponseBase - -export interface NodesStatsResponseBase extends NodesNodesResponseBase { - cluster_name?: Name - nodes: Record -} - -export interface NodesUsageNodeUsage { - rest_actions: Record - since: EpochTime - timestamp: EpochTime - aggregations: Record -} - -export interface NodesUsageRequest extends RequestBase { - node_id?: NodeIds - metric?: Metrics - timeout?: Duration -} - -export type NodesUsageResponse = NodesUsageResponseBase - -export interface NodesUsageResponseBase extends NodesNodesResponseBase { - cluster_name: Name - nodes: Record -} - -export interface QueryRulesQueryRule { - rule_id: Id - type: QueryRulesQueryRuleType - criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] - actions: QueryRulesQueryRuleActions - priority?: integer -} - -export interface QueryRulesQueryRuleActions { - ids?: Id[] - docs?: QueryDslPinnedDoc[] -} - -export interface QueryRulesQueryRuleCriteria { - type: QueryRulesQueryRuleCriteriaType - metadata?: string - values?: any[] -} - -export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 
'fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' | 'always' - -export type QueryRulesQueryRuleType = 'pinned' | 'exclude' - -export interface QueryRulesQueryRuleset { - ruleset_id: Id - rules: QueryRulesQueryRule[] -} - -export interface QueryRulesDeleteRuleRequest extends RequestBase { - ruleset_id: Id - rule_id: Id -} - -export type QueryRulesDeleteRuleResponse = AcknowledgedResponseBase - -export interface QueryRulesDeleteRulesetRequest extends RequestBase { - ruleset_id: Id -} - -export type QueryRulesDeleteRulesetResponse = AcknowledgedResponseBase - -export interface QueryRulesGetRuleRequest extends RequestBase { - ruleset_id: Id - rule_id: Id -} - -export type QueryRulesGetRuleResponse = QueryRulesQueryRule - -export interface QueryRulesGetRulesetRequest extends RequestBase { - ruleset_id: Id -} - -export type QueryRulesGetRulesetResponse = QueryRulesQueryRuleset - -export interface QueryRulesListRulesetsQueryRulesetListItem { - ruleset_id: Id - rule_total_count: integer - rule_criteria_types_counts: Record - rule_type_counts: Record -} - -export interface QueryRulesListRulesetsRequest extends RequestBase { - from?: integer - size?: integer -} - -export interface QueryRulesListRulesetsResponse { - count: long - results: QueryRulesListRulesetsQueryRulesetListItem[] -} - -export interface QueryRulesPutRuleRequest extends RequestBase { - ruleset_id: Id - rule_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - type: QueryRulesQueryRuleType - criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] - actions: QueryRulesQueryRuleActions - priority?: integer - } -} - -export interface QueryRulesPutRuleResponse { - result: Result -} - -export interface QueryRulesPutRulesetRequest extends RequestBase { - ruleset_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - rules: QueryRulesQueryRule | QueryRulesQueryRule[] - } -} - -export interface QueryRulesPutRulesetResponse { - result: Result -} - -export interface QueryRulesTestQueryRulesetMatchedRule { - ruleset_id: Id - rule_id: Id -} - -export interface QueryRulesTestRequest extends RequestBase { - ruleset_id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - match_criteria: Record - } -} - -export interface QueryRulesTestResponse { - total_matched_rules: integer - matched_rules: QueryRulesTestQueryRulesetMatchedRule[] -} - -export interface RollupDateHistogramGrouping { - delay?: Duration - field: Field - format?: string - interval?: Duration - calendar_interval?: Duration - fixed_interval?: Duration - time_zone?: TimeZone -} - -export interface RollupFieldMetric { - field: Field - metrics: RollupMetric[] -} - -export interface RollupGroupings { - date_histogram?: RollupDateHistogramGrouping - histogram?: RollupHistogramGrouping - terms?: RollupTermsGrouping -} - -export interface RollupHistogramGrouping { - fields: Fields - interval: long -} - -export type RollupMetric = 'min' | 'max' | 'sum' | 'avg' | 'value_count' - -export interface RollupTermsGrouping { - fields: Fields -} - -export interface RollupDeleteJobRequest extends RequestBase { - id: Id -} - -export interface RollupDeleteJobResponse { - acknowledged: boolean - task_failures?: TaskFailure[] -} - -export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' - -export interface RollupGetJobsRequest extends RequestBase { - id?: Id -} - -export interface RollupGetJobsResponse { - jobs: RollupGetJobsRollupJob[] -} - -export interface RollupGetJobsRollupJob { - config: RollupGetJobsRollupJobConfiguration - stats: RollupGetJobsRollupJobStats - status: RollupGetJobsRollupJobStatus -} - -export interface RollupGetJobsRollupJobConfiguration { - cron: string - groups: RollupGroupings - id: Id - index_pattern: string - metrics: RollupFieldMetric[] - page_size: long - rollup_index: IndexName - timeout: Duration -} - -export interface RollupGetJobsRollupJobStats { - documents_processed: long - index_failures: long - index_time_in_ms: DurationValue - index_total: long - pages_processed: long - rollups_indexed: long - search_failures: long - search_time_in_ms: DurationValue - search_total: long - trigger_count: long - processing_time_in_ms: DurationValue - processing_total: long -} - -export interface RollupGetJobsRollupJobStatus { - current_position?: Record - job_state: RollupGetJobsIndexingJobState - upgraded_doc_id?: boolean -} - -export interface RollupGetRollupCapsRequest extends RequestBase { - id?: Id -} - -export type RollupGetRollupCapsResponse = Record - -export interface RollupGetRollupCapsRollupCapabilities { - rollup_jobs: RollupGetRollupCapsRollupCapabilitySummary[] -} - -export interface RollupGetRollupCapsRollupCapabilitySummary { - fields: Record - index_pattern: string - job_id: string - rollup_index: string -} - -export interface RollupGetRollupCapsRollupFieldSummary { - agg: string - calendar_interval?: Duration - time_zone?: TimeZone -} - -export interface RollupGetRollupIndexCapsIndexCapabilities { - rollup_jobs: RollupGetRollupIndexCapsRollupJobSummary[] -} - -export interface RollupGetRollupIndexCapsRequest extends RequestBase { - index: Ids -} - -export type RollupGetRollupIndexCapsResponse = Record - -export interface RollupGetRollupIndexCapsRollupJobSummary { - fields: Record - index_pattern: string - job_id: Id - rollup_index: IndexName -} - -export interface RollupGetRollupIndexCapsRollupJobSummaryField { - agg: string - time_zone?: TimeZone - calendar_interval?: Duration -} - -export interface RollupPutJobRequest extends RequestBase { - id: Id - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - cron: string - groups: RollupGroupings - index_pattern: string - metrics?: RollupFieldMetric[] - page_size: integer - rollup_index: IndexName - timeout?: Duration - headers?: HttpHeaders - } -} - -export type RollupPutJobResponse = AcknowledgedResponseBase - -export interface RollupRollupSearchRequest extends RequestBase { - index: Indices - rest_total_hits_as_int?: boolean - typed_keys?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - aggregations?: Record - /** @alias aggregations */ - aggs?: Record - query?: QueryDslQueryContainer - size?: integer - } -} - -export interface RollupRollupSearchResponse> { - took: long - timed_out: boolean - terminated_early?: boolean - _shards: ShardStatistics - hits: SearchHitsMetadata - aggregations?: TAggregations -} - -export interface RollupStartJobRequest extends RequestBase { - id: Id -} - -export interface RollupStartJobResponse { - started: boolean -} - -export interface RollupStopJobRequest extends RequestBase { - id: Id - timeout?: Duration - wait_for_completion?: boolean -} - -export interface RollupStopJobResponse { - stopped: boolean -} - -export interface SearchApplicationAnalyticsCollection { - event_data_stream: SearchApplicationEventDataStream -} - -export interface SearchApplicationEventDataStream { - name: IndexName -} - -export interface SearchApplicationSearchApplication extends SearchApplicationSearchApplicationParameters { - name: Name - updated_at_millis: EpochTime -} - -export interface SearchApplicationSearchApplicationParameters { - indices: IndexName[] - analytics_collection_name?: Name - template?: SearchApplicationSearchApplicationTemplate -} - -export interface SearchApplicationSearchApplicationTemplate { - script: Script | string -} - -export interface SearchApplicationDeleteRequest extends RequestBase { - name: Name -} - -export type SearchApplicationDeleteResponse = AcknowledgedResponseBase - -export interface SearchApplicationDeleteBehavioralAnalyticsRequest extends RequestBase { - name: Name -} - -export type SearchApplicationDeleteBehavioralAnalyticsResponse = AcknowledgedResponseBase - -export interface SearchApplicationGetRequest extends RequestBase { - name: Name -} - -export type SearchApplicationGetResponse = SearchApplicationSearchApplication - -export interface SearchApplicationGetBehavioralAnalyticsRequest extends RequestBase { - name?: Name[] -} - -export type SearchApplicationGetBehavioralAnalyticsResponse = Record - -export interface SearchApplicationListRequest extends RequestBase { - q?: string - from?: integer - size?: integer -} - -export interface SearchApplicationListResponse { - count: long - results: SearchApplicationSearchApplication[] -} - -export interface SearchApplicationPutRequest extends RequestBase { - name: Name - create?: boolean - /** @deprecated The use of the 'body' key has been deprecated, use 'search_application' instead. 
*/ - body?: SearchApplicationSearchApplicationParameters -} - -export interface SearchApplicationPutResponse { - result: Result -} - -export interface SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase extends AcknowledgedResponseBase { - name: Name -} - -export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestBase { - name: Name -} - -export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase - -export interface SearchApplicationSearchRequest extends RequestBase { - name: Name - typed_keys?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - params?: Record - } -} - -export type SearchApplicationSearchResponse> = SearchResponseBody - -export type SearchableSnapshotsStatsLevel = 'cluster' | 'indices' | 'shards' - -export interface SearchableSnapshotsCacheStatsNode { - shared_cache: SearchableSnapshotsCacheStatsShared -} - -export interface SearchableSnapshotsCacheStatsRequest extends RequestBase { - node_id?: NodeIds - master_timeout?: Duration -} - -export interface SearchableSnapshotsCacheStatsResponse { - nodes: Record -} - -export interface SearchableSnapshotsCacheStatsShared { - reads: long - bytes_read_in_bytes: ByteSize - writes: long - bytes_written_in_bytes: ByteSize - evictions: long - num_regions: integer - size_in_bytes: ByteSize - region_size_in_bytes: ByteSize -} - -export interface SearchableSnapshotsClearCacheRequest extends RequestBase { - index?: Indices - expand_wildcards?: ExpandWildcards - allow_no_indices?: boolean - ignore_unavailable?: boolean - pretty?: boolean - human?: boolean -} - -export type SearchableSnapshotsClearCacheResponse = any - -export interface SearchableSnapshotsMountMountedSnapshot { - snapshot: Name - indices: Indices - shards: ShardStatistics -} - -export interface SearchableSnapshotsMountRequest extends RequestBase { - repository: Name - snapshot: Name - master_timeout?: Duration - wait_for_completion?: boolean - storage?: string - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - index: IndexName - renamed_index?: IndexName - index_settings?: Record - ignore_index_settings?: string[] - } -} - -export interface SearchableSnapshotsMountResponse { - snapshot: SearchableSnapshotsMountMountedSnapshot -} - -export interface SearchableSnapshotsStatsRequest extends RequestBase { - index?: Indices - level?: SearchableSnapshotsStatsLevel -} - -export interface SearchableSnapshotsStatsResponse { - stats: any - total: any -} - -export interface SecurityAccess { - replication?: SecurityReplicationAccess[] - search?: SecuritySearchAccess[] -} - -export interface SecurityApiKey { - id: Id - name: Name - type: SecurityApiKeyType - creation: EpochTime - expiration?: EpochTime - invalidated: boolean - invalidation?: EpochTime - username: Username - realm: string - realm_type?: string - metadata: Metadata - role_descriptors?: Record - limited_by?: Record[] - access?: SecurityAccess - profile_uid?: string - _sort?: SortResults -} - -export type SecurityApiKeyType = 'rest' | 'cross_cluster' - -export interface SecurityApplicationGlobalUserPrivileges { - manage: SecurityManageUserPrivileges -} - -export interface SecurityApplicationPrivileges { - application: string - privileges: string[] - resources: string[] -} - -export interface SecurityBulkError { - count: integer - details: Record -} - -export interface SecurityClusterNode { - name: Name -} - -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_stats' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string - -export interface SecurityCreatedStatus { - created: boolean -} - -export interface SecurityFieldRule { - username?: Names - dn?: Names - groups?: Names -} - -export interface SecurityFieldSecurity { - except?: Fields - grant?: Fields -} - -export interface SecurityGlobalPrivilege { - application: SecurityApplicationGlobalUserPrivileges -} - -export type SecurityGrantType = 'password' | 'access_token' - -export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'cross_cluster_replication' | 'cross_cluster_replication_internal' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_data_stream_lifecycle' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'none' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string - 
-export interface SecurityIndicesPrivileges { - field_security?: SecurityFieldSecurity - names: IndexName | IndexName[] - privileges: SecurityIndexPrivilege[] - query?: SecurityIndicesPrivilegesQuery - allow_restricted_indices?: boolean -} - -export type SecurityIndicesPrivilegesQuery = string | QueryDslQueryContainer | SecurityRoleTemplateQuery - -export interface SecurityManageUserPrivileges { - applications: string[] -} - -export interface SecurityRealmInfo { - name: Name - type: string -} - -export type SecurityRemoteClusterPrivilege = 'monitor_enrich' | 'monitor_stats' - -export interface SecurityRemoteClusterPrivileges { - clusters: Names - privileges: SecurityRemoteClusterPrivilege[] -} - -export interface SecurityRemoteIndicesPrivileges { - clusters: Names - field_security?: SecurityFieldSecurity - names: IndexName | IndexName[] - privileges: SecurityIndexPrivilege[] - query?: SecurityIndicesPrivilegesQuery - allow_restricted_indices?: boolean -} - -export interface SecurityReplicationAccess { - names: IndexName | IndexName[] - allow_restricted_indices?: boolean -} - -export interface SecurityRestriction { - workflows: SecurityRestrictionWorkflow[] -} - -export type SecurityRestrictionWorkflow = 'search_application_query' | string - -export interface SecurityRoleDescriptor { - cluster?: SecurityClusterPrivilege[] - indices?: SecurityIndicesPrivileges[] - index?: SecurityIndicesPrivileges[] - remote_indices?: SecurityRemoteIndicesPrivileges[] - remote_cluster?: SecurityRemoteClusterPrivileges[] - global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege - applications?: SecurityApplicationPrivileges[] - metadata?: Metadata - run_as?: string[] - description?: string - restriction?: SecurityRestriction - transient_metadata?: Record -} - -export interface SecurityRoleDescriptorRead { - cluster: SecurityClusterPrivilege[] - indices: SecurityIndicesPrivileges[] - index: SecurityIndicesPrivileges[] - remote_indices?: SecurityRemoteIndicesPrivileges[] - remote_cluster?: SecurityRemoteClusterPrivileges[] - global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege - applications?: SecurityApplicationPrivileges[] - metadata?: Metadata - run_as?: string[] - description?: string - restriction?: SecurityRestriction - transient_metadata?: Record -} - -export interface SecurityRoleMapping { - enabled: boolean - metadata: Metadata - roles?: string[] - role_templates?: SecurityRoleTemplate[] - rules: SecurityRoleMappingRule -} - -export interface SecurityRoleMappingRule { - any?: SecurityRoleMappingRule[] - all?: SecurityRoleMappingRule[] - field?: SecurityFieldRule - except?: SecurityRoleMappingRule -} - -export interface SecurityRoleTemplate { - format?: SecurityTemplateFormat - template: Script | string -} - -export type SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer - -export interface SecurityRoleTemplateQuery { - template?: SecurityRoleTemplateScript | SecurityRoleTemplateInlineQuery -} - -export interface SecurityRoleTemplateScript { - source?: SecurityRoleTemplateInlineQuery - id?: Id - params?: Record - lang?: ScriptLanguage - options?: Record -} - -export interface SecuritySearchAccess { - field_security?: SecurityFieldSecurity - names: IndexName | IndexName[] - query?: SecurityIndicesPrivilegesQuery - allow_restricted_indices?: boolean -} - -export type SecurityTemplateFormat = 'string' | 'json' - -export interface SecurityUser { - email?: string | null - full_name?: Name | null - metadata: Metadata - roles: string[] - username: Username - enabled: boolean - 
profile_uid?: SecurityUserProfileId -} - -export interface SecurityUserIndicesPrivileges { - field_security?: SecurityFieldSecurity[] - names: IndexName | IndexName[] - privileges: SecurityIndexPrivilege[] - query?: SecurityIndicesPrivilegesQuery[] - allow_restricted_indices: boolean -} - -export interface SecurityUserProfile { - uid: SecurityUserProfileId - user: SecurityUserProfileUser - data: Record - labels: Record - enabled?: boolean -} - -export interface SecurityUserProfileHitMetadata { - _primary_term: long - _seq_no: SequenceNumber -} - -export type SecurityUserProfileId = string - -export interface SecurityUserProfileUser { - email?: string | null - full_name?: Name | null - realm_name: Name - realm_domain?: Name - roles: string[] - username: Username -} - -export interface SecurityUserProfileWithMetadata extends SecurityUserProfile { - last_synchronized: long - _doc: SecurityUserProfileHitMetadata -} - -export interface SecurityActivateUserProfileRequest extends RequestBase { - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - access_token?: string - grant_type: SecurityGrantType - password?: string - username?: string - } -} - -export type SecurityActivateUserProfileResponse = SecurityUserProfileWithMetadata - -export interface SecurityAuthenticateAuthenticateApiKey { - id: Id - name?: Name -} - -export interface SecurityAuthenticateRequest extends RequestBase { -} - -export interface SecurityAuthenticateResponse { - api_key?: SecurityAuthenticateAuthenticateApiKey - authentication_realm: SecurityRealmInfo - email?: string | null - full_name?: Name | null - lookup_realm: SecurityRealmInfo - metadata: Metadata - roles: string[] - username: Username - enabled: boolean - authentication_type: string - token?: SecurityAuthenticateToken -} - -export interface SecurityAuthenticateToken { - name: Name - type?: string -} - -export interface SecurityBulkDeleteRoleRequest extends RequestBase { - refresh?: Refresh - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - names: string[] - } -} - -export interface SecurityBulkDeleteRoleResponse { - deleted?: string[] - not_found?: string[] - errors?: SecurityBulkError -} - -export interface SecurityBulkPutRoleRequest extends RequestBase { - refresh?: Refresh - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - roles: Record - } -} - -export interface SecurityBulkPutRoleResponse { - created?: string[] - updated?: string[] - noop?: string[] - errors?: SecurityBulkError -} - -export interface SecurityChangePasswordRequest extends RequestBase { - username?: Username - refresh?: Refresh - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
 */
-  body?: {
-    password?: Password
-    password_hash?: string
-  }
-}
-
-export interface SecurityChangePasswordResponse {
-}
-
-export interface SecurityClearApiKeyCacheRequest extends RequestBase {
-  ids: Ids
-}
-
-export interface SecurityClearApiKeyCacheResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface SecurityClearCachedPrivilegesRequest extends RequestBase {
-  application: Name
-}
-
-export interface SecurityClearCachedPrivilegesResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface SecurityClearCachedRealmsRequest extends RequestBase {
-  realms: Names
-  usernames?: string[]
-}
-
-export interface SecurityClearCachedRealmsResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface SecurityClearCachedRolesRequest extends RequestBase {
-  name: Names
-}
-
-export interface SecurityClearCachedRolesResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface SecurityClearCachedServiceTokensRequest extends RequestBase {
-  namespace: Namespace
-  service: Service
-  name: Names
-}
-
-export interface SecurityClearCachedServiceTokensResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface SecurityCreateApiKeyRequest extends RequestBase {
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    expiration?: Duration
-    name?: Name
-    role_descriptors?: Record
-    metadata?: Metadata
-  }
-}
-
-export interface SecurityCreateApiKeyResponse {
-  api_key: string
-  expiration?: long
-  id: Id
-  name: Name
-  encoded: string
-}
-
-export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    access: SecurityAccess
-    expiration?: Duration
-    metadata?: Metadata
-    name: Name
-  }
-}
-
-export interface SecurityCreateCrossClusterApiKeyResponse {
-  api_key: string
-  expiration?: DurationValue
-  id: Id
-  name: Name
-  encoded: string
-}
-
-export interface SecurityCreateServiceTokenRequest extends RequestBase {
-  namespace: Namespace
-  service: Service
-  name?: Name
-  refresh?: Refresh
-}
-
-export interface SecurityCreateServiceTokenResponse {
-  created: boolean
-  token: SecurityCreateServiceTokenToken
-}
-
-export interface SecurityCreateServiceTokenToken {
-  name: Name
-  value: string
-}
-
-export interface SecurityDeletePrivilegesFoundStatus {
-  found: boolean
-}
-
-export interface SecurityDeletePrivilegesRequest extends RequestBase {
-  application: Name
-  name: Names
-  refresh?: Refresh
-}
-
-export type SecurityDeletePrivilegesResponse = Record>
-
-export interface SecurityDeleteRoleRequest extends RequestBase {
-  name: Name
-  refresh?: Refresh
-}
-
-export interface SecurityDeleteRoleResponse {
-  found: boolean
-}
-
-export interface SecurityDeleteRoleMappingRequest extends RequestBase {
-  name: Name
-  refresh?: Refresh
-}
-
-export interface SecurityDeleteRoleMappingResponse {
-  found: boolean
-}
-
-export interface SecurityDeleteServiceTokenRequest extends RequestBase {
-  namespace: Namespace
-  service: Service
-  name: Name
-  refresh?: Refresh
-}
-
-export interface SecurityDeleteServiceTokenResponse {
-  found: boolean
-}
-
-export interface SecurityDeleteUserRequest extends RequestBase {
-  username: Username
-  refresh?: Refresh
-}
-
-export interface SecurityDeleteUserResponse {
-  found: boolean
-}
-
-export interface SecurityDisableUserRequest extends RequestBase {
-  username: Username
-  refresh?: Refresh
-}
-
-export interface SecurityDisableUserResponse {
-}
-
-export interface SecurityDisableUserProfileRequest extends RequestBase {
-  uid: SecurityUserProfileId
-  refresh?: Refresh
-}
-
-export type SecurityDisableUserProfileResponse = AcknowledgedResponseBase
-
-export interface SecurityEnableUserRequest extends RequestBase {
-  username: Username
-  refresh?: Refresh
-}
-
-export interface SecurityEnableUserResponse {
-}
-
-export interface SecurityEnableUserProfileRequest extends RequestBase {
-  uid: SecurityUserProfileId
-  refresh?: Refresh
-}
-
-export type SecurityEnableUserProfileResponse = AcknowledgedResponseBase
-
-export interface SecurityEnrollKibanaRequest extends RequestBase {
-}
-
-export interface SecurityEnrollKibanaResponse {
-  token: SecurityEnrollKibanaToken
-  http_ca: string
-}
-
-export interface SecurityEnrollKibanaToken {
-  name: string
-  value: string
-}
-
-export interface SecurityEnrollNodeRequest extends RequestBase {
-}
-
-export interface SecurityEnrollNodeResponse {
-  http_ca_key: string
-  http_ca_cert: string
-  transport_ca_cert: string
-  transport_key: string
-  transport_cert: string
-  nodes_addresses: string[]
-}
-
-export interface SecurityGetApiKeyRequest extends RequestBase {
-  id?: Id
-  name?: Name
-  owner?: boolean
-  realm_name?: Name
-  username?: Username
-  with_limited_by?: boolean
-  active_only?: boolean
-  with_profile_uid?: boolean
-}
-
-export interface SecurityGetApiKeyResponse {
-  api_keys: SecurityApiKey[]
-}
-
-export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase {
-}
-
-export interface SecurityGetBuiltinPrivilegesResponse {
-  cluster: SecurityClusterPrivilege[]
-  index: IndexName[]
-  remote_cluster: SecurityRemoteClusterPrivilege[]
-}
-
-export interface SecurityGetPrivilegesRequest extends RequestBase {
-  application?: Name
-  name?: Names
-}
-
-export type SecurityGetPrivilegesResponse = Record>
-
-export interface SecurityGetRoleRequest extends RequestBase {
-  name?: Names
-}
-
-export type SecurityGetRoleResponse = Record
-
-export interface SecurityGetRoleRole {
-  cluster: SecurityClusterPrivilege[]
-  indices: SecurityIndicesPrivileges[]
-  remote_indices?: SecurityRemoteIndicesPrivileges[]
-  remote_cluster?: SecurityRemoteClusterPrivileges[]
-  metadata: Metadata
-  run_as: string[]
-  transient_metadata?: Record
-  applications: SecurityApplicationPrivileges[]
-  role_templates?: SecurityRoleTemplate[]
-  global?: Record>>
-}
-
-export interface SecurityGetRoleMappingRequest extends RequestBase {
-  name?: Names
-}
-
-export type SecurityGetRoleMappingResponse = Record
-
-export interface SecurityGetServiceAccountsRequest extends RequestBase {
-  namespace?: Namespace
-  service?: Service
-}
-
-export type SecurityGetServiceAccountsResponse = Record
-
-export interface SecurityGetServiceAccountsRoleDescriptorWrapper {
-  role_descriptor: SecurityRoleDescriptorRead
-}
-
-export interface SecurityGetServiceCredentialsNodesCredentials {
-  _nodes: NodeStatistics
-  file_tokens: Record
-}
-
-export interface SecurityGetServiceCredentialsNodesCredentialsFileToken {
-  nodes: string[]
-}
-
-export interface SecurityGetServiceCredentialsRequest extends RequestBase {
-  namespace: Namespace
-  service: Name
-}
-
-export interface SecurityGetServiceCredentialsResponse {
-  service_account: string
-  count: integer
-  tokens: Record
-  nodes_credentials: SecurityGetServiceCredentialsNodesCredentials
-}
-
-export type SecurityGetTokenAccessTokenGrantType = 'password' | 'client_credentials' | '_kerberos' | 'refresh_token'
-
-export interface SecurityGetTokenAuthenticatedUser extends SecurityUser {
-  authentication_realm: SecurityGetTokenUserRealm
-  lookup_realm: SecurityGetTokenUserRealm
-  authentication_provider?: SecurityGetTokenAuthenticationProvider
-  authentication_type: string
-}
-
-export interface SecurityGetTokenAuthenticationProvider {
-  type: string
-  name: Name
-}
-
-export interface SecurityGetTokenRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    grant_type?: SecurityGetTokenAccessTokenGrantType
-    scope?: string
-    password?: Password
-    kerberos_ticket?: string
-    refresh_token?: string
-    username?: Username
-  }
-}
-
-export interface SecurityGetTokenResponse {
-  access_token: string
-  expires_in: long
-  scope?: string
-  type: string
-  refresh_token?: string
-  kerberos_authentication_response_token?: string
-  authentication: SecurityGetTokenAuthenticatedUser
-}
-
-export interface SecurityGetTokenUserRealm {
-  name: Name
-  type: string
-}
-
-export interface SecurityGetUserRequest extends RequestBase {
-  username?: Username | Username[]
-  with_profile_uid?: boolean
-}
-
-export type SecurityGetUserResponse = Record
-
-export interface SecurityGetUserPrivilegesRequest extends RequestBase {
-  application?: Name
-  priviledge?: Name
-  username?: Name | null
-}
-
-export interface SecurityGetUserPrivilegesResponse {
-  applications: SecurityApplicationPrivileges[]
-  cluster: string[]
-  global: SecurityGlobalPrivilege[]
-  indices: SecurityUserIndicesPrivileges[]
-  run_as: string[]
-}
-
-export interface SecurityGetUserProfileGetUserProfileErrors {
-  count: long
-  details: Record
-}
-
-export interface SecurityGetUserProfileRequest extends RequestBase {
-  uid: SecurityUserProfileId | SecurityUserProfileId[]
-  data?: string | string[]
-}
-
-export interface SecurityGetUserProfileResponse {
-  profiles: SecurityUserProfileWithMetadata[]
-  errors?: SecurityGetUserProfileGetUserProfileErrors
-}
-
-export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password'
-
-export interface SecurityGrantApiKeyGrantApiKey {
-  name: Name
-  expiration?: DurationLarge
-  role_descriptors?: Record | Record[]
-  metadata?: Metadata
-}
-
-export interface SecurityGrantApiKeyRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    api_key: SecurityGrantApiKeyGrantApiKey
-    grant_type: SecurityGrantApiKeyApiKeyGrantType
-    access_token?: string
-    username?: Username
-    password?: Password
-    run_as?: Username
-  }
-}
-
-export interface SecurityGrantApiKeyResponse {
-  api_key: string
-  id: Id
-  name: Name
-  expiration?: EpochTime
-  encoded: string
-}
-
-export interface SecurityHasPrivilegesApplicationPrivilegesCheck {
-  application: string
-  privileges: string[]
-  resources: string[]
-}
-
-export type SecurityHasPrivilegesApplicationsPrivileges = Record
-
-export interface SecurityHasPrivilegesIndexPrivilegesCheck {
-  names: Indices
-  privileges: SecurityIndexPrivilege[]
-  allow_restricted_indices?: boolean
-}
-
-export type SecurityHasPrivilegesPrivileges = Record
-
-export interface SecurityHasPrivilegesRequest extends RequestBase {
-  user?: Name
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    application?: SecurityHasPrivilegesApplicationPrivilegesCheck[]
-    cluster?: SecurityClusterPrivilege[]
-    index?: SecurityHasPrivilegesIndexPrivilegesCheck[]
-  }
-}
-
-export type SecurityHasPrivilegesResourcePrivileges = Record
-
-export interface SecurityHasPrivilegesResponse {
-  application: SecurityHasPrivilegesApplicationsPrivileges
-  cluster: Record
-  has_all_requested: boolean
-  index: Record
-  username: Username
-}
-
-export interface SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors {
-  count: long
-  details: Record
-}
-
-export interface SecurityHasPrivilegesUserProfilePrivilegesCheck {
-  application?: SecurityHasPrivilegesApplicationPrivilegesCheck[]
-  cluster?: SecurityClusterPrivilege[]
-  index?: SecurityHasPrivilegesIndexPrivilegesCheck[]
-}
-
-export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    uids: SecurityUserProfileId[]
-    privileges: SecurityHasPrivilegesUserProfilePrivilegesCheck
-  }
-}
-
-export interface SecurityHasPrivilegesUserProfileResponse {
-  has_privilege_uids: SecurityUserProfileId[]
-  errors?: SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors
-}
-
-export interface SecurityInvalidateApiKeyRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    id?: Id
-    ids?: Id[]
-    name?: Name
-    owner?: boolean
-    realm_name?: string
-    username?: Username
-  }
-}
-
-export interface SecurityInvalidateApiKeyResponse {
-  error_count: integer
-  error_details?: ErrorCause[]
-  invalidated_api_keys: string[]
-  previously_invalidated_api_keys: string[]
-}
-
-export interface SecurityInvalidateTokenRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    token?: string
-    refresh_token?: string
-    realm_name?: Name
-    username?: Username
-  }
-}
-
-export interface SecurityInvalidateTokenResponse {
-  error_count: long
-  error_details?: ErrorCause[]
-  invalidated_tokens: long
-  previously_invalidated_tokens: long
-}
-
-export interface SecurityPutPrivilegesActions {
-  actions: string[]
-  application?: string
-  name?: Name
-  metadata?: Metadata
-}
-
-export interface SecurityPutPrivilegesRequest extends RequestBase {
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, use 'privileges' instead. */
-  body?: Record>
-}
-
-export type SecurityPutPrivilegesResponse = Record>
-
-export interface SecurityPutRoleRequest extends RequestBase {
-  name: Name
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    applications?: SecurityApplicationPrivileges[]
-    cluster?: SecurityClusterPrivilege[]
-    global?: Record
-    indices?: SecurityIndicesPrivileges[]
-    remote_indices?: SecurityRemoteIndicesPrivileges[]
-    remote_cluster?: SecurityRemoteClusterPrivileges[]
-    metadata?: Metadata
-    run_as?: string[]
-    description?: string
-    transient_metadata?: Record
-  }
-}
-
-export interface SecurityPutRoleResponse {
-  role: SecurityCreatedStatus
-}
-
-export interface SecurityPutRoleMappingRequest extends RequestBase {
-  name: Name
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    enabled?: boolean
-    metadata?: Metadata
-    roles?: string[]
-    role_templates?: SecurityRoleTemplate[]
-    rules?: SecurityRoleMappingRule
-    run_as?: string[]
-  }
-}
-
-export interface SecurityPutRoleMappingResponse {
-  created?: boolean
-  role_mapping: SecurityCreatedStatus
-}
-
-export interface SecurityPutUserRequest extends RequestBase {
-  username: Username
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    username?: Username
-    email?: string | null
-    full_name?: string | null
-    metadata?: Metadata
-    password?: Password
-    password_hash?: string
-    roles?: string[]
-    enabled?: boolean
-  }
-}
-
-export interface SecurityPutUserResponse {
-  created: boolean
-}
-
-export type SecurityQueryApiKeysApiKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate
-
-export interface SecurityQueryApiKeysApiKeyAggregationContainer {
-  aggregations?: Record
-  aggs?: Record
-  meta?: Metadata
-  cardinality?: AggregationsCardinalityAggregation
-  composite?: AggregationsCompositeAggregation
-  date_range?: AggregationsDateRangeAggregation
-  filter?: SecurityQueryApiKeysApiKeyQueryContainer
-  filters?: SecurityQueryApiKeysApiKeyFiltersAggregation
-  missing?: AggregationsMissingAggregation
-  range?: AggregationsRangeAggregation
-  terms?: AggregationsTermsAggregation
-  value_count?: AggregationsValueCountAggregation
-}
-
-export interface SecurityQueryApiKeysApiKeyFiltersAggregation extends AggregationsBucketAggregationBase {
-  filters?: AggregationsBuckets
-  other_bucket?: boolean
-  other_bucket_key?: string
-  keyed?: boolean
-}
-
-export interface SecurityQueryApiKeysApiKeyQueryContainer {
-  bool?: QueryDslBoolQuery
-  exists?: QueryDslExistsQuery
-  ids?: QueryDslIdsQuery
-  match?: Partial>
-  match_all?: QueryDslMatchAllQuery
-  prefix?: Partial>
-  range?: Partial>
-  simple_query_string?: QueryDslSimpleQueryStringQuery
-  term?: Partial>
-  terms?: QueryDslTermsQuery
-  wildcard?: Partial>
-}
-
-export interface SecurityQueryApiKeysRequest extends RequestBase {
-  with_limited_by?: boolean
-  with_profile_uid?: boolean
-  typed_keys?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aggregations?: Record
-    /** @alias aggregations */
-    aggs?: Record
-    query?: SecurityQueryApiKeysApiKeyQueryContainer
-    from?: integer
-    sort?: Sort
-    size?: integer
-    search_after?: SortResults
-  }
-}
-
-export interface SecurityQueryApiKeysResponse {
-  total: integer
-  count: integer
-  api_keys: SecurityApiKey[]
-  aggregations?: Record
-}
-
-export interface SecurityQueryRoleQueryRole extends SecurityRoleDescriptor {
-  _sort?: SortResults
-  name: string
-}
-
-export interface SecurityQueryRoleRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    query?: SecurityQueryRoleRoleQueryContainer
-    from?: integer
-    sort?: Sort
-    size?: integer
-    search_after?: SortResults
-  }
-}
-
-export interface SecurityQueryRoleResponse {
-  total: integer
-  count: integer
-  roles: SecurityQueryRoleQueryRole[]
-}
-
-export interface SecurityQueryRoleRoleQueryContainer {
-  bool?: QueryDslBoolQuery
-  exists?: QueryDslExistsQuery
-  ids?: QueryDslIdsQuery
-  match?: Partial>
-  match_all?: QueryDslMatchAllQuery
-  prefix?: Partial>
-  range?: Partial>
-  simple_query_string?: QueryDslSimpleQueryStringQuery
-  term?: Partial>
-  terms?: QueryDslTermsQuery
-  wildcard?: Partial>
-}
-
-export interface SecurityQueryUserQueryUser extends SecurityUser {
-  _sort?: SortResults
-}
-
-export interface SecurityQueryUserRequest extends RequestBase {
-  with_profile_uid?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    query?: SecurityQueryUserUserQueryContainer
-    from?: integer
-    sort?: Sort
-    size?: integer
-    search_after?: SortResults
-  }
-}
-
-export interface SecurityQueryUserResponse {
-  total: integer
-  count: integer
-  users: SecurityQueryUserQueryUser[]
-}
-
-export interface SecurityQueryUserUserQueryContainer {
-  ids?: QueryDslIdsQuery
-  bool?: QueryDslBoolQuery
-  exists?: QueryDslExistsQuery
-  match?: Partial>
-  match_all?: QueryDslMatchAllQuery
-  prefix?: Partial>
-  range?: Partial>
-  simple_query_string?: QueryDslSimpleQueryStringQuery
-  term?: Partial>
-  terms?: QueryDslTermsQuery
-  wildcard?: Partial>
-}
-
-export interface SecuritySamlAuthenticateRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    content: string
-    ids: Ids
-    realm?: string
-  }
-}
-
-export interface SecuritySamlAuthenticateResponse {
-  access_token: string
-  username: string
-  expires_in: integer
-  refresh_token: string
-  realm: string
-}
-
-export interface SecuritySamlCompleteLogoutRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    realm: string
-    ids: Ids
-    query_string?: string
-    content?: string
-  }
-}
-
-export type SecuritySamlCompleteLogoutResponse = boolean
-
-export interface SecuritySamlInvalidateRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    acs?: string
-    query_string: string
-    realm?: string
-  }
-}
-
-export interface SecuritySamlInvalidateResponse {
-  invalidated: integer
-  realm: string
-  redirect: string
-}
-
-export interface SecuritySamlLogoutRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    token: string
-    refresh_token?: string
-  }
-}
-
-export interface SecuritySamlLogoutResponse {
-  redirect: string
-}
-
-export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    acs?: string
-    realm?: string
-    relay_state?: string
-  }
-}
-
-export interface SecuritySamlPrepareAuthenticationResponse {
-  id: Id
-  realm: string
-  redirect: string
-}
-
-export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase {
-  realm_name: Name
-}
-
-export interface SecuritySamlServiceProviderMetadataResponse {
-  metadata: string
-}
-
-export interface SecuritySuggestUserProfilesHint {
-  uids?: SecurityUserProfileId[]
-  labels?: Record
-}
-
-export interface SecuritySuggestUserProfilesRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    name?: string
-    size?: long
-    data?: string | string[]
-    hint?: SecuritySuggestUserProfilesHint
-  }
-}
-
-export interface SecuritySuggestUserProfilesResponse {
-  total: SecuritySuggestUserProfilesTotalUserProfiles
-  took: long
-  profiles: SecurityUserProfile[]
-}
-
-export interface SecuritySuggestUserProfilesTotalUserProfiles {
-  value: long
-  relation: RelationName
-}
-
-export interface SecurityUpdateApiKeyRequest extends RequestBase {
-  id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    role_descriptors?: Record
-    metadata?: Metadata
-    expiration?: Duration
-  }
-}
-
-export interface SecurityUpdateApiKeyResponse {
-  updated: boolean
-}
-
-export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase {
-  id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    access: SecurityAccess
-    expiration?: Duration
-    metadata?: Metadata
-  }
-}
-
-export interface SecurityUpdateCrossClusterApiKeyResponse {
-  updated: boolean
-}
-
-export interface SecurityUpdateUserProfileDataRequest extends RequestBase {
-  uid: SecurityUserProfileId
-  if_seq_no?: SequenceNumber
-  if_primary_term?: long
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    labels?: Record
-    data?: Record
-  }
-}
-
-export type SecurityUpdateUserProfileDataResponse = AcknowledgedResponseBase
-
-export type ShutdownType = 'restart' | 'remove' | 'replace'
-
-export interface ShutdownDeleteNodeRequest extends RequestBase {
-  node_id: NodeId
-  master_timeout?: TimeUnit
-  timeout?: TimeUnit
-}
-
-export type ShutdownDeleteNodeResponse = AcknowledgedResponseBase
-
-export interface ShutdownGetNodeNodeShutdownStatus {
-  node_id: NodeId
-  type: ShutdownGetNodeShutdownType
-  reason: string
-  shutdown_startedmillis: EpochTime
-  status: ShutdownGetNodeShutdownStatus
-  shard_migration: ShutdownGetNodeShardMigrationStatus
-  persistent_tasks: ShutdownGetNodePersistentTaskStatus
-  plugins: ShutdownGetNodePluginsStatus
-}
-
-export interface ShutdownGetNodePersistentTaskStatus {
-  status: ShutdownGetNodeShutdownStatus
-}
-
-export interface ShutdownGetNodePluginsStatus {
-  status: ShutdownGetNodeShutdownStatus
-}
-
-export interface ShutdownGetNodeRequest extends RequestBase {
-  node_id?: NodeIds
-  master_timeout?: TimeUnit
-  timeout?: TimeUnit
-}
-
-export interface ShutdownGetNodeResponse {
-  nodes: ShutdownGetNodeNodeShutdownStatus[]
-}
-
-export interface ShutdownGetNodeShardMigrationStatus {
-  status: ShutdownGetNodeShutdownStatus
-}
-
-export type ShutdownGetNodeShutdownStatus = 'not_started' | 'in_progress' | 'stalled' | 'complete'
-
-export type ShutdownGetNodeShutdownType = 'remove' | 'restart'
-
-export interface ShutdownPutNodeRequest extends RequestBase {
-  node_id: NodeId
-  master_timeout?: TimeUnit
-  timeout?: TimeUnit
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    type: ShutdownType
-    reason: string
-    allocation_delay?: string
-    target_node_name?: string
-  }
-}
-
-export type ShutdownPutNodeResponse = AcknowledgedResponseBase
-
-export interface SlmConfiguration {
-  ignore_unavailable?: boolean
-  indices?: Indices
-  include_global_state?: boolean
-  feature_states?: string[]
-  metadata?: Metadata
-  partial?: boolean
-}
-
-export interface SlmInProgress {
-  name: Name
-  start_time_millis: EpochTime
-  state: string
-  uuid: Uuid
-}
-
-export interface SlmInvocation {
-  snapshot_name: Name
-  time: DateTime
-}
-
-export interface SlmPolicy {
-  config?: SlmConfiguration
-  name: Name
-  repository: string
-  retention?: SlmRetention
-  schedule: WatcherCronExpression
-}
-
-export interface SlmRetention {
-  expire_after: Duration
-  max_count: integer
-  min_count: integer
-}
-
-export interface SlmSnapshotLifecycle {
-  in_progress?: SlmInProgress
-  last_failure?: SlmInvocation
-  last_success?: SlmInvocation
-  modified_date?: DateTime
-  modified_date_millis: EpochTime
-  next_execution?: DateTime
-  next_execution_millis: EpochTime
-  policy: SlmPolicy
-  version: VersionNumber
-  stats: SlmStatistics
-}
-
-export interface SlmStatistics {
-  retention_deletion_time?: Duration
-  retention_deletion_time_millis?: DurationValue
-  retention_failed?: long
-  retention_runs?: long
-  retention_timed_out?: long
-  policy?: Id
-  total_snapshots_deleted?: long
-  snapshots_deleted?: long
-  total_snapshot_deletion_failures?: long
-  snapshot_deletion_failures?: long
-  total_snapshots_failed?: long
-  snapshots_failed?: long
-  total_snapshots_taken?: long
-  snapshots_taken?: long
-}
-
-export interface SlmDeleteLifecycleRequest extends RequestBase {
-  policy_id: Name
-}
-
-export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase
-
-export interface SlmExecuteLifecycleRequest extends RequestBase {
-  policy_id: Name
-}
-
-export interface SlmExecuteLifecycleResponse {
-  snapshot_name: Name
-}
-
-export interface SlmExecuteRetentionRequest extends RequestBase {
-}
-
-export type SlmExecuteRetentionResponse = AcknowledgedResponseBase
-
-export interface SlmGetLifecycleRequest extends RequestBase {
-  policy_id?: Names
-}
-
-export type SlmGetLifecycleResponse = Record
-
-export interface SlmGetStatsRequest extends RequestBase {
-}
-
-export interface SlmGetStatsResponse {
-  retention_deletion_time: Duration
-  retention_deletion_time_millis: DurationValue
-  retention_failed: long
-  retention_runs: long
-  retention_timed_out: long
-  total_snapshots_deleted: long
-  total_snapshot_deletion_failures: long
-  total_snapshots_failed: long
-  total_snapshots_taken: long
-  policy_stats: string[]
-}
-
-export interface SlmGetStatusRequest extends RequestBase {
-}
-
-export interface SlmGetStatusResponse {
-  operation_mode: LifecycleOperationMode
-}
-
-export interface SlmPutLifecycleRequest extends RequestBase {
-  policy_id: Name
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    config?: SlmConfiguration
-    name?: Name
-    repository?: string
-    retention?: SlmRetention
-    schedule?: WatcherCronExpression
-  }
-}
-
-export type SlmPutLifecycleResponse = AcknowledgedResponseBase
-
-export interface SlmStartRequest extends RequestBase {
-}
-
-export type SlmStartResponse = AcknowledgedResponseBase
-
-export interface SlmStopRequest extends RequestBase {
-}
-
-export type SlmStopResponse = AcknowledgedResponseBase
-
-export interface SnapshotAzureRepository extends SnapshotRepositoryBase {
-  type: 'azure'
-  settings: SnapshotAzureRepositorySettings
-}
-
-export interface SnapshotAzureRepositorySettings extends SnapshotRepositorySettingsBase {
-  client?: string
-  container?: string
-  base_path?: string
-  readonly?: boolean
-  location_mode?: string
-}
-
-export interface SnapshotFileCountSnapshotStats {
-  file_count: integer
-  size_in_bytes: long
-}
-
-export interface SnapshotGcsRepository extends SnapshotRepositoryBase {
-  type: 'gcs'
-  settings: SnapshotGcsRepositorySettings
-}
-
-export interface SnapshotGcsRepositorySettings extends SnapshotRepositorySettingsBase {
-  bucket: string
-  client?: string
-  base_path?: string
-  readonly?: boolean
-  application_name?: string
-}
-
-export interface SnapshotIndexDetails {
-  shard_count: integer
-  size?: ByteSize
-  size_in_bytes: long
-  max_segments_per_shard: long
-}
-
-export interface SnapshotInfoFeatureState {
-  feature_name: string
-  indices: Indices
-}
-
-export interface SnapshotReadOnlyUrlRepository extends SnapshotRepositoryBase {
-  type: 'url'
-  settings: SnapshotReadOnlyUrlRepositorySettings
-}
-
-export interface SnapshotReadOnlyUrlRepositorySettings extends SnapshotRepositorySettingsBase {
-  http_max_retries?: integer
-  http_socket_timeout?: Duration
-  max_number_of_snapshots?: integer
-  url: string
-}
-
-export type SnapshotRepository = SnapshotAzureRepository | SnapshotGcsRepository | SnapshotS3Repository | SnapshotSharedFileSystemRepository | SnapshotReadOnlyUrlRepository | SnapshotSourceOnlyRepository
-
-export interface SnapshotRepositoryBase {
-  uuid?: Uuid
-}
-
-export interface SnapshotRepositorySettingsBase {
-  chunk_size?: ByteSize
-  compress?: boolean
-  max_restore_bytes_per_sec?: ByteSize
-  max_snapshot_bytes_per_sec?: ByteSize
-}
-
-export interface SnapshotS3Repository extends SnapshotRepositoryBase {
-  type: 's3'
-  settings: SnapshotS3RepositorySettings
-}
-
-export interface SnapshotS3RepositorySettings extends SnapshotRepositorySettingsBase {
-  bucket: string
-  client?: string
-  base_path?: string
-  readonly?: boolean
-  server_side_encryption?: boolean
-  buffer_size?: ByteSize
-  canned_acl?: string
-  storage_class?: string
-}
-
-export interface SnapshotShardsStats {
-  done: long
-  failed: long
-  finalizing: long
-  initializing: long
-  started: long
-  total: long
-}
-
-export type SnapshotShardsStatsStage = 'DONE' | 'FAILURE' | 'FINALIZE' | 'INIT' | 'STARTED'
-
-export interface SnapshotShardsStatsSummary {
-  incremental: SnapshotShardsStatsSummaryItem
-  total: SnapshotShardsStatsSummaryItem
-  start_time_in_millis: EpochTime
-  time?: Duration
-  time_in_millis: DurationValue
-}
-
-export interface SnapshotShardsStatsSummaryItem {
-  file_count: long
-  size_in_bytes: long
-}
-
-export interface SnapshotSharedFileSystemRepository extends SnapshotRepositoryBase {
-  type: 'fs'
-  settings: SnapshotSharedFileSystemRepositorySettings
-}
-
-export interface SnapshotSharedFileSystemRepositorySettings extends SnapshotRepositorySettingsBase {
-  location: string
-  max_number_of_snapshots?: integer
-  readonly?: boolean
-}
-
-export interface SnapshotSnapshotIndexStats {
-  shards: Record
-  shards_stats: SnapshotShardsStats
-  stats: SnapshotSnapshotStats
-}
-
-export interface SnapshotSnapshotInfo {
-  data_streams: string[]
-  duration?: Duration
-  duration_in_millis?: DurationValue
-  end_time?: DateTime
-  end_time_in_millis?: EpochTime
-  failures?: SnapshotSnapshotShardFailure[]
-  include_global_state?: boolean
-  indices?: IndexName[]
-  index_details?: Record
-  metadata?: Metadata
-  reason?: string
-  repository?: Name
-  snapshot: Name
-  shards?: ShardStatistics
-  start_time?: DateTime
-  start_time_in_millis?: EpochTime
-  state?: string
-  uuid: Uuid
-  version?: VersionString
-  version_id?: VersionNumber
-  feature_states?: SnapshotInfoFeatureState[]
-}
-
-export interface SnapshotSnapshotShardFailure {
-  index: IndexName
-  node_id?: Id
-  reason: string
-  shard_id: Id
-  index_uuid: Id
-  status: string
-}
-
-export interface SnapshotSnapshotShardsStatus {
-  stage: SnapshotShardsStatsStage
-  stats: SnapshotShardsStatsSummary
-}
-
-export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_count' | 'repository' | 'shard_count' | 'failed_shard_count'
-
-export interface SnapshotSnapshotStats {
-  incremental: SnapshotFileCountSnapshotStats
-  start_time_in_millis: EpochTime
-  time?: Duration
-  time_in_millis: DurationValue
-  total: SnapshotFileCountSnapshotStats
-}
-
-export interface SnapshotSourceOnlyRepository extends SnapshotRepositoryBase {
-  type: 'source'
-  settings: SnapshotSourceOnlyRepositorySettings
-}
-
-export interface SnapshotSourceOnlyRepositorySettings extends SnapshotRepositorySettingsBase {
-  delegate_type?: string
-  max_number_of_snapshots?: integer
-  read_only?: boolean
-  readonly?: boolean
-}
-
-export interface SnapshotStatus {
-  include_global_state: boolean
-  indices: Record
-  repository: string
-  shards_stats: SnapshotShardsStats
-  snapshot: string
-  state: string
-  stats: SnapshotSnapshotStats
-  uuid: Uuid
-}
-
-export interface SnapshotCleanupRepositoryCleanupRepositoryResults {
-  deleted_blobs: long
-  deleted_bytes: long
-}
-
-export interface SnapshotCleanupRepositoryRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface SnapshotCleanupRepositoryResponse {
-  results: SnapshotCleanupRepositoryCleanupRepositoryResults
-}
-
-export interface SnapshotCloneRequest extends RequestBase {
-  repository: Name
-  snapshot: Name
-  target_snapshot: Name
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    indices: string
-  }
-}
-
-export type SnapshotCloneResponse = AcknowledgedResponseBase
-
-export interface SnapshotCreateRequest extends RequestBase {
-  repository: Name
-  snapshot: Name
-  master_timeout?: Duration
-  wait_for_completion?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    ignore_unavailable?: boolean
-    include_global_state?: boolean
-    indices?: Indices
-    feature_states?: string[]
-    metadata?: Metadata
-    partial?: boolean
-  }
-}
-
-export interface SnapshotCreateResponse {
-  accepted?: boolean
-  snapshot?: SnapshotSnapshotInfo
-}
-
-export interface SnapshotCreateRepositoryRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-  verify?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, use 'repository' instead. */
-  body?: SnapshotRepository
-}
-
-export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase
-
-export interface SnapshotDeleteRequest extends RequestBase {
-  repository: Name
-  snapshot: Name
-  master_timeout?: Duration
-}
-
-export type SnapshotDeleteResponse = AcknowledgedResponseBase
-
-export interface SnapshotDeleteRepositoryRequest extends RequestBase {
-  name: Names
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase
-
-export interface SnapshotGetRequest extends RequestBase {
-  repository: Name
-  snapshot: Names
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-  verbose?: boolean
-  index_details?: boolean
-  index_names?: boolean
-  include_repository?: boolean
-  sort?: SnapshotSnapshotSort
-  size?: integer
-  order?: SortOrder
-  after?: string
-  offset?: integer
-  from_sort_value?: string
-  slm_policy_filter?: Name
-}
-
-export interface SnapshotGetResponse {
-  responses?: SnapshotGetSnapshotResponseItem[]
-  snapshots?: SnapshotSnapshotInfo[]
-  total: integer
-  remaining: integer
-}
-
-export interface SnapshotGetSnapshotResponseItem {
-  repository: Name
-  snapshots?: SnapshotSnapshotInfo[]
-  error?: ErrorCause
-}
-
-export interface SnapshotGetRepositoryRequest extends RequestBase {
-  name?: Names
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type SnapshotGetRepositoryResponse = Record
-
-export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase {
-  name: Names
-  meta_thread_pool_concurrency?: integer
-  blob_thread_pool_concurrency?: integer
-  snapshot_verification_concurrency?: integer
-  index_verification_concurrency?: integer
-  index_snapshot_verification_concurrency?: integer
-  max_failed_shard_snapshots?: integer
-  verify_blob_contents?: boolean
-  max_bytes_per_sec?: string
-}
-
-export type SnapshotRepositoryVerifyIntegrityResponse = any
-
-export interface SnapshotRestoreRequest extends RequestBase {
-  repository: Name
-  snapshot: Name
-  master_timeout?: Duration
-  wait_for_completion?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    feature_states?: string[]
-    ignore_index_settings?: string[]
-    ignore_unavailable?: boolean
-    include_aliases?: boolean
-    include_global_state?: boolean
-    index_settings?: IndicesIndexSettings
-    indices?: Indices
-    partial?: boolean
-    rename_pattern?: string
-    rename_replacement?: string
-  }
-}
-
-export interface SnapshotRestoreResponse {
-  accepted?: boolean
-  snapshot?: SnapshotRestoreSnapshotRestore
-}
-
-export interface SnapshotRestoreSnapshotRestore {
-  indices: IndexName[]
-  snapshot: string
-  shards: ShardStatistics
-}
-
-export interface SnapshotStatusRequest extends RequestBase {
-  repository?: Name
-  snapshot?: Names
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-}
-
-export interface SnapshotStatusResponse {
-  snapshots: SnapshotStatus[]
-}
-
-export interface SnapshotVerifyRepositoryCompactNodeInfo {
-  name: Name
-}
-
-export interface SnapshotVerifyRepositoryRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface SnapshotVerifyRepositoryResponse {
-  nodes: Record
-}
-
-export interface SqlColumn {
-  name: Name
-  type: string
-}
-
-export type SqlRow = any[]
-
-export interface SqlClearCursorRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    cursor: string
-  }
-}
-
-export interface SqlClearCursorResponse {
-  succeeded: boolean
-}
-
-export interface SqlDeleteAsyncRequest extends RequestBase {
-  id: Id
-}
-
-export type SqlDeleteAsyncResponse = AcknowledgedResponseBase
-
-export interface SqlGetAsyncRequest extends RequestBase {
-  id: Id
-  delimiter?: string
-  format?: string
-  keep_alive?: Duration
-  wait_for_completion_timeout?: Duration
-}
-
-export interface SqlGetAsyncResponse {
-  id: Id
-  is_running: boolean
-  is_partial: boolean
-  columns?: SqlColumn[]
-  cursor?: string
-  rows: SqlRow[]
-}
-
-export interface SqlGetAsyncStatusRequest extends RequestBase {
-  id: Id
-}
-
-export interface SqlGetAsyncStatusResponse {
-  id: string
-  is_running: boolean
-  is_partial: boolean
-  start_time_in_millis: EpochTime
-  expiration_time_in_millis: EpochTime
-  completion_status?: uint
-}
-
-export interface SqlQueryRequest extends RequestBase {
-  format?: SqlQuerySqlFormat
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    catalog?: string
-    columnar?: boolean
-    cursor?: string
-    fetch_size?: integer
-    filter?: QueryDslQueryContainer
-    query?: string
-    request_timeout?: Duration
-    page_timeout?: Duration
-    time_zone?: TimeZone
-    field_multi_value_leniency?: boolean
-    runtime_mappings?: MappingRuntimeFields
-    wait_for_completion_timeout?: Duration
-    params?: Record
-    keep_alive?: Duration
-    keep_on_completion?: boolean
-    index_using_frozen?: boolean
-  }
-}
-
-export interface SqlQueryResponse {
-  id?: Id
-  is_running?: boolean
-  is_partial?: boolean
-  columns?: SqlColumn[]
-  cursor?: string
-  rows: SqlRow[]
-}
-
-export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile'
-
-export interface SqlTranslateRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    fetch_size?: integer
-    filter?: QueryDslQueryContainer
-    query: string
-    time_zone?: TimeZone
-  }
-}
-
-export interface SqlTranslateResponse {
-  aggregations?: Record
-  size?: long
-  _source?: SearchSourceConfig
-  fields?: (QueryDslFieldAndFormat | Field)[]
-  query?: QueryDslQueryContainer
-  sort?: Sort
-}
-
-export interface SslCertificatesCertificateInformation {
-  alias: string | null
-  expiry: DateTime
-  format: string
-  has_private_key: boolean
-  issuer?: string
-  path: string
-  serial_number: string
-  subject_dn: string
-}
-
-export interface SslCertificatesRequest extends RequestBase {
-}
-
-export type SslCertificatesResponse = SslCertificatesCertificateInformation[]
-
-export interface SynonymsSynonymRule {
-  id?: Id
-  synonyms: SynonymsSynonymString
-}
-
-export interface SynonymsSynonymRuleRead {
-  id: Id
-  synonyms: SynonymsSynonymString
-}
-
-export type SynonymsSynonymString = string
-
-export interface SynonymsSynonymsUpdateResult {
-  result: Result
-  reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult
-}
-
-export interface SynonymsDeleteSynonymRequest extends RequestBase {
-  id: Id
-}
-
-export type SynonymsDeleteSynonymResponse = AcknowledgedResponseBase
-
-export interface SynonymsDeleteSynonymRuleRequest extends RequestBase {
-  set_id: Id
-  rule_id: Id
-}
-
-export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult
-
-export interface SynonymsGetSynonymRequest extends RequestBase {
-  id: Id
-  from?: integer
-  size?: integer
-}
-
-export interface SynonymsGetSynonymResponse {
-  count: integer
-  synonyms_set: SynonymsSynonymRuleRead[]
-}
-
-export interface SynonymsGetSynonymRuleRequest extends RequestBase {
-  set_id: Id
-  rule_id: Id
-}
-
-export type SynonymsGetSynonymRuleResponse = SynonymsSynonymRuleRead
-
-export interface SynonymsGetSynonymsSetsRequest extends RequestBase {
-  from?: integer
-  size?: integer
-}
-
-export interface SynonymsGetSynonymsSetsResponse {
-  count: integer
-  results: SynonymsGetSynonymsSetsSynonymsSetItem[]
-}
-
-export interface SynonymsGetSynonymsSetsSynonymsSetItem {
-  synonyms_set: Id
-  count: integer
-}
-
-export interface SynonymsPutSynonymRequest extends RequestBase {
-  id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[]
-  }
-}
-
-export interface SynonymsPutSynonymResponse {
-  result: Result
-  reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult
-}
-
-export interface SynonymsPutSynonymRuleRequest extends RequestBase {
-  set_id: Id
-  rule_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    synonyms: SynonymsSynonymString
-  }
-}
-
-export type SynonymsPutSynonymRuleResponse = SynonymsSynonymsUpdateResult
-
-export type TasksGroupBy = 'nodes' | 'parents' | 'none'
-
-export interface TasksNodeTasks {
-  name?: NodeId
-  transport_address?: TransportAddress
-  host?: Host
-  ip?: Ip
-  roles?: string[]
-  attributes?: Record
-  tasks: Record
-}
-
-export interface TasksParentTaskInfo extends TasksTaskInfo {
-  children?: TasksTaskInfo[]
-}
-
-export interface TasksTaskInfo {
-  action: string
-  cancelled?: boolean
-  cancellable: boolean
-  description?: string
-  headers: Record
-  id: long
-  node: NodeId
-  running_time?: Duration
-  running_time_in_nanos: DurationValue
-  start_time_in_millis: EpochTime
-  status?: any
-  type: string
-  parent_task_id?: TaskId
-}
-
-export type TasksTaskInfos = TasksTaskInfo[] | Record
-
-export interface TasksTaskListResponseBase {
-  node_failures?: ErrorCause[]
-  task_failures?: TaskFailure[]
-  nodes?: Record
-  tasks?: TasksTaskInfos
-}
-
-export interface TasksCancelRequest extends RequestBase {
-  task_id?: TaskId
-  actions?: string | string[]
-  nodes?: string[]
-  parent_task_id?: string
-  wait_for_completion?: boolean
-}
-
-export type TasksCancelResponse = TasksTaskListResponseBase
-
-export interface TasksGetRequest extends RequestBase {
-  task_id: Id
-  timeout?: Duration
-  wait_for_completion?: boolean
-}
-
-export interface TasksGetResponse {
-  completed: boolean
-  task: TasksTaskInfo
-  response?: any
-  error?: ErrorCause
-}
-
-export interface TasksListRequest extends RequestBase {
-  actions?: string | string[]
-  detailed?: boolean
-  group_by?: TasksGroupBy
-  nodes?: NodeIds
-  parent_task_id?: Id
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_completion?: boolean
-}
-
-export type TasksListResponse = TasksTaskListResponseBase
-
-export interface TextStructureFindStructureFieldStat {
-  count: integer
-  cardinality: integer
-  top_hits: TextStructureFindStructureTopHit[]
-  mean_value?: integer
-  median_value?: integer
-  max_value?: integer
-  min_value?: integer
-  earliest?: string
-  latest?: string
-}
-
-export interface TextStructureFindStructureRequest {
-  charset?: string
-  column_names?: string
-  delimiter?: string
-  ecs_compatibility?: string
-  explain?: boolean
-  format?: string
-  grok_pattern?: GrokPattern
-  has_header_row?: boolean
-  line_merge_size_limit?: uint
-  lines_to_sample?: uint
-  quote?: string
-  should_trim_fields?: boolean
-  timeout?: Duration
-  timestamp_field?: Field
-  timestamp_format?: string
-  /** @deprecated The use of the 'body' key has been deprecated, use 'text_files' instead. */
-  body?: TJsonDocument[]
-}
-
-export interface TextStructureFindStructureResponse {
-  charset: string
-  has_header_row?: boolean
-  has_byte_order_marker: boolean
-  format: string
-  field_stats: Record
-  sample_start: string
-  num_messages_analyzed: integer
-  mappings: MappingTypeMapping
-  quote?: string
-  delimiter?: string
-  need_client_timezone: boolean
-  num_lines_analyzed: integer
-  column_names?: string[]
-  explanation?: string[]
-  grok_pattern?: GrokPattern
-  multiline_start_pattern?: string
-  exclude_lines_pattern?: string
-  java_timestamp_formats?: string[]
-  joda_timestamp_formats?: string[]
-  timestamp_field?: Field
-  should_trim_fields?: boolean
-  ingest_pipeline: IngestPipelineConfig
-}
-
-export interface TextStructureFindStructureTopHit {
-  count: long
-  value: any
-}
-
-export interface TextStructureTestGrokPatternMatchedField {
-  match: string
-  offset: integer
-  length: integer
-}
-
-export interface TextStructureTestGrokPatternMatchedText {
-  matched: boolean
-  fields?: Record
-}
-
-export interface TextStructureTestGrokPatternRequest extends RequestBase {
-  ecs_compatibility?: string
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    grok_pattern: GrokPattern
-    text: string[]
-  }
-}
-
-export interface TextStructureTestGrokPatternResponse {
-  matches: TextStructureTestGrokPatternMatchedText[]
-}
-
-export interface TransformDestination {
-  index?: IndexName
-  pipeline?: string
-}
-
-export interface TransformLatest {
-  sort: Field
-  unique_key: Field[]
-}
-
-export interface TransformPivot {
-  aggregations?: Record
-  aggs?: Record
-  group_by?: Record
-}
-
-export interface TransformPivotGroupByContainer {
-  date_histogram?: AggregationsDateHistogramAggregation
-  geotile_grid?: AggregationsGeoTileGridAggregation
-  histogram?: AggregationsHistogramAggregation
-  terms?: AggregationsTermsAggregation
-}
-
-export interface TransformRetentionPolicy {
-  field: Field
-  max_age: Duration
-}
-
-export interface TransformRetentionPolicyContainer {
-  time?: TransformRetentionPolicy
-}
-
-export interface TransformSettings {
-  align_checkpoints?: boolean
-  dates_as_epoch_millis?: boolean
-  deduce_mappings?: boolean
-  docs_per_second?: float
-  max_page_search_size?: integer
-  unattended?: boolean
-}
-
-export interface TransformSource {
-  index: Indices
-  query?: QueryDslQueryContainer
-  runtime_mappings?: MappingRuntimeFields
-}
-
-export interface TransformSyncContainer {
-  time?: TransformTimeSync
-}
-
-export interface TransformTimeSync {
-  delay?: Duration
-  field: Field
-}
-
-export interface TransformDeleteTransformRequest extends RequestBase {
-  transform_id: Id
-  force?: boolean
-  delete_dest_index?: boolean
-  timeout?: Duration
-}
-
-export type TransformDeleteTransformResponse = AcknowledgedResponseBase
-
-export interface TransformGetTransformRequest extends RequestBase {
-  transform_id?: Names
-  allow_no_match?: boolean
-  from?: integer
-  size?: integer
-  exclude_generated?: boolean
-}
-
-export interface TransformGetTransformResponse {
-  count: long
-  transforms: TransformGetTransformTransformSummary[]
-}
-
-export interface TransformGetTransformTransformSummary {
-  authorization?: MlTransformAuthorization
-  create_time?: EpochTime
-  description?: string
-  dest: ReindexDestination
-  frequency?: Duration
-  id: Id
-  latest?: TransformLatest
-  pivot?: TransformPivot
-  retention_policy?: TransformRetentionPolicyContainer
-  settings?: TransformSettings
-  source: TransformSource
-  sync?: TransformSyncContainer
-  version?: VersionString
-  _meta?: Metadata
-}
-
-export interface TransformGetTransformStatsCheckpointStats {
-  checkpoint: long
-  checkpoint_progress?: TransformGetTransformStatsTransformProgress
-  timestamp?: DateTime
-  timestamp_millis?: EpochTime
-  time_upper_bound?: DateTime
-  time_upper_bound_millis?: EpochTime
-}
-
-export interface TransformGetTransformStatsCheckpointing {
-  changes_last_detected_at?: long
-  changes_last_detected_at_date_time?: DateTime
-  last: TransformGetTransformStatsCheckpointStats
-  next?: TransformGetTransformStatsCheckpointStats
-  operations_behind?: long
-  last_search_time?: long
-}
-
-export interface TransformGetTransformStatsRequest extends RequestBase {
-  transform_id: Names
-  allow_no_match?: boolean
-  from?: long
-  size?: long
-  timeout?: Duration
-}
-
-export interface TransformGetTransformStatsResponse {
-  count: long
-  transforms: TransformGetTransformStatsTransformStats[]
-}
-
-export interface TransformGetTransformStatsTransformIndexerStats {
-  delete_time_in_ms?: EpochTime
-  documents_indexed: long
-  documents_deleted?: long
-  documents_processed: long
-  exponential_avg_checkpoint_duration_ms: DurationValue
-  exponential_avg_documents_indexed: double
-  exponential_avg_documents_processed: double
-  index_failures: long
-  index_time_in_ms: DurationValue
-  index_total: long
-  pages_processed: long
-  processing_time_in_ms: DurationValue
-  processing_total: long
-  search_failures: long
-  search_time_in_ms: DurationValue
-  search_total: long
-  trigger_count: long
-}
-
-export interface TransformGetTransformStatsTransformProgress {
-  docs_indexed: long
-  docs_processed: long
-  docs_remaining?: long
-  percent_complete?: double
-  total_docs?: long
-}
-
-export interface TransformGetTransformStatsTransformStats {
-  checkpointing: TransformGetTransformStatsCheckpointing
-  health?: TransformGetTransformStatsTransformStatsHealth
-  id: Id
-  node?: NodeAttributes
-  reason?: string
-  state: string
-  stats: TransformGetTransformStatsTransformIndexerStats
-}
-
-export interface TransformGetTransformStatsTransformStatsHealth {
-  status: HealthStatus
-}
-
-export interface TransformPreviewTransformRequest extends RequestBase {
-  transform_id?: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    dest?: TransformDestination
-    description?: string
-    frequency?: Duration
-    pivot?: TransformPivot
-    source?: TransformSource
-    settings?: TransformSettings
-    sync?: TransformSyncContainer
-    retention_policy?: TransformRetentionPolicyContainer
-    latest?: TransformLatest
-  }
-}
-
-export interface TransformPreviewTransformResponse {
-  generated_dest_index: IndicesIndexState
-  preview: TTransform[]
-}
-
-export interface TransformPutTransformRequest extends RequestBase {
-  transform_id: Id
-  defer_validation?: boolean
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    dest: TransformDestination
-    description?: string
-    frequency?: Duration
-    latest?: TransformLatest
-    _meta?: Metadata
-    pivot?: TransformPivot
-    retention_policy?: TransformRetentionPolicyContainer
-    settings?: TransformSettings
-    source: TransformSource
-    sync?: TransformSyncContainer
-  }
-}
-
-export type TransformPutTransformResponse = AcknowledgedResponseBase
-
-export interface TransformResetTransformRequest extends RequestBase {
-  transform_id: Id
-  force?: boolean
-}
-
-export type TransformResetTransformResponse = AcknowledgedResponseBase
-
-export interface TransformScheduleNowTransformRequest extends RequestBase {
-  transform_id: Id
-  timeout?: Duration
-}
-
-export type TransformScheduleNowTransformResponse = AcknowledgedResponseBase
-
-export interface TransformStartTransformRequest extends RequestBase {
-  transform_id: Id
-  timeout?: Duration
-  from?: string
-}
-
-export type TransformStartTransformResponse = AcknowledgedResponseBase
-
-export interface TransformStopTransformRequest extends RequestBase {
-  transform_id: Name
-  allow_no_match?: boolean
-  force?: boolean
-  timeout?: Duration
-  wait_for_checkpoint?: boolean
-  wait_for_completion?: boolean
-}
-
-export type TransformStopTransformResponse = AcknowledgedResponseBase
-
-export interface TransformUpdateTransformRequest extends RequestBase {
-  transform_id: Id
-  defer_validation?: boolean
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    dest?: TransformDestination
-    description?: string
-    frequency?: Duration
-    _meta?: Metadata
-    source?: TransformSource
-    settings?: TransformSettings
-    sync?: TransformSyncContainer
-    retention_policy?: TransformRetentionPolicyContainer | null
-  }
-}
-
-export interface TransformUpdateTransformResponse {
-  authorization?: MlTransformAuthorization
-  create_time: long
-  description: string
-  dest: ReindexDestination
-  frequency?: Duration
-  id: Id
-  latest?: TransformLatest
-  pivot?: TransformPivot
-  retention_policy?: TransformRetentionPolicyContainer
-  settings: TransformSettings
-  source: ReindexSource
-  sync?: TransformSyncContainer
-  version: VersionString
-  _meta?: Metadata
-}
-
-export interface TransformUpgradeTransformsRequest extends RequestBase {
-  dry_run?: boolean
-  timeout?: Duration
-}
-
-export interface TransformUpgradeTransformsResponse {
-  needs_update: integer
-  no_action: integer
-  updated: integer
-}
-
-export interface WatcherAcknowledgeState {
-  state: WatcherAcknowledgementOptions
-  timestamp: DateTime
-}
-
-export type WatcherAcknowledgementOptions = 'awaits_successful_execution' | 'ackable' | 'acked'
-
-export interface WatcherAction {
-  action_type?: WatcherActionType
-  condition?: WatcherConditionContainer
-  foreach?: string
-  max_iterations?: integer
-  name?: Name
-  throttle_period?: Duration
-  throttle_period_in_millis?: DurationValue
-  transform?: TransformContainer
-  index?: WatcherIndexAction
-  logging?: WatcherLoggingAction
-  email?: WatcherEmailAction
-  pagerduty?: WatcherPagerDutyAction
-  slack?: WatcherSlackAction
-  webhook?: WatcherWebhookAction
-}
-
-export type WatcherActionExecutionMode = 'simulate' | 'force_simulate' | 'execute' | 'force_execute' | 'skip'
-
-export interface WatcherActionStatus {
-  ack: WatcherAcknowledgeState
-  last_execution?: WatcherExecutionState
-  last_successful_execution?: WatcherExecutionState
-  last_throttle?: WatcherThrottleState
-}
-
-export type WatcherActionStatusOptions = 'success' | 'failure' | 'simulated' | 'throttled'
-
-export type WatcherActionType = 'email' | 'webhook' | 'index' | 'logging' | 'slack' | 'pagerduty'
-
-export type WatcherActions = Record
-
-export interface WatcherActivationState {
-  active: boolean
-  timestamp: DateTime
-}
-
-export interface WatcherActivationStatus {
-  actions: WatcherActions
-  state: WatcherActivationState
-  version: VersionNumber
-}
-
-export interface WatcherAlwaysCondition {
-}
-
-export interface WatcherArrayCompareConditionKeys {
-  path: string
-}
-export type WatcherArrayCompareCondition = WatcherArrayCompareConditionKeys
-& { [property: string]: WatcherArrayCompareOpParams | string }
-
-export interface WatcherArrayCompareOpParams {
-  quantifier: WatcherQuantifier
-  value: FieldValue
-}
-
-export interface WatcherChainInput {
-  inputs: Partial>[]
-}
-
-export interface WatcherConditionContainer {
-  always?: WatcherAlwaysCondition
-  array_compare?: Partial>
-  compare?: Partial>>>
-  never?: WatcherNeverCondition
-  script?: WatcherScriptCondition
-}
-
-export type WatcherConditionOp = 'not_eq' | 'eq' | 'lt' | 'gt' | 'lte' | 'gte'
-
-export type WatcherConditionType = 'always' | 'never' | 'script' | 'compare' | 'array_compare'
-
-export type WatcherConnectionScheme = 'http' | 'https'
-
-export type WatcherCronExpression = string
-
-export interface WatcherDailySchedule {
-  at: WatcherScheduleTimeOfDay[]
-}
-
-export type WatcherDataAttachmentFormat = 'json' | 'yaml'
-
-export interface WatcherDataEmailAttachment {
-  format?: WatcherDataAttachmentFormat
-}
-
-export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday'
-
-export interface WatcherEmail {
-  id?: Id
-  bcc?: string[]
-  body?: WatcherEmailBody
-  cc?: string[]
-  from?: string
-  priority?: WatcherEmailPriority
-  reply_to?: string[]
-  sent_date?: DateTime
-  subject: string
-  to: string[]
-  attachments?: Record
-}
-
-export interface WatcherEmailAction extends WatcherEmail {
-}
-
-export interface WatcherEmailAttachmentContainer {
-  http?: WatcherHttpEmailAttachment
-  reporting?: WatcherReportingEmailAttachment
-  data?: WatcherDataEmailAttachment
-}
-
-export interface WatcherEmailBody {
-  html?: string
-  text?: string
-}
-
-export type WatcherEmailPriority = 'lowest' | 'low' | 'normal' | 'high' | 'highest'
-
-export interface WatcherEmailResult {
-  account?: string
-  message: WatcherEmail
-  reason?: string
-}
-
-export type WatcherExecutionPhase = 'awaits_execution' | 'started' | 'input' | 'condition' | 'actions' | 'watch_transform' | 'aborted' | 'finished'
-
-export interface WatcherExecutionResult {
-  actions: WatcherExecutionResultAction[]
-  condition: WatcherExecutionResultCondition
-  execution_duration: DurationValue
-  execution_time: DateTime
-  input: WatcherExecutionResultInput
-}
-
-export interface WatcherExecutionResultAction {
-  email?: WatcherEmailResult
-  id: Id
-  index?: WatcherIndexResult
-  logging?: WatcherLoggingResult
-  pagerduty?: WatcherPagerDutyResult
-  reason?: string
-  slack?: WatcherSlackResult
-  status: WatcherActionStatusOptions
-  type: WatcherActionType
-  webhook?: WatcherWebhookResult
-  error?: ErrorCause
-}
-
-export interface WatcherExecutionResultCondition {
-  met: boolean
-  status: WatcherActionStatusOptions
-  type: WatcherConditionType
-}
-
-export interface WatcherExecutionResultInput {
-  payload: Record
-  status: WatcherActionStatusOptions
-  type: WatcherInputType
-}
-
-export interface WatcherExecutionState {
-  successful: boolean
-  timestamp: DateTime
-  reason?: string
-}
-
-export type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued'
-
-export interface WatcherExecutionThreadPool {
-  max_size: long
-  queue_size: long
-}
-
-export interface WatcherHourAndMinute {
-  hour: integer[]
-  minute: integer[]
-}
-
-export interface WatcherHourlySchedule {
-  minute: integer[]
-}
-
-export interface WatcherHttpEmailAttachment {
-  content_type?: string
-  inline?: boolean
-  request?: WatcherHttpInputRequestDefinition
-}
-
-export interface WatcherHttpInput {
-  extract?: string[]
-  request?: WatcherHttpInputRequestDefinition
-  response_content_type?: WatcherResponseContentType
-}
-
-export interface WatcherHttpInputAuthentication {
-  basic: WatcherHttpInputBasicAuthentication
-}
-
-export interface WatcherHttpInputBasicAuthentication {
-  password: Password
-  username: Username
-}
-
-export type WatcherHttpInputMethod = 'head' | 'get' | 'post' | 'put' | 'delete'
-
-export interface WatcherHttpInputProxy {
-  host: Host
-  port: uint
-}
-
-export interface WatcherHttpInputRequestDefinition {
-  auth?: WatcherHttpInputAuthentication
-  body?: string
-  connection_timeout?: Duration
-  headers?: Record
-  host?: Host
-  method?: WatcherHttpInputMethod
-  params?: Record
-  path?: string
-  port?: uint
-  proxy?: WatcherHttpInputProxy
-  read_timeout?: Duration
-  scheme?: WatcherConnectionScheme
-  url?: string
-}
-
-export interface WatcherHttpInputRequestResult extends WatcherHttpInputRequestDefinition {
-}
-
-export interface WatcherHttpInputResponseResult {
-  body: string
-  headers: HttpHeaders
-  status: integer
-}
-
-export interface WatcherIndexAction {
-  index: IndexName
-  doc_id?: Id
-  refresh?: Refresh
-  op_type?: OpType
-  timeout?: Duration
-  execution_time_field?: Field
-}
-
-export interface WatcherIndexResult {
-  response: WatcherIndexResultSummary
-}
-
-export interface WatcherIndexResultSummary {
-  created: boolean
-  id: Id
-  index: IndexName
-  result: Result
-  version: VersionNumber
-}
-
-export interface WatcherInputContainer {
-  chain?: WatcherChainInput
-  http?: WatcherHttpInput
-  search?: WatcherSearchInput
-  simple?: Record
-}
-
-export type WatcherInputType = 'http' | 'search' | 'simple'
-
-export interface WatcherLoggingAction {
-  level?: string
-  text: string
-  category?: string
-}
-
-export interface WatcherLoggingResult {
-  logged_text: string
-}
-
-export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | 'june' | 'july' | 'august' | 'september' | 'october' | 'november' | 'december'
-
-export interface WatcherNeverCondition {
-}
-
-export interface WatcherPagerDutyAction extends WatcherPagerDutyEvent {
-}
-
-export interface WatcherPagerDutyContext {
-  href?: string
-  src?: string
-  type: WatcherPagerDutyContextType
-}
-
-export type WatcherPagerDutyContextType = 'link' | 'image'
-
-export interface WatcherPagerDutyEvent {
-  account?: string
-  attach_payload: boolean
-  client?: string
-  client_url?: string
-  contexts?: WatcherPagerDutyContext[]
-  context?: WatcherPagerDutyContext[]
-  description: string
-  event_type?: WatcherPagerDutyEventType
-  incident_key: string
-  proxy?: WatcherPagerDutyEventProxy
-}
-
-export interface WatcherPagerDutyEventProxy {
-  host?: Host
-  port?: integer
-}
-
-export type WatcherPagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge'
-
-export interface WatcherPagerDutyResult {
-  event: WatcherPagerDutyEvent
-  reason?: string
-  request?: WatcherHttpInputRequestResult
-  response?: WatcherHttpInputResponseResult
-}
-
-export type WatcherQuantifier = 'some' | 'all'
-
-export interface WatcherQueryWatch {
-  _id: Id
-  status?: WatcherWatchStatus
-  watch?: WatcherWatch
-  _primary_term?: integer
-  _seq_no?: SequenceNumber
-}
-
-export interface WatcherReportingEmailAttachment {
-  url: string
-  inline?: boolean
-  retries?: integer
-  interval?: Duration
-  request?: WatcherHttpInputRequestDefinition
-}
-
-export type WatcherResponseContentType = 'json' | 'yaml' | 'text'
-
-export interface WatcherScheduleContainer {
-  timezone?: string
-  cron?: WatcherCronExpression
-  daily?: WatcherDailySchedule
-  hourly?: WatcherHourlySchedule
-  interval?: Duration
-  monthly?: WatcherTimeOfMonth | WatcherTimeOfMonth[]
-  weekly?: WatcherTimeOfWeek | WatcherTimeOfWeek[]
-  yearly?: WatcherTimeOfYear | WatcherTimeOfYear[]
-}
-
-export type WatcherScheduleTimeOfDay = string | WatcherHourAndMinute
-
-export interface WatcherScheduleTriggerEvent {
-  scheduled_time: DateTime
-  triggered_time?: DateTime
-}
-
-export interface WatcherScriptCondition {
-  lang?: string
-  params?: Record
-  source?: string
-  id?: string
-}
-
-export interface WatcherSearchInput {
-  extract?: string[]
-  request: WatcherSearchInputRequestDefinition
-  timeout?: Duration
-}
-
-export interface WatcherSearchInputRequestBody {
-  query: QueryDslQueryContainer
-}
-
-export interface WatcherSearchInputRequestDefinition {
-  body?: WatcherSearchInputRequestBody
-  indices?: IndexName[]
-  indices_options?: IndicesOptions
-  search_type?: SearchType
-  template?: WatcherSearchTemplateRequestBody
-  rest_total_hits_as_int?: boolean
-}
-
-export interface WatcherSearchTemplateRequestBody {
-  explain?: boolean
-  id?: Id
-  params?: Record
-  profile?: boolean
-  source?: string
-}
-
-export interface WatcherSimulatedActions {
-  actions: string[]
-  all: WatcherSimulatedActions
-  use_all: boolean
-}
-
-export interface WatcherSlackAction {
-  account?: string
-  message: WatcherSlackMessage
-}
-
-export interface WatcherSlackAttachment {
-  author_icon?: string
-  author_link?: string
-  author_name: string
-  color?: string
-  fallback?: string
-  fields?: WatcherSlackAttachmentField[]
-  footer?: string
-  footer_icon?: string
-  image_url?: string
-  pretext?: string
-  text?: string
-  thumb_url?: string
-  title: string
-  title_link?: string
-  ts?: EpochTime
-}
-
-export interface WatcherSlackAttachmentField {
-  short: boolean
-  title: string
-  value: string
-}
-
-export interface WatcherSlackDynamicAttachment {
-  attachment_template: WatcherSlackAttachment
-  list_path: string
-}
-
-export interface WatcherSlackMessage {
-  attachments: WatcherSlackAttachment[]
-  dynamic_attachments?: WatcherSlackDynamicAttachment
-  from: string
-  icon?: string
-  text: string
-  to: string[]
-}
-
-export interface WatcherSlackResult {
-  account?: string
-  message: WatcherSlackMessage
-}
-
-export interface WatcherThrottleState {
-  reason: string
-  timestamp: DateTime
-}
-
-export interface WatcherTimeOfMonth {
-  at: string[]
-  on: integer[]
-}
-
-export interface WatcherTimeOfWeek {
-  at: string[]
-  on: WatcherDay[]
-}
-
-export interface WatcherTimeOfYear {
-  at: string[]
-  int: WatcherMonth[]
-  on: integer[]
-}
-
-export interface WatcherTriggerContainer {
-  schedule?: WatcherScheduleContainer
-}
-
-export interface WatcherTriggerEventContainer {
-  schedule?: WatcherScheduleTriggerEvent
-}
-
-export interface WatcherTriggerEventResult {
-  manual: WatcherTriggerEventContainer
-  triggered_time: DateTime
-  type: string
-}
-
-export interface WatcherWatch {
-  actions: Record
- condition: WatcherConditionContainer - input: WatcherInputContainer - metadata?: Metadata - status?: WatcherWatchStatus - throttle_period?: Duration - throttle_period_in_millis?: DurationValue - transform?: TransformContainer - trigger: WatcherTriggerContainer -} - -export interface WatcherWatchStatus { - actions: WatcherActions - last_checked?: DateTime - last_met_condition?: DateTime - state: WatcherActivationState - version: VersionNumber - execution_state?: string -} - -export interface WatcherWebhookAction extends WatcherHttpInputRequestDefinition { -} - -export interface WatcherWebhookResult { - request: WatcherHttpInputRequestResult - response?: WatcherHttpInputResponseResult -} - -export interface WatcherAckWatchRequest extends RequestBase { - watch_id: Name - action_id?: Names -} - -export interface WatcherAckWatchResponse { - status: WatcherWatchStatus -} - -export interface WatcherActivateWatchRequest extends RequestBase { - watch_id: Name -} - -export interface WatcherActivateWatchResponse { - status: WatcherActivationStatus -} - -export interface WatcherDeactivateWatchRequest extends RequestBase { - watch_id: Name -} - -export interface WatcherDeactivateWatchResponse { - status: WatcherActivationStatus -} - -export interface WatcherDeleteWatchRequest extends RequestBase { - id: Name -} - -export interface WatcherDeleteWatchResponse { - found: boolean - _id: Id - _version: VersionNumber -} - -export interface WatcherExecuteWatchRequest extends RequestBase { - id?: Id - debug?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - action_modes?: Record - alternative_input?: Record - ignore_condition?: boolean - record_execution?: boolean - simulated_actions?: WatcherSimulatedActions - trigger_data?: WatcherScheduleTriggerEvent - watch?: WatcherWatch - } -} - -export interface WatcherExecuteWatchResponse { - _id: Id - watch_record: WatcherExecuteWatchWatchRecord -} - -export interface WatcherExecuteWatchWatchRecord { - condition: WatcherConditionContainer - input: WatcherInputContainer - messages: string[] - metadata?: Metadata - node: string - result: WatcherExecutionResult - state: WatcherExecutionStatus - trigger_event: WatcherTriggerEventResult - user: Username - watch_id: Id - status?: WatcherWatchStatus -} - -export interface WatcherGetWatchRequest extends RequestBase { - id: Name -} - -export interface WatcherGetWatchResponse { - found: boolean - _id: Id - status?: WatcherWatchStatus - watch?: WatcherWatch - _primary_term?: integer - _seq_no?: SequenceNumber - _version?: VersionNumber -} - -export interface WatcherPutWatchRequest extends RequestBase { - id: Id - active?: boolean - if_primary_term?: long - if_seq_no?: SequenceNumber - version?: VersionNumber - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - actions?: Record - condition?: WatcherConditionContainer - input?: WatcherInputContainer - metadata?: Metadata - throttle_period?: string - transform?: TransformContainer - trigger?: WatcherTriggerContainer - } -} - -export interface WatcherPutWatchResponse { - created: boolean - _id: Id - _primary_term: long - _seq_no: SequenceNumber - _version: VersionNumber -} - -export interface WatcherQueryWatchesRequest extends RequestBase { - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - from?: integer - size?: integer - query?: QueryDslQueryContainer - sort?: Sort - search_after?: SortResults - } -} - -export interface WatcherQueryWatchesResponse { - count: integer - watches: WatcherQueryWatch[] -} - -export interface WatcherStartRequest extends RequestBase { -} - -export type WatcherStartResponse = AcknowledgedResponseBase - -export interface WatcherStatsRequest extends RequestBase { - metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[] - emit_stacktraces?: boolean -} - -export interface WatcherStatsResponse { - _nodes: NodeStatistics - cluster_name: Name - manually_stopped: boolean - stats: WatcherStatsWatcherNodeStats[] -} - -export interface WatcherStatsWatchRecordQueuedStats { - execution_time: DateTime -} - -export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQueuedStats { - execution_phase: WatcherExecutionPhase - triggered_time: DateTime - executed_actions?: string[] - watch_id: Id - watch_record_id: Id -} - -export type WatcherStatsWatcherMetric = '_all' | 'all' | 'queued_watches' | 'current_watches' | 'pending_watches' - -export interface WatcherStatsWatcherNodeStats { - current_watches?: WatcherStatsWatchRecordStats[] - execution_thread_pool: WatcherExecutionThreadPool - queued_watches?: WatcherStatsWatchRecordQueuedStats[] - watch_count: long - watcher_state: WatcherStatsWatcherState - node_id: Id -} - -export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping' - -export interface WatcherStopRequest extends RequestBase { -} - -export type WatcherStopResponse = AcknowledgedResponseBase - -export interface XpackInfoBuildInformation { - date: DateTime - hash: string -} - -export interface XpackInfoFeature { - available: boolean - description?: string - enabled: boolean - native_code_info?: XpackInfoNativeCodeInformation -} - -export interface XpackInfoFeatures { - aggregate_metric: XpackInfoFeature - analytics: XpackInfoFeature - ccr: XpackInfoFeature - data_streams: XpackInfoFeature - data_tiers: XpackInfoFeature - enrich: XpackInfoFeature - enterprise_search: XpackInfoFeature - eql: XpackInfoFeature - esql: XpackInfoFeature - frozen_indices: XpackInfoFeature - graph: XpackInfoFeature - ilm: XpackInfoFeature - logstash: XpackInfoFeature - logsdb: XpackInfoFeature - ml: XpackInfoFeature - monitoring: XpackInfoFeature - rollup: XpackInfoFeature - runtime_fields?: XpackInfoFeature - searchable_snapshots: XpackInfoFeature - security: XpackInfoFeature - slm: XpackInfoFeature - spatial: XpackInfoFeature - sql: XpackInfoFeature - transform: XpackInfoFeature - universal_profiling: XpackInfoFeature - voting_only: XpackInfoFeature - watcher: XpackInfoFeature - archive: XpackInfoFeature -} - -export interface XpackInfoMinimalLicenseInformation { - expiry_date_in_millis: EpochTime - mode: LicenseLicenseType - status: LicenseLicenseStatus - type: LicenseLicenseType - uid: string -} - -export interface XpackInfoNativeCodeInformation { - build_hash: string - version: VersionString -} - -export interface XpackInfoRequest extends RequestBase { - categories?: XpackInfoXPackCategory[] - accept_enterprise?: boolean - human?: boolean -} - -export interface XpackInfoResponse { - build: XpackInfoBuildInformation - features: XpackInfoFeatures - license: XpackInfoMinimalLicenseInformation - tagline: string -} - -export type XpackInfoXPackCategory = 'build' | 'features' | 'license' - -export interface XpackUsageAnalytics extends XpackUsageBase { - stats: XpackUsageAnalyticsStatistics -} - -export interface 
XpackUsageAnalyticsStatistics { - boxplot_usage: long - cumulative_cardinality_usage: long - string_stats_usage: long - top_metrics_usage: long - t_test_usage: long - moving_percentiles_usage: long - normalize_usage: long - rate_usage: long - multi_terms_usage?: long -} - -export interface XpackUsageArchive extends XpackUsageBase { - indices_count: long -} - -export interface XpackUsageAudit extends XpackUsageFeatureToggle { - outputs?: string[] -} - -export interface XpackUsageBase { - available: boolean - enabled: boolean -} - -export interface XpackUsageCcr extends XpackUsageBase { - auto_follow_patterns_count: integer - follower_indices_count: integer -} - -export interface XpackUsageCounter { - active: long - total: long -} - -export interface XpackUsageDataStreams extends XpackUsageBase { - data_streams: long - indices_count: long -} - -export interface XpackUsageDataTierPhaseStatistics { - node_count: long - index_count: long - total_shard_count: long - primary_shard_count: long - doc_count: long - total_size_bytes: long - primary_size_bytes: long - primary_shard_size_avg_bytes: long - primary_shard_size_median_bytes: long - primary_shard_size_mad_bytes: long -} - -export interface XpackUsageDataTiers extends XpackUsageBase { - data_warm: XpackUsageDataTierPhaseStatistics - data_frozen?: XpackUsageDataTierPhaseStatistics - data_cold: XpackUsageDataTierPhaseStatistics - data_content: XpackUsageDataTierPhaseStatistics - data_hot: XpackUsageDataTierPhaseStatistics -} - -export interface XpackUsageDatafeed { - count: long -} - -export interface XpackUsageEql extends XpackUsageBase { - features: XpackUsageEqlFeatures - queries: Record -} - -export interface XpackUsageEqlFeatures { - join: uint - joins: XpackUsageEqlFeaturesJoin - keys: XpackUsageEqlFeaturesKeys - event: uint - pipes: XpackUsageEqlFeaturesPipes - sequence: uint - sequences: XpackUsageEqlFeaturesSequences -} - -export interface XpackUsageEqlFeaturesJoin { - join_queries_two: uint - join_queries_three: uint - join_until: uint - join_queries_five_or_more: uint - join_queries_four: uint -} - -export interface XpackUsageEqlFeaturesKeys { - join_keys_two: uint - join_keys_one: uint - join_keys_three: uint - join_keys_five_or_more: uint - join_keys_four: uint -} - -export interface XpackUsageEqlFeaturesPipes { - pipe_tail: uint - pipe_head: uint -} - -export interface XpackUsageEqlFeaturesSequences { - sequence_queries_three: uint - sequence_queries_four: uint - sequence_queries_two: uint - sequence_until: uint - sequence_queries_five_or_more: uint - sequence_maxspan: uint -} - -export interface XpackUsageFeatureToggle { - enabled: boolean -} - -export interface XpackUsageFlattened extends XpackUsageBase { - field_count: integer -} - -export interface XpackUsageFrozenIndices extends XpackUsageBase { - indices_count: long -} - -export interface XpackUsageHealthStatistics extends XpackUsageBase { - invocations: XpackUsageInvocations -} - -export interface XpackUsageIlm { - policy_count: integer - policy_stats: XpackUsageIlmPolicyStatistics[] -} - -export interface XpackUsageIlmPolicyStatistics { - indices_managed: integer - phases: IlmPhases -} - -export interface XpackUsageInvocations { - total: long -} - -export interface XpackUsageIpFilter { - http: boolean - transport: boolean -} - -export interface XpackUsageJobUsage { - count: integer - created_by: Record - detectors: MlJobStatistics - forecasts: XpackUsageMlJobForecasts - model_size: MlJobStatistics -} - -export interface XpackUsageMachineLearning extends XpackUsageBase { - 
datafeeds: Record - jobs: Record - node_count: integer - data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs - inference: XpackUsageMlInference -} - -export interface XpackUsageMlCounter { - count: long -} - -export interface XpackUsageMlDataFrameAnalyticsJobs { - memory_usage?: XpackUsageMlDataFrameAnalyticsJobsMemory - _all: XpackUsageMlDataFrameAnalyticsJobsCount - analysis_counts?: XpackUsageMlDataFrameAnalyticsJobsAnalysis - stopped?: XpackUsageMlDataFrameAnalyticsJobsCount -} - -export interface XpackUsageMlDataFrameAnalyticsJobsAnalysis { - classification?: integer - outlier_detection?: integer - regression?: integer -} - -export interface XpackUsageMlDataFrameAnalyticsJobsCount { - count: long -} - -export interface XpackUsageMlDataFrameAnalyticsJobsMemory { - peak_usage_bytes: MlJobStatistics -} - -export interface XpackUsageMlInference { - ingest_processors: Record - trained_models: XpackUsageMlInferenceTrainedModels - deployments?: XpackUsageMlInferenceDeployments -} - -export interface XpackUsageMlInferenceDeployments { - count: integer - inference_counts: MlJobStatistics - model_sizes_bytes: MlJobStatistics - time_ms: XpackUsageMlInferenceDeploymentsTimeMs -} - -export interface XpackUsageMlInferenceDeploymentsTimeMs { - avg: double -} - -export interface XpackUsageMlInferenceIngestProcessor { - num_docs_processed: XpackUsageMlInferenceIngestProcessorCount - pipelines: XpackUsageMlCounter - num_failures: XpackUsageMlInferenceIngestProcessorCount - time_ms: XpackUsageMlInferenceIngestProcessorCount -} - -export interface XpackUsageMlInferenceIngestProcessorCount { - max: long - sum: long - min: long -} - -export interface XpackUsageMlInferenceTrainedModels { - estimated_operations?: MlJobStatistics - estimated_heap_memory_usage_bytes?: MlJobStatistics - count?: XpackUsageMlInferenceTrainedModelsCount - _all: XpackUsageMlCounter - model_size_bytes?: MlJobStatistics -} - -export interface XpackUsageMlInferenceTrainedModelsCount { - total: long - prepackaged: long - other: long - pass_through?: long - regression?: long - classification?: long - ner?: long - text_embedding?: long -} - -export interface XpackUsageMlJobForecasts { - total: long - forecasted_jobs: long -} - -export interface XpackUsageMonitoring extends XpackUsageBase { - collection_enabled: boolean - enabled_exporters: Record -} - -export interface XpackUsageQuery { - count?: integer - failed?: integer - paging?: integer - total?: integer -} - -export interface XpackUsageRealm extends XpackUsageBase { - name?: string[] - order?: long[] - size?: long[] - cache?: XpackUsageRealmCache[] - has_authorization_realms?: boolean[] - has_default_username_pattern?: boolean[] - has_truststore?: boolean[] - is_authentication_delegated?: boolean[] -} - -export interface XpackUsageRealmCache { - size: long -} - -export interface XpackUsageRequest extends RequestBase { - master_timeout?: Duration -} - -export interface XpackUsageResponse { - aggregate_metric: XpackUsageBase - analytics: XpackUsageAnalytics - archive: XpackUsageArchive - watcher: XpackUsageWatcher - ccr: XpackUsageCcr - data_frame?: XpackUsageBase - data_science?: XpackUsageBase - data_streams?: XpackUsageDataStreams - data_tiers: XpackUsageDataTiers - enrich?: XpackUsageBase - eql: XpackUsageEql - flattened?: XpackUsageFlattened - frozen_indices: XpackUsageFrozenIndices - graph: XpackUsageBase - health_api?: XpackUsageHealthStatistics - ilm: XpackUsageIlm - logstash: XpackUsageBase - ml: XpackUsageMachineLearning - monitoring: XpackUsageMonitoring - 
rollup: XpackUsageBase - runtime_fields?: XpackUsageRuntimeFieldTypes - spatial: XpackUsageBase - searchable_snapshots: XpackUsageSearchableSnapshots - security: XpackUsageSecurity - slm: XpackUsageSlm - sql: XpackUsageSql - transform: XpackUsageBase - vectors?: XpackUsageVector - voting_only: XpackUsageBase -} - -export interface XpackUsageRoleMapping { - enabled: integer - size: integer -} - -export interface XpackUsageRuntimeFieldTypes extends XpackUsageBase { - field_types: XpackUsageRuntimeFieldsType[] -} - -export interface XpackUsageRuntimeFieldsType { - chars_max: long - chars_total: long - count: long - doc_max: long - doc_total: long - index_count: long - lang: string[] - lines_max: long - lines_total: long - name: Field - scriptless_count: long - shadowed_count: long - source_max: long - source_total: long -} - -export interface XpackUsageSearchableSnapshots extends XpackUsageBase { - indices_count: integer - full_copy_indices_count?: integer - shared_cache_indices_count?: integer -} - -export interface XpackUsageSecurity extends XpackUsageBase { - api_key_service: XpackUsageFeatureToggle - anonymous: XpackUsageFeatureToggle - audit: XpackUsageAudit - fips_140: XpackUsageFeatureToggle - ipfilter: XpackUsageIpFilter - realms: Record - role_mapping: Record - roles: XpackUsageSecurityRoles - ssl: XpackUsageSsl - system_key?: XpackUsageFeatureToggle - token_service: XpackUsageFeatureToggle - operator_privileges: XpackUsageBase -} - -export interface XpackUsageSecurityRoles { - native: XpackUsageSecurityRolesNative - dls: XpackUsageSecurityRolesDls - file: XpackUsageSecurityRolesFile -} - -export interface XpackUsageSecurityRolesDls { - bit_set_cache: XpackUsageSecurityRolesDlsBitSetCache -} - -export interface XpackUsageSecurityRolesDlsBitSetCache { - count: integer - memory?: ByteSize - memory_in_bytes: ulong -} - -export interface XpackUsageSecurityRolesFile { - dls: boolean - fls: boolean - size: long -} - -export interface XpackUsageSecurityRolesNative { - dls: boolean - fls: boolean - size: long -} - -export interface XpackUsageSlm extends XpackUsageBase { - policy_count?: integer - policy_stats?: SlmStatistics -} - -export interface XpackUsageSql extends XpackUsageBase { - features: Record - queries: Record -} - -export interface XpackUsageSsl { - http: XpackUsageFeatureToggle - transport: XpackUsageFeatureToggle -} - -export interface XpackUsageVector extends XpackUsageBase { - dense_vector_dims_avg_count: integer - dense_vector_fields_count: integer - sparse_vector_fields_count?: integer -} - -export interface XpackUsageWatcher extends XpackUsageBase { - execution: XpackUsageWatcherActions - watch: XpackUsageWatcherWatch - count: XpackUsageCounter -} - -export interface XpackUsageWatcherActionTotals { - total: Duration - total_time_in_ms: DurationValue -} - -export interface XpackUsageWatcherActions { - actions: Record -} - -export interface XpackUsageWatcherWatch { - input: Record - condition?: Record - action?: Record - trigger: XpackUsageWatcherWatchTrigger -} - -export interface XpackUsageWatcherWatchTrigger { - schedule?: XpackUsageWatcherWatchTriggerSchedule - _all: XpackUsageCounter -} - -export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter { - cron: XpackUsageCounter - _all: XpackUsageCounter -} - -export interface SpecUtilsAdditionalProperties { -} - -export interface SpecUtilsAdditionalProperty { -} - -export interface SpecUtilsCommonQueryParameters { - error_trace?: boolean - filter_path?: string | string[] - human?: boolean - pretty?: 
boolean
-}
-
-export interface SpecUtilsCommonCatQueryParameters {
-  format?: string
-  h?: Names
-  help?: boolean
-  master_timeout?: Duration
-  s?: Names
-  v?: boolean
-}
-
-export interface SpecUtilsOverloadOf {
-}

From 7cb973a206a5cb544e29bbb250cfc6a4b6d0a14c Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Thu, 5 Dec 2024 14:12:59 -0600
Subject: [PATCH 440/647] Update changelog for 9.0.0 body param removal (#2523)

---
 docs/changelog.asciidoc | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc
index 86998f309..02745171c 100644
--- a/docs/changelog.asciidoc
+++ b/docs/changelog.asciidoc
@@ -1,6 +1,17 @@
 [[changelog-client]]
 == Release notes
 
+[discrete]
+=== 9.0.0
+
+[discrete]
+==== Breaking changes
+
+[discrete]
+===== Drop support for deprecated `body` parameter
+
+In 8.0, the top-level `body` parameter that was available on all API functions <<remove-body-key,was deprecated>>. In 9.0 this property is completely removed.
+
 [discrete]
 === 8.16.2
 
@@ -645,6 +656,7 @@ ac.abort()
 ----
 
 [discrete]
+[[remove-body-key]]
 ===== Remove the body key from the request
 
 *Breaking: Yes* | *Migration effort: Small*

From f33aa8cccd13b005e5bb46fcfad500e002682b06 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Thu, 5 Dec 2024 14:30:28 -0600
Subject: [PATCH 441/647] Publish 9.0 tags as prereleases (#2524)

---
 .github/workflows/npm-publish.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml
index cd8c2caf9..0eedbc9cf 100644
--- a/.github/workflows/npm-publish.yml
+++ b/.github/workflows/npm-publish.yml
@@ -31,8 +31,10 @@ jobs:
           version=$(jq -r .version package.json)
           gh release create \
             -n "This is a 9.0.0 pre-release alpha. Changes may not be stable." \
+            --latest=false \
+            --prerelease \
             --target "$BRANCH_NAME" \
-            -t "v$version" \
+            --title "v$version" \
             "v$version"
         env:
           BRANCH_NAME: ${{ github.event.inputs.branch }}

From ed3cace127d28567dab1ea395f5cc7abe56e486d Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Tue, 10 Dec 2024 09:58:14 -0600
Subject: [PATCH 442/647] Checkout correct branch of generator (#2531)

---
 .github/make.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/make.sh b/.github/make.sh
index c4d455168..acaef57ce 100755
--- a/.github/make.sh
+++ b/.github/make.sh
@@ -176,7 +176,7 @@ else
     --rm \
     $product \
     /bin/bash -c "cd /usr/src && \
-      git clone https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-js.git && \
+      git clone --branch $GENERATOR_BRANCH https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-js.git && \
       mkdir -p /usr/src/elastic-client-generator-js/output && \
       cd /usr/src/elasticsearch-js && \
       node .buildkite/make.mjs --task $TASK ${TASK_ARGS[*]}"

From bfdae663334ee11d2b638a11b8950ebfd20f0950 Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Tue, 10 Dec 2024 16:03:40 +0000
Subject: [PATCH 443/647] Auto-generated code for main (#2530)

---
 .../53d9d2ec9cb8d211772d764e76fe6890.asciidoc |  18 ++
 .../931817b168e055ecf738785c721125dd.asciidoc |  32 ++++
 .../d29031409016b2b798148ef173a196ae.asciidoc |  24 +++
 docs/reference.asciidoc                       | 173 ++++++++++++++----
 src/api/api/cluster.ts                        |  22 +--
 src/api/api/health_report.ts                  |   2 +-
 src/api/api/nodes.ts                          |  14 +-
 src/api/api/ping.ts                           |   2 +-
 8 files changed, 236 insertions(+), 51 deletions(-)
 create mode 100644 docs/doc_examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc
 create mode 100644 docs/doc_examples/931817b168e055ecf738785c721125dd.asciidoc
 create mode 100644
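A minimal sketch of the migration that the changelog entry in PATCH 440 describes, assuming a typical `search` request (the exact properties that move to the top level vary per API):

[source, js]
----
// 8.x style: request body nested under `body` (deprecated in 8.0, removed in 9.0)
const legacy = await client.search({
  index: "my-index",
  body: {
    query: { match: { title: "search terms" } },
  },
});

// 9.0 style: the same properties sit directly on the request object
const current = await client.search({
  index: "my-index",
  query: { match: { title: "search terms" } },
});
----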
docs/doc_examples/d29031409016b2b798148ef173a196ae.asciidoc

diff --git a/docs/doc_examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc b/docs/doc_examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc
new file mode 100644
index 000000000..58eee5923
--- /dev/null
+++ b/docs/doc_examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc
@@ -0,0 +1,18 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.simulate({
+  id: "query_helper_pipeline",
+  docs: [
+    {
+      _source: {
+        content:
+          "artificial intelligence in medicine articles published in the last 12 months",
+      },
+    },
+  ],
+});
+console.log(response);
+----

diff --git a/docs/doc_examples/931817b168e055ecf738785c721125dd.asciidoc b/docs/doc_examples/931817b168e055ecf738785c721125dd.asciidoc
new file mode 100644
index 000000000..3d34dce97
--- /dev/null
+++ b/docs/doc_examples/931817b168e055ecf738785c721125dd.asciidoc
@@ -0,0 +1,32 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.ingest.putPipeline({
+  id: "query_helper_pipeline",
+  processors: [
+    {
+      script: {
+        source:
+          "ctx.prompt = 'Please generate an elasticsearch search query on index `articles_index` for the following natural language query. Dates are in the field `@timestamp`, document types are in the field `type` (options are `news`, `publication`), categories in the field `category` and can be multiple (options are `medicine`, `pharmaceuticals`, `technology`), and document names are in the field `title` which should use a fuzzy match. Ignore fields which cannot be determined from the natural language query context: ' + ctx.content",
+      },
+    },
+    {
+      inference: {
+        model_id: "openai_chat_completions",
+        input_output: {
+          input_field: "prompt",
+          output_field: "query",
+        },
+      },
+    },
+    {
+      remove: {
+        field: "prompt",
+      },
+    },
+  ],
+});
+console.log(response);
+----

diff --git a/docs/doc_examples/d29031409016b2b798148ef173a196ae.asciidoc b/docs/doc_examples/d29031409016b2b798148ef173a196ae.asciidoc
new file mode 100644
index 000000000..fac02d172
--- /dev/null
+++ b/docs/doc_examples/d29031409016b2b798148ef173a196ae.asciidoc
@@ -0,0 +1,24 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "test-index",
+  query: {
+    semantic: {
+      field: "my_semantic_field",
+    },
+  },
+  highlight: {
+    fields: {
+      my_semantic_field: {
+        type: "semantic",
+        number_of_fragments: 2,
+        order: "score",
+      },
+    },
+  },
+});
+console.log(response);
+----

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index ddbff0a0b..4b7dae886 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -531,7 +531,24 @@ client.getSource({ id, index })
 
 [discrete]
 === health_report
-Returns the health of the cluster.
+Get the cluster health.
+Get a report with the health status of an Elasticsearch cluster.
+The report contains a list of indicators that compose Elasticsearch functionality.
+
+Each indicator has a health status of: green, unknown, yellow or red.
+The indicator will provide an explanation and metadata describing the reason for its current health status.
+
+The cluster’s status is controlled by the worst indicator status.
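A minimal sketch of calling the health report API documented in this hunk (its description continues below); `verbose: false` skips the costlier root-cause analysis, per the note later in this section:

[source, js]
----
const report = await client.healthReport({ verbose: false });
console.log(report.status);
for (const [name, indicator] of Object.entries(report.indicators)) {
  console.log(name, indicator.status);
}
----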
+ +In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. +Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. + +Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. +The root cause and remediation steps are encapsulated in a diagnosis. +A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. + +NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. +When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. {ref}/health-api.html[Endpoint documentation] [source,ts] @@ -825,7 +842,7 @@ If `true`, the point in time will contain all the shards that are available at t [discrete] === ping Ping the cluster. -Returns whether the cluster is running. +Get information about whether the cluster is running. {ref}/index.html[Endpoint documentation] [source,ts] @@ -2658,7 +2675,11 @@ client.ccr.unfollow({ index }) === cluster [discrete] ==== allocation_explain -Provides explanations for shard allocations in the cluster. +Explain the shard allocations. +Get explanations for shard allocations in the cluster. +For unassigned shards, it provides an explanation for why the shard is unassigned. +For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. +This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. {ref}/cluster-allocation-explain.html[Endpoint documentation] [source,ts] @@ -2701,7 +2722,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== delete_voting_config_exclusions -Clears cluster voting config exclusions. +Clear cluster voting config exclusions. +Remove master-eligible nodes from the voting configuration exclusion list. {ref}/voting-config-exclusions.html[Endpoint documentation] [source,ts] @@ -2769,7 +2791,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== get_settings -Returns cluster-wide settings. +Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. {ref}/cluster-get-settings.html[Endpoint documentation] @@ -2791,8 +2813,16 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== health -The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. -The cluster health status is: green, yellow or red. 
On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status. +Get the cluster health status. +You can also use the API to get the health status of only specified data streams and indices. +For data streams, the API retrieves the health status of the stream’s backing indices. + +The cluster health status is: green, yellow or red. +On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. +The index level status is controlled by the worst shard status. + +One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. +The cluster status is controlled by the worst index status. {ref}/cluster-health.html[Endpoint documentation] [source,ts] @@ -2836,9 +2866,11 @@ client.cluster.info({ target }) [discrete] ==== pending_tasks -Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed. +Get the pending cluster tasks. +Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. + NOTE: This API returns a list of any pending updates to the cluster state. -These are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. +These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. {ref}/cluster-pending.html[Endpoint documentation] @@ -2858,7 +2890,24 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== post_voting_config_exclusions -Updates the cluster voting config exclusions by node ids or node names. +Update voting configuration exclusions. +Update the cluster voting config exclusions by node IDs or node names. +By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. +If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. +The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. +It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. + +Clusters should have no voting configuration exclusions in normal operation. +Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. +This API waits for the nodes to be fully removed from the cluster before it returns. 
+If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. + +A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. +If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. +In that case, you may safely retry the call. + +NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. +They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. {ref}/voting-config-exclusions.html[Endpoint documentation] [source,ts] @@ -2929,7 +2978,24 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_settings -Updates the cluster settings. +Update the cluster settings. +Configure and update dynamic settings on a running cluster. +You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. + +Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. +You can also reset transient or persistent settings by assigning them a null value. + +If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. +For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. +However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. + +TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. +If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. +Only use `elasticsearch.yml` for static cluster settings and node settings. +The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. + +WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. +If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. {ref}/cluster-update-settings.html[Endpoint documentation] [source,ts] @@ -2949,9 +3015,9 @@ client.cluster.putSettings({ ... }) [discrete] ==== remote_info -The cluster remote info API allows you to retrieve all of the configured -remote cluster information. It returns connection and endpoint information -keyed by the configured remote cluster alias. +Get remote cluster information. +Get all of the configured remote cluster information. +This API returns connection and endpoint information keyed by the configured remote cluster alias. 
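A hedged sketch of the settings precedence described in the `put_settings` section above; the recovery throttle key is only an assumed example, and assigning `null` resets the override (the remote cluster info reference continues below):

[source, js]
----
// A persistent setting survives restarts and overrides elasticsearch.yml
await client.cluster.putSettings({
  persistent: { "indices.recovery.max_bytes_per_sec": "50mb" },
});

// Assigning null removes the persistent override again
await client.cluster.putSettings({
  persistent: { "indices.recovery.max_bytes_per_sec": null },
});
----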
{ref}/cluster-remote-info.html[Endpoint documentation] [source,ts] @@ -2962,7 +3028,20 @@ client.cluster.remoteInfo() [discrete] ==== reroute -Allows to manually change the allocation of individual shards in the cluster. +Reroute the cluster. +Manually change the allocation of individual shards in the cluster. +For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. + +It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. +For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. + +The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. +If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. + +The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. +This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. + +Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. {ref}/cluster-reroute.html[Endpoint documentation] [source,ts] @@ -2975,8 +3054,9 @@ client.cluster.reroute({ ... }) * *Request (object):* ** *`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])*: Defines the commands to perform. -** *`dry_run` (Optional, boolean)*: If true, then the request simulates the operation only and returns the resulting state. -** *`explain` (Optional, boolean)*: If true, then the response contains an explanation of why the commands can or cannot be executed. +** *`dry_run` (Optional, boolean)*: If true, then the request simulates the operation. +It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. +** *`explain` (Optional, boolean)*: If true, then the response contains an explanation of why the commands can or cannot run. ** *`metric` (Optional, string | string[])*: Limits the information returned to the specified metrics. ** *`retry_failed` (Optional, boolean)*: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -2984,7 +3064,25 @@ client.cluster.reroute({ ... }) [discrete] ==== state -Returns a comprehensive information about the state of the cluster. +Get the cluster state. +Get comprehensive information about the state of the cluster. 
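A hedged sketch of the reroute call documented above, reusing the node1/node2 move from the description; `dry_run` computes the resulting state without applying it (the cluster state documentation continues below):

[source, js]
----
const result = await client.cluster.reroute({
  dry_run: true,
  commands: [
    {
      move: { index: "my-index", shard: 0, from_node: "node1", to_node: "node2" },
    },
  ],
});
console.log(result);
----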
+ +The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. + +The elected master node ensures that every node in the cluster has a copy of the same cluster state. +This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. +You may need to consult the Elasticsearch source code to determine the precise meaning of the response. + +By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. +You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. + +Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. +If you use this API repeatedly, your cluster may become unstable. + +WARNING: The response is a representation of an internal data structure. +Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. +Do not query this API using external monitoring tools. +Instead, obtain the information you require using other more stable cluster APIs. {ref}/cluster-state.html[Endpoint documentation] [source,ts] @@ -3009,8 +3107,8 @@ client.cluster.state({ ... }) [discrete] ==== stats -Returns cluster statistics. -It returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). +Get cluster statistics. +Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). {ref}/cluster-stats.html[Endpoint documentation] [source,ts] @@ -8487,7 +8585,8 @@ client.monitoring.bulk({ system_id, system_api_version, interval }) === nodes [discrete] ==== clear_repositories_metering_archive -You can use this API to clear the archived repositories metering information in the cluster. +Clear the archived repositories metering. +Clear the archived repositories metering information in the cluster. {ref}/clear-repositories-metering-archive-api.html[Endpoint documentation] [source,ts] @@ -8505,10 +8604,10 @@ All the nodes selective options are explained [here](https://www.elastic.co/guid [discrete] ==== get_repositories_metering_info -You can use the cluster repositories metering API to retrieve repositories metering information in a cluster. -This API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the -information needed to compute aggregations over a period of time. Additionally, the information exposed by this -API is volatile, meaning that it won’t be present after node restarts. +Get cluster repositories metering. +Get repositories metering information for a cluster. +This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. 
+Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. {ref}/get-repositories-metering-api.html[Endpoint documentation] [source,ts] @@ -8525,8 +8624,9 @@ All the nodes selective options are explained [here](https://www.elastic.co/guid [discrete] ==== hot_threads -This API yields a breakdown of the hot threads on each selected node in the cluster. -The output is plain text with a breakdown of each node’s top hot threads. +Get the hot threads for nodes. +Get a breakdown of the hot threads on each selected node in the cluster. +The output is plain text with a breakdown of the top hot threads for each node. {ref}/cluster-nodes-hot-threads.html[Endpoint documentation] [source,ts] @@ -8554,7 +8654,8 @@ before the timeout expires, the request fails and returns an error. [discrete] ==== info -Returns cluster nodes information. +Get node information. +By default, the API returns all attributes and core settings for cluster nodes. {ref}/cluster-nodes-info.html[Endpoint documentation] [source,ts] @@ -8574,7 +8675,15 @@ client.nodes.info({ ... }) [discrete] ==== reload_secure_settings -Reloads the keystore on nodes in the cluster. +Reload the keystore on nodes in the cluster. + +Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. +That is, you can change them on disk and reload them without restarting any nodes in the cluster. +When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. + +When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. +Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. +Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. {ref}/secure-settings.html[Endpoint documentation] [source,ts] @@ -8593,7 +8702,9 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== stats -Returns cluster nodes statistics. +Get node statistics. +Get statistics for nodes in a cluster. +By default, all stats are returned. You can limit the returned information by using metrics. {ref}/cluster-nodes-stats.html[Endpoint documentation] [source,ts] @@ -8621,7 +8732,7 @@ client.nodes.stats({ ... }) [discrete] ==== usage -Returns information on the usage of features. +Get feature usage information. {ref}/cluster-nodes-usage.html[Endpoint documentation] [source,ts] diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index da9f656c7..75bdb4107 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -44,7 +44,7 @@ export default class Cluster { } /** - * Provides explanations for shard allocations in the cluster. + * Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. 
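   * @example
   * // Illustrative sketch (not from the generated source; index name assumed):
   * // explain why a replica of shard 0 is unassigned
   * const explanation = await client.cluster.allocationExplain({
   *   index: 'my-index',
   *   shard: 0,
   *   primary: false
   * })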
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html | Elasticsearch API documentation} */ async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -110,7 +110,7 @@ export default class Cluster { } /** - * Clears cluster voting config exclusions. + * Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html | Elasticsearch API documentation} */ async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -212,7 +212,7 @@ export default class Cluster { } /** - * Returns cluster-wide settings. By default, it returns only settings that have been explicitly defined. + * Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-get-settings.html | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -242,7 +242,7 @@ export default class Cluster { } /** - * The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status. + * Get the cluster health status. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status. One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html | Elasticsearch API documentation} */ async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -314,7 +314,7 @@ export default class Cluster { } /** - * Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. 
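   * @example
   * // Illustrative sketch (not from the generated source): list cluster state
   * // updates that have not yet taken effect
   * const { tasks } = await client.cluster.pendingTasks()
   * for (const task of tasks) console.log(task.priority, task.source)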
However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. + * Get the pending cluster tasks. Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.html | Elasticsearch API documentation} */ async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -344,7 +344,7 @@ export default class Cluster { } /** - * Updates the cluster voting config exclusions by node ids or node names. + * Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call. NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html | Elasticsearch API documentation} */ async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -410,7 +410,7 @@ export default class Cluster { } /** - * Updates the cluster settings. + * Update the cluster settings. Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value. If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html | Elasticsearch API documentation} */ async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -444,7 +444,7 @@ export default class Cluster { } /** - * The cluster remote info API allows you to retrieve all of the configured remote cluster information. It returns connection and endpoint information keyed by the configured remote cluster alias. + * Get remote cluster information. Get all of the configured remote cluster information. This API returns connection and endpoint information keyed by the configured remote cluster alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html | Elasticsearch API documentation} */ async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -474,7 +474,7 @@ export default class Cluster { } /** - * Allows to manually change the allocation of individual shards in the cluster. + * Reroute the cluster. Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. 
@@ -508,7 +508,7 @@ export default class Cluster {
   }
 
   /**
-   * Returns a comprehensive information about the state of the cluster.
+   * Get the cluster state. Get comprehensive information about the state of the cluster. The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. You may need to consult the Elasticsearch source code to determine the precise meaning of the response. By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable. WARNING: The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html | Elasticsearch API documentation}
    */
   async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterStateResponse>
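The `?local=true` escape hatch described above maps onto a plain request option in the JavaScript client (a sketch assuming an existing `client`):

[source,js]
----
// Read the cluster state held on the handling node rather than the
// elected master, and scope the response to a single metric.
const response = await client.cluster.state({
  metric: 'metadata',
  local: true,
});
----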
@@ -552,7 +552,7 @@ export default class Cluster {
   }
 
   /**
-   * Returns cluster statistics. It returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).
+   * Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html | Elasticsearch API documentation}
    */
   async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterStatsResponse>
diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts
index 0539d0727..f3b76edd0 100644
--- a/src/api/api/health_report.ts
+++ b/src/api/api/health_report.ts
@@ -38,7 +38,7 @@ import * as T from '../types'
 interface That { transport: Transport }
 
 /**
- * Returns the health of the cluster.
+ * Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. The cluster’s status is controlled by the worst indicator status. In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/health-api.html | Elasticsearch API documentation}
  */
 export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.HealthReportResponse>
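The cheap-polling advice in the new health description translates to something like this (a sketch assuming an existing `client`):

[source,js]
----
// Poll cluster health without the expensive root-cause analysis.
const response = await client.healthReport({ verbose: false });
console.log(response.status); // green, yellow, red, or unknown
----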
diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts
index 3f6a51e20..3cb956ad5 100644
--- a/src/api/api/nodes.ts
+++ b/src/api/api/nodes.ts
@@ -44,7 +44,7 @@ export default class Nodes {
   }
 
   /**
-   * You can use this API to clear the archived repositories metering information in the cluster.
+   * Clear the archived repositories metering. Clear the archived repositories metering information in the cluster.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-repositories-metering-archive-api.html | Elasticsearch API documentation}
    */
   async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
@@ -77,7 +77,7 @@ export default class Nodes {
   }
 
   /**
-   * You can use the cluster repositories metering API to retrieve repositories metering information in a cluster. This API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it won’t be present after node restarts.
+   * Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-repositories-metering-api.html | Elasticsearch API documentation}
    */
   async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesGetRepositoriesMeteringInfoResponse>
@@ -109,7 +109,7 @@
   }
 
   /**
-   * This API yields a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of each node’s top hot threads.
+   * Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html | Elasticsearch API documentation}
    */
   async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesHotThreadsResponse>
@@ -149,7 +149,7 @@
   }
 
   /**
-   * Returns cluster nodes information.
+   * Get node information. By default, the API returns all attributes and core settings for cluster nodes.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html | Elasticsearch API documentation}
    */
   async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesInfoResponse>
@@ -196,7 +196,7 @@
   }
 
   /**
-   * Reloads the keystore on nodes in the cluster.
+   * Reload the keystore on nodes in the cluster. Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings | Elasticsearch API documentation}
    */
   async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesReloadSecureSettingsResponse>
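A sketch of the keystore reload described above; the password value is a placeholder and is only required when the keystore is password protected:

[source,js]
----
// Reload reloadable secure settings on every node in the cluster.
const response = await client.nodes.reloadSecureSettings({
  secure_settings_password: 'keystore-password',
});
----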
@@ -240,7 +240,7 @@
   }
 
   /**
-   * Returns cluster nodes statistics.
+   * Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html | Elasticsearch API documentation}
    */
   async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesStatsResponse>
@@ -294,7 +294,7 @@
   }
 
   /**
-   * Returns information on the usage of features.
+   * Get feature usage information.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html | Elasticsearch API documentation}
    */
   async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesUsageResponse>
diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts
index 8552b1b58..c9a086011 100644
--- a/src/api/api/ping.ts
+++ b/src/api/api/ping.ts
@@ -38,7 +38,7 @@ import * as T from '../types'
 interface That { transport: Transport }
 
 /**
- * Ping the cluster. Returns whether the cluster is running.
+ * Ping the cluster. Get information about whether the cluster is running.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation}
  */
 export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PingResponse>

From e992c329c357dda135f22eadbb058b1fc52d2082 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Tue, 10 Dec 2024 10:32:18 -0600
Subject: [PATCH 444/647] Parse branch name during code gen including 'x' (#2534)

---
 .github/make.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/make.sh b/.github/make.sh
index acaef57ce..d8d9cc391 100755
--- a/.github/make.sh
+++ b/.github/make.sh
@@ -65,7 +65,7 @@ codegen)
   if [ -v "$VERSION" ] || [[ -z "$VERSION" ]]; then
     # fall back to branch name or `main` if no VERSION is set
     branch_name=$(git rev-parse --abbrev-ref HEAD)
-    if [[ "$branch_name" =~ ^[0-9]+\.[0-9]+ ]]; then
+    if [[ "$branch_name" =~ ^[0-9]+\.([0-9]+|x) ]]; then
      echo -e "\033[36;1mTARGET: codegen -> No VERSION argument found, using branch name: \`$branch_name\`\033[0m"
      VERSION="$branch_name"
    else
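A quick way to sanity-check the widened pattern from this commit (the test strings are illustrative):

[source,js]
----
// The updated expression also accepts lenient branch names such as "8.x".
const pattern = /^[0-9]+\.([0-9]+|x)/;
for (const name of ['8.17', '8.x', 'main']) {
  console.log(name, pattern.test(name)); // 8.17 true, 8.x true, main false
}
----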
From c490dd08219d3d87852483f3dd73162d7d855bae Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Thu, 12 Dec 2024 09:35:45 -0600
Subject: [PATCH 445/647] Update changelog for 8.16.3 (#2541)

---
 docs/changelog.asciidoc | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc
index 02745171c..695668f29 100644
--- a/docs/changelog.asciidoc
+++ b/docs/changelog.asciidoc
@@ -12,6 +12,17 @@ In 8.0, the top-level `body` parameter that was available on all API functions
 <>. In 9.0 this property is completely removed.
 
+[discrete]
+=== 8.16.3
+
+[discrete]
+==== Fixes
+
+[discrete]
+===== Improved support for Elasticsearch `v8.16`
+
+Updated TypeScript types based on fixes and improvements to the Elasticsearch specification.
+
 [discrete]
 === 8.16.2

From 100be27ad1e50ec27f952d7d7ab528c6bced2f64 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Thu, 12 Dec 2024 11:48:57 -0600
Subject: [PATCH 446/647] Update changelog for 8.17.0 (#2545)

---
 docs/changelog.asciidoc | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc
index 695668f29..dec73d23e 100644
--- a/docs/changelog.asciidoc
+++ b/docs/changelog.asciidoc
@@ -12,6 +12,18 @@ In 8.0, the top-level `body` parameter that was available on all API functions
 <>. In 9.0 this property is completely removed.
 
+[discrete]
+=== 8.17.0
+
+[discrete]
+==== Features
+
+[discrete]
+===== Support for Elasticsearch `v8.17`
+
+You can find all the API changes
+https://www.elastic.co/guide/en/elasticsearch/reference/8.17/release-notes-8.17.0.html[here].
+
 [discrete]
 === 8.16.3

From 3a288cf86b4a1b4dc68463b907e69d7ec0e743e9 Mon Sep 17 00:00:00 2001
From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com>
Date: Mon, 6 Jan 2025 12:31:13 -0600
Subject: [PATCH 447/647] Update buildkite plugin junit-annotate to v2.6.0 (#2554)

Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com>
---
 .buildkite/pipeline.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index eee6e4387..c5146fc68 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -25,7 +25,7 @@ steps:
       provider: "gcp"
       image: family/core-ubuntu-2204
     plugins:
-      - junit-annotate#v2.5.0:
+      - junit-annotate#v2.6.0:
          artifacts: "junit-output/junit-*.xml"
          job-uuid-file-pattern: "junit-(.*).xml"
          fail-build-on-error: true

From b8c3ac446e7248ef99e4ea75fee14fac2c712581 Mon Sep 17 00:00:00 2001
From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com>
Date: Mon, 6 Jan 2025 12:34:18 -0600
Subject: [PATCH 448/647] Update peter-evans/create-pull-request digest to 67ccf78 (#2556)

Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com>
---
 .github/workflows/serverless-patch.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml
index 935ef28d5..c83e3ec0e 100644
--- a/.github/workflows/serverless-patch.yml
+++ b/.github/workflows/serverless-patch.yml
@@ -42,7 +42,7 @@ jobs:
       - name: Apply patch from stack to serverless
        id: apply-patch
        run: $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh
-      - uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v7
+      - uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f # v7
        with:
          token: ${{ secrets.GH_TOKEN }}
          path: serverless

From e688f363966faa5a9b42d0d8fa106d0de2bfc436 Mon Sep 17 00:00:00 2001
From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com>
Date: Mon, 6 Jan 2025 12:39:44 -0600
Subject: [PATCH 449/647] Update dependency @elastic/request-converter to v8.17.0 (#2555)

Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com>
---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 68e183f6c..7be026a22 100644
--- a/package.json
+++ b/package.json
@@ -56,7 +56,7 @@
     "node": ">=18"
   },
   "devDependencies": {
-    "@elastic/request-converter": "8.16.2",
+    "@elastic/request-converter": "8.17.0",
     "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1",
     "@types/debug": "4.1.12",
     "@types/ms": "0.7.34",
     "@types/node": "22.10.1",

From f835fa3b12358f985bbe2f8d4851429e0a443391 Mon Sep 17 00:00:00 2001
From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com>
Date: Mon, 6 Jan 2025 12:50:24 -0600
Subject: [PATCH 450/647] Update @sinonjs/fake-timers digest to 48f089f (#2527)

Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com>
---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 7be026a22..b8219983d 100644
--- a/package.json
+++ b/package.json
@@ -57,7 +57,7 @@
   },
   "devDependencies": {
     "@elastic/request-converter": "8.17.0",
-    "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1",
+    "@sinonjs/fake-timers": "github:sinonjs/fake-timers#48f089f",
     "@types/debug": "4.1.12",
     "@types/ms": "0.7.34",
     "@types/node": "22.10.1",

From 0ee486bc9c86e52d23a6a801ef802304f395bd63 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Tue, 7 Jan 2025 11:06:18 -0600
Subject: [PATCH 451/647] Drop @types/tap for built-in types (#2561)

---
 package.json | 1 -
 1 file changed, 1 deletion(-)

diff --git a/package.json b/package.json
index b8219983d..baaae155f 100644
--- a/package.json
+++ b/package.json
@@ -64,7 +64,6 @@
     "@types/sinonjs__fake-timers": "8.1.5",
     "@types/split2": "4.2.3",
     "@types/stoppable": "1.1.3",
-    "@types/tap": "15.0.12",
     "chai": "5.1.2",
     "cross-zip": "4.0.1",
     "desm": "1.3.1",

From b95aa986b78758746e3a904646168800d8465cb7 Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Tue, 7 Jan 2025 17:48:46 +0000
Subject: [PATCH 452/647] Auto-generated code for main (#2551)

---
 ...0722b302b2b3275a988d858044f99d5d.asciidoc} | 7 +-
 ...074e4602d1ca54412380a40867d078bc.asciidoc} | 2 +
 .../082e78c7a2061a7c4a52b494e5ede0e8.asciidoc | 42 +
 ...0c52af573c9401a2a687e86a4beb182b.asciidoc} | 2 +-
 .../0e83f140237d75469a428ff403564bb5.asciidoc | 15 -
 .../1420a22aa817c7a996baaed0ad366d6f.asciidoc | 22 -
 .../246763219ec06172f7aa57bba28d344a.asciidoc | 67 +
 .../2a21674c40f9b182a8944769d20b2357.asciidoc | 26 +
 .../2a67608dadbf220a2f040f3a79d3677d.asciidoc | 35 +
 ...3312c82f81816bf76629db9582991812.asciidoc} | 1 +
 .../37f367ca81a16d3aef4ef7126ec33a2e.asciidoc | 67 +
 .../3ea4c971b3f47735dcc207ee2645fa03.asciidoc | 16 +
 .../3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc | 18 +
 ...48e142e6c69014e0509d4c9251749d77.asciidoc} | 3 +-
 .../49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc | 17 -
 .../5a70db31f587b7ffed5e9bc1445430cb.asciidoc | 22 -
 ...6b67c6121efb86ee100d40c2646f77b5.asciidoc} | 4 +-
 ...6fa02c2ad485bbe91f44b321158250f3.asciidoc} | 7 +
 .../730045fae3743c39b612813a42c330c3.asciidoc | 24 +
 .../7478ff69113fb53f41ea07cdf911fa67.asciidoc | 33 +
 .../74b229a6e020113e5749099451979c89.asciidoc | 26 -
 .../7dd0d9cc6c5982a2c003d301e90feeba.asciidoc | 37 +
 ...80135e8c644e34cc70ce8a4e7915d1a2.asciidoc} | 2 +-
 .../8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc | 39 +
 .../8cad5d95a0e7c103f08be53d0b172558.asciidoc | 22 -
 ...93c77c65f1e11382f8043d0300e87b89.asciidoc} | 2 +-
 .../9cc952d4a03264b700136cbc45abc8c6.asciidoc | 30 +
 .../b590241c4296299b836fbb5a95bdd2dc.asciidoc | 18 +
 .../b6d278737d27973e498ac61cda9e5126.asciidoc | 21 +
 .../bccd4eb26b1a325d103b12e198a13c08.asciidoc | 12 +
 .../bdc55256fa5f701680631a149dbb75a9.asciidoc | 22 +
 .../bdd28276618235487ac96bd6679bc206.asciidoc | 31 +
 ...cecfaa659af6646b3b67d7b311586fa0.asciidoc} | 2 +-
 ...d5242b1ab0213f25e5e0742032274ce6.asciidoc} | 2 +-
 ...df81b88a2192dd6f9912e0c948a44487.asciidoc} | 2 +-
 .../e375c7da666276c4df6664c6821cd5f4.asciidoc | 29 +
 ...e77c2f41a7eca765b0c5f734a66d919f.asciidoc} | 2 +-
 ...ea8c4229afa6dd4f1321355542be9912.asciidoc} | 2 +-
 ...ec4b43c3ebd8816799fa004596b2f0cb.asciidoc} | 3 +-
 docs/reference.asciidoc | 1387 ++++++++++++++---
 src/api/api/ccr.ts | 26 +-
 src/api/api/connector.ts | 72 +-
 src/api/api/eql.ts | 2 +-
 src/api/api/features.ts | 4 +-
 src/api/api/ilm.ts | 22 +-
 src/api/api/indices.ts | 40 +-
 src/api/api/inference.ts | 2 +-
 src/api/api/ingest.ts | 45 +-
 src/api/api/license.ts | 14 +-
 src/api/api/logstash.ts | 6 +-
 src/api/api/migration.ts | 6 +-
 src/api/api/ml.ts | 14 +-
 src/api/api/monitoring.ts | 2 +-
 src/api/api/rollup.ts | 16 +-
 src/api/api/search_application.ts | 44 +-
 src/api/api/searchable_snapshots.ts | 8 +-
 src/api/api/security.ts | 59 +-
 src/api/api/shutdown.ts | 6 +-
 src/api/api/slm.ts | 18 +-
 src/api/api/snapshot.ts | 24 +-
 src/api/api/tasks.ts | 6 +-
 src/api/api/text_structure.ts | 36 +-
 src/api/api/transform.ts | 2 +-
 src/api/api/watcher.ts | 22 +-
 src/api/api/xpack.ts | 4 +-
 src/api/types.ts | 489 +++++-
 66 files changed, 2459 insertions(+), 651 deletions(-)
 rename docs/doc_examples/{e20037f66bf54bcac7d10f536f031f34.asciidoc => 0722b302b2b3275a988d858044f99d5d.asciidoc} (53%)
 rename docs/doc_examples/{844928da2ff9a1394af5347a5e2e4f78.asciidoc => 074e4602d1ca54412380a40867d078bc.asciidoc} (85%)
 create mode 100644 docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc
 rename docs/doc_examples/{160986f49758f4e8345d183a842f6351.asciidoc => 0c52af573c9401a2a687e86a4beb182b.asciidoc} (92%)
 delete mode 100644 docs/doc_examples/0e83f140237d75469a428ff403564bb5.asciidoc
 delete mode 100644 docs/doc_examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc
 create mode 100644 docs/doc_examples/246763219ec06172f7aa57bba28d344a.asciidoc
 create mode 100644 docs/doc_examples/2a21674c40f9b182a8944769d20b2357.asciidoc
 create mode 100644 docs/doc_examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc
 rename docs/doc_examples/{23af230e824f48b9cd56a4cf973d788c.asciidoc => 3312c82f81816bf76629db9582991812.asciidoc} (93%)
 create mode 100644 docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc
 create mode 100644 docs/doc_examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc
 create mode 100644 docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc
 rename docs/doc_examples/{9f16fca9813304e398ee052aa857dbcd.asciidoc => 48e142e6c69014e0509d4c9251749d77.asciidoc} (85%)
 delete mode 100644 docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc
 delete mode 100644 docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc
 rename docs/doc_examples/{34cdeefb09bbbe5206957a8bc1bd513d.asciidoc => 6b67c6121efb86ee100d40c2646f77b5.asciidoc} (69%)
 rename docs/doc_examples/{f9ee5d55a73f4c1fe7d507609047aefd.asciidoc => 6fa02c2ad485bbe91f44b321158250f3.asciidoc} (76%)
 create mode 100644 docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc
 create mode 100644 docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc
 delete mode 100644 docs/doc_examples/74b229a6e020113e5749099451979c89.asciidoc
 create mode 100644 docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc
 rename docs/doc_examples/{f4d0ef2e0f76babee83d999fe35127f2.asciidoc => 80135e8c644e34cc70ce8a4e7915d1a2.asciidoc} (96%)
 create mode 100644 docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc
 delete mode 100644 docs/doc_examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc
 rename docs/doc_examples/{8593715fcc70315a0816b435551258e0.asciidoc => 93c77c65f1e11382f8043d0300e87b89.asciidoc} (90%)
 create mode 100644 docs/doc_examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc
 create mode 100644 docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc
 create mode 100644 docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc
 create mode 100644 docs/doc_examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc
 create mode 100644 docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc
 create mode 100644 docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc
 rename docs/doc_examples/{b26b5574438e4eaf146b2428bf537c51.asciidoc => cecfaa659af6646b3b67d7b311586fa0.asciidoc} (96%)
 rename docs/doc_examples/{35fd9549350926f8d57dc1765e2f40d3.asciidoc => d5242b1ab0213f25e5e0742032274ce6.asciidoc} (96%)
 rename docs/doc_examples/{a225fc8c134cb21a85bc6025dac9368b.asciidoc => df81b88a2192dd6f9912e0c948a44487.asciidoc} (92%)
 create mode 100644 docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc
 rename docs/doc_examples/{5ba32ebaa7ee28a339c7693696d305ca.asciidoc => e77c2f41a7eca765b0c5f734a66d919f.asciidoc} (93%)
 rename docs/doc_examples/{bb5a1319c496acc862c670cc7224e59a.asciidoc => ea8c4229afa6dd4f1321355542be9912.asciidoc} (96%)
 rename docs/doc_examples/{2f07b81fd47ec3b074242a760f0c4e9e.asciidoc => ec4b43c3ebd8816799fa004596b2f0cb.asciidoc} (80%)

diff --git a/docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc b/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc
similarity index 53%
rename from docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc
rename to docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc
index 3b4f9251b..84abd3971 100644
--- a/docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc
+++ b/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc
@@ -3,11 +3,8 @@
 [source, js]
 ----
-const response = await client.indices.putSettings({
-  index: "my-index-000001",
-  settings: {
-    "index.blocks.read_only_allow_delete": null,
-  },
+const response = await client.indices.getMapping({
+  index: "kibana_sample_data_ecommerce",
 });
 console.log(response);
 ----
diff --git a/docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc b/docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc
similarity index 85%
rename from docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc
rename to docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc
index b0acfaa1d..9d98d539a 100644
--- a/docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc
+++ b/docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc
@@ -11,6 +11,8 @@ const response = await client.indices.putSettings({
     "index.indexing.slowlog.threshold.index.debug": "2s",
     "index.indexing.slowlog.threshold.index.trace": "500ms",
     "index.indexing.slowlog.source": "1000",
+    "index.indexing.slowlog.reformat": true,
+    "index.indexing.slowlog.include.user": true,
   },
 });
 console.log(response);
diff --git a/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc b/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc
new file mode 100644
index 000000000..269067032
--- /dev/null
+++ b/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc
@@ -0,0 +1,42 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.indices.create({
+  index: "my-rank-vectors-bit",
+  mappings: {
+    properties: {
+      my_vector: {
+        type: "rank_vectors",
+        element_type: "bit",
+      },
+    },
+  },
+});
+console.log(response);
+
+const response1 = await client.bulk({
+  index: "my-rank-vectors-bit",
+  refresh: "true",
+  operations: [
+    {
+      index: {
+        _id: "1",
+      },
+    },
+    {
+      my_vector: [127, -127, 0, 1, 42],
+    },
+    {
+      index: {
+        _id: "2",
+      },
+    },
+    {
+      my_vector: "8100012a7f",
+    },
+  ],
+});
+console.log(response1);
+----
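The new example above indexes one bit vector as an array of signed bytes and another as a hex string. A sketch of how such a hex value maps to bytes in Node.js:

[source,js]
----
// "8100012a7f" decodes to the signed bytes [-127, 0, 1, 42, 127].
const bytes = new Int8Array(Buffer.from('8100012a7f', 'hex'));
console.log(Array.from(bytes)); // [ -127, 0, 1, 42, 127 ]
----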
docs/doc_examples/{b26b5574438e4eaf146b2428bf537c51.asciidoc => cecfaa659af6646b3b67d7b311586fa0.asciidoc} (96%) rename docs/doc_examples/{35fd9549350926f8d57dc1765e2f40d3.asciidoc => d5242b1ab0213f25e5e0742032274ce6.asciidoc} (96%) rename docs/doc_examples/{a225fc8c134cb21a85bc6025dac9368b.asciidoc => df81b88a2192dd6f9912e0c948a44487.asciidoc} (92%) create mode 100644 docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc rename docs/doc_examples/{5ba32ebaa7ee28a339c7693696d305ca.asciidoc => e77c2f41a7eca765b0c5f734a66d919f.asciidoc} (93%) rename docs/doc_examples/{bb5a1319c496acc862c670cc7224e59a.asciidoc => ea8c4229afa6dd4f1321355542be9912.asciidoc} (96%) rename docs/doc_examples/{2f07b81fd47ec3b074242a760f0c4e9e.asciidoc => ec4b43c3ebd8816799fa004596b2f0cb.asciidoc} (80%) diff --git a/docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc b/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc similarity index 53% rename from docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc rename to docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc index 3b4f9251b..84abd3971 100644 --- a/docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc +++ b/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc @@ -3,11 +3,8 @@ [source, js] ---- -const response = await client.indices.putSettings({ - index: "my-index-000001", - settings: { - "index.blocks.read_only_allow_delete": null, - }, +const response = await client.indices.getMapping({ + index: "kibana_sample_data_ecommerce", }); console.log(response); ---- diff --git a/docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc b/docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc similarity index 85% rename from docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc rename to docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc index b0acfaa1d..9d98d539a 100644 --- a/docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc +++ b/docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc @@ -11,6 +11,8 @@ const response = await client.indices.putSettings({ "index.indexing.slowlog.threshold.index.debug": "2s", "index.indexing.slowlog.threshold.index.trace": "500ms", "index.indexing.slowlog.source": "1000", + "index.indexing.slowlog.reformat": true, + "index.indexing.slowlog.include.user": true, }, }); console.log(response); diff --git a/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc b/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc new file mode 100644 index 000000000..269067032 --- /dev/null +++ b/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-bit", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + element_type: "bit", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-rank-vectors-bit", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + my_vector: [127, -127, 0, 1, 42], + }, + { + index: { + _id: "2", + }, + }, + { + my_vector: "8100012a7f", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/160986f49758f4e8345d183a842f6351.asciidoc b/docs/doc_examples/0c52af573c9401a2a687e86a4beb182b.asciidoc similarity index 92% rename from docs/doc_examples/160986f49758f4e8345d183a842f6351.asciidoc rename to 
docs/doc_examples/0c52af573c9401a2a687e86a4beb182b.asciidoc index 3aeb9d6f4..e0413d7dd 100644 --- a/docs/doc_examples/160986f49758f4e8345d183a842f6351.asciidoc +++ b/docs/doc_examples/0c52af573c9401a2a687e86a4beb182b.asciidoc @@ -10,7 +10,7 @@ const response = await client.ingest.putPipeline({ { attachment: { field: "data", - remove_binary: false, + remove_binary: true, }, }, ], diff --git a/docs/doc_examples/0e83f140237d75469a428ff403564bb5.asciidoc b/docs/doc_examples/0e83f140237d75469a428ff403564bb5.asciidoc deleted file mode 100644 index aac173f77..000000000 --- a/docs/doc_examples/0e83f140237d75469a428ff403564bb5.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.cluster.putSettings({ - persistent: { - "cluster.routing.allocation.disk.watermark.low": "100gb", - "cluster.routing.allocation.disk.watermark.high": "50gb", - "cluster.routing.allocation.disk.watermark.flood_stage": "10gb", - "cluster.info.update.interval": "1m", - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc b/docs/doc_examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc deleted file mode 100644 index ce7709b43..000000000 --- a/docs/doc_examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: "test-index", - query: { - nested: { - path: "inference_field.inference.chunks", - query: { - sparse_vector: { - field: "inference_field.inference.chunks.embeddings", - inference_id: "my-inference-id", - query: "mountain lake", - }, - }, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/246763219ec06172f7aa57bba28d344a.asciidoc b/docs/doc_examples/246763219ec06172f7aa57bba28d344a.asciidoc new file mode 100644 index 000000000..deabe9511 --- /dev/null +++ b/docs/doc_examples/246763219ec06172f7aa57bba28d344a.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-bit", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + element_type: "bit", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-rank-vectors-bit", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + my_vector: [127, -127, 0, 1, 42], + }, + { + index: { + _id: "2", + }, + }, + { + my_vector: "8100012a7f", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-rank-vectors-bit", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "maxSimDotProduct(params.query_vector, 'my_vector')", + params: { + query_vector: [ + [ + 0.35, 0.77, 0.95, 0.15, 0.11, 0.08, 0.58, 0.06, 0.44, 0.52, 0.21, + 0.62, 0.65, 0.16, 0.64, 0.39, 0.93, 0.06, 0.93, 0.31, 0.92, 0, + 0.66, 0.86, 0.92, 0.03, 0.81, 0.31, 0.2, 0.92, 0.95, 0.64, 0.19, + 0.26, 0.77, 0.64, 0.78, 0.32, 0.97, 0.84, + ], + ], + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/2a21674c40f9b182a8944769d20b2357.asciidoc b/docs/doc_examples/2a21674c40f9b182a8944769d20b2357.asciidoc new file mode 
100644 index 000000000..07c3eb29d --- /dev/null +++ b/docs/doc_examples/2a21674c40f9b182a8944769d20b2357.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-rank-vectors-float", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "maxSimDotProduct(params.query_vector, 'my_vector')", + params: { + query_vector: [ + [0.5, 10, 6], + [-0.5, 10, 10], + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc b/docs/doc_examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc new file mode 100644 index 000000000..93ccfa9d8 --- /dev/null +++ b/docs/doc_examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information including original binary", + processors: [ + { + attachment: { + field: "data", + remove_binary: false, + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "attachment", + document: { + data: "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/23af230e824f48b9cd56a4cf973d788c.asciidoc b/docs/doc_examples/3312c82f81816bf76629db9582991812.asciidoc similarity index 93% rename from docs/doc_examples/23af230e824f48b9cd56a4cf973d788c.asciidoc rename to docs/doc_examples/3312c82f81816bf76629db9582991812.asciidoc index 693b70a4f..f1ba5e168 100644 --- a/docs/doc_examples/23af230e824f48b9cd56a4cf973d788c.asciidoc +++ b/docs/doc_examples/3312c82f81816bf76629db9582991812.asciidoc @@ -14,6 +14,7 @@ const response = await client.indices.putSettings({ "index.search.slowlog.threshold.fetch.info": "800ms", "index.search.slowlog.threshold.fetch.debug": "500ms", "index.search.slowlog.threshold.fetch.trace": "200ms", + "index.search.slowlog.include.user": true, }, }); console.log(response); diff --git a/docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc b/docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc new file mode 100644 index 000000000..8651f44c6 --- /dev/null +++ b/docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "movies", + size: 10, + retriever: { + rescorer: { + rescore: { + query: { + window_size: 50, + rescore_query: { + script_score: { + script: { + source: + "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0", + params: { + queryVector: [-0.5, 90, -10, 14.8, -156], + }, + }, + }, + }, + }, + }, + retriever: { + rrf: { + rank_window_size: 100, + retrievers: [ + { + standard: { + query: { + sparse_vector: { + field: "plot_embedding", + inference_id: "my-elser-model", + query: "films that explore psychological depths", + }, + }, + }, + }, + { + standard: { + query: { + multi_match: { + query: "crime", + fields: ["plot", 
"title"], + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [10, 22, 77], + k: 10, + num_candidates: 10, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc b/docs/doc_examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc new file mode 100644 index 000000000..32f004a99 --- /dev/null +++ b/docs/doc_examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + remove_index: { + index: "my-index-2099.05.06-000001", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc b/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc new file mode 100644 index 000000000..7818a3f0c --- /dev/null +++ b/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + order_stats: { + stats: { + field: "taxful_total_price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc b/docs/doc_examples/48e142e6c69014e0509d4c9251749d77.asciidoc similarity index 85% rename from docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc rename to docs/doc_examples/48e142e6c69014e0509d4c9251749d77.asciidoc index 6be472e3b..7c4401c7d 100644 --- a/docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc +++ b/docs/doc_examples/48e142e6c69014e0509d4c9251749d77.asciidoc @@ -10,7 +10,8 @@ const response = await client.inference.put({ service: "openai", service_settings: { api_key: "", - model_id: "text-embedding-ada-002", + model_id: "text-embedding-3-small", + dimensions: 128, }, }, }); diff --git a/docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc b/docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc deleted file mode 100644 index d66581f09..000000000 --- a/docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.knnSearch({ - index: "my-index", - knn: { - field: "image_vector", - query_vector: [0.3, 0.1, 1.2], - k: 10, - num_candidates: 100, - }, - _source: ["name", "file_type"], -}); -console.log(response); ----- diff --git a/docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc b/docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc deleted file mode 100644 index 255b2df23..000000000 --- a/docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: "semantic-embeddings", - mappings: { - properties: { - semantic_text: { - type: "semantic_text", - inference_id: "my-elser-endpoint", - }, - content: { - type: "text", - copy_to: "semantic_text", - }, - }, - }, -}); -console.log(response); ----- diff --git 
a/docs/doc_examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc b/docs/doc_examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc similarity index 69% rename from docs/doc_examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc rename to docs/doc_examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc index 9537a8386..3226a57c7 100644 --- a/docs/doc_examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc +++ b/docs/doc_examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc @@ -4,9 +4,11 @@ [source, js] ---- const response = await client.indices.putSettings({ - index: "my-index-000001", + index: "*", settings: { "index.search.slowlog.include.user": true, + "index.search.slowlog.threshold.fetch.warn": "30s", + "index.search.slowlog.threshold.query.warn": "30s", }, }); console.log(response); diff --git a/docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc b/docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc similarity index 76% rename from docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc rename to docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc index 0c7b48ea7..afea3d985 100644 --- a/docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc +++ b/docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc @@ -12,6 +12,13 @@ const response = await client.search({ fields: ["my_field", "my_field._2gram", "my_field._3gram"], }, }, + highlight: { + fields: { + my_field: { + matched_fields: ["my_field._index_prefix"], + }, + }, + }, }); console.log(response); ---- diff --git a/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc b/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc new file mode 100644 index 000000000..b2400e39b --- /dev/null +++ b/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + prefix: { + full_name: { + value: "ki", + }, + }, + }, + highlight: { + fields: { + full_name: { + matched_fields: ["full_name._index_prefix"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc b/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc new file mode 100644 index 000000000..047487632 --- /dev/null +++ b/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + }, + aggs: { + daily_revenue: { + sum: { + field: "taxful_total_price", + }, + }, + smoothed_revenue: { + moving_fn: { + buckets_path: "daily_revenue", + window: 3, + script: "MovingFunctions.unweightedAvg(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/74b229a6e020113e5749099451979c89.asciidoc b/docs/doc_examples/74b229a6e020113e5749099451979c89.asciidoc deleted file mode 100644 index b99aa857f..000000000 --- a/docs/doc_examples/74b229a6e020113e5749099451979c89.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await 
client.search({ - index: "test-index", - query: { - nested: { - path: "inference_field.inference.chunks", - query: { - knn: { - field: "inference_field.inference.chunks.embeddings", - query_vector_builder: { - text_embedding: { - model_id: "my_inference_id", - model_text: "mountain lake", - }, - }, - }, - }, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc b/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc new file mode 100644 index 000000000..733c366ba --- /dev/null +++ b/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + format: "yyyy-MM-dd", + }, + aggs: { + revenue: { + sum: { + field: "taxful_total_price", + }, + }, + unique_customers: { + cardinality: { + field: "customer_id", + }, + }, + avg_basket_size: { + avg: { + field: "total_quantity", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc b/docs/doc_examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc similarity index 96% rename from docs/doc_examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc rename to docs/doc_examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc index 0bf72b678..bb8174e35 100644 --- a/docs/doc_examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc +++ b/docs/doc_examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc @@ -12,7 +12,7 @@ const response = await client.ingest.putPipeline({ field: "data", indexed_chars: 11, indexed_chars_field: "max_size", - remove_binary: false, + remove_binary: true, }, }, ], diff --git a/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc b/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc new file mode 100644 index 000000000..aa09492cf --- /dev/null +++ b/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + categories: { + terms: { + field: "category.keyword", + size: 5, + order: { + total_revenue: "desc", + }, + }, + aggs: { + total_revenue: { + sum: { + field: "taxful_total_price", + }, + }, + avg_order_value: { + avg: { + field: "taxful_total_price", + }, + }, + total_items: { + sum: { + field: "total_quantity", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc b/docs/doc_examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc deleted file mode 100644 index b5190e9a8..000000000 --- a/docs/doc_examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.inference.put({ - task_type: "sparse_embedding", - inference_id: "my-elser-endpoint", - inference_config: { - service: "elser", - service_settings: { - adaptive_allocations: { - enabled: true, - min_number_of_allocations: 3, - max_number_of_allocations: 10, - }, - 
num_threads: 1, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/8593715fcc70315a0816b435551258e0.asciidoc b/docs/doc_examples/93c77c65f1e11382f8043d0300e87b89.asciidoc similarity index 90% rename from docs/doc_examples/8593715fcc70315a0816b435551258e0.asciidoc rename to docs/doc_examples/93c77c65f1e11382f8043d0300e87b89.asciidoc index aae698a6f..7a9809243 100644 --- a/docs/doc_examples/8593715fcc70315a0816b435551258e0.asciidoc +++ b/docs/doc_examples/93c77c65f1e11382f8043d0300e87b89.asciidoc @@ -9,7 +9,7 @@ const response = await client.indices.create({ properties: { infer_field: { type: "semantic_text", - inference_id: "my-elser-endpoint", + inference_id: ".elser-2-elasticsearch", }, source_field: { type: "text", diff --git a/docs/doc_examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc b/docs/doc_examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc new file mode 100644 index 000000000..1bc8b2cc7 --- /dev/null +++ b/docs/doc_examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-byte", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + element_type: "byte", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-rank-vectors-byte", + id: 1, + document: { + my_vector: [ + [1, 2, 3], + [4, 5, 6], + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc b/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc new file mode 100644 index 000000000..f71aebf61 --- /dev/null +++ b/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + avg_order_value: { + avg: { + field: "taxful_total_price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc b/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc new file mode 100644 index 000000000..446bba938 --- /dev/null +++ b/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_orders: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + format: "yyyy-MM-dd", + min_doc_count: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc b/docs/doc_examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc new file mode 100644 index 000000000..e63a33d34 --- /dev/null +++ b/docs/doc_examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "_all", + expand_wildcards: "all", + filter_path: "*.settings.index.*.slowlog", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc b/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc new file mode 100644 index 000000000..4e074487d --- /dev/null +++ b/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + sales_by_category: { + terms: { + field: "category.keyword", + size: 5, + order: { + _count: "desc", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc b/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc new file mode 100644 index 000000000..b518cae85 --- /dev/null +++ b/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + }, + aggs: { + revenue: { + sum: { + field: "taxful_total_price", + }, + }, + cumulative_revenue: { + cumulative_sum: { + buckets_path: "revenue", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b26b5574438e4eaf146b2428bf537c51.asciidoc b/docs/doc_examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc similarity index 96% rename from docs/doc_examples/b26b5574438e4eaf146b2428bf537c51.asciidoc rename to docs/doc_examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc index 8945d602e..bae03c0ff 100644 --- a/docs/doc_examples/b26b5574438e4eaf146b2428bf537c51.asciidoc +++ b/docs/doc_examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc @@ -14,7 +14,7 @@ const response = await client.ingest.putPipeline({ attachment: { target_field: "_ingest._value.attachment", field: "_ingest._value.data", - remove_binary: false, + remove_binary: true, }, }, }, diff --git a/docs/doc_examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc b/docs/doc_examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc similarity index 96% rename from docs/doc_examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc rename to docs/doc_examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc index 3302992dc..865d407f9 100644 --- a/docs/doc_examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc +++ b/docs/doc_examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc @@ -10,7 +10,7 @@ const response = await client.ingest.putPipeline({ { attachment: { field: "data", - remove_binary: false, + remove_binary: true, }, }, ], diff --git a/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc b/docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc similarity index 92% rename from docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc rename to docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc index da9071e2c..d4a4521d5 100644 --- a/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc +++ b/docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc @@ -7,7 +7,7 @@ const response = await client.inference.put({ task_type: "sparse_embedding", inference_id: "elser_embeddings", inference_config: { - service: "elser", + service: "elasticsearch", service_settings: { num_allocations: 1, num_threads: 1, diff --git 
a/docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc b/docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc new file mode 100644 index 000000000..da7018754 --- /dev/null +++ b/docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-float", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-rank-vectors-float", + id: 1, + document: { + my_vector: [ + [0.5, 10, 6], + [-0.5, 10, 10], + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc b/docs/doc_examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc similarity index 93% rename from docs/doc_examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc rename to docs/doc_examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc index d17ba0b28..76351698d 100644 --- a/docs/doc_examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc +++ b/docs/doc_examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc @@ -11,7 +11,7 @@ const response = await client.ingest.putPipeline({ attachment: { field: "data", properties: ["content", "title"], - remove_binary: false, + remove_binary: true, }, }, ], diff --git a/docs/doc_examples/bb5a1319c496acc862c670cc7224e59a.asciidoc b/docs/doc_examples/ea8c4229afa6dd4f1321355542be9912.asciidoc similarity index 96% rename from docs/doc_examples/bb5a1319c496acc862c670cc7224e59a.asciidoc rename to docs/doc_examples/ea8c4229afa6dd4f1321355542be9912.asciidoc index d3998b385..c4744fb4e 100644 --- a/docs/doc_examples/bb5a1319c496acc862c670cc7224e59a.asciidoc +++ b/docs/doc_examples/ea8c4229afa6dd4f1321355542be9912.asciidoc @@ -12,7 +12,7 @@ const response = await client.ingest.putPipeline({ field: "data", indexed_chars: 11, indexed_chars_field: "max_size", - remove_binary: false, + remove_binary: true, }, }, ], diff --git a/docs/doc_examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc b/docs/doc_examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc similarity index 80% rename from docs/doc_examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc rename to docs/doc_examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc index d2f122662..5c1d8b6ed 100644 --- a/docs/doc_examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc +++ b/docs/doc_examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc @@ -4,9 +4,10 @@ [source, js] ---- const response = await client.indices.putSettings({ - index: "my-index-000001", + index: "*", settings: { "index.indexing.slowlog.include.user": true, + "index.indexing.slowlog.threshold.index.warn": "30s", }, }); console.log(response); diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 4b7dae886..6a8ad651c 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -42,6 +42,7 @@ client.bulk({ ... }) * *Request (object):* ** *`index` (Optional, string)*: Name of the data stream, index, or index alias to perform bulk actions on. ** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])* +** *`list_executed_pipelines` (Optional, boolean)*: If `true`, the response will include the ingest pipelines that were executed for each index or create. 
 ** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents.
 If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.
 If a final pipeline is configured it will always run, regardless of the value of this parameter.
@@ -55,6 +56,7 @@ Valid values: `true`, `false`, `wait_for`.
 ** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
 Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
 ** *`require_alias` (Optional, boolean)*: If `true`, the request’s actions must target an index alias.
+** *`require_data_stream` (Optional, boolean)*: If `true`, the request's actions must target a data stream (existing or to-be-created).
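The two new bulk parameters documented above can be combined like this (a sketch; the data stream name is an assumption, and data streams require `create` actions):

[source,js]
----
const response = await client.bulk({
  index: 'logs-myapp-default',
  // Report which ingest pipelines ran for each create action.
  list_executed_pipelines: true,
  // Fail if the target is not a data stream.
  require_data_stream: true,
  operations: [
    { create: {} },
    { '@timestamp': new Date().toISOString(), message: 'hello world' },
  ],
});
----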
}) ** *`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names to display. ** *`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names or column aliases used to sort the response. -** *`time` (Optional, string | -1 | 0)*: Unit used to display time values. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== ml_datafeeds @@ -2122,6 +2129,7 @@ If `false`, the API returns a 404 status code when there are no matches or only ** *`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])*: A list of column names or aliases used to sort the response. ** *`from` (Optional, number)*: Skips the specified number of transforms. ** *`size` (Optional, number)*: The maximum number of transforms to display. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== nodeattrs @@ -2143,6 +2151,7 @@ client.cat.nodeattrs({ ... }) local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== nodes @@ -2163,6 +2172,8 @@ client.cat.nodes({ ... }) ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. ** *`full_id` (Optional, boolean | string)*: If `true`, return the full node ID. If `false`, return the shortened node ID. ** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== pending_tasks @@ -2184,6 +2195,8 @@ client.cat.pendingTasks({ ... }) local cluster state. 
If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== plugins @@ -2201,10 +2214,12 @@ client.cat.plugins({ ... }) ==== Arguments * *Request (object):* +** *`include_bootstrap` (Optional, boolean)*: Include bootstrap plugins in the response ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== recovery @@ -2229,6 +2244,7 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para ** *`active_only` (Optional, boolean)*: If `true`, the response only includes ongoing shard recoveries. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. ** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== repositories @@ -2239,9 +2255,18 @@ IMPORTANT: cat APIs are only intended for human consumption using the command li {ref}/cat-repositories.html[Endpoint documentation] [source,ts] ---- -client.cat.repositories() +client.cat.repositories({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== segments @@ -2268,6 +2293,7 @@ To target all data streams and indices, omit this parameter or use `*` or `_all` local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== shards @@ -2290,6 +2316,8 @@ client.cat.shards({ ... }) Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== snapshots @@ -2313,6 +2341,8 @@ Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. 
** *`ignore_unavailable` (Optional, boolean)*: If `true`, the response does not include information from unavailable snapshots. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== tasks @@ -2332,8 +2362,12 @@ client.cat.tasks({ ... }) * *Request (object):* ** *`actions` (Optional, string[])*: The task action names, which are used to limit the response. ** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. -** *`node_id` (Optional, string[])*: Unique node identifiers, which are used to limit the response. +** *`nodes` (Optional, string[])*: Unique node identifiers, which are used to limit the response. ** *`parent_task_id` (Optional, string)*: The parent task identifier, which is used to limit the response. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the task has completed. [discrete] ==== templates @@ -2358,6 +2392,7 @@ Accepts wildcard expressions. If omitted, all templates are returned. local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== thread_pool @@ -2383,6 +2418,7 @@ Accepts wildcard expressions. local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== transforms @@ -2418,7 +2454,8 @@ If `false`, the request returns a 404 status code when there are no matches or o === ccr [discrete] ==== delete_auto_follow_pattern -Deletes auto-follow patterns. +Delete auto-follow patterns. +Delete a collection of cross-cluster replication auto-follow patterns. {ref}/ccr-delete-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2434,7 +2471,9 @@ client.ccr.deleteAutoFollowPattern({ name }) [discrete] ==== follow -Creates a new follower index configured to follow the referenced leader index. +Create a follower. +Create a cross-cluster replication follower index that follows a specific leader index. +When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. {ref}/ccr-put-follow.html[Endpoint documentation] [source,ts] @@ -2473,7 +2512,9 @@ remote Lucene segment files to the follower index. [discrete] ==== follow_info -Retrieves information about all follower indices, including parameters and status for each follower index +Get follower information. +Get information about all cross-cluster replication follower indices. +For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. 
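+
+A minimal sketch of checking follower status with the JavaScript client (the index name `follower-index` is a placeholder, not part of the API):
+
+[source,js]
+----
+// Inspect replication options and status for a hypothetical follower index.
+const response = await client.ccr.followInfo({ index: "follower-index" });
+console.log(response);
+----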
{ref}/ccr-get-follow-info.html[Endpoint documentation] [source,ts] @@ -2489,7 +2530,9 @@ client.ccr.followInfo({ index }) [discrete] ==== follow_stats -Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices. +Get follower stats. +Get cross-cluster replication follower stats. +The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. {ref}/ccr-get-follow-stats.html[Endpoint documentation] [source,ts] @@ -2505,7 +2548,18 @@ client.ccr.followStats({ index }) [discrete] ==== forget_follower -Removes the follower retention leases from the leader. +Forget a follower. +Remove the cross-cluster replication follower retention leases from the leader. + +A following index takes out retention leases on its leader index. +These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. +When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. +However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. +While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. +This API exists to enable manually removing the leases when the unfollow API is unable to do so. + +NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. +The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. {ref}/ccr-post-forget-follower.html[Endpoint documentation] [source,ts] @@ -2525,7 +2579,8 @@ client.ccr.forgetFollower({ index }) [discrete] ==== get_auto_follow_pattern -Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. +Get auto-follow patterns. +Get cross-cluster replication auto-follow patterns. {ref}/ccr-get-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2541,7 +2596,14 @@ client.ccr.getAutoFollowPattern({ ... }) [discrete] ==== pause_auto_follow_pattern -Pauses an auto-follow pattern +Pause an auto-follow pattern. +Pause a cross-cluster replication auto-follow pattern. +When the API returns, the auto-follow pattern is inactive. +New indices that are created on the remote cluster and match the auto-follow patterns are ignored. + +You can resume auto-following with the resume auto-follow pattern API. +When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. +Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. {ref}/ccr-pause-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2557,7 +2619,11 @@ client.ccr.pauseAutoFollowPattern({ name }) [discrete] ==== pause_follow -Pauses a follower index. The follower index will not fetch any additional operations from the leader index. +Pause a follower. 
+Pause a cross-cluster replication follower index. +The follower index will not fetch any additional operations from the leader index. +You can resume following with the resume follower API. +You can pause and resume a follower index to change the configuration of the following task. {ref}/ccr-post-pause-follow.html[Endpoint documentation] [source,ts] @@ -2573,7 +2639,13 @@ client.ccr.pauseFollow({ index }) [discrete] ==== put_auto_follow_pattern -Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. +Create or update auto-follow patterns. +Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. +Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. +Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. + +This API can also be used to update auto-follow patterns. +NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. {ref}/ccr-put-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2604,7 +2676,10 @@ client.ccr.putAutoFollowPattern({ name, remote_cluster }) [discrete] ==== resume_auto_follow_pattern -Resumes an auto-follow pattern that has been paused +Resume an auto-follow pattern. +Resume a cross-cluster replication auto-follow pattern that was paused. +The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. +Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. {ref}/ccr-resume-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2620,7 +2695,11 @@ client.ccr.resumeAutoFollowPattern({ name }) [discrete] ==== resume_follow -Resumes a follower index that has been paused +Resume a follower. +Resume a cross-cluster replication follower index that was paused. +The follower index could have been paused with the pause follower API. +Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. +When this API returns, the follower index will resume fetching operations from the leader index. {ref}/ccr-post-resume-follow.html[Endpoint documentation] [source,ts] @@ -2646,7 +2725,8 @@ client.ccr.resumeFollow({ index }) [discrete] ==== stats -Gets all stats related to cross-cluster replication. +Get cross-cluster replication stats. +This API returns stats about auto-following and the same shard-level stats as the get follower stats API. {ref}/ccr-get-stats.html[Endpoint documentation] [source,ts] @@ -2657,7 +2737,12 @@ client.ccr.stats() [discrete] ==== unfollow -Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. +Unfollow an index. +Convert a cross-cluster replication follower index to a regular index. +The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. +The follower index must be paused and closed before you call the unfollow API. 
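+
+A minimal sketch of that sequence, using only calls documented here and a hypothetical follower index name:
+
+[source,js]
+----
+// Pause the follower, close it, then convert it to a regular index.
+await client.ccr.pauseFollow({ index: "follower-index" });
+await client.indices.close({ index: "follower-index" });
+const response = await client.ccr.unfollow({ index: "follower-index" });
+console.log(response);
+----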
+ +NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. {ref}/ccr-post-unfollow.html[Endpoint documentation] [source,ts] @@ -3277,23 +3362,48 @@ client.connector.syncJobCancel({ connector_sync_job_id }) [discrete] ==== sync_job_check_in -Checks in a connector sync job (refreshes 'last_seen'). +Check in a connector sync job. +Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. {ref}/check-in-connector-sync-job-api.html[Endpoint documentation] [source,ts] ---- -client.connector.syncJobCheckIn() +client.connector.syncJobCheckIn({ connector_sync_job_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job to be checked in. [discrete] ==== sync_job_claim -Claims a connector sync job. +Claim a connector sync job. +This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. +Additionally, it can set the `sync_cursor` property for the sync job. + +This API is not intended for direct connector management by users. +It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. [source,ts] ---- -client.connector.syncJobClaim() +client.connector.syncJobClaim({ connector_sync_job_id, worker_hostname }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job. +** *`worker_hostname` (string)*: The host name of the current system that will run the job. +** *`sync_cursor` (Optional, User-defined value)*: The cursor object from the last incremental sync job. +This should reference the `sync_cursor` field in the connector state for which the job runs. [discrete] ==== sync_job_delete @@ -3316,14 +3426,24 @@ client.connector.syncJobDelete({ connector_sync_job_id }) [discrete] ==== sync_job_error -Sets an error for a connector sync job. +Set a connector sync job error. +Set the `error` field for a connector sync job and set its `status` to `error`. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. {ref}/set-connector-sync-job-error-api.html[Endpoint documentation] [source,ts] ---- -client.connector.syncJobError() +client.connector.syncJobError({ connector_sync_job_id, error }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier for the connector sync job. +** *`error` (string)*: The error for the connector sync job error field. [discrete] ==== sync_job_get @@ -3478,14 +3598,33 @@ client.connector.updateError({ connector_id, error }) [discrete] ==== update_features -Updates the connector features in the connector document. +Update the connector features. 
+Update the connector features in the connector document. +This API can be used to control the following aspects of a connector: + +* document-level security +* incremental syncs +* advanced sync rules +* basic sync rules + +Normally, the running connector service automatically manages these features. +However, you can use this API to override the default behavior. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. {ref}/update-connector-features-api.html[Endpoint documentation] [source,ts] ---- -client.connector.updateFeatures() +client.connector.updateFeatures({ connector_id, features }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated. +** *`features` ({ document_level_security, incremental_sync, native_connector_api_keys, sync_rules })* [discrete] ==== update_filtering @@ -3886,6 +4025,8 @@ client.eql.search({ index, query }) ** *`keep_alive` (Optional, string | -1 | 0)* ** *`keep_on_completion` (Optional, boolean)* ** *`wait_for_completion_timeout` (Optional, string | -1 | 0)* +** *`allow_partial_search_results` (Optional, boolean)* +** *`allow_partial_sequence_results` (Optional, boolean)* ** *`size` (Optional, number)*: For basic queries, the maximum number of matching events to return. Defaults to 10 ** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. ** *`result_position` (Optional, Enum("tail" | "head"))* @@ -3956,7 +4097,16 @@ Defaults to `false`. If `true` then the response will include an extra section u === features [discrete] ==== get_features -Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot +Get the features. +Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. +You can use this API to determine which feature states to include when taking a snapshot. +By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. + +A feature state includes one or more system indices necessary for a given feature to function. +In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. + +The features listed by this API are a combination of built-in features and features defined by plugins. +In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. {ref}/get-features-api.html[Endpoint documentation] [source,ts] @@ -3967,7 +4117,23 @@ client.features.getFeatures() [discrete] ==== reset_features -Resets the internal state of features, usually by deleting system indices +Reset the features. +Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. + +WARNING: Intended for development and testing use only. Do not reset features on a production cluster. + +Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. 
+This deletes all state information stored in system indices. + +The response code is HTTP 200 if the state is successfully reset for all features. +It is HTTP 500 if the reset operation failed for any feature. + +Note that select features might provide a way to reset particular system indices. +Using this API resets all features, both those that are built-in and implemented as plugins. + +To list the features that will be affected, use the get features API. + +IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] @@ -4177,7 +4343,8 @@ Defaults to no timeout. === ilm [discrete] ==== delete_lifecycle -Deletes the specified lifecycle policy definition. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. +Delete a lifecycle policy. +You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. {ref}/ilm-delete-lifecycle.html[Endpoint documentation] [source,ts] @@ -4195,7 +4362,11 @@ client.ilm.deleteLifecycle({ policy }) [discrete] ==== explain_lifecycle -Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures. +Explain the lifecycle state. +Get the current lifecycle status for one or more indices. +For data streams, the API retrieves the current lifecycle status for the stream's backing indices. + +The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. {ref}/ilm-explain-lifecycle.html[Endpoint documentation] [source,ts] @@ -4216,7 +4387,7 @@ To target all data streams and indices, use `*` or `_all`. [discrete] ==== get_lifecycle -Retrieves a lifecycle policy. +Get lifecycle policies. {ref}/ilm-get-lifecycle.html[Endpoint documentation] [source,ts] @@ -4234,7 +4405,8 @@ client.ilm.getLifecycle({ ... }) [discrete] ==== get_status -Retrieves the current index lifecycle management (ILM) status. +Get the ILM status. +Get the current index lifecycle management status. {ref}/ilm-get-status.html[Endpoint documentation] [source,ts] @@ -4245,10 +4417,21 @@ client.ilm.getStatus() [discrete] ==== migrate_to_data_tiers -Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and -attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+ +Migrate to data tiers routing. +Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. +Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. +Migrating away from custom node attributes routing can be manually performed. +This API provides an automated way of performing three out of the four manual steps listed in the migration guide: + +1. Stop setting the custom hot attribute on new indices. +2. Remove custom allocation settings from existing ILM policies. +3. 
Replace custom allocation settings from existing indices with the corresponding tier preference. + +ILM must be stopped before performing the migration. +Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. + {ref}/ilm-migrate-to-data-tiers.html[Endpoint documentation] [source,ts] ---- @@ -4266,7 +4449,20 @@ This provides a way to retrieve the indices and ILM policies that need to be mig [discrete] ==== move_to_step -Manually moves an index into the specified step and executes that step. +Move to a lifecycle step. +Manually move an index into a specific step in the lifecycle policy and run that step. + +WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API. + +You must specify both the current step and the step to be executed in the body of the request. +The request will fail if the current step does not match the step currently running for the index +This is to prevent the index from being moved from an unexpected step into the next step. + +When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. +If only the phase is specified, the index will move to the first step of the first action in the target phase. +If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. +Only actions specified in the ILM policy are considered valid. +An index cannot move to a step that is not part of its policy. {ref}/ilm-move-to-step.html[Endpoint documentation] [source,ts] @@ -4284,7 +4480,10 @@ client.ilm.moveToStep({ index, current_step, next_step }) [discrete] ==== put_lifecycle -Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented. +Create or update a lifecycle policy. +If the specified policy exists, it is replaced and the policy version is incremented. + +NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions. {ref}/ilm-put-lifecycle.html[Endpoint documentation] [source,ts] @@ -4302,7 +4501,9 @@ client.ilm.putLifecycle({ policy }) [discrete] ==== remove_policy -Removes the assigned lifecycle policy and stops managing the specified index +Remove policies from an index. +Remove the assigned lifecycle policies from an index or a data stream's backing indices. +It also stops managing the indices. {ref}/ilm-remove-policy.html[Endpoint documentation] [source,ts] @@ -4318,7 +4519,10 @@ client.ilm.removePolicy({ index }) [discrete] ==== retry -Retries executing the policy for an index that is in the ERROR step. +Retry a policy. +Retry running the lifecycle policy for an index that is in the ERROR step. +The API sets the policy back to the step where the error occurred and runs the step. +Use the explain lifecycle state API to determine whether an index is in the ERROR step. {ref}/ilm-retry-policy.html[Endpoint documentation] [source,ts] @@ -4334,7 +4538,10 @@ client.ilm.retry({ index }) [discrete] ==== start -Start the index lifecycle management (ILM) plugin. +Start the ILM plugin. +Start the index lifecycle management plugin if it is currently stopped. +ILM is started automatically when the cluster is formed. +Restarting ILM is necessary only when it has been stopped using the stop ILM API. 
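+
+As a sketch, restarting ILM and then confirming it is running again (the `RUNNING` operation mode is assumed from the get ILM status API):
+
+[source,js]
+----
+// Restart ILM, then check its status; operation_mode should report RUNNING.
+await client.ilm.start();
+const status = await client.ilm.getStatus();
+console.log(status);
+----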
{ref}/ilm-start.html[Endpoint documentation] [source,ts] @@ -4351,7 +4558,12 @@ client.ilm.start({ ... }) [discrete] ==== stop -Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin +Stop the ILM plugin. +Halt all lifecycle management operations and stop the index lifecycle management plugin. +This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. + +The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. +Use the get ILM status API to check whether ILM is running. {ref}/ilm-stop.html[Endpoint documentation] [source,ts] @@ -4425,8 +4637,9 @@ If an array of strings is provided, it is analyzed as a multi-value field. [discrete] ==== clear_cache -Clears the caches of one or more indices. -For data streams, the API clears the caches of the stream’s backing indices. +Clear the cache. +Clear the cache of one or more indices. +For data streams, the API clears the caches of the stream's backing indices. {ref}/indices-clearcache.html[Endpoint documentation] [source,ts] @@ -4456,7 +4669,29 @@ Use the `fields` parameter to clear the cache of specific fields only. [discrete] ==== clone -Clones an existing index. +Clone an index. +Clone an existing index into a new index. +Each original primary shard is cloned into a new primary shard in the new index. + +IMPORTANT: Elasticsearch does not apply index templates to the resulting index. +The API also does not copy index metadata from the original index. +Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. +For example, if you clone a CCR follower index, the resulting clone will not be a follower index. + +The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. +To set the number of replicas in the resulting index, configure these settings in the clone request. + +Cloning works as follows: + +* First, it creates a new target index with the same definition as the source index. +* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. +* Finally, it recovers the target index as though it were a closed index which had just been re-opened. + +IMPORTANT: Indices can only be cloned if they meet the following requirements: + +* The target index must not exist. +* The source index must have the same number of primary shards as the target index. +* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. {ref}/indices-clone-index.html[Endpoint documentation] [source,ts] @@ -4481,7 +4716,24 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== close -Closes an index. +Close an index. +A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. +It is not possible to index documents or to search for documents in a closed index. +Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. 
+ +When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. +The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. + +You can open and close multiple indices. +An error is thrown if the request explicitly refers to a missing index. +This behaviour can be turned off using the `ignore_unavailable=true` parameter. + +By default, you must explicitly name the indices you are opening or closing. +To open or close indices with `_all`, `*`, or other wildcard expressions, change the` action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. + +Closed indices consume a significant amount of disk-space which can cause problems in managed environments. +Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. {ref}/indices-close.html[Endpoint documentation] [source,ts] @@ -4721,7 +4973,10 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== disk_usage -Analyzes the disk usage of each field of an index or data stream. +Analyze the index disk usage. +Analyze the disk usage of each field of an index or data stream. +This API might not support indices created in previous Elasticsearch versions. +The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. {ref}/indices-disk-usage.html[Endpoint documentation] [source,ts] @@ -4749,7 +5004,14 @@ To use the API, this parameter must be set to `true`. [discrete] ==== downsample -Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. +Downsample an index. +Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. +For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. +All documents within an hour interval are summarized and stored as a single document in the downsample index. + +NOTE: Only indices in a time series data stream are supported. +Neither field nor document level security can be defined on the source index. +The source index must be read only (`index.blocks.write: true`). {ref}/indices-downsample-data-stream.html[Endpoint documentation] [source,ts] @@ -4861,7 +5123,7 @@ client.indices.existsTemplate({ name }) [discrete] ==== explain_data_lifecycle Get the status for a data stream lifecycle. -Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. +Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. 
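+
+A minimal sketch, assuming a data stream named `my-data-stream` exists and is managed by a data stream lifecycle:
+
+[source,js]
+----
+// Fetch the lifecycle status of the stream's backing indices.
+const response = await client.indices.explainDataLifecycle({
+  index: "my-data-stream",
+});
+console.log(response);
+----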
{ref}/data-streams-explain-lifecycle.html[Endpoint documentation] [source,ts] @@ -4879,7 +5141,10 @@ client.indices.explainDataLifecycle({ index }) [discrete] ==== field_usage_stats -Returns field usage information for each shard and field of an index. +Get field usage stats. +Get field usage information for each shard and field of an index. +Field usage statistics are automatically captured when queries are running on a cluster. +A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. {ref}/field-usage-stats.html[Endpoint documentation] [source,ts] @@ -4909,7 +5174,17 @@ Set to all or any positive integer up to the total number of shards in the index [discrete] ==== flush -Flushes one or more data streams or indices. +Flush data streams or indices. +Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. +When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. +Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. + +After each operation has been flushed it is permanently stored in the Lucene index. +This may mean that there is no need to maintain an additional copy of it in the transaction log. +The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. + +It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. +If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. {ref}/indices-flush.html[Endpoint documentation] [source,ts] @@ -4937,7 +5212,19 @@ If `false`, Elasticsearch returns an error if you request a flush when another f [discrete] ==== forcemerge -Performs the force merge operation on one or more indices. +Force a merge. +Perform the force merge operation on the shards of one or more indices. +For data streams, the API forces a merge on the shards of the stream's backing indices. + +Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. +Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. + +WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). +When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". +These soft-deleted documents are automatically cleaned up during regular segment merges. +But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. +So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. +If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. 
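+
+A minimal sketch, assuming a read-only index named `my-index-000001` (the `max_num_segments` option is assumed from the force merge API):
+
+[source,js]
+----
+// Merge the index down to a single segment.
+const response = await client.indices.forcemerge({
+  index: "my-index-000001",
+  max_num_segments: 1,
+});
+console.log(response);
+----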
{ref}/indices-forcemerge.html[Endpoint documentation] [source,ts] @@ -5289,7 +5576,17 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== promote_data_stream -Promotes a data stream from a replicated data stream managed by CCR to a regular data stream +Promote a data stream. +Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. + +With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. +These data streams can't be rolled over in the local cluster. +These replicated data streams roll over only if the upstream data stream rolls over. +In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. + +NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. +If this is missing, the data stream will not be able to roll over until a matching index template is created. +This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. {ref}/data-streams.html[Endpoint documentation] [source,ts] @@ -5445,7 +5742,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. ** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. -** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: +** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -5508,6 +5805,16 @@ error. ==== put_template Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +Elasticsearch applies templates to new indices based on an index pattern that matches the index name. + +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. + +Composable templates always take precedence over legacy templates. +If no composable template matches a new index, matching legacy templates are applied according to their order. + +Index templates are only applied during index creation. +Changes to index templates do not affect existing indices. +Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. {ref}/indices-templates-v1.html[Endpoint documentation] [source,ts] @@ -5539,8 +5846,27 @@ received before the timeout expires, the request fails and returns an error. [discrete] ==== recovery -Returns information about ongoing and completed shard recoveries for one or more indices. -For data streams, the API returns information for the stream’s backing indices. +Get index recovery information. +Get information about ongoing and completed shard recoveries for one or more indices. +For data streams, the API returns information for the stream's backing indices. + +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. +When a shard recovery completes, the recovered shard is available for search and indexing. 
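+
+A minimal sketch, assuming an index named `my-index-000001` (the `active_only` and `detailed` flags are assumed from the recovery API):
+
+[source,js]
+----
+// Report only ongoing shard recoveries, with per-file detail.
+const response = await client.indices.recovery({
+  index: "my-index-000001",
+  active_only: true,
+  detailed: true,
+});
+console.log(response);
+----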
+ +Recovery automatically occurs during the following processes: + +* When creating an index for the first time. +* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. +* Creation of new replica shard copies from the primary. +* Relocation of a shard copy to a different node in the same cluster. +* A snapshot restore operation. +* A clone, shrink, or split operation. + +You can determine the cause of a shard recovery using the recovery or cat recovery APIs. + +The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. +It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. +This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. {ref}/indices-recovery.html[Endpoint documentation] [source,ts] @@ -5587,7 +5913,20 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== reload_search_analyzers -Reloads an index's search analyzers and their resources. +Reload search analyzers. +Reload an index's search analyzers and their resources. +For data streams, the API reloads search analyzers and resources for the stream's backing indices. + +IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. + +You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. +To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. + +NOTE: This API does not perform a reload for each shard of an index. +Instead, it performs a reload for each node containing index shards. +As a result, the total shard count returned by the API can differ from the number of index shards. +Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. +This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. {ref}/indices-reload-analyzers.html[Endpoint documentation] [source,ts] @@ -5606,10 +5945,23 @@ client.indices.reloadSearchAnalyzers({ index }) [discrete] ==== resolve_cluster -Resolves the specified index expressions to return information about each cluster, including -the local cluster, if included. +Resolve the cluster. +Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. +This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. + +You use the same index expression with this endpoint as you would for cross-cluster search. +Index and cluster exclusions are also supported with this endpoint. + +For each cluster in the index expression, information is returned about: + +* Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. 
+* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. +* Whether there are any indices, aliases, or data streams on that cluster that match the index expression. +* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). +* Cluster version information, including the Elasticsearch server version. + {ref}/indices-resolve-cluster-api.html[Endpoint documentation] [source,ts] ---- @@ -5699,8 +6051,9 @@ Set to all or any positive integer up to the total number of shards in the index [discrete] ==== segments -Returns low-level information about the Lucene segments in index shards. -For data streams, the API returns information about the stream’s backing indices. +Get index segments. +Get low-level information about the Lucene segments in index shards. +For data streams, the API returns information about the stream's backing indices. {ref}/indices-segments.html[Endpoint documentation] [source,ts] @@ -5725,8 +6078,18 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== shard_stores -Retrieves store information about replica shards in one or more indices. -For data streams, the API retrieves store information for the stream’s backing indices. +Get index shard stores. +Get store information about replica shards in one or more indices. +For data streams, the API retrieves store information for the stream's backing indices. + +The index shard stores API returns the following information: + +* The node on which each replica shard exists. +* The allocation ID for each replica shard. +* A unique ID for each replica shard. +* Any errors encountered while opening the shard index or from an earlier failure. + +By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards. {ref}/indices-shards-stores.html[Endpoint documentation] [source,ts] @@ -5749,7 +6112,38 @@ this argument determines whether wildcard expressions match hidden data streams. [discrete] ==== shrink -Shrinks an existing index into a new index with fewer primary shards. +Shrink an index. +Shrink an index into a new index with fewer primary shards. + +Before you can shrink an index: + +* The index must be read-only. +* A copy of every shard in the index must reside on the same node. +* The index must have a green health status. + +To make shard allocation easier, we recommend you also remove the index's replica shards. +You can later re-add replica shards as part of the shrink operation. + +The requested number of primary shards in the target index must be a factor of the number of shards in the source index. +For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. +If the number of shards in the index is a prime number it can only be shrunk into a single primary shard + Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. + +The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. + +A shrink operation: + +* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. 
+* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. +* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. + +IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: + +* The target index must not exist. +* The source index must have more primary shards than the target index. +* The number of primary shards in the target index must be a factor of the number of primary shards in the source index. The source index must have more primary shards than the target index. +* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard. +* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index. {ref}/indices-shrink-index.html[Endpoint documentation] [source,ts] @@ -5839,7 +6233,30 @@ that uses deprecated components, Elasticsearch will emit a deprecation warning. [discrete] ==== split -Splits an existing index into a new index with more primary shards. +Split an index. +Split an index into a new index with more primary shards. +* Before you can split an index: + +* The index must be read-only. +* The cluster health status must be green. + +The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. +The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. +For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. + +A split operation: + +* Creates a new target index with the same definition as the source index, but with a larger number of primary shards. +* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. +* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. +* Recovers the target index as though it were a closed index which had just been re-opened. + +IMPORTANT: Indices can only be split if they satisfy the following requirements: + +* The target index must not exist. +* The source index must have fewer primary shards than the target index. +* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. +* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. {ref}/indices-split-index.html[Endpoint documentation] [source,ts] @@ -5864,8 +6281,17 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== stats -Returns statistics for one or more indices. -For data streams, the API retrieves statistics for the stream’s backing indices. +Get index statistics. 
+For data streams, the API retrieves statistics for the stream's backing indices. + +By default, the returned statistics are index-level with `primaries` and `total` aggregations. +`primaries` are the values for only the primary shards. +`total` are the accumulated values for both primary and replica shards. + +To get shard-level statistics, set the `level` parameter to `shards`. + +NOTE: When moving to another node, the shard-level statistics for a shard are cleared. +Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. {ref}/indices-stats.html[Endpoint documentation] [source,ts] @@ -5893,7 +6319,8 @@ such as `open,hidden`. [discrete] ==== unfreeze -Unfreezes an index. +Unfreeze an index. +When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. {ref}/unfreeze-index-api.html[Endpoint documentation] [source,ts] @@ -6042,7 +6469,16 @@ Not required for other tasks. [discrete] ==== put -Create an inference endpoint +Create an inference endpoint. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. {ref}/put-inference-api.html[Endpoint documentation] [source,ts] @@ -6091,14 +6527,25 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== delete_ip_location_database -Deletes an ip location database configuration +Delete IP geolocation database configurations. {ref}/delete-ip-location-database-api.html[Endpoint documentation] [source,ts] ---- -client.ingest.deleteIpLocationDatabase() +client.ingest.deleteIpLocationDatabase({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string | string[])*: A list of IP location database configurations. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. 
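+
+A minimal sketch, assuming a previously created database configuration with the identifier `my-ip-database`:
+
+[source,js]
+----
+// Remove a single IP location database configuration by ID.
+const response = await client.ingest.deleteIpLocationDatabase({
+  id: "my-ip-database",
+});
+console.log(response);
+----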
[discrete] ==== delete_pipeline @@ -6157,14 +6604,24 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== get_ip_location_database -Returns the specified ip location database configuration +Get IP geolocation database configurations. {ref}/get-ip-location-database-api.html[Endpoint documentation] [source,ts] ---- -client.ingest.getIpLocationDatabase() +client.ingest.getIpLocationDatabase({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all database configurations, omit this parameter or use `*`. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. [discrete] ==== get_pipeline @@ -6205,8 +6662,8 @@ client.ingest.processorGrok() [discrete] ==== put_geoip_database -Create or update GeoIP database configurations. -Create or update IP geolocation database configurations. +Create or update a GeoIP database configuration. +Refer to the create or update IP geolocation database configuration API. {ref}/put-geoip-database-api.html[Endpoint documentation] [source,ts] @@ -6228,14 +6685,26 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_ip_location_database -Puts the configuration for a ip location database to be downloaded +Create or update an IP geolocation database configuration. {ref}/put-ip-location-database-api.html[Endpoint documentation] [source,ts] ---- -client.ingest.putIpLocationDatabase() +client.ingest.putIpLocationDatabase({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The database configuration identifier. +** *`configuration` (Optional, { name, maxmind, ipinfo })* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. +A value of `-1` indicates that the request should never time out. [discrete] ==== put_pipeline @@ -6292,7 +6761,10 @@ If you specify both this and the request path parameter, the API only uses the r === license [discrete] ==== delete -Deletes licensing information for the cluster +Delete the license. +When the license expires, your subscription level reverts to Basic. + +If the operator privileges feature is enabled, only operator users can use this API. {ref}/delete-license.html[Endpoint documentation] [source,ts] @@ -6304,8 +6776,10 @@ client.license.delete() [discrete] ==== get Get license information. -Returns information about your Elastic license, including its type, its status, when it was issued, and when it expires. -For more information about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). 
+Get information about your Elastic license including its type, its status, when it was issued, and when it expires. + +NOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. +If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. {ref}/get-license.html[Endpoint documentation] [source,ts] @@ -6323,7 +6797,7 @@ This parameter is deprecated and will always be set to true in 8.x. [discrete] ==== get_basic_status -Retrieves information about the status of the basic license. +Get the basic license status. {ref}/get-basic-status.html[Endpoint documentation] [source,ts] @@ -6334,7 +6808,7 @@ client.license.getBasicStatus() [discrete] ==== get_trial_status -Retrieves information about the status of the trial license. +Get the trial status. {ref}/get-trial-status.html[Endpoint documentation] [source,ts] @@ -6345,7 +6819,14 @@ client.license.getTrialStatus() [discrete] ==== post -Updates the license for the cluster. +Update the license. +You can update your license at runtime without shutting down your nodes. +License updates take effect immediately. +If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. +You must then re-submit the API request with the acknowledge parameter set to true. + +NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. +If the operator privileges feature is enabled, only operator users can use this API. {ref}/update-license.html[Endpoint documentation] [source,ts] @@ -6363,8 +6844,15 @@ client.license.post({ ... }) [discrete] ==== post_start_basic -The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. -To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). +Start a basic license. +Start an indefinite basic license, which gives access to all the basic features. + +NOTE: In order to start a basic license, you must not currently have a basic license. + +If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. +You must then re-submit the API request with the `acknowledge` parameter set to `true`. + +To check the status of your basic license, use the get basic license API. {ref}/start-basic.html[Endpoint documentation] [source,ts] @@ -6380,7 +6868,13 @@ client.license.postStartBasic({ ... }) [discrete] ==== post_start_trial -The start trial API enables you to start a 30-day trial, which gives access to all subscription features. +Start a trial. +Start a 30-day trial, which gives access to all subscription features. + +NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. +For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. 
You can, however, request an extended trial at https://www.elastic.co/trialextension. + +To check the status of your trial, use the get trial status API. {ref}/start-trial.html[Endpoint documentation] [source,ts] ---- client.license.postStartTrial({ ... }) ---- [discrete] === logstash [discrete] ==== delete_pipeline -Deletes a pipeline used for Logstash Central Management. +Delete a Logstash pipeline. + +Delete a pipeline that is used for Logstash Central Management. {ref}/logstash-api-delete-pipeline.html[Endpoint documentation] [source,ts] ---- client.logstash.deletePipeline({ id }) ---- [discrete] ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the pipeline. +** *`id` (string)*: An identifier for the pipeline. [discrete] ==== get_pipeline -Retrieves pipelines used for Logstash Central Management. +Get Logstash pipelines. + +Get pipelines that are used for Logstash Central Management. {ref}/logstash-api-get-pipeline.html[Endpoint documentation] [source,ts] ---- client.logstash.getPipeline({ ... }) ---- [discrete] ==== Arguments * *Request (object):* -** *`id` (Optional, string | string[])*: List of pipeline identifiers. +** *`id` (Optional, string | string[])*: A list of pipeline identifiers. [discrete] ==== put_pipeline -Creates or updates a pipeline used for Logstash Central Management. +Create or update a Logstash pipeline. + +Create a pipeline that is used for Logstash Central Management. +If the specified pipeline exists, it is replaced. {ref}/logstash-api-put-pipeline.html[Endpoint documentation] [source,ts] ---- client.logstash.putPipeline({ id }) ---- [discrete] ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the pipeline. +** *`id` (string)*: An identifier for the pipeline. ** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })* [discrete] === migration [discrete] ==== deprecations -Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. +Get deprecation information. +Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. + +TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. {ref}/migration-api-deprecation.html[Endpoint documentation] [source,ts] ---- client.migration.deprecations({ ... }) ---- [discrete] ==== Arguments * *Request (object):* ** *`index` (Optional, string)* [discrete] ==== get_feature_upgrade_status -Find out whether system features need to be upgraded or not +Get feature migration information. +Version upgrades sometimes require changes to how features store configuration information and data in system indices. +Check which features need to be migrated and the status of any migrations that are in progress. + +TIP: This API is designed for indirect use by the Upgrade Assistant. +We strongly recommend you use the Upgrade Assistant. {ref}/migration-api-feature-upgrade.html[Endpoint documentation] [source,ts] ---- client.migration.getFeatureUpgradeStatus() ---- [discrete] ==== post_feature_upgrade -Begin upgrades for system features +Start the feature migration. +Version upgrades sometimes require changes to how features store configuration information and data in system indices. +This API starts the automatic migration process. + +Some functionality might be temporarily unavailable during the migration process.
+ +TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. {ref}/migration-api-feature-upgrade.html[Endpoint documentation] [source,ts] ---- client.migration.postFeatureUpgrade() ---- @@ -6630,7 +7145,7 @@ Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by -using a job identifier, a group name, a comma-separated list of jobs, or a +using a job identifier, a group name, a list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the job identifier, or by omitting the identifier. @@ -7041,7 +7556,7 @@ This parameter has the `from` and `size` properties. [discrete] ==== get_data_frame_analytics Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single -API request by using a comma-separated list of data frame analytics jobs or a +API request by using a list of data frame analytics jobs or a wildcard expression. {ref}/get-dfanalytics.html[Endpoint documentation] @@ -7110,7 +7625,7 @@ there are no matches or only partial matches. [discrete] ==== get_datafeed_stats Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by -using a comma-separated list of datafeeds or a wildcard expression. You can +using a list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the datafeed identifier, or by omitting the identifier. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. @@ -7144,7 +7659,7 @@ partial matches. If this parameter is `false`, the request returns a [discrete] ==== get_datafeeds Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by -using a comma-separated list of datafeeds or a wildcard expression. You can +using a list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the datafeed identifier, or by omitting the identifier. This API returns a maximum of 10,000 datafeeds. @@ -7263,7 +7778,7 @@ code when there are no matches or only partial matches. [discrete] ==== get_jobs Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API -request by using a group name, a comma-separated list of jobs, or a wildcard +request by using a group name, a list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the job identifier, or by omitting the identifier. @@ -7489,6 +8004,7 @@ be retrieved and then added to another cluster. ** *`from` (Optional, number)*: Skips the specified number of models. ** *`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))*: A comma delimited string of optional fields to include in the response body. +** *`include_model_definition` (Optional, boolean)*: This parameter is deprecated. Use `include=definition` instead. ** *`size` (Optional, number)*: Specifies the maximum number of models to obtain. ** *`tags` (Optional, string | string[])*: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied
==== get_trained_models_stats Get trained models usage info. You can get usage information for multiple trained -models in a single API request by using a comma-separated list of model IDs or a wildcard expression. +models in a single API request by using a list of model IDs or a wildcard expression. {ref}/get-trained-models-stats.html[Endpoint documentation] [source,ts] @@ -7546,8 +8062,8 @@ Currently, for NLP models, only a single value is allowed. [discrete] ==== info -Return ML defaults and limits. -Returns defaults and limits used by machine learning. +Get machine learning information. +Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be @@ -7607,7 +8123,7 @@ client.ml.postCalendarEvents({ calendar_id, events }) Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. -It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. +It is not currently possible to post data to multiple jobs using wildcards or a list. {ref}/ml-post-data.html[Endpoint documentation] [source,ts] @@ -7778,6 +8294,7 @@ model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. +** *`_meta` (Optional, Record)* ** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try @@ -7908,6 +8425,18 @@ client.ml.putJob({ job_id, analysis_config, data_description }) ** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. ** *`results_index_name` (Optional, string)*: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. ** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. +** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the +`_all` string or when no indices are specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: + +* `all`: Match any data stream or index, including hidden ones. +* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. +* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. +* `none`: Wildcard patterns are not accepted. +* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored. [discrete] ==== put_trained_model @@ -8483,7 +9012,7 @@ bucket result. If this property has a non-null value, once per day at than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. ** *`groups` (Optional, string[])*: A list of job groups. A job can belong to no groups or many. -** *`detectors` (Optional, { by_field_name, custom_rules, detector_description, detector_index, exclude_frequent, field_name, function, over_field_name, partition_field_name, use_null }[])*: An array of detector update objects. +** *`detectors` (Optional, { detector_index, description, custom_rules }[])*: An array of detector update objects. ** *`per_partition_categorization` (Optional, { enabled, stop_on_warn })*: Settings related to how categorization interacts with partition fields. [discrete] @@ -8559,28 +9088,6 @@ client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) Otherwise, it responds as soon as the upgrade task is assigned to a node. ** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the request to complete. -[discrete] -=== monitoring -[discrete] -==== bulk -Used by the monitoring features to send monitoring data. - -{ref}/monitor-elasticsearch-cluster.html[Endpoint documentation] -[source,ts] ----- -client.monitoring.bulk({ system_id, system_api_version, interval }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`system_id` (string)*: Identifier of the monitored system -** *`system_api_version` (string)* -** *`interval` (string | -1 | 0)*: Collection interval (e.g., '10s' or '10000ms') of the payload -** *`type` (Optional, string)*: Default document type for items which don't provide one -** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])* - [discrete] === nodes [discrete] @@ -8900,7 +9407,30 @@ client.queryRules.test({ ruleset_id, match_criteria }) === rollup [discrete] ==== delete_job -Deletes an existing rollup job. +Delete a rollup job. + +A job must be stopped before it can be deleted. +If you attempt to delete a started job, an error occurs. +Similarly, if you attempt to delete a nonexistent job, an exception occurs. + +IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. +The API does not delete any previously rolled up data. +This is by design; a user may wish to roll up a static data set. +Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). +Thus the job can be deleted, leaving behind the rolled up data for analysis. 
+If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. +If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: + +``` +POST my_rollup_index/_delete_by_query +{ + "query": { + "term": { + "_rollup.id": "the_rollup_job_id" + } + } +} +``` {ref}/rollup-delete-job.html[Endpoint documentation] [source,ts] @@ -8916,7 +9446,12 @@ client.rollup.deleteJob({ id }) [discrete] ==== get_jobs -Retrieves the configuration, stats, and status of rollup jobs. +Get rollup job information. +Get the configuration, stats, and status of rollup jobs. + +NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. +If a job was created, ran for a while, then was deleted, the API does not return any details about it. +For details about a historical rollup job, the rollup capabilities API may be more useful. {ref}/rollup-get-job.html[Endpoint documentation] [source,ts] @@ -8933,7 +9468,15 @@ If it is `_all` or omitted, the API returns all rollup jobs. [discrete] ==== get_rollup_caps -Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. +Get the rollup job capabilities. +Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. + +This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. +Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. +This API enables you to inspect an index and determine: + +1. Does this index have associated rollup data somewhere in the cluster? +2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? {ref}/rollup-get-rollup-caps.html[Endpoint documentation] [source,ts] @@ -8950,7 +9493,12 @@ client.rollup.getRollupCaps({ ... }) [discrete] ==== get_rollup_index_caps -Returns the rollup capabilities of all jobs inside of a rollup index (for example, the index where rollup data is stored). +Get the rollup index capabilities. +Get the rollup capabilities of all jobs inside of a rollup index. +A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: + +* What jobs are stored in an index (or indices specified via a pattern)? +* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? {ref}/rollup-get-rollup-index-caps.html[Endpoint documentation] [source,ts] @@ -8967,7 +9515,15 @@ Wildcard (`*`) expressions are supported. [discrete] ==== put_job -Creates a rollup job. +Create a rollup job. + +WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. + +The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. 
+ +There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group. + +Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. {ref}/rollup-put-job.html[Endpoint documentation] [source,ts] @@ -9008,7 +9564,9 @@ on a per-field basis and for each field you configure which metric should be col [discrete] ==== rollup_search -Enables searching rolled-up data using the standard Query DSL. +Search rolled-up data. +The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. +It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. {ref}/rollup-search.html[Endpoint documentation] [source,ts] @@ -9029,7 +9587,9 @@ client.rollup.rollupSearch({ index }) [discrete] ==== start_job -Starts an existing, stopped rollup job. +Start rollup jobs. +If you try to start a job that does not exist, an exception occurs. +If you try to start a job that is already started, nothing happens. {ref}/rollup-start-job.html[Endpoint documentation] [source,ts] @@ -9045,7 +9605,9 @@ client.rollup.startJob({ id }) [discrete] ==== stop_job -Stops an existing, started rollup job. +Stop rollup jobs. +If you try to stop a job that does not exist, an exception occurs. +If you try to stop a job that is already stopped, nothing happens. {ref}/rollup-stop-job.html[Endpoint documentation] [source,ts] @@ -9133,7 +9695,8 @@ client.searchApplication.getBehavioralAnalytics({ ... }) [discrete] ==== list -Returns the existing search applications. +Get search applications. +Get information about search applications. {ref}/list-search-applications.html[Endpoint documentation] [source,ts] @@ -9151,14 +9714,22 @@ client.searchApplication.list({ ... }) [discrete] ==== post_behavioral_analytics_event -Creates a behavioral analytics event for existing collection. +Create a behavioral analytics collection event. -http://todo.com/tbd[Endpoint documentation] +{ref}/post-analytics-collection-event.html[Endpoint documentation] [source,ts] ---- -client.searchApplication.postBehavioralAnalyticsEvent() +client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_type }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`collection_name` (string)*: The name of the behavioral analytics collection. +** *`event_type` (Enum("page_view" | "search" | "search_click"))*: The analytics event type. +** *`payload` (Optional, User-defined value)* +** *`debug` (Optional, boolean)*: Whether the response type has to include more details [discrete] ==== put @@ -9196,14 +9767,25 @@ client.searchApplication.putBehavioralAnalytics({ name }) [discrete] ==== render_query -Renders a query for given search application search parameters +Render a search application query. +Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. +If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. +The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. + +You must have `read` privileges on the backing alias of the search application. 
{ref}/search-application-render-query.html[Endpoint documentation] [source,ts] ---- client.searchApplication.renderQuery({ name }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the search application to render the query for. +** *`params` (Optional, Record)* [discrete] ==== search Perform a search against a search application. {ref}/search-application-search.html[Endpoint documentation] [source,ts] ---- client.searchApplication.search({ name }) ---- [discrete] === searchable_snapshots [discrete] ==== cache_stats -Retrieve node-level cache statistics about searchable snapshots. +Get cache statistics. +Get statistics about the shared cache for partially mounted indices. {ref}/searchable-snapshots-apis.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.cacheStats({ ... }) ---- [discrete] ==== Arguments * *Request (object):* ** *`node_id` (Optional, string | string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes [discrete] ==== clear_cache -Clear the cache of searchable snapshots. +Clear the cache. +Clear indices and data streams from the shared cache for partially mounted indices. {ref}/searchable-snapshots-apis.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.clearCache({ ... }) ---- [discrete] ==== Arguments * *Request (object):* ** *`index` (Optional, string | string[])*: A list of index names ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. [discrete] ==== mount -Mount a snapshot as a searchable index. +Mount a snapshot. +Mount a snapshot as a searchable snapshot index. +Do not use this API for snapshots managed by index lifecycle management (ILM). +Manually mounting ILM-managed snapshots can interfere with ILM processes. {ref}/searchable-snapshots-api-mount-snapshot.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.mount({ repository, snapshot, index }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (string)*: The name of the repository containing the snapshot of the index to mount ** *`snapshot` (string)*: The name of the snapshot of the index to mount ** *`index` (string)* [discrete] ==== stats -Retrieve shard-level statistics about searchable snapshots. +Get searchable snapshot statistics.
+This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. +** *`realm` (Optional, string)*: The name of the OpenID Connect realm. +This property is useful in cases where multiple realms are defined. [discrete] ==== oidc_logout -Invalidates a refresh token and access token that was generated from the OpenID Connect Authenticate API +Logout of OpenID Connect. +Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. + +If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. + +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. +These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. {ref}/security-api-oidc-logout.html[Endpoint documentation] [source,ts] ---- -client.security.oidcLogout() +client.security.oidcLogout({ access_token }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`access_token` (string)*: The access token to be invalidated. +** *`refresh_token` (Optional, string)*: The refresh token to be invalidated. [discrete] ==== oidc_prepare_authentication -Creates an OAuth 2.0 authentication request as a URL string +Prepare OpenID connect authentication. +Create an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. + +The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. + +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. +These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. {ref}/security-api-oidc-prepare-authentication.html[Endpoint documentation] [source,ts] ---- -client.security.oidcPrepareAuthentication() +client.security.oidcPrepareAuthentication({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`iss` (Optional, string)*: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. +It cannot be specified when *realm* is specified. +One of *realm* or *iss* is required. +** *`login_hint` (Optional, string)*: In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. +This parameter is not valid when *realm* is specified. +** *`nonce` (Optional, string)*: The value used to associate a client session with an ID token and to mitigate replay attacks. +If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. +** *`realm` (Optional, string)*: The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. +It cannot be specified when *iss* is specified. 
+One of *realm* or *iss* is required. +** *`state` (Optional, string)*: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. +If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. [discrete] ==== put_privileges @@ -10683,7 +11320,15 @@ visible to search, if 'false' do nothing with refreshes. === shutdown [discrete] ==== delete_node -Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +Cancel node shutdown preparations. +Remove a node from the shutdown list so it can resume normal operations. +You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. +Shutdown requests are never removed automatically by Elasticsearch. + +NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. +Direct use is not supported. + +If the operator privileges feature is enabled, you must be an operator to use this API. https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] [source,ts] @@ -10701,7 +11346,14 @@ client.shutdown.deleteNode({ node_id }) [discrete] ==== get_node -Retrieve status of a node or nodes that are currently marked as shutting down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +Get the shutdown status. + +Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. +The API returns status information for each part of the shut down process. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +If the operator privileges feature is enabled, you must be an operator to use this API. https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] [source,ts] @@ -10719,7 +11371,20 @@ client.shutdown.getNode({ ... }) [discrete] ==== put_node -Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +Prepare a node to be shut down. + +NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +If the operator privileges feature is enabled, you must be an operator to use this API. + +The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. +This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. + +You must specify the type of shutdown: `restart`, `remove`, or `replace`. +If a node is already being prepared for shutdown, you can use this API to change the shutdown type. + +IMPORTANT: This API does NOT terminate the Elasticsearch process. +Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] [source,ts] @@ -10769,7 +11434,9 @@ client.simulate.ingest() === slm [discrete] ==== delete_lifecycle -Deletes an existing snapshot lifecycle policy. +Delete a policy. +Delete a snapshot lifecycle policy definition. 
+This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. {ref}/slm-api-delete-policy.html[Endpoint documentation] [source,ts] @@ -10785,7 +11452,9 @@ client.slm.deleteLifecycle({ policy_id }) [discrete] ==== execute_lifecycle -Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. +Run a policy. +Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. +The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. {ref}/slm-api-execute-lifecycle.html[Endpoint documentation] [source,ts] @@ -10801,7 +11470,9 @@ client.slm.executeLifecycle({ policy_id }) [discrete] ==== execute_retention -Deletes any snapshots that are expired according to the policy's retention rules. +Run a retention policy. +Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. +The retention policy is normally applied according to its schedule. {ref}/slm-api-execute-retention.html[Endpoint documentation] [source,ts] @@ -10812,7 +11483,8 @@ client.slm.executeRetention() [discrete] ==== get_lifecycle -Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. +Get policy information. +Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. {ref}/slm-api-get-policy.html[Endpoint documentation] [source,ts] @@ -10828,7 +11500,8 @@ client.slm.getLifecycle({ ... }) [discrete] ==== get_stats -Returns global and policy-level statistics about actions taken by snapshot lifecycle management. +Get snapshot lifecycle management statistics. +Get global and policy-level statistics about actions taken by snapshot lifecycle management. {ref}/slm-api-get-stats.html[Endpoint documentation] [source,ts] @@ -10839,7 +11512,7 @@ client.slm.getStats() [discrete] ==== get_status -Retrieves the status of snapshot lifecycle management (SLM). +Get the snapshot lifecycle management status. {ref}/slm-api-get-status.html[Endpoint documentation] [source,ts] @@ -10850,7 +11523,10 @@ client.slm.getStatus() [discrete] ==== put_lifecycle -Creates or updates a snapshot lifecycle policy. +Create or update a policy. +Create or update a snapshot lifecycle policy. +If the policy already exists, this request increments the policy version. +Only the latest version of a policy is stored. {ref}/slm-api-put-policy.html[Endpoint documentation] [source,ts] @@ -10862,7 +11538,7 @@ client.slm.putLifecycle({ policy_id }) ==== Arguments * *Request (object):* -** *`policy_id` (string)*: ID for the snapshot lifecycle policy you want to create or update. +** *`policy_id` (string)*: The identifier for the snapshot lifecycle policy you want to create or update. ** *`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })*: Configuration for each snapshot created by the policy. ** *`name` (Optional, string)*: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. ** *`repository` (Optional, string)*: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. 
You can create a repository using the snapshot repository API. ** *`retention` (Optional, { expire_after, max_count, min_count })*: Retention rules used to retain and delete snapshots created by the policy. ** *`schedule` (Optional, string)*: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. [discrete] ==== start -Turns on snapshot lifecycle management (SLM). +Start snapshot lifecycle management. +Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. +Manually starting SLM is necessary only if it has been stopped using the stop SLM API. {ref}/slm-api-start.html[Endpoint documentation] [source,ts] ---- client.slm.start() ---- [discrete] ==== stop -Turns off snapshot lifecycle management (SLM). +Stop snapshot lifecycle management. +Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. +Stopping SLM does not stop any snapshots that are in progress. +You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. + +The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. +Use the get snapshot lifecycle management status API to see if SLM is running. {ref}/slm-api-stop.html[Endpoint documentation] [source,ts] ---- client.slm.stop() ---- [discrete] === snapshot [discrete] ==== cleanup_repository -Triggers the review of a snapshot repository’s contents and deletes any stale data not referenced by existing snapshots. +Clean up the snapshot repository. +Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. {ref}/clean-up-snapshot-repo-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.cleanupRepository({ repository }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (string)*: Snapshot repository to clean up. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. [discrete] ==== clone -Clones indices from one snapshot into another snapshot in the same repository. +Clone a snapshot. +Clone part or all of a snapshot into another snapshot in the same repository. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.clone({ repository, snapshot, target_snapshot, indices }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (string)*: A repository name ** *`snapshot` (string)*: The name of the snapshot to clone from ** *`target_snapshot` (string)*: The name of the cloned snapshot to create ** *`indices` (string)* [discrete] ==== create -Creates a snapshot in a repository. +Create a snapshot. +Take a snapshot of a cluster or of data streams and indices. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.create({ repository, snapshot }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (string)*: Repository for the snapshot. ** *`snapshot` (string)*: Name of the snapshot. Must be unique in the repository. [discrete] ==== create_repository -Creates a repository. +Create or update a snapshot repository. +IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. +To register a snapshot repository, the cluster's global metadata must be writeable. +Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.createRepository({ repository }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (string)*: A repository name [discrete] ==== delete -Deletes one or more snapshots. +Delete snapshots. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.delete({ repository, snapshot }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (string)*: A repository name ** *`snapshot` (string)*: A list of snapshot names [discrete] ==== delete_repository -Deletes a repository. +Delete snapshot repositories. +When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots.
+The snapshots themselves are left untouched and in place. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.deleteRepository({ repository }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (string | string[])*: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. [discrete] ==== get -Returns information about a snapshot. +Get snapshot information. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.get({ repository, snapshot }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (string)*: A repository name ** *`snapshot` (string | string[])*: A list of snapshot names [discrete] ==== get_repository -Returns information about a repository. +Get snapshot repository information. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.getRepository({ ... }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (Optional, string | string[])*: A list of repository names [discrete] ==== repository_analyze Analyzes a repository for correctness and performance {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.repositoryAnalyze() ---- [discrete] ==== restore -Restores a snapshot. +Restore a snapshot. +Restore a snapshot of a cluster or data streams and indices. + +You can restore a snapshot only to a running cluster with an elected master node. +The snapshot repository must be registered and available to the cluster. +The snapshot and cluster versions must be compatible. + +To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks. + +Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: + +``` +GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream +``` + +If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. + +If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.restore({ repository, snapshot }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (string)*: A repository name ** *`snapshot` (string)*: A snapshot name [discrete] ==== status -Returns information about the status of a snapshot. +Get the snapshot status. +Get a detailed description of the current state for each shard participating in the snapshot. +Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. +If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. + +WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. +The API requires a read from the repository for each shard in each snapshot. +For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). + +Depending on the latency of your storage, such requests can take an extremely long time to return results. +These requests can also tax machine resources and, when using cloud storage, incur high processing costs. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.status({ ... }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (Optional, string)*: A repository name ** *`snapshot` (Optional, string | string[])*: A list of snapshot names [discrete] ==== verify_repository -Verifies a repository. +Verify a snapshot repository. +Check for common misconfigurations in a snapshot repository. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- client.snapshot.verifyRepository({ repository }) ---- [discrete] ==== Arguments * *Request (object):* ** *`repository` (string)*: A repository name [discrete] === synonyms [discrete] ==== put_synonym_rule Creates or updates a synonym rule in a synonym set {ref}/put-synonym-rule.html[Endpoint documentation] [source,ts] ---- client.synonyms.putSynonymRule({ set_id, rule_id, synonyms }) ---- [discrete] === tasks [discrete] ==== cancel -Cancels a task, if it can be cancelled through an API.
+Cancel a task. +A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. +It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. +The get task information API will continue to list these cancelled tasks until they complete. +The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. + +To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. +You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. {ref}/tasks.html[Endpoint documentation] [source,ts] @@ -11456,7 +12184,7 @@ client.tasks.cancel({ ... }) [discrete] ==== get Get task information. -Returns information about the tasks currently executing in the cluster. +Get information about a task currently running in the cluster. {ref}/tasks.html[Endpoint documentation] [source,ts] @@ -11475,7 +12203,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== list -The task management API returns information about tasks currently executing on one or more nodes in the cluster. +Get all tasks. +Get information about the tasks currently running on one or more nodes in the cluster. {ref}/tasks.html[Endpoint documentation] [source,ts] @@ -11489,6 +12218,7 @@ client.tasks.list({ ... }) * *Request (object):* ** *`actions` (Optional, string | string[])*: List or wildcard expression of actions used to limit the request. ** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. +This information is useful to distinguish tasks from each other but is more costly to run. ** *`group_by` (Optional, Enum("nodes" | "parents" | "none"))*: Key used to group tasks in the response. ** *`nodes` (Optional, string | string[])*: List of node IDs or names used to limit returned information. ** *`parent_task_id` (Optional, string)*: Parent task ID used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. @@ -11500,29 +12230,222 @@ client.tasks.list({ ... }) === text_structure [discrete] ==== find_field_structure -Finds the structure of a text field in an index. +Find the structure of a text field. +Find the structure of a text field in an Elasticsearch index. {ref}/find-field-structure.html[Endpoint documentation] [source,ts] ---- -client.textStructure.findFieldStructure() ----- - +client.textStructure.findFieldStructure({ field, index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`field` (string)*: The field that should be analyzed. +** *`index` (string)*: The name of the index that contains the analyzed field. +** *`column_names` (Optional, string)*: If `format` is set to `delimited`, you can specify the column names in a list. +If this parameter is not specified, the structure finder uses the column names from the header row of the text. +If the text does not have a header row, columns are named "column1", "column2", "column3", for example. +** *`delimiter` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. 
+Only a single character is supported; the delimiter cannot have multiple characters. +By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the delimited format to be detected. +If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. +** *`documents_to_sample` (Optional, number)*: The number of documents to include in the structural analysis. +The minimum value is 2. +** *`ecs_compatibility` (Optional, Enum("disabled" | "v1"))*: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. +If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. +The intention in that situation is that a user who knows the meanings will rename the fields before using them. +** *`explain` (Optional, boolean)*: If true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. +** *`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))*: The high level structure of the text. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +** *`grok_pattern` (Optional, string)*: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. +The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +** *`quote` (Optional, string)*: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. +Otherwise, the default value is false. +** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. +If the analysis is still running when the timeout expires, it will be stopped. +** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. 
+In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + +If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. +Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + +For structured text, if you specify this parameter, the field must exist within the text. + +If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. +For structured text, it is not compulsory to have a timestamp in the text. +** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text. +Only a subset of Java time format letter groups are supported: + +* `a` +* `d` +* `dd` +* `EEE` +* `EEEE` +* `H` +* `HH` +* `h` +* `M` +* `MM` +* `MMM` +* `MMMM` +* `mm` +* `ss` +* `XX` +* `XXX` +* `yy` +* `yyyy` +* `zzz` + +Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). +Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. +For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + +One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. +Another is when the timestamp format is one that the structure finder does not consider by default. + +If this parameter is not specified, the structure finder chooses the best format from a built-in set. + +If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. +When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. [discrete] ==== find_message_structure Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
{ref}/find-message-structure.html[Endpoint documentation]
[source,ts]
----
-client.textStructure.findMessageStructure()
----
-
+client.textStructure.findMessageStructure({ messages })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`messages` (string[])*: The list of messages you want to analyze.
+** *`column_names` (Optional, string)*: If the format is `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+** *`delimiter` (Optional, string)*: If the format is `delimited`, you can specify the character used to delimit the values in each row.
+Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+** *`ecs_compatibility` (Optional, Enum("disabled" | "v1"))*: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings will rename these fields before using them.
+** *`explain` (Optional, boolean)*: If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
+** *`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))*: The high level structure of the text.
+By default, the API chooses the format.
+In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
+If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+** *`grok_pattern` (Optional, string)*: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
+The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter.
+If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp".
+If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+** *`quote` (Optional, string)*: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character.
+Only a single character is supported.
+If this parameter is not specified, the default value is a double quote (`"`).
+If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
+** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them.
+If this parameter is not specified and the delimiter is pipe (`|`), the default value is true.
+Otherwise, the default value is false.
+** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take.
+If the analysis is still running when the timeout expires, it will be stopped.
+** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text.
+In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
+
+If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+
+For structured text, if you specify this parameter, the field must exist within the text.
+
+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
+For structured text, it is not compulsory to have a timestamp in the text.
+** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text.
+Only a subset of Java time format letter groups are supported:
+
+* `a`
+* `d`
+* `dd`
+* `EEE`
+* `EEEE`
+* `H`
+* `HH`
+* `h`
+* `M`
+* `MM`
+* `MMM`
+* `MMMM`
+* `mm`
+* `ss`
+* `XX`
+* `XXX`
+* `yy`
+* `yyyy`
+* `zzz`
+
+Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`).
+Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+Another is when the timestamp format is one that the structure finder does not consider by default.
+
+If this parameter is not specified, the structure finder chooses the best format from a built-in set.
+
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.

[discrete]
==== find_structure
-Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch.
+Find the structure of a text file.
+The text file must contain data that is suitable to be ingested into Elasticsearch.
+
+This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
+Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format.
+It must, however, be text; binary text formats are not currently supported.
+The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb.
+ +The response from the API contains: + +* A couple of messages from the beginning of the text. +* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. +* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. {ref}/find-structure.html[Endpoint documentation] [source,ts] @@ -11536,10 +12459,11 @@ client.textStructure.findStructure({ ... }) * *Request (object):* ** *`text_files` (Optional, TJsonDocument[])* ** *`charset` (Optional, string)*: The text’s character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, windows-1252, or EUC-JP. If this parameter is not specified, the structure finder chooses an appropriate character set. -** *`column_names` (Optional, string)*: If you have set format to delimited, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", etc. +** *`column_names` (Optional, string)*: If you have set format to delimited, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. ** *`delimiter` (Optional, string)*: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (|). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. ** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns (disabled or v1, default: disabled). ** *`explain` (Optional, boolean)*: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. +If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. ** *`format` (Optional, string)*: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. ** *`grok_pattern` (Optional, string)*: If you have set format to semi_structured_text, you can specify a Grok pattern that is used to extract fields from every message in the text. 
The name of the timestamp field in the Grok pattern must match what is specified in the timestamp_field parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If grok_pattern is not specified, the structure finder creates a Grok pattern. ** *`has_header_row` (Optional, boolean)*: If you have set format to delimited, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. @@ -11547,13 +12471,15 @@ client.textStructure.findStructure({ ... }) ** *`lines_to_sample` (Optional, number)*: The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2; If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. ** *`quote` (Optional, string)*: If you have set format to delimited, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote ("). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. ** *`should_trim_fields` (Optional, boolean)*: If you have set format to delimited, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (|), the default value is true. Otherwise, the default value is false. -** *`timeout` (Optional, string | -1 | 0)*: Sets the maximum amount of time that the structure analysis make take. If the analysis is still running when the timeout expires then it will be aborted. +** *`timeout` (Optional, string | -1 | 0)*: Sets the maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will be stopped. ** *`timestamp_field` (Optional, string)*: Optional parameter to specify the timestamp field in the file ** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text. [discrete] ==== test_grok_pattern -Tests a Grok pattern on some text. +Test a Grok pattern. +Test a Grok pattern on one or more lines of text. +The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. {ref}/test-grok-pattern.html[Endpoint documentation] [source,ts] @@ -11919,12 +12845,20 @@ timeout expires, the request fails and returns an error. [discrete] ==== upgrade_transforms -Upgrades all transforms. -This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It -also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not -affect the source and destination indices. The upgrade also does not affect the roles that transforms use when -Elasticsearch security features are enabled; the role used to read source data and write to the destination index -remains unchanged. +Upgrade all transforms. +Transforms are compatible across minor versions and between supported major versions. +However, over time, the format of transform configuration information may change. 
+This API identifies transforms that have a legacy configuration format and upgrades them to the latest version.
+It also cleans up the internal data structures that store the transform state and checkpoints.
+The upgrade does not affect the source and destination indices.
+The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged.
+
+If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue.
+Resolve the issue, then re-run the process.
+A summary is returned when the upgrade is finished.
+
+To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster.
+You may want to perform a recent cluster backup prior to the upgrade.

{ref}/upgrade-transforms.html[Endpoint documentation]
[source,ts]
@@ -11944,7 +12878,13 @@ returns an error.
=== watcher
[discrete]
==== ack_watch
-Acknowledges a watch, manually throttling the execution of the watch's actions.
+Acknowledge a watch.
+Acknowledging a watch enables you to manually throttle the execution of the watch's actions.
+
+The acknowledgement state of an action is stored in the `status.actions.<id>.ack.state` structure.
+
+IMPORTANT: If the specified watch is currently being executed, this API will return an error.
+The reason for this behavior is to prevent overwriting the watch status from a watch execution.

{ref}/watcher-api-ack-watch.html[Endpoint documentation]
[source,ts]
@@ -11961,7 +12901,8 @@ client.watcher.ackWatch({ watch_id })

[discrete]
==== activate_watch
-Activates a currently inactive watch.
+Activate a watch.
+A watch can be either active or inactive.

{ref}/watcher-api-activate-watch.html[Endpoint documentation]
[source,ts]
@@ -11977,7 +12918,8 @@ client.watcher.activateWatch({ watch_id })

[discrete]
==== deactivate_watch
-Deactivates a currently active watch.
+Deactivate a watch.
+A watch can be either active or inactive.

{ref}/watcher-api-deactivate-watch.html[Endpoint documentation]
[source,ts]
@@ -11993,7 +12935,14 @@ client.watcher.deactivateWatch({ watch_id })

[discrete]
==== delete_watch
-Removes a watch from Watcher.
+Delete a watch.
+When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again.
+
+Deleting a watch does not delete any watch execution records related to this watch from the watch history.
+
+IMPORTANT: Deleting a watch must be done by using only this API.
+Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API.
+When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.

{ref}/watcher-api-delete-watch.html[Endpoint documentation]
[source,ts]
@@ -12009,8 +12958,15 @@ client.watcher.deleteWatch({ id })

[discrete]
==== execute_watch
+Run a watch.
 This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.
-For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can execute the watch without executing all of its actions or alternatively by simulating them.
You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after execution. + +For testing and debugging purposes, you also have fine-grained control on how the watch runs. +You can run the watch without running all of its actions or alternatively by simulating them. +You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. + +You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. +This serves as great tool for testing and debugging your watches prior to adding them to Watcher. {ref}/watcher-api-execute-watch.html[Endpoint documentation] [source,ts] @@ -12045,7 +13001,7 @@ client.watcher.getSettings() [discrete] ==== get_watch -Retrieves a watch by its ID. +Get a watch. {ref}/watcher-api-get-watch.html[Endpoint documentation] [source,ts] @@ -12061,7 +13017,18 @@ client.watcher.getWatch({ id }) [discrete] ==== put_watch -Creates a new watch, or updates an existing one. +Create or update a watch. +When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. +Typically for the `schedule` trigger, the scheduler is the trigger engine. + +IMPORTANT: You must use Kibana or this API to create a watch. +Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. +If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. + +When you add a watch you can also define its initial active state by setting the *active* parameter. + +When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. +If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. {ref}/watcher-api-put-watch.html[Endpoint documentation] [source,ts] @@ -12088,7 +13055,8 @@ client.watcher.putWatch({ id }) [discrete] ==== query_watches -Retrieves stored watches. +Query watches. +Get all registered watches in a paginated manner and optionally filter watches by a query. {ref}/watcher-api-query-watches.html[Endpoint documentation] [source,ts] @@ -12108,7 +13076,8 @@ client.watcher.queryWatches({ ... }) [discrete] ==== start -Starts Watcher if it is not already running. +Start the watch service. +Start the Watcher service if it is not already running. {ref}/watcher-api-start.html[Endpoint documentation] [source,ts] @@ -12119,7 +13088,7 @@ client.watcher.start() [discrete] ==== stats -Retrieves the current Watcher metrics. +Get Watcher statistics. {ref}/watcher-api-stats.html[Endpoint documentation] [source,ts] @@ -12136,7 +13105,8 @@ client.watcher.stats({ ... }) [discrete] ==== stop -Stops Watcher if it is running. +Stop the watch service. +Stop the Watcher service if it is running. {ref}/watcher-api-stop.html[Endpoint documentation] [source,ts] @@ -12160,7 +13130,12 @@ client.watcher.updateSettings() === xpack [discrete] ==== info -Provides general information about the installed X-Pack features. +Get information. +The information provided by the API includes: + +* Build information including the build number and timestamp. +* License information about the currently installed license. +* Feature information for the features that are currently enabled and available under the current license. 
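+
+A minimal, hedged sketch of reading that information (the exact response shape may vary by version; `build` and `license` correspond to the items listed above):
+
+[source,ts]
+----
+// Fetch build, license, and feature information from the cluster.
+const info = await client.xpack.info()
+console.log(info.build, info.license)
+----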
{ref}/info-api.html[Endpoint documentation] [source,ts] @@ -12178,7 +13153,9 @@ client.xpack.info({ ... }) [discrete] ==== usage -This API provides information about which features are currently enabled and available under the current license and some usage statistics. +Get usage information. +Get information about the features that are currently enabled and available under the current license. +The API also provides some usage statistics. {ref}/usage-api.html[Endpoint documentation] [source,ts] diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index 7b3d86fd8..8fafab146 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -44,7 +44,7 @@ export default class Ccr { } /** - * Deletes auto-follow patterns. + * Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-delete-auto-follow-pattern.html | Elasticsearch API documentation} */ async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -76,7 +76,7 @@ export default class Ccr { } /** - * Creates a new follower index configured to follow the referenced leader index. + * Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-follow.html | Elasticsearch API documentation} */ async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -112,7 +112,7 @@ export default class Ccr { } /** - * Retrieves information about all follower indices, including parameters and status for each follower index + * Get follower information. Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-info.html | Elasticsearch API documentation} */ async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -144,7 +144,7 @@ export default class Ccr { } /** - * Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices. + * Get follower stats. Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-stats.html | Elasticsearch API documentation} */ async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -176,7 +176,7 @@ export default class Ccr { } /** - * Removes the follower retention leases from the leader. + * Forget a follower. Remove the cross-cluster replication follower retention leases from the leader. A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. 
When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so. NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-forget-follower.html | Elasticsearch API documentation} */ async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -212,7 +212,7 @@ export default class Ccr { } /** - * Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. + * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-auto-follow-pattern.html | Elasticsearch API documentation} */ async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -252,7 +252,7 @@ export default class Ccr { } /** - * Pauses an auto-follow pattern + * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored. You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-pause-auto-follow-pattern.html | Elasticsearch API documentation} */ async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -284,7 +284,7 @@ export default class Ccr { } /** - * Pauses a follower index. The follower index will not fetch any additional operations from the leader index. + * Pause a follower. Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-pause-follow.html | Elasticsearch API documentation} */ async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -316,7 +316,7 @@ export default class Ccr { } /** - * Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. + * Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-auto-follow-pattern.html | Elasticsearch API documentation} */ async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -352,7 +352,7 @@ export default class Ccr { } /** - * Resumes an auto-follow pattern that has been paused + * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-resume-auto-follow-pattern.html | Elasticsearch API documentation} */ async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -384,7 +384,7 @@ export default class Ccr { } /** - * Resumes a follower index that has been paused + * Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-resume-follow.html | Elasticsearch API documentation} */ async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -420,7 +420,7 @@ export default class Ccr { } /** - * Gets all stats related to cross-cluster replication. + * Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -450,7 +450,7 @@ export default class Ccr { } /** - * Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. + * Unfollow an index. Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-unfollow.html | Elasticsearch API documentation} */ async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index d2ad09fa9..12f6858a4 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -437,22 +437,22 @@ export default class Connector { } /** - * Checks in a connector sync job (refreshes 'last_seen'). + * Check in a connector sync job. Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-sync-job-api.html | Elasticsearch API documentation} */ - async syncJobCheckIn (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobCheckIn (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobCheckIn (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async syncJobCheckIn (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_sync_job_id'] const querystring: Record = {} const body = undefined - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } @@ -469,22 +469,26 @@ export default class Connector { } /** - * Claims a connector sync job. + * Claim a connector sync job. This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, it can set the `sync_cursor` property for the sync job. 
This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/claim-connector-sync-job-api.html | Elasticsearch API documentation} */ - async syncJobClaim (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobClaim (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobClaim (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async syncJobClaim (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_sync_job_id'] + const acceptedBody: string[] = ['sync_cursor', 'worker_hostname'] const querystring: Record = {} - const body = undefined + const body: Record = {} - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } @@ -533,22 +537,26 @@ export default class Connector { } /** - * Sets an error for a connector sync job. + * Set a connector sync job error. Set the `error` field for a connector sync job and set its `status` to `error`. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-error-api.html | Elasticsearch API documentation} */ - async syncJobError (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobError (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobError (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async syncJobError (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_sync_job_id'] + const acceptedBody: string[] = ['error'] const querystring: Record = {} - const body = undefined + const body: Record = {} - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } @@ -832,22 +840,26 @@ export default class Connector { } /** - * Updates the connector features in the connector document. + * Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector: * document-level security * incremental syncs * advanced sync rules * basic sync rules Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-features-api.html | Elasticsearch API documentation} */ - async updateFeatures (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async updateFeatures (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async updateFeatures (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async updateFeatures (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['features'] const querystring: Record = {} - const body = undefined + const body: Record = {} - params = params ?? 
{} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index 4f1d3020b..258a79a2d 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -148,7 +148,7 @@ export default class Eql { async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise> async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'size', 'fields', 'result_position', 'runtime_mappings', 'max_samples_per_key'] + const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'allow_partial_search_results', 'allow_partial_sequence_results', 'size', 'fields', 'result_position', 'runtime_mappings', 'max_samples_per_key'] const querystring: Record = {} const body: Record = {} diff --git a/src/api/api/features.ts b/src/api/api/features.ts index feab5b5c4..555be12bc 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -44,7 +44,7 @@ export default class Features { } /** - * Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot + * Get the features. Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-features-api.html | Elasticsearch API documentation} */ async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -74,7 +74,7 @@ export default class Features { } /** - * Resets the internal state of features, usually by deleting system indices + * Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. WARNING: Intended for development and testing use only. Do not reset features on a production cluster. Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices. The response code is HTTP 200 if the state is successfully reset for all features. 
It is HTTP 500 if the reset operation failed for any feature. Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins. To list the features that will be affected, use the get features API. IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index b144baac1..86748d989 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -44,7 +44,7 @@ export default class Ilm { } /** - * Deletes the specified lifecycle policy definition. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. + * Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-delete-lifecycle.html | Elasticsearch API documentation} */ async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -76,7 +76,7 @@ export default class Ilm { } /** - * Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures. + * Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-explain-lifecycle.html | Elasticsearch API documentation} */ async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -108,7 +108,7 @@ export default class Ilm { } /** - * Retrieves a lifecycle policy. + * Get lifecycle policies. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-get-lifecycle.html | Elasticsearch API documentation} */ async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -148,7 +148,7 @@ export default class Ilm { } /** - * Retrieves the current index lifecycle management (ILM) status. + * Get the ILM status. Get the current index lifecycle management status. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-get-status.html | Elasticsearch API documentation}
*/
async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmGetStatusResponse>
@@ -178,7 +178,7 @@ export default class Ilm {
}

/**
- * Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+ Using node roles enables ILM to automatically move the indices between data tiers.
+ * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. Migrating away from custom node attributes routing can be manually performed. This API provides an automated way of performing three out of the four manual steps listed in the migration guide: 1. Stop setting the custom hot attribute on new indices. 1. Remove custom allocation settings from existing ILM policies. 1. Replace custom allocation settings from existing indices with the corresponding tier preference. ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`.
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-migrate-to-data-tiers.html | Elasticsearch API documentation}
 */
 async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMigrateToDataTiersResponse>
@@ -212,7 +212,7 @@ export default class Ilm {
 }

 /**
- * Manually moves an index into the specified step and executes that step.
+ * Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step. WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API. You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index. This is to prevent the index from being moved from an unexpected step into the next step. When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy.
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-move-to-step.html | Elasticsearch API documentation}
 */
 async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMoveToStepResponse>
@@ -248,7 +248,7 @@ export default class Ilm {
 }

 /**
- * Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented.
+ * Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented. NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-put-lifecycle.html | Elasticsearch API documentation} */ async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -284,7 +284,7 @@ export default class Ilm { } /** - * Removes the assigned lifecycle policy and stops managing the specified index + * Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-remove-policy.html | Elasticsearch API documentation} */ async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -316,7 +316,7 @@ export default class Ilm { } /** - * Retries executing the policy for an index that is in the ERROR step. + * Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-retry-policy.html | Elasticsearch API documentation} */ async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -348,7 +348,7 @@ export default class Ilm { } /** - * Start the index lifecycle management (ILM) plugin. + * Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-start.html | Elasticsearch API documentation} */ async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -378,7 +378,7 @@ export default class Ilm { } /** - * Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin + * Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-stop.html | Elasticsearch API documentation} */ async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 82e9227db..98bc01435 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -121,7 +121,7 @@ export default class Indices { } /** - * Clears the caches of one or more indices. For data streams, the API clears the caches of the stream’s backing indices. + * Clear the cache. Clear the cache of one or more indices. 
For data streams, the API clears the caches of the stream's backing indices.
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html | Elasticsearch API documentation}
 */
 async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesClearCacheResponse>
@@ -161,7 +161,7 @@ export default class Indices {
 }

 /**
- * Clones an existing index.
+ * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: * First, it creates a new target index with the same definition as the source index. * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html | Elasticsearch API documentation}
 */
 async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloneResponse>
@@ -198,7 +198,7 @@ export default class Indices {
 }

 /**
- * Closes an index.
+ * Close an index. A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behaviour can be turned off using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API.
Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-close.html | Elasticsearch API documentation} */ async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -538,7 +538,7 @@ export default class Indices { } /** - * Analyzes the disk usage of each field of an index or data stream. + * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html | Elasticsearch API documentation} */ async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -570,7 +570,7 @@ export default class Indices { } /** - * Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. + * Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-downsample-data-stream.html | Elasticsearch API documentation} */ async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -743,7 +743,7 @@ export default class Indices { } /** - * Get the status for a data stream lifecycle. Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. + * Get the status for a data stream lifecycle. Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-explain-lifecycle.html | Elasticsearch API documentation} */ async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -775,7 +775,7 @@ export default class Indices { } /** - * Returns field usage information for each shard and field of an index. + * Get field usage stats. Get field usage information for each shard and field of an index. 
Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html | Elasticsearch API documentation} */ async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -807,7 +807,7 @@ export default class Indices { } /** - * Flushes one or more data streams or indices. + * Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html | Elasticsearch API documentation} */ async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -847,7 +847,7 @@ export default class Indices { } /** - * Performs the force merge operation on one or more indices. + * Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. 
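+   * For example, a minimal usage sketch with this client (illustrative only; the index name and the configured `client` instance are assumptions):
+   *
+   *     await client.indices.forcemerge({ index: 'my-read-only-index', max_num_segments: 1 })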
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html | Elasticsearch API documentation}
    */
   async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1342,7 +1342,7 @@ export default class Indices {
   }
 
   /**
-   * Promotes a data stream from a replicated data stream managed by CCR to a regular data stream
+   * Promote a data stream. Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can't be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation}
    */
   async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1569,7 +1569,7 @@ export default class Indices {
   }
 
   /**
-   * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+   * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates-v1.html | Elasticsearch API documentation}
    */
   async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1605,7 +1605,7 @@ export default class Indices {
   }
 
   /**
-   * Returns information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream’s backing indices.
+   * Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. Recovery automatically occurs during the following processes: * When creating an index for the first time. * When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. * Creation of new replica shard copies from the primary. * Relocation of a shard copy to a different node in the same cluster. * A snapshot restore operation. * A clone, shrink, or split operation. You can determine the cause of a shard recovery using the recovery or cat recovery APIs. The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html | Elasticsearch API documentation}
    */
   async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1685,7 +1685,7 @@ export default class Indices {
   }
 
   /**
-   * Reloads an index's search analyzers and their resources.
+   * Reload search analyzers. Reload an index's search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices. IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. NOTE: This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html | Elasticsearch API documentation}
    */
   async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1717,7 +1717,7 @@ export default class Indices {
   }
 
   /**
-   * Resolves the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported.
+   * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html | Elasticsearch API documentation}
    */
   async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1825,7 +1825,7 @@ export default class Indices {
   }
 
   /**
-   * Returns low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream’s backing indices.
+   * Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream's backing indices.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html | Elasticsearch API documentation}
    */
   async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1865,7 +1865,7 @@ export default class Indices {
   }
 
   /**
-   * Retrieves store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream’s backing indices.
+   * Get index shard stores. Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream's backing indices. The index shard stores API returns the following information: * The node on which each replica shard exists. * The allocation ID for each replica shard. * A unique ID for each replica shard. * Any errors encountered while opening the shard index or from an earlier failure. By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html | Elasticsearch API documentation}
    */
   async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1905,7 +1905,7 @@ export default class Indices {
   }
 
   /**
-   * Shrinks an existing index into a new index with fewer primary shards.
+   * Shrink an index. Shrink an index into a new index with fewer primary shards. Before you can shrink an index: * The index must be read-only. * A copy of every shard in the index must reside on the same node. * The index must have a green health status. To make shard allocation easier, we recommend you also remove the index's replica shards. You can later re-add replica shards as part of the shrink operation. The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. A shrink operation: * Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. * Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. * Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: * The target index must not exist. * The source index must have more primary shards than the target index. * The number of primary shards in the target index must be a factor of the number of primary shards in the source index. * The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard. * The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html | Elasticsearch API documentation}
    */
   async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -2018,7 +2018,7 @@ export default class Indices {
   }
 
   /**
-   * Splits an existing index into a new index with more primary shards.
+   * Split an index. Split an index into a new index with more primary shards. Before you can split an index: * The index must be read-only. * The cluster health status must be green. The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. * Recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy the following requirements: * The target index must not exist. * The source index must have fewer primary shards than the target index. * The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html | Elasticsearch API documentation}
    */
   async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -2055,7 +2055,7 @@ export default class Indices {
   }
 
   /**
-   * Returns statistics for one or more indices. For data streams, the API retrieves statistics for the stream’s backing indices.
+   * Get index statistics. For data streams, the API retrieves statistics for the stream's backing indices. By default, the returned statistics are index-level with `primaries` and `total` aggregations. `primaries` are the values for only the primary shards. `total` are the accumulated values for both primary and replica shards. To get shard-level statistics, set the `level` parameter to `shards`. NOTE: When moving to another node, the shard-level statistics for a shard are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html | Elasticsearch API documentation}
    */
   async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -2102,7 +2102,7 @@ export default class Indices {
   }
 
   /**
-   * Unfreezes an index.
+   * Unfreeze an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/unfreeze-index-api.html | Elasticsearch API documentation}
    */
   async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts
index 6eaea8d28..195cf46a2 100644
--- a/src/api/api/inference.ts
+++ b/src/api/api/inference.ts
@@ -172,7 +172,7 @@ export default class Inference {
   }
 
   /**
-   * Create an inference endpoint
+   * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html | Elasticsearch API documentation}
    */
   async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts
index 4b7f89de0..6999e2064 100644
--- a/src/api/api/ingest.ts
+++ b/src/api/api/ingest.ts
@@ -76,22 +76,22 @@ export default class Ingest {
   }
 
   /**
-   * Deletes an ip location database configuration
+   * Delete IP geolocation database configurations.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-ip-location-database-api.html | Elasticsearch API documentation}
    */
-  async deleteIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async deleteIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async deleteIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise
-  async deleteIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise {
+  async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise
+  async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['id']
     const querystring: Record = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -210,13 +210,13 @@ export default class Ingest {
   }
 
   /**
-   * Returns the specified ip location database configuration
+   * Get IP geolocation database configurations.
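+   * For example, a minimal usage sketch with this client (illustrative only; the database ID is an assumption):
+   *
+   *     const config = await client.ingest.getIpLocationDatabase({ id: 'my-ip-database' })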
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ip-location-database-api.html | Elasticsearch API documentation}
    */
-  async getIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async getIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async getIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise
-  async getIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise {
+  async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise
+  async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['id']
     const querystring: Record = {}
     const body = undefined
@@ -226,6 +226,7 @@ export default class Ingest {
       if (acceptedPath.includes(key)) {
         continue
       } else {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -319,7 +320,7 @@ export default class Ingest {
   }
 
   /**
-   * Create or update GeoIP database configurations. Create or update IP geolocation database configurations.
+   * Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-geoip-database-api.html | Elasticsearch API documentation}
    */
   async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -355,22 +356,26 @@ export default class Ingest {
   }
 
   /**
-   * Puts the configuration for a ip location database to be downloaded
+   * Create or update an IP geolocation database configuration.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-ip-location-database-api.html | Elasticsearch API documentation}
    */
-  async putIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async putIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async putIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise
-  async putIpLocationDatabase (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise {
+  async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise
+  async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['id']
+    const acceptedBody: string[] = ['configuration']
     const querystring: Record = {}
-    const body = undefined
+    let body: any
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        // @ts-expect-error
+        body = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else {
+        // @ts-expect-error
        querystring[key] = params[key]
      }
    }
diff --git a/src/api/api/license.ts b/src/api/api/license.ts
index cd7c5a4a9..9f2e8c627 100644
--- a/src/api/api/license.ts
+++ b/src/api/api/license.ts
@@ -44,7 +44,7 @@ export default class License {
   }
 
   /**
-   * Deletes licensing information for the cluster
+   * Delete the license. When the license expires, your subscription level reverts to Basic. If the operator privileges feature is enabled, only operator users can use this API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html | Elasticsearch API documentation}
    */
   async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -74,7 +74,7 @@ export default class License {
   }
 
   /**
-   * Get license information. Returns information about your Elastic license, including its type, its status, when it was issued, and when it expires. For more information about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions).
+   * Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires. NOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html | Elasticsearch API documentation}
    */
   async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -104,7 +104,7 @@ export default class License {
   }
 
   /**
-   * Retrieves information about the status of the basic license.
+   * Get the basic license status.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html | Elasticsearch API documentation}
    */
   async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -134,7 +134,7 @@ export default class License {
   }
 
   /**
-   * Retrieves information about the status of the trial license.
+   * Get the trial status.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html | Elasticsearch API documentation}
    */
   async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -164,7 +164,7 @@ export default class License {
   }
 
   /**
-   * Updates the license for the cluster.
+   * Update the license. You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API.
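+   * For example, a minimal usage sketch with this client (illustrative only; `myLicense` stands in for a real license object obtained from Elastic):
+   *
+   *     await client.license.post({ license: myLicense, acknowledge: true })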
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html | Elasticsearch API documentation}
    */
   async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -198,7 +198,7 @@ export default class License {
   }
 
   /**
-   * The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html).
+   * Start a basic license. Start an indefinite basic license, which gives access to all the basic features. NOTE: In order to start a basic license, you must not currently have a basic license. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the `acknowledge` parameter set to `true`. To check the status of your basic license, use the get basic license API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html | Elasticsearch API documentation}
    */
   async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -228,7 +228,7 @@ export default class License {
   }
 
   /**
-   * The start trial API enables you to start a 30-day trial, which gives access to all subscription features.
+   * Start a trial. Start a 30-day trial, which gives access to all subscription features. NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. To check the status of your trial, use the get trial status API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html | Elasticsearch API documentation}
    */
   async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts
index 9b3434506..79f3556fb 100644
--- a/src/api/api/logstash.ts
+++ b/src/api/api/logstash.ts
@@ -44,7 +44,7 @@ export default class Logstash {
   }
 
   /**
-   * Deletes a pipeline used for Logstash Central Management.
+   * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-delete-pipeline.html | Elasticsearch API documentation}
    */
   async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -76,7 +76,7 @@ export default class Logstash {
   }
 
   /**
-   * Retrieves pipelines used for Logstash Central Management.
+   * Get Logstash pipelines. Get pipelines that are used for Logstash Central Management.
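+   * For example, a minimal usage sketch with this client (illustrative only; the pipeline ID is an assumption):
+   *
+   *     const pipelines = await client.logstash.getPipeline({ id: 'my-pipeline' })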
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-get-pipeline.html | Elasticsearch API documentation}
    */
   async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -116,7 +116,7 @@ export default class Logstash {
   }
 
   /**
-   * Creates or updates a pipeline used for Logstash Central Management.
+   * Create or update a Logstash pipeline. Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-put-pipeline.html | Elasticsearch API documentation}
    */
   async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts
index 5fc747195..4b7d54cc5 100644
--- a/src/api/api/migration.ts
+++ b/src/api/api/migration.ts
@@ -44,7 +44,7 @@ export default class Migration {
   }
 
   /**
-   * Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
+   * Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-deprecation.html | Elasticsearch API documentation}
    */
   async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -84,7 +84,7 @@ export default class Migration {
   }
 
   /**
-   * Find out whether system features need to be upgraded or not
+   * Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-feature-upgrade.html | Elasticsearch API documentation}
    */
   async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -114,7 +114,7 @@ export default class Migration {
   }
 
   /**
-   * Begin upgrades for system features
+   * Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process. Some functionality might be temporarily unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
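+   * For example, a minimal usage sketch with this client (illustrative only; the response field check reflects the documented status values and is an assumption here):
+   *
+   *     const status = await client.migration.getFeatureUpgradeStatus()
+   *     if (status.migration_status === 'MIGRATION_NEEDED') await client.migration.postFeatureUpgrade()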
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-feature-upgrade.html | Elasticsearch API documentation}
    */
   async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index 82419d6e1..97c9d7f9b 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -1488,7 +1488,7 @@ export default class Ml {
   }
 
   /**
-   * Return ML defaults and limits. Returns defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration.
+   * Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-info.html | Elasticsearch API documentation}
    */
   async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1791,7 +1791,7 @@ export default class Ml {
   async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise
   async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['id']
-    const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', 'model_memory_limit', 'source', 'headers', 'version']
+    const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', '_meta', 'model_memory_limit', 'source', 'headers', 'version']
     const querystring: Record = {}
     const body: Record = {}
 
@@ -1827,7 +1827,7 @@ export default class Ml {
   async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise
   async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['datafeed_id']
-    const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers']
+    const acceptedBody: string[] = ['aggregations', 'aggs', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers']
     const querystring: Record = {}
     const body: Record = {}
 
@@ -1898,8 +1898,8 @@ export default class Ml {
   async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise>
   async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise
   async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise {
-    const acceptedPath: string[] = ['job_id']
-    const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days']
+    const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'job_id', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days']
     const querystring: Record = {}
     const body: Record = {}
 
@@ -2618,7 +2618,7 @@ export default class Ml {
   }
 
   /**
-   * Validates an anomaly detection job.
+   * Validate an anomaly detection job.
    * @see {@link https://www.elastic.co/guide/en/machine-learning/master/ml-jobs.html | Elasticsearch API documentation}
    */
   async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -2652,7 +2652,7 @@ export default class Ml {
   }
 
   /**
-   * Validates an anomaly detection detector.
+   * Validate an anomaly detection job.
    * @see {@link https://www.elastic.co/guide/en/machine-learning/master/ml-jobs.html | Elasticsearch API documentation}
    */
   async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts
index a33e18e3a..70360e38e 100644
--- a/src/api/api/monitoring.ts
+++ b/src/api/api/monitoring.ts
@@ -44,7 +44,7 @@ export default class Monitoring {
   }
 
   /**
-   * Used by the monitoring features to send monitoring data.
+   * Send monitoring data. This API is used by the monitoring features to send monitoring data.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/monitor-elasticsearch-cluster.html | Elasticsearch API documentation}
    */
   async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts
index 1b2864192..4eebd5bd9 100644
--- a/src/api/api/rollup.ts
+++ b/src/api/api/rollup.ts
@@ -44,7 +44,7 @@ export default class Rollup {
   }
 
   /**
-   * Deletes an existing rollup job.
+   * Delete a rollup job. A job must be stopped before it can be deleted. If you attempt to delete a started job, an error occurs. Similarly, if you attempt to delete a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. The API does not delete any previously rolled up data. This is by design; a user may wish to roll up a static data set. Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": { "_rollup.id": "the_rollup_job_id" } } } ```
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-delete-job.html | Elasticsearch API documentation}
    */
   async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -76,7 +76,7 @@ export default class Rollup {
   }
 
   /**
-   * Retrieves the configuration, stats, and status of rollup jobs.
+   * Get rollup job information. Get the configuration, stats, and status of rollup jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-job.html | Elasticsearch API documentation}
    */
   async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -116,7 +116,7 @@ export default class Rollup {
   }
 
   /**
-   * Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern.
+   * Get the rollup job capabilities. Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine: 1. Does this index have associated rollup data somewhere in the cluster? 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-caps.html | Elasticsearch API documentation}
    */
   async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -156,7 +156,7 @@ export default class Rollup {
   }
 
   /**
-   * Returns the rollup capabilities of all jobs inside of a rollup index (for example, the index where rollup data is stored).
+   * Get the rollup index capabilities. Get the rollup capabilities of all jobs inside of a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: * What jobs are stored in an index (or indices specified via a pattern)? * What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html | Elasticsearch API documentation}
    */
   async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -188,7 +188,7 @@ export default class Rollup {
   }
 
   /**
-   * Creates a rollup job.
+   * Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group. Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-put-job.html | Elasticsearch API documentation}
    */
   async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -224,7 +224,7 @@ export default class Rollup {
   }
 
   /**
-   * Enables searching rolled-up data using the standard Query DSL.
+   * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents, then takes the response and rewrites it back to what a client would expect given the original query.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-search.html | Elasticsearch API documentation}
    */
   async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise>
@@ -260,7 +260,7 @@ export default class Rollup {
   }
 
   /**
-   * Starts an existing, stopped rollup job.
+   * Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-start-job.html | Elasticsearch API documentation}
    */
   async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -292,7 +292,7 @@ export default class Rollup {
   }
 
   /**
-   * Stops an existing, started rollup job.
+   * Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-stop-job.html | Elasticsearch API documentation}
    */
   async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts
index 72476e0a1..3a9818c07 100644
--- a/src/api/api/search_application.ts
+++ b/src/api/api/search_application.ts
@@ -180,7 +180,7 @@ export default class SearchApplication {
   }
 
   /**
-   * Returns the existing search applications.
+   * Get search applications. Get information about search applications.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-search-applications.html | Elasticsearch API documentation}
    */
   async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -210,22 +210,26 @@ export default class SearchApplication {
   }
 
   /**
-   * Creates a behavioral analytics event for existing collection.
-   * @see {@link http://todo.com/tbd | Elasticsearch API documentation}
+   * Create a behavioral analytics collection event.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/post-analytics-collection-event.html | Elasticsearch API documentation}
    */
-  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise
-  async postBehavioralAnalyticsEvent (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise {
+  async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise
+  async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['collection_name', 'event_type']
+    const acceptedBody: string[] = ['payload']
     const querystring: Record = {}
-    const body = undefined
+    let body: any
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        // @ts-expect-error
+        body = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -311,22 +315,26 @@ export default class SearchApplication {
   }
 
   /**
-   * Renders a query for given search application search parameters
+   * Render a search application query. Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. You must have `read` privileges on the backing alias of the search application.
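+   * For example, a minimal usage sketch with this client (illustrative only; the application name and template parameters are assumptions):
+   *
+   *     const rendered = await client.searchApplication.renderQuery({ name: 'my-search-app', params: { query_string: 'rock climbing' } })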
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-render-query.html | Elasticsearch API documentation}
    */
-  async renderQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async renderQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async renderQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise
-  async renderQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise {
+  async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise
+  async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise {
     const acceptedPath: string[] = ['name']
+    const acceptedBody: string[] = ['params']
     const querystring: Record = {}
-    const body = undefined
+    const body: Record = {}
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts
index 0ddbce496..d7ba14581 100644
--- a/src/api/api/searchable_snapshots.ts
+++ b/src/api/api/searchable_snapshots.ts
@@ -44,7 +44,7 @@ export default class SearchableSnapshots {
   }
 
   /**
-   * Retrieve node-level cache statistics about searchable snapshots.
+   * Get cache statistics. Get statistics about the shared cache for partially mounted indices.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation}
    */
   async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -84,7 +84,7 @@ export default class SearchableSnapshots {
   }
 
   /**
-   * Clear the cache of searchable snapshots.
+   * Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation}
    */
   async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -124,7 +124,7 @@ export default class SearchableSnapshots {
   }
 
   /**
-   * Mount a snapshot as a searchable index.
+   * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-mount-snapshot.html | Elasticsearch API documentation}
    */
   async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -161,7 +161,7 @@ export default class SearchableSnapshots {
   }
 
   /**
-   * Retrieve shard-level statistics about searchable snapshots.
+   * Get searchable snapshot statistics.
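+   * For example, a minimal usage sketch with this client (illustrative only; the mounted index name is an assumption):
+   *
+   *     const stats = await client.searchableSnapshots.stats({ index: 'my-mounted-index' })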
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 70b0a80b7..d75a50c86 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -1471,22 +1471,26 @@ export default class Security { } /** - * Exchanges an OpenID Connection authentication response message for an Elasticsearch access token and refresh token pair + * Authenticate OpenID Connect. Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-authenticate.html | Elasticsearch API documentation} */ - async oidcAuthenticate (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcAuthenticate (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async oidcAuthenticate (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async oidcAuthenticate (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['nonce', 'realm', 'redirect_uri', 'state'] const querystring: Record = {} - const body = undefined + const body: Record = {} - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } @@ -1500,22 +1504,26 @@ export default class Security { } /** - * Invalidates a refresh token and access token that was generated from the OpenID Connect Authenticate API + * Logout of OpenID Connect. Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
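// A minimal token exchange and logout sketch, assuming a configured client; the realm
// name and callback URL are hypothetical, and the nonce and state must be the values
// produced by the preceding prepare-authentication step:
const tokens = await client.security.oidcAuthenticate({
  realm: 'oidc1',
  nonce: 'nonce-from-prepare-step',
  state: 'state-from-prepare-step',
  redirect_uri: 'https://kibana.example.com/oidc/callback?code=abc'
})
await client.security.oidcLogout({
  access_token: tokens.access_token,
  refresh_token: tokens.refresh_token
})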
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-logout.html | Elasticsearch API documentation} */ - async oidcLogout (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcLogout (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async oidcLogout (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async oidcLogout (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['access_token', 'refresh_token'] const querystring: Record = {} - const body = undefined + const body: Record = {} - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } @@ -1529,22 +1537,27 @@ export default class Security { } /** - * Creates an OAuth 2.0 authentication request as a URL string + * Prepare OpenID Connect authentication. Create an OAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
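// A minimal sketch, assuming a configured client and a hypothetical realm name; the
// response's redirect field is the Provider authorization URL to send the browser to:
const prepared = await client.security.oidcPrepareAuthentication({ realm: 'oidc1' })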
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-prepare-authentication.html | Elasticsearch API documentation} */ - async oidcPrepareAuthentication (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcPrepareAuthentication (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async oidcPrepareAuthentication (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async oidcPrepareAuthentication (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['iss', 'login_hint', 'nonce', 'realm', 'state'] const querystring: Record = {} - const body = undefined + const body: Record = {} params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index 030c6678b..e1471befa 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -44,7 +44,7 @@ export default class Shutdown { } /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} */ async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -76,7 +76,7 @@ export default class Shutdown { } /** - * Retrieve status of a node or nodes that are currently marked as shutting down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * Get the shutdown status. Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. 
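// A minimal sketch, assuming a configured client; with no node ID, the call reports
// the shutdown status of every node that has a pending shutdown request:
const shutdownStatus = await client.shutdown.getNode()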
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} */ async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -116,7 +116,7 @@ export default class Shutdown { } /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type. IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} */ async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index c940bafc2..7dd058206 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -44,7 +44,7 @@ export default class Slm { } /** - * Deletes an existing snapshot lifecycle policy. + * Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-delete-policy.html | Elasticsearch API documentation} */ async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -76,7 +76,7 @@ export default class Slm { } /** - * Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-execute-lifecycle.html | Elasticsearch API documentation} */ async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -108,7 +108,7 @@ export default class Slm { } /** - * Deletes any snapshots that are expired according to the policy's retention rules. + * Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule. 
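// A minimal sketch, assuming a configured client and a hypothetical policy ID: run the
// policy immediately, then force retention processing for expired snapshots:
await client.slm.executeLifecycle({ policy_id: 'nightly-snapshots' })
await client.slm.executeRetention()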
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-execute-retention.html | Elasticsearch API documentation} */ async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -138,7 +138,7 @@ export default class Slm { } /** - * Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-policy.html | Elasticsearch API documentation} */ async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -178,7 +178,7 @@ export default class Slm { } /** - * Returns global and policy-level statistics about actions taken by snapshot lifecycle management. + * Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-stats.html | Elasticsearch API documentation} */ async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -208,7 +208,7 @@ export default class Slm { } /** - * Retrieves the status of snapshot lifecycle management (SLM). + * Get the snapshot lifecycle management status. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-status.html | Elasticsearch API documentation} */ async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -238,7 +238,7 @@ export default class Slm { } /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-put-policy.html | Elasticsearch API documentation} */ async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -274,7 +274,7 @@ export default class Slm { } /** - * Turns on snapshot lifecycle management (SLM). + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-start.html | Elasticsearch API documentation} */ async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -304,7 +304,7 @@ export default class Slm { } /** - * Turns off snapshot lifecycle management (SLM). + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. 
The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-stop.html | Elasticsearch API documentation} */ async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index 377c1d252..f688c32be 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -44,7 +44,7 @@ export default class Snapshot { } /** - * Triggers the review of a snapshot repository’s contents and deletes any stale data not referenced by existing snapshots. + * Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot-repo-api.html | Elasticsearch API documentation} */ async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -76,7 +76,7 @@ export default class Snapshot { } /** - * Clones indices from one snapshot into another snapshot in the same repository. + * Clone a snapshot. Clone part or all of a snapshot into another snapshot in the same repository. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -114,7 +114,7 @@ export default class Snapshot { } /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -151,7 +151,7 @@ export default class Snapshot { } /** - * Creates a repository. + * Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster's global metadata must be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -187,7 +187,7 @@ export default class Snapshot { } /** - * Deletes one or more snapshots. + * Delete snapshots. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -220,7 +220,7 @@ export default class Snapshot { } /** - * Deletes a repository. + * Delete snapshot repositories.
When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -252,7 +252,7 @@ export default class Snapshot { } /** - * Returns information about a snapshot. + * Get snapshot information. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -285,7 +285,7 @@ export default class Snapshot { } /** - * Returns information about a repository. + * Get snapshot repository information. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -357,7 +357,7 @@ export default class Snapshot { } /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a snapshot repository. This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail. If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity. Until you do so: * It may not be possible to restore some snapshots from this repository. * Searchable snapshots may report errors when searched or may have unassigned shards. * Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored. * Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk. * Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents. If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. You must also identify what caused the damage and take action to prevent it from happening again. If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository. Avoid all operations which write to the repository while the verify repository integrity API is running.
If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: This API may not work correctly in a mixed-version cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -389,7 +389,7 @@ export default class Snapshot { } /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible. To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks. Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream ``` If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -426,7 +426,7 @@ export default class Snapshot { } /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
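// A minimal sketch, assuming a configured client and a hypothetical repository name;
// omitting the snapshot parameter limits the response to currently running snapshots,
// avoiding the expensive per-shard repository reads described above:
const snapshotStatus = await client.snapshot.status({ repository: 'my-repository' })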
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -470,7 +470,7 @@ export default class Snapshot { } /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index 1c931bc6b..51ec2731d 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -44,7 +44,7 @@ export default class Tasks { } /** - * Cancels a task, if it can be cancelled through an API. + * Cancel a task. A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -84,7 +84,7 @@ export default class Tasks { } /** - * Get task information. Returns information about the tasks currently executing in the cluster. + * Get task information. Get information about a task currently running in the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -116,7 +116,7 @@ export default class Tasks { } /** - * The task management API returns information about tasks currently executing on one or more nodes in the cluster. + * Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index 6efaaaf27..395613b49 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -44,22 +44,22 @@ export default class TextStructure { } /** - * Finds the structure of a text field in an index. + * Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-field-structure.html | Elasticsearch API documentation} */ - async findFieldStructure (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async findFieldStructure (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async findFieldStructure (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async findFieldStructure (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } @@ -73,22 +73,26 @@ export default class TextStructure { } /** - * Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch. + * Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
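// A minimal sketch, assuming a configured client; the pre-split log lines below are
// hypothetical sample input:
const structure = await client.textStructure.findMessageStructure({
  messages: [
    '[2024-05-01T10:15:30] INFO service started',
    '[2024-05-01T10:15:31] WARN disk usage at 91%'
  ]
})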
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-message-structure.html | Elasticsearch API documentation} */ - async findMessageStructure (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async findMessageStructure (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async findMessageStructure (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async findMessageStructure (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['messages'] const querystring: Record = {} - const body = undefined + const body: Record = {} - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } @@ -102,7 +106,7 @@ export default class TextStructure { } /** - * Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. + * Find the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary text formats are not currently supported. The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb. The response from the API contains: * A couple of messages from the beginning of the text. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-structure.html | Elasticsearch API documentation} */ async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -135,7 +139,7 @@ export default class TextStructure { } /** - * Tests a Grok pattern on some text. + * Test a Grok pattern. Test a Grok pattern on one or more lines of text. 
The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/test-grok-pattern.html | Elasticsearch API documentation} */ async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index 9b56e861d..ffbaec1f5 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -421,7 +421,7 @@ export default class Transform { } /** - * Upgrades all transforms. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. + * Upgrade all transforms. Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. Resolve the issue then re-run the process again. A summary is returned when the upgrade is finished. To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/upgrade-transforms.html | Elasticsearch API documentation} */ async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index 324ab9edd..c76b4c4a9 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -44,7 +44,7 @@ export default class Watcher { } /** - * Acknowledges a watch, manually throttling the execution of the watch's actions. + * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions..ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-ack-watch.html | Elasticsearch API documentation} */ async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -84,7 +84,7 @@ export default class Watcher { } /** - * Activates a currently inactive watch. + * Activate a watch. A watch can be either active or inactive. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-activate-watch.html | Elasticsearch API documentation} */ async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -116,7 +116,7 @@ export default class Watcher { } /** - * Deactivates a currently active watch. + * Deactivate a watch. A watch can be either active or inactive. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-deactivate-watch.html | Elasticsearch API documentation} */ async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -148,7 +148,7 @@ export default class Watcher { } /** - * Removes a watch from Watcher. + * Delete a watch. When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. Deleting a watch does not delete any watch execution records related to this watch from the watch history. IMPORTANT: Deleting a watch must be done by using only this API. Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API. When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-delete-watch.html | Elasticsearch API documentation} */ async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -180,7 +180,7 @@ export default class Watcher { } /** - * This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can execute the watch without executing all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after execution. + * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.
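// A minimal sketch, assuming a configured client and a hypothetical watch ID: force a
// registered watch to run once, ignoring its condition and keeping the run out of the
// watch history:
const run = await client.watcher.executeWatch({
  id: 'cluster_health_watch',
  ignore_condition: true,
  record_execution: false
})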
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-execute-watch.html | Elasticsearch API documentation} */ async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -253,7 +253,7 @@ export default class Watcher { } /** - * Retrieves a watch by its ID. + * Get a watch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-get-watch.html | Elasticsearch API documentation} */ async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -285,7 +285,7 @@ export default class Watcher { } /** - * Creates a new watch, or updates an existing one. + * Create or update a watch. When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. IMPORTANT: You must use Kibana or this API to create a watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. When you add a watch you can also define its initial active state by setting the *active* parameter. When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-put-watch.html | Elasticsearch API documentation} */ async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -321,7 +321,7 @@ export default class Watcher { } /** - * Retrieves stored watches. + * Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-query-watches.html | Elasticsearch API documentation} */ async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -355,7 +355,7 @@ export default class Watcher { } /** - * Starts Watcher if it is not already running. + * Start the watch service. Start the Watcher service if it is not already running. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-start.html | Elasticsearch API documentation} */ async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -385,7 +385,7 @@ export default class Watcher { } /** - * Retrieves the current Watcher metrics. + * Get Watcher statistics. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -425,7 +425,7 @@ export default class Watcher { } /** - * Stops Watcher if it is running. + * Stop the watch service. Stop the Watcher service if it is running. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-stop.html | Elasticsearch API documentation} */ async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts index e8ad16753..b57d2d754 100644 --- a/src/api/api/xpack.ts +++ b/src/api/api/xpack.ts @@ -44,7 +44,7 @@ export default class Xpack { } /** - * Provides general information about the installed X-Pack features. + * Get information. The information provided by the API includes: * Build information including the build number and timestamp. * License information about the currently installed license. * Feature information for the features that are currently enabled and available under the current license. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/info-api.html | Elasticsearch API documentation} */ async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -74,7 +74,7 @@ export default class Xpack { } /** - * This API provides information about which features are currently enabled and available under the current license and some usage statistics. + * Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/usage-api.html | Elasticsearch API documentation} */ async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index fbe66a62f..b75242334 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -58,6 +58,7 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' export interface BulkRequest extends RequestBase { index?: IndexName + list_executed_pipelines?: boolean pipeline?: string refresh?: Refresh routing?: Routing @@ -67,6 +68,7 @@ export interface BulkRequest ex timeout?: Duration wait_for_active_shards?: WaitForActiveShards require_alias?: boolean + require_data_stream?: boolean operations?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] } @@ -5452,7 +5454,7 @@ export interface MappingFieldNamesField { enabled: boolean } -export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword' +export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'passthrough' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 
'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword' export interface MappingFlattenedProperty extends MappingPropertyBase { boost?: double @@ -5629,6 +5631,13 @@ export interface MappingObjectProperty extends MappingCorePropertyBase { export type MappingOnScriptError = 'fail' | 'continue' +export interface MappingPassthroughObjectProperty extends MappingCorePropertyBase { + type?: 'passthrough' + enabled?: boolean + priority?: integer + time_series_dimension?: boolean +} + export interface MappingPercolatorProperty extends MappingPropertyBase { type: 'percolator' } @@ -5640,7 +5649,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { type: 'point' } -export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty +export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | 
MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty export interface MappingPropertyBase { meta?: Record @@ -6863,6 +6872,7 @@ export interface CatAliasesAliasesRecord { export interface CatAliasesRequest extends CatCatRequestBase { name?: Names expand_wildcards?: ExpandWildcards + master_timeout?: Duration } export type CatAliasesResponse = CatAliasesAliasesRecord[] @@ -6907,6 +6917,7 @@ export interface CatAllocationRequest extends CatCatRequestBase { node_id?: NodeIds bytes?: Bytes local?: boolean + master_timeout?: Duration } export type CatAllocationResponse = CatAllocationAllocationRecord[] @@ -6924,6 +6935,7 @@ export interface CatComponentTemplatesComponentTemplate { export interface CatComponentTemplatesRequest extends CatCatRequestBase { name?: string local?: boolean + master_timeout?: Duration } export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] @@ -7027,15 +7039,12 @@ export interface CatHealthRequest extends CatCatRequestBase { export type CatHealthResponse = CatHealthHealthRecord[] -export interface CatHelpHelpRecord { - endpoint: string +export interface CatHelpRequest { } -export interface CatHelpRequest extends CatCatRequestBase { +export interface CatHelpResponse { } -export type CatHelpResponse = CatHelpHelpRecord[] - export interface CatIndicesIndicesRecord { health?: string h?: string @@ -7335,6 +7344,7 @@ export interface CatIndicesRequest extends CatCatRequestBase { include_unloaded_segments?: boolean pri?: boolean time?: TimeUnit + master_timeout?: Duration } export type CatIndicesResponse = CatIndicesIndicesRecord[] @@ -7350,6 +7360,7 @@ export interface CatMasterMasterRecord { export interface CatMasterRequest extends CatCatRequestBase { local?: boolean + master_timeout?: Duration } export type CatMasterResponse = CatMasterMasterRecord[] @@ -7404,7 +7415,7 @@ export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { bytes?: Bytes h?: CatCatDfaColumns s?: CatCatDfaColumns - time?: Duration + time?: TimeUnit } export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] @@ -7650,6 +7661,7 @@ export interface CatMlTrainedModelsRequest extends CatCatRequestBase { s?: CatCatTrainedModelsColumns from?: integer size?: integer + time?: TimeUnit } export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[] @@ -7718,6 +7730,7 @@ export interface CatNodeattrsNodeAttributesRecord { export interface CatNodeattrsRequest extends CatCatRequestBase { local?: boolean + master_timeout?: Duration } export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] @@ -7996,6 +8009,8 @@ export interface CatNodesRequest extends CatCatRequestBase { bytes?: Bytes full_id?: boolean | string include_unloaded_segments?: boolean + master_timeout?: Duration + time?: TimeUnit } export type CatNodesResponse = CatNodesNodesRecord[] @@ -8013,6 +8028,8 @@ export interface CatPendingTasksPendingTasksRecord { export interface CatPendingTasksRequest extends CatCatRequestBase { local?: boolean + master_timeout?: Duration + time?: TimeUnit } export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] @@ -8032,7 +8049,9 @@ export interface CatPluginsPluginsRecord { } export interface CatPluginsRequest extends CatCatRequestBase { + include_bootstrap?: boolean local?: boolean + master_timeout?: Duration } export type CatPluginsResponse = CatPluginsPluginsRecord[] @@ -8100,6 +8119,7 @@ export interface CatRecoveryRequest 
extends CatCatRequestBase { active_only?: boolean bytes?: Bytes detailed?: boolean + time?: TimeUnit } export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] @@ -8112,6 +8132,8 @@ export interface CatRepositoriesRepositoriesRecord { } export interface CatRepositoriesRequest extends CatCatRequestBase { + local?: boolean + master_timeout?: Duration } export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] @@ -8120,6 +8142,7 @@ export interface CatSegmentsRequest extends CatCatRequestBase { index?: Indices bytes?: Bytes local?: boolean + master_timeout?: Duration } export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] @@ -8169,6 +8192,8 @@ export interface CatSegmentsSegmentsRecord { export interface CatShardsRequest extends CatCatRequestBase { index?: Indices bytes?: Bytes + master_timeout?: Duration + time?: TimeUnit } export type CatShardsResponse = CatShardsShardsRecord[] @@ -8391,6 +8416,8 @@ export interface CatShardsShardsRecord { export interface CatSnapshotsRequest extends CatCatRequestBase { repository?: Names ignore_unavailable?: boolean + master_timeout?: Duration + time?: TimeUnit } export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] @@ -8432,8 +8459,11 @@ export interface CatSnapshotsSnapshotsRecord { export interface CatTasksRequest extends CatCatRequestBase { actions?: string[] detailed?: boolean - node_id?: string[] + nodes?: string[] parent_task_id?: string + time?: TimeUnit + timeout?: Duration + wait_for_completion?: boolean } export type CatTasksResponse = CatTasksTasksRecord[] @@ -8476,6 +8506,7 @@ export interface CatTasksTasksRecord { export interface CatTemplatesRequest extends CatCatRequestBase { name?: Name local?: boolean + master_timeout?: Duration } export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] @@ -8498,6 +8529,7 @@ export interface CatThreadPoolRequest extends CatCatRequestBase { thread_pool_patterns?: Names time?: TimeUnit local?: boolean + master_timeout?: Duration } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -9876,12 +9908,36 @@ export interface ConnectorSyncJobCancelResponse { result: Result } +export interface ConnectorSyncJobCheckInRequest extends RequestBase { + connector_sync_job_id: Id +} + +export interface ConnectorSyncJobCheckInResponse { +} + +export interface ConnectorSyncJobClaimRequest extends RequestBase { + connector_sync_job_id: Id + sync_cursor?: any + worker_hostname: string +} + +export interface ConnectorSyncJobClaimResponse { +} + export interface ConnectorSyncJobDeleteRequest extends RequestBase { connector_sync_job_id: Id } export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase +export interface ConnectorSyncJobErrorRequest extends RequestBase { + connector_sync_job_id: Id + error: string +} + +export interface ConnectorSyncJobErrorResponse { +} + export interface ConnectorSyncJobGetRequest extends RequestBase { connector_sync_job_id: Id } @@ -9948,6 +10004,15 @@ export interface ConnectorUpdateErrorResponse { result: Result } +export interface ConnectorUpdateFeaturesRequest extends RequestBase { + connector_id: Id + features: ConnectorConnectorFeatures +} + +export interface ConnectorUpdateFeaturesResponse { + result: Result +} + export interface ConnectorUpdateFilteringRequest extends RequestBase { connector_id: Id filtering?: ConnectorFilteringConfig[] @@ -10164,6 +10229,7 @@ export interface EqlEqlSearchResponseBase { took?: DurationValue timed_out?: boolean hits: EqlEqlHits + shard_failures?: ShardFailure[] } export interface 
EqlHitsEvent { @@ -10221,6 +10287,8 @@ export interface EqlSearchRequest extends RequestBase { keep_alive?: Duration keep_on_completion?: boolean wait_for_completion_timeout?: Duration + allow_partial_search_results?: boolean + allow_partial_sequence_results?: boolean size?: uint fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[] result_position?: EqlSearchResultPosition @@ -12675,7 +12743,16 @@ export interface IngestCsvProcessor extends IngestProcessorBase { export interface IngestDatabaseConfiguration { name: Name - maxmind: IngestMaxmind + maxmind?: IngestMaxmind + ipinfo?: IngestIpinfo +} + +export interface IngestDatabaseConfigurationFull { + web?: IngestWeb + local?: IngestLocal + name: Name + maxmind?: IngestMaxmind + ipinfo?: IngestIpinfo } export interface IngestDateIndexNameProcessor extends IngestProcessorBase { @@ -12827,6 +12904,9 @@ export interface IngestIpLocationProcessor extends IngestProcessorBase { download_database_on_pipeline_creation?: boolean } +export interface IngestIpinfo { +} + export interface IngestJoinProcessor extends IngestProcessorBase { field: Field separator: string @@ -12857,6 +12937,10 @@ export interface IngestKeyValueProcessor extends IngestProcessorBase { value_split: string } +export interface IngestLocal { + type: string +} + export interface IngestLowercaseProcessor extends IngestProcessorBase { field: Field ignore_missing?: boolean @@ -13064,6 +13148,9 @@ export interface IngestUserAgentProcessor extends IngestProcessorBase { export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version' +export interface IngestWeb { +} + export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { id: Ids master_timeout?: Duration @@ -13072,6 +13159,14 @@ export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase +export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase { + id: Ids + master_timeout?: Duration + timeout?: Duration +} + +export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase + export interface IngestDeletePipelineRequest extends RequestBase { id: Id master_timeout?: Duration @@ -13122,6 +13217,23 @@ export interface IngestGetGeoipDatabaseResponse { databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[] } +export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata { + id: Id + version: VersionNumber + modified_date_millis?: EpochTime + modified_date?: EpochTime + database: IngestDatabaseConfigurationFull +} + +export interface IngestGetIpLocationDatabaseRequest extends RequestBase { + id?: Ids + master_timeout?: Duration +} + +export interface IngestGetIpLocationDatabaseResponse { + databases: IngestGetIpLocationDatabaseDatabaseConfigurationMetadata[] +} + export interface IngestGetPipelineRequest extends RequestBase { id?: Id master_timeout?: Duration @@ -13147,6 +13259,15 @@ export interface IngestPutGeoipDatabaseRequest extends RequestBase { export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase +export interface IngestPutIpLocationDatabaseRequest extends RequestBase { + id: Id + master_timeout?: Duration + timeout?: Duration + configuration?: IngestDatabaseConfiguration +} + +export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase + export interface IngestPutPipelineRequest extends RequestBase { id: Id master_timeout?: Duration @@ -13321,10 +13442,10 @@ export interface LicensePostStartTrialResponse 
{ export interface LogstashPipeline { description: string last_modified: DateTime - pipeline_metadata: LogstashPipelineMetadata - username: string pipeline: string + pipeline_metadata: LogstashPipelineMetadata pipeline_settings: LogstashPipelineSettings + username: string } export interface LogstashPipelineMetadata { @@ -13419,6 +13540,12 @@ export interface MigrationPostFeatureUpgradeResponse { features: MigrationPostFeatureUpgradeMigrationFeature[] } +export interface MlAdaptiveAllocationsSettings { + enabled: boolean + min_number_of_allocations?: integer + max_number_of_allocations?: integer +} + export interface MlAnalysisConfig { bucket_span?: Duration categorization_analyzer?: MlCategorizationAnalyzer @@ -13449,7 +13576,7 @@ export interface MlAnalysisConfigRead { export interface MlAnalysisLimits { categorization_examples_limit?: long - model_memory_limit?: string + model_memory_limit?: ByteSize } export interface MlAnalysisMemoryLimit { @@ -13601,6 +13728,14 @@ export interface MlClassificationInferenceOptions { top_classes_results_field?: string } +export interface MlCommonTokenizationConfig { + do_lower_case?: boolean + max_sequence_length?: integer + span?: integer + truncate?: MlTokenizationTruncate + with_special_tokens?: boolean +} + export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' export type MlCustomSettings = any @@ -13690,15 +13825,16 @@ export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { assignment_explanation?: string datafeed_id: Id - node?: MlDiscoveryNode + node?: MlDiscoveryNodeCompact state: MlDatafeedState - timing_stats: MlDatafeedTimingStats + timing_stats?: MlDatafeedTimingStats running_state?: MlDatafeedRunningState } export interface MlDatafeedTimingStats { bucket_count: long exponential_average_search_time_per_hour_ms: DurationValue + exponential_average_calculation_context?: MlExponentialAverageCalculationContext job_id: Id search_count: long total_search_time_ms: DurationValue @@ -13890,6 +14026,7 @@ export interface MlDataframeAnalyticsSummary { model_memory_limit?: string source: MlDataframeAnalyticsSource version?: VersionString + _meta?: Metadata } export interface MlDataframeEvaluationClassification { @@ -13995,21 +14132,48 @@ export interface MlDetectorRead { use_null?: boolean } -export interface MlDiscoveryNode { - attributes: Record +export interface MlDetectorUpdate { + detector_index: integer + description?: string + custom_rules?: MlDetectionRule[] +} + +export type MlDiscoveryNode = Partial> + +export interface MlDiscoveryNodeCompact { + name: Name ephemeral_id: Id id: Id - name: Name transport_address: TransportAddress + attributes: Record +} + +export interface MlDiscoveryNodeContent { + name?: Name + ephemeral_id: Id + transport_address: TransportAddress + external_id: string + attributes: Record + roles: string[] + version: VersionString + min_index_version: integer + max_index_version: integer } export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over' +export interface MlExponentialAverageCalculationContext { + incremental_metric_value_ms: DurationValue + latest_timestamp?: EpochTime + previous_exponential_average_ms?: DurationValue +} + export interface MlFillMaskInferenceOptions { mask_token?: string num_top_classes?: integer tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlFillMaskInferenceUpdateOptions { @@ -14197,7 +14361,7 @@ export interface MlJobStats { forecasts_stats: 
MlJobForecastStatistics job_id: string model_size_stats: MlModelSizeStats - node?: MlDiscoveryNode + node?: MlDiscoveryNodeCompact open_time?: DateTime state: MlJobState timing_stats: MlJobTimingStats @@ -14217,6 +14381,23 @@ export interface MlJobTimingStats { export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' +export interface MlModelPackageConfig { + create_time?: EpochTime + description?: string + inference_config?: Record + metadata?: Metadata + minimum_version?: string + model_repository?: string + model_type?: string + packaged_model_id: Id + platform_architecture?: string + prefix_strings?: MlTrainedModelPrefixStrings + size?: ByteSize + sha256?: string + tags?: string[] + vocabulary_file?: string +} + export interface MlModelPlotConfig { annotations_enabled?: boolean enabled?: boolean @@ -14231,6 +14412,7 @@ export interface MlModelSizeStats { model_bytes: ByteSize model_bytes_exceeded?: ByteSize model_bytes_memory_limit?: ByteSize + output_memory_allocator_bytes?: ByteSize peak_model_bytes?: ByteSize assignment_memory_basis?: string result_type: string @@ -14280,20 +14462,11 @@ export interface MlNerInferenceUpdateOptions { results_field?: string } -export interface MlNlpBertTokenizationConfig { - do_lower_case?: boolean - with_special_tokens?: boolean - max_sequence_length?: integer - truncate?: MlTokenizationTruncate - span?: integer +export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig { } -export interface MlNlpRobertaTokenizationConfig { +export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig { add_prefix_space?: boolean - with_special_tokens?: boolean - max_sequence_length?: integer - truncate?: MlTokenizationTruncate - span?: integer } export interface MlNlpTokenizationUpdateOptions { @@ -14317,7 +14490,7 @@ export interface MlOverallBucket { overall_score: double result_type: string timestamp: EpochTime - timestamp_string: DateTime + timestamp_string?: DateTime } export interface MlOverallBucketJob { @@ -14405,6 +14578,7 @@ export interface MlTextEmbeddingInferenceOptions { embedding_size?: integer tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlTextEmbeddingInferenceUpdateOptions { @@ -14415,6 +14589,7 @@ export interface MlTextEmbeddingInferenceUpdateOptions { export interface MlTextExpansionInferenceOptions { tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlTextExpansionInferenceUpdateOptions { @@ -14429,6 +14604,7 @@ export interface MlTimingStats { export interface MlTokenizationConfigContainer { bert?: MlNlpBertTokenizationConfig + bert_ja?: MlNlpBertTokenizationConfig mpnet?: MlNlpBertTokenizationConfig roberta?: MlNlpRobertaTokenizationConfig } @@ -14459,27 +14635,31 @@ export interface MlTotalFeatureImportanceStatistics { } export interface MlTrainedModelAssignment { + adaptive_allocations?: MlAdaptiveAllocationsSettings | null assignment_state: MlDeploymentAssignmentState max_assigned_allocations?: integer + reason?: string routing_table: Record start_time: DateTime task_parameters: MlTrainedModelAssignmentTaskParameters } export interface MlTrainedModelAssignmentRoutingTable { - reason: string + reason?: string routing_state: MlRoutingState current_allocations: integer target_allocations: integer } export interface MlTrainedModelAssignmentTaskParameters { - model_bytes: integer + model_bytes: ByteSize model_id: Id deployment_id: Id - cache_size: ByteSize + 
cache_size?: ByteSize number_of_allocations: integer priority: MlTrainingPriority + per_deployment_memory_bytes: ByteSize + per_allocation_memory_bytes: ByteSize queue_capacity: integer threads_per_allocation: integer } @@ -14502,6 +14682,7 @@ export interface MlTrainedModelConfig { license_level?: string metadata?: MlTrainedModelConfigMetadata model_size_bytes?: ByteSize + model_package?: MlModelPackageConfig location?: MlTrainedModelLocation prefix_strings?: MlTrainedModelPrefixStrings } @@ -14524,36 +14705,45 @@ export interface MlTrainedModelDeploymentAllocationStatus { } export interface MlTrainedModelDeploymentNodesStats { - average_inference_time_ms: DurationValue - error_count: integer - inference_count: integer - last_access: long - node: MlDiscoveryNode - number_of_allocations: integer - number_of_pending_requests: integer - rejection_execution_count: integer + average_inference_time_ms?: DurationValue + average_inference_time_ms_last_minute?: DurationValue + average_inference_time_ms_excluding_cache_hits?: DurationValue + error_count?: integer + inference_count?: long + inference_cache_hit_count?: long + inference_cache_hit_count_last_minute?: long + last_access?: EpochTime + node?: MlDiscoveryNode + number_of_allocations?: integer + number_of_pending_requests?: integer + peak_throughput_per_minute: long + rejection_execution_count?: integer routing_state: MlTrainedModelAssignmentRoutingTable - start_time: EpochTime - threads_per_allocation: integer - timeout_count: integer + start_time?: EpochTime + threads_per_allocation?: integer + throughput_last_minute: integer + timeout_count?: integer } export interface MlTrainedModelDeploymentStats { - allocation_status: MlTrainedModelDeploymentAllocationStatus + adaptive_allocations?: MlAdaptiveAllocationsSettings + allocation_status?: MlTrainedModelDeploymentAllocationStatus cache_size?: ByteSize deployment_id: Id - error_count: integer - inference_count: integer + error_count?: integer + inference_count?: integer model_id: Id nodes: MlTrainedModelDeploymentNodesStats[] - number_of_allocations: integer - queue_capacity: integer - rejected_execution_count: integer - reason: string + number_of_allocations?: integer + peak_throughput_per_minute: long + priority: MlTrainingPriority + queue_capacity?: integer + rejected_execution_count?: integer + reason?: string start_time: EpochTime - state: MlDeploymentAssignmentState - threads_per_allocation: integer - timeout_count: integer + state?: MlDeploymentAssignmentState + threads_per_allocation?: integer + timeout_count?: integer } export interface MlTrainedModelEntities { @@ -15187,6 +15377,7 @@ export interface MlGetTrainedModelsRequest extends RequestBase { exclude_generated?: boolean from?: integer include?: MlInclude + include_model_definition?: boolean size?: integer tags?: string | string[] } @@ -15237,9 +15428,11 @@ export interface MlInfoDefaults { } export interface MlInfoLimits { - max_model_memory_limit?: string - effective_max_model_memory_limit: string - total_ml_memory: string + max_single_ml_node_processors?: integer + total_ml_processors?: integer + max_model_memory_limit?: ByteSize + effective_max_model_memory_limit?: ByteSize + total_ml_memory: ByteSize } export interface MlInfoNativeCode { @@ -15284,21 +15477,24 @@ export interface MlPostDataRequest extends RequestBase { } export interface MlPostDataResponse { - bucket_count: long - earliest_record_timestamp: long - empty_bucket_count: long + job_id: Id + processed_record_count: long + processed_field_count: long 
input_bytes: long input_field_count: long - input_record_count: long invalid_date_count: long - job_id: Id - last_data_time: integer - latest_record_timestamp: long missing_field_count: long out_of_order_timestamp_count: long - processed_field_count: long - processed_record_count: long + empty_bucket_count: long sparse_bucket_count: long + bucket_count: long + earliest_record_timestamp?: EpochTime + latest_record_timestamp?: EpochTime + last_data_time?: EpochTime + latest_empty_bucket_timestamp?: EpochTime + latest_sparse_bucket_timestamp?: EpochTime + input_record_count: long + log_time?: EpochTime } export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig { @@ -15359,6 +15555,7 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase { description?: string dest: MlDataframeAnalyticsDestination max_num_threads?: integer + _meta?: Metadata model_memory_limit?: string source: MlDataframeAnalyticsSource headers?: HttpHeaders @@ -15375,6 +15572,7 @@ export interface MlPutDataFrameAnalyticsResponse { dest: MlDataframeAnalyticsDestination id: Id max_num_threads: integer + _meta?: Metadata model_memory_limit: string source: MlDataframeAnalyticsSource version: VersionString @@ -15387,6 +15585,8 @@ export interface MlPutDatafeedRequest extends RequestBase { ignore_throttled?: boolean ignore_unavailable?: boolean aggregations?: Record + /** @alias aggregations */ + aggs?: Record chunking_config?: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Duration @@ -15436,6 +15636,10 @@ export interface MlPutFilterResponse { export interface MlPutJobRequest extends RequestBase { job_id: Id + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean allow_lazy_open?: boolean analysis_config: MlAnalysisConfig analysis_limits?: MlAnalysisLimits @@ -15786,7 +15990,7 @@ export interface MlUpdateJobRequest extends RequestBase { renormalization_window_days?: long results_retention_days?: long groups?: string[] - detectors?: MlDetector[] + detectors?: MlDetectorUpdate[] per_partition_categorization?: MlPerPartitionCategorization } @@ -17134,6 +17338,8 @@ export interface SearchApplicationEventDataStream { name: IndexName } +export type SearchApplicationEventType = 'page_view' | 'search' | 'search_click' + export interface SearchApplicationSearchApplication extends SearchApplicationSearchApplicationParameters { name: Name updated_at_millis: EpochTime @@ -17184,6 +17390,18 @@ export interface SearchApplicationListResponse { results: SearchApplicationSearchApplication[] } +export interface SearchApplicationPostBehavioralAnalyticsEventRequest extends RequestBase { + collection_name: Name + event_type: SearchApplicationEventType + debug?: boolean + payload?: any +} + +export interface SearchApplicationPostBehavioralAnalyticsEventResponse { + accepted: boolean + event?: any +} + export interface SearchApplicationPutRequest extends RequestBase { name: Name create?: boolean @@ -17204,6 +17422,14 @@ export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestB export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase +export interface SearchApplicationRenderQueryRequest extends RequestBase { + name: Name + params?: Record +} + +export interface SearchApplicationRenderQueryResponse { +} + export interface SearchApplicationSearchRequest extends RequestBase { name: Name typed_keys?: boolean @@ -18064,6 +18290,44 
@@ export interface SecurityInvalidateTokenResponse { previously_invalidated_tokens: long } +export interface SecurityOidcAuthenticateRequest extends RequestBase { + nonce: string + realm?: string + redirect_uri: string + state: string +} + +export interface SecurityOidcAuthenticateResponse { + access_token: string + expires_in: integer + refresh_token: string + type: string +} + +export interface SecurityOidcLogoutRequest extends RequestBase { + access_token: string + refresh_token?: string +} + +export interface SecurityOidcLogoutResponse { + redirect: string +} + +export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase { + iss?: string + login_hint?: string + nonce?: string + realm?: string + state?: string +} + +export interface SecurityOidcPrepareAuthenticationResponse { + nonce: string + realm: string + redirect: string + state: string +} + export interface SecurityPutPrivilegesActions { actions: string[] application?: string @@ -19213,10 +19477,12 @@ export interface TasksListRequest extends RequestBase { export type TasksListResponse = TasksTaskListResponseBase -export interface TextStructureFindStructureFieldStat { +export type TextStructureEcsCompatibilityType = 'disabled' | 'v1' + +export interface TextStructureFieldStat { count: integer cardinality: integer - top_hits: TextStructureFindStructureTopHit[] + top_hits: TextStructureTopHit[] mean_value?: integer median_value?: integer max_value?: integer @@ -19225,6 +19491,81 @@ export interface TextStructureFindStructureFieldStat { latest?: string } +export type TextStructureFormatType = 'delimited' | 'ndjson' | 'semi_structured_text' | 'xml' + +export interface TextStructureTopHit { + count: long + value: any +} + +export interface TextStructureFindFieldStructureRequest extends RequestBase { + column_names?: string + delimiter?: string + documents_to_sample?: uint + ecs_compatibility?: TextStructureEcsCompatibilityType + explain?: boolean + field: Field + format?: TextStructureFormatType + grok_pattern?: GrokPattern + index: IndexName + quote?: string + should_trim_fields?: boolean + timeout?: Duration + timestamp_field?: Field + timestamp_format?: string +} + +export interface TextStructureFindFieldStructureResponse { + charset: string + ecs_compatibility?: TextStructureEcsCompatibilityType + field_stats: Record + format: TextStructureFormatType + grok_pattern?: GrokPattern + java_timestamp_formats?: string[] + joda_timestamp_formats?: string[] + ingest_pipeline: IngestPipelineConfig + mappings: MappingTypeMapping + multiline_start_pattern?: string + need_client_timezone: boolean + num_lines_analyzed: integer + num_messages_analyzed: integer + sample_start: string + timestamp_field?: Field +} + +export interface TextStructureFindMessageStructureRequest extends RequestBase { + column_names?: string + delimiter?: string + ecs_compatibility?: TextStructureEcsCompatibilityType + explain?: boolean + format?: TextStructureFormatType + grok_pattern?: GrokPattern + quote?: string + should_trim_fields?: boolean + timeout?: Duration + timestamp_field?: Field + timestamp_format?: string + messages: string[] +} + +export interface TextStructureFindMessageStructureResponse { + charset: string + ecs_compatibility?: TextStructureEcsCompatibilityType + field_stats: Record + format: TextStructureFormatType + grok_pattern?: GrokPattern + java_timestamp_formats?: string[] + joda_timestamp_formats?: string[] + ingest_pipeline: IngestPipelineConfig + mappings: MappingTypeMapping + multiline_start_pattern?: string + 
need_client_timezone: boolean + num_lines_analyzed: integer + num_messages_analyzed: integer + sample_start: string + timestamp_field?: Field +} + export interface TextStructureFindStructureRequest { charset?: string column_names?: string @@ -19249,7 +19590,7 @@ export interface TextStructureFindStructureResponse { has_header_row?: boolean has_byte_order_marker: boolean format: string - field_stats: Record + field_stats: Record sample_start: string num_messages_analyzed: integer mappings: MappingTypeMapping @@ -19269,11 +19610,6 @@ export interface TextStructureFindStructureResponse { ingest_pipeline: IngestPipelineConfig } -export interface TextStructureFindStructureTopHit { - count: long - value: any -} - export interface TextStructureTestGrokPatternMatchedField { match: string offset: integer @@ -20777,7 +21113,6 @@ export interface SpecUtilsCommonCatQueryParameters { format?: string h?: Names help?: boolean - master_timeout?: Duration s?: Names v?: boolean } From 2b2a9947c7200dfc8ae14528f7e24d744f6c5948 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 13:21:31 -0600 Subject: [PATCH 453/647] Update dependency @types/node to v22.10.5 (#2547) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index baaae155f..d94e67408 100644 --- a/package.json +++ b/package.json @@ -60,7 +60,7 @@ "@sinonjs/fake-timers": "github:sinonjs/fake-timers#48f089f", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "22.10.1", + "@types/node": "22.10.5", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 63104b5e5e6d4b98e405c649ca6ddbe7994369c9 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 13 Jan 2025 15:44:45 +0000 Subject: [PATCH 454/647] Auto-generated code for main (#2566) --- .../41d24383d29b2808a65258a0a3256e96.asciidoc | 18 + .../5836b09198feb1269ed12839b416123d.asciidoc | 16 + .../790684b45bef2bb848ea932f0fd0cfbd.asciidoc | 35 + .../9250ac57ec81d5192e8ad4c462438489.asciidoc | 42 + .../bf3c3bc41c593a80faebef1df353e483.asciidoc | 22 + .../cdb7613b445e6ed6e8b473f9cae1af90.asciidoc | 35 + .../d3672a87a857ddb87519788236e57497.asciidoc | 28 + .../fff86117c47f974074284644e8a97a99.asciidoc | 18 + docs/reference.asciidoc | 1573 +++++++++++++---- src/api/api/cluster.ts | 6 +- src/api/api/connector.ts | 20 +- src/api/api/dangling_indices.ts | 6 +- src/api/api/indices.ts | 91 +- src/api/api/info.ts | 4 +- src/api/api/logstash.ts | 2 +- src/api/api/migration.ts | 8 +- src/api/api/ml.ts | 6 +- src/api/api/query_rules.ts | 8 +- src/api/api/rollup.ts | 4 +- src/api/api/searchable_snapshots.ts | 6 +- src/api/api/security.ts | 83 +- src/api/api/shutdown.ts | 8 +- src/api/api/simulate.ts | 20 +- src/api/api/snapshot.ts | 32 +- src/api/api/sql.ts | 8 +- src/api/api/synonyms.ts | 8 +- src/api/api/text_structure.ts | 4 +- src/api/api/watcher.ts | 40 +- src/api/types.ts | 363 +++- 29 files changed, 1978 insertions(+), 536 deletions(-) create mode 100644 docs/doc_examples/41d24383d29b2808a65258a0a3256e96.asciidoc create mode 100644 docs/doc_examples/5836b09198feb1269ed12839b416123d.asciidoc create mode 100644 docs/doc_examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc create mode 100644 docs/doc_examples/9250ac57ec81d5192e8ad4c462438489.asciidoc create mode 100644 
docs/doc_examples/bf3c3bc41c593a80faebef1df353e483.asciidoc create mode 100644 docs/doc_examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc create mode 100644 docs/doc_examples/d3672a87a857ddb87519788236e57497.asciidoc create mode 100644 docs/doc_examples/fff86117c47f974074284644e8a97a99.asciidoc diff --git a/docs/doc_examples/41d24383d29b2808a65258a0a3256e96.asciidoc b/docs/doc_examples/41d24383d29b2808a65258a0a3256e96.asciidoc new file mode 100644 index 000000000..67b3c97a4 --- /dev/null +++ b/docs/doc_examples/41d24383d29b2808a65258a0a3256e96.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "jinaai-index", + mappings: { + properties: { + content: { + type: "semantic_text", + inference_id: "jinaai-embeddings", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5836b09198feb1269ed12839b416123d.asciidoc b/docs/doc_examples/5836b09198feb1269ed12839b416123d.asciidoc new file mode 100644 index 000000000..12ea79855 --- /dev/null +++ b/docs/doc_examples/5836b09198feb1269ed12839b416123d.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "jinaai-index", + query: { + semantic: { + field: "content", + query: "who inspired taking care of the sea?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc b/docs/doc_examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc new file mode 100644 index 000000000..d5144150f --- /dev/null +++ b/docs/doc_examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + all_of: { + ordered: false, + max_gaps: 1, + intervals: [ + { + match: { + query: "my favorite food", + max_gaps: 0, + ordered: true, + }, + }, + { + match: { + query: "cold porridge", + max_gaps: 4, + ordered: true, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9250ac57ec81d5192e8ad4c462438489.asciidoc b/docs/doc_examples/9250ac57ec81d5192e8ad4c462438489.asciidoc new file mode 100644 index 000000000..8a6e40755 --- /dev/null +++ b/docs/doc_examples/9250ac57ec81d5192e8ad4c462438489.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "jinaai-index", + operations: [ + { + index: { + _index: "jinaai-index", + _id: "1", + }, + }, + { + content: + "Sarah Johnson is a talented marine biologist working at the Oceanographic Institute. Her groundbreaking research on coral reef ecosystems has garnered international attention and numerous accolades.", + }, + { + index: { + _index: "jinaai-index", + _id: "2", + }, + }, + { + content: + "She spends months at a time diving in remote locations, meticulously documenting the intricate relationships between various marine species. 
", + }, + { + index: { + _index: "jinaai-index", + _id: "3", + }, + }, + { + content: + "Her dedication to preserving these delicate underwater environments has inspired a new generation of conservationists.", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf3c3bc41c593a80faebef1df353e483.asciidoc b/docs/doc_examples/bf3c3bc41c593a80faebef1df353e483.asciidoc new file mode 100644 index 000000000..b0b47f121 --- /dev/null +++ b/docs/doc_examples/bf3c3bc41c593a80faebef1df353e483.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "jinaai-rerank", + inference_config: { + service: "jinaai", + service_settings: { + api_key: "", + model_id: "jina-reranker-v2-base-multilingual", + }, + task_settings: { + top_n: 10, + return_documents: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc b/docs/doc_examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc new file mode 100644 index 000000000..ee14809f6 --- /dev/null +++ b/docs/doc_examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + all_of: { + ordered: true, + max_gaps: 1, + intervals: [ + { + match: { + query: "my favorite food", + max_gaps: 0, + ordered: true, + }, + }, + { + match: { + query: "cold porridge", + max_gaps: 4, + ordered: true, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3672a87a857ddb87519788236e57497.asciidoc b/docs/doc_examples/d3672a87a857ddb87519788236e57497.asciidoc new file mode 100644 index 000000000..dad59f975 --- /dev/null +++ b/docs/doc_examples/d3672a87a857ddb87519788236e57497.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "jinaai-index", + retriever: { + text_similarity_reranker: { + retriever: { + standard: { + query: { + semantic: { + field: "content", + query: "who inspired taking care of the sea?", + }, + }, + }, + }, + field: "content", + rank_window_size: 100, + inference_id: "jinaai-rerank", + inference_text: "who inspired taking care of the sea?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fff86117c47f974074284644e8a97a99.asciidoc b/docs/doc_examples/fff86117c47f974074284644e8a97a99.asciidoc new file mode 100644 index 000000000..339435f69 --- /dev/null +++ b/docs/doc_examples/fff86117c47f974074284644e8a97a99.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "jinaai-embeddings", + inference_config: { + service: "jinaai", + service_settings: { + model_id: "jina-embeddings-v3", + api_key: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 6a8ad651c..449cc261c 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -609,9 +609,9 @@ Set to 
all or any positive integer up to the total number of shards in the index [discrete] === info Get cluster info. -Returns basic information about the cluster. +Get basic build, version, and cluster information. -{ref}/index.html[Endpoint documentation] +{ref}/rest-api-root.html[Endpoint documentation] [source,ts] ---- client.info() @@ -1269,6 +1269,7 @@ Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. ** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. ** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. ** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. @@ -2468,6 +2469,7 @@ client.ccr.deleteAutoFollowPattern({ name }) * *Request (object):* ** *`name` (string)*: The name of the auto follow pattern. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== follow @@ -2505,6 +2507,7 @@ be deferred until the total bytes of queued operations goes below the limit. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. ** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Settings to override from the leader index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. ** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the @@ -2527,6 +2530,7 @@ client.ccr.followInfo({ index }) * *Request (object):* ** *`index` (string | string[])*: A list of index patterns; use `_all` to perform the operation on all indices +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== follow_stats @@ -2545,6 +2549,7 @@ client.ccr.followStats({ index }) * *Request (object):* ** *`index` (string | string[])*: A list of index patterns; use `_all` to perform the operation on all indices +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== forget_follower @@ -2576,6 +2581,7 @@ client.ccr.forgetFollower({ index }) ** *`follower_index` (Optional, string)* ** *`follower_index_uuid` (Optional, string)* ** *`leader_remote_cluster` (Optional, string)* +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_auto_follow_pattern @@ -2593,6 +2599,7 @@ client.ccr.getAutoFollowPattern({ ... }) * *Request (object):* ** *`name` (Optional, string)*: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== pause_auto_follow_pattern @@ -2616,6 +2623,7 @@ client.ccr.pauseAutoFollowPattern({ name }) * *Request (object):* ** *`name` (string)*: The name of the auto follow pattern that should pause discovering new indices to follow. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== pause_follow @@ -2636,6 +2644,7 @@ client.ccr.pauseFollow({ index }) * *Request (object):* ** *`index` (string)*: The name of the follower index that should pause following its leader index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== put_auto_follow_pattern @@ -2673,6 +2682,7 @@ client.ccr.putAutoFollowPattern({ name, remote_cluster }) ** *`max_write_buffer_size` (Optional, number | string)*: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. ** *`max_write_request_operation_count` (Optional, number)*: The maximum number of operations per bulk write request executed on the follower. ** *`max_write_request_size` (Optional, number | string)*: The maximum total bytes of operations per bulk write request executed on the follower. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== resume_auto_follow_pattern @@ -2692,6 +2702,7 @@ client.ccr.resumeAutoFollowPattern({ name }) * *Request (object):* ** *`name` (string)*: The name of the auto follow pattern to resume discovering new indices to follow. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== resume_follow @@ -2722,6 +2733,7 @@ client.ccr.resumeFollow({ index }) ** *`max_write_request_operation_count` (Optional, number)* ** *`max_write_request_size` (Optional, string)* ** *`read_poll_timeout` (Optional, string | -1 | 0)* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== stats @@ -2731,9 +2743,15 @@ This API returns stats about auto-following and the same shard-level stats as th {ref}/ccr-get-stats.html[Endpoint documentation] [source,ts] ---- -client.ccr.stats() +client.ccr.stats({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== unfollow @@ -2755,6 +2773,7 @@ client.ccr.unfollow({ index }) * *Request (object):* ** *`index` (string)*: The name of the follower index that should be turned into a regular index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] === cluster @@ -2782,11 +2801,11 @@ client.cluster.allocationExplain({ ... }) ** *`shard` (Optional, number)*: Specifies the ID of the shard that you would like an explanation for. ** *`include_disk_info` (Optional, boolean)*: If true, returns information about disk usage and shard sizes. ** *`include_yes_decisions` (Optional, boolean)*: If true, returns YES decisions in explanation. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== delete_component_template Delete component templates. -Deletes component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. {ref}/indices-component-template.html[Endpoint documentation] @@ -2820,6 +2839,7 @@ client.cluster.deleteVotingConfigExclusions({ ... }) ==== Arguments * *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. ** *`wait_for_removal` (Optional, boolean)*: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from @@ -2853,7 +2873,7 @@ Defaults to false, which means information is retrieved from the master node. [discrete] ==== get_component_template Get component templates. -Retrieves information about component templates. +Get information about component templates. {ref}/indices-component-template.html[Endpoint documentation] [source,ts] @@ -3008,6 +3028,7 @@ client.cluster.postVotingConfigExclusions({ ... }) voting configuration. If specified, you may not also specify node_ids. ** *`node_ids` (Optional, string | string[])*: A list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. ** *`timeout` (Optional, string | -1 | 0)*: When adding a voting configuration exclusion, the API waits for the specified nodes to be excluded from the voting configuration before returning. If the timeout expires before the appropriate condition @@ -3016,7 +3037,6 @@ is satisfied, the request fails and returns an error. [discrete] ==== put_component_template Create or update a component template. -Creates or updates a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. @@ -3032,6 +3052,11 @@ Changes to component templates do not affect existing indices, including a strea You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. +**Applying component templates** + +You cannot directly apply a component template to a data stream or index. +To be applied, a component template must be included in an index template's `composed_of` list. 
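As an illustration of that composition, here is a minimal hedged sketch: a component template holding shared settings, then an index template that applies it through `composed_of`. The names `my-settings` and `my-index-template` and the pattern `my-data-*` are hypothetical placeholders, not part of the official examples.

[source,ts]
----
// Sketch only: create a component template with shared settings.
const componentResponse = await client.cluster.putComponentTemplate({
  name: "my-settings", // hypothetical name
  template: {
    settings: {
      number_of_shards: 1,
    },
  },
});
console.log(componentResponse);

// The component template takes effect only once an index template
// references it in its `composed_of` list.
const templateResponse = await client.indices.putIndexTemplate({
  name: "my-index-template", // hypothetical name
  index_patterns: ["my-data-*"], // hypothetical pattern
  composed_of: ["my-settings"],
});
console.log(templateResponse);
----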
+ {ref}/indices-component-template.html[Endpoint documentation] [source,ts] ---- @@ -3052,7 +3077,7 @@ If you don’t use Elastic Agent and want to disable all built-in component and This number isn't automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version. ** *`_meta` (Optional, Record)*: Optional user metadata about the component template. -May have any contents. This map is not automatically generated by Elasticsearch. +It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. ** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template @@ -3505,14 +3530,31 @@ client.connector.syncJobPost({ id }) [discrete] ==== sync_job_update_stats -Updates the stats fields in the connector sync job document. +Set the connector sync job stats. +Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. +You can also update `last_seen`. +This API is mainly used by the connector service for updating sync job information. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. {ref}/set-connector-sync-job-stats-api.html[Endpoint documentation] [source,ts] ---- -client.connector.syncJobUpdateStats() +client.connector.syncJobUpdateStats({ connector_sync_job_id, deleted_document_count, indexed_document_count, indexed_document_volume }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job. +** *`deleted_document_count` (number)*: The number of documents the sync job deleted. +** *`indexed_document_count` (number)*: The number of documents the sync job indexed. +** *`indexed_document_volume` (number)*: The total size of the data (in MiB) the sync job indexed. +** *`last_seen` (Optional, string | -1 | 0)*: The timestamp to use in the `last_seen` property for the connector sync job. +** *`metadata` (Optional, Record)*: The connector-specific metadata. +** *`total_document_count` (Optional, number)*: The total number of documents in the target index after the sync job finished. [discrete] ==== update_active_filtering @@ -3793,11 +3835,10 @@ client.connector.updateStatus({ connector_id, status }) [discrete] ==== delete_dangling_index Delete a dangling index. - If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. -{ref}/modules-gateway-dangling-indices.html[Endpoint documentation] +{ref}/dangling-index-delete.html[Endpoint documentation] [source,ts] ---- client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss }) @@ -3819,7 +3860,7 @@ Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
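A hedged sketch of that recovery flow follows: list the dangling indices first, then import one of them. The `index_uuid` value below is a hypothetical placeholder; in practice you would use a UUID reported by the list call.

[source,ts]
----
// Sketch only: discover dangling indices on the cluster.
const dangling = await client.danglingIndices.listDanglingIndices();
console.log(dangling.dangling_indices);

// `accept_data_loss` must be true to acknowledge that Elasticsearch
// cannot verify the imported data is complete.
const response = await client.danglingIndices.importDanglingIndex({
  index_uuid: "zmM4e0JtBkeUjiHD-example", // hypothetical UUID from the list above
  accept_data_loss: true,
});
console.log(response);
----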
-{ref}/modules-gateway-dangling-indices.html[Endpoint documentation] +{ref}/dangling-index-import.html[Endpoint documentation] [source,ts] ---- client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss }) @@ -3844,7 +3885,7 @@ For example, this can happen if you delete more than `cluster.indices.tombstones Use this API to list dangling indices, which you can then import or delete. -{ref}/modules-gateway-dangling-indices.html[Endpoint documentation] +{ref}/dangling-indices-list.html[Endpoint documentation] [source,ts] ---- client.danglingIndices.listDanglingIndices() @@ -3869,6 +3910,7 @@ client.enrich.deletePolicy({ name }) * *Request (object):* ** *`name` (string)*: Enrich policy to delete. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== execute_policy @@ -3886,6 +3928,7 @@ client.enrich.executePolicy({ name }) * *Request (object):* ** *`name` (string)*: Enrich policy to execute. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. ** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks other enrich policy execution requests until complete. [discrete] @@ -3905,6 +3948,7 @@ client.enrich.getPolicy({ ... }) * *Request (object):* ** *`name` (Optional, string | string[])*: List of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== put_policy @@ -3925,6 +3969,7 @@ client.enrich.putPolicy({ name }) ** *`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches enrich data to incoming documents based on a `geo_shape` query. ** *`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches enrich data to incoming documents based on a `term` query. ** *`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== stats @@ -3934,9 +3979,14 @@ Returns enrich coordinator statistics and information about enrich policies that {ref}/enrich-stats-api.html[Endpoint documentation] [source,ts] ---- -client.enrich.stats() +client.enrich.stats({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] === eql @@ -4111,9 +4161,14 @@ In order for a feature state to be listed in this API and recognized as a valid {ref}/get-features-api.html[Endpoint documentation] [source,ts] ---- -client.features.getFeatures() +client.features.getFeatures({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== reset_features @@ -4138,9 +4193,14 @@ IMPORTANT: The features installed on the node you submit this request to are the {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] ---- -client.features.resetFeatures() +client.features.resetFeatures({ ... 
}) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] === fleet @@ -4383,7 +4443,6 @@ To target all data streams and indices, use `*` or `_all`. ** *`only_errors` (Optional, boolean)*: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to an encountering an error while executing the policy, or attempting to use a policy that does not exist. ** *`only_managed` (Optional, boolean)*: Filters the returned indices to only indices that are managed by ILM. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_lifecycle @@ -4475,8 +4534,8 @@ client.ilm.moveToStep({ index, current_step, next_step }) * *Request (object):* ** *`index` (string)*: The name of the index whose lifecycle step is to change -** *`current_step` ({ action, name, phase })* -** *`next_step` ({ action, name, phase })* +** *`current_step` ({ action, name, phase })*: The step that the index is expected to be in. +** *`next_step` ({ action, name, phase })*: The step that you want to run. [discrete] ==== put_lifecycle @@ -4553,8 +4612,8 @@ client.ilm.start({ ... }) ==== Arguments * *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)* -** *`timeout` (Optional, string | -1 | 0)* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== stop @@ -4575,8 +4634,8 @@ client.ilm.stop({ ... }) ==== Arguments * *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)* -** *`timeout` (Optional, string | -1 | 0)* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] === indices @@ -4606,7 +4665,12 @@ client.indices.addBlock({ index, block }) [discrete] ==== analyze Get tokens from text analysis. -The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens. +The analyze API performs analysis on a text string and returns the resulting tokens. + +Generating excessive amount of tokens may cause a node to run out of memory. +The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. +If more than this limit of tokens gets generated, an error occurs. +The `_analyze` endpoint without a specified index will always use `10000` as its limit. {ref}/indices-analyze.html[Endpoint documentation] [source,ts] @@ -4641,6 +4705,10 @@ Clear the cache. Clear the cache of one or more indices. 
For data streams, the API clears the caches of the stream's backing indices. +By default, the clear cache API clears all caches. +To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. +To clear the cache only of specific fields, use the `fields` parameter. + {ref}/indices-clearcache.html[Endpoint documentation] [source,ts] ---- @@ -4689,10 +4757,32 @@ Cloning works as follows: IMPORTANT: Indices can only be cloned if they meet the following requirements: +* The index must be marked as read-only and have a cluster health status of green. * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. +The current write index on a data stream cannot be cloned. +In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. + +NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. + +**Monitor the cloning process** + +The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. + +The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. +At this point, all shards are in the state unassigned. +If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. + +Once the primary shard is allocated, it moves to state initializing, and the clone process begins. +When the clone operation completes, the shard will become active. +At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. + +**Wait for active shards** + +Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. + {ref}/indices-clone-index.html[Endpoint documentation] [source,ts] ---- @@ -4763,7 +4853,25 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== create Create an index. -Creates a new index. +You can use the create index API to add a new index to an Elasticsearch cluster. +When creating an index, you can specify the following: + +* Settings for the index. +* Mappings for fields in the index. +* Index aliases + +**Wait for active shards** + +By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. +The index creation response will indicate what happened. +For example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. +Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. +These values simply indicate whether the operation completed before the timeout. 
+If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. +If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). + +You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. +Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. {ref}/indices-create-index.html[Endpoint documentation] [source,ts] @@ -4838,7 +4946,12 @@ Supports a list of values, such as `open,hidden`. [discrete] ==== delete Delete indices. -Deletes one or more indices. +Deleting an index deletes its documents, shards, and metadata. +It does not delete related Kibana components, such as data views, visualizations, or dashboards. + +You cannot delete the current write index of a data stream. +To delete the index, you must roll over the data stream so a new write index is created. +You can then use the delete index API to delete the previous write index. {ref}/indices-delete-index.html[Endpoint documentation] [source,ts] @@ -4871,7 +4984,7 @@ If no response is received before the timeout expires, the request fails and ret Delete an alias. Removes a data stream or index from an alias. -{ref}/indices-aliases.html[Endpoint documentation] +{ref}/indices-delete-alias.html[Endpoint documentation] [source,ts] ---- client.indices.deleteAlias({ index, name }) @@ -4952,7 +5065,7 @@ client.indices.deleteIndexTemplate({ name }) [discrete] ==== delete_template -Deletes a legacy index template. +Delete a legacy index template. {ref}/indices-delete-template-v1.html[Endpoint documentation] [source,ts] @@ -4978,6 +5091,10 @@ Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. +NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. +Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. +The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. + {ref}/indices-disk-usage.html[Endpoint documentation] [source,ts] ---- @@ -5030,7 +5147,7 @@ client.indices.downsample({ index, target_index }) [discrete] ==== exists Check indices. -Checks if one or more indices, index aliases, or data streams exist. +Check if one or more indices, index aliases, or data streams exist. {ref}/indices-exists.html[Endpoint documentation] [source,ts] @@ -5103,7 +5220,10 @@ client.indices.existsIndexTemplate({ name }) [discrete] ==== exists_template Check existence of index templates. -Returns information about whether a particular index template exists. +Get information about whether index templates exist. +Index templates define settings, mappings, and aliases that can be applied automatically to new indices. 
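For instance, a minimal usage sketch of the existence check (the template name `template_1` is a hypothetical placeholder); the call resolves to a plain boolean:

[source,ts]
----
// Sketch only: check whether a legacy index template exists.
const exists = await client.indices.existsTemplate({
  name: "template_1", // hypothetical legacy template name
});
console.log(exists); // true or false
----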
+ +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. {ref}/indices-template-exists-v1.html[Endpoint documentation] [source,ts] @@ -5115,10 +5235,13 @@ client.indices.existsTemplate({ name }) ==== Arguments * *Request (object):* -** *`name` (string | string[])*: The comma separated names of the index templates -** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`name` (string | string[])*: A list of index template names used to limit the request. +Wildcard (`*`) expressions are supported. +** *`flat_settings` (Optional, boolean)*: Indicates whether to use a flat format for the response. +** *`local` (Optional, boolean)*: Indicates whether to get information from the local node only. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== explain_data_lifecycle @@ -5146,6 +5269,9 @@ Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. +The response body reports the per-shard usage count of the data structures that back the fields in the index. +A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. + {ref}/field-usage-stats.html[Endpoint documentation] [source,ts] ---- @@ -5165,10 +5291,6 @@ If the request can target data streams, this argument determines whether wildcar Supports a list of values, such as `open,hidden`. ** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. ** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. ** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). @@ -5226,6 +5348,46 @@ But force merge can cause very large (greater than 5 GB) segments to be produced So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. 
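For orientation, a minimal force merge call with this client might look like the following sketch; the index name is illustrative:

[source,ts]
----
// A sketch: force merge an index that no longer receives writes
// down to a single segment ('my-index-000001' is a hypothetical name).
await client.indices.forcemerge({
  index: 'my-index-000001',
  max_num_segments: 1
})
----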
+**Blocks during a force merge**
+
+Calls to this API block until the merge is complete (unless request contains `wait_for_completion=false`).
+If the client connection is lost before completion then the force merge process will continue in the background.
+Any new requests to force merge the same indices will also block until the ongoing force merge is complete.
+
+**Running force merge asynchronously**
+
+If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.
+However, you cannot cancel this task as the force merge task is not cancelable.
+Elasticsearch creates a record of this task as a document at `_tasks/`.
+When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.
+
+**Force merging multiple indices**
+
+You can force merge multiple indices with a single request by targeting:
+
+* One or more data streams that contain multiple backing indices
+* Multiple indices
+* One or more aliases
+* All data streams and indices in a cluster
+
+Each targeted shard is force-merged separately using the `force_merge` threadpool.
+By default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time.
+If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel.
+
+Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one.
+
+**Data streams and time-based indices**
+
+Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.
+In these cases, each index only receives indexing traffic for a certain period of time.
+Once an index receives no more writes, its shards can be force-merged to a single segment.
+This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.
+For example:
+
+```
+POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
+```
+
 {ref}/indices-forcemerge.html[Endpoint documentation]
 [source,ts]
 ----
@@ -5248,7 +5410,7 @@ client.indices.forcemerge({ ... })
 
 [discrete]
 ==== get
 Get index information.
-Returns information about one or more indices. For data streams, the API returns information about the
+Get information about one or more indices. For data streams, the API returns information about the
 stream’s backing indices.
 
 {ref}/indices-get-index.html[Endpoint documentation]
 [source,ts]
 ----
@@ -5280,8 +5442,6 @@ such as open,hidden.
 ==== get_alias
 Get aliases.
 Retrieves information for one or more data stream or index aliases.
-
-{ref}/indices-aliases.html[Endpoint documentation]
 [source,ts]
 ----
 client.indices.getAlias({ ... })
 ----
@@ -5331,6 +5491,18 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
 ** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response.
 ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
 If no response is received before the timeout expires, the request fails and returns an error.
+[discrete]
+==== get_data_lifecycle_stats
+Get data stream lifecycle stats.
+Get statistics about the data streams that are managed by a data stream lifecycle.
+ +{ref}/data-streams-get-lifecycle-stats.html[Endpoint documentation] +[source,ts] +---- +client.indices.getDataLifecycleStats() +---- + + [discrete] ==== get_data_stream Get data streams. @@ -5360,6 +5532,8 @@ Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. +This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. + {ref}/indices-get-field-mapping.html[Endpoint documentation] [source,ts] ---- @@ -5371,6 +5545,7 @@ client.indices.getFieldMapping({ fields }) * *Request (object):* ** *`fields` (string | string[])*: List or wildcard expression of fields used to limit returned information. +Supports wildcards (`*`). ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. @@ -5387,7 +5562,7 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== get_index_template Get index templates. -Returns information about one or more index templates. +Get information about one or more index templates. {ref}/indices-get-template.html[Endpoint documentation] [source,ts] @@ -5408,7 +5583,6 @@ client.indices.getIndexTemplate({ ... }) [discrete] ==== get_mapping Get mapping definitions. -Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. {ref}/indices-get-mapping.html[Endpoint documentation] @@ -5438,8 +5612,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== get_settings Get index settings. -Returns setting information for one or more indices. For data streams, -returns setting information for the stream’s backing indices. +Get setting information for one or more indices. +For data streams, it returns setting information for the stream's backing indices. {ref}/indices-get-settings.html[Endpoint documentation] [source,ts] @@ -5475,7 +5649,9 @@ error. [discrete] ==== get_template Get index templates. -Retrieves information about one or more index templates. +Get information about one or more index templates. + +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. {ref}/indices-get-template-v1.html[Endpoint documentation] [source,ts] @@ -5542,9 +5718,30 @@ client.indices.modifyDataStream({ actions }) [discrete] ==== open -Opens a closed index. +Open a closed index. For data streams, the API opens any closed backing indices. +A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. +It is not possible to index documents or to search for documents in a closed index. +This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. + +When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. +The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. + +You can open and close multiple indices. 
+An error is thrown if the request explicitly refers to a missing index.
+This behavior can be turned off by using the `ignore_unavailable=true` parameter.
+
+By default, you must explicitly name the indices you are opening or closing.
+To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
+This setting can also be changed with the cluster update settings API.
+
+Closed indices consume a significant amount of disk space which can cause problems in managed environments.
+Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
+
+Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.
+
 {ref}/indices-open-close.html[Endpoint documentation]
 [source,ts]
 ----
@@ -5673,6 +5870,32 @@ If no response is received before the timeout expires, the request fails and ret
 Create or update an index template.
 Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name.
+Index templates are applied during data stream or index creation.
+For data streams, these settings and mappings are applied when the stream's backing indices are created.
+Settings and mappings specified in a create index API request override any settings or mappings specified in an index template.
+Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.
+
+You can use C-style `/* *\/` block comments in index templates.
+You can include comments anywhere in the request body, except before the opening curly bracket.
+
+**Multiple matching templates**
+
+If multiple index templates match the name of a new index or data stream, the template with the highest priority is used.
+
+Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.
+
+**Composing aliases, mappings, and settings**
+
+When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.
+Any mappings, settings, or aliases from the parent index template are merged in next.
+Finally, any configuration on the index request itself is merged.
+Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.
+If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.
+This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.
+If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.
+If an entry already exists with the same key, then it is overwritten by the new definition.
+
 {ref}/indices-put-template.html[Endpoint documentation]
 [source,ts]
 ----
@@ -5698,9 +5921,13 @@ If no priority is specified the template is treated as though it is of priority
 This number is not automatically generated by Elasticsearch.
 ** *`version` (Optional, number)*: Version number used to manage index templates externally.
 This number is not automatically generated by Elasticsearch.
+External systems can use these version numbers to simplify template management.
+To unset a version, replace the template without specifying one.
 ** *`_meta` (Optional, Record)*: Optional user metadata about the index template.
-May have any contents.
-This map is not automatically generated by Elasticsearch.
+It may have any contents.
+It is not automatically generated or used by Elasticsearch.
+This user-defined object is stored in the cluster state, so keeping it short is preferable.
+To unset the metadata, replace the template without specifying it.
 ** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting.
 If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`.
 If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created.
@@ -5716,10 +5943,35 @@ If no response is received before the timeout expires, the request fails and ret
 
 [discrete]
 ==== put_mapping
 Update field mappings.
-Adds new fields to an existing data stream or index.
-You can also use this API to change the search settings of existing fields.
+Add new fields to an existing data stream or index.
+You can also use this API to change the search settings of existing fields and add new properties to existing object fields.
 For data streams, these changes are applied to all backing indices by default.
 
+**Add multi-fields to an existing field**
+
+Multi-fields let you index the same field in different ways.
+You can use this API to update the `fields` mapping parameter and enable multi-fields for an existing field.
+WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.
+You can populate the new multi-field with the update by query API.
+
+**Change supported mapping parameters for an existing field**
+
+The documentation for each mapping parameter indicates whether you can update it for an existing field using this API.
+For example, you can use the update mapping API to update the `ignore_above` parameter.
+
+**Change the mapping of an existing field**
+
+Except for supported mapping parameters, you can't change the mapping or field type of an existing field.
+Changing an existing field could invalidate data that's already indexed.
+
+If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.
+If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.
+
+**Rename a field**
+
+Renaming a field would invalidate data already indexed under the old field name.
+Instead, add an alias field to create an alternate field name.
+
 {ref}/indices-put-mapping.html[Endpoint documentation]
 [source,ts]
 ----
@@ -5766,8 +6018,21 @@ If no response is received before the timeout expires, the request fails and ret
 
 [discrete]
 ==== put_settings
 Update index settings.
-Changes dynamic index settings in real time. For data streams, index setting
-changes are applied to all backing indices by default.
+Changes dynamic index settings in real time.
+For data streams, index setting changes are applied to all backing indices by default.
+
+To revert a setting to the default value, use a null value.
+The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.
+To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.
+
+NOTE: You can only define new analyzers on closed indices.
+To add an analyzer, you must close the index, define the analyzer, and reopen the index.
+You cannot close the write index of a data stream.
+To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.
+Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.
+This affects searches and any new data added to the stream after the rollover.
+However, it does not affect the data stream's backing indices or their existing data.
+To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
 
 {ref}/indices-update-settings.html[Endpoint documentation]
 [source,ts]
 ----
@@ -5816,6 +6081,15 @@ Index templates are only applied during index creation. Changes to index templates do not affect existing indices.
 Settings and mappings specified in create index API requests override any settings or mappings specified in an index template.
+
+You can use C-style `/* *\/` block comments in index templates.
+You can include comments anywhere in the request body, except before the opening curly bracket.
+
+**Indices matching multiple templates**
+
+Multiple index templates can potentially match an index; in this case, both the settings and mappings are merged into the final configuration of the index.
+The order of the merging can be controlled using the `order` parameter, with lower order being applied first, and higher orders overriding them.
+NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.
+
 {ref}/indices-templates-v1.html[Endpoint documentation]
 [source,ts]
 ----
@@ -5839,6 +6113,7 @@ Templates with lower 'order' values are merged first. Templates with higher
 ** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index.
 ** *`version` (Optional, number)*: Version number used to manage index templates externally.
 This number is not automatically generated by Elasticsearch.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -5850,6 +6125,8 @@ Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. +All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. + Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. @@ -5890,6 +6167,17 @@ Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. +By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. +You can change this default interval with the `index.refresh_interval` setting. + +Refresh requests are synchronous and do not return a response until the refresh operation completes. + +Refreshes are resource-intensive. +To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. + +If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. +This option ensures the indexing operation waits for a periodic refresh before running the search. + {ref}/indices-refresh.html[Endpoint documentation] [source,ts] ---- @@ -5962,6 +6250,18 @@ For each cluster in the index expression, information is returned about: * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. +For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. +Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. + +**Advantages of using this endpoint before a cross-cluster search** + +You may want to exclude a cluster or index from a search when: + +* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. +* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search. +* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. 
(This is also where security/permission errors will be shown.)
+* A remote cluster is an older version that does not support the feature you want to use in your search.
+
 {ref}/indices-resolve-cluster-api.html[Endpoint documentation]
 [source,ts]
 ----
@@ -6014,7 +6314,43 @@ For example, a request targeting `foo*,bar*` returns an error if an index starts
 
 [discrete]
 ==== rollover
 Roll over to a new index.
-Creates a new index for a data stream or index alias.
+TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.
+
+The rollover API creates a new index for a data stream or index alias.
+The API behavior depends on the rollover target.
+
+**Roll over a data stream**
+
+If you roll over a data stream, the API creates a new write index for the stream.
+The stream's previous write index becomes a regular backing index.
+A rollover also increments the data stream's generation.
+
+**Roll over an index alias with a write index**
+
+TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.
+Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers.
+
+If an index alias points to multiple indices, one of the indices must be a write index.
+The rollover API creates a new write index for the alias with `is_write_index` set to `true`.
+The API also sets `is_write_index` to `false` for the previous write index.
+
+**Roll over an index alias with one index**
+
+If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.
+
+NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.
+
+**Increment index names for an alias**
+
+When you roll over an index alias, you can specify a name for the new index.
+If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.
+For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.
+This number is always six characters and zero-padded, regardless of the previous index's name.
+
+If you use an index alias for time series data, you can use date math in the index name to track the rollover date.
+For example, you can create an alias that points to an index named ``.
+If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.
+If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.
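For example, a rollover request with this client can rely on that automatic increment; the alias name is illustrative:

[source,ts]
----
// A sketch: roll over an alias and let Elasticsearch derive the next
// index name (for example, my-index-000001 -> my-index-000002).
await client.indices.rollover({ alias: 'my-alias' })
----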
{ref}/indices-simulate-template.html[Endpoint documentation] [source,ts] @@ -6240,6 +6576,15 @@ Split an index into a new index with more primary shards. * The index must be read-only. * The cluster health status must be green. +You can do make an index read-only with the following request using the add index block API: + +``` +PUT /my_source_index/_block/write +``` + +The current write index on a data stream cannot be split. +In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. + The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. @@ -6599,8 +6944,6 @@ client.ingest.getGeoipDatabase({ ... }) ** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_ip_location_database @@ -6769,9 +7112,15 @@ If the operator privileges feature is enabled, only operator users can use this {ref}/delete-license.html[Endpoint documentation] [source,ts] ---- -client.license.delete() +client.license.delete({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get @@ -6841,6 +7190,8 @@ client.license.post({ ... }) ** *`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })* ** *`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])*: A sequence of one or more JSON documents containing the license information. ** *`acknowledge` (Optional, boolean)*: Specifies whether you acknowledge the license changes. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== post_start_basic @@ -6865,6 +7216,8 @@ client.license.postStartBasic({ ... }) * *Request (object):* ** *`acknowledge` (Optional, boolean)*: whether the user has acknowledged acknowledge messages (default: false) +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
[discrete] ==== post_start_trial @@ -6888,14 +7241,15 @@ client.license.postStartTrial({ ... }) * *Request (object):* ** *`acknowledge` (Optional, boolean)*: whether the user has acknowledged acknowledge messages (default: false) ** *`type_query_string` (Optional, string)* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] === logstash [discrete] ==== delete_pipeline Delete a Logstash pipeline. - Delete a pipeline that is used for Logstash Central Management. +If the request succeeds, you receive an empty response with an appropriate status code. {ref}/logstash-api-delete-pipeline.html[Endpoint documentation] [source,ts] @@ -6912,7 +7266,6 @@ client.logstash.deletePipeline({ id }) [discrete] ==== get_pipeline Get Logstash pipelines. - Get pipelines that are used for Logstash Central Management. {ref}/logstash-api-get-pipeline.html[Endpoint documentation] @@ -6954,7 +7307,8 @@ client.logstash.putPipeline({ id }) Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. -TIP: This APIs is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. +TIP: This APIs is designed for indirect use by the Upgrade Assistant. +You are strongly recommended to use the Upgrade Assistant. {ref}/migration-api-deprecation.html[Endpoint documentation] [source,ts] @@ -6975,9 +7329,9 @@ Version upgrades sometimes require changes to how features store configuration i Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. -We strongly recommend you use the Upgrade Assistant. +You are strongly recommended to use the Upgrade Assistant. -{ref}/migration-api-feature-upgrade.html[Endpoint documentation] +{ref}/feature-migration-api.html[Endpoint documentation] [source,ts] ---- client.migration.getFeatureUpgradeStatus() @@ -6994,7 +7348,7 @@ Some functionality might be temporarily unavailable during the migration process TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. -{ref}/migration-api-feature-upgrade.html[Endpoint documentation] +{ref}/feature-migration-api.html[Endpoint documentation] [source,ts] ---- client.migration.postFeatureUpgrade() @@ -7280,6 +7634,7 @@ client.ml.deleteTrainedModel({ model_id }) * *Request (object):* ** *`model_id` (string)*: The unique identifier of the trained model. ** *`force` (Optional, boolean)*: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== delete_trained_model_alias @@ -7827,8 +8182,6 @@ client.ml.getMemoryStats({ ... }) * *Request (object):* ** *`node_id` (Optional, string)*: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` -** *`human` (Optional, boolean)*: Specify this query parameter to include the fields with units in the response. Otherwise only -the `_in_bytes` sizes are returned in the response. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request @@ -8234,6 +8587,11 @@ client.ml.putCalendarJob({ calendar_id, job_id }) Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. +By default, the query used in the source configuration is `{"match_all": {}}`. + +If the destination index does not exist, it is created automatically when you start the job. + +If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. {ref}/put-dfanalytics.html[Endpoint documentation] [source,ts] @@ -8309,7 +8667,9 @@ Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). -If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. +If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. +By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. + When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. @@ -8398,6 +8758,7 @@ Up to 10000 items are allowed in each filter. ==== put_job Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. +If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. {ref}/ml-put-job.html[Endpoint documentation] [source,ts] @@ -9106,8 +9467,7 @@ client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) * *Request (object):* ** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information. -All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). -** *`max_archive_version` (number)*: Specifies the maximum [archive_version](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html#get-repositories-metering-api-response-body) to be cleared from the archive. +** *`max_archive_version` (number)*: Specifies the maximum `archive_version` to be cleared from the archive. [discrete] ==== get_repositories_metering_info @@ -9150,9 +9510,6 @@ client.nodes.hotThreads({ ... }) a task from an empty queue) are filtered out. ** *`interval` (Optional, string | -1 | 0)*: The interval to do the second sampling of threads. ** *`snapshots` (Optional, number)*: Number of samples of thread stacktrace. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response -is received before the timeout expires, the request fails and -returns an error. ** *`threads` (Optional, number)*: Specifies the number of hot threads to provide information for. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error.
 ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request
@@ -8234,6 +8587,11 @@ client.ml.putCalendarJob({ calendar_id, job_id })
 
 Create a data frame analytics job.
 This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index.
+By default, the query used in the source configuration is `{"match_all": {}}`.
+
+If the destination index does not exist, it is created automatically when you start the job.
+
+If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.
 
 {ref}/put-dfanalytics.html[Endpoint documentation]
 [source,ts]
 ----
@@ -8309,7 +8667,9 @@ Create a datafeed.
 Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.
 You can associate only one datafeed with each anomaly detection job.
 The datafeed contains a query that runs at a defined interval (`frequency`).
-If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval.
+If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval.
+By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`.
+
 When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
@@ -8398,6 +8758,7 @@ Up to 10000 items are allowed in each filter.
 ==== put_job
 Create an anomaly detection job.
 If you include a `datafeed_config`, you must have read index privileges on the source index.
+If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`.
 
 {ref}/ml-put-job.html[Endpoint documentation]
 [source,ts]
 ----
@@ -9106,8 +9467,7 @@ client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version })
 
 * *Request (object):*
 ** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information.
-All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes).
-** *`max_archive_version` (number)*: Specifies the maximum [archive_version](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html#get-repositories-metering-api-response-body) to be cleared from the archive.
+** *`max_archive_version` (number)*: Specifies the maximum `archive_version` to be cleared from the archive.
 
 [discrete]
 ==== get_repositories_metering_info
@@ -9150,9 +9510,6 @@ client.nodes.hotThreads({ ... })
 a task from an empty queue) are filtered out.
 ** *`interval` (Optional, string | -1 | 0)*: The interval to do the second sampling of threads.
 ** *`snapshots` (Optional, number)*: Number of samples of thread stacktrace.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response
-is received before the timeout expires, the request fails and
-returns an error.
 ** *`threads` (Optional, number)*: Specifies the number of hot threads to provide information for.
 ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error.
@@ -9177,7 +9534,6 @@ client.nodes.info({ ... })
 ** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information.
 ** *`metric` (Optional, string | string[])*: Limits the information returned to the specific metrics. Supports a list, such as http,ingest.
 ** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
 ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
 
[discrete]
@@ -9232,7 +9588,6 @@ client.nodes.stats({ ... })
 ** *`groups` (Optional, boolean)*: List of search groups to include in the search statistics.
 ** *`include_segment_file_sizes` (Optional, boolean)*: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested).
 ** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Indicates whether statistics are aggregated at the cluster, index, or shard level.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
 ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
 ** *`types` (Optional, string[])*: A list of document types for the indexing index metric.
 ** *`include_unloaded_segments` (Optional, boolean)*: If `true`, the response includes information from segments that are not loaded into memory.
@@ -9263,6 +9618,7 @@ If no response is received before the timeout expires, the request fails and ret
 ==== delete_rule
 Delete a query rule.
 Delete a query rule within a query ruleset.
+This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.
 
 {ref}/delete-query-rule.html[Endpoint documentation]
 [source,ts]
 ----
 client.queryRules.deleteRule({ ruleset_id, rule_id })
 ----
 
[discrete]
==== delete_ruleset
Delete a query ruleset.
+Remove a query ruleset and its associated data.
+This is a destructive action that is not recoverable.
 
 {ref}/delete-query-ruleset.html[Endpoint documentation]
 [source,ts]
 ----
 client.queryRules.deleteRuleset({ ruleset_id })
 ----
 
[discrete]
==== list_rulesets
 
 client.queryRules.listRulesets({ ... })
 
[discrete]
==== Arguments
 
* *Request (object):*
-** *`from` (Optional, number)*: Starting offset (default: 0)
-** *`size` (Optional, number)*: specifies a max number of results to get
+** *`from` (Optional, number)*: The offset from the first result to fetch.
+** *`size` (Optional, number)*: The maximum number of results to retrieve.
 
[discrete]
==== put_rule
Create or update a query rule.
Create or update a query rule within a query ruleset.
 
+IMPORTANT: Due to limitations within pinned queries, you can only pin documents using `ids` or `docs`, but cannot use both in a single rule.
+It is advised to use one or the other in query rulesets, to avoid errors.
+Additionally, pinned queries have a maximum limit of 100 pinned hits.
+If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
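As an illustration, creating a pinned rule could look like the following sketch; the ruleset ID, rule ID, criteria values, and document ID are all hypothetical:

[source,ts]
----
// A sketch: pin one document whenever the caller's match criteria
// contain an exact value for the 'user_query' metadata key.
await client.queryRules.putRule({
  ruleset_id: 'my-ruleset',
  rule_id: 'my-rule',
  type: 'pinned',
  criteria: [{ type: 'exact', metadata: 'user_query', values: ['pugs'] }],
  actions: { ids: ['id1'] }
})
----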
+
+{ref}/put-query-rule.html[Endpoint documentation]
[source,ts]
----
client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions })
----
 
[discrete]
==== Arguments
 
* *Request (object):*
-** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to be created or updated
-** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to be created or updated
-** *`type` (Enum("pinned" | "exclude"))*
-** *`criteria` ({ type, metadata, values } | { type, metadata, values }[])*
-** *`actions` ({ ids, docs })*
+** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to be created or updated.
+** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to be created or updated.
+** *`type` (Enum("pinned" | "exclude"))*: The type of rule.
+** *`criteria` ({ type, metadata, values } | { type, metadata, values }[])*: The criteria that must be met for the rule to be applied.
+If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied.
+** *`actions` ({ ids, docs })*: The actions to take when the rule is matched.
+The format of this action depends on the rule type.
 ** *`priority` (Optional, number)*
 
[discrete]
==== put_ruleset
Create or update a query ruleset.
+There is a limit of 100 rules per ruleset.
+This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting.
+
+IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in a single rule.
+It is advised to use one or the other in query rulesets, to avoid errors.
+Additionally, pinned queries have a maximum limit of 100 pinned hits.
+If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
 
 {ref}/put-query-ruleset.html[Endpoint documentation]
 [source,ts]
 ----
 client.queryRules.putRuleset({ ruleset_id, rules })
 ----
 
[discrete]
==== Arguments
 
* *Request (object):*
-** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated
+** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated.
 ** *`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])*
 
[discrete]
==== test
 
 client.queryRules.test({ ruleset_id, match_criteria })
 
[discrete]
==== Arguments
 
* *Request (object):*
 ** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated
-** *`match_criteria` (Record)*
+** *`match_criteria` (Record)*: The match criteria to apply to rules in the given query ruleset.
+Match criteria should match the keys defined in the `criteria.metadata` field of the rule.
 
[discrete]
=== rollup
@@ -9568,6 +9941,39 @@ Search rolled-up data.
 The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.
 It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.
+The request body supports a subset of features from the regular search API.
+The following functionality is not available:
+
+`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.
+`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. + +**Searching both historical rollup and non-rollup data** + +The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. +This is done by simply adding the live indices to the URI. For example: + +``` +GET sensor-1,sensor_rollup/_rollup_search +{ + "size": 0, + "aggregations": { + "max_temperature": { + "max": { + "field": "temperature" + } + } + } +} +``` + +The rollup search endpoint does two things when the search runs: + +* The original request is sent to the non-rollup index unaltered. +* A rewritten version of the original request is sent to the rollup index. + +When the two responses are received, the endpoint rewrites the rollup response and merges the two together. +During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. + {ref}/rollup-search.html[Endpoint documentation] [source,ts] ---- @@ -9578,9 +9984,15 @@ client.rollup.rollupSearch({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL. +** *`index` (string | string[])*: A list of data streams and indices used to limit the request. +This parameter has the following rules: + +* At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. +* Multiple non-rollup indices may be specified. +* Only one rollup index may be specified. If more than one are supplied, an exception occurs. +* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. ** *`aggregations` (Optional, Record)*: Specifies aggregations. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query that is subject to some limitations. ** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data. ** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response ** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response @@ -9609,6 +10021,15 @@ Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. +Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. +This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: + +``` +POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +``` +The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. +If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. + {ref}/rollup-stop-job.html[Endpoint documentation] [source,ts] ---- @@ -9622,6 +10043,8 @@ client.rollup.stopJob({ id }) ** *`id` (string)*: Identifier for the rollup job. ** *`timeout` (Optional, string | -1 | 0)*: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. If more than `timeout` time has passed, the API throws a timeout exception. +NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. +The timeout simply means the API call itself timed out while waiting for the status change. ** *`wait_for_completion` (Optional, boolean)*: If set to `true`, causes the API to block until the indexer state completely stops. If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. @@ -9814,7 +10237,7 @@ client.searchApplication.search({ name }) Get cache statistics. Get statistics about the shared cache for partially mounted indices. -{ref}/searchable-snapshots-apis.html[Endpoint documentation] +{ref}/searchable-snapshots-api-cache-stats.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.cacheStats({ ... }) @@ -9824,7 +10247,7 @@ client.searchableSnapshots.cacheStats({ ... }) ==== Arguments * *Request (object):* -** *`node_id` (Optional, string | string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes +** *`node_id` (Optional, string | string[])*: The names of the nodes in the cluster to target. 
** *`master_timeout` (Optional, string | -1 | 0)* [discrete] @@ -9832,7 +10255,7 @@ client.searchableSnapshots.cacheStats({ ... }) Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. -{ref}/searchable-snapshots-apis.html[Endpoint documentation] +{ref}/searchable-snapshots-api-clear-cache.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.clearCache({ ... }) @@ -9842,12 +10265,11 @@ client.searchableSnapshots.clearCache({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to clear from the cache. +It supports wildcards (`*`). ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) ** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`pretty` (Optional, boolean)* -** *`human` (Optional, boolean)* [discrete] ==== mount @@ -9866,21 +10288,24 @@ client.searchableSnapshots.mount({ repository, snapshot, index }) ==== Arguments * *Request (object):* -** *`repository` (string)*: The name of the repository containing the snapshot of the index to mount -** *`snapshot` (string)*: The name of the snapshot of the index to mount -** *`index` (string)* -** *`renamed_index` (Optional, string)* -** *`index_settings` (Optional, Record)* -** *`ignore_index_settings` (Optional, string[])* -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before returning -** *`storage` (Optional, string)*: Selects the kind of local storage used to accelerate searches. Experimental, and defaults to `full_copy` +** *`repository` (string)*: The name of the repository containing the snapshot of the index to mount. +** *`snapshot` (string)*: The name of the snapshot of the index to mount. +** *`index` (string)*: The name of the index contained in the snapshot whose data is to be mounted. +If no `renamed_index` is specified, this name will also be used to create the new index. +** *`renamed_index` (Optional, string)*: The name of the index that will be created. +** *`index_settings` (Optional, Record)*: The settings that should be added to the index when it is mounted. +** *`ignore_index_settings` (Optional, string[])*: The names of settings that should be removed from the index when it is mounted. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`wait_for_completion` (Optional, boolean)*: If true, the request blocks until the operation is complete. +** *`storage` (Optional, string)*: The mount option for the searchable snapshot index. [discrete] ==== stats Get searchable snapshot statistics. 
-{ref}/searchable-snapshots-apis.html[Endpoint documentation] +{ref}/searchable-snapshots-api-stats.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.stats({ ... }) @@ -9890,7 +10315,7 @@ client.searchableSnapshots.stats({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names +** *`index` (Optional, string | string[])*: A list of data streams and indices to retrieve statistics for. ** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Return stats aggregated at cluster, index or shard level [discrete] @@ -9974,14 +10399,47 @@ client.security.bulkPutRole({ roles }) [discrete] ==== bulk_update_api_keys -Updates the attributes of multiple existing API keys. +Bulk update API keys. +Update the attributes for multiple API keys. + +IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. + +This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. + +It is not possible to update expired or invalidated API keys. + +This API supports updates to API key access scope, metadata and expiration. +The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. + +IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. + +A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. {ref}/security-api-bulk-update-api-keys.html[Endpoint documentation] [source,ts] ---- -client.security.bulkUpdateApiKeys() +client.security.bulkUpdateApiKeys({ ids }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`ids` (string | string[])*: The API key identifiers. +** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API keys. +By default, API keys never expire. +This property can be omitted to leave the value unchanged. +** *`metadata` (Optional, Record)*: Arbitrary nested metadata to associate with the API keys. +Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. +Any information specified with this parameter fully replaces metadata previously associated with the API key. +** *`role_descriptors` (Optional, Record)*: The role descriptors to assign to the API keys. +An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter. +The structure of a role descriptor is the same as the request for the create API keys API. 
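For example, applying one expiration update to several keys at once might look like this sketch; the key IDs are placeholders:

[source,ts]
----
// A sketch: update two API keys in a single request. The owner's
// permission snapshot is refreshed as part of the update.
await client.security.bulkUpdateApiKeys({
  ids: ['api-key-id-1', 'api-key-id-2'],
  expiration: '30d'
})
----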
[discrete]
==== change_password

@@ -10194,6 +10652,35 @@ client.security.createServiceToken({ namespace, service })
** *`name` (Optional, string)*: An identifier for the token name
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

+[discrete]
+==== delegate_pki
+Delegate PKI authentication.
+This API implements the exchange of an X509Certificate chain for an Elasticsearch access token.
+The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`.
+A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm.
+
+This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch.
+
+IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated.
+This is part of the TLS authentication process and it is delegated to the proxy that calls this API.
+The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.
+
+{ref}/security-api-delegate-pki-authentication.html[Endpoint documentation]
+[source,ts]
+----
+client.security.delegatePki({ x509_certificate_chain })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`x509_certificate_chain` (string[])*: The X509Certificate chain, which is represented as an ordered string array.
+Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) representation of the certificate's DER encoding.
+
+The first element is the target certificate that contains the subject distinguished name that is requesting access.
+This may be followed by additional certificates; each subsequent certificate is used to certify the previous one.
+
[discrete]
==== delete_privileges
Delete application privileges.

@@ -10536,20 +11023,27 @@ client.security.getServiceCredentials({ namespace, service })

[discrete]
==== get_settings
-Retrieve settings for the security system indices
+Get security index settings.
+Get the user-configurable settings for the security internal index (`.security` and associated indices).

{ref}/security-api-get-settings.html[Endpoint documentation]
[source,ts]
----
-client.security.getSettings()
+client.security.getSettings({ ... })
----
-
[discrete]
-==== get_token
-Get a token.
+==== Arguments

-Create a bearer token for access without requiring basic authentication.
+* *Request (object):*
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== get_token
+Get a token.
+
+Create a bearer token for access without requiring basic authentication.
{ref}/security-api-get-token.html[Endpoint documentation] [source,ts] @@ -11280,14 +11774,28 @@ When specified, this information fully replaces metadata previously associated w [discrete] ==== update_settings -Update settings for the security system index +Update security index settings. +Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified, for example `index.auto_expand_replicas` and `index.number_of_replicas`. + +If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use. {ref}/security-api-update-settings.html[Endpoint documentation] [source,ts] ---- -client.security.updateSettings() +client.security.updateSettings({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`security` (Optional, { index })*: Settings for the index used for most security configuration, including native realm users and roles configured with the API. +** *`security-profile` (Optional, { index })*: Settings for the index used to store profile information. +** *`security-tokens` (Optional, { index })*: Settings for the index used to store tokens. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== update_user_profile_data @@ -11330,7 +11838,7 @@ Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. -https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] +{ref}/delete-shutdown.html[Endpoint documentation] [source,ts] ---- client.shutdown.deleteNode({ node_id }) @@ -11355,7 +11863,7 @@ NOTE: This feature is designed for indirect use by Elasticsearch Service, Elasti If the operator privileges feature is enabled, you must be an operator to use this API. -https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] +{ref}/get-shutdown.html[Endpoint documentation] [source,ts] ---- client.shutdown.getNode({ ... }) @@ -11367,7 +11875,6 @@ client.shutdown.getNode({ ... }) * *Request (object):* ** *`node_id` (Optional, string | string[])*: Which node for which to retrieve the shutdown status ** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== put_node @@ -11375,6 +11882,8 @@ Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. +If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster. + If the operator privileges feature is enabled, you must be an operator to use this API. 
The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. @@ -11386,7 +11895,7 @@ If a node is already being prepared for shutdown, you can use this API to change IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. -https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] +{ref}/put-shutdown.html[Endpoint documentation] [source,ts] ---- client.shutdown.putNode({ node_id, type, reason }) @@ -11396,7 +11905,10 @@ client.shutdown.putNode({ node_id, type, reason }) ==== Arguments * *Request (object):* -** *`node_id` (string)*: The node id of node to be shut down +** *`node_id` (string)*: The node identifier. +This parameter is not validated against the cluster's active nodes. +This enables you to register a node for shut down while it is offline. +No error is thrown if you specify an invalid node ID. ** *`type` (Enum("restart" | "remove" | "replace"))*: Valid values are restart, remove, or replace. Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. Because the node is expected to rejoin the cluster, data is not migrated off of the node. @@ -11414,21 +11926,55 @@ If you specify both a restart allocation delay and an index-level allocation del Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. -** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] === simulate [discrete] ==== ingest -Simulates running ingest with example documents. +Simulate data ingestion. +Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. + +This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. + +The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. +If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would. +No data is indexed into Elasticsearch. 
+Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. +The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. + +This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. +The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. + +By default, the pipeline definitions that are currently in the system are used. +However, you can supply substitute pipeline definitions in the body of the request. +These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. {ref}/simulate-ingest-api.html[Endpoint documentation] [source,ts] ---- -client.simulate.ingest() +client.simulate.ingest({ docs }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline. +** *`index` (Optional, string)*: The index to simulate ingesting into. +This value can be overridden by specifying an index on each document. +If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. +** *`component_template_substitutions` (Optional, Record)*: A map of component template names to substitute component template definition objects. +** *`index_template_subtitutions` (Optional, Record)*: A map of index template names to substitute index template definition objects. +** *`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })* +** *`pipeline_substitutions` (Optional, Record)*: Pipelines to test. +If you don’t specify the `pipeline` request path parameter, this parameter is required. +If you specify both this and the request path parameter, the API only uses the request path parameter. +** *`pipeline` (Optional, string)*: The pipeline to use as the default pipeline. +This value can be used to override the default pipeline of the index. [discrete] === slm @@ -11449,6 +11995,10 @@ client.slm.deleteLifecycle({ policy_id }) * *Request (object):* ** *`policy_id` (string)*: The id of the snapshot lifecycle policy to remove +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== execute_lifecycle @@ -11467,6 +12017,10 @@ client.slm.executeLifecycle({ policy_id }) * *Request (object):* ** *`policy_id` (string)*: The id of the snapshot lifecycle policy to be executed +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. 
+If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== execute_retention @@ -11477,9 +12031,17 @@ The retention policy is normally applied according to its schedule. {ref}/slm-api-execute-retention.html[Endpoint documentation] [source,ts] ---- -client.slm.executeRetention() +client.slm.executeRetention({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_lifecycle @@ -11497,6 +12059,10 @@ client.slm.getLifecycle({ ... }) * *Request (object):* ** *`policy_id` (Optional, string | string[])*: List of snapshot lifecycle policies to retrieve +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_stats @@ -11506,9 +12072,15 @@ Get global and policy-level statistics about actions taken by snapshot lifecycle {ref}/slm-api-get-stats.html[Endpoint documentation] [source,ts] ---- -client.slm.getStats() +client.slm.getStats({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_status @@ -11517,9 +12089,19 @@ Get the snapshot lifecycle management status. {ref}/slm-api-get-status.html[Endpoint documentation] [source,ts] ---- -client.slm.getStatus() +client.slm.getStatus({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== put_lifecycle @@ -11544,8 +12126,12 @@ client.slm.putLifecycle({ policy_id }) ** *`repository` (Optional, string)*: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. ** *`retention` (Optional, { expire_after, max_count, min_count })*: Retention rules used to retain and delete snapshots created by the policy. 
** *`schedule` (Optional, string)*: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== start @@ -11556,9 +12142,19 @@ Manually starting SLM is necessary only if it has been stopped using the stop SL {ref}/slm-api-start.html[Endpoint documentation] [source,ts] ---- -client.slm.start() +client.slm.start({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== stop @@ -11574,9 +12170,19 @@ Use the get snapshot lifecycle management status API to see if SLM is running. {ref}/slm-api-stop.html[Endpoint documentation] [source,ts] ---- -client.slm.stop() +client.slm.stop({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] === snapshot @@ -11595,16 +12201,20 @@ client.snapshot.cleanupRepository({ repository }) ==== Arguments * *Request (object):* -** *`repository` (string)*: Snapshot repository to clean up. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +** *`repository` (string)*: The name of the snapshot repository to clean up. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1` +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. 
+If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+To indicate that the request should never timeout, set it to `-1`.

[discrete]
==== clone
Clone a snapshot.
Clone part or all of a snapshot into another snapshot in the same repository.

-{ref}/modules-snapshots.html[Endpoint documentation]
+{ref}/clone-snapshot-api.html[Endpoint documentation]
[source,ts]
----
client.snapshot.clone({ repository, snapshot, target_snapshot, indices })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`repository` (string)*: A repository name
-** *`snapshot` (string)*: The name of the snapshot to clone from
-** *`target_snapshot` (string)*: The name of the cloned snapshot to create
-** *`indices` (string)*
-** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
-** *`timeout` (Optional, string | -1 | 0)*
+** *`repository` (string)*: The name of the snapshot repository that both source and target snapshot belong to.
+** *`snapshot` (string)*: The source snapshot name.
+** *`target_snapshot` (string)*: The target snapshot name.
+** *`indices` (string)*: A list of indices to include in the snapshot.
+Multi-target syntax is supported.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+** *`timeout` (Optional, string | -1 | 0)*: The period of time to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== create
Create a snapshot.
Take a snapshot of a cluster or of data streams and indices.

-{ref}/modules-snapshots.html[Endpoint documentation]
+{ref}/create-snapshot-api.html[Endpoint documentation]
[source,ts]
----
client.snapshot.create({ repository, snapshot })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`repository` (string)*: Repository for the snapshot.
-** *`snapshot` (string)*: Name of the snapshot. Must be unique in the repository.
-** *`ignore_unavailable` (Optional, boolean)*: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed.
-** *`include_global_state` (Optional, boolean)*: If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`).
-** *`indices` (Optional, string | string[])*: Data streams and indices to include in the snapshot. Supports multi-target syntax. Includes all data streams and indices by default.
-** *`feature_states` (Optional, string[])*: Feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default.
If `include_global_state` is `false`, no feature states are included by default.
-** *`metadata` (Optional, Record)*: Optional metadata for the snapshot. May have any contents. Must be less than 1024 bytes. This map is not automatically generated by Elasticsearch.
-** *`partial` (Optional, boolean)*: If `true`, allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes.
+** *`repository` (string)*: The name of the repository for the snapshot.
+** *`snapshot` (string)*: The name of the snapshot.
+It supports date math.
+It must be unique in the repository.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Determines how wildcard patterns in the `indices` parameter match data streams and indices.
+It supports a list of values such as `open,hidden`.
+** *`feature_states` (Optional, string[])*: The feature states to include in the snapshot.
+Each feature state includes one or more system indices containing related data.
+You can view a list of eligible features using the get features API.
+
+If `include_global_state` is `true`, all current feature states are included by default.
+If `include_global_state` is `false`, no feature states are included by default.
+
+Note that specifying an empty array will result in the default behavior.
+To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`).
+** *`ignore_unavailable` (Optional, boolean)*: If `true`, the request ignores data streams and indices in `indices` that are missing or closed.
+If `false`, the request returns an error for any data stream or index that is missing or closed.
+** *`include_global_state` (Optional, boolean)*: If `true`, the current cluster state is included in the snapshot.
+The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies.
+It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`).
+** *`indices` (Optional, string | string[])*: A list of data streams and indices to include in the snapshot.
+It supports a multi-target syntax.
+The default is an empty array (`[]`), which includes all regular data streams and regular indices.
+To exclude all data streams and indices, use `-*`.
+
+You can't use this parameter to include or exclude system indices or system data streams from a snapshot.
+Use `feature_states` instead.
+** *`metadata` (Optional, Record)*: Arbitrary metadata for the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data.
+It can have any contents but it must be less than 1024 bytes.
+This information is not automatically generated by Elasticsearch.
+** *`partial` (Optional, boolean)*: If `true`, it enables you to restore a partial snapshot of indices with unavailable shards.
+Only shards that were successfully included in the snapshot will be restored.
+All missing shards will be recreated as empty.
+
+If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`wait_for_completion` (Optional, boolean)*: If `true`, the request returns a response when the snapshot is complete.
+If `false`, the request returns a response when the snapshot initializes.

[discrete]
==== create_repository
Create or update a snapshot repository.
IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
To register a snapshot repository, the cluster's global metadata must be writeable.
Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.

+Several options for this API can be specified using a query parameter or a request body parameter.
+If both parameters are specified, only the query parameter is used.
+
-{ref}/modules-snapshots.html[Endpoint documentation]
+{ref}/put-snapshot-repo-api.html[Endpoint documentation]
[source,ts]
----
client.snapshot.createRepository({ repository })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`repository` (string)*: A repository name
-** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
-** *`verify` (Optional, boolean)*: Whether to verify the repository after creation
+** *`repository` (string)*: The name of the snapshot repository to register or update.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+To indicate that the request should never timeout, set it to `-1`.
+** *`verify` (Optional, boolean)*: If `true`, the request verifies the repository is functional on all master and data nodes in the cluster.
+If `false`, this verification is skipped.
+You can also perform this verification with the verify snapshot repository API.

[discrete]
==== delete
Delete snapshots.
-{ref}/modules-snapshots.html[Endpoint documentation]
+{ref}/delete-snapshot-api.html[Endpoint documentation]
[source,ts]
----
client.snapshot.delete({ repository, snapshot })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`repository` (string)*: A repository name
-** *`snapshot` (string)*: A list of snapshot names
-** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
+** *`repository` (string)*: The name of the repository to delete a snapshot from.
+** *`snapshot` (string)*: A list of snapshot names to delete.
+It also accepts wildcards (`*`).
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.

[discrete]
==== delete_repository
Delete snapshot repositories.
When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots.
The snapshots themselves are left untouched and in place.

-{ref}/modules-snapshots.html[Endpoint documentation]
+{ref}/delete-snapshot-repo-api.html[Endpoint documentation]
[source,ts]
----
client.snapshot.deleteRepository({ repository })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`repository` (string | string[])*: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported.
-** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+** *`repository` (string | string[])*: The name of the snapshot repositories to unregister.
+Wildcard (`*`) patterns are supported.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+To indicate that the request should never timeout, set it to `-1`.

[discrete]
==== get
Get snapshot information.

+NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots.
+It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration.
+Snapshots concurrently created may be seen during an iteration.
+
-{ref}/modules-snapshots.html[Endpoint documentation]
+{ref}/get-snapshot-api.html[Endpoint documentation]
[source,ts]
----
client.snapshot.get({ repository, snapshot })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`repository` (string)*: List of snapshot repository names used to limit the request. Wildcard (*) expressions are supported.
-** *`snapshot` (string | string[])*: List of snapshot names to retrieve
Also accepts wildcards (*). -- To get information about all snapshots in a registered repository, use a wildcard (*) or _all. -- To get information about any snapshots that are currently running, use _current. -** *`ignore_unavailable` (Optional, boolean)*: If false, the request returns an error for any snapshots that are unavailable. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`verbose` (Optional, boolean)*: If true, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. -** *`index_details` (Optional, boolean)*: If true, returns additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. Defaults to false, meaning that this information is omitted. -** *`index_names` (Optional, boolean)*: If true, returns the name of each index in each snapshot. -** *`include_repository` (Optional, boolean)*: If true, returns the repository name in each snapshot. -** *`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))*: Allows setting a sort order for the result. Defaults to start_time, i.e. sorting by snapshot start time stamp. -** *`size` (Optional, number)*: Maximum number of snapshots to return. Defaults to 0 which means return all that match the request without limit. -** *`order` (Optional, Enum("asc" | "desc"))*: Sort order. Valid values are asc for ascending and desc for descending order. Defaults to asc, meaning ascending order. -** *`after` (Optional, string)*: Offset identifier to start pagination from as returned by the next field in the response body. +** *`repository` (string)*: A list of snapshot repository names used to limit the request. +Wildcard (`*`) expressions are supported. +** *`snapshot` (string | string[])*: A list of snapshot names to retrieve +Wildcards (`*`) are supported. + +* To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. +* To get information about any snapshots that are currently running, use `_current`. +** *`after` (Optional, string)*: An offset identifier to start pagination from as returned by the next field in the response body. +** *`from_sort_value` (Optional, string)*: The value of the current sort column at which to start retrieval. +It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. +It can be a millisecond time value or a number when sorting by `index-` or shard count. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error for any snapshots that are unavailable. +** *`index_details` (Optional, boolean)*: If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. +The default is `false`, meaning that this information is omitted. +** *`index_names` (Optional, boolean)*: If `true`, the response includes the name of each index in each snapshot. +** *`include_repository` (Optional, boolean)*: If `true`, the response includes the repository name in each snapshot. 
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`order` (Optional, Enum("asc" | "desc"))*: The sort order. +Valid values are `asc` for ascending and `desc` for descending order. +The default behavior is ascending order. ** *`offset` (Optional, number)*: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. -** *`from_sort_value` (Optional, string)*: Value of the current sort column at which to start retrieval. Can either be a string snapshot- or repository name when sorting by snapshot or repository name, a millisecond time value or a number when sorting by index- or shard count. -** *`slm_policy_filter` (Optional, string)*: Filter snapshots by a list of SLM policy names that snapshots belong to. Also accepts wildcards (*) and combinations of wildcards followed by exclude patterns starting with -. To include snapshots not created by an SLM policy you can use the special pattern _none that will match all snapshots without an SLM policy. +** *`size` (Optional, number)*: The maximum number of snapshots to return. +The default is 0, which means to return all that match the request without limit. +** *`slm_policy_filter` (Optional, string)*: Filter snapshots by a list of snapshot lifecycle management (SLM) policy names that snapshots belong to. + +You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. +For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. +Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. +To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. +** *`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))*: The sort order for the result. +The default behavior is sorting by snapshot start time stamp. +** *`verbose` (Optional, boolean)*: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. + +NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. [discrete] ==== get_repository Get snapshot repository information. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/get-snapshot-repo-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.getRepository({ ... }) @@ -11753,9 +12435,15 @@ client.snapshot.getRepository({ ... 
}) ==== Arguments * *Request (object):* -** *`repository` (Optional, string | string[])*: A list of repository names -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`repository` (Optional, string | string[])*: A list of snapshot repository names used to limit the request. +Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. + +To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. +** *`local` (Optional, boolean)*: If `true`, the request gets information from the local node only. +If `false`, the request gets information from the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== repository_analyze @@ -11789,7 +12477,7 @@ If no such template exists, you can create one or restore a cluster state that c If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/restore-snapshot-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.restore({ repository, snapshot }) @@ -11799,28 +12487,80 @@ client.snapshot.restore({ repository, snapshot }) ==== Arguments * *Request (object):* -** *`repository` (string)*: A repository name -** *`snapshot` (string)*: A snapshot name -** *`feature_states` (Optional, string[])* -** *`ignore_index_settings` (Optional, string[])* -** *`ignore_unavailable` (Optional, boolean)* -** *`include_aliases` (Optional, boolean)* -** *`include_global_state` (Optional, boolean)* -** *`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })* -** *`indices` (Optional, string | string[])* -** *`partial` (Optional, boolean)* -** *`rename_pattern` (Optional, string)* -** *`rename_replacement` (Optional, string)* -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before returning +** *`repository` (string)*: The name of the repository to restore a snapshot from. +** *`snapshot` (string)*: The name of the snapshot to restore. +** *`feature_states` (Optional, string[])*: The feature states to restore. 
+If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. +If `include_global_state` is `false`, the request restores no feature states by default. +Note that specifying an empty array will result in the default behavior. +To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). +** *`ignore_index_settings` (Optional, string[])*: The index settings to not restore from the snapshot. +You can't use this option to ignore `index.number_of_shards`. + +For data streams, this option applies only to restored backing indices. +New backing indices are configured using the data stream's matching index template. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. +If `false`, the request returns an error for any missing index or data stream. +** *`include_aliases` (Optional, boolean)*: If `true`, the request restores aliases for any restored data streams and indices. +If `false`, the request doesn’t restore aliases. +** *`include_global_state` (Optional, boolean)*: If `true`, restore the cluster state. The cluster state includes: + +* Persistent cluster settings +* Index templates +* Legacy index templates +* Ingest pipelines +* Index lifecycle management (ILM) policies +* Stored scripts +* For snapshots taken after 7.12.0, feature states + +If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. +It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. + +Use the `feature_states` parameter to configure how feature states are restored. + +If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. +** *`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Index settings to add or change in restored indices, including backing indices. +You can't use this option to change `index.number_of_shards`. + +For data streams, this option applies only to restored backing indices. +New backing indices are configured using the data stream's matching index template. +** *`indices` (Optional, string | string[])*: A list of indices and data streams to restore. +It supports a multi-target syntax. +The default behavior is all regular indices and regular data streams in the snapshot. 
+
+You can't use this parameter to restore system indices or system data streams.
+Use `feature_states` instead.
+** *`partial` (Optional, boolean)*: If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.
+
+If `true`, it allows restoring a partial snapshot of indices with unavailable shards.
+Only shards that were successfully included in the snapshot will be restored.
+All missing shards will be recreated as empty.
+** *`rename_pattern` (Optional, string)*: A rename pattern to apply to restored data streams and indices.
+Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`.
+
+The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic.
+** *`rename_replacement` (Optional, string)*: The rename replacement string that is used with the `rename_pattern`.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+** *`wait_for_completion` (Optional, boolean)*: If `true`, the request returns a response when the restore operation completes.
+The operation is complete when it finishes all attempts to recover primary shards for restored indices.
+This applies even if one or more of the recovery attempts fail.
+
+If `false`, the request returns a response when the restore operation initializes.

[discrete]
==== status
Get the snapshot status.
Get a detailed description of the current state for each shard participating in the snapshot.
+
Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots.
If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.
+If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots.
+This usage is preferred.
+If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running.
+
WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
The API requires a read from the repository for each shard in each snapshot.
For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).

Depending on the latency of your storage, such requests can take an extremely long time to return results.
These requests can also tax machine resources and, when using cloud storage, incur high processing costs.

-{ref}/modules-snapshots.html[Endpoint documentation]
+{ref}/get-snapshot-status-api.html[Endpoint documentation]
[source,ts]
----
client.snapshot.status({ ...
}) ==== Arguments * *Request (object):* -** *`repository` (Optional, string)*: A repository name -** *`snapshot` (Optional, string | string[])*: A list of snapshot names -** *`ignore_unavailable` (Optional, boolean)*: Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`repository` (Optional, string)*: The snapshot repository name used to limit the request. +It supports wildcards (`*`) if `` isn't specified. +** *`snapshot` (Optional, string | string[])*: A list of snapshots to retrieve status for. +The default is currently running snapshots. +Wildcards (`*`) are not supported. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error for any snapshots that are unavailable. +If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== verify_repository Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/verify-snapshot-repo-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.verifyRepository({ repository }) @@ -11858,9 +12604,13 @@ client.snapshot.verifyRepository({ repository }) ==== Arguments * *Request (object):* -** *`repository` (string)*: A repository name -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`repository` (string)*: The name of the snapshot repository to verify. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. [discrete] === sql @@ -11886,6 +12636,11 @@ Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. +If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: + +* Users with the `cancel_task` cluster privilege. +* The user who first submitted the search. + {ref}/delete-async-sql-search-api.html[Endpoint documentation] [source,ts] ---- @@ -11896,13 +12651,15 @@ client.sql.deleteAsync({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the search. +** *`id` (string)*: The identifier for the search. [discrete] ==== get_async Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. 
+If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. + {ref}/get-async-sql-search-api.html[Endpoint documentation] [source,ts] ---- @@ -11913,14 +12670,16 @@ client.sql.getAsync({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the search. -** *`delimiter` (Optional, string)*: Separator for CSV results. The API only supports this parameter for CSV responses. -** *`format` (Optional, string)*: Format for the response. You must specify a format using this parameter or the -Accept HTTP header. If you specify both, the API uses this parameter. -** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for the search and its results. Defaults -to the `keep_alive` period for the original SQL search. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Period to wait for complete results. Defaults to no timeout, -meaning the request waits for complete search results. +** *`id` (string)*: The identifier for the search. +** *`delimiter` (Optional, string)*: The separator for CSV results. +The API supports this parameter only for CSV responses. +** *`format` (Optional, string)*: The format for the response. +You must specify a format using this parameter or the `Accept` HTTP header. +If you specify both, the API uses this parameter. +** *`keep_alive` (Optional, string | -1 | 0)*: The retention period for the search and its results. +It defaults to the `keep_alive` period for the original SQL search. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for complete results. +It defaults to no timeout, meaning the request waits for complete search results. [discrete] ==== get_async_status @@ -11937,7 +12696,7 @@ client.sql.getAsyncStatus({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the search. +** *`id` (string)*: The identifier for the search. [discrete] ==== query @@ -11954,31 +12713,46 @@ client.sql.query({ ... }) ==== Arguments * *Request (object):* -** *`catalog` (Optional, string)*: Default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. -** *`columnar` (Optional, boolean)*: If true, the results in a columnar fashion: one row represents all the values of a certain column from the current page of results. -** *`cursor` (Optional, string)*: Cursor used to retrieve a set of paginated results. +** *`allow_partial_search_results` (Optional, boolean)*: If `true`, the response has partial results when there are shard request timeouts or shard failures. +If `false`, the API returns an error with no partial results. +** *`catalog` (Optional, string)*: The default catalog (cluster) for queries. +If unspecified, the queries execute on the data in the local cluster only. +** *`columnar` (Optional, boolean)*: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. +The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. +** *`cursor` (Optional, string)*: The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters. 
-** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering. -** *`query` (Optional, string)*: SQL query to run. +** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response. +** *`field_multi_value_leniency` (Optional, boolean)*: If `false`, the API returns an exception when encountering multiple values for a field. +If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering. +** *`index_using_frozen` (Optional, boolean)*: If `true`, the search can run on frozen indices. +** *`keep_alive` (Optional, string | -1 | 0)*: The retention period for an async or saved synchronous search. +** *`keep_on_completion` (Optional, boolean)*: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. +If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. +** *`page_timeout` (Optional, string | -1 | 0)*: The minimum retention period for the scroll cursor. +After this time period, a pagination request might fail because the scroll cursor is no longer available. +Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. +** *`params` (Optional, Record)*: The values for parameters in the query. +** *`query` (Optional, string)*: The SQL query to run. ** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails. -** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails. -** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search. 
-** *`field_multi_value_leniency` (Optional, boolean)*: Throw an exception when encountering multiple values for a field (default) or be lenient and return the first value from the list (without any guarantees of what that will be - typically the first in natural ascending order). -** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take -precedence over mapped fields with the same name. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Period to wait for complete results. Defaults to no timeout, meaning the request waits for complete search results. If the search doesn’t finish within this period, the search becomes async. -** *`params` (Optional, Record)*: Values for parameters in the query. -** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for an async or saved synchronous search. -** *`keep_on_completion` (Optional, boolean)*: If true, Elasticsearch stores synchronous searches if you also specify the wait_for_completion_timeout parameter. If false, Elasticsearch only stores async searches that don’t finish before the wait_for_completion_timeout. -** *`index_using_frozen` (Optional, boolean)*: If true, the search can run on frozen indices. Defaults to false. -** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))*: Format for the response. +** *`runtime_mappings` (Optional, Record)*: One or more runtime fields for the search request. +These fields take precedence over mapped fields with the same name. +** *`time_zone` (Optional, string)*: The ISO-8601 time zone ID for the search. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for complete results. +It defaults to no timeout, meaning the request waits for complete search results. +If the search doesn't finish within this period, the search becomes async. + +To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. +** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))*: The format for the response. +You can also specify a format using the `Accept` HTTP header. +If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. [discrete] ==== translate Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. +It accepts the same request body parameters as the SQL search API, excluding `cursor`. {ref}/sql-translate-api.html[Endpoint documentation] [source,ts] @@ -11990,10 +12764,10 @@ client.sql.translate({ query }) ==== Arguments * *Request (object):* -** *`query` (string)*: SQL query to run. +** *`query` (string)*: The SQL query to run. ** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response. 
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering. -** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering. +** *`time_zone` (Optional, string)*: The ISO-8601 time zone ID for the search. [discrete] === ssl @@ -12030,6 +12804,23 @@ client.ssl.certificates() ==== delete_synonym Delete a synonym set. +You can only delete a synonyms set that is not in use by any index analyzer. + +Synonyms sets can be used in synonym graph token filters and synonym token filters. +These synonym filters can be used as part of search analyzers. + +Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). +Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase. + +If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. +To prevent that, synonyms sets that are used in analyzers can't be deleted. +A delete request in this case will return a 400 response code. + +To remove a synonyms set, you must first remove all indices that contain analyzers using it. +You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. +Once finished, you can delete the index. +When the synonyms set is not used in analyzers, you will be able to delete it. + {ref}/delete-synonyms-set.html[Endpoint documentation] [source,ts] ---- @@ -12040,7 +12831,7 @@ client.synonyms.deleteSynonym({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: The id of the synonyms set to be deleted +** *`id` (string)*: The synonyms set identifier to delete. 
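As a rough sketch of the migration workflow described above (the index and synonyms set names are illustrative, not from this patch):

[source,ts]
----
// 1. Create a new index whose analyzers do not reference the synonyms set.
await client.indices.create({ index: 'my-index-no-synonyms' })
// 2. Copy the data over with the reindex API.
await client.reindex({
  source: { index: 'my-index' },
  dest: { index: 'my-index-no-synonyms' },
  wait_for_completion: true
})
// 3. Delete the old index, then the now-unused synonyms set.
await client.indices.delete({ index: 'my-index' })
await client.synonyms.deleteSynonym({ id: 'my-synonyms-set' })
----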
[discrete] ==== delete_synonym_rule @@ -12057,8 +12848,8 @@ client.synonyms.deleteSynonymRule({ set_id, rule_id }) ==== Arguments * *Request (object):* -** *`set_id` (string)*: The id of the synonym set to be updated -** *`rule_id` (string)*: The id of the synonym rule to be deleted +** *`set_id` (string)*: The ID of the synonym set to update. +** *`rule_id` (string)*: The ID of the synonym rule to delete. [discrete] ==== get_synonym @@ -12074,9 +12865,9 @@ client.synonyms.getSynonym({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: "The id of the synonyms set to be retrieved -** *`from` (Optional, number)*: Starting offset for query rules to be retrieved -** *`size` (Optional, number)*: specifies a max number of query rules to retrieve +** *`id` (string)*: The synonyms set identifier to retrieve. +** *`from` (Optional, number)*: The starting offset for query rules to retrieve. +** *`size` (Optional, number)*: The max number of query rules to retrieve. [discrete] ==== get_synonym_rule @@ -12093,15 +12884,15 @@ client.synonyms.getSynonymRule({ set_id, rule_id }) ==== Arguments * *Request (object):* -** *`set_id` (string)*: The id of the synonym set to retrieve the synonym rule from -** *`rule_id` (string)*: The id of the synonym rule to retrieve +** *`set_id` (string)*: The ID of the synonym set to retrieve the synonym rule from. +** *`rule_id` (string)*: The ID of the synonym rule to retrieve. [discrete] ==== get_synonyms_sets Get all synonym sets. Get a summary of all defined synonym sets. -{ref}/list-synonyms-sets.html[Endpoint documentation] +{ref}/get-synonyms-set.html[Endpoint documentation] [source,ts] ---- client.synonyms.getSynonymsSets({ ... }) @@ -12111,8 +12902,8 @@ client.synonyms.getSynonymsSets({ ... }) ==== Arguments * *Request (object):* -** *`from` (Optional, number)*: Starting offset -** *`size` (Optional, number)*: specifies a max number of results to get +** *`from` (Optional, number)*: The starting offset for synonyms sets to retrieve. +** *`size` (Optional, number)*: The maximum number of synonyms sets to retrieve. [discrete] ==== put_synonym @@ -12120,6 +12911,9 @@ Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. +When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. +This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. + {ref}/put-synonyms-set.html[Endpoint documentation] [source,ts] ---- @@ -12130,14 +12924,18 @@ client.synonyms.putSynonym({ id, synonyms_set }) ==== Arguments * *Request (object):* -** *`id` (string)*: The id of the synonyms set to be created or updated -** *`synonyms_set` ({ id, synonyms } | { id, synonyms }[])*: The synonym set information to update +** *`id` (string)*: The ID of the synonyms set to be created or updated. +** *`synonyms_set` ({ id, synonyms } | { id, synonyms }[])*: The synonym rules definitions for the synonyms set. [discrete] ==== put_synonym_rule Create or update a synonym rule. Create or update a synonym rule in a synonym set. +If any of the synonym rules included is invalid, the API returns an error. + +When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. 
+ {ref}/put-synonym-rule.html[Endpoint documentation] [source,ts] ---- @@ -12148,9 +12946,9 @@ client.synonyms.putSynonymRule({ set_id, rule_id, synonyms }) ==== Arguments * *Request (object):* -** *`set_id` (string)*: The id of the synonym set to be updated with the synonym rule -** *`rule_id` (string)*: The id of the synonym rule to be updated or created -** *`synonyms` (string)* +** *`set_id` (string)*: The ID of the synonym set. +** *`rule_id` (string)*: The ID of the synonym rule to be updated or created. +** *`synonyms` (string)*: The synonym rule information definition, which must be in Solr format. [discrete] === tasks @@ -12222,7 +13020,6 @@ This information is useful to distinguish tasks from each other but is more cost ** *`group_by` (Optional, Enum("nodes" | "parents" | "none"))*: Key used to group tasks in the response. ** *`nodes` (Optional, string | string[])*: List of node IDs or names used to limit returned information. ** *`parent_task_id` (Optional, string)*: Parent task ID used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. @@ -12233,6 +13030,22 @@ This information is useful to distinguish tasks from each other but is more cost Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. +This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. +For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. + +The response from the API contains: + +* Sample messages. +* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. +* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. + +If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. +It helps determine why the returned structure was chosen. + {ref}/find-field-structure.html[Endpoint documentation] [source,ts] ---- @@ -12260,7 +13073,7 @@ Use this parameter to specify whether to use ECS Grok patterns instead of legacy This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. 
The intention in that situation is that a user who knows the meanings will rename the fields before using them. -** *`explain` (Optional, boolean)*: If true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. +** *`explain` (Optional, boolean)*: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. ** *`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))*: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. @@ -12275,7 +13088,7 @@ If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. ** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. -Otherwise, the default value is false. +Otherwise, the default value is `false`. ** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. ** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. @@ -12331,6 +13144,7 @@ The messages must contain data that is suitable to be ingested into Elasticsearc This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. + The response from the API contains: * Sample messages. @@ -12341,6 +13155,9 @@ Appropriate mappings for an Elasticsearch index, which you could use to ingest t All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. +If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. +It helps determine why the returned structure was chosen. + {ref}/find-message-structure.html[Endpoint documentation] [source,ts] ---- @@ -12379,7 +13196,7 @@ If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. ** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. -Otherwise, the default value is false. +Otherwise, the default value is `false`. ** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. 
** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. @@ -12458,23 +13275,100 @@ client.textStructure.findStructure({ ... }) * *Request (object):* ** *`text_files` (Optional, TJsonDocument[])* -** *`charset` (Optional, string)*: The text’s character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, windows-1252, or EUC-JP. If this parameter is not specified, the structure finder chooses an appropriate character set. -** *`column_names` (Optional, string)*: If you have set format to delimited, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. -** *`delimiter` (Optional, string)*: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (|). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. -** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns (disabled or v1, default: disabled). -** *`explain` (Optional, boolean)*: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. +** *`charset` (Optional, string)*: The text's character set. +It must be a character set that is supported by the JVM that Elasticsearch uses. +For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. +If this parameter is not specified, the structure finder chooses an appropriate character set. +** *`column_names` (Optional, string)*: If you have set `format` to `delimited`, you can specify the column names in a list. +If this parameter is not specified, the structure finder uses the column names from the header row of the text. +If the text does not have a header row, columns are named "column1", "column2", "column3", for example. +** *`delimiter` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. +Only a single character is supported; the delimiter cannot have multiple characters. +By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the delimited format to be detected. +If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. +** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +Valid values are `disabled` and `v1`. +This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings will rename these fields before using them.
+** *`explain` (Optional, boolean)*: If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result.
If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen.
-** *`format` (Optional, string)*: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
-** *`grok_pattern` (Optional, string)*: If you have set format to semi_structured_text, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the timestamp_field parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If grok_pattern is not specified, the structure finder creates a Grok pattern.
-** *`has_header_row` (Optional, boolean)*: If you have set format to delimited, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows.
-** *`line_merge_size_limit` (Optional, number)*: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected.
-** *`lines_to_sample` (Optional, number)*: The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2; If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines.
-** *`quote` (Optional, string)*: If you have set format to delimited, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote ("). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
-** *`should_trim_fields` (Optional, boolean)*: If you have set format to delimited, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (|), the default value is true. Otherwise, the default value is false.
-** *`timeout` (Optional, string | -1 | 0)*: Sets the maximum amount of time that the structure analysis can take.
If the analysis is still running when the timeout expires then it will be stopped. -** *`timestamp_field` (Optional, string)*: Optional parameter to specify the timestamp field in the file +** *`format` (Optional, string)*: The high level structure of the text. +Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +** *`grok_pattern` (Optional, string)*: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. +The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +** *`has_header_row` (Optional, boolean)*: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. +If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. +** *`line_merge_size_limit` (Optional, number)*: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. +If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. +** *`lines_to_sample` (Optional, number)*: The number of lines to include in the structural analysis, starting from the beginning of the text. +The minimum is 2. +If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. + +NOTE: The number of lines and the variation of the lines affects the speed of the analysis. +For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. +If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. +** *`quote` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +** *`should_trim_fields` (Optional, boolean)*: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. +Otherwise, the default value is `false`. +** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. 
+If the analysis is still running when the timeout expires then it will be stopped.
+** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text.
+In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field.
+
+If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+
+For structured text, if you specify this parameter, the field must exist within the text.
+
+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
+For structured text, it is not compulsory to have a timestamp in the text.
** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text.
+Only a subset of Java time format letter groups are supported:
+
+* `a`
+* `d`
+* `dd`
+* `EEE`
+* `EEEE`
+* `H`
+* `HH`
+* `h`
+* `M`
+* `MM`
+* `MMM`
+* `MMMM`
+* `mm`
+* `ss`
+* `XX`
+* `XXX`
+* `yy`
+* `yyyy`
+* `zzz`
+
+Additionally, `S` letter groups (fractional seconds) of length one to nine are supported, provided they occur after `ss` and are separated from the `ss` by a `.`, `,`, or `:`.
+Spacing and punctuation are also permitted, with the exception of `?`, newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+Another is when the timestamp format is one that the structure finder does not consider by default.
+
+If this parameter is not specified, the structure finder chooses the best format from a built-in set.
+
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
+
[discrete] ==== test_grok_pattern Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. {ref}/test-grok-pattern.html[Endpoint documentation] [source,ts] ---- client.textStructure.testGrokPattern({ grok_pattern, text }) ---- ==== Arguments * *Request (object):* -** *`grok_pattern` (string)*: Grok pattern to run on the text. -** *`text` (string[])*: Lines of text to run the Grok pattern on. -** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns (disabled or v1, default: disabled). +** *`grok_pattern` (string)*: The Grok pattern to run on the text. +** *`text` (string[])*: The lines of text to run the Grok pattern on. +** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +Valid values are `disabled` and `v1`.
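For example, a minimal sketch of testing a pattern against a sample log line (the pattern and text are illustrative):

[source,ts]
----
const response = await client.textStructure.testGrokPattern({
  grok_pattern: '%{IP:client} %{WORD:method} %{URIPATHPARAM:request}',
  text: ['55.3.244.1 GET /index.html'],
  ecs_compatibility: 'v1'
})
// Each entry reports whether the line matched and the extracted fields.
console.log(response.matches)
----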
[discrete] === transform @@ -12711,6 +13607,7 @@ client.transform.resetTransform({ transform_id }) hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. ** *`force` (Optional, boolean)*: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== schedule_now_transform @@ -12886,6 +13783,9 @@ The acknowledgement state of an action is stored in the `status.actions.<id>.ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error. The reason for this behavior is to prevent overwriting the watch status from a watch execution. +Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. +This happens when the condition of the watch is not met (the condition evaluates to false). + {ref}/watcher-api-ack-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.ackWatch({ watch_id }) ---- ==== Arguments * *Request (object):* -** *`watch_id` (string)*: Watch ID -** *`action_id` (Optional, string | string[])*: A list of the action ids to be acked +** *`watch_id` (string)*: The watch identifier. +** *`action_id` (Optional, string | string[])*: A list of the action identifiers to acknowledge. +If you omit this parameter, all of the actions of the watch are acknowledged. [discrete] ==== activate_watch Activate a watch. A watch can be either active or inactive. {ref}/watcher-api-activate-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.activateWatch({ watch_id }) ---- ==== Arguments * *Request (object):* -** *`watch_id` (string)*: Watch ID +** *`watch_id` (string)*: The watch identifier. [discrete] ==== deactivate_watch Deactivate a watch. A watch can be either active or inactive. {ref}/watcher-api-deactivate-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.deactivateWatch({ watch_id }) ---- ==== Arguments * *Request (object):* -** *`watch_id` (string)*: Watch ID +** *`watch_id` (string)*: The watch identifier. [discrete] ==== delete_watch Delete a watch. When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. Deleting a watch does not delete any watch execution records related to this watch from the watch history. IMPORTANT: Deleting a watch must be done by using only this API. Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API. When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. {ref}/watcher-api-delete-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.deleteWatch({ id }) ---- ==== Arguments * *Request (object):* -** *`id` (string)*: Watch ID +** *`id` (string)*: The watch identifier. [discrete] ==== execute_watch Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher. +When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. +If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. + +When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch. + {ref}/watcher-api-execute-watch.html[Endpoint documentation] [source,ts] ---- client.watcher.executeWatch({ ... }) ---- ==== Arguments * *Request (object):* -** *`id` (Optional, string)*: Identifier for the watch. +** *`id` (Optional, string)*: The watch identifier. ** *`action_modes` (Optional, Record)*: Determines how to handle the watch actions as part of the watch execution. ** *`alternative_input` (Optional, Record)*: When present, the watch uses this object as a payload instead of executing its own input. ** *`ignore_condition` (Optional, boolean)*: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter.
-** *`record_execution` (Optional, boolean)*: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent executions. This can also be specified as an HTTP parameter. +** *`record_execution` (Optional, boolean)*: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. +In addition, the status of the watch is updated, possibly throttling subsequent runs. +This can also be specified as an HTTP parameter. ** *`simulated_actions` (Optional, { actions, all, use_all })* -** *`trigger_data` (Optional, { scheduled_time, triggered_time })*: This structure is parsed as the data of the trigger event that will be used during the watch execution -** *`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })*: When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and record_execution cannot be set. +** *`trigger_data` (Optional, { scheduled_time, triggered_time })*: This structure is parsed as the data of the trigger event that will be used during the watch execution. +** *`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })*: When present, this watch is used instead of the one specified in the request. +This watch is not persisted to the index and `record_execution` cannot be set. ** *`debug` (Optional, boolean)*: Defines whether the watch runs in debug mode. [discrete] ==== get_settings -Retrieve settings for the watcher system index +Get Watcher index settings. +Get settings for the Watcher internal index (`.watches`). +Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. {ref}/watcher-api-get-settings.html[Endpoint documentation] [source,ts] ---- -client.watcher.getSettings() +client.watcher.getSettings({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_watch @@ -13013,7 +13930,7 @@ client.watcher.getWatch({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Watch ID +** *`id` (string)*: The watch identifier. [discrete] ==== put_watch @@ -13040,15 +13957,20 @@ client.watcher.putWatch({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Watch ID -** *`actions` (Optional, Record)* -** *`condition` (Optional, { always, array_compare, compare, never, script })* -** *`input` (Optional, { chain, http, search, simple })* -** *`metadata` (Optional, Record)* -** *`throttle_period` (Optional, string)* -** *`transform` (Optional, { chain, script, search })* -** *`trigger` (Optional, { schedule })* -** *`active` (Optional, boolean)*: Specify whether the watch is in/active by default +** *`id` (string)*: The identifier for the watch. +** *`actions` (Optional, Record)*: The list of actions that will be run if the condition matches. +** *`condition` (Optional, { always, array_compare, compare, never, script })*: The condition that defines if the actions should be run. 
+** *`input` (Optional, { chain, http, search, simple })*: The input that loads the data for the watch.
+** *`metadata` (Optional, Record)*: Metadata JSON that will be copied into the history entries.
+** *`throttle_period` (Optional, string | -1 | 0)*: The minimum time between actions being run.
+The default is 5 seconds.
+This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`.
+If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request.
+** *`throttle_period_in_millis` (Optional, Unit)*: Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request.
+** *`transform` (Optional, { chain, script, search })*: The transform that processes the watch payload to prepare it for the watch actions.
+** *`trigger` (Optional, { schedule })*: The trigger that defines when the watch should run.
+** *`active` (Optional, boolean)*: The initial state of the watch.
+The default value is `true`, which means the watch is active by default.
** *`if_primary_term` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified primary term ** *`if_seq_no` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified sequence number ** *`version` (Optional, number)*: Explicit version number for concurrency control [discrete] ==== query_watches Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. +Note that only the `_id` and `metadata.*` fields are queryable or sortable. + {ref}/watcher-api-query-watches.html[Endpoint documentation] [source,ts] ---- client.watcher.queryWatches({ ... }) ---- ==== Arguments * *Request (object):* -** *`from` (Optional, number)*: The offset from the first result to fetch. Needs to be non-negative. -** *`size` (Optional, number)*: The number of hits to return. Needs to be non-negative. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Optional, query filter watches to be returned. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Optional sort definition. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Optional search After to do pagination using last hit’s sort values. +** *`from` (Optional, number)*: The offset from the first result to fetch. +It must be non-negative. +** *`size` (Optional, number)*: The number of hits to return. +It must be non-negative.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query that filters the watches to be returned. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: One or more fields used to sort the search results. +** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Retrieve the next page of hits using a set of sort values from the previous page. [discrete] ==== start @@ -13082,13 +14008,20 @@ Start the Watcher service if it is not already running. {ref}/watcher-api-start.html[Endpoint documentation] [source,ts] ---- -client.watcher.start() +client.watcher.start({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== stats Get Watcher statistics. +This API always returns basic metrics. +You retrieve more metrics by using the metric parameter. {ref}/watcher-api-stats.html[Endpoint documentation] [source,ts] @@ -13111,20 +14044,40 @@ Stop the Watcher service if it is running. {ref}/watcher-api-stop.html[Endpoint documentation] [source,ts] ---- -client.watcher.stop() +client.watcher.stop({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== update_settings -Update settings for the watcher system index +Update Watcher index settings. +Update settings for the Watcher internal index (`.watches`). +Only a subset of settings can be modified. +This includes `index.auto_expand_replicas` and `index.number_of_replicas`. {ref}/watcher-api-update-settings.html[Endpoint documentation] [source,ts] ---- -client.watcher.updateSettings() +client.watcher.updateSettings({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index.auto_expand_replicas` (Optional, string)* +** *`index.number_of_replicas` (Optional, number)* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] === xpack @@ -13147,9 +14100,11 @@ client.xpack.info({ ... }) ==== Arguments * *Request (object):* -** *`categories` (Optional, Enum("build" | "features" | "license")[])*: A list of the information categories to include in the response. For example, `build,license,features`. 
+** *`categories` (Optional, Enum("build" | "features" | "license")[])*: A list of the information categories to include in the response.
+For example, `build,license,features`.
** *`accept_enterprise` (Optional, boolean)*: If this parameter is used, it must be set to `true` -** *`human` (Optional, boolean)*: Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. +** *`human` (Optional, boolean)*: Defines whether additional human-readable information is included in the response. +In particular, it adds descriptions and a tag line. [discrete] ==== usage Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics. {ref}/usage-api.html[Endpoint documentation] [source,ts] ---- client.xpack.usage({ ... }) ---- ==== Arguments * *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`.
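A minimal usage sketch for the `info` and `usage` calls documented above (parameter values are illustrative):

[source,ts]
----
const info = await client.xpack.info({ categories: ['build', 'license', 'features'] })
const usage = await client.xpack.usage({ master_timeout: '30s' })
console.log(info.license?.type, Object.keys(usage))
----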
diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 75bdb4107..4da8aedfa 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -78,7 +78,7 @@ export default class Cluster { } /** - * Delete component templates. Deletes component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. + * Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterDeleteComponentTemplateResponse> @@ -172,7 +172,7 @@ export default class Cluster { } /** - * Get component templates. Retrieves information about component templates. + * Get component templates. Get information about component templates. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterGetComponentTemplateResponse> @@ -374,7 +374,7 @@ export default class Cluster { } /** - * Create or update a component template. Creates or updates a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. + * Create or update a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. **Applying component templates** You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template's `composed_of` list. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} */ async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPutComponentTemplateResponse>
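// A minimal usage sketch for the putComponentTemplate method documented above
// (the template name and settings are illustrative, not part of this patch):
await client.cluster.putComponentTemplate({
  name: 'my-settings-template',
  template: { settings: { number_of_replicas: 1 } }
})
// The template only takes effect once an index template lists it in its `composed_of` array.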
diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index 12f6858a4..399537099 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -668,22 +668,26 @@ export default class Connector { } /** - * Updates the stats fields in the connector sync job document. + * Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. This API is mainly used by the connector service for updating sync job information. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-stats-api.html | Elasticsearch API documentation} */ - async syncJobUpdateStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO> - async syncJobUpdateStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>> - async syncJobUpdateStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO> - async syncJobUpdateStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> { + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorSyncJobUpdateStatsResponse> + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobUpdateStatsResponse, unknown>> + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobUpdateStatsResponse> + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = ['connector_sync_job_id'] + const acceptedBody: string[] = ['deleted_document_count', 'indexed_document_count', 'indexed_document_volume', 'last_seen', 'metadata', 'total_document_count'] const querystring: Record<string, any> = {} - const body = undefined + const body: Record<string, any> = {} - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } }
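// A minimal usage sketch for the syncJobUpdateStats method updated above
// (the sync job ID and counts are illustrative):
await client.connector.syncJobUpdateStats({
  connector_sync_job_id: 'my-connector-sync-job-id',
  deleted_document_count: 0,
  indexed_document_count: 42,
  indexed_document_volume: 1024
})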
diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index 3b01fcf7e..3cb191687 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -45,7 +45,7 @@ export default class DanglingIndices { /** * Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dangling-index-delete.html | Elasticsearch API documentation} */ async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DanglingIndicesDeleteDanglingIndexResponse> async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesDeleteDanglingIndexResponse, unknown>> @@ -77,7 +77,7 @@ export default class DanglingIndices { /** * Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dangling-index-import.html | Elasticsearch API documentation} */ async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DanglingIndicesImportDanglingIndexResponse> async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesImportDanglingIndexResponse, unknown>> @@ -109,7 +109,7 @@ /** * Get the dangling indices. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dangling-indices-list.html | Elasticsearch API documentation} */ async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DanglingIndicesListDanglingIndicesResponse> async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesListDanglingIndicesResponse, unknown>>
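// A minimal sketch of the dangling-index workflow documented above:
// list the dangling indices, then delete (or import) each by UUID.
const { dangling_indices } = await client.danglingIndices.listDanglingIndices()
for (const index of dangling_indices) {
  await client.danglingIndices.deleteDanglingIndex({
    index_uuid: index.index_uuid,
    accept_data_loss: true
  })
}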
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -161,7 +161,7 @@ export default class Indices { } /** - * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: * First, it creates a new target index with the same definition as the source index. * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. + * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: * First, it creates a new target index with the same definition as the source index. * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: * The index must be marked as read-only and have a cluster health status of green. * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. 
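Since the requirements above say the source index must be read-only with green health before cloning, a plausible two-step sketch using the add index block API (index names are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Mark the source index read-only, then clone it into a new index.
await client.indices.addBlock({ index: 'my-source-index', block: 'write' })
await client.indices.clone({
  index: 'my-source-index',
  target: 'my-cloned-index'
})
```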
The current write index on a data stream cannot be cloned. In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. **Monitor the cloning process** The cloning process can be monitored with the cat recovery API, or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. At this point, all shards are in the state unassigned. If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. Once the primary shard is allocated, it moves to state initializing, and the clone process begins. When the clone operation completes, the shard will become active. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. **Wait for active shards** Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html | Elasticsearch API documentation} */ async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloneResponse> @@ -230,7 +230,7 @@ } /** - * Create an index. Creates a new index. + * Create an index. You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following: * Settings for the index. * Mappings for fields in the index. * Index aliases. **Wait for active shards** By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. The index creation response will indicate what happened. For example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. These values simply indicate whether the operation completed before the timeout. If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.
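A sketch of the `acknowledged`/`shards_acknowledged` behavior described above, assuming a client against a local node (index name and settings are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Wait for two active copies of each shard (primary plus one replica)
// instead of just the primaries before the call returns.
const response = await client.indices.create({
  index: 'my-index',
  wait_for_active_shards: 2,
  settings: { number_of_shards: 1, number_of_replicas: 1 }
})
// Either flag can be false on a timeout even if creation later succeeds.
console.log(response.acknowledged, response.shards_acknowledged)
```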
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html | Elasticsearch API documentation} */ async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -338,7 +338,7 @@ export default class Indices { } /** - * Delete indices. Deletes one or more indices. + * Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards. You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html | Elasticsearch API documentation} */ async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -371,7 +371,7 @@ export default class Indices { /** * Delete an alias. Removes a data stream or index from an alias. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-alias.html | Elasticsearch API documentation} */ async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -506,7 +506,7 @@ export default class Indices { } /** - * Deletes a legacy index template. + * Delete a legacy index template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template-v1.html | Elasticsearch API documentation} */ async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -538,7 +538,7 @@ export default class Indices { } /** - * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. + * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html | Elasticsearch API documentation} */ async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -607,7 +607,7 @@ export default class Indices { } /** - * Check indices. 
Checks if one or more indices, index aliases, or data streams exist. + * Check indices. Check if one or more indices, index aliases, or data streams exist. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html | Elasticsearch API documentation} */ async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -711,7 +711,7 @@ export default class Indices { } /** - * Check existence of index templates. Returns information about whether a particular index template exists. + * Check existence of index templates. Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-template-exists-v1.html | Elasticsearch API documentation} */ async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -775,7 +775,7 @@ export default class Indices { } /** - * Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. + * Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. The response body reports the per-shard usage count of the data structures that back the fields in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html | Elasticsearch API documentation} */ async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -847,7 +847,7 @@ export default class Indices { } /** - * Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. 
If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. + * Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. **Blocks during a force merge** Calls to this API block until the merge is complete (unless the request contains `wait_for_completion=false`). If the client connection is lost before completion then the force merge process will continue in the background. Any new requests to force merge the same indices will also block until the ongoing force merge is complete. **Running force merge asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. However, you cannot cancel this task as the force merge task is not cancelable. Elasticsearch creates a record of this task as a document at `_tasks/`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. **Force merging multiple indices** You can force merge multiple indices with a single request by targeting: * One or more data streams that contain multiple backing indices * Multiple indices * One or more aliases * All data streams and indices in a cluster Each targeted shard is force-merged separately using the `force_merge` threadpool. By default, each node only has a single `force_merge` thread, which means that the shards on that node are force-merged one at a time. If you expand the `force_merge` threadpool on a node, then it will force merge its shards in parallel. Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one. **Data streams and time-based indices** Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. In these cases, each index only receives indexing traffic for a certain period of time. Once an index receives no more writes, its shards can be force-merged to a single segment. This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.
For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 ``` * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html | Elasticsearch API documentation} */ async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -887,7 +887,7 @@ export default class Indices { } /** - * Get index information. Returns information about one or more indices. For data streams, the API returns information about the stream’s backing indices. + * Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html | Elasticsearch API documentation} */ async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -920,7 +920,6 @@ export default class Indices { /** * Get aliases. Retrieves information for one or more data stream or index aliases. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} */ async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -997,6 +996,36 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle-stats.html | Elasticsearch API documentation} + */ + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_lifecycle/stats' + const meta: TransportRequestMetadata = { + name: 'indices.get_data_lifecycle_stats' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Get data streams. Retrieves information about one or more data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} @@ -1038,7 +1067,7 @@ export default class Indices { } /** - * Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. + * Get mapping definitions. Retrieves mapping definitions for one or more fields. 
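For the single-field mapping lookup described above, a minimal sketch (index and field names are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Fetch the mapping of one field rather than the whole index mapping.
const response = await client.indices.getFieldMapping({
  index: 'my-index',
  fields: 'user.id'
})
console.log(response['my-index'].mappings['user.id'])
```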
For data streams, the API retrieves field mappings for the stream’s backing indices. This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html | Elasticsearch API documentation} */ async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1078,7 +1107,7 @@ export default class Indices { } /** - * Get index templates. Returns information about one or more index templates. + * Get index templates. Get information about one or more index templates. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template.html | Elasticsearch API documentation} */ async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1118,7 +1147,7 @@ export default class Indices { } /** - * Get mapping definitions. Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. + * Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html | Elasticsearch API documentation} */ async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1158,7 +1187,7 @@ export default class Indices { } /** - * Get index settings. Returns setting information for one or more indices. For data streams, returns setting information for the stream’s backing indices. + * Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1205,7 +1234,7 @@ export default class Indices { } /** - * Get index templates. Retrieves information about one or more index templates. + * Get index templates. Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template-v1.html | Elasticsearch API documentation} */ async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1310,7 +1339,7 @@ export default class Indices { } /** - * Opens a closed index. For data streams, the API opens any closed backing indices. + * Open a closed index. For data streams, the API opens any closed backing indices. A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. 
When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off by using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk space, which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html | Elasticsearch API documentation} */ async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesOpenResponse> @@ -1454,7 +1483,7 @@ } /** - * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Multiple matching templates** If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. **Composing aliases, mappings, and settings** When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.
If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-template.html | Elasticsearch API documentation} */ async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1490,7 +1519,7 @@ export default class Indices { } /** - * Update field mappings. Adds new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields. For data streams, these changes are applied to all backing indices by default. + * Update field mappings. Add new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields and add new properties to existing object fields. For data streams, these changes are applied to all backing indices by default. **Add multi-fields to an existing field** Multi-fields let you index the same field in different ways. You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. You can populate the new multi-field with the update by query API. **Change supported mapping parameters for an existing field** The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. For example, you can use the update mapping API to update the `ignore_above` parameter. **Change the mapping of an existing field** Except for supported mapping parameters, you can't change the mapping or field type of an existing field. Changing an existing field could invalidate data that's already indexed. If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. **Rename a field** Renaming a field would invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html | Elasticsearch API documentation} */ async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1526,7 +1555,7 @@ export default class Indices { } /** - * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. + * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. 
To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html | Elasticsearch API documentation} */ async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1569,7 +1598,7 @@ export default class Indices { } /** - * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. + * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Indices matching multiple templates** Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. 
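A sketch of the dynamic-settings update and the null-reset behavior described in the `putSettings` hunk above (index name and interval are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Change a dynamic setting in real time...
await client.indices.putSettings({
  index: 'my-index',
  settings: { 'index.refresh_interval': '30s' }
})

// ...then revert it to the default value by sending null.
await client.indices.putSettings({
  index: 'my-index',
  settings: { 'index.refresh_interval': null }
})
```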
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates-v1.html | Elasticsearch API documentation} */ async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1605,7 +1634,7 @@ export default class Indices { } /** - * Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. Recovery automatically occurs during the following processes: * When creating an index for the first time. * When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. * Creation of new replica shard copies from the primary. * Relocation of a shard copy to a different node in the same cluster. * A snapshot restore operation. * A clone, shrink, or split operation. You can determine the cause of a shard recovery using the recovery or cat recovery APIs. The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. + * Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. Recovery automatically occurs during the following processes: * When creating an index for the first time. * When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. * Creation of new replica shard copies from the primary. * Relocation of a shard copy to a different node in the same cluster. * A snapshot restore operation. * A clone, shrink, or split operation. You can determine the cause of a shard recovery using the recovery or cat recovery APIs. The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. 
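To inspect the shard recoveries described above, a hedged sketch; note it reports only the last recovery of each shard copy that currently exists:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Include per-file and translog detail, and report completed
// recoveries as well as active ones.
const recovery = await client.indices.recovery({
  index: 'my-index',
  detailed: true,
  active_only: false
})
console.log(recovery)
```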
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html | Elasticsearch API documentation} */ async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1645,7 +1674,7 @@ export default class Indices { } /** - * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. + * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. Refresh requests are synchronous and do not return a response until the refresh operation completes. Refreshes are resource-intensive. To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html | Elasticsearch API documentation} */ async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1717,7 +1746,7 @@ export default class Indices { } /** - * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. + * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. 
For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. **Advantages of using this endpoint before a cross-cluster search** You may want to exclude a cluster or index from a search when: * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the `remote1` cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search. * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html | Elasticsearch API documentation} */ async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveClusterResponse> @@ -1781,7 +1810,7 @@ } /** - * Roll over to a new index. Creates a new index for a data stream or index alias. + * Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target. **Roll over a data stream** If you roll over a data stream, the API creates a new write index for the stream. The stream's previous write index becomes a regular backing index. A rollover also increments the data stream's generation. **Roll over an index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with `is_write_index` set to `true`. The API also sets `is_write_index` to `false` for the previous write index.
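Stepping back to the `resolveCluster` change earlier in this hunk, a sketch of the pre-flight check it describes, using the same index expression as the documentation example:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Ask each matching cluster whether it is connected, how
// skip_unavailable is configured, and whether anything matches,
// before running the actual cross-cluster search.
const info = await client.indices.resolveCluster({
  name: 'my-index-*,cluster*:my-index-*'
})
console.log(info)
```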
**Roll over an index alias with one index** If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. **Increment index names for an alias** When you roll over an index alias, you can specify a name for the new index. If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. This number is always six characters and zero-padded, regardless of the previous index's name. If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`. If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html | Elasticsearch API documentation} */ async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRolloverResponse> @@ -1942,7 +1971,7 @@ } /** - * Simulate an index. Returns the index configuration that would be applied to the specified index from an existing index template. + * Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-index.html | Elasticsearch API documentation} */ async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSimulateIndexTemplateResponse> @@ -1974,7 +2003,7 @@ } /** - * Simulate an index template. Returns the index configuration that would be applied by a particular index template. + * Simulate an index template. Get the index configuration that would be applied by a particular index template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-template.html | Elasticsearch API documentation} */ async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSimulateTemplateResponse> @@ -2018,7 +2047,7 @@ } /** - * Split an index. Split an index into a new index with more primary shards. * Before you can split an index: * The index must be read-only. * The cluster health status must be green. The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. * Hard-links segments from the source index into the target index.
If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. * Recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy the following requirements: * The target index must not exist. * The source index must have fewer primary shards than the target index. * The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. + * Split an index. Split an index into a new index with more primary shards. Before you can split an index: * The index must be read-only. * The cluster health status must be green. You can make an index read-only with the following request using the add index block API: ``` PUT /my_source_index/_block/write ``` The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. * Recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy the following requirements: * The target index must not exist. * The source index must have fewer primary shards than the target index. * The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html | Elasticsearch API documentation} */ async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSplitResponse> diff --git a/src/api/api/info.ts b/src/api/api/info.ts index 7c1b8a8ab..507fc78d9 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -38,8 +38,8 @@ import * as T from '../types' interface That { transport: Transport } /** - * Get cluster info. Returns basic information about the cluster. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rest-api-root.html | Elasticsearch API documentation} */ export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index 79f3556fb..bee5575b5 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -44,7 +44,7 @@ export default class Logstash { } /** - * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. + * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-delete-pipeline.html | Elasticsearch API documentation} */ async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts index 4b7d54cc5..d07851611 100644 --- a/src/api/api/migration.ts +++ b/src/api/api/migration.ts @@ -44,7 +44,7 @@ export default class Migration { } /** - * Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. TIP: This APIs is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. + * Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. TIP: This APIs is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-deprecation.html | Elasticsearch API documentation} */ async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -84,8 +84,8 @@ export default class Migration { } /** - * Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-feature-upgrade.html | Elasticsearch API documentation} + * Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/feature-migration-api.html | Elasticsearch API documentation} */ async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -115,7 +115,7 @@ export default class Migration { /** * Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process. Some functionality might be temporarily unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-feature-upgrade.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/feature-migration-api.html | Elasticsearch API documentation} */ async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 97c9d7f9b..e5f997b39 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1783,7 +1783,7 @@ export default class Ml { } /** - * Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. + * Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. By default, the query used in the source configuration is `{"match_all": {}}`. If the destination index does not exist, it is created automatically when you start the job. If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-dfanalytics.html | Elasticsearch API documentation} */ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1819,7 +1819,7 @@ export default class Ml { } /** - * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. + * Create a datafeed. 
Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-datafeed.html | Elasticsearch API documentation} */ async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1891,7 +1891,7 @@ export default class Ml { } /** - * Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. + * Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-job.html | Elasticsearch API documentation} */ async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts index 0f462b5a5..ff826dd38 100644 --- a/src/api/api/query_rules.ts +++ b/src/api/api/query_rules.ts @@ -44,7 +44,7 @@ export default class QueryRules { } /** - * Delete a query rule. Delete a query rule within a query ruleset. + * Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-rule.html | Elasticsearch API documentation} */ async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class QueryRules { } /** - * Delete a query ruleset. + * Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html | Elasticsearch API documentation} */ async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -204,7 +204,7 @@ export default class QueryRules { } /** - * Create or update a query rule. Create or update a query rule within a query ruleset. + * Create or update a query rule. Create or update a query rule within a query ruleset. IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in a single rule. It is advised to use one or the other in query rulesets, to avoid errors.
Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-rule.html | Elasticsearch API documentation} */ async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -241,7 +241,7 @@ } /** - * Create or update a query ruleset. + * Create or update a query ruleset. There is a limit of 100 rules per ruleset. This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in a single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html | Elasticsearch API documentation} */ async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index 4eebd5bd9..36fbbee92 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -224,7 +224,7 @@ export default class Rollup { } /** - * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. + * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. **Searching both historical rollup and non-rollup data** The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" } } } } ``` The rollup search endpoint does two things when the search runs: * The original request is sent to the non-rollup index unaltered. * A rewritten version of the original request is sent to the rollup index. When the two responses are received, the endpoint rewrites the rollup response and merges the two together. During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.
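To mirror the REST example above from this client, a minimal sketch (assumes a configured `client` instance; the index names come straight from the example):

```
// Sketch: rollup search spanning a live index and its rollup index.
const resp = await client.rollup.rollupSearch({
  index: 'sensor-1,sensor_rollup',
  size: 0, // rollups are pre-aggregated, so no hits can be returned
  aggregations: {
    max_temperature: { max: { field: 'temperature' } }
  }
})
console.log(resp.aggregations)
```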
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-search.html | Elasticsearch API documentation} */ async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -292,7 +292,7 @@ export default class Rollup { } /** - * Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. + * Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: ``` POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s ``` The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-stop-job.html | Elasticsearch API documentation} */ async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index d7ba14581..037bcca8c 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -45,7 +45,7 @@ export default class SearchableSnapshots { /** * Get cache statistics. Get statistics about the shared cache for partially mounted indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-cache-stats.html | Elasticsearch API documentation} */ async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -85,7 +85,7 @@ export default class SearchableSnapshots { /** * Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-clear-cache.html | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -162,7 +162,7 @@ export default class SearchableSnapshots { /** * Get searchable snapshot statistics. 
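Returning to the stop-rollup-job behavior described above, the same blocking call through this client might look like the following sketch (assumes a configured `client`; the job id `sensor` is taken from the example):

```
// Sketch: block until the rollup indexer has fully stopped, or time out.
await client.rollup.stopJob({
  id: 'sensor',
  wait_for_completion: true,
  timeout: '10s'
})
```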
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/security.ts b/src/api/api/security.ts index d75a50c86..8f2309225 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -173,22 +173,26 @@ export default class Security { } /** - * Updates the attributes of multiple existing API keys. + * Bulk update API keys. Update the attributes for multiple API keys. IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. It is not possible to update expired or invalidated API keys. This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-update-api-keys.html | Elasticsearch API documentation} */ - async bulkUpdateApiKeys (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async bulkUpdateApiKeys (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async bulkUpdateApiKeys (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async bulkUpdateApiKeys (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['expiration', 'ids', 'metadata', 'role_descriptors'] const querystring: Record = {} - const body = undefined + const body: Record = {} - params = params ?? 
{} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } @@ -515,6 +519,39 @@ return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Delegate PKI authentication. This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm. This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch. IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delegate-pki-authentication.html | Elasticsearch API documentation} */ + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['x509_certificate_chain'] + const querystring: Record = {} + const body: Record = {} + + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_security/delegate_pki' + const meta: TransportRequestMetadata = { + name: 'security.delegate_pki' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Delete application privileges. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-privilege.html | Elasticsearch API documentation} @@ -1128,13 +1165,13 @@ } /** - * Retrieve settings for the security system indices + * Get security index settings. Get the user-configurable settings for the security internal index (`.security` and associated indices).
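Stepping back briefly to the bulk API key update introduced above: a minimal sketch, grounded in the body parameters the diff accepts (`ids`, `expiration`, `metadata`, `role_descriptors`); the key IDs and metadata are hypothetical, and `client` is assumed to be configured:

```
// Sketch: apply one metadata/expiration update to several API keys at once.
const resp = await client.security.bulkUpdateApiKeys({
  ids: ['api-key-id-1', 'api-key-id-2'],
  expiration: '30d',
  metadata: { environment: 'production' }
})
// The response lists updated keys, no-op keys, and any per-key errors.
console.log(resp)
```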
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-settings.html | Elasticsearch API documentation} */ - async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -1144,6 +1181,7 @@ export default class Security { if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } @@ -2118,22 +2156,27 @@ export default class Security { } /** - * Update settings for the security system index + * Update security index settings. Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified, for example `index.auto_expand_replicas` and `index.number_of_replicas`. If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-settings.html | Elasticsearch API documentation} */ - async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['security', 'security-profile', 'security-tokens'] const querystring: Record = {} - const body = undefined + const body: Record = {} params = params ?? 
{} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index e1471befa..873402d5d 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -45,7 +45,7 @@ export default class Shutdown { /** * Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-shutdown.html | Elasticsearch API documentation} */ async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -77,7 +77,7 @@ export default class Shutdown { /** * Get the shutdown status. Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-shutdown.html | Elasticsearch API documentation} */ async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -116,8 +116,8 @@ export default class Shutdown { } /** - * Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type. IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. 
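A minimal sketch of the node-shutdown preparation just described (assumes a configured `client` with operator privileges where that feature is enabled; the node id and reason are hypothetical):

```
// Sketch: mark a node for restart so shards and tasks are migrated first.
await client.shutdown.putNode({
  node_id: 'node-1',
  type: 'restart',
  reason: 'routine maintenance',
  allocation_delay: '10m' // only meaningful for restart-type shutdowns
})
// Note: this does NOT stop the Elasticsearch process; poll getNode() for status.
```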
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} + * Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster. If the operator privileges feature is enabled, you must be an operator to use this API. The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type. IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-shutdown.html | Elasticsearch API documentation} */ async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts index b0c6ab5de..f1ced5c0a 100644 --- a/src/api/api/simulate.ts +++ b/src/api/api/simulate.ts @@ -44,22 +44,26 @@ export default class Simulate { } /** - * Simulates running ingest with example documents. + * Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well, the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.
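A minimal sketch of the simulated ingest just described, grounded in the parameters the diff accepts (`index`, `docs`, `pipeline_substitutions`, and so on); the index, pipeline name, and document are hypothetical, and `client` is assumed to be configured:

```
// Sketch: dry-run ingest pipelines against a sample doc; nothing is indexed.
const resp = await client.simulate.ingest({
  index: 'my-index',
  docs: [{ _source: { message: 'hello world' } }],
  pipeline_substitutions: {
    'my-pipeline': {
      processors: [{ set: { field: 'simulated', value: true } }]
    }
  }
})
// The response carries each transformed document plus the pipelines that ran.
console.log(resp)
```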
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-ingest-api.html | Elasticsearch API documentation} */ - async ingest (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async ingest (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async ingest (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async ingest (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithMeta): Promise> + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['docs', 'component_template_substitutions', 'index_template_subtitutions', 'mapping_addition', 'pipeline_substitutions'] const querystring: Record = {} - const body = undefined + const body: Record = {} - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index f688c32be..a6b98caaa 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -77,7 +77,7 @@ export default class Snapshot { /** * Clone a snapshot. Clone part or all of a snapshot into another snapshot in the same repository. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clone-snapshot-api.html | Elasticsearch API documentation} */ async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -115,14 +115,14 @@ /** * Create a snapshot. Take a snapshot of a cluster or of data streams and indices.
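A minimal sketch of taking a snapshot with the body parameters this diff accepts (`indices`, `include_global_state`, and friends); the repository and snapshot names are hypothetical, and `client` is assumed to be configured:

```
// Sketch: snapshot selected indices into a registered repository.
await client.snapshot.create({
  repository: 'my_repository',
  snapshot: 'snapshot_1',
  indices: 'my-index-*',
  include_global_state: false,
  wait_for_completion: true // query parameter: block until the snapshot finishes
})
```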
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-snapshot-api.html | Elasticsearch API documentation} */ async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] - const acceptedBody: string[] = ['ignore_unavailable', 'include_global_state', 'indices', 'feature_states', 'metadata', 'partial'] + const acceptedBody: string[] = ['expand_wildcards', 'feature_states', 'ignore_unavailable', 'include_global_state', 'indices', 'metadata', 'partial'] const querystring: Record = {} const body: Record = {} @@ -151,8 +151,8 @@ } /** - * Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster's global metadata must be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `clsuter.blocks.read_only_allow_delete` settings) that prevent write access. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster's global metadata must be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. Several options for this API can be specified using a query parameter or a request body parameter. If both parameters are specified, only the query parameter is used. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-snapshot-repo-api.html | Elasticsearch API documentation} */ async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -188,7 +188,7 @@ /** * Delete snapshots. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-snapshot-api.html | Elasticsearch API documentation} */ async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -221,7 +221,7 @@ /** * Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots.
The snapshots themselves are left untouched and in place. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-snapshot-repo-api.html | Elasticsearch API documentation} */ async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -252,8 +252,8 @@ export default class Snapshot { } /** - * Get snapshot information. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * Get snapshot information. NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. Snapshots concurrently created may be seen during an iteration. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-snapshot-api.html | Elasticsearch API documentation} */ async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -286,7 +286,7 @@ export default class Snapshot { /** * Get snapshot repository information. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-snapshot-repo-api.html | Elasticsearch API documentation} */ async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -357,8 +357,8 @@ export default class Snapshot { } /** - * Verify the repository integrity. Verify the integrity of the contents of a snapshot repository. This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail. If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity. Until you do so: * It may not be possible to restore some snapshots from this repository. * Searchable snapshots may report errors when searched or may have unassigned shards. * Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored. * Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk. * Continuing to write to the repository while it is in an invalid state may causing additional damage to its contents. 
If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. You must also identify what caused the damage and take action to prevent it from happening again. If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository. Avoid all operations which write to the repository while the verify repository integrity API is running. If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: This API may not work correctly in a mixed-version cluster. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * Verify the repository integrity. Verify the integrity of the contents of a snapshot repository. This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail. If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity. Until you do so: * It may not be possible to restore some snapshots from this repository. * Searchable snapshots may report errors when searched or may have unassigned shards. * Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored. * Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk. * Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents. If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. You must also identify what caused the damage and take action to prevent it from happening again. If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations.
In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository. Avoid all operations which write to the repository while the verify repository integrity API is running. If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: This API may not work correctly in a mixed-version cluster. The default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster. For instance, by default it will only use at most half of the `snapshot_meta` threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool. If you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster. For large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API. The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/verify-repo-integrity-api.html | Elasticsearch API documentation} */ async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -390,7 +390,7 @@ /** * Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible. To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks. Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream ``` If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.
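Tying the restore description together, a minimal sketch (assumes a configured `client`; the repository, snapshot, and index names are hypothetical):

```
// Sketch: restore one index under a new name so it cannot clash with live data.
await client.snapshot.restore({
  repository: 'my_repository',
  snapshot: 'snapshot_1',
  indices: 'my-index',
  rename_pattern: '(.+)',
  rename_replacement: 'restored-$1',
  wait_for_completion: true
})
```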
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/restore-snapshot-api.html | Elasticsearch API documentation} */ async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -426,8 +426,8 @@ } /** - * Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-snapshot-status-api.html | Elasticsearch API documentation} */ async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -471,7 +471,7 @@ /** * Verify a snapshot repository. Check for common misconfigurations in a snapshot repository.
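To make the status-cost guidance above concrete, a minimal sketch of both usages (assumes a configured `client`; the repository and snapshot names are hypothetical):

```
// Preferred: status of currently running snapshots only (cheap).
const running = await client.snapshot.status()

// Targeted: a specific snapshot; can be expensive for completed snapshots.
const one = await client.snapshot.status({
  repository: 'my_repository',
  snapshot: 'snapshot_1'
})
```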
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/verify-snapshot-repo-api.html | Elasticsearch API documentation} */ async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index ce8144cc9..286316bcb 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -77,7 +77,7 @@ export default class Sql { } /** - * Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. + * Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: * Users with the `cancel_task` cluster privilege. * The user who first submitted the search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-async-sql-search-api.html | Elasticsearch API documentation} */ async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -109,7 +109,7 @@ export default class Sql { } /** - * Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. + * Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-api.html | Elasticsearch API documentation} */ async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -181,7 +181,7 @@ export default class Sql { async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedBody: string[] = ['catalog', 'columnar', 'cursor', 'fetch_size', 'filter', 'query', 'request_timeout', 'page_timeout', 'time_zone', 'field_multi_value_leniency', 'runtime_mappings', 'wait_for_completion_timeout', 'params', 'keep_alive', 'keep_on_completion', 'index_using_frozen'] + const acceptedBody: string[] = ['allow_partial_search_results', 'catalog', 'columnar', 'cursor', 'fetch_size', 'field_multi_value_leniency', 'filter', 'index_using_frozen', 'keep_alive', 'keep_on_completion', 'page_timeout', 'params', 'query', 'request_timeout', 'runtime_mappings', 'time_zone', 'wait_for_completion_timeout'] const querystring: Record = {} const body: Record = {} @@ -207,7 +207,7 @@ export default class Sql { } /** - * Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. + * Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. 
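A minimal sketch of the SQL search whose body parameters are listed above, including the newly accepted `allow_partial_search_results` (assumes a configured `client`; the index and query are hypothetical):

```
// Sketch: run an SQL query and page through results with a small fetch size.
const resp = await client.sql.query({
  query: 'SELECT message FROM "my-index" ORDER BY "@timestamp" DESC',
  fetch_size: 5,
  allow_partial_search_results: false
})
console.log(resp.columns, resp.rows)
```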
It accepts the same request body parameters as the SQL search API, excluding `cursor`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/sql-translate-api.html | Elasticsearch API documentation} */ async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 896bc4d2b..107dae465 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -44,7 +44,7 @@ export default class Synonyms { } /** - * Delete a synonym set. + * Delete a synonym set. You can only delete a synonyms set that is not in use by any index analyzer. Synonyms sets can be used in synonym graph token filters and synonym token filters. These synonym filters can be used as part of search analyzers. Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase. If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. To prevent that, synonyms sets that are used in analyzers can't be deleted. A delete request in this case will return a 400 response code. To remove a synonyms set, you must first remove all indices that contain analyzers using it. You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. Once finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonyms-set.html | Elasticsearch API documentation} */ async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -175,7 +175,7 @@ export default class Synonyms { /** * Get all synonym sets. Get a summary of all defined synonym sets. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-synonyms-sets.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonyms-set.html | Elasticsearch API documentation} */ async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -204,7 +204,7 @@ export default class Synonyms { } /** - * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. + * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. 
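A minimal sketch of creating a synonyms set as described above (assumes a configured `client`; the set name and rules are hypothetical, and the `synonyms_set` body shape follows this client's request types):

```
// Sketch: create or replace a synonyms set; analyzers using it reload.
await client.synonyms.putSynonym({
  id: 'my-synonyms-set',
  synonyms_set: [
    { id: 'rule-1', synonyms: 'hello, hi' },
    { synonyms: 'bye, goodbye' } // rule id auto-assigned when omitted
  ]
})
```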
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonyms-set.html | Elasticsearch API documentation} */ async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -240,7 +240,7 @@ export default class Synonyms { } /** - * Create or update a synonym rule. Create or update a synonym rule in a synonym set. + * Create or update a synonym rule. Create or update a synonym rule in a synonym set. If any of the synonym rules included is invalid, the API returns an error. When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonym-rule.html | Elasticsearch API documentation} */ async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index 395613b49..35de18ecf 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -44,7 +44,7 @@ export default class TextStructure { } /** - * Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. + * Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-field-structure.html | Elasticsearch API documentation} */ async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -73,7 +73,7 @@ export default class TextStructure { } /** - * Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. The response from the API contains: * Sample messages. 
* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-message-structure.html | Elasticsearch API documentation} */ async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index c76b4c4a9..de32a06e7 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -44,7 +44,7 @@ export default class Watcher { } /** - * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions..ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. + * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions.<id>.ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error. The reason for this behavior is to prevent overwriting the watch status from a watch execution. Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false).
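A minimal sketch of the acknowledge flow described above (assumes a configured `client`; the watch and action IDs are hypothetical):

```
// Sketch: throttle one action of a watch until its condition is false again.
await client.watcher.ackWatch({
  watch_id: 'my-watch',
  action_id: 'email_admin'
})
```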
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-ack-watch.html | Elasticsearch API documentation}
 */
async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -180,7 +180,7 @@ export default class Watcher {
 }
 /**
- * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as great tool for testing and debugging your watches prior to adding them to Watcher.
+ * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher. When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-execute-watch.html | Elasticsearch API documentation}
 */
async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -224,13 +224,13 @@ export default class Watcher {
 }
 /**
- * Retrieve settings for the watcher system index
+ * Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`.
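+ * @example
+ * // Illustrative sketch only: assumes an instantiated `client` (Client). The call takes no
+ * // required parameters and returns the exposed subset of `.watches` index settings.
+ * const response = await client.watcher.getSettings()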
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-get-settings.html | Elasticsearch API documentation} */ - async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async getSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -240,6 +240,7 @@ export default class Watcher { if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } @@ -293,7 +294,7 @@ export default class Watcher { async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'transform', 'trigger'] + const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'throttle_period_in_millis', 'transform', 'trigger'] const querystring: Record = {} const body: Record = {} @@ -321,7 +322,7 @@ export default class Watcher { } /** - * Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. + * Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. Note that only the `_id` and `metadata.*` fields are queryable or sortable. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-query-watches.html | Elasticsearch API documentation} */ async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -385,7 +386,7 @@ export default class Watcher { } /** - * Get Watcher statistics. + * Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -455,22 +456,27 @@ export default class Watcher { } /** - * Update settings for the watcher system index + * Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. 
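+ * @example
+ * // Illustrative sketch only: assumes an instantiated `client` (Client). Only the two settings
+ * // named above are accepted in the body; the value 1 is an arbitrary example.
+ * const response = await client.watcher.updateSettings({ 'index.number_of_replicas': 1 })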
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-update-settings.html | Elasticsearch API documentation} */ - async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async updateSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['index.auto_expand_replicas', 'index.number_of_replicas'] const querystring: Record = {} - const body = undefined + const body: Record = {} params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/types.ts b/src/api/types.ts index b75242334..33075707b 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -1845,6 +1845,7 @@ export interface SearchShardsRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean local?: boolean + master_timeout?: Duration preference?: string routing?: Routing } @@ -8713,12 +8714,14 @@ export interface CcrShardStats { export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { name: Name + master_timeout?: Duration } export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrFollowRequest extends RequestBase { index: IndexName + master_timeout?: Duration wait_for_active_shards?: WaitForActiveShards data_stream_name?: string leader_index: IndexName @@ -8767,6 +8770,7 @@ export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused' export interface CcrFollowInfoRequest extends RequestBase { index: Indices + master_timeout?: Duration } export interface CcrFollowInfoResponse { @@ -8775,6 +8779,7 @@ export interface CcrFollowInfoResponse { export interface CcrFollowStatsRequest extends RequestBase { index: Indices + timeout?: Duration } export interface CcrFollowStatsResponse { @@ -8783,6 +8788,7 @@ export interface CcrFollowStatsResponse { export interface CcrForgetFollowerRequest extends RequestBase { index: IndexName + timeout?: Duration follower_cluster?: string follower_index?: IndexName follower_index_uuid?: Uuid @@ -8809,6 +8815,7 @@ export interface CcrGetAutoFollowPatternAutoFollowPatternSummary { export interface CcrGetAutoFollowPatternRequest extends RequestBase { name?: Name + master_timeout?: Duration } export interface CcrGetAutoFollowPatternResponse { @@ -8817,18 +8824,21 @@ export interface CcrGetAutoFollowPatternResponse { export interface CcrPauseAutoFollowPatternRequest extends RequestBase { name: Name + master_timeout?: Duration } export type CcrPauseAutoFollowPatternResponse = 
AcknowledgedResponseBase export interface CcrPauseFollowRequest extends RequestBase { index: IndexName + master_timeout?: Duration } export type CcrPauseFollowResponse = AcknowledgedResponseBase export interface CcrPutAutoFollowPatternRequest extends RequestBase { name: Name + master_timeout?: Duration remote_cluster: string follow_index_pattern?: IndexPattern leader_index_patterns?: IndexPatterns @@ -8850,12 +8860,14 @@ export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeAutoFollowPatternRequest extends RequestBase { name: Name + master_timeout?: Duration } export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeFollowRequest extends RequestBase { index: IndexName + master_timeout?: Duration max_outstanding_read_requests?: long max_outstanding_write_requests?: long max_read_request_operation_count?: long @@ -8889,6 +8901,8 @@ export interface CcrStatsFollowStats { } export interface CcrStatsRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export interface CcrStatsResponse { @@ -8898,6 +8912,7 @@ export interface CcrStatsResponse { export interface CcrUnfollowRequest extends RequestBase { index: IndexName + master_timeout?: Duration } export type CcrUnfollowResponse = AcknowledgedResponseBase @@ -8988,6 +9003,7 @@ export interface ClusterAllocationExplainNodeDiskUsage { export interface ClusterAllocationExplainRequest extends RequestBase { include_disk_info?: boolean include_yes_decisions?: boolean + master_timeout?: Duration current_node?: string index?: IndexName primary?: boolean @@ -9050,6 +9066,7 @@ export interface ClusterDeleteComponentTemplateRequest extends RequestBase { export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { + master_timeout?: Duration wait_for_removal?: boolean } @@ -9182,6 +9199,7 @@ export interface ClusterPendingTasksResponse { export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase { node_names?: Names node_ids?: Ids + master_timeout?: Duration timeout?: Duration } @@ -9967,6 +9985,19 @@ export interface ConnectorSyncJobPostResponse { id: Id } +export interface ConnectorSyncJobUpdateStatsRequest extends RequestBase { + connector_sync_job_id: Id + deleted_document_count: long + indexed_document_count: long + indexed_document_volume: long + last_seen?: Duration + metadata?: Metadata + total_document_count?: integer +} + +export interface ConnectorSyncJobUpdateStatsResponse { +} + export interface ConnectorUpdateActiveFilteringRequest extends RequestBase { connector_id: Id } @@ -10146,6 +10177,7 @@ export interface EnrichSummary { export interface EnrichDeletePolicyRequest extends RequestBase { name: Name + master_timeout?: Duration } export type EnrichDeletePolicyResponse = AcknowledgedResponseBase @@ -10158,6 +10190,7 @@ export interface EnrichExecutePolicyExecuteEnrichPolicyStatus { export interface EnrichExecutePolicyRequest extends RequestBase { name: Name + master_timeout?: Duration wait_for_completion?: boolean } @@ -10168,6 +10201,7 @@ export interface EnrichExecutePolicyResponse { export interface EnrichGetPolicyRequest extends RequestBase { name?: Names + master_timeout?: Duration } export interface EnrichGetPolicyResponse { @@ -10176,6 +10210,7 @@ export interface EnrichGetPolicyResponse { export interface EnrichPutPolicyRequest extends RequestBase { name: Name + master_timeout?: Duration geo_match?: 
EnrichPolicy match?: EnrichPolicy range?: EnrichPolicy @@ -10208,6 +10243,7 @@ export interface EnrichStatsExecutingPolicy { } export interface EnrichStatsRequest extends RequestBase { + master_timeout?: Duration } export interface EnrichStatsResponse { @@ -10338,6 +10374,7 @@ export interface FeaturesFeature { } export interface FeaturesGetFeaturesRequest extends RequestBase { + master_timeout?: Duration } export interface FeaturesGetFeaturesResponse { @@ -10345,6 +10382,7 @@ export interface FeaturesGetFeaturesResponse { } export interface FeaturesResetFeaturesRequest extends RequestBase { + master_timeout?: Duration } export interface FeaturesResetFeaturesResponse { @@ -10679,7 +10717,6 @@ export interface IlmExplainLifecycleRequest extends RequestBase { only_errors?: boolean only_managed?: boolean master_timeout?: Duration - timeout?: Duration } export interface IlmExplainLifecycleResponse { @@ -11622,8 +11659,6 @@ export interface IndicesFieldUsageStatsRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean fields?: Fields - master_timeout?: Duration - timeout?: Duration wait_for_active_shards?: WaitForActiveShards } @@ -11722,6 +11757,22 @@ export interface IndicesGetDataLifecycleResponse { data_streams: IndicesGetDataLifecycleDataStreamWithLifecycle[] } +export interface IndicesGetDataLifecycleStatsDataStreamStats { + backing_indices_in_error: integer + backing_indices_in_total: integer + name: DataStreamName +} + +export interface IndicesGetDataLifecycleStatsRequest extends RequestBase { +} + +export interface IndicesGetDataLifecycleStatsResponse { + data_stream_count: integer + data_streams: IndicesGetDataLifecycleStatsDataStreamStats[] + last_run_duration_in_millis?: DurationValue + time_between_starts_in_millis?: DurationValue +} + export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames expand_wildcards?: ExpandWildcards @@ -12781,6 +12832,24 @@ export interface IngestDissectProcessor extends IngestProcessorBase { pattern: string } +export interface IngestDocument { + _id?: Id + _index?: IndexName + _source: any +} + +export interface IngestDocumentSimulationKeys { + _id: Id + _index: IndexName + _ingest: IngestIngest + _routing?: string + _source: Record + _version?: SpecUtilsStringified + _version_type?: VersionType +} +export type IngestDocumentSimulation = IngestDocumentSimulationKeys +& { [property: string]: string | Id | IndexName | IngestIngest | Record | SpecUtilsStringified | VersionType } + export interface IngestDotExpanderProcessor extends IngestProcessorBase { field: Field override?: boolean @@ -12894,6 +12963,12 @@ export interface IngestInferenceProcessor extends IngestProcessorBase { inference_config?: IngestInferenceConfig } +export interface IngestIngest { + _redact?: IngestRedact + timestamp: DateTime + pipeline?: Name +} + export interface IngestIpLocationProcessor extends IngestProcessorBase { database_file?: string field: Field @@ -12980,6 +13055,16 @@ export interface IngestPipelineProcessor extends IngestProcessorBase { ignore_missing_pipeline?: boolean } +export interface IngestPipelineSimulation { + doc?: IngestDocumentSimulation + tag?: string + processor_type?: string + status?: WatcherActionStatusOptions + description?: string + ignored_error?: ErrorCause + error?: ErrorCause +} + export interface IngestProcessorBase { description?: string if?: string @@ -13036,6 +13121,10 @@ export interface IngestProcessorContainer { user_agent?: IngestUserAgentProcessor } +export interface 
IngestRedact { + _is_redacted: boolean +} + export interface IngestRedactProcessor extends IngestProcessorBase { field: Field patterns: GrokPattern[] @@ -13094,6 +13183,12 @@ export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { export type IngestShapeType = 'geo_shape' | 'shape' +export interface IngestSimulateDocumentResult { + doc?: IngestDocumentSimulation + error?: ErrorCause + processor_results?: IngestPipelineSimulation[] +} + export interface IngestSortProcessor extends IngestProcessorBase { field: Field order?: SortOrder @@ -13210,7 +13305,6 @@ export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata { export interface IngestGetGeoipDatabaseRequest extends RequestBase { id?: Ids - master_timeout?: Duration } export interface IngestGetGeoipDatabaseResponse { @@ -13283,59 +13377,15 @@ export interface IngestPutPipelineRequest extends RequestBase { export type IngestPutPipelineResponse = AcknowledgedResponseBase -export interface IngestSimulateDocument { - _id?: Id - _index?: IndexName - _source: any -} - -export interface IngestSimulateDocumentSimulationKeys { - _id: Id - _index: IndexName - _ingest: IngestSimulateIngest - _routing?: string - _source: Record - _version?: SpecUtilsStringified - _version_type?: VersionType -} -export type IngestSimulateDocumentSimulation = IngestSimulateDocumentSimulationKeys -& { [property: string]: string | Id | IndexName | IngestSimulateIngest | Record | SpecUtilsStringified | VersionType } - -export interface IngestSimulateIngest { - _redact?: IngestSimulateRedact - timestamp: DateTime - pipeline?: Name -} - -export interface IngestSimulatePipelineSimulation { - doc?: IngestSimulateDocumentSimulation - tag?: string - processor_type?: string - status?: WatcherActionStatusOptions - description?: string - ignored_error?: ErrorCause - error?: ErrorCause -} - -export interface IngestSimulateRedact { - _is_redacted: boolean -} - export interface IngestSimulateRequest extends RequestBase { id?: Id verbose?: boolean - docs: IngestSimulateDocument[] + docs: IngestDocument[] pipeline?: IngestPipeline } export interface IngestSimulateResponse { - docs: IngestSimulateSimulateDocumentResult[] -} - -export interface IngestSimulateSimulateDocumentResult { - doc?: IngestSimulateDocumentSimulation - error?: ErrorCause - processor_results?: IngestSimulatePipelineSimulation[] + docs: IngestSimulateDocumentResult[] } export interface LicenseLicense { @@ -13356,6 +13406,8 @@ export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' export interface LicenseDeleteRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export type LicenseDeleteResponse = AcknowledgedResponseBase @@ -13405,6 +13457,8 @@ export interface LicensePostAcknowledgement { export interface LicensePostRequest extends RequestBase { acknowledge?: boolean + master_timeout?: Duration + timeout?: Duration license?: LicenseLicense licenses?: LicenseLicense[] } @@ -13417,6 +13471,8 @@ export interface LicensePostResponse { export interface LicensePostStartBasicRequest extends RequestBase { acknowledge?: boolean + master_timeout?: Duration + timeout?: Duration } export interface LicensePostStartBasicResponse { @@ -13430,6 +13486,7 @@ export interface LicensePostStartBasicResponse { export interface LicensePostStartTrialRequest extends RequestBase { acknowledge?: boolean type_query_string?: string 
+ master_timeout?: Duration } export interface LicensePostStartTrialResponse { @@ -14937,6 +14994,7 @@ export type MlDeleteModelSnapshotResponse = AcknowledgedResponseBase export interface MlDeleteTrainedModelRequest extends RequestBase { model_id: Id force?: boolean + timeout?: Duration } export type MlDeleteTrainedModelResponse = AcknowledgedResponseBase @@ -15297,7 +15355,6 @@ export interface MlGetMemoryStatsMemory { export interface MlGetMemoryStatsRequest extends RequestBase { node_id?: Id - human?: boolean master_timeout?: Duration timeout?: Duration } @@ -16606,7 +16663,6 @@ export interface NodesHotThreadsRequest extends RequestBase { ignore_idle_threads?: boolean interval?: Duration snapshots?: long - master_timeout?: Duration threads?: long timeout?: Duration type?: ThreadType @@ -16968,7 +17024,6 @@ export interface NodesInfoRequest extends RequestBase { node_id?: NodeIds metric?: Metrics flat_settings?: boolean - master_timeout?: Duration timeout?: Duration } @@ -17002,7 +17057,6 @@ export interface NodesStatsRequest extends RequestBase { groups?: boolean include_segment_file_sizes?: boolean level?: Level - master_timeout?: Duration timeout?: Duration types?: string[] include_unloaded_segments?: boolean @@ -17469,8 +17523,6 @@ export interface SearchableSnapshotsClearCacheRequest extends RequestBase { expand_wildcards?: ExpandWildcards allow_no_indices?: boolean ignore_unavailable?: boolean - pretty?: boolean - human?: boolean } export type SearchableSnapshotsClearCacheResponse = any @@ -17694,6 +17746,10 @@ export interface SecuritySearchAccess { allow_restricted_indices?: boolean } +export interface SecuritySecuritySettings { + index?: IndicesIndexSettings +} + export type SecurityTemplateFormat = 'string' | 'json' export interface SecurityUser { @@ -17802,6 +17858,19 @@ export interface SecurityBulkPutRoleResponse { errors?: SecurityBulkError } +export interface SecurityBulkUpdateApiKeysRequest extends RequestBase { + expiration?: Duration + ids: string | string[] + metadata?: Metadata + role_descriptors?: Record +} + +export interface SecurityBulkUpdateApiKeysResponse { + errors?: SecurityBulkError + noops: string[] + updated: string[] +} + export interface SecurityChangePasswordRequest extends RequestBase { username?: Username refresh?: Refresh @@ -17913,6 +17982,37 @@ export interface SecurityCreateServiceTokenToken { value: string } +export interface SecurityDelegatePkiAuthentication { + username: string + roles: string[] + full_name: string | null + email: string | null + token?: Record + metadata: Metadata + enabled: boolean + authentication_realm: SecurityDelegatePkiAuthenticationRealm + lookup_realm: SecurityDelegatePkiAuthenticationRealm + authentication_type: string + api_key?: Record +} + +export interface SecurityDelegatePkiAuthenticationRealm { + name: string + type: string + domain?: string +} + +export interface SecurityDelegatePkiRequest extends RequestBase { + x509_certificate_chain: string[] +} + +export interface SecurityDelegatePkiResponse { + access_token: string + expires_in: long + type: string + authentication?: SecurityDelegatePkiAuthentication +} + export interface SecurityDeletePrivilegesFoundStatus { found: boolean } @@ -18106,6 +18206,16 @@ export interface SecurityGetServiceCredentialsResponse { nodes_credentials: SecurityGetServiceCredentialsNodesCredentials } +export interface SecurityGetSettingsRequest extends RequestBase { + master_timeout?: Duration +} + +export interface SecurityGetSettingsResponse { + security: SecuritySecuritySettings + 
'security-profile': SecuritySecuritySettings + 'security-tokens': SecuritySecuritySettings +} + export type SecurityGetTokenAccessTokenGrantType = 'password' | 'client_credentials' | '_kerberos' | 'refresh_token' export interface SecurityGetTokenAuthenticatedUser extends SecurityUser { @@ -18627,6 +18737,18 @@ export interface SecurityUpdateCrossClusterApiKeyResponse { updated: boolean } +export interface SecurityUpdateSettingsRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration + security?: SecuritySecuritySettings + 'security-profile'?: SecuritySecuritySettings + 'security-tokens'?: SecuritySecuritySettings +} + +export interface SecurityUpdateSettingsResponse { + acknowledged: boolean +} + export interface SecurityUpdateUserProfileDataRequest extends RequestBase { uid: SecurityUserProfileId if_seq_no?: SequenceNumber @@ -18670,7 +18792,6 @@ export interface ShutdownGetNodePluginsStatus { export interface ShutdownGetNodeRequest extends RequestBase { node_id?: NodeIds master_timeout?: TimeUnit - timeout?: TimeUnit } export interface ShutdownGetNodeResponse { @@ -18697,6 +18818,20 @@ export interface ShutdownPutNodeRequest extends RequestBase { export type ShutdownPutNodeResponse = AcknowledgedResponseBase +export interface SimulateIngestRequest extends RequestBase { + index?: IndexName + pipeline?: PipelineName + docs: IngestDocument[] + component_template_substitutions?: Record + index_template_subtitutions?: Record + mapping_addition?: MappingTypeMapping + pipeline_substitutions?: Record +} + +export interface SimulateIngestResponse { + docs: IngestSimulateDocumentResult[] +} + export interface SlmConfiguration { ignore_unavailable?: boolean indices?: Indices @@ -18764,12 +18899,16 @@ export interface SlmStatistics { export interface SlmDeleteLifecycleRequest extends RequestBase { policy_id: Name + master_timeout?: Duration + timeout?: Duration } export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase export interface SlmExecuteLifecycleRequest extends RequestBase { policy_id: Name + master_timeout?: Duration + timeout?: Duration } export interface SlmExecuteLifecycleResponse { @@ -18777,17 +18916,23 @@ export interface SlmExecuteLifecycleResponse { } export interface SlmExecuteRetentionRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export type SlmExecuteRetentionResponse = AcknowledgedResponseBase export interface SlmGetLifecycleRequest extends RequestBase { policy_id?: Names + master_timeout?: Duration + timeout?: Duration } export type SlmGetLifecycleResponse = Record export interface SlmGetStatsRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export interface SlmGetStatsResponse { @@ -18804,6 +18949,8 @@ export interface SlmGetStatsResponse { } export interface SlmGetStatusRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export interface SlmGetStatusResponse { @@ -18824,11 +18971,15 @@ export interface SlmPutLifecycleRequest extends RequestBase { export type SlmPutLifecycleResponse = AcknowledgedResponseBase export interface SlmStartRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export type SlmStartResponse = AcknowledgedResponseBase export interface SlmStopRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export type SlmStopResponse = AcknowledgedResponseBase @@ -18839,11 +18990,13 @@ export interface SnapshotAzureRepository extends SnapshotRepositoryBase { } export interface 
SnapshotAzureRepositorySettings extends SnapshotRepositorySettingsBase { + base_path?: string client?: string container?: string - base_path?: string - readonly?: boolean + delete_objects_max_size?: integer location_mode?: string + max_concurrent_batch_deletes?: integer + readonly?: boolean } export interface SnapshotFileCountSnapshotStats { @@ -18858,10 +19011,10 @@ export interface SnapshotGcsRepository extends SnapshotRepositoryBase { export interface SnapshotGcsRepositorySettings extends SnapshotRepositorySettingsBase { bucket: string - client?: string + application_name?: string base_path?: string + client?: string readonly?: boolean - application_name?: string } export interface SnapshotIndexDetails { @@ -18908,13 +19061,20 @@ export interface SnapshotS3Repository extends SnapshotRepositoryBase { export interface SnapshotS3RepositorySettings extends SnapshotRepositorySettingsBase { bucket: string - client?: string base_path?: string - readonly?: boolean - server_side_encryption?: boolean buffer_size?: ByteSize canned_acl?: string + client?: string + delete_objects_max_size?: integer + get_register_retry_delay?: Duration + max_multipart_parts?: integer + max_multipart_upload_cleanup_size?: integer + readonly?: boolean + server_side_encryption?: boolean storage_class?: string + 'throttled_delete_retry.delay_increment'?: Duration + 'throttled_delete_retry.maximum_delay'?: Duration + 'throttled_delete_retry.maximum_number_of_retries'?: integer } export interface SnapshotShardsStats { @@ -19060,10 +19220,11 @@ export interface SnapshotCreateRequest extends RequestBase { snapshot: Name master_timeout?: Duration wait_for_completion?: boolean + expand_wildcards?: ExpandWildcards + feature_states?: string[] ignore_unavailable?: boolean include_global_state?: boolean indices?: Indices - feature_states?: string[] metadata?: Metadata partial?: boolean } @@ -19102,26 +19263,27 @@ export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase export interface SnapshotGetRequest extends RequestBase { repository: Name snapshot: Names + after?: string + from_sort_value?: string ignore_unavailable?: boolean - master_timeout?: Duration - verbose?: boolean index_details?: boolean index_names?: boolean include_repository?: boolean - sort?: SnapshotSnapshotSort - size?: integer + master_timeout?: Duration order?: SortOrder - after?: string offset?: integer - from_sort_value?: string + size?: integer slm_policy_filter?: Name + sort?: SnapshotSnapshotSort + verbose?: boolean } export interface SnapshotGetResponse { + remaining: integer + total: integer + next?: string responses?: SnapshotGetSnapshotResponseItem[] snapshots?: SnapshotSnapshotInfo[] - total: integer - remaining: integer } export interface SnapshotGetSnapshotResponseItem { @@ -19140,14 +19302,14 @@ export type SnapshotGetRepositoryResponse = Record export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase { name: Names - meta_thread_pool_concurrency?: integer blob_thread_pool_concurrency?: integer - snapshot_verification_concurrency?: integer - index_verification_concurrency?: integer index_snapshot_verification_concurrency?: integer + index_verification_concurrency?: integer + max_bytes_per_sec?: string max_failed_shard_snapshots?: integer + meta_thread_pool_concurrency?: integer + snapshot_verification_concurrency?: integer verify_blob_contents?: boolean - max_bytes_per_sec?: string } export type SnapshotRepositoryVerifyIntegrityResponse = any @@ -19248,40 +19410,41 @@ export interface SqlGetAsyncStatusRequest 
extends RequestBase { } export interface SqlGetAsyncStatusResponse { + expiration_time_in_millis: EpochTime id: string is_running: boolean is_partial: boolean start_time_in_millis: EpochTime - expiration_time_in_millis: EpochTime completion_status?: uint } export interface SqlQueryRequest extends RequestBase { format?: SqlQuerySqlFormat + allow_partial_search_results?: boolean catalog?: string columnar?: boolean cursor?: string fetch_size?: integer + field_multi_value_leniency?: boolean filter?: QueryDslQueryContainer + index_using_frozen?: boolean + keep_alive?: Duration + keep_on_completion?: boolean + page_timeout?: Duration + params?: Record query?: string request_timeout?: Duration - page_timeout?: Duration - time_zone?: TimeZone - field_multi_value_leniency?: boolean runtime_mappings?: MappingRuntimeFields + time_zone?: TimeZone wait_for_completion_timeout?: Duration - params?: Record - keep_alive?: Duration - keep_on_completion?: boolean - index_using_frozen?: boolean } export interface SqlQueryResponse { + columns?: SqlColumn[] + cursor?: string id?: Id is_running?: boolean is_partial?: boolean - columns?: SqlColumn[] - cursor?: string rows: SqlRow[] } @@ -19470,7 +19633,6 @@ export interface TasksListRequest extends RequestBase { group_by?: TasksGroupBy nodes?: NodeIds parent_task_id?: Id - master_timeout?: Duration timeout?: Duration wait_for_completion?: boolean } @@ -19839,6 +20001,7 @@ export type TransformPutTransformResponse = AcknowledgedResponseBase export interface TransformResetTransformRequest extends RequestBase { transform_id: Id force?: boolean + timeout?: Duration } export type TransformResetTransformResponse = AcknowledgedResponseBase @@ -20493,6 +20656,14 @@ export interface WatcherExecuteWatchWatchRecord { status?: WatcherWatchStatus } +export interface WatcherGetSettingsRequest extends RequestBase { + master_timeout?: Duration +} + +export interface WatcherGetSettingsResponse { + index: IndicesIndexSettings +} + export interface WatcherGetWatchRequest extends RequestBase { id: Name } @@ -20517,7 +20688,8 @@ export interface WatcherPutWatchRequest extends RequestBase { condition?: WatcherConditionContainer input?: WatcherInputContainer metadata?: Metadata - throttle_period?: string + throttle_period?: Duration + throttle_period_in_millis?: DurationValue transform?: TransformContainer trigger?: WatcherTriggerContainer } @@ -20544,6 +20716,7 @@ export interface WatcherQueryWatchesResponse { } export interface WatcherStartRequest extends RequestBase { + master_timeout?: Duration } export type WatcherStartResponse = AcknowledgedResponseBase @@ -20586,10 +20759,22 @@ export interface WatcherStatsWatcherNodeStats { export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping' export interface WatcherStopRequest extends RequestBase { + master_timeout?: Duration } export type WatcherStopResponse = AcknowledgedResponseBase +export interface WatcherUpdateSettingsRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration + 'index.auto_expand_replicas'?: string + 'index.number_of_replicas'?: integer +} + +export interface WatcherUpdateSettingsResponse { + acknowledged: boolean +} + export interface XpackInfoBuildInformation { date: DateTime hash: string From 2bcbd36d755b7f8233bb250fe6288cb372e72248 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 10:09:10 -0600 Subject: [PATCH 455/647] Update dependency typescript to v5.7.3 (#2565) 
Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index d94e67408..e27834dfc 100644 --- a/package.json +++ b/package.json @@ -83,7 +83,7 @@ "tap": "21.0.1", "ts-node": "10.9.2", "ts-standard": "12.0.2", - "typescript": "5.7.2", + "typescript": "5.7.3", "workq": "3.0.0", "xmlbuilder2": "3.1.1", "zx": "7.2.3" From 3676c8229a24d8f108ba38bd21ab2b021207ba81 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 13 Jan 2025 10:15:27 -0600 Subject: [PATCH 456/647] Fix Renovate rules for Dockerfile (#2572) --- renovate.json | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/renovate.json b/renovate.json index 84a098694..8bc137193 100644 --- a/renovate.json +++ b/renovate.json @@ -17,13 +17,10 @@ ] }, { - "matchPackageNames": [ - "node" - ], "matchManagers": [ "dockerfile" ], - "enabled": false + "pinDigests": false } ] } From 5f9561d5662a3c644b8adfe09448d3af575a9161 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 12:39:09 -0600 Subject: [PATCH 457/647] Update dependency @types/node to v22.10.7 (#2576) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index e27834dfc..1b7e082f7 100644 --- a/package.json +++ b/package.json @@ -60,7 +60,7 @@ "@sinonjs/fake-timers": "github:sinonjs/fake-timers#48f089f", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "22.10.5", + "@types/node": "22.10.7", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 26ce906b5bdde1f7939a146c68bd9fa97d88a96f Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 11:32:20 -0600 Subject: [PATCH 458/647] Update actions/stale digest to 5bef64f (#2586) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 611d3c6ce..6a851af1b 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -8,7 +8,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9 + - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9 with: stale-issue-label: stale stale-pr-label: stale From 5eb35540833579bf1ce7219dc3ee3bb1515c4099 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 28 Jan 2025 19:26:27 +0100 Subject: [PATCH 459/647] Auto-generated API code (#2577) Co-authored-by: Josh Mock --- .../120fcf9f55128d6a81d5e87a9c235bbd.asciidoc | 20 + ...141ef0ebaa3b0772892b79b9bb85efb0.asciidoc} | 8 +- .../2f72a63c73dd672ac2dc3997ad15dd41.asciidoc | 23 + .../31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc | 10 - .../38ba93890494bfa7beece58dffa44f98.asciidoc | 23 - .../45954b8aaedfed57012be8b6538b0a24.asciidoc | 47 + .../519e46350316a33162740e5d7968aa2c.asciidoc | 20 + ...681d24c2633f598fc43d6afff8996dbb.asciidoc} | 6 + ...77cebba946fe648873a1e7375c13df41.asciidoc} | 3 +- ...7bdc283b96c7a965fae23013647b8578.asciidoc} | 8 +- .../82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc | 
34 + ...948418e0ef1b7e7cfee2f11be715d7d2.asciidoc} | 6 + docs/reference.asciidoc | 3066 ++++++++--- src/api/api/async_search.ts | 65 +- src/api/api/autoscaling.ts | 55 +- src/api/api/bulk.ts | 9 +- src/api/api/capabilities.ts | 16 +- src/api/api/cat.ts | 440 +- src/api/api/ccr.ts | 212 +- src/api/api/clear_scroll.ts | 17 +- src/api/api/close_point_in_time.ts | 17 +- src/api/api/cluster.ts | 260 +- src/api/api/connector.ts | 564 +- src/api/api/count.ts | 19 +- src/api/api/create.ts | 9 +- src/api/api/dangling_indices.ts | 48 +- src/api/api/delete.ts | 18 +- src/api/api/delete_by_query.ts | 19 +- src/api/api/delete_by_query_rethrottle.ts | 18 +- src/api/api/delete_script.ts | 16 +- src/api/api/enrich.ts | 81 +- src/api/api/eql.ts | 65 +- src/api/api/esql.ts | 122 +- src/api/api/exists.ts | 18 +- src/api/api/exists_source.ts | 18 +- src/api/api/explain.ts | 17 +- src/api/api/features.ts | 32 +- src/api/api/field_caps.ts | 17 +- src/api/api/fleet.ts | 88 +- src/api/api/get.ts | 18 +- src/api/api/get_script.ts | 16 +- src/api/api/get_script_context.ts | 16 +- src/api/api/get_script_languages.ts | 16 +- src/api/api/get_source.ts | 18 +- src/api/api/graph.ts | 17 +- src/api/api/health_report.ts | 16 +- src/api/api/ilm.ts | 179 +- src/api/api/index.ts | 9 +- src/api/api/indices.ts | 1120 +++- src/api/api/inference.ts | 193 +- src/api/api/info.ts | 16 +- src/api/api/ingest.ts | 186 +- src/api/api/knn_search.ts | 17 +- src/api/api/license.ts | 113 +- src/api/api/logstash.ts | 39 +- src/api/api/mget.ts | 17 +- src/api/api/migration.ts | 48 +- src/api/api/ml.ts | 1187 +++- src/api/api/monitoring.ts | 7 +- src/api/api/msearch.ts | 7 +- src/api/api/msearch_template.ts | 7 +- src/api/api/mtermvectors.ts | 17 +- src/api/api/nodes.ts | 113 +- src/api/api/open_point_in_time.ts | 19 +- src/api/api/ping.ts | 16 +- src/api/api/profiling.ts | 64 +- src/api/api/put_script.ts | 17 +- src/api/api/query_rules.ts | 131 +- src/api/api/rank_eval.ts | 17 +- src/api/api/reindex.ts | 19 +- src/api/api/reindex_rethrottle.ts | 18 +- src/api/api/render_search_template.ts | 17 +- src/api/api/rollup.ts | 130 +- src/api/api/scripts_painless_execute.ts | 19 +- src/api/api/scroll.ts | 17 +- src/api/api/search.ts | 19 +- src/api/api/search_application.ts | 144 +- src/api/api/search_mvt.ts | 17 +- src/api/api/search_shards.ts | 16 +- src/api/api/search_template.ts | 17 +- src/api/api/searchable_snapshots.ts | 65 +- src/api/api/security.ts | 1136 +++- src/api/api/shutdown.ts | 49 +- src/api/api/simulate.ts | 17 +- src/api/api/slm.ts | 145 +- src/api/api/snapshot.ts | 222 +- src/api/api/sql.ts | 99 +- src/api/api/ssl.ts | 16 +- src/api/api/synonyms.ts | 114 +- src/api/api/tasks.ts | 54 +- src/api/api/terms_enum.ts | 17 +- src/api/api/termvectors.ts | 17 +- src/api/api/text_structure.ts | 57 +- src/api/api/transform.ts | 195 +- src/api/api/update.ts | 19 +- src/api/api/update_by_query.ts | 19 +- src/api/api/update_by_query_rethrottle.ts | 18 +- src/api/api/watcher.ts | 212 +- src/api/api/xpack.ts | 32 +- src/api/types.ts | 4841 ++++++++++++++++- 100 files changed, 14354 insertions(+), 2613 deletions(-) create mode 100644 docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc rename docs/doc_examples/{ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc => 141ef0ebaa3b0772892b79b9bb85efb0.asciidoc} (65%) create mode 100644 docs/doc_examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc delete mode 100644 docs/doc_examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc delete mode 100644 docs/doc_examples/38ba93890494bfa7beece58dffa44f98.asciidoc 
create mode 100644 docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc create mode 100644 docs/doc_examples/519e46350316a33162740e5d7968aa2c.asciidoc rename docs/doc_examples/{ee05714a83d75fb6858e3b9fcbeb8f8b.asciidoc => 681d24c2633f598fc43d6afff8996dbb.asciidoc} (96%) rename docs/doc_examples/{9d47f02a063444da9f098858a1830d28.asciidoc => 77cebba946fe648873a1e7375c13df41.asciidoc} (66%) rename docs/doc_examples/{93c77c65f1e11382f8043d0300e87b89.asciidoc => 7bdc283b96c7a965fae23013647b8578.asciidoc} (100%) create mode 100644 docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc rename docs/doc_examples/{bee3fda7bb07086243424b62e5b16ca7.asciidoc => 948418e0ef1b7e7cfee2f11be715d7d2.asciidoc} (93%) diff --git a/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc b/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc new file mode 100644 index 000000000..f6c1cb881 --- /dev/null +++ b/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_inference/chat_completion/openai-completion/_stream", + body: { + model: "gpt-4o", + messages: [ + { + role: "user", + content: "What is Elastic?", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc b/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc similarity index 65% rename from docs/doc_examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc rename to docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc index bd9c35360..7d7aeab98 100644 --- a/docs/doc_examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc +++ b/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc @@ -3,11 +3,13 @@ [source, js] ---- -const response = await client.inference.inference({ +const response = await client.inference.put({ task_type: "my-inference-endpoint", inference_id: "_update", - service_settings: { - api_key: "", + inference_config: { + service_settings: { + api_key: "", + }, }, }); console.log(response); diff --git a/docs/doc_examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc b/docs/doc_examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc new file mode 100644 index 000000000..e1edb8658 --- /dev/null +++ b/docs/doc_examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test-index", + mappings: { + properties: { + source_field: { + type: "text", + fields: { + infer_field: { + type: "semantic_text", + inference_id: ".elser-2-elasticsearch", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc b/docs/doc_examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc deleted file mode 100644 index 36c1c5a79..000000000 --- a/docs/doc_examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.unfreeze({ - index: "my-index-000001", -}); -console.log(response); ----- diff --git a/docs/doc_examples/38ba93890494bfa7beece58dffa44f98.asciidoc 
b/docs/doc_examples/38ba93890494bfa7beece58dffa44f98.asciidoc deleted file mode 100644 index f9dca64fb..000000000 --- a/docs/doc_examples/38ba93890494bfa7beece58dffa44f98.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.bulk({ - index: "test-index", - operations: [ - { - update: { - _id: "1", - }, - }, - { - doc: { - infer_field: "updated inference field", - source_field: "updated source field", - }, - }, - ], -}); -console.log(response); ----- diff --git a/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc b/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc new file mode 100644 index 000000000..a2ff623e6 --- /dev/null +++ b/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc @@ -0,0 +1,47 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_inference/chat_completion/openai-completion/_stream", + body: { + messages: [ + { + role: "user", + content: [ + { + type: "text", + text: "What's the price of a scarf?", + }, + ], + }, + ], + tools: [ + { + type: "function", + function: { + name: "get_current_price", + description: "Get the current price of a item", + parameters: { + type: "object", + properties: { + item: { + id: "123", + }, + }, + }, + }, + }, + ], + tool_choice: { + type: "function", + function: { + name: "get_current_price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/519e46350316a33162740e5d7968aa2c.asciidoc b/docs/doc_examples/519e46350316a33162740e5d7968aa2c.asciidoc new file mode 100644 index 000000000..3c92986f6 --- /dev/null +++ b/docs/doc_examples/519e46350316a33162740e5d7968aa2c.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", + knn: { + field: "image-vector", + query_vector: [-5, 9, -12], + k: 10, + num_candidates: 100, + rescore_vector: { + oversample: 2, + }, + }, + fields: ["title", "file-type"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee05714a83d75fb6858e3b9fcbeb8f8b.asciidoc b/docs/doc_examples/681d24c2633f598fc43d6afff8996dbb.asciidoc similarity index 96% rename from docs/doc_examples/ee05714a83d75fb6858e3b9fcbeb8f8b.asciidoc rename to docs/doc_examples/681d24c2633f598fc43d6afff8996dbb.asciidoc index abb8e0b60..bfb21cf32 100644 --- a/docs/doc_examples/ee05714a83d75fb6858e3b9fcbeb8f8b.asciidoc +++ b/docs/doc_examples/681d24c2633f598fc43d6afff8996dbb.asciidoc @@ -5,6 +5,9 @@ ---- const response = await client.indices.create({ index: "retrievers_example", + settings: { + number_of_shards: 1, + }, mappings: { properties: { vector: { @@ -12,6 +15,9 @@ const response = await client.indices.create({ dims: 3, similarity: "l2_norm", index: true, + index_options: { + type: "flat", + }, }, text: { type: "text", diff --git a/docs/doc_examples/9d47f02a063444da9f098858a1830d28.asciidoc b/docs/doc_examples/77cebba946fe648873a1e7375c13df41.asciidoc similarity index 66% rename from docs/doc_examples/9d47f02a063444da9f098858a1830d28.asciidoc rename to docs/doc_examples/77cebba946fe648873a1e7375c13df41.asciidoc index b10da9a05..a09e089bb 100644 --- 
a/docs/doc_examples/9d47f02a063444da9f098858a1830d28.asciidoc +++ b/docs/doc_examples/77cebba946fe648873a1e7375c13df41.asciidoc @@ -5,7 +5,8 @@ ---- const response = await client.cluster.putSettings({ persistent: { - "cluster.routing.allocation.disk.watermark.low": "30gb", + "cluster.routing.allocation.disk.watermark.low": "90%", + "cluster.routing.allocation.disk.watermark.high": "95%", }, }); console.log(response); diff --git a/docs/doc_examples/93c77c65f1e11382f8043d0300e87b89.asciidoc b/docs/doc_examples/7bdc283b96c7a965fae23013647b8578.asciidoc similarity index 100% rename from docs/doc_examples/93c77c65f1e11382f8043d0300e87b89.asciidoc rename to docs/doc_examples/7bdc283b96c7a965fae23013647b8578.asciidoc index 7a9809243..7f416d2de 100644 --- a/docs/doc_examples/93c77c65f1e11382f8043d0300e87b89.asciidoc +++ b/docs/doc_examples/7bdc283b96c7a965fae23013647b8578.asciidoc @@ -7,14 +7,14 @@ const response = await client.indices.create({ index: "test-index", mappings: { properties: { - infer_field: { - type: "semantic_text", - inference_id: ".elser-2-elasticsearch", - }, source_field: { type: "text", copy_to: "infer_field", }, + infer_field: { + type: "semantic_text", + inference_id: ".elser-2-elasticsearch", + }, }, }, }); diff --git a/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc b/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc new file mode 100644 index 000000000..7c7a7cba1 --- /dev/null +++ b/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_inference/chat_completion/openai-completion/_stream", + body: { + messages: [ + { + role: "assistant", + content: "Let's find out what the weather is", + tool_calls: [ + { + id: "call_KcAjWtAww20AihPHphUh46Gd", + type: "function", + function: { + name: "get_current_weather", + arguments: '{"location":"Boston, MA"}', + }, + }, + ], + }, + { + role: "tool", + content: "The weather is cold", + tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bee3fda7bb07086243424b62e5b16ca7.asciidoc b/docs/doc_examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc similarity index 93% rename from docs/doc_examples/bee3fda7bb07086243424b62e5b16ca7.asciidoc rename to docs/doc_examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc index 9ab13275d..5a4bb2510 100644 --- a/docs/doc_examples/bee3fda7bb07086243424b62e5b16ca7.asciidoc +++ b/docs/doc_examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc @@ -5,6 +5,9 @@ ---- const response = await client.indices.create({ index: "retrievers_example_nested", + settings: { + number_of_shards: 1, + }, mappings: { properties: { nested_field: { @@ -18,6 +21,9 @@ const response = await client.indices.create({ dims: 3, similarity: "l2_norm", index: true, + index_options: { + type: "flat", + }, }, }, }, diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 449cc261c..62f283b1a 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -28,9 +28,116 @@ [discrete] === bulk Bulk index or delete documents. -Performs multiple indexing or delete operations in a single API call. +Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed. 
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: + +* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. +* To use the `index` action, you must have the `create`, `index`, or `write` index privilege. +* To use the `delete` action, you must have the `delete` or `write` index privilege. +* To use the `update` action, you must have the `index` or `write` index privilege. +* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. +* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. + +Automatic data stream creation requires a matching index template with data stream enabled. + +The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: + +---- +action_and_meta_data\n +optional_source\n +action_and_meta_data\n +optional_source\n +.... +action_and_meta_data\n +optional_source\n +---- + +The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. +A `create` action fails if a document with the same ID already exists in the target +An `index` action adds or replaces a document as necessary. + +NOTE: Data streams support only the `create` action. +To update or delete a document in a data stream, you must target the backing index containing the document. + +An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. + +A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. + +NOTE: The final line of data must end with a newline character (`\n`). +Each newline character may be preceded by a carriage return (`\r`). +When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. +Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. + +If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. + +A note on the format: the idea here is to make processing as fast as possible. +As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. + +Client libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible. + +There is no "correct" number of actions to perform in a single bulk request. +Experiment with different settings to find the optimal size for your particular workload. +Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size. +It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. 
+For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. + +**Client suppport for bulk requests** + +Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: + +* Go: Check out `esutil.BulkIndexer` +* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` +* Python: Check out `elasticsearch.helpers.*` +* JavaScript: Check out `client.helpers.*` +* .NET: Check out `BulkAllObservable` +* PHP: Check out bulk indexing. + +**Submitting bulk requests with cURL** + +If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. +The latter doesn't preserve newlines. For example: + +---- +$ cat requests +{ "index" : { "_index" : "test", "_id" : "1" } } +{ "field1" : "value1" } +$ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo +{"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} +---- + +**Optimistic concurrency control** + +Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. +The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. + +**Versioning** + +Each bulk item can include the version value using the `version` field. +It automatically follows the behavior of the index or delete operation based on the `_version` mapping. +It also support the `version_type`. + +**Routing** + +Each bulk item can include the routing value using the `routing` field. +It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. + +NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. + +**Wait for active shards** + +When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. + +**Refresh** + +Control when the changes made by this request are visible to search. + +NOTE: Only the shards that receive the bulk request will be affected by refresh. +Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. +The request will only wait for those three shards to refresh. +The other two shards that make up the index do not participate in the `_bulk` request at all. + {ref}/docs-bulk.html[Endpoint documentation] [source,ts] ---- @@ -40,28 +147,23 @@ client.bulk({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string)*: Name of the data stream, index, or index alias to perform bulk actions on. +** *`index` (Optional, string)*: The name of the data stream, index, or index alias to perform bulk actions on. 
** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])*
-** *`list_executed_pipelines` (Optional, boolean)*: If `true`, the response will include the ingest pipelines that were executed for each index or create.
-** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents.
-If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.
-If a final pipeline is configured it will always run, regardless of the value of this parameter.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes.
-Valid values: `true`, `false`, `wait_for`.
-** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
-** *`_source` (Optional, boolean | string | string[])*: `true` or `false` to return the `_source` field or not, or a list of fields to return.
-** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response.
-** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response.
-** *`timeout` (Optional, string | -1 | 0)*: Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
-Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
-** *`require_alias` (Optional, boolean)*: If `true`, the request’s actions must target an index alias.
-** *`require_data_stream` (Optional, boolean)*: If `true`, the request's actions must target a data stream (existing or to-be-created).
+** *`list_executed_pipelines` (Optional, boolean)*: If `true`, the response will include the ingest pipelines that were run for each index or create.
+** *`pipeline` (Optional, string)*: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`.
+** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
+** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
+** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in the `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`timeout` (Optional, string | -1 | 0)*: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. +** *`require_alias` (Optional, boolean)*: If `true`, the request's actions must target an index alias. +** *`require_data_stream` (Optional, boolean)*: If `true`, the request's actions must target a data stream (existing or to be created). [discrete] === clear_scroll Clear a scrolling search. - Clear the search context and results for a scrolling search. {ref}/clear-scroll-api.html[Endpoint documentation] @@ -73,13 +175,11 @@ client.clearScroll({ ... }) ==== Arguments * *Request (object):* -** *`scroll_id` (Optional, string | string[])*: List of scroll IDs to clear. -To clear all scroll IDs, use `_all`. +** *`scroll_id` (Optional, string | string[])*: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. [discrete] === close_point_in_time Close a point in time. - A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. @@ -101,6 +201,15 @@ client.closePointInTime({ id }) Count search results. Get the number of documents matching a query. +The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. +The latter must be nested in a `query` key, which is the same as the search API. + +The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. + +The operation is broadcast across all shards. +For each shard ID group, a replica is chosen and the search is run against it. +This means that replicas increase the scalability of the count. + {ref}/search-count.html[Endpoint documentation] [source,ts] ---- @@ -110,40 +219,97 @@ client.count({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. -Supports wildcards (`*`). -To search all data streams and indices, omit this parameter or use `*` or `_all`. 
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -This parameter can only be used when the `q` query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. -This parameter can only be used when the `q` query string parameter is specified. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. -This parameter can only be used when the `q` query string parameter is specified. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -This parameter can only be used when the `q` query string parameter is specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. The query is optional, and when not provided, it will use `match_all` to count all the docs. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. 
This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded, or aliased indices are ignored when frozen. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -** *`min_score` (Optional, number)*: Sets the minimum `_score` value that documents must have to be included in the result. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. -If a query reaches this limit, Elasticsearch terminates the query early. -Elasticsearch collects documents before sorting. -** *`q` (Optional, string)*: Query in the Lucene query string syntax. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +** *`min_score` (Optional, number)*: The minimum `_score` value that documents must have to be included in the result. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, it is random. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +** *`q` (Optional, string)*: The query in Lucene query string syntax. 
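+
+For example, a minimal count request with a Query DSL body might look like the following sketch (the index name and field are illustrative placeholders, not part of the API):
+
+[source,ts]
+----
+const response = await client.count({
+  index: 'my-index-000001',
+  query: {
+    match: { 'user.id': 'kimchy' }
+  }
+})
+console.log(response.count) // number of matching documents
+----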
[discrete] === create -Index a document. -Adds a JSON document to the specified data stream or index and makes it searchable. -If the target is an index and the document already exists, the request updates the document and increments its version. +Create a new document in the index. + +You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs +Using `_create` guarantees that the document is indexed only if it does not already exist. +It returns a 409 response when a document with a same ID already exists in the index. +To update an existing document, you must use the `//_doc/` API. + +If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: + +* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege. +* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. + +Automatic data stream creation requires a matching index template with data stream enabled. + +**Automatically create data streams and indices** + +If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. + +If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. + +NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. + +If no mapping exists, the index operation creates a dynamic mapping. +By default, new fields and objects are automatically added to the mapping if needed. + +Automatic index creation is controlled by the `action.auto_create_index` setting. +If it is `true`, any index can be created automatically. +You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. +Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. +When a list is specified, the default behaviour is to disallow. + +NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. +It does not affect the creation of data streams. + +**Routing** + +By default, shard placement — or routing — is controlled by using a hash of the document's ID value. +For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. + +When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. +This does come at the (very minimal) cost of an additional document parsing pass. +If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. + +NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. + +**Distributed** + +The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. 
+After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.
+
+**Active shards**
+
+To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
+If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.
+By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).
+This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.
+To alter this behavior per operation, use the `wait_for_active_shards` request parameter.
+
+Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).
+Specifying a negative value or a number greater than the number of shard copies will throw an error.
+
+For example, suppose you have a cluster of three nodes, A, B, and C and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).
+If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.
+This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
+If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.
+This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
+However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.
+The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.
+
+It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.
+After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.
+The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.

{ref}/docs-index_.html[Endpoint documentation]
[source,ts]
----
client.create({ id, index })
----

==== Arguments

* *Request (object):*
-** *`id` (string)*: Unique identifier for the document.
-** *`index` (string)*: Name of the data stream or index to target.
-If the target doesn’t exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream.
-If the target doesn’t exist and doesn’t match a data stream template, this request creates the index.
+** *`id` (string)*: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format.
+** *`index` (string)*: The name of the data stream or index to target.
If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index. ** *`document` (Optional, object)*: A document. -** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. -If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. -If a final pipeline is configured it will always run, regardless of the value of this parameter. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. -Valid values: `true`, `false`, `wait_for`. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`timeout` (Optional, string | -1 | 0)*: Period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. -** *`version` (Optional, number)*: Explicit version number for concurrency control. -The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard. +** *`timeout` (Optional, string | -1 | 0)*: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. +** *`version` (Optional, number)*: The explicit version number for concurrency control. It must be a non-negative long number. 
+** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. [discrete] === delete Delete a document. -Removes a JSON document from the specified index. + +Remove a JSON document from the specified index. + +NOTE: You cannot send deletion requests directly to a data stream. +To delete a document in a data stream, you must target the backing index containing the document. + +**Optimistic concurrency control** + +Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. +If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. + +**Versioning** + +Each document indexed is versioned. +When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. +Every write operation run on a document, deletes included, causes its version to be incremented. +The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. +The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. + +**Routing** + +If routing is used during indexing, the routing value also needs to be specified to delete a document. + +If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. + +For example: + +---- +DELETE /my-index-000001/_doc/1?routing=shard-1 +---- + +This request deletes the document with ID 1, but it is routed based on the user. +The document is not deleted if the correct routing is not specified. + +**Distributed** + +The delete operation gets hashed into a specific shard ID. +It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. {ref}/docs-delete.html[Endpoint documentation] [source,ts] @@ -186,25 +382,100 @@ client.delete({ id, index }) ==== Arguments * *Request (object):* -** *`id` (string)*: Unique identifier for the document. -** *`index` (string)*: Name of the target index. +** *`id` (string)*: A unique identifier for the document. +** *`index` (string)*: The name of the target index. ** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. ** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. -Valid values: `true`, `false`, `wait_for`. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. 
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for active shards. -** *`version` (Optional, number)*: Explicit version number for concurrency control. -The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. +** *`version` (Optional, number)*: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. [discrete] === delete_by_query Delete documents. + Deletes documents that match the specified query. +If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: + +* `read` +* `delete` or `write` + +You can specify the query criteria in the request URI or the request body using the same syntax as the search API. +When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. +If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. + +NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. + +While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. +A bulk delete request is performed for each batch of matching documents. +If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. 
+If the maximum retry limit is reached, processing halts and all failed requests are returned in the response.
+Any delete requests that completed successfully still stick; they are not rolled back.
+
+You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.
+Note that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query.
+
+**Throttling delete requests**
+
+To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number.
+This pads each batch with a wait time to throttle the rate.
+Set `requests_per_second` to `-1` to disable throttling.
+
+Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.
+The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
+By default, the batch size is `1000`, so if `requests_per_second` is set to `500`:
+
+----
+target_time = 1000 / 500 per second = 2 seconds
+wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+----
+
+Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.
+This is "bursty" instead of "smooth".
+
+**Slicing**
+
+Delete by query supports sliced scroll to parallelize the delete process.
+This can improve efficiency and provide a convenient way to break the request down into smaller parts.
+
+Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use.
+This setting will use one slice per shard, up to a certain limit.
+If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+Adding slices to the delete by query operation creates sub-requests, which means it has some quirks:
+
+* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
+* Fetching the status of the task for the request with slices only contains the status of completed slices.
+* These sub-requests are individually addressable for things like cancellation and rethrottling.
+* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.
+* Canceling the request with `slices` will cancel each sub-request.
+* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
+* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted.
+* Each sub-request gets a slightly different snapshot of the source data stream or index, though these are all taken at approximately the same time.
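+
+As a sketch of how throttling and slicing combine in this client (the index name, query, and numbers are illustrative placeholders):
+
+[source,ts]
+----
+const response = await client.deleteByQuery({
+  index: 'my-index-000001',
+  slices: 'auto',            // let Elasticsearch pick one slice per shard
+  requests_per_second: 500,  // throttle the rate of delete batches
+  conflicts: 'proceed',      // count version conflicts instead of aborting
+  query: {
+    range: { '@timestamp': { lt: 'now-30d' } }
+  }
+})
+console.log(response.deleted, response.version_conflicts)
+----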
+ +If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: + +* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. +* Delete performance scales linearly across available resources with the number of slices. + +Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. + +**Cancel a delete by query operation** + +Any delete by query can be canceled using the task cancel API. For example: + +---- +POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +---- + +The task ID can be found by using the get tasks API. + +Cancellation should happen quickly but might take a few seconds. +The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. + {ref}/docs-delete-by-query.html[Endpoint documentation] [source,ts] ---- @@ -214,55 +485,38 @@ client.deleteByQuery({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, and aliases to search. -Supports wildcards (`*`). -To search all data streams or indices, omit this parameter or use `*` or `_all`. +** *`index` (string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`max_docs` (Optional, number)*: The maximum number of documents to delete. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to delete using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The documents to delete specified with Query DSL. ** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. 
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`analyzer` (Optional, string)*: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. ** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if delete by query hits version conflicts: `abort` or `proceed`. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. ** *`from` (Optional, number)*: Starting offset (default: 0) ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. -** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. 
-Defaults to the index-level setting. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. +** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`. +** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. Defaults to the index-level setting. ** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`q` (Optional, string)*: Query in the Lucene query string syntax. -** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling. -** *`scroll_size` (Optional, number)*: Size of the scroll request that powers the operation. -** *`search_timeout` (Optional, string | -1 | 0)*: Explicit timeout for each search request. -Defaults to no timeout. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. -Available options: `query_then_fetch`, `dfs_query_then_fetch`. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`q` (Optional, string)*: A query in the Lucene query string syntax. +** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling. +** *`scroll_size` (Optional, number)*: The size of the scroll request that powers the operation. +** *`search_timeout` (Optional, string | -1 | 0)*: The explicit timeout for each search request. It defaults to no timeout. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. ** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. -** *`sort` (Optional, string[])*: A list of : pairs. -** *`stats` (Optional, string[])*: Specific `tag` of the request for logging and statistical purposes. -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. -If a query reaches this limit, Elasticsearch terminates the query early. -Elasticsearch collects documents before sorting. -Use with caution. -Elasticsearch applies this parameter to each shard handling the request. -When possible, let Elasticsearch perform early termination automatically. -Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. -** *`timeout` (Optional, string | -1 | 0)*: Period each deletion request waits for active shards. +** *`sort` (Optional, string[])*: A list of `:` pairs. +** *`stats` (Optional, string[])*: The specific `tag` of the request for logging and statistical purposes. +** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. 
Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +** *`timeout` (Optional, string | -1 | 0)*: The period each deletion request waits for active shards. ** *`version` (Optional, boolean)*: If `true`, returns the document version as part of a hit. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). -** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. [discrete] === delete_by_query_rethrottle @@ -281,7 +535,7 @@ client.deleteByQueryRethrottle({ task_id }) * *Request (object):* ** *`task_id` (string | number)*: The ID for the task. -** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. [discrete] === delete_script @@ -298,15 +552,30 @@ client.deleteScript({ id }) * *Request (object):* ** *`id` (string)*: Identifier for the stored script or search template. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] === exists Check a document. -Checks if a specified document exists. + +Verify that a document exists. +For example, check to see if a document with the `_id` 0 exists: + +---- +HEAD my-index-000001/_doc/0 +---- + +If the document exists, the API returns a status code of `200 - OK`. +If the document doesn’t exist, the API returns `404 - Not Found`. 
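+
+With this client, the same check can be written as the following sketch (the index name and ID are placeholders; the method resolves to a boolean):
+
+[source,ts]
+----
+const exists = await client.exists({
+  index: 'my-index-000001',
+  id: '0'
+})
+console.log(exists) // true if the document is found, false otherwise
+----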
+ +**Versioning support** + +You can use the `version` parameter to check the document only if its current version is equal to the specified one. + +Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. +The old version of the document doesn't disappear immediately, although you won't be able to access it. +Elasticsearch cleans up deleted documents in the background as you continue to index more data. {ref}/docs-get.html[Endpoint documentation] [source,ts] @@ -317,28 +586,31 @@ client.exists({ id, index }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier of the document. -** *`index` (string)*: List of data streams, indices, and aliases. -Supports wildcards (`*`). -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. +** *`id` (string)*: A unique document identifier. +** *`index` (string)*: A list of data streams, indices, and aliases. It supports wildcards (`*`). +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. ** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. -** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. -** *`routing` (Optional, string)*: Target the specified primary shard. -** *`_source` (Optional, boolean | string | string[])*: `true` or `false` to return the `_source` field or not, or a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. -If no fields are specified, no stored fields are included in the response. -If this field is specified, the `_source` parameter defaults to false. -** *`version` (Optional, number)*: Explicit version number for concurrency control. -The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. +** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. 
You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. +** *`version` (Optional, number)*: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. [discrete] === exists_source Check for a document source. -Checks if a document's `_source` is stored. + +Check whether a document source exists in an index. +For example: + +---- +HEAD my-index-000001/_source/1 +---- + +A document's source is not available if it is disabled in the mapping. {ref}/docs-get.html[Endpoint documentation] [source,ts] @@ -349,20 +621,17 @@ client.existsSource({ id, index }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier of the document. -** *`index` (string)*: List of data streams, indices, and aliases. -Supports wildcards (`*`). -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. -** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. -** *`routing` (Optional, string)*: Target the specified primary shard. -** *`_source` (Optional, boolean | string | string[])*: `true` or `false` to return the `_source` field or not, or a list of fields to return. +** *`id` (string)*: A unique identifier for the document. +** *`index` (string)*: A list of data streams, indices, and aliases. It supports wildcards (`*`). +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. +** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. +** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. ** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. ** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -** *`version` (Optional, number)*: Explicit version number for concurrency control. 
-The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. +** *`version` (Optional, number)*: The version number for concurrency control. It must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. [discrete] === explain @@ -379,17 +648,14 @@ client.explain({ id, index }) * *Request (object):* ** *`id` (string)*: Defines the document ID. -** *`index` (string)*: Index names used to limit the request. -Only a single index name can be provided to this parameter. +** *`index` (string)*: Index names used to limit the request. Only a single index name can be provided to this parameter. ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -This parameter can only be used when the `q` query string parameter is specified. +** *`analyzer` (Optional, string)*: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. ** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. ** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. ** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. ** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. ** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. ** *`_source` (Optional, boolean | string | string[])*: True or false to return the `_source` field or not, or a list of fields to return. ** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. @@ -419,11 +685,8 @@ client.fieldCaps({ ... }) ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. ** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. 
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. -** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. -These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, -or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request -targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. +** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. ** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. ** *`include_unmapped` (Optional, boolean)*: If true, unmapped fields are included in the response. @@ -434,7 +697,63 @@ targeting `foo*,bar*` returns an error if an index starts with foo but no index [discrete] === get Get a document by its ID. -Retrieves the document with the specified ID from an index. + +Get a document and its source or stored fields from an index. + +By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). +In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. +To turn off realtime behavior, set the `realtime` parameter to false. 
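+
+As a quick sketch of what such a lookup looks like from this client (the index name and document ID are illustrative, and the exact response shape depends on your client version):
+
+[source,ts]
+----
+// Minimal sketch: fetch a single document by ID.
+// `realtime: false` opts out of the realtime behavior described above.
+const result = await client.get({
+  index: 'my-index-000001',
+  id: '1',
+  realtime: false
+})
+----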
+
+**Source filtering**
+
+By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off.
+You can turn off `_source` retrieval by using the `_source` parameter:
+
+----
+GET my-index-000001/_doc/0?_source=false
+----
+
+If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields.
+This can be helpful with large documents where partial retrieval can save on network overhead.
+Both parameters take a comma-separated list of fields or wildcard expressions.
+For example:
+
+----
+GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+----
+
+If you only want to specify includes, you can use a shorter notation:
+
+----
+GET my-index-000001/_doc/0?_source=*.id
+----
+
+**Routing**
+
+If routing is used during indexing, the routing value also needs to be specified to retrieve a document.
+For example:
+
+----
+GET my-index-000001/_doc/2?routing=user1
+----
+
+This request gets the document with ID 2, but it is routed based on the user.
+The document is not fetched if the correct routing is not specified.
+
+**Distributed**
+
+The GET operation is hashed into a specific shard ID.
+It is then redirected to one of the replicas within that shard ID and returns the result.
+The replicas are the primary shard and its replicas within that shard ID group.
+This means that the more replicas you have, the better your GET scaling will be.
+
+**Versioning support**
+
+You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one.
+
+Internally, Elasticsearch has marked the old document as deleted and added an entirely new document.
+The old version of the document doesn't disappear immediately, although you won't be able to access it.
+Elasticsearch cleans up deleted documents in the background as you continue to index more data.

{ref}/docs-get.html[Endpoint documentation]
[source,ts]
----
client.get({ id, index })
----

==== Arguments

* *Request (object):*
** *`id` (string)*: A unique document identifier.
** *`index` (string)*: The name of the index that contains the document.
** *`force_synthetic_source` (Optional, boolean)*: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index.
** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name.
** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time.
** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.
** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_fields` option. Object fields can't be returned; if specified, the request fails.
** *`version` (Optional, number)*: The version number for concurrency control. It must match the current version of the document for the request to succeed.
** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type.

[discrete]
=== get_script
Get a script or search template.
Retrieves a stored script or search template.

{ref}/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.getScript({ id })
----

==== Arguments

* *Request (object):*
** *`id` (string)*: Identifier for the stored script or search template.
** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master

[discrete]
=== get_script_context
Get script contexts.

Get a list of supported script contexts and their methods.

{painless}/painless-contexts.html[Endpoint documentation]
[source,ts]
----
client.getScriptContext()
----

[discrete]
=== get_script_languages
Get script languages.

Get a list of available script types, languages, and contexts.

{ref}/modules-scripting.html[Endpoint documentation]
[source,ts]
----
client.getScriptLanguages()
----

[discrete]
=== get_source
Get a document's source.
-Returns the source of a document.
+
+Get the source of a document.
+For example: + +---- +GET my-index-000001/_source/1 +---- + +You can use the source filtering parameters to control which parts of the `_source` are returned: + +---- +GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities +---- {ref}/docs-get.html[Endpoint documentation] [source,ts] @@ -518,18 +845,18 @@ client.getSource({ id, index }) ==== Arguments * *Request (object):* -** *`id` (string)*: Unique identifier of the document. -** *`index` (string)*: Name of the index that contains the document. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. -** *`realtime` (Optional, boolean)*: Boolean) If true, the request is real-time as opposed to near-real-time. -** *`refresh` (Optional, boolean)*: If true, Elasticsearch refreshes the affected shards to make this operation visible to search. If false, do nothing with refreshes. -** *`routing` (Optional, string)*: Target the specified primary shard. -** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return. +** *`id` (string)*: A unique document identifier. +** *`index` (string)*: The name of the index that contains the document. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. +** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. +** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. ** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. ** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -** *`stored_fields` (Optional, string | string[])* -** *`version` (Optional, number)*: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: internal, external, external_gte. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. +** *`version` (Optional, number)*: The version number for concurrency control. It must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. [discrete] === health_report @@ -568,10 +895,127 @@ client.healthReport({ ... }) [discrete] === index -Index a document. -Adds a JSON document to the specified data stream or index and makes it searchable. +Create or update a document in an index. + +Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. 
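+
+For instance, a minimal indexing call through this client might look like the sketch below (the index name, ID, and document body are placeholders):
+
+[source,ts]
+----
+// Minimal sketch: add or overwrite a document.
+// Omit `id` to let Elasticsearch generate one, in which case
+// `op_type` defaults to `create` as described in the arguments below.
+const result = await client.index({
+  index: 'my-index-000001',
+  id: '1',
+  document: { user: { id: 'elkbee' } }
+})
+----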
+
+NOTE: You cannot use this API to send update requests for existing documents in a data stream.
+
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
+
+* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.
+* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.
+* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.
+
+Automatic data stream creation requires a matching index template with data stream enabled.
+
+NOTE: Replica shards might not all be started when an indexing operation returns successfully.
+By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.
+
+**Automatically create data streams and indices**
+
+If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.
+
+If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.
+
+NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.
+
+If no mapping exists, the index operation creates a dynamic mapping.
+By default, new fields and objects are automatically added to the mapping if needed.
+
+Automatic index creation is controlled by the `action.auto_create_index` setting.
+If it is `true`, any index can be created automatically.
+You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.
+Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.
+When a list is specified, the default behavior is to disallow.
+
+NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.
+It does not affect the creation of data streams.
+
+**Optimistic concurrency control**
+
+Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.
+If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.
+
+**Routing**
+
+By default, shard placement — or routing — is controlled by using a hash of the document's ID value.
+For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.
+
+When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.
+This does come at the (very minimal) cost of an additional document parsing pass.
+If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.
+
+NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
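+
+With this client, the routing value from the discussion above is just another request parameter; a brief sketch with illustrative values:
+
+[source,ts]
+----
+// Minimal sketch: supply an explicit routing value at indexing time.
+// Documents that share a routing value are placed on the same shard.
+const result = await client.index({
+  index: 'my-index-000001',
+  document: { title: 'routed document' },
+  routing: 'user1'
+})
+----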
+
+**Distributed**
+
+The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.
+After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.
+
+**Active shards**
+
+To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
+If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.
+By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).
+This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.
+To alter this behavior per operation, use the `wait_for_active_shards` request parameter.
+
+Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).
+Specifying a negative value or a number greater than the number of shard copies will throw an error.
+
+For example, suppose you have a cluster of three nodes, A, B, and C and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).
+If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.
+This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
+If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.
+This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
+However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.
+The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.
+
+It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.
+After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.
+The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.
+
+**No operation (noop) updates**
+
+When updating a document by using this API, a new version of the document is always created even if the document hasn't changed.
+If this isn't acceptable, use the `_update` API with `detect_noop` set to `true`.
+The `detect_noop` option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source.
+
+There isn't a definitive rule for when noop updates aren't acceptable.
+It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. + +**Versioning** + +Each indexed document is given a version number. +By default, internal versioning is used that starts at 1 and increments with each update, deletes included. +Optionally, the version number can be set to an external value (for example, if maintained in a database). +To enable this functionality, `version_type` should be set to `external`. +The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. + +NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. +If no version is provided, the operation runs without any version checks. + +When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. +If true, the document will be indexed and the new version number used. +If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: + +---- +PUT my-index-000001/_doc/1?version=2&version_type=external +{ + "user": { + "id": "elkbee" + } +} +---- + +In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. +If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). + +A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. +Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. + {ref}/docs-index_.html[Endpoint documentation] [source,ts] ---- @@ -581,29 +1025,19 @@ client.index({ index }) ==== Arguments * *Request (object):* -** *`index` (string)*: Name of the data stream or index to target. -** *`id` (Optional, string)*: Unique identifier for the document. +** *`index` (string)*: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API. +** *`id` (Optional, string)*: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter. ** *`document` (Optional, object)*: A document. ** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. ** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. -** *`op_type` (Optional, Enum("index" | "create"))*: Set to create to only index the document if it does not already exist (put if absent). -If a document with the specified `_id` already exists, the indexing operation will fail. -Same as using the `/_create` endpoint. -Valid values: `index`, `create`. 
-If document id is specified, it defaults to `index`.
-Otherwise, it defaults to `create`.
-** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents.
-If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.
-If a final pipeline is configured it will always run, regardless of the value of this parameter.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes.
-Valid values: `true`, `false`, `wait_for`.
-** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
-** *`timeout` (Optional, string | -1 | 0)*: Period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.
-** *`version` (Optional, number)*: Explicit version number for concurrency control.
-The specified version must match the current version of the document for the request to succeed.
-** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
-Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+** *`op_type` (Optional, Enum("index" | "create"))*: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `<index>/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required.
+** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter.
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
+** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
+** *`timeout` (Optional, string | -1 | 0)*: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur.
+** *`version` (Optional, number)*: An explicit version number for concurrency control. It must be a non-negative long number. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. ** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias. [discrete] @@ -642,22 +1076,13 @@ client.knnSearch({ index, knn }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A list of index names to search; -use `_all` or to perform the operation on all indices +** *`index` (string | string[])*: A list of index names to search; use `_all` or to perform the operation on all indices ** *`knn` ({ field, query_vector, k, num_candidates })*: kNN query to execute -** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These -fields are returned in the hits._source property of the search response. -** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns -in the hits.fields property of the response. Accepts wildcard (*) patterns. -** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified, -no stored fields are included in the response. If this field is specified, the _source -parameter defaults to false. You can pass _source: true to return both source fields -and stored fields in the search response. -** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns -in the hits.fields property of the response. Accepts wildcard (*) patterns. 
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top -`k` documents that also match this filter. The value can be a single query or a -list of queries. If `filter` isn't provided, all documents are allowed to match. +** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. +** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. +** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. +** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. 
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. ** *`routing` (Optional, string)*: A list of specific routing values [discrete] @@ -680,19 +1105,14 @@ client.mget({ ... }) ** *`index` (Optional, string)*: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. ** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: The documents you want to retrieve. Required if no index is specified in the request URI. ** *`ids` (Optional, string | string[])*: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. -** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic _source? -Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. -Fetches with this enabled will be slower the enabling synthetic source natively in the index. +** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. ** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. ** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. ** *`refresh` (Optional, boolean)*: If `true`, the request refreshes relevant shards before retrieving documents. ** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. ** *`_source` (Optional, boolean | string | string[])*: True or false to return the `_source` field or not, or a list of fields to return. 
-** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. -You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. -If the `_source` parameter is `false`, this parameter is ignored. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. ** *`stored_fields` (Optional, string | string[])*: If `true`, retrieves the document fields stored in the index rather than the document `_source`. [discrete] @@ -702,12 +1122,12 @@ Run multiple searches. The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: -``` +---- header\n body\n header\n body\n -``` +---- This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. @@ -731,12 +1151,7 @@ client.msearch({ ... }) ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. ** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. ** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. -** *`include_named_queries_score` (Optional, boolean)*: Indicates whether hit.matched_queries should be rendered as a map that includes -the name of the matched query associated with its score (true) -or as an array containing the name of the matched queries (false) -This functionality reruns each named query on every hit in a search response. -Typically, this adds a small overhead to a request. -However, using computationally expensive named queries on a large number of hits may add significant overhead. +** *`include_named_queries_score` (Optional, boolean)*: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. ** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute. ** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node. 
** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. @@ -758,16 +1173,12 @@ client.msearchTemplate({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. -Supports wildcards (`*`). -To search all data streams and indices, omit this parameter or use `*`. +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. ** *`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* ** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. ** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the API can run. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. -Available options: `query_then_fetch`, `dfs_query_then_fetch`. -** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, the response returns `hits.total` as an integer. -If `false`, it returns `hits.total` as an object. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`. +** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. ** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. [discrete] @@ -791,14 +1202,12 @@ client.mtermvectors({ ... }) ** *`index` (Optional, string)*: Name of the index that contains the documents. ** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: Array of existing or artificial documents. ** *`ids` (Optional, string[])*: Simplified syntax to specify documents by their ID if they're in the same index. -** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. -Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. 
** *`field_statistics` (Optional, boolean)*: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies.
** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets.
** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads.
** *`positions` (Optional, boolean)*: If `true`, the response includes term positions.
** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default.
** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time.
** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
** *`term_statistics` (Optional, boolean)*: If true, the response includes term frequency and document frequency.
** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit.
** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type.

[discrete]
=== open_point_in_time
Open a point in time.

A search request by default runs against the most recent visible data of the target indices,
which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the
state of the data as it existed when initiated. In some cases, it's preferred to perform multiple
search requests using the same point in time. For example, if refreshes happen between
`search_after` requests, then the results of those requests might not be consistent as changes happening
between searches are only visible to the more recent point in time.

A point in time must be opened explicitly before being used in search requests.
-The `keep_alive` parameter tells Elasticsearch how long it should persist.
+
+A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time.
+
+Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits.
+If you want to retrieve more hits, use PIT with `search_after`.
+
+IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.
+
+When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception.
+To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.
+
+**Keeping point in time alive**
+
+The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time.
+The value does not need to be long enough to process all data — it just needs to be long enough for the next request.
+
+Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments.
+Once the smaller segments are no longer needed they are deleted.
+However, open point-in-times prevent the old segments from being deleted since they are still in use.
+
+TIP: Keeping older segments alive means that more disk space and file handles are needed.
+Ensure that you have configured your nodes to have ample free file handles.
+
+Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request.
+Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates.
+Note that a point-in-time doesn't prevent its associated indices from being deleted.
+You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.
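+
+Tying the above together, a point in time is opened once, reused across searches, and eventually closed. A sketch with this client, assuming the 8.x request options and that the promise resolves to the response body (names and durations are illustrative):
+
+[source,ts]
+----
+// Minimal sketch: open a PIT, search with it, then close it.
+const pit = await client.openPointInTime({
+  index: 'my-index-000001',
+  keep_alive: '1m'
+})
+// Always pass the most recently received PIT ID to the next search.
+const result = await client.search({
+  query: { match_all: {} },
+  pit: { id: pit.id, keep_alive: '1m' }
+})
+await client.closePointInTime({ id: pit.id })
+----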
{ref}/point-in-time-api.html[Endpoint documentation] [source,ts] @@ -829,17 +1264,13 @@ client.openPointInTime({ index, keep_alive }) * *Request (object):* ** *`index` (string | string[])*: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices -** *`keep_alive` (string | -1 | 0)*: Extends the time to live of the corresponding point in time. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to `match_none` on every shard. +** *`keep_alive` (string | -1 | 0)*: Extend the length of time that the point in time persists. +** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter indices if the provided query rewrites to `match_none` on every shard. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`allow_partial_search_results` (Optional, boolean)*: If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. -If `true`, the point in time will contain all the shards that are available at the time of the request. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, it is random. +** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard. 
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`allow_partial_search_results` (Optional, boolean)*: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. [discrete] === ping @@ -866,15 +1297,11 @@ client.putScript({ id, script }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the stored script or search template. -Must be unique within the cluster. +** *`id` (string)*: Identifier for the stored script or search template. Must be unique within the cluster. ** *`script` ({ lang, options, source })*: Contains the script or search template, its parameters, and its language. -** *`context` (Optional, string)*: Context in which the script or search template should run. -To prevent errors, the API immediately compiles the script or template in this context. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. +** *`context` (Optional, string)*: Context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] === rank_eval @@ -892,8 +1319,7 @@ client.rankEval({ requests }) * *Request (object):* ** *`requests` ({ id, request, ratings, template_id, params }[])*: A set of typical search requests, together with their provided ratings. -** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. -To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. +** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. ** *`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })*: Definition of the evaluation metric to calculate. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. 
For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. @@ -903,7 +1329,187 @@ To target all data streams and indices in a cluster, omit this parameter or use [discrete] === reindex Reindex documents. -Copies documents from a source to a destination. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. + +Copy documents from a source to a destination. +You can copy all documents to the destination index or reindex a subset of the documents. +The source can be any existing index, alias, or data stream. +The destination must differ from the source. +For example, you cannot reindex a data stream into itself. + +IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. +The destination should be configured as wanted before calling the reindex API. +Reindex does not copy the settings from the source or its associated template. +Mappings, shard counts, and replicas, for example, must be configured ahead of time. + +If the Elasticsearch security features are enabled, you must have the following security privileges: + +* The `read` index privilege for the source data stream, index, or alias. +* The `write` index privilege for the destination data stream, index, or index alias. +* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. +* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. + +If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. +Automatic data stream creation requires a matching index template with data stream enabled. + +The `dest` element can be configured like the index API to control optimistic concurrency control. +Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. + +Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. + +Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. +All existing documents will cause a version conflict. + +IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. +A reindex can only add new documents to a destination data stream. +It cannot update existing documents in a destination data stream. + +By default, version conflicts abort the reindex process. +To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. +In this case, the response includes a count of the version conflicts that were encountered. +Note that the handling of other error types is unaffected by the `conflicts` property. 
+Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. + +NOTE: The reindex API makes no effort to handle ID collisions. +The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. +Instead, make sure that IDs are unique by using a script. + +**Running reindex asynchronously** + +If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. +Elasticsearch creates a record of this task as a document at `_tasks/`. + +**Reindex from multiple sources** + +If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. +That way you can resume the process if there are any errors by removing the partially completed source and starting over. +It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. + +For example, you can use a bash script like this: + +---- +for index in i1 i2 i3 i4 i5; do + curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ + "source": { + "index": "'$index'" + }, + "dest": { + "index": "'$index'-reindexed" + } + }' +done +---- + +**Throttling** + +Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. +Requests are throttled by padding each batch with a wait time. +To turn off throttling, set `requests_per_second` to `-1`. + +The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. +The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. +By default the batch size is `1000`, so if `requests_per_second` is set to `500`: + +---- +target_time = 1000 / 500 per second = 2 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +---- + +Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. +This is "bursty" instead of "smooth". + +**Slicing** + +Reindex supports sliced scroll to parallelize the reindexing process. +This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. + +NOTE: Reindexing from remote clusters does not support manual or automatic slicing. + +You can slice a reindex request manually by providing a slice ID and total number of slices to each request. +You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. +The `slices` parameter specifies the number of slices to use. + +Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: + +* You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. +* Fetching the status of the task for the request with `slices` only contains the status of completed slices. 
+* These sub-requests are individually addressable for things like cancellation and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. +* Canceling the request with `slices` will cancel each sub-request. +* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. +* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. + +If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. +If slicing manually or otherwise tuning automatic slicing, use the following guidelines. + +Query performance is most efficient when the number of slices is equal to the number of shards in the index. +If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. +Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. + +Indexing performance scales linearly across available resources with the number of slices. + +Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. + +**Modify documents during reindexing** + +Like `_update_by_query`, reindex operations support a script that modifies the document. +Unlike `_update_by_query`, the script is allowed to modify the document's metadata. + +Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. +For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. +Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. +The deletion will be reported in the `deleted` counter in the response body. +Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. + +Think of the possibilities! Just be careful; you are able to change: + +* `_id` +* `_index` +* `_version` +* `_routing` + +Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. +It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. + +**Reindex from remote** + +Reindex supports reindexing from a remote Elasticsearch cluster. +The `host` parameter must contain a scheme, host, port, and optional path. +The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. +Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. +There are a range of settings available to configure the behavior of the HTTPS connection. 
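As an illustrative sketch, a reindex from a remote cluster with basic authentication might look like this (the host, credentials, and index names are hypothetical placeholders, and the host must also appear in `reindex.remote.whitelist`):

[source,ts]
----
// A sketch only: pull documents from a remote cluster over HTTPS.
const response = await client.reindex({
  source: {
    remote: {
      host: 'https://otherhost:9200/',
      username: 'user',
      password: 'pass'
    },
    index: 'my-remote-index'
  },
  dest: { index: 'my-local-index' }
})
----

Because basic authentication sends the password with every request, the remote host should be reached over HTTPS, as noted above.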
+
+When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.
+Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.
+It can be set to a comma-delimited list of allowed remote host and port combinations.
+Scheme is ignored; only the host and port are used.
+For example:
+
+----
+reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
+----
+
+The list of allowed hosts must be configured on any nodes that will coordinate the reindex.
+This feature should work with remote clusters of any version of Elasticsearch.
+This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.
+
+WARNING: Elasticsearch does not support forward compatibility across major versions.
+For example, you cannot reindex from a 7.x cluster into a 6.x cluster.
+
+To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.
+
+NOTE: Reindexing from remote clusters does not support manual or automatic slicing.
+
+Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.
+If the remote index includes very large documents you'll need to use a smaller batch size.
+It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.
+Both default to 30 seconds.
+
+**Configuring SSL parameters**
+
+Reindex from remote supports configurable SSL settings.
+These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.
+It is not possible to configure SSL in the body of the reindex request.

{ref}/docs-reindex.html[Endpoint documentation]
[source,ts]
----
client.reindex({ dest, source })
----

==== Arguments

* *Request (object):*
** *`dest` ({ index, op_type, pipeline, routing, version_type })*: The destination you are copying to.
** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source you are copying from.
** *`conflicts` (Optional, Enum("abort" | "proceed"))*: Indicates whether to continue reindexing even when there are conflicts.
** *`max_docs` (Optional, number)*: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.
** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when reindexing.
** *`size` (Optional, number)*
** *`refresh` (Optional, boolean)*: If `true`, the request refreshes affected shards to make this operation visible to search.
-** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second.
-Defaults to no throttle.
-** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index should be maintained for scrolled search. -** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. -Defaults to 1 slice, meaning the task isn’t sliced into subtasks. -** *`timeout` (Optional, string | -1 | 0)*: Period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. By default, there is no throttle. +** *`scroll` (Optional, string | -1 | 0)*: The period of time that a consistent view of the index should be maintained for scrolled search. +** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. +** *`timeout` (Optional, string | -1 | 0)*: The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active. ** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. ** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias. @@ -936,7 +1539,16 @@ Set to `all` or any positive integer up to the total number of shards in the ind === reindex_rethrottle Throttle a reindex operation. -Change the number of requests per second for a particular reindex operation. +Change the number of requests per second for a particular reindex operation. +For example: + +---- +POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 +---- + +Rethrottling that speeds up the query takes effect immediately. +Rethrottling that slows down the query will take effect after completing the current batch. +This behavior prevents scroll timeouts. {ref}/docs-reindex.html[Endpoint documentation] [source,ts] @@ -947,8 +1559,8 @@ client.reindexRethrottle({ task_id }) ==== Arguments * *Request (object):* -** *`task_id` (string)*: Identifier for the task. 
-** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. +** *`task_id` (string)*: The task identifier, which can be found by using the tasks API. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. [discrete] === render_search_template @@ -965,21 +1577,22 @@ client.renderSearchTemplate({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string)*: ID of the search template to render. -If no `source` is specified, this or the `id` request body parameter is required. +** *`id` (Optional, string)*: ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. ** *`file` (Optional, string)* -** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. -The key is the variable name. -The value is the variable value. -** *`source` (Optional, string)*: An inline search template. -Supports the same parameters as the search API's request body. -These parameters also support Mustache variables. -If no `id` or `` is specified, this parameter is required. +** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. +** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. [discrete] === scripts_painless_execute Run a script. + Runs a script and returns a result. +Use this API to build and test scripts, such as when defining a script for a runtime field. +This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. + +The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. + +Each context requires a script, but additional parameters depend on the context you're using for that script. {painless}/painless-execute-api.html[Endpoint documentation] [source,ts] @@ -990,9 +1603,9 @@ client.scriptsPainlessExecute({ ... }) ==== Arguments * *Request (object):* -** *`context` (Optional, string)*: The context that the script should run in. -** *`context_setup` (Optional, { document, index, query })*: Additional parameters for the `context`. -** *`script` (Optional, { source, id, params, lang, options })*: The Painless script to execute. +** *`context` (Optional, Enum("painless_test" | "filter" | "score" | "boolean_field" | "date_field" | "double_field" | "geo_point_field" | "ip_field" | "keyword_field" | "long_field" | "composite_field"))*: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. +** *`context_setup` (Optional, { document, index, query })*: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. +** *`script` (Optional, { source, id, params, lang, options })*: The Painless script to run. [discrete] === scroll @@ -1032,6 +1645,21 @@ Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. 
If both are specified, only the query parameter is used.
+If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.
+To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.
+
+**Search slicing**
+
+When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.
+By default the splitting is done first on the shards, then locally on each shard.
+The local splitting partitions the shard into contiguous ranges based on Lucene document IDs.
+
+For instance, if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.
+
+IMPORTANT: The same point-in-time ID should be used for all slices.
+If different PIT IDs are used, slices can overlap and miss documents.
+This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
+
{ref}/search-search.html[Endpoint documentation]
[source,ts]
----
client.search({ ... })
----

==== Arguments

* *Request (object):*
-** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search.
-Supports wildcards (`*`).
-To search all data streams and indices, omit this parameter or use `*` or `_all`.
+** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
** *`aggregations` (Optional, Record)*: Defines the aggregations that are run as part of the search request.
** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*: Collapses search results by the values of the specified field.
** *`explain` (Optional, boolean)*: If `true`, the request returns detailed information about score computation as part of a hit.
** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins.
** *`from` (Optional, number)*: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
** *`highlight` (Optional, { encoder, fields })*: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results.
** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices.
-** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (`*`) patterns. -The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. -** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits }[])*: Defines the approximate kNN search to run. -** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use. -** *`min_score` (Optional, number)*: Minimum `_score` for matching documents. -Documents with a lower `_score` are not included in the search results. -** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. -The search hits are filtered after the aggregations are calculated. -A post filter has no impact on the aggregation results. -** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. -NOTE: This is a debugging tool and adds significant overhead to search execution. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. +** *`indices_boost` (Optional, Record[])*: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. +** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. 
+** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])*: The approximate kNN search to run. +** *`rank` (Optional, { rrf })*: The Reciprocal Rank Fusion (RRF) to use. +** *`min_score` (Optional, number)*: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results. +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. +** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The search definition using the Query DSL. ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. -** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as query and knn. +** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. 
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page. -** *`size` (Optional, number)*: The number of hits to return. -By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. -To page through more hits, use the `search_after` parameter. -** *`slice` (Optional, { field, id, max })*: Can be used to split a scrolled search into multiple slices that can be consumed independently. +** *`size` (Optional, number)*: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. +** *`slice` (Optional, { field, id, max })*: Split a scrolled search into multiple slices that can be consumed independently. ** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: A list of : pairs. -** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. -These fields are returned in the hits._source property of the search response. -** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (`*`) patterns. -The request returns values for field names matching these patterns in the `hits.fields` property of the response. +** *`_source` (Optional, boolean | { excludes, includes })*: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. +** *`fields` (Optional, { field, format, include_unmapped }[])*: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. ** *`suggest` (Optional, { text })*: Defines a suggester that provides similar looking terms based on a provided text. -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. -If a query reaches this limit, Elasticsearch terminates the query early. -Elasticsearch collects documents before sorting. -Use with caution. -Elasticsearch applies this parameter to each shard handling the request. -When possible, let Elasticsearch perform early termination automatically. -Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. -If set to `0` (default), the query does not terminate early. -** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. -If no response is received before the timeout expires, the request fails and returns an error. -Defaults to no timeout. -** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting. -** *`version` (Optional, boolean)*: If true, returns document version as part of a hit. -** *`seq_no_primary_term` (Optional, boolean)*: If `true`, returns sequence number and primary term of the last modification of each hit. -** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. -If no fields are specified, no stored fields are included in the response. 
-If this field is specified, the `_source` parameter defaults to `false`. -You can pass `_source: true` to return both source fields and stored fields in the search response. -** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). -If you provide a PIT, you cannot specify an `` in the request path. -** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. -These fields take precedence over mapped fields with the same name. -** *`stats` (Optional, string[])*: Stats groups to associate with the search. -Each group maintains a statistics aggregation for its associated searches. -You can retrieve these stats using the indices stats API. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or shard failures. If false, returns an error with no partial results. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -This parameter can only be used when the q query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If true, wildcard and prefix queries are analyzed. -This parameter can only be used when the q query string parameter is specified. -** *`batched_reduce_size` (Optional, number)*: The number of shard results that should be reduced at once on the coordinating node. -This value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. -** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network round-trips between the coordinating node and the remote clusters are minimized when executing cross-cluster search (CCS) requests. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: AND or OR. -This parameter can only be used when the `q` query string parameter is specified. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -This parameter can only be used when the q query string parameter is specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. +** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. +** *`timeout` (Optional, string)*: The period of time to wait for a response from each shard. 
If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. +** *`track_scores` (Optional, boolean)*: If `true`, calculate and return document scores, even if the scores are not used for sorting. +** *`version` (Optional, boolean)*: If `true`, the request returns the document version as part of a hit. +** *`seq_no_primary_term` (Optional, boolean)*: If `true`, the request returns sequence number and primary term of the last modification of each hit. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. +** *`pit` (Optional, { id, keep_alive })*: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. +** *`runtime_mappings` (Optional, Record)*: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. +** *`stats` (Optional, string[])*: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`allow_partial_search_results` (Optional, boolean)*: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. +** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +** *`batched_reduce_size` (Optional, number)*: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. +** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`. ** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices will be ignored when frozen. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`include_named_queries_score` (Optional, boolean)*: Indicates whether hit.matched_queries should be rendered as a map that includes -the name of the matched query associated with its score (true) -or as an array containing the name of the matched queries (false) -This functionality reruns each named query on every hit in a search response. -Typically, this adds a small overhead to a request. -However, using computationally expensive named queries on a large number of hits may add significant overhead. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -This parameter can only be used when the `q` query string parameter is specified. -** *`max_concurrent_shard_requests` (Optional, number)*: Defines the number of concurrent shard requests per node this search executes concurrently. -This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. -** *`preference` (Optional, string)*: Nodes and shards used for the search. -By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: -`_only_local` to run the search only on shards on the local node; -`_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method; -`_only_nodes:,` to run the search on only the specified nodes IDs, where, if suitable shards exist on more than one selected node, use shards on those nodes using the default method, or if none of the specified nodes are available, select shards from any available node using the default method; -`_prefer_nodes:,` to if possible, run the search on the specified nodes IDs, or if not, select shards using the default method; -`_shards:,` to run the search only on the specified shards; -`` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. -** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. -This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). -When unspecified, the pre-filter phase is executed if any of these conditions is met: -the request targets more than 128 shards; -the request targets one or more read-only index; -the primary sort of the query targets an indexed field. -** *`request_cache` (Optional, boolean)*: If `true`, the caching of search results is enabled for requests where `size` is `0`. -Defaults to index level settings. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling. 
See Scroll search results. -By default, this value cannot exceed `1d` (24 hours). -You can change this limit using the `search.max_keep_alive` cluster-level setting. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: How distributed term frequencies are calculated for relevance scoring. -** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions. -** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specifies the suggest mode. -This parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified. -** *`suggest_size` (Optional, number)*: Number of suggestions to return. -This parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified. -** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. -This parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified. +** *`include_named_queries_score` (Optional, boolean)*: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. +** *`preference` (Optional, string)*: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. +** *`pre_filter_shard_size` (Optional, number)*: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). 
When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only indices. * The primary sort of the query targets an indexed field.
+** *`request_cache` (Optional, boolean)*: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings.
+** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
+** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting.
+** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Indicates how distributed term frequencies are calculated for relevance scoring.
+** *`suggest_field` (Optional, string)*: The field to use for suggestions.
+** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+** *`suggest_size` (Optional, number)*: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
** *`typed_keys` (Optional, boolean)*: If `true`, aggregation and suggester names are prefixed by their respective types in the response.
** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response.
+** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned.
You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+** *`q` (Optional, string)*: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned.
+** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.

[discrete]
=== search_mvt
Search a vector tile.
Search a vector tile for geospatial values.

{ref}/search-vector-tile-api.html[Endpoint documentation]
[source,ts]
----
client.searchMvt({ index, field, zoom, x, y })
----

==== Arguments

* *Request (object):*
** *`index` (string | string[])*: List of data streams, indices, or aliases to search
** *`field` (string)*: Field containing geospatial data to return
** *`zoom` (number)*: Zoom level for the vector tile to search
** *`x` (number)*: X coordinate for the vector tile to search
** *`y` (number)*: Y coordinate for the vector tile to search
** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid. Supports the following aggregation types: - avg - cardinality - max - min - sum
** *`buffer` (Optional, number)*: Size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile.
** *`exact_bounds` (Optional, boolean)*: If false, the meta layer’s feature is the bounding box of the tile. If true, the meta layer’s feature is a bounding box resulting from a geo_bounds aggregation. The aggregation runs on values that intersect the // tile with wrap_longitude set to false. The resulting bounding box may be larger than the vector tile.
** *`extent` (Optional, number)*: Size, in pixels, of a side of the tile. Vector tiles are square with equal sides.
** *`fields` (Optional, string | string[])*: Fields to return in the `hits` layer. Supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results.
** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: Aggregation used to create a grid for the `field`.
** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don’t include the aggs layer.
-** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer, -each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon -of the cells bounding box. If 'point' each feature is a Point that is the centroid -of the cell. +** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don’t include the aggs layer. +** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon of the cells bounding box. If 'point' each feature is a Point that is the centroid of the cell. ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search. -** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take -precedence over mapped fields with the same name. -** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000. -If 0, results don’t include the hits layer. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sorts features in the hits layer. By default, the API calculates a bounding -box for each feature. It sorts features based on this box’s diagonal length, -from longest to shortest. -** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If `true`, the exact number -of hits is returned at the cost of some performance. If `false`, the response does -not include the total number of hits matching the query. -** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing -suggested label positions for the original features. +** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. +** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don’t include the hits layer. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sorts features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box’s diagonal length, from longest to shortest. +** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. 
If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. +** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. [discrete] === search_shards @@ -1260,18 +1788,12 @@ client.searchShards({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: Returns the indices and shards that a search request would be executed against. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. ** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. ** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. [discrete] @@ -1287,33 +1809,20 @@ client.searchTemplate({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, -and aliases to search. Supports wildcards (*). +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (*). ** *`explain` (Optional, boolean)*: If `true`, returns detailed information about score calculation as part of each hit. -** *`id` (Optional, string)*: ID of the search template to use. If no source is specified, -this parameter is required. 
-** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. -The key is the variable name. -The value is the variable value. +** *`id` (Optional, string)*: ID of the search template to use. If no source is specified, this parameter is required. +** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. ** *`profile` (Optional, boolean)*: If `true`, the query execution is profiled. -** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's -request body. Also supports Mustache variables. If no id is specified, this -parameter is required. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. Also supports Mustache variables. If no id is specified, this parameter is required. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. ** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`ignore_throttled` (Optional, boolean)*: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. ** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index -should be maintained for scrolled search. 
+** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index should be maintained for scrolled search. ** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. ** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are rendered as an integer in the response. ** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. @@ -1368,14 +1877,12 @@ client.termvectors({ index }) ** *`doc` (Optional, object)*: An artificial document (a document not present in the index) for which you want to retrieve term vectors. ** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })*: Filter terms based on their tf-idf scores. ** *`per_field_analyzer` (Optional, Record)*: Overrides the default per-field analyzer. -** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. -Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. ** *`field_statistics` (Optional, boolean)*: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. ** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets. ** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads. ** *`positions` (Optional, boolean)*: If `true`, the response includes term positions. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. +** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. ** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. ** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. ** *`term_statistics` (Optional, boolean)*: If `true`, the response includes term frequency and document frequency. @@ -1385,7 +1892,24 @@ Random by default. [discrete] === update Update a document. -Updates a document by running a script or passing a partial document. + +Update a document by running a script or passing a partial document. + +If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. + +The script can update, delete, or skip modifying the document. +The API also supports passing a partial document, which is merged into the existing document. +To fully replace an existing document, use the index API. +This operation: + +* Gets the document (collocated with the shard) from the index. +* Runs the specified script. +* Indexes the result. + +The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. + +The `_source` field must be enabled to use this API. +In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). 
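+
+As a minimal sketch (the index name `my-index`, document ID, and `count` field below are illustrative, not part of the API), a scripted update that increments a counter might look like this:
+
+[source,ts]
+----
+// Hypothetical example: increment a numeric field on an existing document.
+// The script reads and modifies the current source through `ctx._source`.
+await client.update({
+  index: 'my-index',
+  id: '1',
+  script: {
+    source: 'ctx._source.count += params.increment',
+    params: { increment: 1 }
+  }
+})
+----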
{ref}/docs-update.html[Endpoint documentation] [source,ts] @@ -1396,35 +1920,26 @@ client.update({ id, index }) ==== Arguments * *Request (object):* -** *`id` (string)*: Document ID -** *`index` (string)*: The name of the index -** *`detect_noop` (Optional, boolean)*: Set to false to disable setting 'result' in the response -to 'noop' if no change to the document occurred. -** *`doc` (Optional, object)*: A partial update to an existing document. -** *`doc_as_upsert` (Optional, boolean)*: Set to true to use the contents of 'doc' as the value of 'upsert' -** *`script` (Optional, { source, id, params, lang, options })*: Script to execute to update the document. -** *`scripted_upsert` (Optional, boolean)*: Set to true to execute the script whether or not the document exists. -** *`_source` (Optional, boolean | { excludes, includes })*: Set to false to disable source retrieval. You can also specify a comma-separated -list of the fields you want to retrieve. -** *`upsert` (Optional, object)*: If the document does not already exist, the contents of 'upsert' are inserted as a -new document. If the document exists, the 'script' is executed. +** *`id` (string)*: A unique identifier for the document to be updated. +** *`index` (string)*: The name of the target index. By default, the index is created automatically if it doesn't exist. +** *`detect_noop` (Optional, boolean)*: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. +** *`doc` (Optional, object)*: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored. +** *`doc_as_upsert` (Optional, boolean)*: If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. +** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document. +** *`scripted_upsert` (Optional, boolean)*: If `true`, run the script whether or not the document exists. +** *`_source` (Optional, boolean | { excludes, includes })*: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve. +** *`upsert` (Optional, object)*: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run. ** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. ** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. ** *`lang` (Optional, string)*: The script language. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation -visible to search, if 'wait_for' then wait for a refresh to make this operation -visible to search, if 'false' do nothing with refreshes. -** *`require_alias` (Optional, boolean)*: If true, the destination must be an index alias. -** *`retry_on_conflict` (Optional, number)*: Specify how many times should the operation be retried when a conflict occurs. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for dynamic mapping updates and active shards. -This guarantees Elasticsearch waits for at least the timeout before failing. -The actual wait time could be longer, particularly when multiple waits occur. 
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operations. -Set to 'all' or any positive integer up to the total number of shards in the index -(number_of_replicas+1). Defaults to 1 meaning the primary shard. -** *`_source_excludes` (Optional, string | string[])*: Specify the source fields you want to exclude. -** *`_source_includes` (Optional, string | string[])*: Specify the source fields you want to retrieve. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. +** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias. +** *`retry_on_conflict` (Optional, number)*: The number of times the operation should be retried when a conflict occurs. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active. +** *`_source_excludes` (Optional, string | string[])*: The source fields you want to exclude. +** *`_source_includes` (Optional, string | string[])*: The source fields you want to retrieve. [discrete] === update_by_query @@ -1432,6 +1947,87 @@ Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. +If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: + +* `read` +* `index` or `write` + +You can specify the query criteria in the request URI or the request body using the same syntax as the search API. + +When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. +When the versions match, the document is updated and the version number is incremented. +If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. +You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. +Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. + +NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. 
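+
+As a rough sketch (the index name `my-index` is illustrative), a request that counts version conflicts instead of halting could look like this:
+
+[source,ts]
+----
+// Hypothetical example: update every document in `my-index`,
+// counting version conflicts rather than aborting on the first one.
+await client.updateByQuery({
+  index: 'my-index',
+  conflicts: 'proceed',
+  query: { match_all: {} }
+})
+----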
+ +While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. +A bulk update request is performed for each batch of matching documents. +Any query or update failures cause the update by query request to fail and the failures are shown in the response. +Any update requests that completed successfully still stick, they are not rolled back. + +**Throttling update requests** + +To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. +This pads each batch with a wait time to throttle the rate. +Set `requests_per_second` to `-1` to turn off throttling. + +Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. +The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. +By default the batch size is 1000, so if `requests_per_second` is set to `500`: + +---- +target_time = 1000 / 500 per second = 2 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +---- + +Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. +This is "bursty" instead of "smooth". + +**Slicing** + +Update by query supports sliced scroll to parallelize the update process. +This can improve efficiency and provide a convenient way to break the request down into smaller parts. + +Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. +This setting will use one slice per shard, up to a certain limit. +If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. + +Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: + +* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. +* Fetching the status of the task for the request with `slices` only contains the status of completed slices. +* These sub-requests are individually addressable for things like cancellation and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. +* Canceling the request with slices will cancel each sub-request. +* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. +* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. + +If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: + +* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. 
If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. +* Update performance scales linearly across available resources with the number of slices. + +Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. + +**Update the document source** + +Update by query supports scripts to update the document source. +As with the update API, you can set `ctx.op` to change the operation that is performed. + +Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. +The update by query operation skips updating the document and increments the `noop` counter. + +Set `ctx.op = "delete"` if your script decides that the document should be deleted. +The update by query operation deletes the document and increments the `deleted` counter. + +Update by query supports only `index`, `noop`, and `delete`. +Setting `ctx.op` to anything else is an error. +Setting any other field in `ctx` is an error. +This API enables you to only modify the source of matching documents; you cannot move them. + {ref}/docs-update-by-query.html[Endpoint documentation] [source,ts] ---- @@ -1441,58 +2037,41 @@ client.updateByQuery({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, and aliases to search. -Supports wildcards (`*`). -To search all data streams or indices, omit this parameter or use `*` or `_all`. +** *`index` (string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`max_docs` (Optional, number)*: The maximum number of documents to update. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The documents to update using the Query DSL. 
** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when updating. ** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. -** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if update by query hits version conflicts: `abort` or `proceed`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`conflicts` (Optional, Enum("abort" | "proceed"))*: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`from` (Optional, number)*: Starting offset (default: 0) ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. 
-** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
-** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents.
-If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.
-If a final pipeline is configured it will always run, regardless of the value of this parameter.
-** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on.
-Random by default.
-** *`q` (Optional, string)*: Query in the Lucene query string syntax.
-** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search.
-** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request.
+** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
+** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter.
+** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default.
+** *`q` (Optional, string)*: A query in the Lucene query string syntax.
+** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed.
+** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. It defaults to the index-level setting.
** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second.
-** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
-** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling.
-** *`scroll_size` (Optional, number)*: Size of the scroll request that powers the operation.
-** *`search_timeout` (Optional, string | -1 | 0)*: Explicit timeout for each search request.
-** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`.
+** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.
+** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling.
+** *`scroll_size` (Optional, number)*: The size of the scroll request that powers the operation.
+** *`search_timeout` (Optional, string | -1 | 0)*: An explicit timeout for each search request. By default, there is no timeout.
+** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into.
** *`sort` (Optional, string[])*: A list of `<field>:<direction>` pairs.
-** *`stats` (Optional, string[])*: Specific `tag` of the request for logging and statistical purposes. -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. -If a query reaches this limit, Elasticsearch terminates the query early. -Elasticsearch collects documents before sorting. -Use with caution. -Elasticsearch applies this parameter to each shard handling the request. -When possible, let Elasticsearch perform early termination automatically. -Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. -** *`timeout` (Optional, string | -1 | 0)*: Period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. +** *`stats` (Optional, string[])*: The specific `tag` of the request for logging and statistical purposes. +** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +** *`timeout` (Optional, string | -1 | 0)*: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. ** *`version` (Optional, boolean)*: If `true`, returns the document version as part of a hit. ** *`version_type` (Optional, boolean)*: Should the document increment the version number (internal) on hit or not (reindex) -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). -** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. [discrete] === update_by_query_rethrottle @@ -1511,7 +2090,7 @@ client.updateByQueryRethrottle({ task_id }) * *Request (object):* ** *`task_id` (string)*: The ID for the task. -** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. 
To turn off throttling, set it to `-1`. [discrete] === async_search @@ -1621,7 +2200,7 @@ Defaults to 10,000 hits. ** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. ** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. -** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits }[])*: Defines the approximate kNN search to run. +** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])*: Defines the approximate kNN search to run. ** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. ** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* @@ -1663,6 +2242,8 @@ aggregation for its associated searches. You can retrieve these stats using the indices stats API. ** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. +** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search needs to be available. +Ongoing async searches and any saved search results are deleted after this period. ** *`keep_on_completion` (Optional, boolean)*: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) ** *`allow_partial_search_results` (Optional, boolean)*: Indicate if an error should be returned if there is a partial search failure or timeout @@ -1792,10 +2373,11 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== aliases Get aliases. -Retrieves the cluster’s index aliases, including filter and routing information. -The API does not return data stream aliases. -CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. +Get the cluster's index aliases, including filter and routing information. 
+This API does not return data stream aliases.
+
+IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.

{ref}/cat-alias.html[Endpoint documentation]
[source,ts]
----
client.cat.aliases({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (Optional, string | string[])*: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+It supports a list of values, such as `open,hidden`.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, you can set it to `-1`.

[discrete]
==== allocation
Get shard allocation information.
+
Get a snapshot of the number of shards allocated to each data node and their disk space.
+
IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

{ref}/cat-allocation.html[Endpoint documentation]
[source,ts]
----
client.cat.allocation({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`node_id` (Optional, string | string[])*: List of node identifiers or names used to limit the returned information.
+** *`node_id` (Optional, string | string[])*: A list of node identifiers or names used to limit the returned information.
** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values.
** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.

[discrete]
==== component_templates
Get component templates.
-Returns information about component templates in a cluster.
+
+Get information about component templates in a cluster.
Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

-CAT APIs are only intended for human consumption using the command line or Kibana console.
+IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.
They are not intended for use by applications. For application consumption, use the get component template API.

{ref}/cat-component-templates.html[Endpoint documentation]
[source,ts]
----
client.cat.componentTemplates({ ...
}) ==== Arguments * *Request (object):* -** *`name` (Optional, string)*: The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned. +** *`name` (Optional, string)*: The name of the component template. +It accepts wildcard expressions. +If it is omitted, all component templates are returned. ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. [discrete] ==== count Get a document count. -Provides quick access to a document count for a data stream, an index, or an entire cluster. + +Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. -CAT APIs are only intended for human consumption using the command line or Kibana console. +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. {ref}/cat-count.html[Endpoint documentation] @@ -1880,13 +2472,16 @@ client.cat.count({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. +It supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. [discrete] ==== fielddata Get field data cache information. + Get the amount of heap memory currently used by the field data cache on every data node in the cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. @@ -1907,7 +2502,8 @@ To retrieve all fields, omit this parameter. [discrete] ==== health Get the cluster health status. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. + +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: @@ -1933,7 +2529,8 @@ client.cat.health({ ... }) [discrete] ==== help Get CAT help. -Returns help for the CAT APIs. + +Get help for the CAT APIs. {ref}/cat.html[Endpoint documentation] [source,ts] @@ -1945,7 +2542,8 @@ client.cat.help() [discrete] ==== indices Get index information. -Returns high-level information about indices in a cluster, including backing indices for data streams. 
+ +Get high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count @@ -1983,7 +2581,9 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para [discrete] ==== master Get master node information. + Get information about the master node, including the ID, bound IP address, and name. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. {ref}/cat-master.html[Endpoint documentation] @@ -2005,9 +2605,10 @@ node will send requests for further information to each selected node. [discrete] ==== ml_data_frame_analytics Get data frame analytics jobs. -Returns configuration and usage information about data frame analytics jobs. -CAT APIs are only intended for human consumption using the Kibana +Get configuration and usage information about data frame analytics jobs. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. @@ -2032,12 +2633,13 @@ response. [discrete] ==== ml_datafeeds Get datafeeds. -Returns configuration and usage information about datafeeds. + +Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. -CAT APIs are only intended for human consumption using the Kibana +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. @@ -2068,12 +2670,13 @@ partial matches. [discrete] ==== ml_jobs Get anomaly detection jobs. -Returns configuration and usage information for anomaly detection jobs. + +Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. -CAT APIs are only intended for human consumption using the Kibana +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. @@ -2105,9 +2708,10 @@ matches. [discrete] ==== ml_trained_models Get trained models. -Returns configuration and usage information about inference trained models. -CAT APIs are only intended for human consumption using the Kibana +Get configuration and usage information about inference trained models. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. @@ -2135,6 +2739,7 @@ If `false`, the API returns a 404 status code when there are no matches or only [discrete] ==== nodeattrs Get node attribute information. + Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. 
They are not intended for use by applications. For application consumption, use the nodes info API. @@ -2157,6 +2762,7 @@ node will send requests for further information to each selected node. [discrete] ==== nodes Get node information. + Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. @@ -2179,6 +2785,7 @@ client.cat.nodes({ ... }) [discrete] ==== pending_tasks Get pending task information. + Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. @@ -2202,6 +2809,7 @@ node will send requests for further information to each selected node. [discrete] ==== plugins Get plugin information. + Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. @@ -2225,6 +2833,7 @@ node will send requests for further information to each selected node. [discrete] ==== recovery Get shard recovery information. + Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. @@ -2250,6 +2859,7 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para [discrete] ==== repositories Get snapshot repository information. + Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. @@ -2272,6 +2882,7 @@ node will send requests for further information to each selected node. [discrete] ==== segments Get segment information. + Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. @@ -2299,6 +2910,7 @@ node will send requests for further information to each selected node. [discrete] ==== shards Get shard information. + Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. @@ -2322,7 +2934,8 @@ To target all data streams and indices, omit this parameter or use `*` or `_all` [discrete] ==== snapshots -Get snapshot information +Get snapshot information. + Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. 
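
For a quick look from code, a minimal sketch might be (the repository name `my-repository` is illustrative):

[source,ts]
----
// Hypothetical example: list the snapshots stored in a single repository.
const body = await client.cat.snapshots({ repository: 'my-repository' })
console.log(body)
----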
IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. @@ -2348,6 +2961,7 @@ If any repository fails during the request, Elasticsearch returns an error. [discrete] ==== tasks Get task information. + Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. @@ -2373,6 +2987,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== templates Get index template information. + Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. @@ -2398,6 +3013,7 @@ node will send requests for further information to each selected node. [discrete] ==== thread_pool Get thread pool statistics. + Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. @@ -2424,6 +3040,7 @@ node will send requests for further information to each selected node. [discrete] ==== transforms Get transform information. + Get configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana @@ -3277,6 +3894,7 @@ client.connector.delete({ connector_id }) * *Request (object):* ** *`connector_id` (string)*: The unique identifier of the connector to be deleted ** *`delete_sync_jobs` (Optional, boolean)*: A flag indicating if associated sync jobs should be also removed. Defaults to false. +** *`hard` (Optional, boolean)*: A flag indicating if the connector should be hard deleted. [discrete] ==== get @@ -3295,6 +3913,7 @@ client.connector.get({ connector_id }) * *Request (object):* ** *`connector_id` (string)*: The unique identifier of the connector +** *`include_deleted` (Optional, boolean)*: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. [discrete] ==== list @@ -3317,6 +3936,7 @@ client.connector.list({ ... }) ** *`index_name` (Optional, string | string[])*: A list of connector index names to fetch connector documents for ** *`connector_name` (Optional, string | string[])*: A list of connector names to fetch connector documents for ** *`service_type` (Optional, string | string[])*: A list of connector service types to fetch connector documents for +** *`include_deleted` (Optional, boolean)*: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. ** *`query` (Optional, string)*: A wildcard query string that filters connectors with matching name, description or index name [discrete] @@ -4092,25 +4712,100 @@ parameter to get a smaller or larger set of samples. To retrieve more than one s === esql [discrete] ==== async_query -Executes an ESQL request asynchronously +Run an async ES|QL query. 
+Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. + +The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. {ref}/esql-async-query-api.html[Endpoint documentation] [source,ts] ---- -client.esql.asyncQuery() +client.esql.asyncQuery({ query }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. +** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. +** *`locale` (Optional, string)* +** *`params` (Optional, number | number | string | boolean | null | User-defined value[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +** *`profile` (Optional, boolean)*: If provided and `true` the response will include an extra `profile` object +with information on how the query was executed. This information is for human debugging +and its format can change at any time but it can give some insight into the performance +of each part of the query. +** *`tables` (Optional, Record>)*: Tables to use with the LOOKUP operation. The top level key is the table +name and the next level key is the column name. +** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. +It is valid only for the CSV format. +** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, for example `json` or `yaml`. +** *`keep_alive` (Optional, string | -1 | 0)*: The period for which the query and its results are stored in the cluster. +The default period is five days. +When this period expires, the query and its results are deleted, even if the query is still ongoing. 
+If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. +** *`keep_on_completion` (Optional, boolean)*: Indicates whether the query and its results are stored in the cluster. +If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for the request to finish. +By default, the request waits for 1 second for the query results. +If the query completes during this period, results are returned +Otherwise, a query ID is returned that can later be used to retrieve the results. + +[discrete] +==== async_query_delete +Delete an async ES|QL query. +If the query is still running, it is cancelled. +Otherwise, the stored results are deleted. + +If the Elasticsearch security features are enabled, only the following users can use this API to delete a query: + +* The authenticated user that submitted the original query request +* Users with the `cancel_task` cluster privilege + +{ref}/esql-async-query-delete-api.html[Endpoint documentation] +[source,ts] +---- +client.esql.asyncQueryDelete({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. [discrete] ==== async_query_get -Retrieves the results of a previously submitted async query request given its ID. +Get async ES|QL query results. +Get the current status and available results or stored results for an ES|QL asynchronous query. +If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. {ref}/esql-async-query-get-api.html[Endpoint documentation] [source,ts] ---- -client.esql.asyncQueryGet() +client.esql.asyncQueryGet({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +** *`keep_alive` (Optional, string | -1 | 0)*: The period for which the query and its results are stored in the cluster. +When this period expires, the query and its results are deleted, even if the query is still ongoing. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for the request to finish. +By default, the request waits for complete query results. +If the request completes during the period specified in this parameter, complete query results are returned. +Otherwise, the response returns an `is_running` value of `true` and no results. 
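+
+As a rough sketch (the query string and index name are illustrative, and the `id` and `is_running` response fields are assumptions based on the async query behavior described above), submitting a query and fetching its results later might look like this:
+
+[source,ts]
+----
+// Hypothetical example: start an async ES|QL query, then retrieve
+// its results by ID if it did not finish within the initial timeout.
+const submitted = await client.esql.asyncQuery({
+  query: 'FROM my-index | LIMIT 10',
+  wait_for_completion_timeout: '2s'
+})
+
+if (submitted.id != null) {
+  const results = await client.esql.asyncQueryGet({ id: submitted.id })
+  console.log(results.is_running, results.values)
+}
+----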
[discrete] ==== query @@ -4484,9 +5179,9 @@ Using node roles enables ILM to automatically move the indices between data tier Migrating away from custom node attributes routing can be manually performed. This API provides an automated way of performing three out of the four manual steps listed in the migration guide: -1. Stop setting the custom hot attribute on new indices. -2. Remove custom allocation settings from existing ILM policies. -3. Replace custom allocation settings from existing indices with the corresponding tier preference. +. Stop setting the custom hot attribute on new indices. +. Remove custom allocation settings from existing ILM policies. +. Replace custom allocation settings from existing indices with the corresponding tier preference. ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. @@ -4699,6 +5394,22 @@ If specified, the `analyzer` parameter overrides this value. If an array of strings is provided, it is analyzed as a multi-value field. ** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })*: Tokenizer to use to convert text into tokens. +[discrete] +==== cancel_migrate_reindex +Cancel a migration reindex operation. + +Cancel a migration reindex attempt for a data stream or index. +[source,ts] +---- +client.indices.cancelMigrateReindex({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: The index or data stream name + [discrete] ==== clear_cache Clear the cache. @@ -4922,6 +5633,24 @@ Cannot be longer than 255 bytes. Multi-byte characters count towards this limit ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +[discrete] +==== create_from +Create an index from a source index. + +Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. +[source,ts] +---- +client.indices.createFrom({ source, dest }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`source` (string)*: The source index or data stream name +** *`dest` (string)*: The destination index or data stream name +** *`create_from` (Optional, { mappings_override, settings_override, remove_index_blocks })* + [discrete] ==== data_streams_stats Get data stream stats. 
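+
+As a minimal sketch of this stats API (the data stream name is illustrative, and the `name` parameter is assumed optional, with omission targeting all data streams):
+
+[source,ts]
+----
+const stats = await client.indices.dataStreamsStats({ name: 'my-data-stream' })
+----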
@@ -5384,9 +6113,9 @@ Once an index receive no more writes, its shards can be force-merged to a single This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. For example: -``` +---- POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 -``` +---- {ref}/indices-forcemerge.html[Endpoint documentation] [source,ts] @@ -5442,6 +6171,8 @@ such as open,hidden. ==== get_alias Get aliases. Retrieves information for one or more data stream or index aliases. + +{ref}/indices-get-alias.html[Endpoint documentation] [source,ts] ---- client.indices.getAlias({ ... }) @@ -5609,6 +6340,22 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +[discrete] +==== get_migrate_reindex_status +Get the migration reindexing status. + +Get the status of a migration reindex attempt for a data stream or index. +[source,ts] +---- +client.indices.getMigrateReindexStatus({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: The index or data stream name. + [discrete] ==== get_settings Get index settings. @@ -5671,6 +6418,24 @@ To return all index templates, omit this parameter or use a value of `_all` or ` ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +[discrete] +==== migrate_reindex +Reindex legacy backing indices. + +Reindex all legacy backing indices for a data stream. +This operation occurs in a persistent task. +The persistent task ID is returned immediately and the reindexing work is completed in that task. +[source,ts] +---- +client.indices.migrateReindex({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`reindex` (Optional, { mode, source })* + [discrete] ==== migrate_to_data_stream Convert an index alias to a data stream. @@ -5994,7 +6759,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. ** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. -** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: +** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -6578,9 +7343,9 @@ Split an index into a new index with more primary shards. You can do make an index read-only with the following request using the add index block API: -``` +---- PUT /my_source_index/_block/write -``` +---- The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. @@ -6662,36 +7427,6 @@ such as `open,hidden`. ** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. ** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Indicates whether statistics are aggregated at the cluster, index, or shard level. -[discrete] -==== unfreeze -Unfreeze an index. 
-When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. - -{ref}/unfreeze-index-api.html[Endpoint documentation] -[source,ts] ----- -client.indices.unfreeze({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: Identifier for the index. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, string)*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - [discrete] ==== update_aliases Create or update an alias. @@ -6841,12 +7576,78 @@ client.inference.put({ inference_id }) [discrete] ==== stream_inference -Perform streaming inference +Perform streaming inference. +Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. +This API works only with the completion task type. + +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + +This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. + +{ref}/stream-inference-api.html[Endpoint documentation] +[source,ts] +---- +client.inference.streamInference({ inference_id, input }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The unique identifier for the inference endpoint. +** *`input` (string | string[])*: The text on which you want to perform the inference task. +It can be a single string or an array. + +NOTE: Inference endpoints for the completion task type currently only support a single string as input. 
+** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of task that the model performs. + +[discrete] +==== unified_inference +Perform inference on the service using the Unified Schema +[source,ts] +---- +client.inference.unifiedInference({ inference_id, messages }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The inference Id +** *`messages` ({ content, role, tool_call_id, tool_calls }[])*: A list of objects representing the conversation. +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type +** *`model` (Optional, string)*: The ID of the model to use. +** *`max_completion_tokens` (Optional, number)*: The upper bound limit for the number of tokens that can be generated for a completion request. +** *`stop` (Optional, string[])*: A sequence of strings to control when the model should stop generating additional tokens. +** *`temperature` (Optional, float)*: The sampling temperature to use. +** *`tool_choice` (Optional, string | { type, function })*: Controls which tool is called by the model. +** *`tools` (Optional, { type, function }[])*: A list of tools that the model can call. +** *`top_p` (Optional, float)*: Nucleus sampling, an alternative to sampling with temperature. +** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete. + +[discrete] +==== update +Update an inference endpoint. + +Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. + +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + +{ref}/update-inference-api.html[Endpoint documentation] [source,ts] ---- -client.inference.streamInference() +client.inference.update({ inference_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The unique identifier of the inference endpoint. +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of inference task that the model performs. +** *`inference_config` (Optional, { service, service_settings, task_settings })* [discrete] === ingest @@ -7929,10 +8730,10 @@ option, the API returns information for the first hundred data frame analytics jobs. ** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: -1. Contains wildcard expressions and there are no data frame analytics +. Contains wildcard expressions and there are no data frame analytics jobs that match. -2. Contains the `_all` string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. +. Contains the `_all` string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. 
The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. @@ -7963,10 +8764,10 @@ option, the API returns information for the first hundred data frame analytics jobs. ** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: -1. Contains wildcard expressions and there are no data frame analytics +. Contains wildcard expressions and there are no data frame analytics jobs that match. -2. Contains the `_all` string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. +. Contains the `_all` string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. @@ -8001,9 +8802,9 @@ wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. ** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: -1. Contains wildcard expressions and there are no datafeeds that match. -2. Contains the `_all` string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. +. Contains wildcard expressions and there are no datafeeds that match. +. Contains the `_all` string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are @@ -8034,9 +8835,9 @@ wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. ** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: -1. Contains wildcard expressions and there are no datafeeds that match. -2. Contains the `_all` string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. +. Contains wildcard expressions and there are no datafeeds that match. +. Contains the `_all` string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are @@ -8120,9 +8921,9 @@ you do not specify one of these options, the API returns information for all anomaly detection jobs. ** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: -1. Contains wildcard expressions and there are no jobs that match. -2. Contains the _all string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. +. Contains wildcard expressions and there are no jobs that match. +. Contains the _all string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial @@ -8152,9 +8953,9 @@ group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. ** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: -1. 
Contains wildcard expressions and there are no jobs that match. -2. Contains the _all string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. +. Contains wildcard expressions and there are no jobs that match. +. Contains the _all string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial @@ -9129,10 +9930,10 @@ lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. ** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: -1. Contains wildcard expressions and there are no data frame analytics +. Contains wildcard expressions and there are no data frame analytics jobs that match. -2. Contains the _all string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. +. Contains the _all string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. The default value is true, which returns an empty data_frame_analytics array when there are no matches and the subset of results when there are @@ -9794,7 +10595,7 @@ Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: -``` +---- POST my_rollup_index/_delete_by_query { "query": { @@ -9803,7 +10604,7 @@ POST my_rollup_index/_delete_by_query } } } -``` +---- {ref}/rollup-delete-job.html[Endpoint documentation] [source,ts] @@ -9848,8 +10649,8 @@ This API is useful because a rollup job is often configured to rollup only a sub Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine: -1. Does this index have associated rollup data somewhere in the cluster? -2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? +. Does this index have associated rollup data somewhere in the cluster? +. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? {ref}/rollup-get-rollup-caps.html[Endpoint documentation] [source,ts] @@ -9952,7 +10753,7 @@ The following functionality is not available: The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. For example: -``` +---- GET sensor-1,sensor_rollup/_rollup_search { "size": 0, @@ -9964,7 +10765,7 @@ GET sensor-1,sensor_rollup/_rollup_search } } } -``` +---- The rollup search endpoint does two things when the search runs: @@ -10024,9 +10825,9 @@ If you try to stop a job that is already stopped, nothing happens. Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. 
For example:

-```
+----
POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s
-```
+----

The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed.
If the specified time elapses without the job moving to STOPPED, a timeout exception occurs.
@@ -10326,6 +11127,17 @@ Activate a user profile.

Create or update a user profile on behalf of another user.

+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+
+This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name`, `roles`, and the authentication realm.
+For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token.
+
+When updating a profile document, the API enables the document if it was disabled.
+Any updates do not change existing content for either the `labels` or `data` fields.
+
{ref}/security-api-activate-user-profile.html[Endpoint documentation]
[source,ts]
----
@@ -10336,10 +11148,17 @@ client.security.activateUserProfile({ grant_type })

==== Arguments

* *Request (object):*
-** *`grant_type` (Enum("password" | "access_token"))*
-** *`access_token` (Optional, string)*
-** *`password` (Optional, string)*
-** *`username` (Optional, string)*
+** *`grant_type` (Enum("password" | "access_token"))*: The type of grant.
+** *`access_token` (Optional, string)*: The user's Elasticsearch access token or JWT.
+Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration.
+If you specify the `access_token` grant type, this parameter is required.
+It is not valid with other grant types.
+** *`password` (Optional, string)*: The user's password.
+If you specify the `password` grant type, this parameter is required.
+It is not valid with other grant types.
+** *`username` (Optional, string)*: The username that identifies the user.
+If you specify the `password` grant type, this parameter is required.
+It is not valid with other grant types.

[discrete]
==== authenticate
@@ -10504,13 +11323,20 @@ client.security.clearCachedPrivileges({ application })

==== Arguments

* *Request (object):*
-** *`application` (string)*: A list of application names
+** *`application` (string)*: A list of applications.
+To clear all applications, use an asterisk (`*`).
+It does not support other wildcard patterns.

[discrete]
==== clear_cached_realms
Clear the user cache.

-Evict users from the user cache. You can completely clear the cache or evict specific users.
+Evict users from the user cache.
+You can completely clear the cache or evict specific users.
+
+User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request.
+There are realm settings that you can use to configure the user cache.
+For more information, refer to the documentation about controlling the user cache.
{ref}/security-api-clear-cache.html[Endpoint documentation] [source,ts] @@ -10522,8 +11348,11 @@ client.security.clearCachedRealms({ realms }) ==== Arguments * *Request (object):* -** *`realms` (string | string[])*: List of realms to clear -** *`usernames` (Optional, string[])*: List of usernames to clear from the cache +** *`realms` (string | string[])*: A list of realms. +To clear all realms, use an asterisk (`*`). +It does not support other wildcard patterns. +** *`usernames` (Optional, string[])*: A list of the users to clear from the cache. +If you do not specify this parameter, the API evicts all users from the user cache. [discrete] ==== clear_cached_roles @@ -10541,13 +11370,20 @@ client.security.clearCachedRoles({ name }) ==== Arguments * *Request (object):* -** *`name` (string | string[])*: Role name +** *`name` (string | string[])*: A list of roles to evict from the role cache. +To evict all roles, use an asterisk (`*`). +It does not support other wildcard patterns. [discrete] ==== clear_cached_service_tokens Clear service account token caches. Evict a subset of all entries from the service account token caches. +Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. +This API clears matching entries from both caches. + +The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. +The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. {ref}/security-api-clear-service-token-caches.html[Endpoint documentation] [source,ts] @@ -10559,19 +11395,29 @@ client.security.clearCachedServiceTokens({ namespace, service, name }) ==== Arguments * *Request (object):* -** *`namespace` (string)*: An identifier for the namespace -** *`service` (string)*: An identifier for the service name -** *`name` (string | string[])*: A list of service token names +** *`namespace` (string)*: The namespace, which is a top-level grouping of service accounts. +** *`service` (string)*: The name of the service, which must be unique within its namespace. +** *`name` (string | string[])*: A list of token names to evict from the service account token caches. +Use a wildcard (`*`) to evict all tokens that belong to a service account. +It does not support other wildcard patterns. [discrete] ==== create_api_key Create an API key. Create an API key for access without requiring basic authentication. + +IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. +If you specify privileges, the API returns an error. + A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. + NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. +The API keys are created by the Elasticsearch API key service, which is automatically enabled. +To configure or turn off the API key service, refer to API key service setting documentation. + {ref}/security-api-create-api-key.html[Endpoint documentation] [source,ts] ---- @@ -10582,9 +11428,18 @@ client.security.createApiKey({ ... }) ==== Arguments * *Request (object):* -** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. By default, API keys never expire. 
-** *`name` (Optional, string)*: Specifies the name for this API key. -** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API. +** *`expiration` (Optional, string | -1 | 0)*: The expiration time for the API key. +By default, API keys never expire. +** *`name` (Optional, string)*: A name for the API key. +** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. +When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. +If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. +The structure of role descriptor is the same as the request for the create role API. +For more details, refer to the create or update roles API. + +NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. +In this case, you must explicitly specify a role descriptor with no privileges. +The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. ** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -10637,6 +11492,9 @@ Create a service account token. Create a service accounts token for access without requiring basic authentication. +NOTE: Service account tokens never expire. +You must actively delete them if they are no longer needed. + {ref}/security-api-create-service-token.html[Endpoint documentation] [source,ts] ---- @@ -10647,14 +11505,22 @@ client.security.createServiceToken({ namespace, service }) ==== Arguments * *Request (object):* -** *`namespace` (string)*: An identifier for the namespace -** *`service` (string)*: An identifier for the service name -** *`name` (Optional, string)*: An identifier for the token name +** *`namespace` (string)*: The name of the namespace, which is a top-level grouping of service accounts. +** *`service` (string)*: The name of the service. +** *`name` (Optional, string)*: The name for the service account token. +If omitted, a random name will be generated. + +Token names must be at least one and no more than 256 characters. +They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. + +NOTE: Token names must be unique in the context of the associated service account. 
+They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== delegate_pki
Delegate PKI authentication.
+
This API implements the exchange of an X509Certificate chain for an Elasticsearch access token.
The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`.
A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm.

{ref}/security-api-delegate-pki-authentication.html[Endpoint documentation]
[source,ts]
----
client.security.delegatePki({ x509_certificate_chain })
----

[discrete]
==== Arguments

* *Request (object):*
** *`x509_certificate_chain` (string[])*: The X509Certificate chain, which is represented as an ordered string array.
Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding.
+The first element is the target certificate that contains the subject distinguished name that is requesting access.
+This may be followed by additional certificates; each subsequent certificate is used to certify the previous one.

[discrete]
==== delete_privileges
Delete application privileges.

+To use this API, you must have one of the following privileges:
+
+* The `manage_security` cluster privilege (or a greater privilege such as `all`).
+* The "Manage Application Privileges" global privilege for the application being referenced in the request.
+
{ref}/security-api-delete-privilege.html[Endpoint documentation]
[source,ts]
----
client.security.deletePrivileges({ application, name })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`application` (string)*: Application name
-** *`name` (string | string[])*: Privilege name
+** *`application` (string)*: The name of the application.
+Application privileges are always associated with exactly one application.
+** *`name` (string | string[])*: The name of the privilege.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== delete_role
Delete roles.

Delete roles in the native realm.
+The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
+The delete roles API cannot remove roles that are defined in roles files.

{ref}/security-api-delete-role.html[Endpoint documentation]
[source,ts]
----
client.security.deleteRole({ name })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`name` (string)*: Role name
+** *`name` (string)*: The name of the role.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== delete_role_mapping
Delete role mappings.

+Role mappings define which roles are assigned to each user.
+The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files.
+The delete role mappings API cannot remove role mappings that are defined in role mapping files.
+
{ref}/security-api-delete-role-mapping.html[Endpoint documentation]
[source,ts]
----
client.security.deleteRoleMapping({ name })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`name` (string)*: Role-mapping name
+** *`name` (string)*: The distinct name that identifies the role mapping.
+The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== delete_service_token
Delete service account tokens.

Delete service account tokens for a service in a specified namespace.

{ref}/security-api-delete-service-token.html[Endpoint documentation]
[source,ts]
----
client.security.deleteServiceToken({ namespace, service, name })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`namespace` (string)*: An identifier for the namespace
-** *`service` (string)*: An identifier for the service name
-** *`name` (string)*: An identifier for the token name
+** *`namespace` (string)*: The namespace, which is a top-level grouping of service accounts.
+** *`service` (string)*: The service name.
+** *`name` (string)*: The name of the service account token.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== delete_user
Delete users.

Delete users from the native realm.

{ref}/security-api-delete-user.html[Endpoint documentation]
[source,ts]
----
client.security.deleteUser({ username })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`username` (string)*: username
+** *`username` (string)*: An identifier for the user.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== disable_user
Disable users.

Disable users in the native realm.
+By default, when you create users, they are enabled.
+You can use this API to revoke a user's access to Elasticsearch.

{ref}/security-api-disable-user.html[Endpoint documentation]
[source,ts]
----
client.security.disableUser({ username })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`username` (string)*: The username of the user to disable
+** *`username` (string)*: An identifier for the user.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== disable_user_profile
Disable a user profile.

Disable user profiles so that they are not visible in user profile searches.

+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+
+When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it's not visible in these searches.
+To re-enable a disabled user profile, use the enable user profile API.
+
{ref}/security-api-disable-user-profile.html[Endpoint documentation]
[source,ts]
----
client.security.disableUserProfile({ uid })
----

[discrete]
==== Arguments

* *Request (object):*
** *`uid` (string)*: Unique identifier for the user profile.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation
-visible to search, if 'wait_for' then wait for a refresh to make this operation
-visible to search, if 'false' do nothing with refreshes.
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search.
+If 'wait_for', it waits for a refresh to make this operation visible to search.
+If 'false', it does nothing with refreshes.

[discrete]
==== enable_user
Enable users.

Enable users in the native realm.
+By default, when you create users, they are enabled.

{ref}/security-api-enable-user.html[Endpoint documentation]
[source,ts]
----
client.security.enableUser({ username })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`username` (string)*: The username of the user to enable
+** *`username` (string)*: An identifier for the user.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== enable_user_profile
Enable a user profile.

Enable user profiles to make them visible in user profile searches.

+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+
+When you activate a user profile, it's automatically enabled and visible in user profile searches.
+If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.
+
{ref}/security-api-enable-user-profile.html[Endpoint documentation]
[source,ts]
----
client.security.enableUserProfile({ uid })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`uid` (string)*: Unique identifier for the user profile.
+** *`uid` (string)*: A unique identifier for the user profile.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation
-visible to search, if 'wait_for' then wait for a refresh to make this operation
-visible to search, if 'false' do nothing with refreshes.
+visible to search.
+If 'wait_for', it waits for a refresh to make this operation visible to search.
+If 'false', nothing is done with refreshes.

[discrete]
==== enroll_kibana
Enroll Kibana.

Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster.

+NOTE: This API is currently intended for internal use only by Kibana.
+Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled.
+
{ref}/security-api-kibana-enrollment.html[Endpoint documentation]
[source,ts]
----
client.security.enrollKibana()
----

[discrete]
==== enroll_node
Enroll a node.
Enroll a new node to allow it to join an existing cluster with security features enabled. +The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. +The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. + {ref}/security-api-node-enrollment.html[Endpoint documentation] [source,ts] ---- @@ -10934,6 +11837,11 @@ client.security.getBuiltinPrivileges() ==== get_privileges Get application privileges. +To use this API, you must have one of the following privileges: + +* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). +* The "Manage Application Privileges" global privilege for the application being referenced in the request. + {ref}/security-api-get-privileges.html[Endpoint documentation] [source,ts] ---- @@ -10944,14 +11852,19 @@ client.security.getPrivileges({ ... }) ==== Arguments * *Request (object):* -** *`application` (Optional, string)*: Application name -** *`name` (Optional, string | string[])*: Privilege name +** *`application` (Optional, string)*: The name of the application. +Application privileges are always associated with exactly one application. +If you do not specify this parameter, the API returns information about all privileges for all applications. +** *`name` (Optional, string | string[])*: The name of the privilege. +If you do not specify this parameter, the API returns information about all privileges for the requested application. [discrete] ==== get_role Get roles. Get roles in the native realm. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The get roles API cannot retrieve roles that are defined in roles files. {ref}/security-api-get-role.html[Endpoint documentation] [source,ts] @@ -10963,7 +11876,9 @@ client.security.getRole({ ... }) ==== Arguments * *Request (object):* -** *`name` (Optional, string | string[])*: The name of the role. You can specify multiple roles as a list. If you do not specify this parameter, the API returns information about all roles. +** *`name` (Optional, string | string[])*: The name of the role. +You can specify multiple roles as a list. +If you do not specify this parameter, the API returns information about all roles. [discrete] ==== get_role_mapping @@ -10991,6 +11906,8 @@ Get service accounts. Get a list of service accounts that match the provided path parameters. +NOTE: Currently, only the `elastic/fleet-server` service account is available. + {ref}/security-api-get-service-accounts.html[Endpoint documentation] [source,ts] ---- @@ -11001,13 +11918,23 @@ client.security.getServiceAccounts({ ... }) ==== Arguments * *Request (object):* -** *`namespace` (Optional, string)*: Name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter. -** *`service` (Optional, string)*: Name of the service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. +** *`namespace` (Optional, string)*: The name of the namespace. +Omit this parameter to retrieve information about all service accounts. +If you omit this parameter, you must also omit the `service` parameter. +** *`service` (Optional, string)*: The service name. 
+Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. [discrete] ==== get_service_credentials Get service account credentials. +To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`). + +The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster. + +NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. +Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens. + {ref}/security-api-get-service-credentials.html[Endpoint documentation] [source,ts] ---- @@ -11018,13 +11945,19 @@ client.security.getServiceCredentials({ namespace, service }) ==== Arguments * *Request (object):* -** *`namespace` (string)*: Name of the namespace. -** *`service` (string)*: Name of the service name. +** *`namespace` (string)*: The name of the namespace. +** *`service` (string)*: The service name. [discrete] ==== get_settings Get security index settings. + Get the user-configurable settings for the security internal index (`.security` and associated indices). +Only a subset of the index settings — those that are user-configurable—will be shown. +This includes: + +* `index.auto_expand_replicas` +* `index.number_of_replicas` {ref}/security-api-get-settings.html[Endpoint documentation] [source,ts] @@ -11044,6 +11977,17 @@ If no response is received before the timeout expires, the request fails and ret Get a token. Create a bearer token for access without requiring basic authentication. +The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. +Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting. +When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface. + +The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body. + +A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available. + +The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. +That time period is defined by the `xpack.security.authc.token.timeout` setting. +If you want to invalidate a token immediately, you can do so by using the invalidate token API. {ref}/security-api-get-token.html[Endpoint documentation] [source,ts] @@ -11055,12 +11999,22 @@ client.security.getToken({ ... }) ==== Arguments * *Request (object):* -** *`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))* -** *`scope` (Optional, string)* -** *`password` (Optional, string)* -** *`kerberos_ticket` (Optional, string)* -** *`refresh_token` (Optional, string)* -** *`username` (Optional, string)* +** *`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))*: The type of grant. +Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. +** *`scope` (Optional, string)*: The scope of the token. 
+Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. +** *`password` (Optional, string)*: The user's password. +If you specify the `password` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +** *`kerberos_ticket` (Optional, string)*: The base64 encoded kerberos ticket. +If you specify the `_kerberos` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +** *`refresh_token` (Optional, string)*: The string that was returned when you created the token, which enables you to extend its life. +If you specify the `refresh_token` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +** *`username` (Optional, string)*: The username that identifies the user. +If you specify the `password` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. [discrete] ==== get_user @@ -11079,12 +12033,17 @@ client.security.getUser({ ... }) * *Request (object):* ** *`username` (Optional, string | string[])*: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users. -** *`with_profile_uid` (Optional, boolean)*: If true will return the User Profile ID for a user, if any. +** *`with_profile_uid` (Optional, boolean)*: Determines whether to retrieve the user profile UID, if it exists, for the users. [discrete] ==== get_user_privileges Get user privileges. +Get the security privileges for the logged in user. +All users can use this API, but only to determine their own privileges. +To check the privileges of other users, you must use the run as feature. +To check whether a user has a specific list of privileges, use the has privileges API. + {ref}/security-api-get-user-privileges.html[Endpoint documentation] [source,ts] ---- @@ -11105,6 +12064,10 @@ Get a user profile. Get a user's profile using the unique profile ID. +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + {ref}/security-api-get-user-profile.html[Endpoint documentation] [source,ts] ---- @@ -11116,9 +12079,9 @@ client.security.getUserProfile({ uid }) * *Request (object):* ** *`uid` (string | string[])*: A unique identifier for the user profile. -** *`data` (Optional, string | string[])*: List of filters for the `data` field of the profile document. -To return all content use `data=*`. To return a subset of content -use `data=` to retrieve content nested under the specified ``. +** *`data` (Optional, string | string[])*: A list of filters for the `data` field of the profile document. +To return all content use `data=*`. +To return a subset of content use `data=` to retrieve content nested under the specified ``. By default returns no `data` content. [discrete] @@ -11127,12 +12090,19 @@ Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. 
-The caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created. -It is not possible to use this API to create an API key without that user’s credentials. +The caller must have authentication credentials for the user on whose behalf the API key will be created. +It is not possible to use this API to create an API key without that user's credentials. +The supported user authentication credential types are: + +* username and password +* Elasticsearch access tokens +* JWTs + The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. +The API keys are created by the Elasticsearch API key service, which is automatically enabled. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. @@ -11149,15 +12119,16 @@ client.security.grantApiKey({ api_key, grant_type }) ==== Arguments * *Request (object):* -** *`api_key` ({ name, expiration, role_descriptors, metadata })*: Defines the API key. +** *`api_key` ({ name, expiration, role_descriptors, metadata })*: The API key. ** *`grant_type` (Enum("access_token" | "password"))*: The type of grant. Supported grant types are: `access_token`, `password`. -** *`access_token` (Optional, string)*: The user’s access token. +** *`access_token` (Optional, string)*: The user's access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. ** *`username` (Optional, string)*: The user name that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. -** *`password` (Optional, string)*: The user’s password. If you specify the `password` grant type, this parameter is required. +** *`password` (Optional, string)*: The user's password. +If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. ** *`run_as` (Optional, string)*: The name of the user to be impersonated. @@ -11166,6 +12137,8 @@ It is not valid with other grant types. Check user privileges. Determine whether the specified user has a specified list of privileges. +All users can use this API, but only to determine their own privileges. +To check the privileges of other users, you must use the run as feature. {ref}/security-api-has-privileges.html[Endpoint documentation] [source,ts] @@ -11188,6 +12161,9 @@ Check user profile privileges. Determine whether the users associated with the specified user profile IDs have all the requested privileges. +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. 
+ {ref}/security-api-has-privileges-user-profile.html[Endpoint documentation] [source,ts] ---- @@ -11199,7 +12175,7 @@ client.security.hasPrivilegesUserProfile({ uids, privileges }) * *Request (object):* ** *`uids` (string[])*: A list of profile IDs. The privileges are checked for associated users of the profiles. -** *`privileges` ({ application, cluster, index })* +** *`privileges` ({ application, cluster, index })*: An object containing all the privileges to be checked. [discrete] ==== invalidate_api_key @@ -11207,11 +12183,15 @@ Invalidate API keys. This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. -The `manage_api_key` privilege allows deleting any API keys. -The `manage_own_api_key` only allows deleting API keys that are owned by the user. + +To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. +The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. +The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. +The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: + - Set the parameter `owner=true`. -- Or, set both `username` and `realm_name` to match the user’s identity. +- Or, set both `username` and `realm_name` to match the user's identity. - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. {ref}/security-api-invalidate-api-key.html[Endpoint documentation] @@ -11229,12 +12209,14 @@ client.security.invalidateApiKey({ ... }) This parameter cannot be used with any of `name`, `realm_name`, or `username`. ** *`name` (Optional, string)*: An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`. -** *`owner` (Optional, boolean)*: Can be used to query API keys owned by the currently authenticated user. +** *`owner` (Optional, boolean)*: Query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. + +NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. ** *`realm_name` (Optional, string)*: The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. ** *`username` (Optional, string)*: The username of a user. -This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. +This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. [discrete] ==== invalidate_token @@ -11244,9 +12226,14 @@ The access tokens returned by the get token API have a finite period of time for After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. -The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. 
+The refresh tokens returned by the get token API are only valid for 24 hours.
+They can also be used exactly once.
If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API.

+NOTE: While all parameters are optional, at least one of them is required.
+More specifically, either one of `token` or `refresh_token` parameters is required.
+If none of these two are specified, then `realm_name` and/or `username` need to be specified.
+
{ref}/security-api-invalidate-token.html[Endpoint documentation]
[source,ts]
----
client.security.invalidateToken({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`token` (Optional, string)*
-** *`refresh_token` (Optional, string)*
-** *`realm_name` (Optional, string)*
-** *`username` (Optional, string)*
+** *`token` (Optional, string)*: An access token.
+This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used.
+** *`refresh_token` (Optional, string)*: A refresh token.
+This parameter cannot be used if any of `token`, `realm_name`, or `username` are used.
+** *`realm_name` (Optional, string)*: The name of an authentication realm.
+This parameter cannot be used with either `refresh_token` or `token`.
+** *`username` (Optional, string)*: The username of a user.
+This parameter cannot be used with either `refresh_token` or `token`.

[discrete]
==== oidc_authenticate
Authenticate OpenID Connect.
+
Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

{ref}/security-api-oidc-authenticate.html[Endpoint documentation]
[source,ts]
----
client.security.oidcAuthenticate({ nonce, redirect_uri, realm, state })
----

[discrete]
==== Arguments

* *Request (object):*
** *`nonce` (string)*: Associate a client session with an ID token and mitigate replay attacks.
** *`redirect_uri` (string)*: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication.
** *`realm` (string)*: The name of the OpenID Connect realm.
This property is useful in cases where multiple realms are defined.

[discrete]
==== oidc_logout
Logout of OpenID Connect.
+
Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API.

If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout.

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

{ref}/security-api-oidc-logout.html[Endpoint documentation]
[source,ts]
----
client.security.oidcLogout({ access_token })
----

[discrete]
==== oidc_prepare_authentication
Prepare OpenID connect authentication.
+
Create an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch.

The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process.

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

{ref}/security-api-oidc-prepare-authentication.html[Endpoint documentation]
[source,ts]
----
client.security.oidcPrepareAuthentication({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
If the caller of the API does not provide a value, Elasticsearch will generate one.

[discrete]
==== put_privileges
Create or update application privileges.

+To use this API, you must have one of the following privileges:
+
+* The `manage_security` cluster privilege (or a greater privilege such as `all`).
+* The "Manage Application Privileges" global privilege for the application being referenced in the request.
+
+Application names are formed from a prefix, with an optional suffix that conform to the following rules:
+
+* The prefix must begin with a lowercase ASCII letter.
+* The prefix must contain only ASCII letters or digits.
+* The prefix must be at least 3 characters long.
+* If the suffix exists, it must begin with either a dash `-` or `_`.
+* The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `*`. +* No part of the name can contain whitespace. + +Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`. + +Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`. + {ref}/security-api-put-privileges.html[Endpoint documentation] [source,ts] ---- @@ -11386,6 +12398,9 @@ client.security.putRole({ name }) ** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. ** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries. ** *`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])*: A list of remote indices permissions entries. + +NOTE: Remote indices are effective for remote clusters configured with the API key based model. +They have no effect for remote clusters configured with the certificate based model. ** *`remote_cluster` (Optional, { clusters, privileges }[])*: A list of remote cluster permissions entries. ** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. ** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. @@ -11401,9 +12416,27 @@ Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. -This API does not create roles. Rather, it maps users to existing roles. +NOTE: This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files. +**Role templates** + +The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. +For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. +The `roles` field is used for this purpose. + +For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. +The `role_templates` field is used for this purpose. + +NOTE: To use role templates successfully, the relevant scripting feature must be enabled. +Otherwise, all attempts to create a role mapping with role templates fail. + +All of the user fields that are available in the role mapping rules are also available in the role templates. +Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated. + +By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. 
+If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names.
+
{ref}/security-api-put-role-mapping.html[Endpoint documentation]
[source,ts]
----
client.security.putRoleMapping({ name })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`name` (string)*: Role-mapping name
-** *`enabled` (Optional, boolean)*
-** *`metadata` (Optional, Record)*
-** *`roles` (Optional, string[])*
-** *`role_templates` (Optional, { format, template }[])*
-** *`rules` (Optional, { any, all, field, except })*
+** *`name` (string)*: The distinct name that identifies the role mapping.
+The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way.
+** *`enabled` (Optional, boolean)*: Mappings that have `enabled` set to `false` are ignored when role mapping is performed.
+** *`metadata` (Optional, Record)*: Additional metadata that helps define which roles are assigned to each user.
+Within the metadata object, keys beginning with `_` are reserved for system usage.
+** *`roles` (Optional, string[])*: A list of role names that are granted to the users that match the role mapping rules.
+Exactly one of `roles` or `role_templates` must be specified.
+** *`role_templates` (Optional, { format, template }[])*: A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules.
+Exactly one of `roles` or `role_templates` must be specified.
+** *`rules` (Optional, { any, all, field, except })*: The rules that determine which users should be matched by the mapping.
+A rule is a logical condition that is expressed by using a JSON DSL.
** *`run_as` (Optional, string[])*
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== put_user
Create or update users.

+Add and update users in the native realm.
A password is required for adding a new user but is optional when updating an existing user.
-To change a user’s password without updating any other fields, use the change password API.
+To change a user's password without updating any other fields, use the change password API.

{ref}/security-api-put-user.html[Endpoint documentation]
[source,ts]
----
client.security.putUser({ username })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`username` (string)*: The username of the User
-** *`email` (Optional, string | null)*
-** *`full_name` (Optional, string | null)*
-** *`metadata` (Optional, Record)*
-** *`password` (Optional, string)*
-** *`password_hash` (Optional, string)*
-** *`roles` (Optional, string[])*
-** *`enabled` (Optional, boolean)*
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+** *`username` (string)*: An identifier for the user.
+
+NOTE: Usernames must be at least 1 and no more than 507 characters.
+They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. +Leading or trailing whitespace is not allowed. +** *`email` (Optional, string | null)*: The email of the user. +** *`full_name` (Optional, string | null)*: The full name of the user. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the user. +** *`password` (Optional, string)*: The user's password. +Passwords must be at least 6 characters long. +When adding a user, one of `password` or `password_hash` is required. +When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password +** *`password_hash` (Optional, string)*: A hash of the user's password. +This must be produced using the same hashing algorithm as has been configured for password storage. +For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. +Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. +The `password` parameter and the `password_hash` parameter cannot be used in the same request. +** *`roles` (Optional, string[])*: A set of roles the user has. +The roles determine the user's access permissions. +To create a user without any roles, specify an empty list (`[]`). +** *`enabled` (Optional, boolean)*: Specifies whether the user is enabled. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: Valid values are `true`, `false`, and `wait_for`. +These values have the same meaning as in the index API, but the default value for this API is true. [discrete] ==== query_api_keys Find API keys with a query. -Get a paginated list of API keys and their information. You can optionally filter the results with a query. +Get a paginated list of API keys and their information. +You can optionally filter the results with a query. + +To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. +If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. +If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. {ref}/security-api-query-api-key.html[Endpoint documentation] [source,ts] @@ -11477,25 +12535,39 @@ The query supports a subset of query types, including `match_all`, `bool`, `term `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. -** *`from` (Optional, number)*: Starting document offset. -By default, you cannot page through more than 10,000 hits using the from and size parameters. + +NOTE: The queryable string values associated with API keys are internally mapped as keywords. +Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. +Such a match query is hence equivalent to a `term` query. +** *`from` (Optional, number)*: The starting document offset. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. 
To page through more hits, use the `search_after` parameter.
-** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Other than `id`, all public fields of an API key are eligible for sorting.
+** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: The sort definition.
+Other than `id`, all public fields of an API key are eligible for sorting.
In addition, sort can also be applied to the `_doc` field to sort by index order.
** *`size` (Optional, number)*: The number of hits to return.
+It must not be negative.
+The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results.
By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
To page through more hits, use the `search_after` parameter.
-** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Search after definition
+** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: The search after definition.
** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors associated with the API key.
-An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors.
-** *`with_profile_uid` (Optional, boolean)*: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists.
+An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it).
+An API key cannot retrieve any API key's limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges.
+** *`with_profile_uid` (Optional, boolean)*: Determines whether to also retrieve the profile UID for the API key owner principal.
+If it exists, the profile UID is returned under the `profile_uid` response field for each API key.
** *`typed_keys` (Optional, boolean)*: Determines whether aggregation names are prefixed by their respective types in the response.

[discrete]
==== query_role
Find roles with a query.

-Get roles in a paginated manner. You can optionally filter the results with a query.
+Get roles in a paginated manner.
+The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
+The query roles API does not retrieve roles that are defined in roles files, nor built-in ones.
+You can optionally filter the results with a query.
+Also, the results can be paginated and sorted.

{ref}/security-api-query-role.html[Endpoint documentation]
[source,ts]
----
client.security.queryRole({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which roles to return.
If the query parameter is missing, it is equivalent to a `match_all` query.
The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`,
`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`.
You can query the following information associated with roles: `name`, `description`, `metadata`,
-`applications.application`, `applications.privileges`, `applications.resources`.
+`applications.application`, `applications.privileges`, and `applications.resources`.
+** *`from` (Optional, number)*: The starting document offset.
+It must not be negative.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
To page through more hits, use the `search_after` parameter.
-** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: All public fields of a role are eligible for sorting.
+** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: The sort definition.
+All public fields of a role are eligible for sorting.
In addition, sort can also be applied to the `_doc` field to sort by index order.
** *`size` (Optional, number)*: The number of hits to return.
+It must not be negative.
By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
To page through more hits, use the `search_after` parameter.
-** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Search after definition
+** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: The search after definition.

[discrete]
==== query_user
Find users with a query.

Get information for users in a paginated manner.
You can optionally filter the results with a query.

+NOTE: As opposed to the get user API, built-in users are excluded from the result.
+This API is only for native users.
+
{ref}/security-api-query-user.html[Endpoint documentation]
[source,ts]
----
client.security.queryUser({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which users to return.
If the query parameter is missing, it is equivalent to a `match_all` query.
The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`,
`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`.
-You can query the following information associated with user: `username`, `roles`, `enabled`
+You can query the following information associated with a user: `username`, `roles`, `enabled`, `full_name`, and `email`.
+** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: The search after definition.
+** *`with_profile_uid` (Optional, boolean)*: Determines whether to retrieve the user profile UID, if it exists, for the users.

[discrete]
==== saml_authenticate
Authenticate SAML.

-Submits a SAML response message to Elasticsearch for consumption.
+Submit a SAML response message to Elasticsearch for consumption.
+
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+The SAML message that is submitted can be:
+
+* A response to a SAML authentication request that was previously created using the SAML prepare authentication API.
+* An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow.
+
+In either case, the SAML message needs to be a base64 encoded XML document with a root element of `<Response>`.
+
+After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.
+This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.

{ref}/security-api-saml-authenticate.html[Endpoint documentation]
[source,ts]
----
client.security.samlAuthenticate({ content, ids })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`content` (string)*: The SAML response as it was sent by the user’s browser, usually a Base64 encoded XML document.
-** *`ids` (string | string[])*: A json array with all the valid SAML Request Ids that the caller of the API has for the current user.
+** *`content` (string)*: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document.
+** *`ids` (string | string[])*: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
** *`realm` (Optional, string)*: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined.

[discrete]
==== saml_complete_logout
Logout of SAML completely.

Verifies the logout response sent from the SAML IdP.

+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout.
+This API verifies the response by ensuring the content is relevant and validating its signature.
+An empty response is returned if the verification process is successful.
+The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding.
+The caller of this API must prepare the request accordingly so that this API can handle either of them.
+
{ref}/security-api-saml-complete-logout.html[Endpoint documentation]
[source,ts]
----
client.security.samlCompleteLogout({ realm, ids })
----

[discrete]
==== Arguments

* *Request (object):*
** *`realm` (string)*: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response.
+** *`ids` (string | string[])*: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
** *`query_string` (Optional, string)*: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI.
** *`content` (Optional, string)*: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response.

[discrete]
==== saml_invalidate
Invalidate SAML.

-Submits a SAML LogoutRequest message to Elasticsearch for consumption.
+Submit a SAML LogoutRequest message to Elasticsearch for consumption.
+
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+The logout request comes from the SAML IdP during an IdP initiated Single Logout.
+The custom web application can use this API to have Elasticsearch process the `LogoutRequest`.
+After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message.
+Thus the user can be redirected back to their IdP.

{ref}/security-api-saml-invalidate.html[Endpoint documentation]
[source,ts]
----
client.security.samlInvalidate({ query_string })
----

[discrete]
==== Arguments

* *Request (object):*
** *`query_string` (string)*: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout.
-This query should include a single parameter named SAMLRequest that contains a SAML logout request that is deflated and Base64 encoded.
-If the SAML IdP has signed the logout request, the URL should include two extra parameters named SigAlg and Signature that contain the algorithm used for the signature and the signature value itself.
-In order for Elasticsearch to be able to verify the IdP’s signature, the value of the query_string field must be an exact match to the string provided by the browser.
+This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded.
+If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself.
+In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser.
The client application must not attempt to parse or process the string in any way.
-** *`acs` (Optional, string)*: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the realm parameter.
-** *`realm` (Optional, string)*: The name of the SAML realm in Elasticsearch the configuration. You must specify either this parameter or the acs parameter.
+** *`acs` (Optional, string)*: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter.
+** *`realm` (Optional, string)*: The name of the SAML realm in Elasticsearch.
You must specify either this parameter or the `acs` parameter.

[discrete]
==== saml_logout
Logout of SAML.

Submits a request to invalidate an access token and refresh token.

+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+This API invalidates the tokens that were generated for a user by the SAML authenticate API.
+If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).
+
{ref}/security-api-saml-logout.html[Endpoint documentation]
[source,ts]
----
client.security.samlLogout({ token })
----

[discrete]
==== Arguments

* *Request (object):*
** *`token` (string)*: The access token that was returned as a response to calling the SAML authenticate API.
-Alternatively, the most recent token that was received after refreshing the original one by using a refresh_token.
+Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`.
** *`refresh_token` (Optional, string)*: The refresh token that was returned as a response to calling the SAML authenticate API.
Alternatively, the most recent refresh token that was received after refreshing the original access token.

[discrete]
==== saml_prepare_authentication
Prepare SAML authentication.

-Creates a SAML authentication request (``) as a URL string, based on the configuration of the respective SAML realm in Elasticsearch.
+Create a SAML authentication request (`<AuthnRequest>`) as a URL string based on the configuration of the respective SAML realm in Elasticsearch.
+
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+This API returns a URL pointing to the SAML Identity Provider.
+You can use the URL to redirect the browser of the user in order to continue the authentication process.
+The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded.
+If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`.
+These parameters contain the algorithm used for the signature and the signature value itself.
+It also returns a random string that uniquely identifies this SAML Authentication request.
+The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.

{ref}/security-api-saml-prepare-authentication.html[Endpoint documentation]
[source,ts]
----
client.security.samlPrepareAuthentication({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`acs` (Optional, string)*: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch.
-The realm is used to generate the authentication request. You must specify either this parameter or the realm parameter.
+The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter.
** *`realm` (Optional, string)*: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request.
-You must specify either this parameter or the acs parameter.
-** *`relay_state` (Optional, string)*: A string that will be included in the redirect URL that this API returns as the RelayState query parameter.
+You must specify either this parameter or the `acs` parameter.
+** *`relay_state` (Optional, string)*: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter.
If the Authentication Request is signed, this value is used as part of the signature computation.

[discrete]
==== saml_service_provider_metadata
Create SAML service provider metadata.

Generate SAML metadata for a SAML 2.0 Service Provider.

+The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file.
+This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.
+
{ref}/security-api-saml-sp-metadata.html[Endpoint documentation]
[source,ts]
----
client.security.samlServiceProviderMetadata({ realm_name })
----

[discrete]
==== suggest_user_profiles
Suggest a user profile.

Get suggestions for user profiles that match specified search criteria.

+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+
{ref}/security-api-suggest-user-profile.html[Endpoint documentation]
[source,ts]
----
client.security.suggestUserProfiles({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
-** *`name` (Optional, string)*: Query string used to match name-related fields in user profile documents.
+** *`name` (Optional, string)*: A query string used to match name-related fields in user profile documents.
Name-related fields are the user's `username`, `full_name`, and `email`.
-** *`size` (Optional, number)*: Number of profiles to return.
-** *`data` (Optional, string | string[])*: List of filters for the `data` field of the profile document.
-To return all content use `data=*`. To return a subset of content
-use `data=` to retrieve content nested under the specified ``.
-By default returns no `data` content.
+** *`size` (Optional, number)*: The number of profiles to return.
+** *`data` (Optional, string | string[])*: A list of filters for the `data` field of the profile document.
+To return all content use `data=*`.
+To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`.
+By default, the API returns no `data` content.
+It is an error to specify `data` as both the query parameter and the request body field.
** *`hint` (Optional, { uids, labels })*: Extra search criteria to improve relevance of the suggestion result.
Profiles matching the specified hint are ranked higher in the response.
-Profiles not matching the hint don't exclude the profile from the response
-as long as the profile matches the `name` field query.
+Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query.

[discrete]
==== update_api_key
Update an API key.

-Updates attributes of an existing API key.
+Update attributes of an existing API key.
+This API supports updates to an API key's access scope, expiration, and metadata.
+
+To use this API, you must have at least the `manage_own_api_key` cluster privilege.
Users can only update API keys that they created or that were granted to them.
-Use this API to update API keys created by the create API Key or grant API Key APIs.
-If you need to apply the same update to many API keys, you can use bulk update API Keys to reduce overhead.
-It’s not possible to update expired API keys, or API keys that have been invalidated by invalidate API Key.
-This API supports updates to an API key’s access scope and metadata.
-The access scope of an API key is derived from the `role_descriptors` you specify in the request, and a snapshot of the owner user’s permissions at the time of the request.
-The snapshot of the owner’s permissions is updated automatically on every call.
-If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope.
-This change can occur if the owner user’s permissions have changed since the API key was created or last modified. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.
-IMPORTANT: It’s not possible to use an API key as the authentication credential for this API.
-To update an API key, the owner user’s credentials are required.
+
+IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user's credentials are required.
+
+Use this API to update API keys created by the create API key or grant API Key APIs.
+If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead.
+It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.
+
+The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.
+The snapshot of the owner's permissions is updated automatically on every call.
+
+IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope.
+This change can occur if the owner user's permissions have changed since the API key was created or last modified.

{ref}/security-api-update-api-key.html[Endpoint documentation]
[source,ts]
----
client.security.updateApiKey({ id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`id` (string)*: The ID of the API key to update.
-** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API.
-** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage.
-** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key.
+** *`role_descriptors` (Optional, Record)*: The role descriptors to assign to this API key.
+The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. +The structure of a role descriptor is the same as the request for the create API keys API. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. +It supports a nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. +When specified, this value fully replaces the metadata previously associated with the API key. +** *`expiration` (Optional, string | -1 | 0)*: The expiration time for the API key. +By default, API keys never expire. +This property can be omitted to leave the expiration unchanged. [discrete] ==== update_cross_cluster_api_key @@ -11750,6 +12901,20 @@ Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. +To use this API, you must have at least the `manage_security` cluster privilege. +Users can only update API keys that they created. +To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. + +IMPORTANT: It's not possible to use an API key as the authentication credential for this API. +To update an API key, the owner user's credentials are required. + +It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. + +This API supports updates to an API key's access scope, metadata, and expiration. +The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. + +NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. + {ref}/security-api-update-cross-cluster-api-key.html[Endpoint documentation] [source,ts] ---- @@ -11765,7 +12930,7 @@ client.security.updateCrossClusterApiKey({ id, access }) The access is composed of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access. -** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. +** *`expiration` (Optional, string | -1 | 0)*: The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the value unchanged. ** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. @@ -11775,9 +12940,13 @@ When specified, this information fully replaces metadata previously associated w [discrete] ==== update_settings Update security index settings. -Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified, for example `index.auto_expand_replicas` and `index.number_of_replicas`. -If a specific index is not in use on the system and settings are provided for it, the request will be rejected. 
This API does not yet support configuring the settings for indices before they are in use. +Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. + +NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. + +If a specific index is not in use on the system and settings are provided for it, the request will be rejected. +This API does not yet support configuring the settings for indices before they are in use. {ref}/security-api-update-settings.html[Endpoint documentation] [source,ts] @@ -11803,6 +12972,21 @@ Update user profile data. Update specific data for the user profile that is associated with a unique ID. +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + +To use this API, you must have one of the following privileges: + +* The `manage_user_profile` cluster privilege. +* The `update_profile_data` global privilege for the namespaces that are referenced in the request. + +This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. +New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. + +For both labels and data, content is namespaced by the top-level fields. +The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. + {ref}/security-api-update-user-profile-data.html[Endpoint documentation] [source,ts] ---- @@ -11814,15 +12998,19 @@ client.security.updateUserProfileData({ uid }) * *Request (object):* ** *`uid` (string)*: A unique identifier for the user profile. -** *`labels` (Optional, Record)*: Searchable data that you want to associate with the user profile. This -field supports a nested data structure. +** *`labels` (Optional, Record)*: Searchable data that you want to associate with the user profile. +This field supports a nested data structure. +Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). ** *`data` (Optional, Record)*: Non-searchable data that you want to associate with the user profile. This field supports a nested data structure. +Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). +The data object is not searchable, but can be retrieved with the get user profile API. ** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. ** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation -visible to search, if 'wait_for' then wait for a refresh to make this operation -visible to search, if 'false' do nothing with refreshes. +visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', nothing is done with refreshes. 
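+
+As an illustration only, a minimal TypeScript sketch of a call to this API follows. The profile UID and the `app1` namespace are hypothetical placeholders, not values taken from this reference:
+
+[source,ts]
+----
+// Hypothetical example: store searchable labels and non-searchable data
+// under the `app1` namespace of an existing user profile.
+const response = await client.security.updateUserProfileData({
+  uid: 'u_kd2JMqwUQwSCCOxMv7M1vw_0', // placeholder profile UID
+  labels: {
+    app1: { tags: ['production'] } // searchable; namespaced by the top-level key
+  },
+  data: {
+    app1: { theme: 'dark' } // not searchable; retrievable with the get user profile API
+  },
+  refresh: 'wait_for' // wait until the change is visible to search
+})
+----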
[discrete] === shutdown @@ -12447,14 +13635,132 @@ To indicate that the request should never timeout, set it to `-1`. [discrete] ==== repository_analyze -Analyzes a repository for correctness and performance +Analyze a snapshot repository. +Analyze the performance characteristics and any incorrect behaviour found in a repository. -{ref}/modules-snapshots.html[Endpoint documentation] +The response exposes implementation details of the analysis which may change from version to version. +The response body format is therefore not considered stable and may be different in newer versions. + +There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. +Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. + +The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. +Run your first analysis with the default parameter values to check for simple problems. +If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. +Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. +Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. + +If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. +This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. +If so, this storage system is not suitable for use as a snapshot repository. +You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. + +If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. +You can use this information to determine the performance of your storage system. +If any operation fails or returns an incorrect result, the API returns an error. +If the API returns an error, it may not have removed all the data it wrote to the repository. +The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. +You should verify that this location has been cleaned up correctly. +If there is still leftover data at the specified location, you should manually remove it. + +If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. +Some clients are configured to close their connection if no response is received within a certain timeout. +An analysis takes a long time to complete so you might need to relax any such client-side timeouts. +On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. 
+The path to the leftover data is recorded in the Elasticsearch logs. +You should verify that this location has been cleaned up correctly. +If there is still leftover data at the specified location, you should manually remove it. + +If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. +The analysis attempts to detect common bugs but it does not offer 100% coverage. +Additionally, it does not test the following: + +* Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. +* Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. +* Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. + +IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. +This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. +You must ensure this load does not affect other users of these systems. +Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. + +NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. + +NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. +A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. +This indicates it behaves incorrectly in ways that the former version did not detect. +You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. + +NOTE: This API may not work correctly in a mixed-version cluster. + +*Implementation details* + +NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. + +The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. +These tasks are distributed over the data and master-eligible nodes in the cluster for execution. + +For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. +The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. +If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. 
+
+For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes.
+These reads are permitted to fail, but must not return partial data.
+If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires.
+
+For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it.
+In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs.
+If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites.
+
+The executing node will use a variety of different methods to write the blob.
+For instance, where applicable, it will use both single-part and multi-part uploads.
+Similarly, the reading nodes will use a variety of different methods to read the data back again.
+For instance they may read the entire blob from start to end or may read only a subset of the data.
+
+For some blob-level tasks, the executing node will cancel the write before it is complete.
+In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob.
+
+Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation.
+This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time.
+The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type.
+Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed.
+Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results.
+If an operation fails due to contention, Elasticsearch retries the operation until it succeeds.
+Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob.
+Some operations also verify the behavior on small blobs with sizes other than 8 bytes.
+
+{ref}/repo-analysis-api.html[Endpoint documentation]
[source,ts]
----
-client.snapshot.repositoryAnalyze()
+client.snapshot.repositoryAnalyze({ repository })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`repository` (string)*: The name of the repository.
+** *`blob_count` (Optional, number)*: The total number of blobs to write to the repository during the test.
+For realistic experiments, you should set it to at least `2000`.
+** *`concurrency` (Optional, number)*: The number of operations to run concurrently during the test.
+** *`detailed` (Optional, boolean)*: Indicates whether to return detailed results, including timing information for every operation performed during the analysis.
+If false, it returns only a summary of the analysis.
+** *`early_read_node_count` (Optional, number)*: The number of nodes on which to perform an early read operation while writing each blob.
+Early read operations are only rarely performed.
+** *`max_blob_size` (Optional, number | string)*: The maximum size of a blob to be written during the test.
+For realistic experiments, you should set it to at least `2gb`.
+** *`max_total_data_size` (Optional, number | string)*: An upper limit on the total size of all the blobs written during the test. +For realistic experiments, you should set it to at least `1tb`. +** *`rare_action_probability` (Optional, number)*: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. +** *`rarely_abort_writes` (Optional, boolean)*: Indicates whether to rarely cancel writes before they complete. +** *`read_node_count` (Optional, number)*: The number of nodes on which to read a blob after writing. +** *`register_operation_count` (Optional, number)*: The minimum number of linearizable register operations to perform in total. +For realistic experiments, you should set it to at least `100`. +** *`seed` (Optional, number)*: The seed for the pseudo-random number generator used to generate the list of operations performed during the test. +To repeat the same set of operations in multiple experiments, use the same seed in each experiment. +Note that the operations are performed concurrently so might not always happen in the same order on each run. +** *`timeout` (Optional, string | -1 | 0)*: The period of time to wait for the test to complete. +If no response is received before the timeout expires, the test is cancelled and returns an error. [discrete] ==== restore @@ -12469,9 +13775,9 @@ To restore a snapshot, the cluster's global metadata must be writable. Ensure th Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: -``` +---- GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream -``` +---- If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. @@ -12955,6 +14261,10 @@ client.synonyms.putSynonymRule({ set_id, rule_id, synonyms }) [discrete] ==== cancel Cancel a task. + +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. + A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. @@ -12973,17 +14283,22 @@ client.tasks.cancel({ ... }) ==== Arguments * *Request (object):* -** *`task_id` (Optional, string | number)*: ID of the task. -** *`actions` (Optional, string | string[])*: List or wildcard expression of actions used to limit the request. -** *`nodes` (Optional, string[])*: List of node IDs or names used to limit the request. -** *`parent_task_id` (Optional, string)*: Parent task ID used to limit the tasks. -** *`wait_for_completion` (Optional, boolean)*: Should the request block until the cancellation of the task and its descendant tasks is completed. Defaults to false +** *`task_id` (Optional, string | number)*: The task identifier. +** *`actions` (Optional, string | string[])*: A list or wildcard expression of actions that is used to limit the request. +** *`nodes` (Optional, string[])*: A list of node IDs or names that is used to limit the request. 
+** *`parent_task_id` (Optional, string)*: A parent task ID that is used to limit the tasks. +** *`wait_for_completion` (Optional, boolean)*: If true, the request blocks until all found tasks are complete. [discrete] ==== get Get task information. Get information about a task currently running in the cluster. +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. + +If the task identifier is not found, a 404 response code indicates that there are no resources that match the request. + {ref}/tasks.html[Endpoint documentation] [source,ts] ---- @@ -12994,8 +14309,8 @@ client.tasks.get({ task_id }) ==== Arguments * *Request (object):* -** *`task_id` (string)*: ID of the task. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +** *`task_id` (string)*: The task identifier. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the task has completed. @@ -13004,6 +14319,63 @@ If no response is received before the timeout expires, the request fails and ret Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster. +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. + +**Identifying running tasks** + +The `X-Opaque-Id header`, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the headers field for in the task information. +This enables you to track certain calls or associate certain tasks with the client that started them. +For example: + +---- +curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" +---- + +The API returns the following result: + +---- +HTTP/1.1 200 OK +X-Opaque-Id: 123456 +content-type: application/json; charset=UTF-8 +content-length: 831 + +{ + "tasks" : { + "u5lcZHqcQhu-rUoFaqDphA:45" : { + "node" : "u5lcZHqcQhu-rUoFaqDphA", + "id" : 45, + "type" : "transport", + "action" : "cluster:monitor/tasks/lists", + "start_time_in_millis" : 1513823752749, + "running_time_in_nanos" : 293139, + "cancellable" : false, + "headers" : { + "X-Opaque-Id" : "123456" + }, + "children" : [ + { + "node" : "u5lcZHqcQhu-rUoFaqDphA", + "id" : 46, + "type" : "direct", + "action" : "cluster:monitor/tasks/lists[n]", + "start_time_in_millis" : 1513823752750, + "running_time_in_nanos" : 92133, + "cancellable" : false, + "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", + "headers" : { + "X-Opaque-Id" : "123456" + } + } + ] + } + } + } +---- +In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header. +The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. +The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request. + {ref}/tasks.html[Endpoint documentation] [source,ts] ---- @@ -13014,13 +14386,19 @@ client.tasks.list({ ... }) ==== Arguments * *Request (object):* -** *`actions` (Optional, string | string[])*: List or wildcard expression of actions used to limit the request. -** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. 
+** *`actions` (Optional, string | string[])*: A list or wildcard expression of actions used to limit the request.
+For example, you can use `cluster:*` to retrieve all cluster-related tasks.
+** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about the running tasks.
This information is useful to distinguish tasks from each other but is more costly to run.
-** *`group_by` (Optional, Enum("nodes" | "parents" | "none"))*: Key used to group tasks in the response.
-** *`nodes` (Optional, string | string[])*: List of node IDs or names used to limit returned information.
-** *`parent_task_id` (Optional, string)*: Parent task ID used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+** *`group_by` (Optional, Enum("nodes" | "parents" | "none"))*: A key that is used to group tasks in the response.
+The task lists can be grouped either by nodes or by parent tasks.
+** *`nodes` (Optional, string | string[])*: A list of node IDs or names that is used to limit the returned information.
+** *`parent_task_id` (Optional, string)*: A parent task identifier that is used to limit returned information.
+To return all tasks, omit this parameter or use a value of `-1`.
+If the parent task is not found, the API does not return a 404 response code.
+** *`timeout` (Optional, string | -1 | 0)*: The period to wait for each node to respond.
+If a node does not respond before its timeout expires, the response does not include its information.
+However, timed out nodes are included in the `node_failures` property.
** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete.

[discrete]
@@ -13445,9 +14823,9 @@
wildcard expression. You can get information for all transforms by using
``.
** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
-1. Contains wildcard expressions and there are no transforms that match.
-2. Contains the _all string or no identifiers and there are no matches.
-3. Contains wildcard expressions and there are only partial matches.
+. Contains wildcard expressions and there are no transforms that match.
+. Contains the _all string or no identifiers and there are no matches.
+. Contains wildcard expressions and there are only partial matches.

If this parameter is false, the request returns a 404 status code when there
are no matches or only partial matches.

@@ -13478,9 +14856,9 @@
wildcard expression. You can get information for all transforms by using
``.
** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
-1. Contains wildcard expressions and there are no transforms that match.
-2. Contains the _all string or no identifiers and there are no matches.
-3. Contains wildcard expressions and there are only partial matches.
+. Contains wildcard expressions and there are no transforms that match.
+. Contains the _all string or no identifiers and there are no matches.
+. Contains wildcard expressions and there are only partial matches.

If this parameter is false, the request returns a 404 status code when there
are no matches or only partial matches.
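As a quick, hedged illustration of the snapshot analysis and task management APIs documented above, a minimal caller-side sketch might look like the following. The repository name and task identifier are hypothetical placeholders, not values taken from this patch:

[source,ts]
----
// Run a small repository analysis against an already registered repository.
const analysis = await client.snapshot.repositoryAnalyze({
  repository: 'my_backup_repo', // hypothetical repository name
  blob_count: 100,
  concurrency: 5
})

// List running tasks, grouped by parent task, with per-task detail.
const tasks = await client.tasks.list({ detailed: true, group_by: 'parents' })

// Cancel a task and block until the cancellation has completed.
await client.tasks.cancel({
  task_id: 'oTUvMXIBUyqqfJF9pZWb:12345', // hypothetical task identifier
  wait_for_completion: true
})
----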
diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index 88d6104ed..b4d711bc4 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -52,13 +52,23 @@ export default class AsyncSearch { async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,13 +94,23 @@ export default class AsyncSearch { async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -116,13 +136,23 @@ export default class AsyncSearch { async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
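/*
 * Note: the spread below shallow-copies the caller-supplied `querystring`
 * so that the per-key assignments later in this method never mutate the
 * object the caller passed in. The same pattern repeats in every method
 * touched by this patch.
 */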
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -149,12 +179,23 @@ export default class AsyncSearch { async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) { // eslint-disable-line querystring[key] = params[key] } else { @@ -163,7 +204,7 @@ export default class AsyncSearch { } } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index aec1226d8..f1e588e08 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -52,13 +52,23 @@ export default class Autoscaling { async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,14 +94,24 @@ export default class Autoscaling { async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -114,13 +134,23 @@ export default class Autoscaling { async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -147,16 +177,17 @@ export default class Autoscaling { async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['policy'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index ebad1b11f..5654ffc1f 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Bulk index or delete documents. Performs multiple indexing or delete operations in a single API call. This reduces overhead and can greatly increase indexing speed. + * Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. * To use the `index` action, you must have the `create`, `index`, or `write` index privilege. * To use the `delete` action, you must have the `delete` or `write` index privilege. * To use the `update` action, you must have the `index` or `write` index privilege. * To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. 
* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\n optional_source\n action_and_meta_data\n optional_source\n .... action_and_meta_data\n optional_source\n ``` The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. A `create` action fails if a document with the same ID already exists in the target. An `index` action adds or replaces a document as necessary. NOTE: Data streams support only the `create` action. To update or delete a document in a data stream, you must target the backing index containing the document. An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. NOTE: The final line of data must end with a newline character (`\n`). Each newline character may be preceded by a carriage return (`\r`). When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. Client libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible. There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. **Client support for bulk requests** Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: * Go: Check out `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` * PHP: Check out bulk indexing. **Submitting bulk requests with cURL** If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. The latter doesn't preserve newlines.
For example: ``` $ cat requests { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} ``` **Optimistic concurrency control** Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. **Versioning** Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index or delete operation based on the `_version` mapping. It also support the `version_type`. **Routing** Each bulk item can include the routing value using the `routing` field. It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Wait for active shards** When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. **Refresh** Control when the changes made by this request are visible to search. NOTE: Only the shards that receive the bulk request will be affected by refresh. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html | Elasticsearch API documentation} */ export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -47,16 +47,17 @@ export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['operations'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? 
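/*
 * A raw caller-supplied `body` (for example a pre-serialized NDJSON string
 * of bulk operations) is kept as the starting payload; when the structured
 * `operations` key is present it overwrites this value in the loop below.
 * A hypothetical caller-side sketch:
 *
 *   await client.bulk({
 *     operations: [
 *       { index: { _index: 'test', _id: '1' } },
 *       { field1: 'value1' }
 *     ]
 *   })
 */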
undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/capabilities.ts b/src/api/api/capabilities.ts index facd38421..1d52df861 100644 --- a/src/api/api/capabilities.ts +++ b/src/api/api/capabilities.ts @@ -46,14 +46,24 @@ export default async function CapabilitiesApi (this: That, params?: T.TODO, opti export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index b30c5365c..e4dc21b04 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -44,7 +44,7 @@ export default class Cat { } /** - * Get aliases. Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. + * Get aliases. Get the cluster's index aliases, including filter and routing information. This API does not return data stream aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html | Elasticsearch API documentation} */ async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -52,14 +52,24 @@ export default class Cat { async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,7 +94,7 @@ export default class Cat { } /** - * Get shard allocation information. Get a snapshot of the number of shards allocated to each data node and their disk space. 
IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. + * Get shard allocation information. Get a snapshot of the number of shards allocated to each data node and their disk space. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html | Elasticsearch API documentation} */ async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -92,14 +102,24 @@ export default class Cat { async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -124,7 +144,7 @@ export default class Cat { } /** - * Get component templates. Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. + * Get component templates. Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html | Elasticsearch API documentation} */ async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -132,14 +152,24 @@ export default class Cat { async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
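/* Every parameter of this API is optional, so fall back to an empty params object when the caller passed nothing. */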
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -164,7 +194,7 @@ export default class Cat { } /** - * Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. + * Get a document count. Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html | Elasticsearch API documentation} */ async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -172,14 +202,24 @@ export default class Cat { async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -212,14 +252,24 @@ export default class Cat { async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['fields'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -244,7 +294,7 @@ export default class Cat { } /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. 
To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. + * Get the cluster health status. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html | Elasticsearch API documentation} */ async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -252,14 +302,24 @@ export default class Cat { async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -274,7 +334,7 @@ export default class Cat { } /** - * Get CAT help. Returns help for the CAT APIs. + * Get CAT help. Get help for the CAT APIs. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html | Elasticsearch API documentation} */ async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -282,14 +342,24 @@ export default class Cat { async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
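/* `help` accepts no path parameters (`acceptedPath` is empty), so any keys the caller does pass end up in the querystring below. */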
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -304,7 +374,7 @@ export default class Cat { } /** - * Get index information. Returns high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. + * Get index information. Get high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html | Elasticsearch API documentation} */ async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -312,14 +382,24 @@ export default class Cat { async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -352,14 +432,24 @@ export default class Cat { async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -374,7 +464,7 @@ export default class Cat { } /** - * Get data frame analytics jobs. Returns configuration and usage information about data frame analytics jobs. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. + * Get data frame analytics jobs. Get configuration and usage information about data frame analytics jobs. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-dfanalytics.html | Elasticsearch API documentation} */ async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -382,14 +472,24 @@ export default class Cat { async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -414,7 +514,7 @@ export default class Cat { } /** - * Get datafeeds. Returns configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. + * Get datafeeds. Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-datafeeds.html | Elasticsearch API documentation} */ async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -422,14 +522,24 @@ export default class Cat { async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -454,7 +564,7 @@ export default class Cat { } /** - * Get anomaly detection jobs. Returns configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. + * Get anomaly detection jobs. Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-anomaly-detectors.html | Elasticsearch API documentation} */ async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -462,14 +572,24 @@ export default class Cat { async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -494,7 +614,7 @@ export default class Cat { } /** - * Get trained models. Returns configuration and usage information about inference trained models. 
CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. + * Get trained models. Get configuration and usage information about inference trained models. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-trained-model.html | Elasticsearch API documentation} */ async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -502,14 +622,24 @@ export default class Cat { async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -542,14 +672,24 @@ export default class Cat { async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -572,14 +712,24 @@ export default class Cat { async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -602,14 +752,24 @@ export default class Cat { async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -632,14 +792,24 @@ export default class Cat { async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -662,14 +832,24 @@ export default class Cat { async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -702,14 +882,24 @@ export default class Cat { async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -732,14 +922,24 @@ export default class Cat { async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -772,14 +972,24 @@ export default class Cat { async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -804,7 +1014,7 @@ export default class Cat { } /** - * Get snapshot information Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. + * Get snapshot information. Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html | Elasticsearch API documentation} */ async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -812,14 +1022,24 @@ export default class Cat { async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -852,14 +1072,24 @@ export default class Cat { async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -882,14 +1112,24 @@ export default class Cat { async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -922,14 +1162,24 @@ export default class Cat { async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['thread_pool_patterns'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -962,14 +1212,24 @@ export default class Cat { async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts
index 8fafab146..3631d2dcb 100644
--- a/src/api/api/ccr.ts
+++ b/src/api/api/ccr.ts
@@ -52,13 +52,23 @@ export default class Ccr {
   async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrDeleteAutoFollowPatternResponse>
   async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -85,16 +95,27 @@ export default class Ccr {
   async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise<T.CcrFollowResponse>
   async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['data_stream_name', 'leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster', 'settings']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -120,13 +141,23 @@ export default class Ccr {
   async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise<T.CcrFollowInfoResponse>
   async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -152,13 +183,23 @@ export default class Ccr {
   async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise<T.CcrFollowStatsResponse>
   async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -185,16 +226,27 @@ export default class Ccr {
   async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise<T.CcrForgetFollowerResponse>
   async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['follower_cluster', 'follower_index', 'follower_index_uuid', 'leader_remote_cluster']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
        // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -220,14 +272,24 @@ export default class Ccr {
   async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrGetAutoFollowPatternResponse>
   async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -260,13 +322,23 @@ export default class Ccr {
   async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrPauseAutoFollowPatternResponse>
   async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -292,13 +364,23 @@ export default class Ccr {
   async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise<T.CcrPauseFollowResponse>
   async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -325,16 +407,27 @@ export default class Ccr {
   async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrPutAutoFollowPatternResponse>
   async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['remote_cluster', 'follow_index_pattern', 'leader_index_patterns', 'leader_index_exclusion_patterns', 'max_outstanding_read_requests', 'settings', 'max_outstanding_write_requests', 'read_poll_timeout', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -360,13 +453,23 @@ export default class Ccr {
   async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrResumeAutoFollowPatternResponse>
   async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -393,16 +496,27 @@ export default class Ccr {
   async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise<T.CcrResumeFollowResponse>
   async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
     const acceptedBody: string[] = ['max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -428,14 +542,24 @@ export default class Ccr {
   async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise<T.CcrStatsResponse>
   async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -458,13 +582,23 @@ export default class Ccr {
   async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise<T.CcrUnfollowResponse>
   async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts
index 59013642d..5fab83bb9 100644
--- a/src/api/api/clear_scroll.ts
+++ b/src/api/api/clear_scroll.ts
@@ -47,17 +47,28 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll
 export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
   const acceptedBody: string[] = ['scroll_id']
-  const querystring: Record<string, any> = {}
-  const body: Record<string, any> = {}
+  const userQuery = params?.querystring
+  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+  let body: Record<string, any> | string | undefined
+  const userBody = params?.body
+  if (userBody != null) {
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = { ...userBody }
+    }
+  }
 
   params = params ?? {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else {
+    } else if (key !== 'body' && key !== 'querystring') {
       // @ts-expect-error
       querystring[key] = params[key]
     }
diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts
index 2df8577ec..52334debe 100644
--- a/src/api/api/close_point_in_time.ts
+++ b/src/api/api/close_point_in_time.ts
@@ -47,16 +47,27 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo
 export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
   const acceptedBody: string[] = ['id']
-  const querystring: Record<string, any> = {}
-  const body: Record<string, any> = {}
+  const userQuery = params?.querystring
+  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+  let body: Record<string, any> | string | undefined
+  const userBody = params?.body
+  if (userBody != null) {
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = { ...userBody }
+    }
+  }
 
   for (const key in params) {
     if (acceptedBody.includes(key)) {
+      body = body ?? {}
      // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else {
+    } else if (key !== 'body' && key !== 'querystring') {
       // @ts-expect-error
       querystring[key] = params[key]
     }
diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts
index 4da8aedfa..9da8711f1 100644
--- a/src/api/api/cluster.ts
+++ b/src/api/api/cluster.ts
@@ -53,17 +53,28 @@ export default class Cluster {
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise<T.ClusterAllocationExplainResponse>
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['current_node', 'index', 'primary', 'shard']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -86,13 +97,23 @@ export default class Cluster {
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterDeleteComponentTemplateResponse>
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -118,14 +139,24 @@ export default class Cluster {
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<T.ClusterDeleteVotingConfigExclusionsResponse>
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -148,13 +179,23 @@ export default class Cluster {
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterExistsComponentTemplateResponse>
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -180,14 +221,24 @@ export default class Cluster {
   async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterGetComponentTemplateResponse>
   async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -220,14 +271,24 @@ export default class Cluster {
   async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise<T.ClusterGetSettingsResponse>
   async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -250,14 +311,24 @@ export default class Cluster {
   async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise<T.ClusterHealthResponse>
   async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -290,13 +361,23 @@ export default class Cluster {
   async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise<T.ClusterInfoResponse>
   async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['target']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -322,14 +403,24 @@ export default class Cluster {
   async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<T.ClusterPendingTasksResponse>
   async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -352,14 +443,24 @@ export default class Cluster {
   async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<T.ClusterPostVotingConfigExclusionsResponse>
   async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -383,16 +484,27 @@ export default class Cluster {
   async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterPutComponentTemplateResponse>
   async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const acceptedBody: string[] = ['template', 'version', '_meta', 'deprecated']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -419,17 +531,28 @@ export default class Cluster {
   async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise<T.ClusterPutSettingsResponse>
   async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['persistent', 'transient']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -452,14 +575,24 @@ export default class Cluster {
   async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise<T.ClusterRemoteInfoResponse>
   async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -483,17 +616,28 @@ export default class Cluster {
   async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise<T.ClusterRerouteResponse>
   async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['commands']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -516,14 +660,24 @@ export default class Cluster {
   async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise<T.ClusterStateResponse>
   async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['metric', 'index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -560,14 +714,24 @@ export default class Cluster {
   async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise<T.ClusterStatsResponse>
   async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['node_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts
index 399537099..a181384b2 100644
--- a/src/api/api/connector.ts
+++ b/src/api/api/connector.ts
@@ -52,13 +52,23 @@ export default class Connector {
   async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise<T.ConnectorCheckInResponse>
   async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -84,13 +94,23 @@ export default class Connector {
   async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise<T.ConnectorDeleteResponse>
   async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -116,13 +136,23 @@ export default class Connector {
   async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise<T.ConnectorGetResponse>
   async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -149,16 +179,27 @@ export default class Connector {
   async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise<T.ConnectorLastSyncResponse>
   async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const acceptedBody: string[] = ['last_access_control_sync_error', 'last_access_control_sync_scheduled_at', 'last_access_control_sync_status', 'last_deleted_document_count', 'last_incremental_sync_scheduled_at', 'last_indexed_document_count', 'last_seen', 'last_sync_error', 'last_sync_scheduled_at', 'last_sync_status', 'last_synced', 'sync_cursor']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -184,14 +225,24 @@ export default class Connector {
   async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise<T.ConnectorListResponse>
   async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -215,17 +266,28 @@ export default class Connector {
   async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise<T.ConnectorPostResponse>
   async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -249,17 +311,28 @@ export default class Connector {
   async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise<T.ConnectorPutResponse>
   async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -291,14 +364,24 @@ export default class Connector {
   async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         querystring[key] = params[key]
       }
     }
@@ -322,14 +405,24 @@ export default class Connector {
   async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         querystring[key] = params[key]
       }
     }
@@ -353,14 +446,24 @@ export default class Connector {
   async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         querystring[key] = params[key]
       }
     }
@@ -381,14 +484,24 @@ export default class Connector {
   async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         querystring[key] = params[key]
       }
     }
@@ -413,13 +526,23 @@ export default class Connector {
   async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobCancelResponse>
   async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -445,13 +568,23 @@ export default class Connector {
   async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobCheckInResponse>
   async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -478,16 +611,27 @@ export default class Connector {
   async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobClaimResponse>
   async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
     const acceptedBody: string[] = ['sync_cursor', 'worker_hostname']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -513,13 +657,23 @@ export default class Connector {
   async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobDeleteResponse>
   async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -546,16 +700,27 @@ export default class Connector {
   async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobErrorResponse>
   async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
     const acceptedBody: string[] = ['error']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -581,13 +746,23 @@ export default class Connector {
   async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobGetResponse>
   async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -613,14 +788,24 @@ export default class Connector {
   async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobListResponse>
   async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -644,16 +829,27 @@ export default class Connector {
   async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobPostResponse>
   async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['id', 'job_type', 'trigger_method']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -677,16 +873,27 @@ export default class Connector {
   async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobUpdateStatsResponse>
   async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_sync_job_id']
     const acceptedBody: string[] = ['deleted_document_count', 'indexed_document_count', 'indexed_document_volume', 'last_seen', 'metadata', 'total_document_count']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -712,13 +919,23 @@ export default class Connector {
   async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateActiveFilteringResponse>
   async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -745,16 +962,27 @@ export default class Connector {
   async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateApiKeyIdResponse>
   async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const acceptedBody: string[] = ['api_key_id', 'api_key_secret_id']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -781,16 +1009,27 @@ export default class Connector {
   async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateConfigurationResponse>
   async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const acceptedBody: string[] = ['configuration', 'values']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -817,16 +1056,27 @@ export default class Connector {
   async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateErrorResponse>
   async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const acceptedBody: string[] = ['error']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -853,16 +1103,27 @@ export default class Connector {
   async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateFeaturesResponse>
   async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const acceptedBody: string[] = ['features']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -889,16 +1150,27 @@ export default class Connector {
   async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateFilteringResponse>
   async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const acceptedBody: string[] = ['filtering', 'rules', 'advanced_snippet']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -925,16 +1197,27 @@ export default class Connector {
   async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateFilteringValidationResponse>
   async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
     const acceptedBody: string[] = ['validation']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -997,16 +1291,27 @@ export default class Connector { async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['name', 'description'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1033,16 +1338,27 @@ export default class Connector { async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['is_native'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1069,16 +1385,27 @@ export default class Connector { async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['pipeline'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1105,16 +1432,27 @@ export default class Connector { async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateSchedulingResponse> async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['scheduling'] - const querystring: Record<string, any> = {} - const body: Record<string, any> = {} + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1141,16 +1479,27 @@ export default class Connector { async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateServiceTypeResponse> async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['service_type'] - const querystring: Record<string, any> = {} - const body: Record<string, any> = {} + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1177,16 +1526,27 @@ export default class Connector { async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateStatusResponse> async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = ['connector_id'] const acceptedBody: string[] = ['status'] - const querystring: Record<string, any> = {} - const body: Record<string, any> = {} + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/count.ts b/src/api/api/count.ts index e8f4561dc..9370a8928 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Count search results. Get the number of documents matching a query. + * Count search results. Get the number of documents matching a query. The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. The latter must be nested in a `query` key, which is the same as the search API. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count.
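 * As an illustrative client-side sketch only (an editor-added example, not generated code; it assumes an already-instantiated `client` from `@elastic/elasticsearch` and a hypothetical index name), the two forms might look like:
 * ```
 * // Query DSL form: the query is nested under a top-level `query` key
 * const dsl = await client.count({ index: 'my-index-000001', query: { match: { 'user.id': 'kimchy' } } })
 * // Query string form: the query is passed as the `q` parameter instead
 * const qs = await client.count({ index: 'my-index-000001', q: 'user.id:kimchy' })
 * console.log(dsl.count, qs.count)
 * ```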
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html | Elasticsearch API documentation} */ export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CountResponse> @@ -47,17 +47,28 @@ export default async function CountApi (this: That, params?: T.CountRequest, opt export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['query'] - const querystring: Record<string, any> = {} - const body: Record<string, any> = {} + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/create.ts b/src/api/api/create.ts index 1ab42f1f3..6d46dec88 100644 --- a/src/api/api/create.ts +++ b/src/api/api/create.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Index a document. Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. + * Create a new document in the index. You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs. Using `_create` guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with the same ID already exists in the index. To update an existing document, you must use the `/<target>/_doc/` API. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates.
To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter. Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
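 * As a hedged sketch of setting this parameter from the client (an editor-added example; `client` and the index name are assumptions, not part of the generated file):
 * ```
 * // Require three active shard copies before the indexing operation proceeds
 * await client.create({
 *   index: 'my-index-000001',
 *   id: '1',
 *   document: { 'user.id': 'kimchy' },
 *   wait_for_active_shards: 3
 * })
 * ```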
If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation} */ export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -47,16 +47,17 @@ export default async function CreateApi (this: That, params export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] const acceptedBody: string[] = ['document'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index 3cb191687..00a5e88ac 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -52,13 +52,23 @@ export default class DanglingIndices { async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index_uuid'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,13 +94,23 @@ export default class DanglingIndices { async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesImportDanglingIndexResponse> async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = ['index_uuid'] - const querystring: Record<string, any> = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -116,14 +136,24 @@ export default class DanglingIndices { async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesListDanglingIndicesResponse> async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = [] - const querystring: Record<string, any> = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts index 356cf7375..ced9363fb 100644 --- a/src/api/api/delete.ts +++ b/src/api/api/delete.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Delete a document. Removes a JSON document from the specified index. + * Delete a document. Remove a JSON document from the specified index. NOTE: You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document. **Optimistic concurrency control** Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Versioning** Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented.
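 * For illustration only (an editor-added sketch assuming a configured `client`; the sequence number and primary term would come from a previous read or write of the document):
 * ```
 * // Delete only if the document was last changed at seq_no 362 with primary term 2,
 * // otherwise fail with a 409 version conflict
 * await client.delete({ index: 'my-index-000001', id: '1', if_seq_no: 362, if_primary_term: 2 })
 * ```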
The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. **Routing** If routing is used during indexing, the routing value also needs to be specified to delete a document. If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 ``` This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified. **Distributed** The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html | Elasticsearch API documentation} */ export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -46,13 +46,23 @@ export default async function DeleteApi (this: That, params: T.DeleteRequest, op export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index f11b3a33c..e2ce7ab74 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Delete documents. Deletes documents that match the specified query. + * Delete documents. Deletes documents that match the specified query. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `delete` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. 
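 * A minimal client-side sketch (an editor-added example assuming a configured `client`):
 * ```
 * // Count version conflicts instead of aborting the operation
 * const response = await client.deleteByQuery({
 *   index: 'my-index-000001',
 *   conflicts: 'proceed',
 *   query: { match: { 'user.id': 'elkbee' } }
 * })
 * console.log(response.deleted, response.version_conflicts)
 * ```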
While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick; they are not rolled back. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query. **Throttling delete requests** To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to disable throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with slices only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted.
* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. * Delete performance scales linearly across available resources with the number of slices. Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. **Cancel a delete by query operation** Any delete by query can be canceled using the task cancel API. For example: ``` POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel ``` The task ID can be found by using the get tasks API. Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html | Elasticsearch API documentation} */ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DeleteByQueryResponse> @@ -47,16 +47,27 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['max_docs', 'query', 'slice'] - const querystring: Record<string, any> = {} - const body: Record<string, any> = {} + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts index 431e4a7a6..57d9bacda 100644 --- a/src/api/api/delete_by_query_rethrottle.ts +++ b/src/api/api/delete_by_query_rethrottle.ts @@ -39,20 +39,30 @@ interface That { transport: Transport } /** * Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
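 * As a usage sketch only (an editor-added example; the task ID is the illustrative one from the cancellation example above and `client` is assumed):
 * ```
 * // Slow a running delete-by-query task down to 50 requests per second
 * await client.deleteByQueryRethrottle({ task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619', requests_per_second: 50 })
 * ```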
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html#docs-delete-by-query-rethrottle | Elasticsearch API documentation} */ export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts index 5ef36361f..e07b5c09b 100644 --- a/src/api/api/delete_script.ts +++ b/src/api/api/delete_script.ts @@ -46,13 +46,23 @@ export default async function DeleteScriptApi (this: That, params: T.DeleteScrip export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index 023ba410d..2539819a6 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -52,13 +52,23 @@ export default class Enrich { async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,13 +94,23 @@ export default class Enrich { async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -116,14 +136,24 @@ export default class Enrich { async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -157,16 +187,27 @@ export default class Enrich { async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['geo_match', 'match', 'range'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -192,14 +233,24 @@ export default class Enrich { async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index 258a79a2d..551257c86 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -52,13 +52,23 @@ export default class Eql { async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,13 +94,23 @@ export default class Eql { async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise> async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -116,13 +136,23 @@ export default class Eql { async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -149,16 +179,27 @@ export default class Eql { async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'allow_partial_search_results', 'allow_partial_sequence_results', 'size', 'fields', 'result_position', 'runtime_mappings', 'max_samples_per_key'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index b714d125c..2aede7e26 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -44,22 +44,37 @@ export default class Esql { } /** - * Executes an ESQL request asynchronously + * Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-api.html | Elasticsearch API documentation} */ - async asyncQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async asyncQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async asyncQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async asyncQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables'] + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -73,22 +88,74 @@ export default class Esql { } /** - * Retrieves the results of a previously submitted async query request given its ID. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-get-api.html | Elasticsearch API documentation} + * Delete an async ES|QL query. If the query is still running, it is cancelled. Otherwise, the stored results are deleted. If the Elasticsearch security features are enabled, only the following users can use this API to delete a query: * The authenticated user that submitted the original query request * Users with the `cancel_task` cluster privilege + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-delete-api.html | Elasticsearch API documentation} */ - async asyncQueryGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async asyncQueryGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async asyncQueryGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async asyncQueryGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_query/async/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'esql.async_query_delete', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get async ES|QL query results. Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. 
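 * A hedged end-to-end sketch (an editor-added example; it assumes a configured `client` and that the async query response exposes the `id` and `is_running` fields of the REST API):
 * ```
 * // Submit the ES|QL query asynchronously, then poll for its results by ID
 * const submitted = await client.esql.asyncQuery({ query: 'FROM my-index-000001 | LIMIT 10' })
 * if (submitted.id != null) {
 *   const result = await client.esql.asyncQueryGet({ id: submitted.id })
 *   console.log(result.is_running)
 * }
 * ```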
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-get-api.html | Elasticsearch API documentation} + */ + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlAsyncQueryGetResponse> + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlAsyncQueryGetResponse, unknown>> + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise<T.EsqlAsyncQueryGetResponse> + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise<any> { + const acceptedPath: string[] = ['id'] + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -114,16 +181,27 @@ export default class Esql { async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = [] const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables'] - const querystring: Record<string, any> = {} - const body: Record<string, any> = {} + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts index 5d6cf0cfa..e0d66762e 100644 --- a/src/api/api/exists.ts +++ b/src/api/api/exists.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Check a document. Checks if a specified document exists. + * Check a document. Verify that a document exists. For example, check to see if a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the document exists, the API returns a status code of `200 - OK`. If the document doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You can use the `version` parameter to check the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.
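 * The equivalent check from the client resolves to a boolean (an editor-added sketch assuming a configured `client`):
 * ```
 * // true if the document exists, false otherwise
 * const exists = await client.exists({ index: 'my-index-000001', id: '0' })
 * console.log(exists)
 * ```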
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -46,13 +46,23 @@ export default async function ExistsApi (this: That, params: T.ExistsRequest, op export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts index d97fa8feb..a57b4b804 100644 --- a/src/api/api/exists_source.ts +++ b/src/api/api/exists_source.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Check for a document source. Checks if a document's `_source` is stored. + * Check for a document source. Check whether a document source exists in an index. For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not available if it is disabled in the mapping. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -46,13 +46,23 @@ export default async function ExistsSourceApi (this: That, params: T.ExistsSourc export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index 49859bff6..c93ab5275 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -47,16 +47,27 @@ export default async function ExplainApi (this: That, param export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] const acceptedBody: string[] = ['query'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/features.ts b/src/api/api/features.ts index 555be12bc..7e90c2808 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -52,14 +52,24 @@ export default class Features { async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -82,14 +92,24 @@ export default class Features { async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index 1d907c54c..6f04e7f32 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -47,17 +47,28 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['fields', 'index_filter', 'runtime_mappings'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index 25f83089e..8f4d75d31 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -51,14 +51,24 @@ export default class Fleet { async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -82,14 +92,24 @@ export default class Fleet { async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -114,13 +134,23 @@ export default class Fleet { async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -146,16 +176,17 @@ export default class Fleet { async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['searches'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -187,14 +218,24 @@ export default class Fleet { async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -216,16 +257,27 @@ export default class Fleet { async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/get.ts b/src/api/api/get.ts index a1be83e3c..55d638696 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Get a document by its ID. Retrieves the document with the specified ID from an index. + * Get a document by its ID. Get a document and its source or stored fields from an index. By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the `realtime` parameter to false. **Source filtering** By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. You can turn off `_source` retrieval by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false ``` If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead. Both parameters take a comma-separated list of fields or wildcard expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities ``` If you only want to specify includes, you can use a shorter notation: ``` GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified. **Distributed** The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be. **Versioning support** You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -46,13 +46,23 @@ export default async function GetApi (this: That, params: T export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise> export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] }
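The rewritten doc comment above explains realtime gets, `_source` filtering, and routing in terms of raw REST calls. The same requests through the client look roughly like this; a sketch assuming an 8.x-style client, where the index, IDs, and field patterns mirror the examples in the comment:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node address

async function demo (): Promise<void> {
  // GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
  await client.get({
    index: 'my-index-000001',
    id: '0',
    _source_includes: '*.id',
    _source_excludes: 'entities'
  })

  // GET my-index-000001/_doc/2?routing=user1: the routing value used at
  // index time must be repeated, or the document will not be found.
  await client.get({ index: 'my-index-000001', id: '2', routing: 'user1' })

  // GET my-index-000001/_source/1: getSource (changed further below in this
  // patch) returns only the `_source`, without the response metadata.
  await client.getSource({ index: 'my-index-000001', id: '1' })
}

demo().catch(console.error)
```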
diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts index c5cae38cb..a3947bdb6 100644 --- a/src/api/api/get_script.ts +++ b/src/api/api/get_script.ts @@ -46,13 +46,23 @@ export default async function GetScriptApi (this: That, params: T.GetScriptReque export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts index c0a13575c..2f73053f3 100644 --- a/src/api/api/get_script_context.ts +++ b/src/api/api/get_script_context.ts @@ -46,14 +46,24 @@ export default async function GetScriptContextApi (this: That, params?: T.GetScr export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts index 4a44de5dc..d5b706d9b 100644 --- a/src/api/api/get_script_languages.ts +++ b/src/api/api/get_script_languages.ts @@ -46,14 +46,24 @@ export default async function GetScriptLanguagesApi (this: That, params?: T.GetS export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index 3ef802718..f72c08cfc 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Get a document's source. Returns the source of a document. + * Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 ``` You can use the source filtering parameters to control which parts of the `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities ``` * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} */ export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -46,13 +46,23 @@ export default async function GetSourceApi (this: That, par export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise> export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index f72fae89a..fc1a19fe6 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -53,16 +53,27 @@ export default class Graph { async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['connections', 'controls', 'query', 'vertices'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts index f3b76edd0..1e271ecc0 100644 --- a/src/api/api/health_report.ts +++ b/src/api/api/health_report.ts @@ -46,14 +46,24 @@ export default async function HealthReportApi (this: That, params?: T.HealthRepo export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['feature'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index 86748d989..506120df2 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -52,13 +52,23 @@ export default class Ilm { async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,13 +94,23 @@ export default class Ilm { async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -116,14 +136,24 @@ export default class Ilm { async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -156,14 +186,24 @@ export default class Ilm { async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -187,17 +227,28 @@ export default class Ilm { async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['legacy_template_to_delete', 'node_attribute'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -221,16 +272,27 @@ export default class Ilm { async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['current_step', 'next_step'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -257,16 +319,27 @@ export default class Ilm { async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['policy'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -292,13 +365,23 @@ export default class Ilm { async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -324,13 +407,23 @@ export default class Ilm { async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -356,14 +449,24 @@ export default class Ilm { async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -386,14 +489,24 @@ export default class Ilm { async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] }
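The ilm.ts hunks above are the same mechanical rewrite applied to the ILM methods. For orientation, this is roughly how those methods are invoked; a sketch assuming an 8.x-style client, with an illustrative policy that is not part of this patch:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node address

async function demo (): Promise<void> {
  // `policy` is listed in acceptedBody for putLifecycle, so it is sent as
  // the request body; everything else ends up in the path or query string.
  await client.ilm.putLifecycle({
    name: 'my-policy',
    policy: {
      phases: {
        hot: { actions: { rollover: { max_age: '7d' } } },
        delete: { min_age: '30d', actions: { delete: {} } }
      }
    }
  })

  // start/stop send no body at all; under the new code an explicit
  // `querystring` object would be merged into the URL instead of being
  // serialized as a literal `querystring=` parameter.
  await client.ilm.start()
}

demo().catch(console.error)
```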
diff --git a/src/api/api/index.ts b/src/api/api/index.ts index dfdc7c20b..13455eca0 100644 --- a/src/api/api/index.ts +++ b/src/api/api/index.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Index a document. Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. + * Create or update a document in an index. Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. NOTE: You cannot use this API to send update requests for existing documents in a data stream. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. * To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Optimistic concurrency control** Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.
NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter. Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C and you create an index called `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. **No operation (noop) updates** When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable, use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source. There isn't a definitive rule for when noop updates aren't acceptable.
It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. **Versioning** Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks. When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external { "user": { "id": "elkbee" } } ``` In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation} */ export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -47,16 +47,17 @@ export default async function IndexApi (this: That, params: export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] const acceptedBody: string[] = ['document'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] }
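The comment above describes optimistic concurrency control and external versioning in prose. A sketch of both through the client, assuming an 8.x-style client, where the IDs and values mirror the `elkbee` example in the comment:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node address

async function demo (): Promise<void> {
  // External versioning: succeeds only while 2 is greater than the version
  // currently stored for document 1.
  await client.index({
    index: 'my-index-000001',
    id: '1',
    version: 2,
    version_type: 'external',
    document: { user: { id: 'elkbee' } }
  })

  // Optimistic concurrency control: re-read the document, then write it back
  // only if it has not been modified in the meantime.
  const current = await client.get({ index: 'my-index-000001', id: '1' })
  await client.index({
    index: 'my-index-000001',
    id: '1',
    if_seq_no: current._seq_no,
    if_primary_term: current._primary_term,
    document: { user: { id: 'elkbee' } }
  })
}

demo().catch(console.error)
```

Note that the hunk above lists `document` in acceptedBody, which is why the sketch passes `document` rather than a raw `body`.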
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 1973b1e76..9fb65257a 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -52,13 +52,23 @@ export default class Indices { async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'block'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -86,17 +96,28 @@ export default class Indices { async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['analyzer', 'attributes', 'char_filter', 'explain', 'field', 'filter', 'normalizer', 'text', 'tokenizer'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -120,6 +141,48 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Cancel a migration reindex operation. Cancel a migration reindex attempt for a data stream or index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation} + */ + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_migration/reindex/${encodeURIComponent(params.index.toString())}/_cancel` + const meta: TransportRequestMetadata = { + name: 'indices.cancel_migrate_reindex', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices. By default, the clear cache API clears all caches. To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html | Elasticsearch API documentation} @@ -129,14 +192,24 @@ export default class Indices { async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -170,16 +243,27 @@ export default class Indices { async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'target'] const acceptedBody: string[] = ['aliases', 'settings'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -206,13 +290,23 @@ export default class Indices { async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -239,16 +333,27 @@ export default class Indices { async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['aliases', 'mappings', 'settings'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -274,13 +379,23 @@ export default class Indices { async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -297,6 +412,44 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Create an index from a source index. Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation} + */ + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['source', 'dest'] + const acceptedBody: string[] = ['create_from'] + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? 
undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_create_from/${encodeURIComponent(params.source.toString())}/${encodeURIComponent(params.dest.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.create_from', + pathParts: { + source: params.source, + dest: params.dest + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Get data stream stats. Retrieves statistics for one or more data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} @@ -306,14 +459,24 @@ export default class Indices { async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -346,13 +509,23 @@ export default class Indices { async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -378,13 +551,23 @@ export default class Indices { async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -418,13 +601,23 @@ export default class Indices { async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -450,13 +643,23 @@ export default class Indices { async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -482,13 +685,23 @@ export default class Indices { async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -514,13 +727,23 @@ export default class Indices { async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -546,13 +769,23 @@ export default class Indices { async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -579,16 +812,17 @@ export default class Indices { async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'target_index'] const acceptedBody: string[] = ['config'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -615,13 +849,23 @@ export default class Indices { async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -647,13 +891,23 @@ export default class Indices { async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name', 'index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -687,13 +941,23 @@ export default class Indices { async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -719,13 +983,23 @@ export default class Indices { async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -751,13 +1025,23 @@ export default class Indices { async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -783,13 +1067,23 @@ export default class Indices { async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -815,14 +1109,24 @@ export default class Indices { async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -855,14 +1159,24 @@ export default class Indices { async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -895,13 +1209,23 @@ export default class Indices { async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -920,20 +1244,31 @@ export default class Indices { /** * Get aliases. Retrieves information for one or more data stream or index aliases. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-alias.html | Elasticsearch API documentation} */ async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name', 'index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -973,13 +1308,23 @@ export default class Indices { async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1005,14 +1350,24 @@ export default class Indices { async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1035,14 +1390,24 @@ export default class Indices { async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1075,13 +1440,23 @@ export default class Indices { async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['fields', 'index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
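For the read-side methods touched above (`getAlias`, `getDataLifecycle`, `getDataStream`), the calling convention is unchanged by this refactor; a short sketch with made-up names, reusing `client` from the earlier example:

    // Both calls accept wildcard patterns in `name`.
    const aliases = await client.indices.getAlias({ name: 'logs-current' })
    const streams = await client.indices.getDataStream({ name: 'logs-*' })
    console.log(aliases, streams)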
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1115,14 +1490,24 @@ export default class Indices { async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1155,14 +1540,24 @@ export default class Indices { async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1186,6 +1581,48 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Get the migration reindexing status. Get the status of a migration reindex attempt for a data stream or index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation} + */ + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['index'] + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_migration/reindex/${encodeURIComponent(params.index.toString())}/_status` + const meta: TransportRequestMetadata = { + name: 'indices.get_migrate_reindex_status', + pathParts: { + index: params.index + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html | Elasticsearch API documentation} @@ -1195,14 +1632,24 @@ export default class Indices { async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1242,14 +1689,24 @@ export default class Indices { async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1273,6 +1730,40 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Reindex legacy backing indices. Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. The persistent task ID is returned immediately and the reindexing work is completed in that task. 
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation}
+   */
+  async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesMigrateReindexResponse>
+  async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesMigrateReindexResponse, unknown>>
+  async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise<T.IndicesMigrateReindexResponse>
+  async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['reindex']
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: any = params.body ?? undefined
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        // @ts-expect-error
+        body = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_migration/reindex'
+    const meta: TransportRequestMetadata = {
+      name: 'indices.migrate_reindex'
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
   /**
    * Convert an index alias to a data stream. Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; The alias must not have any filters; The alias must not use custom routing. If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation}
    */
@@ -1282,13 +1773,23 @@ export default class Indices {
   async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesMigrateToDataStreamResponse>
   async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1347,13 +1859,23 @@ export default class Indices { async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1379,13 +1901,23 @@ export default class Indices { async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1412,16 +1944,27 @@ export default class Indices { async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'name'] const acceptedBody: string[] = ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
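The two migration endpoints added above pair naturally: `migrateReindex` creates a persistent task and returns immediately, and `getMigrateReindexStatus` reports on that task. A hedged sketch, reusing `client` from the earlier example; the fields inside `reindex` are assumptions for illustration, and the authoritative shape is `T.IndicesMigrateReindexRequest`:

    // Kick off the reindex of all legacy backing indices (POST /_migration/reindex).
    await client.indices.migrateReindex({
      reindex: {
        mode: 'upgrade',                     // assumed field
        source: { index: 'my-data-stream' }  // assumed field
      }
    })

    // Poll the persistent task (GET /_migration/reindex/<index>/_status).
    const status = await client.indices.getMigrateReindexStatus({ index: 'my-data-stream' })
    console.log(status)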
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1456,16 +1999,17 @@ export default class Indices { async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['lifecycle'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1492,16 +2036,27 @@ export default class Indices { async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'allow_auto_create', 'ignore_missing_component_templates', 'deprecated'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1528,16 +2083,27 @@ export default class Indices { async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['date_detection', 'dynamic', 'dynamic_date_formats', 'dynamic_templates', '_field_names', '_meta', 'numeric_detection', 'properties', '_routing', '_source', 'runtime'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1564,16 +2130,17 @@ export default class Indices { async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['settings'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
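The `putIndexTemplate` hunk above lists its accepted top-level body keys (`index_patterns`, `composed_of`, `template`, `data_stream`, `priority`, `version`, `_meta`, and so on); they are passed directly as request properties. An illustrative call with made-up values, reusing `client` from the earlier example:

    await client.indices.putIndexTemplate({
      name: 'logs-template',
      index_patterns: ['logs-*'],
      data_stream: {},          // mark matching indices as a data stream
      priority: 200,
      template: {
        settings: { number_of_shards: 1 }
      }
    })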
{ ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1607,16 +2174,27 @@ export default class Indices { async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'order', 'settings', 'version'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1642,14 +2220,24 @@ export default class Indices { async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1682,14 +2270,24 @@ export default class Indices { async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1722,13 +2320,23 @@ export default class Indices { async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1754,13 +2362,23 @@ export default class Indices { async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1786,13 +2404,23 @@ export default class Indices { async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1819,16 +2447,27 @@ export default class Indices { async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['alias', 'new_index'] const acceptedBody: string[] = ['aliases', 'conditions', 'mappings', 'settings'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1862,14 +2501,24 @@ export default class Indices { async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1902,14 +2551,24 @@ export default class Indices { async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1943,16 +2602,27 @@ export default class Indices { async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'target'] const acceptedBody: string[] = ['aliases', 'settings'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1979,13 +2649,23 @@ export default class Indices { async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2012,17 +2692,28 @@ export default class Indices { async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['allow_auto_create', 'index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'ignore_missing_component_templates', 'deprecated'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2056,16 +2747,27 @@ export default class Indices { async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'target'] const acceptedBody: string[] = ['aliases', 'settings'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2092,14 +2794,24 @@ export default class Indices { async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['metric', 'index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2130,38 +2842,6 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } - /** - * Unfreeze an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/unfreeze-index-api.html | Elasticsearch API documentation} - */ - async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise - async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined - - for (const key in params) { - if (acceptedPath.includes(key)) { - continue - } else { - // @ts-expect-error - querystring[key] = params[key] - } - } - - const method = 'POST' - const path = `/${encodeURIComponent(params.index.toString())}/_unfreeze` - const meta: TransportRequestMetadata = { - name: 'indices.unfreeze', - pathParts: { - index: params.index - } - } - return await this.transport.request({ path, method, querystring, body, meta }, options) - } - /** * Create or update an alias. Adds a data stream or index to an alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} @@ -2172,17 +2852,28 @@ export default class Indices { async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['actions'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2206,17 +2897,28 @@ export default class Indices { async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['query'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 195cf46a2..6bdede5f1 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -52,13 +52,23 @@ export default class Inference { async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_type', 'inference_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -92,14 +102,24 @@ export default class Inference { async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_type', 'inference_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -137,16 +157,27 @@ export default class Inference { async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_type', 'inference_id'] const acceptedBody: string[] = ['query', 'input', 'task_settings'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
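`updateAliases` above accepts a single `actions` body key; a typical atomic alias swap looks like the following (index and alias names are made up, `client` as in the earlier example):

    // Both actions are applied atomically, so `logs-current` never dangles.
    await client.indices.updateAliases({
      actions: [
        { remove: { index: 'logs-2024', alias: 'logs-current' } },
        { add: { index: 'logs-2025', alias: 'logs-current' } }
      ]
    })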
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -181,16 +212,17 @@ export default class Inference { async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_type', 'inference_id'] const acceptedBody: string[] = ['inference_config'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -216,22 +248,37 @@ export default class Inference { } /** - * Perform streaming inference - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/post-stream-inference-api.html | Elasticsearch API documentation} + * Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. 
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stream-inference-api.html | Elasticsearch API documentation} */ - async streamInference (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async streamInference (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async streamInference (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async streamInference (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> + async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise + async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['inference_id', 'task_type'] - const querystring: Record = {} - const body = undefined + const acceptedBody: string[] = ['input'] + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -254,4 +301,104 @@ export default class Inference { } return await this.transport.request({ path, method, querystring, body, meta }, options) } + + /** + * Perform inference on the service using the Unified Schema + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/unified-inference-api.html | Elasticsearch API documentation} + */ + async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> + async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise + async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['task_type', 'inference_id'] + const acceptedBody: string[] = ['messages', 'model', 'max_completion_tokens', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'] + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.task_type != null && params.inference_id != null) {
+      method = 'POST'
+      path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_unified`
+    } else {
+      method = 'POST'
+      path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_unified`
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'inference.unified_inference',
+      pathParts: {
+        task_type: params.task_type,
+        inference_id: params.inference_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+   * Update an inference endpoint. Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-inference-api.html | Elasticsearch API documentation}
+   */
+  async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceUpdateResponse>
+  async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceUpdateResponse, unknown>>
+  async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise<T.InferenceUpdateResponse>
+  async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['inference_id', 'task_type']
+    const acceptedBody: string[] = ['inference_config']
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: any = params.body ?? undefined
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        // @ts-expect-error
+        body = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.task_type != null && params.inference_id != null) {
+      method = 'POST'
+      path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_update`
+    } else {
+      method = 'POST'
+      path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_update`
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'inference.update',
+      pathParts: {
+        inference_id: params.inference_id,
+        task_type: params.task_type
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
 }
diff --git a/src/api/api/info.ts b/src/api/api/info.ts
index 507fc78d9..0c1bf083d 100644
--- a/src/api/api/info.ts
+++ b/src/api/api/info.ts
@@ -46,14 +46,24 @@ export default async function InfoApi (this: That, params?: T.InfoRequest, optio
 export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise<T.InfoResponse>
 export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = []
-  const querystring: Record<string, any> = {}
-  const body = undefined
+  const userQuery = params?.querystring
+  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+  let body: Record<string, any> | string | undefined
+  const userBody = params?.body
+  if (userBody != null) {
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = { ...userBody }
+    }
+  }
 
   params = params ?? {}
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else {
+    } else if (key !== 'body' && key !== 'querystring') {
       // @ts-expect-error
       querystring[key] = params[key]
     }
diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts
index 6999e2064..f691fb5b5 100644
--- a/src/api/api/ingest.ts
+++ b/src/api/api/ingest.ts
@@ -52,13 +52,23 @@ export default class Ingest {
   async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestDeleteGeoipDatabaseResponse>
   async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ?
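The reworked inference surface above exposes three distinct calls: streaming completions (`streamInference`, body key `input`), the unified chat-style endpoint (`unifiedInference`, body keys `messages`, `model`, and related options), and endpoint reconfiguration (`update`, whose whole body is sent as `inference_config`). A hedged sketch; endpoint IDs, task types, and settings fields are illustrative, and `client` is constructed as in the earlier example:

    // Streaming requires a client/transport that supports streamed responses.
    await client.inference.streamInference({
      task_type: 'completion',
      inference_id: 'my-endpoint',
      input: 'Why is the sky blue?'
    })

    await client.inference.unifiedInference({
      task_type: 'chat_completion',  // assumed task type
      inference_id: 'my-endpoint',
      messages: [{ role: 'user', content: 'Why is the sky blue?' }]
    })

    await client.inference.update({
      inference_id: 'my-endpoint',
      inference_config: {
        service_settings: { num_allocations: 2 }  // assumed settings shape
      }
    })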
{ ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -84,13 +94,23 @@ export default class Ingest {
   async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestDeleteIpLocationDatabaseResponse>
   async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -116,13 +136,23 @@ export default class Ingest {
   async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise<T.IngestDeletePipelineResponse>
   async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -148,14 +178,24 @@ export default class Ingest {
   async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise<T.IngestGeoIpStatsResponse>
   async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -178,14 +218,24 @@ export default class Ingest {
   async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestGetGeoipDatabaseResponse>
   async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -218,14 +268,24 @@ export default class Ingest {
   async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestGetIpLocationDatabaseResponse>
   async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -258,14 +318,24 @@ export default class Ingest {
   async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise<T.IngestGetPipelineResponse>
   async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -298,14 +368,24 @@ export default class Ingest {
   async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise<T.IngestProcessorGrokResponse>
   async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -329,16 +409,27 @@ export default class Ingest {
   async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['name', 'maxmind']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -365,16 +456,17 @@ export default class Ingest {
   async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['configuration']
-    const querystring: Record<string, any> = {}
-    let body: any
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+    let body: any = params.body ?? undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
         // @ts-expect-error
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -401,16 +493,27 @@ export default class Ingest {
   async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version', 'deprecated']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -437,16 +540,27 @@ export default class Ingest {
   async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['docs', 'pipeline']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
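
Taken together, the hunks above replace the unconditional `const querystring = {}` / `const body = undefined` initialisers with values seeded from user-supplied `params.querystring` and `params.body` escape hatches. A minimal standalone sketch of the pattern, not the patch itself (the `AnyParams` type and `prepare` name are invented for illustration):

// Sketch of the request-preparation pattern these hunks introduce (assumption:
// params may carry raw `querystring` and `body` keys alongside named parameters).
type AnyParams = { querystring?: Record<string, any>, body?: Record<string, any> | string } & Record<string, any>

function prepare (params: AnyParams, acceptedPath: string[]): { querystring: Record<string, any>, body?: Record<string, any> | string } {
  // seed the query string from a user-supplied raw object, if any
  const userQuery = params.querystring
  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

  // a raw body may be a pre-serialised string or a plain object
  let body: Record<string, any> | string | undefined
  const userBody = params.body
  if (userBody != null) {
    body = typeof userBody === 'string' ? userBody : { ...userBody }
  }

  for (const key in params) {
    if (acceptedPath.includes(key)) continue
    // the raw escape hatches must not leak into the query string
    if (key !== 'body' && key !== 'querystring') querystring[key] = params[key]
  }
  return { querystring, body }
}
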
{ ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts
index 6188a31e9..e23a2d87a 100644
--- a/src/api/api/knn_search.ts
+++ b/src/api/api/knn_search.ts
@@ -47,16 +47,27 @@ export default async function KnnSearchApi (this: That, par
 export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['_source', 'docvalue_fields', 'stored_fields', 'fields', 'filter', 'knn']
-  const querystring: Record<string, any> = {}
-  const body: Record<string, any> = {}
+  const userQuery = params?.querystring
+  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+  let body: Record<string, any> | string | undefined
+  const userBody = params?.body
+  if (userBody != null) {
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = { ...userBody }
+    }
+  }
 
   for (const key in params) {
     if (acceptedBody.includes(key)) {
+      body = body ?? {}
       // @ts-expect-error
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else {
+    } else if (key !== 'body' && key !== 'querystring') {
       // @ts-expect-error
       querystring[key] = params[key]
     }
diff --git a/src/api/api/license.ts b/src/api/api/license.ts
index 9f2e8c627..852661841 100644
--- a/src/api/api/license.ts
+++ b/src/api/api/license.ts
@@ -52,14 +52,24 @@ export default class License {
   async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise<T.LicenseDeleteResponse>
   async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -82,14 +92,24 @@ export default class License {
   async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise<T.LicenseGetResponse>
   async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -112,14 +132,24 @@ export default class License {
   async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise<T.LicenseGetBasicStatusResponse>
   async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -142,14 +172,24 @@ export default class License {
   async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise<T.LicenseGetTrialStatusResponse>
   async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -173,17 +213,28 @@ export default class License {
   async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['license', 'licenses']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -206,14 +257,24 @@ export default class License {
   async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise<T.LicensePostStartBasicResponse>
   async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -236,14 +297,24 @@ export default class License {
   async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise<T.LicensePostStartTrialResponse>
   async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
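
From the caller's side, the visible effect of these hunks is that raw `querystring` and `body` keys are now honoured instead of being copied into the URL. A hypothetical usage sketch (the pipeline id and timeout value are made up; this is how I read the new code, not an example from the patch):

// Assumed caller-level behaviour after this change:
await client.ingest.deletePipeline({
  id: 'my-pipeline',
  // merged verbatim into the request query string by the new initialiser
  querystring: { timeout: '30s' }
})
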
{}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -125,16 +145,17 @@ export default class Logstash {
   async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['pipeline']
-    const querystring: Record<string, any> = {}
-    let body: any
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+    let body: any = params.body ?? undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
         // @ts-expect-error
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts
index 514691961..62f275d77 100644
--- a/src/api/api/mget.ts
+++ b/src/api/api/mget.ts
@@ -47,17 +47,28 @@ export default async function MgetApi (this: That, params?:
 export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise<any> {
   const acceptedPath: string[] = ['index']
   const acceptedBody: string[] = ['docs', 'ids']
-  const querystring: Record<string, any> = {}
-  const body: Record<string, any> = {}
+  const userQuery = params?.querystring
+  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+  let body: Record<string, any> | string | undefined
+  const userBody = params?.body
+  if (userBody != null) {
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = { ...userBody }
+    }
+  }
 
   params = params ?? {}
   for (const key in params) {
     if (acceptedBody.includes(key)) {
+      body = body ?? {}
      // @ts-expect-error
      body[key] = params[key]
    } else if (acceptedPath.includes(key)) {
      continue
-    } else {
+    } else if (key !== 'body' && key !== 'querystring') {
      // @ts-expect-error
      querystring[key] = params[key]
    }
diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts
index d07851611..49fed1c3b 100644
--- a/src/api/api/migration.ts
+++ b/src/api/api/migration.ts
@@ -52,14 +52,24 @@ export default class Migration {
   async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<T.MigrationDeprecationsResponse>
   async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -92,14 +102,24 @@ export default class Migration {
   async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise<T.MigrationGetFeatureUpgradeStatusResponse>
   async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -122,14 +142,24 @@ export default class Migration {
   async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise<T.MigrationPostFeatureUpgradeResponse>
   async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
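
Endpoints whose body is passed through verbatim (logstash `putPipeline` above, ml `postData` further down) get a simpler variant: there is no object merge, the raw body just becomes the default and a named body parameter replaces it wholesale. A reduced sketch under that assumption (`prepareRawBody` is an invented helper, not part of the patch):

// Sketch of the pass-through-body variant: `params.body` may be a
// pre-serialised string (e.g. a JSON or NDJSON payload) used as-is.
function prepareRawBody (params: { body?: any, pipeline?: any }): any {
  let body: any = params.body ?? undefined
  if (params.pipeline !== undefined) {
    // a named body parameter still wins over the raw escape hatch
    body = params.pipeline
  }
  return body
}
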
{ ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -85,16 +95,27 @@ export default class Ml {
   async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -120,13 +141,23 @@ export default class Ml {
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarResponse>
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -152,13 +183,23 @@ export default class Ml {
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarEventResponse>
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id', 'event_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -185,13 +226,23 @@ export default class Ml {
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarJobResponse>
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id', 'job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -218,13 +269,23 @@ export default class Ml {
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDataFrameAnalyticsResponse>
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -250,13 +311,23 @@ export default class Ml {
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDatafeedResponse>
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -283,17 +354,28 @@ export default class Ml {
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['requests_per_second', 'timeout']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -326,13 +408,23 @@ export default class Ml {
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<T.MlDeleteFilterResponse>
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['filter_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -358,13 +450,23 @@ export default class Ml {
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<T.MlDeleteForecastResponse>
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'forecast_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -398,13 +500,23 @@ export default class Ml {
   async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<T.MlDeleteJobResponse>
   async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -430,13 +542,23 @@ export default class Ml {
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlDeleteModelSnapshotResponse>
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -463,13 +585,23 @@ export default class Ml {
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlDeleteTrainedModelResponse>
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
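
For endpoints with named body parameters, `body` is now created lazily (`body = body ?? {}` inside the loop) rather than eagerly, so it stays `undefined` when neither a named body key nor a raw `params.body` is supplied, and downstream code can distinguish "no body" from "empty body" without inspecting keys. A reduced sketch of that idea (the `collectBody` helper is invented for illustration):

// Why the lazy `body = body ?? {}` matters: `undefined` now means "no body",
// instead of always starting from an empty object.
function collectBody (params: Record<string, any>, acceptedBody: string[]): Record<string, any> | undefined {
  let body: Record<string, any> | undefined
  for (const key of Object.keys(params)) {
    if (acceptedBody.includes(key)) {
      body = body ?? {} // created only when the first named body key appears
      body[key] = params[key]
    }
  }
  return body
}
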
{ ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -529,17 +671,28 @@ export default class Ml {
   async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -563,16 +716,27 @@ export default class Ml {
   async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['evaluation', 'index', 'query']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -596,17 +760,28 @@ export default class Ml {
   async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -640,16 +815,27 @@ export default class Ml {
   async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'skip_time', 'start']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -676,16 +862,27 @@ export default class Ml {
   async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['duration', 'expires_in', 'max_model_memory']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -712,16 +909,27 @@ export default class Ml {
   async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'timestamp']
     const acceptedBody: string[] = ['anomaly_score', 'desc', 'end', 'exclude_interim', 'expand', 'page', 'sort', 'start']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -755,13 +963,23 @@ export default class Ml {
   async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<T.MlGetCalendarEventsResponse>
   async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -788,17 +1006,28 @@ export default class Ml {
   async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id']
     const acceptedBody: string[] = ['page']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -832,16 +1061,27 @@ export default class Ml {
   async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'category_id']
     const acceptedBody: string[] = ['page']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
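
The other recurring change is the loop guard: `} else {` becomes `} else if (key !== 'body' && key !== 'querystring') {`, so the two raw escape-hatch keys are consumed by the initialisers above and never copied into the query string. A tiny self-contained check of the behaviour (parameter names are made up; `jobId` stands in for an accepted path key):

// Without the guard, params.body / params.querystring would themselves be
// serialised into the URL, e.g. `?body=[object Object]&querystring=...`.
const params: Record<string, any> = { jobId: 'j1', body: { desc: true }, querystring: { from: 0 } }
const querystring: Record<string, any> = { ...params.querystring }
for (const key in params) {
  if (key !== 'body' && key !== 'querystring' && key !== 'jobId') {
    querystring[key] = params[key]
  }
}
// querystring is now { from: 0 } — the raw objects stayed out of the URL
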
{ ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -915,14 +1165,24 @@ export default class Ml {
   async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
   async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -955,14 +1215,24 @@ export default class Ml {
   async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDatafeedStatsResponse>
   async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -995,14 +1265,24 @@ export default class Ml {
   async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise<T.MlGetDatafeedsResponse>
   async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1035,14 +1315,24 @@ export default class Ml {
   async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise<T.MlGetFiltersResponse>
   async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['filter_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1076,16 +1366,27 @@ export default class Ml {
   async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['page']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1111,14 +1412,24 @@ export default class Ml {
   async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetJobStatsResponse>
   async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1151,14 +1462,24 @@ export default class Ml {
   async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise<T.MlGetJobsResponse>
   async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1191,14 +1512,24 @@ export default class Ml {
   async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetMemoryStatsResponse>
   async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['node_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1231,13 +1562,23 @@ export default class Ml {
   async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetModelSnapshotUpgradeStatsResponse>
   async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1265,16 +1606,27 @@ export default class Ml {
   async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id', 'snapshot_id']
     const acceptedBody: string[] = ['desc', 'end', 'page', 'sort', 'start']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1309,16 +1661,27 @@ export default class Ml {
   async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['allow_no_match', 'bucket_span', 'end', 'exclude_interim', 'overall_score', 'start', 'top_n']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1345,16 +1708,27 @@ export default class Ml {
   async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['desc', 'end', 'exclude_interim', 'page', 'record_score', 'sort', 'start']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
      if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
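
Note the resulting precedence, as I read the new code: a raw `params.body` object is spread first, and named body parameters are written into the result afterwards, so the named parameters win on key collisions (the same holds for `params.querystring` versus regular query parameters). A tiny illustration, not taken from the patch:

// Hypothetical precedence check for the merge order introduced above.
const userBody = { page: { size: 10 }, desc: false }
const body: Record<string, any> = { ...userBody }
body.desc = true // a named parameter such as `desc: true` overwrites the raw body value
// body is now { page: { size: 10 }, desc: true }
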
{}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1420,14 +1804,24 @@ export default class Ml {
   async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetTrainedModelsStatsResponse>
   async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1461,16 +1855,27 @@ export default class Ml {
   async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['model_id']
     const acceptedBody: string[] = ['docs', 'inference_config']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1496,14 +1901,24 @@ export default class Ml {
   async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise<T.MlInfoResponse>
   async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1527,16 +1942,27 @@ export default class Ml {
   async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['timeout']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1563,16 +1989,27 @@ export default class Ml {
   async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id']
     const acceptedBody: string[] = ['events']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1599,16 +2036,17 @@ export default class Ml {
   async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['job_id']
     const acceptedBody: string[] = ['data']
-    const querystring: Record<string, any> = {}
-    let body: any
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+    let body: any = params.body ?? undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
         // @ts-expect-error
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1635,17 +2073,28 @@ export default class Ml {
   async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['config']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1679,17 +2128,28 @@ export default class Ml {
   async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
     const acceptedBody: string[] = ['datafeed_config', 'job_config']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1723,16 +2183,27 @@ export default class Ml {
   async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id']
     const acceptedBody: string[] = ['job_ids', 'description']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1758,13 +2229,23 @@ export default class Ml {
   async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise<T.MlPutCalendarJobResponse>
   async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['calendar_id', 'job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1792,16 +2273,27 @@ export default class Ml {
   async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', '_meta', 'model_memory_limit', 'source', 'headers', 'version']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1828,16 +2320,27 @@ export default class Ml {
   async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['datafeed_id']
     const acceptedBody: string[] = ['aggregations', 'aggs', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1864,16 +2367,27 @@ export default class Ml {
   async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['filter_id']
     const acceptedBody: string[] = ['description', 'items']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1900,16 +2414,27 @@ export default class Ml {
   async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'job_id', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ??
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1936,16 +2461,27 @@ export default class Ml { async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'platform_architecture', 'tags', 'prefix_strings'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1971,13 +2507,23 @@ export default class Ml { async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_alias', 'model_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2005,16 +2551,27 @@ export default class Ml { async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id', 'part'] const acceptedBody: string[] = ['definition', 'total_definition_length', 'total_parts'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2042,16 +2599,27 @@ export default class Ml { async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] const acceptedBody: string[] = ['vocabulary', 'merges', 'scores'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2077,13 +2645,23 @@ export default class Ml { async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2110,16 +2688,27 @@ export default class Ml { async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'snapshot_id'] const acceptedBody: string[] = ['delete_intervening_results'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2146,14 +2735,24 @@ export default class Ml { async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2176,13 +2775,23 @@ export default class Ml { async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2209,16 +2818,27 @@ export default class Ml { async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] const acceptedBody: string[] = ['end', 'start', 'timeout'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2244,13 +2864,23 @@ export default class Ml { async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2276,13 +2906,23 @@ export default class Ml { async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2309,16 +2949,27 @@ export default class Ml { async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2344,13 +2995,23 @@ export default class Ml { async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2377,16 +3038,27 @@ export default class Ml { async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2413,16 +3085,27 @@ export default class Ml { async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2449,16 +3132,27 @@ export default class Ml { async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['filter_id'] const acceptedBody: string[] = ['add_items', 'description', 'remove_items'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2485,16 +3179,27 @@ export default class Ml { async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id'] const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'model_prune_window', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2521,16 +3226,27 @@ export default class Ml { async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'snapshot_id'] const acceptedBody: string[] = ['description', 'retain'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2558,16 +3274,27 @@ export default class Ml { async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] const acceptedBody: string[] = ['number_of_allocations'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2593,13 +3320,23 @@ export default class Ml { async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2627,17 +3364,28 @@ export default class Ml { async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_id', 'model_snapshot_retention_days', 'results_index_name'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2661,16 +3409,17 @@ export default class Ml { async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['detector'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index 70360e38e..c4a09fc33 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -53,16 +53,17 @@ export default class Monitoring { async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['type'] const acceptedBody: string[] = ['operations'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? 
undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts index 7575ba44c..575e95b19 100644 --- a/src/api/api/msearch.ts +++ b/src/api/api/msearch.ts @@ -47,16 +47,17 @@ export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['searches'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts index 9247cf4b9..54ffc9d32 100644 --- a/src/api/api/msearch_template.ts +++ b/src/api/api/msearch_template.ts @@ -47,16 +47,17 @@ export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['search_templates'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts index 0c2d4880c..fef6e4592 100644 --- a/src/api/api/mtermvectors.ts +++ b/src/api/api/mtermvectors.ts @@ -47,17 +47,28 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['docs', 'ids'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index 3cb956ad5..27ca13040 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -52,13 +52,23 @@ export default class Nodes { async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id', 'max_archive_version'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -85,13 +95,23 @@ export default class Nodes { async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -117,14 +137,24 @@ export default class Nodes { async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -157,14 +187,24 @@ export default class Nodes { async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id', 'metric'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -205,17 +245,28 @@ export default class Nodes { async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] const acceptedBody: string[] = ['secure_settings_password'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -248,14 +299,24 @@ export default class Nodes { async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id', 'metric', 'index_metric'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -302,14 +363,24 @@ export default class Nodes { async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id', 'metric'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts index cf566cc3c..685e46870 100644 --- a/src/api/api/open_point_in_time.ts +++ b/src/api/api/open_point_in_time.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. + * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time. Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with `search_after`. IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. **Keeping point in time alive** The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request. Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed they are deleted.
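Taken together, the flow this doc comment describes (open a PIT, page with `search_after` while extending `keep_alive`, then close the PIT) looks roughly like this from the JavaScript client side. This is a minimal sketch, not generated code: the index name, page size, keep-alive values, and sort field are illustrative.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function pagedPitSearch (): Promise<void> {
  // Open the point in time; keep_alive only needs to cover the next request.
  const { id } = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
  try {
    let searchAfter: any[] | undefined
    while (true) {
      const response = await client.search({
        // No `index`, `routing`, or `preference` here: they come from the PIT.
        pit: { id, keep_alive: '1m' }, // extends the PIT's time to live
        size: 1000,
        sort: ['_shard_doc'],
        search_after: searchAfter
      })
      const hits = response.hits.hits
      if (hits.length === 0) break
      // Use the sort values of the last hit as the cursor for the next page.
      searchAfter = hits[hits.length - 1].sort
    }
  } finally {
    // Close the PIT so its segments and file handles can be released.
    await client.closePointInTime({ id })
  }
}
```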
However, open point-in-times prevent the old segments from being deleted since they are still in use. TIP: Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles. Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. Note that a point-in-time doesn't prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html | Elasticsearch API documentation} */ export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -47,16 +47,27 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['index_filter'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts index c9a086011..277e3c725 100644 --- a/src/api/api/ping.ts +++ b/src/api/api/ping.ts @@ -46,14 +46,24 @@ export default async function PingApi (this: That, params?: T.PingRequest, optio export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/profiling.ts b/src/api/api/profiling.ts index 2abd1c907..75f2d46cc 100644 --- a/src/api/api/profiling.ts +++ b/src/api/api/profiling.ts @@ -52,14 +52,24 @@ export default class Profiling { async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -81,14 +91,24 @@ export default class Profiling { async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -110,14 +130,24 @@ export default class Profiling { async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -139,14 +169,24 @@ export default class Profiling { async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts index f42e42c3b..a989cf966 100644 --- a/src/api/api/put_script.ts +++ b/src/api/api/put_script.ts @@ -47,16 +47,27 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'context'] const acceptedBody: string[] = ['script'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts index ff826dd38..efddebcc9 100644 --- a/src/api/api/query_rules.ts +++ b/src/api/api/query_rules.ts @@ -52,13 +52,23 @@ export default class QueryRules { async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['ruleset_id', 'rule_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -85,13 +95,23 @@ export default class QueryRules { async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['ruleset_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -117,13 +137,23 @@ export default class QueryRules { async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['ruleset_id', 'rule_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -150,13 +180,23 @@ export default class QueryRules { async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['ruleset_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -182,14 +222,24 @@ export default class QueryRules { async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -213,16 +263,27 @@ export default class QueryRules { async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['ruleset_id', 'rule_id'] const acceptedBody: string[] = ['type', 'criteria', 'actions', 'priority'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -250,16 +311,27 @@ export default class QueryRules { async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['ruleset_id'] const acceptedBody: string[] = ['rules'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -286,16 +358,27 @@ export default class QueryRules { async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['ruleset_id'] const acceptedBody: string[] = ['match_criteria'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts index bafa83313..890b2eec2 100644 --- a/src/api/api/rank_eval.ts +++ b/src/api/api/rank_eval.ts @@ -47,16 +47,27 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['requests', 'metric'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 4ef63245c..74f29853c 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Reindex documents. Copies documents from a source to a destination. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. + * Reindex documents. Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time. If the Elasticsearch security features are enabled, you must have the following security privileges: * The `read` index privilege for the source data stream, index, or alias. * The `write` index privilege for the destination data stream, index, or index alias. * To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. * If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled. The `dest` element can be configured like the index API to control optimistic concurrency control. Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict. IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream. By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. 
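To make the `conflicts: 'proceed'` and `op_type: 'create'` semantics above concrete, here is a minimal sketch of a reindex call through this client. It assumes an already-configured `Client` instance and existing source/destination indices; the connection details and index names are placeholders.

```
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details for the sketch.
const client = new Client({ node: 'https://localhost:9200/' })

async function reindexAppendOnly (): Promise<void> {
  // `op_type: 'create'` only writes documents missing from the destination;
  // existing IDs raise version conflicts. With `conflicts: 'proceed'` those
  // conflicts are counted in the response instead of aborting the operation.
  const response = await client.reindex({
    source: { index: 'my-source-index' },
    dest: { index: 'my-dest-index', op_type: 'create' },
    conflicts: 'proceed'
  })
  console.log(response.created, response.version_conflicts)
}
```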
Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. NOTE: The reindex API makes no effort to handle ID collisions. The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. Instead, make sure that IDs are unique by using a script. **Running reindex asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `_tasks/<task_id>`. **Reindex from multiple sources** If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. That way you can resume the process if there are any errors by removing the partially completed source and starting over. It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. For example, you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. Requests are throttled by padding each batch with a wait time. To turn off throttling, set `requests_per_second` to `-1`. The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". **Slicing** Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. You can slice a reindex request manually by providing a slice ID and total number of slices to each request. You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use. Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: * You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling.
* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. * Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use the following guidelines. Query performance is most efficient when the number of slices is equal to the number of shards in the index. If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. Indexing performance scales linearly across available resources with the number of slices. Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. **Modify documents during reindexing** Like `_update_by_query`, reindex operations support a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document's metadata. Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body. Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. Think of the possibilities! Just be careful; you are able to change: * `_id` * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. **Reindex from remote** Reindex supports reindexing from a remote Elasticsearch cluster. The `host` parameter must contain a scheme, host, port, and optional path. The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. There are a range of settings available to configure the behavior of the HTTPS connection. When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. 
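A hedged sketch of the remote form described above, assuming an already-configured `client` and a local cluster whose `elasticsearch.yml` already lists the remote host in `reindex.remote.whitelist`; the host, credentials, and index names are all placeholders.

```
// Sketch only: every identifier below is illustrative.
await client.reindex({
  source: {
    remote: {
      host: 'https://otherhost:9200/',  // placeholder remote cluster
      username: 'remote_user',       // placeholder credentials
      password: 'remote_password'
    },
    index: 'source-index',
    // Remote reindex buffers responses on-heap (100mb cap), so a smaller
    // batch size helps when the remote documents are very large.
    size: 100
  },
  dest: { index: 'dest-index' }
})
```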
It can be set to a comma delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"] ``` The list of allowed hosts must be configured on any nodes that will coordinate the reindex. This feature should work with remote clusters of any version of Elasticsearch. This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. WARNING: Elasticsearch does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you'll need to use a smaller batch size. It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html | Elasticsearch API documentation} */ export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -47,16 +47,27 @@ export default async function ReindexApi (this: That, params: T.ReindexRequest, export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts index 5cd9e6d88..9653803eb 100644 --- a/src/api/api/reindex_rethrottle.ts +++ b/src/api/api/reindex_rethrottle.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Throttle a reindex operation. Change the number of requests per second for a particular reindex operation. + * Throttle a reindex operation. Change the number of requests per second for a particular reindex operation. 
For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html | Elasticsearch API documentation} */ export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -46,13 +46,23 @@ export default async function ReindexRethrottleApi (this: That, params: T.Reinde export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index 3a55809e1..650ba34b1 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -47,17 +47,28 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['file', 'params', 'source'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index 36fbbee92..16401f165 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -52,13 +52,23 @@ export default class Rollup { async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,14 +94,24 @@ export default class Rollup { async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -124,14 +144,24 @@ export default class Rollup { async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -164,13 +194,23 @@ export default class Rollup { async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -197,16 +237,27 @@ export default class Rollup { async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index', 'timeout', 'headers'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -233,16 +284,27 @@ export default class Rollup { async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'size'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -268,13 +330,23 @@ export default class Rollup { async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -300,13 +372,23 @@ export default class Rollup { async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index 25df53762..bbafbeff1 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Run a script. Runs a script and returns a result. + * Run a script. Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. Each context requires a script, but additional parameters depend on the context you're using for that script. * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html | Elasticsearch API documentation} */ export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -47,17 +47,28 @@ export default async function ScriptsPainlessExecuteApi (this export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['context', 'context_setup', 'script'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts index b2ccbb07a..e6b3fb611 100644 --- a/src/api/api/scroll.ts +++ b/src/api/api/scroll.ts @@ -47,16 +47,27 @@ export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['scroll', 'scroll_id'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/search.ts b/src/api/api/search.ts index ec845a47c..11294d979 100644 --- a/src/api/api/search.ts +++ b/src/api/api/search.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. + * Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices. **Search slicing** When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. 
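The slice-and-PIT behavior described above can be exercised as follows; a sketch assuming an already-configured `client` and an existing `my-index`, with two slices consumed sequentially for brevity.

```
// Open one point in time and reuse its ID for every slice; as noted above,
// mixing PIT IDs across slices can overlap or miss documents.
const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })

for (let sliceId = 0; sliceId < 2; sliceId++) {
  const result = await client.search({
    pit: { id: pit.id, keep_alive: '1m' },
    slice: { id: sliceId.toString(), max: 2 },
    query: { match_all: {} }
  })
  console.log(`slice ${sliceId}: ${result.hits.hits.length} hits`)
}

await client.closePointInTime({ id: pit.id })
```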
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html | Elasticsearch API documentation} */ export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -47,12 +47,23 @@ export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'rank', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'retriever', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) { // eslint-disable-line querystring[key] = params[key] } else { @@ -61,7 +72,7 @@ export default async function SearchApi async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,13 +94,23 @@ export default class SearchApplication { async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -116,13 +136,23 @@ export default class SearchApplication { async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -148,14 +178,24 @@ export default class SearchApplication { async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -188,14 +228,24 @@ export default class SearchApplication { async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -219,16 +269,17 @@ export default class SearchApplication { async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['collection_name', 'event_type'] const acceptedBody: string[] = ['payload'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -256,16 +307,17 @@ export default class SearchApplication { async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['search_application'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -291,13 +343,23 @@ export default class SearchApplication { async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -324,16 +386,27 @@ export default class SearchApplication { async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['params'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -360,16 +433,27 @@ export default class SearchApplication { async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['params'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index c76751fbb..54e002d7a 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -47,16 +47,27 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y'] const acceptedBody: string[] = ['aggs', 'buffer', 'exact_bounds', 'extent', 'fields', 'grid_agg', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort', 'track_total_hits', 'with_labels'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts index e5d724e02..544660325 100644 --- a/src/api/api/search_shards.ts +++ b/src/api/api/search_shards.ts @@ -46,14 +46,24 @@ export default async function SearchShardsApi (this: That, params?: T.SearchShar export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index 30c828f0a..2bcef664e 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -47,17 +47,28 @@ export default async function SearchTemplateApi (this: That export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['explain', 'id', 'params', 'profile', 'source'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 037bcca8c..d26592c27 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -52,14 +52,24 @@ export default class SearchableSnapshots { async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -92,14 +102,24 @@ export default class SearchableSnapshots { async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -133,16 +153,27 @@ export default class SearchableSnapshots { async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] const acceptedBody: string[] = ['index', 'renamed_index', 'index_settings', 'ignore_index_settings'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -169,14 +200,24 @@ export default class SearchableSnapshots { async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 8f2309225..201d2c550 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -44,7 +44,7 @@ export default class Security { } /** - * Activate a user profile. Create or update a user profile on behalf of another user. + * Activate a user profile. Create or update a user profile on behalf of another user. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. Elastic reserves the right to change or remove this feature in future releases without prior notice. This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name,` `roles`, and the authentication realm. For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. When updating a profile document, the API enables the document if it was disabled. 
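As a sketch of the two grant shapes this endpoint accepts (assuming an already-configured `client`; the credentials and token below are placeholders):

```
// Password grant: the profile is created for the end user whose
// credentials are presented, not for the calling application.
await client.security.activateUserProfile({
  grant_type: 'password',
  username: 'jacknich',                  // placeholder end user
  password: 'l0ng-r4nd0m-p@ssw0rd'       // placeholder password
})

// Access-token grant: the same operation driven by an existing token.
await client.security.activateUserProfile({
  grant_type: 'access_token',
  access_token: 'dGhpcyBpcyBub3QgYSB0b2tlbg==' // placeholder token
})
```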
Any updates do not change existing content for either the `labels` or `data` fields. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-activate-user-profile.html | Elasticsearch API documentation} */ async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -53,16 +53,27 @@ export default class Security { async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['access_token', 'grant_type', 'password', 'username'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -85,14 +96,24 @@ export default class Security { async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -116,16 +137,27 @@ export default class Security { async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['names'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -149,16 +181,27 @@ export default class Security { async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['roles'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -182,16 +225,27 @@ export default class Security { async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['expiration', 'ids', 'metadata', 'role_descriptors'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -215,17 +269,28 @@ export default class Security { async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['username'] const acceptedBody: string[] = ['password', 'password_hash'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -258,13 +323,23 @@ export default class Security { async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['ids'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -290,13 +365,23 @@ export default class Security { async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['application'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -314,7 +399,7 @@ export default class Security { } /** - * Clear the user cache. Evict users from the user cache. You can completely clear the cache or evict specific users. + * Clear the user cache. Evict users from the user cache. You can completely clear the cache or evict specific users. User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. There are realm settings that you can use to configure the user cache. For more information, refer to the documentation about controlling the user cache. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-cache.html | Elasticsearch API documentation} */ async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -322,13 +407,23 @@ export default class Security { async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['realms'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -354,13 +449,23 @@ export default class Security { async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -378,7 +483,7 @@ export default class Security { } /** - * Clear service account token caches. Evict a subset of all entries from the service account token caches. + * Clear service account token caches. Evict a subset of all entries from the service account token caches. Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. This API clears matching entries from both caches. The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-service-token-caches.html | Elasticsearch API documentation} */ async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -386,13 +491,23 @@ export default class Security { async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['namespace', 'service', 'name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -412,7 +527,7 @@ export default class Security { } /** - * Create an API key. Create an API key for access without requiring basic authentication. 
A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. + * Create an API key. Create an API key for access without requiring basic authentication. IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-api-key.html | Elasticsearch API documentation} */ async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -421,17 +536,28 @@ export default class Security { async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['expiration', 'name', 'role_descriptors', 'metadata'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -455,16 +581,27 @@ export default class Security { async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['access', 'expiration', 'metadata', 'name'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -479,7 +616,7 @@ export default class Security { } /** - * Create a service account token. Create a service accounts token for access without requiring basic authentication. + * Create a service account token. 
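// Editor's usage sketch (not part of the upstream patch): creating an API key
// with an expiration and a restricted role descriptor, as described by the
// createApiKey documentation above. Assumes a configured `client`; the key name
// and privileges are illustrative.
//
//   const key = await client.security.createApiKey({
//     name: 'my-api-key',
//     expiration: '1d',
//     role_descriptors: {
//       'role-a': { cluster: ['monitor'] }
//     }
//   })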
Create a service account token for access without requiring basic authentication. NOTE: Service account tokens never expire. You must actively delete them if they are no longer needed.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-service-token.html | Elasticsearch API documentation}
   */
  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise
  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise
  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise {
    const acceptedPath: string[] = ['namespace', 'service', 'name']
-    const querystring: Record = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -529,16 +676,27 @@ export default class Security {
  async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise {
    const acceptedPath: string[] = []
    const acceptedBody: string[] = ['x509_certificate_chain']
-    const querystring: Record = {}
-    const body: Record = {}
+    const userQuery = params?.querystring
+    const querystring: Record = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
    for (const key in params) {
      if (acceptedBody.includes(key)) {
+        body = body ?? {}
        // @ts-expect-error
        body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
        continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -553,7 +711,7 @@
  }

  /**
-   * Delete application privileges.
+   * Delete application privileges. To use this API, you must have one of the following privileges: * The `manage_security` cluster privilege (or a greater privilege such as `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-privilege.html | Elasticsearch API documentation}
   */
  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise
  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise
  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise {
    const acceptedPath: string[] = ['application', 'name']
-    const querystring: Record = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record = userQuery != null ?
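// Editor's usage sketch (not part of the upstream patch): creating a service
// account token for the only currently available service account. Assumes a
// configured `client`; the token name is illustrative.
//
//   const token = await client.security.createServiceToken({
//     namespace: 'elastic',
//     service: 'fleet-server',
//     name: 'my-token'
//   })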
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -586,7 +754,7 @@ export default class Security { } /** - * Delete roles. Delete roles in the native realm. + * Delete roles. Delete roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The delete roles API cannot remove roles that are defined in roles files. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-role.html | Elasticsearch API documentation} */ async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -594,13 +762,23 @@ export default class Security { async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -618,7 +796,7 @@ export default class Security { } /** - * Delete role mappings. + * Delete role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The delete role mappings API cannot remove role mappings that are defined in role mapping files. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-role-mapping.html | Elasticsearch API documentation} */ async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -626,13 +804,23 @@ export default class Security { async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -658,13 +846,23 @@ export default class Security { async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['namespace', 'service', 'name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -692,13 +890,23 @@ export default class Security { async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['username'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -716,7 +924,7 @@ export default class Security { } /** - * Disable users. Disable users in the native realm. + * Disable users. Disable users in the native realm. By default, when you create users, they are enabled. You can use this API to revoke a user's access to Elasticsearch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user.html | Elasticsearch API documentation} */ async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -724,13 +932,23 @@ export default class Security { async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['username'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
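// Editor's usage sketch (not part of the upstream patch): revoking a user's
// access by disabling it in the native realm, per the disableUser documentation
// above. Assumes a configured `client`; the username is illustrative.
//
//   await client.security.disableUser({ username: 'jacknich' })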
{ ...userQuery } : {}
+
+    let body: Record | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -748,7 +966,7 @@
  }

  /**
-   * Disable a user profile. Disable user profiles so that they are not visible in user profile searches.
+   * Disable a user profile. Disable user profiles so that they are not visible in user profile searches. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it's not visible in these searches. To re-enable a disabled user profile, use the enable user profile API.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user-profile.html | Elasticsearch API documentation}
   */
  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise
  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise
  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise {
    const acceptedPath: string[] = ['uid']
-    const querystring: Record = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -780,7 +1008,7 @@
  }

  /**
-   * Enable users. Enable users in the native realm.
+   * Enable users. Enable users in the native realm. By default, when you create users, they are enabled.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user.html | Elasticsearch API documentation}
   */
  async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise
  async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise
  async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise {
    const acceptedPath: string[] = ['username']
-    const querystring: Record = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -812,7 +1050,7 @@ export default class Security { } /** - * Enable a user profile. Enable user profiles to make them visible in user profile searches. + * Enable a user profile. Enable user profiles to make them visible in user profile searches. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. When you activate a user profile, it's automatically enabled and visible in user profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user-profile.html | Elasticsearch API documentation} */ async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -820,13 +1058,23 @@ export default class Security { async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['uid'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -844,7 +1092,7 @@ export default class Security { } /** - * Enroll Kibana. Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. + * Enroll Kibana. Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. NOTE: This API is currently intended for internal use only by Kibana. Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. 
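// Editor's usage sketch (not part of the upstream patch): the enrollKibana
// call takes no parameters; the response carries the service token and CA
// certificate material Kibana needs. Assumes a configured `client`.
//
//   const enrollment = await client.security.enrollKibana()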
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-kibana-enrollment.html | Elasticsearch API documentation} */ async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -852,14 +1100,24 @@ export default class Security { async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -874,7 +1132,7 @@ export default class Security { } /** - * Enroll a node. Enroll a new node to allow it to join an existing cluster with security features enabled. + * Enroll a node. Enroll a new node to allow it to join an existing cluster with security features enabled. The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-node-enrollment.html | Elasticsearch API documentation} */ async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -882,14 +1140,24 @@ export default class Security { async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -912,14 +1180,24 @@ export default class Security { async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -942,14 +1220,24 @@ export default class Security { async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -964,7 +1252,7 @@ export default class Security { } /** - * Get application privileges. + * Get application privileges. To use this API, you must have one of the following privileges: * The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-privileges.html | Elasticsearch API documentation} */ async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -972,14 +1260,24 @@ export default class Security { async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['application', 'name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1008,7 +1306,7 @@ export default class Security { } /** - * Get roles. Get roles in the native realm. + * Get roles. Get roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files. 
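// Editor's usage sketch (not part of the upstream patch): fetching a single
// native-realm role by name, per the getRole documentation above; omit `name`
// to list all roles. Assumes a configured `client`; the role name is illustrative.
//
//   const roles = await client.security.getRole({ name: 'my_admin_role' })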
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-role.html | Elasticsearch API documentation} */ async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1016,14 +1314,24 @@ export default class Security { async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1056,14 +1364,24 @@ export default class Security { async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1088,7 +1406,7 @@ export default class Security { } /** - * Get service accounts. Get a list of service accounts that match the provided path parameters. + * Get service accounts. Get a list of service accounts that match the provided path parameters. NOTE: Currently, only the `elastic/fleet-server` service account is available. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-service-accounts.html | Elasticsearch API documentation} */ async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1096,14 +1414,24 @@ export default class Security { async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['namespace', 'service'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{}
    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -1132,7 +1460,7 @@
  }

  /**
-   * Get service account credentials.
+   * Get service account credentials. To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`). The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster. NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-service-credentials.html | Elasticsearch API documentation}
   */
  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise
  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise {
    const acceptedPath: string[] = ['namespace', 'service']
-    const querystring: Record = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -1165,7 +1503,7 @@
  }

  /**
-   * Get security index settings. Get the user-configurable settings for the security internal index (`.security` and associated indices).
+   * Get security index settings. Get the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of the index settings (those that are user-configurable) will be shown. This includes: * `index.auto_expand_replicas` * `index.number_of_replicas`
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-settings.html | Elasticsearch API documentation}
   */
  async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
  async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise
  async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise {
    const acceptedPath: string[] = []
-    const querystring: Record = {}
-    const body = undefined
+    const userQuery = params?.querystring
+    const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1195,7 +1543,7 @@ export default class Security { } /** - * Get a token. Create a bearer token for access without requiring basic authentication. + * Get a token. Create a bearer token for access without requiring basic authentication. The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting. When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface. The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body. A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available. The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. That time period is defined by the `xpack.security.authc.token.timeout` setting. If you want to invalidate a token immediately, you can do so by using the invalidate token API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-token.html | Elasticsearch API documentation} */ async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1204,17 +1552,28 @@ export default class Security { async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['grant_type', 'scope', 'password', 'kerberos_ticket', 'refresh_token', 'username'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1237,14 +1596,24 @@ export default class Security { async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['username'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
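// Editor's usage sketch (not part of the upstream patch): requesting a bearer
// token with the password grant, per the getToken documentation above. Assumes
// a configured `client`; the credentials are illustrative.
//
//   const resp = await client.security.getToken({
//     grant_type: 'password',
//     username: 'test_admin',
//     password: 'x-pack-test-password'
//   })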
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1269,7 +1638,7 @@ export default class Security { } /** - * Get user privileges. + * Get user privileges. Get the security privileges for the logged in user. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. To check whether a user has a specific list of privileges, use the has privileges API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-privileges.html | Elasticsearch API documentation} */ async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1277,14 +1646,24 @@ export default class Security { async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1299,7 +1678,7 @@ export default class Security { } /** - * Get a user profile. Get a user's profile using the unique profile ID. + * Get a user profile. Get a user's profile using the unique profile ID. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-profile.html | Elasticsearch API documentation} */ async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1307,13 +1686,23 @@ export default class Security { async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['uid'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {}
+
+    let body: Record | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
    for (const key in params) {
      if (acceptedPath.includes(key)) {
        continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -1331,7 +1720,7 @@
  }

  /**
-   * Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user’s credentials. The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys.
+   * Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user's credentials. The supported user authentication credential types are: * username and password * Elasticsearch access tokens * JWTs The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. The API keys are created by the Elasticsearch API key service, which is automatically enabled. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys.
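// Editor's usage sketch (not part of the upstream patch): granting an API key
// on behalf of another user with the password grant type, per the grantApiKey
// documentation above. Assumes a configured `client`; the credentials and key
// name are illustrative.
//
//   const resp = await client.security.grantApiKey({
//     grant_type: 'password',
//     username: 'end-user',
//     password: 'end-user-password',
//     api_key: { name: 'on-behalf-of-key' }
//   })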
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-grant-api-key.html | Elasticsearch API documentation} */ async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1340,16 +1729,27 @@ export default class Security { async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password', 'run_as'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1364,7 +1764,7 @@ export default class Security { } /** - * Check user privileges. Determine whether the specified user has a specified list of privileges. + * Check user privileges. Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-has-privileges.html | Elasticsearch API documentation} */ async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1373,17 +1773,28 @@ export default class Security { async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['user'] const acceptedBody: string[] = ['application', 'cluster', 'index'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1408,7 +1819,7 @@ export default class Security { } /** - * Check user profile privileges. Determine whether the users associated with the specified user profile IDs have all the requested privileges. + * Check user profile privileges. Determine whether the users associated with the specified user profile IDs have all the requested privileges. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. 
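// Editor's usage sketch (not part of the upstream patch): checking the calling
// user's own cluster and index privileges, per the hasPrivileges documentation
// above. Assumes a configured `client`; the index name is illustrative.
//
//   const resp = await client.security.hasPrivileges({
//     cluster: ['monitor'],
//     index: [{ names: ['my-index'], privileges: ['read'] }]
//   })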
Elastic reserves the right to change or remove this feature in future releases without prior notice. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-has-privileges-user-profile.html | Elasticsearch API documentation} */ async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1417,16 +1828,27 @@ export default class Security { async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['uids', 'privileges'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1441,7 +1863,7 @@ export default class Security { } /** - * Invalidate API keys. This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` only allows deleting API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user’s identity. - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. + * Invalidate API keys. This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user's identity. - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. 
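// Editor's usage sketch (not part of the upstream patch): invalidating one of
// the caller's own API keys by id, per the invalidateApiKey documentation
// above. Assumes a configured `client`; the id is illustrative.
//
//   await client.security.invalidateApiKey({ ids: ['VuaCfGcBCdbkQm-e5aOx'], owner: true })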
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-api-key.html | Elasticsearch API documentation} */ async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1450,17 +1872,28 @@ export default class Security { async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['id', 'ids', 'name', 'owner', 'realm_name', 'username'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1475,7 +1908,7 @@ export default class Security { } /** - * Invalidate a token. The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. + * Invalidate a token. The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. NOTE: While all parameters are optional, at least one of them is required. More specifically, either one of `token` or `refresh_token` parameters is required. If none of these two are specified, then `realm_name` and/or `username` need to be specified. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-token.html | Elasticsearch API documentation} */ async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1484,17 +1917,28 @@ export default class Security { async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['token', 'refresh_token', 'realm_name', 'username'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1518,16 +1962,27 @@ export default class Security { async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['nonce', 'realm', 'redirect_uri', 'state'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1551,16 +2006,27 @@ export default class Security { async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['access_token', 'refresh_token'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1584,17 +2050,28 @@ export default class Security { async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['iss', 'login_hint', 'nonce', 'realm', 'state'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1609,7 +2086,7 @@ export default class Security { } /** - * Create or update application privileges. + * Create or update application privileges. To use this API, you must have one of the following privileges: * The `manage_security` cluster privilege (or a greater privilege such as `all`). 
* The "Manage Application Privileges" global privilege for the application being referenced in the request. Application names are formed from a prefix, with an optional suffix that conform to the following rules: * The prefix must begin with a lowercase ASCII letter. * The prefix must contain only ASCII letters or digits. * The prefix must be at least 3 characters long. * If the suffix exists, it must begin with either a dash `-` or `_`. * The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `*`. * No part of the name can contain whitespace. Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`. Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-privileges.html | Elasticsearch API documentation} */ async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1618,16 +2095,17 @@ export default class Security { async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['privileges'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1651,16 +2129,27 @@ export default class Security { async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'remote_indices', 'remote_cluster', 'metadata', 'run_as', 'description', 'transient_metadata'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1678,7 +2167,7 @@ export default class Security { } /** - * Create or update role mappings. Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. This API does not create roles. Rather, it maps users to existing roles. 
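// A short sketch of a put-privileges call that satisfies the naming rules
// listed above: application name `myapp-web` (lowercase prefix of at least
// three characters plus a `-` suffix) and actions containing `:` or `/`.
// The application, privilege names, and actions are hypothetical.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

async function putPrivilegesSketch (): Promise<void> {
  await client.security.putPrivileges({
    privileges: {
      'myapp-web': {
        read: { actions: ['data:read/*', 'action:login'] },
        write: { actions: ['data:write/*'] }
      }
    }
  })
}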
Roles can be created by using the create or update roles API or roles files. + * Create or update role mappings. Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. NOTE: This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files. **Role templates** The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. The `roles` field is used for this purpose. For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. The `role_templates` field is used for this purpose. NOTE: To use role templates successfully, the relevant scripting feature must be enabled. Otherwise, all attempts to create a role mapping with role templates fail. All of the user fields that are available in the role mapping rules are also available in the role templates. Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated. By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-role-mapping.html | Elasticsearch API documentation} */ async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1687,16 +2176,27 @@ export default class Security { async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'role_templates', 'rules', 'run_as'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1714,7 +2214,7 @@ export default class Security { } /** - * Create or update users. A password is required for adding a new user but is optional when updating an existing user. To change a user’s password without updating any other fields, use the change password API. + * Create or update users. Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. 
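// A sketch of the common case described above: everyone in a known LDAP group
// is granted a fixed role. The mapping name and group DN are placeholders;
// `role_templates` could replace `roles` where the role name must be computed
// from user fields.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

async function putRoleMappingSketch (): Promise<void> {
  await client.security.putRoleMapping({
    name: 'ldap-admins',
    enabled: true,
    roles: ['superuser'],
    rules: { field: { groups: 'cn=admin,dc=example,dc=com' } }
  })
}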
To change a user's password without updating any other fields, use the change password API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-user.html | Elasticsearch API documentation} */ async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1723,16 +2223,27 @@ export default class Security { async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['username', 'email', 'full_name', 'metadata', 'password', 'password_hash', 'roles', 'enabled'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1750,7 +2261,7 @@ export default class Security { } /** - * Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. + * Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-api-key.html | Elasticsearch API documentation} */ async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1759,17 +2270,28 @@ export default class Security { async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'from', 'sort', 'size', 'search_after'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1784,7 +2306,7 @@ export default class Security { } /** - * Find roles with a query. Get roles in a paginated manner. You can optionally filter the results with a query. + * Find roles with a query. 
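// A paginated query-API-keys sketch using the body parameters accepted above
// (`query`, `from`, `sort`, `size`, `search_after`). The query and sort values
// are illustrative.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

async function queryApiKeysSketch (): Promise<void> {
  const page = await client.security.queryApiKeys({
    query: { term: { invalidated: false } },
    sort: [{ creation: { order: 'desc' } }],
    size: 20
  })
  console.log(page.total, page.api_keys)
}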
Get roles in a paginated manner. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. You can optionally filter the results with a query. Also, the results can be paginated and sorted. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-role.html | Elasticsearch API documentation} */ async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1793,17 +2315,28 @@ export default class Security { async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1818,7 +2351,7 @@ export default class Security { } /** - * Find users with a query. Get information for users in a paginated manner. You can optionally filter the results with a query. + * Find users with a query. Get information for users in a paginated manner. You can optionally filter the results with a query. NOTE: As opposed to the get user API, built-in users are excluded from the result. This API is only for native users. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-user.html | Elasticsearch API documentation} */ async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1827,17 +2360,28 @@ export default class Security { async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1852,7 +2396,7 @@ export default class Security { } /** - * Authenticate SAML. Submits a SAML response message to Elasticsearch for consumption. + * Authenticate SAML. Submit a SAML response message to Elasticsearch for consumption. 
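// The query-role and query-user endpoints above accept the same pagination
// parameters; here is a sketch for roles, with an illustrative match query on
// the role description.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

async function queryRolesSketch (): Promise<void> {
  const page = await client.security.queryRole({
    query: { match: { description: 'read-only' } },
    sort: ['name'],
    from: 0,
    size: 25
  })
  console.log(page.total, page.roles)
}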
NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The SAML message that is submitted can be: * A response to a SAML authentication request that was previously created using the SAML prepare authentication API. * An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow. In either case, the SAML message needs to be a base64 encoded XML document with a root element of `<Response>`. After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-authenticate.html | Elasticsearch API documentation}
   */
  async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecuritySamlAuthenticateResponse>
@@ -1861,16 +2405,27 @@ export default class Security {
   async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = []
     const acceptedBody: string[] = ['content', 'ids', 'realm']
-    const querystring: Record<string, any> = {}
-    const body: Record<string, any> = {}
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
+        body = body ?? {}
         // @ts-expect-error
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1885,7 +2440,7 @@ export default class Security {
   }
 
   /**
-   * Logout of SAML completely. Verifies the logout response sent from the SAML IdP.
+   * Logout of SAML completely. Verifies the logout response sent from the SAML IdP. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. This API verifies the response by ensuring the content is relevant and validating its signature. An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller of this API must prepare the request accordingly so that this API can handle either of them.
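// A sketch of the token exchange described above: `content` carries the
// base64-encoded SAML response and `ids` the request ID saved from the
// prepare step. Both arguments are supplied by the caller.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

async function samlAuthenticateSketch (samlResponseBase64: string, requestId: string): Promise<void> {
  const tokens = await client.security.samlAuthenticate({
    content: samlResponseBase64,
    ids: [requestId]
  })
  console.log(tokens.access_token, tokens.refresh_token)
}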
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-complete-logout.html | Elasticsearch API documentation} */ async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1894,16 +2449,27 @@ export default class Security { async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['realm', 'ids', 'query_string', 'content'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1918,7 +2484,7 @@ export default class Security { } /** - * Invalidate SAML. Submits a SAML LogoutRequest message to Elasticsearch for consumption. + * Invalidate SAML. Submit a SAML LogoutRequest message to Elasticsearch for consumption. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The logout request comes from the SAML IdP during an IdP initiated Single Logout. The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-invalidate.html | Elasticsearch API documentation} */ async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1927,16 +2493,27 @@ export default class Security { async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['acs', 'query_string', 'realm'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1951,7 +2528,7 @@ export default class Security { } /** - * Logout of SAML. Submits a request to invalidate an access token and refresh token. + * Logout of SAML. 
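// A sketch of handling an IdP-initiated logout with the invalidate API above:
// pass the query string of the LogoutRequest URL and redirect the user to the
// returned location. The realm name is a placeholder.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

async function samlInvalidateSketch (logoutQueryString: string): Promise<string> {
  const res = await client.security.samlInvalidate({
    query_string: logoutQueryString, // full query string from the IdP redirect
    realm: 'saml1'
  })
  return res.redirect // URL carrying the SAML LogoutResponse for the IdP
}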
Submits a request to invalidate an access token and refresh token. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. This API invalidates the tokens that were generated for a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-logout.html | Elasticsearch API documentation} */ async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1960,16 +2537,27 @@ export default class Security { async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['token', 'refresh_token'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1984,7 +2572,7 @@ export default class Security { } /** - * Prepare SAML authentication. Creates a SAML authentication request (``) as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. + * Prepare SAML authentication. Create a SAML authentication request (``) as a URL string based on the configuration of the respective SAML realm in Elasticsearch. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. This API returns a URL pointing to the SAML Identity Provider. You can use the URL to redirect the browser of the user in order to continue the authentication process. The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded. If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`. These parameters contain the algorithm used for the signature and the signature value itself. It also returns a random string that uniquely identifies this SAML Authentication request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. 
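// SP-initiated sign-on and sign-out with the two calls above: prepare a
// request, keep its `id` for the later authenticate step, then log out with
// the tokens obtained there. The realm and token values are placeholders.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

async function samlSpFlowSketch (): Promise<void> {
  // `redirect` is the IdP URL with the deflated, Base64-encoded SAMLRequest;
  // `id` must be stored and later passed as `ids` to samlAuthenticate.
  const prepared = await client.security.samlPrepareAuthentication({ realm: 'saml1' })
  console.log(prepared.redirect, prepared.id)

  // Once samlAuthenticate has produced tokens, end the session:
  await client.security.samlLogout({
    token: 'access-token-from-samlAuthenticate',
    refresh_token: 'refresh-token-from-samlAuthenticate'
  })
}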
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-prepare-authentication.html | Elasticsearch API documentation} */ async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1993,17 +2581,28 @@ export default class Security { async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['acs', 'realm', 'relay_state'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2018,7 +2617,7 @@ export default class Security { } /** - * Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 Service Provider. + * Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 Service Provider. The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-sp-metadata.html | Elasticsearch API documentation} */ async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2026,13 +2625,23 @@ export default class Security { async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['realm_name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2050,7 +2659,7 @@ export default class Security { } /** - * Suggest a user profile. Get suggestions for user profiles that match specified search criteria. + * Suggest a user profile. Get suggestions for user profiles that match specified search criteria. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. 
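// Generating Service Provider metadata for a SAML realm, per the description
// above; the realm name is a placeholder and the response carries the XML.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

async function spMetadataSketch (): Promise<string> {
  const res = await client.security.samlServiceProviderMetadata({ realm_name: 'saml1' })
  return res.metadata // XML document describing this Service Provider
}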
Elastic reserves the right to change or remove this feature in future releases without prior notice. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-suggest-user-profile.html | Elasticsearch API documentation} */ async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2059,17 +2668,28 @@ export default class Security { async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['name', 'size', 'data', 'hint'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2084,7 +2704,7 @@ export default class Security { } /** - * Update an API key. Updates attributes of an existing API key. Users can only update API keys that they created or that were granted to them. Use this API to update API keys created by the create API Key or grant API Key APIs. If you need to apply the same update to many API keys, you can use bulk update API Keys to reduce overhead. It’s not possible to update expired API keys, or API keys that have been invalidated by invalidate API Key. This API supports updates to an API key’s access scope and metadata. The access scope of an API key is derived from the `role_descriptors` you specify in the request, and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call. If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It’s not possible to use an API key as the authentication credential for this API. To update an API key, the owner user’s credentials are required. + * Update an API key. Update attributes of an existing API key. This API supports updates to an API key's access scope, expiration, and metadata. To use this API, you must have at least the `manage_own_api_key` cluster privilege. Users can only update API keys that they created or that were granted to them. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required. Use this API to update API keys created by the create API key or grant API Key APIs. If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. 
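// A sketch of the update-API-key call documented above. The key ID, role
// descriptor, and metadata are placeholders; as the note above explains,
// omitting `role_descriptors` still re-snapshots the owner's permissions.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

async function updateApiKeySketch (): Promise<void> {
  const res = await client.security.updateApiKey({
    id: 'my-api-key-id',
    role_descriptors: {
      'read-only-role': { indices: [{ names: ['logs-*'], privileges: ['read'] }] }
    },
    metadata: { environment: 'staging' }
  })
  console.log(res.updated) // false when the request changed nothing
}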
It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-api-key.html | Elasticsearch API documentation} */ async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2093,16 +2713,27 @@ export default class Security { async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['role_descriptors', 'metadata', 'expiration'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2120,7 +2751,7 @@ export default class Security { } /** - * Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. + * Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. To update an API key, the owner user's credentials are required. It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. This API supports updates to an API key's access scope, metadata, and expiration. The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. 
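// A sketch of updating a cross-cluster API key's access definition and
// metadata, per the doc above. The key ID, index patterns, and metadata are
// placeholders.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

async function updateCrossClusterApiKeySketch (): Promise<void> {
  await client.security.updateCrossClusterApiKey({
    id: 'my-cross-cluster-key-id',
    access: {
      search: [{ names: ['logs-*'] }]
    },
    metadata: { owner: 'platform-team' }
  })
}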
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-cross-cluster-api-key.html | Elasticsearch API documentation} */ async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2129,16 +2760,27 @@ export default class Security { async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['access', 'expiration', 'metadata'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2156,7 +2798,7 @@ export default class Security { } /** - * Update security index settings. Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified, for example `index.auto_expand_replicas` and `index.number_of_replicas`. If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use. + * Update security index settings. Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-settings.html | Elasticsearch API documentation} */ async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2165,17 +2807,28 @@ export default class Security { async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['security', 'security-profile', 'security-tokens'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2190,7 +2843,7 @@ export default class Security { } /** - * Update user profile data. Update specific data for the user profile that is associated with a unique ID. + * Update user profile data. Update specific data for the user profile that is associated with a unique ID. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. To use this API, you must have one of the following privileges: * The `manage_user_profile` cluster privilege. * The `update_profile_data` global privilege for the namespaces that are referenced in the request. This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. For both labels and data, content is namespaced by the top-level fields. The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-user-profile-data.html | Elasticsearch API documentation} */ async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2199,16 +2852,27 @@ export default class Security { async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['uid'] const acceptedBody: string[] = ['labels', 'data'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index 873402d5d..61a4ce6cb 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -52,13 +52,23 @@ export default class Shutdown { async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,14 +94,24 @@ export default class Shutdown { async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -125,16 +145,27 @@ export default class Shutdown { async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['node_id'] const acceptedBody: string[] = ['type', 'reason', 'allocation_delay', 'target_node_name'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts index f1ced5c0a..ee6e13de6 100644 --- a/src/api/api/simulate.ts +++ b/src/api/api/simulate.ts @@ -53,16 +53,27 @@ export default class Simulate { async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['docs', 'component_template_substitutions', 'index_template_subtitutions', 'mapping_addition', 'pipeline_substitutions'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index 7dd058206..9cb2542aa 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -52,13 +52,23 @@ export default class Slm { async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['policy_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,13 +94,23 @@ export default class Slm { async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['policy_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -116,14 +136,24 @@ export default class Slm { async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -146,14 +176,24 @@ export default class Slm { async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['policy_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -186,14 +226,24 @@ export default class Slm { async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -216,14 +266,24 @@ export default class Slm { async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -247,16 +307,27 @@ export default class Slm { async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['policy_id'] const acceptedBody: string[] = ['config', 'name', 'repository', 'retention', 'schedule'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -282,14 +353,24 @@ export default class Slm { async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -312,14 +393,24 @@ export default class Slm { async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index a6b98caaa..762afb7fb 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -52,13 +52,23 @@ export default class Snapshot { async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -85,16 +95,27 @@ export default class Snapshot { async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot', 'target_snapshot'] const acceptedBody: string[] = ['indices'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -123,16 +144,27 @@ export default class Snapshot { async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] const acceptedBody: string[] = ['expand_wildcards', 'feature_states', 'ignore_unavailable', 'include_global_state', 'indices', 'metadata', 'partial'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -160,16 +192,17 @@ export default class Snapshot { async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] const acceptedBody: string[] = ['repository'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -195,13 +228,23 @@ export default class Snapshot { async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -228,13 +271,23 @@ export default class Snapshot { async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -260,13 +313,23 @@ export default class Snapshot { async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -293,14 +356,24 @@ export default class Snapshot { async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -325,32 +398,42 @@ export default class Snapshot { } /** - * Analyzes a repository for correctness and performance - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * Analyze a snapshot repository. Analyze the performance characteristics and any incorrect behaviour found in a repository. 
The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions. There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. 
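As a rough illustration of the parameters described above, a minimal client-side sketch follows; the repository name `my_repository`, the node URL, and the parameter values are placeholder assumptions, not recommendations.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function analyzeRepository (): Promise<void> {
  // Start with small values, then re-run with progressively larger ones
  // (blob_count >= 2000, max_blob_size >= 2gb, ...) as suggested above.
  const response = await client.snapshot.repositoryAnalyze({
    name: 'my_repository',
    blob_count: 100,
    max_blob_size: '10mb',
    timeout: '120s' // be generous: a full analysis can take a long time
  })
  console.log(response)
}

analyzeRepository().catch(console.log)
```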
The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following: * Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. * Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. * Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. This indicates it behaves incorrectly in ways that the former version did not detect. You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. NOTE: This API may not work correctly in a mixed-version cluster. *Implementation details* NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution. For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. 
In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data. For some blob-level tasks, the executing node will cancel the write before it is complete. In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/repo-analysis-api.html | Elasticsearch API documentation} */ - async repositoryAnalyze (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO> - async repositoryAnalyze (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>> - async repositoryAnalyze (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO> - async repositoryAnalyze (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = ['repository'] - const querystring: Record<string, any> = {} - const body = undefined + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotRepositoryAnalyzeResponse> + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotRepositoryAnalyzeResponse, unknown>> + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise<T.SnapshotRepositoryAnalyzeResponse> + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise<any> { + const acceptedPath: string[] = ['name'] + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } - params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } const method = 'POST' - const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/_analyze` + const path = `/_snapshot/${encodeURIComponent(params.name.toString())}/_analyze` const meta: TransportRequestMetadata = { name: 'snapshot.repository_analyze', pathParts: { - repository: params.repository + name: params.name } } return await this.transport.request({ path, method, querystring, body, meta }, options) @@ -365,13 +448,23 @@ export default class Snapshot { async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -398,16 +491,27 @@ export default class Snapshot { async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] const acceptedBody: string[] = ['feature_states', 'ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -434,14 +538,24 @@ export default class Snapshot { async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['repository', 'snapshot'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -478,13 +592,23 @@ export default class Snapshot { async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index 286316bcb..df4bac5d9 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -53,16 +53,27 @@ export default class Sql { async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['cursor'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -85,13 +96,23 @@ export default class Sql { async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -117,13 +138,23 @@ export default class Sql { async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -149,13 +180,23 @@ export default class Sql { async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -182,17 +223,28 @@ export default class Sql { async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['allow_partial_search_results', 'catalog', 'columnar', 'cursor', 'fetch_size', 'field_multi_value_leniency', 'filter', 'index_using_frozen', 'keep_alive', 'keep_on_completion', 'page_timeout', 'params', 'query', 'request_timeout', 'runtime_mappings', 'time_zone', 'wait_for_completion_timeout'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -216,16 +268,27 @@ export default class Sql { async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['fetch_size', 'filter', 'query', 'time_zone'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts index 8ebb1a7ac..9f4b81c50 100644 --- a/src/api/api/ssl.ts +++ b/src/api/api/ssl.ts @@ -52,14 +52,24 @@ export default class Ssl { async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 107dae465..9f52fcff5 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -52,13 +52,23 @@ export default class Synonyms { async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,13 +94,23 @@ export default class Synonyms { async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['set_id', 'rule_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -117,13 +137,23 @@ export default class Synonyms { async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -149,13 +179,23 @@ export default class Synonyms { async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['set_id', 'rule_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -182,14 +222,24 @@ export default class Synonyms { async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -213,16 +263,27 @@ export default class Synonyms { async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['synonyms_set'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -249,16 +310,27 @@ export default class Synonyms { async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['set_id', 'rule_id'] const acceptedBody: string[] = ['synonyms'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index 51ec2731d..a906a58fd 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -44,7 +44,7 @@ export default class Tasks { } /** - * Cancel a task. A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. + * Cancel a task. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. 
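A hedged sketch of that cancellation workflow from the client side; the task ID is a made-up placeholder, and the response field access follows the generated types used in this diff.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function cancelAndInspect (taskId: string): Promise<void> {
  // Ask Elasticsearch to cancel the task; it may keep running briefly.
  await client.tasks.cancel({ task_id: taskId })

  // Poll the task info; `completed` flips once the task has actually stopped.
  const info = await client.tasks.get({ task_id: taskId })
  console.log(info.completed, info.task.action)

  // If it does not complete promptly, list other running tasks in detail
  // to see what work the cluster is doing instead.
  const running = await client.tasks.list({ detailed: true })
  console.log(running.nodes)
}

cancelAndInspect('oTUltX4IQMOUUVeiohTt8A:12345').catch(console.log)
```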
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TasksCancelResponse> @@ -52,14 +52,24 @@ export default class Tasks { async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise<T.TasksCancelResponse> async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = ['task_id'] - const querystring: Record<string, any> = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,7 +94,7 @@ } /** - * Get task information. Get information about a task currently running in the cluster. + * Get task information. Get information about a task currently running in the cluster. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. If the task identifier is not found, a 404 response code indicates that there are no resources that match the request. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TasksGetResponse> @@ -92,13 +102,23 @@ async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise<T.TasksGetResponse> async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = ['task_id'] - const querystring: Record<string, any> = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -116,7 +136,7 @@ } /** - * Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster. + * Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. **Identifying running tasks** The `X-Opaque-Id` header, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the `headers` field of the task information. This enables you to track certain calls or associate certain tasks with the client that started them. 
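In this client the same tracking can be done per request; a hedged sketch, mirroring the curl example shown next (the `opaqueId` transport option sets the `X-Opaque-Id` header, and the ID value is arbitrary):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function listTasksTracked (): Promise<void> {
  const result = await client.tasks.list(
    { group_by: 'parents', detailed: true },
    { opaqueId: '123456' } // surfaces in the response header and in the task `headers`
  )
  console.log(result.tasks)
}

listTasksTracked().catch(console.log)
```

Note that the refactor in this patch also lets callers pass an explicit `body` or `querystring` object in `params`, which the generated method now merges instead of treating as unknown query parameters.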
For example: ``` curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" ``` The API returns the following result: ``` HTTP/1.1 200 OK X-Opaque-Id: 123456 content-type: application/json; charset=UTF-8 content-length: 831 { "tasks" : { "u5lcZHqcQhu-rUoFaqDphA:45" : { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 45, "type" : "transport", "action" : "cluster:monitor/tasks/lists", "start_time_in_millis" : 1513823752749, "running_time_in_nanos" : 293139, "cancellable" : false, "headers" : { "X-Opaque-Id" : "123456" }, "children" : [ { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 46, "type" : "direct", "action" : "cluster:monitor/tasks/lists[n]", "start_time_in_millis" : 1513823752750, "running_time_in_nanos" : 92133, "cancellable" : false, "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", "headers" : { "X-Opaque-Id" : "123456" } } ] } } } ``` In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} */ async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -124,14 +144,24 @@ export default class Tasks { async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index 1176ced32..af8c1d1e4 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -47,16 +47,27 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['field', 'size', 'timeout', 'case_insensitive', 'index_filter', 'string', 'search_after'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index f113c2c15..a4333ccc0 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -47,16 +47,27 @@ export default async function TermvectorsApi (this: That, p export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index', 'id'] const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index 35de18ecf..86efca83c 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -52,13 +52,23 @@ export default class TextStructure { async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -82,16 +92,27 @@ export default class TextStructure { async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['messages'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -115,16 +136,17 @@ export default class TextStructure { async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['text_files'] - const querystring: Record = {} - let body: any + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -148,16 +170,27 @@ export default class TextStructure { async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['grok_pattern', 'text'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index ffbaec1f5..b826dfffd 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -52,13 +52,23 @@ export default class Transform { async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -84,14 +94,24 @@ export default class Transform { async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -113,14 +133,24 @@ export default class Transform { async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -153,13 +183,23 @@ export default class Transform { async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -186,17 +226,28 @@ export default class Transform { async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -230,16 +281,27 @@ export default class Transform { async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const acceptedBody: string[] = ['dest', 'description', 'frequency', 'latest', '_meta', 'pivot', 'retention_policy', 'settings', 'source', 'sync'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -265,13 +327,23 @@ export default class Transform { async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -297,13 +369,23 @@ export default class Transform { async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -329,13 +411,23 @@ export default class Transform { async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -361,13 +453,23 @@ export default class Transform { async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -394,16 +496,27 @@ export default class Transform { async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['transform_id'] const acceptedBody: string[] = ['dest', 'description', 'frequency', '_meta', 'source', 'settings', 'sync', 'retention_policy'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -429,14 +542,24 @@ export default class Transform { async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/update.ts b/src/api/api/update.ts index 400f8dae0..d4dc6f183 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Update a document. Updates a document by running a script or passing a partial document. + * Update a document. Update a document by running a script or passing a partial document. If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation: * Gets the document (collocated with the shard) from the index. * Runs the specified script. * Indexes the result. The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html | Elasticsearch API documentation} */ export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -47,16 +47,27 @@ export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id', 'index'] const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', '_source', 'upsert'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts index 83ea42500..fb09d42f4 100644 --- a/src/api/api/update_by_query.ts +++ b/src/api/api/update_by_query.ts @@ -38,7 +38,7 @@ import * as T from '../types' interface That { transport: Transport } /** - * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. + * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `index` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick, they are not rolled back. **Throttling update requests** To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. 
By default the batch size is 1000, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with slices will cancel each sub-request. * Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. * Update performance scales linearly across available resources with the number of slices. Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. **Update the document source** Update by query supports scripts to update the document source. As with the update API, you can set `ctx.op` to change the operation that is performed. Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. The update by query operation skips updating the document and increments the `noop` counter. Set `ctx.op = "delete"` if your script decides that the document should be deleted. The update by query operation deletes the document and increments the `deleted` counter. Update by query supports only `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. 
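For illustration, the throttled, scripted update by query described above looks roughly like this from the client side (a minimal sketch; `client`, the index name, the `count` field, and the selection query are all hypothetical):

```ts
// Throttled update by query with a script that can skip documents via ctx.op.
// requests_per_second: 500 pads each 1000-document batch toward a ~2 second
// target time, per the arithmetic above.
await client.updateByQuery({
  index: 'my-index',                 // hypothetical target index
  conflicts: 'proceed',              // count version conflicts instead of aborting
  requests_per_second: 500,
  query: { term: { active: true } }, // hypothetical selection query
  script: {
    lang: 'painless',
    // Skip documents that need no change; everything else is reindexed.
    source: "if (ctx._source.count == null) { ctx.op = 'noop' } else { ctx._source.count++ }"
  }
})
```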
Setting any other field in `ctx` is an error. This API enables you to only modify the source of matching documents; you cannot move them. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html | Elasticsearch API documentation} */ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -47,16 +47,27 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice', 'conflicts'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts index bd5fa29d7..c5c9b6c31 100644 --- a/src/api/api/update_by_query_rethrottle.ts +++ b/src/api/api/update_by_query_rethrottle.ts @@ -39,20 +39,30 @@ interface That { transport: Transport } /** * Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html#docs-update-by-query-rethrottle | Elasticsearch API documentation} */ export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['task_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index de32a06e7..df5201232 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -52,13 +52,23 @@ export default class Watcher { async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['watch_id', 'action_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -92,13 +102,23 @@ export default class Watcher { async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['watch_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -124,13 +144,23 @@ export default class Watcher { async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['watch_id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -156,13 +186,23 @@ export default class Watcher { async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -189,17 +229,28 @@ export default class Watcher { async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['action_modes', 'alternative_input', 'ignore_condition', 'record_execution', 'simulated_actions', 'trigger_data', 'watch'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -232,14 +283,24 @@ export default class Watcher { async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -262,13 +323,23 @@ export default class Watcher { async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -295,16 +366,27 @@ export default class Watcher { async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'throttle_period_in_millis', 'transform', 'trigger'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -331,17 +413,28 @@ export default class Watcher { async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['from', 'size', 'query', 'sort', 'search_after'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -364,14 +457,24 @@ export default class Watcher { async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -394,14 +497,24 @@ export default class Watcher { async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['metric'] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -434,14 +547,24 @@ export default class Watcher { async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -465,17 +588,28 @@ export default class Watcher { async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const acceptedBody: string[] = ['index.auto_expand_replicas', 'index.number_of_replicas'] - const querystring: Record = {} - const body: Record = {} + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? 
{} // @ts-expect-error body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts index b57d2d754..0082b65e2 100644 --- a/src/api/api/xpack.ts +++ b/src/api/api/xpack.ts @@ -52,14 +52,24 @@ export default class Xpack { async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -82,14 +92,24 @@ export default class Xpack { async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/types.ts b/src/api/types.ts index 33075707b..b2c5dc05d 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -57,19 +57,35 @@ export interface BulkOperationContainer { export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' export interface BulkRequest extends RequestBase { +/** The name of the data stream, index, or index alias to perform bulk actions on. */ index?: IndexName + /** If `true`, the response will include the ingest pipelines that were run for each index or create. */ list_executed_pipelines?: boolean + /** The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. */ refresh?: Refresh + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. 
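The pattern repeated throughout these hunks is the point of the refactor: each generated endpoint now seeds `body` and `querystring` from optional raw `params.body` / `params.querystring` escape hatches (string or object), and the catch-all loop skips those two keys instead of serializing them as query parameters. From the caller's side it would look something like this (a sketch; the extra query parameter is hypothetical):

```ts
// Typed params and a raw querystring passthrough on the same request:
// keys in `querystring` are merged into the query string as-is.
await client.xpack.usage({
  querystring: { human: 'true' }  // hypothetical extra query parameter
})
```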
*/ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields + /** The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards + /** If `true`, the request's actions must target an index alias. */ require_alias?: boolean + /** If `true`, the request's actions must target a data stream (existing or to be created). */ require_data_stream?: boolean operations?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, list_executed_pipelines?: never, pipeline?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, timeout?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, operations?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, list_executed_pipelines?: never, pipeline?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, timeout?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, operations?: never } } export interface BulkResponse { @@ -115,7 +131,12 @@ export interface BulkWriteOperation extends BulkOperationBase { } export interface ClearScrollRequest extends RequestBase { +/** A comma-separated list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. */ scroll_id?: ScrollIds + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { scroll_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { scroll_id?: never } } export interface ClearScrollResponse { @@ -124,7 +145,12 @@ export interface ClearScrollResponse { } export interface ClosePointInTimeRequest extends RequestBase { +/** The ID of the point-in-time. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { id?: never } } export interface ClosePointInTimeResponse { @@ -133,22 +159,42 @@ export interface ClosePointInTimeResponse { } export interface CountRequest extends RequestBase { +/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean + /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator + /** The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ df?: string + /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, concrete, expanded, or aliased indices are ignored when frozen. */ ignore_throttled?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean + /** The minimum `_score` value that documents must have to be included in the result. */ min_score?: double + /** The node or shard the operation should be performed on. By default, it is random. */ preference?: string + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long + /** The query in Lucene query string syntax. */ q?: string + /** Defines the search definition using the Query DSL. The query is optional, and when not provided, it will use `match_all` to count all the docs. */ query?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. 
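As the `CountRequest` comments above note, the query can be supplied in either place (a sketch with hypothetical index and field names):

```ts
// Lucene query string syntax via the `q` query parameter...
await client.count({ index: 'my-index', q: 'user:kimchy' })
// ...or Query DSL in the body; omitting `query` entirely counts all documents.
await client.count({ index: 'my-index', query: { match: { user: 'kimchy' } } })
```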
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, routing?: never, terminate_after?: never, q?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, routing?: never, terminate_after?: never, q?: never, query?: never } } export interface CountResponse { @@ -157,68 +203,131 @@ export interface CountResponse { } export interface CreateRequest extends RequestBase { +/** A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. */ id: Id + /** The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index. */ index: IndexName + /** The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. */ refresh?: Refresh + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing + /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** The explicit version number for concurrency control. It must be a non-negative long number. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards document?: TDocument + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { id?: never, index?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } } export type CreateResponse = WriteResponseBase export interface DeleteRequest extends RequestBase { +/** A unique identifier for the document. */ id: Id + /** The name of the target index. */ index: IndexName + /** Only perform the operation if the document has this primary term. */ if_primary_term?: long + /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. */ refresh?: Refresh + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. */ timeout?: Duration + /** An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never } } export type DeleteResponse = WriteResponseBase export interface DeleteByQueryRequest extends RequestBase { +/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. 
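The `if_seq_no` / `if_primary_term` pair on `DeleteRequest` above implements optimistic concurrency control; used together it looks like this (a sketch with hypothetical names, error handling omitted):

```ts
// Delete only if the document is unchanged since we read it.
const doc = await client.get({ index: 'my-index', id: '1' })
await client.delete({
  index: 'my-index',
  id: '1',
  if_seq_no: doc._seq_no,            // sequence number from the read
  if_primary_term: doc._primary_term // primary term from the read
})
```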
For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean + /** What to do if delete by query hits version conflicts: `abort` or `proceed`. */ conflicts?: Conflicts + /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator + /** The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ df?: string + /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** Starting offset (default: 0) */ from?: long + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean + /** The node or shard the operation should be performed on. It is random by default. */ preference?: string + /** If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`. */ refresh?: boolean + /** If `true`, the request cache is used for this request. Defaults to the index-level setting. */ request_cache?: boolean + /** The throttle for this request in sub-requests per second. */ requests_per_second?: float + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** A query in the Lucene query string syntax. */ q?: string + /** The period to retain the search context for scrolling. */ scroll?: Duration + /** The size of the scroll request that powers the operation. */ scroll_size?: long + /** The explicit timeout for each search request. It defaults to no timeout. */ search_timeout?: Duration + /** The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. */ search_type?: SearchType + /** The number of slices this task should be divided into. */ slices?: Slices + /** A comma-separated list of `:` pairs. */ sort?: string[] + /** The specific `tag` of the request for logging and statistical purposes. */ stats?: string[] + /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. 
Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long + /** The period each deletion request waits for active shards. */ timeout?: Duration + /** If `true`, returns the document version as part of a hit. */ version?: boolean + /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. */ wait_for_active_shards?: WaitForActiveShards + /** If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. */ wait_for_completion?: boolean + /** The maximum number of documents to delete. */ max_docs?: long + /** The documents to delete specified with Query DSL. */ query?: QueryDslQueryContainer + /** Slice the request manually using the provided slice ID and total number of slices. */ slice?: SlicedScroll + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, conflicts?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, preference?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, q?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, sort?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, slice?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, conflicts?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, preference?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, q?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, sort?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, slice?: never } } export interface DeleteByQueryResponse { @@ -241,49 +350,93 @@ export interface DeleteByQueryResponse { } export interface DeleteByQueryRethrottleRequest extends RequestBase { +/** The ID for the task. */ task_id: TaskId + /** The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. */ requests_per_second?: float + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { task_id?: never, requests_per_second?: never } } export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase export interface DeleteScriptRequest extends RequestBase { +/** Identifier for the stored script or search template. */ id: Id + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } } export type DeleteScriptResponse = AcknowledgedResponseBase export interface ExistsRequest extends RequestBase { +/** A unique document identifier. */ id: Id + /** A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). */ index: IndexName + /** The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. */ preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields + /** A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. */ stored_fields?: Fields + /** Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** All values in `body` will be added to the request body. 
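`ExistsRequest` above maps to a HEAD request, so the resolved value is a plain boolean rather than a document (a minimal sketch, hypothetical names):

```ts
const found = await client.exists({ index: 'my-index', id: '1' })
if (!found) {
  // handle the missing document
}
```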
*/ + body?: string | { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } } export type ExistsResponse = boolean export interface ExistsSourceRequest extends RequestBase { +/** A unique identifier for the document. */ id: Id + /** A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). */ index: IndexName + /** The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. */ preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude in the response. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. */ _source_includes?: Fields + /** The version number for concurrency control. It must match the current version of the document for the request to succeed. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never } } export type ExistsSourceResponse = boolean @@ -301,21 +454,40 @@ export interface ExplainExplanationDetail { } export interface ExplainRequest extends RequestBase { +/** Defines the document ID. */ id: Id + /** Index names used to limit the request. Only a single index name can be provided to this parameter. */ index: IndexName + /** Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. */ analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. */ analyze_wildcard?: boolean + /** The default operator for query string query: `AND` or `OR`. */ default_operator?: QueryDslOperator + /** Field to use as default where no field prefix is given in the query string. 
*/ df?: string + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. */ lenient?: boolean + /** Specifies the node or shard the operation should be performed on. Random by default. */ preference?: string + /** Custom value used to route operations to a specific shard. */ routing?: Routing + /** True or false to return the `_source` field or not, or a list of fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. */ _source_includes?: Fields + /** A comma-separated list of stored fields to return in the response. */ stored_fields?: Fields + /** Query in the Lucene query string syntax. */ q?: string + /** Defines the search definition using the Query DSL. */ query?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, lenient?: never, preference?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, q?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, lenient?: never, preference?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, q?: never, query?: never } } export interface ExplainResponse { @@ -342,17 +514,32 @@ export interface FieldCapsFieldCapability { } export interface FieldCapsRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. */ index?: Indices + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** If true, unmapped fields are included in the response. */ include_unmapped?: boolean + /** An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent */ filters?: string + /** Only return results for fields that have one of the types in the list */ types?: string[] + /** If false, empty fields are not included in the response. */ include_empty_fields?: boolean + /** List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. */ fields?: Fields + /** Allows to filter indices if the provided query rewrites to match_none on every shard. 
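The `index_filter` parameter documented above prunes shards whose query rewrites to `match_none`; a sketch of its use (the index pattern, fields, and timestamp range are hypothetical):

```ts
await client.fieldCaps({
  index: 'my-index-*',
  fields: ['rating', 'title'],
  // Only consider backing indices with data from the last year.
  index_filter: { range: { '@timestamp': { gte: 'now-1y' } } }
})
```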
*/ index_filter?: QueryDslQueryContainer + /** Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. */ runtime_mappings?: MappingRuntimeFields + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, fields?: never, index_filter?: never, runtime_mappings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, fields?: never, index_filter?: never, runtime_mappings?: never } } export interface FieldCapsResponse { @@ -374,26 +561,49 @@ export interface GetGetResult { } export interface GetRequest extends RequestBase { +/** A unique document identifier. */ id: Id + /** The name of the index that contains the document. */ index: IndexName + /** Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. */ force_synthetic_source?: boolean + /** The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. */ preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields + /** A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response.
If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can't be returned; if specified, the request fails. */ stored_fields?: Fields + /** The version number for concurrency control. It must match the current version of the document for the request to succeed. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } } export type GetResponse = GetGetResult export interface GetScriptRequest extends RequestBase { +/** Identifier for the stored script or search template. */ id: Id + /** Specify timeout for connection to master */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never } } export interface GetScriptResponse { @@ -419,6 +629,10 @@ export interface GetScriptContextContextMethodParam { } export interface GetScriptContextRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface GetScriptContextResponse { @@ -431,6 +645,10 @@ export interface GetScriptLanguagesLanguageContext { } export interface GetScriptLanguagesRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface GetScriptLanguagesResponse { @@ -439,18 +657,34 @@ export interface GetScriptLanguagesResponse { } export interface GetSourceRequest extends RequestBase { +/** A unique document identifier. */ id: Id + /** The name of the index that contains the document. */ index: IndexName + /** The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. */ preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean + /** A custom value used to route operations to a specific shard.
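// Editorial sketch: fetching a document with the GetRequest/GetResponse types above.
// Assumes the `client` from the earlier sketch; the document shape is an illustrative assumption.
interface BlogPost { title: string; views: number }

const doc = await client.get<BlogPost>({ index: 'my-index', id: '1' })
if (doc.found && doc._source) {
  console.log(doc._source.title)   // _source is typed as BlogPost | undefined
}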
*/ routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude in the response. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. */ _source_includes?: Fields + /** A comma-separated list of stored fields to return as part of a hit. */ stored_fields?: Fields + /** The version number for concurrency control. It must match the current version of the document for the request to succeed. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } } export type GetSourceResponse = TDocument @@ -580,10 +814,18 @@ export interface HealthReportRepositoryIntegrityIndicatorDetails { } export interface HealthReportRequest extends RequestBase { +/** A feature of the cluster, as returned by the top-level health report API. */ feature?: string | string[] + /** Explicit operation timeout. */ timeout?: Duration + /** Opt-in for more information about the health of the system. */ verbose?: boolean + /** Limit the number of affected resources the health report API returns. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { feature?: never, timeout?: never, verbose?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { feature?: never, timeout?: never, verbose?: never, size?: never } } export interface HealthReportResponse { @@ -645,25 +887,46 @@ export interface HealthReportStagnatingBackingIndices { } export interface IndexRequest extends RequestBase { +/** A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format and omit this parameter. */ id?: Id + /** The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API. */ index: IndexName + /** Only perform the operation if the document has this primary term. */ if_primary_term?: long + /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber + /** Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `<index>/_create` endpoint. If a document ID is specified, this parameter defaults to `index`.
Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. */ op_type?: OpType + /** The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. */ pipeline?: string + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. */ refresh?: Refresh + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing + /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** An explicit version number for concurrency control. It must be a non-negative long number. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards + /** If `true`, the destination must be an index alias. */ require_alias?: boolean document?: TDocument + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, document?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, document?: never } } export type IndexResponse = WriteResponseBase export interface InfoRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. 
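// Editorial sketch: indexing a document with the IndexRequest type above. `op_type: 'create'`
// makes the call fail if the id already exists; `refresh: 'wait_for'` blocks until the change
// is searchable. Index and field names are illustrative assumptions.
const indexed = await client.index({
  index: 'my-index',
  id: '1',
  op_type: 'create',
  refresh: 'wait_for',
  document: { title: 'Hello', views: 0 }
})
console.log(indexed.result)   // e.g. 'created'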
*/ + querystring?: { [key: string]: any } } export interface InfoResponse { @@ -675,14 +938,26 @@ export interface InfoResponse { } export interface KnnSearchRequest extends RequestBase { +/** A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices */ index: Indices + /** A comma-separated list of specific routing values */ routing?: Routing + /** Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. */ _source?: SearchSourceConfig + /** The request returns doc values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. */ stored_fields?: Fields + /** The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. */ fields?: Fields + /** Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** kNN query to execute */ knn: KnnSearchQuery + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, routing?: never, _source?: never, docvalue_fields?: never, stored_fields?: never, fields?: never, filter?: never, knn?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, routing?: never, _source?: never, docvalue_fields?: never, stored_fields?: never, fields?: never, filter?: never, knn?: never } } export interface KnnSearchResponse { @@ -718,18 +993,34 @@ export interface MgetOperation { } export interface MgetRequest extends RequestBase { +/** Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. */ index?: IndexName + /** Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index. */ force_synthetic_source?: boolean + /** Specifies the node or shard the operation should be performed on. Random by default. */ preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** If `true`, the request refreshes relevant shards before retrieving documents. */ refresh?: boolean + /** Custom value used to route operations to a specific shard. */ routing?: Routing + /** True or false to return the `_source` field or not, or a list of fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
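// Editorial sketch: the dedicated kNN search endpoint typed by KnnSearchRequest above. Note
// this endpoint has been superseded by the `knn` option of the regular search API in newer
// releases; field name and vector values here are illustrative assumptions.
const knnRes = await client.knnSearch({
  index: 'my-vectors',
  knn: {
    field: 'embedding',             // a dense_vector field (assumed)
    query_vector: [0.1, 0.2, 0.3],  // must match the field's dims (assumed 3 here)
    k: 5,
    num_candidates: 50
  }
})
console.log(knnRes.hits.hits.length)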
*/ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields + /** If `true`, retrieves the document fields stored in the index rather than the document `_source`. */ stored_fields?: Fields + /** The documents you want to retrieve. Required if no index is specified in the request URI. */ docs?: MgetOperation[] + /** The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. */ ids?: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, docs?: never, ids?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, docs?: never, ids?: never } } export interface MgetResponse { @@ -797,21 +1088,39 @@ export interface MsearchMultisearchHeader { } export interface MsearchRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and index aliases to search. */ index?: Indices + /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean + /** If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean + /** Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. */ expand_wildcards?: ExpandWildcards + /** If true, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean + /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false). This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. */ include_named_queries_score?: boolean + /** Maximum number of concurrent searches the multi search API can execute. */ max_concurrent_searches?: long + /** Maximum number of concurrent shard requests that each sub-search request executes per node.
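// Editorial sketch: batch retrieval with the MgetRequest type above. When `index` is set on
// the request, plain `ids` suffice; otherwise each entry in `docs` names its own index.
// Reuses the illustrative BlogPost shape from the earlier get sketch.
const batch = await client.mget<BlogPost>({
  index: 'my-index',
  ids: ['1', '2', '3']
})
for (const item of batch.docs) {
  // each item is either a hit (with `found`) or a per-document error
  if ('found' in item && item.found) console.log(item._source)
}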
*/ max_concurrent_shard_requests?: long + /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */ pre_filter_shard_size?: long + /** If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. */ rest_total_hits_as_int?: boolean + /** Custom routing value used to route search operations to a specific shard. */ routing?: Routing + /** Indicates whether global term and document frequencies should be used when scoring returned documents. */ search_type?: SearchType + /** Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. */ typed_keys?: boolean searches?: MsearchRequestItem[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never } } export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody @@ -821,13 +1130,23 @@ export type MsearchResponse = MsearchMultiSearchItem | ErrorResponseBase export interface MsearchTemplateRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. */ index?: Indices + /** If `true`, network round-trips are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean + /** Maximum number of concurrent searches the API can run. */ max_concurrent_searches?: long + /** The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`. */ search_type?: SearchType + /** If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. */ rest_total_hits_as_int?: boolean + /** If `true`, the response prefixes aggregation and suggester names with their respective types. */ typed_keys?: boolean search_templates?: MsearchTemplateRequestItem[] + /** All values in `body` will be added to the request body. 
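// Editorial sketch: the `searches` property of MsearchRequest interleaves header and body
// objects, mirroring the ndjson wire format of the _msearch endpoint. Index names are
// illustrative assumptions.
const multi = await client.msearch({
  searches: [
    { index: 'my-index' },                      // header for search 1
    { query: { match_all: {} }, size: 1 },      // body for search 1
    { index: 'other-index' },                   // header for search 2
    { query: { term: { status: 'open' } } }     // body for search 2
  ]
})
console.log(multi.responses.length)   // one entry per search, each a result or an error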
*/ + body?: string | { [key: string]: any } & { index?: never, ccs_minimize_roundtrips?: never, max_concurrent_searches?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, search_templates?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, ccs_minimize_roundtrips?: never, max_concurrent_searches?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, search_templates?: never } } export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig @@ -859,20 +1178,38 @@ export interface MtermvectorsOperation { } export interface MtermvectorsRequest extends RequestBase { +/** Name of the index that contains the documents. */ index?: IndexName + /** Comma-separated list or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields + /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */ field_statistics?: boolean + /** If `true`, the response includes term offsets. */ offsets?: boolean + /** If `true`, the response includes term payloads. */ payloads?: boolean + /** If `true`, the response includes term positions. */ positions?: boolean + /** Specifies the node or shard the operation should be performed on. Random by default. */ preference?: string + /** If true, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** Custom value used to route operations to a specific shard. */ routing?: Routing + /** If true, the response includes term frequency and document frequency. */ term_statistics?: boolean + /** If `true`, returns the document version as part of a hit. */ version?: VersionNumber + /** Specific version type. */ version_type?: VersionType + /** Array of existing or artificial documents. */ docs?: MtermvectorsOperation[] + /** Simplified syntax to specify documents by their ID if they're in the same index. */ ids?: Id[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, docs?: never, ids?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, docs?: never, ids?: never } } export interface MtermvectorsResponse { @@ -890,14 +1227,26 @@ export interface MtermvectorsTermVectorsResult { } export interface OpenPointInTimeRequest extends RequestBase { +/** A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices */ index: Indices + /** Extend the length of time that the point in time persists. */ keep_alive: Duration + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** The node or shard the operation should be performed on. By default, it is random. 
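// Editorial sketch: msearchTemplate follows the same header/body pairing, but each body
// references a stored template by `id` plus `params`. The template id is an assumption.
const multiTpl = await client.msearchTemplate({
  search_templates: [
    { index: 'my-index' },
    { id: 'my-search-template', params: { q: 'hello' } }
  ]
})
console.log(multiTpl.responses.length)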
*/ preference?: string + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing + /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. */ allow_partial_search_results?: boolean + /** Filter indices if the provided query rewrites to `match_none` on every shard. */ index_filter?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, index_filter?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, index_filter?: never } } export interface OpenPointInTimeResponse { @@ -906,16 +1255,29 @@ export interface OpenPointInTimeResponse { } export interface PingRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export type PingResponse = boolean export interface PutScriptRequest extends RequestBase { +/** Identifier for the stored script or search template. Must be unique within the cluster. */ id: Id + /** Context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. */ context?: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** Contains the script or search template, its parameters, and its language. */ script: StoredScript + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, context?: never, master_timeout?: never, timeout?: never, script?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, context?: never, master_timeout?: never, timeout?: never, script?: never } } export type PutScriptResponse = AcknowledgedResponseBase @@ -992,13 +1354,24 @@ export interface RankEvalRankEvalRequestItem { } export interface RankEvalRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. 
To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Whether to expand wildcard expressions to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** Search operation type */ search_type?: string + /** A set of typical search requests, together with their provided ratings. */ requests: RankEvalRankEvalRequestItem[] + /** Definition of the evaluation metric to calculate. */ metric?: RankEvalRankEvalMetric + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, search_type?: never, requests?: never, metric?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, search_type?: never, requests?: never, metric?: never } } export interface RankEvalResponse { @@ -1030,20 +1403,37 @@ export interface ReindexRemoteSource { } export interface ReindexRequest extends RequestBase { +/** If `true`, the request refreshes affected shards to make this operation visible to search. */ refresh?: boolean + /** The throttle for this request in sub-requests per second. By default, there is no throttle. */ requests_per_second?: float + /** The period of time that a consistent view of the index should be maintained for scrolled search. */ scroll?: Duration + /** The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. */ slices?: Slices + /** The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards + /** If `true`, the request blocks until the operation is complete. */ wait_for_completion?: boolean + /** If `true`, the destination must be an index alias.
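// Editorial sketch: RankEvalRequest pairs typical queries with relevance ratings and a
// metric. The index, query, and ratings below are illustrative assumptions.
const evalRes = await client.rankEval({
  index: 'my-index',
  requests: [{
    id: 'query_1',
    request: { query: { match: { title: 'search' } } },
    ratings: [{ _index: 'my-index', _id: '1', rating: 3 }]
  }],
  metric: { precision: { k: 10, relevant_rating_threshold: 1 } }
})
console.log(evalRes.metric_score)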
*/ require_alias?: boolean + /** Indicates whether to continue reindexing even when there are conflicts. */ conflicts?: Conflicts + /** The destination you are copying to. */ dest: ReindexDestination + /** The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. */ max_docs?: long + /** The script to run to update the document source or metadata when reindexing. */ script?: Script | string size?: long + /** The source you are copying from. */ source: ReindexSource + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, requests_per_second?: never, scroll?: never, slices?: never, timeout?: never, wait_for_active_shards?: never, wait_for_completion?: never, require_alias?: never, conflicts?: never, dest?: never, max_docs?: never, script?: never, size?: never, source?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, requests_per_second?: never, scroll?: never, slices?: never, timeout?: never, wait_for_active_shards?: never, wait_for_completion?: never, require_alias?: never, conflicts?: never, dest?: never, max_docs?: never, script?: never, size?: never, source?: never } } export interface ReindexResponse { @@ -1110,8 +1500,14 @@ export interface ReindexRethrottleReindexTask { } export interface ReindexRethrottleRequest extends RequestBase { +/** The task identifier, which can be found by using the tasks API. */ task_id: Id + /** The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. */ requests_per_second?: float + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_id?: never, requests_per_second?: never } } export interface ReindexRethrottleResponse { @@ -1119,16 +1515,25 @@ export interface ReindexRethrottleResponse { } export interface RenderSearchTemplateRequest extends RequestBase { +/** ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. */ id?: Id file?: string + /** Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. */ params?: Record + /** An inline search template. Supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `<templated-id>` is specified, this parameter is required. */ source?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, file?: never, params?: never, source?: never } + /** All values in `querystring` will be added to the request querystring.
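// Editorial sketch: a background reindex using the ReindexRequest type above. With
// `wait_for_completion: false` the call returns a task id that can be polled or later
// rethrottled (see ReindexRethrottleRequest). Index names are assumptions.
const task = await client.reindex({
  source: { index: 'old-index' },
  dest: { index: 'new-index' },
  conflicts: 'proceed',
  wait_for_completion: false
})
console.log(task.task)   // task id usable with the tasks and _reindex/_rethrottle APIs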
*/ + querystring?: { [key: string]: any } & { id?: never, file?: never, params?: never, source?: never } } export interface RenderSearchTemplateResponse { template_output: Record } +export type ScriptsPainlessExecutePainlessContext = 'painless_test' | 'filter' | 'score' | 'boolean_field' | 'date_field' | 'double_field' | 'geo_point_field' | 'ip_field' | 'keyword_field' | 'long_field' | 'composite_field' + export interface ScriptsPainlessExecutePainlessContextSetup { document: any index: IndexName @@ -1136,9 +1541,16 @@ export interface ScriptsPainlessExecutePainlessContextSetup { } export interface ScriptsPainlessExecuteRequest extends RequestBase { - context?: string +/** The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. */ + context?: ScriptsPainlessExecutePainlessContext + /** Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. */ context_setup?: ScriptsPainlessExecutePainlessContextSetup + /** The Painless script to run. */ script?: Script | string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { context?: never, context_setup?: never, script?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { context?: never, context_setup?: never, script?: never } } export interface ScriptsPainlessExecuteResponse { @@ -1146,81 +1558,158 @@ export interface ScriptsPainlessExecuteResponse { } export interface ScrollRequest extends RequestBase { +/** The scroll ID */ scroll_id?: ScrollId + /** If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. */ rest_total_hits_as_int?: boolean + /** Period to retain the search context for scrolling. */ scroll?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { scroll_id?: never, rest_total_hits_as_int?: never, scroll?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { scroll_id?: never, rest_total_hits_as_int?: never, scroll?: never } } export type ScrollResponse> = SearchResponseBody export interface SearchRequest extends RequestBase { +/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. */ allow_partial_search_results?: boolean + /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. 
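// Editorial sketch: exercising the `painless_test` context from the
// ScriptsPainlessExecutePainlessContext union introduced above. The script and its
// parameters are illustrative assumptions.
const painless = await client.scriptsPainlessExecute({
  context: 'painless_test',
  script: {
    source: 'params.count / params.total',
    params: { count: 100.0, total: 1000.0 }
  }
})
console.log(painless.result)   // '0.1'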
*/ analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean + /** The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. */ batched_reduce_size?: long + /** If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. */ ccs_minimize_roundtrips?: boolean + /** The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator + /** The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ df?: string + /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, concrete, expanded or aliased indices will be ignored when frozen. */ ignore_throttled?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. */ include_named_queries_score?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean + /** The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. */ max_concurrent_shard_requests?: long + /** The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified node IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:<node-id>,<node-id>` to, if possible, run the search on the specified node IDs. If not, select shards using the default method. * `_shards:<shard>,<shard>` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order.
*/ preference?: string + /** A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only indices. * The primary sort of the query targets an indexed field. */ pre_filter_shard_size?: long + /** If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings. */ request_cache?: boolean + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing + /** The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting. */ scroll?: Duration + /** Indicates how distributed term frequencies are calculated for relevance scoring. */ search_type?: SearchType + /** The field to use for suggestions. */ suggest_field?: Field + /** The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_mode?: SuggestMode + /** The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_size?: long + /** The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_text?: string + /** If `true`, aggregation and suggester names are prefixed by their respective types in the response. */ typed_keys?: boolean + /** Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. */ rest_total_hits_as_int?: boolean + /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields + /** A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned. */ q?: string + /** Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index. */ force_synthetic_source?: boolean + /** Defines the aggregations that are run as part of the search request.
*/ aggregations?: Record /** @alias aggregations */ + /** Defines the aggregations that are run as part of the search request. */ aggs?: Record + /** Collapses search results by the values of the specified field. */ collapse?: SearchFieldCollapse + /** If `true`, the request returns detailed information about score computation as part of a hit. */ explain?: boolean + /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record + /** The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ from?: integer + /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. */ highlight?: SearchHighlight + /** Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. */ track_total_hits?: SearchTrackHits + /** Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. */ indices_boost?: Record[] + /** An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** The approximate kNN search to run. */ knn?: KnnSearch | KnnSearch[] + /** The Reciprocal Rank Fusion (RRF) to use. */ rank?: RankContainer + /** The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results. */ min_score?: double + /** Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. */ post_filter?: QueryDslQueryContainer + /** Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. */ profile?: boolean + /** The search definition using the Query DSL. */ query?: QueryDslQueryContainer + /** Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. */ rescore?: SearchRescore | SearchRescore[] + /** A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */ retriever?: RetrieverContainer + /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record + /** Used to retrieve the next page of hits using a set of sort values from the previous page. */ search_after?: SortResults + /** The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. */ size?: integer + /** Split a scrolled search into multiple slices that can be consumed independently. */ slice?: SlicedScroll + /** A comma-separated list of `<field>:<direction>` pairs.
*/ sort?: Sort + /** The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. */ _source?: SearchSourceConfig + /** An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] + /** Defines a suggester that provides similar looking terms based on a provided text. */ suggest?: SearchSuggester + /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. */ terminate_after?: long + /** The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */ timeout?: string + /** If `true`, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean + /** If `true`, the request returns the document version as part of a hit. */ version?: boolean + /** If `true`, the request returns sequence number and primary term of the last modification of each hit. */ seq_no_primary_term?: boolean + /** A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. */ stored_fields?: Fields + /** Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path. */ pit?: SearchPointInTimeReference + /** One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields + /** The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. */ stats?: string[] + /** All values in `body` will be added to the request body.
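// Editorial sketch: a representative search combining several of the SearchRequest body
// properties documented above. The document type and field names are illustrative
// assumptions, reusing the BlogPost shape from the earlier get sketch.
const res = await client.search<BlogPost>({
  index: 'my-index',
  query: { match: { title: 'elasticsearch' } },
  sort: [{ views: 'desc' }],
  from: 0,
  size: 10,
  track_total_hits: true,
  _source: ['title', 'views']
})
console.log(res.hits.total, res.hits.hits.map(h => h._source?.title))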
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } } export type SearchResponse> = SearchResponseBody @@ -1808,25 +2297,48 @@ export type SearchTotalHitsRelation = 'eq' | 'gte' export type SearchTrackHits = boolean | integer export interface SearchMvtRequest extends RequestBase { +/** Comma-separated list of data streams, indices, or aliases to search */ index: Indices + /** Field containing geospatial data to return */ field: Field + /** Zoom level for the vector tile to search */ zoom: SearchMvtZoomLevel + /** X coordinate for the vector tile to search */ x: SearchMvtCoordinate + /** Y coordinate for 
the vector tile to search */ y: SearchMvtCoordinate + /** Sub-aggregations for the geotile_grid. Supports the following aggregation types: - avg - cardinality - max - min - sum */ aggs?: Record + /** Size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. */ buffer?: integer + /** If false, the meta layer’s feature is the bounding box of the tile. If true, the meta layer’s feature is a bounding box resulting from a geo_bounds aggregation. The aggregation runs on values that intersect the `<zoom>/<x>/<y>` tile with wrap_longitude set to false. The resulting bounding box may be larger than the vector tile. */ exact_bounds?: boolean + /** Size, in pixels, of a side of the tile. Vector tiles are square with equal sides. */ extent?: integer + /** Fields to return in the `hits` layer. Supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. */ fields?: Fields + /** Aggregation used to create a grid for the `field`. */ grid_agg?: SearchMvtGridAggregationType + /** Additional zoom levels available through the aggs layer. For example, if `<zoom>` is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don’t include the aggs layer. */ grid_precision?: integer + /** Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon of the cell's bounding box. If 'point' each feature is a Point that is the centroid of the cell. */ grid_type?: SearchMvtGridType + /** Query DSL used to filter documents for the search. */ query?: QueryDslQueryContainer + /** Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields + /** Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don’t include the hits layer. */ size?: integer + /** Sorts features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box’s diagonal length, from longest to shortest. */ sort?: Sort + /** Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. */ track_total_hits?: SearchTrackHits + /** If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. */ with_labels?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never } + /** All values in `querystring` will be added to the request querystring.
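// Editorial sketch: searchMvt returns a binary Mapbox vector tile rather than JSON, so the
// response is consumed as a buffer. Index and field names are illustrative assumptions, as
// is the cast used to inspect the binary payload.
const tile = await client.searchMvt({
  index: 'my-geo-index',
  field: 'location',       // a geo_point or geo_shape field (assumed)
  zoom: 7, x: 64, y: 43,
  grid_precision: 2
})
console.log(Buffer.isBuffer(tile), (tile as unknown as Buffer).byteLength)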
*/ + querystring?: { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never } } export type SearchMvtResponse = MapboxVectorTiles @@ -1840,14 +2352,26 @@ export type SearchMvtGridType = 'grid' | 'point' | 'centroid' export type SearchMvtZoomLevel = integer export interface SearchShardsRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, the request retrieves information from the local node only. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Specifies the node or shard the operation should be performed on. Random by default. */ preference?: string + /** Custom value used to route operations to a specific shard. */ routing?: Routing + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, preference?: never, routing?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, preference?: never, routing?: never } } export interface SearchShardsResponse { @@ -1874,23 +2398,44 @@ export interface SearchShardsShardStoreIndex { } export interface SearchTemplateRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (*). */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** If `true`, network round-trips are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. */ ignore_throttled?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** Specifies the node or shard the operation should be performed on. Random by default. */ preference?: string + /** Custom value used to route operations to a specific shard. */ routing?: Routing + /** Specifies how long a consistent view of the index should be maintained for scrolled search. */ scroll?: Duration + /** The type of the search operation. */ search_type?: SearchType + /** If true, hits.total is rendered as an integer in the response. */ rest_total_hits_as_int?: boolean + /** If `true`, the response prefixes aggregation and suggester names with their respective types. */ typed_keys?: boolean + /** If `true`, returns detailed information about score calculation as part of each hit. */ explain?: boolean + /** ID of the search template to use. If no source is specified, this parameter is required. */ id?: Id + /** Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. */ params?: Record + /** If `true`, the query execution is profiled. */ profile?: boolean + /** An inline search template. Supports the same parameters as the search API's request body. Also supports Mustache variables. If no id is specified, this parameter is required. */ source?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } } export interface SearchTemplateResponse { @@ -1911,14 +2456,25 @@ export interface SearchTemplateResponse { } export interface TermsEnumRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported. */ index: IndexName + /** The string to match at the start of indexed terms. If not provided, all terms in the field are considered. */ field: Field + /** How many matching terms to return. */ size?: integer + /** The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded, the complete flag is set to false in the response and the results may be partial or empty. */ timeout?: Duration + /** When true, the provided search string is matched against index terms without case sensitivity. */ case_insensitive?: boolean + /** Allows filtering an index shard if the provided query rewrites to match_none.
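For example (a sketch; the `client` instance, index, and field names are hypothetical):
@example
// Shards whose documents cannot match the filter are skipped entirely.
const response = await client.termsEnum({ index: 'stackoverflow', field: 'tags', string: 'kiba', index_filter: { term: { environment: 'production' } } })
// response.terms holds the matching terms; response.complete signals whether the enumeration finished within the timeout.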
*/ index_filter?: QueryDslQueryContainer + /** The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request. */ string?: string search_after?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, field?: never, size?: never, timeout?: never, case_insensitive?: never, index_filter?: never, string?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, field?: never, size?: never, timeout?: never, case_insensitive?: never, index_filter?: never, string?: never, search_after?: never } } export interface TermsEnumResponse { @@ -1944,22 +2500,42 @@ export interface TermvectorsFilter { } export interface TermvectorsRequest extends RequestBase { +/** Name of the index that contains the document. */ index: IndexName + /** Unique identifier of the document. */ id?: Id + /** Comma-separated list or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields + /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */ field_statistics?: boolean + /** If `true`, the response includes term offsets. */ offsets?: boolean + /** If `true`, the response includes term payloads. */ payloads?: boolean + /** If `true`, the response includes term positions. */ positions?: boolean + /** Specifies the node or shard the operation should be performed on. Random by default. */ preference?: string + /** If true, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** Custom value used to route operations to a specific shard. */ routing?: Routing + /** If `true`, the response includes term frequency and document frequency. */ term_statistics?: boolean + /** If `true`, returns the document version as part of a hit. */ version?: VersionNumber + /** Specific version type. */ version_type?: VersionType + /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ doc?: TDocument + /** Filter terms based on their tf-idf scores. */ filter?: TermvectorsFilter + /** Overrides the default per-field analyzer. */ per_field_analyzer?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, id?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, doc?: never, filter?: never, per_field_analyzer?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { index?: never, id?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, doc?: never, filter?: never, per_field_analyzer?: never } } export interface TermvectorsResponse { @@ -1992,26 +2568,50 @@ export interface TermvectorsToken { } export interface UpdateRequest extends RequestBase { +/** A unique identifier for the document to be updated. */ id: Id + /** The name of the target index. By default, the index is created automatically if it doesn't exist. */ index: IndexName + /** Only perform the operation if the document has this primary term. */ if_primary_term?: long + /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber + /** The script language. */ lang?: string + /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. */ refresh?: Refresh + /** If `true`, the destination must be an index alias. */ require_alias?: boolean + /** The number of times the operation should be retried when a conflict occurs. */ retry_on_conflict?: integer + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards + /** The source fields you want to exclude. */ _source_excludes?: Fields + /** The source fields you want to retrieve. */ _source_includes?: Fields + /** If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. */ detect_noop?: boolean + /** A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored. */ doc?: TPartialDocument + /** If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. */ doc_as_upsert?: boolean + /** The script to run to update the document. */ script?: Script | string + /** If `true`, run the script whether or not the document exists. */ scripted_upsert?: boolean + /** If `false`, turn off source retrieval. You can also specify a comma-separated list of the fields you want to retrieve. */ _source?: SearchSourceConfig + /** If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run. */ upsert?: TDocument + /** All values in `body` will be added to the request body. 
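As a sketch of a typical call (the index, document ID, and field names are illustrative, not part of this diff):
@example
// Scripted counter update that falls back to inserting { count: 1 } when the document is missing.
await client.update({ index: 'my-index', id: '1', script: { source: 'ctx._source.count += params.n', params: { n: 1 } }, upsert: { count: 1 } })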
*/ + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never } } export type UpdateResponse = UpdateUpdateWriteResponseBase @@ -2021,41 +2621,80 @@ export interface UpdateUpdateWriteResponseBase extends Writ } export interface UpdateByQueryRequest extends RequestBase { +/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean + /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator + /** The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ df?: string + /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** Starting offset (default: 0) */ from?: long + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean + /** The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. 
*/ pipeline?: string + /** The node or shard the operation should be performed on. It is random by default. */ preference?: string + /** A query in the Lucene query string syntax. */ q?: string + /** If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different from the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. */ refresh?: boolean + /** If `true`, the request cache is used for this request. It defaults to the index-level setting. */ request_cache?: boolean + /** The throttle for this request in sub-requests per second. */ requests_per_second?: float + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** The period to retain the search context for scrolling. */ scroll?: Duration + /** The size of the scroll request that powers the operation. */ scroll_size?: long + /** An explicit timeout for each search request. By default, there is no timeout. */ search_timeout?: Duration + /** The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. */ search_type?: SearchType + /** The number of slices this task should be divided into. */ slices?: Slices + /** A comma-separated list of `<field>:<direction>` pairs. */ sort?: string[] + /** The specific `tag` of the request for logging and statistical purposes. */ stats?: string[] + /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long + /** The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** If `true`, returns the document version as part of a hit. */ version?: boolean + /** Should the document increment the version number (internal) on hit or not (reindex). */ version_type?: boolean + /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. */ wait_for_active_shards?: WaitForActiveShards + /** If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. */ wait_for_completion?: boolean + /** The maximum number of documents to update. */ max_docs?: long + /** The documents to update using the Query DSL. */ query?: QueryDslQueryContainer + /** The script to run to update the document source or metadata when updating.
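For example (a sketch with hypothetical names):
@example
// Mark every pending document as processed; conflicts: 'proceed' tolerates concurrent writes.
await client.updateByQuery({ index: 'my-index', query: { term: { status: 'pending' } }, script: { source: "ctx._source.status = 'processed'" }, conflicts: 'proceed' })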
*/ script?: Script | string + /** Slice the request manually using the provided slice ID and total number of slices. */ slice?: SlicedScroll + /** The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. */ conflicts?: Conflicts + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, pipeline?: never, preference?: never, q?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, sort?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, script?: never, slice?: never, conflicts?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, pipeline?: never, preference?: never, q?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, sort?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, script?: never, slice?: never, conflicts?: never } } export interface UpdateByQueryResponse { @@ -2078,8 +2717,14 @@ export interface UpdateByQueryResponse { } export interface UpdateByQueryRethrottleRequest extends RequestBase { +/** The ID for the task. */ task_id: Id + /** The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`. */ requests_per_second?: float + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { task_id?: never, requests_per_second?: never } } export interface UpdateByQueryRethrottleResponse { @@ -2431,6 +3076,7 @@ export interface KnnQuery extends QueryDslQueryBase { k?: integer filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float + rescore_vector?: RescoreVector } export interface KnnRetriever extends RetrieverBase { @@ -2440,6 +3086,7 @@ export interface KnnRetriever extends RetrieverBase { k: integer num_candidates: integer similarity?: float + rescore_vector?: RescoreVector } export interface KnnSearch { @@ -2452,6 +3099,7 @@ export interface KnnSearch { filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float inner_hits?: SearchInnerHits + rescore_vector?: RescoreVector } export interface LatLonGeoLocation { @@ -2632,6 +3280,10 @@ export interface RequestCacheStats { miss_count: long } +export interface RescoreVector { + oversample: float +} + export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' export interface Retries { @@ -2839,6 +3491,8 @@ export interface StoredScript { source: string } +export type StreamResult = ArrayBuffer + export type SuggestMode = 'missing' | 'popular' | 'always' export type SuggestionName = string @@ -5335,6 +5989,11 @@ export interface MappingCorePropertyBase extends MappingPropertyBase { store?: boolean } +export interface MappingCountedKeywordProperty extends MappingPropertyBase { + type: 'counted_keyword' + index?: boolean +} + export interface MappingDataStreamTimestamp { enabled: boolean } @@ -5455,7 +6114,7 @@ export interface MappingFieldNamesField { enabled: boolean } -export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'passthrough' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword' +export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'passthrough' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'counted_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword' export interface MappingFlattenedProperty extends MappingPropertyBase { boost?: double @@ -5650,7 +6309,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { type: 'point' } -export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | 
MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty +export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty export interface MappingPropertyBase { meta?: Record @@ -5658,6 +6317,7 @@ export interface MappingPropertyBase { ignore_above?: integer dynamic?: MappingDynamicMapping fields?: Record + synthetic_source_keep?: MappingSyntheticSourceKeepEnum } export interface MappingRangePropertyBase extends MappingDocValuesPropertyBase { @@ -5764,6 +6424,8 @@ export interface MappingSuggestContext { precision?: integer | string } +export type MappingSyntheticSourceKeepEnum = 'none' | 'arrays' | 'all' + export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' | 'with_positions_payloads' export interface MappingTextIndexPrefixes { @@ -6668,23 +7330,42 @@ export interface AsyncSearchAsyncSearchResponseBase { } export interface AsyncSearchDeleteRequest extends RequestBase { +/** A unique identifier for the async search. 
*/ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type AsyncSearchDeleteResponse = AcknowledgedResponseBase export interface AsyncSearchGetRequest extends RequestBase { +/** A unique identifier for the async search. */ id: Id + /** Specifies how long the async search should be available in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its saved results are deleted. */ keep_alive?: Duration + /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean + /** Specifies to wait for the search to be completed up until the provided timeout. Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. By default no timeout is set meaning that the currently available results will be returned without any additional wait. */ wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, keep_alive?: never, typed_keys?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, keep_alive?: never, typed_keys?: never, wait_for_completion_timeout?: never } } export type AsyncSearchGetResponse> = AsyncSearchAsyncSearchDocumentResponseBase export interface AsyncSearchStatusRequest extends RequestBase { +/** A unique identifier for the async search. */ id: Id + /** Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. */ keep_alive?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, keep_alive?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, keep_alive?: never } } export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase @@ -6696,69 +7377,126 @@ export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSea } export interface AsyncSearchSubmitRequest extends RequestBase { +/** A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices */ index?: Indices + /** Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. */ wait_for_completion_timeout?: Duration + /** Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. */ + keep_alive?: Duration + /** If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. 
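A sketch of the submit-then-poll flow this enables (the `client` instance and index pattern are hypothetical):
@example
const submitted = await client.asyncSearch.submit({ index: 'logs-*', query: { match_all: {} }, wait_for_completion_timeout: '2s', keep_on_completion: true })
// If the search outlived the timeout, poll for the stored results by ID.
if (submitted.is_running && submitted.id) { const result = await client.asyncSearch.get({ id: submitted.id }) }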
*/ keep_on_completion?: boolean + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Indicate if an error should be returned if there is a partial search failure or timeout */ allow_partial_search_results?: boolean + /** The analyzer to use for the query string */ analyzer?: string + /** Specify whether wildcard and prefix queries should be analyzed (default: false) */ analyze_wildcard?: boolean + /** Affects how often partial results become available, which happens whenever shard results are reduced. A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). */ batched_reduce_size?: long + /** The default value is the only supported value. */ ccs_minimize_roundtrips?: boolean + /** The default operator for query string query (AND or OR) */ default_operator?: QueryDslOperator + /** The field to use as default where no field prefix is given in the query string */ df?: string + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Whether specified concrete, expanded or aliased indices should be ignored when throttled */ ignore_throttled?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** Specify whether format-based query failures (such as providing text to a numeric field) should be ignored */ lenient?: boolean + /** The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests */ max_concurrent_shard_requests?: long + /** Specify the node or shard the operation should be performed on (default: random) */ preference?: string + /** Specify if request cache should be used for this request or not, defaults to true */ request_cache?: boolean + /** A comma-separated list of specific routing values */ routing?: Routing + /** Search operation type */ search_type?: SearchType + /** Specifies which field to use for suggestions. */ suggest_field?: Field + /** Specify suggest mode */ suggest_mode?: SuggestMode + /** How many suggestions to return in response */ suggest_size?: long + /** The source text for which the suggestions should be returned. */ suggest_text?: string + /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean + /** Indicates whether hits.total should be rendered as an integer or an object in the rest search response */ rest_total_hits_as_int?: boolean + /** A list of fields to exclude from the returned _source field */ _source_excludes?: Fields + /** A list of fields to extract and return from the _source field */ _source_includes?: Fields + /** Query in the Lucene query string syntax */ q?: string aggregations?: Record /** @alias aggregations */ aggs?: Record collapse?: SearchFieldCollapse + /** If true, returns detailed information about score computation as part of a hit. */ explain?: boolean + /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record + /** Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. 
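(For instance, `from: 9950` with `size: 100` exceeds the default 10,000-document window set by `index.max_result_window` and is rejected.)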
To page through more hits, use the search_after parameter. */ from?: integer highlight?: SearchHighlight + /** Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. */ track_total_hits?: SearchTrackHits + /** Boosts the _score of documents from specified indices. */ indices_boost?: Record[] + /** Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** Defines the approximate kNN search to run. */ knn?: KnnSearch | KnnSearch[] + /** Minimum _score for matching documents. Documents with a lower _score are not included in the search results. */ min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean + /** Defines the search definition using the Query DSL. */ query?: QueryDslQueryContainer rescore?: SearchRescore | SearchRescore[] + /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record search_after?: SortResults + /** The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. */ size?: integer slice?: SlicedScroll sort?: Sort + /** Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. */ _source?: SearchSourceConfig + /** Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] suggest?: SearchSuggester + /** Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. */ terminate_after?: long + /** Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */ timeout?: string + /** If true, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean + /** If true, returns document version as part of a hit. */ version?: boolean + /** If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. */ seq_no_primary_term?: boolean + /** List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. */ stored_fields?: Fields + /** Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. */ pit?: SearchPointInTimeReference + /** Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields + /** Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. 
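For example, submitting searches with `stats: ['checkout']` (an arbitrary group name) surfaces their query counts and timings under that group in the index stats output.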
You can retrieve these stats using the indices stats API. */ stats?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } } export type AsyncSearchSubmitResponse> = AsyncSearchAsyncSearchDocumentResponseBase @@ -6769,9 +7507,16 @@ export interface AutoscalingAutoscalingPolicy { } export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase { +/** the name of the autoscaling policy */ name: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type AutoscalingDeleteAutoscalingPolicyResponse = AcknowledgedResponseBase @@ -6804,7 +7549,12 @@ export interface AutoscalingGetAutoscalingCapacityAutoscalingResources { } export interface AutoscalingGetAutoscalingCapacityRequest extends RequestBase { +/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export interface AutoscalingGetAutoscalingCapacityResponse { @@ -6812,17 +7562,30 @@ export interface AutoscalingGetAutoscalingCapacityResponse { } export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { +/** the name of the autoscaling policy */ name: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } } export type AutoscalingGetAutoscalingPolicyResponse = AutoscalingAutoscalingPolicy export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { +/** the name of the autoscaling policy */ name: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration policy?: AutoscalingAutoscalingPolicy + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never } } export type AutoscalingPutAutoscalingPolicyResponse = AcknowledgedResponseBase @@ -6871,9 +7634,16 @@ export interface CatAliasesAliasesRecord { } export interface CatAliasesRequest extends CatCatRequestBase { +/** A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names + /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. 
*/ expand_wildcards?: ExpandWildcards + /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never time out, you can set it to `-1`. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never } } export type CatAliasesResponse = CatAliasesAliasesRecord[] @@ -6915,17 +7685,25 @@ export interface CatAllocationAllocationRecord { } export interface CatAllocationRequest extends CatCatRequestBase { +/** A comma-separated list of node identifiers or names used to limit the returned information. */ node_id?: NodeIds + /** The unit used to display byte values. */ bytes?: Bytes + /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, bytes?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any } & { node_id?: never, bytes?: never, local?: never, master_timeout?: never } } export type CatAllocationResponse = CatAllocationAllocationRecord[] export interface CatComponentTemplatesComponentTemplate { name: string - version: string + version: string | null alias_count: string mapping_count: string settings_count: string @@ -6934,9 +7712,16 @@ export interface CatComponentTemplatesRequest extends CatCatRequestBase { +/** The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. */ name?: string + /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean + /** The period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } } export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] @@ -6956,7 +7741,12 @@ export interface CatCountCountRecord { } export interface CatCountRequest extends CatCatRequestBase { +/** A comma-separated list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
*/ index?: Indices + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } } export type CatCountResponse = CatCountCountRecord[] @@ -6974,8 +7764,14 @@ export interface CatFielddataFielddataRecord { } export interface CatFielddataRequest extends CatCatRequestBase { +/** Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter. */ fields?: Fields + /** The unit used to display byte values. */ bytes?: Bytes + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { fields?: never, bytes?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { fields?: never, bytes?: never } } export type CatFielddataResponse = CatFielddataFielddataRecord[] @@ -7034,13 +7830,23 @@ export interface CatHealthHealthRecord { } export interface CatHealthRequest extends CatCatRequestBase { +/** The unit used to display time values. */ time?: TimeUnit + /** If true, returns `HH:MM:SS` and Unix epoch timestamps. */ ts?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { time?: never, ts?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { time?: never, ts?: never } } export type CatHealthResponse = CatHealthHealthRecord[] export interface CatHelpRequest { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface CatHelpResponse { @@ -7338,14 +8144,26 @@ export interface CatIndicesIndicesRecord { } export interface CatIndicesRequest extends CatCatRequestBase { +/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** The unit used to display byte values. */ bytes?: Bytes + /** The type of index that wildcard patterns can match. */ expand_wildcards?: ExpandWildcards + /** The health status used to limit returned indices. By default, the response includes indices of any health status. */ health?: HealthStatus + /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean + /** If true, the response only includes information from primary shards. */ pri?: boolean + /** The unit used to display time values. */ time?: TimeUnit + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never } } export type CatIndicesResponse = CatIndicesIndicesRecord[] @@ -7360,8 +8178,14 @@ export interface CatMasterMasterRecord { } export interface CatMasterRequest extends CatCatRequestBase { +/** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { local?: never, master_timeout?: never } } export type CatMasterResponse = CatMasterMasterRecord[] @@ -7411,12 +8235,22 @@ export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { } export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { +/** The ID of the data frame analytics to fetch */ id?: Id + /** Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) */ allow_no_match?: boolean + /** The unit in which to display byte values */ bytes?: Bytes + /** Comma-separated list of column names to display. */ h?: CatCatDfaColumns + /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatDfaColumns + /** Unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never } } export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] @@ -7457,11 +8291,20 @@ export interface CatMlDatafeedsDatafeedsRecord { } export interface CatMlDatafeedsRequest extends CatCatRequestBase { +/** A numerical character string that uniquely identifies the datafeed. */ datafeed_id?: Id + /** Specifies what to do when the request: * Contains wildcard expressions and there are no datafeeds that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** Comma-separated list of column names to display. */ h?: CatCatDatafeedColumns + /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatDatafeedColumns + /** The unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, h?: never, s?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, h?: never, s?: never, time?: never } } export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] @@ -7644,25 +8487,47 @@ export interface CatMlJobsJobsRecord { } export interface CatMlJobsRequest extends CatCatRequestBase { +/** Identifier for the anomaly detection job. */ job_id?: Id + /** Specifies what to do when the request: * Contains wildcard expressions and there are no jobs that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** The unit used to display byte values. */ bytes?: Bytes + /** Comma-separated list of column names to display. */ h?: CatCatAnonalyDetectorColumns + /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatAnonalyDetectorColumns + /** The unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never } } export type CatMlJobsResponse = CatMlJobsJobsRecord[] export interface CatMlTrainedModelsRequest extends CatCatRequestBase { +/** A unique identifier for the trained model. */ model_id?: Id + /** Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** The unit used to display byte values. */ bytes?: Bytes + /** A comma-separated list of column names to display. */ h?: CatCatTrainedModelsColumns + /** A comma-separated list of column names or aliases used to sort the response. */ s?: CatCatTrainedModelsColumns + /** Skips the specified number of transforms. */ from?: integer + /** The maximum number of transforms to display. */ size?: integer + /** Unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, from?: never, size?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. 
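+ *
+ * @example Usage sketch (illustrative; assumes a connected `client`):
+ *
+ *   // resolves to CatMlTrainedModelsResponse, i.e. CatMlTrainedModelsTrainedModelsRecord[]
+ *   const models = await client.cat.mlTrainedModels({ allow_no_match: true, from: 0, size: 10 })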
*/ + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, from?: never, size?: never, time?: never } } export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[] @@ -7730,8 +8595,14 @@ export interface CatNodeattrsNodeAttributesRecord { } export interface CatNodeattrsRequest extends CatCatRequestBase { +/** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { local?: never, master_timeout?: never } } export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] @@ -8007,11 +8878,20 @@ export interface CatNodesNodesRecord { } export interface CatNodesRequest extends CatCatRequestBase { +/** The unit used to display byte values. */ bytes?: Bytes + /** If `true`, return the full node ID. If `false`, return the shortened node ID. */ full_id?: boolean | string + /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, master_timeout?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, master_timeout?: never, time?: never } } export type CatNodesResponse = CatNodesNodesRecord[] @@ -8028,9 +8908,16 @@ export interface CatPendingTasksPendingTasksRecord { } export interface CatPendingTasksRequest extends CatCatRequestBase { +/** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { local?: never, master_timeout?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. 
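+ *
+ * @example Usage sketch for the nodes API above (illustrative; assumes a connected `client`):
+ *
+ *   // resolves to CatNodesResponse, i.e. CatNodesNodesRecord[]
+ *   const nodes = await client.cat.nodes({ bytes: 'gb', full_id: true })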
*/ + querystring?: { [key: string]: any } & { local?: never, master_timeout?: never, time?: never } } export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] @@ -8050,9 +8937,16 @@ export interface CatPluginsPluginsRecord { } export interface CatPluginsRequest extends CatCatRequestBase { +/** Include bootstrap plugins in the response */ include_bootstrap?: boolean + /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { include_bootstrap?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { include_bootstrap?: never, local?: never, master_timeout?: never } } export type CatPluginsResponse = CatPluginsPluginsRecord[] @@ -8116,11 +9010,20 @@ export interface CatRecoveryRecoveryRecord { } export interface CatRecoveryRequest extends CatCatRequestBase { +/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `true`, the response only includes ongoing shard recoveries. */ active_only?: boolean + /** The unit used to display byte values. */ bytes?: Bytes + /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean + /** Unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, time?: never } } export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] @@ -8133,17 +9036,31 @@ export interface CatRepositoriesRepositoriesRecord { } export interface CatRepositoriesRequest extends CatCatRequestBase { +/** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { local?: never, master_timeout?: never } } export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] export interface CatSegmentsRequest extends CatCatRequestBase { +/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. 
*/ index?: Indices + /** The unit used to display byte values. */ bytes?: Bytes + /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, bytes?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, bytes?: never, local?: never, master_timeout?: never } } export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] @@ -8191,10 +9108,18 @@ export interface CatSegmentsSegmentsRecord { } export interface CatShardsRequest extends CatCatRequestBase { +/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** The unit used to display byte values. */ bytes?: Bytes + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, bytes?: never, master_timeout?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, bytes?: never, master_timeout?: never, time?: never } } export type CatShardsResponse = CatShardsShardsRecord[] @@ -8415,10 +9340,18 @@ export interface CatShardsShardsRecord { } export interface CatSnapshotsRequest extends CatCatRequestBase { +/** A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. */ repository?: Names + /** If `true`, the response does not include information from unavailable snapshots. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, ignore_unavailable?: never, master_timeout?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, ignore_unavailable?: never, master_timeout?: never, time?: never } } export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] @@ -8458,13 +9391,24 @@ export interface CatSnapshotsSnapshotsRecord { } export interface CatTasksRequest extends CatCatRequestBase { +/** The task action names, which are used to limit the response. */ actions?: string[] + /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean + /** Unique node identifiers, which are used to limit the response. */ nodes?: string[] + /** The parent task identifier, which is used to limit the response. 
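+ *
+ * @example Usage sketch for the shards API above (illustrative; assumes a connected `client`
+ * and a placeholder index pattern):
+ *
+ *   // resolves to CatShardsResponse, i.e. CatShardsShardsRecord[]
+ *   const shards = await client.cat.shards({ index: 'my-index-*', bytes: 'kb' })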
*/ parent_task_id?: string + /** Unit used to display time values. */ time?: TimeUnit + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** If `true`, the request blocks until the task has completed. */ wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, time?: never, timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, time?: never, timeout?: never, wait_for_completion?: never } } export type CatTasksResponse = CatTasksTasksRecord[] @@ -8505,9 +9449,16 @@ export interface CatTasksTasksRecord { } export interface CatTemplatesRequest extends CatCatRequestBase { +/** The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. */ name?: Name + /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } } export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] @@ -8527,10 +9478,18 @@ export interface CatTemplatesTemplatesRecord { } export interface CatThreadPoolRequest extends CatCatRequestBase { +/** A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. */ thread_pool_patterns?: Names + /** The unit used to display time values. */ time?: TimeUnit + /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { thread_pool_patterns?: never, time?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { thread_pool_patterns?: never, time?: never, local?: never, master_timeout?: never } } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -8579,13 +9538,24 @@ export interface CatThreadPoolThreadPoolRecord { } export interface CatTransformsRequest extends CatCatRequestBase { +/** A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all transforms. 
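+ *
+ * @example Usage sketch for the tasks API above (illustrative; assumes a connected `client`):
+ *
+ *   // resolves to CatTasksResponse, i.e. CatTasksTasksRecord[]
+ *   const tasks = await client.cat.tasks({ detailed: true, actions: ['*search*'] })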
*/ transform_id?: Id + /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** Skips the specified number of transforms. */ from?: integer + /** Comma-separated list of column names to display. */ h?: CatCatTransformColumns + /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatTransformColumns + /** The unit used to display time values. */ time?: TimeUnit + /** The maximum number of transforms to obtain. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, h?: never, s?: never, time?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, h?: never, s?: never, time?: never, size?: never } } export type CatTransformsResponse = CatTransformsTransformsRecord[] @@ -8713,30 +9683,57 @@ export interface CcrShardStats { } export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { +/** The name of the auto follow pattern. */ name: Name + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } } export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrFollowRequest extends RequestBase { +/** The name of the follower index. */ index: IndexName + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the remote Lucene segment files to the follower index. */ wait_for_active_shards?: WaitForActiveShards + /** If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. */ data_stream_name?: string + /** The name of the index in the leader cluster to follow. */ leader_index: IndexName + /** The maximum number of outstanding reads requests from the remote cluster. */ max_outstanding_read_requests?: long + /** The maximum number of outstanding write requests on the follower. */ max_outstanding_write_requests?: integer + /** The maximum number of operations to pull per read from the remote cluster. */ max_read_request_operation_count?: integer + /** The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. */ max_read_request_size?: ByteSize + /** The maximum time to wait before retrying an operation that failed exceptionally. 
An exponential backoff strategy is employed when retrying. */ max_retry_delay?: Duration + /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. */ max_write_buffer_count?: integer + /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. */ max_write_buffer_size?: ByteSize + /** The maximum number of operations per bulk write request executed on the follower. */ max_write_request_operation_count?: integer + /** The maximum total bytes of operations per bulk write request executed on the follower. */ max_write_request_size?: ByteSize + /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. */ read_poll_timeout?: Duration + /** The remote cluster containing the leader index. */ remote_cluster: string + /** Settings to override from the leader index. */ settings?: IndicesIndexSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, wait_for_active_shards?: never, data_stream_name?: never, leader_index?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never, remote_cluster?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, wait_for_active_shards?: never, data_stream_name?: never, leader_index?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never, remote_cluster?: never, settings?: never } } export interface CcrFollowResponse { @@ -8769,8 +9766,14 @@ export interface CcrFollowInfoFollowerIndexParameters { export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused' export interface CcrFollowInfoRequest extends RequestBase { +/** A comma-separated list of index patterns; use `_all` to perform the operation on all indices */ index: Indices + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
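+ *
+ * @example Usage sketch for the follow API above (illustrative; assumes a connected `client`
+ * on the follower cluster, a configured remote cluster, and placeholder index names):
+ *
+ *   const res = await client.ccr.follow({
+ *     index: 'follower-index',
+ *     leader_index: 'leader-index',
+ *     remote_cluster: 'remote-cluster',
+ *     wait_for_active_shards: 1
+ *   })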
*/ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never } } export interface CcrFollowInfoResponse { @@ -8778,8 +9781,14 @@ export interface CcrFollowInfoResponse { } export interface CcrFollowStatsRequest extends RequestBase { +/** A comma-separated list of index patterns; use `_all` to perform the operation on all indices */ index: Indices + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, timeout?: never } } export interface CcrFollowStatsResponse { @@ -8787,12 +9796,18 @@ export interface CcrFollowStatsResponse { } export interface CcrForgetFollowerRequest extends RequestBase { +/** the name of the leader index for which specified follower retention leases should be removed */ index: IndexName + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration follower_cluster?: string follower_index?: IndexName follower_index_uuid?: Uuid leader_remote_cluster?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, timeout?: never, follower_cluster?: never, follower_index?: never, follower_index_uuid?: never, leader_remote_cluster?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, timeout?: never, follower_cluster?: never, follower_index?: never, follower_index_uuid?: never, leader_remote_cluster?: never } } export interface CcrForgetFollowerResponse { @@ -8814,8 +9829,14 @@ export interface CcrGetAutoFollowPatternAutoFollowPatternSummary { } export interface CcrGetAutoFollowPatternRequest extends RequestBase { +/** Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. */ name?: Name + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } } export interface CcrGetAutoFollowPatternResponse { @@ -8823,50 +9844,91 @@ export interface CcrGetAutoFollowPatternResponse { } export interface CcrPauseAutoFollowPatternRequest extends RequestBase { +/** The name of the auto follow pattern that should pause discovering new indices to follow. */ name: Name + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
*/
+ querystring?: { [key: string]: any } & { name?: never, master_timeout?: never }
}

export type CcrPauseAutoFollowPatternResponse = AcknowledgedResponseBase

export interface CcrPauseFollowRequest extends RequestBase {
+/** The name of the follower index that should pause following its leader index. */ index: IndexName
+ /** Period to wait for a connection to the master node. */ master_timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { index?: never, master_timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { index?: never, master_timeout?: never }
}

export type CcrPauseFollowResponse = AcknowledgedResponseBase

export interface CcrPutAutoFollowPatternRequest extends RequestBase {
+/** The name of the collection of auto-follow patterns. */ name: Name
+ /** Period to wait for a connection to the master node. */ master_timeout?: Duration
+ /** The remote cluster containing the leader indices to match against. */ remote_cluster: string
+ /** The name of the follower index. The template {{leader_index}} can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use {{leader_index}}; CCR does not support changes to the names of a follower data stream’s backing indices. */ follow_index_pattern?: IndexPattern
+ /** An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. */ leader_index_patterns?: IndexPatterns
+ /** An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed. */ leader_index_exclusion_patterns?: IndexPatterns
+ /** The maximum number of outstanding reads requests from the remote cluster. */ max_outstanding_read_requests?: integer
+ /** Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards). */ settings?: Record<string, any>
+ /** The maximum number of outstanding write requests on the follower. */ max_outstanding_write_requests?: integer
+ /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. */ read_poll_timeout?: Duration
+ /** The maximum number of operations to pull per read from the remote cluster. */ max_read_request_operation_count?: integer
+ /** The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. */ max_read_request_size?: ByteSize
+ /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. */ max_retry_delay?: Duration
+ /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. */ max_write_buffer_count?: integer
+ /** The maximum total bytes of operations that can be queued for writing.
When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. */ max_write_buffer_size?: ByteSize + /** The maximum number of operations per bulk write request executed on the follower. */ max_write_request_operation_count?: integer + /** The maximum total bytes of operations per bulk write request executed on the follower. */ max_write_request_size?: ByteSize + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, remote_cluster?: never, follow_index_pattern?: never, leader_index_patterns?: never, leader_index_exclusion_patterns?: never, max_outstanding_read_requests?: never, settings?: never, max_outstanding_write_requests?: never, read_poll_timeout?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, remote_cluster?: never, follow_index_pattern?: never, leader_index_patterns?: never, leader_index_exclusion_patterns?: never, max_outstanding_read_requests?: never, settings?: never, max_outstanding_write_requests?: never, read_poll_timeout?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never } } export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeAutoFollowPatternRequest extends RequestBase { +/** The name of the auto follow pattern to resume discovering new indices to follow. */ name: Name + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } } export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeFollowRequest extends RequestBase { +/** The name of the follow index to resume following. */ index: IndexName + /** Period to wait for a connection to the master node. */ master_timeout?: Duration max_outstanding_read_requests?: long max_outstanding_write_requests?: long @@ -8878,6 +9940,10 @@ export interface CcrResumeFollowRequest extends RequestBase { max_write_request_operation_count?: long max_write_request_size?: string read_poll_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
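+ *
+ * @example Usage sketch for the auto-follow pattern API above (illustrative; assumes a
+ * connected `client`, a configured remote cluster, and placeholder names):
+ *
+ *   await client.ccr.putAutoFollowPattern({
+ *     name: 'my-auto-follow-pattern',
+ *     remote_cluster: 'remote-cluster',
+ *     leader_index_patterns: ['logs-*'],
+ *     follow_index_pattern: '{{leader_index}}-follower'
+ *   })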
*/ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never } } export type CcrResumeFollowResponse = AcknowledgedResponseBase @@ -8901,8 +9967,14 @@ export interface CcrStatsFollowStats { } export interface CcrStatsRequest extends RequestBase { +/** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export interface CcrStatsResponse { @@ -8911,8 +9983,14 @@ export interface CcrStatsResponse { } export interface CcrUnfollowRequest extends RequestBase { +/** The name of the follower index that should be turned into a regular index. */ index: IndexName + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never } } export type CcrUnfollowResponse = AcknowledgedResponseBase @@ -9001,13 +10079,24 @@ export interface ClusterAllocationExplainNodeDiskUsage { } export interface ClusterAllocationExplainRequest extends RequestBase { +/** If true, returns information about disk usage and shard sizes. */ include_disk_info?: boolean + /** If true, returns YES decisions in explanation. */ include_yes_decisions?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. */ current_node?: string + /** Specifies the name of the index that you would like an explanation for. */ index?: IndexName + /** If true, returns explanation for the primary shard for the given shard ID. */ primary?: boolean + /** Specifies the ID of the shard that you would like an explanation for. */ shard?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { include_disk_info?: never, include_yes_decisions?: never, master_timeout?: never, current_node?: never, index?: never, primary?: never, shard?: never } + /** All values in `querystring` will be added to the request querystring. 
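+ *
+ * @example Usage sketch (illustrative; assumes a connected `client` and a placeholder index):
+ *
+ *   const explanation = await client.cluster.allocationExplain({
+ *     index: 'my-index', shard: 0, primary: true, include_disk_info: true
+ *   })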
*/ + querystring?: { [key: string]: any } & { include_disk_info?: never, include_yes_decisions?: never, master_timeout?: never, current_node?: never, index?: never, primary?: never, shard?: never } } export interface ClusterAllocationExplainReservedSize { @@ -9058,34 +10147,63 @@ export interface ClusterAllocationExplainUnassignedInformation { export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION' export interface ClusterDeleteComponentTemplateRequest extends RequestBase { +/** Comma-separated list or wildcard expression of component template names used to limit the request. */ name: Names + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { +/** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from the cluster before this API takes any action. If set to false then the voting configuration exclusions list is cleared even if some excluded nodes are still in the cluster. */ wait_for_removal?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, wait_for_removal?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, wait_for_removal?: never } } export type ClusterDeleteVotingConfigExclusionsResponse = boolean export interface ClusterExistsComponentTemplateRequest extends RequestBase { +/** Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported. */ name: Names + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, local?: never } } export type ClusterExistsComponentTemplateResponse = boolean export interface ClusterGetComponentTemplateRequest extends RequestBase { +/** Comma-separated list of component template names used to limit the request. Wildcard (`*`) expressions are supported. */ name?: Name + /** If `true`, returns settings in flat format. */ flat_settings?: boolean + /** Return all default configurations for the component template (default: false) */ include_defaults?: boolean + /** If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. */ local?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, include_defaults?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, flat_settings?: never, include_defaults?: never, local?: never, master_timeout?: never } } export interface ClusterGetComponentTemplateResponse { @@ -9093,10 +10211,18 @@ export interface ClusterGetComponentTemplateResponse { } export interface ClusterGetSettingsRequest extends RequestBase { +/** If `true`, returns settings in flat format. */ flat_settings?: boolean + /** If `true`, returns default cluster settings from the local node. */ include_defaults?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { flat_settings?: never, include_defaults?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { flat_settings?: never, include_defaults?: never, master_timeout?: never, timeout?: never } } export interface ClusterGetSettingsResponse { @@ -9140,18 +10266,34 @@ export interface ClusterHealthIndexHealthStats { } export interface ClusterHealthRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. */ index?: Indices + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Can be one of cluster, indices or shards. Controls the details level of the health information returned. */ level?: Level + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
*/ master_timeout?: Duration
+ /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration
+ /** A number controlling how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait. */ wait_for_active_shards?: WaitForActiveShards
+ /** Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed. */ wait_for_events?: WaitForEvents
+ /** The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. Alternatively, it is possible to use ge(N), le(N), gt(N) and lt(N) notation. */ wait_for_nodes?: string | integer
+ /** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard initializations. Defaults to false, which means it will not wait for initializing shards. */ wait_for_no_initializing_shards?: boolean
+ /** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard relocations. Defaults to false, which means it will not wait for relocating shards. */ wait_for_no_relocating_shards?: boolean
+ /** One of green, yellow or red. Will wait until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status. */ wait_for_status?: HealthStatus
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { index?: never, expand_wildcards?: never, level?: never, local?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, wait_for_events?: never, wait_for_nodes?: never, wait_for_no_initializing_shards?: never, wait_for_no_relocating_shards?: never, wait_for_status?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { index?: never, expand_wildcards?: never, level?: never, local?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, wait_for_events?: never, wait_for_nodes?: never, wait_for_no_initializing_shards?: never, wait_for_no_relocating_shards?: never, wait_for_status?: never }
}

export type ClusterHealthResponse = ClusterHealthHealthResponseBody

@@ -9167,7 +10309,12 @@ export interface ClusterHealthShardHealthStats {
}

export interface ClusterInfoRequest extends RequestBase {
+/** Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. */ target: ClusterInfoTargets
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { target?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { target?: never }
}

export interface ClusterInfoResponse {
@@ -9188,8 +10335,14 @@ export interface ClusterPendingTasksPendingTask {
}

export interface ClusterPendingTasksRequest extends RequestBase {
+/** If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. */ local?: boolean
+ /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { local?: never, master_timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { local?: never, master_timeout?: never }
}

export interface ClusterPendingTasksResponse {
@@ -9197,32 +10350,58 @@ export interface ClusterPendingTasksResponse {
}

export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase {
+/** A comma-separated list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify node_ids. */ node_names?: Names
+ /** A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names.
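+ *
+ * @example Usage sketch for the health API above (illustrative; assumes a connected `client`):
+ *
+ *   // resolves to ClusterHealthResponse
+ *   const health = await client.cluster.health({ wait_for_status: 'yellow', timeout: '30s' })
+ *   console.log(health.status)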
*/ node_ids?: Ids + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** When adding a voting configuration exclusion, the API waits for the specified nodes to be excluded from the voting configuration before returning. If the timeout expires before the appropriate condition is satisfied, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_names?: never, node_ids?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_names?: never, node_ids?: never, master_timeout?: never, timeout?: never } } export type ClusterPostVotingConfigExclusionsResponse = boolean export interface ClusterPutComponentTemplateRequest extends RequestBase { +/** Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. */ name: Name + /** If `true`, this request cannot replace or update existing component templates. */ create?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The template to be applied which includes mappings, settings, or aliases configuration. */ template: IndicesIndexState + /** Version number used to manage component templates externally. This number isn't automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version. */ version?: VersionNumber + /** Optional user metadata about the component template. It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. */ _meta?: Metadata + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never } + /** All values in `querystring` will be added to the request querystring. 
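+ *
+ * @example Usage sketch (illustrative; assumes a connected `client`; the name and settings are placeholders):
+ *
+ *   await client.cluster.putComponentTemplate({
+ *     name: 'my-component-template',
+ *     template: { settings: { number_of_shards: 1 } },
+ *     version: 1
+ *   })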
*/
+ querystring?: { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never }
}

export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase

export interface ClusterPutSettingsRequest extends RequestBase {
+/** Return settings in flat format (default: false) */ flat_settings?: boolean
+ /** Explicit operation timeout for connection to master node */ master_timeout?: Duration
+ /** Explicit operation timeout */ timeout?: Duration
 persistent?: Record<string, any>
 transient?: Record<string, any>
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { flat_settings?: never, master_timeout?: never, timeout?: never, persistent?: never, transient?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { flat_settings?: never, master_timeout?: never, timeout?: never, persistent?: never, transient?: never }
}

export interface ClusterPutSettingsResponse {
@@ -9255,6 +10434,10 @@ export interface ClusterRemoteInfoClusterRemoteSniffInfo {
}

export interface ClusterRemoteInfoRequest extends RequestBase {
 + /** All values in `body` will be added to the request body. */
 + body?: string | { [key: string]: any }
 + /** All values in `querystring` will be added to the request querystring. */
 + querystring?: { [key: string]: any }
}

export type ClusterRemoteInfoResponse = Record<string, ClusterRemoteInfoClusterRemoteSniffInfo | ClusterRemoteInfoClusterRemoteProxyInfo>

@@ -9295,13 +10478,24 @@ export interface ClusterRerouteCommandMoveAction {
}

export interface ClusterRerouteRequest extends RequestBase {
+/** If true, then the request simulates the operation. It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. */ dry_run?: boolean
+ /** If true, then the response contains an explanation of why the commands can or cannot run. */ explain?: boolean
+ /** Limits the information returned to the specified metrics. */ metric?: Metrics
+ /** If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. */ retry_failed?: boolean
+ /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration
+ /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration
+ /** Defines the commands to perform. */ commands?: ClusterRerouteCommand[]
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { dry_run?: never, explain?: never, metric?: never, retry_failed?: never, master_timeout?: never, timeout?: never, commands?: never }
+ /** All values in `querystring` will be added to the request querystring.
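+ *
+ * @example Usage sketch for the settings API above (illustrative; assumes a connected `client`;
+ * the key shown is one example of a dynamic cluster setting):
+ *
+ *   await client.cluster.putSettings({
+ *     persistent: { 'cluster.routing.allocation.enable': 'all' }
+ *   })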
*/ + querystring?: { [key: string]: any } & { dry_run?: never, explain?: never, metric?: never, retry_failed?: never, master_timeout?: never, timeout?: never, commands?: never } } export interface ClusterRerouteRerouteDecision { @@ -9332,16 +10526,30 @@ export interface ClusterRerouteResponse { } export interface ClusterStateRequest extends RequestBase { +/** Limit the information returned to the specified metrics */ metric?: Metrics + /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Return settings in flat format (default: false) */ flat_settings?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** Return local information, do not retrieve the state from master node (default: false) */ local?: boolean + /** Specify timeout for connection to master */ master_timeout?: Duration + /** Wait for the metadata version to be equal or greater than the specified metadata version */ wait_for_metadata_version?: VersionNumber + /** The maximum time to wait for wait_for_metadata_version before timing out */ wait_for_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { metric?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, wait_for_metadata_version?: never, wait_for_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { metric?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, wait_for_metadata_version?: never, wait_for_timeout?: never } } export type ClusterStateResponse = any @@ -9570,9 +10778,16 @@ export interface ClusterStatsOperatingSystemMemoryInfo { } export interface ClusterStatsRequest extends RequestBase { +/** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */ node_id?: NodeIds + /** Include remote cluster data into the response */ include_remotes?: boolean + /** Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, include_remotes?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
*/
+ querystring?: { [key: string]: any } & { node_id?: never, include_remotes?: never, timeout?: never }
}

export type ClusterStatsResponse = ClusterStatsStatsResponseBase

@@ -9608,6 +10823,7 @@ export interface ConnectorConnector {
   api_key_secret_id?: string
   configuration: ConnectorConnectorConfiguration
   custom_scheduling: ConnectorConnectorCustomScheduling
+  deleted: boolean
   description?: string
   error?: string | null
   features?: ConnectorConnectorFeatures
@@ -9835,7 +11051,12 @@
export type ConnectorSyncStatus = 'canceling' | 'canceled' | 'completed' | 'error' | 'in_progress' | 'pending' | 'suspended'

export type ConnectorValidation = ConnectorLessThanValidation | ConnectorGreaterThanValidation | ConnectorListTypeValidation | ConnectorIncludedInValidation | ConnectorRegexValidation

export interface ConnectorCheckInRequest extends RequestBase {
+/** The unique identifier of the connector to be checked in */ connector_id: Id
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { connector_id?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { connector_id?: never }
}

export interface ConnectorCheckInResponse {
@@ -9843,19 +11064,35 @@ export interface ConnectorCheckInResponse {
}

export interface ConnectorDeleteRequest extends RequestBase {
+/** The unique identifier of the connector to be deleted */ connector_id: Id
+ /** A flag indicating if associated sync jobs should also be removed. Defaults to false. */ delete_sync_jobs?: boolean
+ /** A flag indicating if the connector should be hard deleted. */
+ hard?: boolean
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { connector_id?: never, delete_sync_jobs?: never, hard?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { connector_id?: never, delete_sync_jobs?: never, hard?: never }
}

export type ConnectorDeleteResponse = AcknowledgedResponseBase

export interface ConnectorGetRequest extends RequestBase {
+/** The unique identifier of the connector */ connector_id: Id
+ /** A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. */
+ include_deleted?: boolean
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { connector_id?: never, include_deleted?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { connector_id?: never, include_deleted?: never }
}

export type ConnectorGetResponse = ConnectorConnector

export interface ConnectorLastSyncRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */ connector_id: Id
 last_access_control_sync_error?: string
 last_access_control_sync_scheduled_at?: DateTime
 last_access_control_sync_status?: ConnectorSyncStatus
 last_deleted_document_count?: long
 last_incremental_sync_scheduled_at?: DateTime
 last_indexed_document_count?: long
 last_seen?: DateTime
 last_sync_error?: string
 last_sync_scheduled_at?: DateTime
 last_sync_status?: ConnectorSyncStatus
 last_synced?: DateTime
 sync_cursor?: any
+ /** All values in `body` will be added to the request body.
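+ *
+ * @example Usage sketch for the connector APIs above (illustrative; assumes a connected `client`
+ * and a placeholder connector id):
+ *
+ *   // resolves to ConnectorGetResponse, i.e. a ConnectorConnector document
+ *   const connector = await client.connector.get({ connector_id: 'my-connector' })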
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, last_access_control_sync_error?: never, last_access_control_sync_scheduled_at?: never, last_access_control_sync_status?: never, last_deleted_document_count?: never, last_incremental_sync_scheduled_at?: never, last_indexed_document_count?: never, last_seen?: never, last_sync_error?: never, last_sync_scheduled_at?: never, last_sync_status?: never, last_synced?: never, sync_cursor?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, last_access_control_sync_error?: never, last_access_control_sync_scheduled_at?: never, last_access_control_sync_status?: never, last_deleted_document_count?: never, last_incremental_sync_scheduled_at?: never, last_indexed_document_count?: never, last_seen?: never, last_sync_error?: never, last_sync_scheduled_at?: never, last_sync_status?: never, last_synced?: never, sync_cursor?: never }
 }

 export interface ConnectorLastSyncResponse {
@@ -9876,12 +11117,24 @@
 }

 export interface ConnectorListRequest extends RequestBase {
+/** Starting offset (default: 0) */
   from?: integer
+  /** Specifies a max number of results to get */
   size?: integer
+  /** A comma-separated list of connector index names to fetch connector documents for */
   index_name?: Indices
+  /** A comma-separated list of connector names to fetch connector documents for */
   connector_name?: Names
+  /** A comma-separated list of connector service types to fetch connector documents for */
   service_type?: Names
+  /** A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. */
+  include_deleted?: boolean
+  /** A wildcard query string that filters connectors with matching name, description or index name */
   query?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { from?: never, size?: never, index_name?: never, connector_name?: never, service_type?: never, include_deleted?: never, query?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { from?: never, size?: never, index_name?: never, connector_name?: never, service_type?: never, include_deleted?: never, query?: never }
 }

 export interface ConnectorListResponse {
@@ -9896,6 +11149,10 @@ export interface ConnectorPostRequest extends RequestBase {
   language?: string
   name?: string
   service_type?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never }
 }

 export interface ConnectorPostResponse {
@@ -9904,6 +11161,7 @@
 }

 export interface ConnectorPutRequest extends RequestBase {
+/** The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. */
   connector_id?: Id
   description?: string
   index_name?: IndexName
   is_native?: boolean
   language?: string
   name?: string
   service_type?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never }
 }

 export interface ConnectorPutResponse {
@@ -9919,7 +11181,12 @@
 }

 export interface ConnectorSyncJobCancelRequest extends RequestBase {
+/** The unique identifier of the connector sync job */
   connector_sync_job_id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_sync_job_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_sync_job_id?: never }
 }

 export interface ConnectorSyncJobCancelResponse {
@@ -9927,47 +11194,84 @@
 }

 export interface ConnectorSyncJobCheckInRequest extends RequestBase {
+/** The unique identifier of the connector sync job to be checked in. */
   connector_sync_job_id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_sync_job_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_sync_job_id?: never }
 }

 export interface ConnectorSyncJobCheckInResponse {
 }

 export interface ConnectorSyncJobClaimRequest extends RequestBase {
+/** The unique identifier of the connector sync job. */
   connector_sync_job_id: Id
+  /** The cursor object from the last incremental sync job. This should reference the `sync_cursor` field in the connector state for which the job runs. */
   sync_cursor?: any
+  /** The host name of the current system that will run the job. */
   worker_hostname: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_sync_job_id?: never, sync_cursor?: never, worker_hostname?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_sync_job_id?: never, sync_cursor?: never, worker_hostname?: never }
 }

 export interface ConnectorSyncJobClaimResponse {
 }

 export interface ConnectorSyncJobDeleteRequest extends RequestBase {
+/** The unique identifier of the connector sync job to be deleted */
   connector_sync_job_id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_sync_job_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_sync_job_id?: never }
 }

 export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase

 export interface ConnectorSyncJobErrorRequest extends RequestBase {
+/** The unique identifier for the connector sync job. */
   connector_sync_job_id: Id
+  /** The error for the connector sync job error field. */
   error: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_sync_job_id?: never, error?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_sync_job_id?: never, error?: never }
 }

 export interface ConnectorSyncJobErrorResponse {
 }

 export interface ConnectorSyncJobGetRequest extends RequestBase {
+/** The unique identifier of the connector sync job */
   connector_sync_job_id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_sync_job_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_sync_job_id?: never }
 }

 export type ConnectorSyncJobGetResponse = ConnectorConnectorSyncJob

 export interface ConnectorSyncJobListRequest extends RequestBase {
+/** Starting offset (default: 0) */
   from?: integer
+  /** Specifies a max number of results to get */
   size?: integer
+  /** A sync job status to fetch connector sync jobs for */
   status?: ConnectorSyncStatus
+  /** A connector id to fetch connector sync jobs for */
   connector_id?: Id
+  /** A comma-separated list of job types to fetch the sync jobs for */
   job_type?: ConnectorSyncJobType | ConnectorSyncJobType[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { from?: never, size?: never, status?: never, connector_id?: never, job_type?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { from?: never, size?: never, status?: never, connector_id?: never, job_type?: never }
 }

 export interface ConnectorSyncJobListResponse {
@@ -9976,9 +11280,14 @@
 }

 export interface ConnectorSyncJobPostRequest extends RequestBase {
+/** The id of the associated connector */
   id: Id
   job_type?: ConnectorSyncJobType
   trigger_method?: ConnectorSyncJobTriggerMethod
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, job_type?: never, trigger_method?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, job_type?: never, trigger_method?: never }
 }

 export interface ConnectorSyncJobPostResponse {
@@ -9986,20 +11295,36 @@
 }

 export interface ConnectorSyncJobUpdateStatsRequest extends RequestBase {
+/** The unique identifier of the connector sync job. */
   connector_sync_job_id: Id
+  /** The number of documents the sync job deleted. */
   deleted_document_count: long
+  /** The number of documents the sync job indexed. */
   indexed_document_count: long
+  /** The total size of the data (in MiB) the sync job indexed. */
   indexed_document_volume: long
+  /** The timestamp to use in the `last_seen` property for the connector sync job. */
   last_seen?: Duration
+  /** The connector-specific metadata. */
   metadata?: Metadata
+  /** The total number of documents in the target index after the sync job finished. */
   total_document_count?: integer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_sync_job_id?: never, deleted_document_count?: never, indexed_document_count?: never, indexed_document_volume?: never, last_seen?: never, metadata?: never, total_document_count?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_sync_job_id?: never, deleted_document_count?: never, indexed_document_count?: never, indexed_document_volume?: never, last_seen?: never, metadata?: never, total_document_count?: never }
 }

 export interface ConnectorSyncJobUpdateStatsResponse {
 }

 export interface ConnectorUpdateActiveFilteringRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never }
 }

 export interface ConnectorUpdateActiveFilteringResponse {
@@ -10007,9 +11332,14 @@
 }

 export interface ConnectorUpdateApiKeyIdRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   api_key_id?: string
   api_key_secret_id?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, api_key_id?: never, api_key_secret_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, api_key_id?: never, api_key_secret_id?: never }
 }

 export interface ConnectorUpdateApiKeyIdResponse {
@@ -10017,9 +11347,14 @@
 }

 export interface ConnectorUpdateConfigurationRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   configuration?: ConnectorConnectorConfiguration
   values?: Record
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, configuration?: never, values?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, configuration?: never, values?: never }
 }

 export interface ConnectorUpdateConfigurationResponse {
@@ -10027,8 +11362,13 @@
 }

 export interface ConnectorUpdateErrorRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   error: SpecUtilsWithNullValue
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, error?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, error?: never }
 }

 export interface ConnectorUpdateErrorResponse {
@@ -10036,8 +11376,13 @@
 }

 export interface ConnectorUpdateFeaturesRequest extends RequestBase {
+/** The unique identifier of the connector to be updated. */
   connector_id: Id
   features: ConnectorConnectorFeatures
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, features?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, features?: never }
 }

 export interface ConnectorUpdateFeaturesResponse {
@@ -10045,10 +11390,15 @@
 }

 export interface ConnectorUpdateFilteringRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   filtering?: ConnectorFilteringConfig[]
   rules?: ConnectorFilteringRule[]
   advanced_snippet?: ConnectorFilteringAdvancedSnippet
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, filtering?: never, rules?: never, advanced_snippet?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, filtering?: never, rules?: never, advanced_snippet?: never }
 }

 export interface ConnectorUpdateFilteringResponse {
@@ -10056,8 +11406,13 @@
 }

 export interface ConnectorUpdateFilteringValidationRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   validation: ConnectorFilteringRulesValidation
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, validation?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, validation?: never }
 }

 export interface ConnectorUpdateFilteringValidationResponse {
@@ -10065,8 +11420,13 @@
 }

 export interface ConnectorUpdateIndexNameRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   index_name: SpecUtilsWithNullValue
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, index_name?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, index_name?: never }
 }

 export interface ConnectorUpdateIndexNameResponse {
@@ -10074,9 +11434,14 @@
 }

 export interface ConnectorUpdateNameRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   name?: string
   description?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, name?: never, description?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, name?: never, description?: never }
 }

 export interface ConnectorUpdateNameResponse {
@@ -10084,8 +11449,13 @@
 }

 export interface ConnectorUpdateNativeRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   is_native: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, is_native?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, is_native?: never }
 }

 export interface ConnectorUpdateNativeResponse {
@@ -10093,8 +11463,13 @@
 }

 export interface ConnectorUpdatePipelineRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   pipeline: ConnectorIngestPipelineParams
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, pipeline?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, pipeline?: never }
 }

 export interface ConnectorUpdatePipelineResponse {
@@ -10102,8 +11477,13 @@
 }

 export interface ConnectorUpdateSchedulingRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   scheduling: ConnectorSchedulingConfiguration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, scheduling?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, scheduling?: never }
 }

 export interface ConnectorUpdateSchedulingResponse {
@@ -10111,8 +11491,13 @@
 }

 export interface ConnectorUpdateServiceTypeRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   service_type: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, service_type?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, service_type?: never }
 }

 export interface ConnectorUpdateServiceTypeResponse {
@@ -10120,8 +11505,13 @@
 }

 export interface ConnectorUpdateStatusRequest extends RequestBase {
+/** The unique identifier of the connector to be updated */
   connector_id: Id
   status: ConnectorConnectorStatus
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { connector_id?: never, status?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { connector_id?: never, status?: never }
 }

 export interface ConnectorUpdateStatusResponse {
@@ -10129,19 +11519,35 @@
 }

 export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase {
+/** The UUID of the index to delete. Use the get dangling indices API to find the UUID. */
   index_uuid: Uuid
+  /** This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index. */
   accept_data_loss: boolean
+  /** Specify timeout for connection to master */
   master_timeout?: Duration
+  /** Explicit operation timeout */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never }
 }

 export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase

 export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase {
+/** The UUID of the index to import. Use the get dangling indices API to locate the UUID. */
   index_uuid: Uuid
+  /** This parameter must be set to true to import a dangling index. Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. */
   accept_data_loss: boolean
+  /** Specify timeout for connection to master */
   master_timeout?: Duration
+  /** Explicit operation timeout */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never }
 }

 export type DanglingIndicesImportDanglingIndexResponse = AcknowledgedResponseBase
@@ -10154,6 +11560,10 @@ export interface DanglingIndicesListDanglingIndicesDanglingIndex {
 }

 export interface DanglingIndicesListDanglingIndicesRequest extends RequestBase {
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any }
 }

 export interface DanglingIndicesListDanglingIndicesResponse {
@@ -10176,8 +11586,14 @@
 }

 export interface EnrichDeletePolicyRequest extends RequestBase {
+/** Enrich policy to delete. */
   name: Name
+  /** Period to wait for a connection to the master node. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never }
 }

 export type EnrichDeletePolicyResponse = AcknowledgedResponseBase
@@ -10189,9 +11605,16 @@ export interface EnrichExecutePolicyExecuteEnrichPolicyStatus {
 }

 export interface EnrichExecutePolicyRequest extends RequestBase {
+/** Enrich policy to execute. */
   name: Name
+  /** Period to wait for a connection to the master node. */
   master_timeout?: Duration
+  /** If `true`, the request blocks other enrich policy execution requests until complete. */
   wait_for_completion?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, wait_for_completion?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, wait_for_completion?: never }
 }

 export interface EnrichExecutePolicyResponse {
@@ -10200,8 +11623,14 @@
 }

 export interface EnrichGetPolicyRequest extends RequestBase {
+/** Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. */
   name?: Names
+  /** Period to wait for a connection to the master node. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never }
 }

 export interface EnrichGetPolicyResponse {
@@ -10209,11 +11638,20 @@
 }

 export interface EnrichPutPolicyRequest extends RequestBase {
+/** Name of the enrich policy to create or update. */
   name: Name
+  /** Period to wait for a connection to the master node. */
   master_timeout?: Duration
+  /** Matches enrich data to incoming documents based on a `geo_shape` query. */
   geo_match?: EnrichPolicy
+  /** Matches enrich data to incoming documents based on a `term` query. */
   match?: EnrichPolicy
+  /** Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. */
   range?: EnrichPolicy
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, geo_match?: never, match?: never, range?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, geo_match?: never, match?: never, range?: never }
 }

 export type EnrichPutPolicyResponse = AcknowledgedResponseBase
@@ -10243,7 +11681,12 @@ export interface EnrichStatsExecutingPolicy {
 }

 export interface EnrichStatsRequest extends RequestBase {
+/** Period to wait for a connection to the master node. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { master_timeout?: never }
 }

 export interface EnrichStatsResponse {
@@ -10282,21 +11725,38 @@ export interface EqlHitsSequence {
 }

 export interface EqlDeleteRequest extends RequestBase {
+/** Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. */
   id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never }
 }

 export type EqlDeleteResponse = AcknowledgedResponseBase

 export interface EqlGetRequest extends RequestBase {
+/** Identifier for the search. */
   id: Id
+  /** Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request. */
   keep_alive?: Duration
+  /** Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. */
   wait_for_completion_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, keep_alive?: never, wait_for_completion_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, keep_alive?: never, wait_for_completion_timeout?: never }
 }

 export type EqlGetResponse = EqlEqlSearchResponseBase

 export interface EqlGetStatusRequest extends RequestBase {
+/** Identifier for the search. */
   id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never }
 }

 export interface EqlGetStatusResponse {
@@ -10309,33 +11769,50 @@
 export interface EqlSearchRequest extends RequestBase {
+/** The name of the index to scope the operation */
   index: Indices
   allow_no_indices?: boolean
   expand_wildcards?: ExpandWildcards
+  /** If true, missing or closed indices are not included in the response. */
   ignore_unavailable?: boolean
+  /** EQL query you wish to run. */
   query: string
   case_sensitive?: boolean
+  /** Field containing the event classification, such as process, file, or network. */
   event_category_field?: Field
+  /** Field used to sort hits with the same timestamp in ascending order */
   tiebreaker_field?: Field
+  /** Field containing event timestamp. Default "@timestamp" */
   timestamp_field?: Field
+  /** Maximum number of events to search at a time for sequence queries. */
   fetch_size?: uint
+  /** Query, written in Query DSL, used to filter the events on which the EQL query runs. */
   filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
   keep_alive?: Duration
   keep_on_completion?: boolean
   wait_for_completion_timeout?: Duration
   allow_partial_search_results?: boolean
   allow_partial_sequence_results?: boolean
+  /** For basic queries, the maximum number of matching events to return. Defaults to 10 */
   size?: uint
+  /** Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. */
   fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[]
   result_position?: EqlSearchResultPosition
   runtime_mappings?: MappingRuntimeFields
+  /** By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. */
   max_samples_per_key?: integer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never }
 }

 export type EqlSearchResponse = EqlEqlSearchResponseBase

 export type EqlSearchResultPosition = 'tail' | 'head'
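// --- Illustrative usage sketch; not part of this patch or the generated file. ---
// A minimal EQL search against the request type above: `index` and `query` are
// the required keys. The index name and event pattern are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function eqlSearchExample (client: Client) {
  const result = await client.eql.search({
    index: 'my-logs', // hypothetical index
    query: 'process where process.name == "regsvr32.exe"', // hypothetical EQL query
    size: 10
  })
  console.log(result.hits)
}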
+export type EsqlEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow'
+
 export interface EsqlTableValuesContainer {
   integer?: EsqlTableValuesIntegerValue[]
   keyword?: EsqlTableValuesKeywordValue[]
@@ -10351,19 +11828,99 @@
 export type EsqlTableValuesLongDouble = double | double[]

 export type EsqlTableValuesLongValue = long | long[]

-export type EsqlQueryEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow'
+export interface EsqlAsyncQueryRequest extends RequestBase {
+/** The character to use between values within a CSV row. It is valid only for the CSV format. */
+  delimiter?: string
+  /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */
+  drop_null_columns?: boolean
+  /** A short version of the Accept header, for example `json` or `yaml`. */
+  format?: EsqlEsqlFormat
+  /** The period for which the query and its results are stored in the cluster. The default period is five days. When this period expires, the query and its results are deleted, even if the query is still ongoing. If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. */
+  keep_alive?: Duration
+  /** Indicates whether the query and its results are stored in the cluster. If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. */
+  keep_on_completion?: boolean
+  /** The period to wait for the request to finish. By default, the request waits for 1 second for the query results. If the query completes during this period, results are returned. Otherwise, a query ID is returned that can later be used to retrieve the results. */
+  wait_for_completion_timeout?: Duration
+  /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */
+  columnar?: boolean
+  /** Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. */
+  filter?: QueryDslQueryContainer
+  locale?: string
+  /** To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */
+  params?: FieldValue[]
+  /** If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. */
+  profile?: boolean
+  /** The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. */
+  query: string
+  /** Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. */
+  tables?: Record<string, Record<string, EsqlTableValuesContainer>>
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never }
+}

 export interface EsqlAsyncQueryResponse {
   columns?: EsqlColumns
   id?: string
   is_running: boolean
 }

 export interface EsqlAsyncQueryDeleteRequest extends RequestBase {
+/** The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */
   id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never }
 }

 export type EsqlAsyncQueryDeleteResponse = AcknowledgedResponseBase

 export interface EsqlAsyncQueryGetRequest extends RequestBase {
+/** The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */
   id: Id
+  /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */
   drop_null_columns?: boolean
+  /** The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. */
   keep_alive?: Duration
+  /** The period to wait for the request to finish. By default, the request waits for complete query results. If the request completes during the period specified in this parameter, complete query results are returned. Otherwise, the response returns an `is_running` value of `true` and no results. */
   wait_for_completion_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never }
 }

 export interface EsqlAsyncQueryGetResponse {
   columns?: EsqlColumns
   is_running: boolean
 }

 export interface EsqlQueryRequest extends RequestBase {
-  format?: EsqlQueryEsqlFormat
+/** A short version of the Accept header, e.g. json, yaml. */
+  format?: EsqlEsqlFormat
+  /** The character to use between values within a CSV row. Only valid for the CSV format. */
   delimiter?: string
+  /** Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. */
   drop_null_columns?: boolean
+  /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */
   columnar?: boolean
+  /** Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. */
   filter?: QueryDslQueryContainer
   locale?: string
+  /** To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */
   params?: FieldValue[]
+  /** If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. */
   profile?: boolean
+  /** The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. */
   query: string
+  /** Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. */
   tables?: Record<string, Record<string, EsqlTableValuesContainer>>
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never }
 }

 export type EsqlQueryResponse = EsqlColumns
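// --- Illustrative usage sketch; not part of this patch or the generated file. ---
// A minimal ES|QL query using the request type above: only `query` is required,
// while `format` and `columnar` shape the response. The index name is hypothetical.
import { Client } from '@elastic/elasticsearch'

async function esqlQueryExample (client: Client) {
  const columns = await client.esql.query({
    query: 'FROM my-logs | LIMIT 10', // hypothetical index
    format: 'json',
    columnar: false
  })
  console.log(columns)
}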
@@ -10374,7 +11931,12 @@ export interface FeaturesFeature {
 }

 export interface FeaturesGetFeaturesRequest extends RequestBase {
+/** Period to wait for a connection to the master node. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { master_timeout?: never }
 }

 export interface FeaturesGetFeaturesResponse {
@@ -10382,7 +11944,12 @@
 }

 export interface FeaturesResetFeaturesRequest extends RequestBase {
+/** Period to wait for a connection to the master node. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { master_timeout?: never }
 }

 export interface FeaturesResetFeaturesResponse {
@@ -10392,11 +11959,20 @@
 export type FleetCheckpoint = long

 export interface FleetGlobalCheckpointsRequest extends RequestBase {
+/** A single index or index alias that resolves to a single index. */
   index: IndexName | IndexAlias
+  /** A boolean value which controls whether to wait (until the timeout) for the global checkpoints to advance past the provided `checkpoints`. */
   wait_for_advance?: boolean
+  /** A boolean value which controls whether to wait (until the timeout) for the target index to exist and all primary shards be active. Can only be true when `wait_for_advance` is true. */
   wait_for_index?: boolean
+  /** A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list will cause Elasticsearch to immediately return the current global checkpoints. */
   checkpoints?: FleetCheckpoint[]
+  /** Period to wait for the global checkpoints to advance past `checkpoints`. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, wait_for_advance?: never, wait_for_index?: never, checkpoints?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, wait_for_advance?: never, wait_for_index?: never, checkpoints?: never, timeout?: never }
 }
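// --- Illustrative usage sketch; not part of this patch or the generated file. ---
// Polling global checkpoints with the request type above: wait until the
// checkpoints advance past a known value. Index name and checkpoint are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function globalCheckpointsExample (client: Client) {
  const res = await client.fleet.globalCheckpoints({
    index: 'my-index', // hypothetical index
    wait_for_advance: true,
    checkpoints: [42], // hypothetical previous checkpoint
    timeout: '30s'
  })
  console.log(res)
}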
 export interface FleetGlobalCheckpointsResponse {
@@ -10405,21 +11981,39 @@
 export interface FleetMsearchRequest extends RequestBase {
+/** A single target to search. If the target is an index alias, it must resolve to a single index. */
   index?: IndexName | IndexAlias
+  /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */
   allow_no_indices?: boolean
+  /** If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. */
   ccs_minimize_roundtrips?: boolean
+  /** Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. */
   expand_wildcards?: ExpandWildcards
+  /** If true, concrete, expanded or aliased indices are ignored when frozen. */
   ignore_throttled?: boolean
+  /** If true, missing or closed indices are not included in the response. */
   ignore_unavailable?: boolean
+  /** Maximum number of concurrent searches the multi search API can execute. */
   max_concurrent_searches?: long
+  /** Maximum number of concurrent shard requests that each sub-search request executes per node. */
   max_concurrent_shard_requests?: long
+  /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */
   pre_filter_shard_size?: long
+  /** Indicates whether global term and document frequencies should be used when scoring returned documents. */
   search_type?: SearchType
+  /** If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. */
   rest_total_hits_as_int?: boolean
+  /** Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. */
   typed_keys?: boolean
+  /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. */
   wait_for_checkpoints?: FleetCheckpoint[]
+  /** If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. */
   allow_partial_search_results?: boolean
   searches?: MsearchRequestItem[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, searches?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, searches?: never }
 }

 export interface FleetMsearchResponse {
@@ -10427,6 +12021,7 @@
 }

 export interface FleetSearchRequest extends RequestBase {
+/** A single target to search. If the target is an index alias, it must resolve to a single index. */
   index: IndexName | IndexAlias
   allow_no_indices?: boolean
   analyzer?: string
   analyze_wildcard?: boolean
   batched_reduce_size?: long
   ccs_minimize_roundtrips?: boolean
   default_operator?: QueryDslOperator
   df?: string
   expand_wildcards?: ExpandWildcards
   ignore_throttled?: boolean
   ignore_unavailable?: boolean
   lenient?: boolean
   max_concurrent_shard_requests?: long
   preference?: string
   pre_filter_shard_size?: long
   request_cache?: boolean
   routing?: Routing
   scroll?: Duration
   search_type?: SearchType
+  /** Specifies which field to use for suggestions. */
   suggest_field?: Field
   suggest_mode?: SuggestMode
   suggest_size?: long
+  /** The source text for which the suggestions should be returned. */
   suggest_text?: string
   typed_keys?: boolean
   rest_total_hits_as_int?: boolean
   _source_excludes?: Fields
   _source_includes?: Fields
   q?: string
+  /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. */
   wait_for_checkpoints?: FleetCheckpoint[]
+  /** If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. */
   allow_partial_search_results?: boolean
   aggregations?: Record
   /** @alias aggregations */
   aggs?: Record
   collapse?: SearchFieldCollapse
+  /** If true, returns detailed information about score computation as part of a hit. */
   explain?: boolean
+  /** Configuration of search extensions defined by Elasticsearch plugins. */
   ext?: Record
+  /** Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. */
   from?: integer
   highlight?: SearchHighlight
+  /** Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. */
   track_total_hits?: SearchTrackHits
+  /** Boosts the _score of documents from specified indices. */
   indices_boost?: Record[]
+  /** Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. */
   docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
+  /** Minimum _score for matching documents. Documents with a lower _score are not included in the search results. */
   min_score?: double
   post_filter?: QueryDslQueryContainer
   profile?: boolean
+  /** Defines the search definition using the Query DSL. */
   query?: QueryDslQueryContainer
   rescore?: SearchRescore | SearchRescore[]
+  /** Retrieve a script evaluation (based on different fields) for each hit. */
   script_fields?: Record
   search_after?: SortResults
+  /** The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. */
   size?: integer
   slice?: SlicedScroll
   sort?: Sort
+  /** Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. */
   _source?: SearchSourceConfig
+  /** Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. */
   fields?: (QueryDslFieldAndFormat | Field)[]
   suggest?: SearchSuggester
+  /** Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. */
   terminate_after?: long
+  /** Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */
   timeout?: string
+  /** If true, calculate and return document scores, even if the scores are not used for sorting. */
   track_scores?: boolean
+  /** If true, returns document version as part of a hit. */
   version?: boolean
+  /** If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. */
   seq_no_primary_term?: boolean
+  /** List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. */
   stored_fields?: Fields
+  /** Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an index in the request path. */
   pit?: SearchPointInTimeReference
+  /** Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */
   runtime_mappings?: MappingRuntimeFields
+  /** Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. */
   stats?: string[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never }
 }

 export interface FleetSearchResponse {
@@ -10556,13 +12180,24 @@ export interface GraphVertexInclude {
 }

 export interface GraphExploreRequest extends RequestBase {
+/** Name of the index. */
   index: Indices
+  /** Custom value used to route operations to a specific shard. */
   routing?: Routing
+  /** Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */
   timeout?: Duration
+  /** Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. */
   connections?: GraphHop
+  /** Direct the Graph API how to build the graph. */
   controls?: GraphExploreControls
+  /** A seed query that identifies the documents of interest. Can be any valid Elasticsearch query. */
   query?: QueryDslQueryContainer
+  /** Specifies one or more fields that contain the terms you want to include in the graph as vertices. */
   vertices?: GraphVertexDefinition[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, routing?: never, timeout?: never, connections?: never, controls?: never, query?: never, vertices?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, routing?: never, timeout?: never, connections?: never, controls?: never, query?: never, vertices?: never }
 }

 export interface GraphExploreResponse {
@@ -10617,7 +12252,7 @@ export interface IlmMigrateAction {
 export interface IlmPhase {
   actions?: IlmActions
-  min_age?: Duration | long
+  min_age?: Duration
 }

 export interface IlmPhases {
@@ -10666,9 +12301,16 @@ export interface IlmWaitForSnapshotAction {
 }

 export interface IlmDeleteLifecycleRequest extends RequestBase {
+/** Identifier for the policy. */
   name: Name
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase @@ -10713,10 +12355,18 @@ export interface IlmExplainLifecycleLifecycleExplainUnmanaged { } export interface IlmExplainLifecycleRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` or `_all`. */ index: IndexName + /** Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist. */ only_errors?: boolean + /** Filters the returned indices to only indices that are managed by ILM. */ only_managed?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, only_errors?: never, only_managed?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, only_errors?: never, only_managed?: never, master_timeout?: never } } export interface IlmExplainLifecycleResponse { @@ -10730,14 +12380,25 @@ export interface IlmGetLifecycleLifecycle { } export interface IlmGetLifecycleRequest extends RequestBase { +/** Identifier for the policy. */ name?: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type IlmGetLifecycleResponse = Record export interface IlmGetStatusRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface IlmGetStatusResponse { @@ -10745,9 +12406,14 @@ export interface IlmGetStatusResponse { } export interface IlmMigrateToDataTiersRequest extends RequestBase { +/** If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated. 
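A minimal sketch of the dry-run flow (assumes a configured `client`; the response type below lists what would be migrated):

    // Preview the node-attribute to data-tier migration without applying it
    const preview = await client.ilm.migrateToDataTiers({ dry_run: true })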
*/ dry_run?: boolean legacy_template_to_delete?: string node_attribute?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { dry_run?: never, legacy_template_to_delete?: never, node_attribute?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { dry_run?: never, legacy_template_to_delete?: never, node_attribute?: never } } export interface IlmMigrateToDataTiersResponse { @@ -10761,9 +12427,16 @@ export interface IlmMoveToStepRequest extends RequestBase { +/** The name of the index whose lifecycle step is to change */ index: IndexName + /** The step that the index is expected to be in. */ current_step: IlmMoveToStepStepKey + /** The step that you want to run. */ next_step: IlmMoveToStepStepKey + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, current_step?: never, next_step?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, current_step?: never, next_step?: never } } export type IlmMoveToStepResponse = AcknowledgedResponseBase @@ -10775,16 +12448,28 @@ export interface IlmMoveToStepStepKey { } export interface IlmPutLifecycleRequest extends RequestBase { +/** Identifier for the policy. */ name: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration policy?: IlmPolicy + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never } } export type IlmPutLifecycleResponse = AcknowledgedResponseBase export interface IlmRemovePolicyRequest extends RequestBase { +/** The name of the index from which to remove the policy */ index: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } } export interface IlmRemovePolicyResponse { @@ -10793,21 +12478,38 @@ export interface IlmRetryRequest extends RequestBase { +/** The name of the indices (comma-separated) whose failed lifecycle step is to be retried */ index: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } } export type IlmRetryResponse = AcknowledgedResponseBase export interface IlmStartRequest extends RequestBase { +/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type IlmStartResponse = AcknowledgedResponseBase export interface IlmStopRequest extends RequestBase { +/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type IlmStopResponse = AcknowledgedResponseBase @@ -11045,6 +12747,7 @@ export interface IndicesIndexSettingsLifecycle { parse_origination_date?: boolean step?: IndicesIndexSettingsLifecycleStep rollover_alias?: string + prefer_ilm?: boolean | string } export interface IndicesIndexSettingsLifecycleStep { @@ -11124,7 +12827,8 @@ export interface IndicesMappingLimitSettings { nested_objects?: IndicesMappingLimitSettingsNestedObjects field_name_length?: IndicesMappingLimitSettingsFieldNameLength dimension_fields?: IndicesMappingLimitSettingsDimensionFields - ignore_malformed?: boolean + source?: IndicesMappingLimitSettingsSourceFields + ignore_malformed?: boolean | string } export interface IndicesMappingLimitSettingsDepth { @@ -11147,6 +12851,10 @@ export interface IndicesMappingLimitSettingsNestedObjects { limit?: long } +export interface IndicesMappingLimitSettingsSourceFields { + mode: IndicesSourceMode +} + export interface IndicesMappingLimitSettingsTotalFields { limit?: long | string ignore_dynamic_beyond_limit?: boolean | string @@ -11274,6 +12982,8 @@ export interface IndicesSoftDeletes { retention_lease?: IndicesRetentionLease } +export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic' + export interface IndicesStorage { type: IndicesStorageType allow_mmap?: boolean @@ -11312,13 +13022,24 @@ export interface IndicesAddBlockIndicesBlockStatus { } export interface IndicesAddBlockRequest extends RequestBase { +/** A comma separated list of indices to add a block to */ index: IndexName + /** The block to add (one of read, write, read_only or metadata) */ block: IndicesAddBlockIndicesBlockOptions + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** Specify timeout for connection to master */ master_timeout?: Duration + /** Explicit operation timeout */ timeout?: Duration + /** All values in `body` will be added to the request body. 
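For context, a hypothetical round-trip over the ILM request types above (assumes a configured `client`; the policy content is illustrative and not part of these definitions):

    // Define a policy, then pause and resume ILM execution
    await client.ilm.putLifecycle({
      name: 'logs-policy',
      policy: {
        phases: {
          hot: { actions: { rollover: { max_age: '7d' } } },
          delete: { min_age: '30d', actions: { delete: {} } }
        }
      }
    })
    await client.ilm.stop()   // e.g. before cluster maintenance
    await client.ilm.start()  // resume lifecycle management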
*/ + body?: string | { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } } export interface IndicesAddBlockResponse { @@ -11369,16 +13090,30 @@ export type IndicesAnalyzeExplainAnalyzeToken = IndicesAnalyzeExplainAnalyzeToke & { [property: string]: any } export interface IndicesAnalyzeRequest extends RequestBase { +/** Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. */ index?: IndexName + /** The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index. */ analyzer?: string + /** Array of token attributes used to filter the output of the `explain` parameter. */ attributes?: string[] + /** Array of character filters used to preprocess characters before the tokenizer. */ char_filter?: AnalysisCharFilter[] + /** If `true`, the response includes token attributes and additional details. */ explain?: boolean + /** Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. */ field?: Field + /** Array of token filters used to apply after the tokenizer. */ filter?: AnalysisTokenFilter[] + /** Normalizer to use to convert text into a single token. */ normalizer?: string + /** Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. */ text?: IndicesAnalyzeTextToAnalyze + /** Tokenizer to use to convert text into tokens. */ tokenizer?: AnalysisTokenizer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, analyzer?: never, attributes?: never, char_filter?: never, explain?: never, field?: never, filter?: never, normalizer?: never, text?: never, tokenizer?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, analyzer?: never, attributes?: never, char_filter?: never, explain?: never, field?: never, filter?: never, normalizer?: never, text?: never, tokenizer?: never } } export interface IndicesAnalyzeResponse { @@ -11393,27 +13128,61 @@ export interface IndicesAnalyzeTokenDetail { tokens: IndicesAnalyzeExplainAnalyzeToken[] } +export interface IndicesCancelMigrateReindexRequest extends RequestBase { +/** The index or data stream name */ + index: Indices + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } +} + +export type IndicesCancelMigrateReindexResponse = AcknowledgedResponseBase + export interface IndicesClearCacheRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. 
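A sketch of adding a write block with the request type above (the index name is hypothetical; assumes a configured `client`):

    // Make an index read-only before cloning or shrinking it
    await client.indices.addBlock({ index: 'my-index', block: 'write' })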
*/ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. */ fielddata?: boolean + /** Comma-separated list of field names used to limit the `fielddata` parameter. */ fields?: Fields + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, clears the query cache. */ query?: boolean + /** If `true`, clears the request cache. */ request?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, fielddata?: never, fields?: never, ignore_unavailable?: never, query?: never, request?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, fielddata?: never, fields?: never, ignore_unavailable?: never, query?: never, request?: never } } export type IndicesClearCacheResponse = ShardsOperationResponseBase export interface IndicesCloneRequest extends RequestBase { +/** Name of the source index to clone. */ index: IndexName + /** Name of the target index to create. */ target: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards + /** Aliases for the resulting index. */ aliases?: Record + /** Configuration options for the target index. */ settings?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } } export interface IndicesCloneResponse { @@ -11432,13 +13201,24 @@ export interface IndicesCloseCloseShardResult { } export interface IndicesCloseRequest extends RequestBase { +/** Comma-separated list or wildcard expression of index names used to limit the request. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. 
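As an example of the analyze request defined above (sketch; assumes a configured `client`):

    // Inspect how the standard analyzer tokenizes a string
    const { tokens } = await client.indices.analyze({
      analyzer: 'standard',
      text: 'Elasticsearch type definitions'
    })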
This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } } export interface IndicesCloseResponse { @@ -11448,13 +13228,24 @@ export interface IndicesCloseResponse { } export interface IndicesCreateRequest extends RequestBase { +/** Name of the index you wish to create. */ index: IndexName + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards + /** Aliases for the index. */ aliases?: Record + /** Mapping for fields in the index. If specified, this mapping can include: - Field names - Field data types - Mapping parameters */ mappings?: MappingTypeMapping + /** Configuration options for the index. */ settings?: IndicesIndexSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, mappings?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, mappings?: never, settings?: never } } export interface IndicesCreateResponse { @@ -11464,13 +13255,44 @@ export interface IndicesCreateResponse { } export interface IndicesCreateDataStreamRequest extends RequestBase { +/** Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. */ name: DataStreamName + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase +export interface IndicesCreateFromCreateFrom { + mappings_override?: MappingTypeMapping + settings_override?: IndicesIndexSettings + remove_index_blocks?: boolean +} + +export interface IndicesCreateFromRequest extends RequestBase { +/** The source index or data stream name */ + source: IndexName + /** The destination index or data stream name */ + dest: IndexName + create_from?: IndicesCreateFromCreateFrom + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { source?: never, dest?: never, create_from?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { source?: never, dest?: never, create_from?: never } +} + +export interface IndicesCreateFromResponse { + acknowledged: boolean + index: IndexName + shards_acknowledged: boolean +} + export interface IndicesDataStreamsStatsDataStreamsStatsItem { backing_indices: integer data_stream: Name @@ -11480,8 +13302,14 @@ export interface IndicesDataStreamsStatsDataStreamsStatsItem { } export interface IndicesDataStreamsStatsRequest extends RequestBase { +/** Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a cluster, omit this parameter or use `*`. */ name?: IndexName + /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never } } export interface IndicesDataStreamsStatsResponse { @@ -11494,112 +13322,210 @@ export interface IndicesDataStreamsStatsResponse { } export interface IndicesDeleteRequest extends RequestBase { +/** Comma-separated list of indices to delete. 
You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } } export type IndicesDeleteResponse = IndicesResponseBase export interface IndicesDeleteAliasRequest extends RequestBase { +/** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). */ index: Indices + /** Comma-separated list of aliases to remove. Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. */ name: Names + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never } } export type IndicesDeleteAliasResponse = AcknowledgedResponseBase export interface IndicesDeleteDataLifecycleRequest extends RequestBase { +/** A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams */ name: DataStreamNames + /** Whether wildcard expressions should get expanded to open or closed indices (default: open) */ expand_wildcards?: ExpandWildcards + /** Specify timeout for connection to master */ master_timeout?: Duration + /** Explicit timestamp for the document */ timeout?: Duration + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never } } export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase export interface IndicesDeleteDataStreamRequest extends RequestBase { +/** Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. */ name: DataStreamNames + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Type of data stream that wildcard patterns can match. Supports comma-separated values,such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, expand_wildcards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, expand_wildcards?: never } } export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase export interface IndicesDeleteIndexTemplateRequest extends RequestBase { +/** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name: Names + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesDeleteTemplateRequest extends RequestBase { +/** The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. */ name: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase export interface IndicesDiskUsageRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and aliases used to limit the request. 
It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. */ index: Indices + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, the API performs a flush before analysis. If `false`, the response may not include uncommitted data. */ flush?: boolean + /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** Analyzing field disk usage is resource-intensive. To use the API, this parameter must be set to `true`. */ run_expensive_tasks?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, run_expensive_tasks?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, run_expensive_tasks?: never } } export type IndicesDiskUsageResponse = any export interface IndicesDownsampleRequest extends RequestBase { +/** Name of the time series index to downsample. */ index: IndexName + /** Name of the index to create. */ target_index: IndexName config?: IndicesDownsampleConfig + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, target_index?: never, config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, target_index?: never, config?: never } } export type IndicesDownsampleResponse = any export interface IndicesExistsRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `true`, returns settings in flat format. */ flat_settings?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, return all default settings in the response. */ include_defaults?: boolean + /** If `true`, the request retrieves information from the local node only. */ local?: boolean + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } } export type IndicesExistsResponse = boolean export interface IndicesExistsAliasRequest extends RequestBase { +/** Comma-separated list of aliases to check. Supports wildcards (`*`). */ name: Names + /** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } } export type IndicesExistsAliasResponse = boolean export interface IndicesExistsIndexTemplateRequest extends RequestBase { +/** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } } export type IndicesExistsIndexTemplateResponse = boolean export interface IndicesExistsTemplateRequest extends RequestBase { +/** A comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. */ name: Names + /** Indicates whether to use a flat format for the response. */ flat_settings?: boolean + /** Indicates whether to get information from the local node only. */ local?: boolean + /** The period to wait for the master node. 
If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } } export type IndicesExistsTemplateResponse = boolean @@ -11617,9 +13543,16 @@ export interface IndicesExplainDataLifecycleDataStreamLifecycleExplain { } export interface IndicesExplainDataLifecycleRequest extends RequestBase { +/** The name of the index to explain */ index: Indices + /** indicates if the API should return the default values the system uses for the index's lifecycle */ include_defaults?: boolean + /** Specify timeout for connection to master */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, include_defaults?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, include_defaults?: never, master_timeout?: never } } export interface IndicesExplainDataLifecycleResponse { @@ -11654,12 +13587,22 @@ export interface IndicesFieldUsageStatsInvertedIndex { } export interface IndicesFieldUsageStatsRequest extends RequestBase { +/** Comma-separated list or wildcard expression of index names used to limit the request. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ fields?: Fields + /** The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never, wait_for_active_shards?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never, wait_for_active_shards?: never } } export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody @@ -11681,25 +13624,47 @@ export interface IndicesFieldUsageStatsUsageStatsShards { } export interface IndicesFlushRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `true`, the request forces a flush even if there are no changes to commit to the index. */ force?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, the flush operation blocks until execution when another flush operation is running. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. */ wait_if_ongoing?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, force?: never, ignore_unavailable?: never, wait_if_ongoing?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, force?: never, ignore_unavailable?: never, wait_if_ongoing?: never } } export type IndicesFlushResponse = ShardsOperationResponseBase export interface IndicesForcemergeRequest extends RequestBase { +/** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Specify whether the index should be flushed after performing the operation (default: true) */ flush?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** The number of segments the index should be merged into (default: dynamic) */ max_num_segments?: long + /** Specify whether the operation should only expunge deleted documents */ only_expunge_deletes?: boolean + /** Should the request wait until the force merge is completed. */ wait_for_completion?: boolean + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, max_num_segments?: never, only_expunge_deletes?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, max_num_segments?: never, only_expunge_deletes?: never, wait_for_completion?: never } } export type IndicesForcemergeResponse = IndicesForcemergeForceMergeResponseBody @@ -11713,15 +13678,28 @@ export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings' export type IndicesGetFeatures = IndicesGetFeature | IndicesGetFeature[] export interface IndicesGetRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. */ index: Indices + /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean + /** Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as open,hidden. */ expand_wildcards?: ExpandWildcards + /** If true, returns settings in flat format. */ flat_settings?: boolean + /** If false, requests that target a missing index return an error. */ ignore_unavailable?: boolean + /** If true, return all default settings in the response. */ include_defaults?: boolean + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Return only information on specified index features */ features?: IndicesGetFeatures + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never, features?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never, features?: never } } export type IndicesGetResponse = Record @@ -11731,12 +13709,22 @@ export interface IndicesGetAliasIndexAliases { } export interface IndicesGetAliasRequest extends RequestBase { +/** Comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names + /** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. 
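For example (sketch; merging a no-longer-written index down to one segment is the common case):

    // Force merge a rolled-over index to a single segment
    await client.indices.forcemerge({ index: 'my-old-index', max_num_segments: 1 })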
*/ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } } export type IndicesGetAliasResponse = Record @@ -11747,10 +13735,18 @@ export interface IndicesGetDataLifecycleDataStreamWithLifecycle { } export interface IndicesGetDataLifecycleRequest extends RequestBase { +/** Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. */ name: DataStreamNames + /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `true`, return all default settings in the response. */ include_defaults?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never } } export interface IndicesGetDataLifecycleResponse { @@ -11764,6 +13760,10 @@ export interface IndicesGetDataLifecycleStatsDataStreamStats { } export interface IndicesGetDataLifecycleStatsRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface IndicesGetDataLifecycleStatsResponse { @@ -11774,11 +13774,20 @@ export interface IndicesGetDataLifecycleStatsResponse { } export interface IndicesGetDataStreamRequest extends RequestBase { +/** Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. 
*/ name?: DataStreamNames + /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Whether the maximum timestamp for each data stream should be calculated and returned. */ verbose?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never, verbose?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never, verbose?: never } } export interface IndicesGetDataStreamResponse { @@ -11786,13 +13795,24 @@ export interface IndicesGetDataStreamResponse { } export interface IndicesGetFieldMappingRequest extends RequestBase { +/** Comma-separated list or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). */ fields: Fields + /** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, return all default settings in the response. */ include_defaults?: boolean + /** If `true`, the request retrieves information from the local node only. */ local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } } export type IndicesGetFieldMappingResponse = Record @@ -11807,11 +13827,20 @@ export interface IndicesGetIndexTemplateIndexTemplateItem { } export interface IndicesGetIndexTemplateRequest extends RequestBase { +/** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name?: Name + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. 
*/ local?: boolean + /** If true, returns settings in flat format. */ flat_settings?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never, include_defaults?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never, include_defaults?: never } } export interface IndicesGetIndexTemplateResponse { @@ -11824,43 +13853,135 @@ export interface IndicesGetMappingIndexMappingRecord { } export interface IndicesGetMappingRequest extends RequestBase { +/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, the request retrieves information from the local node only. */ local?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never } } export type IndicesGetMappingResponse = Record +export interface IndicesGetMigrateReindexStatusRequest extends RequestBase { +/** The index or data stream name. */ + index: Indices + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. 
+  querystring?: { [key: string]: any } & { index?: never }
+}
+
+export interface IndicesGetMigrateReindexStatusResponse {
+  start_time?: DateTime
+  start_time_millis: EpochTime<UnitMillis>
+  complete: boolean
+  total_indices_in_data_stream: integer
+  total_indices_requiring_upgrade: integer
+  successes: integer
+  in_progress: IndicesGetMigrateReindexStatusStatusInProgress[]
+  pending: integer
+  errors: IndicesGetMigrateReindexStatusStatusError[]
+  exception?: string
+}
+
+export interface IndicesGetMigrateReindexStatusStatusError {
+  index: string
+  message: string
+}
+
+export interface IndicesGetMigrateReindexStatusStatusInProgress {
+  index: string
+  total_doc_count: long
+  reindexed_doc_count: long
+}
+
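A sketch of polling the reindex-migration status defined above. The `indices.getMigrateReindexStatus` method name is assumed from the client's usual mapping of request types to methods and should be verified against the shipped client; the data stream name is a placeholder:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Report per-index progress while the migration reindex is still running.
const status = await client.indices.getMigrateReindexStatus({ index: 'my-data-stream' })
if (!status.complete) {
  for (const item of status.in_progress) {
    console.log(`${item.index}: ${item.reindexed_doc_count}/${item.total_doc_count} docs reindexed`)
  }
}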
 export interface IndicesGetSettingsRequest extends RequestBase {
+  /** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */
   index?: Indices
+  /** Comma-separated list or wildcard expression of settings to retrieve. */
   name?: Names
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with `bar`. */
   allow_no_indices?: boolean
+  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */
   expand_wildcards?: ExpandWildcards
+  /** If `true`, returns settings in flat format. */
   flat_settings?: boolean
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
   ignore_unavailable?: boolean
+  /** If `true`, return all default settings in the response. */
   include_defaults?: boolean
+  /** If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. */
   local?: boolean
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, name?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, name?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never }
 }

 export type IndicesGetSettingsResponse = Record<IndexName, IndicesIndexState>

 export interface IndicesGetTemplateRequest extends RequestBase {
+  /** Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, omit this parameter or use a value of `_all` or `*`. */
   name?: Names
+  /** If `true`, returns settings in flat format. */
   flat_settings?: boolean
+  /** If `true`, the request retrieves information from the local node only. */
   local?: boolean
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never }
 }

 export type IndicesGetTemplateResponse = Record<string, IndicesTemplateMapping>

+export interface IndicesMigrateReindexMigrateReindex {
+  mode: IndicesMigrateReindexModeEnum
+  source: IndicesMigrateReindexSourceIndex
+}
+
+export type IndicesMigrateReindexModeEnum = 'upgrade'
+
+export interface IndicesMigrateReindexRequest extends RequestBase {
+  reindex?: IndicesMigrateReindexMigrateReindex
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { reindex?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { reindex?: never }
+}
+
+export type IndicesMigrateReindexResponse = AcknowledgedResponseBase
+
+export interface IndicesMigrateReindexSourceIndex {
+  index: IndexName
+}
+
 export interface IndicesMigrateToDataStreamRequest extends RequestBase {
+  /** Name of the index alias to convert to a data stream. */
   name: IndexName
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never }
 }

 export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase

@@ -11876,19 +13997,35 @@ export interface IndicesModifyDataStreamIndexAndDataStreamAction {
 }

 export interface IndicesModifyDataStreamRequest extends RequestBase {
+  /** Actions to perform. */
   actions: IndicesModifyDataStreamAction[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { actions?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { actions?: never }
 }

 export type IndicesModifyDataStreamResponse = AcknowledgedResponseBase

 export interface IndicesOpenRequest extends RequestBase {
+  /** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly name the indices you are using to limit the request. To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. */
   index: Indices
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */
   allow_no_indices?: boolean
+  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */
   expand_wildcards?: ExpandWildcards
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
   ignore_unavailable?: boolean
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */
   wait_for_active_shards?: WaitForActiveShards
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never }
 }

 export interface IndicesOpenResponse {
@@ -11897,32 +14034,59 @@
 }

 export interface IndicesPromoteDataStreamRequest extends RequestBase {
+  /** The name of the data stream */
   name: IndexName
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never }
 }

 export type IndicesPromoteDataStreamResponse = any
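A sketch of reopening a closed index with the `IndicesOpenRequest` options above (index name and timeout are placeholders):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Reopen a closed index and block until all shard copies are active.
const opened = await client.indices.open({
  index: 'my-index',
  wait_for_active_shards: 'all',
  timeout: '30s',
})
console.log(opened.acknowledged, opened.shards_acknowledged)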
 export interface IndicesPutAliasRequest extends RequestBase {
+  /** Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error. */
   index: Indices
+  /** Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math. */
   name: Name
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** Query used to limit documents the alias can access. */
   filter?: QueryDslQueryContainer
+  /** Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter. */
   index_routing?: Routing
+  /** If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. */
   is_write_index?: boolean
+  /** Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter. */
   routing?: Routing
+  /** Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter. */
   search_routing?: Routing
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never, filter?: never, index_routing?: never, is_write_index?: never, routing?: never, search_routing?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never, filter?: never, index_routing?: never, is_write_index?: never, routing?: never, search_routing?: never }
 }

 export type IndicesPutAliasResponse = AcknowledgedResponseBase

 export interface IndicesPutDataLifecycleRequest extends RequestBase {
+  /** Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. */
   name: DataStreamNames
+  /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. */
   expand_wildcards?: ExpandWildcards
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
   lifecycle?: IndicesDataStreamLifecycle
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, lifecycle?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, lifecycle?: never }
 }

 export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase

@@ -11935,72 +14099,137 @@ export interface IndicesPutIndexTemplateIndexTemplateMapping {
 }

 export interface IndicesPutIndexTemplateRequest extends RequestBase {
+  /** Index or template name */
   name: Name
+  /** If `true`, this request cannot replace or update existing index templates. */
   create?: boolean
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** User defined reason for creating/updating the index template */
   cause?: string
+  /** Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. */
   index_patterns?: Indices
+  /** An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */
   composed_of?: Name[]
+  /** Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. */
   template?: IndicesPutIndexTemplateIndexTemplateMapping
+  /** If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. */
   data_stream?: IndicesDataStreamVisibility
+  /** Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. */
   priority?: long
+  /** Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. External systems can use these version numbers to simplify template management. To unset a version, replace the template without specifying one. */
   version?: VersionNumber
+  /** Optional user metadata about the index template. It may have any contents. It is not automatically generated or used by Elasticsearch. This user-defined object is stored in the cluster state, so keeping it short is preferable. To unset the metadata, replace the template without specifying it. */
   _meta?: Metadata
+  /** This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */
   allow_auto_create?: boolean
+  /** The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist */
   ignore_missing_component_templates?: string[]
+  /** Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. */
   deprecated?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, allow_auto_create?: never, ignore_missing_component_templates?: never, deprecated?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, allow_auto_create?: never, ignore_missing_component_templates?: never, deprecated?: never }
 }

 export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase
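A sketch of creating a composable index template with the request type above; template, pattern, and component names are placeholders:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Register a data stream template; higher `priority` wins when several
// templates match the same index pattern.
await client.indices.putIndexTemplate({
  name: 'logs-template',
  index_patterns: ['logs-*'],
  data_stream: {},
  priority: 200,
  composed_of: ['logs-settings'],
  template: {
    mappings: { properties: { '@timestamp': { type: 'date' } } },
  },
})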
 export interface IndicesPutMappingRequest extends RequestBase {
+  /** A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. */
   index: Indices
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */
   allow_no_indices?: boolean
+  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */
   expand_wildcards?: ExpandWildcards
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
   ignore_unavailable?: boolean
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** If `true`, the mappings are applied only to the current write index for the target. */
   write_index_only?: boolean
+  /** Controls whether dynamic date detection is enabled. */
   date_detection?: boolean
+  /** Controls whether new fields are added dynamically. */
   dynamic?: MappingDynamicMapping
+  /** If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string. */
   dynamic_date_formats?: string[]
+  /** Specify dynamic templates for the mapping. */
   dynamic_templates?: Record<string, MappingDynamicTemplate> | Record<string, MappingDynamicTemplate>[]
+  /** Control whether field names are enabled for the index. */
   _field_names?: MappingFieldNamesField
+  /** A mapping type can have custom meta data associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata. */
   _meta?: Metadata
+  /** Automatically map strings into numeric data types for all fields. */
   numeric_detection?: boolean
+  /** Mapping for a field. For new fields, this mapping can include: - Field name - Field data type - Mapping parameters */
   properties?: Record<PropertyName, MappingProperty>
+  /** Enable making a routing value required on indexed documents. */
   _routing?: MappingRoutingField
+  /** Control whether the _source field is enabled on the index. */
   _source?: MappingSourceField
+  /** Mapping of runtime fields for the index. */
   runtime?: MappingRuntimeFields
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, write_index_only?: never, date_detection?: never, dynamic?: never, dynamic_date_formats?: never, dynamic_templates?: never, _field_names?: never, _meta?: never, numeric_detection?: never, properties?: never, _routing?: never, _source?: never, runtime?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, write_index_only?: never, date_detection?: never, dynamic?: never, dynamic_date_formats?: never, dynamic_templates?: never, _field_names?: never, _meta?: never, numeric_detection?: never, properties?: never, _routing?: never, _source?: never, runtime?: never }
 }

 export type IndicesPutMappingResponse = IndicesResponseBase
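A sketch of adding a field to an existing mapping with the request type above (index and field names are placeholders):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Add a keyword field; with write_index_only the change only applies to
// the current write index of the target alias or data stream.
await client.indices.putMapping({
  index: 'my-index',
  properties: {
    user_id: { type: 'keyword' },
  },
  write_index_only: true,
})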
 export interface IndicesPutSettingsRequest extends RequestBase {
+  /** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */
   index?: Indices
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
   allow_no_indices?: boolean
+  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */
   expand_wildcards?: ExpandWildcards
+  /** If `true`, returns settings in flat format. */
   flat_settings?: boolean
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
   ignore_unavailable?: boolean
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** If `true`, existing index settings remain unchanged. */
   preserve_existing?: boolean
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
   settings?: IndicesIndexSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, timeout?: never, settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, timeout?: never, settings?: never }
 }

 export type IndicesPutSettingsResponse = AcknowledgedResponseBase

 export interface IndicesPutTemplateRequest extends RequestBase {
+  /** The name of the template */
   name: Name
+  /** If true, this request cannot replace or update existing index templates. */
   create?: boolean
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
   cause?: string
+  /** Aliases for the index. */
   aliases?: Record<IndexName, IndicesAlias>
+  /** Array of wildcard expressions used to match the names of indices during creation. */
   index_patterns?: string | string[]
+  /** Mapping for fields in the index. */
   mappings?: MappingTypeMapping
+  /** Order in which Elasticsearch applies this template if index matches multiple templates. Templates with lower 'order' values are merged first. Templates with higher 'order' values are merged later, overriding templates with lower values. */
   order?: integer
+  /** Configuration options for the index. */
   settings?: IndicesIndexSettings
+  /** Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. To unset a version, replace the template without specifying one. */
   version?: VersionNumber
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, aliases?: never, index_patterns?: never, mappings?: never, order?: never, settings?: never, version?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, aliases?: never, index_patterns?: never, mappings?: never, order?: never, settings?: never, version?: never }
 }

 export type IndicesPutTemplateResponse = AcknowledgedResponseBase

@@ -12070,9 +14299,16 @@ export interface IndicesRecoveryRecoveryStatus {
 }

 export interface IndicesRecoveryRequest extends RequestBase {
+  /** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */
   index?: Indices
+  /** If `true`, the response only includes ongoing shard recoveries. */
   active_only?: boolean
+  /** If `true`, the response includes detailed information about shard recoveries. */
   detailed?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, active_only?: never, detailed?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, active_only?: never, detailed?: never }
 }

 export type IndicesRecoveryResponse = Record<IndexName, IndicesRecoveryRecoveryStatus>

@@ -12113,10 +14349,18 @@ export interface IndicesRecoveryVerifyIndex {
 }

 export interface IndicesRefreshRequest extends RequestBase {
+  /** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */
   index?: Indices
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */
   allow_no_indices?: boolean
+  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */
   expand_wildcards?: ExpandWildcards
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
   ignore_unavailable?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never }
 }

 export type IndicesRefreshResponse = ShardsOperationResponseBase

@@ -12133,20 +14377,37 @@ export interface IndicesReloadSearchAnalyzersReloadResult {
 }

 export interface IndicesReloadSearchAnalyzersRequest extends RequestBase {
+  /** A comma-separated list of index names to reload analyzers for */
   index: Indices
+  /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */
   allow_no_indices?: boolean
+  /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */
   expand_wildcards?: ExpandWildcards
+  /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */
   ignore_unavailable?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never }
 }

 export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult

 export interface IndicesResolveClusterRequest extends RequestBase {
+  /** Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the `<cluster>:<name>` syntax. */
   name: Names
+  /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */
   allow_no_indices?: boolean
+  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */
   expand_wildcards?: ExpandWildcards
+  /** If true, concrete, expanded or aliased indices are ignored when frozen. Defaults to false. */
   ignore_throttled?: boolean
+  /** If false, the request returns an error if it targets a missing or closed index. Defaults to false. */
   ignore_unavailable?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never }
 }

 export interface IndicesResolveClusterResolveClusterInfo {
   connected: boolean
   skip_unavailable: boolean
   matching_indices?: boolean
   error?: string
   version?: ElasticsearchVersionMinInfo
 }

 export type IndicesResolveClusterResponse = Record<ClusterAlias, IndicesResolveClusterResolveClusterInfo>

 export interface IndicesResolveIndexRequest extends RequestBase {
+  /** Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the `<cluster>:<name>` syntax. */
   name: Names
+  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */
   expand_wildcards?: ExpandWildcards
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
   ignore_unavailable?: boolean
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
   allow_no_indices?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never }
 }

 export interface IndicesResolveIndexResolveIndexAliasItem {
@@ -12191,16 +14460,30 @@ export interface IndicesResolveIndexResponse {
 }

 export interface IndicesRolloverRequest extends RequestBase {
+  /** Name of the data stream or index alias to roll over. */
   alias: IndexAlias
+  /** Name of the index to create. Supports date math. Data streams do not support this parameter. */
   new_index?: IndexName
+  /** If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. */
   dry_run?: boolean
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */
   wait_for_active_shards?: WaitForActiveShards
+  /** Aliases for the target index. Data streams do not support this parameter. */
   aliases?: Record<IndexName, IndicesAlias>
+  /** Conditions for the rollover. If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. If this parameter is not specified, Elasticsearch performs the rollover unconditionally. If conditions are specified, at least one of them must be a `max_*` condition. The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. */
   conditions?: IndicesRolloverRolloverConditions
+  /** Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping parameters. */
   mappings?: MappingTypeMapping
+  /** Configuration options for the index. Data streams do not support this parameter. */
   settings?: Record<string, any>
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never }
 }
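A sketch of a conditional rollover with the request type above; per the doc comment on `conditions`, at least one `max_*` condition is required when conditions are given. The alias and thresholds are placeholders:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Roll the write alias over only if the current index is old or large enough.
const rollover = await client.indices.rollover({
  alias: 'logs-write',
  conditions: { max_age: '7d', max_docs: 10000000 },
})
console.log(rollover.rolled_over, rollover.new_index)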
 export interface IndicesRolloverResponse {
@@ -12236,10 +14519,18 @@ export interface IndicesSegmentsIndexSegment {
 }

 export interface IndicesSegmentsRequest extends RequestBase {
+  /** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */
   index?: Indices
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */
   allow_no_indices?: boolean
+  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */
   expand_wildcards?: ExpandWildcards
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
   ignore_unavailable?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never }
 }

 export interface IndicesSegmentsResponse {
@@ -12277,11 +14568,20 @@ export interface IndicesShardStoresIndicesShardStores {
 }

 export interface IndicesShardStoresRequest extends RequestBase {
+  /** List of data streams, indices, and aliases used to limit the request. */
   index?: Indices
+  /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */
   allow_no_indices?: boolean
+  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. */
   expand_wildcards?: ExpandWildcards
+  /** If true, missing or closed indices are not included in the response. */
   ignore_unavailable?: boolean
+  /** List of shard health statuses used to limit the request. */
   status?: IndicesShardStoresShardStoreStatus | IndicesShardStoresShardStoreStatus[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, status?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, status?: never }
 }

 export interface IndicesShardStoresResponse {
@@ -12319,13 +14619,24 @@ export interface IndicesShardStoresShardStoreWrapper {
 }

 export interface IndicesShrinkRequest extends RequestBase {
+  /** Name of the source index to shrink. */
   index: IndexName
+  /** Name of the target index to create. */
   target: IndexName
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */
   wait_for_active_shards?: WaitForActiveShards
+  /** The key is the alias name. Index alias names support date math. */
   aliases?: Record<IndexName, IndicesAlias>
+  /** Configuration options for the target index. */
   settings?: Record<string, any>
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never }
 }

 export interface IndicesShrinkResponse {
@@ -12335,9 +14646,16 @@
 }
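A sketch of shrinking an index with the request type above. Shrinking assumes the source has already been prepared (made read-only, with a copy of every shard on one node); the names are placeholders:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Shrink a multi-shard index down to a single primary shard.
await client.indices.shrink({
  index: 'logs-2024',
  target: 'logs-2024-shrunk',
  settings: { 'index.number_of_shards': 1 },
})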
 export interface IndicesSimulateIndexTemplateRequest extends RequestBase {
+  /** Name of the index to simulate */
   name: Name
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** If true, returns all relevant default configurations for the index template. */
   include_defaults?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, include_defaults?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, include_defaults?: never }
 }

 export interface IndicesSimulateIndexTemplateResponse {
@@ -12351,20 +14669,38 @@ export interface IndicesSimulateTemplateOverlapping {
 }

 export interface IndicesSimulateTemplateRequest extends RequestBase {
+  /** Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template configuration in the request body. */
   name?: Name
+  /** If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. */
   create?: boolean
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** If true, returns all relevant default configurations for the index template. */
   include_defaults?: boolean
+  /** This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */
   allow_auto_create?: boolean
+  /** Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. */
   index_patterns?: Indices
+  /** An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */
   composed_of?: Name[]
+  /** Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. */
   template?: IndicesPutIndexTemplateIndexTemplateMapping
+  /** If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. */
   data_stream?: IndicesDataStreamVisibility
+  /** Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. */
   priority?: long
+  /** Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. */
   version?: VersionNumber
+  /** Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch. */
   _meta?: Metadata
+  /** The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist */
   ignore_missing_component_templates?: string[]
+  /** Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. */
   deprecated?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never }
 }

 export interface IndicesSimulateTemplateResponse {
@@ -12379,13 +14715,24 @@ export interface IndicesSimulateTemplateTemplate {
 }

 export interface IndicesSplitRequest extends RequestBase {
+  /** Name of the source index to split. */
   index: IndexName
+  /** Name of the target index to create. */
   target: IndexName
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */
   wait_for_active_shards?: WaitForActiveShards
+  /** Aliases for the resulting index. */
   aliases?: Record<IndexName, IndicesAlias>
+  /** Configuration options for the target index. */
   settings?: Record<string, any>
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never }
 }

 export interface IndicesSplitResponse {
@@ -12434,17 +14781,32 @@ export interface IndicesStatsMappingStats {
 }
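A sketch of splitting an index with the request type above; the target's primary shard count must be a multiple of the source's. Names and counts are placeholders:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Split a 3-shard index into 6 primary shards.
await client.indices.split({
  index: 'my-index',
  target: 'my-index-split',
  settings: { 'index.number_of_shards': 6 },
})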
 export interface IndicesStatsRequest extends RequestBase {
+  /** Limit the information returned to the specific metrics. */
   metric?: Metrics
+  /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */
   index?: Indices
+  /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. */
   completion_fields?: Fields
+  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */
   expand_wildcards?: ExpandWildcards
+  /** Comma-separated list or wildcard expressions of fields to include in fielddata statistics. */
   fielddata_fields?: Fields
+  /** Comma-separated list or wildcard expressions of fields to include in the statistics. */
   fields?: Fields
+  /** If true, statistics are not collected from closed indices. */
   forbid_closed_indices?: boolean
+  /** Comma-separated list of search groups to include in the search statistics. */
   groups?: string | string[]
+  /** If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). */
   include_segment_file_sizes?: boolean
+  /** If true, the response includes information from segments that are not loaded into memory. */
   include_unloaded_segments?: boolean
+  /** Indicates whether statistics are aggregated at the cluster, index, or shard level. */
   level?: Level
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { metric?: never, index?: never, completion_fields?: never, expand_wildcards?: never, fielddata_fields?: never, fields?: never, forbid_closed_indices?: never, groups?: never, include_segment_file_sizes?: never, include_unloaded_segments?: never, level?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { metric?: never, index?: never, completion_fields?: never, expand_wildcards?: never, fielddata_fields?: never, fields?: never, forbid_closed_indices?: never, groups?: never, include_segment_file_sizes?: never, include_unloaded_segments?: never, level?: never }
 }

 export interface IndicesStatsResponse {
@@ -12546,21 +14908,6 @@ export interface IndicesStatsShardsTotalStats {
   total_count: long
 }

-export interface IndicesUnfreezeRequest extends RequestBase {
-  index: IndexName
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: string
-}
-
-export interface IndicesUnfreezeResponse {
-  acknowledged: boolean
-  shards_acknowledged: boolean
-}
-
 export interface IndicesUpdateAliasesAction {
   add?: IndicesUpdateAliasesAddAction
   remove?: IndicesUpdateAliasesRemoveAction
   remove_index?: IndicesUpdateAliasesRemoveIndexAction
 }

@@ -12596,9 +14943,16 @@ export interface IndicesUpdateAliasesRemoveIndexAction {
 }

 export interface IndicesUpdateAliasesRequest extends RequestBase {
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** Actions to perform. */
   actions?: IndicesUpdateAliasesAction[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never, actions?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never, actions?: never }
 }

 export type IndicesUpdateAliasesResponse = AcknowledgedResponseBase

@@ -12611,20 +14965,38 @@ export interface IndicesValidateQueryIndicesValidationExplanation {
 }

 export interface IndicesValidateQueryRequest extends RequestBase {
+  /** Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. */
   index?: Indices
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */
   allow_no_indices?: boolean
+  /** If `true`, the validation is executed on all shards instead of one random shard per index. */
   all_shards?: boolean
+  /** Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. */
   analyzer?: string
+  /** If `true`, wildcard and prefix queries are analyzed. */
   analyze_wildcard?: boolean
+  /** The default operator for query string query: `AND` or `OR`. */
   default_operator?: QueryDslOperator
+  /** Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified. */
   df?: string
+  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */
   expand_wildcards?: ExpandWildcards
+  /** If `true`, the response returns detailed information if an error has occurred. */
   explain?: boolean
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
   ignore_unavailable?: boolean
+  /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. */
   lenient?: boolean
+  /** If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. */
   rewrite?: boolean
+  /** Query in the Lucene query string syntax. */
   q?: string
+  /** Query in the Elasticsearch Query DSL. */
   query?: QueryDslQueryContainer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, all_shards?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, explain?: never, ignore_unavailable?: never, lenient?: never, rewrite?: never, q?: never, query?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, all_shards?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, explain?: never, ignore_unavailable?: never, lenient?: never, rewrite?: never, q?: never, query?: never }
 }

 export interface IndicesValidateQueryResponse {
@@ -12692,17 +15064,31 @@ export interface InferenceTextEmbeddingResult {
 }
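A sketch of validating a query without executing it, using the request type above (index and query are placeholders):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// With explain enabled, invalid queries come back with per-shard
// explanations instead of a bare boolean.
const result = await client.indices.validateQuery({
  index: 'my-index',
  explain: true,
  query: { match: { title: 'elasticsearch' } },
})
if (!result.valid) console.log(result.explanations)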
 export interface InferenceDeleteRequest extends RequestBase {
+  /** The task type */
   task_type?: InferenceTaskType
+  /** The inference Id */
   inference_id: Id
+  /** When true, the endpoint is not deleted, and a list of ingest processors which reference this endpoint is returned */
   dry_run?: boolean
+  /** When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields */
   force?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, dry_run?: never, force?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, dry_run?: never, force?: never }
 }

 export type InferenceDeleteResponse = InferenceDeleteInferenceEndpointResult

 export interface InferenceGetRequest extends RequestBase {
+  /** The task type */
   task_type?: InferenceTaskType
+  /** The inference Id */
   inference_id?: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never }
 }

 export interface InferenceGetResponse {
@@ -12710,24 +15096,148 @@
 }

 export interface InferenceInferenceRequest extends RequestBase {
+  /** The task type */
   task_type?: InferenceTaskType
+  /** The inference Id */
   inference_id: Id
+  /** Specifies the amount of time to wait for the inference request to complete. */
   timeout?: Duration
+  /** Query input, required for rerank task. Not required for other tasks. */
   query?: string
+  /** Inference input. Either a string or an array of strings. */
   input: string | string[]
+  /** Optional task settings */
   task_settings?: InferenceTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never }
 }

 export type InferenceInferenceResponse = InferenceInferenceResult

 export interface InferencePutRequest extends RequestBase {
+  /** The task type */
   task_type?: InferenceTaskType
+  /** The inference Id */
   inference_id: Id
   inference_config?: InferenceInferenceEndpoint
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, inference_config?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, inference_config?: never }
 }

 export type InferencePutResponse = InferenceInferenceEndpointInfo

+export interface InferenceStreamInferenceRequest extends RequestBase {
+  /** The unique identifier for the inference endpoint. */
+  inference_id: Id
+  /** The type of task that the model performs. */
+  task_type?: InferenceTaskType
+  /** The text on which you want to perform the inference task. It can be a single string or an array. NOTE: Inference endpoints for the completion task type currently only support a single string as input. */
+  input: string | string[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { inference_id?: never, task_type?: never, input?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { inference_id?: never, task_type?: never, input?: never }
+}
+
+export type InferenceStreamInferenceResponse = StreamResult
+
+export interface InferenceUnifiedInferenceCompletionTool {
+  type: string
+  function: InferenceUnifiedInferenceCompletionToolFunction
+}
+
+export interface InferenceUnifiedInferenceCompletionToolChoice {
+  type: string
+  function: InferenceUnifiedInferenceCompletionToolChoiceFunction
+}
+
+export interface InferenceUnifiedInferenceCompletionToolChoiceFunction {
+  name: string
+}
+
+export interface InferenceUnifiedInferenceCompletionToolFunction {
+  description?: string
+  name: string
+  parameters?: any
+  strict?: boolean
+}
+
+export type InferenceUnifiedInferenceCompletionToolType = string | InferenceUnifiedInferenceCompletionToolChoice
+
+export interface InferenceUnifiedInferenceContentObject {
+  text: string
+  type: string
+}
+
+export interface InferenceUnifiedInferenceMessage {
+  content?: InferenceUnifiedInferenceMessageContent
+  role: string
+  tool_call_id?: Id
+  tool_calls?: InferenceUnifiedInferenceToolCall[]
+}
+
+export type InferenceUnifiedInferenceMessageContent = string | InferenceUnifiedInferenceContentObject[]
+
+export interface InferenceUnifiedInferenceRequest extends RequestBase {
+  /** The task type */
+  task_type?: InferenceTaskType
+  /** The inference Id */
+  inference_id: Id
+  /** Specifies the amount of time to wait for the inference request to complete. */
+  timeout?: Duration
+  /** A list of objects representing the conversation. */
*/ + messages: InferenceUnifiedInferenceMessage[] + /** The ID of the model to use. */ + model?: string + /** The upper bound limit for the number of tokens that can be generated for a completion request. */ + max_completion_tokens?: long + /** A sequence of strings to control when the model should stop generating additional tokens. */ + stop?: string[] + /** The sampling temperature to use. */ + temperature?: float + /** Controls which tool is called by the model. */ + tool_choice?: InferenceUnifiedInferenceCompletionToolType + /** A list of tools that the model can call. */ + tools?: InferenceUnifiedInferenceCompletionTool[] + /** Nucleus sampling, an alternative to sampling with temperature. */ + top_p?: float + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, messages?: never, model?: never, max_completion_tokens?: never, stop?: never, temperature?: never, tool_choice?: never, tools?: never, top_p?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, messages?: never, model?: never, max_completion_tokens?: never, stop?: never, temperature?: never, tool_choice?: never, tools?: never, top_p?: never } +} + +export type InferenceUnifiedInferenceResponse = StreamResult + +export interface InferenceUnifiedInferenceToolCall { + id: Id + function: InferenceUnifiedInferenceToolCallFunction + type: string +} + +export interface InferenceUnifiedInferenceToolCallFunction { + arguments: string + name: string +} + +export interface InferenceUpdateRequest extends RequestBase { +/** The unique identifier of the inference endpoint. */ + inference_id: Id + /** The type of inference task that the model performs. */ + task_type?: InferenceTaskType + inference_config?: InferenceInferenceEndpoint + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, task_type?: never, inference_config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, task_type?: never, inference_config?: never } +} + +export type InferenceUpdateResponse = InferenceInferenceEndpointInfo + export interface IngestAppendProcessor extends IngestProcessorBase { field: Field value: any | any[] @@ -13247,25 +15757,46 @@ export interface IngestWeb { } export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { +/** A comma-separated list of geoip database configurations to delete */ id: Ids + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
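A type-level illustration of the unified (chat-style) request defined above; the ids are placeholders, and the import path assumes the generated types are re-exported from the client's types module:

[source,ts]
----
import type { InferenceUnifiedInferenceRequest } from '@elastic/elasticsearch/lib/api/types'

// `messages` follows InferenceUnifiedInferenceMessage; `content` may be a
// plain string or an array of InferenceUnifiedInferenceContentObject.
const chatRequest: InferenceUnifiedInferenceRequest = {
  inference_id: 'my-chat-endpoint',
  messages: [
    { role: 'system', content: 'You are a terse assistant.' },
    { role: 'user', content: 'Summarize the last deployment.' }
  ],
  temperature: 0.2,
  max_completion_tokens: 256
}
----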
*/ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } } export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase { +/** A comma-separated list of IP location database configurations. */ id: Ids + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } } export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase export interface IngestDeletePipelineRequest extends RequestBase { +/** Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. */ id: Id + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } } export type IngestDeletePipelineResponse = AcknowledgedResponseBase @@ -13289,6 +15820,10 @@ export interface IngestGeoIpStatsGeoIpNodeDatabases { } export interface IngestGeoIpStatsRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface IngestGeoIpStatsResponse { @@ -13304,7 +15839,12 @@ export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata { } export interface IngestGetGeoipDatabaseRequest extends RequestBase { +/** Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. */ id?: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface IngestGetGeoipDatabaseResponse { @@ -13320,8 +15860,14 @@ export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata { } export interface IngestGetIpLocationDatabaseRequest extends RequestBase { +/** Comma-separated list of database configuration IDs to retrieve. 
Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. */ id?: Ids + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never } } export interface IngestGetIpLocationDatabaseResponse { @@ -13329,14 +15875,25 @@ export interface IngestGetIpLocationDatabaseResponse { } export interface IngestGetPipelineRequest extends RequestBase { +/** Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. */ id?: Id + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Return pipelines without their definitions (default: false) */ summary?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, summary?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, summary?: never } } export type IngestGetPipelineResponse = Record<string, IngestPipeline> export interface IngestProcessorGrokRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface IngestProcessorGrokResponse { @@ -13344,44 +15901,82 @@ export interface IngestProcessorGrokResponse { } export interface IngestPutGeoipDatabaseRequest extends RequestBase { +/** ID of the database configuration to create or update. */ id: Id + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The provider-assigned name of the IP geolocation database to download. */ name: Name + /** The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. */ maxmind: IngestMaxmind + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, name?: never, maxmind?: never } + /** All values in `querystring` will be added to the request querystring.
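For example, the pipeline-retrieval types above can be exercised like this (a sketch reusing the `client` instance from the earlier inference example; the pipeline id is illustrative):

[source,ts]
----
// Fetch one pipeline by id; omit `id` (or pass '*') to list all pipelines.
// `summary: true` would return pipelines without their definitions.
const pipelines = await client.ingest.getPipeline({ id: 'my-pipeline' })
console.log(Object.keys(pipelines))
----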
*/ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, name?: never, maxmind?: never } } export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase export interface IngestPutIpLocationDatabaseRequest extends RequestBase { +/** The database configuration identifier. */ id: Id + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. A value of `-1` indicates that the request should never time out. */ timeout?: Duration configuration?: IngestDatabaseConfiguration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, configuration?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, configuration?: never } } export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase export interface IngestPutPipelineRequest extends RequestBase { +/** ID of the ingest pipeline to create or update. */ id: Id + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** Required version for optimistic concurrency control for pipeline updates */ if_version?: VersionNumber + /** Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. */ _meta?: Metadata + /** Description of the ingest pipeline. */ description?: string + /** Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. */ on_failure?: IngestProcessorContainer[] + /** Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. */ processors?: IngestProcessorContainer[] + /** Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. */ version?: VersionNumber + /** Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never } } export type IngestPutPipelineResponse = AcknowledgedResponseBase export interface IngestSimulateRequest extends RequestBase { +/** Pipeline to test. If you don’t specify a `pipeline` in the request body, this parameter is required. */ id?: Id + /** If `true`, the response includes output data for each processor in the executed pipeline. */ verbose?: boolean + /** Sample documents to test in the pipeline. */ docs: IngestDocument[] + /** Pipeline to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline?: IngestPipeline + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, verbose?: never, docs?: never, pipeline?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, verbose?: never, docs?: never, pipeline?: never } } export interface IngestSimulateResponse { @@ -13406,8 +16001,14 @@ export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' export interface LicenseDeleteRequest extends RequestBase { +/** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type LicenseDeleteResponse = AcknowledgedResponseBase @@ -13428,8 +16029,14 @@ export interface LicenseGetLicenseInformation { } export interface LicenseGetRequest extends RequestBase { +/** If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. This parameter is deprecated and will always be set to true in 8.x. */ accept_enterprise?: boolean + /** Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. */ local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { accept_enterprise?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. 
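A short sketch of the create-and-test round trip described by `IngestPutPipelineRequest` and `IngestSimulateRequest` (the pipeline id and processor are illustrative; `client` is the instance from the earlier example):

[source,ts]
----
// Create (or overwrite) a pipeline with a single `set` processor.
await client.ingest.putPipeline({
  id: 'my-pipeline',
  description: 'Tag incoming documents',
  processors: [{ set: { field: 'env', value: 'production' } }]
})

// Dry-run the stored pipeline against a sample document.
const simulated = await client.ingest.simulate({
  id: 'my-pipeline',
  docs: [{ _source: { message: 'hello world' } }]
})
console.log(simulated.docs)
----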
*/ + querystring?: { [key: string]: any } & { accept_enterprise?: never, local?: never } } export interface LicenseGetResponse { @@ -13437,6 +16044,10 @@ export interface LicenseGetResponse { } export interface LicenseGetBasicStatusRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface LicenseGetBasicStatusResponse { @@ -13444,6 +16055,10 @@ export interface LicenseGetBasicStatusResponse { } export interface LicenseGetTrialStatusRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface LicenseGetTrialStatusResponse { @@ -13456,11 +16071,19 @@ export interface LicensePostAcknowledgement { } export interface LicensePostRequest extends RequestBase { +/** Specifies whether you acknowledge the license changes. */ acknowledge?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration license?: LicenseLicense + /** A sequence of one or more JSON documents containing the license information. */ licenses?: LicenseLicense[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never, license?: never, licenses?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never, license?: never, licenses?: never } } export interface LicensePostResponse { @@ -13470,9 +16093,16 @@ export interface LicensePostResponse { } export interface LicensePostStartBasicRequest extends RequestBase { +/** Whether the user has acknowledged the messages that describe the effects of the change (default: false). */ acknowledge?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never } } export interface LicensePostStartBasicResponse { @@ -13484,9 +16114,15 @@ export interface LicensePostStartBasicResponse { } export interface LicensePostStartTrialRequest extends RequestBase { +/** Whether the user has acknowledged the messages that describe the effects of the change (default: false). */ acknowledge?: boolean type_query_string?: string + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acknowledge?: never, type_query_string?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring.
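The license request types above translate into calls such as this sketch (reusing the earlier `client`; whether a trial can actually be started depends on the cluster's license history):

[source,ts]
----
// Read the current license; `accept_enterprise` controls how enterprise
// licenses are reported, as documented on LicenseGetRequest.
const { license } = await client.license.get({ accept_enterprise: true })
console.log(license.type, license.status)

// Start a trial, acknowledging the feature changes up front.
await client.license.postStartTrial({ acknowledge: true })
----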
*/ + querystring?: { [key: string]: any } & { acknowledge?: never, type_query_string?: never, master_timeout?: never } } export interface LicensePostStartTrialResponse { @@ -13521,20 +16157,35 @@ export interface LogstashPipelineSettings { } export interface LogstashDeletePipelineRequest extends RequestBase { +/** An identifier for the pipeline. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type LogstashDeletePipelineResponse = boolean export interface LogstashGetPipelineRequest extends RequestBase { +/** A comma-separated list of pipeline identifiers. */ id?: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type LogstashGetPipelineResponse = Record<string, LogstashPipeline> export interface LogstashPutPipelineRequest extends RequestBase { +/** An identifier for the pipeline. */ id: Id pipeline?: LogstashPipeline + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, pipeline?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, pipeline?: never } } export type LogstashPutPipelineResponse = boolean @@ -13551,7 +16202,12 @@ export interface MigrationDeprecationsDeprecation { } export type MigrationDeprecationsDeprecationLevel = 'none' | 'info' | 'warning' | 'critical' export interface MigrationDeprecationsRequest extends RequestBase { +/** Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported. */ index?: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } } export interface MigrationDeprecationsResponse { @@ -13578,6 +16234,10 @@ export interface MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo { } export type MigrationGetFeatureUpgradeStatusMigrationStatus = 'NO_MIGRATION_NEEDED' | 'MIGRATION_NEEDED' | 'IN_PROGRESS' | 'ERROR' export interface MigrationGetFeatureUpgradeStatusRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface MigrationGetFeatureUpgradeStatusResponse { @@ -13590,6 +16250,10 @@ export interface MigrationPostFeatureUpgradeMigrationFeature { } export interface MigrationPostFeatureUpgradeRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface MigrationPostFeatureUpgradeResponse { @@ -14893,7 +17557,12 @@ export interface MlZeroShotClassificationInferenceUpdateOptions { } export interface MlClearTrainedModelDeploymentCacheRequest extends RequestBase { +/** The unique identifier of the trained model.
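As an example of the migration types above, a pre-upgrade deprecation check might look like this sketch (the index name is illustrative; omit `index` to check the whole cluster):

[source,ts]
----
// Returns cluster-, node-, and index-level deprecation warnings.
const deprecations = await client.migration.deprecations({ index: 'my-index' })
console.log(deprecations)
----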
*/ model_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never } } export interface MlClearTrainedModelDeploymentCacheResponse { @@ -14901,10 +17570,18 @@ export interface MlClearTrainedModelDeploymentCacheResponse { } export interface MlCloseJobRequest extends RequestBase { +/** Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. */ job_id: Id + /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean + /** Refer to the description for the `force` query parameter. */ force?: boolean + /** Refer to the description for the `timeout` query parameter. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, force?: never, timeout?: never } } export interface MlCloseJobResponse { @@ -14912,21 +17589,38 @@ export interface MlCloseJobResponse { } export interface MlDeleteCalendarRequest extends RequestBase { +/** A string that uniquely identifies a calendar. */ calendar_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never } } export type MlDeleteCalendarResponse = AcknowledgedResponseBase export interface MlDeleteCalendarEventRequest extends RequestBase { +/** A string that uniquely identifies a calendar. */ calendar_id: Id + /** Identifier for the scheduled event. You can obtain this identifier by using the get calendar events API. */ event_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, event_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, event_id?: never } } export type MlDeleteCalendarEventResponse = AcknowledgedResponseBase export interface MlDeleteCalendarJobRequest extends RequestBase { +/** A string that uniquely identifies a calendar. */ calendar_id: Id + /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. */ job_id: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, job_id?: never } + /** All values in `querystring` will be added to the request querystring.
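A usage sketch for `MlCloseJobRequest` above (the job id is illustrative; `_all`, a group name, or a wildcard also work, as the field documentation notes):

[source,ts]
----
// Close the job gracefully, waiting up to five minutes.
const closed = await client.ml.closeJob({ job_id: 'my-job', timeout: '5m' })
console.log(closed)
----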
*/ + querystring?: { [key: string]: any } & { calendar_id?: never, job_id?: never } } export interface MlDeleteCalendarJobResponse { @@ -14936,24 +17630,44 @@ export interface MlDeleteCalendarJobResponse { } export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { +/** Identifier for the data frame analytics job. */ id: Id + /** If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. */ force?: boolean + /** The time to wait for the job to be deleted. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, force?: never, timeout?: never } } export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase export interface MlDeleteDatafeedRequest extends RequestBase { +/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ datafeed_id: Id + /** Use to forcefully delete a started datafeed; this method is quicker than stopping and deleting the datafeed. */ force?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, force?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, force?: never } } export type MlDeleteDatafeedResponse = AcknowledgedResponseBase export interface MlDeleteExpiredDataRequest extends RequestBase { +/** Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. */ job_id?: Id + /** The desired requests per second for the deletion processes. The default behavior is no throttling. */ requests_per_second?: float + /** How long can the underlying delete processes run until they are canceled. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, requests_per_second?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, requests_per_second?: never, timeout?: never } } export interface MlDeleteExpiredDataResponse { @@ -14961,55 +17675,102 @@ export interface MlDeleteExpiredDataResponse { } export interface MlDeleteFilterRequest extends RequestBase { +/** A string that uniquely identifies a filter. */ filter_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { filter_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { filter_id?: never } } export type MlDeleteFilterResponse = AcknowledgedResponseBase export interface MlDeleteForecastRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** A comma-separated list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all` or `*` the API deletes all forecasts from the job. */ forecast_id?: Id + /** Specifies whether an error occurs when there are no forecasts. 
In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error. */ allow_no_forecasts?: boolean + /** Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, forecast_id?: never, allow_no_forecasts?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, forecast_id?: never, allow_no_forecasts?: never, timeout?: never } } export type MlDeleteForecastResponse = AcknowledgedResponseBase export interface MlDeleteJobRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** Use to forcefully delete an opened job; this method is quicker than closing and deleting the job. */ force?: boolean + /** Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. */ delete_user_annotations?: boolean + /** Specifies whether the request should return immediately or wait until the job deletion completes. */ wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, force?: never, delete_user_annotations?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, force?: never, delete_user_annotations?: never, wait_for_completion?: never } } export type MlDeleteJobResponse = AcknowledgedResponseBase export interface MlDeleteModelSnapshotRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** Identifier for the model snapshot. */ snapshot_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never } } export type MlDeleteModelSnapshotResponse = AcknowledgedResponseBase export interface MlDeleteTrainedModelRequest extends RequestBase { +/** The unique identifier of the trained model. */ model_id: Id + /** Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. */ force?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, force?: never, timeout?: never } } export type MlDeleteTrainedModelResponse = AcknowledgedResponseBase export interface MlDeleteTrainedModelAliasRequest extends RequestBase { +/** The model alias to delete. */ model_alias: Name + /** The trained model ID to which the model alias refers. */ model_id: Id + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { model_alias?: never, model_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_alias?: never, model_id?: never } } export type MlDeleteTrainedModelAliasResponse = AcknowledgedResponseBase export interface MlEstimateModelMemoryRequest extends RequestBase { +/** The analysis configuration for the job. Refer to the documentation of the `analysis_config` component of the body of this API for the properties that you can specify. */ analysis_config?: MlAnalysisConfig + /** Estimates of the highest cardinality in a single bucket that is observed for influencer fields over the time period that the job analyzes data. To produce a good answer, values must be provided for all influencer fields. Providing values for fields that are not listed as `influencers` has no effect on the estimation. */ max_bucket_cardinality?: Record<Field, long> + /** Estimates of the cardinality that is observed for fields over the whole time period that the job analyzes data. To produce a good answer, values must be provided for fields referenced in the `by_field_name`, `over_field_name` and `partition_field_name` of any detectors. Providing values for other fields has no effect on the estimation. It can be omitted from the request if no detectors have a `by_field_name`, `over_field_name` or `partition_field_name`. */ overall_cardinality?: Record<Field, long> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { analysis_config?: never, max_bucket_cardinality?: never, overall_cardinality?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { analysis_config?: never, max_bucket_cardinality?: never, overall_cardinality?: never } } export interface MlEstimateModelMemoryResponse { @@ -15096,9 +17857,16 @@ export interface MlEvaluateDataFrameDataframeRegressionSummary { } export interface MlEvaluateDataFrameRequest extends RequestBase { +/** Defines the type of evaluation you want to perform. */ evaluation: MlDataframeEvaluationContainer + /** Defines the `index` in which the evaluation will be performed. */ index: IndexName + /** A query clause that retrieves a subset of data from the source index. */ query?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { evaluation?: never, index?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { evaluation?: never, index?: never, query?: never } } export interface MlEvaluateDataFrameResponse { @@ -15108,15 +17876,28 @@ export interface MlEvaluateDataFrameResponse { } export interface MlExplainDataFrameAnalyticsRequest extends RequestBase { +/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ id?: Id + /** The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified. */ source?: MlDataframeAnalyticsSource + /** The destination configuration, consisting of index and optionally results_field (ml by default).
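For `MlEvaluateDataFrameRequest` above, a classification evaluation against a results index might look like this sketch (the index and field names are placeholders for wherever a data frame analytics job wrote its predictions):

[source,ts]
----
const evaluation = await client.ml.evaluateDataFrame({
  index: 'my-dest-index',
  evaluation: {
    classification: {
      actual_field: 'label',
      predicted_field: 'ml.label_prediction'
    }
  }
})
console.log(evaluation.classification)
----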
*/ dest?: MlDataframeAnalyticsDestination + /** The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. */ analysis?: MlDataframeAnalysisContainer + /** A description of the job. */ description?: string + /** The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. */ model_memory_limit?: string + /** The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. */ max_num_threads?: integer + /** Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis. */ analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + /** Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ allow_lazy_start?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, source?: never, dest?: never, analysis?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, analyzed_fields?: never, allow_lazy_start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, source?: never, dest?: never, analysis?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, analyzed_fields?: never, allow_lazy_start?: never } } export interface MlExplainDataFrameAnalyticsResponse { @@ -15125,12 +17906,22 @@ export interface MlExplainDataFrameAnalyticsResponse { } export interface MlFlushJobRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** Refer to the description for the `advance_time` query parameter. */ advance_time?: DateTime + /** Refer to the description for the `calc_interim` query parameter. */ calc_interim?: boolean + /** Refer to the description for the `end` query parameter. */ end?: DateTime + /** Refer to the description for the `skip_time` query parameter. */ skip_time?: DateTime + /** Refer to the description for the `start` query parameter. */ start?: DateTime + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, advance_time?: never, calc_interim?: never, end?: never, skip_time?: never, start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, advance_time?: never, calc_interim?: never, end?: never, skip_time?: never, start?: never } } export interface MlFlushJobResponse { @@ -15139,10 +17930,18 @@ export interface MlFlushJobResponse { } export interface MlForecastRequest extends RequestBase { +/** Identifier for the anomaly detection job. 
The job must be open when you create a forecast; otherwise, an error occurs. */ job_id: Id + /** Refer to the description for the `duration` query parameter. */ duration?: Duration + /** Refer to the description for the `expires_in` query parameter. */ expires_in?: Duration + /** Refer to the description for the `max_model_memory` query parameter. */ max_model_memory?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, duration?: never, expires_in?: never, max_model_memory?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, duration?: never, expires_in?: never, max_model_memory?: never } } export interface MlForecastResponse { @@ -15151,18 +17950,33 @@ export interface MlForecastResponse { } export interface MlGetBucketsRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets. */ timestamp?: DateTime + /** Skips the specified number of buckets. */ from?: integer + /** Specifies the maximum number of buckets to obtain. */ size?: integer + /** Refer to the description for the `anomaly_score` query parameter. */ anomaly_score?: double + /** Refer to the description for the `desc` query parameter. */ desc?: boolean + /** Refer to the description for the `end` query parameter. */ end?: DateTime + /** Refer to the description for the `exclude_interim` query parameter. */ exclude_interim?: boolean + /** Refer to the description for the `expand` query parameter. */ expand?: boolean page?: MlPage + /** Refer to the desription for the `sort` query parameter. */ sort?: Field + /** Refer to the description for the `start` query parameter. */ start?: DateTime + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, timestamp?: never, from?: never, size?: never, anomaly_score?: never, desc?: never, end?: never, exclude_interim?: never, expand?: never, page?: never, sort?: never, start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, timestamp?: never, from?: never, size?: never, anomaly_score?: never, desc?: never, end?: never, exclude_interim?: never, expand?: never, page?: never, sort?: never, start?: never } } export interface MlGetBucketsResponse { @@ -15171,12 +17985,22 @@ export interface MlGetBucketsResponse { } export interface MlGetCalendarEventsRequest extends RequestBase { +/** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ calendar_id: Id + /** Specifies to get events with timestamps earlier than this time. */ end?: DateTime + /** Skips the specified number of events. */ from?: integer + /** Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`. */ job_id?: Id + /** Specifies the maximum number of events to obtain. */ size?: integer + /** Specifies to get events with timestamps after this time. */ start?: DateTime + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { calendar_id?: never, end?: never, from?: never, job_id?: never, size?: never, start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, end?: never, from?: never, job_id?: never, size?: never, start?: never } } export interface MlGetCalendarEventsResponse { @@ -15191,10 +18015,18 @@ export interface MlGetCalendarsCalendar { } export interface MlGetCalendarsRequest extends RequestBase { +/** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ calendar_id?: Id + /** Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. */ from?: integer + /** Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. */ size?: integer + /** This object is supported only when you omit the calendar identifier. */ page?: MlPage + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, from?: never, size?: never, page?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, from?: never, size?: never, page?: never } } export interface MlGetCalendarsResponse { @@ -15203,12 +18035,22 @@ export interface MlGetCalendarsResponse { } export interface MlGetCategoriesRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** Identifier for the category, which is unique in the job. If you specify neither the category ID nor the partition_field_value, the API returns information about all categories. If you specify only the partition_field_value, it returns information about all categories for the specified partition. */ category_id?: CategoryId + /** Skips the specified number of categories. */ from?: integer + /** Only return categories for the specified partition. */ partition_field_value?: string + /** Specifies the maximum number of categories to obtain. */ size?: integer + /** Configures pagination. This parameter has the `from` and `size` properties. */ page?: MlPage + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, category_id?: never, from?: never, partition_field_value?: never, size?: never, page?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, category_id?: never, from?: never, partition_field_value?: never, size?: never, page?: never } } export interface MlGetCategoriesResponse { @@ -15217,11 +18059,20 @@ export interface MlGetCategoriesResponse { } export interface MlGetDataFrameAnalyticsRequest extends RequestBase { +/** Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. */ id?: Id + /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. 
Contains wildcard expressions and there are only partial matches. The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** Skips the specified number of data frame analytics jobs. */ from?: integer + /** Specifies the maximum number of data frame analytics jobs to obtain. */ size?: integer + /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ exclude_generated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never } } export interface MlGetDataFrameAnalyticsResponse { @@ -15230,11 +18081,20 @@ export interface MlGetDataFrameAnalyticsResponse { } export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase { +/** Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. */ id?: Id + /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** Skips the specified number of data frame analytics jobs. */ from?: integer + /** Specifies the maximum number of data frame analytics jobs to obtain. */ size?: integer + /** Defines whether the stats response should be verbose. */ verbose?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, verbose?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, verbose?: never } } export interface MlGetDataFrameAnalyticsStatsResponse { @@ -15243,8 +18103,14 @@ export interface MlGetDataFrameAnalyticsStatsResponse { } export interface MlGetDatafeedStatsRequest extends RequestBase { +/** Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. */ datafeed_id?: Ids + /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. 
The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never } } export interface MlGetDatafeedStatsResponse { @@ -15253,9 +18119,16 @@ export interface MlGetDatafeedStatsResponse { } export interface MlGetDatafeedsRequest extends RequestBase { +/** Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. */ datafeed_id?: Ids + /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ exclude_generated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, exclude_generated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, exclude_generated?: never } } export interface MlGetDatafeedsResponse { @@ -15264,9 +18137,16 @@ export interface MlGetDatafeedsResponse { } export interface MlGetFiltersRequest extends RequestBase { +/** A string that uniquely identifies a filter. */ filter_id?: Ids + /** Skips the specified number of filters. */ from?: integer + /** Specifies the maximum number of filters to obtain. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { filter_id?: never, from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { filter_id?: never, from?: never, size?: never } } export interface MlGetFiltersResponse { @@ -15275,16 +18155,30 @@ export interface MlGetFiltersResponse { } export interface MlGetInfluencersRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** If true, the results are sorted in descending order. */ desc?: boolean + /** Returns influencers with timestamps earlier than this time. The default value means it is unset and results are not limited to specific timestamps. */ end?: DateTime + /** If true, the output excludes interim results. By default, interim results are included. 
*/ exclude_interim?: boolean + /** Returns influencers with anomaly scores greater than or equal to this value. */ influencer_score?: double + /** Skips the specified number of influencers. */ from?: integer + /** Specifies the maximum number of influencers to obtain. */ size?: integer + /** Specifies the sort field for the requested influencers. By default, the influencers are sorted by the `influencer_score` value. */ sort?: Field + /** Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps. */ start?: DateTime + /** Configures pagination. This parameter has the `from` and `size` properties. */ page?: MlPage + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, desc?: never, end?: never, exclude_interim?: never, influencer_score?: never, from?: never, size?: never, sort?: never, start?: never, page?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, desc?: never, end?: never, exclude_interim?: never, influencer_score?: never, from?: never, size?: never, sort?: never, start?: never, page?: never } } export interface MlGetInfluencersResponse { @@ -15293,8 +18187,14 @@ export interface MlGetInfluencersResponse { } export interface MlGetJobStatsRequest extends RequestBase { +/** Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. */ job_id?: Id + /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a `404` status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never } } export interface MlGetJobStatsResponse { @@ -15303,9 +18203,16 @@ export interface MlGetJobStatsResponse { } export interface MlGetJobsRequest extends RequestBase { +/** Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. */ job_id?: Ids + /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. 
*/ allow_no_match?: boolean + /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ exclude_generated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, exclude_generated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, exclude_generated?: never } } export interface MlGetJobsResponse { @@ -15354,9 +18261,16 @@ export interface MlGetMemoryStatsMemory { } export interface MlGetMemoryStatsRequest extends RequestBase { +/** The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` */ node_id?: Id + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } } export interface MlGetMemoryStatsResponse { @@ -15366,9 +18280,16 @@ export interface MlGetMemoryStatsResponse { } export interface MlGetModelSnapshotUpgradeStatsRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ snapshot_id: Id + /** Specifies what to do when the request: - Contains wildcard expressions and there are no jobs that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. The default value is true, which returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, allow_no_match?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, allow_no_match?: never } } export interface MlGetModelSnapshotUpgradeStatsResponse { @@ -15377,15 +18298,27 @@ export interface MlGetModelSnapshotUpgradeStatsResponse { } export interface MlGetModelSnapshotsRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. 
You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ snapshot_id?: Id + /** Skips the specified number of snapshots. */ from?: integer + /** Specifies the maximum number of snapshots to obtain. */ size?: integer + /** Refer to the description for the `desc` query parameter. */ desc?: boolean + /** Refer to the description for the `end` query parameter. */ end?: DateTime page?: MlPage + /** Refer to the description for the `sort` query parameter. */ sort?: Field + /** Refer to the description for the `start` query parameter. */ start?: DateTime + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, from?: never, size?: never, desc?: never, end?: never, page?: never, sort?: never, start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, from?: never, size?: never, desc?: never, end?: never, page?: never, sort?: never, start?: never } } export interface MlGetModelSnapshotsResponse { @@ -15394,14 +18327,26 @@ export interface MlGetModelSnapshotsResponse { } export interface MlGetOverallBucketsRequest extends RequestBase { +/** Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. You can summarize the bucket results for all anomaly detection jobs by using `_all` or by specifying `*` as the `<job_id>`. */ job_id: Id + /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean + /** Refer to the description for the `bucket_span` query parameter. */ bucket_span?: Duration + /** Refer to the description for the `end` query parameter. */ end?: DateTime + /** Refer to the description for the `exclude_interim` query parameter. */ exclude_interim?: boolean + /** Refer to the description for the `overall_score` query parameter. */ overall_score?: double | string + /** Refer to the description for the `start` query parameter. */ start?: DateTime + /** Refer to the description for the `top_n` query parameter. */ top_n?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, bucket_span?: never, end?: never, exclude_interim?: never, overall_score?: never, start?: never, top_n?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, bucket_span?: never, end?: never, exclude_interim?: never, overall_score?: never, start?: never, top_n?: never } } export interface MlGetOverallBucketsResponse { @@ -15410,16 +18355,29 @@ export interface MlGetOverallBucketsResponse { } export interface MlGetRecordsRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** Skips the specified number of records. */ from?: integer + /** Specifies the maximum number of records to obtain. */ size?: integer + /** Refer to the description for the `desc` query parameter. */ desc?: boolean + /** Refer to the description for the `end` query parameter. */ end?: DateTime + /** Refer to the description for the `exclude_interim` query parameter. */ exclude_interim?: boolean page?: MlPage + /** Refer to the description for the `record_score` query parameter.
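How `MlGetOverallBucketsRequest` might be used to summarize results across jobs (a sketch; the job names are hypothetical, and `top_n`/`overall_score` mirror the query parameters referenced above):

[source,ts]
----
// Summarize bucket results across two jobs at a one-hour span
const overall = await client.ml.getOverallBuckets({
  job_id: 'job-1,job-2', // comma-separated list, a group name, or '_all'
  bucket_span: '1h',
  top_n: 2,              // average the top two bucket scores per job
  overall_score: 50      // keep overall buckets scoring at least 50
})
console.log(overall.count, overall.overall_buckets)
----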
*/ record_score?: double + /** Refer to the description for the `sort` query parameter. */ sort?: Field + /** Refer to the description for the `start` query parameter. */ start?: DateTime + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, from?: never, size?: never, desc?: never, end?: never, exclude_interim?: never, page?: never, record_score?: never, sort?: never, start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, from?: never, size?: never, desc?: never, end?: never, exclude_interim?: never, page?: never, record_score?: never, sort?: never, start?: never } } export interface MlGetRecordsResponse { @@ -15428,15 +18386,28 @@ export interface MlGetRecordsResponse { } export interface MlGetTrainedModelsRequest extends RequestBase { +/** The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. */ model_id?: Ids + /** Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. */ allow_no_match?: boolean + /** Specifies whether the included model definition should be returned as a JSON map (true) or in a custom compressed format (false). */ decompress_definition?: boolean + /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ exclude_generated?: boolean + /** Skips the specified number of models. */ from?: integer + /** A comma delimited string of optional fields to include in the response body. */ include?: MlInclude + /** parameter is deprecated! Use [include=definition] instead */ include_model_definition?: boolean + /** Specifies the maximum number of models to obtain. */ size?: integer + /** A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned. */ tags?: string | string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, include_model_definition?: never, size?: never, tags?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, include_model_definition?: never, size?: never, tags?: never } } export interface MlGetTrainedModelsResponse { @@ -15445,10 +18416,18 @@ export interface MlGetTrainedModelsResponse { } export interface MlGetTrainedModelsStatsRequest extends RequestBase { +/** The unique identifier of the trained model or a model alias. It can be a comma-separated list or a wildcard expression. 
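A sketch of listing trained models with `MlGetTrainedModelsRequest`; the wildcard pattern is a placeholder, and `exclude_generated` corresponds to the field documented above.

[source,ts]
----
// List matching models without cluster-specific generated fields
const models = await client.ml.getTrainedModels({
  model_id: 'my-model-*',  // hypothetical wildcard pattern
  exclude_generated: true, // make configs portable to another cluster
  size: 100
})
for (const config of models.trained_model_configs) {
  console.log(config.model_id, config.model_type)
}
----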
*/ model_id?: Ids + /** Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. */ allow_no_match?: boolean + /** Skips the specified number of models. */ from?: integer + /** Specifies the maximum number of models to obtain. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, from?: never, size?: never } } export interface MlGetTrainedModelsStatsResponse { @@ -15457,10 +18436,18 @@ export interface MlGetTrainedModelsStatsResponse { } export interface MlInferTrainedModelRequest extends RequestBase { +/** The unique identifier of the trained model. */ model_id: Id + /** Controls the amount of time to wait for inference results. */ timeout?: Duration + /** An array of objects to pass to the model for inference. The objects should contain fields matching your configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed. */ docs: Record[] + /** The inference configuration updates to apply on the API call */ inference_config?: MlInferenceConfigUpdateContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, timeout?: never, docs?: never, inference_config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, timeout?: never, docs?: never, inference_config?: never } } export interface MlInferTrainedModelResponse { @@ -15498,6 +18485,10 @@ export interface MlInfoNativeCode { } export interface MlInfoRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface MlInfoResponse { @@ -15508,8 +18499,14 @@ export interface MlInfoResponse { } export interface MlOpenJobRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** Refer to the description for the `timeout` query parameter. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, timeout?: never } } export interface MlOpenJobResponse { @@ -15518,8 +18515,14 @@ export interface MlOpenJobResponse { } export interface MlPostCalendarEventsRequest extends RequestBase { +/** A string that uniquely identifies a calendar. */ calendar_id: Id + /** A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. */ events: MlCalendarEvent[] + /** All values in `body` will be added to the request body.
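A sketch of `MlInferTrainedModelRequest`: `docs` uses the conventional `text_field` input named in the comment above; the model id is hypothetical.

[source,ts]
----
// Run inference against a deployed NLP model
const inference = await client.ml.inferTrainedModel({
  model_id: 'my-nlp-model', // hypothetical model id
  timeout: '30s',
  docs: [{ text_field: 'Elasticsearch is a distributed search engine.' }]
})
console.log(inference.inference_results)
----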
*/ + body?: string | { [key: string]: any } & { calendar_id?: never, events?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, events?: never } } export interface MlPostCalendarEventsResponse { @@ -15527,10 +18530,17 @@ export interface MlPostCalendarEventsResponse { } export interface MlPostDataRequest extends RequestBase { +/** Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. */ job_id: Id + /** Specifies the end of the bucket resetting range. */ reset_end?: DateTime + /** Specifies the start of the bucket resetting range. */ reset_start?: DateTime data?: TData[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, reset_end?: never, reset_start?: never, data?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, reset_end?: never, reset_start?: never, data?: never } } export interface MlPostDataResponse { @@ -15563,8 +18573,14 @@ export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig { } export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase { +/** Identifier for the data frame analytics job. */ id?: Id + /** A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API. */ config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, config?: never } } export interface MlPreviewDataFrameAnalyticsResponse { @@ -15572,19 +18588,35 @@ export interface MlPreviewDataFrameAnalyticsResponse { } export interface MlPreviewDatafeedRequest extends RequestBase { +/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body. */ datafeed_id?: Id + /** The start time from where the datafeed preview should begin */ start?: DateTime + /** The end time when the datafeed preview should stop */ end?: DateTime + /** The datafeed definition to preview. */ datafeed_config?: MlDatafeedConfig + /** The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. */ job_config?: MlJobConfig + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, start?: never, end?: never, datafeed_config?: never, job_config?: never } + /** All values in `querystring` will be added to the request querystring. 
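A sketch of `MlPostDataRequest`, which is generic over the document type `TData`; the job must be open to accept data, and the records below are fabricated for illustration.

[source,ts]
----
// Send two records directly to an open anomaly detection job
const posted = await client.ml.postData({
  job_id: 'my-anomaly-job', // hypothetical job id
  data: [
    { timestamp: 1713225600000, bytes: 512 },
    { timestamp: 1713225660000, bytes: 2048 }
  ]
})
console.log(posted.processed_record_count)
----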
*/ + querystring?: { [key: string]: any } & { datafeed_id?: never, start?: never, end?: never, datafeed_config?: never, job_config?: never } } export type MlPreviewDatafeedResponse = TDocument[] export interface MlPutCalendarRequest extends RequestBase { +/** A string that uniquely identifies a calendar. */ calendar_id: Id + /** An array of anomaly detection job identifiers. */ job_ids?: Id[] + /** A description of the calendar. */ description?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, job_ids?: never, description?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, job_ids?: never, description?: never } } export interface MlPutCalendarResponse { @@ -15594,8 +18626,14 @@ export interface MlPutCalendarResponse { } export interface MlPutCalendarJobRequest extends RequestBase { +/** A string that uniquely identifies a calendar. */ calendar_id: Id + /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. */ job_id: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, job_id?: never } } export interface MlPutCalendarJobResponse { @@ -15605,18 +18643,31 @@ export interface MlPutCalendarJobResponse { } export interface MlPutDataFrameAnalyticsRequest extends RequestBase { +/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ id: Id + /** Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If set to `false` and a machine learning node with capacity to run the job cannot be immediately found, the API returns an error. If set to `true`, the API does not return an error; the job waits in the `starting` state until sufficient machine learning node capacity is available. This behavior is also affected by the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. */ allow_lazy_start?: boolean + /** The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. */ analysis: MlDataframeAnalysisContainer + /** Specifies `includes` and/or `excludes` patterns to select which fields will be included in the analysis. The patterns specified in `excludes` are applied last, therefore `excludes` takes precedence. In other words, if the same field is specified in both `includes` and `excludes`, then the field will not be included in the analysis. If `analyzed_fields` is not set, only the relevant fields will be included. For example, all the numeric fields for outlier detection. The supported fields vary for each type of analysis. Outlier detection requires numeric or `boolean` data to analyze. The algorithms don’t support missing values therefore fields that have data types other than numeric or boolean are ignored. Documents where included fields contain missing values, null values, or an array are also ignored. 
Therefore the `dest` index may contain documents that don’t have an outlier score. Regression supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the regression analysis. Classification supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the classification analysis. Classification analysis can be improved by mapping ordinal variable values to a single number. For example, in case of age ranges, you can model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. */ analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + /** A description of the job. */ description?: string + /** The destination configuration. */ dest: MlDataframeAnalyticsDestination + /** The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. */ max_num_threads?: integer _meta?: Metadata + /** The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. */ model_memory_limit?: string + /** The configuration of how to source the analysis data. */ source: MlDataframeAnalyticsSource headers?: HttpHeaders version?: VersionString + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, allow_lazy_start?: never, analysis?: never, analyzed_fields?: never, description?: never, dest?: never, max_num_threads?: never, _meta?: never, model_memory_limit?: never, source?: never, headers?: never, version?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_lazy_start?: never, analysis?: never, analyzed_fields?: never, description?: never, dest?: never, max_num_threads?: never, _meta?: never, model_memory_limit?: never, source?: never, headers?: never, version?: never } } export interface MlPutDataFrameAnalyticsResponse { @@ -15636,29 +18687,53 @@ export interface MlPutDataFrameAnalyticsResponse { } export interface MlPutDatafeedRequest extends RequestBase { +/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ datafeed_id: Id + /** If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. */ expand_wildcards?: ExpandWildcards + /** If true, concrete, expanded, or aliased indices are ignored when frozen. */ ignore_throttled?: boolean + /** If true, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ aggregations?: Record /** @alias aggregations */ + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ aggs?: Record + /** Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. */ chunking_config?: MlChunkingConfig + /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ indices?: Indices /** @alias indices */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ indexes?: Indices + /** Specifies index expansion options that are used during search */ indices_options?: IndicesOptions + /** Identifier for the anomaly detection job. */ job_id?: Id + /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ max_empty_searches?: integer + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. 
*/ query?: QueryDslQueryContainer + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */ query_delay?: Duration + /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields + /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ scroll_size?: integer headers?: HttpHeaders + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, aggs?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never, headers?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, aggs?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never, headers?: never } } export interface MlPutDatafeedResponse { @@ -15680,9 +18755,16 @@ export interface MlPutDatafeedResponse { } export interface MlPutFilterRequest extends RequestBase { +/** A string that uniquely identifies a filter. */ filter_id: Id + /** A description of the filter. */ description?: string + /** The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. Up to 10000 items are allowed in each filter. */ items?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { filter_id?: never, description?: never, items?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { filter_id?: never, description?: never, items?: never } } export interface MlPutFilterResponse { @@ -15692,26 +18774,50 @@ export interface MlPutFilterResponse { } export interface MlPutJobRequest extends RequestBase { +/** The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ job_id: Id + /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. 
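Putting `MlPutDatafeedRequest` together: a sketch that wires a datafeed to an existing job. The index pattern and ids are placeholders; `query` is passed verbatim to the search, as the comment above notes.

[source,ts]
----
// Create a datafeed that feeds 'my-anomaly-job' from a metrics index
await client.ml.putDatafeed({
  datafeed_id: 'datafeed-my-anomaly-job', // hypothetical id
  job_id: 'my-anomaly-job',
  indices: ['my-metrics-*'],
  query: { match_all: {} }, // forwarded verbatim to Elasticsearch
  query_delay: '90s',
  scroll_size: 1000
})
----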
*/ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: * `all`: Match any data stream or index, including hidden ones. * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ expand_wildcards?: ExpandWildcards + /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean + /** If `true`, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean + /** Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. */ allow_lazy_open?: boolean + /** Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. */ analysis_config: MlAnalysisConfig + /** Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ analysis_limits?: MlAnalysisLimits + /** Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. */ background_persist_interval?: Duration + /** Advanced configuration option. Contains custom meta data about the job. */ custom_settings?: MlCustomSettings + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. */ daily_model_snapshot_retention_after_days?: long + /** Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. */ data_description: MlDataDescription + /** Defines a datafeed for the anomaly detection job.
If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. */ datafeed_config?: MlDatafeedConfig + /** A description of the job. */ description?: string + /** A list of job groups. A job can belong to no groups or many. */ groups?: string[] + /** This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. */ model_plot_config?: MlModelPlotConfig + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. */ model_snapshot_retention_days?: long + /** Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. */ renormalization_window_days?: long + /** A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. */ results_index_name?: IndexName + /** Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. */ results_retention_days?: long + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, allow_lazy_open?: never, analysis_config?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, daily_model_snapshot_retention_after_days?: never, data_description?: never, datafeed_config?: never, description?: never, groups?: never, model_plot_config?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_index_name?: never, results_retention_days?: never } + /** All values in `querystring` will be added to the request querystring. 
querystring?: { [key: string]: any } & { job_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, allow_lazy_open?: never, analysis_config?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, daily_model_snapshot_retention_after_days?: never, data_description?: never, datafeed_config?: never, description?: never, groups?: never, model_plot_config?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_index_name?: never, results_retention_days?: never } } export interface MlPutJobResponse { @@ -15779,20 +18885,38 @@ export interface MlPutTrainedModelPreprocessor { } export interface MlPutTrainedModelRequest extends RequestBase { +/** The unique identifier of the trained model. */ model_id: Id + /** If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. */ defer_definition_decompression?: boolean + /** Whether to wait for all child operations (e.g. model download) to complete. */ wait_for_completion?: boolean + /** The compressed (GZipped and Base64 encoded) inference definition of the model. If compressed_definition is specified, then definition cannot be specified. */ compressed_definition?: string + /** The inference definition for the model. If definition is specified, then compressed_definition cannot be specified. */ definition?: MlPutTrainedModelDefinition + /** A human-readable description of the inference trained model. */ description?: string + /** The default configuration for inference. This can be either a regression or classification configuration. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. */ inference_config?: MlInferenceConfigCreateContainer + /** The input field names for the model definition. */ input?: MlPutTrainedModelInput + /** An object map that contains metadata about the model. */ metadata?: any + /** The model type. */ model_type?: MlTrainedModelType + /** The estimated memory usage in bytes to keep the trained model in memory. This property is supported only if defer_definition_decompression is true or the model definition is not supplied. */ model_size_bytes?: long + /** The platform architecture (if applicable) of the trained model. If the model only works on one platform, because it is heavily optimized for a particular processor architecture and OS combination, then this field specifies which. The format of the string must match the platform identifiers used by Elasticsearch, so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. For portable models (those that work independent of processor architecture or OS features), leave this field unset. */ platform_architecture?: string + /** An array of tags to organize the model. */ tags?: string[] + /** Optional prefix strings applied at inference */ prefix_strings?: MlTrainedModelPrefixStrings + /** All values in `body` will be added to the request body.
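A sketch of a minimal `MlPutJobRequest`: one detector plus a data description. The field names and bucket span are illustrative.

[source,ts]
----
// Create a simple count-based anomaly detection job
await client.ml.putJob({
  job_id: 'my-anomaly-job',
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'count' }]
  },
  data_description: {
    time_field: 'timestamp',
    time_format: 'epoch_ms'
  }
})
----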
*/ + body?: string | { [key: string]: any } & { model_id?: never, defer_definition_decompression?: never, wait_for_completion?: never, compressed_definition?: never, definition?: never, description?: never, inference_config?: never, input?: never, metadata?: never, model_type?: never, model_size_bytes?: never, platform_architecture?: never, tags?: never, prefix_strings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, defer_definition_decompression?: never, wait_for_completion?: never, compressed_definition?: never, definition?: never, description?: never, inference_config?: never, input?: never, metadata?: never, model_type?: never, model_size_bytes?: never, platform_architecture?: never, tags?: never, prefix_strings?: never } } export type MlPutTrainedModelResponse = MlTrainedModelConfig @@ -15834,44 +18958,82 @@ export interface MlPutTrainedModelWeights { } export interface MlPutTrainedModelAliasRequest extends RequestBase { +/** The alias to create or update. This value cannot end in numbers. */ model_alias: Name + /** The identifier for the trained model that the alias refers to. */ model_id: Id + /** Specifies whether the alias gets reassigned to the specified trained model if it is already assigned to a different model. If the alias is already assigned and this parameter is false, the API returns an error. */ reassign?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_alias?: never, model_id?: never, reassign?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_alias?: never, model_id?: never, reassign?: never } } export type MlPutTrainedModelAliasResponse = AcknowledgedResponseBase export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { +/** The unique identifier of the trained model. */ model_id: Id + /** The definition part number. When the definition is loaded for inference the definition parts are streamed in the order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. */ part: integer + /** The definition part for the model. Must be a base64 encoded string. */ definition: string + /** The total uncompressed definition length in bytes. Not base64 encoded. */ total_definition_length: long + /** The total number of parts that will be uploaded. Must be greater than 0. */ total_parts: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, part?: never, definition?: never, total_definition_length?: never, total_parts?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, part?: never, definition?: never, total_definition_length?: never, total_parts?: never } } export type MlPutTrainedModelDefinitionPartResponse = AcknowledgedResponseBase export interface MlPutTrainedModelVocabularyRequest extends RequestBase { +/** The unique identifier of the trained model. */ model_id: Id + /** The model vocabulary, which must not be empty. */ vocabulary: string[] + /** The optional model merges if required by the tokenizer. */ merges?: string[] + /** The optional vocabulary value scores if required by the tokenizer. */ scores?: double[] + /** All values in `body` will be added to the request body. 
body?: string | { [key: string]: any } & { model_id?: never, vocabulary?: never, merges?: never, scores?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, vocabulary?: never, merges?: never, scores?: never } } export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase export interface MlResetJobRequest extends RequestBase { +/** The ID of the job to reset. */ job_id: Id + /** Should this request wait until the operation has completed before returning. */ wait_for_completion?: boolean + /** Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. */ delete_user_annotations?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, wait_for_completion?: never, delete_user_annotations?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, wait_for_completion?: never, delete_user_annotations?: never } } export type MlResetJobResponse = AcknowledgedResponseBase export interface MlRevertModelSnapshotRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** You can specify `empty` as the `<snapshot_id>`. Reverting to the empty snapshot means the anomaly detection job starts learning a new model from scratch when it is started. */ snapshot_id: Id + /** Refer to the description for the `delete_intervening_results` query parameter. */ delete_intervening_results?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, delete_intervening_results?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, delete_intervening_results?: never } } export interface MlRevertModelSnapshotResponse { @@ -15879,15 +19041,27 @@ export interface MlRevertModelSnapshotResponse { } export interface MlSetUpgradeModeRequest extends RequestBase { +/** When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from starting. */ enabled?: boolean + /** The time to wait for the request to be completed. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { enabled?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { enabled?: never, timeout?: never } } export type MlSetUpgradeModeResponse = AcknowledgedResponseBase export interface MlStartDataFrameAnalyticsRequest extends RequestBase { +/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ id: Id + /** Controls the amount of time to wait until the data frame analytics job starts. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring.
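A sketch of `MlRevertModelSnapshotRequest` using the special `empty` snapshot described above; the job id is a placeholder.

[source,ts]
----
// Roll the job back to a clean model, discarding intervening results
const reverted = await client.ml.revertModelSnapshot({
  job_id: 'my-anomaly-job',
  snapshot_id: 'empty', // start learning from scratch when the job reopens
  delete_intervening_results: true
})
console.log(reverted.model.snapshot_id)
----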
*/ + querystring?: { [key: string]: any } & { id?: never, timeout?: never } } export interface MlStartDataFrameAnalyticsResponse { @@ -15896,10 +19070,18 @@ export interface MlStartDataFrameAnalyticsResponse { } export interface MlStartDatafeedRequest extends RequestBase { +/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ datafeed_id: Id + /** Refer to the description for the `end` query parameter. */ end?: DateTime + /** Refer to the description for the `start` query parameter. */ start?: DateTime + /** Refer to the description for the `timeout` query parameter. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, end?: never, start?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, end?: never, start?: never, timeout?: never } } export interface MlStartDatafeedResponse { @@ -15908,15 +19090,28 @@ export interface MlStartDatafeedResponse { } export interface MlStartTrainedModelDeploymentRequest extends RequestBase { +/** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ model_id: Id + /** The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. */ cache_size?: ByteSize + /** A unique identifier for the deployment of the model. */ deployment_id?: string + /** The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. */ number_of_allocations?: integer + /** The deployment priority. */ priority?: MlTrainingPriority + /** Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests are rejected with a 429 error. */ queue_capacity?: integer + /** Sets the number of threads used by each model allocation during inference. This generally increases the inference speed. The inference process is a compute-bound process; any number greater than the number of available hardware threads on the machine does not increase the inference speed. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. */ threads_per_allocation?: integer + /** Specifies the amount of time to wait for the model to deploy. */ timeout?: Duration + /** Specifies the allocation status to wait for before returning. */ wait_for?: MlDeploymentAllocationState + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never } } export interface MlStartTrainedModelDeploymentResponse { @@ -15924,10 +19119,18 @@ export interface MlStartTrainedModelDeploymentResponse { } export interface MlStopDataFrameAnalyticsRequest extends RequestBase { +/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ id: Id + /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is true, which returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** If true, the data frame analytics job is stopped forcefully. */ force?: boolean + /** Controls the amount of time to wait until the data frame analytics job stops. Defaults to 20 seconds. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, force?: never, timeout?: never } } export interface MlStopDataFrameAnalyticsResponse { @@ -15935,10 +19138,18 @@ export interface MlStopDataFrameAnalyticsResponse { } export interface MlStopDatafeedRequest extends RequestBase { +/** Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as the identifier. */ datafeed_id: Id + /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean + /** Refer to the description for the `force` query parameter. */ force?: boolean + /** Refer to the description for the `timeout` query parameter. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, force?: never, timeout?: never } } export interface MlStopDatafeedResponse { @@ -15946,9 +19157,16 @@ export interface MlStopDatafeedResponse { } export interface MlStopTrainedModelDeploymentRequest extends RequestBase { +/** The unique identifier of the trained model. */ model_id: Id + /** Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. 
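A sketch of `MlStartTrainedModelDeploymentRequest`; the allocation counts are illustrative, and `wait_for: 'fully_allocated'` blocks until every allocation has started.

[source,ts]
----
// Deploy a trained model and wait for full allocation
const deployment = await client.ml.startTrainedModelDeployment({
  model_id: 'my-nlp-model', // hypothetical PyTorch model id
  number_of_allocations: 2,
  threads_per_allocation: 1,
  wait_for: 'fully_allocated',
  timeout: '60s'
})
console.log(deployment.assignment)
----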
By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you restart the model deployment. */ force?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, force?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, force?: never } } export interface MlStopTrainedModelDeploymentResponse { @@ -15956,11 +19174,20 @@ export interface MlStopTrainedModelDeploymentResponse { } export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { +/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ id: Id + /** A description of the job. */ description?: string + /** The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. */ model_memory_limit?: string + /** The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. */ max_num_threads?: integer + /** Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ allow_lazy_start?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, allow_lazy_start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, allow_lazy_start?: never } } export interface MlUpdateDataFrameAnalyticsResponse { @@ -15979,26 +19206,48 @@ export interface MlUpdateDataFrameAnalyticsResponse { } export interface MlUpdateDatafeedRequest extends RequestBase { +/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ datafeed_id: Id + /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: * `all`: Match any data stream or index, including hidden ones. * `closed`: Match closed, non-hidden indices. 
Also matches any non-hidden data stream. Data streams cannot be closed. * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ expand_wildcards?: ExpandWildcards + /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean + /** If `true`, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ aggregations?: Record + /** Datafeeds might search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. */ chunking_config?: MlChunkingConfig + /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ indices?: string[] /** @alias indices */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ indexes?: string[] + /** Specifies index expansion options that are used during search. */ indices_options?: IndicesOptions job_id?: Id + /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ max_empty_searches?: integer + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also changed. 
Therefore, the time required to learn might be long and the understandability of the results is unpredictable. If you want to make significant changes to the source data, it is recommended that you clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one when you are satisfied with the results of the job. */ query?: QueryDslQueryContainer + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */ query_delay?: Duration + /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields + /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`. */ scroll_size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never } } export interface MlUpdateDatafeedResponse { @@ -16020,10 +19269,18 @@ export interface MlUpdateDatafeedResponse { } export interface MlUpdateFilterRequest extends RequestBase { +/** A string that uniquely identifies a filter. */ filter_id: Id + /** The items to add to the filter. */ add_items?: string[] + /** A description for the filter. */ description?: string + /** The items to remove from the filter. */ remove_items?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { filter_id?: never, add_items?: never, description?: never, remove_items?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { filter_id?: never, add_items?: never, description?: never, remove_items?: never } } export interface MlUpdateFilterResponse { @@ -16033,22 +19290,38 @@ export interface MlUpdateFilterResponse { } export interface MlUpdateJobRequest extends RequestBase { +/** Identifier for the job. */ job_id: Id + /** Advanced configuration option. 
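// A short sketch of narrowing a datafeed with `MlUpdateDatafeedRequest` above,
// reusing the `client` from the earlier sketch; the datafeed id and query are illustrative.
await client.ml.updateDatafeed({
  datafeed_id: 'datafeed-loganalytics',
  query: { term: { 'event.dataset': 'nginx.access' } },
  scroll_size: 1000
})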
Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If `false` and a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to `true`, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. */ allow_lazy_open?: boolean analysis_limits?: MlAnalysisMemoryLimit + /** Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the value too low. If the job is open when you make the update, you must stop the datafeed, close the job, then reopen the job and restart the datafeed for the changes to take effect. */ background_persist_interval?: Duration + /** Advanced configuration option. Contains custom meta data about the job. For example, it can contain custom URL information as shown in Adding custom URLs to machine learning results. */ custom_settings?: Record categorization_filters?: string[] + /** A description of the job. */ description?: string model_plot_config?: MlModelPlotConfig model_prune_window?: Duration + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. For jobs created before version 7.8.0, the default value matches `model_snapshot_retention_days`. */ daily_model_snapshot_retention_after_days?: long + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. */ model_snapshot_retention_days?: long + /** Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. */ renormalization_window_days?: long + /** Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. */ results_retention_days?: long + /** A list of job groups. A job can belong to no groups or many. */ groups?: string[] + /** An array of detector update objects. */ detectors?: MlDetectorUpdate[] + /** Settings related to how categorization interacts with partition fields. */ per_partition_categorization?: MlPerPartitionCategorization + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { job_id?: never, allow_lazy_open?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, categorization_filters?: never, description?: never, model_plot_config?: never, model_prune_window?: never, daily_model_snapshot_retention_after_days?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_retention_days?: never, groups?: never, detectors?: never, per_partition_categorization?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_lazy_open?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, categorization_filters?: never, description?: never, model_plot_config?: never, model_prune_window?: never, daily_model_snapshot_retention_after_days?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_retention_days?: never, groups?: never, detectors?: never, per_partition_categorization?: never } } export interface MlUpdateJobResponse { @@ -16076,10 +19349,18 @@ export interface MlUpdateJobResponse { } export interface MlUpdateModelSnapshotRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** Identifier for the model snapshot. */ snapshot_id: Id + /** A description of the model snapshot. */ description?: string + /** If `true`, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. However, this snapshot will be deleted when the job is deleted. */ retain?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, description?: never, retain?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, description?: never, retain?: never } } export interface MlUpdateModelSnapshotResponse { @@ -16088,8 +19369,14 @@ export interface MlUpdateModelSnapshotResponse { } export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase { +/** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ model_id: Id + /** The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. */ number_of_allocations?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, number_of_allocations?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, number_of_allocations?: never } } export interface MlUpdateTrainedModelDeploymentResponse { @@ -16097,10 +19384,18 @@ export interface MlUpdateTrainedModelDeploymentResponse { } export interface MlUpgradeJobSnapshotRequest extends RequestBase { +/** Identifier for the anomaly detection job. */ job_id: Id + /** A numerical character string that uniquely identifies the model snapshot. 
*/ snapshot_id: Id + /** When true, the API won’t respond until the upgrade is complete. Otherwise, it responds as soon as the upgrade task is assigned to a node. */ wait_for_completion?: boolean + /** Controls the time to wait for the request to complete. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, wait_for_completion?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, wait_for_completion?: never, timeout?: never } } export interface MlUpgradeJobSnapshotResponse { @@ -16118,22 +19413,38 @@ export interface MlValidateRequest extends RequestBase { model_snapshot_id?: Id model_snapshot_retention_days?: long results_index_name?: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, analysis_config?: never, analysis_limits?: never, data_description?: never, description?: never, model_plot?: never, model_snapshot_id?: never, model_snapshot_retention_days?: never, results_index_name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, analysis_config?: never, analysis_limits?: never, data_description?: never, description?: never, model_plot?: never, model_snapshot_id?: never, model_snapshot_retention_days?: never, results_index_name?: never } } export type MlValidateResponse = AcknowledgedResponseBase export interface MlValidateDetectorRequest extends RequestBase { detector?: MlDetector + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { detector?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { detector?: never } } export type MlValidateDetectorResponse = AcknowledgedResponseBase export interface MonitoringBulkRequest extends RequestBase { +/** Default document type for items which don't provide one */ type?: string + /** Identifier of the monitored system */ system_id: string + /** API version of the monitored system */ system_api_version: string + /** Collection interval (e.g., '10s' or '10000ms') of the payload */ interval: Duration operations?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { type?: never, system_id?: never, system_api_version?: never, interval?: never, operations?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { type?: never, system_id?: never, system_api_version?: never, interval?: never, operations?: never } } export interface MonitoringBulkResponse { @@ -16636,8 +19947,14 @@ export interface NodesTransportHistogram { } export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBase { +/** Comma-separated list of node IDs or names used to limit returned information. */ node_id: NodeIds + /** Specifies the maximum `archive_version` to be cleared from the archive. */ max_archive_version: long + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, max_archive_version?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { node_id?: never, max_archive_version?: never } } export type NodesClearRepositoriesMeteringArchiveResponse = NodesClearRepositoriesMeteringArchiveResponseBase @@ -16648,7 +19965,12 @@ export interface NodesClearRepositoriesMeteringArchiveResponseBase extends Nodes } export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { +/** Comma-separated list of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). */ node_id: NodeIds + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never } } export type NodesGetRepositoriesMeteringInfoResponse = NodesGetRepositoriesMeteringInfoResponseBase @@ -16659,14 +19981,26 @@ export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodes } export interface NodesHotThreadsRequest extends RequestBase { +/** List of node IDs or names used to limit returned information. */ node_id?: NodeIds + /** If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out. */ ignore_idle_threads?: boolean + /** The interval to do the second sampling of threads. */ interval?: Duration + /** Number of samples of thread stacktrace. */ snapshots?: long + /** Specifies the number of hot threads to provide information for. */ threads?: long + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The type to sample. */ type?: ThreadType + /** The sort order for 'cpu' type (default: total) */ sort?: ThreadType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, ignore_idle_threads?: never, interval?: never, snapshots?: never, threads?: never, timeout?: never, type?: never, sort?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, ignore_idle_threads?: never, interval?: never, snapshots?: never, threads?: never, timeout?: never, type?: never, sort?: never } } export interface NodesHotThreadsResponse { @@ -17021,10 +20355,18 @@ export interface NodesInfoNodeThreadPoolInfo { } export interface NodesInfoRequest extends RequestBase { +/** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds + /** Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest. */ metric?: Metrics + /** If true, returns settings in flat format. */ flat_settings?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, metric?: never, flat_settings?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
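// A sketch of the hot threads request typed above, reusing the same `client`;
// the parameter values are illustrative.
const hotThreads = await client.nodes.hotThreads({
  threads: 3,        // report the three hottest threads per node
  interval: '500ms', // second sampling interval
  type: 'cpu'
})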
*/ + querystring?: { [key: string]: any } & { node_id?: never, metric?: never, flat_settings?: never, timeout?: never } } export type NodesInfoResponse = NodesInfoResponseBase @@ -17035,9 +20377,16 @@ export interface NodesInfoResponseBase extends NodesNodesResponseBase { } export interface NodesReloadSecureSettingsRequest extends RequestBase { +/** The names of particular nodes in the cluster to target. */ node_id?: NodeIds + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The password for the Elasticsearch keystore. */ secure_settings_password?: Password + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, timeout?: never, secure_settings_password?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, timeout?: never, secure_settings_password?: never } } export type NodesReloadSecureSettingsResponse = NodesReloadSecureSettingsResponseBase @@ -17048,18 +20397,34 @@ export interface NodesReloadSecureSettingsResponseBase extends NodesNodesRespons } export interface NodesStatsRequest extends RequestBase { +/** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds + /** Limit the information returned to the specified metrics */ metric?: Metrics + /** Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. */ index_metric?: Metrics + /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. */ completion_fields?: Fields + /** Comma-separated list or wildcard expressions of fields to include in fielddata statistics. */ fielddata_fields?: Fields + /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ fields?: Fields + /** Comma-separated list of search groups to include in the search statistics. */ groups?: boolean + /** If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). */ include_segment_file_sizes?: boolean + /** Indicates whether statistics are aggregated at the cluster, index, or shard level. */ level?: Level + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** A comma-separated list of document types for the indexing index metric. */ types?: string[] + /** If `true`, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, metric?: never, index_metric?: never, completion_fields?: never, fielddata_fields?: never, fields?: never, groups?: never, include_segment_file_sizes?: never, level?: never, timeout?: never, types?: never, include_unloaded_segments?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { node_id?: never, metric?: never, index_metric?: never, completion_fields?: never, fielddata_fields?: never, fields?: never, groups?: never, include_segment_file_sizes?: never, level?: never, timeout?: never, types?: never, include_unloaded_segments?: never } } export type NodesStatsResponse = NodesStatsResponseBase @@ -17077,9 +20442,16 @@ export interface NodesUsageNodeUsage { } export interface NodesUsageRequest extends RequestBase { +/** A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes */ node_id?: NodeIds + /** Limits the information returned to the specific metrics. A comma-separated list of the following options: `_all`, `rest_actions`. */ metric?: Metrics + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, metric?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, metric?: never, timeout?: never } } export type NodesUsageResponse = NodesUsageResponseBase @@ -17118,27 +20490,49 @@ export interface QueryRulesQueryRuleset { } export interface QueryRulesDeleteRuleRequest extends RequestBase { +/** The unique identifier of the query ruleset containing the rule to delete */ ruleset_id: Id + /** The unique identifier of the query rule within the specified ruleset to delete */ rule_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, rule_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rule_id?: never } } export type QueryRulesDeleteRuleResponse = AcknowledgedResponseBase export interface QueryRulesDeleteRulesetRequest extends RequestBase { +/** The unique identifier of the query ruleset to delete */ ruleset_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never } } export type QueryRulesDeleteRulesetResponse = AcknowledgedResponseBase export interface QueryRulesGetRuleRequest extends RequestBase { +/** The unique identifier of the query ruleset containing the rule to retrieve */ ruleset_id: Id + /** The unique identifier of the query rule within the specified ruleset to retrieve */ rule_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, rule_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rule_id?: never } } export type QueryRulesGetRuleResponse = QueryRulesQueryRule export interface QueryRulesGetRulesetRequest extends RequestBase { +/** The unique identifier of the query ruleset */ ruleset_id: Id + /** All values in `body` will be added to the request body. 
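// A sketch of `NodesStatsRequest` above, reusing the same `client`: shard-level
// statistics for the indices metric only.
const stats = await client.nodes.stats({
  metric: 'indices',
  level: 'shards'
})
console.log(Object.keys(stats.nodes)) // one entry per responding node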
*/ + body?: string | { [key: string]: any } & { ruleset_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never } } export type QueryRulesGetRulesetResponse = QueryRulesQueryRuleset @@ -17151,8 +20545,14 @@ export interface QueryRulesListRulesetsQueryRulesetListItem { } export interface QueryRulesListRulesetsRequest extends RequestBase { +/** The offset from the first result to fetch. */ from?: integer + /** The maximum number of results to retrieve. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never } } export interface QueryRulesListRulesetsResponse { @@ -17161,12 +20561,21 @@ export interface QueryRulesListRulesetsResponse { } export interface QueryRulesPutRuleRequest extends RequestBase { +/** The unique identifier of the query ruleset containing the rule to be created or updated. */ ruleset_id: Id + /** The unique identifier of the query rule within the specified ruleset to be created or updated. */ rule_id: Id + /** The type of rule. */ type: QueryRulesQueryRuleType + /** The criteria that must be met for the rule to be applied. If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] + /** The actions to take when the rule is matched. The format of this action depends on the rule type. */ actions: QueryRulesQueryRuleActions priority?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, rule_id?: never, type?: never, criteria?: never, actions?: never, priority?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rule_id?: never, type?: never, criteria?: never, actions?: never, priority?: never } } export interface QueryRulesPutRuleResponse { @@ -17174,8 +20583,13 @@ export interface QueryRulesPutRuleResponse { } export interface QueryRulesPutRulesetRequest extends RequestBase { +/** The unique identifier of the query ruleset to be created or updated. */ ruleset_id: Id rules: QueryRulesQueryRule | QueryRulesQueryRule[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, rules?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rules?: never } } export interface QueryRulesPutRulesetResponse { @@ -17188,8 +20602,14 @@ export interface QueryRulesTestQueryRulesetMatchedRule { } export interface QueryRulesTestRequest extends RequestBase { +/** The unique identifier of the query ruleset to be created or updated */ ruleset_id: Id + /** The match criteria to apply to rules in the given query ruleset. Match criteria should match the keys defined in the `criteria.metadata` field of the rule. */ match_criteria: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, match_criteria?: never } + /** All values in `querystring` will be added to the request querystring. 
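// A sketch of the query rules round trip modelled by the types above, reusing the
// same `client`; ruleset/rule ids, criteria, and pinned document ids are illustrative.
await client.queryRules.putRule({
  ruleset_id: 'my-ruleset',
  rule_id: 'promote-docs',
  type: 'pinned',
  criteria: [{ type: 'exact', metadata: 'user_query', values: ['marvel'] }],
  actions: { ids: ['doc-1'] }
})

// Dry-run the ruleset against a set of match criteria.
const matched = await client.queryRules.test({
  ruleset_id: 'my-ruleset',
  match_criteria: { user_query: 'marvel' }
})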
*/ + querystring?: { [key: string]: any } & { ruleset_id?: never, match_criteria?: never } } export interface QueryRulesTestResponse { @@ -17230,7 +20650,12 @@ export interface RollupTermsGrouping { } export interface RollupDeleteJobRequest extends RequestBase { +/** Identifier for the job. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface RollupDeleteJobResponse { @@ -17241,7 +20666,12 @@ export interface RollupDeleteJobResponse { export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' export interface RollupGetJobsRequest extends RequestBase { +/** Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. */ id?: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface RollupGetJobsResponse { @@ -17287,7 +20717,12 @@ export interface RollupGetJobsRollupJobStatus { } export interface RollupGetRollupCapsRequest extends RequestBase { +/** Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. */ id?: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type RollupGetRollupCapsResponse = Record @@ -17314,7 +20749,12 @@ export interface RollupGetRollupIndexCapsIndexCapabilities { } export interface RollupGetRollupIndexCapsRequest extends RequestBase { +/** Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. */ index: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } } export type RollupGetRollupIndexCapsResponse = Record @@ -17333,28 +20773,51 @@ export interface RollupGetRollupIndexCapsRollupJobSummaryField { } export interface RollupPutJobRequest extends RequestBase { +/** Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. The ID is persistent; it is stored with the rolled up data. If you create a job, let it run for a while, then delete the job, the data that the job rolled up is still associated with this job ID. You cannot create a new job with the same ID since that could lead to problems with mismatched job configurations. */ id: Id + /** A cron string which defines the intervals when the rollup job should be executed. When the interval triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated to the time interval of the data being rolled up. For example, you may wish to create hourly rollups of your documents but to only run the indexer on a daily basis at midnight, as defined by the cron. The cron pattern is defined just like a Watcher cron schedule.
*/ cron: string + /** Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of the groups configuration as defining a set of tools that can later be used in aggregations to partition the data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. */ groups: RollupGroupings + /** The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to rollup the entire index or index-pattern. */ index_pattern: string + /** Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined on a per-field basis and for each field you configure which metric should be collected. */ metrics?: RollupFieldMetric[] + /** The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends to execute faster, but requires more memory during processing. This value has no effect on how the data is rolled up; it is merely used for tweaking the speed or memory cost of the indexer. */ page_size: integer + /** The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. */ rollup_index: IndexName + /** Time to wait for the request to complete. */ timeout?: Duration headers?: HttpHeaders + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, cron?: never, groups?: never, index_pattern?: never, metrics?: never, page_size?: never, rollup_index?: never, timeout?: never, headers?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, cron?: never, groups?: never, index_pattern?: never, metrics?: never, page_size?: never, rollup_index?: never, timeout?: never, headers?: never } } export type RollupPutJobResponse = AcknowledgedResponseBase export interface RollupRollupSearchRequest extends RequestBase { +/** A comma-separated list of data streams and indices used to limit the request. This parameter has the following rules: * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. * Multiple non-rollup indices may be specified. * Only one rollup index may be specified. If more than one are supplied, an exception occurs. * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. */ index: Indices + /** Indicates whether hits.total should be rendered as an integer or an object in the rest search response */ rest_total_hits_as_int?: boolean + /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean + /** Specifies aggregations. 
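// A sketch of creating a rollup job per `RollupPutJobRequest` above, reusing the
// same `client`; index pattern, cron schedule, and field names are illustrative.
await client.rollup.putJob({
  id: 'sensor-rollup',
  index_pattern: 'sensor-*',
  rollup_index: 'sensor_rollup',
  cron: '*/30 * * * * ?', // run the indexer every 30 seconds
  page_size: 1000,
  groups: {
    date_histogram: { field: 'timestamp', fixed_interval: '1h' },
    terms: { fields: ['node'] }
  },
  metrics: [{ field: 'temperature', metrics: ['min', 'max', 'avg'] }]
})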
*/ aggregations?: Record /** @alias aggregations */ + /** Specifies aggregations. */ aggs?: Record + /** Specifies a DSL query that is subject to some limitations. */ query?: QueryDslQueryContainer + /** Must be zero if set, as rollups work on pre-aggregated data. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, rest_total_hits_as_int?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, rest_total_hits_as_int?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, size?: never } } export interface RollupRollupSearchResponse> { @@ -17367,7 +20830,12 @@ export interface RollupRollupSearchResponse export interface SearchApplicationListRequest extends RequestBase { +/** Query in the Lucene query string syntax. */ q?: string + /** Starting offset. */ from?: integer + /** Specifies a max number of results to get. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { q?: never, from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { q?: never, from?: never, size?: never } } export interface SearchApplicationListResponse { @@ -17445,10 +20947,17 @@ export interface SearchApplicationListResponse { } export interface SearchApplicationPostBehavioralAnalyticsEventRequest extends RequestBase { +/** The name of the behavioral analytics collection. */ collection_name: Name + /** The analytics event type. */ event_type: SearchApplicationEventType + /** Whether the response type has to include more details */ debug?: boolean payload?: any + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { collection_name?: never, event_type?: never, debug?: never, payload?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { collection_name?: never, event_type?: never, debug?: never, payload?: never } } export interface SearchApplicationPostBehavioralAnalyticsEventResponse { @@ -17457,9 +20966,15 @@ export interface SearchApplicationPostBehavioralAnalyticsEventResponse { } export interface SearchApplicationPutRequest extends RequestBase { +/** The name of the search application to be created or updated. */ name: Name + /** If `true`, this request cannot replace or update existing Search Applications. */ create?: boolean search_application?: SearchApplicationSearchApplicationParameters + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, search_application?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, create?: never, search_application?: never } } export interface SearchApplicationPutResponse { @@ -17471,23 +20986,40 @@ export interface SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResp } export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestBase { +/** The name of the analytics collection to be created or updated. */ name: Name + /** All values in `body` will be added to the request body. 
body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } } export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase export interface SearchApplicationRenderQueryRequest extends RequestBase { +/** The name of the search application to render the query for. */ name: Name params?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, params?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, params?: never } } export interface SearchApplicationRenderQueryResponse { } export interface SearchApplicationSearchRequest extends RequestBase { +/** The name of the search application to be searched. */ name: Name + /** Determines whether aggregation names are prefixed by their respective types in the response. */ typed_keys?: boolean + /** Query parameters specific to this request, which will override any defaults specified in the template. */ params?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, typed_keys?: never, params?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, typed_keys?: never, params?: never } } export type SearchApplicationSearchResponse> = SearchResponseBody @@ -17499,8 +21031,13 @@ export interface SearchableSnapshotsCacheStatsNode { } export interface SearchableSnapshotsCacheStatsRequest extends RequestBase { +/** The names of the nodes in the cluster to target. */ node_id?: NodeIds master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never } } export interface SearchableSnapshotsCacheStatsResponse { @@ -17519,10 +21056,18 @@ export interface SearchableSnapshotsCacheStatsShared { } export interface SearchableSnapshotsClearCacheRequest extends RequestBase { +/** A comma-separated list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). */ index?: Indices + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, expand_wildcards?: never, allow_no_indices?: never, ignore_unavailable?: never } + /** All values in `querystring` will be added to the request querystring.
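// A sketch of querying a search application via `SearchApplicationSearchRequest`
// above, reusing the same `client`; the application name and template params are illustrative.
const result = await client.searchApplication.search({
  name: 'website-search',
  params: { query_string: 'kayak', size: 10 }
})
console.log(result.hits.hits)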
*/ + querystring?: { [key: string]: any } & { index?: never, expand_wildcards?: never, allow_no_indices?: never, ignore_unavailable?: never } } export type SearchableSnapshotsClearCacheResponse = any @@ -17534,15 +21079,28 @@ export interface SearchableSnapshotsMountMountedSnapshot { } export interface SearchableSnapshotsMountRequest extends RequestBase { +/** The name of the repository containing the snapshot of the index to mount. */ repository: Name + /** The name of the snapshot of the index to mount. */ snapshot: Name + /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** If true, the request blocks until the operation is complete. */ wait_for_completion?: boolean + /** The mount option for the searchable snapshot index. */ storage?: string + /** The name of the index contained in the snapshot whose data is to be mounted. If no `renamed_index` is specified, this name will also be used to create the new index. */ index: IndexName + /** The name of the index that will be created. */ renamed_index?: IndexName + /** The settings that should be added to the index when it is mounted. */ index_settings?: Record + /** The names of settings that should be removed from the index when it is mounted. */ ignore_index_settings?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, storage?: never, index?: never, renamed_index?: never, index_settings?: never, ignore_index_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, storage?: never, index?: never, renamed_index?: never, index_settings?: never, ignore_index_settings?: never } } export interface SearchableSnapshotsMountResponse { @@ -17550,8 +21108,14 @@ export interface SearchableSnapshotsMountResponse { } export interface SearchableSnapshotsStatsRequest extends RequestBase { +/** A comma-separated list of data streams and indices to retrieve statistics for. */ index?: Indices + /** Return stats aggregated at cluster, index or shard level */ level?: SearchableSnapshotsStatsLevel + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, level?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, level?: never } } export interface SearchableSnapshotsStatsResponse { @@ -17800,10 +21364,18 @@ export interface SecurityUserProfileWithMetadata extends SecurityUserProfile { } export interface SecurityActivateUserProfileRequest extends RequestBase { +/** The user's Elasticsearch access token or JWT. Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. */ access_token?: string + /** The type of grant. */ grant_type: SecurityGrantType + /** The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. 
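// A sketch of mounting an index from a snapshot per `SearchableSnapshotsMountRequest`
// above, reusing the same `client`; repository, snapshot, and index names are illustrative.
await client.searchableSnapshots.mount({
  repository: 'my_repository',
  snapshot: 'my_snapshot',
  index: 'my_index',
  renamed_index: 'my_index_mounted',
  storage: 'full_copy', // or 'shared_cache' for a partially mounted index
  wait_for_completion: true
})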
*/ password?: string + /** The username that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. */ username?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { access_token?: never, grant_type?: never, password?: never, username?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { access_token?: never, grant_type?: never, password?: never, username?: never } } export type SecurityActivateUserProfileResponse = SecurityUserProfileWithMetadata @@ -17814,6 +21386,10 @@ export interface SecurityAuthenticateAuthenticateApiKey { } export interface SecurityAuthenticateRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface SecurityAuthenticateResponse { @@ -17836,8 +21412,14 @@ export interface SecurityAuthenticateToken { } export interface SecurityBulkDeleteRoleRequest extends RequestBase { +/** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** An array of role names to delete */ names: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, names?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, names?: never } } export interface SecurityBulkDeleteRoleResponse { @@ -17847,8 +21429,14 @@ export interface SecurityBulkDeleteRoleResponse { } export interface SecurityBulkPutRoleRequest extends RequestBase { +/** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** A dictionary of role name to RoleDescriptor objects to add or update */ roles: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, roles?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, roles?: never } } export interface SecurityBulkPutRoleResponse { @@ -17859,10 +21447,18 @@ export interface SecurityBulkPutRoleResponse { } export interface SecurityBulkUpdateApiKeysRequest extends RequestBase { +/** Expiration time for the API keys. By default, API keys never expire. This property can be omitted to leave the value unchanged. */ expiration?: Duration + /** The API key identifiers. */ ids: string | string[] + /** Arbitrary nested metadata to associate with the API keys. Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. Any information specified with this parameter fully replaces metadata previously associated with the API key. */ metadata?: Metadata + /** The role descriptors to assign to the API keys. 
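// A sketch of the bulk role endpoints typed above, reusing the same `client`;
// the role names and index privilege are illustrative.
await client.security.bulkPutRole({
  roles: {
    'logs-reader': {
      indices: [{ names: ['logs-*'], privileges: ['read'] }]
    }
  }
})
await client.security.bulkDeleteRole({ names: ['stale-role'] })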
An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter. The structure of a role descriptor is the same as the request for the create API keys API. */ role_descriptors?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { expiration?: never, ids?: never, metadata?: never, role_descriptors?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { expiration?: never, ids?: never, metadata?: never, role_descriptors?: never } } export interface SecurityBulkUpdateApiKeysResponse { @@ -17872,17 +21468,30 @@ export interface SecurityBulkUpdateApiKeysResponse { } export interface SecurityChangePasswordRequest extends RequestBase { +/** The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. */ username?: Username + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** The new password value. Passwords must be at least 6 characters long. */ password?: Password + /** A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting. */ password_hash?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, refresh?: never, password?: never, password_hash?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, refresh?: never, password?: never, password_hash?: never } } export interface SecurityChangePasswordResponse { } export interface SecurityClearApiKeyCacheRequest extends RequestBase { +/** Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. */ ids: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ids?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ids?: never } } export interface SecurityClearApiKeyCacheResponse { @@ -17892,7 +21501,12 @@ export interface SecurityClearApiKeyCacheResponse { } export interface SecurityClearCachedPrivilegesRequest extends RequestBase { +/** A comma-separated list of applications. To clear all applications, use an asterisk (`*`). It does not support other wildcard patterns. */ application: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { application?: never } + /** All values in `querystring` will be added to the request querystring.
 export interface SecurityClearApiKeyCacheRequest extends RequestBase {
+  /** Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. */
   ids: Ids
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { ids?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { ids?: never }
 }

 export interface SecurityClearApiKeyCacheResponse {
@@ -17892,7 +21501,12 @@ export interface SecurityClearApiKeyCacheResponse {
 }

 export interface SecurityClearCachedPrivilegesRequest extends RequestBase {
+  /** A comma-separated list of applications. To clear all applications, use an asterisk (`*`). It does not support other wildcard patterns. */
   application: Name
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { application?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { application?: never }
 }

 export interface SecurityClearCachedPrivilegesResponse {
@@ -17902,8 +21516,14 @@ export interface SecurityClearCachedPrivilegesResponse {
 }

 export interface SecurityClearCachedRealmsRequest extends RequestBase {
+  /** A comma-separated list of realms. To clear all realms, use an asterisk (`*`). It does not support other wildcard patterns. */
   realms: Names
+  /** A comma-separated list of the users to clear from the cache. If you do not specify this parameter, the API evicts all users from the user cache. */
   usernames?: string[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { realms?: never, usernames?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { realms?: never, usernames?: never }
 }

 export interface SecurityClearCachedRealmsResponse {
@@ -17913,7 +21533,12 @@ export interface SecurityClearCachedRealmsResponse {
 }

 export interface SecurityClearCachedRolesRequest extends RequestBase {
+  /** A comma-separated list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard patterns. */
   name: Names
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never }
 }

 export interface SecurityClearCachedRolesResponse {
@@ -17923,9 +21548,16 @@ export interface SecurityClearCachedRolesResponse {
 }

 export interface SecurityClearCachedServiceTokensRequest extends RequestBase {
+  /** The namespace, which is a top-level grouping of service accounts. */
   namespace: Namespace
+  /** The name of the service, which must be unique within its namespace. */
   service: Service
+  /** A comma-separated list of token names to evict from the service account token caches. Use a wildcard (`*`) to evict all tokens that belong to a service account. It does not support other wildcard patterns. */
   name: Names
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { namespace?: never, service?: never, name?: never }
 }

 export interface SecurityClearCachedServiceTokensResponse {
@@ -17935,11 +21567,20 @@ export interface SecurityClearCachedServiceTokensResponse {
 }

 export interface SecurityCreateApiKeyRequest extends RequestBase {
+  /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
+  /** The expiration time for the API key. By default, API keys never expire. */
   expiration?: Duration
+  /** A name for the API key. */
   name?: Name
+  /** An array of role descriptors for this API key. When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. The structure of a role descriptor is the same as the request for the create role API. For more details, refer to the create or update roles API. NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. In this case, you must explicitly specify a role descriptor with no privileges. The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. */
   role_descriptors?: Record
+  /** Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. */
   metadata?: Metadata
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { refresh?: never, expiration?: never, name?: never, role_descriptors?: never, metadata?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { refresh?: never, expiration?: never, name?: never, role_descriptors?: never, metadata?: never }
 }

 export interface SecurityCreateApiKeyResponse {
@@ -17951,10 +21592,18 @@ export interface SecurityCreateApiKeyResponse {
 }
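// A sketch of SecurityCreateApiKeyRequest in use (editorial illustration): the role
// name, index pattern, and metadata values are placeholders, not values from this patch.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

const created = await client.security.createApiKey({
  name: 'ingest-key',
  expiration: '30d', // a Duration string; omit for a key that never expires
  role_descriptors: { // restrict the key to a subset of the caller's permissions
    'logs-write': { indices: [{ names: ['logs-*'], privileges: ['create_doc'] }] }
  },
  metadata: { purpose: 'log shipping' } // keys beginning with `_` are reserved
})
// `encoded` can be sent directly in an `Authorization: ApiKey ...` header.
console.log(created.id, created.encoded)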
 export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase {
+  /** The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At least one of them must be specified. NOTE: No explicit privileges should be specified for either search or replication access. The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. */
   access: SecurityAccess
+  /** Expiration time for the API key. By default, API keys never expire. */
   expiration?: Duration
+  /** Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. */
   metadata?: Metadata
+  /** Specifies the name for this API key. */
   name: Name
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { access?: never, expiration?: never, metadata?: never, name?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { access?: never, expiration?: never, metadata?: never, name?: never }
 }

 export interface SecurityCreateCrossClusterApiKeyResponse {
@@ -17966,10 +21615,18 @@ export interface SecurityCreateCrossClusterApiKeyResponse {
 }

 export interface SecurityCreateServiceTokenRequest extends RequestBase {
+  /** The name of the namespace, which is a top-level grouping of service accounts. */
   namespace: Namespace
+  /** The name of the service. */
   service: Service
+  /** The name for the service account token. If omitted, a random name will be generated. Token names must be at least one and no more than 256 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. NOTE: Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. */
   name?: Name
+  /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never }
 }

 export interface SecurityCreateServiceTokenResponse {
@@ -18003,7 +21660,12 @@ export interface SecurityDelegatePkiAuthenticationRealm {
 }

 export interface SecurityDelegatePkiRequest extends RequestBase {
+  /** The X509Certificate chain, which is represented as an ordered string array. Each string in the array is the base64-encoded (Section 4 of RFC 4648, not base64url-encoded) DER encoding of the certificate. The first element is the target certificate that contains the subject distinguished name that is requesting access. This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. */
   x509_certificate_chain: string[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { x509_certificate_chain?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { x509_certificate_chain?: never }
 }

 export interface SecurityDelegatePkiResponse {
@@ -18018,16 +21680,29 @@ export interface SecurityDeletePrivilegesFoundStatus {
 }

 export interface SecurityDeletePrivilegesRequest extends RequestBase {
+  /** The name of the application. Application privileges are always associated with exactly one application. */
   application: Name
+  /** The name of the privilege. */
   name: Names
+  /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { application?: never, name?: never, refresh?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { application?: never, name?: never, refresh?: never }
 }

 export type SecurityDeletePrivilegesResponse = Record>

 export interface SecurityDeleteRoleRequest extends RequestBase {
+  /** The name of the role. */
   name: Name
+  /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, refresh?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, refresh?: never }
 }

 export interface SecurityDeleteRoleResponse {
@@ -18035,8 +21710,14 @@ export interface SecurityDeleteRoleResponse {
 }

 export interface SecurityDeleteRoleMappingRequest extends RequestBase {
+  /** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */
   name: Name
+  /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, refresh?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, refresh?: never }
 }

 export interface SecurityDeleteRoleMappingResponse {
@@ -18044,10 +21725,18 @@ export interface SecurityDeleteRoleMappingResponse {
 }

 export interface SecurityDeleteServiceTokenRequest extends RequestBase {
+  /** The namespace, which is a top-level grouping of service accounts. */
   namespace: Namespace
+  /** The service name. */
   service: Service
+  /** The name of the service account token. */
   name: Name
+  /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never }
 }

 export interface SecurityDeleteServiceTokenResponse {
@@ -18055,8 +21744,14 @@ export interface SecurityDeleteServiceTokenResponse {
 }

 export interface SecurityDeleteUserRequest extends RequestBase {
+  /** An identifier for the user. */
   username: Username
+  /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { username?: never, refresh?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { username?: never, refresh?: never }
 }

 export interface SecurityDeleteUserResponse {
@@ -18064,36 +21759,64 @@ export interface SecurityDeleteUserResponse {
 }

 export interface SecurityDisableUserRequest extends RequestBase {
+  /** An identifier for the user. */
   username: Username
+  /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { username?: never, refresh?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { username?: never, refresh?: never }
 }

 export interface SecurityDisableUserResponse {
 }

 export interface SecurityDisableUserProfileRequest extends RequestBase {
+  /** Unique identifier for the user profile. */
   uid: SecurityUserProfileId
+  /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. */
   refresh?: Refresh
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { uid?: never, refresh?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { uid?: never, refresh?: never }
 }

 export type SecurityDisableUserProfileResponse = AcknowledgedResponseBase

 export interface SecurityEnableUserRequest extends RequestBase {
+  /** An identifier for the user. */
   username: Username
+  /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { username?: never, refresh?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { username?: never, refresh?: never }
 }

 export interface SecurityEnableUserResponse {
 }

 export interface SecurityEnableUserProfileRequest extends RequestBase {
+  /** A unique identifier for the user profile. */
   uid: SecurityUserProfileId
+  /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. */
   refresh?: Refresh
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { uid?: never, refresh?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { uid?: never, refresh?: never }
 }

 export type SecurityEnableUserProfileResponse = AcknowledgedResponseBase

 export interface SecurityEnrollKibanaRequest extends RequestBase {
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any }
 }

 export interface SecurityEnrollKibanaResponse {
@@ -18107,6 +21830,10 @@ export interface SecurityEnrollKibanaToken {
 }

 export interface SecurityEnrollNodeRequest extends RequestBase {
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any }
 }

 export interface SecurityEnrollNodeResponse {
@@ -18119,14 +21846,26 @@
 }

 export interface SecurityGetApiKeyRequest extends RequestBase {
+  /** An API key id. This parameter cannot be used with any of `name`, `realm_name` or `username`. */
   id?: Id
+  /** An API key name. This parameter cannot be used with any of `id`, `realm_name` or `username`. It supports prefix search with wildcard. */
   name?: Name
+  /** A boolean flag that can be used to query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. */
   owner?: boolean
+  /** The name of an authentication realm. This parameter cannot be used with either `id` or `name` or when the `owner` flag is set to `true`. */
   realm_name?: Name
+  /** The username of a user. This parameter cannot be used with either `id` or `name` or when the `owner` flag is set to `true`. */
   username?: Username
+  /** Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors. */
   with_limited_by?: boolean
+  /** A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. */
   active_only?: boolean
+  /** Determines whether to also retrieve the profile uid for the API key owner principal, if it exists. */
   with_profile_uid?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, name?: never, owner?: never, realm_name?: never, username?: never, with_limited_by?: never, active_only?: never, with_profile_uid?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, name?: never, owner?: never, realm_name?: never, username?: never, with_limited_by?: never, active_only?: never, with_profile_uid?: never }
 }

 export interface SecurityGetApiKeyResponse {
@@ -18134,6 +21873,10 @@
 }
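// A sketch of SecurityGetApiKeyRequest: list only the caller's own active keys.
// Assumes the same illustrative client setup as the earlier sketches.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

const { api_keys } = await client.security.getApiKey({ owner: true, active_only: true })
for (const key of api_keys) console.log(key.id, key.name, key.invalidated)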
 export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase {
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any }
 }

 export interface SecurityGetBuiltinPrivilegesResponse {
@@ -18143,14 +21886,25 @@ export interface SecurityGetBuiltinPrivilegesResponse {
 }

 export interface SecurityGetPrivilegesRequest extends RequestBase {
+  /** The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. */
   application?: Name
+  /** The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. */
   name?: Names
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { application?: never, name?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { application?: never, name?: never }
 }

 export type SecurityGetPrivilegesResponse = Record>

 export interface SecurityGetRoleRequest extends RequestBase {
+  /** The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about all roles. */
   name?: Names
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never }
 }

 export type SecurityGetRoleResponse = Record
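// A sketch of SecurityGetRoleRequest: the response is a record keyed by role name,
// so omitting `name` returns every role. The role name here is illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

const roles = await client.security.getRole({ name: 'logs-reader' })
console.log(Object.keys(roles)) // e.g. ['logs-reader']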
@@ -18169,14 +21923,25 @@ export interface SecurityGetRoleRole {
 }

 export interface SecurityGetRoleMappingRequest extends RequestBase {
+  /** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a comma-separated list. If you do not specify this parameter, the API returns information about all role mappings. */
   name?: Names
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never }
 }

 export type SecurityGetRoleMappingResponse = Record

 export interface SecurityGetServiceAccountsRequest extends RequestBase {
+  /** The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter. */
   namespace?: Namespace
+  /** The service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. */
   service?: Service
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { namespace?: never, service?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { namespace?: never, service?: never }
 }

 export type SecurityGetServiceAccountsResponse = Record
@@ -18195,8 +21960,14 @@ export interface SecurityGetServiceCredentialsNodesCredentialsFileToken {
 }

 export interface SecurityGetServiceCredentialsRequest extends RequestBase {
+  /** The name of the namespace. */
   namespace: Namespace
+  /** The service name. */
   service: Name
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { namespace?: never, service?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { namespace?: never, service?: never }
 }

 export interface SecurityGetServiceCredentialsResponse {
@@ -18207,7 +21978,12 @@ export interface SecurityGetServiceCredentialsResponse {
 }

 export interface SecurityGetSettingsRequest extends RequestBase {
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { master_timeout?: never }
 }

 export interface SecurityGetSettingsResponse {
@@ -18231,12 +22007,22 @@ export interface SecurityGetTokenAuthenticationProvider {
 }

 export interface SecurityGetTokenRequest extends RequestBase {
+  /** The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. */
   grant_type?: SecurityGetTokenAccessTokenGrantType
+  /** The scope of the token. Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. */
   scope?: string
+  /** The user's password. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. */
   password?: Password
+  /** The base64 encoded kerberos ticket. If you specify the `_kerberos` grant type, this parameter is required. This parameter is not valid with any other supported grant type. */
   kerberos_ticket?: string
+  /** The string that was returned when you created the token, which enables you to extend its life. If you specify the `refresh_token` grant type, this parameter is required. This parameter is not valid with any other supported grant type. */
   refresh_token?: string
+  /** The username that identifies the user. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. */
   username?: Username
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { grant_type?: never, scope?: never, password?: never, kerberos_ticket?: never, refresh_token?: never, username?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { grant_type?: never, scope?: never, password?: never, kerberos_ticket?: never, refresh_token?: never, username?: never }
 }

 export interface SecurityGetTokenResponse {
@@ -18255,16 +22041,28 @@ export interface SecurityGetTokenUserRealm {
 }
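// A sketch of SecurityGetTokenRequest using the `password` grant; as documented
// above, `username`/`password` are required for this grant type only. All values
// are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

const token = await client.security.getToken({
  grant_type: 'password',
  username: 'jacknich',
  password: 'a-long-password'
})
// The returned access token is sent as an `Authorization: Bearer ...` header.
console.log(token.access_token, token.expires_in)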
 export interface SecurityGetUserRequest extends RequestBase {
+  /** An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. */
   username?: Username | Username[]
+  /** Determines whether to retrieve the user profile UID, if it exists, for the users. */
   with_profile_uid?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { username?: never, with_profile_uid?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { username?: never, with_profile_uid?: never }
 }

 export type SecurityGetUserResponse = Record

 export interface SecurityGetUserPrivilegesRequest extends RequestBase {
+  /** The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. */
   application?: Name
+  /** The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. */
   priviledge?: Name
   username?: Name | null
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { application?: never, priviledge?: never, username?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { application?: never, priviledge?: never, username?: never }
 }

 export interface SecurityGetUserPrivilegesResponse {
@@ -18281,8 +22079,14 @@ export interface SecurityGetUserProfileGetUserProfileErrors {
 }

 export interface SecurityGetUserProfileRequest extends RequestBase {
+  /** A unique identifier for the user profile. */
   uid: SecurityUserProfileId | SecurityUserProfileId[]
+  /** A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=<key>` to retrieve content nested under the specified `<key>`. By default returns no `data` content. */
   data?: string | string[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { uid?: never, data?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { uid?: never, data?: never }
 }

 export interface SecurityGetUserProfileResponse {
@@ -18300,12 +22104,22 @@ export interface SecurityGrantApiKeyGrantApiKey {
 }

 export interface SecurityGrantApiKeyRequest extends RequestBase {
+  /** The API key. */
   api_key: SecurityGrantApiKeyGrantApiKey
+  /** The type of grant. Supported grant types are: `access_token`, `password`. */
   grant_type: SecurityGrantApiKeyApiKeyGrantType
+  /** The user's access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. */
   access_token?: string
+  /** The user name that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. */
   username?: Username
+  /** The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. */
   password?: Password
+  /** The name of the user to be impersonated. */
   run_as?: Username
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { api_key?: never, grant_type?: never, access_token?: never, username?: never, password?: never, run_as?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { api_key?: never, grant_type?: never, access_token?: never, username?: never, password?: never, run_as?: never }
 }

 export interface SecurityGrantApiKeyResponse {
@@ -18333,10 +22147,16 @@ export interface SecurityHasPrivilegesIndexPrivilegesCheck {
 }

 export type SecurityHasPrivilegesPrivileges = Record

 export interface SecurityHasPrivilegesRequest extends RequestBase {
+  /** Username */
   user?: Name
   application?: SecurityHasPrivilegesApplicationPrivilegesCheck[]
+  /** A list of the cluster privileges that you want to check. */
   cluster?: SecurityClusterPrivilege[]
   index?: SecurityHasPrivilegesIndexPrivilegesCheck[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { user?: never, application?: never, cluster?: never, index?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { user?: never, application?: never, cluster?: never, index?: never }
 }
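// A sketch of SecurityHasPrivilegesRequest: check cluster and index privileges in a
// single call. The index name is illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

const check = await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [{ names: ['logs-*'], privileges: ['read', 'view_index_metadata'] }]
})
console.log(check.has_all_requested) // true only if every requested privilege is held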
 export type SecurityHasPrivilegesResourcePrivileges = Record
@@ -18361,8 +22181,14 @@ export interface SecurityHasPrivilegesUserProfilePrivilegesCheck {
 }

 export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase {
+  /** A list of profile IDs. The privileges are checked for associated users of the profiles. */
   uids: SecurityUserProfileId[]
+  /** An object containing all the privileges to be checked. */
   privileges: SecurityHasPrivilegesUserProfilePrivilegesCheck
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { uids?: never, privileges?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { uids?: never, privileges?: never }
 }

 export interface SecurityHasPrivilegesUserProfileResponse {
@@ -18372,11 +22198,20 @@
 export interface SecurityInvalidateApiKeyRequest extends RequestBase {
   id?: Id
+  /** A list of API key ids. This parameter cannot be used with any of `name`, `realm_name`, or `username`. */
   ids?: Id[]
+  /** An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`. */
   name?: Name
+  /** Query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. */
   owner?: boolean
+  /** The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when the `owner` flag is set to `true`. */
   realm_name?: string
+  /** The username of a user. This parameter cannot be used with either `ids` or `name` or when the `owner` flag is set to `true`. */
   username?: Username
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, ids?: never, name?: never, owner?: never, realm_name?: never, username?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, ids?: never, name?: never, owner?: never, realm_name?: never, username?: never }
 }

 export interface SecurityInvalidateApiKeyResponse {
@@ -18387,10 +22222,18 @@ export interface SecurityInvalidateApiKeyResponse {
 }
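// A sketch of SecurityInvalidateApiKeyRequest: invalidate keys by name, honouring
// the parameter-exclusion rules documented above. The key name is illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

const res = await client.security.invalidateApiKey({ name: 'ingest-key' })
console.log(res.invalidated_api_keys, res.error_count)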
 export interface SecurityInvalidateTokenRequest extends RequestBase {
+  /** An access token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. */
   token?: string
+  /** A refresh token. This parameter cannot be used if any of `token`, `realm_name`, or `username` are used. */
   refresh_token?: string
+  /** The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`. */
   realm_name?: Name
+  /** The username of a user. This parameter cannot be used with either `refresh_token` or `token`. */
   username?: Username
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { token?: never, refresh_token?: never, realm_name?: never, username?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { token?: never, refresh_token?: never, realm_name?: never, username?: never }
 }

 export interface SecurityInvalidateTokenResponse {
@@ -18401,10 +22244,18 @@ export interface SecurityInvalidateTokenResponse {
 }

 export interface SecurityOidcAuthenticateRequest extends RequestBase {
+  /** Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */
   nonce: string
+  /** The name of the OpenID Connect realm. This property is useful in cases where multiple realms are defined. */
   realm?: string
+  /** The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. */
   redirect_uri: string
+  /** Maintain state between the authentication request and the response. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */
   state: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { nonce?: never, realm?: never, redirect_uri?: never, state?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { nonce?: never, realm?: never, redirect_uri?: never, state?: never }
 }

 export interface SecurityOidcAuthenticateResponse {
@@ -18415,8 +22266,14 @@ export interface SecurityOidcAuthenticateResponse {
 }

 export interface SecurityOidcLogoutRequest extends RequestBase {
+  /** The access token to be invalidated. */
   access_token: string
+  /** The refresh token to be invalidated. */
   refresh_token?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { access_token?: never, refresh_token?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { access_token?: never, refresh_token?: never }
 }

 export interface SecurityOidcLogoutResponse {
@@ -18424,11 +22281,20 @@ export interface SecurityOidcLogoutResponse {
 }

 export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase {
+  /** In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. It cannot be specified when *realm* is specified. One of *realm* or *iss* is required. */
   iss?: string
+  /** In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. This parameter is not valid when *realm* is specified. */
   login_hint?: string
+  /** The value used to associate a client session with an ID token and to mitigate replay attacks. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */
   nonce?: string
+  /** The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. It cannot be specified when *iss* is specified. One of *realm* or *iss* is required. */
   realm?: string
+  /** The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */
   state?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { iss?: never, login_hint?: never, nonce?: never, realm?: never, state?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { iss?: never, login_hint?: never, nonce?: never, realm?: never, state?: never }
 }

 export interface SecurityOidcPrepareAuthenticationResponse {
@@ -18446,25 +22312,46 @@ export interface SecurityPutPrivilegesActions {
 }

 export interface SecurityPutPrivilegesRequest extends RequestBase {
+  /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
   privileges?: Record>
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { refresh?: never, privileges?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { refresh?: never, privileges?: never }
 }

 export type SecurityPutPrivilegesResponse = Record>
 export interface SecurityPutRoleRequest extends RequestBase {
+  /** The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. */
   name: Name
+  /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
+  /** A list of application privilege entries. */
   applications?: SecurityApplicationPrivileges[]
+  /** A list of cluster privileges. These privileges define the cluster-level actions for users with this role. */
   cluster?: SecurityClusterPrivilege[]
+  /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. */
   global?: Record
+  /** A list of indices permissions entries. */
   indices?: SecurityIndicesPrivileges[]
+  /** A list of remote indices permissions entries. NOTE: Remote indices are effective for remote clusters configured with the API key based model. They have no effect for remote clusters configured with the certificate based model. */
   remote_indices?: SecurityRemoteIndicesPrivileges[]
+  /** A list of remote cluster permissions entries. */
   remote_cluster?: SecurityRemoteClusterPrivileges[]
+  /** Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. */
   metadata?: Metadata
+  /** A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. */
   run_as?: string[]
+  /** Optional description of the role descriptor */
   description?: string
+  /** Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. */
   transient_metadata?: Record
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, refresh?: never, applications?: never, cluster?: never, global?: never, indices?: never, remote_indices?: never, remote_cluster?: never, metadata?: never, run_as?: never, description?: never, transient_metadata?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, refresh?: never, applications?: never, cluster?: never, global?: never, indices?: never, remote_indices?: never, remote_cluster?: never, metadata?: never, run_as?: never, description?: never, transient_metadata?: never }
 }

 export interface SecurityPutRoleResponse {
@@ -18472,14 +22359,25 @@ export interface SecurityPutRoleResponse {
 }
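// A sketch of SecurityPutRoleRequest: creates or updates a role; every name and
// privilege below is illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

await client.security.putRole({
  name: 'logs-reader',
  cluster: ['monitor'],
  indices: [{ names: ['logs-*'], privileges: ['read', 'view_index_metadata'] }],
  metadata: { owner: 'platform-team' } // keys starting with `_` are reserved
})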
 export interface SecurityPutRoleMappingRequest extends RequestBase {
+  /** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */
   name: Name
+  /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
+  /** Mappings that have `enabled` set to `false` are ignored when role mapping is performed. */
   enabled?: boolean
+  /** Additional metadata that helps define which roles are assigned to each user. Within the metadata object, keys beginning with `_` are reserved for system usage. */
   metadata?: Metadata
+  /** A list of role names that are granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. */
   roles?: string[]
+  /** A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. */
   role_templates?: SecurityRoleTemplate[]
+  /** The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL. */
   rules?: SecurityRoleMappingRule
   run_as?: string[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, refresh?: never, enabled?: never, metadata?: never, roles?: never, role_templates?: never, rules?: never, run_as?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, refresh?: never, enabled?: never, metadata?: never, roles?: never, role_templates?: never, rules?: never, run_as?: never }
 }

 export interface SecurityPutRoleMappingResponse {
@@ -18488,15 +22386,28 @@ export interface SecurityPutRoleMappingResponse {
 }

 export interface SecurityPutUserRequest extends RequestBase {
+  /** An identifier for the user. NOTE: Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed. */
   username: Username
+  /** Valid values are `true`, `false`, and `wait_for`. These values have the same meaning as in the index API, but the default value for this API is true. */
   refresh?: Refresh
+  /** The email of the user. */
   email?: string | null
+  /** The full name of the user. */
   full_name?: string | null
+  /** Arbitrary metadata that you want to associate with the user. */
   metadata?: Metadata
+  /** The user's password. Passwords must be at least 6 characters long. When adding a user, one of `password` or `password_hash` is required. When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password. */
   password?: Password
+  /** A hash of the user's password. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. The `password` parameter and the `password_hash` parameter cannot be used in the same request. */
   password_hash?: string
+  /** A set of roles the user has. The roles determine the user's access permissions. To create a user without any roles, specify an empty list (`[]`). */
   roles?: string[]
+  /** Specifies whether the user is enabled. */
   enabled?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { username?: never, refresh?: never, email?: never, full_name?: never, metadata?: never, password?: never, password_hash?: never, roles?: never, enabled?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { username?: never, refresh?: never, email?: never, full_name?: never, metadata?: never, password?: never, password_hash?: never, roles?: never, enabled?: never }
 }

 export interface SecurityPutUserResponse {
@@ -18542,17 +22453,31 @@ export interface SecurityQueryApiKeysApiKeyQueryContainer {
 }
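// A sketch of SecurityPutUserRequest: creates (or updates) a native-realm user with
// the role from the previous sketch; all values are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

await client.security.putUser({
  username: 'jacknich',
  password: 'a-long-random-password', // or pass `password_hash` instead
  roles: ['logs-reader'],
  full_name: 'Jack Nicholson',
  email: 'jacknich@example.com',
  enabled: true
})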
 export interface SecurityQueryApiKeysRequest extends RequestBase {
+  /** Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. */
   with_limited_by?: boolean
+  /** Determines whether to also retrieve the profile UID for the API key owner principal. If it exists, the profile UID is returned under the `profile_uid` response field for each API key. */
   with_profile_uid?: boolean
+  /** Determines whether aggregation names are prefixed by their respective types in the response. */
   typed_keys?: boolean
+  /** Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. */
   aggregations?: Record
   /** @alias aggregations */
+  /** Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. */
   aggs?: Record
+  /** A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. NOTE: The queryable string values associated with API keys are internally mapped as keywords. Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. Such a match query is hence equivalent to a `term` query. */
   query?: SecurityQueryApiKeysApiKeyQueryContainer
+  /** The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */
   from?: integer
+  /** The sort definition. Other than `id`, all public fields of an API key are eligible for sorting. In addition, sort can also be applied to the `_doc` field to sort by index order. */
   sort?: Sort
+  /** The number of hits to return. It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */
   size?: integer
+  /** The search after definition. */
   search_after?: SortResults
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { with_limited_by?: never, with_profile_uid?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { with_limited_by?: never, with_profile_uid?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never }
 }

 export interface SecurityQueryApiKeysResponse {
@@ -18568,11 +22493,20 @@ export interface SecurityQueryRoleQueryRole extends SecurityRoleDescriptor {
 }
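// A sketch of SecurityQueryApiKeysRequest: a paginated search over API key
// metadata using the query-type subset documented above; values are illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

const page = await client.security.queryApiKeys({
  query: { prefix: { name: 'ingest-' } }, // keyword semantics, per the note above
  sort: [{ creation: 'desc' }],
  size: 25
})
console.log(page.total, page.api_keys.length)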
 export interface SecurityQueryRoleRequest extends RequestBase {
+  /** A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`. */
   query?: SecurityQueryRoleRoleQueryContainer
+  /** The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */
   from?: integer
+  /** The sort definition. You can sort on `username`, `roles`, or `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. */
   sort?: Sort
+  /** The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */
   size?: integer
+  /** The search after definition. */
   search_after?: SortResults
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { query?: never, from?: never, sort?: never, size?: never, search_after?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { query?: never, from?: never, sort?: never, size?: never, search_after?: never }
 }

 export interface SecurityQueryRoleResponse {
@@ -18600,12 +22534,22 @@ export interface SecurityQueryUserQueryUser extends SecurityUser {
 }

 export interface SecurityQueryUserRequest extends RequestBase {
+  /** Determines whether to retrieve the user profile UID, if it exists, for the users. */
   with_profile_uid?: boolean
+  /** A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with a user: `username`, `roles`, `enabled`, `full_name`, and `email`. */
   query?: SecurityQueryUserUserQueryContainer
+  /** The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */
   from?: integer
+  /** The sort definition. Fields eligible for sorting are: `username`, `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. */
   sort?: Sort
+  /** The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */
   size?: integer
+  /** The search after definition. */
   search_after?: SortResults
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { with_profile_uid?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { with_profile_uid?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never }
 }

 export interface SecurityQueryUserResponse {
@@ -18629,9 +22573,16 @@ export interface SecurityQueryUserUserQueryContainer {
 }

 export interface SecuritySamlAuthenticateRequest extends RequestBase {
+  /** The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. */
   content: string
+  /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. */
   ids: Ids
+  /** The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. */
   realm?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { content?: never, ids?: never, realm?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { content?: never, ids?: never, realm?: never }
 }

 export interface SecuritySamlAuthenticateResponse {
@@ -18643,18 +22594,33 @@ export interface SecuritySamlAuthenticateResponse {
 }

 export interface SecuritySamlCompleteLogoutRequest extends RequestBase {
+  /** The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. */
   realm: string
+  /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. */
   ids: Ids
+  /** If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. */
   query_string?: string
+  /** If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. */
   content?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { realm?: never, ids?: never, query_string?: never, content?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { realm?: never, ids?: never, query_string?: never, content?: never }
 }

 export type SecuritySamlCompleteLogoutResponse = boolean

 export interface SecuritySamlInvalidateRequest extends RequestBase {
+  /** The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. */
   acs?: string
+  /** The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. The client application must not attempt to parse or process the string in any way. */
   query_string: string
+  /** The name of the SAML realm in Elasticsearch, the configuration of which should be used. You must specify either this parameter or the `acs` parameter. */
   realm?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { acs?: never, query_string?: never, realm?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { acs?: never, query_string?: never, realm?: never }
 }

 export interface SecuritySamlInvalidateResponse {
@@ -18664,8 +22630,14 @@ export interface SecuritySamlInvalidateResponse {
 }

 export interface SecuritySamlLogoutRequest extends RequestBase {
+  /** The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. */
   token: string
+  /** The refresh token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent refresh token that was received after refreshing the original access token. */
   refresh_token?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { token?: never, refresh_token?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { token?: never, refresh_token?: never }
 }

 export interface SecuritySamlLogoutResponse {
@@ -18673,9 +22645,16 @@ export interface SecuritySamlLogoutResponse {
 }

 export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase {
+  /** The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. */
   acs?: string
+  /** The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. You must specify either this parameter or the `acs` parameter. */
   realm?: string
+  /** A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. If the Authentication Request is signed, this value is used as part of the signature computation. */
   relay_state?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { acs?: never, realm?: never, relay_state?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { acs?: never, realm?: never, relay_state?: never }
 }

 export interface SecuritySamlPrepareAuthenticationResponse {
@@ -18685,7 +22664,12 @@ export interface SecuritySamlPrepareAuthenticationResponse {
 }

 export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase {
+  /** The name of the SAML realm in Elasticsearch. */
   realm_name: Name
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { realm_name?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { realm_name?: never }
 }

 export interface SecuritySamlServiceProviderMetadataResponse {
@@ -18698,10 +22682,18 @@ export interface SecuritySuggestUserProfilesHint {
 }

 export interface SecuritySuggestUserProfilesRequest extends RequestBase {
+  /** A query string used to match name-related fields in user profile documents. Name-related fields are the user's `username`, `full_name`, and `email`. */
   name?: string
+  /** The number of profiles to return. */
   size?: long
+  /** A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`. By default, the API returns no `data` content. It is an error to specify `data` as both the query parameter and the request body field. */
   data?: string | string[]
+  /** Extra search criteria to improve relevance of the suggestion result. Profiles matching the specified hint are ranked higher in the response. Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. */
   hint?: SecuritySuggestUserProfilesHint
+  /** All values in `body` will be added to the request body. */
*/ + body?: string | { [key: string]: any } & { name?: never, size?: never, data?: never, hint?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, size?: never, data?: never, hint?: never } } export interface SecuritySuggestUserProfilesResponse { @@ -18716,10 +22708,18 @@ export interface SecuritySuggestUserProfilesTotalUserProfiles { } export interface SecurityUpdateApiKeyRequest extends RequestBase { +/** The ID of the API key to update. */ id: Id + /** The role descriptors to assign to this API key. The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. The structure of a role descriptor is the same as the request for the create API keys API. */ role_descriptors?: Record + /** Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this value fully replaces the metadata previously associated with the API key. */ metadata?: Metadata + /** The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the expiration unchanged. */ expiration?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, role_descriptors?: never, metadata?: never, expiration?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, role_descriptors?: never, metadata?: never, expiration?: never } } export interface SecurityUpdateApiKeyResponse { @@ -18727,10 +22727,18 @@ export interface SecurityUpdateApiKeyResponse { } export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase { +/** The ID of the cross-cluster API key to update. */ id: Id + /** The access to be granted to this API key. The access is composed of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access. */ access: SecurityAccess + /** The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the value unchanged. */ expiration?: Duration + /** Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information fully replaces metadata previously associated with the API key. */ metadata?: Metadata + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never } + /** All values in `querystring` will be added to the request querystring. 
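// --- Illustrative sketch, not part of the generated definitions ---
// Updating an API key as described above: an empty `role_descriptors` object
// removes all assigned privileges, so the key inherits the owner user's
// permissions. The id and metadata values are hypothetical.
const updateKey: SecurityUpdateApiKeyRequest = {
  id: 'my-api-key-id',
  role_descriptors: {},                // empty object {} removes assigned privileges
  metadata: { environment: 'staging' } // fully replaces previously stored metadata
}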
*/ + querystring?: { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never } } export interface SecurityUpdateCrossClusterApiKeyResponse { @@ -18738,11 +22746,20 @@ export interface SecurityUpdateCrossClusterApiKeyResponse { } export interface SecurityUpdateSettingsRequest extends RequestBase { +/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** Settings for the index used for most security configuration, including native realm users and roles configured with the API. */ security?: SecuritySecuritySettings + /** Settings for the index used to store profile information. */ 'security-profile'?: SecuritySecuritySettings + /** Settings for the index used to store tokens. */ 'security-tokens'?: SecuritySecuritySettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never, security?: never, 'security-profile'?: never, 'security-tokens'?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never, security?: never, 'security-profile'?: never, 'security-tokens'?: never } } export interface SecurityUpdateSettingsResponse { @@ -18750,12 +22767,22 @@ export interface SecurityUpdateSettingsResponse { } export interface SecurityUpdateUserProfileDataRequest extends RequestBase { +/** A unique identifier for the user profile. */ uid: SecurityUserProfileId + /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber + /** Only perform the operation if the document has this primary term. */ if_primary_term?: long + /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. */ refresh?: Refresh + /** Searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). */ labels?: Record + /** Non-searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). The data object is not searchable, but can be retrieved with the get user profile API. */ data?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { uid?: never, if_seq_no?: never, if_primary_term?: never, refresh?: never, labels?: never, data?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { uid?: never, if_seq_no?: never, if_primary_term?: never, refresh?: never, labels?: never, data?: never } } export type SecurityUpdateUserProfileDataResponse = AcknowledgedResponseBase export type ShutdownType = 'restart' | 'remove' | 'replace' export interface ShutdownDeleteNodeRequest extends RequestBase { +/** The node id of the node to be removed from the shutdown state */ node_id: NodeId + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } } export type ShutdownDeleteNodeResponse = AcknowledgedResponseBase @@ -18790,8 +22824,14 @@ export interface ShutdownGetNodePluginsStatus { } export interface ShutdownGetNodeRequest extends RequestBase { +/** The node for which to retrieve the shutdown status */ node_id?: NodeIds + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never } } export interface ShutdownGetNodeResponse { @@ -18807,25 +22847,46 @@ export type ShutdownGetNodeShutdownStatus = 'not_started' | 'in_progress' | 'sta export type ShutdownGetNodeShutdownType = 'remove' | 'restart' export interface ShutdownPutNodeRequest extends RequestBase { +/** The node identifier. This parameter is not validated against the cluster's active nodes. This enables you to register a node for shut down while it is offline. No error is thrown if you specify an invalid node ID. */ node_id: NodeId + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: TimeUnit + /** Valid values are restart, remove, or replace. Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. Because the node is expected to rejoin the cluster, data is not migrated off of the node. Use remove when you need to permanently remove a node from the cluster. The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node. Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node.
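// --- Illustrative sketch, not part of the generated definitions ---
// Updating user profile data per the declaration above: `labels` is searchable,
// `data` is not, and top-level keys may not start with `_` or contain `.`.
// The uid is a hypothetical profile identifier.
const profileUpdate: SecurityUpdateUserProfileDataRequest = {
  uid: 'u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0',
  labels: { direction: 'north' },      // searchable key/value pairs
  data: { app1: { theme: 'dark' } },   // retrievable but not searchable
  refresh: 'wait_for'                  // wait for the change to be visible to search
}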
During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. */ type: ShutdownType + /** A human-readable reason that the node is being shut down. This field provides information for other cluster operators; it does not affect the shut down process. */ reason: string + /** Only valid if type is restart. Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. */ allocation_delay?: string + /** Only valid if type is replace. Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. */ target_node_name?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never, type?: never, reason?: never, allocation_delay?: never, target_node_name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never, type?: never, reason?: never, allocation_delay?: never, target_node_name?: never } } export type ShutdownPutNodeResponse = AcknowledgedResponseBase export interface SimulateIngestRequest extends RequestBase { +/** The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. */ index?: IndexName + /** The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index. */ pipeline?: PipelineName + /** Sample documents to test in the pipeline. */ docs: IngestDocument[] + /** A map of component template names to substitute component template definition objects. */ component_template_substitutions?: Record + /** A map of index template names to substitute index template definition objects. */ index_template_subtitutions?: Record mapping_addition?: MappingTypeMapping + /** Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline_substitutions?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_subtitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } + /** All values in `querystring` will be added to the request querystring. 
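// --- Illustrative sketch, not part of the generated definitions ---
// Registering a node for a restart-type shutdown as described above. The node
// id and delay are hypothetical; `allocation_delay` is only valid for `restart`.
const shutdownNode: ShutdownPutNodeRequest = {
  node_id: 'node-1',
  type: 'restart',
  reason: 'Rolling restart to apply a configuration change',
  allocation_delay: '20m'              // shards wait this long before reallocating
}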
*/ + querystring?: { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_subtitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } } export interface SimulateIngestResponse { @@ -18898,17 +22959,31 @@ export interface SlmStatistics { } export interface SlmDeleteLifecycleRequest extends RequestBase { +/** The id of the snapshot lifecycle policy to remove */ policy_id: Name + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } } export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase export interface SlmExecuteLifecycleRequest extends RequestBase { +/** The id of the snapshot lifecycle policy to be executed */ policy_id: Name + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } } export interface SlmExecuteLifecycleResponse { @@ -18916,23 +22991,42 @@ export interface SlmExecuteLifecycleResponse { } export interface SlmExecuteRetentionRequest extends RequestBase { +/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type SlmExecuteRetentionResponse = AcknowledgedResponseBase export interface SlmGetLifecycleRequest extends RequestBase { +/** Comma-separated list of snapshot lifecycle policies to retrieve */ policy_id?: Names + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
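// --- Illustrative sketch, not part of the generated definitions ---
// Simulating ingest with a pipeline substitution, per SimulateIngestRequest
// above: the substitute pipeline is tried in place of the stored one without
// modifying the cluster. The index, pipeline name, and processor are hypothetical.
const simulate: SimulateIngestRequest = {
  index: 'my-index',
  docs: [{ _source: { message: ' hello world ' } }],
  pipeline_substitutions: {
    'my-pipeline': { processors: [{ trim: { field: 'message' } }] }
  }
}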
*/ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } } export type SlmGetLifecycleResponse = Record export interface SlmGetStatsRequest extends RequestBase { +/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export interface SlmGetStatsResponse { @@ -18949,8 +23043,14 @@ export interface SlmGetStatsResponse { } export interface SlmGetStatusRequest extends RequestBase { +/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export interface SlmGetStatusResponse { @@ -18958,28 +23058,52 @@ export interface SlmGetStatusResponse { } export interface SlmPutLifecycleRequest extends RequestBase { +/** The identifier for the snapshot lifecycle policy you want to create or update. */ policy_id: Name + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** Configuration for each snapshot created by the policy. */ config?: SlmConfiguration + /** Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. */ name?: Name + /** Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. */ repository?: string + /** Retention rules used to retain and delete snapshots created by the policy. */ retention?: SlmRetention + /** Periodic or absolute schedule at which the policy creates snapshots. 
SLM applies schedule changes immediately. */ schedule?: WatcherCronExpression + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never, config?: never, name?: never, repository?: never, retention?: never, schedule?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never, config?: never, name?: never, repository?: never, retention?: never, schedule?: never } } export type SlmPutLifecycleResponse = AcknowledgedResponseBase export interface SlmStartRequest extends RequestBase { +/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type SlmStartResponse = AcknowledgedResponseBase export interface SlmStopRequest extends RequestBase { +/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type SlmStopResponse = AcknowledgedResponseBase @@ -19195,9 +23319,16 @@ export interface SnapshotCleanupRepositoryCleanupRepositoryResults { } export interface SnapshotCleanupRepositoryRequest extends RequestBase { +/** The name of the snapshot repository to clean up. */ name: Name + /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1` */ master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** All values in `body` will be added to the request body. 
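// --- Illustrative sketch, not part of the generated definitions ---
// Creating an SLM policy with SlmPutLifecycleRequest above. The policy id,
// repository, and schedule are hypothetical; note the date-math snapshot name
// and that SLM appends a UUID to avoid name conflicts.
const nightlyPolicy: SlmPutLifecycleRequest = {
  policy_id: 'nightly-snapshots',
  name: '<nightly-snap-{now/d}>',      // date math is supported, as noted above
  repository: 'my_repository',         // must already exist
  schedule: '0 30 1 * * ?',            // cron expression: 01:30 every day
  retention: { expire_after: '30d', max_count: 50, min_count: 5 }
}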
*/ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export interface SnapshotCleanupRepositoryResponse { @@ -19205,28 +23336,53 @@ export interface SnapshotCleanupRepositoryResponse { } export interface SnapshotCloneRequest extends RequestBase { +/** The name of the snapshot repository that both source and target snapshot belong to. */ repository: Name + /** The source snapshot name. */ snapshot: Name + /** The target snapshot name. */ target_snapshot: Name + /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period of time to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** A comma-separated list of indices to include in the snapshot. Multi-target syntax is supported. */ indices: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, timeout?: never, indices?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, timeout?: never, indices?: never } } export type SnapshotCloneResponse = AcknowledgedResponseBase export interface SnapshotCreateRequest extends RequestBase { +/** The name of the repository for the snapshot. */ repository: Name + /** The name of the snapshot. It supports date math. It must be unique in the repository. */ snapshot: Name + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes. */ wait_for_completion?: boolean + /** Determines how wildcard patterns in the `indices` parameter match data streams and indices. It supports comma-separated values such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** The feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default. Note that specifying an empty array will result in the default behavior. To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). */ feature_states?: string[] + /** If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed. */ ignore_unavailable?: boolean + /** If `true`, the current cluster state is included in the snapshot.
The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). */ include_global_state?: boolean + /** A comma-separated list of data streams and indices to include in the snapshot. It supports a multi-target syntax. The default is an empty array (`[]`), which includes all regular data streams and regular indices. To exclude all data streams and indices, use `-*`. You can't use this parameter to include or exclude system indices or system data streams from a snapshot. Use `feature_states` instead. */ indices?: Indices + /** Arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. It can have any contents but it must be less than 1024 bytes. This information is not automatically generated by Elasticsearch. */ metadata?: Metadata + /** If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. */ partial?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, expand_wildcards?: never, feature_states?: never, ignore_unavailable?: never, include_global_state?: never, indices?: never, metadata?: never, partial?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, expand_wildcards?: never, feature_states?: never, ignore_unavailable?: never, include_global_state?: never, indices?: never, metadata?: never, partial?: never } } export interface SnapshotCreateResponse { @@ -19235,47 +23391,88 @@ export interface SnapshotCreateResponse { } export interface SnapshotCreateRepositoryRequest extends RequestBase { +/** The name of the snapshot repository to register or update. */ name: Name + /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. If `false`, this verification is skipped. You can also perform this verification with the verify snapshot repository API. */ verify?: boolean repository?: SnapshotRepository + /** All values in `body` will be added to the request body. 
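// --- Illustrative sketch, not part of the generated definitions ---
// Taking a snapshot with SnapshotCreateRequest above. Repository and snapshot
// names are hypothetical; `feature_states: ['none']` excludes all feature
// states regardless of `include_global_state`, as the docs above describe.
const createSnapshot: SnapshotCreateRequest = {
  repository: 'my_repository',
  snapshot: '<snapshot-{now/d}>',      // date math keeps names unique per day
  wait_for_completion: true,           // respond only when the snapshot finishes
  include_global_state: false,
  feature_states: ['none']
}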
*/ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, verify?: never, repository?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, verify?: never, repository?: never } } export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase export interface SnapshotDeleteRequest extends RequestBase { +/** The name of the repository to delete a snapshot from. */ repository: Name + /** A comma-separated list of snapshot names to delete. It also accepts wildcards (`*`). */ snapshot: Name + /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never } } export type SnapshotDeleteResponse = AcknowledgedResponseBase export interface SnapshotDeleteRepositoryRequest extends RequestBase { +/** The name of the snapshot repositories to unregister. Wildcard (`*`) patterns are supported. */ name: Names + /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase export interface SnapshotGetRequest extends RequestBase { +/** A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported. */ repository: Name + /** A comma-separated list of snapshot names to retrieve. Wildcards (`*`) are supported. * To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. * To get information about any snapshots that are currently running, use `_current`. */ snapshot: Names + /** An offset identifier to start pagination from as returned by the next field in the response body. */ after?: string + /** The value of the current sort column at which to start retrieval. It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. It can be a millisecond time value or a number when sorting by `index-` or shard count.
*/ from_sort_value?: string + /** If `false`, the request returns an error for any snapshots that are unavailable. */ ignore_unavailable?: boolean + /** If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. The default is `false`, meaning that this information is omitted. */ index_details?: boolean + /** If `true`, the response includes the name of each index in each snapshot. */ index_names?: boolean + /** If `true`, the response includes the repository name in each snapshot. */ include_repository?: boolean + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The sort order. Valid values are `asc` for ascending and `desc` for descending order. The default behavior is ascending order. */ order?: SortOrder + /** Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. */ offset?: integer + /** The maximum number of snapshots to return. The default is 0, which means to return all that match the request without limit. */ size?: integer + /** Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to. You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. */ slm_policy_filter?: Name + /** The sort order for the result. The default behavior is sorting by snapshot start time stamp. */ sort?: SnapshotSnapshotSort + /** If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. */ verbose?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, after?: never, from_sort_value?: never, ignore_unavailable?: never, index_details?: never, index_names?: never, include_repository?: never, master_timeout?: never, order?: never, offset?: never, size?: never, slm_policy_filter?: never, sort?: never, verbose?: never } + /** All values in `querystring` will be added to the request querystring. 
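// --- Illustrative sketch, not part of the generated definitions ---
// Paginating snapshots with SnapshotGetRequest above: sort by start time,
// newest first, ten per page; pass the response's `next` value back as `after`
// to fetch the following page. The repository name is hypothetical.
const listSnapshots: SnapshotGetRequest = {
  repository: 'my_repository',
  snapshot: '*',
  sort: 'start_time',
  order: 'desc',
  size: 10
}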
*/ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, after?: never, from_sort_value?: never, ignore_unavailable?: never, index_details?: never, index_names?: never, include_repository?: never, master_timeout?: never, order?: never, offset?: never, size?: never, slm_policy_filter?: never, sort?: never, verbose?: never } } export interface SnapshotGetResponse { @@ -19293,42 +23490,204 @@ export interface SnapshotGetSnapshotResponseItem { } export interface SnapshotGetRepositoryRequest extends RequestBase { +/** A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. */ name?: Names + /** If `true`, the request gets information from the local node only. If `false`, the request gets information from the master node. */ local?: boolean + /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } } export type SnapshotGetRepositoryResponse = Record +export interface SnapshotRepositoryAnalyzeBlobDetails { + name: string + overwritten: boolean + read_early: boolean + read_end: long + read_start: long + reads: SnapshotRepositoryAnalyzeReadBlobDetails + size: ByteSize + size_bytes: long +} + +export interface SnapshotRepositoryAnalyzeDetailsInfo { + blob: SnapshotRepositoryAnalyzeBlobDetails + overwrite_elapsed?: Duration + overwrite_elapsed_nanos?: DurationValue + write_elapsed: Duration + write_elapsed_nanos: DurationValue + write_throttled: Duration + write_throttled_nanos: DurationValue + writer_node: SnapshotRepositoryAnalyzeNodeInfo +} + +export interface SnapshotRepositoryAnalyzeNodeInfo { + id: Id + name: Name +} + +export interface SnapshotRepositoryAnalyzeReadBlobDetails { + before_write_complete?: boolean + elapsed?: Duration + elapsed_nanos?: DurationValue + first_byte_time?: Duration + first_byte_time_nanos: DurationValue + found: boolean + node: SnapshotRepositoryAnalyzeNodeInfo + throttled?: Duration + throttled_nanos?: DurationValue +} + +export interface SnapshotRepositoryAnalyzeReadSummaryInfo { + count: integer + max_wait: Duration + max_wait_nanos: DurationValue + total_elapsed: Duration + total_elapsed_nanos: DurationValue + total_size: ByteSize + total_size_bytes: long + total_throttled: Duration + total_throttled_nanos: DurationValue + total_wait: Duration + total_wait_nanos: DurationValue +} + +export interface SnapshotRepositoryAnalyzeRequest extends RequestBase { +/** The name of the repository. */ + name: Name + /** The total number of blobs to write to the repository during the test. For realistic experiments, you should set it to at least `2000`. */ + blob_count?: integer + /** The number of operations to run concurrently during the test. 
*/ + concurrency?: integer + /** Indicates whether to return detailed results, including timing information for every operation performed during the analysis. If false, it returns only a summary of the analysis. */ + detailed?: boolean + /** The number of nodes on which to perform an early read operation while writing each blob. Early read operations are only rarely performed. */ + early_read_node_count?: integer + /** The maximum size of a blob to be written during the test. For realistic experiments, you should set it to at least `2gb`. */ + max_blob_size?: ByteSize + /** An upper limit on the total size of all the blobs written during the test. For realistic experiments, you should set it to at least `1tb`. */ + max_total_data_size?: ByteSize + /** The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. */ + rare_action_probability?: double + /** Indicates whether to rarely cancel writes before they complete. */ + rarely_abort_writes?: boolean + /** The number of nodes on which to read a blob after writing. */ + read_node_count?: integer + /** The minimum number of linearizable register operations to perform in total. For realistic experiments, you should set it to at least `100`. */ + register_operation_count?: integer + /** The seed for the pseudo-random number generator used to generate the list of operations performed during the test. To repeat the same set of operations in multiple experiments, use the same seed in each experiment. Note that the operations are performed concurrently so might not always happen in the same order on each run. */ + seed?: integer + /** The period of time to wait for the test to complete. If no response is received before the timeout expires, the test is cancelled and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, blob_count?: never, concurrency?: never, detailed?: never, early_read_node_count?: never, max_blob_size?: never, max_total_data_size?: never, rare_action_probability?: never, rarely_abort_writes?: never, read_node_count?: never, register_operation_count?: never, seed?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { name?: never, blob_count?: never, concurrency?: never, detailed?: never, early_read_node_count?: never, max_blob_size?: never, max_total_data_size?: never, rare_action_probability?: never, rarely_abort_writes?: never, read_node_count?: never, register_operation_count?: never, seed?: never, timeout?: never } +} + +export interface SnapshotRepositoryAnalyzeResponse { + blob_count: integer + blob_path: string + concurrency: integer + coordinating_node: SnapshotRepositoryAnalyzeNodeInfo + delete_elapsed: Duration + delete_elapsed_nanos: DurationValue + details: SnapshotRepositoryAnalyzeDetailsInfo + early_read_node_count: integer + issues_detected: string[] + listing_elapsed: Duration + listing_elapsed_nanos: DurationValue + max_blob_size: ByteSize + max_blob_size_bytes: long + max_total_data_size: ByteSize + max_total_data_size_bytes: long + rare_action_probability: double + read_node_count: integer + repository: string + seed: long + summary: SnapshotRepositoryAnalyzeSummaryInfo +} + +export interface SnapshotRepositoryAnalyzeSummaryInfo { + read: SnapshotRepositoryAnalyzeReadSummaryInfo + write: SnapshotRepositoryAnalyzeWriteSummaryInfo +} + +export interface SnapshotRepositoryAnalyzeWriteSummaryInfo { + count: integer + total_elapsed: Duration + total_elapsed_nanos: DurationValue + total_size: ByteSize + total_size_bytes: long + total_throttled: Duration + total_throttled_nanos: long +} + export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase { +/** The name of the snapshot repository. */ name: Names + /** If `verify_blob_contents` is `true`, this parameter specifies how many blobs to verify at once. */ blob_thread_pool_concurrency?: integer + /** The maximum number of index snapshots to verify concurrently within each index verification. */ index_snapshot_verification_concurrency?: integer + /** The number of indices to verify concurrently. The default behavior is to use the entire `snapshot_meta` thread pool. */ index_verification_concurrency?: integer + /** If `verify_blob_contents` is `true`, this parameter specifies the maximum amount of data that Elasticsearch will read from the repository every second. */ max_bytes_per_sec?: string + /** The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage. If your repository contains more than this number of shard snapshot failures, the verification will fail. */ max_failed_shard_snapshots?: integer + /** The maximum number of snapshot metadata operations to run concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ meta_thread_pool_concurrency?: integer + /** The number of snapshots to verify concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ snapshot_verification_concurrency?: integer + /** Indicates whether to verify the checksum of every data blob in the repository. If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive. */ verify_blob_contents?: boolean + /** All values in `body` will be added to the request body. 
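// --- Illustrative sketch, not part of the generated definitions ---
// Running a repository analysis with SnapshotRepositoryAnalyzeRequest above,
// using the minimum values the docs recommend for realistic experiments.
// The repository name is hypothetical.
const analyzeRepo: SnapshotRepositoryAnalyzeRequest = {
  name: 'my_repository',
  blob_count: 2000,                    // at least 2000 for realistic experiments
  max_blob_size: '2gb',
  register_operation_count: 100,
  timeout: '10m'
}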
*/ + body?: string | { [key: string]: any } & { name?: never, blob_thread_pool_concurrency?: never, index_snapshot_verification_concurrency?: never, index_verification_concurrency?: never, max_bytes_per_sec?: never, max_failed_shard_snapshots?: never, meta_thread_pool_concurrency?: never, snapshot_verification_concurrency?: never, verify_blob_contents?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, blob_thread_pool_concurrency?: never, index_snapshot_verification_concurrency?: never, index_verification_concurrency?: never, max_bytes_per_sec?: never, max_failed_shard_snapshots?: never, meta_thread_pool_concurrency?: never, snapshot_verification_concurrency?: never, verify_blob_contents?: never } } export type SnapshotRepositoryVerifyIntegrityResponse = any export interface SnapshotRestoreRequest extends RequestBase { +/** The name of the repository to restore a snapshot from. */ repository: Name + /** The name of the snapshot to restore. */ snapshot: Name + /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** If `true`, the request returns a response when the restore operation completes. The operation is complete when it finishes all attempts to recover primary shards for restored indices. This applies even if one or more of the recovery attempts fail. If `false`, the request returns a response when the restore operation initializes. */ wait_for_completion?: boolean + /** The feature states to restore. If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. If `include_global_state` is `false`, the request restores no feature states by default. Note that specifying an empty array will result in the default behavior. To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). */ feature_states?: string[] + /** The index settings to not restore from the snapshot. You can't use this option to ignore `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template. */ ignore_index_settings?: string[] + /** If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. If `false`, the request returns an error for any missing index or data stream. */ ignore_unavailable?: boolean + /** If `true`, the request restores aliases for any restored data streams and indices. If `false`, the request doesn’t restore aliases. */ include_aliases?: boolean + /** If `true`, restore the cluster state. The cluster state includes: * Persistent cluster settings * Index templates * Legacy index templates * Ingest pipelines * Index lifecycle management (ILM) policies * Stored scripts * For snapshots taken after 7.12.0, feature states If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. 
It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. Use the `feature_states` parameter to configure how feature states are restored. If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. */ include_global_state?: boolean + /** Index settings to add or change in restored indices, including backing indices. You can't use this option to change `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template. */ index_settings?: IndicesIndexSettings + /** A comma-separated list of indices and data streams to restore. It supports a multi-target syntax. The default behavior is all regular indices and regular data streams in the snapshot. You can't use this parameter to restore system indices or system data streams. Use `feature_states` instead. */ indices?: Indices + /** If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. If true, it allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. */ partial?: boolean + /** A rename pattern to apply to restored data streams and indices. Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. */ rename_pattern?: string + /** The rename replacement string that is used with the `rename_pattern`. */ rename_replacement?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, feature_states?: never, ignore_index_settings?: never, ignore_unavailable?: never, include_aliases?: never, include_global_state?: never, index_settings?: never, indices?: never, partial?: never, rename_pattern?: never, rename_replacement?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, feature_states?: never, ignore_index_settings?: never, ignore_unavailable?: never, include_aliases?: never, include_global_state?: never, index_settings?: never, indices?: never, partial?: never, rename_pattern?: never, rename_replacement?: never } } export interface SnapshotRestoreResponse { @@ -19343,10 +23702,18 @@ export interface SnapshotRestoreSnapshotRestore { } export interface SnapshotStatusRequest extends RequestBase { +/** The snapshot repository name used to limit the request. It supports wildcards (`*`) if `` isn't specified. */ repository?: Name + /** A comma-separated list of snapshots to retrieve status for. The default is currently running snapshots. Wildcards (`*`) are not supported. */ snapshot?: Names + /** If `false`, the request returns an error for any snapshots that are unavailable. 
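// --- Illustrative sketch, not part of the generated definitions ---
// Restoring indices under new names with SnapshotRestoreRequest above:
// `rename_pattern` is a regular expression and `rename_replacement` may
// reference its capture groups. All names here are hypothetical.
const restoreSnapshot: SnapshotRestoreRequest = {
  repository: 'my_repository',
  snapshot: 'snapshot-2021.04.26',
  indices: 'index-*',
  rename_pattern: 'index-(.+)',
  rename_replacement: 'restored-index-$1', // index-logs becomes restored-index-logs
  wait_for_completion: true
}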
If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. */ ignore_unavailable?: boolean + /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, ignore_unavailable?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, ignore_unavailable?: never, master_timeout?: never } } export interface SnapshotStatusResponse { @@ -19358,9 +23725,16 @@ export interface SnapshotVerifyRepositoryCompactNodeInfo { } export interface SnapshotVerifyRepositoryRequest extends RequestBase { +/** The name of the snapshot repository to verify. */ name: Name + /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export interface SnapshotVerifyRepositoryResponse { @@ -19375,7 +23749,12 @@ export interface SqlColumn { export type SqlRow = any[] export interface SqlClearCursorRequest extends RequestBase { +/** Cursor to clear. */ cursor: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { cursor?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { cursor?: never } } export interface SqlClearCursorResponse { @@ -19383,17 +23762,31 @@ export interface SqlClearCursorResponse { } export interface SqlDeleteAsyncRequest extends RequestBase { +/** The identifier for the search. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type SqlDeleteAsyncResponse = AcknowledgedResponseBase export interface SqlGetAsyncRequest extends RequestBase { +/** The identifier for the search. */ id: Id + /** The separator for CSV results. The API supports this parameter only for CSV responses. */ delimiter?: string + /** The format for the response. You must specify a format using this parameter or the `Accept` HTTP header. If you specify both, the API uses this parameter. 
*/ format?: string + /** The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. */ keep_alive?: Duration + /** The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. */ wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, delimiter?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, delimiter?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } } export interface SqlGetAsyncResponse { @@ -19406,7 +23799,12 @@ export interface SqlGetAsyncResponse { } export interface SqlGetAsyncStatusRequest extends RequestBase { +/** The identifier for the search. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface SqlGetAsyncStatusResponse { @@ -19419,24 +23817,46 @@ export interface SqlGetAsyncStatusResponse { } export interface SqlQueryRequest extends RequestBase { +/** The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. */ format?: SqlQuerySqlFormat + /** If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the API returns an error with no partial results. */ allow_partial_search_results?: boolean + /** The default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. */ catalog?: string + /** If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. */ columnar?: boolean + /** The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters. */ cursor?: string + /** The maximum number of rows (or entries) to return in one response. */ fetch_size?: integer + /** If `false`, the API returns an exception when encountering multiple values for a field. If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. */ field_multi_value_leniency?: boolean + /** The Elasticsearch query DSL for additional filtering. */ filter?: QueryDslQueryContainer + /** If `true`, the search can run on frozen indices. */ index_using_frozen?: boolean + /** The retention period for an async or saved synchronous search. */ keep_alive?: Duration + /** If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. */ keep_on_completion?: boolean + /** The minimum retention period for the scroll cursor. After this time period, a pagination request might fail because the scroll cursor is no longer available. 
+  /** The minimum retention period for the scroll cursor. After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. */
   page_timeout?: Duration
+  /** The values for parameters in the query. */
   params?: Record<string, any>
+  /** The SQL query to run. */
   query?: string
+  /** The timeout before the request fails. */
   request_timeout?: Duration
+  /** One or more runtime fields for the search request. These fields take precedence over mapped fields with the same name. */
   runtime_mappings?: MappingRuntimeFields
+  /** The ISO-8601 time zone ID for the search. */
   time_zone?: TimeZone
+  /** The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. If the search doesn't finish within this period, the search becomes async. To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. */
   wait_for_completion_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { format?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { format?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never }
 }
 
 export interface SqlQueryResponse {
@@ -19451,10 +23871,18 @@
 export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile'
 
 export interface SqlTranslateRequest extends RequestBase {
+/** The maximum number of rows (or entries) to return in one response. */
   fetch_size?: integer
+  /** The Elasticsearch query DSL for additional filtering. */
   filter?: QueryDslQueryContainer
+  /** The SQL query to run. */
   query: string
+  /** The ISO-8601 time zone ID for the search. */
   time_zone?: TimeZone
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { fetch_size?: never, filter?: never, query?: never, time_zone?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { fetch_size?: never, filter?: never, query?: never, time_zone?: never }
 }
 
 export interface SqlTranslateResponse {
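A minimal sketch of `SqlQueryRequest` and `SqlClearCursorRequest` in use, assuming an async context and a configured `client`; the index name and query are illustrative:

----
// Run a SQL search, then release the cursor once pagination is done.
const page = await client.sql.query({
  query: 'SELECT * FROM "my-index" ORDER BY "@timestamp" DESC',
  fetch_size: 100,
  format: 'json'
})
console.log(page.columns, page.rows)

if (page.cursor != null) {
  await client.sql.clearCursor({ cursor: page.cursor })
}
----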
@@ -19478,6 +23906,10 @@ export interface SslCertificatesCertificateInformation {
 }
 
 export interface SslCertificatesRequest extends RequestBase {
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any }
 }
 
 export type SslCertificatesResponse = SslCertificatesCertificateInformation[]
@@ -19500,22 +23932,40 @@ export interface SynonymsSynonymsUpdateResult {
 }
 
 export interface SynonymsDeleteSynonymRequest extends RequestBase {
+/** The synonyms set identifier to delete. */
   id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never }
 }
 
 export type SynonymsDeleteSynonymResponse = AcknowledgedResponseBase
 
 export interface SynonymsDeleteSynonymRuleRequest extends RequestBase {
+/** The ID of the synonym set to update. */
   set_id: Id
+  /** The ID of the synonym rule to delete. */
   rule_id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never }
 }
 
 export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult
 
 export interface SynonymsGetSynonymRequest extends RequestBase {
+/** The synonyms set identifier to retrieve. */
   id: Id
+  /** The starting offset for query rules to retrieve. */
   from?: integer
+  /** The max number of query rules to retrieve. */
   size?: integer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, from?: never, size?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, from?: never, size?: never }
 }
 
 export interface SynonymsGetSynonymResponse {
@@ -19524,15 +23974,27 @@
 }
 
 export interface SynonymsGetSynonymRuleRequest extends RequestBase {
+/** The ID of the synonym set to retrieve the synonym rule from. */
   set_id: Id
+  /** The ID of the synonym rule to retrieve. */
   rule_id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never }
 }
 
 export type SynonymsGetSynonymRuleResponse = SynonymsSynonymRuleRead
 
 export interface SynonymsGetSynonymsSetsRequest extends RequestBase {
+/** The starting offset for synonyms sets to retrieve. */
   from?: integer
+  /** The maximum number of synonyms sets to retrieve. */
   size?: integer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { from?: never, size?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { from?: never, size?: never }
 }
 
 export interface SynonymsGetSynonymsSetsResponse {
@@ -19546,8 +24008,14 @@ export interface SynonymsGetSynonymsSetsSynonymsSetItem {
 }
 
 export interface SynonymsPutSynonymRequest extends RequestBase {
+/** The ID of the synonyms set to be created or updated. */
   id: Id
+  /** The synonym rules definitions for the synonyms set. */
   synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, synonyms_set?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, synonyms_set?: never }
 }
 
 export interface SynonymsPutSynonymResponse {
@@ -19556,9 +24024,16 @@
 }
 
 export interface SynonymsPutSynonymRuleRequest extends RequestBase {
+/** The ID of the synonym set. */
   set_id: Id
+  /** The ID of the synonym rule to be updated or created. */
   rule_id: Id
+  /** The synonym rule information definition, which must be in Solr format. */
   synonyms: SynonymsSynonymString
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never, synonyms?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never, synonyms?: never }
 }
 
 export type SynonymsPutSynonymRuleResponse = SynonymsSynonymsUpdateResult
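A sketch of the synonyms request types above, assuming an async context; the set id and rules are illustrative:

----
// Create or replace a synonyms set, then read it back with pagination.
await client.synonyms.putSynonym({
  id: 'my-synonyms',
  synonyms_set: [
    { id: 'rule-1', synonyms: 'laptop, notebook' },
    { synonyms: 'tv => television' }
  ]
})

const set = await client.synonyms.getSynonym({ id: 'my-synonyms', from: 0, size: 10 })
console.log(set.count, set.synonyms_set)
----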
@@ -19605,19 +24080,35 @@ export interface TasksTaskListResponseBase {
 }
 
 export interface TasksCancelRequest extends RequestBase {
+/** The task identifier. */
   task_id?: TaskId
+  /** A comma-separated list or wildcard expression of actions that is used to limit the request. */
   actions?: string | string[]
+  /** A comma-separated list of node IDs or names that is used to limit the request. */
   nodes?: string[]
+  /** A parent task ID that is used to limit the tasks. */
   parent_task_id?: string
+  /** If true, the request blocks until all found tasks are complete. */
   wait_for_completion?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_id?: never, actions?: never, nodes?: never, parent_task_id?: never, wait_for_completion?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_id?: never, actions?: never, nodes?: never, parent_task_id?: never, wait_for_completion?: never }
 }
 
 export type TasksCancelResponse = TasksTaskListResponseBase
 
 export interface TasksGetRequest extends RequestBase {
+/** The task identifier. */
   task_id: Id
+  /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** If `true`, the request blocks until the task has completed. */
   wait_for_completion?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_id?: never, timeout?: never, wait_for_completion?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_id?: never, timeout?: never, wait_for_completion?: never }
 }
 
 export interface TasksGetResponse {
@@ -19628,13 +24119,24 @@
 }
 
 export interface TasksListRequest extends RequestBase {
+/** A comma-separated list or wildcard expression of actions used to limit the request. For example, you can use `cluster:*` to retrieve all cluster-related tasks. */
   actions?: string | string[]
+  /** If `true`, the response includes detailed information about the running tasks. This information is useful to distinguish tasks from each other but is more costly to run. */
   detailed?: boolean
+  /** A key that is used to group tasks in the response. The task lists can be grouped either by nodes or by parent tasks. */
   group_by?: TasksGroupBy
+  /** A comma-separated list of node IDs or names that is used to limit the returned information. */
   nodes?: NodeIds
+  /** A parent task identifier that is used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. If the parent task is not found, the API does not return a 404 response code. */
   parent_task_id?: Id
+  /** The period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its information. However, timed out nodes are included in the `node_failures` property. */
   timeout?: Duration
+  /** If `true`, the request blocks until the operation is complete. */
   wait_for_completion?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { actions?: never, detailed?: never, group_by?: never, nodes?: never, parent_task_id?: never, timeout?: never, wait_for_completion?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { actions?: never, detailed?: never, group_by?: never, nodes?: never, parent_task_id?: never, timeout?: never, wait_for_completion?: never }
 }
 
 export type TasksListResponse = TasksTaskListResponseBase
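A sketch of `TasksListRequest` and `TasksCancelRequest` in use; the task identifier below is a made-up example:

----
// List detailed task information grouped by parent task.
const tasks = await client.tasks.list({
  detailed: true,
  group_by: 'parents',
  actions: '*reindex'
})
console.log(tasks.nodes)

// Cancel a long-running task without waiting for it to finish.
await client.tasks.cancel({
  task_id: 'oTUltX4IQMOUUVeiohTt8A:12345',
  wait_for_completion: false
})
----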
@@ -19661,20 +24163,38 @@ export interface TextStructureTopHit {
 }
 
 export interface TextStructureFindFieldStructureRequest extends RequestBase {
+/** If `format` is set to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */
   column_names?: string
+  /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */
   delimiter?: string
+  /** The number of documents to include in the structural analysis. The minimum value is 2. */
   documents_to_sample?: uint
+  /** The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. The intention in that situation is that a user who knows the meanings will rename the fields before using them. */
   ecs_compatibility?: TextStructureEcsCompatibilityType
+  /** If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. */
   explain?: boolean
+  /** The field that should be analyzed. */
   field: Field
+  /** The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */
   format?: TextStructureFormatType
+  /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */
   grok_pattern?: GrokPattern
+  /** The name of the index that contains the analyzed field. */
   index: IndexName
+  /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */
   quote?: string
+  /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. */
   should_trim_fields?: boolean
+  /** The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. */
   timeout?: Duration
+  /** The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. */
   timestamp_field?: Field
+  /** The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */
   timestamp_format?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { column_names?: never, delimiter?: never, documents_to_sample?: never, ecs_compatibility?: never, explain?: never, field?: never, format?: never, grok_pattern?: never, index?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { column_names?: never, delimiter?: never, documents_to_sample?: never, ecs_compatibility?: never, explain?: never, field?: never, format?: never, grok_pattern?: never, index?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never }
 }
 
 export interface TextStructureFindFieldStructureResponse {
@@ -19696,18 +24216,34 @@
 }
 
 export interface TextStructureFindMessageStructureRequest extends RequestBase {
+/** If the format is `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */
   column_names?: string
+  /** If the format is `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */
   delimiter?: string
+  /** The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. */
   ecs_compatibility?: TextStructureEcsCompatibilityType
+  /** If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. */
   explain?: boolean
+  /** The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */
   format?: TextStructureFormatType
+  /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */
   grok_pattern?: GrokPattern
+  /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */
   quote?: string
+  /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. */
   should_trim_fields?: boolean
+  /** The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. */
   timeout?: Duration
+  /** The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. */
   timestamp_field?: Field
+  /** The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */
   timestamp_format?: string
+  /** The list of messages you want to analyze. */
   messages: string[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, messages?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, messages?: never }
 }
 
 export interface TextStructureFindMessageStructureResponse {
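A sketch of `TextStructureFindMessageStructureRequest`; the log lines are illustrative:

----
// Ask the structure finder to analyze a few sample messages.
const structure = await client.textStructure.findMessageStructure({
  messages: [
    '[2024-03-05T12:01:02] INFO  starting node',
    '[2024-03-05T12:01:09] WARN  slow disk detected'
  ],
  ecs_compatibility: 'v1'
})
console.log(structure.grok_pattern, structure.mappings)
----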
@@ -19729,22 +24265,41 @@
 }
 
 export interface TextStructureFindStructureRequest {
+/** The text's character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set. */
   charset?: string
+  /** If you have set `format` to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */
   column_names?: string
+  /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */
   delimiter?: string
+  /** The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. */
   ecs_compatibility?: string
+  /** If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. */
   explain?: boolean
+  /** The high level structure of the text. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */
   format?: string
+  /** If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */
   grok_pattern?: GrokPattern
+  /** If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. */
   has_header_row?: boolean
+  /** The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. */
   line_merge_size_limit?: uint
+  /** The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2. If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. NOTE: The number of lines and the variation of the lines affects the speed of the analysis. For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. */
   lines_to_sample?: uint
+  /** If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */
   quote?: string
+  /** If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. Otherwise, the default value is `false`. */
   should_trim_fields?: boolean
+  /** The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will be stopped. */
   timeout?: Duration
+  /** The name of the field that contains the primary timestamp of each record in the text. In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. */
   timestamp_field?: Field
+  /** The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. */
   timestamp_format?: string
   text_files?: TJsonDocument[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { charset?: never, column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, has_header_row?: never, line_merge_size_limit?: never, lines_to_sample?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, text_files?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { charset?: never, column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, has_header_row?: never, line_merge_size_limit?: never, lines_to_sample?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, text_files?: never }
 }
 
 export interface TextStructureFindStructureResponse {
@@ -19784,9 +24339,16 @@ export interface TextStructureTestGrokPatternMatchedText {
 }
 
 export interface TextStructureTestGrokPatternRequest extends RequestBase {
+/** The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. */
   ecs_compatibility?: string
+  /** The Grok pattern to run on the text. */
   grok_pattern: GrokPattern
+  /** The lines of text to run the Grok pattern on. */
   text: string[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { ecs_compatibility?: never, grok_pattern?: never, text?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { ecs_compatibility?: never, grok_pattern?: never, text?: never }
 }
 
 export interface TextStructureTestGrokPatternResponse {
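A sketch of `TextStructureTestGrokPatternRequest`; pattern and text are illustrative:

----
// Dry-run a Grok pattern against sample lines before relying on it.
const result = await client.textStructure.testGrokPattern({
  grok_pattern: '%{TIMESTAMP_ISO8601:when} %{LOGLEVEL:level} %{GREEDYDATA:message}',
  text: [
    '2024-03-05T12:01:02 INFO starting node',
    'this line does not match'
  ]
})
for (const match of result.matches) {
  console.log(match.matched, match.fields)
}
----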
@@ -19850,20 +24412,37 @@ export interface TransformTimeSync {
 }
 
 export interface TransformDeleteTransformRequest extends RequestBase {
+/** Identifier for the transform. */
   transform_id: Id
+  /** If this value is false, the transform must be stopped before it can be deleted. If true, the transform is deleted regardless of its current state. */
   force?: boolean
+  /** If this value is true, the destination index is deleted together with the transform. If false, the destination index will not be deleted. */
   delete_dest_index?: boolean
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { transform_id?: never, force?: never, delete_dest_index?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { transform_id?: never, force?: never, delete_dest_index?: never, timeout?: never }
 }
 
 export type TransformDeleteTransformResponse = AcknowledgedResponseBase
 
 export interface TransformGetTransformRequest extends RequestBase {
+/** Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the `<transform_id>`, or by omitting the `<transform_id>`. */
   transform_id?: Names
+  /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */
   allow_no_match?: boolean
+  /** Skips the specified number of transforms. */
   from?: integer
+  /** Specifies the maximum number of transforms to obtain. */
   size?: integer
+  /** Excludes fields that were automatically added when creating the transform. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */
   exclude_generated?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never }
 }
 
 export interface TransformGetTransformResponse {
@@ -19907,11 +24486,20 @@ export interface TransformGetTransformStatsCheckpointing {
 }
 
 export interface TransformGetTransformStatsRequest extends RequestBase {
+/** Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the `<transform_id>`, or by omitting the `<transform_id>`. */
   transform_id: Names
+  /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */
   allow_no_match?: boolean
+  /** Skips the specified number of transforms. */
   from?: long
+  /** Specifies the maximum number of transforms to obtain. */
   size?: long
+  /** Controls the time to wait for the stats. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, timeout?: never }
 }
 
 export interface TransformGetTransformStatsResponse {
@@ -19962,17 +24550,32 @@ export interface TransformGetTransformStatsTransformStatsHealth {
 }
 
 export interface TransformPreviewTransformRequest extends RequestBase {
+/** Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in the request body. */
   transform_id?: Id
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** The destination for the transform. */
   dest?: TransformDestination
+  /** Free text description of the transform. */
   description?: string
+  /** The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. */
   frequency?: Duration
+  /** The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. */
   pivot?: TransformPivot
+  /** The source of the data for the transform. */
   source?: TransformSource
+  /** Defines optional transform settings. */
   settings?: TransformSettings
+  /** Defines the properties transforms require to run continuously. */
   sync?: TransformSyncContainer
+  /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. */
   retention_policy?: TransformRetentionPolicyContainer
+  /** The latest method transforms the data by finding the latest document for each unique key. */
   latest?: TransformLatest
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { transform_id?: never, timeout?: never, dest?: never, description?: never, frequency?: never, pivot?: never, source?: never, settings?: never, sync?: never, retention_policy?: never, latest?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { transform_id?: never, timeout?: never, dest?: never, description?: never, frequency?: never, pivot?: never, source?: never, settings?: never, sync?: never, retention_policy?: never, latest?: never }
 }
 
 export interface TransformPreviewTransformResponse {
@@ -19981,69 +24584,131 @@
 }
 
 export interface TransformPutTransformRequest extends RequestBase {
+/** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */
   transform_id: Id
+  /** When the transform is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source index pattern. You can use this parameter to skip the checks, for example when the source index does not exist until after the transform is created. The validations are always run when you start the transform, however, with the exception of privilege checks. */
   defer_validation?: boolean
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** The destination for the transform. */
   dest: TransformDestination
+  /** Free text description of the transform. */
   description?: string
+  /** The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is `1s` and the maximum is `1h`. */
   frequency?: Duration
+  /** The latest method transforms the data by finding the latest document for each unique key. */
   latest?: TransformLatest
+  /** Defines optional transform metadata. */
   _meta?: Metadata
+  /** The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. */
   pivot?: TransformPivot
+  /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. */
   retention_policy?: TransformRetentionPolicyContainer
+  /** Defines optional transform settings. */
   settings?: TransformSettings
+  /** The source of the data for the transform. */
   source: TransformSource
+  /** Defines the properties transforms require to run continuously. */
   sync?: TransformSyncContainer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, latest?: never, _meta?: never, pivot?: never, retention_policy?: never, settings?: never, source?: never, sync?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, latest?: never, _meta?: never, pivot?: never, retention_policy?: never, settings?: never, source?: never, sync?: never }
 }
 
 export type TransformPutTransformResponse = AcknowledgedResponseBase
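A sketch of `TransformPutTransformRequest` for a continuous pivot transform; index names, fields, and the transform id are illustrative:

----
// Group orders by customer and keep a running sum in a summary index.
await client.transform.putTransform({
  transform_id: 'orders-by-customer',
  source: { index: ['orders'] },
  dest: { index: 'orders-summary' },
  pivot: {
    group_by: { customer_id: { terms: { field: 'customer_id' } } },
    aggregations: { total: { sum: { field: 'amount' } } }
  },
  sync: { time: { field: '@timestamp', delay: '60s' } },
  frequency: '5m'
})
----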
 export interface TransformResetTransformRequest extends RequestBase {
+/** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */
   transform_id: Id
+  /** If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset. */
   force?: boolean
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { transform_id?: never, force?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { transform_id?: never, force?: never, timeout?: never }
 }
 
 export type TransformResetTransformResponse = AcknowledgedResponseBase
 
 export interface TransformScheduleNowTransformRequest extends RequestBase {
+/** Identifier for the transform. */
   transform_id: Id
+  /** Controls the time to wait for the scheduling to take place. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { transform_id?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { transform_id?: never, timeout?: never }
 }
 
 export type TransformScheduleNowTransformResponse = AcknowledgedResponseBase
 
 export interface TransformStartTransformRequest extends RequestBase {
+/** Identifier for the transform. */
   transform_id: Id
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. */
   from?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { transform_id?: never, timeout?: never, from?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { transform_id?: never, timeout?: never, from?: never }
 }
 
 export type TransformStartTransformResponse = AcknowledgedResponseBase
 
 export interface TransformStopTransformRequest extends RequestBase {
+/** Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. To stop all transforms, use `_all` or `*` as the identifier. */
   transform_id: Name
+  /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If it is true, the API returns a successful acknowledgement message when there are no matches. When there are only partial matches, the API stops the appropriate transforms. If it is false, the request returns a 404 status code when there are no matches or only partial matches. */
   allow_no_match?: boolean
+  /** If it is true, the API forcefully stops the transforms. */
   force?: boolean
+  /** Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the timeout expires, the request returns a timeout exception. However, the request continues processing and eventually moves the transform to a STOPPED state. */
   timeout?: Duration
+  /** If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, the transform stops as soon as possible. */
   wait_for_checkpoint?: boolean
+  /** If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns immediately and the indexer is stopped asynchronously in the background. */
   wait_for_completion?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, force?: never, timeout?: never, wait_for_checkpoint?: never, wait_for_completion?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, force?: never, timeout?: never, wait_for_checkpoint?: never, wait_for_completion?: never }
 }
 
 export type TransformStopTransformResponse = AcknowledgedResponseBase
 
 export interface TransformUpdateTransformRequest extends RequestBase {
+/** Identifier for the transform. */
   transform_id: Id
+  /** When true, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the transform is created. */
   defer_validation?: boolean
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** The destination for the transform. */
   dest?: TransformDestination
+  /** Free text description of the transform. */
   description?: string
+  /** The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. */
   frequency?: Duration
+  /** Defines optional transform metadata. */
   _meta?: Metadata
+  /** The source of the data for the transform. */
   source?: TransformSource
+  /** Defines optional transform settings. */
   settings?: TransformSettings
+  /** Defines the properties transforms require to run continuously. */
   sync?: TransformSyncContainer
+  /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. */
   retention_policy?: TransformRetentionPolicyContainer | null
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, _meta?: never, source?: never, settings?: never, sync?: never, retention_policy?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, _meta?: never, source?: never, settings?: never, sync?: never, retention_policy?: never }
 }
 
 export interface TransformUpdateTransformResponse {
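A sketch of the start/stop request types above, reusing the hypothetical transform id from the earlier example:

----
await client.transform.startTransform({ transform_id: 'orders-by-customer' })

// Stop gracefully at the next checkpoint and block until fully stopped.
await client.transform.stopTransform({
  transform_id: 'orders-by-customer',
  wait_for_checkpoint: true,
  wait_for_completion: true,
  timeout: '30s'
})
----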
@@ -20064,8 +24729,14 @@
 }
 
 export interface TransformUpgradeTransformsRequest extends RequestBase {
+/** When true, the request checks for updates but does not run them. */
   dry_run?: boolean
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { dry_run?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { dry_run?: never, timeout?: never }
 }
 
 export interface TransformUpgradeTransformsResponse {
@@ -20591,8 +25262,14 @@ export interface WatcherWebhookResult {
 }
 
 export interface WatcherAckWatchRequest extends RequestBase {
+/** The watch identifier. */
   watch_id: Name
+  /** A comma-separated list of the action identifiers to acknowledge. If you omit this parameter, all of the actions of the watch are acknowledged. */
   action_id?: Names
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { watch_id?: never, action_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { watch_id?: never, action_id?: never }
 }
 
 export interface WatcherAckWatchResponse {
@@ -20600,7 +25277,12 @@
 }
 
 export interface WatcherActivateWatchRequest extends RequestBase {
+/** The watch identifier. */
   watch_id: Name
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { watch_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { watch_id?: never }
 }
 
 export interface WatcherActivateWatchResponse {
@@ -20608,7 +25290,12 @@
 }
 
 export interface WatcherDeactivateWatchRequest extends RequestBase {
+/** The watch identifier. */
   watch_id: Name
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { watch_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { watch_id?: never }
 }
 
 export interface WatcherDeactivateWatchResponse {
@@ -20616,7 +25303,12 @@
 }
 
 export interface WatcherDeleteWatchRequest extends RequestBase {
+/** The watch identifier. */
   id: Name
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never }
 }
 
 export interface WatcherDeleteWatchResponse {
@@ -20626,15 +25318,27 @@
 }
 
 export interface WatcherExecuteWatchRequest extends RequestBase {
+/** The watch identifier. */
   id?: Id
+  /** Defines whether the watch runs in debug mode. */
   debug?: boolean
+  /** Determines how to handle the watch actions as part of the watch execution. */
   action_modes?: Record<string, WatcherActionExecutionMode>
+  /** When present, the watch uses this object as a payload instead of executing its own input. */
   alternative_input?: Record<string, any>
+  /** When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. */
   ignore_condition?: boolean
+  /** When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter. */
   record_execution?: boolean
   simulated_actions?: WatcherSimulatedActions
+  /** This structure is parsed as the data of the trigger event that will be used during the watch execution. */
   trigger_data?: WatcherScheduleTriggerEvent
+  /** When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set. */
   watch?: WatcherWatch
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, debug?: never, action_modes?: never, alternative_input?: never, ignore_condition?: never, record_execution?: never, simulated_actions?: never, trigger_data?: never, watch?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, debug?: never, action_modes?: never, alternative_input?: never, ignore_condition?: never, record_execution?: never, simulated_actions?: never, trigger_data?: never, watch?: never }
 }
 
 export interface WatcherExecuteWatchResponse {
@@ -20657,7 +25361,12 @@ export interface WatcherExecuteWatchWatchRecord {
 }
 
 export interface WatcherGetSettingsRequest extends RequestBase {
+/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { master_timeout?: never }
 }
 
 export interface WatcherGetSettingsResponse {
@@ -20665,7 +25374,12 @@
 }
 
 export interface WatcherGetWatchRequest extends RequestBase {
+/** The watch identifier. */
   id: Name
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never }
 }
 
 export interface WatcherGetWatchResponse {
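A sketch of `WatcherExecuteWatchRequest` for a dry run; the watch id is illustrative:

----
// Force the condition, simulate every action, and skip history records.
const run = await client.watcher.executeWatch({
  id: 'cluster_health_watch',
  ignore_condition: true,
  action_modes: { _all: 'simulate' },
  record_execution: false
})
console.log(run.watch_record.result)
----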
If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. */ throttle_period?: Duration + /** Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. */ throttle_period_in_millis?: DurationValue + /** The transform that processes the watch payload to prepare it for the watch actions. */ transform?: TransformContainer + /** The trigger that defines when the watch should run. */ trigger?: WatcherTriggerContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, active?: never, if_primary_term?: never, if_seq_no?: never, version?: never, actions?: never, condition?: never, input?: never, metadata?: never, throttle_period?: never, throttle_period_in_millis?: never, transform?: never, trigger?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, active?: never, if_primary_term?: never, if_seq_no?: never, version?: never, actions?: never, condition?: never, input?: never, metadata?: never, throttle_period?: never, throttle_period_in_millis?: never, transform?: never, trigger?: never } } export interface WatcherPutWatchResponse { @@ -20703,11 +25434,20 @@ export interface WatcherPutWatchResponse { } export interface WatcherQueryWatchesRequest extends RequestBase { +/** The offset from the first result to fetch. It must be non-negative. */ from?: integer + /** The number of hits to return. It must be non-negative. */ size?: integer + /** A query that filters the watches to be returned. */ query?: QueryDslQueryContainer + /** One or more fields used to sort the search results. */ sort?: Sort + /** Retrieve the next page of hits using a set of sort values from the previous page. */ search_after?: SortResults + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { from?: never, size?: never, query?: never, sort?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never, query?: never, sort?: never, search_after?: never } } export interface WatcherQueryWatchesResponse { @@ -20716,14 +25456,25 @@ export interface WatcherQueryWatchesResponse { } export interface WatcherStartRequest extends RequestBase { +/** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export type WatcherStartResponse = AcknowledgedResponseBase export interface WatcherStatsRequest extends RequestBase { +/** Defines which additional metrics are included in the response. */ metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[] + /** Defines whether stack traces are generated for each watch that is running. */ emit_stacktraces?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { metric?: never, emit_stacktraces?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { metric?: never, emit_stacktraces?: never } } export interface WatcherStatsResponse { @@ -20759,16 +25510,27 @@ export interface WatcherStatsWatcherNodeStats { export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping' export interface WatcherStopRequest extends RequestBase { +/** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export type WatcherStopResponse = AcknowledgedResponseBase export interface WatcherUpdateSettingsRequest extends RequestBase { +/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration 'index.auto_expand_replicas'?: string 'index.number_of_replicas'?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never, 'index.auto_expand_replicas'?: never, 'index.number_of_replicas'?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never, 'index.auto_expand_replicas'?: never, 'index.number_of_replicas'?: never } } export interface WatcherUpdateSettingsResponse { @@ -20797,7 +25559,6 @@ export interface XpackInfoFeatures { enterprise_search: XpackInfoFeature eql: XpackInfoFeature esql: XpackInfoFeature - frozen_indices: XpackInfoFeature graph: XpackInfoFeature ilm: XpackInfoFeature logstash: XpackInfoFeature @@ -20832,9 +25593,16 @@ export interface XpackInfoNativeCodeInformation { } export interface XpackInfoRequest extends RequestBase { +/** A comma-separated list of the information categories to include in the response. For example, `build,license,features`. */ categories?: XpackInfoXPackCategory[] + /** If this param is used it must be set to true */ accept_enterprise?: boolean + /** Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. */ human?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { categories?: never, accept_enterprise?: never, human?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { categories?: never, accept_enterprise?: never, human?: never } } export interface XpackInfoResponse { @@ -20968,10 +25736,6 @@ export interface XpackUsageFlattened extends XpackUsageBase { field_count: integer } -export interface XpackUsageFrozenIndices extends XpackUsageBase { - indices_count: long -} - export interface XpackUsageHealthStatistics extends XpackUsageBase { invocations: XpackUsageInvocations } @@ -20983,7 +25747,7 @@ export interface XpackUsageIlm { export interface XpackUsageIlmPolicyStatistics { indices_managed: integer - phases: IlmPhases + phases: XpackUsagePhases } export interface XpackUsageInvocations { @@ -21095,6 +25859,19 @@ export interface XpackUsageMonitoring extends XpackUsageBase { enabled_exporters: Record } +export interface XpackUsagePhase { + actions: string[] + min_age: DurationValue +} + +export interface XpackUsagePhases { + cold?: XpackUsagePhase + delete?: XpackUsagePhase + frozen?: XpackUsagePhase + hot?: XpackUsagePhase + warm?: XpackUsagePhase +} + export interface XpackUsageQuery { count?: integer failed?: integer @@ -21118,7 +25895,12 @@ export interface XpackUsageRealmCache { } export interface XpackUsageRequest extends RequestBase { +/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export interface XpackUsageResponse { @@ -21134,7 +25916,6 @@ export interface XpackUsageResponse { enrich?: XpackUsageBase eql: XpackUsageEql flattened?: XpackUsageFlattened - frozen_indices: XpackUsageFrozenIndices graph: XpackUsageBase health_api?: XpackUsageHealthStatistics ilm: XpackUsageIlm From 869174f953e83022c36ff2523d67f6b33be3722f Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 28 Jan 2025 12:28:42 -0600 Subject: [PATCH 460/647] Bump version to 9.0.0-alpha.2 (#2574) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 1b7e082f7..759bd0636 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@elastic/elasticsearch", - "version": "9.0.0-alpha.1", + "version": "9.0.0-alpha.2", "versionCanary": "9.0.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", From 39b2700adde448f85271c114673c15d4209c2357 Mon Sep 17 00:00:00 2001 From: Jan Calanog Date: Tue, 28 Jan 2025 22:55:26 +0100 Subject: [PATCH 461/647] github-action: Add AsciiDoc freeze warning (#2588) --- .../workflows/comment-on-asciidoc-changes.yml | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 .github/workflows/comment-on-asciidoc-changes.yml diff --git a/.github/workflows/comment-on-asciidoc-changes.yml b/.github/workflows/comment-on-asciidoc-changes.yml new file mode 100644 index 000000000..f218f0fd1 --- /dev/null +++ b/.github/workflows/comment-on-asciidoc-changes.yml @@ -0,0 +1,20 @@ +--- +name: Comment on PR for .asciidoc changes + +on: + pull_request: + types: + - synchronize + - opened + - reopened + branches: + - main + - master + - "9.0" + +jobs: + comment-on-asciidoc-change: + permissions: + contents: read + 
pull-requests: write + uses: elastic/docs-builder/.github/workflows/comment-on-asciidoc-changes.yml@main From 6dbf91a9c3f66459d43a190190ee022c13b209b1 Mon Sep 17 00:00:00 2001 From: Jan Calanog Date: Thu, 30 Jan 2025 01:51:52 +0100 Subject: [PATCH 462/647] github-action: Add AsciiDoc freeze warning (#2589) * github-action: Add AsciiDoc freeze warning * github-action: Add AsciiDoc freeze warning --- .github/workflows/comment-on-asciidoc-changes.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/comment-on-asciidoc-changes.yml b/.github/workflows/comment-on-asciidoc-changes.yml index f218f0fd1..8e5f836b1 100644 --- a/.github/workflows/comment-on-asciidoc-changes.yml +++ b/.github/workflows/comment-on-asciidoc-changes.yml @@ -2,7 +2,8 @@ name: Comment on PR for .asciidoc changes on: - pull_request: + # We need to use pull_request_target to be able to comment on PRs from forks + pull_request_target: types: - synchronize - opened From 0ad42ff1a2aa46d9082b4966040f6c93325317c0 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 30 Jan 2025 10:37:53 -0600 Subject: [PATCH 463/647] Drop default 30-second timeout (#2573) --- docs/basic-config.asciidoc | 3 +-- docs/changelog.asciidoc | 5 +++++ docs/child.asciidoc | 23 +++++++++++------------ docs/connecting.asciidoc | 4 ++-- docs/timeout-best-practices.asciidoc | 8 +++----- package.json | 4 ++-- src/client.ts | 6 +++--- test/unit/client.test.ts | 28 ++++++++++++++++++---------- test/unit/helpers/bulk.test.ts | 4 ++-- test/unit/helpers/msearch.test.ts | 2 +- 10 files changed, 48 insertions(+), 39 deletions(-) diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index 799866f93..0d4a0a73d 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -13,7 +13,6 @@ const client = new Client({ cloud: { id: '' }, auth: { apiKey: 'base64EncodedKey' }, maxRetries: 5, - requestTimeout: 60000, sniffOnStart: true }) ---- @@ -82,7 +81,7 @@ _Default:_ `3` |`requestTimeout` |`number` - Max request timeout in milliseconds for each request. + -_Default:_ `30000` +_Default:_ No value |`pingTimeout` |`number` - Max ping request timeout in milliseconds for each request. + diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index dec73d23e..78b23e45a 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -12,6 +12,11 @@ In 8.0, the top-level `body` parameter that was available on all API functions <>. In 9.0 this property is completely removed. +[discrete] +===== Remove the default 30-second timeout on all requests sent to Elasticsearch + +Setting HTTP timeouts on Elasticsearch requests goes against Elastic's recommendations. See <> for more information. + [discrete] === 8.17.0 diff --git a/docs/child.asciidoc b/docs/child.asciidoc index 0bd7ace21..25575bbe2 100644 --- a/docs/child.asciidoc +++ b/docs/child.asciidoc @@ -1,22 +1,22 @@ [[child]] === Creating a child client -There are some use cases where you may need multiple instances of the client. -You can easily do that by calling `new Client()` as many times as you need, but -you will lose all the benefits of using one single client, such as the long -living connections and the connection pool handling. To avoid this problem, the -client offers a `child` API, which returns a new client instance that shares the +There are some use cases where you may need multiple instances of the client. 
+You can easily do that by calling `new Client()` as many times as you need, but
+you will lose all the benefits of using one single client, such as the long
+living connections and the connection pool handling. To avoid this problem, the
+client offers a `child` API, which returns a new client instance that shares the
 connection pool with the parent client.

-NOTE: The event emitter is shared between the parent and the child(ren). If you
-extend the parent client, the child client will have the same extensions, while
+NOTE: The event emitter is shared between the parent and the child(ren). If you
+extend the parent client, the child client will have the same extensions, whereas
 if the child client adds an extension, the parent client will not be extended.

-You can pass to the `child` every client option you would pass to a normal
-client, but the connection pool specific options (`ssl`, `agent`, `pingTimeout`,
+You can pass to the `child` every client option you would pass to a normal
+client, except the connection pool specific options (`ssl`, `agent`, `pingTimeout`,
 `Connection`, and `resurrectStrategy`).

-CAUTION: If you call `close` in any of the parent/child clients, every client
+CAUTION: If you call `close` in any of the parent/child clients, every client
 will be closed.

 [source,js]
 ----
@@ -28,9 +28,8 @@ const client = new Client({
 })
 const child = client.child({
   headers: { 'x-foo': 'bar' },
-  requestTimeout: 1000
 })
 client.info().then(console.log, console.log)
 child.info().then(console.log, console.log)
-----
\ No newline at end of file
+----
diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc
index 4646ee5f1..f87961edb 100644
--- a/docs/connecting.asciidoc
+++ b/docs/connecting.asciidoc
@@ -414,8 +414,8 @@ The supported request specific options are:
 _Default:_ `null`

 |`requestTimeout`
-|`number | string` - Max request timeout for the request in milliseconds, it overrides the client default. +
-_Default:_ `30000`
+|`number | string | null` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#_http_client_configuration[Elasticsearch best practices for HTTP clients] for more info. +
+_Default:_ No timeout

 |`retryOnTimeout`
 |`boolean` - Retry requests that have timed out.
diff --git a/docs/timeout-best-practices.asciidoc b/docs/timeout-best-practices.asciidoc
index 0d2fb4772..5116034af 100644
--- a/docs/timeout-best-practices.asciidoc
+++ b/docs/timeout-best-practices.asciidoc
@@ -1,10 +1,8 @@
 [[timeout-best-practices]]
 === Timeout best practices

-This client is configured by default to operate like many HTTP client libraries do, by using a relatively short (30 second) timeout on all requests sent to {es}, raising a `TimeoutError` when that time period has elapsed without receiving a response. However, {es} will always eventually respond to any request, even if it takes several minutes. The {ref}/modules-network.html#_http_client_configuration[official {es} recommendation] is to disable response timeouts entirely by default.
+Starting in 9.0.0, this client is configured to not time out any HTTP request by default. {es} will always eventually respond to any request, even if it takes several minutes. Reissuing a request that {es} has not yet responded to can cause performance side effects. See the {ref}/modules-network.html#_http_client_configuration[official {es} recommendations for HTTP clients] for more information.
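+
+For example, if you do want a hard upper bound on request time, you can opt back in by passing `requestTimeout` (in milliseconds) when instantiating the client. This is a minimal sketch; the node URL is a placeholder:
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  node: 'http://localhost:9200/',
+  requestTimeout: 30000 // restores the pre-9.0 default of 30 seconds per request
+})
+----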
-Since changing this default would be a breaking change, we won't do that until the next major release. In the meantime, here is our recommendation for properly configuring your client:
+Prior to 9.0, this client was configured by default to operate like many HTTP client libraries do, by using a relatively short (30 second) timeout on all requests sent to {es}, raising a `TimeoutError` when that time period elapsed without receiving a response.

-* Ensure keep-alive is enabled; this is the default, so no settings need to be changed, unless you have set `agent` to `false` or provided an alternate `agent` that disables keep-alive
-* If using the default `UndiciConnection`, disable request timeouts by setting `timeout` to `0`
-* If using the legacy `HttpConnection`, set `timeout` to a very large number (e.g. `86400000`, or one day)
+If your circumstances require you to set timeouts on Elasticsearch requests, setting `requestTimeout` to a millisecond value will cause this client to operate as it did prior to 9.0.
diff --git a/package.json b/package.json
index 759bd0636..17366f82c 100644
--- a/package.json
+++ b/package.json
@@ -57,7 +57,7 @@
   },
   "devDependencies": {
     "@elastic/request-converter": "8.17.0",
-    "@sinonjs/fake-timers": "github:sinonjs/fake-timers#48f089f",
+    "@sinonjs/fake-timers": "14.0.0",
     "@types/debug": "4.1.12",
     "@types/ms": "0.7.34",
     "@types/node": "22.10.7",
@@ -89,7 +89,7 @@
     "zx": "7.2.3"
   },
   "dependencies": {
-    "@elastic/transport": "^8.9.1",
+    "@elastic/transport": "9.0.0-alpha.1",
     "apache-arrow": "^18.0.0",
     "tslib": "^2.4.0"
   },
diff --git a/src/client.ts b/src/client.ts
index 2549a5b37..fcbc5d35b 100644
--- a/src/client.ts
+++ b/src/client.ts
@@ -108,14 +108,15 @@ export interface ClientOptions {
    * @defaultValue 3 */
  maxRetries?: number
  /** @property requestTimeout Max request timeout in milliseconds for each request
-   * @defaultValue 30000 */
+   * @defaultValue No timeout
+   * @remarks Read [the Elasticsearch docs](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#_http_client_configuration) about HTTP client configuration for details. */
  requestTimeout?: number
  /** @property pingTimeout Max number of milliseconds a `ClusterConnectionPool` will wait when pinging nodes before marking them dead
    * @defaultValue 3000 */
  pingTimeout?: number
  /** @property sniffInterval Perform a sniff operation every `n` milliseconds
    * @remarks Sniffing might not be the best solution for you. Read https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how to learn more.
-   * @defaultValue */
+   * @defaultValue false */
  sniffInterval?: number | boolean
  /** @property sniffOnStart Perform a sniff once the client is started
    * @remarks Sniffing might not be the best solution for you. Read https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how to learn more.
@@ -244,7 +245,6 @@ export default class Client extends API {
       Serializer,
       ConnectionPool: (opts.cloud != null) ? CloudConnectionPool : WeightedConnectionPool,
       maxRetries: 3,
-      requestTimeout: 30000,
       pingTimeout: 3000,
       sniffInterval: false,
       sniffOnStart: false,
diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts
index cc9868cfe..fc2af683c 100644
--- a/test/unit/client.test.ts
+++ b/test/unit/client.test.ts
@@ -17,9 +17,10 @@
  * under the License.
*/ -import * as http from 'http' +import * as http from 'node:http' +import { URL } from 'node:url' +import { setTimeout } from 'node:timers/promises' import { test } from 'tap' -import { URL } from 'url' import FakeTimers from '@sinonjs/fake-timers' import { buildServer, connection } from '../utils' import { Client, errors } from '../..' @@ -451,30 +452,37 @@ test('Ensure new client instance stores requestTimeout for each connection', t = t.end() }) -test('Ensure new client does not time out at default (30s) when client sets requestTimeout', async t => { - const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'] }) +test('No request timeout is set by default', t => { + const client = new Client({ + node: { url: new URL('http://localhost:9200/') }, + }) + t.equal(client.connectionPool.connections[0].timeout, null) + t.end() +}) + +test('Ensure new client does not time out if requestTimeout is not set', async t => { + const clock = FakeTimers.install({ toFake: ['setTimeout'] }) t.teardown(() => clock.uninstall()) function handler (_req: http.IncomingMessage, res: http.ServerResponse) { - setTimeout(() => { - t.pass('timeout ended') + setTimeout(1000 * 60 * 60).then(() => { + t.ok('timeout ended') res.setHeader('content-type', 'application/json') res.end(JSON.stringify({ success: true })) - }, 31000) // default is 30000 - clock.runToLast() + }) + clock.tick(1000 * 60 * 60) } const [{ port }, server] = await buildServer(handler) const client = new Client({ node: `http://localhost:${port}`, - requestTimeout: 60000 }) try { await client.transport.request({ method: 'GET', path: '/' }) } catch (error) { - t.fail('timeout error hit') + t.fail('Error should not be thrown', error) } finally { server.stop() t.end() diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts index 3871c348f..d45d2d003 100644 --- a/test/unit/helpers/bulk.test.ts +++ b/test/unit/helpers/bulk.test.ts @@ -1611,7 +1611,7 @@ test('errors', t => { test('Flush interval', t => { t.test('Slow producer', async t => { - const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'] }) + const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'], shouldClearNativeTimers: true }) t.teardown(() => clock.uninstall()) let count = 0 @@ -1663,7 +1663,7 @@ test('Flush interval', t => { }) t.test('Abort operation', async t => { - const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'] }) + const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'], shouldClearNativeTimers: true }) t.teardown(() => clock.uninstall()) let count = 0 diff --git a/test/unit/helpers/msearch.test.ts b/test/unit/helpers/msearch.test.ts index e80c5977c..ba2457587 100644 --- a/test/unit/helpers/msearch.test.ts +++ b/test/unit/helpers/msearch.test.ts @@ -583,7 +583,7 @@ test('Multiple searches (concurrency = 1)', t => { test('Flush interval', t => { t.plan(2) - const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'] }) + const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'], shouldClearNativeTimers: true }) t.teardown(() => clock.uninstall()) const MockConnection = connection.buildMockConnection({ From d2c63b4c5f67a4c4499f5f14a699a2a42d9c5d8e Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 30 Jan 2025 11:45:47 -0600 Subject: [PATCH 464/647] bump 9.0.0 alpha.3 (#2591) --- docs/examples/bulk.asciidoc | 4 ++-- docs/examples/exists.asciidoc | 4 ++-- docs/examples/get.asciidoc | 8 ++++---- docs/examples/ignore.asciidoc | 2 +- 
 docs/examples/scroll.asciidoc | 24 ++++++++++++------------
 docs/examples/update.asciidoc | 12 ++++++------
 docs/examples/update_by_query.asciidoc | 8 ++++----
 package.json | 2 +-
 8 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/docs/examples/bulk.asciidoc b/docs/examples/bulk.asciidoc
index 74725c9e9..c357d5130 100644
--- a/docs/examples/bulk.asciidoc
+++ b/docs/examples/bulk.asciidoc
@@ -1,10 +1,10 @@
 [[bulk_examples]]
 === Bulk

-With the {jsclient}/api-reference.html#_bulk[`bulk` API], you can perform multiple index/delete operations in a
+With the {jsclient}/api-reference.html#_bulk[`bulk` API], you can perform multiple index/delete operations in a
 single API call. The `bulk` API significantly increases indexing speed.

-NOTE: You can also use the {jsclient}/client-helpers.html[bulk helper].
+NOTE: You can also use the <>.

 [source,js]
 ----
diff --git a/docs/examples/exists.asciidoc b/docs/examples/exists.asciidoc
index 29a39a196..3553796fb 100644
--- a/docs/examples/exists.asciidoc
+++ b/docs/examples/exists.asciidoc
@@ -6,7 +6,7 @@ Check that the document `/game-of-thrones/1` exists.
 NOTE: Since this API uses the `HEAD` method, the body value will be boolean.

 [source,js]
----------
+----
 'use strict'

 const { Client } = require('@elastic/elasticsearch')
@@ -34,4 +34,4 @@ async function run () {
 }

 run().catch(console.log)
----------
\ No newline at end of file
+----
diff --git a/docs/examples/get.asciidoc b/docs/examples/get.asciidoc
index f6dd94ddf..fe0268647 100644
--- a/docs/examples/get.asciidoc
+++ b/docs/examples/get.asciidoc
@@ -1,12 +1,12 @@
 [[get_examples]]
 === Get

-The get API allows to get a typed JSON document from the index based on its id.
-The following example gets a JSON document from an index called
+The get API allows you to get a typed JSON document from the index based on its id.
+The following example gets a JSON document from an index called
 `game-of-thrones`, under a type called `_doc`, with id valued `'1'`.

 [source,js]
----------
+----
 'use strict'

 const { Client } = require('@elastic/elasticsearch')
@@ -34,4 +34,4 @@ async function run () {
 }

 run().catch(console.log)
----------
\ No newline at end of file
+----
diff --git a/docs/examples/ignore.asciidoc b/docs/examples/ignore.asciidoc
index de5577dcd..0b4c6fa98 100644
--- a/docs/examples/ignore.asciidoc
+++ b/docs/examples/ignore.asciidoc
@@ -62,4 +62,4 @@ async function run () {
 }

 run().catch(console.log)
-----
\ No newline at end of file
+----
diff --git a/docs/examples/scroll.asciidoc b/docs/examples/scroll.asciidoc
index 0f23a1bc1..87f302876 100644
--- a/docs/examples/scroll.asciidoc
+++ b/docs/examples/scroll.asciidoc
@@ -1,25 +1,25 @@
 [[scroll_examples]]
 === Scroll

-While a search request returns a single “page” of results, the scroll API can be
-used to retrieve large numbers of results (or even all results) from a single
-search request, in much the same way as you would use a cursor on a traditional
+While a search request returns a single “page” of results, the scroll API can be
+used to retrieve large numbers of results (or even all results) from a single
+search request, in much the same way as you would use a cursor on a traditional
 database.
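+
+As a quick sketch of the request/continuation cycle (the index name, `scroll` duration, and `match_all` query below are placeholders; complete runnable examples follow):
+
+[source,js]
+----
+// inside an async function, with `client` already instantiated
+const response = await client.search({
+  index: 'my-index',
+  scroll: '30s', // keep the search context alive for 30 seconds
+  query: { match_all: {} }
+})
+
+// fetch the next page by passing back the scroll_id from the previous response
+const nextPage = await client.scroll({
+  scroll_id: response._scroll_id,
+  scroll: '30s'
+})
+----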
-Scrolling is not intended for real time user requests, but rather for processing
-large amounts of data, for example in order to reindex the contents of one index
+Scrolling is not intended for real-time user requests, but rather for processing
+large amounts of data, for example in order to reindex the contents of one index
 into a new index with a different configuration.

-NOTE: The results that are returned from a scroll request reflect the state of
-the index at the time that the initial search request was made, like a snapshot
-in time. Subsequent changes to documents (index, update or delete) will only
+NOTE: The results that are returned from a scroll request reflect the state of
+the index at the time that the initial search request was made, like a snapshot
+in time. Subsequent changes to documents (index, update or delete) will only
 affect later search requests.

-In order to use scrolling, the initial search request should specify the scroll
-parameter in the query string, which tells {es} how long it should keep the
+In order to use scrolling, the initial search request should specify the scroll
+parameter in the query string, which tells {es} how long it should keep the
 "search context" alive.

-NOTE: Did you know that we provide an helper for sending scroll requests? You can find it {jsclient}/client-helpers.html[here].
+NOTE: Did you know that we provide a helper for sending scroll requests? You can find it <>.

 [source,js]
 ----
@@ -113,7 +113,7 @@ async function run () {
 run().catch(console.log)
 ----

-Another cool usage of the `scroll` API can be done with Node.js ≥ 10, by using
+Another cool usage of the `scroll` API can be done with Node.js ≥ 10 by using
 async iteration!

 [source,js]
diff --git a/docs/examples/update.asciidoc b/docs/examples/update.asciidoc
index 3c83acd25..b7e0272ae 100644
--- a/docs/examples/update.asciidoc
+++ b/docs/examples/update.asciidoc
@@ -1,12 +1,12 @@
 [[update_examples]]
 === Update

-The update API allows updates of a specific document using the given script. In
-the following example, we will index a document that also tracks how many times
+The update API allows updates of a specific document using the given script. In
+the following example, we will index a document that also tracks how many times
 a character has said the given quote, and then we will update the `times` field.

 [source,js]
----------
+----
 'use strict'

 const { Client } = require('@elastic/elasticsearch')
@@ -48,12 +48,12 @@ async function run () {
 run().catch(console.log)
----------
+----

 With the update API, you can also run a partial update of a document.

 [source,js]
----------
+----
 'use strict'

 const { Client } = require('@elastic/elasticsearch')
@@ -92,4 +92,4 @@ run().catch(console.log)
----------
\ No newline at end of file
+----
diff --git a/docs/examples/update_by_query.asciidoc b/docs/examples/update_by_query.asciidoc
index d17b5c455..80c52fd90 100644
--- a/docs/examples/update_by_query.asciidoc
+++ b/docs/examples/update_by_query.asciidoc
@@ -1,12 +1,12 @@
 [[update_by_query_examples]]
 === Update By Query

-The simplest usage of _update_by_query just performs an update on every document
-in the index without changing the source. This is useful to pick up a new
+The simplest usage of _update_by_query just performs an update on every document
+in the index without changing the source. This is useful to pick up a new
 property or some other online mapping change.
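+
+Conceptually, the call boils down to the following sketch (the index name is a placeholder, and `client` is assumed to be already instantiated; the complete runnable example follows):
+
+[source,js]
+----
+// update every document in place without changing its source
+await client.updateByQuery({
+  index: 'game-of-thrones',
+  conflicts: 'proceed'
+})
+----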
[source,js] ---------- +---- 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -57,4 +57,4 @@ async function run () { run().catch(console.log) ---------- +---- diff --git a/package.json b/package.json index 17366f82c..902abf626 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@elastic/elasticsearch", - "version": "9.0.0-alpha.2", + "version": "9.0.0-alpha.3", "versionCanary": "9.0.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", From 11ff146ae804a862ea517fdeed36840b117f5066 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 3 Feb 2025 12:11:08 -0600 Subject: [PATCH 465/647] Update actions/setup-node digest to 1d0ff46 (#2594) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .github/workflows/nodejs.yml | 4 ++-- .github/workflows/npm-publish.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 85f8131d3..bc73e0713 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -41,7 +41,7 @@ jobs: persist-credentials: false - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4 + uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4 with: node-version: ${{ matrix.node-version }} @@ -71,7 +71,7 @@ jobs: persist-credentials: false - name: Use Node.js - uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4 + uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4 with: node-version: 22.x diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index 0eedbc9cf..6040880b4 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -16,7 +16,7 @@ jobs: with: persist-credentials: false ref: ${{ github.event.inputs.branch }} - - uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4 + - uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4 with: node-version: "22.x" registry-url: "https://registry.npmjs.org/" From 86169003b4ae10974402220fc00520b753269cc5 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 3 Feb 2025 19:48:17 +0100 Subject: [PATCH 466/647] Auto-generated API code (#2595) --- .../00ad41bde67beac991534ae0e04b1296.asciidoc | 11 + .../0f028f71f04c1d569fab402869565a84.asciidoc | 15 + .../12adea5d76f73d94d80d42f53f67563f.asciidoc | 11 + .../1ead35c954963e83f89872048dabdbe9.asciidoc | 19 + .../272e27bf1fcc4fe5dbd4092679dd0342.asciidoc | 11 + ...2afd49985950cbcccf727fa858d00067.asciidoc} | 6 +- .../31832bd71c31c46a1ccf8d1c210d89d4.asciidoc | 28 + .../32c8c86702ccd68eb70f1573409c2a1f.asciidoc | 31 + ...36792c81c053e0555407d1e83e7e054f.asciidoc} | 5 +- .../3722dad876023e0757138dd5a6d3240e.asciidoc | 23 + .../3a204b57072a104d9b50f3a9e064a8f6.asciidoc | 19 + .../3bc4a3681e3ea9cb3de49f72085807d8.asciidoc | 61 + .../4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc | 17 + .../537bce129338d9227bccb6a0283dab45.asciidoc | 12 + .../59aa5216630f80c5dc298fc5bba4a819.asciidoc | 10 + .../6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc | 23 + .../6e498b9dc753b94abf2618c407fa5cd8.asciidoc | 16 + .../8621c05cc7cf3880bde751f6670a0c3a.asciidoc | 15 + .../89f547649895176c246bb8c41313ff21.asciidoc | 12 + .../8c47c80139f40f25db44f5781ca2dfbe.asciidoc | 10 + 
.../a46f566ca031375658c22f89b87dc6d2.asciidoc | 12 + .../a675fafa7c688cb3ea1be09bf887ebf0.asciidoc | 12 + ...ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc} | 1 + ...bcd1afb793240b1dddd9fa5d3f21192b.asciidoc} | 10 +- .../c3b77e11b16e37e9e37e28dec922432e.asciidoc | 11 + .../d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc | 11 + .../d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc | 44 + .../d6a4548b29e939fb197189c20c7c016f.asciidoc | 17 + .../dd16c9c981551c9da47ebb5ef5105fa0.asciidoc | 57 + ...e715fb8c792bf09ac98f0ceca99beb84.asciidoc} | 4 +- ...f994498dd6576be657dedce2822d2b9e.asciidoc} | 7 + ...ffda10edaa7ce087703193c3cb95a426.asciidoc} | 8 + docs/reference.asciidoc | 1722 ++++++++++------- src/api/api/async_search.ts | 8 +- src/api/api/autoscaling.ts | 8 +- src/api/api/bulk.ts | 2 +- src/api/api/cat.ts | 52 +- src/api/api/ccr.ts | 28 +- src/api/api/clear_scroll.ts | 2 +- src/api/api/close_point_in_time.ts | 2 +- src/api/api/cluster.ts | 34 +- src/api/api/connector.ts | 56 +- src/api/api/count.ts | 4 +- src/api/api/create.ts | 2 +- src/api/api/dangling_indices.ts | 6 +- src/api/api/delete.ts | 2 +- src/api/api/delete_by_query.ts | 2 +- src/api/api/delete_by_query_rethrottle.ts | 2 +- src/api/api/delete_script.ts | 2 +- src/api/api/enrich.ts | 10 +- src/api/api/eql.ts | 8 +- src/api/api/esql.ts | 48 +- src/api/api/exists.ts | 2 +- src/api/api/exists_source.ts | 2 +- src/api/api/explain.ts | 4 +- src/api/api/features.ts | 4 +- src/api/api/field_caps.ts | 2 +- src/api/api/fleet.ts | 4 +- src/api/api/get.ts | 2 +- src/api/api/get_script.ts | 2 +- src/api/api/get_script_context.ts | 2 +- src/api/api/get_script_languages.ts | 2 +- src/api/api/get_source.ts | 2 +- src/api/api/graph.ts | 2 +- src/api/api/health_report.ts | 2 +- src/api/api/ilm.ts | 22 +- src/api/api/index.ts | 2 +- src/api/api/indices.ts | 136 +- src/api/api/inference.ts | 14 +- src/api/api/info.ts | 2 +- src/api/api/ingest.ts | 18 +- src/api/api/knn_search.ts | 4 +- src/api/api/license.ts | 16 +- src/api/api/logstash.ts | 6 +- src/api/api/mget.ts | 4 +- src/api/api/migration.ts | 6 +- src/api/api/ml.ts | 154 +- src/api/api/monitoring.ts | 2 +- src/api/api/msearch.ts | 2 +- src/api/api/msearch_template.ts | 4 +- src/api/api/mtermvectors.ts | 4 +- src/api/api/nodes.ts | 14 +- src/api/api/open_point_in_time.ts | 2 +- src/api/api/ping.ts | 2 +- src/api/api/put_script.ts | 2 +- src/api/api/query_rules.ts | 16 +- src/api/api/rank_eval.ts | 2 +- src/api/api/reindex.ts | 2 +- src/api/api/reindex_rethrottle.ts | 2 +- src/api/api/render_search_template.ts | 6 +- src/api/api/rollup.ts | 16 +- src/api/api/scroll.ts | 2 +- src/api/api/search.ts | 2 +- src/api/api/search_application.ts | 20 +- src/api/api/search_mvt.ts | 4 +- src/api/api/search_shards.ts | 4 +- src/api/api/search_template.ts | 2 +- src/api/api/searchable_snapshots.ts | 8 +- src/api/api/security.ts | 128 +- src/api/api/shutdown.ts | 6 +- src/api/api/simulate.ts | 2 +- src/api/api/slm.ts | 18 +- src/api/api/snapshot.ts | 26 +- src/api/api/sql.ts | 12 +- src/api/api/ssl.ts | 2 +- src/api/api/synonyms.ts | 14 +- src/api/api/tasks.ts | 6 +- src/api/api/terms_enum.ts | 4 +- src/api/api/termvectors.ts | 4 +- src/api/api/text_structure.ts | 8 +- src/api/api/transform.ts | 34 +- src/api/api/update.ts | 2 +- src/api/api/update_by_query.ts | 2 +- src/api/api/update_by_query_rethrottle.ts | 2 +- src/api/api/watcher.ts | 26 +- src/api/api/xpack.ts | 4 +- src/api/types.ts | 347 ++-- 117 files changed, 2344 insertions(+), 1386 deletions(-) create mode 100644 
docs/doc_examples/00ad41bde67beac991534ae0e04b1296.asciidoc create mode 100644 docs/doc_examples/0f028f71f04c1d569fab402869565a84.asciidoc create mode 100644 docs/doc_examples/12adea5d76f73d94d80d42f53f67563f.asciidoc create mode 100644 docs/doc_examples/1ead35c954963e83f89872048dabdbe9.asciidoc create mode 100644 docs/doc_examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc rename docs/doc_examples/{d29031409016b2b798148ef173a196ae.asciidoc => 2afd49985950cbcccf727fa858d00067.asciidoc} (83%) create mode 100644 docs/doc_examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc create mode 100644 docs/doc_examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc rename docs/doc_examples/{37f367ca81a16d3aef4ef7126ec33a2e.asciidoc => 36792c81c053e0555407d1e83e7e054f.asciidoc} (94%) create mode 100644 docs/doc_examples/3722dad876023e0757138dd5a6d3240e.asciidoc create mode 100644 docs/doc_examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc create mode 100644 docs/doc_examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc create mode 100644 docs/doc_examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc create mode 100644 docs/doc_examples/537bce129338d9227bccb6a0283dab45.asciidoc create mode 100644 docs/doc_examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc create mode 100644 docs/doc_examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc create mode 100644 docs/doc_examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc create mode 100644 docs/doc_examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc create mode 100644 docs/doc_examples/89f547649895176c246bb8c41313ff21.asciidoc create mode 100644 docs/doc_examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc create mode 100644 docs/doc_examples/a46f566ca031375658c22f89b87dc6d2.asciidoc create mode 100644 docs/doc_examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc rename docs/doc_examples/{357edc9d10e98ed776401c7a439a1a55.asciidoc => ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc} (94%) rename docs/doc_examples/{436d50b85fc8f0977d02059eec00719b.asciidoc => bcd1afb793240b1dddd9fa5d3f21192b.asciidoc} (65%) create mode 100644 docs/doc_examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc create mode 100644 docs/doc_examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc create mode 100644 docs/doc_examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc create mode 100644 docs/doc_examples/d6a4548b29e939fb197189c20c7c016f.asciidoc create mode 100644 docs/doc_examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc rename docs/doc_examples/{8b144b3eb20872595fd7cbc6c245c7c8.asciidoc => e715fb8c792bf09ac98f0ceca99beb84.asciidoc} (67%) rename docs/doc_examples/{9ad0864bcd665b63551e944653d32423.asciidoc => f994498dd6576be657dedce2822d2b9e.asciidoc} (87%) rename docs/doc_examples/{681d24c2633f598fc43d6afff8996dbb.asciidoc => ffda10edaa7ce087703193c3cb95a426.asciidoc} (92%) diff --git a/docs/doc_examples/00ad41bde67beac991534ae0e04b1296.asciidoc b/docs/doc_examples/00ad41bde67beac991534ae0e04b1296.asciidoc new file mode 100644 index 000000000..aad48ff29 --- /dev/null +++ b/docs/doc_examples/00ad41bde67beac991534ae0e04b1296.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream({ + name: "my-data-stream", + filter_path: "data_streams.indices.index_name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0f028f71f04c1d569fab402869565a84.asciidoc b/docs/doc_examples/0f028f71f04c1d569fab402869565a84.asciidoc new file mode 100644 index 
000000000..007f558d8 --- /dev/null +++ b/docs/doc_examples/0f028f71f04c1d569fab402869565a84.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: ".reindexed-v9-ml-anomalies-custom-example", + settings: { + index: { + number_of_replicas: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/12adea5d76f73d94d80d42f53f67563f.asciidoc b/docs/doc_examples/12adea5d76f73d94d80d42f53f67563f.asciidoc new file mode 100644 index 000000000..83d87f9c6 --- /dev/null +++ b/docs/doc_examples/12adea5d76f73d94d80d42f53f67563f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: ".ml-anomalies-custom-example", + block: "read_only", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ead35c954963e83f89872048dabdbe9.asciidoc b/docs/doc_examples/1ead35c954963e83f89872048dabdbe9.asciidoc new file mode 100644 index 000000000..347f3152e --- /dev/null +++ b/docs/doc_examples/1ead35c954963e83f89872048dabdbe9.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryRole({ + query: { + bool: { + must_not: { + term: { + "metadata._reserved": true, + }, + }, + }, + }, + sort: ["name"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc b/docs/doc_examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc new file mode 100644 index 000000000..5a65c9753 --- /dev/null +++ b/docs/doc_examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: ".ml-anomalies-custom-example", + block: "write", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d29031409016b2b798148ef173a196ae.asciidoc b/docs/doc_examples/2afd49985950cbcccf727fa858d00067.asciidoc similarity index 83% rename from docs/doc_examples/d29031409016b2b798148ef173a196ae.asciidoc rename to docs/doc_examples/2afd49985950cbcccf727fa858d00067.asciidoc index fac02d172..38aa159a8 100644 --- a/docs/doc_examples/d29031409016b2b798148ef173a196ae.asciidoc +++ b/docs/doc_examples/2afd49985950cbcccf727fa858d00067.asciidoc @@ -6,13 +6,13 @@ const response = await client.indices.create({ index: "test-index", query: { - semantic: { - field: "my_semantic_field", + match: { + my_field: "Which country is Paris in?", }, }, highlight: { fields: { - my_semantic_field: { + my_field: { type: "semantic", number_of_fragments: 2, order: "score", diff --git a/docs/doc_examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc b/docs/doc_examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc new file mode 100644 index 000000000..e61007e9c --- /dev/null +++ b/docs/doc_examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-*", + query: { + bool: { + must: [ + { + match: { + 
"user.id": "kimchy", + }, + }, + ], + must_not: [ + { + terms: { + _index: ["my-index-01"], + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc b/docs/doc_examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc new file mode 100644 index 000000000..dcc8ff429 --- /dev/null +++ b/docs/doc_examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50gb", + }, + searchable_snapshot: { + snapshot_repository: "backing_repo", + replicate_for: "14d", + }, + }, + }, + delete: { + min_age: "28d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc b/docs/doc_examples/36792c81c053e0555407d1e83e7e054f.asciidoc similarity index 94% rename from docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc rename to docs/doc_examples/36792c81c053e0555407d1e83e7e054f.asciidoc index 8651f44c6..2256f4c94 100644 --- a/docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc +++ b/docs/doc_examples/36792c81c053e0555407d1e83e7e054f.asciidoc @@ -9,10 +9,13 @@ const response = await client.search({ retriever: { rescorer: { rescore: { + window_size: 50, query: { - window_size: 50, rescore_query: { script_score: { + query: { + match_all: {}, + }, script: { source: "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0", diff --git a/docs/doc_examples/3722dad876023e0757138dd5a6d3240e.asciidoc b/docs/doc_examples/3722dad876023e0757138dd5a6d3240e.asciidoc new file mode 100644 index 000000000..e071509a9 --- /dev/null +++ b/docs/doc_examples/3722dad876023e0757138dd5a6d3240e.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + settings: { + index: { + number_of_shards: 3, + "blocks.write": true, + }, + }, + mappings: { + properties: { + field1: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc b/docs/doc_examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc new file mode 100644 index 000000000..087b6dc1b --- /dev/null +++ b/docs/doc_examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: ".ml-anomalies-custom-example", + size: 0, + aggs: { + job_ids: { + terms: { + field: "job_id", + size: 100, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc b/docs/doc_examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc new file mode 100644 index 000000000..929ab0ee8 --- /dev/null +++ b/docs/doc_examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc @@ -0,0 +1,61 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { 
+ linear: { + retrievers: [ + { + retriever: { + standard: { + query: { + function_score: { + query: { + term: { + topic: "ai", + }, + }, + functions: [ + { + script_score: { + script: { + source: "doc['timestamp'].value.millis", + }, + }, + }, + ], + boost_mode: "replace", + }, + }, + sort: { + timestamp: { + order: "asc", + }, + }, + }, + }, + weight: 2, + normalizer: "minmax", + }, + { + retriever: { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + weight: 1.5, + }, + ], + rank_window_size: 10, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc b/docs/doc_examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc new file mode 100644 index 000000000..22100f235 --- /dev/null +++ b/docs/doc_examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "elser-model-eis", + inference_config: { + service: "elastic", + service_settings: { + model_name: "elser", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/537bce129338d9227bccb6a0283dab45.asciidoc b/docs/doc_examples/537bce129338d9227bccb6a0283dab45.asciidoc new file mode 100644 index 000000000..cfeed0dff --- /dev/null +++ b/docs/doc_examples/537bce129338d9227bccb6a0283dab45.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "migrate.data_stream_reindex_max_request_per_second": 10000, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc b/docs/doc_examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc new file mode 100644 index 000000000..61ac89373 --- /dev/null +++ b/docs/doc_examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: ".reindexed-v9-ml-anomalies-custom-example", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc b/docs/doc_examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc new file mode 100644 index 000000000..41c42d206 --- /dev/null +++ b/docs/doc_examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test-index", + query: { + match: { + my_semantic_field: "Which country is Paris in?", + }, + }, + highlight: { + fields: { + my_semantic_field: { + number_of_fragments: 2, + order: "score", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc b/docs/doc_examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc new file mode 100644 index 000000000..fdd6ab8f3 --- /dev/null +++ b/docs/doc_examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: ".ml-anomalies-custom-example", + }, + dest: { + index: ".reindexed-v9-ml-anomalies-custom-example", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc b/docs/doc_examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc new file mode 100644 index 000000000..7cb4b44d1 --- /dev/null +++ b/docs/doc_examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: ".reindexed-v9-ml-anomalies-custom-example", + settings: { + index: { + number_of_replicas: 0, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/89f547649895176c246bb8c41313ff21.asciidoc b/docs/doc_examples/89f547649895176c246bb8c41313ff21.asciidoc new file mode 100644 index 000000000..571f64436 --- /dev/null +++ b/docs/doc_examples/89f547649895176c246bb8c41313ff21.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\nFROM library\n| EVAL year = DATE_EXTRACT("year", release_date)\n| WHERE page_count > ? AND match(author, ?, {"minimum_should_match": ?})\n| LIMIT 5\n', + params: [300, "Frank Herbert", 2], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc b/docs/doc_examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc new file mode 100644 index 000000000..680f24481 --- /dev/null +++ b/docs/doc_examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getAlias({ + index: ".ml-anomalies-custom-example", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a46f566ca031375658c22f89b87dc6d2.asciidoc b/docs/doc_examples/a46f566ca031375658c22f89b87dc6d2.asciidoc new file mode 100644 index 000000000..f11302fa4 --- /dev/null +++ b/docs/doc_examples/a46f566ca031375658c22f89b87dc6d2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + index: ".ml-anomalies-custom-example", + v: "true", + h: "index,store.size", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc b/docs/doc_examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc new file mode 100644 index 000000000..2837854a9 --- /dev/null +++ b/docs/doc_examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.get({ + index: ".migrated-ds-my-data-stream-2025.01.23-000001", + human: "true", + filter_path: "*.settings.index.version.created_string", +}); +console.log(response); +---- diff --git a/docs/doc_examples/357edc9d10e98ed776401c7a439a1a55.asciidoc b/docs/doc_examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc similarity index 94% rename 
from docs/doc_examples/357edc9d10e98ed776401c7a439a1a55.asciidoc rename to docs/doc_examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc index 088bda3bc..42c6d4763 100644 --- a/docs/doc_examples/357edc9d10e98ed776401c7a439a1a55.asciidoc +++ b/docs/doc_examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc @@ -6,6 +6,7 @@ const response = await client.indices.resolveCluster({ name: "not-present,clust*:my-index*,oldcluster:*", ignore_unavailable: "false", + timeout: "5s", }); console.log(response); ---- diff --git a/docs/doc_examples/436d50b85fc8f0977d02059eec00719b.asciidoc b/docs/doc_examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc similarity index 65% rename from docs/doc_examples/436d50b85fc8f0977d02059eec00719b.asciidoc rename to docs/doc_examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc index d1a2f84de..5dc68e409 100644 --- a/docs/doc_examples/436d50b85fc8f0977d02059eec00719b.asciidoc +++ b/docs/doc_examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc @@ -6,15 +6,11 @@ const response = await client.update({ index: "test", id: 1, - script: { - source: "ctx._source.counter += params.count", - lang: "painless", - params: { - count: 4, - }, + doc: { + product_price: 100, }, upsert: { - counter: 1, + product_price: 50, }, }); console.log(response); diff --git a/docs/doc_examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc b/docs/doc_examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc new file mode 100644 index 000000000..f80f1ac99 --- /dev/null +++ b/docs/doc_examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\nFROM library\n| WHERE match(author, "Frank Herbert", {"minimum_should_match": 2, "operator": "AND"})\n| LIMIT 5\n', +}); +console.log(response); +---- diff --git a/docs/doc_examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc b/docs/doc_examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc new file mode 100644 index 000000000..ff0c652e4 --- /dev/null +++ b/docs/doc_examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + metric: "metadata", + filter_path: "metadata.indices.*.system", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc b/docs/doc_examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc new file mode 100644 index 000000000..46940cf06 --- /dev/null +++ b/docs/doc_examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + linear: { + retrievers: [ + { + retriever: { + standard: { + query: { + query_string: { + query: "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + }, + weight: 2, + normalizer: "minmax", + }, + { + retriever: { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + weight: 1.5, + normalizer: "minmax", + }, + ], + rank_window_size: 10, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d6a4548b29e939fb197189c20c7c016f.asciidoc 
b/docs/doc_examples/d6a4548b29e939fb197189c20c7c016f.asciidoc new file mode 100644 index 000000000..745cb7efe --- /dev/null +++ b/docs/doc_examples/d6a4548b29e939fb197189c20c7c016f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "chat_completion", + inference_id: "chat-completion-endpoint", + inference_config: { + service: "elastic", + service_settings: { + model_id: "model-1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc b/docs/doc_examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc new file mode 100644 index 000000000..c00660b74 --- /dev/null +++ b/docs/doc_examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: ".reindexed-v9-ml-anomalies-custom-example", + alias: ".ml-anomalies-example1", + filter: { + term: { + job_id: { + value: "example1", + }, + }, + }, + is_hidden: true, + }, + }, + { + add: { + index: ".reindexed-v9-ml-anomalies-custom-example", + alias: ".ml-anomalies-example2", + filter: { + term: { + job_id: { + value: "example2", + }, + }, + }, + is_hidden: true, + }, + }, + { + remove: { + index: ".ml-anomalies-custom-example", + aliases: ".ml-anomalies-*", + }, + }, + { + remove_index: { + index: ".ml-anomalies-custom-example", + }, + }, + { + add: { + index: ".reindexed-v9-ml-anomalies-custom-example", + alias: ".ml-anomalies-custom-example", + is_hidden: true, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc b/docs/doc_examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc similarity index 67% rename from docs/doc_examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc rename to docs/doc_examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc index 3bccba38f..1b37d265b 100644 --- a/docs/doc_examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc +++ b/docs/doc_examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc @@ -3,8 +3,8 @@ [source, js] ---- -const response = await client.security.queryRole({ - sort: ["name"], +const response = await client.migration.deprecations({ + index: ".ml-anomalies-*", }); console.log(response); ---- diff --git a/docs/doc_examples/9ad0864bcd665b63551e944653d32423.asciidoc b/docs/doc_examples/f994498dd6576be657dedce2822d2b9e.asciidoc similarity index 87% rename from docs/doc_examples/9ad0864bcd665b63551e944653d32423.asciidoc rename to docs/doc_examples/f994498dd6576be657dedce2822d2b9e.asciidoc index f553c8706..21a737450 100644 --- a/docs/doc_examples/9ad0864bcd665b63551e944653d32423.asciidoc +++ b/docs/doc_examples/f994498dd6576be657dedce2822d2b9e.asciidoc @@ -30,6 +30,13 @@ const response = await client.search({ ], }, }, + highlight: { + fields: { + semantic_text: { + number_of_fragments: 2, + }, + }, + }, }); console.log(response); ---- diff --git a/docs/doc_examples/681d24c2633f598fc43d6afff8996dbb.asciidoc b/docs/doc_examples/ffda10edaa7ce087703193c3cb95a426.asciidoc similarity index 92% rename from docs/doc_examples/681d24c2633f598fc43d6afff8996dbb.asciidoc rename to docs/doc_examples/ffda10edaa7ce087703193c3cb95a426.asciidoc index bfb21cf32..0ccb2c77a 100644 --- 
a/docs/doc_examples/681d24c2633f598fc43d6afff8996dbb.asciidoc +++ b/docs/doc_examples/ffda10edaa7ce087703193c3cb95a426.asciidoc @@ -28,6 +28,9 @@ const response = await client.indices.create({ topic: { type: "keyword", }, + timestamp: { + type: "date", + }, }, }, }); @@ -41,6 +44,7 @@ const response1 = await client.index({ text: "Large language models are revolutionizing information retrieval by boosting search precision, deepening contextual understanding, and reshaping user experiences in data-rich environments.", year: 2024, topic: ["llm", "ai", "information_retrieval"], + timestamp: "2021-01-01T12:10:30", }, }); console.log(response1); @@ -53,6 +57,7 @@ const response2 = await client.index({ text: "Artificial intelligence is transforming medicine, from advancing diagnostics and tailoring treatment plans to empowering predictive patient care for improved health outcomes.", year: 2023, topic: ["ai", "medicine"], + timestamp: "2022-01-01T12:10:30", }, }); console.log(response2); @@ -65,6 +70,7 @@ const response3 = await client.index({ text: "AI is redefining security by enabling advanced threat detection, proactive risk analysis, and dynamic defenses against increasingly sophisticated cyber threats.", year: 2024, topic: ["ai", "security"], + timestamp: "2023-01-01T12:10:30", }, }); console.log(response3); @@ -77,6 +83,7 @@ const response4 = await client.index({ text: "Elastic introduces Elastic AI Assistant, the open, generative AI sidekick powered by ESRE to democratize cybersecurity and enable users of every skill level.", year: 2023, topic: ["ai", "elastic", "assistant"], + timestamp: "2024-01-01T12:10:30", }, }); console.log(response4); @@ -89,6 +96,7 @@ const response5 = await client.index({ text: "Learn how to spin up a deployment of our hosted Elasticsearch Service and use Elastic Observability to gain deeper insight into the behavior of your applications and systems.", year: 2024, topic: ["documentation", "observability", "elastic"], + timestamp: "2025-01-01T12:10:30", }, }); console.log(response5); diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 62f283b1a..8aee6b840 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -138,7 +138,7 @@ Imagine a `_bulk?refresh=wait_for` request with three documents in it that happe The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all. -{ref}/docs-bulk.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk[Endpoint documentation] [source,ts] ---- client.bulk({ ... }) @@ -166,7 +166,7 @@ client.bulk({ ... }) Clear a scrolling search. Clear the search context and results for a scrolling search. -{ref}/clear-scroll-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll[Endpoint documentation] [source,ts] ---- client.clearScroll({ ... }) @@ -185,7 +185,7 @@ The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. 
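+For example, the sketch below opens a point in time, searches with it, and then closes it explicitly rather than waiting for `keep_alive` to elapse. It is a minimal sketch only; the index name, `keep_alive` value, and query are illustrative assumptions.
+
+[source,ts]
+----
+// Open a point in time on an example index (name is illustrative).
+const pit = await client.openPointInTime({
+  index: "my-index-000001",
+  keep_alive: "1m",
+});
+
+// Run one or more searches against the point in time.
+const result = await client.search({
+  query: { match_all: {} },
+  pit: { id: pit.id, keep_alive: "1m" },
+});
+
+// Close it as soon as it is no longer required.
+const response = await client.closePointInTime({ id: pit.id });
+console.log(response);
+----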
-{ref}/point-in-time-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time[Endpoint documentation] [source,ts] ---- client.closePointInTime({ id }) @@ -201,8 +201,8 @@ client.closePointInTime({ id }) Count search results. Get the number of documents matching a query. -The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. -The latter must be nested in a `query` key, which is the same as the search API. +The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. +The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. @@ -210,7 +210,7 @@ The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. -{ref}/search-count.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count[Endpoint documentation] [source,ts] ---- client.count({ ... }) @@ -220,7 +220,7 @@ client.count({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. The query is optional, and when not provided, it will use `match_all` to count all the docs. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. 
For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. ** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. ** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. @@ -234,7 +234,7 @@ client.count({ ... }) ** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, it is random. ** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. ** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. -** *`q` (Optional, string)*: The query in Lucene query string syntax. +** *`q` (Optional, string)*: The query in Lucene query string syntax. This parameter cannot be used with a request body. [discrete] === create @@ -311,7 +311,7 @@ It is important to note that this setting greatly reduces the chances of the wri After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. -{ref}/docs-index_.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create[Endpoint documentation] [source,ts] ---- client.create({ id, index }) @@ -373,7 +373,7 @@ The document is not deleted if the correct routing is not specified. The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. -{ref}/docs-delete.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete[Endpoint documentation] [source,ts] ---- client.delete({ id, index }) @@ -476,7 +476,7 @@ The task ID can be found by using the get tasks API. Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. -{ref}/docs-delete-by-query.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query[Endpoint documentation] [source,ts] ---- client.deleteByQuery({ index }) @@ -525,7 +525,7 @@ Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. 
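+As an illustrative sketch, the request below turns throttling off for a running delete by query operation. The task ID is a placeholder; a real one is returned by a delete by query request submitted with `wait_for_completion=false` or can be found with the get tasks API.
+
+[source,ts]
+----
+const response = await client.deleteByQueryRethrottle({
+  task_id: "r1A2WoRbTwKZ516z6NEs5A:36619", // placeholder task ID
+  requests_per_second: -1, // -1 disables throttling
+});
+console.log(response);
+----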
-{ref}/docs-delete-by-query.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle[Endpoint documentation] [source,ts] ---- client.deleteByQueryRethrottle({ task_id }) @@ -542,7 +542,7 @@ client.deleteByQueryRethrottle({ task_id }) Delete a script or search template. Deletes a stored script or search template. -{ref}/modules-scripting.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script[Endpoint documentation] [source,ts] ---- client.deleteScript({ id }) @@ -551,9 +551,9 @@ client.deleteScript({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the stored script or search template. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`id` (string)*: The identifier for the stored script or search template. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. [discrete] === exists @@ -577,7 +577,7 @@ Internally, Elasticsearch has marked the old document as deleted and added an en The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. -{ref}/docs-get.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation] [source,ts] ---- client.exists({ id, index }) @@ -612,7 +612,7 @@ HEAD my-index-000001/_source/1 A document's source is not available if it is disabled in the mapping. -{ref}/docs-get.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation] [source,ts] ---- client.existsSource({ id, index }) @@ -636,9 +636,10 @@ client.existsSource({ id, index }) [discrete] === explain Explain a document match result. -Returns information about why a specific document matches, or doesn’t match, a query. +Get information about why a specific document matches, or doesn't match, a query. +It computes a score explanation for a query and a specific document. -{ref}/search-explain.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain[Endpoint documentation] [source,ts] ---- client.explain({ id, index }) @@ -647,21 +648,21 @@ client.explain({ id, index }) ==== Arguments * *Request (object):* -** *`id` (string)*: Defines the document ID. -** *`index` (string)*: Index names used to limit the request. Only a single index name can be provided to this parameter. +** *`id` (string)*: The document identifier. +** *`index` (string)*: Index names that are used to limit the request. Only a single index name can be provided to this parameter. 
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`_source` (Optional, boolean | string | string[])*: True or false to return the `_source` field or not, or a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. +** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: `True` or `false` to return the `_source` field or not or a list of fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. 
If the `_source` parameter is `false`, this parameter is ignored. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. ** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response. -** *`q` (Optional, string)*: Query in the Lucene query string syntax. +** *`q` (Optional, string)*: The query in the Lucene query string syntax. [discrete] === field_caps @@ -673,7 +674,7 @@ For data streams, the API returns field capabilities among the stream’s backin It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. -{ref}/search-field-caps.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps[Endpoint documentation] [source,ts] ---- client.fieldCaps({ ... }) @@ -682,16 +683,16 @@ client.fieldCaps({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. -** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. -** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. +** *`fields` (Optional, string | string[])*: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. 
+** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. +** *`runtime_mappings` (Optional, Record)*: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. ** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. ** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. ** *`include_unmapped` (Optional, boolean)*: If true, unmapped fields are included in the response. -** *`filters` (Optional, string)*: An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent -** *`types` (Optional, string[])*: Only return results for fields that have one of the types in the list +** *`filters` (Optional, string)*: A list of filters to apply to the response. +** *`types` (Optional, string[])*: A list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. ** *`include_empty_fields` (Optional, boolean)*: If false, empty fields are not included in the response. 
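+As a minimal sketch of these parameters, the request below retrieves capabilities for two fields and uses `index_filter` to skip indices whose documents cannot fall in the requested range. All index and field names are assumptions for illustration.
+
+[source,ts]
+----
+const response = await client.fieldCaps({
+  index: "my-index-*",
+  fields: ["rating", "title"],
+  index_filter: {
+    range: {
+      "@timestamp": { gte: "now-7d" },
+    },
+  },
+});
+console.log(response);
+----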
[discrete] @@ -755,7 +756,7 @@ Internally, Elasticsearch has marked the old document as deleted and added an en The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. -{ref}/docs-get.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation] [source,ts] ---- client.get({ id, index }) @@ -783,7 +784,7 @@ client.get({ id, index }) Get a script or search template. Retrieves a stored script or search template. -{ref}/modules-scripting.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script[Endpoint documentation] [source,ts] ---- client.getScript({ id }) @@ -792,8 +793,8 @@ client.getScript({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the stored script or search template. -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`id` (string)*: The identifier for the stored script or search template. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. [discrete] === get_script_context @@ -801,7 +802,7 @@ Get script contexts. Get a list of supported script contexts and their methods. -{painless}/painless-contexts.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context[Endpoint documentation] [source,ts] ---- client.getScriptContext() @@ -813,7 +814,7 @@ Get script languages. Get a list of available script types, languages, and contexts. -{ref}/modules-scripting.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages[Endpoint documentation] [source,ts] ---- client.getScriptLanguages() @@ -836,7 +837,7 @@ You can use the source filtering parameters to control which parts of the `_sour GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities ---- -{ref}/docs-get.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation] [source,ts] ---- client.getSource({ id, index }) @@ -879,7 +880,7 @@ A diagnosis contains a cause detailing a root cause analysis, an action containi NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. -{ref}/health-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report[Endpoint documentation] [source,ts] ---- client.healthReport({ ... }) @@ -1016,7 +1017,7 @@ If the document was already updated and its version was set to 2 or higher, the A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. 
Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. -{ref}/docs-index_.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create[Endpoint documentation] [source,ts] ---- client.index({ index }) @@ -1045,7 +1046,7 @@ client.index({ index }) Get cluster info. Get basic build, version, and cluster information. -{ref}/rest-api-root.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info[Endpoint documentation] [source,ts] ---- client.info() @@ -1067,7 +1068,13 @@ This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. -{ref}/search-search.html[Endpoint documentation] +A kNN search response has the exact same structure as a search API response. +However, certain sections have a meaning specific to kNN search: + +* The document `_score` is determined by the similarity between the query and document vector. +* The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. + +{ref}/knn-search-api.html[Endpoint documentation] [source,ts] ---- client.knnSearch({ index, knn }) @@ -1076,14 +1083,14 @@ client.knnSearch({ index, knn }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A list of index names to search; use `_all` or to perform the operation on all indices -** *`knn` ({ field, query_vector, k, num_candidates })*: kNN query to execute -** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. -** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. -** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. -** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. 
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. -** *`routing` (Optional, string)*: A list of specific routing values +** *`index` (string | string[])*: A list of index names to search; use `_all` or to perform the operation on all indices. +** *`knn` ({ field, query_vector, k, num_candidates })*: The kNN query to run. +** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These fields are returned in the `hits._source` property of the search response. +** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. +** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. 
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. +** *`routing` (Optional, string)*: A list of specific routing values. [discrete] === mget @@ -1093,7 +1100,19 @@ Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. -{ref}/docs-multi-get.html[Endpoint documentation] +**Filter source fields** + +By default, the `_source` field is returned for every document (if stored). +Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. +You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. + +**Get stored fields** + +Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. +Any requested fields that are not stored are ignored. +You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget[Endpoint documentation] [source,ts] ---- client.mget({ ... }) @@ -1135,7 +1154,7 @@ IMPORTANT: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. -{ref}/search-multi-search.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch[Endpoint documentation] [source,ts] ---- client.msearch({ ... }) @@ -1164,7 +1183,21 @@ client.msearch({ ... 
}) === msearch_template Run multiple templated searches. -{ref}/search-multi-search.html[Endpoint documentation] +Run multiple templated searches with a single request. +If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. +For example: + +---- +$ cat requests +{ "index": "my-index" } +{ "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} +{ "index": "my-other-index" } +{ "id": "my-other-search-template", "params": { "query_type": "match_all" }} + +$ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo +---- + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template[Endpoint documentation] [source,ts] ---- client.msearchTemplate({ ... }) @@ -1173,11 +1206,11 @@ client.msearchTemplate({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. ** *`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* ** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. -** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the API can run. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`. +** *`max_concurrent_searches` (Optional, number)*: The maximum number of concurrent searches the API can run. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. ** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. ** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. @@ -1185,12 +1218,18 @@ client.msearchTemplate({ ... }) === mtermvectors Get multiple term vectors. +Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. -{ref}/docs-multi-termvectors.html[Endpoint documentation] +**Artificial documents** + +You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. 
+The mapping used is determined by the specified `_index`. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors[Endpoint documentation] [source,ts] ---- client.mtermvectors({ ... }) @@ -1199,20 +1238,20 @@ client.mtermvectors({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string)*: Name of the index that contains the documents. -** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: Array of existing or artificial documents. -** *`ids` (Optional, string[])*: Simplified syntax to specify documents by their ID if they're in the same index. -** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +** *`index` (Optional, string)*: The name of the index that contains the documents. +** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: An array of existing or artificial documents. +** *`ids` (Optional, string[])*: A simplified syntax to specify documents by their ID if they're in the same index. +** *`fields` (Optional, string | string[])*: A list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. ** *`field_statistics` (Optional, boolean)*: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. ** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets. ** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads. ** *`positions` (Optional, boolean)*: If `true`, the response includes term positions. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. ** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. ** *`term_statistics` (Optional, boolean)*: If true, the response includes term frequency and document frequency. ** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. [discrete] === open_point_in_time @@ -1254,7 +1293,7 @@ Ensure that your nodes have sufficient heap space if you have many open point-in Note that a point-in-time doesn't prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API. -{ref}/point-in-time-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time[Endpoint documentation] [source,ts] ---- client.openPointInTime({ index, keep_alive }) @@ -1277,7 +1316,7 @@ client.openPointInTime({ index, keep_alive }) Ping the cluster. 
Get information about whether the cluster is running. -{ref}/index.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster[Endpoint documentation] [source,ts] ---- client.ping() @@ -1288,7 +1327,7 @@ client.ping() Create or update a script or search template. Creates or updates a stored script or search template. -{ref}/modules-scripting.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script[Endpoint documentation] [source,ts] ---- client.putScript({ id, script }) @@ -1297,11 +1336,11 @@ client.putScript({ id, script }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the stored script or search template. Must be unique within the cluster. -** *`script` ({ lang, options, source })*: Contains the script or search template, its parameters, and its language. -** *`context` (Optional, string)*: Context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`id` (string)*: The identifier for the stored script or search template. It must be unique within the cluster. +** *`script` ({ lang, options, source })*: The script or search template, its parameters, and its language. +** *`context` (Optional, string)*: The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. [discrete] === rank_eval @@ -1309,7 +1348,7 @@ Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries. -{ref}/search-rank-eval.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval[Endpoint documentation] [source,ts] ---- client.rankEval({ requests }) @@ -1319,7 +1358,7 @@ client.rankEval({ requests }) * *Request (object):* ** *`requests` ({ id, request, ratings, template_id, params }[])*: A set of typical search requests, together with their provided ratings. -** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. +** *`index` (Optional, string | string[])*: A list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. 
** *`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })*: Definition of the evaluation metric to calculate. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. @@ -1511,7 +1550,7 @@ Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request. -{ref}/docs-reindex.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex[Endpoint documentation] [source,ts] ---- client.reindex({ dest, source }) @@ -1550,7 +1589,7 @@ Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts. -{ref}/docs-reindex.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex[Endpoint documentation] [source,ts] ---- client.reindexRethrottle({ task_id }) @@ -1568,7 +1607,7 @@ Render a search template. Render a search template as a search request body. -{ref}/render-search-template-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template[Endpoint documentation] [source,ts] ---- client.renderSearchTemplate({ ... }) @@ -1577,10 +1616,10 @@ client.renderSearchTemplate({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string)*: ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. +** *`id` (Optional, string)*: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. ** *`file` (Optional, string)* ** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. -** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. +** *`source` (Optional, string)*: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. [discrete] === scripts_painless_execute @@ -1624,7 +1663,7 @@ You can also use the scroll API to specify a new scroll parameter that extends o IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. 
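+For example, a minimal scroll loop looks like the sketch below; the index name, page size, and `scroll` period are illustrative assumptions.
+
+[source,ts]
+----
+// Start the scroll with an initial search request.
+let page = await client.search({
+  index: "my-index-000001",
+  scroll: "1m",
+  size: 1000,
+  query: { match_all: {} },
+});
+
+while (page.hits.hits.length > 0) {
+  // ...process page.hits.hits here...
+  page = await client.scroll({
+    scroll_id: page._scroll_id,
+    scroll: "1m",
+  });
+}
+
+// Free the search context once the scroll is no longer needed.
+await client.clearScroll({ scroll_id: page._scroll_id });
+----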
-{ref}/search-request-body.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll[Endpoint documentation] [source,ts] ---- client.scroll({ scroll_id }) @@ -1633,8 +1672,8 @@ client.scroll({ scroll_id }) ==== Arguments * *Request (object):* -** *`scroll_id` (string)*: Scroll ID of the search. -** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling. +** *`scroll_id` (string)*: The scroll ID of the search. +** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling. ** *`rest_total_hits_as_int` (Optional, boolean)*: If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. [discrete] @@ -1660,7 +1699,7 @@ IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. -{ref}/search-search.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search[Endpoint documentation] [source,ts] ---- client.search({ ... }) @@ -1740,8 +1779,140 @@ client.search({ ... }) Search a vector tile. Search a vector tile for geospatial values. +Before using this API, you should be familiar with the Mapbox vector tile specification. +The API returns results as a binary mapbox vector tile. + +Internally, Elasticsearch translates a vector tile search API request into a search containing: + +* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box. +* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box. +* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`. +* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. + +For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search + +---- +GET my-index/_search +{ + "size": 10000, + "query": { + "geo_bounding_box": { + "my-geo-field": { + "top_left": { + "lat": -40.979898069620134, + "lon": -45 + }, + "bottom_right": { + "lat": -66.51326044311186, + "lon": 0 + } + } + } + }, + "aggregations": { + "grid": { + "geotile_grid": { + "field": "my-geo-field", + "precision": 11, + "size": 65536, + "bounds": { + "top_left": { + "lat": -40.979898069620134, + "lon": -45 + }, + "bottom_right": { + "lat": -66.51326044311186, + "lon": 0 + } + } + } + }, + "bounds": { + "geo_bounds": { + "field": "my-geo-field", + "wrap_longitude": false + } + } + } +} +---- -{ref}/search-vector-tile-api.html[Endpoint documentation] +The API returns results as a binary Mapbox vector tile. +Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: + +* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query. 
+* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.
+* A meta layer containing:
 * A feature containing a bounding box. By default, this is the bounding box of the tile.
 * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.
 * Metadata for the search.

The API only returns features that can display at its zoom level.
For example, if a polygon feature has no area at its zoom level, the API omits it.
The API returns errors as UTF-8 encoded JSON.

IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.
If you specify both parameters, the query parameter takes precedence.

**Grid precision for geotile**

For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.
`grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: ` + grid_precision`.
For example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.
The maximum final precision is 29.
The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.
For example, a value of 8 divides the tile into a grid of 256 x 256 cells.
The `aggs` layer only contains features for cells with matching data.

**Grid precision for geohex**

For a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`.

This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.
The following table maps the H3 resolution for each precision.
For example, if `` is 3 and `grid_precision` is 3, the precision is 6.
At a precision of 6, hexagonal cells have an H3 resolution of 2.
If `` is 3 and `grid_precision` is 4, the precision is 7.
At a precision of 7, hexagonal cells have an H3 resolution of 3.
+ +| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | +| --------- | ---------------- | ------------- | ----------------| ----- | +| 1 | 4 | 0 | 122 | 30.5 | +| 2 | 16 | 0 | 122 | 7.625 | +| 3 | 64 | 1 | 842 | 13.15625 | +| 4 | 256 | 1 | 842 | 3.2890625 | +| 5 | 1024 | 2 | 5882 | 5.744140625 | +| 6 | 4096 | 2 | 5882 | 1.436035156 | +| 7 | 16384 | 3 | 41162 | 2.512329102 | +| 8 | 65536 | 3 | 41162 | 0.6280822754 | +| 9 | 262144 | 4 | 288122 | 1.099098206 | +| 10 | 1048576 | 4 | 288122 | 0.2747745514 | +| 11 | 4194304 | 5 | 2016842 | 0.4808526039 | +| 12 | 16777216 | 6 | 14117882 | 0.8414913416 | +| 13 | 67108864 | 6 | 14117882 | 0.2103728354 | +| 14 | 268435456 | 7 | 98825162 | 0.3681524172 | +| 15 | 1073741824 | 8 | 691776122 | 0.644266719 | +| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | +| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | +| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | +| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | +| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | +| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | +| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | +| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | +| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | +| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | +| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | +| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | +| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | +| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | + +Hexagonal cells don't align perfectly on a vector tile. +Some cells may intersect more than one vector tile. +To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. +Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt[Endpoint documentation] [source,ts] ---- client.searchMvt({ index, field, zoom, x, y }) @@ -1755,20 +1926,20 @@ client.searchMvt({ index, field, zoom, x, y }) ** *`zoom` (number)*: Zoom level for the vector tile to search ** *`x` (number)*: X coordinate for the vector tile to search ** *`y` (number)*: Y coordinate for the vector tile to search -** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid. Supports the following aggregation types: - avg - cardinality - max - min - sum -** *`buffer` (Optional, number)*: Size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. -** *`exact_bounds` (Optional, boolean)*: If false, the meta layer’s feature is the bounding box of the tile. If true, the meta layer’s feature is a bounding box resulting from a geo_bounds aggregation. The aggregation runs on values that intersect the // tile with wrap_longitude set to false. The resulting bounding box may be larger than the vector tile. -** *`extent` (Optional, number)*: Size, in pixels, of a side of the tile. Vector tiles are square with equal sides. -** *`fields` (Optional, string | string[])*: Fields to return in the `hits` layer. Supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. 
-** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: Aggregation used to create a grid for the `field`.
-** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don’t include the aggs layer.
-** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon of the cells bounding box. If 'point' each feature is a Point that is the centroid of the cell.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search.
+** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations.
+** *`buffer` (Optional, number)*: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile.
+** *`exact_bounds` (Optional, boolean)*: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `//` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile.
+** *`extent` (Optional, number)*: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides.
+** *`fields` (Optional, string | string[])*: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results.
+** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: The aggregation used to create a grid for the `field`.
+** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if `` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer.
+** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The query DSL used to filter documents for the search. ** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. -** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don’t include the hits layer. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sorts features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box’s diagonal length, from longest to shortest. -** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. -** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. +** *`size` (Optional, number)*: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. +** *`track_total_hits` (Optional, boolean | number)*: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. +** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`. [discrete] === search_shards @@ -1776,9 +1947,11 @@ Get the search shards. 
Get the indices and shards that a search request would be run against.
This information can be useful for working out issues or planning optimizations with routing and shard preferences.
-When filtered aliases are used, the filter is returned as part of the indices section.
+When filtered aliases are used, the filter is returned as part of the `indices` section.
+
+If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.

-{ref}/search-shards.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards[Endpoint documentation]
[source,ts]
----
client.searchShards({ ... })
----
==== Arguments

* *Request (object):*
-** *`index` (Optional, string | string[])*: Returns the indices and shards that a search request would be executed against.
+** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default.
-** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
+** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default.
+** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.

[discrete]
=== search_template
Run a search with a search template.

-{ref}/search-template.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template[Endpoint documentation]
[source,ts]
----
client.searchTemplate({ ... })
----
==== Arguments

* *Request (object):*
-** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (*).
-** *`explain` (Optional, boolean)*: If `true`, returns detailed information about score calculation as part of each hit. -** *`id` (Optional, string)*: ID of the search template to use. If no source is specified, this parameter is required. +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). +** *`explain` (Optional, boolean)*: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. +** *`id` (Optional, string)*: The ID of the search template to use. If no `source` is specified, this parameter is required. ** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. ** *`profile` (Optional, boolean)*: If `true`, the query execution is profiled. -** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. Also supports Mustache variables. If no id is specified, this parameter is required. +** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. ** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`ignore_throttled` (Optional, boolean)*: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. 
** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index should be maintained for scrolled search. ** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. -** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are rendered as an integer in the response. +** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object. ** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. [discrete] @@ -1832,14 +2005,12 @@ client.searchTemplate({ ... }) Get terms in an index. Discover terms that match a partial string in an index. -This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios. - -If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate. -This can occur due to a few reasons, such as a request timeout or a node error. +This API is designed for low-latency look-ups used in auto-complete scenarios. -NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. +> info +> The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. -{ref}/search-terms-enum.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum[Endpoint documentation] [source,ts] ---- client.termsEnum({ index, field }) @@ -1848,14 +2019,14 @@ client.termsEnum({ index, field }) ==== Arguments * *Request (object):* -** *`index` (string)*: List of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported. +** *`index` (string)*: A list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`field` (string)*: The string to match at the start of indexed terms. If not provided, all terms in the field are considered. -** *`size` (Optional, number)*: How many matching terms to return. -** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded the complete flag set to false in the response and the results may be partial or empty. -** *`case_insensitive` (Optional, boolean)*: When true the provided search string is matched against index terms without case sensitivity. 
-** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none.
-** *`string` (Optional, string)*: The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request.
-** *`search_after` (Optional, string)*
+** *`size` (Optional, number)*: The number of matching terms to return.
+** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty.
+** *`case_insensitive` (Optional, boolean)*: When `true`, the provided search string is matched against index terms without case sensitivity.
+** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter an index shard if the provided query rewrites to `match_none`.
+** *`string` (Optional, string)*: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766.
+** *`search_after` (Optional, string)*: The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request.

[discrete]
=== termvectors
Get term vector information.

Get information and statistics about terms in the fields of a particular document.

You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.
You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.
For example:

----
GET /my-index-000001/_termvectors/1?fields=message
----

Fields can be specified using wildcards, similar to the multi match query.

Term vectors are real-time by default, not near real-time.
+This can be changed by setting the `realtime` parameter to `false`.

You can request three types of values: _term information_, _term statistics_, and _field statistics_.
By default, all term information and field statistics are returned for all fields but term statistics are excluded.

**Term information**

* term frequency in the field (always returned)
* term positions (`positions: true`)
* start and end offsets (`offsets: true`)
* term payloads (`payloads: true`), as base64 encoded bytes

If the requested information wasn't stored in the index, it will be computed on the fly if possible.
Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.

> warn
> Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.

**Behaviour**

The term and field statistics are not accurate.
Deleted documents are not taken into account.
The information is only retrieved for the shard the requested document resides in.
The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.
By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.
Use `routing` only to hit a particular shard.

-{ref}/docs-termvectors.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors[Endpoint documentation]
[source,ts]
----
client.termvectors({ index })
----
==== Arguments

* *Request (object):*
** *`index` (string)*: The name of the index that contains the document.
** *`id` (Optional, string)*: A unique identifier for the document.
** *`doc` (Optional, object)*: An artificial document (a document not present in the index) for which you want to retrieve term vectors.
** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })*: Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query.
** *`per_field_analyzer` (Optional, Record)*: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated.
+** *`fields` (Optional, string | string[])*: A list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +** *`field_statistics` (Optional, boolean)*: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). ** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets. ** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads. ** *`positions` (Optional, boolean)*: If `true`, the response includes term positions. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. ** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`term_statistics` (Optional, boolean)*: If `true`, the response includes term frequency and document frequency. +** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard. +** *`term_statistics` (Optional, boolean)*: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact. ** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. [discrete] === update @@ -1911,7 +2120,7 @@ The document must still be reindexed, but using this API removes some network ro The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). -{ref}/docs-update.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update[Endpoint documentation] [source,ts] ---- client.update({ id, index }) @@ -2028,7 +2237,7 @@ Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error. This API enables you to only modify the source of matching documents; you cannot move them. -{ref}/docs-update-by-query.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query[Endpoint documentation] [source,ts] ---- client.updateByQuery({ index }) @@ -2080,7 +2289,7 @@ Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. 
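For example, you can start an update by query as a background task and then throttle it while it runs. A minimal sketch, assuming a configured `client`; the index name and script are hypothetical:

[source,ts]
----
// Start the update as a task instead of waiting for completion.
const { task } = await client.updateByQuery({
  index: 'my-index',
  wait_for_completion: false,
  script: { source: 'ctx._source.counter = 0' }
})

// Throttle the running task to 10 requests per second. Speeding a task up
// applies immediately; slowing it down waits for the current batch to end.
await client.updateByQueryRethrottle({
  task_id: String(task),
  requests_per_second: 10
})
----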
-{ref}/docs-update-by-query.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle[Endpoint documentation] [source,ts] ---- client.updateByQueryRethrottle({ task_id }) @@ -2102,7 +2311,7 @@ If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. -{ref}/async-search.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation] [source,ts] ---- client.asyncSearch.delete({ id }) @@ -2121,7 +2330,7 @@ Get async search results. Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. -{ref}/async-search.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation] [source,ts] ---- client.asyncSearch.get({ id }) @@ -2149,7 +2358,7 @@ Get the async search status. Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. -{ref}/async-search.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation] [source,ts] ---- client.asyncSearch.status({ id }) @@ -2174,7 +2383,7 @@ Warning: Asynchronous search does not support scroll or search requests that inc By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. -{ref}/async-search.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation] [source,ts] ---- client.asyncSearch.submit({ ... }) @@ -2281,7 +2490,7 @@ Delete an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -{ref}/autoscaling-delete-autoscaling-policy.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy[Endpoint documentation] [source,ts] ---- client.autoscaling.deleteAutoscalingPolicy({ name }) @@ -2313,7 +2522,7 @@ The response contains decider-specific information you can use to diagnose how a This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. -{ref}/autoscaling-get-autoscaling-capacity.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity[Endpoint documentation] [source,ts] ---- client.autoscaling.getAutoscalingCapacity({ ... }) @@ -2332,7 +2541,7 @@ Get an autoscaling policy. 
NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -{ref}/autoscaling-get-autoscaling-capacity.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity[Endpoint documentation] [source,ts] ---- client.autoscaling.getAutoscalingPolicy({ name }) @@ -2352,7 +2561,7 @@ Create or update an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -{ref}/autoscaling-put-autoscaling-policy.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy[Endpoint documentation] [source,ts] ---- client.autoscaling.putAutoscalingPolicy({ name }) @@ -2379,7 +2588,7 @@ This API does not return data stream aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. -{ref}/cat-alias.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases[Endpoint documentation] [source,ts] ---- client.cat.aliases({ ... }) @@ -2405,7 +2614,7 @@ Get a snapshot of the number of shards allocated to each data node and their dis IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. -{ref}/cat-allocation.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation[Endpoint documentation] [source,ts] ---- client.cat.allocation({ ... }) @@ -2433,7 +2642,7 @@ Component templates are building blocks for constructing index templates that sp IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. -{ref}/cat-component-templates.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates[Endpoint documentation] [source,ts] ---- client.cat.componentTemplates({ ... }) @@ -2462,7 +2671,7 @@ The document count only includes live documents, not deleted documents which hav IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. -{ref}/cat-count.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count[Endpoint documentation] [source,ts] ---- client.cat.count({ ... }) @@ -2485,7 +2694,7 @@ Get the amount of heap memory currently used by the field data cache on every da IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. -{ref}/cat-fielddata.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata[Endpoint documentation] [source,ts] ---- client.cat.fielddata({ ... }) @@ -2513,7 +2722,7 @@ The latter format is useful for cluster recoveries that take multiple days. 
You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. -{ref}/cat-health.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health[Endpoint documentation] [source,ts] ---- client.cat.health({ ... }) @@ -2532,7 +2741,7 @@ Get CAT help. Get help for the CAT APIs. -{ref}/cat.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat[Endpoint documentation] [source,ts] ---- client.cat.help() @@ -2558,7 +2767,7 @@ To get an accurate count of Elasticsearch documents, use the cat count or count CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. -{ref}/cat-indices.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices[Endpoint documentation] [source,ts] ---- client.cat.indices({ ... }) @@ -2586,7 +2795,7 @@ Get information about the master node, including the ID, bound IP address, and n IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. -{ref}/cat-master.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master[Endpoint documentation] [source,ts] ---- client.cat.master({ ... }) @@ -2612,7 +2821,7 @@ IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. -{ref}/cat-dfanalytics.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics[Endpoint documentation] [source,ts] ---- client.cat.mlDataFrameAnalytics({ ... }) @@ -2643,7 +2852,7 @@ IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. -{ref}/cat-datafeeds.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds[Endpoint documentation] [source,ts] ---- client.cat.mlDatafeeds({ ... }) @@ -2680,7 +2889,7 @@ IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. -{ref}/cat-anomaly-detectors.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs[Endpoint documentation] [source,ts] ---- client.cat.mlJobs({ ... }) @@ -2715,7 +2924,7 @@ IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. -{ref}/cat-trained-model.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models[Endpoint documentation] [source,ts] ---- client.cat.mlTrainedModels({ ... }) @@ -2743,7 +2952,7 @@ Get node attribute information. 
Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. -{ref}/cat-nodeattrs.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs[Endpoint documentation] [source,ts] ---- client.cat.nodeattrs({ ... }) @@ -2766,7 +2975,7 @@ Get node information. Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. -{ref}/cat-nodes.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes[Endpoint documentation] [source,ts] ---- client.cat.nodes({ ... }) @@ -2789,7 +2998,7 @@ Get pending task information. Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. -{ref}/cat-pending-tasks.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks[Endpoint documentation] [source,ts] ---- client.cat.pendingTasks({ ... }) @@ -2813,7 +3022,7 @@ Get plugin information. Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. -{ref}/cat-plugins.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins[Endpoint documentation] [source,ts] ---- client.cat.plugins({ ... }) @@ -2839,7 +3048,7 @@ Shard recovery is the process of initializing a shard copy, such as restoring a For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. -{ref}/cat-recovery.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery[Endpoint documentation] [source,ts] ---- client.cat.recovery({ ... }) @@ -2863,7 +3072,7 @@ Get snapshot repository information. Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. -{ref}/cat-repositories.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories[Endpoint documentation] [source,ts] ---- client.cat.repositories({ ... }) @@ -2887,7 +3096,7 @@ Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. 
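If you do read CAT output from a script for ad-hoc inspection, asking for JSON keeps it parseable. A minimal sketch, assuming a configured `client` and that the `format` parameter is available; the index name is hypothetical:

[source,ts]
----
// `format: 'json'` switches the response from aligned plain text to JSON,
// which is easier to inspect programmatically than the default table.
const segments = await client.cat.segments({
  index: 'my-index',
  format: 'json'
})
console.log(segments)
----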
-{ref}/cat-segments.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments[Endpoint documentation] [source,ts] ---- client.cat.segments({ ... }) @@ -2915,7 +3124,7 @@ Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. -{ref}/cat-shards.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards[Endpoint documentation] [source,ts] ---- client.cat.shards({ ... }) @@ -2940,7 +3149,7 @@ Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. -{ref}/cat-snapshots.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots[Endpoint documentation] [source,ts] ---- client.cat.snapshots({ ... }) @@ -2965,7 +3174,7 @@ Get task information. Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. -{ref}/tasks.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks[Endpoint documentation] [source,ts] ---- client.cat.tasks({ ... }) @@ -2992,7 +3201,7 @@ Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. -{ref}/cat-templates.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates[Endpoint documentation] [source,ts] ---- client.cat.templates({ ... }) @@ -3018,7 +3227,7 @@ Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. -{ref}/cat-thread-pool.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool[Endpoint documentation] [source,ts] ---- client.cat.threadPool({ ... }) @@ -3047,7 +3256,7 @@ CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. -{ref}/cat-transforms.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms[Endpoint documentation] [source,ts] ---- client.cat.transforms({ ... }) @@ -3075,7 +3284,7 @@ If `false`, the request returns a 404 status code when there are no matches or o Delete auto-follow patterns. 
Delete a collection of cross-cluster replication auto-follow patterns. -{ref}/ccr-delete-auto-follow-pattern.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern[Endpoint documentation] [source,ts] ---- client.ccr.deleteAutoFollowPattern({ name }) @@ -3094,7 +3303,7 @@ Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. -{ref}/ccr-put-follow.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow[Endpoint documentation] [source,ts] ---- client.ccr.follow({ index, leader_index, remote_cluster }) @@ -3136,7 +3345,7 @@ Get follower information. Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. -{ref}/ccr-get-follow-info.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info[Endpoint documentation] [source,ts] ---- client.ccr.followInfo({ index }) @@ -3152,10 +3361,11 @@ client.ccr.followInfo({ index }) [discrete] ==== follow_stats Get follower stats. + Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. -{ref}/ccr-get-follow-stats.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats[Endpoint documentation] [source,ts] ---- client.ccr.followStats({ index }) @@ -3165,8 +3375,9 @@ client.ccr.followStats({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A list of index patterns; use `_all` to perform the operation on all indices -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`index` (string | string[])*: A comma-delimited list of index patterns. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== forget_follower @@ -3183,7 +3394,7 @@ This API exists to enable manually removing the leases when the unfollow API is NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. -{ref}/ccr-post-forget-follower.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower[Endpoint documentation] [source,ts] ---- client.ccr.forgetFollower({ index }) @@ -3203,9 +3414,10 @@ client.ccr.forgetFollower({ index }) [discrete] ==== get_auto_follow_pattern Get auto-follow patterns. + Get cross-cluster replication auto-follow patterns. 
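Both forms of the call look like this. A minimal sketch, assuming a configured `client`; the pattern name `logs-pattern` is hypothetical:

[source,ts]
----
// Omitting `name` returns every auto-follow pattern collection.
const all = await client.ccr.getAutoFollowPattern()

// Passing `name` returns a single named collection.
const one = await client.ccr.getAutoFollowPattern({ name: 'logs-pattern' })
----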
-{ref}/ccr-get-auto-follow-pattern.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1[Endpoint documentation] [source,ts] ---- client.ccr.getAutoFollowPattern({ ... }) @@ -3215,12 +3427,16 @@ client.ccr.getAutoFollowPattern({ ... }) ==== Arguments * *Request (object):* -** *`name` (Optional, string)*: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`name` (Optional, string)*: The auto-follow pattern collection that you want to retrieve. +If you do not specify a name, the API returns information for all collections. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== pause_auto_follow_pattern Pause an auto-follow pattern. + Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored. @@ -3229,7 +3445,7 @@ You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. -{ref}/ccr-pause-auto-follow-pattern.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern[Endpoint documentation] [source,ts] ---- client.ccr.pauseAutoFollowPattern({ name }) @@ -3239,18 +3455,21 @@ client.ccr.pauseAutoFollowPattern({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the auto follow pattern that should pause discovering new indices to follow. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`name` (string)*: The name of the auto-follow pattern to pause. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== pause_follow Pause a follower. + Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. -{ref}/ccr-post-pause-follow.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow[Endpoint documentation] [source,ts] ---- client.ccr.pauseFollow({ index }) @@ -3260,8 +3479,10 @@ client.ccr.pauseFollow({ index }) ==== Arguments * *Request (object):* -** *`index` (string)*: The name of the follower index that should pause following its leader index. 
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`index` (string)*: The name of the follower index. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== put_auto_follow_pattern @@ -3273,7 +3494,7 @@ Indices on the remote cluster that were created before the auto-follow pattern w This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. -{ref}/ccr-put-auto-follow-pattern.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern[Endpoint documentation] [source,ts] ---- client.ccr.putAutoFollowPattern({ name, remote_cluster }) @@ -3304,11 +3525,12 @@ client.ccr.putAutoFollowPattern({ name, remote_cluster }) [discrete] ==== resume_auto_follow_pattern Resume an auto-follow pattern. + Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. -{ref}/ccr-resume-auto-follow-pattern.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern[Endpoint documentation] [source,ts] ---- client.ccr.resumeAutoFollowPattern({ name }) @@ -3318,8 +3540,10 @@ client.ccr.resumeAutoFollowPattern({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the auto follow pattern to resume discovering new indices to follow. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`name` (string)*: The name of the auto-follow pattern to resume. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== resume_follow @@ -3329,7 +3553,7 @@ The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index. -{ref}/ccr-post-resume-follow.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow[Endpoint documentation] [source,ts] ---- client.ccr.resumeFollow({ index }) @@ -3355,9 +3579,10 @@ client.ccr.resumeFollow({ index }) [discrete] ==== stats Get cross-cluster replication stats. + This API returns stats about auto-following and the same shard-level stats as the get follower stats API. -{ref}/ccr-get-stats.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats[Endpoint documentation] [source,ts] ---- client.ccr.stats({ ... }) @@ -3367,19 +3592,23 @@ client.ccr.stats({ ... 
}) ==== Arguments * *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== unfollow Unfollow an index. + Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. -NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. +> info +> Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. -{ref}/ccr-post-unfollow.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow[Endpoint documentation] [source,ts] ---- client.ccr.unfollow({ index }) @@ -3389,8 +3618,10 @@ client.ccr.unfollow({ index }) ==== Arguments * *Request (object):* -** *`index` (string)*: The name of the follower index that should be turned into a regular index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`index` (string)*: The name of the follower index. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] === cluster @@ -3402,7 +3633,7 @@ For unassigned shards, it provides an explanation for why the shard is unassigne For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. -{ref}/cluster-allocation-explain.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain[Endpoint documentation] [source,ts] ---- client.cluster.allocationExplain({ ... }) @@ -3425,7 +3656,7 @@ client.cluster.allocationExplain({ ... }) Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
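A minimal sketch, assuming a component template named `my-component-template` exists and is no longer referenced by any index template:

[source,ts]
----
// The template name below is a hypothetical example.
await client.cluster.deleteComponentTemplate({ name: 'my-component-template' })
----
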
-{ref}/indices-component-template.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation] [source,ts] ---- client.cluster.deleteComponentTemplate({ name }) @@ -3446,7 +3677,7 @@ If no response is received before the timeout expires, the request fails and ret Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list. -{ref}/voting-config-exclusions.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions[Endpoint documentation] [source,ts] ---- client.cluster.deleteVotingConfigExclusions({ ... }) @@ -3469,7 +3700,7 @@ nodes are still in the cluster. Check component templates. Returns information about whether a particular component template exists. -{ref}/indices-component-template.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation] [source,ts] ---- client.cluster.existsComponentTemplate({ name }) @@ -3492,7 +3723,7 @@ Defaults to false, which means information is retrieved from the master node. Get component templates. Get information about component templates. -{ref}/indices-component-template.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation] [source,ts] ---- client.cluster.getComponentTemplate({ ... }) @@ -3516,7 +3747,7 @@ If no response is received before the timeout expires, the request fails and ret Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. -{ref}/cluster-get-settings.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings[Endpoint documentation] [source,ts] ---- client.cluster.getSettings({ ... }) @@ -3536,6 +3767,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== health Get the cluster health status. + You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. @@ -3546,7 +3778,7 @@ The index level status is controlled by the worst shard status. One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status. -{ref}/cluster-health.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health[Endpoint documentation] [source,ts] ---- client.cluster.health({ ... }) @@ -3574,7 +3806,7 @@ client.cluster.health({ ... }) Get cluster info. Returns basic information about the cluster. -{ref}/cluster-info.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info[Endpoint documentation] [source,ts] ---- client.cluster.info({ target }) @@ -3595,7 +3827,7 @@ NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. 
However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. -{ref}/cluster-pending.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks[Endpoint documentation] [source,ts] ---- client.cluster.pendingTasks({ ... }) @@ -3631,7 +3863,7 @@ In that case, you may safely retry the call. NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. -{ref}/voting-config-exclusions.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions[Endpoint documentation] [source,ts] ---- client.cluster.postVotingConfigExclusions({ ... }) @@ -3674,7 +3906,7 @@ You can include comments anywhere in the request body except before the opening You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template's `composed_of` list. -{ref}/indices-component-template.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation] [source,ts] ---- client.cluster.putComponentTemplate({ name, template }) @@ -3706,6 +3938,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_settings Update the cluster settings. + Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. @@ -3724,7 +3957,7 @@ The API doesn’t require a restart and ensures a setting’s value is the same WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. -{ref}/cluster-update-settings.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings[Endpoint documentation] [source,ts] ---- client.cluster.putSettings({ ... }) @@ -3743,10 +3976,17 @@ client.cluster.putSettings({ ... }) [discrete] ==== remote_info Get remote cluster information. -Get all of the configured remote cluster information. -This API returns connection and endpoint information keyed by the configured remote cluster alias. -{ref}/cluster-remote-info.html[Endpoint documentation] +Get information about configured remote clusters. +The API returns connection and endpoint information keyed by the configured remote cluster alias. + +> info +> This API returns information that reflects current state on the local cluster. +> The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. +> Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. +> To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). 
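As a sketch, the response can be iterated by cluster alias to inspect each connection:

[source,ts]
----
const remotes = await client.cluster.remoteInfo()
for (const [alias, info] of Object.entries(remotes)) {
  // `connected` reports only whether an open connection currently exists,
  // not whether the remote cluster itself is up.
  console.log(alias, info.connected)
}
----
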
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info[Endpoint documentation] [source,ts] ---- client.cluster.remoteInfo() @@ -3770,7 +4010,7 @@ This scenario can be caused by structural problems such as having an analyzer wh Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. -{ref}/cluster-reroute.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute[Endpoint documentation] [source,ts] ---- client.cluster.reroute({ ... }) @@ -3811,7 +4051,7 @@ Its format is not subject to the same compatibility guarantees as other more sta Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs. -{ref}/cluster-state.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state[Endpoint documentation] [source,ts] ---- client.cluster.state({ ... }) @@ -3837,7 +4077,7 @@ client.cluster.state({ ... }) Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). -{ref}/cluster-stats.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats[Endpoint documentation] [source,ts] ---- client.cluster.stats({ ... }) @@ -3861,7 +4101,7 @@ Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp. -{ref}/check-in-connector-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in[Endpoint documentation] [source,ts] ---- client.connector.checkIn({ connector_id }) @@ -3882,7 +4122,7 @@ This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually. -{ref}/delete-connector-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete[Endpoint documentation] [source,ts] ---- client.connector.delete({ connector_id }) @@ -3902,7 +4142,7 @@ Get a connector. Get the details about a connector. -{ref}/get-connector-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get[Endpoint documentation] [source,ts] ---- client.connector.get({ connector_id }) @@ -3921,7 +4161,7 @@ Get all connectors. Get information about all connectors. -{ref}/list-connector-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list[Endpoint documentation] [source,ts] ---- client.connector.list({ ... }) @@ -3947,7 +4187,7 @@ Connectors are Elasticsearch integrations that bring content from third-party da Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure. -{ref}/create-connector-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put[Endpoint documentation] [source,ts] ---- client.connector.post({ ... }) @@ -3968,7 +4208,7 @@ client.connector.post({ ... 
}) ==== put Create or update a connector. -{ref}/create-connector-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put[Endpoint documentation] [source,ts] ---- client.connector.put({ ... }) @@ -3993,7 +4233,7 @@ Cancel a connector sync job. Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled. -{ref}/cancel-connector-sync-job-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel[Endpoint documentation] [source,ts] ---- client.connector.syncJobCancel({ connector_sync_job_id }) @@ -4013,7 +4253,7 @@ Check in a connector sync job and set the `last_seen` field to the current time To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. -{ref}/check-in-connector-sync-job-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in[Endpoint documentation] [source,ts] ---- client.connector.syncJobCheckIn({ connector_sync_job_id }) @@ -4036,6 +4276,8 @@ It supports the implementation of services that utilize the connector protocol t To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim[Endpoint documentation] [source,ts] ---- client.connector.syncJobClaim({ connector_sync_job_id, worker_hostname }) @@ -4057,7 +4299,7 @@ Delete a connector sync job. Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. -{ref}/delete-connector-sync-job-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete[Endpoint documentation] [source,ts] ---- client.connector.syncJobDelete({ connector_sync_job_id }) @@ -4077,7 +4319,7 @@ Set the `error` field for a connector sync job and set its `status` to `error`. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. -{ref}/set-connector-sync-job-error-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error[Endpoint documentation] [source,ts] ---- client.connector.syncJobError({ connector_sync_job_id, error }) @@ -4094,7 +4336,7 @@ client.connector.syncJobError({ connector_sync_job_id, error }) ==== sync_job_get Get a connector sync job. -{ref}/get-connector-sync-job-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get[Endpoint documentation] [source,ts] ---- client.connector.syncJobGet({ connector_sync_job_id }) @@ -4112,7 +4354,7 @@ Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order. 
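A minimal sketch, assuming the paging parameters `from` and `size` (the values are illustrative):

[source,ts]
----
// Page through stored sync jobs, ten at a time.
const page = await client.connector.syncJobList({ from: 0, size: 10 })
console.log(page.count, page.results.length)
----
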
-{ref}/list-connector-sync-jobs-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list[Endpoint documentation] [source,ts] ---- client.connector.syncJobList({ ... }) @@ -4134,7 +4376,7 @@ Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. -{ref}/create-connector-sync-job-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post[Endpoint documentation] [source,ts] ---- client.connector.syncJobPost({ id }) @@ -4158,7 +4400,7 @@ This API is mainly used by the connector service for updating sync job informati To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. -{ref}/set-connector-sync-job-stats-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats[Endpoint documentation] [source,ts] ---- client.connector.syncJobUpdateStats({ connector_sync_job_id, deleted_document_count, indexed_document_count, indexed_document_volume }) @@ -4182,7 +4424,7 @@ Activate the connector draft filter. Activates the valid draft filtering for a connector. -{ref}/update-connector-filtering-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering[Endpoint documentation] [source,ts] ---- client.connector.updateActiveFiltering({ connector_id }) @@ -4203,7 +4445,7 @@ You can specify the ID of the API key used for authorization and the ID of the c The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. -{ref}/update-connector-api-key-id-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id[Endpoint documentation] [source,ts] ---- client.connector.updateApiKeyId({ connector_id }) @@ -4223,7 +4465,7 @@ Update the connector configuration. Update the configuration field in the connector document. -{ref}/update-connector-configuration-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration[Endpoint documentation] [source,ts] ---- client.connector.updateConfiguration({ connector_id }) @@ -4245,7 +4487,7 @@ Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected. -{ref}/update-connector-error-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error[Endpoint documentation] [source,ts] ---- client.connector.updateError({ connector_id, error }) @@ -4275,7 +4517,7 @@ However, you can use this API to override the default behavior. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
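A sketch of toggling one feature flag; the connector ID is hypothetical and the payload assumes the `document_level_security` feature key from the connector protocol:

[source,ts]
----
await client.connector.updateFeatures({
  connector_id: 'my-connector-id', // hypothetical ID
  features: {
    // Assumed feature key: advertise document-level security support.
    document_level_security: { enabled: true }
  }
})
----
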
-{ref}/update-connector-features-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features[Endpoint documentation] [source,ts] ---- client.connector.updateFeatures({ connector_id, features }) @@ -4296,7 +4538,7 @@ Update the draft filtering configuration of a connector and marks the draft vali The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. -{ref}/update-connector-filtering-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering[Endpoint documentation] [source,ts] ---- client.connector.updateFiltering({ connector_id }) @@ -4334,7 +4576,7 @@ Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. -{ref}/update-connector-index-name-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name[Endpoint documentation] [source,ts] ---- client.connector.updateIndexName({ connector_id, index_name }) @@ -4351,7 +4593,7 @@ client.connector.updateIndexName({ connector_id, index_name }) ==== update_name Update the connector name and description. -{ref}/update-connector-name-description-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name[Endpoint documentation] [source,ts] ---- client.connector.updateName({ connector_id }) @@ -4386,7 +4628,7 @@ Update the connector pipeline. When you create a new connector, the configuration of an ingest pipeline is populated with default settings. -{ref}/update-connector-pipeline-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline[Endpoint documentation] [source,ts] ---- client.connector.updatePipeline({ connector_id, pipeline }) @@ -4403,7 +4645,7 @@ client.connector.updatePipeline({ connector_id, pipeline }) ==== update_scheduling Update the connector scheduling. -{ref}/update-connector-scheduling-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling[Endpoint documentation] [source,ts] ---- client.connector.updateScheduling({ connector_id, scheduling }) @@ -4420,7 +4662,7 @@ client.connector.updateScheduling({ connector_id, scheduling }) ==== update_service_type Update the connector service type. -{ref}/update-connector-service-type-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type[Endpoint documentation] [source,ts] ---- client.connector.updateServiceType({ connector_id, service_type }) @@ -4437,7 +4679,7 @@ client.connector.updateServiceType({ connector_id, service_type }) ==== update_status Update the connector status. -{ref}/update-connector-status-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status[Endpoint documentation] [source,ts] ---- client.connector.updateStatus({ connector_id, status }) @@ -4458,7 +4700,7 @@ Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. 
For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. -{ref}/dangling-index-delete.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index[Endpoint documentation] [source,ts] ---- client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss }) @@ -4480,7 +4722,7 @@ Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. -{ref}/dangling-index-import.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index[Endpoint documentation] [source,ts] ---- client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss }) @@ -4505,7 +4747,7 @@ For example, this can happen if you delete more than `cluster.indices.tombstones Use this API to list dangling indices, which you can then import or delete. -{ref}/dangling-indices-list.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices[Endpoint documentation] [source,ts] ---- client.danglingIndices.listDanglingIndices() @@ -4519,7 +4761,7 @@ client.danglingIndices.listDanglingIndices() Delete an enrich policy. Deletes an existing enrich policy and its enrich index. -{ref}/delete-enrich-policy-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy[Endpoint documentation] [source,ts] ---- client.enrich.deletePolicy({ name }) @@ -4537,7 +4779,7 @@ client.enrich.deletePolicy({ name }) Run an enrich policy. Create the enrich index for an existing enrich policy. -{ref}/execute-enrich-policy-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy[Endpoint documentation] [source,ts] ---- client.enrich.executePolicy({ name }) @@ -4556,7 +4798,7 @@ client.enrich.executePolicy({ name }) Get an enrich policy. Returns information about an enrich policy. -{ref}/get-enrich-policy-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy[Endpoint documentation] [source,ts] ---- client.enrich.getPolicy({ ... }) @@ -4575,7 +4817,7 @@ To return information for all enrich policies, omit this parameter. Create an enrich policy. Creates an enrich policy. -{ref}/put-enrich-policy-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy[Endpoint documentation] [source,ts] ---- client.enrich.putPolicy({ name }) @@ -4596,7 +4838,7 @@ client.enrich.putPolicy({ name }) Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. -{ref}/enrich-stats-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats[Endpoint documentation] [source,ts] ---- client.enrich.stats({ ... }) @@ -4616,7 +4858,7 @@ Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. 
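For example (the search ID is a placeholder for one returned by an earlier EQL search):

[source,ts]
----
// Deletes the stored search and its results; the ID below is a placeholder.
await client.eql.delete({ id: 'my-async-eql-search-id' })
----
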
-{ref}/eql-search-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete[Endpoint documentation] [source,ts] ---- client.eql.delete({ id }) @@ -4635,7 +4877,7 @@ A search ID is also provided if the request’s `keep_on_completion` parameter i Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search. -{ref}/get-async-eql-search-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get[Endpoint documentation] [source,ts] ---- client.eql.get({ id }) @@ -4656,7 +4898,7 @@ Defaults to no timeout, meaning the request waits for complete search results. Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results. -{ref}/get-async-eql-status-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status[Endpoint documentation] [source,ts] ---- client.eql.getStatus({ id }) @@ -4674,7 +4916,7 @@ Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event. -{ref}/eql-search-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search[Endpoint documentation] [source,ts] ---- client.eql.search({ index, query }) @@ -4695,8 +4937,12 @@ client.eql.search({ index, query }) ** *`keep_alive` (Optional, string | -1 | 0)* ** *`keep_on_completion` (Optional, boolean)* ** *`wait_for_completion_timeout` (Optional, string | -1 | 0)* -** *`allow_partial_search_results` (Optional, boolean)* -** *`allow_partial_sequence_results` (Optional, boolean)* +** *`allow_partial_search_results` (Optional, boolean)*: Allow query execution also in case of shard failures. +If true, the query will keep running and will return results based on the available shards. +For sequences, the behavior can be further refined using allow_partial_sequence_results +** *`allow_partial_sequence_results` (Optional, boolean)*: This flag applies only to sequences and has effect only if allow_partial_search_results=true. +If true, the sequence query will return results based on the available shards, ignoring the others. +If false, the sequence query will return successfully, but will always have empty results. ** *`size` (Optional, number)*: For basic queries, the maximum number of matching events to return. Defaults to 10 ** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. ** *`result_position` (Optional, Enum("tail" | "head"))* @@ -4717,7 +4963,7 @@ Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its pr The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. 
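A sketch of the submit-then-poll flow, assuming an index named `my-index` and the `id` and `is_running` fields on the response:

[source,ts]
----
const submitted = await client.esql.asyncQuery({
  query: 'FROM my-index | LIMIT 10', // hypothetical index name
  wait_for_completion_timeout: '2s'  // return early if still running
})
if (submitted.is_running && submitted.id != null) {
  // Poll later for the stored results using the returned ID.
  const done = await client.esql.asyncQueryGet({ id: submitted.id })
  console.log(done.values)
}
----
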
-{ref}/esql-async-query-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query[Endpoint documentation] [source,ts] ---- client.esql.asyncQuery({ query }) @@ -4765,7 +5011,7 @@ If the Elasticsearch security features are enabled, only the following users can * The authenticated user that submitted the original query request * Users with the `cancel_task` cluster privilege -{ref}/esql-async-query-delete-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete[Endpoint documentation] [source,ts] ---- client.esql.asyncQueryDelete({ id }) @@ -4785,7 +5031,7 @@ Get async ES|QL query results. Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. -{ref}/esql-async-query-get-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get[Endpoint documentation] [source,ts] ---- client.esql.asyncQueryGet({ id }) @@ -4807,6 +5053,29 @@ By default, the request waits for complete query results. If the request completes during the period specified in this parameter, complete query results are returned. Otherwise, the response returns an `is_running` value of `true` and no results. +[discrete] +==== async_query_stop +Stop async ES|QL query. + +This API interrupts the query execution and returns the results so far. +If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. + +{ref}/esql-async-query-stop-api.html[Endpoint documentation] +[source,ts] +---- +client.esql.asyncQueryStop({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. + [discrete] ==== query Run an ES|QL query. @@ -4853,7 +5122,7 @@ In order to ensure data integrity, all system indices that comprise a feature st The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. -{ref}/get-features-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features[Endpoint documentation] [source,ts] ---- client.features.getFeatures({ ... }) @@ -4885,7 +5154,7 @@ To list the features that will be affected, use the get features API. IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. 
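Because the operation is destructive, a sketch would typically list the affected feature states first; intended for development and test clusters only:

[source,ts]
----
// Inspect which feature states exist before wiping them.
const { features } = await client.features.getFeatures()
console.log(features.map(f => f.name))
await client.features.resetFeatures()
----
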
-{ref}/modules-snapshots.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features[Endpoint documentation] [source,ts] ---- client.features.resetFeatures({ ... }) @@ -4902,10 +5171,11 @@ client.features.resetFeatures({ ... }) [discrete] ==== global_checkpoints Get global checkpoints. + Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project. -{ref}/get-global-checkpoints.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet[Endpoint documentation] [source,ts] ---- client.fleet.globalCheckpoints({ index }) @@ -4931,6 +5201,8 @@ Run multiple Fleet searches. Run several Fleet searches with a single API request. The API follows the same structure as the multi search API. However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch[Endpoint documentation] [source,ts] ---- client.fleet.msearch({ ... }) @@ -4965,6 +5237,8 @@ which is true by default. Run a Fleet search. The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search[Endpoint documentation] [source,ts] ---- client.fleet.search({ index }) @@ -5074,7 +5348,7 @@ An initial request to the `_explore` API contains a seed query that identifies t Subsequent requests enable you to spider out from one more vertices of interest. You can exclude vertices that have already been returned. -{ref}/graph-explore-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph[Endpoint documentation] [source,ts] ---- client.graph.explore({ index }) @@ -5101,7 +5375,7 @@ Defaults to no timeout. Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. -{ref}/ilm-delete-lifecycle.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle[Endpoint documentation] [source,ts] ---- client.ilm.deleteLifecycle({ policy }) @@ -5123,7 +5397,7 @@ For data streams, the API retrieves the current lifecycle status for the stream' The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. -{ref}/ilm-explain-lifecycle.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle[Endpoint documentation] [source,ts] ---- client.ilm.explainLifecycle({ index }) @@ -5143,7 +5417,7 @@ To target all data streams and indices, use `*` or `_all`. ==== get_lifecycle Get lifecycle policies. -{ref}/ilm-get-lifecycle.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle[Endpoint documentation] [source,ts] ---- client.ilm.getLifecycle({ ... }) @@ -5160,9 +5434,10 @@ client.ilm.getLifecycle({ ... }) [discrete] ==== get_status Get the ILM status. + Get the current index lifecycle management status. 
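For example, the reported `operation_mode` can be checked before maintenance that requires ILM to be stopped:

[source,ts]
----
const { operation_mode } = await client.ilm.getStatus()
// One of RUNNING, STOPPING, or STOPPED.
if (operation_mode !== 'STOPPED') {
  console.log('ILM is still active:', operation_mode)
}
----
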
-{ref}/ilm-get-status.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status[Endpoint documentation] [source,ts] ---- client.ilm.getStatus() @@ -5186,7 +5461,7 @@ This API provides an automated way of performing three out of the four manual st ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. -{ref}/ilm-migrate-to-data-tiers.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers[Endpoint documentation] [source,ts] ---- client.ilm.migrateToDataTiers({ ... }) @@ -5218,7 +5493,7 @@ If the phase and action are specified, the index will move to the first step of Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy. -{ref}/ilm-move-to-step.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step[Endpoint documentation] [source,ts] ---- client.ilm.moveToStep({ index, current_step, next_step }) @@ -5239,7 +5514,7 @@ If the specified policy exists, it is replaced and the policy version is increme NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions. -{ref}/ilm-put-lifecycle.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle[Endpoint documentation] [source,ts] ---- client.ilm.putLifecycle({ policy }) @@ -5259,7 +5534,7 @@ Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices. -{ref}/ilm-remove-policy.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy[Endpoint documentation] [source,ts] ---- client.ilm.removePolicy({ index }) @@ -5278,7 +5553,7 @@ Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step. -{ref}/ilm-retry-policy.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry[Endpoint documentation] [source,ts] ---- client.ilm.retry({ index }) @@ -5297,7 +5572,7 @@ Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API. -{ref}/ilm-start.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start[Endpoint documentation] [source,ts] ---- client.ilm.start({ ... }) @@ -5319,7 +5594,7 @@ This is useful when you are performing maintenance on the cluster and need to pr The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running. -{ref}/ilm-stop.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop[Endpoint documentation] [source,ts] ---- client.ilm.stop({ ... }) @@ -5337,9 +5612,11 @@ client.ilm.stop({ ... }) [discrete] ==== add_block Add an index block. 
-Limits the operations allowed on an index by blocking specific operation types. -{ref}/index-modules-blocks.html[Endpoint documentation] +Add an index block to an index. +Index blocks limit the operations allowed on an index by blocking specific operation types. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block[Endpoint documentation] [source,ts] ---- client.indices.addBlock({ index, block }) @@ -5349,13 +5626,24 @@ client.indices.addBlock({ index, block }) ==== Arguments * *Request (object):* -** *`index` (string)*: A comma separated list of indices to add a block to -** *`block` (Enum("metadata" | "read" | "read_only" | "write"))*: The block to add (one of read, write, read_only or metadata) -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`index` (string)*: A list or wildcard expression of index names used to limit the request. +By default, you must explicitly name the indices you are adding blocks to. +To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. +You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. +** *`block` (Enum("metadata" | "read" | "read_only" | "write"))*: The block type to add to the index. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +It supports a list of values, such as `open,hidden`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. 
+It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== analyze @@ -5367,7 +5655,7 @@ The `index.analyze.max_token_count` setting enables you to limit the number of t If more than this limit of tokens gets generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit. -{ref}/indices-analyze.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze[Endpoint documentation] [source,ts] ---- client.indices.analyze({ ... }) @@ -5420,7 +5708,7 @@ By default, the clear cache API clears all caches. To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. -{ref}/indices-clearcache.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache[Endpoint documentation] [source,ts] ---- client.indices.clearCache({ ... }) @@ -5494,7 +5782,7 @@ At that point, Elasticsearch will try to allocate any replicas and may decide to Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. -{ref}/indices-clone-index.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone[Endpoint documentation] [source,ts] ---- client.indices.clone({ index, target }) @@ -5536,7 +5824,7 @@ To open or close indices with `_all`, `*`, or other wildcard expressions, change Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. -{ref}/indices-close.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close[Endpoint documentation] [source,ts] ---- client.indices.close({ index }) @@ -5584,7 +5872,7 @@ If `shards_acknowledged` is false, then the request timed out before the requisi You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. -{ref}/indices-create-index.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create[Endpoint documentation] [source,ts] ---- client.indices.create({ index }) @@ -5611,10 +5899,10 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== create_data_stream Create a data stream. -Creates a data stream. + You must have a matching index template with data stream enabled. -{ref}/data-streams.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream[Endpoint documentation] [source,ts] ---- client.indices.createDataStream({ name }) @@ -5654,9 +5942,10 @@ client.indices.createFrom({ source, dest }) [discrete] ==== data_streams_stats Get data stream stats. -Retrieves statistics for one or more data streams. -{ref}/data-streams.html[Endpoint documentation] +Get statistics for one or more data streams. 
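For example, stats can be scoped with a wildcard (the `logs-*` pattern is illustrative):

[source,ts]
----
const stats = await client.indices.dataStreamsStats({ name: 'logs-*' })
// Top-level counters cover every matching data stream.
console.log(stats.data_stream_count, stats.backing_indices)
----
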
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1[Endpoint documentation] [source,ts] ---- client.indices.dataStreamsStats({ ... }) @@ -5682,7 +5971,7 @@ You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index. -{ref}/indices-delete-index.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete[Endpoint documentation] [source,ts] ---- client.indices.delete({ index }) @@ -5713,7 +6002,7 @@ If no response is received before the timeout expires, the request fails and ret Delete an alias. Removes a data stream or index from an alias. -{ref}/indices-delete-alias.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias[Endpoint documentation] [source,ts] ---- client.indices.deleteAlias({ index, name }) @@ -5737,7 +6026,7 @@ If no response is received before the timeout expires, the request fails and ret Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. -{ref}/data-streams-delete-lifecycle.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle[Endpoint documentation] [source,ts] ---- client.indices.deleteDataLifecycle({ name }) @@ -5757,7 +6046,7 @@ client.indices.deleteDataLifecycle({ name }) Delete data streams. Deletes one or more data streams and their backing indices. -{ref}/data-streams.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream[Endpoint documentation] [source,ts] ---- client.indices.deleteDataStream({ name }) @@ -5778,7 +6067,7 @@ The provided may contain multiple template names separated by a names are specified then there is no wildcard support and the provided names should match completely with existing templates. -{ref}/indices-delete-template.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template[Endpoint documentation] [source,ts] ---- client.indices.deleteIndexTemplate({ name }) @@ -5796,7 +6085,7 @@ client.indices.deleteIndexTemplate({ name }) ==== delete_template Delete a legacy index template. -{ref}/indices-delete-template-v1.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template[Endpoint documentation] [source,ts] ---- client.indices.deleteTemplate({ name }) @@ -5824,7 +6113,7 @@ NOTE: The total size of fields of the analyzed shards of the index in the respon Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. -{ref}/indices-disk-usage.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage[Endpoint documentation] [source,ts] ---- client.indices.diskUsage({ index }) @@ -5859,7 +6148,7 @@ NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). 
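A sketch of the full sequence, assuming a hypothetical TSDS backing index and a `config` body that sets the target `fixed_interval`:

[source,ts]
----
// A downsample precondition: the source index must be read only.
await client.indices.addBlock({
  index: '.ds-my-tsds-2025.01.01-000001', // hypothetical backing index
  block: 'write'
})
await client.indices.downsample({
  index: '.ds-my-tsds-2025.01.01-000001',
  target_index: 'my-tsds-downsampled-1h',
  config: { fixed_interval: '1h' } // aggregate into hourly buckets
})
----
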
-{ref}/indices-downsample-data-stream.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample[Endpoint documentation] [source,ts] ---- client.indices.downsample({ index, target_index }) @@ -5878,7 +6167,7 @@ client.indices.downsample({ index, target_index }) Check indices. Check if one or more indices, index aliases, or data streams exist. -{ref}/indices-exists.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists[Endpoint documentation] [source,ts] ---- client.indices.exists({ index }) @@ -5903,9 +6192,10 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== exists_alias Check aliases. -Checks if one or more data stream or index aliases exist. -{ref}/indices-aliases.html[Endpoint documentation] +Check if one or more data stream or index aliases exist. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias[Endpoint documentation] [source,ts] ---- client.indices.existsAlias({ name }) @@ -5931,9 +6221,10 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== exists_index_template Check index templates. + Check whether index templates exist. -{ref}/index-templates.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template[Endpoint documentation] [source,ts] ---- client.indices.existsIndexTemplate({ name }) @@ -5954,7 +6245,7 @@ Index templates define settings, mappings, and aliases that can be applied autom IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. -{ref}/indices-template-exists-v1.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template[Endpoint documentation] [source,ts] ---- client.indices.existsTemplate({ name }) @@ -5977,7 +6268,7 @@ To indicate that the request should never timeout, set it to `-1`. Get the status for a data stream lifecycle. Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. -{ref}/data-streams-explain-lifecycle.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle[Endpoint documentation] [source,ts] ---- client.indices.explainDataLifecycle({ index }) @@ -6001,7 +6292,7 @@ A shard-level search request that accesses a given field, even if multiple times The response body reports the per-shard usage count of the data structures that back the fields in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. -{ref}/field-usage-stats.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats[Endpoint documentation] [source,ts] ---- client.indices.fieldUsageStats({ index }) @@ -6037,7 +6328,7 @@ The transaction log is made up of multiple files, called generations, and Elasti It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. 
If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. -{ref}/indices-flush.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush[Endpoint documentation] [source,ts] ---- client.indices.flush({ ... }) @@ -6117,7 +6408,7 @@ For example: POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 ---- -{ref}/indices-forcemerge.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge[Endpoint documentation] [source,ts] ---- client.indices.forcemerge({ ... }) @@ -6142,7 +6433,7 @@ Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices. -{ref}/indices-get-index.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get[Endpoint documentation] [source,ts] ---- client.indices.get({ index }) @@ -6172,7 +6463,7 @@ such as open,hidden. Get aliases. Retrieves information for one or more data stream or index aliases. -{ref}/indices-get-alias.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias[Endpoint documentation] [source,ts] ---- client.indices.getAlias({ ... }) @@ -6201,9 +6492,10 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== get_data_lifecycle Get data stream lifecycles. -Retrieves the data stream lifecycle configuration of one or more data streams. -{ref}/data-streams-get-lifecycle.html[Endpoint documentation] +Get the data stream lifecycle configuration of one or more data streams. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle[Endpoint documentation] [source,ts] ---- client.indices.getDataLifecycle({ name }) @@ -6227,7 +6519,7 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle. -{ref}/data-streams-get-lifecycle-stats.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats[Endpoint documentation] [source,ts] ---- client.indices.getDataLifecycleStats() @@ -6237,9 +6529,10 @@ client.indices.getDataLifecycleStats() [discrete] ==== get_data_stream Get data streams. -Retrieves information about one or more data streams. -{ref}/data-streams.html[Endpoint documentation] +Get information about one or more data streams. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream[Endpoint documentation] [source,ts] ---- client.indices.getDataStream({ ... }) @@ -6265,7 +6558,7 @@ For data streams, the API retrieves field mappings for the stream’s backing in This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. -{ref}/indices-get-field-mapping.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping[Endpoint documentation] [source,ts] ---- client.indices.getFieldMapping({ fields }) @@ -6295,7 +6588,7 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Get index templates. Get information about one or more index templates. 
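For instance, a sketch that lists the matching composable templates (the name pattern is hypothetical):

[source,ts]
----
// Wildcards are supported in the template name
const res = await client.indices.getIndexTemplate({ name: 'logs-*' })
console.log(res.index_templates.map(t => t.name))
----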
-{ref}/indices-get-template.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template[Endpoint documentation] [source,ts] ---- client.indices.getIndexTemplate({ ... }) @@ -6316,7 +6609,7 @@ client.indices.getIndexTemplate({ ... }) Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices. -{ref}/indices-get-mapping.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping[Endpoint documentation] [source,ts] ---- client.indices.getMapping({ ... }) @@ -6362,7 +6655,7 @@ Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices. -{ref}/indices-get-settings.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings[Endpoint documentation] [source,ts] ---- client.indices.getSettings({ ... }) @@ -6400,7 +6693,7 @@ Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. -{ref}/indices-get-template-v1.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template[Endpoint documentation] [source,ts] ---- client.indices.getTemplate({ ... }) @@ -6507,7 +6800,7 @@ Closing indices can be turned off with the cluster settings API by setting `clus Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well. -{ref}/indices-open-close.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open[Endpoint documentation] [source,ts] ---- client.indices.open({ index }) @@ -6737,7 +7030,7 @@ If you need to change the mapping of a field in other indices, create a new inde Renaming a field would invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name. -{ref}/indices-put-mapping.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping[Endpoint documentation] [source,ts] ---- client.indices.putMapping({ index }) @@ -6799,7 +7092,7 @@ This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. -{ref}/indices-update-settings.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings[Endpoint documentation] [source,ts] ---- client.indices.putSettings({ ... }) @@ -6855,7 +7148,7 @@ Multiple index templates can potentially match an index, in this case, both the The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. 
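To make the ordering concrete, here is a sketch with two hypothetical legacy templates that both match `te*`; the `order: 1` template is merged last, so it wins where the two overlap:

[source,ts]
----
await client.indices.putTemplate({
  name: 'template-base',
  index_patterns: ['te*'],
  order: 0,
  settings: { number_of_shards: 1, number_of_replicas: 0 }
})
await client.indices.putTemplate({
  name: 'template-override',
  index_patterns: ['te*'],
  order: 1,
  settings: { number_of_replicas: 1 } // applied last, so this value wins
})
----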
-{ref}/indices-templates-v1.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template[Endpoint documentation] [source,ts] ---- client.indices.putTemplate({ name }) @@ -6910,7 +7203,7 @@ The index recovery API reports information about completed recoveries only for s It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. -{ref}/indices-recovery.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery[Endpoint documentation] [source,ts] ---- client.indices.recovery({ ... }) @@ -6943,7 +7236,7 @@ To ensure good cluster performance, it's recommended to wait for Elasticsearch's If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. -{ref}/indices-refresh.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh[Endpoint documentation] [source,ts] ---- client.indices.refresh({ ... }) @@ -6981,7 +7274,7 @@ As a result, the total shard count returned by the API can differ from the numbe Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. -{ref}/indices-reload-analyzers.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers[Endpoint documentation] [source,ts] ---- client.indices.reloadSearchAnalyzers({ index }) @@ -6999,8 +7292,9 @@ client.indices.reloadSearchAnalyzers({ index }) [discrete] ==== resolve_cluster Resolve the cluster. -Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. -Multiple patterns and remote clusters are supported. + +Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. +If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. @@ -7009,7 +7303,7 @@ Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: -* Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. +* Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint. 
* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.
* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
* Cluster version information, including the Elasticsearch server version.

For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.
Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.

-**Advantages of using this endpoint before a cross-cluster search**
+## Note on backwards compatibility
+The ability to query without an index expression was added in version 8.18, so when
+querying remote clusters older than that, the local cluster will send the index
+expression `dummy*` to those remote clusters. Thus, if errors occur, you may see a reference
+to that index expression even though you didn't request it. If it causes a problem, you can
+instead include an index expression like `*:*` to bypass the issue.
+
+## Advantages of using this endpoint before a cross-cluster search

You may want to exclude a cluster or index from a search when:

* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.)
* A remote cluster is an older version that does not support the feature you want to use in your search.

-{ref}/indices-resolve-cluster-api.html[Endpoint documentation]
+## Test availability of remote clusters
+
+The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.
+The remote cluster may be available, while the local cluster is not currently connected to it.
+
+You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters.
+For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`.
+The `connected` field in the response will indicate whether it was successful.
+If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster[Endpoint documentation]
[source,ts]
----
client.indices.resolveCluster({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (Optional, string | string[])*: A list of names or index patterns for the indices, aliases, and data streams to resolve.
Resources on remote clusters can be specified using the `<cluster>`:`<name>` syntax.
Index and cluster exclusions (e.g., `-cluster1:*`) are also supported.
+If no index expression is specified, information about all remote clusters configured on the local cluster
+is returned without doing any index matching.
** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing
or closed indices. This behavior applies even if the request targets other open indices. For example, a request
-targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar.
+targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
-** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. Defaults to false.
-** *`ignore_unavailable` (Optional, boolean)*: If false, the request returns an error if it targets a missing or closed index. Defaults to false.
+** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+** *`ignore_unavailable` (Optional, boolean)*: If false, the request returns an error if it targets a missing or closed index.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+** *`timeout` (Optional, string | -1 | 0)*: The maximum time to wait for remote clusters to respond.
+If a remote cluster does not respond within this timeout period, the API response
+will show the cluster as not connected and include an error message that the
+request timed out.
+
+The default timeout is unset and the query can take
+as long as the networking layer is configured to wait for remote clusters that are
+not responding (typically 30 seconds).

[discrete]
==== resolve_index
Resolve indices.
Resolve the names and/or index patterns for indices, aliases, and data streams.
Multiple patterns and remote clusters are supported.
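A minimal sketch (hypothetical local and remote patterns):

[source,ts]
----
// Resolves both local resources and resources on remote clusters
const res = await client.indices.resolveIndex({
  name: 'my-index-*,cluster*:my-index-*'
})
console.log(res.indices, res.aliases, res.data_streams)
----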
-{ref}/indices-resolve-index-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index[Endpoint documentation] [source,ts] ---- client.indices.resolveIndex({ name }) @@ -7117,7 +7447,7 @@ For example, you can create an alias that points to an index named ` info +> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference[Endpoint documentation] [source,ts] ---- client.inference.inference({ inference_id, input }) @@ -7538,14 +7875,18 @@ client.inference.inference({ inference_id, input }) ==== Arguments * *Request (object):* -** *`inference_id` (string)*: The inference Id -** *`input` (string | string[])*: Inference input. -Either a string or an array of strings. -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type -** *`query` (Optional, string)*: Query input, required for rerank task. -Not required for other tasks. -** *`task_settings` (Optional, User-defined value)*: Optional task settings -** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete. +** *`inference_id` (string)*: The unique identifier for the inference endpoint. +** *`input` (string | string[])*: The text on which you want to perform the inference task. +It can be a single string or an array. + +> info +> Inference endpoints for the `completion` task type currently only support a single string as input. +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of inference task that the model performs. +** *`query` (Optional, string)*: The query input, which is required only for the `rerank` task. +It is not required for other tasks. +** *`task_settings` (Optional, User-defined value)*: Task settings for the individual inference request. +These settings are specific to the task type you specified and override the task settings specified when initializing the service. +** *`timeout` (Optional, string | -1 | 0)*: The amount of time to wait for the inference request to complete. [discrete] ==== put @@ -7560,7 +7901,7 @@ IMPORTANT: The inference APIs enable you to use certain services, such as built- For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. 
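As a sketch only (the endpoint name is hypothetical, the `inference_config` request-body key follows my reading of the client's typings, and the `service_settings` shape depends entirely on the service you choose), creating a text embedding endpoint can look like this:

[source,ts]
----
await client.inference.put({
  inference_id: 'my-embedding-endpoint',
  task_type: 'text_embedding',
  inference_config: {
    service: 'elasticsearch',
    service_settings: {
      model_id: '.multilingual-e5-small',
      num_allocations: 1,
      num_threads: 1
    }
  }
})
----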
-{ref}/put-inference-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put[Endpoint documentation] [source,ts] ---- client.inference.put({ inference_id }) @@ -7584,7 +7925,7 @@ IMPORTANT: The inference APIs enable you to use certain services, such as built- This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. -{ref}/stream-inference-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference[Endpoint documentation] [source,ts] ---- client.inference.streamInference({ inference_id, input }) @@ -7635,7 +7976,7 @@ IMPORTANT: The inference APIs enable you to use certain services, such as built- For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. -{ref}/update-inference-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update[Endpoint documentation] [source,ts] ---- client.inference.update({ inference_id }) @@ -7654,9 +7995,10 @@ client.inference.update({ inference_id }) [discrete] ==== delete_geoip_database Delete GeoIP database configurations. + Delete one or more IP geolocation database configurations. -{ref}/delete-geoip-database-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database[Endpoint documentation] [source,ts] ---- client.ingest.deleteGeoipDatabase({ id }) @@ -7667,15 +8009,15 @@ client.ingest.deleteGeoipDatabase({ id }) * *Request (object):* ** *`id` (string | string[])*: A list of geoip database configurations to delete -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== delete_ip_location_database Delete IP geolocation database configurations. -{ref}/delete-ip-location-database-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database[Endpoint documentation] [source,ts] ---- client.ingest.deleteIpLocationDatabase({ id }) @@ -7698,7 +8040,7 @@ A value of `-1` indicates that the request should never time out. Delete pipelines. Delete one or more ingest pipelines. -{ref}/delete-pipeline-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline[Endpoint documentation] [source,ts] ---- client.ingest.deletePipeline({ id }) @@ -7730,9 +8072,10 @@ client.ingest.geoIpStats() [discrete] ==== get_geoip_database Get GeoIP database configurations. 
+ Get information about one or more IP geolocation database configurations. -{ref}/get-geoip-database-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database[Endpoint documentation] [source,ts] ---- client.ingest.getGeoipDatabase({ ... }) @@ -7742,7 +8085,7 @@ client.ingest.getGeoipDatabase({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve. +** *`id` (Optional, string | string[])*: A list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. @@ -7750,7 +8093,7 @@ To get all database configurations, omit this parameter or use `*`. ==== get_ip_location_database Get IP geolocation database configurations. -{ref}/get-ip-location-database-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database[Endpoint documentation] [source,ts] ---- client.ingest.getIpLocationDatabase({ ... }) @@ -7770,10 +8113,11 @@ A value of `-1` indicates that the request should never time out. [discrete] ==== get_pipeline Get pipelines. + Get information about one or more ingest pipelines. This API returns a local reference of the pipeline. -{ref}/get-pipeline-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline[Endpoint documentation] [source,ts] ---- client.ingest.getPipeline({ ... }) @@ -7807,9 +8151,10 @@ client.ingest.processorGrok() [discrete] ==== put_geoip_database Create or update a GeoIP database configuration. + Refer to the create or update IP geolocation database configuration API. -{ref}/put-geoip-database-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database[Endpoint documentation] [source,ts] ---- client.ingest.putGeoipDatabase({ id, name, maxmind }) @@ -7831,7 +8176,7 @@ If no response is received before the timeout expires, the request fails and ret ==== put_ip_location_database Create or update an IP geolocation database configuration. -{ref}/put-ip-location-database-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database[Endpoint documentation] [source,ts] ---- client.ingest.putIpLocationDatabase({ id }) @@ -7880,10 +8225,11 @@ When a deprecated ingest pipeline is referenced as the default or final pipeline [discrete] ==== simulate Simulate a pipeline. + Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. -{ref}/simulate-pipeline-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate[Endpoint documentation] [source,ts] ---- client.ingest.simulate({ docs }) @@ -7894,10 +8240,10 @@ client.ingest.simulate({ docs }) * *Request (object):* ** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline. -** *`id` (Optional, string)*: Pipeline to test. -If you don’t specify a `pipeline` in the request body, this parameter is required. -** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*: Pipeline to test. 
-If you don’t specify the `pipeline` request path parameter, this parameter is required. +** *`id` (Optional, string)*: The pipeline to test. +If you don't specify a `pipeline` in the request body, this parameter is required. +** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*: The pipeline to test. +If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. ** *`verbose` (Optional, boolean)*: If `true`, the response includes output data for each processor in the executed pipeline. @@ -7906,11 +8252,12 @@ If you specify both this and the request path parameter, the API only uses the r [discrete] ==== delete Delete the license. + When the license expires, your subscription level reverts to Basic. If the operator privileges feature is enabled, only operator users can use this API. -{ref}/delete-license.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete[Endpoint documentation] [source,ts] ---- client.license.delete({ ... }) @@ -7920,18 +8267,20 @@ client.license.delete({ ... }) ==== Arguments * *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get Get license information. + Get information about your Elastic license including its type, its status, when it was issued, and when it expires. -NOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. -If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. +>info +> If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. +> If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. -{ref}/get-license.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get[Endpoint documentation] [source,ts] ---- client.license.get({ ... }) @@ -7949,7 +8298,7 @@ This parameter is deprecated and will always be set to true in 8.x. ==== get_basic_status Get the basic license status. -{ref}/get-basic-status.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status[Endpoint documentation] [source,ts] ---- client.license.getBasicStatus() @@ -7960,7 +8309,7 @@ client.license.getBasicStatus() ==== get_trial_status Get the trial status. -{ref}/get-trial-status.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status[Endpoint documentation] [source,ts] ---- client.license.getTrialStatus() @@ -7970,6 +8319,7 @@ client.license.getTrialStatus() [discrete] ==== post Update the license. + You can update your license at runtime without shutting down your nodes. License updates take effect immediately. 
If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. @@ -7978,7 +8328,7 @@ You must then re-submit the API request with the acknowledge parameter set to tr NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API. -{ref}/update-license.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post[Endpoint documentation] [source,ts] ---- client.license.post({ ... }) @@ -7991,12 +8341,13 @@ client.license.post({ ... }) ** *`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })* ** *`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])*: A sequence of one or more JSON documents containing the license information. ** *`acknowledge` (Optional, boolean)*: Specifies whether you acknowledge the license changes. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== post_start_basic Start a basic license. + Start an indefinite basic license, which gives access to all the basic features. NOTE: In order to start a basic license, you must not currently have a basic license. @@ -8006,7 +8357,7 @@ You must then re-submit the API request with the `acknowledge` parameter set to To check the status of your basic license, use the get basic license API. -{ref}/start-basic.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic[Endpoint documentation] [source,ts] ---- client.license.postStartBasic({ ... }) @@ -8030,7 +8381,7 @@ For example, if you have already activated a trial for v8.0, you cannot start a To check the status of your trial, use the get trial status API. -{ref}/start-trial.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial[Endpoint documentation] [source,ts] ---- client.license.postStartTrial({ ... }) @@ -8052,7 +8403,7 @@ Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code. -{ref}/logstash-api-delete-pipeline.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline[Endpoint documentation] [source,ts] ---- client.logstash.deletePipeline({ id }) @@ -8069,7 +8420,7 @@ client.logstash.deletePipeline({ id }) Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. 
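A minimal sketch (hypothetical pipeline ID; omit `id` to fetch all pipelines):

[source,ts]
----
const pipelines = await client.logstash.getPipeline({ id: 'my-pipeline' })
// The response object is keyed by pipeline ID
console.log(Object.keys(pipelines))
----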
-{ref}/logstash-api-get-pipeline.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline[Endpoint documentation] [source,ts] ---- client.logstash.getPipeline({ ... }) @@ -8088,7 +8439,7 @@ Create or update a Logstash pipeline. Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced. -{ref}/logstash-api-put-pipeline.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline[Endpoint documentation] [source,ts] ---- client.logstash.putPipeline({ id }) @@ -8111,7 +8462,7 @@ Get information about different cluster, node, and index level settings that use TIP: This APIs is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. -{ref}/migration-api-deprecation.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations[Endpoint documentation] [source,ts] ---- client.migration.deprecations({ ... }) @@ -8132,7 +8483,7 @@ Check which features need to be migrated and the status of any migrations that a TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. -{ref}/feature-migration-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status[Endpoint documentation] [source,ts] ---- client.migration.getFeatureUpgradeStatus() @@ -8149,7 +8500,7 @@ Some functionality might be temporarily unavailable during the migration process TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. -{ref}/feature-migration-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status[Endpoint documentation] [source,ts] ---- client.migration.postFeatureUpgrade() @@ -8161,12 +8512,13 @@ client.migration.postFeatureUpgrade() [discrete] ==== clear_trained_model_deployment_cache Clear trained model deployment cache. + Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. -{ref}/clear-trained-model-deployment-cache.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache[Endpoint documentation] [source,ts] ---- client.ml.clearTrainedModelDeploymentCache({ model_id }) @@ -8181,12 +8533,13 @@ client.ml.clearTrainedModelDeploymentCache({ model_id }) [discrete] ==== close_job Close anomaly detection jobs. + A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. 
Therefore it is a best practice to close jobs that are no longer required to process data.
If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.
When a datafeed that has a specified end date stops, it automatically closes its associated job.

-{ref}/ml-close-job.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job[Endpoint documentation]
[source,ts]
----
client.ml.closeJob({ job_id })
----
@@ -8204,9 +8557,10 @@ client.ml.closeJob({ job_id })
[discrete]
==== delete_calendar
Delete a calendar.
-Removes all scheduled events from a calendar, then deletes it.
+
+Remove all scheduled events from a calendar, then delete it.

-{ref}/ml-delete-calendar.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar[Endpoint documentation]
[source,ts]
----
client.ml.deleteCalendar({ calendar_id })
----
@@ -8222,7 +8576,7 @@ client.ml.deleteCalendar({ calendar_id })
==== delete_calendar_event
Delete events from a calendar.

-{ref}/ml-delete-calendar-event.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event[Endpoint documentation]
[source,ts]
----
client.ml.deleteCalendarEvent({ calendar_id, event_id })
----
@@ -8240,7 +8594,7 @@ You can obtain this identifier by using the get calendar events API.
==== delete_calendar_job
Delete anomaly jobs from a calendar.

-{ref}/ml-delete-calendar-job.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job[Endpoint documentation]
[source,ts]
----
client.ml.deleteCalendarJob({ calendar_id, job_id })
----
@@ -8258,7 +8612,7 @@ list of jobs or groups.
==== delete_data_frame_analytics
Delete a data frame analytics job.

-{ref}/delete-dfanalytics.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics[Endpoint documentation]
[source,ts]
----
client.ml.deleteDataFrameAnalytics({ id })
----
@@ -8276,7 +8630,7 @@ client.ml.deleteDataFrameAnalytics({ id })
==== delete_datafeed
Delete a datafeed.

-{ref}/ml-delete-datafeed.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed[Endpoint documentation]
[source,ts]
----
client.ml.deleteDatafeed({ datafeed_id })
----
@@ -8296,16 +8650,17 @@ stopping and deleting the datafeed.
[discrete]
==== delete_expired_data
Delete expired ML data.
-Deletes all job results, model snapshots and forecast data that have exceeded
+
+Delete all job results, model snapshots and forecast data that have exceeded
their retention days period. Machine learning state documents that are not
associated with any job are also deleted.
You can limit the request to a single or set of anomaly detection jobs by
using a job identifier, a group name, a list of jobs, or a wildcard
expression. You can delete expired data for all anomaly detection
-jobs by using _all, by specifying * as the <job_id>, or by omitting the
-<job_id>.
+jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the
+`<job_id>`.

-{ref}/ml-delete-expired-data.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data[Endpoint documentation]
[source,ts]
----
client.ml.deleteExpiredData({ ...
}) @@ -8324,10 +8679,11 @@ behavior is no throttling. [discrete] ==== delete_filter Delete a filter. + If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. -{ref}/ml-delete-filter.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter[Endpoint documentation] [source,ts] ---- client.ml.deleteFilter({ filter_id }) @@ -8342,12 +8698,13 @@ client.ml.deleteFilter({ filter_id }) [discrete] ==== delete_forecast Delete forecasts from a job. + By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. -{ref}/ml-delete-forecast.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast[Endpoint documentation] [source,ts] ---- client.ml.deleteForecast({ job_id }) @@ -8372,6 +8729,7 @@ error. [discrete] ==== delete_job Delete an anomaly detection job. + All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request @@ -8379,7 +8737,7 @@ first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request. -{ref}/ml-delete-job.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job[Endpoint documentation] [source,ts] ---- client.ml.deleteJob({ job_id }) @@ -8401,11 +8759,12 @@ job deletion completes. [discrete] ==== delete_model_snapshot Delete a model snapshot. + You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. -{ref}/ml-delete-snapshot.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot[Endpoint documentation] [source,ts] ---- client.ml.deleteModelSnapshot({ job_id, snapshot_id }) @@ -8421,9 +8780,10 @@ client.ml.deleteModelSnapshot({ job_id, snapshot_id }) [discrete] ==== delete_trained_model Delete an unreferenced trained model. + The request deletes a trained inference model that is not referenced by an ingest pipeline. -{ref}/delete-trained-models.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model[Endpoint documentation] [source,ts] ---- client.ml.deleteTrainedModel({ model_id }) @@ -8440,11 +8800,12 @@ client.ml.deleteTrainedModel({ model_id }) [discrete] ==== delete_trained_model_alias Delete a trained model alias. + This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. 
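A minimal sketch with hypothetical names (the alias must currently refer to this exact `model_id`, or the call fails as noted above):

[source,ts]
----
await client.ml.deleteTrainedModelAlias({
  model_alias: 'flight-delay-model',
  model_id: 'flight-delay-model-v2'
})
----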
-{ref}/delete-trained-models-aliases.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias[Endpoint documentation]
[source,ts]
----
client.ml.deleteTrainedModelAlias({ model_alias, model_id })
----
@@ -8460,11 +8821,12 @@ client.ml.deleteTrainedModelAlias({ model_alias, model_id })
[discrete]
==== estimate_model_memory
Estimate job model memory usage.
-Makes an estimation of the memory usage for an anomaly detection job model.
-It is based on analysis configuration details for the job and cardinality
+
+Make an estimation of the memory usage for an anomaly detection job model.
+The estimate is based on analysis configuration details for the job and cardinality
estimates for the fields it references.

-{ref}/ml-apis.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory[Endpoint documentation]
[source,ts]
----
client.ml.estimateModelMemory({ ... })
----
@@ -8492,12 +8854,13 @@ omitted from the request if no detectors have a `by_field_name`,
[discrete]
==== evaluate_data_frame
Evaluate data frame analytics.
+
The API packages together commonly used evaluation metrics for various types
of machine learning features. This has been designed for use on indexes
created by data frame analytics. Evaluation requires both a ground truth
field and an analytics result field to be present.

-{ref}/evaluate-dfanalytics.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame[Endpoint documentation]
[source,ts]
----
client.ml.evaluateDataFrame({ evaluation, index })
----
@@ -8514,6 +8877,7 @@ client.ml.evaluateDataFrame({ evaluation, index })
[discrete]
==== explain_data_frame_analytics
Explain data frame analytics config.
+
This API provides explanations for a data frame analytics config that either
exists already or one that has not been created yet. The following
explanations are provided:
@@ -8521,7 +8885,7 @@ explanations are provided:
* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.
If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.

-{ref}/explain-dfanalytics.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics[Endpoint documentation]
[source,ts]
----
client.ml.explainDataFrameAnalytics({ ... })
----
@@ -8571,7 +8935,7 @@ to continue analyzing data.
A close operation additionally prunes and persists the model state to disk
and the job must be opened again before analyzing further data.

-{ref}/ml-flush-job.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job[Endpoint documentation]
[source,ts]
----
client.ml.flushJob({ job_id })
----
@@ -8597,7 +8961,7 @@ error occurs if you try to create a forecast for a job that has an
`over_field_name` in its configuration.
Forecasts predict future behavior based on historical data.

-{ref}/ml-forecast.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast[Endpoint documentation]
[source,ts]
----
client.ml.forecast({ job_id })
----
@@ -8618,7 +8982,7 @@ create a forecast; otherwise, an error occurs.
Get anomaly detection job results for buckets.
The API presents a chronological view of the records, grouped by bucket.
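For example, a sketch that fetches the highest-scoring buckets for a hypothetical job:

[source,ts]
----
const res = await client.ml.getBuckets({
  job_id: 'my-anomaly-job',
  anomaly_score: 75, // only buckets at or above this score
  sort: 'anomaly_score',
  desc: true
})
console.log(res.count, res.buckets.length)
----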
-{ref}/ml-get-bucket.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets[Endpoint documentation]
[source,ts]
----
client.ml.getBuckets({ job_id })
----
@@ -8646,7 +9010,7 @@ parameter, the API returns information about all buckets.
==== get_calendar_events
Get info about events in calendars.

-{ref}/ml-get-calendar-event.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events[Endpoint documentation]
[source,ts]
----
client.ml.getCalendarEvents({ calendar_id })
----
@@ -8667,7 +9031,7 @@ client.ml.getCalendarEvents({ calendar_id })
==== get_calendars
Get calendar configuration info.

-{ref}/ml-get-calendar.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars[Endpoint documentation]
[source,ts]
----
client.ml.getCalendars({ ... })
----
@@ -8686,7 +9050,7 @@ client.ml.getCalendars({ ... })
==== get_categories
Get anomaly detection job results for categories.

-{ref}/ml-get-category.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories[Endpoint documentation]
[source,ts]
----
client.ml.getCategories({ job_id })
----
@@ -8715,7 +9079,7 @@ You can get information for multiple data frame analytics jobs in a single
API request by using a list of data frame analytics
jobs or a wildcard expression.

-{ref}/get-dfanalytics.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics[Endpoint documentation]
[source,ts]
----
client.ml.getDataFrameAnalytics({ ... })
----
@@ -8749,7 +9113,7 @@ be retrieved and then added to another cluster.
==== get_data_frame_analytics_stats
Get data frame analytics jobs usage info.

-{ref}/get-dfanalytics-stats.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats[Endpoint documentation]
[source,ts]
----
client.ml.getDataFrameAnalyticsStats({ ... })
----
@@ -8787,7 +9151,7 @@ get statistics for all datafeeds by using `_all`, by specifying `*` as the
only information you receive is the `datafeed_id` and the `state`.
This API returns a maximum of 10,000 datafeeds.

-{ref}/ml-get-datafeed-stats.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats[Endpoint documentation]
[source,ts]
----
client.ml.getDatafeedStats({ ... })
----
@@ -8820,7 +9184,7 @@ get information for all datafeeds by using `_all`, by specifying `*` as the
`<feed_id>`, or by omitting the `<feed_id>`.
This API returns a maximum of 10,000 datafeeds.

-{ref}/ml-get-datafeed.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds[Endpoint documentation]
[source,ts]
----
client.ml.getDatafeeds({ ... })
----
@@ -8852,7 +9216,7 @@ Get filters.
You can get a single filter or all filters.

-{ref}/ml-get-filter.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters[Endpoint documentation]
[source,ts]
----
client.ml.getFilters({ ... })
----
@@ -8873,7 +9237,7 @@ Influencers are the entities that have contributed to, or are to blame for,
the anomalies. Influencer results are available only if an
`influencer_field_name` is specified in the job configuration.
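A sketch that lists the top influencers for a hypothetical job:

[source,ts]
----
const res = await client.ml.getInfluencers({
  job_id: 'my-anomaly-job',
  influencer_score: 50, // skip low-scoring influencers
  sort: 'influencer_score',
  desc: true
})
console.log(res.influencers.map(i => i.influencer_field_value))
----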
-{ref}/ml-get-influencer.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers[Endpoint documentation]
[source,ts]
----
client.ml.getInfluencers({ job_id })
----
@@ -8905,7 +9269,7 @@ means it is unset and results are not limited to specific timestamps.
==== get_job_stats
Get anomaly detection jobs usage info.

-{ref}/ml-get-job-stats.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats[Endpoint documentation]
[source,ts]
----
client.ml.getJobStats({ ... })
----
@@ -8938,7 +9302,7 @@ request by using a group name, a list of jobs, or a wildcard expression. You
can get information for all anomaly detection jobs by using `_all`, by
specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.

-{ref}/ml-get-job.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs[Endpoint documentation]
[source,ts]
----
client.ml.getJobs({ ... })
----
@@ -8971,7 +9335,7 @@ Get machine learning memory usage info.
Get information about how machine learning jobs and trained models are using memory,
on each node, both within the JVM heap, and natively, outside of the JVM.

-{ref}/get-ml-memory.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats[Endpoint documentation]
[source,ts]
----
client.ml.getMemoryStats({ ... })
----
@@ -8992,7 +9356,7 @@ fails and returns an error.
==== get_model_snapshot_upgrade_stats
Get anomaly detection job model snapshot upgrade usage info.

-{ref}/ml-get-job-model-snapshot-upgrade-stats.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats[Endpoint documentation]
[source,ts]
----
client.ml.getModelSnapshotUpgradeStats({ job_id, snapshot_id })
----
@@ -9020,7 +9384,7 @@ no matches or only partial matches.
==== get_model_snapshots
Get model snapshots info.

-{ref}/ml-get-snapshot.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots[Endpoint documentation]
[source,ts]
----
client.ml.getModelSnapshots({ job_id })
----
@@ -9064,7 +9428,7 @@ greater than its default), the `overall_score` is the maximum `overall_score` of
the overall buckets that have a span equal to the jobs'
largest bucket span.

-{ref}/ml-get-overall-buckets.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets[Endpoint documentation]
[source,ts]
----
client.ml.getOverallBuckets({ job_id })
----
@@ -9102,7 +9466,7 @@ The number of record results depends on the number of anomalies found in each
bucket, which relates to the number of time series being modeled and the
number of detectors.

-{ref}/ml-get-record.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records[Endpoint documentation]
[source,ts]
----
client.ml.getRecords({ job_id })
----
@@ -9127,7 +9491,7 @@ client.ml.getRecords({ job_id })
==== get_trained_models
Get trained model configuration info.

-{ref}/get-trained-models.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models[Endpoint documentation]
[source,ts]
----
client.ml.getTrainedModels({ ... })
----
@@ -9170,7 +9534,7 @@ Get trained models usage info.
You can get usage information for multiple trained models in a single API request by using a list of model IDs or a wildcard expression. -{ref}/get-trained-models-stats.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats[Endpoint documentation] [source,ts] ---- client.ml.getTrainedModelsStats({ ... }) @@ -9197,7 +9561,7 @@ subset of results when there are partial matches. ==== infer_trained_model Evaluate a trained model. -{ref}/infer-trained-model.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model[Endpoint documentation] [source,ts] ---- client.ml.inferTrainedModel({ model_id, docs }) @@ -9225,7 +9589,7 @@ used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. -{ref}/get-ml-info.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info[Endpoint documentation] [source,ts] ---- client.ml.info() @@ -9235,6 +9599,7 @@ client.ml.info() [discrete] ==== open_job Open anomaly detection jobs. + An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. @@ -9242,7 +9607,7 @@ When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. -{ref}/ml-open-job.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job[Endpoint documentation] [source,ts] ---- client.ml.openJob({ job_id }) @@ -9259,7 +9624,7 @@ client.ml.openJob({ job_id }) ==== post_calendar_events Add scheduled events to the calendar. -{ref}/ml-post-calendar-event.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events[Endpoint documentation] [source,ts] ---- client.ml.postCalendarEvents({ calendar_id, events }) @@ -9279,7 +9644,7 @@ Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a list. -{ref}/ml-post-data.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data[Endpoint documentation] [source,ts] ---- client.ml.postData({ job_id }) @@ -9297,9 +9662,9 @@ client.ml.postData({ job_id }) [discrete] ==== preview_data_frame_analytics Preview features used by data frame analytics. -Previews the extracted features used by a data frame analytics config. +Preview the extracted features used by a data frame analytics config. -{ref}/preview-dfanalytics.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics[Endpoint documentation] [source,ts] ---- client.ml.previewDataFrameAnalytics({ ... }) @@ -9326,7 +9691,7 @@ called the API. However, when the datafeed starts it uses the roles of the last datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. 
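A sketch of both variants (the datafeed ID is hypothetical, and the secondary-authorization header value shown is an assumption about your setup):

[source,ts]
----
// Preview with the credentials of the calling user
const preview = await client.ml.previewDatafeed({
  datafeed_id: 'datafeed-my-anomaly-job'
})

// Preview on behalf of another user by passing a secondary
// authorization header as a per-request transport option
await client.ml.previewDatafeed(
  { datafeed_id: 'datafeed-my-anomaly-job' },
  { headers: { 'es-secondary-authorization': 'ApiKey <base64-api-key>' } }
)
----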
-{ref}/ml-preview-datafeed.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed[Endpoint documentation] [source,ts] ---- client.ml.previewDatafeed({ ... }) @@ -9352,7 +9717,7 @@ used. You cannot specify a `job_config` object unless you also supply a `datafee ==== put_calendar Create a calendar. -{ref}/ml-put-calendar.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar[Endpoint documentation] [source,ts] ---- client.ml.putCalendar({ calendar_id }) @@ -9370,7 +9735,7 @@ client.ml.putCalendar({ calendar_id }) ==== put_calendar_job Add anomaly detection job to calendar. -{ref}/ml-put-calendar-job.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job[Endpoint documentation] [source,ts] ---- client.ml.putCalendarJob({ calendar_id, job_id }) @@ -9394,7 +9759,7 @@ If the destination index does not exist, it is created automatically when you st If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. -{ref}/put-dfanalytics.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics[Endpoint documentation] [source,ts] ---- client.ml.putDataFrameAnalytics({ id, analysis, dest, source }) @@ -9477,7 +9842,7 @@ those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. -{ref}/ml-put-datafeed.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed[Endpoint documentation] [source,ts] ---- client.ml.putDatafeed({ datafeed_id }) @@ -9540,7 +9905,7 @@ Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. -{ref}/ml-put-filter.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter[Endpoint documentation] [source,ts] ---- client.ml.putFilter({ filter_id }) @@ -9558,10 +9923,11 @@ Up to 10000 items are allowed in each filter. [discrete] ==== put_job Create an anomaly detection job. + If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. -{ref}/ml-put-job.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job[Endpoint documentation] [source,ts] ---- client.ml.putJob({ job_id, analysis_config, data_description }) @@ -9605,7 +9971,7 @@ whether wildcard expressions match hidden data streams. Supports a list of value Create a trained model. Enable you to supply a trained model that is not created by data frame analytics. 
-{ref}/put-trained-models.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model[Endpoint documentation] [source,ts] ---- client.ml.putTrainedModel({ model_id }) @@ -9667,7 +10033,7 @@ If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning. -{ref}/put-trained-models-aliases.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias[Endpoint documentation] [source,ts] ---- client.ml.putTrainedModelAlias({ model_alias, model_id }) @@ -9687,7 +10053,7 @@ already assigned and this parameter is false, the API returns an error. ==== put_trained_model_definition_part Create part of a trained model definition. -{ref}/put-trained-model-definition-part.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part[Endpoint documentation] [source,ts] ---- client.ml.putTrainedModelDefinitionPart({ model_id, part, definition, total_definition_length, total_parts }) @@ -9710,7 +10076,7 @@ Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. -{ref}/put-trained-model-vocabulary.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary[Endpoint documentation] [source,ts] ---- client.ml.putTrainedModelVocabulary({ model_id, vocabulary }) @@ -9733,7 +10099,7 @@ it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. -{ref}/ml-reset-job.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job[Endpoint documentation] [source,ts] ---- client.ml.resetJob({ job_id }) @@ -9761,7 +10127,7 @@ one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. -{ref}/ml-revert-snapshot.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot[Endpoint documentation] [source,ts] ---- client.ml.revertModelSnapshot({ job_id, snapshot_id }) @@ -9793,7 +10159,7 @@ indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API. -{ref}/ml-set-upgrade-mode.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode[Endpoint documentation] [source,ts] ---- client.ml.setUpgradeMode({ ... }) @@ -9823,7 +10189,7 @@ If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. 
-{ref}/start-dfanalytics.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics[Endpoint documentation] [source,ts] ---- client.ml.startDataFrameAnalytics({ id }) @@ -9855,7 +10221,7 @@ When Elasticsearch security features are enabled, your datafeed remembers which update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. -{ref}/ml-start-datafeed.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed[Endpoint documentation] [source,ts] ---- client.ml.startDatafeed({ datafeed_id }) @@ -9877,7 +10243,7 @@ characters. Start a trained model deployment. It allocates the model to every machine learning node. -{ref}/start-trained-model-deployment.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment[Endpoint documentation] [source,ts] ---- client.ml.startTrainedModelDeployment({ model_id }) @@ -9915,7 +10281,7 @@ Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. -{ref}/stop-dfanalytics.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics[Endpoint documentation] [source,ts] ---- client.ml.stopDataFrameAnalytics({ id }) @@ -9949,7 +10315,7 @@ Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. -{ref}/ml-stop-datafeed.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed[Endpoint documentation] [source,ts] ---- client.ml.stopDatafeed({ datafeed_id }) @@ -9970,7 +10336,7 @@ the identifier. ==== stop_trained_model_deployment Stop a trained model deployment. -{ref}/stop-trained-model-deployment.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment[Endpoint documentation] [source,ts] ---- client.ml.stopTrainedModelDeployment({ model_id }) @@ -9992,7 +10358,7 @@ restart the model deployment. ==== update_data_frame_analytics Update a data frame analytics job. -{ref}/update-dfanalytics.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics[Endpoint documentation] [source,ts] ---- client.ml.updateDataFrameAnalytics({ id }) @@ -10026,7 +10392,7 @@ When Elasticsearch security features are enabled, your datafeed remembers which the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. -{ref}/ml-update-datafeed.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed[Endpoint documentation] [source,ts] ---- client.ml.updateDatafeed({ datafeed_id }) @@ -10096,7 +10462,7 @@ whether wildcard expressions match hidden data streams. Supports a list of value Update a filter. Updates the description of a filter, adds items, or removes items from the list. 
-{ref}/ml-update-filter.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter[Endpoint documentation]
[source,ts]
----
client.ml.updateFilter({ filter_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`filter_id` (string)*: A string that uniquely identifies a filter.
** *`add_items` (Optional, string[])*: The items to add to the filter.
** *`description` (Optional, string)*: A description for the filter.
** *`remove_items` (Optional, string[])*: The items to remove from the filter.

[discrete]
==== update_job
Update an anomaly detection job.
Updates certain properties of an anomaly detection job.

-{ref}/ml-update-job.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job[Endpoint documentation]
[source,ts]
----
client.ml.updateJob({ job_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`job_id` (string)*: Identifier for the job.
** *`allow_lazy_open` (Optional, boolean)*: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If `false` and a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to `true`, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available.
** *`analysis_limits` (Optional, { model_memory_limit })*
** *`background_persist_interval` (Optional, string | -1 | 0)*: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the value too low. If the job is open when you make the update, you must stop the datafeed, close the job, then reopen the job and restart the datafeed for the changes to take effect.
** *`custom_settings` (Optional, Record<string, User-defined value>)*: Advanced configuration option. Contains custom meta data about the job. For example, it can contain custom URL information as shown in Adding custom URLs to machine learning results.
** *`categorization_filters` (Optional, string[])*
** *`description` (Optional, string)*: A description of the job.
** *`model_plot_config` (Optional, { annotations_enabled, enabled, terms })*
** *`model_prune_window` (Optional, string | -1 | 0)*
** *`daily_model_snapshot_retention_after_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. For jobs created before version 7.8.0, the default value matches `model_snapshot_retention_days`.
** *`detectors` (Optional, { detector_index, description, custom_rules }[])*: An array of detector update objects.
** *`groups` (Optional, string[])*: A list of job groups. A job can belong to no groups or many.
** *`model_snapshot_retention_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job.
** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen.
** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained.

[discrete]
==== update_model_snapshot
Update a snapshot.
Updates certain properties of a snapshot.

-{ref}/ml-update-snapshot.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot[Endpoint documentation]
[source,ts]
----
client.ml.updateModelSnapshot({ job_id, snapshot_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`job_id` (string)*: Identifier for the anomaly detection job.
** *`snapshot_id` (string)*: Identifier for the model snapshot.
** *`description` (Optional, string)*: A description of the model snapshot.
** *`retain` (Optional, boolean)*: If `true`, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. However, this snapshot will be deleted when the job is deleted.

[discrete]
==== update_trained_model_deployment
Update a trained model deployment.

-{ref}/update-trained-model-deployment.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment[Endpoint documentation]
[source,ts]
----
client.ml.updateTrainedModelDeployment({ model_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`model_id` (string)*: The unique identifier of the trained model. Currently, only PyTorch models are supported.
** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads.

[discrete]
==== upgrade_job_snapshot
Upgrade a snapshot.
-Upgrades an anomaly detection model snapshot to the latest major version.
+Upgrade an anomaly detection model snapshot to the latest major version.
Over time, older snapshot formats are deprecated and removed. Anomaly
detection jobs support only snapshots that are from the current or previous
major version.
NOTE: Before you revert to a saved snapshot, you must close the job.
Only one snapshot per anomaly detection job can be upgraded at a time and
the upgraded snapshot cannot be the current snapshot of the anomaly
detection job.

-{ref}/ml-upgrade-job-model-snapshot.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot[Endpoint documentation]
[source,ts]
----
client.ml.upgradeJobSnapshot({ job_id, snapshot_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`job_id` (string)*: Identifier for the anomaly detection job.
** *`snapshot_id` (string)*: A numerical character string that uniquely identifies the model snapshot.
** *`wait_for_completion` (Optional, boolean)*: When true, the API won’t respond until the upgrade is complete.
Otherwise, it responds as soon as the upgrade task is assigned to a node.
** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the request to complete.

[discrete]
==== clear_repositories_metering_archive
Clear the archived repositories metering.
Clear the archived repositories metering information in the cluster.

-{ref}/clear-repositories-metering-archive-api.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive[Endpoint documentation]
[source,ts]
----
client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version })
----

[discrete]
==== Arguments

* *Request (object):*
** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information.
** *`max_archive_version` (number)*: Specifies the maximum `archive_version` to be cleared from the archive.

[discrete]
==== get_repositories_metering_info
Get cluster repositories metering.
Get repositories metering information for a cluster.
This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time.
Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.

-{ref}/get-repositories-metering-api.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info[Endpoint documentation]
[source,ts]
----
client.nodes.getRepositoriesMeteringInfo({ node_id })
----

[discrete]
==== Arguments

* *Request (object):*
** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information.

[discrete]
==== hot_threads
Get the hot threads for nodes.
Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node. -{ref}/cluster-nodes-hot-threads.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads[Endpoint documentation] [source,ts] ---- client.nodes.hotThreads({ ... }) @@ -10320,9 +10686,10 @@ before the timeout expires, the request fails and returns an error. [discrete] ==== info Get node information. + By default, the API returns all attributes and core settings for cluster nodes. -{ref}/cluster-nodes-info.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info[Endpoint documentation] [source,ts] ---- client.nodes.info({ ... }) @@ -10349,7 +10716,7 @@ When the Elasticsearch keystore is password protected and not simply obfuscated, Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. -{ref}/secure-settings.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings[Endpoint documentation] [source,ts] ---- client.nodes.reloadSecureSettings({ ... }) @@ -10370,7 +10737,7 @@ Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics. -{ref}/cluster-nodes-stats.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats[Endpoint documentation] [source,ts] ---- client.nodes.stats({ ... }) @@ -10397,7 +10764,7 @@ client.nodes.stats({ ... }) ==== usage Get feature usage information. -{ref}/cluster-nodes-usage.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage[Endpoint documentation] [source,ts] ---- client.nodes.usage({ ... }) @@ -10421,7 +10788,7 @@ Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. -{ref}/delete-query-rule.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule[Endpoint documentation] [source,ts] ---- client.queryRules.deleteRule({ ruleset_id, rule_id }) @@ -10440,7 +10807,7 @@ Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable. -{ref}/delete-query-ruleset.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset[Endpoint documentation] [source,ts] ---- client.queryRules.deleteRuleset({ ruleset_id }) @@ -10457,7 +10824,7 @@ client.queryRules.deleteRuleset({ ruleset_id }) Get a query rule. Get details about a query rule within a query ruleset. -{ref}/get-query-rule.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule[Endpoint documentation] [source,ts] ---- client.queryRules.getRule({ ruleset_id, rule_id }) @@ -10475,7 +10842,7 @@ client.queryRules.getRule({ ruleset_id, rule_id }) Get a query ruleset. Get details about a query ruleset. 
-{ref}/get-query-ruleset.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset[Endpoint documentation] [source,ts] ---- client.queryRules.getRuleset({ ruleset_id }) @@ -10492,7 +10859,7 @@ client.queryRules.getRuleset({ ruleset_id }) Get all query rulesets. Get summarized information about the query rulesets. -{ref}/list-query-rulesets.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets[Endpoint documentation] [source,ts] ---- client.queryRules.listRulesets({ ... }) @@ -10515,7 +10882,7 @@ It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. -{ref}/put-query-rule.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule[Endpoint documentation] [source,ts] ---- client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) @@ -10545,7 +10912,7 @@ It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. -{ref}/put-query-ruleset.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset[Endpoint documentation] [source,ts] ---- client.queryRules.putRuleset({ ruleset_id, rules }) @@ -10563,7 +10930,7 @@ client.queryRules.putRuleset({ ruleset_id, rules }) Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. -{ref}/test-query-ruleset.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test[Endpoint documentation] [source,ts] ---- client.queryRules.test({ ruleset_id, match_criteria }) @@ -10606,7 +10973,7 @@ POST my_rollup_index/_delete_by_query } ---- -{ref}/rollup-delete-job.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job[Endpoint documentation] [source,ts] ---- client.rollup.deleteJob({ id }) @@ -10627,7 +10994,7 @@ NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful. -{ref}/rollup-get-job.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs[Endpoint documentation] [source,ts] ---- client.rollup.getJobs({ ... }) @@ -10652,7 +11019,7 @@ This API enables you to inspect an index and determine: . Does this index have associated rollup data somewhere in the cluster? . If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? -{ref}/rollup-get-rollup-caps.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps[Endpoint documentation] [source,ts] ---- client.rollup.getRollupCaps({ ... 
}) @@ -10674,7 +11041,7 @@ A single rollup index may store the data for multiple rollup jobs and may have a * What jobs are stored in an index (or indices specified via a pattern)? * What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? -{ref}/rollup-get-rollup-index-caps.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps[Endpoint documentation] [source,ts] ---- client.rollup.getRollupIndexCaps({ index }) @@ -10699,7 +11066,7 @@ There are three main sections to the job configuration: the logistical details a Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. -{ref}/rollup-put-job.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job[Endpoint documentation] [source,ts] ---- client.rollup.putJob({ id, cron, groups, index_pattern, page_size, rollup_index }) @@ -10775,7 +11142,7 @@ The rollup search endpoint does two things when the search runs: When the two responses are received, the endpoint rewrites the rollup response and merges the two together. During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. -{ref}/rollup-search.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search[Endpoint documentation] [source,ts] ---- client.rollup.rollupSearch({ index }) @@ -10804,7 +11171,7 @@ Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens. -{ref}/rollup-start-job.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job[Endpoint documentation] [source,ts] ---- client.rollup.startJob({ id }) @@ -10831,7 +11198,7 @@ POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. -{ref}/rollup-stop-job.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job[Endpoint documentation] [source,ts] ---- client.rollup.stopJob({ id }) @@ -10854,9 +11221,10 @@ If set to `false`, the API returns immediately and the indexer is stopped asynch [discrete] ==== delete Delete a search application. + Remove a search application and its associated alias. Indices attached to the search application are not removed. -{ref}/delete-search-application.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete[Endpoint documentation] [source,ts] ---- client.searchApplication.delete({ name }) @@ -10866,14 +11234,14 @@ client.searchApplication.delete({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the search application to delete +** *`name` (string)*: The name of the search application to delete. [discrete] ==== delete_behavioral_analytics Delete a behavioral analytics collection. The associated data stream is also deleted. 
-{ref}/delete-analytics-collection.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics[Endpoint documentation]
[source,ts]
----
client.searchApplication.deleteBehavioralAnalytics({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The name of the analytics collection to be deleted

[discrete]
==== get
Get search application details.

-{ref}/get-search-application.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get[Endpoint documentation]
[source,ts]
----
client.searchApplication.get({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The name of the search application

[discrete]
==== get_behavioral_analytics
Get behavioral analytics collections.

-{ref}/list-analytics-collection.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics[Endpoint documentation]
[source,ts]
----
client.searchApplication.getBehavioralAnalytics({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (Optional, string[])*: A list of analytics collections to limit the returned information

[discrete]
==== list
Get search applications.
Get information about search applications.

-{ref}/list-search-applications.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-list[Endpoint documentation]
[source,ts]
----
client.searchApplication.list({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`q` (Optional, string)*: Query in the Lucene query string syntax.
** *`from` (Optional, number)*: Starting offset.
** *`size` (Optional, number)*: Specifies a max number of results to get.

[discrete]
==== post_behavioral_analytics_event
Create a behavioral analytics collection event.

-{ref}/post-analytics-collection-event.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event[Endpoint documentation]
[source,ts]
----
client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_type })
----

[discrete]
==== Arguments

* *Request (object):*
** *`collection_name` (string)*: The name of the behavioral analytics collection.
** *`event_type` (Enum("page_view" | "search" | "search_click"))*: The analytics event type.
** *`payload` (Optional, User-defined value)*
** *`debug` (Optional, boolean)*: Whether the response type has to include more details

[discrete]
==== put
Create or update a search application.

-{ref}/put-search-application.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put[Endpoint documentation]
[source,ts]
----
client.searchApplication.put({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The name of the search application to be created or updated.
** *`search_application` (Optional, { indices, analytics_collection_name, template })*
** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing Search Applications.

[discrete]
==== put_behavioral_analytics
Create a behavioral analytics collection.

-{ref}/put-analytics-collection.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics[Endpoint documentation]
[source,ts]
----
client.searchApplication.putBehavioralAnalytics({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The name of the analytics collection to be created or updated

[discrete]
==== render_query
Render a search application query.
Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified.
The API returns the specific Elasticsearch query that would be generated and run.

You must have `read` privileges on the backing alias of the search application.

-{ref}/search-application-render-query.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query[Endpoint documentation]
[source,ts]
----
client.searchApplication.renderQuery({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The name of the search application to render the query for.
** *`params` (Optional, Record<string, User-defined value>)*

[discrete]
==== search
Run a search application search.
Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application or default template.
Unspecified template parameters are assigned their default values if applicable.
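A minimal sketch (the application name is a placeholder, and which `params` keys are meaningful depends entirely on the search template configured for the application):

[source,ts]
----
// Run the application's search template with one parameter supplied;
// any template parameters left out fall back to their default values.
const result = await client.searchApplication.search({
  name: 'my-search-app',
  params: { query_string: 'dark roast coffee' }
})
----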
-{ref}/search-application-search.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search[Endpoint documentation] [source,ts] ---- client.searchApplication.search({ name }) @@ -11038,7 +11406,7 @@ client.searchApplication.search({ name }) Get cache statistics. Get statistics about the shared cache for partially mounted indices. -{ref}/searchable-snapshots-api-cache-stats.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.cacheStats({ ... }) @@ -11056,7 +11424,7 @@ client.searchableSnapshots.cacheStats({ ... }) Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. -{ref}/searchable-snapshots-api-clear-cache.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.clearCache({ ... }) @@ -11079,7 +11447,7 @@ Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes. -{ref}/searchable-snapshots-api-mount-snapshot.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.mount({ repository, snapshot, index }) @@ -11106,7 +11474,7 @@ To indicate that the request should never timeout, set it to `-1`. ==== stats Get searchable snapshot statistics. -{ref}/searchable-snapshots-api-stats.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.stats({ ... }) @@ -11138,7 +11506,7 @@ For example, in the JWT `access_token` case, the profile user's `username` is ex When updating a profile document, the API enables the document if it was disabled. Any updates do not change existing content for either the `labels` or `data` fields. -{ref}/security-api-activate-user-profile.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile[Endpoint documentation] [source,ts] ---- client.security.activateUserProfile({ grant_type }) @@ -11169,7 +11537,7 @@ Include the user information in a [basic auth header](https://en.wikipedia.org/w A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. -{ref}/security-api-authenticate.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate[Endpoint documentation] [source,ts] ---- client.security.authenticate() @@ -11183,7 +11551,7 @@ Bulk delete roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. 
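A brief hedged example (the role names are placeholders):

[source,ts]
----
// Delete several native-realm roles in a single request.
const response = await client.security.bulkDeleteRole({
  names: ['my_admin_role', 'my_user_role']
})
----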
-{ref}/security-api-bulk-delete-role.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role[Endpoint documentation] [source,ts] ---- client.security.bulkDeleteRole({ names }) @@ -11203,7 +11571,7 @@ Bulk create or update roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. -{ref}/security-api-bulk-put-role.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role[Endpoint documentation] [source,ts] ---- client.security.bulkPutRole({ roles }) @@ -11235,7 +11603,7 @@ IMPORTANT: If you don't specify `role_descriptors` in the request, a call to thi A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. -{ref}/security-api-bulk-update-api-keys.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys[Endpoint documentation] [source,ts] ---- client.security.bulkUpdateApiKeys({ ids }) @@ -11266,7 +11634,7 @@ Change passwords. Change the passwords of users in the native realm and built-in users. -{ref}/security-api-change-password.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password[Endpoint documentation] [source,ts] ---- client.security.changePassword({ ... }) @@ -11292,7 +11660,7 @@ Clear the API key cache. Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. -{ref}/security-api-clear-api-key-cache.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache[Endpoint documentation] [source,ts] ---- client.security.clearApiKeyCache({ ids }) @@ -11313,7 +11681,7 @@ Clear the privileges cache. Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated. -{ref}/security-api-clear-privilege-cache.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges[Endpoint documentation] [source,ts] ---- client.security.clearCachedPrivileges({ application }) @@ -11338,7 +11706,7 @@ User credentials are cached in memory on each node to avoid connecting to a remo There are realm settings that you can use to configure the user cache. For more information, refer to the documentation about controlling the user cache. -{ref}/security-api-clear-cache.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms[Endpoint documentation] [source,ts] ---- client.security.clearCachedRealms({ realms }) @@ -11360,7 +11728,7 @@ Clear the roles cache. Evict roles from the native role cache. -{ref}/security-api-clear-role-cache.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles[Endpoint documentation] [source,ts] ---- client.security.clearCachedRoles({ name }) @@ -11385,7 +11753,7 @@ This API clears matching entries from both caches. 
The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. -{ref}/security-api-clear-service-token-caches.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens[Endpoint documentation] [source,ts] ---- client.security.clearCachedServiceTokens({ namespace, service, name }) @@ -11418,7 +11786,7 @@ NOTE: By default, API keys never expire. You can specify expiration information The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation. -{ref}/security-api-create-api-key.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key[Endpoint documentation] [source,ts] ---- client.security.createApiKey({ ... }) @@ -11463,7 +11831,7 @@ By default, API keys never expire. You can specify expiration information when y Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. -{ref}/security-api-create-cross-cluster-api-key.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key[Endpoint documentation] [source,ts] ---- client.security.createCrossClusterApiKey({ access, name }) @@ -11495,7 +11863,7 @@ Create a service accounts token for access without requiring basic authenticatio NOTE: Service account tokens never expire. You must actively delete them if they are no longer needed. -{ref}/security-api-create-service-token.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token[Endpoint documentation] [source,ts] ---- client.security.createServiceToken({ namespace, service }) @@ -11531,7 +11899,7 @@ IMPORTANT: The association between the subject public key in the target certific This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token. -{ref}/security-api-delegate-pki-authentication.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki[Endpoint documentation] [source,ts] ---- client.security.delegatePki({ x509_certificate_chain }) @@ -11556,7 +11924,7 @@ To use this API, you must have one of the following privileges: * The `manage_security` cluster privilege (or a greater privilege such as `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. -{ref}/security-api-delete-privilege.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges[Endpoint documentation] [source,ts] ---- client.security.deletePrivileges({ application, name }) @@ -11579,7 +11947,7 @@ Delete roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The delete roles API cannot remove roles that are defined in roles files. 
-{ref}/security-api-delete-role.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role[Endpoint documentation]
[source,ts]
----
client.security.deleteRole({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The name of the role.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== delete_role_mapping
Delete role mappings.

Role mappings define which roles are assigned to each user.
The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files.
The delete role mappings API cannot remove role mappings that are defined in role mapping files.

-{ref}/security-api-delete-role-mapping.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping[Endpoint documentation]
[source,ts]
----
client.security.deleteRoleMapping({ name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`name` (string)*: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== delete_service_token
Delete service account tokens.

Delete service account tokens for a service in a specified namespace.

-{ref}/security-api-delete-service-token.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token[Endpoint documentation]
[source,ts]
----
client.security.deleteServiceToken({ namespace, service, name })
----

[discrete]
==== Arguments

* *Request (object):*
** *`namespace` (string)*: The namespace, which is a top-level grouping of service accounts.
** *`service` (string)*: The service name.
** *`name` (string)*: The name of the service account token.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== delete_user
Delete users.

Delete users from the native realm.

-{ref}/security-api-delete-user.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user[Endpoint documentation]
[source,ts]
----
client.security.deleteUser({ username })
----

[discrete]
==== Arguments

* *Request (object):*
** *`username` (string)*: An identifier for the user.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== disable_user
Disable users.

Disable users in the native realm.
By default, when you create users, they are enabled.
You can use this API to revoke a user's access to Elasticsearch.

-{ref}/security-api-disable-user.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user[Endpoint documentation]
[source,ts]
----
client.security.disableUser({ username })
----

[discrete]
==== Arguments

* *Request (object):*
** *`username` (string)*: An identifier for the user.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== disable_user_profile
Disable a user profile.

Disable user profiles so that they are not visible in user profile searches.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
Individual users and external applications should not call this API directly.
Elastic reserves the right to change or remove this feature in future releases without prior notice.

When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it's not visible in these searches.
To re-enable a disabled user profile, use the enable user profile API.

-{ref}/security-api-disable-user-profile.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile[Endpoint documentation]
[source,ts]
----
client.security.disableUserProfile({ uid })
----

[discrete]
==== Arguments

* *Request (object):*
** *`uid` (string)*: Unique identifier for the user profile.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes.

[discrete]
==== enable_user
Enable users.

Enable users in the native realm.
By default, when you create users, they are enabled.

-{ref}/security-api-enable-user.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user[Endpoint documentation]
[source,ts]
----
client.security.enableUser({ username })
----

[discrete]
==== Arguments

* *Request (object):*
** *`username` (string)*: An identifier for the user.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

[discrete]
==== enable_user_profile
Enable a user profile.

Enable user profiles to make them visible in user profile searches.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
Individual users and external applications should not call this API directly.
Elastic reserves the right to change or remove this feature in future releases without prior notice.

When you activate a user profile, it's automatically enabled and visible in user profile searches.
If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.
-{ref}/security-api-enable-user-profile.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile[Endpoint documentation] [source,ts] ---- client.security.enableUserProfile({ uid }) @@ -11761,7 +12129,7 @@ Enable a Kibana instance to configure itself for communication with a secured El NOTE: This API is currently intended for internal use only by Kibana. Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. -{ref}/security-api-kibana-enrollment.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana[Endpoint documentation] [source,ts] ---- client.security.enrollKibana() @@ -11777,7 +12145,7 @@ Enroll a new node to allow it to join an existing cluster with security features The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. -{ref}/security-api-node-enrollment.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node[Endpoint documentation] [source,ts] ---- client.security.enrollNode() @@ -11792,7 +12160,7 @@ Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. -{ref}/security-api-get-api-key.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key[Endpoint documentation] [source,ts] ---- client.security.getApiKey({ ... }) @@ -11826,7 +12194,7 @@ Get builtin privileges. Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. -{ref}/security-api-get-builtin-privileges.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges[Endpoint documentation] [source,ts] ---- client.security.getBuiltinPrivileges() @@ -11842,7 +12210,7 @@ To use this API, you must have one of the following privileges: * The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. -{ref}/security-api-get-privileges.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges[Endpoint documentation] [source,ts] ---- client.security.getPrivileges({ ... }) @@ -11866,7 +12234,7 @@ Get roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files. -{ref}/security-api-get-role.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role[Endpoint documentation] [source,ts] ---- client.security.getRole({ ... }) @@ -11888,7 +12256,7 @@ Role mappings define which roles are assigned to each user. 
The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files. -{ref}/security-api-get-role-mapping.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping[Endpoint documentation] [source,ts] ---- client.security.getRoleMapping({ ... }) @@ -11908,7 +12276,7 @@ Get a list of service accounts that match the provided path parameters. NOTE: Currently, only the `elastic/fleet-server` service account is available. -{ref}/security-api-get-service-accounts.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts[Endpoint documentation] [source,ts] ---- client.security.getServiceAccounts({ ... }) @@ -11935,7 +12303,7 @@ The response includes service account tokens that were created with the create s NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens. -{ref}/security-api-get-service-credentials.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials[Endpoint documentation] [source,ts] ---- client.security.getServiceCredentials({ namespace, service }) @@ -11959,7 +12327,7 @@ This includes: * `index.auto_expand_replicas` * `index.number_of_replicas` -{ref}/security-api-get-settings.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings[Endpoint documentation] [source,ts] ---- client.security.getSettings({ ... }) @@ -11989,7 +12357,7 @@ The tokens returned by the get token API have a finite period of time for which That time period is defined by the `xpack.security.authc.token.timeout` setting. If you want to invalidate a token immediately, you can do so by using the invalidate token API. -{ref}/security-api-get-token.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token[Endpoint documentation] [source,ts] ---- client.security.getToken({ ... }) @@ -12022,7 +12390,7 @@ Get users. Get information about users in the native realm and built-in users. -{ref}/security-api-get-user.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user[Endpoint documentation] [source,ts] ---- client.security.getUser({ ... }) @@ -12044,7 +12412,7 @@ All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. To check whether a user has a specific list of privileges, use the has privileges API. -{ref}/security-api-get-user-privileges.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges[Endpoint documentation] [source,ts] ---- client.security.getUserPrivileges({ ... }) @@ -12068,7 +12436,7 @@ NOTE: The user profile feature is designed only for use by Kibana and Elastic's Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. 
-{ref}/security-api-get-user-profile.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile[Endpoint documentation] [source,ts] ---- client.security.getUserProfile({ uid }) @@ -12109,7 +12477,7 @@ If applicable, it also returns expiration information for the API key in millise By default, API keys never expire. You can specify expiration information when you create the API keys. -{ref}/security-api-grant-api-key.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key[Endpoint documentation] [source,ts] ---- client.security.grantApiKey({ api_key, grant_type }) @@ -12140,7 +12508,7 @@ Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. -{ref}/security-api-has-privileges.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges[Endpoint documentation] [source,ts] ---- client.security.hasPrivileges({ ... }) @@ -12164,7 +12532,7 @@ Determine whether the users associated with the specified user profile IDs have NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. -{ref}/security-api-has-privileges-user-profile.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile[Endpoint documentation] [source,ts] ---- client.security.hasPrivilegesUserProfile({ uids, privileges }) @@ -12194,7 +12562,7 @@ In addition, with the `manage_own_api_key` privilege, an invalidation request mu - Or, set both `username` and `realm_name` to match the user's identity. - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. -{ref}/security-api-invalidate-api-key.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key[Endpoint documentation] [source,ts] ---- client.security.invalidateApiKey({ ... }) @@ -12234,7 +12602,7 @@ NOTE: While all parameters are optional, at least one of them is required. More specifically, either one of `token` or `refresh_token` parameters is required. If none of these two are specified, then `realm_name` and/or `username` need to be specified. -{ref}/security-api-invalidate-token.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token[Endpoint documentation] [source,ts] ---- client.security.invalidateToken({ ... }) @@ -12262,7 +12630,7 @@ Exchange an OpenID Connect authentication response message for an Elasticsearch Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
-{ref}/security-api-oidc-authenticate.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate[Endpoint documentation] [source,ts] ---- client.security.oidcAuthenticate({ nonce, redirect_uri, state }) @@ -12292,7 +12660,7 @@ If the OpenID Connect authentication realm in Elasticsearch is accordingly confi Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. -{ref}/security-api-oidc-logout.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout[Endpoint documentation] [source,ts] ---- client.security.oidcLogout({ access_token }) @@ -12316,7 +12684,7 @@ The response of this API is a URL pointing to the Authorization Endpoint of the Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. -{ref}/security-api-oidc-prepare-authentication.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication[Endpoint documentation] [source,ts] ---- client.security.oidcPrepareAuthentication({ ... }) @@ -12361,7 +12729,7 @@ Privilege names must begin with a lowercase ASCII letter and must contain only A Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`. -{ref}/security-api-put-privileges.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges[Endpoint documentation] [source,ts] ---- client.security.putPrivileges({ ... }) @@ -12382,7 +12750,7 @@ The role management APIs are generally the preferred way to manage roles in the The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless. -{ref}/security-api-put-role.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role[Endpoint documentation] [source,ts] ---- client.security.putRole({ name }) @@ -12437,7 +12805,7 @@ Thus it is possible to assign a user to a role that reflects their username, the By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names. -{ref}/security-api-put-role-mapping.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping[Endpoint documentation] [source,ts] ---- client.security.putRoleMapping({ name }) @@ -12469,7 +12837,7 @@ Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. To change a user's password without updating any other fields, use the change password API. 
-{ref}/security-api-put-user.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user[Endpoint documentation] [source,ts] ---- client.security.putUser({ username }) @@ -12514,7 +12882,7 @@ To use this API, you must have at least the `manage_own_api_key` or the `read_se If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. -{ref}/security-api-query-api-key.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys[Endpoint documentation] [source,ts] ---- client.security.queryApiKeys({ ... }) @@ -12569,7 +12937,7 @@ The query roles API does not retrieve roles that are defined in roles files, nor You can optionally filter the results with a query. Also, the results can be paginated and sorted. -{ref}/security-api-query-role.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role[Endpoint documentation] [source,ts] ---- client.security.queryRole({ ... }) @@ -12608,7 +12976,7 @@ You can optionally filter the results with a query. NOTE: As opposed to the get user API, built-in users are excluded from the result. This API is only for native users. -{ref}/security-api-query-user.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user[Endpoint documentation] [source,ts] ---- client.security.queryUser({ ... }) @@ -12656,7 +13024,7 @@ In either case, the SAML message needs to be a base64 encoded XML document with After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch. -{ref}/security-api-saml-authenticate.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate[Endpoint documentation] [source,ts] ---- client.security.samlAuthenticate({ content, ids }) @@ -12685,7 +13053,7 @@ An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller of this API must prepare the request accordingly so that this API can handle either of them. -{ref}/security-api-saml-complete-logout.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout[Endpoint documentation] [source,ts] ---- client.security.samlCompleteLogout({ realm, ids }) @@ -12714,7 +13082,7 @@ The custom web application can use this API to have Elasticsearch process the `L After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP. 
-{ref}/security-api-saml-invalidate.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate[Endpoint documentation] [source,ts] ---- client.security.samlInvalidate({ query_string }) @@ -12744,7 +13112,7 @@ If you are using Kibana, refer to the documentation for configuring SAML single- This API invalidates the tokens that were generated for a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout). -{ref}/security-api-saml-logout.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout[Endpoint documentation] [source,ts] ---- client.security.samlLogout({ token }) @@ -12776,7 +13144,7 @@ These parameters contain the algorithm used for the signature and the signature It also returns a random string that uniquely identifies this SAML Authentication request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. -{ref}/security-api-saml-prepare-authentication.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication[Endpoint documentation] [source,ts] ---- client.security.samlPrepareAuthentication({ ... }) @@ -12802,7 +13170,7 @@ Generate SAML metadata for a SAML 2.0 Service Provider. The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. -{ref}/security-api-saml-sp-metadata.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata[Endpoint documentation] [source,ts] ---- client.security.samlServiceProviderMetadata({ realm_name }) @@ -12824,7 +13192,7 @@ NOTE: The user profile feature is designed only for use by Kibana and Elastic's Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. -{ref}/security-api-suggest-user-profile.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles[Endpoint documentation] [source,ts] ---- client.security.suggestUserProfiles({ ... }) @@ -12869,7 +13237,7 @@ The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. -{ref}/security-api-update-api-key.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key[Endpoint documentation] [source,ts] ---- client.security.updateApiKey({ id }) @@ -12915,7 +13283,7 @@ The owner user's information, such as the `username` and `realm`, is also update NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. 
-{ref}/security-api-update-cross-cluster-api-key.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key[Endpoint documentation] [source,ts] ---- client.security.updateCrossClusterApiKey({ id, access }) @@ -12948,7 +13316,7 @@ NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use. -{ref}/security-api-update-settings.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings[Endpoint documentation] [source,ts] ---- client.security.updateSettings({ ... }) @@ -12987,7 +13355,7 @@ New keys and their values are added to the profile document and conflicting keys For both labels and data, content is namespaced by the top-level fields. The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. -{ref}/security-api-update-user-profile-data.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data[Endpoint documentation] [source,ts] ---- client.security.updateUserProfileData({ uid }) @@ -13026,7 +13394,7 @@ Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. -{ref}/delete-shutdown.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node[Endpoint documentation] [source,ts] ---- client.shutdown.deleteNode({ node_id }) @@ -13051,7 +13419,7 @@ NOTE: This feature is designed for indirect use by Elasticsearch Service, Elasti If the operator privileges feature is enabled, you must be an operator to use this API. -{ref}/get-shutdown.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node[Endpoint documentation] [source,ts] ---- client.shutdown.getNode({ ... }) @@ -13083,7 +13451,7 @@ If a node is already being prepared for shutdown, you can use this API to change IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. -{ref}/put-shutdown.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node[Endpoint documentation] [source,ts] ---- client.shutdown.putNode({ node_id, type, reason }) @@ -13141,7 +13509,7 @@ By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. -{ref}/simulate-ingest-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest[Endpoint documentation] [source,ts] ---- client.simulate.ingest({ docs }) @@ -13155,7 +13523,7 @@ client.simulate.ingest({ docs }) ** *`index` (Optional, string)*: The index to simulate ingesting into. This value can be overridden by specifying an index on each document. 
If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. -** *`component_template_substitutions` (Optional, Record)*: A map of component template names to substitute component template definition objects. +** *`component_template_substitutions` (Optional, Record)*: A map of component template names to substitute component template definition objects. ** *`index_template_subtitutions` (Optional, Record)*: A map of index template names to substitute index template definition objects. ** *`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })* ** *`pipeline_substitutions` (Optional, Record)*: Pipelines to test. @@ -13172,7 +13540,7 @@ Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. -{ref}/slm-api-delete-policy.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle[Endpoint documentation] [source,ts] ---- client.slm.deleteLifecycle({ policy_id }) @@ -13194,7 +13562,7 @@ Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. -{ref}/slm-api-execute-lifecycle.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle[Endpoint documentation] [source,ts] ---- client.slm.executeLifecycle({ policy_id }) @@ -13216,7 +13584,7 @@ Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule. -{ref}/slm-api-execute-retention.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention[Endpoint documentation] [source,ts] ---- client.slm.executeRetention({ ... }) @@ -13236,7 +13604,7 @@ If no response is received before the timeout expires, the request fails and ret Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. -{ref}/slm-api-get-policy.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle[Endpoint documentation] [source,ts] ---- client.slm.getLifecycle({ ... }) @@ -13257,7 +13625,7 @@ If no response is received before the timeout expires, the request fails and ret Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. -{ref}/slm-api-get-stats.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats[Endpoint documentation] [source,ts] ---- client.slm.getStats({ ... }) @@ -13274,7 +13642,7 @@ client.slm.getStats({ ... }) ==== get_status Get the snapshot lifecycle management status. 
-{ref}/slm-api-get-status.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status[Endpoint documentation]
[source,ts]
----
client.slm.getStatus({ ... })
@@ -13298,7 +13666,7 @@ Create or update a snapshot lifecycle policy.
If the policy already exists, this request increments the policy version.
Only the latest version of a policy is stored.

-{ref}/slm-api-put-policy.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle[Endpoint documentation]
[source,ts]
----
client.slm.putLifecycle({ policy_id })
@@ -13327,7 +13695,7 @@ Start snapshot lifecycle management.
Snapshot lifecycle management (SLM) starts automatically when a cluster is formed.
Manually starting SLM is necessary only if it has been stopped using the stop SLM API.

-{ref}/slm-api-start.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start[Endpoint documentation]
[source,ts]
----
client.slm.start({ ... })
@@ -13355,7 +13723,7 @@ You can manually trigger snapshots with the run snapshot lifecycle policy API ev
The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped.
Use the get snapshot lifecycle management status API to see if SLM is running.

-{ref}/slm-api-stop.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop[Endpoint documentation]
[source,ts]
----
client.slm.stop({ ... })
@@ -13379,7 +13747,7 @@ To indicate that the request should never timeout, set it to `-1`.
Clean up the snapshot repository.
Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.

-{ref}/clean-up-snapshot-repo-api.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository[Endpoint documentation]
[source,ts]
----
client.snapshot.cleanupRepository({ repository })
@@ -13402,7 +13770,7 @@ To indicate that the request should never timeout, set it to `-1`.
Clone a snapshot.
Clone part or all of a snapshot into another snapshot in the same repository.

-{ref}/clone-snapshot-api.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone[Endpoint documentation]
[source,ts]
----
client.snapshot.clone({ repository, snapshot, target_snapshot, indices })
@@ -13428,7 +13796,7 @@ If no response is received before the timeout expires, the request fails and ret
Create a snapshot.
Take a snapshot of a cluster or of data streams and indices.

-{ref}/create-snapshot-api.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create[Endpoint documentation]
[source,ts]
----
client.snapshot.create({ repository, snapshot })
@@ -13488,7 +13856,7 @@ Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and
Several options for this API can be specified using a query parameter or a request body parameter.
If both parameters are specified, only the query parameter is used.
-{ref}/put-snapshot-repo-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository[Endpoint documentation] [source,ts] ---- client.snapshot.createRepository({ repository }) @@ -13513,7 +13881,7 @@ You can also perform this verification with the verify snapshot repository API. ==== delete Delete snapshots. -{ref}/delete-snapshot-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete[Endpoint documentation] [source,ts] ---- client.snapshot.delete({ repository, snapshot }) @@ -13536,7 +13904,7 @@ Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. -{ref}/delete-snapshot-repo-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository[Endpoint documentation] [source,ts] ---- client.snapshot.deleteRepository({ repository }) @@ -13563,7 +13931,7 @@ NOTE: The `after` parameter and `next` field enable you to iterate through snaps It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. Snapshots concurrently created may be seen during an iteration. -{ref}/get-snapshot-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get[Endpoint documentation] [source,ts] ---- client.snapshot.get({ repository, snapshot }) @@ -13613,7 +13981,7 @@ NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm ==== get_repository Get snapshot repository information. -{ref}/get-snapshot-repo-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository[Endpoint documentation] [source,ts] ---- client.snapshot.getRepository({ ... }) @@ -13729,7 +14097,7 @@ If an operation fails due to contention, Elasticsearch retries the operation unt Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes. -{ref}/repo-analysis-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze[Endpoint documentation] [source,ts] ---- client.snapshot.repositoryAnalyze({ repository }) @@ -13783,7 +14151,7 @@ If no such template exists, you can create one or restore a cluster state that c If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. -{ref}/restore-snapshot-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore[Endpoint documentation] [source,ts] ---- client.snapshot.restore({ repository, snapshot }) @@ -13874,7 +14242,7 @@ For example, if you have 100 snapshots with 1,000 shards each, an API request th Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs. 
-{ref}/get-snapshot-status-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status[Endpoint documentation] [source,ts] ---- client.snapshot.status({ ... }) @@ -13900,7 +14268,7 @@ To indicate that the request should never timeout, set it to `-1`. Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. -{ref}/verify-snapshot-repo-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository[Endpoint documentation] [source,ts] ---- client.snapshot.verifyRepository({ repository }) @@ -13924,7 +14292,7 @@ To indicate that the request should never timeout, set it to `-1`. ==== clear_cursor Clear an SQL search cursor. -{ref}/clear-sql-cursor-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor[Endpoint documentation] [source,ts] ---- client.sql.clearCursor({ cursor }) @@ -13947,7 +14315,7 @@ If the Elasticsearch security features are enabled, only the following users can * Users with the `cancel_task` cluster privilege. * The user who first submitted the search. -{ref}/delete-async-sql-search-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async[Endpoint documentation] [source,ts] ---- client.sql.deleteAsync({ id }) @@ -13966,7 +14334,7 @@ Get the current status and available results for an async SQL search or stored s If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. -{ref}/get-async-sql-search-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async[Endpoint documentation] [source,ts] ---- client.sql.getAsync({ id }) @@ -13992,7 +14360,7 @@ It defaults to no timeout, meaning the request waits for complete search results Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search. -{ref}/get-async-sql-search-status-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status[Endpoint documentation] [source,ts] ---- client.sql.getAsyncStatus({ id }) @@ -14009,7 +14377,7 @@ client.sql.getAsyncStatus({ id }) Get SQL search results. Run an SQL request. -{ref}/sql-search-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query[Endpoint documentation] [source,ts] ---- client.sql.query({ ... }) @@ -14060,7 +14428,7 @@ Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding `cursor`. -{ref}/sql-translate-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate[Endpoint documentation] [source,ts] ---- client.sql.translate({ query }) @@ -14097,7 +14465,7 @@ NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API r If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. 
-{ref}/security-api-ssl.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates[Endpoint documentation] [source,ts] ---- client.ssl.certificates() @@ -14127,7 +14495,7 @@ You can migrate an index by creating a new index that does not contain the token Once finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it. -{ref}/delete-synonyms-set.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym[Endpoint documentation] [source,ts] ---- client.synonyms.deleteSynonym({ id }) @@ -14144,7 +14512,7 @@ client.synonyms.deleteSynonym({ id }) Delete a synonym rule. Delete a synonym rule from a synonym set. -{ref}/delete-synonym-rule.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule[Endpoint documentation] [source,ts] ---- client.synonyms.deleteSynonymRule({ set_id, rule_id }) @@ -14161,7 +14529,7 @@ client.synonyms.deleteSynonymRule({ set_id, rule_id }) ==== get_synonym Get a synonym set. -{ref}/get-synonyms-set.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym[Endpoint documentation] [source,ts] ---- client.synonyms.getSynonym({ id }) @@ -14180,7 +14548,7 @@ client.synonyms.getSynonym({ id }) Get a synonym rule. Get a synonym rule from a synonym set. -{ref}/get-synonym-rule.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule[Endpoint documentation] [source,ts] ---- client.synonyms.getSynonymRule({ set_id, rule_id }) @@ -14198,7 +14566,7 @@ client.synonyms.getSynonymRule({ set_id, rule_id }) Get all synonym sets. Get a summary of all defined synonym sets. -{ref}/get-synonyms-set.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym[Endpoint documentation] [source,ts] ---- client.synonyms.getSynonymsSets({ ... }) @@ -14220,7 +14588,7 @@ If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. -{ref}/put-synonyms-set.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym[Endpoint documentation] [source,ts] ---- client.synonyms.putSynonym({ id, synonyms_set }) @@ -14242,7 +14610,7 @@ If any of the synonym rules included is invalid, the API returns an error. When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. -{ref}/put-synonym-rule.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule[Endpoint documentation] [source,ts] ---- client.synonyms.putSynonymRule({ set_id, rule_id, synonyms }) @@ -14273,7 +14641,7 @@ The cancelled flag in the response indicates that the cancellation command has b To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. 
You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. -{ref}/tasks.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks[Endpoint documentation] [source,ts] ---- client.tasks.cancel({ ... }) @@ -14299,7 +14667,7 @@ The API may change in ways that are not backwards compatible. If the task identifier is not found, a 404 response code indicates that there are no resources that match the request. -{ref}/tasks.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks[Endpoint documentation] [source,ts] ---- client.tasks.get({ task_id }) @@ -14376,7 +14744,7 @@ In this example, `X-Opaque-Id: 123456` is the ID as a part of the response heade The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request. -{ref}/tasks.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks[Endpoint documentation] [source,ts] ---- client.tasks.list({ ... }) @@ -14424,7 +14792,7 @@ However, you can optionally override some of the decisions about the text struct If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. -{ref}/find-field-structure.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure[Endpoint documentation] [source,ts] ---- client.textStructure.findFieldStructure({ field, index }) @@ -14536,7 +14904,7 @@ However, you can optionally override some of the decisions about the text struct If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. -{ref}/find-message-structure.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure[Endpoint documentation] [source,ts] ---- client.textStructure.findMessageStructure({ messages }) @@ -14642,7 +15010,7 @@ The response from the API contains: All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. -{ref}/find-structure.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure[Endpoint documentation] [source,ts] ---- client.textStructure.findStructure({ ... }) @@ -14753,7 +15121,7 @@ Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. -{ref}/test-grok-pattern.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern[Endpoint documentation] [source,ts] ---- client.textStructure.testGrokPattern({ grok_pattern, text }) @@ -14774,9 +15142,8 @@ Valid values are `disabled` and `v1`. [discrete] ==== delete_transform Delete a transform. -Deletes a transform. 
-{ref}/delete-transform.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform[Endpoint documentation] [source,ts] ---- client.transform.deleteTransform({ transform_id }) @@ -14805,9 +15172,9 @@ client.transform.getNodeStats() [discrete] ==== get_transform Get transforms. -Retrieves configuration information for transforms. +Get configuration information for transforms. -{ref}/get-transform.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform[Endpoint documentation] [source,ts] ---- client.transform.getTransform({ ... }) @@ -14838,9 +15205,10 @@ be retrieved and then added to another cluster. [discrete] ==== get_transform_stats Get transform stats. -Retrieves usage information for transforms. -{ref}/get-transform-stats.html[Endpoint documentation] +Get usage information for transforms. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats[Endpoint documentation] [source,ts] ---- client.transform.getTransformStats({ transform_id }) @@ -14875,7 +15243,7 @@ It returns a maximum of 100 results. The calculations are based on all the curre generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations. -{ref}/preview-transform.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform[Endpoint documentation] [source,ts] ---- client.transform.previewTransform({ ... }) @@ -14931,7 +15299,7 @@ NOTE: You must use Kibana or this API to create a transform. Do not add a transf not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. -{ref}/put-transform.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform[Endpoint documentation] [source,ts] ---- client.transform.putTransform({ transform_id, dest, source }) @@ -14967,11 +15335,11 @@ the exception of privilege checks. [discrete] ==== reset_transform Reset a transform. -Resets a transform. + Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. -{ref}/reset-transform.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform[Endpoint documentation] [source,ts] ---- client.transform.resetTransform({ transform_id }) @@ -14990,14 +15358,14 @@ must be stopped before it can be reset. [discrete] ==== schedule_now_transform Schedule a transform to start now. -Instantly runs a transform to process data. -If you _schedule_now a transform, it will process the new data instantly, -without waiting for the configured frequency interval. After _schedule_now API is called, -the transform will be processed again at now + frequency unless _schedule_now API +Instantly run a transform to process data. +If you run this API, the transform will process the new data instantly, +without waiting for the configured frequency interval. After the API is called, +the transform will be processed again at `now + frequency` unless the API is called again in the meantime. 
-{ref}/schedule-now-transform.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform[Endpoint documentation] [source,ts] ---- client.transform.scheduleNowTransform({ transform_id }) @@ -15013,7 +15381,6 @@ client.transform.scheduleNowTransform({ transform_id }) [discrete] ==== start_transform Start a transform. -Starts a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping @@ -15030,7 +15397,7 @@ Elasticsearch security features are enabled, the transform remembers which roles time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. -{ref}/start-transform.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform[Endpoint documentation] [source,ts] ---- client.transform.startTransform({ transform_id }) @@ -15049,7 +15416,7 @@ client.transform.startTransform({ transform_id }) Stop transforms. Stops one or more transforms. -{ref}/stop-transform.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform[Endpoint documentation] [source,ts] ---- client.transform.stopTransform({ transform_id }) @@ -15089,7 +15456,7 @@ privileges for the source indices. You must also have `index` and `read` privile Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges. -{ref}/update-transform.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform[Endpoint documentation] [source,ts] ---- client.transform.updateTransform({ transform_id }) @@ -15121,6 +15488,7 @@ timeout expires, the request fails and returns an error. [discrete] ==== upgrade_transforms Upgrade all transforms. + Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. @@ -15135,7 +15503,7 @@ A summary is returned when the upgrade is finished. To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade. -{ref}/upgrade-transforms.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms[Endpoint documentation] [source,ts] ---- client.transform.upgradeTransforms({ ... }) @@ -15164,7 +15532,7 @@ The reason for this behavior is to prevent overwriting the watch status from a w Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false). 
-{ref}/watcher-api-ack-watch.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch[Endpoint documentation]
[source,ts]
----
client.watcher.ackWatch({ watch_id })
@@ -15183,7 +15551,7 @@ If you omit this parameter, all of the actions of the watch are acknowledged.
Activate a watch.
A watch can be either active or inactive.

-{ref}/watcher-api-activate-watch.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch[Endpoint documentation]
[source,ts]
----
client.watcher.activateWatch({ watch_id })
@@ -15200,7 +15568,7 @@ client.watcher.activateWatch({ watch_id })
Deactivate a watch.
A watch can be either active or inactive.

-{ref}/watcher-api-deactivate-watch.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch[Endpoint documentation]
[source,ts]
----
client.watcher.deactivateWatch({ watch_id })
@@ -15223,7 +15591,7 @@ IMPORTANT: Deleting a watch must be done by using only this API.
Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API.
When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.

-{ref}/watcher-api-delete-watch.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch[Endpoint documentation]
[source,ts]
----
client.watcher.deleteWatch({ id })
@@ -15252,7 +15620,7 @@ If your user is allowed to read index `a`, but not index `b`, then the exact sam
When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.

-{ref}/watcher-api-execute-watch.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch[Endpoint documentation]
[source,ts]
----
client.watcher.executeWatch({ ... })
@@ -15281,7 +15649,7 @@ Get Watcher index settings.
Get settings for the Watcher internal index (`.watches`).
Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`.

-{ref}/watcher-api-get-settings.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings[Endpoint documentation]
[source,ts]
----
client.watcher.getSettings({ ... })
@@ -15298,7 +15666,7 @@ If no response is received before the timeout expires, the request fails and ret
==== get_watch
Get a watch.

-{ref}/watcher-api-get-watch.html[Endpoint documentation]
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch[Endpoint documentation]
[source,ts]
----
client.watcher.getWatch({ id })
@@ -15325,7 +15693,7 @@ When you add a watch you can also define its initial active state by setting the
When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges.
If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.
-{ref}/watcher-api-put-watch.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch[Endpoint documentation] [source,ts] ---- client.watcher.putWatch({ id }) @@ -15360,7 +15728,7 @@ Get all registered watches in a paginated manner and optionally filter watches b Note that only the `_id` and `metadata.*` fields are queryable or sortable. -{ref}/watcher-api-query-watches.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches[Endpoint documentation] [source,ts] ---- client.watcher.queryWatches({ ... }) @@ -15383,7 +15751,7 @@ It must be non-negative. Start the watch service. Start the Watcher service if it is not already running. -{ref}/watcher-api-start.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start[Endpoint documentation] [source,ts] ---- client.watcher.start({ ... }) @@ -15401,7 +15769,7 @@ Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter. -{ref}/watcher-api-stats.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats[Endpoint documentation] [source,ts] ---- client.watcher.stats({ ... }) @@ -15419,7 +15787,7 @@ client.watcher.stats({ ... }) Stop the watch service. Stop the Watcher service if it is running. -{ref}/watcher-api-stop.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop[Endpoint documentation] [source,ts] ---- client.watcher.stop({ ... }) @@ -15440,7 +15808,7 @@ Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. -{ref}/watcher-api-update-settings.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings[Endpoint documentation] [source,ts] ---- client.watcher.updateSettings({ ... }) @@ -15468,7 +15836,7 @@ The information provided by the API includes: * License information about the currently installed license. * Feature information for the features that are currently enabled and available under the current license. -{ref}/info-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info[Endpoint documentation] [source,ts] ---- client.xpack.info({ ... }) @@ -15490,7 +15858,7 @@ Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics. -{ref}/usage-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack[Endpoint documentation] [source,ts] ---- client.xpack.usage({ ... }) diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index b4d711bc4..256420631 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -45,7 +45,7 @@ export default class AsyncSearch { /** * Delete an async search. If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} */ async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class AsyncSearch { /** * Get async search results. Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} */ async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> @@ -129,7 +129,7 @@ export default class AsyncSearch { /** * Get the async search status. Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} */ async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -171,7 +171,7 @@ export default class AsyncSearch { /** * Run an async search. When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/async-search.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} */ async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index f1e588e08..7f123c5a2 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -45,7 +45,7 @@ export default class Autoscaling { /** * Delete an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-delete-autoscaling-policy.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy | Elasticsearch API documentation} */ async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Autoscaling { /** * Get the autoscaling capacity. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. This API gets the current autoscaling capacity based on the configured autoscaling policy. It will return information to size the cluster appropriately to the current workload. The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-get-autoscaling-capacity.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity | Elasticsearch API documentation} */ async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -127,7 +127,7 @@ export default class Autoscaling { /** * Get an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-get-autoscaling-capacity.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity | Elasticsearch API documentation} */ async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -169,7 +169,7 @@ export default class Autoscaling { /** * Create or update an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/autoscaling-put-autoscaling-policy.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy | Elasticsearch API documentation} */ async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index 5654ffc1f..ccdedfcb2 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. * To use the `index` action, you must have the `create`, `index`, or `write` index privilege. * To use the `delete` action, you must have the `delete` or `write` index privilege. * To use the `update` action, you must have the `index` or `write` index privilege. * To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. * To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\n optional_source\n action_and_meta_data\n optional_source\n .... action_and_meta_data\n optional_source\n ``` The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. A `create` action fails if a document with the same ID already exists in the target An `index` action adds or replaces a document as necessary. NOTE: Data streams support only the `create` action. To update or delete a document in a data stream, you must target the backing index containing the document. 
An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. NOTE: The final line of data must end with a newline character (`\n`). Each newline character may be preceded by a carriage return (`\r`). When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. Client libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible. There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. **Client support for bulk requests** Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: * Go: Check out `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` * PHP: Check out bulk indexing. **Submitting bulk requests with cURL** If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. The latter doesn't preserve newlines. For example: ``` $ cat requests { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} ``` **Optimistic concurrency control** Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. **Versioning** Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index or delete operation based on the `_version` mapping. It also supports the `version_type`. **Routing** Each bulk item can include the routing value using the `routing` field.
It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Wait for active shards** When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. **Refresh** Control when the changes made by this request are visible to search. NOTE: Only the shards that receive the bulk request will be affected by refresh. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk | Elasticsearch API documentation} */ export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index e4dc21b04..bc397b310 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -45,7 +45,7 @@ export default class Cat { /** * Get aliases. Get the cluster's index aliases, including filter and routing information. This API does not return data stream aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases | Elasticsearch API documentation} */ async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -95,7 +95,7 @@ export default class Cat { /** * Get shard allocation information. Get a snapshot of the number of shards allocated to each data node and their disk space. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation | Elasticsearch API documentation} */ async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -145,7 +145,7 @@ export default class Cat { /** * Get component templates. Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates | Elasticsearch API documentation} */ async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -195,7 +195,7 @@ export default class Cat { /** * Get a document count. Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count | Elasticsearch API documentation} */ async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -245,7 +245,7 @@ export default class Cat { /** * Get field data cache information. Get the amount of heap memory currently used by the field data cache on every data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata | Elasticsearch API documentation} */ async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -295,7 +295,7 @@ export default class Cat { /** * Get the cluster health status. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. 
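A small sketch of consuming the cat health endpoint described above from this client; `format: 'json'` sidesteps parsing the human-oriented text table (the node URL is a placeholder):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Machine-readable output; the plain-text table is meant for humans.
const rows = await client.cat.health({ format: 'json' })
for (const row of rows) {
  // Each row carries both the HH:MM:SS timestamp and the machine-sortable
  // epoch value mentioned above.
  console.log(row.epoch, row.timestamp, row.status)
}
```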
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health | Elasticsearch API documentation} */ async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -335,7 +335,7 @@ export default class Cat { /** * Get CAT help. Get help for the CAT APIs. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat | Elasticsearch API documentation} */ async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -375,7 +375,7 @@ export default class Cat { /** * Get index information. Get high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices | Elasticsearch API documentation} */ async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -425,7 +425,7 @@ export default class Cat { /** * Get master node information. Get information about the master node, including the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-master.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master | Elasticsearch API documentation} */ async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -465,7 +465,7 @@ export default class Cat { /** * Get data frame analytics jobs. Get configuration and usage information about data frame analytics jobs. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. 
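Along the same lines, a hedged sketch for the cat indices endpoint above, restricting output to columns of interest (`my-index-*` is a placeholder pattern):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Only unhealthy indices, selected columns, JSON output.
const indices = await client.cat.indices({
  index: 'my-index-*',
  health: 'red',
  h: ['index', 'docs.count', 'store.size'],
  format: 'json'
})
console.log(indices)
```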
For application consumption, use the get data frame analytics jobs statistics API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-dfanalytics.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics | Elasticsearch API documentation} */ async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -515,7 +515,7 @@ export default class Cat { /** * Get datafeeds. Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-datafeeds.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds | Elasticsearch API documentation} */ async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -565,7 +565,7 @@ export default class Cat { /** * Get anomaly detection jobs. Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-anomaly-detectors.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs | Elasticsearch API documentation} */ async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -615,7 +615,7 @@ export default class Cat { /** * Get trained models. Get configuration and usage information about inference trained models. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-trained-model.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models | Elasticsearch API documentation} */ async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -665,7 +665,7 @@ export default class Cat { /** * Get node attribute information. Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs | Elasticsearch API documentation} */ async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -705,7 +705,7 @@ export default class Cat { /** * Get node information. Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes | Elasticsearch API documentation} */ async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -745,7 +745,7 @@ export default class Cat { /** * Get pending task information. Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks | Elasticsearch API documentation} */ async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -785,7 +785,7 @@ export default class Cat { /** * Get plugin information. Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins | Elasticsearch API documentation} */ async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -825,7 +825,7 @@ export default class Cat { /** * Get shard recovery information. Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery | Elasticsearch API documentation} */ async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -875,7 +875,7 @@ export default class Cat { /** * Get snapshot repository information. Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories | Elasticsearch API documentation} */ async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -915,7 +915,7 @@ export default class Cat { /** * Get segment information. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments | Elasticsearch API documentation} */ async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -965,7 +965,7 @@ export default class Cat { /** * Get shard information. 
Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards | Elasticsearch API documentation} */ async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1015,7 +1015,7 @@ export default class Cat { /** * Get snapshot information. Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots | Elasticsearch API documentation} */ async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1065,7 +1065,7 @@ export default class Cat { /** * Get task information. Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks | Elasticsearch API documentation} */ async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1105,7 +1105,7 @@ export default class Cat { /** * Get index template information. Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates | Elasticsearch API documentation} */ async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1155,7 +1155,7 @@ export default class Cat { /** * Get thread pool statistics. 
Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool | Elasticsearch API documentation} */ async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithOutMeta): Promise async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1205,7 +1205,7 @@ export default class Cat { /** * Get transform information. Get configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-transforms.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms | Elasticsearch API documentation} */ async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index 3631d2dcb..29455527c 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -45,7 +45,7 @@ export default class Ccr { /** * Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-delete-auto-follow-pattern.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern | Elasticsearch API documentation} */ async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Ccr { /** * Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-follow.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow | Elasticsearch API documentation} */ async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -134,7 +134,7 @@ export default class Ccr { /** * Get follower information. Get information about all cross-cluster replication follower indices. 
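A minimal sketch of creating the follower index described by the `follow` method above; the cluster alias and index names are placeholders, and the remote cluster is assumed to be configured already:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Creates `follower-index` locally and starts replicating operations from
// `leader-index` on the remote cluster aliased `remote_cluster_a`.
await client.ccr.follow({
  index: 'follower-index',
  remote_cluster: 'remote_cluster_a',
  leader_index: 'leader-index',
  wait_for_active_shards: 1
})
```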
For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-info.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info | Elasticsearch API documentation} */ async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -176,7 +176,7 @@ export default class Ccr { /** * Get follower stats. Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats | Elasticsearch API documentation} */ async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -218,7 +218,7 @@ export default class Ccr { /** * Forget a follower. Remove the cross-cluster replication follower retention leases from the leader. A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need in order to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so. NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-forget-follower.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower | Elasticsearch API documentation} */ async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -265,7 +265,7 @@ export default class Ccr { /** * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns.
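For the retention-lease cleanup described above, a hedged sketch; all names and the UUID are placeholders you would read from the follower cluster's index metadata:

```ts
import { Client } from '@elastic/elasticsearch'

// Note: this call is made against the *leader* cluster.
const leaderClient = new Client({ node: 'http://leader:9200/' }) // placeholder node

await leaderClient.ccr.forgetFollower({
  index: 'leader-index',
  follower_cluster: 'follower_cluster_a',
  follower_index: 'follower-index',
  follower_index_uuid: 'vYpnaWPRQB6mNspmoCeYyA', // placeholder UUID
  leader_remote_cluster: 'remote_cluster_a'
})
```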
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-auto-follow-pattern.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1 | Elasticsearch API documentation} */ async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -315,7 +315,7 @@ export default class Ccr { /** * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored. You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-pause-auto-follow-pattern.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern | Elasticsearch API documentation} */ async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -357,7 +357,7 @@ export default class Ccr { /** * Pause a follower. Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-pause-follow.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow | Elasticsearch API documentation} */ async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -399,7 +399,7 @@ export default class Ccr { /** * Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. 
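A sketch of the auto-follow pattern creation described above; the pattern name, cluster alias, and index patterns are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// New remote indices matching `logs-*` will be followed automatically;
// indices that existed before the pattern was created are not.
await client.ccr.putAutoFollowPattern({
  name: 'logs-pattern',
  remote_cluster: 'remote_cluster_a',
  leader_index_patterns: ['logs-*'],
  follow_index_pattern: '{{leader_index}}-copy'
})
```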
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-auto-follow-pattern.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern | Elasticsearch API documentation} */ async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -446,7 +446,7 @@ export default class Ccr { /** * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-resume-auto-follow-pattern.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern | Elasticsearch API documentation} */ async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -488,7 +488,7 @@ export default class Ccr { /** * Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively, it could be paused due to replication that cannot be retried because of failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-resume-follow.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow | Elasticsearch API documentation} */ async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -535,7 +535,7 @@ export default class Ccr { /** * Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -574,8 +574,8 @@ export default class Ccr { } /** - * Unfollow an index. Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication.
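Since the unfollow API (described above and continued below) requires the follower to be paused and closed first, a sketch of the full conversion sequence; the index name is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// 1. Stop the following task, 2. close the index, 3. strip the CCR
// metadata, 4. reopen as a regular index. The conversion is irreversible,
// as the note below explains.
await client.ccr.pauseFollow({ index: 'follower-index' })
await client.indices.close({ index: 'follower-index' })
await client.ccr.unfollow({ index: 'follower-index' })
await client.indices.open({ index: 'follower-index' })
```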
The follower index must be paused and closed before you call the unfollow API. NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-unfollow.html | Elasticsearch API documentation} + * Unfollow an index. Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. > info > Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow | Elasticsearch API documentation} */ async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts index 5fab83bb9..7b7258503 100644 --- a/src/api/api/clear_scroll.ts +++ b/src/api/api/clear_scroll.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Clear a scrolling search. Clear the search context and results for a scrolling search. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-scroll-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll | Elasticsearch API documentation} */ export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts index 52334debe..26d5b0e26 100644 --- a/src/api/api/close_point_in_time.ts +++ b/src/api/api/close_point_in_time.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. 
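To make the open/close lifecycle concrete, a sketch of a point-in-time round trip under the semantics described above (the index name and keep-alive are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Open explicitly, reuse across searches, then close as soon as the PIT
// is no longer needed rather than waiting for keep_alive to lapse.
const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
try {
  const result = await client.search({
    pit: { id: pit.id, keep_alive: '1m' },
    query: { match_all: {} }
  })
  console.log(result.hits.hits.length)
} finally {
  await client.closePointInTime({ id: pit.id })
}
```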
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time | Elasticsearch API documentation} */ export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 9da8711f1..730c942d2 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -45,7 +45,7 @@ export default class Cluster { /** * Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain | Elasticsearch API documentation} */ async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -90,7 +90,7 @@ export default class Cluster { /** * Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation} */ async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -132,7 +132,7 @@ export default class Cluster { /** * Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation} */ async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -172,7 +172,7 @@ export default class Cluster { /** * Check component templates. 
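A sketch of the diagnostic call described above for a specific shard; the index name and shard number are placeholders (omitting the body entirely asks Elasticsearch to explain the first unassigned shard it finds):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

const explanation = await client.cluster.allocationExplain({
  index: 'my-index',
  shard: 0,
  primary: true
})
// `allocate_explanation` summarizes why the shard is or is not assigned.
console.log(explanation.allocate_explanation)
```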
Returns information about whether a particular component template exists. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation} */ async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -214,7 +214,7 @@ export default class Cluster { /** * Get component templates. Get information about component templates. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation} */ async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -264,7 +264,7 @@ export default class Cluster { /** * Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-get-settings.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -304,7 +304,7 @@ export default class Cluster { /** * Get the cluster health status. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status. One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health | Elasticsearch API documentation} */ async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -354,7 +354,7 @@ export default class Cluster { /** * Get cluster info. Returns basic information about the cluster. 
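One way to use the "wait until the cluster reaches a certain health level" behavior described above, for example in test setup; the timeout value is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Blocks until the cluster is at least yellow (all primaries assigned)
// or the timeout elapses, in which case `timed_out` is true.
const health = await client.cluster.health({
  wait_for_status: 'yellow',
  timeout: '30s'
})
if (health.timed_out) throw new Error(`cluster is still ${health.status}`)
```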
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-info.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info | Elasticsearch API documentation} */ async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -396,7 +396,7 @@ export default class Cluster { /** * Get the pending cluster tasks. Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks | Elasticsearch API documentation} */ async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -436,7 +436,7 @@ export default class Cluster { /** * Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. 
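A sketch of the exclusion workflow this comment describes (and continues below); the node name is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Exclude a master-eligible node before decommissioning it; a 200
// response means it has been removed from the voting configuration.
await client.cluster.postVotingConfigExclusions({ node_names: 'node-1' })

// ...after the node has been shut down, clear the exclusion list so the
// cluster returns to normal operation.
await client.cluster.deleteVotingConfigExclusions()
```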
If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call. NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation} */ async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -476,7 +476,7 @@ export default class Cluster { /** * Create or update a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. **Applying component templates** You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template's `composed_of` list. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation} */ async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -523,7 +523,7 @@ export default class Cluster { /** * Update the cluster settings. Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value. 
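A sketch of a component template plus the `composed_of` wiring described above; the template and index names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// A component template is inert on its own...
await client.cluster.putComponentTemplate({
  name: 'my-shard-settings',
  template: { settings: { number_of_shards: 1 } }
})

// ...until an index template lists it in `composed_of`.
await client.indices.putIndexTemplate({
  name: 'my-template',
  index_patterns: ['my-data-*'],
  composed_of: ['my-shard-settings']
})
```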
If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings | Elasticsearch API documentation} */ async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -567,8 +567,8 @@ export default class Cluster { } /** - * Get remote cluster information. Get all of the configured remote cluster information. This API returns connection and endpoint information keyed by the configured remote cluster alias. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html | Elasticsearch API documentation} + * Get remote cluster information. Get information about configured remote clusters. The API returns connection and endpoint information keyed by the configured remote cluster alias. > info > This API returns information that reflects current state on the local cluster. > The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. > Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. > To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info | Elasticsearch API documentation} */ async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -608,7 +608,7 @@ export default class Cluster { /** * Reroute the cluster. Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. 
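Circling back to the settings-precedence rules above, a sketch of a persistent (restart-surviving) update; the setting and value are placeholders, and transient settings are avoided per the warning:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Persistent settings survive restarts and take precedence over
// elasticsearch.yml; assigning null resets a setting to its default.
await client.cluster.putSettings({
  persistent: { 'cluster.routing.allocation.enable': 'primaries' }
})
await client.cluster.putSettings({
  persistent: { 'cluster.routing.allocation.enable': null }
})
```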
It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-reroute.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute | Elasticsearch API documentation} */ async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -653,7 +653,7 @@ export default class Cluster { /** * Get the cluster state. Get comprehensive information about the state of the cluster. The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. You may need to consult the Elasticsearch source code to determine the precise meaning of the response. By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable. WARNING: The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs. 
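A sketch of the two reroute uses described above, an explicit move and the `retry_failed` retry round; node and index names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Explicitly move shard 0 of `test`; the balancer may still move other
// shards afterwards to keep the cluster balanced, as noted above.
await client.cluster.reroute({
  commands: [
    { move: { index: 'test', shard: 0, from_node: 'node1', to_node: 'node2' } }
  ]
})

// After correcting the underlying problem, retry shards that exhausted
// their `index.allocation.max_retries` allocation attempts.
await client.cluster.reroute({ retry_failed: true })
```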
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state | Elasticsearch API documentation} */ async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -707,7 +707,7 @@ export default class Cluster { /** * Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index a181384b2..141aa8002 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -45,7 +45,7 @@ export default class Connector { /** * Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in | Elasticsearch API documentation} */ async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Connector { /** * Delete a connector. Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -129,7 +129,7 @@ export default class Connector { /** * Get a connector. Get the details about a connector. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get | Elasticsearch API documentation} */ async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -171,7 +171,7 @@ export default class Connector { /** * Update the connector last sync stats. Update the fields related to the last sync of a connector. This action is used for analytics and monitoring. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-last-sync-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-last-sync | Elasticsearch API documentation} */ async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -218,7 +218,7 @@ export default class Connector { /** * Get all connectors. Get information about all connectors. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list | Elasticsearch API documentation} */ async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithOutMeta): Promise async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -258,7 +258,7 @@ export default class Connector { /** * Create a connector. Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put | Elasticsearch API documentation} */ async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -303,7 +303,7 @@ export default class Connector { /** * Create or update a connector. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put | Elasticsearch API documentation} */ async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -519,7 +519,7 @@ export default class Connector { /** * Cancel a connector sync job. 
Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cancel-connector-sync-job-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel | Elasticsearch API documentation} */ async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -561,7 +561,7 @@ export default class Connector { /** * Check in a connector sync job. Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-sync-job-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in | Elasticsearch API documentation} */ async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -603,7 +603,7 @@ export default class Connector { /** * Claim a connector sync job. This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, it can set the `sync_cursor` property for the sync job. This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/claim-connector-sync-job-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim | Elasticsearch API documentation} */ async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -650,7 +650,7 @@ export default class Connector { /** * Delete a connector sync job. Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-sync-job-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete | Elasticsearch API documentation} */ async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -692,7 +692,7 @@ export default class Connector { /** * Set a connector sync job error. Set the `error` field for a connector sync job and set its `status` to `error`. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-error-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error | Elasticsearch API documentation} */ async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -739,7 +739,7 @@ export default class Connector { /** * Get a connector sync job. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-sync-job-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get | Elasticsearch API documentation} */ async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -781,7 +781,7 @@ export default class Connector { /** * Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-sync-jobs-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list | Elasticsearch API documentation} */ async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -821,7 +821,7 @@ export default class Connector { /** * Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. 
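To make the sync-job lifecycle above concrete, here is a hedged sketch: create a job for a connector, then request cancellation, after which the connector service is responsible for moving the status from `cancelling` to `cancelled`. The connector ID is hypothetical.

```ts
// Create a full sync job for an existing connector (ID is illustrative).
const { id: jobId } = await client.connector.syncJobPost({
  id: 'my-connector',
  job_type: 'full'
})

// Request cancellation; the status becomes `cancelling` until the
// connector service acknowledges it and sets it to `cancelled`.
await client.connector.syncJobCancel({ connector_sync_job_id: jobId })
```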
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-sync-job-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post | Elasticsearch API documentation} */ async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -865,7 +865,7 @@ export default class Connector { /** * Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. This API is mainly used by the connector service for updating sync job information. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-stats-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats | Elasticsearch API documentation} */ async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -912,7 +912,7 @@ export default class Connector { /** * Activate the connector draft filter. Activates the valid draft filtering for a connector. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering | Elasticsearch API documentation} */ async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -954,7 +954,7 @@ export default class Connector { /** * Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` fields of a connector. You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-api-key-id-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id | Elasticsearch API documentation} */ async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1001,7 +1001,7 @@ export default class Connector { /** * Update the connector configuration. 
Update the configuration field in the connector document. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-configuration-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration | Elasticsearch API documentation} */ async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1048,7 +1048,7 @@ export default class Connector { /** * Update the connector error field. Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-error-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error | Elasticsearch API documentation} */ async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1095,7 +1095,7 @@ export default class Connector { /** * Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector: * document-level security * incremental syncs * advanced sync rules * basic sync rules Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-features-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features | Elasticsearch API documentation} */ async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1142,7 +1142,7 @@ export default class Connector { /** * Update the connector filtering. Update the draft filtering configuration of a connector and marks the draft validation state as edited. The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. 
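A sketch of the error-field semantics described above, with an illustrative connector ID: a non-null error flips the connector status to `error`, and resetting it to null reports the connector as `connected` again.

```ts
// Mark the connector as failed with a human-readable reason...
await client.connector.updateError({
  connector_id: 'my-connector',
  error: 'fetch failed: upstream returned 503'
})

// ...and later clear the error, which reports it as connected again.
await client.connector.updateError({
  connector_id: 'my-connector',
  error: null
})
```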
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering | Elasticsearch API documentation} */ async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1236,7 +1236,7 @@ export default class Connector { /** * Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-index-name-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name | Elasticsearch API documentation} */ async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1283,7 +1283,7 @@ export default class Connector { /** * Update the connector name and description. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-name-description-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name | Elasticsearch API documentation} */ async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1377,7 +1377,7 @@ export default class Connector { /** * Update the connector pipeline. When you create a new connector, the configuration of an ingest pipeline is populated with default settings. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-pipeline-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline | Elasticsearch API documentation} */ async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1424,7 +1424,7 @@ export default class Connector { /** * Update the connector scheduling. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-scheduling-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling | Elasticsearch API documentation} */ async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1471,7 +1471,7 @@ export default class Connector { /** * Update the connector service type. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-service-type-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type | Elasticsearch API documentation} */ async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1518,7 +1518,7 @@ export default class Connector { /** * Update the connector status. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-status-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status | Elasticsearch API documentation} */ async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/count.ts b/src/api/api/count.ts index 9370a8928..6e060b369 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -38,8 +38,8 @@ import * as T from '../types' interface That { transport: Transport } /** - * Count search results. Get the number of documents matching a query. The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. The latter must be nested in a `query` key, which is the same as the search API. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html | Elasticsearch API documentation} + * Count search results. Get the number of documents matching a query. The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count | Elasticsearch API documentation} */ export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/create.ts b/src/api/api/create.ts index 6d46dec88..c8c663fa3 100644 --- a/src/api/api/create.ts +++ b/src/api/api/create.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Create a new document in the index. 
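Stepping back to the count API a few hunks above: both ways of supplying the query can be sketched as follows, with placeholder index and field names.

```ts
// Query DSL in the request body...
const { count } = await client.count({
  index: 'my-index',
  query: { term: { 'user.id': 'kimchy' } }
})

// ...or the same filter as a Lucene query string parameter.
const viaParam = await client.count({ index: 'my-index', q: 'user.id:kimchy' })
```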
You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs. Using `_create` guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with the same ID already exists in the index. To update an existing document, you must use the `/<target>/_doc/` API. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter. Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create | Elasticsearch API documentation} */ export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index 00a5e88ac..e8dc5399d 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -45,7 +45,7 @@ export default class DanglingIndices { /** * Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
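Returning to the create API above, a hedged sketch of the `_create` semantics and the `wait_for_active_shards` knob; index name, ID, and document are placeholders:

```ts
try {
  await client.create({
    index: 'my-index',
    id: '1',
    document: { message: 'hello' },
    // Illustrative: wait for every configured shard copy before indexing.
    wait_for_active_shards: 'all'
  })
} catch (err: any) {
  // `_create` returns 409 if a document with this ID already exists.
  if (err?.statusCode === 409) {
    console.log('document already exists')
  } else {
    throw err
  }
}
```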
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dangling-index-delete.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index | Elasticsearch API documentation} */ async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class DanglingIndices { /** * Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dangling-index-import.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index | Elasticsearch API documentation} */ async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -129,7 +129,7 @@ export default class DanglingIndices { /** * Get the dangling indices. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/dangling-indices-list.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices | Elasticsearch API documentation} */ async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts index ced9363fb..63b4cf22b 100644 --- a/src/api/api/delete.ts +++ b/src/api/api/delete.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Delete a document. Remove a JSON document from the specified index. NOTE: You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document. **Optimistic concurrency control** Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Versioning** Each document indexed is versioned. 
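The dangling-index list/import/delete flow a few hunks above could look like the following sketch; note that `accept_data_loss` is required by both destructive operations:

```ts
const { dangling_indices } = await client.danglingIndices.listDanglingIndices()
for (const dangling of dangling_indices) {
  // Recover the index rather than discarding it; deletion would use
  // client.danglingIndices.deleteDanglingIndex with the same arguments.
  await client.danglingIndices.importDanglingIndex({
    index_uuid: dangling.index_uuid,
    accept_data_loss: true
  })
}
```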
When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. **Routing** If routing is used during indexing, the routing value also needs to be specified to delete a document. If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 ``` This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified. **Distributed** The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete | Elasticsearch API documentation} */ export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index e2ce7ab74..f99e09670 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Delete documents. Deletes documents that match the specified query. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `delete` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick, they are not rolled back. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. 
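Before the delete-by-query diff continues, the single-document delete API above can be sketched as follows, combining the optimistic-concurrency and routing rules; the sequence number and primary term would normally come from a previous read of the document:

```ts
await client.delete({
  index: 'my-index-000001',
  id: '1',
  routing: 'shard-1',   // required if the `_routing` mapping demands it
  if_seq_no: 10,        // illustrative values from a prior get/index response
  if_primary_term: 1    // a mismatch yields a 409 version conflict
})
```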
Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query. **Throttling delete requests** To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to disable throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with slices only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. * Delete performance scales linearly across available resources with the number of slices. 
Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. **Cancel a delete by query operation** Any delete by query can be canceled using the task cancel API. For example: ``` POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel ``` The task ID can be found by using the get tasks API. Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query | Elasticsearch API documentation} */ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DeleteByQueryResponse> export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteByQueryResponse, unknown>> diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts index 57d9bacda..4da430635 100644 --- a/src/api/api/delete_by_query_rethrottle.ts +++ b/src/api/api/delete_by_query_rethrottle.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html#docs-delete-by-query-rethrottle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle | Elasticsearch API documentation} */ export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DeleteByQueryRethrottleResponse> export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteByQueryRethrottleResponse, unknown>> diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts index e07b5c09b..e6519dffd 100644 --- a/src/api/api/delete_script.ts +++ b/src/api/api/delete_script.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Delete a script or search template. Deletes a stored script or search template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script | Elasticsearch API documentation} */ export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DeleteScriptResponse> export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteScriptResponse, unknown>> diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index 2539819a6..ea301cac5 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -45,7 +45,7 @@ export default class Enrich { /** * Delete an enrich policy. Deletes an existing enrich policy and its enrich index.
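Tying the throttling and slicing discussion above together, here is a sketch of an asynchronous, sliced delete-by-query that is later slowed down; index name, query, and rates are illustrative:

```ts
const resp = await client.deleteByQuery({
  index: 'my-index',
  conflicts: 'proceed',        // count version conflicts instead of failing
  slices: 'auto',              // one slice per shard, up to a limit
  requests_per_second: 500,
  wait_for_completion: false,  // run as a background task
  query: { match: { 'user.id': 'kimchy' } }
})

// Slowing down takes effect only after the current batch completes.
await client.deleteByQueryRethrottle({
  task_id: String(resp.task),
  requests_per_second: 100
})
```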
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-enrich-policy-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy | Elasticsearch API documentation} */ async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Enrich { /** * Run an enrich policy. Create the enrich index for an existing enrich policy. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/execute-enrich-policy-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy | Elasticsearch API documentation} */ async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -129,7 +129,7 @@ export default class Enrich { /** * Get an enrich policy. Returns information about an enrich policy. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-enrich-policy-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy | Elasticsearch API documentation} */ async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -179,7 +179,7 @@ export default class Enrich { /** * Create an enrich policy. Creates an enrich policy. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-enrich-policy-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy | Elasticsearch API documentation} */ async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -226,7 +226,7 @@ export default class Enrich { /** * Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/enrich-stats-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index 551257c86..9f490aca9 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -45,7 +45,7 @@ export default class Eql { /** * Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. 
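For the enrich APIs above, the put-then-execute order matters: a policy does nothing until its enrich index has been built. A sketch with placeholder names:

```ts
await client.enrich.putPolicy({
  name: 'users-policy',
  match: {
    indices: 'users',
    match_field: 'email',
    enrich_fields: ['first_name', 'last_name']
  }
})

// Builds the enrich index; until this runs, the policy cannot be
// used by an enrich processor.
await client.enrich.executePolicy({ name: 'users-policy' })
```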
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/eql-search-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Eql { /** * Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-search-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get | Elasticsearch API documentation} */ async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> @@ -129,7 +129,7 @@ export default class Eql { /** * Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-eql-status-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status | Elasticsearch API documentation} */ async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -171,7 +171,7 @@ export default class Eql { /** * Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/eql-search-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search | Elasticsearch API documentation} */ async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index 2aede7e26..a507e0952 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -45,7 +45,7 @@ export default class Esql { /** * Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. 
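A sketch of the async ES|QL flow described above: submit a query, then fetch results later if it did not finish within the wait window. The parameters mirror the synchronous query API, as the description notes; the index and timeout are placeholders.

```ts
const submitted = await client.esql.asyncQuery({
  query: 'FROM my-index | LIMIT 10',
  wait_for_completion_timeout: '2s'
})

if (submitted.id != null && submitted.is_running) {
  // Poll the stored results (or stop the query) by ID later on.
  const results = await client.esql.asyncQueryGet({ id: submitted.id })
}
```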
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-api.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query | Elasticsearch API documentation}
   */
  async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlAsyncQueryResponse>
  async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlAsyncQueryResponse, unknown>>
@@ -89,7 +89,7 @@ export default class Esql {

  /**
   * Delete an async ES|QL query. If the query is still running, it is cancelled. Otherwise, the stored results are deleted. If the Elasticsearch security features are enabled, only the following users can use this API to delete a query: * The authenticated user that submitted the original query request * Users with the `cancel_task` cluster privilege
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-delete-api.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete | Elasticsearch API documentation}
   */
  async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlAsyncQueryDeleteResponse>
  async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlAsyncQueryDeleteResponse, unknown>>
@@ -131,7 +131,7 @@ export default class Esql {

  /**
   * Get async ES|QL query results. Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-get-api.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get | Elasticsearch API documentation}
   */
  async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlAsyncQueryGetResponse>
  async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlAsyncQueryGetResponse, unknown>>
@@ -171,6 +171,48 @@ export default class Esql {
    return await this.transport.request({ path, method, querystring, body, meta }, options)
  }

+  /**
+   * Stop async ES|QL query. This API interrupts the query execution and returns the results so far. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.
+   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-stop-api.html | Elasticsearch API documentation}
+   */
+  async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlAsyncQueryStopResponse>
+  async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlAsyncQueryStopResponse, unknown>>
+  async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise<T.EsqlAsyncQueryStopResponse>
+  async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise<any> {
+    const acceptedPath: string[] = ['id']
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_query/async/${encodeURIComponent(params.id.toString())}/stop`
+    const meta: TransportRequestMetadata = {
+      name: 'esql.async_query_stop',
+      pathParts: {
+        id: params.id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
  /**
   * Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-rest.html | Elasticsearch API documentation}
diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts
index e0d66762e..0c5f99bde 100644
--- a/src/api/api/exists.ts
+++ b/src/api/api/exists.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }

 /**
 * Check a document. Verify that a document exists. For example, check to see if a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the document exists, the API returns a status code of `200 - OK`. If the document doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You can use the `version` parameter to check the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation}
 */
export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ExistsResponse>
export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ExistsResponse, unknown>>
diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts
index a57b4b804..750302a6f 100644
--- a/src/api/api/exists_source.ts
+++ b/src/api/api/exists_source.ts
@@ -39,7 +39,7 @@ interface That { transport: Transport }

 /**
 * Check for a document source. Check whether a document source exists in an index. For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not available if it is disabled in the mapping.
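A short usage sketch for both existence checks; the index name and id are placeholders, and each call resolves to a boolean:

// Equivalent to HEAD my-index-000001/_doc/0
const docExists = await client.exists({ index: 'my-index-000001', id: '0' })
// Equivalent to HEAD my-index-000001/_source/1
const sourceExists = await client.existsSource({ index: 'my-index-000001', id: '1' })
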
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation} */ export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index c93ab5275..16150530b 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -38,8 +38,8 @@ import * as T from '../types' interface That { transport: Transport } /** - * Explain a document match result. Returns information about why a specific document matches, or doesn’t match, a query. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html | Elasticsearch API documentation} + * Explain a document match result. Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain | Elasticsearch API documentation} */ export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/features.ts b/src/api/api/features.ts index 7e90c2808..670d84cda 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -45,7 +45,7 @@ export default class Features { /** * Get the features. Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-features-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features | Elasticsearch API documentation} */ async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -85,7 +85,7 @@ export default class Features { /** * Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. WARNING: Intended for development and testing use only. 
Do not reset features on a production cluster. Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices. The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature. Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins. To list the features that will be affected, use the get features API. IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features | Elasticsearch API documentation} */ async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index 6f04e7f32..de9d61a0e 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Get the field capabilities. Get information about the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps | Elasticsearch API documentation} */ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index 8f4d75d31..042fcbfd1 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -127,7 +127,7 @@ export default class Fleet { /** * Get global checkpoints. Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-global-checkpoints.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet | Elasticsearch API documentation} */ async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -169,6 +169,7 @@ export default class Fleet { /** * Run multiple Fleet searches. Run several Fleet searches with a single API request. 
The API follows the same structure as the multi search API. However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch | Elasticsearch API documentation} */ async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> @@ -250,6 +251,7 @@ export default class Fleet { /** * Run a Fleet search. The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search | Elasticsearch API documentation} */ async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/get.ts b/src/api/api/get.ts index 55d638696..3cb82914a 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Get a document by its ID. Get a document and its source or stored fields from an index. By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the `realtime` parameter to false. **Source filtering** By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. You can turn off `_source` retrieval by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false ``` If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead Both parameters take a comma separated list of fields or wildcard expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities ``` If you only want to specify includes, you can use a shorter notation: ``` GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified. **Distributed** The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be. **Versioning support** You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. 
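A sketch of the source-filtering and routing options described above, reusing the `client` instance from the earlier snippet; all names are placeholders:

// Fetch only selected parts of _source, routed by user
const doc = await client.get({
  index: 'my-index-000001',
  id: '2',
  routing: 'user1',
  _source_includes: '*.id',
  _source_excludes: 'entities'
})
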
Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation} */ export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts index a3947bdb6..d079ba650 100644 --- a/src/api/api/get_script.ts +++ b/src/api/api/get_script.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Get a script or search template. Retrieves a stored script or search template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script | Elasticsearch API documentation} */ export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts index 2f73053f3..b263ed089 100644 --- a/src/api/api/get_script_context.ts +++ b/src/api/api/get_script_context.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Get script contexts. Get a list of supported script contexts and their methods. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context | Elasticsearch API documentation} */ export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts index d5b706d9b..7b52735c4 100644 --- a/src/api/api/get_script_languages.ts +++ b/src/api/api/get_script_languages.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Get script languages. Get a list of available script types, languages, and contexts. 
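Both script metadata endpoints take no required parameters, so a usage sketch is a pair of one-liners:

const contexts = await client.getScriptContext()
const languages = await client.getScriptLanguages()
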
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages | Elasticsearch API documentation} */ export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index f72c08cfc..a4eef8c97 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 ``` You can use the source filtering parameters to control which parts of the `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities ``` - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation} */ export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index fc1a19fe6..33534fe4a 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -45,7 +45,7 @@ export default class Graph { /** * Explore graph analytics. Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. Subsequent requests enable you to spider out from one more vertices of interest. You can exclude vertices that have already been returned. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/graph-explore-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph | Elasticsearch API documentation} */ async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts index 1e271ecc0..51a48a265 100644 --- a/src/api/api/health_report.ts +++ b/src/api/api/health_report.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. 
The cluster’s status is controlled by the worst indicator status. In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/health-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report | Elasticsearch API documentation} */ export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index 506120df2..1c097071c 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -45,7 +45,7 @@ export default class Ilm { /** * Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-delete-lifecycle.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle | Elasticsearch API documentation} */ async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Ilm { /** * Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. 
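A minimal sketch of inspecting the lifecycle state of an index (placeholder name), reusing the `client` instance from above:

// Returns when the index entered each phase plus any lifecycle errors
const explain = await client.ilm.explainLifecycle({ index: 'my-index-000001' })
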
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-explain-lifecycle.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle | Elasticsearch API documentation} */ async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -129,7 +129,7 @@ export default class Ilm { /** * Get lifecycle policies. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-get-lifecycle.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle | Elasticsearch API documentation} */ async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -179,7 +179,7 @@ export default class Ilm { /** * Get the ILM status. Get the current index lifecycle management status. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-get-status.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status | Elasticsearch API documentation} */ async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -219,7 +219,7 @@ export default class Ilm { /** * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. Migrating away from custom node attributes routing can be manually performed. This API provides an automated way of performing three out of the four manual steps listed in the migration guide: 1. Stop setting the custom hot attribute on new indices. 1. Remove custom allocation settings from existing ILM policies. 1. Replace custom allocation settings from existing indices with the corresponding tier preference. ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-migrate-to-data-tiers.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers | Elasticsearch API documentation} */ async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -264,7 +264,7 @@ export default class Ilm { /** * Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step. WARNING: This operation can result in the loss of data. 
Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API. You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index This is to prevent the index from being moved from an unexpected step into the next step. When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-move-to-step.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step | Elasticsearch API documentation} */ async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -311,7 +311,7 @@ export default class Ilm { /** * Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented. NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-put-lifecycle.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle | Elasticsearch API documentation} */ async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -358,7 +358,7 @@ export default class Ilm { /** * Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-remove-policy.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy | Elasticsearch API documentation} */ async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -400,7 +400,7 @@ export default class Ilm { /** * Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step. 
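A usage sketch for retrying a policy on an index stuck in the ERROR step (placeholder index name):

// Re-runs the step where the error occurred
await client.ilm.retry({ index: 'my-index-000001' })
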
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-retry-policy.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry | Elasticsearch API documentation} */ async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -442,7 +442,7 @@ export default class Ilm { /** * Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-start.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start | Elasticsearch API documentation} */ async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -482,7 +482,7 @@ export default class Ilm { /** * Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-stop.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop | Elasticsearch API documentation} */ async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/index.ts b/src/api/api/index.ts index 13455eca0..bcd3842eb 100644 --- a/src/api/api/index.ts +++ b/src/api/api/index.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Create or update a document in an index. Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. NOTE: You cannot use this API to send update requests for existing documents in a data stream. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. * To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. 
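A basic sketch of the `PUT /<target>/_doc/<_id>` form described above, using the client from the earlier snippet; index name, id, and document body are placeholders:

// Create or overwrite document 1 in my-index-000001
await client.index({
  index: 'my-index-000001',
  id: '1',
  document: { user: { id: 'elkbee' } }
})
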
NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Optimistic concurrency control** Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). 
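A sketch of the per-request override discussed here and just below; `'all'` requires every configured shard copy to be active before the write proceeds:

await client.index({
  index: 'my-index-000001', // placeholder index
  document: { message: 'hello' },
  wait_for_active_shards: 'all'
})
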
This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter. Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. **No operation (noop) updates** When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable, use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source. There isn't a definitive rule for when noop updates aren't acceptable. It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. **Versioning** Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks.
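A sketch of the external versioning flow explained here and in the example that follows; names and version numbers are placeholders:

// Succeeds only if 2 is greater than the currently stored version
await client.index({
  index: 'my-index-000001',
  id: '1',
  version: 2,
  version_type: 'external',
  document: { user: { id: 'elkbee' } }
})
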
When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external { "user": { "id": "elkbee" } } In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create | Elasticsearch API documentation} */ export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 9fb65257a..c39e5f3a2 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -44,8 +44,8 @@ export default class Indices { } /** - * Add an index block. Limits the operations allowed on an index by blocking specific operation types. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html | Elasticsearch API documentation} + * Add an index block. Add an index block to an index. Index blocks limit the operations allowed on an index by blocking specific operation types. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block | Elasticsearch API documentation} */ async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -88,7 +88,7 @@ export default class Indices { /** * Get tokens from text analysis. The analyze API performs analysis on a text string and returns the resulting tokens. Generating excessive amount of tokens may cause a node to run out of memory. The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. If more than this limit of tokens gets generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit. 
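A minimal sketch of the analyze API with a built-in analyzer; the text is a placeholder:

const res = await client.indices.analyze({
  analyzer: 'standard',
  text: 'The quick brown fox'
})
console.log(res.tokens) // resulting tokens
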
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze | Elasticsearch API documentation} */ async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -185,7 +185,7 @@ export default class Indices { /** * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices. By default, the clear cache API clears all caches. To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -235,7 +235,7 @@ export default class Indices { /** * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: * First, it creates a new target index with the same definition as the source index. * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: * The index must be marked as read-only and have a cluster health status of green. * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. The current write index on a data stream cannot be cloned. In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. 
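A sketch of the clone flow under the requirements listed above (source marked read-only first); both index names are placeholders:

// Block writes on the source, then clone it
await client.indices.addBlock({ index: 'my-source-index', block: 'write' })
await client.indices.clone({ index: 'my-source-index', target: 'my-target-index' })
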
**Monitor the cloning process** The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. At this point, all shards are in the state unassigned. If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. Once the primary shard is allocated, it moves to state initializing, and the clone process begins. When the clone operation completes, the shard will become active. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. **Wait for active shards** Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone | Elasticsearch API documentation} */ async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -283,7 +283,7 @@ export default class Indices { /** * Close an index. A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behaviour can be turned off using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the` action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-close.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close | Elasticsearch API documentation} */ async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -325,7 +325,7 @@ export default class Indices { /** * Create an index. You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following: * Settings for the index. * Mappings for fields in the index. * Index aliases **Wait for active shards** By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. The index creation response will indicate what happened. For example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. These values simply indicate whether the operation completed before the timeout. If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create | Elasticsearch API documentation} */ async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -371,8 +371,8 @@ export default class Indices { } /** - * Create a data stream. Creates a data stream. You must have a matching index template with data stream enabled. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + * Create a data stream. You must have a matching index template with data stream enabled. 
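A one-line usage sketch; the stream name is a placeholder and must match an index template with `data_stream` enabled:

await client.indices.createDataStream({ name: 'logs-myapp-default' })
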
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream | Elasticsearch API documentation} */ async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -451,8 +451,8 @@ export default class Indices { } /** - * Get data stream stats. Retrieves statistics for one or more data streams. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + * Get data stream stats. Get statistics for one or more data streams. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1 | Elasticsearch API documentation} */ async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -502,7 +502,7 @@ export default class Indices { /** * Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards. You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -544,7 +544,7 @@ export default class Indices { /** * Delete an alias. Removes a data stream or index from an alias. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-alias.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias | Elasticsearch API documentation} */ async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -594,7 +594,7 @@ export default class Indices { /** * Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-lifecycle.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle | Elasticsearch API documentation}
   */
  async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteDataLifecycleResponse>
  async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteDataLifecycleResponse, unknown>>
@@ -636,7 +636,7 @@ export default class Indices {

  /**
   * Delete data streams. Deletes one or more data streams and their backing indices.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream | Elasticsearch API documentation}
   */
  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteDataStreamResponse>
  async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteDataStreamResponse, unknown>>
@@ -678,7 +678,7 @@ export default class Indices {

  /**
   * Delete an index template. The provided <index-template> may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template | Elasticsearch API documentation}
   */
  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteIndexTemplateResponse>
  async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteIndexTemplateResponse, unknown>>
@@ -720,7 +720,7 @@ export default class Indices {

  /**
   * Delete a legacy index template.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template-v1.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template | Elasticsearch API documentation}
   */
  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteTemplateResponse>
  async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteTemplateResponse, unknown>>
@@ -762,7 +762,7 @@ export default class Indices {

  /**
   * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate.
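A usage sketch for the disk usage analysis described here; the index name is a placeholder, and the flag must be set for the expensive analysis to run:

const usage = await client.indices.diskUsage({
  index: 'my-index-000001',
  run_expensive_tasks: true
})
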
The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage | Elasticsearch API documentation} */ async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -804,7 +804,7 @@ export default class Indices { /** * Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-downsample-data-stream.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample | Elasticsearch API documentation} */ async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -842,7 +842,7 @@ export default class Indices { /** * Check indices. Check if one or more indices, index aliases, or data streams exist. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists | Elasticsearch API documentation} */ async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -883,8 +883,8 @@ export default class Indices { } /** - * Check aliases. Checks if one or more data stream or index aliases exist. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} + * Check aliases. Check if one or more data stream or index aliases exist. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias | Elasticsearch API documentation} */ async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -934,7 +934,7 @@ export default class Indices { /** * Check index templates. Check whether index templates exist. 
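
As a usage sketch for the existence checks above (an illustration, not part of the patch): both calls resolve to a plain boolean rather than a response body. Names and connection details are placeholders.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// `exists` and `existsAlias` resolve to booleans.
if (await client.indices.exists({ index: 'my-index' })) {
  console.log('index or data stream is present')
}
if (await client.indices.existsAlias({ name: 'my-alias' })) {
  console.log('alias is present')
}
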
@@ -934,7 +934,7 @@ export default class Indices {
 
   /**
    * Check index templates. Check whether index templates exist.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index-templates.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template | Elasticsearch API documentation}
     */
   async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsIndexTemplateResponse>
   async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsIndexTemplateResponse, unknown>>
@@ -976,7 +976,7 @@ export default class Indices {
 
   /**
    * Check existence of index templates. Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-template-exists-v1.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template | Elasticsearch API documentation}
     */
   async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExistsTemplateResponse>
   async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsTemplateResponse, unknown>>
@@ -1018,7 +1018,7 @@ export default class Indices {
 
   /**
    * Get the status for a data stream lifecycle. Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-explain-lifecycle.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle | Elasticsearch API documentation}
     */
   async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExplainDataLifecycleResponse>
   async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExplainDataLifecycleResponse, unknown>>
@@ -1060,7 +1060,7 @@ export default class Indices {
 
   /**
    * Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. The response body reports the per-shard usage count of the data structures that back the fields in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats | Elasticsearch API documentation}
     */
   async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesFieldUsageStatsResponse>
   async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesFieldUsageStatsResponse, unknown>>
@@ -1102,7 +1102,7 @@ export default class Indices {
 
   /**
    * Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush | Elasticsearch API documentation}
     */
   async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesFlushResponse>
   async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesFlushResponse, unknown>>
@@ -1152,7 +1152,7 @@ export default class Indices {
 
   /**
    * Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. **Blocks during a force merge** Calls to this API block until the merge is complete (unless request contains `wait_for_completion=false`). If the client connection is lost before completion then the force merge process will continue in the background. Any new requests to force merge the same indices will also block until the ongoing force merge is complete. **Running force merge asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. However, you can not cancel this task as the force merge task is not cancelable. Elasticsearch creates a record of this task as a document at `_tasks/<task_id>`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. **Force merging multiple indices** You can force merge multiple indices with a single request by targeting: * One or more data streams that contain multiple backing indices * Multiple indices * One or more aliases * All data streams and indices in a cluster Each targeted shard is force-merged separately using the force_merge threadpool. By default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time. If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel. Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case `max_num_segments parameter` is set to `1`, to rewrite all segments into a new one. **Data streams and time-based indices** Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. In these cases, each index only receives indexing traffic for a certain period of time. Once an index receives no more writes, its shards can be force-merged to a single segment. This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 ```
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge | Elasticsearch API documentation}
     */
   async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesForcemergeResponse>
   async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesForcemergeResponse, unknown>>
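
The client-side equivalent of the `_forcemerge?max_num_segments=1` example in the docstring above, as an illustrative sketch (the backing-index name and node URL are placeholders):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Merge an older, read-only backing index down to a single segment.
await client.indices.forcemerge({
  index: '.ds-my-data-stream-2099.03.07-000001',
  max_num_segments: 1
})
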
@@ -1202,7 +1202,7 @@ export default class Indices {
 
   /**
    * Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get | Elasticsearch API documentation}
     */
   async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetResponse>
   async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetResponse, unknown>>
@@ -1244,7 +1244,7 @@ export default class Indices {
 
   /**
    * Get aliases. Retrieves information for one or more data stream or index aliases.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-alias.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias | Elasticsearch API documentation}
     */
   async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetAliasResponse>
   async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetAliasResponse, unknown>>
@@ -1300,8 +1300,8 @@ export default class Indices {
   }
 
   /**
-    * Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle.html | Elasticsearch API documentation}
+    * Get data stream lifecycles. Get the data stream lifecycle configuration of one or more data streams.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle | Elasticsearch API documentation}
     */
   async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetDataLifecycleResponse>
   async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetDataLifecycleResponse, unknown>>
@@ -1343,7 +1343,7 @@ export default class Indices {
 
   /**
    * Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle-stats.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats | Elasticsearch API documentation}
     */
   async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetDataLifecycleStatsResponse>
   async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetDataLifecycleStatsResponse, unknown>>
@@ -1382,8 +1382,8 @@ export default class Indices {
   }
 
   /**
-    * Get data streams. Retrieves information about one or more data streams.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation}
+    * Get data streams. Get information about one or more data streams.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream | Elasticsearch API documentation}
     */
   async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetDataStreamResponse>
   async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetDataStreamResponse, unknown>>
@@ -1433,7 +1433,7 @@ export default class Indices {
 
   /**
    * Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping | Elasticsearch API documentation}
     */
   async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetFieldMappingResponse>
   async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetFieldMappingResponse, unknown>>
@@ -1483,7 +1483,7 @@ export default class Indices {
 
   /**
    * Get index templates. Get information about one or more index templates.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template | Elasticsearch API documentation}
     */
   async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetIndexTemplateResponse>
   async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetIndexTemplateResponse, unknown>>
@@ -1533,7 +1533,7 @@ export default class Indices {
 
   /**
    * Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping | Elasticsearch API documentation}
     */
   async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetMappingResponse>
   async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetMappingResponse, unknown>>
@@ -1625,7 +1625,7 @@ export default class Indices {
 
   /**
    * Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings | Elasticsearch API documentation}
     */
   async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetSettingsResponse>
   async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetSettingsResponse, unknown>>
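
For the get-style APIs in this region, a small illustrative sketch of reading data stream metadata; the pattern and node URL are placeholders:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// List data streams matching a pattern and inspect their backing indices.
const { data_streams } = await client.indices.getDataStream({ name: 'my-data-stream*' })
for (const ds of data_streams) {
  console.log(ds.name, ds.generation, ds.indices.length)
}
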
@@ -1682,7 +1682,7 @@ export default class Indices {
 
   /**
    * Get index templates. Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template-v1.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template | Elasticsearch API documentation}
     */
   async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetTemplateResponse>
   async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetTemplateResponse, unknown>>
@@ -1852,7 +1852,7 @@ export default class Indices {
 
   /**
    * Open a closed index. For data streams, the API opens any closed backing indices. A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off by using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open | Elasticsearch API documentation}
     */
   async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesOpenResponse>
   async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesOpenResponse, unknown>>
@@ -2075,7 +2075,7 @@ export default class Indices {
 
   /**
    * Update field mappings. Add new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields and add new properties to existing object fields. For data streams, these changes are applied to all backing indices by default. **Add multi-fields to an existing field** Multi-fields let you index the same field in different ways. You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. You can populate the new multi-field with the update by query API. **Change supported mapping parameters for an existing field** The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. For example, you can use the update mapping API to update the `ignore_above` parameter. **Change the mapping of an existing field** Except for supported mapping parameters, you can't change the mapping or field type of an existing field. Changing an existing field could invalidate data that's already indexed. If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. **Rename a field** Renaming a field would invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping | Elasticsearch API documentation}
     */
   async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutMappingResponse>
   async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutMappingResponse, unknown>>
@@ -2122,7 +2122,7 @@ export default class Indices {
 
   /**
    * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings | Elasticsearch API documentation}
     */
   async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutSettingsResponse>
   async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutSettingsResponse, unknown>>
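
The multi-field scenario in the putMapping docstring, as a hedged sketch in client code (index and field names are placeholders):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Add a keyword multi-field to an existing `title` text field. Documents
// indexed before this change have no value for `title.raw` until they are
// reindexed or touched by an update-by-query.
await client.indices.putMapping({
  index: 'my-index',
  properties: {
    title: { type: 'text', fields: { raw: { type: 'keyword' } } }
  }
})
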
@@ -2166,7 +2166,7 @@ export default class Indices {
 
   /**
    * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Indices matching multiple templates** Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates-v1.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template | Elasticsearch API documentation}
     */
   async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutTemplateResponse>
   async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutTemplateResponse, unknown>>
@@ -2213,7 +2213,7 @@ export default class Indices {
 
   /**
    * Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. Recovery automatically occurs during the following processes: * When creating an index for the first time. * When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. * Creation of new replica shard copies from the primary. * Relocation of a shard copy to a different node in the same cluster. * A snapshot restore operation. * A clone, shrink, or split operation. You can determine the cause of a shard recovery using the recovery or cat recovery APIs. The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery | Elasticsearch API documentation}
     */
   async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRecoveryResponse>
   async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRecoveryResponse, unknown>>
@@ -2263,7 +2263,7 @@ export default class Indices {
 
   /**
    * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. Refresh requests are synchronous and do not return a response until the refresh operation completes. Refreshes are resource-intensive. To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh | Elasticsearch API documentation}
     */
   async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRefreshResponse>
   async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRefreshResponse, unknown>>
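
The `refresh=wait_for` pattern recommended in the refresh docstring, sketched with the client (names are placeholders):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Prefer `refresh: 'wait_for'` on the write over an explicit refresh call:
// the search below is then guaranteed to see the document.
await client.index({
  index: 'my-index',
  document: { title: 'hello world' },
  refresh: 'wait_for'
})
const result = await client.search({ index: 'my-index', query: { match: { title: 'hello' } } })
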
@@ -2313,7 +2313,7 @@ export default class Indices {
 
   /**
    * Reload search analyzers. Reload an index's search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices. IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. NOTE: This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers | Elasticsearch API documentation}
     */
   async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesReloadSearchAnalyzersResponse>
   async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesReloadSearchAnalyzersResponse, unknown>>
@@ -2354,13 +2354,13 @@ export default class Indices {
   }
 
   /**
-    * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. **Advantages of using this endpoint before a cross-cluster search** You may want to exclude a cluster or index from a search when: * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search. * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html | Elasticsearch API documentation}
+    * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. ## Note on backwards compatibility The ability to query without an index expression was added in version 8.18, so when querying remote clusters older than that, the local cluster will send the index expression `dummy*` to those remote clusters. Thus, if any errors occur, you may see a reference to that index expression even though you didn't request it. If it causes a problem, you can instead include an index expression like `*:*` to bypass the issue. ## Advantages of using this endpoint before a cross-cluster search You may want to exclude a cluster or index from a search when: * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search. * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. ## Test availability of remote clusters The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. The remote cluster may be available, while the local cluster is not currently connected to it. You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters. For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. The `connected` field in the response will indicate whether it was successful. If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster | Elasticsearch API documentation}
     */
-  async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveClusterResponse>
-  async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveClusterResponse, unknown>>
-  async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveClusterResponse>
-  async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<any> {
+  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveClusterResponse>
+  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveClusterResponse, unknown>>
+  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveClusterResponse>
+  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2375,6 +2375,7 @@ export default class Indices {
       }
     }
 
+    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
@@ -2384,8 +2385,15 @@ export default class Indices {
       }
     }
 
-    const method = 'GET'
-    const path = `/_resolve/cluster/${encodeURIComponent(params.name.toString())}`
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_resolve/cluster/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_resolve/cluster'
+    }
     const meta: TransportRequestMetadata = {
       name: 'indices.resolve_cluster',
       pathParts: {
@@ -2397,7 +2405,7 @@ export default class Indices {
 
   /**
    * Resolve indices. Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-index-api.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index | Elasticsearch API documentation}
     */
   async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveIndexResponse>
   async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveIndexResponse, unknown>>
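
The change above makes `params` optional for resolveCluster; a brief sketch of both call shapes (placeholder names and node):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// With no arguments, the call now resolves to information about every
// configured remote cluster; the scoped form behaves as before.
const allRemotes = await client.indices.resolveCluster()
const scoped = await client.indices.resolveCluster({ name: 'my-index-*,cluster*:my-index-*' })
console.log(allRemotes, scoped)
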
@@ -2439,7 +2447,7 @@ export default class Indices {
 
   /**
    * Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target. **Roll over a data stream** If you roll over a data stream, the API creates a new write index for the stream. The stream's previous write index becomes a regular backing index. A rollover also increments the data stream's generation. **Roll over an index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with `is_write_index` set to `true`. The API also sets `is_write_index` to `false` for the previous write index. **Roll over an index alias with one index** If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. **Increment index names for an alias** When you roll over an index alias, you can specify a name for the new index. If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. This number is always six characters and zero-padded, regardless of the previous index's name. If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`. If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover | Elasticsearch API documentation}
     */
   async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRolloverResponse>
   async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRolloverResponse, unknown>>
@@ -2494,7 +2502,7 @@ export default class Indices {
 
   /**
    * Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream's backing indices.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments | Elasticsearch API documentation}
     */
   async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSegmentsResponse>
   async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSegmentsResponse, unknown>>
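
A conditional rollover sketch matching the alias behavior described above; `my-alias` stands in for an alias with a write index such as my-index-000001:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Roll the alias over only once the write index holds enough documents.
const resp = await client.indices.rollover({
  alias: 'my-alias',
  conditions: { max_docs: 100000 }
})
console.log(resp.rolled_over, resp.new_index)
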
@@ -2544,7 +2552,7 @@ export default class Indices {
 
   /**
    * Get index shard stores. Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream's backing indices. The index shard stores API returns the following information: * The node on which each replica shard exists. * The allocation ID for each replica shard. * A unique ID for each replica shard. * Any errors encountered while opening the shard index or from an earlier failure. By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores | Elasticsearch API documentation}
     */
   async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesShardStoresResponse>
   async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesShardStoresResponse, unknown>>
@@ -2594,7 +2602,7 @@ export default class Indices {
 
   /**
    * Shrink an index. Shrink an index into a new index with fewer primary shards. Before you can shrink an index: * The index must be read-only. * A copy of every shard in the index must reside on the same node. * The index must have a green health status. To make shard allocation easier, we recommend you also remove the index's replica shards. You can later re-add replica shards as part of the shrink operation. The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. A shrink operation: * Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. * Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. * Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `index.routing.allocation.initial_recovery._id` index setting. IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: * The target index must not exist. * The source index must have more primary shards than the target index. * The number of primary shards in the target index must be a factor of the number of primary shards in the source index. The source index must have more primary shards than the target index. * The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard. * The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink | Elasticsearch API documentation}
     */
   async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesShrinkResponse>
   async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesShrinkResponse, unknown>>
@@ -2642,7 +2650,7 @@ export default class Indices {
 
   /**
    * Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-index.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template | Elasticsearch API documentation}
     */
   async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSimulateIndexTemplateResponse>
   async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSimulateIndexTemplateResponse, unknown>>
@@ -2684,7 +2692,7 @@ export default class Indices {
 
   /**
    * Simulate an index template. Get the index configuration that would be applied by a particular index template.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-template.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template | Elasticsearch API documentation}
     */
   async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSimulateTemplateResponse>
   async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSimulateTemplateResponse, unknown>>
@@ -2739,7 +2747,7 @@ export default class Indices {
 
   /**
    * Split an index. Split an index into a new index with more primary shards. * Before you can split an index: * The index must be read-only. * The cluster health status must be green. You can make an index read-only with the following request using the add index block API: ``` PUT /my_source_index/_block/write ``` The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. * Recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy the following requirements: * The target index must not exist. * The source index must have fewer primary shards than the target index. * The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split | Elasticsearch API documentation}
     */
   async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSplitResponse>
   async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSplitResponse, unknown>>
@@ -2787,7 +2795,7 @@ export default class Indices {
 
   /**
    * Get index statistics. For data streams, the API retrieves statistics for the stream's backing indices. By default, the returned statistics are index-level with `primaries` and `total` aggregations. `primaries` are the values for only the primary shards. `total` are the accumulated values for both primary and replica shards. To get shard-level statistics, set the `level` parameter to `shards`. NOTE: When moving to another node, the shard-level statistics for a shard are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
-    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html | Elasticsearch API documentation}
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats | Elasticsearch API documentation}
     */
   async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesStatsResponse>
   async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesStatsResponse, unknown>>
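
The split workflow described above, as a hedged client-side sketch (index names are placeholders): first make the source read-only with the add index block API, then split it.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Equivalent of `PUT /my_source_index/_block/write`, then the split itself.
await client.indices.addBlock({ index: 'my-source-index', block: 'write' })
await client.indices.split({
  index: 'my-source-index',
  target: 'my-target-index',
  settings: { 'index.number_of_shards': 4 }
})
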
default class Inference { } /** - * Perform inference on the service - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html | Elasticsearch API documentation} + * Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} */ async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -204,7 +204,7 @@ export default class Inference { /** * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} */ async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -249,7 +249,7 @@ export default class Inference { /** * Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. 
IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stream-inference-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference | Elasticsearch API documentation} */ async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -359,7 +359,7 @@ export default class Inference { /** * Update an inference endpoint. Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-inference-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update | Elasticsearch API documentation} */ async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/info.ts b/src/api/api/info.ts index 0c1bf083d..1681fe6f3 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Get cluster info. Get basic build, version, and cluster information. 
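The hunks above only repoint documentation links, but the `info` call they annotate is the simplest way to smoke-test a client built from this branch. A minimal sketch, assuming an 8.x `@elastic/elasticsearch` client; the node URL is a placeholder, not part of the patch:

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details; adjust for your cluster.
const client = new Client({ node: 'http://localhost:9200' })

async function run (): Promise<void> {
  // Returns basic build, version, and cluster information.
  const info = await client.info()
  console.log(info.cluster_name, info.version.number)
}

run().catch(console.error)
```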
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rest-api-root.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info | Elasticsearch API documentation} */ export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index f691fb5b5..51ad39aff 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -45,7 +45,7 @@ export default class Ingest { /** * Delete GeoIP database configurations. Delete one or more IP geolocation database configurations. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-geoip-database-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database | Elasticsearch API documentation} */ async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Ingest { /** * Delete IP geolocation database configurations. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-ip-location-database-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database | Elasticsearch API documentation} */ async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -129,7 +129,7 @@ export default class Ingest { /** * Delete pipelines. Delete one or more ingest pipelines. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline | Elasticsearch API documentation} */ async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -211,7 +211,7 @@ export default class Ingest { /** * Get GeoIP database configurations. Get information about one or more IP geolocation database configurations. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-geoip-database-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database | Elasticsearch API documentation} */ async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -261,7 +261,7 @@ export default class Ingest { /** * Get IP geolocation database configurations. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ip-location-database-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database | Elasticsearch API documentation} */ async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -311,7 +311,7 @@ export default class Ingest { /** * Get pipelines. Get information about one or more ingest pipelines. This API returns a local reference of the pipeline. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline | Elasticsearch API documentation} */ async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -401,7 +401,7 @@ export default class Ingest { /** * Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-geoip-database-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database | Elasticsearch API documentation} */ async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -448,7 +448,7 @@ export default class Ingest { /** * Create or update an IP geolocation database configuration. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-ip-location-database-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database | Elasticsearch API documentation} */ async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -532,7 +532,7 @@ export default class Ingest { /** * Simulate a pipeline. Run an ingest pipeline against a set of provided documents. 
You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate | Elasticsearch API documentation} */ async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index e23a2d87a..d1a319461 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -38,8 +38,8 @@ import * as T from '../types' interface That { transport: Transport } /** - * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html | Elasticsearch API documentation} + * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. A kNN search response has the exact same structure as a search API response. However, certain sections have a meaning specific to kNN search: * The document `_score` is determined by the similarity between the query and document vector. * The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. 
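Since the comment above steers callers away from the standalone kNN endpoint toward the `knn` option of the search API, a hedged sketch of that replacement path may help. The index name, field, and query vector are hypothetical, and the cluster is assumed to have a `dense_vector` mapping:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

async function vectorSearch (): Promise<void> {
  const result = await client.search({
    index: 'my-vectors',
    knn: {
      field: 'embedding',
      query_vector: [0.1, 0.2, 0.3],
      k: 10,              // number of nearest neighbors to return
      num_candidates: 100 // per-shard candidates; higher is more accurate but slower
    }
  })
  // _score reflects query/document vector similarity, as described above.
  console.log(result.hits.hits.map(hit => hit._score))
}

vectorSearch().catch(console.error)
```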
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/knn-search-api.html | Elasticsearch API documentation} */ export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/license.ts b/src/api/api/license.ts index 852661841..b80733dd9 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -45,7 +45,7 @@ export default class License { /** * Delete the license. When the license expires, your subscription level reverts to Basic. If the operator privileges feature is enabled, only operator users can use this API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete | Elasticsearch API documentation} */ async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -84,8 +84,8 @@ export default class License { } /** - * Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires. NOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html | Elasticsearch API documentation} + * Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires. >info > If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. > If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get | Elasticsearch API documentation} */ async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -125,7 +125,7 @@ export default class License { /** * Get the basic license status. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status | Elasticsearch API documentation} */ async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -165,7 +165,7 @@ export default class License { /** * Get the trial status. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status | Elasticsearch API documentation} */ async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -205,7 +205,7 @@ export default class License { /** * Update the license. You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post | Elasticsearch API documentation} */ async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -250,7 +250,7 @@ export default class License { /** * Start a basic license. Start an indefinite basic license, which gives access to all the basic features. NOTE: In order to start a basic license, you must not currently have a basic license. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the `acknowledge` parameter set to `true`. To check the status of your basic license, use the get basic license API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic | Elasticsearch API documentation} */ async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -290,7 +290,7 @@ export default class License { /** * Start a trial. Start a 30-day trial, which gives access to all subscription features. NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. To check the status of your trial, use the get trial status API. 
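As a companion to the license hunks above, a minimal sketch of checking the current license before starting a trial. It assumes a reachable cluster and operator-level permissions; the node URL is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

async function startTrialIfNeeded (): Promise<void> {
  const { license } = await client.license.get()
  if (license.type !== 'trial') {
    // acknowledge: true accepts up front that some features may change level
    const res = await client.license.postStartTrial({ acknowledge: true })
    console.log('trial started:', res.trial_was_started)
  }
}

startTrialIfNeeded().catch(console.error)
```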
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial | Elasticsearch API documentation} */ async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index 559b10011..df33e03ac 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -45,7 +45,7 @@ export default class Logstash { /** * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-delete-pipeline.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline | Elasticsearch API documentation} */ async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Logstash { /** * Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-get-pipeline.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline | Elasticsearch API documentation} */ async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -137,7 +137,7 @@ export default class Logstash { /** * Create or update a Logstash pipeline. Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/logstash-api-put-pipeline.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline | Elasticsearch API documentation} */ async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts index 62f275d77..c254d5fd8 100644 --- a/src/api/api/mget.ts +++ b/src/api/api/mget.ts @@ -38,8 +38,8 @@ import * as T from '../types' interface That { transport: Transport } /** - * Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. 
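The mget hunk that follows expands the method description considerably; as a companion, here is a minimal hedged sketch of the call with source filtering and per-document error handling. The index name, ids, and document shape are hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

async function fetchDocs (): Promise<void> {
  const res = await client.mget<{ title: string }>({
    index: 'my-index',
    ids: ['1', '2', '3'],
    _source_includes: ['title'] // request-level default when docs carry no instructions
  })
  for (const doc of res.docs) {
    if ('error' in doc) continue // shard failures surface as partial, per-doc results
    console.log(doc._id, doc.found ? doc._source?.title : 'missing')
  }
}

fetchDocs().catch(console.error)
```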
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.html | Elasticsearch API documentation} + * Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. **Filter source fields** By default, the `_source` field is returned for every document (if stored). Use the `_source` and `_source_include` or `_source_exclude` attributes to filter what fields are returned for a particular document. You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. **Get stored fields** Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. Any requested fields that are not stored are ignored. You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget | Elasticsearch API documentation} */ export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts index 49fed1c3b..5ddf19b7d 100644 --- a/src/api/api/migration.ts +++ b/src/api/api/migration.ts @@ -45,7 +45,7 @@ export default class Migration { /** * Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migration-api-deprecation.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations | Elasticsearch API documentation} */ async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -95,7 +95,7 @@ export default class Migration { /** * Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant.
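The two migration calls just described pair naturally in a pre-upgrade check. A minimal sketch under the usual placeholder-client assumption:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

async function preUpgradeChecks (): Promise<void> {
  const deprecations = await client.migration.deprecations()
  console.log('cluster-level deprecations:', deprecations.cluster_settings.length)

  const status = await client.migration.getFeatureUpgradeStatus()
  console.log('feature migration status:', status.migration_status)
}

preUpgradeChecks().catch(console.error)
```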
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/feature-migration-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status | Elasticsearch API documentation} */ async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -135,7 +135,7 @@ export default class Migration { /** * Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process. Some functionality might be temporarily unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/feature-migration-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status | Elasticsearch API documentation} */ async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 1dbc32a63..52548d378 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -45,7 +45,7 @@ export default class Ml { /** * Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-trained-model-deployment-cache.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache | Elasticsearch API documentation} */ async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Ml { /** * Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. 
If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-close-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job | Elasticsearch API documentation} */ async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -133,8 +133,8 @@ export default class Ml { } /** - * Delete a calendar. Removes all scheduled events from a calendar, then deletes it. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar.html | Elasticsearch API documentation} + * Delete a calendar. Remove all scheduled events from a calendar, then delete it. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar | Elasticsearch API documentation} */ async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -176,7 +176,7 @@ export default class Ml { /** * Delete events from a calendar. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-event.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event | Elasticsearch API documentation} */ async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -219,7 +219,7 @@ export default class Ml { /** * Delete anomaly jobs from a calendar. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job | Elasticsearch API documentation} */ async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -262,7 +262,7 @@ export default class Ml { /** * Delete a data frame analytics job. 
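The calendar clean-up calls covered in this hunk compose in a fixed order: detach jobs first, then drop the calendar, which also removes its remaining scheduled events. A minimal sketch; the calendar and job ids are hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

async function dropCalendar (): Promise<void> {
  // Detach the job, then delete the calendar itself.
  await client.ml.deleteCalendarJob({ calendar_id: 'planned-outages', job_id: 'requests-job' })
  await client.ml.deleteCalendar({ calendar_id: 'planned-outages' })
}

dropCalendar().catch(console.error)
```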
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-dfanalytics.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics | Elasticsearch API documentation} */ async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -304,7 +304,7 @@ export default class Ml { /** * Delete a datafeed. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-datafeed.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed | Elasticsearch API documentation} */ async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -345,8 +345,8 @@ export default class Ml { } /** - * Delete expired ML data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-expired-data.html | Elasticsearch API documentation} + * Delete expired ML data. Delete all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data | Elasticsearch API documentation} */ async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -401,7 +401,7 @@ export default class Ml { /** * Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.
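The retention clean-up and filter deletion just described can be sketched together. Hedged, with hypothetical ids and the usual placeholder client:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

async function cleanup (): Promise<void> {
  // _all targets every anomaly detection job, per the description above.
  await client.ml.deleteExpiredData({ job_id: '_all' })
  // A filter is only deletable once no job references it.
  await client.ml.deleteFilter({ filter_id: 'safe-domains' })
}

cleanup().catch(console.error)
```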
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-filter.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter | Elasticsearch API documentation} */ async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -443,7 +443,7 @@ export default class Ml { /** * Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-forecast.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast | Elasticsearch API documentation} */ async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -493,7 +493,7 @@ export default class Ml { /** * Delete an anomaly detection job. All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job | Elasticsearch API documentation} */ async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -535,7 +535,7 @@ export default class Ml { /** * Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-snapshot.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot | Elasticsearch API documentation} */ async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -578,7 +578,7 @@ export default class Ml { /** * Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline. 
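A short sketch of the job deletion path documented above, which first tries to delete the job's datafeed with the same timeout and force parameters. Job id and flags are hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

async function removeJob (): Promise<void> {
  await client.ml.deleteJob({
    job_id: 'requests-job',  // hypothetical; wildcards are not accepted here
    force: false,            // set true to delete without closing first
    wait_for_completion: true
  })
}

removeJob().catch(console.error)
```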
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-trained-models.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model | Elasticsearch API documentation} */ async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -620,7 +620,7 @@ export default class Ml { /** * Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-trained-models-aliases.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias | Elasticsearch API documentation} */ async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -662,8 +662,8 @@ export default class Ml { } /** - * Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-apis.html | Elasticsearch API documentation} + * Estimate job model memory usage. Make an estimation of the memory usage for an anomaly detection job model. The estimate is based on analysis configuration details for the job and cardinality estimates for the fields it references. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory | Elasticsearch API documentation} */ async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -708,7 +708,7 @@ export default class Ml { /** * Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/evaluate-dfanalytics.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame | Elasticsearch API documentation} */ async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -752,7 +752,7 @@ export default class Ml { /** * Explain data frame analytics config. 
This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. - * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/explain-dfanalytics.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics | Elasticsearch API documentation} */ async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -807,7 +807,7 @@ export default class Ml { /** * Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-flush-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job | Elasticsearch API documentation} */ async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -854,7 +854,7 @@ export default class Ml { /** * Predict future behavior of a time series. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-forecast.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast | Elasticsearch API documentation} */ async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -901,7 +901,7 @@ export default class Ml { /** * Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket.
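To make the bucket-results description concrete, a minimal sketch of fetching recent high-scoring buckets. The job id and thresholds are hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

async function recentAnomalousBuckets (): Promise<void> {
  const res = await client.ml.getBuckets({
    job_id: 'requests-job', // hypothetical
    anomaly_score: 75,      // only buckets at or above this score
    start: 'now-2d',
    end: 'now'
  })
  for (const bucket of res.buckets) {
    console.log(bucket.timestamp, bucket.anomaly_score)
  }
}

recentAnomalousBuckets().catch(console.error)
```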
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-bucket.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets | Elasticsearch API documentation} */ async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -956,7 +956,7 @@ export default class Ml { /** * Get info about events in calendars. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar-event.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events | Elasticsearch API documentation} */ async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -998,7 +998,7 @@ export default class Ml { /** * Get calendar configuration info. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars | Elasticsearch API documentation} */ async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1053,7 +1053,7 @@ export default class Ml { /** * Get anomaly detection job results for categories. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-category.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories | Elasticsearch API documentation} */ async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1108,7 +1108,7 @@ export default class Ml { /** * Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics | Elasticsearch API documentation} */ async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1158,7 +1158,7 @@ export default class Ml { /** * Get data frame analytics jobs usage info. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats | Elasticsearch API documentation} */ async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1208,7 +1208,7 @@ export default class Ml { /** * Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats | Elasticsearch API documentation} */ async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1258,7 +1258,7 @@ export default class Ml { /** * Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. This API returns a maximum of 10,000 datafeeds. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds | Elasticsearch API documentation} */ async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1308,7 +1308,7 @@ export default class Ml { /** * Get filters. You can get a single filter or all filters. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-filter.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters | Elasticsearch API documentation} */ async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1358,7 +1358,7 @@ export default class Ml { /** * Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration.
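A hedged sketch of the influencer query described above, sorted by score. The job id is hypothetical, and the call only returns results if the job configures `influencer_field_name`:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

async function topInfluencers (): Promise<void> {
  const res = await client.ml.getInfluencers({
    job_id: 'requests-job', // hypothetical
    influencer_score: 50,   // minimum score to include
    sort: 'influencer_score',
    desc: true
  })
  for (const inf of res.influencers) {
    console.log(inf.influencer_field_name, inf.influencer_field_value, inf.influencer_score)
  }
}

topInfluencers().catch(console.error)
```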
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-influencer.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers | Elasticsearch API documentation} */ async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1405,7 +1405,7 @@ export default class Ml { /** * Get anomaly detection jobs usage info. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats | Elasticsearch API documentation} */ async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1455,7 +1455,7 @@ export default class Ml { /** * Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs | Elasticsearch API documentation} */ async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1505,7 +1505,7 @@ export default class Ml { /** * Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-memory.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats | Elasticsearch API documentation} */ async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1555,7 +1555,7 @@ export default class Ml { /** * Get anomaly detection job model snapshot upgrade usage info.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-model-snapshot-upgrade-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats | Elasticsearch API documentation} */ async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1598,7 +1598,7 @@ export default class Ml { /** * Get model snapshots info. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-snapshot.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots | Elasticsearch API documentation} */ async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1653,7 +1653,7 @@ export default class Ml { /** * Get overall bucket results. Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-overall-buckets.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets | Elasticsearch API documentation} */ async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1700,7 +1700,7 @@ export default class Ml { /** * Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets.
The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-record.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records | Elasticsearch API documentation} */ async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1747,7 +1747,7 @@ export default class Ml { /** * Get trained model configuration info. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models | Elasticsearch API documentation} */ async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1797,7 +1797,7 @@ export default class Ml { /** * Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats | Elasticsearch API documentation} */ async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1847,7 +1847,7 @@ export default class Ml { /** * Evaluate a trained model. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model | Elasticsearch API documentation} */ async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1894,7 +1894,7 @@ export default class Ml { /** * Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. 
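Since there are usually far too many records to inspect by hand, a score threshold is the typical way in. A minimal sketch, with an invented job id and node URL:

```
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // endpoint is an assumption

// Rather than paging through every record, ask only for high-scoring ones.
const res = await client.ml.getRecords({
  job_id: 'my-anomaly-job', // hypothetical job id
  record_score: 75,         // skip records below this anomaly score
  sort: 'record_score',
  desc: true
})
console.log(res.count, res.records)
```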
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-info.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info | Elasticsearch API documentation} */ async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1934,7 +1934,7 @@ export default class Ml { /** * Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-open-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job | Elasticsearch API documentation} */ async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1981,7 +1981,7 @@ export default class Ml { /** * Add scheduled events to the calendar. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-calendar-event.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events | Elasticsearch API documentation} */ async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2028,7 +2028,7 @@ export default class Ml { /** * Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-data.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data | Elasticsearch API documentation} */ async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2064,8 +2064,8 @@ export default class Ml { } /** - * Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. - * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/preview-dfanalytics.html | Elasticsearch API documentation} + * Preview features used by data frame analytics. Preview the extracted features used by a data frame analytics config. 
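A minimal open-then-post sketch of the job lifecycle described above; the job id, field names, and node URL are invented, and the shape of the `data` array is an assumption based on this client's flattened request bodies:

```
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // endpoint is an assumption

// Open the job so it can accept data, then send a small batch for analysis.
await client.ml.openJob({ job_id: 'my-anomaly-job' }) // hypothetical job id
await client.ml.postData({
  job_id: 'my-anomaly-job',
  // remember: one connection at a time per job, no wildcards here
  data: [
    { timestamp: 1712102400000, bytes: 1024 },
    { timestamp: 1712102460000, bytes: 2048 }
  ]
})
```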
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics | Elasticsearch API documentation} */ async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2120,7 +2120,7 @@ export default class Ml { /** * Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-preview-datafeed.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed | Elasticsearch API documentation} */ async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> @@ -2175,7 +2175,7 @@ export default class Ml { /** * Create a calendar. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar | Elasticsearch API documentation} */ async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2222,7 +2222,7 @@ export default class Ml { /** * Add anomaly detection job to calendar. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job | Elasticsearch API documentation} */ async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2265,7 +2265,7 @@ export default class Ml { /** * Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. By default, the query used in the source configuration is `{"match_all": {}}`. If the destination index does not exist, it is created automatically when you start the job. If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. 
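A minimal sketch of a data frame analytics job that leans on the hyperparameter optimization mentioned above by leaving the analysis parameters unset; all ids, index names, and the node URL are invented:

```
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // endpoint is an assumption

// Outlier detection with every hyperparameter omitted: the server
// determines the undefined values itself.
await client.ml.putDataFrameAnalytics({
  id: 'weblog-outliers',              // hypothetical analytics job id
  source: { index: 'weblogs' },       // hypothetical source index
  dest: { index: 'weblog-outliers' }, // created automatically on first start
  analysis: { outlier_detection: {} }
})
```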
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-dfanalytics.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics | Elasticsearch API documentation} */ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2312,7 +2312,7 @@ export default class Ml { /** * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-datafeed.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed | Elasticsearch API documentation} */ async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2359,7 +2359,7 @@ export default class Ml { /** * Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-filter.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter | Elasticsearch API documentation} */ async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2406,7 +2406,7 @@ export default class Ml { /** * Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`.
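A minimal sketch of an anomaly detection job with an inline `datafeed_config` and no query, so the default above applies; ids, index patterns, and the node URL are invented:

```
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // endpoint is an assumption

await client.ml.putJob({
  job_id: 'weblog-bytes', // hypothetical job id
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'sum', field_name: 'bytes' }]
  },
  data_description: { time_field: '@timestamp' },
  // No query given, so the datafeed falls back to
  // {"match_all": {"boost": 1}} as noted above.
  datafeed_config: { indices: ['weblogs-*'] }
})
```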
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job | Elasticsearch API documentation} */ async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2453,7 +2453,7 @@ export default class Ml { /** * Create a trained model. Enables you to supply a trained model that is not created by data frame analytics. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model | Elasticsearch API documentation} */ async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2500,7 +2500,7 @@ export default class Ml { /** * Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models-aliases.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias | Elasticsearch API documentation} */ async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2543,7 +2543,7 @@ export default class Ml { /** * Create part of a trained model definition.
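A minimal sketch of promoting a new model version behind a stable alias, per the reassignment rules above; the model ids and node URL are invented:

```
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // endpoint is an assumption

// Point the stable alias at the new model version; `reassign` is
// required because the alias already refers to another model.
await client.ml.putTrainedModelAlias({
  model_id: 'my-model-v2', // hypothetical model id
  model_alias: 'my-model',
  reassign: true
})
```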
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-definition-part.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part | Elasticsearch API documentation} */ async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2591,7 +2591,7 @@ export default class Ml { /** * Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-vocabulary.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary | Elasticsearch API documentation} */ async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2638,7 +2638,7 @@ export default class Ml { /** * Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-reset-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job | Elasticsearch API documentation} */ async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2680,7 +2680,7 @@ export default class Ml { /** * Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-revert-snapshot.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot | Elasticsearch API documentation} */ async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2728,7 +2728,7 @@ export default class Ml { /** * Set upgrade_mode for ML indices. 
Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-set-upgrade-mode.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode | Elasticsearch API documentation} */ async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2768,7 +2768,7 @@ export default class Ml { /** * Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-dfanalytics.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics | Elasticsearch API documentation} */ async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2810,7 +2810,7 @@ export default class Ml { /** * Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. 
If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-start-datafeed.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed | Elasticsearch API documentation} */ async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2857,7 +2857,7 @@ export default class Ml { /** * Start a trained model deployment. It allocates the model to every machine learning node. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment | Elasticsearch API documentation} */ async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2899,7 +2899,7 @@ export default class Ml { /** * Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-dfanalytics.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics | Elasticsearch API documentation} */ async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2941,7 +2941,7 @@ export default class Ml { /** * Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-stop-datafeed.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed | Elasticsearch API documentation} */ async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2988,7 +2988,7 @@ export default class Ml { /** * Stop a trained model deployment. 
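A minimal sketch of starting a datafeed to backfill a fixed window; the datafeed id, time range, and node URL are invented:

```
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // endpoint is an assumption

// Process two weeks of history, then stop; omit `end` to run in real time.
await client.ml.startDatafeed({
  datafeed_id: 'datafeed-weblog-bytes', // hypothetical datafeed id
  start: 'now-2w',
  end: 'now'
})
```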
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment | Elasticsearch API documentation} */ async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -3030,7 +3030,7 @@ export default class Ml { /** * Update a data frame analytics job. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-dfanalytics.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics | Elasticsearch API documentation} */ async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -3077,7 +3077,7 @@ export default class Ml { /** * Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-datafeed.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed | Elasticsearch API documentation} */ async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -3124,7 +3124,7 @@ export default class Ml { /** * Update a filter. Updates the description of a filter, adds items, or removes items from the list. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-filter.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter | Elasticsearch API documentation} */ async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -3171,7 +3171,7 @@ export default class Ml { /** * Update an anomaly detection job. Updates certain properties of an anomaly detection job. 
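A minimal sketch of the in-place filter update described above; the filter id, items, and node URL are invented:

```
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // endpoint is an assumption

// Add and remove items, and refresh the description, in a single call.
await client.ml.updateFilter({
  filter_id: 'safe-domains', // hypothetical filter id
  add_items: ['elastic.co'],
  remove_items: ['example.net'],
  description: 'Domains excluded from anomaly alerts'
})
```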
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job | Elasticsearch API documentation} */ async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -3218,7 +3218,7 @@ export default class Ml { /** * Update a snapshot. Updates certain properties of a snapshot. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-snapshot.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot | Elasticsearch API documentation} */ async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -3266,7 +3266,7 @@ export default class Ml { /** * Update a trained model deployment. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-trained-model-deployment.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment | Elasticsearch API documentation} */ async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -3312,8 +3312,8 @@ export default class Ml { } /** - * Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-upgrade-job-model-snapshot.html | Elasticsearch API documentation} + * Upgrade a snapshot. Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot | Elasticsearch API documentation} */ async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -3401,7 +3401,7 @@ export default class Ml { /** * Validate an anomaly detection job. - * @see {@link https://www.elastic.co/guide/en/machine-learning/master/ml-jobs.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch | Elasticsearch API documentation} */ async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index c4a09fc33..053fea53a 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -45,7 +45,7 @@ export default class Monitoring { /** * Send monitoring data. This API is used by the monitoring features to send monitoring data. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/monitor-elasticsearch-cluster.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch | Elasticsearch API documentation} */ async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts index 575e95b19..573c4f385 100644 --- a/src/api/api/msearch.ts +++ b/src/api/api/msearch.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Run multiple searches. The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: ``` header\n body\n header\n body\n ``` This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. IMPORTANT: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch | Elasticsearch API documentation} */ export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts index 54ffc9d32..019b9d1b8 100644 --- a/src/api/api/msearch_template.ts +++ b/src/api/api/msearch_template.ts @@ -38,8 +38,8 @@ import * as T from '../types' interface That { transport: Transport } /** - * Run multiple templated searches. 
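With this client you do not assemble the NDJSON by hand: you pass alternating header and body objects, and the client serializes them and sets the `Content-Type` header for you. A minimal sketch with invented index names and node URL:

```
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // endpoint is an assumption

// Alternating header/body pairs; the client emits NDJSON and
// sends Content-Type: application/x-ndjson on your behalf.
const { responses } = await client.msearch({
  searches: [
    { index: 'my-index' },
    { query: { match: { message: 'hello' } } },
    { index: 'my-other-index' },
    { query: { match_all: {} } }
  ]
})
console.log(responses.length)
```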
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html | Elasticsearch API documentation} + * Run multiple templated searches. Run multiple templated searches with a single request. If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. For example: ``` $ cat requests { "index": "my-index" } { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} { "index": "my-other-index" } { "id": "my-other-search-template", "params": { "query_type": "match_all" }} $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo ``` + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template | Elasticsearch API documentation} */ export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts index fef6e4592..bd4faccbb 100644 --- a/src/api/api/mtermvectors.ts +++ b/src/api/api/mtermvectors.ts @@ -38,8 +38,8 @@ import * as T from '../types' interface That { transport: Transport } /** - * Get multiple term vectors. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-termvectors.html | Elasticsearch API documentation} + * Get multiple term vectors. Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. **Artificial documents** You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. The mapping used is determined by the specified `_index`. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors | Elasticsearch API documentation} */ export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index 27ca13040..1ce489ae0 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -45,7 +45,7 @@ export default class Nodes { /** * Clear the archived repositories metering. Clear the archived repositories metering information in the cluster. 
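A rough client-side equivalent of the curl example quoted above; the template ids, parameters, and index names are taken from that example, while the node URL is invented:

```
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // endpoint is an assumption

// Alternating header/template items, serialized as NDJSON under the hood.
const { responses } = await client.msearchTemplate({
  search_templates: [
    { index: 'my-index' },
    { id: 'my-search-template', params: { query_string: 'hello world', from: 0, size: 10 } },
    { index: 'my-other-index' },
    { id: 'my-other-search-template', params: { query_type: 'match_all' } }
  ]
})
console.log(responses.length)
```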
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-repositories-metering-archive-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive | Elasticsearch API documentation} */ async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -88,7 +88,7 @@ export default class Nodes { /** * Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-repositories-metering-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info | Elasticsearch API documentation} */ async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -130,7 +130,7 @@ export default class Nodes { /** * Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads | Elasticsearch API documentation} */ async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -180,7 +180,7 @@ export default class Nodes { /** * Get node information. By default, the API returns all attributes and core settings for cluster nodes. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info | Elasticsearch API documentation} */ async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -237,7 +237,7 @@ export default class Nodes { /** * Reload the keystore on nodes in the cluster. Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. 
When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings | Elasticsearch API documentation} */ async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -292,7 +292,7 @@ export default class Nodes { /** * Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -356,7 +356,7 @@ export default class Nodes { /** * Get feature usage information. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage | Elasticsearch API documentation} */ async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts index 685e46870..4cd2a733e 100644 --- a/src/api/api/open_point_in_time.ts +++ b/src/api/api/open_point_in_time.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time. 
Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with `search_after`. IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. **Keeping point in time alive** The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request. Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use. TIP: Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles. Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. Note that a point-in-time doesn't prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time | Elasticsearch API documentation} */ export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts index 277e3c725..908709afd 100644 --- a/src/api/api/ping.ts +++ b/src/api/api/ping.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Ping the cluster. Get information about whether the cluster is running.
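A minimal paging sketch that follows the rules above: open the PIT, keep `keep_alive` only long enough for the next request, always reuse the most recently received id, and close the PIT when done. The index name, sort field, and node URL are invented:

```
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // endpoint is an assumption

const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })

const page1 = await client.search({
  size: 100,
  pit: { id: pit.id, keep_alive: '1m' }, // long enough for the next request only
  sort: [{ '@timestamp': 'asc' }]
})

// Page with search_after, using the sort values of the last hit
// and the freshest PIT id returned by the previous response.
const lastHit = page1.hits.hits[page1.hits.hits.length - 1]
const page2 = await client.search({
  size: 100,
  pit: { id: page1.pit_id ?? pit.id, keep_alive: '1m' },
  sort: [{ '@timestamp': 'asc' }],
  search_after: lastHit?.sort
})

await client.closePointInTime({ id: page2.pit_id ?? pit.id }) // release old segments promptly
```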
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster | Elasticsearch API documentation} */ export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts index a989cf966..d3350ca5b 100644 --- a/src/api/api/put_script.ts +++ b/src/api/api/put_script.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Create or update a script or search template. Creates or updates a stored script or search template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script | Elasticsearch API documentation} */ export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts index efddebcc9..bb7a964ee 100644 --- a/src/api/api/query_rules.ts +++ b/src/api/api/query_rules.ts @@ -45,7 +45,7 @@ export default class QueryRules { /** * Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-rule.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule | Elasticsearch API documentation} */ async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -88,7 +88,7 @@ export default class QueryRules { /** * Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset | Elasticsearch API documentation} */ async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -130,7 +130,7 @@ export default class QueryRules { /** * Get a query rule. Get details about a query rule within a query ruleset. 
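A minimal sketch of storing a mustache search template with the create-or-update script API above; the template id and body are invented, and passing the template source as a JSON string is an assumption:

```
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // endpoint is an assumption

// Store a mustache search template; the source is the template body.
await client.putScript({
  id: 'my-search-template', // hypothetical template id
  script: {
    lang: 'mustache',
    source: JSON.stringify({
      query: { match: { message: '{{query_string}}' } },
      from: '{{from}}',
      size: '{{size}}'
    })
  }
})
```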
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-rule.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule | Elasticsearch API documentation} */ async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -173,7 +173,7 @@ export default class QueryRules { /** * Get a query ruleset. Get details about a query ruleset. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-ruleset.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset | Elasticsearch API documentation} */ async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -215,7 +215,7 @@ export default class QueryRules { /** * Get all query rulesets. Get summarized information about the query rulesets. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-query-rulesets.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets | Elasticsearch API documentation} */ async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -255,7 +255,7 @@ export default class QueryRules { /** * Create or update a query rule. Create or update a query rule within a query ruleset. IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in a single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-rule.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule | Elasticsearch API documentation} */ async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -303,7 +303,7 @@ export default class QueryRules { /** * Create or update a query ruleset. There is a limit of 100 rules per ruleset. This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in a single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits.
If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset | Elasticsearch API documentation} */ async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -350,7 +350,7 @@ export default class QueryRules { /** * Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/test-query-ruleset.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test | Elasticsearch API documentation} */ async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithOutMeta): Promise async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts index 890b2eec2..bd3af65e5 100644 --- a/src/api/api/rank_eval.ts +++ b/src/api/api/rank_eval.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval | Elasticsearch API documentation} */ export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 74f29853c..5c83f147b 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Reindex documents. Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time. If the Elasticsearch security features are enabled, you must have the following security privileges: * The `read` index privilege for the source data stream, index, or alias. * The `write` index privilege for the destination data stream, index, or index alias. 
* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. * If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled. The `dest` element can be configured like the index API to control optimistic concurrency control. Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict. IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream. By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. NOTE: The reindex API makes no effort to handle ID collisions. The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. Instead, make sure that IDs are unique by using a script. **Running reindex asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `_tasks/<task_id>`. **Reindex from multiple sources** If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. That way you can resume the process if there are any errors by removing the partially completed source and starting over. It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.
For example, you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. Requests are throttled by padding each batch with a wait time. To turn off throttling, set `requests_per_second` to `-1`. The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". **Slicing** Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. You can slice a reindex request manually by providing a slice ID and total number of slices to each request. You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use. Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: * You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. * Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use the following guidelines. Query performance is most efficient when the number of slices is equal to the number of shards in the index. If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. 
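Pulling the throttling and slicing options above together, a sketch of a throttled, automatically sliced reindex run as a background task (index names are assumed; `client` is the instance from the first sketch):

```
// Throttled, automatically sliced reindex; returns a task instead of blocking.
const resp = await client.reindex({
  source: { index: 'my-index', size: 1000 },  // batch size per scroll request
  dest: { index: 'my-index-reindexed' },
  requests_per_second: 500,                   // throttle: wait-time padding per batch
  slices: 'auto',                             // let Elasticsearch pick the slice count
  wait_for_completion: false
})

// Later, remove the throttle entirely; the task ID comes from the response above.
await client.reindexRethrottle({ task_id: String(resp.task), requests_per_second: -1 })
```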
Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. Indexing performance scales linearly across available resources with the number of slices. Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. **Modify documents during reindexing** Like `_update_by_query`, reindex operations support a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document's metadata. Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body. Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. Think of the possibilities! Just be careful; you are able to change: * `_id` * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. **Reindex from remote** Reindex supports reindexing from a remote Elasticsearch cluster. The `host` parameter must contain a scheme, host, port, and optional path. The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. There is a range of settings available to configure the behavior of the HTTPS connection. When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. It can be set to a comma delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*] ``` The list of allowed hosts must be configured on any nodes that will coordinate the reindex. This feature should work with remote clusters of any version of Elasticsearch. This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. WARNING: Elasticsearch does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you'll need to use a smaller batch size.
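A sketch of a reindex from a remote cluster with a reduced batch size, matching the buffer note above; the remote host, credentials, and index names are placeholders:

```
await client.reindex({
  source: {
    remote: {
      host: 'https://otherhost:9200',  // must also appear in reindex.remote.whitelist
      username: 'user',
      password: 'pass'
    },
    index: 'source-index',
    size: 100  // smaller batches when remote documents are large
  },
  dest: { index: 'dest-index' }
})
```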
It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex | Elasticsearch API documentation} */ export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts index 9653803eb..d32f80c01 100644 --- a/src/api/api/reindex_rethrottle.ts +++ b/src/api/api/reindex_rethrottle.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Throttle a reindex operation. Change the number of requests per second for a particular reindex operation. For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex | Elasticsearch API documentation} */ export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index 650ba34b1..57b5377c6 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -39,14 +39,14 @@ interface That { transport: Transport } /** * Render a search template. Render a search template as a search request body. 
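A sketch of rendering a stored search template through this client; the template ID and parameters are assumed:

```
const rendered = await client.renderSearchTemplate({
  id: 'my-search-template',
  params: { query_string: 'hello world', from: 0, size: 10 }
})
// The rendered search request body that the template produces.
console.log(rendered.template_output)
```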
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/render-search-template-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template | Elasticsearch API documentation} */ export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['file', 'params', 'source'] + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['id', 'file', 'params', 'source'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index 16401f165..b45043728 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -45,7 +45,7 @@ export default class Rollup { /** * Delete a rollup job. A job must be stopped before it can be deleted. If you attempt to delete a started job, an error occurs. Similarly, if you attempt to delete a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. The API does not delete any previously rolled up data. This is by design; a user may wish to roll up a static data set. Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": { "_rollup.id": "the_rollup_job_id" } } } ``` - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-delete-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job | Elasticsearch API documentation} */ async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Rollup { /** * Get rollup job information. Get the configuration, stats, and status of rollup jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs | Elasticsearch API documentation} */ async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -137,7 +137,7 @@ export default class Rollup { /** * Get the rollup job capabilities. Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine: 1. Does this index have associated rollup data somewhere in the cluster? 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-caps.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps | Elasticsearch API documentation} */ async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -187,7 +187,7 @@ export default class Rollup { /** * Get the rollup index capabilities. Get the rollup capabilities of all jobs inside of a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: * What jobs are stored in an index (or indices specified via a pattern)? * What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps | Elasticsearch API documentation} */ async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -229,7 +229,7 @@ export default class Rollup { /** * Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. 
There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group. Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-put-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job | Elasticsearch API documentation} */ async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -276,7 +276,7 @@ export default class Rollup { /** * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. **Searching both historical rollup and non-rollup data** The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" } } } } ``` The rollup search endpoint does two things when the search runs: * The original request is sent to the non-rollup index unaltered. * A rewritten version of the original request is sent to the rollup index. When the two responses are received, the endpoint rewrites the rollup response and merges the two together. During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-search.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search | Elasticsearch API documentation} */ async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> @@ -323,7 +323,7 @@ export default class Rollup { /** * Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens. 
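Tying the last two methods together, a sketch that starts an assumed `sensor` rollup job and later queries live and rolled-up indices together through the rollup search endpoint described above:

```
await client.rollup.startJob({ id: 'sensor' })

// Search live and rolled-up data in one request, mirroring the example above.
const result = await client.rollup.rollupSearch({
  index: 'sensor-1,sensor_rollup',
  size: 0,
  aggregations: {
    max_temperature: { max: { field: 'temperature' } }
  }
})
```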
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-start-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job | Elasticsearch API documentation} */ async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupStartJobResponse> async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupStartJobResponse, unknown>> @@ -365,7 +365,7 @@ export default class Rollup { /** * Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: ``` POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s ``` The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-stop-job.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job | Elasticsearch API documentation} */ async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RollupStopJobResponse> async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupStopJobResponse, unknown>> diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts index e6b3fb611..5bd03110b 100644 --- a/src/api/api/scroll.ts +++ b/src/api/api/scroll.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Run a scrolling search. IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
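A sketch of the scroll flow just described; the index name and keep-alive are assumed, and clearing the scroll frees the search context early:

```
const first = await client.search({ index: 'my-index', scroll: '1m', size: 1000 })
let scrollId = first._scroll_id

// Keep pulling batches until a page comes back empty.
while (scrollId != null) {
  const page = await client.scroll({ scroll_id: scrollId, scroll: '1m' })
  if (page.hits.hits.length === 0) break
  scrollId = page._scroll_id
}

if (scrollId != null) await client.clearScroll({ scroll_id: scrollId })
```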
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll | Elasticsearch API documentation} */ export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/search.ts b/src/api/api/search.ts index 11294d979..cf2630413 100644 --- a/src/api/api/search.ts +++ b/src/api/api/search.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices. **Search slicing** When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search | Elasticsearch API documentation} */ export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts index b892ec9dd..71e583672 100644 --- a/src/api/api/search_application.ts +++ b/src/api/api/search_application.ts @@ -45,7 +45,7 @@ export default class SearchApplication { /** * Delete a search application. Remove a search application and its associated alias. Indices attached to the search application are not removed. 
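Looking back at the search-slicing notes above, a sketch of two slices sharing one point in time so they partition the same snapshot of the index; the index name and slice count are assumed:

```
const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })

// Two consumers, one shared PIT: slices 0 and 1 split the hits between them.
const [sliceA, sliceB] = await Promise.all([0, 1].map(id =>
  client.search({
    pit: { id: pit.id, keep_alive: '1m' },
    slice: { id: String(id), max: 2 },  // slice IDs are typed as strings
    size: 1000
  })
))

await client.closePointInTime({ id: pit.id })
```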
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-search-application.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class SearchApplication { /** * Delete a behavioral analytics collection. The associated data stream is also deleted. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-analytics-collection.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics | Elasticsearch API documentation} */ async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -129,7 +129,7 @@ export default class SearchApplication { /** * Get search application details. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-search-application.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get | Elasticsearch API documentation} */ async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -171,7 +171,7 @@ export default class SearchApplication { /** * Get behavioral analytics collections. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-analytics-collection.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics | Elasticsearch API documentation} */ async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -221,7 +221,7 @@ export default class SearchApplication { /** * Get search applications. Get information about search applications. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-search-applications.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics | Elasticsearch API documentation} */ async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -261,7 +261,7 @@ export default class SearchApplication { /** * Create a behavioral analytics collection event. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/post-analytics-collection-event.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event | Elasticsearch API documentation} */ async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPostBehavioralAnalyticsEventResponse> async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationPostBehavioralAnalyticsEventResponse, unknown>> @@ -299,7 +299,7 @@ export default class SearchApplication { /** * Create or update a search application. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put | Elasticsearch API documentation} */ async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPutResponse> async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationPutResponse, unknown>> @@ -336,7 +336,7 @@ export default class SearchApplication { /** * Create a behavioral analytics collection. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-analytics-collection.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics | Elasticsearch API documentation} */ async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPutBehavioralAnalyticsResponse> async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationPutBehavioralAnalyticsResponse, unknown>> @@ -378,7 +378,7 @@ export default class SearchApplication { /** * Render a search application query. Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. You must have `read` privileges on the backing alias of the search application. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-render-query.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query | Elasticsearch API documentation} */ async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationRenderQueryResponse> async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationRenderQueryResponse, unknown>> @@ -425,7 +425,7 @@ export default class SearchApplication { /** * Run a search application search. Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable.
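A sketch of running a search application search; the application name and template parameter are assumed:

```
const appResult = await client.searchApplication.search({
  name: 'my-search-app',
  params: { query_string: 'coffee' }  // filled into the app's search template
})
console.log(appResult.hits.hits)
```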
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-search.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search | Elasticsearch API documentation} */ async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>> async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationSearchResponse<TDocument, TAggregations>, unknown>> diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index 54e002d7a..c9384a91e 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -38,8 +38,8 @@ import * as T from '../types' interface That { transport: Transport } /** - * Search a vector tile. Search a vector tile for geospatial values. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-vector-tile-api.html | Elasticsearch API documentation} + * Search a vector tile. Search a vector tile for geospatial values. Before using this API, you should be familiar with the Mapbox vector tile specification. The API returns results as a binary Mapbox vector tile. Internally, Elasticsearch translates a vector tile search API request into a search containing: * A `geo_bounding_box` query on the `<field>`. The query uses the `<zoom>/<x>/<y>` tile as a bounding box. * A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `<zoom>/<x>/<y>` tile as a bounding box. * Optionally, a `geo_bounds` aggregation on the `<field>`. The search only includes this aggregation if the `exact_bounds` parameter is `true`. * If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search: ``` GET my-index/_search { "size": 10000, "query": { "geo_bounding_box": { "my-geo-field": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "aggregations": { "grid": { "geotile_grid": { "field": "my-geo-field", "precision": 11, "size": 65536, "bounds": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "bounds": { "geo_bounds": { "field": "my-geo-field", "wrap_longitude": false } } } } ``` The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: * A `hits` layer containing a feature for each `<field>` value matching the `geo_bounding_box` query. * An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. * A meta layer containing: * A feature containing a bounding box. By default, this is the bounding box of the tile. * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. * Metadata for the search. The API only returns features that can display at its zoom level.
For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON. IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. If you specify both parameters, the query parameter takes precedence. **Grid precision for geotile** For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. `grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: `<zoom> + grid_precision`. For example, if `<zoom>` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. The maximum final precision is 29. The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The `aggs` layer only contains features for cells with matching data. **Grid precision for geohex** For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and `grid_precision` to calculate a final precision as follows: `<zoom> + grid_precision`. This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. The following table maps the H3 resolution for each precision. For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3. | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | | --------- | ---------------- | ------------- | ----------------| ----- | | 1 | 4 | 0 | 122 | 30.5 | | 2 | 16 | 0 | 122 | 7.625 | | 3 | 64 | 1 | 842 | 13.15625 | | 4 | 256 | 1 | 842 | 3.2890625 | | 5 | 1024 | 2 | 5882 | 5.744140625 | | 6 | 4096 | 2 | 5882 | 1.436035156 | | 7 | 16384 | 3 | 41162 | 2.512329102 | | 8 | 65536 | 3 | 41162 | 0.6280822754 | | 9 | 262144 | 4 | 288122 | 1.099098206 | | 10 | 1048576 | 4 | 288122 | 0.2747745514 | | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.
Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt | Elasticsearch API documentation} */ export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts index 544660325..f2fff30a5 100644 --- a/src/api/api/search_shards.ts +++ b/src/api/api/search_shards.ts @@ -38,8 +38,8 @@ import * as T from '../types' interface That { transport: Transport } /** - * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-shards.html | Elasticsearch API documentation} + * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the `indices` section. If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards | Elasticsearch API documentation} */ export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index 2bcef664e..f63c77a45 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Run a search with a search template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-template.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template | Elasticsearch API documentation} */ export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index d26592c27..4c8af1dda 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -45,7 +45,7 @@ export default class SearchableSnapshots { /** * Get cache statistics. Get statistics about the shared cache for partially mounted indices. 
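Looking back at the vector tile API above, a sketch of fetching a single tile as binary PBF data; the field, tile coordinates, and grid settings are assumed:

```
// zoom/x/y address the tile; the response is a binary Mapbox vector tile.
const tile = await client.searchMvt({
  index: 'my-index',
  field: 'my-geo-field',
  zoom: 7, x: 64, y: 43,
  grid_agg: 'geotile',
  grid_precision: 8,
  exact_bounds: true
})
// `tile` holds the raw protobuf payload, e.g. for serving to a map client.
```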
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-cache-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats | Elasticsearch API documentation} */ async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -95,7 +95,7 @@ export default class SearchableSnapshots { /** * Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-clear-cache.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -145,7 +145,7 @@ export default class SearchableSnapshots { /** * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-mount-snapshot.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount | Elasticsearch API documentation} */ async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -193,7 +193,7 @@ export default class SearchableSnapshots { /** * Get searchable snapshot statistics. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 201d2c550..3484f5933 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -45,7 +45,7 @@ export default class Security { /** * Activate a user profile. Create or update a user profile on behalf of another user. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. 
Elastic reserves the right to change or remove this feature in future releases without prior notice. This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name`, `roles`, and the authentication realm. For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. When updating a profile document, the API enables the document if it was disabled. Any updates do not change existing content for either the `labels` or `data` fields. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-activate-user-profile.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile | Elasticsearch API documentation} */ async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityActivateUserProfileResponse> async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityActivateUserProfileResponse, unknown>> @@ -89,7 +89,7 @@ export default class Security { /** * Authenticate a user. Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-authenticate.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate | Elasticsearch API documentation} */ async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityAuthenticateResponse> async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityAuthenticateResponse, unknown>> @@ -129,7 +129,7 @@ export default class Security { /** * Bulk delete roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-delete-role.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role | Elasticsearch API documentation} */ async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityBulkDeleteRoleResponse> async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityBulkDeleteRoleResponse, unknown>> @@ -173,7 +173,7 @@ export default class Security { /** * Bulk create or update roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files.
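A sketch of creating two roles in one call with the bulk API just described; the role names and privileges are assumed:

```
const bulk = await client.security.bulkPutRole({
  roles: {
    logs_reader: { indices: [{ names: ['logs-*'], privileges: ['read'] }] },
    metrics_admin: {
      cluster: ['monitor'],
      indices: [{ names: ['metrics-*'], privileges: ['all'] }]
    }
  }
})
// Lists which roles were created or updated, plus any per-role errors.
console.log(bulk.created, bulk.errors)
```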
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-put-role.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role | Elasticsearch API documentation} */ async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -217,7 +217,7 @@ export default class Security { /** * Bulk update API keys. Update the attributes for multiple API keys. IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. It is not possible to update expired or invalidated API keys. This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-bulk-update-api-keys.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys | Elasticsearch API documentation} */ async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -261,7 +261,7 @@ export default class Security { /** * Change passwords. Change the passwords of users in the native realm and built-in users. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-change-password.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password | Elasticsearch API documentation} */ async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -316,7 +316,7 @@ export default class Security { /** * Clear the API key cache. Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. 
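A sketch of evicting API key cache entries as described above; the wildcard clears every entry, and the specific IDs are placeholders:

```
// Evict everything...
await client.security.clearApiKeyCache({ ids: '*' })
// ...or only selected keys.
await client.security.clearApiKeyCache({ ids: ['VuaCfGcBCdbkQm-e5aOx', 'H3_AhoIBA9hmeQJdg7ij'] })
```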
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-api-key-cache.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache | Elasticsearch API documentation} */ async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -358,7 +358,7 @@ export default class Security { /** * Clear the privileges cache. Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-privilege-cache.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges | Elasticsearch API documentation} */ async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -400,7 +400,7 @@ export default class Security { /** * Clear the user cache. Evict users from the user cache. You can completely clear the cache or evict specific users. User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. There are realm settings that you can use to configure the user cache. For more information, refer to the documentation about controlling the user cache. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-cache.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms | Elasticsearch API documentation} */ async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -442,7 +442,7 @@ export default class Security { /** * Clear the roles cache. Evict roles from the native role cache. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-role-cache.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles | Elasticsearch API documentation} */ async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -484,7 +484,7 @@ export default class Security { /** * Clear service account token caches. Evict a subset of all entries from the service account token caches. Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. This API clears matching entries from both caches. 
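// Editor's sketch (not part of this patch): evicting cache entries as the
// cache-clearing APIs above describe. `ids: '*'` clears every API key cache
// entry; the realm and role names are hypothetical.
await client.security.clearApiKeyCache({ ids: '*' })
await client.security.clearCachedRealms({ realms: 'native1' })
await client.security.clearCachedRoles({ name: 'my-reader' })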
The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-clear-service-token-caches.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens | Elasticsearch API documentation} */ async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -528,7 +528,7 @@ export default class Security { /** * Create an API key. Create an API key for access without requiring basic authentication. IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-api-key.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key | Elasticsearch API documentation} */ async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -573,7 +573,7 @@ export default class Security { /** * Create a cross-cluster API key. Create an API key of the `cross_cluster` type for the API key based remote cluster access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. 
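// Editor's sketch (not part of this patch): creating a REST API key with an
// expiration and a restricted role descriptor, as described above. The
// `encoded` value is what a caller puts in an `Authorization: ApiKey` header.
const key = await client.security.createApiKey({
  name: 'my-api-key',
  expiration: '1d',
  role_descriptors: { 'read-only': { cluster: ['monitor'] } }
})
console.log(key.id, key.encoded)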
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-cross-cluster-api-key.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key | Elasticsearch API documentation} */ async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -617,7 +617,7 @@ export default class Security { /** * Create a service account token. Create a service account token for access without requiring basic authentication. NOTE: Service account tokens never expire. You must actively delete them if they are no longer needed. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-create-service-token.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token | Elasticsearch API documentation} */ async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -668,7 +668,7 @@ export default class Security { /** * Delegate PKI authentication. This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm. This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch. IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delegate-pki-authentication.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki | Elasticsearch API documentation} */ async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -712,7 +712,7 @@ export default class Security { /** * Delete application privileges. To use this API, you must have one of the following privileges: * The `manage_security` cluster privilege (or a greater privilege such as `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request.
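// Editor's sketch (not part of this patch): creating a service account token
// for the `elastic/fleet-server` account mentioned in these docs. The token
// name is hypothetical; the secret value is returned only at creation time.
const created = await client.security.createServiceToken({
  namespace: 'elastic',
  service: 'fleet-server',
  name: 'my-fleet-token'
})
console.log(created.token.value)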
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-privilege.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges | Elasticsearch API documentation} */ async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -755,7 +755,7 @@ export default class Security { /** * Delete roles. Delete roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The delete roles API cannot remove roles that are defined in roles files. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-role.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role | Elasticsearch API documentation} */ async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -797,7 +797,7 @@ export default class Security { /** * Delete role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The delete role mappings API cannot remove role mappings that are defined in role mapping files. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-role-mapping.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping | Elasticsearch API documentation} */ async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -839,7 +839,7 @@ export default class Security { /** * Delete service account tokens. Delete service account tokens for a service in a specified namespace. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-service-token.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token | Elasticsearch API documentation} */ async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -883,7 +883,7 @@ export default class Security { /** * Delete users. Delete users from the native realm. 
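// Editor's sketch (not part of this patch): deleting native-realm security
// resources with the delete APIs above; all names are hypothetical.
await client.security.deleteRole({ name: 'my-reader' })
await client.security.deleteRoleMapping({ name: 'my-mapping' })
await client.security.deleteUser({ username: 'jacknich' })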
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delete-user.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user | Elasticsearch API documentation} */ async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -925,7 +925,7 @@ export default class Security { /** * Disable users. Disable users in the native realm. By default, when you create users, they are enabled. You can use this API to revoke a user's access to Elasticsearch. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user | Elasticsearch API documentation} */ async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -967,7 +967,7 @@ export default class Security { /** * Disable a user profile. Disable user profiles so that they are not visible in user profile searches. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it's not visible in these searches. To re-enable a disabled user profile, use the enable user profile API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user-profile.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile | Elasticsearch API documentation} */ async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1009,7 +1009,7 @@ export default class Security { /** * Enable users. Enable users in the native realm. By default, when you create users, they are enabled. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user | Elasticsearch API documentation} */ async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1051,7 +1051,7 @@ export default class Security { /** * Enable a user profile. Enable user profiles to make them visible in user profile searches.
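// Editor's sketch (not part of this patch): revoking and restoring a native
// user's access with the disable/enable user APIs described above.
await client.security.disableUser({ username: 'jacknich' })
await client.security.enableUser({ username: 'jacknich' })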
NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. When you activate a user profile, it's automatically enabled and visible in user profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user-profile.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile | Elasticsearch API documentation} */ async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1093,7 +1093,7 @@ export default class Security { /** * Enroll Kibana. Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. NOTE: This API is currently intended for internal use only by Kibana. Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-kibana-enrollment.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana | Elasticsearch API documentation} */ async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1133,7 +1133,7 @@ export default class Security { /** * Enroll a node. Enroll a new node to allow it to join an existing cluster with security features enabled. The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-node-enrollment.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node | Elasticsearch API documentation} */ async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1173,7 +1173,7 @@ export default class Security { /** * Get API key information. Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. 
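// Editor's sketch (not part of this patch): the enrollment APIs above are
// intended for Kibana and for nodes joining a cluster, but they take no
// parameters and can be invoked directly for inspection.
const kibanaEnrollment = await client.security.enrollKibana()
console.log(kibanaEnrollment)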
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-api-key.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key | Elasticsearch API documentation} */ async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1213,7 +1213,7 @@ export default class Security { /** * Get builtin privileges. Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-builtin-privileges.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges | Elasticsearch API documentation} */ async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1253,7 +1253,7 @@ export default class Security { /** * Get application privileges. To use this API, you must have one of the following privileges: * The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-privileges.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges | Elasticsearch API documentation} */ async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1307,7 +1307,7 @@ export default class Security { /** * Get roles. Get roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-role.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role | Elasticsearch API documentation} */ async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1357,7 +1357,7 @@ export default class Security { /** * Get role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files. 
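// Editor's sketch (not part of this patch): reading back API keys and roles
// with the get APIs above; `owner: true` limits the response to keys owned
// by the calling user, and the role name is hypothetical.
const myKeys = await client.security.getApiKey({ owner: true })
const role = await client.security.getRole({ name: 'my-reader' })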
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-role-mapping.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping | Elasticsearch API documentation} */ async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1407,7 +1407,7 @@ export default class Security { /** * Get service accounts. Get a list of service accounts that match the provided path parameters. NOTE: Currently, only the `elastic/fleet-server` service account is available. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-service-accounts.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts | Elasticsearch API documentation} */ async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1461,7 +1461,7 @@ export default class Security { /** * Get service account credentials. To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`). The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster. NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-service-credentials.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials | Elasticsearch API documentation} */ async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1504,7 +1504,7 @@ export default class Security { /** * Get security index settings. Get the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of the index settings (those that are user-configurable) will be shown.
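// Editor's sketch (not part of this patch): listing the `elastic/fleet-server`
// service account and its credentials, as the two APIs above describe.
const accounts = await client.security.getServiceAccounts({ namespace: 'elastic', service: 'fleet-server' })
const credentials = await client.security.getServiceCredentials({ namespace: 'elastic', service: 'fleet-server' })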
This includes: * `index.auto_expand_replicas` * `index.number_of_replicas` - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-settings.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1544,7 +1544,7 @@ export default class Security { /** * Get a token. Create a bearer token for access without requiring basic authentication. The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting. When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface. The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body. A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available. The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. That time period is defined by the `xpack.security.authc.token.timeout` setting. If you want to invalidate a token immediately, you can do so by using the invalidate token API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-token.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token | Elasticsearch API documentation} */ async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1589,7 +1589,7 @@ export default class Security { /** * Get users. Get information about users in the native realm and built-in users. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user | Elasticsearch API documentation} */ async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1639,7 +1639,7 @@ export default class Security { /** * Get user privileges. Get the security privileges for the logged in user. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. To check whether a user has a specific list of privileges, use the has privileges API. 
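// Editor's sketch (not part of this patch): obtaining a bearer token with the
// password grant, assuming the token service is enabled as described above;
// the credentials are hypothetical.
const token = await client.security.getToken({
  grant_type: 'password',
  username: 'jacknich',
  password: 'l0ng-r4nd0m-p4ssw0rd'
})
console.log(token.access_token, token.expires_in)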
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-privileges.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges | Elasticsearch API documentation} */ async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1679,7 +1679,7 @@ export default class Security { /** * Get a user profile. Get a user's profile using the unique profile ID. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-get-user-profile.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile | Elasticsearch API documentation} */ async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1721,7 +1721,7 @@ export default class Security { /** * Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user's credentials. The supported user authentication credential types are: * username and password * Elasticsearch access tokens * JWTs The user, for whom the authentication credentials are provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. The API keys are created by the Elasticsearch API key service, which is automatically enabled. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys.
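// Editor's sketch (not part of this patch): granting an API key on behalf of
// another user, per the grant API key description above; the end-user
// credentials and key name are hypothetical.
const granted = await client.security.grantApiKey({
  grant_type: 'password',
  username: 'end-user',
  password: 'end-user-p4ssw0rd',
  api_key: { name: 'on-behalf-of-key' }
})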
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-grant-api-key.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key | Elasticsearch API documentation} */ async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1765,7 +1765,7 @@ export default class Security { /** * Check user privileges. Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-has-privileges.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges | Elasticsearch API documentation} */ async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1820,7 +1820,7 @@ export default class Security { /** * Check user profile privileges. Determine whether the users associated with the specified user profile IDs have all the requested privileges. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-has-privileges-user-profile.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile | Elasticsearch API documentation} */ async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1864,7 +1864,7 @@ export default class Security { /** * Invalidate API keys. This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. 
- Or, set both `username` and `realm_name` to match the user's identity. - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-api-key.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key | Elasticsearch API documentation} */ async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1909,7 +1909,7 @@ export default class Security { /** * Invalidate a token. The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. NOTE: While all parameters are optional, at least one of them is required. More specifically, either one of `token` or `refresh_token` parameters is required. If none of these two are specified, then `realm_name` and/or `username` need to be specified. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-invalidate-token.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token | Elasticsearch API documentation} */ async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1954,7 +1954,7 @@ export default class Security { /** * Authenticate OpenID Connect. Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-authenticate.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate | Elasticsearch API documentation} */ async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1998,7 +1998,7 @@ export default class Security { /** * Logout of OpenID Connect. Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. 
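// Editor's sketch (not part of this patch): checking the calling user's own
// privileges with the has privileges API, then invalidating all self-owned
// API keys (`owner: true`, one of the formats listed above) and an access
// token obtained earlier via the get token sketch; index names are
// hypothetical.
const check = await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [{ names: ['logs-*'], privileges: ['read'] }]
})
console.log(check.has_all_requested)
await client.security.invalidateApiKey({ owner: true })
await client.security.invalidateToken({ token: token.access_token })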
If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-logout.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout | Elasticsearch API documentation} */ async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2042,7 +2042,7 @@ export default class Security { /** * Prepare OpenID connect authentication. Create an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-oidc-prepare-authentication.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication | Elasticsearch API documentation} */ async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2087,7 +2087,7 @@ export default class Security { /** * Create or update application privileges. To use this API, you must have one of the following privileges: * The `manage_security` cluster privilege (or a greater privilege such as `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. Application names are formed from a prefix, with an optional suffix that conform to the following rules: * The prefix must begin with a lowercase ASCII letter. * The prefix must contain only ASCII letters or digits. * The prefix must be at least 3 characters long. * If the suffix exists, it must begin with either a dash `-` or `_`. * The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `*`. * No part of the name can contain whitespace. Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`. Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`. 
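// Editor's sketch (not part of this patch): building the OIDC authentication
// URL for a hypothetical realm, as the prepare authentication API above
// describes; the response's `redirect` is the URL for the user's browser.
const prepared = await client.security.oidcPrepareAuthentication({ realm: 'oidc1' })
console.log(prepared.redirect)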
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-privileges.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges | Elasticsearch API documentation} */ async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2121,7 +2121,7 @@ export default class Security { /** * Create or update roles. The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-role.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role | Elasticsearch API documentation} */ async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2168,7 +2168,7 @@ export default class Security { /** * Create or update role mappings. Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. NOTE: This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files. **Role templates** The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. The `roles` field is used for this purpose. For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. The `role_templates` field is used for this purpose. NOTE: To use role templates successfully, the relevant scripting feature must be enabled. Otherwise, all attempts to create a role mapping with role templates fail. All of the user fields that are available in the role mapping rules are also available in the role templates. Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated. By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names. 
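// Editor's sketch (not part of this patch): creating a role and then mapping
// the LDAP admin group from the example above onto an existing role; the
// role and mapping names are hypothetical.
await client.security.putRole({
  name: 'my-admin',
  cluster: ['all'],
  indices: [{ names: ['*'], privileges: ['all'] }]
})
await client.security.putRoleMapping({
  name: 'map-ldap-admins',
  roles: ['superuser'],
  rules: { field: { groups: 'cn=admin,dc=example,dc=com' } },
  enabled: true
})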
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-role-mapping.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping | Elasticsearch API documentation} */ async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2215,7 +2215,7 @@ export default class Security { /** * Create or update users. Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. To change a user's password without updating any other fields, use the change password API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-put-user.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user | Elasticsearch API documentation} */ async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2262,7 +2262,7 @@ export default class Security { /** * Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-api-key.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys | Elasticsearch API documentation} */ async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2307,7 +2307,7 @@ export default class Security { /** * Find roles with a query. Get roles in a paginated manner. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. You can optionally filter the results with a query. Also, the results can be paginated and sorted. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-role.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role | Elasticsearch API documentation} */ async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2352,7 +2352,7 @@ export default class Security { /** * Find users with a query. 
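// Editor's sketch (not part of this patch): creating a native user and then
// paging through API keys with a query, per the descriptions above; names
// and credentials are hypothetical.
await client.security.putUser({
  username: 'jacknich',
  password: 'l0ng-r4nd0m-p4ssw0rd',
  roles: ['my-reader']
})
const page = await client.security.queryApiKeys({
  query: { match: { name: 'my-api-key' } },
  size: 10
})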
Get information for users in a paginated manner. You can optionally filter the results with a query. NOTE: As opposed to the get user API, built-in users are excluded from the result. This API is only for native users. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-query-user.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user | Elasticsearch API documentation} */ async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2397,7 +2397,7 @@ export default class Security { /** * Authenticate SAML. Submit a SAML response message to Elasticsearch for consumption. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The SAML message that is submitted can be: * A response to a SAML authentication request that was previously created using the SAML prepare authentication API. * An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow. In either case, the SAML message needs to be a base64 encoded XML document with a root element of `<Response>`. After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-authenticate.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate | Elasticsearch API documentation} */ async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2441,7 +2441,7 @@ export default class Security { /** * Logout of SAML completely. Verifies the logout response sent from the SAML IdP. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. This API verifies the response by ensuring the content is relevant and validating its signature. An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller of this API must prepare the request accordingly so that this API can handle either of them.
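// Editor's sketch (not part of this patch): querying native users, which (as
// noted above) excludes built-in users from the result.
const found = await client.security.queryUser({
  query: { match: { username: 'jacknich' } }
})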
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-complete-logout.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout | Elasticsearch API documentation} */ async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2485,7 +2485,7 @@ export default class Security { /** * Invalidate SAML. Submit a SAML LogoutRequest message to Elasticsearch for consumption. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The logout request comes from the SAML IdP during an IdP initiated Single Logout. The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. After successful validation of the request, Elasticsearch invalidates the access token and refresh token that correspond to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-invalidate.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate | Elasticsearch API documentation} */ async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2529,7 +2529,7 @@ export default class Security { /** * Logout of SAML. Submits a request to invalidate an access token and refresh token. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. This API invalidates the tokens that were generated for a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout). - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-logout.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout | Elasticsearch API documentation} */ async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2573,7 +2573,7 @@ export default class Security { /** * Prepare SAML authentication. Create a SAML authentication request (`<AuthnRequest>`) as a URL string based on the configuration of the respective SAML realm in Elasticsearch. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
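// Editor's sketch (not part of this patch): starting an SP-initiated SAML
// single logout for the token pair obtained in the earlier get token sketch;
// the values are placeholders.
const samlLogoutResp = await client.security.samlLogout({
  token: token.access_token,
  refresh_token: token.refresh_token
})
console.log(samlLogoutResp.redirect)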
This API returns a URL pointing to the SAML Identity Provider. You can use the URL to redirect the browser of the user in order to continue the authentication process. The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded. If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`. These parameters contain the algorithm used for the signature and the signature value itself. It also returns a random string that uniquely identifies this SAML Authentication request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-prepare-authentication.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication | Elasticsearch API documentation} */ async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2618,7 +2618,7 @@ export default class Security { /** * Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 Service Provider. The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-saml-sp-metadata.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata | Elasticsearch API documentation} */ async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2660,7 +2660,7 @@ export default class Security { /** * Suggest a user profile. Get suggestions for user profiles that match specified search criteria. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. 
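A minimal sketch of the user-profile suggestion call just described; the search string and size are illustrative values only.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Suggest up to ten profiles whose name data matches the partial string 'jack'.
const response = await client.security.suggestUserProfiles({ name: 'jack', size: 10 })
console.log(response.profiles.map(profile => profile.uid))
```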
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-suggest-user-profile.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles | Elasticsearch API documentation} */ async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2705,7 +2705,7 @@ export default class Security { /** * Update an API key. Update attributes of an existing API key. This API supports updates to an API key's access scope, expiration, and metadata. To use this API, you must have at least the `manage_own_api_key` cluster privilege. Users can only update API keys that they created or that were granted to them. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required. Use this API to update API keys created by the create API key or grant API Key APIs. If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-api-key.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key | Elasticsearch API documentation} */ async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2752,7 +2752,7 @@ export default class Security { /** * Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. To update an API key, the owner user's credentials are required. It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. This API supports updates to an API key's access scope, metadata, and expiration. The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. 
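To ground the update API key call documented in this hunk, a minimal sketch follows; the key ID, role descriptor, and metadata are invented values, and omitting `role_descriptors` would instead re-snapshot the owner's current permissions, as the text explains.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Narrow an existing key (invented ID) to read-only access on one index
// pattern and refresh its metadata; the snapshot of the owner's permissions
// is updated automatically as part of the call.
await client.security.updateApiKey({
  id: 'VuaCfGcBCdbkQm-e5aOx', // example key ID
  role_descriptors: {
    'logs-read-only': {
      indices: [{ names: ['logs-*'], privileges: ['read'] }]
    }
  },
  metadata: { environment: 'staging' }
})
```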
NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-cross-cluster-api-key.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key | Elasticsearch API documentation} */ async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2799,7 +2799,7 @@ export default class Security { /** * Update security index settings. Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-settings.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings | Elasticsearch API documentation} */ async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2844,7 +2844,7 @@ export default class Security { /** * Update user profile data. Update specific data for the user profile that is associated with a unique ID. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. To use this API, you must have one of the following privileges: * The `manage_user_profile` cluster privilege. * The `update_profile_data` global privilege for the namespaces that are referenced in the request. This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. For both labels and data, content is namespaced by the top-level fields. The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. 
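A minimal sketch of the update user profile data call described above; the `uid` and the namespaced payloads are invented, with the top-level keys of `labels` and `data` acting as the namespaces the text mentions.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Merge new content into a profile document; conflicting keys are replaced.
await client.security.updateUserProfileData({
  uid: 'u_kd2JMqwUQwSCCOxMv7M1vw', // example profile UID
  labels: { direction: 'north' },      // 'direction' lives in the labels namespace
  data: { app1: { theme: 'default' } } // only the 'app1' namespace is touched
})
```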
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-update-user-profile-data.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data | Elasticsearch API documentation} */ async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index 61a4ce6cb..ffa3b9c39 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -45,7 +45,7 @@ export default class Shutdown { /** * Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-shutdown.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node | Elasticsearch API documentation} */ async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Shutdown { /** * Get the shutdown status. Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-shutdown.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node | Elasticsearch API documentation} */ async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -137,7 +137,7 @@ export default class Shutdown { /** * Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster. If the operator privileges feature is enabled, you must be an operator to use this API. The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. 
This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type. IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-shutdown.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node | Elasticsearch API documentation} */ async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts index ee6e13de6..ba1689505 100644 --- a/src/api/api/simulate.ts +++ b/src/api/api/simulate.ts @@ -45,7 +45,7 @@ export default class Simulate { /** * Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well, the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.
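A rough sketch of the simulate ingest call described above; the index name, document, and substitute pipeline are invented, and nothing is indexed.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Run a document through the pipelines of a hypothetical index, substituting
// one pipeline definition for this request only; nothing is written.
const simulation = await client.simulate.ingest({
  index: 'my-index',
  docs: [{ _id: '1', _source: { message: 'hello world' } }],
  pipeline_substitutions: {
    'my-pipeline': {
      processors: [{ set: { field: 'greeting', value: 'simulated' } }]
    }
  }
})
console.log(simulation.docs)
```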
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-ingest-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest | Elasticsearch API documentation} */ async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithOutMeta): Promise async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index 9cb2542aa..9e6a856f9 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -45,7 +45,7 @@ export default class Slm { /** * Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-delete-policy.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle | Elasticsearch API documentation} */ async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Slm { /** * Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-execute-lifecycle.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle | Elasticsearch API documentation} */ async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -129,7 +129,7 @@ export default class Slm { /** * Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-execute-retention.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention | Elasticsearch API documentation} */ async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -169,7 +169,7 @@ export default class Slm { /** * Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. 
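A small sketch covering the policy lookup just described and the manual run from earlier in this hunk; the policy ID is a placeholder.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Fetch the definition and latest snapshot attempts for one policy...
const policies = await client.slm.getLifecycle({ policy_id: 'daily-snapshots' })
console.log(policies['daily-snapshots']?.policy)

// ...and run it immediately instead of waiting for its schedule.
const run = await client.slm.executeLifecycle({ policy_id: 'daily-snapshots' })
console.log(run.snapshot_name)
```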
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-policy.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle | Elasticsearch API documentation} */ async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -219,7 +219,7 @@ export default class Slm { /** * Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats | Elasticsearch API documentation} */ async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -259,7 +259,7 @@ export default class Slm { /** * Get the snapshot lifecycle management status. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-status.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status | Elasticsearch API documentation} */ async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -299,7 +299,7 @@ export default class Slm { /** * Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-put-policy.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle | Elasticsearch API documentation} */ async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -346,7 +346,7 @@ export default class Slm { /** * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-start.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start | Elasticsearch API documentation} */ async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -386,7 +386,7 @@ export default class Slm { /** * Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. 
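To make the create-or-update policy call above concrete, a sketch with invented policy values follows; the flattened fields mirror the `SlmPutLifecycleRequest` type used in this file.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Register (or bump the version of) a nightly snapshot policy.
await client.slm.putLifecycle({
  policy_id: 'daily-snapshots',
  schedule: '0 30 1 * * ?',      // 01:30 every day, cron syntax
  name: '<daily-snap-{now/d}>',  // date-math snapshot name
  repository: 'my_repository',   // must already be registered
  config: { indices: ['data-*'], ignore_unavailable: false },
  retention: { expire_after: '30d', min_count: 5, max_count: 50 }
})
```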
This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-stop.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop | Elasticsearch API documentation} */ async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index 762afb7fb..3b37c9bdb 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -45,7 +45,7 @@ export default class Snapshot { /** * Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot-repo-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository | Elasticsearch API documentation} */ async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Snapshot { /** * Clone a snapshot. Clone part or all of a snapshot into another snapshot in the same repository. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clone-snapshot-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone | Elasticsearch API documentation} */ async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -136,7 +136,7 @@ export default class Snapshot { /** * Create a snapshot. Take a snapshot of a cluster or of data streams and indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-snapshot-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create | Elasticsearch API documentation} */ async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -184,7 +184,7 @@ export default class Snapshot { /** * Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
To register a snapshot repository, the cluster's global metadata must be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. Several options for this API can be specified using a query parameter or a request body parameter. If both parameters are specified, only the query parameter is used. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-snapshot-repo-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository | Elasticsearch API documentation} */ async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -221,7 +221,7 @@ export default class Snapshot { /** * Delete snapshots. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-snapshot-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -264,7 +264,7 @@ export default class Snapshot { /** * Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-snapshot-repo-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository | Elasticsearch API documentation} */ async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -306,7 +306,7 @@ export default class Snapshot { /** * Get snapshot information. NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. Snapshots concurrently created may be seen during an iteration. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-snapshot-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get | Elasticsearch API documentation} */ async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -349,7 +349,7 @@ export default class Snapshot { /** * Get snapshot repository information.
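Putting the repository registration and snapshot creation calls from this hunk together, here is a minimal sketch; the repository location and all names are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Register a shared-filesystem repository (the location must be allowed by
// the path.repo setting on every node), then take a snapshot into it.
await client.snapshot.createRepository({
  name: 'my_repository',
  repository: { type: 'fs', settings: { location: 'my_backup_location' } }
})

const snap = await client.snapshot.create({
  repository: 'my_repository',
  snapshot: 'snapshot_1',
  wait_for_completion: true // block until the snapshot finishes
})
console.log(snap.snapshot?.state)
```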
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-snapshot-repo-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository | Elasticsearch API documentation} */ async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -399,7 +399,7 @@ export default class Snapshot { /** * Analyze a snapshot repository. Analyze the performance characteristics and any incorrect behaviour found in a repository. The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions. There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. 
Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following: * Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. * Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. * Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. This indicates it behaves incorrectly in ways that the former version did not detect. You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. NOTE: This API may not work correctly in a mixed-version cluster. *Implementation details* NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution. For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. 
The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data. For some blob-level tasks, the executing node will cancel the write before it is complete. In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/repo-analysis-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze | Elasticsearch API documentation} */ async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -441,7 +441,7 @@ export default class Snapshot { /** * Verify the repository integrity. Verify the integrity of the contents of a snapshot repository.
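Following the workflow described above (start small, then scale up), here is a sketch of a modest first analysis run; the repository name is a placeholder and the tuning parameters shown are assumed to match the `SnapshotRepositoryAnalyzeRequest` type used here.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// A deliberately small first analysis; scale blob_count and max_blob_size up
// on later runs, and always allow a generous timeout.
const report = await client.snapshot.repositoryAnalyze({
  name: 'my_repository',
  blob_count: 100,
  max_blob_size: '10mb',
  timeout: '120s'
})
console.log(report)
```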
This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail. If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity. Until you do so: * It may not be possible to restore some snapshots from this repository. * Searchable snapshots may report errors when searched or may have unassigned shards. * Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored. * Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk. * Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents. If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. You must also identify what caused the damage and take action to prevent it from happening again. If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository. Avoid all operations which write to the repository while the verify repository integrity API is running. If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: This API may not work correctly in a mixed-version cluster. The default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster. For instance, by default it will only use at most half of the `snapshot_meta` threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool. If you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster. For large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API. The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/verify-repo-integrity-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity | Elasticsearch API documentation} */ async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -483,7 +483,7 @@ export default class Snapshot { /** * Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible. To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks. Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream ``` If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/restore-snapshot-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore | Elasticsearch API documentation} */ async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -531,7 +531,7 @@ export default class Snapshot { /** * Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. If you omit the `` request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify `` and `` to retrieve information for specific snapshots, even if they're not currently running. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results.
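A minimal sketch of the restore call described above; the snapshot, index names, and rename settings are illustrative.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Restore two indices from a snapshot, renaming them on the way in so the
// originals are left untouched; all names here are placeholders.
await client.snapshot.restore({
  repository: 'my_repository',
  snapshot: 'snapshot_1',
  indices: ['index_1', 'index_2'],
  rename_pattern: '(.+)',
  rename_replacement: 'restored_$1',
  wait_for_completion: true
})
```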
These requests can also tax machine resources and, when using cloud storage, incur high processing costs. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-snapshot-status-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status | Elasticsearch API documentation} */ async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -585,7 +585,7 @@ export default class Snapshot { /** * Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/verify-snapshot-repo-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository | Elasticsearch API documentation} */ async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index df4bac5d9..871cb7139 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -45,7 +45,7 @@ export default class Sql { /** * Clear an SQL search cursor. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-sql-cursor-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor | Elasticsearch API documentation} */ async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -89,7 +89,7 @@ export default class Sql { /** * Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: * Users with the `cancel_task` cluster privilege. * The user who first submitted the search. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-async-sql-search-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async | Elasticsearch API documentation} */ async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -131,7 +131,7 @@ export default class Sql { /** * Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. 
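A sketch tying the async SQL calls in this hunk together: submit a query that may outlive the initial request, then fetch the stored results by ID; the query text and timeouts are invented.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Start a query; if it finishes within 2s the rows come back inline,
// otherwise an ID is returned so the stored results can be fetched later.
const first = await client.sql.query({
  query: 'SELECT author, COUNT(*) FROM "library" GROUP BY author',
  wait_for_completion_timeout: '2s',
  keep_alive: '2d'
})

if (first.id != null && first.is_running) {
  const results = await client.sql.getAsync({ id: first.id })
  console.log(results.columns, results.rows)
} else {
  console.log(first.columns, first.rows)
}
```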
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async | Elasticsearch API documentation} */ async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -173,7 +173,7 @@ export default class Sql { /** * Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-status-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status | Elasticsearch API documentation} */ async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -215,7 +215,7 @@ export default class Sql { /** * Get SQL search results. Run an SQL request. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/sql-search-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query | Elasticsearch API documentation} */ async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -260,7 +260,7 @@ export default class Sql { /** * Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding `cursor`. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/sql-translate-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate | Elasticsearch API documentation} */ async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts index 9f4b81c50..29f25f090 100644 --- a/src/api/api/ssl.ts +++ b/src/api/api/ssl.ts @@ -45,7 +45,7 @@ export default class Ssl { /** * Get SSL certificates. Get information about the X.509 certificates that are used to encrypt communications in the cluster. The API returns a list that includes certificates from all TLS contexts including: - Settings for transport and HTTP interfaces - TLS settings that are used within authentication realms - TLS settings for remote monitoring exporters The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. It also includes certificates that are used for configuring server identity, such as the `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings.
The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration. If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-ssl.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates | Elasticsearch API documentation} */ async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 9f52fcff5..379510816 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -45,7 +45,7 @@ export default class Synonyms { /** * Delete a synonym set. You can only delete a synonyms set that is not in use by any index analyzer. Synonyms sets can be used in synonym graph token filters and synonym token filters. These synonym filters can be used as part of search analyzers. Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase. If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. To prevent that, synonyms sets that are used in analyzers can't be deleted. A delete request in this case will return a 400 response code. To remove a synonyms set, you must first remove all indices that contain analyzers using it. You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. Once finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonyms-set.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym | Elasticsearch API documentation} */ async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -87,7 +87,7 @@ export default class Synonyms { /** * Delete a synonym rule. Delete a synonym rule from a synonym set. 
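The SSL certificates call described a few hunks above takes no parameters; a short sketch of listing expiry dates follows (the node URL is a placeholder).

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Each entry describes one certificate from a configured TLS context.
const certs = await client.ssl.certificates()
for (const cert of certs) {
  console.log(`${cert.path} (${cert.alias ?? 'no alias'}) expires ${cert.expiry}`)
}
```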
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonym-rule.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule | Elasticsearch API documentation} */ async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -130,7 +130,7 @@ export default class Synonyms { /** * Get a synonym set. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonyms-set.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym | Elasticsearch API documentation} */ async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -172,7 +172,7 @@ export default class Synonyms { /** * Get a synonym rule. Get a synonym rule from a synonym set. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonym-rule.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule | Elasticsearch API documentation} */ async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -215,7 +215,7 @@ export default class Synonyms { /** * Get all synonym sets. Get a summary of all defined synonym sets. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonyms-set.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym | Elasticsearch API documentation} */ async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -255,7 +255,7 @@ export default class Synonyms { /** * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. 
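A minimal sketch of the create-or-update synonym set call just described; the set ID and rules are invented, and updating the set reloads the affected search analyzers as the text notes.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder node

// Create or replace a synonym set (at most 10,000 rules per set).
await client.synonyms.putSynonym({
  id: 'my-synonyms-set', // invented set ID
  synonyms_set: [
    { id: 'rule-1', synonyms: 'hello, hi, howdy' },
    { synonyms: 'laptop, notebook' } // rule ID is generated when omitted
  ]
})
```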
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonyms-set.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym | Elasticsearch API documentation} */ async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -302,7 +302,7 @@ export default class Synonyms { /** * Create or update a synonym rule. Create or update a synonym rule in a synonym set. If any of the synonym rules included is invalid, the API returns an error. When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonym-rule.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule | Elasticsearch API documentation} */ async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index a906a58fd..a8f7ccf20 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -45,7 +45,7 @@ export default class Tasks { /** * Cancel a task. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks | Elasticsearch API documentation} */ async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -95,7 +95,7 @@ export default class Tasks { /** * Get task information. Get information about a task currently running in the cluster. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. If the task identifier is not found, a 404 response code indicates that there are no resources that match the request. 
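A sketch of the get-task call described above, assuming a configured `client`; the task ID is a placeholder:

```ts
// Look up a single task; a 404 means no task matches the identifier.
const task = await client.tasks.get({
  task_id: 'u5lcZHqcQhu-rUoFaqDphA:45', // placeholder task ID
  wait_for_completion: false
})
console.log(task.completed, task.task.action)
```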
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks | Elasticsearch API documentation} */ async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -137,7 +137,7 @@ export default class Tasks { /** * Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. **Identifying running tasks** The `X-Opaque-Id` header, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the headers field of the task information. This enables you to track certain calls or associate certain tasks with the client that started them. For example: ``` curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" ``` The API returns the following result: ``` HTTP/1.1 200 OK X-Opaque-Id: 123456 content-type: application/json; charset=UTF-8 content-length: 831 { "tasks" : { "u5lcZHqcQhu-rUoFaqDphA:45" : { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 45, "type" : "transport", "action" : "cluster:monitor/tasks/lists", "start_time_in_millis" : 1513823752749, "running_time_in_nanos" : 293139, "cancellable" : false, "headers" : { "X-Opaque-Id" : "123456" }, "children" : [ { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 46, "type" : "direct", "action" : "cluster:monitor/tasks/lists[n]", "start_time_in_millis" : 1513823752750, "running_time_in_nanos" : 92133, "cancellable" : false, "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", "headers" : { "X-Opaque-Id" : "123456" } } ] } } } ``` In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. The `X-Opaque-Id` in the children `headers` is the ID for the child task of the task that was initiated by the REST request. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks | Elasticsearch API documentation} */ async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index af8c1d1e4..ad9fa1e0e 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -38,8 +38,8 @@ import * as T from '../types' interface That { transport: Transport } /** - * Get terms in an index. Discover terms that match a partial string in an index. This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios. If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate. This can occur due to a few reasons, such as a request timeout or a node error. NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted.
It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-terms-enum.html | Elasticsearch API documentation} + * Get terms in an index. Discover terms that match a partial string in an index. This API is designed for low-latency look-ups used in auto-complete scenarios. > info > The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum | Elasticsearch API documentation} */ export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index a4333ccc0..c3f461487 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -38,8 +38,8 @@ import * as T from '../types' interface That { transport: Transport } /** - * Get term vector information. Get information and statistics about terms in the fields of a particular document. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html | Elasticsearch API documentation} + * Get term vector information. Get information and statistics about terms in the fields of a particular document. You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. For example: ``` GET /my-index-000001/_termvectors/1?fields=message ``` Fields can be specified using wildcards, similar to the multi match query. Term vectors are real-time by default, not near real-time. This can be changed by setting `realtime` parameter to `false`. You can request three types of values: _term information_, _term statistics_, and _field statistics_. By default, all term information and field statistics are returned for all fields but term statistics are excluded. **Term information** * term frequency in the field (always returned) * term positions (`positions: true`) * start and end offsets (`offsets: true`) * term payloads (`payloads: true`), as base64 encoded bytes If the requested information wasn't stored in the index, it will be computed on the fly if possible. Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. > warn > Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. **Behaviour** The term and field statistics are not accurate. Deleted documents are not taken into account. The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. 
By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors | Elasticsearch API documentation} */ export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index 86efca83c..fd245e577 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -45,7 +45,7 @@ export default class TextStructure { /** * Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-field-structure.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure | Elasticsearch API documentation} */ async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -84,7 +84,7 @@ export default class TextStructure { /** * Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. 
Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-message-structure.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure | Elasticsearch API documentation} */ async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -128,7 +128,7 @@ export default class TextStructure { /** * Find the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary text formats are not currently supported. The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb. The response from the API contains: * A couple of messages from the beginning of the text. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/find-structure.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure | Elasticsearch API documentation} */ async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -162,7 +162,7 @@ export default class TextStructure { /** * Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. 
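A small sketch of the Grok testing call described above, assuming a configured `client`; the pattern and sample lines are illustrative only:

```ts
// Reports, per input line, whether it matches and the offsets/lengths of captures.
const response = await client.textStructure.testGrokPattern({
  grok_pattern: '%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{GREEDYDATA:message}',
  text: [
    '2024-01-01T12:00:00,000 INFO starting up',
    'this line does not match'
  ]
})
console.log(response.matches)
```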
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/test-grok-pattern.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern | Elasticsearch API documentation} */ async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index b826dfffd..4872de3e1 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -44,8 +44,8 @@ export default class Transform { } /** - * Delete a transform. Deletes a transform. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-transform.html | Elasticsearch API documentation} + * Delete a transform. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform | Elasticsearch API documentation} */ async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -125,8 +125,8 @@ export default class Transform { } /** - * Get transforms. Retrieves configuration information for transforms. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform.html | Elasticsearch API documentation} + * Get transforms. Get configuration information for transforms. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform | Elasticsearch API documentation} */ async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -175,8 +175,8 @@ export default class Transform { } /** - * Get transform stats. Retrieves usage information for transforms. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform-stats.html | Elasticsearch API documentation} + * Get transform stats. Get usage information for transforms. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats | Elasticsearch API documentation} */ async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -218,7 +218,7 @@ export default class Transform { /** * Preview a transform. Generates a preview of the results that you will get when you create a transform with the same configuration. It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations. 
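A hedged sketch of the preview call described above; the source index and pivot definition are hypothetical:

```ts
// Preview up to 100 transformed documents plus the deduced destination mappings.
const preview = await client.transform.previewTransform({
  source: { index: 'my-source-index' }, // hypothetical index
  pivot: {
    group_by: { customer: { terms: { field: 'customer_id' } } },
    aggregations: { total_spend: { sum: { field: 'price' } } }
  }
})
console.log(preview.generated_dest_index, preview.preview.length)
```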
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/preview-transform.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform | Elasticsearch API documentation} */ async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> @@ -273,7 +273,7 @@ export default class Transform { /** * Create a transform. Creates a transform. A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-transform.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform | Elasticsearch API documentation} */ async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -319,8 +319,8 @@ export default class Transform { } /** - * Reset a transform. Resets a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/reset-transform.html | Elasticsearch API documentation} + * Reset a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform | Elasticsearch API documentation} */ async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -361,8 +361,8 @@ export default class Transform { } /** - * Schedule a transform to start now. Instantly runs a transform to process data. If you _schedule_now a transform, it will process the new data instantly, without waiting for the configured frequency interval. After _schedule_now API is called, the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/schedule-now-transform.html | Elasticsearch API documentation} + * Schedule a transform to start now. Instantly run a transform to process data. If you run this API, the transform will process the new data instantly, without waiting for the configured frequency interval. After the API is called, the transform will be processed again at `now + frequency` unless the API is called again in the meantime. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform | Elasticsearch API documentation} */ async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -403,8 +403,8 @@ export default class Transform { } /** - * Start a transform. Starts a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-transform.html | Elasticsearch API documentation} + * Start a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. 
If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform | Elasticsearch API documentation} */ async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -446,7 +446,7 @@ export default class Transform { /** * Stop transforms. Stops one or more transforms. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-transform.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform | Elasticsearch API documentation} */ async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -488,7 +488,7 @@ export default class Transform { /** * Update a transform. Updates certain properties of a transform. All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-transform.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform | Elasticsearch API documentation} */ async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -535,7 +535,7 @@ export default class Transform { /** * Upgrade all transforms. 
Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. Resolve the issue then re-run the process again. A summary is returned when the upgrade is finished. To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/upgrade-transforms.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms | Elasticsearch API documentation} */ async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/update.ts b/src/api/api/update.ts index d4dc6f183..06d06ae63 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Update a document. Update a document by running a script or passing a partial document. If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation: * Gets the document (collocated with the shard) from the index. * Runs the specified script. * Indexes the result. The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). 
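A minimal sketch of both update styles described above (partial document and script), assuming a configured `client`; the index name and document ID are placeholders:

```ts
// Merge a partial document into the existing _source.
await client.update({
  index: 'my-index', // placeholder index and ID
  id: '1',
  doc: { views: 0 }
})

// Or run a script against the ctx map described above.
await client.update({
  index: 'my-index',
  id: '1',
  script: {
    source: 'ctx._source.views += params.count',
    params: { count: 1 }
  }
})
```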
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update | Elasticsearch API documentation} */ export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts index fb09d42f4..38acf4531 100644 --- a/src/api/api/update_by_query.ts +++ b/src/api/api/update_by_query.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `index` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick, they are not rolled back. **Throttling update requests** To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. 
By default the batch size is 1000, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with slices will cancel each sub-request. * Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. * Update performance scales linearly across available resources with the number of slices. Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. **Update the document source** Update by query supports scripts to update the document source. As with the update API, you can set `ctx.op` to change the operation that is performed. Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. The update by query operation skips updating the document and increments the `noop` counter. Set `ctx.op = "delete"` if your script decides that the document should be deleted. The update by query operation deletes the document and increments the `deleted` counter. Update by query supports only `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. 
Setting any other field in `ctx` is an error. This API enables you to only modify the source of matching documents; you cannot move them. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query | Elasticsearch API documentation} */ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts index c5c9b6c31..eb96ad0ed 100644 --- a/src/api/api/update_by_query_rethrottle.ts +++ b/src/api/api/update_by_query_rethrottle.ts @@ -39,7 +39,7 @@ interface That { transport: Transport } /** * Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html#docs-update-by-query-rethrottle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle | Elasticsearch API documentation} */ export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index df5201232..7e795d62b 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -45,7 +45,7 @@ export default class Watcher { /** * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions.<action-id>.ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error. The reason for this behavior is to prevent overwriting the watch status from a watch execution. Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false).
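A brief sketch of the acknowledge/activate flow described above, assuming a configured `client`; the watch and action IDs are hypothetical:

```ts
// Acknowledge one action to throttle its further executions.
await client.watcher.ackWatch({
  watch_id: 'cluster_health_watch', // hypothetical IDs
  action_id: 'email_admin'
})

// Flip the watch back to the active state if it was deactivated.
await client.watcher.activateWatch({ watch_id: 'cluster_health_watch' })
```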
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-activate-watch.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch | Elasticsearch API documentation} */ async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -137,7 +137,7 @@ export default class Watcher { /** * Deactivate a watch. A watch can be either active or inactive. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-deactivate-watch.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch | Elasticsearch API documentation} */ async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -179,7 +179,7 @@ export default class Watcher { /** * Delete a watch. When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. Deleting a watch does not delete any watch execution records related to this watch from the watch history. IMPORTANT: Deleting a watch must be done by using only this API. Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API. When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-delete-watch.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch | Elasticsearch API documentation} */ async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -221,7 +221,7 @@ export default class Watcher { /** * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher. When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.
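A hedged sketch of forcing a watch to run, as described above; the watch ID is a placeholder:

```ts
// Run the watch once, ignoring its condition, without writing a history record.
const result = await client.watcher.executeWatch({
  id: 'cluster_health_watch', // placeholder watch ID
  ignore_condition: true,
  record_execution: false
})
console.log(result.watch_record.state)
```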
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-execute-watch.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch | Elasticsearch API documentation} */ async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -276,7 +276,7 @@ export default class Watcher { /** * Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-get-settings.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -316,7 +316,7 @@ export default class Watcher { /** * Get a watch. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-get-watch.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch | Elasticsearch API documentation} */ async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -358,7 +358,7 @@ export default class Watcher { /** * Create or update a watch. When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. IMPORTANT: You must use Kibana or this API to create a watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. When you add a watch you can also define its initial active state by setting the *active* parameter. When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-put-watch.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch | Elasticsearch API documentation} */ async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -405,7 +405,7 @@ export default class Watcher { /** * Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. 
Note that only the `_id` and `metadata.*` fields are queryable or sortable. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-query-watches.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches | Elasticsearch API documentation} */ async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -450,7 +450,7 @@ export default class Watcher { /** * Start the watch service. Start the Watcher service if it is not already running. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-start.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start | Elasticsearch API documentation} */ async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -490,7 +490,7 @@ export default class Watcher { /** * Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -540,7 +540,7 @@ export default class Watcher { /** * Stop the watch service. Stop the Watcher service if it is running. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-stop.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop | Elasticsearch API documentation} */ async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -580,7 +580,7 @@ export default class Watcher { /** * Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. 
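A small sketch of updating the modifiable `.watches` settings named above, assuming a configured `client`; the setting value is illustrative:

```ts
// Only a subset of settings is accepted, e.g. index.auto_expand_replicas.
await client.watcher.updateSettings({
  'index.auto_expand_replicas': '0-4'
})
```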
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/watcher-api-update-settings.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings | Elasticsearch API documentation} */ async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts index 0082b65e2..9e6a66f7b 100644 --- a/src/api/api/xpack.ts +++ b/src/api/api/xpack.ts @@ -45,7 +45,7 @@ export default class Xpack { /** * Get information. The information provided by the API includes: * Build information including the build number and timestamp. * License information about the currently installed license. * Feature information for the features that are currently enabled and available under the current license. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/info-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info | Elasticsearch API documentation} */ async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -85,7 +85,7 @@ export default class Xpack { /** * Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/usage-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack | Elasticsearch API documentation} */ async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/types.ts b/src/api/types.ts index b2c5dc05d..03cc037eb 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -187,9 +187,9 @@ export interface CountRequest extends RequestBase { routing?: Routing /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long - /** The query in Lucene query string syntax. */ + /** The query in Lucene query string syntax. This parameter cannot be used with a request body. */ q?: string - /** Defines the search definition using the Query DSL. The query is optional, and when not provided, it will use `match_all` to count all the docs. */ + /** Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter. */ query?: QueryDslQueryContainer /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, routing?: never, terminate_after?: never, q?: never, query?: never } @@ -363,11 +363,11 @@ export interface DeleteByQueryRethrottleRequest extends RequestBase { export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase export interface DeleteScriptRequest extends RequestBase { -/** Identifier for the stored script or search template. */ +/** The identifier for the stored script or search template. */ id: Id - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } @@ -454,33 +454,33 @@ export interface ExplainExplanationDetail { } export interface ExplainRequest extends RequestBase { -/** Defines the document ID. */ +/** The document identifier. */ id: Id - /** Index names used to limit the request. Only a single index name can be provided to this parameter. */ + /** Index names that are used to limit the request. Only a single index name can be provided to this parameter. */ index: IndexName - /** Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. */ + /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string - /** If `true`, wildcard and prefix queries are analyzed. */ + /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean - /** The default operator for query string query: `AND` or `OR`. */ + /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator - /** Field to use as default where no field prefix is given in the query string. */ + /** The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ df?: string - /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. */ + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. 
*/ lenient?: boolean - /** Specifies the node or shard the operation should be performed on. Random by default. */ + /** The node or shard the operation should be performed on. It is random by default. */ preference?: string - /** Custom value used to route operations to a specific shard. */ + /** A custom value used to route operations to a specific shard. */ routing?: Routing - /** True or false to return the `_source` field or not, or a list of fields to return. */ + /** `True` or `false` to return the `_source` field or not, or a list of fields to return. */ _source?: SearchSourceConfigParam - /** A comma-separated list of source fields to exclude from the response. */ + /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in the `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. */ + /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields /** A comma-separated list of stored fields to return in the response. */ stored_fields?: Fields - /** Query in the Lucene query string syntax. */ + /** The query in the Lucene query string syntax. */ q?: string /** Defines the search definition using the Query DSL. */ query?: QueryDslQueryContainer @@ -514,27 +514,27 @@ export interface FieldCapsFieldCapability { } export interface FieldCapsRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. */ +/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. */ index?: Indices /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean /** If true, unmapped fields are included in the response. */ include_unmapped?: boolean - /** An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent */ + /** A comma-separated list of filters to apply to the response.
*/ filters?: string - /** Only return results for fields that have one of the types in the list */ + /** A comma-separated list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. */ types?: string[] /** If false, empty fields are not included in the response. */ include_empty_fields?: boolean - /** List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. */ + /** A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. */ fields?: Fields - /** Allows to filter indices if the provided query rewrites to match_none on every shard. */ + /** Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. */ index_filter?: QueryDslQueryContainer - /** Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. */ + /** Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. */ runtime_mappings?: MappingRuntimeFields /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, fields?: never, index_filter?: never, runtime_mappings?: never } @@ -596,9 +596,9 @@ export interface GetRequest extends RequestBase { export type GetResponse = GetGetResult export interface GetScriptRequest extends RequestBase { -/** Identifier for the stored script or search template. */ +/** The identifier for the stored script or search template. */ id: Id - /** Specify timeout for connection to master */ + /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never } @@ -938,21 +938,21 @@ export interface InfoResponse { } export interface KnnSearchRequest extends RequestBase { -/** A comma-separated list of index names to search; use `_all` or to perform the operation on all indices */ +/** A comma-separated list of index names to search; use `_all` or to perform the operation on all indices. */ index: Indices - /** A comma-separated list of specific routing values */ + /** A comma-separated list of specific routing values. */ routing?: Routing - /** Indicates which source fields are returned for matching documents. 
These fields are returned in the hits._source property of the search response. */ + /** Indicates which source fields are returned for matching documents. These fields are returned in the `hits._source` property of the search response. */ _source?: SearchSourceConfig - /** The request returns doc values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. */ + /** The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - /** List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. */ + /** A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. */ stored_fields?: Fields - /** The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. */ + /** The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. */ fields?: Fields - /** Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. */ + /** A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] - /** kNN query to execute */ + /** The kNN query to run. */ knn: KnnSearchQuery /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, routing?: never, _source?: never, docvalue_fields?: never, stored_fields?: never, fields?: never, filter?: never, knn?: never } @@ -1130,13 +1130,13 @@ export type MsearchResponse = MsearchMultiSearchItem | ErrorResponseBase export interface MsearchTemplateRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. */ +/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. */ index?: Indices /** If `true`, network round-trips are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean - /** Maximum number of concurrent searches the API can run. */ + /** The maximum number of concurrent searches the API can run. */ max_concurrent_searches?: long - /** The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`. */ + /** The type of the search operation. */ search_type?: SearchType /** If `true`, the response returns `hits.total` as an integer. 
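// Editor's sketch for the KnnSearchRequest typed above; the index name,
// dense_vector field, and toy 4-dimensional vector are assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

const res = await client.knnSearch({
  index: 'image-embeddings',
  knn: {
    field: 'vector',                    // dense_vector field to search
    query_vector: [0.1, 0.2, 0.3, 0.4],
    k: 10,                              // nearest neighbours to return
    num_candidates: 100                 // candidates examined per shard
  },
  _source: ['title']                    // trim what comes back in hits._source
})
console.log(res.hits.hits)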
If `false`, it returns `hits.total` as an object. */ rest_total_hits_as_int?: boolean @@ -1178,9 +1178,9 @@ export interface MtermvectorsOperation { } export interface MtermvectorsRequest extends RequestBase { -/** Name of the index that contains the documents. */ +/** The name of the index that contains the documents. */ index?: IndexName - /** Comma-separated list or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ + /** A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */ field_statistics?: boolean @@ -1190,21 +1190,21 @@ export interface MtermvectorsRequest extends RequestBase { payloads?: boolean /** If `true`, the response includes term positions. */ positions?: boolean - /** Specifies the node or shard the operation should be performed on. Random by default. */ + /** The node or shard the operation should be performed on. It is random by default. */ preference?: string /** If true, the request is real-time as opposed to near-real-time. */ realtime?: boolean - /** Custom value used to route operations to a specific shard. */ + /** A custom value used to route operations to a specific shard. */ routing?: Routing /** If true, the response includes term frequency and document frequency. */ term_statistics?: boolean /** If `true`, returns the document version as part of a hit. */ version?: VersionNumber - /** Specific version type. */ + /** The version type. */ version_type?: VersionType - /** Array of existing or artificial documents. */ + /** An array of existing or artificial documents. */ docs?: MtermvectorsOperation[] - /** Simplified syntax to specify documents by their ID if they're in the same index. */ + /** A simplified syntax to specify documents by their ID if they're in the same index. */ ids?: Id[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, docs?: never, ids?: never } @@ -1264,15 +1264,15 @@ export interface PingRequest extends RequestBase { export type PingResponse = boolean export interface PutScriptRequest extends RequestBase { -/** Identifier for the stored script or search template. Must be unique within the cluster. */ +/** The identifier for the stored script or search template. It must be unique within the cluster. */ id: Id - /** Context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. */ + /** The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. */ context?: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration - /** Contains the script or search template, its parameters, and its language. */ + /** The script or search template, its parameters, and its language. */ script: StoredScript /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, context?: never, master_timeout?: never, timeout?: never, script?: never } @@ -1354,7 +1354,7 @@ export interface RankEvalRankEvalRequestItem { } export interface RankEvalRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. */ +/** A comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. */ index?: Indices /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean @@ -1515,12 +1515,12 @@ export interface ReindexRethrottleResponse { } export interface RenderSearchTemplateRequest extends RequestBase { -/** ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. */ +/** The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. */ id?: Id file?: string /** Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. */ params?: Record - /** An inline search template. Supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. */ + /** An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. */ source?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, file?: never, params?: never, source?: never } @@ -1562,7 +1562,7 @@ export interface ScrollRequest extends RequestBase { scroll_id?: ScrollId /** If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. */ rest_total_hits_as_int?: boolean - /** Period to retain the search context for scrolling. */ + /** The period to retain the search context for scrolling. 
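// Editor's sketch pairing the stored-script parameters documented above with
// the render API; the template id and Mustache source are assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.putScript({
  id: 'my-search-template',
  context: 'search',   // compile eagerly for the search context to catch errors
  script: {
    lang: 'mustache',
    source: '{"query":{"match":{"message":"{{query_string}}"}}}'
  }
})

// Render the stored template without executing a search.
const rendered = await client.renderSearchTemplate({
  id: 'my-search-template',
  params: { query_string: 'hello world' }
})
console.log(rendered.template_output)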
*/ scroll?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { scroll_id?: never, rest_total_hits_as_int?: never, scroll?: never } @@ -2307,33 +2307,33 @@ export interface SearchMvtRequest extends RequestBase { x: SearchMvtCoordinate /** Y coordinate for the vector tile to search */ y: SearchMvtCoordinate - /** Sub-aggregations for the geotile_grid. Supports the following aggregation types: - avg - cardinality - max - min - sum */ + /** Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. */ aggs?: Record - /** Size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. */ + /** The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. */ buffer?: integer - /** If false, the meta layer’s feature is the bounding box of the tile. If true, the meta layer’s feature is a bounding box resulting from a geo_bounds aggregation. The aggregation runs on values that intersect the // tile with wrap_longitude set to false. The resulting bounding box may be larger than the vector tile. */ + /** If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `//` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile. */ exact_bounds?: boolean - /** Size, in pixels, of a side of the tile. Vector tiles are square with equal sides. */ + /** The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. */ extent?: integer - /** Fields to return in the `hits` layer. Supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. */ + /** The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. */ fields?: Fields - /** Aggregation used to create a grid for the `field`. */ + /** The aggregation used to create a grid for the `field`. */ grid_agg?: SearchMvtGridAggregationType - /** Additional zoom levels available through the aggs layer. For example, if is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don’t include the aggs layer. */ + /** Additional zoom levels available through the aggs layer. For example, if `` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer. */ grid_precision?: integer - /** Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon of the cells bounding box. If 'point' each feature is a Point that is the centroid of the cell. */ + /** Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. 
If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell. */ grid_type?: SearchMvtGridType - /** Query DSL used to filter documents for the search. */ + /** The query DSL used to filter documents for the search. */ query?: QueryDslQueryContainer /** Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields - /** Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don’t include the hits layer. */ + /** The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer. */ size?: integer - /** Sorts features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box’s diagonal length, from longest to shortest. */ + /** Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. */ sort?: Sort - /** Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. */ + /** The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. */ track_total_hits?: SearchTrackHits - /** If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. */ + /** If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`. */ with_labels?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never } @@ -2352,7 +2352,7 @@ export type SearchMvtGridType = 'grid' | 'point' | 'centroid' export type SearchMvtZoomLevel = integer export interface SearchShardsRequest extends RequestBase { -/** Returns the indices and shards that a search request would be executed against. */ +/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
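// Editor's sketch for SearchMvtRequest above; index, geo field, and tile
// coordinates are assumptions. The response is a binary Mapbox vector tile,
// so the client hands back raw bytes rather than parsed JSON.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

const tile = await client.searchMvt({
  index: 'museums',
  field: 'location',    // geo_point or geo_shape field to render
  zoom: 13, x: 4207, y: 2692,
  grid_precision: 2,    // extra zoom levels exposed through the aggs layer
  grid_type: 'point',   // each cell becomes its centroid point
  size: 1000            // cap on features in the hits layer
})
console.log(tile.byteLength)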
*/ index?: Indices /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean @@ -2362,11 +2362,11 @@ export interface SearchShardsRequest extends RequestBase { ignore_unavailable?: boolean /** If `true`, the request retrieves information from the local node only. */ local?: boolean - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration - /** Specifies the node or shard the operation should be performed on. Random by default. */ + /** The node or shard the operation should be performed on. It is random by default. */ preference?: string - /** Custom value used to route operations to a specific shard. */ + /** A custom value used to route operations to a specific shard. */ routing?: Routing /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, preference?: never, routing?: never } @@ -2398,39 +2398,39 @@ export interface SearchShardsShardStoreIndex { } export interface SearchTemplateRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (*). */ +/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). */ index?: Indices /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean /** If `true`, network round-trips are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. */ ignore_throttled?: boolean /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Specifies the node or shard the operation should be performed on. Random by default. */ + /** The node or shard the operation should be performed on. It is random by default.
*/ preference?: string - /** Custom value used to route operations to a specific shard. */ + /** A custom value used to route operations to a specific shard. */ routing?: Routing /** Specifies how long a consistent view of the index should be maintained for scrolled search. */ scroll?: Duration /** The type of the search operation. */ search_type?: SearchType - /** If true, hits.total are rendered as an integer in the response. */ + /** If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object. */ rest_total_hits_as_int?: boolean /** If `true`, the response prefixes aggregation and suggester names with their respective types. */ typed_keys?: boolean - /** If `true`, returns detailed information about score calculation as part of each hit. */ + /** If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. */ explain?: boolean - /** ID of the search template to use. If no source is specified, this parameter is required. */ + /** The ID of the search template to use. If no `source` is specified, this parameter is required. */ id?: Id /** Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. */ params?: Record /** If `true`, the query execution is profiled. */ profile?: boolean - /** An inline search template. Supports the same parameters as the search API's request body. Also supports Mustache variables. If no id is specified, this parameter is required. */ + /** An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. */ source?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } @@ -2456,20 +2456,21 @@ export interface SearchTemplateResponse { } export interface TermsEnumRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported. */ +/** A comma-separated list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: IndexName /** The string to match at the start of indexed terms. If not provided, all terms in the field are considered. */ field: Field - /** How many matching terms to return. */ + /** The number of matching terms to return. */ size?: integer - /** The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded the complete flag set to false in the response and the results may be partial or empty. */ + /** The maximum length of time to spend collecting results. If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty. */ timeout?: Duration - /** When true the provided search string is matched against index terms without case sensitivity.
*/ + /** When `true`, the provided search string is matched against index terms without case sensitivity. */ case_insensitive?: boolean - /** Allows to filter an index shard if the provided query rewrites to match_none. */ + /** Filter an index shard if the provided query rewrites to `match_none`. */ index_filter?: QueryDslQueryContainer - /** The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request. */ + /** The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. */ string?: string + /** The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. */ search_after?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, field?: never, size?: never, timeout?: never, case_insensitive?: never, index_filter?: never, string?: never, search_after?: never } @@ -2500,13 +2501,13 @@ export interface TermvectorsFilter { } export interface TermvectorsRequest extends RequestBase { -/** Name of the index that contains the document. */ +/** The name of the index that contains the document. */ index: IndexName - /** Unique identifier of the document. */ + /** A unique identifier for the document. */ id?: Id - /** Comma-separated list or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ + /** A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields - /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */ + /** If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). */ field_statistics?: boolean /** If `true`, the response includes term offsets. */ offsets?: boolean @@ -2514,23 +2515,23 @@ export interface TermvectorsRequest extends RequestBase { payloads?: boolean /** If `true`, the response includes term positions. */ positions?: boolean - /** Specifies the node or shard the operation should be performed on. Random by default. */ + /** The node or shard the operation should be performed on. It is random by default. */ preference?: string /** If true, the request is real-time as opposed to near-real-time. */ realtime?: boolean - /** Custom value used to route operations to a specific shard. */ + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing - /** If `true`, the response includes term frequency and document frequency. */ + /** If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). 
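// Editor's sketch of the search_after pagination described for the terms enum
// API above; the index and field names are assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

let after: string | undefined
do {
  const page = await client.termsEnum({
    index: 'stackoverflow',
    field: 'tags',
    string: 'kiba',        // prefix the returned terms must start with
    size: 100,
    search_after: after    // resume from the last term of the previous page
  })
  for (const term of page.terms) console.log(term)
  // A short page means we have drained the matching terms.
  after = page.terms.length === 100 ? page.terms[page.terms.length - 1] : undefined
} while (after !== undefined)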
By default these values are not returned since term statistics can have a serious performance impact. */ term_statistics?: boolean /** If `true`, returns the document version as part of a hit. */ version?: VersionNumber - /** Specific version type. */ + /** The version type. */ version_type?: VersionType /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ doc?: TDocument - /** Filter terms based on their tf-idf scores. */ + /** Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query. */ filter?: TermvectorsFilter - /** Overrides the default per-field analyzer. */ + /** Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. */ per_field_analyzer?: Record /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, id?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, doc?: never, filter?: never, per_field_analyzer?: never } @@ -2897,7 +2898,7 @@ export interface ErrorResponseBase { status: integer } -export type EsqlColumns = ArrayBuffer +export type EsqlResult = ArrayBuffer export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' @@ -9781,9 +9782,9 @@ export interface CcrFollowInfoResponse { } export interface CcrFollowStatsRequest extends RequestBase { -/** A comma-separated list of index patterns; use `_all` to perform the operation on all indices */ +/** A comma-delimited list of index patterns. */ index: Indices - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, timeout?: never } @@ -9829,9 +9830,9 @@ export interface CcrGetAutoFollowPatternAutoFollowPatternSummary { } export interface CcrGetAutoFollowPatternRequest extends RequestBase { -/** Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. */ +/** The auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. */ name?: Name - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body.
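// Editor's sketch for the term vectors request typed above, including the
// tf-idf filter it documents; index, document id, and field are assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

const tv = await client.termvectors({
  index: 'articles',
  id: '1',
  fields: ['body'],
  term_statistics: true,   // ttf and doc_freq are off by default (they are costly)
  filter: {
    max_num_terms: 25,     // keep only the best-scoring terms
    min_term_freq: 2
  }
})
console.log(tv.term_vectors)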
*/ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -9844,9 +9845,9 @@ export interface CcrGetAutoFollowPatternResponse { } export interface CcrPauseAutoFollowPatternRequest extends RequestBase { -/** The name of the auto follow pattern that should pause discovering new indices to follow. */ +/** The name of the auto-follow pattern to pause. */ name: Name - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -9857,9 +9858,9 @@ export interface CcrPauseAutoFollowPatternRequest extends RequestBase { export type CcrPauseAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrPauseFollowRequest extends RequestBase { -/** The name of the follower index that should pause following its leader index. */ +/** The name of the follower index. */ index: IndexName - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } @@ -9913,9 +9914,9 @@ export interface CcrPutAutoFollowPatternRequest extends RequestBase { export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeAutoFollowPatternRequest extends RequestBase { -/** The name of the auto follow pattern to resume discovering new indices to follow. */ +/** The name of the auto-follow pattern to resume. */ name: Name - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -9967,9 +9968,9 @@ export interface CcrStatsFollowStats { } export interface CcrStatsRequest extends RequestBase { -/** Period to wait for a connection to the master node. */ +/** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } @@ -9983,9 +9984,9 @@ export interface CcrStatsResponse { } export interface CcrUnfollowRequest extends RequestBase { -/** The name of the follower index that should be turned into a regular index. */ +/** The name of the follower index. */ index: IndexName - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } @@ -10004,6 +10005,7 @@ export interface ClusterComponentTemplateNode { template: ClusterComponentTemplateSummary version?: VersionNumber _meta?: Metadata + deprecated?: boolean } export interface ClusterComponentTemplateSummary { @@ -10421,6 +10423,7 @@ export interface ClusterRemoteInfoClusterRemoteProxyInfo { server_name: string num_proxy_sockets_connected: integer max_proxy_socket_connections: integer + cluster_credentials?: string } export interface ClusterRemoteInfoClusterRemoteSniffInfo { @@ -11791,7 +11794,9 @@ export interface EqlSearchRequest extends RequestBase { keep_alive?: Duration keep_on_completion?: boolean wait_for_completion_timeout?: Duration + /** Allow query execution even in the case of shard failures. If `true`, the query will keep running and will return results based on the available shards. For sequences, the behavior can be further refined using `allow_partial_sequence_results`. */ allow_partial_search_results?: boolean + /** This flag applies only to sequences and has an effect only if `allow_partial_search_results=true`. If `true`, the sequence query will return results based on the available shards, ignoring the others. If `false`, the sequence query will return successfully, but will always have empty results. */ allow_partial_sequence_results?: boolean /** For basic queries, the maximum number of matching events to return. Defaults to 10 */ size?: uint @@ -11860,11 +11865,7 @@ export interface EsqlAsyncQueryRequest extends RequestBase { querystring?: { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never } } -export interface EsqlAsyncQueryResponse { - columns?: EsqlColumns - id?: string - is_running: boolean -} +export type EsqlAsyncQueryResponse = EsqlResult export interface EsqlAsyncQueryDeleteRequest extends RequestBase { /** The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.
*/ @@ -11892,11 +11893,21 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase { querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never } } -export interface EsqlAsyncQueryGetResponse { - columns?: EsqlColumns - is_running: boolean +export type EsqlAsyncQueryGetResponse = EsqlResult + +export interface EsqlAsyncQueryStopRequest extends RequestBase { +/** The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ + id: Id + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ + drop_null_columns?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never } } +export type EsqlAsyncQueryStopResponse = EsqlResult + export interface EsqlQueryRequest extends RequestBase { /** A short version of the Accept header, e.g. json, yaml. */ format?: EsqlEsqlFormat @@ -11923,7 +11934,7 @@ export interface EsqlQueryRequest extends RequestBase { querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never } } -export type EsqlQueryResponse = EsqlColumns +export type EsqlQueryResponse = EsqlResult export interface FeaturesFeature { name: string @@ -12982,7 +12993,7 @@ export interface IndicesSoftDeletes { retention_lease?: IndicesRetentionLease } -export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic' +export type IndicesSourceMode = 'DISABLED' | 'STORED' | 'SYNTHETIC' export interface IndicesStorage { type: IndicesStorageType @@ -13022,19 +13033,19 @@ export interface IndicesAddBlockIndicesBlockStatus { } export interface IndicesAddBlockRequest extends RequestBase { -/** A comma separated list of indices to add a block to */ +/** A comma-separated list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you are adding blocks to. To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ index: IndexName - /** The block to add (one of read, write, read_only or metadata) */ + /** The block type to add to the index. */ block: IndicesAddBlockIndicesBlockOptions - /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. 
For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ + /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards - /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Specify timeout for connection to master */ + /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration - /** Explicit operation timeout */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } @@ -14394,20 +14405,22 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult export interface IndicesResolveClusterRequest extends RequestBase { -/** Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. */ - name: Names - /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ +/** A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. If no index expression is specified, information about all remote clusters configured on the local cluster is returned without doing any index matching */ + name?: Names + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ expand_wildcards?: ExpandWildcards - /** If true, concrete, expanded or aliased indices are ignored when frozen. Defaults to false. */ + /** If true, concrete, expanded, or aliased indices are ignored when frozen. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ ignore_throttled?: boolean - /** If false, the request returns an error if it targets a missing or closed index. Defaults to false. */ + /** If false, the request returns an error if it targets a missing or closed index. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ ignore_unavailable?: boolean + /** The maximum time to wait for remote clusters to respond. If a remote cluster does not respond within this timeout period, the API response will show the cluster as not connected and include an error message that the request timed out. The default timeout is unset and the query can take as long as the networking layer is configured to wait for remote clusters that are not responding (typically 30 seconds). */ + timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never } + body?: string | { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never } + querystring?: { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, timeout?: never } } export interface IndicesResolveClusterResolveClusterInfo { @@ -15066,11 +15079,11 @@ export interface InferenceTextEmbeddingResult { export interface InferenceDeleteRequest extends RequestBase { /** The task type */ task_type?: InferenceTaskType - /** The inference Id */ + /** The inference identifier. */ inference_id: Id - /** When true, the endpoint is not deleted, and a list of ingest processors which reference this endpoint is returned */ + /** When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. 
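// Editor's sketch for the updated resolve-cluster options above; the remote
// cluster alias 'cluster_one' is an assumption.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

// With no index expression, the API now reports on every configured remote.
const everything = await client.indices.resolveCluster()

// With an expression, the index options plus the new timeout apply.
const scoped = await client.indices.resolveCluster({
  name: 'cluster_one:logs-*',
  timeout: '5s'   // report unresponsive remotes as not connected after 5s
})
console.log(everything, scoped)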
*/ dry_run?: boolean - /** When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields */ + /** When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. */ force?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, dry_run?: never, force?: never } @@ -15096,17 +15109,17 @@ export interface InferenceGetResponse { } export interface InferenceInferenceRequest extends RequestBase { -/** The task type */ +/** The type of inference task that the model performs. */ task_type?: InferenceTaskType - /** The inference Id */ + /** The unique identifier for the inference endpoint. */ inference_id: Id - /** Specifies the amount of time to wait for the inference request to complete. */ + /** The amount of time to wait for the inference request to complete. */ timeout?: Duration - /** Query input, required for rerank task. Not required for other tasks. */ + /** The query input, which is required only for the `rerank` task. It is not required for other tasks. */ query?: string - /** Inference input. Either a string or an array of strings. */ + /** The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. */ input: string | string[] - /** Optional task settings */ + /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ task_settings?: InferenceTaskSettings /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } @@ -15759,9 +15772,9 @@ export interface IngestWeb { export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { /** A comma-separated list of geoip database configurations to delete */ id: Ids - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } @@ -15839,7 +15852,7 @@ export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata { } export interface IngestGetGeoipDatabaseRequest extends RequestBase { -/** Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. */ +/** A comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. 
*/ id?: Ids /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -15965,13 +15978,13 @@ export interface IngestPutPipelineRequest extends RequestBase { export type IngestPutPipelineResponse = AcknowledgedResponseBase export interface IngestSimulateRequest extends RequestBase { -/** Pipeline to test. If you don’t specify a `pipeline` in the request body, this parameter is required. */ +/** The pipeline to test. If you don't specify a `pipeline` in the request body, this parameter is required. */ id?: Id /** If `true`, the response includes output data for each processor in the executed pipeline. */ verbose?: boolean /** Sample documents to test in the pipeline. */ docs: IngestDocument[] - /** Pipeline to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. */ + /** The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline?: IngestPipeline /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, verbose?: never, docs?: never, pipeline?: never } @@ -16001,9 +16014,9 @@ export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' export interface LicenseDeleteRequest extends RequestBase { -/** Period to wait for a connection to the master node. */ +/** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } @@ -16073,9 +16086,9 @@ export interface LicensePostAcknowledgement { export interface LicensePostRequest extends RequestBase { /** Specifies whether you acknowledge the license changes. */ acknowledge?: boolean - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration license?: LicenseLicense /** A sequence of one or more JSON documents containing the license information. 
*/ @@ -16216,6 +16229,8 @@ export interface MigrationDeprecationsResponse { data_streams: Record node_settings: MigrationDeprecationsDeprecation[] ml_settings: MigrationDeprecationsDeprecation[] + templates: Record + ilm_policies: Record } export interface MigrationGetFeatureUpgradeStatusMigrationFeature { @@ -20885,7 +20900,7 @@ export interface SearchApplicationSearchApplicationTemplate { } export interface SearchApplicationDeleteRequest extends RequestBase { -/** The name of the search application to delete */ +/** The name of the search application to delete. */ name: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -23523,12 +23538,7 @@ export interface SnapshotRepositoryAnalyzeDetailsInfo { write_elapsed_nanos: DurationValue write_throttled: Duration write_throttled_nanos: DurationValue - writer_node: SnapshotRepositoryAnalyzeNodeInfo -} - -export interface SnapshotRepositoryAnalyzeNodeInfo { - id: Id - name: Name + writer_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo } export interface SnapshotRepositoryAnalyzeReadBlobDetails { @@ -23538,7 +23548,7 @@ export interface SnapshotRepositoryAnalyzeReadBlobDetails { first_byte_time?: Duration first_byte_time_nanos: DurationValue found: boolean - node: SnapshotRepositoryAnalyzeNodeInfo + node: SnapshotRepositoryAnalyzeSnapshotNodeInfo throttled?: Duration throttled_nanos?: DurationValue } @@ -23594,7 +23604,7 @@ export interface SnapshotRepositoryAnalyzeResponse { blob_count: integer blob_path: string concurrency: integer - coordinating_node: SnapshotRepositoryAnalyzeNodeInfo + coordinating_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo delete_elapsed: Duration delete_elapsed_nanos: DurationValue details: SnapshotRepositoryAnalyzeDetailsInfo @@ -23613,6 +23623,11 @@ export interface SnapshotRepositoryAnalyzeResponse { summary: SnapshotRepositoryAnalyzeSummaryInfo } +export interface SnapshotRepositoryAnalyzeSnapshotNodeInfo { + id: Id + name: Name +} + export interface SnapshotRepositoryAnalyzeSummaryInfo { read: SnapshotRepositoryAnalyzeReadSummaryInfo write: SnapshotRepositoryAnalyzeWriteSummaryInfo @@ -24843,15 +24858,15 @@ export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursd export interface WatcherEmail { id?: Id - bcc?: string[] + bcc?: string | string[] body?: WatcherEmailBody - cc?: string[] + cc?: string | string[] from?: string priority?: WatcherEmailPriority - reply_to?: string[] + reply_to?: string | string[] sent_date?: DateTime subject: string - to: string[] + to: string | string[] attachments?: Record } From 947e09e62adef78568f845fb198b00a34672b547 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 3 Feb 2025 12:51:53 -0600 Subject: [PATCH 467/647] Drop serverless side-port automation (#2600) --- .github/workflows/serverless-patch.sh | 43 --------------------- .github/workflows/serverless-patch.yml | 53 -------------------------- 2 files changed, 96 deletions(-) delete mode 100755 .github/workflows/serverless-patch.sh delete mode 100644 .github/workflows/serverless-patch.yml diff --git a/.github/workflows/serverless-patch.sh b/.github/workflows/serverless-patch.sh deleted file mode 100755 index a38eda9ec..000000000 --- a/.github/workflows/serverless-patch.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash - -set -exuo pipefail - -merge_commit_sha=$(jq -r '.pull_request.merge_commit_sha' "$GITHUB_EVENT_PATH") -pull_request_id=$(jq -r '.pull_request.number' "$GITHUB_EVENT_PATH") 
-pr_shortcode="elastic/elasticsearch-js#$pull_request_id" - -# generate patch file -cd "$GITHUB_WORKSPACE/stack" -git format-patch -1 --stdout "$merge_commit_sha" > /tmp/patch.diff - -# set committer info -git config --global user.email "elasticmachine@users.noreply.github.com" -git config --global user.name "Elastic Machine" - -# apply patch file -cd "$GITHUB_WORKSPACE/serverless" -git am -C1 --reject /tmp/patch.diff || git am --quit - -# generate PR body comment -comment="Patch applied from $pr_shortcode" - -# enumerate rejected patches in PR comment -has_rejects='false' -for f in ./**/*.rej; do - has_rejects='true' - comment="$comment - -## Rejected patch \`$f\` must be resolved: - -\`\`\`diff -$(cat "$f") -\`\`\` -" -done - -# delete .rej files -rm -fv ./**/*.rej - -# send data to output parameters -echo "$comment" > /tmp/pr_body -echo "PR_DRAFT=$has_rejects" >> "$GITHUB_OUTPUT" diff --git a/.github/workflows/serverless-patch.yml b/.github/workflows/serverless-patch.yml deleted file mode 100644 index c83e3ec0e..000000000 --- a/.github/workflows/serverless-patch.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -name: Apply PR changes to serverless -on: - pull_request_target: - types: - - closed - - labeled - -jobs: - apply-patch: - name: Apply patch - runs-on: ubuntu-latest - # Only react to merged PRs for security reasons. - # See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target. - if: > - github.event.pull_request.merged - && ( - ( - github.event.action == 'closed' - && contains(github.event.pull_request.labels.*.name, 'apply-to-serverless') - ) - || - ( - github.event.action == 'labeled' - && github.event.label.name == 'apply-to-serverless' - ) - ) - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - with: - persist-credentials: false - repository: elastic/elasticsearch-js - ref: main - path: stack - fetch-depth: 0 - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - with: - persist-credentials: false - repository: elastic/elasticsearch-serverless-js - ref: main - path: serverless - - name: Apply patch from stack to serverless - id: apply-patch - run: $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh - - uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f # v7 - with: - token: ${{ secrets.GH_TOKEN }} - path: serverless - title: "Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}" - commit-message: "Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}" - body-path: /tmp/pr_body - draft: "${{ steps.apply-patch.outputs.PR_DRAFT }}" - add-paths: ":!*.rej" From 172180cb21cf0097aa97fe769d14d2d29b4e6055 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 3 Feb 2025 12:52:26 -0600 Subject: [PATCH 468/647] Report correct transport connection type in telemetry (#2599) Fixes #2324 --- src/client.ts | 9 ++++++++- test/unit/client.test.ts | 40 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 47 insertions(+), 2 deletions(-) diff --git a/src/client.ts b/src/client.ts index fcbc5d35b..7f9f8fabe 100644 --- a/src/client.ts +++ b/src/client.ts @@ -287,7 +287,14 @@ export default class Client extends API { } if (options.enableMetaHeader) { - options.headers['x-elastic-client-meta'] = `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}` + let clientMeta = `es=${clientVersion},js=${nodeVersion},t=${transportVersion}` + if (options.Connection === UndiciConnection) { + clientMeta 
+= `,un=${nodeVersion}` + } else { + // assumes HttpConnection + clientMeta += `,hc=${nodeVersion}` + } + options.headers['x-elastic-client-meta'] = clientMeta } this.name = options.name diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts index fc2af683c..2e64e5927 100644 --- a/test/unit/client.test.ts +++ b/test/unit/client.test.ts @@ -25,7 +25,7 @@ import FakeTimers from '@sinonjs/fake-timers' import { buildServer, connection } from '../utils' import { Client, errors } from '../..' import * as symbols from '@elastic/transport/lib/symbols' -import { BaseConnectionPool, CloudConnectionPool, WeightedConnectionPool } from '@elastic/transport' +import { BaseConnectionPool, CloudConnectionPool, WeightedConnectionPool, HttpConnection } from '@elastic/transport' let clientVersion: string = require('../../package.json').version // eslint-disable-line if (clientVersion.includes('-')) { @@ -404,6 +404,44 @@ test('Meta header disabled', async t => { await client.transport.request({ method: 'GET', path: '/' }) }) +test('Meta header indicates when UndiciConnection is used', async t => { + t.plan(1) + + function handler (req: http.IncomingMessage, res: http.ServerResponse) { + t.equal(req.headers['x-elastic-client-meta'], `es=${clientVersion},js=${nodeVersion},t=${transportVersion},un=${nodeVersion}`) + res.end('ok') + } + + const [{ port }, server] = await buildServer(handler) + + const client = new Client({ + node: `http://localhost:${port}`, + // Connection: UndiciConnection is the default + }) + + await client.transport.request({ method: 'GET', path: '/' }) + server.stop() +}) + +test('Meta header indicates when HttpConnection is used', async t => { + t.plan(1) + + function handler (req: http.IncomingMessage, res: http.ServerResponse) { + t.equal(req.headers['x-elastic-client-meta'], `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}`) + res.end('ok') + } + + const [{ port }, server] = await buildServer(handler) + + const client = new Client({ + node: `http://localhost:${port}`, + Connection: HttpConnection, + }) + + await client.transport.request({ method: 'GET', path: '/' }) + server.stop() +}) + test('caFingerprint', t => { const client = new Client({ node: 'https://localhost:9200/', From a25bc972971203facb8bad08b6aa81a30a97800d Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 10 Feb 2025 12:52:56 -0600 Subject: [PATCH 469/647] Update dependency @types/node to v22.13.1 (#2607) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 902abf626..a5e2f8af3 100644 --- a/package.json +++ b/package.json @@ -60,7 +60,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "22.10.7", + "@types/node": "22.13.1", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 9e4572f4e1bc00fc3066243de35e671bc33d88f3 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 10 Feb 2025 13:13:02 -0600 Subject: [PATCH 470/647] Another attempt to get Renovate to stop trying to upgrade a Docker image (#2614) The Docker image version is dictated by an upstream env var so the change it keeps trying to make will have no effect anyway. 
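
For context on the `x-elastic-client-meta` fix in PATCH 468 above, here is a minimal consumer-side sketch of the behavior the new unit tests assert. It assumes `Client` and `HttpConnection` are both importable from the package root (the tests above import `HttpConnection` from `@elastic/transport`); treat it as illustrative, not canonical.

[source,ts]
----
import { Client, HttpConnection } from '@elastic/elasticsearch'

// Default transport connection (UndiciConnection): the telemetry header
// now ends in `,un=<node version>` instead of always reporting `hc`.
const undiciClient = new Client({ node: 'http://localhost:9200/' })

// Explicitly opting into the Node.js http-based connection keeps the
// `,hc=<node version>` suffix.
const httpClient = new Client({
  node: 'http://localhost:9200/',
  Connection: HttpConnection
})
----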
---
 renovate.json | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/renovate.json b/renovate.json
index 8bc137193..287cfa2b9 100644
--- a/renovate.json
+++ b/renovate.json
@@ -21,6 +21,12 @@
         "dockerfile"
       ],
       "pinDigests": false
+    },
+    {
+      "matchDatasources": [
+        "docker"
+      ],
+      "pinDigests": false
     }
   ]
 }

From 9139662bcca454a0f9c4a23c5a01d5673c34840e Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Tue, 11 Feb 2025 17:09:49 +0100
Subject: [PATCH 471/647] Auto-generated API code (#2610)

Co-authored-by: Josh Mock
---
 docs/reference.asciidoc     |  27 ++++++--
 src/api/api/async_search.ts |   2 +-
 src/api/types.ts            | 123 ++++++++++++++++++++++--------------
 3 files changed, 98 insertions(+), 54 deletions(-)

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index 8aee6b840..505544757 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -149,6 +149,7 @@ client.bulk({ ... })
 * *Request (object):*
 ** *`index` (Optional, string)*: The name of the data stream, index, or index alias to perform bulk actions on.
 ** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])*
+** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message in case of parsing errors.
 ** *`list_executed_pipelines` (Optional, boolean)*: If `true`, the response will include the ingest pipelines that were run for each index or create.
 ** *`pipeline` (Optional, string)*: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
 ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`.
@@ -323,6 +324,7 @@ client.create({ id, index })
 ** *`id` (string)*: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format.
 ** *`index` (string)*: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index.
 ** *`document` (Optional, object)*: A document.
+** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message in case of parsing errors.
 ** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
 ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
 ** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
@@ -1031,6 +1033,7 @@ client.index({ index })
 ** *`document` (Optional, object)*: A document.
 ** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term.
 ** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number.
+** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message in case of parsing errors.
 ** *`op_type` (Optional, Enum("index" | "create"))*: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required.
 ** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter.
 ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
@@ -2140,6 +2143,7 @@ client.update({ id, index })
 ** *`upsert` (Optional, object)*: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run.
 ** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term.
 ** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number.
+** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message in case of parsing errors.
 ** *`lang` (Optional, string)*: The script language.
 ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes.
 ** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias.
@@ -2341,7 +2345,7 @@ client.asyncSearch.get({ id })
 * *Request (object):*
 ** *`id` (string)*: A unique identifier for the async search.
-** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search should be available in the cluster.
+** *`keep_alive` (Optional, string | -1 | 0)*: The length of time that the async search should be available in the cluster.
 When not specified, the `keep_alive` set with the corresponding submit async request will be used.
 Otherwise, it is possible to override the value and extend the validity of the request.
 When this period expires, the search, if still running, is cancelled.
Get the status of a previously submitted async search request given its identifier, without retrieving search results. -If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. +If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to: + +* The user or API key that submitted the original async search request. +* Users that have the `monitor` cluster privilege or greater privileges. https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation] [source,ts] @@ -2369,7 +2376,7 @@ client.asyncSearch.status({ id }) * *Request (object):* ** *`id` (string)*: A unique identifier for the async search. -** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search needs to be available. +** *`keep_alive` (Optional, string | -1 | 0)*: The length of time that the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. [discrete] @@ -3282,6 +3289,7 @@ If `false`, the request returns a 404 status code when there are no matches or o [discrete] ==== delete_auto_follow_pattern Delete auto-follow patterns. + Delete a collection of cross-cluster replication auto-follow patterns. https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern[Endpoint documentation] @@ -3294,8 +3302,10 @@ client.ccr.deleteAutoFollowPattern({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the auto follow pattern. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`name` (string)*: The auto-follow pattern collection to delete. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== follow @@ -3342,6 +3352,7 @@ remote Lucene segment files to the follower index. [discrete] ==== follow_info Get follower information. + Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. @@ -3355,8 +3366,10 @@ client.ccr.followInfo({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A list of index patterns; use `_all` to perform the operation on all indices -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`index` (string | string[])*: A comma-delimited list of follower index patterns. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== follow_stats diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index 256420631..b3dd631c1 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -128,7 +128,7 @@ export default class AsyncSearch { } /** - * Get the async search status. 
Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role.
+   * Get the async search status. Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to: * The user or API key that submitted the original async search request. * Users that have the `monitor` cluster privilege or greater privileges.
    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation}
    */
  async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/types.ts b/src/api/types.ts
index 03cc037eb..4f6b554dd 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -34,6 +34,8 @@ export interface BulkCreateOperation extends BulkWriteOperation {
 export interface BulkDeleteOperation extends BulkOperationBase {
 }

+export type BulkFailureStoreStatus = 'not_applicable_or_unknown' | 'used' | 'not_enabled' | 'failed'
+
 export interface BulkIndexOperation extends BulkWriteOperation {
 }

@@ -59,6 +61,8 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete'
 export interface BulkRequest extends RequestBase {
 /** The name of the data stream, index, or index alias to perform bulk actions on. */
 index?: IndexName
+ /** If `true`, the document source is included in the error message in case of parsing errors. */
+ include_source_on_error?: boolean
 /** If `true`, the response will include the ingest pipelines that were run for each index or create. */
 list_executed_pipelines?: boolean
 /** The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. */
@@ -83,9 +87,9 @@ export interface BulkRequest ex
 require_data_stream?: boolean
 operations?: (BulkOperationContainer | BulkUpdateAction | TDocument)[]
 /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { index?: never, list_executed_pipelines?: never, pipeline?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, timeout?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, operations?: never }
+  body?: string | { [key: string]: any } & { index?: never, include_source_on_error?: never, list_executed_pipelines?: never, pipeline?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, timeout?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, operations?: never }
 /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { index?: never, list_executed_pipelines?: never, pipeline?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, timeout?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, operations?: never }
+  querystring?: { [key: string]: any } & { index?: never, include_source_on_error?: never, list_executed_pipelines?: never, pipeline?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, timeout?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, operations?: never }
 }

 export interface BulkResponse {
@@ -99,6 +103,7 @@ export interface BulkResponseItem {
 _id?: string | null
 _index: string
 status: integer
+ failure_store?: BulkFailureStoreStatus
 error?: ErrorCause
 _primary_term?: long
 result?: string
@@ -207,6 +212,8 @@ export interface CreateRequest extends RequestBase {
 id: Id
 /** The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. */
 index: IndexName
+ /** If `true`, the document source is included in the error message in case of parsing errors. */
+ include_source_on_error?: boolean
 /** The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. */
 pipeline?: string
 /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. */
@@ -223,9 +230,9 @@ export interface CreateRequest extends RequestBase {
 wait_for_active_shards?: WaitForActiveShards
 document?: TDocument
 /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { id?: never, index?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never }
+  body?: string | { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never }
 /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { id?: never, index?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never }
+  querystring?: { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never }
 }

 export type CreateResponse = WriteResponseBase
@@ -895,6 +902,8 @@ export interface IndexRequest extends RequestBase {
 if_primary_term?: long
 /** Only perform the operation if the document has this sequence number. */
 if_seq_no?: SequenceNumber
+ /** If `true`, the document source is included in the error message in case of parsing errors. */
+ include_source_on_error?: boolean
 /** Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. */
 op_type?: OpType
 /** The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. */
@@ -915,9 +924,9 @@ export interface IndexRequest extends RequestBase {
 require_alias?: boolean
 document?: TDocument
 /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, document?: never }
+  body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, document?: never }
 /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, document?: never }
+  querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, document?: never }
 }

 export type IndexResponse = WriteResponseBase
@@ -2577,6 +2586,8 @@ export interface UpdateRequest
 if_primary_term?: long
 /** Only perform the operation if the document has this sequence number. */
 if_seq_no?: SequenceNumber
+ /** If `true`, the document source is included in the error message in case of parsing errors. */
+ include_source_on_error?: boolean
 /** The script language. */
 lang?: string
 /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. */
 refresh?: Refresh
@@ -2610,9 +2621,9 @@ export interface UpdateRequest
 /** If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run. */
 upsert?: TDocument
 /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never }
+  body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never }
 /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never }
+  querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never }
 }

 export type UpdateResponse = UpdateUpdateWriteResponseBase
@@ -5232,9 +5243,8 @@ export interface AnalysisEstonianAnalyzer {
 export interface AnalysisFingerprintAnalyzer {
 type: 'fingerprint'
 version?: VersionString
-  max_output_size: integer
-  preserve_original: boolean
-  separator: string
+  max_output_size?: integer
+  separator?: string
 stopwords?: AnalysisStopWords
 stopwords_path?: string
 }
@@ -5598,8 +5608,9 @@ export interface AnalysisPatternAnalyzer {
 version?: VersionString
 flags?: string
 lowercase?: boolean
-  pattern: string
+  pattern?: string
 stopwords?: AnalysisStopWords
+  stopwords_path?: string
 }

 export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase {
@@ -5756,6 +5767,7 @@ export interface AnalysisStandardAnalyzer {
 type: 'standard'
 max_token_length?: integer
 stopwords?: AnalysisStopWords
+  stopwords_path?: string
 }

 export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase {
@@ -6927,7 +6939,7 @@ export interface QueryDslPercolateQuery extends QueryDslQueryBase {

 export interface QueryDslPinnedDoc {
 _id: Id
-  _index: IndexName
+  _index?: IndexName
 }

 export interface QueryDslPinnedQuery extends QueryDslQueryBase {
@@ -6997,7 +7009,7 @@ export interface QueryDslQueryContainer {
 span_near?: QueryDslSpanNearQuery
 span_not?: QueryDslSpanNotQuery
 span_or?: QueryDslSpanOrQuery
-  span_term?: Partial>
+  span_term?: Partial>
 span_within?: QueryDslSpanWithinQuery
 sparse_vector?: QueryDslSparseVectorQuery
 term?: Partial>
@@ -7197,12 +7209,13 @@ export interface QueryDslSpanQuery {
 span_near?: QueryDslSpanNearQuery
 span_not?: QueryDslSpanNotQuery
span_or?: QueryDslSpanOrQuery - span_term?: Partial> + span_term?: Partial> span_within?: QueryDslSpanWithinQuery } export interface QueryDslSpanTermQuery extends QueryDslQueryBase { - value: string + value: FieldValue + term: FieldValue } export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { @@ -7344,7 +7357,7 @@ export type AsyncSearchDeleteResponse = AcknowledgedResponseBase export interface AsyncSearchGetRequest extends RequestBase { /** A unique identifier for the async search. */ id: Id - /** Specifies how long the async search should be available in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its saved results are deleted. */ + /** The length of time that the async search should be available in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its saved results are deleted. */ keep_alive?: Duration /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean @@ -7361,7 +7374,7 @@ export type AsyncSearchGetResponse + _version: SpecUtilsStringified + executed_pipelines: string[] + ignored_fields?: Record[] + error?: ErrorCause +} +export type SimulateIngestIngestDocumentSimulation = SimulateIngestIngestDocumentSimulationKeys +& { [property: string]: string | Id | IndexName | Record | SpecUtilsStringified | string[] | Record[] | ErrorCause } + export interface SimulateIngestRequest extends RequestBase { /** The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. 
*/ index?: IndexName @@ -22905,7 +22932,11 @@ export interface SimulateIngestRequest extends RequestBase { } export interface SimulateIngestResponse { - docs: IngestSimulateDocumentResult[] + docs: SimulateIngestSimulateIngestDocumentResult[] +} + +export interface SimulateIngestSimulateIngestDocumentResult { + doc?: SimulateIngestIngestDocumentSimulation } export interface SlmConfiguration { @@ -23125,7 +23156,7 @@ export type SlmStopResponse = AcknowledgedResponseBase export interface SnapshotAzureRepository extends SnapshotRepositoryBase { type: 'azure' - settings: SnapshotAzureRepositorySettings + settings?: SnapshotAzureRepositorySettings } export interface SnapshotAzureRepositorySettings extends SnapshotRepositorySettingsBase { From 4795a8c0d5ae882ed9f2ec9bc6144cd5542022e0 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 11 Feb 2025 11:19:47 -0600 Subject: [PATCH 472/647] Slack alerts for integration test suite (#2615) * Notify Slack on integration test failure * Run integration tests on most recent minors * Whoops, wrong alpha --- catalog-info.yaml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/catalog-info.yaml b/catalog-info.yaml index 80d0514aa..4d1e41757 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -28,6 +28,9 @@ spec: spec: repository: elastic/elasticsearch-js pipeline_file: .buildkite/pipeline.yml + env: + ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" + SLACK_NOTIFICATIONS_CHANNEL: "#devtools-notify-javascript" teams: devtools-team: access_level: MANAGE_BUILD_AND_READ @@ -45,6 +48,9 @@ spec: 8_x: branch: "8.x" cronline: "@daily" - 8_14: - branch: "8.16" + 8_17: + branch: "8.17" + cronline: "@daily" + 8_18: + branch: "8.18" cronline: "@daily" From c8504fe6164c783e71368980fffd812c482a2c67 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Tue, 18 Feb 2025 10:36:55 -0600 Subject: [PATCH 473/647] Update dependency @types/node to v22.13.4 (#2616) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index a5e2f8af3..95936c445 100644 --- a/package.json +++ b/package.json @@ -60,7 +60,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "22.13.1", + "@types/node": "22.13.4", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 20fb610d825691092144505edf0afcb105ce3848 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Tue, 18 Feb 2025 10:37:41 -0600 Subject: [PATCH 474/647] Update dependency tap to v21.0.2 (#2617) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 95936c445..562c1c018 100644 --- a/package.json +++ b/package.json @@ -80,7 +80,7 @@ "semver": "7.6.3", "split2": "4.2.0", "stoppable": "1.1.0", - "tap": "21.0.1", + "tap": "21.0.2", "ts-node": "10.9.2", "ts-standard": "12.0.2", "typescript": "5.7.3", From c5b2915b5a0ef7f9728c0fc03b4e569eeef87c0f Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 18 Feb 2025 17:40:56 +0100 Subject: [PATCH 475/647] Auto-generated API code (#2618) --- docs/reference.asciidoc | 24 +++++++++++++++--------- 
src/api/api/esql.ts | 4 ++-- src/api/types.ts | 27 ++++++++++++++++++--------- 3 files changed, 35 insertions(+), 20 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 505544757..9515f828a 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -1730,7 +1730,7 @@ client.search({ ... }) ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. ** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page. +** *`search_after` (Optional, number | number | string | boolean | null[])*: Used to retrieve the next page of hits using a set of sort values from the previous page. ** *`size` (Optional, number)*: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. ** *`slice` (Optional, { field, id, max })*: Split a scrolled search into multiple slices that can be consumed independently. ** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: A list of : pairs. @@ -2424,7 +2424,7 @@ not included in the search results. ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* +** *`search_after` (Optional, number | number | string | boolean | null[])* ** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. @@ -4990,13 +4990,16 @@ client.esql.asyncQuery({ query }) ** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. 
For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. ** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. ** *`locale` (Optional, string)* -** *`params` (Optional, number | number | string | boolean | null | User-defined value[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +** *`params` (Optional, number | number | string | boolean | null[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. ** *`profile` (Optional, boolean)*: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. ** *`tables` (Optional, Record>)*: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. +** *`include_ccs_metadata` (Optional, boolean)*: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` +object with information about the clusters that participated in the search along with info such as shards +count. ** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. It is valid only for the CSV format. ** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. @@ -5108,13 +5111,16 @@ client.esql.query({ query }) ** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. 
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. ** *`locale` (Optional, string)* -** *`params` (Optional, number | number | string | boolean | null | User-defined value[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +** *`params` (Optional, number | number | string | boolean | null[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. ** *`profile` (Optional, boolean)*: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. ** *`tables` (Optional, Record>)*: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. +** *`include_ccs_metadata` (Optional, boolean)*: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` +object with information about the clusters that participated in the search along with info such as shards +count. ** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, e.g. json, yaml. ** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format. ** *`drop_null_columns` (Optional, boolean)*: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? @@ -5284,7 +5290,7 @@ not included in the search results. ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. 
** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* ** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* +** *`search_after` (Optional, number | number | string | boolean | null[])* ** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. @@ -12932,7 +12938,7 @@ It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: The search after definition. +** *`search_after` (Optional, number | number | string | boolean | null[])*: The search after definition. ** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. @@ -12977,7 +12983,7 @@ In addition, sort can also be applied to the `_doc` field to sort by index order It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: The search after definition. +** *`search_after` (Optional, number | number | string | boolean | null[])*: The search after definition. [discrete] ==== query_user @@ -13015,7 +13021,7 @@ In addition, sort can also be applied to the `_doc` field to sort by index order It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: The search after definition +** *`search_after` (Optional, number | number | string | boolean | null[])*: The search after definition ** *`with_profile_uid` (Optional, boolean)*: Determines whether to retrieve the user profile UID, if it exists, for the users. [discrete] @@ -15757,7 +15763,7 @@ It must be non-negative. It must be non-negative. 
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query that filters the watches to be returned. ** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: One or more fields used to sort the search results. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Retrieve the next page of hits using a set of sort values from the previous page. +** *`search_after` (Optional, number | number | string | boolean | null[])*: Retrieve the next page of hits using a set of sort values from the previous page. [discrete] ==== start diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index a507e0952..d76ed6962 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -52,7 +52,7 @@ export default class Esql { async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables'] + const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -222,7 +222,7 @@ export default class Esql { async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables'] + const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/types.ts b/src/api/types.ts index 4f6b554dd..cabb92f1b 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -1992,7 +1992,7 @@ export interface SearchHit { matched_queries?: string[] | Record _nested?: SearchNestedIdentity _ignored?: string[] - ignored_field_values?: Record + ignored_field_values?: Record _shard?: string _node?: string _routing?: string @@ -2939,7 +2939,7 @@ export interface FieldSort { export type FieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos' -export type FieldValue = long | double | string | boolean | null | any +export type FieldValue = long | double | string | boolean | null export interface FielddataStats { evictions?: long @@ -11872,10 +11872,12 @@ export interface EsqlAsyncQueryRequest extends RequestBase { query: string /** Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. */ tables?: Record> + /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. */ + include_ccs_metadata?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never } + body?: string | { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never } + querystring?: { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } } export type EsqlAsyncQueryResponse = EsqlResult @@ -11941,10 +11943,12 @@ export interface EsqlQueryRequest extends RequestBase { query: string /** Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. */ tables?: Record> + /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. */ + include_ccs_metadata?: boolean /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never } + body?: string | { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never } + querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } } export type EsqlQueryResponse = EsqlResult @@ -12348,7 +12352,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged { age?: Duration failed_step?: Name failed_step_retry_count?: integer - index?: IndexName + index: IndexName index_creation_date?: DateTime index_creation_date_millis?: EpochTime is_auto_retryable_error?: boolean @@ -12358,7 +12362,11 @@ export interface IlmExplainLifecycleLifecycleExplainManaged { phase: Name phase_time?: DateTime phase_time_millis?: EpochTime - policy: Name + policy?: Name + previous_step_info?: Record + repository_name?: string + snapshot_name?: string + shrink_index_name?: string step?: Name step_info?: Record step_time?: DateTime @@ -12368,6 +12376,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged { } export interface IlmExplainLifecycleLifecycleExplainPhaseExecution { + phase_definition?: IlmPhase policy: Name version: VersionNumber modified_date_in_millis: EpochTime @@ -15066,7 +15075,7 @@ export interface InferenceInferenceResult { export interface InferenceRankedDocument { index: integer - score: float + relevance_score: float text?: string } From c99abab0585ae0bc59e64c573d96b8c7ee487dc8 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 24 Feb 2025 10:52:07 -0600 Subject: [PATCH 476/647] Update dependency @types/node to v22.13.5 (#2623) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 562c1c018..22ccac6c4 100644 --- a/package.json +++ b/package.json @@ -60,7 +60,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "22.13.4", + "@types/node": "22.13.5", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From a411cc7c7d4512670d0be363f5a9d6147be4a43c Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 24 Feb 2025 16:54:40 +0000 Subject: [PATCH 477/647] Auto-generated API code (#2627) --- docs/reference.asciidoc | 103 ++++++++++++++++++- src/api/api/indices.ts | 16 ++- src/api/api/ml.ts | 9 +- src/api/types.ts | 219 ++++++++++++++++++++++++++++------------ 4 files changed, 276 insertions(+), 71 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 9515f828a..fbcf6c39f 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ 
-2606,6 +2606,10 @@ client.cat.aliases({ ... }) * *Request (object):* ** *`name` (Optional, string | string[])*: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. @@ -2633,6 +2637,10 @@ client.cat.allocation({ ... }) * *Request (object):* ** *`node_id` (Optional, string | string[])*: A list of node identifiers or names used to limit the returned information. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -2662,6 +2670,10 @@ client.cat.componentTemplates({ ... }) ** *`name` (Optional, string)*: The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -2691,6 +2703,10 @@ client.cat.count({ ... }) ** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. [discrete] ==== fielddata @@ -2714,6 +2730,10 @@ client.cat.fielddata({ ... }) ** *`fields` (Optional, string | string[])*: List of fields used to limit returned information. To retrieve all fields, omit this parameter. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. 
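Nearly every `cat` API in this patch gains the same `h`/`s` pair, so one illustration covers them all. A sketch, assuming JSON output is wanted; the column names are real `_cat/indices` columns, while the index contents are hypothetical:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `h` selects the columns, `s` sorts the rows (append `:desc` to invert),
// and `format: 'json'` returns objects instead of a plain-text table.
const rows = await client.cat.indices({
  h: ['index', 'docs.count', 'store.size'],
  s: ['store.size:desc'],
  format: 'json'
})
console.log(rows)
----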
+** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. [discrete] ==== health @@ -2741,6 +2761,10 @@ client.cat.health({ ... }) * *Request (object):* ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. ** *`ts` (Optional, boolean)*: If true, returns `HH:MM:SS` and Unix epoch timestamps. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. [discrete] ==== help @@ -2793,6 +2817,10 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para ** *`pri` (Optional, boolean)*: If true, the response only includes information from primary shards. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. [discrete] ==== master @@ -2812,6 +2840,10 @@ client.cat.master({ ... }) ==== Arguments * *Request (object):* +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -2969,6 +3001,10 @@ client.cat.nodeattrs({ ... }) ==== Arguments * *Request (object):* +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -2995,6 +3031,10 @@ client.cat.nodes({ ... }) ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. ** *`full_id` (Optional, boolean | string)*: If `true`, return the full node ID. If `false`, return the shortened node ID. ** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. 
+** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. @@ -3015,6 +3055,10 @@ client.cat.pendingTasks({ ... }) ==== Arguments * *Request (object):* +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -3039,6 +3083,10 @@ client.cat.plugins({ ... }) ==== Arguments * *Request (object):* +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`include_bootstrap` (Optional, boolean)*: Include bootstrap plugins in the response ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed @@ -3070,6 +3118,10 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para ** *`active_only` (Optional, boolean)*: If `true`, the response only includes ongoing shard recoveries. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. ** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] @@ -3089,6 +3141,10 @@ client.cat.repositories({ ... }) ==== Arguments * *Request (object):* +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -3117,6 +3173,10 @@ client.cat.segments({ ... }) Supports wildcards (`*`). 
To target all data streams and indices, omit this parameter or use `*` or `_all`. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -3145,6 +3205,10 @@ client.cat.shards({ ... }) Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. @@ -3171,6 +3235,10 @@ Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. ** *`ignore_unavailable` (Optional, boolean)*: If `true`, the response does not include information from unavailable snapshots. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. @@ -3195,6 +3263,10 @@ client.cat.tasks({ ... }) ** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. ** *`nodes` (Optional, string[])*: Unique node identifiers, which are used to limit the response. ** *`parent_task_id` (Optional, string)*: The parent task identifier, which is used to limit the response. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. @@ -3220,6 +3292,10 @@ client.cat.templates({ ... }) * *Request (object):* ** *`name` (Optional, string)*: The name of the template to return. 
Accepts wildcard expressions. If omitted, all templates are returned. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -3246,6 +3322,10 @@ client.cat.threadPool({ ... }) * *Request (object):* ** *`thread_pool_patterns` (Optional, string | string[])*: A list of thread pool names used to limit the request. Accepts wildcard expressions. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. ** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed @@ -5494,6 +5574,9 @@ client.ilm.migrateToDataTiers({ ... }) ** *`node_attribute` (Optional, string)* ** *`dry_run` (Optional, boolean)*: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== move_to_step @@ -6600,7 +6683,6 @@ Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. ** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. [discrete] ==== get_index_template @@ -6932,7 +7014,12 @@ client.indices.putDataLifecycle({ name }) ** *`name` (string | string[])*: List of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. -** *`lifecycle` (Optional, { data_retention, downsampling, enabled })* +** *`data_retention` (Optional, string | -1 | 0)*: If defined, every document added to this data stream will be stored at least for this time frame. +Any time after this duration the document could be deleted. +When empty, every document in this data stream will be stored indefinitely. +** *`downsampling` (Optional, { rounds })*: The downsampling configuration to execute for the managed backing index after rollover. +** *`enabled` (Optional, boolean)*: If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. 
A data stream lifecycle +that's disabled (enabled: `false`) will have no effect on the data stream. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. @@ -7932,7 +8019,7 @@ client.inference.put({ inference_id }) * *Request (object):* ** *`inference_id` (string)*: The inference Id ** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type -** *`inference_config` (Optional, { service, service_settings, task_settings })* +** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })* [discrete] ==== stream_inference @@ -8007,7 +8094,7 @@ client.inference.update({ inference_id }) * *Request (object):* ** *`inference_id` (string)*: The unique identifier of the inference endpoint. ** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of inference task that the model performs. -** *`inference_config` (Optional, { service, service_settings, task_settings })* +** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })* [discrete] === ingest @@ -10273,6 +10360,9 @@ client.ml.startTrainedModelDeployment({ model_id }) * *Request (object):* ** *`model_id` (string)*: The unique identifier of the trained model. Currently, only PyTorch models are supported. +** *`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })*: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. ** *`cache_size` (Optional, number | string)*: The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. @@ -10283,6 +10373,7 @@ a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. +If adaptive_allocations is enabled, do not set this value, because it’s automatically set. ** *`priority` (Optional, Enum("normal" | "low"))*: The deployment priority. ** *`queue_capacity` (Optional, number)*: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests are rejected with a 429 error. @@ -10605,6 +10696,10 @@ a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. +If adaptive_allocations is enabled, do not set this value, because it’s automatically set. +** *`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })*: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. 
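Tying the two trained-model hunks above together: with `adaptive_allocations` enabled, Elasticsearch sizes the allocation count from the current load, so the number of allocations should not be set manually. A sketch, assuming a hypothetical PyTorch model ID; the object shape matches the `{ enabled, min_number_of_allocations, max_number_of_allocations }` signature shown in the docs above:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.ml.startTrainedModelDeployment({
  model_id: 'my-elser-model',      // hypothetical; must be an already-imported PyTorch model
  adaptive_allocations: {
    enabled: true,
    min_number_of_allocations: 1,  // optional lower bound
    max_number_of_allocations: 4   // optional upper bound
  }
})
----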
[discrete] ==== upgrade_job_snapshot diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index c39e5f3a2..8af3fb23d 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -1998,15 +1998,25 @@ export default class Indices { async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['lifecycle'] + const acceptedBody: string[] = ['data_retention', 'downsampling', 'enabled'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} - let body: any = params.body ?? undefined + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - body = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 52548d378..282fc38a5 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -2864,6 +2864,7 @@ export default class Ml { async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] + const acceptedBody: string[] = ['adaptive_allocations'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2878,7 +2879,11 @@ export default class Ml { } for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error @@ -3273,7 +3278,7 @@ export default class Ml { async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['number_of_allocations'] + const acceptedBody: string[] = ['number_of_allocations', 'adaptive_allocations'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/types.ts b/src/api/types.ts index cabb92f1b..e242e803c 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -7650,14 +7650,18 @@ export interface CatAliasesAliasesRecord { export interface CatAliasesRequest extends CatCatRequestBase { /** A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. 
Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicated that the request should never timeout, you can set it to `-1`. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { name?: never, h?: never, s?: never, expand_wildcards?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { name?: never, h?: never, s?: never, expand_wildcards?: never, master_timeout?: never } } export type CatAliasesResponse = CatAliasesAliasesRecord[] @@ -7703,14 +7707,18 @@ export interface CatAllocationRequest extends CatCatRequestBase { node_id?: NodeIds /** The unit used to display byte values. */ bytes?: Bytes + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { node_id?: never, bytes?: never, local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { node_id?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { node_id?: never, bytes?: never, local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { node_id?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never } } export type CatAllocationResponse = CatAllocationAllocationRecord[] @@ -7728,14 +7736,18 @@ export interface CatComponentTemplatesComponentTemplate { export interface CatComponentTemplatesRequest extends CatCatRequestBase { /** The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. */ name?: string + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. 
*/ + s?: Names /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean /** The period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } } export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] @@ -7757,10 +7769,14 @@ export interface CatCountCountRecord { export interface CatCountRequest extends CatCatRequestBase { /** A comma-separated list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never } + body?: string | { [key: string]: any } & { index?: never, h?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never } + querystring?: { [key: string]: any } & { index?: never, h?: never, s?: never } } export type CatCountResponse = CatCountCountRecord[] @@ -7782,10 +7798,14 @@ export interface CatFielddataRequest extends CatCatRequestBase { fields?: Fields /** The unit used to display byte values. */ bytes?: Bytes + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { fields?: never, bytes?: never } + body?: string | { [key: string]: any } & { fields?: never, bytes?: never, h?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { fields?: never, bytes?: never } + querystring?: { [key: string]: any } & { fields?: never, bytes?: never, h?: never, s?: never } } export type CatFielddataResponse = CatFielddataFielddataRecord[] @@ -7848,10 +7868,14 @@ export interface CatHealthRequest extends CatCatRequestBase { time?: TimeUnit /** If true, returns `HH:MM:SS` and Unix epoch timestamps. */ ts?: boolean + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. 
Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { time?: never, ts?: never } + body?: string | { [key: string]: any } & { time?: never, ts?: never, h?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { time?: never, ts?: never } + querystring?: { [key: string]: any } & { time?: never, ts?: never, h?: never, s?: never } } export type CatHealthResponse = CatHealthHealthRecord[] @@ -8174,10 +8198,14 @@ export interface CatIndicesRequest extends CatCatRequestBase { time?: TimeUnit /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never, h?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never, h?: never, s?: never } } export type CatIndicesResponse = CatIndicesIndicesRecord[] @@ -8192,14 +8220,18 @@ export interface CatMasterMasterRecord { } export interface CatMasterRequest extends CatCatRequestBase { -/** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ +/** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } } export type CatMasterResponse = CatMasterMasterRecord[] @@ -8609,14 +8641,18 @@ export interface CatNodeattrsNodeAttributesRecord { } export interface CatNodeattrsRequest extends CatCatRequestBase { -/** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ +/** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } } export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] @@ -8898,14 +8934,18 @@ export interface CatNodesRequest extends CatCatRequestBase { full_id?: boolean | string /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** Unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, master_timeout?: never, time?: never } + body?: string | { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never, time?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, master_timeout?: never, time?: never } + querystring?: { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never, time?: never } } export type CatNodesResponse = CatNodesNodesRecord[] @@ -8922,16 +8962,20 @@ export interface CatPendingTasksPendingTasksRecord { } export interface CatPendingTasksRequest extends CatCatRequestBase { -/** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ +/** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** Unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { local?: never, master_timeout?: never, time?: never } + body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never, time?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { local?: never, master_timeout?: never, time?: never } + querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never, time?: never } } export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] @@ -8951,16 +8995,20 @@ export interface CatPluginsPluginsRecord { } export interface CatPluginsRequest extends CatCatRequestBase { -/** Include bootstrap plugins in the response */ +/** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names + /** Include bootstrap plugins in the response */ include_bootstrap?: boolean /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { include_bootstrap?: never, local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { h?: never, s?: never, include_bootstrap?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { include_bootstrap?: never, local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { h?: never, s?: never, include_bootstrap?: never, local?: never, master_timeout?: never } } export type CatPluginsResponse = CatPluginsPluginsRecord[] @@ -9032,12 +9080,16 @@ export interface CatRecoveryRequest extends CatCatRequestBase { bytes?: Bytes /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** Unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, time?: never } + body?: string | { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, h?: never, s?: never, time?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, time?: never } + querystring?: { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, h?: never, s?: never, time?: never } } export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] @@ -9050,14 +9102,18 @@ export interface CatRepositoriesRepositoriesRecord { } export interface CatRepositoriesRequest extends CatCatRequestBase { -/** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ +/** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } } export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] @@ -9067,14 +9123,18 @@ export interface CatSegmentsRequest extends CatCatRequestBase { index?: Indices /** The unit used to display byte values. */ bytes?: Bytes + /** List of columns to appear in the response. 
Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, bytes?: never, local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, bytes?: never, local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never } } export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] @@ -9126,14 +9186,18 @@ export interface CatShardsRequest extends CatCatRequestBase { index?: Indices /** The unit used to display byte values. */ bytes?: Bytes + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** Unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, bytes?: never, master_timeout?: never, time?: never } + body?: string | { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, master_timeout?: never, time?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, bytes?: never, master_timeout?: never, time?: never } + querystring?: { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, master_timeout?: never, time?: never } } export type CatShardsResponse = CatShardsShardsRecord[] @@ -9358,14 +9422,18 @@ export interface CatSnapshotsRequest extends CatCatRequestBase { repository?: Names /** If `true`, the response does not include information from unavailable snapshots. */ ignore_unavailable?: boolean + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** Unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { repository?: never, ignore_unavailable?: never, master_timeout?: never, time?: never } + body?: string | { [key: string]: any } & { repository?: never, ignore_unavailable?: never, h?: never, s?: never, master_timeout?: never, time?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { repository?: never, ignore_unavailable?: never, master_timeout?: never, time?: never } + querystring?: { [key: string]: any } & { repository?: never, ignore_unavailable?: never, h?: never, s?: never, master_timeout?: never, time?: never } } export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] @@ -9413,6 +9481,10 @@ export interface CatTasksRequest extends CatCatRequestBase { nodes?: string[] /** The parent task identifier, which is used to limit the response. */ parent_task_id?: string + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** Unit used to display time values. */ time?: TimeUnit /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ @@ -9420,9 +9492,9 @@ export interface CatTasksRequest extends CatCatRequestBase { /** If `true`, the request blocks until the task has completed. */ wait_for_completion?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, time?: never, timeout?: never, wait_for_completion?: never } + body?: string | { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, h?: never, s?: never, time?: never, timeout?: never, wait_for_completion?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, time?: never, timeout?: never, wait_for_completion?: never } + querystring?: { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, h?: never, s?: never, time?: never, timeout?: never, wait_for_completion?: never } } export type CatTasksResponse = CatTasksTasksRecord[] @@ -9465,14 +9537,18 @@ export interface CatTasksTasksRecord { export interface CatTemplatesRequest extends CatCatRequestBase { /** The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. */ name?: Name + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } } export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] @@ -9494,6 +9570,10 @@ export interface CatTemplatesTemplatesRecord { export interface CatThreadPoolRequest extends CatCatRequestBase { /** A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. */ thread_pool_patterns?: Names + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: Names /** The unit used to display time values. */ time?: TimeUnit /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ @@ -9501,9 +9581,9 @@ export interface CatThreadPoolRequest extends CatCatRequestBase { /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { thread_pool_patterns?: never, time?: never, local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, time?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { thread_pool_patterns?: never, time?: never, local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, time?: never, local?: never, master_timeout?: never } } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -12441,12 +12521,14 @@ export interface IlmGetStatusResponse { export interface IlmMigrateToDataTiersRequest extends RequestBase { /** If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated. */ dry_run?: boolean + /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration legacy_template_to_delete?: string node_attribute?: string /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { dry_run?: never, legacy_template_to_delete?: never, node_attribute?: never } + body?: string | { [key: string]: any } & { dry_run?: never, master_timeout?: never, legacy_template_to_delete?: never, node_attribute?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { dry_run?: never, legacy_template_to_delete?: never, node_attribute?: never } + querystring?: { [key: string]: any } & { dry_run?: never, master_timeout?: never, legacy_template_to_delete?: never, node_attribute?: never } } export interface IlmMigrateToDataTiersResponse { @@ -13840,12 +13922,10 @@ export interface IndicesGetFieldMappingRequest extends RequestBase { ignore_unavailable?: boolean /** If `true`, return all default settings in the response. */ include_defaults?: boolean - /** If `true`, the request retrieves information from the local node only. */ - local?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } + body?: string | { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } + querystring?: { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never } } export type IndicesGetFieldMappingResponse = Record @@ -14115,11 +14195,16 @@ export interface IndicesPutDataLifecycleRequest extends RequestBase { master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - lifecycle?: IndicesDataStreamLifecycle + /** If defined, every document added to this data stream will be stored at least for this time frame. Any time after this duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely. */ + data_retention?: Duration + /** The downsampling configuration to execute for the managed backing index after rollover. */ + downsampling?: IndicesDataStreamLifecycleDownsampling + /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream. */ + enabled?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, lifecycle?: never } + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, data_retention?: never, downsampling?: never, enabled?: never } /** All values in `querystring` will be added to the request querystring. 
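As a hedged sketch of how the flattened lifecycle fields above would be sent (previously they were nested under a single `lifecycle` object); the data stream name `my-data-stream` is illustrative:

```js
// Store documents for at least seven days and keep the lifecycle enabled.
await client.indices.putDataLifecycle({
  name: 'my-data-stream',
  data_retention: '7d',
  enabled: true
})
```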
*/ - querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, lifecycle?: never } + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, data_retention?: never, downsampling?: never, enabled?: never } } export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase @@ -15053,7 +15138,15 @@ export type InferenceDenseByteVector = byte[] export type InferenceDenseVector = float[] +export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint { + max_chunk_size?: integer + overlap?: integer + sentence_overlap?: integer + strategy?: string +} + export interface InferenceInferenceEndpoint { + chunking_settings?: InferenceInferenceChunkingSettings service: string service_settings: InferenceServiceSettings task_settings?: InferenceTaskSettings @@ -19135,7 +19228,7 @@ export interface MlStartTrainedModelDeploymentRequest extends RequestBase { cache_size?: ByteSize /** A unique identifier for the deployment of the model. */ deployment_id?: string - /** The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. */ + /** The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ number_of_allocations?: integer /** The deployment priority. */ priority?: MlTrainingPriority @@ -19147,10 +19240,12 @@ export interface MlStartTrainedModelDeploymentRequest extends RequestBase { timeout?: Duration /** Specifies the allocation status to wait for before returning. */ wait_for?: MlDeploymentAllocationState + /** Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually. */ + adaptive_allocations?: MlAdaptiveAllocationsSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never } + body?: string | { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never, adaptive_allocations?: never } /** All values in `querystring` will be added to the request querystring. 
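A minimal sketch of starting a deployment with the new `adaptive_allocations` parameter, assuming `MlAdaptiveAllocationsSettings` carries `enabled`, `min_number_of_allocations`, and `max_number_of_allocations`; the model ID is illustrative. `number_of_allocations` is deliberately omitted, since the doc comments above say not to set it when adaptive allocations are enabled:

```js
// Let Elasticsearch scale allocations with load instead of fixing a count.
await client.ml.startTrainedModelDeployment({
  model_id: 'my-pytorch-model',
  adaptive_allocations: {
    enabled: true,
    min_number_of_allocations: 1,
    max_number_of_allocations: 4
  }
})
```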
*/ - querystring?: { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never } + querystring?: { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never, adaptive_allocations?: never } } export interface MlStartTrainedModelDeploymentResponse { @@ -19410,12 +19505,14 @@ export interface MlUpdateModelSnapshotResponse { export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase { /** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ model_id: Id - /** The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. */ + /** The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ number_of_allocations?: integer + /** Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually. */ + adaptive_allocations?: MlAdaptiveAllocationsSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { model_id?: never, number_of_allocations?: never } + body?: string | { [key: string]: any } & { model_id?: never, number_of_allocations?: never, adaptive_allocations?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { model_id?: never, number_of_allocations?: never } + querystring?: { [key: string]: any } & { model_id?: never, number_of_allocations?: never, adaptive_allocations?: never } } export interface MlUpdateTrainedModelDeploymentResponse { @@ -26132,9 +26229,7 @@ export interface SpecUtilsCommonQueryParameters { export interface SpecUtilsCommonCatQueryParameters { format?: string - h?: Names help?: boolean - s?: Names v?: boolean } From d86eb82e82d2c0b0d28d2e67f34784f4b9749e56 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 24 Feb 2025 10:57:17 -0600 Subject: [PATCH 478/647] Update dependency chai to v5.2.0 (#2624) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Co-authored-by: Josh Mock --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 22ccac6c4..377ba7b6b 100644 --- a/package.json +++ b/package.json @@ -64,7 +64,7 @@ "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", - "chai": "5.1.2", + "chai": "5.2.0", "cross-zip": "4.0.1", "desm": "1.3.1", "into-stream": "8.0.1", From d5a0f1171f76ace37207d19c27f0ea31afc460f5 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 24 Feb 2025 13:37:34 -0600 Subject: [PATCH 479/647] Add recent releases to changelog (#2633) --- docs/changelog.asciidoc | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 78b23e45a..0fdb9e682 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -17,6 +17,22 @@ In 8.0, the top-level `body` parameter that was available on all API functions < Setting HTTP timeouts on Elasticsearch requests goes against Elastic's recommendations. See <> for more information. +[discrete] +=== 8.17.1 + +[discrete] +==== Fixes + +[discrete] +===== Improved support for Elasticsearch `v8.17` + +Updated TypeScript types based on fixes and improvements to the Elasticsearch specification. + +[discrete] +===== Report correct transport connection type in telemetry + +The client's telemetry reporting mechanism was incorrectly reporting all traffic as using `HttpConnection` when the default is `UndiciConnection`. https://github.com/elastic/elasticsearch-js/issues/2324[#2324] + [discrete] === 8.17.0 @@ -29,6 +45,22 @@ Setting HTTP timeouts on Elasticsearch requests goes against Elastic's recommend You can find all the API changes https://www.elastic.co/guide/en/elasticsearch/reference/8.17/release-notes-8.17.0.html[here]. +[discrete] +=== 8.16.4 + +[discrete] +==== Fixes + +[discrete] +===== Improved support for Elasticsearch `v8.16` + +Updated TypeScript types based on fixes and improvements to the Elasticsearch specification. + +[discrete] +===== Report correct transport connection type in telemetry + +The client's telemetry reporting mechanism was incorrectly reporting all traffic as using `HttpConnection` when the default is `UndiciConnection`. 
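For reference, a minimal sketch of pinning the transport connection class explicitly — the setting this fix now reports correctly; the node URL is illustrative:

[source,js]
----
const { Client, HttpConnection } = require('@elastic/elasticsearch')

const client = new Client({
  node: 'https://localhost:9200/',
  Connection: HttpConnection // opt out of the default UndiciConnection
})
----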
https://github.com/elastic/elasticsearch-js/issues/2324[#2324] + [discrete] === 8.16.3 From 7449adbd1f60375ef5195efad3fd39d456e257a6 Mon Sep 17 00:00:00 2001 From: Colleen McGinnis Date: Wed, 26 Feb 2025 11:09:05 -0600 Subject: [PATCH 480/647] add the new ci checks (#2634) --- .../workflows/comment-on-asciidoc-changes.yml | 21 ------------------- .github/workflows/docs-build.yml | 19 +++++++++++++++++ .github/workflows/docs-cleanup.yml | 14 +++++++++++++ 3 files changed, 33 insertions(+), 21 deletions(-) delete mode 100644 .github/workflows/comment-on-asciidoc-changes.yml create mode 100644 .github/workflows/docs-build.yml create mode 100644 .github/workflows/docs-cleanup.yml diff --git a/.github/workflows/comment-on-asciidoc-changes.yml b/.github/workflows/comment-on-asciidoc-changes.yml deleted file mode 100644 index 8e5f836b1..000000000 --- a/.github/workflows/comment-on-asciidoc-changes.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -name: Comment on PR for .asciidoc changes - -on: - # We need to use pull_request_target to be able to comment on PRs from forks - pull_request_target: - types: - - synchronize - - opened - - reopened - branches: - - main - - master - - "9.0" - -jobs: - comment-on-asciidoc-change: - permissions: - contents: read - pull-requests: write - uses: elastic/docs-builder/.github/workflows/comment-on-asciidoc-changes.yml@main diff --git a/.github/workflows/docs-build.yml b/.github/workflows/docs-build.yml new file mode 100644 index 000000000..bb466166d --- /dev/null +++ b/.github/workflows/docs-build.yml @@ -0,0 +1,19 @@ +name: docs-build + +on: + push: + branches: + - main + pull_request_target: ~ + merge_group: ~ + +jobs: + docs-preview: + uses: elastic/docs-builder/.github/workflows/preview-build.yml@main + with: + path-pattern: docs/** + permissions: + deployments: write + id-token: write + contents: read + pull-requests: read diff --git a/.github/workflows/docs-cleanup.yml b/.github/workflows/docs-cleanup.yml new file mode 100644 index 000000000..f83e017b5 --- /dev/null +++ b/.github/workflows/docs-cleanup.yml @@ -0,0 +1,14 @@ +name: docs-cleanup + +on: + pull_request_target: + types: + - closed + +jobs: + docs-preview: + uses: elastic/docs-builder/.github/workflows/preview-cleanup.yml@main + permissions: + contents: none + id-token: write + deployments: write From 6f9e1062f3d12e05b70dc4b583217d2d23458513 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 26 Feb 2025 18:31:25 +0000 Subject: [PATCH 481/647] Auto-generated API code (#2636) --- docs/reference.asciidoc | 2 +- src/api/api/async_search.ts | 156 +- src/api/api/autoscaling.ts | 82 +- src/api/api/bulk.ts | 50 +- src/api/api/capabilities.ts | 18 +- src/api/api/cat.ts | 456 ++++- src/api/api/ccr.ts | 292 +++- src/api/api/clear_scroll.ts | 35 +- src/api/api/close_point_in_time.ts | 35 +- src/api/api/cluster.ts | 324 +++- src/api/api/connector.ts | 762 +++++++-- src/api/api/count.ts | 52 +- src/api/api/create.ts | 47 +- src/api/api/dangling_indices.ts | 51 +- src/api/api/delete.ts | 30 +- src/api/api/delete_by_query.ts | 69 +- src/api/api/delete_by_query_rethrottle.ts | 22 +- src/api/api/delete_script.ts | 23 +- src/api/api/enrich.ts | 97 +- src/api/api/eql.ts | 102 +- src/api/api/esql.ts | 128 +- src/api/api/exists.ts | 32 +- src/api/api/exists_source.ts | 31 +- src/api/api/explain.ts | 51 +- src/api/api/features.ts | 33 +- src/api/api/field_caps.ts | 48 +- src/api/api/fleet.ts | 205 ++- src/api/api/get.ts | 33 +- src/api/api/get_script.ts | 22 +- src/api/api/get_script_context.ts | 18 +- 
src/api/api/get_script_languages.ts | 18 +- src/api/api/get_source.ts | 32 +- src/api/api/graph.ts | 44 +- src/api/api/health_report.ts | 24 +- src/api/api/ilm.ts | 204 ++- src/api/api/index.ts | 51 +- src/api/api/indices.ts | 1386 ++++++++++++++-- src/api/api/inference.ts | 193 ++- src/api/api/info.ts | 18 +- src/api/api/ingest.ts | 244 ++- src/api/api/knn_search.ts | 44 +- src/api/api/license.ts | 115 +- src/api/api/logstash.ts | 62 +- src/api/api/mget.ts | 48 +- src/api/api/migration.ts | 41 +- src/api/api/ml.ts | 1833 ++++++++++++++++++--- src/api/api/monitoring.ts | 42 +- src/api/api/msearch.ts | 51 +- src/api/api/msearch_template.ts | 43 +- src/api/api/mtermvectors.ts | 51 +- src/api/api/nodes.ts | 140 +- src/api/api/open_point_in_time.ts | 44 +- src/api/api/ping.ts | 18 +- src/api/api/profiling.ts | 49 +- src/api/api/put_script.ts | 42 +- src/api/api/query_rules.ts | 159 +- src/api/api/rank_eval.ts | 43 +- src/api/api/reindex.ts | 49 +- src/api/api/reindex_rethrottle.ts | 22 +- src/api/api/render_search_template.ts | 38 +- src/api/api/rollup.ts | 153 +- src/api/api/scripts_painless_execute.ts | 37 +- src/api/api/scroll.ts | 40 +- src/api/api/search.ts | 116 +- src/api/api/search_application.ts | 200 ++- src/api/api/search_mvt.ts | 62 +- src/api/api/search_shards.ts | 28 +- src/api/api/search_template.ts | 55 +- src/api/api/searchable_snapshots.ts | 90 +- src/api/api/security.ts | 1400 ++++++++++++++-- src/api/api/shutdown.ts | 73 +- src/api/api/simulate.ts | 44 +- src/api/api/slm.ts | 155 +- src/api/api/snapshot.ts | 315 +++- src/api/api/sql.ts | 148 +- src/api/api/ssl.ts | 19 +- src/api/api/synonyms.ts | 132 +- src/api/api/tasks.ts | 59 +- src/api/api/terms_enum.ts | 43 +- src/api/api/termvectors.ts | 52 +- src/api/api/text_structure.ts | 142 +- src/api/api/transform.ts | 259 ++- src/api/api/update.ts | 58 +- src/api/api/update_by_query.ts | 73 +- src/api/api/update_by_query_rethrottle.ts | 22 +- src/api/api/watcher.ts | 255 ++- src/api/api/xpack.ts | 35 +- src/api/types.ts | 18 +- 88 files changed, 11299 insertions(+), 1238 deletions(-) diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index fbcf6c39f..516f02386 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -7152,7 +7152,7 @@ client.indices.putMapping({ index }) ** *`dynamic_date_formats` (Optional, string[])*: If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string. -** *`dynamic_templates` (Optional, Record | Record[])*: Specify dynamic templates for the mapping. +** *`dynamic_templates` (Optional, Record[])*: Specify dynamic templates for the mapping. ** *`_field_names` (Optional, { enabled })*: Control whether field names are enabled for the index. ** *`_meta` (Optional, Record)*: A mapping type can have custom meta data associated with it. 
These are not used at all by Elasticsearch, but can be used to store diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index b3dd631c1..9cc1582e1 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -35,12 +35,133 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class AsyncSearch { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'async_search.delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'async_search.get': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive', + 'typed_keys', + 'wait_for_completion_timeout' + ] + }, + 'async_search.status': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive' + ] + }, + 'async_search.submit': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion', + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'request_cache', + 'routing', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort' + ] + } + } } /** @@ -51,7 +172,10 @@ export default class AsyncSearch { async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['async_search.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -93,7 +217,10 @@ export default class AsyncSearch { async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['async_search.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -135,7 +262,10 @@ export default class AsyncSearch { async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['async_search.status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -177,8 +307,12 @@ export default class AsyncSearch { async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['async_search.submit'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -205,8 +339,14 @@ export default class AsyncSearch { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index 7f123c5a2..e53887579 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -35,12 +35,59 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Autoscaling { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'autoscaling.delete_autoscaling_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'autoscaling.get_autoscaling_capacity': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'autoscaling.get_autoscaling_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'autoscaling.put_autoscaling_policy': { + path: [ + 'name' + ], + body: [ + 'policy' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** @@ -51,7 +98,10 @@ export default class Autoscaling { async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.delete_autoscaling_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +143,10 @@ export default class Autoscaling { async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.get_autoscaling_capacity'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -133,7 +186,10 @@ export default class Autoscaling { async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.get_autoscaling_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -175,8 +231,12 @@ export default class Autoscaling { async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['policy'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['autoscaling.put_autoscaling_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -188,8 +248,14 @@ export default class Autoscaling { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index ccdedfcb2..b7508e514 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -35,7 +35,37 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + bulk: { + path: [ + 'index' + ], + body: [ + 'operations' + ], + query: [ + 'include_source_on_error', + 'list_executed_pipelines', + 'pipeline', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'timeout', + 'wait_for_active_shards', + 'require_alias', + 'require_data_stream' + ] + } +} /** * Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. * To use the `index` action, you must have the `create`, `index`, or `write` index privilege. * To use the `delete` action, you must have the `delete` or `write` index privilege. * To use the `update` action, you must have the `index` or `write` index privilege. * To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. * To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\n optional_source\n action_and_meta_data\n optional_source\n .... 
action_and_meta_data\n optional_source\n ``` The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. A `create` action fails if a document with the same ID already exists in the target. An `index` action adds or replaces a document as necessary. NOTE: Data streams support only the `create` action. To update or delete a document in a data stream, you must target the backing index containing the document. An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. NOTE: The final line of data must end with a newline character (`\n`). Each newline character may be preceded by a carriage return (`\r`). When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. Client libraries using this protocol should try to do something similar on the client side, and reduce buffering as much as possible. There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. **Client support for bulk requests** Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: * Go: Check out `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` * PHP: Check out bulk indexing. **Submitting bulk requests with cURL** If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. The latter doesn't preserve newlines. For example: ``` $ cat requests { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} ``` **Optimistic concurrency control** Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.
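As an illustrative sketch of such a guarded action through the JavaScript client's flattened `operations` list (the index name, ID, and sequence number/primary term values are examples; in practice they come from a previous read of the document):

```js
// Re-index the document only if it has not been modified since it was read.
await client.bulk({
  operations: [
    { index: { _index: 'test', _id: '1', if_seq_no: 3, if_primary_term: 1 } },
    { field1: 'updated value' }
  ]
})
```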
The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. **Versioning** Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index or delete operation based on the `_version` mapping. It also support the `version_type`. **Routing** Each bulk item can include the routing value using the `routing` field. It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Wait for active shards** When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. **Refresh** Control when the changes made by this request are visible to search. NOTE: Only the shards that receive the bulk request will be affected by refresh. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all. @@ -45,8 +75,12 @@ export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['operations'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.bulk + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -58,8 +92,14 @@ export default async function BulkApi = { + capabilities: { + path: [], + body: [], + query: [] + } +} /** * Checks if the specified combination of method, API, parameters, and arbitrary capabilities are supported @@ -45,7 +56,10 @@ export default async function CapabilitiesApi (this: That, params?: T.TODO, opti export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = acceptedParams.capabilities + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index bc397b310..c163f4c8b 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -35,12 +35,336 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Cat { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'cat.aliases': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'expand_wildcards', + 'master_timeout' + ] + }, + 'cat.allocation': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.component_templates': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.count': { + path: [ + 'index' + ], + body: [], + query: [ + 'h', + 's' + ] + }, + 'cat.fielddata': { + path: [ + 'fields' + ], + body: [], + query: [ + 'bytes', + 'fields', + 'h', + 's' + ] + }, + 'cat.health': { + path: [], + body: [], + query: [ + 'time', + 'ts', + 'h', + 's' + ] + }, + 'cat.help': { + path: [], + body: [], + query: [] + }, + 'cat.indices': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'expand_wildcards', + 'health', + 'include_unloaded_segments', + 'pri', + 'time', + 'master_timeout', + 'h', + 's' + ] + }, + 'cat.master': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.ml_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_datafeeds': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_jobs': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_trained_models': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'from', + 'size', + 'time' + ] + }, + 'cat.nodeattrs': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.nodes': { + path: [], + body: [], + query: [ + 'bytes', + 'full_id', + 'include_unloaded_segments', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.pending_tasks': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout', + 'time' + ] + }, + 'cat.plugins': { + path: [], + body: [], + query: [ + 'h', + 's', + 'include_bootstrap', + 'local', + 'master_timeout' + ] + }, + 'cat.recovery': { + path: [ + 'index' + ], + body: [], + query: [ + 'active_only', + 'bytes', + 'detailed', + 'index', + 'h', + 's', + 'time' + ] + }, + 'cat.repositories': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.segments': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.shards': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.snapshots': { + path: [ + 'repository' + ], + body: [], + query: [ + 'ignore_unavailable', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.tasks': { + path: [], + body: [], + query: [ + 'actions', + 'detailed', + 'nodes', + 'parent_task_id', + 'h', + 's', + 'time', + 'timeout', + 'wait_for_completion' + ] + }, + 
'cat.templates': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.thread_pool': { + path: [ + 'thread_pool_patterns' + ], + body: [], + query: [ + 'h', + 's', + 'time', + 'local', + 'master_timeout' + ] + }, + 'cat.transforms': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'h', + 's', + 'time', + 'size' + ] + } + } } /** @@ -51,7 +375,10 @@ export default class Cat { async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cat.aliases'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -101,7 +428,10 @@ export default class Cat { async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise> async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.allocation'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -151,7 +481,10 @@ export default class Cat { async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cat.component_templates'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -201,7 +534,10 @@ export default class Cat { async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise> async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.count'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -251,7 +587,10 @@ export default class Cat { async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise> async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['fields'] + const { + path: acceptedPath + } = this.acceptedParams['cat.fielddata'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -301,7 +640,10 @@ export default class Cat { async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.health'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -341,7 +683,10 @@ export default class Cat { async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise> async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.help'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -381,7 +726,10 @@ export default class Cat { async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.indices'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -431,7 +779,10 @@ export default class Cat { async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise> async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.master'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -471,7 +822,10 @@ export default class Cat { async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -521,7 +875,10 @@ export default class Cat { async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_datafeeds'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -571,7 +928,10 @@ export default class Cat { async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_jobs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -621,7 +981,10 @@ export default class Cat { async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_trained_models'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -671,7 +1034,10 @@ export default class Cat { async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise> async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.nodeattrs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -711,7 +1077,10 @@ export default class Cat { async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise> async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.nodes'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -751,7 +1120,10 @@ export default class Cat { async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.pending_tasks'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -791,7 +1163,10 @@ export default class Cat { async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise> async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.plugins'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -831,7 +1206,10 @@ export default class Cat { async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.recovery'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -881,7 +1259,10 @@ export default class Cat { async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.repositories'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -921,7 +1302,10 @@ export default class Cat { async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.segments'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -971,7 +1355,10 @@ export default class Cat { async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.shards'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1021,7 +1408,10 @@ export default class Cat { async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository'] + const { + path: acceptedPath + } = this.acceptedParams['cat.snapshots'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1071,7 +1461,10 @@ export default class Cat { async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.tasks'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1111,7 +1504,10 @@ export default class Cat { async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cat.templates'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1161,7 +1557,10 @@ export default class Cat { async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise> async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['thread_pool_patterns'] + const { + path: acceptedPath + } = this.acceptedParams['cat.thread_pool'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1211,7 +1610,10 @@ export default class Cat { async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.transforms'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index 29455527c..dd704a23f 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -35,12 +35,185 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ccr { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ccr.delete_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.follow': { + path: [ + 'index' + ], + body: [ + 'data_stream_name', + 'leader_index', + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'read_poll_timeout', + 'remote_cluster', + 'settings' + ], + query: [ + 'master_timeout', + 'wait_for_active_shards' + ] + }, + 'ccr.follow_info': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.follow_stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'ccr.forget_follower': { + path: [ + 'index' + ], + body: [ + 'follower_cluster', + 'follower_index', + 'follower_index_uuid', + 'leader_remote_cluster' + ], + query: [ + 'timeout' + ] + }, + 'ccr.get_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.pause_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.pause_follow': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.put_auto_follow_pattern': { + path: [ + 'name' + ], + body: [ + 'remote_cluster', + 'follow_index_pattern', + 'leader_index_patterns', + 'leader_index_exclusion_patterns', + 'max_outstanding_read_requests', + 'settings', + 'max_outstanding_write_requests', + 'read_poll_timeout', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size' + ], + query: [ + 'master_timeout' + ] + }, + 'ccr.resume_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.resume_follow': { + path: [ + 'index' + ], + body: [ + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 
'read_poll_timeout' + ], + query: [ + 'master_timeout' + ] + }, + 'ccr.stats': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ccr.unfollow': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** @@ -51,7 +224,10 @@ export default class Ccr { async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.delete_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,8 +269,12 @@ export default class Ccr { async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['data_stream_name', 'leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.follow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -116,8 +296,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -140,7 +326,10 @@ export default class Ccr { async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.follow_info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
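Taken together, the `ccr.follow` hunk above changes where loose parameters land: keys declared under `query` for `'ccr.follow'` (`master_timeout`, `wait_for_active_shards`) and the common query parameters go to the querystring, while any other non-path key now falls through to the request body. A hedged usage sketch; the node URL and all values are placeholders:

    import { Client } from '@elastic/elasticsearch'

    // Hypothetical client; point it at a real cluster to run this.
    const client = new Client({ node: 'http://localhost:9200/' })

    async function run () {
      await client.ccr.follow({
        index: 'follower-index',      // path parameter, interpolated into the URL
        wait_for_active_shards: '1',  // declared query parameter -> querystring
        remote_cluster: 'leader',     // declared body parameter  -> request body
        leader_index: 'leader-index'  // declared body parameter  -> request body
      })
    }

    run().catch(console.log)
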
{ ...userQuery } : {} @@ -182,7 +371,10 @@ export default class Ccr { async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.follow_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -224,8 +416,12 @@ export default class Ccr { async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise> async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['follower_cluster', 'follower_index', 'follower_index_uuid', 'leader_remote_cluster'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.forget_follower'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -247,8 +443,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -271,7 +473,10 @@ export default class Ccr { async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.get_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -321,7 +526,10 @@ export default class Ccr { async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.pause_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -363,7 +571,10 @@ export default class Ccr { async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.pause_follow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -405,8 +616,12 @@ export default class Ccr { async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['remote_cluster', 'follow_index_pattern', 'leader_index_patterns', 'leader_index_exclusion_patterns', 'max_outstanding_read_requests', 'settings', 'max_outstanding_write_requests', 'read_poll_timeout', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.put_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -428,8 +643,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -452,7 +673,10 @@ export default class Ccr { async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.resume_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -494,8 +718,12 @@ export default class Ccr { async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.resume_follow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -517,8 +745,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -541,7 +775,10 @@ export default class Ccr { async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ccr.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -581,7 +818,10 @@ export default class Ccr { async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.unfollow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts index 7b7258503..e78c05005 100644 --- a/src/api/api/clear_scroll.ts +++ b/src/api/api/clear_scroll.ts @@ -35,7 +35,22 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + clear_scroll: { + path: [], + body: [ + 'scroll_id' + ], + query: [] + } +} /** * Clear a scrolling search. Clear the search context and results for a scrolling search. 
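The function-style APIs in this and the following files apply the same refactor without a class to hang the registry on, so the lookup table becomes a module-level constant next to `commonQueryParams`. A minimal sketch of that variant, mirroring the `clear_scroll` entry above:

    // Same three-list shape as the class-based registry; the entry is
    // copied from the hunk above.
    const acceptedParams = {
      clear_scroll: { path: [], body: ['scroll_id'], query: [] }
    } as const

    const { body: acceptedBody } = acceptedParams.clear_scroll
    console.log(acceptedBody) // ['scroll_id']
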
@@ -45,8 +60,12 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll
 export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClearScrollResponse, unknown>>
 export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise<T.ClearScrollResponse>
 export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = []
-  const acceptedBody: string[] = ['scroll_id']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.clear_scroll
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -69,8 +88,14 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
 
diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts
index 26d5b0e26..96d22ced1 100644
--- a/src/api/api/close_point_in_time.ts
+++ b/src/api/api/close_point_in_time.ts
@@ -35,7 +35,22 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  close_point_in_time: {
+    path: [],
+    body: [
+      'id'
+    ],
+    query: []
+  }
+}
 
 /**
  * Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.
@@ -45,8 +60,12 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo
 export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClosePointInTimeResponse, unknown>>
 export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise<T.ClosePointInTimeResponse>
 export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = []
-  const acceptedBody: string[] = ['id']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.close_point_in_time
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -68,8 +87,14 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ??
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 730c942d2..a2e8e4495 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -35,12 +35,202 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Cluster { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'cluster.allocation_explain': { + path: [], + body: [ + 'current_node', + 'index', + 'primary', + 'shard' + ], + query: [ + 'include_disk_info', + 'include_yes_decisions', + 'master_timeout' + ] + }, + 'cluster.delete_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'cluster.delete_voting_config_exclusions': { + path: [], + body: [], + query: [ + 'master_timeout', + 'wait_for_removal' + ] + }, + 'cluster.exists_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'local' + ] + }, + 'cluster.get_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'include_defaults', + 'local', + 'master_timeout' + ] + }, + 'cluster.get_settings': { + path: [], + body: [], + query: [ + 'flat_settings', + 'include_defaults', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.health': { + path: [ + 'index' + ], + body: [], + query: [ + 'expand_wildcards', + 'level', + 'local', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'wait_for_events', + 'wait_for_nodes', + 'wait_for_no_initializing_shards', + 'wait_for_no_relocating_shards', + 'wait_for_status' + ] + }, + 'cluster.info': { + path: [ + 'target' + ], + body: [], + query: [] + }, + 'cluster.pending_tasks': { + path: [], + body: [], + query: [ + 'local', + 'master_timeout' + ] + }, + 'cluster.post_voting_config_exclusions': { + path: [], + body: [], + query: [ + 'node_names', + 'node_ids', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.put_component_template': { + path: [ + 'name' + ], + body: [ + 'template', + 'version', + '_meta', + 'deprecated' + ], + query: [ + 'create', + 'master_timeout' + ] + }, + 'cluster.put_settings': { + path: [], + body: [ + 'persistent', + 'transient' + ], + query: [ + 'flat_settings', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.remote_info': { + path: [], + body: [], + query: [] + }, + 'cluster.reroute': { + path: [], + body: [ + 'commands' + ], + query: [ + 'dry_run', + 'explain', + 'metric', + 'retry_failed', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.state': { + path: [ + 'metric', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'wait_for_metadata_version', + 'wait_for_timeout' + ] + }, + 'cluster.stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'include_remotes', + 'timeout' + ] + } + } } /** @@ -51,8 +241,12 @@ export default class Cluster { async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise> async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise async allocationExplain (this: That, params?: 
T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['current_node', 'index', 'primary', 'shard'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.allocation_explain'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -75,8 +269,14 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -96,7 +296,10 @@ export default class Cluster { async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.delete_component_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -138,7 +341,10 @@ export default class Cluster { async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.delete_voting_config_exclusions'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -178,7 +384,10 @@ export default class Cluster { async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.exists_component_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -220,7 +429,10 @@ export default class Cluster { async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.get_component_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -270,7 +482,10 @@ export default class Cluster { async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -310,7 +525,10 @@ export default class Cluster { async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.health'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -360,7 +578,10 @@ export default class Cluster { async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['target'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -402,7 +623,10 @@ export default class Cluster { async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.pending_tasks'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -442,7 +666,10 @@ export default class Cluster { async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.post_voting_config_exclusions'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -482,8 +709,12 @@ export default class Cluster { async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['template', 'version', '_meta', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.put_component_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -505,8 +736,14 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -529,8 +766,12 @@ export default class Cluster { async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['persistent', 'transient'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.put_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -553,8 +794,14 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -574,7 +821,10 @@ export default class Cluster { async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.remote_info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -614,8 +864,12 @@ export default class Cluster { async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise> async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['commands'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.reroute'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -638,8 +892,14 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -659,7 +919,10 @@ export default class Cluster { async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise> async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['metric', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.state'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -713,7 +976,10 @@ export default class Cluster { async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
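`cluster.put_settings` illustrates the split cleanly: `flat_settings`, `master_timeout`, and `timeout` are declared as query parameters above, while `persistent` and `transient` remain body fields. A hedged sketch reusing the hypothetical client from the `ccr.follow` example; the setting name and values are placeholders:

    async function updateSettings () {
      await client.cluster.putSettings({
        flat_settings: true,  // declared query parameter -> querystring
        persistent: {         // declared body parameter  -> request body
          'action.auto_create_index': 'false'
        }
      })
    }

    updateSettings().catch(console.log)
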
{ ...userQuery } : {} diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index 141aa8002..41cdb4316 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -35,12 +35,342 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Connector { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'connector.check_in': { + path: [ + 'connector_id' + ], + body: [], + query: [] + }, + 'connector.delete': { + path: [ + 'connector_id' + ], + body: [], + query: [ + 'delete_sync_jobs', + 'hard' + ] + }, + 'connector.get': { + path: [ + 'connector_id' + ], + body: [], + query: [ + 'include_deleted' + ] + }, + 'connector.last_sync': { + path: [ + 'connector_id' + ], + body: [ + 'last_access_control_sync_error', + 'last_access_control_sync_scheduled_at', + 'last_access_control_sync_status', + 'last_deleted_document_count', + 'last_incremental_sync_scheduled_at', + 'last_indexed_document_count', + 'last_seen', + 'last_sync_error', + 'last_sync_scheduled_at', + 'last_sync_status', + 'last_synced', + 'sync_cursor' + ], + query: [] + }, + 'connector.list': { + path: [], + body: [], + query: [ + 'from', + 'size', + 'index_name', + 'connector_name', + 'service_type', + 'include_deleted', + 'query' + ] + }, + 'connector.post': { + path: [], + body: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ], + query: [] + }, + 'connector.put': { + path: [ + 'connector_id' + ], + body: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ], + query: [] + }, + 'connector.secret_delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.secret_get': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.secret_post': { + path: [], + body: [], + query: [] + }, + 'connector.secret_put': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.sync_job_cancel': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_check_in': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_claim': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'sync_cursor', + 'worker_hostname' + ], + query: [] + }, + 'connector.sync_job_delete': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_error': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'error' + ], + query: [] + }, + 'connector.sync_job_get': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_list': { + path: [], + body: [], + query: [ + 'from', + 'size', + 'status', + 'connector_id', + 'job_type' + ] + }, + 'connector.sync_job_post': { + path: [], + body: [ + 'id', + 'job_type', + 'trigger_method' + ], + query: [] + }, + 'connector.sync_job_update_stats': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'deleted_document_count', + 'indexed_document_count', + 'indexed_document_volume', + 'last_seen', + 'metadata', + 'total_document_count' + ], + query: [] + }, + 'connector.update_active_filtering': { + path: [ + 'connector_id' + ], + body: [], + query: [] + }, + 'connector.update_api_key_id': { + path: [ + 'connector_id' + ], + body: [ + 
'api_key_id', + 'api_key_secret_id' + ], + query: [] + }, + 'connector.update_configuration': { + path: [ + 'connector_id' + ], + body: [ + 'configuration', + 'values' + ], + query: [] + }, + 'connector.update_error': { + path: [ + 'connector_id' + ], + body: [ + 'error' + ], + query: [] + }, + 'connector.update_features': { + path: [ + 'connector_id' + ], + body: [ + 'features' + ], + query: [] + }, + 'connector.update_filtering': { + path: [ + 'connector_id' + ], + body: [ + 'filtering', + 'rules', + 'advanced_snippet' + ], + query: [] + }, + 'connector.update_filtering_validation': { + path: [ + 'connector_id' + ], + body: [ + 'validation' + ], + query: [] + }, + 'connector.update_index_name': { + path: [ + 'connector_id' + ], + body: [ + 'index_name' + ], + query: [] + }, + 'connector.update_name': { + path: [ + 'connector_id' + ], + body: [ + 'name', + 'description' + ], + query: [] + }, + 'connector.update_native': { + path: [ + 'connector_id' + ], + body: [ + 'is_native' + ], + query: [] + }, + 'connector.update_pipeline': { + path: [ + 'connector_id' + ], + body: [ + 'pipeline' + ], + query: [] + }, + 'connector.update_scheduling': { + path: [ + 'connector_id' + ], + body: [ + 'scheduling' + ], + query: [] + }, + 'connector.update_service_type': { + path: [ + 'connector_id' + ], + body: [ + 'service_type' + ], + query: [] + }, + 'connector.update_status': { + path: [ + 'connector_id' + ], + body: [ + 'status' + ], + query: [] + } + } } /** @@ -51,7 +381,10 @@ export default class Connector { async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.check_in'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +426,10 @@ export default class Connector { async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -135,7 +471,10 @@ export default class Connector { async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
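For `connector.delete`, the registry above declares `delete_sync_jobs` and `hard` as query parameters, so a flat call routes both onto the querystring while `connector_id` fills the path. A hedged sketch, again with placeholder values and the client from the earlier example:

    async function removeConnector () {
      await client.connector.delete({
        connector_id: 'my-connector',  // path parameter
        delete_sync_jobs: true,        // declared query parameter -> querystring
        hard: false                    // declared query parameter -> querystring
      })
    }

    removeConnector().catch(console.log)
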
{ ...userQuery } : {} @@ -177,8 +516,12 @@ export default class Connector { async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['last_access_control_sync_error', 'last_access_control_sync_scheduled_at', 'last_access_control_sync_status', 'last_deleted_document_count', 'last_incremental_sync_scheduled_at', 'last_indexed_document_count', 'last_seen', 'last_sync_error', 'last_sync_scheduled_at', 'last_sync_status', 'last_synced', 'sync_cursor'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.last_sync'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -200,8 +543,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -224,7 +573,10 @@ export default class Connector { async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise> async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['connector.list'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -264,8 +616,12 @@ export default class Connector { async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise> async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.post'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -288,8 +644,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -309,8 +671,12 @@ export default class Connector {
   async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorPutResponse, unknown>>
   async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise<T.ConnectorPutResponse>
   async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.put']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -333,8 +699,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -363,7 +735,10 @@ export default class Connector {
   async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
  async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['connector.secret_delete']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -404,7 +779,10 @@ export default class Connector {
   async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['connector.secret_get']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -445,7 +823,10 @@ export default class Connector {
   async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['connector.secret_post']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -483,7 +864,10 @@ export default class Connector {
   async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['connector.secret_put']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -525,7 +909,10 @@ export default class Connector {
   async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobCancelResponse, unknown>>
   async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobCancelResponse>
   async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_sync_job_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['connector.sync_job_cancel']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -567,7 +954,10 @@ export default class Connector {
   async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobCheckInResponse, unknown>>
   async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobCheckInResponse>
   async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_sync_job_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['connector.sync_job_check_in']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -609,8 +999,12 @@ export default class Connector {
   async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobClaimResponse, unknown>>
   async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobClaimResponse>
   async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_sync_job_id']
-    const acceptedBody: string[] = ['sync_cursor', 'worker_hostname']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.sync_job_claim']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -632,8 +1026,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -656,7 +1056,10 @@ export default class Connector {
   async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobDeleteResponse, unknown>>
   async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobDeleteResponse>
   async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_sync_job_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['connector.sync_job_delete']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -698,8 +1101,12 @@ export default class Connector {
   async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobErrorResponse, unknown>>
   async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobErrorResponse>
   async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_sync_job_id']
-    const acceptedBody: string[] = ['error']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.sync_job_error']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -721,8 +1128,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -745,7 +1158,10 @@ export default class Connector {
   async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobGetResponse, unknown>>
   async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobGetResponse>
   async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_sync_job_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['connector.sync_job_get']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -787,7 +1203,10 @@ export default class Connector {
   async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobListResponse, unknown>>
   async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobListResponse>
   async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['connector.sync_job_list']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -827,8 +1246,12 @@ export default class Connector {
   async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobPostResponse, unknown>>
   async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobPostResponse>
   async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['id', 'job_type', 'trigger_method']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.sync_job_post']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -850,8 +1273,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -871,8 +1300,12 @@ export default class Connector {
   async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorSyncJobUpdateStatsResponse, unknown>>
   async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise<T.ConnectorSyncJobUpdateStatsResponse>
   async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_sync_job_id']
-    const acceptedBody: string[] = ['deleted_document_count', 'indexed_document_count', 'indexed_document_volume', 'last_seen', 'metadata', 'total_document_count']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.sync_job_update_stats']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -894,8 +1327,14 @@ export default class Connector {
      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -918,7 +1357,10 @@ export default class Connector {
   async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateActiveFilteringResponse, unknown>>
   async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateActiveFilteringResponse>
   async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['connector.update_active_filtering']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -960,8 +1402,12 @@ export default class Connector {
   async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateApiKeyIdResponse, unknown>>
   async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateApiKeyIdResponse>
   async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['api_key_id', 'api_key_secret_id']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_api_key_id']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -983,8 +1429,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -1007,8 +1459,12 @@ export default class Connector {
   async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateConfigurationResponse, unknown>>
   async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateConfigurationResponse>
   async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['configuration', 'values']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_configuration']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1030,8 +1486,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -1054,8 +1516,12 @@ export default class Connector {
   async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateErrorResponse, unknown>>
   async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateErrorResponse>
   async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['error']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_error']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1077,8 +1543,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -1101,8 +1573,12 @@ export default class Connector {
   async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateFeaturesResponse, unknown>>
   async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateFeaturesResponse>
   async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['features']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_features']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1124,8 +1600,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -1148,8 +1630,12 @@ export default class Connector {
   async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateFilteringResponse, unknown>>
   async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateFilteringResponse>
   async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['filtering', 'rules', 'advanced_snippet']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_filtering']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1171,8 +1657,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -1195,8 +1687,12 @@ export default class Connector {
   async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateFilteringValidationResponse, unknown>>
   async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateFilteringValidationResponse>
   async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['validation']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_filtering_validation']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1218,8 +1714,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
 
@@ -1242,8 +1744,12 @@ export default class Connector {
   async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateIndexNameResponse, unknown>>
   async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateIndexNameResponse>
   async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['index_name']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_index_name']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1265,8 +1771,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -1289,8 +1801,12 @@ export default class Connector {
   async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateNameResponse, unknown>>
   async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateNameResponse>
   async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['name', 'description']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_name']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1312,8 +1828,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -1336,8 +1858,12 @@ export default class Connector {
   async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateNativeResponse, unknown>>
   async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateNativeResponse>
   async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['is_native']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_native']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1359,8 +1885,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -1383,8 +1915,12 @@ export default class Connector {
   async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdatePipelineResponse, unknown>>
   async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdatePipelineResponse>
   async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['pipeline']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_pipeline']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1406,8 +1942,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -1430,8 +1972,12 @@ export default class Connector {
   async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateSchedulingResponse, unknown>>
   async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateSchedulingResponse>
   async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['scheduling']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_scheduling']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1453,8 +1999,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
 
@@ -1477,8 +2029,12 @@ export default class Connector {
   async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateServiceTypeResponse, unknown>>
   async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateServiceTypeResponse>
   async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['service_type']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_service_type']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1500,8 +2056,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -1524,8 +2086,12 @@ export default class Connector {
   async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateStatusResponse, unknown>>
   async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateStatusResponse>
   async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['connector_id']
-    const acceptedBody: string[] = ['status']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['connector.update_status']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1547,8 +2113,14 @@ export default class Connector {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
diff --git a/src/api/api/count.ts b/src/api/api/count.ts
index 6e060b369..f86aa7690 100644
--- a/src/api/api/count.ts
+++ b/src/api/api/count.ts
@@ -35,7 +35,39 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  count: {
+    path: [
+      'index'
+    ],
+    body: [
+      'query'
+    ],
+    query: [
+      'allow_no_indices',
+      'analyzer',
+      'analyze_wildcard',
+      'default_operator',
+      'df',
+      'expand_wildcards',
+      'ignore_throttled',
+      'ignore_unavailable',
+      'lenient',
+      'min_score',
+      'preference',
+      'routing',
+      'terminate_after',
+      'q'
+    ]
+  }
+}
 
 /**
  * Count search results. Get the number of documents matching a query. The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count.
@@ -45,8 +77,12 @@ export default async function CountApi (this: That, params?: T.CountRequest, opt
 export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CountResponse, unknown>>
 export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise<T.CountResponse>
 export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['query']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.count
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -69,8 +105,14 @@ export default async function CountApi (this: That, params?: T.CountRequest, opt
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
 
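The hunks above all apply the same refactor: the per-method `acceptedPath`/`acceptedBody` arrays become a shared `acceptedParams` table, and any request property not declared as a path, body, or query parameter now falls through to the request body instead of the querystring. The following standalone sketch condenses that routing rule; `splitParams` is a hypothetical helper written only for illustration (and it omits the special handling of the `body`/`querystring` escape hatches), while `commonQueryParams` and the table shape mirror the generated code:

// Illustrative sketch of the body/query routing this patch introduces.
interface AcceptedParams { path: string[], body: string[], query: string[] }

const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

// Hypothetical helper: splits a flat params object the way the
// generated per-API functions now do.
function splitParams (accepted: AcceptedParams, params: Record<string, any>): { querystring: Record<string, any>, body: Record<string, any> | undefined } {
  const querystring: Record<string, any> = {}
  let body: Record<string, any> | undefined
  for (const key in params) {
    if (accepted.body.includes(key)) {
      body = body ?? {}
      body[key] = params[key] // declared body property
    } else if (accepted.path.includes(key)) {
      continue // path params are interpolated into the URL, not serialized
    } else if (accepted.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key] // declared (or common) query parameter
    } else {
      body = body ?? {}
      body[key] = params[key] // unknown keys now land in the body
    }
  }
  return { querystring, body }
}

// splitParams({ path: ['index'], body: ['query'], query: ['routing'] },
//   { index: 'books', routing: 'r1', query: { match_all: {} } })
// => { querystring: { routing: 'r1' }, body: { query: { match_all: {} } } }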
diff --git a/src/api/api/create.ts b/src/api/api/create.ts
index c8c663fa3..e43ee6b8e 100644
--- a/src/api/api/create.ts
+++ b/src/api/api/create.ts
@@ -35,7 +35,34 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  create: {
+    path: [
+      'id',
+      'index'
+    ],
+    body: [
+      'document'
+    ],
+    query: [
+      'include_source_on_error',
+      'pipeline',
+      'refresh',
+      'routing',
+      'timeout',
+      'version',
+      'version_type',
+      'wait_for_active_shards'
+    ]
+  }
+}
 
 /**
  * Create a new document in the index. You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs Using `_create` guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with a same ID already exists in the index. To update an existing document, you must use the `/<target>/_doc/` API. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter. Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.
@@ -45,8 +72,12 @@ export default async function CreateApi (this: That, params
 export default async function CreateApi<TDocument = unknown> (this: That, params: T.CreateRequest<TDocument>, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CreateResponse, unknown>>
 export default async function CreateApi<TDocument = unknown> (this: That, params: T.CreateRequest<TDocument>, options?: TransportRequestOptions): Promise<T.CreateResponse>
 export default async function CreateApi<TDocument = unknown> (this: That, params: T.CreateRequest<TDocument>, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['id', 'index']
-  const acceptedBody: string[] = ['document']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.create
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -58,8 +89,14 @@ export default async function CreateApi (this: That, params
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
 
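Because `_create` is strictly create-only, the 409 behaviour described in the doc comment above is easy to observe from the client. A usage sketch follows; the local node URL, the `books` index, and the document values are assumptions for illustration:

import { Client } from '@elastic/elasticsearch'

const es = new Client({ node: 'http://localhost:9200/' })

// The first create succeeds and indexes the document.
await es.create({ index: 'books', id: '1', document: { title: 'Snow Crash' } })

// A second create with the same ID is rejected instead of overwriting:
try {
  await es.create({ index: 'books', id: '1', document: { title: 'Dune' } })
} catch (err: any) {
  console.log(err.statusCode) // 409 (version conflict)
}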
diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts
index e8dc5399d..57ff08d00 100644
--- a/src/api/api/dangling_indices.ts
+++ b/src/api/api/dangling_indices.ts
@@ -35,12 +35,46 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
 
 export default class DanglingIndices {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'dangling_indices.delete_dangling_index': {
+        path: [
+          'index_uuid'
+        ],
+        body: [],
+        query: [
+          'accept_data_loss',
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'dangling_indices.import_dangling_index': {
+        path: [
+          'index_uuid'
+        ],
+        body: [],
+        query: [
+          'accept_data_loss',
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'dangling_indices.list_dangling_indices': {
+        path: [],
+        body: [],
+        query: []
+      }
+    }
   }
 
   /**
@@ -51,7 +85,10 @@ export default class DanglingIndices {
   async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesDeleteDanglingIndexResponse, unknown>>
   async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesDeleteDanglingIndexResponse>
   async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index_uuid']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['dangling_indices.delete_dangling_index']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -93,7 +130,10 @@ export default class DanglingIndices {
   async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesImportDanglingIndexResponse, unknown>>
   async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesImportDanglingIndexResponse>
   async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index_uuid']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['dangling_indices.import_dangling_index']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -135,7 +175,10 @@ export default class DanglingIndices {
   async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesListDanglingIndicesResponse, unknown>>
   async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesListDanglingIndicesResponse>
   async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['dangling_indices.list_dangling_indices']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts
index 63b4cf22b..69d7fd9bd 100644
--- a/src/api/api/delete.ts
+++ b/src/api/api/delete.ts
@@ -35,7 +35,30 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  delete: {
+    path: [
+      'id',
+      'index'
+    ],
+    body: [],
+    query: [
+      'if_primary_term',
+      'if_seq_no',
+      'refresh',
+      'routing',
+      'timeout',
+      'version',
+      'version_type',
+      'wait_for_active_shards'
+    ]
+  }
+}
 
 /**
  * Delete a document. Remove a JSON document from the specified index. NOTE: You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document. **Optimistic concurrency control** Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Versioning** Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. **Routing** If routing is used during indexing, the routing value also needs to be specified to delete a document. If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 ``` This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified. **Distributed** The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.
@@ -45,7 +68,10 @@ export default async function DeleteApi (this: That, params: T.DeleteRequest, op
 export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteResponse, unknown>>
 export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise<T.DeleteResponse>
 export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['id', 'index']
+  const {
+    path: acceptedPath
+  } = acceptedParams.delete
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
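The optimistic concurrency control described in the delete doc comment maps directly onto the `if_seq_no` and `if_primary_term` query parameters declared in the table above. A usage sketch, assuming an existing `es` client instance and a previously indexed document:

// Read the document to capture its current sequence number and primary term.
const doc = await es.get({ index: 'my-index-000001', id: '1' })

// Conditional delete: rejects with a 409 version conflict if anything
// modified the document after the read above.
await es.delete({
  index: 'my-index-000001',
  id: '1',
  if_seq_no: doc._seq_no,
  if_primary_term: doc._primary_term
})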
diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts
index f99e09670..6842ec280 100644
--- a/src/api/api/delete_by_query.ts
+++ b/src/api/api/delete_by_query.ts
@@ -35,7 +35,56 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  delete_by_query: {
+    path: [
+      'index'
+    ],
+    body: [
+      'max_docs',
+      'query',
+      'slice'
+    ],
+    query: [
+      'allow_no_indices',
+      'analyzer',
+      'analyze_wildcard',
+      'conflicts',
+      'default_operator',
+      'df',
+      'expand_wildcards',
+      'from',
+      'ignore_unavailable',
+      'lenient',
+      'max_docs',
+      'preference',
+      'refresh',
+      'request_cache',
+      'requests_per_second',
+      'routing',
+      'q',
+      'scroll',
+      'scroll_size',
+      'search_timeout',
+      'search_type',
+      'slices',
+      'sort',
+      'stats',
+      'terminate_after',
+      'timeout',
+      'version',
+      'wait_for_active_shards',
+      'wait_for_completion'
+    ]
+  }
+}
 
 /**
  * Delete documents. Deletes documents that match the specified query. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `delete` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick, they are not rolled back. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query. **Throttling delete requests** To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to disable throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with slices only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. * Delete performance scales linearly across available resources with the number of slices. Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. **Cancel a delete by query operation** Any delete by query can be canceled using the task cancel API. For example: ``` POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel ``` The task ID can be found by using the get tasks API. Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.
@@ -45,8 +94,12 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteByQueryResponse, unknown>>
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise<T.DeleteByQueryResponse>
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['max_docs', 'query', 'slice']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.delete_by_query
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -68,8 +121,14 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
 
diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts
index 4da430635..23b331b7a 100644
--- a/src/api/api/delete_by_query_rethrottle.ts
+++ b/src/api/api/delete_by_query_rethrottle.ts
@@ -35,7 +35,22 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  delete_by_query_rethrottle: {
+    path: [
+      'task_id'
+    ],
+    body: [],
+    query: [
+      'requests_per_second'
+    ]
+  }
+}
 
 /**
  * Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
@@ -45,7 +60,10 @@ export default async function DeleteByQueryRethrottleApi (this: That, params: T.
 export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteByQueryRethrottleResponse, unknown>>
 export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<T.DeleteByQueryRethrottleResponse>
 export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['task_id']
+  const {
+    path: acceptedPath
+  } = acceptedParams.delete_by_query_rethrottle
+
  const userQuery = params?.querystring
  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
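Putting the throttling and slicing notes from the doc comment together, a usage sketch of a throttled, sliced delete-by-query that is later rethrottled; the `es` client instance, index name, and query values are assumptions for illustration:

const resp = await es.deleteByQuery({
  index: 'my-index-000001',
  query: { match: { 'user.id': 'elkbee' } },
  slices: 'auto',            // one slice per shard, up to a limit
  requests_per_second: 500,  // 1000-doc batches: 2 s target, ~1.5 s wait per batch
  wait_for_completion: false // run in the background and return a task ID
})

// Loosen the throttle mid-flight; -1 disables it. Speed-ups apply immediately.
await es.deleteByQueryRethrottle({
  task_id: String(resp.task),
  requests_per_second: -1
})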
diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts
index e6519dffd..68949420a 100644
--- a/src/api/api/delete_script.ts
+++ b/src/api/api/delete_script.ts
@@ -35,7 +35,23 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  delete_script: {
+    path: [
+      'id'
+    ],
+    body: [],
+    query: [
+      'master_timeout',
+      'timeout'
+    ]
+  }
+}
 
 /**
  * Delete a script or search template. Deletes a stored script or search template.
@@ -45,7 +61,10 @@ export default async function DeleteScriptApi (this: That, params: T.DeleteScrip
 export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteScriptResponse, unknown>>
 export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise<T.DeleteScriptResponse>
 export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['id']
+  const {
+    path: acceptedPath
+  } = acceptedParams.delete_script
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts
index ea301cac5..2a1dff7fc 100644
--- a/src/api/api/enrich.ts
+++ b/src/api/api/enrich.ts
@@ -35,12 +35,69 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class Enrich {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'enrich.delete_policy': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'enrich.execute_policy': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout',
+          'wait_for_completion'
+        ]
+      },
+      'enrich.get_policy': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'enrich.put_policy': {
+        path: [
+          'name'
+        ],
+        body: [
+          'geo_match',
+          'match',
+          'range'
+        ],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'enrich.stats': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      }
+    }
   }
 
   /**
@@ -51,7 +108,10 @@ export default class Enrich {
   async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichDeletePolicyResponse, unknown>>
   async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichDeletePolicyResponse>
   async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['enrich.delete_policy']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -93,7 +153,10 @@ export default class Enrich {
   async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichExecutePolicyResponse, unknown>>
   async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichExecutePolicyResponse>
   async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['enrich.execute_policy']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -135,7 +198,10 @@ export default class Enrich {
   async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichGetPolicyResponse, unknown>>
   async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichGetPolicyResponse>
   async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['enrich.get_policy']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -185,8 +251,12 @@ export default class Enrich {
   async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichPutPolicyResponse, unknown>>
   async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichPutPolicyResponse>
   async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const acceptedBody: string[] = ['geo_match', 'match', 'range']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['enrich.put_policy']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -208,8 +278,14 @@ export default class Enrich {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -232,7 +308,10 @@ export default class Enrich {
   async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichStatsResponse, unknown>>
   async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise<T.EnrichStatsResponse>
   async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['enrich.stats']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
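For the enrich API, the `match`, `geo_match`, and `range` keys listed in the table above are the body-level variants of a policy definition. A usage sketch, assuming an `es` client instance and an existing `users` source index:

// Define a match enrich policy; `match` is routed into the request body.
await es.enrich.putPolicy({
  name: 'users-policy',
  match: {
    indices: 'users',
    match_field: 'email',
    enrich_fields: ['first_name', 'last_name']
  }
})

// A policy must be executed before an enrich processor can use it.
await es.enrich.executePolicy({ name: 'users-policy', wait_for_completion: true })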
diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts
index 9f490aca9..e645386a6 100644
--- a/src/api/api/eql.ts
+++ b/src/api/api/eql.ts
@@ -35,12 +35,79 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class Eql {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'eql.delete': {
+        path: [
+          'id'
+        ],
+        body: [],
+        query: []
+      },
+      'eql.get': {
+        path: [
+          'id'
+        ],
+        body: [],
+        query: [
+          'keep_alive',
+          'wait_for_completion_timeout'
+        ]
+      },
+      'eql.get_status': {
+        path: [
+          'id'
+        ],
+        body: [],
+        query: []
+      },
+      'eql.search': {
+        path: [
+          'index'
+        ],
+        body: [
+          'query',
+          'case_sensitive',
+          'event_category_field',
+          'tiebreaker_field',
+          'timestamp_field',
+          'fetch_size',
+          'filter',
+          'keep_alive',
+          'keep_on_completion',
+          'wait_for_completion_timeout',
+          'allow_partial_search_results',
+          'allow_partial_sequence_results',
+          'size',
+          'fields',
+          'result_position',
+          'runtime_mappings',
+          'max_samples_per_key'
+        ],
+        query: [
+          'allow_no_indices',
+          'allow_partial_search_results',
+          'allow_partial_sequence_results',
+          'expand_wildcards',
+          'ignore_unavailable',
+          'keep_alive',
+          'keep_on_completion',
+          'wait_for_completion_timeout'
+        ]
+      }
+    }
   }
 
   /**
@@ -51,7 +118,10 @@ export default class Eql {
   async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlDeleteResponse, unknown>>
   async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise<T.EqlDeleteResponse>
   async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['eql.delete']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -93,7 +163,10 @@ export default class Eql {
   async get<TEvent = unknown> (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlGetResponse<TEvent>, unknown>>
   async get<TEvent = unknown> (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise<T.EqlGetResponse<TEvent>>
   async get<TEvent = unknown> (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['eql.get']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -135,7 +208,10 @@ export default class Eql {
   async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlGetStatusResponse, unknown>>
   async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<T.EqlGetStatusResponse>
   async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['eql.get_status']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -177,8 +253,12 @@ export default class Eql {
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlSearchResponse<TEvent>, unknown>>
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise<T.EqlSearchResponse<TEvent>>
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'allow_partial_search_results', 'allow_partial_sequence_results', 'size', 'fields', 'result_position', 'runtime_mappings', 'max_samples_per_key']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['eql.search']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -200,8 +280,14 @@ export default class Eql {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
this.acceptedParams['esql.async_query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +153,14 @@ export default class Esql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -95,7 +180,10 @@ export default class Esql { async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -137,7 +225,10 @@ export default class Esql { async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -179,7 +270,10 @@ export default class Esql { async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_stop'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -221,8 +315,12 @@ export default class Esql { async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['esql.query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {}
@@ -244,8 +342,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }

diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts
index 0c5f99bde..50bb2b07f 100644
--- a/src/api/api/exists.ts
+++ b/src/api/api/exists.ts
@@ -35,7 +35,32 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'

-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  exists: {
+    path: [
+      'id',
+      'index'
+    ],
+    body: [],
+    query: [
+      'preference',
+      'realtime',
+      'refresh',
+      'routing',
+      '_source',
+      '_source_excludes',
+      '_source_includes',
+      'stored_fields',
+      'version',
+      'version_type'
+    ]
+  }
+}

 /**
  * Check a document. Verify that a document exists. For example, check to see if a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the document exists, the API returns a status code of `200 - OK`. If the document doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You can use the `version` parameter to check the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.
@@ -45,7 +70,10 @@ export default async function ExistsApi (this: That, params: T.ExistsRequest, op
 export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ExistsResponse, unknown>>
 export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise<T.ExistsResponse>
 export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['id', 'index']
+  const {
+    path: acceptedPath
+  } = acceptedParams.exists
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts
index 750302a6f..44009ea87 100644
--- a/src/api/api/exists_source.ts
+++ b/src/api/api/exists_source.ts
@@ -35,7 +35,31 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'

-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  exists_source: {
+    path: [
+      'id',
+      'index'
+    ],
+    body: [],
+    query: [
+      'preference',
+      'realtime',
+      'refresh',
+      'routing',
+      '_source',
+      '_source_excludes',
+      '_source_includes',
+      'version',
+      'version_type'
+    ]
+  }
+}

 /**
  * Check for a document source. Check whether a document source exists in an index. For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not available if it is disabled in the mapping.
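// A minimal usage sketch of the two existence checks documented above
// (illustration only, not part of this patch; the node URL, index, and
// document IDs are assumptions for the example).
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })
// HEAD my-index-000001/_doc/0 -> resolves to true or false
const docExists = await client.exists({ index: 'my-index-000001', id: '0' })
// HEAD my-index-000001/_source/1 -> false when _source is disabled in the mapping
const sourceExists = await client.existsSource({ index: 'my-index-000001', id: '1' })
console.log(docExists, sourceExists)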
@@ -45,7 +69,10 @@ export default async function ExistsSourceApi (this: That, params: T.ExistsSourc export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] + const { + path: acceptedPath + } = acceptedParams.exists_source + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index 16150530b..727c5dcbe 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -35,7 +35,38 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + explain: { + path: [ + 'id', + 'index' + ], + body: [ + 'query' + ], + query: [ + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'lenient', + 'preference', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'q' + ] + } +} /** * Explain a document match result. Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document. @@ -45,8 +76,12 @@ export default async function ExplainApi (this: That, param export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise> export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['query'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.explain + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +103,14 @@ export default async function ExplainApi (this: That, param } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/features.ts b/src/api/api/features.ts index 670d84cda..ee12f298a 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -35,12 +35,33 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Features { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'features.get_features': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'features.reset_features': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** @@ -51,7 +72,10 @@ export default class Features { async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['features.get_features'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -91,7 +115,10 @@ export default class Features { async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['features.reset_features'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index de9d61a0e..f863e52a3 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -35,7 +35,35 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + field_caps: { + path: [ + 'index' + ], + body: [ + 'fields', + 'index_filter', + 'runtime_mappings' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'fields', + 'ignore_unavailable', + 'include_unmapped', + 'filters', + 'types', + 'include_empty_fields' + ] + } +} /** * Get the field capabilities. Get information about the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. 
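// A usage sketch for the field capabilities API described above (illustration
// only; the node URL and index pattern are assumptions for the example).
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })
const caps = await client.fieldCaps({
  index: 'my-index-*',
  fields: ['title', 'rating'],
  include_unmapped: true // also report fields that are unmapped in some indices
})
// caps.fields maps each field to its per-type capabilities,
// e.g. whether it is searchable and aggregatable across the matched indices.
console.log(caps.fields)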
@@ -45,8 +73,12 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['fields', 'index_filter', 'runtime_mappings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.field_caps + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +101,14 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index 042fcbfd1..d14151237 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -35,12 +35,159 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Fleet { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'fleet.delete_secret': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'fleet.get_secret': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'fleet.global_checkpoints': { + path: [ + 'index' + ], + body: [], + query: [ + 'wait_for_advance', + 'wait_for_index', + 'checkpoints', + 'timeout' + ] + }, + 'fleet.msearch': { + path: [ + 'index' + ], + body: [ + 'searches' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + }, + 'fleet.post_secret': { + path: [], + body: [], + query: [] + }, + 'fleet.search': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 
'max_concurrent_shard_requests', + 'preference', + 'pre_filter_shard_size', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + } + } } /** @@ -50,7 +197,10 @@ export default class Fleet { async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['fleet.delete_secret'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -91,7 +241,10 @@ export default class Fleet { async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['fleet.get_secret'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -133,7 +286,10 @@ export default class Fleet { async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise> async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['fleet.global_checkpoints'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -175,8 +331,12 @@ export default class Fleet { async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise> async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['searches'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['fleet.msearch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -188,8 +348,14 @@ export default class Fleet { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -218,7 +384,10 @@ export default class Fleet { async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['fleet.post_secret'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -257,8 +426,12 @@ export default class Fleet { async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise> async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['fleet.search'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -280,8 +453,14 @@ export default class Fleet { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/get.ts b/src/api/api/get.ts index 3cb82914a..cb55c656b 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -35,7 +35,33 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} /** * Get a document by its ID. Get a document and its source or stored fields from an index. By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the `realtime` parameter to false. **Source filtering** By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. 
You can turn off `_source` retrieval by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false ``` If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead Both parameters take a comma separated list of fields or wildcard expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities ``` If you only want to specify includes, you can use a shorter notation: ``` GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified. **Distributed** The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be. **Versioning support** You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. @@ -45,7 +71,10 @@ export default async function GetApi (this: That, params: T export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise> export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] + const { + path: acceptedPath + } = acceptedParams.get + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts index d079ba650..694f144a6 100644 --- a/src/api/api/get_script.ts +++ b/src/api/api/get_script.ts @@ -35,7 +35,22 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_script: { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout' + ] + } +} /** * Get a script or search template. Retrieves a stored script or search template. 
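// A usage sketch for the document GET examples above (illustration only; the
// node URL and document shape are assumptions, while the routing and
// source-filtering values are taken from the doc text).
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })
interface MyDoc { user: { id: string }, entities?: unknown }
const result = await client.get<MyDoc>({
  index: 'my-index-000001',
  id: '2',
  routing: 'user1',            // required when the document was indexed with routing
  _source_includes: '*.id',    // partial retrieval to save network overhead
  _source_excludes: 'entities'
})
if (result.found) console.log(result._source?.user.id)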
@@ -45,7 +60,10 @@ export default async function GetScriptApi (this: That, params: T.GetScriptReque
 export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptResponse, unknown>>
 export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise<T.GetScriptResponse>
 export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['id']
+  const {
+    path: acceptedPath
+  } = acceptedParams.get_script
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts
index b263ed089..a33514bbf 100644
--- a/src/api/api/get_script_context.ts
+++ b/src/api/api/get_script_context.ts
@@ -35,7 +35,18 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'

-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  get_script_context: {
+    path: [],
+    body: [],
+    query: []
+  }
+}

 /**
  * Get script contexts. Get a list of supported script contexts and their methods.
@@ -45,7 +56,10 @@ export default async function GetScriptContextApi (this: That, params?: T.GetScr
 export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptContextResponse, unknown>>
 export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise<T.GetScriptContextResponse>
 export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = []
+  const {
+    path: acceptedPath
+  } = acceptedParams.get_script_context
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts
index 7b52735c4..f8e7f14f5 100644
--- a/src/api/api/get_script_languages.ts
+++ b/src/api/api/get_script_languages.ts
@@ -35,7 +35,18 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'

-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  get_script_languages: {
+    path: [],
+    body: [],
+    query: []
+  }
+}

 /**
  * Get script languages. Get a list of available script types, languages, and contexts.
@@ -45,7 +56,10 @@ export default async function GetScriptLanguagesApi (this: That, params?: T.GetS
 export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptLanguagesResponse, unknown>>
 export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<T.GetScriptLanguagesResponse>
 export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = []
+  const {
+    path: acceptedPath
+  } = acceptedParams.get_script_languages
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index a4eef8c97..b9c7191b5 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -35,7 +35,32 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_source: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} /** * Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 ``` You can use the source filtering parameters to control which parts of the `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities ``` @@ -45,7 +70,10 @@ export default async function GetSourceApi (this: That, par export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise> export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] + const { + path: acceptedPath + } = acceptedParams.get_source + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index 33534fe4a..a509820dd 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -35,12 +35,36 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Graph { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'graph.explore': { + path: [ + 'index' + ], + body: [ + 'connections', + 'controls', + 'query', + 'vertices' + ], + query: [ + 'routing', + 'timeout' + ] + } + } } /** @@ -51,8 +75,12 @@ export default class Graph { async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise> async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['connections', 'controls', 'query', 'vertices'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['graph.explore'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +102,14 @@ export default class Graph { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts index 51a48a265..8a7539b04 100644 --- a/src/api/api/health_report.ts +++ b/src/api/api/health_report.ts @@ -35,7 +35,24 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + health_report: { + path: [ + 'feature' + ], + body: [], + query: [ + 'timeout', + 'verbose', + 'size' + ] + } +} /** * Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. The cluster’s status is controlled by the worst indicator status. In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. @@ -45,7 +62,10 @@ export default async function HealthReportApi (this: That, params?: T.HealthRepo export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['feature'] + const { + path: acceptedPath + } = acceptedParams.health_report + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index 1c097071c..dd4537454 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -35,12 +35,120 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ilm { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ilm.delete_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.explain_lifecycle': { + path: [ + 'index' + ], + body: [], + query: [ + 'only_errors', + 'only_managed', + 'master_timeout' + ] + }, + 'ilm.get_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.get_status': { + path: [], + body: [], + query: [] + }, + 'ilm.migrate_to_data_tiers': { + path: [], + body: [ + 'legacy_template_to_delete', + 'node_attribute' + ], + query: [ + 'dry_run', + 'master_timeout' + ] + }, + 'ilm.move_to_step': { + path: [ + 'index' + ], + body: [ + 'current_step', + 'next_step' + ], + query: [] + }, + 'ilm.put_lifecycle': { + path: [ + 'name' + ], + body: [ + 'policy' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.remove_policy': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'ilm.retry': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'ilm.start': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.stop': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** @@ -51,7 +159,10 @@ export default class Ilm { async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.delete_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +204,10 @@ export default class Ilm { async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.explain_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -135,7 +249,10 @@ export default class Ilm { async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.get_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -185,7 +302,10 @@ export default class Ilm { async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ilm.get_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -225,8 +345,12 @@ export default class Ilm { async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['legacy_template_to_delete', 'node_attribute'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.migrate_to_data_tiers'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -249,8 +373,14 @@ export default class Ilm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -270,8 +400,12 @@ export default class Ilm { async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise> async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['current_step', 'next_step'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.move_to_step'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -293,8 +427,14 @@ export default class Ilm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -317,8 +457,12 @@ export default class Ilm { async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['policy'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.put_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -340,8 +484,14 @@ export default class Ilm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -364,7 +514,10 @@ export default class Ilm { async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.remove_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -406,7 +559,10 @@ export default class Ilm { async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise> async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.retry'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -448,7 +604,10 @@ export default class Ilm { async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ilm.start'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -488,7 +647,10 @@ export default class Ilm { async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ilm.stop'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/index.ts b/src/api/api/index.ts index bcd3842eb..20adfbc81 100644 --- a/src/api/api/index.ts +++ b/src/api/api/index.ts @@ -35,7 +35,38 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + index: { + path: [ + 'id', + 'index' + ], + body: [ + 'document' + ], + query: [ + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'op_type', + 'pipeline', + 'refresh', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'require_alias' + ] + } +} /** * Create or update a document in an index. Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. NOTE: You cannot use this API to send update requests for existing documents in a data stream. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. * To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. 
**Optimistic concurrency control** Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter. Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. 
It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. **No operation (noop) updates** When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source. There isn't a definitive rule for when noop updates aren't acceptable. It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. **Versioning** Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks. When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external { "user": { "id": "elkbee" } } In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. 
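// A usage sketch for the external versioning and optimistic concurrency
// behavior described above (illustration only; the node URL is an assumption,
// and the seq_no/primary_term values are hypothetical; in practice they come
// from a previous read of the document).
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// External versioning: indexed only if 2 is greater than the stored version,
// otherwise the request fails with a 409 version conflict.
await client.index({
  index: 'my-index-000001',
  id: '1',
  version: 2,
  version_type: 'external',
  document: { user: { id: 'elkbee' } }
})

// Optimistic concurrency control: rejected with a 409 if another write
// changed the document after the seq_no/primary_term below were observed.
await client.index({
  index: 'my-index-000001',
  id: '1',
  if_seq_no: 3,
  if_primary_term: 1,
  document: { user: { id: 'elkbee' } }
})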
@@ -45,8 +76,12 @@ export default async function IndexApi (this: That, params: export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptions): Promise export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['document'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.index + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -58,8 +93,14 @@ export default async function IndexApi (this: That, params: } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 8af3fb23d..85cf78aea 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -35,12 +35,834 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Indices { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'indices.add_block': { + path: [ + 'index', + 'block' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + }, + 'indices.analyze': { + path: [ + 'index' + ], + body: [ + 'analyzer', + 'attributes', + 'char_filter', + 'explain', + 'field', + 'filter', + 'normalizer', + 'text', + 'tokenizer' + ], + query: [] + }, + 'indices.cancel_migrate_reindex': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.clear_cache': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'fielddata', + 'fields', + 'ignore_unavailable', + 'query', + 'request' + ] + }, + 'indices.clone': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.close': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.create': { + path: [ + 'index' + ], + body: [ + 'aliases', + 'mappings', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.create_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.create_from': { + path: [ + 'source', + 'dest' + ], + body: [ + 'create_from' + ], + query: [] + }, + 'indices.data_streams_stats': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards' + ] + }, + 'indices.delete': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 
'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_alias': { + path: [ + 'index', + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_data_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'expand_wildcards' + ] + }, + 'indices.delete_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.disk_usage': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'run_expensive_tasks' + ] + }, + 'indices.downsample': { + path: [ + 'index', + 'target_index' + ], + body: [ + 'config' + ], + query: [] + }, + 'indices.exists': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local' + ] + }, + 'indices.exists_alias': { + path: [ + 'name', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'indices.exists_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'indices.exists_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'local', + 'master_timeout' + ] + }, + 'indices.explain_data_lifecycle': { + path: [ + 'index' + ], + body: [], + query: [ + 'include_defaults', + 'master_timeout' + ] + }, + 'indices.field_usage_stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'fields', + 'wait_for_active_shards' + ] + }, + 'indices.flush': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'force', + 'ignore_unavailable', + 'wait_if_ongoing' + ] + }, + 'indices.forcemerge': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'max_num_segments', + 'only_expunge_deletes', + 'wait_for_completion' + ] + }, + 'indices.get': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout', + 'features' + ] + }, + 'indices.get_alias': { + path: [ + 'name', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'indices.get_data_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'include_defaults', + 'master_timeout' + ] + }, + 'indices.get_data_lifecycle_stats': { + path: [], + body: [], + query: [] + }, + 'indices.get_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'include_defaults', + 'master_timeout', + 'verbose' + ] + }, + 'indices.get_field_mapping': { + path: [ + 'fields', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'include_defaults' + ] + }, + 'indices.get_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'flat_settings', + 'master_timeout', + 'include_defaults' + ] + }, + 
'indices.get_mapping': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout' + ] + }, + 'indices.get_migrate_reindex_status': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.get_settings': { + path: [ + 'index', + 'name' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout' + ] + }, + 'indices.get_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'local', + 'master_timeout' + ] + }, + 'indices.migrate_reindex': { + path: [], + body: [ + 'reindex' + ], + query: [] + }, + 'indices.migrate_to_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.modify_data_stream': { + path: [], + body: [ + 'actions' + ], + query: [] + }, + 'indices.open': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.promote_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'indices.put_alias': { + path: [ + 'index', + 'name' + ], + body: [ + 'filter', + 'index_routing', + 'is_write_index', + 'routing', + 'search_routing' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_data_lifecycle': { + path: [ + 'name' + ], + body: [ + 'data_retention', + 'downsampling', + 'enabled' + ], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_index_template': { + path: [ + 'name' + ], + body: [ + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'allow_auto_create', + 'ignore_missing_component_templates', + 'deprecated' + ], + query: [ + 'create', + 'master_timeout', + 'cause' + ] + }, + 'indices.put_mapping': { + path: [ + 'index' + ], + body: [ + 'date_detection', + 'dynamic', + 'dynamic_date_formats', + 'dynamic_templates', + '_field_names', + '_meta', + 'numeric_detection', + 'properties', + '_routing', + '_source', + 'runtime' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'write_index_only' + ] + }, + 'indices.put_settings': { + path: [ + 'index' + ], + body: [ + 'settings' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'master_timeout', + 'preserve_existing', + 'timeout' + ] + }, + 'indices.put_template': { + path: [ + 'name' + ], + body: [ + 'aliases', + 'index_patterns', + 'mappings', + 'order', + 'settings', + 'version' + ], + query: [ + 'create', + 'master_timeout', + 'order', + 'cause' + ] + }, + 'indices.recovery': { + path: [ + 'index' + ], + body: [], + query: [ + 'active_only', + 'detailed' + ] + }, + 'indices.refresh': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.reload_search_analyzers': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.resolve_cluster': { + path: [ + 'name' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'timeout' + ] + }, + 'indices.resolve_index': { + path: [ + 'name' + ], + body: [], + query: [ + 
'expand_wildcards', + 'ignore_unavailable', + 'allow_no_indices' + ] + }, + 'indices.rollover': { + path: [ + 'alias', + 'new_index' + ], + body: [ + 'aliases', + 'conditions', + 'mappings', + 'settings' + ], + query: [ + 'dry_run', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.segments': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.shard_stores': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'status' + ] + }, + 'indices.shrink': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.simulate_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.simulate_template': { + path: [ + 'name' + ], + body: [ + 'allow_auto_create', + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'ignore_missing_component_templates', + 'deprecated' + ], + query: [ + 'create', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.split': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.stats': { + path: [ + 'metric', + 'index' + ], + body: [], + query: [ + 'completion_fields', + 'expand_wildcards', + 'fielddata_fields', + 'fields', + 'forbid_closed_indices', + 'groups', + 'include_segment_file_sizes', + 'include_unloaded_segments', + 'level' + ] + }, + 'indices.update_aliases': { + path: [], + body: [ + 'actions' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.validate_query': { + path: [ + 'index' + ], + body: [ + 'query' + ], + query: [ + 'allow_no_indices', + 'all_shards', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'explain', + 'ignore_unavailable', + 'lenient', + 'rewrite', + 'q' + ] + } + } } /** @@ -51,7 +873,10 @@ export default class Indices { async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise> async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'block'] + const { + path: acceptedPath + } = this.acceptedParams['indices.add_block'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -94,8 +919,12 @@ export default class Indices { async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['analyzer', 'attributes', 'char_filter', 'explain', 'field', 'filter', 'normalizer', 'text', 'tokenizer'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.analyze'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -118,8 +947,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -149,7 +984,10 @@ export default class Indices { async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.cancel_migrate_reindex'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -191,7 +1029,10 @@ export default class Indices { async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.clear_cache'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -241,8 +1082,12 @@ export default class Indices { async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target'] - const acceptedBody: string[] = ['aliases', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.clone'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -264,8 +1109,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -289,7 +1140,10 @@ export default class Indices { async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise> async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.close'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -331,8 +1185,12 @@ export default class Indices { async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aliases', 'mappings', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.create'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -354,8 +1212,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -378,7 +1242,10 @@ export default class Indices { async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.create_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -420,8 +1287,12 @@ export default class Indices { async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithMeta): Promise> async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['source', 'dest'] - const acceptedBody: string[] = ['create_from'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.create_from'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -433,8 +1304,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -458,7 +1335,10 @@ export default class Indices { async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.data_streams_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -508,7 +1388,10 @@ export default class Indices { async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -550,7 +1433,10 @@ export default class Indices { async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -600,7 +1486,10 @@ export default class Indices { async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -642,7 +1531,10 @@ export default class Indices { async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -684,7 +1576,10 @@ export default class Indices { async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -726,7 +1621,10 @@ export default class Indices { async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -768,7 +1666,10 @@ export default class Indices { async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.disk_usage'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -810,8 +1711,12 @@ export default class Indices { async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithMeta): Promise> async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target_index'] - const acceptedBody: string[] = ['config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.downsample'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -823,8 +1728,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -848,7 +1759,10 @@ export default class Indices { async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -890,7 +1804,10 @@ export default class Indices { async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -940,7 +1857,10 @@ export default class Indices { async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -982,7 +1902,10 @@ export default class Indices { async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1024,7 +1947,10 @@ export default class Indices { async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.explain_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1066,7 +1992,10 @@ export default class Indices { async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.field_usage_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1108,7 +2037,10 @@ export default class Indices { async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise> async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.flush'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1158,7 +2090,10 @@ export default class Indices { async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise> async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.forcemerge'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1208,7 +2143,10 @@ export default class Indices { async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1250,7 +2188,10 @@ export default class Indices { async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1307,7 +2248,10 @@ export default class Indices { async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1349,7 +2293,10 @@ export default class Indices { async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_lifecycle_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1389,7 +2336,10 @@ export default class Indices { async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1439,7 +2389,10 @@ export default class Indices { async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['fields', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_field_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1489,7 +2442,10 @@ export default class Indices { async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1539,7 +2495,10 @@ export default class Indices { async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1589,7 +2548,10 @@ export default class Indices { async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_migrate_reindex_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1631,7 +2593,10 @@ export default class Indices { async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1688,7 +2653,10 @@ export default class Indices { async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1738,8 +2706,12 @@ export default class Indices { async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['reindex'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.migrate_reindex'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1751,8 +2723,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1772,7 +2750,10 @@ export default class Indices { async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.migrate_to_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1814,8 +2795,12 @@ export default class Indices { async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['actions'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.modify_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1837,8 +2822,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1858,7 +2849,10 @@ export default class Indices { async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.open'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1900,7 +2894,10 @@ export default class Indices { async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.promote_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1942,8 +2939,12 @@ export default class Indices { async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] - const acceptedBody: string[] = ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1965,8 +2966,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1997,8 +3004,12 @@ export default class Indices { async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['data_retention', 'downsampling', 'enabled'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2020,8 +3031,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2044,8 +3061,12 @@ export default class Indices { async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'allow_auto_create', 'ignore_missing_component_templates', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2067,8 +3088,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2091,8 +3118,12 @@ export default class Indices { async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['date_detection', 'dynamic', 'dynamic_date_formats', 'dynamic_templates', '_field_names', '_meta', 'numeric_detection', 'properties', '_routing', '_source', 'runtime'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2114,8 +3145,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2138,8 +3175,12 @@ export default class Indices { async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2151,8 +3192,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2182,8 +3229,12 @@ export default class Indices { async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'order', 'settings', 'version'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2205,8 +3256,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2229,7 +3286,10 @@ export default class Indices { async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.recovery'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2279,7 +3339,10 @@ export default class Indices { async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise> async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.refresh'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2329,7 +3392,10 @@ export default class Indices { async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise> async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.reload_search_analyzers'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2371,7 +3437,10 @@ export default class Indices { async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise> async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.resolve_cluster'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2421,7 +3490,10 @@ export default class Indices { async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.resolve_index'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2463,8 +3535,12 @@ export default class Indices { async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise> async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['alias', 'new_index'] - const acceptedBody: string[] = ['aliases', 'conditions', 'mappings', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.rollover'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2486,8 +3562,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2518,7 +3600,10 @@ export default class Indices { async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.segments'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2568,7 +3653,10 @@ export default class Indices { async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise> async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.shard_stores'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2618,8 +3706,12 @@ export default class Indices { async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise> async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target'] - const acceptedBody: string[] = ['aliases', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.shrink'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2641,8 +3733,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2666,7 +3764,10 @@ export default class Indices { async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.simulate_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2708,8 +3809,12 @@ export default class Indices { async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['allow_auto_create', 'index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'ignore_missing_component_templates', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.simulate_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2732,8 +3837,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2763,8 +3874,12 @@ export default class Indices { async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise> async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target'] - const acceptedBody: string[] = ['aliases', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.split'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2786,8 +3901,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2811,7 +3932,10 @@ export default class Indices { async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['metric', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2868,8 +3992,12 @@ export default class Indices { async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['actions'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.update_aliases'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2892,8 +4020,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2913,8 +4047,12 @@ export default class Indices { async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['query'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.validate_query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2937,8 +4075,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index b7c9fb55a..6b3309021 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -35,12 +35,103 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Inference { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'inference.delete': { + path: [ + 'task_type', + 'inference_id' + ], + body: [], + query: [ + 'dry_run', + 'force' + ] + }, + 'inference.get': { + path: [ + 'task_type', + 'inference_id' + ], + body: [], + query: [] + }, + 'inference.inference': { + path: [ + 'task_type', + 'inference_id' + ], + body: [ + 'query', + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put': { + path: [ + 'task_type', + 'inference_id' + ], + body: [ + 'inference_config' + ], + query: [] + }, + 'inference.stream_inference': { + path: [ + 'inference_id', + 'task_type' + ], + body: [ + 'input' + ], + query: [] + }, + 'inference.unified_inference': { + path: [ + 'task_type', + 'inference_id' + ], + body: [ + 'messages', + 'model', + 'max_completion_tokens', + 'stop', + 'temperature', + 'tool_choice', + 'tools', + 'top_p' + ], + query: [ + 'timeout' + ] + }, + 'inference.update': { + path: [ + 'inference_id', + 'task_type' + ], + body: [ + 'inference_config' + ], + query: [] + } + } } /** @@ -51,7 +142,10 @@ export default class Inference { async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] + const { + path: acceptedPath + } = this.acceptedParams['inference.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -101,7 +195,10 @@ export default class Inference { async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] + const { + path: acceptedPath + } = this.acceptedParams['inference.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -155,8 +252,12 @@ export default class Inference { async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['query', 'input', 'task_settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.inference'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -178,8 +279,14 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -210,8 +317,12 @@ export default class Inference { async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise> async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['inference_config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -223,8 +334,14 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -255,8 +372,12 @@ export default class Inference { async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id', 'task_type'] - const acceptedBody: string[] = ['input'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.stream_inference'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -278,8 +399,14 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -310,8 +437,12 @@ export default class Inference { async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['messages', 'model', 'max_completion_tokens', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.unified_inference'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -333,8 +464,14 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -365,8 +502,12 @@ export default class Inference { async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithMeta): Promise> async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id', 'task_type'] - const acceptedBody: string[] = ['inference_config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.update'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -378,8 +519,14 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/info.ts b/src/api/api/info.ts index 1681fe6f3..ebbdb0fac 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -35,7 +35,18 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + info: { + path: [], + body: [], + query: [] + } +} /** * Get cluster info. Get basic build, version, and cluster information. 
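[Editor's note] For orientation while reading these generated wrappers: the Info API whose doc comment ends the hunk above takes no path, body, or API-specific query parameters, which is why its acceptedParams entry lists empty arrays. A minimal usage sketch follows; the node URL is a placeholder and real deployments would also pass auth options.

    import { Client } from '@elastic/elasticsearch'

    // Placeholder connection details; substitute your own node URL and auth.
    const client = new Client({ node: 'http://localhost:9200' })

    async function showClusterInfo (): Promise<void> {
      // No arguments needed: InfoApi accepts only the common query
      // parameters (error_trace, filter_path, human, pretty).
      const info = await client.info()
      console.log(info.version.number, info.cluster_name)
    }

    showClusterInfo().catch(console.error)
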
@@ -45,7 +56,10 @@ export default async function InfoApi (this: That, params?: T.InfoRequest, optio export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = acceptedParams.info + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index 51ad39aff..502b4c0cf 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -35,12 +35,142 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ingest { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ingest.delete_geoip_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.delete_ip_location_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.delete_pipeline': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.geo_ip_stats': { + path: [], + body: [], + query: [] + }, + 'ingest.get_geoip_database': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'ingest.get_ip_location_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ingest.get_pipeline': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'summary' + ] + }, + 'ingest.processor_grok': { + path: [], + body: [], + query: [] + }, + 'ingest.put_geoip_database': { + path: [ + 'id' + ], + body: [ + 'name', + 'maxmind' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.put_ip_location_database': { + path: [ + 'id' + ], + body: [ + 'configuration' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.put_pipeline': { + path: [ + 'id' + ], + body: [ + '_meta', + 'description', + 'on_failure', + 'processors', + 'version', + 'deprecated' + ], + query: [ + 'master_timeout', + 'timeout', + 'if_version' + ] + }, + 'ingest.simulate': { + path: [ + 'id' + ], + body: [ + 'docs', + 'pipeline' + ], + query: [ + 'verbose' + ] + } + } } /** @@ -51,7 +181,10 @@ export default class Ingest { async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_geoip_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -93,7 +226,10 @@ export default class Ingest { async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_ip_location_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -135,7 +271,10 @@ export default class Ingest { async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -177,7 +316,10 @@ export default class Ingest { async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ingest.geo_ip_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -217,7 +359,10 @@ export default class Ingest { async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_geoip_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -267,7 +412,10 @@ export default class Ingest { async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_ip_location_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -317,7 +465,10 @@ export default class Ingest { async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -367,7 +518,10 @@ export default class Ingest { async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise> async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ingest.processor_grok'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -407,8 +561,12 @@ export default class Ingest { async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['name', 'maxmind'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_geoip_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -430,8 +588,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -454,8 +618,12 @@ export default class Ingest { async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['configuration'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_ip_location_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -467,8 +635,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -491,8 +665,12 @@ export default class Ingest { async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -514,8 +692,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -538,8 +722,12 @@ export default class Ingest { async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['docs', 'pipeline'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.simulate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -561,8 +749,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index d1a319461..a24519479 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -35,7 +35,31 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + knn_search: { + path: [ + 'index' + ], + body: [ + '_source', + 'docvalue_fields', + 'stored_fields', + 'fields', + 'filter', + 'knn' + ], + query: [ + 'routing' + ] + } +} /** * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. 
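[Editor's note] The NOTE in the doc comment above directs users away from this deprecated endpoint and toward the `knn` option of the search API. As a rough sketch of what that migration looks like (the index name, field name, and vector values below are made up for illustration):

    import { Client } from '@elastic/elasticsearch'

    // Placeholder node URL; real deployments also need auth options.
    const client = new Client({ node: 'http://localhost:9200' })

    async function knnViaSearch (): Promise<void> {
      // Approximate kNN through the search API's `knn` option. The optional
      // `filter` restricts candidates, so the top k hits also satisfy it, and
      // `hits.total` reflects num_candidates * num_shards as noted above.
      const result = await client.search({
        index: 'my-vectors',          // hypothetical index name
        knn: {
          field: 'embedding',         // hypothetical dense_vector field
          query_vector: [0.1, 0.2, 0.3],
          k: 10,
          num_candidates: 100,
          filter: { term: { status: 'published' } }
        }
      })
      console.log(result.hits.hits.map(hit => hit._score))
    }

    knnViaSearch().catch(console.error)
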
Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. A kNN search response has the exact same structure as a search API response. However, certain sections have a meaning specific to kNN search: * The document `_score` is determined by the similarity between the query and document vector. * The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. @@ -45,8 +69,12 @@ export default async function KnnSearchApi (this: That, par export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise> export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['_source', 'docvalue_fields', 'stored_fields', 'fields', 'filter', 'knn'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.knn_search + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +96,14 @@ export default async function KnnSearchApi (this: That, par } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/license.ts b/src/api/api/license.ts index b80733dd9..15655585a 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -35,12 +35,77 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class License { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'license.delete': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'license.get': { + path: [], + body: [], + query: [ + 'accept_enterprise', + 'local' + ] + }, + 'license.get_basic_status': { + path: [], + body: [], + query: [] + }, + 'license.get_trial_status': { + path: [], + body: [], + query: [] + }, + 'license.post': { + path: [], + body: [ + 'license', + 'licenses' + ], + query: [ + 'acknowledge', + 'master_timeout', + 'timeout' + ] + }, + 'license.post_start_basic': { + path: [], + body: [], + query: [ + 'acknowledge', + 'master_timeout', + 'timeout' + ] + }, + 'license.post_start_trial': { + path: [], + body: [], + query: [ + 'acknowledge', + 'type_query_string', + 'master_timeout' + ] + } + } } /** @@ -51,7 +116,10 @@ export default class License { async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -91,7 +159,10 @@ export default class License { async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -131,7 +202,10 @@ export default class License { async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.get_basic_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -171,7 +245,10 @@ export default class License { async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.get_trial_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -211,8 +288,12 @@ export default class License { async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['license', 'licenses'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['license.post'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -235,8 +316,14 @@ export default class License { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -256,7 +343,10 @@ export default class License { async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise> async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.post_start_basic'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -296,7 +386,10 @@ export default class License { async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise> async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.post_start_trial'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index df33e03ac..3434c0429 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -35,12 +35,44 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Logstash { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'logstash.delete_pipeline': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'logstash.get_pipeline': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'logstash.put_pipeline': { + path: [ + 'id' + ], + body: [ + 'pipeline' + ], + query: [] + } + } } /** @@ -51,7 +83,10 @@ export default class Logstash { async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['logstash.delete_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +128,10 @@ export default class Logstash { async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['logstash.get_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -143,8 +181,12 @@ export default class Logstash { async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['pipeline'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['logstash.put_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -156,8 +198,14 @@ export default class Logstash { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts index c254d5fd8..0a37d3b40 100644 --- a/src/api/api/mget.ts +++ b/src/api/api/mget.ts @@ -35,7 +35,35 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + mget: { + path: [ + 'index' + ], + body: [ + 'docs', + 'ids' + ], + query: [ + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields' + ] + } +} /** * Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. **Filter source fields** By default, the `_source` field is returned for every document (if stored). Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. **Get stored fields** Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. Any requested fields that are not stored are ignored. You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. @@ -45,8 +73,12 @@ export default async function MgetApi (this: That, params?: export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise> export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['docs', 'ids'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.mget + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +101,14 @@ export default async function MgetApi (this: That, params?: } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts index 5ddf19b7d..28c0188a7 100644 --- a/src/api/api/migration.ts +++ b/src/api/api/migration.ts @@ -35,12 +35,36 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Migration { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'migration.deprecations': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'migration.get_feature_upgrade_status': { + path: [], + body: [], + query: [] + }, + 'migration.post_feature_upgrade': { + path: [], + body: [], + query: [] + } + } } /** @@ -51,7 +75,10 @@ export default class Migration { async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['migration.deprecations'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -101,7 +128,10 @@ export default class Migration { async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['migration.get_feature_upgrade_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -141,7 +171,10 @@ export default class Migration { async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise> async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['migration.post_feature_upgrade'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 282fc38a5..677f3e54f 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -35,12 +35,958 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ml { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ml.clear_trained_model_deployment_cache': { + path: [ + 'model_id' + ], + body: [], + query: [] + }, + 'ml.close_job': { + path: [ + 'job_id' + ], + body: [ + 'allow_no_match', + 'force', + 'timeout' + ], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.delete_calendar': { + path: [ + 'calendar_id' + ], + body: [], + query: [] + }, + 'ml.delete_calendar_event': { + path: [ + 'calendar_id', + 'event_id' + ], + body: [], + query: [] + }, + 'ml.delete_calendar_job': { + path: [ + 'calendar_id', + 'job_id' + ], + body: [], + query: [] + }, + 'ml.delete_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'ml.delete_datafeed': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'force' + ] + }, + 'ml.delete_expired_data': { + path: [ + 'job_id' + ], + body: [ + 'requests_per_second', + 'timeout' + ], + query: [ + 'requests_per_second', + 'timeout' + ] + }, + 'ml.delete_filter': { + path: [ + 'filter_id' + ], + body: [], + query: [] + }, + 'ml.delete_forecast': { + path: [ + 'job_id', + 'forecast_id' + ], + body: [], + query: [ + 'allow_no_forecasts', + 'timeout' + ] + }, + 'ml.delete_job': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'force', + 'delete_user_annotations', + 'wait_for_completion' + ] + }, + 'ml.delete_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [] + }, + 'ml.delete_trained_model': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'ml.delete_trained_model_alias': { + path: [ + 'model_alias', + 'model_id' + ], + body: [], + query: [] + }, + 'ml.estimate_model_memory': { + path: [], + body: [ + 'analysis_config', + 'max_bucket_cardinality', + 'overall_cardinality' + ], + query: [] + }, + 'ml.evaluate_data_frame': { + path: [], + body: [ + 'evaluation', + 'index', + 'query' + ], + query: [] + }, + 'ml.explain_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'source', + 'dest', + 'analysis', + 'description', + 'model_memory_limit', + 'max_num_threads', + 'analyzed_fields', + 'allow_lazy_start' + ], + query: [] + }, + 'ml.flush_job': { + path: [ + 'job_id' + ], + body: [ + 'advance_time', + 'calc_interim', + 'end', + 'skip_time', + 'start' + ], + query: [ + 'advance_time', + 'calc_interim', + 'end', + 'skip_time', + 'start' + ] + }, + 'ml.forecast': { + path: [ + 'job_id' + ], + body: [ + 'duration', + 'expires_in', + 'max_model_memory' + ], + query: [ + 'duration', + 'expires_in', + 'max_model_memory' + ] + }, + 'ml.get_buckets': { + path: [ + 'job_id', + 'timestamp' + ], + body: [ + 'anomaly_score', + 'desc', + 'end', + 'exclude_interim', + 'expand', + 'page', + 'sort', + 'start' + ], + query: [ + 'anomaly_score', + 'desc', + 'end', + 'exclude_interim', + 'expand', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_calendar_events': { + path: [ + 'calendar_id' + ], + body: [], + query: [ + 'end', + 
'from', + 'job_id', + 'size', + 'start' + ] + }, + 'ml.get_calendars': { + path: [ + 'calendar_id' + ], + body: [ + 'page' + ], + query: [ + 'from', + 'size' + ] + }, + 'ml.get_categories': { + path: [ + 'job_id', + 'category_id' + ], + body: [ + 'page' + ], + query: [ + 'from', + 'partition_field_value', + 'size' + ] + }, + 'ml.get_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'exclude_generated' + ] + }, + 'ml.get_data_frame_analytics_stats': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'verbose' + ] + }, + 'ml.get_datafeed_stats': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_datafeeds': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match', + 'exclude_generated' + ] + }, + 'ml.get_filters': { + path: [ + 'filter_id' + ], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'ml.get_influencers': { + path: [ + 'job_id' + ], + body: [ + 'page' + ], + query: [ + 'desc', + 'end', + 'exclude_interim', + 'influencer_score', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_job_stats': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_jobs': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match', + 'exclude_generated' + ] + }, + 'ml.get_memory_stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ml.get_model_snapshot_upgrade_stats': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_model_snapshots': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'desc', + 'end', + 'page', + 'sort', + 'start' + ], + query: [ + 'desc', + 'end', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_overall_buckets': { + path: [ + 'job_id' + ], + body: [ + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ], + query: [ + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ] + }, + 'ml.get_records': { + path: [ + 'job_id' + ], + body: [ + 'desc', + 'end', + 'exclude_interim', + 'page', + 'record_score', + 'sort', + 'start' + ], + query: [ + 'desc', + 'end', + 'exclude_interim', + 'from', + 'record_score', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_trained_models': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'decompress_definition', + 'exclude_generated', + 'from', + 'include', + 'include_model_definition', + 'size', + 'tags' + ] + }, + 'ml.get_trained_models_stats': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size' + ] + }, + 'ml.infer_trained_model': { + path: [ + 'model_id' + ], + body: [ + 'docs', + 'inference_config' + ], + query: [ + 'timeout' + ] + }, + 'ml.info': { + path: [], + body: [], + query: [] + }, + 'ml.open_job': { + path: [ + 'job_id' + ], + body: [ + 'timeout' + ], + query: [ + 'timeout' + ] + }, + 'ml.post_calendar_events': { + path: [ + 'calendar_id' + ], + body: [ + 'events' + ], + query: [] + }, + 'ml.post_data': { + path: [ + 'job_id' + ], + body: [ + 'data' + ], + query: [ + 'reset_end', + 'reset_start' + ] + }, + 'ml.preview_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'config' + ], + query: [] + }, + 'ml.preview_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'datafeed_config', + 'job_config' + ], + query: [ + 'start', + 
'end' + ] + }, + 'ml.put_calendar': { + path: [ + 'calendar_id' + ], + body: [ + 'job_ids', + 'description' + ], + query: [] + }, + 'ml.put_calendar_job': { + path: [ + 'calendar_id', + 'job_id' + ], + body: [], + query: [] + }, + 'ml.put_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'allow_lazy_start', + 'analysis', + 'analyzed_fields', + 'description', + 'dest', + 'max_num_threads', + '_meta', + 'model_memory_limit', + 'source', + 'headers', + 'version' + ], + query: [] + }, + 'ml.put_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'aggregations', + 'aggs', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size', + 'headers' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.put_filter': { + path: [ + 'filter_id' + ], + body: [ + 'description', + 'items' + ], + query: [] + }, + 'ml.put_job': { + path: [], + body: [ + 'allow_lazy_open', + 'analysis_config', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'daily_model_snapshot_retention_after_days', + 'data_description', + 'datafeed_config', + 'description', + 'job_id', + 'groups', + 'model_plot_config', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_index_name', + 'results_retention_days' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.put_trained_model': { + path: [ + 'model_id' + ], + body: [ + 'compressed_definition', + 'definition', + 'description', + 'inference_config', + 'input', + 'metadata', + 'model_type', + 'model_size_bytes', + 'platform_architecture', + 'tags', + 'prefix_strings' + ], + query: [ + 'defer_definition_decompression', + 'wait_for_completion' + ] + }, + 'ml.put_trained_model_alias': { + path: [ + 'model_alias', + 'model_id' + ], + body: [], + query: [ + 'reassign' + ] + }, + 'ml.put_trained_model_definition_part': { + path: [ + 'model_id', + 'part' + ], + body: [ + 'definition', + 'total_definition_length', + 'total_parts' + ], + query: [] + }, + 'ml.put_trained_model_vocabulary': { + path: [ + 'model_id' + ], + body: [ + 'vocabulary', + 'merges', + 'scores' + ], + query: [] + }, + 'ml.reset_job': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'wait_for_completion', + 'delete_user_annotations' + ] + }, + 'ml.revert_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'delete_intervening_results' + ], + query: [ + 'delete_intervening_results' + ] + }, + 'ml.set_upgrade_mode': { + path: [], + body: [], + query: [ + 'enabled', + 'timeout' + ] + }, + 'ml.start_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'ml.start_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'end', + 'start', + 'timeout' + ], + query: [ + 'end', + 'start', + 'timeout' + ] + }, + 'ml.start_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [ + 'adaptive_allocations' + ], + query: [ + 'cache_size', + 'deployment_id', + 'number_of_allocations', + 'priority', + 'queue_capacity', + 'threads_per_allocation', + 'timeout', + 'wait_for' + ] + }, + 'ml.stop_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.stop_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'allow_no_match', + 
'force', + 'timeout' + ], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.stop_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'force' + ] + }, + 'ml.update_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'description', + 'model_memory_limit', + 'max_num_threads', + 'allow_lazy_start' + ], + query: [] + }, + 'ml.update_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'aggregations', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.update_filter': { + path: [ + 'filter_id' + ], + body: [ + 'add_items', + 'description', + 'remove_items' + ], + query: [] + }, + 'ml.update_job': { + path: [ + 'job_id' + ], + body: [ + 'allow_lazy_open', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'categorization_filters', + 'description', + 'model_plot_config', + 'model_prune_window', + 'daily_model_snapshot_retention_after_days', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_retention_days', + 'groups', + 'detectors', + 'per_partition_categorization' + ], + query: [] + }, + 'ml.update_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'description', + 'retain' + ], + query: [] + }, + 'ml.update_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [ + 'number_of_allocations', + 'adaptive_allocations' + ], + query: [ + 'number_of_allocations' + ] + }, + 'ml.upgrade_job_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [ + 'wait_for_completion', + 'timeout' + ] + }, + 'ml.validate': { + path: [], + body: [ + 'job_id', + 'analysis_config', + 'analysis_limits', + 'data_description', + 'description', + 'model_plot', + 'model_snapshot_id', + 'model_snapshot_retention_days', + 'results_index_name' + ], + query: [] + }, + 'ml.validate_detector': { + path: [], + body: [ + 'detector' + ], + query: [] + } + } } /** @@ -51,7 +997,10 @@ export default class Ml { async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.clear_trained_model_deployment_cache'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {}
@@ -93,8 +1042,12 @@ export default class Ml {
   async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlCloseJobResponse, unknown>>
   async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise<T.MlCloseJobResponse>
   async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id']
-    const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.close_job']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -116,8 +1069,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -140,7 +1099,10 @@ export default class Ml {
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarResponse, unknown>>
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarResponse>
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['calendar_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_calendar']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -182,7 +1144,10 @@ export default class Ml {
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarEventResponse, unknown>>
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarEventResponse>
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['calendar_id', 'event_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_calendar_event']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
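Every hunk in this patch applies the same two changes: the per-method `acceptedPath`/`acceptedBody` literals move into the shared `acceptedParams` table keyed by endpoint name, and keys that are neither path parameters nor handled earlier in the loop are routed to the query string only when the endpoint's `query` list (or the client-wide common query parameters) allows them, falling back to the request body otherwise. A minimal standalone sketch of that fallback routing — `EndpointSpec`, `splitParams`, and the contents of `commonQueryParams` are illustrative, not part of this patch:

interface EndpointSpec {
  path: string[]
  body: string[]
  query: string[]
}

// Illustrative stand-in for the client-wide list of always-accepted query params.
const commonQueryParams: string[] = ['error_trace', 'filter_path', 'human', 'pretty']

function splitParams (spec: EndpointSpec, params: Record<string, any>): {
  querystring: Record<string, any>
  body: Record<string, any> | undefined
} {
  const querystring: Record<string, any> = {}
  let body: Record<string, any> | undefined
  for (const key of Object.keys(params)) {
    if (spec.path.includes(key)) continue // path params are serialized into the URL
    if (spec.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key]
    } else {
      body = body ?? {} // created lazily, mirroring `body = body ?? {}` in the hunks
      body[key] = params[key]
    }
  }
  return { querystring, body }
}

// e.g. for 'ml.close_job', `force` and `timeout` now land in the query string:
// splitParams(
//   { path: ['job_id'], body: ['allow_no_match', 'force', 'timeout'], query: ['allow_no_match', 'force', 'timeout'] },
//   { job_id: 'my-job', force: true, timeout: '30s' }
// )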
@@ -268,7 +1236,10 @@ export default class Ml {
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDataFrameAnalyticsResponse, unknown>>
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDataFrameAnalyticsResponse>
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_data_frame_analytics']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -310,7 +1281,10 @@ export default class Ml {
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDatafeedResponse, unknown>>
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDatafeedResponse>
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['datafeed_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_datafeed']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -352,8 +1326,12 @@ export default class Ml {
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteExpiredDataResponse, unknown>>
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<T.MlDeleteExpiredDataResponse>
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id']
-    const acceptedBody: string[] = ['requests_per_second', 'timeout']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.delete_expired_data']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -376,8 +1354,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -407,7 +1391,10 @@ export default class Ml {
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteFilterResponse, unknown>>
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<T.MlDeleteFilterResponse>
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['filter_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_filter']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -449,7 +1436,10 @@ export default class Ml {
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteForecastResponse, unknown>>
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<T.MlDeleteForecastResponse>
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id', 'forecast_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_forecast']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -499,7 +1489,10 @@ export default class Ml {
   async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteJobResponse, unknown>>
   async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<T.MlDeleteJobResponse>
   async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_job']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -541,7 +1534,10 @@ export default class Ml {
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteModelSnapshotResponse, unknown>>
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlDeleteModelSnapshotResponse>
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id', 'snapshot_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_model_snapshot']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -584,7 +1580,10 @@ export default class Ml {
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelResponse, unknown>>
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlDeleteTrainedModelResponse>
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['model_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_trained_model']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -626,7 +1625,10 @@ export default class Ml {
   async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelAliasResponse, unknown>>
   async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<T.MlDeleteTrainedModelAliasResponse>
   async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['model_alias', 'model_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_trained_model_alias']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
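For endpoints with no body or query parameters, such as the `ml.delete_*` group above, only `path` is destructured; the renaming pattern (`path: acceptedPath`) keeps the variable names the existing loop code expects. A sketch of the shape, with a table excerpt along the lines of the entries at the top of this patch (the `query` list shown here is illustrative — the real entry may be non-empty):

type EndpointSpec = { path: string[], body: string[], query: string[] }

const acceptedParams: Record<string, EndpointSpec> = {
  'ml.delete_forecast': {
    path: ['job_id', 'forecast_id'],
    body: [],
    query: [] // illustrative; the real entry lists this endpoint's query params
  }
}

// Renaming destructure as used in the hunks above and below: `path` is bound
// to the local name `acceptedPath`, so the parameter loop stays unchanged.
const {
  path: acceptedPath
} = acceptedParams['ml.delete_forecast']

console.log(acceptedPath) // ['job_id', 'forecast_id']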
@@ -669,8 +1671,12 @@ export default class Ml {
   async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEstimateModelMemoryResponse, unknown>>
   async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise<T.MlEstimateModelMemoryResponse>
   async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.estimate_model_memory']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -693,8 +1699,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -714,8 +1726,12 @@ export default class Ml {
   async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEvaluateDataFrameResponse, unknown>>
   async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise<T.MlEvaluateDataFrameResponse>
   async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['evaluation', 'index', 'query']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.evaluate_data_frame']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -737,8 +1753,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -758,8 +1780,12 @@ export default class Ml {
   async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlExplainDataFrameAnalyticsResponse, unknown>>
   async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlExplainDataFrameAnalyticsResponse>
   async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.explain_data_frame_analytics']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} @@ -782,8 +1808,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -813,8 +1845,12 @@ export default class Ml { async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'skip_time', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.flush_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -836,8 +1872,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -860,8 +1902,12 @@ export default class Ml { async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['duration', 'expires_in', 'max_model_memory'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.forecast'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -883,8 +1929,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -907,8 +1959,12 @@ export default class Ml { async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'timestamp'] - const acceptedBody: string[] = ['anomaly_score', 'desc', 'end', 'exclude_interim', 'expand', 'page', 'sort', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_buckets'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -930,8 +1986,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -962,7 +2024,10 @@ export default class Ml { async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_calendar_events'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1004,8 +2069,12 @@ export default class Ml { async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const acceptedBody: string[] = ['page'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_calendars'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1028,8 +2097,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1059,8 +2134,12 @@ export default class Ml { async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'category_id'] - const acceptedBody: string[] = ['page'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_categories'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1082,8 +2161,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1114,7 +2199,10 @@ export default class Ml { async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1164,7 +2252,10 @@ export default class Ml { async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_data_frame_analytics_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1214,7 +2305,10 @@ export default class Ml { async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_datafeed_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1264,7 +2358,10 @@ export default class Ml { async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_datafeeds'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1314,7 +2411,10 @@ export default class Ml { async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_filters'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1364,8 +2464,12 @@ export default class Ml { async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise> async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['page'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_influencers'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1387,8 +2491,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1411,7 +2521,10 @@ export default class Ml { async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_job_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1461,7 +2574,10 @@ export default class Ml { async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_jobs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1511,7 +2627,10 @@ export default class Ml { async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_memory_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1561,7 +2680,10 @@ export default class Ml { async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_model_snapshot_upgrade_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1604,8 +2726,12 @@ export default class Ml { async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedBody: string[] = ['desc', 'end', 'page', 'sort', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_model_snapshots'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1627,8 +2753,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1659,8 +2791,12 @@ export default class Ml { async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_no_match', 'bucket_span', 'end', 'exclude_interim', 'overall_score', 'start', 'top_n'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_overall_buckets'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1682,8 +2818,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1706,8 +2848,12 @@ export default class Ml { async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['desc', 'end', 'exclude_interim', 'page', 'record_score', 'sort', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_records'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1729,8 +2875,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1753,7 +2905,10 @@ export default class Ml { async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_trained_models'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1803,7 +2958,10 @@ export default class Ml { async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_trained_models_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1853,8 +3011,12 @@ export default class Ml { async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['docs', 'inference_config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.infer_trained_model'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1876,8 +3038,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1900,7 +3068,10 @@ export default class Ml { async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ml.info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1940,8 +3111,12 @@ export default class Ml { async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.open_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1963,8 +3138,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1987,8 +3168,12 @@ export default class Ml { async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const acceptedBody: string[] = ['events'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.post_calendar_events'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2010,8 +3195,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2034,8 +3225,12 @@ export default class Ml { async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['data'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.post_data'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2047,8 +3242,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2071,8 +3272,12 @@ export default class Ml { async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.preview_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2095,8 +3300,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2126,8 +3337,12 @@ export default class Ml { async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['datafeed_config', 'job_config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.preview_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2150,8 +3365,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2181,8 +3402,12 @@ export default class Ml { async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const acceptedBody: string[] = ['job_ids', 'description'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_calendar'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2204,8 +3429,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2228,7 +3459,10 @@ export default class Ml { async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id', 'job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.put_calendar_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2271,8 +3505,12 @@ export default class Ml { async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', '_meta', 'model_memory_limit', 'source', 'headers', 'version'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2294,8 +3532,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2318,8 +3562,12 @@ export default class Ml { async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2341,8 +3589,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2365,8 +3619,12 @@ export default class Ml { async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] - const acceptedBody: string[] = ['description', 'items'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_filter'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2388,8 +3646,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2412,8 +3676,12 @@ export default class Ml { async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'job_id', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2435,8 +3703,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2459,8 +3733,12 @@ export default class Ml { async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'platform_architecture', 'tags', 'prefix_strings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_trained_model'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2482,8 +3760,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2506,7 +3790,10 @@ export default class Ml { async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_alias', 'model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.put_trained_model_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2549,8 +3836,12 @@ export default class Ml { async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id', 'part'] - const acceptedBody: string[] = ['definition', 'total_definition_length', 'total_parts'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_trained_model_definition_part'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2572,8 +3863,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2597,8 +3894,12 @@ export default class Ml { async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['vocabulary', 'merges', 'scores'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_trained_model_vocabulary'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2620,8 +3921,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2644,7 +3951,10 @@ export default class Ml { async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.reset_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2686,8 +3996,12 @@ export default class Ml { async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedBody: string[] = ['delete_intervening_results'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.revert_model_snapshot'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2709,8 +4023,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2734,7 +4054,10 @@ export default class Ml {
   async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlSetUpgradeModeResponse, unknown>>
   async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<T.MlSetUpgradeModeResponse>
   async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.set_upgrade_mode']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2774,7 +4097,10 @@ export default class Ml {
   async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDataFrameAnalyticsResponse, unknown>>
   async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlStartDataFrameAnalyticsResponse>
   async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.start_data_frame_analytics']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2816,8 +4142,12 @@ export default class Ml {
   async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDatafeedResponse, unknown>>
   async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlStartDatafeedResponse>
   async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['datafeed_id']
-    const acceptedBody: string[] = ['end', 'start', 'timeout']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.start_datafeed']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2839,8 +4169,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2863,8 +4199,12 @@ export default class Ml {
   async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartTrainedModelDeploymentResponse, unknown>>
   async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlStartTrainedModelDeploymentResponse>
   async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['model_id']
-    const acceptedBody: string[] = ['adaptive_allocations']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.start_trained_model_deployment']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2886,8 +4226,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2910,7 +4256,10 @@ export default class Ml {
   async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStopDataFrameAnalyticsResponse, unknown>>
   async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlStopDataFrameAnalyticsResponse>
   async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.stop_data_frame_analytics']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2952,8 +4301,12 @@ export default class Ml {
   async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStopDatafeedResponse, unknown>>
   async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlStopDatafeedResponse>
   async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['datafeed_id']
-    const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.stop_datafeed']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2975,8 +4328,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2999,7 +4358,10 @@ export default class Ml {
   async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStopTrainedModelDeploymentResponse, unknown>>
   async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlStopTrainedModelDeploymentResponse>
   async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['model_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.stop_trained_model_deployment']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3041,8 +4403,12 @@ export default class Ml {
   async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateDataFrameAnalyticsResponse, unknown>>
   async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlUpdateDataFrameAnalyticsResponse>
   async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const acceptedBody: string[] = ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.update_data_frame_analytics']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3064,8 +4430,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -3088,8 +4460,12 @@ export default class Ml {
   async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateDatafeedResponse, unknown>>
   async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlUpdateDatafeedResponse>
   async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['datafeed_id']
-    const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.update_datafeed']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3111,8 +4487,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -3135,8 +4517,12 @@ export default class Ml {
   async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateFilterResponse, unknown>>
   async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise<T.MlUpdateFilterResponse>
   async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['filter_id']
-    const acceptedBody: string[] = ['add_items', 'description', 'remove_items']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.update_filter']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3158,8 +4544,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -3182,8 +4574,12 @@ export default class Ml {
   async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateJobResponse, unknown>>
   async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise<T.MlUpdateJobResponse>
   async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id']
-    const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'model_prune_window', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.update_job']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3205,8 +4601,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -3229,8 +4631,12 @@ export default class Ml {
   async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateModelSnapshotResponse, unknown>>
   async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlUpdateModelSnapshotResponse>
   async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id', 'snapshot_id']
-    const acceptedBody: string[] = ['description', 'retain']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.update_model_snapshot']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3252,8 +4658,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -3277,8 +4689,12 @@ export default class Ml {
   async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateTrainedModelDeploymentResponse, unknown>>
   async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlUpdateTrainedModelDeploymentResponse>
   async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['model_id']
-    const acceptedBody: string[] = ['number_of_allocations', 'adaptive_allocations']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.update_trained_model_deployment']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3300,8 +4716,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -3324,7 +4746,10 @@ export default class Ml {
   async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpgradeJobSnapshotResponse, unknown>>
   async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlUpgradeJobSnapshotResponse>
   async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id', 'snapshot_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.upgrade_job_snapshot']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3367,8 +4792,12 @@ export default class Ml {
   async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlValidateResponse, unknown>>
   async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise<T.MlValidateResponse>
   async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_id', 'model_snapshot_retention_days', 'results_index_name']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.validate']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3391,8 +4820,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
      } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -3412,8 +4847,12 @@ export default class Ml {
   async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlValidateDetectorResponse, unknown>>
   async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise<T.MlValidateDetectorResponse>
   async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['detector']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.validate_detector']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3425,8 +4864,14 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
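The Ml hunks above all make the same mechanical change: the per-method `acceptedPath`/`acceptedBody` literals become a lookup into a shared `acceptedParams` table, and unknown keys are now routed into the body unless they are declared (or common) query parameters. A minimal standalone sketch of that routing rule follows; `splitParams` and the `Accepted` shape are illustrative names introduced here, not part of the client's public API.

```ts
// Illustrative restatement of the routing rule the hunks above introduce.
// `Accepted` mirrors one { path, body, query } table entry; the common
// query parameters are accepted by every endpoint.
const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

interface Accepted { path: string[], body: string[], query: string[] }

function splitParams (accepted: Accepted, params: Record<string, any>): { body?: Record<string, any>, querystring: Record<string, any> } {
  let body: Record<string, any> | undefined
  const querystring: Record<string, any> = {}
  for (const key in params) {
    if (accepted.body.includes(key)) {
      // declared body parameters are serialized into the request body
      body = body ?? {}
      body[key] = params[key]
    } else if (accepted.path.includes(key)) {
      // path parameters only build the URL; they are never re-sent
      continue
    } else if (key !== 'body' && key !== 'querystring') {
      if (accepted.query.includes(key) || commonQueryParams.includes(key)) {
        querystring[key] = params[key]
      } else {
        // unknown keys now fall through to the body, where previously
        // every unknown key ended up in the query string
        body = body ?? {}
        body[key] = params[key]
      }
    }
  }
  return { body, querystring }
}

// e.g. for ml.start_datafeed: 'start' is a declared body key and 'pretty' a
// common query key, so they land in the body and query string respectively.
console.log(splitParams(
  { path: ['datafeed_id'], body: ['end', 'start', 'timeout'], query: [] },
  { datafeed_id: 'df-1', start: 'now-1d', pretty: true }
))
```

The behavioral difference is in the final `else` branch: the removed code unconditionally wrote unknown keys to `querystring`, while the new code only does so for keys the endpoint's table (or the common list) declares as query parameters.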
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts index 573c4f385..a70ea2055 100644 --- a/src/api/api/msearch.ts +++ b/src/api/api/msearch.ts @@ -35,7 +35,38 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + msearch: { + path: [ + 'index' + ], + body: [ + 'searches' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'include_named_queries_score', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'rest_total_hits_as_int', + 'routing', + 'search_type', + 'typed_keys' + ] + } +} /** * Run multiple searches. The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: ``` header\n body\n header\n body\n ``` This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. IMPORTANT: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. @@ -45,8 +76,12 @@ export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise> export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['searches'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.msearch + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -58,8 +93,14 @@ export default async function MsearchApi = { + msearch_template: { + path: [ + 'index' + ], + body: [ + 'search_templates' + ], + query: [ + 'ccs_minimize_roundtrips', + 'max_concurrent_searches', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } +} /** * Run multiple templated searches. Run multiple templated searches with a single request. If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. 
For example: ``` $ cat requests { "index": "my-index" } { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} { "index": "my-other-index" } { "id": "my-other-search-template", "params": { "query_type": "match_all" }} $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo ``` @@ -45,8 +68,12 @@ export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['search_templates'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.msearch_template + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -58,8 +85,14 @@ export default async function MsearchTemplateApi = { + mtermvectors: { + path: [ + 'index' + ], + body: [ + 'docs', + 'ids' + ], + query: [ + 'ids', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'preference', + 'realtime', + 'routing', + 'term_statistics', + 'version', + 'version_type' + ] + } +} /** * Get multiple term vectors. Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. **Artificial documents** You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. The mapping used is determined by the specified `_index`. @@ -45,8 +76,12 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['docs', 'ids'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.mtermvectors + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +104,14 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
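For function-style endpoints such as `msearch`, the table lives at module scope rather than on a class instance. A standalone check of where typical msearch parameters land under the table shown above; the routing is condensed here (see the `splitParams` sketch after the Ml hunks), and the `params` values are illustrative.

```ts
// Where typical msearch parameters land under the acceptedParams.msearch
// table copied from the diff above.
const msearch = {
  path: ['index'],
  body: ['searches'],
  query: ['ccs_minimize_roundtrips', 'max_concurrent_searches', 'search_type', 'rest_total_hits_as_int', 'typed_keys']
}

const params: Record<string, unknown> = {
  index: 'my-index',                            // -> request URL
  searches: [{}, { query: { match_all: {} } }], // -> NDJSON request body
  typed_keys: true                              // -> query string
}

for (const key of Object.keys(params)) {
  const target = msearch.body.includes(key)
    ? 'body'
    : msearch.path.includes(key)
      ? 'url path'
      : msearch.query.includes(key)
        ? 'querystring'
        : 'body (fallback for undeclared keys)'
  console.log(`${key} -> ${target}`)
}
```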
diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts
index 1ce489ae0..8980be517 100644
--- a/src/api/api/nodes.ts
+++ b/src/api/api/nodes.ts
@@ -35,12 +35,102 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class Nodes {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'nodes.clear_repositories_metering_archive': {
+        path: [
+          'node_id',
+          'max_archive_version'
+        ],
+        body: [],
+        query: []
+      },
+      'nodes.get_repositories_metering_info': {
+        path: [
+          'node_id'
+        ],
+        body: [],
+        query: []
+      },
+      'nodes.hot_threads': {
+        path: [
+          'node_id'
+        ],
+        body: [],
+        query: [
+          'ignore_idle_threads',
+          'interval',
+          'snapshots',
+          'threads',
+          'timeout',
+          'type',
+          'sort'
+        ]
+      },
+      'nodes.info': {
+        path: [
+          'node_id',
+          'metric'
+        ],
+        body: [],
+        query: [
+          'flat_settings',
+          'timeout'
+        ]
+      },
+      'nodes.reload_secure_settings': {
+        path: [
+          'node_id'
+        ],
+        body: [
+          'secure_settings_password'
+        ],
+        query: [
+          'timeout'
+        ]
+      },
+      'nodes.stats': {
+        path: [
+          'node_id',
+          'metric',
+          'index_metric'
+        ],
+        body: [],
+        query: [
+          'completion_fields',
+          'fielddata_fields',
+          'fields',
+          'groups',
+          'include_segment_file_sizes',
+          'level',
+          'timeout',
+          'types',
+          'include_unloaded_segments'
+        ]
+      },
+      'nodes.usage': {
+        path: [
+          'node_id',
+          'metric'
+        ],
+        body: [],
+        query: [
+          'timeout'
+        ]
+      }
+    }
   }
 
   /**
@@ -51,7 +141,10 @@ export default class Nodes {
   async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesClearRepositoriesMeteringArchiveResponse, unknown>>
   async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
   async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'max_archive_version']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.clear_repositories_metering_archive']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -94,7 +187,10 @@ export default class Nodes {
   async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesGetRepositoriesMeteringInfoResponse, unknown>>
   async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise<T.NodesGetRepositoriesMeteringInfoResponse>
   async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.get_repositories_metering_info']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -136,7 +232,10 @@ export default class Nodes {
   async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesHotThreadsResponse, unknown>>
   async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise<T.NodesHotThreadsResponse>
   async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.hot_threads']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -186,7 +285,10 @@ export default class Nodes {
   async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesInfoResponse, unknown>>
   async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise<T.NodesInfoResponse>
   async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'metric']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.info']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -243,8 +345,12 @@ export default class Nodes {
   async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesReloadSecureSettingsResponse, unknown>>
   async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<T.NodesReloadSecureSettingsResponse>
   async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
-    const acceptedBody: string[] = ['secure_settings_password']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['nodes.reload_secure_settings']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -267,8 +373,14 @@ export default class Nodes {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -298,7 +410,10 @@ export default class Nodes {
   async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesStatsResponse, unknown>>
   async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise<T.NodesStatsResponse>
   async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'metric', 'index_metric']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.stats']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -362,7 +477,10 @@ export default class Nodes {
   async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesUsageResponse, unknown>>
   async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise<T.NodesUsageResponse>
   async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'metric']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.usage']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts
index 4cd2a733e..1ff65e50e 100644
--- a/src/api/api/open_point_in_time.ts
+++ b/src/api/api/open_point_in_time.ts
@@ -35,7 +35,31 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  open_point_in_time: {
+    path: [
+      'index'
+    ],
+    body: [
+      'index_filter'
+    ],
+    query: [
+      'keep_alive',
+      'ignore_unavailable',
+      'preference',
+      'routing',
+      'expand_wildcards',
+      'allow_partial_search_results'
+    ]
+  }
+}
 
 /**
  * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time. Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with `search_after`. IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. When a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a `NoShardAvailableActionException` exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. **Keeping point in time alive** The `keep_alive` parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request. Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use. TIP: Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles. Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. Note that a point-in-time doesn't prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.
@@ -45,8 +69,12 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.OpenPointInTimeResponse, unknown>>
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<T.OpenPointInTimeResponse>
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['index_filter']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.open_point_in_time
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -68,8 +96,14 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts
index 908709afd..81c05df92 100644
--- a/src/api/api/ping.ts
+++ b/src/api/api/ping.ts
@@ -35,7 +35,18 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  ping: {
+    path: [],
+    body: [],
+    query: []
+  }
+}
 
 /**
  * Ping the cluster. Get information about whether the cluster is running.
@@ -45,7 +56,10 @@ export default async function PingApi (this: That, params?: T.PingRequest, optio
 export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PingResponse, unknown>>
 export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise<T.PingResponse>
 export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = []
+  const {
+    path: acceptedPath
+  } = acceptedParams.ping
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
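The `open_point_in_time` doc comment above describes the full PIT lifecycle. A usage sketch of that flow with the client follows; the node URL, index name, and `keep_alive` value are illustrative placeholders.

```ts
// A sketch of the point-in-time flow described in the comment above:
// open a PIT, search against it, and close it when done.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function searchWithPit (): Promise<void> {
  // keep_alive only needs to cover the gap until the next request
  const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
  try {
    const result = await client.search({
      // a search with `pit` must not also specify index, routing, or preference
      pit: { id: pit.id, keep_alive: '1m' },
      query: { match_all: {} }
    })
    console.log(result.hits.hits)
  } finally {
    // closing releases the segments the PIT was keeping alive
    await client.closePointInTime({ id: pit.id })
  }
}
```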
diff --git a/src/api/api/profiling.ts b/src/api/api/profiling.ts
index 75f2d46cc..631c1df17 100644
--- a/src/api/api/profiling.ts
+++ b/src/api/api/profiling.ts
@@ -35,12 +35,39 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
 
 export default class Profiling {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'profiling.flamegraph': {
+        path: [],
+        body: [],
+        query: []
+      },
+      'profiling.stacktraces': {
+        path: [],
+        body: [],
+        query: []
+      },
+      'profiling.status': {
+        path: [],
+        body: [],
+        query: []
+      },
+      'profiling.topn_functions': {
+        path: [],
+        body: [],
+        query: []
+      }
+    }
   }
 
   /**
@@ -51,7 +78,10 @@ export default class Profiling {
   async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.flamegraph']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -90,7 +120,10 @@ export default class Profiling {
   async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.stacktraces']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -129,7 +162,10 @@ export default class Profiling {
   async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.status']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -168,7 +204,10 @@ export default class Profiling {
   async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.topn_functions']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts
index d3350ca5b..c412b1faf 100644
--- a/src/api/api/put_script.ts
+++ b/src/api/api/put_script.ts
@@ -35,7 +35,29 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  put_script: {
+    path: [
+      'id',
+      'context'
+    ],
+    body: [
+      'script'
+    ],
+    query: [
+      'context',
+      'master_timeout',
+      'timeout'
+    ]
+  }
+}
 
 /**
  * Create or update a script or search template. Creates or updates a stored script or search template.
@@ -45,8 +67,12 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque
 export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PutScriptResponse, unknown>>
 export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise<T.PutScriptResponse>
 export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['id', 'context']
-  const acceptedBody: string[] = ['script']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.put_script
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -68,8 +94,14 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts
index bb7a964ee..3fa80dccd 100644
--- a/src/api/api/query_rules.ts
+++ b/src/api/api/query_rules.ts
@@ -35,12 +35,90 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class QueryRules {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'query_rules.delete_rule': {
+        path: [
+          'ruleset_id',
+          'rule_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.delete_ruleset': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.get_rule': {
+        path: [
+          'ruleset_id',
+          'rule_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.get_ruleset': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.list_rulesets': {
+        path: [],
+        body: [],
+        query: [
+          'from',
+          'size'
+        ]
+      },
+      'query_rules.put_rule': {
+        path: [
+          'ruleset_id',
+          'rule_id'
+        ],
+        body: [
+          'type',
+          'criteria',
+          'actions',
+          'priority'
+        ],
+        query: []
+      },
+      'query_rules.put_ruleset': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [
+          'rules'
+        ],
+        query: []
+      },
+      'query_rules.test': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [
+          'match_criteria'
+        ],
+        query: []
+      }
+    }
   }
 
   /**
@@ -51,7 +129,10 @@ export default class QueryRules {
   async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRuleResponse, unknown>>
   async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRuleResponse>
   async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.delete_rule']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -94,7 +175,10 @@ export default class QueryRules {
   async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRulesetResponse, unknown>>
   async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRulesetResponse>
   async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.delete_ruleset']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -136,7 +220,10 @@ export default class QueryRules {
   async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRuleResponse, unknown>>
   async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRuleResponse>
   async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.get_rule']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -179,7 +266,10 @@ export default class QueryRules {
   async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRulesetResponse, unknown>>
   async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRulesetResponse>
   async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.get_ruleset']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -221,7 +311,10 @@ export default class QueryRules {
   async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesListRulesetsResponse, unknown>>
   async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<T.QueryRulesListRulesetsResponse>
   async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.list_rulesets']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -261,8 +354,12 @@ export default class QueryRules {
   async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRuleResponse, unknown>>
   async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRuleResponse>
   async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
-    const acceptedBody: string[] = ['type', 'criteria', 'actions', 'priority']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['query_rules.put_rule']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -284,8 +381,14 @@ export default class QueryRules {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -309,8 +412,12 @@ export default class QueryRules {
   async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRulesetResponse, unknown>>
   async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRulesetResponse>
   async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id']
-    const acceptedBody: string[] = ['rules']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['query_rules.put_ruleset']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -332,8 +439,14 @@ export default class QueryRules {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -356,8 +469,12 @@ export default class QueryRules {
   async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesTestResponse, unknown>>
   async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<T.QueryRulesTestResponse>
   async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id']
-    const acceptedBody: string[] = ['match_criteria']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['query_rules.test']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -379,8 +496,14 @@ export default class QueryRules {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
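The query rules table above shows `ruleset_id`/`rule_id` as path parameters and `type`, `criteria`, `actions`, and `priority` as body parameters. A usage sketch follows; the IDs and rule values are made-up placeholders, and the criteria/actions shapes follow the Elasticsearch query rules API as documented, so treat them as an assumption rather than output of this patch.

```ts
// Sketch of a query rules call matching the accepted-parameter table above.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function pinPugs (): Promise<void> {
  await client.queryRules.putRule({
    ruleset_id: 'my-ruleset', // path parameters: consumed when building the URL
    rule_id: 'my-rule',
    type: 'pinned',           // body parameters, per 'query_rules.put_rule' above
    criteria: [{ type: 'exact', metadata: 'user_query', values: ['pugs'] }],
    actions: { ids: ['id1', 'id2'] }
  })
}
```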
{ ...userQuery } : {} @@ -68,8 +95,14 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 5c83f147b..2fe9d235d 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -35,7 +35,36 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + reindex: { + path: [], + body: [ + 'conflicts', + 'dest', + 'max_docs', + 'script', + 'size', + 'source' + ], + query: [ + 'refresh', + 'requests_per_second', + 'scroll', + 'slices', + 'timeout', + 'wait_for_active_shards', + 'wait_for_completion', + 'require_alias' + ] + } +} /** * Reindex documents. Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time. If the Elasticsearch security features are enabled, you must have the following security privileges: * The `read` index privilege for the source data stream, index, or alias. * The `write` index privilege for the destination data stream, index, or index alias. * To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. * If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled. The `dest` element can be configured like the index API to control optimistic concurrency control. Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict. IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. 
A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream. By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. NOTE: The reindex API makes no effort to handle ID collisions. The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. Instead, make sure that IDs are unique by using a script. **Running reindex asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `_tasks/`. **Reindex from multiple sources** If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. That way you can resume the process if there are any errors by removing the partially completed source and starting over. It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. For example, you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. Requests are throttled by padding each batch with a wait time. To turn off throttling, set `requests_per_second` to `-1`. The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". **Slicing** Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. You can slice a reindex request manually by providing a slice ID and total number of slices to each request. You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use. 
Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: * You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. * Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use the following guidelines. Query performance is most efficient when the number of slices is equal to the number of shards in the index. If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. Indexing performance scales linearly across available resources with the number of slices. Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. **Modify documents during reindexing** Like `_update_by_query`, reindex operations support a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document's metadata. Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body. Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. Think of the possibilities! Just be careful; you are able to change: * `_id` * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. **Reindex from remote** Reindex supports reindexing from a remote Elasticsearch cluster. The `host` parameter must contain a scheme, host, port, and optional path. 
The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. There is a range of settings available to configure the behavior of the HTTPS connection. When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. It can be set to a comma-delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*] ``` The list of allowed hosts must be configured on any nodes that will coordinate the reindex. This feature should work with remote clusters of any version of Elasticsearch. This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. WARNING: Elasticsearch does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you'll need to use a smaller batch size. It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request. @@ -45,8 +74,12 @@ export default async function ReindexApi (this: That, params: T.ReindexRequest, export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.reindex + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +101,14 @@ export default async function ReindexApi (this: That, params: T.ReindexRequest, } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ??
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts index d32f80c01..13a52ff25 100644 --- a/src/api/api/reindex_rethrottle.ts +++ b/src/api/api/reindex_rethrottle.ts @@ -35,7 +35,22 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + reindex_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} /** * Throttle a reindex operation. Change the number of requests per second for a particular reindex operation. For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts. @@ -45,7 +60,10 @@ export default async function ReindexRethrottleApi (this: That, params: T.Reinde export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = acceptedParams.reindex_rethrottle + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index 57b5377c6..40af73935 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -35,7 +35,25 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + render_search_template: { + path: [], + body: [ + 'id', + 'file', + 'params', + 'source' + ], + query: [] + } +} /** * Render a search template. Render a search template as a search request body. @@ -45,8 +63,12 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id', 'file', 'params', 'source'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.render_search_template + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -69,8 +91,14 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index b45043728..753acf55a 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -35,12 +35,97 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Rollup { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'rollup.delete_job': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_jobs': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_rollup_caps': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_rollup_index_caps': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'rollup.put_job': { + path: [ + 'id' + ], + body: [ + 'cron', + 'groups', + 'index_pattern', + 'metrics', + 'page_size', + 'rollup_index', + 'timeout', + 'headers' + ], + query: [] + }, + 'rollup.rollup_search': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'query', + 'size' + ], + query: [ + 'rest_total_hits_as_int', + 'typed_keys' + ] + }, + 'rollup.start_job': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.stop_job': { + path: [ + 'id' + ], + body: [], + query: [ + 'timeout', + 'wait_for_completion' + ] + } + } } /** @@ -51,7 +136,10 @@ export default class Rollup { async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.delete_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +181,10 @@ export default class Rollup { async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_jobs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -143,7 +234,10 @@ export default class Rollup { async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_rollup_caps'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -193,7 +287,10 @@ export default class Rollup { async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_rollup_index_caps'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -235,8 +332,12 @@ export default class Rollup { async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index', 'timeout', 'headers'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['rollup.put_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -258,8 +359,14 @@ export default class Rollup { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -282,8 +389,12 @@ export default class Rollup { async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise> async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'size'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['rollup.rollup_search'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -305,8 +416,14 @@ export default class Rollup { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -329,7 +446,10 @@ export default class Rollup { async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.start_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -371,7 +491,10 @@ export default class Rollup { async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.stop_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index bbafbeff1..35fcd6225 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -35,7 +35,24 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + scripts_painless_execute: { + path: [], + body: [ + 'context', + 'context_setup', + 'script' + ], + query: [] + } +} /** * Run a script. Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. Each context requires a script, but additional parameters depend on the context you're using for that script. 
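A minimal sketch of calling this API from the client, using the default `painless_test` context, which needs no index or documents (the script and params are illustrative):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Evaluate a Painless expression against supplied params only.
const result = await client.scriptsPainlessExecute({
  script: {
    source: 'params.count / params.total',
    params: { count: 100.0, total: 1000.0 }
  }
})
console.log(result.result) // '0.1'
```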
@@ -45,8 +62,12 @@ export default async function ScriptsPainlessExecuteApi (this export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise> export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['context', 'context_setup', 'script'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.scripts_painless_execute + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +90,14 @@ export default async function ScriptsPainlessExecuteApi (this } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts index 5bd03110b..184e45b2a 100644 --- a/src/api/api/scroll.ts +++ b/src/api/api/scroll.ts @@ -35,7 +35,27 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + scroll: { + path: [], + body: [ + 'scroll', + 'scroll_id' + ], + query: [ + 'scroll', + 'scroll_id', + 'rest_total_hits_as_int' + ] + } +} /** * Run a scrolling search. IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
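A minimal scrolling loop through this client, assuming a hypothetical `my-index` (as the docstring above notes, prefer `search_after` with a PIT for deep pagination):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Open a search context for one minute and page through it.
let response = await client.search({
  index: 'my-index',
  scroll: '1m',
  size: 1000,
  query: { match_all: {} }
})

while (response.hits.hits.length > 0) {
  // ...process response.hits.hits here...
  response = await client.scroll({
    scroll_id: response._scroll_id,
    scroll: '1m' // extend the search context for another minute
  })
}

// Free the search context once the scroll is exhausted.
await client.clearScroll({ scroll_id: response._scroll_id })
```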
@@ -45,8 +65,12 @@ export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise> export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['scroll', 'scroll_id'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.scroll + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +92,14 @@ export default async function ScrollApi = { + search: { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'rank', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'retriever', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'include_named_queries_score', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'pre_filter_shard_size', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'force_synthetic_source' + ] + } +} /** * Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices. **Search slicing** When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. 
This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. @@ -45,8 +141,12 @@ export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise> export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'rank', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'retriever', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -73,8 +173,14 @@ export default async function SearchApi +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class SearchApplication { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'search_application.delete': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.delete_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.get': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.get_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.list': { + path: [], + body: [], + query: [ + 'q', + 'from', + 'size' + ] + }, + 'search_application.post_behavioral_analytics_event': { + path: [ + 'collection_name', + 'event_type' + ], + body: [ + 'payload' + ], + query: [ + 'debug' + ] + }, + 'search_application.put': { + path: [ + 'name' + ], + body: [ + 'search_application' + ], + query: [ + 'create' + ] + }, + 'search_application.put_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.render_query': { + path: [ + 'name' + ], + body: [ + 'params' + ], + query: [] + }, + 'search_application.search': { + path: [ + 'name' + ], + body: [ + 'params' + ], + query: [ + 'typed_keys' + ] + } + } } /** @@ -51,7 +147,10 @@ export default class SearchApplication { async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -93,7 +192,10 @@ export default class SearchApplication { async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.delete_behavioral_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -135,7 +237,10 @@ export default class SearchApplication { async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -177,7 +282,10 @@ export default class SearchApplication { async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.get_behavioral_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -227,7 +335,10 @@ export default class SearchApplication { async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise> async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['search_application.list'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -267,8 +378,12 @@ export default class SearchApplication { async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithMeta): Promise> async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['collection_name', 'event_type'] - const acceptedBody: string[] = ['payload'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.post_behavioral_analytics_event'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -280,8 +395,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -305,8 +426,12 @@ export default class SearchApplication { async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise> async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['search_application'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.put'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -318,8 +443,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -342,7 +473,10 @@ export default class SearchApplication { async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.put_behavioral_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -384,8 +518,12 @@ export default class SearchApplication { async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['params'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.render_query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -407,8 +545,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -431,8 +575,12 @@ export default class SearchApplication { async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise> async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['params'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.search'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -454,8 +602,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index c9384a91e..34695497b 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -35,7 +35,49 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + search_mvt: { + path: [ + 'index', + 'field', + 'zoom', + 'x', + 'y' + ], + body: [ + 'aggs', + 'buffer', + 'exact_bounds', + 'extent', + 'fields', + 'grid_agg', + 'grid_precision', + 'grid_type', + 'query', + 'runtime_mappings', + 'size', + 'sort', + 'track_total_hits', + 'with_labels' + ], + query: [ + 'exact_bounds', + 'extent', + 'grid_agg', + 'grid_precision', + 'grid_type', + 'size', + 'with_labels' + ] + } +} /** * Search a vector tile. Search a vector tile for geospatial values. Before using this API, you should be familiar with the Mapbox vector tile specification. The API returns results as a binary mapbox vector tile. 
Internally, Elasticsearch translates a vector tile search API request into a search containing: * A `geo_bounding_box` query on the `<field>`. The query uses the `<zoom>/<x>/<y>` tile as a bounding box. * A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `<zoom>/<x>/<y>` tile as a bounding box. * Optionally, a `geo_bounds` aggregation on the `<field>`. The search only includes this aggregation if the `exact_bounds` parameter is `true`. * If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search: ``` GET my-index/_search { "size": 10000, "query": { "geo_bounding_box": { "my-geo-field": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "aggregations": { "grid": { "geotile_grid": { "field": "my-geo-field", "precision": 11, "size": 65536, "bounds": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "bounds": { "geo_bounds": { "field": "my-geo-field", "wrap_longitude": false } } } } ``` The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: * A `hits` layer containing a feature for each `<field>` value matching the `geo_bounding_box` query. * An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. * A meta layer containing: * A feature containing a bounding box. By default, this is the bounding box of the tile. * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. * Metadata for the search. The API only returns features that can display at its zoom level. For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON. IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. If you specify both parameters, the query parameter takes precedence. **Grid precision for geotile** For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. `grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: `<zoom> + grid_precision`. For example, if `<zoom>` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. The maximum final precision is 29. The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The `aggs` layer only contains features for cells with matching data. **Grid precision for geohex** For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and `grid_precision` to calculate a final precision as follows: `<zoom> + grid_precision`. This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.
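The precision arithmetic above can be sketched in a few lines (a back-of-the-envelope illustration, not client code):

```
// Final precision of the aggs layer for a grid_agg of geotile:
// the tile's zoom plus the requested grid_precision, capped at 29.
function geotileFinalPrecision (zoom: number, gridPrecision: number): number {
  return Math.min(zoom + gridPrecision, 29)
}

// Number of cells the tile is divided into: (2^grid_precision) x (2^grid_precision).
function geotileGridCells (gridPrecision: number): number {
  return (2 ** gridPrecision) * (2 ** gridPrecision)
}

geotileFinalPrecision(7, 8) // => 15
geotileGridCells(8)         // => 65536, i.e. a 256 x 256 grid
```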
The following table maps the H3 resolution for each precision. For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3.

| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio |
| --------- | ---------------- | ------------- | --------------- | ----- |
| 1 | 4 | 0 | 122 | 30.5 |
| 2 | 16 | 0 | 122 | 7.625 |
| 3 | 64 | 1 | 842 | 13.15625 |
| 4 | 256 | 1 | 842 | 3.2890625 |
| 5 | 1024 | 2 | 5882 | 5.744140625 |
| 6 | 4096 | 2 | 5882 | 1.436035156 |
| 7 | 16384 | 3 | 41162 | 2.512329102 |
| 8 | 65536 | 3 | 41162 | 0.6280822754 |
| 9 | 262144 | 4 | 288122 | 1.099098206 |
| 10 | 1048576 | 4 | 288122 | 0.2747745514 |
| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |
| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |
| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |
| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |
| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |
| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |
| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |
| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |
| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |
| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |
| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |
| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |
| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |
| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |
| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |
| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |
| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |
| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |
| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |

Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. @@ -45,8 +87,12 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y'] - const acceptedBody: string[] = ['aggs', 'buffer', 'exact_bounds', 'extent', 'fields', 'grid_agg', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort', 'track_total_hits', 'with_labels'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search_mvt + const userQuery = params?.querystring const querystring: Record = userQuery != null ?
{ ...userQuery } : {} @@ -68,8 +114,14 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts index f2fff30a5..87a8ba52e 100644 --- a/src/api/api/search_shards.ts +++ b/src/api/api/search_shards.ts @@ -35,7 +35,28 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + search_shards: { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'preference', + 'routing' + ] + } +} /** * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the `indices` section. If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. @@ -45,7 +66,10 @@ export default async function SearchShardsApi (this: That, params?: T.SearchShar export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = acceptedParams.search_shards + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index f63c77a45..0a47b171c 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -35,7 +35,42 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + search_template: { + path: [ + 'index' + ], + body: [ + 'explain', + 'id', + 'params', + 'profile', + 'source' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'preference', + 'profile', + 'routing', + 'scroll', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } +} /** * Run a search with a search template. 
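A minimal sketch of an inline search template sent through this client (the index, field, and parameter names are hypothetical):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Render {{query_string}} into the query at search time.
const templated = await client.searchTemplate({
  index: 'my-index',
  source: '{"query": {"match": {"message": "{{query_string}}"}}}',
  params: { query_string: 'hello world' }
})
console.log(templated.hits.hits)
```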
@@ -45,8 +80,12 @@ export default async function SearchTemplateApi (this: That export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['explain', 'id', 'params', 'profile', 'source'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search_template + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +108,14 @@ export default async function SearchTemplateApi (this: That } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 4c8af1dda..2d5f792ae 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -35,12 +35,67 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class SearchableSnapshots { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'searchable_snapshots.cache_stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'searchable_snapshots.clear_cache': { + path: [ + 'index' + ], + body: [], + query: [ + 'expand_wildcards', + 'allow_no_indices', + 'ignore_unavailable' + ] + }, + 'searchable_snapshots.mount': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'index', + 'renamed_index', + 'index_settings', + 'ignore_index_settings' + ], + query: [ + 'master_timeout', + 'wait_for_completion', + 'storage' + ] + }, + 'searchable_snapshots.stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'level' + ] + } + } } /** @@ -51,7 +106,10 @@ export default class SearchableSnapshots { async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.cache_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -101,7 +159,10 @@ export default class SearchableSnapshots { async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.clear_cache'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -151,8 +212,12 @@ export default class SearchableSnapshots { async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise> async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const acceptedBody: string[] = ['index', 'renamed_index', 'index_settings', 'ignore_index_settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['searchable_snapshots.mount'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -174,8 +239,14 @@ export default class SearchableSnapshots { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -199,7 +270,10 @@ export default class SearchableSnapshots { async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 3484f5933..0f3021cc3 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -35,12 +35,648 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Security { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'security.activate_user_profile': { + path: [], + body: [ + 'access_token', + 'grant_type', + 'password', + 'username' + ], + query: [] + }, + 'security.authenticate': { + path: [], + body: [], + query: [] + }, + 'security.bulk_delete_role': { + path: [], + body: [ + 'names' + ], + query: [ + 'refresh' + ] + }, + 'security.bulk_put_role': { + path: [], + body: [ + 'roles' + ], + query: [ + 'refresh' + ] + }, + 'security.bulk_update_api_keys': { + path: [], + body: [ + 'expiration', + 'ids', + 'metadata', + 'role_descriptors' + ], + query: [] + }, + 'security.change_password': { + path: [ + 'username' + ], + body: [ + 'password', + 'password_hash' + ], + query: [ + 'refresh' + ] + }, + 'security.clear_api_key_cache': { + path: [ + 'ids' + ], + body: [], + query: [] + }, + 'security.clear_cached_privileges': { + path: [ + 'application' + ], + body: [], + query: [] + }, + 'security.clear_cached_realms': { + path: [ + 'realms' + ], + body: [], + query: [ + 'usernames' + ] + }, + 'security.clear_cached_roles': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.clear_cached_service_tokens': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [] + }, + 'security.create_api_key': { + path: [], + body: [ + 'expiration', + 'name', + 'role_descriptors', + 'metadata' + ], + query: [ + 'refresh' + ] + }, + 'security.create_cross_cluster_api_key': { + path: [], + body: [ + 'access', + 'expiration', + 'metadata', + 'name' + ], + query: [] + }, + 'security.create_service_token': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delegate_pki': { + path: [], + body: [ + 'x509_certificate_chain' + ], + query: [] + }, + 'security.delete_privileges': { + path: [ + 'application', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_role': { + path: [ + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_role_mapping': { + path: [ + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_service_token': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.disable_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.disable_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enable_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enable_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enroll_kibana': { + path: [], + body: [], + query: [] + }, + 'security.enroll_node': { + path: [], + body: [], + query: [] + }, + 'security.get_api_key': { + path: [], + body: [], + query: [ + 'id', + 'name', + 
'owner', + 'realm_name', + 'username', + 'with_limited_by', + 'active_only', + 'with_profile_uid' + ] + }, + 'security.get_builtin_privileges': { + path: [], + body: [], + query: [] + }, + 'security.get_privileges': { + path: [ + 'application', + 'name' + ], + body: [], + query: [] + }, + 'security.get_role': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.get_role_mapping': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.get_service_accounts': { + path: [ + 'namespace', + 'service' + ], + body: [], + query: [] + }, + 'security.get_service_credentials': { + path: [ + 'namespace', + 'service' + ], + body: [], + query: [] + }, + 'security.get_settings': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'security.get_token': { + path: [], + body: [ + 'grant_type', + 'scope', + 'password', + 'kerberos_ticket', + 'refresh_token', + 'username' + ], + query: [] + }, + 'security.get_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'with_profile_uid' + ] + }, + 'security.get_user_privileges': { + path: [], + body: [], + query: [ + 'application', + 'priviledge', + 'username' + ] + }, + 'security.get_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'data' + ] + }, + 'security.grant_api_key': { + path: [], + body: [ + 'api_key', + 'grant_type', + 'access_token', + 'username', + 'password', + 'run_as' + ], + query: [] + }, + 'security.has_privileges': { + path: [ + 'user' + ], + body: [ + 'application', + 'cluster', + 'index' + ], + query: [] + }, + 'security.has_privileges_user_profile': { + path: [], + body: [ + 'uids', + 'privileges' + ], + query: [] + }, + 'security.invalidate_api_key': { + path: [], + body: [ + 'id', + 'ids', + 'name', + 'owner', + 'realm_name', + 'username' + ], + query: [] + }, + 'security.invalidate_token': { + path: [], + body: [ + 'token', + 'refresh_token', + 'realm_name', + 'username' + ], + query: [] + }, + 'security.oidc_authenticate': { + path: [], + body: [ + 'nonce', + 'realm', + 'redirect_uri', + 'state' + ], + query: [] + }, + 'security.oidc_logout': { + path: [], + body: [ + 'access_token', + 'refresh_token' + ], + query: [] + }, + 'security.oidc_prepare_authentication': { + path: [], + body: [ + 'iss', + 'login_hint', + 'nonce', + 'realm', + 'state' + ], + query: [] + }, + 'security.put_privileges': { + path: [], + body: [ + 'privileges' + ], + query: [ + 'refresh' + ] + }, + 'security.put_role': { + path: [ + 'name' + ], + body: [ + 'applications', + 'cluster', + 'global', + 'indices', + 'remote_indices', + 'remote_cluster', + 'metadata', + 'run_as', + 'description', + 'transient_metadata' + ], + query: [ + 'refresh' + ] + }, + 'security.put_role_mapping': { + path: [ + 'name' + ], + body: [ + 'enabled', + 'metadata', + 'roles', + 'role_templates', + 'rules', + 'run_as' + ], + query: [ + 'refresh' + ] + }, + 'security.put_user': { + path: [], + body: [ + 'username', + 'email', + 'full_name', + 'metadata', + 'password', + 'password_hash', + 'roles', + 'enabled' + ], + query: [ + 'refresh' + ] + }, + 'security.query_api_keys': { + path: [], + body: [ + 'aggregations', + 'aggs', + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [ + 'with_limited_by', + 'with_profile_uid', + 'typed_keys' + ] + }, + 'security.query_role': { + path: [], + body: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [] + }, + 'security.query_user': { + path: [], + body: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [ + 'with_profile_uid' 
+      ]
+    },
+    'security.saml_authenticate': {
+      path: [],
+      body: [
+        'content',
+        'ids',
+        'realm'
+      ],
+      query: []
+    },
+    'security.saml_complete_logout': {
+      path: [],
+      body: [
+        'realm',
+        'ids',
+        'query_string',
+        'content'
+      ],
+      query: []
+    },
+    'security.saml_invalidate': {
+      path: [],
+      body: [
+        'acs',
+        'query_string',
+        'realm'
+      ],
+      query: []
+    },
+    'security.saml_logout': {
+      path: [],
+      body: [
+        'token',
+        'refresh_token'
+      ],
+      query: []
+    },
+    'security.saml_prepare_authentication': {
+      path: [],
+      body: [
+        'acs',
+        'realm',
+        'relay_state'
+      ],
+      query: []
+    },
+    'security.saml_service_provider_metadata': {
+      path: [
+        'realm_name'
+      ],
+      body: [],
+      query: []
+    },
+    'security.suggest_user_profiles': {
+      path: [],
+      body: [
+        'name',
+        'size',
+        'data',
+        'hint'
+      ],
+      query: [
+        'data'
+      ]
+    },
+    'security.update_api_key': {
+      path: [
+        'id'
+      ],
+      body: [
+        'role_descriptors',
+        'metadata',
+        'expiration'
+      ],
+      query: []
+    },
+    'security.update_cross_cluster_api_key': {
+      path: [
+        'id'
+      ],
+      body: [
+        'access',
+        'expiration',
+        'metadata'
+      ],
+      query: []
+    },
+    'security.update_settings': {
+      path: [],
+      body: [
+        'security',
+        'security-profile',
+        'security-tokens'
+      ],
+      query: [
+        'master_timeout',
+        'timeout'
+      ]
+    },
+    'security.update_user_profile_data': {
+      path: [
+        'uid'
+      ],
+      body: [
+        'labels',
+        'data'
+      ],
+      query: [
+        'if_seq_no',
+        'if_primary_term',
+        'refresh'
+      ]
+    }
+  }
 }
 /**
@@ -51,8 +687,12 @@ export default class Security {
   async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityActivateUserProfileResponse, unknown>>
   async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityActivateUserProfileResponse>
   async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['access_token', 'grant_type', 'password', 'username']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.activate_user_profile']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -74,8 +714,14 @@
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
@@ -95,7 +741,10 @@ export default class Security {
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityAuthenticateResponse, unknown>>
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise<T.SecurityAuthenticateResponse>
   async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.authenticate']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -135,8 +784,12 @@ export default class Security {
   async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityBulkDeleteRoleResponse, unknown>>
   async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityBulkDeleteRoleResponse>
   async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['names']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.bulk_delete_role']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -158,8 +811,14 @@
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
@@ -179,8 +838,12 @@ export default class Security {
   async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityBulkPutRoleResponse, unknown>>
   async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityBulkPutRoleResponse>
   async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['roles']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.bulk_put_role']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -202,8 +865,14 @@
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
@@ -223,8 +892,12 @@ export default class Security {
   async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityBulkUpdateApiKeysResponse, unknown>>
   async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise<T.SecurityBulkUpdateApiKeysResponse>
   async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['expiration', 'ids', 'metadata', 'role_descriptors']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.bulk_update_api_keys']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -246,8 +919,14 @@
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
@@ -267,8 +946,12 @@ export default class Security {
   async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityChangePasswordResponse, unknown>>
   async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise<T.SecurityChangePasswordResponse>
   async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['username']
-    const acceptedBody: string[] = ['password', 'password_hash']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.change_password']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -291,8 +974,14 @@
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
@@ -322,7 +1011,10 @@
   async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearApiKeyCacheResponse, unknown>>
   async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise<T.SecurityClearApiKeyCacheResponse>
   async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ids']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.clear_api_key_cache']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -364,7 +1056,10 @@
   async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedPrivilegesResponse, unknown>>
   async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedPrivilegesResponse>
   async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['application']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.clear_cached_privileges']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -406,7 +1101,10 @@
   async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityClearCachedRealmsResponse, unknown>>
   async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise<T.SecurityClearCachedRealmsResponse>
   async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['realms']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.clear_cached_realms']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} @@ -448,7 +1146,10 @@ export default class Security { async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_roles'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -490,7 +1191,10 @@ export default class Security { async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_service_tokens'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -534,8 +1238,12 @@ export default class Security { async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['expiration', 'name', 'role_descriptors', 'metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.create_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -558,8 +1266,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -579,8 +1293,12 @@ export default class Security { async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['access', 'expiration', 'metadata', 'name'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.create_cross_cluster_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -602,8 +1320,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -623,7 +1347,10 @@ export default class Security { async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.create_service_token'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -674,8 +1401,12 @@ export default class Security { async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithMeta): Promise> async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['x509_certificate_chain'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.delegate_pki'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -697,8 +1428,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -718,7 +1455,10 @@ export default class Security { async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['application', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -761,7 +1501,10 @@ export default class Security { async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -803,7 +1546,10 @@ export default class Security { async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_role_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -845,7 +1591,10 @@ export default class Security { async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_service_token'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -889,7 +1638,10 @@ export default class Security { async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -931,7 +1683,10 @@ export default class Security { async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] + const { + path: acceptedPath + } = this.acceptedParams['security.disable_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -973,7 +1728,10 @@ export default class Security { async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] + const { + path: acceptedPath + } = this.acceptedParams['security.disable_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1015,7 +1773,10 @@ export default class Security { async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] + const { + path: acceptedPath + } = this.acceptedParams['security.enable_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1057,7 +1818,10 @@ export default class Security { async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] + const { + path: acceptedPath + } = this.acceptedParams['security.enable_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1099,7 +1863,10 @@ export default class Security { async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise> async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.enroll_kibana'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1139,7 +1906,10 @@ export default class Security { async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.enroll_node'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1179,7 +1949,10 @@ export default class Security { async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.get_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1219,7 +1992,10 @@ export default class Security { async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.get_builtin_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1259,7 +2035,10 @@ export default class Security { async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['application', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1313,7 +2092,10 @@ export default class Security { async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1363,7 +2145,10 @@ export default class Security { async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_role_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1413,7 +2198,10 @@ export default class Security { async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_service_accounts'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1467,7 +2255,10 @@ export default class Security { async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_service_credentials'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1510,7 +2301,10 @@ export default class Security { async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1550,8 +2344,12 @@ export default class Security { async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['grant_type', 'scope', 'password', 'kerberos_ticket', 'refresh_token', 'username'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.get_token'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1574,8 +2372,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1595,7 +2399,10 @@ export default class Security { async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1645,7 +2452,10 @@ export default class Security { async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.get_user_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1685,7 +2495,10 @@ export default class Security { async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1727,8 +2540,12 @@ export default class Security { async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password', 'run_as'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.grant_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1750,8 +2567,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1771,8 +2594,12 @@ export default class Security { async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['user'] - const acceptedBody: string[] = ['application', 'cluster', 'index'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.has_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1795,8 +2622,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1826,8 +2659,12 @@ export default class Security { async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['uids', 'privileges'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.has_privileges_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1849,8 +2686,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1870,8 +2713,12 @@ export default class Security { async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id', 'ids', 'name', 'owner', 'realm_name', 'username'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.invalidate_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1894,8 +2741,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1915,8 +2768,12 @@ export default class Security { async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['token', 'refresh_token', 'realm_name', 'username'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.invalidate_token'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1939,8 +2796,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1960,8 +2823,12 @@ export default class Security { async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['nonce', 'realm', 'redirect_uri', 'state'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.oidc_authenticate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1983,8 +2850,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2004,8 +2877,12 @@ export default class Security { async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['access_token', 'refresh_token'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.oidc_logout'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2027,8 +2904,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2048,8 +2931,12 @@ export default class Security { async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['iss', 'login_hint', 'nonce', 'realm', 'state'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.oidc_prepare_authentication'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2072,8 +2959,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2093,8 +2986,12 @@ export default class Security { async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['privileges'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2106,8 +3003,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2127,8 +3030,12 @@ export default class Security { async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'remote_indices', 'remote_cluster', 'metadata', 'run_as', 'description', 'transient_metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2150,8 +3057,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2174,8 +3087,12 @@ export default class Security { async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'role_templates', 'rules', 'run_as'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_role_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2197,8 +3114,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2221,8 +3144,12 @@ export default class Security { async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['username', 'email', 'full_name', 'metadata', 'password', 'password_hash', 'roles', 'enabled'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2244,8 +3171,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2268,8 +3201,12 @@ export default class Security { async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'from', 'sort', 'size', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.query_api_keys'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2292,8 +3229,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2313,8 +3256,12 @@ export default class Security { async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.query_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2337,8 +3284,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2358,8 +3311,12 @@ export default class Security { async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.query_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2382,8 +3339,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2403,8 +3366,12 @@ export default class Security { async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['content', 'ids', 'realm'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_authenticate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2426,8 +3393,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2447,8 +3420,12 @@ export default class Security { async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['realm', 'ids', 'query_string', 'content'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_complete_logout'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2470,8 +3447,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2491,8 +3474,12 @@ export default class Security { async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['acs', 'query_string', 'realm'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_invalidate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2514,8 +3501,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2535,8 +3528,12 @@ export default class Security { async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['token', 'refresh_token'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_logout'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2558,8 +3555,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2579,8 +3582,12 @@ export default class Security { async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['acs', 'realm', 'relay_state'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_prepare_authentication'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2603,8 +3610,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2624,7 +3637,10 @@ export default class Security { async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['realm_name'] + const { + path: acceptedPath + } = this.acceptedParams['security.saml_service_provider_metadata'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2666,8 +3682,12 @@ export default class Security { async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise> async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['name', 'size', 'data', 'hint'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.suggest_user_profiles'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2690,8 +3710,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2711,8 +3737,12 @@ export default class Security { async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['role_descriptors', 'metadata', 'expiration'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2734,8 +3764,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2758,8 +3794,12 @@ export default class Security { async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['access', 'expiration', 'metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_cross_cluster_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2781,8 +3821,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
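// A minimal usage sketch of the `updateApiKey` method refactored above, showing how
// the three body fields from the 'security.update_api_key' registry entry
// (`role_descriptors`, `metadata`, `expiration`) travel in the request body.
// The node URL, API key id, and field values are illustrative, not from the patch.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.security.updateApiKey({
  id: 'VuaCfGcBCdbkQm-e5aOx',           // hypothetical API key id (path parameter)
  role_descriptors: {},                 // an empty object removes role restrictions
  metadata: { environment: 'staging' }, // body, per the registry above
  expiration: '30d'                     // body, per the registry above
})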
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2805,8 +3851,12 @@ export default class Security { async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['security', 'security-profile', 'security-tokens'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2829,8 +3879,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2850,8 +3906,12 @@ export default class Security { async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] - const acceptedBody: string[] = ['labels', 'data'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_user_profile_data'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2873,8 +3933,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index ffa3b9c39..1cac3a03b 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -35,12 +35,55 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Shutdown { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'shutdown.delete_node': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'shutdown.get_node': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'shutdown.put_node': { + path: [ + 'node_id' + ], + body: [ + 'type', + 'reason', + 'allocation_delay', + 'target_node_name' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** @@ -51,7 +94,10 @@ export default class Shutdown { async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['shutdown.delete_node'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +139,10 @@ export default class Shutdown { async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['shutdown.get_node'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -143,8 +192,12 @@ export default class Shutdown { async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] - const acceptedBody: string[] = ['type', 'reason', 'allocation_delay', 'target_node_name'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['shutdown.put_node'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -166,8 +219,14 @@ export default class Shutdown { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
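// A usage sketch for `putNode` as registered above: `type`, `reason`,
// `allocation_delay`, and `target_node_name` are body fields, while
// `master_timeout` and `timeout` stay on the query string. The node id,
// node URL, and values are illustrative only.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.shutdown.putNode({
  node_id: 'instance-0000000001',  // hypothetical node id (path parameter)
  type: 'restart',                 // body: 'restart' | 'remove' | 'replace'
  reason: 'Routine maintenance',   // body
  allocation_delay: '10m',         // body, only meaningful for restarts
  master_timeout: '30s'            // query string, per the registry above
})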
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts index ba1689505..c1b3fc539 100644 --- a/src/api/api/simulate.ts +++ b/src/api/api/simulate.ts @@ -35,12 +35,36 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Simulate { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'simulate.ingest': { + path: [ + 'index' + ], + body: [ + 'docs', + 'component_template_substitutions', + 'index_template_subtitutions', + 'mapping_addition', + 'pipeline_substitutions' + ], + query: [ + 'pipeline' + ] + } + } } /** @@ -51,8 +75,12 @@ export default class Simulate { async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithMeta): Promise> async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['docs', 'component_template_substitutions', 'index_template_subtitutions', 'mapping_addition', 'pipeline_substitutions'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['simulate.ingest'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +102,14 @@ export default class Simulate { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index 9e6a856f9..79a2f6f20 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -35,12 +35,107 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Slm { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'slm.delete_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.execute_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.execute_retention': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_stats': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_status': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.put_lifecycle': { + path: [ + 'policy_id' + ], + body: [ + 'config', + 'name', + 'repository', + 'retention', + 'schedule' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.start': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.stop': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** @@ -51,7 +146,10 @@ export default class Slm { async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] + const { + path: acceptedPath + } = this.acceptedParams['slm.delete_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +191,10 @@ export default class Slm { async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] + const { + path: acceptedPath + } = this.acceptedParams['slm.execute_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -135,7 +236,10 @@ export default class Slm { async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise> async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['slm.execute_retention'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -175,7 +279,10 @@ export default class Slm { async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] + const { + path: acceptedPath + } = this.acceptedParams['slm.get_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -225,7 +332,10 @@ export default class Slm { async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['slm.get_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -265,7 +375,10 @@ export default class Slm { async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['slm.get_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -305,8 +418,12 @@ export default class Slm { async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] - const acceptedBody: string[] = ['config', 'name', 'repository', 'retention', 'schedule'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['slm.put_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
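// A sketch of `putLifecycle` with the five body fields from the
// 'slm.put_lifecycle' registry entry (`config`, `name`, `repository`,
// `retention`, `schedule`). Policy id, repository name, and values are
// illustrative; on a real cluster the repository must already exist.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.slm.putLifecycle({
  policy_id: 'nightly-snapshots',
  schedule: '0 30 1 * * ?',                                  // cron: daily at 01:30
  name: '<nightly-snap-{now/d}>',
  repository: 'my_repository',
  config: { indices: ['data-*'], ignore_unavailable: true },
  retention: { expire_after: '30d', min_count: 5, max_count: 50 }
})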
{ ...userQuery } : {} @@ -328,8 +445,14 @@ export default class Slm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -352,7 +475,10 @@ export default class Slm { async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['slm.start'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -392,7 +518,10 @@ export default class Slm { async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['slm.stop'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index 3b37c9bdb..6b5ea2848 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -35,12 +35,208 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Snapshot { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'snapshot.cleanup_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'snapshot.clone': { + path: [ + 'repository', + 'snapshot', + 'target_snapshot' + ], + body: [ + 'indices' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'snapshot.create': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'expand_wildcards', + 'feature_states', + 'ignore_unavailable', + 'include_global_state', + 'indices', + 'metadata', + 'partial' + ], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'snapshot.create_repository': { + path: [ + 'name' + ], + body: [ + 'repository' + ], + query: [ + 'master_timeout', + 'timeout', + 'verify' + ] + }, + 'snapshot.delete': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'snapshot.delete_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'snapshot.get': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'after', + 'from_sort_value', + 'ignore_unavailable', + 'index_details', + 'index_names', + 'include_repository', + 'master_timeout', + 'order', + 'offset', + 'size', + 'slm_policy_filter', + 'sort', + 'verbose' + ] + }, + 'snapshot.get_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 
'master_timeout' + ] + }, + 'snapshot.repository_analyze': { + path: [ + 'name' + ], + body: [], + query: [ + 'blob_count', + 'concurrency', + 'detailed', + 'early_read_node_count', + 'max_blob_size', + 'max_total_data_size', + 'rare_action_probability', + 'rarely_abort_writes', + 'read_node_count', + 'register_operation_count', + 'seed', + 'timeout' + ] + }, + 'snapshot.repository_verify_integrity': { + path: [ + 'name' + ], + body: [], + query: [ + 'blob_thread_pool_concurrency', + 'index_snapshot_verification_concurrency', + 'index_verification_concurrency', + 'max_bytes_per_sec', + 'max_failed_shard_snapshots', + 'meta_thread_pool_concurrency', + 'snapshot_verification_concurrency', + 'verify_blob_contents' + ] + }, + 'snapshot.restore': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'feature_states', + 'ignore_index_settings', + 'ignore_unavailable', + 'include_aliases', + 'include_global_state', + 'index_settings', + 'indices', + 'partial', + 'rename_pattern', + 'rename_replacement' + ], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'snapshot.status': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'snapshot.verify_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** @@ -51,7 +247,10 @@ export default class Snapshot { async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.cleanup_repository'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,8 +292,12 @@ export default class Snapshot { async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot', 'target_snapshot'] - const acceptedBody: string[] = ['indices'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.clone'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -116,8 +319,14 @@ export default class Snapshot { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -142,8 +351,12 @@ export default class Snapshot { async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const acceptedBody: string[] = ['expand_wildcards', 'feature_states', 'ignore_unavailable', 'include_global_state', 'indices', 'metadata', 'partial'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.create'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -165,8 +378,14 @@ export default class Snapshot { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -190,8 +409,12 @@ export default class Snapshot { async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['repository'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.create_repository'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -203,8 +426,14 @@ export default class Snapshot { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -227,7 +456,10 @@ export default class Snapshot { async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -270,7 +502,10 @@ export default class Snapshot { async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.delete_repository'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -312,7 +547,10 @@ export default class Snapshot { async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -355,7 +593,10 @@ export default class Snapshot { async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.get_repository'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -405,7 +646,10 @@ export default class Snapshot { async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.repository_analyze'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -447,7 +691,10 @@ export default class Snapshot { async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise> async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.repository_verify_integrity'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -489,8 +736,12 @@ export default class Snapshot { async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const acceptedBody: string[] = ['feature_states', 'ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.restore'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -512,8 +763,14 @@ export default class Snapshot { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -537,7 +794,10 @@ export default class Snapshot { async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -591,7 +851,10 @@ export default class Snapshot { async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.verify_repository'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
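// A usage sketch of the `restore` method just refactored: everything below
// except `repository` and `snapshot` (path) and `wait_for_completion` (query)
// is routed into the body via the 'snapshot.restore' registry entry. Repository,
// snapshot, and index names are illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.snapshot.restore({
  repository: 'my_repository',
  snapshot: 'snapshot_1',
  indices: ['data-*'],               // body
  include_global_state: false,       // body
  rename_pattern: '(.+)',            // body
  rename_replacement: 'restored-$1', // body
  wait_for_completion: true          // query string
})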
{ ...userQuery } : {} diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index 871cb7139..60478c7f7 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -35,12 +35,89 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Sql { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'sql.clear_cursor': { + path: [], + body: [ + 'cursor' + ], + query: [] + }, + 'sql.delete_async': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'sql.get_async': { + path: [ + 'id' + ], + body: [], + query: [ + 'delimiter', + 'format', + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'sql.get_async_status': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'sql.query': { + path: [], + body: [ + 'allow_partial_search_results', + 'catalog', + 'columnar', + 'cursor', + 'fetch_size', + 'field_multi_value_leniency', + 'filter', + 'index_using_frozen', + 'keep_alive', + 'keep_on_completion', + 'page_timeout', + 'params', + 'query', + 'request_timeout', + 'runtime_mappings', + 'time_zone', + 'wait_for_completion_timeout' + ], + query: [ + 'format' + ] + }, + 'sql.translate': { + path: [], + body: [ + 'fetch_size', + 'filter', + 'query', + 'time_zone' + ], + query: [] + } + } } /** @@ -51,8 +128,12 @@ export default class Sql { async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['cursor'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.clear_cursor'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +155,14 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -95,7 +182,10 @@ export default class Sql { async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['sql.delete_async'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -137,7 +227,10 @@ export default class Sql { async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['sql.get_async'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -179,7 +272,10 @@ export default class Sql { async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['sql.get_async_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -221,8 +317,12 @@ export default class Sql { async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['allow_partial_search_results', 'catalog', 'columnar', 'cursor', 'fetch_size', 'field_multi_value_leniency', 'filter', 'index_using_frozen', 'keep_alive', 'keep_on_completion', 'page_timeout', 'params', 'query', 'request_timeout', 'runtime_mappings', 'time_zone', 'wait_for_completion_timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -245,8 +345,14 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -266,8 +372,12 @@ export default class Sql { async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise> async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['fetch_size', 'filter', 'query', 'time_zone'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.translate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
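// A sketch of `query`, whose registry entry above illustrates the split well:
// `format` is the only query-string parameter, and everything else — including
// the SQL statement itself — is a body field. The index name and statement
// are illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.sql.query({
  format: 'json',                                    // query string
  query: 'SELECT message FROM "my-index" LIMIT 10',  // body
  fetch_size: 10                                     // body
})
console.log(result.columns, result.rows)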
{ ...userQuery } : {} @@ -289,8 +399,14 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts index 29f25f090..6197e6805 100644 --- a/src/api/api/ssl.ts +++ b/src/api/api/ssl.ts @@ -35,12 +35,24 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Ssl { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ssl.certificates': { + path: [], + body: [], + query: [] + } + } } /** @@ -51,7 +63,10 @@ export default class Ssl { async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ssl.certificates'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 379510816..125d5301f 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -35,12 +35,81 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Synonyms { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'synonyms.delete_synonym': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'synonyms.delete_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'synonyms.get_synonym': { + path: [ + 'id' + ], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'synonyms.get_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'synonyms.get_synonyms_sets': { + path: [], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'synonyms.put_synonym': { + path: [ + 'id' + ], + body: [ + 'synonyms_set' + ], + query: [] + }, + 'synonyms.put_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [ + 'synonyms' + ], + query: [] + } + } } /** @@ -51,7 +120,10 @@ export default class Synonyms { async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.delete_synonym'] + const userQuery = params?.querystring 
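// The per-key routing rule that this patch repeats in every method body,
// extracted as a standalone sketch. `acceptedQuery` is the endpoint's `query`
// list from `acceptedParams` and `commonQueryParams` is the file-level
// allowlist; the helper name `routeKey` is hypothetical and exists only to
// make the branch easy to read.
function routeKey (key: string, acceptedQuery: string[], commonQueryParams: string[]): 'querystring' | 'body' {
  // Declared query parameters (plus `error_trace`, `filter_path`, `human`,
  // and `pretty`, which every endpoint accepts) stay on the query string...
  if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
    return 'querystring'
  }
  // ...while any other unknown key is now folded into the request body,
  // replacing the removed behaviour of always appending it to the query string.
  return 'body'
}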
const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +165,10 @@ export default class Synonyms { async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.delete_synonym_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -136,7 +211,10 @@ export default class Synonyms { async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonym'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -178,7 +256,10 @@ export default class Synonyms { async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonym_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -221,7 +302,10 @@ export default class Synonyms { async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonyms_sets'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -261,8 +345,12 @@ export default class Synonyms { async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['synonyms_set'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['synonyms.put_synonym'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
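// A usage sketch of `putSynonym`: `synonyms_set` is the single body field in
// the 'synonyms.put_synonym' registry entry above. The set id and rules are
// illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.synonyms.putSynonym({
  id: 'my-synonym-set',            // path parameter
  synonyms_set: [                  // body
    { id: 'rule-1', synonyms: 'hello, hi, howdy' }
  ]
})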
{ ...userQuery } : {} @@ -284,8 +372,14 @@ export default class Synonyms { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -308,8 +402,12 @@ export default class Synonyms { async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] - const acceptedBody: string[] = ['synonyms'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['synonyms.put_synonym_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -331,8 +429,14 @@ export default class Synonyms { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index a8f7ccf20..c5da070be 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -35,12 +35,54 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Tasks { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'tasks.cancel': { + path: [ + 'task_id' + ], + body: [], + query: [ + 'actions', + 'nodes', + 'parent_task_id', + 'wait_for_completion' + ] + }, + 'tasks.get': { + path: [ + 'task_id' + ], + body: [], + query: [ + 'timeout', + 'wait_for_completion' + ] + }, + 'tasks.list': { + path: [], + body: [], + query: [ + 'actions', + 'detailed', + 'group_by', + 'nodes', + 'parent_task_id', + 'timeout', + 'wait_for_completion' + ] + } + } } /** @@ -51,7 +93,10 @@ export default class Tasks { async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = this.acceptedParams['tasks.cancel'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -101,7 +146,10 @@ export default class Tasks { async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = this.acceptedParams['tasks.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -143,7 +191,10 @@ export default class Tasks { async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise> async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['tasks.list'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index ad9fa1e0e..38a264ef8 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -35,7 +35,30 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + terms_enum: { + path: [ + 'index' + ], + body: [ + 'field', + 'size', + 'timeout', + 'case_insensitive', + 'index_filter', + 'string', + 'search_after' + ], + query: [] + } +} /** * Get terms in an index. Discover terms that match a partial string in an index. This API is designed for low-latency look-ups used in auto-complete scenarios. > info > The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. @@ -45,8 +68,12 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['field', 'size', 'timeout', 'case_insensitive', 'index_filter', 'string', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.terms_enum + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +95,14 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
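// A sketch of the `termsEnum` flow described in the doc comment above: a
// low-latency, auto-complete style lookup. `field`, `string`, `size`, and
// `case_insensitive` are all body fields per the registry; the index name
// and values are illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const response = await client.termsEnum({
  index: 'stackoverflow',   // hypothetical index (path parameter)
  field: 'tags',
  string: 'kib',            // partial string to complete
  size: 5,
  case_insensitive: true
})
// `terms` may still include entries from deleted-but-unmerged documents,
// as the doc comment warns.
console.log(response.terms, response.complete)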
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index c3f461487..331e9fe69 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -35,7 +35,39 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + termvectors: { + path: [ + 'index', + 'id' + ], + body: [ + 'doc', + 'filter', + 'per_field_analyzer' + ], + query: [ + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'preference', + 'realtime', + 'routing', + 'term_statistics', + 'version', + 'version_type' + ] + } +} /** * Get term vector information. Get information and statistics about terms in the fields of a particular document. You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. For example: ``` GET /my-index-000001/_termvectors/1?fields=message ``` Fields can be specified using wildcards, similar to the multi match query. Term vectors are real-time by default, not near real-time. This can be changed by setting `realtime` parameter to `false`. You can request three types of values: _term information_, _term statistics_, and _field statistics_. By default, all term information and field statistics are returned for all fields but term statistics are excluded. **Term information** * term frequency in the field (always returned) * term positions (`positions: true`) * start and end offsets (`offsets: true`) * term payloads (`payloads: true`), as base64 encoded bytes If the requested information wasn't stored in the index, it will be computed on the fly if possible. Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. > warn > Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. **Behaviour** The term and field statistics are not accurate. Deleted documents are not taken into account. The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. 
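// A usage sketch mirroring the `GET /my-index-000001/_termvectors/1?fields=message`
// example from the doc comment above. Note how the registry entry routes `fields`
// and the statistics flags to the query string, while `doc`, `filter`, and
// `per_field_analyzer` would go to the body. The index name and document id come
// from that example; everything else is illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const response = await client.termvectors({
  index: 'my-index-000001',
  id: '1',
  fields: ['message'],    // wildcards are accepted, as with multi match queries
  positions: true,        // term positions
  offsets: true,          // start/end offsets (UTF-16, per the warning above)
  payloads: true,         // base64-encoded term payloads
  term_statistics: true,  // excluded by default, so request it explicitly
  field_statistics: true
})
console.log(response.term_vectors)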
@@ -45,8 +77,12 @@ export default async function TermvectorsApi (this: That, p
export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TermvectorsResponse, unknown>>
export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise<T.TermvectorsResponse>
export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index', 'id']
-  const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.termvectors
+
  const userQuery = params?.querystring
  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -68,8 +104,14 @@ export default async function TermvectorsApi (this: That, p
    } else if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }

diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts
index fd245e577..7d18c5f3b 100644
--- a/src/api/api/text_structure.ts
+++ b/src/api/api/text_structure.ts
@@ -35,12 +35,93 @@ import {
  TransportResult
} from '@elastic/transport'
import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

export default class TextStructure {
  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
  constructor (transport: Transport) {
    this.transport = transport
+    this.acceptedParams = {
+      'text_structure.find_field_structure': {
+        path: [],
+        body: [],
+        query: [
+          'column_names',
+          'delimiter',
+          'documents_to_sample',
+          'ecs_compatibility',
+          'explain',
+          'field',
+          'format',
+          'grok_pattern',
+          'index',
+          'quote',
+          'should_trim_fields',
+          'timeout',
+          'timestamp_field',
+          'timestamp_format'
+        ]
+      },
+      'text_structure.find_message_structure': {
+        path: [],
+        body: [
+          'messages'
+        ],
+        query: [
+          'column_names',
+          'delimiter',
+          'ecs_compatibility',
+          'explain',
+          'format',
+          'grok_pattern',
+          'quote',
+          'should_trim_fields',
+          'timeout',
+          'timestamp_field',
+          'timestamp_format'
+        ]
+      },
+      'text_structure.find_structure': {
+        path: [],
+        body: [
+          'text_files'
+        ],
+        query: [
+          'charset',
+          'column_names',
+          'delimiter',
+          'ecs_compatibility',
+          'explain',
+          'format',
+          'grok_pattern',
+          'has_header_row',
+          'line_merge_size_limit',
+          'lines_to_sample',
+          'quote',
+          'should_trim_fields',
+          'timeout',
+          'timestamp_field',
+          'timestamp_format'
+        ]
+      },
+      'text_structure.test_grok_pattern': {
+        path: [],
+        body: [
+          'grok_pattern',
+          'text'
+        ],
+        query: [
+          'ecs_compatibility'
+        ]
+      }
+    }
  }

  /**
@@ -51,7 +132,10 @@ export default class TextStructure {
  async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TextStructureFindFieldStructureResponse, unknown>>
  async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise<T.TextStructureFindFieldStructureResponse>
  async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['text_structure.find_field_structure']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -90,8 +174,12 @@ export default class TextStructure {
  async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TextStructureFindMessageStructureResponse, unknown>>
  async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise<T.TextStructureFindMessageStructureResponse>
  async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['messages']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['text_structure.find_message_structure']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -113,8 +201,14 @@ export default class TextStructure {
    } else if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }

@@ -134,8 +228,12 @@ export default class TextStructure {
  async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TextStructureFindStructureResponse, unknown>>
  async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise<T.TextStructureFindStructureResponse>
  async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['text_files']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['text_structure.find_structure']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -147,8 +245,14 @@ export default class TextStructure {
    } else if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }
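The hunks above repeat one mechanical change across every API method: the inline `acceptedPath`/`acceptedBody` arrays move into a shared `acceptedParams` lookup, and a key that is neither a path part nor a declared query parameter now falls through to the request body instead of the querystring. A minimal standalone sketch of that routing rule, assuming a hypothetical `spec` entry and sample call (this is not the client's actual table):

```ts
const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
const spec = { path: ['index'], body: ['messages'], query: ['timeout'] }

function route (params: Record<string, unknown>) {
  const querystring: Record<string, unknown> = {}
  let body: Record<string, unknown> | undefined
  for (const key of Object.keys(params)) {
    if (spec.body.includes(key)) {
      body = body ?? {}
      body[key] = params[key]
    } else if (spec.path.includes(key)) {
      continue // path parts are interpolated into the URL, never sent as body or query
    } else if (spec.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key]
    } else {
      // unknown keys now default to the request body instead of the querystring
      body = body ?? {}
      body[key] = params[key]
    }
  }
  return { body, querystring }
}

// route({ messages: ['log line'], timeout: '25s', some_new_option: true })
// => body: { messages: [...], some_new_option: true }, querystring: { timeout: '25s' }
```

Under this rule only declared query parameters (plus the four common ones) ever reach the URL, which appears to be the point of the refactor: body fields added to the Elasticsearch specification later no longer leak into the querystring.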
@@ -168,8 +272,12 @@ export default class TextStructure {
  async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TextStructureTestGrokPatternResponse, unknown>>
  async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise<T.TextStructureTestGrokPatternResponse>
  async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['grok_pattern', 'text']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['text_structure.test_grok_pattern']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -191,8 +299,14 @@ export default class TextStructure {
    } else if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }

diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts
index 4872de3e1..4e168f30b 100644
--- a/src/api/api/transform.ts
+++ b/src/api/api/transform.ts
@@ -35,12 +35,170 @@ import {
  TransportResult
} from '@elastic/transport'
import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

export default class Transform {
  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
  constructor (transport: Transport) {
    this.transport = transport
+    this.acceptedParams = {
+      'transform.delete_transform': {
+        path: [
+          'transform_id'
+        ],
+        body: [],
+        query: [
+          'force',
+          'delete_dest_index',
+          'timeout'
+        ]
+      },
+      'transform.get_node_stats': {
+        path: [],
+        body: [],
+        query: []
+      },
+      'transform.get_transform': {
+        path: [
+          'transform_id'
+        ],
+        body: [],
+        query: [
+          'allow_no_match',
+          'from',
+          'size',
+          'exclude_generated'
+        ]
+      },
+      'transform.get_transform_stats': {
+        path: [
+          'transform_id'
+        ],
+        body: [],
+        query: [
+          'allow_no_match',
+          'from',
+          'size',
+          'timeout'
+        ]
+      },
+      'transform.preview_transform': {
+        path: [
+          'transform_id'
+        ],
+        body: [
+          'dest',
+          'description',
+          'frequency',
+          'pivot',
+          'source',
+          'settings',
+          'sync',
+          'retention_policy',
+          'latest'
+        ],
+        query: [
+          'timeout'
+        ]
+      },
+      'transform.put_transform': {
+        path: [
+          'transform_id'
+        ],
+        body: [
+          'dest',
+          'description',
+          'frequency',
+          'latest',
+          '_meta',
+          'pivot',
+          'retention_policy',
+          'settings',
+          'source',
+          'sync'
+        ],
+        query: [
+          'defer_validation',
+          'timeout'
+        ]
+      },
+      'transform.reset_transform': {
+        path: [
+          'transform_id'
+        ],
+        body: [],
+        query: [
+          'force',
+          'timeout'
+        ]
+      },
+      'transform.schedule_now_transform': {
+        path: [
+          'transform_id'
+        ],
+        body: [],
+        query: [
+          'timeout'
+        ]
+      },
+      'transform.start_transform': {
+        path: [
+          'transform_id'
+        ],
+        body: [],
+        query: [
+          'timeout',
+          'from'
+        ]
+      },
+      'transform.stop_transform': {
+        path: [
+          'transform_id'
+        ],
+        body: [],
+        query: [
+          'allow_no_match',
+          'force',
+          'timeout',
+          'wait_for_checkpoint',
+          'wait_for_completion'
+        ]
+      },
+      'transform.update_transform': {
+        path: [
+          'transform_id'
+        ],
+        body: [
+          'dest',
+          'description',
+          'frequency',
+          '_meta',
+          'source',
+          'settings',
+          'sync',
+          'retention_policy'
+        ],
+        query: [
+          'defer_validation',
+          'timeout'
+        ]
+      },
+      'transform.upgrade_transforms': {
+        path: [],
+        body: [],
+        query: [
+          'dry_run',
+          'timeout'
+        ]
+      }
+    }
  }

  /**
@@ -51,7 +209,10 @@ export default class Transform {
  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformDeleteTransformResponse, unknown>>
  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise<T.TransformDeleteTransformResponse>
  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['transform_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['transform.delete_transform']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -93,7 +254,10 @@ export default class Transform {
  async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
  async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
  async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['transform.get_node_stats']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -132,7 +296,10 @@ export default class Transform {
  async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformGetTransformResponse, unknown>>
  async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise<T.TransformGetTransformResponse>
  async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['transform_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['transform.get_transform']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -182,7 +349,10 @@ export default class Transform {
  async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformGetTransformStatsResponse, unknown>>
  async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise<T.TransformGetTransformStatsResponse>
  async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['transform_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['transform.get_transform_stats']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -224,8 +394,12 @@ export default class Transform {
  async previewTransform<TPreview = unknown> (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformPreviewTransformResponse<TPreview>, unknown>>
  async previewTransform<TPreview = unknown> (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise<T.TransformPreviewTransformResponse<TPreview>>
  async previewTransform<TPreview = unknown> (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['transform_id']
-    const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['transform.preview_transform']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -248,8 +422,14 @@ export default class Transform {
    } else if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }
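Since `previewTransform` keeps a `TPreview` type parameter through the refactor, callers can still type the previewed documents. A hedged usage sketch — the index name, pivot, and `MyDoc` shape below are invented for illustration:

```ts
import { Client } from '@elastic/elasticsearch'

interface MyDoc { user_id: string, total: number }

const client = new Client({ node: 'http://localhost:9200' })

const preview = await client.transform.previewTransform<MyDoc>({
  source: { index: 'orders' },
  pivot: {
    group_by: { user_id: { terms: { field: 'user_id' } } },
    aggregations: { total: { sum: { field: 'price' } } }
  }
})
// preview.preview should be typed as MyDoc[]
```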
@@ -279,8 +459,12 @@ export default class Transform {
  async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformPutTransformResponse, unknown>>
  async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise<T.TransformPutTransformResponse>
  async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['transform_id']
-    const acceptedBody: string[] = ['dest', 'description', 'frequency', 'latest', '_meta', 'pivot', 'retention_policy', 'settings', 'source', 'sync']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['transform.put_transform']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -302,8 +486,14 @@ export default class Transform {
    } else if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }

@@ -326,7 +516,10 @@ export default class Transform {
  async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformResetTransformResponse, unknown>>
  async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise<T.TransformResetTransformResponse>
  async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['transform_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['transform.reset_transform']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -368,7 +561,10 @@ export default class Transform {
  async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformScheduleNowTransformResponse, unknown>>
  async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise<T.TransformScheduleNowTransformResponse>
  async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['transform_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['transform.schedule_now_transform']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -410,7 +606,10 @@ export default class Transform {
  async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformStartTransformResponse, unknown>>
  async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise<T.TransformStartTransformResponse>
  async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['transform_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['transform.start_transform']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -452,7 +651,10 @@ export default class Transform {
  async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformStopTransformResponse, unknown>>
  async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise<T.TransformStopTransformResponse>
  async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['transform_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['transform.stop_transform']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -494,8 +696,12 @@ export default class Transform {
  async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformUpdateTransformResponse, unknown>>
  async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise<T.TransformUpdateTransformResponse>
  async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['transform_id']
-    const acceptedBody: string[] = ['dest', 'description', 'frequency', '_meta', 'source', 'settings', 'sync', 'retention_policy']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['transform.update_transform']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -517,8 +723,14 @@ export default class Transform {
    } else if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }

@@ -541,7 +753,10 @@ export default class Transform {
  async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformUpgradeTransformsResponse, unknown>>
  async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise<T.TransformUpgradeTransformsResponse>
  async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['transform.upgrade_transforms']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

diff --git a/src/api/api/update.ts b/src/api/api/update.ts
index 06d06ae63..64419582a 100644
--- a/src/api/api/update.ts
+++ b/src/api/api/update.ts
@@ -35,7 +35,45 @@ import {
  TransportResult
} from '@elastic/transport'
import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  update: {
+    path: [
+      'id',
+      'index'
+    ],
+    body: [
+      'detect_noop',
+      'doc',
+      'doc_as_upsert',
+      'script',
+      'scripted_upsert',
+      '_source',
+      'upsert'
+    ],
+    query: [
+      'if_primary_term',
+      'if_seq_no',
+      'include_source_on_error',
+      'lang',
+      'refresh',
+      'require_alias',
+      'retry_on_conflict',
+      'routing',
+      'timeout',
+      'wait_for_active_shards',
+      '_source',
+      '_source_excludes',
+      '_source_includes'
+    ]
+  }
+}
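Note that `_source` is declared under both `body` and `query` for `update`; the Elasticsearch spec accepts it in either position. A hypothetical call showing how the declared lists route top-level keys (index, id, and values are examples only):

```ts
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.update({
  index: 'my-index',            // path part, interpolated into the URL
  id: '1',                      // path part
  doc: { title: 'updated' },    // declared body key
  retry_on_conflict: 3,         // declared querystring key
  _source: ['title']            // accepted in both lists
})
```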
Update a document by running a script or passing a partial document. If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation: * Gets the document (collocated with the shard) from the index. * Runs the specified script. * Indexes the result. The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). @@ -45,8 +83,12 @@ export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise> export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', '_source', 'upsert'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.update + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +110,14 @@ export default async function UpdateApi = { + update_by_query: { + path: [ + 'index' + ], + body: [ + 'max_docs', + 'query', + 'script', + 'slice', + 'conflicts' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'conflicts', + 'default_operator', + 'df', + 'expand_wildcards', + 'from', + 'ignore_unavailable', + 'lenient', + 'max_docs', + 'pipeline', + 'preference', + 'q', + 'refresh', + 'request_cache', + 'requests_per_second', + 'routing', + 'scroll', + 'scroll_size', + 'search_timeout', + 'search_type', + 'slices', + 'sort', + 'stats', + 'terminate_after', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'wait_for_completion' + ] + } +} /** * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `index` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. 
/**
  * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
  * If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `index` or `write`
  * You can specify the query criteria in the request URI or the request body using the same syntax as the search API.
  * When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.
  * You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.
  * Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.
  * NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.
  * While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick; they are not rolled back.
  * **Throttling update requests**
  * To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling.
  * Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is 1000, so if `requests_per_second` is set to `500`:
  * ```
  * target_time = 1000 / 500 per second = 2 seconds
  * wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
  * ```
  * Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth".
  * **Slicing**
  * Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts.
  * Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
  * Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:
  * * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
  * * Fetching the status of the task for the request with `slices` only contains the status of completed slices.
  * * These sub-requests are individually addressable for things like cancellation and rethrottling.
  * * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.
  * * Canceling the request with slices will cancel each sub-request.
  * * Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
  * * Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.
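  * A minimal restatement of the wait-time arithmetic quoted earlier in this comment (the constants are the example values from the text, not client code):
  * ```ts
  * const batchSize = 1000            // default scroll batch size
  * const requestsPerSecond = 500
  * const writeTimeSeconds = 0.5
  * const targetTime = batchSize / requestsPerSecond  // 2 seconds
  * const waitTime = targetTime - writeTimeSeconds    // 1.5 seconds
  * ```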
  * * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.
  * If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:
  * * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
  * * Update performance scales linearly across available resources with the number of slices. Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.
  * **Update the document source**
  * Update by query supports scripts to update the document source. As with the update API, you can set `ctx.op` to change the operation that is performed.
  * Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. The update by query operation skips updating the document and increments the `noop` counter.
  * Set `ctx.op = "delete"` if your script decides that the document should be deleted. The update by query operation deletes the document and increments the `deleted` counter.
  * Update by query supports only `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error.
  * This API enables you to only modify the source of matching documents; you cannot move them.
@@ -45,8 +98,12 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu
export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.UpdateByQueryResponse, unknown>>
export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise<T.UpdateByQueryResponse>
export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice', 'conflicts']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.update_by_query
+
  const userQuery = params?.querystring
  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -68,8 +125,14 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu
    } else if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }

diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts
index eb96ad0ed..9572be0f7 100644
--- a/src/api/api/update_by_query_rethrottle.ts
+++ b/src/api/api/update_by_query_rethrottle.ts
@@ -35,7 +35,22 @@ import {
  TransportResult
} from '@elastic/transport'
import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  update_by_query_rethrottle: {
+    path: [
+      'task_id'
+    ],
+    body: [],
+    query: [
+      'requests_per_second'
+    ]
+  }
+}
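A hedged usage sketch for the rethrottle endpoint declared above; the task id is hypothetical and would normally come from an update-by-query started with `wait_for_completion: false`:

```ts
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.updateByQueryRethrottle({
  task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619',
  requests_per_second: -1 // disable throttling for the running task
})
```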
/**
  * Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
@@ -45,7 +60,10 @@ export default async function UpdateByQueryRethrottleApi (this: That, params: T.
export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.UpdateByQueryRethrottleResponse, unknown>>
export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<T.UpdateByQueryRethrottleResponse>
export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['task_id']
+  const {
+    path: acceptedPath
+  } = acceptedParams.update_by_query_rethrottle
+
  const userQuery = params?.querystring
  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts
index 7e795d62b..b1956dfce 100644
--- a/src/api/api/watcher.ts
+++ b/src/api/api/watcher.ts
@@ -35,12 +35,148 @@ import {
  TransportResult
} from '@elastic/transport'
import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

export default class Watcher {
  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
  constructor (transport: Transport) {
    this.transport = transport
+    this.acceptedParams = {
+      'watcher.ack_watch': {
+        path: [
+          'watch_id',
+          'action_id'
+        ],
+        body: [],
+        query: []
+      },
+      'watcher.activate_watch': {
+        path: [
+          'watch_id'
+        ],
+        body: [],
+        query: []
+      },
+      'watcher.deactivate_watch': {
+        path: [
+          'watch_id'
+        ],
+        body: [],
+        query: []
+      },
+      'watcher.delete_watch': {
+        path: [
+          'id'
+        ],
+        body: [],
+        query: []
+      },
+      'watcher.execute_watch': {
+        path: [
+          'id'
+        ],
+        body: [
+          'action_modes',
+          'alternative_input',
+          'ignore_condition',
+          'record_execution',
+          'simulated_actions',
+          'trigger_data',
+          'watch'
+        ],
+        query: [
+          'debug'
+        ]
+      },
+      'watcher.get_settings': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'watcher.get_watch': {
+        path: [
+          'id'
+        ],
+        body: [],
+        query: []
+      },
+      'watcher.put_watch': {
+        path: [
+          'id'
+        ],
+        body: [
+          'actions',
+          'condition',
+          'input',
+          'metadata',
+          'throttle_period',
+          'throttle_period_in_millis',
+          'transform',
+          'trigger'
+        ],
+        query: [
+          'active',
+          'if_primary_term',
+          'if_seq_no',
+          'version'
+        ]
+      },
+      'watcher.query_watches': {
+        path: [],
+        body: [
+          'from',
+          'size',
+          'query',
+          'sort',
+          'search_after'
+        ],
+        query: []
+      },
+      'watcher.start': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'watcher.stats': {
+        path: [
+          'metric'
+        ],
+        body: [],
+        query: [
+          'emit_stacktraces',
+          'metric'
+        ]
+      },
+      'watcher.stop': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'watcher.update_settings': {
+        path: [],
+        body: [
+          'index.auto_expand_replicas',
+          'index.number_of_replicas'
+        ],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      }
+    }
  }

  /**
@@ -51,7 +187,10 @@ export default class Watcher {
  async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherAckWatchResponse, unknown>>
  async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherAckWatchResponse>
  async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['watch_id', 'action_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.ack_watch']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -101,7 +240,10 @@ export default class Watcher {
  async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherActivateWatchResponse, unknown>>
  async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherActivateWatchResponse>
  async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['watch_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.activate_watch']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -143,7 +285,10 @@ export default class Watcher {
  async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherDeactivateWatchResponse, unknown>>
  async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherDeactivateWatchResponse>
  async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['watch_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.deactivate_watch']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -185,7 +330,10 @@ export default class Watcher {
  async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherDeleteWatchResponse, unknown>>
  async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherDeleteWatchResponse>
  async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.delete_watch']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -227,8 +375,12 @@ export default class Watcher {
  async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherExecuteWatchResponse, unknown>>
  async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherExecuteWatchResponse>
  async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const acceptedBody: string[] = ['action_modes', 'alternative_input', 'ignore_condition', 'record_execution', 'simulated_actions', 'trigger_data', 'watch']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['watcher.execute_watch']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -251,8 +403,14 @@ export default class Watcher {
    } else if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }
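A hedged usage sketch for `executeWatch` as declared in the map above — `action_modes` and `trigger_data` are body keys, `debug` the lone querystring key; the watch id and action names are invented:

```ts
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const run = await client.watcher.executeWatch({
  id: 'my_watch',                               // path part
  ignore_condition: true,                       // body
  action_modes: { 'notify-slack': 'simulate' }, // body
  debug: false                                  // querystring
})
// run.watch_record describes the simulated execution
```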
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -282,7 +440,10 @@ export default class Watcher { async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['watcher.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -322,7 +483,10 @@ export default class Watcher { async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.get_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -364,8 +528,12 @@ export default class Watcher { async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'throttle_period_in_millis', 'transform', 'trigger'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['watcher.put_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -387,8 +555,14 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -411,8 +585,12 @@ export default class Watcher { async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['from', 'size', 'query', 'sort', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['watcher.query_watches'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -435,8 +613,14 @@ export default class Watcher {
    } else if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }

@@ -456,7 +640,10 @@ export default class Watcher {
  async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStartResponse, unknown>>
  async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise<T.WatcherStartResponse>
  async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.start']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -496,7 +683,10 @@ export default class Watcher {
  async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStatsResponse, unknown>>
  async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise<T.WatcherStatsResponse>
  async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['metric']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.stats']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -546,7 +736,10 @@ export default class Watcher {
  async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStopResponse, unknown>>
  async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise<T.WatcherStopResponse>
  async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.stop']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -586,8 +779,12 @@ export default class Watcher {
  async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherUpdateSettingsResponse, unknown>>
  async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise<T.WatcherUpdateSettingsResponse>
  async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['index.auto_expand_replicas', 'index.number_of_replicas']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['watcher.update_settings']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -610,8 +807,14 @@ export default class Watcher {
    } else if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }

diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts
index 9e6a66f7b..084fa20ec 100644
--- a/src/api/api/xpack.ts
+++ b/src/api/api/xpack.ts
@@ -35,12 +35,35 @@ import {
  TransportResult
} from '@elastic/transport'
import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}

export default class Xpack {
  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
  constructor (transport: Transport) {
    this.transport = transport
+    this.acceptedParams = {
+      'xpack.info': {
+        path: [],
+        body: [],
+        query: [
+          'categories',
+          'accept_enterprise',
+          'human'
+        ]
+      },
+      'xpack.usage': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      }
+    }
  }

  /**
@@ -51,7 +74,10 @@ export default class Xpack {
  async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.XpackInfoResponse, unknown>>
  async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise<T.XpackInfoResponse>
  async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['xpack.info']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -91,7 +117,10 @@ export default class Xpack {
  async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.XpackUsageResponse, unknown>>
  async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise<T.XpackUsageResponse>
  async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['xpack.usage']
+
    const userQuery = params?.querystring
    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

diff --git a/src/api/types.ts b/src/api/types.ts
index e242e803c..668948f05 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -2775,7 +2775,6 @@ export interface BulkIndexByScrollFailure {
  id: Id
  index: IndexName
  status: integer
-  type: string
}

export interface BulkStats {
@@ -6103,7 +6102,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase {
export interface MappingDynamicTemplate {
  mapping?: MappingProperty
-  runtime?: MappingProperty
+  runtime?: MappingRuntimeField
  match?: string | string[]
  path_match?: string | string[]
  unmatch?: string | string[]
@@ -12800,7 +12799,7 @@ export interface IndicesIndexSettingsKeys {
  routing_partition_size?: SpecUtilsStringified<integer>
  load_fixed_bitset_filters_eagerly?: boolean
  hidden?: boolean | string
-  auto_expand_replicas?: string
+  auto_expand_replicas?: SpecUtilsWithNullValue<string>
  merge?: IndicesMerge
  search?: IndicesSettingsSearch
  refresh_interval?: Duration
@@ -13097,7 +13096,7 @@ export interface IndicesSoftDeletes {
  retention_lease?: IndicesRetentionLease
}

-export type IndicesSourceMode = 'DISABLED' | 'STORED' | 'SYNTHETIC'
+export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic'

export interface IndicesStorage {
  type: IndicesStorageType
@@ -14275,7 +14274,7 @@ export interface IndicesPutMappingRequest extends RequestBase {
  /** If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string. */
  dynamic_date_formats?: string[]
  /** Specify dynamic templates for the mapping. */
-  dynamic_templates?: Record<string, MappingDynamicTemplate> | Record<string, MappingDynamicTemplate>[]
+  dynamic_templates?: Record<string, MappingDynamicTemplate>[]
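With the narrowed `dynamic_templates` type above, a mapping update has to pass an array of single-key template objects rather than a bare record. A hypothetical call (index and template names are invented):

```ts
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.indices.putMapping({
  index: 'my-index',
  dynamic_templates: [
    {
      strings_as_keywords: {
        match_mapping_type: 'string',
        mapping: { type: 'keyword' }
      }
    }
  ]
})
```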
  /** Control whether field names are enabled for the index. */
  _field_names?: MappingFieldNamesField
  /** A mapping type can have custom meta data associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata. */
@@ -21364,6 +21363,10 @@ export interface SecurityRemoteIndicesPrivileges {
  allow_restricted_indices?: boolean
}

+export interface SecurityRemoteUserIndicesPrivileges extends SecurityUserIndicesPrivileges {
+  clusters: string[]
+}
+
export interface SecurityReplicationAccess {
  names: IndexName | IndexName[]
  allow_restricted_indices?: boolean
@@ -22051,7 +22054,8 @@ export interface SecurityGetRoleRole {
  remote_indices?: SecurityRemoteIndicesPrivileges[]
  remote_cluster?: SecurityRemoteClusterPrivileges[]
  metadata: Metadata
-  run_as: string[]
+  description?: string
+  run_as?: string[]
  transient_metadata?: Record<string, any>
  applications: SecurityApplicationPrivileges[]
  role_templates?: SecurityRoleTemplate[]
@@ -22204,8 +22208,10 @@ export interface SecurityGetUserPrivilegesRequest extends RequestBase {
export interface SecurityGetUserPrivilegesResponse {
  applications: SecurityApplicationPrivileges[]
  cluster: string[]
+  remote_cluster?: SecurityRemoteClusterPrivileges[]
  global: SecurityGlobalPrivilege[]
  indices: SecurityUserIndicesPrivileges[]
+  remote_indices?: SecurityRemoteUserIndicesPrivileges[]
  run_as: string[]
}

From a22c4622d9c56dc06e9a35a513ade104c6012625 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Wed, 26 Feb 2025 12:37:46 -0600
Subject: [PATCH 482/647] Bump to 9.0.0-alpha.4 (#2637)

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 377ba7b6b..0960ee05b 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
  "name": "@elastic/elasticsearch",
-  "version": "9.0.0-alpha.3",
+  "version": "9.0.0-alpha.4",
  "versionCanary": "9.0.0-canary.0",
  "description": "The official Elasticsearch client for Node.js",
  "main": "./index.js",

From ac231c859ebb0ab72a86fe44186af6c7cb630c2b Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Wed, 26 Feb 2025 13:04:18 -0600
Subject: [PATCH 483/647] Improve npm publish version parsing (#2638)

---
 .github/workflows/npm-publish.yml | 35 ++++++++++++++++++++++++-------
 1 file changed, 27 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml
index 6040880b4..8994a003b 100644
--- a/.github/workflows/npm-publish.yml
+++ b/.github/workflows/npm-publish.yml
@@ -23,19 +23,38 @@ jobs:
      - run: npm install -g npm
      - run: npm install
      - run: npm test
-      - run: npm publish --provenance --access public --tag alpha
+      - name: npm publish
+        run: |
+          version=$(jq -r .version package.json)
+          tag_meta=$(echo "$version" | cut -s -d '-' -f2)
+          if [[ -z "$tag_meta" ]]; then
+            npm publish --provenance --access public
+          else
+            tag=$(echo "$tag_meta" | cut -d '.' -f1)
+            npm publish --provenance --access public --tag "$tag"
+          fi
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
      - name: Publish version on GitHub
        run: |
          version=$(jq -r .version package.json)
-          gh release create \
-            -n "This is a 9.0.0 pre-release alpha. Changes may not be stable." \
-            --latest=false \
-            --prerelease \
-            --target "$BRANCH_NAME" \
-            --title "v$version" \
-            "v$version"
+          tag_meta=$(echo "$version" | cut -s -d '-' -f2)
+          if [[ -z "$tag_meta" ]]; then
+            gh release create \
+              -n "[Changelog](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/$BRANCH_NAME/changelog-client.html)"
+              --target "$BRANCH_NAME" \
+              --title "v$version" \
+              "v$version"
+          else
+            tag_main=$(echo "$version" | cut -d '-' -f1)
+            gh release create \
+              -n "This is a $tag_main pre-release. Changes may not be stable." \
+              --latest=false \
+              --prerelease \
+              --target "$BRANCH_NAME" \
+              --title "v$version" \
+              "v$version"
+          fi
        env:
          BRANCH_NAME: ${{ github.event.inputs.branch }}
          GH_TOKEN: ${{ github.token }}
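The tag-selection logic in the bash step above, restated as a small TypeScript helper for clarity (illustrative only; the workflow itself stays in bash):

```ts
function npmDistTag (version: string): string | undefined {
  const meta = version.split('-')[1]       // '9.0.0-alpha.4' -> 'alpha.4'
  if (meta === undefined) return undefined // stable release: publish with the default tag
  return meta.split('.')[0]                // 'alpha.4' -> 'alpha'
}

// npmDistTag('9.0.0')         => undefined
// npmDistTag('9.0.0-alpha.4') => 'alpha'
// npmDistTag('9.0.0-canary.0') => 'canary'
```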
\ - --latest=false \ - --prerelease \ - --target "$BRANCH_NAME" \ - --title "v$version" \ - "v$version" + tag_meta=$(echo "$version" | cut -s -d '-' -f2) + if [[ -z "$tag_meta" ]]; then + gh release create \ + -n "[Changelog](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/$BRANCH_NAME/changelog-client.html)" + --target "$BRANCH_NAME" \ + --title "v$version" \ + "v$version" + else + tag_main=$(echo "$version" | cut -d '-' -f1) + gh release create \ + -n "This is a $tag_main pre-release. Changes may not be stable." \ + --latest=false \ + --prerelease \ + --target "$BRANCH_NAME" \ + --title "v$version" \ + "v$version" + fi env: BRANCH_NAME: ${{ github.event.inputs.branch }} GH_TOKEN: ${{ github.token }} From 3e5e568c071b56de48c508296d72050884be6331 Mon Sep 17 00:00:00 2001 From: Colleen McGinnis Date: Thu, 27 Feb 2025 11:51:14 -0600 Subject: [PATCH 484/647] [docs] Migrate docs from AsciiDoc to Markdown (#2635) * delete asciidoc files * add migrated files * Apply suggestions from review Co-authored-by: Josh Mock * Apply suggestions from review Co-authored-by: Josh Mock * add the new ci checks (#2634) --------- Co-authored-by: Marci W <333176+marciw@users.noreply.github.com> Co-authored-by: Josh Mock --- .github/workflows/npm-publish.yml | 35 +- docs/basic-config.asciidoc | 269 - docs/changelog.asciidoc | 966 - docs/child.asciidoc | 35 - docs/configuration.asciidoc | 12 - docs/connecting.asciidoc | 738 - docs/docset.yml | 488 + docs/examples/index.asciidoc | 34 - docs/getting-started.asciidoc | 170 - docs/helpers.asciidoc | 748 - docs/index.asciidoc | 24 - docs/installation.asciidoc | 116 - docs/integrations.asciidoc | 8 - docs/redirects.asciidoc | 17 - docs/reference.asciidoc | 15988 ---------------- .../advanced-config.md} | 104 +- docs/reference/api-reference.md | 14377 ++++++++++++++ .../as_stream_examples.md} | 28 +- docs/reference/basic-config.md | 51 + .../bulk_examples.md} | 22 +- docs/reference/child.md | 34 + docs/reference/client-helpers.md | 532 + docs/reference/client-testing.md | 121 + docs/reference/configuration.md | 19 + docs/reference/connecting.md | 524 + docs/reference/examples.md | 38 + .../exists_examples.md} | 19 +- .../get_examples.md} | 18 +- docs/reference/getting-started.md | 154 + .../ignore_examples.md} | 14 +- .../index.md} | 62 +- docs/reference/installation.md | 65 + docs/reference/integrations.md | 16 + .../msearch_examples.md} | 17 +- .../observability.md} | 250 +- .../reindex_examples.md} | 23 +- .../scroll_examples.md} | 48 +- .../search_examples.md} | 20 +- .../sql_query_examples.md} | 25 +- .../suggest_examples.md} | 19 +- docs/reference/toc.yml | 34 + docs/reference/transport.md | 53 + .../transport_request_examples.md} | 32 +- .../typescript.md} | 46 +- .../update_by_query_examples.md} | 17 +- .../update_examples.md} | 24 +- docs/release-notes/breaking-changes.md | 28 + docs/release-notes/deprecations.md | 28 + docs/release-notes/index.md | 27 + docs/release-notes/known-issues.md | 20 + docs/release-notes/toc.yml | 5 + docs/testing.asciidoc | 158 - docs/timeout-best-practices.asciidoc | 8 - docs/transport.asciidoc | 73 - package.json | 2 +- src/api/api/async_search.ts | 156 +- src/api/api/autoscaling.ts | 82 +- src/api/api/bulk.ts | 50 +- src/api/api/capabilities.ts | 18 +- src/api/api/cat.ts | 456 +- src/api/api/ccr.ts | 292 +- src/api/api/clear_scroll.ts | 35 +- src/api/api/close_point_in_time.ts | 35 +- src/api/api/cluster.ts | 324 +- src/api/api/connector.ts | 762 +- src/api/api/count.ts | 52 +- src/api/api/create.ts | 47 +- 
 src/api/api/dangling_indices.ts               |    51 +-
 src/api/api/delete.ts                         |    30 +-
 src/api/api/delete_by_query.ts                |    69 +-
 src/api/api/delete_by_query_rethrottle.ts     |    22 +-
 src/api/api/delete_script.ts                  |    23 +-
 src/api/api/enrich.ts                         |    97 +-
 src/api/api/eql.ts                            |   102 +-
 src/api/api/esql.ts                           |   128 +-
 src/api/api/exists.ts                         |    32 +-
 src/api/api/exists_source.ts                  |    31 +-
 src/api/api/explain.ts                        |    51 +-
 src/api/api/features.ts                       |    33 +-
 src/api/api/field_caps.ts                     |    48 +-
 src/api/api/fleet.ts                          |   205 +-
 src/api/api/get.ts                            |    33 +-
 src/api/api/get_script.ts                     |    22 +-
 src/api/api/get_script_context.ts             |    18 +-
 src/api/api/get_script_languages.ts           |    18 +-
 src/api/api/get_source.ts                     |    32 +-
 src/api/api/graph.ts                          |    44 +-
 src/api/api/health_report.ts                  |    24 +-
 src/api/api/ilm.ts                            |   204 +-
 src/api/api/index.ts                          |    51 +-
 src/api/api/indices.ts                        |  1386 +-
 src/api/api/inference.ts                      |   193 +-
 src/api/api/info.ts                           |    18 +-
 src/api/api/ingest.ts                         |   244 +-
 src/api/api/knn_search.ts                     |    44 +-
 src/api/api/license.ts                        |   115 +-
 src/api/api/logstash.ts                       |    62 +-
 src/api/api/mget.ts                           |    48 +-
 src/api/api/migration.ts                      |    41 +-
 src/api/api/ml.ts                             |  1833 +-
 src/api/api/monitoring.ts                     |    42 +-
 src/api/api/msearch.ts                        |    51 +-
 src/api/api/msearch_template.ts               |    43 +-
 src/api/api/mtermvectors.ts                   |    51 +-
 src/api/api/nodes.ts                          |   140 +-
 src/api/api/open_point_in_time.ts             |    44 +-
 src/api/api/ping.ts                           |    18 +-
 src/api/api/profiling.ts                      |    49 +-
 src/api/api/put_script.ts                     |    42 +-
 src/api/api/query_rules.ts                    |   159 +-
 src/api/api/rank_eval.ts                      |    43 +-
 src/api/api/reindex.ts                        |    49 +-
 src/api/api/reindex_rethrottle.ts             |    22 +-
 src/api/api/render_search_template.ts         |    38 +-
 src/api/api/rollup.ts                         |   153 +-
 src/api/api/scripts_painless_execute.ts       |    37 +-
 src/api/api/scroll.ts                         |    40 +-
 src/api/api/search.ts                         |   116 +-
 src/api/api/search_application.ts             |   200 +-
 src/api/api/search_mvt.ts                     |    62 +-
 src/api/api/search_shards.ts                  |    28 +-
 src/api/api/search_template.ts                |    55 +-
 src/api/api/searchable_snapshots.ts           |    90 +-
 src/api/api/security.ts                       |  1400 +-
 src/api/api/shutdown.ts                       |    73 +-
 src/api/api/simulate.ts                       |    44 +-
 src/api/api/slm.ts                            |   155 +-
 src/api/api/snapshot.ts                       |   315 +-
 src/api/api/sql.ts                            |   148 +-
 src/api/api/ssl.ts                            |    19 +-
 src/api/api/synonyms.ts                       |   132 +-
 src/api/api/tasks.ts                          |    59 +-
 src/api/api/terms_enum.ts                     |    43 +-
 src/api/api/termvectors.ts                    |    52 +-
 src/api/api/text_structure.ts                 |   142 +-
 src/api/api/transform.ts                      |   259 +-
 src/api/api/update.ts                         |    58 +-
 src/api/api/update_by_query.ts                |    73 +-
 src/api/api/update_by_query_rethrottle.ts     |    22 +-
 src/api/api/watcher.ts                        |   255 +-
 src/api/api/xpack.ts                          |    35 +-
 src/api/types.ts                              |    18 +-
 142 files changed, 18205 insertions(+), 31133 deletions(-)
 delete mode 100644 docs/basic-config.asciidoc
 delete mode 100644 docs/changelog.asciidoc
 delete mode 100644 docs/child.asciidoc
 delete mode 100644 docs/configuration.asciidoc
 delete mode 100644 docs/connecting.asciidoc
 create mode 100644 docs/docset.yml
 delete mode 100644 docs/examples/index.asciidoc
 delete mode 100644 docs/getting-started.asciidoc
 delete mode 100644 docs/helpers.asciidoc
 delete mode 100644 docs/index.asciidoc
 delete mode 100644 docs/installation.asciidoc
 delete mode 100644 docs/integrations.asciidoc
 delete mode 100644 docs/redirects.asciidoc
 delete mode 100644 docs/reference.asciidoc
 rename docs/{advanced-config.asciidoc => reference/advanced-config.md} (67%)
 create mode 100644 docs/reference/api-reference.md
 rename docs/{examples/asStream.asciidoc => reference/as_stream_examples.md} (84%)
 create mode 100644 docs/reference/basic-config.md
 rename docs/{examples/bulk.asciidoc => reference/bulk_examples.md} (83%)
 create mode 100644 docs/reference/child.md
 create mode 100644 docs/reference/client-helpers.md
 create mode 100644 docs/reference/client-testing.md
 create mode 100644 docs/reference/configuration.md
 create mode 100644 docs/reference/connecting.md
 create mode 100644 docs/reference/examples.md
 rename docs/{examples/exists.asciidoc => reference/exists_examples.md} (68%)
 rename docs/{examples/get.asciidoc => reference/get_examples.md} (64%)
 create mode 100644 docs/reference/getting-started.md
 rename docs/{examples/ignore.asciidoc => reference/ignore_examples.md} (88%)
 rename docs/{introduction.asciidoc => reference/index.md} (59%)
 create mode 100644 docs/reference/installation.md
 create mode 100644 docs/reference/integrations.md
 rename docs/{examples/msearch.asciidoc => reference/msearch_examples.md} (86%)
 rename docs/{observability.asciidoc => reference/observability.md} (58%)
 rename docs/{examples/reindex.asciidoc => reference/reindex_examples.md} (73%)
 rename docs/{examples/scroll.asciidoc => reference/scroll_examples.md} (78%)
 rename docs/{examples/search.asciidoc => reference/search_examples.md} (70%)
 rename docs/{examples/sql.query.asciidoc => reference/sql_query_examples.md} (62%)
 rename docs/{examples/suggest.asciidoc => reference/suggest_examples.md} (78%)
 create mode 100644 docs/reference/toc.yml
 create mode 100644 docs/reference/transport.md
 rename docs/{examples/transport.request.asciidoc => reference/transport_request_examples.md} (57%)
 rename docs/{typescript.asciidoc => reference/typescript.md} (64%)
 rename docs/{examples/update_by_query.asciidoc => reference/update_by_query_examples.md} (75%)
 rename docs/{examples/update.asciidoc => reference/update_examples.md} (81%)
 create mode 100644 docs/release-notes/breaking-changes.md
 create mode 100644 docs/release-notes/deprecations.md
 create mode 100644 docs/release-notes/index.md
 create mode 100644 docs/release-notes/known-issues.md
 create mode 100644 docs/release-notes/toc.yml
 delete mode 100644 docs/testing.asciidoc
 delete mode 100644 docs/timeout-best-practices.asciidoc
 delete mode 100644 docs/transport.asciidoc

diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml
index 8994a003b..6040880b4 100644
--- a/.github/workflows/npm-publish.yml
+++ b/.github/workflows/npm-publish.yml
@@ -23,38 +23,19 @@ jobs:
      - run: npm install -g npm
      - run: npm install
      - run: npm test
-      - name: npm publish
-        run: |
-          version=$(jq -r .version package.json)
-          tag_meta=$(echo "$version" | cut -s -d '-' -f2)
-          if [[ -z "$tag_meta" ]]; then
-            npm publish --provenance --access public
-          else
-            tag=$(echo "$tag_meta" | cut -d '.' -f1)
-            npm publish --provenance --access public --tag "$tag"
-          fi
+      - run: npm publish --provenance --access public --tag alpha
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
      - name: Publish version on GitHub
        run: |
          version=$(jq -r .version package.json)
-          tag_meta=$(echo "$version" | cut -s -d '-' -f2)
-          if [[ -z "$tag_meta" ]]; then
-            gh release create \
-              -n "[Changelog](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/$BRANCH_NAME/changelog-client.html)"
-              --target "$BRANCH_NAME" \
-              --title "v$version" \
-              "v$version"
-          else
-            tag_main=$(echo "$version" | cut -d '-' -f1)
-            gh release create \
-              -n "This is a $tag_main pre-release. Changes may not be stable." \
-              --latest=false \
-              --prerelease \
-              --target "$BRANCH_NAME" \
-              --title "v$version" \
-              "v$version"
-          fi
+          gh release create \
\ + --latest=false \ + --prerelease \ + --target "$BRANCH_NAME" \ + --title "v$version" \ + "v$version" env: BRANCH_NAME: ${{ github.event.inputs.branch }} GH_TOKEN: ${{ github.token }} diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc deleted file mode 100644 index 0d4a0a73d..000000000 --- a/docs/basic-config.asciidoc +++ /dev/null @@ -1,269 +0,0 @@ -[[basic-config]] -=== Basic configuration - -This page shows you the possible basic configuration options that the client -offers. - - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') - -const client = new Client({ - cloud: { id: '' }, - auth: { apiKey: 'base64EncodedKey' }, - maxRetries: 5, - sniffOnStart: true -}) ----- - - -[cols=2*] -|=== -|`node` or `nodes` -a|The Elasticsearch endpoint to use. + -It can be a single string or an array of strings: [source,js] ----- -node: 'http://localhost:9200/' ----- Or it can be an object (or an array of objects) that represents the node: [source,js] ----- -node: { - url: new URL('http://localhost:9200/'), - tls: 'tls options', - agent: 'http agent options', - id: 'custom node id', - headers: { 'custom': 'headers' }, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } -} ----- - -|`auth` -a|Your authentication data. You can use both basic authentication and -{ref}/security-api-create-api-key.html[ApiKey]. + -See <> for more details. + -_Default:_ `null` - -Basic authentication: [source,js] ----- -auth: { - username: 'elastic', - password: 'changeme' -} ----- -{ref}/security-api-create-api-key.html[ApiKey] authentication: [source,js] ----- -auth: { - apiKey: 'base64EncodedKey' -} ----- Bearer authentication, useful for https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html[service account tokens]. Be aware that it does not handle automatic token refresh: [source,js] ----- -auth: { - bearer: 'token' -} ----- - - -|`maxRetries` -|`number` - Max number of retries for each request. + -_Default:_ `3` - -|`requestTimeout` -|`number` - Max request timeout in milliseconds for each request. + -_Default:_ No value - -|`pingTimeout` -|`number` - Max ping request timeout in milliseconds for each request. + -_Default:_ `3000` - -|`sniffInterval` -|`number, boolean` - Perform a sniff operation every `n` milliseconds. Sniffing might not be the best solution for you; take a look https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how[here] to learn more. + -_Default:_ `false` - -|`sniffOnStart` -|`boolean` - Perform a sniff once the client is started. Sniffing might not be the best solution for you; take a look https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how[here] to learn more. + -_Default:_ `false` - -|`sniffEndpoint` -|`string` - Endpoint to ping during a sniff. + -_Default:_ `'_nodes/_all/http'` - -|`sniffOnConnectionFault` -|`boolean` - Perform a sniff on connection fault. Sniffing might not be the best solution for you; take a look https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how[here] to learn more. + -_Default:_ `false` - -|`resurrectStrategy` -|`string` - Configure the node resurrection strategy. + -_Options:_ `'ping'`, `'optimistic'`, `'none'` + -_Default:_ `'ping'` - -|`suggestCompression` -|`boolean` - Adds an `accept-encoding` header to every request. + -_Default:_ `false` - -|`compression` -|`string, boolean` - Enables gzip request body compression.
+ -_Options:_ `'gzip'`, `false` + -_Default:_ `false` - -|`tls` -|`http.SecureContextOptions` - tls https://nodejs.org/api/tls.html[configuration]. + -_Default:_ `null` - -|`proxy` -a|`string, URL` - If you are using an http(s) proxy, you can put its url here. -The client will automatically handle the connection to it. + -_Default:_ `null` -[source,js] ----- -const client = new Client({ - node: 'http://localhost:9200/', - proxy: 'http://localhost:8080/' -}) - -// Proxy with basic authentication -const client = new Client({ - node: 'http://localhost:9200/', - proxy: 'http://user:pwd@localhost:8080/' -}) ----- - -|`agent` -a|`http.AgentOptions, function` - http agent https://nodejs.org/api/http.html#http_new_agent_options[options], -or a function that returns an actual http agent instance. If you want to disable the http agent entirely -(and disable the `keep-alive` feature), set the agent to `false`. + -_Default:_ `null` -[source,js] ----- -const client = new Client({ - node: 'http://localhost:9200/', - agent: { agent: 'options' } -}) - -const client = new Client({ - node: 'http://localhost:9200/', - // the function takes as parameter the option - // object passed to the Connection constructor - agent: (opts) => new CustomAgent() -}) - -const client = new Client({ - node: 'http://localhost:9200/', - // Disable agent and keep-alive - agent: false -}) ----- - -|`nodeFilter` -a|`function` - Filters out the nodes that should not be used for a request. + -_Default:_ -[source,js] ----- -function defaultNodeFilter (node) { - // avoid master only nodes - if (node.roles.master === true && - node.roles.data === false && - node.roles.ingest === false) { - return false - } - return true -} ----- - -|`nodeSelector` -a|`function` - custom selection strategy. + -_Options:_ `'round-robin'`, `'random'`, custom function + -_Default:_ `'round-robin'` + -_Custom function example:_ -[source,js] ----- -function nodeSelector (connections) { - const index = calculateIndex() - return connections[index] -} ----- - -|`generateRequestId` -a|`function` - function to generate the request id for every request; it takes -two parameters: the request parameters and options. + -By default it generates an incremental integer for every request. + -_Custom function example:_ -[source,js] ----- -function generateRequestId (params, options) { - // your id generation logic - // must be synchronous - return 'id' -} ----- - -|`name` -|`string, symbol` - The name to identify the client instance in the events. + -_Default:_ `elasticsearch-js` - -|`opaqueIdPrefix` -|`string` - A string that will be used to prefix any `X-Opaque-Id` header. + -See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html#_x-opaque-id_support[`X-Opaque-Id` support] for more details. + -_Default:_ `null` - -|`headers` -|`object` - A set of custom headers to send in every request. + -_Default:_ `{}` - -|`context` -|`object` - A custom object that you can use for observability in your events. -It will be merged with the API level context option. + -_Default:_ `null` - -|`enableMetaHeader` -|`boolean` - If true, adds a header named `'x-elastic-client-meta'`, containing some minimal telemetry data, -such as the client and platform version. + -_Default:_ `true` - -|`cloud` -a|`object` - Custom configuration for connecting to -https://cloud.elastic.co[Elastic Cloud].
See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Authentication] -for more details. + -_Default:_ `null` + -_Cloud configuration example:_ -[source,js] ----- -const client = new Client({ - cloud: { - id: '' - }, - auth: { - username: 'elastic', - password: 'changeme' - } -}) ----- - -|`disablePrototypePoisoningProtection` -|`boolean`, `'proto'`, `'constructor'` - The client can protect you against prototype poisoning attacks. Read https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08[this article] to learn more about this security concern. If needed, you can enable prototype poisoning protection entirely (`false`) or one of the two checks (`'proto'` or `'constructor'`). For performance reasons, it is disabled by default. Read the `secure-json-parse` https://github.com/fastify/secure-json-parse[documentation] to learn more. + -_Default:_ `true` - -|`caFingerprint` -|`string` - If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints. + -_Default:_ `null` - -|`maxResponseSize` -|`number` - When configured, it verifies that the uncompressed response size is lower than the configured number; if it's higher, it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENGTH + -_Default:_ `null` - -|`maxCompressedResponseSize` -|`number` - When configured, it verifies that the compressed response size is lower than the configured number; if it's higher, it will abort the request. It cannot be higher than buffer.constants.MAX_LENGTH + -_Default:_ `null` - -|=== diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc deleted file mode 100644 index 0fdb9e682..000000000 --- a/docs/changelog.asciidoc +++ /dev/null @@ -1,966 +0,0 @@ -[[changelog-client]] -== Release notes - -[discrete] -=== 9.0.0 - -[discrete] -==== Breaking changes - -[discrete] -===== Drop support for deprecated `body` parameter - -In 8.0, the top-level `body` parameter that was available on all API functions was deprecated (see <>). In 9.0 this property is completely removed. - -[discrete] -===== Remove the default 30-second timeout on all requests sent to Elasticsearch - -Setting HTTP timeouts on Elasticsearch requests goes against Elastic's recommendations. See <> for more information. - -[discrete] -=== 8.17.1 - -[discrete] -==== Fixes - -[discrete] -===== Improved support for Elasticsearch `v8.17` - -Updated TypeScript types based on fixes and improvements to the Elasticsearch specification. - -[discrete] -===== Report correct transport connection type in telemetry - -The client's telemetry reporting mechanism was incorrectly reporting all traffic as using `HttpConnection` when the default is `UndiciConnection`. https://github.com/elastic/elasticsearch-js/issues/2324[#2324] - -[discrete] -=== 8.17.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.17` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.17/release-notes-8.17.0.html[here]. - -[discrete] -=== 8.16.4 - -[discrete] -==== Fixes - -[discrete] -===== Improved support for Elasticsearch `v8.16` - -Updated TypeScript types based on fixes and improvements to the Elasticsearch specification.
- -[discrete] -===== Report correct transport connection type in telemetry - -The client's telemetry reporting mechanism was incorrectly reporting all traffic as using `HttpConnection` when the default is `UndiciConnection`. https://github.com/elastic/elasticsearch-js/issues/2324[#2324] - -[discrete] -=== 8.16.3 - -[discrete] -==== Fixes - -[discrete] -===== Improved support for Elasticsearch `v8.16` - -Updated TypeScript types based on fixes and improvements to the Elasticsearch specification. - -[discrete] -=== 8.16.2 - -[discrete] -==== Fixes - -[discrete] -===== Improved support for Elasticsearch `v8.16` - -Updated TypeScript types based on fixes and improvements to the Elasticsearch specification. - -[discrete] -===== Drop testing artifacts from npm package - -Tap, the unit testing tool used by this project, was recently upgraded and started writing to a `.tap` directory. Since tests are run prior to an `npm publish` in CI, this directory was being included in the published package and bloating its size. - -[discrete] -=== 8.16.1 - -[discrete] -==== Fixes - -[discrete] -===== Fix ECMAScript imports - -Fixed package configuration to correctly support native ECMAScript `import` syntax. - -[discrete] -=== 8.16.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.16` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.16/release-notes-8.16.0.html[here]. - -[discrete] -===== Support Apache Arrow in ES|QL helper - -The ES|QL helper can now return results as an Apache Arrow `Table` or `RecordBatchReader`, which enables high-performance calculations on ES|QL results, even if the response data is larger than the system's available memory. See <> for more information. - -[discrete] -==== Fixes - -[discrete] -===== Pass prototype poisoning options to serializer correctly - -The client's `disablePrototypePoisoningProtection` option was set to `true` by default, but when it was set to any other value it was ignored, making it impossible to enable prototype poisoning protection without providing a custom serializer implementation. - -[discrete] -=== 8.15.3 - -[discrete] -==== Fixes - -[discrete] -===== Improved support for Elasticsearch `v8.15` - -Updated TypeScript types based on fixes and improvements to the Elasticsearch specification. - -[discrete] -===== Drop testing artifacts from npm package - -Tap, the unit testing tool, was recently upgraded and started writing to a `.tap` directory. Since tests are run prior to an `npm publish` in CI, this directory was being included in the published package and bloating its size. - -[discrete] -=== 8.15.2 - -[discrete] -==== Fixes - -[discrete] -===== Improved support for Elasticsearch `v8.15` - -Updated TypeScript types based on fixes and improvements to the Elasticsearch specification. - -[discrete] -=== 8.15.1 - -[discrete] -==== Fixes - -[discrete] -===== Improved support for Elasticsearch `v8.15` - -Updated TypeScript types based on fixes and improvements to the Elasticsearch specification. - -[discrete] -=== 8.15.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.15.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.15/release-notes-8.15.0.html[here]. - -[discrete] -===== OpenTelemetry zero-code instrumentation support - -For those that use an observability service that supports OpenTelemetry spans, the client will now automatically generate traces for each Elasticsearch request it makes. 
-See {jsclient}/observability.html#_opentelemetry[the docs] -for more information. - -[discrete] -=== 8.14.1 - -[discrete] -==== Features - -[discrete] -===== Improved support for Elasticsearch `8.14` - -Updated types based on fixes and changes to the Elasticsearch specification. - -[discrete] -=== 8.14.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.14.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.14/release-notes-8.14.0.html[here]. - -[discrete] -===== ES|QL object API helper - -A helper method has been added that parses the response of an ES|QL query and converts it into an array of objects. -A TypeScript type parameter can also be provided to improve developer experience when working with the result. https://github.com/elastic/elasticsearch-js/pull/2238[#2238] - -[discrete] -===== `onSuccess` callback added to bulk helper - -The bulk helper now supports an `onSuccess` callback that will be called for each successful operation. https://github.com/elastic/elasticsearch-js/pull/2199[#2199] - -[discrete] -===== Request retries are more polite - -https://github.com/elastic/elastic-transport-js/releases/tag/v8.6.0[`@elastic/transport` v8.6.0] was released, which refactored when and how failed requests are retried. Timed-out requests are no longer retried by default, and retries now use exponential backoff rather than running immediately. - - -[discrete] -=== 8.13.1 - -[discrete] -==== Fixes - -[discrete] -===== Pin @elastic/transport to `~8.4.1` - -Switching from `^8.4.1` to `~8.4.1` ensures 8.13 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. - -v8.13.0 was also released depending on v8.4.0 of `@elastic/transport` instead of v8.4.1, which was unintentional. - -[discrete] -=== 8.13.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.13.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.13/release-notes-8.13.0.html[here]. - -[discrete] -==== Fixes - -[discrete] -===== Ensure new connections inherit client's set defaults https://github.com/elastic/elasticsearch-js/pull/2159[#2159] - -When instantiating a client, any connection-related defaults (e.g. `requestTimeout`) set on that client instance would not be inherited by nodes if they were entered as strings rather than a `ConnectionOptions` object. - -[discrete] -=== 8.12.3 - -[discrete] -==== Fixes - -[discrete] -===== Bump @elastic/transport to `~8.4.1` - -Switching from `^8.4.1` to `~8.4.1` ensures 8.12 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. - -[discrete] -=== 8.12.2 - -[discrete] -==== Fixes - -[discrete] -===== Upgrade transport to 8.4.1 https://github.com/elastic/elasticsearch-js/pull/2137[#2137] - -Upgrades `@elastic/transport` to 8.4.1 to resolve https://github.com/elastic/elastic-transport-js/pull/83[a bug] where arrays in error diagnostics were unintentionally transformed into objects. 
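To make the ES|QL object API helper described under 8.14.0 above more concrete, here is a minimal sketch. The `helpers.esql(...).toRecords()` call shape, the returned `records` property, and the `EventDoc` type are assumptions for illustration, based on the changelog entry above rather than on a verbatim example from it:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// hypothetical document shape, used only to type the parsed rows
interface EventDoc {
  '@timestamp': string
  message: string
}

// toRecords() parses the columnar ES|QL response into an array of objects
const { records } = await client.helpers
  .esql({ query: 'FROM my-index | LIMIT 10' })
  .toRecords<EventDoc>()

for (const record of records) {
  console.log(record['@timestamp'], record.message)
}
----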
- -[discrete] -=== 8.12.1 - -[discrete] -==== Fixes - -[discrete] -===== Fix hang in bulk helper semaphore https://github.com/elastic/elasticsearch-js/pull/2027[#2027] - -The failing state could be reached when a server's response times were slower than the bulk helper's `flushInterval`. - -[discrete] -=== 8.12.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.12.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.12/release-notes-8.12.0.html[here]. - -[discrete] -=== 8.11.1 - -[discrete] -==== Fixes - -[discrete] -===== Bump @elastic/transport to `~8.4.0` - -Switching from `^8.4.0` to `~8.4.0` ensures 8.11 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. - -[discrete] -=== 8.11.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.11.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.11/release-notes-8.11.0.html[here]. - -[discrete] -===== Enhanced support for redacting potentially sensitive data https://github.com/elastic/elasticsearch-js/pull/2095[#2095] - -`@elastic/transport` https://github.com/elastic/elastic-transport-js/releases/tag/v8.4.0[version 8.4.0] introduces enhanced measures for ensuring that request metadata attached to some `Error` objects is redacted. This functionality is primarily to address custom logging solutions that don't use common serialization methods like `JSON.stringify`, `console.log`, or `util.inspect`, which were already accounted for. - -See <> for more information. - -[discrete] -=== 8.10.1 - -[discrete] -==== Fixes - -[discrete] -===== Bump @elastic/transport to `~8.3.4` - -Switching from `^8.3.4` to `~8.3.4` ensures 8.10 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. - -[discrete] -=== 8.10.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.10.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.10/release-notes-8.10.0.html[here]. - -[discrete] -=== 8.9.2 - -[discrete] -==== Fixes - -[discrete] -===== Bump @elastic/transport to `~8.3.4` - -Switching from `^8.3.4` to `~8.3.4` ensures 8.9 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. - -[discrete] -=== 8.9.1 - -[discrete] -==== Fixes - -[discrete] -===== Upgrade Transport https://github.com/elastic/elasticsearch-js/pull/1968[#1968] - -Upgrades `@elastic/transport` to the latest patch release to fix https://github.com/elastic/elastic-transport-js/pull/69[a bug] that could cause the process to exit when handling malformed `HEAD` requests. - -[discrete] -=== 8.9.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.9.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.9/release-notes-8.9.0.html[here].
- -[discrete] -===== Allow document to be overwritten in `onDocument` iteratee of bulk helper https://github.com/elastic/elasticsearch-js/pull/1732[#1732] - -In the {jsclient}/client-helpers.html#bulk-helper[bulk helper], documents could not be modified before being sent to Elasticsearch. It is now possible to {jsclient}/client-helpers.html#_modifying_a_document_before_operation[modify a document] before sending it. - -[discrete] -==== Fixes - -[discrete] -===== Updated `user-agent` header https://github.com/elastic/elasticsearch-js/pull/1954[#1954] - -The `user-agent` header the client used to connect to Elasticsearch was using a non-standard format that has been improved. - -[discrete] -=== 8.8.2 - -[discrete] -==== Fixes - -[discrete] -===== Bump @elastic/transport to `~8.3.2` - -Switching from `^8.3.2` to `~8.3.2` ensures 8.8 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. - -[discrete] -=== 8.8.1 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.8.1` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.8/release-notes-8.8.1.html[here]. - -[discrete] -==== Fixes - -[discrete] -===== Fix index drift bug in bulk helper https://github.com/elastic/elasticsearch-js/pull/1759[#1759] - -Fixes a bug in the bulk helper that would cause `onDrop` to send back the wrong JSON document or error on a nonexistent document when an error occurred on a bulk HTTP request that contained a `delete` action. - -[discrete] -===== Fix a memory leak caused by an outdated version of Undici https://github.com/elastic/elasticsearch-js/pull/1902[#1902] - -Undici 5.5.1, used by https://github.com/elastic/elastic-transport-js[elastic-transport-js], could create a memory leak when a high volume of requests created too many HTTP `abort` listeners. Upgrading Undici to 5.22.1 removed the memory leak. - -[discrete] -=== 8.8.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.8.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.8/release-notes-8.8.0.html[here]. - -[discrete] -==== Fixes - -[discrete] -===== Fix type declarations for legacy types with a body key https://github.com/elastic/elasticsearch-js/pull/1784[#1784] - -Prior releases contained a bug where type declarations for legacy types that include a `body` key were not actually importing the type that includes the `body` key. - -[discrete] -=== 8.7.3 - -[discrete] -==== Fixes - -[discrete] -===== Bump @elastic/transport to `~8.3.1` - -Switching from `^8.3.1` to `~8.3.1` ensures 8.7 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. - -[discrete] -=== 8.7.0 - -[discrete] -===== Support for Elasticsearch `v8.7.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.7/release-notes-8.7.0.html[here]. - -[discrete] -=== 8.6.1 - -[discrete] -==== Fixes - -[discrete] -===== Bump @elastic/transport to `~8.3.1` - -Switching from `^8.3.1` to `~8.3.1` ensures 8.6 client users are not required to update to Node.js v18+, which is a new requirement set by `@elastic/transport` v8.5.0. 
See https://github.com/elastic/elastic-transport-js/issues/91[elastic/elastic-transport-js#91] for details. - -[discrete] -=== 8.6.0 - -[discrete] -===== Bump @elastic/transport to 8.3.1+ https://github.com/elastic/elasticsearch-js/pull/1802[#1802] - -The `@elastic/transport` dependency has been bumped to `~8.3.1` to ensure -fixes to the `maxResponseSize` option are available in the client. - -[discrete] -===== Support for Elasticsearch `v8.6.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.6/release-notes-8.6.0.html[here]. - -[discrete] -=== 8.5.0 - -[discrete] -===== Support for Elasticsearch `v8.5.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.5/release-notes-8.5.0.html[here]. - -[discrete] -=== 8.4.0 - -[discrete] -===== Support for Elasticsearch `v8.4.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.4/release-notes-8.4.0.html[here]. - -[discrete] -=== 8.2.1 - -[discrete] -==== Fixes - -[discrete] -===== Support for Elasticsearch `v8.2.1` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.2/release-notes-8.2.1.html[here]. - -[discrete] -===== Fix ndjson APIs https://github.com/elastic/elasticsearch-js/pull/1688[#1688] - -The previous release contained a bug that broke the ndjson APIs. -We have released `v8.2.0-patch.1` to address this; it contains the same fix as this release, and we strongly recommend upgrading to one of these versions. - -[discrete] -===== Fix node shutdown apis https://github.com/elastic/elasticsearch-js/pull/1697[#1697] - -The shutdown APIs weren't complete; this fix completes them. - -[discrete] -===== Types: move query keys to body https://github.com/elastic/elasticsearch-js/pull/1693[#1693] - -The type definitions were wrongly representing the types of fields present in both query and body. - -[discrete] -=== 8.2.0 - -[discrete] -==== Breaking changes - -[discrete] -===== Drop Node.js v12 https://github.com/elastic/elasticsearch-js/pull/1670[#1670] - -According to our https://github.com/elastic/elasticsearch-js#nodejs-support[Node.js support matrix]. - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.2` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.2/release-notes-8.2.0.html[here]. - -[discrete] -===== More lenient parameter checks https://github.com/elastic/elasticsearch-js/pull/1662[#1662] - -When creating a new client, an `undefined` `caFingerprint` no longer triggers an error for an http connection. - -[discrete] -===== Update TypeScript docs and export estypes https://github.com/elastic/elasticsearch-js/pull/1675[#1675] - -You can import the full TypeScript requests & responses definitions as follows: -[source,ts] ----- -import { estypes } from '@elastic/elasticsearch' ----- - -If you need the legacy definitions with the body, you can do the following: - -[source,ts] ----- -import { estypesWithBody } from '@elastic/elasticsearch' ----- - -[discrete] -==== Fixes - -[discrete] -===== Updated hpagent to the latest version https://github.com/elastic/elastic-transport-js/pull/49[transport/#49] - -You can find the related changes https://github.com/delvedor/hpagent/releases/tag/v1.0.0[here].
- -[discrete] -=== 8.1.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.1` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.1/release-notes-8.1.0.html[here]. - -[discrete] -===== Export SniffingTransport https://github.com/elastic/elasticsearch-js/pull/1653[#1653] - -Now the client exports the SniffingTransport class. - -[discrete] -==== Fixes - -[discrete] -===== Fix onFlushTimeout timer not being cleared when upstream errors https://github.com/elastic/elasticsearch-js/pull/1616[#1616] - -Fixes a memory leak caused by an error in the upstream dataset of the bulk helper. - -[discrete] -===== Cleanup abort listener https://github.com/elastic/elastic-transport-js/pull/42[transport/#42] - -The legacy http client was not cleaning up the abort listener, which could cause a memory leak. - -[discrete] -===== Improve undici performance https://github.com/elastic/elastic-transport-js/pull/41[transport/#41] - -Improves stream body collection and the keep-alive timeout. - -[discrete] -=== 8.0.0 - -[discrete] -==== Features - -[discrete] -===== Support for Elasticsearch `v8.0` - -You can find all the API changes -https://www.elastic.co/guide/en/elasticsearch/reference/8.0/release-notes-8.0.0.html[here]. - -[discrete] -===== Drop old typescript definitions - -*Breaking: Yes* | *Migration effort: Medium* - -The current TypeScript definitions will be removed from the client, and the new definitions, which contain request and response definitions as well, will be shipped by default. - -[discrete] -===== Drop callback-style API - -*Breaking: Yes* | *Migration effort: Large* - -Maintaining both API styles is not a problem per se, but it makes error handling more convoluted due to async stack traces. -Moving to a full-promise API will solve this issue. - -[source,js] ----- -// callback-style api -client.search({ params }, { options }, (err, result) => { - console.log(err || result) -}) - -// promise-style api -client.search({ params }, { options }) - .then(console.log) - .catch(console.log) - -// async-style (sugar syntax on top of promises) -const response = await client.search({ params }, { options }) -console.log(response) ----- - -If you are already using the promise-style API, this won't be a breaking change for you. - -[discrete] -===== Remove the current abort API and use the new AbortController standard - -*Breaking: Yes* | *Migration effort: Small* - -The old abort API makes sense for callbacks but it's annoying to use with promises: - -[source,js] ----- -// callback-style api -const request = client.search({ params }, { options }, (err, result) => { - console.log(err) // RequestAbortedError -}) - -request.abort() - -// promise-style api -const promise = client.search({ params }, { options }) - -promise - .then(console.log) - .catch(console.log) // RequestAbortedError - -promise.abort() ----- - -Node v12 has added the standard https://nodejs.org/api/globals.html#globals_class_abortcontroller[`AbortController`] API which is designed to work well with both callbacks and promises. -[source,js] ----- -const ac = new AbortController() -client.search({ params }, { signal: ac.signal }) - .then(console.log) - .catch(console.log) // RequestAbortedError - -ac.abort() ----- - -[discrete] -[[remove-body-key]] -===== Remove the body key from the request - -*Breaking: Yes* | *Migration effort: Small* - -Thanks to the new types we are developing, we now know exactly where a parameter should go.
-The client API leaks HTTP-related notions in many places, and removing them would definitely improve the DX. - -This could be a rather big breaking change, so a double solution could be used during the 8.x lifecycle (accepting body keys without them being wrapped in the body, as well as the current solution). - -To convert code from 7.x, you need to remove the `body` parameter in all endpoint requests. -For instance, this is an example for the `search` endpoint: - -[source,js] ----- -// from -const response = await client.search({ - index: 'test', - body: { - query: { - match_all: {} - } - } -}) - -// to -const response = await client.search({ - index: 'test', - query: { - match_all: {} - } -}) ----- - -[discrete] -===== Migrate to new separate transport - -*Breaking: Yes* | *Migration effort: Small to none* - -The separated transport has been rewritten in TypeScript and has already dropped the callback style API. -Given that it is now separated, most of the Elasticsearch-specific concepts have been removed, and the client will likely need to extend parts of it for reintroducing them. -If you weren't extending the internals of the client, this won't be a breaking change for you. - -[discrete] -===== The returned value of API calls is the body and not the HTTP related keys - -*Breaking: Yes* | *Migration effort: Small* - -The client API leaks HTTP-related notions in many places, and removing them would definitely improve the DX. -The client will expose a new request-specific option to still get the full response details. - -The new behaviour returns the `body` value directly as the response. -If you want to have the 7.x response format, you need to add `meta : true` in the request. -This will return all the HTTP meta information, including the `body`. - -For instance, this is an example for the `search` endpoint: - -[source,js] ----- -// from -const response = await client.search({ - index: 'test', - body: { - query: { - match_all: {} - } - } -}) -console.log(response) // { body: SearchResponse, statusCode: number, headers: object, warnings: array } - -// to -const response = await client.search({ - index: 'test', - query: { - match_all: {} - } -}) -console.log(response) // SearchResponse - -// with a bit of TypeScript and JavaScript magic... -const response = await client.search({ - index: 'test', - query: { - match_all: {} - } -}, { - meta: true -}) -console.log(response) // { body: SearchResponse, statusCode: number, headers: object, warnings: array } ----- - -[discrete] -===== Use a weighted connection pool - -*Breaking: Yes* | *Migration effort: Small to none* - -Move from the current cluster connection pool to a weight-based implementation. -This new implementation offers better performance and runs less code in the background; the old connection pool can still be used. -If you weren't extending the internals of the client, this won't be a breaking change for you. - -[discrete] -===== Migrate to the "undici" http client - -*Breaking: Yes* | *Migration effort: Small to none* - -By default, the HTTP client will no longer be the default Node.js HTTP client, but https://github.com/nodejs/undici[undici] instead. -Undici is a brand new HTTP client written from scratch; it offers vastly improved performance and has better support for promises. -Furthermore, it offers comprehensive and predictable error handling. The old HTTP client can still be used. -If you weren't extending the internals of the client, this won't be a breaking change for you.
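Since the old HTTP client can still be used, here is a minimal sketch of opting back in; the `HttpConnection` class imported from `@elastic/transport` is the same one shown in the proxy section of the connecting documentation below:

[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const { HttpConnection } = require('@elastic/transport')

const client = new Client({
  node: 'http://localhost:9200/',
  // switch the transport back to the Node.js core http client
  // instead of the new undici-based default
  Connection: HttpConnection
})
----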
- -[discrete] -===== Drop support for old camelCased keys - -*Breaking: Yes* | *Migration effort: Medium* - -Currently, every path or query parameter could be expressed in both `snake_case` and `camelCase`. Internally the client will convert everything to `snake_case`. -This was done in an effort to reduce the friction of migrating from the legacy to the new client, but now it no longer makes sense. -If you are already using `snake_case` keys, this won't be a breaking change for you. - -[discrete] -===== Rename `ssl` option to `tls` - -*Breaking: Yes* | *Migration effort: Small* - -People usually refer to this as `tls`; furthermore, internally we use the tls API and Node.js refers to it as tls everywhere. -[source,js] ----- -// before -const client = new Client({ - node: 'https://localhost:9200/', - ssl: { - rejectUnauthorized: false - } -}) - -// after -const client = new Client({ - node: 'https://localhost:9200/', - tls: { - rejectUnauthorized: false - } -}) ----- - -[discrete] -===== Remove prototype poisoning protection - -*Breaking: Yes* | *Migration effort: Small* - -Prototype poisoning protection is very useful, but it can cause performance issues with big payloads. -In v8 it will be removed, and the documentation will show how to add it back with a custom serializer. - -[discrete] -===== Remove client extensions API - -*Breaking: Yes* | *Migration effort: Large* - -Nowadays the client supports the entire Elasticsearch API, and the `transport.request` method can be used if necessary. The client extensions API has no reason to exist. -[source,js] ----- -client.extend('utility.index', ({ makeRequest }) => { - return function _index (params, options) { - // your code - } -}) - -client.utility.index(...) ----- - -If you weren't using client extensions, this won't be a breaking change for you. - -[discrete] -===== Move to TypeScript - -*Breaking: No* | *Migration effort: None* - -The new separated transport is already written in TypeScript, and it makes sense that the client v8 will be fully written in TypeScript as well. - -[discrete] -===== Move from emitter-like interface to a diagnostic method - -*Breaking: Yes* | *Migration effort: Small* - -Currently, the client offers a subset of methods of the `EventEmitter` class; v8 will ship with a `diagnostic` property which will be a proper event emitter. -[source,js] ----- -// from -client.on('request', console.log) - -// to -client.diagnostic.on('request', console.log) ----- - -[discrete] -===== Remove username & password properties from Cloud configuration - -*Breaking: Yes* | *Migration effort: Small* - -The Cloud configuration does not support ApiKey and Bearer auth, while the `auth` option does. -There is no need to keep the legacy basic auth support in the cloud configuration. -[source,js] ----- -// before -const client = new Client({ - cloud: { - id: '', - username: 'elastic', - password: 'changeme' - } -}) - -// after -const client = new Client({ - cloud: { - id: '' - }, - auth: { - username: 'elastic', - password: 'changeme' - } -}) ----- - -If you are already passing the basic auth options in the `auth` configuration, this won't be a breaking change for you. - -[discrete] -===== Calling `client.close` will reject new requests - -Once you call `client.close`, every new request after that will be rejected with a `NoLivingConnectionsError`. In-flight requests will be executed normally unless an in-flight request requires a retry, in which case it will be rejected.
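A minimal sketch of this new behavior; the error name comes from the errors table in the connecting documentation below:

[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

await client.close()

// every request sent after close() is rejected
try {
  await client.info()
} catch (err) {
  console.log(err.name) // NoLivingConnectionsError
}
----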
- -[discrete] -===== Parameters rename - -- `ilm.delete_lifecycle`: `policy` parameter has been renamed to `name` -- `ilm.get_lifecycle`: `policy` parameter has been renamed to `name` -- `ilm.put_lifecycle`: `policy` parameter has been renamed to `name` -- `snapshot.cleanup_repository`: `repository` parameter has been renamed to `name` -- `snapshot.create_repository`: `repository` parameter has been renamed to `name` -- `snapshot.delete_repository`: `repository` parameter has been renamed to `name` -- `snapshot.get_repository`: `repository` parameter has been renamed to `name` -- `snapshot.verify_repository`: `repository` parameter has been renamed to `name` - -[discrete] -===== Removal of snake_cased methods - -The v7 client provided snake_cased methods, such as `client.delete_by_query`. This is no longer supported; now only camelCased methods are present. -So `client.delete_by_query` can be accessed as `client.deleteByQuery`. - diff --git a/docs/child.asciidoc b/docs/child.asciidoc deleted file mode 100644 index 25575bbe2..000000000 --- a/docs/child.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -[[child]] -=== Creating a child client - -There are some use cases where you may need multiple instances of the client. -You can easily do that by calling `new Client()` as many times as you need, but -you will lose all the benefits of using one single client, such as long-lived -connections and the connection pool handling. To avoid this problem, the -client offers a `child` API, which returns a new client instance that shares the -connection pool with the parent client. - -NOTE: The event emitter is shared between the parent and the child(ren). If you -extend the parent client, the child client will have the same extensions, while -if the child client adds an extension, the parent client will not be extended. - -You can pass to the `child` every client option you would pass to a normal -client, except the connection pool-specific options (`ssl`, `agent`, `pingTimeout`, -`Connection`, and `resurrectStrategy`). - -CAUTION: If you call `close` in any of the parent/child clients, every client -will be closed. - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - cloud: { id: '' }, - auth: { apiKey: 'base64EncodedKey' } -}) -const child = client.child({ - headers: { 'x-foo': 'bar' }, -}) - -client.info().then(console.log, console.log) -child.info().then(console.log, console.log) ----- diff --git a/docs/configuration.asciidoc b/docs/configuration.asciidoc deleted file mode 100644 index 402c5e9a3..000000000 --- a/docs/configuration.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -[[client-configuration]] -== Configuration - - -The client is designed to be easily configured for your needs. In the following -section, you can see the possible options that you can use to configure it. - -* <> -* <> -* <> -* <> -* <> diff --git a/docs/connecting.asciidoc b/docs/connecting.asciidoc deleted file mode 100644 index f87961edb..000000000 --- a/docs/connecting.asciidoc +++ /dev/null @@ -1,738 +0,0 @@ -[[client-connecting]] -== Connecting - -This page contains the information you need to connect and use the Client with -{es}. - -**On this page** - -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> - -[[authentication]] -[discrete] -=== Authentication - -This document contains code snippets to show you how to connect to various {es} -providers.
- - -[discrete] -[[auth-ec]] -==== Elastic Cloud - -If you are using https://www.elastic.co/cloud[Elastic Cloud], the client offers -an easy way to connect to it via the `cloud` option. You must pass the Cloud ID -that you can find in the cloud console, then your username and password inside -the `auth` option. - -NOTE: When connecting to Elastic Cloud, the client will automatically enable -both request and response compression by default, since it yields significant -throughput improvements. Moreover, the client will also set the tls option -`secureProtocol` to `TLSv1_2_method` unless specified otherwise. You can still -override this option by configuring it. - -IMPORTANT: Do not enable sniffing when using Elastic Cloud: since the nodes are -behind a load balancer, Elastic Cloud will take care of everything for you. -Take a look https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how[here] -to learn more. - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - cloud: { - id: '' - }, - auth: { - username: 'elastic', - password: 'changeme' - } -}) ----- - -[discrete] -[[connect-self-managed-new]] -=== Connecting to a self-managed cluster - -By default {es} will start with security features like authentication and TLS -enabled. To connect to the {es} cluster you'll need to configure the Node.js {es} -client to use HTTPS with the generated CA certificate in order to make requests -successfully. - -If you're just getting started with {es} we recommend reading the documentation -on https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html[configuring] -and -https://www.elastic.co/guide/en/elasticsearch/reference/current/starting-elasticsearch.html[starting {es}] -to ensure your cluster is running as expected. - -When you start {es} for the first time you'll see a distinct block like the one -below in the output from {es} (you may have to scroll up if it's been a while): - -[source,sh] ----- - --> Elasticsearch security features have been automatically configured! --> Authentication is enabled and cluster connections are encrypted. - --> Password for the elastic user (reset with `bin/elasticsearch-reset-password -u elastic`): - lhQpLELkjkrawaBoaz0Q - --> HTTP CA certificate SHA-256 fingerprint: - a52dd93511e8c6045e21f16654b77c9ee0f34aea26d9f40320b531c474676228 -... - ----- - -Depending on the circumstances there are two options for verifying the HTTPS -connection, either verifying with the CA certificate itself or via the HTTP CA -certificate fingerprint. - -[discrete] -[[auth-tls]] -==== TLS configuration - -The generated root CA certificate can be found in the `certs` directory in your -{es} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you're running {es} -in Docker there is -https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html[additional documentation for retrieving the CA certificate]. - -Without any additional configuration you can specify `https://` node urls, and -the certificates used to sign these requests will be verified. To turn off -certificate verification, you must specify a `tls` object in the top level -config and set `rejectUnauthorized: false`. The default `tls` values are the -same ones that Node.js's https://nodejs.org/api/tls.html#tls_tls_connect_options_callback[`tls.connect()`] -uses.
- -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://localhost:9200/', - auth: { - username: 'elastic', - password: 'changeme' - }, - tls: { - ca: fs.readFileSync('./http_ca.crt'), - rejectUnauthorized: false - } -}) ----- - -[discrete] -[[auth-ca-fingerprint]] -==== CA fingerprint - -You can configure the client to only trust certificates that are signed by a specific CA certificate -(CA certificate pinning) by providing a `caFingerprint` option. -This will verify that the fingerprint of the CA certificate that has signed -the certificate of the server matches the supplied value. -You must configure a SHA256 digest. - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://example.com/', - auth: { ... }, - // the fingerprint (SHA256) of the CA certificate that is used to sign - // the certificate that the Elasticsearch node presents for TLS. - caFingerprint: '20:0D:CA:FA:76:...', - tls: { - // might be required if it's a self-signed certificate - rejectUnauthorized: false - } -}) ----- - -The certificate fingerprint can be calculated using `openssl x509` with the -certificate file: - -[source,sh] ----- -openssl x509 -fingerprint -sha256 -noout -in /path/to/http_ca.crt ----- - -If you don't have access to the generated CA file from {es} you can use the -following script to output the root CA fingerprint of the {es} instance with -`openssl s_client`: - -[source,sh] ----- -# Replace the values of 'localhost' and '9200' with the -# corresponding host and port values for the cluster. -openssl s_client -connect localhost:9200 -servername localhost -showcerts </dev/null 2>/dev/null \ - | openssl x509 -fingerprint -sha256 -noout -in /dev/stdin ----- - -The output of `openssl x509` will look something like this: - -[source,sh] ----- -SHA256 Fingerprint=A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28 ----- - - -[discrete] -[[connect-no-security]] -=== Connecting without security enabled - -WARNING: Running {es} without security enabled is not recommended. - -If your cluster is configured with -https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html[security explicitly disabled] -then you can connect via HTTP: - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'http://example.com/' -}) ----- - -[discrete] -[[auth-strategies]] -=== Authentication strategies - -Below you can find all the supported authentication strategies. - -[discrete] -[[auth-apikey]] -==== ApiKey authentication - -You can use the -{ref-7x}/security-api-create-api-key.html[ApiKey] -authentication by passing the `apiKey` parameter via the `auth` option. The -`apiKey` parameter can be either a base64 encoded string or an object with the -values that you can obtain from the -{ref-7x}/security-api-create-api-key.html[create api key endpoint]. - -NOTE: If you provide both basic authentication credentials and the ApiKey -configuration, the ApiKey takes precedence.
- -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://localhost:9200/', - auth: { - apiKey: 'base64EncodedKey' - } -}) ----- - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://localhost:9200/', - auth: { - apiKey: { - id: 'foo', - api_key: 'bar' - } - } -}) ----- - -[discrete] -[[auth-bearer]] -==== Bearer authentication - -You can provide your credentials by passing the `bearer` token -parameter via the `auth` option. -Useful for https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html[service account tokens]. -Be aware that it does not handle automatic token refresh. - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://localhost:9200/', - auth: { - bearer: 'token' - } -}) ----- - - -[discrete] -[[auth-basic]] -==== Basic authentication - -You can provide your credentials by passing the `username` and `password` -parameters via the `auth` option. - -NOTE: If you provide both basic authentication credentials and the ApiKey -configuration, the ApiKey will take precedence. - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://localhost:9200/', - auth: { - username: 'elastic', - password: 'changeme' - } -}) ----- - - -Otherwise, you can provide your credentials in the node(s) URL. - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://username:password@localhost:9200/' -}) ----- - - -[discrete] -[[client-usage]] -=== Usage - -Using the client is straightforward: it supports all the public APIs of {es}, -and every method exposes the same signature. - - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - cloud: { id: '' }, - auth: { apiKey: 'base64EncodedKey' } -}) - -const result = await client.search({ - index: 'my-index', - query: { - match: { hello: 'world' } - } -}) ----- - -The returned value of every API call is the response body from {es}. -If you need to access additional metadata, such as the status code or headers, -you must specify `meta: true` in the request options: - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - cloud: { id: '' }, - auth: { apiKey: 'base64EncodedKey' } -}) - -const result = await client.search({ - index: 'my-index', - query: { - match: { hello: 'world' } - } -}, { meta: true }) ----- - -In this case, the result will be: -[source,ts] ----- -{ - body: object | boolean - statusCode: number - headers: object - warnings: string[], - meta: object -} ----- - -NOTE: The body is a boolean value when you use `HEAD` APIs. - -[discrete] -==== Aborting a request - -If needed, you can abort a running request by using the `AbortController` standard. - -CAUTION: If you abort a request, the request will fail with a -`RequestAbortedError`.
- - -[source,js] ----- -const AbortController = require('node-abort-controller') -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - cloud: { id: '' }, - auth: { apiKey: 'base64EncodedKey' } -}) - -const abortController = new AbortController() -setImmediate(() => abortController.abort()) - -const result = await client.search({ - index: 'my-index', - query: { - match: { hello: 'world' } - } -}, { signal: abortController.signal }) ----- - -[discrete] -==== Request specific options - -If needed you can pass request specific options in a second object: - -[source,js] ----- -const result = await client.search({ - index: 'my-index', - query: { - match: { hello: 'world' } - } -}, { - ignore: [404], - maxRetries: 3 -}) ----- - - -The supported request specific options are: -[cols=2*] -|=== -|`ignore` -|`number[]` -  HTTP status codes which should not be considered errors for this request. + -_Default:_ `null` - -|`requestTimeout` -|`number | string | null` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#_http_client_configuration[Elasticsearch best practices for HTTP clients] for more info. + -_Default:_ No timeout - -|`retryOnTimeout` -|`boolean` - Retry requests that have timed out. -_Default:_ `false` - -|`maxRetries` -|`number` - Max number of retries for the request; it overrides the client default. + -_Default:_ `3` - -|`compression` -|`string | boolean` - Enables body compression for the request. + -_Options:_ `false`, `'gzip'` + -_Default:_ `false` - -|`asStream` -|`boolean` - Instead of getting the parsed body back, you get the raw Node.js stream of data. + -_Default:_ `false` - -|`headers` -|`object` - Custom headers for the request. + -_Default:_ `null` - -|`querystring` -|`object` - Custom querystring for the request. + -_Default:_ `null` - -|`id` -|`any` - Custom request id. _(overrides the top level request id generator)_ + -_Default:_ `null` - -|`context` -|`any` - Custom object per request. _(you can use it to pass data to the clients events)_ + -_Default:_ `null` - -|`opaqueId` -|`string` - Set the `X-Opaque-Id` HTTP header. See {ref}/api-conventions.html#x-opaque-id -_Default:_ `null` - -|`maxResponseSize` -|`number` - When configured, it verifies that the uncompressed response size is lower than the configured number; if it's higher, it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENGTH + -_Default:_ `null` - -|`maxCompressedResponseSize` -|`number` - When configured, it verifies that the compressed response size is lower than the configured number; if it's higher, it will abort the request. It cannot be higher than buffer.constants.MAX_LENGTH + -_Default:_ `null` - -|`signal` -|`AbortSignal` - The AbortSignal instance to allow request abortion. + -_Default:_ `null` - -|`meta` -|`boolean` - Rather than returning the body, return an object containing `body`, `statusCode`, `headers` and `meta` keys + -_Default_: `false` - -|`redaction` -|`object` - Options for redacting potentially sensitive data from error metadata. See <>. - -|`retryBackoff` -|`(min: number, max: number, attempt: number) => number;` - A function that calculates how long to sleep, in seconds, before the next request retry + -_Default:_ A built-in function that uses exponential backoff with jitter.
- -|=== - -[discrete] -[[client-faas-env]] -=== Using the Client in a Function-as-a-Service Environment - -This section illustrates the best practices for leveraging the {es} client in a Function-as-a-Service (FaaS) environment. -The most influential optimization is to initialize the client outside of the function, in the global scope. -This practice not only improves performance but also enables background functionality such as, for example, https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how[sniffing]. -The following examples provide a skeleton for the best practices. - -[discrete] -==== GCP Cloud Functions - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') - -const client = new Client({ - // client initialisation -}) - -exports.testFunction = async function (req, res) { - // use the client -} ----- - -[discrete] -==== AWS Lambda - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') - -const client = new Client({ - // client initialisation -}) - -exports.handler = async function (event, context) { - // use the client -} ----- - -[discrete] -==== Azure Functions - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') - -const client = new Client({ - // client initialisation -}) - -module.exports = async function (context, req) { - // use the client -} ----- - -Resources used to assess these recommendations: - -- https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations[GCP Cloud Functions: Tips & Tricks] -- https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html[Best practices for working with AWS Lambda functions] -- https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=azurecli-linux%2Capplication-level#global-variables[Azure Functions Python developer guide] -- https://docs.aws.amazon.com/lambda/latest/operatorguide/global-scope.html[AWS Lambda: Comparing the effect of global scope] - - -[discrete] -[[client-connect-proxy]] -=== Connecting through a proxy - -~Added~ ~in~ ~`v7.10.0`~ - -If you need to pass through an http(s) proxy for connecting to {es}, the client -offers a handy configuration out of the box to help you with it. Under the -hood, it uses the https://github.com/delvedor/hpagent[`hpagent`] module. - -IMPORTANT: In versions 8.0+ of the client, the default `Connection` type is set to `UndiciConnection`, which does not support proxy configurations. -To use a proxy, you will need to use the `HttpConnection` class from `@elastic/transport` instead. - -[source,js] ----- -import { HttpConnection } from '@elastic/transport' - -const client = new Client({ - node: 'http://localhost:9200/', - proxy: 'http://localhost:8080/', - Connection: HttpConnection, -}) ----- - -Basic authentication is supported as well: - -[source,js] ----- -const client = new Client({ - node: 'http://localhost:9200/', - proxy: 'http://user:pwd@localhost:8080/', - Connection: HttpConnection, -}) ----- - -If you are connecting through a non-http(s) proxy, such as a `socks5` or `pac`, -you can use the `agent` option to configure it.
-
-[source,js]
-----
-const SocksProxyAgent = require('socks-proxy-agent')
-const client = new Client({
-  node: 'http://localhost:9200/',
-  agent () {
-    return new SocksProxyAgent('socks://127.0.0.1:1080')
-  },
-  Connection: HttpConnection,
-})
-----
-
-
-[discrete]
-[[client-error-handling]]
-=== Error handling
-
-The client exposes a variety of error objects that you can use to enhance your
-error handling. You can find all the error objects inside the `errors` key in
-the client.
-
-[source,js]
-----
-const { errors } = require('@elastic/elasticsearch')
-console.log(errors)
-----
-
-
-You can find the errors exported by the client in the table below.
-
-[cols=3*]
-|===
-|*Error*
-|*Description*
-|*Properties*
-
-|`ElasticsearchClientError`
-|Every error inherits from this class; it is the basic error generated by the client.
-a|* `name` - `string`
-* `message` - `string`
-
-|`TimeoutError`
-|Generated when a request exceeds the `requestTimeout` option.
-a|* `name` - `string`
-* `message` - `string`
-* `meta` - `object`, contains all the information about the request
-
-|`ConnectionError`
-|Generated when an error occurs during the request; it can be a connection error or a malformed stream of data.
-a|* `name` - `string`
-* `message` - `string`
-* `meta` - `object`, contains all the information about the request
-
-|`RequestAbortedError`
-|Generated if the user calls the `request.abort()` method.
-a|* `name` - `string`
-* `message` - `string`
-* `meta` - `object`, contains all the information about the request
-
-|`NoLivingConnectionsError`
-|Given the configuration, the ConnectionPool was not able to find a usable Connection for this request.
-a|* `name` - `string`
-* `message` - `string`
-* `meta` - `object`, contains all the information about the request
-
-|`SerializationError`
-|Generated if the serialization fails.
-a|* `name` - `string`
-* `message` - `string`
-* `data` - `object`, the object to serialize
-
-|`DeserializationError`
-|Generated if the deserialization fails.
-a|* `name` - `string`
-* `message` - `string`
-* `data` - `string`, the string to deserialize
-
-|`ConfigurationError`
-|Generated if there is a malformed configuration or parameter.
-a|* `name` - `string`
-* `message` - `string`
-
-|`ResponseError`
-|Generated in case of a `4xx` or `5xx` response.
-a|* `name` - `string`
-* `message` - `string`
-* `meta` - `object`, contains all the information about the request
-* `body` - `object`, the response body
-* `statusCode` - `number`, the response status code
-* `headers` - `object`, the response headers
-|===
-
-[[keep-alive]]
-[discrete]
-=== Keep-alive connections
-
-By default, the client uses persistent, keep-alive connections to reduce the overhead of creating a new HTTP connection for each Elasticsearch request.
-If you are using the default `UndiciConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 10 minutes.
-If you are using the legacy `HttpConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 1 minute.
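-
-If you are using the legacy `HttpConnection` class, you can tune these defaults
-by passing standard Node.js
-https://nodejs.org/api/http.html#http_new_agent_options[HTTP agent options] to
-the `agent` option. The following is a minimal sketch; the option names are the
-standard `http.Agent` options, not client-specific settings:
-
-[source,js]
-----
-const { HttpConnection } = require('@elastic/transport')
-
-const client = new Client({
-  node: 'http://localhost:9200/',
-  Connection: HttpConnection,
-  // standard Node.js http.Agent options
-  agent: {
-    keepAlive: true,
-    keepAliveMsecs: 60000, // keep idle sockets open for up to one minute
-    maxSockets: 256
-  }
-})
-----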
- -If you need to disable keep-alive connections, you can override the HTTP agent with your preferred https://nodejs.org/api/http.html#http_new_agent_options[HTTP agent options]: - -[source,js] ----- -const client = new Client({ - node: 'http://localhost:9200/', - // the function takes as parameter the option - // object passed to the Connection constructor - agent: (opts) => new CustomAgent() -}) ----- - -Or you can disable the HTTP agent entirely: - -[source,js] ----- -const client = new Client({ - node: 'http://localhost:9200/', - // Disable agent and keep-alive - agent: false -}) ----- - -[discrete] -[[close-connections]] -=== Closing a client's connections - -If you would like to close all open connections being managed by an instance of the client, use the `close()` function: - -[source,js] ----- -const client = new Client({ - node: 'http://localhost:9200/' -}); -client.close(); ----- - -[discrete] -[[product-check]] -=== Automatic product check - -Since v7.14.0, the client performs a required product check before the first call. -This pre-flight product check allows the client to establish the version of Elasticsearch -that it is communicating with. The product check requires one additional HTTP request to -be sent to the server as part of the request pipeline before the main API call is sent. -In most cases, this will succeed during the very first API call that the client sends. -Once the product check completes, no further product check HTTP requests are sent for -subsequent API calls. diff --git a/docs/docset.yml b/docs/docset.yml new file mode 100644 index 000000000..10fc0529d --- /dev/null +++ b/docs/docset.yml @@ -0,0 +1,488 @@ +project: 'Node.js client' +exclude: + - examples/proxy/README.md +cross_links: + - elasticsearch +toc: + - toc: reference + - toc: release-notes +subs: + ref: "https://www.elastic.co/guide/en/elasticsearch/reference/current" + ref-bare: "https://www.elastic.co/guide/en/elasticsearch/reference" + ref-8x: "https://www.elastic.co/guide/en/elasticsearch/reference/8.1" + ref-80: "https://www.elastic.co/guide/en/elasticsearch/reference/8.0" + ref-7x: "https://www.elastic.co/guide/en/elasticsearch/reference/7.17" + ref-70: "https://www.elastic.co/guide/en/elasticsearch/reference/7.0" + ref-60: "https://www.elastic.co/guide/en/elasticsearch/reference/6.0" + ref-64: "https://www.elastic.co/guide/en/elasticsearch/reference/6.4" + xpack-ref: "https://www.elastic.co/guide/en/x-pack/6.2" + logstash-ref: "https://www.elastic.co/guide/en/logstash/current" + kibana-ref: "https://www.elastic.co/guide/en/kibana/current" + kibana-ref-all: "https://www.elastic.co/guide/en/kibana" + beats-ref-root: "https://www.elastic.co/guide/en/beats" + beats-ref: "https://www.elastic.co/guide/en/beats/libbeat/current" + beats-ref-60: "https://www.elastic.co/guide/en/beats/libbeat/6.0" + beats-ref-63: "https://www.elastic.co/guide/en/beats/libbeat/6.3" + beats-devguide: "https://www.elastic.co/guide/en/beats/devguide/current" + auditbeat-ref: "https://www.elastic.co/guide/en/beats/auditbeat/current" + packetbeat-ref: "https://www.elastic.co/guide/en/beats/packetbeat/current" + metricbeat-ref: "https://www.elastic.co/guide/en/beats/metricbeat/current" + filebeat-ref: "https://www.elastic.co/guide/en/beats/filebeat/current" + functionbeat-ref: 
"https://www.elastic.co/guide/en/beats/functionbeat/current" + winlogbeat-ref: "https://www.elastic.co/guide/en/beats/winlogbeat/current" + heartbeat-ref: "https://www.elastic.co/guide/en/beats/heartbeat/current" + journalbeat-ref: "https://www.elastic.co/guide/en/beats/journalbeat/current" + ingest-guide: "https://www.elastic.co/guide/en/ingest/current" + fleet-guide: "https://www.elastic.co/guide/en/fleet/current" + apm-guide-ref: "https://www.elastic.co/guide/en/apm/guide/current" + apm-guide-7x: "https://www.elastic.co/guide/en/apm/guide/7.17" + apm-app-ref: "https://www.elastic.co/guide/en/kibana/current" + apm-agents-ref: "https://www.elastic.co/guide/en/apm/agent" + apm-android-ref: "https://www.elastic.co/guide/en/apm/agent/android/current" + apm-py-ref: "https://www.elastic.co/guide/en/apm/agent/python/current" + apm-py-ref-3x: "https://www.elastic.co/guide/en/apm/agent/python/3.x" + apm-node-ref-index: "https://www.elastic.co/guide/en/apm/agent/nodejs" + apm-node-ref: "https://www.elastic.co/guide/en/apm/agent/nodejs/current" + apm-node-ref-1x: "https://www.elastic.co/guide/en/apm/agent/nodejs/1.x" + apm-rum-ref: "https://www.elastic.co/guide/en/apm/agent/rum-js/current" + apm-ruby-ref: "https://www.elastic.co/guide/en/apm/agent/ruby/current" + apm-java-ref: "https://www.elastic.co/guide/en/apm/agent/java/current" + apm-go-ref: "https://www.elastic.co/guide/en/apm/agent/go/current" + apm-dotnet-ref: "https://www.elastic.co/guide/en/apm/agent/dotnet/current" + apm-php-ref: "https://www.elastic.co/guide/en/apm/agent/php/current" + apm-ios-ref: "https://www.elastic.co/guide/en/apm/agent/swift/current" + apm-lambda-ref: "https://www.elastic.co/guide/en/apm/lambda/current" + apm-attacher-ref: "https://www.elastic.co/guide/en/apm/attacher/current" + docker-logging-ref: "https://www.elastic.co/guide/en/beats/loggingplugin/current" + esf-ref: "https://www.elastic.co/guide/en/esf/current" + kinesis-firehose-ref: "https://www.elastic.co/guide/en/kinesis/%7B%7Bkinesis_version%7D%7D" + estc-welcome-current: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current" + estc-welcome: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current" + estc-welcome-all: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions" + hadoop-ref: "https://www.elastic.co/guide/en/elasticsearch/hadoop/current" + stack-ref: "https://www.elastic.co/guide/en/elastic-stack/current" + stack-ref-67: "https://www.elastic.co/guide/en/elastic-stack/6.7" + stack-ref-68: "https://www.elastic.co/guide/en/elastic-stack/6.8" + stack-ref-70: "https://www.elastic.co/guide/en/elastic-stack/7.0" + stack-ref-80: "https://www.elastic.co/guide/en/elastic-stack/8.0" + stack-ov: "https://www.elastic.co/guide/en/elastic-stack-overview/current" + stack-gs: "https://www.elastic.co/guide/en/elastic-stack-get-started/current" + stack-gs-current: "https://www.elastic.co/guide/en/elastic-stack-get-started/current" + javaclient: "https://www.elastic.co/guide/en/elasticsearch/client/java-api/current" + java-api-client: 
"https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/current" + java-rest: "https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current" + jsclient: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current" + jsclient-current: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current" + es-ruby-client: "https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current" + es-dotnet-client: "https://www.elastic.co/guide/en/elasticsearch/client/net-api/current" + es-php-client: "https://www.elastic.co/guide/en/elasticsearch/client/php-api/current" + es-python-client: "https://www.elastic.co/guide/en/elasticsearch/client/python-api/current" + defguide: "https://www.elastic.co/guide/en/elasticsearch/guide/2.x" + painless: "https://www.elastic.co/guide/en/elasticsearch/painless/current" + plugins: "https://www.elastic.co/guide/en/elasticsearch/plugins/current" + plugins-8x: "https://www.elastic.co/guide/en/elasticsearch/plugins/8.1" + plugins-7x: "https://www.elastic.co/guide/en/elasticsearch/plugins/7.17" + plugins-6x: "https://www.elastic.co/guide/en/elasticsearch/plugins/6.8" + glossary: "https://www.elastic.co/guide/en/elastic-stack-glossary/current" + upgrade_guide: "https://www.elastic.co/products/upgrade_guide" + blog-ref: "https://www.elastic.co/blog/" + curator-ref: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current" + curator-ref-current: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current" + metrics-ref: "https://www.elastic.co/guide/en/metrics/current" + metrics-guide: "https://www.elastic.co/guide/en/metrics/guide/current" + logs-ref: "https://www.elastic.co/guide/en/logs/current" + logs-guide: "https://www.elastic.co/guide/en/logs/guide/current" + uptime-guide: "https://www.elastic.co/guide/en/uptime/current" + observability-guide: "https://www.elastic.co/guide/en/observability/current" + observability-guide-all: "https://www.elastic.co/guide/en/observability" + siem-guide: "https://www.elastic.co/guide/en/siem/guide/current" + security-guide: "https://www.elastic.co/guide/en/security/current" + security-guide-all: "https://www.elastic.co/guide/en/security" + endpoint-guide: "https://www.elastic.co/guide/en/endpoint/current" + sql-odbc: "https://www.elastic.co/guide/en/elasticsearch/sql-odbc/current" + ecs-ref: "https://www.elastic.co/guide/en/ecs/current" + ecs-logging-ref: "https://www.elastic.co/guide/en/ecs-logging/overview/current" + ecs-logging-go-logrus-ref: "https://www.elastic.co/guide/en/ecs-logging/go-logrus/current" + ecs-logging-go-zap-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zap/current" + ecs-logging-go-zerolog-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zap/current" + ecs-logging-java-ref: "https://www.elastic.co/guide/en/ecs-logging/java/current" + ecs-logging-dotnet-ref: "https://www.elastic.co/guide/en/ecs-logging/dotnet/current" + ecs-logging-nodejs-ref: "https://www.elastic.co/guide/en/ecs-logging/nodejs/current" + ecs-logging-php-ref: "https://www.elastic.co/guide/en/ecs-logging/php/current" + ecs-logging-python-ref: 
"https://www.elastic.co/guide/en/ecs-logging/python/current" + ecs-logging-ruby-ref: "https://www.elastic.co/guide/en/ecs-logging/ruby/current" + ml-docs: "https://www.elastic.co/guide/en/machine-learning/current" + eland-docs: "https://www.elastic.co/guide/en/elasticsearch/client/eland/current" + eql-ref: "https://eql.readthedocs.io/en/latest/query-guide" + extendtrial: "https://www.elastic.co/trialextension" + wikipedia: "https://en.wikipedia.org/wiki" + forum: "https://discuss.elastic.co/" + xpack-forum: "https://discuss.elastic.co/c/50-x-pack" + security-forum: "https://discuss.elastic.co/c/x-pack/shield" + watcher-forum: "https://discuss.elastic.co/c/x-pack/watcher" + monitoring-forum: "https://discuss.elastic.co/c/x-pack/marvel" + graph-forum: "https://discuss.elastic.co/c/x-pack/graph" + apm-forum: "https://discuss.elastic.co/c/apm" + enterprise-search-ref: "https://www.elastic.co/guide/en/enterprise-search/current" + app-search-ref: "https://www.elastic.co/guide/en/app-search/current" + workplace-search-ref: "https://www.elastic.co/guide/en/workplace-search/current" + enterprise-search-node-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/enterprise-search-node/current" + enterprise-search-php-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/php/current" + enterprise-search-python-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/python/current" + enterprise-search-ruby-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/ruby/current" + elastic-maps-service: "https://maps.elastic.co/" + integrations-docs: "https://docs.elastic.co/en/integrations" + integrations-devguide: "https://www.elastic.co/guide/en/integrations-developer/current" + time-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units" + byte-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units" + apm-py-ref-v: "https://www.elastic.co/guide/en/apm/agent/python/current" + apm-node-ref-v: "https://www.elastic.co/guide/en/apm/agent/nodejs/current" + apm-rum-ref-v: "https://www.elastic.co/guide/en/apm/agent/rum-js/current" + apm-ruby-ref-v: "https://www.elastic.co/guide/en/apm/agent/ruby/current" + apm-java-ref-v: "https://www.elastic.co/guide/en/apm/agent/java/current" + apm-go-ref-v: "https://www.elastic.co/guide/en/apm/agent/go/current" + apm-ios-ref-v: "https://www.elastic.co/guide/en/apm/agent/swift/current" + apm-dotnet-ref-v: "https://www.elastic.co/guide/en/apm/agent/dotnet/current" + apm-php-ref-v: "https://www.elastic.co/guide/en/apm/agent/php/current" + ecloud: "Elastic Cloud" + esf: "Elastic Serverless Forwarder" + ess: "Elasticsearch Service" + ece: "Elastic Cloud Enterprise" + eck: "Elastic Cloud on Kubernetes" + serverless-full: "Elastic Cloud Serverless" + serverless-short: "Serverless" + es-serverless: "Elasticsearch Serverless" + es3: "Elasticsearch Serverless" + obs-serverless: "Elastic Observability Serverless" + sec-serverless: "Elastic Security Serverless" + serverless-docs: "https://docs.elastic.co/serverless" + cloud: "https://www.elastic.co/guide/en/cloud/current" + ess-utm-params: "?page=docs&placement=docs-body" + 
ess-baymax: "?page=docs&placement=docs-body" + ess-trial: "https://cloud.elastic.co/registration?page=docs&placement=docs-body" + ess-product: "https://www.elastic.co/cloud/elasticsearch-service?page=docs&placement=docs-body" + ess-console: "https://cloud.elastic.co/?page=docs&placement=docs-body" + ess-console-name: "Elasticsearch Service Console" + ess-deployments: "https://cloud.elastic.co/deployments?page=docs&placement=docs-body" + ece-ref: "https://www.elastic.co/guide/en/cloud-enterprise/current" + eck-ref: "https://www.elastic.co/guide/en/cloud-on-k8s/current" + ess-leadin: "You can run Elasticsearch on your own hardware or use our hosted Elasticsearch Service that is available on AWS, GCP, and Azure. https://cloud.elastic.co/registration{ess-utm-params}[Try the Elasticsearch Service for free]." + ess-leadin-short: "Our hosted Elasticsearch Service is available on AWS, GCP, and Azure, and you can https://cloud.elastic.co/registration{ess-utm-params}[try it for free]." + ess-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"https://cloud.elastic.co/registration%7Bess-utm-params%7D/", title=\"Supported on Elasticsearch Service\"]" + ece-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud_ece.svg[link=\"https://cloud.elastic.co/registration%7Bess-utm-params%7D/", title=\"Supported on Elastic Cloud Enterprise\"]" + cloud-only: "This feature is designed for indirect use by https://cloud.elastic.co/registration{ess-utm-params}[Elasticsearch Service], https://www.elastic.co/guide/en/cloud-enterprise/{ece-version-link}[Elastic Cloud Enterprise], and https://www.elastic.co/guide/en/cloud-on-k8s/current[Elastic Cloud on Kubernetes]. Direct use is not supported." + ess-setting-change: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"{ess-trial}\", title=\"Supported on {ess}\"] indicates a change to a supported https://www.elastic.co/guide/en/cloud/current/ec-add-user-settings.html[user setting] for Elasticsearch Service." + ess-skip-section: "If you use Elasticsearch Service, skip this section. Elasticsearch Service handles these changes for you." + api-cloud: "https://www.elastic.co/docs/api/doc/cloud" + api-ece: "https://www.elastic.co/docs/api/doc/cloud-enterprise" + api-kibana-serverless: "https://www.elastic.co/docs/api/doc/serverless" + es-feature-flag: "This feature is in development and not yet available for use. This documentation is provided for informational purposes only." 
+ es-ref-dir: "'{{elasticsearch-root}}/docs/reference'" + apm-app: "APM app" + uptime-app: "Uptime app" + synthetics-app: "Synthetics app" + logs-app: "Logs app" + metrics-app: "Metrics app" + infrastructure-app: "Infrastructure app" + siem-app: "SIEM app" + security-app: "Elastic Security app" + ml-app: "Machine Learning" + dev-tools-app: "Dev Tools" + ingest-manager-app: "Ingest Manager" + stack-manage-app: "Stack Management" + stack-monitor-app: "Stack Monitoring" + alerts-ui: "Alerts and Actions" + rules-ui: "Rules" + rac-ui: "Rules and Connectors" + connectors-ui: "Connectors" + connectors-feature: "Actions and Connectors" + stack-rules-feature: "Stack Rules" + user-experience: "User Experience" + ems: "Elastic Maps Service" + ems-init: "EMS" + hosted-ems: "Elastic Maps Server" + ipm-app: "Index Pattern Management" + ingest-pipelines: "ingest pipelines" + ingest-pipelines-app: "Ingest Pipelines" + ingest-pipelines-cap: "Ingest pipelines" + ls-pipelines: "Logstash pipelines" + ls-pipelines-app: "Logstash Pipelines" + maint-windows: "maintenance windows" + maint-windows-app: "Maintenance Windows" + maint-windows-cap: "Maintenance windows" + custom-roles-app: "Custom Roles" + data-source: "data view" + data-sources: "data views" + data-source-caps: "Data View" + data-sources-caps: "Data Views" + data-source-cap: "Data view" + data-sources-cap: "Data views" + project-settings: "Project settings" + manage-app: "Management" + index-manage-app: "Index Management" + data-views-app: "Data Views" + rules-app: "Rules" + saved-objects-app: "Saved Objects" + tags-app: "Tags" + api-keys-app: "API keys" + transforms-app: "Transforms" + connectors-app: "Connectors" + files-app: "Files" + reports-app: "Reports" + maps-app: "Maps" + alerts-app: "Alerts" + crawler: "Enterprise Search web crawler" + ents: "Enterprise Search" + app-search-crawler: "App Search web crawler" + agent: "Elastic Agent" + agents: "Elastic Agents" + fleet: "Fleet" + fleet-server: "Fleet Server" + integrations-server: "Integrations Server" + ingest-manager: "Ingest Manager" + ingest-management: "ingest management" + package-manager: "Elastic Package Manager" + integrations: "Integrations" + package-registry: "Elastic Package Registry" + artifact-registry: "Elastic Artifact Registry" + aws: "AWS" + stack: "Elastic Stack" + xpack: "X-Pack" + es: "Elasticsearch" + kib: "Kibana" + esms: "Elastic Stack Monitoring Service" + esms-init: "ESMS" + ls: "Logstash" + beats: "Beats" + auditbeat: "Auditbeat" + filebeat: "Filebeat" + heartbeat: "Heartbeat" + metricbeat: "Metricbeat" + packetbeat: "Packetbeat" + winlogbeat: "Winlogbeat" + functionbeat: "Functionbeat" + journalbeat: "Journalbeat" + es-sql: "Elasticsearch SQL" + esql: "ES|QL" + elastic-agent: "Elastic Agent" + k8s: "Kubernetes" + log-driver-long: "Elastic Logging Plugin for Docker" + security: "X-Pack security" + security-features: "security features" + operator-feature: "operator privileges feature" + es-security-features: "Elasticsearch security features" + stack-security-features: "Elastic Stack security features" + endpoint-sec: "Endpoint Security" + endpoint-cloud-sec: "Endpoint and Cloud Security" + elastic-defend: "Elastic Defend" + elastic-sec: "Elastic Security" + elastic-endpoint: "Elastic Endpoint" + swimlane: "Swimlane" + sn: "ServiceNow" + sn-itsm: "ServiceNow ITSM" + sn-itom: "ServiceNow ITOM" + sn-sir: "ServiceNow SecOps" + jira: "Jira" + ibm-r: "IBM Resilient" + webhook: "Webhook" + webhook-cm: "Webhook - Case Management" + opsgenie: "Opsgenie" + bedrock: "Amazon 
Bedrock" + gemini: "Google Gemini" + hive: "TheHive" + monitoring: "X-Pack monitoring" + monitor-features: "monitoring features" + stack-monitor-features: "Elastic Stack monitoring features" + watcher: "Watcher" + alert-features: "alerting features" + reporting: "X-Pack reporting" + report-features: "reporting features" + graph: "X-Pack graph" + graph-features: "graph analytics features" + searchprofiler: "Search Profiler" + xpackml: "X-Pack machine learning" + ml: "machine learning" + ml-cap: "Machine learning" + ml-init: "ML" + ml-features: "machine learning features" + stack-ml-features: "Elastic Stack machine learning features" + ccr: "cross-cluster replication" + ccr-cap: "Cross-cluster replication" + ccr-init: "CCR" + ccs: "cross-cluster search" + ccs-cap: "Cross-cluster search" + ccs-init: "CCS" + ilm: "index lifecycle management" + ilm-cap: "Index lifecycle management" + ilm-init: "ILM" + dlm: "data lifecycle management" + dlm-cap: "Data lifecycle management" + dlm-init: "DLM" + search-snap: "searchable snapshot" + search-snaps: "searchable snapshots" + search-snaps-cap: "Searchable snapshots" + slm: "snapshot lifecycle management" + slm-cap: "Snapshot lifecycle management" + slm-init: "SLM" + rollup-features: "data rollup features" + ipm: "index pattern management" + ipm-cap: "Index pattern" + rollup: "rollup" + rollup-cap: "Rollup" + rollups: "rollups" + rollups-cap: "Rollups" + rollup-job: "rollup job" + rollup-jobs: "rollup jobs" + rollup-jobs-cap: "Rollup jobs" + dfeed: "datafeed" + dfeeds: "datafeeds" + dfeed-cap: "Datafeed" + dfeeds-cap: "Datafeeds" + ml-jobs: "machine learning jobs" + ml-jobs-cap: "Machine learning jobs" + anomaly-detect: "anomaly detection" + anomaly-detect-cap: "Anomaly detection" + anomaly-job: "anomaly detection job" + anomaly-jobs: "anomaly detection jobs" + anomaly-jobs-cap: "Anomaly detection jobs" + dataframe: "data frame" + dataframes: "data frames" + dataframe-cap: "Data frame" + dataframes-cap: "Data frames" + watcher-transform: "payload transform" + watcher-transforms: "payload transforms" + watcher-transform-cap: "Payload transform" + watcher-transforms-cap: "Payload transforms" + transform: "transform" + transforms: "transforms" + transform-cap: "Transform" + transforms-cap: "Transforms" + dataframe-transform: "transform" + dataframe-transform-cap: "Transform" + dataframe-transforms: "transforms" + dataframe-transforms-cap: "Transforms" + dfanalytics-cap: "Data frame analytics" + dfanalytics: "data frame analytics" + dataframe-analytics-config: "'{dataframe} analytics config'" + dfanalytics-job: "'{dataframe} analytics job'" + dfanalytics-jobs: "'{dataframe} analytics jobs'" + dfanalytics-jobs-cap: "'{dataframe-cap} analytics jobs'" + cdataframe: "continuous data frame" + cdataframes: "continuous data frames" + cdataframe-cap: "Continuous data frame" + cdataframes-cap: "Continuous data frames" + cdataframe-transform: "continuous transform" + cdataframe-transforms: "continuous transforms" + cdataframe-transforms-cap: "Continuous transforms" + ctransform: "continuous transform" + ctransform-cap: "Continuous transform" + ctransforms: "continuous transforms" + ctransforms-cap: "Continuous transforms" + oldetection: "outlier detection" + oldetection-cap: "Outlier detection" + olscore: "outlier score" + olscores: "outlier scores" + fiscore: "feature influence score" + evaluatedf-api: "evaluate {dataframe} analytics API" + evaluatedf-api-cap: "Evaluate {dataframe} analytics API" + binarysc: "binary soft classification" + binarysc-cap: "Binary soft 
classification" + regression: "regression" + regression-cap: "Regression" + reganalysis: "regression analysis" + reganalysis-cap: "Regression analysis" + depvar: "dependent variable" + feature-var: "feature variable" + feature-vars: "feature variables" + feature-vars-cap: "Feature variables" + classification: "classification" + classification-cap: "Classification" + classanalysis: "classification analysis" + classanalysis-cap: "Classification analysis" + infer-cap: "Inference" + infer: "inference" + lang-ident-cap: "Language identification" + lang-ident: "language identification" + data-viz: "Data Visualizer" + file-data-viz: "File Data Visualizer" + feat-imp: "feature importance" + feat-imp-cap: "Feature importance" + nlp: "natural language processing" + nlp-cap: "Natural language processing" + apm-agent: "APM agent" + apm-go-agent: "Elastic APM Go agent" + apm-go-agents: "Elastic APM Go agents" + apm-ios-agent: "Elastic APM iOS agent" + apm-ios-agents: "Elastic APM iOS agents" + apm-java-agent: "Elastic APM Java agent" + apm-java-agents: "Elastic APM Java agents" + apm-dotnet-agent: "Elastic APM .NET agent" + apm-dotnet-agents: "Elastic APM .NET agents" + apm-node-agent: "Elastic APM Node.js agent" + apm-node-agents: "Elastic APM Node.js agents" + apm-php-agent: "Elastic APM PHP agent" + apm-php-agents: "Elastic APM PHP agents" + apm-py-agent: "Elastic APM Python agent" + apm-py-agents: "Elastic APM Python agents" + apm-ruby-agent: "Elastic APM Ruby agent" + apm-ruby-agents: "Elastic APM Ruby agents" + apm-rum-agent: "Elastic APM Real User Monitoring (RUM) JavaScript agent" + apm-rum-agents: "Elastic APM RUM JavaScript agents" + apm-lambda-ext: "Elastic APM AWS Lambda extension" + project-monitors: "project monitors" + project-monitors-cap: "Project monitors" + private-location: "Private Location" + private-locations: "Private Locations" + pwd: "YOUR_PASSWORD" + esh: "ES-Hadoop" + default-dist: "default distribution" + oss-dist: "OSS-only distribution" + observability: "Observability" + api-request-title: "Request" + api-prereq-title: "Prerequisites" + api-description-title: "Description" + api-path-parms-title: "Path parameters" + api-query-parms-title: "Query parameters" + api-request-body-title: "Request body" + api-response-codes-title: "Response codes" + api-response-body-title: "Response body" + api-example-title: "Example" + api-examples-title: "Examples" + api-definitions-title: "Properties" + multi-arg: "†footnoteref:[multi-arg,This parameter accepts multiple arguments.]" + multi-arg-ref: "†footnoteref:[multi-arg]" + yes-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png[Yes,20,15]" + no-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png[No,20,15]" + es-repo: "https://github.com/elastic/elasticsearch/" + es-issue: "https://github.com/elastic/elasticsearch/issues/" + es-pull: "https://github.com/elastic/elasticsearch/pull/" + es-commit: "https://github.com/elastic/elasticsearch/commit/" + kib-repo: "https://github.com/elastic/kibana/" + kib-issue: "https://github.com/elastic/kibana/issues/" + kibana-issue: "'{kib-repo}issues/'" + kib-pull: "https://github.com/elastic/kibana/pull/" + kibana-pull: "'{kib-repo}pull/'" + kib-commit: "https://github.com/elastic/kibana/commit/" + ml-repo: "https://github.com/elastic/ml-cpp/" + ml-issue: "https://github.com/elastic/ml-cpp/issues/" + ml-pull: "https://github.com/elastic/ml-cpp/pull/" + ml-commit: 
"https://github.com/elastic/ml-cpp/commit/" + apm-repo: "https://github.com/elastic/apm-server/" + apm-issue: "https://github.com/elastic/apm-server/issues/" + apm-pull: "https://github.com/elastic/apm-server/pull/" + kibana-blob: "https://github.com/elastic/kibana/blob/current/" + apm-get-started-ref: "https://www.elastic.co/guide/en/apm/get-started/current" + apm-server-ref: "https://www.elastic.co/guide/en/apm/server/current" + apm-server-ref-v: "https://www.elastic.co/guide/en/apm/server/current" + apm-server-ref-m: "https://www.elastic.co/guide/en/apm/server/master" + apm-server-ref-62: "https://www.elastic.co/guide/en/apm/server/6.2" + apm-server-ref-64: "https://www.elastic.co/guide/en/apm/server/6.4" + apm-server-ref-70: "https://www.elastic.co/guide/en/apm/server/7.0" + apm-overview-ref-v: "https://www.elastic.co/guide/en/apm/get-started/current" + apm-overview-ref-70: "https://www.elastic.co/guide/en/apm/get-started/7.0" + apm-overview-ref-m: "https://www.elastic.co/guide/en/apm/get-started/master" + infra-guide: "https://www.elastic.co/guide/en/infrastructure/guide/current" + a-data-source: "a data view" + icon-bug: "pass:[]" + icon-checkInCircleFilled: "pass:[]" + icon-warningFilled: "pass:[]" diff --git a/docs/examples/index.asciidoc b/docs/examples/index.asciidoc deleted file mode 100644 index e786675ec..000000000 --- a/docs/examples/index.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -[[examples]] -== Examples - -Following you can find some examples on how to use the client. - -* Use of the <> parameter; -* Executing a <> request; -* Executing a <> request; -* Executing a <> request; -* Executing a <> request; -* Executing a <> request; -* Executing a <> request; -* Executing a <> request; -* Use of the <> parameter; -* Executing a <> request; -* How do I <>? -* Executing a <> request; -* I need <>; -* How to use the <> method; - -include::asStream.asciidoc[] -include::bulk.asciidoc[] -include::exists.asciidoc[] -include::get.asciidoc[] -include::ignore.asciidoc[] -include::msearch.asciidoc[] -include::scroll.asciidoc[] -include::search.asciidoc[] -include::suggest.asciidoc[] -include::transport.request.asciidoc[] -include::sql.query.asciidoc[] -include::update.asciidoc[] -include::update_by_query.asciidoc[] -include::reindex.asciidoc[] diff --git a/docs/getting-started.asciidoc b/docs/getting-started.asciidoc deleted file mode 100644 index d272d1302..000000000 --- a/docs/getting-started.asciidoc +++ /dev/null @@ -1,170 +0,0 @@ -[[getting-started-js]] -== Getting started - -This page guides you through the installation process of the Node.js client, -shows you how to instantiate the client, and how to perform basic Elasticsearch -operations with it. - -[discrete] -=== Requirements - -* https://nodejs.org/[Node.js] version 14.x or newer -* https://docs.npmjs.com/downloading-and-installing-node-js-and-npm[`npm`], usually bundled with Node.js - -[discrete] -=== Installation - -To install the latest version of the client, run the following command: - -[source,shell] --------------------------- -npm install @elastic/elasticsearch --------------------------- - -Refer to the <> page to learn more. - - -[discrete] -=== Connecting - -You can connect to the Elastic Cloud using an API key and the Elasticsearch -endpoint. 
-
-[source,js]
-----
-const { Client } = require('@elastic/elasticsearch')
-const client = new Client({
-  node: 'https://...', // Elasticsearch endpoint
-  auth: {
-    apiKey: { // API key ID and secret
-      id: 'foo',
-      api_key: 'bar',
-    }
-  }
-})
-----
-
-Your Elasticsearch endpoint can be found on the **My deployment** page of your
-deployment:
-
-image::images/es-endpoint.jpg[alt="Finding Elasticsearch endpoint",align="center"]
-
-You can generate an API key on the **Management** page under Security.
-
-image::images/create-api-key.png[alt="Create API key",align="center"]
-
-For other connection options, refer to the <> section.
-
-
-[discrete]
-=== Operations
-
-Time to use Elasticsearch! This section walks you through the basic, and most
-important, operations of Elasticsearch.
-
-
-[discrete]
-==== Creating an index
-
-This is how you create the `my_index` index:
-
-[source,js]
-----
-await client.indices.create({ index: 'my_index' })
-----
-
-
-[discrete]
-==== Indexing documents
-
-This is a simple way of indexing a document:
-
-[source,js]
-----
-await client.index({
-  index: 'my_index',
-  id: 'my_document_id',
-  document: {
-    foo: 'foo',
-    bar: 'bar',
-  },
-})
-----
-
-
-[discrete]
-==== Getting documents
-
-You can get documents by using the following code:
-
-[source,js]
-----
-await client.get({
-  index: 'my_index',
-  id: 'my_document_id',
-})
-----
-
-
-[discrete]
-==== Searching documents
-
-This is how you can create a single match query with the client:
-
-[source,js]
-----
-await client.search({
-  query: {
-    match: {
-      foo: 'foo'
-    }
-  }
-})
-----
-
-
-[discrete]
-==== Updating documents
-
-This is how you can update a document, for example to add a new field:
-
-[source,js]
-----
-await client.update({
-  index: 'my_index',
-  id: 'my_document_id',
-  doc: {
-    foo: 'bar',
-    new_field: 'new value'
-  }
-})
-----
-
-
-[discrete]
-==== Deleting documents
-
-[source,js]
-----
-await client.delete({
-  index: 'my_index',
-  id: 'my_document_id',
-})
-----
-
-
-[discrete]
-==== Deleting an index
-
-[source,js]
-----
-await client.indices.delete({ index: 'my_index' })
-----
-
-
-[discrete]
-== Further reading
-
-* Use <> for a more comfortable experience with the APIs.
-* For an elaborate example of how to ingest data into Elastic Cloud,
-refer to {cloud}/ec-getting-started-node-js.html[this page].
diff --git a/docs/helpers.asciidoc b/docs/helpers.asciidoc
deleted file mode 100644
index cb60dbc51..000000000
--- a/docs/helpers.asciidoc
+++ /dev/null
@@ -1,748 +0,0 @@
-[[client-helpers]]
-== Client helpers
-
-The client comes with a handy collection of helpers to give you a more
-comfortable experience with some APIs.
-
-CAUTION: The client helpers are experimental, and the API may change in the next
-minor releases. The helpers will not work in any Node.js version lower than 10.
-
-
-[discrete]
-[[bulk-helper]]
-=== Bulk helper
-
-~Added~ ~in~ ~`v7.7.0`~
-
-Running bulk requests can be complex due to the shape of the API, so this helper
-aims to provide a nicer developer experience around the Bulk API.
-
-
-[discrete]
-==== Usage
-
-[source,js]
-----
-const { createReadStream } = require('fs')
-const split = require('split2')
-const { Client } = require('@elastic/elasticsearch')
-
-const client = new Client({
-  cloud: { id: '' },
-  auth: { apiKey: 'base64EncodedKey' }
-})
-const result = await client.helpers.bulk({
-  datasource: createReadStream('./dataset.ndjson').pipe(split()),
-  onDocument (doc) {
-    return {
-      index: { _index: 'my-index' }
-    }
-  }
-})
-
-console.log(result)
-// {
-//   total: number,
-//   failed: number,
-//   retry: number,
-//   successful: number,
-//   time: number,
-//   bytes: number,
-//   aborted: boolean
-// }
-----
-
-To create a new instance of the Bulk helper, access it as shown in the example
-above. The configuration options are:
-[cols=2*]
-|===
-|`datasource`
-a|An array, async generator or a readable stream with the data you need to index/create/update/delete.
-It can be an array of strings or objects, but also a stream of json strings or JavaScript objects. +
-If it is a stream, we recommend using the https://www.npmjs.com/package/split2[`split2`] package, which splits the stream on newline delimiters. +
-This parameter is mandatory.
-[source,js]
-----
-const { createReadStream } = require('fs')
-const split = require('split2')
-const b = client.helpers.bulk({
-  // if you just use split(), the data will be used as array of strings
-  datasource: createReadStream('./dataset.ndjson').pipe(split())
-  // if you need to manipulate the data, you can pass JSON.parse to split
-  datasource: createReadStream('./dataset.ndjson').pipe(split(JSON.parse))
-})
-----
-
-|`onDocument`
-a|A function that is called for each document of the datasource. Inside this function you can manipulate the document and you must return the operation you want to execute with the document. Look at the link:{ref}/docs-bulk.html[Bulk API documentation] to see the supported operations. +
-This parameter is mandatory.
-[source,js]
-----
-const b = client.helpers.bulk({
-  onDocument (doc) {
-    return {
-      index: { _index: 'my-index' }
-    }
-  }
-})
-----
-
-|`onDrop`
-a|A function that is called every time a document can't be indexed and has reached the maximum number of retries.
-[source,js]
-----
-const b = client.helpers.bulk({
-  onDrop (doc) {
-    console.log(doc)
-  }
-})
-----
-
-|`onSuccess`
-a|A function that is called for each successful operation in the bulk request, which includes the result from Elasticsearch along with the original document that was sent, or `null` for delete operations.
-[source,js]
-----
-const b = client.helpers.bulk({
-  onSuccess ({ result, document }) {
-    console.log(`SUCCESS: Document ${result.index._id} indexed to ${result.index._index}`)
-  }
-})
-----
-
-|`flushBytes`
-a|The size of the bulk body, in bytes, to reach before sending it. Defaults to 5MB. +
-_Default:_ `5000000`
-[source,js]
-----
-const b = client.helpers.bulk({
-  flushBytes: 1000000
-})
-----
-
-|`flushInterval`
-a|How much time (in milliseconds) the helper waits after the last document read before flushing the body. +
-_Default:_ `30000`
-[source,js]
-----
-const b = client.helpers.bulk({
-  flushInterval: 30000
-})
-----
-
-|`concurrency`
-a|How many requests are executed at the same time. +
-_Default:_ `5`
-[source,js]
-----
-const b = client.helpers.bulk({
-  concurrency: 10
-})
-----
-
-|`retries`
-a|How many times a document is retried before the `onDrop` callback is called. +
-_Default:_ Client max retries.
-[source,js]
-----
-const b = client.helpers.bulk({
-  retries: 3
-})
-----
-
-|`wait`
-a|How much time to wait before retries in milliseconds. +
-_Default:_ 5000.
-[source,js]
-----
-const b = client.helpers.bulk({
-  wait: 3000
-})
-----
-
-|`refreshOnCompletion`
-a|If `true`, at the end of the bulk operation it runs a refresh on all indices or on the specified indices. +
-_Default:_ false.
-[source,js]
-----
-const b = client.helpers.bulk({
-  refreshOnCompletion: true
-  // or
-  refreshOnCompletion: 'index-name'
-})
-----
-
-|===
-
-
-[discrete]
-==== Supported operations
-
-
-[discrete]
-===== Index
-
-[source,js]
-----
-client.helpers.bulk({
-  datasource: myDatasource,
-  onDocument (doc) {
-    return {
-      index: { _index: 'my-index' }
-    }
-  }
-})
-----
-
-
-[discrete]
-===== Create
-
-[source,js]
-----
-client.helpers.bulk({
-  datasource: myDatasource,
-  onDocument (doc) {
-    return {
-      create: { _index: 'my-index', _id: doc.id }
-    }
-  }
-})
-----
-
-
-[discrete]
-===== Update
-
-[source,js]
-----
-client.helpers.bulk({
-  datasource: myDatasource,
-  onDocument (doc) {
-    // Note that the update operation requires you to return
-    // an array, where the first element is the action, while
-    // the second is the document options
-    return [
-      { update: { _index: 'my-index', _id: doc.id } },
-      { doc_as_upsert: true }
-    ]
-  }
-})
-----
-
-
-[discrete]
-===== Delete
-
-[source,js]
-----
-client.helpers.bulk({
-  datasource: myDatasource,
-  onDocument (doc) {
-    return {
-      delete: { _index: 'my-index', _id: doc.id }
-    }
-  }
-})
-----
-
-
-[discrete]
-==== Abort a bulk operation
-
-If needed, you can abort a bulk operation at any time. The bulk helper returns a
-https://promisesaplus.com/[thenable], which has an `abort` method.
-
-NOTE: The abort method stops the execution of the bulk operation, but if you
-are using a concurrency higher than one, the operations that are already running
-will not be stopped.
-
-[source,js]
-----
-const { createReadStream } = require('fs')
-const split = require('split2')
-const { Client } = require('@elastic/elasticsearch')
-
-const client = new Client({
-  cloud: { id: '' },
-  auth: { apiKey: 'base64EncodedKey' }
-})
-const b = client.helpers.bulk({
-  datasource: createReadStream('./dataset.ndjson').pipe(split()),
-  onDocument (doc) {
-    return {
-      index: { _index: 'my-index' }
-    }
-  },
-  onDrop (doc) {
-    b.abort()
-  }
-})
-
-console.log(await b)
-----
-
-
-[discrete]
-==== Passing custom options to the Bulk API
-
-You can pass any option supported by the
-{ref}/docs-bulk.html#docs-bulk-api-query-params[Bulk API] to the helper, and the
-helper uses those options in conjunction with the Bulk API call.
-
-[source,js]
-----
-const result = await client.helpers.bulk({
-  datasource: [...],
-  onDocument (doc) {
-    return {
-      index: { _index: 'my-index' }
-    }
-  },
-  pipeline: 'my-pipeline'
-})
-----
-
-
-[discrete]
-==== Usage with an async generator
-
-[source,js]
-----
-const { Client } = require('@elastic/elasticsearch')
-
-async function * generator () {
-  const dataset = [
-    { user: 'jon', age: 23 },
-    { user: 'arya', age: 18 },
-    { user: 'tyrion', age: 39 }
-  ]
-  for (const doc of dataset) {
-    yield doc
-  }
-}
-
-const client = new Client({
-  cloud: { id: '' },
-  auth: { apiKey: 'base64EncodedKey' }
-})
-const result = await client.helpers.bulk({
-  datasource: generator(),
-  onDocument (doc) {
-    return {
-      index: { _index: 'my-index' }
-    }
-  }
-})
-
-console.log(result)
-----
-
-[discrete]
-==== Modifying a document before operation
-
-~Added~ ~in~ ~`v8.8.2`~
-
-If you need to modify documents in your datasource before they are sent to Elasticsearch, you can return an array in the `onDocument` function rather than an operation object. The first item in the array must be the operation object, and the second item must be the document or partial document object as you'd like it to be sent to Elasticsearch.
-
-[source,js]
-----
-const { Client } = require('@elastic/elasticsearch')
-
-const client = new Client({
-  cloud: { id: '' },
-  auth: { apiKey: 'base64EncodedKey' }
-})
-const result = await client.helpers.bulk({
-  datasource: [...],
-  onDocument (doc) {
-    return [
-      { index: { _index: 'my-index' } },
-      { ...doc, favorite_color: 'mauve' },
-    ]
-  }
-})
-
-console.log(result)
-----
-
-[discrete]
-[[multi-search-helper]]
-=== Multi search helper
-
-~Added~ ~in~ ~`v7.8.0`~
-
-If you send search requests at a high rate, this helper might be useful
-for you. It uses the multi search API under the hood to batch the requests
-and improve the overall performance of your application. The `result` exposes a
-`documents` property as well, which allows you to access the hits
-sources directly.
-
-
-[discrete]
-==== Usage
-
-[source,js]
-----
-const { Client } = require('@elastic/elasticsearch')
-
-const client = new Client({
-  cloud: { id: '' },
-  auth: { apiKey: 'base64EncodedKey' }
-})
-const m = client.helpers.msearch()
-
-m.search(
-    { index: 'stackoverflow' },
-    { query: { match: { title: 'javascript' } } }
-  )
-  .then(result => console.log(result.body)) // or result.documents
-  .catch(err => console.error(err))
-----
-
-To create a new instance of the multi search (msearch) helper, you should access
-it as shown in the example above. The configuration options are:
-[cols=2*]
-|===
-|`operations`
-a|How many search operations should be sent in a single msearch request. +
-_Default:_ `5`
-[source,js]
-----
-const m = client.helpers.msearch({
-  operations: 10
-})
-----
-
-|`flushInterval`
-a|How much time (in milliseconds) the helper waits after the last operation read before flushing the operations. +
-_Default:_ `500`
-[source,js]
-----
-const m = client.helpers.msearch({
-  flushInterval: 500
-})
-----
-
-|`concurrency`
-a|How many requests are executed at the same time. +
-_Default:_ `5`
-[source,js]
-----
-const m = client.helpers.msearch({
-  concurrency: 10
-})
-----
-
-|`retries`
-a|How many times an operation is retried before the request is resolved. An operation is retried only in case of a 429 error. +
-_Default:_ Client max retries.
-[source,js]
-----
-const m = client.helpers.msearch({
-  retries: 3
-})
-----
-
-|`wait`
-a|How much time to wait before retries in milliseconds. +
-_Default:_ 5000.
-[source,js]
-----
-const m = client.helpers.msearch({
-  wait: 3000
-})
-----
-
-|===
-
-
-[discrete]
-==== Stopping the msearch helper
-
-If needed, you can stop an msearch processor at any time. The msearch helper
-returns a https://promisesaplus.com/[thenable], which has a `stop` method.
-
-If you are creating multiple msearch helper instances and using them for a
-limited period of time, remember to always use the `stop` method once you have
-finished using them, otherwise your application will start leaking memory.
-
-The `stop` method accepts an optional error, which will be dispatched to every
-subsequent search request.
-
-NOTE: The stop method stops the execution of the msearch processor, but if
-you are using a concurrency higher than one, the operations that are already
-running will not be stopped.
-
-[source,js]
-----
-const { Client } = require('@elastic/elasticsearch')
-
-const client = new Client({
-  cloud: { id: '' },
-  auth: { apiKey: 'base64EncodedKey' }
-})
-const m = client.helpers.msearch()
-
-m.search(
-    { index: 'stackoverflow' },
-    { query: { match: { title: 'javascript' } } }
-  )
-  .then(result => console.log(result.body))
-  .catch(err => console.error(err))
-
-m.search(
-    { index: 'stackoverflow' },
-    { query: { match: { title: 'ruby' } } }
-  )
-  .then(result => console.log(result.body))
-  .catch(err => console.error(err))
-
-setImmediate(() => m.stop())
-----
-
-
-[discrete]
-[[search-helper]]
-=== Search helper
-
-~Added~ ~in~ ~`v7.7.0`~
-
-A simple wrapper around the search API. Instead of returning the entire `result`
-object it returns only the search documents source. To improve
-performance, this helper automatically adds `filter_path=hits.hits._source` to
-the query string.
-
-[source,js]
-----
-const documents = await client.helpers.search({
-  index: 'stackoverflow',
-  query: {
-    match: {
-      title: 'javascript'
-    }
-  }
-})
-
-for (const doc of documents) {
-  console.log(doc)
-}
-----
-
-
-[discrete]
-[[scroll-search-helper]]
-=== Scroll search helper
-
-~Added~ ~in~ ~`v7.7.0`~
-
-This helper offers a simple and intuitive way to use the scroll search API.
-Once called, it returns an
-https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/async_function[async iterator]
-which can be used in conjunction with a for-await...of loop. It automatically
-handles the `429` error and uses the `maxRetries` option of the client.
-
-[source,js]
-----
-const scrollSearch = client.helpers.scrollSearch({
-  index: 'stackoverflow',
-  query: {
-    match: {
-      title: 'javascript'
-    }
-  }
-})
-
-for await (const result of scrollSearch) {
-  console.log(result)
-}
-----
-
-
-[discrete]
-==== Clear a scroll search
-
-If needed, you can clear a scroll search by calling `result.clear()`:
-
-[source,js]
-----
-for await (const result of scrollSearch) {
-  if (condition) {
-    await result.clear()
-  }
-}
-----
-
-
-[discrete]
-==== Quickly getting the documents
-
-If you only need the documents from the result of a scroll search, you can
-access them via `result.documents`:
-
-[source,js]
-----
-for await (const result of scrollSearch) {
-  console.log(result.documents)
-}
-----
-
-
-[discrete]
-[[scroll-documents-helper]]
-=== Scroll documents helper
-
-~Added~ ~in~ ~`v7.7.0`~
-
-It works in the same way as the scroll search helper, but it returns only the
-documents instead. Note that every loop cycle returns a single document, and you
-can't use the `clear` method. To improve performance, this helper
-automatically adds `filter_path=hits.hits._source` to the query string.
-
-[source,js]
-----
-const scrollSearch = client.helpers.scrollDocuments({
-  index: 'stackoverflow',
-  query: {
-    match: {
-      title: 'javascript'
-    }
-  }
-})
-
-for await (const doc of scrollSearch) {
-  console.log(doc)
-}
-----
-
-[discrete]
-[[esql-helper]]
-=== ES|QL helper
-
-ES|QL queries can return their results in {ref}/esql-rest.html#esql-rest-format[several formats].
-
-[discrete]
-==== Usage
-
-[discrete]
-===== `toRecords`
-
-~Added~ ~in~ ~`v8.14.0`~
-
-The default JSON format returned by ES|QL queries contains arrays of values
-for each row, with column names and types returned separately:
-
-[source,json]
-----
-{
-  "columns": [
-    { "name": "@timestamp", "type": "date" },
-    { "name": "client_ip", "type": "ip" },
-    { "name": "event_duration", "type": "long" },
-    { "name": "message", "type": "keyword" }
-  ],
-  "values": [
-    [
-      "2023-10-23T12:15:03.360Z",
-      "172.21.2.162",
-      3450233,
-      "Connected to 10.1.0.3"
-    ],
-    [
-      "2023-10-23T12:27:28.948Z",
-      "172.21.2.113",
-      2764889,
-      "Connected to 10.1.0.2"
-    ]
-  ]
-}
-----
-
-In many cases, it's preferable to operate on an array of objects, one object per row,
-rather than an array of arrays. The ES|QL `toRecords` helper converts row data into objects.
-
-[source,js]
-----
-await client.helpers
-  .esql({ query: 'FROM sample_data | LIMIT 2' })
-  .toRecords()
-// =>
-// {
-//   "columns": [
-//     { "name": "@timestamp", "type": "date" },
-//     { "name": "client_ip", "type": "ip" },
-//     { "name": "event_duration", "type": "long" },
-//     { "name": "message", "type": "keyword" }
-//   ],
-//   "records": [
-//     {
-//       "@timestamp": "2023-10-23T12:15:03.360Z",
-//       "client_ip": "172.21.2.162",
-//       "event_duration": 3450233,
-//       "message": "Connected to 10.1.0.3"
-//     },
-//     {
-//       "@timestamp": "2023-10-23T12:27:28.948Z",
-//       "client_ip": "172.21.2.113",
-//       "event_duration": 2764889,
-//       "message": "Connected to 10.1.0.2"
-//     },
-//   ]
-// }
-----
-
-In TypeScript, you can declare the type that `toRecords` returns:
-
-[source,ts]
-----
-type EventLog = {
-  '@timestamp': string,
-  client_ip: string,
-  event_duration: number,
-  message: string,
-}
-
-const result = await client.helpers
-  .esql({ query: 'FROM sample_data | LIMIT 2' })
-  .toRecords<EventLog>()
-----
-
-[discrete]
-===== `toArrowReader`
-
-~Added~ ~in~ ~`v8.16.0`~
-
-ES|QL can return results in multiple binary formats, including https://arrow.apache.org/[Apache Arrow]'s streaming format. Because it is a very efficient format to read, it can be valuable for performing high-performance in-memory analytics. And, because the response is streamed as batches of records, it can be used to produce aggregations and other calculations on larger-than-memory data sets.
-
-`toArrowReader` returns a https://arrow.apache.org/docs/js/classes/Arrow_dom.RecordBatchReader.html[`RecordBatchStreamReader`].
-
-[source,ts]
-----
-const reader = await client.helpers
-  .esql({ query: 'FROM sample_data' })
-  .toArrowReader()
-
-// print each record as JSON
-for (const recordBatch of reader) {
-  for (const record of recordBatch) {
-    console.log(record.toJSON())
-  }
-}
-----
-
-[discrete]
-===== `toArrowTable`
-
-~Added~ ~in~ ~`v8.16.0`~
-
-If you would like to pull the entire data set in Arrow format but without streaming, you can use the `toArrowTable` helper to get a https://arrow.apache.org/docs/js/classes/Arrow_dom.Table.html[Table] back instead.
-
-[source,ts]
-----
-const table = await client.helpers
-  .esql({ query: 'FROM sample_data' })
-  .toArrowTable()
-
-console.log(table.toArray())
-----
diff --git a/docs/index.asciidoc b/docs/index.asciidoc
deleted file mode 100644
index 51206f0b0..000000000
--- a/docs/index.asciidoc
+++ /dev/null
@@ -1,24 +0,0 @@
-= Elasticsearch JavaScript Client
-
-include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[]
-include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
-
-include::introduction.asciidoc[]
-include::getting-started.asciidoc[]
-include::changelog.asciidoc[]
-include::installation.asciidoc[]
-include::connecting.asciidoc[]
-include::configuration.asciidoc[]
-include::basic-config.asciidoc[]
-include::advanced-config.asciidoc[]
-include::child.asciidoc[]
-include::testing.asciidoc[]
-include::integrations.asciidoc[]
-include::observability.asciidoc[]
-include::transport.asciidoc[]
-include::typescript.asciidoc[]
-include::reference.asciidoc[]
-include::examples/index.asciidoc[]
-include::helpers.asciidoc[]
-include::redirects.asciidoc[]
-include::timeout-best-practices.asciidoc[]
diff --git a/docs/installation.asciidoc b/docs/installation.asciidoc
deleted file mode 100644
index cd36cf3c2..000000000
--- a/docs/installation.asciidoc
+++ /dev/null
@@ -1,116 +0,0 @@
-[[installation]]
-== Installation
-
-This page guides you through the installation process of the client.
-
-To install the latest version of the client, run the following command:
-
-[source,sh]
-----
-npm install @elastic/elasticsearch
-----
-
-To install a specific major version of the client, run the following command:
-
-[source,sh]
-----
-npm install @elastic/elasticsearch@
-----
-
-To learn more about the supported major versions, please refer to the
-<>.
-
-[discrete]
-[[nodejs-support]]
-=== Node.js support
-
-NOTE: The minimum supported version of Node.js is `v18`.
-
-The client versioning follows the {stack} versioning; this means that
-major, minor, and patch releases are done following a precise schedule that
-often does not coincide with the https://nodejs.org/en/about/releases/[Node.js release] times.
-
-To avoid supporting insecure and unsupported versions of Node.js, the
-client *will drop support for EOL versions of Node.js between minor releases*.
-Typically, once a Node.js version goes into EOL, the client continues
-to support that version for at least another minor release. If you are using the client
-with a version of Node.js that will be unsupported soon, you will see a warning
-in your logs (the client will start logging the warning two minor releases in advance).
-
-Unless you are *always* using a supported version of Node.js,
-we recommend defining the client dependency in your
-`package.json` with the `~` instead of `^`. In this way, you will lock the
-dependency on the minor release and not the major (for example, `~7.10.0` instead
-of `^7.10.0`).
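-
-For example, a `package.json` dependency entry pinned to a minor release could
-look like this (the version number is illustrative only):
-
-[source,json]
-----
-{
-  "dependencies": {
-    "@elastic/elasticsearch": "~8.9.0"
-  }
-}
-----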
-
-[%header,cols=3*]
-|===
-|Node.js Version
-|Node.js EOL date
-|End of support
-
-|`8.x`
-|December 2019
-|`7.11` (early 2021)
-
-|`10.x`
-|April 2021
-|`7.12` (mid 2021)
-
-|`12.x`
-|April 2022
-|`8.2` (early 2022)
-
-|`14.x`
-|April 2023
-|`8.8` (early 2023)
-
-|`16.x`
-|September 2023
-|`8.11` (late 2023)
-|===
-
-[discrete]
-[[js-compatibility-matrix]]
-=== Compatibility matrix
-
-Language clients are forward compatible, meaning that clients support
-communicating with greater or equal minor versions of {es} without breaking. It
-does not mean that the client automatically supports new features of newer {es}
-versions; it is only possible after a release of a new client version. For
-example, an 8.12 client version won't automatically support the new features of
-the 8.13 version of {es}; the 8.13 client version is required for that.
-{es} language clients are only backwards compatible with default distributions,
-and no guarantees are made.
-
-[%header,cols=3*]
-|===
-|{es} Version
-|Client Version
-|Supported
-
-|`8.x`
-|`8.x`
-|`8.x`
-
-|`7.x`
-|`7.x`
-|`7.17`
-
-|`6.x`
-|`6.x`
-|
-
-|`5.x`
-|`5.x`
-|
-|===
-
-
-[discrete]
-==== Browser
-
-WARNING: There is no official support for the browser environment. It exposes
-your {es} instance to everyone, which could lead to security issues. We
-recommend writing a lightweight proxy that uses this client instead;
-you can see a proxy example https://github.com/elastic/elasticsearch-js/tree/master/docs/examples/proxy[here].
diff --git a/docs/integrations.asciidoc b/docs/integrations.asciidoc
deleted file mode 100644
index 84f854ab2..000000000
--- a/docs/integrations.asciidoc
+++ /dev/null
@@ -1,8 +0,0 @@
-[[integrations]]
-== Integrations
-
-The Client offers the following integration options for you:
-
-* <>
-* <>
-* <>
\ No newline at end of file
diff --git a/docs/redirects.asciidoc b/docs/redirects.asciidoc
deleted file mode 100644
index f2d0aecbb..000000000
--- a/docs/redirects.asciidoc
+++ /dev/null
@@ -1,17 +0,0 @@
-["appendix",role="exclude",id="redirects"]
-= Deleted pages
-
-The following pages have moved or been deleted.
-
-[role="exclude",id="auth-reference"]
-== Authentication
-
-This page has moved. See <>.
-
-[role="exclude",id="breaking-changes"]
-== Breaking changes
-
-For information about migrating from the legacy elasticsearch.js client to the
-new Elasticsearch JavaScript client, refer to the
-https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/7.17/breaking-changes.html[7.17
-JavaScript client migration guide].
diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
deleted file mode 100644
index 516f02386..000000000
--- a/docs/reference.asciidoc
+++ /dev/null
@@ -1,15988 +0,0 @@
-[[api-reference]]
-////////
-===========================================================================================================================
-|| ||
-|| ||
-|| ||
-|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ ||
-|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ ||
-|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ ||
-|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ ||
-|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ ||
-|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ ||
-|| ||
-|| ||
-|| This file is autogenerated, DO NOT send pull requests that change this file directly. ||
|| -|| You should update the script that does the generation, which can be found in: || -|| https://github.com/elastic/elastic-client-generator-js || -|| || -|| You can run the script with the following command: || -|| npm run elasticsearch -- --version || -|| || -|| || -|| || -=========================================================================================================================== -//////// -== API Reference - -[discrete] -=== bulk -Bulk index or delete documents. -Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. -This reduces overhead and can greatly increase indexing speed. - -If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: - -* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. -* To use the `index` action, you must have the `create`, `index`, or `write` index privilege. -* To use the `delete` action, you must have the `delete` or `write` index privilege. -* To use the `update` action, you must have the `index` or `write` index privilege. -* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. -* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. - -Automatic data stream creation requires a matching index template with data stream enabled. - -The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: - ----- -action_and_meta_data\n -optional_source\n -action_and_meta_data\n -optional_source\n -.... -action_and_meta_data\n -optional_source\n ----- - -The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. -A `create` action fails if a document with the same ID already exists in the target -An `index` action adds or replaces a document as necessary. - -NOTE: Data streams support only the `create` action. -To update or delete a document in a data stream, you must target the backing index containing the document. - -An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. - -A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. - -NOTE: The final line of data must end with a newline character (`\n`). -Each newline character may be preceded by a carriage return (`\r`). -When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. -Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. - -If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. - -A note on the format: the idea here is to make processing as fast as possible. -As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. - -Client libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible. - -There is no "correct" number of actions to perform in a single bulk request. 
-Experiment with different settings to find the optimal size for your particular workload.
-Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default so clients must ensure that no request exceeds this size.
-It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.
-For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.
-
-**Client support for bulk requests**
-
-Some of the officially supported clients provide helpers to assist with bulk requests and reindexing:
-
-* Go: Check out `esutil.BulkIndexer`
-* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`
-* Python: Check out `elasticsearch.helpers.*`
-* JavaScript: Check out `client.helpers.*`
-* .NET: Check out `BulkAllObservable`
-* PHP: Check out bulk indexing.
-
-**Submitting bulk requests with cURL**
-
-If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.
-The latter doesn't preserve newlines. For example:
-
-----
-$ cat requests
-{ "index" : { "_index" : "test", "_id" : "1" } }
-{ "field1" : "value1" }
-$ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
-{"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
-----
-
-**Optimistic concurrency control**
-
-Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.
-The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.
-
-**Versioning**
-
-Each bulk item can include the version value using the `version` field.
-It automatically follows the behavior of the index or delete operation based on the `_version` mapping.
-It also supports the `version_type`.
-
-**Routing**
-
-Each bulk item can include the routing value using the `routing` field.
-It automatically follows the behavior of the index or delete operation based on the `_routing` mapping.
-
-NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
-
-**Wait for active shards**
-
-When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.
-
-**Refresh**
-
-Control when the changes made by this request are visible to search.
-
-NOTE: Only the shards that receive the bulk request will be affected by refresh.
-Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.
-The request will only wait for those three shards to refresh.
-The other two shards that make up the index do not participate in the `_bulk` request at all.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk[Endpoint documentation]
-[source,ts]
-----
-client.bulk({ ...
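-  // A sketch of the request shape, mirroring the cURL example above:
-  // operations: [
-  //   { index: { _index: 'test', _id: '1' } },
-  //   { field1: 'value1' }
-  // ]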
}) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string)*: The name of the data stream, index, or index alias to perform bulk actions on. -** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])* -** *`include_source_on_error` (Optional, boolean)*: True or false if to include the document source in the error message in case of parsing errors. -** *`list_executed_pipelines` (Optional, boolean)*: If `true`, the response will include the ingest pipelines that were run for each index or create. -** *`pipeline` (Optional, string)*: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. -** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard. -** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -** *`timeout` (Optional, string | -1 | 0)*: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. -** *`require_alias` (Optional, boolean)*: If `true`, the request's actions must target an index alias. -** *`require_data_stream` (Optional, boolean)*: If `true`, the request's actions must target a data stream (existing or to be created). - -[discrete] -=== clear_scroll -Clear a scrolling search. -Clear the search context and results for a scrolling search. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll[Endpoint documentation] -[source,ts] ----- -client.clearScroll({ ... 
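-  // e.g. scroll_id: ['scroll-id-1', 'scroll-id-2'] (illustrative IDs),
-  // or '_all' to clear every scroll context, as noted in the arguments below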
}) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`scroll_id` (Optional, string | string[])*: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. - -[discrete] -=== close_point_in_time -Close a point in time. -A point in time must be opened explicitly before being used in search requests. -The `keep_alive` parameter tells Elasticsearch how long it should persist. -A point in time is automatically closed when the `keep_alive` period has elapsed. -However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time[Endpoint documentation] -[source,ts] ----- -client.closePointInTime({ id }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The ID of the point-in-time. - -[discrete] -=== count -Count search results. -Get the number of documents matching a query. - -The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. -The query is optional. When no query is provided, the API uses `match_all` to count all the documents. - -The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. - -The operation is broadcast across all shards. -For each shard ID group, a replica is chosen and the search is run against it. -This means that replicas increase the scalability of the count. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count[Endpoint documentation] -[source,ts] ----- -client.count({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. 
This parameter can be used only when the `q` query string parameter is specified.
-** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified.
-** *`df` (Optional, string)*: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`.
-** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded, or aliased indices are ignored when frozen.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
-** *`min_score` (Optional, number)*: The minimum `_score` value that documents must have to be included in the result.
-** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, it is random.
-** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.
-** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
-** *`q` (Optional, string)*: The query in Lucene query string syntax. This parameter cannot be used with a request body.
-
-[discrete]
-=== create
-Create a new document in the index.
-
-You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs.
-Using `_create` guarantees that the document is indexed only if it does not already exist.
-It returns a 409 response when a document with the same ID already exists in the index.
-To update an existing document, you must use the `//_doc/` API.
-
-If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
-
-* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.
-* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.
-
-Automatic data stream creation requires a matching index template with data stream enabled.
-
-**Automatically create data streams and indices**
-
-If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.
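-
-A minimal sketch of the create-only semantics described above (the index name and document are hypothetical):
-
-[source,ts]
-----
-// Fails with a 409 if a document with _id '1' already exists.
-const response = await client.create({
-  index: 'my-index-000001',
-  id: '1',
-  document: { title: 'Hello' }
-})
-----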
-
-If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.
-
-NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.
-
-If no mapping exists, the index operation creates a dynamic mapping.
-By default, new fields and objects are automatically added to the mapping if needed.
-
-Automatic index creation is controlled by the `action.auto_create_index` setting.
-If it is `true`, any index can be created automatically.
-You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.
-Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.
-When a list is specified, the default behaviour is to disallow.
-
-NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.
-It does not affect the creation of data streams.
-
-**Routing**
-
-By default, shard placement — or routing — is controlled by using a hash of the document's ID value.
-For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.
-
-When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.
-This does come at the (very minimal) cost of an additional document parsing pass.
-If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.
-
-NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
-
-**Distributed**
-
-The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.
-After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.
-
-**Active shards**
-
-To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
-If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.
-By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).
-This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.
-To alter this behavior per operation, use the `wait_for_active_shards` request parameter.
-
-Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).
-Specifying a negative value or a number greater than the number of shard copies will throw an error.
-
-For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).
-If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. -This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. -If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. -This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. -However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. -The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. - -It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. -After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. -The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create[Endpoint documentation] -[source,ts] ----- -client.create({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. -** *`index` (string)*: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index. -** *`document` (Optional, object)*: A document. -** *`include_source_on_error` (Optional, boolean)*: True or false if to include the document source in the error message in case of parsing errors. -** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. -** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard. -** *`timeout` (Optional, string | -1 | 0)*: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. 
Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. -** *`version` (Optional, number)*: The explicit version number for concurrency control. It must be a non-negative long number. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. - -[discrete] -=== delete -Delete a document. - -Remove a JSON document from the specified index. - -NOTE: You cannot send deletion requests directly to a data stream. -To delete a document in a data stream, you must target the backing index containing the document. - -**Optimistic concurrency control** - -Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. -If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. - -**Versioning** - -Each document indexed is versioned. -When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. -Every write operation run on a document, deletes included, causes its version to be incremented. -The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. -The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. - -**Routing** - -If routing is used during indexing, the routing value also needs to be specified to delete a document. - -If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. - -For example: - ----- -DELETE /my-index-000001/_doc/1?routing=shard-1 ----- - -This request deletes the document with ID 1, but it is routed based on the user. -The document is not deleted if the correct routing is not specified. - -**Distributed** - -The delete operation gets hashed into a specific shard ID. -It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete[Endpoint documentation] -[source,ts] ----- -client.delete({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: A unique identifier for the document. -** *`index` (string)*: The name of the target index. -** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. -** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. 
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. -** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. -** *`version` (Optional, number)*: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. - -[discrete] -=== delete_by_query -Delete documents. - -Deletes documents that match the specified query. - -If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: - -* `read` -* `delete` or `write` - -You can specify the query criteria in the request URI or the request body using the same syntax as the search API. -When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. -If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. - -NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. - -While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. -A bulk delete request is performed for each batch of matching documents. -If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. -If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. -Any delete requests that completed successfully still stick, they are not rolled back. - -You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. -Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query. - -**Throttling delete requests** - -To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. 
-This pads each batch with a wait time to throttle the rate. -Set `requests_per_second` to `-1` to disable throttling. - -Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. -The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. -By default the batch size is `1000`, so if `requests_per_second` is set to `500`: - ----- -target_time = 1000 / 500 per second = 2 seconds -wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ----- - -Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. -This is "bursty" instead of "smooth". - -**Slicing** - -Delete by query supports sliced scroll to parallelize the delete process. -This can improve efficiency and provide a convenient way to break the request down into smaller parts. - -Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. -This setting will use one slice per shard, up to a certain limit. -If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. -Adding slices to the delete by query operation creates sub-requests which means it has some quirks: - -* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. -* Fetching the status of the task for the request with slices only contains the status of completed slices. -* These sub-requests are individually addressable for things like cancellation and rethrottling. -* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. -* Canceling the request with `slices` will cancel each sub-request. -* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. -* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. -* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. - -If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: - -* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. -* Delete performance scales linearly across available resources with the number of slices. - -Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. - -**Cancel a delete by query operation** - -Any delete by query can be canceled using the task cancel API. For example: - ----- -POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel ----- - -The task ID can be found by using the get tasks API. 
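-
-With this client, a rough equivalent sketch (the task ID is copied from the example above) could use the tasks API:
-
-[source,ts]
-----
-// List running delete by query tasks, then cancel one by its task ID.
-const tasks = await client.tasks.list({ actions: '*/delete/byquery' })
-await client.tasks.cancel({ task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619' })
-----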
- -Cancellation should happen quickly but might take a few seconds. -The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query[Endpoint documentation] -[source,ts] ----- -client.deleteByQuery({ index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. -** *`max_docs` (Optional, number)*: The maximum number of documents to delete. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The documents to delete specified with Query DSL. -** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. -** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if delete by query hits version conflicts: `abort` or `proceed`. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. -** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. 
-** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. -** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. -** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`. -** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. Defaults to the index-level setting. -** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. -** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. -** *`q` (Optional, string)*: A query in the Lucene query string syntax. -** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling. -** *`scroll_size` (Optional, number)*: The size of the scroll request that powers the operation. -** *`search_timeout` (Optional, string | -1 | 0)*: The explicit timeout for each search request. It defaults to no timeout. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. -** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. -** *`sort` (Optional, string[])*: A list of `:` pairs. -** *`stats` (Optional, string[])*: The specific `tag` of the request for logging and statistical purposes. -** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. -** *`timeout` (Optional, string | -1 | 0)*: The period each deletion request waits for active shards. -** *`version` (Optional, boolean)*: If `true`, returns the document version as part of a hit. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. -** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. - -[discrete] -=== delete_by_query_rethrottle -Throttle a delete by query operation. 
-
-Change the number of requests per second for a particular delete by query operation.
-Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle[Endpoint documentation]
-[source,ts]
-----
-client.deleteByQueryRethrottle({ task_id })
-----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`task_id` (string | number)*: The ID for the task.
-** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`.
-
-[discrete]
-=== delete_script
-Delete a script or search template.
-Deletes a stored script or search template.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script[Endpoint documentation]
-[source,ts]
-----
-client.deleteScript({ id })
-----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The identifier for the stored script or search template.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
-
-[discrete]
-=== exists
-Check a document.
-
-Verify that a document exists.
-For example, check to see if a document with the `_id` 0 exists:
-
-----
-HEAD my-index-000001/_doc/0
-----
-
-If the document exists, the API returns a status code of `200 - OK`.
-If the document doesn’t exist, the API returns `404 - Not Found`.
-
-**Versioning support**
-
-You can use the `version` parameter to check the document only if its current version is equal to the specified one.
-
-Internally, Elasticsearch has marked the old document as deleted and added an entirely new document.
-The old version of the document doesn't disappear immediately, although you won't be able to access it.
-Elasticsearch cleans up deleted documents in the background as you continue to index more data.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation]
-[source,ts]
-----
-client.exists({ id, index })
-----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: A unique document identifier.
-** *`index` (string)*: A list of data streams, indices, and aliases. It supports wildcards (`*`).
-** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name.
-** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time.
-** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). -** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. -** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. -** *`version` (Optional, number)*: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. - -[discrete] -=== exists_source -Check for a document source. - -Check whether a document source exists in an index. -For example: - ----- -HEAD my-index-000001/_source/1 ----- - -A document's source is not available if it is disabled in the mapping. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation] -[source,ts] ----- -client.existsSource({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: A unique identifier for the document. -** *`index` (string)*: A list of data streams, indices, and aliases. It supports wildcards (`*`). -** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. -** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. -** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). -** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. -** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -** *`version` (Optional, number)*: The version number for concurrency control. It must match the current version of the document for the request to succeed. 
-** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. - -[discrete] -=== explain -Explain a document match result. -Get information about why a specific document matches, or doesn't match, a query. -It computes a score explanation for a query and a specific document. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain[Endpoint documentation] -[source,ts] ----- -client.explain({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The document identifier. -** *`index` (string)*: Index names that are used to limit the request. Only a single index name can be provided to this parameter. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. -** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. -** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. -** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. -** *`_source` (Optional, boolean | string | string[])*: `True` or `false` to return the `_source` field or not or a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response. 
-** *`q` (Optional, string)*: The query in the Lucene query string syntax. - -[discrete] -=== field_caps -Get the field capabilities. - -Get information about the capabilities of fields among multiple indices. - -For data streams, the API returns field capabilities among the stream’s backing indices. -It returns runtime fields like any other field. -For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps[Endpoint documentation] -[source,ts] ----- -client.fieldCaps({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. -** *`fields` (Optional, string | string[])*: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. -** *`runtime_mappings` (Optional, Record)*: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. 
-** *`include_unmapped` (Optional, boolean)*: If true, unmapped fields are included in the response. -** *`filters` (Optional, string)*: A list of filters to apply to the response. -** *`types` (Optional, string[])*: A list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. -** *`include_empty_fields` (Optional, boolean)*: If false, empty fields are not included in the response. - -[discrete] -=== get -Get a document by its ID. - -Get a document and its source or stored fields from an index. - -By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). -In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. -To turn off realtime behavior, set the `realtime` parameter to false. - -**Source filtering** - -By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. -You can turn off `_source` retrieval by using the `_source` parameter: - ----- -GET my-index-000001/_doc/0?_source=false ----- - -If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. -This can be helpful with large documents where partial retrieval can save on network overhead -Both parameters take a comma separated list of fields or wildcard expressions. -For example: - ----- -GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities ----- - -If you only want to specify includes, you can use a shorter notation: - ----- -GET my-index-000001/_doc/0?_source=*.id ----- - -**Routing** - -If routing is used during indexing, the routing value also needs to be specified to retrieve a document. -For example: - ----- -GET my-index-000001/_doc/2?routing=user1 ----- - -This request gets the document with ID 2, but it is routed based on the user. -The document is not fetched if the correct routing is not specified. - -**Distributed** - -The GET operation is hashed into a specific shard ID. -It is then redirected to one of the replicas within that shard ID and returns the result. -The replicas are the primary shard and its replicas within that shard ID group. -This means that the more replicas you have, the better your GET scaling will be. - -**Versioning support** - -You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. - -Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. -The old version of the document doesn't disappear immediately, although you won't be able to access it. -Elasticsearch cleans up deleted documents in the background as you continue to index more data. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation] -[source,ts] ----- -client.get({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: A unique document identifier. -** *`index` (string)*: The name of the index that contains the document. -** *`force_synthetic_source` (Optional, boolean)*: Indicates whether the request forces synthetic `_source`. 
Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index.
-** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name.
-** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time.
-** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
-** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.
-** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
-** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in the `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
-** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
-** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_fields` option. Object fields can't be returned; if specified, the request fails.
-** *`version` (Optional, number)*: The version number for concurrency control. It must match the current version of the document for the request to succeed.
-** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type.
-
-[discrete]
-=== get_script
-Get a script or search template.
-Retrieves a stored script or search template.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script[Endpoint documentation]
-[source,ts]
----
-client.getScript({ id })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The identifier for the stored script or search template.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out.
-
-[discrete]
-=== get_script_context
-Get script contexts.
-
-Get a list of supported script contexts and their methods.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context[Endpoint documentation]
-[source,ts]
----
-client.getScriptContext()
----
-
-[discrete]
-=== get_script_languages
-Get script languages.
-
-Get a list of available script types, languages, and contexts.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages[Endpoint documentation]
-[source,ts]
----
-client.getScriptLanguages()
----
-
-[discrete]
-=== get_source
-Get a document's source.
-
-Get the source of a document.
-For example:
-
----
-GET my-index-000001/_source/1
----
-
-You can use the source filtering parameters to control which parts of the `_source` are returned:
-
----
-GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
----
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation]
-[source,ts]
----
-client.getSource({ id, index })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: A unique document identifier.
-** *`index` (string)*: The name of the index that contains the document.
-** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas.
-** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time.
-** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
-** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.
-** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
-** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response.
-** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response.
-** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit.
-** *`version` (Optional, number)*: The version number for concurrency control. It must match the current version of the document for the request to succeed.
-** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type.
-
-[discrete]
-=== health_report
-Get the cluster health.
-Get a report with the health status of an Elasticsearch cluster.
-The report contains a list of indicators that compose Elasticsearch functionality.
-
-Each indicator has a health status of green, unknown, yellow, or red.
-The indicator will provide an explanation and metadata describing the reason for its current health status.
-
-The cluster’s status is controlled by the worst indicator status.
-
-In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result, detailing the functionalities that are negatively affected by the health issue.
-Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system.
-
-Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system.
-The root cause and remediation steps are encapsulated in a diagnosis.
-A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem.
-
-NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently.
-When setting up automated polling of the API for health status, set `verbose` to `false` to disable the more expensive analysis logic.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report[Endpoint documentation]
-[source,ts]
----
-client.healthReport({ ... })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`feature` (Optional, string | string[])*: A feature of the cluster, as returned by the top-level health report API.
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout.
-** *`verbose` (Optional, boolean)*: Opt-in for more information about the health of the system.
-** *`size` (Optional, number)*: Limit the number of affected resources the health report API returns.
-
-[discrete]
-=== index
-Create or update a document in an index.
-
-Add a JSON document to the specified data stream or index and make it searchable.
-If the target is an index and the document already exists, the request updates the document and increments its version.
-
-NOTE: You cannot use this API to send update requests for existing documents in a data stream.
-
-If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
-
-* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.
-* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.
-* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.
-
-Automatic data stream creation requires a matching index template with data stream enabled.
-
-NOTE: Replica shards might not all be started when an indexing operation returns successfully.
-By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.
-
-**Automatically create data streams and indices**
-
-If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.
-
-If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.
-
-NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.
-
-If no mapping exists, the index operation creates a dynamic mapping.
-By default, new fields and objects are automatically added to the mapping if needed.
-
-Automatic index creation is controlled by the `action.auto_create_index` setting.
-If it is `true`, any index can be created automatically.
-You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.
-Specify a list of patterns you want to allow, or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.
-When a list is specified, the default behavior is to disallow.
-
-NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.
-It does not affect the creation of data streams.
-
-**Optimistic concurrency control**
-
-Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.
-If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.
-
-**Routing**
-
-By default, shard placement — or routing — is controlled by using a hash of the document's ID value.
-For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.
-
-When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.
-This does come at the (very minimal) cost of an additional document parsing pass.
-If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.
-
-NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
-
-**Distributed**
-
-The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.
-After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.
-
-**Active shards**
-
-To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
-If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.
-By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).
-This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.
-To alter this behavior per operation, use the `wait_for_active_shards` request parameter.
-
-Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).
-Specifying a negative value or a number greater than the number of shard copies will throw an error.
-
-For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).
-If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.
-This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
-If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.
-This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
-However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.
-The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.
-
-It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.
-After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.
-The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.
-
-**No operation (noop) updates**
-
-When updating a document by using this API, a new version of the document is always created even if the document hasn't changed.
-If this isn't acceptable, use the `_update` API with `detect_noop` set to `true`.
-The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.
-
-There isn't a definitive rule for when noop updates aren't acceptable.
-It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.
-
-**Versioning**
-
-Each indexed document is given a version number.
-By default, internal versioning is used that starts at 1 and increments with each update, deletes included.
-Optionally, the version number can be set to an external value (for example, if maintained in a database).
-To enable this functionality, `version_type` should be set to `external`.
-The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.
-
-NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.
-If no version is provided, the operation runs without any version checks.
-
-When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.
-If true, the document will be indexed and the new version number used.
-If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example:
-
----
-PUT my-index-000001/_doc/1?version=2&version_type=external
-{
-  "user": {
-    "id": "elkbee"
-  }
-}
----
-
-In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
-If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
-
-A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
-Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create[Endpoint documentation]
-[source,ts]
----
-client.index({ index })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API.
-** *`id` (Optional, string)*: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format and omit this parameter.
-** *`document` (Optional, object)*: A document.
-** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term.
-** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number.
-** *`include_source_on_error` (Optional, boolean)*: Indicates whether to include the document source in the error message in case of parsing errors.
-** *`op_type` (Optional, Enum("index" | "create"))*: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `<index>/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required.
-** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured, it will always run, regardless of the value of this parameter.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
-** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
-** *`timeout` (Optional, string | -1 | 0)*: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur.
-** *`version` (Optional, number)*: An explicit version number for concurrency control. It must be a non-negative long number.
-** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.
-** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias.
-
-[discrete]
-=== info
-Get cluster info.
-Get basic build, version, and cluster information.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info[Endpoint documentation]
-[source,ts]
----
-client.info()
----
-
-[discrete]
-=== knn_search
-Run a knn search.
-
-NOTE: The kNN search API has been replaced by the `knn` option in the search API.
-
-Perform a k-nearest neighbor (kNN) search on a `dense_vector` field and return the matching documents.
-Given a query vector, the API finds the k closest vectors and returns those documents as search hits.
-
-Elasticsearch uses the HNSW algorithm to support efficient kNN search.
-Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed.
-This means the results returned are not always the true k closest neighbors.
-
-The kNN search API supports restricting the search using a filter.
-The search will return the top k documents that also match the filter query.
-
-A kNN search response has the exact same structure as a search API response.
-However, certain sections have a meaning specific to kNN search:
-
-* The document `_score` is determined by the similarity between the query and document vector.
-* The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value.
-
-{ref}/knn-search-api.html[Endpoint documentation]
-[source,ts]
----
-client.knnSearch({ index, knn })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: A list of index names to search; use `_all` or an empty string to perform the operation on all indices.
-** *`knn` ({ field, query_vector, k, num_candidates })*: The kNN query to run.
-** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These fields are returned in the `hits._source` property of the search response.
-** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns.
-** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response.
-** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. -** *`routing` (Optional, string)*: A list of specific routing values. - -[discrete] -=== mget -Get multiple documents. - -Get multiple JSON documents by ID from one or more indices. -If you specify an index in the request URI, you only need to specify the document IDs in the request body. -To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. - -**Filter source fields** - -By default, the `_source` field is returned for every document (if stored). -Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. -You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. - -**Get stored fields** - -Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. -Any requested fields that are not stored are ignored. -You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget[Endpoint documentation] -[source,ts] ----- -client.mget({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string)*: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. -** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: The documents you want to retrieve. Required if no index is specified in the request URI. -** *`ids` (Optional, string | string[])*: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. 
-** *`force_synthetic_source` (Optional, boolean)*: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index.
-** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default.
-** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time.
-** *`refresh` (Optional, boolean)*: If `true`, the request refreshes relevant shards before retrieving documents.
-** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
-** *`_source` (Optional, boolean | string | string[])*: True or false to return the `_source` field or not, or a list of fields to return.
-** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in the `_source_includes` query parameter.
-** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
-** *`stored_fields` (Optional, string | string[])*: If `true`, retrieves the document fields stored in the index rather than the document `_source`.
-
-[discrete]
-=== msearch
-Run multiple searches.
-
-The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.
-The structure is as follows:
-
----
-header\n
-body\n
-header\n
-body\n
----
-
-This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.
-
-IMPORTANT: The final line of data must end with a newline character `\n`.
-Each newline character may be preceded by a carriage return `\r`.
-When sending requests to this endpoint, the `Content-Type` header should be set to `application/x-ndjson`.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch[Endpoint documentation]
-[source,ts]
----
-client.msearch({ ... })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases to search.
-** *`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])*
-** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar.
-** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen.
-** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response.
-** *`include_named_queries_score` (Optional, boolean)*: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false). This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead.
-** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute.
-** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node.
-** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method, i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint.
-** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total is returned as an integer in the response. Defaults to false, which returns an object.
-** *`routing` (Optional, string)*: Custom routing value used to route search operations to a specific shard.
-** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Indicates whether global term and document frequencies should be used when scoring returned documents.
-** *`typed_keys` (Optional, boolean)*: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response.
-
-[discrete]
-=== msearch_template
-Run multiple templated searches.
-
-Run multiple templated searches with a single request.
-If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.
-For example:
-
----
-$ cat requests
-{ "index": "my-index" }
-{ "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
-{ "index": "my-other-index" }
-{ "id": "my-other-search-template", "params": { "query_type": "match_all" }}
-
-$ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
----
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template[Endpoint documentation]
-[source,ts]
----
-client.msearchTemplate({ ...
}) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. -** *`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* -** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. -** *`max_concurrent_searches` (Optional, number)*: The maximum number of concurrent searches the API can run. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. -** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. -** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. - -[discrete] -=== mtermvectors -Get multiple term vectors. - -Get multiple term vectors with a single request. -You can specify existing documents by index and ID or provide artificial documents in the body of the request. -You can specify the index in the request body or request URI. -The response contains a `docs` array with all the fetched termvectors. -Each element has the structure provided by the termvectors API. - -**Artificial documents** - -You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. -The mapping used is determined by the specified `_index`. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors[Endpoint documentation] -[source,ts] ----- -client.mtermvectors({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string)*: The name of the index that contains the documents. -** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: An array of existing or artificial documents. -** *`ids` (Optional, string[])*: A simplified syntax to specify documents by their ID if they're in the same index. -** *`fields` (Optional, string | string[])*: A list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. -** *`field_statistics` (Optional, boolean)*: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. -** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets. -** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads. -** *`positions` (Optional, boolean)*: If `true`, the response includes term positions. -** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. 
-** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time.
-** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.
-** *`term_statistics` (Optional, boolean)*: If true, the response includes term frequency and document frequency.
-** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit.
-** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type.
-
-[discrete]
-=== open_point_in_time
-Open a point in time.
-
-A search request by default runs against the most recent visible data of the target indices,
-which is called point in time. An Elasticsearch point in time (PIT) is a lightweight view into the
-state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple
-search requests using the same point in time. For example, if refreshes happen between
-`search_after` requests, then the results of those requests might not be consistent as changes happening
-between searches are only visible to the more recent point in time.
-
-A point in time must be opened explicitly before being used in search requests.
-
-A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time.
-
-Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits.
-If you want to retrieve more hits, use PIT with `search_after`.
-
-IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.
-
-When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception.
-To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.
-
-**Keeping point in time alive**
-
-The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time.
-The value does not need to be long enough to process all data — it just needs to be long enough for the next request.
-
-Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments.
-Once the smaller segments are no longer needed, they are deleted.
-However, open point-in-times prevent the old segments from being deleted since they are still in use.
-
-TIP: Keeping older segments alive means that more disk space and file handles are needed.
-Ensure that you have configured your nodes to have ample free file handles.
-
-Additionally, if a segment contains deleted or updated documents, then the point in time must keep track of whether each document in the segment was live at the time of the initial search request.
-Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates.
-Note that a point-in-time doesn't prevent its associated indices from being deleted.
-You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.
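-
-As a minimal sketch of the full lifecycle with this client (the index name and `keep_alive` values are illustrative, and in some client versions the response is wrapped in a `body` property):
-
-[source,ts]
----
-// Open a point in time against the target index.
-const pit = await client.openPointInTime({
-  index: 'my-index-000001',
-  keep_alive: '1m'
-})
-
-// Search with the PIT; the request must not specify `index`,
-// `routing`, or `preference`, as they are copied from the PIT.
-const result = await client.search({
-  size: 100,
-  query: { match_all: {} },
-  pit: { id: pit.id, keep_alive: '1m' }
-})
-
-// Close the PIT once it is no longer needed.
-await client.closePointInTime({ id: pit.id })
----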
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time[Endpoint documentation] -[source,ts] ----- -client.openPointInTime({ index, keep_alive }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices -** *`keep_alive` (string | -1 | 0)*: Extend the length of time that the point in time persists. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter indices if the provided query rewrites to `match_none` on every shard. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, it is random. -** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`allow_partial_search_results` (Optional, boolean)*: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. - -[discrete] -=== ping -Ping the cluster. -Get information about whether the cluster is running. - -https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster[Endpoint documentation] -[source,ts] ----- -client.ping() ----- - -[discrete] -=== put_script -Create or update a script or search template. -Creates or updates a stored script or search template. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script[Endpoint documentation] -[source,ts] ----- -client.putScript({ id, script }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The identifier for the stored script or search template. It must be unique within the cluster. -** *`script` ({ lang, options, source })*: The script or search template, its parameters, and its language. -** *`context` (Optional, string)*: The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. 
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. - -[discrete] -=== rank_eval -Evaluate ranked search results. - -Evaluate the quality of ranked search results over a set of typical search queries. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval[Endpoint documentation] -[source,ts] ----- -client.rankEval({ requests }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`requests` ({ id, request, ratings, template_id, params }[])*: A set of typical search requests, together with their provided ratings. -** *`index` (Optional, string | string[])*: A list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. -** *`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })*: Definition of the evaluation metric to calculate. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. -** *`search_type` (Optional, string)*: Search operation type - -[discrete] -=== reindex -Reindex documents. - -Copy documents from a source to a destination. -You can copy all documents to the destination index or reindex a subset of the documents. -The source can be any existing index, alias, or data stream. -The destination must differ from the source. -For example, you cannot reindex a data stream into itself. - -IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. -The destination should be configured as wanted before calling the reindex API. -Reindex does not copy the settings from the source or its associated template. -Mappings, shard counts, and replicas, for example, must be configured ahead of time. - -If the Elasticsearch security features are enabled, you must have the following security privileges: - -* The `read` index privilege for the source data stream, index, or alias. -* The `write` index privilege for the destination data stream, index, or index alias. -* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. 
-* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.
-
-If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.
-Automatic data stream creation requires a matching index template with data stream enabled.
-
-The `dest` element can be configured like the index API to control optimistic concurrency control.
-Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.
-
-Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.
-
-Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination.
-All existing documents will cause a version conflict.
-
-IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.
-A reindex can only add new documents to a destination data stream.
-It cannot update existing documents in a destination data stream.
-
-By default, version conflicts abort the reindex process.
-To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.
-In this case, the response includes a count of the version conflicts that were encountered.
-Note that the handling of other error types is unaffected by the `conflicts` property.
-Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.
-
-NOTE: The reindex API makes no effort to handle ID collisions.
-The last document written will "win", but the order isn't usually predictable, so it is not a good idea to rely on this behavior.
-Instead, make sure that IDs are unique by using a script.
-
-**Running reindex asynchronously**
-
-If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.
-Elasticsearch creates a record of this task as a document at `_tasks/<task_id>`.
-
-**Reindex from multiple sources**
-
-If you have many sources to reindex, it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.
-That way you can resume the process if there are any errors by removing the partially completed source and starting over.
-It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.
-
-For example, you can use a bash script like this:
-
----
-for index in i1 i2 i3 i4 i5; do
-  curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
-    "source": {
-      "index": "'$index'"
-    },
-    "dest": {
-      "index": "'$index'-reindexed"
-    }
-  }'
-done
----
-
-**Throttling**
-
-Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.
-Requests are throttled by padding each batch with a wait time.
-To turn off throttling, set `requests_per_second` to `-1`. - -The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. -The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. -By default the batch size is `1000`, so if `requests_per_second` is set to `500`: - ----- -target_time = 1000 / 500 per second = 2 seconds -wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ----- - -Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. -This is "bursty" instead of "smooth". - -**Slicing** - -Reindex supports sliced scroll to parallelize the reindexing process. -This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. - -NOTE: Reindexing from remote clusters does not support manual or automatic slicing. - -You can slice a reindex request manually by providing a slice ID and total number of slices to each request. -You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. -The `slices` parameter specifies the number of slices to use. - -Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: - -* You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. -* Fetching the status of the task for the request with `slices` only contains the status of completed slices. -* These sub-requests are individually addressable for things like cancellation and rethrottling. -* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. -* Canceling the request with `slices` will cancel each sub-request. -* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. -* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. -* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. - -If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. -If slicing manually or otherwise tuning automatic slicing, use the following guidelines. - -Query performance is most efficient when the number of slices is equal to the number of shards in the index. -If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. -Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. - -Indexing performance scales linearly across available resources with the number of slices. - -Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. 
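-
-As a hedged sketch, an automatically sliced, asynchronous reindex with this client might look like the following (the index names are illustrative, and the parameter layout follows the flattened request style of recent client versions):
-
-[source,ts]
----
-// Let Elasticsearch choose the number of slices and run the
-// reindex in the background instead of blocking the request.
-const task = await client.reindex({
-  slices: 'auto',
-  wait_for_completion: false,
-  source: { index: 'my-index-000001' },
-  dest: { index: 'my-new-index-000001' }
-})
-
-// The sliced sub-requests show up as child tasks of this task.
-const status = await client.tasks.get({ task_id: String(task.task) })
----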
-
-**Modify documents during reindexing**
-
-Like `_update_by_query`, reindex operations support a script that modifies the document.
-Unlike `_update_by_query`, the script is allowed to modify the document's metadata.
-
-Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.
-For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body.
-Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.
-The deletion will be reported in the `deleted` counter in the response body.
-Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.
-
-Think of the possibilities! Just be careful; you are able to change:
-
-* `_id`
-* `_index`
-* `_version`
-* `_routing`
-
-Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.
-It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.
-
-**Reindex from remote**
-
-Reindex supports reindexing from a remote Elasticsearch cluster.
-The `host` parameter must contain a scheme, host, port, and optional path.
-The `username` and `password` parameters are optional; when they are present, the reindex operation will connect to the remote Elasticsearch node using basic authentication.
-Be sure to use HTTPS when using basic authentication, or the password will be sent in plain text.
-There is a range of settings available to configure the behavior of the HTTPS connection.
-
-When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.
-Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.
-It can be set to a comma-delimited list of allowed remote host and port combinations.
-Scheme is ignored; only the host and port are used.
-For example:
-
----
-reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
----
-
-The list of allowed hosts must be configured on any nodes that will coordinate the reindex.
-This feature should work with remote clusters of any version of Elasticsearch.
-This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.
-
-WARNING: Elasticsearch does not support forward compatibility across major versions.
-For example, you cannot reindex from a 7.x cluster into a 6.x cluster.
-
-To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.
-
-NOTE: Reindexing from remote clusters does not support manual or automatic slicing.
-
-Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.
-If the remote index includes very large documents, you'll need to use a smaller batch size.
-It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.
-Both default to 30 seconds.
-
-**Configuring SSL parameters**
-
-Reindex from remote supports configurable SSL settings.
-These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.
-It is not possible to configure SSL in the body of the reindex request.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex[Endpoint documentation]
-[source,ts]
----
-client.reindex({ dest, source })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`dest` ({ index, op_type, pipeline, routing, version_type })*: The destination you are copying to.
-** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source you are copying from.
-** *`conflicts` (Optional, Enum("abort" | "proceed"))*: Indicates whether to continue reindexing even when there are conflicts.
-** *`max_docs` (Optional, number)*: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.
-** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when reindexing.
-** *`size` (Optional, number)*
-** *`refresh` (Optional, boolean)*: If `true`, the request refreshes affected shards to make this operation visible to search.
-** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. By default, there is no throttle.
-** *`scroll` (Optional, string | -1 | 0)*: The period of time that a consistent view of the index should be maintained for scrolled search.
-** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards.
-** *`timeout` (Optional, string | -1 | 0)*: The period each indexing operation waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete.
-** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias.
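-
-For example, a minimal sketch that combines several of these options: it copies matching documents and lowercases a hypothetical `user` field with a Painless script (the index names, query, and field are placeholders):
-
-[source,ts]
----
-await client.reindex({
-  source: { index: 'my-source-index', query: { term: { user: 'kimchy' } } },
-  dest: { index: 'my-dest-index', op_type: 'create' },
-  script: {
-    // Modify each document before it is indexed into the destination.
-    source: 'ctx._source.user = ctx._source.user.toLowerCase()',
-    lang: 'painless'
-  },
-  conflicts: 'proceed'
-})
----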
-
-[discrete]
-=== reindex_rethrottle
-Throttle a reindex operation.
-
-Change the number of requests per second for a particular reindex operation.
-For example:
-
----
-POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
----
-
-Rethrottling that speeds up the query takes effect immediately.
-Rethrottling that slows down the query will take effect after completing the current batch.
-This behavior prevents scroll timeouts.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex[Endpoint documentation]
-[source,ts]
----
-client.reindexRethrottle({ task_id })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`task_id` (string)*: The task identifier, which can be found by using the tasks API.
-** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level.
-
-[discrete]
-=== render_search_template
-Render a search template.
-
-Render a search template as a search request body.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template[Endpoint documentation]
-[source,ts]
----
-client.renderSearchTemplate({ ... })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (Optional, string)*: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required.
-** *`file` (Optional, string)*
-** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value.
-** *`source` (Optional, string)*: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required.
-
-[discrete]
-=== scripts_painless_execute
-Run a script.
-
-Runs a script and returns a result.
-Use this API to build and test scripts, such as when defining a script for a runtime field.
-This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.
-
-The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is.
-
-Each context requires a script, but additional parameters depend on the context you're using for that script.
-
-{painless}/painless-execute-api.html[Endpoint documentation]
-[source,ts]
----
-client.scriptsPainlessExecute({ ... })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`context` (Optional, Enum("painless_test" | "filter" | "score" | "boolean_field" | "date_field" | "double_field" | "geo_point_field" | "ip_field" | "keyword_field" | "long_field" | "composite_field"))*: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed.
-** *`context_setup` (Optional, { document, index, query })*: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`.
-** *`script` (Optional, { source, id, params, lang, options })*: The Painless script to run.
-
-[discrete]
-=== scroll
-Run a scrolling search.
-
-IMPORTANT: The scroll API is no longer recommended for deep pagination.
If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). - -The scroll API gets large sets of results from a single scrolling search request. -To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. -The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. -The search response returns a scroll ID in the `_scroll_id` response body parameter. -You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. -If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. - -You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. - -IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll[Endpoint documentation] -[source,ts] ----- -client.scroll({ scroll_id }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`scroll_id` (string)*: The scroll ID of the search. -** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling. -** *`rest_total_hits_as_int` (Optional, boolean)*: If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. - -[discrete] -=== search -Run a search. - -Get search hits that match the query defined in the request. -You can provide search queries using the `q` query string parameter or the request body. -If both are specified, only the query parameter is used. - -If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. -To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices. - -**Search slicing** - -When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. -By default the splitting is done first on the shards, then locally on each shard. -The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. - -For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. - -IMPORTANT: The same point-in-time ID should be used for all slices. -If different PIT IDs are used, slices can overlap and miss documents. -This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search[Endpoint documentation] -[source,ts] ----- -client.search({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). 
To search all data streams and indices, omit this parameter or use `*` or `_all`.
-** *`aggregations` (Optional, Record)*: Defines the aggregations that are run as part of the search request.
-** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*: Collapses search results by the values of the specified field.
-** *`explain` (Optional, boolean)*: If `true`, the request returns detailed information about score computation as part of a hit.
-** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins.
-** *`from` (Optional, number)*: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
-** *`highlight` (Optional, { encoder, fields })*: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results.
-** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
-** *`indices_boost` (Optional, Record[])*: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score.
-** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response.
-** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])*: The approximate kNN search to run.
-** *`rank` (Optional, { rrf })*: The Reciprocal Rank Fusion (RRF) to use.
-** *`min_score` (Optional, number)*: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results.
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results.
-** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The search definition using the Query DSL. -** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. -** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. -** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null[])*: Used to retrieve the next page of hits using a set of sort values from the previous page. -** *`size` (Optional, number)*: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. -** *`slice` (Optional, { field, id, max })*: Split a scrolled search into multiple slices that can be consumed independently. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: A list of : pairs. -** *`_source` (Optional, boolean | { excludes, includes })*: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. -** *`fields` (Optional, { field, format, include_unmapped }[])*: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. -** *`suggest` (Optional, { text })*: Defines a suggester that provides similar looking terms based on a provided text. -** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. -** *`timeout` (Optional, string)*: The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. 
Defaults to no timeout. -** *`track_scores` (Optional, boolean)*: If `true`, calculate and return document scores, even if the scores are not used for sorting. -** *`version` (Optional, boolean)*: If `true`, the request returns the document version as part of a hit. -** *`seq_no_primary_term` (Optional, boolean)*: If `true`, the request returns sequence number and primary term of the last modification of each hit. -** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. -** *`pit` (Optional, { id, keep_alive })*: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. -** *`runtime_mappings` (Optional, Record)*: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. -** *`stats` (Optional, string[])*: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`allow_partial_search_results` (Optional, boolean)*: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. -** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. -** *`batched_reduce_size` (Optional, number)*: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. -** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. -** *`df` (Optional, string)*: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`.
-** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices will be ignored when frozen.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`include_named_queries_score` (Optional, boolean)*: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead.
-** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
-** *`max_concurrent_shard_requests` (Optional, number)*: The maximum number of concurrent shard requests per node that the search runs. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests.
-** *`preference` (Optional, string)*: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search only on the specified node IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to run the search, if possible, on the specified node IDs. If not, select shards using the default method. `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order.
-** *`pre_filter_shard_size` (Optional, number)*: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only indices. * The primary sort of the query targets an indexed field.
-** *`request_cache` (Optional, boolean)*: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings.
-** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
-** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling.
By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting.
-** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Indicates how distributed term frequencies are calculated for relevance scoring.
-** *`suggest_field` (Optional, string)*: The field to use for suggestions.
-** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
-** *`suggest_size` (Optional, number)*: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
-** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
-** *`typed_keys` (Optional, boolean)*: If `true`, aggregation and suggester names are prefixed by their respective types in the response.
-** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response.
-** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
-** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
-** *`q` (Optional, string)*: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned.
-** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than if synthetic source were enabled natively in the index.
-
-[discrete]
-=== search_mvt
-Search a vector tile.
-
-Search a vector tile for geospatial values.
-Before using this API, you should be familiar with the Mapbox vector tile specification.
-The API returns results as a binary Mapbox vector tile.
-
-Internally, Elasticsearch translates a vector tile search API request into a search containing:
-
-* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box.
-* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.
-* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`.
-* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value.
This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.
-
-For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search:
-
----
-GET my-index/_search
-{
-  "size": 10000,
-  "query": {
-    "geo_bounding_box": {
-      "my-geo-field": {
-        "top_left": {
-          "lat": -40.979898069620134,
-          "lon": -45
-        },
-        "bottom_right": {
-          "lat": -66.51326044311186,
-          "lon": 0
-        }
-      }
-    }
-  },
-  "aggregations": {
-    "grid": {
-      "geotile_grid": {
-        "field": "my-geo-field",
-        "precision": 11,
-        "size": 65536,
-        "bounds": {
-          "top_left": {
-            "lat": -40.979898069620134,
-            "lon": -45
-          },
-          "bottom_right": {
-            "lat": -66.51326044311186,
-            "lon": 0
-          }
-        }
-      }
-    },
-    "bounds": {
-      "geo_bounds": {
-        "field": "my-geo-field",
-        "wrap_longitude": false
-      }
-    }
-  }
-}
----
-
-The API returns results as a binary Mapbox vector tile.
-Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:
-
-* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.
-* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.
-* A meta layer containing:
- * A feature containing a bounding box. By default, this is the bounding box of the tile.
- * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.
- * Metadata for the search.
-
-The API only returns features that can be displayed at its zoom level.
-For example, if a polygon feature has no area at its zoom level, the API omits it.
-The API returns errors as UTF-8 encoded JSON.
-
-IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.
-If you specify both parameters, the query parameter takes precedence.
-
-**Grid precision for geotile**
-
-For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.
-`grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: ` + grid_precision`.
-For example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.
-The maximum final precision is 29.
-The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.
-For example, a value of 8 divides the tile into a grid of 256 x 256 cells.
-The `aggs` layer only contains features for cells with matching data.
-
-**Grid precision for geohex**
-
-For a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`.
-
-This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.
-The following table maps the H3 resolution for each precision.
-For example, if `` is 3 and `grid_precision` is 3, the precision is 6.
-At a precision of 6, hexagonal cells have an H3 resolution of 2.
-If `` is 3 and `grid_precision` is 4, the precision is 7.
-At a precision of 7, hexagonal cells have an H3 resolution of 3.
- -| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | -| --------- | ---------------- | ------------- | ----------------| ----- | -| 1 | 4 | 0 | 122 | 30.5 | -| 2 | 16 | 0 | 122 | 7.625 | -| 3 | 64 | 1 | 842 | 13.15625 | -| 4 | 256 | 1 | 842 | 3.2890625 | -| 5 | 1024 | 2 | 5882 | 5.744140625 | -| 6 | 4096 | 2 | 5882 | 1.436035156 | -| 7 | 16384 | 3 | 41162 | 2.512329102 | -| 8 | 65536 | 3 | 41162 | 0.6280822754 | -| 9 | 262144 | 4 | 288122 | 1.099098206 | -| 10 | 1048576 | 4 | 288122 | 0.2747745514 | -| 11 | 4194304 | 5 | 2016842 | 0.4808526039 | -| 12 | 16777216 | 6 | 14117882 | 0.8414913416 | -| 13 | 67108864 | 6 | 14117882 | 0.2103728354 | -| 14 | 268435456 | 7 | 98825162 | 0.3681524172 | -| 15 | 1073741824 | 8 | 691776122 | 0.644266719 | -| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | -| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | -| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | -| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | -| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | -| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | -| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | -| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | -| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | -| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | -| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | -| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | -| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | -| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | - -Hexagonal cells don't align perfectly on a vector tile. -Some cells may intersect more than one vector tile. -To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. -Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt[Endpoint documentation] -[source,ts] ----- -client.searchMvt({ index, field, zoom, x, y }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, or aliases to search -** *`field` (string)*: Field containing geospatial data to return -** *`zoom` (number)*: Zoom level for the vector tile to search -** *`x` (number)*: X coordinate for the vector tile to search -** *`y` (number)*: Y coordinate for the vector tile to search -** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. -** *`buffer` (Optional, number)*: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. -** *`exact_bounds` (Optional, boolean)*: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `//` tile with `wrap_longitude` set to `false`. 
The resulting bounding box may be larger than the vector tile.
-** *`extent` (Optional, number)*: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides.
-** *`fields` (Optional, string | string[])*: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results.
-** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: The aggregation used to create a grid for the `field`.
-** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if `` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer.
-** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The query DSL used to filter documents for the search.
-** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
-** *`size` (Optional, number)*: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer.
-** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest.
-** *`track_total_hits` (Optional, boolean | number)*: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
-** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features.
In addition, the new features will be distinguishable using the tag `_mvt_label_position`.
-
-[discrete]
-=== search_shards
-Get the search shards.
-
-Get the indices and shards that a search request would be run against.
-This information can be useful for working out issues or planning optimizations with routing and shard preferences.
-When filtered aliases are used, the filter is returned as part of the `indices` section.
-
-If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards[Endpoint documentation]
-[source,ts]
----
-client.searchShards({ ... })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out.
-** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default.
-** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.
-
-[discrete]
-=== search_template
-Run a search with a search template.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template[Endpoint documentation]
-[source,ts]
----
-client.searchTemplate({ ... })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`).
-** *`explain` (Optional, boolean)*: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter.
-** *`id` (Optional, string)*: The ID of the search template to use. If no `source` is specified, this parameter is required.
-** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value.
-** *`profile` (Optional, boolean)*: If `true`, the query execution is profiled.
-** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required.
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
-** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-** *`ignore_throttled` (Optional, boolean)*: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default.
-** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.
-** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index should be maintained for scrolled search.
-** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation.
-** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object.
-** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types.
-
-[discrete]
-=== terms_enum
-Get terms in an index.
-
-Discover terms that match a partial string in an index.
-This API is designed for low-latency look-ups used in auto-complete scenarios.
-
-> info
-> The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum[Endpoint documentation]
-[source,ts]
----
-client.termsEnum({ index, field })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: A list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`.
-** *`field` (string)*: The name of the field from which to fetch terms.
-** *`size` (Optional, number)*: The number of matching terms to return.
-** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results.
If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty.
-** *`case_insensitive` (Optional, boolean)*: When `true`, the provided search string is matched against index terms without case sensitivity.
-** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter an index shard if the provided query rewrites to `match_none`.
-** *`string` (Optional, string)*: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766.
-** *`search_after` (Optional, string)*: The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request.
-
-[discrete]
-=== termvectors
-Get term vector information.
-
-Get information and statistics about terms in the fields of a particular document.
-
-You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.
-You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.
-For example:
-
----
-GET /my-index-000001/_termvectors/1?fields=message
----
-
-Fields can be specified using wildcards, similar to the multi match query.
-
-Term vectors are real-time by default, not near real-time.
-This can be changed by setting the `realtime` parameter to `false`.
-
-You can request three types of values: _term information_, _term statistics_, and _field statistics_.
-By default, all term information and field statistics are returned for all fields but term statistics are excluded.
-
-**Term information**
-
-* term frequency in the field (always returned)
-* term positions (`positions: true`)
-* start and end offsets (`offsets: true`)
-* term payloads (`payloads: true`), as base64 encoded bytes
-
-If the requested information wasn't stored in the index, it will be computed on the fly if possible.
-Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.
-
-> warn
-> Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.
-
-**Behaviour**
-
-The term and field statistics are not accurate.
-Deleted documents are not taken into account.
-The information is only retrieved for the shard the requested document resides in.
-The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.
-By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.
-Use `routing` only to hit a particular shard.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors[Endpoint documentation]
-[source,ts]
----
-client.termvectors({ index })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the index that contains the document.
-** *`id` (Optional, string)*: A unique identifier for the document.
-** *`doc` (Optional, object)*: An artificial document (a document not present in the index) for which you want to retrieve term vectors.
-** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })*: Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query.
-** *`per_field_analyzer` (Optional, Record)*: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated.
-** *`fields` (Optional, string | string[])*: A list of fields or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.
-** *`field_statistics` (Optional, boolean)*: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field).
-** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets.
-** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads.
-** *`positions` (Optional, boolean)*: If `true`, the response includes term positions.
-** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default.
-** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time.
-** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
-** *`term_statistics` (Optional, boolean)*: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact.
-** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit.
-** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type.
-
-[discrete]
-=== update
-Update a document.
-
-Update a document by running a script or passing a partial document.
-
-If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.
-
-The script can update, delete, or skip modifying the document.
-The API also supports passing a partial document, which is merged into the existing document.
-To fully replace an existing document, use the index API.
-This operation:
-
-* Gets the document (collocated with the shard) from the index.
-* Runs the specified script.
-* Indexes the result.
-
-The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.
-
-The `_source` field must be enabled to use this API.
-In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update[Endpoint documentation]
-[source,ts]
----
-client.update({ id, index })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: A unique identifier for the document to be updated.
-** *`index` (string)*: The name of the target index. By default, the index is created automatically if it doesn't exist.
-** *`detect_noop` (Optional, boolean)*: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document.
-** *`doc` (Optional, object)*: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored.
-** *`doc_as_upsert` (Optional, boolean)*: If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported.
-** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document.
-** *`scripted_upsert` (Optional, boolean)*: If `true`, run the script whether or not the document exists.
-** *`_source` (Optional, boolean | { excludes, includes })*: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve.
-** *`upsert` (Optional, object)*: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run.
-** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term.
-** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number.
-** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message in case of parsing errors.
-** *`lang` (Optional, string)*: The script language.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes.
-** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias.
-** *`retry_on_conflict` (Optional, number)*: The number of times the operation should be retried when a conflict occurs.
-** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of copies of each shard that must be active before proceeding with the operation.
Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active.
-** *`_source_excludes` (Optional, string | string[])*: The source fields you want to exclude.
-** *`_source_includes` (Optional, string | string[])*: The source fields you want to retrieve.
-
-[discrete]
-=== update_by_query
-Update documents.
-Updates documents that match the specified query.
-If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
-
-If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:
-
-* `read`
-* `index` or `write`
-
-You can specify the query criteria in the request URI or the request body using the same syntax as the search API.
-
-When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.
-When the versions match, the document is updated and the version number is incremented.
-If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.
-You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.
-Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.
-
-NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.
-
-While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.
-A bulk update request is performed for each batch of matching documents.
-Any query or update failures cause the update by query request to fail and the failures are shown in the response.
-Any update requests that completed successfully still stick; they are not rolled back.
-
-**Throttling update requests**
-
-To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.
-This pads each batch with a wait time to throttle the rate.
-Set `requests_per_second` to `-1` to turn off throttling.
-
-Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.
-The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
-By default the batch size is 1000, so if `requests_per_second` is set to `500`:
-
----
-target_time = 1000 / 500 per second = 2 seconds
-wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
----
-
-Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.
-This is "bursty" instead of "smooth".
-
-**Slicing**
-
-Update by query supports sliced scroll to parallelize the update process.
-This can improve efficiency and provide a convenient way to break the request down into smaller parts.
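-
-For instance, a sliced update by query might look like the following minimal sketch (the index name and script are placeholders):
-
-[source,ts]
----
-await client.updateByQuery({
-  index: 'my-index-000001',
-  // Count version conflicts instead of aborting.
-  conflicts: 'proceed',
-  // Parallelize using one slice per shard, up to a limit.
-  slices: 'auto',
-  script: {
-    source: 'ctx._source.count++',
-    lang: 'painless'
-  }
-})
----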
-
-Setting `slices` to `auto` chooses a reasonable number for most data streams and indices.
-This setting will use one slice per shard, up to a certain limit.
-If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
-
-Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:
-
-* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
-* Fetching the status of the task for the request with `slices` only contains the status of completed slices.
-* These sub-requests are individually addressable for things like cancellation and rethrottling.
-* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.
-* Canceling the request with slices will cancel each sub-request.
-* Due to the nature of slices, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
-* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.
-* Each sub-request gets a slightly different snapshot of the source data stream or index, though these are all taken at approximately the same time.
-
-If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:
-
-* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
-* Update performance scales linearly across available resources with the number of slices.
-
-Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.
-
-**Update the document source**
-
-Update by query supports scripts to update the document source.
-As with the update API, you can set `ctx.op` to change the operation that is performed.
-
-Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes.
-The update by query operation skips updating the document and increments the `noop` counter.
-
-Set `ctx.op = "delete"` if your script decides that the document should be deleted.
-The update by query operation deletes the document and increments the `deleted` counter.
-
-Update by query supports only `index`, `noop`, and `delete`.
-Setting `ctx.op` to anything else is an error.
-Setting any other field in `ctx` is an error.
-This API enables you to only modify the source of matching documents; you cannot move them.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query[Endpoint documentation]
-[source,ts]
----
-client.update_by_query({ index })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`.
-** *`max_docs` (Optional, number)*: The maximum number of documents to update. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The documents to update using the Query DSL. -** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when updating. -** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. -** *`conflicts` (Optional, Enum("abort" | "proceed"))*: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. -** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. -** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. 
If a final pipeline is configured it will always run, regardless of the value of this parameter.
-** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default.
-** *`q` (Optional, string)*: A query in the Lucene query string syntax.
-** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed.
-** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. It defaults to the index-level setting.
-** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second.
-** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.
-** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling.
-** *`scroll_size` (Optional, number)*: The size of the scroll request that powers the operation.
-** *`search_timeout` (Optional, string | -1 | 0)*: An explicit timeout for each search request. By default, there is no timeout.
-** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
-** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into.
-** *`sort` (Optional, string[])*: A list of `<field>:<direction>` pairs.
-** *`stats` (Optional, string[])*: The specific `tag` of the request for logging and statistical purposes.
-** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
-** *`timeout` (Optional, string | -1 | 0)*: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur.
-** *`version` (Optional, boolean)*: If `true`, returns the document version as part of a hit.
-** *`version_type` (Optional, boolean)*: Whether the document should increment the version number (internal) on hit or not (reindex).
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.
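-
-For example, a minimal sketch that updates matching documents with a script and skips the rest with a noop, as described under "Update the document source" above (the index, query, and field names are illustrative):
-
-[source,ts]
----
-const response = await client.update_by_query({
-  index: 'my-index',
-  query: { term: { status: 'stale' } },
-  script: {
-    // Refresh stale documents; leave everything else untouched.
-    source: "if (ctx._source.status == 'stale') { ctx._source.status = 'fresh' } else { ctx.op = 'noop' }"
-  },
-  conflicts: 'proceed'
-})
----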
-
-[discrete]
-=== update_by_query_rethrottle
-Throttle an update by query operation.
-
-Change the number of requests per second for a particular update by query operation.
-Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle[Endpoint documentation]
-[source,ts]
----
-client.updateByQueryRethrottle({ task_id })
----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`task_id` (string)*: The ID for the task.
-** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`.
-
-[discrete]
-=== async_search
-[discrete]
-==== delete
-Delete an async search.
-
-If the asynchronous search is still running, it is cancelled.
-Otherwise, the saved search results are deleted.
-If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to the authenticated user that submitted the original search request and users that have the `cancel_task` cluster privilege.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation]
-[source,ts]
----
-client.asyncSearch.delete({ id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: A unique identifier for the async search.
-
-[discrete]
-==== get
-Get async search results.
-
-Retrieve the results of a previously submitted asynchronous search request.
-If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation]
-[source,ts]
----
-client.asyncSearch.get({ id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: A unique identifier for the async search.
-** *`keep_alive` (Optional, string | -1 | 0)*: The length of time that the async search should be available in the cluster.
-When not specified, the `keep_alive` set with the corresponding submit async request will be used.
-Otherwise, it is possible to override the value and extend the validity of the request.
-When this period expires, the search, if still running, is cancelled.
-If the search is completed, its saved results are deleted.
-** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
-** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Specifies how long to wait for the search to complete, up until the provided timeout.
-Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires.
-By default no timeout is set, meaning that the currently available results will be returned without any additional wait.
-
-[discrete]
-==== status
-Get the async search status.
-
-Get the status of a previously submitted async search request given its identifier, without retrieving search results.
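-
-For example, a minimal sketch of polling a previously submitted search (the identifier is illustrative):
-
-[source,ts]
----
-const status = await client.asyncSearch.status({ id: 'my-async-search-id' })
-// `is_running` indicates whether partial results may still change.
-console.log(status.is_running)
----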
-If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to: - -* The user or API key that submitted the original async search request. -* Users that have the `monitor` cluster privilege or greater privileges. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation] -[source,ts] ----- -client.asyncSearch.status({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: A unique identifier for the async search. -** *`keep_alive` (Optional, string | -1 | 0)*: The length of time that the async search needs to be available. -Ongoing async searches and any saved search results are deleted after this period. - -[discrete] -==== submit -Run an async search. - -When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. - -Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. - -By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. -The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation] -[source,ts] ----- -client.asyncSearch.submit({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices -** *`aggregations` (Optional, Record)* -** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* -** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. -** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. -** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000 -hits using the from and size parameters. To page through more hits, use the -search_after parameter. -** *`highlight` (Optional, { encoder, fields })* -** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If true, the exact -number of hits is returned at the cost of some performance. If false, the -response does not include the total number of hits matching the query. -Defaults to 10,000 hits. -** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. -** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field -names matching these patterns in the hits.fields property of the response. -** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])*: Defines the approximate kNN search to run. -** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are -not included in the search results. 
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* -** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* -** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null[])* -** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more -than 10,000 hits using the from and size parameters. To page through more -hits, use the search_after parameter. -** *`slice` (Optional, { field, id, max })* -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])* -** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These -fields are returned in the hits._source property of the search response. -** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names -matching these patterns in the hits.fields property of the response. -** *`suggest` (Optional, { text })* -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. If a query reaches this -limit, Elasticsearch terminates the query early. Elasticsearch collects documents -before sorting. Defaults to 0, which does not terminate query execution early. -** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. If no response -is received before the timeout expires, the request fails and returns an error. -Defaults to no timeout. -** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting. -** *`version` (Optional, boolean)*: If true, returns document version as part of a hit. 
-** *`seq_no_primary_term` (Optional, boolean)*: If true, returns sequence number and primary term of the last modification
-of each hit. See Optimistic concurrency control.
-** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified,
-no stored fields are included in the response. If this field is specified, the _source
-parameter defaults to false. You can pass _source: true to return both source fields
-and stored fields in the search response.
-** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
-cannot specify an `<index>` in the request path.
-** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
-precedence over mapped fields with the same name.
-** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics
-aggregation for its associated searches. You can retrieve these stats using
-the indices stats API.
-** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Blocks and waits until the search is completed up to a certain timeout.
-When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster.
-** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search needs to be available.
-Ongoing async searches and any saved search results are deleted after this period.
-** *`keep_on_completion` (Optional, boolean)*: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`.
-** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
-** *`allow_partial_search_results` (Optional, boolean)*: Indicate if an error should be returned if there is a partial search failure or timeout
-** *`analyzer` (Optional, string)*: The analyzer to use for the query string
-** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false)
-** *`batched_reduce_size` (Optional, number)*: Affects how often partial results become available, which happens whenever shard results are reduced.
-A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default).
-** *`ccs_minimize_roundtrips` (Optional, boolean)*: The default value is the only supported value.
-** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR)
-** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled
-** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
-** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored
-** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests
-** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
-** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to true
-** *`routing` (Optional, string)*: A list of specific routing values
-** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type
-** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions.
-** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specify suggest mode
-** *`suggest_size` (Optional, number)*: How many suggestions to return in response
-** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned.
-** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
-** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
-** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field
-** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field
-** *`q` (Optional, string)*: Query in the Lucene query string syntax
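-
-For example, a minimal sketch that submits a search, keeps the results on completion, and fetches them later by ID (the index and query are illustrative):
-
-[source,ts]
----
-const submitted = await client.asyncSearch.submit({
-  index: 'my-index',
-  query: { match: { message: 'error' } },
-  wait_for_completion_timeout: '1s',
-  keep_on_completion: true
-})
-
-if (submitted.id != null) {
-  // The search was stored; retrieve the results (or partial results) by ID.
-  const results = await client.asyncSearch.get({ id: submitted.id })
-  console.log(results.response.hits.hits)
-}
----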
-
-[discrete]
-=== autoscaling
-[discrete]
-==== delete_autoscaling_policy
-Delete an autoscaling policy.
-
-NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy[Endpoint documentation]
-[source,ts]
----
-client.autoscaling.deleteAutoscalingPolicy({ name })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: the name of the autoscaling policy
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== get_autoscaling_capacity
-Get the autoscaling capacity.
-
-NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
-
-This API gets the current autoscaling capacity based on the configured autoscaling policy.
-It will return information to size the cluster appropriately to the current workload.
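-
-For example, a minimal sketch of fetching the current capacity and logging what each policy requires (the `policies` map and `required_capacity` field follow the response shape described below; the exact field access is an assumption):
-
-[source,ts]
----
-const capacity = await client.autoscaling.getAutoscalingCapacity()
-for (const [name, policy] of Object.entries(capacity.policies)) {
-  // Inspect each policy's reported requirements.
-  console.log(name, policy.required_capacity)
-}
----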
- -The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. - -The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. - -The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. -This information is provided for diagnosis only. -Do not use this information to make autoscaling decisions. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity[Endpoint documentation] -[source,ts] ----- -client.autoscaling.getAutoscalingCapacity({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_autoscaling_policy -Get an autoscaling policy. - -NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity[Endpoint documentation] -[source,ts] ----- -client.autoscaling.getAutoscalingPolicy({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: the name of the autoscaling policy -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== put_autoscaling_policy -Create or update an autoscaling policy. - -NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy[Endpoint documentation] -[source,ts] ----- -client.autoscaling.putAutoscalingPolicy({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: the name of the autoscaling policy -** *`policy` (Optional, { roles, deciders })* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -=== cat -[discrete] -==== aliases -Get aliases. - -Get the cluster's index aliases, including filter and routing information. -This API does not return data stream aliases. - -IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases[Endpoint documentation] -[source,ts] ----- -client.cat.aliases({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string | string[])*: A list of aliases to retrieve. Supports wildcards (`*`). 
To retrieve all aliases, omit this parameter or use `*` or `_all`.
-** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards.
-** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted.
-Sorting defaults to ascending and can be changed by setting `:asc`
-or `:desc` as a suffix to the column name.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match.
-If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-It supports a list of values, such as `open,hidden`.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
-If the master node is not available before the timeout expires, the request fails and returns an error.
-To indicate that the request should never time out, you can set it to `-1`.
-
-[discrete]
-==== allocation
-Get shard allocation information.
-
-Get a snapshot of the number of shards allocated to each data node and their disk space.
-
-IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation[Endpoint documentation]
-[source,ts]
----
-client.cat.allocation({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`node_id` (Optional, string | string[])*: A list of node identifiers or names used to limit the returned information.
-** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values.
-** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards.
-** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted.
-Sorting defaults to ascending and can be changed by setting `:asc`
-or `:desc` as a suffix to the column name.
-** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
-local cluster state. If `false` the list of selected nodes are computed
-from the cluster state of the master node. In both cases the coordinating
-node will send requests for further information to each selected node.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-
-[discrete]
-==== component_templates
-Get component templates.
-
-Get information about component templates in a cluster.
-Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
-
-IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.
-They are not intended for use by applications. For application consumption, use the get component template API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates[Endpoint documentation]
-[source,ts]
----
-client.cat.componentTemplates({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string)*: The name of the component template.
-It accepts wildcard expressions.
-If it is omitted, all component templates are returned.
-** *`h` (Optional, string | string[])*: List of columns to appear in the response.
Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. - -[discrete] -==== count -Get a document count. - -Get quick access to a document count for a data stream, an index, or an entire cluster. -The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. - -IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use the count API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count[Endpoint documentation] -[source,ts] ----- -client.cat.count({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. -It supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. - -[discrete] -==== fielddata -Get field data cache information. - -Get the amount of heap memory currently used by the field data cache on every data node in the cluster. - -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use the nodes stats API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata[Endpoint documentation] -[source,ts] ----- -client.cat.fielddata({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`fields` (Optional, string | string[])*: List of fields used to limit returned information. -To retrieve all fields, omit this parameter. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. - -[discrete] -==== health -Get the cluster health status. - -IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use the cluster health API. -This API is often used to check malfunctioning clusters. 
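-
-For example, a minimal sketch that requests the health table with both timestamp formats (`ts` and `time` are the parameters documented below):
-
-[source,ts]
----
-const health = await client.cat.health({ ts: true, time: 's' })
-console.log(health)
----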
-To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: -`HH:MM:SS`, which is human-readable but includes no date information; -`Unix epoch time`, which is machine-sortable and includes date information. -The latter format is useful for cluster recoveries that take multiple days. -You can use the cat health API to verify cluster health across multiple nodes. -You also can use the API to track the recovery of a large cluster over a longer period of time. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health[Endpoint documentation] -[source,ts] ----- -client.cat.health({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. -** *`ts` (Optional, boolean)*: If true, returns `HH:MM:SS` and Unix epoch timestamps. -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. - -[discrete] -==== help -Get CAT help. - -Get help for the CAT APIs. - -https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat[Endpoint documentation] -[source,ts] ----- -client.cat.help() ----- - - -[discrete] -==== indices -Get index information. - -Get high-level information about indices in a cluster, including backing indices for data streams. - -Use this request to get the following information for each index in a cluster: -- shard count -- document count -- deleted document count -- primary store size -- total store size of all shards, including shard replicas - -These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. -To get an accurate count of Elasticsearch documents, use the cat count or count APIs. - -CAT APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use an index endpoint. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices[Endpoint documentation] -[source,ts] ----- -client.cat.indices({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. -** *`health` (Optional, Enum("green" | "yellow" | "red"))*: The health status used to limit returned indices. By default, the response includes indices of any health status. -** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. -** *`pri` (Optional, boolean)*: If true, the response only includes information from primary shards. 
-** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. - -[discrete] -==== master -Get master node information. - -Get information about the master node, including the ID, bound IP address, and name. - -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master[Endpoint documentation] -[source,ts] ----- -client.cat.master({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== ml_data_frame_analytics -Get data frame analytics jobs. - -Get configuration and usage information about data frame analytics jobs. - -IMPORTANT: CAT APIs are only intended for human consumption using the Kibana -console or command line. They are not intended for use by applications. For -application consumption, use the get data frame analytics jobs statistics API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics[Endpoint documentation] -[source,ts] ----- -client.cat.mlDataFrameAnalytics({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: The ID of the data frame analytics to fetch -** *`allow_no_match` (Optional, boolean)*: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit in which to display byte values -** *`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names to display. 
-** *`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names or column aliases used to sort the -response. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. - -[discrete] -==== ml_datafeeds -Get datafeeds. - -Get configuration and usage information about datafeeds. -This API returns a maximum of 10,000 datafeeds. -If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` -cluster privileges to use this API. - -IMPORTANT: CAT APIs are only intended for human consumption using the Kibana -console or command line. They are not intended for use by applications. For -application consumption, use the get datafeed statistics API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds[Endpoint documentation] -[source,ts] ----- -client.cat.mlDatafeeds({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (Optional, string)*: A numerical character string that uniquely identifies the datafeed. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -* Contains wildcard expressions and there are no datafeeds that match. -* Contains the `_all` string or no identifiers and there are no matches. -* Contains wildcard expressions and there are only partial matches. - -If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when -there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only -partial matches. -** *`h` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])*: List of column names to display. -** *`s` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])*: List of column names or column aliases used to sort the response. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. - -[discrete] -==== ml_jobs -Get anomaly detection jobs. - -Get configuration and usage information for anomaly detection jobs. -This API returns a maximum of 10,000 jobs. -If the Elasticsearch security features are enabled, you must have `monitor_ml`, -`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. - -IMPORTANT: CAT APIs are only intended for human consumption using the Kibana -console or command line. They are not intended for use by applications. For -application consumption, use the get anomaly detection job statistics API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs[Endpoint documentation] -[source,ts] ----- -client.cat.mlJobs({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -* Contains wildcard expressions and there are no jobs that match. -* Contains the `_all` string or no identifiers and there are no matches. -* Contains wildcard expressions and there are only partial matches. - -If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there -are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial -matches. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`h` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" 
| "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])*: List of column names to display. -** *`s` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])*: List of column names or column aliases used to sort the response. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. - -[discrete] -==== ml_trained_models -Get trained models. - -Get configuration and usage information about inference trained models. - -IMPORTANT: CAT APIs are only intended for human consumption using the Kibana -console or command line. They are not intended for use by applications. For -application consumption, use the get trained models statistics API. 
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models[Endpoint documentation]
-[source,ts]
----
-client.cat.mlTrainedModels({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`model_id` (Optional, string)*: A unique identifier for the trained model.
-** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.
-If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches.
-If `false`, the API returns a 404 status code when there are no matches or only partial matches.
-** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values.
-** *`h` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])*: A list of column names to display.
-** *`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])*: A list of column names or aliases used to sort the response.
-** *`from` (Optional, number)*: Skips the specified number of trained models.
-** *`size` (Optional, number)*: The maximum number of trained models to display.
-** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values.
-
-[discrete]
-==== nodeattrs
-Get node attribute information.
-
-Get information about custom node attributes.
-IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs[Endpoint documentation]
-[source,ts]
----
-client.cat.nodeattrs({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards.
-** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted.
-Sorting defaults to ascending and can be changed by setting `:asc`
-or `:desc` as a suffix to the column name.
-** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
-local cluster state. If `false` the list of selected nodes are computed
-from the cluster state of the master node. In both cases the coordinating
-node will send requests for further information to each selected node.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== nodes -Get node information. - -Get information about the nodes in a cluster. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes[Endpoint documentation] -[source,ts] ----- -client.cat.nodes({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`full_id` (Optional, boolean | string)*: If `true`, return the full node ID. If `false`, return the shortened node ID. -** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. - -[discrete] -==== pending_tasks -Get pending task information. - -Get information about cluster-level changes that have not yet taken effect. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks[Endpoint documentation] -[source,ts] ----- -client.cat.pendingTasks({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. - -[discrete] -==== plugins -Get plugin information. - -Get a list of plugins running on each node of a cluster. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins[Endpoint documentation] -[source,ts] ----- -client.cat.plugins({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. -** *`include_bootstrap` (Optional, boolean)*: Include bootstrap plugins in the response -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== recovery -Get shard recovery information. - -Get information about ongoing and completed shard recoveries. -Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. -For data streams, the API returns information about the stream’s backing indices. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery[Endpoint documentation] -[source,ts] ----- -client.cat.recovery({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`active_only` (Optional, boolean)*: If `true`, the response only includes ongoing shard recoveries. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. - -[discrete] -==== repositories -Get snapshot repository information. - -Get a list of snapshot repositories for a cluster. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories[Endpoint documentation] -[source,ts] ----- -client.cat.repositories({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. 
-** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== segments -Get segment information. - -Get low-level information about the Lucene segments in index shards. -For data streams, the API returns information about the backing indices. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments[Endpoint documentation] -[source,ts] ----- -client.cat.segments({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== shards -Get shard information. - -Get information about the shards in a cluster. -For data streams, the API returns information about the backing indices. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards[Endpoint documentation] -[source,ts] ----- -client.cat.shards({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. 
-Sorting defaults to ascending and can be changed by setting `:asc`
-or `:desc` as a suffix to the column name.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values.
-
-[discrete]
-==== snapshots
-Get snapshot information.
-
-Get information about the snapshots stored in one or more repositories.
-A snapshot is a backup of an index or running Elasticsearch cluster.
-IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots[Endpoint documentation]
-[source,ts]
----
-client.cat.snapshots({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`repository` (Optional, string | string[])*: A list of snapshot repositories used to limit the request.
-Accepts wildcard expressions.
-`_all` returns all repositories.
-If any repository fails during the request, Elasticsearch returns an error.
-** *`ignore_unavailable` (Optional, boolean)*: If `true`, the response does not include information from unavailable snapshots.
-** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards.
-** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted.
-Sorting defaults to ascending and can be changed by setting `:asc`
-or `:desc` as a suffix to the column name.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values.
-
-[discrete]
-==== tasks
-Get task information.
-
-Get information about tasks currently running in the cluster.
-IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks[Endpoint documentation]
-[source,ts]
----
-client.cat.tasks({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`actions` (Optional, string[])*: The task action names, which are used to limit the response.
-** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about the running tasks.
-** *`nodes` (Optional, string[])*: Unique node identifiers, which are used to limit the response.
-** *`parent_task_id` (Optional, string)*: The parent task identifier, which is used to limit the response.
-** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards.
-** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted.
-Sorting defaults to ascending and can be changed by setting `:asc`
-or `:desc` as a suffix to the column name.
-** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the task has completed. - -[discrete] -==== templates -Get index template information. - -Get information about the index templates in a cluster. -You can use index templates to apply index settings and field mappings to new indices at creation. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates[Endpoint documentation] -[source,ts] ----- -client.cat.templates({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string)*: The name of the template to return. -Accepts wildcard expressions. If omitted, all templates are returned. -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== thread_pool -Get thread pool statistics. - -Get thread pool statistics for each node in a cluster. -Returned information includes all built-in thread pools and custom thread pools. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool[Endpoint documentation] -[source,ts] ----- -client.cat.threadPool({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`thread_pool_patterns` (Optional, string | string[])*: A list of thread pool names used to limit the request. -Accepts wildcard expressions. -** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. -** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. -Sorting defaults to ascending and can be changed by setting `:asc` -or `:desc` as a suffix to the column name. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== transforms -Get transform information. - -Get configuration and usage information about transforms. - -CAT APIs are only intended for human consumption using the Kibana -console or command line. 
They are not intended for use by applications. For -application consumption, use the get transform statistics API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms[Endpoint documentation] -[source,ts] ----- -client.cat.transforms({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (Optional, string)*: A transform identifier or a wildcard expression. -If you do not specify one of these options, the API returns information for all transforms. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. -If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. -If `false`, the request returns a 404 status code when there are no matches or only partial matches. -** *`from` (Optional, number)*: Skips the specified number of transforms. -** *`h` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])*: List of column names to display. 
-** *`s` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])*: List of column names or column aliases used to sort the response. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. -** *`size` (Optional, number)*: The maximum number of transforms to obtain. - -[discrete] -=== ccr -[discrete] -==== delete_auto_follow_pattern -Delete auto-follow patterns. - -Delete a collection of cross-cluster replication auto-follow patterns. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern[Endpoint documentation] -[source,ts] ----- -client.ccr.deleteAutoFollowPattern({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The auto-follow pattern collection to delete. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. -It can also be set to `-1` to indicate that the request should never timeout. - -[discrete] -==== follow -Create a follower. -Create a cross-cluster replication follower index that follows a specific leader index. -When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow[Endpoint documentation] -[source,ts] ----- -client.ccr.follow({ index, leader_index, remote_cluster }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: The name of the follower index. -** *`leader_index` (string)*: The name of the index in the leader cluster to follow. -** *`remote_cluster` (string)*: The remote cluster containing the leader index. -** *`data_stream_name` (Optional, string)*: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. -** *`max_outstanding_read_requests` (Optional, number)*: The maximum number of outstanding reads requests from the remote cluster. -** *`max_outstanding_write_requests` (Optional, number)*: The maximum number of outstanding write requests on the follower. 
-** *`max_read_request_operation_count` (Optional, number)*: The maximum number of operations to pull per read from the remote cluster.
-** *`max_read_request_size` (Optional, number | string)*: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
-** *`max_retry_delay` (Optional, string | -1 | 0)*: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when
-retrying.
-** *`max_write_buffer_count` (Optional, number)*: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be
-deferred until the number of queued operations goes below the limit.
-** *`max_write_buffer_size` (Optional, number | string)*: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will
-be deferred until the total bytes of queued operations goes below the limit.
-** *`max_write_request_operation_count` (Optional, number)*: The maximum number of operations per bulk write request executed on the follower.
-** *`max_write_request_size` (Optional, number | string)*: The maximum total bytes of operations per bulk write request executed on the follower.
-** *`read_poll_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index.
-When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics.
-Then the follower will immediately attempt to read from the leader again.
-** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Settings to override from the leader index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be
-active.
-A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the
-remote Lucene segment files to the follower index.
-
-[discrete]
-==== follow_info
-Get follower information.
-
-Get information about all cross-cluster replication follower indices.
-For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.
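-
-A minimal usage sketch (assuming an already-configured `client`; the follower index name is illustrative only):
-
-[source,ts]
----
-// Hedged sketch: inspect replication options and status for one follower index.
-// 'follower-index' is a hypothetical index name.
-const info = await client.ccr.followInfo({ index: 'follower-index' })
-console.log(info)
----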
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info[Endpoint documentation] -[source,ts] ----- -client.ccr.followInfo({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: A comma-delimited list of follower index patterns. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. -It can also be set to `-1` to indicate that the request should never timeout. - -[discrete] -==== follow_stats -Get follower stats. - -Get cross-cluster replication follower stats. -The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats[Endpoint documentation] -[source,ts] ----- -client.ccr.followStats({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: A comma-delimited list of index patterns. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== forget_follower -Forget a follower. -Remove the cross-cluster replication follower retention leases from the leader. - -A following index takes out retention leases on its leader index. -These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. -When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. -However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. -While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. -This API exists to enable manually removing the leases when the unfollow API is unable to do so. - -NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. -The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower[Endpoint documentation] -[source,ts] ----- -client.ccr.forgetFollower({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: the name of the leader index for which specified follower retention leases should be removed -** *`follower_cluster` (Optional, string)* -** *`follower_index` (Optional, string)* -** *`follower_index_uuid` (Optional, string)* -** *`leader_remote_cluster` (Optional, string)* -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_auto_follow_pattern -Get auto-follow patterns. - -Get cross-cluster replication auto-follow patterns. 
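-
-As a brief sketch (assuming an already-configured `client`), omitting the `name` parameter retrieves every collection:
-
-[source,ts]
----
-// Hedged sketch: fetch all auto-follow pattern collections.
-const patterns = await client.ccr.getAutoFollowPattern()
-console.log(patterns)
----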
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1[Endpoint documentation] -[source,ts] ----- -client.ccr.getAutoFollowPattern({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string)*: The auto-follow pattern collection that you want to retrieve. -If you do not specify a name, the API returns information for all collections. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. -It can also be set to `-1` to indicate that the request should never timeout. - -[discrete] -==== pause_auto_follow_pattern -Pause an auto-follow pattern. - -Pause a cross-cluster replication auto-follow pattern. -When the API returns, the auto-follow pattern is inactive. -New indices that are created on the remote cluster and match the auto-follow patterns are ignored. - -You can resume auto-following with the resume auto-follow pattern API. -When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. -Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern[Endpoint documentation] -[source,ts] ----- -client.ccr.pauseAutoFollowPattern({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the auto-follow pattern to pause. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. -It can also be set to `-1` to indicate that the request should never timeout. - -[discrete] -==== pause_follow -Pause a follower. - -Pause a cross-cluster replication follower index. -The follower index will not fetch any additional operations from the leader index. -You can resume following with the resume follower API. -You can pause and resume a follower index to change the configuration of the following task. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow[Endpoint documentation] -[source,ts] ----- -client.ccr.pauseFollow({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: The name of the follower index. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. -It can also be set to `-1` to indicate that the request should never timeout. - -[discrete] -==== put_auto_follow_pattern -Create or update auto-follow patterns. -Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. -Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. -Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. - -This API can also be used to update auto-follow patterns. 
-NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern[Endpoint documentation]
-[source,ts]
----
-client.ccr.putAutoFollowPattern({ name, remote_cluster })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the collection of auto-follow patterns.
-** *`remote_cluster` (string)*: The remote cluster containing the leader indices to match against.
-** *`follow_index_pattern` (Optional, string)*: The name of the follower index. The template {{leader_index}} can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use {{leader_index}}; CCR does not support changes to the names of a follower data stream’s backing indices.
-** *`leader_index_patterns` (Optional, string[])*: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field.
-** *`leader_index_exclusion_patterns` (Optional, string[])*: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed.
-** *`max_outstanding_read_requests` (Optional, number)*: The maximum number of outstanding read requests from the remote cluster.
-** *`settings` (Optional, Record)*: Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards).
-** *`max_outstanding_write_requests` (Optional, number)*: The maximum number of outstanding write requests on the follower.
-** *`read_poll_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
-** *`max_read_request_operation_count` (Optional, number)*: The maximum number of operations to pull per read from the remote cluster.
-** *`max_read_request_size` (Optional, number | string)*: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
-** *`max_retry_delay` (Optional, string | -1 | 0)*: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying.
-** *`max_write_buffer_count` (Optional, number)*: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.
-** *`max_write_buffer_size` (Optional, number | string)*: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit.
-** *`max_write_request_operation_count` (Optional, number)*: The maximum number of operations per bulk write request executed on the follower.
-** *`max_write_request_size` (Optional, number | string)*: The maximum total bytes of operations per bulk write request executed on the follower.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== resume_auto_follow_pattern -Resume an auto-follow pattern. - -Resume a cross-cluster replication auto-follow pattern that was paused. -The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. -Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern[Endpoint documentation] -[source,ts] ----- -client.ccr.resumeAutoFollowPattern({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the auto-follow pattern to resume. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. -It can also be set to `-1` to indicate that the request should never timeout. - -[discrete] -==== resume_follow -Resume a follower. -Resume a cross-cluster replication follower index that was paused. -The follower index could have been paused with the pause follower API. -Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. -When this API returns, the follower index will resume fetching operations from the leader index. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow[Endpoint documentation] -[source,ts] ----- -client.ccr.resumeFollow({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: The name of the follow index to resume following. -** *`max_outstanding_read_requests` (Optional, number)* -** *`max_outstanding_write_requests` (Optional, number)* -** *`max_read_request_operation_count` (Optional, number)* -** *`max_read_request_size` (Optional, string)* -** *`max_retry_delay` (Optional, string | -1 | 0)* -** *`max_write_buffer_count` (Optional, number)* -** *`max_write_buffer_size` (Optional, string)* -** *`max_write_request_operation_count` (Optional, number)* -** *`max_write_request_size` (Optional, string)* -** *`read_poll_timeout` (Optional, string | -1 | 0)* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== stats -Get cross-cluster replication stats. - -This API returns stats about auto-following and the same shard-level stats as the get follower stats API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats[Endpoint documentation] -[source,ts] ----- -client.ccr.stats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. -It can also be set to `-1` to indicate that the request should never timeout. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== unfollow -Unfollow an index. - -Convert a cross-cluster replication follower index to a regular index. 
-The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication.
-The follower index must be paused and closed before you call the unfollow API.
-
-NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow[Endpoint documentation]
-[source,ts]
----
-client.ccr.unfollow({ index })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the follower index.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
-If the master node is not available before the timeout expires, the request fails and returns an error.
-It can also be set to `-1` to indicate that the request should never timeout.
-
-[discrete]
-=== cluster
-[discrete]
-==== allocation_explain
-Explain the shard allocations.
-Get explanations for shard allocations in the cluster.
-For unassigned shards, it provides an explanation for why the shard is unassigned.
-For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node.
-This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain[Endpoint documentation]
-[source,ts]
----
-client.cluster.allocationExplain({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`current_node` (Optional, string)*: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node.
-** *`index` (Optional, string)*: Specifies the name of the index that you would like an explanation for.
-** *`primary` (Optional, boolean)*: If true, returns explanation for the primary shard for the given shard ID.
-** *`shard` (Optional, number)*: Specifies the ID of the shard that you would like an explanation for.
-** *`include_disk_info` (Optional, boolean)*: If true, returns information about disk usage and shard sizes.
-** *`include_yes_decisions` (Optional, boolean)*: If true, returns YES decisions in explanation.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-
-[discrete]
-==== delete_component_template
-Delete component templates.
-Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation]
-[source,ts]
----
-client.cluster.deleteComponentTemplate({ name })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string | string[])*: List or wildcard expression of component template names used to limit the request.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
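-
-For example, a minimal sketch of deleting a template (assuming an already-configured `client`; the template name and timeout are illustrative only):
-
-[source,ts]
----
-// Hedged sketch: delete a component template by name, waiting up to 30s.
-// 'my-component-template' is a hypothetical template name.
-await client.cluster.deleteComponentTemplate({
-  name: 'my-component-template',
-  timeout: '30s'
-})
----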
- -[discrete] -==== delete_voting_config_exclusions -Clear cluster voting config exclusions. -Remove master-eligible nodes from the voting configuration exclusion list. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions[Endpoint documentation] -[source,ts] ----- -client.cluster.deleteVotingConfigExclusions({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`wait_for_removal` (Optional, boolean)*: Specifies whether to wait for all excluded nodes to be removed from the -cluster before clearing the voting configuration exclusions list. -Defaults to true, meaning that all excluded nodes must be removed from -the cluster before this API takes any action. If set to false then the -voting configuration exclusions list is cleared even if some excluded -nodes are still in the cluster. - -[discrete] -==== exists_component_template -Check component templates. -Returns information about whether a particular component template exists. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation] -[source,ts] ----- -client.cluster.existsComponentTemplate({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: List of component template names used to limit the request. -Wildcard (*) expressions are supported. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is -received before the timeout expires, the request fails and returns an -error. -** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. -Defaults to false, which means information is retrieved from the master node. - -[discrete] -==== get_component_template -Get component templates. -Get information about component templates. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation] -[source,ts] ----- -client.cluster.getComponentTemplate({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string)*: List of component template names used to limit the request. -Wildcard (`*`) expressions are supported. -** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. -** *`include_defaults` (Optional, boolean)*: Return all default configurations for the component template (default: false) -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. -If `false`, information is retrieved from the master node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_settings -Get cluster-wide settings. -By default, it returns only settings that have been explicitly defined. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings[Endpoint documentation] -[source,ts] ----- -client.cluster.getSettings({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. -** *`include_defaults` (Optional, boolean)*: If `true`, returns default cluster settings from the local node. 
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== health
-Get the cluster health status.
-
-You can also use the API to get the health status of only specified data streams and indices.
-For data streams, the API retrieves the health status of the stream’s backing indices.
-
-The cluster health status is green, yellow, or red.
-On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated.
-The index level status is controlled by the worst shard status.
-
-One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level.
-The cluster status is controlled by the worst index status.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health[Endpoint documentation]
-[source,ts]
----
-client.cluster.health({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Can be one of cluster, indices or shards. Controls the details level of the health information returned.
-** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: A number controlling how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait.
-** *`wait_for_events` (Optional, Enum("immediate" | "urgent" | "high" | "normal" | "low" | "languid"))*: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
-** *`wait_for_nodes` (Optional, string | number)*: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N.
-** *`wait_for_status` (Optional, Enum("green" | "yellow" | "red"))*: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status.
-
-[discrete]
-==== info
-Get cluster info.
-Returns basic information about the cluster.
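-
-A brief usage sketch (assuming an already-configured `client`; the selected targets are illustrative only):
-
-[source,ts]
----
-// Hedged sketch: fetch only the HTTP and ingest sections of cluster info.
-const info = await client.cluster.info({ target: ['http', 'ingest'] })
-console.log(info)
----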
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info[Endpoint documentation] -[source,ts] ----- -client.cluster.info({ target }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`target` (Enum("_all" | "http" | "ingest" | "thread_pool" | "script") | Enum("_all" | "http" | "ingest" | "thread_pool" | "script")[])*: Limits the information returned to the specific target. Supports a list, such as http,ingest. - -[discrete] -==== pending_tasks -Get the pending cluster tasks. -Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. - -NOTE: This API returns a list of any pending updates to the cluster state. -These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. -However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks[Endpoint documentation] -[source,ts] ----- -client.cluster.pendingTasks({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. -If `false`, information is retrieved from the master node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== post_voting_config_exclusions -Update voting configuration exclusions. -Update the cluster voting config exclusions by node IDs or node names. -By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. -If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. -The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. -It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. - -Clusters should have no voting configuration exclusions in normal operation. -Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. -This API waits for the nodes to be fully removed from the cluster before it returns. -If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. - -A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. 
-If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. -In that case, you may safely retry the call. - -NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. -They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions[Endpoint documentation] -[source,ts] ----- -client.cluster.postVotingConfigExclusions({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_names` (Optional, string | string[])*: A list of the names of the nodes to exclude from the -voting configuration. If specified, you may not also specify node_ids. -** *`node_ids` (Optional, string | string[])*: A list of the persistent ids of the nodes to exclude -from the voting configuration. If specified, you may not also specify node_names. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`timeout` (Optional, string | -1 | 0)*: When adding a voting configuration exclusion, the API waits for the -specified nodes to be excluded from the voting configuration before -returning. If the timeout expires before the appropriate condition -is satisfied, the request fails and returns an error. - -[discrete] -==== put_component_template -Create or update a component template. -Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - -An index template can be composed of multiple component templates. -To use a component template, specify it in an index template’s `composed_of` list. -Component templates are only applied to new data streams and indices as part of a matching index template. - -Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. - -Component templates are only used during index creation. -For data streams, this includes data stream creation and the creation of a stream’s backing indices. -Changes to component templates do not affect existing indices, including a stream’s backing indices. - -You can use C-style `/* *\/` block comments in component templates. -You can include comments anywhere in the request body except before the opening curly bracket. - -**Applying component templates** - -You cannot directly apply a component template to a data stream or index. -To be applied, a component template must be included in an index template's `composed_of` list. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation] -[source,ts] ----- -client.cluster.putComponentTemplate({ name, template }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Name of the component template to create. -Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. -Elastic Agent uses these templates to configure backing indices for its data streams. 
-If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. -If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. -** *`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })*: The template to be applied which includes mappings, settings, or aliases configuration. -** *`version` (Optional, number)*: Version number used to manage component templates externally. -This number isn't automatically generated or incremented by Elasticsearch. -To unset a version, replace the template without specifying a version. -** *`_meta` (Optional, Record)*: Optional user metadata about the component template. -It may have any contents. This map is not automatically generated by Elasticsearch. -This information is stored in the cluster state, so keeping it short is preferable. -To unset `_meta`, replace the template without specifying this information. -** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template -that uses deprecated components, Elasticsearch will emit a deprecation warning. -** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing component templates. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== put_settings -Update the cluster settings. - -Configure and update dynamic settings on a running cluster. -You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. - -Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. -You can also reset transient or persistent settings by assigning them a null value. - -If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. -For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. -However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. - -TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. -If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. -Only use `elasticsearch.yml` for static cluster settings and node settings. -The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. - -WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. -If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings[Endpoint documentation] -[source,ts] ----- -client.cluster.putSettings({ ... 
})
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`persistent` (Optional, Record)*
-** *`transient` (Optional, Record)*
-** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
-** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
-
-[discrete]
-==== remote_info
-Get remote cluster information.
-
-Get information about configured remote clusters.
-The API returns connection and endpoint information keyed by the configured remote cluster alias.
-
-NOTE: This API returns information that reflects current state on the local cluster.
-The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it.
-Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster.
-To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster[resolve cluster endpoint].
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info[Endpoint documentation]
-[source,ts]
-----
-client.cluster.remoteInfo()
-----
-
-
-[discrete]
-==== reroute
-Reroute the cluster.
-Manually change the allocation of individual shards in the cluster.
-For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node.
-
-It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state.
-For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out.
-
-The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting.
-If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing.
-
-The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated.
-This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes.
-
-Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute[Endpoint documentation]
-[source,ts]
-----
-client.cluster.reroute({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])*: Defines the commands to perform.
-** *`dry_run` (Optional, boolean)*: If true, then the request simulates the operation.
-It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. -** *`explain` (Optional, boolean)*: If true, then the response contains an explanation of why the commands can or cannot run. -** *`metric` (Optional, string | string[])*: Limits the information returned to the specified metrics. -** *`retry_failed` (Optional, boolean)*: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== state -Get the cluster state. -Get comprehensive information about the state of the cluster. - -The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. - -The elected master node ensures that every node in the cluster has a copy of the same cluster state. -This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. -You may need to consult the Elasticsearch source code to determine the precise meaning of the response. - -By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. -You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. - -Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. -If you use this API repeatedly, your cluster may become unstable. - -WARNING: The response is a representation of an internal data structure. -Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. -Do not query this API using external monitoring tools. -Instead, obtain the information you require using other more stable cluster APIs. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state[Endpoint documentation] -[source,ts] ----- -client.cluster.state({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`metric` (Optional, string | string[])*: Limit the information returned to the specified metrics -** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
-** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`wait_for_metadata_version` (Optional, number)*: Wait for the metadata version to be equal or greater than the specified metadata version -** *`wait_for_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for wait_for_metadata_version before timing out - -[discrete] -==== stats -Get cluster statistics. -Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats[Endpoint documentation] -[source,ts] ----- -client.cluster.stats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: List of node filters used to limit returned information. Defaults to all nodes in the cluster. -** *`include_remotes` (Optional, boolean)*: Include remote cluster data into the response -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for each node to respond. -If a node does not respond before its timeout expires, the response does not include its stats. -However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. - -[discrete] -=== connector -[discrete] -==== check_in -Check in a connector. - -Update the `last_seen` field in the connector and set it to the current timestamp. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in[Endpoint documentation] -[source,ts] ----- -client.connector.checkIn({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be checked in - -[discrete] -==== delete -Delete a connector. - -Removes a connector and associated sync jobs. -This is a destructive action that is not recoverable. -NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. -These need to be removed manually. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete[Endpoint documentation] -[source,ts] ----- -client.connector.delete({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be deleted -** *`delete_sync_jobs` (Optional, boolean)*: A flag indicating if associated sync jobs should be also removed. Defaults to false. -** *`hard` (Optional, boolean)*: A flag indicating if the connector should be hard deleted. - -[discrete] -==== get -Get a connector. - -Get the details about a connector. 
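-
-For example, a minimal lookup might look like this (a sketch assuming an instantiated `client`; `my-connector` is a placeholder ID):
-
-[source,ts]
-----
-// Fetch a single connector document by its ID.
-const connector = await client.connector.get({
-  connector_id: 'my-connector'
-})
-console.log(connector)
-----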
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get[Endpoint documentation] -[source,ts] ----- -client.connector.get({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector -** *`include_deleted` (Optional, boolean)*: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. - -[discrete] -==== list -Get all connectors. - -Get information about all connectors. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list[Endpoint documentation] -[source,ts] ----- -client.connector.list({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`size` (Optional, number)*: Specifies a max number of results to get -** *`index_name` (Optional, string | string[])*: A list of connector index names to fetch connector documents for -** *`connector_name` (Optional, string | string[])*: A list of connector names to fetch connector documents for -** *`service_type` (Optional, string | string[])*: A list of connector service types to fetch connector documents for -** *`include_deleted` (Optional, boolean)*: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. -** *`query` (Optional, string)*: A wildcard query string that filters connectors with matching name, description or index name - -[discrete] -==== post -Create a connector. - -Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. -Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. -Self-managed connectors (Connector clients) are self-managed on your infrastructure. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put[Endpoint documentation] -[source,ts] ----- -client.connector.post({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`description` (Optional, string)* -** *`index_name` (Optional, string)* -** *`is_native` (Optional, boolean)* -** *`language` (Optional, string)* -** *`name` (Optional, string)* -** *`service_type` (Optional, string)* - -[discrete] -==== put -Create or update a connector. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put[Endpoint documentation] -[source,ts] ----- -client.connector.put({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (Optional, string)*: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. -** *`description` (Optional, string)* -** *`index_name` (Optional, string)* -** *`is_native` (Optional, boolean)* -** *`language` (Optional, string)* -** *`name` (Optional, string)* -** *`service_type` (Optional, string)* - -[discrete] -==== sync_job_cancel -Cancel a connector sync job. - -Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. -The connector service is then responsible for setting the status of connector sync jobs to cancelled. 
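-
-A minimal sketch, assuming an instantiated `client` and a placeholder sync job ID:
-
-[source,ts]
-----
-// Request cancellation; the connector service later moves the job to `cancelled`.
-await client.connector.syncJobCancel({
-  connector_sync_job_id: 'my-sync-job-id'
-})
-----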
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobCancel({ connector_sync_job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job - -[discrete] -==== sync_job_check_in -Check in a connector sync job. -Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. - -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. -This service runs automatically on Elastic Cloud for Elastic managed connectors. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobCheckIn({ connector_sync_job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job to be checked in. - -[discrete] -==== sync_job_claim -Claim a connector sync job. -This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. -Additionally, it can set the `sync_cursor` property for the sync job. - -This API is not intended for direct connector management by users. -It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. - -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. -This service runs automatically on Elastic Cloud for Elastic managed connectors. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobClaim({ connector_sync_job_id, worker_hostname }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job. -** *`worker_hostname` (string)*: The host name of the current system that will run the job. -** *`sync_cursor` (Optional, User-defined value)*: The cursor object from the last incremental sync job. -This should reference the `sync_cursor` field in the connector state for which the job runs. - -[discrete] -==== sync_job_delete -Delete a connector sync job. - -Remove a connector sync job and its associated data. -This is a destructive action that is not recoverable. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobDelete({ connector_sync_job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job to be deleted - -[discrete] -==== sync_job_error -Set a connector sync job error. -Set the `error` field for a connector sync job and set its `status` to `error`. - -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. -This service runs automatically on Elastic Cloud for Elastic managed connectors. 
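-
-A minimal sketch, assuming an instantiated `client`; the job ID and error message are placeholders:
-
-[source,ts]
-----
-// Mark the sync job as failed and record the reason in its `error` field.
-await client.connector.syncJobError({
-  connector_sync_job_id: 'my-sync-job-id',
-  error: 'Data source unreachable'
-})
-----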
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobError({ connector_sync_job_id, error }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_sync_job_id` (string)*: The unique identifier for the connector sync job. -** *`error` (string)*: The error for the connector sync job error field. - -[discrete] -==== sync_job_get -Get a connector sync job. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobGet({ connector_sync_job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job - -[discrete] -==== sync_job_list -Get all connector sync jobs. - -Get information about all stored connector sync jobs listed by their creation date in ascending order. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobList({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`size` (Optional, number)*: Specifies a max number of results to get -** *`status` (Optional, Enum("canceling" | "canceled" | "completed" | "error" | "in_progress" | "pending" | "suspended"))*: A sync job status to fetch connector sync jobs for -** *`connector_id` (Optional, string)*: A connector id to fetch connector sync jobs for -** *`job_type` (Optional, Enum("full" | "incremental" | "access_control") | Enum("full" | "incremental" | "access_control")[])*: A list of job types to fetch the sync jobs for - -[discrete] -==== sync_job_post -Create a connector sync job. - -Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobPost({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The id of the associated connector -** *`job_type` (Optional, Enum("full" | "incremental" | "access_control"))* -** *`trigger_method` (Optional, Enum("on_demand" | "scheduled"))* - -[discrete] -==== sync_job_update_stats -Set the connector sync job stats. -Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. -You can also update `last_seen`. -This API is mainly used by the connector service for updating sync job information. - -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. -This service runs automatically on Elastic Cloud for Elastic managed connectors. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobUpdateStats({ connector_sync_job_id, deleted_document_count, indexed_document_count, indexed_document_volume }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job. -** *`deleted_document_count` (number)*: The number of documents the sync job deleted. 
-** *`indexed_document_count` (number)*: The number of documents the sync job indexed. -** *`indexed_document_volume` (number)*: The total size of the data (in MiB) the sync job indexed. -** *`last_seen` (Optional, string | -1 | 0)*: The timestamp to use in the `last_seen` property for the connector sync job. -** *`metadata` (Optional, Record)*: The connector-specific metadata. -** *`total_document_count` (Optional, number)*: The total number of documents in the target index after the sync job finished. - -[discrete] -==== update_active_filtering -Activate the connector draft filter. - -Activates the valid draft filtering for a connector. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering[Endpoint documentation] -[source,ts] ----- -client.connector.updateActiveFiltering({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated - -[discrete] -==== update_api_key_id -Update the connector API key ID. - -Update the `api_key_id` and `api_key_secret_id` fields of a connector. -You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. -The connector secret ID is required only for Elastic managed (native) connectors. -Self-managed connectors (connector clients) do not use this field. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id[Endpoint documentation] -[source,ts] ----- -client.connector.updateApiKeyId({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`api_key_id` (Optional, string)* -** *`api_key_secret_id` (Optional, string)* - -[discrete] -==== update_configuration -Update the connector configuration. - -Update the configuration field in the connector document. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration[Endpoint documentation] -[source,ts] ----- -client.connector.updateConfiguration({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`configuration` (Optional, Record)* -** *`values` (Optional, Record)* - -[discrete] -==== update_error -Update the connector error field. - -Set the error field for the connector. -If the error provided in the request body is non-null, the connector’s status is updated to error. -Otherwise, if the error is reset to null, the connector status is updated to connected. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error[Endpoint documentation] -[source,ts] ----- -client.connector.updateError({ connector_id, error }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`error` (T | null)* - -[discrete] -==== update_features -Update the connector features. -Update the connector features in the connector document. -This API can be used to control the following aspects of a connector: - -* document-level security -* incremental syncs -* advanced sync rules -* basic sync rules - -Normally, the running connector service automatically manages these features. -However, you can use this API to override the default behavior. 
- -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. -This service runs automatically on Elastic Cloud for Elastic managed connectors. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features[Endpoint documentation] -[source,ts] ----- -client.connector.updateFeatures({ connector_id, features }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated. -** *`features` ({ document_level_security, incremental_sync, native_connector_api_keys, sync_rules })* - -[discrete] -==== update_filtering -Update the connector filtering. - -Update the draft filtering configuration of a connector and marks the draft validation state as edited. -The filtering draft is activated once validated by the running Elastic connector service. -The filtering property is used to configure sync rules (both basic and advanced) for a connector. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering[Endpoint documentation] -[source,ts] ----- -client.connector.updateFiltering({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`filtering` (Optional, { active, domain, draft }[])* -** *`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])* -** *`advanced_snippet` (Optional, { created_at, updated_at, value })* - -[discrete] -==== update_filtering_validation -Update the connector draft filtering validation. - -Update the draft filtering validation info for a connector. -[source,ts] ----- -client.connector.updateFilteringValidation({ connector_id, validation }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`validation` ({ errors, state })* - -[discrete] -==== update_index_name -Update the connector index name. - -Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name[Endpoint documentation] -[source,ts] ----- -client.connector.updateIndexName({ connector_id, index_name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`index_name` (T | null)* - -[discrete] -==== update_name -Update the connector name and description. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name[Endpoint documentation] -[source,ts] ----- -client.connector.updateName({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`name` (Optional, string)* -** *`description` (Optional, string)* - -[discrete] -==== update_native -Update the connector is_native flag. -[source,ts] ----- -client.connector.updateNative({ connector_id, is_native }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`is_native` (boolean)* - -[discrete] -==== update_pipeline -Update the connector pipeline. 
-
-When you create a new connector, the configuration of an ingest pipeline is populated with default settings.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline[Endpoint documentation]
-[source,ts]
-----
-client.connector.updatePipeline({ connector_id, pipeline })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`connector_id` (string)*: The unique identifier of the connector to be updated
-** *`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })*
-
-[discrete]
-==== update_scheduling
-Update the connector scheduling.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling[Endpoint documentation]
-[source,ts]
-----
-client.connector.updateScheduling({ connector_id, scheduling })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`connector_id` (string)*: The unique identifier of the connector to be updated
-** *`scheduling` ({ access_control, full, incremental })*
-
-[discrete]
-==== update_service_type
-Update the connector service type.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type[Endpoint documentation]
-[source,ts]
-----
-client.connector.updateServiceType({ connector_id, service_type })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`connector_id` (string)*: The unique identifier of the connector to be updated
-** *`service_type` (string)*
-
-[discrete]
-==== update_status
-Update the connector status.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status[Endpoint documentation]
-[source,ts]
-----
-client.connector.updateStatus({ connector_id, status })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`connector_id` (string)*: The unique identifier of the connector to be updated
-** *`status` (Enum("created" | "needs_configuration" | "configured" | "connected" | "error"))*
-
-[discrete]
-=== dangling_indices
-[discrete]
-==== delete_dangling_index
-Delete a dangling index.
-If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
-For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index[Endpoint documentation]
-[source,ts]
-----
-client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index_uuid` (string)*: The UUID of the index to delete. Use the get dangling indices API to find the UUID.
-** *`accept_data_loss` (boolean)*: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
-
-[discrete]
-==== import_dangling_index
-Import a dangling index.
-
-If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
-For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
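-
-For example, you might list the dangling indices and then import one of them (a sketch assuming an instantiated `client` and that the client returns the API body directly):
-
-[source,ts]
-----
-// Find dangling indices, then import the first one found.
-// `accept_data_loss` must be true because the imported data may be stale.
-const { dangling_indices } = await client.danglingIndices.listDanglingIndices()
-if (dangling_indices.length > 0) {
-  await client.danglingIndices.importDanglingIndex({
-    index_uuid: dangling_indices[0].index_uuid,
-    accept_data_loss: true
-  })
-}
-----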
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index[Endpoint documentation] -[source,ts] ----- -client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index_uuid` (string)*: The UUID of the index to import. Use the get dangling indices API to locate the UUID. -** *`accept_data_loss` (boolean)*: This parameter must be set to true to import a dangling index. -Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout - -[discrete] -==== list_dangling_indices -Get the dangling indices. - -If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. -For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - -Use this API to list dangling indices, which you can then import or delete. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices[Endpoint documentation] -[source,ts] ----- -client.danglingIndices.listDanglingIndices() ----- - - -[discrete] -=== enrich -[discrete] -==== delete_policy -Delete an enrich policy. -Deletes an existing enrich policy and its enrich index. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy[Endpoint documentation] -[source,ts] ----- -client.enrich.deletePolicy({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Enrich policy to delete. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== execute_policy -Run an enrich policy. -Create the enrich index for an existing enrich policy. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy[Endpoint documentation] -[source,ts] ----- -client.enrich.executePolicy({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Enrich policy to execute. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks other enrich policy execution requests until complete. - -[discrete] -==== get_policy -Get an enrich policy. -Returns information about an enrich policy. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy[Endpoint documentation] -[source,ts] ----- -client.enrich.getPolicy({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string | string[])*: List of enrich policy names used to limit the request. -To return information for all enrich policies, omit this parameter. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== put_policy -Create an enrich policy. -Creates an enrich policy. 
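-
-For example, a `match` policy that enriches incoming documents with user details could be created and executed like this (a sketch; the index and field names are placeholders):
-
-[source,ts]
-----
-// Create the policy, then run it to build its enrich index.
-await client.enrich.putPolicy({
-  name: 'users-policy',
-  match: {
-    indices: 'users',
-    match_field: 'email',
-    enrich_fields: ['first_name', 'last_name', 'city']
-  }
-})
-await client.enrich.executePolicy({ name: 'users-policy' })
-----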
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy[Endpoint documentation] -[source,ts] ----- -client.enrich.putPolicy({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Name of the enrich policy to create or update. -** *`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches enrich data to incoming documents based on a `geo_shape` query. -** *`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches enrich data to incoming documents based on a `term` query. -** *`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== stats -Get enrich stats. -Returns enrich coordinator statistics and information about enrich policies that are currently executing. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats[Endpoint documentation] -[source,ts] ----- -client.enrich.stats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -=== eql -[discrete] -==== delete -Delete an async EQL search. -Delete an async EQL search or a stored synchronous EQL search. -The API also deletes results for the search. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete[Endpoint documentation] -[source,ts] ----- -client.eql.delete({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the search to delete. -A search ID is provided in the EQL search API's response for an async search. -A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. - -[discrete] -==== get -Get async EQL search results. -Get the current status and available results for an async EQL search or a stored synchronous EQL search. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get[Endpoint documentation] -[source,ts] ----- -client.eql.get({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the search. -** *`keep_alive` (Optional, string | -1 | 0)*: Period for which the search and its results are stored on the cluster. -Defaults to the keep_alive value set by the search’s EQL search API request. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Timeout duration to wait for the request to finish. -Defaults to no timeout, meaning the request waits for complete search results. - -[discrete] -==== get_status -Get the async EQL status. -Get the current status for an async EQL search or a stored synchronous EQL search without returning results. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status[Endpoint documentation] -[source,ts] ----- -client.eql.getStatus({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the search. - -[discrete] -==== search -Get EQL search results. -Returns search results for an Event Query Language (EQL) query. -EQL assumes each document in a data stream or index corresponds to an event. 
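-
-For example, a simple event query might look like this (a sketch; the data stream and process name are placeholders):
-
-[source,ts]
-----
-// Find process events whose name matches cmd.exe.
-const result = await client.eql.search({
-  index: 'my-data-stream',
-  query: 'process where process.name == "cmd.exe"'
-})
-console.log(result)
-----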
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search[Endpoint documentation] -[source,ts] ----- -client.eql.search({ index, query }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: The name of the index to scope the operation -** *`query` (string)*: EQL query you wish to run. -** *`case_sensitive` (Optional, boolean)* -** *`event_category_field` (Optional, string)*: Field containing the event classification, such as process, file, or network. -** *`tiebreaker_field` (Optional, string)*: Field used to sort hits with the same timestamp in ascending order -** *`timestamp_field` (Optional, string)*: Field containing event timestamp. Default "@timestamp" -** *`fetch_size` (Optional, number)*: Maximum number of events to search at a time for sequence queries. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs. -** *`keep_alive` (Optional, string | -1 | 0)* -** *`keep_on_completion` (Optional, boolean)* -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)* -** *`allow_partial_search_results` (Optional, boolean)*: Allow query execution also in case of shard failures. -If true, the query will keep running and will return results based on the available shards. -For sequences, the behavior can be further refined using allow_partial_sequence_results -** *`allow_partial_sequence_results` (Optional, boolean)*: This flag applies only to sequences and has effect only if allow_partial_search_results=true. -If true, the sequence query will return results based on the available shards, ignoring the others. -If false, the sequence query will return successfully, but will always have empty results. -** *`size` (Optional, number)*: For basic queries, the maximum number of matching events to return. Defaults to 10 -** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. 
-** *`result_position` (Optional, Enum("tail" | "head"))* -** *`runtime_mappings` (Optional, Record)* -** *`max_samples_per_key` (Optional, number)*: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` -parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the -`max_samples_per_key` parameter. Pipes are not supported for sample queries. -** *`allow_no_indices` (Optional, boolean)* -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* -** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. - -[discrete] -=== esql -[discrete] -==== async_query -Run an async ES|QL query. -Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. - -The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query[Endpoint documentation] -[source,ts] ----- -client.esql.asyncQuery({ query }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. -** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. -** *`locale` (Optional, string)* -** *`params` (Optional, number | number | string | boolean | null[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. -** *`profile` (Optional, boolean)*: If provided and `true` the response will include an extra `profile` object -with information on how the query was executed. This information is for human debugging -and its format can change at any time but it can give some insight into the performance -of each part of the query. -** *`tables` (Optional, Record>)*: Tables to use with the LOOKUP operation. The top level key is the table -name and the next level key is the column name. 
-** *`include_ccs_metadata` (Optional, boolean)*: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters`
-object with information about the clusters that participated in the search along with info such as shards
-count.
-** *`delimiter` (Optional, string)*: The character to use between values within a CSV row.
-It is valid only for the CSV format.
-** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
-If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
-** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, for example `json` or `yaml`.
-** *`keep_alive` (Optional, string | -1 | 0)*: The period for which the query and its results are stored in the cluster.
-The default period is five days.
-When this period expires, the query and its results are deleted, even if the query is still ongoing.
-If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value.
-** *`keep_on_completion` (Optional, boolean)*: Indicates whether the query and its results are stored in the cluster.
-If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter.
-** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for the request to finish.
-By default, the request waits for 1 second for the query results.
-If the query completes during this period, results are returned.
-Otherwise, a query ID is returned that can later be used to retrieve the results.
-
-[discrete]
-==== async_query_delete
-Delete an async ES|QL query.
-If the query is still running, it is cancelled.
-Otherwise, the stored results are deleted.
-
-If the Elasticsearch security features are enabled, only the following users can use this API to delete a query:
-
-* The authenticated user that submitted the original query request
-* Users with the `cancel_task` cluster privilege
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete[Endpoint documentation]
-[source,ts]
-----
-client.esql.asyncQueryDelete({ id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The unique identifier of the query.
-A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. -A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. -** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. -If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. -** *`keep_alive` (Optional, string | -1 | 0)*: The period for which the query and its results are stored in the cluster. -When this period expires, the query and its results are deleted, even if the query is still ongoing. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for the request to finish. -By default, the request waits for complete query results. -If the request completes during the period specified in this parameter, complete query results are returned. -Otherwise, the response returns an `is_running` value of `true` and no results. - -[discrete] -==== async_query_stop -Stop async ES|QL query. - -This API interrupts the query execution and returns the results so far. -If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. - -{ref}/esql-async-query-stop-api.html[Endpoint documentation] -[source,ts] ----- -client.esql.asyncQueryStop({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The unique identifier of the query. -A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. -A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. -** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. -If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. - -[discrete] -==== query -Run an ES|QL query. -Get search results for an ES|QL (Elasticsearch query language) query. - -{ref}/esql-rest.html[Endpoint documentation] -[source,ts] ----- -client.esql.query({ query }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. -** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. 
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. -** *`locale` (Optional, string)* -** *`params` (Optional, number | number | string | boolean | null[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. -** *`profile` (Optional, boolean)*: If provided and `true` the response will include an extra `profile` object -with information on how the query was executed. This information is for human debugging -and its format can change at any time but it can give some insight into the performance -of each part of the query. -** *`tables` (Optional, Record>)*: Tables to use with the LOOKUP operation. The top level key is the table -name and the next level key is the column name. -** *`include_ccs_metadata` (Optional, boolean)*: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` -object with information about the clusters that participated in the search along with info such as shards -count. -** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, e.g. json, yaml. -** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format. -** *`drop_null_columns` (Optional, boolean)*: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? -Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. - -[discrete] -=== features -[discrete] -==== get_features -Get the features. -Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. -You can use this API to determine which feature states to include when taking a snapshot. -By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. - -A feature state includes one or more system indices necessary for a given feature to function. -In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. - -The features listed by this API are a combination of built-in features and features defined by plugins. -In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. 
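-
-As a sketch, the feature names returned here can be fed into the `feature_states` field of the create snapshot API (the repository and snapshot names are placeholders):
-
-[source,ts]
-----
-// List the available feature states, then snapshot only selected ones.
-const { features } = await client.features.getFeatures()
-console.log(features.map(f => f.name))
-
-await client.snapshot.create({
-  repository: 'my_repository',
-  snapshot: 'snapshot_1',
-  feature_states: ['security'],
-  include_global_state: false
-})
-----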
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features[Endpoint documentation]
-[source,ts]
-----
-client.features.getFeatures({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-
-[discrete]
-==== reset_features
-Reset the features.
-Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.
-
-WARNING: Intended for development and testing use only. Do not reset features on a production cluster.
-
-Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features.
-This deletes all state information stored in system indices.
-
-The response code is HTTP 200 if the state is successfully reset for all features.
-It is HTTP 500 if the reset operation failed for any feature.
-
-Note that select features might provide a way to reset particular system indices.
-Using this API resets all features, both those that are built-in and implemented as plugins.
-
-To list the features that will be affected, use the get features API.
-
-IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features[Endpoint documentation]
-[source,ts]
-----
-client.features.resetFeatures({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-
-[discrete]
-=== fleet
-[discrete]
-==== global_checkpoints
-Get global checkpoints.
-
-Get the current global checkpoints for an index.
-This API is designed for internal use by the Fleet server project.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet[Endpoint documentation]
-[source,ts]
-----
-client.fleet.globalCheckpoints({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string)*: A single index or index alias that resolves to a single index.
-** *`wait_for_advance` (Optional, boolean)*: A boolean value which controls whether to wait (until the timeout) for the global checkpoints
-to advance past the provided `checkpoints`.
-** *`wait_for_index` (Optional, boolean)*: A boolean value which controls whether to wait (until the timeout) for the target index to exist
-and all primary shards to be active. Can only be true when `wait_for_advance` is true.
-** *`checkpoints` (Optional, number[])*: A comma-separated list of previous global checkpoints. When used in combination with `wait_for_advance`,
-the API will only return once the global checkpoints advance past the checkpoints. Providing an empty list
-will cause Elasticsearch to immediately return the current global checkpoints.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for global checkpoints to advance past `checkpoints`.
-
-[discrete]
-==== msearch
-Run multiple Fleet searches.
-Run several Fleet searches with a single API request.
-The API follows the same structure as the multi search API.
-However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.
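-
-As an illustration, here is a minimal, untested sketch (the index name and checkpoint value are assumptions):
-
-[source,ts]
-----
-// Each search is a header/body pair, as in the regular multi search API.
-// `wait_for_checkpoints` defers the searches until the given checkpoint
-// is visible for search on the target shard.
-const response = await client.fleet.msearch({
-  index: 'my-index-000001', // assumed index name
-  wait_for_checkpoints: [2], // assumed checkpoint
-  searches: [
-    {}, // header
-    { query: { match_all: {} } } // body
-  ]
-})
-----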
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch[Endpoint documentation]
-[source,ts]
-----
-client.fleet.msearch({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index.
-** *`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])*
-** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar.
-** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen.
-** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response.
-** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute.
-** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node.
-** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method, i.e. if date filters are mandatory to match but the shard bounds and the query are disjoint.
-** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Indicates whether global term and document frequencies should be used when scoring returned documents.
-** *`rest_total_hits_as_int` (Optional, boolean)*: If true, `hits.total` is returned as an integer in the response. Defaults to false, which returns an object.
-** *`typed_keys` (Optional, boolean)*: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response.
-** *`wait_for_checkpoints` (Optional, number[])*: A comma-separated list of checkpoints. When configured, the search API will only be executed on a shard
-after the relevant checkpoint has become visible for search.
Defaults to an empty list which will cause
-Elasticsearch to immediately execute the search.
-** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures[shard failures]. If false, returns
-an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`
-which is true by default.
-
-[discrete]
-==== search
-Run a Fleet search.
-The purpose of the Fleet search API is to provide an API where the search will be run only
-after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search[Endpoint documentation]
-[source,ts]
-----
-client.fleet.search({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index.
-** *`aggregations` (Optional, Record)*
-** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*
-** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit.
-** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins.
-** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000
-hits using the from and size parameters. To page through more hits, use the
-search_after parameter.
-** *`highlight` (Optional, { encoder, fields })*
-** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If true, the exact
-number of hits is returned at the cost of some performance. If false, the
-response does not include the total number of hits matching the query.
-Defaults to 10,000 hits.
-** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices.
-** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field
-names matching these patterns in the hits.fields property of the response.
-** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are
-not included in the search results.
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* -** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* -** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null[])* -** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more -than 10,000 hits using the from and size parameters. To page through more -hits, use the search_after parameter. -** *`slice` (Optional, { field, id, max })* -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])* -** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These -fields are returned in the hits._source property of the search response. -** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names -matching these patterns in the hits.fields property of the response. -** *`suggest` (Optional, { text })* -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. If a query reaches this -limit, Elasticsearch terminates the query early. Elasticsearch collects documents -before sorting. Defaults to 0, which does not terminate query execution early. -** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. If no response -is received before the timeout expires, the request fails and returns an error. -Defaults to no timeout. -** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting. -** *`version` (Optional, boolean)*: If true, returns document version as part of a hit. 
-** *`seq_no_primary_term` (Optional, boolean)*: If true, returns sequence number and primary term of the last modification
-of each hit. See Optimistic concurrency control.
-** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified,
-no stored fields are included in the response. If this field is specified, the _source
-parameter defaults to false. You can pass _source: true to return both source fields
-and stored fields in the search response.
-** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
-cannot specify an `<index>` in the request path.
-** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
-precedence over mapped fields with the same name.
-** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics
-aggregation for its associated searches. You can retrieve these stats using
-the indices stats API.
-** *`allow_no_indices` (Optional, boolean)*
-** *`analyzer` (Optional, string)*
-** *`analyze_wildcard` (Optional, boolean)*
-** *`batched_reduce_size` (Optional, number)*
-** *`ccs_minimize_roundtrips` (Optional, boolean)*
-** *`default_operator` (Optional, Enum("and" | "or"))*
-** *`df` (Optional, string)*
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*
-** *`ignore_throttled` (Optional, boolean)*
-** *`ignore_unavailable` (Optional, boolean)*
-** *`lenient` (Optional, boolean)*
-** *`max_concurrent_shard_requests` (Optional, number)*
-** *`preference` (Optional, string)*
-** *`pre_filter_shard_size` (Optional, number)*
-** *`request_cache` (Optional, boolean)*
-** *`routing` (Optional, string)*
-** *`scroll` (Optional, string | -1 | 0)*
-** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*
-** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions.
-** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*
-** *`suggest_size` (Optional, number)*
-** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned.
-** *`typed_keys` (Optional, boolean)*
-** *`rest_total_hits_as_int` (Optional, boolean)*
-** *`_source_excludes` (Optional, string | string[])*
-** *`_source_includes` (Optional, string | string[])*
-** *`q` (Optional, string)*
-** *`wait_for_checkpoints` (Optional, number[])*: A comma-separated list of checkpoints. When configured, the search API will only be executed on a shard
-after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
-Elasticsearch to immediately execute the search.
-** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures[shard failures]. If false, returns
-an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`
-which is true by default.
-
-[discrete]
-=== graph
-[discrete]
-==== explore
-Explore graph analytics.
-Extract and summarize information about the documents and terms in an Elasticsearch data stream or index.
-The easiest way to understand the behavior of this API is to use the Graph UI to explore connections.
-An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph.
-Subsequent requests enable you to spider out from one or more vertices of interest.
-You can exclude vertices that have already been returned.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph[Endpoint documentation]
-[source,ts]
-----
-client.graph.explore({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: Name of the index.
-** *`connections` (Optional, { connections, query, vertices })*: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
-** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })*: Direct the Graph API how to build the graph.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
-** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])*: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
-** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
-** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for a response from each shard.
-If no response is received before the timeout expires, the request fails and returns an error.
-Defaults to no timeout.
-
-[discrete]
-=== ilm
-[discrete]
-==== delete_lifecycle
-Delete a lifecycle policy.
-You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle[Endpoint documentation]
-[source,ts]
-----
-client.ilm.deleteLifecycle({ policy })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`policy` (string)*: Identifier for the policy.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== explain_lifecycle
-Explain the lifecycle state.
-Get the current lifecycle status for one or more indices.
-For data streams, the API retrieves the current lifecycle status for the stream's backing indices.
-
-The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.
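-
-For instance, a minimal, untested sketch (the index name is an assumption):
-
-[source,ts]
-----
-// Inspect the lifecycle state of a single index.
-const response = await client.ilm.explainLifecycle({
-  index: 'my-index-000001' // assumed index name
-})
-// Each entry reports the current phase, action, and step for the index.
-console.log(response.indices)
-----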
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle[Endpoint documentation]
-[source,ts]
-----
-client.ilm.explainLifecycle({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: List of data streams, indices, and aliases to target. Supports wildcards (`*`).
-To target all data streams and indices, use `*` or `_all`.
-** *`only_errors` (Optional, boolean)*: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist.
-** *`only_managed` (Optional, boolean)*: Filters the returned indices to only indices that are managed by ILM.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== get_lifecycle
-Get lifecycle policies.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle[Endpoint documentation]
-[source,ts]
-----
-client.ilm.getLifecycle({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`policy` (Optional, string)*: Identifier for the policy.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== get_status
-Get the ILM status.
-
-Get the current index lifecycle management status.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status[Endpoint documentation]
-[source,ts]
-----
-client.ilm.getStatus()
-----
-
-
-[discrete]
-==== migrate_to_data_tiers
-Migrate to data tiers routing.
-Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers.
-Optionally, delete one legacy index template.
-Using node roles enables ILM to automatically move the indices between data tiers.
-
-Migrating away from custom node attribute routing can be performed manually.
-This API provides an automated way of performing three out of the four manual steps listed in the migration guide:
-
-. Stop setting the custom hot attribute on new indices.
-. Remove custom allocation settings from existing ILM policies.
-. Replace custom allocation settings from existing indices with the corresponding tier preference.
-
-ILM must be stopped before performing the migration.
-Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers[Endpoint documentation]
-[source,ts]
-----
-client.ilm.migrateToDataTiers({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`legacy_template_to_delete` (Optional, string)*
-** *`node_attribute` (Optional, string)*
-** *`dry_run` (Optional, boolean)*: If true, simulates the migration from node attribute-based allocation filters to data tiers, but does not perform the migration.
-This provides a way to retrieve the indices and ILM policies that need to be migrated.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-It can also be set to `-1` to indicate that the request should never time out.
-
-[discrete]
-==== move_to_step
-Move to a lifecycle step.
-Manually move an index into a specific step in the lifecycle policy and run that step.
-
-WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert-level API.
-
-You must specify both the current step and the step to be executed in the body of the request.
-The request will fail if the current step does not match the step currently running for the index.
-This is to prevent the index from being moved from an unexpected step into the next step.
-
-When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional.
-If only the phase is specified, the index will move to the first step of the first action in the target phase.
-If the phase and action are specified, the index will move to the first step of the specified action in the specified phase.
-Only actions specified in the ILM policy are considered valid.
-An index cannot move to a step that is not part of its policy.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step[Endpoint documentation]
-[source,ts]
-----
-client.ilm.moveToStep({ index, current_step, next_step })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the index whose lifecycle step is to change.
-** *`current_step` ({ action, name, phase })*: The step that the index is expected to be in.
-** *`next_step` ({ action, name, phase })*: The step that you want to run.
-
-[discrete]
-==== put_lifecycle
-Create or update a lifecycle policy.
-If the specified policy exists, it is replaced and the policy version is incremented.
-
-NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle[Endpoint documentation]
-[source,ts]
-----
-client.ilm.putLifecycle({ policy })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`policy` (string)*: Identifier for the policy.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== remove_policy
-Remove policies from an index.
-Remove the assigned lifecycle policies from an index or a data stream's backing indices.
-It also stops managing the indices.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy[Endpoint documentation]
-[source,ts]
-----
-client.ilm.removePolicy({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the index from which to remove the policy.
-
-[discrete]
-==== retry
-Retry a policy.
-Retry running the lifecycle policy for an index that is in the ERROR step.
-The API sets the policy back to the step where the error occurred and runs the step.
-Use the explain lifecycle state API to determine whether an index is in the ERROR step.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry[Endpoint documentation]
-[source,ts]
-----
-client.ilm.retry({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the indices (comma-separated) whose failed lifecycle step is to be retried.
-
-[discrete]
-==== start
-Start the ILM plugin.
-Start the index lifecycle management plugin if it is currently stopped.
-ILM is started automatically when the cluster is formed.
-Restarting ILM is necessary only when it has been stopped using the stop ILM API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start[Endpoint documentation]
-[source,ts]
-----
-client.ilm.start({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== stop
-Stop the ILM plugin.
-Halt all lifecycle management operations and stop the index lifecycle management plugin.
-This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.
-
-The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped.
-Use the get ILM status API to check whether ILM is running.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop[Endpoint documentation]
-[source,ts]
-----
-client.ilm.stop({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-=== indices
-[discrete]
-==== add_block
-Add an index block.
-
-Add an index block to an index.
-Index blocks limit the operations allowed on an index by blocking specific operation types.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block[Endpoint documentation]
-[source,ts]
-----
-client.indices.addBlock({ index, block })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: A list or wildcard expression of index names used to limit the request.
-By default, you must explicitly name the indices you are adding blocks to.
-To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
-You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API.
-** *`block` (Enum("metadata" | "read" | "read_only" | "write"))*: The block type to add to the index.
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
-This behavior applies even if the request targets other open indices.
-For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match.
-If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-It supports a list of values, such as `open,hidden`.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
-If the master node is not available before the timeout expires, the request fails and returns an error.
-It can also be set to `-1` to indicate that the request should never time out.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
-If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
-It can also be set to `-1` to indicate that the request should never time out.
-
-[discrete]
-==== analyze
-Get tokens from text analysis.
-The analyze API performs analysis on a text string and returns the resulting tokens.
-
-Generating an excessive amount of tokens may cause a node to run out of memory.
-The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.
-If more tokens than this limit are generated, an error occurs.
-The `_analyze` endpoint without a specified index will always use `10000` as its limit.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze[Endpoint documentation]
-[source,ts]
-----
-client.indices.analyze({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string)*: Index used to derive the analyzer.
-If specified, the `analyzer` or `field` parameter overrides this value.
-If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer.
-** *`analyzer` (Optional, string)*: The name of the analyzer that should be applied to the provided `text`.
-This could be a built-in analyzer, or an analyzer that’s been configured in the index.
-** *`attributes` (Optional, string[])*: Array of token attributes used to filter the output of the `explain` parameter.
-** *`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])*: Array of character filters used to preprocess characters before the tokenizer.
-** *`explain` (Optional, boolean)*: If `true`, the response includes token attributes and additional details.
-** *`field` (Optional, string)*: Field used to derive the analyzer.
-To use this parameter, you must specify an index.
-If specified, the `analyzer` parameter overrides this value.
-** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer. -** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token. -** *`text` (Optional, string | string[])*: Text to analyze. -If an array of strings is provided, it is analyzed as a multi-value field. -** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })*: Tokenizer to use to convert text into tokens. - -[discrete] -==== cancel_migrate_reindex -Cancel a migration reindex operation. 
-
-Cancel a migration reindex attempt for a data stream or index.
-[source,ts]
-----
-client.indices.cancelMigrateReindex({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: The index or data stream name.
-
-[discrete]
-==== clear_cache
-Clear the cache.
-Clear the cache of one or more indices.
-For data streams, the API clears the caches of the stream's backing indices.
-
-By default, the clear cache API clears all caches.
-To clear only specific caches, use the `fielddata`, `query`, or `request` parameters.
-To clear the cache only of specific fields, use the `fields` parameter.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache[Endpoint documentation]
-[source,ts]
-----
-client.indices.clearCache({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request.
-Supports wildcards (`*`).
-To target all data streams and indices, omit this parameter or use `*` or `_all`.
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
-This behavior applies even if the request targets other open indices.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
-If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-Supports a list of values, such as `open,hidden`.
-Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-** *`fielddata` (Optional, boolean)*: If `true`, clears the fields cache.
-Use the `fields` parameter to clear the cache of specific fields only.
-** *`fields` (Optional, string | string[])*: List of field names used to limit the `fielddata` parameter.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`query` (Optional, boolean)*: If `true`, clears the query cache.
-** *`request` (Optional, boolean)*: If `true`, clears the request cache.
-
-[discrete]
-==== clone
-Clone an index.
-Clone an existing index into a new index.
-Each original primary shard is cloned into a new primary shard in the new index.
-
-IMPORTANT: Elasticsearch does not apply index templates to the resulting index.
-The API also does not copy index metadata from the original index.
-Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.
-For example, if you clone a CCR follower index, the resulting clone will not be a follower index.
-
-The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.
-To set the number of replicas in the resulting index, configure these settings in the clone request.
-
-Cloning works as follows:
-
-* First, it creates a new target index with the same definition as the source index.
-* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
-* Finally, it recovers the target index as though it were a closed index which had just been re-opened. - -IMPORTANT: Indices can only be cloned if they meet the following requirements: - -* The index must be marked as read-only and have a cluster health status of green. -* The target index must not exist. -* The source index must have the same number of primary shards as the target index. -* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. - -The current write index on a data stream cannot be cloned. -In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. - -NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. - -**Monitor the cloning process** - -The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. - -The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. -At this point, all shards are in the state unassigned. -If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. - -Once the primary shard is allocated, it moves to state initializing, and the clone process begins. -When the clone operation completes, the shard will become active. -At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. - -**Wait for active shards** - -Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone[Endpoint documentation] -[source,ts] ----- -client.indices.clone({ index, target }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: Name of the source index to clone. -** *`target` (string)*: Name of the target index to create. -** *`aliases` (Optional, Record)*: Aliases for the resulting index. -** *`settings` (Optional, Record)*: Configuration options for the target index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -==== close -Close an index. -A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. -It is not possible to index documents or to search for documents in a closed index. -Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. 
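-
-For example, a minimal, untested sketch (the index name is an assumption):
-
-[source,ts]
-----
-// Block read and write operations on the index.
-await client.indices.close({ index: 'my-index-000001' }) // assumed index name
-----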
-
-When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index.
-The shards will then go through the normal recovery process.
-The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.
-
-You can open and close multiple indices.
-An error is thrown if the request explicitly refers to a missing index.
-This behaviour can be turned off using the `ignore_unavailable=true` parameter.
-
-By default, you must explicitly name the indices you are opening or closing.
-To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API.
-
-Closed indices consume a significant amount of disk space, which can cause problems in managed environments.
-Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close[Endpoint documentation]
-[source,ts]
-----
-client.indices.close({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: List or wildcard expression of index names used to limit the request.
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
-This behavior applies even if the request targets other open indices.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
-If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-Supports a list of values, such as `open,hidden`.
-Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
-Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
-
-[discrete]
-==== create
-Create an index.
-You can use the create index API to add a new index to an Elasticsearch cluster.
-When creating an index, you can specify the following:
-
-* Settings for the index.
-* Mappings for fields in the index.
-* Index aliases.
-
-**Wait for active shards**
-
-By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.
-The index creation response will indicate what happened.
-For example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.
-Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.
-These values simply indicate whether the operation completed before the timeout.
-If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.
-If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`).
-
-You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.
-Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create[Endpoint documentation]
-[source,ts]
-----
-client.indices.create({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: Name of the index you wish to create.
-** *`aliases` (Optional, Record)*: Aliases for the index.
-** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index. If specified, this mapping can include:
-- Field names
-- Field data types
-- Mapping parameters
-** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
-Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
-
-[discrete]
-==== create_data_stream
-Create a data stream.
- -You must have a matching index template with data stream enabled. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream[Endpoint documentation] -[source,ts] ----- -client.indices.createDataStream({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Name of the data stream, which must meet the following criteria: -Lowercase only; -Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; -Cannot start with `-`, `_`, `+`, or `.ds-`; -Cannot be `.` or `..`; -Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== create_from -Create an index from a source index. - -Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. -[source,ts] ----- -client.indices.createFrom({ source, dest }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`source` (string)*: The source index or data stream name -** *`dest` (string)*: The destination index or data stream name -** *`create_from` (Optional, { mappings_override, settings_override, remove_index_blocks })* - -[discrete] -==== data_streams_stats -Get data stream stats. - -Get statistics for one or more data streams. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1[Endpoint documentation] -[source,ts] ----- -client.indices.dataStreamsStats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string)*: List of data streams used to limit the request. -Wildcard expressions (`*`) are supported. -To target all data streams in a cluster, omit this parameter or use `*`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. -Supports a list of values, such as `open,hidden`. - -[discrete] -==== delete -Delete indices. -Deleting an index deletes its documents, shards, and metadata. -It does not delete related Kibana components, such as data views, visualizations, or dashboards. - -You cannot delete the current write index of a data stream. -To delete the index, you must roll over the data stream so a new write index is created. -You can then use the delete index API to delete the previous write index. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete[Endpoint documentation] -[source,ts] ----- -client.indices.delete({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of indices to delete. -You cannot specify index aliases. -By default, this parameter does not support wildcards (`*`) or `_all`. -To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. 
-This behavior applies even if the request targets other open indices.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
-If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-Supports a list of values, such as `open,hidden`.
-Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== delete_alias
-Delete an alias.
-Removes a data stream or index from an alias.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias[Endpoint documentation]
-[source,ts]
-----
-client.indices.deleteAlias({ index, name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: List of data streams or indices used to limit the request.
-Supports wildcards (`*`).
-** *`name` (string | string[])*: List of aliases to remove.
-Supports wildcards (`*`). To remove all aliases, use `*` or `_all`.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== delete_data_lifecycle
-Delete data stream lifecycles.
-Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle[Endpoint documentation]
-[source,ts]
-----
-client.indices.deleteDataLifecycle({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string | string[])*: A list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open)
-** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
-** *`timeout` (Optional, string | -1 | 0)*: Explicit timeout for the request
-
-[discrete]
-==== delete_data_stream
-Delete data streams.
-Deletes one or more data streams and their backing indices.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream[Endpoint documentation]
-[source,ts]
-----
-client.indices.deleteDataStream({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string | string[])*: List of data streams to delete. Wildcard (`*`) expressions are supported.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.
-
-[discrete]
-==== delete_index_template
-Delete an index template.
-The provided name may contain multiple template names separated by a comma. If multiple template
-names are specified then there is no wildcard support and the provided names should match completely with
-existing templates.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template[Endpoint documentation]
-[source,ts]
-----
-client.indices.deleteIndexTemplate({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string | string[])*: List of index template names used to limit the request. Wildcard (*) expressions are supported.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== delete_template
-Delete a legacy index template.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template[Endpoint documentation]
-[source,ts]
-----
-client.indices.deleteTemplate({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the legacy index template to delete.
-Wildcard (`*`) expressions are supported.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== disk_usage
-Analyze the index disk usage.
-Analyze the disk usage of each field of an index or data stream.
-This API might not support indices created in previous Elasticsearch versions.
-The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.
-
-NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API.
-Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate.
-The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage[Endpoint documentation]
-[source,ts]
-----
-client.indices.diskUsage({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: List of data streams, indices, and aliases used to limit the request.
-It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly.
-** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -** *`flush` (Optional, boolean)*: If `true`, the API performs a flush before analysis. -If `false`, the response may not include uncommitted data. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. -** *`run_expensive_tasks` (Optional, boolean)*: Analyzing field disk usage is resource-intensive. -To use the API, this parameter must be set to `true`. - -[discrete] -==== downsample -Downsample an index. -Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. -For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. -All documents within an hour interval are summarized and stored as a single document in the downsample index. - -NOTE: Only indices in a time series data stream are supported. -Neither field nor document level security can be defined on the source index. -The source index must be read only (`index.blocks.write: true`). - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample[Endpoint documentation] -[source,ts] ----- -client.indices.downsample({ index, target_index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: Name of the time series index to downsample. -** *`target_index` (string)*: Name of the index to create. -** *`config` (Optional, { fixed_interval })* - -[discrete] -==== exists -Check indices. -Check if one or more indices, index aliases, or data streams exist. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists[Endpoint documentation] -[source,ts] ----- -client.indices.exists({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, and aliases. Supports wildcards (`*`). -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. 
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. - -[discrete] -==== exists_alias -Check aliases. - -Check if one or more data stream or index aliases exist. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias[Endpoint documentation] -[source,ts] ----- -client.indices.existsAlias({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: List of aliases to check. Supports wildcards (`*`). -** *`index` (Optional, string | string[])*: List of data streams or indices used to limit the request. Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== exists_index_template -Check index templates. - -Check whether index templates exist. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template[Endpoint documentation] -[source,ts] ----- -client.indices.existsIndexTemplate({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: List of index template names used to limit the request. Wildcard (*) expressions are supported. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== exists_template -Check existence of index templates. -Get information about whether index templates exist. -Index templates define settings, mappings, and aliases that can be applied automatically to new indices. - -IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template[Endpoint documentation] -[source,ts] ----- -client.indices.existsTemplate({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: A list of index template names used to limit the request. -Wildcard (`*`) expressions are supported. 
-** *`flat_settings` (Optional, boolean)*: Indicates whether to use a flat format for the response. -** *`local` (Optional, boolean)*: Indicates whether to get information from the local node only. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. -To indicate that the request should never timeout, set it to `-1`. - -[discrete] -==== explain_data_lifecycle -Get the status for a data stream lifecycle. -Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle[Endpoint documentation] -[source,ts] ----- -client.indices.explainDataLifecycle({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: The name of the index to explain -** *`include_defaults` (Optional, boolean)*: indicates if the API should return the default values the system uses for the index's lifecycle -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master - -[discrete] -==== field_usage_stats -Get field usage stats. -Get field usage information for each shard and field of an index. -Field usage statistics are automatically captured when queries are running on a cluster. -A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. - -The response body reports the per-shard usage count of the data structures that back the fields in the index. -A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats[Endpoint documentation] -[source,ts] ----- -client.indices.fieldUsageStats({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List or wildcard expression of index names used to limit the request. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. -** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). 
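-
-For example, a minimal sketch of a field usage stats request (the index and field names here are hypothetical):
-[source,ts]
-----
-client.indices.fieldUsageStats({
-  index: 'my-index',
-  fields: ['timestamp', 'user.id']
-})
-----
-The response reports, per shard, how often the data structures backing each requested field were accessed.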
- -[discrete] -==== flush -Flush data streams or indices. -Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. -When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. -Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. - -After each operation has been flushed it is permanently stored in the Lucene index. -This may mean that there is no need to maintain an additional copy of it in the transaction log. -The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. - -It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. -If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush[Endpoint documentation] -[source,ts] ----- -client.indices.flush({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to flush. -Supports wildcards (`*`). -To flush all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`force` (Optional, boolean)*: If `true`, the request forces a flush even if there are no changes to commit to the index. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`wait_if_ongoing` (Optional, boolean)*: If `true`, the flush operation blocks until execution when another flush operation is running. -If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. - -[discrete] -==== forcemerge -Force a merge. -Perform the force merge operation on the shards of one or more indices. -For data streams, the API forces a merge on the shards of the stream's backing indices. - -Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. -Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. - -WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). 
-When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone".
-These soft-deleted documents are automatically cleaned up during regular segment merges.
-But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.
-So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.
-If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.
-
-**Blocks during a force merge**
-
-Calls to this API block until the merge is complete (unless request contains `wait_for_completion=false`).
-If the client connection is lost before completion then the force merge process will continue in the background.
-Any new requests to force merge the same indices will also block until the ongoing force merge is complete.
-
-**Running force merge asynchronously**
-
-If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.
-However, you cannot cancel this task as the force merge task is not cancelable.
-Elasticsearch creates a record of this task as a document at `_tasks/`.
-When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.
-
-**Force merging multiple indices**
-
-You can force merge multiple indices with a single request by targeting:
-
-* One or more data streams that contain multiple backing indices
-* Multiple indices
-* One or more aliases
-* All data streams and indices in a cluster
-
-Each targeted shard is force-merged separately using the `force_merge` threadpool.
-By default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time.
-If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel.
-
-Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one.
-
-**Data streams and time-based indices**
-
-Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.
-In these cases, each index only receives indexing traffic for a certain period of time.
-Once an index receives no more writes, its shards can be force-merged to a single segment.
-This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.
-For example:
-
-----
-POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
-----
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge[Endpoint documentation]
-[source,ts]
-----
-client.indices.forcemerge({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
-** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices.
(This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`flush` (Optional, boolean)*: Specify whether the index should be flushed after performing the operation (default: true) -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`max_num_segments` (Optional, number)*: The number of segments the index should be merged into (default: dynamic) -** *`only_expunge_deletes` (Optional, boolean)*: Specify whether the operation should only expunge deleted documents -** *`wait_for_completion` (Optional, boolean)*: Should the request wait until the force merge is completed. - -[discrete] -==== get -Get index information. -Get information about one or more indices. For data streams, the API returns information about the -stream’s backing indices. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get[Endpoint documentation] -[source,ts] ----- -client.indices.get({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, and index aliases used to limit the request. -Wildcard expressions (*) are supported. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only -missing or closed indices. This behavior applies even if the request targets other open indices. For example, -a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument -determines whether wildcard expressions match hidden data streams. Supports a list of values, -such as open,hidden. -** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format. -** *`ignore_unavailable` (Optional, boolean)*: If false, requests that target a missing index return an error. -** *`include_defaults` (Optional, boolean)*: If true, return all default settings in the response. -** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`features` (Optional, { name, description } | { name, description }[])*: Return only information on specified index features - -[discrete] -==== get_alias -Get aliases. -Retrieves information for one or more data stream or index aliases. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias[Endpoint documentation] -[source,ts] ----- -client.indices.getAlias({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string | string[])*: List of aliases to retrieve. -Supports wildcards (`*`). -To retrieve all aliases, omit this parameter or use `*` or `_all`. 
-** *`index` (Optional, string | string[])*: List of data streams or indices used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_data_lifecycle -Get data stream lifecycles. - -Get the data stream lifecycle configuration of one or more data streams. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle[Endpoint documentation] -[source,ts] ----- -client.indices.getDataLifecycle({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: List of data streams to limit the request. -Supports wildcards (`*`). -To target all data streams, omit this parameter or use `*` or `_all`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_data_lifecycle_stats -Get data stream lifecycle stats. -Get statistics about the data streams that are managed by a data stream lifecycle. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats[Endpoint documentation] -[source,ts] ----- -client.indices.getDataLifecycleStats() ----- - - -[discrete] -==== get_data_stream -Get data streams. - -Get information about one or more data streams. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream[Endpoint documentation] -[source,ts] ----- -client.indices.getDataStream({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string | string[])*: List of data stream names used to limit the request. -Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. -Supports a list of values, such as `open,hidden`. 
-** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the data stream.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`verbose` (Optional, boolean)*: Whether the maximum timestamp for each data stream should be calculated and returned.
-
-[discrete]
-==== get_field_mapping
-Get mapping definitions.
-Retrieves mapping definitions for one or more fields.
-For data streams, the API retrieves field mappings for the stream’s backing indices.
-
-This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping[Endpoint documentation]
-[source,ts]
-----
-client.indices.getFieldMapping({ fields })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`fields` (string | string[])*: List or wildcard expression of fields used to limit returned information.
-Supports wildcards (`*`).
-** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request.
-Supports wildcards (`*`).
-To target all data streams and indices, omit this parameter or use `*` or `_all`.
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
-This behavior applies even if the request targets other open indices.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
-If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-Supports a list of values, such as `open,hidden`.
-Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response.
-
-[discrete]
-==== get_index_template
-Get index templates.
-Get information about one or more index templates.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template[Endpoint documentation]
-[source,ts]
-----
-client.indices.getIndexTemplate({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string)*: List of index template names used to limit the request. Wildcard (*) expressions are supported.
-** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
-** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
-
-[discrete]
-==== get_mapping
-Get mapping definitions.
-For data streams, the API retrieves mappings for the stream’s backing indices.
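-
-For example, a minimal sketch that retrieves the mappings of a single, hypothetical index named `my-index`:
-[source,ts]
-----
-client.indices.getMapping({ index: 'my-index' })
-----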
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping[Endpoint documentation] -[source,ts] ----- -client.indices.getMapping({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_migrate_reindex_status -Get the migration reindexing status. - -Get the status of a migration reindex attempt for a data stream or index. -[source,ts] ----- -client.indices.getMigrateReindexStatus({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: The index or data stream name. - -[discrete] -==== get_settings -Get index settings. -Get setting information for one or more indices. -For data streams, it returns setting information for the stream's backing indices. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings[Endpoint documentation] -[source,ts] ----- -client.indices.getSettings({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit -the request. Supports wildcards (`*`). To target all data streams and -indices, omit this parameter or use `*` or `_all`. -** *`name` (Optional, string | string[])*: List or wildcard expression of settings to retrieve. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index -alias, or `_all` value targets only missing or closed indices. This -behavior applies even if the request targets other open indices. For -example, a request targeting `foo*,bar*` returns an error if an index -starts with foo but no index starts with `bar`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. 
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response.
-** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. If
-`false`, information is retrieved from the master node.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
-received before the timeout expires, the request fails and returns an
-error.
-
-[discrete]
-==== get_template
-Get index templates.
-Get information about one or more index templates.
-
-IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template[Endpoint documentation]
-[source,ts]
-----
-client.indices.getTemplate({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string | string[])*: List of index template names used to limit the request.
-Wildcard (`*`) expressions are supported.
-To return all index templates, omit this parameter or use a value of `_all` or `*`.
-** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
-** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== migrate_reindex
-Reindex legacy backing indices.
-
-Reindex all legacy backing indices for a data stream.
-This operation occurs in a persistent task.
-The persistent task ID is returned immediately and the reindexing work is completed in that task.
-[source,ts]
-----
-client.indices.migrateReindex({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`reindex` (Optional, { mode, source })*
-
-[discrete]
-==== migrate_to_data_stream
-Convert an index alias to a data stream.
-You must have a matching index template that is data stream enabled.
-The alias must meet the following criteria:
-The alias must have a write index;
-All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type;
-The alias must not have any filters;
-The alias must not use custom routing.
-If successful, the request removes the alias and creates a data stream with the same name.
-The indices for the alias become hidden backing indices for the stream.
-The write index for the alias becomes the write index for the stream.
-
-{ref}/data-streams.html[Endpoint documentation]
-[source,ts]
-----
-client.indices.migrateToDataStream({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: Name of the index alias to convert to a data stream.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== modify_data_stream
-Update data streams.
-Performs one or more data stream modification actions in a single atomic operation.
-
-{ref}/data-streams.html[Endpoint documentation]
-[source,ts]
-----
-client.indices.modifyDataStream({ actions })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`actions` ({ add_backing_index, remove_backing_index }[])*: Actions to perform.
-
-[discrete]
-==== open
-Open a closed index.
-For data streams, the API opens any closed backing indices.
-
-A closed index is blocked for read/write operations and does not allow all operations that opened indices allow.
-It is not possible to index documents or to search for documents in a closed index.
-This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.
-
-When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index.
-The shards will then go through the normal recovery process.
-The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.
-
-You can open and close multiple indices.
-An error is thrown if the request explicitly refers to a missing index.
-This behavior can be turned off by using the `ignore_unavailable=true` parameter.
-
-By default, you must explicitly name the indices you are opening or closing.
-To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
-This setting can also be changed with the cluster update settings API.
-
-Closed indices consume a significant amount of disk-space which can cause problems in managed environments.
-Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
-
-Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open[Endpoint documentation]
-[source,ts]
-----
-client.indices.open({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: List of data streams, indices, and aliases used to limit the request.
-Supports wildcards (`*`).
-By default, you must explicitly name the indices you are using to limit the request.
-To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
-You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API.
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
-This behavior applies even if the request targets other open indices.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
-If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-Supports a list of values, such as `open,hidden`.
-Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -==== promote_data_stream -Promote a data stream. -Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. - -With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. -These data streams can't be rolled over in the local cluster. -These replicated data streams roll over only if the upstream data stream rolls over. -In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. - -NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. -If this is missing, the data stream will not be able to roll over until a matching index template is created. -This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. - -{ref}/data-streams.html[Endpoint documentation] -[source,ts] ----- -client.indices.promoteDataStream({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the data stream -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== put_alias -Create or update an alias. -Adds a data stream or index to an alias. - -{ref}/indices-aliases.html[Endpoint documentation] -[source,ts] ----- -client.indices.putAlias({ index, name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of data streams or indices to add. -Supports wildcards (`*`). -Wildcard patterns that match both data streams and indices return an error. -** *`name` (string)*: Alias to update. -If the alias doesn’t exist, the request creates it. -Index alias names support date math. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query used to limit documents the alias can access. 
-** *`index_routing` (Optional, string)*: Value used to route indexing operations to a specific shard.
-If specified, this overwrites the `routing` value for indexing operations.
-Data stream aliases don’t support this parameter.
-** *`is_write_index` (Optional, boolean)*: If `true`, sets the write index or data stream for the alias.
-If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests.
-If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index.
-Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream.
-** *`routing` (Optional, string)*: Value used to route indexing and search operations to a specific shard.
-Data stream aliases don’t support this parameter.
-** *`search_routing` (Optional, string)*: Value used to route search operations to a specific shard.
-If specified, this overwrites the `routing` value for search operations.
-Data stream aliases don’t support this parameter.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== put_data_lifecycle
-Update data stream lifecycles.
-Update the data stream lifecycle of the specified data streams.
-
-{ref}/data-streams-put-lifecycle.html[Endpoint documentation]
-[source,ts]
-----
-client.indices.putDataLifecycle({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string | string[])*: List of data streams used to limit the request.
-Supports wildcards (`*`).
-To target all data streams use `*` or `_all`.
-** *`data_retention` (Optional, string | -1 | 0)*: If defined, every document added to this data stream will be stored at least for this time frame.
-Any time after this duration the document could be deleted.
-When empty, every document in this data stream will be stored indefinitely.
-** *`downsampling` (Optional, { rounds })*: The downsampling configuration to execute for the managed backing index after rollover.
-** *`enabled` (Optional, boolean)*: If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle
-that's disabled (enabled: `false`) will have no effect on the data stream.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match.
-Supports a list of values, such as `open,hidden`.
-Valid values are: `all`, `hidden`, `open`, `closed`, `none`.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
-received before the timeout expires, the request fails and returns an
-error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== put_index_template
-Create or update an index template.
-Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
-
-Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name.
-Index templates are applied during data stream or index creation.
-For data streams, these settings and mappings are applied when the stream's backing indices are created.
-Settings and mappings specified in a create index API request override any settings or mappings specified in an index template.
-Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.
-
-You can use C-style `/* *\/` block comments in index templates.
-You can include comments anywhere in the request body, except before the opening curly bracket.
-
-**Multiple matching templates**
-
-If multiple index templates match the name of a new index or data stream, the template with the highest priority is used.
-
-Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.
-
-**Composing aliases, mappings, and settings**
-
-When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.
-Any mappings, settings, or aliases from the parent index template are merged in next.
-Finally, any configuration on the index request itself is merged.
-Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.
-If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.
-This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.
-If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.
-If an entry already exists with the same key, then it is overwritten by the new definition.
-
-{ref}/indices-put-template.html[Endpoint documentation]
-[source,ts]
-----
-client.indices.putIndexTemplate({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: Index or template name
-** *`index_patterns` (Optional, string | string[])*: Array of wildcard expressions used to match the names of data streams and indices during creation.
-** *`composed_of` (Optional, string[])*: An ordered list of component template names.
-Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
-** *`template` (Optional, { aliases, mappings, settings, lifecycle })*: Template to be applied.
-It may optionally include an `aliases`, `mappings`, or `settings` configuration.
-** *`data_stream` (Optional, { hidden, allow_custom_routing })*: If this object is included, the template is used to create data streams and their backing indices.
-Supports an empty object.
-Data streams require a matching index template with a `data_stream` object.
-** *`priority` (Optional, number)*: Priority to determine index template precedence when a new data stream or index is created.
-The index template with the highest priority is chosen.
-If no priority is specified the template is treated as though it is of priority 0 (lowest priority).
-This number is not automatically generated by Elasticsearch.
-** *`version` (Optional, number)*: Version number used to manage index templates externally.
-This number is not automatically generated by Elasticsearch.
-External systems can use these version numbers to simplify template management.
-To unset a version, replace the template without specifying one.
-** *`_meta` (Optional, Record)*: Optional user metadata about the index template.
-It may have any contents.
-It is not automatically generated or used by Elasticsearch.
-This user-defined object is stored in the cluster state, so keeping it short is preferable.
-To unset the metadata, replace the template without specifying it.
-** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting.
-If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`.
-If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created.
-** *`ignore_missing_component_templates` (Optional, string[])*: The configuration option `ignore_missing_component_templates` can be used when an index template
-references a component template that might not exist.
-** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template
-that uses deprecated components, Elasticsearch will emit a deprecation warning.
-** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing index templates.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`cause` (Optional, string)*: User-defined reason for creating or updating the index template.
-
-[discrete]
-==== put_mapping
-Update field mappings.
-Add new fields to an existing data stream or index.
-You can also use this API to change the search settings of existing fields and add new properties to existing object fields.
-For data streams, these changes are applied to all backing indices by default.
-
-**Add multi-fields to an existing field**
-
-Multi-fields let you index the same field in different ways.
-You can use this API to update the `fields` mapping parameter and enable multi-fields for an existing field.
-WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.
-You can populate the new multi-field with the update by query API.
-
-**Change supported mapping parameters for an existing field**
-
-The documentation for each mapping parameter indicates whether you can update it for an existing field using this API.
-For example, you can use the update mapping API to update the `ignore_above` parameter.
-
-**Change the mapping of an existing field**
-
-Except for supported mapping parameters, you can't change the mapping or field type of an existing field.
-Changing an existing field could invalidate data that's already indexed.
-
-If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.
-If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.
-
-**Rename a field**
-
-Renaming a field would invalidate data already indexed under the old field name.
-Instead, add an alias field to create an alternate field name.
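-
-A minimal sketch of that alias approach, assuming a hypothetical `my-index` with an existing `user_id` field:
-[source,ts]
-----
-client.indices.putMapping({
-  index: 'my-index',
-  properties: {
-    // queries against `user_identifier` resolve to the existing `user_id` field
-    user_identifier: { type: 'alias', path: 'user_id' }
-  }
-})
-----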
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping[Endpoint documentation] -[source,ts] ----- -client.indices.putMapping({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. -** *`date_detection` (Optional, boolean)*: Controls whether dynamic date detection is enabled. -** *`dynamic` (Optional, Enum("strict" | "runtime" | true | false))*: Controls whether new fields are added dynamically. -** *`dynamic_date_formats` (Optional, string[])*: If date detection is enabled then new string fields are checked -against 'dynamic_date_formats' and if the value matches then -a new date field is added instead of string. -** *`dynamic_templates` (Optional, Record[])*: Specify dynamic templates for the mapping. -** *`_field_names` (Optional, { enabled })*: Control whether field names are enabled for the index. -** *`_meta` (Optional, Record)*: A mapping type can have custom meta data associated with it. These are -not used at all by Elasticsearch, but can be used to store -application-specific metadata. -** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. -** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: - -- Field name -- Field data type -- Mapping parameters -** *`_routing` (Optional, { required })*: Enable making a routing value required on indexed documents. -** *`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })*: Control whether the _source field is enabled on the index. -** *`runtime` (Optional, Record)*: Mapping of runtime fields for the index. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`write_index_only` (Optional, boolean)*: If `true`, the mappings are applied only to the current write index for the target. - -[discrete] -==== put_settings -Update index settings. -Changes dynamic index settings in real time. -For data streams, index setting changes are applied to all backing indices by default. - -To revert a setting to the default value, use a null value. -The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. 
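-
-For instance, a minimal sketch (the index name and setting value are hypothetical) that tightens the refresh interval and then reverts it to the default with a null value:
-[source,ts]
-----
-client.indices.putSettings({
-  index: 'my-index',
-  settings: { refresh_interval: '30s' }
-})
-
-// a null value reverts the setting to its default
-client.indices.putSettings({
-  index: 'my-index',
-  settings: { refresh_interval: null }
-})
-----
-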
-To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.
-
-NOTE: You can only define new analyzers on closed indices.
-To add an analyzer, you must close the index, define the analyzer, and reopen the index.
-You cannot close the write index of a data stream.
-To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.
-Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.
-This affects searches and any new data added to the stream after the rollover.
-However, it does not affect the data stream's backing indices or their existing data.
-To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings[Endpoint documentation]
-[source,ts]
----
-client.indices.putSettings({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit
-the request. Supports wildcards (`*`). To target all data streams and
-indices, omit this parameter or use `*` or `_all`.
-** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index
-alias, or `_all` value targets only missing or closed indices. This
-behavior applies even if the request targets other open indices. For
-example, a request targeting `foo*,bar*` returns an error if an index
-starts with `foo` but no index starts with `bar`.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target
-data streams, this argument determines whether wildcard expressions match
-hidden data streams. Supports a list of values, such as
-`open,hidden`.
-** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
-received before the timeout expires, the request fails and returns an
-error.
-** *`preserve_existing` (Optional, boolean)*: If `true`, existing index settings remain unchanged.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. 
If no response is received before the - timeout expires, the request fails and returns an error. - -[discrete] -==== put_template -Create or update an index template. -Index templates define settings, mappings, and aliases that can be applied automatically to new indices. -Elasticsearch applies templates to new indices based on an index pattern that matches the index name. - -IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. - -Composable templates always take precedence over legacy templates. -If no composable template matches a new index, matching legacy templates are applied according to their order. - -Index templates are only applied during index creation. -Changes to index templates do not affect existing indices. -Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. - -You can use C-style `/* *\/` block comments in index templates. -You can include comments anywhere in the request body, except before the opening curly bracket. - -**Indices matching multiple templates** - -Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. -The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. -NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template[Endpoint documentation] -[source,ts] ----- -client.indices.putTemplate({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the template -** *`aliases` (Optional, Record)*: Aliases for the index. -** *`index_patterns` (Optional, string | string[])*: Array of wildcard expressions used to match the names -of indices during creation. -** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index. -** *`order` (Optional, number)*: Order in which Elasticsearch applies this template if index -matches multiple templates. - -Templates with lower 'order' values are merged first. Templates with higher -'order' values are merged later, overriding templates with lower values. -** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index. 
-** *`version` (Optional, number)*: Version number used to manage index templates externally. This number -is not automatically generated by Elasticsearch. -To unset a version, replace the template without specifying one. -** *`create` (Optional, boolean)*: If true, this request cannot replace or update existing index templates. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is -received before the timeout expires, the request fails and returns an error. -** *`cause` (Optional, string)* - -[discrete] -==== recovery -Get index recovery information. -Get information about ongoing and completed shard recoveries for one or more indices. -For data streams, the API returns information for the stream's backing indices. - -All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. - -Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. -When a shard recovery completes, the recovered shard is available for search and indexing. - -Recovery automatically occurs during the following processes: - -* When creating an index for the first time. -* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. -* Creation of new replica shard copies from the primary. -* Relocation of a shard copy to a different node in the same cluster. -* A snapshot restore operation. -* A clone, shrink, or split operation. - -You can determine the cause of a shard recovery using the recovery or cat recovery APIs. - -The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. -It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. -This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery[Endpoint documentation] -[source,ts] ----- -client.indices.recovery({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`active_only` (Optional, boolean)*: If `true`, the response only includes ongoing shard recoveries. -** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. - -[discrete] -==== refresh -Refresh an index. -A refresh makes recent operations performed on one or more indices available for search. -For data streams, the API runs the refresh operation on the stream’s backing indices. - -By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. -You can change this default interval with the `index.refresh_interval` setting. - -Refresh requests are synchronous and do not return a response until the refresh operation completes. - -Refreshes are resource-intensive. 
-To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. - -If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. -This option ensures the indexing operation waits for a periodic refresh before running the search. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh[Endpoint documentation] -[source,ts] ----- -client.indices.refresh({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. - -[discrete] -==== reload_search_analyzers -Reload search analyzers. -Reload an index's search analyzers and their resources. -For data streams, the API reloads search analyzers and resources for the stream's backing indices. - -IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. - -You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. -To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. - -NOTE: This API does not perform a reload for each shard of an index. -Instead, it performs a reload for each node containing index shards. -As a result, the total shard count returned by the API can differ from the number of index shards. -Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. -This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers[Endpoint documentation] -[source,ts] ----- -client.indices.reloadSearchAnalyzers({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: A list of index names to reload analyzers for -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes the `_all` string or when no indices have been specified.)
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed).
-
-[discrete]
-==== resolve_cluster
-Resolve the cluster.
-
-Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included.
-If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.
-
-This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.
-
-You use the same index expression with this endpoint as you would for cross-cluster search.
-Index and cluster exclusions are also supported with this endpoint.
-
-For each cluster in the index expression, information is returned about:
-
-* Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint.
-* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.
-* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
-* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
-* Cluster version information, including the Elasticsearch server version.
-
-For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.
-Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.
-
-**Note on backwards compatibility**
-
-The ability to query without an index expression was added in version 8.18, so when
-querying remote clusters older than that, the local cluster will send the index
-expression `dummy*` to those remote clusters. Thus, if any errors occur, you may see a reference
-to that index expression even though you didn't request it. If it causes a problem, you can
-instead include an index expression like `*:*` to bypass the issue.
-
-**Advantages of using this endpoint before a cross-cluster search**
-
-You may want to exclude a cluster or index from a search when:
-
-* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.
-* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search.
-* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.)
-* A remote cluster is an older version that does not support the feature you want to use in your search.
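-
-As an illustration, a sketch of such a pre-flight check that keeps only the clusters worth searching, assuming the response is keyed by cluster alias with the `connected` and `matching_indices` fields described above (the index pattern is hypothetical):
-
-[source,ts]
----
-// Resolve the expression first, then search only viable clusters.
-const info = await client.indices.resolveCluster({ name: 'logs-*,cluster*:logs-*' })
-const searchable = Object.entries(info)
-  .filter(([, cluster]) => cluster.connected !== false && cluster.matching_indices !== false)
-  .map(([alias]) => alias)
-console.log(searchable) // e.g. [ '(local)', 'cluster_one' ]
----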
-
-**Test availability of remote clusters**
-
-The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.
-The remote cluster may be available, while the local cluster is not currently connected to it.
-
-You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters, for example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`.
-The `connected` field in the response will indicate whether it was successful.
-If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster[Endpoint documentation]
-[source,ts]
----
-client.indices.resolveCluster({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string | string[])*: A list of names or index patterns for the indices, aliases, and data streams to resolve.
-Resources on remote clusters can be specified using the ``:`` syntax.
-Index and cluster exclusions (e.g., `-cluster1:*`) are also supported.
-If no index expression is specified, information about all remote clusters configured on the local cluster
-is returned without doing any index matching.
-** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing
-or closed indices. This behavior applies even if the request targets other open indices. For example, a request
-targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
-NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
-options to the `_resolve/cluster` API endpoint that takes no index expression.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
-If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-Supports a list of values, such as `open,hidden`.
-Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
-options to the `_resolve/cluster` API endpoint that takes no index expression.
-** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen.
-NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
-options to the `_resolve/cluster` API endpoint that takes no index expression.
-** *`ignore_unavailable` (Optional, boolean)*: If false, the request returns an error if it targets a missing or closed index.
-NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
-options to the `_resolve/cluster` API endpoint that takes no index expression.
-** *`timeout` (Optional, string | -1 | 0)*: The maximum time to wait for remote clusters to respond. -If a remote cluster does not respond within this timeout period, the API response -will show the cluster as not connected and include an error message that the -request timed out. - -The default timeout is unset and the query can take -as long as the networking layer is configured to wait for remote clusters that are -not responding (typically 30 seconds). - -[discrete] -==== resolve_index -Resolve indices. -Resolve the names and/or index patterns for indices, aliases, and data streams. -Multiple patterns and remote clusters are supported. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index[Endpoint documentation] -[source,ts] ----- -client.indices.resolveIndex({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. -Resources on remote clusters can be specified using the ``:`` syntax. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - -[discrete] -==== rollover -Roll over to a new index. -TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. - -The rollover API creates a new index for a data stream or index alias. -The API behavior depends on the rollover target. - -**Roll over a data stream** - -If you roll over a data stream, the API creates a new write index for the stream. -The stream's previous write index becomes a regular backing index. -A rollover also increments the data stream's generation. - -**Roll over an index alias with a write index** - -TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. -Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. - -If an index alias points to multiple indices, one of the indices must be a write index. -The rollover API creates a new write index for the alias with `is_write_index` set to `true`. -The API also `sets is_write_index` to `false` for the previous write index. - -**Roll over an index alias with one index** - -If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. - -NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. - -**Increment index names for an alias** - -When you roll over an index alias, you can specify a name for the new index. 
-If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. -For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. -This number is always six characters and zero-padded, regardless of the previous index's name. - -If you use an index alias for time series data, you can use date math in the index name to track the rollover date. -For example, you can create an alias that points to an index named ``. -If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. -If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover[Endpoint documentation] -[source,ts] ----- -client.indices.rollover({ alias }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`alias` (string)*: Name of the data stream or index alias to roll over. -** *`new_index` (Optional, string)*: Name of the index to create. -Supports date math. -Data streams do not support this parameter. -** *`aliases` (Optional, Record)*: Aliases for the target index. -Data streams do not support this parameter. -** *`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })*: Conditions for the rollover. -If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. -If this parameter is not specified, Elasticsearch performs the rollover unconditionally. -If conditions are specified, at least one of them must be a `max_*` condition. -The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. -** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index. -If specified, this mapping can include field names, field data types, and mapping paramaters. -** *`settings` (Optional, Record)*: Configuration options for the index. -Data streams do not support this parameter. -** *`dry_run` (Optional, boolean)*: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -==== segments -Get index segments. -Get low-level information about the Lucene segments in index shards. -For data streams, the API returns information about the stream's backing indices. 
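-
-As a quick sketch, the response can be used to count the segments held by each shard copy, assuming each shard id maps to an array of shard copies (the index name is hypothetical):
-
-[source,ts]
----
-// Count the Lucene segments held by each shard copy.
-const response = await client.indices.segments({ index: 'my-index-000001' })
-for (const [indexName, indexSegments] of Object.entries(response.indices ?? {})) {
-  for (const [shardId, copies] of Object.entries(indexSegments.shards)) {
-    for (const copy of copies) {
-      console.log(indexName, shardId, Object.keys(copy.segments).length)
-    }
-  }
-}
----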
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments[Endpoint documentation] -[source,ts] ----- -client.indices.segments({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. - -[discrete] -==== shard_stores -Get index shard stores. -Get store information about replica shards in one or more indices. -For data streams, the API retrieves store information for the stream's backing indices. - -The index shard stores API returns the following information: - -* The node on which each replica shard exists. -* The allocation ID for each replica shard. -* A unique ID for each replica shard. -* Any errors encountered while opening the shard index or from an earlier failure. - -By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores[Endpoint documentation] -[source,ts] ----- -client.indices.shardStores({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all -value targets only missing or closed indices. This behavior applies even if the request -targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, -this argument determines whether wildcard expressions match hidden data streams. -** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. -** *`status` (Optional, Enum("green" | "yellow" | "red" | "all") | Enum("green" | "yellow" | "red" | "all")[])*: List of shard health statuses used to limit the request. - -[discrete] -==== shrink -Shrink an index. -Shrink an index into a new index with fewer primary shards. - -Before you can shrink an index: - -* The index must be read-only. -* A copy of every shard in the index must reside on the same node. -* The index must have a green health status. - -To make shard allocation easier, we recommend you also remove the index's replica shards. 
-You can later re-add replica shards as part of the shrink operation.
-
-The requested number of primary shards in the target index must be a factor of the number of shards in the source index.
-For example, an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards, and an index with 15 primary shards can be shrunk into 5, 3 or 1.
-If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard.
-Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.
-
-The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.
-
-A shrink operation:
-
-* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
-* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time-consuming process. Also, if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk, since hard links do not work across disks.
-* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `index.routing.allocation.initial_recovery._id` index setting.
-
-IMPORTANT: Indices can only be shrunk if they satisfy the following requirements:
-
-* The target index must not exist.
-* The source index must have more primary shards than the target index.
-* The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
-* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of docs that can fit into a single shard.
-* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink[Endpoint documentation]
-[source,ts]
----
-client.indices.shrink({ index, target })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: Name of the source index to shrink.
-** *`target` (string)*: Name of the target index to create.
-** *`aliases` (Optional, Record)*: The key is the alias name.
-Index alias names support date math.
-** *`settings` (Optional, Record)*: Configuration options for the target index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
-Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
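-
-A minimal sketch of the full flow, assuming a source index whose primary shard count is divisible by the target count (all names are hypothetical; resetting `index.blocks.write` to null mirrors the read-only requirement above):
-
-[source,ts]
----
-// 1. Make the source index read-only.
-await client.indices.addBlock({ index: 'my-index-000001', block: 'write' })
-
-// 2. Shrink it into a target index with a single primary shard,
-//    clearing the write block on the target at the same time.
-await client.indices.shrink({
-  index: 'my-index-000001',
-  target: 'my-shrunken-index',
-  settings: {
-    'index.number_of_shards': 1,
-    'index.blocks.write': null
-  }
-})
----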
-
-[discrete]
-==== simulate_index_template
-Simulate an index.
-Get the index configuration that would be applied to the specified index from an existing index template.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template[Endpoint documentation]
-[source,ts]
----
-client.indices.simulateIndexTemplate({ name })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: Name of the index to simulate
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
-
-[discrete]
-==== simulate_template
-Simulate an index template.
-Get the index configuration that would be applied by a particular index template.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template[Endpoint documentation]
-[source,ts]
----
-client.indices.simulateTemplate({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string)*: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit
-this parameter and specify the template configuration in the request body.
-** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting.
-If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`.
-If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created.
-** *`index_patterns` (Optional, string | string[])*: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation.
-** *`composed_of` (Optional, string[])*: An ordered list of component template names.
-Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
-** *`template` (Optional, { aliases, mappings, settings, lifecycle })*: Template to be applied.
-It may optionally include an `aliases`, `mappings`, or `settings` configuration.
-** *`data_stream` (Optional, { hidden, allow_custom_routing })*: If this object is included, the template is used to create data streams and their backing indices.
-Supports an empty object.
-Data streams require a matching index template with a `data_stream` object.
-** *`priority` (Optional, number)*: Priority to determine index template precedence when a new data stream or index is created.
-The index template with the highest priority is chosen.
-If no priority is specified the template is treated as though it is of priority 0 (lowest priority).
-This number is not automatically generated by Elasticsearch.
-** *`version` (Optional, number)*: Version number used to manage index templates externally.
-This number is not automatically generated by Elasticsearch.
-** *`_meta` (Optional, Record)*: Optional user metadata about the index template.
-May have any contents.
-This map is not automatically generated by Elasticsearch.
-** *`ignore_missing_component_templates` (Optional, string[])*: The configuration option ignore_missing_component_templates can be used when an index template
-references a component template that might not exist.
-** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template
-that uses deprecated components, Elasticsearch will emit a deprecation warning.
-** *`create` (Optional, boolean)*: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
-
-[discrete]
-==== split
-Split an index.
-Split an index into a new index with more primary shards.
-Before you can split an index:
-
-* The index must be read-only.
-* The cluster health status must be green.
-
-You can make an index read-only with the following request using the add index block API:
-
----
-PUT /my_source_index/_block/write
----
-
-The current write index on a data stream cannot be split.
-In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.
-
-The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.
-The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.
-For instance, a 5-shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.
-
-A split operation:
-
-* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
-* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
-* Hashes all documents again, after low-level files are created, to delete documents that belong to a different shard.
-* Recovers the target index as though it were a closed index which had just been re-opened.
-
-IMPORTANT: Indices can only be split if they satisfy the following requirements:
-
-* The target index must not exist.
-* The source index must have fewer primary shards than the target index.
-* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
-* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split[Endpoint documentation]
-[source,ts]
----
-client.indices.split({ index, target })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: Name of the source index to split.
-** *`target` (string)*: Name of the target index to create.
-** *`aliases` (Optional, Record)*: Aliases for the resulting index.
-** *`settings` (Optional, Record)*: Configuration options for the target index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
-Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
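-
-A minimal sketch under the same read-only precondition (all names are hypothetical):
-
-[source,ts]
----
-// Make the source read-only, then split one primary shard into two.
-await client.indices.addBlock({ index: 'my-source-index', block: 'write' })
-await client.indices.split({
-  index: 'my-source-index',
-  target: 'my-split-index',
-  settings: { 'index.number_of_shards': 2 }
-})
----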
-
-[discrete]
-==== stats
-Get index statistics.
-For data streams, the API retrieves statistics for the stream's backing indices.
-
-By default, the returned statistics are index-level with `primaries` and `total` aggregations.
-`primaries` are the values for only the primary shards.
-`total` are the accumulated values for both primary and replica shards.
-
-To get shard-level statistics, set the `level` parameter to `shards`.
-
-NOTE: When moving to another node, the shard-level statistics for a shard are cleared.
-Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats[Endpoint documentation]
-[source,ts]
----
-client.indices.stats({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`metric` (Optional, string | string[])*: Limit the information returned to the specific metrics.
-** *`index` (Optional, string | string[])*: A list of index names; use `_all` or an empty string to perform the operation on all indices.
-** *`completion_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata and suggest statistics.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument
-determines whether wildcard expressions match hidden data streams. Supports a list of values,
-such as `open,hidden`.
-** *`fielddata_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata statistics.
-** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics.
-** *`forbid_closed_indices` (Optional, boolean)*: If true, statistics are not collected from closed indices.
-** *`groups` (Optional, string | string[])*: List of search groups to include in the search statistics.
-** *`include_segment_file_sizes` (Optional, boolean)*: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested).
-** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory.
-** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Indicates whether statistics are aggregated at the cluster, index, or shard level.
-
-[discrete]
-==== update_aliases
-Create or update an alias.
-Adds a data stream or index to an alias.
-
-{ref}/indices-aliases.html[Endpoint documentation]
-[source,ts]
----
-client.indices.updateAliases({ ... })
----
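-
-For instance, a minimal sketch of atomically swapping an alias from an old index to a new one, assuming the standard `add` and `remove` action shapes (all names are hypothetical):
-
-[source,ts]
----
-// Both actions are applied atomically: `my-alias` never points at
-// zero indices or at both indices at once.
-await client.indices.updateAliases({
-  actions: [
-    { remove: { index: 'my-index-000001', alias: 'my-alias' } },
-    { add: { index: 'my-index-000002', alias: 'my-alias' } }
-  ]
-})
----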
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`actions` (Optional, { add_backing_index, remove_backing_index }[])*: Actions to perform.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== validate_query
-Validate a query.
-Validates a query without running it.
-
-{ref}/search-validate.html[Endpoint documentation]
-[source,ts]
----
-client.indices.validateQuery({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search.
-Supports wildcards (`*`).
-To search all data streams or indices, omit this parameter or use `*` or `_all`.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The query to validate, defined with the Query DSL.
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
-This behavior applies even if the request targets other open indices.
-** *`all_shards` (Optional, boolean)*: If `true`, the validation is executed on all shards instead of one random shard per index.
-** *`analyzer` (Optional, string)*: Analyzer to use for the query string.
-This parameter can only be used when the `q` query string parameter is specified.
-** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed.
-** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`.
-** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string.
-This parameter can only be used when the `q` query string parameter is specified.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
-If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-Supports a list of values, such as `open,hidden`.
-Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-** *`explain` (Optional, boolean)*: If `true`, the response returns detailed information if an error has occurred.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
-** *`rewrite` (Optional, boolean)*: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed.
-** *`q` (Optional, string)*: Query in the Lucene query string syntax.
-
-[discrete]
-=== inference
-[discrete]
-==== delete
-Delete an inference endpoint
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete[Endpoint documentation]
-[source,ts]
----
-client.inference.delete({ inference_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`inference_id` (string)*: The inference identifier.
-** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type
-** *`dry_run` (Optional, boolean)*: When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned.
-** *`force` (Optional, boolean)*: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields.
-
-[discrete]
-==== get
-Get an inference endpoint
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get[Endpoint documentation]
-[source,ts]
----
-client.inference.get({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type
-** *`inference_id` (Optional, string)*: The inference Id
-
-[discrete]
-==== inference
-Perform inference on the service.
-
-This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
-It returns a response with the results of the tasks.
-The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
-
-NOTE: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference[Endpoint documentation]
-[source,ts]
----
-client.inference.inference({ inference_id, input })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`inference_id` (string)*: The unique identifier for the inference endpoint.
-** *`input` (string | string[])*: The text on which you want to perform the inference task.
-It can be a single string or an array.
-NOTE: Inference endpoints for the `completion` task type currently only support a single string as input.
-** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of inference task that the model performs.
-** *`query` (Optional, string)*: The query input, which is required only for the `rerank` task.
-It is not required for other tasks.
-** *`task_settings` (Optional, User-defined value)*: Task settings for the individual inference request.
-These settings are specific to the task type you specified and override the task settings specified when initializing the service.
-** *`timeout` (Optional, string | -1 | 0)*: The amount of time to wait for the inference request to complete.
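-
-As an illustration, a minimal sketch of a `rerank` request (the endpoint identifier is hypothetical):
-
-[source,ts]
----
-// Rerank two documents against a query with a `rerank` endpoint.
-const result = await client.inference.inference({
-  inference_id: 'my-rerank-endpoint',
-  task_type: 'rerank',
-  query: 'Which document is about Elasticsearch?',
-  input: ['Kibana is a dashboard tool.', 'Elasticsearch stores JSON documents.']
-})
-console.log(result)
----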
-
-[discrete]
-==== put
-Create an inference endpoint.
-When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-After creating the endpoint, wait for the model deployment to complete before using it.
-To verify the deployment status, use the get trained model statistics API.
-Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
-Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
-
-IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
-For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
-However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put[Endpoint documentation]
-[source,ts]
----
-client.inference.put({ inference_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`inference_id` (string)*: The inference Id
-** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type
-** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })*
-
-[discrete]
-==== stream_inference
-Perform streaming inference.
-Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.
-This API works only with the completion task type.
-
-IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
-
-This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference[Endpoint documentation]
-[source,ts]
----
-client.inference.streamInference({ inference_id, input })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`inference_id` (string)*: The unique identifier for the inference endpoint.
-** *`input` (string | string[])*: The text on which you want to perform the inference task.
-It can be a single string or an array.
-
-NOTE: Inference endpoints for the completion task type currently only support a single string as input.
-** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of task that the model performs.
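-
-As an illustration, a minimal sketch of consuming the stream with this client, assuming the transport's `asStream` option yields the raw server-sent events as a readable stream (the endpoint identifier is hypothetical):
-
-[source,ts]
----
-// Stream a completion and print chunks as they arrive.
-const stream = await client.inference.streamInference({
-  inference_id: 'my-completion-endpoint',
-  task_type: 'completion',
-  input: 'Summarize shard allocation in one sentence.'
-}, { asStream: true })
-
-for await (const chunk of stream) {
-  process.stdout.write(chunk.toString()) // raw server-sent event data
-}
----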
- -[discrete] -==== unified_inference -Perform inference on the service using the Unified Schema -[source,ts] ----- -client.inference.unifiedInference({ inference_id, messages }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`inference_id` (string)*: The inference Id -** *`messages` ({ content, role, tool_call_id, tool_calls }[])*: A list of objects representing the conversation. -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type -** *`model` (Optional, string)*: The ID of the model to use. -** *`max_completion_tokens` (Optional, number)*: The upper bound limit for the number of tokens that can be generated for a completion request. -** *`stop` (Optional, string[])*: A sequence of strings to control when the model should stop generating additional tokens. -** *`temperature` (Optional, float)*: The sampling temperature to use. -** *`tool_choice` (Optional, string | { type, function })*: Controls which tool is called by the model. -** *`tools` (Optional, { type, function }[])*: A list of tools that the model can call. -** *`top_p` (Optional, float)*: Nucleus sampling, an alternative to sampling with temperature. -** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete. - -[discrete] -==== update -Update an inference endpoint. - -Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. - -IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. -For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. -However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update[Endpoint documentation] -[source,ts] ----- -client.inference.update({ inference_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`inference_id` (string)*: The unique identifier of the inference endpoint. -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of inference task that the model performs. -** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })* - -[discrete] -=== ingest -[discrete] -==== delete_geoip_database -Delete GeoIP database configurations. - -Delete one or more IP geolocation database configurations. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database[Endpoint documentation] -[source,ts] ----- -client.ingest.deleteGeoipDatabase({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string | string[])*: A list of geoip database configurations to delete -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== delete_ip_location_database -Delete IP geolocation database configurations. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database[Endpoint documentation] -[source,ts] ----- -client.ingest.deleteIpLocationDatabase({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string | string[])*: A list of IP location database configurations. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -A value of `-1` indicates that the request should never time out. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -A value of `-1` indicates that the request should never time out. - -[discrete] -==== delete_pipeline -Delete pipelines. -Delete one or more ingest pipelines. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline[Endpoint documentation] -[source,ts] ----- -client.ingest.deletePipeline({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. -To delete all ingest pipelines in a cluster, use a value of `*`. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== geo_ip_stats -Get GeoIP statistics. -Get download statistics for GeoIP2 databases that are used with the GeoIP processor. - -{ref}/geoip-processor.html[Endpoint documentation] -[source,ts] ----- -client.ingest.geoIpStats() ----- - - -[discrete] -==== get_geoip_database -Get GeoIP database configurations. - -Get information about one or more IP geolocation database configurations. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database[Endpoint documentation] -[source,ts] ----- -client.ingest.getGeoipDatabase({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string | string[])*: A list of database configuration IDs to retrieve. -Wildcard (`*`) expressions are supported. -To get all database configurations, omit this parameter or use `*`. - -[discrete] -==== get_ip_location_database -Get IP geolocation database configurations. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database[Endpoint documentation] -[source,ts] ----- -client.ingest.getIpLocationDatabase({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve. -Wildcard (`*`) expressions are supported. -To get all database configurations, omit this parameter or use `*`. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. 
-A value of `-1` indicates that the request should never time out. - -[discrete] -==== get_pipeline -Get pipelines. - -Get information about one or more ingest pipelines. -This API returns a local reference of the pipeline. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline[Endpoint documentation] -[source,ts] ----- -client.ingest.getPipeline({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: List of pipeline IDs to retrieve. -Wildcard (`*`) expressions are supported. -To get all ingest pipelines, omit this parameter or use `*`. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`summary` (Optional, boolean)*: Return pipelines without their definitions (default: false) - -[discrete] -==== processor_grok -Run a grok processor. -Extract structured fields out of a single text field within a document. -You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. -A grok pattern is like a regular expression that supports aliased expressions that can be reused. - -{ref}/grok-processor.html[Endpoint documentation] -[source,ts] ----- -client.ingest.processorGrok() ----- - - -[discrete] -==== put_geoip_database -Create or update a GeoIP database configuration. - -Refer to the create or update IP geolocation database configuration API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database[Endpoint documentation] -[source,ts] ----- -client.ingest.putGeoipDatabase({ id, name, maxmind }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: ID of the database configuration to create or update. -** *`name` (string)*: The provider-assigned name of the IP geolocation database to download. -** *`maxmind` ({ account_id })*: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. -At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== put_ip_location_database -Create or update an IP geolocation database configuration. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database[Endpoint documentation] -[source,ts] ----- -client.ingest.putIpLocationDatabase({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The database configuration identifier. -** *`configuration` (Optional, { name, maxmind, ipinfo })* -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -A value of `-1` indicates that the request should never time out. 
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. -If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. -A value of `-1` indicates that the request should never time out. - -[discrete] -==== put_pipeline -Create or update a pipeline. -Changes made using this API take effect immediately. - -{ref}/ingest.html[Endpoint documentation] -[source,ts] ----- -client.ingest.putPipeline({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: ID of the ingest pipeline to create or update. -** *`_meta` (Optional, Record)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. -** *`description` (Optional, string)*: Description of the ingest pipeline. -** *`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. -** *`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. -** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. -** *`deprecated` (Optional, boolean)*: Marks this ingest pipeline as deprecated. -When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -** *`if_version` (Optional, number)*: Required version for optimistic concurrency control for pipeline updates - -[discrete] -==== simulate -Simulate a pipeline. - -Run an ingest pipeline against a set of provided documents. 
-You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate[Endpoint documentation]
-[source,ts]
----
-client.ingest.simulate({ docs })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline.
-** *`id` (Optional, string)*: The pipeline to test.
-If you don't specify a `pipeline` in the request body, this parameter is required.
-** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*: The pipeline to test.
-If you don't specify the `pipeline` request path parameter, this parameter is required.
-If you specify both this and the request path parameter, the API only uses the request path parameter.
-** *`verbose` (Optional, boolean)*: If `true`, the response includes output data for each processor in the executed pipeline.
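-
-For example, a minimal sketch that tests an inline pipeline definition against a single sample document (the field name and processor choice are placeholders):
-
-[source,ts]
----
-// Hedged sketch: run one sample document through an inline pipeline and
-// print the per-processor output enabled by `verbose`.
-const response = await client.ingest.simulate({
-  docs: [{ _source: { message: 'hello world' } }],
-  pipeline: {
-    processors: [{ uppercase: { field: 'message' } }]
-  },
-  verbose: true
-})
-console.log(response.docs)
----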
-
-[discrete]
-=== license
-[discrete]
-==== delete
-Delete the license.
-
-When the license expires, your subscription level reverts to Basic.
-
-If the operator privileges feature is enabled, only operator users can use this API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete[Endpoint documentation]
-[source,ts]
----
-client.license.delete({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== get
-Get license information.
-
-Get information about your Elastic license including its type, its status, when it was issued, and when it expires.
-
-NOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response.
-If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get[Endpoint documentation]
-[source,ts]
----
-client.license.get({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`accept_enterprise` (Optional, boolean)*: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility.
-This parameter is deprecated and will always be set to true in 8.x.
-** *`local` (Optional, boolean)*: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node.
-
-[discrete]
-==== get_basic_status
-Get the basic license status.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status[Endpoint documentation]
-[source,ts]
----
-client.license.getBasicStatus()
----
-
-
-[discrete]
-==== get_trial_status
-Get the trial status.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status[Endpoint documentation]
-[source,ts]
----
-client.license.getTrialStatus()
----
-
-
-[discrete]
-==== post
-Update the license.
-
-You can update your license at runtime without shutting down your nodes.
-License updates take effect immediately.
-If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response.
-You must then re-submit the API request with the acknowledge parameter set to true.
-
-NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license.
-If the operator privileges feature is enabled, only operator users can use this API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post[Endpoint documentation]
-[source,ts]
----
-client.license.post({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })*
-** *`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])*: A sequence of one or more JSON documents containing the license information.
-** *`acknowledge` (Optional, boolean)*: Specifies whether you acknowledge the license changes.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== post_start_basic
-Start a basic license.
-
-Start an indefinite basic license, which gives access to all the basic features.
-
-NOTE: In order to start a basic license, you must not currently have a basic license.
-
-If the basic license does not support all of the features that are available with your current license, however, you are notified in the response.
-You must then re-submit the API request with the `acknowledge` parameter set to `true`.
-
-To check the status of your basic license, use the get basic license API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic[Endpoint documentation]
-[source,ts]
----
-client.license.postStartBasic({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`acknowledge` (Optional, boolean)*: Whether the user has acknowledged the acknowledge messages (default: false).
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
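-
-For example, a minimal sketch that starts a basic license and acknowledges feature changes up front (acknowledging blindly is an assumption; you may prefer to inspect the acknowledge messages first):
-
-[source,ts]
----
-// Hedged sketch: start an indefinite basic license, accepting feature changes.
-const response = await client.license.postStartBasic({ acknowledge: true })
-console.log(response.basic_was_started)
----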
-
-[discrete]
-==== post_start_trial
-Start a trial.
-Start a 30-day trial, which gives access to all subscription features.
-
-NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version.
-For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension.
-
-To check the status of your trial, use the get trial status API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial[Endpoint documentation]
-[source,ts]
----
-client.license.postStartTrial({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`acknowledge` (Optional, boolean)*: Whether the user has acknowledged the acknowledge messages (default: false).
-** *`type_query_string` (Optional, string)*
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-
-[discrete]
-=== logstash
-[discrete]
-==== delete_pipeline
-Delete a Logstash pipeline.
-Delete a pipeline that is used for Logstash Central Management.
-If the request succeeds, you receive an empty response with an appropriate status code.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline[Endpoint documentation]
-[source,ts]
----
-client.logstash.deletePipeline({ id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: An identifier for the pipeline.
-
-[discrete]
-==== get_pipeline
-Get Logstash pipelines.
-Get pipelines that are used for Logstash Central Management.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline[Endpoint documentation]
-[source,ts]
----
-client.logstash.getPipeline({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (Optional, string | string[])*: A list of pipeline identifiers.
-
-[discrete]
-==== put_pipeline
-Create or update a Logstash pipeline.
-
-Create a pipeline that is used for Logstash Central Management.
-If the specified pipeline exists, it is replaced.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline[Endpoint documentation]
-[source,ts]
----
-client.logstash.putPipeline({ id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: An identifier for the pipeline.
-** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*
-
-[discrete]
-=== migration
-[discrete]
-==== deprecations
-Get deprecation information.
-Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
-
-TIP: This API is designed for indirect use by the Upgrade Assistant.
-You are strongly recommended to use the Upgrade Assistant.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations[Endpoint documentation]
-[source,ts]
----
-client.migration.deprecations({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string)*: Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported.
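-
-For example, a minimal sketch that checks a single index for deprecated settings (the index name is a placeholder):
-
-[source,ts]
----
-// Hedged sketch: list deprecation warnings scoped to one index.
-const response = await client.migration.deprecations({ index: 'my-index' })
-console.log(response.index_settings)
----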
-
-[discrete]
-==== get_feature_upgrade_status
-Get feature migration information.
-Version upgrades sometimes require changes to how features store configuration information and data in system indices.
-Check which features need to be migrated and the status of any migrations that are in progress.
-
-TIP: This API is designed for indirect use by the Upgrade Assistant.
-You are strongly recommended to use the Upgrade Assistant.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status[Endpoint documentation]
-[source,ts]
----
-client.migration.getFeatureUpgradeStatus()
----
-
-
-[discrete]
-==== post_feature_upgrade
-Start the feature migration.
-Version upgrades sometimes require changes to how features store configuration information and data in system indices.
-This API starts the automatic migration process.
-
-Some functionality might be temporarily unavailable during the migration process.
-
-TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status[Endpoint documentation]
-[source,ts]
----
-client.migration.postFeatureUpgrade()
----
-
-
-[discrete]
-=== ml
-[discrete]
-==== clear_trained_model_deployment_cache
-Clear trained model deployment cache.
-
-Cache will be cleared on all nodes where the trained model is assigned.
-A trained model deployment may have an inference cache enabled.
-As requests are handled by each allocated node, their responses may be cached on that individual node.
-Calling this API clears the caches without restarting the deployment.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache[Endpoint documentation]
-[source,ts]
----
-client.ml.clearTrainedModelDeploymentCache({ model_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`model_id` (string)*: The unique identifier of the trained model.
-
-[discrete]
-==== close_job
-Close anomaly detection jobs.
-
-A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
-When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data.
-If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling the stop datafeed API with the same timeout and force parameters as the close job request.
-When a datafeed that has a specified end date stops, it automatically closes its associated job.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job[Endpoint documentation]
-[source,ts]
----
-client.ml.closeJob({ job_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier.
-** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter.
-** *`force` (Optional, boolean)*: Refer to the description for the `force` query parameter.
-** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter.
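-
-For example, a minimal sketch that closes every job in a group (the group name is a placeholder) and caps the wait time:
-
-[source,ts]
----
-// Hedged sketch: close all jobs in a group, waiting at most five minutes.
-await client.ml.closeJob({
-  job_id: 'my-job-group',
-  timeout: '5m'
-})
----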
-
-[discrete]
-==== delete_calendar
-Delete a calendar.
-
-Remove all scheduled events from a calendar, then delete it.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar[Endpoint documentation]
-[source,ts]
----
-client.ml.deleteCalendar({ calendar_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
-
-[discrete]
-==== delete_calendar_event
-Delete events from a calendar.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event[Endpoint documentation]
-[source,ts]
----
-client.ml.deleteCalendarEvent({ calendar_id, event_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
-** *`event_id` (string)*: Identifier for the scheduled event.
-You can obtain this identifier by using the get calendar events API.
-
-[discrete]
-==== delete_calendar_job
-Delete anomaly jobs from a calendar.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job[Endpoint documentation]
-[source,ts]
----
-client.ml.deleteCalendarJob({ calendar_id, job_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
-** *`job_id` (string | string[])*: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups.
-
-[discrete]
-==== delete_data_frame_analytics
-Delete a data frame analytics job.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics[Endpoint documentation]
-[source,ts]
----
-client.ml.deleteDataFrameAnalytics({ id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: Identifier for the data frame analytics job.
-** *`force` (Optional, boolean)*: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job.
-** *`timeout` (Optional, string | -1 | 0)*: The time to wait for the job to be deleted.
-
-[discrete]
-==== delete_datafeed
-Delete a datafeed.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed[Endpoint documentation]
-[source,ts]
----
-client.ml.deleteDatafeed({ datafeed_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
-** *`force` (Optional, boolean)*: Use to forcefully delete a started datafeed; this method is quicker than stopping and deleting the datafeed.
-
-[discrete]
-==== delete_expired_data
-Delete expired ML data.
-
-Delete all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted.
-You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data[Endpoint documentation]
-[source,ts]
----
-client.ml.deleteExpiredData({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (Optional, string)*: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression.
-** *`requests_per_second` (Optional, float)*: The desired requests per second for the deletion processes. The default behavior is no throttling.
-** *`timeout` (Optional, string | -1 | 0)*: The maximum time that the underlying delete processes can run before they are canceled.
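-
-For example, a minimal sketch that prunes expired results for all jobs, throttled to a modest rate (the rate and timeout values are arbitrary placeholders):
-
-[source,ts]
----
-// Hedged sketch: delete expired data across all jobs with throttling.
-await client.ml.deleteExpiredData({
-  requests_per_second: 1000,
-  timeout: '8h'
-})
----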
-
-[discrete]
-==== delete_filter
-Delete a filter.
-
-If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter[Endpoint documentation]
-[source,ts]
----
-client.ml.deleteFilter({ filter_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`filter_id` (string)*: A string that uniquely identifies a filter.
-
-[discrete]
-==== delete_forecast
-Delete forecasts from a job.
-
-By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast[Endpoint documentation]
-[source,ts]
----
-client.ml.deleteForecast({ job_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job.
-** *`forecast_id` (Optional, string)*: A list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all` or `*`, the API deletes all forecasts from the job.
-** *`allow_no_forecasts` (Optional, boolean)*: Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error.
-** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error.
-
-[discrete]
-==== delete_job
-Delete an anomaly detection job.
-
-All job configuration, model state and results are deleted.
-It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job[Endpoint documentation]
-[source,ts]
----
-client.ml.deleteJob({ job_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job.
-** *`force` (Optional, boolean)*: Use to forcefully delete an opened job; this method is quicker than closing and deleting the job.
-** *`delete_user_annotations` (Optional, boolean)*: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset.
-** *`wait_for_completion` (Optional, boolean)*: Specifies whether the request should return immediately or wait until the job deletion completes.
-
-[discrete]
-==== delete_model_snapshot
-Delete a model snapshot.
- -You cannot delete the active model snapshot. To delete that snapshot, first -revert to a different one. To identify the active model snapshot, refer to -the `model_snapshot_id` in the results from the get jobs API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot[Endpoint documentation] -[source,ts] ----- -client.ml.deleteModelSnapshot({ job_id, snapshot_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`snapshot_id` (string)*: Identifier for the model snapshot. - -[discrete] -==== delete_trained_model -Delete an unreferenced trained model. - -The request deletes a trained inference model that is not referenced by an ingest pipeline. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model[Endpoint documentation] -[source,ts] ----- -client.ml.deleteTrainedModel({ model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. -** *`force` (Optional, boolean)*: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== delete_trained_model_alias -Delete a trained model alias. - -This API deletes an existing model alias that refers to a trained model. If -the model alias is missing or refers to a model other than the one identified -by the `model_id`, this API returns an error. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias[Endpoint documentation] -[source,ts] ----- -client.ml.deleteTrainedModelAlias({ model_alias, model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_alias` (string)*: The model alias to delete. -** *`model_id` (string)*: The trained model ID to which the model alias refers. - -[discrete] -==== estimate_model_memory -Estimate job model memory usage. - -Make an estimation of the memory usage for an anomaly detection job model. -The estimate is based on analysis configuration details for the job and cardinality -estimates for the fields it references. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory[Endpoint documentation] -[source,ts] ----- -client.ml.estimateModelMemory({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`analysis_config` (Optional, { bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })*: For a list of the properties that you can specify in the -`analysis_config` component of the body of this API. -** *`max_bucket_cardinality` (Optional, Record)*: Estimates of the highest cardinality in a single bucket that is observed -for influencer fields over the time period that the job analyzes data. -To produce a good answer, values must be provided for all influencer -fields. Providing values for fields that are not listed as `influencers` -has no effect on the estimation. -** *`overall_cardinality` (Optional, Record)*: Estimates of the cardinality that is observed for fields over the whole -time period that the job analyzes data. 
To produce a good answer, values must be provided for fields referenced in the `by_field_name`, `over_field_name` and `partition_field_name` of any detectors. Providing values for other fields has no effect on the estimation. It can be omitted from the request if no detectors have a `by_field_name`, `over_field_name` or `partition_field_name`.
-
-[discrete]
-==== evaluate_data_frame
-Evaluate data frame analytics.
-
-The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame[Endpoint documentation]
-[source,ts]
----
-client.ml.evaluateDataFrame({ evaluation, index })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`evaluation` ({ classification, outlier_detection, regression })*: Defines the type of evaluation you want to perform.
-** *`index` (string)*: Defines the `index` in which the evaluation will be performed.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index.
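-
-For example, a minimal sketch that evaluates outlier detection results in a destination index (the index name, ground truth field, and results field are placeholders):
-
-[source,ts]
----
-// Hedged sketch: compare the ground truth field against the predicted
-// outlier probability written by a data frame analytics job.
-const response = await client.ml.evaluateDataFrame({
-  index: 'my-analytics-dest',
-  evaluation: {
-    outlier_detection: {
-      actual_field: 'is_outlier',
-      predicted_probability_field: 'ml.outlier_score'
-    }
-  }
-})
-console.log(response.outlier_detection)
----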
-
-[discrete]
-==== explain_data_frame_analytics
-Explain data frame analytics config.
-
-This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided:
-* which fields are included or not in the analysis and why,
-* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for the `model_memory_limit` setting later on.
-If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics[Endpoint documentation]
-[source,ts]
----
-client.ml.explainDataFrameAnalytics({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (Optional, string)*: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
-** *`source` (Optional, { index, query, runtime_mappings, _source })*: The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified.
-** *`dest` (Optional, { index, results_field })*: The destination configuration, consisting of index and optionally results_field (ml by default).
-** *`analysis` (Optional, { classification, outlier_detection, regression })*: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression.
-** *`description` (Optional, string)*: A description of the job.
-** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting.
-** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself.
-** *`analyzed_fields` (Optional, { includes, excludes })*: Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis.
-** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node.
-
-[discrete]
-==== flush_job
-Force buffered data to be processed.
-The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. Both flush and close operations are similar; however, the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job[Endpoint documentation]
-[source,ts]
----
-client.ml.flushJob({ job_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job.
-** *`advance_time` (Optional, string | Unit)*: Refer to the description for the `advance_time` query parameter.
-** *`calc_interim` (Optional, boolean)*: Refer to the description for the `calc_interim` query parameter.
-** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter.
-** *`skip_time` (Optional, string | Unit)*: Refer to the description for the `skip_time` query parameter.
-** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter.
-
-[discrete]
-==== forecast
-Predict future behavior of a time series.
-
-Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast[Endpoint documentation]
-[source,ts]
----
-client.ml.forecast({ job_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs.
-** *`duration` (Optional, string | -1 | 0)*: Refer to the description for the `duration` query parameter.
-** *`expires_in` (Optional, string | -1 | 0)*: Refer to the description for the `expires_in` query parameter.
-** *`max_model_memory` (Optional, string)*: Refer to the description for the `max_model_memory` query parameter.
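-
-For example, a minimal sketch that forecasts the next day of behavior for an open job (the job ID is a placeholder) and keeps the forecast for two weeks:
-
-[source,ts]
----
-// Hedged sketch: the job must already be open; duration and retention
-// values here are arbitrary placeholders.
-const response = await client.ml.forecast({
-  job_id: 'my-job',
-  duration: '1d',
-  expires_in: '14d'
-})
-console.log(response.forecast_id)
----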
-
-[discrete]
-==== get_buckets
-Get anomaly detection job results for buckets.
-The API presents a chronological view of the records, grouped by bucket.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets[Endpoint documentation]
-[source,ts]
----
-client.ml.getBuckets({ job_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job.
-** *`timestamp` (Optional, string | Unit)*: The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets.
-** *`anomaly_score` (Optional, number)*: Refer to the description for the `anomaly_score` query parameter.
-** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter.
-** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter.
-** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter.
-** *`expand` (Optional, boolean)*: Refer to the description for the `expand` query parameter.
-** *`page` (Optional, { from, size })*
-** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter.
-** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter.
-** *`from` (Optional, number)*: Skips the specified number of buckets.
-** *`size` (Optional, number)*: Specifies the maximum number of buckets to obtain.
-
-[discrete]
-==== get_calendar_events
-Get info about events in calendars.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events[Endpoint documentation]
-[source,ts]
----
-client.ml.getCalendarEvents({ calendar_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`calendar_id` (string)*: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
-** *`end` (Optional, string | Unit)*: Specifies to get events with timestamps earlier than this time.
-** *`from` (Optional, number)*: Skips the specified number of events.
-** *`job_id` (Optional, string)*: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`.
-** *`size` (Optional, number)*: Specifies the maximum number of events to obtain.
-** *`start` (Optional, string | Unit)*: Specifies to get events with timestamps after this time.
-
-[discrete]
-==== get_calendars
-Get calendar configuration info.
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars[Endpoint documentation] -[source,ts] ----- -client.ml.getCalendars({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`calendar_id` (Optional, string)*: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. -** *`page` (Optional, { from, size })*: This object is supported only when you omit the calendar identifier. -** *`from` (Optional, number)*: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. -** *`size` (Optional, number)*: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. - -[discrete] -==== get_categories -Get anomaly detection job results for categories. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories[Endpoint documentation] -[source,ts] ----- -client.ml.getCategories({ job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`category_id` (Optional, string)*: Identifier for the category, which is unique in the job. If you specify -neither the category ID nor the partition_field_value, the API returns -information about all categories. If you specify only the -partition_field_value, it returns information about all categories for -the specified partition. -** *`page` (Optional, { from, size })*: Configures pagination. -This parameter has the `from` and `size` properties. -** *`from` (Optional, number)*: Skips the specified number of categories. -** *`partition_field_value` (Optional, string)*: Only return categories for the specified partition. -** *`size` (Optional, number)*: Specifies the maximum number of categories to obtain. - -[discrete] -==== get_data_frame_analytics -Get data frame analytics job configuration info. -You can get information for multiple data frame analytics jobs in a single -API request by using a list of data frame analytics jobs or a -wildcard expression. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics[Endpoint documentation] -[source,ts] ----- -client.ml.getDataFrameAnalytics({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: Identifier for the data frame analytics job. If you do not specify this -option, the API returns information for the first hundred data frame -analytics jobs. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -. Contains wildcard expressions and there are no data frame analytics -jobs that match. -. Contains the `_all` string or no identifiers and there are no matches. -. Contains wildcard expressions and there are only partial matches. - -The default value returns an empty data_frame_analytics array when there -are no matches and the subset of results when there are partial matches. -If this parameter is `false`, the request returns a 404 status code when -there are no matches or only partial matches. -** *`from` (Optional, number)*: Skips the specified number of data frame analytics jobs. -** *`size` (Optional, number)*: Specifies the maximum number of data frame analytics jobs to obtain. 
-** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on -retrieval. This allows the configuration to be in an acceptable format to -be retrieved and then added to another cluster. - -[discrete] -==== get_data_frame_analytics_stats -Get data frame analytics jobs usage info. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats[Endpoint documentation] -[source,ts] ----- -client.ml.getDataFrameAnalyticsStats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: Identifier for the data frame analytics job. If you do not specify this -option, the API returns information for the first hundred data frame -analytics jobs. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -. Contains wildcard expressions and there are no data frame analytics -jobs that match. -. Contains the `_all` string or no identifiers and there are no matches. -. Contains wildcard expressions and there are only partial matches. - -The default value returns an empty data_frame_analytics array when there -are no matches and the subset of results when there are partial matches. -If this parameter is `false`, the request returns a 404 status code when -there are no matches or only partial matches. -** *`from` (Optional, number)*: Skips the specified number of data frame analytics jobs. -** *`size` (Optional, number)*: Specifies the maximum number of data frame analytics jobs to obtain. -** *`verbose` (Optional, boolean)*: Defines whether the stats response should be verbose. - -[discrete] -==== get_datafeed_stats -Get datafeeds usage info. -You can get statistics for multiple datafeeds in a single API request by -using a list of datafeeds or a wildcard expression. You can -get statistics for all datafeeds by using `_all`, by specifying `*` as the -``, or by omitting the ``. If the datafeed is stopped, the -only information you receive is the `datafeed_id` and the `state`. -This API returns a maximum of 10,000 datafeeds. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats[Endpoint documentation] -[source,ts] ----- -client.ml.getDatafeedStats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (Optional, string | string[])*: Identifier for the datafeed. It can be a datafeed identifier or a -wildcard expression. If you do not specify one of these options, the API -returns information about all datafeeds. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -. Contains wildcard expressions and there are no datafeeds that match. -. Contains the `_all` string or no identifiers and there are no matches. -. Contains wildcard expressions and there are only partial matches. - -The default value is `true`, which returns an empty `datafeeds` array -when there are no matches and the subset of results when there are -partial matches. If this parameter is `false`, the request returns a -`404` status code when there are no matches or only partial matches. - -[discrete] -==== get_datafeeds -Get datafeeds configuration info. -You can get information for multiple datafeeds in a single API request by -using a list of datafeeds or a wildcard expression. You can -get information for all datafeeds by using `_all`, by specifying `*` as the -``, or by omitting the ``. -This API returns a maximum of 10,000 datafeeds. 
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds[Endpoint documentation] -[source,ts] ----- -client.ml.getDatafeeds({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (Optional, string | string[])*: Identifier for the datafeed. It can be a datafeed identifier or a -wildcard expression. If you do not specify one of these options, the API -returns information about all datafeeds. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -. Contains wildcard expressions and there are no datafeeds that match. -. Contains the `_all` string or no identifiers and there are no matches. -. Contains wildcard expressions and there are only partial matches. - -The default value is `true`, which returns an empty `datafeeds` array -when there are no matches and the subset of results when there are -partial matches. If this parameter is `false`, the request returns a -`404` status code when there are no matches or only partial matches. -** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on -retrieval. This allows the configuration to be in an acceptable format to -be retrieved and then added to another cluster. - -[discrete] -==== get_filters -Get filters. -You can get a single filter or all filters. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters[Endpoint documentation] -[source,ts] ----- -client.ml.getFilters({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`filter_id` (Optional, string | string[])*: A string that uniquely identifies a filter. -** *`from` (Optional, number)*: Skips the specified number of filters. -** *`size` (Optional, number)*: Specifies the maximum number of filters to obtain. - -[discrete] -==== get_influencers -Get anomaly detection job results for influencers. -Influencers are the entities that have contributed to, or are to blame for, -the anomalies. Influencer results are available only if an -`influencer_field_name` is specified in the job configuration. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers[Endpoint documentation] -[source,ts] ----- -client.ml.getInfluencers({ job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`page` (Optional, { from, size })*: Configures pagination. -This parameter has the `from` and `size` properties. -** *`desc` (Optional, boolean)*: If true, the results are sorted in descending order. -** *`end` (Optional, string | Unit)*: Returns influencers with timestamps earlier than this time. -The default value means it is unset and results are not limited to -specific timestamps. -** *`exclude_interim` (Optional, boolean)*: If true, the output excludes interim results. By default, interim results -are included. -** *`influencer_score` (Optional, number)*: Returns influencers with anomaly scores greater than or equal to this -value. -** *`from` (Optional, number)*: Skips the specified number of influencers. -** *`size` (Optional, number)*: Specifies the maximum number of influencers to obtain. -** *`sort` (Optional, string)*: Specifies the sort field for the requested influencers. By default, the -influencers are sorted by the `influencer_score` value. -** *`start` (Optional, string | Unit)*: Returns influencers with timestamps after this time. 
The default value -means it is unset and results are not limited to specific timestamps. - -[discrete] -==== get_job_stats -Get anomaly detection jobs usage info. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats[Endpoint documentation] -[source,ts] ----- -client.ml.getJobStats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. It can be a job identifier, a -group name, a list of jobs, or a wildcard expression. If -you do not specify one of these options, the API returns information for -all anomaly detection jobs. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -. Contains wildcard expressions and there are no jobs that match. -. Contains the _all string or no identifiers and there are no matches. -. Contains wildcard expressions and there are only partial matches. - -If `true`, the API returns an empty `jobs` array when -there are no matches and the subset of results when there are partial -matches. If `false`, the API returns a `404` status -code when there are no matches or only partial matches. - -[discrete] -==== get_jobs -Get anomaly detection jobs configuration info. -You can get information for multiple anomaly detection jobs in a single API -request by using a group name, a list of jobs, or a wildcard -expression. You can get information for all anomaly detection jobs by using -`_all`, by specifying `*` as the ``, or by omitting the ``. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs[Endpoint documentation] -[source,ts] ----- -client.ml.getJobs({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (Optional, string | string[])*: Identifier for the anomaly detection job. It can be a job identifier, a -group name, or a wildcard expression. If you do not specify one of these -options, the API returns information for all anomaly detection jobs. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -. Contains wildcard expressions and there are no jobs that match. -. Contains the _all string or no identifiers and there are no matches. -. Contains wildcard expressions and there are only partial matches. - -The default value is `true`, which returns an empty `jobs` array when -there are no matches and the subset of results when there are partial -matches. If this parameter is `false`, the request returns a `404` status -code when there are no matches or only partial matches. -** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on -retrieval. This allows the configuration to be in an acceptable format to -be retrieved and then added to another cluster. - -[discrete] -==== get_memory_stats -Get machine learning memory usage info. -Get information about how machine learning jobs and trained models are using memory, -on each node, both within the JVM heap, and natively, outside of the JVM. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats[Endpoint documentation] -[source,ts] ----- -client.ml.getMemoryStats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string)*: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or -`ml:true` -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. 
If no response is received before the timeout -expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request -fails and returns an error. - -[discrete] -==== get_model_snapshot_upgrade_stats -Get anomaly detection job model snapshot upgrade usage info. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats[Endpoint documentation] -[source,ts] ---- -client.ml.getModelSnapshotUpgradeStats({ job_id, snapshot_id }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`snapshot_id` (string)*: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple -snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, -by specifying `*` as the snapshot ID, or by omitting the snapshot ID. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - - - Contains wildcard expressions and there are no jobs that match. - - Contains the _all string or no identifiers and there are no matches. - - Contains wildcard expressions and there are only partial matches. - -The default value is true, which returns an empty jobs array when there are no matches and the subset of results -when there are partial matches. If this parameter is false, the request returns a 404 status code when there are -no matches or only partial matches. - -[discrete] -==== get_model_snapshots -Get model snapshots info. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots[Endpoint documentation] -[source,ts] ---- -client.ml.getModelSnapshots({ job_id }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`snapshot_id` (Optional, string)*: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple -snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, -by specifying `*` as the snapshot ID, or by omitting the snapshot ID. -** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter. -** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. -** *`page` (Optional, { from, size })* -** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter. -** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. -** *`from` (Optional, number)*: Skips the specified number of snapshots. -** *`size` (Optional, number)*: Specifies the maximum number of snapshots to obtain. - -[discrete] -==== get_overall_buckets -Get overall bucket results. - -Retrieves overall bucket results that summarize the bucket results of -multiple anomaly detection jobs. - -The `overall_score` is calculated by combining the scores of all the -buckets within the overall bucket span. First, the maximum -`anomaly_score` per anomaly detection job in the overall bucket is -calculated. Then the `top_n` of those scores are averaged to result in -the `overall_score`. This means that you can fine-tune the -`overall_score` so that it is more or less sensitive to the number of -jobs that detect an anomaly at the same time.
For example, if you set -`top_n` to `1`, the `overall_score` is the maximum bucket score in the -overall bucket. Alternatively, if you set `top_n` to the number of jobs, -the `overall_score` is high only when all jobs detect anomalies in that -overall bucket. If you set the `bucket_span` parameter (to a value -greater than its default), the `overall_score` is the maximum -`overall_score` of the overall buckets that have a span equal to the -jobs' largest bucket span. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets[Endpoint documentation] -[source,ts] ---- -client.ml.getOverallBuckets({ job_id }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. It can be a job identifier, a -group name, a list of jobs or groups, or a wildcard -expression. - -You can summarize the bucket results for all anomaly detection jobs by -using `_all` or by specifying `*` as the `<job_id>`. -** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter. -** *`bucket_span` (Optional, string | -1 | 0)*: Refer to the description for the `bucket_span` query parameter. -** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. -** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter. -** *`overall_score` (Optional, number | string)*: Refer to the description for the `overall_score` query parameter. -** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. -** *`top_n` (Optional, number)*: Refer to the description for the `top_n` query parameter. - -[discrete] -==== get_records -Get anomaly records for an anomaly detection job. -Records contain the detailed analytical results. They describe the anomalous -activity that has been identified in the input data based on the detector -configuration. -There can be many anomaly records depending on the characteristics and size -of the input data. In practice, there are often too many to be able to -manually process them. The machine learning features therefore perform a -sophisticated aggregation of the anomaly records into buckets. -The number of record results depends on the number of anomalies found in each -bucket, which relates to the number of time series being modeled and the -number of detectors. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records[Endpoint documentation] -[source,ts] ---- -client.ml.getRecords({ job_id }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter. -** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. -** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter. -** *`page` (Optional, { from, size })* -** *`record_score` (Optional, number)*: Refer to the description for the `record_score` query parameter. -** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter. -** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. -** *`from` (Optional, number)*: Skips the specified number of records. -** *`size` (Optional, number)*: Specifies the maximum number of records to obtain.
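-
-For example, a minimal sketch that fetches only high-scoring records (the job ID and threshold are hypothetical):
-
-[source,ts]
----
-const response = await client.ml.getRecords({
-  job_id: 'my-anomaly-job',  // hypothetical job ID
-  record_score: 75,          // keep only records scoring 75 or higher
-  start: 'now-2d',           // look at the last two days only
-  exclude_interim: true,
-  sort: 'record_score',
-  desc: true,
-  size: 25
-})
----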
- -[discrete] -==== get_trained_models -Get trained model configuration info. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models[Endpoint documentation] -[source,ts] ---- -client.ml.getTrainedModels({ ... }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (Optional, string | string[])*: The unique identifier of the trained model or a model alias. - -You can get information for multiple trained models in a single API -request by using a list of model IDs or a wildcard -expression. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -- Contains wildcard expressions and there are no models that match. -- Contains the _all string or no identifiers and there are no matches. -- Contains wildcard expressions and there are only partial matches. - -If true, it returns an empty array when there are no matches and the -subset of results when there are partial matches. -** *`decompress_definition` (Optional, boolean)*: Specifies whether the included model definition should be returned as a -JSON map (true) or in a custom compressed format (false). -** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on -retrieval. This allows the configuration to be in an acceptable format to -be retrieved and then added to another cluster. -** *`from` (Optional, number)*: Skips the specified number of models. -** *`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))*: A comma delimited string of optional fields to include in the response -body. -** *`include_model_definition` (Optional, boolean)*: This parameter is deprecated. Use `include=definition` instead. -** *`size` (Optional, number)*: Specifies the maximum number of models to obtain. -** *`tags` (Optional, string | string[])*: A comma delimited string of tags. A trained model can have many tags, or -none. When supplied, only trained models that contain all the supplied -tags are returned. - -[discrete] -==== get_trained_models_stats -Get trained models usage info. -You can get usage information for multiple trained -models in a single API request by using a list of model IDs or a wildcard expression. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats[Endpoint documentation] -[source,ts] ---- -client.ml.getTrainedModelsStats({ ... }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (Optional, string | string[])*: The unique identifier of the trained model or a model alias. It can be a -list or a wildcard expression. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -- Contains wildcard expressions and there are no models that match. -- Contains the _all string or no identifiers and there are no matches. -- Contains wildcard expressions and there are only partial matches. - -If true, it returns an empty array when there are no matches and the -subset of results when there are partial matches. -** *`from` (Optional, number)*: Skips the specified number of models. -** *`size` (Optional, number)*: Specifies the maximum number of models to obtain. - -[discrete] -==== infer_trained_model -Evaluate a trained model.
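-
-For instance, a minimal sketch for an NLP model (the model ID and input text are hypothetical; NLP inputs are typically sent in `text_field`):
-
-[source,ts]
----
-const inference = await client.ml.inferTrainedModel({
-  model_id: 'my-nlp-model',  // hypothetical model ID
-  docs: [{ text_field: 'Elasticsearch is a distributed search engine.' }],
-  timeout: '30s'
-})
----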
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model[Endpoint documentation] -[source,ts] ---- -client.ml.inferTrainedModel({ model_id, docs }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. -** *`docs` (Record[])*: An array of objects to pass to the model for inference. The objects should contain fields matching your -configured trained model input. Typically, for NLP models, the field name is `text_field`. -Currently, for NLP models, only a single value is allowed. -** *`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The inference configuration updates to apply on the API call. -** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait for inference results. - -[discrete] -==== info -Get machine learning information. -Get defaults and limits used by machine learning. -This endpoint is designed to be used by a user interface that needs to fully -understand machine learning configurations where some options are not -specified, meaning that the defaults should be used. This endpoint may be -used to find out what those defaults are. It also provides information about -the maximum size of machine learning jobs that could run in the current -cluster configuration. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info[Endpoint documentation] -[source,ts] ---- -client.ml.info() ---- - - -[discrete] -==== open_job -Open anomaly detection jobs. - -An anomaly detection job must be opened to be ready to receive and analyze -data. It can be opened and closed multiple times throughout its lifecycle. -When you open a new job, it starts with an empty model. -When you open an existing job, the most recent model state is automatically -loaded. The job is ready to resume its analysis from where it left off, once -new data is received. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job[Endpoint documentation] -[source,ts] ---- -client.ml.openJob({ job_id }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. - -[discrete] -==== post_calendar_events -Add scheduled events to the calendar. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events[Endpoint documentation] -[source,ts] ---- -client.ml.postCalendarEvents({ calendar_id, events }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`calendar_id` (string)*: A string that uniquely identifies a calendar. -** *`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])*: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. - -[discrete] -==== post_data -Send data to an anomaly detection job for analysis. - -IMPORTANT: For each job, data can be accepted from only a single connection at a time. -It is not currently possible to post data to multiple jobs using wildcards or a list.
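-
-As a quick illustration, a minimal sketch (the job ID and document fields are hypothetical; the fields must match the job's `data_description`):
-
-[source,ts]
----
-await client.ml.postData({
-  job_id: 'my-anomaly-job',  // hypothetical; the job must be open
-  data: [
-    { timestamp: 1712000000000, bytes: 512 },   // documents shaped like the job's input
-    { timestamp: 1712000060000, bytes: 2048 }
-  ]
-})
----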
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data[Endpoint documentation] -[source,ts] ----- -client.ml.postData({ job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. -** *`data` (Optional, TData[])* -** *`reset_end` (Optional, string | Unit)*: Specifies the end of the bucket resetting range. -** *`reset_start` (Optional, string | Unit)*: Specifies the start of the bucket resetting range. - -[discrete] -==== preview_data_frame_analytics -Preview features used by data frame analytics. -Preview the extracted features used by a data frame analytics config. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics[Endpoint documentation] -[source,ts] ----- -client.ml.previewDataFrameAnalytics({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: Identifier for the data frame analytics job. -** *`config` (Optional, { source, analysis, model_memory_limit, max_num_threads, analyzed_fields })*: A data frame analytics config as described in create data frame analytics -jobs. Note that `id` and `dest` don’t need to be provided in the context of -this API. - -[discrete] -==== preview_datafeed -Preview a datafeed. -This API returns the first "page" of search results from a datafeed. -You can preview an existing datafeed or provide configuration details for a datafeed -and anomaly detection job in the API. The preview shows the structure of the data -that will be passed to the anomaly detection engine. -IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that -called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the -datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. -You can also use secondary authorization headers to supply the credentials. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed[Endpoint documentation] -[source,ts] ----- -client.ml.previewDatafeed({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (Optional, string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase -alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric -characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job -configuration details in the request body. -** *`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })*: The datafeed definition to preview. -** *`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })*: The configuration details for the anomaly detection job that is associated with the datafeed. 
If the -`datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must -supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is -used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. -** *`start` (Optional, string | Unit)*: The start time from where the datafeed preview should begin -** *`end` (Optional, string | Unit)*: The end time when the datafeed preview should stop - -[discrete] -==== put_calendar -Create a calendar. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar[Endpoint documentation] -[source,ts] ----- -client.ml.putCalendar({ calendar_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`calendar_id` (string)*: A string that uniquely identifies a calendar. -** *`job_ids` (Optional, string[])*: An array of anomaly detection job identifiers. -** *`description` (Optional, string)*: A description of the calendar. - -[discrete] -==== put_calendar_job -Add anomaly detection job to calendar. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job[Endpoint documentation] -[source,ts] ----- -client.ml.putCalendarJob({ calendar_id, job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`calendar_id` (string)*: A string that uniquely identifies a calendar. -** *`job_id` (string | string[])*: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups. - -[discrete] -==== put_data_frame_analytics -Create a data frame analytics job. -This API creates a data frame analytics job that performs an analysis on the -source indices and stores the outcome in a destination index. -By default, the query used in the source configuration is `{"match_all": {}}`. - -If the destination index does not exist, it is created automatically when you start the job. - -If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics[Endpoint documentation] -[source,ts] ----- -client.ml.putDataFrameAnalytics({ id, analysis, dest, source }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain -lowercase alphanumeric characters (a-z and 0-9), hyphens, and -underscores. It must start and end with alphanumeric characters. -** *`analysis` ({ classification, outlier_detection, regression })*: The analysis configuration, which contains the information necessary to -perform one of the following types of analysis: classification, outlier -detection, or regression. -** *`dest` ({ index, results_field })*: The destination configuration. -** *`source` ({ index, query, runtime_mappings, _source })*: The configuration of how to source the analysis data. -** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine -learning node capacity for it to be immediately assigned to a node. If -set to `false` and a machine learning node with capacity to run the job -cannot be immediately found, the API returns an error. 
If set to `true`, -the API does not return an error; the job waits in the `starting` state -until sufficient machine learning node capacity is available. This -behavior is also affected by the cluster-wide -`xpack.ml.max_lazy_ml_nodes` setting. -** *`analyzed_fields` (Optional, { includes, excludes })*: Specifies `includes` and/or `excludes` patterns to select which fields -will be included in the analysis. The patterns specified in `excludes` -are applied last, therefore `excludes` takes precedence. In other words, -if the same field is specified in both `includes` and `excludes`, then -the field will not be included in the analysis. If `analyzed_fields` is -not set, only the relevant fields will be included. For example, all the -numeric fields for outlier detection. -The supported fields vary for each type of analysis. Outlier detection -requires numeric or `boolean` data to analyze. The algorithms don’t -support missing values, therefore fields that have data types other than -numeric or boolean are ignored. Documents where included fields contain -missing values, null values, or an array are also ignored. Therefore the -`dest` index may contain documents that don’t have an outlier score. -Regression supports fields that are numeric, `boolean`, `text`, -`keyword`, and `ip` data types. It is also tolerant of missing values. -Fields that are supported are included in the analysis, other fields are -ignored. Documents where included fields contain an array with two or -more values are also ignored. Documents in the `dest` index that don’t -contain a results field are not included in the regression analysis. -Classification supports fields that are numeric, `boolean`, `text`, -`keyword`, and `ip` data types. It is also tolerant of missing values. -Fields that are supported are included in the analysis, other fields are -ignored. Documents where included fields contain an array with two or -more values are also ignored. Documents in the `dest` index that don’t -contain a results field are not included in the classification analysis. -Classification analysis can be improved by mapping ordinal variable -values to a single number. For example, in case of age ranges, you can -model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. -** *`description` (Optional, string)*: A description of the job. -** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more -threads may decrease the time necessary to complete the analysis at the -cost of using more CPU. Note that the process may use additional threads -for operational functionality other than the analysis itself. -** *`_meta` (Optional, Record)* -** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for -analytical processing. If your `elasticsearch.yml` file contains an -`xpack.ml.max_model_memory_limit` setting, an error occurs when you try -to create data frame analytics jobs that have `model_memory_limit` values -greater than that setting. -** *`headers` (Optional, Record)* -** *`version` (Optional, string)* - -[discrete] -==== put_datafeed -Create a datafeed. -Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. -You can associate only one datafeed with each anomaly detection job. -The datafeed contains a query that runs at a defined interval (`frequency`). -If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval.
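-
-For example, a minimal sketch of such a datafeed, relying on the default query (the IDs, index name, and intervals are hypothetical):
-
-[source,ts]
----
-await client.ml.putDatafeed({
-  datafeed_id: 'datafeed-my-anomaly-job',  // hypothetical datafeed ID
-  job_id: 'my-anomaly-job',                // hypothetical job ID
-  indices: ['server-metrics'],             // hypothetical source index
-  frequency: '150s',                       // query every 150 seconds
-  query_delay: '90s'                       // allow 90 seconds for ingest latency
-})
----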
-By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. - -When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had -at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, -those credentials are used instead. -You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed -directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed[Endpoint documentation] -[source,ts] ----- -client.ml.putDatafeed({ datafeed_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. -This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. -It must start and end with alphanumeric characters. -** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. -Support for aggregations is limited and should be used only with low cardinality data. -** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might be required to search over long time periods, for several months or years. -This search is split into time chunks in order to ensure the load on Elasticsearch is managed. -Chunking configuration controls how the size of these time chunks are calculated; -it is an advanced configuration option. -** *`delayed_data_check_config` (Optional, { check_window, enabled })*: Specifies whether the datafeed checks for missing data and the size of the window. -The datafeed can optionally search over indices that have already been read in an effort to determine whether -any data has subsequently been added to the index. If missing data is found, it is a good indication that the -`query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. -This check runs only on real-time datafeeds. -** *`frequency` (Optional, string | -1 | 0)*: The interval at which scheduled queries are made while the datafeed runs in real time. -The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible -fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last -(partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses -aggregations, this value must be divisible by the interval of the date histogram aggregation. -** *`indices` (Optional, string | string[])*: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine -learning nodes must have the `remote_cluster_client` role. -** *`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })*: Specifies index expansion options that are used during search -** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. -** *`max_empty_searches` (Optional, number)*: If a real-time datafeed has never seen any data (including during any initial training period), it automatically -stops and closes the associated job after this many real-time searches return no documents. In other words, -it stops after `frequency` times `max_empty_searches` of real-time operation. 
If not set, a datafeed with no -end time that sees no data remains started until it is explicitly stopped. By default, it is not set. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an -Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this -object is passed verbatim to Elasticsearch. -** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might -not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default -value is randomly selected between `60s` and `120s`. This randomness improves the query performance -when there are multiple jobs running on the same node. -** *`runtime_mappings` (Optional, Record)*: Specifies runtime fields for the datafeed search. -** *`script_fields` (Optional, Record)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. -The detector configuration objects in a job can contain functions that use these script fields. -** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. -The maximum value is the value of `index.max_result_window`, which is 10,000 by default. -** *`headers` (Optional, Record)* -** *`allow_no_indices` (Optional, boolean)*: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` -string or when no indices are specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines -whether wildcard expressions match hidden data streams. Supports a list of values. -** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen. -** *`ignore_unavailable` (Optional, boolean)*: If true, unavailable indices (missing or closed) are ignored. - -[discrete] -==== put_filter -Create a filter. -A filter contains a list of strings. It can be used by one or more anomaly detection jobs. -Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter[Endpoint documentation] -[source,ts] ----- -client.ml.putFilter({ filter_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`filter_id` (string)*: A string that uniquely identifies a filter. -** *`description` (Optional, string)*: A description of the filter. 
-** *`items` (Optional, string[])*: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. -Up to 10000 items are allowed in each filter. - -[discrete] -==== put_job -Create an anomaly detection job. - -If you include a `datafeed_config`, you must have read index privileges on the source index. -If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job[Endpoint documentation] -[source,ts] ---- -client.ml.putJob({ job_id, analysis_config, data_description }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -** *`analysis_config` ({ bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })*: Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. -** *`data_description` ({ format, time_field, time_format, field_delimiter })*: Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. -** *`allow_lazy_open` (Optional, boolean)*: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. -** *`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })*: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. -** *`background_persist_interval` (Optional, string | -1 | 0)*: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. -** *`custom_settings` (Optional, User-defined value)*: Advanced configuration option. Contains custom metadata about the job. -** *`daily_model_snapshot_retention_after_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained.
This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. -** *`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })*: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. -** *`description` (Optional, string)*: A description of the job. -** *`groups` (Optional, string[])*: A list of job groups. A job can belong to no groups or many. -** *`model_plot_config` (Optional, { annotations_enabled, enabled, terms })*: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. -** *`model_snapshot_retention_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. -** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. -** *`results_index_name` (Optional, string)*: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. -** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. -** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the -`_all` string or when no indices are specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines -whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: - -* `all`: Match any data stream or index, including hidden ones. -* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. -* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. -* `none`: Wildcard patterns are not accepted. -* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. -** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored. - -[discrete] -==== put_trained_model -Create a trained model. -Enables you to supply a trained model that is not created by data frame analytics. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model[Endpoint documentation] -[source,ts] ---- -client.ml.putTrainedModel({ model_id }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. -** *`compressed_definition` (Optional, string)*: The compressed (GZipped and Base64 encoded) inference definition of the -model. If compressed_definition is specified, then definition cannot be -specified. -** *`definition` (Optional, { preprocessors, trained_model })*: The inference definition for the model. If definition is specified, then -compressed_definition cannot be specified. -** *`description` (Optional, string)*: A human-readable description of the inference trained model. -** *`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The default configuration for inference. This can be either a regression -or classification configuration. It must match the underlying -definition.trained_model's target_type. For pre-packaged models such as -ELSER the config is not required. -** *`input` (Optional, { field_names })*: The input field names for the model definition. -** *`metadata` (Optional, User-defined value)*: An object map that contains metadata about the model. -** *`model_type` (Optional, Enum("tree_ensemble" | "lang_ident" | "pytorch"))*: The model type. -** *`model_size_bytes` (Optional, number)*: The estimated memory usage in bytes to keep the trained model in memory. -This property is supported only if defer_definition_decompression is true -or the model definition is not supplied. -** *`platform_architecture` (Optional, string)*: The platform architecture (if applicable) of the trained model. If the model -only works on one platform, because it is heavily optimized for a particular -processor architecture and OS combination, then this field specifies which. -The format of the string must match the platform identifiers used by Elasticsearch, -so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, -or `windows-x86_64`. For portable models (those that work independent of processor -architecture or OS features), leave this field unset. -** *`tags` (Optional, string[])*: An array of tags to organize the model.
-** *`prefix_strings` (Optional, { ingest, search })*: Optional prefix strings applied at inference -** *`defer_definition_decompression` (Optional, boolean)*: If set to `true` and a `compressed_definition` is provided, -the request defers definition decompression and skips relevant -validations. -** *`wait_for_completion` (Optional, boolean)*: Whether to wait for all child operations (e.g. model download) -to complete. - -[discrete] -==== put_trained_model_alias -Create or update a trained model alias. -A trained model alias is a logical name used to reference a single trained -model. -You can use aliases instead of trained model identifiers to make it easier to -reference your models. For example, you can use aliases in inference -aggregations and processors. -An alias must be unique and refer to only a single trained model. However, -you can have multiple aliases for each trained model. -If you use this API to update an alias such that it references a different -trained model ID and the model uses a different type of data frame analytics, -an error occurs. For example, this situation occurs if you have a trained -model for regression analysis and a trained model for classification -analysis; you cannot reassign an alias from one type of trained model to -another. -If you use this API to update an alias and there are very few input fields in -common between the old and new trained models for the model alias, the API -returns a warning. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias[Endpoint documentation] -[source,ts] ----- -client.ml.putTrainedModelAlias({ model_alias, model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_alias` (string)*: The alias to create or update. This value cannot end in numbers. -** *`model_id` (string)*: The identifier for the trained model that the alias refers to. -** *`reassign` (Optional, boolean)*: Specifies whether the alias gets reassigned to the specified trained -model if it is already assigned to a different model. If the alias is -already assigned and this parameter is false, the API returns an error. - -[discrete] -==== put_trained_model_definition_part -Create part of a trained model definition. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part[Endpoint documentation] -[source,ts] ----- -client.ml.putTrainedModelDefinitionPart({ model_id, part, definition, total_definition_length, total_parts }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. -** *`part` (number)*: The definition part number. When the definition is loaded for inference the definition parts are streamed in the -order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. -** *`definition` (string)*: The definition part for the model. Must be a base64 encoded string. -** *`total_definition_length` (number)*: The total uncompressed definition length in bytes. Not base64 encoded. -** *`total_parts` (number)*: The total number of parts that will be uploaded. Must be greater than 0. - -[discrete] -==== put_trained_model_vocabulary -Create a trained model vocabulary. -This API is supported only for natural language processing (NLP) models. -The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. 
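-
-For example, a minimal sketch (the model ID and tokens are hypothetical and truncated):
-
-[source,ts]
----
-await client.ml.putTrainedModelVocabulary({
-  model_id: 'my-nlp-model',  // hypothetical NLP model ID
-  vocabulary: ['[PAD]', '[UNK]', 'elastic', 'search']  // illustrative, truncated vocabulary
-})
----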
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary[Endpoint documentation] -[source,ts] ---- -client.ml.putTrainedModelVocabulary({ model_id, vocabulary }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. -** *`vocabulary` (string[])*: The model vocabulary, which must not be empty. -** *`merges` (Optional, string[])*: The optional model merges if required by the tokenizer. -** *`scores` (Optional, number[])*: The optional vocabulary value scores if required by the tokenizer. - -[discrete] -==== reset_job -Reset an anomaly detection job. -All model state and results are deleted. The job is ready to start over as if -it had just been created. -It is not currently possible to reset multiple jobs using wildcards or a -comma-separated list. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job[Endpoint documentation] -[source,ts] ---- -client.ml.resetJob({ job_id }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: The ID of the job to reset. -** *`wait_for_completion` (Optional, boolean)*: Specifies whether this request waits until the operation has completed before -returning. -** *`delete_user_annotations` (Optional, boolean)*: Specifies whether annotations that have been added by the -user should be deleted along with any auto-generated annotations when the job is -reset. - -[discrete] -==== revert_model_snapshot -Revert to a snapshot. -The machine learning features react quickly to anomalous input, learning new -behaviors in data. Highly anomalous input increases the variance in the -models whilst the system learns whether this is a new step-change in behavior -or a one-off event. In the case where this anomalous input is known to be a -one-off, then it might be appropriate to reset the model state to a time -before this event. For example, you might consider reverting to a saved -snapshot after Black Friday or a critical system failure. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot[Endpoint documentation] -[source,ts] ---- -client.ml.revertModelSnapshot({ job_id, snapshot_id }) ---- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`snapshot_id` (string)*: You can specify `empty` as the `<snapshot_id>`. Reverting to the empty -snapshot means the anomaly detection job starts learning a new model from -scratch when it is started. -** *`delete_intervening_results` (Optional, boolean)*: Refer to the description for the `delete_intervening_results` query parameter. - -[discrete] -==== set_upgrade_mode -Set upgrade_mode for ML indices. -Sets a cluster-wide upgrade_mode setting that prepares machine learning -indices for an upgrade. -When upgrading your cluster, in some circumstances you must restart your -nodes and reindex your machine learning indices. In those circumstances, -there must be no machine learning jobs running. You can close the machine -learning jobs, do the upgrade, then open all the jobs again. Alternatively, -you can use this API to temporarily halt tasks associated with the jobs and -datafeeds and prevent new jobs from opening. You can also use this API -during upgrades that do not require you to reindex your machine learning -indices, though stopping jobs is not a requirement in that case.
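-
-For example, a typical upgrade flow might look like this sketch (the timeout value is arbitrary):
-
-[source,ts]
----
-await client.ml.setUpgradeMode({ enabled: true, timeout: '10m' })
-// ...restart nodes and reindex the machine learning indices...
-await client.ml.setUpgradeMode({ enabled: false })
----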
-You can see the current value for the upgrade_mode setting by using the get -machine learning info API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode[Endpoint documentation] -[source,ts] ----- -client.ml.setUpgradeMode({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`enabled` (Optional, boolean)*: When `true`, it enables `upgrade_mode` which temporarily halts all job -and datafeed tasks and prohibits new job and datafeed tasks from -starting. -** *`timeout` (Optional, string | -1 | 0)*: The time to wait for the request to be completed. - -[discrete] -==== start_data_frame_analytics -Start a data frame analytics job. -A data frame analytics job can be started and stopped multiple times -throughout its lifecycle. -If the destination index does not exist, it is created automatically the -first time you start the data frame analytics job. The -`index.number_of_shards` and `index.number_of_replicas` settings for the -destination index are copied from the source index. If there are multiple -source indices, the destination index copies the highest setting values. The -mappings for the destination index are also copied from the source indices. -If there are any mapping conflicts, the job fails to start. -If the destination index exists, it is used as is. You can therefore set up -the destination index in advance with custom settings and mappings. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics[Endpoint documentation] -[source,ts] ----- -client.ml.startDataFrameAnalytics({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain -lowercase alphanumeric characters (a-z and 0-9), hyphens, and -underscores. It must start and end with alphanumeric characters. -** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait until the data frame analytics job -starts. - -[discrete] -==== start_datafeed -Start datafeeds. - -A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped -multiple times throughout its lifecycle. - -Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. - -If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. -If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. - -When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or -update it had at the time of creation or update and runs the query using those same roles. If you provided secondary -authorization headers when you created or updated the datafeed, those credentials are used instead. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed[Endpoint documentation] -[source,ts] ----- -client.ml.startDatafeed({ datafeed_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase -alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric -characters. -** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. 
-** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. -** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. - -[discrete] -==== start_trained_model_deployment -Start a trained model deployment. -It allocates the model to every machine learning node. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment[Endpoint documentation] -[source,ts] ----- -client.ml.startTrainedModelDeployment({ model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. Currently, only PyTorch models are supported. -** *`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })*: Adaptive allocations configuration. When enabled, the number of allocations -is set based on the current load. -If adaptive_allocations is enabled, do not set the number of allocations manually. -** *`cache_size` (Optional, number | string)*: The inference cache size (in memory outside the JVM heap) per node for the model. -The default value is the same size as the `model_size_bytes`. To disable the cache, -`0b` can be provided. -** *`deployment_id` (Optional, string)*: A unique identifier for the deployment of the model. -** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed. -All allocations on a node share the same copy of the model in memory but use -a separate set of threads to evaluate the model. -Increasing this value generally increases the throughput. -If this setting is greater than the number of hardware threads -it will automatically be changed to a value less than the number of hardware threads. -If adaptive_allocations is enabled, do not set this value, because it’s automatically set. -** *`priority` (Optional, Enum("normal" | "low"))*: The deployment priority. -** *`queue_capacity` (Optional, number)*: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds -this value, new requests are rejected with a 429 error. -** *`threads_per_allocation` (Optional, number)*: Sets the number of threads used by each model allocation during inference. This generally increases -the inference speed. The inference process is a compute-bound process; any number -greater than the number of available hardware threads on the machine does not increase the -inference speed. If this setting is greater than the number of hardware threads -it will automatically be changed to a value less than the number of hardware threads. -** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the model to deploy. -** *`wait_for` (Optional, Enum("started" | "starting" | "fully_allocated"))*: Specifies the allocation status to wait for before returning. - -[discrete] -==== stop_data_frame_analytics -Stop data frame analytics jobs. -A data frame analytics job can be started and stopped multiple times -throughout its lifecycle. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics[Endpoint documentation] -[source,ts] ----- -client.ml.stopDataFrameAnalytics({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain -lowercase alphanumeric characters (a-z and 0-9), hyphens, and -underscores. 
It must start and end with alphanumeric characters. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -. Contains wildcard expressions and there are no data frame analytics -jobs that match. -. Contains the _all string or no identifiers and there are no matches. -. Contains wildcard expressions and there are only partial matches. - -The default value is true, which returns an empty data_frame_analytics -array when there are no matches and the subset of results when there are -partial matches. If this parameter is false, the request returns a 404 -status code when there are no matches or only partial matches. -** *`force` (Optional, boolean)*: If true, the data frame analytics job is stopped forcefully. -** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait until the data frame analytics job -stops. Defaults to 20 seconds. - -[discrete] -==== stop_datafeed -Stop datafeeds. -A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped -multiple times throughout its lifecycle. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed[Endpoint documentation] -[source,ts] ----- -client.ml.stopDatafeed({ datafeed_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (string)*: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated -list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as -the identifier. -** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter. -** *`force` (Optional, boolean)*: Refer to the description for the `force` query parameter. -** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. - -[discrete] -==== stop_trained_model_deployment -Stop a trained model deployment. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment[Endpoint documentation] -[source,ts] ----- -client.ml.stopTrainedModelDeployment({ model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; -contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and -there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. -If `false`, the request returns a 404 status code when there are no matches or only partial matches. -** *`force` (Optional, boolean)*: Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you -restart the model deployment. - -[discrete] -==== update_data_frame_analytics -Update a data frame analytics job. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics[Endpoint documentation] -[source,ts] ----- -client.ml.updateDataFrameAnalytics({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain -lowercase alphanumeric characters (a-z and 0-9), hyphens, and -underscores. 
It must start and end with alphanumeric characters. -** *`description` (Optional, string)*: A description of the job. -** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for -analytical processing. If your `elasticsearch.yml` file contains an -`xpack.ml.max_model_memory_limit` setting, an error occurs when you try -to create data frame analytics jobs that have `model_memory_limit` values -greater than that setting. -** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more -threads may decrease the time necessary to complete the analysis at the -cost of using more CPU. Note that the process may use additional threads -for operational functionality other than the analysis itself. -** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine -learning node capacity for it to be immediately assigned to a node. - -[discrete] -==== update_datafeed -Update a datafeed. -You must stop and start the datafeed for the changes to be applied. -When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at -the time of the update and runs the query using those same roles. If you provide secondary authorization headers, -those credentials are used instead. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed[Endpoint documentation] -[source,ts] ----- -client.ml.updateDatafeed({ datafeed_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. -This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. -It must start and end with alphanumeric characters. -** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only -with low cardinality data. -** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might search over long time periods, for several months or years. This search is split into time -chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of -these time chunks are calculated; it is an advanced configuration option. -** *`delayed_data_check_config` (Optional, { check_window, enabled })*: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally -search over indices that have already been read in an effort to determine whether any data has subsequently been -added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and -the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time -datafeeds. -** *`frequency` (Optional, string | -1 | 0)*: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is -either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket -span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are -written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value -must be divisible by the interval of the date histogram aggregation. 
-** *`indices` (Optional, string[])*: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine -learning nodes must have the `remote_cluster_client` role. -** *`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })*: Specifies index expansion options that are used during search. -** *`job_id` (Optional, string)* -** *`max_empty_searches` (Optional, number)*: If a real-time datafeed has never seen any data (including during any initial training period), it automatically -stops and closes the associated job after this many real-time searches return no documents. In other words, -it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no -end time that sees no data remains started until it is explicitly stopped. By default, it is not set. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an -Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this -object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also -changed. Therefore, the time required to learn might be long and the understandability of the results is -unpredictable. If you want to make significant changes to the source data, it is recommended that you -clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one -when you are satisfied with the results of the job. -** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might -not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default -value is randomly selected between `60s` and `120s`. This randomness improves the query performance -when there are multiple jobs running on the same node. -** *`runtime_mappings` (Optional, Record)*: Specifies runtime fields for the datafeed search. -** *`script_fields` (Optional, Record)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. -The detector configuration objects in a job can contain functions that use these script fields. -** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. -The maximum value is the value of `index.max_result_window`. -** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the -`_all` string or when no indices are specified. 
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines -whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: - -* `all`: Match any data stream or index, including hidden ones. -* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. -* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. -* `none`: Wildcard patterns are not accepted. -* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. -** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored. - -[discrete] -==== update_filter -Update a filter. -Updates the description of a filter, adds items, or removes items from the list. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter[Endpoint documentation] -[source,ts] ----- -client.ml.updateFilter({ filter_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`filter_id` (string)*: A string that uniquely identifies a filter. -** *`add_items` (Optional, string[])*: The items to add to the filter. -** *`description` (Optional, string)*: A description for the filter. -** *`remove_items` (Optional, string[])*: The items to remove from the filter. - -[discrete] -==== update_job -Update an anomaly detection job. -Updates certain properties of an anomaly detection job. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job[Endpoint documentation] -[source,ts] ----- -client.ml.updateJob({ job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the job. -** *`allow_lazy_open` (Optional, boolean)*: Advanced configuration option. Specifies whether this job can open when -there is insufficient machine learning node capacity for it to be -immediately assigned to a node. If `false` and a machine learning node -with capacity to run the job cannot immediately be found, the open -anomaly detection jobs API returns an error. However, this is also -subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this -option is set to `true`, the open anomaly detection jobs API does not -return an error and the job waits in the opening state until sufficient -machine learning node capacity is available. -** *`analysis_limits` (Optional, { model_memory_limit })* -** *`background_persist_interval` (Optional, string | -1 | 0)*: Advanced configuration option. The time between each periodic persistence -of the model. -The default value is a randomized value between 3 to 4 hours, which -avoids all jobs persisting at exactly the same time. The smallest allowed -value is 1 hour. -For very large models (several GB), persistence could take 10-20 minutes, -so do not set the value too low. -If the job is open when you make the update, you must stop the datafeed, -close the job, then reopen the job and restart the datafeed for the -changes to take effect. -** *`custom_settings` (Optional, Record)*: Advanced configuration option. Contains custom meta data about the job. 
-For example, it can contain custom URL information as shown in Adding -custom URLs to machine learning results. -** *`categorization_filters` (Optional, string[])* -** *`description` (Optional, string)*: A description of the job. -** *`model_plot_config` (Optional, { annotations_enabled, enabled, terms })* -** *`model_prune_window` (Optional, string | -1 | 0)* -** *`daily_model_snapshot_retention_after_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old -model snapshots for this job. It specifies a period of time (in days) -after which only the first snapshot per day is retained. This period is -relative to the timestamp of the most recent snapshot for this job. Valid -values range from 0 to `model_snapshot_retention_days`. For jobs created -before version 7.8.0, the default value matches -`model_snapshot_retention_days`. -** *`model_snapshot_retention_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old -model snapshots for this job. It specifies the maximum period of time (in -days) that snapshots are retained. This period is relative to the -timestamp of the most recent snapshot for this job. -** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the -score are applied, as new data is seen. -** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results -are retained. Age is calculated relative to the timestamp of the latest -bucket result. If this property has a non-null value, once per day at -00:30 (server time), results that are the specified number of days older -than the latest bucket result are deleted from Elasticsearch. The default -value is null, which means all results are retained. -** *`groups` (Optional, string[])*: A list of job groups. A job can belong to no groups or many. -** *`detectors` (Optional, { detector_index, description, custom_rules }[])*: An array of detector update objects. -** *`per_partition_categorization` (Optional, { enabled, stop_on_warn })*: Settings related to how categorization interacts with partition fields. - -[discrete] -==== update_model_snapshot -Update a snapshot. -Updates certain properties of a snapshot. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot[Endpoint documentation] -[source,ts] ----- -client.ml.updateModelSnapshot({ job_id, snapshot_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`snapshot_id` (string)*: Identifier for the model snapshot. -** *`description` (Optional, string)*: A description of the model snapshot. -** *`retain` (Optional, boolean)*: If `true`, this snapshot will not be deleted during automatic cleanup of -snapshots older than `model_snapshot_retention_days`. However, this -snapshot will be deleted when the job is deleted. - -[discrete] -==== update_trained_model_deployment -Update a trained model deployment. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment[Endpoint documentation] -[source,ts] ----- -client.ml.updateTrainedModelDeployment({ model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. Currently, only PyTorch models are supported. 
-** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed. -All allocations on a node share the same copy of the model in memory but use -a separate set of threads to evaluate the model. -Increasing this value generally increases the throughput. -If this setting is greater than the number of hardware threads -it will automatically be changed to a value less than the number of hardware threads. -If adaptive_allocations is enabled, do not set this value, because it’s automatically set. -** *`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })*: Adaptive allocations configuration. When enabled, the number of allocations -is set based on the current load. -If adaptive_allocations is enabled, do not set the number of allocations manually. - -[discrete] -==== upgrade_job_snapshot -Upgrade a snapshot. -Upgrade an anomaly detection model snapshot to the latest major version. -Over time, older snapshot formats are deprecated and removed. Anomaly -detection jobs support only snapshots that are from the current or previous -major version. -This API provides a means to upgrade a snapshot to the current major version. -This aids in preparing the cluster for an upgrade to the next major version. -Only one snapshot per anomaly detection job can be upgraded at a time and the -upgraded snapshot cannot be the current snapshot of the anomaly detection -job. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot[Endpoint documentation] -[source,ts] ----- -client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`snapshot_id` (string)*: A numerical character string that uniquely identifies the model snapshot. -** *`wait_for_completion` (Optional, boolean)*: When true, the API won’t respond until the upgrade is complete. -Otherwise, it responds as soon as the upgrade task is assigned to a node. -** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the request to complete. - -[discrete] -=== nodes -[discrete] -==== clear_repositories_metering_archive -Clear the archived repositories metering. -Clear the archived repositories metering information in the cluster. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive[Endpoint documentation] -[source,ts] ----- -client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information. -** *`max_archive_version` (number)*: Specifies the maximum `archive_version` to be cleared from the archive. - -[discrete] -==== get_repositories_metering_info -Get cluster repositories metering. -Get repositories metering information for a cluster. -This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. -Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. 
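-
-Because these counters are volatile, a client that wants aggregations over time should durably store each response itself. A minimal sketch (the `saveSnapshot` helper and its storage backend are hypothetical):
-
-[source,ts]
-----
-const metering = await client.nodes.getRepositoriesMeteringInfo({
-  node_id: '_all'
-})
-// Durably store the raw payload with a timestamp so aggregations can be
-// computed later, even after node restarts discard the in-memory counters.
-await saveSnapshot({ takenAt: Date.now(), metering })
-----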
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info[Endpoint documentation]
-[source,ts]
-----
-client.nodes.getRepositoriesMeteringInfo({ node_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information.
-All the node selection options are explained https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes[here].
-
-[discrete]
-==== hot_threads
-Get the hot threads for nodes.
-Get a breakdown of the hot threads on each selected node in the cluster.
-The output is plain text with a breakdown of the top hot threads for each node.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads[Endpoint documentation]
-[source,ts]
-----
-client.nodes.hotThreads({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information.
-** *`ignore_idle_threads` (Optional, boolean)*: If true, known idle threads (e.g. waiting in a socket select, or to get
-a task from an empty queue) are filtered out.
-** *`interval` (Optional, string | -1 | 0)*: The interval to do the second sampling of threads.
-** *`snapshots` (Optional, number)*: Number of samples of thread stacktrace.
-** *`threads` (Optional, number)*: Specifies the number of hot threads to provide information for.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received
-before the timeout expires, the request fails and returns an error.
-** *`type` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))*: The type to sample.
-** *`sort` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))*: The sort order for the 'cpu' type (default: total).
-
-[discrete]
-==== info
-Get node information.
-
-By default, the API returns all attributes and core settings for cluster nodes.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info[Endpoint documentation]
-[source,ts]
-----
-client.nodes.info({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information.
-** *`metric` (Optional, string | string[])*: Limits the information returned to the specific metrics. Supports a list, such as `http,ingest`.
-** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== reload_secure_settings
-Reload the keystore on nodes in the cluster.
-
-Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable.
-That is, you can change them on disk and reload them without restarting any nodes in the cluster.
-When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node.
-
-When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings.
-Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted.
-Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings[Endpoint documentation] -[source,ts] ----- -client.nodes.reloadSecureSettings({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: The names of particular nodes in the cluster to target. -** *`secure_settings_password` (Optional, string)*: The password for the Elasticsearch keystore. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== stats -Get node statistics. -Get statistics for nodes in a cluster. -By default, all stats are returned. You can limit the returned information by using metrics. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats[Endpoint documentation] -[source,ts] ----- -client.nodes.stats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information. -** *`metric` (Optional, string | string[])*: Limit the information returned to the specified metrics -** *`index_metric` (Optional, string | string[])*: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. -** *`completion_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata and suggest statistics. -** *`fielddata_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata statistics. -** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. -** *`groups` (Optional, boolean)*: List of search groups to include in the search statistics. -** *`include_segment_file_sizes` (Optional, boolean)*: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). -** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Indicates whether statistics are aggregated at the cluster, index, or shard level. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -** *`types` (Optional, string[])*: A list of document types for the indexing index metric. -** *`include_unloaded_segments` (Optional, boolean)*: If `true`, the response includes information from segments that are not loaded into memory. - -[discrete] -==== usage -Get feature usage information. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage[Endpoint documentation] -[source,ts] ----- -client.nodes.usage({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes -** *`metric` (Optional, string | string[])*: Limits the information returned to the specific metrics. -A list of the following options: `_all`, `rest_actions`. 
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-=== query_rules
-[discrete]
-==== delete_rule
-Delete a query rule.
-Delete a query rule within a query ruleset.
-This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule[Endpoint documentation]
-[source,ts]
-----
-client.queryRules.deleteRule({ ruleset_id, rule_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to delete
-** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to delete
-
-[discrete]
-==== delete_ruleset
-Delete a query ruleset.
-Remove a query ruleset and its associated data.
-This is a destructive action that is not recoverable.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset[Endpoint documentation]
-[source,ts]
-----
-client.queryRules.deleteRuleset({ ruleset_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`ruleset_id` (string)*: The unique identifier of the query ruleset to delete
-
-[discrete]
-==== get_rule
-Get a query rule.
-Get details about a query rule within a query ruleset.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule[Endpoint documentation]
-[source,ts]
-----
-client.queryRules.getRule({ ruleset_id, rule_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to retrieve
-** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to retrieve
-
-[discrete]
-==== get_ruleset
-Get a query ruleset.
-Get details about a query ruleset.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset[Endpoint documentation]
-[source,ts]
-----
-client.queryRules.getRuleset({ ruleset_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`ruleset_id` (string)*: The unique identifier of the query ruleset
-
-[discrete]
-==== list_rulesets
-Get all query rulesets.
-Get summarized information about the query rulesets.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets[Endpoint documentation]
-[source,ts]
-----
-client.queryRules.listRulesets({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`from` (Optional, number)*: The offset from the first result to fetch.
-** *`size` (Optional, number)*: The maximum number of results to retrieve.
-
-[discrete]
-==== put_rule
-Create or update a query rule.
-Create or update a query rule within a query ruleset.
-
-IMPORTANT: Due to limitations within pinned queries, you can only pin documents using `ids` or `docs`, but cannot use both in a single rule.
-It is advised to use one or the other in query rulesets to avoid errors.
-Additionally, pinned queries have a maximum limit of 100 pinned hits.
-If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
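-
-For example, a rule that pins one promotional document whenever the user's query exactly matches a phrase could look like the following sketch (the ruleset, rule, and document IDs are illustrative):
-
-[source,ts]
-----
-await client.queryRules.putRule({
-  ruleset_id: 'my-ruleset',
-  rule_id: 'pin-promo',
-  type: 'pinned',
-  // Every listed criterion must match for the actions to be applied.
-  criteria: [{ type: 'exact', metadata: 'user_query', values: ['special offer'] }],
-  // Pin by document ID; `ids` and `docs` cannot be mixed in a single rule.
-  actions: { ids: ['promo-doc-1'] }
-})
-----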
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule[Endpoint documentation]
-[source,ts]
-----
-client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to be created or updated.
-** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to be created or updated.
-** *`type` (Enum("pinned" | "exclude"))*: The type of rule.
-** *`criteria` ({ type, metadata, values } | { type, metadata, values }[])*: The criteria that must be met for the rule to be applied.
-If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied.
-** *`actions` ({ ids, docs })*: The actions to take when the rule is matched.
-The format of this action depends on the rule type.
-** *`priority` (Optional, number)*
-
-[discrete]
-==== put_ruleset
-Create or update a query ruleset.
-There is a limit of 100 rules per ruleset.
-This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting.
-
-IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in a single rule.
-It is advised to use one or the other in query rulesets to avoid errors.
-Additionally, pinned queries have a maximum limit of 100 pinned hits.
-If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset[Endpoint documentation]
-[source,ts]
-----
-client.queryRules.putRuleset({ ruleset_id, rules })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated.
-** *`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])*
-
-[discrete]
-==== test
-Test a query ruleset.
-Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test[Endpoint documentation]
-[source,ts]
-----
-client.queryRules.test({ ruleset_id, match_criteria })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`ruleset_id` (string)*: The unique identifier of the query ruleset to test against
-** *`match_criteria` (Record)*: The match criteria to apply to rules in the given query ruleset.
-Match criteria should match the keys defined in the `criteria.metadata` field of the rule.
-
-[discrete]
-=== rollup
-[discrete]
-==== delete_job
-Delete a rollup job.
-
-A job must be stopped before it can be deleted.
-If you attempt to delete a started job, an error occurs.
-Similarly, if you attempt to delete a nonexistent job, an exception occurs.
-
-IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data.
-The API does not delete any previously rolled up data.
-This is by design; a user may wish to roll up a static data set.
-Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data).
-Thus the job can be deleted, leaving behind the rolled up data for analysis.
-If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. -If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: - ----- -POST my_rollup_index/_delete_by_query -{ - "query": { - "term": { - "_rollup.id": "the_rollup_job_id" - } - } -} ----- - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job[Endpoint documentation] -[source,ts] ----- -client.rollup.deleteJob({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the job. - -[discrete] -==== get_jobs -Get rollup job information. -Get the configuration, stats, and status of rollup jobs. - -NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. -If a job was created, ran for a while, then was deleted, the API does not return any details about it. -For details about a historical rollup job, the rollup capabilities API may be more useful. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs[Endpoint documentation] -[source,ts] ----- -client.rollup.getJobs({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: Identifier for the rollup job. -If it is `_all` or omitted, the API returns all rollup jobs. - -[discrete] -==== get_rollup_caps -Get the rollup job capabilities. -Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. - -This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. -Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. -This API enables you to inspect an index and determine: - -. Does this index have associated rollup data somewhere in the cluster? -. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps[Endpoint documentation] -[source,ts] ----- -client.rollup.getRollupCaps({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: Index, indices or index-pattern to return rollup capabilities for. -`_all` may be used to fetch rollup capabilities from all jobs. - -[discrete] -==== get_rollup_index_caps -Get the rollup index capabilities. -Get the rollup capabilities of all jobs inside of a rollup index. -A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: - -* What jobs are stored in an index (or indices specified via a pattern)? -* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps[Endpoint documentation] -[source,ts] ----- -client.rollup.getRollupIndexCaps({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: Data stream or index to check for rollup capabilities. -Wildcard (`*`) expressions are supported. - -[discrete] -==== put_job -Create a rollup job. 
-
-WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run.
-
-The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index.
-
-There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.
-
-Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job[Endpoint documentation]
-[source,ts]
-----
-client.rollup.putJob({ id, cron, groups, index_pattern, page_size, rollup_index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the
-data that is associated with the rollup job. The ID is persistent; it is stored with the rolled
-up data. If you create a job, let it run for a while, then delete the job, the data that the job
-rolled up is still associated with this job ID. You cannot create a new job with the same ID
-since that could lead to problems with mismatched job configurations.
-** *`cron` (string)*: A cron string which defines the intervals when the rollup job should be executed. When the interval
-triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated
-to the time interval of the data being rolled up. For example, you may wish to create hourly rollups
-of your documents but only run the indexer on a daily basis at midnight, as defined by the cron. The
-cron pattern is defined just like a Watcher cron schedule.
-** *`groups` ({ date_histogram, histogram, terms })*: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be
-available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of
-the groups configuration as defining a set of tools that can later be used in aggregations to partition the
-data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide
-enough flexibility that you simply need to determine which fields are needed, not in what order they are needed.
-** *`index_pattern` (string)*: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to
-rollup the entire index or index-pattern.
-** *`page_size` (number)*: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends
-to execute faster, but requires more memory during processing. This value has no effect on how the data is
-rolled up; it is merely used for tweaking the speed or memory cost of the indexer.
-** *`rollup_index` (string)*: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs.
-** *`metrics` (Optional, { field, metrics }[])*: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each
-group.
-To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined
-on a per-field basis and for each field you configure which metric should be collected.
-** *`timeout` (Optional, string | -1 | 0)*: Time to wait for the request to complete.
-** *`headers` (Optional, Record)*
-
-[discrete]
-==== rollup_search
-Search rolled-up data.
-The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.
-It rewrites standard Query DSL into a format that matches the rollup documents, then takes the response and rewrites it back to what a client would expect given the original query.
-
-The request body supports a subset of features from the regular search API.
-The following functionality is not available:
-
-`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.
-`highlighter`, `suggesters`, `post_filter`, `profile`, `explain`: These are similarly disallowed.
-
-**Searching both historical rollup and non-rollup data**
-
-The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data.
-This is done by simply adding the live indices to the URI. For example:
-
-----
-GET sensor-1,sensor_rollup/_rollup_search
-{
-  "size": 0,
-  "aggregations": {
-    "max_temperature": {
-      "max": {
-        "field": "temperature"
-      }
-    }
-  }
-}
-----
-
-The rollup search endpoint does two things when the search runs:
-
-* The original request is sent to the non-rollup index unaltered.
-* A rewritten version of the original request is sent to the rollup index.
-
-When the two responses are received, the endpoint rewrites the rollup response and merges the two together.
-During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search[Endpoint documentation]
-[source,ts]
-----
-client.rollup.rollupSearch({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: A list of data streams and indices used to limit the request.
-This parameter has the following rules:
-
-* At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted.
-* Multiple non-rollup indices may be specified.
-* Only one rollup index may be specified. If more than one are supplied, an exception occurs.
-* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams.
-** *`aggregations` (Optional, Record)*: Specifies aggregations.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query that is subject to some limitations. -** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data. -** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response -** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -[discrete] -==== start_job -Start rollup jobs. -If you try to start a job that does not exist, an exception occurs. -If you try to start a job that is already started, nothing happens. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job[Endpoint documentation] -[source,ts] ----- -client.rollup.startJob({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the rollup job. - -[discrete] -==== stop_job -Stop rollup jobs. -If you try to stop a job that does not exist, an exception occurs. -If you try to stop a job that is already stopped, nothing happens. - -Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. -This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: - ----- -POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s ----- -The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. -If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job[Endpoint documentation] -[source,ts] ----- -client.rollup.stopJob({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the rollup job. -** *`timeout` (Optional, string | -1 | 0)*: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. -If more than `timeout` time has passed, the API throws a timeout exception. -NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. -The timeout simply means the API call itself timed out while waiting for the status change. -** *`wait_for_completion` (Optional, boolean)*: If set to `true`, causes the API to block until the indexer state completely stops. -If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. - -[discrete] -=== search_application -[discrete] -==== delete -Delete a search application. - -Remove a search application and its associated alias. Indices attached to the search application are not removed. 
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete[Endpoint documentation] -[source,ts] ----- -client.searchApplication.delete({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the search application to delete. - -[discrete] -==== delete_behavioral_analytics -Delete a behavioral analytics collection. -The associated data stream is also deleted. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics[Endpoint documentation] -[source,ts] ----- -client.searchApplication.deleteBehavioralAnalytics({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the analytics collection to be deleted - -[discrete] -==== get -Get search application details. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get[Endpoint documentation] -[source,ts] ----- -client.searchApplication.get({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the search application - -[discrete] -==== get_behavioral_analytics -Get behavioral analytics collections. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics[Endpoint documentation] -[source,ts] ----- -client.searchApplication.getBehavioralAnalytics({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string[])*: A list of analytics collections to limit the returned information - -[discrete] -==== list -Get search applications. -Get information about search applications. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics[Endpoint documentation] -[source,ts] ----- -client.searchApplication.list({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`q` (Optional, string)*: Query in the Lucene query string syntax. -** *`from` (Optional, number)*: Starting offset. -** *`size` (Optional, number)*: Specifies a max number of results to get. - -[discrete] -==== post_behavioral_analytics_event -Create a behavioral analytics collection event. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event[Endpoint documentation] -[source,ts] ----- -client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_type }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`collection_name` (string)*: The name of the behavioral analytics collection. -** *`event_type` (Enum("page_view" | "search" | "search_click"))*: The analytics event type. -** *`payload` (Optional, User-defined value)* -** *`debug` (Optional, boolean)*: Whether the response type has to include more details - -[discrete] -==== put -Create or update a search application. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put[Endpoint documentation] -[source,ts] ----- -client.searchApplication.put({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the search application to be created or updated. -** *`search_application` (Optional, { indices, analytics_collection_name, template })* -** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing Search Applications. 
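-
-As a sketch, creating a search application that wraps two indices with a simple search template might look like this (the application name, indices, and template contents are illustrative):
-
-[source,ts]
-----
-await client.searchApplication.put({
-  name: 'my-search-app',
-  search_application: {
-    indices: ['my-index-1', 'my-index-2'],
-    template: {
-      script: {
-        source: {
-          query: {
-            query_string: { query: '{{query_string}}' }
-          }
-        },
-        params: { query_string: '*' }
-      }
-    }
-  },
-  // Fail instead of overwriting if an application with this name already exists.
-  create: true
-})
-----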
-
-[discrete]
-==== put_behavioral_analytics
-Create a behavioral analytics collection.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics[Endpoint documentation]
-[source,ts]
-----
-client.searchApplication.putBehavioralAnalytics({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the analytics collection to be created or updated.
-
-[discrete]
-==== render_query
-Render a search application query.
-Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified.
-If a parameter used in the search template is not specified in `params`, the parameter's default value will be used.
-The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.
-
-You must have `read` privileges on the backing alias of the search application.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query[Endpoint documentation]
-[source,ts]
-----
-client.searchApplication.renderQuery({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the search application to render the query for.
-** *`params` (Optional, Record)*
-
-[discrete]
-==== search
-Run a search application search.
-Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application or default template.
-Unspecified template parameters are assigned their default values if applicable.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search[Endpoint documentation]
-[source,ts]
-----
-client.searchApplication.search({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the search application to be searched.
-** *`params` (Optional, Record)*: Query parameters specific to this request, which will override any defaults specified in the template.
-** *`typed_keys` (Optional, boolean)*: Determines whether aggregation names are prefixed by their respective types in the response.
-
-[discrete]
-=== searchable_snapshots
-[discrete]
-==== cache_stats
-Get cache statistics.
-Get statistics about the shared cache for partially mounted indices.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats[Endpoint documentation]
-[source,ts]
-----
-client.searchableSnapshots.cacheStats({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`node_id` (Optional, string | string[])*: The names of the nodes in the cluster to target.
-** *`master_timeout` (Optional, string | -1 | 0)*
-
-[discrete]
-==== clear_cache
-Clear the cache.
-Clear indices and data streams from the shared cache for partially mounted indices.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache[Endpoint documentation]
-[source,ts]
-----
-client.searchableSnapshots.clearCache({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to clear from the cache.
-It supports wildcards (`*`).
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) - -[discrete] -==== mount -Mount a snapshot. -Mount a snapshot as a searchable snapshot index. -Do not use this API for snapshots managed by index lifecycle management (ILM). -Manually mounting ILM-managed snapshots can interfere with ILM processes. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount[Endpoint documentation] -[source,ts] ----- -client.searchableSnapshots.mount({ repository, snapshot, index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string)*: The name of the repository containing the snapshot of the index to mount. -** *`snapshot` (string)*: The name of the snapshot of the index to mount. -** *`index` (string)*: The name of the index contained in the snapshot whose data is to be mounted. -If no `renamed_index` is specified, this name will also be used to create the new index. -** *`renamed_index` (Optional, string)*: The name of the index that will be created. -** *`index_settings` (Optional, Record)*: The settings that should be added to the index when it is mounted. -** *`ignore_index_settings` (Optional, string[])*: The names of settings that should be removed from the index when it is mounted. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. -To indicate that the request should never timeout, set it to `-1`. -** *`wait_for_completion` (Optional, boolean)*: If true, the request blocks until the operation is complete. -** *`storage` (Optional, string)*: The mount option for the searchable snapshot index. - -[discrete] -==== stats -Get searchable snapshot statistics. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats[Endpoint documentation] -[source,ts] ----- -client.searchableSnapshots.stats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams and indices to retrieve statistics for. -** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Return stats aggregated at cluster, index or shard level - -[discrete] -=== security -[discrete] -==== activate_user_profile -Activate a user profile. - -Create or update a user profile on behalf of another user. - -NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. -Individual users and external applications should not call this API directly. -The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. -Elastic reserves the right to change or remove this feature in future releases without prior notice. 
-
-This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name`, `roles`, and the authentication realm.
-For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token.
-
-When updating a profile document, the API enables the document if it was disabled.
-Any updates do not change existing content for either the `labels` or `data` fields.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile[Endpoint documentation]
-[source,ts]
-----
-client.security.activateUserProfile({ grant_type })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`grant_type` (Enum("password" | "access_token"))*: The type of grant.
-** *`access_token` (Optional, string)*: The user's Elasticsearch access token or JWT.
-Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration.
-If you specify the `access_token` grant type, this parameter is required.
-It is not valid with other grant types.
-** *`password` (Optional, string)*: The user's password.
-If you specify the `password` grant type, this parameter is required.
-It is not valid with other grant types.
-** *`username` (Optional, string)*: The username that identifies the user.
-If you specify the `password` grant type, this parameter is required.
-It is not valid with other grant types.
-
-[discrete]
-==== authenticate
-Authenticate a user.
-
-Authenticates a user and returns information about the authenticated user.
-Include the user information in a https://en.wikipedia.org/wiki/Basic_access_authentication[basic auth header].
-A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user.
-If the user cannot be authenticated, this API returns a 401 status code.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate[Endpoint documentation]
-[source,ts]
-----
-client.security.authenticate()
-----
-
-
-[discrete]
-==== bulk_delete_role
-Bulk delete roles.
-
-The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
-The bulk delete roles API cannot delete roles that are defined in roles files.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role[Endpoint documentation]
-[source,ts]
-----
-client.security.bulkDeleteRole({ names })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`names` (string[])*: An array of role names to delete
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
-
-[discrete]
-==== bulk_put_role
-Bulk create or update roles.
-
-The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
-The bulk create or update roles API cannot update roles that are defined in roles files.
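-
-A minimal sketch of creating two roles in one request (the role names and privileges are illustrative):
-
-[source,ts]
-----
-await client.security.bulkPutRole({
-  roles: {
-    // Each key is a role name mapped to a role descriptor.
-    'logs-reader': {
-      indices: [{ names: ['logs-*'], privileges: ['read'] }]
-    },
-    'logs-writer': {
-      cluster: ['monitor'],
-      indices: [{ names: ['logs-*'], privileges: ['write', 'create_index'] }]
-    }
-  }
-})
-----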
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role[Endpoint documentation]
-[source,ts]
-----
-client.security.bulkPutRole({ roles })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`roles` (Record)*: A dictionary of role name to RoleDescriptor objects to add or update.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
-
-[discrete]
-==== bulk_update_api_keys
-Bulk update API keys.
-Update the attributes for multiple API keys.
-
-IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required.
-
-This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates.
-
-It is not possible to update expired or invalidated API keys.
-
-This API supports updates to API key access scope, metadata, and expiration.
-The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.
-The snapshot of the owner's permissions is updated automatically on every call.
-
-IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified.
-
-A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys[Endpoint documentation]
-[source,ts]
-----
-client.security.bulkUpdateApiKeys({ ids })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`ids` (string | string[])*: The API key identifiers.
-** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API keys.
-By default, API keys never expire.
-This property can be omitted to leave the value unchanged.
-** *`metadata` (Optional, Record)*: Arbitrary nested metadata to associate with the API keys.
-Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage.
-Any information specified with this parameter fully replaces metadata previously associated with the API key.
-** *`role_descriptors` (Optional, Record)*: The role descriptors to assign to the API keys.
-An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user.
-You can assign new privileges by specifying them in this parameter.
-To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`.
-If an API key has no assigned privileges, it inherits the owner user's full permissions.
-The snapshot of the owner's permissions is always updated, whether or not you supply the `role_descriptors` parameter.
-The structure of a role descriptor is the same as the request for the create API keys API.
-
-[discrete]
-==== change_password
-Change passwords.
-
-Change the passwords of users in the native realm and built-in users.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password[Endpoint documentation]
-[source,ts]
-----
-client.security.changePassword({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`username` (Optional, string)*: The user whose password you want to change. If you do not specify this
-parameter, the password is changed for the current user.
-** *`password` (Optional, string)*: The new password value. Passwords must be at least 6 characters long.
-** *`password_hash` (Optional, string)*: A hash of the new password value. This must be produced using the same
-hashing algorithm as has been configured for password storage. For more details,
-see the explanation of the `xpack.security.authc.password_hashing.algorithm`
-setting.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
-
-[discrete]
-==== clear_api_key_cache
-Clear the API key cache.
-
-Evict a subset of all entries from the API key cache.
-The cache is also automatically cleared on state changes of the security index.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache[Endpoint documentation]
-[source,ts]
-----
-client.security.clearApiKeyCache({ ids })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`ids` (string | string[])*: List of API key IDs to evict from the API key cache.
-To evict all API keys, use `*`.
-Does not support other wildcard patterns.
-
-[discrete]
-==== clear_cached_privileges
-Clear the privileges cache.
-
-Evict privileges from the native application privilege cache.
-The cache is also automatically cleared for applications that have their privileges updated.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges[Endpoint documentation]
-[source,ts]
-----
-client.security.clearCachedPrivileges({ application })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`application` (string)*: A list of applications.
-To clear all applications, use an asterisk (`*`).
-It does not support other wildcard patterns.
-
-[discrete]
-==== clear_cached_realms
-Clear the user cache.
-
-Evict users from the user cache.
-You can completely clear the cache or evict specific users.
-
-User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request.
-There are realm settings that you can use to configure the user cache.
-For more information, refer to the documentation about controlling the user cache.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms[Endpoint documentation]
-[source,ts]
-----
-client.security.clearCachedRealms({ realms })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`realms` (string | string[])*: A list of realms.
-To clear all realms, use an asterisk (`*`).
-It does not support other wildcard patterns.
-** *`usernames` (Optional, string[])*: A list of the users to clear from the cache.
-If you do not specify this parameter, the API evicts all users from the user cache.
-
-[discrete]
-==== clear_cached_roles
-Clear the roles cache.
-
-Evict roles from the native role cache.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles[Endpoint documentation]
-[source,ts]
-----
-client.security.clearCachedRoles({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string | string[])*: A list of roles to evict from the role cache.
-To evict all roles, use an asterisk (`*`).
-It does not support other wildcard patterns.
-
-[discrete]
-==== clear_cached_service_tokens
-Clear service account token caches.
-
-Evict a subset of all entries from the service account token caches.
-Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index.
-This API clears matching entries from both caches.
-
-The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index.
-The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens[Endpoint documentation]
-[source,ts]
-----
-client.security.clearCachedServiceTokens({ namespace, service, name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`namespace` (string)*: The namespace, which is a top-level grouping of service accounts.
-** *`service` (string)*: The name of the service, which must be unique within its namespace.
-** *`name` (string | string[])*: A list of token names to evict from the service account token caches.
-Use a wildcard (`*`) to evict all tokens that belong to a service account.
-It does not support other wildcard patterns.
-
-[discrete]
-==== create_api_key
-Create an API key.
-
-Create an API key for access without requiring basic authentication.
-
-IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges.
-If you specify privileges, the API returns an error.
-
-A successful request returns a JSON structure that contains the API key, its unique id, and its name.
-If applicable, it also returns expiration information for the API key in milliseconds.
-
-NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.
-
-The API keys are created by the Elasticsearch API key service, which is automatically enabled.
-To configure or turn off the API key service, refer to the API key service setting documentation.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key[Endpoint documentation]
-[source,ts]
-----
-client.security.createApiKey({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`expiration` (Optional, string | -1 | 0)*: The expiration time for the API key.
-By default, API keys never expire.
-** *`name` (Optional, string)*: A name for the API key.
-** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key.
-When it is not specified or it is an empty array, the API key will have a point-in-time snapshot of permissions of the authenticated user.
-If you supply role descriptors, the resultant permissions are an intersection of the API key's permissions and the authenticated user's permissions, thereby limiting the access scope for API keys.
-The structure of a role descriptor is the same as the request for the create role API.
-For more details, refer to the create or update roles API.
-
-NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges.
-In this case, you must explicitly specify a role descriptor with no privileges.
-The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs.
-** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
-
-[discrete]
-==== create_cross_cluster_api_key
-Create a cross-cluster API key.
-
-Create an API key of the `cross_cluster` type for API key-based remote cluster access.
-A `cross_cluster` API key cannot be used to authenticate through the REST interface.
-
-IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error.
-
-Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled.
-
-NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property.
-
-A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds.
-
-By default, API keys never expire. You can specify expiration information when you create the API keys.
-
-Cross-cluster API keys can only be updated with the update cross-cluster API key API.
-Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key[Endpoint documentation]
-[source,ts]
-----
-client.security.createCrossClusterApiKey({ access, name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`access` ({ replication, search })*: The access to be granted to this API key.
-The access is composed of permissions for cross-cluster search and cross-cluster replication.
-At least one of them must be specified.
-
-NOTE: No explicit privileges should be specified for either search or replication access.
-The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly.
-** *`name` (string)*: Specifies the name for this API key.
-** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key.
-By default, API keys never expire.
-** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key.
-It supports a nested data structure.
-Within the metadata object, keys beginning with `_` are reserved for system usage.
-
-[discrete]
-==== create_service_token
-Create a service account token.
-
-Create a service account token for access without requiring basic authentication.
-
-NOTE: Service account tokens never expire.
-You must actively delete them if they are no longer needed.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token[Endpoint documentation]
-[source,ts]
-----
-client.security.createServiceToken({ namespace, service })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`namespace` (string)*: The name of the namespace, which is a top-level grouping of service accounts.
-** *`service` (string)*: The name of the service.
-** *`name` (Optional, string)*: The name for the service account token.
-If omitted, a random name will be generated.
-
-Token names must be at least one character and no more than 256 characters long.
-They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore.
-
-NOTE: Token names must be unique in the context of the associated service account.
-They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
-
-[discrete]
-==== delegate_pki
-Delegate PKI authentication.
-
-This API implements the exchange of an X509Certificate chain for an Elasticsearch access token.
-The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`.
-A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm.
-
-This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch.
-
-IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated.
-This is part of the TLS authentication process and it is delegated to the proxy that calls this API.
-The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki[Endpoint documentation]
-[source,ts]
-----
-client.security.delegatePki({ x509_certificate_chain })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`x509_certificate_chain` (string[])*: The X509Certificate chain, which is represented as an ordered string array.
-Each string in the array is a base64-encoded representation (Section 4 of RFC 4648, not base64url-encoded) of the certificate's DER encoding.
-
-The first element is the target certificate that contains the subject distinguished name that is requesting access.
-This may be followed by additional certificates; each subsequent certificate is used to certify the previous one.
-
-[discrete]
-==== delete_privileges
-Delete application privileges.
-
-To use this API, you must have one of the following privileges:
-
-* The `manage_security` cluster privilege (or a greater privilege such as `all`).
-* The "Manage Application Privileges" global privilege for the application being referenced in the request.
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges[Endpoint documentation] -[source,ts] ----- -client.security.deletePrivileges({ application, name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`application` (string)*: The name of the application. -Application privileges are always associated with exactly one application. -** *`name` (string | string[])*: The name of the privilege. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== delete_role -Delete roles. - -Delete roles in the native realm. -The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. -The delete roles API cannot remove roles that are defined in roles files. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role[Endpoint documentation] -[source,ts] ----- -client.security.deleteRole({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the role. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== delete_role_mapping -Delete role mappings. - -Role mappings define which roles are assigned to each user. -The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. -The delete role mappings API cannot remove role mappings that are defined in role mapping files. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping[Endpoint documentation] -[source,ts] ----- -client.security.deleteRoleMapping({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The distinct name that identifies the role mapping. -The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== delete_service_token -Delete service account tokens. - -Delete service account tokens for a service in a specified namespace. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token[Endpoint documentation] -[source,ts] ----- -client.security.deleteServiceToken({ namespace, service, name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`namespace` (string)*: The namespace, which is a top-level grouping of service accounts. -** *`service` (string)*: The service name. -** *`name` (string)*: The name of the service account token. 
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
-
-[discrete]
-==== delete_user
-Delete users.
-
-Delete users from the native realm.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user[Endpoint documentation]
-[source,ts]
-----
-client.security.deleteUser({ username })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`username` (string)*: An identifier for the user.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
-
-[discrete]
-==== disable_user
-Disable users.
-
-Disable users in the native realm.
-By default, when you create users, they are enabled.
-You can use this API to revoke a user's access to Elasticsearch.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user[Endpoint documentation]
-[source,ts]
-----
-client.security.disableUser({ username })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`username` (string)*: An identifier for the user.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
-
-[discrete]
-==== disable_user_profile
-Disable a user profile.
-
-Disable user profiles so that they are not visible in user profile searches.
-
-NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
-Individual users and external applications should not call this API directly.
-Elastic reserves the right to change or remove this feature in future releases without prior notice.
-
-When you activate a user profile, it’s automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches.
-To re-enable a disabled user profile, use the enable user profile API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile[Endpoint documentation]
-[source,ts]
-----
-client.security.disableUserProfile({ uid })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`uid` (string)*: Unique identifier for the user profile.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search.
-If 'wait_for', it waits for a refresh to make this operation visible to search.
-If 'false', it does nothing with refreshes.
-
-[discrete]
-==== enable_user
-Enable users.
-
-Enable users in the native realm.
-By default, when you create users, they are enabled.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user[Endpoint documentation]
-[source,ts]
-----
-client.security.enableUser({ username })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`username` (string)*: An identifier for the user.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== enable_user_profile -Enable a user profile. - -Enable user profiles to make them visible in user profile searches. - -NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. -Individual users and external applications should not call this API directly. -Elastic reserves the right to change or remove this feature in future releases without prior notice. - -When you activate a user profile, it's automatically enabled and visible in user profile searches. -If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile[Endpoint documentation] -[source,ts] ----- -client.security.enableUserProfile({ uid }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`uid` (string)*: A unique identifier for the user profile. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation -visible to search. -If 'wait_for', it waits for a refresh to make this operation visible to search. -If 'false', nothing is done with refreshes. - -[discrete] -==== enroll_kibana -Enroll Kibana. - -Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. - -NOTE: This API is currently intended for internal use only by Kibana. -Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana[Endpoint documentation] -[source,ts] ----- -client.security.enrollKibana() ----- - - -[discrete] -==== enroll_node -Enroll a node. - -Enroll a new node to allow it to join an existing cluster with security features enabled. - -The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. -The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node[Endpoint documentation] -[source,ts] ----- -client.security.enrollNode() ----- - - -[discrete] -==== get_api_key -Get API key information. - -Retrieves information for one or more API keys. -NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. -If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key[Endpoint documentation] -[source,ts] ----- -client.security.getApiKey({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: An API key id. -This parameter cannot be used with any of `name`, `realm_name` or `username`. 
-** *`name` (Optional, string)*: An API key name. -This parameter cannot be used with any of `id`, `realm_name` or `username`. -It supports prefix search with wildcard. -** *`owner` (Optional, boolean)*: A boolean flag that can be used to query API keys owned by the currently authenticated user. -The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. -** *`realm_name` (Optional, string)*: The name of an authentication realm. -This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. -** *`username` (Optional, string)*: The username of a user. -This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. -** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors -associated with the API key. An API key's actual -permission is the intersection of its assigned role -descriptors and the owner user's role descriptors. -** *`active_only` (Optional, boolean)*: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. -** *`with_profile_uid` (Optional, boolean)*: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. - -[discrete] -==== get_builtin_privileges -Get builtin privileges. - -Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges[Endpoint documentation] -[source,ts] ----- -client.security.getBuiltinPrivileges() ----- - - -[discrete] -==== get_privileges -Get application privileges. - -To use this API, you must have one of the following privileges: - -* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). -* The "Manage Application Privileges" global privilege for the application being referenced in the request. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges[Endpoint documentation] -[source,ts] ----- -client.security.getPrivileges({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`application` (Optional, string)*: The name of the application. -Application privileges are always associated with exactly one application. -If you do not specify this parameter, the API returns information about all privileges for all applications. -** *`name` (Optional, string | string[])*: The name of the privilege. -If you do not specify this parameter, the API returns information about all privileges for the requested application. - -[discrete] -==== get_role -Get roles. - -Get roles in the native realm. -The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. -The get roles API cannot retrieve roles that are defined in roles files. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role[Endpoint documentation] -[source,ts] ----- -client.security.getRole({ ... 
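-  // A hypothetical example, not from this reference: pass `name` to fetch
-  // specific roles, or omit it entirely to list every role.
-  // name: ['my_admin_role'],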
})
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string | string[])*: The name of the role.
-You can specify multiple roles as a list.
-If you do not specify this parameter, the API returns information about all roles.
-
-[discrete]
-==== get_role_mapping
-Get role mappings.
-
-Role mappings define which roles are assigned to each user.
-The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files.
-The get role mappings API cannot retrieve role mappings that are defined in role mapping files.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping[Endpoint documentation]
-[source,ts]
-----
-client.security.getRoleMapping({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string | string[])*: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings.
-
-[discrete]
-==== get_service_accounts
-Get service accounts.
-
-Get a list of service accounts that match the provided path parameters.
-
-NOTE: Currently, only the `elastic/fleet-server` service account is available.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts[Endpoint documentation]
-[source,ts]
-----
-client.security.getServiceAccounts({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`namespace` (Optional, string)*: The name of the namespace.
-Omit this parameter to retrieve information about all service accounts.
-If you omit this parameter, you must also omit the `service` parameter.
-** *`service` (Optional, string)*: The service name.
-Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`.
-
-[discrete]
-==== get_service_credentials
-Get service account credentials.
-
-To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`).
-
-The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster.
-
-NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster.
-Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials[Endpoint documentation]
-[source,ts]
-----
-client.security.getServiceCredentials({ namespace, service })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`namespace` (string)*: The name of the namespace.
-** *`service` (string)*: The service name.
-
-[discrete]
-==== get_settings
-Get security index settings.
-
-Get the user-configurable settings for the security internal index (`.security` and associated indices).
-Only a subset of the index settings (those that are user-configurable) will be shown.
-This includes:
-
-* `index.auto_expand_replicas`
-* `index.number_of_replicas`
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings[Endpoint documentation]
-[source,ts]
-----
-client.security.getSettings({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== get_token
-Get a token.
-
-Create a bearer token for access without requiring basic authentication.
-The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface.
-Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting.
-When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.
-
-The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.
-
-A successful get token API call returns a JSON structure that contains the access token, the amount of time (in seconds) until the token expires, the type, and the scope if available.
-
-The tokens returned by the get token API have a finite period of time for which they are valid, and after that time period they can no longer be used.
-That time period is defined by the `xpack.security.authc.token.timeout` setting.
-If you want to invalidate a token immediately, you can do so by using the invalidate token API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token[Endpoint documentation]
-[source,ts]
-----
-client.security.getToken({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))*: The type of grant.
-Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`.
-** *`scope` (Optional, string)*: The scope of the token.
-Currently tokens are only issued for a scope of FULL regardless of the value sent with the request.
-** *`password` (Optional, string)*: The user's password.
-If you specify the `password` grant type, this parameter is required.
-This parameter is not valid with any other supported grant type.
-** *`kerberos_ticket` (Optional, string)*: The base64-encoded Kerberos ticket.
-If you specify the `_kerberos` grant type, this parameter is required.
-This parameter is not valid with any other supported grant type.
-** *`refresh_token` (Optional, string)*: The string that was returned when you created the token, which enables you to extend its life.
-If you specify the `refresh_token` grant type, this parameter is required.
-This parameter is not valid with any other supported grant type.
-** *`username` (Optional, string)*: The username that identifies the user.
-If you specify the `password` grant type, this parameter is required.
-This parameter is not valid with any other supported grant type.
-
-[discrete]
-==== get_user
-Get users.
-
-Get information about users in the native realm and built-in users.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user[Endpoint documentation]
-[source,ts]
-----
-client.security.getUser({ ...
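-  // A hypothetical example, not from this reference: list usernames to fetch
-  // specific users, or omit `username` to retrieve all users.
-  // username: ['jacknich'],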
})
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`username` (Optional, string | string[])*: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users.
-** *`with_profile_uid` (Optional, boolean)*: Determines whether to retrieve the user profile UID, if it exists, for the users.
-
-[discrete]
-==== get_user_privileges
-Get user privileges.
-
-Get the security privileges for the logged-in user.
-All users can use this API, but only to determine their own privileges.
-To check the privileges of other users, you must use the run as feature.
-To check whether a user has a specific list of privileges, use the has privileges API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges[Endpoint documentation]
-[source,ts]
-----
-client.security.getUserPrivileges({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`application` (Optional, string)*: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications.
-** *`priviledge` (Optional, string)*: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application.
-** *`username` (Optional, string | null)*
-
-[discrete]
-==== get_user_profile
-Get a user profile.
-
-Get a user's profile using the unique profile ID.
-
-NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
-Individual users and external applications should not call this API directly.
-Elastic reserves the right to change or remove this feature in future releases without prior notice.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile[Endpoint documentation]
-[source,ts]
-----
-client.security.getUserProfile({ uid })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`uid` (string | string[])*: A unique identifier for the user profile.
-** *`data` (Optional, string | string[])*: A list of filters for the `data` field of the profile document.
-To return all content use `data=*`.
-To return a subset of content use `data=<key>` to retrieve content nested under the specified `<key>`.
-By default returns no `data` content.
-
-[discrete]
-==== grant_api_key
-Grant an API key.
-
-Create an API key on behalf of another user.
-This API is similar to the create API keys API; however, it creates the API key for a user that is different from the user that runs the API.
-The caller must have authentication credentials for the user on whose behalf the API key will be created.
-It is not possible to use this API to create an API key without that user's credentials.
-The supported user authentication credential types are:
-
-* username and password
-* Elasticsearch access tokens
-* JWTs
-
-The user, for whom the authentication credentials are provided, can optionally "run as" (impersonate) another user.
-In this case, the API key will be created on behalf of the impersonated user.
-
-This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf.
-The API keys are created by the Elasticsearch API key service, which is automatically enabled. - -A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. -If applicable, it also returns expiration information for the API key in milliseconds. - -By default, API keys never expire. You can specify expiration information when you create the API keys. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key[Endpoint documentation] -[source,ts] ----- -client.security.grantApiKey({ api_key, grant_type }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`api_key` ({ name, expiration, role_descriptors, metadata })*: The API key. -** *`grant_type` (Enum("access_token" | "password"))*: The type of grant. Supported grant types are: `access_token`, `password`. -** *`access_token` (Optional, string)*: The user's access token. -If you specify the `access_token` grant type, this parameter is required. -It is not valid with other grant types. -** *`username` (Optional, string)*: The user name that identifies the user. -If you specify the `password` grant type, this parameter is required. -It is not valid with other grant types. -** *`password` (Optional, string)*: The user's password. -If you specify the `password` grant type, this parameter is required. -It is not valid with other grant types. -** *`run_as` (Optional, string)*: The name of the user to be impersonated. - -[discrete] -==== has_privileges -Check user privileges. - -Determine whether the specified user has a specified list of privileges. -All users can use this API, but only to determine their own privileges. -To check the privileges of other users, you must use the run as feature. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges[Endpoint documentation] -[source,ts] ----- -client.security.hasPrivileges({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`user` (Optional, string)*: Username -** *`application` (Optional, { application, privileges, resources }[])* -** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check. 
-** *`index` (Optional, { names, privileges, allow_restricted_indices }[])* - -[discrete] -==== has_privileges_user_profile -Check user profile privileges. - -Determine whether the users associated with the specified user profile IDs have all the requested privileges. - -NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. -Elastic reserves the right to change or remove this feature in future releases without prior notice. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile[Endpoint documentation] -[source,ts] ----- -client.security.hasPrivilegesUserProfile({ uids, privileges }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`uids` (string[])*: A list of profile IDs. The privileges are checked for associated users of the profiles. -** *`privileges` ({ application, cluster, index })*: An object containing all the privileges to be checked. - -[discrete] -==== invalidate_api_key -Invalidate API keys. - -This API invalidates API keys created by the create API key or grant API key APIs. -Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. - -To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. -The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. -The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. -The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. -In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - -- Set the parameter `owner=true`. -- Or, set both `username` and `realm_name` to match the user's identity. -- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key[Endpoint documentation] -[source,ts] ----- -client.security.invalidateApiKey({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)* -** *`ids` (Optional, string[])*: A list of API key ids. -This parameter cannot be used with any of `name`, `realm_name`, or `username`. -** *`name` (Optional, string)*: An API key name. -This parameter cannot be used with any of `ids`, `realm_name` or `username`. -** *`owner` (Optional, boolean)*: Query API keys owned by the currently authenticated user. -The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. - -NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. -** *`realm_name` (Optional, string)*: The name of an authentication realm. -This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. -** *`username` (Optional, string)*: The username of a user. -This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. 
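-
-As an illustration only, a minimal sketch of invalidating every REST API key owned by the currently authenticated user; with `owner` set to `true`, the `realm_name` and `username` parameters must be omitted, and the 8.x client is assumed to resolve with the response body:
-
-[source,ts]
-----
-// Invalidate all API keys owned by the calling user.
-const response = await client.security.invalidateApiKey({ owner: true })
-// The response lists the invalidated key IDs and any per-key errors.
-console.log(response.invalidated_api_keys, response.error_count)
-----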
-
-[discrete]
-==== invalidate_token
-Invalidate a token.
-
-The access tokens returned by the get token API have a finite period of time for which they are valid.
-After that time period, they can no longer be used.
-The time period is defined by the `xpack.security.authc.token.timeout` setting.
-
-The refresh tokens returned by the get token API are only valid for 24 hours.
-They can also be used exactly once.
-If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API.
-
-NOTE: While all parameters are optional, at least one of them is required.
-More specifically, either one of the `token` or `refresh_token` parameters is required.
-If neither of these two is specified, then `realm_name` and/or `username` need to be specified.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token[Endpoint documentation]
-[source,ts]
-----
-client.security.invalidateToken({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`token` (Optional, string)*: An access token.
-This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used.
-** *`refresh_token` (Optional, string)*: A refresh token.
-This parameter cannot be used if any of `token`, `realm_name`, or `username` are used.
-** *`realm_name` (Optional, string)*: The name of an authentication realm.
-This parameter cannot be used with either `refresh_token` or `token`.
-** *`username` (Optional, string)*: The username of a user.
-This parameter cannot be used with either `refresh_token` or `token`.
-
-[discrete]
-==== oidc_authenticate
-Authenticate OpenID Connect.
-
-Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.
-
-Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
-These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate[Endpoint documentation]
-[source,ts]
-----
-client.security.oidcAuthenticate({ nonce, redirect_uri, state })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`nonce` (string)*: Associate a client session with an ID token and mitigate replay attacks.
-This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call.
-** *`redirect_uri` (string)*: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication.
-This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider.
-** *`state` (string)*: Maintain state between the authentication request and the response.
-This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call.
-** *`realm` (Optional, string)*: The name of the OpenID Connect realm.
-This property is useful in cases where multiple realms are defined.
-
-[discrete]
-==== oidc_logout
-Logout of OpenID Connect.
-
-Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API.
-
-If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout.
-
-Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
-These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout[Endpoint documentation]
-[source,ts]
-----
-client.security.oidcLogout({ access_token })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`access_token` (string)*: The access token to be invalidated.
-** *`refresh_token` (Optional, string)*: The refresh token to be invalidated.
-
-[discrete]
-==== oidc_prepare_authentication
-Prepare OpenID Connect authentication.
-
-Create an OAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch.
-
-The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process.
-
-Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
-These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication[Endpoint documentation]
-[source,ts]
-----
-client.security.oidcPrepareAuthentication({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`iss` (Optional, string)*: In the case of a third-party initiated single sign-on, this is the issuer identifier for the OP that the RP is to send the authentication request to.
-It cannot be specified when *realm* is specified.
-One of *realm* or *iss* is required.
-** *`login_hint` (Optional, string)*: In the case of a third-party initiated single sign-on, this is a string value that is included in the authentication request as the *login_hint* parameter.
-This parameter is not valid when *realm* is specified.
-** *`nonce` (Optional, string)*: The value used to associate a client session with an ID token and to mitigate replay attacks.
-If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response.
-** *`realm` (Optional, string)*: The name of the OpenID Connect realm in Elasticsearch whose configuration should be used in order to generate the authentication request.
-It cannot be specified when *iss* is specified.
-One of *realm* or *iss* is required.
-** *`state` (Optional, string)*: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation.
-If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response.
-
-[discrete]
-==== put_privileges
-Create or update application privileges.
-
-To use this API, you must have one of the following privileges:
-
-* The `manage_security` cluster privilege (or a greater privilege such as `all`).
-* The "Manage Application Privileges" global privilege for the application being referenced in the request.
-
-Application names are formed from a prefix and an optional suffix, which must conform to the following rules:
-
-* The prefix must begin with a lowercase ASCII letter.
-* The prefix must contain only ASCII letters or digits.
-* The prefix must be at least 3 characters long.
-* If the suffix exists, it must begin with either a dash (`-`) or an underscore (`_`).
-* The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`.
-* No part of the name can contain whitespace.
-
-Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`.
-
-Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges[Endpoint documentation]
-[source,ts]
-----
-client.security.putPrivileges({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`privileges` (Optional, Record>)*
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
-
-[discrete]
-==== put_role
-Create or update roles.
-
-The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.
-The create or update roles API cannot update roles that are defined in roles files.
-File-based role management is not available in Elastic Serverless.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role[Endpoint documentation]
-[source,ts]
-----
-client.security.putRole({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role.
-** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries.
-
-[discrete]
-==== put_role
-Create or update roles.
-
-The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.
-The create or update roles API cannot update roles that are defined in roles files.
-File-based role management is not available in Elastic Serverless.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role[Endpoint documentation]
-[source,ts]
----
-client.security.putRole({ name })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role.
-** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries.
-** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role.
-** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges.
-** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries.
-** *`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])*: A list of remote indices permissions entries.
-
-NOTE: Remote indices are effective for remote clusters configured with the API key based model.
-They have no effect for remote clusters configured with the certificate based model.
-** *`remote_cluster` (Optional, { clusters, privileges }[])*: A list of remote cluster permissions entries.
-** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use.
-** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected.
-** *`description` (Optional, string)*: Optional description of the role descriptor.
-** *`transient_metadata` (Optional, Record)*: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API.
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
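-
-For example, a minimal sketch that creates a role with a single cluster privilege and read access to an index pattern (the role name and pattern are illustrative):
-
-[source,ts]
----
-await client.security.putRole({
-  name: 'logs_reader',
-  cluster: ['monitor'],
-  indices: [
-    { names: ['logs-*'], privileges: ['read'] }
-  ]
-})
----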
-
-[discrete]
-==== put_role_mapping
-Create or update role mappings.
-
-Role mappings define which roles are assigned to each user.
-Each mapping has rules that identify users and a list of roles that are granted to those users.
-The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.
-
-NOTE: This API does not create roles. Rather, it maps users to existing roles.
-Roles can be created by using the create or update roles API or roles files.
-
-**Role templates**
-
-The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name.
-For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch.
-The `roles` field is used for this purpose.
-
-For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user.
-The `role_templates` field is used for this purpose.
-
-NOTE: To use role templates successfully, the relevant scripting feature must be enabled.
-Otherwise, all attempts to create a role mapping with role templates fail.
-
-All of the user fields that are available in the role mapping rules are also available in the role templates.
-Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.
-
-By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user.
-If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping[Endpoint documentation]
-[source,ts]
----
-client.security.putRoleMapping({ name })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The distinct name that identifies the role mapping.
-The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way.
-** *`enabled` (Optional, boolean)*: Mappings that have `enabled` set to `false` are ignored when role mapping is performed.
-** *`metadata` (Optional, Record)*: Additional metadata that helps define which roles are assigned to each user.
-Within the metadata object, keys beginning with `_` are reserved for system usage.
-** *`roles` (Optional, string[])*: A list of role names that are granted to the users that match the role mapping rules.
-Exactly one of `roles` or `role_templates` must be specified.
-** *`role_templates` (Optional, { format, template }[])*: A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules.
-Exactly one of `roles` or `role_templates` must be specified.
-** *`rules` (Optional, { any, all, field, except })*: The rules that determine which users should be matched by the mapping.
-A rule is a logical condition that is expressed by using a JSON DSL.
-** *`run_as` (Optional, string[])*
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
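-
-Following the LDAP example above, a minimal sketch that grants the `superuser` role to members of that group:
-
-[source,ts]
----
-await client.security.putRoleMapping({
-  name: 'admins',
-  enabled: true,
-  roles: ['superuser'],
-  rules: { field: { groups: 'cn=admin,dc=example,dc=com' } }
-})
----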
- -[discrete] -==== put_user -Create or update users. - -Add and update users in the native realm. -A password is required for adding a new user but is optional when updating an existing user. -To change a user's password without updating any other fields, use the change password API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user[Endpoint documentation] -[source,ts] ----- -client.security.putUser({ username }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`username` (string)*: An identifier for the user. - -NOTE: Usernames must be at least 1 and no more than 507 characters. -They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. -Leading or trailing whitespace is not allowed. -** *`email` (Optional, string | null)*: The email of the user. -** *`full_name` (Optional, string | null)*: The full name of the user. -** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the user. -** *`password` (Optional, string)*: The user's password. -Passwords must be at least 6 characters long. -When adding a user, one of `password` or `password_hash` is required. -When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password -** *`password_hash` (Optional, string)*: A hash of the user's password. -This must be produced using the same hashing algorithm as has been configured for password storage. -For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. -Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. -The `password` parameter and the `password_hash` parameter cannot be used in the same request. -** *`roles` (Optional, string[])*: A set of roles the user has. -The roles determine the user's access permissions. -To create a user without any roles, specify an empty list (`[]`). -** *`enabled` (Optional, boolean)*: Specifies whether the user is enabled. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: Valid values are `true`, `false`, and `wait_for`. -These values have the same meaning as in the index API, but the default value for this API is true. - -[discrete] -==== query_api_keys -Find API keys with a query. - -Get a paginated list of API keys and their information. -You can optionally filter the results with a query. - -To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. -If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. -If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys[Endpoint documentation] -[source,ts] ----- -client.security.queryApiKeys({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`aggregations` (Optional, Record)*: Any aggregations to run over the corpus of returned API keys. -Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. 
-This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, -`cardinality`, `value_count`, `composite`, `filter`, and `filters`. -Additionally, aggregations only run over the same subset of fields that query works with. -** *`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which API keys to return. -If the query parameter is missing, it is equivalent to a `match_all` query. -The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, -`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. -You can query the following public information associated with an API key: `id`, `type`, `name`, -`creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. - -NOTE: The queryable string values associated with API keys are internally mapped as keywords. -Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. -Such a match query is hence equivalent to a `term` query. -** *`from` (Optional, number)*: The starting document offset. -It must not be negative. -By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. -To page through more hits, use the `search_after` parameter. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: The sort definition. -Other than `id`, all public fields of an API key are eligible for sorting. -In addition, sort can also be applied to the `_doc` field to sort by index order. -** *`size` (Optional, number)*: The number of hits to return. -It must not be negative. -The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. -By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. -To page through more hits, use the `search_after` parameter. -** *`search_after` (Optional, number | number | string | boolean | null[])*: The search after definition. -** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors associated with the API key. -An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). -An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. -** *`with_profile_uid` (Optional, boolean)*: Determines whether to also retrieve the profile UID for the API key owner principal. -If it exists, the profile UID is returned under the `profile_uid` response field for each API key. -** *`typed_keys` (Optional, boolean)*: Determines whether aggregation names are prefixed by their respective types in the response. - -[discrete] -==== query_role -Find roles with a query. - -Get roles in a paginated manner. -The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. -The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. -You can optionally filter the results with a query. -Also, the results can be paginated and sorted. 
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role[Endpoint documentation] -[source,ts] ----- -client.security.queryRole({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which roles to return. -If the query parameter is missing, it is equivalent to a `match_all` query. -The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, -`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. -You can query the following information associated with roles: `name`, `description`, `metadata`, -`applications.application`, `applications.privileges`, and `applications.resources`. -** *`from` (Optional, number)*: The starting document offset. -It must not be negative. -By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. -To page through more hits, use the `search_after` parameter. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: The sort definition. -You can sort on `username`, `roles`, or `enabled`. -In addition, sort can also be applied to the `_doc` field to sort by index order. -** *`size` (Optional, number)*: The number of hits to return. -It must not be negative. -By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. -To page through more hits, use the `search_after` parameter. -** *`search_after` (Optional, number | number | string | boolean | null[])*: The search after definition. - -[discrete] -==== query_user -Find users with a query. - -Get information for users in a paginated manner. -You can optionally filter the results with a query. - -NOTE: As opposed to the get user API, built-in users are excluded from the result. -This API is only for native users. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user[Endpoint documentation] -[source,ts] ----- -client.security.queryUser({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which users to return. -If the query parameter is missing, it is equivalent to a `match_all` query. -The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, -`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. -You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. -** *`from` (Optional, number)*: The starting document offset. -It must not be negative. -By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. -To page through more hits, use the `search_after` parameter. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: The sort definition. -Fields eligible for sorting are: `username`, `roles`, `enabled`. -In addition, sort can also be applied to the `_doc` field to sort by index order. -** *`size` (Optional, number)*: The number of hits to return. -It must not be negative. -By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. 
-To page through more hits, use the `search_after` parameter.
-** *`search_after` (Optional, number | number | string | boolean | null[])*: The search after definition.
-** *`with_profile_uid` (Optional, boolean)*: Determines whether to retrieve the user profile UID, if it exists, for the users.
-
-[discrete]
-==== saml_authenticate
-Authenticate SAML.
-
-Submit a SAML response message to Elasticsearch for consumption.
-
-NOTE: This API is intended for use by custom web applications other than Kibana.
-If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
-
-The SAML message that is submitted can be:
-
-* A response to a SAML authentication request that was previously created using the SAML prepare authentication API.
-* An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow.
-
-In either case, the SAML message needs to be a base64 encoded XML document with a root element of `<Response>`.
-
-After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.
-This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate[Endpoint documentation]
-[source,ts]
----
-client.security.samlAuthenticate({ content, ids })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`content` (string)*: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document.
-** *`ids` (string | string[])*: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
-** *`realm` (Optional, string)*: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined.
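-
-A minimal sketch that exchanges a SAML response for Elasticsearch tokens (the content and request ID values are placeholders):
-
-[source,ts]
----
-const response = await client.security.samlAuthenticate({
-  content: '<base64 encoded SAML response>',
-  ids: ['4fee3b046395c4e751011e97f8900b5273d56685']
-})
-// The response contains the access and refresh tokens to use afterwards.
----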
-
-[discrete]
-==== saml_complete_logout
-Logout of SAML completely.
-
-Verifies the logout response sent from the SAML IdP.
-
-NOTE: This API is intended for use by custom web applications other than Kibana.
-If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
-
-The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout.
-This API verifies the response by ensuring the content is relevant and validating its signature.
-An empty response is returned if the verification process is successful.
-The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding.
-The caller of this API must prepare the request accordingly so that this API can handle either of them.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout[Endpoint documentation]
-[source,ts]
----
-client.security.samlCompleteLogout({ realm, ids })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`realm` (string)*: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response.
-** *`ids` (string | string[])*: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
-** *`query_string` (Optional, string)*: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI.
-** *`content` (Optional, string)*: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response.
-
-[discrete]
-==== saml_invalidate
-Invalidate SAML.
-
-Submit a SAML LogoutRequest message to Elasticsearch for consumption.
-
-NOTE: This API is intended for use by custom web applications other than Kibana.
-If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
-
-The logout request comes from the SAML IdP during an IdP initiated Single Logout.
-The custom web application can use this API to have Elasticsearch process the `LogoutRequest`.
-After successful validation of the request, Elasticsearch invalidates the access token and refresh token that correspond to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message.
-Thus the user can be redirected back to their IdP.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate[Endpoint documentation]
-[source,ts]
----
-client.security.samlInvalidate({ query_string })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`query_string` (string)*: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout.
-This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded.
-If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself.
-In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser.
-The client application must not attempt to parse or process the string in any way.
-** *`acs` (Optional, string)*: The Assertion Consumer Service URL that matches that of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter.
-** *`realm` (Optional, string)*: The name of the SAML realm in Elasticsearch whose configuration should be used. You must specify either this parameter or the `acs` parameter.
-
-[discrete]
-==== saml_logout
-Logout of SAML.
-
-Submits a request to invalidate an access token and refresh token.
-
-NOTE: This API is intended for use by custom web applications other than Kibana.
-If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
-
-This API invalidates the tokens that were generated for a user by the SAML authenticate API.
-If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout[Endpoint documentation]
-[source,ts]
----
-client.security.samlLogout({ token })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`token` (string)*: The access token that was returned as a response to calling the SAML authenticate API.
-Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`.
-** *`refresh_token` (Optional, string)*: The refresh token that was returned as a response to calling the SAML authenticate API.
-Alternatively, the most recent refresh token that was received after refreshing the original access token.
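-
-A minimal sketch that invalidates the tokens obtained from the SAML authenticate API (the token values are placeholders):
-
-[source,ts]
----
-const response = await client.security.samlLogout({
-  token: '<access token from samlAuthenticate>',
-  refresh_token: '<refresh token from samlAuthenticate>'
-})
-// If the realm and IdP support it, the response includes a redirect URL
-// that starts an SP-initiated Single Logout.
----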
-
-[discrete]
-==== saml_prepare_authentication
-Prepare SAML authentication.
-
-Create a SAML authentication request (`<AuthnRequest>`) as a URL string based on the configuration of the respective SAML realm in Elasticsearch.
-
-NOTE: This API is intended for use by custom web applications other than Kibana.
-If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
-
-This API returns a URL pointing to the SAML Identity Provider.
-You can use the URL to redirect the browser of the user in order to continue the authentication process.
-The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded.
-If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`.
-These parameters contain the algorithm used for the signature and the signature value itself.
-It also returns a random string that uniquely identifies this SAML Authentication request.
-The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication[Endpoint documentation]
-[source,ts]
----
-client.security.samlPrepareAuthentication({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`acs` (Optional, string)*: The Assertion Consumer Service URL that matches one of the SAML realms in Elasticsearch.
-The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter.
-** *`realm` (Optional, string)*: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request.
-You must specify either this parameter or the `acs` parameter.
-** *`relay_state` (Optional, string)*: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter.
-If the Authentication Request is signed, this value is used as part of the signature computation.
-
-[discrete]
-==== saml_service_provider_metadata
-Create SAML service provider metadata.
-
-Generate SAML metadata for a SAML 2.0 Service Provider.
-
-The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file.
-This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata[Endpoint documentation]
-[source,ts]
----
-client.security.samlServiceProviderMetadata({ realm_name })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`realm_name` (string)*: The name of the SAML realm in Elasticsearch.
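-
-A minimal sketch that generates the Service Provider metadata for a SAML realm (the realm name `saml1` is illustrative):
-
-[source,ts]
----
-const response = await client.security.samlServiceProviderMetadata({
-  realm_name: 'saml1'
-})
-// The response contains the metadata as an XML string.
----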
-
-[discrete]
-==== suggest_user_profiles
-Suggest a user profile.
-
-Get suggestions for user profiles that match specified search criteria.
-
-NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
-Individual users and external applications should not call this API directly.
-Elastic reserves the right to change or remove this feature in future releases without prior notice.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles[Endpoint documentation]
-[source,ts]
----
-client.security.suggestUserProfiles({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string)*: A query string used to match name-related fields in user profile documents.
-Name-related fields are the user's `username`, `full_name`, and `email`.
-** *`size` (Optional, number)*: The number of profiles to return.
-** *`data` (Optional, string | string[])*: A list of filters for the `data` field of the profile document.
-To return all content use `data=*`.
-To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`.
-By default, the API returns no `data` content.
-It is an error to specify `data` as both the query parameter and the request body field.
-** *`hint` (Optional, { uids, labels })*: Extra search criteria to improve relevance of the suggestion result.
-Profiles matching the specified hint are ranked higher in the response.
-Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query.
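-
-A minimal sketch that suggests up to ten profiles matching a name and returns only the `app1` data namespace (the query string and namespace are illustrative):
-
-[source,ts]
----
-const response = await client.security.suggestUserProfiles({
-  name: 'jack',
-  size: 10,
-  data: 'app1'
-})
----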
-
-[discrete]
-==== update_api_key
-Update an API key.
-
-Update attributes of an existing API key.
-This API supports updates to an API key's access scope, expiration, and metadata.
-
-To use this API, you must have at least the `manage_own_api_key` cluster privilege.
-Users can only update API keys that they created or that were granted to them.
-To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.
-
-IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required.
-
-Use this API to update API keys created by the create API key or grant API Key APIs.
-If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead.
-It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.
-
-The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.
-The snapshot of the owner's permissions is updated automatically on every call.
-
-IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope.
-This change can occur if the owner user's permissions have changed since the API key was created or last modified.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key[Endpoint documentation]
-[source,ts]
----
-client.security.updateApiKey({ id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The ID of the API key to update.
-** *`role_descriptors` (Optional, Record)*: The role descriptors to assign to this API key.
-The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user.
-You can assign new privileges by specifying them in this parameter.
-To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`.
-If an API key has no assigned privileges, it inherits the owner user's full permissions.
-The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not.
-The structure of a role descriptor is the same as the request for the create API keys API.
-** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key.
-It supports a nested data structure.
-Within the metadata object, keys beginning with `_` are reserved for system usage.
-When specified, this value fully replaces the metadata previously associated with the API key.
-** *`expiration` (Optional, string | -1 | 0)*: The expiration time for the API key.
-By default, API keys never expire.
-This property can be omitted to leave the expiration unchanged.
-
-[discrete]
-==== update_cross_cluster_api_key
-Update a cross-cluster API key.
-
-Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access.
-
-To use this API, you must have at least the `manage_security` cluster privilege.
-Users can only update API keys that they created.
-To update another user's API key, use the `run_as` feature to submit a request on behalf of another user.
-
-IMPORTANT: It's not possible to use an API key as the authentication credential for this API.
-To update an API key, the owner user's credentials are required.
-
-It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.
-
-This API supports updates to an API key's access scope, metadata, and expiration.
-The owner user's information, such as the `username` and `realm`, is also updated automatically on every call.
-
-NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key[Endpoint documentation]
-[source,ts]
----
-client.security.updateCrossClusterApiKey({ id, access })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The ID of the cross-cluster API key to update.
-** *`access` ({ replication, search })*: The access to be granted to this API key.
-The access is composed of permissions for cross cluster search and cross cluster replication.
-At least one of them must be specified.
-When specified, the new access assignment fully replaces the previously assigned access.
-** *`expiration` (Optional, string | -1 | 0)*: The expiration time for the API key.
-By default, API keys never expire. This property can be omitted to leave the value unchanged.
-** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key.
-It supports a nested data structure.
-Within the metadata object, keys beginning with `_` are reserved for system usage.
-When specified, this information fully replaces metadata previously associated with the API key.
-
-[discrete]
-==== update_settings
-Update security index settings.
-
-Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`.
-
-NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates.
-
-If a specific index is not in use on the system and settings are provided for it, the request will be rejected.
-This API does not yet support configuring the settings for indices before they are in use.
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings[Endpoint documentation] -[source,ts] ----- -client.security.updateSettings({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`security` (Optional, { index })*: Settings for the index used for most security configuration, including native realm users and roles configured with the API. -** *`security-profile` (Optional, { index })*: Settings for the index used to store profile information. -** *`security-tokens` (Optional, { index })*: Settings for the index used to store tokens. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== update_user_profile_data -Update user profile data. - -Update specific data for the user profile that is associated with a unique ID. - -NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. -Individual users and external applications should not call this API directly. -Elastic reserves the right to change or remove this feature in future releases without prior notice. - -To use this API, you must have one of the following privileges: - -* The `manage_user_profile` cluster privilege. -* The `update_profile_data` global privilege for the namespaces that are referenced in the request. - -This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. -New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. - -For both labels and data, content is namespaced by the top-level fields. -The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data[Endpoint documentation] -[source,ts] ----- -client.security.updateUserProfileData({ uid }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`uid` (string)*: A unique identifier for the user profile. -** *`labels` (Optional, Record)*: Searchable data that you want to associate with the user profile. -This field supports a nested data structure. -Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). -** *`data` (Optional, Record)*: Non-searchable data that you want to associate with the user profile. -This field supports a nested data structure. -Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). -The data object is not searchable, but can be retrieved with the get user profile API. -** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. -** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation -visible to search. -If 'wait_for', it waits for a refresh to make this operation visible to search. -If 'false', nothing is done with refreshes. 
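-
-A minimal sketch that writes a searchable label and a non-searchable data entry under a namespace (the UID and the `app1` namespace are illustrative):
-
-[source,ts]
----
-await client.security.updateUserProfileData({
-  uid: '<profile uid>',
-  labels: { app1: { tier: 'gold' } },
-  data: { app1: { theme: 'default' } }
-})
----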
-
-[discrete]
-=== shutdown
-[discrete]
-==== delete_node
-Cancel node shutdown preparations.
-Remove a node from the shutdown list so it can resume normal operations.
-You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster.
-Shutdown requests are never removed automatically by Elasticsearch.
-
-NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes.
-Direct use is not supported.
-
-If the operator privileges feature is enabled, you must be an operator to use this API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node[Endpoint documentation]
-[source,ts]
----
-client.shutdown.deleteNode({ node_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`node_id` (string)*: The ID of the node to be removed from the shutdown state.
-** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== get_node
-Get the shutdown status.
-
-Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled.
-The API returns status information for each part of the shut down process.
-
-NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
-
-If the operator privileges feature is enabled, you must be an operator to use this API.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node[Endpoint documentation]
-[source,ts]
----
-client.shutdown.getNode({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`node_id` (Optional, string | string[])*: The node or nodes for which to retrieve the shutdown status.
-** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
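-
-A minimal sketch that retrieves the shutdown status for all nodes, and for one specific node (the node ID is illustrative):
-
-[source,ts]
----
-const all = await client.shutdown.getNode()
-const one = await client.shutdown.getNode({ node_id: 'USpTGYaBSIKbgSUJR2Z9lg' })
----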
-
-[discrete]
-==== put_node
-Prepare a node to be shut down.
-
-NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
-
-If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster.
-
-If the operator privileges feature is enabled, you must be an operator to use this API.
-
-The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster.
-This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.
-
-You must specify the type of shutdown: `restart`, `remove`, or `replace`.
-If a node is already being prepared for shutdown, you can use this API to change the shutdown type.
-
-IMPORTANT: This API does NOT terminate the Elasticsearch process.
-Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node[Endpoint documentation]
-[source,ts]
----
-client.shutdown.putNode({ node_id, type, reason })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`node_id` (string)*: The node identifier.
-This parameter is not validated against the cluster's active nodes.
-This enables you to register a node for shut down while it is offline.
-No error is thrown if you specify an invalid node ID.
-** *`type` (Enum("restart" | "remove" | "replace"))*: Valid values are restart, remove, or replace.
-Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance.
-Because the node is expected to rejoin the cluster, data is not migrated off of the node.
-Use remove when you need to permanently remove a node from the cluster.
-The node is not marked ready for shutdown until data is migrated off of the node.
-Use replace to do a 1:1 replacement of a node with another node.
-Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node.
-During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete.
-** *`reason` (string)*: A human-readable reason that the node is being shut down.
-This field provides information for other cluster operators; it does not affect the shut down process.
-** *`allocation_delay` (Optional, string)*: Only valid if type is restart.
-Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes.
-This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting.
-If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used.
-** *`target_node_name` (Optional, string)*: Only valid if type is replace.
-Specifies the name of the node that is replacing the node being shut down.
-Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node.
-During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules.
-** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
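-
-A minimal sketch that registers a restart-type shutdown for a node (the node ID, reason, and delay are illustrative):
-
-[source,ts]
----
-await client.shutdown.putNode({
-  node_id: 'USpTGYaBSIKbgSUJR2Z9lg',
-  type: 'restart',
-  reason: 'Applying a configuration change',
-  allocation_delay: '20m'
-})
----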
-
-[discrete]
-=== simulate
-[discrete]
-==== ingest
-Simulate data ingestion.
-Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.
-
-This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.
-
-The API runs the default and final pipeline for that index against a set of documents provided in the body of the request.
-If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well, in the same way that a non-simulated ingest would.
-No data is indexed into Elasticsearch.
-Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.
-The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.
-
-This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.
-The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.
-
-By default, the pipeline definitions that are currently in the system are used.
-However, you can supply substitute pipeline definitions in the body of the request.
-These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest[Endpoint documentation]
-[source,ts]
----
-client.simulate.ingest({ docs })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline.
-** *`index` (Optional, string)*: The index to simulate ingesting into.
-This value can be overridden by specifying an index on each document.
-If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument.
-** *`component_template_substitutions` (Optional, Record)*: A map of component template names to substitute component template definition objects.
-** *`index_template_subtitutions` (Optional, Record)*: A map of index template names to substitute index template definition objects.
-** *`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*
-** *`pipeline_substitutions` (Optional, Record)*: Pipelines to test.
-If you don’t specify the `pipeline` request path parameter, this parameter is required.
-If you specify both this and the request path parameter, the API only uses the request path parameter.
-** *`pipeline` (Optional, string)*: The pipeline to use as the default pipeline.
-This value can be used to override the default pipeline of the index.
-
-[discrete]
-=== slm
-[discrete]
-==== delete_lifecycle
-Delete a policy.
-Delete a snapshot lifecycle policy definition.
-This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle[Endpoint documentation]
-[source,ts]
----
-client.slm.deleteLifecycle({ policy_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`policy_id` (string)*: The id of the snapshot lifecycle policy to remove.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== execute_lifecycle -Run a policy. -Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. -The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle[Endpoint documentation] -[source,ts] ----- -client.slm.executeLifecycle({ policy_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`policy_id` (string)*: The id of the snapshot lifecycle policy to be executed -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== execute_retention -Run a retention policy. -Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. -The retention policy is normally applied according to its schedule. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention[Endpoint documentation] -[source,ts] ----- -client.slm.executeRetention({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_lifecycle -Get policy information. -Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle[Endpoint documentation] -[source,ts] ----- -client.slm.getLifecycle({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`policy_id` (Optional, string | string[])*: List of snapshot lifecycle policies to retrieve -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_stats -Get snapshot lifecycle management statistics. -Get global and policy-level statistics about actions taken by snapshot lifecycle management. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats[Endpoint documentation] -[source,ts] ----- -client.slm.getStats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_status -Get the snapshot lifecycle management status. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status[Endpoint documentation] -[source,ts] ----- -client.slm.getStatus({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -To indicate that the request should never timeout, set it to `-1`. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -To indicate that the request should never timeout, set it to `-1`. - -[discrete] -==== put_lifecycle -Create or update a policy. -Create or update a snapshot lifecycle policy. -If the policy already exists, this request increments the policy version. -Only the latest version of a policy is stored. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle[Endpoint documentation] -[source,ts] ----- -client.slm.putLifecycle({ policy_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`policy_id` (string)*: The identifier for the snapshot lifecycle policy you want to create or update. -** *`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })*: Configuration for each snapshot created by the policy. -** *`name` (Optional, string)*: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. -** *`repository` (Optional, string)*: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. -** *`retention` (Optional, { expire_after, max_count, min_count })*: Retention rules used to retain and delete snapshots created by the policy. -** *`schedule` (Optional, string)*: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -To indicate that the request should never timeout, set it to `-1`. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -To indicate that the request should never timeout, set it to `-1`. - -[discrete] -==== start -Start snapshot lifecycle management. -Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. -Manually starting SLM is necessary only if it has been stopped using the stop SLM API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start[Endpoint documentation] -[source,ts] ----- -client.slm.start({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. 
-If no response is received before the timeout expires, the request fails and returns an error.
-To indicate that the request should never timeout, set it to `-1`.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-To indicate that the request should never timeout, set it to `-1`.
-
-[discrete]
-==== stop
-Stop snapshot lifecycle management.
-Stop all snapshot lifecycle management (SLM) operations and the SLM plugin.
-This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices.
-Stopping SLM does not stop any snapshots that are in progress.
-You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped.
-
-The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped.
-Use the get snapshot lifecycle management status API to see if SLM is running.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop[Endpoint documentation]
-[source,ts]
----
-client.slm.stop({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-To indicate that the request should never timeout, set it to `-1`.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-To indicate that the request should never timeout, set it to `-1`.
-
-[discrete]
-=== snapshot
-[discrete]
-==== cleanup_repository
-Clean up the snapshot repository.
-Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository[Endpoint documentation]
-[source,ts]
----
-client.snapshot.cleanupRepository({ repository })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`repository` (string)*: The name of the snapshot repository to clean up.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
-If the master node is not available before the timeout expires, the request fails and returns an error.
-To indicate that the request should never timeout, set it to `-1`.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
-If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
-To indicate that the request should never timeout, set it to `-1`.
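-
-A minimal sketch that triggers a cleanup of a registered repository (the repository name is illustrative):
-
-[source,ts]
----
-const response = await client.snapshot.cleanupRepository({
-  repository: 'my_repository'
-})
-// The response reports how many bytes and blobs were removed, if any.
----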
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone[Endpoint documentation]
-[source,ts]
----
-client.snapshot.clone({ repository, snapshot, target_snapshot, indices })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`repository` (string)*: The name of the snapshot repository that both the source and target snapshots belong to.
-** *`snapshot` (string)*: The source snapshot name.
-** *`target_snapshot` (string)*: The target snapshot name.
-** *`indices` (string)*: A list of indices to include in the snapshot.
-Multi-target syntax is supported.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
-If the master node is not available before the timeout expires, the request fails and returns an error.
-To indicate that the request should never timeout, set it to `-1`.
-** *`timeout` (Optional, string | -1 | 0)*: The period of time to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== create
-Create a snapshot.
-Take a snapshot of a cluster or of data streams and indices.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create[Endpoint documentation]
-[source,ts]
----
-client.snapshot.create({ repository, snapshot })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`repository` (string)*: The name of the repository for the snapshot.
-** *`snapshot` (string)*: The name of the snapshot.
-It supports date math.
-It must be unique in the repository.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Determines how wildcard patterns in the `indices` parameter match data streams and indices.
-It supports a list of values such as `open,hidden`.
-** *`feature_states` (Optional, string[])*: The feature states to include in the snapshot.
-Each feature state includes one or more system indices containing related data.
-You can view a list of eligible features using the get features API.
-
-If `include_global_state` is `true`, all current feature states are included by default.
-If `include_global_state` is `false`, no feature states are included by default.
-
-Note that specifying an empty array will result in the default behavior.
-To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`).
-** *`ignore_unavailable` (Optional, boolean)*: If `true`, the request ignores data streams and indices in `indices` that are missing or closed.
-If `false`, the request returns an error for any data stream or index that is missing or closed.
-** *`include_global_state` (Optional, boolean)*: If `true`, the current cluster state is included in the snapshot.
-The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies.
-It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`).
-** *`indices` (Optional, string | string[])*: A list of data streams and indices to include in the snapshot.
-It supports a multi-target syntax.
-The default is an empty array (`[]`), which includes all regular data streams and regular indices.
-To exclude all data streams and indices, use `-*`.
-
-You can't use this parameter to include or exclude system indices or system data streams from a snapshot.
-Use `feature_states` instead.
-** *`metadata` (Optional, Record)*: Arbitrary metadata for the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data.
-It can have any contents but it must be less than 1024 bytes.
-This information is not automatically generated by Elasticsearch.
-** *`partial` (Optional, boolean)*: If `true`, it enables you to restore a partial snapshot of indices with unavailable shards.
-Only shards that were successfully included in the snapshot will be restored.
-All missing shards will be recreated as empty.
-
-If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request returns a response when the snapshot is complete.
-If `false`, the request returns a response when the snapshot initializes.
-
-[discrete]
-==== create_repository
-Create or update a snapshot repository.
-IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
-To register a snapshot repository, the cluster's global metadata must be writable.
-Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.
-
-Several options for this API can be specified using a query parameter or a request body parameter.
-If both parameters are specified, only the query parameter is used.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository[Endpoint documentation]
-[source,ts]
----
-client.snapshot.createRepository({ repository })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`repository` (string)*: The name of the snapshot repository to register or update.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
-If the master node is not available before the timeout expires, the request fails and returns an error.
-To indicate that the request should never timeout, set it to `-1`.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
-If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
-To indicate that the request should never timeout, set it to `-1`.
-** *`verify` (Optional, boolean)*: If `true`, the request verifies the repository is functional on all master and data nodes in the cluster.
-If `false`, this verification is skipped.
-You can also perform this verification with the verify snapshot repository API.
-
-[discrete]
-==== delete
-Delete snapshots.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete[Endpoint documentation]
-[source,ts]
----
-client.snapshot.delete({ repository, snapshot })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`repository` (string)*: The name of the repository to delete a snapshot from.
-** *`snapshot` (string)*: A list of snapshot names to delete.
-It also accepts wildcards (`*`).
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
-If the master node is not available before the timeout expires, the request fails and returns an error.
-To indicate that the request should never timeout, set it to `-1`.
-
-[discrete]
-==== delete_repository
-Delete snapshot repositories.
-When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots.
-The snapshots themselves are left untouched and in place.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository[Endpoint documentation]
-[source,ts]
----
-client.snapshot.deleteRepository({ repository })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`repository` (string | string[])*: The names of the snapshot repositories to unregister.
-Wildcard (`*`) patterns are supported.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
-If the master node is not available before the timeout expires, the request fails and returns an error.
-To indicate that the request should never timeout, set it to `-1`.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
-If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
-To indicate that the request should never timeout, set it to `-1`.
-
-[discrete]
-==== get
-Get snapshot information.
-
-NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots.
-It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration.
-Snapshots concurrently created may be seen during an iteration.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get[Endpoint documentation]
-[source,ts]
----
-client.snapshot.get({ repository, snapshot })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`repository` (string)*: A list of snapshot repository names used to limit the request.
-Wildcard (`*`) expressions are supported.
-** *`snapshot` (string | string[])*: A list of snapshot names to retrieve.
-Wildcards (`*`) are supported.
-
-* To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`.
-* To get information about any snapshots that are currently running, use `_current`.
-** *`after` (Optional, string)*: An offset identifier to start pagination from as returned by the `next` field in the response body.
-** *`from_sort_value` (Optional, string)*: The value of the current sort column at which to start retrieval.
-It can be a string (a snapshot or repository name) when sorting by snapshot or repository name.
-It can be a millisecond time value or a number when sorting by index count or shard count.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error for any snapshots that are unavailable.
-** *`index_details` (Optional, boolean)*: If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index.
-The default is `false`, meaning that this information is omitted. -** *`index_names` (Optional, boolean)*: If `true`, the response includes the name of each index in each snapshot. -** *`include_repository` (Optional, boolean)*: If `true`, the response includes the repository name in each snapshot. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`order` (Optional, Enum("asc" | "desc"))*: The sort order. -Valid values are `asc` for ascending and `desc` for descending order. -The default behavior is ascending order. -** *`offset` (Optional, number)*: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. -** *`size` (Optional, number)*: The maximum number of snapshots to return. -The default is 0, which means to return all that match the request without limit. -** *`slm_policy_filter` (Optional, string)*: Filter snapshots by a list of snapshot lifecycle management (SLM) policy names that snapshots belong to. - -You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. -For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. -Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. -To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. -** *`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))*: The sort order for the result. -The default behavior is sorting by snapshot start time stamp. -** *`verbose` (Optional, boolean)*: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. - -NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. - -[discrete] -==== get_repository -Get snapshot repository information. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository[Endpoint documentation] -[source,ts] ----- -client.snapshot.getRepository({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (Optional, string | string[])*: A list of snapshot repository names used to limit the request. -Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. - -To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. -** *`local` (Optional, boolean)*: If `true`, the request gets information from the local node only. -If `false`, the request gets information from the master node. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. 
-To indicate that the request should never timeout, set it to `-1`. - -[discrete] -==== repository_analyze -Analyze a snapshot repository. -Analyze the performance characteristics and any incorrect behaviour found in a repository. - -The response exposes implementation details of the analysis which may change from version to version. -The response body format is therefore not considered stable and may be different in newer versions. - -There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. -Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. - -The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. -Run your first analysis with the default parameter values to check for simple problems. -If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. -Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. -Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. - -If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. -This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. -If so, this storage system is not suitable for use as a snapshot repository. -You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. - -If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. -You can use this information to determine the performance of your storage system. -If any operation fails or returns an incorrect result, the API returns an error. -If the API returns an error, it may not have removed all the data it wrote to the repository. -The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. -You should verify that this location has been cleaned up correctly. -If there is still leftover data at the specified location, you should manually remove it. - -If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. -Some clients are configured to close their connection if no response is received within a certain timeout. -An analysis takes a long time to complete so you might need to relax any such client-side timeouts. -On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. -The path to the leftover data is recorded in the Elasticsearch logs. -You should verify that this location has been cleaned up correctly. 
-If there is still leftover data at the specified location, you should manually remove it. - -If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. -The analysis attempts to detect common bugs but it does not offer 100% coverage. -Additionally, it does not test the following: - -* Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. -* Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. -* Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. - -IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. -This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. -You must ensure this load does not affect other users of these systems. -Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. - -NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. - -NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. -A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. -This indicates it behaves incorrectly in ways that the former version did not detect. -You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. - -NOTE: This API may not work correctly in a mixed-version cluster. - -*Implementation details* - -NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. - -The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. -These tasks are distributed over the data and master-eligible nodes in the cluster for execution. - -For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. -The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. -If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. - -For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. -These reads are permitted to fail, but must not return partial data. 
-If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires.
-
-For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it.
-In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs.
-If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites.
-
-The executing node will use a variety of different methods to write the blob.
-For instance, where applicable, it will use both single-part and multi-part uploads.
-Similarly, the reading nodes will use a variety of different methods to read the data back again.
-For instance they may read the entire blob from start to end or may read only a subset of the data.
-
-For some blob-level tasks, the executing node will cancel the write before it is complete.
-In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob.
-
-Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation.
-This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time.
-The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type.
-Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed.
-Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results.
-If an operation fails due to contention, Elasticsearch retries the operation until it succeeds.
-Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob.
-Some operations also verify the behavior on small blobs with sizes other than 8 bytes.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze[Endpoint documentation]
-[source,ts]
----
-client.snapshot.repositoryAnalyze({ repository })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`repository` (string)*: The name of the repository.
-** *`blob_count` (Optional, number)*: The total number of blobs to write to the repository during the test.
-For realistic experiments, you should set it to at least `2000`.
-** *`concurrency` (Optional, number)*: The number of operations to run concurrently during the test.
-** *`detailed` (Optional, boolean)*: Indicates whether to return detailed results, including timing information for every operation performed during the analysis.
-If `false`, it returns only a summary of the analysis.
-** *`early_read_node_count` (Optional, number)*: The number of nodes on which to perform an early read operation while writing each blob.
-Early read operations are only rarely performed.
-** *`max_blob_size` (Optional, number | string)*: The maximum size of a blob to be written during the test.
-For realistic experiments, you should set it to at least `2gb`.
-** *`max_total_data_size` (Optional, number | string)*: An upper limit on the total size of all the blobs written during the test.
-For realistic experiments, you should set it to at least `1tb`.
-** *`rare_action_probability` (Optional, number)*: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob.
-** *`rarely_abort_writes` (Optional, boolean)*: Indicates whether to rarely cancel writes before they complete.
-** *`read_node_count` (Optional, number)*: The number of nodes on which to read a blob after writing.
-** *`register_operation_count` (Optional, number)*: The minimum number of linearizable register operations to perform in total.
-For realistic experiments, you should set it to at least `100`.
-** *`seed` (Optional, number)*: The seed for the pseudo-random number generator used to generate the list of operations performed during the test.
-To repeat the same set of operations in multiple experiments, use the same seed in each experiment.
-Note that the operations are performed concurrently so might not always happen in the same order on each run.
-** *`timeout` (Optional, string | -1 | 0)*: The period of time to wait for the test to complete.
-If no response is received before the timeout expires, the test is cancelled and returns an error.
-
-[discrete]
-==== restore
-Restore a snapshot.
-Restore a snapshot of a cluster or data streams and indices.
-
-You can restore a snapshot only to a running cluster with an elected master node.
-The snapshot repository must be registered and available to the cluster.
-The snapshot and cluster versions must be compatible.
-
-To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.
-
-Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:
-
----
-GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
----
-
-If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.
-
-If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore[Endpoint documentation]
-[source,ts]
----
-client.snapshot.restore({ repository, snapshot })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`repository` (string)*: The name of the repository to restore a snapshot from.
-** *`snapshot` (string)*: The name of the snapshot to restore.
-** *`feature_states` (Optional, string[])*: The feature states to restore.
-If `include_global_state` is `true`, the request restores all feature states in the snapshot by default.
-If `include_global_state` is `false`, the request restores no feature states by default.
-Note that specifying an empty array will result in the default behavior.
-To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`).
-** *`ignore_index_settings` (Optional, string[])*: The index settings to not restore from the snapshot.
-You can't use this option to ignore `index.number_of_shards`.
-
-For data streams, this option applies only to restored backing indices.
-New backing indices are configured using the data stream's matching index template. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. -If `false`, the request returns an error for any missing index or data stream. -** *`include_aliases` (Optional, boolean)*: If `true`, the request restores aliases for any restored data streams and indices. -If `false`, the request doesn’t restore aliases. -** *`include_global_state` (Optional, boolean)*: If `true`, restore the cluster state. The cluster state includes: - -* Persistent cluster settings -* Index templates -* Legacy index templates -* Ingest pipelines -* Index lifecycle management (ILM) policies -* Stored scripts -* For snapshots taken after 7.12.0, feature states - -If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. -It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. - -Use the `feature_states` parameter to configure how feature states are restored. - -If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. -** *`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Index settings to add or change in restored indices, including backing indices. -You can't use this option to change `index.number_of_shards`. - -For data streams, this option applies only to restored backing indices. -New backing indices are configured using the data stream's matching index template. -** *`indices` (Optional, string | string[])*: A list of indices and data streams to restore. -It supports a multi-target syntax. -The default behavior is all regular indices and regular data streams in the snapshot. - -You can't use this parameter to restore system indices or system data streams. -Use `feature_states` instead. -** *`partial` (Optional, boolean)*: If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. - -If true, it allows restoring a partial snapshot of indices with unavailable shards. -Only shards that were successfully included in the snapshot will be restored. -All missing shards will be recreated as empty. -** *`rename_pattern` (Optional, string)*: A rename pattern to apply to restored data streams and indices. 
-Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`.
-
-The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic.
-** *`rename_replacement` (Optional, string)*: The rename replacement string that is used with the `rename_pattern`.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
-If the master node is not available before the timeout expires, the request fails and returns an error.
-To indicate that the request should never timeout, set it to `-1`.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request returns a response when the restore operation completes.
-The operation is complete when it finishes all attempts to recover primary shards for restored indices.
-This applies even if one or more of the recovery attempts fail.
-
-If `false`, the request returns a response when the restore operation initializes.
-
-[discrete]
-==== status
-Get the snapshot status.
-Get a detailed description of the current state for each shard participating in the snapshot.
-
-Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots.
-If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.
-
-If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots.
-This usage is preferred.
-If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running.
-
-WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
-The API requires a read from the repository for each shard in each snapshot.
-For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).
-
-Depending on the latency of your storage, such requests can take an extremely long time to return results.
-These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status[Endpoint documentation]
-[source,ts]
----
-client.snapshot.status({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`repository` (Optional, string)*: The snapshot repository name used to limit the request.
-It supports wildcards (`*`) if `<snapshot>` isn't specified.
-** *`snapshot` (Optional, string | string[])*: A list of snapshots to retrieve status for.
-The default is currently running snapshots.
-Wildcards (`*`) are not supported.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error for any snapshots that are unavailable.
-If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned.
-** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
-If the master node is not available before the timeout expires, the request fails and returns an error.
-To indicate that the request should never timeout, set it to `-1`.
-
-[discrete]
-==== verify_repository
-Verify a snapshot repository.
-Check for common misconfigurations in a snapshot repository.
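-
-For example, a minimal sketch (the repository name is a placeholder; the `nodes` response property is an assumption about the verification response):
-
-[source,ts]
----
-import { Client } from '@elastic/elasticsearch'
-
-const client = new Client({ node: 'http://localhost:9200/' })
-
-// Each master and data node checks that it can access the repository.
-const response = await client.snapshot.verifyRepository({
-  repository: 'my_repository'
-})
-console.log(response.nodes) // nodes that successfully verified the repository
----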
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository[Endpoint documentation] -[source,ts] ----- -client.snapshot.verifyRepository({ repository }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string)*: The name of the snapshot repository to verify. -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. -To indicate that the request should never timeout, set it to `-1`. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. -If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. -To indicate that the request should never timeout, set it to `-1`. - -[discrete] -=== sql -[discrete] -==== clear_cursor -Clear an SQL search cursor. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor[Endpoint documentation] -[source,ts] ----- -client.sql.clearCursor({ cursor }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`cursor` (string)*: Cursor to clear. - -[discrete] -==== delete_async -Delete an async SQL search. -Delete an async SQL search or a stored synchronous SQL search. -If the search is still running, the API cancels it. - -If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: - -* Users with the `cancel_task` cluster privilege. -* The user who first submitted the search. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async[Endpoint documentation] -[source,ts] ----- -client.sql.deleteAsync({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The identifier for the search. - -[discrete] -==== get_async -Get async SQL search results. -Get the current status and available results for an async SQL search or stored synchronous SQL search. - -If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async[Endpoint documentation] -[source,ts] ----- -client.sql.getAsync({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The identifier for the search. -** *`delimiter` (Optional, string)*: The separator for CSV results. -The API supports this parameter only for CSV responses. -** *`format` (Optional, string)*: The format for the response. -You must specify a format using this parameter or the `Accept` HTTP header. -If you specify both, the API uses this parameter. -** *`keep_alive` (Optional, string | -1 | 0)*: The retention period for the search and its results. -It defaults to the `keep_alive` period for the original SQL search. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for complete results. -It defaults to no timeout, meaning the request waits for complete search results. - -[discrete] -==== get_async_status -Get the async SQL search status. -Get the current status of an async SQL search or a stored synchronous SQL search. 
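-
-For example, a minimal sketch of polling a search submitted earlier with the SQL search API (the identifier is a placeholder; `is_running` and `completion_status` are assumed response fields of this status API):
-
-[source,ts]
----
-import { Client } from '@elastic/elasticsearch'
-
-const client = new Client({ node: 'http://localhost:9200/' })
-
-// The identifier is returned by an earlier `client.sql.query(...)` call.
-const status = await client.sql.getAsyncStatus({ id: 'my-search-id' })
-if (status.is_running) {
-  console.log('still running, check back later')
-} else {
-  console.log('finished with HTTP status', status.completion_status)
-}
----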
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status[Endpoint documentation] -[source,ts] ----- -client.sql.getAsyncStatus({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The identifier for the search. - -[discrete] -==== query -Get SQL search results. -Run an SQL request. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query[Endpoint documentation] -[source,ts] ----- -client.sql.query({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`allow_partial_search_results` (Optional, boolean)*: If `true`, the response has partial results when there are shard request timeouts or shard failures. -If `false`, the API returns an error with no partial results. -** *`catalog` (Optional, string)*: The default catalog (cluster) for queries. -If unspecified, the queries execute on the data in the local cluster only. -** *`columnar` (Optional, boolean)*: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. -The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. -** *`cursor` (Optional, string)*: The cursor used to retrieve a set of paginated results. -If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. -It ignores other request body parameters. -** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response. -** *`field_multi_value_leniency` (Optional, boolean)*: If `false`, the API returns an exception when encountering multiple values for a field. -If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering. -** *`index_using_frozen` (Optional, boolean)*: If `true`, the search can run on frozen indices. -** *`keep_alive` (Optional, string | -1 | 0)*: The retention period for an async or saved synchronous search. -** *`keep_on_completion` (Optional, boolean)*: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. -If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. -** *`page_timeout` (Optional, string | -1 | 0)*: The minimum retention period for the scroll cursor. -After this time period, a pagination request might fail because the scroll cursor is no longer available. -Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. -** *`params` (Optional, Record)*: The values for parameters in the query. -** *`query` (Optional, string)*: The SQL query to run. 
-** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails.
-** *`runtime_mappings` (Optional, Record)*: One or more runtime fields for the search request.
-These fields take precedence over mapped fields with the same name.
-** *`time_zone` (Optional, string)*: The ISO-8601 time zone ID for the search.
-** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for complete results.
-It defaults to no timeout, meaning the request waits for complete search results.
-If the search doesn't finish within this period, the search becomes async.
-
-To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter.
-** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))*: The format for the response.
-You can also specify a format using the `Accept` HTTP header.
-If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence.
-
-[discrete]
-==== translate
-Translate SQL into Elasticsearch queries.
-Translate an SQL search into a search API request containing Query DSL.
-It accepts the same request body parameters as the SQL search API, excluding `cursor`.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate[Endpoint documentation]
-[source,ts]
----
-client.sql.translate({ query })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`query` (string)*: The SQL query to run.
-** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering.
-** *`time_zone` (Optional, string)*: The ISO-8601 time zone ID for the search.
-
-[discrete]
-=== ssl
-[discrete]
-==== certificates
-Get SSL certificates.
-
-Get information about the X.509 certificates that are used to encrypt communications in the cluster.
-The API returns a list that includes certificates from all TLS contexts including:
-
-- Settings for transport and HTTP interfaces
-- TLS settings that are used within authentication realms
-- TLS settings for remote monitoring exporters
-
-The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings.
-It also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings.
-
-The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.
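-
-For example, a minimal sketch that reports when each certificate expires (the exact response fields used here, `path`, `alias`, and `expiry`, are assumptions based on the certificate information this API returns):
-
-[source,ts]
----
-import { Client } from '@elastic/elasticsearch'
-
-const client = new Client({ node: 'https://localhost:9200/' })
-
-const certificates = await client.ssl.certificates()
-for (const cert of certificates) {
-  console.log(`${cert.path} (alias: ${cert.alias}) expires ${cert.expiry}`)
-}
----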
-
-NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.
-
-If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates[Endpoint documentation]
-[source,ts]
----
-client.ssl.certificates()
----
-
-
-[discrete]
-=== synonyms
-[discrete]
-==== delete_synonym
-Delete a synonym set.
-
-You can only delete a synonyms set that is not in use by any index analyzer.
-
-Synonyms sets can be used in synonym graph token filters and synonym token filters.
-These synonym filters can be used as part of search analyzers.
-
-Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open).
-Even if the analyzer is not used on any field mapping, it still needs to be loaded during the index recovery phase.
-
-If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available.
-To prevent that, synonyms sets that are used in analyzers can't be deleted.
-A delete request in this case will return a 400 response code.
-
-To remove a synonyms set, you must first remove all indices that contain analyzers using it.
-You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API to copy over the index data.
-Once finished, you can delete the index.
-When the synonyms set is not used in analyzers, you will be able to delete it.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym[Endpoint documentation]
-[source,ts]
----
-client.synonyms.deleteSynonym({ id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The synonyms set identifier to delete.
-
-[discrete]
-==== delete_synonym_rule
-Delete a synonym rule.
-Delete a synonym rule from a synonym set.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule[Endpoint documentation]
-[source,ts]
----
-client.synonyms.deleteSynonymRule({ set_id, rule_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`set_id` (string)*: The ID of the synonym set to update.
-** *`rule_id` (string)*: The ID of the synonym rule to delete.
-
-[discrete]
-==== get_synonym
-Get a synonym set.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym[Endpoint documentation]
-[source,ts]
----
-client.synonyms.getSynonym({ id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The synonyms set identifier to retrieve.
-** *`from` (Optional, number)*: The starting offset for synonym rules to retrieve.
-** *`size` (Optional, number)*: The maximum number of synonym rules to retrieve.
-
-[discrete]
-==== get_synonym_rule
-Get a synonym rule.
-Get a synonym rule from a synonym set.
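-
-For example, a minimal sketch (the set and rule identifiers are placeholders; the `synonyms` response field is an assumption mirroring the rule definition):
-
-[source,ts]
----
-import { Client } from '@elastic/elasticsearch'
-
-const client = new Client({ node: 'http://localhost:9200/' })
-
-// Fetch one rule from the `my-synonyms-set` synonyms set.
-const rule = await client.synonyms.getSynonymRule({
-  set_id: 'my-synonyms-set',
-  rule_id: 'rule-1'
-})
-console.log(rule.synonyms) // the rule definition, in Solr format
----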
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule[Endpoint documentation]
-[source,ts]
----
-client.synonyms.getSynonymRule({ set_id, rule_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`set_id` (string)*: The ID of the synonym set to retrieve the synonym rule from.
-** *`rule_id` (string)*: The ID of the synonym rule to retrieve.
-
-[discrete]
-==== get_synonyms_sets
-Get all synonym sets.
-Get a summary of all defined synonym sets.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym[Endpoint documentation]
-[source,ts]
----
-client.synonyms.getSynonymsSets({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`from` (Optional, number)*: The starting offset for synonyms sets to retrieve.
-** *`size` (Optional, number)*: The maximum number of synonyms sets to retrieve.
-
-[discrete]
-==== put_synonym
-Create or update a synonym set.
-Synonyms sets are limited to a maximum of 10,000 synonym rules per set.
-If you need to manage more synonym rules, you can create multiple synonym sets.
-
-When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices.
-This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym[Endpoint documentation]
-[source,ts]
----
-client.synonyms.putSynonym({ id, synonyms_set })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The ID of the synonyms set to be created or updated.
-** *`synonyms_set` ({ id, synonyms } | { id, synonyms }[])*: The synonym rule definitions for the synonyms set.
-
-[discrete]
-==== put_synonym_rule
-Create or update a synonym rule.
-Create or update a synonym rule in a synonym set.
-
-If any of the included synonym rules is invalid, the API returns an error.
-
-When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule[Endpoint documentation]
-[source,ts]
----
-client.synonyms.putSynonymRule({ set_id, rule_id, synonyms })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`set_id` (string)*: The ID of the synonym set.
-** *`rule_id` (string)*: The ID of the synonym rule to be updated or created.
-** *`synonyms` (string)*: The synonym rule definition, which must be in Solr format.
-
-[discrete]
-=== tasks
-[discrete]
-==== cancel
-Cancel a task.
-
-WARNING: The task management API is new and should still be considered a beta feature.
-The API may change in ways that are not backwards compatible.
-
-A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away.
-It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation.
-The get task information API will continue to list these cancelled tasks until they complete.
-The `cancelled` flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible.
-
-To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running.
-You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks[Endpoint documentation]
-[source,ts]
----
-client.tasks.cancel({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`task_id` (Optional, string | number)*: The task identifier.
-** *`actions` (Optional, string | string[])*: A list or wildcard expression of actions that is used to limit the request.
-** *`nodes` (Optional, string[])*: A list of node IDs or names that is used to limit the request.
-** *`parent_task_id` (Optional, string)*: A parent task ID that is used to limit the tasks.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until all found tasks are complete.
-
-[discrete]
-==== get
-Get task information.
-Get information about a task currently running in the cluster.
-
-WARNING: The task management API is new and should still be considered a beta feature.
-The API may change in ways that are not backwards compatible.
-
-If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks[Endpoint documentation]
-[source,ts]
----
-client.tasks.get({ task_id })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`task_id` (string)*: The task identifier.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the task has completed.
-
-[discrete]
-==== list
-Get all tasks.
-Get information about the tasks currently running on one or more nodes in the cluster.
-
-WARNING: The task management API is new and should still be considered a beta feature.
-The API may change in ways that are not backwards compatible.
-
-**Identifying running tasks**
-
-The `X-Opaque-Id` header, when provided on the HTTP request, is returned as a header in the response as well as in the `headers` field of the task information.
-This enables you to track certain calls or associate certain tasks with the client that started them.
-For example:
-
----
-curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents"
----
-
-The API returns the following result:
-
----
-HTTP/1.1 200 OK
-X-Opaque-Id: 123456
-content-type: application/json; charset=UTF-8
-content-length: 831
-
-{
-  "tasks" : {
-    "u5lcZHqcQhu-rUoFaqDphA:45" : {
-      "node" : "u5lcZHqcQhu-rUoFaqDphA",
-      "id" : 45,
-      "type" : "transport",
-      "action" : "cluster:monitor/tasks/lists",
-      "start_time_in_millis" : 1513823752749,
-      "running_time_in_nanos" : 293139,
-      "cancellable" : false,
-      "headers" : {
-        "X-Opaque-Id" : "123456"
-      },
-      "children" : [
-        {
-          "node" : "u5lcZHqcQhu-rUoFaqDphA",
-          "id" : 46,
-          "type" : "direct",
-          "action" : "cluster:monitor/tasks/lists[n]",
-          "start_time_in_millis" : 1513823752750,
-          "running_time_in_nanos" : 92133,
-          "cancellable" : false,
-          "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45",
-          "headers" : {
-            "X-Opaque-Id" : "123456"
-          }
-        }
-      ]
-    }
-  }
-}
----
-In this example, `X-Opaque-Id: 123456` is the ID returned as part of the response headers.
-The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request.
-The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks[Endpoint documentation]
-[source,ts]
----
-client.tasks.list({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`actions` (Optional, string | string[])*: A list or wildcard expression of actions used to limit the request.
-For example, you can use `cluster:*` to retrieve all cluster-related tasks.
-** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about the running tasks.
-This information is useful to distinguish tasks from each other but is more costly to run.
-** *`group_by` (Optional, Enum("nodes" | "parents" | "none"))*: A key that is used to group tasks in the response.
-The task lists can be grouped either by nodes or by parent tasks.
-** *`nodes` (Optional, string | string[])*: A list of node IDs or names that is used to limit the returned information.
-** *`parent_task_id` (Optional, string)*: A parent task identifier that is used to limit returned information.
-To return all tasks, omit this parameter or use a value of `-1`.
-If the parent task is not found, the API does not return a 404 response code.
-** *`timeout` (Optional, string | -1 | 0)*: The period to wait for each node to respond.
-If a node does not respond before its timeout expires, the response does not include its information.
-However, timed out nodes are included in the `node_failures` property.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete.
-
-[discrete]
-=== text_structure
-[discrete]
-==== find_field_structure
-Find the structure of a text field.
-Find the structure of a text field in an Elasticsearch index.
-
-This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch.
-For example, if you have ingested data into a very simple index that has just `@timestamp` and `message` fields, you can use this API to see what common structure exists in the message field.
-
-The response from the API contains:
-
-* Sample messages.
-* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
-* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
-* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
-
-All this information can be calculated by the structure finder with no guidance.
-However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
-
-If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.
-It helps determine why the returned structure was chosen.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure[Endpoint documentation]
-[source,ts]
----
-client.textStructure.findFieldStructure({ field, index })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`field` (string)*: The field that should be analyzed.
-** *`index` (string)*: The name of the index that contains the analyzed field.
-** *`column_names` (Optional, string)*: If `format` is set to `delimited`, you can specify the column names in a list.
-If this parameter is not specified, the structure finder uses the column names from the header row of the text. -If the text does not have a header row, columns are named "column1", "column2", "column3", for example. -** *`delimiter` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. -Only a single character is supported; the delimiter cannot have multiple characters. -By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). -In this default scenario, all rows must have the same number of fields for the delimited format to be detected. -If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. -** *`documents_to_sample` (Optional, number)*: The number of documents to include in the structural analysis. -The minimum value is 2. -** *`ecs_compatibility` (Optional, Enum("disabled" | "v1"))*: The mode of compatibility with ECS compliant Grok patterns. -Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. -This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. -If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. -The intention in that situation is that a user who knows the meanings will rename the fields before using them. -** *`explain` (Optional, boolean)*: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. -** *`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))*: The high level structure of the text. -By default, the API chooses the format. -In this default scenario, all rows must have the same number of fields for a delimited format to be detected. -If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. -** *`grok_pattern` (Optional, string)*: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. -The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. -If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". -If `grok_pattern` is not specified, the structure finder creates a Grok pattern. -** *`quote` (Optional, string)*: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. -Only a single character is supported. -If this parameter is not specified, the default value is a double quote (`"`). -If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. -** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. -If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. -Otherwise, the default value is `false`. 
-** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take.
-If the analysis is still running when the timeout expires, it will be stopped.
-** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text.
-In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
-
-If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
-Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
-
-For structured text, if you specify this parameter, the field must exist within the text.
-
-If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
-For structured text, it is not compulsory to have a timestamp in the text.
-** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text.
-Only a subset of Java time format letter groups are supported:
-
-* `a`
-* `d`
-* `dd`
-* `EEE`
-* `EEEE`
-* `H`
-* `HH`
-* `h`
-* `M`
-* `MM`
-* `MMM`
-* `MMMM`
-* `mm`
-* `ss`
-* `XX`
-* `XXX`
-* `yy`
-* `yyyy`
-* `zzz`
-
-Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`).
-Spacing and punctuation are also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
-For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
-
-One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
-Another is when the timestamp format is one that the structure finder does not consider by default.
-
-If this parameter is not specified, the structure finder chooses the best format from a built-in set.
-
-If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
-When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
-
-[discrete]
-==== find_message_structure
-Find the structure of text messages.
-Find the structure of a list of text messages.
-The messages must contain data that is suitable to be ingested into Elasticsearch.
-
-This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
-Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.
-
-The response from the API contains:
-
-* Sample messages.
-* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
-* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
-* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
-
-All this information can be calculated by the structure finder with no guidance.
-However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
-
-If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.
-It helps determine why the returned structure was chosen.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure[Endpoint documentation]
-[source,ts]
-----
-client.textStructure.findMessageStructure({ messages })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`messages` (string[])*: The list of messages you want to analyze.
-** *`column_names` (Optional, string)*: If the format is `delimited`, you can specify the column names in a list.
-If this parameter is not specified, the structure finder uses the column names from the header row of the text.
-If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
-** *`delimiter` (Optional, string)*: If the format is `delimited`, you can specify the character used to delimit the values in each row.
-Only a single character is supported; the delimiter cannot have multiple characters.
-By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
-In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
-If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
-** *`ecs_compatibility` (Optional, Enum("disabled" | "v1"))*: The mode of compatibility with ECS compliant Grok patterns.
-Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
-This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
-If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings renames these fields before using them.
-** *`explain` (Optional, boolean)*: If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
-** *`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))*: The high level structure of the text.
-By default, the API chooses the format.
-In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
-If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
-** *`grok_pattern` (Optional, string)*: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
-The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter.
-If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp".
-If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
-** *`quote` (Optional, string)*: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character.
-Only a single character is supported.
-If this parameter is not specified, the default value is a double quote (`"`).
-If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
-** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them.
-If this parameter is not specified and the delimiter is pipe (`|`), the default value is true.
-Otherwise, the default value is `false`.
-** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take.
-If the analysis is still running when the timeout expires, it will be stopped.
-** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text.
-In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
-
-If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
-Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
-
-For structured text, if you specify this parameter, the field must exist within the text.
-
-If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
-For structured text, it is not compulsory to have a timestamp in the text.
-** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text.
-Only a subset of Java time format letter groups are supported:
-
-* `a`
-* `d`
-* `dd`
-* `EEE`
-* `EEEE`
-* `H`
-* `HH`
-* `h`
-* `M`
-* `MM`
-* `MMM`
-* `MMMM`
-* `mm`
-* `ss`
-* `XX`
-* `XXX`
-* `yy`
-* `yyyy`
-* `zzz`
-
-Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`).
-Spacing and punctuation are also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
-For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
-
-One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
-Another is when the timestamp format is one that the structure finder does not consider by default.
-
-If this parameter is not specified, the structure finder chooses the best format from a built-in set.
-
-If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
-When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
-
-[discrete]
-==== find_structure
-Find the structure of a text file.
-The text file must contain data that is suitable to be ingested into Elasticsearch.
-
-This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
-Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format.
-It must, however, be text; binary text formats are not currently supported.
-The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb.
-
-The response from the API contains:
-
-* A couple of messages from the beginning of the text.
-* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
-* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
-* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
-
-All this information can be calculated by the structure finder with no guidance.
-However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure[Endpoint documentation]
-[source,ts]
-----
-client.textStructure.findStructure({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`text_files` (Optional, TJsonDocument[])*
-** *`charset` (Optional, string)*: The text's character set.
-It must be a character set that is supported by the JVM that Elasticsearch uses.
-For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`.
-If this parameter is not specified, the structure finder chooses an appropriate character set.
-** *`column_names` (Optional, string)*: If you have set `format` to `delimited`, you can specify the column names in a list.
-If this parameter is not specified, the structure finder uses the column names from the header row of the text.
-If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
-** *`delimiter` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row.
-Only a single character is supported; the delimiter cannot have multiple characters.
-By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
-In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
-If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
-** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns.
-Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
-Valid values are `disabled` and `v1`.
-This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
-If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings renames these fields before using them.
-** *`explain` (Optional, boolean)*: If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
-If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen.
-** *`format` (Optional, string)*: The high level structure of the text.
-Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. -By default, the API chooses the format. -In this default scenario, all rows must have the same number of fields for a delimited format to be detected. -If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. -** *`grok_pattern` (Optional, string)*: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. -The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. -If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". -If `grok_pattern` is not specified, the structure finder creates a Grok pattern. -** *`has_header_row` (Optional, boolean)*: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. -If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. -** *`line_merge_size_limit` (Optional, number)*: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. -If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. -** *`lines_to_sample` (Optional, number)*: The number of lines to include in the structural analysis, starting from the beginning of the text. -The minimum is 2. -If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. - -NOTE: The number of lines and the variation of the lines affects the speed of the analysis. -For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. -If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. -** *`quote` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. -Only a single character is supported. -If this parameter is not specified, the default value is a double quote (`"`). -If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. -** *`should_trim_fields` (Optional, boolean)*: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. -If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. -Otherwise, the default value is `false`. -** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. -If the analysis is still running when the timeout expires then it will be stopped. -** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. 
-In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field.
-
-If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
-Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
-
-For structured text, if you specify this parameter, the field must exist within the text.
-
-If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
-For structured text, it is not compulsory to have a timestamp in the text.
-** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text.
-
-Only a subset of Java time format letter groups are supported:
-
-* `a`
-* `d`
-* `dd`
-* `EEE`
-* `EEEE`
-* `H`
-* `HH`
-* `h`
-* `M`
-* `MM`
-* `MMM`
-* `MMMM`
-* `mm`
-* `ss`
-* `XX`
-* `XXX`
-* `yy`
-* `yyyy`
-* `zzz`
-
-Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a `.`, `,` or `:`.
-Spacing and punctuation are also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes.
-For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
-
-One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
-Another is when the timestamp format is one that the structure finder does not consider by default.
-
-If this parameter is not specified, the structure finder chooses the best format from a built-in set.
-
-If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
-When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
-
-[discrete]
-==== test_grok_pattern
-Test a Grok pattern.
-Test a Grok pattern on one or more lines of text.
-The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern[Endpoint documentation]
-[source,ts]
-----
-client.textStructure.testGrokPattern({ grok_pattern, text })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`grok_pattern` (string)*: The Grok pattern to run on the text.
-** *`text` (string[])*: The lines of text to run the Grok pattern on.
-** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns.
-Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
-Valid values are `disabled` and `v1`.
-
-[discrete]
-=== transform
-[discrete]
-==== delete_transform
-Delete a transform.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform[Endpoint documentation]
-[source,ts]
-----
-client.transform.deleteTransform({ transform_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`transform_id` (string)*: Identifier for the transform.
-** *`force` (Optional, boolean)*: If this value is false, the transform must be stopped before it can be deleted. If true, the transform is
-deleted regardless of its current state.
-** *`delete_dest_index` (Optional, boolean)*: If this value is true, the destination index is deleted together with the transform. If false, the destination
-index will not be deleted.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== get_node_stats
-Retrieves transform usage information for transform nodes.
-[source,ts]
-----
-client.transform.getNodeStats()
-----
-
-
-[discrete]
-==== get_transform
-Get transforms.
-Get configuration information for transforms.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform[Endpoint documentation]
-[source,ts]
-----
-client.transform.getTransform({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`transform_id` (Optional, string | string[])*: Identifier for the transform. It can be a transform identifier or a
-wildcard expression. You can get information for all transforms by using
-`_all`, by specifying `*` as the `<transform_id>`, or by omitting the
-`<transform_id>`.
-** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
-
-. Contains wildcard expressions and there are no transforms that match.
-. Contains the _all string or no identifiers and there are no matches.
-. Contains wildcard expressions and there are only partial matches.
-
-If this parameter is false, the request returns a 404 status code when
-there are no matches or only partial matches.
-** *`from` (Optional, number)*: Skips the specified number of transforms.
-** *`size` (Optional, number)*: Specifies the maximum number of transforms to obtain.
-** *`exclude_generated` (Optional, boolean)*: Excludes fields that were automatically added when creating the
-transform. This allows the configuration to be in an acceptable format to
-be retrieved and then added to another cluster.
-
-[discrete]
-==== get_transform_stats
-Get transform stats.
-
-Get usage information for transforms.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats[Endpoint documentation]
-[source,ts]
-----
-client.transform.getTransformStats({ transform_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`transform_id` (string | string[])*: Identifier for the transform. It can be a transform identifier or a
-wildcard expression. You can get information for all transforms by using
-`_all`, by specifying `*` as the `<transform_id>`, or by omitting the
-`<transform_id>`.
-** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
-
-. Contains wildcard expressions and there are no transforms that match.
-. Contains the _all string or no identifiers and there are no matches.
-. Contains wildcard expressions and there are only partial matches.
-
-If this parameter is false, the request returns a 404 status code when
-there are no matches or only partial matches.
-** *`from` (Optional, number)*: Skips the specified number of transforms.
-** *`size` (Optional, number)*: Specifies the maximum number of transforms to obtain.
-** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the stats.
-
-[discrete]
-==== preview_transform
-Preview a transform.
-Generates a preview of the results that you will get when you create a transform with the same configuration.
- -It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also -generates a list of mappings and settings for the destination index. These values are determined based on the field -types of the source index and the transform aggregations. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform[Endpoint documentation] -[source,ts] ----- -client.transform.previewTransform({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (Optional, string)*: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform -configuration details in the request body. -** *`dest` (Optional, { index, op_type, pipeline, routing, version_type })*: The destination for the transform. -** *`description` (Optional, string)*: Free text description of the transform. -** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the -transform is running continuously. Also determines the retry interval in -the event of transient failures while the transform is searching or -indexing. The minimum value is 1s and the maximum is 1h. -** *`pivot` (Optional, { aggregations, group_by })*: The pivot method transforms the data by aggregating and grouping it. -These objects define the group by fields and the aggregation to reduce -the data. -** *`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. -** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. -** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. -** *`retention_policy` (Optional, { time })*: Defines a retention policy for the transform. Data that meets the defined -criteria is deleted from the destination index. -** *`latest` (Optional, { sort, unique_key })*: The latest method transforms the data by finding the latest document for -each unique key. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the -timeout expires, the request fails and returns an error. - -[discrete] -==== put_transform -Create a transform. -Creates a transform. - -A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as -a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a -unique row per entity. - -You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If -you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in -the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values -in the latest object. - -You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and -`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the -transform remembers which roles the user that created it had at the time of creation and uses those same roles. 
If -those roles do not have the required privileges on the source and destination indices, the transform fails when it -attempts unauthorized operations. - -NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any -`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do -not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not -give users any privileges on `.data-frame-internal*` indices. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform[Endpoint documentation] -[source,ts] ----- -client.transform.putTransform({ transform_id, dest, source }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), -hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. -** *`dest` ({ index, op_type, pipeline, routing, version_type })*: The destination for the transform. -** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. -** *`description` (Optional, string)*: Free text description of the transform. -** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the transform is running continuously. Also -determines the retry interval in the event of transient failures while the transform is searching or indexing. -The minimum value is `1s` and the maximum is `1h`. -** *`latest` (Optional, { sort, unique_key })*: The latest method transforms the data by finding the latest document for each unique key. -** *`_meta` (Optional, Record)*: Defines optional transform metadata. -** *`pivot` (Optional, { aggregations, group_by })*: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields -and the aggregation to reduce the data. -** *`retention_policy` (Optional, { time })*: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the -destination index. -** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. -** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. -** *`defer_validation` (Optional, boolean)*: When the transform is created, a series of validations occur to ensure its success. For example, there is a -check for the existence of the source indices and a check that the destination index is not part of the source -index pattern. You can use this parameter to skip the checks, for example when the source index does not exist -until after the transform is created. The validations are always run when you start the transform, however, with -the exception of privilege checks. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== reset_transform -Reset a transform. - -Before you can reset it, you must stop it; alternatively, use the `force` query parameter. -If the destination index was created by the transform, it is deleted. 
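
As a quick sketch (the transform name here is illustrative, not from the original docs), a forced reset that skips the manual stop step could look like this:

[source,ts]
----
// Hypothetical example: force-reset a transform without stopping it first.
// If the transform created its destination index, the reset deletes that index.
await client.transform.resetTransform({ transform_id: 'my-transform', force: true })
----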
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform[Endpoint documentation] -[source,ts] ----- -client.transform.resetTransform({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), -hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. -** *`force` (Optional, boolean)*: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform -must be stopped before it can be reset. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== schedule_now_transform -Schedule a transform to start now. - -Instantly run a transform to process data. -If you run this API, the transform will process the new data instantly, -without waiting for the configured frequency interval. After the API is called, -the transform will be processed again at `now + frequency` unless the API -is called again in the meantime. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform[Endpoint documentation] -[source,ts] ----- -client.transform.scheduleNowTransform({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. -** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the scheduling to take place - -[discrete] -==== start_transform -Start a transform. - -When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is -set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping -definitions for the destination index from the source indices and the transform aggregations. If fields in the -destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), -the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce -mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you -start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings -in a pivot transform. - -When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you -created the transform, they occur when you start the transform—​with the exception of privilege checks. When -Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the -time of creation and uses those same roles. If those roles do not have the required privileges on the source and -destination indices, the transform fails when it attempts unauthorized operations. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform[Endpoint documentation] -[source,ts] ----- -client.transform.startTransform({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. -** *`from` (Optional, string)*: Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. - -[discrete] -==== stop_transform -Stop transforms. -Stops one or more transforms. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform[Endpoint documentation] -[source,ts] ----- -client.transform.stopTransform({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. To stop multiple transforms, use a list or a wildcard expression. -To stop all transforms, use `_all` or `*` as the identifier. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; -contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there -are only partial matches. - -If it is true, the API returns a successful acknowledgement message when there are no matches. When there are -only partial matches, the API stops the appropriate transforms. - -If it is false, the request returns a 404 status code when there are no matches or only partial matches. -** *`force` (Optional, boolean)*: If it is true, the API forcefully stops the transforms. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the -timeout expires, the request returns a timeout exception. However, the request continues processing and -eventually moves the transform to a STOPPED state. -** *`wait_for_checkpoint` (Optional, boolean)*: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, -the transform stops as soon as possible. -** *`wait_for_completion` (Optional, boolean)*: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns -immediately and the indexer is stopped asynchronously in the background. - -[discrete] -==== update_transform -Update a transform. -Updates certain properties of a transform. - -All updated properties except `description` do not take effect until after the transform starts the next checkpoint, -thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` -privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When -Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the -time of update and runs with those privileges. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform[Endpoint documentation] -[source,ts] ----- -client.transform.updateTransform({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. -** *`dest` (Optional, { index, op_type, pipeline, routing, version_type })*: The destination for the transform. -** *`description` (Optional, string)*: Free text description of the transform. -** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the -transform is running continuously. 
Also determines the retry interval in
-the event of transient failures while the transform is searching or
-indexing. The minimum value is 1s and the maximum is 1h.
-** *`_meta` (Optional, Record)*: Defines optional transform metadata.
-** *`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform.
-** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings.
-** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously.
-** *`retention_policy` (Optional, { time } | null)*: Defines a retention policy for the transform. Data that meets the defined
-criteria is deleted from the destination index.
-** *`defer_validation` (Optional, boolean)*: When true, deferrable validations are not run. This behavior may be
-desired if the source index does not exist until after the transform is
-created.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the
-timeout expires, the request fails and returns an error.
-
-[discrete]
-==== upgrade_transforms
-Upgrade all transforms.
-
-Transforms are compatible across minor versions and between supported major versions.
-However, over time, the format of transform configuration information may change.
-This API identifies transforms that have a legacy configuration format and upgrades them to the latest version.
-It also cleans up the internal data structures that store the transform state and checkpoints.
-The upgrade does not affect the source and destination indices.
-The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged.
-
-If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue.
-Resolve the issue, then re-run the process.
-A summary is returned when the upgrade is finished.
-
-To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster.
-You may want to perform a recent cluster backup prior to the upgrade.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms[Endpoint documentation]
-[source,ts]
-----
-client.transform.upgradeTransforms({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`dry_run` (Optional, boolean)*: When true, the request checks for updates but does not run them.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and
-returns an error.
-
-[discrete]
-=== watcher
-[discrete]
-==== ack_watch
-Acknowledge a watch.
-Acknowledging a watch enables you to manually throttle the execution of the watch's actions.
-
-The acknowledgement state of an action is stored in the `status.actions.<id>.ack.state` structure.
-
-IMPORTANT: If the specified watch is currently being executed, this API will return an error.
-The reason for this behavior is to prevent overwriting the watch status from a watch execution.
-
-Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.
-This happens when the condition of the watch is not met (the condition evaluates to false).
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch[Endpoint documentation]
-[source,ts]
-----
-client.watcher.ackWatch({ watch_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`watch_id` (string)*: The watch identifier.
-** *`action_id` (Optional, string | string[])*: A list of the action identifiers to acknowledge.
-If you omit this parameter, all of the actions of the watch are acknowledged.
-
-[discrete]
-==== activate_watch
-Activate a watch.
-A watch can be either active or inactive.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch[Endpoint documentation]
-[source,ts]
-----
-client.watcher.activateWatch({ watch_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`watch_id` (string)*: The watch identifier.
-
-[discrete]
-==== deactivate_watch
-Deactivate a watch.
-A watch can be either active or inactive.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch[Endpoint documentation]
-[source,ts]
-----
-client.watcher.deactivateWatch({ watch_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`watch_id` (string)*: The watch identifier.
-
-[discrete]
-==== delete_watch
-Delete a watch.
-When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again.
-
-Deleting a watch does not delete any watch execution records related to this watch from the watch history.
-
-IMPORTANT: Deleting a watch must be done by using only this API.
-Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API.
-When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch[Endpoint documentation]
-[source,ts]
-----
-client.watcher.deleteWatch({ id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The watch identifier.
-
-[discrete]
-==== execute_watch
-Run a watch.
-This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.
-
-For testing and debugging purposes, you also have fine-grained control on how the watch runs.
-You can run the watch without running all of its actions or alternatively by simulating them.
-You can also force execution by ignoring the watch condition, and control whether a watch record would be written to the watch history after it runs.
-
-You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.
-This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.
-
-When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.
-If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.
-
-When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.
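
For instance, a simulated run of a stored watch (the watch and action names here are illustrative) can ignore the condition and skip persisting a watch record, using the parameters documented below:

[source,ts]
----
// Hypothetical example: run the watch in simulation mode for debugging.
// The condition is ignored and no watch record is written to the history.
await client.watcher.executeWatch({
  id: 'my-watch',
  ignore_condition: true,
  record_execution: false,
  action_modes: { my_action: 'simulate' }
})
----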
- -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch[Endpoint documentation] -[source,ts] ----- -client.watcher.executeWatch({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: The watch identifier. -** *`action_modes` (Optional, Record)*: Determines how to handle the watch actions as part of the watch execution. -** *`alternative_input` (Optional, Record)*: When present, the watch uses this object as a payload instead of executing its own input. -** *`ignore_condition` (Optional, boolean)*: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. -** *`record_execution` (Optional, boolean)*: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. -In addition, the status of the watch is updated, possibly throttling subsequent runs. -This can also be specified as an HTTP parameter. -** *`simulated_actions` (Optional, { actions, all, use_all })* -** *`trigger_data` (Optional, { scheduled_time, triggered_time })*: This structure is parsed as the data of the trigger event that will be used during the watch execution. -** *`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })*: When present, this watch is used instead of the one specified in the request. -This watch is not persisted to the index and `record_execution` cannot be set. -** *`debug` (Optional, boolean)*: Defines whether the watch runs in debug mode. - -[discrete] -==== get_settings -Get Watcher index settings. -Get settings for the Watcher internal index (`.watches`). -Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings[Endpoint documentation] -[source,ts] ----- -client.watcher.getSettings({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_watch -Get a watch. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch[Endpoint documentation] -[source,ts] ----- -client.watcher.getWatch({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The watch identifier. - -[discrete] -==== put_watch -Create or update a watch. -When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. -Typically for the `schedule` trigger, the scheduler is the trigger engine. - -IMPORTANT: You must use Kibana or this API to create a watch. -Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. -If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. - -When you add a watch you can also define its initial active state by setting the *active* parameter. - -When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. 
-If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch[Endpoint documentation]
-[source,ts]
-----
-client.watcher.putWatch({ id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The identifier for the watch.
-** *`actions` (Optional, Record)*: The list of actions that will be run if the condition matches.
-** *`condition` (Optional, { always, array_compare, compare, never, script })*: The condition that defines if the actions should be run.
-** *`input` (Optional, { chain, http, search, simple })*: The input that loads the data for the watch.
-** *`metadata` (Optional, Record)*: Metadata JSON that will be copied into the history entries.
-** *`throttle_period` (Optional, string | -1 | 0)*: The minimum time between actions being run.
-The default is 5 seconds.
-This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`.
-If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request.
-** *`throttle_period_in_millis` (Optional, Unit)*: Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the `throttle_period` parameter are specified, Watcher uses the last parameter included in the request.
-** *`transform` (Optional, { chain, script, search })*: The transform that processes the watch payload to prepare it for the watch actions.
-** *`trigger` (Optional, { schedule })*: The trigger that defines when the watch should run.
-** *`active` (Optional, boolean)*: The initial state of the watch.
-The default value is `true`, which means the watch is active by default.
-** *`if_primary_term` (Optional, number)*: Only update the watch if the last operation that has changed the watch has the specified primary term.
-** *`if_seq_no` (Optional, number)*: Only update the watch if the last operation that has changed the watch has the specified sequence number.
-** *`version` (Optional, number)*: Explicit version number for concurrency control.
-
-[discrete]
-==== query_watches
-Query watches.
-Get all registered watches in a paginated manner and optionally filter watches by a query.
-
-Note that only the `_id` and `metadata.*` fields are queryable or sortable.
-
-https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches[Endpoint documentation]
-[source,ts]
-----
-client.watcher.queryWatches({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`from` (Optional, number)*: The offset from the first result to fetch.
-It must be non-negative.
-** *`size` (Optional, number)*: The number of hits to return.
-It must be non-negative.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query that filters the watches to be returned. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: One or more fields used to sort the search results. -** *`search_after` (Optional, number | number | string | boolean | null[])*: Retrieve the next page of hits using a set of sort values from the previous page. - -[discrete] -==== start -Start the watch service. -Start the Watcher service if it is not already running. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start[Endpoint documentation] -[source,ts] ----- -client.watcher.start({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. - -[discrete] -==== stats -Get Watcher statistics. -This API always returns basic metrics. -You retrieve more metrics by using the metric parameter. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats[Endpoint documentation] -[source,ts] ----- -client.watcher.stats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`metric` (Optional, Enum("_all" | "queued_watches" | "current_watches" | "pending_watches") | Enum("_all" | "queued_watches" | "current_watches" | "pending_watches")[])*: Defines which additional metrics are included in the response. -** *`emit_stacktraces` (Optional, boolean)*: Defines whether stack traces are generated for each watch that is running. - -[discrete] -==== stop -Stop the watch service. -Stop the Watcher service if it is running. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop[Endpoint documentation] -[source,ts] ----- -client.watcher.stop({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. -If the master node is not available before the timeout expires, the request fails and returns an error. -To indicate that the request should never timeout, set it to `-1`. - -[discrete] -==== update_settings -Update Watcher index settings. -Update settings for the Watcher internal index (`.watches`). -Only a subset of settings can be modified. -This includes `index.auto_expand_replicas` and `index.number_of_replicas`. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings[Endpoint documentation] -[source,ts] ----- -client.watcher.updateSettings({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index.auto_expand_replicas` (Optional, string)* -** *`index.number_of_replicas` (Optional, number)* -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -=== xpack -[discrete] -==== info -Get information. -The information provided by the API includes: - -* Build information including the build number and timestamp. -* License information about the currently installed license. -* Feature information for the features that are currently enabled and available under the current license. - -https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info[Endpoint documentation] -[source,ts] ----- -client.xpack.info({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`categories` (Optional, Enum("build" | "features" | "license")[])*: A list of the information categories to include in the response. -For example, `build,license,features`. -** *`accept_enterprise` (Optional, boolean)*: If this param is used it must be set to true -** *`human` (Optional, boolean)*: Defines whether additional human-readable information is included in the response. -In particular, it adds descriptions and a tag line. - -[discrete] -==== usage -Get usage information. -Get information about the features that are currently enabled and available under the current license. -The API also provides some usage statistics. - -https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack[Endpoint documentation] -[source,ts] ----- -client.xpack.usage({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -To indicate that the request should never timeout, set it to `-1`. - diff --git a/docs/advanced-config.asciidoc b/docs/reference/advanced-config.md similarity index 67% rename from docs/advanced-config.asciidoc rename to docs/reference/advanced-config.md index b3c9388a4..8145af678 100644 --- a/docs/advanced-config.asciidoc +++ b/docs/reference/advanced-config.md @@ -1,25 +1,27 @@ -[[advanced-config]] -=== Advanced configuration +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/advanced-config.html +--- -If you need to customize the client behavior heavily, you are in the right -place! The client enables you to customize the following internals: +# Advanced configuration [advanced-config] + +If you need to customize the client behavior heavily, you are in the right place! The client enables you to customize the following internals: * `ConnectionPool` class * `Connection` class * `Serializer` class -NOTE: For information about the `Transport` class, refer to <>. +::::{note} +For information about the `Transport` class, refer to [Transport](/reference/transport.md). +:::: + -[discrete] -==== `ConnectionPool` +## `ConnectionPool` [_connectionpool] -This class is responsible for keeping in memory all the {es} Connection that you -are using. There is a single Connection for every node. 
The connection pool
-handles the resurrection strategies and the updates of the pool.
+This class is responsible for keeping in memory all the {{es}} `Connection` instances that you are using. There is a single Connection for every node. The connection pool handles the resurrection strategies and the updates of the pool.

-[source,js]
----
+```js
const { Client, ConnectionPool } = require('@elastic/elasticsearch')

class MyConnectionPool extends ConnectionPool {
  markAlive (connection) {
    // your code
    super.markAlive(connection)
  }
}

const client = new Client({
  // pass the custom pool so the client actually uses it
  ConnectionPool: MyConnectionPool,
  cloud: { id: '' },
  auth: { apiKey: 'base64EncodedKey' }
})
----
+```


## `Connection` [_connection]

-[discrete]
-==== `Connection`

-This class represents a single node, it holds every information we have on the
-node, such as roles, id, URL, custom headers and so on. The actual HTTP request
-is performed here, this means that if you want to swap the default HTTP client
-(Node.js core), you should override the `request` method of this class.
+This class represents a single node; it holds all the information we have about the node, such as roles, id, URL, custom headers, and so on. The actual HTTP request is performed here, which means that if you want to swap the default HTTP client (Node.js core), you should override the `request` method of this class.

-[source,js]
----
+```js
const { Client, BaseConnection } = require('@elastic/elasticsearch')

class MyConnection extends BaseConnection {
  request (params, callback) {
    // your code
    return super.request(params, callback)
  }
}

const client = new Client({
  // pass the custom connection class so the client actually uses it
  Connection: MyConnection,
  cloud: { id: '' },
  auth: { apiKey: 'base64EncodedKey' }
})
----
+```


## `Serializer` [_serializer]

-[discrete]
-==== `Serializer`

-This class is responsible for the serialization of every request, it offers the
-following methods:
+This class is responsible for the serialization of every request; it offers the following methods:

* `serialize(object: any): string;` serializes request objects.
* `deserialize(json: string): any;` deserializes response strings.
* `ndserialize(array: any[]): string;` serializes bulk request objects.
* `qserialize(object: any): string;` serializes request query parameters.

-[source,js]
----
+```js
const { Client, Serializer } = require('@elastic/elasticsearch')

class MySerializer extends Serializer {
  serialize (object) {
    // your code
    return super.serialize(object)
  }
}

const client = new Client({
  // pass the custom serializer so the client actually uses it
  Serializer: MySerializer,
  cloud: { id: '' },
  auth: { apiKey: 'base64EncodedKey' }
})
----
+```

-[discrete]
-[[redaction]]
-==== Redaction of potentially sensitive data
+
+## Redaction of potentially sensitive data [redaction]

When the client raises an `Error` that originated at the HTTP layer, like a `ConnectionError` or `TimeoutError`, a `meta` object is often attached to the error object that includes metadata useful for debugging, like request and response information. Because this can include potentially sensitive data, like authentication secrets in an `Authorization` header, the client takes measures to redact common sources of sensitive data when this metadata is attached and serialized.

If your configuration requires extra headers or other configurations that may include sensitive data, you may want to adjust these settings to protect that data as well.

By default, the `redaction` option is set to `{ type: 'replace' }`, which recursively searches for sensitive key names, case insensitive, and replaces their values with the string `[redacted]`.
-[source,js]
----
+```js
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  cloud: { id: '' },
  auth: { apiKey: 'base64EncodedKey' }
})

try {
  await client.indices.create({ index: 'my_index' })
} catch (err) {
  console.log(err.meta.meta.request.options.headers.authorization) // prints "[redacted]"
}
----
+```

If you would like to redact additional properties, you can include additional key names to search and replace:

-[source,js]
----
+```js
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  cloud: { id: '' },
  auth: { apiKey: 'base64EncodedKey' },
  headers: { 'X-My-Secret-Password': 'shhh it\'s a secret!' },
  redaction: {
    additionalKeys: ['x-my-secret-password']
  }
})

try {
  await client.indices.create({ index: 'my_index' })
} catch (err) {
  console.log(err.meta.meta.request.options.headers['X-My-Secret-Password']) // prints "[redacted]"
}
----
+```

-Alternatively, if you know you're not going to use the metadata at all, setting the redaction type to `remove` will remove all optional sources of potentially sensitive data entirely, or replacing them with `null` for required properties.
+Alternatively, if you know you’re not going to use the metadata at all, setting the redaction type to `remove` will remove all optional sources of potentially sensitive data entirely, or replace them with `null` for required properties.

-[source,js]
----
+```js
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  cloud: { id: '' },
  auth: { apiKey: 'base64EncodedKey' },
  redaction: { type: "remove" }
})

try {
  await client.indices.create({ index: 'my_index' })
} catch (err) {
  console.log(err.meta.meta.request.options.headers) // undefined
}
----
+```

Finally, if you prefer to turn off redaction altogether, perhaps while debugging on a local developer environment, you can set the redaction type to `off`. This will revert the client to pre-8.11.0 behavior, where basic redaction is only performed during common serialization methods like `console.log` and `JSON.stringify`.

::::{warning}
Setting `redaction.type` to `off` is not recommended in production environments.
::::

-WARNING: Setting `redaction.type` to `off` is not recommended in production environments.

-[source,js]
----
+```js
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  cloud: { id: '' },
  auth: { apiKey: 'base64EncodedKey' },
  redaction: { type: "off" }
})

try {
  await client.indices.create({ index: 'my_index' })
} catch (err) {
  console.log(err.meta.meta.request.options.headers.authorization) // the actual header value will be logged
}
----
-
-[discrete]
-==== Migrate to v8
-
-The Node.js client can be configured to emit an HTTP header
-`Accept: application/vnd.elasticsearch+json; compatible-with=7`
-which signals to Elasticsearch that the client is requesting
-`7.x` version of request and response bodies. This allows for
-upgrading from 7.x to 8.x version of Elasticsearch without upgrading
-everything at once. Elasticsearch should be upgraded first after
-the compatibility header is configured and clients should be upgraded
-second.
-To enable to setting, configure the environment variable
-`ELASTIC_CLIENT_APIVERSIONING` to `true`.
+```
+
+
+## Migrate to v8 [_migrate_to_v8]
+
+The Node.js client can be configured to emit an HTTP header `Accept: application/vnd.elasticsearch+json; compatible-with=7` which signals to Elasticsearch that the client is requesting `7.x` versions of request and response bodies. This allows for upgrading from 7.x to 8.x versions of Elasticsearch without upgrading everything at once. After the compatibility header is configured, Elasticsearch should be upgraded first and clients second. To enable this setting, configure the environment variable `ELASTIC_CLIENT_APIVERSIONING` to `true`.
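As a minimal sketch of what this looks like in practice (the entry-point file name is illustrative, and the empty cloud ID is a placeholder as in the examples above), assuming the environment variable is set before the process starts:

```js
// Start the process with compatibility mode enabled, for example:
//   ELASTIC_CLIENT_APIVERSIONING=true node index.js
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  cloud: { id: '' },
  auth: { apiKey: 'base64EncodedKey' }
})

// With the environment variable set, every request is sent with
// `Accept: application/vnd.elasticsearch+json; compatible-with=7`,
// so a 7.x-compatible response body is returned.
client.info().then(console.log, console.error)
```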
+
+
diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md
new file mode 100644
index 000000000..2271a214a
--- /dev/null
+++ b/docs/reference/api-reference.md
@@ -0,0 +1,14377 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html
+---

# API Reference [api-reference]


## bulk [_bulk]

Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.
* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.
* To use the `delete` action, you must have the `delete` or `write` index privilege.
* To use the `update` action, you must have the `index` or `write` index privilege.
* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.
* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.

Automatic data stream creation requires a matching index template with data stream enabled.

The actions are specified in the request body using a newline delimited JSON (NDJSON) structure:

```
action_and_meta_data\n
optional_source\n
action_and_meta_data\n
optional_source\n
....
action_and_meta_data\n
optional_source\n
```

The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. A `create` action fails if a document with the same ID already exists in the target. An `index` action adds or replaces a document as necessary.

::::{note}
Data streams support only the `create` action. To update or delete a document in a data stream, you must target the backing index containing the document.
::::


An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.

A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.

::::{note}
The final line of data must end with a newline character (`\n`). Each newline character may be preceded by a carriage return (`\r`). When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.
::::


If you provide a target in the request path, it is used for any actions that don’t explicitly specify an `_index` argument.

A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.

Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible.

There is no "correct" number of actions to perform in a single bulk request.
Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default, so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.

**Client support for bulk requests**

Some of the officially supported clients provide helpers to assist with bulk requests and reindexing:

* Go: Check out `esutil.BulkIndexer`
* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`
* Python: Check out `elasticsearch.helpers.*`
* JavaScript: Check out `client.helpers.*`
* .NET: Check out `BulkAllObservable`
* PHP: Check out bulk indexing.

**Submitting bulk requests with cURL**

If you’re providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. The latter doesn’t preserve newlines. For example:

```
$ cat requests
{ "index" : { "_index" : "test", "_id" : "1" } }
{ "field1" : "value1" }
$ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
{"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
```

**Optimistic concurrency control**

Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.

**Versioning**

Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index or delete operation based on the `_version` mapping. It also supports the `version_type`.

**Routing**

Each bulk item can include the routing value using the `routing` field. It automatically follows the behavior of the index or delete operation based on the `_routing` mapping.

::::{note}
Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
::::


**Wait for active shards**

When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.

**Refresh**

Control when the changes made by this request are visible to search.

::::{note}
Only the shards that receive the bulk request will be affected by refresh. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk)

```ts
client.bulk({ ...
}) +``` + + +### Arguments [_arguments] + +* **Request (object):** + + * **`index` (Optional, string)**: The name of the data stream, index, or index alias to perform bulk actions on. + * **`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])** + * **`list_executed_pipelines` (Optional, boolean)**: If `true`, the response will include the ingest pipelines that were run for each index or create. + * **`pipeline` (Optional, string)**: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. + * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. + * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. + * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. + * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. + * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. + * **`timeout` (Optional, string | -1 | 0)**: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. + * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. + * **`require_alias` (Optional, boolean)**: If `true`, the request’s actions must target an index alias. + * **`require_data_stream` (Optional, boolean)**: If `true`, the request’s actions must target a data stream (existing or to be created). + + + +## clear_scroll [_clear_scroll] + +Clear a scrolling search. Clear the search context and results for a scrolling search. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll) + +```ts +client.clearScroll({ ... }) +``` + + +### Arguments [_arguments_2] + +* **Request (object):** + + * **`scroll_id` (Optional, string | string[])**: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. 
It is recommended to specify scroll IDs in the request body parameter. + + + +## close_point_in_time [_close_point_in_time] + +Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time) + +```ts +client.closePointInTime({ id }) +``` + + +### Arguments [_arguments_3] + +* **Request (object):** + + * **`id` (string)**: The ID of the point-in-time. + + + +## count [_count] + +Count search results. Get the number of documents matching a query. + +The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. The latter must be nested in a `query` key, which is the same as the search API. + +The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. + +The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count) + +```ts +client.count({ ... }) +``` + + +### Arguments [_arguments_4] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. + * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. The query is optional, and when not provided, it will use `match_all` to count all the docs. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + * **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. + * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. + * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. 
This parameter can be used only when the `q` query string parameter is specified.
    * **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`.
    * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded, or aliased indices are ignored when frozen.
    * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
    * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
    * **`min_score` (Optional, number)**: The minimum `_score` value that documents must have to be included in the result.
    * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random.
    * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
    * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
    * **`q` (Optional, string)**: The query in Lucene query string syntax.



## create [_create]

Create a new document in the index.

You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs. Using `_create` guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with the same ID already exists in the index. To update an existing document, you must use the `/<target>/_doc/` API.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.
* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.

Automatic data stream creation requires a matching index template with data stream enabled.

**Automatically create data streams and indices**

If the request’s target doesn’t exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.

If the target doesn’t exist and doesn’t match a data stream template, the operation automatically creates the index and applies any matching index templates.

::::{note}
Elasticsearch includes several built-in index templates.
To avoid naming collisions with these templates, refer to index pattern documentation.
::::


If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed.

Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behavior is to disallow.

::::{note}
The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams.
::::


**Routing**

By default, shard placement — or routing — is controlled by using a hash of the document’s ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.

When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.

::::{note}
Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
::::


**Distributed**

The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.

**Active shards**

To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter.

Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error.

For example, suppose you have a cluster of three nodes, A, B, and C and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.

It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create)

```ts
client.create({ id, index })
```


### Arguments [_arguments_5]

* **Request (object):**

    * **`id` (string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format.
    * **`index` (string)**: The name of the data stream or index to target. If the target doesn’t exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn’t exist and doesn’t match a data stream template, this request creates the index.
    * **`document` (Optional, object)**: A document.
    * **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
    * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
    * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
    * **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur.
    * **`version` (Optional, number)**: The explicit version number for concurrency control.
It must be a non-negative long number. + * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. + * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. + + + +## delete [_delete] + +Delete a document. + +Remove a JSON document from the specified index. + +::::{note} +You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document. +:::: + + +**Optimistic concurrency control** + +Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. + +**Versioning** + +Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document’s version remains available is determined by the `index.gc_deletes` index setting. + +**Routing** + +If routing is used during indexing, the routing value also needs to be specified to delete a document. + +If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. + +For example: + +``` +DELETE /my-index-000001/_doc/1?routing=shard-1 +``` + +This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified. + +**Distributed** + +The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete) + +```ts +client.delete({ id, index }) +``` + + +### Arguments [_arguments_6] + +* **Request (object):** + + * **`id` (string)**: A unique identifier for the document. + * **`index` (string)**: The name of the target index. + * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. + * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. + * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. + * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. + * **`timeout` (Optional, string | -1 | 0)**: The period to wait for active shards. 
This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error.
    * **`version` (Optional, number)**: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed.
    * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type.
    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.



## delete_by_query [_delete_by_query]

Delete documents.

Deletes documents that match the specified query.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:

* `read`
* `delete` or `write`

You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.

::::{note}
Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.
::::


While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick; they are not rolled back.

You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query.

**Throttling delete requests**

To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to disable throttling.

Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
By default the batch size is `1000`, so if `requests_per_second` is set to `500`: + +``` +target_time = 1000 / 500 per second = 2 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +``` + +Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". + +**Slicing** + +Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. + +Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks: + +* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. +* Fetching the status of the task for the request with slices only contains the status of completed slices. +* These sub-requests are individually addressable for things like cancellation and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. +* Canceling the request with `slices` will cancel each sub-request. +* Due to the nature of `slices` each sub-request won’t get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. +* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. + +If you’re slicing manually or otherwise tuning automatic slicing, keep in mind that: + +* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. +* Delete performance scales linearly across available resources with the number of slices. + +Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. + +**Cancel a delete by query operation** + +Any delete by query can be canceled using the task cancel API. For example: + +``` +POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +``` + +The task ID can be found by using the get tasks API. + +Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. 
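Putting these options together, the following is a minimal sketch of a sliced, throttled delete by query; the index name, query, and numeric values below are illustrative placeholders, not taken from the original text:

```ts
// A sketch of the options described above: let Elasticsearch choose
// the slice count, throttle the sub-requests, and count version
// conflicts instead of aborting on them.
const response = await client.deleteByQuery({
  index: 'my-index-000001',        // illustrative index name
  slices: 'auto',                  // one slice per shard, up to a limit
  requests_per_second: 500,        // throttle batches of deletes
  conflicts: 'proceed',            // count conflicts rather than halting
  query: {
    match: { 'user.id': 'elkbee' } // illustrative query
  }
})
console.log(response.deleted, response.version_conflicts)
```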
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query) + +```ts +client.deleteByQuery({ index }) +``` + + +### Arguments [_arguments_7] + +* **Request (object):** + + * **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. + * **`max_docs` (Optional, number)**: The maximum number of documents to delete. + * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to delete specified with Query DSL. + * **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + * **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. + * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. + * **`conflicts` (Optional, Enum("abort" | "proceed"))**: What to do if delete by query hits version conflicts: `abort` or `proceed`. + * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. + * **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. + * **`from` (Optional, number)**: Starting offset (default: 0) + * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. 
+ * **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
    * **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API’s `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`.
    * **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. Defaults to the index-level setting.
    * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second.
    * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
    * **`q` (Optional, string)**: A query in the Lucene query string syntax.
    * **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling.
    * **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation.
    * **`search_timeout` (Optional, string | -1 | 0)**: The explicit timeout for each search request. It defaults to no timeout.
    * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
    * **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into.
    * **`sort` (Optional, string[])**: A list of `<field>:<direction>` pairs.
    * **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes.
    * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
    * **`timeout` (Optional, string | -1 | 0)**: The period each deletion request waits for active shards.
    * **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit.
    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available.
    * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.



## delete_by_query_rethrottle [_delete_by_query_rethrottle]

Throttle a delete by query operation.

Change the number of requests per second for a particular delete by query operation.
Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query)

```ts
client.deleteByQueryRethrottle({ task_id })
```


### Arguments [_arguments_8]

* **Request (object):**

    * **`task_id` (string | number)**: The ID for the task.
    * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`.



## delete_script [_delete_script]

Delete a script or search template. Deletes a stored script or search template.

[Endpoint documentation](docs-content://explore-analyze/scripting.md)

```ts
client.deleteScript({ id })
```


### Arguments [_arguments_9]

* **Request (object):**

    * **`id` (string)**: Identifier for the stored script or search template.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



## exists [_exists]

Check a document.

Verify that a document exists. For example, check to see if a document with the `_id` 0 exists:

```
HEAD my-index-000001/_doc/0
```

If the document exists, the API returns a status code of `200 - OK`. If the document doesn’t exist, the API returns `404 - Not Found`.

**Versioning support**

You can use the `version` parameter to check the document only if its current version is equal to the specified one.

Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn’t disappear immediately, although you won’t be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get)

```ts
client.exists({ id, index })
```


### Arguments [_arguments_10]

* **Request (object):**

    * **`id` (string)**: A unique document identifier.
    * **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`).
    * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name.
    * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
    * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
+ * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. + * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. + * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. + * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. + * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. + * **`version` (Optional, number)**: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. + * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. + + + +## exists_source [_exists_source] + +Check for a document source. + +Check whether a document source exists in an index. For example: + +``` +HEAD my-index-000001/_source/1 +``` + +A document’s source is not available if it is disabled in the mapping. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get) + +```ts +client.existsSource({ id, index }) +``` + + +### Arguments [_arguments_11] + +* **Request (object):** + + * **`id` (string)**: A unique identifier for the document. + * **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). + * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. + * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. + * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). + * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. + * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. + * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. + * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. + * **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. + * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. + + + +## explain [_explain] + +Explain a document match result. Returns information about why a specific document matches, or doesn’t match, a query. 
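As a minimal sketch of a call (the full parameter list follows below; the index name, document ID, and query here are illustrative, not from the original text):

```ts
// Ask Elasticsearch to explain whether document 0 matches the query
// and how its relevance score was computed.
const response = await client.explain({
  index: 'my-index-000001', // illustrative index name
  id: '0',                  // illustrative document ID
  query: {
    match: { message: 'elasticsearch' } // illustrative query
  }
})
console.log(response.matched, response.explanation)
```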
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain) + +```ts +client.explain({ id, index }) +``` + + +### Arguments [_arguments_12] + +* **Request (object):** + + * **`id` (string)**: Defines the document ID. + * **`index` (string)**: Index names used to limit the request. Only a single index name can be provided to this parameter. + * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. + * **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. + * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. + * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. + * **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string. + * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. + * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. + * **`_source` (Optional, boolean | string | string[])**: True or false to return the `_source` field or not, or a list of fields to return. + * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. + * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. + * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return in the response. + * **`q` (Optional, string)**: Query in the Lucene query string syntax. + + + +## field_caps [_field_caps] + +Get the field capabilities. + +Get information about the capabilities of fields among multiple indices. + +For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps) + +```ts +client.fieldCaps({ ... }) +``` + + +### Arguments [_arguments_13] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. + * **`fields` (Optional, string | string[])**: List of fields to retrieve capabilities for. 
Wildcard (`*`) expressions are supported. + * **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Allows to filter indices if the provided query rewrites to match_none on every shard. + * **`runtime_mappings` (Optional, Record)**: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. + * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. + * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. + * **`include_unmapped` (Optional, boolean)**: If true, unmapped fields are included in the response. + * **`filters` (Optional, string)**: An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent + * **`types` (Optional, string[])**: Only return results for fields that have one of the types in the list + * **`include_empty_fields` (Optional, boolean)**: If false, empty fields are not included in the response. + + + +## get [_get] + +Get a document by its ID. + +Get a document and its source or stored fields from an index. + +By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the `realtime` parameter to false. + +**Source filtering** + +By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. You can turn off `_source` retrieval by using the `_source` parameter: + +``` +GET my-index-000001/_doc/0?_source=false +``` + +If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. 
This can be helpful with large documents where partial retrieval can save on network overhead. Both parameters take a comma-separated list of fields or wildcard expressions. For example:

```
GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
```

If you only want to specify includes, you can use a shorter notation:

```
GET my-index-000001/_doc/0?_source=*.id
```

**Routing**

If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example:

```
GET my-index-000001/_doc/2?routing=user1
```

This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified.

**Distributed**

The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be.

**Versioning support**

You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one.

Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn’t disappear immediately, although you won’t be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get)

```ts
client.get({ id, index })
```

### Arguments [_arguments_14]

* **Request (object):**

    * **`id` (string)**: A unique document identifier.
    * **`index` (string)**: The name of the index that contains the document.
    * **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index.
    * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name.
    * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
    * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
    * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
    * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
    * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in the `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
    * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
    * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_fields` option. Object fields can’t be returned; if specified, the request fails.
    * **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed.
    * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type.

## get_script [_get_script]

Get a script or search template. Retrieves a stored script or search template.

[Endpoint documentation](docs-content://explore-analyze/scripting.md)

```ts
client.getScript({ id })
```

### Arguments [_arguments_15]

* **Request (object):**

    * **`id` (string)**: Identifier for the stored script or search template.
    * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master

## get_script_context [_get_script_context]

Get script contexts.

Get a list of supported script contexts and their methods.

[Endpoint documentation](elasticsearch://docs/reference/scripting-languages/painless/painless-contexts.md)

```ts
client.getScriptContext()
```

## get_script_languages [_get_script_languages]

Get script languages.

Get a list of available script types, languages, and contexts.

[Endpoint documentation](docs-content://explore-analyze/scripting.md)

```ts
client.getScriptLanguages()
```

## get_source [_get_source]

Get a document’s source.

Get the source of a document. For example:

```
GET my-index-000001/_source/1
```

You can use the source filtering parameters to control which parts of the `_source` are returned:

```
GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
```

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get)

```ts
client.getSource({ id, index })
```

### Arguments [_arguments_16]

* **Request (object):**

    * **`id` (string)**: A unique document identifier.
    * **`index` (string)**: The name of the index that contains the document.
    * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas.
    * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
    * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
    * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+ * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. + * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. + * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. + * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. + * **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. + * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. + + + +## health_report [_health_report] + +Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. + +Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. + +The cluster’s status is controlled by the worst indicator status. + +In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. + +Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. + +::::{note} +The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report) + +```ts +client.healthReport({ ... }) +``` + + +### Arguments [_arguments_17] + +* **Request (object):** + + * **`feature` (Optional, string | string[])**: A feature of the cluster, as returned by the top-level health report API. + * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout. + * **`verbose` (Optional, boolean)**: Opt-in for more information about the health of the system. + * **`size` (Optional, number)**: Limit the number of affected resources the health report API returns. + + + +## index [_index] + +Create or update a document in an index. + +Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. + +::::{note} +You cannot use this API to send update requests for existing documents in a data stream. 
::::

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.
* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.
* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.

Automatic data stream creation requires a matching index template with data stream enabled.

::::{note}
Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.
::::

**Automatically create data streams and indices**

If the request’s target doesn’t exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.

If the target doesn’t exist and doesn’t match a data stream template, the operation automatically creates the index and applies any matching index templates.

::::{note}
Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.
::::

If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed.

Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a list of patterns you want to allow, or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow.

::::{note}
The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams.
::::

**Optimistic concurrency control**

Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.

**Routing**

By default, shard placement — or routing — is controlled by using a hash of the document’s ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.

When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.

::::{note}
Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
::::
**Distributed**

The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.

**Active shards**

To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter.

Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error.

For example, suppose you have a cluster of three nodes, A, B, and C and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.

It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.

**No operation (noop) updates**

When updating a document by using this API, a new version of the document is always created even if the document hasn’t changed. If this isn’t acceptable use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn’t available on this API because it doesn’t fetch the old source and isn’t able to compare it against the new source.

There isn’t a definitive rule for when noop updates aren’t acceptable.
It’s a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.

**Versioning**

Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.

::::{note}
Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks.
::::

When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document’s version number, a version conflict will occur and the index operation will fail. For example:

```
PUT my-index-000001/_doc/1?version=2&version_type=external
{
  "user": {
    "id": "elkbee"
  }
}
```

In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).

A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create)

```ts
client.index({ index })
```

### Arguments [_arguments_18]

* **Request (object):**

    * **`index` (string)**: The name of the data stream or index to target. If the target doesn’t exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn’t exist and doesn’t match a data stream template, this request creates the index. You can check for existing targets with the resolve index API.
    * **`id` (Optional, string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format and omit this parameter.
    * **`document` (Optional, object)**: A document.
    * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
    * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
    * **`op_type` (Optional, Enum("index" | "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`.
Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. + * **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. + * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. + * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. + * **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. + * **`version` (Optional, number)**: An explicit version number for concurrency control. It must be a non-negative long number. + * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. + * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. + * **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. + + + +## info [_info] + +Get cluster info. Get basic build, version, and cluster information. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info) + +```ts +client.info() +``` + + +## knn_search [_knn_search] + +Run a knn search. + +::::{note} +The kNN search API has been replaced by the `knn` option in the search API. +:::: + + +Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. + +Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. + +The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. 
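As a rough sketch of the API described above (the index name, vector field, vector values, and filter are illustrative placeholders):

```ts
// A sketch only: 'my-image-index', 'image_vector', and the values are placeholders.
const response = await client.knnSearch({
  index: 'my-image-index',
  knn: {
    field: 'image_vector',
    query_vector: [0.3, 0.1, 1.2],
    k: 10,
    num_candidates: 100
  },
  // Restrict the search: only documents matching this filter can be returned.
  filter: { term: { 'file-type': 'png' } }
})
console.log(response.hits.hits)
```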
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search)

```ts
client.knnSearch({ index, knn })
```

### Arguments [_arguments_19]

* **Request (object):**

    * **`index` (string | string[])**: A list of index names to search; use `_all` or empty string to perform the operation on all indices
    * **`knn` ({ field, query_vector, k, num_candidates })**: kNN query to execute
    * **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response.
    * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: The request returns doc values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns.
    * **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response.
    * **`fields` (Optional, string | string[])**: The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns.
    * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn’t provided, all documents are allowed to match.
    * **`routing` (Optional, string)**: A list of specific routing values

## mget [_mget]

Get multiple documents.

Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.
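For instance, a minimal sketch of fetching two documents by ID (the index name and document IDs are placeholders):

```ts
// A sketch only: the index name and document IDs are placeholders.
const response = await client.mget({
  index: 'my-index-000001',
  ids: ['1', '2']
})
// Each entry in response.docs carries either the document or a not-found/error marker.
console.log(response.docs)
```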
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget)

```ts
client.mget({ ... })
```

### Arguments [_arguments_20]

* **Request (object):**

    * **`index` (Optional, string)**: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index.
    * **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: The documents you want to retrieve. Required if no index is specified in the request URI.
    * **`ids` (Optional, string | string[])**: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI.
    * **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.
    * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default.
    * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
    * **`refresh` (Optional, boolean)**: If `true`, the request refreshes relevant shards before retrieving documents.
    * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
    * **`_source` (Optional, boolean | string | string[])**: True or false to return the `_source` field or not, or a list of fields to return.
    * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
    * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
    * **`stored_fields` (Optional, string | string[])**: If `true`, retrieves the document fields stored in the index rather than the document `_source`.

## msearch [_msearch]

Run multiple searches.

The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows:

```
header\n
body\n
header\n
body\n
```

This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.

::::{important}
The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.
::::

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch)

```ts
client.msearch({ ... })
```

### Arguments [_arguments_21]

* **Request (object):**

    * **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases to search.
    * **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])**
    * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar.
    * **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
    * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen.
    * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response.
    * **`include_named_queries_score` (Optional, boolean)**: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false). This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead.
    * **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute.
    * **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node.
    * **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard can not match any documents based on its rewrite method (for example, if date filters are mandatory to match but the shard bounds and the query are disjoint).
    * **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total is returned as an integer in the response. Defaults to false, which returns an object.
    * **`routing` (Optional, string)**: Custom routing value used to route search operations to a specific shard.
    * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents.
+ * **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. + + + +## msearch_template [_msearch_template] + +Run multiple templated searches. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch) + +```ts +client.msearchTemplate({ ... }) +``` + + +### Arguments [_arguments_22] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. + * **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** + * **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. + * **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the API can run. + * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`. + * **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. + * **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types. + + + +## mtermvectors [_mtermvectors] + +Get multiple term vectors. + +You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors) + +```ts +client.mtermvectors({ ... }) +``` + + +### Arguments [_arguments_23] + +* **Request (object):** + + * **`index` (Optional, string)**: Name of the index that contains the documents. + * **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: Array of existing or artificial documents. + * **`ids` (Optional, string[])**: Simplified syntax to specify documents by their ID if they’re in the same index. + * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. + * **`field_statistics` (Optional, boolean)**: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. + * **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets. + * **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads. 
    * **`positions` (Optional, boolean)**: If `true`, the response includes term positions.
    * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default.
    * **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time.
    * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
    * **`term_statistics` (Optional, boolean)**: If true, the response includes term frequency and document frequency.
    * **`version` (Optional, number)**: If `true`, returns the document version as part of a hit.
    * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: Specific version type.

## open_point_in_time [_open_point_in_time]

Open a point in time.

A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time.

A point in time must be opened explicitly before being used in search requests.

A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time.

Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with `search_after`.

::::{important}
The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.
::::

When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.

**Keeping point in time alive**

The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request.

Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use.

::::{tip}
Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles.
::::

Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates.
Note that a point-in-time doesn’t prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time) + +```ts +client.openPointInTime({ index, keep_alive }) +``` + + +### Arguments [_arguments_24] + +* **Request (object):** + + * **`index` (string | string[])**: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices + * **`keep_alive` (string | -1 | 0)**: Extend the length of time that the point in time persists. + * **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. + * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. + * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * **`allow_partial_search_results` (Optional, boolean)**: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. + + + +## ping [_ping] + +Ping the cluster. Get information about whether the cluster is running. + +[Endpoint documentation](docs-content://get-started/index.md) + +```ts +client.ping() +``` + + +## put_script [_put_script] + +Create or update a script or search template. Creates or updates a stored script or search template. + +[Endpoint documentation](docs-content://explore-analyze/scripting.md) + +```ts +client.putScript({ id, script }) +``` + + +### Arguments [_arguments_25] + +* **Request (object):** + + * **`id` (string)**: Identifier for the stored script or search template. Must be unique within the cluster. + * **`script` ({ lang, options, source })**: Contains the script or search template, its parameters, and its language. + * **`context` (Optional, string)**: Context in which the script or search template should run. 
To prevent errors, the API immediately compiles the script or template in this context. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +## rank_eval [_rank_eval] + +Evaluate ranked search results. + +Evaluate the quality of ranked search results over a set of typical search queries. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval) + +```ts +client.rankEval({ requests }) +``` + + +### Arguments [_arguments_26] + +* **Request (object):** + + * **`requests` ({ id, request, ratings, template_id, params }[])**: A set of typical search requests, together with their provided ratings. + * **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. + * **`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })**: Definition of the evaluation metric to calculate. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. + * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. + * **`search_type` (Optional, string)**: Search operation type + + + +## reindex [_reindex] + +Reindex documents. + +Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. + +::::{important} +Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time. +:::: + + +If the Elasticsearch security features are enabled, you must have the following security privileges: + +* The `read` index privilege for the source data stream, index, or alias. +* The `write` index privilege for the destination data stream, index, or index alias. +* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. +* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. 
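Before the details that follow, here is a hedged sketch of a simple reindex call; the index names are placeholders, and `op_type: 'create'`, `conflicts: 'proceed'`, and `wait_for_completion: false` correspond to the options discussed below:

```ts
// A sketch only: index names are placeholders.
const response = await client.reindex({
  wait_for_completion: false, // launch as a background task instead of blocking
  source: { index: 'my-index-000001' },
  dest: { index: 'my-new-index-000001', op_type: 'create' }, // only create missing documents
  conflicts: 'proceed' // count version conflicts instead of aborting
})
console.log(response.task) // poll this ID with the tasks API
```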
If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled.

The `dest` element can be configured like the index API to control optimistic concurrency control. Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.

Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.

Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict.

::::{important}
Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream.
::::

By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.

::::{note}
The reindex API makes no effort to handle ID collisions. The last document written will "win" but the order isn’t usually predictable so it is not a good idea to rely on this behavior. Instead, make sure that IDs are unique by using a script.
::::

**Running reindex asynchronously**

If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `_tasks/<task_id>`.

**Reindex from multiple sources**

If you have many sources to reindex, it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. That way you can resume the process if there are any errors by removing the partially completed source and starting over. It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.

For example, you can use a bash script like this:

```
for index in i1 i2 i3 i4 i5; do
  curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
    "source": {
      "index": "'$index'"
    },
    "dest": {
      "index": "'$index'-reindexed"
    }
  }'
done
```

**Throttling**

Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. Requests are throttled by padding each batch with a wait time. To turn off throttling, set `requests_per_second` to `-1`.

The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.
The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: + +``` +target_time = 1000 / 500 per second = 2 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +``` + +Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". + +**Slicing** + +Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. + +::::{note} +Reindexing from remote clusters does not support manual or automatic slicing. +:::: + + +You can slice a reindex request manually by providing a slice ID and total number of slices to each request. You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use. + +Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: + +* You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. +* Fetching the status of the task for the request with `slices` only contains the status of completed slices. +* These sub-requests are individually addressable for things like cancellation and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. +* Canceling the request with `slices` will cancel each sub-request. +* Due to the nature of `slices`, each sub-request won’t get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. +* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. + +If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use the following guidelines. + +Query performance is most efficient when the number of slices is equal to the number of shards in the index. If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. + +Indexing performance scales linearly across available resources with the number of slices. + +Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. + +**Modify documents during reindexing** + +Like `_update_by_query`, reindex operations support a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document’s metadata. + +Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. 
For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body. Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.
+
+Think of the possibilities! Just be careful; you are able to change:
+
+* `_id`
+* `_index`
+* `_version`
+* `_routing`
+
+Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.
+
+**Reindex from remote**
+
+Reindex supports reindexing from a remote Elasticsearch cluster. The `host` parameter must contain a scheme, host, port, and optional path. The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. Be sure to use HTTPS when using basic authentication, or the password will be sent in plain text. There are a range of settings available to configure the behavior of the HTTPS connection.
+
+When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. It can be set to a comma delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example:
+
+```
+reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
+```
+
+The list of allowed hosts must be configured on any nodes that will coordinate the reindex. This feature should work with remote clusters of any version of Elasticsearch. This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.
+
+::::{warning}
+Elasticsearch does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster.
+::::
+
+
+To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.
+
+::::{note}
+Reindexing from remote clusters does not support manual or automatic slicing.
+::::
+
+
+Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you’ll need to use a smaller batch size. It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. Both default to 30 seconds.
+
+**Configuring SSL parameters**
+
+Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request.
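+
+Putting a few of these options together, a minimal reindex call with this client might look like the following sketch. The index names, the throttle value, and the use of automatic slicing are illustrative, not prescriptive:
+
+```ts
+// Sketch: copy documents from a hypothetical `my-index` into
+// `my-index-reindexed`, creating only documents that are missing,
+// counting version conflicts instead of aborting, and throttling
+// the operation to 500 sub-requests per second.
+const response = await client.reindex({
+  source: { index: 'my-index' },
+  dest: { index: 'my-index-reindexed', op_type: 'create' },
+  conflicts: 'proceed',
+  slices: 'auto',             // let Elasticsearch choose the slice count
+  requests_per_second: 500,   // throttle; -1 turns throttling off
+  wait_for_completion: false  // run asynchronously and return a task
+})
+console.log(response.task)    // task ID you can use to monitor or cancel
+```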
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex)
+
+```ts
+client.reindex({ dest, source })
+```
+
+
+### Arguments [_arguments_27]
+
+* **Request (object):**
+
+    * **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination you are copying to.
+    * **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source you are copying from.
+    * **`conflicts` (Optional, Enum("abort" | "proceed"))**: Indicates whether to continue reindexing even when there are conflicts.
+    * **`max_docs` (Optional, number)**: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.
+    * **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when reindexing.
+    * **`size` (Optional, number)**
+    * **`refresh` (Optional, boolean)**: If `true`, the request refreshes affected shards to make this operation visible to search.
+    * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. By default, there is no throttle.
+    * **`scroll` (Optional, string | -1 | 0)**: The period of time that a consistent view of the index should be maintained for scrolled search.
+    * **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. It defaults to one slice, which means the task isn’t sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+    * **`timeout` (Optional, string | -1 | 0)**: The period each indexing operation waits for automatic index creation, dynamic mapping updates, and active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur.
+    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active.
+    * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete.
+    * **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+
+
+
+## reindex_rethrottle [_reindex_rethrottle]
+
+Throttle a reindex operation.
+
+Change the number of requests per second for a particular reindex operation.
For example:
+
+```
+POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
+```
+
+Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex)
+
+```ts
+client.reindexRethrottle({ task_id })
+```
+
+
+### Arguments [_arguments_28]
+
+* **Request (object):**
+
+    * **`task_id` (string)**: The task identifier, which can be found by using the tasks API.
+    * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level.
+
+
+
+## render_search_template [_render_search_template]
+
+Render a search template.
+
+Render a search template as a search request body.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template)
+
+```ts
+client.renderSearchTemplate({ ... })
+```
+
+
+### Arguments [_arguments_29]
+
+* **Request (object):**
+
+    * **`id` (Optional, string)**: ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required.
+    * **`file` (Optional, string)**
+    * **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value.
+    * **`source` (Optional, string)**: An inline search template. Supports the same parameters as the search API’s request body. These parameters also support Mustache variables. If no `id` or `<templated-id>` is specified, this parameter is required.
+
+
+
+## scripts_painless_execute [_scripts_painless_execute]
+
+Run a script.
+
+Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don’t have permissions to write documents on a cluster.
+
+The API uses several *contexts*, which control how scripts are run, what variables are available at runtime, and what the return type is.
+
+Each context requires a script, but additional parameters depend on the context you’re using for that script.
+
+[Endpoint documentation](elasticsearch://docs/reference/scripting-languages/painless/painless-api-examples.md)
+
+```ts
+client.scriptsPainlessExecute({ ... })
+```
+
+
+### Arguments [_arguments_30]
+
+* **Request (object):**
+
+    * **`context` (Optional, Enum("painless_test" | "filter" | "score" | "boolean_field" | "date_field" | "double_field" | "geo_point_field" | "ip_field" | "keyword_field" | "long_field" | "composite_field"))**: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed.
+    * **`context_setup` (Optional, { document, index, query })**: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`.
+    * **`script` (Optional, { source, id, params, lang, options })**: The Painless script to run.
+
+
+
+## scroll [_scroll]
+
+Run a scrolling search.
+
+::::{important}
+The scroll API is no longer recommended for deep pagination.
If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).
+::::
+
+
+The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.
+
+You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.
+
+::::{important}
+Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
+::::
+
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html)
+
+```ts
+client.scroll({ scroll_id })
+```
+
+
+### Arguments [_arguments_31]
+
+* **Request (object):**
+
+    * **`scroll_id` (string)**: Scroll ID of the search.
+    * **`scroll` (Optional, string | -1 | 0)**: Period to retain the search context for scrolling.
+    * **`rest_total_hits_as_int` (Optional, boolean)**: If true, the API response’s `hits.total` property is returned as an integer. If false, the API response’s `hits.total` property is returned as an object.
+
+
+
+## search [_search]
+
+Run a search.
+
+Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used.
+
+If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias’s data streams or indices.
+
+**Search slicing**
+
+When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs.
+
+For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.
+
+::::{important}
+The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
+::::
+
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search)
+
+```ts
+client.search({ ... })
+```
+
+
+### Arguments [_arguments_32]
+
+* **Request (object):**
+
+    * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search.
It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
+    * **`aggregations` (Optional, Record)**: Defines the aggregations that are run as part of the search request.
+    * **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**: Collapses search results by the values of the specified field.
+    * **`explain` (Optional, boolean)**: If `true`, the request returns detailed information about score computation as part of a hit.
+    * **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins.
+    * **`from` (Optional, number)**: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
+    * **`highlight` (Optional, { encoder, fields })**: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results.
+    * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
+    * **`indices_boost` (Optional, Record[])**: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score.
+    * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response.
+    * **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run.
+    * **`rank` (Optional, { rrf })**: The Reciprocal Rank Fusion (RRF) to use.
+    * **`min_score` (Optional, number)**: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results.
+    * **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results.
+    * **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution.
+
+    * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL.
+    * **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases.
+    * **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`.
+    * **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit.
+    * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: Used to retrieve the next page of hits using a set of sort values from the previous page.
+    * **`size` (Optional, number)**: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property.
+    * **`slice` (Optional, { field, id, max })**: Split a scrolled search into multiple slices that can be consumed independently.
+    * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: A list of `<field>:<direction>` pairs.
+    * **`_source` (Optional, boolean | { excludes, includes })**: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`.
+    * **`fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response.
+    * **`suggest` (Optional, { text })**: Defines a suggester that provides similar looking terms based on a provided text.
+    * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early.
+    * **`timeout` (Optional, string)**: The period of time to wait for a response from each shard.
If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout.
+    * **`track_scores` (Optional, boolean)**: If `true`, calculate and return document scores, even if the scores are not used for sorting.
+    * **`version` (Optional, boolean)**: If `true`, the request returns the document version as part of a hit.
+    * **`seq_no_primary_term` (Optional, boolean)**: If `true`, the request returns sequence number and primary term of the last modification of each hit.
+    * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response.
+    * **`pit` (Optional, { id, keep_alive })**: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path.
+    * **`runtime_mappings` (Optional, Record)**: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
+    * **`stats` (Optional, string[])**: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.
+    * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+    * **`allow_partial_search_results` (Optional, boolean)**: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`.
+    * **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
+    * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
+    * **`batched_reduce_size` (Optional, number)**: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request.
+    * **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests.
+    * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified.
+    * **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
+
+    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`.
+    * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices will be ignored when frozen.
+    * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+    * **`include_named_queries_score` (Optional, boolean)**: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead.
+    * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
+    * **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests.
+    * **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified node IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:<node-id>,<node-id>` to, if possible, run the search on the specified node IDs. If not, select shards using the default method. * `_shards:<shard>,<shard>` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order.
+    * **`pre_filter_shard_size` (Optional, number)**: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only indices. * The primary sort of the query targets an indexed field.
+    * **`request_cache` (Optional, boolean)**: If `true`, the caching of search results is enabled for requests where `size` is `0`.
It defaults to index level settings.
+    * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+    * **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting.
+    * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates how distributed term frequencies are calculated for relevance scoring.
+    * **`suggest_field` (Optional, string)**: The field to use for suggestions.
+    * **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+    * **`suggest_size` (Optional, number)**: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+    * **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+    * **`typed_keys` (Optional, boolean)**: If `true`, aggregation and suggester names are prefixed by their respective types in the response.
+    * **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response.
+    * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+    * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+    * **`q` (Optional, string)**: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned.
+    * **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.
+
+
+
+## search_mvt [_search_mvt]
+
+Search a vector tile.
+
+Search a vector tile for geospatial values.
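+
+As a quick sketch, fetching a single tile for a geospatial field might look like the following. The index name, field, and tile coordinates are illustrative:
+
+```ts
+// Sketch: request the vector tile at zoom level 13, tile x=4207, y=2692,
+// for the `location` field of a hypothetical `museums` index.
+// The response body is a binary Mapbox vector tile.
+const tile = await client.searchMvt({
+  index: 'museums',
+  field: 'location',
+  zoom: 13,
+  x: 4207,
+  y: 2692,
+  grid_precision: 2, // expose the aggs layer two zoom levels deeper
+  extent: 4096       // tile side length, in pixels
+})
+```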
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt)
+
+```ts
+client.searchMvt({ index, field, zoom, x, y })
+```
+
+
+### Arguments [_arguments_33]
+
+* **Request (object):**
+
+    * **`index` (string | string[])**: List of data streams, indices, or aliases to search
+    * **`field` (string)**: Field containing geospatial data to return
+    * **`zoom` (number)**: Zoom level for the vector tile to search
+    * **`x` (number)**: X coordinate for the vector tile to search
+    * **`y` (number)**: Y coordinate for the vector tile to search
+    * **`aggs` (Optional, Record)**: Sub-aggregations for the geotile_grid. Supports the following aggregation types: - avg - cardinality - max - min - sum
+    * **`buffer` (Optional, number)**: Size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile.
+    * **`exact_bounds` (Optional, boolean)**: If false, the meta layer’s feature is the bounding box of the tile. If true, the meta layer’s feature is a bounding box resulting from a geo_bounds aggregation. The aggregation runs on values that intersect the `<zoom>/<x>/<y>` tile with wrap_longitude set to false. The resulting bounding box may be larger than the vector tile.
+    * **`extent` (Optional, number)**: Size, in pixels, of a side of the tile. Vector tiles are square with equal sides.
+    * **`fields` (Optional, string | string[])**: Fields to return in the `hits` layer. Supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results.
+    * **`grid_agg` (Optional, Enum("geotile" | "geohex"))**: Aggregation used to create a grid for the `field`.
+    * **`grid_precision` (Optional, number)**: Additional zoom levels available through the aggs layer. For example, if `<zoom>` is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don’t include the aggs layer.
+    * **`grid_type` (Optional, Enum("grid" | "point" | "centroid"))**: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a geotile_grid cell. If *grid*, each feature is a Polygon of the cell’s bounding box. If *point*, each feature is a Point that is the centroid of the cell.
+    * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query DSL used to filter documents for the search.
+    * **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
+    * **`size` (Optional, number)**: Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don’t include the hits layer.
+ * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: Sorts features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box’s diagonal length, from longest to shortest. + * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. + * **`with_labels` (Optional, boolean)**: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. + + + +## search_shards [_search_shards] + +Get the search shards. + +Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards) + +```ts +client.searchShards({ ... }) +``` + + +### Arguments [_arguments_34] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: Returns the indices and shards that a search request would be executed against. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. + * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. + + + +## search_template [_search_template] + +Run a search with a search template. + +[Endpoint documentation](docs-content://solutions/search/search-templates.md) + +```ts +client.searchTemplate({ ... }) +``` + + +### Arguments [_arguments_35] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. Supports wildcards (*). + * **`explain` (Optional, boolean)**: If `true`, returns detailed information about score calculation as part of each hit. + * **`id` (Optional, string)**: ID of the search template to use. If no source is specified, this parameter is required. 
+
+    * **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value.
+    * **`profile` (Optional, boolean)**: If `true`, the query execution is profiled.
+    * **`source` (Optional, string)**: An inline search template. Supports the same parameters as the search API’s request body. Also supports Mustache variables. If no id is specified, this parameter is required.
+    * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+    * **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests.
+    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+    * **`ignore_throttled` (Optional, boolean)**: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled.
+    * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+    * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default.
+    * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
+    * **`scroll` (Optional, string | -1 | 0)**: Specifies how long a consistent view of the index should be maintained for scrolled search.
+    * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation.
+    * **`rest_total_hits_as_int` (Optional, boolean)**: If true, `hits.total` is rendered as an integer in the response.
+    * **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types.
+
+
+
+## terms_enum [_terms_enum]
+
+Get terms in an index.
+
+Discover terms that match a partial string in an index. This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios.
+
+If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate. This can occur due to a few reasons, such as a request timeout or a node error.
+
+::::{note}
+The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.
+::::
+
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum)
+
+```ts
+client.termsEnum({ index, field })
+```
+
+
+### Arguments [_arguments_36]
+
+* **Request (object):**
+
+    * **`index` (string)**: List of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported.
+
+    * **`field` (string)**: The field to match.
+    * **`size` (Optional, number)**: How many matching terms to return.
+    * **`timeout` (Optional, string | -1 | 0)**: The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded, the complete flag is set to false in the response and the results may be partial or empty.
+    * **`case_insensitive` (Optional, boolean)**: When true, the provided search string is matched against index terms without case sensitivity.
+    * **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Allows to filter an index shard if the provided query rewrites to match_none.
+    * **`string` (Optional, string)**: The string to match at the start of indexed terms. If not provided, all terms in the field are considered.
+    * **`search_after` (Optional, string)**: The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request.
+
+
+
+## termvectors [_termvectors]
+
+Get term vector information.
+
+Get information and statistics about terms in the fields of a particular document.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors)
+
+```ts
+client.termvectors({ index })
+```
+
+
+### Arguments [_arguments_37]
+
+* **Request (object):**
+
+    * **`index` (string)**: Name of the index that contains the document.
+    * **`id` (Optional, string)**: Unique identifier of the document.
+    * **`doc` (Optional, object)**: An artificial document (a document not present in the index) for which you want to retrieve term vectors.
+    * **`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })**: Filter terms based on their tf-idf scores.
+    * **`per_field_analyzer` (Optional, Record)**: Overrides the default per-field analyzer.
+    * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.
+    * **`field_statistics` (Optional, boolean)**: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies.
+    * **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets.
+    * **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads.
+    * **`positions` (Optional, boolean)**: If `true`, the response includes term positions.
+    * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default.
+    * **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time.
+ * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. + * **`term_statistics` (Optional, boolean)**: If `true`, the response includes term frequency and document frequency. + * **`version` (Optional, number)**: If `true`, returns the document version as part of a hit. + * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: Specific version type. + + + +## update [_update] + +Update a document. + +Update a document by running a script or passing a partial document. + +If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. + +The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation: + +* Gets the document (collocated with the shard) from the index. +* Runs the specified script. +* Indexes the result. + +The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. + +The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update) + +```ts +client.update({ id, index }) +``` + + +### Arguments [_arguments_38] + +* **Request (object):** + + * **`id` (string)**: A unique identifier for the document to be updated. + * **`index` (string)**: The name of the target index. By default, the index is created automatically if it doesn’t exist. + * **`detect_noop` (Optional, boolean)**: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. + * **`doc` (Optional, object)**: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored. + * **`doc_as_upsert` (Optional, boolean)**: If `true`, use the contents of *doc* as the value of *upsert*. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. + * **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document. + * **`scripted_upsert` (Optional, boolean)**: If `true`, run the script whether or not the document exists. + * **`_source` (Optional, boolean | { excludes, includes })**: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve. + * **`upsert` (Optional, object)**: If the document does not already exist, the contents of *upsert* are inserted as a new document. If the document exists, the *script* is run. + * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. + * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. + * **`lang` (Optional, string)**: The script language. + * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, it does nothing with refreshes. 
+
+    * **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+    * **`retry_on_conflict` (Optional, number)**: The number of times the operation should be retried when a conflict occurs.
+    * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+    * **`timeout` (Optional, string | -1 | 0)**: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur.
+    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of copies of each shard that must be active before proceeding with the operation. Set to *all* or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active.
+    * **`_source_excludes` (Optional, string | string[])**: The source fields you want to exclude.
+    * **`_source_includes` (Optional, string | string[])**: The source fields you want to retrieve.
+
+
+
+## update_by_query [_update_by_query]
+
+Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
+
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:
+
+* `read`
+* `index` or `write`
+
+You can specify the query criteria in the request URI or the request body using the same syntax as the search API.
+
+When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.
+
+::::{note}
+Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.
+::::
+
+
+While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick; they are not rolled back.
+
+**Throttling update requests**
+
+To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling.
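+
+For instance, a throttled update by query call with this client might look like the following sketch. The index name, query, and script are illustrative:
+
+```ts
+// Sketch: update matching documents at most 500 sub-requests per second,
+// counting version conflicts instead of aborting on them.
+const response = await client.updateByQuery({
+  index: 'my-index',
+  query: { term: { 'user.id': 'kimchy' } },
+  script: { source: 'ctx._source.count++', lang: 'painless' },
+  conflicts: 'proceed',
+  requests_per_second: 500  // throttle; -1 turns throttling off
+})
+console.log(response.updated, response.version_conflicts)
+```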
+
+Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is 1000, so if `requests_per_second` is set to `500`:
+
+```
+target_time = 1000 / 500 per second = 2 seconds
+wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+```
+
+Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth".
+
+**Slicing**
+
+Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts.
+
+Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+
+Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:
+
+* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
+* Fetching the status of the task for the request with `slices` only contains the status of completed slices.
+* These sub-requests are individually addressable for things like cancellation and rethrottling.
+* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.
+* Canceling the request with slices will cancel each sub-request.
+* Due to the nature of slices each sub-request won’t get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
+* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.
+* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.
+
+If you’re slicing manually or otherwise tuning automatic slicing, keep in mind that:
+
+* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
+* Update performance scales linearly across available resources with the number of slices.
+
+Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.
+
+**Update the document source**
+
+Update by query supports scripts to update the document source. As with the update API, you can set `ctx.op` to change the operation that is performed.
+
+Set `ctx.op = "noop"` if your script decides that it doesn’t have to make any changes.
The update by query operation skips updating the document and increments the `noop` counter. + +Set `ctx.op = "delete"` if your script decides that the document should be deleted. The update by query operation deletes the document and increments the `deleted` counter. + +Update by query supports only `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error. This API enables you to only modify the source of matching documents; you cannot move them. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query) + +```ts +client.updateByQuery({ index }) +``` + + +### Arguments [_arguments_39] + +* **Request (object):** + + * **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. + * **`max_docs` (Optional, number)**: The maximum number of documents to update. + * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to update using the Query DSL. + * **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when updating. + * **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. + * **`conflicts` (Optional, Enum("abort" | "proceed"))**: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + * **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. + * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. + * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. + * **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
    * **`from` (Optional, number)**: The starting offset (default: 0).
    * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
    * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
    * **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter.
    * **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
    * **`q` (Optional, string)**: A query in the Lucene query string syntax.
    * **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed.
    * **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. It defaults to the index-level setting.
    * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second.
    * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
    * **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling.
    * **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation.
    * **`search_timeout` (Optional, string | -1 | 0)**: An explicit timeout for each search request. By default, there is no timeout.
    * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
    * **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into.
    * **`sort` (Optional, string[])**: A list of `<field>:<direction>` pairs.
    * **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes.
    * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
    * **`timeout` (Optional, string | -1 | 0)**: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur.
    * **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit.
    * **`version_type` (Optional, boolean)**: Whether the document should increment its version number (internal versioning) on a hit, or not (as in reindex).
    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API.
    * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.
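
To make the slicing and task semantics concrete, here is a hedged sketch (the index name is illustrative) that runs a sliced, non-blocking update and then inspects the resulting task; the returned task ID is also what `update_by_query_rethrottle` below expects:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// With `wait_for_completion: false` the call returns a task ID
// immediately instead of blocking until every document is updated.
const { task } = await client.updateByQuery({
  index: 'my-index',
  slices: 'auto',
  conflicts: 'proceed',
  wait_for_completion: false,
  query: { match_all: {} }
})

// The task record can be polled for progress, cancelled, or rethrottled.
const status = await client.tasks.get({ task_id: String(task) })
console.log(status.completed)
```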

## update_by_query_rethrottle [_update_by_query_rethrottle]

Throttle an update by query operation.

Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query)

```ts
client.updateByQueryRethrottle({ task_id })
```


### Arguments [_arguments_40]

* **Request (object):**

    * **`task_id` (string)**: The ID for the task.
    * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`.



## async_search [_async_search]


### delete [_delete_2]

Delete an async search.

If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to the authenticated user that submitted the original search request and users that have the `cancel_task` cluster privilege.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit)

```ts
client.asyncSearch.delete({ id })
```


### Arguments [_arguments_41]

* **Request (object):**

    * **`id` (string)**: A unique identifier for the async search.



### get [_get_2]

Get async search results.

Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit)

```ts
client.asyncSearch.get({ id })
```


### Arguments [_arguments_42]

* **Request (object):**

    * **`id` (string)**: A unique identifier for the async search.
    * **`keep_alive` (Optional, string | -1 | 0)**: Specifies how long the async search should be available in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled.
If the search is completed, its saved results are deleted. + * **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response + * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Specifies to wait for the search to be completed up until the provided timeout. Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. By default no timeout is set meaning that the currently available results will be returned without any additional wait. + + + +### status [_status] + +Get the async search status. + +Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit) + +```ts +client.asyncSearch.status({ id }) +``` + + +### Arguments [_arguments_43] + +* **Request (object):** + + * **`id` (string)**: A unique identifier for the async search. + * **`keep_alive` (Optional, string | -1 | 0)**: Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. + + + +### submit [_submit] + +Run an async search. + +When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. + +Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. + +By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit) + +```ts +client.asyncSearch.submit({ ... }) +``` + + +### Arguments [_arguments_44] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: A list of index names to search; use `_all` or empty string to perform the operation on all indices + * **`aggregations` (Optional, Record)** + * **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** + * **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. + * **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. + * **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. + * **`highlight` (Optional, { encoder, fields })** + * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. + * **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. 
+ * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. + * **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. + * **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. + * **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** + * **`profile` (Optional, boolean)** + * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. + * **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** + * **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. + * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])** + * **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. + * **`slice` (Optional, { field, id, max })** + * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** + * **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. + * **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. + * **`suggest` (Optional, { text })** + * **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. 
If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early.
    * **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout.
    * **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting.
    * **`version` (Optional, boolean)**: If true, returns document version as part of a hit.
    * **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control.
    * **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response.
    * **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path.
    * **`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
    * **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.
    * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster.
    * **`keep_alive` (Optional, string | -1 | 0)**: Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period.
    * **`keep_on_completion` (Optional, boolean)**: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`.
    * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
    * **`allow_partial_search_results` (Optional, boolean)**: Indicate if an error should be returned if there is a partial search failure or timeout
    * **`analyzer` (Optional, string)**: The analyzer to use for the query string
    * **`analyze_wildcard` (Optional, boolean)**: Specify whether wildcard and prefix queries should be analyzed (default: false)
    * **`batched_reduce_size` (Optional, number)**: Affects how often partial results become available, which happens whenever shard results are reduced. A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default).
    * **`ccs_minimize_roundtrips` (Optional, boolean)**: The default value is the only supported value.
    * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query (AND or OR)
    * **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
    * **`ignore_throttled` (Optional, boolean)**: Whether specified concrete, expanded or aliased indices should be ignored when throttled
    * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed)
    * **`lenient` (Optional, boolean)**: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored
    * **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests
    * **`preference` (Optional, string)**: Specify the node or shard the operation should be performed on (default: random)
    * **`request_cache` (Optional, boolean)**: Specify if request cache should be used for this request or not, defaults to true
    * **`routing` (Optional, string)**: A list of specific routing values
    * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Search operation type
    * **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions.
    * **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: Specify suggest mode
    * **`suggest_size` (Optional, number)**: How many suggestions to return in response
    * **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned.
    * **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
    * **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
    * **`_source_excludes` (Optional, string | string[])**: A list of fields to exclude from the returned _source field
    * **`_source_includes` (Optional, string | string[])**: A list of fields to extract and return from the _source field
    * **`q` (Optional, string)**: Query in the Lucene query string syntax
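
As a sketch of the overall flow (the index, field, and timing values are illustrative): submit a search that may outlive the request, then fetch the stored results by ID:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// If the search finishes within one second the results come back inline;
// otherwise an ID is returned and the results are kept for five days.
const submitted = await client.asyncSearch.submit({
  index: 'my-index',
  wait_for_completion_timeout: '1s',
  keep_on_completion: true,
  keep_alive: '5d',
  query: { match: { title: 'elasticsearch' } }
})

if (submitted.id != null) {
  // Poll the stored results later using the returned identifier.
  const result = await client.asyncSearch.get({ id: submitted.id })
  console.log(result.is_running, result.response.hits.hits)
}
```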

## autoscaling [_autoscaling]


### delete_autoscaling_policy [_delete_autoscaling_policy]

Delete an autoscaling policy.

::::{note}
This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy)

```ts
client.autoscaling.deleteAutoscalingPolicy({ name })
```


### Arguments [_arguments_45]

* **Request (object):**

    * **`name` (string)**: the name of the autoscaling policy
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error. + + + +### get_autoscaling_capacity [_get_autoscaling_capacity] + +Get the autoscaling capacity. + +::::{note} +This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. +:::: + + +This API gets the current autoscaling capacity based on the configured autoscaling policy. It will return information to size the cluster appropriately to the current workload. + +The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. + +The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. + +The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity) + +```ts +client.autoscaling.getAutoscalingCapacity({ ... }) +``` + + +### Arguments [_arguments_46] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + + + +### get_autoscaling_policy [_get_autoscaling_policy] + +Get an autoscaling policy. + +::::{note} +This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity) + +```ts +client.autoscaling.getAutoscalingPolicy({ name }) +``` + + +### Arguments [_arguments_47] + +* **Request (object):** + + * **`name` (string)**: the name of the autoscaling policy + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + + + +### put_autoscaling_policy [_put_autoscaling_policy] + +Create or update an autoscaling policy. + +::::{note} +This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy) + +```ts +client.autoscaling.putAutoscalingPolicy({ name }) +``` + + +### Arguments [_arguments_48] + +* **Request (object):** + + * **`name` (string)**: the name of the autoscaling policy + * **`policy` (Optional, { roles, deciders })** + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +## cat [_cat] + + +### aliases [_aliases] + +Get aliases. 

Get the cluster’s index aliases, including filter and routing information. This API does not return data stream aliases.

::::{important}
CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases)

```ts
client.cat.aliases({ ... })
```


### Arguments [_arguments_49]

* **Request (object):**

    * **`name` (Optional, string | string[])**: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never time out, you can set it to `-1`.



### allocation [_allocation]

Get shard allocation information.

Get a snapshot of the number of shards allocated to each data node and their disk space.

::::{important}
CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation)

```ts
client.cat.allocation({ ... })
```


### Arguments [_arguments_50]

* **Request (object):**

    * **`node_id` (Optional, string | string[])**: A list of node identifiers or names used to limit the returned information.
    * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
    * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



### component_templates [_component_templates]

Get component templates.

Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

::::{important}
CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates)

```ts
client.cat.componentTemplates({ ... })
```


### Arguments [_arguments_51]

* **Request (object):**

    * **`name` (Optional, string)**: The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned.
+ * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. + * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. + + + +### count [_count_2] + +Get a document count. + +Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. + +::::{important} +CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count) + +```ts +client.cat.count({ ... }) +``` + + +### Arguments [_arguments_52] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + + + +### fielddata [_fielddata] + +Get field data cache information. + +Get the amount of heap memory currently used by the field data cache on every data node in the cluster. + +::::{important} +cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata) + +```ts +client.cat.fielddata({ ... }) +``` + + +### Arguments [_arguments_53] + +* **Request (object):** + + * **`fields` (Optional, string | string[])**: List of fields used to limit returned information. To retrieve all fields, omit this parameter. + * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. + + + +### health [_health] + +Get the cluster health status. + +::::{important} +CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health) + +```ts +client.cat.health({ ... }) +``` + + +### Arguments [_arguments_54] + +* **Request (object):** + + * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. + * **`ts` (Optional, boolean)**: If true, returns `HH:MM:SS` and Unix epoch timestamps. 
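
To tie these parameters together, here is a minimal usage sketch (the connection details are illustrative):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Ask for seconds-precision time values and keep both timestamp formats,
// which is handy when correlating with log files.
const health = await client.cat.health({ time: 's', ts: true })
console.log(health)
```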

### help [_help]

Get CAT help.

Get help for the CAT APIs.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat)

```ts
client.cat.help()
```


### indices [_indices]

Get index information.

Get high-level information about indices in a cluster, including backing indices for data streams.

Use this request to get the following information for each index in a cluster:

* shard count
* document count
* deleted document count
* primary store size
* total store size of all shards, including shard replicas

These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs.

CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices)

```ts
client.cat.indices({ ... })
```


### Arguments [_arguments_55]

* **Request (object):**

    * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
    * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match.
    * **`health` (Optional, Enum("green" | "yellow" | "red"))**: The health status used to limit returned indices. By default, the response includes indices of any health status.
    * **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory.
    * **`pri` (Optional, boolean)**: If true, the response only includes information from primary shards.
    * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



### master [_master]

Get master node information.

Get information about the master node, including the ID, bound IP address, and name.

::::{important}
cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master)

```ts
client.cat.master({ ... })
```


### Arguments [_arguments_56]

* **Request (object):**

    * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
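
Returning to the `indices` endpoint above, a short sketch (the index pattern is illustrative) that narrows the listing to healthy indices and reports sizes in megabytes:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Only indices matching the pattern and currently green are returned.
const indices = await client.cat.indices({
  index: 'my-index-*',
  health: 'green',
  bytes: 'mb'
})
console.log(indices)
```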
+ + + +### ml_data_frame_analytics [_ml_data_frame_analytics] + +Get data frame analytics jobs. + +Get configuration and usage information about data frame analytics jobs. + +::::{important} +CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics) + +```ts +client.cat.mlDataFrameAnalytics({ ... }) +``` + + +### Arguments [_arguments_57] + +* **Request (object):** + + * **`id` (Optional, string)**: The ID of the data frame analytics to fetch + * **`allow_no_match` (Optional, boolean)**: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) + * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit in which to display byte values + * **`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names to display. + * **`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names or column aliases used to sort the response. + * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. + + + +### ml_datafeeds [_ml_datafeeds] + +Get datafeeds. + +Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. + +::::{important} +CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds) + +```ts +client.cat.mlDatafeeds({ ... }) +``` + + +### Arguments [_arguments_58] + +* **Request (object):** + + * **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. + * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +* Contains wildcard expressions and there are no datafeeds that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. 

If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches.

    * **`h` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names to display.
    * **`s` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names or column aliases used to sort the response.
    * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values.


### ml_jobs [_ml_jobs]

Get anomaly detection jobs.

Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API.

::::{important}
CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs)

```ts
client.cat.mlJobs({ ... })
```


### Arguments [_arguments_59]

* **Request (object):**

    * **`job_id` (Optional, string)**: Identifier for the anomaly detection job.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

* Contains wildcard expressions and there are no jobs that match.
* Contains the `_all` string or no identifiers and there are no matches.
* Contains wildcard expressions and there are only partial matches.

If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches.

    * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
    * **`h` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])**: List of column names to display.
    * **`s` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])**: List of column names or column aliases used to sort the response.
    * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values.
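
As a sketch of the column-selection parameters above (the job ID pattern is illustrative):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `allow_no_match: true` avoids a 404 when the wildcard matches nothing;
// `h` keeps the output to a few columns.
const jobs = await client.cat.mlJobs({
  job_id: 'my-job-*',
  allow_no_match: true,
  h: ['id', 'state', 'model.memory_status'],
  time: 's'
})
console.log(jobs)
```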

### ml_trained_models [_ml_trained_models]

Get trained models.

Get configuration and usage information about inference trained models.

::::{important}
CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models)

```ts
client.cat.mlTrainedModels({ ... })
```


### Arguments [_arguments_60]

* **Request (object):**

    * **`model_id` (Optional, string)**: A unique identifier for the trained model.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches.
    * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
    * **`h` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names to display.
    * **`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names or aliases used to sort the response.
    * **`from` (Optional, number)**: Skips the specified number of trained models.
    * **`size` (Optional, number)**: The maximum number of trained models to display.
    * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values.



### nodeattrs [_nodeattrs]

Get node attribute information.

Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs)

```ts
client.cat.nodeattrs({ ... })
```


### Arguments [_arguments_61]

* **Request (object):**

    * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



### nodes [_nodes]

Get node information.

Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
For application consumption, use the nodes info API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes) + +```ts +client.cat.nodes({ ... }) +``` + + +### Arguments [_arguments_62] + +* **Request (object):** + + * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. + * **`full_id` (Optional, boolean | string)**: If `true`, return the full node ID. If `false`, return the shortened node ID. + * **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. + + + +### pending_tasks [_pending_tasks] + +Get pending task information. + +Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks) + +```ts +client.cat.pendingTasks({ ... }) +``` + + +### Arguments [_arguments_63] + +* **Request (object):** + + * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. + + + +### plugins [_plugins] + +Get plugin information. + +Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins) + +```ts +client.cat.plugins({ ... }) +``` + + +### Arguments [_arguments_64] + +* **Request (object):** + + * **`include_bootstrap` (Optional, boolean)**: Include bootstrap plugins in the response + * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + + + +### recovery [_recovery] + +Get shard recovery information. + +Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. 
IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery) + +```ts +client.cat.recovery({ ... }) +``` + + +### Arguments [_arguments_65] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + * **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. + * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. + * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. + * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. + + + +### repositories [_repositories] + +Get snapshot repository information. + +Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories) + +```ts +client.cat.repositories({ ... }) +``` + + +### Arguments [_arguments_66] + +* **Request (object):** + + * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + + + +### segments [_segments] + +Get segment information. + +Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments) + +```ts +client.cat.segments({ ... }) +``` + + +### Arguments [_arguments_67] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. + * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
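
For example, a minimal call might look like the following sketch; the index name `my-index` is a placeholder, and `bytes: 'mb'` simply reports segment sizes in megabytes:

```ts
// Hypothetical usage: list Lucene segment information for a placeholder
// index, with byte values rendered in megabytes.
const segments = await client.cat.segments({
  index: 'my-index',
  bytes: 'mb'
})
console.log(segments)
```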


### shards [_shards]

Get shard information.

Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards)

```ts
client.cat.shards({ ... })
```


### Arguments [_arguments_68]

* **Request (object):**

    * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
    * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
    * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values.



### snapshots [_snapshots]

Get snapshot information.

Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots)

```ts
client.cat.snapshots({ ... })
```


### Arguments [_arguments_69]

* **Request (object):**

    * **`repository` (Optional, string | string[])**: A list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error.
    * **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
    * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values.



### tasks [_tasks]

Get task information.

Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)

```ts
client.cat.tasks({ ... })
```


### Arguments [_arguments_70]

* **Request (object):**

    * **`actions` (Optional, string[])**: The task action names, which are used to limit the response.
    * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about the running tasks.
    * **`nodes` (Optional, string[])**: Unique node identifiers, which are used to limit the response.
    * **`parent_task_id` (Optional, string)**: The parent task identifier, which is used to limit the response.
    * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error. + * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed. + + + +### templates [_templates] + +Get index template information. + +Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates) + +```ts +client.cat.templates({ ... }) +``` + + +### Arguments [_arguments_71] + +* **Request (object):** + + * **`name` (Optional, string)**: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. + * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + + + +### thread_pool [_thread_pool] + +Get thread pool statistics. + +Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool) + +```ts +client.cat.threadPool({ ... }) +``` + + +### Arguments [_arguments_72] + +* **Request (object):** + + * **`thread_pool_patterns` (Optional, string | string[])**: A list of thread pool names used to limit the request. Accepts wildcard expressions. + * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. + * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + + + +### transforms [_transforms] + +Get transform information. + +Get configuration and usage information about transforms. + +CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms) + +```ts +client.cat.transforms({ ... }) +``` + + +### Arguments [_arguments_73] + +* **Request (object):** + + * **`transform_id` (Optional, string)**: A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all transforms. 
+ * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. + * **`from` (Optional, number)**: Skips the specified number of transforms. + * **`h` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names to display. + * **`s` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names or column aliases used to sort the response. + * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. + * **`size` (Optional, number)**: The maximum number of transforms to obtain. 
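
As an illustrative sketch — the wildcard identifier `my-transform-*` below is a placeholder — a typical call might look like:

```ts
// Hypothetical usage: list transforms whose IDs match a placeholder
// wildcard, returning an empty result instead of a 404 when nothing matches.
const transforms = await client.cat.transforms({
  transform_id: 'my-transform-*',
  allow_no_match: true,
  size: 100
})
console.log(transforms)
```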


## ccr [_ccr]


### delete_auto_follow_pattern [_delete_auto_follow_pattern]

Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern)

```ts
client.ccr.deleteAutoFollowPattern({ name })
```


### Arguments [_arguments_74]

* **Request (object):**

    * **`name` (string)**: The name of the auto follow pattern.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



### follow [_follow]

Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow)

```ts
client.ccr.follow({ index, leader_index, remote_cluster })
```


### Arguments [_arguments_75]

* **Request (object):**

    * **`index` (string)**: The name of the follower index.
    * **`leader_index` (string)**: The name of the index in the leader cluster to follow.
    * **`remote_cluster` (string)**: The remote cluster containing the leader index.
    * **`data_stream_name` (Optional, string)**: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed.
    * **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding read requests from the remote cluster.
    * **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower.
    * **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster.
    * **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
    * **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying.
    * **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.
    * **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit.
    * **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower.
    * **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower.
    * **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
+ * **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Settings to override from the leader index. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the remote Lucene segment files to the follower index. + + + +### follow_info [_follow_info] + +Get follower information. Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info) + +```ts +client.ccr.followInfo({ index }) +``` + + +### Arguments [_arguments_76] + +* **Request (object):** + + * **`index` (string | string[])**: A list of index patterns; use `_all` to perform the operation on all indices + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + + + +### follow_stats [_follow_stats] + +Get follower stats. Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats) + +```ts +client.ccr.followStats({ index }) +``` + + +### Arguments [_arguments_77] + +* **Request (object):** + + * **`index` (string | string[])**: A list of index patterns; use `_all` to perform the operation on all indices + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### forget_follower [_forget_follower] + +Forget a follower. Remove the cross-cluster replication follower retention leases from the leader. + +A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. 
However, removal of the leases can fail, for example, when the remote cluster containing the leader index is unavailable. While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so.

::::{note}
This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower)

```ts
client.ccr.forgetFollower({ index })
```


### Arguments [_arguments_78]

* **Request (object):**

    * **`index` (string)**: The name of the leader index for which specified follower retention leases should be removed.
    * **`follower_cluster` (Optional, string)**
    * **`follower_index` (Optional, string)**
    * **`follower_index_uuid` (Optional, string)**
    * **`leader_remote_cluster` (Optional, string)**
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### get_auto_follow_pattern [_get_auto_follow_pattern]

Get auto-follow patterns. Get cross-cluster replication auto-follow patterns.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1)

```ts
client.ccr.getAutoFollowPattern({ ... })
```


### Arguments [_arguments_79]

* **Request (object):**

    * **`name` (Optional, string)**: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



### pause_auto_follow_pattern [_pause_auto_follow_pattern]

Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored.

You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern)

```ts
client.ccr.pauseAutoFollowPattern({ name })
```


### Arguments [_arguments_80]

* **Request (object):**

    * **`name` (string)**: The name of the auto follow pattern that should pause discovering new indices to follow.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



### pause_follow [_pause_follow]

Pause a follower. Pause a cross-cluster replication follower index.
The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow)

```ts
client.ccr.pauseFollow({ index })
```


### Arguments [_arguments_81]

* **Request (object):**

    * **`index` (string)**: The name of the follower index that should pause following its leader index.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



### put_auto_follow_pattern [_put_auto_follow_pattern]

Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern.

This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern)

```ts
client.ccr.putAutoFollowPattern({ name, remote_cluster })
```


### Arguments [_arguments_82]

* **Request (object):**

    * **`name` (string)**: The name of the collection of auto-follow patterns.
    * **`remote_cluster` (string)**: The remote cluster containing the leader indices to match against.
    * **`follow_index_pattern` (Optional, string)**: The name of the follower index. The template `{{leader_index}}` can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use `{{leader_index}}`; CCR does not support changes to the names of a follower data stream’s backing indices.
    * **`leader_index_patterns` (Optional, string[])**: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field.
    * **`leader_index_exclusion_patterns` (Optional, string[])**: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed.
    * **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding read requests from the remote cluster.
    * **`settings` (Optional, Record)**: Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards).
    * **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower.
    * **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
    * **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster.
    * **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
    * **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying.
    * **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.
    * **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit.
    * **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower.
    * **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



### resume_auto_follow_pattern [_resume_auto_follow_pattern]

Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern)

```ts
client.ccr.resumeAutoFollowPattern({ name })
```


### Arguments [_arguments_83]

* **Request (object):**

    * **`name` (string)**: The name of the auto follow pattern to resume discovering new indices to follow.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



### resume_follow [_resume_follow]

Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow)

```ts
client.ccr.resumeFollow({ index })
```


### Arguments [_arguments_84]

* **Request (object):**

    * **`index` (string)**: The name of the follow index to resume following.
+ * **`max_outstanding_read_requests` (Optional, number)** + * **`max_outstanding_write_requests` (Optional, number)** + * **`max_read_request_operation_count` (Optional, number)** + * **`max_read_request_size` (Optional, string)** + * **`max_retry_delay` (Optional, string | -1 | 0)** + * **`max_write_buffer_count` (Optional, number)** + * **`max_write_buffer_size` (Optional, string)** + * **`max_write_request_operation_count` (Optional, number)** + * **`max_write_request_size` (Optional, string)** + * **`read_poll_timeout` (Optional, string | -1 | 0)** + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + + + +### stats [_stats] + +Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats) + +```ts +client.ccr.stats({ ... }) +``` + + +### Arguments [_arguments_85] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### unfollow [_unfollow] + +Unfollow an index. Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. + +::::{note} +Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow) + +```ts +client.ccr.unfollow({ index }) +``` + + +### Arguments [_arguments_86] + +* **Request (object):** + + * **`index` (string)**: The name of the follower index that should be turned into a regular index. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + + + +## cluster [_cluster] + + +### allocation_explain [_allocation_explain] + +Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain) + +```ts +client.cluster.allocationExplain({ ... }) +``` + + +### Arguments [_arguments_87] + +* **Request (object):** + + * **`current_node` (Optional, string)**: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. + * **`index` (Optional, string)**: Specifies the name of the index that you would like an explanation for. + * **`primary` (Optional, boolean)**: If true, returns explanation for the primary shard for the given shard ID. 
+ * **`shard` (Optional, number)**: Specifies the ID of the shard that you would like an explanation for. + * **`include_disk_info` (Optional, boolean)**: If true, returns information about disk usage and shard sizes. + * **`include_yes_decisions` (Optional, boolean)**: If true, returns YES decisions in explanation. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + + + +### delete_component_template [_delete_component_template] + +Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) + +```ts +client.cluster.deleteComponentTemplate({ name }) +``` + + +### Arguments [_arguments_88] + +* **Request (object):** + + * **`name` (string | string[])**: List or wildcard expression of component template names used to limit the request. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### delete_voting_config_exclusions [_delete_voting_config_exclusions] + +Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions) + +```ts +client.cluster.deleteVotingConfigExclusions({ ... }) +``` + + +### Arguments [_arguments_89] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + * **`wait_for_removal` (Optional, boolean)**: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from the cluster before this API takes any action. If set to false then the voting configuration exclusions list is cleared even if some excluded nodes are still in the cluster. + + + +### exists_component_template [_exists_component_template] + +Check component templates. Returns information about whether a particular component template exists. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) + +```ts +client.cluster.existsComponentTemplate({ name }) +``` + + +### Arguments [_arguments_90] + +* **Request (object):** + + * **`name` (string | string[])**: List of component template names used to limit the request. Wildcard (*) expressions are supported. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. + + + +### get_component_template [_get_component_template] + +Get component templates. Get information about component templates. 
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) + +```ts +client.cluster.getComponentTemplate({ ... }) +``` + + +### Arguments [_arguments_91] + +* **Request (object):** + + * **`name` (Optional, string)**: List of component template names used to limit the request. Wildcard (`*`) expressions are supported. + * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. + * **`include_defaults` (Optional, boolean)**: Return all default configurations for the component template (default: false) + * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + + + +### get_settings [_get_settings] + +Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings) + +```ts +client.cluster.getSettings({ ... }) +``` + + +### Arguments [_arguments_92] + +* **Request (object):** + + * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. + * **`include_defaults` (Optional, boolean)**: If `true`, returns default cluster settings from the local node. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### health [_health_2] + +Get the cluster health status. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. + +The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status. + +One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health) + +```ts +client.cluster.health({ ... }) +``` + + +### Arguments [_arguments_93] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. + * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Can be one of cluster, indices or shards. 
Controls the details level of the health information returned.
    * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: A number controlling how many active shards to wait for: `all` to wait for all shards in the cluster to be active, or `0` to not wait.
    * **`wait_for_events` (Optional, Enum("immediate" | "urgent" | "high" | "normal" | "low" | "languid"))**: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
    * **`wait_for_nodes` (Optional, string | number)**: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. Alternatively, it is possible to use ge(N), le(N), gt(N) and lt(N) notation.
    * **`wait_for_status` (Optional, Enum("green" | "yellow" | "red"))**: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status.



### info [_info_2]

Get cluster info. Returns basic information about the cluster.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info)

```ts
client.cluster.info({ target })
```


### Arguments [_arguments_94]

* **Request (object):**

    * **`target` (Enum("_all" | "http" | "ingest" | "thread_pool" | "script") | Enum("_all" | "http" | "ingest" | "thread_pool" | "script")[])**: Limits the information returned to the specific target. Supports a list, such as http,ingest.



### pending_tasks [_pending_tasks_2]

Get the pending cluster tasks. Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.

::::{note}
This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both the task API and the pending cluster tasks API.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks)

```ts
client.cluster.pendingTasks({ ... })
```


### Arguments [_arguments_95]

* **Request (object):**

    * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.



### post_voting_config_exclusions [_post_voting_config_exclusions]

Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks.
If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. + +Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. + +A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call. + +::::{note} +Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions) + +```ts +client.cluster.postVotingConfigExclusions({ ... }) +``` + + +### Arguments [_arguments_96] + +* **Request (object):** + + * **`node_names` (Optional, string | string[])**: A list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify node_ids. + * **`node_ids` (Optional, string | string[])**: A list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + * **`timeout` (Optional, string | -1 | 0)**: When adding a voting configuration exclusion, the API waits for the specified nodes to be excluded from the voting configuration before returning. If the timeout expires before the appropriate condition is satisfied, the request fails and returns an error. + + + +### put_component_template [_put_component_template] + +Create or update a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. + +An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. + +Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. 

Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices.

You can use C-style `/* */` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket.

**Applying component templates**

You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template’s `composed_of` list.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template)

```ts
client.cluster.putComponentTemplate({ name, template })
```


### Arguments [_arguments_97]

* **Request (object):**

    * **`name` (string)**: Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API.
    * **`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })**: The template to be applied which includes mappings, settings, or aliases configuration.
    * **`version` (Optional, number)**: Version number used to manage component templates externally. This number isn’t automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version.
    * **`_meta` (Optional, Record)**: Optional user metadata about the component template. It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information.
    * **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning.
    * **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing component templates.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.



### put_settings [_put_settings]

Update the cluster settings. Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`.

Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value.

If you configure the same setting using multiple methods, Elasticsearch applies the settings in the following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value.
For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. + +::::{tip} +In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. +:::: + + +::::{warning} +Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) + +```ts +client.cluster.putSettings({ ... }) +``` + + +### Arguments [_arguments_98] + +* **Request (object):** + + * **`persistent` (Optional, Record)** + * **`transient` (Optional, Record)** + * **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) + * **`master_timeout` (Optional, string | -1 | 0)**: Explicit operation timeout for connection to master node + * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout + + + +### remote_info [_remote_info] + +Get remote cluster information. Get all of the configured remote cluster information. This API returns connection and endpoint information keyed by the configured remote cluster alias. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info) + +```ts +client.cluster.remoteInfo() +``` + + +### reroute [_reroute] + +Reroute the cluster. Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. + +It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. + +The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. + +The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. + +Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. 
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute) + +```ts +client.cluster.reroute({ ... }) +``` + + +### Arguments [_arguments_99] + +* **Request (object):** + + * **`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])**: Defines the commands to perform. + * **`dry_run` (Optional, boolean)**: If true, then the request simulates the operation. It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. + * **`explain` (Optional, boolean)**: If true, then the response contains an explanation of why the commands can or cannot run. + * **`metric` (Optional, string | string[])**: Limits the information returned to the specified metrics. + * **`retry_failed` (Optional, boolean)**: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### state [_state] + +Get the cluster state. Get comprehensive information about the state of the cluster. + +The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. + +The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. You may need to consult the Elasticsearch source code to determine the precise meaning of the response. + +By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. + +Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable. + +::::{warning} +The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state) + +```ts +client.cluster.state({ ... 
}) +``` + + +### Arguments [_arguments_100] + +* **Request (object):** + + * **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics + * **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices + * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. + * **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) + * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) + * **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node (default: false) + * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master + * **`wait_for_metadata_version` (Optional, number)**: Wait for the metadata version to be equal or greater than the specified metadata version + * **`wait_for_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for wait_for_metadata_version before timing out + + + +### stats [_stats_2] + +Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats) + +```ts +client.cluster.stats({ ... }) +``` + + +### Arguments [_arguments_101] + +* **Request (object):** + + * **`node_id` (Optional, string | string[])**: List of node filters used to limit returned information. Defaults to all nodes in the cluster. + * **`include_remotes` (Optional, boolean)**: Include remote cluster data into the response + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. + + + +## connector [_connector] + + +### check_in [_check_in] + +Check in a connector. + +Update the `last_seen` field in the connector and set it to the current timestamp. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in) + +```ts +client.connector.checkIn({ connector_id }) +``` + + +### Arguments [_arguments_102] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be checked in + + + +### delete [_delete_3] + +Delete a connector. + +Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually. 
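As an illustrative sketch (the connector ID below is a placeholder):

```ts
// Delete a connector together with its sync jobs; API keys, ingest
// pipelines, and data indices must still be cleaned up manually.
await client.connector.delete({
  connector_id: 'my-connector-id',
  delete_sync_jobs: true
})
```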
+

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete)

```ts
client.connector.delete({ connector_id })
```


### Arguments [_arguments_103]

* **Request (object):**

    * **`connector_id` (string)**: The unique identifier of the connector to be deleted
    * **`delete_sync_jobs` (Optional, boolean)**: A flag indicating if associated sync jobs should also be removed. Defaults to false.
    * **`hard` (Optional, boolean)**: A flag indicating if the connector should be hard deleted.



### get [_get_3]

Get a connector.

Get the details about a connector.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get)

```ts
client.connector.get({ connector_id })
```


### Arguments [_arguments_104]

* **Request (object):**

    * **`connector_id` (string)**: The unique identifier of the connector
    * **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted.



### list [_list]

Get all connectors.

Get information about all connectors.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list)

```ts
client.connector.list({ ... })
```


### Arguments [_arguments_105]

* **Request (object):**

    * **`from` (Optional, number)**: Starting offset (default: 0)
    * **`size` (Optional, number)**: Specifies a max number of results to get
    * **`index_name` (Optional, string | string[])**: A list of connector index names to fetch connector documents for
    * **`connector_name` (Optional, string | string[])**: A list of connector names to fetch connector documents for
    * **`service_type` (Optional, string | string[])**: A list of connector service types to fetch connector documents for
    * **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted.
    * **`query` (Optional, string)**: A wildcard query string that filters connectors with matching name, description or index name



### post [_post]

Create a connector.

Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put)

```ts
client.connector.post({ ... })
```


### Arguments [_arguments_106]

* **Request (object):**

    * **`description` (Optional, string)**
    * **`index_name` (Optional, string)**
    * **`is_native` (Optional, boolean)**
    * **`language` (Optional, string)**
    * **`name` (Optional, string)**
    * **`service_type` (Optional, string)**



### put [_put]

Create or update a connector.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put)

```ts
client.connector.put({ ... })
```


### Arguments [_arguments_107]

* **Request (object):**

    * **`connector_id` (Optional, string)**: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided.
+ * **`description` (Optional, string)** + * **`index_name` (Optional, string)** + * **`is_native` (Optional, boolean)** + * **`language` (Optional, string)** + * **`name` (Optional, string)** + * **`service_type` (Optional, string)** + + + +### sync_job_cancel [_sync_job_cancel] + +Cancel a connector sync job. + +Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel) + +```ts +client.connector.syncJobCancel({ connector_sync_job_id }) +``` + + +### Arguments [_arguments_108] + +* **Request (object):** + + * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job + + + +### sync_job_check_in [_sync_job_check_in] + +Check in a connector sync job. Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in) + +```ts +client.connector.syncJobCheckIn({ connector_sync_job_id }) +``` + + +### Arguments [_arguments_109] + +* **Request (object):** + + * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be checked in. + + + +### sync_job_claim [_sync_job_claim] + +Claim a connector sync job. This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, it can set the `sync_cursor` property for the sync job. + +This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. + +```ts +client.connector.syncJobClaim({ connector_sync_job_id, worker_hostname }) +``` + + +### Arguments [_arguments_110] + +* **Request (object):** + + * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. + * **`worker_hostname` (string)**: The host name of the current system that will run the job. + * **`sync_cursor` (Optional, User-defined value)**: The cursor object from the last incremental sync job. This should reference the `sync_cursor` field in the connector state for which the job runs. + + + +### sync_job_delete [_sync_job_delete] + +Delete a connector sync job. + +Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete) + +```ts +client.connector.syncJobDelete({ connector_sync_job_id }) +``` + + +### Arguments [_arguments_111] + +* **Request (object):** + + * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be deleted + + + +### sync_job_error [_sync_job_error] + +Set a connector sync job error. 
Set the `error` field for a connector sync job and set its `status` to `error`. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error) + +```ts +client.connector.syncJobError({ connector_sync_job_id, error }) +``` + + +### Arguments [_arguments_112] + +* **Request (object):** + + * **`connector_sync_job_id` (string)**: The unique identifier for the connector sync job. + * **`error` (string)**: The error for the connector sync job error field. + + + +### sync_job_get [_sync_job_get] + +Get a connector sync job. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get) + +```ts +client.connector.syncJobGet({ connector_sync_job_id }) +``` + + +### Arguments [_arguments_113] + +* **Request (object):** + + * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job + + + +### sync_job_list [_sync_job_list] + +Get all connector sync jobs. + +Get information about all stored connector sync jobs listed by their creation date in ascending order. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list) + +```ts +client.connector.syncJobList({ ... }) +``` + + +### Arguments [_arguments_114] + +* **Request (object):** + + * **`from` (Optional, number)**: Starting offset (default: 0) + * **`size` (Optional, number)**: Specifies a max number of results to get + * **`status` (Optional, Enum("canceling" | "canceled" | "completed" | "error" | "in_progress" | "pending" | "suspended"))**: A sync job status to fetch connector sync jobs for + * **`connector_id` (Optional, string)**: A connector id to fetch connector sync jobs for + * **`job_type` (Optional, Enum("full" | "incremental" | "access_control") | Enum("full" | "incremental" | "access_control")[])**: A list of job types to fetch the sync jobs for + + + +### sync_job_post [_sync_job_post] + +Create a connector sync job. + +Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post) + +```ts +client.connector.syncJobPost({ id }) +``` + + +### Arguments [_arguments_115] + +* **Request (object):** + + * **`id` (string)**: The id of the associated connector + * **`job_type` (Optional, Enum("full" | "incremental" | "access_control"))** + * **`trigger_method` (Optional, Enum("on_demand" | "scheduled"))** + + + +### sync_job_update_stats [_sync_job_update_stats] + +Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. This API is mainly used by the connector service for updating sync job information. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
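As a hedged sketch of how a connector service might report progress (the job ID and counts below are hypothetical):

```ts
// Update counters for a running sync job.
await client.connector.syncJobUpdateStats({
  connector_sync_job_id: 'my-sync-job-id', // placeholder ID
  deleted_document_count: 0,
  indexed_document_count: 1250,
  indexed_document_volume: 42, // MiB indexed so far
  total_document_count: 1250
})
```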
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats) + +```ts +client.connector.syncJobUpdateStats({ connector_sync_job_id, deleted_document_count, indexed_document_count, indexed_document_volume }) +``` + + +### Arguments [_arguments_116] + +* **Request (object):** + + * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. + * **`deleted_document_count` (number)**: The number of documents the sync job deleted. + * **`indexed_document_count` (number)**: The number of documents the sync job indexed. + * **`indexed_document_volume` (number)**: The total size of the data (in MiB) the sync job indexed. + * **`last_seen` (Optional, string | -1 | 0)**: The timestamp to use in the `last_seen` property for the connector sync job. + * **`metadata` (Optional, Record)**: The connector-specific metadata. + * **`total_document_count` (Optional, number)**: The total number of documents in the target index after the sync job finished. + + + +### update_active_filtering [_update_active_filtering] + +Activate the connector draft filter. + +Activates the valid draft filtering for a connector. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering) + +```ts +client.connector.updateActiveFiltering({ connector_id }) +``` + + +### Arguments [_arguments_117] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + + + +### update_api_key_id [_update_api_key_id] + +Update the connector API key ID. + +Update the `api_key_id` and `api_key_secret_id` fields of a connector. You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id) + +```ts +client.connector.updateApiKeyId({ connector_id }) +``` + + +### Arguments [_arguments_118] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`api_key_id` (Optional, string)** + * **`api_key_secret_id` (Optional, string)** + + + +### update_configuration [_update_configuration] + +Update the connector configuration. + +Update the configuration field in the connector document. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration) + +```ts +client.connector.updateConfiguration({ connector_id }) +``` + + +### Arguments [_arguments_119] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`configuration` (Optional, Record)** + * **`values` (Optional, Record)** + + + +### update_error [_update_error] + +Update the connector error field. + +Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected. 
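A hedged sketch of both directions (the connector ID and error message are hypothetical):

```ts
// Record a failure: the connector status moves to error.
await client.connector.updateError({
  connector_id: 'my-connector-id',
  error: 'Failed to authenticate with the content source'
})

// Reset with null: the connector status moves back to connected.
await client.connector.updateError({
  connector_id: 'my-connector-id',
  error: null
})
```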
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error) + +```ts +client.connector.updateError({ connector_id, error }) +``` + + +### Arguments [_arguments_120] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`error` (T | null)** + + + +### update_features [_update_features] + +Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector: + +* document-level security +* incremental syncs +* advanced sync rules +* basic sync rules + +Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features) + +```ts +client.connector.updateFeatures({ connector_id, features }) +``` + + +### Arguments [_arguments_121] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated. + * **`features` ({ document_level_security, incremental_sync, native_connector_api_keys, sync_rules })** + + + +### update_filtering [_update_filtering] + +Update the connector filtering. + +Update the draft filtering configuration of a connector and marks the draft validation state as edited. The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering) + +```ts +client.connector.updateFiltering({ connector_id }) +``` + + +### Arguments [_arguments_122] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`filtering` (Optional, { active, domain, draft }[])** + * **`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])** + * **`advanced_snippet` (Optional, { created_at, updated_at, value })** + + + +### update_filtering_validation [_update_filtering_validation] + +Update the connector draft filtering validation. + +Update the draft filtering validation info for a connector. + +```ts +client.connector.updateFilteringValidation({ connector_id, validation }) +``` + + +### Arguments [_arguments_123] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`validation` ({ errors, state })** + + + +### update_index_name [_update_index_name] + +Update the connector index name. + +Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. 
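A minimal sketch (both identifiers are placeholders):

```ts
// Point the connector at a different content index.
await client.connector.updateIndexName({
  connector_id: 'my-connector-id',
  index_name: 'search-my-content'
})
```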
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name) + +```ts +client.connector.updateIndexName({ connector_id, index_name }) +``` + + +### Arguments [_arguments_124] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`index_name` (T | null)** + + + +### update_name [_update_name] + +Update the connector name and description. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name) + +```ts +client.connector.updateName({ connector_id }) +``` + + +### Arguments [_arguments_125] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`name` (Optional, string)** + * **`description` (Optional, string)** + + + +### update_native [_update_native] + +Update the connector is_native flag. + +```ts +client.connector.updateNative({ connector_id, is_native }) +``` + + +### Arguments [_arguments_126] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`is_native` (boolean)** + + + +### update_pipeline [_update_pipeline] + +Update the connector pipeline. + +When you create a new connector, the configuration of an ingest pipeline is populated with default settings. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline) + +```ts +client.connector.updatePipeline({ connector_id, pipeline }) +``` + + +### Arguments [_arguments_127] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })** + + + +### update_scheduling [_update_scheduling] + +Update the connector scheduling. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling) + +```ts +client.connector.updateScheduling({ connector_id, scheduling }) +``` + + +### Arguments [_arguments_128] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`scheduling` ({ access_control, full, incremental })** + + + +### update_service_type [_update_service_type] + +Update the connector service type. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type) + +```ts +client.connector.updateServiceType({ connector_id, service_type }) +``` + + +### Arguments [_arguments_129] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`service_type` (string)** + + + +### update_status [_update_status] + +Update the connector status. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status) + +```ts +client.connector.updateStatus({ connector_id, status }) +``` + + +### Arguments [_arguments_130] + +* **Request (object):** + + * **`connector_id` (string)**: The unique identifier of the connector to be updated + * **`status` (Enum("created" | "needs_configuration" | "configured" | "connected" | "error"))** + + + +## dangling_indices [_dangling_indices] + + +### delete_dangling_index [_delete_dangling_index] + +Delete a dangling index. 
If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index)

```ts
client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss })
```


### Arguments [_arguments_131]

* **Request (object):**

    * **`index_uuid` (string)**: The UUID of the index to delete. Use the get dangling indices API to find the UUID.
    * **`accept_data_loss` (boolean)**: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index.
    * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
    * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout



### import_dangling_index [_import_dangling_index]

Import a dangling index.

If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index)

```ts
client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss })
```


### Arguments [_arguments_132]

* **Request (object):**

    * **`index_uuid` (string)**: The UUID of the index to import. Use the get dangling indices API to locate the UUID.
    * **`accept_data_loss` (boolean)**: This parameter must be set to true to import a dangling index. Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster.
    * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
    * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout



### list_dangling_indices [_list_dangling_indices]

Get the dangling indices.

If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.

Use this API to list dangling indices, which you can then import or delete.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices)

```ts
client.danglingIndices.listDanglingIndices()
```


## enrich [_enrich]


### delete_policy [_delete_policy]

Delete an enrich policy. Deletes an existing enrich policy and its enrich index.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy)

```ts
client.enrich.deletePolicy({ name })
```


### Arguments [_arguments_133]

* **Request (object):**

    * **`name` (string)**: Enrich policy to delete.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



### execute_policy [_execute_policy]

Run an enrich policy.
Create the enrich index for an existing enrich policy. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy) + +```ts +client.enrich.executePolicy({ name }) +``` + + +### Arguments [_arguments_134] + +* **Request (object):** + + * **`name` (string)**: Enrich policy to execute. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks other enrich policy execution requests until complete. + + + +### get_policy [_get_policy] + +Get an enrich policy. Returns information about an enrich policy. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy) + +```ts +client.enrich.getPolicy({ ... }) +``` + + +### Arguments [_arguments_135] + +* **Request (object):** + + * **`name` (Optional, string | string[])**: List of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + + + +### put_policy [_put_policy] + +Create an enrich policy. Creates an enrich policy. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy) + +```ts +client.enrich.putPolicy({ name }) +``` + + +### Arguments [_arguments_136] + +* **Request (object):** + + * **`name` (string)**: Name of the enrich policy to create or update. + * **`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `geo_shape` query. + * **`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `term` query. + * **`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + + + +### stats [_stats_3] + +Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats) + +```ts +client.enrich.stats({ ... }) +``` + + +### Arguments [_arguments_137] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + + + +## eql [_eql] + + +### delete [_delete_4] + +Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search) + +```ts +client.eql.delete({ id }) +``` + + +### Arguments [_arguments_138] + +* **Request (object):** + + * **`id` (string)**: Identifier for the search to delete. A search ID is provided in the EQL search API’s response for an async search. A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. + + + +### get [_get_4] + +Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search. 
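For example, a hedged sketch of polling an async search (the search ID is a placeholder returned by an earlier EQL search request):

```ts
// Fetch results for an async EQL search, waiting up to 2 seconds.
const response = await client.eql.get({
  id: 'my-async-search-id',
  wait_for_completion_timeout: '2s'
})
```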
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get) + +```ts +client.eql.get({ id }) +``` + + +### Arguments [_arguments_139] + +* **Request (object):** + + * **`id` (string)**: Identifier for the search. + * **`keep_alive` (Optional, string | -1 | 0)**: Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request. + * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. + + + +### get_status [_get_status] + +Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status) + +```ts +client.eql.getStatus({ id }) +``` + + +### Arguments [_arguments_140] + +* **Request (object):** + + * **`id` (string)**: Identifier for the search. + + + +### search [_search_2] + +Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search) + +```ts +client.eql.search({ index, query }) +``` + + +### Arguments [_arguments_141] + +* **Request (object):** + + * **`index` (string | string[])**: The name of the index to scope the operation + * **`query` (string)**: EQL query you wish to run. + * **`case_sensitive` (Optional, boolean)** + * **`event_category_field` (Optional, string)**: Field containing the event classification, such as process, file, or network. + * **`tiebreaker_field` (Optional, string)**: Field used to sort hits with the same timestamp in ascending order + * **`timestamp_field` (Optional, string)**: Field containing event timestamp. Default "@timestamp" + * **`fetch_size` (Optional, number)**: Maximum number of events to search at a time for sequence queries. 
+ * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query, written in Query DSL, used to filter the events on which the EQL query runs. + * **`keep_alive` (Optional, string | -1 | 0)** + * **`keep_on_completion` (Optional, boolean)** + * **`wait_for_completion_timeout` (Optional, string | -1 | 0)** + * **`allow_partial_search_results` (Optional, boolean)** + * **`allow_partial_sequence_results` (Optional, boolean)** + * **`size` (Optional, number)**: For basic queries, the maximum number of matching events to return. Defaults to 10 + * **`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. + * **`result_position` (Optional, Enum("tail" | "head"))** + * **`runtime_mappings` (Optional, Record)** + * **`max_samples_per_key` (Optional, number)**: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. + * **`allow_no_indices` (Optional, boolean)** + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])** + * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. + + + +## esql [_esql] + + +### async_query [_async_query] + +Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. + +The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. 
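For example, a hedged sketch (the `my-logs` index and its `host.name` field are assumptions):

```ts
// Start an async ES|QL query; get back either results or a query ID.
const response = await client.esql.asyncQuery({
  query: 'FROM my-logs | STATS count = COUNT(*) BY host.name | LIMIT 10',
  wait_for_completion_timeout: '2s', // hand back a query ID if still running
  keep_on_completion: true           // keep results even if it finishes early
})
```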
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query) + +```ts +client.esql.asyncQuery({ query }) +``` + + +### Arguments [_arguments_142] + +* **Request (object):** + + * **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. + * **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. + * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. + * **`locale` (Optional, string)** + * **`params` (Optional, number | number | string | boolean | null | User-defined value[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. + * **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. + * **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. + * **`delimiter` (Optional, string)**: The character to use between values within a CSV row. It is valid only for the CSV format. + * **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. + * **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, for example `json` or `yaml`. + * **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster. The default period is five days. When this period expires, the query and its results are deleted, even if the query is still ongoing. If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. + * **`keep_on_completion` (Optional, boolean)**: Indicates whether the query and its results are stored in the cluster. 
If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter.
    * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish. By default, the request waits for 1 second for the query results. If the query completes during this period, results are returned. Otherwise, a query ID is returned that can later be used to retrieve the results.



### async_query_delete [_async_query_delete]

Delete an async ES|QL query. If the query is still running, it is cancelled. Otherwise, the stored results are deleted.

If the Elasticsearch security features are enabled, only the following users can use this API to delete a query:

* The authenticated user that submitted the original query request
* Users with the `cancel_task` cluster privilege

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete)

```ts
client.esql.asyncQueryDelete({ id })
```


### Arguments [_arguments_143]

* **Request (object):**

    * **`id` (string)**: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.



### async_query_get [_async_query_get]

Get async ES|QL query results. Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get)

```ts
client.esql.asyncQueryGet({ id })
```


### Arguments [_arguments_144]

* **Request (object):**

    * **`id` (string)**: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.
    * **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
    * **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing.
    * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish. By default, the request waits for complete query results. If the request completes during the period specified in this parameter, complete query results are returned. Otherwise, the response returns an `is_running` value of `true` and no results.



### query [_query]

Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query.
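For example, a hedged sketch (the index name and `status` field are assumptions):

```ts
// Run a synchronous ES|QL query and read the tabular response.
const response = await client.esql.query({
  query: 'FROM my-index | WHERE status == 200 | LIMIT 5'
})
console.log(response.columns, response.values)
```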
+ +[Endpoint documentation](docs-content://explore-analyze/query-filter/languages/esql-rest.md) + +```ts +client.esql.query({ query }) +``` + + +### Arguments [_arguments_145] + +* **Request (object):** + + * **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. + * **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. + * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. + * **`locale` (Optional, string)** + * **`params` (Optional, number | number | string | boolean | null | User-defined value[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. + * **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. + * **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. + * **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, e.g. json, yaml. + * **`delimiter` (Optional, string)**: The character to use between values within a CSV row. Only valid for the CSV format. + * **`drop_null_columns` (Optional, boolean)**: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. + + + +## features [_features_17] + + +### get_features [_get_features] + +Get the features. Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. + +A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. 
+

The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features)

```ts
client.features.getFeatures({ ... })
```


### Arguments [_arguments_146]

* **Request (object):**

    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



### reset_features [_reset_features]

Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.

::::{warning}
Intended for development and testing use only. Do not reset features on a production cluster.
::::


Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices.

The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature.

Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins.

To list the features that will be affected, use the get features API.

::::{important}
The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.
::::


[Endpoint documentation](docs-content://deploy-manage/tools/snapshot-and-restore.md)

```ts
client.features.resetFeatures({ ... })
```


### Arguments [_arguments_147]

* **Request (object):**

    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



## fleet [_fleet]


### global_checkpoints [_global_checkpoints]

Get global checkpoints. Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet)

```ts
client.fleet.globalCheckpoints({ index })
```


### Arguments [_arguments_148]

* **Request (object):**

    * **`index` (string | string)**: A single index or index alias that resolves to a single index.
    * **`wait_for_advance` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the global checkpoints to advance past the provided `checkpoints`.
    * **`wait_for_index` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the target index to exist and for all primary shards to be active. Can only be true when `wait_for_advance` is true.
    * **`checkpoints` (Optional, number[])**: A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, the API will only return once the global checkpoints advance past the checkpoints. Providing an empty list will cause Elasticsearch to immediately return the current global checkpoints.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for the global checkpoints to advance past `checkpoints`.
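A hedged sketch (the index name and checkpoint value are hypothetical, and the API is primarily meant for the Fleet server itself):

```ts
// Wait up to 30 seconds for the global checkpoints to advance past 41.
const response = await client.fleet.globalCheckpoints({
  index: '.fleet-actions-results',
  wait_for_advance: true,
  checkpoints: [41],
  timeout: '30s'
})
```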
+ + + +### msearch [_msearch_2] + +Run multiple Fleet searches. Run several Fleet searches with a single API request. The API follows the same structure as the multi search API. However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter. + +```ts +client.fleet.msearch({ ... }) +``` + + +### Arguments [_arguments_149] + +* **Request (object):** + + * **`index` (Optional, string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index. + * **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** + * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. + * **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. + * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. + * **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. + * **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. + * **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. + * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. + * **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. + * **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. + * **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. 
When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. + * **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](docs-content://deploy-manage/distributed-architecture/reading-and-writing-documents.md#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. + + + +### search [_search_3] + +Run a Fleet search. The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. + +```ts +client.fleet.search({ index }) +``` + + +### Arguments [_arguments_150] + +* **Request (object):** + + * **`index` (string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index. + * **`aggregations` (Optional, Record)** + * **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** + * **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. + * **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. + * **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. + * **`highlight` (Optional, { encoder, fields })** + * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. + * **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. + * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. + * **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. 
+ * **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** + * **`profile` (Optional, boolean)** + * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. + * **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** + * **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. + * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])** + * **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. + * **`slice` (Optional, { field, id, max })** + * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** + * **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. + * **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. + * **`suggest` (Optional, { text })** + * **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. + * **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. + * **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. + * **`version` (Optional, boolean)**: If true, returns document version as part of a hit. 
    * **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control.
    * **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response.
    * **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an index in the request path.
    * **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
    * **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.
    * **`allow_no_indices` (Optional, boolean)**
    * **`analyzer` (Optional, string)**
    * **`analyze_wildcard` (Optional, boolean)**
    * **`batched_reduce_size` (Optional, number)**
    * **`ccs_minimize_roundtrips` (Optional, boolean)**
    * **`default_operator` (Optional, Enum("and" | "or"))**
    * **`df` (Optional, string)**
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**
    * **`ignore_throttled` (Optional, boolean)**
    * **`ignore_unavailable` (Optional, boolean)**
    * **`lenient` (Optional, boolean)**
    * **`max_concurrent_shard_requests` (Optional, number)**
    * **`preference` (Optional, string)**
    * **`pre_filter_shard_size` (Optional, number)**
    * **`request_cache` (Optional, boolean)**
    * **`routing` (Optional, string)**
    * **`scroll` (Optional, string | -1 | 0)**
    * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**
    * **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions.
    * **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**
    * **`suggest_size` (Optional, number)**
    * **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned.
    * **`typed_keys` (Optional, boolean)**
    * **`rest_total_hits_as_int` (Optional, boolean)**
    * **`_source_excludes` (Optional, string | string[])**
    * **`_source_includes` (Optional, string | string[])**
    * **`q` (Optional, string)**
    * **`wait_for_checkpoints` (Optional, number[])**: A comma-separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list, which will cause Elasticsearch to immediately execute the search.
    * **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](docs-content://deploy-manage/distributed-architecture/reading-and-writing-documents.md#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default.



## graph [_graph]


### explore [_explore]

Explore graph analytics. Extract and summarize information about the documents and terms in an Elasticsearch data stream or index.
The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. Subsequent requests enable you to spider out from one or more vertices of interest. You can exclude vertices that have already been returned.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph)

```ts
client.graph.explore({ index })
```


### Arguments [_arguments_151]

* **Request (object):**

    * **`index` (string | string[])**: Name of the index.
    * **`connections` (Optional, { connections, query, vertices })**: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
    * **`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })**: Direct the Graph API how to build the graph.
    * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
    * **`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])**: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
    * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
    * **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout.



## ilm [_ilm]


### delete_lifecycle [_delete_lifecycle]

Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle)

```ts
client.ilm.deleteLifecycle({ policy })
```


### Arguments [_arguments_152]

* **Request (object):**

    * **`policy` (string)**: Identifier for the policy.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### explain_lifecycle [_explain_lifecycle]

Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream’s backing indices.
The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and shows information about any failures.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle)

```ts
client.ilm.explainLifecycle({ index })
```


### Arguments [_arguments_153]

* **Request (object):**

    * **`index` (string)**: List of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` or `_all`.
    * **`only_errors` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist.
    * **`only_managed` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.



### get_lifecycle [_get_lifecycle]

Get lifecycle policies.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle)

```ts
client.ilm.getLifecycle({ ... })
```


### Arguments [_arguments_154]

* **Request (object):**

    * **`policy` (Optional, string)**: Identifier for the policy.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### get_status [_get_status_2]

Get the ILM status. Get the current index lifecycle management status.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status)

```ts
client.ilm.getStatus()
```


### migrate_to_data_tiers [_migrate_to_data_tiers]

Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers.

Migrating away from custom node attribute routing can be performed manually. This API provides an automated way of performing three out of the four manual steps listed in the migration guide:

1. Stop setting the custom hot attribute on new indices.
2. Remove custom allocation settings from existing ILM policies.
3. Replace custom allocation settings from existing indices with the corresponding tier preference.

ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers)

```ts
client.ilm.migrateToDataTiers({ ...
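  // The parameter names below come from the argument list that follows;
  // the values shown are illustrative assumptions only:
  //   legacy_template_to_delete: 'global-legacy-template',
  //   node_attribute: 'custom_attribute_name',
  //   dry_run: true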
})
```


### Arguments [_arguments_155]

* **Request (object):**

    * **`legacy_template_to_delete` (Optional, string)**
    * **`node_attribute` (Optional, string)**
    * **`dry_run` (Optional, boolean)**: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated.



### move_to_step [_move_to_step]

Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step.

::::{warning}
This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and should be considered an expert-level API.
::::


You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index. This is to prevent the index from being moved from an unexpected step into the next step.

When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step)

```ts
client.ilm.moveToStep({ index, current_step, next_step })
```


### Arguments [_arguments_156]

* **Request (object):**

    * **`index` (string)**: The name of the index whose lifecycle step is to change
    * **`current_step` ({ action, name, phase })**: The step that the index is expected to be in.
    * **`next_step` ({ action, name, phase })**: The step that you want to run.



### put_lifecycle [_put_lifecycle]

Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented.

::::{note}
Only the latest version of the policy is stored; you cannot revert to previous versions.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle)

```ts
client.ilm.putLifecycle({ policy })
```


### Arguments [_arguments_157]

* **Request (object):**

    * **`policy` (string)**: Identifier for the policy.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### remove_policy [_remove_policy]

Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream’s backing indices. This also stops ILM from managing the indices.
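For example, a minimal sketch (the index name here is an illustrative assumption, not part of the generated reference):

```ts
// Sketch: detach the ILM policy from a hypothetical index.
const response = await client.ilm.removePolicy({ index: 'my-index-000001' })
console.log(response.has_failures, response.failed_indexes)
```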
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy)

```ts
client.ilm.removePolicy({ index })
```


### Arguments [_arguments_158]

* **Request (object):**

    * **`index` (string)**: The name of the index from which to remove the policy



### retry [_retry]

Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry)

```ts
client.ilm.retry({ index })
```


### Arguments [_arguments_159]

* **Request (object):**

    * **`index` (string)**: The name of the indices (comma-separated) whose failed lifecycle step is to be retried



### start [_start]

Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start)

```ts
client.ilm.start({ ... })
```


### Arguments [_arguments_160]

* **Request (object):**

    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### stop [_stop]

Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.

The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop)

```ts
client.ilm.stop({ ... })
```


### Arguments [_arguments_161]

* **Request (object):**

    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



## indices [_indices_2]


### add_block [_add_block]

Add an index block. Limits the operations allowed on an index by blocking specific operation types.
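For instance, a minimal sketch (the index name is an illustrative assumption) that makes an index reject writes:

```ts
// Sketch: block write operations on a hypothetical index.
const response = await client.indices.addBlock({
  index: 'my-index-000001',
  block: 'write'
})
console.log(response.acknowledged)
```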
[Index block settings](elasticsearch://docs/reference/elasticsearch/index-settings/index-block.md)

```ts
client.indices.addBlock({ index, block })
```


### Arguments [_arguments_162]

* **Request (object):**

    * **`index` (string)**: A comma-separated list of indices to add a block to
    * **`block` (Enum("metadata" | "read" | "read_only" | "write"))**: The block to add (one of read, write, read_only or metadata)
    * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
    * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed)
    * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
    * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout



### analyze [_analyze]

Get tokens from text analysis. The analyze API performs analysis on a text string and returns the resulting tokens.

Generating an excessive amount of tokens may cause a node to run out of memory. The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. If more tokens than this limit are generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze)

```ts
client.indices.analyze({ ... })
```


### Arguments [_arguments_163]

* **Request (object):**

    * **`index` (Optional, string)**: Index used to derive the analyzer. If specified, the `analyzer` or `field` parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer.
    * **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index.
    * **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter.
    * **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer.
    * **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details.
    * **`field` (Optional, string)**: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value.
+ * **`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])**: Array of token filters used to apply after the tokenizer. + * **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token. + * **`text` (Optional, string | string[])**: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. + * **`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })**: Tokenizer to use to convert text into tokens. 
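A minimal usage sketch (the sample text is an assumption; `standard` is a built-in analyzer):

```ts
// Sketch: tokenize a sample string with the built-in standard analyzer.
const response = await client.indices.analyze({
  analyzer: 'standard',
  text: 'The quick brown fox'
})
// Each token includes the term text, offsets, position, and type.
console.log(response.tokens?.map(t => t.token)) // [ 'the', 'quick', 'brown', 'fox' ]
```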
+ + + +### cancel_migrate_reindex [_cancel_migrate_reindex] + +Cancel a migration reindex operation. + +Cancel a migration reindex attempt for a data stream or index. + +```ts +client.indices.cancelMigrateReindex({ index }) +``` + + +### Arguments [_arguments_164] + +* **Request (object):** + + * **`index` (string | string[])**: The index or data stream name + + + +### clear_cache [_clear_cache] + +Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream’s backing indices. + +By default, the clear cache API clears all caches. To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache) + +```ts +client.indices.clearCache({ ... }) +``` + + +### Arguments [_arguments_165] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * **`fielddata` (Optional, boolean)**: If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. + * **`fields` (Optional, string | string[])**: List of field names used to limit the `fielddata` parameter. + * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + * **`query` (Optional, boolean)**: If `true`, clears the query cache. + * **`request` (Optional, boolean)**: If `true`, clears the request cache. + + + +### clone [_clone] + +Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. + +::::{important} +Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. +:::: + + +The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. + +Cloning works as follows: + +* First, it creates a new target index with the same definition as the source index. +* Then it hard-links segments from the source index into the target index. 
If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
* Finally, it recovers the target index as though it were a closed index which had just been re-opened.

::::{important}
Indices can only be cloned if they meet the following requirements:
::::


* The index must be marked as read-only and have a cluster health status of green.
* The target index must not exist.
* The source index must have the same number of primary shards as the target index.
* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.

The current write index on a data stream cannot be cloned. In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.

::::{note}
Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index.
::::


**Monitor the cloning process**

The cloning process can be monitored with the cat recovery API, or you can use the cluster health API to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`.

The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. At this point, all shards are in the unassigned state. If, for any reason, the target index can’t be allocated, its primary shard will remain unassigned until it can be allocated on that node.

Once the primary shard is allocated, it moves to the initializing state, and the clone process begins. When the clone operation completes, the shard will become active. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.

**Wait for active shards**

Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone)

```ts
client.indices.clone({ index, target })
```


### Arguments [_arguments_166]

* **Request (object):**

    * **`index` (string)**: Name of the source index to clone.
    * **`target` (string)**: Name of the target index to create.
    * **`aliases` (Optional, Record)**: Aliases for the resulting index.
    * **`settings` (Optional, Record)**: Configuration options for the target index.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).



### close [_close]

Close an index. A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index.
Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.

When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.

You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off using the `ignore_unavailable=true` parameter.

By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API.

Closed indices consume a significant amount of disk space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close)

```ts
client.indices.close({ index })
```


### Arguments [_arguments_167]

* **Request (object):**

    * **`index` (string | string[])**: List or wildcard expression of index names used to limit the request.
    * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
    * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).



### create [_create_2]

Create an index. You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following:

* Settings for the index.
* Mappings for fields in the index.
* Index aliases.

**Wait for active shards**

By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. The index creation response will indicate what happened.
For example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. These values simply indicate whether the operation completed before the timeout. If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`).

You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create)

```ts
client.indices.create({ index })
```


### Arguments [_arguments_168]

* **Request (object):**

    * **`index` (string)**: Name of the index you wish to create.
    * **`aliases` (Optional, Record)**: Aliases for the index.
    * **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. If specified, this mapping can include:

        * Field names
        * Field data types
        * Mapping parameters

    * **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).



### create_data_stream [_create_data_stream]

Create a data stream.
Creates a data stream. You must have a matching index template with data stream enabled. + +[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) + +```ts +client.indices.createDataStream({ name }) +``` + + +### Arguments [_arguments_169] + +* **Request (object):** + + * **`name` (string)**: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### create_from [_create_from] + +Create an index from a source index. + +Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. + +```ts +client.indices.createFrom({ source, dest }) +``` + + +### Arguments [_arguments_170] + +* **Request (object):** + + * **`source` (string)**: The source index or data stream name + * **`dest` (string)**: The destination index or data stream name + * **`create_from` (Optional, { mappings_override, settings_override, remove_index_blocks })** + + + +### data_streams_stats [_data_streams_stats] + +Get data stream stats. Retrieves statistics for one or more data streams. + +[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) + +```ts +client.indices.dataStreamsStats({ ... }) +``` + + +### Arguments [_arguments_171] + +* **Request (object):** + + * **`name` (Optional, string)**: List of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a cluster, omit this parameter or use `*`. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. + + + +### delete [_delete_5] + +Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards. + +You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete) + +```ts +client.indices.delete({ index }) +``` + + +### Arguments [_arguments_172] + +* **Request (object):** + + * **`index` (string | string[])**: List of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. 
This behavior applies even if the request targets other open indices.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
    * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### delete_alias [_delete_alias]

Delete an alias. Removes a data stream or index from an alias.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias)

```ts
client.indices.deleteAlias({ index, name })
```


### Arguments [_arguments_173]

* **Request (object):**

    * **`index` (string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`).
    * **`name` (string | string[])**: List of aliases to remove. Supports wildcards (`*`). To remove all aliases, use `*` or `_all`.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### delete_data_lifecycle [_delete_data_lifecycle]

Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle)

```ts
client.indices.deleteDataLifecycle({ name })
```


### Arguments [_arguments_174]

* **Request (object):**

    * **`name` (string | string[])**: A list of data streams from which the data stream lifecycle will be deleted; use `*` to target all data streams
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open)
    * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
    * **`timeout` (Optional, string | -1 | 0)**: Explicit timeout for the request



### delete_data_stream [_delete_data_stream]

Delete data streams. Deletes one or more data streams and their backing indices.

[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md)

```ts
client.indices.deleteDataStream({ name })
```


### Arguments [_arguments_175]

* **Request (object):**

    * **`name` (string | string[])**: List of data streams to delete. Wildcard (`*`) expressions are supported.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.



### delete_index_template [_delete_index_template]

Delete an index template. The provided name may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template)

```ts
client.indices.deleteIndexTemplate({ name })
```


### Arguments [_arguments_176]

* **Request (object):**

    * **`name` (string | string[])**: List of index template names used to limit the request. Wildcard (*) expressions are supported.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### delete_template [_delete_template]

Delete a legacy index template.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template)

```ts
client.indices.deleteTemplate({ name })
```


### Arguments [_arguments_177]

* **Request (object):**

    * **`name` (string)**: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### disk_usage [_disk_usage]

Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result for a small index can be inaccurate as some parts of an index might not be analyzed by the API.

::::{note}
The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage)

```ts
client.indices.diskUsage({ index })
```


### Arguments [_arguments_178]

* **Request (object):**

    * **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly.
+ * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. + * **`flush` (Optional, boolean)**: If `true`, the API performs a flush before analysis. If `false`, the response may not include uncommitted data. + * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. + * **`run_expensive_tasks` (Optional, boolean)**: Analyzing field disk usage is resource-intensive. To use the API, this parameter must be set to `true`. + + + +### downsample [_downsample] + +Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. + +::::{note} +Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample) + +```ts +client.indices.downsample({ index, target_index }) +``` + + +### Arguments [_arguments_179] + +* **Request (object):** + + * **`index` (string)**: Name of the time series index to downsample. + * **`target_index` (string)**: Name of the index to create. + * **`config` (Optional, { fixed_interval })** + + + +### exists [_exists_2] + +Check indices. Check if one or more indices, index aliases, or data streams exist. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists) + +```ts +client.indices.exists({ index }) +``` + + +### Arguments [_arguments_180] + +* **Request (object):** + + * **`index` (string | string[])**: List of data streams, indices, and aliases. Supports wildcards (`*`). + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. 
+ * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. + * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. + + + +### exists_alias [_exists_alias] + +Check aliases. Checks if one or more data stream or index aliases exist. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases) + +```ts +client.indices.existsAlias({ name }) +``` + + +### Arguments [_arguments_181] + +* **Request (object):** + + * **`name` (string | string[])**: List of aliases to check. Supports wildcards (`*`). + * **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * **`ignore_unavailable` (Optional, boolean)**: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + + + +### exists_index_template [_exists_index_template] + +Check index templates. Check whether index templates exist. + +[Endpoint documentation](docs-content://manage-data/data-store/templates.md) + +```ts +client.indices.existsIndexTemplate({ name }) +``` + + +### Arguments [_arguments_182] + +* **Request (object):** + + * **`name` (string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + + + +### exists_template [_exists_template] + +Check existence of index templates. Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + +::::{important} +This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template) + +```ts +client.indices.existsTemplate({ name }) +``` + + +### Arguments [_arguments_183] + +* **Request (object):** + + * **`name` (string | string[])**: A list of index template names used to limit the request. Wildcard (`*`) expressions are supported. 
    * **`flat_settings` (Optional, boolean)**: Indicates whether to use a flat format for the response.
    * **`local` (Optional, boolean)**: Indicates whether to get information from the local node only.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never time out, set it to `-1`.



### explain_data_lifecycle [_explain_data_lifecycle]

Get the status for a data stream lifecycle. Get information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle)

```ts
client.indices.explainDataLifecycle({ index })
```


### Arguments [_arguments_184]

* **Request (object):**

    * **`index` (string | string[])**: The name of the index to explain
    * **`include_defaults` (Optional, boolean)**: Indicates if the API should return the default values the system uses for the index’s lifecycle
    * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master



### field_usage_stats [_field_usage_stats]

Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.

The response body reports the per-shard usage count of the data structures that back the fields in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats)

```ts
client.indices.fieldUsageStats({ index })
```


### Arguments [_arguments_185]

* **Request (object):**

    * **`index` (string | string[])**: List or wildcard expression of index names used to limit the request.
    * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
    * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response.
    * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics.
    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+
+
+
+### flush [_flush]
+
+Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.
+
+After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.
+
+It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush)
+
+```ts
+client.indices.flush({ ... })
+```
+
+
+### Arguments [_arguments_186]
+
+* **Request (object):**
+
+ * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this parameter or use `*` or `_all`.
+ * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices.
+ * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+ * **`force` (Optional, boolean)**: If `true`, the request forces a flush even if there are no changes to commit to the index.
+ * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+ * **`wait_if_ongoing` (Optional, boolean)**: If `true`, the flush operation blocks until any ongoing flush completes. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running.
+
+
+
+### forcemerge [_forcemerge]
+
+Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream’s backing indices.
+
+Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually.
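+
+For illustration, a minimal sketch of triggering a manual merge with this client (the index name and the `max_num_segments` value are hypothetical, not defaults):
+
+```ts
+// merge the segments of a read-only index down to a single segment
+await client.indices.forcemerge({
+  index: 'my-old-index',
+  max_num_segments: 1
+})
+```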
+
+::::{warning}
+We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can’t be backed up incrementally.
+::::
+
+
+**Blocks during a force merge**
+
+Calls to this API block until the merge is complete (unless the request contains `wait_for_completion=false`). If the client connection is lost before completion then the force merge process will continue in the background. Any new requests to force merge the same indices will also block until the ongoing force merge is complete.
+
+**Running force merge asynchronously**
+
+If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. However, you cannot cancel this task as the force merge task is not cancelable. Elasticsearch creates a record of this task as a document at `_tasks/`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.
+
+**Force merging multiple indices**
+
+You can force merge multiple indices with a single request by targeting:
+
+* One or more data streams that contain multiple backing indices
+* Multiple indices
+* One or more aliases
+* All data streams and indices in a cluster
+
+Each targeted shard is force-merged separately using the `force_merge` threadpool. By default, each node only has a single `force_merge` thread, which means that the shards on that node are force-merged one at a time. If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel.
+
+Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one.
+
+**Data streams and time-based indices**
+
+Force-merging is useful for managing a data stream’s older backing indices and other time-based indices, particularly after a rollover. In these cases, each index only receives indexing traffic for a certain period of time. Once an index receives no more writes, its shards can be force-merged to a single segment. This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. For example:
+
+```
+POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
+```
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge)
+
+```ts
+client.indices.forcemerge({ ... })
+```
+
+
+### Arguments [_arguments_187]
+
+* **Request (object):**
+
+ * **`index` (Optional, string | string[])**: A list of index names; use `_all` or an empty string to perform the operation on all indices.
+ * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices.
(This includes `_all` string or when no indices have been specified) + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. + * **`flush` (Optional, boolean)**: Specify whether the index should be flushed after performing the operation (default: true) + * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) + * **`max_num_segments` (Optional, number)**: The number of segments the index should be merged into (default: dynamic) + * **`only_expunge_deletes` (Optional, boolean)**: Specify whether the operation should only expunge deleted documents + * **`wait_for_completion` (Optional, boolean)**: Should the request wait until the force merge is completed. + + + +### get [_get_5] + +Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get) + +```ts +client.indices.get({ index }) +``` + + +### Arguments [_arguments_188] + +* **Request (object):** + + * **`index` (string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. + * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as open,hidden. + * **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. + * **`ignore_unavailable` (Optional, boolean)**: If false, requests that target a missing index return an error. + * **`include_defaults` (Optional, boolean)**: If true, return all default settings in the response. + * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`features` (Optional, { name, description } | { name, description }[])**: Return only information on specified index features + + + +### get_alias [_get_alias] + +Get aliases. Retrieves information for one or more data stream or index aliases. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias) + +```ts +client.indices.getAlias({ ... }) +``` + + +### Arguments [_arguments_189] + +* **Request (object):** + + * **`name` (Optional, string | string[])**: List of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. 
+ * **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + + + +### get_data_lifecycle [_get_data_lifecycle] + +Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle) + +```ts +client.indices.getDataLifecycle({ name }) +``` + + +### Arguments [_arguments_190] + +* **Request (object):** + + * **`name` (string | string[])**: List of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + + + +### get_data_lifecycle_stats [_get_data_lifecycle_stats] + +Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats) + +```ts +client.indices.getDataLifecycleStats() +``` + + +### get_data_stream [_get_data_stream] + +Get data streams. Retrieves information about one or more data streams. + +[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) + +```ts +client.indices.getDataStream({ ... }) +``` + + +### Arguments [_arguments_191] + +* **Request (object):** + + * **`name` (Optional, string | string[])**: List of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. 
Supports a list of values, such as `open,hidden`. + * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned. + + + +### get_field_mapping [_get_field_mapping] + +Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. + +This API is useful if you don’t need a complete mapping or if an index mapping contains a large number of fields. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) + +```ts +client.indices.getFieldMapping({ fields }) +``` + + +### Arguments [_arguments_192] + +* **Request (object):** + + * **`fields` (string | string[])**: List or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). + * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. + * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. + + + +### get_index_template [_get_index_template] + +Get index templates. Get information about one or more index templates. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template) + +```ts +client.indices.getIndexTemplate({ ... }) +``` + + +### Arguments [_arguments_193] + +* **Request (object):** + + * **`name` (Optional, string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. + * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. + * **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
+ * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. + + + +### get_mapping [_get_mapping] + +Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) + +```ts +client.indices.getMapping({ ... }) +``` + + +### Arguments [_arguments_194] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + + + +### get_migrate_reindex_status [_get_migrate_reindex_status] + +Get the migration reindexing status. + +Get the status of a migration reindex attempt for a data stream or index. + +```ts +client.indices.getMigrateReindexStatus({ index }) +``` + + +### Arguments [_arguments_195] + +* **Request (object):** + + * **`index` (string | string[])**: The index or data stream name. + + + +### get_settings [_get_settings_2] + +Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream’s backing indices. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings) + +```ts +client.indices.getSettings({ ... }) +``` + + +### Arguments [_arguments_196] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + * **`name` (Optional, string | string[])**: List or wildcard expression of settings to retrieve. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with `bar`. 
+ * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
+ * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+ * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+ * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response.
+ * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node.
+ * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+
+
+
+### get_template [_get_template]
+
+Get index templates. Get information about one or more index templates.
+
+::::{important}
+This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
+::::
+
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template)
+
+```ts
+client.indices.getTemplate({ ... })
+```
+
+
+### Arguments [_arguments_197]
+
+* **Request (object):**
+
+ * **`name` (Optional, string | string[])**: List of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, omit this parameter or use a value of `_all` or `*`.
+ * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+ * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
+ * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+
+
+
+### migrate_reindex [_migrate_reindex]
+
+Reindex legacy backing indices.
+
+Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. The persistent task ID is returned immediately and the reindexing work is completed in that task.
+
+```ts
+client.indices.migrateReindex({ ... })
+```
+
+
+### Arguments [_arguments_198]
+
+* **Request (object):**
+
+ * **`reindex` (Optional, { mode, source })**
+
+
+
+### migrate_to_data_stream [_migrate_to_data_stream]
+
+Convert an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria:
+
+* The alias must have a write index.
+* All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type.
+* The alias must not have any filters.
+* The alias must not use custom routing.
+
+If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream.
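+
+As a sketch, converting a hypothetical alias that meets these criteria (the alias name is an assumption):
+
+```ts
+// the new data stream takes over the alias name 'my-logs'
+await client.indices.migrateToDataStream({ name: 'my-logs' })
+```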
+
+[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md)
+
+```ts
+client.indices.migrateToDataStream({ name })
+```
+
+
+### Arguments [_arguments_199]
+
+* **Request (object):**
+
+ * **`name` (string)**: Name of the index alias to convert to a data stream.
+ * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+ * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+
+
+### modify_data_stream [_modify_data_stream]
+
+Update data streams. Performs one or more data stream modification actions in a single atomic operation.
+
+[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md)
+
+```ts
+client.indices.modifyDataStream({ actions })
+```
+
+
+### Arguments [_arguments_200]
+
+* **Request (object):**
+
+ * **`actions` ({ add_backing_index, remove_backing_index }[])**: Actions to perform.
+
+
+
+### open [_open]
+
+Open a closed index. For data streams, the API opens any closed backing indices.
+
+A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.
+
+When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.
+
+You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off by using the `ignore_unavailable=true` parameter.
+
+By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API.
+
+Closed indices consume a significant amount of disk space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
+
+Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open)
+
+```ts
+client.indices.open({ index })
+```
+
+
+### Arguments [_arguments_201]
+
+* **Request (object):**
+
+ * **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly name the indices you are using to limit the request. To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API.
+ * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + + + +### promote_data_stream [_promote_data_stream] + +Promote a data stream. Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. + +With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can’t be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. + +::::{note} +When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. +:::: + + +[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) + +```ts +client.indices.promoteDataStream({ name }) +``` + + +### Arguments [_arguments_202] + +* **Request (object):** + + * **`name` (string)**: The name of the data stream + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + + + +### put_alias [_put_alias] + +Create or update an alias. Adds a data stream or index to an alias. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases) + +```ts +client.indices.putAlias({ index, name }) +``` + + +### Arguments [_arguments_203] + +* **Request (object):** + + * **`index` (string | string[])**: List of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error. + * **`name` (string)**: Alias to update. 
If the alias doesn’t exist, the request creates it. Index alias names support date math. + * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query used to limit documents the alias can access. + * **`index_routing` (Optional, string)**: Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter. + * **`is_write_index` (Optional, boolean)**: If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. + * **`routing` (Optional, string)**: Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter. + * **`search_routing` (Optional, string)**: Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### put_data_lifecycle [_put_data_lifecycle] + +Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle) + +```ts +client.indices.putDataLifecycle({ name }) +``` + + +### Arguments [_arguments_204] + +* **Request (object):** + + * **`name` (string | string[])**: List of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. + * **`lifecycle` (Optional, { data_retention, downsampling, enabled })** + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error.
+
+
+
+### put_index_template [_put_index_template]
+
+Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+
+Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream’s backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.
+
+You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket.
+
+**Multiple matching templates**
+
+If multiple index templates match the name of a new index or data stream, the template with the highest priority is used.
+
+Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.
+
+**Composing aliases, mappings, and settings**
+
+When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template)
+
+```ts
+client.indices.putIndexTemplate({ name })
+```
+
+
+### Arguments [_arguments_205]
+
+* **Request (object):**
+
+ * **`name` (string)**: Index or template name.
+ * **`index_patterns` (Optional, string | string[])**: Wildcard (`*`) expressions used to match the names of data streams and indices during creation.
+ * **`composed_of` (Optional, string[])**: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
+ * **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration.
+ * **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object.
+ * **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified, the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch.
+ * **`version` (Optional, number)**: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. External systems can use these version numbers to simplify template management. To unset a version, replace the template without specifying one.
+ * **`_meta` (Optional, Record)**: Optional user metadata about the index template. It may have any contents. It is not automatically generated or used by Elasticsearch. This user-defined object is stored in the cluster state, so keeping it short is preferable. To unset the metadata, replace the template without specifying it.
+ * **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created.
+ * **`ignore_missing_component_templates` (Optional, string[])**: The configuration option `ignore_missing_component_templates` can be used when an index template references a component template that might not exist.
+ * **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning.
+ * **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing index templates.
+ * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+ * **`cause` (Optional, string)**: User-defined reason for creating/updating the index template.
+
+
+
+### put_mapping [_put_mapping]
+
+Update field mappings. Add new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields and add new properties to existing object fields. For data streams, these changes are applied to all backing indices by default.
+
+**Add multi-fields to an existing field**
+
+Multi-fields let you index the same field in different ways. You can use this API to update the `fields` mapping parameter and enable multi-fields for an existing field. WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. You can populate the new multi-field with the update by query API.
+
+**Change supported mapping parameters for an existing field**
+
+The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. For example, you can use the update mapping API to update the `ignore_above` parameter.
+
+**Change the mapping of an existing field**
+
+Except for supported mapping parameters, you can’t change the mapping or field type of an existing field. Changing an existing field could invalidate data that’s already indexed.
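+
+Updating a supported parameter, by contrast, is allowed. A minimal sketch, assuming a hypothetical `message` keyword field whose `ignore_above` limit is being raised:
+
+```ts
+await client.indices.putMapping({
+  index: 'my-index',
+  properties: {
+    message: { type: 'keyword', ignore_above: 512 }
+  }
+})
+```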
+
+If you need to change the mapping of a field in a data stream’s backing indices, refer to documentation about modifying data streams. If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.
+
+**Rename a field**
+
+Renaming a field would invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping)
+
+```ts
+client.indices.putMapping({ index })
+```
+
+
+### Arguments [_arguments_206]
+
+* **Request (object):**
+
+ * **`index` (string | string[])**: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices.
+ * **`date_detection` (Optional, boolean)**: Controls whether dynamic date detection is enabled.
+ * **`dynamic` (Optional, Enum("strict" | "runtime" | true | false))**: Controls whether new fields are added dynamically.
+ * **`dynamic_date_formats` (Optional, string[])**: If date detection is enabled, then new string fields are checked against *dynamic_date_formats* and, if the value matches, a new date field is added instead of a string.
+ * **`dynamic_templates` (Optional, Record | Record[])**: Specify dynamic templates for the mapping.
+ * **`_field_names` (Optional, { enabled })**: Control whether field names are enabled for the index.
+ * **`_meta` (Optional, Record)**: A mapping type can have custom metadata associated with it. It is not used at all by Elasticsearch, but can be used to store application-specific metadata.
+ * **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields.
+ * **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include:
+
+    * Field name
+    * Field data type
+    * Mapping parameters
+
+ * **`_routing` (Optional, { required })**: Enable making a routing value required on indexed documents.
+ * **`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })**: Control whether the `_source` field is enabled on the index.
+ * **`runtime` (Optional, Record)**: Mapping of runtime fields for the index.
+ * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices.
+ * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+ * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+ * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+ * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+ * **`write_index_only` (Optional, boolean)**: If `true`, the mappings are applied only to the current write index for the target. + + + +### put_settings [_put_settings_2] + +Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. + +To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. + +::::{note} +You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream’s write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream’s write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream’s backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings) + +```ts +client.indices.putSettings({ ... }) +``` + + +### Arguments [_arguments_207] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + * **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })** + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. + * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. 
+ * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+ * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+ * **`preserve_existing` (Optional, boolean)**: If `true`, existing index settings remain unchanged.
+ * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+
+
+### put_template [_put_template]
+
+Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
+
+::::{important}
+This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
+::::
+
+
+Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order.
+
+Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template.
+
+You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket.
+
+**Indices matching multiple templates**
+
+Multiple index templates can potentially match an index; in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template)
+
+```ts
+client.indices.putTemplate({ name })
+```
+
+
+### Arguments [_arguments_208]
+
+* **Request (object):**
+
+ * **`name` (string)**: The name of the template.
+ * **`aliases` (Optional, Record)**: Aliases for the index.
+ * **`index_patterns` (Optional, string | string[])**: Array of wildcard expressions used to match the names of indices during creation.
+ * **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index.
+ * **`order` (Optional, number)**: Order in which Elasticsearch applies this template if the index matches multiple templates.
+
+
+Templates with lower *order* values are merged first. Templates with higher *order* values are merged later, overriding templates with lower values.
+ * **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index.
+ * **`version` (Optional, number)**: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. To unset a version, replace the template without specifying one.
+ * **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing index templates.
+ * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+ * **`cause` (Optional, string)**
+
+
+### recovery [_recovery_2]
+
+Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream’s backing indices.
+
+All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.
+
+Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.
+
+Recovery automatically occurs during the following processes:
+
+* When creating an index for the first time.
+* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.
+* Creation of new replica shard copies from the primary.
+* Relocation of a shard copy to a different node in the same cluster.
+* A snapshot restore operation.
+* A clone, shrink, or split operation.
+
+You can determine the cause of a shard recovery using the recovery or cat recovery APIs.
+
+The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery)
+
+```ts
+client.indices.recovery({ ... })
+```
+
+
+### Arguments [_arguments_209]
+
+* **Request (object):**
+
+ * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`).
To target all data streams and indices, omit this parameter or use `*` or `_all`. + * **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. + * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. + + + +### refresh [_refresh] + +Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. + +By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. + +Refresh requests are synchronous and do not return a response until the refresh operation completes. + +Refreshes are resource-intensive. To ensure good cluster performance, it’s recommended to wait for Elasticsearch’s periodic refresh rather than performing an explicit refresh when possible. + +If your application workflow indexes documents and then runs a search to retrieve the indexed document, it’s recommended to use the index API’s `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh) + +```ts +client.indices.refresh({ ... }) +``` + + +### Arguments [_arguments_210] + +* **Request (object):** + + * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. + * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + + + +### reload_search_analyzers [_reload_search_analyzers] + +Reload search analyzers. Reload an index’s search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream’s backing indices. + +::::{important} +After reloading the search analyzers you should clear the request cache to make sure it doesn’t contain responses derived from the previous versions of the analyzer. +:::: + + +You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. + +::::{note} +This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. 
As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster, including nodes that don’t contain a shard replica, before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers)

```ts
client.indices.reloadSearchAnalyzers({ index })
```


### Arguments [_arguments_211]

* **Request (object):**

    * **`index` (string | string[])**: A list of index names to reload analyzers for
    * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
    * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed)



### resolve_cluster [_resolve_cluster]

Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported.

This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.

You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint.

For each cluster in the index expression, information is returned about:

* Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope.
* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.
* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
* Cluster version information, including the Elasticsearch server version.

For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.

**Advantages of using this endpoint before a cross-cluster search**

You may want to exclude a cluster or index from a search when:

* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.
* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`.
In that case, that cluster will return no results if you include it in a cross-cluster search.
* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.)
* A remote cluster is an older version that does not support the feature you want to use in your search.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster)

```ts
client.indices.resolveCluster({ name })
```


### Arguments [_arguments_212]

* **Request (object):**

    * **`name` (string | string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the `<cluster>:<name>` syntax.
    * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
    * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. Defaults to false.
    * **`ignore_unavailable` (Optional, boolean)**: If false, the request returns an error if it targets a missing or closed index. Defaults to false.



### resolve_index [_resolve_index]

Resolve indices. Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index)

```ts
client.indices.resolveIndex({ name })
```


### Arguments [_arguments_213]

* **Request (object):**

    * **`name` (string | string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the `<cluster>:<name>` syntax.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
    * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
    * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices.
For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.



### rollover [_rollover]

Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.

The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target.

**Roll over a data stream**

If you roll over a data stream, the API creates a new write index for the stream. The stream’s previous write index becomes a regular backing index. A rollover also increments the data stream’s generation.

**Roll over an index alias with a write index**

::::{tip}
Prior to Elasticsearch 7.9, you’d typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers.
::::


If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with `is_write_index` set to `true`. The API also sets `is_write_index` to `false` for the previous write index.

**Roll over an index alias with one index**

If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.

::::{note}
A rollover creates a new index and is subject to the `wait_for_active_shards` setting.
::::


**Increment index names for an alias**

When you roll over an index alias, you can specify a name for the new index. If you don’t specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. This number is always six characters and zero-padded, regardless of the previous index’s name.

If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`. If you create the index on May 6, 2099, the index’s name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index’s name is `my-index-2099.05.07-000002`.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover)

```ts
client.indices.rollover({ alias })
```


### Arguments [_arguments_214]

* **Request (object):**

    * **`alias` (string)**: Name of the data stream or index alias to roll over.
    * **`new_index` (Optional, string)**: Name of the index to create. Supports date math. Data streams do not support this parameter.
    * **`aliases` (Optional, Record)**: Aliases for the target index. Data streams do not support this parameter.
    * **`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })**: Conditions for the rollover. If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. If this parameter is not specified, Elasticsearch performs the rollover unconditionally.
If conditions are specified, at least one of them must be a `max_*` condition. The index will roll over if any `max_*` condition is satisfied and all `min_*` conditions are satisfied.
    * **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping parameters.
    * **`settings` (Optional, Record)**: Configuration options for the index. Data streams do not support this parameter.
    * **`dry_run` (Optional, boolean)**: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).



### segments [_segments_2]

Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream’s backing indices.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments)

```ts
client.indices.segments({ ... })
```


### Arguments [_arguments_215]

* **Request (object):**

    * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
    * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
    * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.



### shard_stores [_shard_stores]

Get index shard stores. Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream’s backing indices.

The index shard stores API returns the following information:

* The node on which each replica shard exists.
* The allocation ID for each replica shard.
* A unique ID for each replica shard.
* Any errors encountered while opening the shard index or from an earlier failure.
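As an illustration, a minimal call might look like the following sketch. It assumes a configured `client` instance as in the earlier examples, and the index name and `status` filter are purely hypothetical:

```ts
// A sketch, assuming a configured `client`; `my-index` is a hypothetical name.
const response = await client.indices.shardStores({
  index: 'my-index',
  status: 'yellow' // only report stores for shards with yellow health
})
// The response maps each index to its shards and the stores (node, allocation
// ID, and any failures) holding a copy of each shard.
console.log(response.indices)
```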
By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores)

```ts
client.indices.shardStores({ ... })
```


### Arguments [_arguments_216]

* **Request (object):**

    * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request.
    * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
    * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response.
    * **`status` (Optional, Enum("green" | "yellow" | "red" | "all") | Enum("green" | "yellow" | "red" | "all")[])**: List of shard health statuses used to limit the request.



### shrink [_shrink]

Shrink an index. Shrink an index into a new index with fewer primary shards.

Before you can shrink an index:

* The index must be read-only.
* A copy of every shard in the index must reside on the same node.
* The index must have a green health status.

To make shard allocation easier, we recommend you also remove the index’s replica shards. You can later re-add replica shards as part of the shrink operation.

The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, or an index with 15 primary shards can be shrunk into 5, 3, or 1. If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.

The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.

A shrink operation:

* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time-consuming process. Also, if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk, since hard links do not work across disks.
* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `index.routing.allocation.initial_recovery._id` index setting.

::::{important}
Indices can only be shrunk if they satisfy the following requirements:
::::


* The target index must not exist.
* The source index must have more primary shards than the target index.
* The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of docs that can fit into a single shard.
* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink)

```ts
client.indices.shrink({ index, target })
```


### Arguments [_arguments_217]

* **Request (object):**

    * **`index` (string)**: Name of the source index to shrink.
    * **`target` (string)**: Name of the target index to create.
    * **`aliases` (Optional, Record)**: The key is the alias name. Index alias names support date math.
    * **`settings` (Optional, Record)**: Configuration options for the target index.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).



### simulate_index_template [_simulate_index_template]

Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template)

```ts
client.indices.simulateIndexTemplate({ name })
```


### Arguments [_arguments_218]

* **Request (object):**

    * **`name` (string)**: Name of the index to simulate.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template.



### simulate_template [_simulate_template]

Simulate an index template. Get the index configuration that would be applied by a particular index template.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template)

```ts
client.indices.simulateTemplate({ ... })
```


### Arguments [_arguments_219]

* **Request (object):**

    * **`name` (Optional, string)**: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template configuration in the request body.
    * **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `action.auto_create_index`.
If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created.
    * **`index_patterns` (Optional, string | string[])**: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation.
    * **`composed_of` (Optional, string[])**: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
    * **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration.
    * **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object.
    * **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch.
    * **`version` (Optional, number)**: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch.
    * **`_meta` (Optional, Record)**: Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch.
    * **`ignore_missing_component_templates` (Optional, string[])**: The configuration option `ignore_missing_component_templates` can be used when an index template references a component template that might not exist.
    * **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning.
    * **`create` (Optional, boolean)**: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template.



### split [_split]

Split an index. Split an index into a new index with more primary shards.

Before you can split an index:

* The index must be read-only.
* The cluster health status must be green.

You can make an index read-only with the following request using the add index block API:

```
PUT /my_source_index/_block/write
```

The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.

The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.
The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.

A split operation:

* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
* Hard-links segments from the source index into the target index. If the file system doesn’t support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.
* Recovers the target index as though it were a closed index which had just been re-opened.

::::{important}
Indices can only be split if they satisfy the following requirements:
::::


* The target index must not exist.
* The source index must have fewer primary shards than the target index.
* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split)

```ts
client.indices.split({ index, target })
```


### Arguments [_arguments_220]

* **Request (object):**

    * **`index` (string)**: Name of the source index to split.
    * **`target` (string)**: Name of the target index to create.
    * **`aliases` (Optional, Record)**: Aliases for the resulting index.
    * **`settings` (Optional, Record)**: Configuration options for the target index.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
    * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).



### stats [_stats_4]

Get index statistics. For data streams, the API retrieves statistics for the stream’s backing indices.

By default, the returned statistics are index-level with `primaries` and `total` aggregations. `primaries` are the values for only the primary shards. `total` are the accumulated values for both primary and replica shards.

To get shard-level statistics, set the `level` parameter to `shards`.

::::{note}
When moving to another node, the shard-level statistics for a shard are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats)

```ts
client.indices.stats({ ... })
```


### Arguments [_arguments_221]

* **Request (object):**

    * **`metric` (Optional, string | string[])**: Limit the information returned to the specific metrics.
    * **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices
    * **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
    * **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics.
    * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics.
    * **`forbid_closed_indices` (Optional, boolean)**: If true, statistics are not collected from closed indices.
    * **`groups` (Optional, string | string[])**: List of search groups to include in the search statistics.
    * **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested).
    * **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory.
    * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level.



### update_aliases [_update_aliases]

Create or update an alias. Adds a data stream or index to an alias.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases)

```ts
client.indices.updateAliases({ ... })
```


### Arguments [_arguments_222]

* **Request (object):**

    * **`actions` (Optional, { add_backing_index, remove_backing_index }[])**: Actions to perform.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### validate_query [_validate_query]

Validate a query. Validates a query without running it.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query)

```ts
client.indices.validateQuery({ ... })
```


### Arguments [_arguments_223]

* **Request (object):**

    * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`.
    * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The query to validate, defined using the Query DSL.
    * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices.
    * **`all_shards` (Optional, boolean)**: If `true`, the validation is executed on all shards instead of one random shard per index.
    * **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified.
    * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed.
    * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`.
    * **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
    * **`explain` (Optional, boolean)**: If `true`, the response returns detailed information if an error has occurred.
    * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
    * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
    * **`rewrite` (Optional, boolean)**: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed.
    * **`q` (Optional, string)**: Query in the Lucene query string syntax.
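As a quick illustration of the parameters above, the following sketch validates a hypothetical query and asks for the rewritten Lucene form. It assumes a configured `client` instance; the index name and query body are assumptions, not part of the API:

```ts
// A sketch, assuming a configured `client`; `my-index` and the match query
// are hypothetical values used only to show the call shape.
const response = await client.indices.validateQuery({
  index: 'my-index',
  rewrite: true, // include the rewritten Lucene query in the explanation
  query: {
    match: { 'user.id': 'kimchy' }
  }
})
// `valid` is `false` when the query cannot run, for example when text is
// supplied to a numeric field; `explanations` carries the details.
console.log(response.valid, response.explanations)
```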
## inference [_inference]


### delete [_delete_6]

Delete an inference endpoint

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete)

```ts
client.inference.delete({ inference_id })
```


### Arguments [_arguments_224]

* **Request (object):**

    * **`inference_id` (string)**: The inference Id
    * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type
    * **`dry_run` (Optional, boolean)**: When true, the endpoint is not deleted, and a list of ingest processors which reference this endpoint is returned
    * **`force` (Optional, boolean)**: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields



### get [_get_6]

Get an inference endpoint

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get)

```ts
client.inference.get({ ... })
```


### Arguments [_arguments_225]

* **Request (object):**

    * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type
    * **`inference_id` (Optional, string)**: The inference Id



### inference [_inference_2]

Perform inference on the service

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference)

```ts
client.inference.inference({ inference_id, input })
```


### Arguments [_arguments_226]

* **Request (object):**

    * **`inference_id` (string)**: The inference Id
    * **`input` (string | string[])**: Inference input. Either a string or an array of strings.
    * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type
    * **`query` (Optional, string)**: Query input, required for rerank task. Not required for other tasks.
    * **`task_settings` (Optional, User-defined value)**: Optional task settings
    * **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete.



### put [_put_2]

Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

::::{important}
The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put)

```ts
client.inference.put({ inference_id })
```


### Arguments [_arguments_227]

* **Request (object):**

    * **`inference_id` (string)**: The inference Id
    * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type
    * **`inference_config` (Optional, { service, service_settings, task_settings })**



### stream_inference [_stream_inference]

Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type.

::::{important}
The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
::::


This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference)

```ts
client.inference.streamInference({ inference_id, input })
```


### Arguments [_arguments_228]

* **Request (object):**

    * **`inference_id` (string)**: The unique identifier for the inference endpoint.
    * **`input` (string | string[])**: The text on which you want to perform the inference task. It can be a single string or an array.
    * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The type of task that the model performs.


::::{note}
Inference endpoints for the completion task type currently only support a single string as input.
::::



### unified_inference [_unified_inference]

Perform inference on the service using the Unified Schema

```ts
client.inference.unifiedInference({ inference_id, messages })
```


### Arguments [_arguments_229]

* **Request (object):**

    * **`inference_id` (string)**: The inference Id
    * **`messages` ({ content, role, tool_call_id, tool_calls }[])**: A list of objects representing the conversation.
    * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type
    * **`model` (Optional, string)**: The ID of the model to use.
    * **`max_completion_tokens` (Optional, number)**: The upper bound limit for the number of tokens that can be generated for a completion request.
    * **`stop` (Optional, string[])**: A sequence of strings to control when the model should stop generating additional tokens.
    * **`temperature` (Optional, float)**: The sampling temperature to use.
    * **`tool_choice` (Optional, string | { type, function })**: Controls which tool is called by the model.
    * **`tools` (Optional, { type, function }[])**: A list of tools that the model can call.
    * **`top_p` (Optional, float)**: Nucleus sampling, an alternative to sampling with temperature.
    * **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete.



### update [_update_2]

Update an inference endpoint.

Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`.

::::{important}
The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update)

```ts
client.inference.update({ inference_id })
```


### Arguments [_arguments_230]

* **Request (object):**

    * **`inference_id` (string)**: The unique identifier of the inference endpoint.
    * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The type of inference task that the model performs.
    * **`inference_config` (Optional, { service, service_settings, task_settings })**



## ingest [_ingest]


### delete_geoip_database [_delete_geoip_database]

Delete GeoIP database configurations. Delete one or more IP geolocation database configurations.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database)

```ts
client.ingest.deleteGeoipDatabase({ id })
```


### Arguments [_arguments_231]

* **Request (object):**

    * **`id` (string | string[])**: A list of geoip database configurations to delete
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### delete_ip_location_database [_delete_ip_location_database]

Delete IP geolocation database configurations.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database)

```ts
client.ingest.deleteIpLocationDatabase({ id })
```


### Arguments [_arguments_232]

* **Request (object):**

    * **`id` (string | string[])**: A list of IP location database configurations.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out.
    * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out.



### delete_pipeline [_delete_pipeline]

Delete pipelines. Delete one or more ingest pipelines.
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline)

```ts
client.ingest.deletePipeline({ id })
```


### Arguments [_arguments_233]

* **Request (object):**

    * **`id` (string)**: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### geo_ip_stats [_geo_ip_stats]

Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor.

[Endpoint documentation](elasticsearch://docs/reference/ingestion-tools/enrich-processor/geoip-processor.md)

```ts
client.ingest.geoIpStats()
```


### get_geoip_database [_get_geoip_database]

Get GeoIP database configurations. Get information about one or more IP geolocation database configurations.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database)

```ts
client.ingest.getGeoipDatabase({ ... })
```


### Arguments [_arguments_234]

* **Request (object):**

    * **`id` (Optional, string | string[])**: List of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`.



### get_ip_location_database [_get_ip_location_database]

Get IP geolocation database configurations.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database)

```ts
client.ingest.getIpLocationDatabase({ ... })
```


### Arguments [_arguments_235]

* **Request (object):**

    * **`id` (Optional, string | string[])**: List of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out.



### get_pipeline [_get_pipeline]

Get pipelines. Get information about one or more ingest pipelines. This API returns a local reference of the pipeline.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline)

```ts
client.ingest.getPipeline({ ... })
```


### Arguments [_arguments_236]

* **Request (object):**

    * **`id` (Optional, string)**: List of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`summary` (Optional, boolean)**: Return pipelines without their definitions (default: false)



### processor_grok [_processor_grok]

Run a grok processor. Extract structured fields out of a single text field within a document.
You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. + +[Endpoint documentation](elasticsearch://docs/reference/ingestion-tools/enrich-processor/grok-processor.md) + +```ts +client.ingest.processorGrok() +``` + + +### put_geoip_database [_put_geoip_database] + +Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database) + +```ts +client.ingest.putGeoipDatabase({ id, name, maxmind }) +``` + + +### Arguments [_arguments_237] + +* **Request (object):** + + * **`id` (string)**: ID of the database configuration to create or update. + * **`name` (string)**: The provider-assigned name of the IP geolocation database to download. + * **`maxmind` ({ account_id })**: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### put_ip_location_database [_put_ip_location_database] + +Create or update an IP geolocation database configuration. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database) + +```ts +client.ingest.putIpLocationDatabase({ id }) +``` + + +### Arguments [_arguments_238] + +* **Request (object):** + + * **`id` (string)**: The database configuration identifier. + * **`configuration` (Optional, { name, maxmind, ipinfo })** + * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. + * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. A value of `-1` indicates that the request should never time out. + + + +### put_pipeline [_put_pipeline] + +Create or update a pipeline. Changes made using this API take effect immediately. + +[Endpoint documentation](docs-content://manage-data/ingest/transform-enrich/ingest-pipelines.md) + +```ts +client.ingest.putPipeline({ id }) +``` + + +### Arguments [_arguments_239] + +* **Request (object):** + + * **`id` (string)**: ID of the ingest pipeline to create or update. + * **`_meta` (Optional, Record)**: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. + * **`description` (Optional, string)**: Description of the ingest pipeline. 
    * **`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline’s remaining processors.
    * **`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified.
    * **`version` (Optional, number)**: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers.
    * **`deprecated` (Optional, boolean)**: Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
    * **`if_version` (Optional, number)**: Required version for optimistic concurrency control for pipeline updates



### simulate [_simulate]

Simulate a pipeline. Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate)

```ts
client.ingest.simulate({ docs })
```


### Arguments [_arguments_240]

* **Request (object):**

    * **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline.
    * **`id` (Optional, string)**: Pipeline to test. If you don’t specify a `pipeline` in the request body, this parameter is required.
    * **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })**: Pipeline to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter.
    * **`verbose` (Optional, boolean)**: If `true`, the response includes output data for each processor in the executed pipeline.



## license [_license]


### delete [_delete_7]

Delete the license. When the license expires, your subscription level reverts to Basic.

If the operator privileges feature is enabled, only operator users can use this API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete)

```ts
client.license.delete({ ... })
```


### Arguments [_arguments_241]

* **Request (object):**

    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### get [_get_7]

Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires.

::::{note}
If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get)

```ts
client.license.get({ ... })
```


### Arguments [_arguments_242]

* **Request (object):**

    * **`accept_enterprise` (Optional, boolean)**: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. This parameter is deprecated and will always be set to true in 8.x.
    * **`local` (Optional, boolean)**: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node.



### get_basic_status [_get_basic_status]

Get the basic license status.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status)

```ts
client.license.getBasicStatus()
```


### get_trial_status [_get_trial_status]

Get the trial status.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status)

```ts
client.license.getTrialStatus()
```


### post [_post_2]

Update the license. You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true.

::::{note}
If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post)

```ts
client.license.post({ ...
})
```


### Arguments [_arguments_243]

* **Request (object):**

    * **`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })**
    * **`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])**: A sequence of one or more JSON documents containing the license information.
    * **`acknowledge` (Optional, boolean)**: Specifies whether you acknowledge the license changes.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### post_start_basic [_post_start_basic]

Start a basic license. Start an indefinite basic license, which gives access to all the basic features.

::::{note}
To start a basic license, you must not currently have a basic license.
::::


If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the `acknowledge` parameter set to `true`.

To check the status of your basic license, use the get basic license API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic)

```ts
client.license.postStartBasic({ ... })
```


### Arguments [_arguments_244]

* **Request (object):**

    * **`acknowledge` (Optional, boolean)**: Specifies whether the user has acknowledged the license change messages (default: false).
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### post_start_trial [_post_start_trial]

Start a trial. Start a 30-day trial, which gives access to all subscription features.

::::{note}
You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at [https://www.elastic.co/trialextension](https://www.elastic.co/trialextension).
::::


To check the status of your trial, use the get trial status API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial)

```ts
client.license.postStartTrial({ ... })
```


### Arguments [_arguments_245]

* **Request (object):**

    * **`acknowledge` (Optional, boolean)**: Specifies whether the user has acknowledged the license change messages (default: false).
    * **`type_query_string` (Optional, string)**
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.



## logstash [_logstash]


### delete_pipeline [_delete_pipeline_2]

Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code.
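
For example, a minimal sketch of removing a centrally managed pipeline (the pipeline ID `my-pipeline` and the node URL are hypothetical placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// A successful deletion returns an empty body, so a resolved promise
// (any 2xx status) is the only success signal to check for.
await client.logstash.deletePipeline({ id: 'my-pipeline' })
```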
+
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline)

```ts
client.logstash.deletePipeline({ id })
```


### Arguments [_arguments_246]

* **Request (object):**

    * **`id` (string)**: An identifier for the pipeline.



### get_pipeline [_get_pipeline_2]

Get Logstash pipelines. Get pipelines that are used for Logstash Central Management.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline)

```ts
client.logstash.getPipeline({ ... })
```


### Arguments [_arguments_247]

* **Request (object):**

    * **`id` (Optional, string | string[])**: A list of pipeline identifiers.



### put_pipeline [_put_pipeline_2]

Create or update a Logstash pipeline.

Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline)

```ts
client.logstash.putPipeline({ id })
```


### Arguments [_arguments_248]

* **Request (object):**

    * **`id` (string)**: An identifier for the pipeline.
    * **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })**



## migration [_migration]


### deprecations [_deprecations]

Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.

::::{tip}
This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations)

```ts
client.migration.deprecations({ ... })
```


### Arguments [_arguments_249]

* **Request (object):**

    * **`index` (Optional, string)**: Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported.



### get_feature_upgrade_status [_get_feature_upgrade_status]

Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress.

::::{tip}
This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status)

```ts
client.migration.getFeatureUpgradeStatus()
```


### post_feature_upgrade [_post_feature_upgrade]

Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process.

Some functionality might be temporarily unavailable during the migration process.

::::{tip}
The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
+
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status)

```ts
client.migration.postFeatureUpgrade()
```


## ml [_ml]


### clear_trained_model_deployment_cache [_clear_trained_model_deployment_cache]

Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache)

```ts
client.ml.clearTrainedModelDeploymentCache({ model_id })
```


### Arguments [_arguments_250]

* **Request (object):**

    * **`model_id` (string)**: The unique identifier of the trained model.



### close_job [_close_job]

Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its metadata. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job)

```ts
client.ml.closeJob({ job_id })
```


### Arguments [_arguments_251]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier.
    * **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter.
    * **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter.
    * **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter.



### delete_calendar [_delete_calendar]

Delete a calendar. Removes all scheduled events from a calendar, then deletes it.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar)

```ts
client.ml.deleteCalendar({ calendar_id })
```


### Arguments [_arguments_252]

* **Request (object):**

    * **`calendar_id` (string)**: A string that uniquely identifies a calendar.



### delete_calendar_event [_delete_calendar_event]

Delete events from a calendar.
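
As a sketch (the calendar ID `planned-outages` is a hypothetical placeholder; the event ID comes from the get calendar events API, described later in this section):

```ts
// Scheduled events get server-generated IDs, so look them up first,
// then delete one by its `event_id`.
const { events } = await client.ml.getCalendarEvents({ calendar_id: 'planned-outages' })

await client.ml.deleteCalendarEvent({
  calendar_id: 'planned-outages',
  event_id: events[0].event_id!,
})
```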
+
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event)

```ts
client.ml.deleteCalendarEvent({ calendar_id, event_id })
```


### Arguments [_arguments_253]

* **Request (object):**

    * **`calendar_id` (string)**: A string that uniquely identifies a calendar.
    * **`event_id` (string)**: Identifier for the scheduled event. You can obtain this identifier by using the get calendar events API.



### delete_calendar_job [_delete_calendar_job]

Delete anomaly jobs from a calendar.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job)

```ts
client.ml.deleteCalendarJob({ calendar_id, job_id })
```


### Arguments [_arguments_254]

* **Request (object):**

    * **`calendar_id` (string)**: A string that uniquely identifies a calendar.
    * **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups.



### delete_data_frame_analytics [_delete_data_frame_analytics]

Delete a data frame analytics job.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics)

```ts
client.ml.deleteDataFrameAnalytics({ id })
```


### Arguments [_arguments_255]

* **Request (object):**

    * **`id` (string)**: Identifier for the data frame analytics job.
    * **`force` (Optional, boolean)**: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job.
    * **`timeout` (Optional, string | -1 | 0)**: The time to wait for the job to be deleted.



### delete_datafeed [_delete_datafeed]

Delete a datafeed.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed)

```ts
client.ml.deleteDatafeed({ datafeed_id })
```


### Arguments [_arguments_256]

* **Request (object):**

    * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
    * **`force` (Optional, boolean)**: Use to forcefully delete a started datafeed; this method is quicker than stopping and deleting the datafeed.



### delete_expired_data [_delete_expired_data]

Delete expired ML data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data)

```ts
client.ml.deleteExpiredData({ ... })
```


### Arguments [_arguments_257]

* **Request (object):**

    * **`job_id` (Optional, string)**: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression.
    * **`requests_per_second` (Optional, float)**: The desired requests per second for the deletion processes. The default behavior is no throttling.
+ * **`timeout` (Optional, string | -1 | 0)**: How long can the underlying delete processes run until they are canceled. + + + +### delete_filter [_delete_filter] + +Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter) + +```ts +client.ml.deleteFilter({ filter_id }) +``` + + +### Arguments [_arguments_258] + +* **Request (object):** + + * **`filter_id` (string)**: A string that uniquely identifies a filter. + + + +### delete_forecast [_delete_forecast] + +Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast) + +```ts +client.ml.deleteForecast({ job_id }) +``` + + +### Arguments [_arguments_259] + +* **Request (object):** + + * **`job_id` (string)**: Identifier for the anomaly detection job. + * **`forecast_id` (Optional, string)**: A list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all` or `*` the API deletes all forecasts from the job. + * **`allow_no_forecasts` (Optional, boolean)**: Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error. + * **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error. + + + +### delete_job [_delete_job] + +Delete an anomaly detection job. All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job) + +```ts +client.ml.deleteJob({ job_id }) +``` + + +### Arguments [_arguments_260] + +* **Request (object):** + + * **`job_id` (string)**: Identifier for the anomaly detection job. + * **`force` (Optional, boolean)**: Use to forcefully delete an opened job; this method is quicker than closing and deleting the job. + * **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. + * **`wait_for_completion` (Optional, boolean)**: Specifies whether the request should return immediately or wait until the job deletion completes. + + + +### delete_model_snapshot [_delete_model_snapshot] + +Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. 
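
A sketch of the guard described above (the job and snapshot IDs are hypothetical placeholders):

```ts
// The active snapshot cannot be deleted, so check the job's
// `model_snapshot_id` before attempting the deletion.
const { jobs } = await client.ml.getJobs({ job_id: 'my-job' })
const activeSnapshot = jobs[0]?.model_snapshot_id

if (activeSnapshot !== 'old-snapshot') {
  await client.ml.deleteModelSnapshot({
    job_id: 'my-job',
    snapshot_id: 'old-snapshot',
  })
}
```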
+
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot)

```ts
client.ml.deleteModelSnapshot({ job_id, snapshot_id })
```


### Arguments [_arguments_261]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job.
    * **`snapshot_id` (string)**: Identifier for the model snapshot.



### delete_trained_model [_delete_trained_model]

Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model)

```ts
client.ml.deleteTrainedModel({ model_id })
```


### Arguments [_arguments_262]

* **Request (object):**

    * **`model_id` (string)**: The unique identifier of the trained model.
    * **`force` (Optional, boolean)**: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### delete_trained_model_alias [_delete_trained_model_alias]

Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias)

```ts
client.ml.deleteTrainedModelAlias({ model_alias, model_id })
```


### Arguments [_arguments_263]

* **Request (object):**

    * **`model_alias` (string)**: The model alias to delete.
    * **`model_id` (string)**: The trained model ID to which the model alias refers.



### estimate_model_memory [_estimate_model_memory]

Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml)

```ts
client.ml.estimateModelMemory({ ... })
```


### Arguments [_arguments_264]

* **Request (object):**

    * **`analysis_config` (Optional, { bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: For a list of the properties that you can specify in the `analysis_config` component of the body of this API, refer to the create anomaly detection jobs API.
    * **`max_bucket_cardinality` (Optional, Record)**: Estimates of the highest cardinality in a single bucket that is observed for influencer fields over the time period that the job analyzes data. To produce a good answer, values must be provided for all influencer fields. Providing values for fields that are not listed as `influencers` has no effect on the estimation.
    * **`overall_cardinality` (Optional, Record)**: Estimates of the cardinality that is observed for fields over the whole time period that the job analyzes data. To produce a good answer, values must be provided for fields referenced in the `by_field_name`, `over_field_name` and `partition_field_name` of any detectors.
Providing values for other fields has no effect on the estimation. It can be omitted from the request if no detectors have a `by_field_name`, `over_field_name` or `partition_field_name`.



### evaluate_data_frame [_evaluate_data_frame]

Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame)

```ts
client.ml.evaluateDataFrame({ evaluation, index })
```


### Arguments [_arguments_265]

* **Request (object):**

    * **`evaluation` ({ classification, outlier_detection, regression })**: Defines the type of evaluation you want to perform.
    * **`index` (string)**: Defines the `index` in which the evaluation will be performed.
    * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query clause that retrieves a subset of data from the source index.



### explain_data_frame_analytics [_explain_data_frame_analytics]

Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. Two kinds of explanation are provided: which fields are included or not in the analysis and why, and how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for the `model_memory_limit` setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics)

```ts
client.ml.explainDataFrameAnalytics({ ... })
```


### Arguments [_arguments_266]

* **Request (object):**

    * **`id` (Optional, string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
    * **`source` (Optional, { index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified.
    * **`dest` (Optional, { index, results_field })**: The destination configuration, consisting of index and optionally results_field (ml by default).
    * **`analysis` (Optional, { classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression.
+
    * **`description` (Optional, string)**: A description of the job.
    * **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting.
    * **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself.
    * **`analyzed_fields` (Optional, { includes, excludes })**: Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis.
    * **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node.



### flush_job [_flush_job]

Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. Flush and close operations are similar; however, flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job)

```ts
client.ml.flushJob({ job_id })
```


### Arguments [_arguments_267]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job.
    * **`advance_time` (Optional, string | Unit)**: Refer to the description for the `advance_time` query parameter.
    * **`calc_interim` (Optional, boolean)**: Refer to the description for the `calc_interim` query parameter.
    * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter.
    * **`skip_time` (Optional, string | Unit)**: Refer to the description for the `skip_time` query parameter.
    * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter.



### forecast [_forecast]

Predict future behavior of a time series.

Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast)

```ts
client.ml.forecast({ job_id })
```


### Arguments [_arguments_268]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs.
+
    * **`duration` (Optional, string | -1 | 0)**: Refer to the description for the `duration` query parameter.
    * **`expires_in` (Optional, string | -1 | 0)**: Refer to the description for the `expires_in` query parameter.
    * **`max_model_memory` (Optional, string)**: Refer to the description for the `max_model_memory` query parameter.



### get_buckets [_get_buckets]

Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets)

```ts
client.ml.getBuckets({ job_id })
```


### Arguments [_arguments_269]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job.
    * **`timestamp` (Optional, string | Unit)**: The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets.
    * **`anomaly_score` (Optional, number)**: Refer to the description for the `anomaly_score` query parameter.
    * **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter.
    * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter.
    * **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
    * **`expand` (Optional, boolean)**: Refer to the description for the `expand` query parameter.
    * **`page` (Optional, { from, size })**
    * **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
    * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter.
    * **`from` (Optional, number)**: Skips the specified number of buckets.
    * **`size` (Optional, number)**: Specifies the maximum number of buckets to obtain.



### get_calendar_events [_get_calendar_events]

Get info about events in calendars.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events)

```ts
client.ml.getCalendarEvents({ calendar_id })
```


### Arguments [_arguments_270]

* **Request (object):**

    * **`calendar_id` (string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
    * **`end` (Optional, string | Unit)**: Specifies to get events with timestamps earlier than this time.
    * **`from` (Optional, number)**: Skips the specified number of events.
    * **`job_id` (Optional, string)**: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`.
    * **`size` (Optional, number)**: Specifies the maximum number of events to obtain.
    * **`start` (Optional, string | Unit)**: Specifies to get events with timestamps after this time.



### get_calendars [_get_calendars]

Get calendar configuration info.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars)

```ts
client.ml.getCalendars({ ... })
```


### Arguments [_arguments_271]

* **Request (object):**

    * **`calendar_id` (Optional, string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression.
You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
    * **`page` (Optional, { from, size })**: This object is supported only when you omit the calendar identifier.
    * **`from` (Optional, number)**: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier.
    * **`size` (Optional, number)**: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier.



### get_categories [_get_categories]

Get anomaly detection job results for categories.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories)

```ts
client.ml.getCategories({ job_id })
```


### Arguments [_arguments_272]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job.
    * **`category_id` (Optional, string)**: Identifier for the category, which is unique in the job. If you specify neither the category ID nor the partition_field_value, the API returns information about all categories. If you specify only the partition_field_value, it returns information about all categories for the specified partition.
    * **`page` (Optional, { from, size })**: Configures pagination. This parameter has the `from` and `size` properties.
    * **`from` (Optional, number)**: Skips the specified number of categories.
    * **`partition_field_value` (Optional, string)**: Only return categories for the specified partition.
    * **`size` (Optional, number)**: Specifies the maximum number of categories to obtain.



### get_data_frame_analytics [_get_data_frame_analytics]

Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a list of data frame analytics jobs or a wildcard expression.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics)

```ts
client.ml.getDataFrameAnalytics({ ... })
```


### Arguments [_arguments_273]

* **Request (object):**

    * **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

        1. Contains wildcard expressions and there are no data frame analytics jobs that match.
        2. Contains the `_all` string or no identifiers and there are no matches.
        3. Contains wildcard expressions and there are only partial matches.

        The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches.

    * **`from` (Optional, number)**: Skips the specified number of data frame analytics jobs.
    * **`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain.
    * **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.


### get_data_frame_analytics_stats [_get_data_frame_analytics_stats]

Get data frame analytics jobs usage info.
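
A hedged sketch of fetching usage info for every job (the paging values are arbitrary):

```ts
// With allow_no_match: true, no matches yields an empty array rather
// than a 404, which keeps the call safe on a fresh cluster.
const stats = await client.ml.getDataFrameAnalyticsStats({
  allow_no_match: true,
  from: 0,
  size: 100,
})

for (const job of stats.data_frame_analytics) {
  console.log(job.id, job.state)
}
```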
+
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats)

```ts
client.ml.getDataFrameAnalyticsStats({ ... })
```


### Arguments [_arguments_274]

* **Request (object):**

    * **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

        1. Contains wildcard expressions and there are no data frame analytics jobs that match.
        2. Contains the `_all` string or no identifiers and there are no matches.
        3. Contains wildcard expressions and there are only partial matches.

        The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches.

    * **`from` (Optional, number)**: Skips the specified number of data frame analytics jobs.
    * **`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain.
    * **`verbose` (Optional, boolean)**: Defines whether the stats response should be verbose.


### get_datafeed_stats [_get_datafeed_stats]

Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats)

```ts
client.ml.getDatafeedStats({ ... })
```


### Arguments [_arguments_275]

* **Request (object):**

    * **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

        1. Contains wildcard expressions and there are no datafeeds that match.
        2. Contains the `_all` string or no identifiers and there are no matches.
        3. Contains wildcard expressions and there are only partial matches.

        The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches.


### get_datafeeds [_get_datafeeds]

Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. This API returns a maximum of 10,000 datafeeds.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds)

```ts
client.ml.getDatafeeds({ ... })
```


### Arguments [_arguments_276]

* **Request (object):**

    * **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed.
It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

        1. Contains wildcard expressions and there are no datafeeds that match.
        2. Contains the `_all` string or no identifiers and there are no matches.
        3. Contains wildcard expressions and there are only partial matches.

        The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches.

    * **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.


### get_filters [_get_filters]

Get filters. You can get a single filter or all filters.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters)

```ts
client.ml.getFilters({ ... })
```


### Arguments [_arguments_277]

* **Request (object):**

    * **`filter_id` (Optional, string | string[])**: A string that uniquely identifies a filter.
    * **`from` (Optional, number)**: Skips the specified number of filters.
    * **`size` (Optional, number)**: Specifies the maximum number of filters to obtain.



### get_influencers [_get_influencers]

Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers)

```ts
client.ml.getInfluencers({ job_id })
```


### Arguments [_arguments_278]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job.
    * **`page` (Optional, { from, size })**: Configures pagination. This parameter has the `from` and `size` properties.
    * **`desc` (Optional, boolean)**: If true, the results are sorted in descending order.
    * **`end` (Optional, string | Unit)**: Returns influencers with timestamps earlier than this time. The default value means it is unset and results are not limited to specific timestamps.
    * **`exclude_interim` (Optional, boolean)**: If true, the output excludes interim results. By default, interim results are included.
    * **`influencer_score` (Optional, number)**: Returns influencers with anomaly scores greater than or equal to this value.
    * **`from` (Optional, number)**: Skips the specified number of influencers.
    * **`size` (Optional, number)**: Specifies the maximum number of influencers to obtain.
    * **`sort` (Optional, string)**: Specifies the sort field for the requested influencers. By default, the influencers are sorted by the `influencer_score` value.
    * **`start` (Optional, string | Unit)**: Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps.



### get_job_stats [_get_job_stats]

Get anomaly detection jobs usage info.
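
A minimal sketch that lists the state of every anomaly detection job:

```ts
// A wildcard matches all jobs; allow_no_match: true returns an empty
// `jobs` array instead of a 404 when nothing matches.
const { jobs } = await client.ml.getJobStats({ job_id: '*', allow_no_match: true })

for (const job of jobs) {
  console.log(job.job_id, job.state)
}
```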
+
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats)

```ts
client.ml.getJobStats({ ... })
```


### Arguments [_arguments_279]

* **Request (object):**

    * **`job_id` (Optional, string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, a list of jobs, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

        1. Contains wildcard expressions and there are no jobs that match.
        2. Contains the _all string or no identifiers and there are no matches.
        3. Contains wildcard expressions and there are only partial matches.

        If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a `404` status code when there are no matches or only partial matches.


### get_jobs [_get_jobs]

Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs)

```ts
client.ml.getJobs({ ... })
```


### Arguments [_arguments_280]

* **Request (object):**

    * **`job_id` (Optional, string | string[])**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

        1. Contains wildcard expressions and there are no jobs that match.
        2. Contains the _all string or no identifiers and there are no matches.
        3. Contains wildcard expressions and there are only partial matches.

        The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches.

    * **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.


### get_memory_stats [_get_memory_stats]

Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats)

```ts
client.ml.getMemoryStats({ ... })
```


### Arguments [_arguments_281]

* **Request (object):**

    * **`node_id` (Optional, string)**: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true`.
    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### get_model_snapshot_upgrade_stats [_get_model_snapshot_upgrade_stats]

Get anomaly detection job model snapshot upgrade usage info.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats)

```ts
client.ml.getModelSnapshotUpgradeStats({ job_id, snapshot_id })
```


### Arguments [_arguments_282]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job.
    * **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

        * Contains wildcard expressions and there are no jobs that match.
        * Contains the _all string or no identifiers and there are no matches.
        * Contains wildcard expressions and there are only partial matches.

        The default value is true, which returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches.


### get_model_snapshots [_get_model_snapshots]

Get model snapshots info.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots)

```ts
client.ml.getModelSnapshots({ job_id })
```


### Arguments [_arguments_283]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job.
    * **`snapshot_id` (Optional, string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID.
    * **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter.
    * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter.
    * **`page` (Optional, { from, size })**
    * **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
    * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter.
    * **`from` (Optional, number)**: Skips the specified number of snapshots.
    * **`size` (Optional, number)**: Specifies the maximum number of snapshots to obtain.



### get_overall_buckets [_get_overall_buckets]

Get overall bucket results.

Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs.

The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time.
For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets)

```ts
client.ml.getOverallBuckets({ job_id })
```


### Arguments [_arguments_284]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, a list of jobs or groups, or a wildcard expression. You can summarize the bucket results for all anomaly detection jobs by using `_all` or by specifying `*` as the `<job_id>`.
    * **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter.
    * **`bucket_span` (Optional, string | -1 | 0)**: Refer to the description for the `bucket_span` query parameter.
    * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter.
    * **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
    * **`overall_score` (Optional, number | string)**: Refer to the description for the `overall_score` query parameter.
    * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter.
    * **`top_n` (Optional, number)**: Refer to the description for the `top_n` query parameter.


### get_records [_get_records]

Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records)

```ts
client.ml.getRecords({ job_id })
```


### Arguments [_arguments_285]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job.
    * **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter.
    * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter.
    * **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
    * **`page` (Optional, { from, size })**
    * **`record_score` (Optional, number)**: Refer to the description for the `record_score` query parameter.
    * **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
    * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter.
    * **`from` (Optional, number)**: Skips the specified number of records.
    * **`size` (Optional, number)**: Specifies the maximum number of records to obtain.
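
Putting several of these parameters together, a sketch that fetches only high-scoring, finalized records for a hypothetical job (the score threshold is arbitrary):

```ts
// Return at most 50 records with record_score >= 80, highest first,
// skipping interim results that may still change.
const { records } = await client.ml.getRecords({
  job_id: 'my-job',
  record_score: 80,
  exclude_interim: true,
  sort: 'record_score',
  desc: true,
  size: 50,
})
```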
+


### get_trained_models [_get_trained_models]

Get trained model configuration info.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models)

```ts
client.ml.getTrainedModels({ ... })
```


### Arguments [_arguments_286]

* **Request (object):**

    * **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request by using a list of model IDs or a wildcard expression.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

        * Contains wildcard expressions and there are no models that match.
        * Contains the _all string or no identifiers and there are no matches.
        * Contains wildcard expressions and there are only partial matches.

        If true, it returns an empty array when there are no matches and the subset of results when there are partial matches.

    * **`decompress_definition` (Optional, boolean)**: Specifies whether the included model definition should be returned as a JSON map (true) or in a custom compressed format (false).
    * **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.
    * **`from` (Optional, number)**: Skips the specified number of models.
    * **`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))**: A comma delimited string of optional fields to include in the response body.
    * **`include_model_definition` (Optional, boolean)**: This parameter is deprecated; use `include=definition` instead.
    * **`size` (Optional, number)**: Specifies the maximum number of models to obtain.
    * **`tags` (Optional, string | string[])**: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned.


### get_trained_models_stats [_get_trained_models_stats]

Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a list of model IDs or a wildcard expression.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats)

```ts
client.ml.getTrainedModelsStats({ ... })
```


### Arguments [_arguments_287]

* **Request (object):**

    * **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. It can be a list or a wildcard expression.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

        * Contains wildcard expressions and there are no models that match.
        * Contains the _all string or no identifiers and there are no matches.
        * Contains wildcard expressions and there are only partial matches.

        If true, it returns an empty array when there are no matches and the subset of results when there are partial matches.

    * **`from` (Optional, number)**: Skips the specified number of models.
    * **`size` (Optional, number)**: Specifies the maximum number of models to obtain.


### infer_trained_model [_infer_trained_model]

Evaluate a trained model.
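
A hedged sketch with a hypothetical NLP model deployment (as noted below, NLP models typically expect the input under a `text_field` key):

```ts
// Send one document to the deployed model and print the results.
const response = await client.ml.inferTrainedModel({
  model_id: 'my-nlp-model',
  docs: [{ text_field: 'Elasticsearch is a distributed search engine.' }],
  timeout: '30s',
})

console.log(response.inference_results)
```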
+
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model)

```ts
client.ml.inferTrainedModel({ model_id, docs })
```


### Arguments [_arguments_288]

* **Request (object):**

    * **`model_id` (string)**: The unique identifier of the trained model.
    * **`docs` (Record[])**: An array of objects to pass to the model for inference. The objects should contain the fields matching your configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed.
    * **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The inference configuration updates to apply on the API call.
    * **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait for inference results.



### info [_info_3]

Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info)

```ts
client.ml.info()
```


### open_job [_open_job]

Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job)

```ts
client.ml.openJob({ job_id })
```


### Arguments [_arguments_289]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job.
    * **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter.



### post_calendar_events [_post_calendar_events]

Add scheduled events to the calendar.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events)

```ts
client.ml.postCalendarEvents({ calendar_id, events })
```


### Arguments [_arguments_290]

* **Request (object):**

    * **`calendar_id` (string)**: A string that uniquely identifies a calendar.
    * **`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])**: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.



### post_data [_post_data]

Send data to an anomaly detection job for analysis.

::::{important}
For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a list.
+:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data) + +```ts +client.ml.postData({ job_id }) +``` + + +### Arguments [_arguments_291] + +* **Request (object):** + + * **`job_id` (string)**: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. + * **`data` (Optional, TData[])** + * **`reset_end` (Optional, string | Unit)**: Specifies the end of the bucket resetting range. + * **`reset_start` (Optional, string | Unit)**: Specifies the start of the bucket resetting range. + + + +### preview_data_frame_analytics [_preview_data_frame_analytics] + +Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics) + +```ts +client.ml.previewDataFrameAnalytics({ ... }) +``` + + +### Arguments [_arguments_292] + +* **Request (object):** + + * **`id` (Optional, string)**: Identifier for the data frame analytics job. + * **`config` (Optional, { source, analysis, model_memory_limit, max_num_threads, analyzed_fields })**: A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API. + + + +### preview_datafeed [_preview_datafeed] + +Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed) + +```ts +client.ml.previewDatafeed({ ... }) +``` + + +### Arguments [_arguments_293] + +* **Request (object):** + + * **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body. + * **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: The datafeed definition to preview. + * **`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })**: The configuration details for the anomaly detection job that is associated with the datafeed. 
If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object.
    * **`start` (Optional, string | Unit)**: The start time from where the datafeed preview should begin.
    * **`end` (Optional, string | Unit)**: The end time when the datafeed preview should stop.



### put_calendar [_put_calendar]

Create a calendar.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar)

```ts
client.ml.putCalendar({ calendar_id })
```


### Arguments [_arguments_294]

* **Request (object):**

    * **`calendar_id` (string)**: A string that uniquely identifies a calendar.
    * **`job_ids` (Optional, string[])**: An array of anomaly detection job identifiers.
    * **`description` (Optional, string)**: A description of the calendar.



### put_calendar_job [_put_calendar_job]

Add an anomaly detection job to a calendar.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job)

```ts
client.ml.putCalendarJob({ calendar_id, job_id })
```


### Arguments [_arguments_295]

* **Request (object):**

    * **`calendar_id` (string)**: A string that uniquely identifies a calendar.
    * **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups.
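As a quick illustration, a sketch that creates a calendar and then links an existing anomaly detection job to it (the calendar and job IDs are hypothetical placeholders):

```ts
// Create a calendar, then associate an existing anomaly detection job with it.
await client.ml.putCalendar({
  calendar_id: 'holidays', // hypothetical calendar ID
  description: 'Public holidays to exclude from anomaly detection'
})
await client.ml.putCalendarJob({
  calendar_id: 'holidays',
  job_id: 'sales-anomalies' // hypothetical job ID
})
```
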
### put_data_frame_analytics [_put_data_frame_analytics]

Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. By default, the query used in the source configuration is `{"match_all": {}}`.

If the destination index does not exist, it is created automatically when you start the job.

If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics)

```ts
client.ml.putDataFrameAnalytics({ id, analysis, dest, source })
```


### Arguments [_arguments_296]

* **Request (object):**

    * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
    * **`analysis` ({ classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression.
    * **`dest` ({ index, results_field })**: The destination configuration.
    * **`source` ({ index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data.
    * **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If set to `false` and a machine learning node with capacity to run the job cannot be immediately found, the API returns an error. If set to `true`, the API does not return an error; the job waits in the `starting` state until sufficient machine learning node capacity is available. This behavior is also affected by the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting.
    * **`analyzed_fields` (Optional, { includes, excludes })**: Specifies `includes` and/or `excludes` patterns to select which fields will be included in the analysis. The patterns specified in `excludes` are applied last, therefore `excludes` takes precedence. In other words, if the same field is specified in both `includes` and `excludes`, then the field will not be included in the analysis. If `analyzed_fields` is not set, only the relevant fields will be included. For example, all the numeric fields for outlier detection. The supported fields vary for each type of analysis. Outlier detection requires numeric or `boolean` data to analyze. The algorithms don’t support missing values; therefore, fields that have data types other than numeric or boolean are ignored. Documents where included fields contain missing values, null values, or an array are also ignored. Therefore the `dest` index may contain documents that don’t have an outlier score. Regression supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the regression analysis. Classification supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the classification analysis. Classification analysis can be improved by mapping ordinal variable values to a single number. For example, in case of age ranges, you can model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on.
    * **`description` (Optional, string)**: A description of the job.
    * **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself.
    * **`_meta` (Optional, Record)**
    * **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting.
    * **`headers` (Optional, Record)**
    * **`version` (Optional, string)**
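For example, a minimal sketch of an outlier detection job; the job ID and index names are hypothetical placeholders:

```ts
// Create an outlier detection job reading from `ecommerce` and writing its results
// to a dedicated destination index.
await client.ml.putDataFrameAnalytics({
  id: 'ecommerce-outliers', // hypothetical job ID
  source: { index: 'ecommerce' }, // hypothetical source index
  dest: { index: 'ecommerce-outliers-results' },
  analysis: { outlier_detection: {} }
})
```
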
### put_datafeed [_put_datafeed]

Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`.

When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed)

```ts
client.ml.putDatafeed({ datafeed_id })
```


### Arguments [_arguments_297]

* **Request (object):**

    * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
    * **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data.
    * **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks is calculated; it is an advanced configuration option.
    * **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds.
    * **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation.
    * **`indices` (Optional, string | string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role.
    * **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search.
    * **`job_id` (Optional, string)**: Identifier for the anomaly detection job.
    * **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set.
    * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch.
    * **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node.
    * **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search.
    * **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and return script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields.
    * **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default.
    * **`headers` (Optional, Record)**
    * **`allow_no_indices` (Optional, boolean)**: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values.
    * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen.
    * **`ignore_unavailable` (Optional, boolean)**: If true, unavailable indices (missing or closed) are ignored.
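A minimal sketch that attaches a datafeed to an existing anomaly detection job; the datafeed ID, job ID, and index name are hypothetical placeholders:

```ts
// Create a datafeed that feeds the `web-logs` index into the `web-traffic` job.
await client.ml.putDatafeed({
  datafeed_id: 'datafeed-web-traffic', // hypothetical datafeed ID
  job_id: 'web-traffic', // hypothetical job ID
  indices: ['web-logs'], // hypothetical index
  query: { match_all: {} },
  scroll_size: 1000
})
```
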
### put_filter [_put_filter]

Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter)

```ts
client.ml.putFilter({ filter_id })
```


### Arguments [_arguments_298]

* **Request (object):**

    * **`filter_id` (string)**: A string that uniquely identifies a filter.
    * **`description` (Optional, string)**: A description of the filter.
    * **`items` (Optional, string[])**: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. Up to 10,000 items are allowed in each filter.
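For instance, a sketch that defines a filter for use in detector custom rules (the filter ID and items are hypothetical placeholders):

```ts
// Create a filter whose items can be referenced from detector custom rules.
await client.ml.putFilter({
  filter_id: 'safe-domains', // hypothetical filter ID
  description: 'Domains that should not trigger anomalies',
  items: ['*.elastic.co', 'example.com'] // hypothetical items
})
```
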
### put_job [_put_job]

Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job)

```ts
client.ml.putJob({ job_id, analysis_config, data_description })
```


### Arguments [_arguments_299]

* **Request (object):**

    * **`job_id` (string)**: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
    * **`analysis_config` ({ bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational.
    * **`data_description` ({ format, time_field, time_format, field_delimiter })**: Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained.
    * **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available.
    * **`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })**: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes.
    * **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low.
    * **`custom_settings` (Optional, User-defined value)**: Advanced configuration option. Contains custom meta data about the job.
    * **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job.
It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. + * **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. + * **`description` (Optional, string)**: A description of the job. + * **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. + * **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })**: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. + * **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. + * **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. + * **`results_index_name` (Optional, string)**: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. + * **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. + * **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. 
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are:

* `all`: Match any data stream or index, including hidden ones.
* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.
* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.
* `none`: Wildcard patterns are not accepted.
* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.

    * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen.
    * **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored.



### put_trained_model [_put_trained_model]

Create a trained model. Enables you to supply a trained model that is not created by data frame analytics.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model)

```ts
client.ml.putTrainedModel({ model_id })
```


### Arguments [_arguments_300]

* **Request (object):**

    * **`model_id` (string)**: The unique identifier of the trained model.
    * **`compressed_definition` (Optional, string)**: The compressed (GZipped and Base64 encoded) inference definition of the model. If compressed_definition is specified, then definition cannot be specified.
    * **`definition` (Optional, { preprocessors, trained_model })**: The inference definition for the model. If definition is specified, then compressed_definition cannot be specified.
    * **`description` (Optional, string)**: A human-readable description of the inference trained model.
    * **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The default configuration for inference. This can be either a regression or classification configuration. It must match the underlying definition.trained_model’s target_type. For pre-packaged models such as ELSER the config is not required.
    * **`input` (Optional, { field_names })**: The input field names for the model definition.
    * **`metadata` (Optional, User-defined value)**: An object map that contains metadata about the model.
    * **`model_type` (Optional, Enum("tree_ensemble" | "lang_ident" | "pytorch"))**: The model type.
    * **`model_size_bytes` (Optional, number)**: The estimated memory usage in bytes to keep the trained model in memory. This property is supported only if defer_definition_decompression is true or the model definition is not supplied.
    * **`platform_architecture` (Optional, string)**: The platform architecture (if applicable) of the trained model. If the model only works on one platform, because it is heavily optimized for a particular processor architecture and OS combination, then this field specifies which. The format of the string must match the platform identifiers used by Elasticsearch, so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. For portable models (those that work independent of processor architecture or OS features), leave this field unset.
    * **`tags` (Optional, string[])**: An array of tags to organize the model.
    * **`prefix_strings` (Optional, { ingest, search })**: Optional prefix strings applied at inference.
    * **`defer_definition_decompression` (Optional, boolean)**: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations.
    * **`wait_for_completion` (Optional, boolean)**: Whether to wait for all child operations (e.g. model download) to complete.
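As an illustrative sketch only (the model ID, input field names, and definition payload are hypothetical placeholders), this is the general shape of registering a pre-built model from a compressed definition:

```ts
// The compressed (GZipped and Base64 encoded) model definition, produced elsewhere.
const compressedDefinition = '...' // hypothetical placeholder, not a real payload

await client.ml.putTrainedModel({
  model_id: 'my-regression-model', // hypothetical model ID
  model_type: 'tree_ensemble',
  compressed_definition: compressedDefinition,
  input: { field_names: ['feature_1', 'feature_2'] }, // hypothetical input fields
  inference_config: { regression: {} },
  defer_definition_decompression: true
})
```
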
### put_trained_model_alias [_put_trained_model_alias]

Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias)

```ts
client.ml.putTrainedModelAlias({ model_alias, model_id })
```


### Arguments [_arguments_301]

* **Request (object):**

    * **`model_alias` (string)**: The alias to create or update. This value cannot end in numbers.
    * **`model_id` (string)**: The identifier for the trained model that the alias refers to.
    * **`reassign` (Optional, boolean)**: Specifies whether the alias gets reassigned to the specified trained model if it is already assigned to a different model. If the alias is already assigned and this parameter is false, the API returns an error.
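A short sketch of pointing an alias at a newer model version, reassigning it if the alias already exists (the alias and model ID are hypothetical placeholders):

```ts
// Point the `current-regression` alias at the new model version.
await client.ml.putTrainedModelAlias({
  model_alias: 'current-regression', // hypothetical alias; note it cannot end in numbers
  model_id: 'my-regression-model-v2', // hypothetical model ID
  reassign: true
})
```
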
### put_trained_model_definition_part [_put_trained_model_definition_part]

Create part of a trained model definition.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part)

```ts
client.ml.putTrainedModelDefinitionPart({ model_id, part, definition, total_definition_length, total_parts })
```


### Arguments [_arguments_302]

* **Request (object):**

    * **`model_id` (string)**: The unique identifier of the trained model.
    * **`part` (number)**: The definition part number. When the definition is loaded for inference the definition parts are streamed in the order of their part number. The first part must be `0` and the final part must be `total_parts - 1`.
    * **`definition` (string)**: The definition part for the model. Must be a base64 encoded string.
    * **`total_definition_length` (number)**: The total uncompressed definition length in bytes. Not base64 encoded.
    * **`total_parts` (number)**: The total number of parts that will be uploaded. Must be greater than 0.



### put_trained_model_vocabulary [_put_trained_model_vocabulary]

Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary)

```ts
client.ml.putTrainedModelVocabulary({ model_id, vocabulary })
```


### Arguments [_arguments_303]

* **Request (object):**

    * **`model_id` (string)**: The unique identifier of the trained model.
    * **`vocabulary` (string[])**: The model vocabulary, which must not be empty.
    * **`merges` (Optional, string[])**: The optional model merges if required by the tokenizer.
    * **`scores` (Optional, number[])**: The optional vocabulary value scores if required by the tokenizer.



### reset_job [_reset_job]

Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma-separated list.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job)

```ts
client.ml.resetJob({ job_id })
```


### Arguments [_arguments_304]

* **Request (object):**

    * **`job_id` (string)**: The ID of the job to reset.
    * **`wait_for_completion` (Optional, boolean)**: Should this request wait until the operation has completed before returning.
    * **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset.



### revert_model_snapshot [_revert_model_snapshot]

Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot)

```ts
client.ml.revertModelSnapshot({ job_id, snapshot_id })
```


### Arguments [_arguments_305]

* **Request (object):**

    * **`job_id` (string)**: Identifier for the anomaly detection job.
    * **`snapshot_id` (string)**: Identifier for the model snapshot. You can specify `empty` as the snapshot ID. Reverting to the empty snapshot means the anomaly detection job starts learning a new model from scratch when it is started.
    * **`delete_intervening_results` (Optional, boolean)**: Refer to the description for the `delete_intervening_results` query parameter.
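For example, a sketch that reverts a job to its empty snapshot and discards the intervening results (the job ID is hypothetical):

```ts
// Roll the job back to a clean model state and delete results created since then.
await client.ml.revertModelSnapshot({
  job_id: 'web-traffic', // hypothetical job ID
  snapshot_id: 'empty',
  delete_intervening_results: true
})
```
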
### set_upgrade_mode [_set_upgrade_mode]

Set upgrade_mode for ML indices. Sets a cluster-wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode)

```ts
client.ml.setUpgradeMode({ ... })
```


### Arguments [_arguments_306]

* **Request (object):**

    * **`enabled` (Optional, boolean)**: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from starting.
    * **`timeout` (Optional, string | -1 | 0)**: The time to wait for the request to be completed.



### start_data_frame_analytics [_start_data_frame_analytics]

Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics)

```ts
client.ml.startDataFrameAnalytics({ id })
```


### Arguments [_arguments_307]

* **Request (object):**

    * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
    * **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job starts.



### start_datafeed [_start_datafeed]

Start datafeeds.

A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.

Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs.

If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored.

When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed)

```ts
client.ml.startDatafeed({ datafeed_id })
```


### Arguments [_arguments_308]

* **Request (object):**

    * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
It must start and end with alphanumeric characters. + * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. + * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. + * **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. + + + +### start_trained_model_deployment [_start_trained_model_deployment] + +Start a trained model deployment. It allocates the model to every machine learning node. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment) + +```ts +client.ml.startTrainedModelDeployment({ model_id }) +``` + + +### Arguments [_arguments_309] + +* **Request (object):** + + * **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. + * **`cache_size` (Optional, number | string)**: The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. + * **`deployment_id` (Optional, string)**: A unique identifier for the deployment of the model. + * **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. + * **`priority` (Optional, Enum("normal" | "low"))**: The deployment priority. + * **`queue_capacity` (Optional, number)**: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests are rejected with a 429 error. + * **`threads_per_allocation` (Optional, number)**: Sets the number of threads used by each model allocation during inference. This generally increases the inference speed. The inference process is a compute-bound process; any number greater than the number of available hardware threads on the machine does not increase the inference speed. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. + * **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the model to deploy. + * **`wait_for` (Optional, Enum("started" | "starting" | "fully_allocated"))**: Specifies the allocation status to wait for before returning. + + + +### stop_data_frame_analytics [_stop_data_frame_analytics] + +Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics) + +```ts +client.ml.stopDataFrameAnalytics({ id }) +``` + + +### Arguments [_arguments_310] + +* **Request (object):** + + * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. + * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + + 1. 
Contains wildcard expressions and there are no data frame analytics jobs that match.
    2. Contains the _all string or no identifiers and there are no matches.
    3. Contains wildcard expressions and there are only partial matches.

      The default value is true, which returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches.

    * **`force` (Optional, boolean)**: If true, the data frame analytics job is stopped forcefully.
    * **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job stops. Defaults to 20 seconds.


### stop_datafeed [_stop_datafeed]

Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed)

```ts
client.ml.stopDatafeed({ datafeed_id })
```


### Arguments [_arguments_311]

* **Request (object):**

    * **`datafeed_id` (string)**: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as the identifier.
    * **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter.
    * **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter.
    * **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter.



### stop_trained_model_deployment [_stop_trained_model_deployment]

Stop a trained model deployment.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment)

```ts
client.ml.stopTrainedModelDeployment({ model_id })
```


### Arguments [_arguments_312]

* **Request (object):**

    * **`model_id` (string)**: The unique identifier of the trained model.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches.
    * **`force` (Optional, boolean)**: Forcefully stops the deployment, even if it is used by ingest pipelines. You can’t use these pipelines until you restart the model deployment.



### update_data_frame_analytics [_update_data_frame_analytics]

Update a data frame analytics job.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics)

```ts
client.ml.updateDataFrameAnalytics({ id })
```


### Arguments [_arguments_313]

* **Request (object):**

    * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
    * **`description` (Optional, string)**: A description of the job.
    * **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting.
    * **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself.
    * **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node.
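For example, a sketch that raises the memory budget of an existing job (the job ID is hypothetical):

```ts
// Raise the memory budget of an existing data frame analytics job.
await client.ml.updateDataFrameAnalytics({
  id: 'ecommerce-outliers', // hypothetical job ID
  model_memory_limit: '100mb'
})
```
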
### update_datafeed [_update_datafeed]

Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed)

```ts
client.ml.updateDatafeed({ datafeed_id })
```


### Arguments [_arguments_314]

* **Request (object):**

    * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
    * **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data.
    * **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks is calculated; it is an advanced configuration option.
    * **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds.
    * **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation.
    * **`indices` (Optional, string[])**: An array of index names. Wildcards are supported.
If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role.
    * **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search.
    * **`job_id` (Optional, string)**
    * **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set.
    * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also changed. Therefore, the time required to learn might be long and the understandability of the results is unpredictable. If you want to make significant changes to the source data, it is recommended that you clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one when you are satisfied with the results of the job.
    * **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node.
    * **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search.
    * **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and return script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields.
    * **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`.
    * **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified.
+ * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: + +* `all`: Match any data stream or index, including hidden ones. +* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. +* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. +* `none`: Wildcard patterns are not accepted. +* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. + + * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen. + * **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored. + + + +### update_filter [_update_filter] + +Update a filter. Updates the description of a filter, adds items, or removes items from the list. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter) + +```ts +client.ml.updateFilter({ filter_id }) +``` + + +### Arguments [_arguments_315] + +* **Request (object):** + + * **`filter_id` (string)**: A string that uniquely identifies a filter. + * **`add_items` (Optional, string[])**: The items to add to the filter. + * **`description` (Optional, string)**: A description for the filter. + * **`remove_items` (Optional, string[])**: The items to remove from the filter. + + + +### update_job [_update_job] + +Update an anomaly detection job. Updates certain properties of an anomaly detection job. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job) + +```ts +client.ml.updateJob({ job_id }) +``` + + +### Arguments [_arguments_316] + +* **Request (object):** + + * **`job_id` (string)**: Identifier for the job. + * **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If `false` and a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to `true`, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. + * **`analysis_limits` (Optional, { model_memory_limit })** + * **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the value too low. If the job is open when you make the update, you must stop the datafeed, close the job, then reopen the job and restart the datafeed for the changes to take effect. + * **`custom_settings` (Optional, Record)**: Advanced configuration option. Contains custom meta data about the job. 
For example, it can contain custom URL information as shown in Adding custom URLs to machine learning results. + * **`categorization_filters` (Optional, string[])** + * **`description` (Optional, string)**: A description of the job. + * **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })** + * **`model_prune_window` (Optional, string | -1 | 0)** + * **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. For jobs created before version 7.8.0, the default value matches `model_snapshot_retention_days`. + * **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. + * **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. + * **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. + * **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. + * **`detectors` (Optional, { detector_index, description, custom_rules }[])**: An array of detector update objects. + * **`per_partition_categorization` (Optional, { enabled, stop_on_warn })**: Settings related to how categorization interacts with partition fields. + + + +### update_model_snapshot [_update_model_snapshot] + +Update a snapshot. Updates certain properties of a snapshot. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot) + +```ts +client.ml.updateModelSnapshot({ job_id, snapshot_id }) +``` + + +### Arguments [_arguments_317] + +* **Request (object):** + + * **`job_id` (string)**: Identifier for the anomaly detection job. + * **`snapshot_id` (string)**: Identifier for the model snapshot. + * **`description` (Optional, string)**: A description of the model snapshot. + * **`retain` (Optional, boolean)**: If `true`, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. However, this snapshot will be deleted when the job is deleted. + + + +### update_trained_model_deployment [_update_trained_model_deployment] + +Update a trained model deployment. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment) + +```ts +client.ml.updateTrainedModelDeployment({ model_id }) +``` + + +### Arguments [_arguments_318] + +* **Request (object):** + + * **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. 
+ * **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. + + + +### upgrade_job_snapshot [_upgrade_job_snapshot] + +Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot) + +```ts +client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) +``` + + +### Arguments [_arguments_319] + +* **Request (object):** + + * **`job_id` (string)**: Identifier for the anomaly detection job. + * **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. + * **`wait_for_completion` (Optional, boolean)**: When true, the API won’t respond until the upgrade is complete. Otherwise, it responds as soon as the upgrade task is assigned to a node. + * **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the request to complete. + + + +## nodes [_nodes_2] + + +### clear_repositories_metering_archive [_clear_repositories_metering_archive] + +Clear the archived repositories metering. Clear the archived repositories metering information in the cluster. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive) + +```ts +client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) +``` + + +### Arguments [_arguments_320] + +* **Request (object):** + + * **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. + * **`max_archive_version` (number)**: Specifies the maximum `archive_version` to be cleared from the archive. + + + +### get_repositories_metering_info [_get_repositories_metering_info] + +Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info) + +```ts +client.nodes.getRepositoriesMeteringInfo({ node_id }) +``` + + +### Arguments [_arguments_321] + +* **Request (object):** + + * **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster). 
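
Because these counters are volatile and are lost on node restart, a caller is expected to poll them and store the results durably. A minimal sketch of such a poll, assuming a configured `client` instance and an illustrative node name:

```ts
// Hypothetical node name; the parameter also accepts a list of node IDs or names.
const meteringInfo = await client.nodes.getRepositoriesMeteringInfo({
  node_id: 'instance-0000000000'
})
// The counters are monotonically non-decreasing but are not persisted
// across restarts, so store the response before aggregating over time.
console.log(JSON.stringify(meteringInfo))
```
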
+ + + +### hot_threads [_hot_threads] + +Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads) + +```ts +client.nodes.hotThreads({ ... }) +``` + + +### Arguments [_arguments_322] + +* **Request (object):** + + * **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. + * **`ignore_idle_threads` (Optional, boolean)**: If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out. + * **`interval` (Optional, string | -1 | 0)**: The interval to do the second sampling of threads. + * **`snapshots` (Optional, number)**: Number of samples of thread stacktrace. + * **`threads` (Optional, number)**: Specifies the number of hot threads to provide information for. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + * **`type` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The type to sample. + * **`sort` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The sort order for *cpu* type (default: total) + + + +### info [_info_4] + +Get node information. By default, the API returns all attributes and core settings for cluster nodes. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info) + +```ts +client.nodes.info({ ... }) +``` + + +### Arguments [_arguments_323] + +* **Request (object):** + + * **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. + * **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. + * **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### reload_secure_settings [_reload_secure_settings] + +Reload the keystore on nodes in the cluster. + +Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. + +When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. + +[Endpoint documentation](docs-content://deploy-manage/security/secure-settings.md) + +```ts +client.nodes.reloadSecureSettings({ ... }) +``` + + +### Arguments [_arguments_324] + +* **Request (object):** + + * **`node_id` (Optional, string | string[])**: The names of particular nodes in the cluster to target. 
+ * **`secure_settings_password` (Optional, string)**: The password for the Elasticsearch keystore. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### stats [_stats_5] + +Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats) + +```ts +client.nodes.stats({ ... }) +``` + + +### Arguments [_arguments_325] + +* **Request (object):** + + * **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. + * **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics + * **`index_metric` (Optional, string | string[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. + * **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. + * **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics. + * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. + * **`groups` (Optional, boolean)**: List of search groups to include in the search statistics. + * **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). + * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + * **`types` (Optional, string[])**: A list of document types for the indexing index metric. + * **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory. + + + +### usage [_usage] + +Get feature usage information. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage) + +```ts +client.nodes.usage({ ... }) +``` + + +### Arguments [_arguments_326] + +* **Request (object):** + + * **`node_id` (Optional, string | string[])**: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you’re connecting to, leave empty to get information from all nodes + * **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. A list of the following options: `_all`, `rest_actions`. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +## query_rules [_query_rules] + + +### delete_rule [_delete_rule] + +Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. 
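
Since the only way to recover a deleted rule is to recreate it, a cautious caller can capture the rule before deleting it. A short sketch, with hypothetical ruleset and rule identifiers:

```ts
// Hypothetical identifiers; keep a copy of the rule so it can be
// re-added later with putRule if the deletion turns out to be a mistake.
const rule = await client.queryRules.getRule({
  ruleset_id: 'my-ruleset',
  rule_id: 'my-rule'
})
await client.queryRules.deleteRule({
  ruleset_id: 'my-ruleset',
  rule_id: 'my-rule'
})
```
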
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule)

```ts
client.queryRules.deleteRule({ ruleset_id, rule_id })
```


### Arguments [_arguments_327]

* **Request (object):**

    * **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to delete
    * **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to delete



### delete_ruleset [_delete_ruleset]

Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset)

```ts
client.queryRules.deleteRuleset({ ruleset_id })
```


### Arguments [_arguments_328]

* **Request (object):**

    * **`ruleset_id` (string)**: The unique identifier of the query ruleset to delete



### get_rule [_get_rule]

Get a query rule. Get details about a query rule within a query ruleset.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule)

```ts
client.queryRules.getRule({ ruleset_id, rule_id })
```


### Arguments [_arguments_329]

* **Request (object):**

    * **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to retrieve
    * **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to retrieve



### get_ruleset [_get_ruleset]

Get a query ruleset. Get details about a query ruleset.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset)

```ts
client.queryRules.getRuleset({ ruleset_id })
```


### Arguments [_arguments_330]

* **Request (object):**

    * **`ruleset_id` (string)**: The unique identifier of the query ruleset



### list_rulesets [_list_rulesets]

Get all query rulesets. Get summarized information about the query rulesets.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets)

```ts
client.queryRules.listRulesets({ ... })
```


### Arguments [_arguments_331]

* **Request (object):**

    * **`from` (Optional, number)**: The offset from the first result to fetch.
    * **`size` (Optional, number)**: The maximum number of results to retrieve.



### put_rule [_put_rule]

Create or update a query rule. Create or update a query rule within a query ruleset.

::::{important}
Due to limitations within pinned queries, you can only pin documents using `ids` or `docs`, but cannot use both in a single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule)

```ts
client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions })
```


### Arguments [_arguments_332]

* **Request (object):**

    * **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to be created or updated.
    * **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to be created or updated.
    * **`type` (Enum("pinned" | "exclude"))**: The type of rule.
    * **`criteria` ({ type, metadata, values } | { type, metadata, values }[])**: The criteria that must be met for the rule to be applied. If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied.
    * **`actions` ({ ids, docs })**: The actions to take when the rule is matched. The format of this action depends on the rule type.
    * **`priority` (Optional, number)**



### put_ruleset [_put_ruleset]

Create or update a query ruleset. There is a limit of 100 rules per ruleset. This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting.

::::{important}
Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in a single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset)

```ts
client.queryRules.putRuleset({ ruleset_id, rules })
```


### Arguments [_arguments_333]

* **Request (object):**

    * **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated.
    * **`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])**



### test [_test]

Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test)

```ts
client.queryRules.test({ ruleset_id, match_criteria })
```


### Arguments [_arguments_334]

* **Request (object):**

    * **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated
    * **`match_criteria` (Record)**: The match criteria to apply to rules in the given query ruleset. Match criteria should match the keys defined in the `criteria.metadata` field of the rule.



## rollup [_rollup]


### delete_job [_delete_job_2]

Delete a rollup job.

A job must be stopped before it can be deleted. If you attempt to delete a started job, an error occurs. Similarly, if you attempt to delete a nonexistent job, an exception occurs.

::::{important}
When you delete a job, you remove only the process that is actively monitoring and rolling up data. The API does not delete any previously rolled up data. This is by design; a user may wish to roll up a static data set. Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job’s identifier in the rollup index.
For example: +:::: + + +``` +POST my_rollup_index/_delete_by_query +{ + "query": { + "term": { + "_rollup.id": "the_rollup_job_id" + } + } +} +``` + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job) + +```ts +client.rollup.deleteJob({ id }) +``` + + +### Arguments [_arguments_335] + +* **Request (object):** + + * **`id` (string)**: Identifier for the job. + + + +### get_jobs [_get_jobs_2] + +Get rollup job information. Get the configuration, stats, and status of rollup jobs. + +::::{note} +This API returns only active (both `STARTED` and `STOPPED`) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs) + +```ts +client.rollup.getJobs({ ... }) +``` + + +### Arguments [_arguments_336] + +* **Request (object):** + + * **`id` (Optional, string)**: Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. + + + +### get_rollup_caps [_get_rollup_caps] + +Get the rollup job capabilities. Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. + +This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine: + +1. Does this index have associated rollup data somewhere in the cluster? +2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps) + +```ts +client.rollup.getRollupCaps({ ... }) +``` + + +### Arguments [_arguments_337] + +* **Request (object):** + + * **`id` (Optional, string)**: Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. + + + +### get_rollup_index_caps [_get_rollup_index_caps] + +Get the rollup index capabilities. Get the rollup capabilities of all jobs inside of a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: + +* What jobs are stored in an index (or indices specified via a pattern)? +* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps) + +```ts +client.rollup.getRollupIndexCaps({ index }) +``` + + +### Arguments [_arguments_338] + +* **Request (object):** + + * **`index` (string | string[])**: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. + + + +### put_job [_put_job_2] + +Create a rollup job. + +::::{warning} +From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. 
A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run.
::::


The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index.

There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.

Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job)

```ts
client.rollup.putJob({ id, cron, groups, index_pattern, page_size, rollup_index })
```


### Arguments [_arguments_339]

* **Request (object):**

    * **`id` (string)**: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. The ID is persistent; it is stored with the rolled up data. If you create a job, let it run for a while, then delete the job, the data that the job rolled up is still associated with this job ID. You cannot create a new job with the same ID since that could lead to problems with mismatched job configurations.
    * **`cron` (string)**: A cron string which defines the intervals when the rollup job should be executed. When the interval triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated to the time interval of the data being rolled up. For example, you may wish to create hourly rollups of your documents but to only run the indexer on a daily basis at midnight, as defined by the cron. The cron pattern is defined just like a Watcher cron schedule.
    * **`groups` ({ date_histogram, histogram, terms })**: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of the groups configuration as defining a set of tools that can later be used in aggregations to partition the data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide enough flexibility that you simply need to determine which fields are needed, not in what order they are needed.
    * **`index_pattern` (string)**: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to rollup the entire index or index-pattern.
    * **`page_size` (number)**: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends to execute faster, but requires more memory during processing. This value has no effect on how the data is rolled up; it is merely used for tweaking the speed or memory cost of the indexer.
    * **`rollup_index` (string)**: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs.
    * **`metrics` (Optional, { field, metrics }[])**: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined on a per-field basis and for each field you configure which metric should be collected.
    * **`timeout` (Optional, string | -1 | 0)**: Time to wait for the request to complete.
    * **`headers` (Optional, Record)**



### rollup_search [_rollup_search]

Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.

The request body supports a subset of features from the regular search API. The following functionality is not available:

* `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.
* `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.

**Searching both historical rollup and non-rollup data**

The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. For example:

```
GET sensor-1,sensor_rollup/_rollup_search
{
  "size": 0,
  "aggregations": {
    "max_temperature": {
      "max": {
        "field": "temperature"
      }
    }
  }
}
```

The rollup search endpoint does two things when the search runs:

* The original request is sent to the non-rollup index unaltered.
* A rewritten version of the original request is sent to the rollup index.

When the two responses are received, the endpoint rewrites the rollup response and merges the two together. During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search)

```ts
client.rollup.rollupSearch({ index })
```


### Arguments [_arguments_340]

* **Request (object):**

    * **`index` (string | string[])**: A list of data streams and indices used to limit the request. This parameter has the following rules:

* At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream’s backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted.
* Multiple non-rollup indices may be specified.
* Only one rollup index may be specified. If more than one are supplied, an exception occurs.
* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams.

    * **`aggregations` (Optional, Record)**: Specifies aggregations.
+ * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specifies a DSL query that is subject to some limitations. + * **`size` (Optional, number)**: Must be zero if set, as rollups work on pre-aggregated data. + * **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response + * **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response + + + +### start_job [_start_job] + +Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job) + +```ts +client.rollup.startJob({ id }) +``` + + +### Arguments [_arguments_341] + +* **Request (object):** + + * **`id` (string)**: Identifier for the rollup job. + + + +### stop_job [_stop_job] + +Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. + +Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: + +``` +POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +``` + +The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job) + +```ts +client.rollup.stopJob({ id }) +``` + + +### Arguments [_arguments_342] + +* **Request (object):** + + * **`id` (string)**: Identifier for the rollup job. + * **`timeout` (Optional, string | -1 | 0)**: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. If more than `timeout` time has passed, the API throws a timeout exception. NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. The timeout simply means the API call itself timed out while waiting for the status change. + * **`wait_for_completion` (Optional, boolean)**: If set to `true`, causes the API to block until the indexer state completely stops. If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. + + + +## search_application [_search_application] + + +### delete [_delete_8] + +Delete a search application. Remove a search application and its associated alias. 
Indices attached to the search application are not removed. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete) + +```ts +client.searchApplication.delete({ name }) +``` + + +### Arguments [_arguments_343] + +* **Request (object):** + + * **`name` (string)**: The name of the search application to delete + + + +### delete_behavioral_analytics [_delete_behavioral_analytics] + +Delete a behavioral analytics collection. The associated data stream is also deleted. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics) + +```ts +client.searchApplication.deleteBehavioralAnalytics({ name }) +``` + + +### Arguments [_arguments_344] + +* **Request (object):** + + * **`name` (string)**: The name of the analytics collection to be deleted + + + +### get [_get_8] + +Get search application details. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get) + +```ts +client.searchApplication.get({ name }) +``` + + +### Arguments [_arguments_345] + +* **Request (object):** + + * **`name` (string)**: The name of the search application + + + +### get_behavioral_analytics [_get_behavioral_analytics] + +Get behavioral analytics collections. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics) + +```ts +client.searchApplication.getBehavioralAnalytics({ ... }) +``` + + +### Arguments [_arguments_346] + +* **Request (object):** + + * **`name` (Optional, string[])**: A list of analytics collections to limit the returned information + + + +### list [_list_2] + +Get search applications. Get information about search applications. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-list) + +```ts +client.searchApplication.list({ ... }) +``` + + +### Arguments [_arguments_347] + +* **Request (object):** + + * **`q` (Optional, string)**: Query in the Lucene query string syntax. + * **`from` (Optional, number)**: Starting offset. + * **`size` (Optional, number)**: Specifies a max number of results to get. + + + +### post_behavioral_analytics_event [_post_behavioral_analytics_event] + +Create a behavioral analytics collection event. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event) + +```ts +client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_type }) +``` + + +### Arguments [_arguments_348] + +* **Request (object):** + + * **`collection_name` (string)**: The name of the behavioral analytics collection. + * **`event_type` (Enum("page_view" | "search" | "search_click"))**: The analytics event type. + * **`payload` (Optional, User-defined value)** + * **`debug` (Optional, boolean)**: Whether the response type has to include more details + + + +### put [_put_3] + +Create or update a search application. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put) + +```ts +client.searchApplication.put({ name }) +``` + + +### Arguments [_arguments_349] + +* **Request (object):** + + * **`name` (string)**: The name of the search application to be created or updated. 
    * **`search_application` (Optional, { indices, analytics_collection_name, template })**
    * **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing Search Applications.



### put_behavioral_analytics [_put_behavioral_analytics]

Create a behavioral analytics collection.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics)

```ts
client.searchApplication.putBehavioralAnalytics({ name })
```


### Arguments [_arguments_350]

* **Request (object):**

    * **`name` (string)**: The name of the analytics collection to be created or updated.



### render_query [_render_query]

Render a search application query. Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. If a parameter used in the search template is not specified in `params`, the parameter’s default value will be used. The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.

You must have `read` privileges on the backing alias of the search application.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query)

```ts
client.searchApplication.renderQuery({ name })
```


### Arguments [_arguments_351]

* **Request (object):**

    * **`name` (string)**: The name of the search application to render the query for.
    * **`params` (Optional, Record)**



### search [_search_4]

Run a search application search. Generate and run an Elasticsearch query that uses the specified query parameters and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search)

```ts
client.searchApplication.search({ name })
```


### Arguments [_arguments_352]

* **Request (object):**

    * **`name` (string)**: The name of the search application to be searched.
    * **`params` (Optional, Record)**: Query parameters specific to this request, which will override any defaults specified in the template.
    * **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response.



## searchable_snapshots [_searchable_snapshots]


### cache_stats [_cache_stats]

Get cache statistics. Get statistics about the shared cache for partially mounted indices.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats)

```ts
client.searchableSnapshots.cacheStats({ ... })
```


### Arguments [_arguments_353]

* **Request (object):**

    * **`node_id` (Optional, string | string[])**: The names of the nodes in the cluster to target.
    * **`master_timeout` (Optional, string | -1 | 0)**



### clear_cache [_clear_cache_2]

Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache)

```ts
client.searchableSnapshots.clearCache({ ... })
```
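
For example, a minimal sketch that clears a single partially mounted index from the shared cache (the index name is illustrative):

```ts
// Hypothetical index name; wildcards such as `logs-*` are also accepted.
await client.searchableSnapshots.clearCache({
  index: 'my-partial-index',
  ignore_unavailable: true // skip the index if it is missing or closed
})
```
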

### Arguments [_arguments_354]

* **Request (object):**

    * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`).
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
    * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
    * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed)



### mount [_mount]

Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount)

```ts
client.searchableSnapshots.mount({ repository, snapshot, index })
```


### Arguments [_arguments_355]

* **Request (object):**

    * **`repository` (string)**: The name of the repository containing the snapshot of the index to mount.
    * **`snapshot` (string)**: The name of the snapshot of the index to mount.
    * **`index` (string)**: The name of the index contained in the snapshot whose data is to be mounted. If no `renamed_index` is specified, this name will also be used to create the new index.
    * **`renamed_index` (Optional, string)**: The name of the index that will be created.
    * **`index_settings` (Optional, Record)**: The settings that should be added to the index when it is mounted.
    * **`ignore_index_settings` (Optional, string[])**: The names of settings that should be removed from the index when it is mounted.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
    * **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until the operation is complete.
    * **`storage` (Optional, string)**: The mount option for the searchable snapshot index.



### stats [_stats_6]

Get searchable snapshot statistics.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats)

```ts
client.searchableSnapshots.stats({ ... })
```


### Arguments [_arguments_356]

* **Request (object):**

    * **`index` (Optional, string | string[])**: A list of data streams and indices to retrieve statistics for.
    * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Return stats aggregated at cluster, index or shard level



## security [_security]


### activate_user_profile [_activate_user_profile]

Activate a user profile.

Create or update a user profile on behalf of another user.

::::{note}
The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search, and Elastic Security solutions. Individual users and external applications should not call this API directly.
The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. Elastic reserves the right to change or remove this feature in future releases without prior notice. +:::: + + +This API creates or updates a profile document for end users with information that is extracted from the user’s authentication object including `username`, `full_name,` `roles`, and the authentication realm. For example, in the JWT `access_token` case, the profile user’s `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. + +When updating a profile document, the API enables the document if it was disabled. Any updates do not change existing content for either the `labels` or `data` fields. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile) + +```ts +client.security.activateUserProfile({ grant_type }) +``` + + +### Arguments [_arguments_357] + +* **Request (object):** + + * **`grant_type` (Enum("password" | "access_token"))**: The type of grant. + * **`access_token` (Optional, string)**: The user’s Elasticsearch access token or JWT. Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. + * **`password` (Optional, string)**: The user’s password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. + * **`username` (Optional, string)**: The username that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. + + + +### authenticate [_authenticate] + +Authenticate a user. + +Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate) + +```ts +client.security.authenticate() +``` + + +### bulk_delete_role [_bulk_delete_role] + +Bulk delete roles. + +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role) + +```ts +client.security.bulkDeleteRole({ names }) +``` + + +### Arguments [_arguments_358] + +* **Request (object):** + + * **`names` (string[])**: An array of role names to delete + * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + + + +### bulk_put_role [_bulk_put_role] + +Bulk create or update roles. 
+ +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role) + +```ts +client.security.bulkPutRole({ roles }) +``` + + +### Arguments [_arguments_359] + +* **Request (object):** + + * **`roles` (Record)**: A dictionary of role name to RoleDescriptor objects to add or update + * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + + + +### bulk_update_api_keys [_bulk_update_api_keys] + +Bulk update API keys. Update the attributes for multiple API keys. + +::::{important} +It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user’s credentials are required. +:::: + + +This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. + +It is not possible to update expired or invalidated API keys. + +This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call. + +::::{important} +If you don’t specify `role_descriptors` in the request, a call to this API might still change an API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified. +:::: + + +A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys) + +```ts +client.security.bulkUpdateApiKeys({ ids }) +``` + + +### Arguments [_arguments_360] + +* **Request (object):** + + * **`ids` (string | string[])**: The API key identifiers. + * **`expiration` (Optional, string | -1 | 0)**: Expiration time for the API keys. By default, API keys never expire. This property can be omitted to leave the value unchanged. + * **`metadata` (Optional, Record)**: Arbitrary nested metadata to associate with the API keys. Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. Any information specified with this parameter fully replaces metadata previously associated with the API key. + * **`role_descriptors` (Optional, Record)**: The role descriptors to assign to the API keys. An API key’s effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user’s full permissions. 
The snapshot of the owner’s permissions is always updated, whether or not you supply the `role_descriptors` parameter. The structure of a role descriptor is the same as the request for the create API keys API.



### change_password [_change_password]

Change passwords.

Change the passwords of users in the native realm and built-in users.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password)

```ts
client.security.changePassword({ ... })
```


### Arguments [_arguments_361]

* **Request (object):**

    * **`username` (Optional, string)**: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user.
    * **`password` (Optional, string)**: The new password value. Passwords must be at least 6 characters long.
    * **`password_hash` (Optional, string)**: A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting.
    * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.



### clear_api_key_cache [_clear_api_key_cache]

Clear the API key cache.

Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache)

```ts
client.security.clearApiKeyCache({ ids })
```


### Arguments [_arguments_362]

* **Request (object):**

    * **`ids` (string | string[])**: List of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns.



### clear_cached_privileges [_clear_cached_privileges]

Clear the privileges cache.

Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges)

```ts
client.security.clearCachedPrivileges({ application })
```


### Arguments [_arguments_363]

* **Request (object):**

    * **`application` (string)**: A list of applications. To clear all applications, use an asterisk (`*`). It does not support other wildcard patterns.



### clear_cached_realms [_clear_cached_realms]

Clear the user cache.

Evict users from the user cache. You can completely clear the cache or evict specific users.

User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. There are realm settings that you can use to configure the user cache. For more information, refer to the documentation about controlling the user cache.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms)

```ts
client.security.clearCachedRealms({ realms })
```


### Arguments [_arguments_364]

* **Request (object):**

    * **`realms` (string | string[])**: A list of realms.
To clear all realms, use an asterisk (`*`). It does not support other wildcard patterns. + * **`usernames` (Optional, string[])**: A list of the users to clear from the cache. If you do not specify this parameter, the API evicts all users from the user cache. + + + +### clear_cached_roles [_clear_cached_roles] + +Clear the roles cache. + +Evict roles from the native role cache. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles) + +```ts +client.security.clearCachedRoles({ name }) +``` + + +### Arguments [_arguments_365] + +* **Request (object):** + + * **`name` (string | string[])**: A list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard patterns. + + + +### clear_cached_service_tokens [_clear_cached_service_tokens] + +Clear service account token caches. + +Evict a subset of all entries from the service account token caches. Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. This API clears matching entries from both caches. + +The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens) + +```ts +client.security.clearCachedServiceTokens({ namespace, service, name }) +``` + + +### Arguments [_arguments_366] + +* **Request (object):** + + * **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. + * **`service` (string)**: The name of the service, which must be unique within its namespace. + * **`name` (string | string[])**: A list of token names to evict from the service account token caches. Use a wildcard (`*`) to evict all tokens that belong to a service account. It does not support other wildcard patterns. + + + +### create_api_key [_create_api_key] + +Create an API key. + +Create an API key for access without requiring basic authentication. + +::::{important} +If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error. +:::: + + +A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. + +::::{note} +By default, API keys never expire. You can specify expiration information when you create the API keys. +:::: + + +The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) + +```ts +client.security.createApiKey({ ... }) +``` + + +### Arguments [_arguments_367] + +* **Request (object):** + + * **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. By default, API keys never expire. + * **`name` (Optional, string)**: A name for the API key. + * **`role_descriptors` (Optional, Record)**: An array of role descriptors for this API key. 
When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user’s permissions, thereby limiting the access scope for API keys. The structure of a role descriptor is the same as the request for the create role API. For more details, refer to the create or update roles API.

::::{note}
Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. In this case, you must explicitly specify a role descriptor with no privileges. The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs.
::::

    * **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage.
    * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.



### create_cross_cluster_api_key [_create_cross_cluster_api_key]

Create a cross-cluster API key.

Create an API key of the `cross_cluster` type for the API key based remote cluster access. A `cross_cluster` API key cannot be used to authenticate through the REST interface.

::::{important}
To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error.
::::


Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled.

::::{note}
Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property.
::::


A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds.

By default, API keys never expire. You can specify expiration information when you create the API keys.

Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key)

```ts
client.security.createCrossClusterApiKey({ access, name })
```


### Arguments [_arguments_368]

* **Request (object):**

    * **`access` ({ replication, search })**: The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At least one of them must be specified.

::::{note}
No explicit privileges should be specified for either search or replication access. The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly.
::::

    * **`name` (string)**: Specifies the name for this API key.

### create_cross_cluster_api_key [_create_cross_cluster_api_key]

Create a cross-cluster API key.

Create an API key of the `cross_cluster` type for the API key based remote cluster access. A `cross_cluster` API key cannot be used to authenticate through the REST interface.

::::{important}
To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error.
::::


Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled.

::::{note}
Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property.
::::


A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds.

By default, API keys never expire. You can specify expiration information when you create the API keys.

Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key)

```ts
client.security.createCrossClusterApiKey({ access, name })
```


### Arguments [_arguments_368]

* **Request (object):**

  * **`access` ({ replication, search })**: The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At least one of them must be specified.

::::{note}
No explicit privileges should be specified for either search or replication access. The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly.
::::

  * **`name` (string)**: Specifies the name for this API key.
  * **`expiration` (Optional, string | -1 | 0)**: Expiration time for the API key. By default, API keys never expire.
  * **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage.


### create_service_token [_create_service_token]

Create a service account token.

Create a service account token for access without requiring basic authentication.

::::{note}
Service account tokens never expire. You must actively delete them if they are no longer needed.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token)

```ts
client.security.createServiceToken({ namespace, service })
```


### Arguments [_arguments_369]

* **Request (object):**

  * **`namespace` (string)**: The name of the namespace, which is a top-level grouping of service accounts.
  * **`service` (string)**: The name of the service.
  * **`name` (Optional, string)**: The name for the service account token. If omitted, a random name will be generated. Token names must be at least one and no more than 256 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore.

::::{note}
Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`.
::::

  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
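As a quick sketch, assuming a configured `client` (the token name is illustrative; omit `name` to have one generated):

```ts
// Sketch: mint a token for the elastic/fleet-server service account.
const result = await client.security.createServiceToken({
  namespace: 'elastic',
  service: 'fleet-server',
  name: 'my-fleet-token' // hypothetical; omit to generate a random name
})
// result.token.value is the bearer secret; it is only returned once.
console.log(result.token.name, result.token.value)
```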

### delegate_pki [_delegate_pki]

Delegate PKI authentication.

This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm.

This API is called by smart and trusted proxies, such as Kibana, which terminate the user’s TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch.

::::{important}
The association between the subject public key in the target certificate and the corresponding private key is not validated. This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki)

```ts
client.security.delegatePki({ x509_certificate_chain })
```


### Arguments [_arguments_370]

* **Request (object):**

  * **`x509_certificate_chain` (string[])**: The X509Certificate chain, which is represented as an ordered string array. Each string in the array is the base64-encoded (Section 4 of RFC 4648, not base64url-encoded) representation of the certificate’s DER encoding. The first element is the target certificate that contains the subject distinguished name that is requesting access. This may be followed by additional certificates; each subsequent certificate is used to certify the previous one.


### delete_privileges [_delete_privileges]

Delete application privileges.

To use this API, you must have one of the following privileges:

* The `manage_security` cluster privilege (or a greater privilege such as `all`).
* The "Manage Application Privileges" global privilege for the application being referenced in the request.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges)

```ts
client.security.deletePrivileges({ application, name })
```


### Arguments [_arguments_371]

* **Request (object):**

  * **`application` (string)**: The name of the application. Application privileges are always associated with exactly one application.
  * **`name` (string | string[])**: The name of the privilege.
  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.


### delete_role [_delete_role]

Delete roles.

Delete roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The delete roles API cannot remove roles that are defined in roles files.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role)

```ts
client.security.deleteRole({ name })
```


### Arguments [_arguments_372]

* **Request (object):**

  * **`name` (string)**: The name of the role.
  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.


### delete_role_mapping [_delete_role_mapping]

Delete role mappings.

Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The delete role mappings API cannot remove role mappings that are defined in role mapping files.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping)

```ts
client.security.deleteRoleMapping({ name })
```


### Arguments [_arguments_373]

* **Request (object):**

  * **`name` (string)**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way.
  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
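For instance, a sketch with a hypothetical mapping name, waiting until the deletion is visible to search:

```ts
// Sketch: remove a role mapping created through the role mapping APIs.
const res = await client.security.deleteRoleMapping({
  name: 'ldap-admins', // hypothetical mapping name
  refresh: 'wait_for'
})
console.log(res.found) // false if no mapping by that name existed
```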

### delete_service_token [_delete_service_token]

Delete service account tokens.

Delete service account tokens for a service in a specified namespace.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token)

```ts
client.security.deleteServiceToken({ namespace, service, name })
```


### Arguments [_arguments_374]

* **Request (object):**

  * **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts.
  * **`service` (string)**: The service name.
  * **`name` (string)**: The name of the service account token.
  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.


### delete_user [_delete_user]

Delete users.

Delete users from the native realm.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user)

```ts
client.security.deleteUser({ username })
```


### Arguments [_arguments_375]

* **Request (object):**

  * **`username` (string)**: An identifier for the user.
  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.


### disable_user [_disable_user]

Disable users.

Disable users in the native realm. By default, when you create users, they are enabled. You can use this API to revoke a user’s access to Elasticsearch.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user)

```ts
client.security.disableUser({ username })
```


### Arguments [_arguments_376]

* **Request (object):**

  * **`username` (string)**: An identifier for the user.
  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.


### disable_user_profile [_disable_user_profile]

Disable a user profile.

Disable user profiles so that they are not visible in user profile searches.

::::{note}
The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
::::


When you activate a user profile, it’s automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches. To re-enable a disabled user profile, use the enable user profile API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile)

```ts
client.security.disableUserProfile({ uid })
```


### Arguments [_arguments_377]

* **Request (object):**

  * **`uid` (string)**: Unique identifier for the user profile.
+ * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, it does nothing with refreshes. + + + +### enable_user [_enable_user] + +Enable users. + +Enable users in the native realm. By default, when you create users, they are enabled. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user) + +```ts +client.security.enableUser({ username }) +``` + + +### Arguments [_arguments_378] + +* **Request (object):** + + * **`username` (string)**: An identifier for the user. + * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + + + +### enable_user_profile [_enable_user_profile] + +Enable a user profile. + +Enable user profiles to make them visible in user profile searches. + +::::{note} +The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. +:::: + + +When you activate a user profile, it’s automatically enabled and visible in user profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile) + +```ts +client.security.enableUserProfile({ uid }) +``` + + +### Arguments [_arguments_379] + +* **Request (object):** + + * **`uid` (string)**: A unique identifier for the user profile. + * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, nothing is done with refreshes. + + + +### enroll_kibana [_enroll_kibana] + +Enroll Kibana. + +Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. + +::::{note} +This API is currently intended for internal use only by Kibana. Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana) + +```ts +client.security.enrollKibana() +``` + + +### enroll_node [_enroll_node] + +Enroll a node. + +Enroll a new node to allow it to join an existing cluster with security features enabled. + +The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. 
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node)

```ts
client.security.enrollNode()
```


### get_api_key [_get_api_key]

Get API key information.

Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key)

```ts
client.security.getApiKey({ ... })
```


### Arguments [_arguments_380]

* **Request (object):**

  * **`id` (Optional, string)**: An API key id. This parameter cannot be used with any of `name`, `realm_name` or `username`.
  * **`name` (Optional, string)**: An API key name. This parameter cannot be used with any of `id`, `realm_name` or `username`. It supports prefix search with wildcard.
  * **`owner` (Optional, boolean)**: A boolean flag that can be used to query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones.
  * **`realm_name` (Optional, string)**: The name of an authentication realm. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`.
  * **`username` (Optional, string)**: The username of a user. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`.
  * **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user’s role descriptors associated with the API key. An API key’s actual permission is the intersection of its assigned role descriptors and the owner user’s role descriptors.
  * **`active_only` (Optional, boolean)**: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys.
  * **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists.
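A small sketch, assuming a configured `client`:

```ts
// Sketch: list only the active API keys owned by the calling user.
const keys = await client.security.getApiKey({
  owner: true,
  active_only: true,
  with_limited_by: true
})
for (const key of keys.api_keys) {
  console.log(key.id, key.name, key.invalidated)
}
```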

### get_builtin_privileges [_get_builtin_privileges]

Get builtin privileges.

Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges)

```ts
client.security.getBuiltinPrivileges()
```


### get_privileges [_get_privileges]

Get application privileges.

To use this API, you must have one of the following privileges:

* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`).
* The "Manage Application Privileges" global privilege for the application being referenced in the request.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges)

```ts
client.security.getPrivileges({ ... })
```


### Arguments [_arguments_381]

* **Request (object):**

  * **`application` (Optional, string)**: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications.
  * **`name` (Optional, string | string[])**: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application.


### get_role [_get_role]

Get roles.

Get roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role)

```ts
client.security.getRole({ ... })
```


### Arguments [_arguments_382]

* **Request (object):**

  * **`name` (Optional, string | string[])**: The name of the role. You can specify multiple roles as a list. If you do not specify this parameter, the API returns information about all roles.
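For example, a sketch with hypothetical role names (omit `name` to fetch every role):

```ts
// Sketch: fetch two specific roles; the response is keyed by role name.
const roles = await client.security.getRole({
  name: ['logs-read', 'metrics-read'] // hypothetical names
})
console.log(Object.keys(roles))
```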

### get_role_mapping [_get_role_mapping]

Get role mappings.

Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping)

```ts
client.security.getRoleMapping({ ... })
```


### Arguments [_arguments_383]

* **Request (object):**

  * **`name` (Optional, string | string[])**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings.


### get_service_accounts [_get_service_accounts]

Get service accounts.

Get a list of service accounts that match the provided path parameters.

::::{note}
Currently, only the `elastic/fleet-server` service account is available.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts)

```ts
client.security.getServiceAccounts({ ... })
```


### Arguments [_arguments_384]

* **Request (object):**

  * **`namespace` (Optional, string)**: The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter.
  * **`service` (Optional, string)**: The service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`.


### get_service_credentials [_get_service_credentials]

Get service account credentials.

To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`).

The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster.

::::{note}
For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials)

```ts
client.security.getServiceCredentials({ namespace, service })
```


### Arguments [_arguments_385]

* **Request (object):**

  * **`namespace` (string)**: The name of the namespace.
  * **`service` (string)**: The service name.


### get_settings [_get_settings_3]

Get security index settings.

Get the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of the index settings (those that are user-configurable) will be shown. This includes:

* `index.auto_expand_replicas`
* `index.number_of_replicas`

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings)

```ts
client.security.getSettings({ ... })
```


### Arguments [_arguments_386]

* **Request (object):**

  * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.


### get_token [_get_token]

Get a token.

Create a bearer token for access without requiring basic authentication. The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting. When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.

The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.

A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available.

The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. That time period is defined by the `xpack.security.authc.token.timeout` setting. If you want to invalidate a token immediately, you can do so by using the invalidate token API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token)

```ts
client.security.getToken({ ... })
```


### Arguments [_arguments_387]

* **Request (object):**

  * **`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))**: The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`.
  * **`scope` (Optional, string)**: The scope of the token. Currently tokens are only issued for a scope of FULL regardless of the value sent with the request.
  * **`password` (Optional, string)**: The user’s password. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type.
  * **`kerberos_ticket` (Optional, string)**: The base64 encoded kerberos ticket. If you specify the `_kerberos` grant type, this parameter is required. This parameter is not valid with any other supported grant type.
  * **`refresh_token` (Optional, string)**: The string that was returned when you created the token, which enables you to extend its life. If you specify the `refresh_token` grant type, this parameter is required. This parameter is not valid with any other supported grant type.
  * **`username` (Optional, string)**: The username that identifies the user. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type.
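A sketch of the two most common grant types, assuming a configured `client` (the credentials are placeholders):

```ts
// Sketch: exchange a username and password for a bearer token,
// then use the refresh token to extend its life.
const granted = await client.security.getToken({
  grant_type: 'password',
  username: 'test-user', // hypothetical credentials
  password: 'x-pack-test-password'
})
const refreshed = await client.security.getToken({
  grant_type: 'refresh_token',
  refresh_token: granted.refresh_token
})
console.log(refreshed.access_token, refreshed.expires_in)
```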

### get_user [_get_user]

Get users.

Get information about users in the native realm and built-in users.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user)

```ts
client.security.getUser({ ... })
```


### Arguments [_arguments_388]

* **Request (object):**

  * **`username` (Optional, string | string[])**: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users.
  * **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users.


### get_user_privileges [_get_user_privileges]

Get user privileges.

Get the security privileges for the logged in user. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. To check whether a user has a specific list of privileges, use the has privileges API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges)

```ts
client.security.getUserPrivileges({ ... })
```


### Arguments [_arguments_389]

* **Request (object):**

  * **`application` (Optional, string)**: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications.
  * **`priviledge` (Optional, string)**: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application.
  * **`username` (Optional, string | null)**


### get_user_profile [_get_user_profile]

Get a user profile.

Get a user’s profile using the unique profile ID.

::::{note}
The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile)

```ts
client.security.getUserProfile({ uid })
```


### Arguments [_arguments_390]

* **Request (object):**

  * **`uid` (string | string[])**: A unique identifier for the user profile.
  * **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=` to retrieve content nested under the specified ``. By default returns no `data` content.

### grant_api_key [_grant_api_key]

Grant an API key.

Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user’s credentials. The supported user authentication credential types are:

* username and password
* Elasticsearch access tokens
* JWTs

The user for whom the authentication credentials are provided can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user.

This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. The API keys are created by the Elasticsearch API key service, which is automatically enabled.

A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds.

By default, API keys never expire. You can specify expiration information when you create the API keys.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key)

```ts
client.security.grantApiKey({ api_key, grant_type })
```


### Arguments [_arguments_391]

* **Request (object):**

  * **`api_key` ({ name, expiration, role_descriptors, metadata })**: The API key.
  * **`grant_type` (Enum("access_token" | "password"))**: The type of grant. Supported grant types are: `access_token`, `password`.
  * **`access_token` (Optional, string)**: The user’s access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types.
  * **`username` (Optional, string)**: The user name that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types.
  * **`password` (Optional, string)**: The user’s password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types.
  * **`run_as` (Optional, string)**: The name of the user to be impersonated.
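For example, a sketch that grants a key on behalf of another user, given that user’s password (all names and credentials below are placeholders):

```ts
// Sketch: the key is created for 'end-user', not for the caller.
const granted = await client.security.grantApiKey({
  grant_type: 'password',
  username: 'end-user',
  password: 'end-user-password',
  api_key: {
    name: 'on-behalf-key', // hypothetical key name
    expiration: '7d'
  }
})
console.log(granted.id, granted.api_key)
```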

### has_privileges [_has_privileges]

Check user privileges.

Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges)

```ts
client.security.hasPrivileges({ ... })
```


### Arguments [_arguments_392]

* **Request (object):**

  * **`user` (Optional, string)**: Username
  * **`application` (Optional, { application, privileges, resources }[])**
  * **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check.
  * **`index` (Optional, { names, privileges, allow_restricted_indices }[])**
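A small sketch, assuming a configured `client` (the index pattern is hypothetical):

```ts
// Sketch: check whether the calling user can monitor the cluster and
// write to a given index pattern.
const check = await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [{ names: ['logs-*'], privileges: ['write'] }]
})
console.log(check.has_all_requested)
```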

### has_privileges_user_profile [_has_privileges_user_profile]

Check user profile privileges.

Determine whether the users associated with the specified user profile IDs have all the requested privileges.

::::{note}
The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile)

```ts
client.security.hasPrivilegesUserProfile({ uids, privileges })
```


### Arguments [_arguments_393]

* **Request (object):**

  * **`uids` (string[])**: A list of profile IDs. The privileges are checked for associated users of the profiles.
  * **`privileges` ({ application, cluster, index })**: An object containing all the privileges to be checked.


### invalidate_api_key [_invalidate_api_key]

Invalidate API keys.

This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted.

To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats:

* Set the parameter `owner=true`.
* Or, set both `username` and `realm_name` to match the user’s identity.
* Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key)

```ts
client.security.invalidateApiKey({ ... })
```


### Arguments [_arguments_394]

* **Request (object):**

  * **`id` (Optional, string)**
  * **`ids` (Optional, string[])**: A list of API key ids. This parameter cannot be used with any of `name`, `realm_name`, or `username`.
  * **`name` (Optional, string)**: An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`.
  * **`owner` (Optional, boolean)**: Query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones.

::::{note}
At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`.
::::

  * **`realm_name` (Optional, string)**: The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`.
  * **`username` (Optional, string)**: The username of a user. This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`.


### invalidate_token [_invalidate_token]

Invalidate a token.

The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting.

The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API.

::::{note}
While all parameters are optional, at least one of them is required. More specifically, either one of `token` or `refresh_token` parameters is required. If none of these two are specified, then `realm_name` and/or `username` need to be specified.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token)

```ts
client.security.invalidateToken({ ... })
```


### Arguments [_arguments_395]

* **Request (object):**

  * **`token` (Optional, string)**: An access token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used.
  * **`refresh_token` (Optional, string)**: A refresh token. This parameter cannot be used if any of `token`, `realm_name`, or `username` are used.
  * **`realm_name` (Optional, string)**: The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`.
  * **`username` (Optional, string)**: The username of a user. This parameter cannot be used with either `refresh_token` or `token`.


### oidc_authenticate [_oidc_authenticate]

Authenticate OpenID Connect.

Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.
+ +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate) + +```ts +client.security.oidcAuthenticate({ nonce, redirect_uri, state }) +``` + + +### Arguments [_arguments_396] + +* **Request (object):** + + * **`nonce` (string)**: Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. + * **`redirect_uri` (string)**: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. + * **`state` (string)**: Maintain state between the authentication request and the response. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. + * **`realm` (Optional, string)**: The name of the OpenID Connect realm. This property is useful in cases where multiple realms are defined. + + + +### oidc_logout [_oidc_logout] + +Logout of OpenID Connect. + +Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. + +If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. + +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout) + +```ts +client.security.oidcLogout({ access_token }) +``` + + +### Arguments [_arguments_397] + +* **Request (object):** + + * **`access_token` (string)**: The access token to be invalidated. + * **`refresh_token` (Optional, string)**: The refresh token to be invalidated. + + + +### oidc_prepare_authentication [_oidc_prepare_authentication] + +Prepare OpenID connect authentication. + +Create an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. + +The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. + +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication)

```ts
client.security.oidcPrepareAuthentication({ ... })
```


### Arguments [_arguments_398]

* **Request (object):**

  * **`iss` (Optional, string)**: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. It cannot be specified when **realm** is specified. One of **realm** or **iss** is required.
  * **`login_hint` (Optional, string)**: In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the **login_hint** parameter. This parameter is not valid when **realm** is specified.
  * **`nonce` (Optional, string)**: The value used to associate a client session with an ID token and to mitigate replay attacks. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response.
  * **`realm` (Optional, string)**: The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. It cannot be specified when **iss** is specified. One of **realm** or **iss** is required.
  * **`state` (Optional, string)**: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response.


### put_privileges [_put_privileges]

Create or update application privileges.

To use this API, you must have one of the following privileges:

* The `manage_security` cluster privilege (or a greater privilege such as `all`).
* The "Manage Application Privileges" global privilege for the application being referenced in the request.

Application names are formed from a prefix, with an optional suffix, that conform to the following rules:

* The prefix must begin with a lowercase ASCII letter.
* The prefix must contain only ASCII letters or digits.
* The prefix must be at least 3 characters long.
* If the suffix exists, it must begin with either a dash `-` or `_`.
* The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `*`.
* No part of the name can contain whitespace.

Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`.

Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges)

```ts
client.security.putPrivileges({ ... })
```


### Arguments [_arguments_399]

* **Request (object):**

  * **`privileges` (Optional, Record)**
  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
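As an illustration of the naming rules above, a sketch with a hypothetical application called `myapp` (assuming a configured `client`):

```ts
// Sketch: register a custom `read` privilege; each action name contains
// one of the required characters `/`, `*`, or `:`.
await client.security.putPrivileges({
  privileges: {
    myapp: {
      read: { actions: ['data:read/*', 'action:login'] }
    }
  }
})
```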

### put_role [_put_role]

Create or update roles.

The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role)

```ts
client.security.putRole({ name })
```


### Arguments [_arguments_400]

* **Request (object):**

  * **`name` (string)**: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters *_*, *-*, and *.*. Each role must have a unique name, as this will serve as the identifier for that role.
  * **`applications` (Optional, { application, privileges, resources }[])**: A list of application privilege entries.
  * **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role.
  * **`global` (Optional, Record)**: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges.
  * **`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])**: A list of indices permissions entries.
  * **`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])**: A list of remote indices permissions entries.

::::{note}
Remote indices are effective for remote clusters configured with the API key based model. They have no effect for remote clusters configured with the certificate based model.
::::

  * **`remote_cluster` (Optional, { clusters, privileges }[])**: A list of remote cluster permissions entries.
  * **`metadata` (Optional, Record)**: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use.
  * **`run_as` (Optional, string[])**: A list of users that the owners of this role can impersonate. **Note**: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected.
  * **`description` (Optional, string)**: Optional description of the role descriptor.
  * **`transient_metadata` (Optional, Record)**: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API.
  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
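For example, a sketch of a role granting cluster monitoring plus restricted read access (role name, index pattern, and granted fields are hypothetical):

```ts
// Sketch: read access to a single index pattern, limited to two fields.
await client.security.putRole({
  name: 'logs-read', // hypothetical role name
  cluster: ['monitor'],
  indices: [{
    names: ['logs-*'],
    privileges: ['read'],
    field_security: { grant: ['@timestamp', 'message'] }
  }]
})
```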

### put_role_mapping [_put_role_mapping]

Create or update role mappings.

Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.

::::{note}
This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files.
::::


**Role templates**

The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. The `roles` field is used for this purpose.

For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. The `role_templates` field is used for this purpose.

::::{note}
To use role templates successfully, the relevant scripting feature must be enabled. Otherwise, all attempts to create a role mapping with role templates fail.
::::


All of the user fields that are available in the role mapping rules are also available in the role templates. Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.

By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping)

```ts
client.security.putRoleMapping({ name })
```


### Arguments [_arguments_401]

* **Request (object):**

  * **`name` (string)**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way.
  * **`enabled` (Optional, boolean)**: Mappings that have `enabled` set to `false` are ignored when role mapping is performed.
  * **`metadata` (Optional, Record)**: Additional metadata that helps define which roles are assigned to each user. Within the metadata object, keys beginning with `_` are reserved for system usage.
  * **`roles` (Optional, string[])**: A list of role names that are granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified.
  * **`role_templates` (Optional, { format, template }[])**: A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified.
  * **`rules` (Optional, { any, all, field, except })**: The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL.
  * **`run_as` (Optional, string[])**
  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
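A sketch of the common fixed-role case described above, reusing the LDAP group from the example (the mapping name is hypothetical):

```ts
// Sketch: grant a fixed role to every user in one LDAP group.
await client.security.putRoleMapping({
  name: 'ldap-admins', // hypothetical mapping name
  enabled: true,
  roles: ['superuser'],
  rules: { field: { groups: 'cn=admin,dc=example,dc=com' } }
})
```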

### put_user [_put_user]

Create or update users.

Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. To change a user’s password without updating any other fields, use the change password API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user)

```ts
client.security.putUser({ username })
```


### Arguments [_arguments_402]

* **Request (object):**

  * **`username` (string)**: An identifier for the user.

::::{note}
Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed.
::::

  * **`email` (Optional, string | null)**: The email of the user.
  * **`full_name` (Optional, string | null)**: The full name of the user.
  * **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the user.
  * **`password` (Optional, string)**: The user’s password. Passwords must be at least 6 characters long. When adding a user, one of `password` or `password_hash` is required. When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user’s password.
  * **`password_hash` (Optional, string)**: A hash of the user’s password. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. The `password` parameter and the `password_hash` parameter cannot be used in the same request.
  * **`roles` (Optional, string[])**: A set of roles the user has. The roles determine the user’s access permissions. To create a user without any roles, specify an empty list (`[]`).
  * **`enabled` (Optional, boolean)**: Specifies whether the user is enabled.
  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: Valid values are `true`, `false`, and `wait_for`. These values have the same meaning as in the index API, but the default value for this API is `true`.
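For example, a sketch that creates (or updates) a native-realm user; the credentials and role name are placeholders:

```ts
// Sketch: create a user and assign a single pre-existing role.
await client.security.putUser({
  username: 'jacknich', // hypothetical user
  password: 'l0ng-r4nd0m-p@ssw0rd',
  roles: ['logs-read'], // hypothetical role name
  full_name: 'Jack Nicholson',
  email: 'jacknich@example.com'
})
```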

### query_api_keys [_query_api_keys]

Find API keys with a query.

Get a paginated list of API keys and their information. You can optionally filter the results with a query.

To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys)

```ts
client.security.queryApiKeys({ ... })
```


### Arguments [_arguments_403]

* **Request (object):**

  * **`aggregations` (Optional, Record)**: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with.
  * **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`.

::::{note}
The queryable string values associated with API keys are internally mapped as keywords. Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. Such a match query is hence equivalent to a `term` query.
::::

  * **`from` (Optional, number)**: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
  * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. Other than `id`, all public fields of an API key are eligible for sorting. In addition, sort can also be applied to the `_doc` field to sort by index order.
  * **`size` (Optional, number)**: The number of hits to return. It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
  * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: The search after definition.
  * **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user’s role descriptors associated with the API key. An API key’s actual permission is the intersection of its assigned role descriptors and the owner user’s role descriptors (effectively limited by it). An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges.
  * **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile UID for the API key owner principal. If it exists, the profile UID is returned under the `profile_uid` response field for each API key.
  * **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response.
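A paging sketch using the query DSL subset described above (the name prefix is hypothetical):

```ts
// Sketch: page through matching keys by name prefix, newest first.
const page = await client.security.queryApiKeys({
  query: { prefix: { name: 'ingest-' } }, // hypothetical name prefix
  sort: [{ creation: 'desc' }],
  size: 25
})
console.log(page.total, page.api_keys.length)
```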
### query_role [_query_role]

Find roles with a query.

Get roles in a paginated manner. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. You can optionally filter the results with a query. Also, the results can be paginated and sorted.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role)

```ts
client.security.queryRole({ ... })
```


### Arguments [_arguments_404]

* **Request (object):**

    * **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`.
    * **`from` (Optional, number)**: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
    * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. You can sort on `username`, `roles`, or `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order.
    * **`size` (Optional, number)**: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
    * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: The search after definition.
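A hedged sketch (the `app-` prefix is invented) of paging through roles by name:

```ts
// Hypothetical example: fetch roles whose name starts with "app-".
const response = await client.security.queryRole({
  query: { prefix: { name: 'app-' } },
  sort: ['name'],
  size: 50,
})
console.log(response.roles)
```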
### query_user [_query_user]

Find users with a query.

Get information for users in a paginated manner. You can optionally filter the results with a query.

::::{note}
As opposed to the get user API, built-in users are excluded from the result. This API is only for native users.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user)

```ts
client.security.queryUser({ ... })
```


### Arguments [_arguments_405]

* **Request (object):**

    * **`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`.
    * **`from` (Optional, number)**: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
    * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. Fields eligible for sorting are: `username`, `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order.
    * **`size` (Optional, number)**: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
    * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: The search after definition.
    * **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users.



### saml_authenticate [_saml_authenticate]

Authenticate SAML.

Submit a SAML response message to Elasticsearch for consumption.

::::{note}
This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
::::


The SAML message that is submitted can be:

* A response to a SAML authentication request that was previously created using the SAML prepare authentication API.
* An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow.

In either case, the SAML message needs to be a base64 encoded XML document with a root element of `<Response>`.

After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate)

```ts
client.security.samlAuthenticate({ content, ids })
```


### Arguments [_arguments_406]

* **Request (object):**

    * **`content` (string)**: The SAML response as it was sent by the user’s browser, usually a Base64 encoded XML document.
    * **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
    * **`realm` (Optional, string)**: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined.
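A hedged sketch of the token exchange (the request ID and response content are invented placeholders):

```ts
// Hypothetical example: exchange a SAML response for Elasticsearch tokens.
const samlResponse = '<Base64-encoded SAML Response posted back by the IdP>'
const response = await client.security.samlAuthenticate({
  content: samlResponse,
  ids: ['4fee3b046395c4e751011e97f8900b5273d56685'], // from prepare authentication
})
// The returned tokens can then be used as bearer credentials.
console.log(response.access_token, response.refresh_token)
```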
### saml_complete_logout [_saml_complete_logout]

Logout of SAML completely.

Verifies the logout response sent from the SAML IdP.

::::{note}
This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
::::


The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. This API verifies the response by ensuring the content is relevant and validating its signature. An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller of this API must prepare the request accordingly so that this API can handle either of them.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout)

```ts
client.security.samlCompleteLogout({ realm, ids })
```


### Arguments [_arguments_407]

* **Request (object):**

    * **`realm` (string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response.
    * **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
    * **`query_string` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI.
    * **`content` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response.



### saml_invalidate [_saml_invalidate]

Invalidate SAML.

Submit a SAML LogoutRequest message to Elasticsearch for consumption.

::::{note}
This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
::::


The logout request comes from the SAML IdP during an IdP initiated Single Logout. The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate)

```ts
client.security.samlInvalidate({ query_string })
```


### Arguments [_arguments_408]

* **Request (object):**

    * **`query_string` (string)**: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. In order for Elasticsearch to be able to verify the IdP’s signature, the value of the `query_string` field must be an exact match to the string provided by the browser. The client application must not attempt to parse or process the string in any way.
    * **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter.
    * **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch whose configuration should be used. You must specify either this parameter or the `acs` parameter.
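As a hedged sketch (realm name and query string are invented placeholders):

```ts
// Hypothetical example: process an IdP-initiated SAML logout request.
const queryString = 'SAMLRequest=fZHLasMwEEV...' // byte-for-byte from the browser
const response = await client.security.samlInvalidate({
  query_string: queryString,
  realm: 'saml1',
})
// Redirect the user to this URL, which carries the SAML LogoutResponse.
console.log(response.redirect)
```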
### saml_logout [_saml_logout]

Logout of SAML.

Submits a request to invalidate an access token and refresh token.

::::{note}
This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
::::


This API invalidates the tokens that were generated for a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout)

```ts
client.security.samlLogout({ token })
```


### Arguments [_arguments_409]

* **Request (object):**

    * **`token` (string)**: The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`.
    * **`refresh_token` (Optional, string)**: The refresh token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent refresh token that was received after refreshing the original access token.



### saml_prepare_authentication [_saml_prepare_authentication]

Prepare SAML authentication.

Create a SAML authentication request (`<AuthnRequest>`) as a URL string based on the configuration of the respective SAML realm in Elasticsearch.

::::{note}
This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
::::


This API returns a URL pointing to the SAML Identity Provider. You can use the URL to redirect the browser of the user in order to continue the authentication process. The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded. If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`. These parameters contain the algorithm used for the signature and the signature value itself. It also returns a random string that uniquely identifies this SAML Authentication request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication)

```ts
client.security.samlPrepareAuthentication({ ... })
```


### Arguments [_arguments_410]

* **Request (object):**

    * **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter.
    * **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. You must specify either this parameter or the `acs` parameter.
    * **`relay_state` (Optional, string)**: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. If the Authentication Request is signed, this value is used as part of the signature computation.
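A hedged sketch of starting an SP-initiated flow (realm name invented):

```ts
// Hypothetical example: build the IdP redirect URL for SSO.
const response = await client.security.samlPrepareAuthentication({
  realm: 'saml1',
})
// Store response.id: it must later be passed in `ids` to samlAuthenticate.
// Then redirect the user's browser to response.redirect.
console.log(response.id, response.redirect)
```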
### saml_service_provider_metadata [_saml_service_provider_metadata]

Create SAML service provider metadata.

Generate SAML metadata for a SAML 2.0 Service Provider.

The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata)

```ts
client.security.samlServiceProviderMetadata({ realm_name })
```


### Arguments [_arguments_411]

* **Request (object):**

    * **`realm_name` (string)**: The name of the SAML realm in Elasticsearch.



### suggest_user_profiles [_suggest_user_profiles]

Suggest a user profile.

Get suggestions for user profiles that match specified search criteria.

::::{note}
The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles)

```ts
client.security.suggestUserProfiles({ ... })
```


### Arguments [_arguments_412]

* **Request (object):**

    * **`name` (Optional, string)**: A query string used to match name-related fields in user profile documents. Name-related fields are the user’s `username`, `full_name`, and `email`.
    * **`size` (Optional, number)**: The number of profiles to return.
    * **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`. By default, the API returns no `data` content. It is an error to specify `data` as both the query parameter and the request body field.
    * **`hint` (Optional, { uids, labels })**: Extra search criteria to improve relevance of the suggestion result. Profiles matching the specified hint are ranked higher in the response. Profiles not matching the hint aren’t excluded from the response as long as the profile matches the `name` field query.
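For illustration (the name fragment is invented), suggesting profiles might look like:

```ts
// Hypothetical example: suggest up to five profiles matching "jack".
const response = await client.security.suggestUserProfiles({
  name: 'jack',
  size: 5,
})
console.log(response.profiles)
```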
### update_api_key [_update_api_key]

Update an API key.

Update attributes of an existing API key. This API supports updates to an API key’s access scope, expiration, and metadata.

To use this API, you must have at least the `manage_own_api_key` cluster privilege. Users can only update API keys that they created or that were granted to them. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.

::::{important}
It’s not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required.
::::


Use this API to update API keys created by the create API key or grant API Key APIs. If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. It’s not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.

The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call.

::::{important}
If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key)

```ts
client.security.updateApiKey({ id })
```


### Arguments [_arguments_413]

* **Request (object):**

    * **`id` (string)**: The ID of the API key to update.
    * **`role_descriptors` (Optional, Record)**: The role descriptors to assign to this API key. The API key’s effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user’s full permissions. The snapshot of the owner’s permissions is always updated, whether you supply the `role_descriptors` parameter or not. The structure of a role descriptor is the same as the request for the create API keys API.
    * **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this value fully replaces the metadata previously associated with the API key.
    * **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the expiration unchanged.
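A hedged sketch (ID and metadata invented) of replacing a key’s metadata while leaving its role descriptors untouched:

```ts
// Hypothetical example: update only the metadata of an existing API key.
const response = await client.security.updateApiKey({
  id: 'VuaCfGcBCdbkQm-e5aOx', // illustrative API key ID
  metadata: { environment: 'production', team: 'platform' },
})
console.log(response.updated) // false if the key was already in this state
```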
### update_cross_cluster_api_key [_update_cross_cluster_api_key]

Update a cross-cluster API key.

Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access.

To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.

::::{important}
It’s not possible to use an API key as the authentication credential for this API. To update an API key, the owner user’s credentials are required.
::::


It’s not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API.

This API supports updates to an API key’s access scope, metadata, and expiration. The owner user’s information, such as the `username` and `realm`, is also updated automatically on every call.

::::{note}
This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key)

```ts
client.security.updateCrossClusterApiKey({ id, access })
```


### Arguments [_arguments_414]

* **Request (object):**

    * **`id` (string)**: The ID of the cross-cluster API key to update.
    * **`access` ({ replication, search })**: The access to be granted to this API key. The access is composed of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access.
    * **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the value unchanged.
    * **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information fully replaces metadata previously associated with the API key.



### update_settings [_update_settings]

Update security index settings.

Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`.

::::{note}
If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates.
::::


If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings)

```ts
client.security.updateSettings({ ... })
```


### Arguments [_arguments_415]

* **Request (object):**

    * **`security` (Optional, { index })**: Settings for the index used for most security configuration, including native realm users and roles configured with the API.
    * **`security-profile` (Optional, { index })**: Settings for the index used to store profile information.
    * **`security-tokens` (Optional, { index })**: Settings for the index used to store tokens.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
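A hedged sketch (replica setting invented; note the hyphenated keys must be quoted in TypeScript):

```ts
// Hypothetical example: let two of the security indices auto-expand replicas.
const response = await client.security.updateSettings({
  security: { index: { auto_expand_replicas: '0-all' } },
  'security-tokens': { index: { auto_expand_replicas: '0-all' } },
})
console.log(response.acknowledged)
```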
### update_user_profile_data [_update_user_profile_data]

Update user profile data.

Update specific data for the user profile that is associated with a unique ID.

::::{note}
The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
::::


To use this API, you must have one of the following privileges:

* The `manage_user_profile` cluster privilege.
* The `update_profile_data` global privilege for the namespaces that are referenced in the request.

This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. New keys and their values are added to the profile document and conflicting keys are replaced by data that’s included in the request.

For both labels and data, content is namespaced by the top-level fields. The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data)

```ts
client.security.updateUserProfileData({ uid })
```


### Arguments [_arguments_416]

* **Request (object):**

    * **`uid` (string)**: A unique identifier for the user profile.
    * **`labels` (Optional, Record)**: Searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`).
    * **`data` (Optional, Record)**: Non-searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). The data object is not searchable, but can be retrieved with the get user profile API.
    * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
    * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
    * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, nothing is done with refreshes.
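A hedged sketch (UID and namespace invented) of adding namespaced data to a profile:

```ts
// Hypothetical example: store app-specific data under the "myapp" namespace.
const response = await client.security.updateUserProfileData({
  uid: 'u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0', // illustrative UID
  labels: { myapp: { theme: 'dark' } }, // searchable
  data: { myapp: { telemetry: { enabled: false } } }, // retrievable, not searchable
})
console.log(response.acknowledged)
```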
## shutdown [_shutdown]


### delete_node [_delete_node]

Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch.

::::{note}
This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
::::


If the operator privileges feature is enabled, you must be an operator to use this API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node)

```ts
client.shutdown.deleteNode({ node_id })
```


### Arguments [_arguments_417]

* **Request (object):**

    * **`node_id` (string)**: The node id of the node to be removed from the shutdown state.
    * **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### get_node [_get_node]

Get the shutdown status.

Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process.

::::{note}
This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
::::


If the operator privileges feature is enabled, you must be an operator to use this API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node)

```ts
client.shutdown.getNode({ ... })
```


### Arguments [_arguments_418]

* **Request (object):**

    * **`node_id` (Optional, string | string[])**: The node for which to retrieve the shutdown status.
    * **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.



### put_node [_put_node]

Prepare a node to be shut down.

::::{note}
This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
::::


If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster.

If the operator privileges feature is enabled, you must be an operator to use this API.

The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.

You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type.

::::{important}
This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node)

```ts
client.shutdown.putNode({ node_id, type, reason })
```


### Arguments [_arguments_419]

* **Request (object):**

    * **`node_id` (string)**: The node identifier. This parameter is not validated against the cluster’s active nodes. This enables you to register a node for shut down while it is offline. No error is thrown if you specify an invalid node ID.
    * **`type` (Enum("restart" | "remove" | "replace"))**: Valid values are restart, remove, or replace. Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. Because the node is expected to rejoin the cluster, data is not migrated off of the node. Use remove when you need to permanently remove a node from the cluster. The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node. Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete.
    * **`reason` (string)**: A human-readable reason that the node is being shut down. This field provides information for other cluster operators; it does not affect the shut down process.
    * **`allocation_delay` (Optional, string)**: Only valid if type is restart. Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used.
    * **`target_node_name` (Optional, string)**: Only valid if type is replace. Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules.
    * **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
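A hedged sketch (node ID invented) of registering a node for a rolling restart:

```ts
// Hypothetical example: prepare a node for a temporary restart.
const response = await client.shutdown.putNode({
  node_id: 'USpTGYaBSIKbgSUJR2Z9lg', // illustrative node ID
  type: 'restart',
  reason: 'Rolling restart for a configuration change',
  allocation_delay: '20m', // hold off shard reallocation while it restarts
})
console.log(response.acknowledged)
```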
## simulate [_simulate_2]


### ingest [_ingest_2]

Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.

This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.

The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index’s pipelines as well, in the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.

This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.

By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest)

```ts
client.simulate.ingest({ docs })
```


### Arguments [_arguments_420]

* **Request (object):**

    * **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline.
    * **`index` (Optional, string)**: The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument.
    * **`component_template_substitutions` (Optional, Record)**: A map of component template names to substitute component template definition objects.
    * **`index_template_subtitutions` (Optional, Record)**: A map of index template names to substitute index template definition objects.
    * **`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**
    * **`pipeline_substitutions` (Optional, Record)**: Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter.
    * **`pipeline` (Optional, string)**: The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index.
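A hedged sketch (index, pipeline, and document are invented) of testing a substitute pipeline without indexing anything:

```ts
// Hypothetical example: simulate ingest with a substitute lowercase processor.
const response = await client.simulate.ingest({
  index: 'my-index',
  docs: [{ _id: '1', _source: { message: 'HELLO WORLD' } }],
  pipeline_substitutions: {
    'my-pipeline': {
      processors: [{ lowercase: { field: 'message' } }],
    },
  },
})
console.log(response.docs) // transformed documents; nothing was indexed
```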
## slm [_slm]


### delete_lifecycle [_delete_lifecycle_2]

Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle)

```ts
client.slm.deleteLifecycle({ policy_id })
```


### Arguments [_arguments_421]

* **Request (object):**

    * **`policy_id` (string)**: The id of the snapshot lifecycle policy to remove.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### execute_lifecycle [_execute_lifecycle]

Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle)

```ts
client.slm.executeLifecycle({ policy_id })
```


### Arguments [_arguments_422]

* **Request (object):**

    * **`policy_id` (string)**: The id of the snapshot lifecycle policy to be executed.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### execute_retention [_execute_retention]

Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention)

```ts
client.slm.executeRetention({ ... })
```


### Arguments [_arguments_423]

* **Request (object):**

    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### get_lifecycle [_get_lifecycle_2] + +Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle) + +```ts +client.slm.getLifecycle({ ... }) +``` + + +### Arguments [_arguments_424] + +* **Request (object):** + + * **`policy_id` (Optional, string | string[])**: List of snapshot lifecycle policies to retrieve + * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### get_stats [_get_stats] + +Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats) + +```ts +client.slm.getStats({ ... }) +``` + + +### Arguments [_arguments_425] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### get_status [_get_status_3] + +Get the snapshot lifecycle management status. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status) + +```ts +client.slm.getStatus({ ... }) +``` + + +### Arguments [_arguments_426] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. + * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. + + + +### put_lifecycle [_put_lifecycle_2] + +Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle) + +```ts +client.slm.putLifecycle({ policy_id }) +``` + + +### Arguments [_arguments_427] + +* **Request (object):** + + * **`policy_id` (string)**: The identifier for the snapshot lifecycle policy you want to create or update. + * **`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })**: Configuration for each snapshot created by the policy. 
+ * **`name` (Optional, string)**: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. + * **`repository` (Optional, string)**: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. + * **`retention` (Optional, { expire_after, max_count, min_count })**: Retention rules used to retain and delete snapshots created by the policy. + * **`schedule` (Optional, string)**: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. + * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. + * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. + + + +### start [_start_2] + +Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start) + +```ts +client.slm.start({ ... }) +``` + + +### Arguments [_arguments_428] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. + * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. + + + +### stop [_stop_2] + +Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. + +The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop) + +```ts +client.slm.stop({ ... }) +``` + + +### Arguments [_arguments_429] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. + * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.



## snapshot [_snapshot]


### cleanup_repository [_cleanup_repository]

Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository)

```ts
client.snapshot.cleanupRepository({ repository })
```


### Arguments [_arguments_430]

* **Request (object):**

    * **`repository` (string)**: The name of the snapshot repository to clean up.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
    * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`.



### clone [_clone_2]

Clone a snapshot. Clone part or all of a snapshot into another snapshot in the same repository.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone)

```ts
client.snapshot.clone({ repository, snapshot, target_snapshot, indices })
```


### Arguments [_arguments_431]

* **Request (object):**

    * **`repository` (string)**: The name of the snapshot repository that both source and target snapshot belong to.
    * **`snapshot` (string)**: The source snapshot name.
    * **`target_snapshot` (string)**: The target snapshot name.
    * **`indices` (string)**: A list of indices to include in the snapshot. Multi-target syntax is supported.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
    * **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
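A hedged sketch (all names invented) of cloning a subset of indices into a new snapshot:

```ts
// Hypothetical example: clone two index patterns from an existing snapshot.
const response = await client.snapshot.clone({
  repository: 'my_repository',
  snapshot: 'snapshot_2025_01_01',
  target_snapshot: 'snapshot_2025_01_01_logs_only',
  indices: 'logs-*,metrics-app', // multi-target syntax
})
console.log(response.acknowledged)
```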
### create [_create_3]

Create a snapshot. Take a snapshot of a cluster or of data streams and indices.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create)

```ts
client.snapshot.create({ repository, snapshot })
```


### Arguments [_arguments_432]

* **Request (object):**

    * **`repository` (string)**: The name of the repository for the snapshot.
    * **`snapshot` (string)**: The name of the snapshot. It supports date math. It must be unique in the repository.
    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Determines how wildcard patterns in the `indices` parameter match data streams and indices. It supports a list of values such as `open,hidden`.
    * **`feature_states` (Optional, string[])**: The feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default. Note that specifying an empty array will result in the default behavior. To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`).
    * **`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed.
    * **`include_global_state` (Optional, boolean)**: If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`).
    * **`indices` (Optional, string | string[])**: A list of data streams and indices to include in the snapshot. It supports a multi-target syntax. The default is an empty array (`[]`), which includes all regular data streams and regular indices. To exclude all data streams and indices, use `-*`. You can’t use this parameter to include or exclude system indices or system data streams from a snapshot. Use `feature_states` instead.
    * **`metadata` (Optional, Record)**: Arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. It can have any contents but it must be less than 1024 bytes. This information is not automatically generated by Elasticsearch.
    * **`partial` (Optional, boolean)**: If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes.
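As a hedged illustration (repository and snapshot names invented):

```ts
// Hypothetical example: snapshot a set of data streams and wait for completion.
const response = await client.snapshot.create({
  repository: 'my_repository',
  snapshot: 'nightly-snapshot-1',
  indices: 'logs-*',
  include_global_state: false,
  wait_for_completion: true,
})
console.log(response.snapshot?.state) // e.g. "SUCCESS"
```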
### create_repository [_create_repository]

Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster’s global metadata must be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.

Several options for this API can be specified using a query parameter or a request body parameter. If both parameters are specified, only the query parameter is used.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository)

```ts
client.snapshot.createRepository({ repository })
```


### Arguments [_arguments_433]

* **Request (object):**

    * **`repository` (string)**: The name of the snapshot repository to register or update.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
    * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`.
    * **`verify` (Optional, boolean)**: If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. If `false`, this verification is skipped. You can also perform this verification with the verify snapshot repository API.



### delete [_delete_9]

Delete snapshots.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete)

```ts
client.snapshot.delete({ repository, snapshot })
```


### Arguments [_arguments_434]

* **Request (object):**

    * **`repository` (string)**: The name of the repository to delete a snapshot from.
    * **`snapshot` (string)**: A list of snapshot names to delete. It also accepts wildcards (`*`).
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.



### delete_repository [_delete_repository]

Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository)

```ts
client.snapshot.deleteRepository({ repository })
```


### Arguments [_arguments_435]

* **Request (object):**

    * **`repository` (string | string[])**: The name of the snapshot repositories to unregister. Wildcard (`*`) patterns are supported.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
    * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`.
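A hedged sketch (name invented); note that unregistering leaves the underlying snapshot data in place:

```ts
// Hypothetical example: unregister a repository without deleting its data.
const response = await client.snapshot.deleteRepository({
  repository: 'my_old_repository',
})
console.log(response.acknowledged)
```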
### get [_get_9]

Get snapshot information.

::::{note}
The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. Snapshots concurrently created may be seen during an iteration.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get)

```ts
client.snapshot.get({ repository, snapshot })
```


### Arguments [_arguments_436]

* **Request (object):**

    * **`repository` (string)**: A list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported.
    * **`snapshot` (string | string[])**: A list of snapshot names to retrieve. Wildcards (`*`) are supported.
        * To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`.
        * To get information about any snapshots that are currently running, use `_current`.
    * **`after` (Optional, string)**: An offset identifier to start pagination from as returned by the next field in the response body.
    * **`from_sort_value` (Optional, string)**: The value of the current sort column at which to start retrieval. It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. It can be a millisecond time value or a number when sorting by `index-` or shard count.
    * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable.
    * **`index_details` (Optional, boolean)**: If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. The default is `false`, meaning that this information is omitted.
    * **`index_names` (Optional, boolean)**: If `true`, the response includes the name of each index in each snapshot.
    * **`include_repository` (Optional, boolean)**: If `true`, the response includes the repository name in each snapshot.
    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
    * **`order` (Optional, Enum("asc" | "desc"))**: The sort order. Valid values are `asc` for ascending and `desc` for descending order. The default behavior is ascending order.
    * **`offset` (Optional, number)**: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0.
    * **`size` (Optional, number)**: The maximum number of snapshots to return. The default is 0, which means to return all that match the request without limit.
    * **`slm_policy_filter` (Optional, string)**: Filter snapshots by a list of snapshot lifecycle management (SLM) policy names that snapshots belong to. You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy.
    * **`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))**: The sort order for the result. The default behavior is sorting by snapshot start time stamp.
    * **`verbose` (Optional, boolean)**: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted.

::::{note}
The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined.
::::
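A hedged sketch (names invented) of paging through snapshots, newest first:

```ts
// Hypothetical example: list the ten most recent snapshots in a repository.
const response = await client.snapshot.get({
  repository: 'my_repository',
  snapshot: '*',
  sort: 'start_time',
  order: 'desc',
  size: 10,
})
console.log(response.snapshots, response.next) // `next` feeds `after` for paging
```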
+
+
+### get_repository [_get_repository]
+
+Get snapshot repository information.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository)
+
+```ts
+client.snapshot.getRepository({ ... })
+```
+
+
+### Arguments [_arguments_437]
+
+* **Request (object):**
+
+    * **`repository` (Optional, string | string[])**: A list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`.
+    * **`local` (Optional, boolean)**: If `true`, the request gets information from the local node only. If `false`, the request gets information from the master node.
+    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
+
+
+### repository_analyze [_repository_analyze]
+
+Analyze a snapshot repository. Analyze the performance characteristics and any incorrect behaviour found in a repository.
+
+The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions.
+
+There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.
+
+The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`.
Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. + +If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. + +If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. + +If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. + +If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following: + +* Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. +* Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. +* Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. + +::::{important} +An analysis writes a substantial amount of data to your repository and then reads it back again. This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. +:::: + + +::::{note} +This API is intended for exploratory use by humans. 
You should expect the request parameters and the response format to vary in future versions. +:::: + + +::::{note} +Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. This indicates it behaves incorrectly in ways that the former version did not detect. You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. +:::: + + +::::{note} +This API may not work correctly in a mixed-version cluster. +:::: + + +**Implementation details** + +::::{note} +This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. +:::: + + +The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution. + +For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. + +For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. + +For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. + +The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data. + +For some blob-level tasks, the executing node will cancel the write before it is complete. In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. + +Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. 
The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze)
+
+```ts
+client.snapshot.repositoryAnalyze({ repository })
+```
+
+
+### Arguments [_arguments_438]
+
+* **Request (object):**
+
+    * **`repository` (string)**: The name of the repository.
+    * **`blob_count` (Optional, number)**: The total number of blobs to write to the repository during the test. For realistic experiments, you should set it to at least `2000`.
+    * **`concurrency` (Optional, number)**: The number of operations to run concurrently during the test.
+    * **`detailed` (Optional, boolean)**: Indicates whether to return detailed results, including timing information for every operation performed during the analysis. If false, it returns only a summary of the analysis.
+    * **`early_read_node_count` (Optional, number)**: The number of nodes on which to perform an early read operation while writing each blob. Early read operations are only rarely performed.
+    * **`max_blob_size` (Optional, number | string)**: The maximum size of a blob to be written during the test. For realistic experiments, you should set it to at least `2gb`.
+    * **`max_total_data_size` (Optional, number | string)**: An upper limit on the total size of all the blobs written during the test. For realistic experiments, you should set it to at least `1tb`.
+    * **`rare_action_probability` (Optional, number)**: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob.
+    * **`rarely_abort_writes` (Optional, boolean)**: Indicates whether to rarely cancel writes before they complete.
+    * **`read_node_count` (Optional, number)**: The number of nodes on which to read a blob after writing.
+    * **`register_operation_count` (Optional, number)**: The minimum number of linearizable register operations to perform in total. For realistic experiments, you should set it to at least `100`.
+    * **`seed` (Optional, number)**: The seed for the pseudo-random number generator used to generate the list of operations performed during the test. To repeat the same set of operations in multiple experiments, use the same seed in each experiment. Note that the operations are performed concurrently so might not always happen in the same order on each run.
+    * **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for the test to complete. If no response is received before the timeout expires, the test is cancelled and returns an error.
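+
+For example, a minimal sketch of a first, deliberately small analysis run, assuming a registered repository named `my_backup_repo` (the name is hypothetical); later runs would scale `blob_count` and `max_blob_size` up as described above:
+
+```ts
+const report = await client.snapshot.repositoryAnalyze({
+  repository: 'my_backup_repo',
+  blob_count: 100,
+  max_blob_size: '10mb',
+  timeout: '2h', // generous, so the analysis can run to completion
+})
+```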
+
+
+### restore [_restore]
+
+Restore a snapshot. Restore a snapshot of a cluster or data streams and indices.
+
+You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible.
+
+To restore a snapshot, the cluster’s global metadata must be writable. Ensure there aren’t any cluster blocks that prevent writes. The restore operation ignores index blocks.
+
+Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:
+
+```
+GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+```
+
+If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can’t roll over or create backing indices.
+
+If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore)
+
+```ts
+client.snapshot.restore({ repository, snapshot })
+```
+
+
+### Arguments [_arguments_439]
+
+* **Request (object):**
+
+    * **`repository` (string)**: The name of the repository to restore a snapshot from.
+    * **`snapshot` (string)**: The name of the snapshot to restore.
+    * **`feature_states` (Optional, string[])**: The feature states to restore. If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. If `include_global_state` is `false`, the request restores no feature states by default. Note that specifying an empty array will result in the default behavior. To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`).
+    * **`ignore_index_settings` (Optional, string[])**: The index settings to not restore from the snapshot. You can’t use this option to ignore `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream’s matching index template.
+    * **`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores any index or data stream in indices that’s missing from the snapshot. If `false`, the request returns an error for any missing index or data stream.
+    * **`include_aliases` (Optional, boolean)**: If `true`, the request restores aliases for any restored data streams and indices. If `false`, the request doesn’t restore aliases.
+    * **`include_global_state` (Optional, boolean)**: If `true`, restore the cluster state. The cluster state includes:
+
+* Persistent cluster settings
+* Index templates
+* Legacy index templates
+* Ingest pipelines
+* Index lifecycle management (ILM) policies
+* Stored scripts
+* For snapshots taken after 7.12.0, feature states
+
+If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot.
+
+Use the `feature_states` parameter to configure how feature states are restored.
+
+If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail.
+
+    * **`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Index settings to add or change in restored indices, including backing indices. You can’t use this option to change `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream’s matching index template.
+    * **`indices` (Optional, string | string[])**: A list of indices and data streams to restore. It supports a multi-target syntax. The default behavior is all regular indices and regular data streams in the snapshot. You can’t use this parameter to restore system indices or system data streams. Use `feature_states` instead.
+    * **`partial` (Optional, boolean)**: If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. If `true`, it allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty.
+    * **`rename_pattern` (Optional, string)**: A rename pattern to apply to restored data streams and indices. Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic.
+    * **`rename_replacement` (Optional, string)**: The rename replacement string that is used with the `rename_pattern`.
+    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
+    * **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the restore operation completes. The operation is complete when it finishes all attempts to recover primary shards for restored indices. This applies even if one or more of the recovery attempts fail. If `false`, the request returns a response when the restore operation initializes.
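+
+For example, a minimal sketch of restoring two indices under new names, assuming a repository `my_backup_repo` and a snapshot `snapshot_1` exist (both names are hypothetical):
+
+```ts
+await client.snapshot.restore({
+  repository: 'my_backup_repo',
+  snapshot: 'snapshot_1',
+  indices: ['index_1', 'index_2'],
+  // Rename on restore so the originals are not overwritten.
+  rename_pattern: '(.+)',
+  rename_replacement: 'restored_$1',
+  include_global_state: false,
+  wait_for_completion: true,
+})
+```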
+
+
+### status [_status_2]
+
+Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot.
+
+Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.
+
+If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they’re not currently running.
+
+::::{warning}
+Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).
+::::
+
+
+Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status)
+
+```ts
+client.snapshot.status({ ... })
+```
+
+
+### Arguments [_arguments_440]
+
+* **Request (object):**
+
+    * **`repository` (Optional, string)**: The snapshot repository name used to limit the request. It supports wildcards (`*`) if `<snapshot>` isn’t specified.
+    * **`snapshot` (Optional, string | string[])**: A list of snapshots to retrieve status for. The default is currently running snapshots. Wildcards (`*`) are not supported.
+    * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable. If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned.
+    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
+
+
+
+### verify_repository [_verify_repository]
+
+Verify a snapshot repository. Check for common misconfigurations in a snapshot repository.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository)
+
+```ts
+client.snapshot.verifyRepository({ repository })
+```
+
+
+### Arguments [_arguments_441]
+
+* **Request (object):**
+
+    * **`repository` (string)**: The name of the snapshot repository to verify.
+    * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
+    * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`.
+
+
+
+## sql [_sql]
+
+
+### clear_cursor [_clear_cursor]
+
+Clear an SQL search cursor.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor)
+
+```ts
+client.sql.clearCursor({ cursor })
+```
+
+
+### Arguments [_arguments_442]
+
+* **Request (object):**
+
+    * **`cursor` (string)**: Cursor to clear.
+
+
+
+### delete_async [_delete_async]
+
+Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it.
+
+If the Elasticsearch security features are enabled, only the following users can use this API to delete a search:
+
+* Users with the `cancel_task` cluster privilege.
+* The user who first submitted the search.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async)
+
+```ts
+client.sql.deleteAsync({ id })
+```
+
+
+### Arguments [_arguments_443]
+
+* **Request (object):**
+
+    * **`id` (string)**: The identifier for the search.
+
+
+
+### get_async [_get_async]
+
+Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search.
+
+If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async)
+
+```ts
+client.sql.getAsync({ id })
+```
+
+
+### Arguments [_arguments_444]
+
+* **Request (object):**
+
+    * **`id` (string)**: The identifier for the search.
+    * **`delimiter` (Optional, string)**: The separator for CSV results. The API supports this parameter only for CSV responses.
+    * **`format` (Optional, string)**: The format for the response. You must specify a format using this parameter or the `Accept` HTTP header. If you specify both, the API uses this parameter.
+    * **`keep_alive` (Optional, string | -1 | 0)**: The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search.
+    * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results.
+
+
+
+### get_async_status [_get_async_status]
+
+Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status)
+
+```ts
+client.sql.getAsyncStatus({ id })
+```
+
+
+### Arguments [_arguments_445]
+
+* **Request (object):**
+
+    * **`id` (string)**: The identifier for the search.
+
+
+
+### query [_query_2]
+
+Get SQL search results. Run an SQL request.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query)
+
+```ts
+client.sql.query({ ... })
+```
+
+
+### Arguments [_arguments_446]
+
+* **Request (object):**
+
+    * **`allow_partial_search_results` (Optional, boolean)**: If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the API returns an error with no partial results.
+    * **`catalog` (Optional, string)**: The default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only.
+    * **`columnar` (Optional, boolean)**: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses.
+    * **`cursor` (Optional, string)**: The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters.
+    * **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response.
+    * **`field_multi_value_leniency` (Optional, boolean)**: If `false`, the API returns an exception when encountering multiple values for a field. If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results.
+    * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering.
+    * **`index_using_frozen` (Optional, boolean)**: If `true`, the search can run on frozen indices.
+    * **`keep_alive` (Optional, string | -1 | 0)**: The retention period for an async or saved synchronous search.
+    * **`keep_on_completion` (Optional, boolean)**: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. If `false`, Elasticsearch only stores async searches that don’t finish before the `wait_for_completion_timeout`.
+    * **`page_timeout` (Optional, string | -1 | 0)**: The minimum retention period for the scroll cursor. After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request.
+    * **`params` (Optional, Record)**: The values for parameters in the query.
+    * **`query` (Optional, string)**: The SQL query to run.
+    * **`request_timeout` (Optional, string | -1 | 0)**: The timeout before the request fails.
+    * **`runtime_mappings` (Optional, Record)**: One or more runtime fields for the search request. These fields take precedence over mapped fields with the same name.
+    * **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search.
+    * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. If the search doesn’t finish within this period, the search becomes async. To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter.
+    * **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))**: The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence.
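+
+For example, a minimal sketch of running a paginated SQL search, assuming an index named `library` with the columns used below (both are hypothetical):
+
+```ts
+const response = await client.sql.query({
+  query: 'SELECT author, name, page_count FROM library ORDER BY page_count DESC',
+  fetch_size: 5,
+})
+// When more rows match, the response carries a cursor that can be
+// passed back to fetch the next page.
+if (response.cursor) {
+  const nextPage = await client.sql.query({ cursor: response.cursor })
+}
+```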
+
+
+### translate [_translate]
+
+Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding `cursor`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate)
+
+```ts
+client.sql.translate({ query })
+```
+
+
+### Arguments [_arguments_447]
+
+* **Request (object):**
+
+    * **`query` (string)**: The SQL query to run.
+    * **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response.
+    * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering.
+    * **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search.
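+
+For example, a minimal sketch of inspecting the Query DSL that an SQL search translates to, reusing the hypothetical `library` index from above:
+
+```ts
+const dsl = await client.sql.translate({
+  query: 'SELECT * FROM library ORDER BY page_count DESC',
+  fetch_size: 10,
+})
+```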
+
+
+## ssl [_ssl]
+
+
+### certificates [_certificates]
+
+Get SSL certificates.
+
+Get information about the X.509 certificates that are used to encrypt communications in the cluster. The API returns a list that includes certificates from all TLS contexts including:
+
+* Settings for transport and HTTP interfaces
+* TLS settings that are used within authentication realms
+* TLS settings for remote monitoring exporters
+
+The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. It also includes certificates that are used for configuring server identity, such as the `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings.
+
+The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.
+
+::::{note}
+When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.
+::::
+
+
+If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates)
+
+```ts
+client.ssl.certificates()
+```
+
+
+## synonyms [_synonyms]
+
+
+### delete_synonym [_delete_synonym]
+
+Delete a synonym set.
+
+You can only delete a synonyms set that is not in use by any index analyzer.
+
+Synonyms sets can be used in synonym graph token filters and synonym token filters. These synonym filters can be used as part of search analyzers.
+
+Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase.
+
+If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. To prevent that, synonyms sets that are used in analyzers can’t be deleted. A delete request in this case will return a 400 response code.
+
+To remove a synonyms set, you must first remove all indices that contain analyzers using it. You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. Once finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it. A sketch of this migration is shown below.
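+
+A minimal sketch of that migration, assuming hypothetical index names and that `old-index` is the only index whose analyzers use the synonyms set `my-synonyms-set`:
+
+```ts
+// 1. Create a replacement index whose analyzers do not reference the synonyms set.
+await client.indices.create({ index: 'new-index' })
+// 2. Copy the data across with the reindex API.
+await client.reindex({
+  source: { index: 'old-index' },
+  dest: { index: 'new-index' },
+})
+// 3. Remove the old index; the synonyms set is now unused and can be deleted.
+await client.indices.delete({ index: 'old-index' })
+await client.synonyms.deleteSynonym({ id: 'my-synonyms-set' })
+```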
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym)
+
+```ts
+client.synonyms.deleteSynonym({ id })
+```
+
+
+### Arguments [_arguments_448]
+
+* **Request (object):**
+
+    * **`id` (string)**: The synonyms set identifier to delete.
+
+
+
+### delete_synonym_rule [_delete_synonym_rule]
+
+Delete a synonym rule. Delete a synonym rule from a synonym set.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule)
+
+```ts
+client.synonyms.deleteSynonymRule({ set_id, rule_id })
+```
+
+
+### Arguments [_arguments_449]
+
+* **Request (object):**
+
+    * **`set_id` (string)**: The ID of the synonym set to update.
+    * **`rule_id` (string)**: The ID of the synonym rule to delete.
+
+
+
+### get_synonym [_get_synonym]
+
+Get a synonym set.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym)
+
+```ts
+client.synonyms.getSynonym({ id })
+```
+
+
+### Arguments [_arguments_450]
+
+* **Request (object):**
+
+    * **`id` (string)**: The synonyms set identifier to retrieve.
+    * **`from` (Optional, number)**: The starting offset for synonym rules to retrieve.
+    * **`size` (Optional, number)**: The max number of synonym rules to retrieve.
+
+
+
+### get_synonym_rule [_get_synonym_rule]
+
+Get a synonym rule. Get a synonym rule from a synonym set.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule)
+
+```ts
+client.synonyms.getSynonymRule({ set_id, rule_id })
+```
+
+
+### Arguments [_arguments_451]
+
+* **Request (object):**
+
+    * **`set_id` (string)**: The ID of the synonym set to retrieve the synonym rule from.
+    * **`rule_id` (string)**: The ID of the synonym rule to retrieve.
+
+
+
+### get_synonyms_sets [_get_synonyms_sets]
+
+Get all synonym sets. Get a summary of all defined synonym sets.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym)
+
+```ts
+client.synonyms.getSynonymsSets({ ... })
+```
+
+
+### Arguments [_arguments_452]
+
+* **Request (object):**
+
+    * **`from` (Optional, number)**: The starting offset for synonyms sets to retrieve.
+    * **`size` (Optional, number)**: The maximum number of synonyms sets to retrieve.
+
+
+
+### put_synonym [_put_synonym]
+
+Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets.
+
+When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym)
+
+```ts
+client.synonyms.putSynonym({ id, synonyms_set })
+```
+
+
+### Arguments [_arguments_453]
+
+* **Request (object):**
+
+    * **`id` (string)**: The ID of the synonyms set to be created or updated.
+    * **`synonyms_set` ({ id, synonyms } | { id, synonyms }[])**: The synonym rules definitions for the synonyms set.
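+
+For example, a minimal sketch of creating a set with two rules, one with an explicit rule ID (the set name is hypothetical):
+
+```ts
+await client.synonyms.putSynonym({
+  id: 'my-synonyms-set',
+  synonyms_set: [
+    { id: 'rule-1', synonyms: 'hello, hi' },
+    { synonyms: 'bye, goodbye => farewell' },
+  ],
+})
+```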
+
+
+### put_synonym_rule [_put_synonym_rule]
+
+Create or update a synonym rule. Create or update a synonym rule in a synonym set.
+
+If any of the synonym rules included is invalid, the API returns an error.
+
+When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule)
+
+```ts
+client.synonyms.putSynonymRule({ set_id, rule_id, synonyms })
+```
+
+
+### Arguments [_arguments_454]
+
+* **Request (object):**
+
+    * **`set_id` (string)**: The ID of the synonym set.
+    * **`rule_id` (string)**: The ID of the synonym rule to be updated or created.
+    * **`synonyms` (string)**: The synonym rule information definition, which must be in Solr format.
+
+
+
+## tasks [_tasks_2]
+
+
+### cancel [_cancel]
+
+Cancel a task.
+
+::::{warning}
+The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.
+::::
+
+
+A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible.
+
+To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)
+
+```ts
+client.tasks.cancel({ ... })
+```
+
+
+### Arguments [_arguments_455]
+
+* **Request (object):**
+
+    * **`task_id` (Optional, string | number)**: The task identifier.
+    * **`actions` (Optional, string | string[])**: A list or wildcard expression of actions that is used to limit the request.
+    * **`nodes` (Optional, string[])**: A list of node IDs or names that is used to limit the request.
+    * **`parent_task_id` (Optional, string)**: A parent task ID that is used to limit the tasks.
+    * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until all found tasks are complete.
+
+
+
+### get [_get_10]
+
+Get task information. Get information about a task currently running in the cluster.
+
+::::{warning}
+The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.
+::::
+
+
+If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)
+
+```ts
+client.tasks.get({ task_id })
+```
+
+
+### Arguments [_arguments_456]
+
+* **Request (object):**
+
+    * **`task_id` (string)**: The task identifier.
+    * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+    * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed.
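+
+For example, a minimal sketch of waiting on a task, assuming a task ID previously returned by a request made with `wait_for_completion: false` (the ID is hypothetical):
+
+```ts
+const task = await client.tasks.get({
+  task_id: 'oTUltX4IQMOUUVeiohTt8A:12345',
+  wait_for_completion: true,
+  timeout: '30s',
+})
+```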
+
+
+### list [_list_3]
+
+Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster.
+
+::::{warning}
+The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.
+::::
+
+
+**Identifying running tasks**
+
+The `X-Opaque-Id` header, when provided on the HTTP request, is returned as a header in the response as well as in the `headers` field of the task information. This enables you to track certain calls or associate certain tasks with the client that started them. For example:
+
+```
+curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents"
+```
+
+The API returns the following result:
+
+```
+HTTP/1.1 200 OK
+X-Opaque-Id: 123456
+content-type: application/json; charset=UTF-8
+content-length: 831
+
+{
+  "tasks" : {
+    "u5lcZHqcQhu-rUoFaqDphA:45" : {
+      "node" : "u5lcZHqcQhu-rUoFaqDphA",
+      "id" : 45,
+      "type" : "transport",
+      "action" : "cluster:monitor/tasks/lists",
+      "start_time_in_millis" : 1513823752749,
+      "running_time_in_nanos" : 293139,
+      "cancellable" : false,
+      "headers" : {
+        "X-Opaque-Id" : "123456"
+      },
+      "children" : [
+        {
+          "node" : "u5lcZHqcQhu-rUoFaqDphA",
+          "id" : 46,
+          "type" : "direct",
+          "action" : "cluster:monitor/tasks/lists[n]",
+          "start_time_in_millis" : 1513823752750,
+          "running_time_in_nanos" : 92133,
+          "cancellable" : false,
+          "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45",
+          "headers" : {
+            "X-Opaque-Id" : "123456"
+          }
+        }
+      ]
+    }
+  }
+}
+```
+
+In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. The `X-Opaque-Id` in the children `headers` is the ID for the child task of the task that was initiated by the REST request.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)
+
+```ts
+client.tasks.list({ ... })
+```
+
+
+### Arguments [_arguments_457]
+
+* **Request (object):**
+
+    * **`actions` (Optional, string | string[])**: A list or wildcard expression of actions used to limit the request. For example, you can use `cluster:*` to retrieve all cluster-related tasks.
+    * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about the running tasks. This information is useful to distinguish tasks from each other but is more costly to run.
+    * **`group_by` (Optional, Enum("nodes" | "parents" | "none"))**: A key that is used to group tasks in the response. The task lists can be grouped either by nodes or by parent tasks.
+    * **`nodes` (Optional, string | string[])**: A list of node IDs or names that is used to limit the returned information.
+    * **`parent_task_id` (Optional, string)**: A parent task identifier that is used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. If the parent task is not found, the API does not return a 404 response code.
+    * **`timeout` (Optional, string | -1 | 0)**: The period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its information. However, timed out nodes are included in the `node_failures` property.
+    * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete.
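+
+For example, a minimal sketch of listing detailed, cluster-related tasks grouped by parent task:
+
+```ts
+const tasks = await client.tasks.list({
+  detailed: true,
+  group_by: 'parents',
+  actions: 'cluster:*',
+})
+```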
+
+
+
+## text_structure [_text_structure]
+
+
+### find_field_structure [_find_field_structure]
+
+Find the structure of a text field. Find the structure of a text field in an Elasticsearch index.
+
+This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field.
+
+The response from the API contains:
+
+* Sample messages.
+* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
+* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
+* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
+
+All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
+
+If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure)
+
+```ts
+client.textStructure.findFieldStructure({ field, index })
+```
+
+
+### Arguments [_arguments_458]
+
+* **Request (object):**
+
+    * **`field` (string)**: The field that should be analyzed.
+    * **`index` (string)**: The name of the index that contains the analyzed field.
+    * **`column_names` (Optional, string)**: If `format` is set to `delimited`, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+    * **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+    * **`documents_to_sample` (Optional, number)**: The number of documents to include in the structural analysis. The minimum value is 2.
+    * **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{{CATALINALOG}}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. The intention in that situation is that a user who knows the meanings will rename the fields before using them.
+    * **`explain` (Optional, boolean)**: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
+    * **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+    * **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+    * **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
+    * **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`.
+    * **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped.
+    * **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
+
+If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+
+For structured text, if you specify this parameter, the field must exist within the text.
+
+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text.
+
+    * **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported:
+
+* `a`
+* `d`
+* `dd`
+* `EEE`
+* `EEEE`
+* `H`
+* `HH`
+* `h`
+* `M`
+* `MM`
+* `MMM`
+* `MMMM`
+* `mm`
+* `ss`
+* `XX`
+* `XXX`
+* `yy`
+* `yyyy`
+* `zzz`
+
+Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default.
+
+If this parameter is not specified, the structure finder chooses the best format from a built-in set.
+
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
+
+
+### find_message_structure [_find_message_structure]
+
+Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch.
+
+This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.
+
+The response from the API contains:
+
+* Sample messages.
+* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
+* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
+
+All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
+
+If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure)
+
+```ts
+client.textStructure.findMessageStructure({ messages })
+```
+
+
+### Arguments [_arguments_459]
+
+* **Request (object):**
+
+    * **`messages` (string[])**: The list of messages you want to analyze.
+    * **`column_names` (Optional, string)**: If the format is `delimited`, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+    * **`delimiter` (Optional, string)**: If the format is `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+    * **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{{CATALINALOG}}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings renames these fields before using them.
+    * **`explain` (Optional, boolean)**: If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
+    * **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+    * **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+    * **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
+    * **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`.
+    * **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped.
+    * **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
+
+If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+
+For structured text, if you specify this parameter, the field must exist within the text.
+
+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text.
For structured text, it is not compulsory to have a timestamp in the text.

    * **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported:

* `a`
* `d`
* `dd`
* `EEE`
* `EEEE`
* `H`
* `HH`
* `h`
* `M`
* `MM`
* `MMM`
* `MMMM`
* `mm`
* `ss`
* `XX`
* `XXX`
* `yy`
* `yyyy`
* `zzz`

Additionally `S` letter groups (fractional seconds) of length one to nine are supported, provided they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.

One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default.

If this parameter is not specified, the structure finder chooses the best format from a built-in set.

If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.


### find_structure [_find_structure]

Find the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch.

This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary text formats are not currently supported. The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 MB.

The response from the API contains:

* A couple of messages from the beginning of the text.
* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.

All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure)

```ts
client.textStructure.findStructure({ ... })
```


### Arguments [_arguments_460]

* **Request (object):**

    * **`text_files` (Optional, TJsonDocument[])**
    * **`charset` (Optional, string)**: The text’s character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set.
    * **`column_names` (Optional, string)**: If you have set format to `delimited`, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
    * **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
    * **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. This setting primarily has an impact when a whole message Grok pattern such as `%{{CATALINALOG}}` matches the input. If the structure finder identifies a common structure but has no idea of meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings can rename these fields before using it.
    * **`explain` (Optional, boolean)**: If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen.
    * **`format` (Optional, string)**: The high level structure of the text. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
    * **`grok_pattern` (Optional, string)**: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
    * **`has_header_row` (Optional, boolean)**: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows.
    * **`line_merge_size_limit` (Optional, number)**: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text.
If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected.
    * **`lines_to_sample` (Optional, number)**: The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2. If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines.


::::{note}
The number of lines and the variation of the lines affect the speed of the analysis. For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety.
::::

    * **`quote` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
    * **`should_trim_fields` (Optional, boolean)**: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. Otherwise, the default value is `false`.
    * **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped.
    * **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field.

If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.

For structured text, if you specify this parameter, the field must exist within the text.

If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text.

    * **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text.

Only a subset of Java time format letter groups are supported:

* `a`
* `d`
* `dd`
* `EEE`
* `EEEE`
* `H`
* `HH`
* `h`
* `M`
* `MM`
* `MMM`
* `MMMM`
* `mm`
* `ss`
* `XX`
* `XXX`
* `yy`
* `yyyy`
* `zzz`

Additionally `S` letter groups (fractional seconds) of length one to nine are supported, provided they occur after `ss` and are separated from the `ss` by a `.`, `,` or `:`. Spacing and punctuation are also permitted, with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.

One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default.

If this parameter is not specified, the structure finder chooses the best format from a built-in set.

If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.


### test_grok_pattern [_test_grok_pattern]

Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern)

```ts
client.textStructure.testGrokPattern({ grok_pattern, text })
```


### Arguments [_arguments_461]

* **Request (object):**

    * **`grok_pattern` (string)**: The Grok pattern to run on the text.
    * **`text` (string[])**: The lines of text to run the Grok pattern on.
    * **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`.



## transform [_transform]


### delete_transform [_delete_transform]

Delete a transform. Deletes a transform.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform)

```ts
client.transform.deleteTransform({ transform_id })
```


### Arguments [_arguments_462]

* **Request (object):**

    * **`transform_id` (string)**: Identifier for the transform.
    * **`force` (Optional, boolean)**: If this value is false, the transform must be stopped before it can be deleted. If true, the transform is deleted regardless of its current state.
    * **`delete_dest_index` (Optional, boolean)**: If this value is true, the destination index is deleted together with the transform. If false, the destination index will not be deleted.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### get_node_stats [_get_node_stats]

Retrieves transform usage information for transform nodes.

```ts
client.transform.getNodeStats()
```


### get_transform [_get_transform]

Get transforms. Retrieves configuration information for transforms.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform)

```ts
client.transform.getTransform({ ... })
```


### Arguments [_arguments_463]

* **Request (object):**

    * **`transform_id` (Optional, string | string[])**: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

        1. Contains wildcard expressions and there are no transforms that match.
        2. Contains the _all string or no identifiers and there are no matches.
        3. Contains wildcard expressions and there are only partial matches.


If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches.

    * **`from` (Optional, number)**: Skips the specified number of transforms.
    * **`size` (Optional, number)**: Specifies the maximum number of transforms to obtain.
    * **`exclude_generated` (Optional, boolean)**: Excludes fields that were automatically added when creating the transform. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.


### get_transform_stats [_get_transform_stats]

Get transform stats. Retrieves usage information for transforms.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats)

```ts
client.transform.getTransformStats({ transform_id })
```


### Arguments [_arguments_464]

* **Request (object):**

    * **`transform_id` (string | string[])**: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``.
    * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

        1. Contains wildcard expressions and there are no transforms that match.
        2. Contains the _all string or no identifiers and there are no matches.
        3. Contains wildcard expressions and there are only partial matches.


If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches.

    * **`from` (Optional, number)**: Skips the specified number of transforms.
    * **`size` (Optional, number)**: Specifies the maximum number of transforms to obtain.
    * **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the stats.


### preview_transform [_preview_transform]

Preview a transform. Generates a preview of the results that you will get when you create a transform with the same configuration.

It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform)

```ts
client.transform.previewTransform({ ... })
```


### Arguments [_arguments_465]

* **Request (object):**

    * **`transform_id` (Optional, string)**: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in the request body.
    * **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform.
    * **`description` (Optional, string)**: Free text description of the transform.
    * **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h.
    * **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it.
These objects define the group by fields and the aggregation to reduce the data. + * **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. + * **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. + * **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. + * **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. + * **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +### put_transform [_put_transform] + +Create a transform. Creates a transform. + +A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. + +You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. + +You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. + +::::{note} +You must use Kibana or this API to create a transform. Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. +:::: + + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform) + +```ts +client.transform.putTransform({ transform_id, dest, source }) +``` + + +### Arguments [_arguments_466] + +* **Request (object):** + + * **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. + * **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination for the transform. + * **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. + * **`description` (Optional, string)**: Free text description of the transform. 
    * **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is `1s` and the maximum is `1h`.
    * **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key.
    * **`_meta` (Optional, Record)**: Defines optional transform metadata.
    * **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data.
    * **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index.
    * **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings.
    * **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously.
    * **`defer_validation` (Optional, boolean)**: When the transform is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source index pattern. You can use this parameter to skip the checks, for example when the source index does not exist until after the transform is created. However, the validations are always run when you start the transform, with the exception of privilege checks.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### reset_transform [_reset_transform]

Reset a transform. Resets a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform)

```ts
client.transform.resetTransform({ transform_id })
```


### Arguments [_arguments_467]

* **Request (object):**

    * **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters.
    * **`force` (Optional, boolean)**: If this value is `true`, the transform is reset regardless of its current state. If it’s `false`, the transform must be stopped before it can be reset.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



### schedule_now_transform [_schedule_now_transform]

Schedule a transform to start now. Instantly runs a transform to process data.

If you run `_schedule_now` on a transform, it will process the new data instantly, without waiting for the configured frequency interval. After the `_schedule_now` API is called, the transform will be processed again at `now + frequency` unless the `_schedule_now` API is called again in the meantime.
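
As a quick illustration, here is a minimal sketch of forcing an immediate run from the client; the transform name `my-transform` and the local node URL are assumptions made for the example:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function runNow () {
  // Process any new data immediately instead of waiting for the next
  // scheduled check; the regular frequency schedule resumes afterwards.
  await client.transform.scheduleNowTransform({ transform_id: 'my-transform' })
}

runNow().catch(console.log)
```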
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform) + +```ts +client.transform.scheduleNowTransform({ transform_id }) +``` + + +### Arguments [_arguments_468] + +* **Request (object):** + + * **`transform_id` (string)**: Identifier for the transform. + * **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the scheduling to take place + + + +### start_transform [_start_transform] + +Start a transform. Starts a transform. + +When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. + +When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—​with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform) + +```ts +client.transform.startTransform({ transform_id }) +``` + + +### Arguments [_arguments_469] + +* **Request (object):** + + * **`transform_id` (string)**: Identifier for the transform. + * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + * **`from` (Optional, string)**: Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. + + + +### stop_transform [_stop_transform] + +Stop transforms. Stops one or more transforms. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform) + +```ts +client.transform.stopTransform({ transform_id }) +``` + + +### Arguments [_arguments_470] + +* **Request (object):** + + * **`transform_id` (string)**: Identifier for the transform. To stop multiple transforms, use a list or a wildcard expression. To stop all transforms, use `_all` or `*` as the identifier. + * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. + + +If it is true, the API returns a successful acknowledgement message when there are no matches. 
When there are only partial matches, the API stops the appropriate transforms.

If it is false, the request returns a 404 status code when there are no matches or only partial matches.

    * **`force` (Optional, boolean)**: If it is true, the API forcefully stops the transforms.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the timeout expires, the request returns a timeout exception. However, the request continues processing and eventually moves the transform to a STOPPED state.
    * **`wait_for_checkpoint` (Optional, boolean)**: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, the transform stops as soon as possible.
    * **`wait_for_completion` (Optional, boolean)**: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns immediately and the indexer is stopped asynchronously in the background.


### update_transform [_update_transform]

Update a transform. Updates certain properties of a transform.

All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform)

```ts
client.transform.updateTransform({ transform_id })
```


### Arguments [_arguments_471]

* **Request (object):**

    * **`transform_id` (string)**: Identifier for the transform.
    * **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform.
    * **`description` (Optional, string)**: Free text description of the transform.
    * **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h.
    * **`_meta` (Optional, Record)**: Defines optional transform metadata.
    * **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform.
    * **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings.
    * **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously.
    * **`retention_policy` (Optional, { time } | null)**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index.
    * **`defer_validation` (Optional, boolean)**: When true, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the transform is created.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
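
Tying the stop, update, and start operations together, here is a minimal sketch of retuning a continuous transform. The transform name `my-transform`, the node URL, and the new `frequency` value are assumptions for the example; stopping first is a deliberate choice, not a requirement, since updates take effect at the next checkpoint anyway:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function retuneTransform () {
  // Stop the transform and block until the indexer has fully stopped.
  await client.transform.stopTransform({
    transform_id: 'my-transform',
    wait_for_completion: true
  })
  // Change only the listed properties; everything else is left as-is.
  await client.transform.updateTransform({
    transform_id: 'my-transform',
    frequency: '5m'
  })
  await client.transform.startTransform({ transform_id: 'my-transform' })
}

retuneTransform().catch(console.log)
```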

### upgrade_transforms [_upgrade_transforms]

Upgrade all transforms. Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged.

If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. Resolve the issue, then re-run the process. A summary is returned when the upgrade is finished.

To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms)

```ts
client.transform.upgradeTransforms({ ... })
```


### Arguments [_arguments_472]

* **Request (object):**

    * **`dry_run` (Optional, boolean)**: When true, the request checks for updates but does not run them.
    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.



## watcher [_watcher]


### ack_watch [_ack_watch]

Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch’s actions.

The acknowledgement state of an action is stored in the `status.actions..ack.state` structure.

::::{important}
If the specified watch is currently being executed, this API will return an error. The reason for this behavior is to prevent overwriting the watch status from a watch execution.
::::


Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false).

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch)

```ts
client.watcher.ackWatch({ watch_id })
```


### Arguments [_arguments_473]

* **Request (object):**

    * **`watch_id` (string)**: The watch identifier.
    * **`action_id` (Optional, string | string[])**: A list of the action identifiers to acknowledge. If you omit this parameter, all of the actions of the watch are acknowledged.



### activate_watch [_activate_watch]

Activate a watch. A watch can be either active or inactive.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch)

```ts
client.watcher.activateWatch({ watch_id })
```


### Arguments [_arguments_474]

* **Request (object):**

    * **`watch_id` (string)**: The watch identifier.



### deactivate_watch [_deactivate_watch]

Deactivate a watch. A watch can be either active or inactive.
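
Since activation is just a flag on the watch, toggling it from the client is a pair of calls. A minimal sketch, in which the watch id `my_watch` and the node URL are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function toggleWatch () {
  // Temporarily mute the watch, then turn it back on.
  await client.watcher.deactivateWatch({ watch_id: 'my_watch' })
  await client.watcher.activateWatch({ watch_id: 'my_watch' })
}

toggleWatch().catch(console.log)
```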

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch)

```ts
client.watcher.deactivateWatch({ watch_id })
```


### Arguments [_arguments_475]

* **Request (object):**

    * **`watch_id` (string)**: The watch identifier.



### delete_watch [_delete_watch]

Delete a watch. When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again.

Deleting a watch does not delete any watch execution records related to this watch from the watch history.

::::{important}
Deleting a watch must be done by using only this API. Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API. When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.
::::


[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch)

```ts
client.watcher.deleteWatch({ id })
```


### Arguments [_arguments_476]

* **Request (object):**

    * **`id` (string)**: The watch identifier.



### execute_watch [_execute_watch]

Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.

For testing and debugging purposes, you also have fine-grained control over how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.

You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.

When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.

When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of that of the user who stored the watch.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch)

```ts
client.watcher.executeWatch({ ... })
```


### Arguments [_arguments_477]

* **Request (object):**

    * **`id` (Optional, string)**: The watch identifier.
    * **`action_modes` (Optional, Record)**: Determines how to handle the watch actions as part of the watch execution.
    * **`alternative_input` (Optional, Record)**: When present, the watch uses this object as a payload instead of executing its own input.
    * **`ignore_condition` (Optional, boolean)**: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter.
    * **`record_execution` (Optional, boolean)**: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter.
+ * **`simulated_actions` (Optional, { actions, all, use_all })** + * **`trigger_data` (Optional, { scheduled_time, triggered_time })**: This structure is parsed as the data of the trigger event that will be used during the watch execution. + * **`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })**: When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set. + * **`debug` (Optional, boolean)**: Defines whether the watch runs in debug mode. + + + +### get_settings [_get_settings_4] + +Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings) + +```ts +client.watcher.getSettings({ ... }) +``` + + +### Arguments [_arguments_478] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + + + +### get_watch [_get_watch] + +Get a watch. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch) + +```ts +client.watcher.getWatch({ id }) +``` + + +### Arguments [_arguments_479] + +* **Request (object):** + + * **`id` (string)**: The watch identifier. + + + +### put_watch [_put_watch] + +Create or update a watch. When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. + +::::{important} +You must use Kibana or this API to create a watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. +:::: + + +When you add a watch you can also define its initial active state by setting the **active** parameter. + +When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch) + +```ts +client.watcher.putWatch({ id }) +``` + + +### Arguments [_arguments_480] + +* **Request (object):** + + * **`id` (string)**: The identifier for the watch. + * **`actions` (Optional, Record)**: The list of actions that will be run if the condition matches. + * **`condition` (Optional, { always, array_compare, compare, never, script })**: The condition that defines if the actions should be run. + * **`input` (Optional, { chain, http, search, simple })**: The input that defines the input that loads the data for the watch. + * **`metadata` (Optional, Record)**: Metadata JSON that will be copied into the history entries. + * **`throttle_period` (Optional, string | -1 | 0)**: The minimum time between actions being run. The default is 5 seconds. 
This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. + * **`throttle_period_in_millis` (Optional, Unit)**: Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. + * **`transform` (Optional, { chain, script, search })**: The transform that processes the watch payload to prepare it for the watch actions. + * **`trigger` (Optional, { schedule })**: The trigger that defines when the watch should run. + * **`active` (Optional, boolean)**: The initial state of the watch. The default value is `true`, which means the watch is active by default. + * **`if_primary_term` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified primary term + * **`if_seq_no` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified sequence number + * **`version` (Optional, number)**: Explicit version number for concurrency control + + + +### query_watches [_query_watches] + +Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. + +Note that only the `_id` and `metadata.*` fields are queryable or sortable. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches) + +```ts +client.watcher.queryWatches({ ... }) +``` + + +### Arguments [_arguments_481] + +* **Request (object):** + + * **`from` (Optional, number)**: The offset from the first result to fetch. It must be non-negative. + * **`size` (Optional, number)**: The number of hits to return. It must be non-negative. + * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query that filters the watches to be returned. + * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: One or more fields used to sort the search results. + * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: Retrieve the next page of hits using a set of sort values from the previous page. + + + +### start [_start_3] + +Start the watch service. Start the Watcher service if it is not already running. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start) + +```ts +client.watcher.start({ ... }) +``` + + +### Arguments [_arguments_482] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
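
As an example of the query watches API described above, here is a minimal sketch that pages through registered watches filtered on a metadata field. The `metadata.team` field, its `ops` value, and the node URL are assumptions for the example; remember that only `_id` and `metadata.*` are queryable:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function listTeamWatches () {
  const result = await client.watcher.queryWatches({
    query: { term: { 'metadata.team': 'ops' } }, // hypothetical metadata field
    from: 0,
    size: 10
  })
  console.log(result)
}

listTeamWatches().catch(console.log)
```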
+ + + +### stats [_stats_7] + +Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats) + +```ts +client.watcher.stats({ ... }) +``` + + +### Arguments [_arguments_483] + +* **Request (object):** + + * **`metric` (Optional, Enum("_all" | "queued_watches" | "current_watches" | "pending_watches") | Enum("_all" | "queued_watches" | "current_watches" | "pending_watches")[])**: Defines which additional metrics are included in the response. + * **`emit_stacktraces` (Optional, boolean)**: Defines whether stack traces are generated for each watch that is running. + + + +### stop [_stop_3] + +Stop the watch service. Stop the Watcher service if it is running. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop) + +```ts +client.watcher.stop({ ... }) +``` + + +### Arguments [_arguments_484] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. + + + +### update_settings [_update_settings_2] + +Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings) + +```ts +client.watcher.updateSettings({ ... }) +``` + + +### Arguments [_arguments_485] + +* **Request (object):** + + * **`index.auto_expand_replicas` (Optional, string)** + * **`index.number_of_replicas` (Optional, number)** + * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + + + +## xpack [_xpack] + + +### info [_info_5] + +Get information. The information provided by the API includes: + +* Build information including the build number and timestamp. +* License information about the currently installed license. +* Feature information for the features that are currently enabled and available under the current license. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info) + +```ts +client.xpack.info({ ... }) +``` + + +### Arguments [_arguments_486] + +* **Request (object):** + + * **`categories` (Optional, Enum("build" | "features" | "license")[])**: A list of the information categories to include in the response. For example, `build,license,features`. + * **`accept_enterprise` (Optional, boolean)**: If this param is used it must be set to true + * **`human` (Optional, boolean)**: Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. + + + +### usage [_usage_2] + +Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics. 
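
A minimal sketch that retrieves the information and usage endpoints side by side; the node URL is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function inspectFeatures () {
  // Build, license, and feature availability...
  const info = await client.xpack.info({ categories: ['build', 'license', 'features'] })
  // ...and per-feature usage statistics.
  const usage = await client.xpack.usage()
  console.log(info, usage)
}

inspectFeatures().catch(console.log)
```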
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack) + +```ts +client.xpack.usage({ ... }) +``` + + +### Arguments [_arguments_487] + +* **Request (object):** + + * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. diff --git a/docs/examples/asStream.asciidoc b/docs/reference/as_stream_examples.md similarity index 84% rename from docs/examples/asStream.asciidoc rename to docs/reference/as_stream_examples.md index e27c0a1b1..6e678bd02 100644 --- a/docs/examples/asStream.asciidoc +++ b/docs/reference/as_stream_examples.md @@ -1,11 +1,13 @@ -[[as_stream_examples]] -=== asStream +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/as_stream_examples.html +--- -Instead of getting the parsed body back, you will get the raw Node.js stream of -data. +# asStream [as_stream_examples] -[source,js] ----- +Instead of getting the parsed body back, you will get the raw Node.js stream of data. + +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -66,13 +68,14 @@ async function run () { } run().catch(console.log) ----- +``` + +::::{tip} +This can be useful if you need to pipe the {{es}}'s response to a proxy, or send it directly to another source. +:::: -TIP: This can be useful if you need to pipe the {es}'s response to a proxy, or -send it directly to another source. -[source,js] ----- +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -96,4 +99,5 @@ fastify.post('/search/:index', async (req, reply) => { }) fastify.listen(3000) ----- +``` + diff --git a/docs/reference/basic-config.md b/docs/reference/basic-config.md new file mode 100644 index 000000000..dd3452217 --- /dev/null +++ b/docs/reference/basic-config.md @@ -0,0 +1,51 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/basic-config.html +--- + +# Basic configuration [basic-config] + +This page shows you the possible basic configuration options that the clients offers. + +```js +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, + maxRetries: 5, + sniffOnStart: true +}) +``` + +| | | +| --- | --- | +| `node` or `nodes` | The Elasticsearch endpoint to use.
    It can be a single string or an array of strings:

    ```js
    node: 'http://localhost:9200/'
    ```

    Or it can be an object (or an array of objects) that represents the node:

    ```js
    node: {
    url: new URL('http://localhost:9200/'),
    tls: 'tls options',
    agent: 'http agent options',
    id: 'custom node id',
    headers: { 'custom': 'headers' }
    roles: {
    master: true,
    data: true,
    ingest: true,
    ml: false
    }
    }
    ```
    | +| `auth` | Your authentication data. You can use both basic authentication and [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key).
    See [Authentication](/reference/connecting.md#authentication) for more details.
    *Default:* `null`

    Basic authentication:

    ```js
    auth: {
    username: 'elastic',
    password: 'changeme'
    }
    ```

    [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) authentication:

    ```js
    auth: {
    apiKey: 'base64EncodedKey'
    }
    ```

    Bearer authentication, useful for [service account tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token). Be aware that it does not handle automatic token refresh:

    ```js
    auth: {
    bearer: 'token'
    }
    ```
    | +| `maxRetries` | `number` - Max number of retries for each request.
    *Default:* `3` | +| `requestTimeout` | `number` - Max request timeout in milliseconds for each request.
    *Default:* No value | +| `pingTimeout` | `number` - Max ping request timeout in milliseconds for each request.
    *Default:* `3000` |
| `sniffInterval` | `number, boolean` - Perform a sniff operation every `n` milliseconds. Sniffing might not be the best solution for you; take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to learn more.
    *Default:* `false` |
| `sniffOnStart` | `boolean` - Perform a sniff once the client is started. Sniffing might not be the best solution for you; take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to learn more.
    *Default:* `false` | +| `sniffEndpoint` | `string` - Endpoint to ping during a sniff.
    *Default:* `'_nodes/_all/http'` |
| `sniffOnConnectionFault` | `boolean` - Perform a sniff on connection fault. Sniffing might not be the best solution for you; take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to learn more.
    *Default:* `false` | +| `resurrectStrategy` | `string` - Configure the node resurrection strategy.
    *Options:* `'ping'`, `'optimistic'`, `'none'`
    *Default:* `'ping'` | +| `suggestCompression` | `boolean` - Adds `accept-encoding` header to every request.
    *Default:* `false` | +| `compression` | `string, boolean` - Enables gzip request body compression.
    *Options:* `'gzip'`, `false`
    *Default:* `false` |
| `tls` | `http.SecureContextOptions` - tls [configuration](https://nodejs.org/api/tls.md).
    *Default:* `null` | +| `proxy` | `string, URL` - If you are using an http(s) proxy, you can put its url here. The client will automatically handle the connection to it.
    *Default:* `null`

    ```js
    const client = new Client({
    node: 'http://localhost:9200/',
    proxy: 'http://localhost:8080/'
    })

    const client = new Client({
    node: 'http://localhost:9200/',
    proxy: 'http://user:pwd@localhost:8080/'
    })
    ```
    |
| `agent` | `http.AgentOptions, function` - http agent [options](https://nodejs.org/api/http.md#http_new_agent_options), or a function that returns an actual http agent instance. If you want to disable use of the http agent entirely (and disable the `keep-alive` feature), set the agent to `false`.
    *Default:* `null`

    ```js
    const client = new Client({
    node: 'http://localhost:9200/',
    agent: { agent: 'options' }
    })

    const client = new Client({
    node: 'http://localhost:9200/',
    // the function takes as parameter the option
    // object passed to the Connection constructor
    agent: (opts) => new CustomAgent()
    })

    const client = new Client({
    node: 'http://localhost:9200/',
    // Disable agent and keep-alive
    agent: false
    })
    ```
    | +| `nodeFilter` | `function` - Filters which node not to use for a request.
    *Default:*

    ```js
    function defaultNodeFilter (node) {
    // avoid master only nodes
    if (node.roles.master === true &&
    node.roles.data === false &&
    node.roles.ingest === false) {
    return false
    }
    return true
    }
    ```
    | +| `nodeSelector` | `function` - custom selection strategy.
    *Options:* `'round-robin'`, `'random'`, custom function
    *Default:* `'round-robin'`
    *Custom function example:*

    ```js
    function nodeSelector (connections) {
    const index = calculateIndex()
    return connections[index]
    }
    ```
    | +| `generateRequestId` | `function` - function to generate the request id for every request, it takes two parameters, the request parameters and options.
    By default it generates an incremental integer for every request.
    *Custom function example:*

    ```js
    function generateRequestId (params, options) {
    // your id generation logic
    // must be synchronous
    return 'id'
    }
    ```
    | +| `name` | `string, symbol` - The name to identify the client instance in the events.
    *Default:* `elasticsearch-js` | +| `opaqueIdPrefix` | `string` - A string that will be used to prefix any `X-Opaque-Id` header.<br>
    See [`X-Opaque-Id` support](/reference/observability.md#_x_opaque_id_support) for more details.<br>
    *Default:* `null` | +| `headers` | `object` - A set of custom headers to send in every request.<br>
    _Default:* `null` | +| `headers` | `object` - A set of custom headers to send in every request.
    *Default:* `{}` | +| `context` | `object` - A custom object that you can use for observability in your events.It will be merged with the API level context option.
    *Default:* `null` | +| `enableMetaHeader` | `boolean` - If true, adds an header named `'x-elastic-client-meta'`, containing some minimal telemetry data,such as the client and platform version.
    *Default:* `true` | +| `cloud` | `object` - Custom configuration for connecting to [Elastic Cloud](https://cloud.elastic.co). See [Authentication](/reference/connecting.md) for more details.
    *Default:* `null`
    *Cloud configuration example:*

    ```js
    const client = new Client({
    cloud: {
    id: ''
    },
    auth: {
    username: 'elastic',
    password: 'changeme'
    }
    })
    ```
    | +| `disablePrototypePoisoningProtection` | `boolean`, `'proto'`, `'constructor'` - The client can protect you against prototype poisoning attacks. Read [this article](https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08) to learn more about this security concern. If needed, you can enable prototype poisoning protection entirely (`false`) or one of the two checks (`'proto'` or `'constructor'`). For performance reasons, it is disabled by default. Read the `secure-json-parse` [documentation](https://github.com/fastify/secure-json-parse) to learn more.
    *Default:* `true` | +| `caFingerprint` | `string` - If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints.
    *Default:* `null` | +| `maxResponseSize` | `number` - When configured, it verifies that the uncompressed response size is lower than the configured number, if it’s higher it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENGTH
    *Default:* `null` | +| `maxCompressedResponseSize` | `number` - When configured, it verifies that the compressed response size is lower than the configured number, if it’s higher it will abort the request. It cannot be higher than buffer.constants.MAX_LENGTH
    *Default:* `null` | + diff --git a/docs/examples/bulk.asciidoc b/docs/reference/bulk_examples.md similarity index 83% rename from docs/examples/bulk.asciidoc rename to docs/reference/bulk_examples.md index c357d5130..51b53e6c7 100644 --- a/docs/examples/bulk.asciidoc +++ b/docs/reference/bulk_examples.md @@ -1,13 +1,18 @@ -[[bulk_examples]] -=== Bulk +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/bulk_examples.html +--- -With the {jsclient}/api-reference.html#_bulk[`bulk` API], you can perform multiple index/delete operations in a -single API call. The `bulk` API significantly increases indexing speed. +# Bulk [bulk_examples] -NOTE: You can also use the <>. +With the [`bulk` API](/reference/api-reference.md#_bulk), you can perform multiple index/delete operations in a single API call. The `bulk` API significantly increases indexing speed. -[source,js] ----- +::::{note} +You can also use the [bulk helper](/reference/client-helpers.md#bulk-helper). +:::: + + +```js 'use strict' require('array.prototype.flatmap').shim() @@ -90,4 +95,5 @@ async function run () { } run().catch(console.log) ----- +``` + diff --git a/docs/reference/child.md b/docs/reference/child.md new file mode 100644 index 000000000..bbebed573 --- /dev/null +++ b/docs/reference/child.md @@ -0,0 +1,34 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html +--- + +# Creating a child client [child] + +There are some use cases where you may need multiple instances of the client. You can easily do that by calling `new Client()` as many times as you need, but you will lose all the benefits of using one single client, such as the long living connections and the connection pool handling. To avoid this problem, the client offers a `child` API, which returns a new client instance that shares the connection pool with the parent client. + +::::{note} +The event emitter is shared between the parent and the child(ren). If you extend the parent client, the child client will have the same extensions, while if the child client adds an extension, the parent client will not be extended. +:::: + + +You can pass to the `child` every client option you would pass to a normal client, but the connection pool specific options (`ssl`, `agent`, `pingTimeout`, `Connection`, and `resurrectStrategy`). + +::::{warning} +If you call `close` in any of the parent/child clients, every client will be closed. +:::: + + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const child = client.child({ + headers: { 'x-foo': 'bar' }, +}) + +client.info().then(console.log, console.log) +child.info().then(console.log, console.log) +``` diff --git a/docs/reference/client-helpers.md b/docs/reference/client-helpers.md new file mode 100644 index 000000000..38c29198e --- /dev/null +++ b/docs/reference/client-helpers.md @@ -0,0 +1,532 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html +--- + +# Client helpers [client-helpers] + +The client comes with an handy collection of helpers to give you a more comfortable experience with some APIs. + +::::{warning} +The client helpers are experimental, and the API may change in the next minor releases. The helpers will not work in any Node.js version lower than 10. 
+:::: + + + +## Bulk helper [bulk-helper] + +Added in `v7.7.0` + +Running bulk requests can be complex due to the shape of the API, this helper aims to provide a nicer developer experience around the Bulk API. + + +### Usage [_usage_3] + +```js +const { createReadStream } = require('fs') +const split = require('split2') +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const result = await client.helpers.bulk({ + datasource: createReadStream('./dataset.ndjson').pipe(split()), + onDocument (doc) { + return { + index: { _index: 'my-index' } + } + } +}) + +console.log(result) +// { +// total: number, +// failed: number, +// retry: number, +// successful: number, +// time: number, +// bytes: number, +// aborted: boolean +// } +``` + +To create a new instance of the Bulk helper, access it as shown in the example above, the configuration options are: + +| | | +| --- | --- | +| `datasource` | An array, async generator or a readable stream with the data you need to index/create/update/delete. It can be an array of strings or objects, but also a stream of json strings or JavaScript objects.
    If it is a stream, we recommend using the [`split2`](https://www.npmjs.com/package/split2) package, which splits the stream on newline delimiters.<br>
    This parameter is mandatory.

    ```js
    const { createReadStream } = require('fs')
    const split = require('split2')
    const b = client.helpers.bulk({
    // if you just use split(), the data will be used as array of strings
    datasource: createReadStream('./dataset.ndjson').pipe(split())
    // if you need to manipulate the data, you can pass JSON.parse to split
    datasource: createReadStream('./dataset.ndjson').pipe(split(JSON.parse))
    })
    ```
    | +| `onDocument` | A function that is called for each document of the datasource. Inside this function you can manipulate the document, and you must return the operation you want to execute with the document. Look at the [Bulk API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) to see the supported operations.<br>
    This parameter is mandatory.

    ```js
    const b = client.helpers.bulk({
    onDocument (doc) {
    return {
    index: { _index: 'my-index' }
    }
    }
    })
    ```
    | +| `onDrop` | A function that is called every time a document can’t be indexed and has reached the maximum number of retries.<br>

    ```js
    const b = client.helpers.bulk({
    onDrop (doc) {
    console.log(doc)
    }
    })
    ```
    | +| `onSuccess` | A function that is called for each successful operation in the bulk request, which includes the result from Elasticsearch along with the original document that was sent, or `null` for delete operations.

    ```js
    const b = client.helpers.bulk({
    onSuccess ({ result, document }) {
    console.log(`SUCCESS: Document ${result.index._id} indexed to ${result.index._index}`)
    }
    })
    ```
    | +| `flushBytes` | The size of the bulk body in bytes to reach before sending it. Defaults to 5MB.<br>
    *Default:* `5000000`

    ```js
    const b = client.helpers.bulk({
    flushBytes: 1000000
    })
    ```
    | +| `flushInterval` | How much time (in milliseconds) the helper waits after the last document is read before flushing the body.<br>
    *Default:* `30000`

    ```js
    const b = client.helpers.bulk({
    flushInterval: 30000
    })
    ```
    | +| `concurrency` | How many requests are executed at the same time.<br>
    *Default:* `5`

    ```js
    const b = client.helpers.bulk({
    concurrency: 10
    })
    ```
    | +| `retries` | How many times a document is retried before calling the `onDrop` callback.<br>
    *Default:* Client max retries.

    ```js
    const b = client.helpers.bulk({
    retries: 3
    })
    ```
    | +| `wait` | How much time to wait before retrying, in milliseconds.<br>
    *Default:* 5000.

    ```js
    const b = client.helpers.bulk({
    wait: 3000
    })
    ```
    | +| `refreshOnCompletion` | If `true`, at the end of the bulk operation it runs a refresh on all indices or on the specified indices.
    *Default:* false.

    ```js
    const b = client.helpers.bulk({
    refreshOnCompletion: true
    // or
    refreshOnCompletion: 'index-name'
    })
    ```
    | + + +### Supported operations [_supported_operations] + + +#### Index [_index_2] + +```js +client.helpers.bulk({ + datasource: myDatasource, + onDocument (doc) { + return { + index: { _index: 'my-index' } + } + } +}) +``` + + +#### Create [_create_4] + +```js +client.helpers.bulk({ + datasource: myDatasource, + onDocument (doc) { + return { + create: { _index: 'my-index', _id: doc.id } + } + } +}) +``` + + +#### Update [_update_3] + +```js +client.helpers.bulk({ + datasource: myDatasource, + onDocument (doc) { + // Note that the update operation requires you to return + // an array, where the first element is the action, while + // the second are the document option + return [ + { update: { _index: 'my-index', _id: doc.id } }, + { doc_as_upsert: true } + ] + } +}) +``` + + +#### Delete [_delete_10] + +```js +client.helpers.bulk({ + datasource: myDatasource, + onDocument (doc) { + return { + delete: { _index: 'my-index', _id: doc.id } + } + } +}) +``` + + +### Abort a bulk operation [_abort_a_bulk_operation] + +If needed, you can abort a bulk operation at any time. The bulk helper returns a [thenable](https://promisesaplus.com/), which has an `abort` method. + +::::{note} +The abort method stops the execution of the bulk operation, but if you are using a concurrency higher than one, the operations that are already running will not be stopped. +:::: + + +```js +const { createReadStream } = require('fs') +const split = require('split2') +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const b = client.helpers.bulk({ + datasource: createReadStream('./dataset.ndjson').pipe(split()), + onDocument (doc) { + return { + index: { _index: 'my-index' } + } + }, + onDrop (doc) { + b.abort() + } +}) + +console.log(await b) +``` + + +### Passing custom options to the Bulk API [_passing_custom_options_to_the_bulk_api] + +You can pass any option supported by the link: [Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) to the helper, and the helper uses those options in conjunction with the Bulk API call. + +```js +const result = await client.helpers.bulk({ + datasource: [...], + onDocument (doc) { + return { + index: { _index: 'my-index' } + } + }, + pipeline: 'my-pipeline' +}) +``` + + +### Usage with an async generator [_usage_with_an_async_generator] + +```js +const { Client } = require('@elastic/elasticsearch') + +async function * generator () { + const dataset = [ + { user: 'jon', age: 23 }, + { user: 'arya', age: 18 }, + { user: 'tyrion', age: 39 } + ] + for (const doc of dataset) { + yield doc + } +} + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const result = await client.helpers.bulk({ + datasource: generator(), + onDocument (doc) { + return { + index: { _index: 'my-index' } + } + } +}) + +console.log(result) +``` + + +### Modifying a document before operation [_modifying_a_document_before_operation] + +Added in `v8.8.2` + +If you need to modify documents in your datasource before it is sent to Elasticsearch, you can return an array in the `onDocument` function rather than an operation object. The first item in the array must be the operation object, and the second item must be the document or partial document object as you’d like it to be sent to Elasticsearch. 
+ +```js +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const result = await client.helpers.bulk({ + datasource: [...], + onDocument (doc) { + return [ + { index: { _index: 'my-index' } }, + { ...doc, favorite_color: 'mauve' }, + ] + } +}) + +console.log(result) +``` + + +## Multi search helper [multi-search-helper] + +Added in `v7.8.0` + +If you send search request at a high rate, this helper might be useful for you. It uses the multi search API under the hood to batch the requests and improve the overall performances of your application. The `result` exposes a `documents` property as well, which allows you to access directly the hits sources. + + +### Usage [_usage_4] + +```js +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const m = client.helpers.msearch() + +m.search( + { index: 'stackoverflow' }, + { query: { match: { title: 'javascript' } } } + ) + .then(result => console.log(result.body)) // or result.documents + .catch(err => console.error(err)) +``` + +To create a new instance of the multi search (msearch) helper, you should access it as shown in the example above, the configuration options are: + +| | | +| --- | --- | +| `operations` | How many search operations should be sent in a single msearch request.
    *Default:* `5`

    ```js
    const m = client.helpers.msearch({
    operations: 10
    })
    ```
    | +| `flushInterval` | How much time (in milliseconds) the helper waits after the last operation is read before flushing the operations.<br>
    *Default:* `500`

    ```js
    const m = client.helpers.msearch({
    flushInterval: 500
    })
    ```
    | +| `concurrency` | How many requests are executed at the same time.<br>
    *Default:* `5`

    ```js
    const m = client.helpers.msearch({
    concurrency: 10
    })
    ```
    | +| `retries` | How many times an operation is retried before resolving the request. An operation is retried only in case of a 429 error.<br>
    *Default:* Client max retries.

    ```js
    const m = client.helpers.msearch({
    retries: 3
    })
    ```
    | +| `wait` | How much time to wait before retrying, in milliseconds.<br>
    *Default:* 5000.

    ```js
    const m = client.helpers.msearch({
    wait: 3000
    })
    ```
    | + + +### Stopping the msearch helper [_stopping_the_msearch_helper] + +If needed, you can stop an msearch processor at any time. The msearch helper returns a [thenable](https://promisesaplus.com/), which has an `stop` method. + +If you are creating multiple msearch helpers instances and using them for a limitied period of time, remember to always use the `stop` method once you have finished using them, otherwise your application will start leaking memory. + +The `stop` method accepts an optional error, that will be dispatched every subsequent search request. + +::::{note} +The stop method stops the execution of the msearch processor, but if you are using a concurrency higher than one, the operations that are already running will not be stopped. +:::: + + +```js +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const m = client.helpers.msearch() + +m.search( + { index: 'stackoverflow' }, + { query: { match: { title: 'javascript' } } } + ) + .then(result => console.log(result.body)) + .catch(err => console.error(err)) + +m.search( + { index: 'stackoverflow' }, + { query: { match: { title: 'ruby' } } } + ) + .then(result => console.log(result.body)) + .catch(err => console.error(err)) + +setImmediate(() => m.stop()) +``` + + +## Search helper [search-helper] + +Added in `v7.7.0` + +A simple wrapper around the search API. Instead of returning the entire `result` object it returns only the search documents source. For improving the performances, this helper automatically adds `filter_path=hits.hits._source` to the query string. + +```js +const documents = await client.helpers.search({ + index: 'stackoverflow', + query: { + match: { + title: 'javascript' + } + } +}) + +for (const doc of documents) { + console.log(doc) +} +``` + + +## Scroll search helper [scroll-search-helper] + +Added in `v7.7.0` + +This helpers offers a simple and intuitive way to use the scroll search API. Once called, it returns an [async iterator](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/async_function) which can be used in conjuction with a for-await…​of. It handles automatically the `429` error and uses the `maxRetries` option of the client. + +```js +const scrollSearch = client.helpers.scrollSearch({ + index: 'stackoverflow', + query: { + match: { + title: 'javascript' + } + } +}) + +for await (const result of scrollSearch) { + console.log(result) +} +``` + + +### Clear a scroll search [_clear_a_scroll_search] + +If needed, you can clear a scroll search by calling `result.clear()`: + +```js +for await (const result of scrollSearch) { + if (condition) { + await result.clear() + } +} +``` + + +### Quickly getting the documents [_quickly_getting_the_documents] + +If you only need the documents from the result of a scroll search, you can access them via `result.documents`: + +```js +for await (const result of scrollSearch) { + console.log(result.documents) +} +``` + + +## Scroll documents helper [scroll-documents-helper] + +Added in `v7.7.0` + +It works in the same way as the scroll search helper, but it returns only the documents instead. Note, every loop cycle returns a single document, and you can’t use the `clear` method. For improving the performances, this helper automatically adds `filter_path=hits.hits._source` to the query string. 
+ +```js +const scrollSearch = client.helpers.scrollDocuments({ + index: 'stackoverflow', + query: { + match: { + title: 'javascript' + } + } +}) + +for await (const doc of scrollSearch) { + console.log(doc) +} +``` + + +## ES|QL helper [esql-helper] + +ES|QL queries can return their results in [several formats](docs-content://explore-analyze/query-filter/languages/esql-rest.md#esql-rest-format). The default JSON format returned by ES|QL queries contains arrays of values for each row, with column names and types returned separately: + + +### Usage [_usage_5] + + +#### `toRecords` [_torecords] + +Added in `v8.14.0` + +The default JSON format returned by ES|QL queries contains arrays of values for each row, with column names and types returned separately: + +```json +{ + "columns": [ + { "name": "@timestamp", "type": "date" }, + { "name": "client_ip", "type": "ip" }, + { "name": "event_duration", "type": "long" }, + { "name": "message", "type": "keyword" } + ], + "values": [ + [ + "2023-10-23T12:15:03.360Z", + "172.21.2.162", + 3450233, + "Connected to 10.1.0.3" + ], + [ + "2023-10-23T12:27:28.948Z", + "172.21.2.113", + 2764889, + "Connected to 10.1.0.2" + ] + ] +} +``` + +In many cases, it’s preferable to operate on an array of objects, one object per row, rather than an array of arrays. The ES|QL `toRecords` helper converts row data into objects. + +```js +await client.helpers + .esql({ query: 'FROM sample_data | LIMIT 2' }) + .toRecords() +// => +// { +// "columns": [ +// { "name": "@timestamp", "type": "date" }, +// { "name": "client_ip", "type": "ip" }, +// { "name": "event_duration", "type": "long" }, +// { "name": "message", "type": "keyword" } +// ], +// "records": [ +// { +// "@timestamp": "2023-10-23T12:15:03.360Z", +// "client_ip": "172.21.2.162", +// "event_duration": 3450233, +// "message": "Connected to 10.1.0.3" +// }, +// { +// "@timestamp": "2023-10-23T12:27:28.948Z", +// "client_ip": "172.21.2.113", +// "event_duration": 2764889, +// "message": "Connected to 10.1.0.2" +// }, +// ] +// } +``` + +In TypeScript, you can declare the type that `toRecords` returns: + +```ts +type EventLog = { + '@timestamp': string, + client_ip: string, + event_duration: number, + message: string, +} + +const result = await client.helpers + .esql({ query: 'FROM sample_data | LIMIT 2' }) + .toRecords() +``` + + +#### `toArrowReader` [_toarrowreader] + +Added in `v8.16.0` + +ES|QL can return results in multiple binary formats, including [Apache Arrow](https://arrow.apache.org/)'s streaming format. Because it is a very efficient format to read, it can be valuable for performing high-performance in-memory analytics. And, because the response is streamed as batches of records, it can be used to produce aggregations and other calculations on larger-than-memory data sets. + +`toArrowReader` returns a [`RecordBatchStreamReader`](https://arrow.apache.org/docs/js/classes/Arrow_dom.RecordBatchReader.md). + +```ts +const reader = await client.helpers + .esql({ query: 'FROM sample_data' }) + .toArrowReader() + +// print each record as JSON +for (const recordBatch of reader) { + for (const record of recordBatch) { + console.log(record.toJSON()) + } +} +``` + + +#### `toArrowTable` [_toarrowtable] + +Added in `v8.16.0` + +If you would like to pull the entire data set in Arrow format but without streaming, you can use the `toArrowTable` helper to get a [Table](https://arrow.apache.org/docs/js/classes/Arrow_dom.Table.md) back instead. 
+ +```ts +const table = await client.helpers + .esql({ query: 'FROM sample_data' }) + .toArrowTable() + +console.log(table.toArray()) +``` diff --git a/docs/reference/client-testing.md b/docs/reference/client-testing.md new file mode 100644 index 000000000..ffeb34d5e --- /dev/null +++ b/docs/reference/client-testing.md @@ -0,0 +1,121 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-testing.html +--- + +# Testing [client-testing] + +Testing is one of the most important parts of developing an application. The client is very flexible when it comes to testing and is compatible with most testing frameworks (such as [`ava`](https://www.npmjs.com/package/ava), which is used in the examples below). + +If you are using this client, you are most likely working with {{es}}, and one of the first issues you face is how to test your application. A perfectly valid solution is to use the real {{es}} instance for testing your application, but you would be doing an integration test, while you want a unit test. There are many ways to solve this problem, you could create the database with Docker, or use an in-memory compatible one, but if you are writing unit tests that can be easily parallelized this becomes quite uncomfortable. A different way of improving your testing experience while doing unit tests is to use a mock. + +The client is designed to be easy to extend and adapt to your needs. Thanks to its internal architecture it allows you to change some specific components while keeping the rest of it working as usual. Each {{es}} official client is composed of the following components: + +* `API layer`: every {{es}} API that you can call. +* `Transport`: a component that takes care of preparing a request before sending it and handling all the retry and sniffing strategies. +* `ConnectionPool`: {{es}} is a cluster and might have multiple nodes, the `ConnectionPool` takes care of them. +* `Serializer`: A class with all the serialization strategies, from the basic JSON to the new line delimited JSON. +* `Connection`: The actual HTTP library. + +The best way to mock {{es}} with the official clients is to replace the `Connection` component since it has very few responsibilities and it does not interact with other internal components other than getting requests and returning responses. + + +## `@elastic/elasticsearch-mock` [_elasticelasticsearch_mock] + +Writing each time a mock for your test can be annoying and error-prone, so we have built a simple yet powerful mocking library specifically designed for this client, and you can install it with the following command: + +```sh +npm install @elastic/elasticsearch-mock --save-dev +``` + +With this library you can create custom mocks for any request you can send to {{es}}. It offers a simple and intuitive API and it mocks only the HTTP layer, leaving the rest of the client working as usual. 
+ +Before showing all of its features, and what you can do with it, let’s see an example: + +```js +const { Client } = require('@elastic/elasticsearch') +const Mock = require('@elastic/elasticsearch-mock') + +const mock = new Mock() +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, + Connection: mock.getConnection() +}) + +mock.add({ + method: 'GET', + path: '/' +}, () => { + return { status: 'ok' } +}) + +client.info().then(console.log, console.log) +``` + +As you can see it works closely with the client itself, once you have created a new instance of the mock library you just need to call the mock.getConnection() method and pass its result to the Connection option of the client. From now on, every request is handled by the mock library, and the HTTP layer will never be touched. As a result, your test is significantly faster and you are able to easily parallelize them! + +The library allows you to write both “strict” and “loose” mocks, which means that you can write a mock that handles a very specific request or be looser and handle a group of request, let’s see this in action: + +```js +mock.add({ + method: 'POST', + path: '/indexName/_search' +}, () => { + return { + hits: { + total: { value: 1, relation: 'eq' }, + hits: [{ _source: { baz: 'faz' } }] + } + } +}) + +mock.add({ + method: 'POST', + path: '/indexName/_search', + body: { query: { match: { foo: 'bar' } } } +}, () => { + return { + hits: { + total: { value: 0, relation: 'eq' }, + hits: [] + } + } +}) +``` + +In the example above, every search request gets the first response, while every search request that uses the query described in the second mock gets the second response. + +You can also specify dynamic paths: + +```js +mock.add({ + method: 'GET', + path: '/:index/_count' +}, () => { + return { count: 42 } +}) + +client.count({ index: 'foo' }).then(console.log, console.log) // => { count: 42 } +client.count({ index: 'bar' }).then(console.log, console.log) // => { count: 42 } +``` + +And wildcards are supported as well. + +Another very interesting use case is the ability to create a test that randomly fails to see how your code reacts to failures: + +```js +mock.add({ + method: 'GET', + path: '/:index/_count' +}, () => { + if (Math.random() > 0.8) { + return ResponseError({ body: {}, statusCode: 500 }) + } else { + return { count: 42 } + } +}) +``` + +We have seen how simple is mocking {{es}} and testing your application, you can find many more features and examples in the [module documentation](https://github.com/elastic/elasticsearch-js-mock). + diff --git a/docs/reference/configuration.md b/docs/reference/configuration.md new file mode 100644 index 000000000..0367bdc12 --- /dev/null +++ b/docs/reference/configuration.md @@ -0,0 +1,19 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-configuration.html +--- + +# Configuration [client-configuration] + +The client is designed to be easily configured for your needs. In the following section, you can see the possible options that you can use to configure it. 
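+
+As a quick orientation, the following sketch combines a handful of the options documented in the pages below. It is only an illustration: the endpoint and API key are placeholders, and every option shown falls back to a sensible default if omitted.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+// A minimal configuration sketch; each option below is covered
+// in the basic configuration reference linked underneath.
+const client = new Client({
+  node: 'https://localhost:9200/', // placeholder endpoint
+  auth: { apiKey: 'base64EncodedKey' }, // placeholder credentials
+  maxRetries: 3, // retry a failed request up to three times
+  requestTimeout: 30000, // abort requests that take longer than 30 seconds
+  compression: 'gzip' // compress request bodies
+})
+```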
+ +* [Basic configuration](/reference/basic-config.md) +* [Advanced configuration](/reference/advanced-config.md) +* [Timeout best practices](docs-content://troubleshoot/elasticsearch/elasticsearch-client-javascript-api/nodejs.md) +* [Creating a child client](/reference/child.md) +* [Testing](/reference/client-testing.md) + + + + + diff --git a/docs/reference/connecting.md b/docs/reference/connecting.md new file mode 100644 index 000000000..ec4c2e454 --- /dev/null +++ b/docs/reference/connecting.md @@ -0,0 +1,524 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html +--- + +# Connecting [client-connecting] + +This page contains the information you need to connect and use the Client with {{es}}. + +## Authentication [authentication] + +This document contains code snippets to show you how to connect to various {{es}} providers. + + +### Elastic Cloud [auth-ec] + +If you are using [Elastic Cloud](https://www.elastic.co/cloud), the client offers an easy way to connect to it via the `cloud` option. You must pass the Cloud ID that you can find in the cloud console, then your username and password inside the `auth` option. + +::::{note} +When connecting to Elastic Cloud, the client will automatically enable both request and response compression by default, since it yields significant throughput improvements. Moreover, the client will also set the tls option `secureProtocol` to `TLSv1_2_method` unless specified otherwise. You can still override this option by configuring them. +:::: + + +::::{important} +Do not enable sniffing when using Elastic Cloud, since the nodes are behind a load balancer, Elastic Cloud will take care of everything for you. Take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to know more. +:::: + + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + cloud: { + id: '' + }, + auth: { + username: 'elastic', + password: 'changeme' + } +}) +``` + + +## Connecting to a self-managed cluster [connect-self-managed-new] + +By default {{es}} will start with security features like authentication and TLS enabled. To connect to the {{es}} cluster you’ll need to configure the Node.js {{es}} client to use HTTPS with the generated CA certificate in order to make requests successfully. + +If you’re just getting started with {{es}} we recommend reading the documentation on [configuring](docs-content://deploy-manage/deploy/self-managed/configure-elasticsearch.md) and [starting {{es}}](docs-content://deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md) to ensure your cluster is running as expected. + +When you start {{es}} for the first time you’ll see a distinct block like the one below in the output from {{es}} (you may have to scroll up if it’s been a while): + +```sh +-> Elasticsearch security features have been automatically configured! +-> Authentication is enabled and cluster connections are encrypted. + +-> Password for the elastic user (reset with `bin/elasticsearch-reset-password -u elastic`): + lhQpLELkjkrawaBoaz0Q + +-> HTTP CA certificate SHA-256 fingerprint: + a52dd93511e8c6045e21f16654b77c9ee0f34aea26d9f40320b531c474676228 +... +``` + +Depending on the circumstances there are two options for verifying the HTTPS connection, either verifying with the CA certificate itself or via the HTTP CA certificate fingerprint. 
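+
+If you prefer to stay in Node.js instead of shelling out to `openssl` (both verification options below show `openssl` commands), a sketch like the following computes the same SHA256 fingerprint with the built-in `crypto` module. It assumes Node.js 15.6 or newer and that the generated `http_ca.crt` file has been copied into the working directory.
+
+```js
+const { readFileSync } = require('fs')
+const { X509Certificate } = require('crypto')
+
+// Compute the SHA256 fingerprint of the CA certificate without openssl.
+// The certificate path is a placeholder; adjust it to your setup.
+const caCert = new X509Certificate(readFileSync('./http_ca.crt'))
+console.log(caCert.fingerprint256) // e.g. 'A5:2D:D9:35:...'
+```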
+ + +### TLS configuration [auth-tls] + +The generated root CA certificate can be found in the `certs` directory in your {{es}} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you’re running {{es}} in Docker there is [additional documentation for retrieving the CA certificate](docs-content://deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md). + +Without any additional configuration you can specify `https://` node urls, and the certificates used to sign these requests will be verified. To turn off certificate verification, you must specify an `tls` object in the top level config and set `rejectUnauthorized: false`. The default `tls` values are the same that Node.js’s [`tls.connect()`](https://nodejs.org/api/tls.md#tls_tls_connect_options_callback) uses. + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://localhost:9200/', + auth: { + username: 'elastic', + password: 'changeme' + }, + tls: { + ca: fs.readFileSync('./http_ca.crt'), + rejectUnauthorized: false + } +}) +``` + + +### CA fingerprint [auth-ca-fingerprint] + +You can configure the client to only trust certificates that are signed by a specific CA certificate (CA certificate pinning) by providing a `caFingerprint` option. This will verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied value. You must configure a SHA256 digest. + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://example.com/' + auth: { ... }, + // the fingerprint (SHA256) of the CA certificate that is used to sign + // the certificate that the Elasticsearch node presents for TLS. + caFingerprint: '20:0D:CA:FA:76:...', + tls: { + // might be required if it's a self-signed certificate + rejectUnauthorized: false + } +}) +``` + +The certificate fingerprint can be calculated using `openssl x509` with the certificate file: + +```sh +openssl x509 -fingerprint -sha256 -noout -in /path/to/http_ca.crt +``` + +If you don’t have access to the generated CA file from {{es}} you can use the following script to output the root CA fingerprint of the {{es}} instance with `openssl s_client`: + +```sh +# Replace the values of 'localhost' and '9200' to the +# corresponding host and port values for the cluster. +openssl s_client -connect localhost:9200 -servername localhost -showcerts /dev/null \ + | openssl x509 -fingerprint -sha256 -noout -in /dev/stdin +``` + +The output of `openssl x509` will look something like this: + +```sh +SHA256 Fingerprint=A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28 +``` + + +## Connecting without security enabled [connect-no-security] + +::::{warning} +Running {{es}} without security enabled is not recommended. +:::: + + +If your cluster is configured with [security explicitly disabled](elasticsearch://docs/reference/elasticsearch/configuration-reference/security-settings.md) then you can connect via HTTP: + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'http://example.com/' +}) +``` + + +## Authentication strategies [auth-strategies] + +Following you can find all the supported authentication strategies. 
+ + +### ApiKey authentication [auth-apikey] + +You can use the [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) authentication by passing the `apiKey` parameter via the `auth` option. The `apiKey` parameter can be either a base64 encoded string or an object with the values that you can obtain from the [create api key endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key). + +::::{note} +If you provide both basic authentication credentials and the ApiKey configuration, the ApiKey takes precedence. +:::: + + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://localhost:9200/', + auth: { + apiKey: 'base64EncodedKey' + } +}) +``` + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://localhost:9200/', + auth: { + apiKey: { + id: 'foo', + api_key: 'bar' + } + } +}) +``` + + +### Bearer authentication [auth-bearer] + +You can provide your credentials by passing the `bearer` token parameter via the `auth` option. Useful for [service account tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token). Be aware that it does not handle automatic token refresh. + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://localhost:9200/', + auth: { + bearer: 'token' + } +}) +``` + + +### Basic authentication [auth-basic] + +You can provide your credentials by passing the `username` and `password` parameters via the `auth` option. + +::::{note} +If you provide both basic authentication credentials and the Api Key configuration, the Api Key will take precedence. +:::: + + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://localhost:9200/', + auth: { + username: 'elastic', + password: 'changeme' + } +}) +``` + +Otherwise, you can provide your credentials in the node(s) URL. + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://username:password@localhost:9200/' +}) +``` + + +## Usage [client-usage] + +Using the client is straightforward, it supports all the public APIs of {{es}}, and every method exposes the same signature. + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) + +const result = await client.search({ + index: 'my-index', + query: { + match: { hello: 'world' } + } +}) +``` + +The returned value of every API call is the response body from {{es}}. If you need to access additonal metadata, such as the status code or headers, you must specify `meta: true` in the request options: + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) + +const result = await client.search({ + index: 'my-index', + query: { + match: { hello: 'world' } + } +}, { meta: true }) +``` + +In this case, the result will be: + +```ts +{ + body: object | boolean + statusCode: number + headers: object + warnings: string[], + meta: object +} +``` + +::::{note} +The body is a boolean value when you use `HEAD` APIs. +:::: + + + +### Aborting a request [_aborting_a_request] + +If needed, you can abort a running request by using the `AbortController` standard. 
+ +::::{warning} +If you abort a request, the request will fail with a `RequestAbortedError`. +:::: + + +```js +const AbortController = require('node-abort-controller') +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) + +const abortController = new AbortController() +setImmediate(() => abortController.abort()) + +const result = await client.search({ + index: 'my-index', + query: { + match: { hello: 'world' } + } +}, { signal: abortController.signal }) +``` + + +### Request specific options [_request_specific_options] + +If needed you can pass request specific options in a second object: + +```js +const result = await client.search({ + index: 'my-index', + body: { + query: { + match: { hello: 'world' } + } + } +}, { + ignore: [404], + maxRetries: 3 +}) +``` + +The supported request specific options are: + +| Option | Description | +| --- | ----------- | +| `ignore` | `number[]` -  HTTP status codes which should not be considered errors for this request.
    *Default:* `null` | +| `requestTimeout` | `number` or `string` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See [Elasticsearch best practices for HTTP clients](elasticsearch://docs/reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more info.<br>
    *Default:* No timeout | +| `retryOnTimeout` | `boolean` - Retry requests that have timed out.<br>*Default:* `false` | +| `maxRetries` | `number` - Max number of retries for the request; it overrides the client default.<br>
    *Default:* `3` | +| `compression` | `string` or `boolean` - Enables body compression for the request.
    *Options:* `false`, `'gzip'`
    *Default:* `false` | +| `asStream` | `boolean` - Instead of getting the parsed body back, you get the raw Node.js stream of data.
    *Default:* `false` | +| `headers` | `object` - Custom headers for the request.
    *Default:* `null` | +| `querystring` | `object` - Custom querystring for the request.<br>
    *Default:* `null` | +| `id` | `any` - Custom request ID. *(overrides the top level request id generator)*
    *Default:* `null` | +| `context` | `any` - Custom object per request. *(you can use it to pass data to the clients events)*
    *Default:* `null` | +| `opaqueId` | `string` - Set the `X-Opaque-Id` HTTP header. See [X-Opaque-Id HTTP header](elasticsearch://docs/reference/elasticsearch/rest-apis/api-conventions.md#x-opaque-id) *Default:* `null` | +| `maxResponseSize` | `number` - When configured, it verifies that the uncompressed response size is lower than the configured number; if it’s higher, it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENGTH<br>
    *Default:* `null` | +| `maxCompressedResponseSize` | `number` - When configured, it verifies that the compressed response size is lower than the configured number; if it’s higher, it will abort the request. It cannot be higher than buffer.constants.MAX_LENGTH<br>
    *Default:* `null` | +| `signal` | `AbortSignal` - The AbortSignal instance to allow request abortion.
    *Default:* `null` | +| `meta` | `boolean` - Rather than returning the body, return an object containing `body`, `statusCode`, `headers` and `meta` keys.<br>
    *Default*: `false` | +| `redaction` | `object` - Options for redacting potentially sensitive data from error metadata. See [Redaction of potentially sensitive data](/reference/advanced-config.md#redaction). | `retryBackoff` | + +## Using the Client in a Function-as-a-Service Environment [client-faas-env] + +This section illustrates the best practices for leveraging the {{es}} client in a Function-as-a-Service (FaaS) environment. The most influential optimization is to initialize the client outside of the function, the global scope. This practice does not only improve performance but also enables background functionality as – for example – [sniffing](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). The following examples provide a skeleton for the best practices. + + +### GCP Cloud Functions [_gcp_cloud_functions] + +```js +'use strict' + +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + // client initialisation +}) + +exports.testFunction = async function (req, res) { + // use the client +} +``` + + +### AWS Lambda [_aws_lambda] + +```js +'use strict' + +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + // client initialisation +}) + +exports.handler = async function (event, context) { + // use the client +} +``` + + +### Azure Functions [_azure_functions] + +```js +'use strict' + +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + // client initialisation +}) + +module.exports = async function (context, req) { + // use the client +} +``` + +Resources used to assess these recommendations: + +* [GCP Cloud Functions: Tips & Tricks](https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations) +* [Best practices for working with AWS Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/best-practices.md) +* [Azure Functions Python developer guide](https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=azurecli-linux%2Capplication-level#global-variables) +* [AWS Lambda: Comparing the effect of global scope](https://docs.aws.amazon.com/lambda/latest/operatorguide/global-scope.md) + + +## Connecting through a proxy [client-connect-proxy] + +Added in `v7.10.0` + +If you need to pass through an http(s) proxy for connecting to {{es}}, the client out of the box offers a handy configuration for helping you with it. Under the hood, it uses the [`hpagent`](https://github.com/delvedor/hpagent) module. + +::::{important} +In versions 8.0+ of the client, the default `Connection` type is set to `UndiciConnection`, which does not support proxy configurations. To use a proxy, you will need to use the `HttpConnection` class from `@elastic/transport` instead. +:::: + + +```js +import { HttpConnection } from '@elastic/transport' + +const client = new Client({ + node: 'http://localhost:9200/', + proxy: 'http://localhost:8080/', + Connection: HttpConnection, +}) +``` + +Basic authentication is supported as well: + +```js +const client = new Client({ + node: 'http://localhost:9200/', + proxy: 'http:user:pwd@//localhost:8080', + Connection: HttpConnection, +}) +``` + +If you are connecting through a non-http(s) proxy, such as a `socks5` or `pac`, you can use the `agent` option to configure it. 
+ +```js +const SocksProxyAgent = require('socks-proxy-agent') +const client = new Client({ + node: 'http://localhost:9200/', + agent () { + return new SocksProxyAgent('socks://127.0.0.1:1080') + }, + Connection: HttpConnection, +}) +``` + + +## Error handling [client-error-handling] + +The client exposes a variety of error objects that you can use to enhance your error handling. You can find all the error objects inside the `errors` key in the client. + +```js +const { errors } = require('@elastic/elasticsearch') +console.log(errors) +``` + +You can find the errors exported by the client in the table below. + +| | | | +| --- | --- | --- | +| **Error** | **Description** | **Properties** | +| `ElasticsearchClientError` | Every error inherits from this class, it is the basic error generated by the client. | * `name` - `string`
    * `message` - `string`
    | +| `TimeoutError` | Generated when a request exceeds the `requestTimeout` option. | * `name` - `string`
    * `message` - `string`
    * `meta` - `object`, contains all the information about the request
    | +| `ConnectionError` | Generated when an error occurs during the request; it can be a connection error or a malformed stream of data. | * `name` - `string`<br>
    * `message` - `string`
    * `meta` - `object`, contains all the information about the request
    | +| `RequestAbortedError` | Generated if the user calls the `request.abort()` method. | * `name` - `string`
    * `message` - `string`
    * `meta` - `object`, contains all the information about the request
    | +| `NoLivingConnectionsError` | Given the configuration, the ConnectionPool was not able to find a usable Connection for this request. | * `name` - `string`
    * `message` - `string`
    * `meta` - `object`, contains all the information about the request
    | +| `SerializationError` | Generated if the serialization fails. | * `name` - `string`
    * `message` - `string`
    * `data` - `object`, the object to serialize
    | +| `DeserializationError` | Generated if the deserialization fails. | * `name` - `string`
    * `message` - `string`
    * `data` - `string`, the string to deserialize
    | +| `ConfigurationError` | Generated if there is a malformed configuration or parameter. | * `name` - `string`
    * `message` - `string`
    | +| `ResponseError` | Generated in case of a `4xx` or `5xx` response. | * `name` - `string`<br>
    * `message` - `string`
    * `meta` - `object`, contains all the information about the request
    * `body` - `object`, the response body
    * `statusCode` - `number`, the response status code<br>
    * `headers` - `object`, the response headers<br>
    | + + +## Keep-alive connections [keep-alive] + +By default, the client uses persistent, keep-alive connections to reduce the overhead of creating a new HTTP connection for each Elasticsearch request. If you are using the default `UndiciConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 10 minutes. If you are using the legacy `HttpConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 1 minute. + +If you need to disable keep-alive connections, you can override the HTTP agent with your preferred [HTTP agent options](https://nodejs.org/api/http.md#http_new_agent_options): + +```js +const client = new Client({ + node: 'http://localhost:9200/', + // the function takes as parameter the option + // object passed to the Connection constructor + agent: (opts) => new CustomAgent() +}) +``` + +Or you can disable the HTTP agent entirely: + +```js +const client = new Client({ + node: 'http://localhost:9200/', + // Disable agent and keep-alive + agent: false +}) +``` + + +## Closing a client’s connections [close-connections] + +If you would like to close all open connections being managed by an instance of the client, use the `close()` function: + +```js +const client = new Client({ + node: 'http://localhost:9200/' +}); +client.close(); +``` + + +## Automatic product check [product-check] + +Since v7.14.0, the client performs a required product check before the first call. This pre-flight product check allows the client to establish the version of Elasticsearch that it is communicating with. The product check requires one additional HTTP request to be sent to the server as part of the request pipeline before the main API call is sent. In most cases, this will succeed during the very first API call that the client sends. Once the product check completes, no further product check HTTP requests are sent for subsequent API calls. diff --git a/docs/reference/examples.md b/docs/reference/examples.md new file mode 100644 index 000000000..d307341d1 --- /dev/null +++ b/docs/reference/examples.md @@ -0,0 +1,38 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html +--- + +# Examples [examples] + +Following you can find some examples on how to use the client. + +* Use of the [asStream](/reference/as_stream_examples.md) parameter; +* Executing a [bulk](/reference/bulk_examples.md) request; +* Executing a [exists](/reference/exists_examples.md) request; +* Executing a [get](/reference/get_examples.md) request; +* Executing a [sql.query](/reference/sql_query_examples.md) request; +* Executing a [update](/reference/update_examples.md) request; +* Executing a [update by query](/reference/update_by_query_examples.md) request; +* Executing a [reindex](/reference/reindex_examples.md) request; +* Use of the [ignore](/reference/ignore_examples.md) parameter; +* Executing a [msearch](/reference/msearch_examples.md) request; +* How do I [scroll](/reference/scroll_examples.md)? 
+* Executing a [search](/reference/search_examples.md) request; +* I need [suggestions](/reference/suggest_examples.md); +* How to use the [transport.request](/reference/transport_request_examples.md) method; + + + + + + + + + + + + + + + diff --git a/docs/examples/exists.asciidoc b/docs/reference/exists_examples.md similarity index 68% rename from docs/examples/exists.asciidoc rename to docs/reference/exists_examples.md index 3553796fb..c5a691f64 100644 --- a/docs/examples/exists.asciidoc +++ b/docs/reference/exists_examples.md @@ -1,12 +1,18 @@ -[[exists_examples]] -=== Exists +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/exists_examples.html +--- + +# Exists [exists_examples] Check that the document `/game-of-thrones/1` exists. -NOTE: Since this API uses the `HEAD` method, the body value will be boolean. +::::{note} +Since this API uses the `HEAD` method, the body value will be boolean. +:::: + -[source,js] ----- +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -34,4 +40,5 @@ async function run () { } run().catch(console.log) ----- +``` + diff --git a/docs/examples/get.asciidoc b/docs/reference/get_examples.md similarity index 64% rename from docs/examples/get.asciidoc rename to docs/reference/get_examples.md index fe0268647..6b545b072 100644 --- a/docs/examples/get.asciidoc +++ b/docs/reference/get_examples.md @@ -1,12 +1,13 @@ -[[get_examples]] -=== Get +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/get_examples.html +--- -The get API allows to get a typed JSON document from the index based on its id. -The following example gets a JSON document from an index called -`game-of-thrones`, under a type called `_doc`, with id valued `'1'`. +# Get [get_examples] -[source,js] ----- +The get API allows to get a typed JSON document from the index based on its id. The following example gets a JSON document from an index called `game-of-thrones`, under a type called `_doc`, with id valued `'1'`. + +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -34,4 +35,5 @@ async function run () { } run().catch(console.log) ----- +``` + diff --git a/docs/reference/getting-started.md b/docs/reference/getting-started.md new file mode 100644 index 000000000..59b290037 --- /dev/null +++ b/docs/reference/getting-started.md @@ -0,0 +1,154 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html + - https://www.elastic.co/guide/en/serverless/current/elasticsearch-nodejs-client-getting-started.html +--- + +# Getting started [getting-started-js] + +This page guides you through the installation process of the Node.js client, shows you how to instantiate the client, and how to perform basic Elasticsearch operations with it. + + +### Requirements [_requirements] + +* [Node.js](https://nodejs.org/) version 14.x or newer +* [`npm`](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm), usually bundled with Node.js + + +### Installation [_installation] + +To install the latest version of the client, run the following command: + +```shell +npm install @elastic/elasticsearch +``` + +Refer to the [*Installation*](/reference/installation.md) page to learn more. + + +### Connecting [_connecting] + +You can connect to the Elastic Cloud using an API key and the Elasticsearch endpoint. 
+ +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + node: 'https://.../', // Elasticsearch endpoint + auth: { + apiKey: { // API key ID and secret + id: 'foo', + api_key: 'bar', + } + } +}) +``` + +Your Elasticsearch endpoint can be found on the **My deployment** page of your deployment: + +:::{image} ../images/es-endpoint.jpg +:alt: Finding Elasticsearch endpoint +::: + +You can generate an API key on the **Management** page under Security. + +:::{image} ../images/create-api-key.png +:alt: Create API key +::: + +For other connection options, refer to the [*Connecting*](/reference/connecting.md) section. + + +### Operations [_operations] + +Time to use Elasticsearch! This section walks you through the basic, and most important, operations of Elasticsearch. + + +#### Creating an index [_creating_an_index] + +This is how you create the `my_index` index: + +```js +await client.indices.create({ index: 'my_index' }) +``` + + +#### Indexing documents [_indexing_documents] + +This is a simple way of indexing a document: + +```js +await client.index({ + index: 'my_index', + id: 'my_document_id', + document: { + foo: 'foo', + bar: 'bar', + }, +}) +``` + + +#### Getting documents [_getting_documents] + +You can get documents by using the following code: + +```js +await client.get({ + index: 'my_index', + id: 'my_document_id', +}) +``` + + +#### Searching documents [_searching_documents] + +This is how you can create a single match query with the client: + +```js +await client.search({ + query: { + match: { + foo: 'foo' + } + } +}) +``` + + +#### Updating documents [_updating_documents] + +This is how you can update a document, for example to add a new field: + +```js +await client.update({ + index: 'my_index', + id: 'my_document_id', + doc: { + foo: 'bar', + new_field: 'new value' + } +}) +``` + + +#### Deleting documents [_deleting_documents] + +```js +await client.delete({ + index: 'my_index', + id: 'my_document_id', +}) +``` + + +#### Deleting an index [_deleting_an_index] + +```js +await client.indices.delete({ index: 'my_index' }) +``` + + +## Further reading [_further_reading] + +* Use [*Client helpers*](/reference/client-helpers.md) for a more comfortable experience with the APIs. +* For an elaborate example of how to ingest data into Elastic Cloud, refer to [this page](docs-content://manage-data/ingest/ingesting-data-from-applications/ingest-data-with-nodejs-on-elasticsearch-service.md). diff --git a/docs/examples/ignore.asciidoc b/docs/reference/ignore_examples.md similarity index 88% rename from docs/examples/ignore.asciidoc rename to docs/reference/ignore_examples.md index 0b4c6fa98..8414d007c 100644 --- a/docs/examples/ignore.asciidoc +++ b/docs/reference/ignore_examples.md @@ -1,10 +1,13 @@ -[[ignore_examples]] -=== Ignore +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/ignore_examples.html +--- + +# Ignore [ignore_examples] HTTP status codes which should not be considered errors for this request. 
-[source,js] ----- +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -62,4 +65,5 @@ async function run () { } run().catch(console.log) ----- +``` + diff --git a/docs/introduction.asciidoc b/docs/reference/index.md similarity index 59% rename from docs/introduction.asciidoc rename to docs/reference/index.md index e6b5963e0..22b745b70 100644 --- a/docs/introduction.asciidoc +++ b/docs/reference/index.md @@ -1,12 +1,15 @@ -[[introduction]] -== Introduction +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/introduction.html +--- -This is the official Node.js client for {es}. This page gives a quick overview -about the features of the client. +# JavaScript [introduction] +This is the official Node.js client for {{es}}. This page gives a quick overview about the features of the client. -[discrete] -=== Features + +## Features [_features] * One-to-one mapping with REST API. * Generalized, pluggable architecture. @@ -17,45 +20,35 @@ about the features of the client. * TypeScript support out of the box. -[discrete] -==== Install multiple versions +### Install multiple versions [_install_multiple_versions] -If you are using multiple versions of {es}, you need to use multiple versions of -the client as well. In the past, installing multiple versions of the same -package was not possible, but with `npm v6.9`, you can do it via aliasing. +If you are using multiple versions of {{es}}, you need to use multiple versions of the client as well. In the past, installing multiple versions of the same package was not possible, but with `npm v6.9`, you can do it via aliasing. To install different version of the client, run the following command: -[source,sh] ----- +```sh npm install @npm:@elastic/elasticsearch@ ----- - +``` For example, if you need to install `7.x` and `6.x`, run the following commands: -[source,sh] ----- +```sh npm install es6@npm:@elastic/elasticsearch@6 npm install es7@npm:@elastic/elasticsearch@7 ----- - +``` Your `package.json` will look similar to the following example: -[source,json] ----- +```json "dependencies": { "es6": "npm:@elastic/elasticsearch@^6.7.0", "es7": "npm:@elastic/elasticsearch@^7.0.0" } ----- - +``` Require the packages from your code by using the alias you have defined. -[source,js] ----- +```js const { Client: Client6 } = require('es6') const { Client: Client7 } = require('es7') @@ -70,15 +63,16 @@ const client7 = new Client7({ client6.info().then(console.log, console.log) client7.info().then(console.log, console.log) ----- +``` +Finally, if you want to install the client for the next version of {{es}} (the one that lives in the {{es}} main branch), use the following command: -Finally, if you want to install the client for the next version of {es} (the one -that lives in the {es} main branch), use the following command: - -[source,sh] ----- +```sh npm install esmain@github:elastic/elasticsearch-js ----- -WARNING: This command installs the main branch of the client which is not -considered stable. \ No newline at end of file +``` + +::::{warning} +This command installs the main branch of the client which is not considered stable. 
+::::
+
+
diff --git a/docs/reference/installation.md b/docs/reference/installation.md
new file mode 100644
index 000000000..2f29fd57e
--- /dev/null
+++ b/docs/reference/installation.md
@@ -0,0 +1,65 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/installation.html
+---
+
+# Installation [installation]
+
+This page guides you through the installation process of the client.
+
+To install the latest version of the client, run the following command:
+
+```sh
+npm install @elastic/elasticsearch
+```
+
+To install a specific major version of the client, run the following command:
+
+```sh
+npm install @elastic/elasticsearch@
+```
+
+To learn more about the supported major versions, please refer to the [Compatibility matrix](#js-compatibility-matrix).
+
+
+## Node.js support [nodejs-support]
+
+::::{note}
+The minimum supported version of Node.js is `v18`.
+::::
+
+
+The client versioning follows the {{stack}} versioning; this means that major, minor, and patch releases are done following a precise schedule that often does not coincide with the [Node.js release](https://nodejs.org/en/about/releases/) times.
+
+To avoid supporting insecure and unsupported versions of Node.js, the client **will drop support for EOL versions of Node.js between minor releases**. Typically, as soon as a Node.js version goes into EOL, the client will continue to support that version for at least another minor release. If you are using the client with a version of Node.js that will be unsupported soon, you will see a warning in your logs (the client will start logging the warning with two minors in advance).
+
+Unless you are **always** using a supported version of Node.js, we recommend defining the client dependency in your `package.json` with the `~` instead of `^`. In this way, you will lock the dependency on the minor release and not the major (for example, `~7.10.0` instead of `^7.10.0`).
+
+| Node.js Version | Node.js EOL date | End of support |
+| --- | --- | --- |
+| `8.x` | December 2019 | `7.11` (early 2021) |
+| `10.x` | April 2021 | `7.12` (mid 2021) |
+| `12.x` | April 2022 | `8.2` (early 2022) |
+| `14.x` | April 2023 | `8.8` (early 2023) |
+| `16.x` | September 2023 | `8.11` (late 2023) |
+
+
+## Compatibility matrix [js-compatibility-matrix]
+
+Language clients are forward compatible, meaning that clients support communicating with greater or equal minor versions of {{es}} without breaking. It does not mean that the client automatically supports new features of newer {{es}} versions; it is only possible after a release of a new client version. For example, an 8.12 client version won’t automatically support the new features of the 8.13 version of {{es}}; the 8.13 client version is required for that. {{es}} language clients are only backwards compatible with default distributions and without guarantees made.
+
+| {{es}} Version | Client Version | Supported |
+| --- | --- | --- |
+| `8.x` | `8.x` | `8.x` |
+| `7.x` | `7.x` | `7.17` |
+| `6.x` | `6.x` | |
+| `5.x` | `5.x` | |
+
+
+### Browser [_browser]
+
+::::{warning}
+There is no official support for the browser environment. It exposes your {{es}} instance to everyone, which could lead to security issues. We recommend writing a lightweight proxy that uses this client instead; you can see a proxy example [here](https://github.com/elastic/elasticsearch-js/tree/master/docs/examples/proxy). 
+:::: + + diff --git a/docs/reference/integrations.md b/docs/reference/integrations.md new file mode 100644 index 000000000..d301e4217 --- /dev/null +++ b/docs/reference/integrations.md @@ -0,0 +1,16 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/integrations.html +--- + +# Integrations [integrations] + +The Client offers the following integration options for you: + +* [Observability](/reference/observability.md) +* [Transport](/reference/transport.md) +* [TypeScript support](/reference/typescript.md) + + + + diff --git a/docs/examples/msearch.asciidoc b/docs/reference/msearch_examples.md similarity index 86% rename from docs/examples/msearch.asciidoc rename to docs/reference/msearch_examples.md index 66222a34e..f411f8699 100644 --- a/docs/examples/msearch.asciidoc +++ b/docs/reference/msearch_examples.md @@ -1,11 +1,13 @@ -[[msearch_examples]] -=== MSearch +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/msearch_examples.html +--- -The multi search API allows to execute several search requests within the same -API. +# MSearch [msearch_examples] -[source,js] ----- +The multi search API allows to execute several search requests within the same API. + +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -57,4 +59,5 @@ async function run () { } run().catch(console.log) ----- \ No newline at end of file +``` + diff --git a/docs/observability.asciidoc b/docs/reference/observability.md similarity index 58% rename from docs/observability.asciidoc rename to docs/reference/observability.md index 9436d457f..d142a96cc 100644 --- a/docs/observability.asciidoc +++ b/docs/reference/observability.md @@ -1,64 +1,54 @@ -[[observability]] -=== Observability +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html +--- + +# Observability [observability] To observe and measure Elasticsearch client usage, several client features are provided. First, as of 8.15.0, the client provides native support for OpenTelemetry, which allows you to send client usage data to any endpoint that supports OpenTelemetry without having to make any changes to your JavaScript codebase. -Also, rather than providing a default logger, the client offers an event -emitter interface to hook into internal events, such as `request` and -`response`, allowing you to log the events you care about, or otherwise react -to client usage however you might need. +Also, rather than providing a default logger, the client offers an event emitter interface to hook into internal events, such as `request` and `response`, allowing you to log the events you care about, or otherwise react to client usage however you might need. -Correlating events can be hard, especially if your applications have a large codebase with many events happening at the same time. To help you with this, the client provides a correlation ID system, and other -features. +Correlating events can be hard, especially if your applications have a large codebase with many events happening at the same time. To help you with this, the client provides a correlation ID system, and other features. All of these observability features are documented below. 
-[discrete] -==== OpenTelemetry -The client supports OpenTelemetry's https://opentelemetry.io/docs/zero-code/js/[zero-code -instrumentation] to enable tracking each client request as an -https://opentelemetry.io/docs/concepts/signals/traces/#spans[OpenTelemetry span]. These spans -follow all of the https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/[semantic -OpenTelemetry conventions for Elasticsearch] except for `db.query.text`. +## OpenTelemetry [_opentelemetry] + +The client supports OpenTelemetry’s [zero-code instrumentation](https://opentelemetry.io/docs/zero-code/js/) to enable tracking each client request as an [OpenTelemetry span](https://opentelemetry.io/docs/concepts/signals/traces/#spans). These spans follow all of the [semantic OpenTelemetry conventions for Elasticsearch](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/) except for `db.query.text`. -To start sending Elasticsearch trace data to your OpenTelemetry endpoint, follow -https://opentelemetry.io/docs/zero-code/js/[OpenTelemetry's zero-code instrumentation guide], -or the following steps: +To start sending Elasticsearch trace data to your OpenTelemetry endpoint, follow [OpenTelemetry’s zero-code instrumentation guide](https://opentelemetry.io/docs/zero-code/js/), or the following steps: 1. Install `@opentelemetry/api` and `@opentelemetry/auto-instrumentations-node` as Node.js dependencies 2. Export the following environment variables with the appropriate values: - - `OTEL_EXPORTER_OTLP_ENDPOINT` - - `OTEL_EXPORTER_OTLP_HEADERS` - - `OTEL_RESOURCE_ATTRIBUTES` - - `OTEL_SERVICE_NAME` + + * `OTEL_EXPORTER_OTLP_ENDPOINT` + * `OTEL_EXPORTER_OTLP_HEADERS` + * `OTEL_RESOURCE_ATTRIBUTES` + * `OTEL_SERVICE_NAME` + 3. `require` the Node.js auto-instrumentation library at startup: -[source,bash] ----- + +``` node --require '@opentelemetry/auto-instrumentations-node/register' index.js ----- +``` -[discrete] -==== Events -The client is an event emitter. This means that you can listen for its events to -add additional logic to your code, without needing to change the client's internals -or how you use the client. You can find the events' names by accessing the `events` key -of the client: +## Events [_events] -[source,js] ----- +The client is an event emitter. This means that you can listen for its events to add additional logic to your code, without needing to change the client’s internals or how you use the client. You can find the events' names by accessing the `events` key of the client: + +```js const { events } = require('@elastic/elasticsearch') console.log(events) ----- +``` -The event emitter functionality can be useful if you want to log every request, -response or error that is created by the client: +The event emitter functionality can be useful if you want to log every request, response or error that is created by the client: -[source,js] ----- +```js const logger = require('my-logger')() const { Client } = require('@elastic/elasticsearch') const client = new Client({ @@ -73,72 +63,22 @@ client.diagnostic.on('response', (err, result) => { logger.info(result) } }) ----- +``` The client emits the following events: -[cols=2*] -|=== -|`serialization` -a|Emitted before starting serialization and compression. If you want to measure this phase duration, you should measure the time elapsed between this event and `request`. 
-[source,js] ----- -client.diagnostic.on('serialization', (err, result) => { - console.log(err, result) -}) ----- -|`request` -a|Emitted before sending the actual request to {es} _(emitted multiple times in case of retries)_. -[source,js] ----- -client.diagnostic.on('request', (err, result) => { - console.log(err, result) -}) ----- - -|`deserialization` -a|Emitted before starting deserialization and decompression. If you want to measure this phase duration, you should measure the time elapsed between this event and `response`. _(This event might not be emitted in certain situations)_. -[source,js] ----- -client.diagnostic.on('deserialization', (err, result) => { - console.log(err, result) -}) ----- +| | | +| --- | --- | +| `serialization` | Emitted before starting serialization and compression. If you want to measure this phase duration, you should measure the time elapsed between this event and `request`.

    ```js
    client.diagnostic.on('serialization', (err, result) => {
      console.log(err, result)
    })
    ```
    |
+| `request` | Emitted before sending the actual request to {{es}} *(emitted multiple times in case of retries)*.

    ```js
    client.diagnostic.on('request', (err, result) => {
      console.log(err, result)
    })
    ```
    |
+| `deserialization` | Emitted before starting deserialization and decompression. If you want to measure this phase duration, you should measure the time elapsed between this event and `response`. *(This event might not be emitted in certain situations)*.

    ```js
    client.diagnostic.on('deserialization', (err, result) => {
      console.log(err, result)
    })
    ```
    |
+| `response` | Emitted once the {{es}} response has been received and parsed.

    ```js
    client.diagnostic.on('response', (err, result) => {
      console.log(err, result)
    })
    ```
    |
+| `sniff` | Emitted when the client ends a sniffing request.

    ```js
    client.diagnostic.on('sniff', (err, result) => {
      console.log(err, result)
    })
    ```
    |
+| `resurrect` | Emitted if the client is able to resurrect a dead node.

    ```js
    client.diagnostic.on('resurrect', (err, result) => {
      console.log(err, result)
    })
    ```
    | -|`response` -a|Emitted once {es} response has been received and parsed. -[source,js] ----- -client.diagnostic.on('response', (err, result) => { - console.log(err, result) -}) ----- - -|`sniff` -a|Emitted when the client ends a sniffing request. -[source,js] ----- -client.diagnostic.on('sniff', (err, result) => { - console.log(err, result) -}) ----- - -|`resurrect` -a|Emitted if the client is able to resurrect a dead node. -[source,js] ----- -client.diagnostic.on('resurrect', (err, result) => { - console.log(err, result) -}) ----- - -|=== +The values of `result` in `serialization`, `request`, `deserialization`, `response` and `sniff` are: -The values of `result` in `serialization`, `request`, `deserialization`, -`response` and `sniff` are: - -[source,ts] ----- +```ts body: any; statusCode: number | null; headers: anyObject | null; @@ -159,12 +99,11 @@ meta: { reason: string; }; }; ----- +``` While the `result` value in `resurrect` is: -[source,ts] ----- +```ts strategy: string; isAlive: boolean; connection: Connection; @@ -172,19 +111,14 @@ name: string; request: { id: any; }; ----- +``` + -[discrete] -===== Events order +### Events order [_events_order] -The event order is described in the following graph, in some edge cases, the -order is not guaranteed. -You can find in -https://github.com/elastic/elasticsearch-js/blob/main/test/acceptance/events-order.test.js[`test/acceptance/events-order.test.js`] -how the order changes based on the situation. +The event order is described in the following graph, in some edge cases, the order is not guaranteed. You can find in [`test/acceptance/events-order.test.js`](https://github.com/elastic/elasticsearch-js/blob/main/test/acceptance/events-order.test.js) how the order changes based on the situation. -[source] ----- +``` serialization │ │ (serialization and compression happens between those two events) @@ -198,17 +132,14 @@ serialization │ (deserialization and decompression happens between those two events) │ └─▶ response ----- +``` -[discrete] -==== Correlation ID -Correlating events can be hard, especially if there are many events at the same -time. The client offers you an automatic (and configurable) system to help you -handle this problem. +## Correlation ID [_correlation_id] -[source,js] ----- +Correlating events can be hard, especially if there are many events at the same time. The client offers you an automatic (and configurable) system to help you handle this problem. 
+ +```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ cloud: { id: '' }, @@ -233,13 +164,11 @@ client.search({ index: 'my-index', query: { match_all: {} } }).then(console.log, console.log) ----- +``` -By default the ID is an incremental integer, but you can configure it with the -`generateRequestId` option: +By default the ID is an incremental integer, but you can configure it with the `generateRequestId` option: -[source,js] ----- +```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ cloud: { id: '' }, @@ -251,30 +180,25 @@ const client = new Client({ return 'id' } }) ----- - +``` You can also specify a custom ID per request: -[source,js] ----- +```js client.search({ index: 'my-index', query: { match_all: {} } }, { id: 'custom-id' }).then(console.log, console.log) ----- +``` -[discrete] -==== Context object +## Context object [_context_object] -Sometimes, you might need to make some custom data available in your events, you -can do that via the `context` option of a request: +Sometimes, you might need to make some custom data available in your events, you can do that via the `context` option of a request: -[source,js] ----- +```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ cloud: { id: '' }, @@ -303,14 +227,11 @@ client.search({ }, { context: { winter: 'is coming' } }).then(console.log, console.log) ----- +``` -The context object can also be configured as a global option in the client -configuration. If you provide both, the two context objects will be shallow -merged, and the API level object will take precedence. +The context object can also be configured as a global option in the client configuration. If you provide both, the two context objects will be shallow merged, and the API level object will take precedence. -[source,js] ----- +```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ cloud: { id: '' }, @@ -340,19 +261,14 @@ client.search({ }, { context: { winter: 'has come' } }).then(console.log, console.log) ----- +``` -[discrete] -==== Client name +## Client name [_client_name] -If you are using multiple instances of the client or if you are using multiple -child clients _(which is the recommended way to have multiple instances of the -client)_, you might need to recognize which client you are using. The `name` -options help you in this regard. +If you are using multiple instances of the client or if you are using multiple child clients *(which is the recommended way to have multiple instances of the client)*, you might need to recognize which client you are using. The `name` options help you in this regard. -[source,js] ----- +```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ cloud: { id: '' }, @@ -391,25 +307,16 @@ child.search({ index: 'my-index', query: { match_all: {} } }).then(console.log, console.log) ----- +``` -[discrete] -==== X-Opaque-Id support +## X-Opaque-Id support [_x_opaque_id_support] -To improve observability, the client offers an easy way to configure the -`X-Opaque-Id` header. 
If you set the `X-Opaque-Id` in a specific request, this -allows you to discover this identifier in the -https://www.elastic.co/guide/en/elasticsearch/reference/current/logging.html#deprecation-logging[deprecation logs], -helps you with https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-slowlog.html#_identifying_search_slow_log_origin[identifying search slow log origin] -as well as https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html#_identifying_running_tasks[identifying running tasks]. +To improve observability, the client offers an easy way to configure the `X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this allows you to discover this identifier in the [deprecation logs](docs-content://deploy-manage/monitor/logging-configuration/update-elasticsearch-logging-levels.md#deprecation-logging), helps you with [identifying search slow log origin](elasticsearch://docs/reference/elasticsearch/index-settings/slow-log.md) as well as [identifying running tasks](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks). -The `X-Opaque-Id` should be configured in each request, for doing that you can -use the `opaqueId` option, as you can see in the following example. The -resulting header will be `{ 'X-Opaque-Id': 'my-search' }`. +The `X-Opaque-Id` should be configured in each request, for doing that you can use the `opaqueId` option, as you can see in the following example. The resulting header will be `{ 'X-Opaque-Id': 'my-search' }`. -[source,js] ----- +```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ cloud: { id: '' }, @@ -422,16 +329,11 @@ client.search({ }, { opaqueId: 'my-search' }).then(console.log, console.log) ----- +``` -Sometimes it may be useful to prefix all the `X-Opaque-Id` headers with a -specific string, in case you need to identify a specific client or server. For -doing this, the client offers a top-level configuration option: -`opaqueIdPrefix`. In the following example, the resulting header will be -`{ 'X-Opaque-Id': 'proxy-client::my-search' }`. +Sometimes it may be useful to prefix all the `X-Opaque-Id` headers with a specific string, in case you need to identify a specific client or server. For doing this, the client offers a top-level configuration option: `opaqueIdPrefix`. In the following example, the resulting header will be `{ 'X-Opaque-Id': 'proxy-client::my-search' }`. -[source,js] ----- +```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ cloud: { id: '' }, @@ -445,5 +347,5 @@ client.search({ }, { opaqueId: 'my-search' }).then(console.log, console.log) ----- +``` diff --git a/docs/examples/reindex.asciidoc b/docs/reference/reindex_examples.md similarity index 73% rename from docs/examples/reindex.asciidoc rename to docs/reference/reindex_examples.md index a9014036a..82955c8d9 100644 --- a/docs/examples/reindex.asciidoc +++ b/docs/reference/reindex_examples.md @@ -1,17 +1,15 @@ -[[reindex_examples]] -=== Reindex +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/reindex_examples.html +--- -The `reindex` API extracts the document source from the source index and indexes -the documents into the destination index. You can copy all documents to the -destination index, reindex a subset of the documents or update the source before -to reindex it. 
+# Reindex [reindex_examples] -In the following example we have a `game-of-thrones` index which contains -different quotes of various characters, we want to create a new index only for -the house Stark and remove the `house` field from the document source. +The `reindex` API extracts the document source from the source index and indexes the documents into the destination index. You can copy all documents to the destination index, reindex a subset of the documents or update the source before to reindex it. -[source,js] ----- +In the following example we have a `game-of-thrones` index which contains different quotes of various characters, we want to create a new index only for the house Stark and remove the `house` field from the document source. + +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -76,4 +74,5 @@ async function run () { } run().catch(console.log) ----- +``` + diff --git a/docs/examples/scroll.asciidoc b/docs/reference/scroll_examples.md similarity index 78% rename from docs/examples/scroll.asciidoc rename to docs/reference/scroll_examples.md index 87f302876..7f2eb4da7 100644 --- a/docs/examples/scroll.asciidoc +++ b/docs/reference/scroll_examples.md @@ -1,28 +1,27 @@ -[[scroll_examples]] -=== Scroll +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/scroll_examples.html +--- -While a search request returns a single “page” of results, the scroll API can be -used to retrieve large numbers of results (or even all results) from a single -search request, in much the same way as you would use a cursor on a traditional -database. +# Scroll [scroll_examples] -Scrolling is not intended for real time user requests, but rather for processing -large amounts of data, for example in order to reindex the contents of one index -into a new index with a different configuration. +While a search request returns a single “page” of results, the scroll API can be used to retrieve large numbers of results (or even all results) from a single search request, in much the same way as you would use a cursor on a traditional database. -NOTE: The results that are returned from a scroll request reflect the state of -the index at the time that the initial search request was made, like a snapshot -in time. Subsequent changes to documents (index, update or delete) will only -affect later search requests. +Scrolling is not intended for real time user requests, but rather for processing large amounts of data, for example in order to reindex the contents of one index into a new index with a different configuration. -In order to use scrolling, the initial search request should specify the scroll -parameter in the query string, which tells {es} how long it should keep the -“search context” alive. +::::{note} +The results that are returned from a scroll request reflect the state of the index at the time that the initial search request was made, like a snapshot in time. Subsequent changes to documents (index, update or delete) will only affect later search requests. +:::: -NOTE: Did you know that we provide an helper for sending scroll requests? You can find it <>. -[source,js] ----- +In order to use scrolling, the initial search request should specify the scroll parameter in the query string, which tells {{es}} how long it should keep the “search context” alive. + +::::{note} +Did you know that we provide an helper for sending scroll requests? You can find it [here](/reference/client-helpers.md#scroll-search-helper). 
+:::: + + +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -111,13 +110,11 @@ async function run () { } run().catch(console.log) ----- +``` -Another cool usage of the `scroll` API can be done with Node.js ≥ 10, by using -async iteration! +Another cool usage of the `scroll` API can be done with Node.js ≥ 10, by using async iteration! -[source,js] ----- +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -192,4 +189,5 @@ async function run () { } run().catch(console.log) ----- +``` + diff --git a/docs/examples/search.asciidoc b/docs/reference/search_examples.md similarity index 70% rename from docs/examples/search.asciidoc rename to docs/reference/search_examples.md index 229d1b09b..2847c1de0 100644 --- a/docs/examples/search.asciidoc +++ b/docs/reference/search_examples.md @@ -1,14 +1,13 @@ -[[search_examples]] -=== Search +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/search_examples.html +--- -The `search` API allows you to execute a search query and get back search hits -that match the query. The query can either be provided using a simple -https://www.elastic.co/guide/en/elasticsearch/reference/6.6/search-uri-request.html[query string as a parameter], -or using a -https://www.elastic.co/guide/en/elasticsearch/reference/6.6/search-request-body.html[request body]. +# Search [search_examples] -[source,js] ----- +The `search` API allows you to execute a search query and get back search hits that match the query. The query can either be provided using a simple [query string as a parameter](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search), or using a [request body](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html). + +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -61,4 +60,5 @@ async function run () { } run().catch(console.log) ----- \ No newline at end of file +``` + diff --git a/docs/examples/sql.query.asciidoc b/docs/reference/sql_query_examples.md similarity index 62% rename from docs/examples/sql.query.asciidoc rename to docs/reference/sql_query_examples.md index cdf61147c..f2a955d27 100644 --- a/docs/examples/sql.query.asciidoc +++ b/docs/reference/sql_query_examples.md @@ -1,19 +1,15 @@ -[[sql_query_examples]] -=== SQL +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/sql_query_examples.html +--- -{es} SQL is an X-Pack component that allows SQL-like queries to be executed in -real-time against {es}. Whether using the REST interface, command-line or JDBC, -any client can use SQL to search and aggregate data natively inside {es}. One -can think of {es} SQL as a translator, one that understands both SQL and {es} -and makes it easy to read and process data in real-time, at scale by leveraging -{es} capabilities. +# SQL [sql_query_examples] -In the following example we will search all the documents that has the field -`house` equals to `stark`, log the result with the tabular view and then -manipulate the result to obtain an object easy to navigate. +{{es}} SQL is an X-Pack component that allows SQL-like queries to be executed in real-time against {{es}}. Whether using the REST interface, command-line or JDBC, any client can use SQL to search and aggregate data natively inside {{es}}. 
One can think of {{es}} SQL as a translator, one that understands both SQL and {{es}} and makes it easy to read and process data in real-time, at scale by leveraging {{es}} capabilities. -[source,js] ----- +In the following example we will search all the documents that has the field `house` equals to `stark`, log the result with the tabular view and then manipulate the result to obtain an object easy to navigate. + +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -69,4 +65,5 @@ async function run () { } run().catch(console.log) ----- +``` + diff --git a/docs/examples/suggest.asciidoc b/docs/reference/suggest_examples.md similarity index 78% rename from docs/examples/suggest.asciidoc rename to docs/reference/suggest_examples.md index 6096bc753..70fbbd05a 100644 --- a/docs/examples/suggest.asciidoc +++ b/docs/reference/suggest_examples.md @@ -1,14 +1,15 @@ -[[suggest_examples]] -=== Suggest +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/suggest_examples.html +--- -The suggest feature suggests similar looking terms based on a provided text by -using a suggester. _Parts of the suggest feature are still under development._ +# Suggest [suggest_examples] -The suggest request part is defined alongside the query part in a `search` -request. If the query part is left out, only suggestions are returned. +The suggest feature suggests similar looking terms based on a provided text by using a suggester. *Parts of the suggest feature are still under development.* -[source,js] ----- +The suggest request part is defined alongside the query part in a `search` request. If the query part is left out, only suggestions are returned. + +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -63,5 +64,5 @@ async function run () { } run().catch(console.log) +``` ----- \ No newline at end of file diff --git a/docs/reference/toc.yml b/docs/reference/toc.yml new file mode 100644 index 000000000..3896c1fde --- /dev/null +++ b/docs/reference/toc.yml @@ -0,0 +1,34 @@ +toc: + - file: index.md + - file: getting-started.md + - file: installation.md + - file: connecting.md + - file: configuration.md + children: + - file: basic-config.md + - file: advanced-config.md + - file: child.md + - file: client-testing.md + - file: integrations.md + children: + - file: observability.md + - file: transport.md + - file: typescript.md + - file: api-reference.md + - file: examples.md + children: + - file: as_stream_examples.md + - file: bulk_examples.md + - file: exists_examples.md + - file: get_examples.md + - file: ignore_examples.md + - file: msearch_examples.md + - file: scroll_examples.md + - file: search_examples.md + - file: suggest_examples.md + - file: transport_request_examples.md + - file: sql_query_examples.md + - file: update_examples.md + - file: update_by_query_examples.md + - file: reindex_examples.md + - file: client-helpers.md \ No newline at end of file diff --git a/docs/reference/transport.md b/docs/reference/transport.md new file mode 100644 index 000000000..382574bb6 --- /dev/null +++ b/docs/reference/transport.md @@ -0,0 +1,53 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/transport.html +--- + +# Transport [transport] + +This class is responsible for performing the request to {{es}} and handling errors, it also handles sniffing. 
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const { Transport } = require('@elastic/transport')
+
+class MyTransport extends Transport {
+  request (params, options, callback) {
+    // your code
+  }
+}
+
+const client = new Client({
+  Transport: MyTransport
+})
+```
+
+Sometimes you need to inject a small snippet of your code and then continue to use the usual client code. In such cases, call `super.method`:
+
+```js
+class MyTransport extends Transport {
+  request (params, options, callback) {
+    // your code
+    return super.request(params, options, callback)
+  }
+}
+```
+
+## Supported content types [_supported_content_types]
+
+Depending on the `content-type` of the response, the transport will return the body as different types:
+
+| Content-Type | JavaScript type |
+| --- | --- |
+| `application/json` | `object` |
+| `text/plain` | `string` |
+| `application/vnd.elasticsearch+json` | `object` |
+| `application/vnd.mapbox-vector-tile` | `Buffer` |
+| `application/vnd.apache.arrow.stream` | `Buffer` |
+| `application/vnd.elasticsearch+arrow+stream` | `Buffer` |
+| `application/smile` | `Buffer` |
+| `application/vnd.elasticsearch+smile` | `Buffer` |
+| `application/cbor` | `Buffer` |
+| `application/vnd.elasticsearch+cbor` | `Buffer` |
+
+
diff --git a/docs/examples/transport.request.asciidoc b/docs/reference/transport_request_examples.md
similarity index 57%
rename from docs/examples/transport.request.asciidoc
rename to docs/reference/transport_request_examples.md
index 7c325e07e..1558d0403 100644
--- a/docs/examples/transport.request.asciidoc
+++ b/docs/reference/transport_request_examples.md
@@ -1,22 +1,23 @@
-[[transport_request_examples]]
-=== transport.request
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/transport_request_examples.html
+---
-It can happen that you need to communicate with {es} by using an API that is not
-supported by the client, to mitigate this issue you can directly call
-`client.transport.request`, which is the internal utility that the client uses
-to communicate with {es} when you use an API method.
+# transport.request [transport_request_examples]
-NOTE: When using the `transport.request` method you must provide all the
-parameters needed to perform an HTTP call, such as `method`, `path`,
-`querystring`, and `body`.
+It can happen that you need to communicate with {{es}} by using an API that is not supported by the client; to mitigate this issue, you can directly call `client.transport.request`, which is the internal utility that the client uses to communicate with {{es}} when you use an API method.
+::::{note}
+When using the `transport.request` method you must provide all the parameters needed to perform an HTTP call, such as `method`, `path`, `querystring`, and `body`.
+::::
-TIP: If you find yourself use this method too often, take in consideration the
-use of `client.extend`, which will make your code look cleaner and easier to
-maintain.
+::::{tip}
+If you find yourself using this method too often, take into consideration the use of `client.extend`, which will make your code look cleaner and easier to maintain. 
+:::: + + +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -71,4 +72,5 @@ async function run () { } run().catch(console.log) ----- \ No newline at end of file +``` + diff --git a/docs/typescript.asciidoc b/docs/reference/typescript.md similarity index 64% rename from docs/typescript.asciidoc rename to docs/reference/typescript.md index 07534d733..29bc9eb06 100644 --- a/docs/typescript.asciidoc +++ b/docs/reference/typescript.md @@ -1,22 +1,23 @@ -[[typescript]] -=== TypeScript support +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/typescript.html +--- -The client offers a first-class support for TypeScript, shipping a complete set -of type definitions of Elasticsearch's API surface. +# TypeScript support [typescript] -The types are not 100% complete yet. Some APIs are missing (the newest ones, e.g. EQL), -and others may contain some errors, but we are continuously pushing fixes & improvements. -Contribute type fixes and improvements to https://github.com/elastic/elasticsearch-specification[elasticsearch-specification github repository]. +The client offers a first-class support for TypeScript, shipping a complete set of type definitions of Elasticsearch’s API surface. -NOTE: The client is developed against the https://www.npmjs.com/package/typescript?activeTab=versions[latest] -version of TypeScript. Furthermore, unless you have set `skipLibCheck` to `true`, -you should configure `esModuleInterop` to `true`. +The types are not 100% complete yet. Some APIs are missing (the newest ones, e.g. EQL), and others may contain some errors, but we are continuously pushing fixes & improvements. Contribute type fixes and improvements to [elasticsearch-specification github repository](https://github.com/elastic/elasticsearch-specification). -[discrete] -==== Example +::::{note} +The client is developed against the [latest](https://www.npmjs.com/package/typescript?activeTab=versions) version of TypeScript. Furthermore, unless you have set `skipLibCheck` to `true`, you should configure `esModuleInterop` to `true`. +:::: -[source,ts] ----- + + +## Example [_example] + +```ts import { Client } from '@elastic/elasticsearch' const client = new Client({ @@ -71,21 +72,20 @@ async function run () { } run().catch(console.log) ----- +``` -[discrete] -==== Request & Response types + +## Request & Response types [_request_response_types] You can import the full TypeScript requests & responses definitions as it follows: -[source,ts] ----- +```ts import { estypes } from '@elastic/elasticsearch' ----- +``` If you need the legacy definitions with the body, you can do the following: -[source,ts] ----- +```ts import { estypesWithBody } from '@elastic/elasticsearch' ----- \ No newline at end of file +``` + diff --git a/docs/examples/update_by_query.asciidoc b/docs/reference/update_by_query_examples.md similarity index 75% rename from docs/examples/update_by_query.asciidoc rename to docs/reference/update_by_query_examples.md index 80c52fd90..0c61c0617 100644 --- a/docs/examples/update_by_query.asciidoc +++ b/docs/reference/update_by_query_examples.md @@ -1,12 +1,13 @@ -[[update_by_query_examples]] -=== Update By Query +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/update_by_query_examples.html +--- -The simplest usage of _update_by_query just performs an update on every document -in the index without changing the source. 
This is useful to pick up a new -property or some other online mapping change. +# Update By Query [update_by_query_examples] -[source,js] ----- +The simplest usage of _update_by_query just performs an update on every document in the index without changing the source. This is useful to pick up a new property or some other online mapping change. + +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -56,5 +57,5 @@ async function run () { } run().catch(console.log) +``` ----- diff --git a/docs/examples/update.asciidoc b/docs/reference/update_examples.md similarity index 81% rename from docs/examples/update.asciidoc rename to docs/reference/update_examples.md index b7e0272ae..5de58586b 100644 --- a/docs/examples/update.asciidoc +++ b/docs/reference/update_examples.md @@ -1,12 +1,13 @@ -[[update_examples]] -=== Update +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/update_examples.html +--- -The update API allows updates of a specific document using the given script. In -the following example, we will index a document that also tracks how many times -a character has said the given quote, and then we will update the `times` field. +# Update [update_examples] -[source,js] ----- +The update API allows updates of a specific document using the given script. In the following example, we will index a document that also tracks how many times a character has said the given quote, and then we will update the `times` field. + +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -47,13 +48,11 @@ async function run () { } run().catch(console.log) - ----- +``` With the update API, you can also run a partial update of a document. -[source,js] ----- +```js 'use strict' const { Client } = require('@elastic/elasticsearch') @@ -90,6 +89,5 @@ async function run () { } run().catch(console.log) +``` - ----- diff --git a/docs/release-notes/breaking-changes.md b/docs/release-notes/breaking-changes.md new file mode 100644 index 000000000..9758cf808 --- /dev/null +++ b/docs/release-notes/breaking-changes.md @@ -0,0 +1,28 @@ +--- +navigation_title: "Elasticsearch JavaScript Client" +--- + +# Elasticsearch JavaScript Client breaking changes [elasticsearch-javascript-client-breaking-changes] +Before you upgrade, carefully review the Elasticsearch JavaScript Client breaking changes and take the necessary steps to mitigate any issues. + +To learn how to upgrade, check out . + +% ## Next version [elasticsearch-javascript-client-versionnext-breaking-changes] +% **Release date:** Month day, year + +% ::::{dropdown} Title of breaking change +% Description of the breaking change. +% For more information, check [PR #](PR link). +% **Impact**
    Impact of the breaking change. +% **Action**
    Steps for mitigating the breaking change.
+% ::::
+
+% ## 9.0.0 [elasticsearch-javascript-client-900-breaking-changes]
+% **Release date:** March 25, 2025
+
+% ::::{dropdown} Title of breaking change
+% Description of the breaking change.
+% For more information, check [PR #](PR link).
+% **Impact**&#13;
    Impact of the breaking change. +% **Action**
    Steps for mitigating the breaking change.
+% ::::
\ No newline at end of file
diff --git a/docs/release-notes/deprecations.md b/docs/release-notes/deprecations.md
new file mode 100644
index 000000000..fef8650dd
--- /dev/null
+++ b/docs/release-notes/deprecations.md
@@ -0,0 +1,28 @@
+---
+navigation_title: "Elasticsearch JavaScript Client"
+---
+
+# Elasticsearch JavaScript Client deprecations [elasticsearch-javascript-client-deprecations]
+Review the deprecated functionality for your Elasticsearch JavaScript Client version. While deprecations have no immediate impact, we strongly encourage you to update your implementation after you upgrade.
+
+To learn how to upgrade, check out .
+
+% ## Next version
+% **Release date:** Month day, year
+
+% ::::{dropdown} Deprecation title
+% Description of the deprecation.
+% For more information, check [PR #](PR link).
+% **Impact**&#13;
    Impact of deprecation. +% **Action**
    Steps for mitigating deprecation impact. +% :::: + +% ## 9.0.0 [elasticsearch-javascript-client-900-deprecations] +% **Release date:** March 25, 2025 + +% ::::{dropdown} Deprecation title +% Description of the deprecation. +% For more information, check [PR #](PR link). +% **Impact**
    Impact of deprecation. +% **Action**
    Steps for mitigating deprecation impact. +% :::: \ No newline at end of file diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md new file mode 100644 index 000000000..071841de1 --- /dev/null +++ b/docs/release-notes/index.md @@ -0,0 +1,27 @@ +--- +navigation_title: "Elasticsearch JavaScript Client" +--- + +# Elasticsearch JavaScript Client release notes [elasticsearch-javascript-client-release-notes] + +Review the changes, fixes, and more in each version of Elasticsearch JavaScript Client. + +To check for security updates, go to [Security announcements for the Elastic stack](https://discuss.elastic.co/c/announcements/security-announcements/31). + +% Release notes include only features, enhancements, and fixes. Add breaking changes, deprecations, and known issues to the applicable release notes sections. + +% ## version.next [elasticsearch-javascript-client-next-release-notes] +% **Release date:** Month day, year + +% ### Features and enhancements [elasticsearch-javascript-client-next-features-enhancements] +% * + +% ### Fixes [elasticsearch-javascript-client-next-fixes] +% * + +## 9.0.0 [elasticsearch-javascript-client-900-release-notes] +**Release date:** March 25, 2025 + +### Features and enhancements [elasticsearch-javascript-client-900-features-enhancements] + +### Fixes [elasticsearch-javascript-client-900-fixes] \ No newline at end of file diff --git a/docs/release-notes/known-issues.md b/docs/release-notes/known-issues.md new file mode 100644 index 000000000..16ca9fb3c --- /dev/null +++ b/docs/release-notes/known-issues.md @@ -0,0 +1,20 @@ +--- +navigation_title: "Elasticsearch JavaScript Client" + +--- + +# Elasticsearch JavaScript Client known issues [elasticsearch-javascript-client-known-issues] + +% Use the following template to add entries to this page. + +% :::{dropdown} Title of known issue +% **Details** +% On [Month/Day/Year], a known issue was discovered that [description of known issue]. + +% **Workaround** +% Workaround description. + +% **Resolved** +% On [Month/Day/Year], this issue was resolved. + +::: \ No newline at end of file diff --git a/docs/release-notes/toc.yml b/docs/release-notes/toc.yml new file mode 100644 index 000000000..a41006794 --- /dev/null +++ b/docs/release-notes/toc.yml @@ -0,0 +1,5 @@ +toc: + - file: index.md + - file: known-issues.md + - file: breaking-changes.md + - file: deprecations.md \ No newline at end of file diff --git a/docs/testing.asciidoc b/docs/testing.asciidoc deleted file mode 100644 index 35b937474..000000000 --- a/docs/testing.asciidoc +++ /dev/null @@ -1,158 +0,0 @@ -[[client-testing]] -=== Testing - -Testing is one of the most important parts of developing an application. -The client is very flexible when it comes to testing and is compatible with -most testing frameworks (such as https://www.npmjs.com/package/ava[`ava`], -which is used in the examples below). - -If you are using this client, you are most likely working with {es}, and one of -the first issues you face is how to test your application. A perfectly valid -solution is to use the real {es} instance for testing your application, but you -would be doing an integration test, while you want a unit test. There are many -ways to solve this problem, you could create the database with Docker, or use an -in-memory compatible one, but if you are writing unit tests that can be easily -parallelized this becomes quite uncomfortable. A different way of improving your -testing experience while doing unit tests is to use a mock. 
- -The client is designed to be easy to extend and adapt to your needs. Thanks to -its internal architecture it allows you to change some specific components while -keeping the rest of it working as usual. Each {es} official client is composed -of the following components: - -* `API layer`: every {es} API that you can call. -* `Transport`: a component that takes care of preparing a request before sending - it and handling all the retry and sniffing strategies. -* `ConnectionPool`: {es} is a cluster and might have multiple nodes, the - `ConnectionPool` takes care of them. -* `Serializer`: A class with all the serialization strategies, from the basic - JSON to the new line delimited JSON. -* `Connection`: The actual HTTP library. - -The best way to mock {es} with the official clients is to replace the -`Connection` component since it has very few responsibilities and it does not -interact with other internal components other than getting requests and -returning responses. - - -[discrete] -==== `@elastic/elasticsearch-mock` - -Writing each time a mock for your test can be annoying and error-prone, so we -have built a simple yet powerful mocking library specifically designed for this -client, and you can install it with the following command: - -[source,sh] ----- -npm install @elastic/elasticsearch-mock --save-dev ----- - -With this library you can create custom mocks for any request you can send to -{es}. It offers a simple and intuitive API and it mocks only the HTTP layer, -leaving the rest of the client working as usual. - -Before showing all of its features, and what you can do with it, let’s see an -example: - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const Mock = require('@elastic/elasticsearch-mock') - -const mock = new Mock() -const client = new Client({ - cloud: { id: '' }, - auth: { apiKey: 'base64EncodedKey' }, - Connection: mock.getConnection() -}) - -mock.add({ - method: 'GET', - path: '/' -}, () => { - return { status: 'ok' } -}) - -client.info().then(console.log, console.log) ----- - -As you can see it works closely with the client itself, once you have created a -new instance of the mock library you just need to call the mock.getConnection() -method and pass its result to the Connection option of the client. From now on, -every request is handled by the mock library, and the HTTP layer will never be -touched. As a result, your test is significantly faster and you are able to -easily parallelize them! - -The library allows you to write both “strict” and “loose” mocks, which means -that you can write a mock that handles a very specific request or be looser and -handle a group of request, let’s see this in action: - -[source,js] ----- -mock.add({ - method: 'POST', - path: '/indexName/_search' -}, () => { - return { - hits: { - total: { value: 1, relation: 'eq' }, - hits: [{ _source: { baz: 'faz' } }] - } - } -}) - -mock.add({ - method: 'POST', - path: '/indexName/_search', - body: { query: { match: { foo: 'bar' } } } -}, () => { - return { - hits: { - total: { value: 0, relation: 'eq' }, - hits: [] - } - } -}) ----- - -In the example above, every search request gets the first response, while every -search request that uses the query described in the second mock gets the second -response. 
- -You can also specify dynamic paths: - -[source,js] ----- -mock.add({ - method: 'GET', - path: '/:index/_count' -}, () => { - return { count: 42 } -}) - -client.count({ index: 'foo' }).then(console.log, console.log) // => { count: 42 } -client.count({ index: 'bar' }).then(console.log, console.log) // => { count: 42 } ----- - -And wildcards are supported as well. - -Another very interesting use case is the ability to create a test that randomly -fails to see how your code reacts to failures: - -[source,js] ----- -mock.add({ - method: 'GET', - path: '/:index/_count' -}, () => { - if (Math.random() > 0.8) { - return ResponseError({ body: {}, statusCode: 500 }) - } else { - return { count: 42 } - } -}) ----- - -We have seen how simple is mocking {es} and testing your application, you can -find many more features and examples in the -https://github.com/elastic/elasticsearch-js-mock[module documentation]. \ No newline at end of file diff --git a/docs/timeout-best-practices.asciidoc b/docs/timeout-best-practices.asciidoc deleted file mode 100644 index 5116034af..000000000 --- a/docs/timeout-best-practices.asciidoc +++ /dev/null @@ -1,8 +0,0 @@ -[[timeout-best-practices]] -=== Timeout best practices - -Starting in 9.0.0, this client is configured to not time out any HTTP request by default. {es} will always eventually respond to any request, even if it takes several minutes. Reissuing a request that it has not responded to yet can cause performance side effects. See the {ref}/modules-network.html#_http_client_configuration[official {es} recommendations for HTTP clients] for more information. - -Prior to 9.0, this client was configured by default to operate like many HTTP client libraries do, by using a relatively short (30 second) timeout on all requests sent to {es}, raising a `TimeoutError` when that time period elapsed without receiving a response. - -If your circumstances require you to set timeouts on Elasticsearch requests, setting the `requestTimeout` value to a millisecond value will cause this client to operate as it did prior to 9.0. diff --git a/docs/transport.asciidoc b/docs/transport.asciidoc deleted file mode 100644 index d32606b63..000000000 --- a/docs/transport.asciidoc +++ /dev/null @@ -1,73 +0,0 @@ -[[transport]] -=== Transport - -This class is responsible for performing the request to {es} and handling -errors, it also handles sniffing. - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const { Transport } = require('@elastic/transport') - -class MyTransport extends Transport { - request (params, options, callback) { - // your code - } -} - -const client = new Client({ - Transport: MyTransport -}) ----- - -Sometimes you need to inject a small snippet of your code and then continue to -use the usual client code. 
In such cases, call `super.method`: - -[source,js] ----- -class MyTransport extends Transport { - request (params, options, callback) { - // your code - return super.request(params, options, callback) - } -} ----- - -==== Supported content types - -Depending on the `content-type` of the response, the transport will return the body as different types: - -[cols="1,1"] -|=== -|Content-Type |JavaScript type - -|`application/json` -|`object` - -|`text/plain` -|`string` - -|`application/vnd.elasticsearch+json` -|`object` - -|`application/vnd.mapbox-vector-tile` -|`Buffer` - -|`application/vnd.apache.arrow.stream` -|`Buffer` - -|`application/vnd.elasticsearch+arrow+stream` -|`Buffer` - -|`application/smile` -|`Buffer` - -|`application/vnd.elasticsearch+smile` -|`Buffer` - -|`application/cbor` -|`Buffer` - -|`application/vnd.elasticsearch+cbor` -|`Buffer` -|=== diff --git a/package.json b/package.json index 0960ee05b..377ba7b6b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@elastic/elasticsearch", - "version": "9.0.0-alpha.4", + "version": "9.0.0-alpha.3", "versionCanary": "9.0.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index 9cc1582e1..b3dd631c1 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -35,133 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class AsyncSearch { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'async_search.delete': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'async_search.get': { - path: [ - 'id' - ], - body: [], - query: [ - 'keep_alive', - 'typed_keys', - 'wait_for_completion_timeout' - ] - }, - 'async_search.status': { - path: [ - 'id' - ], - body: [], - query: [ - 'keep_alive' - ] - }, - 'async_search.submit': { - path: [ - 'index' - ], - body: [ - 'aggregations', - 'aggs', - 'collapse', - 'explain', - 'ext', - 'from', - 'highlight', - 'track_total_hits', - 'indices_boost', - 'docvalue_fields', - 'knn', - 'min_score', - 'post_filter', - 'profile', - 'query', - 'rescore', - 'script_fields', - 'search_after', - 'size', - 'slice', - 'sort', - '_source', - 'fields', - 'suggest', - 'terminate_after', - 'timeout', - 'track_scores', - 'version', - 'seq_no_primary_term', - 'stored_fields', - 'pit', - 'runtime_mappings', - 'stats' - ], - query: [ - 'wait_for_completion_timeout', - 'keep_alive', - 'keep_on_completion', - 'allow_no_indices', - 'allow_partial_search_results', - 'analyzer', - 'analyze_wildcard', - 'batched_reduce_size', - 'ccs_minimize_roundtrips', - 'default_operator', - 'df', - 'docvalue_fields', - 'expand_wildcards', - 'explain', - 'ignore_throttled', - 'ignore_unavailable', - 'lenient', - 'max_concurrent_shard_requests', - 'preference', - 'request_cache', - 'routing', - 'search_type', - 'stats', - 'stored_fields', - 'suggest_field', - 'suggest_mode', - 'suggest_size', - 'suggest_text', - 'terminate_after', - 'timeout', - 'track_total_hits', - 'track_scores', - 'typed_keys', - 'rest_total_hits_as_int', - 'version', - '_source', - '_source_excludes', - '_source_includes', - 'seq_no_primary_term', - 'q', - 'size', - 'from', - 'sort' - ] - } 
- } } /** @@ -172,10 +51,7 @@ export default class AsyncSearch { async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['async_search.delete'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -217,10 +93,7 @@ export default class AsyncSearch { async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['async_search.get'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -262,10 +135,7 @@ export default class AsyncSearch { async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['async_search.status'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -307,12 +177,8 @@ export default class AsyncSearch { async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['async_search.submit'] - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -339,14 +205,8 @@ export default class AsyncSearch { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index e53887579..7f123c5a2 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -35,59 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Autoscaling { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'autoscaling.delete_autoscaling_policy': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'autoscaling.get_autoscaling_capacity': { - path: [], - body: [], - query: [ - 'master_timeout' - ] - }, - 'autoscaling.get_autoscaling_policy': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'autoscaling.put_autoscaling_policy': { - path: [ - 'name' - ], - body: [ - 'policy' - ], - query: [ - 'master_timeout', - 'timeout' - ] - } - } } /** @@ -98,10 +51,7 @@ export default class Autoscaling { async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['autoscaling.delete_autoscaling_policy'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -143,10 +93,7 @@ export default class Autoscaling { async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['autoscaling.get_autoscaling_capacity'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -186,10 +133,7 @@ export default class Autoscaling { async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['autoscaling.get_autoscaling_policy'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -231,12 +175,8 @@ export default class Autoscaling { async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['autoscaling.put_autoscaling_policy'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['policy'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -248,14 +188,8 @@ export default class Autoscaling { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index b7508e514..ccdedfcb2 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -35,37 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - bulk: { - path: [ - 'index' - ], - body: [ - 'operations' - ], - query: [ - 'include_source_on_error', - 'list_executed_pipelines', - 'pipeline', - 'refresh', - 'routing', - '_source', - '_source_excludes', - '_source_includes', - 'timeout', - 'wait_for_active_shards', - 'require_alias', - 'require_data_stream' - ] - } -} +interface That { transport: Transport } /** * Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. * To use the `index` action, you must have the `create`, `index`, or `write` index privilege. * To use the `delete` action, you must have the `delete` or `write` index privilege. * To use the `update` action, you must have the `index` or `write` index privilege. * To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. * To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\n optional_source\n action_and_meta_data\n optional_source\n .... 
action_and_meta_data\n optional_source\n ``` The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. A `create` action fails if a document with the same ID already exists in the target. An `index` action adds or replaces a document as necessary. NOTE: Data streams support only the `create` action. To update or delete a document in a data stream, you must target the backing index containing the document. An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. NOTE: The final line of data must end with a newline character (`\n`). Each newline character may be preceded by a carriage return (`\r`). When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible. There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default, so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. **Client support for bulk requests** Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: * Go: Check out `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` * PHP: Check out bulk indexing. **Submitting bulk requests with cURL** If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. The latter doesn't preserve newlines. For example: ``` $ cat requests { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} ``` **Optimistic concurrency control** Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.
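For example, a minimal sketch with this client (the index name, document ID, and sequence numbers below are illustrative, and `client` is assumed to be an already-instantiated `Client`):
```
// Illustrative only: re-index document '1' only if it has not been
// modified since it was read at seq_no 3 / primary_term 1.
const result = await client.bulk({
  operations: [
    { index: { _index: 'my-index', _id: '1', if_seq_no: 3, if_primary_term: 1 } },
    { user: 'kimchy' }
  ]
})
// A version conflict is reported per item rather than failing the whole request.
if (result.errors) console.log(result.items)
```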
The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. **Versioning** Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index or delete operation based on the `_version` mapping. It also supports the `version_type`. **Routing** Each bulk item can include the routing value using the `routing` field. It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Wait for active shards** When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. **Refresh** Control when the changes made by this request are visible to search. NOTE: Only the shards that receive the bulk request will be affected by refresh. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all. @@ -75,12 +45,8 @@ export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.bulk - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['operations'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -92,14 +58,8 @@ export default async function BulkApi = { - capabilities: { - path: [], - body: [], - query: [] - } -} +interface That { transport: Transport } /** * Checks if the specified combination of method, API, parameters, and arbitrary capabilities is supported @@ -56,10 +45,7 @@ export default async function CapabilitiesApi (this: That, params?: T.TODO, opti export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = acceptedParams.capabilities - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ?
{ ...userQuery } : {} diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index c163f4c8b..bc397b310 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -35,336 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} +interface That { transport: Transport } export default class Cat { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'cat.aliases': { - path: [ - 'name' - ], - body: [], - query: [ - 'h', - 's', - 'expand_wildcards', - 'master_timeout' - ] - }, - 'cat.allocation': { - path: [ - 'node_id' - ], - body: [], - query: [ - 'bytes', - 'h', - 's', - 'local', - 'master_timeout' - ] - }, - 'cat.component_templates': { - path: [ - 'name' - ], - body: [], - query: [ - 'h', - 's', - 'local', - 'master_timeout' - ] - }, - 'cat.count': { - path: [ - 'index' - ], - body: [], - query: [ - 'h', - 's' - ] - }, - 'cat.fielddata': { - path: [ - 'fields' - ], - body: [], - query: [ - 'bytes', - 'fields', - 'h', - 's' - ] - }, - 'cat.health': { - path: [], - body: [], - query: [ - 'time', - 'ts', - 'h', - 's' - ] - }, - 'cat.help': { - path: [], - body: [], - query: [] - }, - 'cat.indices': { - path: [ - 'index' - ], - body: [], - query: [ - 'bytes', - 'expand_wildcards', - 'health', - 'include_unloaded_segments', - 'pri', - 'time', - 'master_timeout', - 'h', - 's' - ] - }, - 'cat.master': { - path: [], - body: [], - query: [ - 'h', - 's', - 'local', - 'master_timeout' - ] - }, - 'cat.ml_data_frame_analytics': { - path: [ - 'id' - ], - body: [], - query: [ - 'allow_no_match', - 'bytes', - 'h', - 's', - 'time' - ] - }, - 'cat.ml_datafeeds': { - path: [ - 'datafeed_id' - ], - body: [], - query: [ - 'allow_no_match', - 'h', - 's', - 'time' - ] - }, - 'cat.ml_jobs': { - path: [ - 'job_id' - ], - body: [], - query: [ - 'allow_no_match', - 'bytes', - 'h', - 's', - 'time' - ] - }, - 'cat.ml_trained_models': { - path: [ - 'model_id' - ], - body: [], - query: [ - 'allow_no_match', - 'bytes', - 'h', - 's', - 'from', - 'size', - 'time' - ] - }, - 'cat.nodeattrs': { - path: [], - body: [], - query: [ - 'h', - 's', - 'local', - 'master_timeout' - ] - }, - 'cat.nodes': { - path: [], - body: [], - query: [ - 'bytes', - 'full_id', - 'include_unloaded_segments', - 'h', - 's', - 'master_timeout', - 'time' - ] - }, - 'cat.pending_tasks': { - path: [], - body: [], - query: [ - 'h', - 's', - 'local', - 'master_timeout', - 'time' - ] - }, - 'cat.plugins': { - path: [], - body: [], - query: [ - 'h', - 's', - 'include_bootstrap', - 'local', - 'master_timeout' - ] - }, - 'cat.recovery': { - path: [ - 'index' - ], - body: [], - query: [ - 'active_only', - 'bytes', - 'detailed', - 'index', - 'h', - 's', - 'time' - ] - }, - 'cat.repositories': { - path: [], - body: [], - query: [ - 'h', - 's', - 'local', - 'master_timeout' - ] - }, - 'cat.segments': { - path: [ - 'index' - ], - body: [], - query: [ - 'bytes', - 'h', - 's', - 'local', - 'master_timeout' - ] - }, - 'cat.shards': { - path: [ - 'index' - ], - body: [], - query: [ - 'bytes', - 'h', - 's', - 'master_timeout', - 'time' - ] - }, - 'cat.snapshots': { - path: [ - 'repository' - ], - body: [], - query: [ - 'ignore_unavailable', - 'h', - 's', - 'master_timeout', - 'time' - ] - }, - 'cat.tasks': { - path: [], - body: [], - query: [ - 'actions', - 'detailed', - 'nodes', - 'parent_task_id', - 'h', - 's', - 'time', - 'timeout', - 'wait_for_completion' - ] - }, - 
'cat.templates': { - path: [ - 'name' - ], - body: [], - query: [ - 'h', - 's', - 'local', - 'master_timeout' - ] - }, - 'cat.thread_pool': { - path: [ - 'thread_pool_patterns' - ], - body: [], - query: [ - 'h', - 's', - 'time', - 'local', - 'master_timeout' - ] - }, - 'cat.transforms': { - path: [ - 'transform_id' - ], - body: [], - query: [ - 'allow_no_match', - 'from', - 'h', - 's', - 'time', - 'size' - ] - } - } } /** @@ -375,10 +51,7 @@ export default class Cat { async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.aliases'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -428,10 +101,7 @@ export default class Cat { async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise> async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.allocation'] - + const acceptedPath: string[] = ['node_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -481,10 +151,7 @@ export default class Cat { async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.component_templates'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -534,10 +201,7 @@ export default class Cat { async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise> async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.count'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -587,10 +251,7 @@ export default class Cat { async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise> async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.fielddata'] - + const acceptedPath: string[] = ['fields'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -640,10 +301,7 @@ export default class Cat { async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.health'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -683,10 +341,7 @@ export default class Cat { async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise> async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.help'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -726,10 +381,7 @@ export default class Cat { async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.indices'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -779,10 +431,7 @@ export default class Cat { async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise> async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.master'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -822,10 +471,7 @@ export default class Cat { async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.ml_data_frame_analytics'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -875,10 +521,7 @@ export default class Cat { async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.ml_datafeeds'] - + const acceptedPath: string[] = ['datafeed_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -928,10 +571,7 @@ export default class Cat { async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.ml_jobs'] - + const acceptedPath: string[] = ['job_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -981,10 +621,7 @@ export default class Cat { async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.ml_trained_models'] - + const acceptedPath: string[] = ['model_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1034,10 +671,7 @@ export default class Cat { async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise> async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.nodeattrs'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1077,10 +711,7 @@ export default class Cat { async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise> async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.nodes'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1120,10 +751,7 @@ export default class Cat { async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.pending_tasks'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1163,10 +791,7 @@ export default class Cat { async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise> async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.plugins'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1206,10 +831,7 @@ export default class Cat { async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.recovery'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1259,10 +881,7 @@ export default class Cat { async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.repositories'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1302,10 +921,7 @@ export default class Cat { async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.segments'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1355,10 +971,7 @@ export default class Cat { async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.shards'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1408,10 +1021,7 @@ export default class Cat { async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.snapshots'] - + const acceptedPath: string[] = ['repository'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1461,10 +1071,7 @@ export default class Cat { async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.tasks'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1504,10 +1111,7 @@ export default class Cat { async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.templates'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1557,10 +1161,7 @@ export default class Cat { async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise> async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.thread_pool'] - + const acceptedPath: string[] = ['thread_pool_patterns'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1610,10 +1211,7 @@ export default class Cat { async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cat.transforms'] - + const acceptedPath: string[] = ['transform_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index dd704a23f..29455527c 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -35,185 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Ccr { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'ccr.delete_auto_follow_pattern': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'ccr.follow': { - path: [ - 'index' - ], - body: [ - 'data_stream_name', - 'leader_index', - 'max_outstanding_read_requests', - 'max_outstanding_write_requests', - 'max_read_request_operation_count', - 'max_read_request_size', - 'max_retry_delay', - 'max_write_buffer_count', - 'max_write_buffer_size', - 'max_write_request_operation_count', - 'max_write_request_size', - 'read_poll_timeout', - 'remote_cluster', - 'settings' - ], - query: [ - 'master_timeout', - 'wait_for_active_shards' - ] - }, - 'ccr.follow_info': { - path: [ - 'index' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'ccr.follow_stats': { - path: [ - 'index' - ], - body: [], - query: [ - 'timeout' - ] - }, - 'ccr.forget_follower': { - path: [ - 'index' - ], - body: [ - 'follower_cluster', - 'follower_index', - 'follower_index_uuid', - 'leader_remote_cluster' - ], - query: [ - 'timeout' - ] - }, - 'ccr.get_auto_follow_pattern': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'ccr.pause_auto_follow_pattern': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'ccr.pause_follow': { - path: [ - 'index' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'ccr.put_auto_follow_pattern': { - path: [ - 'name' - ], - body: [ - 'remote_cluster', - 'follow_index_pattern', - 'leader_index_patterns', - 'leader_index_exclusion_patterns', - 'max_outstanding_read_requests', - 'settings', - 'max_outstanding_write_requests', - 'read_poll_timeout', - 'max_read_request_operation_count', - 'max_read_request_size', - 'max_retry_delay', - 'max_write_buffer_count', - 'max_write_buffer_size', - 'max_write_request_operation_count', - 'max_write_request_size' - ], - query: [ - 'master_timeout' - ] - }, - 'ccr.resume_auto_follow_pattern': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'ccr.resume_follow': { - path: [ - 'index' - ], - body: [ - 'max_outstanding_read_requests', - 'max_outstanding_write_requests', - 'max_read_request_operation_count', - 'max_read_request_size', - 'max_retry_delay', - 'max_write_buffer_count', - 'max_write_buffer_size', - 'max_write_request_operation_count', - 'max_write_request_size', - 
'read_poll_timeout' - ], - query: [ - 'master_timeout' - ] - }, - 'ccr.stats': { - path: [], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'ccr.unfollow': { - path: [ - 'index' - ], - body: [], - query: [ - 'master_timeout' - ] - } - } } /** @@ -224,10 +51,7 @@ export default class Ccr { async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ccr.delete_auto_follow_pattern'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -269,12 +93,8 @@ export default class Ccr { async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ccr.follow'] - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['data_stream_name', 'leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster', 'settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -296,14 +116,8 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -326,10 +140,7 @@ export default class Ccr { async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ccr.follow_info'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -371,10 +182,7 @@ export default class Ccr { async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ccr.follow_stats'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -416,12 +224,8 @@ export default class Ccr { async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise> async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ccr.forget_follower'] - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['follower_cluster', 'follower_index', 'follower_index_uuid', 'leader_remote_cluster'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -443,14 +247,8 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -473,10 +271,7 @@ export default class Ccr { async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ccr.get_auto_follow_pattern'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -526,10 +321,7 @@ export default class Ccr { async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ccr.pause_auto_follow_pattern'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -571,10 +363,7 @@ export default class Ccr { async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ccr.pause_follow'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -616,12 +405,8 @@ export default class Ccr { async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ccr.put_auto_follow_pattern'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['remote_cluster', 'follow_index_pattern', 'leader_index_patterns', 'leader_index_exclusion_patterns', 'max_outstanding_read_requests', 'settings', 'max_outstanding_write_requests', 'read_poll_timeout', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -643,14 +428,8 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -673,10 +452,7 @@ export default class Ccr { async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ccr.resume_auto_follow_pattern'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -718,12 +494,8 @@ export default class Ccr { async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ccr.resume_follow'] - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -745,14 +517,8 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -775,10 +541,7 @@ export default class Ccr { async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ccr.stats'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -818,10 +581,7 @@ export default class Ccr { async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ccr.unfollow'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts index e78c05005..7b7258503 100644 --- a/src/api/api/clear_scroll.ts +++ b/src/api/api/clear_scroll.ts @@ -35,22 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - clear_scroll: { - path: [], - body: [ - 'scroll_id' - ], - query: [] - } -} +interface That { transport: Transport } /** * Clear a scrolling search. Clear the search context and results for a scrolling search. 
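As a usage sketch (the index name is hypothetical and `client` is assumed to be an already-instantiated `Client`):
```
// Illustrative only: open a scroll, consume it, then free the server-side
// search context instead of waiting for the scroll timeout to expire.
const first = await client.search({ index: 'my-index', scroll: '30s', query: { match_all: {} } })
// ...page through results with client.scroll({ scroll_id, scroll: '30s' }) as needed...
if (first._scroll_id != null) {
  await client.clearScroll({ scroll_id: first._scroll_id })
}
```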
@@ -60,12 +45,8 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.clear_scroll - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['scroll_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -88,14 +69,8 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts index 96d22ced1..26d5b0e26 100644 --- a/src/api/api/close_point_in_time.ts +++ b/src/api/api/close_point_in_time.ts @@ -35,22 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - close_point_in_time: { - path: [], - body: [ - 'id' - ], - query: [] - } -} +interface That { transport: Transport } /** * Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. @@ -60,12 +45,8 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.close_point_in_time - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,14 +68,8 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index a2e8e4495..730c942d2 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -35,202 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Cluster { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'cluster.allocation_explain': { - path: [], - body: [ - 'current_node', - 'index', - 'primary', - 'shard' - ], - query: [ - 'include_disk_info', - 'include_yes_decisions', - 'master_timeout' - ] - }, - 'cluster.delete_component_template': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'cluster.delete_voting_config_exclusions': { - path: [], - body: [], - query: [ - 'master_timeout', - 'wait_for_removal' - ] - }, - 'cluster.exists_component_template': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout', - 'local' - ] - }, - 'cluster.get_component_template': { - path: [ - 'name' - ], - body: [], - query: [ - 'flat_settings', - 'include_defaults', - 'local', - 'master_timeout' - ] - }, - 'cluster.get_settings': { - path: [], - body: [], - query: [ - 'flat_settings', - 'include_defaults', - 'master_timeout', - 'timeout' - ] - }, - 'cluster.health': { - path: [ - 'index' - ], - body: [], - query: [ - 'expand_wildcards', - 'level', - 'local', - 'master_timeout', - 'timeout', - 'wait_for_active_shards', - 'wait_for_events', - 'wait_for_nodes', - 'wait_for_no_initializing_shards', - 'wait_for_no_relocating_shards', - 'wait_for_status' - ] - }, - 'cluster.info': { - path: [ - 'target' - ], - body: [], - query: [] - }, - 'cluster.pending_tasks': { - path: [], - body: [], - query: [ - 'local', - 'master_timeout' - ] - }, - 'cluster.post_voting_config_exclusions': { - path: [], - body: [], - query: [ - 'node_names', - 'node_ids', - 'master_timeout', - 'timeout' - ] - }, - 'cluster.put_component_template': { - path: [ - 'name' - ], - body: [ - 'template', - 'version', - '_meta', - 'deprecated' - ], - query: [ - 'create', - 'master_timeout' - ] - }, - 'cluster.put_settings': { - path: [], - body: [ - 'persistent', - 'transient' - ], - query: [ - 'flat_settings', - 'master_timeout', - 'timeout' - ] - }, - 'cluster.remote_info': { - path: [], - body: [], - query: [] - }, - 'cluster.reroute': { - path: [], - body: [ - 'commands' - ], - query: [ - 'dry_run', - 'explain', - 'metric', - 'retry_failed', - 'master_timeout', - 'timeout' - ] - }, - 'cluster.state': { - path: [ - 'metric', - 'index' - ], - body: [], - query: [ - 'allow_no_indices', - 'expand_wildcards', - 'flat_settings', - 'ignore_unavailable', - 'local', - 'master_timeout', - 'wait_for_metadata_version', - 'wait_for_timeout' - ] - }, - 'cluster.stats': { - path: [ - 'node_id' - ], - body: [], - query: [ - 'include_remotes', - 'timeout' - ] - } - } } /** @@ -241,12 +51,8 @@ export default class Cluster { async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise> async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise async 
@@ -241,12 +51,8 @@ export default class Cluster {
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterAllocationExplainResponse, unknown>>
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise<T.ClusterAllocationExplainResponse>
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['cluster.allocation_explain']
-
+    const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['current_node', 'index', 'primary', 'shard']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -269,14 +75,8 @@ export default class Cluster {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
    }
 
@@ -296,10 +96,7 @@ export default class Cluster {
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterDeleteComponentTemplateResponse, unknown>>
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterDeleteComponentTemplateResponse>
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['cluster.delete_component_template']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -341,10 +138,7 @@ export default class Cluster {
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterDeleteVotingConfigExclusionsResponse, unknown>>
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<T.ClusterDeleteVotingConfigExclusionsResponse>
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['cluster.delete_voting_config_exclusions']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -384,10 +178,7 @@ export default class Cluster {
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterExistsComponentTemplateResponse, unknown>>
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterExistsComponentTemplateResponse>
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['cluster.exists_component_template']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} @@ -429,10 +220,7 @@ export default class Cluster { async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cluster.get_component_template'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -482,10 +270,7 @@ export default class Cluster { async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cluster.get_settings'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -525,10 +310,7 @@ export default class Cluster { async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cluster.health'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -578,10 +360,7 @@ export default class Cluster { async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cluster.info'] - + const acceptedPath: string[] = ['target'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -623,10 +402,7 @@ export default class Cluster { async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cluster.pending_tasks'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -666,10 +442,7 @@ export default class Cluster { async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cluster.post_voting_config_exclusions'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -709,12 +482,8 @@ export default class Cluster { async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['cluster.put_component_template'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['template', 'version', '_meta', 'deprecated'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -736,14 +505,8 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -766,12 +529,8 @@ export default class Cluster { async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['cluster.put_settings'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['persistent', 'transient'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -794,14 +553,8 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -821,10 +574,7 @@ export default class Cluster { async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cluster.remote_info'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -864,12 +614,8 @@ export default class Cluster { async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise> async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['cluster.reroute'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['commands'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -892,14 +638,8 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -919,10 +659,7 @@ export default class Cluster { async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise> async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cluster.state'] - + const acceptedPath: string[] = ['metric', 'index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -976,10 +713,7 @@ export default class Cluster { async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['cluster.stats'] - + const acceptedPath: string[] = ['node_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index 41cdb4316..141aa8002 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -35,342 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Connector { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'connector.check_in': { - path: [ - 'connector_id' - ], - body: [], - query: [] - }, - 'connector.delete': { - path: [ - 'connector_id' - ], - body: [], - query: [ - 'delete_sync_jobs', - 'hard' - ] - }, - 'connector.get': { - path: [ - 'connector_id' - ], - body: [], - query: [ - 'include_deleted' - ] - }, - 'connector.last_sync': { - path: [ - 'connector_id' - ], - body: [ - 'last_access_control_sync_error', - 'last_access_control_sync_scheduled_at', - 'last_access_control_sync_status', - 'last_deleted_document_count', - 'last_incremental_sync_scheduled_at', - 'last_indexed_document_count', - 'last_seen', - 'last_sync_error', - 'last_sync_scheduled_at', - 'last_sync_status', - 'last_synced', - 'sync_cursor' - ], - query: [] - }, - 'connector.list': { - path: [], - body: [], - query: [ - 'from', - 'size', - 'index_name', - 'connector_name', - 'service_type', - 'include_deleted', - 'query' - ] - }, - 'connector.post': { - path: [], - body: [ - 'description', - 'index_name', - 'is_native', - 'language', - 'name', - 'service_type' - ], - query: [] - }, - 'connector.put': { - path: [ - 'connector_id' - ], - body: [ - 'description', - 'index_name', - 'is_native', - 'language', - 'name', - 'service_type' - ], - query: [] - }, - 'connector.secret_delete': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'connector.secret_get': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'connector.secret_post': { - path: [], - body: [], - query: [] - }, - 'connector.secret_put': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'connector.sync_job_cancel': { - path: [ - 'connector_sync_job_id' - ], - body: [], - query: [] - }, - 'connector.sync_job_check_in': { - path: [ - 'connector_sync_job_id' - ], - body: [], - query: [] - }, - 'connector.sync_job_claim': { - path: [ - 'connector_sync_job_id' - ], - body: [ - 'sync_cursor', - 'worker_hostname' - ], - query: [] - }, - 'connector.sync_job_delete': { - path: [ - 'connector_sync_job_id' - ], - body: [], - query: [] - }, - 'connector.sync_job_error': { - path: [ - 'connector_sync_job_id' - ], - body: [ - 'error' - ], - query: [] - }, - 'connector.sync_job_get': { - path: [ - 'connector_sync_job_id' - ], - body: [], - query: [] - }, - 'connector.sync_job_list': { - path: [], - body: [], - query: [ - 'from', - 'size', - 'status', - 'connector_id', - 'job_type' - ] - }, - 'connector.sync_job_post': { - path: [], - body: [ - 'id', - 'job_type', - 'trigger_method' - ], - query: [] - }, - 'connector.sync_job_update_stats': { - path: [ - 'connector_sync_job_id' - ], - body: [ - 'deleted_document_count', - 'indexed_document_count', - 'indexed_document_volume', - 'last_seen', - 'metadata', - 'total_document_count' - ], - query: [] - }, - 'connector.update_active_filtering': { - path: [ - 'connector_id' - ], - body: [], - query: [] - }, - 'connector.update_api_key_id': { - path: [ - 'connector_id' - ], - body: [ - 
'api_key_id', - 'api_key_secret_id' - ], - query: [] - }, - 'connector.update_configuration': { - path: [ - 'connector_id' - ], - body: [ - 'configuration', - 'values' - ], - query: [] - }, - 'connector.update_error': { - path: [ - 'connector_id' - ], - body: [ - 'error' - ], - query: [] - }, - 'connector.update_features': { - path: [ - 'connector_id' - ], - body: [ - 'features' - ], - query: [] - }, - 'connector.update_filtering': { - path: [ - 'connector_id' - ], - body: [ - 'filtering', - 'rules', - 'advanced_snippet' - ], - query: [] - }, - 'connector.update_filtering_validation': { - path: [ - 'connector_id' - ], - body: [ - 'validation' - ], - query: [] - }, - 'connector.update_index_name': { - path: [ - 'connector_id' - ], - body: [ - 'index_name' - ], - query: [] - }, - 'connector.update_name': { - path: [ - 'connector_id' - ], - body: [ - 'name', - 'description' - ], - query: [] - }, - 'connector.update_native': { - path: [ - 'connector_id' - ], - body: [ - 'is_native' - ], - query: [] - }, - 'connector.update_pipeline': { - path: [ - 'connector_id' - ], - body: [ - 'pipeline' - ], - query: [] - }, - 'connector.update_scheduling': { - path: [ - 'connector_id' - ], - body: [ - 'scheduling' - ], - query: [] - }, - 'connector.update_service_type': { - path: [ - 'connector_id' - ], - body: [ - 'service_type' - ], - query: [] - }, - 'connector.update_status': { - path: [ - 'connector_id' - ], - body: [ - 'status' - ], - query: [] - } - } } /** @@ -381,10 +51,7 @@ export default class Connector { async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.check_in'] - + const acceptedPath: string[] = ['connector_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -426,10 +93,7 @@ export default class Connector { async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.delete'] - + const acceptedPath: string[] = ['connector_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -471,10 +135,7 @@ export default class Connector { async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.get'] - + const acceptedPath: string[] = ['connector_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -516,12 +177,8 @@ export default class Connector { async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.last_sync'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['last_access_control_sync_error', 'last_access_control_sync_scheduled_at', 'last_access_control_sync_status', 'last_deleted_document_count', 'last_incremental_sync_scheduled_at', 'last_indexed_document_count', 'last_seen', 'last_sync_error', 'last_sync_scheduled_at', 'last_sync_status', 'last_synced', 'sync_cursor'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -543,14 +200,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -573,10 +224,7 @@ export default class Connector { async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise> async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.list'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -616,12 +264,8 @@ export default class Connector { async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise> async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.post'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -644,14 +288,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -671,12 +309,8 @@ export default class Connector { async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise> async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.put'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -699,14 +333,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -735,10 +363,7 @@ export default class Connector { async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.secret_delete'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -779,10 +404,7 @@ export default class Connector { async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.secret_get'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -823,10 +445,7 @@ export default class Connector { async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.secret_post'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -864,10 +483,7 @@ export default class Connector { async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.secret_put'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -909,10 +525,7 @@ export default class Connector { async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.sync_job_cancel'] - + const acceptedPath: string[] = ['connector_sync_job_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -954,10 +567,7 @@ export default class Connector { async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.sync_job_check_in'] - + const acceptedPath: string[] = ['connector_sync_job_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -999,12 +609,8 @@ export default class Connector { async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.sync_job_claim'] - + const acceptedPath: string[] = ['connector_sync_job_id'] + const acceptedBody: string[] = ['sync_cursor', 'worker_hostname'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1026,14 +632,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1056,10 +656,7 @@ export default class Connector { async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.sync_job_delete'] - + const acceptedPath: string[] = ['connector_sync_job_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1101,12 +698,8 @@ export default class Connector { async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.sync_job_error'] - + const acceptedPath: string[] = ['connector_sync_job_id'] + const acceptedBody: string[] = ['error'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1128,14 +721,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1158,10 +745,7 @@ export default class Connector { async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.sync_job_get'] - + const acceptedPath: string[] = ['connector_sync_job_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1203,10 +787,7 @@ export default class Connector { async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.sync_job_list'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1246,12 +827,8 @@ export default class Connector { async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.sync_job_post'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['id', 'job_type', 'trigger_method'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1273,14 +850,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1300,12 +871,8 @@ export default class Connector { async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.sync_job_update_stats'] - + const acceptedPath: string[] = ['connector_sync_job_id'] + const acceptedBody: string[] = ['deleted_document_count', 'indexed_document_count', 'indexed_document_volume', 'last_seen', 'metadata', 'total_document_count'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1327,14 +894,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1357,10 +918,7 @@ export default class Connector { async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['connector.update_active_filtering'] - + const acceptedPath: string[] = ['connector_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1402,12 +960,8 @@ export default class Connector { async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_api_key_id'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['api_key_id', 'api_key_secret_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1429,14 +983,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1459,12 +1007,8 @@ export default class Connector { async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_configuration'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['configuration', 'values'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1486,14 +1030,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1516,12 +1054,8 @@ export default class Connector { async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_error'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['error'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1543,14 +1077,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1573,12 +1101,8 @@ export default class Connector { async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_features'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['features'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1600,14 +1124,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1630,12 +1148,8 @@ export default class Connector { async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_filtering'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['filtering', 'rules', 'advanced_snippet'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1657,14 +1171,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1687,12 +1195,8 @@ export default class Connector { async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_filtering_validation'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['validation'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1714,14 +1218,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1744,12 +1242,8 @@ export default class Connector { async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_index_name'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['index_name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1771,14 +1265,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1801,12 +1289,8 @@ export default class Connector { async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_name'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['name', 'description'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1828,14 +1312,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1858,12 +1336,8 @@ export default class Connector { async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_native'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['is_native'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1885,14 +1359,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1915,12 +1383,8 @@ export default class Connector { async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_pipeline'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['pipeline'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1942,14 +1406,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1972,12 +1430,8 @@ export default class Connector { async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_scheduling'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['scheduling'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1999,14 +1453,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2029,12 +1477,8 @@ export default class Connector { async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_service_type'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['service_type'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2056,14 +1500,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2086,12 +1524,8 @@ export default class Connector { async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['connector.update_status'] - + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['status'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2113,14 +1547,8 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
    }
  }
 
diff --git a/src/api/api/count.ts b/src/api/api/count.ts
index f86aa7690..6e060b369 100644
--- a/src/api/api/count.ts
+++ b/src/api/api/count.ts
@@ -35,39 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  count: {
-    path: [
-      'index'
-    ],
-    body: [
-      'query'
-    ],
-    query: [
-      'allow_no_indices',
-      'analyzer',
-      'analyze_wildcard',
-      'default_operator',
-      'df',
-      'expand_wildcards',
-      'ignore_throttled',
-      'ignore_unavailable',
-      'lenient',
-      'min_score',
-      'preference',
-      'routing',
-      'terminate_after',
-      'q'
-    ]
-  }
-}
+interface That { transport: Transport }
 
 /**
 * Count search results. Get the number of documents matching a query. The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count.
@@ -77,12 +45,8 @@ export default async function CountApi (this: That, params?: T.CountRequest, opt
 export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CountResponse, unknown>>
 export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise<T.CountResponse>
 export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.count
-
+  const acceptedPath: string[] = ['index']
+  const acceptedBody: string[] = ['query']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -105,14 +69,8 @@ export default async function CountApi (this: That, params?: T.CountRequest, opt
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
    }
  }
 
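At the call site, the count refactor means a request's keys now split three ways: `index` becomes part of the URL path, `query` (the one accepted body field) is serialized into the request body, and everything else rides on the query string. A usage sketch; the node URL and index name are placeholders:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder address

// Sends POST /my-index/_count?ignore_unavailable=true with
// { "query": { "match": { "user": "elastic" } } } as the request body.
const response = await client.count({
  index: 'my-index',
  query: { match: { user: 'elastic' } },
  ignore_unavailable: true
})
console.log(response.count)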
You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs Using `_create` guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with a same ID already exists in the index. To update an existing document, you must use the `//_doc/` API. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. 
If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter. Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index called `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. @@ -72,12 +45,8 @@ export default async function CreateApi (this: That, params export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptions): Promise export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.create - + const acceptedPath: string[] = ['id', 'index'] + const acceptedBody: string[] = ['document'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -89,14 +58,8 @@ export default async function CreateApi (this: That, params } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index 57ff08d00..e8dc5399d 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -35,46 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} +interface That { transport: Transport } export default class DanglingIndices { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'dangling_indices.delete_dangling_index': { - path: [ - 'index_uuid' - ], - body: [], - query: [ - 'accept_data_loss', - 'master_timeout', - 'timeout' - ] - }, - 'dangling_indices.import_dangling_index': { - path: [ - 'index_uuid' - ], - body: [], - query: [ - 'accept_data_loss', - 'master_timeout', - 'timeout' - ] - }, - 'dangling_indices.list_dangling_indices': { - path: [], - body: [], - query: [] - } - } } /** @@ -85,10 +51,7 @@ export default class DanglingIndices { async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['dangling_indices.delete_dangling_index'] - + const acceptedPath: string[] = ['index_uuid'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -130,10 +93,7 @@ export default class DanglingIndices { async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['dangling_indices.import_dangling_index'] - + const acceptedPath: string[] = ['index_uuid'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -175,10 +135,7 @@ export default class DanglingIndices { async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['dangling_indices.list_dangling_indices'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts index 69d7fd9bd..63b4cf22b 100644 --- a/src/api/api/delete.ts +++ b/src/api/api/delete.ts @@ -35,30 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const acceptedParams: Record = { - delete: { - path: [ - 'id', - 'index' - ], - body: [], - query: [ - 'if_primary_term', - 'if_seq_no', - 'refresh', - 'routing', - 'timeout', - 'version', - 'version_type', - 'wait_for_active_shards' - ] - } -} +interface That { transport: Transport } /** * Delete a document. Remove a JSON document from the specified index. NOTE: You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document. **Optimistic concurrency control** Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Versioning** Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. **Routing** If routing is used during indexing, the routing value also needs to be specified to delete a document. If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 ``` This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified. **Distributed** The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. @@ -68,10 +45,7 @@ export default async function DeleteApi (this: That, params: T.DeleteRequest, op export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = acceptedParams.delete - + const acceptedPath: string[] = ['id', 'index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index 6842ec280..f99e09670 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -35,56 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - delete_by_query: { - path: [ - 'index' - ], - body: [ - 'max_docs', - 'query', - 'slice' - ], - query: [ - 'allow_no_indices', - 'analyzer', - 'analyze_wildcard', - 'conflicts', - 'default_operator', - 'df', - 'expand_wildcards', - 'from', - 'ignore_unavailable', - 'lenient', - 'max_docs', - 'preference', - 'refresh', - 'request_cache', - 'requests_per_second', - 'routing', - 'q', - 'scroll', - 'scroll_size', - 'search_timeout', - 'search_type', - 'slices', - 'sort', - 'stats', - 'terminate_after', - 'timeout', - 'version', - 'wait_for_active_shards', - 'wait_for_completion' - ] - } -} +interface That { transport: Transport } /** * Delete documents. Deletes documents that match the specified query. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `delete` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential backoff. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick; they are not rolled back. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query. **Throttling delete requests** To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to disable throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with slices only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. * Delete performance scales linearly across available resources with the number of slices. Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. **Cancel a delete by query operation** Any delete by query can be canceled using the task cancel API. For example: ``` POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel ``` The task ID can be found by using the get tasks API. Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. 
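As a hedged sketch of the knobs described above (`conflicts`, `requests_per_second`, and `slices`) as they surface on this client; the node URL, index name, and query are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local cluster

const response = await client.deleteByQuery({
  index: 'my-index-000001',
  conflicts: 'proceed',     // count version conflicts instead of halting
  requests_per_second: 500, // throttled as in the worked example above
  slices: 'auto',           // roughly one slice per shard
  query: { match: { 'user.id': 'elkbee' } }
})

// The response reports how many documents were removed and how many conflicted
console.log(response.deleted, response.version_conflicts)
```

For long-running deletes, passing `wait_for_completion: false` returns a task ID instead, which is the handle that the cancel and rethrottle APIs operate on.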
@@ -94,12 +45,8 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.delete_by_query - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['max_docs', 'query', 'slice'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -121,14 +68,8 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts index 23b331b7a..4da430635 100644 --- a/src/api/api/delete_by_query_rethrottle.ts +++ b/src/api/api/delete_by_query_rethrottle.ts @@ -35,22 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const acceptedParams: Record = { - delete_by_query_rethrottle: { - path: [ - 'task_id' - ], - body: [], - query: [ - 'requests_per_second' - ] - } -} +interface That { transport: Transport } /** * Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. @@ -60,10 +45,7 @@ export default async function DeleteByQueryRethrottleApi (this: That, params: T. export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = acceptedParams.delete_by_query_rethrottle - + const acceptedPath: string[] = ['task_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ?
{ ...userQuery } : {} diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts index 68949420a..e6519dffd 100644 --- a/src/api/api/delete_script.ts +++ b/src/api/api/delete_script.ts @@ -35,23 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const acceptedParams: Record = { - delete_script: { - path: [ - 'id' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - } -} +interface That { transport: Transport } /** * Delete a script or search template. Deletes a stored script or search template. @@ -61,10 +45,7 @@ export default async function DeleteScriptApi (this: That, params: T.DeleteScrip export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = acceptedParams.delete_script - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index 2a1dff7fc..ea301cac5 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -35,69 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Enrich { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'enrich.delete_policy': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'enrich.execute_policy': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout', - 'wait_for_completion' - ] - }, - 'enrich.get_policy': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'enrich.put_policy': { - path: [ - 'name' - ], - body: [ - 'geo_match', - 'match', - 'range' - ], - query: [ - 'master_timeout' - ] - }, - 'enrich.stats': { - path: [], - body: [], - query: [ - 'master_timeout' - ] - } - } } /** @@ -108,10 +51,7 @@ export default class Enrich { async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['enrich.delete_policy'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -153,10 +93,7 @@ export default class Enrich { async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['enrich.execute_policy'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -198,10 +135,7 @@ export default class Enrich { async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['enrich.get_policy'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -251,12 +185,8 @@ export default class Enrich { async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['enrich.put_policy'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['geo_match', 'match', 'range'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -278,14 +208,8 @@ export default class Enrich { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -308,10 +232,7 @@ export default class Enrich { async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['enrich.stats'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index e645386a6..9f490aca9 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -35,79 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Eql { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'eql.delete': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'eql.get': { - path: [ - 'id' - ], - body: [], - query: [ - 'keep_alive', - 'wait_for_completion_timeout' - ] - }, - 'eql.get_status': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'eql.search': { - path: [ - 'index' - ], - body: [ - 'query', - 'case_sensitive', - 'event_category_field', - 'tiebreaker_field', - 'timestamp_field', - 'fetch_size', - 'filter', - 'keep_alive', - 'keep_on_completion', - 'wait_for_completion_timeout', - 'allow_partial_search_results', - 'allow_partial_sequence_results', - 'size', - 'fields', - 'result_position', - 'runtime_mappings', - 'max_samples_per_key' - ], - query: [ - 'allow_no_indices', - 'allow_partial_search_results', - 'allow_partial_sequence_results', - 'expand_wildcards', - 'ignore_unavailable', - 'keep_alive', - 'keep_on_completion', - 'wait_for_completion_timeout' - ] - } - } } /** @@ -118,10 +51,7 @@ export default class Eql { async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['eql.delete'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -163,10 +93,7 @@ export default class Eql { async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise> async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['eql.get'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -208,10 +135,7 @@ export default class Eql { async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['eql.get_status'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -253,12 +177,8 @@ export default class Eql { async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise> async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['eql.search'] - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'allow_partial_search_results', 'allow_partial_sequence_results', 'size', 'fields', 'result_position', 'runtime_mappings', 'max_samples_per_key'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -280,14 +200,8 @@ export default class Eql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index d7bcfa0c1..d76ed6962 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -35,87 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Esql { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'esql.async_query': { - path: [], - body: [ - 'columnar', - 'filter', - 'locale', - 'params', - 'profile', - 'query', - 'tables', - 'include_ccs_metadata' - ], - query: [ - 'delimiter', - 'drop_null_columns', - 'format', - 'keep_alive', - 'keep_on_completion', - 'wait_for_completion_timeout' - ] - }, - 'esql.async_query_delete': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'esql.async_query_get': { - path: [ - 'id' - ], - body: [], - query: [ - 'drop_null_columns', - 'keep_alive', - 'wait_for_completion_timeout' - ] - }, - 'esql.async_query_stop': { - path: [ - 'id' - ], - body: [], - query: [ - 'drop_null_columns' - ] - }, - 'esql.query': { - path: [], - body: [ - 'columnar', - 'filter', - 'locale', - 'params', - 'profile', - 'query', - 'tables', - 'include_ccs_metadata' - ], - query: [ - 'format', - 'delimiter', - 'drop_null_columns' - ] - } - } } /** @@ -126,12 +51,8 @@ export default class Esql { async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['esql.async_query'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 
'tables', 'include_ccs_metadata'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -153,14 +74,8 @@ export default class Esql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -180,10 +95,7 @@ export default class Esql { async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['esql.async_query_delete'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -225,10 +137,7 @@ export default class Esql { async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['esql.async_query_get'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -270,10 +179,7 @@ export default class Esql { async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['esql.async_query_stop'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -315,12 +221,8 @@ export default class Esql { async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['esql.query'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -342,14 +244,8 @@ export default class Esql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts index 50bb2b07f..0c5f99bde 100644 --- a/src/api/api/exists.ts +++ b/src/api/api/exists.ts @@ -35,32 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const acceptedParams: Record = { - exists: { - path: [ - 'id', - 'index' - ], - body: [], - query: [ - 'preference', - 'realtime', - 'refresh', - 'routing', - '_source', - '_source_excludes', - '_source_includes', - 'stored_fields', - 'version', - 'version_type' - ] - } -} +interface That { transport: Transport } /** * Check a document. Verify that a document exists. For example, check to see if a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the document exists, the API returns a status code of `200 - OK`. If the document doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You can use the `version` parameter to check the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. @@ -70,10 +45,7 @@ export default async function ExistsApi (this: That, params: T.ExistsRequest, op export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = acceptedParams.exists - + const acceptedPath: string[] = ['id', 'index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts index 44009ea87..750302a6f 100644 --- a/src/api/api/exists_source.ts +++ b/src/api/api/exists_source.ts @@ -35,31 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const acceptedParams: Record = { - exists_source: { - path: [ - 'id', - 'index' - ], - body: [], - query: [ - 'preference', - 'realtime', - 'refresh', - 'routing', - '_source', - '_source_excludes', - '_source_includes', - 'version', - 'version_type' - ] - } -} +interface That { transport: Transport } /** * Check for a document source. Check whether a document source exists in an index. For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not available if it is disabled in the mapping. 
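Both existence checks described above map to `HEAD` requests and resolve to plain booleans in this client. A minimal sketch, with the node URL, index, and IDs as illustrative assumptions:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local cluster

// HEAD my-index-000001/_doc/0 -> true only if the document exists
const docExists = await client.exists({ index: 'my-index-000001', id: '0' })

// HEAD my-index-000001/_source/1 -> false if _source is disabled in the mapping
const sourceExists = await client.existsSource({ index: 'my-index-000001', id: '1' })

console.log(docExists, sourceExists)
```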
@@ -69,10 +45,7 @@ export default async function ExistsSourceApi (this: That, params: T.ExistsSourc export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = acceptedParams.exists_source - + const acceptedPath: string[] = ['id', 'index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index 727c5dcbe..16150530b 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -35,38 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - explain: { - path: [ - 'id', - 'index' - ], - body: [ - 'query' - ], - query: [ - 'analyzer', - 'analyze_wildcard', - 'default_operator', - 'df', - 'lenient', - 'preference', - 'routing', - '_source', - '_source_excludes', - '_source_includes', - 'stored_fields', - 'q' - ] - } -} +interface That { transport: Transport } /** * Explain a document match result. Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document. @@ -76,12 +45,8 @@ export default async function ExplainApi (this: That, param export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise> export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.explain - + const acceptedPath: string[] = ['id', 'index'] + const acceptedBody: string[] = ['query'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -103,14 +68,8 @@ export default async function ExplainApi (this: That, param } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/features.ts b/src/api/api/features.ts index ee12f298a..670d84cda 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -35,33 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} +interface That { transport: Transport } export default class Features { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'features.get_features': { - path: [], - body: [], - query: [ - 'master_timeout' - ] - }, - 'features.reset_features': { - path: [], - body: [], - query: [ - 'master_timeout' - ] - } - } } /** @@ -72,10 +51,7 @@ export default class Features { async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['features.get_features'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -115,10 +91,7 @@ export default class Features { async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['features.reset_features'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index f863e52a3..de9d61a0e 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -35,35 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - field_caps: { - path: [ - 'index' - ], - body: [ - 'fields', - 'index_filter', - 'runtime_mappings' - ], - query: [ - 'allow_no_indices', - 'expand_wildcards', - 'fields', - 'ignore_unavailable', - 'include_unmapped', - 'filters', - 'types', - 'include_empty_fields' - ] - } -} +interface That { transport: Transport } /** * Get the field capabilities. Get information about the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. 
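A small sketch of the field capabilities call as it looks through this client; the index pattern and field names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local cluster

const caps = await client.fieldCaps({
  index: 'my-index-*',
  fields: ['rating', 'title'],
  include_unmapped: true // also report fields that exist in only some indices
})

// Capabilities are grouped per field and then per resolved type
console.log(caps.fields['rating'])
```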
@@ -73,12 +45,8 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.field_caps - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['fields', 'index_filter', 'runtime_mappings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -101,14 +69,8 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index d14151237..042fcbfd1 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -35,159 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Fleet { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'fleet.delete_secret': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'fleet.get_secret': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'fleet.global_checkpoints': { - path: [ - 'index' - ], - body: [], - query: [ - 'wait_for_advance', - 'wait_for_index', - 'checkpoints', - 'timeout' - ] - }, - 'fleet.msearch': { - path: [ - 'index' - ], - body: [ - 'searches' - ], - query: [ - 'allow_no_indices', - 'ccs_minimize_roundtrips', - 'expand_wildcards', - 'ignore_throttled', - 'ignore_unavailable', - 'max_concurrent_searches', - 'max_concurrent_shard_requests', - 'pre_filter_shard_size', - 'search_type', - 'rest_total_hits_as_int', - 'typed_keys', - 'wait_for_checkpoints', - 'allow_partial_search_results' - ] - }, - 'fleet.post_secret': { - path: [], - body: [], - query: [] - }, - 'fleet.search': { - path: [ - 'index' - ], - body: [ - 'aggregations', - 'aggs', - 'collapse', - 'explain', - 'ext', - 'from', - 'highlight', - 'track_total_hits', - 'indices_boost', - 'docvalue_fields', - 'min_score', - 'post_filter', - 'profile', - 'query', - 'rescore', - 'script_fields', - 'search_after', - 'size', - 'slice', - 'sort', - '_source', - 'fields', - 'suggest', - 'terminate_after', - 'timeout', - 'track_scores', - 'version', - 'seq_no_primary_term', - 'stored_fields', - 'pit', - 'runtime_mappings', - 'stats' - ], - query: [ - 'allow_no_indices', - 'analyzer', - 'analyze_wildcard', - 'batched_reduce_size', - 'ccs_minimize_roundtrips', - 'default_operator', - 'df', - 'docvalue_fields', - 'expand_wildcards', - 'explain', - 'ignore_throttled', - 'ignore_unavailable', - 'lenient', - 
'max_concurrent_shard_requests', - 'preference', - 'pre_filter_shard_size', - 'request_cache', - 'routing', - 'scroll', - 'search_type', - 'stats', - 'stored_fields', - 'suggest_field', - 'suggest_mode', - 'suggest_size', - 'suggest_text', - 'terminate_after', - 'timeout', - 'track_total_hits', - 'track_scores', - 'typed_keys', - 'rest_total_hits_as_int', - 'version', - '_source', - '_source_excludes', - '_source_includes', - 'seq_no_primary_term', - 'q', - 'size', - 'from', - 'sort', - 'wait_for_checkpoints', - 'allow_partial_search_results' - ] - } - } } /** @@ -197,10 +50,7 @@ export default class Fleet { async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['fleet.delete_secret'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -241,10 +91,7 @@ export default class Fleet { async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['fleet.get_secret'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -286,10 +133,7 @@ export default class Fleet { async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise> async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['fleet.global_checkpoints'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -331,12 +175,8 @@ export default class Fleet { async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise> async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['fleet.msearch'] - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['searches'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -348,14 +188,8 @@ export default class Fleet { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -384,10 +218,7 @@ export default class Fleet { async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['fleet.post_secret'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -426,12 +257,8 @@ export default class Fleet { async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise> async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['fleet.search'] - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -453,14 +280,8 @@ export default class Fleet { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/get.ts b/src/api/api/get.ts index cb55c656b..3cb82914a 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -35,33 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const acceptedParams: Record = { - get: { - path: [ - 'id', - 'index' - ], - body: [], - query: [ - 'force_synthetic_source', - 'preference', - 'realtime', - 'refresh', - 'routing', - '_source', - '_source_excludes', - '_source_includes', - 'stored_fields', - 'version', - 'version_type' - ] - } -} +interface That { transport: Transport } /** * Get a document by its ID. Get a document and its source or stored fields from an index. By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the `realtime` parameter to false. **Source filtering** By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. 
You can turn off `_source` retrieval by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false ``` If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead. Both parameters take a comma-separated list of fields or wildcard expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities ``` If you only want to specify includes, you can use a shorter notation: ``` GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified. **Distributed** The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be. **Versioning support** You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. @@ -71,10 +45,7 @@ export default async function GetApi (this: That, params: T export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise> export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = acceptedParams.get - + const acceptedPath: string[] = ['id', 'index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts index 694f144a6..d079ba650 100644 --- a/src/api/api/get_script.ts +++ b/src/api/api/get_script.ts @@ -35,22 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const acceptedParams: Record = { - get_script: { - path: [ - 'id' - ], - body: [], - query: [ - 'master_timeout' - ] - } -} +interface That { transport: Transport } /** * Get a script or search template. Retrieves a stored script or search template.
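Tying the retrieval options above together, a sketch of `get` with source filtering plus the stored-script lookup that the next hunk touches. The document shape, script ID, and other names are assumptions:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local cluster

interface MyDoc { [key: string]: unknown }

// Equivalent of GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
const doc = await client.get<MyDoc>({
  index: 'my-index-000001',
  id: '0',
  _source_includes: '*.id',
  _source_excludes: 'entities'
})
console.log(doc.found, doc._source)

// Fetch a stored script or search template by its ID
const script = await client.getScript({ id: 'my-search-template' })
console.log(script.script?.source)
```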
diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts
index 694f144a6..d079ba650 100644
--- a/src/api/api/get_script.ts
+++ b/src/api/api/get_script.ts
@@ -35,22 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  get_script: {
-    path: [
-      'id'
-    ],
-    body: [],
-    query: [
-      'master_timeout'
-    ]
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Get a script or search template. Retrieves a stored script or search template.
@@ -60,10 +45,7 @@ export default async function GetScriptApi (this: That, params: T.GetScriptReque
 export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptResponse, unknown>>
 export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise<T.GetScriptResponse>
 export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath
-  } = acceptedParams.get_script
-
+  const acceptedPath: string[] = ['id']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts
index a33514bbf..b263ed089 100644
--- a/src/api/api/get_script_context.ts
+++ b/src/api/api/get_script_context.ts
@@ -35,18 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  get_script_context: {
-    path: [],
-    body: [],
-    query: []
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Get script contexts. Get a list of supported script contexts and their methods.
@@ -56,10 +45,7 @@ export default async function GetScriptContextApi (this: That, params?: T.GetScr
 export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptContextResponse, unknown>>
 export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise<T.GetScriptContextResponse>
 export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath
-  } = acceptedParams.get_script_context
-
+  const acceptedPath: string[] = []
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts
index f8e7f14f5..7b52735c4 100644
--- a/src/api/api/get_script_languages.ts
+++ b/src/api/api/get_script_languages.ts
@@ -35,18 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  get_script_languages: {
-    path: [],
-    body: [],
-    query: []
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Get script languages. Get a list of available script types, languages, and contexts.
@@ -56,10 +45,7 @@ export default async function GetScriptLanguagesApi (this: That, params?: T.GetS
 export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetScriptLanguagesResponse, unknown>>
 export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<T.GetScriptLanguagesResponse>
 export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath
-  } = acceptedParams.get_script_languages
-
+  const acceptedPath: string[] = []
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts
index b9c7191b5..a4eef8c97 100644
--- a/src/api/api/get_source.ts
+++ b/src/api/api/get_source.ts
@@ -35,32 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  get_source: {
-    path: [
-      'id',
-      'index'
-    ],
-    body: [],
-    query: [
-      'preference',
-      'realtime',
-      'refresh',
-      'routing',
-      '_source',
-      '_source_excludes',
-      '_source_includes',
-      'stored_fields',
-      'version',
-      'version_type'
-    ]
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 ``` You can use the source filtering parameters to control which parts of the `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities ```
@@ -70,10 +45,7 @@ export default async function GetSourceApi (this: That, par
 export default async function GetSourceApi<TDocument = unknown> (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetSourceResponse<TDocument>, unknown>>
 export default async function GetSourceApi<TDocument = unknown> (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise<T.GetSourceResponse<TDocument>>
 export default async function GetSourceApi<TDocument = unknown> (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath
-  } = acceptedParams.get_source
-
+  const acceptedPath: string[] = ['id', 'index']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
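Likewise for the source-only variant documented above, a sketch under the same assumptions as the earlier example:

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })

    // GET my-index-000001/_source/1 resolves to the bare `_source` object,
    // without the `_index`/`_id`/`_version` envelope that `get` returns.
    const source = await client.getSource({ index: 'my-index-000001', id: '1' })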
diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts
index a509820dd..33534fe4a 100644
--- a/src/api/api/graph.ts
+++ b/src/api/api/graph.ts
@@ -35,36 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+interface That { transport: Transport }
 
 export default class Graph {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'graph.explore': {
-        path: [
-          'index'
-        ],
-        body: [
-          'connections',
-          'controls',
-          'query',
-          'vertices'
-        ],
-        query: [
-          'routing',
-          'timeout'
-        ]
-      }
-    }
   }
 
   /**
@@ -75,12 +51,8 @@ export default class Graph {
   async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GraphExploreResponse, unknown>>
   async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise<T.GraphExploreResponse>
   async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['graph.explore']
-
+    const acceptedPath: string[] = ['index']
+    const acceptedBody: string[] = ['connections', 'controls', 'query', 'vertices']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -102,14 +74,8 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
       }
     }
diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts
index 8a7539b04..51a48a265 100644
--- a/src/api/api/health_report.ts
+++ b/src/api/api/health_report.ts
@@ -35,24 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  health_report: {
-    path: [
-      'feature'
-    ],
-    body: [],
-    query: [
-      'timeout',
-      'verbose',
-      'size'
-    ]
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. The cluster’s status is controlled by the worst indicator status. In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic.
@@ -62,10 +45,7 @@ export default async function HealthReportApi (this: That, params?: T.HealthRepo
 export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.HealthReportResponse, unknown>>
 export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise<T.HealthReportResponse>
 export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath
-  } = acceptedParams.health_report
-
+  const acceptedPath: string[] = ['feature']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
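Because the doc comment above warns that the root-cause analysis is expensive when polled frequently, a polling sketch that turns it off; this assumes the client exposes the endpoint as `healthReport`, as 8.x clients do:

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })

    // For automated polling, verbose: false skips the costly diagnosis logic.
    const report = await client.healthReport({ verbose: false })
    console.log(report.status) // green | yellow | red | unknown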
diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts
index dd4537454..1c097071c 100644
--- a/src/api/api/ilm.ts
+++ b/src/api/api/ilm.ts
@@ -35,120 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+interface That { transport: Transport }
 
 export default class Ilm {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'ilm.delete_lifecycle': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'ilm.explain_lifecycle': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'only_errors',
-          'only_managed',
-          'master_timeout'
-        ]
-      },
-      'ilm.get_lifecycle': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'ilm.get_status': {
-        path: [],
-        body: [],
-        query: []
-      },
-      'ilm.migrate_to_data_tiers': {
-        path: [],
-        body: [
-          'legacy_template_to_delete',
-          'node_attribute'
-        ],
-        query: [
-          'dry_run',
-          'master_timeout'
-        ]
-      },
-      'ilm.move_to_step': {
-        path: [
-          'index'
-        ],
-        body: [
-          'current_step',
-          'next_step'
-        ],
-        query: []
-      },
-      'ilm.put_lifecycle': {
-        path: [
-          'name'
-        ],
-        body: [
-          'policy'
-        ],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'ilm.remove_policy': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: []
-      },
-      'ilm.retry': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: []
-      },
-      'ilm.start': {
-        path: [],
-        body: [],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'ilm.stop': {
-        path: [],
-        body: [],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      }
-    }
   }
 
   /**
@@ -159,10 +51,7 @@ export default class Ilm {
   async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmDeleteLifecycleResponse, unknown>>
   async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise<T.IlmDeleteLifecycleResponse>
   async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ilm.delete_lifecycle']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -204,10 +93,7 @@
   async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmExplainLifecycleResponse, unknown>>
   async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise<T.IlmExplainLifecycleResponse>
   async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ilm.explain_lifecycle']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -249,10 +135,7 @@
   async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmGetLifecycleResponse, unknown>>
   async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise<T.IlmGetLifecycleResponse>
   async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ilm.get_lifecycle']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -302,10 +185,7 @@
   async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmGetStatusResponse, unknown>>
   async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise<T.IlmGetStatusResponse>
   async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ilm.get_status']
-
+    const acceptedPath: string[] = []
    const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -345,12 +225,8 @@
   async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmMigrateToDataTiersResponse, unknown>>
   async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise<T.IlmMigrateToDataTiersResponse>
   async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ilm.migrate_to_data_tiers']
-
+    const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['legacy_template_to_delete', 'node_attribute']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -373,14 +249,8 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
       }
     }
 
@@ -400,12 +270,8 @@
   async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmMoveToStepResponse, unknown>>
   async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<T.IlmMoveToStepResponse>
   async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ilm.move_to_step']
-
+    const acceptedPath: string[] = ['index']
+    const acceptedBody: string[] = ['current_step', 'next_step']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -427,14 +293,8 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
       }
     }
@@ -457,12 +317,8 @@
   async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmPutLifecycleResponse, unknown>>
   async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<T.IlmPutLifecycleResponse>
   async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ilm.put_lifecycle']
-
+    const acceptedPath: string[] = ['name']
+    const acceptedBody: string[] = ['policy']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -484,14 +340,8 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
     }
 
@@ -514,10 +364,7 @@
   async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmRemovePolicyResponse, unknown>>
   async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise<T.IlmRemovePolicyResponse>
   async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ilm.remove_policy']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -559,10 +406,7 @@
   async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmRetryResponse, unknown>>
   async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise<T.IlmRetryResponse>
   async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ilm.retry']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -604,10 +448,7 @@
   async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmStartResponse, unknown>>
   async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise<T.IlmStartResponse>
   async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ilm.start']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -647,10 +488,7 @@
   async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmStopResponse, unknown>>
   async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise<T.IlmStopResponse>
   async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ilm.stop']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/index.ts b/src/api/api/index.ts
index 20adfbc81..bcd3842eb 100644
--- a/src/api/api/index.ts
+++ b/src/api/api/index.ts
@@ -35,38 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  index: {
-    path: [
-      'id',
-      'index'
-    ],
-    body: [
-      'document'
-    ],
-    query: [
-      'if_primary_term',
-      'if_seq_no',
-      'include_source_on_error',
-      'op_type',
-      'pipeline',
-      'refresh',
-      'routing',
-      'timeout',
-      'version',
-      'version_type',
-      'wait_for_active_shards',
-      'require_alias'
-    ]
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Create or update a document in an index. Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. NOTE: You cannot use this API to send update requests for existing documents in a data stream. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. * To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams.
 * **Optimistic concurrency control** Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Routing** By default, shard placement, or routing, is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter. Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.
 * It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. **No operation (noop) updates** When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source. There isn't a definitive rule for when noop updates aren't acceptable. It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. **Versioning** Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks. When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external { "user": { "id": "elkbee" } } ``` In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
@@ -76,12 +45,8 @@ export default async function IndexApi (this: That, params:
 export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument>, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndexResponse, unknown>>
 export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument>, options?: TransportRequestOptions): Promise<T.IndexResponse>
 export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument>, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.index
-
+  const acceptedPath: string[] = ['id', 'index']
+  const acceptedBody: string[] = ['document']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -93,14 +58,8 @@
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
     }
   }
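The doc comment above bundles several write semantics into one place; a short sketch of the two conditional-write styles it describes (assumes a local cluster; ids and field names are illustrative):

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })

    // Optimistic concurrency control: re-index only if nobody changed the
    // document since we read it; otherwise Elasticsearch answers 409.
    const current = await client.get({ index: 'my-index-000001', id: '1' })
    await client.index({
      index: 'my-index-000001',
      id: '1',
      if_seq_no: current._seq_no,
      if_primary_term: current._primary_term,
      document: { user: { id: 'elkbee' } }
    })

    // External versioning: the write wins only while the supplied version is
    // strictly greater than the stored one.
    await client.index({
      index: 'my-index-000001',
      id: '2',
      version: 2,
      version_type: 'external',
      document: { user: { id: 'elkbee' } }
    })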
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
index 85cf78aea..8af3fb23d 100644
--- a/src/api/api/indices.ts
+++ b/src/api/api/indices.ts
@@ -35,834 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+interface That { transport: Transport }
 
 export default class Indices {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'indices.add_block': {
-        path: [
-          'index',
-          'block'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable',
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'indices.analyze': {
-        path: [
-          'index'
-        ],
-        body: [
-          'analyzer',
-          'attributes',
-          'char_filter',
-          'explain',
-          'field',
-          'filter',
-          'normalizer',
-          'text',
-          'tokenizer'
-        ],
-        query: []
-      },
-      'indices.cancel_migrate_reindex': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: []
-      },
-      'indices.clear_cache': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'fielddata',
-          'fields',
-          'ignore_unavailable',
-          'query',
-          'request'
-        ]
-      },
-      'indices.clone': {
-        path: [
-          'index',
-          'target'
-        ],
-        body: [
-          'aliases',
-          'settings'
-        ],
-        query: [
-          'master_timeout',
-          'timeout',
-          'wait_for_active_shards'
-        ]
-      },
-      'indices.close': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable',
-          'master_timeout',
-          'timeout',
-          'wait_for_active_shards'
-        ]
-      },
-      'indices.create': {
-        path: [
-          'index'
-        ],
-        body: [
-          'aliases',
-          'mappings',
-          'settings'
-        ],
-        query: [
-          'master_timeout',
-          'timeout',
-          'wait_for_active_shards'
-        ]
-      },
-      'indices.create_data_stream': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'indices.create_from': {
-        path: [
-          'source',
-          'dest'
-        ],
-        body: [
-          'create_from'
-        ],
-        query: []
-      },
-      'indices.data_streams_stats': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'expand_wildcards'
-        ]
-      },
-      'indices.delete': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable',
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'indices.delete_alias': {
-        path: [
-          'index',
-          'name'
-        ],
-        body: [],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'indices.delete_data_lifecycle': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'expand_wildcards',
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'indices.delete_data_stream': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'master_timeout',
-          'expand_wildcards'
-        ]
-      },
-      'indices.delete_index_template': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'indices.delete_template': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'indices.disk_usage': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'flush',
-          'ignore_unavailable',
-          'run_expensive_tasks'
-        ]
-      },
-      'indices.downsample': {
-        path: [
-          'index',
-          'target_index'
-        ],
-        body: [
-          'config'
-        ],
-        query: []
-      },
-      'indices.exists': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'flat_settings',
-          'ignore_unavailable',
-          'include_defaults',
-          'local'
-        ]
-      },
-      'indices.exists_alias': {
-        path: [
-          'name',
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable',
-          'master_timeout'
-        ]
-      },
-      'indices.exists_index_template': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'master_timeout'
-        ]
-      },
-      'indices.exists_template': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'flat_settings',
-          'local',
-          'master_timeout'
-        ]
-      },
-      'indices.explain_data_lifecycle': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'include_defaults',
-          'master_timeout'
-        ]
-      },
-      'indices.field_usage_stats': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable',
-          'fields',
-          'wait_for_active_shards'
-        ]
-      },
-      'indices.flush': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'force',
-          'ignore_unavailable',
-          'wait_if_ongoing'
-        ]
-      },
-      'indices.forcemerge': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'flush',
-          'ignore_unavailable',
-          'max_num_segments',
-          'only_expunge_deletes',
-          'wait_for_completion'
-        ]
-      },
-      'indices.get': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'flat_settings',
-          'ignore_unavailable',
-          'include_defaults',
-          'local',
-          'master_timeout',
-          'features'
-        ]
-      },
-      'indices.get_alias': {
-        path: [
-          'name',
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable',
-          'master_timeout'
-        ]
-      },
-      'indices.get_data_lifecycle': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'expand_wildcards',
-          'include_defaults',
-          'master_timeout'
-        ]
-      },
-      'indices.get_data_lifecycle_stats': {
-        path: [],
-        body: [],
-        query: []
-      },
-      'indices.get_data_stream': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'expand_wildcards',
-          'include_defaults',
-          'master_timeout',
-          'verbose'
-        ]
-      },
-      'indices.get_field_mapping': {
-        path: [
-          'fields',
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable',
-          'include_defaults'
-        ]
-      },
-      'indices.get_index_template': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'local',
-          'flat_settings',
-          'master_timeout',
-          'include_defaults'
-        ]
-      },
-      'indices.get_mapping': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable',
-          'local',
-          'master_timeout'
-        ]
-      },
-      'indices.get_migrate_reindex_status': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: []
-      },
-      'indices.get_settings': {
-        path: [
-          'index',
-          'name'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'flat_settings',
-          'ignore_unavailable',
-          'include_defaults',
-          'local',
-          'master_timeout'
-        ]
-      },
-      'indices.get_template': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'flat_settings',
-          'local',
-          'master_timeout'
-        ]
-      },
-      'indices.migrate_reindex': {
-        path: [],
-        body: [
-          'reindex'
-        ],
-        query: []
-      },
-      'indices.migrate_to_data_stream': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'indices.modify_data_stream': {
-        path: [],
-        body: [
-          'actions'
-        ],
-        query: []
-      },
-      'indices.open': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable',
-          'master_timeout',
-          'timeout',
-          'wait_for_active_shards'
-        ]
-      },
-      'indices.promote_data_stream': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'master_timeout'
-        ]
-      },
-      'indices.put_alias': {
-        path: [
-          'index',
-          'name'
-        ],
-        body: [
-          'filter',
-          'index_routing',
-          'is_write_index',
-          'routing',
-          'search_routing'
-        ],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'indices.put_data_lifecycle': {
-        path: [
-          'name'
-        ],
-        body: [
-          'data_retention',
-          'downsampling',
-          'enabled'
-        ],
-        query: [
-          'expand_wildcards',
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'indices.put_index_template': {
-        path: [
-          'name'
-        ],
-        body: [
-          'index_patterns',
-          'composed_of',
-          'template',
-          'data_stream',
-          'priority',
-          'version',
-          '_meta',
-          'allow_auto_create',
-          'ignore_missing_component_templates',
-          'deprecated'
-        ],
-        query: [
-          'create',
-          'master_timeout',
-          'cause'
-        ]
-      },
-      'indices.put_mapping': {
-        path: [
-          'index'
-        ],
-        body: [
-          'date_detection',
-          'dynamic',
-          'dynamic_date_formats',
-          'dynamic_templates',
-          '_field_names',
-          '_meta',
-          'numeric_detection',
-          'properties',
-          '_routing',
-          '_source',
-          'runtime'
-        ],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable',
-          'master_timeout',
-          'timeout',
-          'write_index_only'
-        ]
-      },
-      'indices.put_settings': {
-        path: [
-          'index'
-        ],
-        body: [
-          'settings'
-        ],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'flat_settings',
-          'ignore_unavailable',
-          'master_timeout',
-          'preserve_existing',
-          'timeout'
-        ]
-      },
-      'indices.put_template': {
-        path: [
-          'name'
-        ],
-        body: [
-          'aliases',
-          'index_patterns',
-          'mappings',
-          'order',
-          'settings',
-          'version'
-        ],
-        query: [
-          'create',
-          'master_timeout',
-          'order',
-          'cause'
-        ]
-      },
-      'indices.recovery': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'active_only',
-          'detailed'
-        ]
-      },
-      'indices.refresh': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable'
-        ]
-      },
-      'indices.reload_search_analyzers': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable'
-        ]
-      },
-      'indices.resolve_cluster': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_throttled',
-          'ignore_unavailable',
-          'timeout'
-        ]
-      },
-      'indices.resolve_index': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'expand_wildcards',
-          'ignore_unavailable',
-          'allow_no_indices'
-        ]
-      },
-      'indices.rollover': {
-        path: [
-          'alias',
-          'new_index'
-        ],
-        body: [
-          'aliases',
-          'conditions',
-          'mappings',
-          'settings'
-        ],
-        query: [
-          'dry_run',
-          'master_timeout',
-          'timeout',
-          'wait_for_active_shards'
-        ]
-      },
-      'indices.segments': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable'
-        ]
-      },
-      'indices.shard_stores': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: [
-          'allow_no_indices',
-          'expand_wildcards',
-          'ignore_unavailable',
-          'status'
-        ]
-      },
-      'indices.shrink': {
-        path: [
-          'index',
-          'target'
-        ],
-        body: [
-          'aliases',
-          'settings'
-        ],
-        query: [
-          'master_timeout',
-          'timeout',
-          'wait_for_active_shards'
-        ]
-      },
-      'indices.simulate_index_template': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: [
-          'master_timeout',
-          'include_defaults'
-        ]
-      },
-      'indices.simulate_template': {
-        path: [
-          'name'
-        ],
-        body: [
-          'allow_auto_create',
-          'index_patterns',
-          'composed_of',
-          'template',
-          'data_stream',
-          'priority',
-          'version',
-          '_meta',
-          'ignore_missing_component_templates',
-          'deprecated'
-        ],
-        query: [
-          'create',
-          'master_timeout',
-          'include_defaults'
-        ]
-      },
-      'indices.split': {
-        path: [
-          'index',
-          'target'
-        ],
-        body: [
-          'aliases',
-          'settings'
-        ],
-        query: [
-          'master_timeout',
-          'timeout',
-          'wait_for_active_shards'
-        ]
-      },
-      'indices.stats': {
-        path: [
-          'metric',
-          'index'
-        ],
-        body: [],
-        query: [
-          'completion_fields',
-          'expand_wildcards',
-          'fielddata_fields',
-          'fields',
-          'forbid_closed_indices',
-          'groups',
-          'include_segment_file_sizes',
-          'include_unloaded_segments',
-          'level'
-        ]
-      },
-      'indices.update_aliases': {
-        path: [],
-        body: [
-          'actions'
-        ],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'indices.validate_query': {
-        path: [
-          'index'
-        ],
-        body: [
-          'query'
-        ],
-        query: [
-          'allow_no_indices',
-          'all_shards',
-          'analyzer',
-          'analyze_wildcard',
-          'default_operator',
-          'df',
-          'expand_wildcards',
-          'explain',
-          'ignore_unavailable',
-          'lenient',
-          'rewrite',
-          'q'
-        ]
-      }
-    }
   }
 
   /**
@@ -873,10 +51,7 @@ export default class Indices {
   async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAddBlockResponse, unknown>>
   async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise<T.IndicesAddBlockResponse>
   async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.add_block']
-
+    const acceptedPath: string[] = ['index', 'block']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -919,12 +94,8 @@
   async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAnalyzeResponse, unknown>>
   async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<T.IndicesAnalyzeResponse>
   async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['indices.analyze']
-
+    const acceptedPath: string[] = ['index']
+    const acceptedBody: string[] = ['analyzer', 'attributes', 'char_filter', 'explain', 'field', 'filter', 'normalizer', 'text', 'tokenizer']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -947,14 +118,8 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
       }
     }
 
@@ -984,10 +149,7 @@
   async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCancelMigrateReindexResponse, unknown>>
   async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise<T.IndicesCancelMigrateReindexResponse>
   async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.cancel_migrate_reindex']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1029,10 +191,7 @@
   async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesClearCacheResponse, unknown>>
   async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise<T.IndicesClearCacheResponse>
   async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.clear_cache']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1082,12 +241,8 @@
   async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloneResponse, unknown>>
   async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise<T.IndicesCloneResponse>
   async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['indices.clone']
-
+    const acceptedPath: string[] = ['index', 'target']
+    const acceptedBody: string[] = ['aliases', 'settings']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1109,14 +264,8 @@
       } else if (acceptedPath.includes(key)) {
        continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
       }
     }
 
@@ -1140,10 +289,7 @@
   async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloseResponse, unknown>>
   async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise<T.IndicesCloseResponse>
   async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.close']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1185,12 +331,8 @@
   async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCreateResponse, unknown>>
   async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise<T.IndicesCreateResponse>
   async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['indices.create']
-
+    const acceptedPath: string[] = ['index']
+    const acceptedBody: string[] = ['aliases', 'mappings', 'settings']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1212,14 +354,8 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
     }
 
@@ -1242,10 +378,7 @@
   async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCreateDataStreamResponse, unknown>>
   async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesCreateDataStreamResponse>
   async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.create_data_stream']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1287,12 +420,8 @@
   async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCreateFromResponse, unknown>>
   async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise<T.IndicesCreateFromResponse>
   async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['indices.create_from']
-
+    const acceptedPath: string[] = ['source', 'dest']
+    const acceptedBody: string[] = ['create_from']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1304,14 +433,8 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
     }
@@ -1335,10 +458,7 @@
   async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDataStreamsStatsResponse, unknown>>
   async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise<T.IndicesDataStreamsStatsResponse>
   async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.data_streams_stats']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1388,10 +508,7 @@
   async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteResponse, unknown>>
   async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteResponse>
   async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.delete']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1433,10 +550,7 @@
   async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteAliasResponse, unknown>>
   async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteAliasResponse>
   async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.delete_alias']
-
+    const acceptedPath: string[] = ['index', 'name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1486,10 +600,7 @@
   async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteDataLifecycleResponse, unknown>>
   async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteDataLifecycleResponse>
   async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.delete_data_lifecycle']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1531,10 +642,7 @@
   async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteDataStreamResponse, unknown>>
   async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteDataStreamResponse>
   async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.delete_data_stream']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1576,10 +684,7 @@
   async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteIndexTemplateResponse, unknown>>
   async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteIndexTemplateResponse>
   async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.delete_index_template']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1621,10 +726,7 @@
   async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteTemplateResponse, unknown>>
   async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteTemplateResponse>
   async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.delete_template']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1666,10 +768,7 @@
   async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDiskUsageResponse, unknown>>
   async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise<T.IndicesDiskUsageResponse>
   async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.disk_usage']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1711,12 +810,8 @@
   async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDownsampleResponse, unknown>>
   async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise<T.IndicesDownsampleResponse>
   async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['indices.downsample']
-
+    const acceptedPath: string[] = ['index', 'target_index']
+    const acceptedBody: string[] = ['config']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1728,14 +823,8 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
     }
@@ -1759,10 +848,7 @@
   async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsResponse, unknown>>
   async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsResponse>
   async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.exists']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1804,10 +890,7 @@
   async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsAliasResponse, unknown>>
   async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsAliasResponse>
   async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.exists_alias']
-
+    const acceptedPath: string[] = ['name', 'index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1857,10 +940,7 @@
   async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsIndexTemplateResponse, unknown>>
   async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsIndexTemplateResponse>
   async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.exists_index_template']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1902,10 +982,7 @@
   async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExistsTemplateResponse, unknown>>
   async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesExistsTemplateResponse>
   async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.exists_template']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1947,10 +1024,7 @@
   async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesExplainDataLifecycleResponse, unknown>>
   async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise<T.IndicesExplainDataLifecycleResponse>
   async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['indices.explain_data_lifecycle']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
{ ...userQuery } : {} @@ -1992,10 +1066,7 @@ export default class Indices { async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.field_usage_stats'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2037,10 +1108,7 @@ export default class Indices { async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise> async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.flush'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2090,10 +1158,7 @@ export default class Indices { async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise> async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.forcemerge'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2143,10 +1208,7 @@ export default class Indices { async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.get'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2188,10 +1250,7 @@ export default class Indices { async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.get_alias'] - + const acceptedPath: string[] = ['name', 'index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2248,10 +1307,7 @@ export default class Indices { async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.get_data_lifecycle'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2293,10 +1349,7 @@ export default class Indices { async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.get_data_lifecycle_stats'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2336,10 +1389,7 @@ export default class Indices { async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.get_data_stream'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2389,10 +1439,7 @@ export default class Indices { async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.get_field_mapping'] - + const acceptedPath: string[] = ['fields', 'index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2442,10 +1489,7 @@ export default class Indices { async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.get_index_template'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2495,10 +1539,7 @@ export default class Indices { async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.get_mapping'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2548,10 +1589,7 @@ export default class Indices { async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.get_migrate_reindex_status'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2593,10 +1631,7 @@ export default class Indices { async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.get_settings'] - + const acceptedPath: string[] = ['index', 'name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2653,10 +1688,7 @@ export default class Indices { async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.get_template'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2706,12 +1738,8 @@ export default class Indices { async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.migrate_reindex'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['reindex'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2723,14 +1751,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2750,10 +1772,7 @@ export default class Indices { async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.migrate_to_data_stream'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2795,12 +1814,8 @@ export default class Indices { async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.modify_data_stream'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['actions'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2822,14 +1837,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2849,10 +1858,7 @@ export default class Indices { async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.open'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2894,10 +1900,7 @@ export default class Indices { async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.promote_data_stream'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2939,12 +1942,8 @@ export default class Indices { async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.put_alias'] - + const acceptedPath: string[] = ['index', 'name'] + const acceptedBody: string[] = ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2966,14 +1965,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3004,12 +1997,8 @@ export default class Indices { async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.put_data_lifecycle'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['data_retention', 'downsampling', 'enabled'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3031,14 +2020,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3061,12 +2044,8 @@ export default class Indices { async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.put_index_template'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'allow_auto_create', 'ignore_missing_component_templates', 'deprecated'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3088,14 +2067,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3118,12 +2091,8 @@ export default class Indices { async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.put_mapping'] - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['date_detection', 'dynamic', 'dynamic_date_formats', 'dynamic_templates', '_field_names', '_meta', 'numeric_detection', 'properties', '_routing', '_source', 'runtime'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3145,14 +2114,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3175,12 +2138,8 @@ export default class Indices { async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.put_settings'] - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3192,14 +2151,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3229,12 +2182,8 @@ export default class Indices { async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.put_template'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'order', 'settings', 'version'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3256,14 +2205,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3286,10 +2229,7 @@ export default class Indices { async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.recovery'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3339,10 +2279,7 @@ export default class Indices { async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise> async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.refresh'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3392,10 +2329,7 @@ export default class Indices { async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise> async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.reload_search_analyzers'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3437,10 +2371,7 @@ export default class Indices { async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise> async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.resolve_cluster'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3490,10 +2421,7 @@ export default class Indices { async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.resolve_index'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3535,12 +2463,8 @@ export default class Indices { async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise> async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.rollover'] - + const acceptedPath: string[] = ['alias', 'new_index'] + const acceptedBody: string[] = ['aliases', 'conditions', 'mappings', 'settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3562,14 +2486,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3600,10 +2518,7 @@ export default class Indices { async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.segments'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3653,10 +2568,7 @@ export default class Indices { async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise> async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.shard_stores'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3706,12 +2618,8 @@ export default class Indices { async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise> async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.shrink'] - + const acceptedPath: string[] = ['index', 'target'] + const acceptedBody: string[] = ['aliases', 'settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3733,14 +2641,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3764,10 +2666,7 @@ export default class Indices { async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.simulate_index_template'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3809,12 +2708,8 @@ export default class Indices { async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.simulate_template'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['allow_auto_create', 'index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'ignore_missing_component_templates', 'deprecated'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3837,14 +2732,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3874,12 +2763,8 @@ export default class Indices { async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise> async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.split'] - + const acceptedPath: string[] = ['index', 'target'] + const acceptedBody: string[] = ['aliases', 'settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3901,14 +2786,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3932,10 +2811,7 @@ export default class Indices { async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['indices.stats'] - + const acceptedPath: string[] = ['metric', 'index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3992,12 +2868,8 @@ export default class Indices { async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.update_aliases'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['actions'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4020,14 +2892,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4047,12 +2913,8 @@ export default class Indices { async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['indices.validate_query'] - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['query'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4075,14 +2937,8 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 6b3309021..b7c9fb55a 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -35,103 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Inference { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'inference.delete': { - path: [ - 'task_type', - 'inference_id' - ], - body: [], - query: [ - 'dry_run', - 'force' - ] - }, - 'inference.get': { - path: [ - 'task_type', - 'inference_id' - ], - body: [], - query: [] - }, - 'inference.inference': { - path: [ - 'task_type', - 'inference_id' - ], - body: [ - 'query', - 'input', - 'task_settings' - ], - query: [ - 'timeout' - ] - }, - 'inference.put': { - path: [ - 'task_type', - 'inference_id' - ], - body: [ - 'inference_config' - ], - query: [] - }, - 'inference.stream_inference': { - path: [ - 'inference_id', - 'task_type' - ], - body: [ - 'input' - ], - query: [] - }, - 'inference.unified_inference': { - path: [ - 'task_type', - 'inference_id' - ], - body: [ - 'messages', - 'model', - 'max_completion_tokens', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_p' - ], - query: [ - 'timeout' - ] - }, - 'inference.update': { - path: [ - 'inference_id', - 'task_type' - ], - body: [ - 'inference_config' - ], - query: [] - } - } } /** @@ -142,10 +51,7 @@ export default class Inference { async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['inference.delete'] - + const acceptedPath: string[] = ['task_type', 'inference_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -195,10 +101,7 @@ export default class Inference { async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['inference.get'] - + const acceptedPath: string[] = ['task_type', 'inference_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -252,12 +155,8 @@ export default class Inference { async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['inference.inference'] - + const acceptedPath: string[] = ['task_type', 'inference_id'] + const acceptedBody: string[] = ['query', 'input', 'task_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -279,14 +178,8 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -317,12 +210,8 @@ export default class Inference { async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise> async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['inference.put'] - + const acceptedPath: string[] = ['task_type', 'inference_id'] + const acceptedBody: string[] = ['inference_config'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -334,14 +223,8 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -372,12 +255,8 @@ export default class Inference { async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['inference.stream_inference'] - + const acceptedPath: string[] = ['inference_id', 'task_type'] + const acceptedBody: string[] = ['input'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -399,14 +278,8 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -437,12 +310,8 @@ export default class Inference { async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['inference.unified_inference'] - + const acceptedPath: string[] = ['task_type', 'inference_id'] + const acceptedBody: string[] = ['messages', 'model', 'max_completion_tokens', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -464,14 +333,8 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -502,12 +365,8 @@ export default class Inference { async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithMeta): Promise> async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['inference.update'] - + const acceptedPath: string[] = ['inference_id', 'task_type'] + const acceptedBody: string[] = ['inference_config'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -519,14 +378,8 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/info.ts b/src/api/api/info.ts index ebbdb0fac..1681fe6f3 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -35,18 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const acceptedParams: Record = { - info: { - path: [], - body: [], - query: [] - } -} +interface That { transport: Transport } /** * Get cluster info. Get basic build, version, and cluster information. 
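// The hunks above and below repeat one mechanical change: each generated API
// method drops the shared `this.acceptedParams` lookup and inlines its own
// `acceptedPath`/`acceptedBody` arrays, and any parameter that is neither a
// path segment nor a named body key is routed straight to the query string.
// A minimal, self-contained sketch of that routing rule follows; `routeParams`
// is a hypothetical helper written for this note only, not a function that
// exists in the client.

interface RoutedRequest {
  body: Record<string, any> | undefined
  querystring: Record<string, any>
}

function routeParams (
  params: Record<string, any>,
  acceptedPath: string[],
  acceptedBody: string[]
): RoutedRequest {
  // Seed the query string from any raw `querystring` the caller passed,
  // mirroring the generated code in the surrounding hunks.
  const userQuery = params?.querystring
  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
  let body: Record<string, any> | undefined
  for (const key in params) {
    if (acceptedBody.includes(key)) {
      // Named body keys are collected into the request body.
      body = body ?? {}
      body[key] = params[key]
    } else if (acceptedPath.includes(key)) {
      // Path keys are interpolated into the URL elsewhere; skip them here.
      continue
    } else if (key !== 'body' && key !== 'querystring') {
      // Everything else becomes a query-string parameter.
      querystring[key] = params[key]
    }
  }
  return { body, querystring }
}

// Example, mirroring the indices.putAlias hunk above:
//   routeParams(
//     { index: 'my-index', name: 'my-alias', is_write_index: true, pretty: true },
//     ['index', 'name'],
//     ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing'])
// yields body = { is_write_index: true } and querystring = { pretty: true }.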
@@ -56,10 +45,7 @@ export default async function InfoApi (this: That, params?: T.InfoRequest, optio export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = acceptedParams.info - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index 502b4c0cf..51ad39aff 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -35,142 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Ingest { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'ingest.delete_geoip_database': { - path: [ - 'id' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'ingest.delete_ip_location_database': { - path: [ - 'id' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'ingest.delete_pipeline': { - path: [ - 'id' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'ingest.geo_ip_stats': { - path: [], - body: [], - query: [] - }, - 'ingest.get_geoip_database': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'ingest.get_ip_location_database': { - path: [ - 'id' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'ingest.get_pipeline': { - path: [ - 'id' - ], - body: [], - query: [ - 'master_timeout', - 'summary' - ] - }, - 'ingest.processor_grok': { - path: [], - body: [], - query: [] - }, - 'ingest.put_geoip_database': { - path: [ - 'id' - ], - body: [ - 'name', - 'maxmind' - ], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'ingest.put_ip_location_database': { - path: [ - 'id' - ], - body: [ - 'configuration' - ], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'ingest.put_pipeline': { - path: [ - 'id' - ], - body: [ - '_meta', - 'description', - 'on_failure', - 'processors', - 'version', - 'deprecated' - ], - query: [ - 'master_timeout', - 'timeout', - 'if_version' - ] - }, - 'ingest.simulate': { - path: [ - 'id' - ], - body: [ - 'docs', - 'pipeline' - ], - query: [ - 'verbose' - ] - } - } } /** @@ -181,10 +51,7 @@ export default class Ingest { async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ingest.delete_geoip_database'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -226,10 +93,7 @@ export default class Ingest { async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ingest.delete_ip_location_database'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -271,10 +135,7 @@ export default class Ingest { async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ingest.delete_pipeline'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -316,10 +177,7 @@ export default class Ingest { async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ingest.geo_ip_stats'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -359,10 +217,7 @@ export default class Ingest { async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ingest.get_geoip_database'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -412,10 +267,7 @@ export default class Ingest { async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ingest.get_ip_location_database'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -465,10 +317,7 @@ export default class Ingest { async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ingest.get_pipeline'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -518,10 +367,7 @@ export default class Ingest { async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise> async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ingest.processor_grok'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -561,12 +407,8 @@ export default class Ingest { async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ingest.put_geoip_database'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['name', 'maxmind'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -588,14 +430,8 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -618,12 +454,8 @@ export default class Ingest { async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ingest.put_ip_location_database'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['configuration'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -635,14 +467,8 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -665,12 +491,8 @@ export default class Ingest { async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ingest.put_pipeline'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version', 'deprecated'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -692,14 +514,8 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -722,12 +538,8 @@ export default class Ingest { async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ingest.simulate'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['docs', 'pipeline'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -749,14 +561,8 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index a24519479..d1a319461 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -35,31 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - knn_search: { - path: [ - 'index' - ], - body: [ - '_source', - 'docvalue_fields', - 'stored_fields', - 'fields', - 'filter', - 'knn' - ], - query: [ - 'routing' - ] - } -} +interface That { transport: Transport } /** * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. 
Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. A kNN search response has the exact same structure as a search API response. However, certain sections have a meaning specific to kNN search: * The document `_score` is determined by the similarity between the query and document vector. * The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value.
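// A minimal usage sketch for the kNN search described above, assuming an index
// with a `dense_vector` field; the index name, field name, vector values, and
// filter are invented for illustration (the parameter names match the
// `acceptedPath`/`acceptedBody` lists in the hunk below):
//
//   import { Client } from '@elastic/elasticsearch'
//
//   const client = new Client({ node: 'http://localhost:9200/' })
//   const result = await client.knnSearch({
//     index: 'my-image-index',
//     knn: {
//       field: 'image_vector',          // the dense_vector field to search
//       query_vector: [0.3, 0.1, 1.2],
//       k: 10,                          // nearest neighbors to return
//       num_candidates: 100             // candidates examined per shard
//     },
//     filter: { term: { status: 'published' } },
//     _source: ['title']
//   })
//   // Hits are scored by vector similarity; hits.total counts the
//   // num_candidates * num_shards candidates that were considered.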
@@ -69,12 +45,8 @@ export default async function KnnSearchApi (this: That, par
 export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.KnnSearchResponse<TDocument>, unknown>>
 export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise<T.KnnSearchResponse<TDocument>>
 export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.knn_search
-
+  const acceptedPath: string[] = ['index']
+  const acceptedBody: string[] = ['_source', 'docvalue_fields', 'stored_fields', 'fields', 'filter', 'knn']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -96,14 +68,8 @@ export default async function KnnSearchApi (this: That, par
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/license.ts b/src/api/api/license.ts
index 15655585a..b80733dd9 100644
--- a/src/api/api/license.ts
+++ b/src/api/api/license.ts
@@ -35,77 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+interface That { transport: Transport }
 
 export default class License {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'license.delete': { path: [], body: [], query: ['master_timeout', 'timeout'] },
-      'license.get': { path: [], body: [], query: ['accept_enterprise', 'local'] },
-      'license.get_basic_status': { path: [], body: [], query: [] },
-      'license.get_trial_status': { path: [], body: [], query: [] },
-      'license.post': { path: [], body: ['license', 'licenses'], query: ['acknowledge', 'master_timeout', 'timeout'] },
-      'license.post_start_basic': { path: [], body: [], query: ['acknowledge', 'master_timeout', 'timeout'] },
-      'license.post_start_trial': { path: [], body: [], query: ['acknowledge', 'type_query_string', 'master_timeout'] }
-    }
   }
 
 /**
@@ -116,10 +51,7 @@ export default class License {
   async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseDeleteResponse, unknown>>
   async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise<T.LicenseDeleteResponse>
   async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['license.delete']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -159,10 +91,7 @@ export default class License {
   async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetResponse, unknown>>
   async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise<T.LicenseGetResponse>
   async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['license.get']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -202,10 +131,7 @@ export default class License {
   async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetBasicStatusResponse, unknown>>
   async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise<T.LicenseGetBasicStatusResponse>
   async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['license.get_basic_status']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -245,10 +171,7 @@ export default class License {
   async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicenseGetTrialStatusResponse, unknown>>
   async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise<T.LicenseGetTrialStatusResponse>
   async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['license.get_trial_status']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -288,12 +211,8 @@ export default class License {
   async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostResponse, unknown>>
   async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise<T.LicensePostResponse>
   async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['license.post']
-
+    const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['license', 'licenses']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -316,14 +235,8 @@ export default class License {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
     }
 
@@ -343,10 +256,7 @@ export default class License {
   async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostStartBasicResponse, unknown>>
   async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise<T.LicensePostStartBasicResponse>
   async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['license.post_start_basic']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -386,10 +296,7 @@ export default class License {
   async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostStartTrialResponse, unknown>>
   async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise<T.LicensePostStartTrialResponse>
   async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['license.post_start_trial']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
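// Editor's aside (illustrative only): with the refactor above, `license` and
// `licenses` remain body parameters of license.post, while `acknowledge`,
// `master_timeout` and `timeout` always travel as query parameters.
// `signedLicense` is a placeholder for a real license object.
declare const signedLicense: any
await client.license.post({
  license: signedLicense,
  acknowledge: true
})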
diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts
index 3434c0429..df33e03ac 100644
--- a/src/api/api/logstash.ts
+++ b/src/api/api/logstash.ts
@@ -35,44 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+interface That { transport: Transport }
 
 export default class Logstash {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'logstash.delete_pipeline': { path: ['id'], body: [], query: [] },
-      'logstash.get_pipeline': { path: ['id'], body: [], query: [] },
-      'logstash.put_pipeline': { path: ['id'], body: ['pipeline'], query: [] }
-    }
   }
 
 /**
@@ -83,10 +51,7 @@ export default class Logstash {
   async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LogstashDeletePipelineResponse, unknown>>
   async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise<T.LogstashDeletePipelineResponse>
   async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['logstash.delete_pipeline']
-
+    const acceptedPath: string[] = ['id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -128,10 +93,7 @@ export default class Logstash {
   async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LogstashGetPipelineResponse, unknown>>
   async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise<T.LogstashGetPipelineResponse>
   async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['logstash.get_pipeline']
-
+    const acceptedPath: string[] = ['id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -181,12 +143,8 @@ export default class Logstash {
   async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LogstashPutPipelineResponse, unknown>>
   async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise<T.LogstashPutPipelineResponse>
   async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['logstash.put_pipeline']
-
+    const acceptedPath: string[] = ['id']
+    const acceptedBody: string[] = ['pipeline']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -198,14 +156,8 @@ export default class Logstash {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
       }
     }
 
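// Editor's aside (illustrative only): `pipeline` is the single body parameter
// of logstash.putPipeline; `id` is a path parameter. The pipeline fields below
// follow the documented centralized-pipeline-management shape, but treat them
// as an assumption rather than part of this patch.
await client.logstash.putPipeline({
  id: 'my-pipeline',
  pipeline: {
    description: 'ingest apache logs',
    last_modified: new Date().toISOString(),
    pipeline: 'input { stdin {} } output { stdout {} }',
    pipeline_metadata: { type: 'logstash_pipeline', version: '1' },
    pipeline_settings: { 'pipeline.workers': 1 },
    username: 'elastic'
  }
})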
diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts
index 0a37d3b40..c254d5fd8 100644
--- a/src/api/api/mget.ts
+++ b/src/api/api/mget.ts
@@ -35,35 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  mget: {
-    path: ['index'],
-    body: ['docs', 'ids'],
-    query: ['force_synthetic_source', 'preference', 'realtime', 'refresh', 'routing', '_source', '_source_excludes', '_source_includes', 'stored_fields']
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. **Filter source fields** By default, the `_source` field is returned for every document (if stored). Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. **Get stored fields** Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. Any requested fields that are not stored are ignored. You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.
@@ -73,12 +45,8 @@ export default async function MgetApi (this: That, params?:
 export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MgetResponse<TDocument>, unknown>>
 export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise<T.MgetResponse<TDocument>>
 export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.mget
-
+  const acceptedPath: string[] = ['index']
+  const acceptedBody: string[] = ['docs', 'ids']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -101,14 +69,8 @@ export default async function MgetApi (this: That, params?:
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
    }
  }
 
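// Editor's aside (illustrative only): for mget, `docs` and `ids` stay in the
// body while source-filtering flags such as `_source_includes` are query
// parameters after this refactor.
const { docs } = await client.mget<{ title: string }>({
  index: 'my-index',
  ids: ['1', '2'],
  _source_includes: ['title']
})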
diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts
index 28c0188a7..5ddf19b7d 100644
--- a/src/api/api/migration.ts
+++ b/src/api/api/migration.ts
@@ -35,36 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
+interface That { transport: Transport }
 
 export default class Migration {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'migration.deprecations': { path: ['index'], body: [], query: [] },
-      'migration.get_feature_upgrade_status': { path: [], body: [], query: [] },
-      'migration.post_feature_upgrade': { path: [], body: [], query: [] }
-    }
   }
 
 /**
@@ -75,10 +51,7 @@ export default class Migration {
   async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationDeprecationsResponse, unknown>>
   async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<T.MigrationDeprecationsResponse>
   async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['migration.deprecations']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -128,10 +101,7 @@ export default class Migration {
   async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationGetFeatureUpgradeStatusResponse, unknown>>
   async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise<T.MigrationGetFeatureUpgradeStatusResponse>
   async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['migration.get_feature_upgrade_status']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -171,10 +141,7 @@ export default class Migration {
   async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MigrationPostFeatureUpgradeResponse, unknown>>
   async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise<T.MigrationPostFeatureUpgradeResponse>
   async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['migration.post_feature_upgrade']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index 677f3e54f..282fc38a5 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -35,958 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+interface That { transport: Transport }
 
 export default class Ml {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'ml.clear_trained_model_deployment_cache': { path: ['model_id'], body: [], query: [] },
-      'ml.close_job': { path: ['job_id'], body: ['allow_no_match', 'force', 'timeout'], query: ['allow_no_match', 'force', 'timeout'] },
-      'ml.delete_calendar': { path: ['calendar_id'], body: [], query: [] },
-      'ml.delete_calendar_event': { path: ['calendar_id', 'event_id'], body: [], query: [] },
-      'ml.delete_calendar_job': { path: ['calendar_id', 'job_id'], body: [], query: [] },
-      'ml.delete_data_frame_analytics': { path: ['id'], body: [], query: ['force', 'timeout'] },
-      'ml.delete_datafeed': { path: ['datafeed_id'], body: [], query: ['force'] },
-      'ml.delete_expired_data': { path: ['job_id'], body: ['requests_per_second', 'timeout'], query: ['requests_per_second', 'timeout'] },
-      'ml.delete_filter': { path: ['filter_id'], body: [], query: [] },
-      'ml.delete_forecast': { path: ['job_id', 'forecast_id'], body: [], query: ['allow_no_forecasts', 'timeout'] },
-      'ml.delete_job': { path: ['job_id'], body: [], query: ['force', 'delete_user_annotations', 'wait_for_completion'] },
-      'ml.delete_model_snapshot': { path: ['job_id', 'snapshot_id'], body: [], query: [] },
-      'ml.delete_trained_model': { path: ['model_id'], body: [], query: ['force', 'timeout'] },
-      'ml.delete_trained_model_alias': { path: ['model_alias', 'model_id'], body: [], query: [] },
-      'ml.estimate_model_memory': { path: [], body: ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality'], query: [] },
-      'ml.evaluate_data_frame': { path: [], body: ['evaluation', 'index', 'query'], query: [] },
-      'ml.explain_data_frame_analytics': { path: ['id'], body: ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start'], query: [] },
-      'ml.flush_job': { path: ['job_id'], body: ['advance_time', 'calc_interim', 'end', 'skip_time', 'start'], query: ['advance_time', 'calc_interim', 'end', 'skip_time', 'start'] },
-      'ml.forecast': { path: ['job_id'], body: ['duration', 'expires_in', 'max_model_memory'], query: ['duration', 'expires_in', 'max_model_memory'] },
-      'ml.get_buckets': { path: ['job_id', 'timestamp'], body: ['anomaly_score', 'desc', 'end', 'exclude_interim', 'expand', 'page', 'sort', 'start'], query: ['anomaly_score', 'desc', 'end', 'exclude_interim', 'expand', 'from', 'size', 'sort', 'start'] },
-      'ml.get_calendar_events': { path: ['calendar_id'], body: [], query: ['end', 'from', 'job_id', 'size', 'start'] },
-      'ml.get_calendars': { path: ['calendar_id'], body: ['page'], query: ['from', 'size'] },
-      'ml.get_categories': { path: ['job_id', 'category_id'], body: ['page'], query: ['from', 'partition_field_value', 'size'] },
-      'ml.get_data_frame_analytics': { path: ['id'], body: [], query: ['allow_no_match', 'from', 'size', 'exclude_generated'] },
-      'ml.get_data_frame_analytics_stats': { path: ['id'], body: [], query: ['allow_no_match', 'from', 'size', 'verbose'] },
-      'ml.get_datafeed_stats': { path: ['datafeed_id'], body: [], query: ['allow_no_match'] },
-      'ml.get_datafeeds': { path: ['datafeed_id'], body: [], query: ['allow_no_match', 'exclude_generated'] },
-      'ml.get_filters': { path: ['filter_id'], body: [], query: ['from', 'size'] },
-      'ml.get_influencers': { path: ['job_id'], body: ['page'], query: ['desc', 'end', 'exclude_interim', 'influencer_score', 'from', 'size', 'sort', 'start'] },
-      'ml.get_job_stats': { path: ['job_id'], body: [], query: ['allow_no_match'] },
-      'ml.get_jobs': { path: ['job_id'], body: [], query: ['allow_no_match', 'exclude_generated'] },
-      'ml.get_memory_stats': { path: ['node_id'], body: [], query: ['master_timeout', 'timeout'] },
-      'ml.get_model_snapshot_upgrade_stats': { path: ['job_id', 'snapshot_id'], body: [], query: ['allow_no_match'] },
-      'ml.get_model_snapshots': { path: ['job_id', 'snapshot_id'], body: ['desc', 'end', 'page', 'sort', 'start'], query: ['desc', 'end', 'from', 'size', 'sort', 'start'] },
-      'ml.get_overall_buckets': { path: ['job_id'], body: ['allow_no_match', 'bucket_span', 'end', 'exclude_interim', 'overall_score', 'start', 'top_n'], query: ['allow_no_match', 'bucket_span', 'end', 'exclude_interim', 'overall_score', 'start', 'top_n'] },
-      'ml.get_records': { path: ['job_id'], body: ['desc', 'end', 'exclude_interim', 'page', 'record_score', 'sort', 'start'], query: ['desc', 'end', 'exclude_interim', 'from', 'record_score', 'size', 'sort', 'start'] },
-      'ml.get_trained_models': { path: ['model_id'], body: [], query: ['allow_no_match', 'decompress_definition', 'exclude_generated', 'from', 'include', 'include_model_definition', 'size', 'tags'] },
-      'ml.get_trained_models_stats': { path: ['model_id'], body: [], query: ['allow_no_match', 'from', 'size'] },
-      'ml.infer_trained_model': { path: ['model_id'], body: ['docs', 'inference_config'], query: ['timeout'] },
-      'ml.info': { path: [], body: [], query: [] },
-      'ml.open_job': { path: ['job_id'], body: ['timeout'], query: ['timeout'] },
-      'ml.post_calendar_events': { path: ['calendar_id'], body: ['events'], query: [] },
-      'ml.post_data': { path: ['job_id'], body: ['data'], query: ['reset_end', 'reset_start'] },
-      'ml.preview_data_frame_analytics': { path: ['id'], body: ['config'], query: [] },
-      'ml.preview_datafeed': { path: ['datafeed_id'], body: ['datafeed_config', 'job_config'], query: ['start', 'end'] },
-      'ml.put_calendar': { path: ['calendar_id'], body: ['job_ids', 'description'], query: [] },
-      'ml.put_calendar_job': { path: ['calendar_id', 'job_id'], body: [], query: [] },
-      'ml.put_data_frame_analytics': { path: ['id'], body: ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', '_meta', 'model_memory_limit', 'source', 'headers', 'version'], query: [] },
-      'ml.put_datafeed': { path: ['datafeed_id'], body: ['aggregations', 'aggs', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers'], query: ['allow_no_indices', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable'] },
-      'ml.put_filter': { path: ['filter_id'], body: ['description', 'items'], query: [] },
-      'ml.put_job': { path: [], body: ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'job_id', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days'], query: ['allow_no_indices', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable'] },
-      'ml.put_trained_model': { path: ['model_id'], body: ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'platform_architecture', 'tags', 'prefix_strings'], query: ['defer_definition_decompression', 'wait_for_completion'] },
-      'ml.put_trained_model_alias': { path: ['model_alias', 'model_id'], body: [], query: ['reassign'] },
-      'ml.put_trained_model_definition_part': { path: ['model_id', 'part'], body: ['definition', 'total_definition_length', 'total_parts'], query: [] },
-      'ml.put_trained_model_vocabulary': { path: ['model_id'], body: ['vocabulary', 'merges', 'scores'], query: [] },
-      'ml.reset_job': { path: ['job_id'], body: [], query: ['wait_for_completion', 'delete_user_annotations'] },
-      'ml.revert_model_snapshot': { path: ['job_id', 'snapshot_id'], body: ['delete_intervening_results'], query: ['delete_intervening_results'] },
-      'ml.set_upgrade_mode': { path: [], body: [], query: ['enabled', 'timeout'] },
-      'ml.start_data_frame_analytics': { path: ['id'], body: [], query: ['timeout'] },
-      'ml.start_datafeed': { path: ['datafeed_id'], body: ['end', 'start', 'timeout'], query: ['end', 'start', 'timeout'] },
-      'ml.start_trained_model_deployment': { path: ['model_id'], body: ['adaptive_allocations'], query: ['cache_size', 'deployment_id', 'number_of_allocations', 'priority', 'queue_capacity', 'threads_per_allocation', 'timeout', 'wait_for'] },
-      'ml.stop_data_frame_analytics': { path: ['id'], body: [], query: ['allow_no_match', 'force', 'timeout'] },
-      'ml.stop_datafeed': { path: ['datafeed_id'], body: ['allow_no_match', 'force', 'timeout'], query: ['allow_no_match', 'force', 'timeout'] },
-      'ml.stop_trained_model_deployment': { path: ['model_id'], body: [], query: ['allow_no_match', 'force'] },
-      'ml.update_data_frame_analytics': { path: ['id'], body: ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start'], query: [] },
-      'ml.update_datafeed': { path: ['datafeed_id'], body: ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'], query: ['allow_no_indices', 'expand_wildcards', 'ignore_throttled', 'ignore_unavailable'] },
-      'ml.update_filter': { path: ['filter_id'], body: ['add_items', 'description', 'remove_items'], query: [] },
-      'ml.update_job': { path: ['job_id'], body: ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'model_prune_window', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization'], query: [] },
-      'ml.update_model_snapshot': { path: ['job_id', 'snapshot_id'], body: ['description', 'retain'], query: [] },
-      'ml.update_trained_model_deployment': { path: ['model_id'], body: ['number_of_allocations', 'adaptive_allocations'], query: ['number_of_allocations'] },
-      'ml.upgrade_job_snapshot': { path: ['job_id', 'snapshot_id'], body: [], query: ['wait_for_completion', 'timeout'] },
-      'ml.validate': { path: [], body: ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_id', 'model_snapshot_retention_days', 'results_index_name'], query: [] },
-      'ml.validate_detector': { path: [], body: ['detector'], query: [] }
-    }
   }
 
 /**
@@ -997,10 +51,7 @@ export default class Ml {
   async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlClearTrainedModelDeploymentCacheResponse, unknown>>
   async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise<T.MlClearTrainedModelDeploymentCacheResponse>
   async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.clear_trained_model_deployment_cache']
-
+    const acceptedPath: string[] = ['model_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1042,12 +93,8 @@ export default class Ml {
   async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlCloseJobResponse, unknown>>
   async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise<T.MlCloseJobResponse>
   async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ml.close_job']
-
+    const acceptedPath: string[] = ['job_id']
+    const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1069,14 +116,8 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
      } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
    }
 
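// Editor's aside (illustrative only): `allow_no_match`, `force` and `timeout`
// for ml.closeJob are always serialized onto the query string after this change.
await client.ml.closeJob({ job_id: 'my-anomaly-job', force: false, timeout: '30s' })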
@@ -1099,10 +140,7 @@ export default class Ml {
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarResponse, unknown>>
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarResponse>
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.delete_calendar']
-
+    const acceptedPath: string[] = ['calendar_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1144,10 +182,7 @@ export default class Ml {
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarEventResponse, unknown>>
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarEventResponse>
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.delete_calendar_event']
-
+    const acceptedPath: string[] = ['calendar_id', 'event_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1190,10 +225,7 @@ export default class Ml {
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarJobResponse, unknown>>
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarJobResponse>
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.delete_calendar_job']
-
+    const acceptedPath: string[] = ['calendar_id', 'job_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1236,10 +268,7 @@ export default class Ml {
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDataFrameAnalyticsResponse, unknown>>
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDataFrameAnalyticsResponse>
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.delete_data_frame_analytics']
-
+    const acceptedPath: string[] = ['id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1281,10 +310,7 @@ export default class Ml {
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDatafeedResponse, unknown>>
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDatafeedResponse>
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.delete_datafeed']
-
+    const acceptedPath: string[] = ['datafeed_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1326,12 +352,8 @@ export default class Ml {
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteExpiredDataResponse, unknown>>
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<T.MlDeleteExpiredDataResponse>
   async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ml.delete_expired_data']
-
+    const acceptedPath: string[] = ['job_id']
+    const acceptedBody: string[] = ['requests_per_second', 'timeout']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1354,14 +376,8 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
    }
 
@@ -1391,10 +407,7 @@ export default class Ml {
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteFilterResponse, unknown>>
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<T.MlDeleteFilterResponse>
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.delete_filter']
-
+    const acceptedPath: string[] = ['filter_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1436,10 +449,7 @@ export default class Ml {
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteForecastResponse, unknown>>
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<T.MlDeleteForecastResponse>
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.delete_forecast']
-
+    const acceptedPath: string[] = ['job_id', 'forecast_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1489,10 +499,7 @@ export default class Ml {
   async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteJobResponse, unknown>>
   async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<T.MlDeleteJobResponse>
   async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.delete_job']
-
+    const acceptedPath: string[] = ['job_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1534,10 +541,7 @@ export default class Ml {
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteModelSnapshotResponse, unknown>>
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlDeleteModelSnapshotResponse>
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.delete_model_snapshot']
-
+    const acceptedPath: string[] = ['job_id', 'snapshot_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1580,10 +584,7 @@ export default class Ml {
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelResponse, unknown>>
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlDeleteTrainedModelResponse>
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.delete_trained_model']
-
+    const acceptedPath: string[] = ['model_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1625,10 +626,7 @@ export default class Ml {
   async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelAliasResponse, unknown>>
   async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<T.MlDeleteTrainedModelAliasResponse>
   async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.delete_trained_model_alias']
-
+    const acceptedPath: string[] = ['model_alias', 'model_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1671,12 +669,8 @@ export default class Ml {
   async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEstimateModelMemoryResponse, unknown>>
   async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise<T.MlEstimateModelMemoryResponse>
   async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ml.estimate_model_memory']
-
+    const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1699,14 +693,8 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
    }
 
@@ -1726,12 +714,8 @@ export default class Ml {
   async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEvaluateDataFrameResponse, unknown>>
   async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise<T.MlEvaluateDataFrameResponse>
   async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ml.evaluate_data_frame']
-
+    const acceptedPath: string[] = []
+    const acceptedBody: string[] = ['evaluation', 'index', 'query']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1753,14 +737,8 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
    }
 
@@ -1780,12 +758,8 @@ export default class Ml {
   async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlExplainDataFrameAnalyticsResponse, unknown>>
   async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlExplainDataFrameAnalyticsResponse>
   async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ml.explain_data_frame_analytics']
-
+    const acceptedPath: string[] = ['id']
+    const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1808,14 +782,8 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
    }
 
@@ -1845,12 +813,8 @@ export default class Ml {
   async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlFlushJobResponse, unknown>>
   async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise<T.MlFlushJobResponse>
   async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ml.flush_job']
-
+    const acceptedPath: string[] = ['job_id']
+    const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'skip_time', 'start']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1872,14 +836,8 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
    }
 
@@ -1902,12 +860,8 @@ export default class Ml {
   async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlForecastResponse, unknown>>
   async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise<T.MlForecastResponse>
   async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ml.forecast']
-
+    const acceptedPath: string[] = ['job_id']
+    const acceptedBody: string[] = ['duration', 'expires_in', 'max_model_memory']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1929,14 +883,8 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
    }
 
@@ -1959,12 +907,8 @@ export default class Ml {
   async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetBucketsResponse, unknown>>
   async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<T.MlGetBucketsResponse>
   async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ml.get_buckets']
-
+    const acceptedPath: string[] = ['job_id', 'timestamp']
+    const acceptedBody: string[] = ['anomaly_score', 'desc', 'end', 'exclude_interim', 'expand', 'page', 'sort', 'start']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -1986,14 +930,8 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
    }
 
@@ -2024,10 +962,7 @@ export default class Ml {
   async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarEventsResponse, unknown>>
   async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<T.MlGetCalendarEventsResponse>
   async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['ml.get_calendar_events']
-
+    const acceptedPath: string[] = ['calendar_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -2069,12 +1004,8 @@ export default class Ml {
   async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarsResponse, unknown>>
   async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise<T.MlGetCalendarsResponse>
   async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ml.get_calendars']
-
+    const acceptedPath: string[] = ['calendar_id']
+    const acceptedBody: string[] = ['page']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -2097,14 +1028,8 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
    }
 
@@ -2134,12 +1059,8 @@ export default class Ml {
   async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCategoriesResponse, unknown>>
   async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise<T.MlGetCategoriesResponse>
   async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['ml.get_categories']
-
+    const acceptedPath: string[] = ['job_id', 'category_id']
+    const acceptedBody: string[] = ['page']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -2161,14 +1082,8 @@ export default class Ml {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
    }
 
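// Editor's aside (illustrative only): paging the anomaly-results APIs touched
// above; `from`/`size` are query parameters, `page` and `sort` body parameters.
const buckets = await client.ml.getBuckets({
  job_id: 'my-anomaly-job',
  from: 0,
  size: 50,
  sort: 'anomaly_score',
  desc: true
})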
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2199,10 +1114,7 @@ export default class Ml { async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.get_data_frame_analytics'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2252,10 +1164,7 @@ export default class Ml { async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.get_data_frame_analytics_stats'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2305,10 +1214,7 @@ export default class Ml { async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.get_datafeed_stats'] - + const acceptedPath: string[] = ['datafeed_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2358,10 +1264,7 @@ export default class Ml { async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.get_datafeeds'] - + const acceptedPath: string[] = ['datafeed_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2411,10 +1314,7 @@ export default class Ml { async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.get_filters'] - + const acceptedPath: string[] = ['filter_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2464,12 +1364,8 @@ export default class Ml { async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise> async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.get_influencers'] - + const acceptedPath: string[] = ['job_id'] + const acceptedBody: string[] = ['page'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2491,14 +1387,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2521,10 +1411,7 @@ export default class Ml { async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.get_job_stats'] - + const acceptedPath: string[] = ['job_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2574,10 +1461,7 @@ export default class Ml { async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.get_jobs'] - + const acceptedPath: string[] = ['job_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2627,10 +1511,7 @@ export default class Ml { async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.get_memory_stats'] - + const acceptedPath: string[] = ['node_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2680,10 +1561,7 @@ export default class Ml { async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.get_model_snapshot_upgrade_stats'] - + const acceptedPath: string[] = ['job_id', 'snapshot_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2726,12 +1604,8 @@ export default class Ml { async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.get_model_snapshots'] - + const acceptedPath: string[] = ['job_id', 'snapshot_id'] + const acceptedBody: string[] = ['desc', 'end', 'page', 'sort', 'start'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2753,14 +1627,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2791,12 +1659,8 @@ export default class Ml { async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.get_overall_buckets'] - + const acceptedPath: string[] = ['job_id'] + const acceptedBody: string[] = ['allow_no_match', 'bucket_span', 'end', 'exclude_interim', 'overall_score', 'start', 'top_n'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2818,14 +1682,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2848,12 +1706,8 @@ export default class Ml { async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.get_records'] - + const acceptedPath: string[] = ['job_id'] + const acceptedBody: string[] = ['desc', 'end', 'exclude_interim', 'page', 'record_score', 'sort', 'start'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2875,14 +1729,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2905,10 +1753,7 @@ export default class Ml { async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.get_trained_models'] - + const acceptedPath: string[] = ['model_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2958,10 +1803,7 @@ export default class Ml { async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.get_trained_models_stats'] - + const acceptedPath: string[] = ['model_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3011,12 +1853,8 @@ export default class Ml { async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.infer_trained_model'] - + const acceptedPath: string[] = ['model_id'] + const acceptedBody: string[] = ['docs', 'inference_config'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
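// A sketch of the bucket/record retrieval endpoints above, assuming the same
// local node; job ids and score thresholds are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// top_n, bucket_span and overall_score are body parameters here (see the
// acceptedBody list above), but callers still pass them at the top level.
const overall = await client.ml.getOverallBuckets({
  job_id: 'job-*',
  top_n: 2,
  bucket_span: '1h',
  overall_score: 80.0
})
console.log(overall.count)

const records = await client.ml.getRecords({
  job_id: 'my-anomaly-job',
  record_score: 90.0,
  sort: 'record_score',
  desc: true
})
console.log(records.count)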
{ ...userQuery } : {} @@ -3038,14 +1876,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3068,10 +1900,7 @@ export default class Ml { async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.info'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3111,12 +1940,8 @@ export default class Ml { async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.open_job'] - + const acceptedPath: string[] = ['job_id'] + const acceptedBody: string[] = ['timeout'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3138,14 +1963,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3168,12 +1987,8 @@ export default class Ml { async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.post_calendar_events'] - + const acceptedPath: string[] = ['calendar_id'] + const acceptedBody: string[] = ['events'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3195,14 +2010,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3225,12 +2034,8 @@ export default class Ml { async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.post_data'] - + const acceptedPath: string[] = ['job_id'] + const acceptedBody: string[] = ['data'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3242,14 +2047,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3272,12 +2071,8 @@ export default class Ml { async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.preview_data_frame_analytics'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['config'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3300,14 +2095,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3337,12 +2126,8 @@ export default class Ml { async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.preview_datafeed'] - + const acceptedPath: string[] = ['datafeed_id'] + const acceptedBody: string[] = ['datafeed_config', 'job_config'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3365,14 +2150,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
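// Sketch: opening a job and posting calendar events through the endpoints
// above; ids and the time window are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `timeout` is the single body parameter of ml.openJob in this codegen.
await client.ml.openJob({ job_id: 'my-anomaly-job', timeout: '30s' })

// postCalendarEvents carries its `events` array in the request body.
await client.ml.postCalendarEvents({
  calendar_id: 'planned-outages',
  events: [{
    description: 'maintenance window',
    start_time: '2025-01-01T00:00:00Z',
    end_time: '2025-01-01T04:00:00Z'
  }]
})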
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3402,12 +2181,8 @@ export default class Ml { async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.put_calendar'] - + const acceptedPath: string[] = ['calendar_id'] + const acceptedBody: string[] = ['job_ids', 'description'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3429,14 +2204,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3459,10 +2228,7 @@ export default class Ml { async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.put_calendar_job'] - + const acceptedPath: string[] = ['calendar_id', 'job_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3505,12 +2271,8 @@ export default class Ml { async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.put_data_frame_analytics'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', '_meta', 'model_memory_limit', 'source', 'headers', 'version'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3532,14 +2294,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3562,12 +2318,8 @@ export default class Ml { async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.put_datafeed'] - + const acceptedPath: string[] = ['datafeed_id'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3589,14 +2341,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3619,12 +2365,8 @@ export default class Ml { async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.put_filter'] - + const acceptedPath: string[] = ['filter_id'] + const acceptedBody: string[] = ['description', 'items'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3646,14 +2388,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3676,12 +2412,8 @@ export default class Ml { async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.put_job'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'job_id', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3703,14 +2435,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3733,12 +2459,8 @@ export default class Ml { async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.put_trained_model'] - + const acceptedPath: string[] = ['model_id'] + const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'platform_architecture', 'tags', 'prefix_strings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3760,14 +2482,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3790,10 +2506,7 @@ export default class Ml { async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.put_trained_model_alias'] - + const acceptedPath: string[] = ['model_alias', 'model_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
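// Sketch: creating a job and its datafeed with the body fields enumerated in
// the acceptedBody lists above; names, index pattern and spans are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// For ml.putJob the acceptedPath list is empty here, so job_id is routed
// into the request body by the loop shown in the diff.
await client.ml.putJob({
  job_id: 'my-anomaly-job',
  description: 'mean response time by host',
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'responsetime', by_field_name: 'host' }]
  },
  data_description: { time_field: 'timestamp' }
})

await client.ml.putDatafeed({
  datafeed_id: 'datafeed-my-anomaly-job',
  job_id: 'my-anomaly-job',
  indices: ['app-logs-*'],
  query: { match_all: {} },
  scroll_size: 1000
})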
{ ...userQuery } : {} @@ -3836,12 +2549,8 @@ export default class Ml { async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.put_trained_model_definition_part'] - + const acceptedPath: string[] = ['model_id', 'part'] + const acceptedBody: string[] = ['definition', 'total_definition_length', 'total_parts'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3863,14 +2572,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3894,12 +2597,8 @@ export default class Ml { async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.put_trained_model_vocabulary'] - + const acceptedPath: string[] = ['model_id'] + const acceptedBody: string[] = ['vocabulary', 'merges', 'scores'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3921,14 +2620,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3951,10 +2644,7 @@ export default class Ml { async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.reset_job'] - + const acceptedPath: string[] = ['job_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3996,12 +2686,8 @@ export default class Ml { async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.revert_model_snapshot'] - + const acceptedPath: string[] = ['job_id', 'snapshot_id'] + const acceptedBody: string[] = ['delete_intervening_results'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4023,14 +2709,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4054,10 +2734,7 @@ export default class Ml { async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise> async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.set_upgrade_mode'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4097,10 +2774,7 @@ export default class Ml { async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.start_data_frame_analytics'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4142,12 +2816,8 @@ export default class Ml { async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.start_datafeed'] - + const acceptedPath: string[] = ['datafeed_id'] + const acceptedBody: string[] = ['end', 'start', 'timeout'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -4169,14 +2839,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4199,12 +2863,8 @@ export default class Ml { async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.start_trained_model_deployment'] - + const acceptedPath: string[] = ['model_id'] + const acceptedBody: string[] = ['adaptive_allocations'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4226,14 +2886,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4256,10 +2910,7 @@ export default class Ml { async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.stop_data_frame_analytics'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4301,12 +2952,8 @@ export default class Ml { async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.stop_datafeed'] - + const acceptedPath: string[] = ['datafeed_id'] + const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4328,14 +2975,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
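// Sketch: starting and later stopping the datafeed, matching the start/stop
// endpoints above; the datafeed id and time windows are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `start`, `end` and `timeout` are body parameters of ml.startDatafeed.
await client.ml.startDatafeed({ datafeed_id: 'datafeed-my-anomaly-job', start: 'now-7d' })

// ...and ml.stopDatafeed accepts allow_no_match, force and timeout in the body.
await client.ml.stopDatafeed({
  datafeed_id: 'datafeed-my-anomaly-job',
  timeout: '30s',
  force: false
})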
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4358,10 +2999,7 @@ export default class Ml { async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.stop_trained_model_deployment'] - + const acceptedPath: string[] = ['model_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4403,12 +3041,8 @@ export default class Ml { async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.update_data_frame_analytics'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4430,14 +3064,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4460,12 +3088,8 @@ export default class Ml { async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.update_datafeed'] - + const acceptedPath: string[] = ['datafeed_id'] + const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4487,14 +3111,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4517,12 +3135,8 @@ export default class Ml { async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.update_filter'] - + const acceptedPath: string[] = ['filter_id'] + const acceptedBody: string[] = ['add_items', 'description', 'remove_items'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4544,14 +3158,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4574,12 +3182,8 @@ export default class Ml { async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.update_job'] - + const acceptedPath: string[] = ['job_id'] + const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'model_prune_window', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4601,14 +3205,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4631,12 +3229,8 @@ export default class Ml { async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.update_model_snapshot'] - + const acceptedPath: string[] = ['job_id', 'snapshot_id'] + const acceptedBody: string[] = ['description', 'retain'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -4658,14 +3252,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4689,12 +3277,8 @@ export default class Ml { async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.update_trained_model_deployment'] - + const acceptedPath: string[] = ['model_id'] + const acceptedBody: string[] = ['number_of_allocations', 'adaptive_allocations'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4716,14 +3300,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4746,10 +3324,7 @@ export default class Ml { async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ml.upgrade_job_snapshot'] - + const acceptedPath: string[] = ['job_id', 'snapshot_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4792,12 +3367,8 @@ export default class Ml { async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise> async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.validate'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_id', 'model_snapshot_retention_days', 'results_index_name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4820,14 +3391,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
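// Sketch: the in-place update endpoints above; ids, description text and
// retention values are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.ml.updateJob({
  job_id: 'my-anomaly-job',
  description: 'tightened snapshot retention',
  model_snapshot_retention_days: 7
})

// `description` and `retain` are the two body parameters of
// ml.updateModelSnapshot (see the acceptedBody list above).
await client.ml.updateModelSnapshot({
  job_id: 'my-anomaly-job',
  snapshot_id: '1575402237',
  description: 'snapshot kept before reindex',
  retain: true
})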
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -4847,12 +3412,8 @@ export default class Ml { async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise> async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['ml.validate_detector'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['detector'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4864,14 +3425,8 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index 58bde33fa..053fea53a 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -35,34 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Monitoring { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'monitoring.bulk': { - path: [ - 'type' - ], - body: [ - 'operations' - ], - query: [ - 'system_id', - 'system_api_version', - 'interval' - ] - } - } } /** @@ -73,12 +51,8 @@ export default class Monitoring { async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['monitoring.bulk'] - + const acceptedPath: string[] = ['type'] + const acceptedBody: string[] = ['operations'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -90,14 +64,8 @@ export default class Monitoring { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts index a70ea2055..573c4f385 100644 --- a/src/api/api/msearch.ts +++ b/src/api/api/msearch.ts @@ -35,38 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - msearch: { - path: [ - 'index' - ], - body: [ - 'searches' - ], - query: [ - 'allow_no_indices', - 'ccs_minimize_roundtrips', - 'expand_wildcards', - 'ignore_throttled', - 'ignore_unavailable', - 'include_named_queries_score', - 'max_concurrent_searches', - 'max_concurrent_shard_requests', - 'pre_filter_shard_size', - 'rest_total_hits_as_int', - 'routing', - 'search_type', - 'typed_keys' - ] - } -} +interface That { transport: Transport } /** * Run multiple searches. The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: ``` header\n body\n header\n body\n ``` This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. IMPORTANT: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. @@ -76,12 +45,8 @@ export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise> export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.msearch - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['searches'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,14 +58,8 @@ export default async function MsearchApi = { - msearch_template: { - path: [ - 'index' - ], - body: [ - 'search_templates' - ], - query: [ - 'ccs_minimize_roundtrips', - 'max_concurrent_searches', - 'search_type', - 'rest_total_hits_as_int', - 'typed_keys' - ] - } -} +interface That { transport: Transport } /** * Run multiple templated searches. Run multiple templated searches with a single request. If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. 
For example: ``` $ cat requests { "index": "my-index" } { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} { "index": "my-other-index" } { "id": "my-other-search-template", "params": { "query_type": "match_all" }} $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo ``` @@ -68,12 +45,8 @@ export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.msearch_template - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['search_templates'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -85,14 +58,8 @@ export default async function MsearchTemplateApi = { - mtermvectors: { - path: [ - 'index' - ], - body: [ - 'docs', - 'ids' - ], - query: [ - 'ids', - 'fields', - 'field_statistics', - 'offsets', - 'payloads', - 'positions', - 'preference', - 'realtime', - 'routing', - 'term_statistics', - 'version', - 'version_type' - ] - } -} +interface That { transport: Transport } /** * Get multiple term vectors. Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. **Artificial documents** You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. The mapping used is determined by the specified `_index`. @@ -76,12 +45,8 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.mtermvectors - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['docs', 'ids'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -104,14 +69,8 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
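// A minimal sketch of the two multi-search endpoints above, assuming a local
// node; index names, the template id and its params are placeholders. In the
// JS client the NDJSON header/body pairs are passed as arrays (`searches` /
// `search_templates`) rather than hand-built newline-delimited strings.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const multi = await client.msearch({
  index: 'my-index',
  searches: [
    {},                                             // header: inherits my-index
    { query: { match: { message: 'hello world' } } },
    { index: 'my-other-index' },                    // header: overrides the index
    { query: { match_all: {} } }
  ]
})
console.log(multi.responses.length)

const templated = await client.msearchTemplate({
  search_templates: [
    {},
    { id: 'my-search-template', params: { query_string: 'hello world', from: 0, size: 10 } }
  ]
})
console.log(templated.responses.length)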
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index 8980be517..1ce489ae0 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -35,102 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Nodes { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'nodes.clear_repositories_metering_archive': { - path: [ - 'node_id', - 'max_archive_version' - ], - body: [], - query: [] - }, - 'nodes.get_repositories_metering_info': { - path: [ - 'node_id' - ], - body: [], - query: [] - }, - 'nodes.hot_threads': { - path: [ - 'node_id' - ], - body: [], - query: [ - 'ignore_idle_threads', - 'interval', - 'snapshots', - 'threads', - 'timeout', - 'type', - 'sort' - ] - }, - 'nodes.info': { - path: [ - 'node_id', - 'metric' - ], - body: [], - query: [ - 'flat_settings', - 'timeout' - ] - }, - 'nodes.reload_secure_settings': { - path: [ - 'node_id' - ], - body: [ - 'secure_settings_password' - ], - query: [ - 'timeout' - ] - }, - 'nodes.stats': { - path: [ - 'node_id', - 'metric', - 'index_metric' - ], - body: [], - query: [ - 'completion_fields', - 'fielddata_fields', - 'fields', - 'groups', - 'include_segment_file_sizes', - 'level', - 'timeout', - 'types', - 'include_unloaded_segments' - ] - }, - 'nodes.usage': { - path: [ - 'node_id', - 'metric' - ], - body: [], - query: [ - 'timeout' - ] - } - } } /** @@ -141,10 +51,7 @@ export default class Nodes { async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['nodes.clear_repositories_metering_archive'] - + const acceptedPath: string[] = ['node_id', 'max_archive_version'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -187,10 +94,7 @@ export default class Nodes { async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['nodes.get_repositories_metering_info'] - + const acceptedPath: string[] = ['node_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
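// Sketch: fetching term vectors for two stored documents via the
// mtermvectors endpoint above; the index, ids and field are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `docs` and `ids` are the body parameters (see acceptedBody above); the
// remaining options are serialized onto the querystring.
const { docs } = await client.mtermvectors({
  index: 'my-index',
  ids: ['1', '2'],
  fields: ['message'],
  term_statistics: true
})
console.log(docs.length)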
{ ...userQuery } : {} @@ -232,10 +136,7 @@ export default class Nodes { async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise> async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['nodes.hot_threads'] - + const acceptedPath: string[] = ['node_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -285,10 +186,7 @@ export default class Nodes { async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['nodes.info'] - + const acceptedPath: string[] = ['node_id', 'metric'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -345,12 +243,8 @@ export default class Nodes { async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['nodes.reload_secure_settings'] - + const acceptedPath: string[] = ['node_id'] + const acceptedBody: string[] = ['secure_settings_password'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -373,14 +267,8 @@ export default class Nodes { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -410,10 +298,7 @@ export default class Nodes { async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['nodes.stats'] - + const acceptedPath: string[] = ['node_id', 'metric', 'index_metric'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
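// Sketch: a few of the nodes endpoints above; the keystore password and the
// metric selection are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Dump the three busiest CPU threads per node (plain-text response).
const hot = await client.nodes.hotThreads({ threads: 3, type: 'cpu' })
console.log(hot)

// secure_settings_password is the lone body parameter handled above.
await client.nodes.reloadSecureSettings({ secure_settings_password: 'keystore-password' })

// Metric groups can be passed as a comma-separated string or an array.
const stats = await client.nodes.stats({ metric: 'jvm,os' })
console.log(Object.keys(stats.nodes).length)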
{ ...userQuery } : {} @@ -477,10 +362,7 @@ export default class Nodes { async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['nodes.usage'] - + const acceptedPath: string[] = ['node_id', 'metric'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts index 1ff65e50e..4cd2a733e 100644 --- a/src/api/api/open_point_in_time.ts +++ b/src/api/api/open_point_in_time.ts @@ -35,31 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - open_point_in_time: { - path: [ - 'index' - ], - body: [ - 'index_filter' - ], - query: [ - 'keep_alive', - 'ignore_unavailable', - 'preference', - 'routing', - 'expand_wildcards', - 'allow_partial_search_results' - ] - } -} +interface That { transport: Transport } /** * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time. Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with `search_after`. IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. When a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a `NoShardAvailableActionException` exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. **Keeping point in time alive** The `keep_alive` parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request. Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use. 
@@ -69,12 +45,8 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.OpenPointInTimeResponse, unknown>>
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<T.OpenPointInTimeResponse>
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.open_point_in_time
-
+  const acceptedPath: string[] = ['index']
+  const acceptedBody: string[] = ['index_filter']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -96,14 +68,8 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
     }
   }
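A sketch of the point-in-time flow the doc comment above describes (assuming a configured `client` and an illustrative index name):

```ts
// Open a PIT, search against it, then release it.
const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })

const page = await client.search({
  // No index/routing/preference here: they are copied from the point in time.
  pit: { id: pit.id, keep_alive: '1m' },
  size: 100,
  sort: [{ _doc: 'asc' }]
})

await client.closePointInTime({ id: pit.id })
```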
diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts
index 81c05df92..908709afd 100644
--- a/src/api/api/ping.ts
+++ b/src/api/api/ping.ts
@@ -35,18 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  ping: {
-    path: [],
-    body: [],
-    query: []
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Ping the cluster. Get information about whether the cluster is running.
@@ -56,10 +45,7 @@ export default async function PingApi (this: That, params?: T.PingRequest, optio
 export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PingResponse, unknown>>
 export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise<T.PingResponse>
 export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath
-  } = acceptedParams.ping
-
+  const acceptedPath: string[] = []
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
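Ping takes no path, body, or API-specific query parameters, which is why all three accepted lists above are empty. A minimal sketch, assuming a `client` instance:

```ts
// Resolves to true when the cluster answers.
const alive = await client.ping()
console.log(alive)
```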
diff --git a/src/api/api/profiling.ts b/src/api/api/profiling.ts
index 631c1df17..75f2d46cc 100644
--- a/src/api/api/profiling.ts
+++ b/src/api/api/profiling.ts
@@ -35,39 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
+interface That { transport: Transport }
 
 export default class Profiling {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
 
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'profiling.flamegraph': {
-        path: [],
-        body: [],
-        query: []
-      },
-      'profiling.stacktraces': {
-        path: [],
-        body: [],
-        query: []
-      },
-      'profiling.status': {
-        path: [],
-        body: [],
-        query: []
-      },
-      'profiling.topn_functions': {
-        path: [],
-        body: [],
-        query: []
-      }
-    }
   }
 
   /**
@@ -78,10 +51,7 @@ export default class Profiling {
   async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['profiling.flamegraph']
-
+    const acceptedPath: string[] = []
    const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -120,10 +90,7 @@ export default class Profiling {
   async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['profiling.stacktraces']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -162,10 +129,7 @@ export default class Profiling {
   async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['profiling.status']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -204,10 +168,7 @@ export default class Profiling {
   async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['profiling.topn_functions']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts
index c412b1faf..d3350ca5b 100644
--- a/src/api/api/put_script.ts
+++ b/src/api/api/put_script.ts
@@ -35,29 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  put_script: {
-    path: [
-      'id',
-      'context'
-    ],
-    body: [
-      'script'
-    ],
-    query: [
-      'context',
-      'master_timeout',
-      'timeout'
-    ]
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Create or update a script or search template. Creates or updates a stored script or search template.
@@ -67,12 +45,8 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque
 export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PutScriptResponse, unknown>>
 export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise<T.PutScriptResponse>
 export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.put_script
-
+  const acceptedPath: string[] = ['id', 'context']
+  const acceptedBody: string[] = ['script']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -94,14 +68,8 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
    }
   }
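A sketch of the stored-script call this wrapper serves (assuming a `client` instance; the template id and source are illustrative):

```ts
// `id` is a path parameter; `script` is the only body property.
await client.putScript({
  id: 'my-search-template',
  script: {
    lang: 'mustache',
    source: '{"query":{"match":{"title":"{{query_string}}"}}}'
  }
})
```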
diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts
index 3fa80dccd..bb7a964ee 100644
--- a/src/api/api/query_rules.ts
+++ b/src/api/api/query_rules.ts
@@ -35,90 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+interface That { transport: Transport }
 
 export default class QueryRules {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
 
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'query_rules.delete_rule': {
-        path: [
-          'ruleset_id',
-          'rule_id'
-        ],
-        body: [],
-        query: []
-      },
-      'query_rules.delete_ruleset': {
-        path: [
-          'ruleset_id'
-        ],
-        body: [],
-        query: []
-      },
-      'query_rules.get_rule': {
-        path: [
-          'ruleset_id',
-          'rule_id'
-        ],
-        body: [],
-        query: []
-      },
-      'query_rules.get_ruleset': {
-        path: [
-          'ruleset_id'
-        ],
-        body: [],
-        query: []
-      },
-      'query_rules.list_rulesets': {
-        path: [],
-        body: [],
-        query: [
-          'from',
-          'size'
-        ]
-      },
-      'query_rules.put_rule': {
-        path: [
-          'ruleset_id',
-          'rule_id'
-        ],
-        body: [
-          'type',
-          'criteria',
-          'actions',
-          'priority'
-        ],
-        query: []
-      },
-      'query_rules.put_ruleset': {
-        path: [
-          'ruleset_id'
-        ],
-        body: [
-          'rules'
-        ],
-        query: []
-      },
-      'query_rules.test': {
-        path: [
-          'ruleset_id'
-        ],
-        body: [
-          'match_criteria'
-        ],
-        query: []
-      }
-    }
   }
 
   /**
@@ -129,10 +51,7 @@ export default class QueryRules {
   async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRuleResponse, unknown>>
   async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRuleResponse>
   async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['query_rules.delete_rule']
-
+    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -175,10 +94,7 @@ export default class QueryRules {
   async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRulesetResponse, unknown>>
   async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRulesetResponse>
   async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['query_rules.delete_ruleset']
-
+    const acceptedPath: string[] = ['ruleset_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -220,10 +136,7 @@ export default class QueryRules {
   async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRuleResponse, unknown>>
   async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRuleResponse>
   async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['query_rules.get_rule']
-
+    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -266,10 +179,7 @@ export default class QueryRules {
   async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRulesetResponse, unknown>>
   async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRulesetResponse>
   async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['query_rules.get_ruleset']
-
+    const acceptedPath: string[] = ['ruleset_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -311,10 +221,7 @@ export default class QueryRules {
   async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesListRulesetsResponse, unknown>>
   async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<T.QueryRulesListRulesetsResponse>
   async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['query_rules.list_rulesets']
-
+    const acceptedPath: string[] = []
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -354,12 +261,8 @@ export default class QueryRules {
   async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRuleResponse, unknown>>
   async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRuleResponse>
   async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['query_rules.put_rule']
-
+    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
+    const acceptedBody: string[] = ['type', 'criteria', 'actions', 'priority']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -381,14 +284,8 @@ export default class QueryRules {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
       }
     }
@@ -412,12 +309,8 @@ export default class QueryRules {
   async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRulesetResponse, unknown>>
   async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRulesetResponse>
   async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['query_rules.put_ruleset']
-
+    const acceptedPath: string[] = ['ruleset_id']
+    const acceptedBody: string[] = ['rules']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -439,14 +332,8 @@ export default class QueryRules {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
       }
     }
@@ -469,12 +356,8 @@ export default class QueryRules {
   async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesTestResponse, unknown>>
   async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<T.QueryRulesTestResponse>
   async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['query_rules.test']
-
+    const acceptedPath: string[] = ['ruleset_id']
+    const acceptedBody: string[] = ['match_criteria']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -496,14 +379,8 @@ export default class QueryRules {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
     }
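A sketch of the query-rules surface these methods wrap (assuming a `client` instance; the ruleset id, rule, and document ids are illustrative):

```ts
// `ruleset_id`/`rule_id` are path parameters; `rules` is a body property.
await client.queryRules.putRuleset({
  ruleset_id: 'my-ruleset',
  rules: [{
    rule_id: 'rule-1',
    type: 'pinned',
    criteria: [{ type: 'exact', metadata: 'user_query', values: ['marvel'] }],
    actions: { ids: ['doc-1'] }
  }]
})
const ruleset = await client.queryRules.getRuleset({ ruleset_id: 'my-ruleset' })
```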
diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts
index cd9207896..bd3af65e5 100644
--- a/src/api/api/rank_eval.ts
+++ b/src/api/api/rank_eval.ts
@@ -35,30 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  rank_eval: {
-    path: [
-      'index'
-    ],
-    body: [
-      'requests',
-      'metric'
-    ],
-    query: [
-      'allow_no_indices',
-      'expand_wildcards',
-      'ignore_unavailable',
-      'search_type'
-    ]
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries.
@@ -68,12 +45,8 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest
 export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RankEvalResponse, unknown>>
 export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise<T.RankEvalResponse>
 export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.rank_eval
-
+  const acceptedPath: string[] = ['index']
+  const acceptedBody: string[] = ['requests', 'metric']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -95,14 +68,8 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
    }
   }
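A sketch of a ranked-evaluation request (assuming a `client` instance; index, query, and rating values are illustrative):

```ts
// `index` is the path; `requests` and `metric` are body properties.
const evaluation = await client.rankEval({
  index: 'my-index',
  requests: [{
    id: 'query-1',
    request: { query: { match: { title: 'elasticsearch' } } },
    ratings: [{ _index: 'my-index', _id: 'doc-1', rating: 1 }]
  }],
  metric: { precision: { k: 10, relevant_rating_threshold: 1 } }
})
console.log(evaluation.metric_score)
```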
diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts
index 2fe9d235d..5c83f147b 100644
--- a/src/api/api/reindex.ts
+++ b/src/api/api/reindex.ts
@@ -35,36 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  reindex: {
-    path: [],
-    body: [
-      'conflicts',
-      'dest',
-      'max_docs',
-      'script',
-      'size',
-      'source'
-    ],
-    query: [
-      'refresh',
-      'requests_per_second',
-      'scroll',
-      'slices',
-      'timeout',
-      'wait_for_active_shards',
-      'wait_for_completion',
-      'require_alias'
-    ]
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Reindex documents. Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time. If the Elasticsearch security features are enabled, you must have the following security privileges: * The `read` index privilege for the source data stream, index, or alias. * The `write` index privilege for the destination data stream, index, or index alias. * To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. * If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled. The `dest` element can be configured like the index API to control optimistic concurrency control. Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict. IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream. By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. NOTE: The reindex API makes no effort to handle ID collisions. The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. Instead, make sure that IDs are unique by using a script. **Running reindex asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `_tasks/`. **Reindex from multiple sources** If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. That way you can resume the process if there are any errors by removing the partially completed source and starting over. It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. For example, you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. Requests are throttled by padding each batch with a wait time. To turn off throttling, set `requests_per_second` to `-1`. The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". **Slicing** Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. You can slice a reindex request manually by providing a slice ID and total number of slices to each request. You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use. Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: * You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices.
 * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. * Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use the following guidelines. Query performance is most efficient when the number of slices is equal to the number of shards in the index. If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. Indexing performance scales linearly across available resources with the number of slices. Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. **Modify documents during reindexing** Like `_update_by_query`, reindex operations support a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document's metadata. Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body. Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. Think of the possibilities! Just be careful; you are able to change: * `_id` * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. **Reindex from remote** Reindex supports reindexing from a remote Elasticsearch cluster. The `host` parameter must contain a scheme, host, port, and optional path. The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. There are a range of settings available to configure the behavior of the HTTPS connection.
 When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. It can be set to a comma delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"] ``` The list of allowed hosts must be configured on any nodes that will coordinate the reindex. This feature should work with remote clusters of any version of Elasticsearch. This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. WARNING: Elasticsearch does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you'll need to use a smaller batch size. It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request.
@@ -74,12 +45,8 @@ export default async function ReindexApi (this: That, params: T.ReindexRequest,
 export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ReindexResponse, unknown>>
 export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise<T.ReindexResponse>
 export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.reindex
-
+  const acceptedPath: string[] = []
+  const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -101,14 +68,8 @@ export default async function ReindexApi (this: That, params: T.ReindexRequest,
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
    }
   }
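A sketch of the asynchronous reindex flow the doc comment describes (assuming a `client` instance; index names are illustrative):

```ts
// With wait_for_completion=false the response carries a task id instead of results.
const { task } = await client.reindex({
  wait_for_completion: false,
  source: { index: 'old-index' },
  dest: { index: 'new-index' },
  conflicts: 'proceed' // count version conflicts instead of aborting
})

// Poll the task record Elasticsearch keeps for the launched reindex.
const status = await client.tasks.get({ task_id: String(task) })
```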
diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts
index 13a52ff25..d32f80c01 100644
--- a/src/api/api/reindex_rethrottle.ts
+++ b/src/api/api/reindex_rethrottle.ts
@@ -35,22 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  reindex_rethrottle: {
-    path: [
-      'task_id'
-    ],
-    body: [],
-    query: [
-      'requests_per_second'
-    ]
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Throttle a reindex operation. Change the number of requests per second for a particular reindex operation. For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts.
@@ -60,10 +45,7 @@ export default async function ReindexRethrottleApi (this: That, params: T.Reinde
 export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ReindexRethrottleResponse, unknown>>
 export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise<T.ReindexRethrottleResponse>
 export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath
-  } = acceptedParams.reindex_rethrottle
-
+  const acceptedPath: string[] = ['task_id']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts
index 40af73935..57b5377c6 100644
--- a/src/api/api/render_search_template.ts
+++ b/src/api/api/render_search_template.ts
@@ -35,25 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  render_search_template: {
-    path: [],
-    body: [
-      'id',
-      'file',
-      'params',
-      'source'
-    ],
-    query: []
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Render a search template. Render a search template as a search request body.
@@ -63,12 +45,8 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re
 export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RenderSearchTemplateResponse, unknown>>
 export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<T.RenderSearchTemplateResponse>
 export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.render_search_template
-
+  const acceptedPath: string[] = []
+  const acceptedBody: string[] = ['id', 'file', 'params', 'source']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -91,14 +69,8 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
    }
   }
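A sketch of rendering a stored template (assuming a `client` instance and the illustrative template id used earlier):

```ts
// `id` and `params` are body properties here.
const rendered = await client.renderSearchTemplate({
  id: 'my-search-template',
  params: { query_string: 'hello world' }
})
console.log(rendered.template_output)
```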
diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts
index 753acf55a..b45043728 100644
--- a/src/api/api/rollup.ts
+++ b/src/api/api/rollup.ts
@@ -35,97 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+interface That { transport: Transport }
 
 export default class Rollup {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
 
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'rollup.delete_job': {
-        path: [
-          'id'
-        ],
-        body: [],
-        query: []
-      },
-      'rollup.get_jobs': {
-        path: [
-          'id'
-        ],
-        body: [],
-        query: []
-      },
-      'rollup.get_rollup_caps': {
-        path: [
-          'id'
-        ],
-        body: [],
-        query: []
-      },
-      'rollup.get_rollup_index_caps': {
-        path: [
-          'index'
-        ],
-        body: [],
-        query: []
-      },
-      'rollup.put_job': {
-        path: [
-          'id'
-        ],
-        body: [
-          'cron',
-          'groups',
-          'index_pattern',
-          'metrics',
-          'page_size',
-          'rollup_index',
-          'timeout',
-          'headers'
-        ],
-        query: []
-      },
-      'rollup.rollup_search': {
-        path: [
-          'index'
-        ],
-        body: [
-          'aggregations',
-          'aggs',
-          'query',
-          'size'
-        ],
-        query: [
-          'rest_total_hits_as_int',
-          'typed_keys'
-        ]
-      },
-      'rollup.start_job': {
-        path: [
-          'id'
-        ],
-        body: [],
-        query: []
-      },
-      'rollup.stop_job': {
-        path: [
-          'id'
-        ],
-        body: [],
-        query: [
-          'timeout',
-          'wait_for_completion'
-        ]
-      }
-    }
   }
 
   /**
@@ -136,10 +51,7 @@ export default class Rollup {
   async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupDeleteJobResponse, unknown>>
   async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise<T.RollupDeleteJobResponse>
   async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['rollup.delete_job']
-
+    const acceptedPath: string[] = ['id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -181,10 +93,7 @@ export default class Rollup {
   async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupGetJobsResponse, unknown>>
   async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise<T.RollupGetJobsResponse>
   async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['rollup.get_jobs']
-
+    const acceptedPath: string[] = ['id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -234,10 +143,7 @@ export default class Rollup {
   async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupGetRollupCapsResponse, unknown>>
   async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise<T.RollupGetRollupCapsResponse>
   async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['rollup.get_rollup_caps']
-
+    const acceptedPath: string[] = ['id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -287,10 +193,7 @@ export default class Rollup {
   async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupGetRollupIndexCapsResponse, unknown>>
   async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise<T.RollupGetRollupIndexCapsResponse>
   async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['rollup.get_rollup_index_caps']
-
+    const acceptedPath: string[] = ['index']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -332,12 +235,8 @@ export default class Rollup {
   async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupPutJobResponse, unknown>>
   async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise<T.RollupPutJobResponse>
   async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['rollup.put_job']
-
+    const acceptedPath: string[] = ['id']
+    const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index', 'timeout', 'headers']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -359,14 +258,8 @@ export default class Rollup {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
       }
     }
@@ -389,12 +282,8 @@ export default class Rollup {
   async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupRollupSearchResponse<TDocument, TAggregations>, unknown>>
   async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise<T.RollupRollupSearchResponse<TDocument, TAggregations>>
   async rollupSearch<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['rollup.rollup_search']
-
+    const acceptedPath: string[] = ['index']
+    const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'size']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -416,14 +305,8 @@ export default class Rollup {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ?? {}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
      }
     }
@@ -446,10 +329,7 @@ export default class Rollup {
   async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupStartJobResponse, unknown>>
   async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise<T.RollupStartJobResponse>
   async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['rollup.start_job']
-
+    const acceptedPath: string[] = ['id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -491,10 +371,7 @@ export default class Rollup {
   async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RollupStopJobResponse, unknown>>
   async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise<T.RollupStopJobResponse>
   async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['rollup.stop_job']
-
+    const acceptedPath: string[] = ['id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
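A sketch of a rollup search, whose aggregations travel in the body against a rollup index (assuming a `client` instance; index and field names are illustrative):

```ts
const result = await client.rollup.rollupSearch({
  index: 'sensor_rollup', // path parameter
  size: 0,
  aggregations: {
    max_temperature: { max: { field: 'temperature' } }
  }
})
```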
diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts
index 35fcd6225..bbafbeff1 100644
--- a/src/api/api/scripts_painless_execute.ts
+++ b/src/api/api/scripts_painless_execute.ts
@@ -35,24 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  scripts_painless_execute: {
-    path: [],
-    body: [
-      'context',
-      'context_setup',
-      'script'
-    ],
-    query: []
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Run a script. Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. Each context requires a script, but additional parameters depend on the context you're using for that script.
@@ -62,12 +45,8 @@ export default async function ScriptsPainlessExecuteApi (this
 export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScriptsPainlessExecuteResponse<TResult>, unknown>>
 export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise<T.ScriptsPainlessExecuteResponse<TResult>>
 export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.scripts_painless_execute
-
+  const acceptedPath: string[] = []
+  const acceptedBody: string[] = ['context', 'context_setup', 'script']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -90,14 +69,8 @@ export default async function ScriptsPainlessExecuteApi (this
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
    }
   }
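A sketch of a painless execution using the `painless_test` context, mirroring the body properties listed above (assuming a `client` instance):

```ts
const result = await client.scriptsPainlessExecute({
  context: 'painless_test',
  script: {
    source: 'params.count / params.total',
    params: { count: 100.0, total: 1000.0 }
  }
})
console.log(result.result) // "0.1"
```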
diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts
index 184e45b2a..5bd03110b 100644
--- a/src/api/api/scroll.ts
+++ b/src/api/api/scroll.ts
@@ -35,27 +35,7 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  scroll: {
-    path: [],
-    body: [
-      'scroll',
-      'scroll_id'
-    ],
-    query: [
-      'scroll',
-      'scroll_id',
-      'rest_total_hits_as_int'
-    ]
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Run a scrolling search. IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
@@ -65,12 +45,8 @@ export default async function ScrollApi<TDocument = unknown, TAggregations = Re
 export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScrollResponse<TDocument, TAggregations>, unknown>>
 export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise<T.ScrollResponse<TDocument, TAggregations>>
 export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.scroll
-
+  const acceptedPath: string[] = []
+  const acceptedBody: string[] = ['scroll', 'scroll_id']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -92,14 +68,8 @@ export default async function ScrollApi<TDocument = unknown, TAggregations = Re
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
    }
   }
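A sketch of the scroll loop the doc comment describes (assuming a `client` instance and an illustrative index name):

```ts
// Open the search context with the initial search request.
let response = await client.search({
  index: 'my-index',
  scroll: '30s',
  size: 1000,
  query: { match_all: {} }
})

// Keep pulling batches until a page comes back empty.
while (response.hits.hits.length > 0) {
  // process response.hits.hits ...
  response = await client.scroll({ scroll_id: response._scroll_id!, scroll: '30s' })
}

await client.clearScroll({ scroll_id: response._scroll_id! })
```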
diff --git a/src/api/api/search.ts b/src/api/api/search.ts
--- a/src/api/api/search.ts
+++ b/src/api/api/search.ts
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
-
-const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
-  search: {
-    path: [
-      'index'
-    ],
-    body: [
-      'aggregations',
-      'aggs',
-      'collapse',
-      'explain',
-      'ext',
-      'from',
-      'highlight',
-      'track_total_hits',
-      'indices_boost',
-      'docvalue_fields',
-      'knn',
-      'rank',
-      'min_score',
-      'post_filter',
-      'profile',
-      'query',
-      'rescore',
-      'retriever',
-      'script_fields',
-      'search_after',
-      'size',
-      'slice',
-      'sort',
-      '_source',
-      'fields',
-      'suggest',
-      'terminate_after',
-      'timeout',
-      'track_scores',
-      'version',
-      'seq_no_primary_term',
-      'stored_fields',
-      'pit',
-      'runtime_mappings',
-      'stats'
-    ],
-    query: [
-      'allow_no_indices',
-      'allow_partial_search_results',
-      'analyzer',
-      'analyze_wildcard',
-      'batched_reduce_size',
-      'ccs_minimize_roundtrips',
-      'default_operator',
-      'df',
-      'docvalue_fields',
-      'expand_wildcards',
-      'explain',
-      'ignore_throttled',
-      'ignore_unavailable',
-      'include_named_queries_score',
-      'lenient',
-      'max_concurrent_shard_requests',
-      'preference',
-      'pre_filter_shard_size',
-      'request_cache',
-      'routing',
-      'scroll',
-      'search_type',
-      'stats',
-      'stored_fields',
-      'suggest_field',
-      'suggest_mode',
-      'suggest_size',
-      'suggest_text',
-      'terminate_after',
-      'timeout',
-      'track_total_hits',
-      'track_scores',
-      'typed_keys',
-      'rest_total_hits_as_int',
-      'version',
-      '_source',
-      '_source_excludes',
-      '_source_includes',
-      'seq_no_primary_term',
-      'q',
-      'size',
-      'from',
-      'sort',
-      'force_synthetic_source'
-    ]
-  }
-}
+interface That { transport: Transport }
 
 /**
  * Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices. **Search slicing** When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
@@ -141,12 +45,8 @@ export default async function SearchApi<TDocument = unknown, TAggregations = Re
 export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchResponse<TDocument, TAggregations>, unknown>>
 export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise<T.SearchResponse<TDocument, TAggregations>>
 export default async function SearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.search
-
+  const acceptedPath: string[] = ['index']
+  const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'rank', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'retriever', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -173,14 +73,8 @@ export default async function SearchApi<TDocument = unknown, TAggregations = Re
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      } else {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      }
+      // @ts-expect-error
+      querystring[key] = params[key]
    }
   }
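A sketch of sliced search with a shared PIT, as the doc comment above describes; both slice requests reuse the same PIT id so the slices line up (assuming a `client` instance and an illustrative index name):

```ts
const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })

const [slice0, slice1] = await Promise.all([0, 1].map(id =>
  client.search({
    pit: { id: pit.id, keep_alive: '1m' },
    slice: { id: String(id), max: 2 }, // same PIT across all slices
    query: { match_all: {} }
  })
))
```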
diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts
--- a/src/api/api/search_application.ts
+++ b/src/api/api/search_application.ts
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+interface That { transport: Transport }
 
 export default class SearchApplication {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
 
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'search_application.delete': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: []
-      },
-      'search_application.delete_behavioral_analytics': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: []
-      },
-      'search_application.get': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: []
-      },
-      'search_application.get_behavioral_analytics': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: []
-      },
-      'search_application.list': {
-        path: [],
-        body: [],
-        query: [
-          'q',
-          'from',
-          'size'
-        ]
-      },
-      'search_application.post_behavioral_analytics_event': {
-        path: [
-          'collection_name',
-          'event_type'
-        ],
-        body: [
-          'payload'
-        ],
-        query: [
-          'debug'
-        ]
-      },
-      'search_application.put': {
-        path: [
-          'name'
-        ],
-        body: [
-          'search_application'
-        ],
-        query: [
-          'create'
-        ]
-      },
-      'search_application.put_behavioral_analytics': {
-        path: [
-          'name'
-        ],
-        body: [],
-        query: []
-      },
-      'search_application.render_query': {
-        path: [
-          'name'
-        ],
-        body: [
-          'params'
-        ],
-        query: []
-      },
-      'search_application.search': {
-        path: [
-          'name'
-        ],
-        body: [
-          'params'
-        ],
-        query: [
-          'typed_keys'
-        ]
-      }
-    }
   }
 
   /**
@@ -147,10 +51,7 @@ export default class SearchApplication {
   async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationDeleteResponse, unknown>>
   async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationDeleteResponse>
   async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['search_application.delete']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -192,10 +93,7 @@ export default class SearchApplication {
   async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationDeleteBehavioralAnalyticsResponse, unknown>>
   async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationDeleteBehavioralAnalyticsResponse>
   async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['search_application.delete_behavioral_analytics']
-
+    const acceptedPath: string[] = ['name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
{ ...userQuery } : {} @@ -378,12 +267,8 @@ export default class SearchApplication { async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithMeta): Promise> async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['search_application.post_behavioral_analytics_event'] - + const acceptedPath: string[] = ['collection_name', 'event_type'] + const acceptedBody: string[] = ['payload'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -395,14 +280,8 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -426,12 +305,8 @@ export default class SearchApplication { async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise> async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['search_application.put'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['search_application'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -443,14 +318,8 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -473,10 +342,7 @@ export default class SearchApplication { async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['search_application.put_behavioral_analytics'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
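
To illustrate how these search application endpoints are called from the client, a usage sketch for the `search` method defined just below (the application name and template parameter are hypothetical, reusing the `client` from the earlier point-in-time example):

```
// `params` is forwarded into the search application's stored template;
// `typed_keys` is the only extra query parameter the search endpoint accepts.
const appResults = await client.searchApplication.search({
  name: 'my-app',
  params: { query_string: 'hello' }
})
console.log(appResults.hits.hits)
```
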
{ ...userQuery } : {} @@ -518,12 +384,8 @@ export default class SearchApplication { async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['search_application.render_query'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['params'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -545,14 +407,8 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -575,12 +431,8 @@ export default class SearchApplication { async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise> async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['search_application.search'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['params'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -602,14 +454,8 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index 34695497b..c9384a91e 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -35,49 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - search_mvt: { - path: [ - 'index', - 'field', - 'zoom', - 'x', - 'y' - ], - body: [ - 'aggs', - 'buffer', - 'exact_bounds', - 'extent', - 'fields', - 'grid_agg', - 'grid_precision', - 'grid_type', - 'query', - 'runtime_mappings', - 'size', - 'sort', - 'track_total_hits', - 'with_labels' - ], - query: [ - 'exact_bounds', - 'extent', - 'grid_agg', - 'grid_precision', - 'grid_type', - 'size', - 'with_labels' - ] - } -} +interface That { transport: Transport } /** * Search a vector tile. Search a vector tile for geospatial values. Before using this API, you should be familiar with the Mapbox vector tile specification. The API returns results as a binary mapbox vector tile. 
Internally, Elasticsearch translates a vector tile search API request into a search containing:

* A `geo_bounding_box` query on the `<field>`. The query uses the `<zoom>/<x>/<y>` tile as a bounding box.
* A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `<zoom>/<x>/<y>` tile as a bounding box.
* Optionally, a `geo_bounds` aggregation on the `<field>`. The search only includes this aggregation if the `exact_bounds` parameter is `true`.
* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.

For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search:

```
GET my-index/_search
{
  "size": 10000,
  "query": {
    "geo_bounding_box": {
      "my-geo-field": {
        "top_left": { "lat": -40.979898069620134, "lon": -45 },
        "bottom_right": { "lat": -66.51326044311186, "lon": 0 }
      }
    }
  },
  "aggregations": {
    "grid": {
      "geotile_grid": {
        "field": "my-geo-field",
        "precision": 11,
        "size": 65536,
        "bounds": {
          "top_left": { "lat": -40.979898069620134, "lon": -45 },
          "bottom_right": { "lat": -66.51326044311186, "lon": 0 }
        }
      }
    },
    "bounds": {
      "geo_bounds": { "field": "my-geo-field", "wrap_longitude": false }
    }
  }
}
```

The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:

* A `hits` layer containing a feature for each `<field>` value matching the `geo_bounding_box` query.
* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.
* A meta layer containing:
  * A feature containing a bounding box. By default, this is the bounding box of the tile.
  * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.
  * Metadata for the search.

The API only returns features that can display at its zoom level. For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON.

IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. If you specify both parameters, the query parameter takes precedence.

**Grid precision for geotile**

For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. `grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: `<zoom> + grid_precision`. For example, if `<zoom>` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. The maximum final precision is 29. The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The `aggs` layer only contains features for cells with matching data.

**Grid precision for geohex**

For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and `grid_precision` to calculate a final precision as follows: `<zoom> + grid_precision`. This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.
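
To make the precision arithmetic concrete, a sketch of requesting tile `2/1/1` (zoom/x/y), carrying over the `my-index` and `my-geo-field` names from the example above:

```
// With zoom 2 and grid_precision 8, the geotile_grid aggregation runs at
// precision 2 + 8 = 10, and the tile is divided into (2^8) x (2^8) = 256 x 256 cells.
const tile = await client.searchMvt({
  index: 'my-index',
  field: 'my-geo-field',
  zoom: 2,
  x: 1,
  y: 1,
  grid_agg: 'geotile',
  grid_precision: 8,
  exact_bounds: true
})
// `tile` is the binary Mapbox vector tile (PBF) described above. For
// `grid_agg: 'geohex'`, the H3 resolution per final precision is tabulated below.
```
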
The following table maps the H3 resolution for each precision. For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3.

| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio |
| --------- | ---------------- | ------------- | ---------------- | ----- |
| 1 | 4 | 0 | 122 | 30.5 |
| 2 | 16 | 0 | 122 | 7.625 |
| 3 | 64 | 1 | 842 | 13.15625 |
| 4 | 256 | 1 | 842 | 3.2890625 |
| 5 | 1024 | 2 | 5882 | 5.744140625 |
| 6 | 4096 | 2 | 5882 | 1.436035156 |
| 7 | 16384 | 3 | 41162 | 2.512329102 |
| 8 | 65536 | 3 | 41162 | 0.6280822754 |
| 9 | 262144 | 4 | 288122 | 1.099098206 |
| 10 | 1048576 | 4 | 288122 | 0.2747745514 |
| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |
| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |
| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |
| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |
| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |
| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |
| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |
| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |
| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |
| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |
| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |
| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |
| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |
| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |
| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |
| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |
| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |
| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |
| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |

Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.

@@ -87,12 +45,8 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchMvtResponse, unknown>>
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise<T.SearchMvtResponse>
 export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise<any> {
-  const {
-    path: acceptedPath,
-    body: acceptedBody,
-    query: acceptedQuery
-  } = acceptedParams.search_mvt
-
+  const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y']
+  const acceptedBody: string[] = ['aggs', 'buffer', 'exact_bounds', 'extent', 'fields', 'grid_agg', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort', 'track_total_hits', 'with_labels']
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} @@ -114,14 +68,8 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts index 87a8ba52e..f2fff30a5 100644 --- a/src/api/api/search_shards.ts +++ b/src/api/api/search_shards.ts @@ -35,28 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const acceptedParams: Record = { - search_shards: { - path: [ - 'index' - ], - body: [], - query: [ - 'allow_no_indices', - 'expand_wildcards', - 'ignore_unavailable', - 'local', - 'master_timeout', - 'preference', - 'routing' - ] - } -} +interface That { transport: Transport } /** * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the `indices` section. If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. @@ -66,10 +45,7 @@ export default async function SearchShardsApi (this: That, params?: T.SearchShar export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = acceptedParams.search_shards - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index 0a47b171c..f63c77a45 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -35,42 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - search_template: { - path: [ - 'index' - ], - body: [ - 'explain', - 'id', - 'params', - 'profile', - 'source' - ], - query: [ - 'allow_no_indices', - 'ccs_minimize_roundtrips', - 'expand_wildcards', - 'explain', - 'ignore_throttled', - 'ignore_unavailable', - 'preference', - 'profile', - 'routing', - 'scroll', - 'search_type', - 'rest_total_hits_as_int', - 'typed_keys' - ] - } -} +interface That { transport: Transport } /** * Run a search with a search template. 
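
Usage sketches for the search shards API above and the search template API introduced here (the index name, routing value, and stored template id are placeholders; `client` as before):

```
// Report which shards a search would hit, honouring a custom routing value.
const shards = await client.searchShards({ index: 'my-index', routing: 'user-1' })
console.log(shards.shards.length)

// Run a stored search template, filling in its mustache parameters.
const templated = await client.searchTemplate({
  index: 'my-index',
  id: 'my-search-template',
  params: { query_string: 'hello', from: 0, size: 10 }
})
```
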
@@ -80,12 +45,8 @@ export default async function SearchTemplateApi (this: That export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.search_template - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['explain', 'id', 'params', 'profile', 'source'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -108,14 +69,8 @@ export default async function SearchTemplateApi (this: That } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 2d5f792ae..4c8af1dda 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -35,67 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class SearchableSnapshots { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'searchable_snapshots.cache_stats': { - path: [ - 'node_id' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'searchable_snapshots.clear_cache': { - path: [ - 'index' - ], - body: [], - query: [ - 'expand_wildcards', - 'allow_no_indices', - 'ignore_unavailable' - ] - }, - 'searchable_snapshots.mount': { - path: [ - 'repository', - 'snapshot' - ], - body: [ - 'index', - 'renamed_index', - 'index_settings', - 'ignore_index_settings' - ], - query: [ - 'master_timeout', - 'wait_for_completion', - 'storage' - ] - }, - 'searchable_snapshots.stats': { - path: [ - 'index' - ], - body: [], - query: [ - 'level' - ] - } - } } /** @@ -106,10 +51,7 @@ export default class SearchableSnapshots { async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['searchable_snapshots.cache_stats'] - + const acceptedPath: string[] = ['node_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -159,10 +101,7 @@ export default class SearchableSnapshots { async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['searchable_snapshots.clear_cache'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -212,12 +151,8 @@ export default class SearchableSnapshots { async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise> async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['searchable_snapshots.mount'] - + const acceptedPath: string[] = ['repository', 'snapshot'] + const acceptedBody: string[] = ['index', 'renamed_index', 'index_settings', 'ignore_index_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -239,14 +174,8 @@ export default class SearchableSnapshots { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -270,10 +199,7 @@ export default class SearchableSnapshots { async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['searchable_snapshots.stats'] - + const acceptedPath: string[] = ['index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
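
A usage sketch for the `mount` method above (repository, snapshot, and index names are placeholders):

```
// Mount `my-index` from an existing snapshot as a searchable snapshot under a
// new name, blocking until recovery completes; `full_copy` caches all data
// locally (versus `shared_cache` for partial, on-demand storage).
const mounted = await client.searchableSnapshots.mount({
  repository: 'my-repository',
  snapshot: 'my-snapshot',
  index: 'my-index',
  renamed_index: 'my-mounted-index',
  wait_for_completion: true,
  storage: 'full_copy'
})
console.log(mounted.snapshot)
```
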
{ ...userQuery } : {} diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 0f3021cc3..3484f5933 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -35,648 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Security { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'security.activate_user_profile': { - path: [], - body: [ - 'access_token', - 'grant_type', - 'password', - 'username' - ], - query: [] - }, - 'security.authenticate': { - path: [], - body: [], - query: [] - }, - 'security.bulk_delete_role': { - path: [], - body: [ - 'names' - ], - query: [ - 'refresh' - ] - }, - 'security.bulk_put_role': { - path: [], - body: [ - 'roles' - ], - query: [ - 'refresh' - ] - }, - 'security.bulk_update_api_keys': { - path: [], - body: [ - 'expiration', - 'ids', - 'metadata', - 'role_descriptors' - ], - query: [] - }, - 'security.change_password': { - path: [ - 'username' - ], - body: [ - 'password', - 'password_hash' - ], - query: [ - 'refresh' - ] - }, - 'security.clear_api_key_cache': { - path: [ - 'ids' - ], - body: [], - query: [] - }, - 'security.clear_cached_privileges': { - path: [ - 'application' - ], - body: [], - query: [] - }, - 'security.clear_cached_realms': { - path: [ - 'realms' - ], - body: [], - query: [ - 'usernames' - ] - }, - 'security.clear_cached_roles': { - path: [ - 'name' - ], - body: [], - query: [] - }, - 'security.clear_cached_service_tokens': { - path: [ - 'namespace', - 'service', - 'name' - ], - body: [], - query: [] - }, - 'security.create_api_key': { - path: [], - body: [ - 'expiration', - 'name', - 'role_descriptors', - 'metadata' - ], - query: [ - 'refresh' - ] - }, - 'security.create_cross_cluster_api_key': { - path: [], - body: [ - 'access', - 'expiration', - 'metadata', - 'name' - ], - query: [] - }, - 'security.create_service_token': { - path: [ - 'namespace', - 'service', - 'name' - ], - body: [], - query: [ - 'refresh' - ] - }, - 'security.delegate_pki': { - path: [], - body: [ - 'x509_certificate_chain' - ], - query: [] - }, - 'security.delete_privileges': { - path: [ - 'application', - 'name' - ], - body: [], - query: [ - 'refresh' - ] - }, - 'security.delete_role': { - path: [ - 'name' - ], - body: [], - query: [ - 'refresh' - ] - }, - 'security.delete_role_mapping': { - path: [ - 'name' - ], - body: [], - query: [ - 'refresh' - ] - }, - 'security.delete_service_token': { - path: [ - 'namespace', - 'service', - 'name' - ], - body: [], - query: [ - 'refresh' - ] - }, - 'security.delete_user': { - path: [ - 'username' - ], - body: [], - query: [ - 'refresh' - ] - }, - 'security.disable_user': { - path: [ - 'username' - ], - body: [], - query: [ - 'refresh' - ] - }, - 'security.disable_user_profile': { - path: [ - 'uid' - ], - body: [], - query: [ - 'refresh' - ] - }, - 'security.enable_user': { - path: [ - 'username' - ], - body: [], - query: [ - 'refresh' - ] - }, - 'security.enable_user_profile': { - path: [ - 'uid' - ], - body: [], - query: [ - 'refresh' - ] - }, - 'security.enroll_kibana': { - path: [], - body: [], - query: [] - }, - 'security.enroll_node': { - path: [], - body: [], - query: [] - }, - 'security.get_api_key': { - path: [], - body: [], - query: [ - 'id', - 'name', - 
'owner', - 'realm_name', - 'username', - 'with_limited_by', - 'active_only', - 'with_profile_uid' - ] - }, - 'security.get_builtin_privileges': { - path: [], - body: [], - query: [] - }, - 'security.get_privileges': { - path: [ - 'application', - 'name' - ], - body: [], - query: [] - }, - 'security.get_role': { - path: [ - 'name' - ], - body: [], - query: [] - }, - 'security.get_role_mapping': { - path: [ - 'name' - ], - body: [], - query: [] - }, - 'security.get_service_accounts': { - path: [ - 'namespace', - 'service' - ], - body: [], - query: [] - }, - 'security.get_service_credentials': { - path: [ - 'namespace', - 'service' - ], - body: [], - query: [] - }, - 'security.get_settings': { - path: [], - body: [], - query: [ - 'master_timeout' - ] - }, - 'security.get_token': { - path: [], - body: [ - 'grant_type', - 'scope', - 'password', - 'kerberos_ticket', - 'refresh_token', - 'username' - ], - query: [] - }, - 'security.get_user': { - path: [ - 'username' - ], - body: [], - query: [ - 'with_profile_uid' - ] - }, - 'security.get_user_privileges': { - path: [], - body: [], - query: [ - 'application', - 'priviledge', - 'username' - ] - }, - 'security.get_user_profile': { - path: [ - 'uid' - ], - body: [], - query: [ - 'data' - ] - }, - 'security.grant_api_key': { - path: [], - body: [ - 'api_key', - 'grant_type', - 'access_token', - 'username', - 'password', - 'run_as' - ], - query: [] - }, - 'security.has_privileges': { - path: [ - 'user' - ], - body: [ - 'application', - 'cluster', - 'index' - ], - query: [] - }, - 'security.has_privileges_user_profile': { - path: [], - body: [ - 'uids', - 'privileges' - ], - query: [] - }, - 'security.invalidate_api_key': { - path: [], - body: [ - 'id', - 'ids', - 'name', - 'owner', - 'realm_name', - 'username' - ], - query: [] - }, - 'security.invalidate_token': { - path: [], - body: [ - 'token', - 'refresh_token', - 'realm_name', - 'username' - ], - query: [] - }, - 'security.oidc_authenticate': { - path: [], - body: [ - 'nonce', - 'realm', - 'redirect_uri', - 'state' - ], - query: [] - }, - 'security.oidc_logout': { - path: [], - body: [ - 'access_token', - 'refresh_token' - ], - query: [] - }, - 'security.oidc_prepare_authentication': { - path: [], - body: [ - 'iss', - 'login_hint', - 'nonce', - 'realm', - 'state' - ], - query: [] - }, - 'security.put_privileges': { - path: [], - body: [ - 'privileges' - ], - query: [ - 'refresh' - ] - }, - 'security.put_role': { - path: [ - 'name' - ], - body: [ - 'applications', - 'cluster', - 'global', - 'indices', - 'remote_indices', - 'remote_cluster', - 'metadata', - 'run_as', - 'description', - 'transient_metadata' - ], - query: [ - 'refresh' - ] - }, - 'security.put_role_mapping': { - path: [ - 'name' - ], - body: [ - 'enabled', - 'metadata', - 'roles', - 'role_templates', - 'rules', - 'run_as' - ], - query: [ - 'refresh' - ] - }, - 'security.put_user': { - path: [], - body: [ - 'username', - 'email', - 'full_name', - 'metadata', - 'password', - 'password_hash', - 'roles', - 'enabled' - ], - query: [ - 'refresh' - ] - }, - 'security.query_api_keys': { - path: [], - body: [ - 'aggregations', - 'aggs', - 'query', - 'from', - 'sort', - 'size', - 'search_after' - ], - query: [ - 'with_limited_by', - 'with_profile_uid', - 'typed_keys' - ] - }, - 'security.query_role': { - path: [], - body: [ - 'query', - 'from', - 'sort', - 'size', - 'search_after' - ], - query: [] - }, - 'security.query_user': { - path: [], - body: [ - 'query', - 'from', - 'sort', - 'size', - 'search_after' - ], - query: [ - 'with_profile_uid' 
- ] - }, - 'security.saml_authenticate': { - path: [], - body: [ - 'content', - 'ids', - 'realm' - ], - query: [] - }, - 'security.saml_complete_logout': { - path: [], - body: [ - 'realm', - 'ids', - 'query_string', - 'content' - ], - query: [] - }, - 'security.saml_invalidate': { - path: [], - body: [ - 'acs', - 'query_string', - 'realm' - ], - query: [] - }, - 'security.saml_logout': { - path: [], - body: [ - 'token', - 'refresh_token' - ], - query: [] - }, - 'security.saml_prepare_authentication': { - path: [], - body: [ - 'acs', - 'realm', - 'relay_state' - ], - query: [] - }, - 'security.saml_service_provider_metadata': { - path: [ - 'realm_name' - ], - body: [], - query: [] - }, - 'security.suggest_user_profiles': { - path: [], - body: [ - 'name', - 'size', - 'data', - 'hint' - ], - query: [ - 'data' - ] - }, - 'security.update_api_key': { - path: [ - 'id' - ], - body: [ - 'role_descriptors', - 'metadata', - 'expiration' - ], - query: [] - }, - 'security.update_cross_cluster_api_key': { - path: [ - 'id' - ], - body: [ - 'access', - 'expiration', - 'metadata' - ], - query: [] - }, - 'security.update_settings': { - path: [], - body: [ - 'security', - 'security-profile', - 'security-tokens' - ], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'security.update_user_profile_data': { - path: [ - 'uid' - ], - body: [ - 'labels', - 'data' - ], - query: [ - 'if_seq_no', - 'if_primary_term', - 'refresh' - ] - } - } } /** @@ -687,12 +51,8 @@ export default class Security { async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.activate_user_profile'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['access_token', 'grant_type', 'password', 'username'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -714,14 +74,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -741,10 +95,7 @@ export default class Security { async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.authenticate'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -784,12 +135,8 @@ export default class Security { async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.bulk_delete_role'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['names'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -811,14 +158,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -838,12 +179,8 @@ export default class Security { async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.bulk_put_role'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['roles'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -865,14 +202,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -892,12 +223,8 @@ export default class Security { async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.bulk_update_api_keys'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['expiration', 'ids', 'metadata', 'role_descriptors'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -919,14 +246,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -946,12 +267,8 @@ export default class Security { async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise> async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.change_password'] - + const acceptedPath: string[] = ['username'] + const acceptedBody: string[] = ['password', 'password_hash'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -974,14 +291,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1011,10 +322,7 @@ export default class Security { async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.clear_api_key_cache'] - + const acceptedPath: string[] = ['ids'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1056,10 +364,7 @@ export default class Security { async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.clear_cached_privileges'] - + const acceptedPath: string[] = ['application'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1101,10 +406,7 @@ export default class Security { async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.clear_cached_realms'] - + const acceptedPath: string[] = ['realms'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
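
A usage sketch for the `changePassword` method above (the user name and new password are placeholders):

```
// Change a native user's password; `refresh: 'wait_for'` returns only once the
// change is visible in the security index.
await client.security.changePassword({
  username: 'jdoe',
  password: 'a-new-strong-password',
  refresh: 'wait_for'
})
```
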
{ ...userQuery } : {} @@ -1146,10 +448,7 @@ export default class Security { async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.clear_cached_roles'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1191,10 +490,7 @@ export default class Security { async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.clear_cached_service_tokens'] - + const acceptedPath: string[] = ['namespace', 'service', 'name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1238,12 +534,8 @@ export default class Security { async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.create_api_key'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['expiration', 'name', 'role_descriptors', 'metadata'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1266,14 +558,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1293,12 +579,8 @@ export default class Security { async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.create_cross_cluster_api_key'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['access', 'expiration', 'metadata', 'name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1320,14 +602,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1347,10 +623,7 @@ export default class Security { async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.create_service_token'] - + const acceptedPath: string[] = ['namespace', 'service', 'name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1401,12 +674,8 @@ export default class Security { async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithMeta): Promise> async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.delegate_pki'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['x509_certificate_chain'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1428,14 +697,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -1455,10 +718,7 @@ export default class Security { async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.delete_privileges'] - + const acceptedPath: string[] = ['application', 'name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
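
A usage sketch for `createApiKey` above (key name, expiry, and index name are placeholders):

```
// Create a scoped key that expires in 7 days; `role_descriptors` limits the
// key to read-only access on a single index.
const apiKey = await client.security.createApiKey({
  name: 'my-api-key',
  expiration: '7d',
  role_descriptors: {
    'read-only': {
      indices: [{ names: ['my-index'], privileges: ['read'] }]
    }
  },
  refresh: 'wait_for'
})
// `apiKey.encoded` is the base64 value to place in an Authorization header.
```
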
{ ...userQuery } : {} @@ -1501,10 +761,7 @@ export default class Security { async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.delete_role'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1546,10 +803,7 @@ export default class Security { async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.delete_role_mapping'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1591,10 +845,7 @@ export default class Security { async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.delete_service_token'] - + const acceptedPath: string[] = ['namespace', 'service', 'name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1638,10 +889,7 @@ export default class Security { async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.delete_user'] - + const acceptedPath: string[] = ['username'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1683,10 +931,7 @@ export default class Security { async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.disable_user'] - + const acceptedPath: string[] = ['username'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1728,10 +973,7 @@ export default class Security { async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.disable_user_profile'] - + const acceptedPath: string[] = ['uid'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1773,10 +1015,7 @@ export default class Security { async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.enable_user'] - + const acceptedPath: string[] = ['username'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1818,10 +1057,7 @@ export default class Security { async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.enable_user_profile'] - + const acceptedPath: string[] = ['uid'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1863,10 +1099,7 @@ export default class Security { async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise> async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.enroll_kibana'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1906,10 +1139,7 @@ export default class Security { async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.enroll_node'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1949,10 +1179,7 @@ export default class Security { async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.get_api_key'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1992,10 +1219,7 @@ export default class Security { async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.get_builtin_privileges'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2035,10 +1259,7 @@ export default class Security { async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.get_privileges'] - + const acceptedPath: string[] = ['application', 'name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2092,10 +1313,7 @@ export default class Security { async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.get_role'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2145,10 +1363,7 @@ export default class Security { async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.get_role_mapping'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2198,10 +1413,7 @@ export default class Security { async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.get_service_accounts'] - + const acceptedPath: string[] = ['namespace', 'service'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2255,10 +1467,7 @@ export default class Security { async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.get_service_credentials'] - + const acceptedPath: string[] = ['namespace', 'service'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2301,10 +1510,7 @@ export default class Security { async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.get_settings'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2344,12 +1550,8 @@ export default class Security { async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.get_token'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['grant_type', 'scope', 'password', 'kerberos_ticket', 'refresh_token', 'username'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2372,14 +1574,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2399,10 +1595,7 @@ export default class Security { async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.get_user'] - + const acceptedPath: string[] = ['username'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2452,10 +1645,7 @@ export default class Security { async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.get_user_privileges'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2495,10 +1685,7 @@ export default class Security { async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.get_user_profile'] - + const acceptedPath: string[] = ['uid'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2540,12 +1727,8 @@ export default class Security { async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.grant_api_key'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password', 'run_as'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2567,14 +1750,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2594,12 +1771,8 @@ export default class Security { async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.has_privileges'] - + const acceptedPath: string[] = ['user'] + const acceptedBody: string[] = ['application', 'cluster', 'index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2622,14 +1795,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2659,12 +1826,8 @@ export default class Security { async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.has_privileges_user_profile'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['uids', 'privileges'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2686,14 +1849,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2713,12 +1870,8 @@ export default class Security { async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.invalidate_api_key'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['id', 'ids', 'name', 'owner', 'realm_name', 'username'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2741,14 +1894,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2768,12 +1915,8 @@ export default class Security { async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.invalidate_token'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['token', 'refresh_token', 'realm_name', 'username'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2796,14 +1939,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2823,12 +1960,8 @@ export default class Security { async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.oidc_authenticate'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['nonce', 'realm', 'redirect_uri', 'state'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2850,14 +1983,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2877,12 +2004,8 @@ export default class Security { async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.oidc_logout'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['access_token', 'refresh_token'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2904,14 +2027,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2931,12 +2048,8 @@ export default class Security { async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.oidc_prepare_authentication'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['iss', 'login_hint', 'nonce', 'realm', 'state'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2959,14 +2072,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -2986,12 +2093,8 @@ export default class Security { async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.put_privileges'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['privileges'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3003,14 +2106,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3030,12 +2127,8 @@ export default class Security { async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.put_role'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'remote_indices', 'remote_cluster', 'metadata', 'run_as', 'description', 'transient_metadata'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3057,14 +2150,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3087,12 +2174,8 @@ export default class Security { async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.put_role_mapping'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'role_templates', 'rules', 'run_as'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3114,14 +2197,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3144,12 +2221,8 @@ export default class Security { async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.put_user'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['username', 'email', 'full_name', 'metadata', 'password', 'password_hash', 'roles', 'enabled'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3171,14 +2244,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3201,12 +2268,8 @@ export default class Security { async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.query_api_keys'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'from', 'sort', 'size', 'search_after'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3229,14 +2292,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3256,12 +2313,8 @@ export default class Security { async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.query_role'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3284,14 +2337,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3311,12 +2358,8 @@ export default class Security { async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.query_user'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3339,14 +2382,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3366,12 +2403,8 @@ export default class Security { async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.saml_authenticate'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['content', 'ids', 'realm'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3393,14 +2426,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3420,12 +2447,8 @@ export default class Security { async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.saml_complete_logout'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['realm', 'ids', 'query_string', 'content'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3447,14 +2470,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3474,12 +2491,8 @@ export default class Security { async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.saml_invalidate'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['acs', 'query_string', 'realm'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3501,14 +2514,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3528,12 +2535,8 @@ export default class Security { async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.saml_logout'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['token', 'refresh_token'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3555,14 +2558,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3582,12 +2579,8 @@ export default class Security { async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.saml_prepare_authentication'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['acs', 'realm', 'relay_state'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3610,14 +2603,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3637,10 +2624,7 @@ export default class Security { async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['security.saml_service_provider_metadata'] - + const acceptedPath: string[] = ['realm_name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3682,12 +2666,8 @@ export default class Security { async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise> async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.suggest_user_profiles'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['name', 'size', 'data', 'hint'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3710,14 +2690,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3737,12 +2711,8 @@ export default class Security { async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.update_api_key'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['role_descriptors', 'metadata', 'expiration'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3764,14 +2734,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3794,12 +2758,8 @@ export default class Security { async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.update_cross_cluster_api_key'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['access', 'expiration', 'metadata'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3821,14 +2781,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3851,12 +2805,8 @@ export default class Security { async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.update_settings'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['security', 'security-profile', 'security-tokens'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3879,14 +2829,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -3906,12 +2850,8 @@ export default class Security { async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['security.update_user_profile_data'] - + const acceptedPath: string[] = ['uid'] + const acceptedBody: string[] = ['labels', 'data'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3933,14 +2873,8 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
       }
     }

diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts
index 1cac3a03b..ffa3b9c39 100644
--- a/src/api/api/shutdown.ts
+++ b/src/api/api/shutdown.ts
@@ -35,55 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+interface That { transport: Transport }

 export default class Shutdown {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'shutdown.delete_node': {
-        path: [
-          'node_id'
-        ],
-        body: [],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      },
-      'shutdown.get_node': {
-        path: [
-          'node_id'
-        ],
-        body: [],
-        query: [
-          'master_timeout'
-        ]
-      },
-      'shutdown.put_node': {
-        path: [
-          'node_id'
-        ],
-        body: [
-          'type',
-          'reason',
-          'allocation_delay',
-          'target_node_name'
-        ],
-        query: [
-          'master_timeout',
-          'timeout'
-        ]
-      }
-    }
   }

   /**
@@ -94,10 +51,7 @@ export default class Shutdown {
   async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownDeleteNodeResponse, unknown>>
   async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownDeleteNodeResponse>
   async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['shutdown.delete_node']
-
+    const acceptedPath: string[] = ['node_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -139,10 +93,7 @@ export default class Shutdown {
   async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownGetNodeResponse, unknown>>
   async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownGetNodeResponse>
   async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath
-    } = this.acceptedParams['shutdown.get_node']
-
+    const acceptedPath: string[] = ['node_id']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -192,12 +143,8 @@ export default class Shutdown {
   async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownPutNodeResponse, unknown>>
   async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownPutNodeResponse>
   async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['shutdown.put_node']
-
+    const acceptedPath: string[] = ['node_id']
+    const acceptedBody: string[] = ['type', 'reason', 'allocation_delay', 'target_node_name']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -219,14 +166,8 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ??
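/*
 * For contrast with the inline arrays introduced above, the removed code
 * resolved the same information from a registry keyed by endpoint name.
 * A sketch of that shape, reconstructed from the deleted constructor
 * (the `EndpointSpec` alias is an assumption, not a name from the source):
 *
 *   interface EndpointSpec { path: string[], body: string[], query: string[] }
 *   const acceptedParams: Record<string, EndpointSpec> = {
 *     'shutdown.put_node': {
 *       path: ['node_id'],
 *       body: ['type', 'reason', 'allocation_delay', 'target_node_name'],
 *       query: ['master_timeout', 'timeout']
 *     }
 *   }
 *   const { path: acceptedPath, body: acceptedBody } = acceptedParams['shutdown.put_node']
 */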
{}
-          // @ts-expect-error
-          body[key] = params[key]
-        }
+        // @ts-expect-error
+        querystring[key] = params[key]
       }
     }

diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts
index c1b3fc539..ba1689505 100644
--- a/src/api/api/simulate.ts
+++ b/src/api/api/simulate.ts
@@ -35,36 +35,12 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-
-interface That {
-  transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
-}
-
-const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+interface That { transport: Transport }

 export default class Simulate {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
-      'simulate.ingest': {
-        path: [
-          'index'
-        ],
-        body: [
-          'docs',
-          'component_template_substitutions',
-          'index_template_subtitutions',
-          'mapping_addition',
-          'pipeline_substitutions'
-        ],
-        query: [
-          'pipeline'
-        ]
-      }
-    }
   }

   /**
@@ -75,12 +51,8 @@ export default class Simulate {
   async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SimulateIngestResponse, unknown>>
   async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise<T.SimulateIngestResponse>
   async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise<any> {
-    const {
-      path: acceptedPath,
-      body: acceptedBody,
-      query: acceptedQuery
-    } = this.acceptedParams['simulate.ingest']
-
+    const acceptedPath: string[] = ['index']
+    const acceptedBody: string[] = ['docs', 'component_template_substitutions', 'index_template_subtitutions', 'mapping_addition', 'pipeline_substitutions']
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

@@ -102,14 +74,8 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
-          // @ts-expect-error
-          querystring[key] = params[key]
-        } else {
-          body = body ??
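/*
 * From the caller's perspective the request shape is unchanged; only the
 * internal body/query routing differs. A hedged usage sketch for one of the
 * endpoints touched above, assuming an already configured `client` instance:
 *
 *   await client.shutdown.putNode({
 *     node_id: 'node-1',               // path parameter -> PUT /_nodes/node-1/shutdown
 *     type: 'restart',                 // in acceptedBody -> request body
 *     reason: 'scheduled maintenance', // in acceptedBody -> request body
 *     master_timeout: '30s'            // neither path nor body -> query string
 *   })
 */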
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index 79a2f6f20..9e6a856f9 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -35,107 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Slm { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'slm.delete_lifecycle': { - path: [ - 'policy_id' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'slm.execute_lifecycle': { - path: [ - 'policy_id' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'slm.execute_retention': { - path: [], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'slm.get_lifecycle': { - path: [ - 'policy_id' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'slm.get_stats': { - path: [], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'slm.get_status': { - path: [], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'slm.put_lifecycle': { - path: [ - 'policy_id' - ], - body: [ - 'config', - 'name', - 'repository', - 'retention', - 'schedule' - ], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'slm.start': { - path: [], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'slm.stop': { - path: [], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - } - } } /** @@ -146,10 +51,7 @@ export default class Slm { async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['slm.delete_lifecycle'] - + const acceptedPath: string[] = ['policy_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -191,10 +93,7 @@ export default class Slm { async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['slm.execute_lifecycle'] - + const acceptedPath: string[] = ['policy_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -236,10 +135,7 @@ export default class Slm { async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise> async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['slm.execute_retention'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -279,10 +175,7 @@ export default class Slm { async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['slm.get_lifecycle'] - + const acceptedPath: string[] = ['policy_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -332,10 +225,7 @@ export default class Slm { async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['slm.get_stats'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -375,10 +265,7 @@ export default class Slm { async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['slm.get_status'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -418,12 +305,8 @@ export default class Slm { async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['slm.put_lifecycle'] - + const acceptedPath: string[] = ['policy_id'] + const acceptedBody: string[] = ['config', 'name', 'repository', 'retention', 'schedule'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -445,14 +328,8 @@ export default class Slm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -475,10 +352,7 @@ export default class Slm { async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['slm.start'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -518,10 +392,7 @@ export default class Slm { async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['slm.stop'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index 6b5ea2848..3b37c9bdb 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -35,208 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Snapshot { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'snapshot.cleanup_repository': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'snapshot.clone': { - path: [ - 'repository', - 'snapshot', - 'target_snapshot' - ], - body: [ - 'indices' - ], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'snapshot.create': { - path: [ - 'repository', - 'snapshot' - ], - body: [ - 'expand_wildcards', - 'feature_states', - 'ignore_unavailable', - 'include_global_state', - 'indices', - 'metadata', - 'partial' - ], - query: [ - 'master_timeout', - 'wait_for_completion' - ] - }, - 'snapshot.create_repository': { - path: [ - 'name' - ], - body: [ - 'repository' - ], - query: [ - 'master_timeout', - 'timeout', - 'verify' - ] - }, - 'snapshot.delete': { - path: [ - 'repository', - 'snapshot' - ], - body: [], - query: [ - 'master_timeout' - ] - }, - 'snapshot.delete_repository': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - }, - 'snapshot.get': { - path: [ - 'repository', - 'snapshot' - ], - body: [], - query: [ - 'after', - 'from_sort_value', - 'ignore_unavailable', - 'index_details', - 'index_names', - 'include_repository', - 'master_timeout', - 'order', - 'offset', - 'size', - 'slm_policy_filter', - 'sort', - 'verbose' - ] - }, - 'snapshot.get_repository': { - path: [ - 'name' - ], - body: [], - query: [ - 'local', - 'master_timeout' - ] - }, - 'snapshot.repository_analyze': { - path: [ - 'name' - ], - body: [], - query: [ - 'blob_count', - 'concurrency', - 'detailed', - 'early_read_node_count', - 'max_blob_size', - 'max_total_data_size', - 'rare_action_probability', - 'rarely_abort_writes', - 'read_node_count', - 'register_operation_count', - 
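/*
 * The `query` whitelists being deleted here (for example the long
 * snapshot.repository_analyze list) are what the old routing consulted:
 * a key that was neither a path nor a body parameter went to the query
 * string only if it appeared in the endpoint's `query` list or in
 * `commonQueryParams`; otherwise it fell through into the body. A sketch
 * of that decision, with a hypothetical helper name (`route`):
 *
 *   const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 *   function route (key: string, spec: { body: string[], query: string[] }): 'body' | 'query' {
 *     if (spec.body.includes(key)) return 'body'
 *     if (spec.query.includes(key) || commonQueryParams.includes(key)) return 'query'
 *     return 'body' // unknown keys previously defaulted into the request body
 *   }
 */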
'seed', - 'timeout' - ] - }, - 'snapshot.repository_verify_integrity': { - path: [ - 'name' - ], - body: [], - query: [ - 'blob_thread_pool_concurrency', - 'index_snapshot_verification_concurrency', - 'index_verification_concurrency', - 'max_bytes_per_sec', - 'max_failed_shard_snapshots', - 'meta_thread_pool_concurrency', - 'snapshot_verification_concurrency', - 'verify_blob_contents' - ] - }, - 'snapshot.restore': { - path: [ - 'repository', - 'snapshot' - ], - body: [ - 'feature_states', - 'ignore_index_settings', - 'ignore_unavailable', - 'include_aliases', - 'include_global_state', - 'index_settings', - 'indices', - 'partial', - 'rename_pattern', - 'rename_replacement' - ], - query: [ - 'master_timeout', - 'wait_for_completion' - ] - }, - 'snapshot.status': { - path: [ - 'repository', - 'snapshot' - ], - body: [], - query: [ - 'ignore_unavailable', - 'master_timeout' - ] - }, - 'snapshot.verify_repository': { - path: [ - 'name' - ], - body: [], - query: [ - 'master_timeout', - 'timeout' - ] - } - } } /** @@ -247,10 +51,7 @@ export default class Snapshot { async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['snapshot.cleanup_repository'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -292,12 +93,8 @@ export default class Snapshot { async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['snapshot.clone'] - + const acceptedPath: string[] = ['repository', 'snapshot', 'target_snapshot'] + const acceptedBody: string[] = ['indices'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -319,14 +116,8 @@ export default class Snapshot { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -351,12 +142,8 @@ export default class Snapshot { async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['snapshot.create'] - + const acceptedPath: string[] = ['repository', 'snapshot'] + const acceptedBody: string[] = ['expand_wildcards', 'feature_states', 'ignore_unavailable', 'include_global_state', 'indices', 'metadata', 'partial'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -378,14 +165,8 @@ export default class Snapshot { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -409,12 +190,8 @@ export default class Snapshot { async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['snapshot.create_repository'] - + const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['repository'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -426,14 +203,8 @@ export default class Snapshot { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -456,10 +227,7 @@ export default class Snapshot { async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['snapshot.delete'] - + const acceptedPath: string[] = ['repository', 'snapshot'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -502,10 +270,7 @@ export default class Snapshot { async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['snapshot.delete_repository'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -547,10 +312,7 @@ export default class Snapshot { async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['snapshot.get'] - + const acceptedPath: string[] = ['repository', 'snapshot'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -593,10 +355,7 @@ export default class Snapshot { async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['snapshot.get_repository'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -646,10 +405,7 @@ export default class Snapshot { async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['snapshot.repository_analyze'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -691,10 +447,7 @@ export default class Snapshot { async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise> async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['snapshot.repository_verify_integrity'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -736,12 +489,8 @@ export default class Snapshot { async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['snapshot.restore'] - + const acceptedPath: string[] = ['repository', 'snapshot'] + const acceptedBody: string[] = ['feature_states', 'ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -763,14 +512,8 @@ export default class Snapshot { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -794,10 +537,7 @@ export default class Snapshot { async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['snapshot.status'] - + const acceptedPath: string[] = ['repository', 'snapshot'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -851,10 +591,7 @@ export default class Snapshot { async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['snapshot.verify_repository'] - + const acceptedPath: string[] = ['name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
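// Hypothetical usage of the restore signature shown above. The repository and
// snapshot names are placeholders; `indices`, `rename_pattern`, and
// `rename_replacement` travel in the request body, while `wait_for_completion`
// is sent as a query parameter.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function restoreSnapshot (): Promise<void> {
  const response = await client.snapshot.restore({
    repository: 'my_repository',
    snapshot: 'snapshot_1',
    indices: 'logs-*',
    rename_pattern: '(.+)',
    rename_replacement: 'restored-$1',
    wait_for_completion: true
  })
  console.log(response)
}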
{ ...userQuery } : {} diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index 60478c7f7..871cb7139 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -35,89 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Sql { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'sql.clear_cursor': { - path: [], - body: [ - 'cursor' - ], - query: [] - }, - 'sql.delete_async': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'sql.get_async': { - path: [ - 'id' - ], - body: [], - query: [ - 'delimiter', - 'format', - 'keep_alive', - 'wait_for_completion_timeout' - ] - }, - 'sql.get_async_status': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'sql.query': { - path: [], - body: [ - 'allow_partial_search_results', - 'catalog', - 'columnar', - 'cursor', - 'fetch_size', - 'field_multi_value_leniency', - 'filter', - 'index_using_frozen', - 'keep_alive', - 'keep_on_completion', - 'page_timeout', - 'params', - 'query', - 'request_timeout', - 'runtime_mappings', - 'time_zone', - 'wait_for_completion_timeout' - ], - query: [ - 'format' - ] - }, - 'sql.translate': { - path: [], - body: [ - 'fetch_size', - 'filter', - 'query', - 'time_zone' - ], - query: [] - } - } } /** @@ -128,12 +51,8 @@ export default class Sql { async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['sql.clear_cursor'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['cursor'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -155,14 +74,8 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -182,10 +95,7 @@ export default class Sql { async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['sql.delete_async'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -227,10 +137,7 @@ export default class Sql { async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['sql.get_async'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -272,10 +179,7 @@ export default class Sql { async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['sql.get_async_status'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -317,12 +221,8 @@ export default class Sql { async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['sql.query'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['allow_partial_search_results', 'catalog', 'columnar', 'cursor', 'fetch_size', 'field_multi_value_leniency', 'filter', 'index_using_frozen', 'keep_alive', 'keep_on_completion', 'page_timeout', 'params', 'query', 'request_timeout', 'runtime_mappings', 'time_zone', 'wait_for_completion_timeout'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -345,14 +245,8 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -372,12 +266,8 @@ export default class Sql { async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise> async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['sql.translate'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['fetch_size', 'filter', 'query', 'time_zone'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
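// Hypothetical sql.query call matching the accepted parameters above:
// `query` and `fetch_size` are body fields, `format` stays on the query
// string. Assumes a `client` configured as in the earlier restore sketch.
import { Client } from '@elastic/elasticsearch'

async function runSqlQuery (client: Client): Promise<void> {
  const response = await client.sql.query({
    query: 'SELECT author, COUNT(*) AS books FROM library GROUP BY author',
    fetch_size: 5,
    format: 'json'
  })
  console.log(response.columns, response.rows)
}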
{ ...userQuery } : {} @@ -399,14 +289,8 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts index 6197e6805..29f25f090 100644 --- a/src/api/api/ssl.ts +++ b/src/api/api/ssl.ts @@ -35,24 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} +interface That { transport: Transport } export default class Ssl { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'ssl.certificates': { - path: [], - body: [], - query: [] - } - } } /** @@ -63,10 +51,7 @@ export default class Ssl { async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['ssl.certificates'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 125d5301f..379510816 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -35,81 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Synonyms { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'synonyms.delete_synonym': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'synonyms.delete_synonym_rule': { - path: [ - 'set_id', - 'rule_id' - ], - body: [], - query: [] - }, - 'synonyms.get_synonym': { - path: [ - 'id' - ], - body: [], - query: [ - 'from', - 'size' - ] - }, - 'synonyms.get_synonym_rule': { - path: [ - 'set_id', - 'rule_id' - ], - body: [], - query: [] - }, - 'synonyms.get_synonyms_sets': { - path: [], - body: [], - query: [ - 'from', - 'size' - ] - }, - 'synonyms.put_synonym': { - path: [ - 'id' - ], - body: [ - 'synonyms_set' - ], - query: [] - }, - 'synonyms.put_synonym_rule': { - path: [ - 'set_id', - 'rule_id' - ], - body: [ - 'synonyms' - ], - query: [] - } - } } /** @@ -120,10 +51,7 @@ export default class Synonyms { async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['synonyms.delete_synonym'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring 
const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -165,10 +93,7 @@ export default class Synonyms { async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['synonyms.delete_synonym_rule'] - + const acceptedPath: string[] = ['set_id', 'rule_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -211,10 +136,7 @@ export default class Synonyms { async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['synonyms.get_synonym'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -256,10 +178,7 @@ export default class Synonyms { async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['synonyms.get_synonym_rule'] - + const acceptedPath: string[] = ['set_id', 'rule_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -302,10 +221,7 @@ export default class Synonyms { async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['synonyms.get_synonyms_sets'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -345,12 +261,8 @@ export default class Synonyms { async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['synonyms.put_synonym'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['synonyms_set'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -372,14 +284,8 @@ export default class Synonyms { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -402,12 +308,8 @@ export default class Synonyms { async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['synonyms.put_synonym_rule'] - + const acceptedPath: string[] = ['set_id', 'rule_id'] + const acceptedBody: string[] = ['synonyms'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -429,14 +331,8 @@ export default class Synonyms { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index c5da070be..a8f7ccf20 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -35,54 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} +interface That { transport: Transport } export default class Tasks { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'tasks.cancel': { - path: [ - 'task_id' - ], - body: [], - query: [ - 'actions', - 'nodes', - 'parent_task_id', - 'wait_for_completion' - ] - }, - 'tasks.get': { - path: [ - 'task_id' - ], - body: [], - query: [ - 'timeout', - 'wait_for_completion' - ] - }, - 'tasks.list': { - path: [], - body: [], - query: [ - 'actions', - 'detailed', - 'group_by', - 'nodes', - 'parent_task_id', - 'timeout', - 'wait_for_completion' - ] - } - } } /** @@ -93,10 +51,7 @@ export default class Tasks { async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['tasks.cancel'] - + const acceptedPath: string[] = ['task_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
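// Hypothetical putSynonym call for the signature shown above: the whole
// `synonyms_set` array is the request body. The set and rule ids are
// placeholders; assumes a configured `client` as in the earlier sketches.
import { Client } from '@elastic/elasticsearch'

async function createSynonymSet (client: Client): Promise<void> {
  await client.synonyms.putSynonym({
    id: 'my-synonym-set',
    synonyms_set: [
      { id: 'rule-1', synonyms: 'hello, hi, howdy' },
      { id: 'rule-2', synonyms: 'bye, goodbye' }
    ]
  })
}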
{ ...userQuery } : {} @@ -146,10 +101,7 @@ export default class Tasks { async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['tasks.get'] - + const acceptedPath: string[] = ['task_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -191,10 +143,7 @@ export default class Tasks { async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise> async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['tasks.list'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index 38a264ef8..ad9fa1e0e 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -35,30 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - terms_enum: { - path: [ - 'index' - ], - body: [ - 'field', - 'size', - 'timeout', - 'case_insensitive', - 'index_filter', - 'string', - 'search_after' - ], - query: [] - } -} +interface That { transport: Transport } /** * Get terms in an index. Discover terms that match a partial string in an index. This API is designed for low-latency look-ups used in auto-complete scenarios. > info > The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. @@ -68,12 +45,8 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.terms_enum - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['field', 'size', 'timeout', 'case_insensitive', 'index_filter', 'string', 'search_after'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -95,14 +68,8 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index 331e9fe69..c3f461487 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -35,39 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - termvectors: { - path: [ - 'index', - 'id' - ], - body: [ - 'doc', - 'filter', - 'per_field_analyzer' - ], - query: [ - 'fields', - 'field_statistics', - 'offsets', - 'payloads', - 'positions', - 'preference', - 'realtime', - 'routing', - 'term_statistics', - 'version', - 'version_type' - ] - } -} +interface That { transport: Transport } /** * Get term vector information. Get information and statistics about terms in the fields of a particular document. You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. For example: ``` GET /my-index-000001/_termvectors/1?fields=message ``` Fields can be specified using wildcards, similar to the multi match query. Term vectors are real-time by default, not near real-time. This can be changed by setting `realtime` parameter to `false`. You can request three types of values: _term information_, _term statistics_, and _field statistics_. By default, all term information and field statistics are returned for all fields but term statistics are excluded. **Term information** * term frequency in the field (always returned) * term positions (`positions: true`) * start and end offsets (`offsets: true`) * term payloads (`payloads: true`), as base64 encoded bytes If the requested information wasn't stored in the index, it will be computed on the fly if possible. Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. > warn > Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. **Behaviour** The term and field statistics are not accurate. Deleted documents are not taken into account. The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. 
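// Hypothetical request for the term-vector behaviour described above:
// positions, offsets, and term statistics are opted into per call. The index
// name and document id are placeholders; assumes a configured `client`.
import { Client } from '@elastic/elasticsearch'

async function inspectTermVectors (client: Client): Promise<void> {
  const response = await client.termvectors({
    index: 'my-index-000001',
    id: '1',
    fields: ['message'],
    positions: true,
    offsets: true,
    term_statistics: true,
    field_statistics: true
  })
  console.log(response.term_vectors)
}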
@@ -77,12 +45,8 @@ export default async function TermvectorsApi (this: That, p export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.termvectors - + const acceptedPath: string[] = ['index', 'id'] + const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -104,14 +68,8 @@ export default async function TermvectorsApi (this: That, p } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index 7d18c5f3b..fd245e577 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -35,93 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class TextStructure { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'text_structure.find_field_structure': { - path: [], - body: [], - query: [ - 'column_names', - 'delimiter', - 'documents_to_sample', - 'ecs_compatibility', - 'explain', - 'field', - 'format', - 'grok_pattern', - 'index', - 'quote', - 'should_trim_fields', - 'timeout', - 'timestamp_field', - 'timestamp_format' - ] - }, - 'text_structure.find_message_structure': { - path: [], - body: [ - 'messages' - ], - query: [ - 'column_names', - 'delimiter', - 'ecs_compatibility', - 'explain', - 'format', - 'grok_pattern', - 'quote', - 'should_trim_fields', - 'timeout', - 'timestamp_field', - 'timestamp_format' - ] - }, - 'text_structure.find_structure': { - path: [], - body: [ - 'text_files' - ], - query: [ - 'charset', - 'column_names', - 'delimiter', - 'ecs_compatibility', - 'explain', - 'format', - 'grok_pattern', - 'has_header_row', - 'line_merge_size_limit', - 'lines_to_sample', - 'quote', - 'should_trim_fields', - 'timeout', - 'timestamp_field', - 'timestamp_format' - ] - }, - 'text_structure.test_grok_pattern': { - path: [], - body: [ - 'grok_pattern', - 'text' - ], - query: [ - 'ecs_compatibility' - ] - } - } } /** @@ -132,10 +51,7 @@ export default class TextStructure { async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath 
- } = this.acceptedParams['text_structure.find_field_structure'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -174,12 +90,8 @@ export default class TextStructure { async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['text_structure.find_message_structure'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['messages'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -201,14 +113,8 @@ export default class TextStructure { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -228,12 +134,8 @@ export default class TextStructure { async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['text_structure.find_structure'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['text_files'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -245,14 +147,8 @@ export default class TextStructure { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -272,12 +168,8 @@ export default class TextStructure { async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['text_structure.test_grok_pattern'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['grok_pattern', 'text'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -299,14 +191,8 @@ export default class TextStructure { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index 4e168f30b..4872de3e1 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -35,170 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Transform { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'transform.delete_transform': { - path: [ - 'transform_id' - ], - body: [], - query: [ - 'force', - 'delete_dest_index', - 'timeout' - ] - }, - 'transform.get_node_stats': { - path: [], - body: [], - query: [] - }, - 'transform.get_transform': { - path: [ - 'transform_id' - ], - body: [], - query: [ - 'allow_no_match', - 'from', - 'size', - 'exclude_generated' - ] - }, - 'transform.get_transform_stats': { - path: [ - 'transform_id' - ], - body: [], - query: [ - 'allow_no_match', - 'from', - 'size', - 'timeout' - ] - }, - 'transform.preview_transform': { - path: [ - 'transform_id' - ], - body: [ - 'dest', - 'description', - 'frequency', - 'pivot', - 'source', - 'settings', - 'sync', - 'retention_policy', - 'latest' - ], - query: [ - 'timeout' - ] - }, - 'transform.put_transform': { - path: [ - 'transform_id' - ], - body: [ - 'dest', - 'description', - 'frequency', - 'latest', - '_meta', - 'pivot', - 'retention_policy', - 'settings', - 'source', - 'sync' - ], - query: [ - 'defer_validation', - 'timeout' - ] - }, - 'transform.reset_transform': { - path: [ - 'transform_id' - ], - body: [], - query: [ - 'force', - 'timeout' - ] - }, - 'transform.schedule_now_transform': { - path: [ - 'transform_id' - ], - body: [], - query: [ - 'timeout' - ] - }, - 'transform.start_transform': { - path: [ - 'transform_id' - ], - body: [], - query: [ - 'timeout', - 'from' - ] - }, - 'transform.stop_transform': { - path: [ - 'transform_id' - ], - body: [], - query: [ - 'allow_no_match', - 'force', - 'timeout', - 'wait_for_checkpoint', - 'wait_for_completion' - ] - }, - 'transform.update_transform': { - path: [ - 'transform_id' - ], - body: [ - 'dest', - 'description', - 'frequency', - '_meta', - 'source', - 'settings', - 'sync', - 'retention_policy' - ], - query: [ - 'defer_validation', - 'timeout' - ] - }, - 'transform.upgrade_transforms': { - path: [], - body: [], - query: [ - 'dry_run', - 'timeout' - ] - } - } } /** @@ -209,10 +51,7 @@ export default class Transform { async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['transform.delete_transform'] - + const acceptedPath: 
string[] = ['transform_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -254,10 +93,7 @@ export default class Transform { async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['transform.get_node_stats'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -296,10 +132,7 @@ export default class Transform { async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['transform.get_transform'] - + const acceptedPath: string[] = ['transform_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -349,10 +182,7 @@ export default class Transform { async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['transform.get_transform_stats'] - + const acceptedPath: string[] = ['transform_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -394,12 +224,8 @@ export default class Transform { async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise> async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['transform.preview_transform'] - + const acceptedPath: string[] = ['transform_id'] + const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -422,14 +248,8 @@ export default class Transform { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -459,12 +279,8 @@ export default class Transform { async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['transform.put_transform'] - + const acceptedPath: string[] = ['transform_id'] + const acceptedBody: string[] = ['dest', 'description', 'frequency', 'latest', '_meta', 'pivot', 'retention_policy', 'settings', 'source', 'sync'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -486,14 +302,8 @@ export default class Transform { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -516,10 +326,7 @@ export default class Transform { async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['transform.reset_transform'] - + const acceptedPath: string[] = ['transform_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -561,10 +368,7 @@ export default class Transform { async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['transform.schedule_now_transform'] - + const acceptedPath: string[] = ['transform_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -606,10 +410,7 @@ export default class Transform { async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['transform.start_transform'] - + const acceptedPath: string[] = ['transform_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
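// Hypothetical putTransform call using the body fields listed above (source,
// dest, pivot, frequency, sync); the transform id, index names, and field
// names are placeholders, and a configured `client` is assumed.
import { Client } from '@elastic/elasticsearch'

async function createTransform (client: Client): Promise<void> {
  await client.transform.putTransform({
    transform_id: 'ecommerce-summary',
    source: { index: 'kibana_sample_data_ecommerce' },
    dest: { index: 'ecommerce-customer-summary' },
    pivot: {
      group_by: { customer_id: { terms: { field: 'customer_id' } } },
      aggregations: { total_spend: { sum: { field: 'taxful_total_price' } } }
    },
    frequency: '5m',
    sync: { time: { field: 'order_date', delay: '60s' } }
  })
}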
{ ...userQuery } : {} @@ -651,10 +452,7 @@ export default class Transform { async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['transform.stop_transform'] - + const acceptedPath: string[] = ['transform_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -696,12 +494,8 @@ export default class Transform { async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['transform.update_transform'] - + const acceptedPath: string[] = ['transform_id'] + const acceptedBody: string[] = ['dest', 'description', 'frequency', '_meta', 'source', 'settings', 'sync', 'retention_policy'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -723,14 +517,8 @@ export default class Transform { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -753,10 +541,7 @@ export default class Transform { async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['transform.upgrade_transforms'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/update.ts b/src/api/api/update.ts index 64419582a..06d06ae63 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -35,45 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - -const acceptedParams: Record = { - update: { - path: [ - 'id', - 'index' - ], - body: [ - 'detect_noop', - 'doc', - 'doc_as_upsert', - 'script', - 'scripted_upsert', - '_source', - 'upsert' - ], - query: [ - 'if_primary_term', - 'if_seq_no', - 'include_source_on_error', - 'lang', - 'refresh', - 'require_alias', - 'retry_on_conflict', - 'routing', - 'timeout', - 'wait_for_active_shards', - '_source', - '_source_excludes', - '_source_includes' - ] - } -} +interface That { transport: Transport } /** * Update a document. 
Update a document by running a script or passing a partial document. If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation: * Gets the document (collocated with the shard) from the index. * Runs the specified script. * Indexes the result. The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). @@ -83,12 +45,8 @@ export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise> export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.update - + const acceptedPath: string[] = ['id', 'index'] + const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', '_source', 'upsert'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -110,14 +68,8 @@ export default async function UpdateApi = { - update_by_query: { - path: [ - 'index' - ], - body: [ - 'max_docs', - 'query', - 'script', - 'slice', - 'conflicts' - ], - query: [ - 'allow_no_indices', - 'analyzer', - 'analyze_wildcard', - 'conflicts', - 'default_operator', - 'df', - 'expand_wildcards', - 'from', - 'ignore_unavailable', - 'lenient', - 'max_docs', - 'pipeline', - 'preference', - 'q', - 'refresh', - 'request_cache', - 'requests_per_second', - 'routing', - 'scroll', - 'scroll_size', - 'search_timeout', - 'search_type', - 'slices', - 'sort', - 'stats', - 'terminate_after', - 'timeout', - 'version', - 'version_type', - 'wait_for_active_shards', - 'wait_for_completion' - ] - } -} +interface That { transport: Transport } /** * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `index` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. 
You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick, they are not rolled back. **Throttling update requests** To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is 1000, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with slices will cancel each sub-request. * Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. 
Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. * Update performance scales linearly across available resources with the number of slices. Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. **Update the document source** Update by query supports scripts to update the document source. As with the update API, you can set `ctx.op` to change the operation that is performed. Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. The update by query operation skips updating the document and increments the `noop` counter. Set `ctx.op = "delete"` if your script decides that the document should be deleted. The update by query operation deletes the document and increments the `deleted` counter. Update by query supports only `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error. This API enables you to only modify the source of matching documents; you cannot move them. @@ -98,12 +45,8 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = acceptedParams.update_by_query - + const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice', 'conflicts'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -125,14 +68,8 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
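// Hypothetical updateByQuery call tying together the behaviour described in
// the comment above: `conflicts: 'proceed'` counts version conflicts instead
// of aborting, `slices: 'auto'` parallelises the operation, and the painless
// script rewrites the document source. Index and field names are placeholders;
// assumes a configured `client`.
import { Client } from '@elastic/elasticsearch'

async function touchActiveDocs (client: Client): Promise<void> {
  const response = await client.updateByQuery({
    index: 'my-index-000001',
    conflicts: 'proceed',
    slices: 'auto',
    query: { term: { 'user.id': 'kimchy' } },
    script: {
      source: 'ctx._source.count = (ctx._source.count ?: 0) + 1',
      lang: 'painless'
    }
  })
  console.log(response.updated, response.version_conflicts)
}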
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts index 9572be0f7..eb96ad0ed 100644 --- a/src/api/api/update_by_query_rethrottle.ts +++ b/src/api/api/update_by_query_rethrottle.ts @@ -35,22 +35,7 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport -} - -const acceptedParams: Record = { - update_by_query_rethrottle: { - path: [ - 'task_id' - ], - body: [], - query: [ - 'requests_per_second' - ] - } -} +interface That { transport: Transport } /** * Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. @@ -60,10 +45,7 @@ export default async function UpdateByQueryRethrottleApi (this: That, params: T. export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = acceptedParams.update_by_query_rethrottle - + const acceptedPath: string[] = ['task_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ?
{ ...userQuery } : {} diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index b1956dfce..7e795d62b 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -35,148 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} - -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] +interface That { transport: Transport } export default class Watcher { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'watcher.ack_watch': { - path: [ - 'watch_id', - 'action_id' - ], - body: [], - query: [] - }, - 'watcher.activate_watch': { - path: [ - 'watch_id' - ], - body: [], - query: [] - }, - 'watcher.deactivate_watch': { - path: [ - 'watch_id' - ], - body: [], - query: [] - }, - 'watcher.delete_watch': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'watcher.execute_watch': { - path: [ - 'id' - ], - body: [ - 'action_modes', - 'alternative_input', - 'ignore_condition', - 'record_execution', - 'simulated_actions', - 'trigger_data', - 'watch' - ], - query: [ - 'debug' - ] - }, - 'watcher.get_settings': { - path: [], - body: [], - query: [ - 'master_timeout' - ] - }, - 'watcher.get_watch': { - path: [ - 'id' - ], - body: [], - query: [] - }, - 'watcher.put_watch': { - path: [ - 'id' - ], - body: [ - 'actions', - 'condition', - 'input', - 'metadata', - 'throttle_period', - 'throttle_period_in_millis', - 'transform', - 'trigger' - ], - query: [ - 'active', - 'if_primary_term', - 'if_seq_no', - 'version' - ] - }, - 'watcher.query_watches': { - path: [], - body: [ - 'from', - 'size', - 'query', - 'sort', - 'search_after' - ], - query: [] - }, - 'watcher.start': { - path: [], - body: [], - query: [ - 'master_timeout' - ] - }, - 'watcher.stats': { - path: [ - 'metric' - ], - body: [], - query: [ - 'emit_stacktraces', - 'metric' - ] - }, - 'watcher.stop': { - path: [], - body: [], - query: [ - 'master_timeout' - ] - }, - 'watcher.update_settings': { - path: [], - body: [ - 'index.auto_expand_replicas', - 'index.number_of_replicas' - ], - query: [ - 'master_timeout', - 'timeout' - ] - } - } } /** @@ -187,10 +51,7 @@ export default class Watcher { async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['watcher.ack_watch'] - + const acceptedPath: string[] = ['watch_id', 'action_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -240,10 +101,7 @@ export default class Watcher { async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['watcher.activate_watch'] - + const acceptedPath: string[] = ['watch_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -285,10 +143,7 @@ export default class Watcher { async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['watcher.deactivate_watch'] - + const acceptedPath: string[] = ['watch_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -330,10 +185,7 @@ export default class Watcher { async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['watcher.delete_watch'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -375,12 +227,8 @@ export default class Watcher { async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['watcher.execute_watch'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['action_modes', 'alternative_input', 'ignore_condition', 'record_execution', 'simulated_actions', 'trigger_data', 'watch'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -403,14 +251,8 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -440,10 +282,7 @@ export default class Watcher { async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['watcher.get_settings'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -483,10 +322,7 @@ export default class Watcher { async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['watcher.get_watch'] - + const acceptedPath: string[] = ['id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -528,12 +364,8 @@ export default class Watcher { async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['watcher.put_watch'] - + const acceptedPath: string[] = ['id'] + const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'throttle_period_in_millis', 'transform', 'trigger'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -555,14 +387,8 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -585,12 +411,8 @@ export default class Watcher { async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['watcher.query_watches'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['from', 'size', 'query', 'sort', 'search_after'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -613,14 +435,8 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } @@ -640,10 +456,7 @@ export default class Watcher { async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise> async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['watcher.start'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -683,10 +496,7 @@ export default class Watcher { async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['watcher.stats'] - + const acceptedPath: string[] = ['metric'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -736,10 +546,7 @@ export default class Watcher { async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['watcher.stop'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -779,12 +586,8 @@ export default class Watcher { async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['watcher.update_settings'] - + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['index.auto_expand_replicas', 'index.number_of_replicas'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -807,14 +610,8 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + // @ts-expect-error + querystring[key] = params[key] } } diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts index 084fa20ec..9e6a66f7b 100644 --- a/src/api/api/xpack.ts +++ b/src/api/api/xpack.ts @@ -35,35 +35,12 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' - -interface That { - transport: Transport - acceptedParams: Record -} +interface That { transport: Transport } export default class Xpack { transport: Transport - acceptedParams: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { - 'xpack.info': { - path: [], - body: [], - query: [ - 'categories', - 'accept_enterprise', - 'human' - ] - }, - 'xpack.usage': { - path: [], - body: [], - query: [ - 'master_timeout' - ] - } - } } /** @@ -74,10 +51,7 @@ export default class Xpack { async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['xpack.info'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -117,10 +91,7 @@ export default class Xpack { async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath - } = this.acceptedParams['xpack.usage'] - + const acceptedPath: string[] = [] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/types.ts b/src/api/types.ts index 668948f05..e242e803c 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2775,6 +2775,7 @@ export interface BulkIndexByScrollFailure { id: Id index: IndexName status: integer + type: string } export interface BulkStats { @@ -6102,7 +6103,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { export interface MappingDynamicTemplate { mapping?: MappingProperty - runtime?: MappingRuntimeField + runtime?: MappingProperty match?: string | string[] path_match?: string | string[] unmatch?: string | string[] @@ -12799,7 +12800,7 @@ export interface IndicesIndexSettingsKeys { routing_partition_size?: SpecUtilsStringified load_fixed_bitset_filters_eagerly?: boolean hidden?: boolean | string - auto_expand_replicas?: SpecUtilsWithNullValue + auto_expand_replicas?: string merge?: IndicesMerge search?: IndicesSettingsSearch refresh_interval?: Duration @@ -13096,7 +13097,7 @@ export interface IndicesSoftDeletes { retention_lease?: IndicesRetentionLease } -export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic' +export type IndicesSourceMode = 'DISABLED' | 'STORED' | 'SYNTHETIC' export interface IndicesStorage { type: IndicesStorageType @@ -14274,7 +14275,7 @@ export interface IndicesPutMappingRequest extends RequestBase { /** If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string. 
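For example (the format and value here are illustrative): with `date_detection` enabled and `dynamic_date_formats: ["MM/dd/yyyy"]`, indexing the string "09/25/2025" produces a `date` field rather than a `text` field.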
*/ dynamic_date_formats?: string[] /** Specify dynamic templates for the mapping. */ - dynamic_templates?: Record[] + dynamic_templates?: Record | Record[] /** Control whether field names are enabled for the index. */ _field_names?: MappingFieldNamesField /** A mapping type can have custom meta data associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata. */ @@ -21363,10 +21364,6 @@ export interface SecurityRemoteIndicesPrivileges { allow_restricted_indices?: boolean } -export interface SecurityRemoteUserIndicesPrivileges extends SecurityUserIndicesPrivileges { - clusters: string[] -} - export interface SecurityReplicationAccess { names: IndexName | IndexName[] allow_restricted_indices?: boolean @@ -22054,8 +22051,7 @@ export interface SecurityGetRoleRole { remote_indices?: SecurityRemoteIndicesPrivileges[] remote_cluster?: SecurityRemoteClusterPrivileges[] metadata: Metadata - description?: string - run_as?: string[] + run_as: string[] transient_metadata?: Record applications: SecurityApplicationPrivileges[] role_templates?: SecurityRoleTemplate[] @@ -22208,10 +22204,8 @@ export interface SecurityGetUserPrivilegesRequest extends RequestBase { export interface SecurityGetUserPrivilegesResponse { applications: SecurityApplicationPrivileges[] cluster: string[] - remote_cluster?: SecurityRemoteClusterPrivileges[] global: SecurityGlobalPrivilege[] indices: SecurityUserIndicesPrivileges[] - remote_indices?: SecurityRemoteUserIndicesPrivileges[] run_as: string[] } From 8ca68a4178612eed42c5d0d8da16a8b05729e3e6 Mon Sep 17 00:00:00 2001 From: Colleen McGinnis Date: Mon, 3 Mar 2025 15:28:38 -0600 Subject: [PATCH 485/647] [docs] Clean up cross-repo links (#2640) * clean up cross-repo links * add docs-content to cross_links --- docs/docset.yml | 1 + docs/reference/api-reference.md | 10 +++++----- docs/reference/connecting.md | 6 +++--- docs/reference/observability.md | 2 +- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/docset.yml b/docs/docset.yml index 10fc0529d..27f8dc2d6 100644 --- a/docs/docset.yml +++ b/docs/docset.yml @@ -2,6 +2,7 @@ project: 'Node.js client' exclude: - examples/proxy/README.md cross_links: + - docs-content - elasticsearch toc: - toc: reference diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 2271a214a..e216c1981 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -764,7 +764,7 @@ Get script contexts. Get a list of supported script contexts and their methods. -[Endpoint documentation](elasticsearch://docs/reference/scripting-languages/painless/painless-contexts.md) +[Endpoint documentation](elasticsearch://reference/scripting-languages/painless/painless-contexts.md) ```ts client.getScriptContext() @@ -1534,7 +1534,7 @@ The API uses several *contexts*, which control how scripts are run, what variabl Each context requires a script, but additional parameters depend on the context you’re using for that script. -[Endpoint documentation](elasticsearch://docs/reference/scripting-languages/painless/painless-api-examples.md) +[Endpoint documentation](elasticsearch://reference/scripting-languages/painless/painless-api-examples.md) ```ts client.scriptsPainlessExecute({ ... }) @@ -5261,7 +5261,7 @@ client.ilm.stop({ ... }) Add an index block. Limits the operations allowed on an index by blocking specific operation types. 
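For instance, a minimal hedged example (the index name is an assumption) that blocks writes on an index:

```ts
// Make an existing index reject write operations; `block` also accepts
// 'read', 'read_only', and 'metadata'.
const response = await client.indices.addBlock({
  index: 'my-index',
  block: 'write'
})
console.log(response.acknowledged)
```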
-[Index block settings](elasticsearch://docs/reference/elasticsearch/index-settings/index-block.md) +[Index block settings](elasticsearch://reference/elasticsearch/index-settings/index-block.md) ```ts client.indices.addBlock({ index, block }) @@ -7375,7 +7375,7 @@ client.ingest.deletePipeline({ id }) Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor. -[Endpoint documentation](elasticsearch://docs/reference/ingestion-tools/enrich-processor/geoip-processor.md) +[Endpoint documentation](elasticsearch://reference/ingestion-tools/enrich-processor/geoip-processor.md) ```ts client.ingest.geoIpStats() @@ -7446,7 +7446,7 @@ client.ingest.getPipeline({ ... }) Run a grok processor. Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. -[Endpoint documentation](elasticsearch://docs/reference/ingestion-tools/enrich-processor/grok-processor.md) +[Endpoint documentation](elasticsearch://reference/ingestion-tools/enrich-processor/grok-processor.md) ```ts client.ingest.processorGrok() diff --git a/docs/reference/connecting.md b/docs/reference/connecting.md index ec4c2e454..34f85e65c 100644 --- a/docs/reference/connecting.md +++ b/docs/reference/connecting.md @@ -133,7 +133,7 @@ Running {{es}} without security enabled is not recommended. :::: -If your cluster is configured with [security explicitly disabled](elasticsearch://docs/reference/elasticsearch/configuration-reference/security-settings.md) then you can connect via HTTP: +If your cluster is configured with [security explicitly disabled](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md) then you can connect via HTTP: ```js const { Client } = require('@elastic/elasticsearch') @@ -332,7 +332,7 @@ The supported request specific options are: | Option | Description | | --- | ----------- | | `ignore` | `number[]` -  HTTP status codes which should not be considered errors for this request.
    *Default:* `null` | -| `requestTimeout` | `number` or `string` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See [Elasticsearch best practices for HTML clients](elasticsearch://docs/reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more info.
    _Default:* No timeout | +| `requestTimeout` | `number` or `string` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See [Elasticsearch best practices for HTTP clients](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more info.<br>
    *Default:* No timeout | | `retryOnTimeout` | `boolean` - Retry requests that have timed out.<br>
    *Default:* `false` | | `maxRetries` | `number` - Max number of retries for the request; it overrides the client default.<br>
    *Default:* `3` | | `compression` | `string` or `boolean` - Enables body compression for the request.
    *Options:* `false`, `'gzip'`
    *Default:* `false` | @@ -341,7 +341,7 @@ The supported request specific options are: |`querystring` | `object` - Custom querystring for the request.
    *Default:* `null` | | `id` | `any` - Custom request ID. *(overrides the top level request id generator)*
    *Default:* `null` | | `context` | `any` - Custom object per request. *(you can use it to pass data to the client's events)*<br>
    *Default:* `null` | -| `opaqueId` | `string` - Set the `X-Opaque-Id` HTTP header. See [X-Opaque-Id HTTP header](elasticsearch://docs/reference/elasticsearch/rest-apis/api-conventions.md#x-opaque-id) *Default:* `null` | +| `opaqueId` | `string` - Set the `X-Opaque-Id` HTTP header. See [X-Opaque-Id HTTP header](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#x-opaque-id) *Default:* `null` | | `maxResponseSize` | `number` - When configured, it verifies that the uncompressed response size is lower than the configured number; if it’s higher, it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENGTH<br>
    *Default:* `null` | | `maxCompressedResponseSize` | `number` - When configured, it verifies that the compressed response size is lower than the configured number; if it’s higher, it will abort the request. It cannot be higher than buffer.constants.MAX_LENGTH<br>
    *Default:* `null` | | `signal` | `AbortSignal` - The AbortSignal instance to allow request abortion.
    *Default:* `null` | diff --git a/docs/reference/observability.md b/docs/reference/observability.md index d142a96cc..38f8c332d 100644 --- a/docs/reference/observability.md +++ b/docs/reference/observability.md @@ -312,7 +312,7 @@ child.search({ ## X-Opaque-Id support [_x_opaque_id_support] -To improve observability, the client offers an easy way to configure the `X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this allows you to discover this identifier in the [deprecation logs](docs-content://deploy-manage/monitor/logging-configuration/update-elasticsearch-logging-levels.md#deprecation-logging), helps you with [identifying search slow log origin](elasticsearch://docs/reference/elasticsearch/index-settings/slow-log.md) as well as [identifying running tasks](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks). +To improve observability, the client offers an easy way to configure the `X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this allows you to discover this identifier in the [deprecation logs](docs-content://deploy-manage/monitor/logging-configuration/update-elasticsearch-logging-levels.md#deprecation-logging), helps you with [identifying search slow log origin](elasticsearch://reference/elasticsearch/index-settings/slow-log.md) as well as [identifying running tasks](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks). The `X-Opaque-Id` should be configured in each request, for doing that you can use the `opaqueId` option, as you can see in the following example. The resulting header will be `{ 'X-Opaque-Id': 'my-search' }`. From c713e599d1a3f31f714951a99084ff1a5f250cc1 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 6 Mar 2025 10:43:53 -0600 Subject: [PATCH 486/647] Put version back to correct value (#2648) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 377ba7b6b..0960ee05b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@elastic/elasticsearch", - "version": "9.0.0-alpha.3", + "version": "9.0.0-alpha.4", "versionCanary": "9.0.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", From 0eaeb78c9647327451ce59a1bddf37cc96e7ccff Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 7 Mar 2025 10:30:34 -0600 Subject: [PATCH 487/647] Fix npm-publish workflow (#2650) Reverting an accidental revert --- .github/workflows/npm-publish.yml | 35 ++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index 6040880b4..8994a003b 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -23,19 +23,38 @@ jobs: - run: npm install -g npm - run: npm install - run: npm test - - run: npm publish --provenance --access public --tag alpha + - name: npm publish + run: | + version=$(jq -r .version package.json) + tag_meta=$(echo "$version" | cut -s -d '-' -f2) + if [[ -z "$tag_meta" ]]; then + npm publish --provenance --access public + else + tag=$(echo "$tag_meta" | cut -d '.' -f1) + npm publish --provenance --access public --tag "$tag" + fi env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} - name: Publish version on GitHub run: | version=$(jq -r .version package.json) - gh release create \ - -n "This is a 9.0.0 pre-release alpha. Changes may not be stable." 
\ - --latest=false \ - --prerelease \ - --target "$BRANCH_NAME" \ - --title "v$version" \ - "v$version" + tag_meta=$(echo "$version" | cut -s -d '-' -f2) + if [[ -z "$tag_meta" ]]; then + gh release create \ + -n "[Changelog](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/$BRANCH_NAME/changelog-client.html)" \ + --target "$BRANCH_NAME" \ + --title "v$version" \ + "v$version" + else + tag_main=$(echo "$version" | cut -d '-' -f1) + gh release create \ + -n "This is a $tag_main pre-release. Changes may not be stable." \ + --latest=false \ + --prerelease \ + --target "$BRANCH_NAME" \ + --title "v$version" \ + "v$version" + fi env: BRANCH_NAME: ${{ github.event.inputs.branch }} GH_TOKEN: ${{ github.token }} From 6836a3f1c7779e4c620ce64bde81697f18819e3c Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Fri, 7 Mar 2025 20:39:26 +0000 Subject: [PATCH 488/647] Update dependency @elastic/request-converter to v8.18.0 (#2642) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Co-authored-by: Josh Mock --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 0960ee05b..fc5a12fdd 100644 --- a/package.json +++ b/package.json @@ -56,7 +56,7 @@ "node": ">=18" }, "devDependencies": { - "@elastic/request-converter": "8.17.0", + "@elastic/request-converter": "8.18.0", "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "0.7.34", From e2eb6ef58654354689aa05e6ad81d49f7e7aa889 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Fri, 7 Mar 2025 14:59:54 -0600 Subject: [PATCH 489/647] Update dependency @types/node to v22.13.9 (#2641) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index fc5a12fdd..2a36d7870 100644 --- a/package.json +++ b/package.json @@ -60,7 +60,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "22.13.5", + "@types/node": "22.13.9", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 3ed94d71e078a68648271476c113f528d55c9e38 Mon Sep 17 00:00:00 2001 From: Colleen McGinnis Date: Fri, 7 Mar 2025 15:11:18 -0600 Subject: [PATCH 490/647] fix external links (#2649) --- docs/reference/connecting.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/connecting.md b/docs/reference/connecting.md index 34f85e65c..72eab6b5c 100644 --- a/docs/reference/connecting.md +++ b/docs/reference/connecting.md @@ -406,9 +406,9 @@ module.exports = async function (context, req) { Resources used to assess these recommendations: * [GCP Cloud Functions: Tips & Tricks](https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations) -* [Best practices for working with AWS Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/best-practices.md) +* [Best practices for working with AWS Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html) * [Azure Functions Python developer guide](https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=azurecli-linux%2Capplication-level#global-variables) -* [AWS 
Lambda: Comparing the effect of global scope](https://docs.aws.amazon.com/lambda/latest/operatorguide/global-scope.md) +* [AWS Lambda: Comparing the effect of global scope](https://docs.aws.amazon.com/lambda/latest/operatorguide/global-scope.html) ## Connecting through a proxy [client-connect-proxy] From 16b51c2315f7dd649e5e00dfdb40e4668ef2a874 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 10:47:34 -0500 Subject: [PATCH 491/647] Update dependency @types/node to v22.13.10 (#2653) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 2a36d7870..098222272 100644 --- a/package.json +++ b/package.json @@ -60,7 +60,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "22.13.9", + "@types/node": "22.13.10", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 85396ddc67494f864b1caa0daf4a9e4e760ce37d Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 11:54:15 -0500 Subject: [PATCH 492/647] Update dependency semver to v7.7.1 (#2654) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 098222272..c10929e17 100644 --- a/package.json +++ b/package.json @@ -77,7 +77,7 @@ "ora": "5.4.1", "proxy": "1.0.2", "rimraf": "3.0.2", - "semver": "7.6.3", + "semver": "7.7.1", "split2": "4.2.0", "stoppable": "1.1.0", "tap": "21.0.2", From afc83338b006b9e59f886776243236c66525d85d Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 21 Mar 2025 11:16:10 -0500 Subject: [PATCH 493/647] Assume codegen renders markdown, not asciidoc (#2664) * Assume codegen renders markdown, not asciidoc * Drop accidental local dev tweak --- .buildkite/make.mjs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/make.mjs b/.buildkite/make.mjs index 3026b61f3..b1dc41e18 100644 --- a/.buildkite/make.mjs +++ b/.buildkite/make.mjs @@ -123,7 +123,7 @@ async function codegen (args) { await $`rm -rf ${join(import.meta.url, '..', 'src', 'api')}` await $`mkdir ${join(import.meta.url, '..', 'src', 'api')}` await $`cp -R ${join(import.meta.url, '..', '..', 'elastic-client-generator-js', 'output')}/* ${join(import.meta.url, '..', 'src', 'api')}` - await $`mv ${join(import.meta.url, '..', 'src', 'api', 'reference.asciidoc')} ${join(import.meta.url, '..', 'docs', 'reference.asciidoc')}` + await $`mv ${join(import.meta.url, '..', 'src', 'api', 'reference.md')} ${join(import.meta.url, '..', 'docs', 'reference', 'api-reference.md')}` await $`npm run build` // run docs example generation From 461f9b7f6667e3368bf9c9040d2d5a6ab3a2c9f8 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 21 Mar 2025 12:31:38 -0500 Subject: [PATCH 494/647] SPDX license format (#2667) * Switch to SPDX license format for all non-codegen files * Add test to ensure all committed JS files have SPDX header --- .buildkite/make.mjs | 18 +- .github/workflows/nodejs.yml | 3 + docs/examples/proxy/api/autocomplete.js | 18 +- docs/examples/proxy/api/delete.js | 18 +- docs/examples/proxy/api/index.js | 18 +- docs/examples/proxy/api/search.js 
| 18 +- docs/examples/proxy/utils/authorize.js | 18 +- .../proxy/utils/prepare-elasticsearch.js | 18 +- index.d.ts | 18 +- index.js | 18 +- package.json | 1 + scripts/check-spdx | 27 + scripts/download-artifacts.js | 18 +- scripts/generate-docs-examples.js | 18 +- scripts/generate.js | 143 ----- scripts/kibana-docker.sh | 8 - scripts/release-canary.js | 9 +- scripts/utils/clone-es.js | 139 ----- scripts/utils/generateApis.js | 553 ------------------ scripts/utils/generateDocs.js | 318 ---------- scripts/utils/generateMain.js | 299 ---------- scripts/utils/generateRequestTypes.js | 191 ------ scripts/utils/index.js | 34 -- scripts/utils/patch.json | 14 - scripts/wait-cluster.sh | 21 - src/client.ts | 18 +- src/helpers.ts | 18 +- src/sniffingTransport.ts | 18 +- test/esm/test-import.mjs | 18 +- test/integration/helper.js | 18 +- test/integration/helpers/bulk.test.js | 18 +- test/integration/helpers/msearch.test.js | 18 +- test/integration/helpers/scroll.test.js | 18 +- test/integration/helpers/search.test.js | 18 +- test/integration/index.js | 24 +- test/integration/reporter.js | 5 + test/integration/test-runner.js | 60 +- test/mock/index.js | 18 +- test/unit/api.test.ts | 18 +- test/unit/client.test.ts | 18 +- test/unit/helpers/bulk.test.ts | 18 +- test/unit/helpers/esql.test.ts | 18 +- test/unit/helpers/msearch.test.ts | 18 +- test/unit/helpers/scroll.test.ts | 18 +- test/unit/helpers/search.test.ts | 19 +- test/utils/MockConnection.ts | 18 +- test/utils/buildCluster.ts | 18 +- test/utils/buildProxy.ts | 18 +- test/utils/buildServer.ts | 18 +- test/utils/index.ts | 20 +- 50 files changed, 138 insertions(+), 2308 deletions(-) create mode 100755 scripts/check-spdx delete mode 100644 scripts/generate.js delete mode 100755 scripts/kibana-docker.sh delete mode 100644 scripts/utils/clone-es.js delete mode 100644 scripts/utils/generateApis.js delete mode 100644 scripts/utils/generateDocs.js delete mode 100644 scripts/utils/generateMain.js delete mode 100644 scripts/utils/generateRequestTypes.js delete mode 100644 scripts/utils/index.js delete mode 100644 scripts/utils/patch.json delete mode 100755 scripts/wait-cluster.sh diff --git a/.buildkite/make.mjs b/.buildkite/make.mjs index b1dc41e18..ddc91d01f 100644 --- a/.buildkite/make.mjs +++ b/.buildkite/make.mjs @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* global $ argv */ diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index bc73e0713..42074cadc 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -83,6 +83,9 @@ jobs: run: | npm run license-checker + - name: SPDX header check + run: npm run license-header + test-bun: name: Test Bun runs-on: ${{ matrix.os }} diff --git a/docs/examples/proxy/api/autocomplete.js b/docs/examples/proxy/api/autocomplete.js index fb18298cf..fdd70e11f 100644 --- a/docs/examples/proxy/api/autocomplete.js +++ b/docs/examples/proxy/api/autocomplete.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ // IMPORTANT: this is not a production ready code & purely for demonstration purposes, diff --git a/docs/examples/proxy/api/delete.js b/docs/examples/proxy/api/delete.js index b76108428..66de08635 100644 --- a/docs/examples/proxy/api/delete.js +++ b/docs/examples/proxy/api/delete.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ // IMPORTANT: this is not a production ready code & purely for demonstration purposes, diff --git a/docs/examples/proxy/api/index.js b/docs/examples/proxy/api/index.js index 901139713..446ba6757 100644 --- a/docs/examples/proxy/api/index.js +++ b/docs/examples/proxy/api/index.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ // IMPORTANT: this is not a production ready code & purely for demonstration purposes, diff --git a/docs/examples/proxy/api/search.js b/docs/examples/proxy/api/search.js index 8659e08f4..116ef0676 100644 --- a/docs/examples/proxy/api/search.js +++ b/docs/examples/proxy/api/search.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ // IMPORTANT: this is not a production ready code & purely for demonstration purposes, diff --git a/docs/examples/proxy/utils/authorize.js b/docs/examples/proxy/utils/authorize.js index 97bb9c4b5..74370a5ce 100644 --- a/docs/examples/proxy/utils/authorize.js +++ b/docs/examples/proxy/utils/authorize.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ // IMPORTANT: this is not a production ready code & purely for demonstration purposes, diff --git a/docs/examples/proxy/utils/prepare-elasticsearch.js b/docs/examples/proxy/utils/prepare-elasticsearch.js index bf833f0c2..6850aaae4 100644 --- a/docs/examples/proxy/utils/prepare-elasticsearch.js +++ b/docs/examples/proxy/utils/prepare-elasticsearch.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/index.d.ts b/index.d.ts index 89be0131c..12d5eb23e 100644 --- a/index.d.ts +++ b/index.d.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import Client from './lib/client' diff --git a/index.js b/index.js index 0bf3da3da..eb12ae5f4 100644 --- a/index.js +++ b/index.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/package.json b/package.json index c10929e17..f0c943d94 100644 --- a/package.json +++ b/package.json @@ -22,6 +22,7 @@ "lint": "ts-standard src", "lint:fix": "ts-standard --fix src", "license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause;0BSD'", + "license-header": "./scripts/check-spdx", "prebuild": "npm run clean-build && npm run lint", "build": "tsc && rm lib/package.json && mv lib/src/* lib/ && rm -rf lib/src", "clean-build": "rimraf ./lib && mkdir lib", diff --git a/scripts/check-spdx b/scripts/check-spdx new file mode 100755 index 000000000..c60d600e8 --- /dev/null +++ b/scripts/check-spdx @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# Copyright Elasticsearch B.V. 
and contributors +# SPDX-License-Identifier: Apache-2.0 + +correct='/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */' + +the_exit=0 + +check_file() { + if $(diff <(head -n4 "$1") <(echo "$correct") &>/dev/null); then + echo "Correct: $1" + else + echo "Incorrect: $1" + the_exit=1 + fi +} + +echo "SPDX license header check" +for file in $(git ls-files | grep -E '\.(ts|js|mjs)$'); do + check_file "$file" +done + +exit "$the_exit" diff --git a/scripts/download-artifacts.js b/scripts/download-artifacts.js index 9618838be..d8d5e189e 100644 --- a/scripts/download-artifacts.js +++ b/scripts/download-artifacts.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/scripts/generate-docs-examples.js b/scripts/generate-docs-examples.js index a9c229095..27d873ca2 100644 --- a/scripts/generate-docs-examples.js +++ b/scripts/generate-docs-examples.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ const { join } = require('path') diff --git a/scripts/generate.js b/scripts/generate.js deleted file mode 100644 index ad6fc71cb..000000000 --- a/scripts/generate.js +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { join } = require('path') -const { readdirSync, writeFileSync, readFileSync } = require('fs') -const minimist = require('minimist') -const ora = require('ora') -const rimraf = require('rimraf') -const standard = require('standard') -const downloadArtifacts = require('./download-artifacts') -const { - generate, - genFactory, - generateDocs, - generateRequestTypes -} = require('./utils') - -start(minimist(process.argv.slice(2), { - string: ['version', 'hash'] -})) - -function start (opts) { - if (opts.version == null) { - console.error('Missing version parameter') - process.exit(1) - } - - const packageFolder = join(__dirname, '..', 'api') - const apiOutputFolder = join(packageFolder, 'api') - const mainOutputFile = join(packageFolder, 'index.js') - const docOutputFile = join(__dirname, '..', 'docs', 'reference.asciidoc') - const typeDefFile = join(__dirname, '..', 'index.d.ts') - const requestParamsOutputFile = join(packageFolder, 'requestParams.d.ts') - - let log - downloadArtifacts({ version: opts.version, hash: opts.hash }) - .then(onArtifactsDownloaded) - .catch(err => { - console.log(err) - process.exit(1) - }) - - function onArtifactsDownloaded () { - log = ora('Generating APIs').start() - - log.text = 'Cleaning API folder...' - rimraf.sync(join(apiOutputFolder, '*.js')) - - const allSpec = readdirSync(downloadArtifacts.locations.specFolder) - .filter(file => file !== '_common.json') - .filter(file => !file.includes('deprecated')) - .sort() - .map(file => require(join(downloadArtifacts.locations.specFolder, file))) - - const namespaces = namespacify(readdirSync(downloadArtifacts.locations.specFolder)) - for (const namespace in namespaces) { - if (namespace === '_common') continue - const code = generate(namespace, namespaces[namespace], downloadArtifacts.locations.specFolder, opts.version) - const filePath = join(apiOutputFolder, `${namespace}.js`) - writeFileSync(filePath, code, { encoding: 'utf8' }) - } - - writeFileSync( - requestParamsOutputFile, - generateRequestTypes(opts.version, allSpec), - { encoding: 'utf8' } - ) - - const { fn: factory, types } = genFactory(apiOutputFolder, downloadArtifacts.locations.specFolder, namespaces) - writeFileSync( - mainOutputFile, - factory, - { encoding: 'utf8' } - ) - - const oldTypeDefString = readFileSync(typeDefFile, 'utf8') - const start = oldTypeDefString.indexOf('/* GENERATED */') - const end = oldTypeDefString.indexOf('/* /GENERATED */') - const newTypeDefString = oldTypeDefString.slice(0, start + 15) + '\n' + types + '\n ' + oldTypeDefString.slice(end) - writeFileSync( - typeDefFile, - newTypeDefString, - { encoding: 'utf8' } - ) - - lintFiles(log, () => { - log.text = 'Generating documentation' - writeFileSync( - docOutputFile, - generateDocs(require(join(downloadArtifacts.locations.specFolder, '_common.json')), allSpec), - { encoding: 'utf8' } - ) - - log.succeed('Done!') - }) - } - - function lintFiles (log, cb) { - log.text = 'Linting...' 
- const files = [join(packageFolder, '*.js'), join(apiOutputFolder, '*.js')] - standard.lintFiles(files, { fix: true }, err => { - if (err) { - return log.fail(err.message) - } - cb() - }) - } - - function namespacify (apis) { - return apis - .map(api => api.slice(0, -5)) - .filter(api => api !== '_common') - .filter(api => !api.includes('deprecated')) - .reduce((acc, val) => { - if (val.includes('.')) { - val = val.split('.') - acc[val[0]] = acc[val[0]] || [] - acc[val[0]].push(val[1]) - } else { - acc[val] = [] - } - return acc - }, {}) - } -} diff --git a/scripts/kibana-docker.sh b/scripts/kibana-docker.sh deleted file mode 100755 index 8c39f9647..000000000 --- a/scripts/kibana-docker.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -exec docker run \ - --rm \ - -e ELASTICSEARCH_URL="http://elasticsearch:9200/" \ - -p 5601:5601 \ - --network=elastic \ - docker.elastic.co/kibana/kibana:7.0.0-beta1 diff --git a/scripts/release-canary.js b/scripts/release-canary.js index 3afcf3983..a4bd8780f 100644 --- a/scripts/release-canary.js +++ b/scripts/release-canary.js @@ -1,3 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + 'use strict' /** @@ -130,9 +135,9 @@ release( 'dry-run', // help text - 'help', + 'help' ], - alias: { help: 'h' }, + alias: { help: 'h' } }) ) .catch(err => { diff --git a/scripts/utils/clone-es.js b/scripts/utils/clone-es.js deleted file mode 100644 index 09f078918..000000000 --- a/scripts/utils/clone-es.js +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { accessSync, mkdirSync } = require('fs') -const { join } = require('path') -const Git = require('simple-git') - -const esRepo = 'https://github.com/elastic/elasticsearch.git' -const esFolder = join(__dirname, '..', '..', 'elasticsearch') -const apiFolder = join(esFolder, 'rest-api-spec', 'src', 'main', 'resources', 'rest-api-spec', 'api') -const xPackFolder = join(esFolder, 'x-pack', 'plugin', 'src', 'test', 'resources', 'rest-api-spec', 'api') - -function cloneAndCheckout (opts, callback) { - const { log, tag, branch } = opts - withTag(tag, callback) - - /** - * Sets the elasticsearch repository to the given tag. - * If the repository is not present in `esFolder` it will - * clone the repository and the checkout the tag. - * If the repository is already present but it cannot checkout to - * the given tag, it will perform a pull and then try again. 
- * @param {string} tag - * @param {function} callback - */ - function withTag (tag, callback) { - let fresh = false - let retry = 0 - - if (!pathExist(esFolder)) { - if (!createFolder(esFolder)) { - log.fail('Failed folder creation') - return - } - fresh = true - } - - const git = Git(esFolder) - - if (fresh) { - clone(checkout) - } else if (opts.branch) { - checkout(true) - } else { - checkout() - } - - function checkout (alsoPull = false) { - if (branch) { - log.text = `Checking out branch '${branch}'` - } else { - log.text = `Checking out tag '${tag}'` - } - git.checkout(branch || tag, err => { - if (err) { - if (retry++ > 0) { - callback(new Error(`Cannot checkout tag '${tag}'`), { apiFolder, xPackFolder }) - return - } - return pull(checkout) - } - if (alsoPull) { - return pull(checkout) - } - callback(null, { apiFolder, xPackFolder }) - }) - } - - function pull (cb) { - log.text = 'Pulling elasticsearch repository...' - git.pull(err => { - if (err) { - callback(err, { apiFolder, xPackFolder }) - return - } - cb() - }) - } - - function clone (cb) { - log.text = 'Cloning elasticsearch repository...' - git.clone(esRepo, esFolder, err => { - if (err) { - callback(err, { apiFolder, xPackFolder }) - return - } - cb() - }) - } - } - - /** - * Checks if the given path exists - * @param {string} path - * @returns {boolean} true if exists, false if not - */ - function pathExist (path) { - try { - accessSync(path) - return true - } catch (err) { - return false - } - } - - /** - * Creates the given folder - * @param {string} name - * @returns {boolean} true on success, false on failure - */ - function createFolder (name) { - try { - mkdirSync(name) - return true - } catch (err) { - return false - } - } -} - -module.exports = cloneAndCheckout diff --git a/scripts/utils/generateApis.js b/scripts/utils/generateApis.js deleted file mode 100644 index cb99b3701..000000000 --- a/scripts/utils/generateApis.js +++ /dev/null @@ -1,553 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -/* eslint camelcase: 0 */ - -'use strict' - -const { join } = require('path') -const dedent = require('dedent') -const allowedMethods = { - noBody: ['GET', 'HEAD', 'DELETE'], - body: ['POST', 'PUT', 'DELETE'] -} - -// if a parameter is depracted in a minor release -// we should be able to support it until the next major -const deprecatedParameters = require('./patch.json') - -// list of apis that does not need any kind of validation -// because of how the url is built or the `type` handling in ES7 -const noPathValidation = [ - 'create', - 'exists', - 'explain', - 'get', - 'get_source', - 'index', - 'indices.get_alias', - 'indices.exists_alias', - 'indices.get_field_mapping', - 'indices.get_mapping', - 'indices.get_settings', - 'indices.put_mapping', - 'indices.stats', - 'delete', - 'nodes.info', - 'nodes.stats', - 'nodes.usage', - 'tasks.cancel', - 'termvectors', - 'update' -] - -function generateNamespace (namespace, nested, specFolder, version) { - const common = require(join(specFolder, '_common.json')) - let code = dedent` - /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
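// NOTE (illustrative sketch, not part of the original patch): the
// deprecatedParameters map required from patch.json above lets every
// generated API keep accepting a querystring key under its pre-rename
// spelling until the next major. Assuming the 7.x mapping from patch.json:
const deprecated = { 7: { _source_includes: '_source_include' } }
const release = '7'
const acceptedQuerystring = ['_source_includes']
for (const key of [...acceptedQuerystring]) {
  if (deprecated[release] && deprecated[release][key]) {
    acceptedQuerystring.push(deprecated[release][key])
  }
}
// acceptedQuerystring is now ['_source_includes', '_source_include']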
- */ - - 'use strict' - - /* eslint camelcase: 0 */ - /* eslint no-unused-vars: 0 */ - - const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -` - if (nested.length > 0) { - let getters = '' - for (const n of nested) { - if (n.includes('_')) { - const nameSnaked = n - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - getters += `${n}: { get () { return this.${nameSnaked} } },\n` - } - } - const api = generateMultiApi(version, namespace, nested, common, specFolder) - if (getters.length > 0) { - getters = `Object.defineProperties(${api.namespace}Api.prototype, {\n${getters}})` - } - - code += ` - const acceptedQuerystring = ${JSON.stringify(api.acceptedQuerystring)} - const snakeCase = ${JSON.stringify(api.snakeCase)} - - function ${api.namespace}Api (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError - } - - ${api.code} - - ${getters} - - module.exports = ${api.namespace}Api - ` - } else { - const spec = require(join(specFolder, `${namespace}.json`)) - const api = generateSingleApi(version, spec, common) - code += ` - const acceptedQuerystring = ${JSON.stringify(api.acceptedQuerystring)} - const snakeCase = ${JSON.stringify(api.snakeCase)} - - ${api.code} - - module.exports = ${api.name}Api - ` - } - return code -} - -function generateMultiApi (version, namespace, nested, common, specFolder) { - const namespaceSnaked = namespace - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - let code = '' - const snakeCase = {} - const acceptedQuerystring = [] - for (const n of nested) { - const nameSnaked = n - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - const spec = require(join(specFolder, `${namespace}.${n}.json`)) - const api = generateSingleApi(version, spec, common) - code += `${Uppercase(namespaceSnaked)}Api.prototype.${nameSnaked} = ${api.code}\n\n` - Object.assign(snakeCase, api.snakeCase) - for (const q of api.acceptedQuerystring) { - if (!acceptedQuerystring.includes(q)) { - acceptedQuerystring.push(q) - } - } - } - return { code, snakeCase, acceptedQuerystring, namespace: Uppercase(namespaceSnaked) } -} - -function generateSingleApi (version, spec, common) { - const release = version.charAt(0) - const api = Object.keys(spec)[0] - const name = api - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - - const { paths } = spec[api].url - const { params } = spec[api] - const acceptedQuerystring = [] - const required = [] - - const methods = paths.reduce((acc, val) => { - for (const method of val.methods) { - if (!acc.includes(method)) acc.push(method) - } - return acc - }, []) - const parts = paths.reduce((acc, val) => { - if (!val.parts) return acc - for (const part of Object.keys(val.parts)) { - if (!acc.includes(part)) acc.push(part) - } - return acc - }, []) - - // get the required parts from the url - // if the url has at least one static path, - // then there are not required parts of the url - let allParts = [] - for (const path of paths) { - if (path.parts) { - allParts.push(Object.keys(path.parts)) - } else { - allParts = [] - break - } - } - if (allParts.length > 0) { - intersect(...allParts).forEach(r => required.push(r)) - } - - for (const key in params) { - if (params[key].required) { - required.push(key) - } - - acceptedQuerystring.push(key) - if (deprecatedParameters[release] 
&& deprecatedParameters[release][key]) { - acceptedQuerystring.push(deprecatedParameters[release][key]) - } - } - - for (const key in spec[api]) { - const k = spec[api][key] - if (k && k.required) { - required.push(key) - } - } - if (common && common.params) { - for (const key in common.params) { - acceptedQuerystring.push(key) - } - } - - const code = ` - function ${name}Api (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - ${genRequiredChecks()} - - ${genUrlValidation(paths, api)} - - let { ${genQueryDenylist(false)}, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - ${buildPath()} - - // build request object - const request = { - method, - path, - ${genBody(api, methods, spec[api].body, spec)} - querystring - } - - return this.transport.request(request, options, callback) - } - `.trim() // always call trim to avoid newlines - - return { - name, - code, - acceptedQuerystring: acceptedQuerystring, - snakeCase: genSnakeCaseMap(), - documentation: generateDocumentation(spec[api], api) - } - - function genRequiredChecks () { - const code = required - .map(_genRequiredCheck) - .concat(_noBody()) - .filter(Boolean) - - if (code.length) { - code.unshift('// check required parameters') - } - - return code.join('\n ') - - function _genRequiredCheck (param) { - const camelCased = param[0] === '_' - ? '_' + param.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : param.replace(/_([a-z])/g, k => k[1].toUpperCase()) - - if (param === camelCased) { - const check = ` - if (params['${param}'] == null) { - const err = new this[kConfigurationError]('Missing required parameter: ${param}') - return handleError(err, callback) - } - ` - return check.trim() - } else { - const check = ` - if (params['${param}'] == null && params['${camelCased}'] == null) { - const err = new this[kConfigurationError]('Missing required parameter: ${param} or ${camelCased}') - return handleError(err, callback) - } - ` - return check.trim() - } - } - - function _noBody () { - const check = ` - if (params.body != null) { - const err = new this[kConfigurationError]('This API does not require a body') - return handleError(err, callback) - } - ` - return spec[api].body === null ? check.trim() : '' - } - } - - function genSnakeCaseMap () { - const toCamelCase = str => { - return str[0] === '_' - ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - return acceptedQuerystring.reduce((acc, val, index) => { - if (toCamelCase(val) !== val) { - acc[toCamelCase(val)] = val - } - return acc - }, {}) - } - - function genQueryDenylist (addQuotes = true) { - const toCamelCase = str => { - return str[0] === '_' - ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - const denylist = ['method', 'body'] - parts.forEach(p => { - const camelStr = toCamelCase(p) - if (camelStr !== p) denylist.push(`${camelStr}`) - denylist.push(`${p}`) - }) - return addQuotes ? denylist.map(q => `'${q}'`) : denylist - } - - function buildPath () { - const toCamelCase = str => { - return str[0] === '_' - ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - const genAccessKey = str => { - const camelStr = toCamelCase(str) - return camelStr === str - ? 
str - : `${str} || ${camelStr}` - } - - const genCheck = path => { - return path - .split('/') - .filter(Boolean) - .map(p => p.startsWith('{') ? `(${genAccessKey(p.slice(1, -1))}) != null` : false) - .filter(Boolean) - .join(' && ') - } - - const genPath = path => { - path = path - .split('/') - .filter(Boolean) - .map(p => p.startsWith('{') ? `encodeURIComponent(${genAccessKey(p.slice(1, -1))})` : `'${p}'`) - .join(' + \'/\' + ') - return path.length > 0 ? ('\'/\' + ' + path) : '\'/\'' - } - - let hasStaticPath = false - let sortedPaths = paths - // some legacy API have mutliple statis paths - // this filter removes them - .filter(p => { - if (p.path.includes('{')) return true - if (hasStaticPath === false && p.deprecated == null) { - hasStaticPath = true - return true - } - return false - }) - // sort by number of parameters (desc) - .sort((a, b) => Object.keys(b.parts || {}).length - Object.keys(a.parts || {}).length) - - const allDeprecated = paths.filter(path => path.deprecated != null) - if (allDeprecated.length === paths.length) sortedPaths = [paths[0]] - - let code = '' - for (let i = 0; i < sortedPaths.length; i++) { - const { path, methods } = sortedPaths[i] - if (sortedPaths.length === 1) { - code += `if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - ` - } else if (i === 0) { - code += `if (${genCheck(path)}) { - if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - } - ` - } else if (i === sortedPaths.length - 1) { - code += ` else { - if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - } - ` - } else { - code += ` else if (${genCheck(path)}) { - if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - } - ` - } - } - - return code - } -} - -function generatePickMethod (methods) { - if (methods.length === 1) { - return `'${methods[0]}'` - } - const bodyMethod = getBodyMethod(methods) - const noBodyMethod = getNoBodyMethod(methods) - if (bodyMethod && noBodyMethod) { - return `body == null ? '${noBodyMethod}' : '${bodyMethod}'` - } else if (bodyMethod) { - return `'${bodyMethod}'` - } else { - return `'${noBodyMethod}'` - } -} - -function genBody (api, methods, body, spec) { - const bodyMethod = getBodyMethod(methods) - const { content_type } = spec[api].headers - if (content_type && content_type.includes('application/x-ndjson')) { - return 'bulkBody: body,' - } - if (body === null && bodyMethod) { - return 'body: \'\',' - } else if (bodyMethod) { - return 'body: body || \'\',' - } else { - return 'body: null,' - } -} - -function getBodyMethod (methods) { - const m = methods.filter(m => ~allowedMethods.body.indexOf(m)) - if (m.length) return m[0] - return null -} - -function getNoBodyMethod (methods) { - const m = methods.filter(m => ~allowedMethods.noBody.indexOf(m)) - if (m.length) return m[0] - return null -} - -function genUrlValidation (paths, api) { - // this api does not need url validation - if (!needsPathValidation(api)) return '' - // gets only the dynamic components of the url in an array - // then we reverse it. A parameters always require what is - // at its right in the array. - const chunks = paths - .sort((a, b) => Object.keys(a.parts || {}).length > Object.keys(b.parts || {}).length ? -1 : 1) - .slice(0, 1) - .reduce((acc, val) => val.path, '') - // .reduce((a, b) => a.path.split('/').length > b.path.split('/').length ? 
a.path : b.path) - .split('/') - .filter(s => s.startsWith('{')) - .map(s => s.slice(1, -1)) - .reverse() - - let code = '' - - const len = chunks.length - chunks.forEach((chunk, index) => { - if (index === len - 1) return - const params = [] - let camelCased = chunk[0] === '_' - ? '_' + chunk.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : chunk.replace(/_([a-z])/g, k => k[1].toUpperCase()) - - if (chunk === camelCased) { - code += `${index ? '} else ' : ''}if (params['${chunk}'] != null && (` - } else { - code += `${index ? '} else ' : ''}if ((params['${chunk}'] != null || params['${camelCased}'] != null) && (` - } - for (let i = index + 1; i < len; i++) { - params.push(chunks[i]) - // url parts can be declared in camelCase fashion - camelCased = chunks[i][0] === '_' - ? '_' + chunks[i].slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : chunks[i].replace(/_([a-z])/g, k => k[1].toUpperCase()) - - if (chunks[i] === camelCased) { - code += `params['${chunks[i]}'] == null${i === len - 1 ? '' : ' || '}` - } else { - code += `(params['${chunks[i]}'] == null && params['${camelCased}'] == null)${i === len - 1 ? '' : ' || '}` - } - } - code += `)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: ${params.join(', ')}') - return handleError(err, callback) - ` - }) - - if (chunks.length > 1) { - code += '\n}' - } - - if (code.length) { - code = '// check required url components\n' + code - } - - return code.trim() -} - -function generateDocumentation ({ documentation }, op) { - // we use `replace(/\u00A0/g, ' ')` to remove no breaking spaces - // because some parts of the description fields are using it - - if (documentation == null) return '' - - let doc = '/**\n' - doc += ` * Perform a ${op} request\n` - if (documentation.description) { - doc += ` * ${documentation.description.replace(/\u00A0/g, ' ')}\n` - } - if (documentation.url) { - doc += ` * ${documentation.url}\n` - } - doc += ' */' - - return doc -} - -function needsPathValidation (api) { - return noPathValidation.indexOf(api) === -1 -} - -function intersect (first, ...rest) { - return rest.reduce((accum, current) => { - return accum.filter(x => current.indexOf(x) !== -1) - }, first) -} - -function Uppercase (str) { - return str[0].toUpperCase() + str.slice(1) -} - -module.exports = generateNamespace diff --git a/scripts/utils/generateDocs.js b/scripts/utils/generateDocs.js deleted file mode 100644 index 9f681ab49..000000000 --- a/scripts/utils/generateDocs.js +++ /dev/null @@ -1,318 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -const { readdirSync } = require('fs') -const { join } = require('path') -const dedent = require('dedent') - -const codeExamples = readdirSync(join(__dirname, '..', '..', 'docs', 'examples')) - .map(file => file.slice(0, -9)) - .filter(api => api !== 'index') - -function generateDocs (common, spec) { - let doc = dedent` - [[api-reference]] - - //////// - - - - =========================================================================================================================== - || || - || || - || || - || ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || - || ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || - || ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || - || ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || - || ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || - || ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || - || || - || || - || This file is autogenerated, DO NOT send pull requests that changes this file directly. || - || You should update the script that does the generation, which can be found in '/scripts/utils/generateDocs.js'. || - || || - || You can run the script with the following command: || - || node scripts/generate --branch || - || or || - || node scripts/generate --tag || - || || - || || - || || - =========================================================================================================================== - - - - //////// - - == API Reference - - This document contains the entire list of the Elasticsearch API supported by the client, both OSS and commercial. The client is entirely licensed under Apache 2.0. - - Elasticsearch exposes an HTTP layer to communicate with, and the client is a library that will help you do this. Because of this reason, you will see HTTP related parameters, such as ${'`'}body${'`'} or ${'`'}headers${'`'}. - - Every API can accept two objects, the first contains all the parameters that will be sent to Elasticsearch, while the second includes the request specific parameters, such as timeouts, headers, and so on. - In the first object, every parameter but the body will be sent via querystring or url parameter, depending on the API, and every unrecognized parameter will be sent as querystring. - - [source,js] - ---- - // promise API - const result = await client.search({ - index: 'my-index', - from: 20, - size: 10, - body: { foo: 'bar' } - }, { - ignore: [404], - maxRetries: 3 - }) - - // callback API - client.search({ - index: 'my-index', - from: 20, - size: 10, - body: { foo: 'bar' } - }, { - ignore: [404], - maxRetries: 3 - }, (err, result) => { - if (err) console.log(err) - }) - ---- - - In this document, you will find the reference of every parameter accepted by the querystring or the url. If you also need to send the body, you can find the documentation of its format in the reference link that is present along with every endpoint. - - \n\n` - doc += commonParameters(common) - spec.forEach(s => { - doc += '\n' + generateApiDoc(s) - }) - return doc -} - -function commonParameters (spec) { - let doc = dedent` - [discrete] - === Common parameters - Parameters that are accepted by all API endpoints. - - link:{ref}/common-options.html[Documentation] - [cols=2*] - |===\n` - Object.keys(spec.params).forEach(key => { - const name = isSnakeCased(key) && key !== camelify(key) - ? 
'`' + key + '` or `' + camelify(key) + '`' - : '`' + key + '`' - - doc += dedent` - |${name} - |${'`' + spec.params[key].type + '`'} - ${spec.params[key].description}` - if (spec.params[key].default) { - doc += ` + - _Default:_ ${'`' + spec.params[key].default + '`'}` - } - doc += '\n\n' - }) - - doc += dedent` - |=== - ` - return doc -} - -function generateApiDoc (spec) { - const name = Object.keys(spec)[0] - const documentationUrl = spec[name].documentation && spec[name].documentation.url - ? fixLink(name, spec[name].documentation.url) - : '' - const params = [] - // url params - const urlParts = spec[name].url.paths.reduce((acc, path) => { - if (!path.parts) return acc - for (const part in path.parts) { - if (acc[part] != null) continue - acc[part] = path.parts[part] - } - return acc - }, {}) - if (urlParts) { - Object.keys(urlParts).forEach(param => { - params.push({ - name: param, - type: getType(urlParts[param].type, urlParts[param].options), - description: urlParts[param].description, - default: urlParts[param].default, - deprecated: !!urlParts[param].deprecated - }) - }) - } - - // query params - const urlParams = spec[name].params - if (urlParams) { - Object.keys(urlParams).forEach(param => { - const duplicate = params.find(ele => ele.name === param) - if (duplicate) return - params.push({ - name: param, - type: getType(urlParams[param].type, urlParams[param].options), - description: urlParams[param].description, - default: urlParams[param].default, - deprecated: !!urlParams[param].deprecated - }) - }) - } - - // body params - const body = spec[name].body - if (body) { - params.push({ - name: 'body', - type: 'object', - description: body.description, - default: body.default, - deprecated: !!body.deprecated - }) - } - - const codeParameters = params - .reduce((acc, val) => { - const code = `${val.name}: ${val.type},` - acc += acc === '' - ? code - : '\n ' + code - - return acc - }, '') - // remove last comma - .slice(0, -1) - - const stability = spec[name].stability === 'stable' - ? '' - : `*Stability:* ${spec[name].stability}` - - let doc = dedent` - [discrete] - === ${camelify(name)} - ${stability} - [source,ts] - ---- - client.${camelify(name)}(${codeParameters.length > 0 ? `{\n ${codeParameters}\n}` : ''}) - ----\n` - if (documentationUrl) { - doc += `link:${documentationUrl}[Documentation] +\n` - } - if (codeExamples.includes(name)) { - doc += `{jsclient}/${name.replace(/\./g, '_')}_examples.html[Code Example] +\n` - } - - if (params.length !== 0) { - doc += dedent`[cols=2*] - |===\n` - doc += params.reduce((acc, val) => { - const name = isSnakeCased(val.name) && val.name !== camelify(val.name) - ? '`' + val.name + '` or `' + camelify(val.name) + '`' - : '`' + val.name + '`' - acc += dedent` - |${name} - |${'`' + val.type.replace(/\|/g, '\\|') + '`'} - ${val.description}` - if (val.default) { - acc += ` +\n_Default:_ ${'`' + val.default + '`'}` - } - if (val.deprecated) { - acc += ' +\n\nWARNING: This parameter has been deprecated.' 
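// NOTE (illustrative): the generated docs list both spellings of snake_cased
// parameters. The conversion used throughout these generator scripts is the
// small regex transform defined further down in this file, equivalent to:
function camelifyExample (str) {
  return str[0] === '_'
    ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase())
    : str.replace(/_([a-z])/g, k => k[1].toUpperCase())
}
// camelifyExample('_source_includes') === '_sourceIncludes'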
- } - return acc + '\n\n' - }, '') - - doc += dedent` - |=== - ` - } - doc += '\n' - return doc -} - -const LINK_OVERRIDES = { - 'license.delete': '{ref}/delete-license.html', - 'license.get': '{ref}/get-license.html', - 'license.get_basic_status': '{ref}/get-basic-status.html', - 'license.get_trial_status': '{ref}/get-trial-status.html', - 'license.post': '{ref}/update-license.html', - 'license.post_start_basic': '{ref}/start-basic.html', - 'license.post_start_trial': '{ref}/start-trial.html', - 'migration.deprecations': '{ref}/migration-api-deprecation.html', - 'monitoring.bulk': '{ref}/monitor-elasticsearch-cluster.html', - 'ingest.delete_pipeline': '{ref}/delete-pipeline-api.html', - 'ingest.get_pipeline': '{ref}/get-pipeline-api.html', - 'ingest.put_pipeline': '{ref}/put-pipeline-api.html', - 'ingest.simulate': '{ref}/simulate-pipeline-api.html', - 'ingest.processor_grok': '{ref}/grok-processor.html#grok-processor-rest-get' -} -// Fixes bad urls in the JSON spec -function fixLink (name, str) { - /* In 6.x some API start with `xpack.` when in master they do not. We - * can safely ignore that for link generation. */ - name = name.replace(/^xpack\./, '') - const override = LINK_OVERRIDES[name] - if (override) return override - if (!str) return '' - /* Replace references to the guide with the attribute {ref} because - * the json files in the Elasticsearch repo are a bit of a mess. */ - str = str.replace(/^.+guide\/en\/elasticsearch\/reference\/[^/]+\/([^./]*\.html(?:#.+)?)$/, '{ref}/$1') - str = str.replace(/frozen\.html/, 'freeze-index-api.html') - str = str.replace(/ml-file-structure\.html/, 'ml-find-file-structure.html') - str = str.replace(/security-api-get-user-privileges\.html/, 'security-api-get-privileges.html') - - return str -} - -function getType (type, options) { - switch (type) { - case 'list': - return 'string | string[]' - case 'date': - case 'time': - case 'timeout': - return 'string' - case 'enum': - return options.map(k => `'${k}'`).join(' | ') - case 'int': - case 'double': - case 'long': - return 'number' - default: - return type - } -} - -function camelify (str) { - return str[0] === '_' - ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) -} - -function isSnakeCased (str) { - return !!~str.indexOf('_') -} - -module.exports = generateDocs diff --git a/scripts/utils/generateMain.js b/scripts/utils/generateMain.js deleted file mode 100644 index 02a4873ed..000000000 --- a/scripts/utils/generateMain.js +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -/* eslint-disable no-template-curly-in-string */ -/* eslint camelcase: 0 */ - -'use strict' - -const { readdirSync } = require('fs') -const { join } = require('path') -const dedent = require('dedent') -const deepmerge = require('deepmerge') - -function genFactory (folder, specFolder, namespaces) { - // get all the API files - // const apiFiles = readdirSync(folder) - const apiFiles = readdirSync(specFolder) - .filter(file => file !== '_common.json') - .filter(file => !file.includes('deprecated')) - .sort() - const types = apiFiles - .map(file => { - const name = file - .slice(0, -5) - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - - return file - .slice(0, -5) // remove `.json` extension - .split('.') - .reverse() - .reduce((acc, val) => { - const spec = readSpec(specFolder, file.slice(0, -5)) - const isHead = isHeadMethod(spec, file.slice(0, -5)) - const body = hasBody(spec, file.slice(0, -5)) - const methods = acc === null ? buildMethodDefinition({ kibana: false }, val, name, body, isHead, spec) : null - const obj = {} - if (methods) { - for (const m of methods) { - obj[m.key] = m.val - } - } else { - obj[val] = acc - if (isSnakeCased(val)) { - obj[camelify(val)] = acc - } - } - return obj - }, null) - }) - .reduce((acc, val) => deepmerge(acc, val), {}) - - const kibanaTypes = apiFiles - .map(file => { - const name = file - .slice(0, -5) - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - - return file - .slice(0, -5) // remove `.json` extension - .split('.') - .reverse() - .reduce((acc, val) => { - const spec = readSpec(specFolder, file.slice(0, -5)) - const isHead = isHeadMethod(spec, file.slice(0, -5)) - const body = hasBody(spec, file.slice(0, -5)) - const methods = acc === null ? 
buildMethodDefinition({ kibana: true }, val, name, body, isHead, spec) : null - const obj = {} - if (methods) { - for (const m of methods) { - obj[m.key] = m.val - } - } else { - obj[camelify(val)] = acc - } - return obj - }, null) - }) - .reduce((acc, val) => deepmerge(acc, val), {}) - - // serialize the type object - const typesStr = Object.keys(types) - .map(key => { - const line = ` ${key}: ${JSON.stringify(types[key], null, 4)}` - if (line.slice(-1) === '}') { - return line.slice(0, -1) + ' }' - } - return line - }) - .join('\n') - // remove useless quotes and commas - .replace(/"/g, '') - .replace(/,$/gm, '') - const kibanaTypesStr = Object.keys(kibanaTypes) - .map(key => { - const line = ` ${key}: ${JSON.stringify(kibanaTypes[key], null, 4)}` - if (line.slice(-1) === '}') { - return line.slice(0, -1) + ' }' - } - return line - }) - .join('\n') - // remove useless quotes and commas - .replace(/"/g, '') - .replace(/,$/gm, '') - - let apisStr = '' - const getters = [] - for (const namespace in namespaces) { - if (namespaces[namespace].length > 0) { - getters.push(`${camelify(namespace)}: { - get () { - if (this[k${toPascalCase(camelify(namespace))}] === null) { - this[k${toPascalCase(camelify(namespace))}] = new ${toPascalCase(camelify(namespace))}Api(this.transport, this[kConfigurationError]) - } - return this[k${toPascalCase(camelify(namespace))}] - } - },\n`) - if (namespace.includes('_')) { - getters.push(`${namespace}: { get () { return this.${camelify(namespace)} } },\n`) - } - } else { - apisStr += `ESAPI.prototype.${camelify(namespace)} = ${camelify(namespace)}Api\n` - if (namespace.includes('_')) { - getters.push(`${namespace}: { get () { return this.${camelify(namespace)} } },\n`) - } - } - } - - apisStr += '\nObject.defineProperties(ESAPI.prototype, {\n' - for (const getter of getters) { - apisStr += getter - } - apisStr += '})' - - let modules = '' - let symbols = '' - let symbolsInstance = '' - for (const namespace in namespaces) { - if (namespaces[namespace].length > 0) { - modules += `const ${toPascalCase(camelify(namespace))}Api = require('./api/${namespace}')\n` - symbols += `const k${toPascalCase(camelify(namespace))} = Symbol('${toPascalCase(camelify(namespace))}')\n` - symbolsInstance += `this[k${toPascalCase(camelify(namespace))}] = null\n` - } else { - modules += `const ${camelify(namespace)}Api = require('./api/${namespace}')\n` - } - } - - const fn = dedent` - /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
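// NOTE (illustrative): for a snake_cased API such as "get_script", the
// buildMethodDefinition helper below expands into one promise-style and
// three callback-style TypeScript signatures, then repeats the set for the
// camelCase alias, roughly:
//   get_script(params?: RequestParams.GetScript, options?: TransportRequestOptions): TransportRequestPromise<...>
//   get_script(callback: callbackFn<...>): TransportRequestCallback
//   get_script(params: RequestParams.GetScript, callback: callbackFn<...>): TransportRequestCallback
//   get_script(params: RequestParams.GetScript, options: TransportRequestOptions, callback: callbackFn<...>): TransportRequestCallback
//   ...and the same four again as getScript(...).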
- */ - - 'use strict' - - ${modules} - - const { kConfigurationError } = require('./utils') - ${symbols} - - function ESAPI (opts) { - this[kConfigurationError] = opts.ConfigurationError - ${symbolsInstance} - } - - ${apisStr} - - module.exports = ESAPI - ` - - // new line at the end of file - return { fn: fn + '\n', types: typesStr, kibanaTypes: kibanaTypesStr } -} - -// from snake_case to camelCase -function camelify (str) { - return str.replace(/_([a-z])/g, k => k[1].toUpperCase()) -} - -function isSnakeCased (str) { - return !!~str.indexOf('_') -} - -function toPascalCase (str) { - return str[0].toUpperCase() + str.slice(1) -} - -function buildMethodDefinition (opts, api, name, hasBody, isHead, spec) { - const Name = toPascalCase(name) - const { content_type } = spec[Object.keys(spec)[0]].headers - const bodyType = content_type && content_type.includes('application/x-ndjson') ? 'RequestNDBody' : 'RequestBody' - const responseType = isHead ? 'boolean' : 'Record' - const defaultBodyType = content_type && content_type.includes('application/x-ndjson') ? 'Record[]' : 'Record' - - if (opts.kibana) { - if (hasBody) { - return [ - { key: `${camelify(api)}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' } - ] - } else { - return [ - { key: `${camelify(api)}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' } - ] - } - } - - if (hasBody) { - let methods = [ - { key: `${api}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' }, - { key: `${api}(callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${api}(params: RequestParams.${Name}, callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${api}(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: 'TransportRequestCallback' } - ] - if (isSnakeCased(api)) { - methods = methods.concat([ - { key: `${camelify(api)}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' }, - { key: `${camelify(api)}(callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${camelify(api)}(params: RequestParams.${Name}, callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${camelify(api)}(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: 'TransportRequestCallback' } - ]) - } - return methods - } else { - let methods = [ - { key: `${api}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' }, - { key: `${api}(callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${api}(params: RequestParams.${Name}, callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${api}(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: 'TransportRequestCallback' } - ] - if (isSnakeCased(api)) { - methods = methods.concat([ - { key: `${camelify(api)}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' }, - { key: `${camelify(api)}(callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${camelify(api)}(params: RequestParams.${Name}, callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${camelify(api)}(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: 'TransportRequestCallback' } - ]) - } - return methods - } -} - -function 
hasBody (spec, api) { - return !!spec[api].body -} - -function isHeadMethod (spec, api) { - const { paths } = spec[api].url - const methods = [] - for (const path of paths) { - for (const method of path.methods) { - if (!methods.includes(method)) { - methods.push(method) - } - } - } - return methods.length === 1 && methods[0] === 'HEAD' -} - -function readSpec (specFolder, file) { - try { - return require(join(specFolder, file)) - } catch (err) { - throw new Error(`Cannot read spec file ${file}`) - } -} - -module.exports = genFactory diff --git a/scripts/utils/generateRequestTypes.js b/scripts/utils/generateRequestTypes.js deleted file mode 100644 index 854e9ebcf..000000000 --- a/scripts/utils/generateRequestTypes.js +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* eslint camelcase: 0 */ - -'use strict' - -const deprecatedParameters = require('./patch.json') - -function generate (version, api) { - const release = version.charAt(0) - let types = `/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
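// NOTE (illustrative): every request interface this script emits extends the
// Generic shape declared just below, so a typed call could look like:
//   const params: RequestParams.Search<Record<string, any>> = {
//     index: 'my-index',
//     body: { query: { match_all: {} } }
//   }
// where Search is one of the generated interfaces.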
- */ - -import { RequestBody, RequestNDBody } from '../lib/Transport' - -export interface Generic { - method?: string; - filter_path?: string | string[]; - pretty?: boolean; - human?: boolean; - error_trace?: boolean; - source?: string; -} -` - - api.forEach(generateRequestType) - return types - - function generateRequestType (spec) { - const api = Object.keys(spec)[0] - const name = api - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - - const { paths = {} } = spec[api].url - const { body, params = {} } = spec[api] - - // get the required parts from the url - // if the url has at least one static path, - // then there are not required parts of the url - let allParts = [] - let requiredParts = [] - for (const path of paths) { - if (path.parts) { - allParts.push(Object.keys(path.parts)) - } else { - allParts = [] - break - } - } - if (allParts.length > 0) { - requiredParts = intersect(...allParts) - } - - const parts = paths.reduce((acc, path) => { - if (!path.parts) return acc - for (const part in path.parts) { - if (acc[part] != null) continue - acc[part] = { key: part, value: path.parts[part], required: requiredParts.includes(part) } - } - return acc - }, {}) - const deprecatedParametersToAdd = [] - const paramsArr = Object.keys(params) - .filter(k => !Object.keys(parts).includes(k)) - .map(k => { - if (deprecatedParameters[release] && deprecatedParameters[release][k]) { - deprecatedParametersToAdd.push({ - key: deprecatedParameters[release][k], - value: params[k], - required: params[k].required - }) - } - return { key: k, value: params[k], required: params[k].required } - }) - - const partsArr = Object.keys(parts).map(k => parts[k]) - deprecatedParametersToAdd.forEach(k => partsArr.push(k)) - - const genLine = e => { - const optional = e.required ? '' : '?' - return `${e.key}${optional}: ${getType(e.value.type, e.value.options)};` - } - - const { content_type } = spec[api].headers - const bodyGeneric = content_type && content_type.includes('application/x-ndjson') ? 'RequestNDBody' : 'RequestBody' - - const code = ` -export interface ${toPascalCase(name)}${body ? `` : ''} extends Generic { - ${partsArr.map(genLine).join('\n ')} - ${paramsArr.map(genLine).join('\n ')} - ${body ? `body${body.required ? 
'' : '?'}: T;` : ''} -} -` - - types += '\n' - // remove empty lines - types += code.replace(/^\s*\n/gm, '') - } - - function getType (type, options) { - switch (type) { - case 'list': - return 'string | string[]' - case 'date': - case 'time': - case 'timeout': - return 'string' - case 'enum': { - // the following code changes 'true' | 'false' to boolean - let foundTrue = false - let foundFalse = false - options = options - .map(k => { - if (k === 'true') { - foundTrue = true - return true - } else if (k === 'false') { - foundFalse = true - return false - } else { - return `'${k}'` - } - }) - .filter(k => { - if (foundTrue && foundFalse && (k === true || k === false)) { - return false - } - return true - }) - if (foundTrue && foundFalse) { - options.push('boolean') - } - return options.join(' | ') - } - case 'int': - case 'double': - case 'long': - return 'number' - case 'boolean|long': - return 'boolean | number' - default: - return type - } - } -} - -function intersect (first, ...rest) { - return rest.reduce((accum, current) => { - return accum.filter(x => current.indexOf(x) !== -1) - }, first) -} - -function toPascalCase (str) { - return str[0].toUpperCase() + str.slice(1) -} - -module.exports = generate diff --git a/scripts/utils/index.js b/scripts/utils/index.js deleted file mode 100644 index 05d955b2e..000000000 --- a/scripts/utils/index.js +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const generate = require('./generateApis') -const cloneAndCheckout = require('./clone-es') -const genFactory = require('./generateMain') -const generateDocs = require('./generateDocs') -const generateRequestTypes = require('./generateRequestTypes') - -module.exports = { - generate, - cloneAndCheckout, - genFactory, - generateDocs, - generateRequestTypes -} diff --git a/scripts/utils/patch.json b/scripts/utils/patch.json deleted file mode 100644 index 3023a6271..000000000 --- a/scripts/utils/patch.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "6": { - "_source_includes": "_source_include", - "_source_excludes": "_source_exclude" - }, - "7": { - "_source_includes": "_source_include", - "_source_excludes": "_source_exclude" - }, - "8": { - "_source_includes": "_source_include", - "_source_excludes": "_source_exclude" - } -} diff --git a/scripts/wait-cluster.sh b/scripts/wait-cluster.sh deleted file mode 100755 index 4cacaa4b6..000000000 --- a/scripts/wait-cluster.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -TEST_ES_SERVER=${TEST_ES_SERVER:-"http://localhost:9200/"} - -attempt_counter=0 -max_attempts=5 -url="${TEST_ES_SERVER}/_cluster/health?wait_for_status=green&timeout=50s" - -echo "Waiting for Elasticsearch..." 
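# NOTE (illustrative): the loop below is a plain curl poll against the
# cluster health API, which blocks server-side until status is green; the
# probe compares the reported HTTP status code against 200 and gives up
# after max_attempts tries, i.e. something equivalent to:
#   curl -s -o /dev/null -w '%{http_code}' --max-time 55 \
#     "http://localhost:9200/_cluster/health?wait_for_status=green&timeout=50s"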
-while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' --max-time 55 "$url")" != "200" ]]; do - if [ ${attempt_counter} -eq ${max_attempts} ];then - echo "\nCouldn't connect to Elasticsearch" - exit 1 - fi - - printf '.' - attempt_counter=$(($attempt_counter+1)) - sleep 5 -done - -echo "\nReady" diff --git a/src/client.ts b/src/client.ts index 7f9f8fabe..43f78a6e5 100644 --- a/src/client.ts +++ b/src/client.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License") you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import process from 'node:process' diff --git a/src/helpers.ts b/src/helpers.ts index 0043a8ab5..89f804b89 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License") you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable @typescript-eslint/naming-convention */ diff --git a/src/sniffingTransport.ts b/src/sniffingTransport.ts index 7c9cec43c..389c54c3d 100644 --- a/src/sniffingTransport.ts +++ b/src/sniffingTransport.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License") you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ import assert from 'node:assert' diff --git a/test/esm/test-import.mjs b/test/esm/test-import.mjs index f7a6f09e6..693ac3e18 100644 --- a/test/esm/test-import.mjs +++ b/test/esm/test-import.mjs @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import { Client } from '@elastic/elasticsearch' diff --git a/test/integration/helper.js b/test/integration/helper.js index fe4e0b422..bfe2535fa 100644 --- a/test/integration/helper.js +++ b/test/integration/helper.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/test/integration/helpers/bulk.test.js b/test/integration/helpers/bulk.test.js index a1b2be118..bffad53b1 100644 --- a/test/integration/helpers/bulk.test.js +++ b/test/integration/helpers/bulk.test.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/test/integration/helpers/msearch.test.js b/test/integration/helpers/msearch.test.js index fb317b0f7..479ddfec7 100644 --- a/test/integration/helpers/msearch.test.js +++ b/test/integration/helpers/msearch.test.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/test/integration/helpers/scroll.test.js b/test/integration/helpers/scroll.test.js index 36f3b8528..6d5148a9e 100644 --- a/test/integration/helpers/scroll.test.js +++ b/test/integration/helpers/scroll.test.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/test/integration/helpers/search.test.js b/test/integration/helpers/search.test.js index 7a6946a9f..2f0512177 100644 --- a/test/integration/helpers/search.test.js +++ b/test/integration/helpers/search.test.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/test/integration/index.js b/test/integration/index.js index b07ddd2d7..f226ee893 100644 --- a/test/integration/index.js +++ b/test/integration/index.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' @@ -44,7 +30,7 @@ const MAX_TEST_TIME = 1000 * 3 const options = minimist(process.argv.slice(2), { boolean: ['bail'], - string: ['suite', 'test'], + string: ['suite', 'test'] }) const freeSkips = { @@ -90,7 +76,7 @@ const freeSkips = { // the yaml runner assumes that null means "does not exists", // while null is a valid json value, so the check will fail 'search/320_disallow_queries.yml': ['Test disallow expensive queries'], - 'free/tsdb/90_unsupported_operations.yml': ['noop update'], + 'free/tsdb/90_unsupported_operations.yml': ['noop update'] } const platinumDenyList = { @@ -186,7 +172,7 @@ const platinumDenyList = { 'platinum/ml/get_datafeed_stats.yml': ['*'], // start should be a string in the yaml test - 'platinum/ml/start_stop_datafeed.yml': ['*'], + 'platinum/ml/start_stop_datafeed.yml': ['*'] } function runner (opts = {}) { diff --git a/test/integration/reporter.js b/test/integration/reporter.js index d94e09ba3..165478c50 100644 --- a/test/integration/reporter.js +++ b/test/integration/reporter.js @@ -1,3 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + 'use strict' const assert = require('node:assert') diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js index ce80da43e..856b23567 100644 --- a/test/integration/test-runner.js +++ b/test/integration/test-runner.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' @@ -168,25 +154,25 @@ function build (opts = {}) { if (isXPack) { // delete ilm policies const preserveIlmPolicies = [ - "ilm-history-ilm-policy", - "slm-history-ilm-policy", - "watch-history-ilm-policy", - "watch-history-ilm-policy-16", - "ml-size-based-ilm-policy", - "logs", - "metrics", - "synthetics", - "7-days-default", - "30-days-default", - "90-days-default", - "180-days-default", - "365-days-default", - ".fleet-actions-results-ilm-policy", - ".fleet-file-data-ilm-policy", - ".fleet-files-ilm-policy", - ".deprecation-indexing-ilm-policy", - ".monitoring-8-ilm-policy", - "behavioral_analytics-events-default_policy", + 'ilm-history-ilm-policy', + 'slm-history-ilm-policy', + 'watch-history-ilm-policy', + 'watch-history-ilm-policy-16', + 'ml-size-based-ilm-policy', + 'logs', + 'metrics', + 'synthetics', + '7-days-default', + '30-days-default', + '90-days-default', + '180-days-default', + '365-days-default', + '.fleet-actions-results-ilm-policy', + '.fleet-file-data-ilm-policy', + '.fleet-files-ilm-policy', + '.deprecation-indexing-ilm-policy', + '.monitoring-8-ilm-policy', + 'behavioral_analytics-events-default_policy' ] const policies = await client.ilm.getLifecycle() for (const policy in policies) { @@ -488,7 +474,7 @@ function build (opts = {}) { cmd.params.body = JSON.parse(cmd.params.body) } - let err, result; + let err, result try { [err, result] = await to(api(cmd.params, options)) } catch (exc) { @@ -873,7 +859,7 @@ function length (val, len, response) { function parseDo (action) { action = JSON.parse(JSON.stringify(action)) - if (typeof action === 'string') action = {[action]: {}} + if (typeof action === 'string') action = { [action]: {} } if (Array.isArray(action)) action = action[0] return Object.keys(action).reduce((acc, val) => { diff --git a/test/mock/index.js b/test/mock/index.js index d9525299d..6d6452995 100644 --- a/test/mock/index.js +++ b/test/mock/index.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/test/unit/api.test.ts b/test/unit/api.test.ts index a0f513256..452f53805 100644 --- a/test/unit/api.test.ts +++ b/test/unit/api.test.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
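A note on the test-runner.js and index.js hunks above: two helpers do the heavy lifting without their definitions appearing in this patch — the skip maps (`freeSkips`/`platinumDenyList`), and the `to(...)` wrapper behind `[err, result] = await to(api(cmd.params, options))`. The following is only a sketch of the usual shape of such helpers; the `['*']` convention and the await-to-js tuple pattern are inferred from the hunks, not taken from the runner's actual code:

[source, js]
----
// Sketch of a skip-map lookup: a YAML test file maps to the test names
// to skip, and a value of ['*'] denies the whole file.
function shouldSkip (skips, file, testName) {
  const entry = skips[file]
  return Array.isArray(entry) &&
    (entry.includes('*') || entry.includes(testName))
}

// Sketch of an error-to-tuple wrapper: resolve a promise into
// [err, result] so callers can destructure instead of using try/catch.
function to (promise) {
  return promise
    .then(result => [null, result])
    .catch(err => [err, null])
}
----

With this shape, `await to(...)` yields `[null, result]` on success and `[err, null]` on rejection, which is why the runner can inspect `err` directly after the `await`.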
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import { test } from 'tap' diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts index 2e64e5927..3da9a8842 100644 --- a/test/unit/client.test.ts +++ b/test/unit/client.test.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import * as http from 'node:http' diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts index d45d2d003..45487aaa4 100644 --- a/test/unit/helpers/bulk.test.ts +++ b/test/unit/helpers/bulk.test.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import FakeTimers from '@sinonjs/fake-timers' diff --git a/test/unit/helpers/esql.test.ts b/test/unit/helpers/esql.test.ts index c91e3cb03..3a66ee7d4 100644 --- a/test/unit/helpers/esql.test.ts +++ b/test/unit/helpers/esql.test.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import { test } from 'tap' diff --git a/test/unit/helpers/msearch.test.ts b/test/unit/helpers/msearch.test.ts index ba2457587..a87d86c04 100644 --- a/test/unit/helpers/msearch.test.ts +++ b/test/unit/helpers/msearch.test.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import { test } from 'tap' diff --git a/test/unit/helpers/scroll.test.ts b/test/unit/helpers/scroll.test.ts index 88361bd7c..ae01989a5 100644 --- a/test/unit/helpers/scroll.test.ts +++ b/test/unit/helpers/scroll.test.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import { test } from 'tap' diff --git a/test/unit/helpers/search.test.ts b/test/unit/helpers/search.test.ts index e318571a8..8eddde16a 100644 --- a/test/unit/helpers/search.test.ts +++ b/test/unit/helpers/search.test.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import { test } from 'tap' @@ -109,4 +95,3 @@ test('Merge filter paths (snake_case)', async t => { { _id: '3', three: 'three' } ]) }) - diff --git a/test/utils/MockConnection.ts b/test/utils/MockConnection.ts index 19af3dd54..c1bd25873 100644 --- a/test/utils/MockConnection.ts +++ b/test/utils/MockConnection.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import assert from 'assert' diff --git a/test/utils/buildCluster.ts b/test/utils/buildCluster.ts index 79a8ba71b..5b101f757 100644 --- a/test/utils/buildCluster.ts +++ b/test/utils/buildCluster.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import Debug from 'debug' diff --git a/test/utils/buildProxy.ts b/test/utils/buildProxy.ts index 37f58d55c..314a08c47 100644 --- a/test/utils/buildProxy.ts +++ b/test/utils/buildProxy.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
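Every hunk in this patch applies the same mechanical rewrite: per the repeated `@@ -1,20 +1,6 @@` headers, the 20-line Apache notice at the top of each file collapses into the short SPDX form. A change this uniform is usually scripted rather than hand-edited; a minimal sketch of such a sweep (hypothetical — the tool actually used is not part of this patch) could look like:

[source, js]
----
// Hypothetical header sweep: replace the leading block comment of each
// file passed on the command line with the short SPDX notice.
const { readFileSync, writeFileSync } = require('node:fs')

const spdx = [
  '/*',
  ' * Copyright Elasticsearch B.V. and contributors',
  ' * SPDX-License-Identifier: Apache-2.0',
  ' */'
].join('\n')

for (const file of process.argv.slice(2)) {
  const source = readFileSync(file, 'utf8')
  // The old notice is the first block comment in every touched file.
  writeFileSync(file, source.replace(/^\/\*[\s\S]*?\*\//, spdx))
}
----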
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ // @ts-ignore diff --git a/test/utils/buildServer.ts b/test/utils/buildServer.ts index 586f1b68f..c2fcfc065 100644 --- a/test/utils/buildServer.ts +++ b/test/utils/buildServer.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import { readFileSync } from 'fs' diff --git a/test/utils/index.ts b/test/utils/index.ts index 62d5cc578..6b74fa033 100644 --- a/test/utils/index.ts +++ b/test/utils/index.ts @@ -1,26 +1,12 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ import buildServer from './buildServer' import * as connection from './MockConnection' import buildCluster from './buildCluster' -import * as buildProxy from './buildProxy' +import * as buildProxy from './buildProxy' export { buildServer, From 9de4dc50090e39d80375e79d5b5479e918a9f0bd Mon Sep 17 00:00:00 2001 From: Colleen McGinnis Date: Fri, 21 Mar 2025 12:42:42 -0500 Subject: [PATCH 495/647] [docs] Miscellaneous docs clean up (#2663) * remove unused substitutions * move images --- docs/docset.yml | 477 ------------------ docs/reference/getting-started.md | 4 +- .../{ => reference}/images/create-api-key.png | Bin docs/{ => reference}/images/es-endpoint.jpg | Bin 4 files changed, 2 insertions(+), 479 deletions(-) rename docs/{ => reference}/images/create-api-key.png (100%) rename docs/{ => reference}/images/es-endpoint.jpg (100%) diff --git a/docs/docset.yml b/docs/docset.yml index 27f8dc2d6..cea34c4d5 100644 --- a/docs/docset.yml +++ b/docs/docset.yml @@ -8,482 +8,5 @@ toc: - toc: reference - toc: release-notes subs: - ref: "https://www.elastic.co/guide/en/elasticsearch/reference/current" - ref-bare: "https://www.elastic.co/guide/en/elasticsearch/reference" - ref-8x: "https://www.elastic.co/guide/en/elasticsearch/reference/8.1" - ref-80: "https://www.elastic.co/guide/en/elasticsearch/reference/8.0" - ref-7x: "https://www.elastic.co/guide/en/elasticsearch/reference/7.17" - ref-70: "https://www.elastic.co/guide/en/elasticsearch/reference/7.0" - ref-60: "https://www.elastic.co/guide/en/elasticsearch/reference/6.0" - ref-64: "https://www.elastic.co/guide/en/elasticsearch/reference/6.4" - xpack-ref: "https://www.elastic.co/guide/en/x-pack/6.2" - logstash-ref: "https://www.elastic.co/guide/en/logstash/current" - kibana-ref: "https://www.elastic.co/guide/en/kibana/current" - kibana-ref-all: "https://www.elastic.co/guide/en/kibana" - beats-ref-root: "https://www.elastic.co/guide/en/beats" - beats-ref: "https://www.elastic.co/guide/en/beats/libbeat/current" - beats-ref-60: "https://www.elastic.co/guide/en/beats/libbeat/6.0" - beats-ref-63: "https://www.elastic.co/guide/en/beats/libbeat/6.3" - beats-devguide: "https://www.elastic.co/guide/en/beats/devguide/current" - auditbeat-ref: "https://www.elastic.co/guide/en/beats/auditbeat/current" - packetbeat-ref: "https://www.elastic.co/guide/en/beats/packetbeat/current" - metricbeat-ref: "https://www.elastic.co/guide/en/beats/metricbeat/current" - filebeat-ref: "https://www.elastic.co/guide/en/beats/filebeat/current" - functionbeat-ref: "https://www.elastic.co/guide/en/beats/functionbeat/current" - winlogbeat-ref: "https://www.elastic.co/guide/en/beats/winlogbeat/current" - heartbeat-ref: "https://www.elastic.co/guide/en/beats/heartbeat/current" - journalbeat-ref: "https://www.elastic.co/guide/en/beats/journalbeat/current" - ingest-guide: "https://www.elastic.co/guide/en/ingest/current" - fleet-guide: "https://www.elastic.co/guide/en/fleet/current" - apm-guide-ref: "https://www.elastic.co/guide/en/apm/guide/current" - apm-guide-7x: "https://www.elastic.co/guide/en/apm/guide/7.17" - apm-app-ref: "https://www.elastic.co/guide/en/kibana/current" - apm-agents-ref: "https://www.elastic.co/guide/en/apm/agent" - apm-android-ref: 
"https://www.elastic.co/guide/en/apm/agent/android/current" - apm-py-ref: "https://www.elastic.co/guide/en/apm/agent/python/current" - apm-py-ref-3x: "https://www.elastic.co/guide/en/apm/agent/python/3.x" - apm-node-ref-index: "https://www.elastic.co/guide/en/apm/agent/nodejs" - apm-node-ref: "https://www.elastic.co/guide/en/apm/agent/nodejs/current" - apm-node-ref-1x: "https://www.elastic.co/guide/en/apm/agent/nodejs/1.x" - apm-rum-ref: "https://www.elastic.co/guide/en/apm/agent/rum-js/current" - apm-ruby-ref: "https://www.elastic.co/guide/en/apm/agent/ruby/current" - apm-java-ref: "https://www.elastic.co/guide/en/apm/agent/java/current" - apm-go-ref: "https://www.elastic.co/guide/en/apm/agent/go/current" - apm-dotnet-ref: "https://www.elastic.co/guide/en/apm/agent/dotnet/current" - apm-php-ref: "https://www.elastic.co/guide/en/apm/agent/php/current" - apm-ios-ref: "https://www.elastic.co/guide/en/apm/agent/swift/current" - apm-lambda-ref: "https://www.elastic.co/guide/en/apm/lambda/current" - apm-attacher-ref: "https://www.elastic.co/guide/en/apm/attacher/current" - docker-logging-ref: "https://www.elastic.co/guide/en/beats/loggingplugin/current" - esf-ref: "https://www.elastic.co/guide/en/esf/current" - kinesis-firehose-ref: "https://www.elastic.co/guide/en/kinesis/%7B%7Bkinesis_version%7D%7D" - estc-welcome-current: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current" - estc-welcome: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current" - estc-welcome-all: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions" - hadoop-ref: "https://www.elastic.co/guide/en/elasticsearch/hadoop/current" - stack-ref: "https://www.elastic.co/guide/en/elastic-stack/current" - stack-ref-67: "https://www.elastic.co/guide/en/elastic-stack/6.7" - stack-ref-68: "https://www.elastic.co/guide/en/elastic-stack/6.8" - stack-ref-70: "https://www.elastic.co/guide/en/elastic-stack/7.0" - stack-ref-80: "https://www.elastic.co/guide/en/elastic-stack/8.0" - stack-ov: "https://www.elastic.co/guide/en/elastic-stack-overview/current" - stack-gs: "https://www.elastic.co/guide/en/elastic-stack-get-started/current" - stack-gs-current: "https://www.elastic.co/guide/en/elastic-stack-get-started/current" - javaclient: "https://www.elastic.co/guide/en/elasticsearch/client/java-api/current" - java-api-client: "https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/current" - java-rest: "https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current" - jsclient: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current" - jsclient-current: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current" - es-ruby-client: "https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current" - es-dotnet-client: "https://www.elastic.co/guide/en/elasticsearch/client/net-api/current" - es-php-client: "https://www.elastic.co/guide/en/elasticsearch/client/php-api/current" - es-python-client: "https://www.elastic.co/guide/en/elasticsearch/client/python-api/current" - defguide: 
"https://www.elastic.co/guide/en/elasticsearch/guide/2.x" - painless: "https://www.elastic.co/guide/en/elasticsearch/painless/current" - plugins: "https://www.elastic.co/guide/en/elasticsearch/plugins/current" - plugins-8x: "https://www.elastic.co/guide/en/elasticsearch/plugins/8.1" - plugins-7x: "https://www.elastic.co/guide/en/elasticsearch/plugins/7.17" - plugins-6x: "https://www.elastic.co/guide/en/elasticsearch/plugins/6.8" - glossary: "https://www.elastic.co/guide/en/elastic-stack-glossary/current" - upgrade_guide: "https://www.elastic.co/products/upgrade_guide" - blog-ref: "https://www.elastic.co/blog/" - curator-ref: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current" - curator-ref-current: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current" - metrics-ref: "https://www.elastic.co/guide/en/metrics/current" - metrics-guide: "https://www.elastic.co/guide/en/metrics/guide/current" - logs-ref: "https://www.elastic.co/guide/en/logs/current" - logs-guide: "https://www.elastic.co/guide/en/logs/guide/current" - uptime-guide: "https://www.elastic.co/guide/en/uptime/current" - observability-guide: "https://www.elastic.co/guide/en/observability/current" - observability-guide-all: "https://www.elastic.co/guide/en/observability" - siem-guide: "https://www.elastic.co/guide/en/siem/guide/current" - security-guide: "https://www.elastic.co/guide/en/security/current" - security-guide-all: "https://www.elastic.co/guide/en/security" - endpoint-guide: "https://www.elastic.co/guide/en/endpoint/current" - sql-odbc: "https://www.elastic.co/guide/en/elasticsearch/sql-odbc/current" - ecs-ref: "https://www.elastic.co/guide/en/ecs/current" - ecs-logging-ref: "https://www.elastic.co/guide/en/ecs-logging/overview/current" - ecs-logging-go-logrus-ref: "https://www.elastic.co/guide/en/ecs-logging/go-logrus/current" - ecs-logging-go-zap-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zap/current" - ecs-logging-go-zerolog-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zap/current" - ecs-logging-java-ref: "https://www.elastic.co/guide/en/ecs-logging/java/current" - ecs-logging-dotnet-ref: "https://www.elastic.co/guide/en/ecs-logging/dotnet/current" - ecs-logging-nodejs-ref: "https://www.elastic.co/guide/en/ecs-logging/nodejs/current" - ecs-logging-php-ref: "https://www.elastic.co/guide/en/ecs-logging/php/current" - ecs-logging-python-ref: "https://www.elastic.co/guide/en/ecs-logging/python/current" - ecs-logging-ruby-ref: "https://www.elastic.co/guide/en/ecs-logging/ruby/current" - ml-docs: "https://www.elastic.co/guide/en/machine-learning/current" - eland-docs: "https://www.elastic.co/guide/en/elasticsearch/client/eland/current" - eql-ref: "https://eql.readthedocs.io/en/latest/query-guide" - extendtrial: "https://www.elastic.co/trialextension" - wikipedia: "https://en.wikipedia.org/wiki" - forum: "https://discuss.elastic.co/" - xpack-forum: "https://discuss.elastic.co/c/50-x-pack" - security-forum: "https://discuss.elastic.co/c/x-pack/shield" - watcher-forum: "https://discuss.elastic.co/c/x-pack/watcher" - monitoring-forum: "https://discuss.elastic.co/c/x-pack/marvel" - 
graph-forum: "https://discuss.elastic.co/c/x-pack/graph" - apm-forum: "https://discuss.elastic.co/c/apm" - enterprise-search-ref: "https://www.elastic.co/guide/en/enterprise-search/current" - app-search-ref: "https://www.elastic.co/guide/en/app-search/current" - workplace-search-ref: "https://www.elastic.co/guide/en/workplace-search/current" - enterprise-search-node-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/enterprise-search-node/current" - enterprise-search-php-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/php/current" - enterprise-search-python-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/python/current" - enterprise-search-ruby-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/ruby/current" - elastic-maps-service: "https://maps.elastic.co/" - integrations-docs: "https://docs.elastic.co/en/integrations" - integrations-devguide: "https://www.elastic.co/guide/en/integrations-developer/current" - time-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units" - byte-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units" - apm-py-ref-v: "https://www.elastic.co/guide/en/apm/agent/python/current" - apm-node-ref-v: "https://www.elastic.co/guide/en/apm/agent/nodejs/current" - apm-rum-ref-v: "https://www.elastic.co/guide/en/apm/agent/rum-js/current" - apm-ruby-ref-v: "https://www.elastic.co/guide/en/apm/agent/ruby/current" - apm-java-ref-v: "https://www.elastic.co/guide/en/apm/agent/java/current" - apm-go-ref-v: "https://www.elastic.co/guide/en/apm/agent/go/current" - apm-ios-ref-v: "https://www.elastic.co/guide/en/apm/agent/swift/current" - apm-dotnet-ref-v: "https://www.elastic.co/guide/en/apm/agent/dotnet/current" - apm-php-ref-v: "https://www.elastic.co/guide/en/apm/agent/php/current" - ecloud: "Elastic Cloud" - esf: "Elastic Serverless Forwarder" - ess: "Elasticsearch Service" - ece: "Elastic Cloud Enterprise" - eck: "Elastic Cloud on Kubernetes" - serverless-full: "Elastic Cloud Serverless" - serverless-short: "Serverless" - es-serverless: "Elasticsearch Serverless" - es3: "Elasticsearch Serverless" - obs-serverless: "Elastic Observability Serverless" - sec-serverless: "Elastic Security Serverless" - serverless-docs: "https://docs.elastic.co/serverless" - cloud: "https://www.elastic.co/guide/en/cloud/current" - ess-utm-params: "?page=docs&placement=docs-body" - ess-baymax: "?page=docs&placement=docs-body" - ess-trial: "https://cloud.elastic.co/registration?page=docs&placement=docs-body" - ess-product: "https://www.elastic.co/cloud/elasticsearch-service?page=docs&placement=docs-body" - ess-console: "https://cloud.elastic.co/?page=docs&placement=docs-body" - ess-console-name: "Elasticsearch Service Console" - ess-deployments: "https://cloud.elastic.co/deployments?page=docs&placement=docs-body" - ece-ref: "https://www.elastic.co/guide/en/cloud-enterprise/current" - eck-ref: "https://www.elastic.co/guide/en/cloud-on-k8s/current" - ess-leadin: "You can run Elasticsearch on your own hardware or use our hosted Elasticsearch Service that is available on AWS, GCP, and Azure. 
https://cloud.elastic.co/registration{ess-utm-params}[Try the Elasticsearch Service for free]." - ess-leadin-short: "Our hosted Elasticsearch Service is available on AWS, GCP, and Azure, and you can https://cloud.elastic.co/registration{ess-utm-params}[try it for free]." - ess-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"https://cloud.elastic.co/registration%7Bess-utm-params%7D/", title=\"Supported on Elasticsearch Service\"]" - ece-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud_ece.svg[link=\"https://cloud.elastic.co/registration%7Bess-utm-params%7D/", title=\"Supported on Elastic Cloud Enterprise\"]" - cloud-only: "This feature is designed for indirect use by https://cloud.elastic.co/registration{ess-utm-params}[Elasticsearch Service], https://www.elastic.co/guide/en/cloud-enterprise/{ece-version-link}[Elastic Cloud Enterprise], and https://www.elastic.co/guide/en/cloud-on-k8s/current[Elastic Cloud on Kubernetes]. Direct use is not supported." - ess-setting-change: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"{ess-trial}\", title=\"Supported on {ess}\"] indicates a change to a supported https://www.elastic.co/guide/en/cloud/current/ec-add-user-settings.html[user setting] for Elasticsearch Service." - ess-skip-section: "If you use Elasticsearch Service, skip this section. Elasticsearch Service handles these changes for you." - api-cloud: "https://www.elastic.co/docs/api/doc/cloud" - api-ece: "https://www.elastic.co/docs/api/doc/cloud-enterprise" - api-kibana-serverless: "https://www.elastic.co/docs/api/doc/serverless" - es-feature-flag: "This feature is in development and not yet available for use. This documentation is provided for informational purposes only." 
- es-ref-dir: "'{{elasticsearch-root}}/docs/reference'" - apm-app: "APM app" - uptime-app: "Uptime app" - synthetics-app: "Synthetics app" - logs-app: "Logs app" - metrics-app: "Metrics app" - infrastructure-app: "Infrastructure app" - siem-app: "SIEM app" - security-app: "Elastic Security app" - ml-app: "Machine Learning" - dev-tools-app: "Dev Tools" - ingest-manager-app: "Ingest Manager" - stack-manage-app: "Stack Management" - stack-monitor-app: "Stack Monitoring" - alerts-ui: "Alerts and Actions" - rules-ui: "Rules" - rac-ui: "Rules and Connectors" - connectors-ui: "Connectors" - connectors-feature: "Actions and Connectors" - stack-rules-feature: "Stack Rules" - user-experience: "User Experience" - ems: "Elastic Maps Service" - ems-init: "EMS" - hosted-ems: "Elastic Maps Server" - ipm-app: "Index Pattern Management" - ingest-pipelines: "ingest pipelines" - ingest-pipelines-app: "Ingest Pipelines" - ingest-pipelines-cap: "Ingest pipelines" - ls-pipelines: "Logstash pipelines" - ls-pipelines-app: "Logstash Pipelines" - maint-windows: "maintenance windows" - maint-windows-app: "Maintenance Windows" - maint-windows-cap: "Maintenance windows" - custom-roles-app: "Custom Roles" - data-source: "data view" - data-sources: "data views" - data-source-caps: "Data View" - data-sources-caps: "Data Views" - data-source-cap: "Data view" - data-sources-cap: "Data views" - project-settings: "Project settings" - manage-app: "Management" - index-manage-app: "Index Management" - data-views-app: "Data Views" - rules-app: "Rules" - saved-objects-app: "Saved Objects" - tags-app: "Tags" - api-keys-app: "API keys" - transforms-app: "Transforms" - connectors-app: "Connectors" - files-app: "Files" - reports-app: "Reports" - maps-app: "Maps" - alerts-app: "Alerts" - crawler: "Enterprise Search web crawler" - ents: "Enterprise Search" - app-search-crawler: "App Search web crawler" - agent: "Elastic Agent" - agents: "Elastic Agents" - fleet: "Fleet" - fleet-server: "Fleet Server" - integrations-server: "Integrations Server" - ingest-manager: "Ingest Manager" - ingest-management: "ingest management" - package-manager: "Elastic Package Manager" - integrations: "Integrations" - package-registry: "Elastic Package Registry" - artifact-registry: "Elastic Artifact Registry" - aws: "AWS" stack: "Elastic Stack" - xpack: "X-Pack" es: "Elasticsearch" - kib: "Kibana" - esms: "Elastic Stack Monitoring Service" - esms-init: "ESMS" - ls: "Logstash" - beats: "Beats" - auditbeat: "Auditbeat" - filebeat: "Filebeat" - heartbeat: "Heartbeat" - metricbeat: "Metricbeat" - packetbeat: "Packetbeat" - winlogbeat: "Winlogbeat" - functionbeat: "Functionbeat" - journalbeat: "Journalbeat" - es-sql: "Elasticsearch SQL" - esql: "ES|QL" - elastic-agent: "Elastic Agent" - k8s: "Kubernetes" - log-driver-long: "Elastic Logging Plugin for Docker" - security: "X-Pack security" - security-features: "security features" - operator-feature: "operator privileges feature" - es-security-features: "Elasticsearch security features" - stack-security-features: "Elastic Stack security features" - endpoint-sec: "Endpoint Security" - endpoint-cloud-sec: "Endpoint and Cloud Security" - elastic-defend: "Elastic Defend" - elastic-sec: "Elastic Security" - elastic-endpoint: "Elastic Endpoint" - swimlane: "Swimlane" - sn: "ServiceNow" - sn-itsm: "ServiceNow ITSM" - sn-itom: "ServiceNow ITOM" - sn-sir: "ServiceNow SecOps" - jira: "Jira" - ibm-r: "IBM Resilient" - webhook: "Webhook" - webhook-cm: "Webhook - Case Management" - opsgenie: "Opsgenie" - bedrock: "Amazon 
Bedrock" - gemini: "Google Gemini" - hive: "TheHive" - monitoring: "X-Pack monitoring" - monitor-features: "monitoring features" - stack-monitor-features: "Elastic Stack monitoring features" - watcher: "Watcher" - alert-features: "alerting features" - reporting: "X-Pack reporting" - report-features: "reporting features" - graph: "X-Pack graph" - graph-features: "graph analytics features" - searchprofiler: "Search Profiler" - xpackml: "X-Pack machine learning" - ml: "machine learning" - ml-cap: "Machine learning" - ml-init: "ML" - ml-features: "machine learning features" - stack-ml-features: "Elastic Stack machine learning features" - ccr: "cross-cluster replication" - ccr-cap: "Cross-cluster replication" - ccr-init: "CCR" - ccs: "cross-cluster search" - ccs-cap: "Cross-cluster search" - ccs-init: "CCS" - ilm: "index lifecycle management" - ilm-cap: "Index lifecycle management" - ilm-init: "ILM" - dlm: "data lifecycle management" - dlm-cap: "Data lifecycle management" - dlm-init: "DLM" - search-snap: "searchable snapshot" - search-snaps: "searchable snapshots" - search-snaps-cap: "Searchable snapshots" - slm: "snapshot lifecycle management" - slm-cap: "Snapshot lifecycle management" - slm-init: "SLM" - rollup-features: "data rollup features" - ipm: "index pattern management" - ipm-cap: "Index pattern" - rollup: "rollup" - rollup-cap: "Rollup" - rollups: "rollups" - rollups-cap: "Rollups" - rollup-job: "rollup job" - rollup-jobs: "rollup jobs" - rollup-jobs-cap: "Rollup jobs" - dfeed: "datafeed" - dfeeds: "datafeeds" - dfeed-cap: "Datafeed" - dfeeds-cap: "Datafeeds" - ml-jobs: "machine learning jobs" - ml-jobs-cap: "Machine learning jobs" - anomaly-detect: "anomaly detection" - anomaly-detect-cap: "Anomaly detection" - anomaly-job: "anomaly detection job" - anomaly-jobs: "anomaly detection jobs" - anomaly-jobs-cap: "Anomaly detection jobs" - dataframe: "data frame" - dataframes: "data frames" - dataframe-cap: "Data frame" - dataframes-cap: "Data frames" - watcher-transform: "payload transform" - watcher-transforms: "payload transforms" - watcher-transform-cap: "Payload transform" - watcher-transforms-cap: "Payload transforms" - transform: "transform" - transforms: "transforms" - transform-cap: "Transform" - transforms-cap: "Transforms" - dataframe-transform: "transform" - dataframe-transform-cap: "Transform" - dataframe-transforms: "transforms" - dataframe-transforms-cap: "Transforms" - dfanalytics-cap: "Data frame analytics" - dfanalytics: "data frame analytics" - dataframe-analytics-config: "'{dataframe} analytics config'" - dfanalytics-job: "'{dataframe} analytics job'" - dfanalytics-jobs: "'{dataframe} analytics jobs'" - dfanalytics-jobs-cap: "'{dataframe-cap} analytics jobs'" - cdataframe: "continuous data frame" - cdataframes: "continuous data frames" - cdataframe-cap: "Continuous data frame" - cdataframes-cap: "Continuous data frames" - cdataframe-transform: "continuous transform" - cdataframe-transforms: "continuous transforms" - cdataframe-transforms-cap: "Continuous transforms" - ctransform: "continuous transform" - ctransform-cap: "Continuous transform" - ctransforms: "continuous transforms" - ctransforms-cap: "Continuous transforms" - oldetection: "outlier detection" - oldetection-cap: "Outlier detection" - olscore: "outlier score" - olscores: "outlier scores" - fiscore: "feature influence score" - evaluatedf-api: "evaluate {dataframe} analytics API" - evaluatedf-api-cap: "Evaluate {dataframe} analytics API" - binarysc: "binary soft classification" - binarysc-cap: "Binary soft 
classification" - regression: "regression" - regression-cap: "Regression" - reganalysis: "regression analysis" - reganalysis-cap: "Regression analysis" - depvar: "dependent variable" - feature-var: "feature variable" - feature-vars: "feature variables" - feature-vars-cap: "Feature variables" - classification: "classification" - classification-cap: "Classification" - classanalysis: "classification analysis" - classanalysis-cap: "Classification analysis" - infer-cap: "Inference" - infer: "inference" - lang-ident-cap: "Language identification" - lang-ident: "language identification" - data-viz: "Data Visualizer" - file-data-viz: "File Data Visualizer" - feat-imp: "feature importance" - feat-imp-cap: "Feature importance" - nlp: "natural language processing" - nlp-cap: "Natural language processing" - apm-agent: "APM agent" - apm-go-agent: "Elastic APM Go agent" - apm-go-agents: "Elastic APM Go agents" - apm-ios-agent: "Elastic APM iOS agent" - apm-ios-agents: "Elastic APM iOS agents" - apm-java-agent: "Elastic APM Java agent" - apm-java-agents: "Elastic APM Java agents" - apm-dotnet-agent: "Elastic APM .NET agent" - apm-dotnet-agents: "Elastic APM .NET agents" - apm-node-agent: "Elastic APM Node.js agent" - apm-node-agents: "Elastic APM Node.js agents" - apm-php-agent: "Elastic APM PHP agent" - apm-php-agents: "Elastic APM PHP agents" - apm-py-agent: "Elastic APM Python agent" - apm-py-agents: "Elastic APM Python agents" - apm-ruby-agent: "Elastic APM Ruby agent" - apm-ruby-agents: "Elastic APM Ruby agents" - apm-rum-agent: "Elastic APM Real User Monitoring (RUM) JavaScript agent" - apm-rum-agents: "Elastic APM RUM JavaScript agents" - apm-lambda-ext: "Elastic APM AWS Lambda extension" - project-monitors: "project monitors" - project-monitors-cap: "Project monitors" - private-location: "Private Location" - private-locations: "Private Locations" - pwd: "YOUR_PASSWORD" - esh: "ES-Hadoop" - default-dist: "default distribution" - oss-dist: "OSS-only distribution" - observability: "Observability" - api-request-title: "Request" - api-prereq-title: "Prerequisites" - api-description-title: "Description" - api-path-parms-title: "Path parameters" - api-query-parms-title: "Query parameters" - api-request-body-title: "Request body" - api-response-codes-title: "Response codes" - api-response-body-title: "Response body" - api-example-title: "Example" - api-examples-title: "Examples" - api-definitions-title: "Properties" - multi-arg: "†footnoteref:[multi-arg,This parameter accepts multiple arguments.]" - multi-arg-ref: "†footnoteref:[multi-arg]" - yes-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png[Yes,20,15]" - no-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png[No,20,15]" - es-repo: "https://github.com/elastic/elasticsearch/" - es-issue: "https://github.com/elastic/elasticsearch/issues/" - es-pull: "https://github.com/elastic/elasticsearch/pull/" - es-commit: "https://github.com/elastic/elasticsearch/commit/" - kib-repo: "https://github.com/elastic/kibana/" - kib-issue: "https://github.com/elastic/kibana/issues/" - kibana-issue: "'{kib-repo}issues/'" - kib-pull: "https://github.com/elastic/kibana/pull/" - kibana-pull: "'{kib-repo}pull/'" - kib-commit: "https://github.com/elastic/kibana/commit/" - ml-repo: "https://github.com/elastic/ml-cpp/" - ml-issue: "https://github.com/elastic/ml-cpp/issues/" - ml-pull: "https://github.com/elastic/ml-cpp/pull/" - ml-commit: 
"https://github.com/elastic/ml-cpp/commit/" - apm-repo: "https://github.com/elastic/apm-server/" - apm-issue: "https://github.com/elastic/apm-server/issues/" - apm-pull: "https://github.com/elastic/apm-server/pull/" - kibana-blob: "https://github.com/elastic/kibana/blob/current/" - apm-get-started-ref: "https://www.elastic.co/guide/en/apm/get-started/current" - apm-server-ref: "https://www.elastic.co/guide/en/apm/server/current" - apm-server-ref-v: "https://www.elastic.co/guide/en/apm/server/current" - apm-server-ref-m: "https://www.elastic.co/guide/en/apm/server/master" - apm-server-ref-62: "https://www.elastic.co/guide/en/apm/server/6.2" - apm-server-ref-64: "https://www.elastic.co/guide/en/apm/server/6.4" - apm-server-ref-70: "https://www.elastic.co/guide/en/apm/server/7.0" - apm-overview-ref-v: "https://www.elastic.co/guide/en/apm/get-started/current" - apm-overview-ref-70: "https://www.elastic.co/guide/en/apm/get-started/7.0" - apm-overview-ref-m: "https://www.elastic.co/guide/en/apm/get-started/master" - infra-guide: "https://www.elastic.co/guide/en/infrastructure/guide/current" - a-data-source: "a data view" - icon-bug: "pass:[]" - icon-checkInCircleFilled: "pass:[]" - icon-warningFilled: "pass:[]" diff --git a/docs/reference/getting-started.md b/docs/reference/getting-started.md index 59b290037..1420d6c4c 100644 --- a/docs/reference/getting-started.md +++ b/docs/reference/getting-started.md @@ -45,13 +45,13 @@ const client = new Client({ Your Elasticsearch endpoint can be found on the **My deployment** page of your deployment: -:::{image} ../images/es-endpoint.jpg +:::{image} images/es-endpoint.jpg :alt: Finding Elasticsearch endpoint ::: You can generate an API key on the **Management** page under Security. 
-:::{image} ../images/create-api-key.png +:::{image} images/create-api-key.png :alt: Create API key ::: diff --git a/docs/images/create-api-key.png b/docs/reference/images/create-api-key.png similarity index 100% rename from docs/images/create-api-key.png rename to docs/reference/images/create-api-key.png diff --git a/docs/images/es-endpoint.jpg b/docs/reference/images/es-endpoint.jpg similarity index 100% rename from docs/images/es-endpoint.jpg rename to docs/reference/images/es-endpoint.jpg From b7754562967c082936549ca627af27ac7be853eb Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Fri, 21 Mar 2025 12:44:48 -0500 Subject: [PATCH 496/647] Update dependency typescript to v5.8.2 (#2660) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index f0c943d94..a3772a52c 100644 --- a/package.json +++ b/package.json @@ -84,7 +84,7 @@ "tap": "21.0.2", "ts-node": "10.9.2", "ts-standard": "12.0.2", - "typescript": "5.7.3", + "typescript": "5.8.2", "workq": "3.0.0", "xmlbuilder2": "3.1.1", "zx": "7.2.3" From 0f09faefbdb8b3646b5462a6b46d7d32978adb92 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Fri, 21 Mar 2025 12:47:43 -0500 Subject: [PATCH 497/647] Update dependency tap to v21.1.0 (#2659) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index a3772a52c..ca15cf3ef 100644 --- a/package.json +++ b/package.json @@ -81,7 +81,7 @@ "semver": "7.7.1", "split2": "4.2.0", "stoppable": "1.1.0", - "tap": "21.0.2", + "tap": "21.1.0", "ts-node": "10.9.2", "ts-standard": "12.0.2", "typescript": "5.8.2", From 88270bf3547cad9cb14ab2ecaae77339a51cf431 Mon Sep 17 00:00:00 2001 From: Kaarina Tungseth Date: Fri, 21 Mar 2025 12:49:56 -0500 Subject: [PATCH 498/647] Updates navigation titles and descriptions for release notes (#2665) --- docs/release-notes/breaking-changes.md | 8 ++------ docs/release-notes/deprecations.md | 8 +++----- docs/release-notes/index.md | 2 -- docs/release-notes/known-issues.md | 2 +- 4 files changed, 6 insertions(+), 14 deletions(-) diff --git a/docs/release-notes/breaking-changes.md b/docs/release-notes/breaking-changes.md index 9758cf808..a05c07b7c 100644 --- a/docs/release-notes/breaking-changes.md +++ b/docs/release-notes/breaking-changes.md @@ -1,14 +1,11 @@ --- -navigation_title: "Elasticsearch JavaScript Client" +navigation_title: "Breaking changes" --- # Elasticsearch JavaScript Client breaking changes [elasticsearch-javascript-client-breaking-changes] -Before you upgrade, carefully review the Elasticsearch JavaScript Client breaking changes and take the necessary steps to mitigate any issues. - -To learn how to upgrade, check out . +Breaking changes can impact your Elastic applications, potentially disrupting normal operations. Before you upgrade, carefully review the Elasticsearch JavaScript Client breaking changes and take the necessary steps to mitigate any issues. To learn how to upgrade, check [Upgrade](docs-content://deploy-manage/upgrade.md). 
% ## Next version [elasticsearch-javascript-client-versionnext-breaking-changes] -% **Release date:** Month day, year % ::::{dropdown} Title of breaking change % Description of the breaking change. % For more information, check [PR #](PR link). % **Impact**
 % :::: % ## 9.0.0 [elasticsearch-javascript-client-900-breaking-changes] -% **Release date:** March 25, 2025 % ::::{dropdown} Title of breaking change % Description of the breaking change. diff --git a/docs/release-notes/deprecations.md b/docs/release-notes/deprecations.md index fef8650dd..df309211a 100644 --- a/docs/release-notes/deprecations.md +++ b/docs/release-notes/deprecations.md @@ -1,14 +1,13 @@ --- -navigation_title: "Elasticsearch JavaScript Client" +navigation_title: "Deprecations" --- # Elasticsearch JavaScript Client deprecations [elasticsearch-javascript-client-deprecations] -Review the deprecated functionality for your Elasticsearch JavaScript Client version. While deprecations have no immediate impact, we strongly encourage you update your implementation after you upgrade. +Over time, certain Elastic functionality becomes outdated and is replaced or removed. To help with the transition, Elastic deprecates functionality for a period before removal, giving you time to update your applications. -To learn how to upgrade, check out . +Review the deprecated functionality for Elasticsearch JavaScript Client. While deprecations have no immediate impact, we strongly encourage you to update your implementation after you upgrade. To learn how to upgrade, check out [Upgrade](docs-content://deploy-manage/upgrade.md). % ## Next version -% **Release date:** Month day, year % ::::{dropdown} Deprecation title % Description of the deprecation. % For more information, check [PR #](PR link). % **Impact**
 % :::: % ## 9.0.0 [elasticsearch-javascript-client-900-deprecations] -% **Release date:** March 25, 2025 % ::::{dropdown} Deprecation title % Description of the deprecation. diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md index 071841de1..4e39b615b 100644 --- a/docs/release-notes/index.md +++ b/docs/release-notes/index.md @@ -11,7 +11,6 @@ To check for security updates, go to [Security announcements for the Elastic sta 
 % Release notes include only features, enhancements, and fixes. Add breaking changes, deprecations, and known issues to the applicable release notes sections.
% ## version.next [elasticsearch-javascript-client-next-release-notes] -% **Release date:** Month day, year % ### Features and enhancements [elasticsearch-javascript-client-next-features-enhancements] % * @@ -20,7 +19,6 @@ To check for security updates, go to [Security announcements for the Elastic sta % * ## 9.0.0 [elasticsearch-javascript-client-900-release-notes] -**Release date:** March 25, 2025 ### Features and enhancements [elasticsearch-javascript-client-900-features-enhancements] diff --git a/docs/release-notes/known-issues.md b/docs/release-notes/known-issues.md index 16ca9fb3c..86856b104 100644 --- a/docs/release-notes/known-issues.md +++ b/docs/release-notes/known-issues.md @@ -1,5 +1,5 @@ --- -navigation_title: "Elasticsearch JavaScript Client" +navigation_title: "Known issues" --- From 867ceda5a3a927d286ab26d4b05f63ed8baf04ab Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 24 Mar 2025 19:55:18 +0200 Subject: [PATCH 499/647] Auto-generated API code (#2645) --- .../0bee07a581c5776e068f6f4efad5a399.asciidoc | 16 +- .../0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc | 31 +- .../120fcf9f55128d6a81d5e87a9c235bbd.asciidoc | 22 +- .../13d91782399ba1f291e103c18b5338cc.asciidoc | 25 + .../141ef0ebaa3b0772892b79b9bb85efb0.asciidoc | 5 +- .../15ac33d641b376d9494075eb1f0d4066.asciidoc | 10 + .../174b93c323aa8e9cc8ee2a3df5736810.asciidoc | 12 + .../19c00c6b29bc7dbc5e92b3668da2da93.asciidoc | 34 +- .../29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc | 15 + .../2a1eece9a59ac1773edcf0a932c26de0.asciidoc | 12 +- .../2afdf0d83724953aa2875b5fb37d60cc.asciidoc | 9 +- .../2f9ee29fe49f7d206a41212aa5945296.asciidoc | 22 + .../3649194a97d265a3bc758f8b38f7561e.asciidoc | 21 + .../3f1fe5f5f99b98d0891f38003e10b636.asciidoc | 12 +- .../405511f7c1f12cc0a227b4563fe7b2e2.asciidoc | 5 +- .../45954b8aaedfed57012be8b6538b0a24.asciidoc | 62 +- .../46b771a9932c3fa6057a7b2679c72ef0.asciidoc | 10 + .../4982c547be1ad9455ae836990aea92c5.asciidoc | 5 + .../4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc | 10 +- .../57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc | 12 +- .../5bba213a7f543190139d1a69ab2ed076.asciidoc | 16 +- .../615dc36f0978c676624fb7d1144b4899.asciidoc | 11 + .../66915e95b723ee2f6e5164a94b8f98c1.asciidoc | 12 + .../67b71a95b6fe6c83faae51ea038a1bf1.asciidoc | 10 + .../6f3b723bf6179b96c3413597ed7f49e1.asciidoc | 8 +- .../77518e8c6198acfe77c0934fd2fe65cb.asciidoc | 54 +- .../7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc | 5 +- .../80dd7f5882c59b9c1c90e8351937441f.asciidoc | 38 +- .../82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc | 48 +- .../91e106a2affbc8df32cd940684a779ed.asciidoc | 7 +- .../947efe87db7f8813c0878f8affc3e2d1.asciidoc | 8 + .../99fb82d49ac477e6a9dfdd71f9465374.asciidoc | 5 +- .../9afa0844883b7471883aa378a8dd10b4.asciidoc | 8 +- .../9c01db07c9ac395b6370e3b33965c21f.asciidoc | 16 +- .../a162eb50853331c80596f5994e9d1c38.asciidoc | 11 +- .../a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc | 10 + .../adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc | 10 + .../b0bddf2ffaa83049b195829c06b875cd.asciidoc | 5 +- .../b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc | 10 + .../b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc | 10 +- .../bcdfaa4487747249699a86a0dcd22f5e.asciidoc | 50 +- .../c580990a70028bb49cca8a6bde86bbf6.asciidoc | 10 +- .../ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc | 100 +- .../d35c8cf7a98b3f112e1de8797ec6689d.asciidoc | 10 +- .../d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc | 7 +- .../d8c053ee26c1533ce936ec81101d8e1b.asciidoc | 5 +- .../dd71b0c9f9197684ff29c61062c55660.asciidoc | 5 +- .../dde92fdf3469349ffe2c81764333543a.asciidoc | 
14 + .../e3019fd5f23458ae49ad9854c97d321c.asciidoc | 8 +- .../e4b38973c74037335378d8480f1ce894.asciidoc | 56 +- .../ec135f0cc0d3f526df68000b2a95c65b.asciidoc | 12 + docs/reference/api-reference.md | 17110 ++++++++-------- src/api/api/async_search.ts | 174 +- src/api/api/autoscaling.ts | 100 +- src/api/api/bulk.ts | 68 +- src/api/api/capabilities.ts | 36 +- src/api/api/cat.ts | 474 +- src/api/api/ccr.ts | 310 +- src/api/api/clear_scroll.ts | 53 +- src/api/api/close_point_in_time.ts | 53 +- src/api/api/cluster.ts | 342 +- src/api/api/connector.ts | 780 +- src/api/api/count.ts | 70 +- src/api/api/create.ts | 70 +- src/api/api/dangling_indices.ts | 69 +- src/api/api/delete.ts | 48 +- src/api/api/delete_by_query.ts | 87 +- src/api/api/delete_by_query_rethrottle.ts | 40 +- src/api/api/delete_script.ts | 41 +- src/api/api/enrich.ts | 115 +- src/api/api/eql.ts | 120 +- src/api/api/esql.ts | 149 +- src/api/api/exists.ts | 50 +- src/api/api/exists_source.ts | 49 +- src/api/api/explain.ts | 69 +- src/api/api/features.ts | 51 +- src/api/api/field_caps.ts | 66 +- src/api/api/fleet.ts | 223 +- src/api/api/get.ts | 51 +- src/api/api/get_script.ts | 40 +- src/api/api/get_script_context.ts | 36 +- src/api/api/get_script_languages.ts | 36 +- src/api/api/get_source.ts | 50 +- src/api/api/graph.ts | 62 +- src/api/api/health_report.ts | 42 +- src/api/api/ilm.ts | 222 +- src/api/api/index.ts | 69 +- src/api/api/indices.ts | 1430 +- src/api/api/inference.ts | 865 +- src/api/api/info.ts | 36 +- src/api/api/ingest.ts | 262 +- src/api/api/knn_search.ts | 62 +- src/api/api/license.ts | 133 +- src/api/api/logstash.ts | 80 +- src/api/api/mget.ts | 66 +- src/api/api/migration.ts | 59 +- src/api/api/ml.ts | 1850 +- src/api/api/monitoring.ts | 60 +- src/api/api/msearch.ts | 69 +- src/api/api/msearch_template.ts | 61 +- src/api/api/mtermvectors.ts | 69 +- src/api/api/nodes.ts | 158 +- src/api/api/open_point_in_time.ts | 62 +- src/api/api/ping.ts | 36 +- src/api/api/profiling.ts | 67 +- src/api/api/put_script.ts | 60 +- src/api/api/query_rules.ts | 177 +- src/api/api/rank_eval.ts | 61 +- src/api/api/reindex.ts | 67 +- src/api/api/reindex_rethrottle.ts | 40 +- src/api/api/render_search_template.ts | 56 +- src/api/api/rollup.ts | 171 +- src/api/api/scripts_painless_execute.ts | 55 +- src/api/api/scroll.ts | 58 +- src/api/api/search.ts | 134 +- src/api/api/search_application.ts | 218 +- src/api/api/search_mvt.ts | 80 +- src/api/api/search_shards.ts | 46 +- src/api/api/search_template.ts | 73 +- src/api/api/searchable_snapshots.ts | 108 +- src/api/api/security.ts | 1418 +- src/api/api/shutdown.ts | 91 +- src/api/api/simulate.ts | 62 +- src/api/api/slm.ts | 173 +- src/api/api/snapshot.ts | 333 +- src/api/api/sql.ts | 166 +- src/api/api/ssl.ts | 37 +- src/api/api/synonyms.ts | 150 +- src/api/api/tasks.ts | 77 +- src/api/api/terms_enum.ts | 61 +- src/api/api/termvectors.ts | 79 +- src/api/api/text_structure.ts | 160 +- src/api/api/transform.ts | 277 +- src/api/api/update.ts | 76 +- src/api/api/update_by_query.ts | 91 +- src/api/api/update_by_query_rethrottle.ts | 40 +- src/api/api/watcher.ts | 273 +- src/api/api/xpack.ts | 53 +- src/api/index.ts | 18 +- src/api/types.ts | 603 +- 140 files changed, 21961 insertions(+), 11460 deletions(-) create mode 100644 docs/doc_examples/13d91782399ba1f291e103c18b5338cc.asciidoc create mode 100644 docs/doc_examples/15ac33d641b376d9494075eb1f0d4066.asciidoc create mode 100644 docs/doc_examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc create mode 100644 
docs/doc_examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc create mode 100644 docs/doc_examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc create mode 100644 docs/doc_examples/3649194a97d265a3bc758f8b38f7561e.asciidoc create mode 100644 docs/doc_examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc create mode 100644 docs/doc_examples/615dc36f0978c676624fb7d1144b4899.asciidoc create mode 100644 docs/doc_examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc create mode 100644 docs/doc_examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc create mode 100644 docs/doc_examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc create mode 100644 docs/doc_examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc create mode 100644 docs/doc_examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc create mode 100644 docs/doc_examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc create mode 100644 docs/doc_examples/dde92fdf3469349ffe2c81764333543a.asciidoc create mode 100644 docs/doc_examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc diff --git a/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc b/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc index 506e4ff5b..486847ab6 100644 --- a/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc +++ b/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc @@ -3,17 +3,11 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_query/async", - querystring: { - format: "json", - }, - body: { - query: - "\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", - include_ccs_metadata: true, - }, +const response = await client.esql.asyncQuery({ + format: "json", + query: + "\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", + include_ccs_metadata: true, }); console.log(response); ---- diff --git a/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc index 045036fa2..5627427c4 100644 --- a/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc +++ b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc @@ -3,23 +3,20 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_application/search_application/my-app/_render_query", - body: { - params: { - query_string: "my first query", - text_fields: [ - { - name: "title", - boost: 5, - }, - { - name: "description", - boost: 1, - }, - ], - }, +const response = await client.searchApplication.renderQuery({ + name: "my-app", + params: { + query_string: "my first query", + text_fields: [ + { + name: "title", + boost: 5, + }, + { + name: "description", + boost: 1, + }, + ], }, }); console.log(response); diff --git a/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc b/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc index f6c1cb881..fbfd1cfc5 100644 --- a/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc +++ b/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc @@ -3,18 +3,16 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_inference/chat_completion/openai-completion/_stream", - body: { - model: "gpt-4o", - messages: [ - { - role: "user", - content: "What is Elastic?", - }, - ], - }, +const response = await client.inference.streamInference({ + task_type: "chat_completion", + inference_id: 
"openai-completion", + model: "gpt-4o", + messages: [ + { + role: "user", + content: "What is Elastic?", + }, + ], }); console.log(response); ---- diff --git a/docs/doc_examples/13d91782399ba1f291e103c18b5338cc.asciidoc b/docs/doc_examples/13d91782399ba1f291e103c18b5338cc.asciidoc new file mode 100644 index 000000000..c8f218cc1 --- /dev/null +++ b/docs/doc_examples/13d91782399ba1f291e103c18b5338cc.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createFrom({ + source: "my-index", + dest: "my-new-index", + create_from: { + settings_override: { + index: { + number_of_shards: 5, + }, + }, + mappings_override: { + properties: { + field2: { + type: "boolean", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc b/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc index 7d7aeab98..5387dbba3 100644 --- a/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc +++ b/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc @@ -3,9 +3,8 @@ [source, js] ---- -const response = await client.inference.put({ - task_type: "my-inference-endpoint", - inference_id: "_update", +const response = await client.inference.update({ + inference_id: "my-inference-endpoint", inference_config: { service_settings: { api_key: "", diff --git a/docs/doc_examples/15ac33d641b376d9494075eb1f0d4066.asciidoc b/docs/doc_examples/15ac33d641b376d9494075eb1f0d4066.asciidoc new file mode 100644 index 000000000..73da91b2d --- /dev/null +++ b/docs/doc_examples/15ac33d641b376d9494075eb1f0d4066.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.cancelMigrateReindex({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc b/docs/doc_examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc new file mode 100644 index 000000000..99a0f8861 --- /dev/null +++ b/docs/doc_examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.delegatePki({ + x509_certificate_chain: [ + 
"MIIDeDCCAmCgAwIBAgIUBzj/nGGKxP2iXawsSquHmQjCJmMwDQYJKoZIhvcNAQELBQAwUzErMCkGA1UEAxMiRWxhc3RpY3NlYXJjaCBUZXN0IEludGVybWVkaWF0ZSBDQTEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMB4XDTIzMDcxODE5MjkwNloXDTQzMDcxMzE5MjkwNlowSjEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAllHL4pQkkfwAm/oLkxYYO+r950DEy1bjH+4viCHzNADLCTWO+lOZJVlNx7QEzJE3QGMdif9CCBBxQFMapA7oUFCLq84fPSQQu5AnvvbltVD9nwVtCs+9ZGDjMKsz98RhSLMFIkxdxi6HkQ3Lfa4ZSI4lvba4oo+T/GveazBDS+NgmKyq00EOXt3tWi1G9vEVItommzXWfv0agJWzVnLMldwkPqsw0W7zrpyT7FZS4iLbQADGceOW8fiauOGMkscu9zAnDR/SbWl/chYioQOdw6ndFLn1YIFPd37xL0WsdsldTpn0vH3YfzgLMffT/3P6YlwBegWzsx6FnM/93Ecb4wIDAQABo00wSzAJBgNVHRMEAjAAMB0GA1UdDgQWBBQKNRwjW+Ad/FN1Rpoqme/5+jrFWzAfBgNVHSMEGDAWgBRcya0c0x/PaI7MbmJVIylWgLqXNjANBgkqhkiG9w0BAQsFAAOCAQEACZ3PF7Uqu47lplXHP6YlzYL2jL0D28hpj5lGtdha4Muw1m/BjDb0Pu8l0NQ1z3AP6AVcvjNDkQq6Y5jeSz0bwQlealQpYfo7EMXjOidrft1GbqOMFmTBLpLA9SvwYGobSTXWTkJzonqVaTcf80HpMgM2uEhodwTcvz6v1WEfeT/HMjmdIsq4ImrOL9RNrcZG6nWfw0HR3JNOgrbfyEztEI471jHznZ336OEcyX7gQuvHE8tOv5+oD1d7s3Xg1yuFp+Ynh+FfOi3hPCuaHA+7F6fLmzMDLVUBAllugst1C3U+L/paD7tqIa4ka+KNPCbSfwazmJrt4XNiivPR4hwH5g==", + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc index 724b30762..ab8b0fd13 100644 --- a/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc +++ b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc @@ -3,27 +3,23 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_ingest/_simulate", - body: { - docs: [ - { - _index: "my-index", - _id: "123", - _source: { - foo: "bar", - }, +const response = await client.simulate.ingest({ + docs: [ + { + _index: "my-index", + _id: "123", + _source: { + foo: "bar", }, - { - _index: "my-index", - _id: "456", - _source: { - foo: "rab", - }, + }, + { + _index: "my-index", + _id: "456", + _source: { + foo: "rab", }, - ], - }, + }, + ], }); console.log(response); ---- diff --git a/docs/doc_examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc b/docs/doc_examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc new file mode 100644 index 000000000..b98cfe9a8 --- /dev/null +++ b/docs/doc_examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.migrateReindex({ + reindex: { + source: { + index: "my-data-stream", + }, + mode: "upgrade", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc index b3545c105..b3ca912d0 100644 --- a/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc +++ b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc @@ -3,14 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_security/oidc/logout", - body: { - token: - "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", - refresh_token: "vLBPvmAB6KvwvJZr27cS", - }, +const response = await client.security.oidcLogout({ + token: + "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + refresh_token: "vLBPvmAB6KvwvJZr27cS", }); console.log(response); ---- diff 
--git a/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc index e05299751..32a8ae35c 100644 --- a/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc +++ b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc @@ -3,12 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - method: "GET", - path: "/_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", - querystring: { - wait_for_completion_timeout: "30s", - }, +const response = await client.esql.asyncQueryGet({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + wait_for_completion_timeout: "30s", }); console.log(response); ---- diff --git a/docs/doc_examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc b/docs/doc_examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc new file mode 100644 index 000000000..8f98c98a9 --- /dev/null +++ b/docs/doc_examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createFrom({ + source: "my-index", + dest: "my-new-index", + create_from: { + settings_override: { + index: { + "blocks.write": null, + "blocks.read": null, + "blocks.read_only": null, + "blocks.read_only_allow_delete": null, + "blocks.metadata": null, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3649194a97d265a3bc758f8b38f7561e.asciidoc b/docs/doc_examples/3649194a97d265a3bc758f8b38f7561e.asciidoc new file mode 100644 index 000000000..e465377fb --- /dev/null +++ b/docs/doc_examples/3649194a97d265a3bc758f8b38f7561e.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "semantic-embeddings", + mappings: { + properties: { + semantic_text: { + type: "semantic_text", + }, + content: { + type: "text", + copy_to: "semantic_text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc index 221e42b58..be6f3596c 100644 --- a/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc +++ b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc @@ -3,14 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_query/async", - body: { - query: - "\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ", - wait_for_completion_timeout: "2s", - }, +const response = await client.esql.asyncQuery({ + query: + "\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ", + wait_for_completion_timeout: "2s", }); console.log(response); ---- diff --git a/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc index 40d330c9d..c63439d9c 100644 --- a/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc +++ b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc @@ -3,9 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "GET", - path: 
"/_query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +const response = await client.esql.asyncQueryGet({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", }); console.log(response); ---- diff --git a/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc b/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc index a2ff623e6..4d6846969 100644 --- a/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc +++ b/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc @@ -3,44 +3,42 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_inference/chat_completion/openai-completion/_stream", - body: { - messages: [ - { - role: "user", - content: [ - { - type: "text", - text: "What's the price of a scarf?", - }, - ], - }, - ], - tools: [ - { - type: "function", - function: { - name: "get_current_price", - description: "Get the current price of a item", - parameters: { - type: "object", - properties: { - item: { - id: "123", - }, - }, - }, +const response = await client.inference.streamInference({ + task_type: "chat_completion", + inference_id: "openai-completion", + messages: [ + { + role: "user", + content: [ + { + type: "text", + text: "What's the price of a scarf?", }, - }, - ], - tool_choice: { + ], + }, + ], + tools: [ + { type: "function", function: { name: "get_current_price", + description: "Get the current price of a item", + parameters: { + type: "object", + properties: { + item: { + id: "123", + }, + }, + }, }, }, + ], + tool_choice: { + type: "function", + function: { + name: "get_current_price", + }, }, }); console.log(response); diff --git a/docs/doc_examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc b/docs/doc_examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc new file mode 100644 index 000000000..b7d64b4c8 --- /dev/null +++ b/docs/doc_examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMigrateReindexStatus({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4982c547be1ad9455ae836990aea92c5.asciidoc b/docs/doc_examples/4982c547be1ad9455ae836990aea92c5.asciidoc index b5d5d91f8..c6c3e829d 100644 --- a/docs/doc_examples/4982c547be1ad9455ae836990aea92c5.asciidoc +++ b/docs/doc_examples/4982c547be1ad9455ae836990aea92c5.asciidoc @@ -6,6 +6,11 @@ const response = await client.ml.startTrainedModelDeployment({ model_id: "my_model", deployment_id: "my_model_for_search", + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 3, + max_number_of_allocations: 10, + }, }); console.log(response); ---- diff --git a/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc index 823515f74..9ae0176bc 100644 --- a/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc +++ b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc @@ -3,12 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_inference/completion/openai-completion/_stream", - body: { - input: "What is Elastic?", - }, +const response = await client.inference.streamInference({ + task_type: "completion", + inference_id: "openai-completion", + input: "What is Elastic?", }); console.log(response); ---- diff --git 
a/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc index 2598c7bce..2e5e87c54 100644 --- a/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc +++ b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc @@ -3,14 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_security/oidc/prepare", - body: { - realm: "oidc1", - state: "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", - nonce: "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5", - }, +const response = await client.security.oidcPrepareAuthentication({ + realm: "oidc1", + state: "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", + nonce: "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5", }); console.log(response); ---- diff --git a/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc b/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc index 46cd0a13e..91478f094 100644 --- a/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc +++ b/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc @@ -3,17 +3,11 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_query/async", - querystring: { - format: "json", - }, - body: { - query: - "\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", - include_ccs_metadata: true, - }, +const response = await client.esql.asyncQuery({ + format: "json", + query: + "\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", + include_ccs_metadata: true, }); console.log(response); ---- diff --git a/docs/doc_examples/615dc36f0978c676624fb7d1144b4899.asciidoc b/docs/doc_examples/615dc36f0978c676624fb7d1144b4899.asciidoc new file mode 100644 index 000000000..a1c564297 --- /dev/null +++ b/docs/doc_examples/615dc36f0978c676624fb7d1144b4899.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataLifecycleStats({ + human: "true", + pretty: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc b/docs/doc_examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc new file mode 100644 index 000000000..c541fd004 --- /dev/null +++ b/docs/doc_examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createFrom({ + source: "my-index", + dest: "my-new-index", + create_from: null, +}); +console.log(response); +---- diff --git a/docs/doc_examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc b/docs/doc_examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc new file mode 100644 index 000000000..09b647d22 --- /dev/null +++ b/docs/doc_examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQueryDelete({ + id: "FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc 
b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc index f5995e6b6..47b3cfd86 100644 --- a/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc +++ b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc @@ -3,12 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_security/api_key/_bulk_update", - body: { - ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], - }, +const response = await client.security.bulkUpdateApiKeys({ + ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], }); console.log(response); ---- diff --git a/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc index ebe4fce86..4a8d900b8 100644 --- a/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc +++ b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc @@ -3,35 +3,31 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_text_structure/find_message_structure", - body: { - messages: [ - "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", - "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", - "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", - "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", - "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", - "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", - "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]", - "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]", - "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]", - "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled", - "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled", - "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled", - "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]", - "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]", - "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized", - "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ...", - ], - }, +const response = await client.textStructure.findMessageStructure({ + messages: [ + "[2024-03-05T10:52:36,256][INFO 
][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]", + "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]", + "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled", + "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled", + "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled", + "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]", + "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]", + "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized", + "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ...", + ], }); console.log(response); ---- diff --git a/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc b/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc index d1fcf443c..422e88d26 100644 --- a/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc +++ b/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc @@ -3,9 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "DELETE", - path: "/_ingest/ip_location/database/my-database-id", +const response = await client.ingest.deleteIpLocationDatabase({ + id: "my-database-id", }); console.log(response); ---- diff --git a/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc index 28fdff4a5..3e12f095c 100644 --- a/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc +++ b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc @@ -3,30 +3,26 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_security/api_key/_bulk_update", - body: { - ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], - role_descriptors: { - "role-a": { - indices: [ - { - names: ["*"], - privileges: ["write"], - }, - ], - }, +const 
response = await client.security.bulkUpdateApiKeys({ + ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], + role_descriptors: { + "role-a": { + indices: [ + { + names: ["*"], + privileges: ["write"], + }, + ], }, - metadata: { - environment: { - level: 2, - trusted: true, - tags: ["production"], - }, + }, + metadata: { + environment: { + level: 2, + trusted: true, + tags: ["production"], }, - expiration: "30d", }, + expiration: "30d", }); console.log(response); ---- diff --git a/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc b/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc index 7c7a7cba1..6958737be 100644 --- a/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc +++ b/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc @@ -3,32 +3,30 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_inference/chat_completion/openai-completion/_stream", - body: { - messages: [ - { - role: "assistant", - content: "Let's find out what the weather is", - tool_calls: [ - { - id: "call_KcAjWtAww20AihPHphUh46Gd", - type: "function", - function: { - name: "get_current_weather", - arguments: '{"location":"Boston, MA"}', - }, +const response = await client.inference.streamInference({ + task_type: "chat_completion", + inference_id: "openai-completion", + messages: [ + { + role: "assistant", + content: "Let's find out what the weather is", + tool_calls: [ + { + id: "call_KcAjWtAww20AihPHphUh46Gd", + type: "function", + function: { + name: "get_current_weather", + arguments: '{"location":"Boston, MA"}', }, - ], - }, - { - role: "tool", - content: "The weather is cold", - tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd", - }, - ], - }, + }, + ], + }, + { + role: "tool", + content: "The weather is cold", + tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd", + }, + ], }); console.log(response); ---- diff --git a/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc b/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc index 8d9b9da8b..8d425841b 100644 --- a/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc +++ b/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc @@ -3,10 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - method: "PUT", - path: "/_ingest/ip_location/database/my-database-1", - body: { +const response = await client.ingest.putIpLocationDatabase({ + id: "my-database-1", + configuration: { name: "GeoIP2-Domain", maxmind: { account_id: "1234567", diff --git a/docs/doc_examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc b/docs/doc_examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc new file mode 100644 index 000000000..1e8f4bd3e --- /dev/null +++ b/docs/doc_examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveCluster(); +console.log(response); +---- diff --git a/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc b/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc index 3f2ffdf6b..e5bfa5787 100644 --- a/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc +++ b/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc @@ -3,9 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "DELETE", - path: "/_ingest/ip_location/database/example-database-id", +const response = await 
client.ingest.deleteIpLocationDatabase({ + id: "example-database-id", }); console.log(response); ---- diff --git a/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc index 0cf3aea4d..e890718c7 100644 --- a/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc +++ b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc @@ -3,10 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_application/analytics/my_analytics_collection/event/search_click", - body: { +const response = await client.searchApplication.postBehavioralAnalyticsEvent({ + collection_name: "my_analytics_collection", + event_type: "search_click", + payload: { session: { id: "1797ca95-91c9-4e2e-b1bd-9c38e6f386a9", }, diff --git a/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc index 8e19908d0..9000232a8 100644 --- a/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc +++ b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc @@ -3,16 +3,12 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_security/oidc/authenticate", - body: { - redirect_uri: - "https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", - state: "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", - nonce: "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM", - realm: "oidc1", - }, +const response = await client.security.oidcAuthenticate({ + redirect_uri: + "https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", + state: "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", + nonce: "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM", + realm: "oidc1", }); console.log(response); ---- diff --git a/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc index afaf9d7dc..2979e9a90 100644 --- a/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc +++ b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc @@ -3,13 +3,10 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_application/search_application/my_search_application/_render_query", - body: { - params: { - query_string: "rock climbing", - }, +const response = await client.searchApplication.renderQuery({ + name: "my_search_application", + params: { + query_string: "rock climbing", }, }); console.log(response); diff --git a/docs/doc_examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc b/docs/doc_examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc new file mode 100644 index 000000000..73da91b2d --- /dev/null +++ b/docs/doc_examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.cancelMigrateReindex({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc b/docs/doc_examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc new file mode 100644 index 000000000..b7d64b4c8 --- /dev/null +++ b/docs/doc_examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO 
NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMigrateReindexStatus({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc index 5186df2ae..82a81bced 100644 --- a/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc +++ b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc @@ -3,9 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_application/search_application/my_search_application/_render_query", +const response = await client.searchApplication.renderQuery({ + name: "my_search_application", }); console.log(response); ---- diff --git a/docs/doc_examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc b/docs/doc_examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc new file mode 100644 index 000000000..9c3d8a489 --- /dev/null +++ b/docs/doc_examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQueryStop({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc index 57b7fb69d..7ffe922db 100644 --- a/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc +++ b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc @@ -208,13 +208,9 @@ const response = await client.bulk({ }); console.log(response); -const response1 = await client.transport.request({ - method: "GET", - path: "/_text_structure/find_field_structure", - querystring: { - index: "test-logs", - field: "message", - }, +const response1 = await client.textStructure.findFieldStructure({ + index: "test-logs", + field: "message", }); console.log(response1); ---- diff --git a/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc index 80d974285..01a784b51 100644 --- a/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc +++ b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc @@ -3,36 +3,32 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_ingest/_simulate", - body: { - docs: [ - { - _index: "my-index", - _id: "123", - _source: { - foo: "bar", - }, +const response = await client.simulate.ingest({ + docs: [ + { + _index: "my-index", + _id: "123", + _source: { + foo: "bar", }, - { - _index: "my-index", - _id: "456", - _source: { - foo: "rab", - }, + }, + { + _index: "my-index", + _id: "456", + _source: { + foo: "rab", }, - ], - pipeline_substitutions: { - "my-pipeline": { - processors: [ - { - uppercase: { - field: "foo", - }, + }, + ], + pipeline_substitutions: { + "my-pipeline": { + processors: [ + { + uppercase: { + field: "foo", }, - ], - }, + }, + ], }, }, }); diff --git a/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc index abc332dd4..9de280733 100644 --- a/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc +++ b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc @@ -3,13 +3,9 @@ [source, js] ---- -const response 
= await client.transport.request({ - method: "POST", - path: "/_security/api_key/_bulk_update", - body: { - ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], - role_descriptors: {}, - }, +const response = await client.security.bulkUpdateApiKeys({ + ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], + role_descriptors: {}, }); console.log(response); ---- diff --git a/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc b/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc index d44a7b669..e1e69507e 100644 --- a/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc +++ b/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc @@ -3,69 +3,65 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_ingest/_simulate", - body: { - docs: [ - { - _index: "my-index", - _id: "id", - _source: { - foo: "bar", - }, +const response = await client.simulate.ingest({ + docs: [ + { + _index: "my-index", + _id: "id", + _source: { + foo: "bar", }, - { - _index: "my-index", - _id: "id", - _source: { - foo: "rab", - }, + }, + { + _index: "my-index", + _id: "id", + _source: { + foo: "rab", }, - ], - pipeline_substitutions: { - "my-pipeline": { - processors: [ - { - set: { - field: "field3", - value: "value3", - }, + }, + ], + pipeline_substitutions: { + "my-pipeline": { + processors: [ + { + set: { + field: "field3", + value: "value3", }, - ], - }, + }, + ], }, - component_template_substitutions: { - "my-component-template": { - template: { - mappings: { - dynamic: "true", - properties: { - field3: { - type: "keyword", - }, + }, + component_template_substitutions: { + "my-component-template": { + template: { + mappings: { + dynamic: "true", + properties: { + field3: { + type: "keyword", }, }, - settings: { - index: { - default_pipeline: "my-pipeline", - }, + }, + settings: { + index: { + default_pipeline: "my-pipeline", }, }, }, }, - index_template_substitutions: { - "my-index-template": { - index_patterns: ["my-index-*"], - composed_of: ["component_template_1", "component_template_2"], - }, + }, + index_template_substitutions: { + "my-index-template": { + index_patterns: ["my-index-*"], + composed_of: ["component_template_1", "component_template_2"], }, - mapping_addition: { - dynamic: "strict", - properties: { - foo: { - type: "keyword", - }, + }, + mapping_addition: { + dynamic: "strict", + properties: { + foo: { + type: "keyword", }, }, }, diff --git a/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc index 21bcc10b8..644390ba0 100644 --- a/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc +++ b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc @@ -3,13 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_security/oidc/prepare", - body: { - iss: "http://127.0.0.1:8080/", - login_hint: "this_is_an_opaque_string", - }, +const response = await client.security.oidcPrepareAuthentication({ + iss: "http://127.0.0.1:8080/", + login_hint: "this_is_an_opaque_string", }); console.log(response); ---- diff --git a/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc b/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc index 592744a30..aa1a436a0 100644 --- a/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc +++ b/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc @@ -3,10 +3,9 @@ [source, js] ---- -const response = await client.transport.request({ - 
method: "PUT", - path: "/_ingest/ip_location/database/my-database-2", - body: { +const response = await client.ingest.putIpLocationDatabase({ + id: "my-database-2", + configuration: { name: "standard_location", ipinfo: {}, }, diff --git a/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc b/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc index e80e90ffd..3875298ba 100644 --- a/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc +++ b/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc @@ -3,9 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "GET", - path: "/_ingest/ip_location/database/my-database-id", +const response = await client.ingest.getIpLocationDatabase({ + id: "my-database-id", }); console.log(response); ---- diff --git a/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc b/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc index 1fe5e6b4c..ff630da8a 100644 --- a/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc +++ b/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc @@ -3,9 +3,6 @@ [source, js] ---- -const response = await client.transport.request({ - method: "GET", - path: "/_security/settings", -}); +const response = await client.security.getSettings(); console.log(response); ---- diff --git a/docs/doc_examples/dde92fdf3469349ffe2c81764333543a.asciidoc b/docs/doc_examples/dde92fdf3469349ffe2c81764333543a.asciidoc new file mode 100644 index 000000000..d4834c92a --- /dev/null +++ b/docs/doc_examples/dde92fdf3469349ffe2c81764333543a.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createFrom({ + source: "my-index", + dest: "my-new-index", + create_from: { + remove_index_blocks: false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc index febdc3354..327abd471 100644 --- a/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc +++ b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc @@ -3,12 +3,8 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_security/oidc/prepare", - body: { - realm: "oidc1", - }, +const response = await client.security.oidcPrepareAuthentication({ + realm: "oidc1", }); console.log(response); ---- diff --git a/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc b/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc index ba52d081d..42726dbd2 100644 --- a/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc +++ b/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc @@ -3,38 +3,34 @@ [source, js] ---- -const response = await client.transport.request({ - method: "POST", - path: "/_ingest/_simulate", - body: { - docs: [ - { - _index: "my-index", - _id: "123", - _source: { - foo: "foo", - }, +const response = await client.simulate.ingest({ + docs: [ + { + _index: "my-index", + _id: "123", + _source: { + foo: "foo", }, - { - _index: "my-index", - _id: "456", - _source: { - bar: "rab", - }, + }, + { + _index: "my-index", + _id: "456", + _source: { + bar: "rab", }, - ], - component_template_substitutions: { - "my-mappings_template": { - template: { - mappings: { - dynamic: "strict", - properties: { - foo: { - type: "keyword", - }, - bar: { - type: "keyword", - }, + }, + ], + 
component_template_substitutions: { + "my-mappings_template": { + template: { + mappings: { + dynamic: "strict", + properties: { + foo: { + type: "keyword", + }, + bar: { + type: "keyword", }, }, }, diff --git a/docs/doc_examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc b/docs/doc_examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc new file mode 100644 index 000000000..d130ff537 --- /dev/null +++ b/docs/doc_examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createFrom({ + source: ".ml-anomalies-custom-example", + dest: ".reindexed-v9-ml-anomalies-custom-example", + create_from: null, +}); +console.log(response); +---- diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index e216c1981..b175e0db9 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -1,14 +1,18 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html +comment: | + IMPORTANT: This file is autogenerated, DO NOT send pull requests that change this file directly. + You should update the script that does the generation, which can be found in: + https://github.com/elastic/elastic-client-generator-js --- # API Reference [api-reference] - -## bulk [_bulk] - -Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed. +## client.bulk [_bulk] +Bulk index or delete documents. +Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. +This reduces overhead and can greatly increase indexing speed. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: @@ -33,29 +37,34 @@ action_and_meta_data\n optional_source\n ``` -The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. A `create` action fails if a document with the same ID already exists in the target An `index` action adds or replaces a document as necessary. - -::::{note} -Data streams support only the `create` action. To update or delete a document in a data stream, you must target the backing index containing the document. -:::: +The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. +A `create` action fails if a document with the same ID already exists in the target +An `index` action adds or replaces a document as necessary. +NOTE: Data streams support only the `create` action. +To update or delete a document in a data stream, you must target the backing index containing the document. An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. -::::{note} -The final line of data must end with a newline character (`\n`). Each newline character may be preceded by a carriage return (`\r`). When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. 
Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.
-::::
-
+NOTE: The final line of data must end with a newline character (`\n`).
+Each newline character may be preceded by a carriage return (`\r`).
+When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.
+Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.

-If you provide a target in the request path, it is used for any actions that don’t explicitly specify an `_index` argument.
+If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.

-A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.
+A note on the format: the idea here is to make processing as fast as possible.
+As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.

Client libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.

-There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.
+There is no "correct" number of actions to perform in a single bulk request.
+Experiment with different settings to find the optimal size for your particular workload.
+Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size.
+It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.
+For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.

**Client support for bulk requests**

@@ -70,7 +79,8 @@ Some of the officially supported clients provide helpers to assist with bulk req
**Submitting bulk requests with cURL**

-If you’re providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. The latter doesn’t preserve newlines. For example:
+If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.
+The latter doesn't preserve newlines.
For example:

```
$ cat requests
@@ -82,20 +92,21 @@ $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --
**Optimistic concurrency control**

-Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.
+Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.
+The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.

**Versioning**

-Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index or delete operation based on the `_version` mapping. It also support the `version_type`.
+Each bulk item can include the version value using the `version` field.
+It automatically follows the behavior of the index or delete operation based on the `_version` mapping.
+It also supports the `version_type`.

**Routing**

-Each bulk item can include the routing value using the `routing` field. It automatically follows the behavior of the index or delete operation based on the `_routing` mapping.
-
-::::{note}
-Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
-::::
+Each bulk item can include the routing value using the `routing` field.
+It automatically follows the behavior of the index or delete operation based on the `_routing` mapping.
+NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.

**Wait for active shards**

@@ -105,121 +116,114 @@ When making bulk calls, you can set the `wait_for_active_shards` parameter to re
Control when the changes made by this request are visible to search.

-::::{note}
-Only the shards that receive the bulk request will be affected by refresh. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all.
-::::
-
+NOTE: Only the shards that receive the bulk request will be affected by refresh.
+Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.
+The request will only wait for those three shards to refresh.
+The other two shards that make up the index do not participate in the `_bulk` request at all.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk)

```ts
client.bulk({ ... })
```
-
-
-### Arguments [_arguments]
-
-* **Request (object):**
-
- * **`index` (Optional, string)**: The name of the data stream, index, or index alias to perform bulk actions on.
- * **`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])** - * **`list_executed_pipelines` (Optional, boolean)**: If `true`, the response will include the ingest pipelines that were run for each index or create. - * **`pipeline` (Optional, string)**: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. - * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`timeout` (Optional, string | -1 | 0)**: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. - * **`require_alias` (Optional, boolean)**: If `true`, the request’s actions must target an index alias. - * **`require_data_stream` (Optional, boolean)**: If `true`, the request’s actions must target a data stream (existing or to be created). - - - -## clear_scroll [_clear_scroll] - -Clear a scrolling search. Clear the search context and results for a scrolling search. +### Arguments [_arguments_bulk] + +#### Request (object) [_request_bulk] + +- **`index` (Optional, string)**: The name of the data stream, index, or index alias to perform bulk actions on. +- **`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])** +- **`include_source_on_error` (Optional, boolean)**: True or false if to include the document source in the error message in case of parsing errors. 
+- **`list_executed_pipelines` (Optional, boolean)**: If `true`, the response will include the ingest pipelines that were run for each index or create. +- **`pipeline` (Optional, string)**: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. +- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. +- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`timeout` (Optional, string | -1 | 0)**: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. +- **`require_alias` (Optional, boolean)**: If `true`, the request's actions must target an index alias. +- **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created). + +## client.clearScroll [_clear_scroll] +Clear a scrolling search. +Clear the search context and results for a scrolling search. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll) ```ts client.clearScroll({ ... }) ``` +### Arguments [_arguments_clear_scroll] +#### Request (object) [_request_clear_scroll] -### Arguments [_arguments_2] - -* **Request (object):** - - * **`scroll_id` (Optional, string | string[])**: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. +- **`scroll_id` (Optional, string | string[])**: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. 
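To make the scroll/clear-scroll lifecycle concrete, here is a minimal sketch. It assumes an already-configured 8.x client instance named `client` and an existing index; the index name, keep-alive, and page size are placeholder values.

```ts
// Open a scrolling search; the response carries a `_scroll_id` that
// identifies the server-side search context.
const response = await client.search({
  index: 'my-index-000001',
  scroll: '30s',
  size: 100,
  query: { match_all: {} }
})

// ...page through further hits with client.scroll({ scroll_id: ... })...

// Release the search context as soon as it is no longer needed.
await client.clearScroll({ scroll_id: response._scroll_id })
```

Note that `response._scroll_id` assumes the 8.x client, where API methods resolve directly to the response body.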
- - -## close_point_in_time [_close_point_in_time] - -Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. +## client.closePointInTime [_close_point_in_time] +Close a point in time. +A point in time must be opened explicitly before being used in search requests. +The `keep_alive` parameter tells Elasticsearch how long it should persist. +A point in time is automatically closed when the `keep_alive` period has elapsed. +However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time) ```ts client.closePointInTime({ id }) ``` +### Arguments [_arguments_close_point_in_time] +#### Request (object) [_request_close_point_in_time] -### Arguments [_arguments_3] - -* **Request (object):** - - * **`id` (string)**: The ID of the point-in-time. - - +- **`id` (string)**: The ID of the point-in-time. -## count [_count] +## client.count [_count] +Count search results. +Get the number of documents matching a query. -Count search results. Get the number of documents matching a query. - -The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. The latter must be nested in a `query` key, which is the same as the search API. +The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. +The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. -The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. +The operation is broadcast across all shards. +For each shard ID group, a replica is chosen and the search is run against it. +This means that replicas increase the scalability of the count. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count) ```ts client.count({ ... }) ``` - - -### Arguments [_arguments_4] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. 
- * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. The query is optional, and when not provided, it will use `match_all` to count all the docs. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - * **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded, or aliased indices are ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - * **`min_score` (Optional, number)**: The minimum `_score` value that documents must have to be included in the result. - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. 
When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. - * **`q` (Optional, string)**: The query in Lucene query string syntax. - - - -## create [_create] - +### Arguments [_arguments_count] + +#### Request (object) [_request_count] + +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded, or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`min_score` (Optional, number)**: The minimum `_score` value that documents must have to be included in the result. 
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
+- **`q` (Optional, string)**: The query in Lucene query string syntax. This parameter cannot be used with a request body.

## client.create [_create]
Create a new document in the index.

You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs.
Using `_create` guarantees that the document is indexed only if it does not already exist.
It returns a 409 response when a document with the same ID already exists in the index.
To update an existing document, you must use the `/<target>/_doc/` API.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

@@ -230,91 +234,109 @@ Automatic data stream creation requires a matching index template with data stre

**Automatically create data streams and indices**

-If the request’s target doesn’t exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.
-
-If the target doesn’t exist and doesn’t match a data stream template, the operation automatically creates the index and applies any matching index templates.
+If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.

-::::{note}
-Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.
-::::
+If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.

+NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.

-If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed.
+If no mapping exists, the index operation creates a dynamic mapping.
+By default, new fields and objects are automatically added to the mapping if needed.

-Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.
Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. - -::::{note} -The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. -:::: +Automatic index creation is controlled by the `action.auto_create_index` setting. +If it is `true`, any index can be created automatically. +You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. +Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. +When a list is specified, the default behaviour is to disallow. +NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. +It does not affect the creation of data streams. **Routing** -By default, shard placement — or routing — is controlled by using a hash of the document’s ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. - -When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. +By default, shard placement — or routing — is controlled by using a hash of the document's ID value. +For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. -::::{note} -Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. -:::: +When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. +This does come at the (very minimal) cost of an additional document parsing pass. +If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. +NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** -The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. +The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. +After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** -To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. 
By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter.
+To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
+If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.
+By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).
+This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.
+To alter this behavior per operation, use the `wait_for_active_shards` request parameter.

-Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error.
+Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).
+Specifying a negative value or a number greater than the number of shard copies will throw an error.

-For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.
+For example, suppose you have a cluster of three nodes, A, B, and C and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).
+If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.
+This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
+If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.
+This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
+However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. +The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. -It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. +It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. +After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. +The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) ```ts client.create({ id, index }) ``` - - -### Arguments [_arguments_5] - -* **Request (object):** - - * **`id` (string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. - * **`index` (string)**: The name of the data stream or index to target. If the target doesn’t exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn’t exist and doesn’t match a data stream template, this request creates the index. - * **`document` (Optional, object)**: A document. - * **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. - * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - * **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. 
By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur.
- * **`version` (Optional, number)**: The explicit version number for concurrency control. It must be a non-negative long number.
- * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type.
- * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.
+### Arguments [_arguments_create]
+
+#### Request (object) [_request_create]
+
+- **`id` (string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format.
+- **`index` (string)**: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index.
+- **`document` (Optional, object)**: A document.
+- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
+- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
+- **`include_source_on_error` (Optional, boolean)**: If `true`, the document source is included in the error message in case of parsing errors.
+- **`op_type` (Optional, Enum("index" | "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required.
+- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
+- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
+- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+- **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created).
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing.
The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. +- **`version` (Optional, number)**: The explicit version number for concurrency control. It must be a non-negative long number. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. + +## client.delete [_delete] Delete a document. Remove a JSON document from the specified index. -::::{note} -You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document. -:::: - +NOTE: You cannot send deletion requests directly to a data stream. +To delete a document in a data stream, you must target the backing index containing the document. **Optimistic concurrency control** -Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. +Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. +If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Versioning** -Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document’s version remains available is determined by the `index.gc_deletes` index setting. +Each document indexed is versioned. +When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. +Every write operation run on a document, deletes included, causes its version to be incremented. +The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. +The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. 
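As a hedged sketch of the conditional delete described under optimistic concurrency control (the index name, document ID, and sequence values below are placeholders taken from an earlier read or write):

```ts
// Delete only if the document was last modified at the given
// sequence number and primary term.
await client.delete({
  index: 'my-index-000001',
  id: '1',
  if_seq_no: 5,
  if_primary_term: 1
})
// If the document changed in the meantime, Elasticsearch rejects the
// request with a 409 version conflict.
```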
**Routing** @@ -328,38 +350,35 @@ For example: DELETE /my-index-000001/_doc/1?routing=shard-1 ``` -This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified. +This request deletes the document with ID 1, but it is routed based on the user. +The document is not deleted if the correct routing is not specified. **Distributed** -The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. +The delete operation gets hashed into a specific shard ID. +It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete) ```ts client.delete({ id, index }) ``` +### Arguments [_arguments_delete] +#### Request (object) [_request_delete] -### Arguments [_arguments_6] - -* **Request (object):** - - * **`id` (string)**: A unique identifier for the document. - * **`index` (string)**: The name of the target index. - * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. - * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. - * **`version` (Optional, number)**: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. - - - -## delete_by_query [_delete_by_query] +- **`id` (string)**: A unique identifier for the document. +- **`index` (string)**: The name of the target index. +- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. +- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. 
If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. +- **`version` (Optional, number)**: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. +## client.deleteByQuery [_delete_by_query] Delete documents. Deletes documents that match the specified query. @@ -369,46 +388,59 @@ If the Elasticsearch security features are enabled, you must have the following * `read` * `delete` or `write` -You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. - -::::{note} -Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. -:::: +You can specify the query criteria in the request URI or the request body using the same syntax as the search API. +When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. +If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. +NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. -While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick, they are not rolled back. 
+While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.
+A bulk delete request is performed for each batch of matching documents.
+If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off.
+If the maximum retry limit is reached, processing halts and all failed requests are returned in the response.
+Any delete requests that completed successfully still stick; they are not rolled back.

-You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query.
+You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.
+Note that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query.

**Throttling delete requests**

-To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to disable throttling.
+To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number.
+This pads each batch with a wait time to throttle the rate.
+Set `requests_per_second` to `-1` to disable throttling.

-Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`:
+Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.
+The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
+By default the batch size is `1000`, so if `requests_per_second` is set to `500`:

```
target_time = 1000 / 500 per second = 2 seconds
wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
```

-Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth".
+Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.
+This is "bursty" instead of "smooth".

**Slicing**

-Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts.
+Delete by query supports sliced scroll to parallelize the delete process.
+This can improve efficiency and provide a convenient way to break the request down into smaller parts.

-Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use.
This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks: +Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. +This setting will use one slice per shard, up to a certain limit. +If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. +Adding slices to the delete by query operation creates sub-requests which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with slices only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. -* Due to the nature of `slices` each sub-request won’t get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. -If you’re slicing manually or otherwise tuning automatic slicing, keep in mind that: +If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. * Delete performance scales linearly across available resources with the number of slices. @@ -425,261 +457,244 @@ POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel The task ID can be found by using the get tasks API. -Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. +Cancellation should happen quickly but might take a few seconds. +The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. 
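In client terms, a sketch of the same flow (the index and query are placeholders): launching the operation with `wait_for_completion: false` returns the task ID, which can then be passed to the task cancel API.

```ts
// Start the delete by query as a background task.
const { task } = await client.deleteByQuery({
  index: 'my-index-000001',
  query: { match: { 'user.id': 'elkbee' } },
  wait_for_completion: false
})

// Cancel it; the task may take a few seconds to terminate itself.
await client.tasks.cancel({ task_id: task })
```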
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query) ```ts client.deleteByQuery({ index }) ``` - - -### Arguments [_arguments_7] - -* **Request (object):** - - * **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. - * **`max_docs` (Optional, number)**: The maximum number of documents to delete. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to delete specified with Query DSL. - * **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. - * **`conflicts` (Optional, Enum("abort" | "proceed"))**: What to do if delete by query hits version conflicts: `abort` or `proceed`. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - * **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - * **`from` (Optional, number)**: Starting offset (default: 0) - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. 
- * **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. - * **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API’s `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`. - * **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. Defaults to the index-level setting. - * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`q` (Optional, string)**: A query in the Lucene query string syntax. - * **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. - * **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation. - * **`search_timeout` (Optional, string | -1 | 0)**: The explicit timeout for each search request. It defaults to no timeout. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. - * **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. - * **`sort` (Optional, string[])**: A list of `:` pairs. - * **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes. - * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. - * **`timeout` (Optional, string | -1 | 0)**: The period each deletion request waits for active shards. - * **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. - * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. - - - -## delete_by_query_rethrottle [_delete_by_query_rethrottle] - +### Arguments [_arguments_delete_by_query] + +#### Request (object) [_request_delete_by_query] + +- **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). 
To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`max_docs` (Optional, number)**: The maximum number of documents to delete. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to delete specified with Query DSL. +- **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`conflicts` (Optional, Enum("abort" | "proceed"))**: What to do if delete by query hits version conflicts: `abort` or `proceed`. +- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +- **`from` (Optional, number)**: Starting offset (default: 0) +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. 
Unlike the delete API, it does not support `wait_for`.
+- **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. Defaults to the index-level setting.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`q` (Optional, string)**: A query in the Lucene query string syntax.
+- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling.
+- **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation.
+- **`search_timeout` (Optional, string | -1 | 0)**: The explicit timeout for each search request. It defaults to no timeout.
+- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
+- **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into.
+- **`sort` (Optional, string[])**: A list of `:` pairs.
+- **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes.
+- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
+- **`timeout` (Optional, string | -1 | 0)**: The period each deletion request waits for active shards.
+- **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit.
+- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.
+
+## client.deleteByQueryRethrottle [_delete_by_query_rethrottle]
Throttle a delete by query operation.
-Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
+Change the number of requests per second for a particular delete by query operation.
+Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
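+
+As a hedged sketch of how the two APIs fit together (the index name and query here are illustrative, not from the original text): a delete by query started with `wait_for_completion: false` returns a task ID, which is what this rethrottle API expects.
+
+```ts
+// Launch an asynchronous delete by query; the response carries a task identifier.
+const { task } = await client.deleteByQuery({
+  index: 'my-index-000001',
+  wait_for_completion: false,
+  requests_per_second: 100,
+  query: { match: { 'user.id': 'elkbee' } }
+})
+
+// Later, slow the running operation down (or pass -1 to disable throttling).
+await client.deleteByQueryRethrottle({
+  task_id: String(task),
+  requests_per_second: 10
+})
+```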
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle) ```ts client.deleteByQueryRethrottle({ task_id }) ``` +### Arguments [_arguments_delete_by_query_rethrottle] +#### Request (object) [_request_delete_by_query_rethrottle] -### Arguments [_arguments_8] +- **`task_id` (string | number)**: The ID for the task. +- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. -* **Request (object):** +## client.deleteScript [_delete_script] +Delete a script or search template. +Deletes a stored script or search template. - * **`task_id` (string | number)**: The ID for the task. - * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. - - - -## delete_script [_delete_script] - -Delete a script or search template. Deletes a stored script or search template. - -[Endpoint documentation](docs-content://explore-analyze/scripting.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script) ```ts client.deleteScript({ id }) ``` +### Arguments [_arguments_delete_script] +#### Request (object) [_request_delete_script] -### Arguments [_arguments_9] - -* **Request (object):** - - * **`id` (string)**: Identifier for the stored script or search template. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -## exists [_exists] +- **`id` (string)**: The identifier for the stored script or search template. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +## client.exists [_exists] Check a document. -Verify that a document exists. For example, check to see if a document with the `_id` 0 exists: +Verify that a document exists. +For example, check to see if a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` -If the document exists, the API returns a status code of `200 - OK`. If the document doesn’t exist, the API returns `404 - Not Found`. +If the document exists, the API returns a status code of `200 - OK`. +If the document doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You can use the `version` parameter to check the document only if its current version is equal to the specified one. -Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn’t disappear immediately, although you won’t be able to access it. 
Elasticsearch cleans up deleted documents in the background as you continue to index more data. +Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. +The old version of the document doesn't disappear immediately, although you won't be able to access it. +Elasticsearch cleans up deleted documents in the background as you continue to index more data. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get) ```ts client.exists({ id, index }) ``` +### Arguments [_arguments_exists] +#### Request (object) [_request_exists] -### Arguments [_arguments_10] - -* **Request (object):** - - * **`id` (string)**: A unique document identifier. - * **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. - * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. - * **`version` (Optional, number)**: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. - - - -## exists_source [_exists_source] +- **`id` (string)**: A unique document identifier. +- **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. 
By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. +- **`version` (Optional, number)**: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +## client.existsSource [_exists_source] Check for a document source. -Check whether a document source exists in an index. For example: +Check whether a document source exists in an index. +For example: ``` HEAD my-index-000001/_source/1 ``` -A document’s source is not available if it is disabled in the mapping. +A document's source is not available if it is disabled in the mapping. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get) ```ts client.existsSource({ id, index }) ``` +### Arguments [_arguments_exists_source] +#### Request (object) [_request_exists_source] -### Arguments [_arguments_11] +- **`id` (string)**: A unique identifier for the document. +- **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. 
Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. +- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. -* **Request (object):** - - * **`id` (string)**: A unique identifier for the document. - * **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. - * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. - * **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. - - - -## explain [_explain] - -Explain a document match result. Returns information about why a specific document matches, or doesn’t match, a query. +## client.explain [_explain] +Explain a document match result. +Get information about why a specific document matches, or doesn't match, a query. +It computes a score explanation for a query and a specific document. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain) ```ts client.explain({ id, index }) ``` - - -### Arguments [_arguments_12] - -* **Request (object):** - - * **`id` (string)**: Defines the document ID. - * **`index` (string)**: Index names used to limit the request. Only a single index name can be provided to this parameter. 
- * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. - * **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. - * **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string. - * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. - * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: True or false to return the `_source` field or not, or a list of fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. - * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return in the response. - * **`q` (Optional, string)**: Query in the Lucene query string syntax. - - - -## field_caps [_field_caps] - +### Arguments [_arguments_explain] + +#### Request (object) [_request_explain] + +- **`id` (string)**: The document identifier. +- **`index` (string)**: Index names that are used to limit the request. Only a single index name can be provided to this parameter. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. +- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. 
This parameter can be used only when the `q` query string parameter is specified.
+- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified.
+- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`_source` (Optional, boolean | string | string[])**: `true` or `false` to return the `_source` field or not, or a list of fields to return.
+- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return in the response.
+- **`q` (Optional, string)**: The query in the Lucene query string syntax.
+
+## client.fieldCaps [_field_caps]
Get the field capabilities.

Get information about the capabilities of fields among multiple indices.

-For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.
+For data streams, the API returns field capabilities among the stream’s backing indices.
+It returns runtime fields like any other field.
+For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps)

```ts
client.fieldCaps({ ... })
```

+### Arguments [_arguments_field_caps]
+
+#### Request (object) [_request_field_caps]
+
-### Arguments [_arguments_13]
-
-* **Request (object):**
-
- * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all.
- * **`fields` (Optional, string | string[])**: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported.
- * **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Allows to filter indices if the provided query rewrites to match_none on every shard. - * **`runtime_mappings` (Optional, Record)**: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. - * **`include_unmapped` (Optional, boolean)**: If true, unmapped fields are included in the response. - * **`filters` (Optional, string)**: An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent - * **`types` (Optional, string[])**: Only return results for fields that have one of the types in the list - * **`include_empty_fields` (Optional, boolean)**: If false, empty fields are not included in the response. - - - -## get [_get] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. +- **`fields` (Optional, string | string[])**: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. 
+- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. +- **`runtime_mappings` (Optional, Record)**: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. +- **`include_unmapped` (Optional, boolean)**: If true, unmapped fields are included in the response. +- **`filters` (Optional, string)**: A list of filters to apply to the response. +- **`types` (Optional, string[])**: A list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. +- **`include_empty_fields` (Optional, boolean)**: If false, empty fields are not included in the response. +## client.get [_get] Get a document by its ID. Get a document and its source or stored fields from an index. -By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the `realtime` parameter to false. 
+By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search).
+In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields.
+To turn off realtime behavior, set the `realtime` parameter to false.

**Source filtering**

-By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. You can turn off `_source` retrieval by using the `_source` parameter:
+By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off.
+You can turn off `_source` retrieval by using the `_source` parameter:

```
GET my-index-000001/_doc/0?_source=false
```

-If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead Both parameters take a comma separated list of fields or wildcard expressions. For example:
+If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields.
+This can be helpful with large documents where partial retrieval can save on network overhead.
+Both parameters take a comma-separated list of fields or wildcard expressions.
+For example:

```
GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
@@ -693,102 +708,97 @@ GET my-index-000001/_doc/0?_source=*.id
```

**Routing**

-If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example:
+If routing is used during indexing, the routing value also needs to be specified to retrieve a document.
+For example:

```
GET my-index-000001/_doc/2?routing=user1
```

-This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified.
+This request gets the document with ID 2, but it is routed based on the user.
+The document is not fetched if the correct routing is not specified.

**Distributed**

-The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be.
+The GET operation is hashed into a specific shard ID.
+It is then redirected to one of the replicas within that shard ID and returns the result.
+The replicas are the primary shard and its replicas within that shard ID group.
+This means that the more replicas you have, the better your GET scaling will be.

**Versioning support**

You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one.

-Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn’t disappear immediately, although you won’t be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.
+Internally, Elasticsearch has marked the old document as deleted and added an entirely new document.
+The old version of the document doesn't disappear immediately, although you won't be able to access it.
+Elasticsearch cleans up deleted documents in the background as you continue to index more data.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get)

```ts
client.get({ id, index })
```

+### Arguments [_arguments_get]
+
+#### Request (object) [_request_get]
+
-### Arguments [_arguments_14]
-
-* **Request (object):**
-
- * **`id` (string)**: A unique document identifier.
- * **`index` (string)**: The name of the index that contains the document.
- * **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this paramater to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index.
- * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name.
- * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
- * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
- * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
- * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
- * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
- * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
- * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can’t be returned;if specified, the request fails.
- * **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed.
- * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type.
+- **`id` (string)**: A unique document identifier.
+- **`index` (string)**: The name of the index that contains the document.
+- **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name.
+- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
+- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
+- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can't be returned; if specified, the request fails.
+- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed.
+- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type.
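+
+As a quick illustration of the source-filtering options described above, a hedged sketch using a hypothetical `my-index-000001` index:
+
+```ts
+// Fetch only selected parts of `_source` to cut down on network overhead.
+const doc = await client.get({
+  index: 'my-index-000001',
+  id: '0',
+  _source_includes: '*.id',
+  _source_excludes: 'entities'
+})
+
+// Skip `_source` entirely when only metadata (version, routing, ...) is needed.
+const meta = await client.get({
+  index: 'my-index-000001',
+  id: '0',
+  _source: false
+})
+```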
- - +- **`id` (string)**: A unique document identifier. +- **`index` (string)**: The name of the index that contains the document. +- **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this paramater to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can't be returned;if specified, the request fails. +- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. -## get_script [_get_script] +## client.getScript [_get_script] +Get a script or search template. +Retrieves a stored script or search template. -Get a script or search template. Retrieves a stored script or search template. 
- -[Endpoint documentation](docs-content://explore-analyze/scripting.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script) ```ts client.getScript({ id }) ``` +### Arguments [_arguments_get_script] +#### Request (object) [_request_get_script] -### Arguments [_arguments_15] - -* **Request (object):** - - * **`id` (string)**: Identifier for the stored script or search template. - * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master - - - -## get_script_context [_get_script_context] +- **`id` (string)**: The identifier for the stored script or search template. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +## client.getScriptContext [_get_script_context] Get script contexts. Get a list of supported script contexts and their methods. -[Endpoint documentation](elasticsearch://reference/scripting-languages/painless/painless-contexts.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context) ```ts client.getScriptContext() ``` - -## get_script_languages [_get_script_languages] - +## client.getScriptLanguages [_get_script_languages] Get script languages. Get a list of available script types, languages, and contexts. -[Endpoint documentation](docs-content://explore-analyze/scripting.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages) ```ts client.getScriptLanguages() ``` +## client.getSource [_get_source] +Get a document's source. -## get_source [_get_source] - -Get a document’s source. - -Get the source of a document. For example: +Get the source of a document. +For example: ``` GET my-index-000001/_source/1 @@ -805,72 +815,64 @@ GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities ```ts client.getSource({ id, index }) ``` +### Arguments [_arguments_get_source] +#### Request (object) [_request_get_source] -### Arguments [_arguments_16] - -* **Request (object):** - - * **`id` (string)**: A unique document identifier. - * **`index` (string)**: The name of the index that contains the document. - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. - * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. - * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. 
- * **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. - - - -## health_report [_health_report] +- **`id` (string)**: A unique document identifier. +- **`index` (string)**: The name of the index that contains the document. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. +- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. +- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. -Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. +## client.healthReport [_health_report] +Get the cluster health. +Get a report with the health status of an Elasticsearch cluster. +The report contains a list of indicators that compose Elasticsearch functionality. -Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. +Each indicator has a health status of: green, unknown, yellow or red. +The indicator will provide an explanation and metadata describing the reason for its current health status. The cluster’s status is controlled by the worst indicator status. -In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. +In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. +Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. -Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. 
The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. - -::::{note} -The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. -:::: +Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. +The root cause and remediation steps are encapsulated in a diagnosis. +A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. +NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. +When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report) ```ts client.healthReport({ ... }) ``` +### Arguments [_arguments_health_report] +#### Request (object) [_request_health_report] -### Arguments [_arguments_17] - -* **Request (object):** - - * **`feature` (Optional, string | string[])**: A feature of the cluster, as returned by the top-level health report API. - * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout. - * **`verbose` (Optional, boolean)**: Opt-in for more information about the health of the system. - * **`size` (Optional, number)**: Limit the number of affected resources the health report API returns. - - - -## index [_index] +- **`feature` (Optional, string | string[])**: A feature of the cluster, as returned by the top-level health report API. +- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout. +- **`verbose` (Optional, boolean)**: Opt-in for more information about the health of the system. +- **`size` (Optional, number)**: Limit the number of affected resources the health report API returns. +## client.index [_index] Create or update a document in an index. -Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. - -::::{note} -You cannot use this API to send update requests for existing documents in a data stream. -:::: +Add a JSON document to the specified data stream or index and make it searchable. +If the target is an index and the document already exists, the request updates the document and increments its version. +NOTE: You cannot use this API to send update requests for existing documents in a data stream. 
If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: @@ -880,76 +882,96 @@ If the Elasticsearch security features are enabled, you must have the following Automatic data stream creation requires a matching index template with data stream enabled. -::::{note} -Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. -:::: - +NOTE: Replica shards might not all be started when an indexing operation returns successfully. +By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. **Automatically create data streams and indices** -If the request’s target doesn’t exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. +If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. -If the target doesn’t exist and doesn’t match a data stream template, the operation automatically creates the index and applies any matching index templates. +If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. -::::{note} -Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. -:::: +NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. +If no mapping exists, the index operation creates a dynamic mapping. +By default, new fields and objects are automatically added to the mapping if needed. -If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. - -Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. - -::::{note} -The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. -:::: +Automatic index creation is controlled by the `action.auto_create_index` setting. +If it is `true`, any index can be created automatically. +You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. +Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. +When a list is specified, the default behaviour is to disallow. +NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. +It does not affect the creation of data streams. 
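+
+As a hedged illustration (the index patterns here are invented), the `action.auto_create_index` list can be updated dynamically through the cluster settings API:
+
+```ts
+// Allow auto-creation for `my-app-*`, explicitly block `restricted-*`;
+// with a list in place, anything unmatched is disallowed by default.
+await client.cluster.putSettings({
+  persistent: {
+    'action.auto_create_index': 'my-app-*,-restricted-*'
+  }
+})
+```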
**Optimistic concurrency control** -Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. +Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. +If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Routing** -By default, shard placement — or routing — is controlled by using a hash of the document’s ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. +By default, shard placement — or routing — is controlled by using a hash of the document's ID value. +For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. -When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. - -::::{note} -Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. -:::: +When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. +This does come at the (very minimal) cost of an additional document parsing pass. +If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. +NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** -The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. +The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. +After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** -To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter. 
+To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
+If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.
+By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).
+This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.
+To alter this behavior per operation, use the `wait_for_active_shards` request parameter.

-Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error.
+Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).
+Specifying a negative value or a number greater than the number of shard copies will throw an error.

-For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.
+For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).
+If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.
+This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
+If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.
+This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
+However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.
+The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.
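
A minimal sketch of the per-request override with the JavaScript client (the index name and document are placeholders, and a configured `client` instance is assumed):

```ts
// Require three active shard copies before the write proceeds,
// matching the three-node example above.
await client.index({
  index: 'my-index-000001',
  document: { title: 'example' },
  wait_for_active_shards: 3
})
```
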
-It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.
+It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.
+After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.
+The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.

**No operation (noop) updates**

-When updating a document by using this API, a new version of the document is always created even if the document hasn’t changed. If this isn’t acceptable use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn’t available on this API because it doesn’t fetch the old source and isn’t able to compare it against the new source.
+When updating a document by using this API, a new version of the document is always created even if the document hasn't changed.
+If this isn't acceptable, use the `_update` API with `detect_noop` set to `true`.
+The `detect_noop` option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source.

-There isn’t a definitive rule for when noop updates aren’t acceptable. It’s a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.
+There isn't a definitive rule for when noop updates aren't acceptable.
+It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.

**Versioning**

-Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.
-
-::::{note}
-Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks.
-::::
+Each indexed document is given a version number.
+By default, internal versioning is used that starts at 1 and increments with each update, deletes included.
+Optionally, the version number can be set to an external value (for example, if maintained in a database).
+To enable this functionality, `version_type` should be set to `external`.
+The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.
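
For illustration, the client-side equivalent of the REST example that follows might look like this sketch (the index name and document are placeholders):

```ts
// Index with an external version maintained in another system.
// The write succeeds only if 2 is higher than the stored version.
await client.index({
  index: 'my-index-000001',
  id: '1',
  document: { user: { id: 'elkbee' } },
  version: 2,
  version_type: 'external'
})
```
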
+NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. +If no version is provided, the operation runs without any version checks. -When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document’s version number, a version conflict will occur and the index operation will fail. For example: +When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. +If true, the document will be indexed and the new version number used. +If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external @@ -958,43 +980,41 @@ PUT my-index-000001/_doc/1?version=2&version_type=external "id": "elkbee" } } -``` -In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). +In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. +If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). -A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. +A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. +Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) ```ts client.index({ index }) ``` - - -### Arguments [_arguments_18] - -* **Request (object):** - - * **`index` (string)**: The name of the data stream or index to target. If the target doesn’t exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn’t exist and doesn’t match a data stream template, this request creates the index. You can check for existing targets with the resolve index API. - * **`id` (Optional, string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter. - * **`document` (Optional, object)**: A document. - * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. 
- * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. - * **`op_type` (Optional, Enum("index" | "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. - * **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. - * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - * **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. - * **`version` (Optional, number)**: An explicit version number for concurrency control. It must be a non-negative long number. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. - * **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. - - - -## info [_info] - -Get cluster info. Get basic build, version, and cluster information. +### Arguments [_arguments_index] + +#### Request (object) [_request_index] + +- **`index` (string)**: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API. +- **`id` (Optional, string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter. +- **`document` (Optional, object)**: A document. 
+- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
+- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
+- **`include_source_on_error` (Optional, boolean)**: Indicates whether to include the document source in the error message in case of parsing errors.
+- **`op_type` (Optional, Enum("index" | "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required.
+- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter.
+- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur.
+- **`version` (Optional, number)**: An explicit version number for concurrency control. It must be a non-negative long number.
+- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type.
+- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.
+- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.

## client.info [_info]
Get cluster info.
Get basic build, version, and cluster information.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info)

```ts
client.info()
```

## client.knnSearch [_knn_search]
Run a knn search.

NOTE: The kNN search API has been replaced by the `knn` option in the search API.
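
A sketch of the replacement that the note recommends (the field name, vector values, and index are invented for this example):

```ts
// Prefer the `knn` option of the search API over the deprecated kNN search API.
const response = await client.search({
  index: 'my-index',
  knn: {
    field: 'image_vector',
    query_vector: [0.12, 0.45, 0.78],
    k: 10,
    num_candidates: 100
  }
})
```
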
+Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. +Given a query vector, the API finds the k closest vectors and returns those documents as search hits. -Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. +Elasticsearch uses the HNSW algorithm to support efficient kNN search. +Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. +This means the results returned are not always the true k closest neighbors. -Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. +The kNN search API supports restricting the search using a filter. +The search will return the top k documents that also match the filter query. -The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. +A kNN search response has the exact same structure as a search API response. +However, certain sections have a meaning specific to kNN search: -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search) +* The document `_score` is determined by the similarity between the query and document vector. +* The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. + +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html) ```ts client.knnSearch({ index, knn }) ``` +### Arguments [_arguments_knn_search] +#### Request (object) [_request_knn_search] -### Arguments [_arguments_19] - -* **Request (object):** +- **`index` (string | string[])**: A list of index names to search; use `_all` or to perform the operation on all indices. +- **`knn` ({ field, query_vector, k, num_candidates })**: The kNN query to run. +- **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the `hits._source` property of the search response. +- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. +- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. +- **`fields` (Optional, string | string[])**: The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. 
+- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. +- **`routing` (Optional, string)**: A list of specific routing values. - * **`index` (string | string[])**: A list of index names to search; use `_all` or to perform the operation on all indices - * **`knn` ({ field, query_vector, k, num_candidates })**: kNN query to execute - * **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. - * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: The request returns doc values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. - * **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. - * **`fields` (Optional, string | string[])**: The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. 
- * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn’t provided, all documents are allowed to match. - * **`routing` (Optional, string)**: A list of specific routing values +## client.mget [_mget] +Get multiple documents. +Get multiple JSON documents by ID from one or more indices. +If you specify an index in the request URI, you only need to specify the document IDs in the request body. +To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. +**Filter source fields** -## mget [_mget] +By default, the `_source` field is returned for every document (if stored). +Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. +You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. -Get multiple documents. +**Get stored fields** -Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. +Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. +Any requested fields that are not stored are ignored. +You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget) ```ts client.mget({ ... 
})
```

### Arguments [_arguments_mget]

#### Request (object) [_request_mget]

- **`index` (Optional, string)**: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index.
- **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: The documents you want to retrieve. Required if no index is specified in the request URI.
- **`ids` (Optional, string | string[])**: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI.
- **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.
- **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default.
- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
- **`refresh` (Optional, boolean)**: If `true`, the request refreshes relevant shards before retrieving documents.
- **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
+- **`_source` (Optional, boolean | string | string[])**: True or false to return the `_source` field or not, or a list of fields to return. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string | string[])**: If `true`, retrieves the document fields stored in the index rather than the document `_source`. +## client.msearch [_msearch] Run multiple searches. -The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: +The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. +The structure is as follows: ``` header\n @@ -1087,189 +1117,194 @@ body\n This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. -::::{important} -The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. -:::: - +IMPORTANT: The final line of data must end with a newline character `\n`. +Each newline character may be preceded by a carriage return `\r`. +When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch) ```ts client.msearch({ ... }) ``` +### Arguments [_arguments_msearch] + +#### Request (object) [_request_msearch] + +- **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases to search. +- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. 
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. +- **`include_named_queries_score` (Optional, boolean)**: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. +- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. +- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. +- **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. +- **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. +- **`routing` (Optional, string)**: Custom routing value used to route search operations to a specific shard. +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. +- **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. + +## client.msearchTemplate [_msearch_template] +Run multiple templated searches. +Run multiple templated searches with a single request. +If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. +For example: -### Arguments [_arguments_21] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases to search. - * **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. 
For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - * **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. - * **`include_named_queries_score` (Optional, boolean)**: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. - * **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. - * **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. - * **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. - * **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. - * **`routing` (Optional, string)**: Custom routing value used to route search operations to a specific shard. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. - * **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. - - - -## msearch_template [_msearch_template] +``` +$ cat requests +{ "index": "my-index" } +{ "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} +{ "index": "my-other-index" } +{ "id": "my-other-search-template", "params": { "query_type": "match_all" }} -Run multiple templated searches. +$ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo +``` -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template) ```ts client.msearchTemplate({ ... 
}) ``` +### Arguments [_arguments_msearch_template] +#### Request (object) [_request_msearch_template] -### Arguments [_arguments_22] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. - * **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** - * **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. - * **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the API can run. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`. - * **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. - * **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types. - +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. +- **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. +- **`max_concurrent_searches` (Optional, number)**: The maximum number of concurrent searches the API can run. +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. +- **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. +- **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types. +## client.mtermvectors [_mtermvectors] +Get multiple term vectors. -## mtermvectors [_mtermvectors] +Get multiple term vectors with a single request. +You can specify existing documents by index and ID or provide artificial documents in the body of the request. +You can specify the index in the request body or request URI. +The response contains a `docs` array with all the fetched termvectors. 
+Each element has the structure provided by the termvectors API. -Get multiple term vectors. +**Artificial documents** -You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. +You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. +The mapping used is determined by the specified `_index`. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors) ```ts client.mtermvectors({ ... }) ``` - - -### Arguments [_arguments_23] - -* **Request (object):** - - * **`index` (Optional, string)**: Name of the index that contains the documents. - * **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: Array of existing or artificial documents. - * **`ids` (Optional, string[])**: Simplified syntax to specify documents by their ID if they’re in the same index. - * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. - * **`field_statistics` (Optional, boolean)**: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. - * **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets. - * **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads. - * **`positions` (Optional, boolean)**: If `true`, the response includes term positions. - * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - * **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - * **`term_statistics` (Optional, boolean)**: If true, the response includes term frequency and document frequency. - * **`version` (Optional, number)**: If `true`, returns the document version as part of a hit. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: Specific version type. - - - -## open_point_in_time [_open_point_in_time] - +### Arguments [_arguments_mtermvectors] + +#### Request (object) [_request_mtermvectors] + +- **`index` (Optional, string)**: The name of the index that contains the documents. +- **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: An array of existing or artificial documents. +- **`ids` (Optional, string[])**: A simplified syntax to specify documents by their ID if they're in the same index. +- **`fields` (Optional, string | string[])**: A list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +- **`field_statistics` (Optional, boolean)**: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. +- **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets. 
+- **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads. +- **`positions` (Optional, boolean)**: If `true`, the response includes term positions. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`term_statistics` (Optional, boolean)**: If true, the response includes term frequency and document frequency. +- **`version` (Optional, number)**: If `true`, returns the document version as part of a hit. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. + +## client.openPointInTime [_open_point_in_time] Open a point in time. -A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. +A search request by default runs against the most recent visible data of the target indices, +which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the +state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple +search requests using the same point in time. For example, if refreshes happen between +`search_after` requests, then the results of those requests might not be consistent as changes happening +between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time. -Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with `search_after`. - -::::{important} -The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. -:::: +Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. +If you want to retrieve more hits, use PIT with `search_after`. +IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. -When a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a `NoShardAvailableActionException` exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. +When a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a `NoShardAvailableActionException` exception. 
+To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.

**Keeping point in time alive**

-The `keep_alive` parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request.
+The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time.
+The value does not need to be long enough to process all data — it just needs to be long enough for the next request.

-Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use.
+Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments.
+Once the smaller segments are no longer needed they are deleted.
+However, open point-in-times prevent the old segments from being deleted since they are still in use.

-::::{tip}
-Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles.
-::::
+TIP: Keeping older segments alive means that more disk space and file handles are needed.
+Ensure that you have configured your nodes to have ample free file handles.

-Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. Note that a point-in-time doesn’t prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.
+Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request.
+Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates.
+Note that a point-in-time doesn't prevent its associated indices from being deleted.
+You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time)

```ts
client.openPointInTime({ index, keep_alive })
```
### Arguments [_arguments_open_point_in_time]

#### Request (object) [_request_open_point_in_time]

- **`index` (string | string[])**: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices.
- **`keep_alive` (string | -1 | 0)**: Extend the length of time that the point in time persists.
- * **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. - * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`allow_partial_search_results` (Optional, boolean)**: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. - +- **`index` (string | string[])**: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices +- **`keep_alive` (string | -1 | 0)**: Extend the length of time that the point in time persists. +- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. +- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. 
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+- **`allow_partial_search_results` (Optional, boolean)**: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request.

+## client.ping [_ping]
+Ping the cluster.
+Get information about whether the cluster is running.

-## ping [_ping]
-
-Ping the cluster. Get information about whether the cluster is running.
-
-[Endpoint documentation](docs-content://get-started/index.md)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster)

```ts
client.ping()
```

+## client.putScript [_put_script]
+Create or update a script or search template.
+Creates or updates a stored script or search template.

-## put_script [_put_script]
-
-Create or update a script or search template. Creates or updates a stored script or search template.
-
-[Endpoint documentation](docs-content://explore-analyze/scripting.md)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script)

```ts
client.putScript({ id, script })
```

+### Arguments [_arguments_put_script]
+
+#### Request (object) [_request_put_script]

-### Arguments [_arguments_25]
-
-* **Request (object):**
-
-  * **`id` (string)**: Identifier for the stored script or search template. Must be unique within the cluster.
-  * **`script` ({ lang, options, source })**: Contains the script or search template, its parameters, and its language.
-  * **`context` (Optional, string)**: Context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context.
-  * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-  * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-
-## rank_eval [_rank_eval]
+- **`id` (string)**: The identifier for the stored script or search template. It must be unique within the cluster.
+- **`script` ({ lang, options, source })**: The script or search template, its parameters, and its language.
+- **`context` (Optional, string)**: The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context.
+- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out.
+- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out.
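+
+As a quick illustration, a minimal sketch of storing a Painless script with this client (the script `id`, field name, and `multiplier` parameter are hypothetical):
+
+```ts
+// Store a script that scales a numeric field by a caller-supplied multiplier.
+await client.putScript({
+  id: 'my-multiplier-script',
+  script: {
+    lang: 'painless',
+    source: "doc['my_field'].value * params['multiplier']"
+  }
+})
+```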
+## client.rankEval [_rank_eval]
Evaluate ranked search results.

Evaluate the quality of ranked search results over a set of typical search queries.

@@ -1279,32 +1314,31 @@ Evaluate the quality of ranked search results over a set of typical search queri
```ts
client.rankEval({ requests })
```

+### Arguments [_arguments_rank_eval]
+
+#### Request (object) [_request_rank_eval]

-### Arguments [_arguments_26]
-
-* **Request (object):**
-
-  * **`requests` ({ id, request, ratings, template_id, params }[])**: A set of typical search requests, together with their provided ratings.
-  * **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`.
-  * **`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })**: Definition of the evaluation metric to calculate.
-  * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
-  * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-  * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response.
-  * **`search_type` (Optional, string)**: Search operation type
-
-
-## reindex [_reindex]
+- **`requests` ({ id, request, ratings, template_id, params }[])**: A set of typical search requests, together with their provided ratings.
+- **`index` (Optional, string | string[])**: A list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`.
+- **`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })**: Definition of the evaluation metric to calculate.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response.
+- **`search_type` (Optional, string)**: Search operation type
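+
+A short sketch of evaluating one rated query with this client (the index name, query, and ratings below are hypothetical):
+
+```ts
+// Compute precision-at-10 for a single query against hand-rated documents.
+const evaluation = await client.rankEval({
+  index: 'my-index-000001',
+  requests: [{
+    id: 'berlin_query',
+    request: { query: { match: { text: 'berlin' } } },
+    ratings: [{ _index: 'my-index-000001', _id: 'doc1', rating: 1 }]
+  }],
+  metric: { precision: { k: 10, relevant_rating_threshold: 1 } }
+})
+console.log(evaluation.metric_score)
+```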
+## client.reindex [_reindex]
Reindex documents.

-Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself.
-
-::::{important}
-Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time.
-::::
+Copy documents from a source to a destination.
+You can copy all documents to the destination index or reindex a subset of the documents.
+The source can be any existing index, alias, or data stream.
+The destination must differ from the source.
+For example, you cannot reindex a data stream into itself.
+
+IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.
+The destination should be configured as you want it before calling the reindex API.
+Reindex does not copy the settings from the source or its associated template.
+Mappings, shard counts, and replicas, for example, must be configured ahead of time.

If the Elasticsearch security features are enabled, you must have the following security privileges:

* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.
* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.

-If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled.
+If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.
+Automatic data stream creation requires a matching index template with data stream enabled.

-The `dest` element can be configured like the index API to control optimistic concurrency control. Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.
+The `dest` element can be configured like the index API to control optimistic concurrency control.
+Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.

Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.

-Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict.
+Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination.
+All existing documents will cause a version conflict.

-::::{important}
-Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream.
-:::: +IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. +A reindex can only add new documents to a destination data stream. +It cannot update existing documents in a destination data stream. +By default, version conflicts abort the reindex process. +To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. +In this case, the response includes a count of the version conflicts that were encountered. +Note that the handling of other error types is unaffected by the `conflicts` property. +Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. -By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. - -::::{note} -The reindex API makes no effort to handle ID collisions. The last document written will "win" but the order isn’t usually predictable so it is not a good idea to rely on this behavior. Instead, make sure that IDs are unique by using a script. -:::: - +NOTE: The reindex API makes no effort to handle ID collisions. +The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. +Instead, make sure that IDs are unique by using a script. **Running reindex asynchronously** -If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `_tasks/`. +If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. +Elasticsearch creates a record of this task as a document at `_tasks/`. **Reindex from multiple sources** -If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. That way you can resume the process if there are any errors by removing the partially completed source and starting over. It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. +If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. +That way you can resume the process if there are any errors by removing the partially completed source and starting over. +It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. 
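+
+With this client, that one-at-a-time approach is just a sequential loop; here is a sketch with hypothetical index names:
+
+```ts
+// Reindex each source separately so a failure can be retried from the
+// first incomplete source instead of starting the whole job over.
+for (const index of ['source-1', 'source-2', 'source-3']) {
+  await client.reindex({
+    source: { index },
+    dest: { index: `dest-${index}` }
+  })
+}
+```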
For example, you can use a bash script like this: @@ -1358,27 +1400,32 @@ done **Throttling** -Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. Requests are throttled by padding each batch with a wait time. To turn off throttling, set `requests_per_second` to `-1`. +Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. +Requests are throttled by padding each batch with a wait time. +To turn off throttling, set `requests_per_second` to `-1`. -The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: +The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. +The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. +By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` -Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". +Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. +This is "bursty" instead of "smooth". **Slicing** -Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. +Reindex supports sliced scroll to parallelize the reindexing process. +This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. -::::{note} -Reindexing from remote clusters does not support manual or automatic slicing. -:::: +NOTE: Reindexing from remote clusters does not support manual or automatic slicing. - -You can slice a reindex request manually by providing a slice ID and total number of slices to each request. You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use. +You can slice a reindex request manually by providing a slice ID and total number of slices to each request. +You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. +The `slices` parameter specifies the number of slices to use. Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: @@ -1387,13 +1434,16 @@ Adding `slices` to the reindex request just automates the manual process, creati * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. 
* Canceling the request with `slices` will cancel each sub-request. -* Due to the nature of `slices`, each sub-request won’t get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. * Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. -If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use the following guidelines. +If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. +If slicing manually or otherwise tuning automatic slicing, use the following guidelines. -Query performance is most efficient when the number of slices is equal to the number of shards in the index. If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. +Query performance is most efficient when the number of slices is equal to the number of shards in the index. +If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. +Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. Indexing performance scales linearly across available resources with the number of slices. @@ -1401,9 +1451,14 @@ Whether query or indexing performance dominates the runtime depends on the docum **Modify documents during reindexing** -Like `_update_by_query`, reindex operations support a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document’s metadata. +Like `_update_by_query`, reindex operations support a script that modifies the document. +Unlike `_update_by_query`, the script is allowed to modify the document's metadata. -Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body. Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. +Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. +For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. 
+Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.
+The deletion will be reported in the `deleted` counter in the response body.
+Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.

Think of the possibilities! Just be careful; you are able to change:

* `_id`
* `_index`
* `_version`
* `_routing`

-Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.
+Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.
+It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.

**Reindex from remote**

-Reindex supports reindexing from a remote Elasticsearch cluster. The `host` parameter must contain a scheme, host, port, and optional path. The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. There are a range of settings available to configure the behavior of the HTTPS connection.
+Reindex supports reindexing from a remote Elasticsearch cluster.
+The `host` parameter must contain a scheme, host, port, and optional path.
+The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.
+Be sure to use HTTPS when using basic authentication or the password will be sent in plain text.
+There are a range of settings available to configure the behavior of the HTTPS connection.

-When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. It can be set to a comma delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example:
+When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.
+Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.
+It can be set to a comma delimited list of allowed remote host and port combinations.
+Scheme is ignored; only the host and port are used.
+For example:

```
reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
```

-The list of allowed hosts must be configured on any nodes that will coordinate the reindex. This feature should work with remote clusters of any version of Elasticsearch. This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.
-
-::::{warning}
-Elasticsearch does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster.
-::::
+The list of allowed hosts must be configured on any nodes that will coordinate the reindex.
+This feature should work with remote clusters of any version of Elasticsearch.
+This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. +WARNING: Elasticsearch does not support forward compatibility across major versions. +For example, you cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. -::::{note} -Reindexing from remote clusters does not support manual or automatic slicing. -:::: - +NOTE: Reindexing from remote clusters does not support manual or automatic slicing. -Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you’ll need to use a smaller batch size. It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. Both default to 30 seconds. +Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. +If the remote index includes very large documents you'll need to use a smaller batch size. +It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. +Both default to 30 seconds. **Configuring SSL parameters** -Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request. +Reindex from remote supports configurable SSL settings. +These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. +It is not possible to configure SSL in the body of the reindex request. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) ```ts client.reindex({ dest, source }) ``` - - -### Arguments [_arguments_27] - -* **Request (object):** - - * **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination you are copying to. - * **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source you are copying from. - * **`conflicts` (Optional, Enum("abort" | "proceed"))**: Indicates whether to continue reindexing even when there are conflicts. - * **`max_docs` (Optional, number)**: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less then or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. - * **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when reindexing. - * **`size` (Optional, number)** - * **`refresh` (Optional, boolean)**: If `true`, the request refreshes affected shards to make this operation visible to search. - * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. By default, there is no throttle. 
- * **`scroll` (Optional, string | -1 | 0)**: The period of time that a consistent view of the index should be maintained for scrolled search.
- * **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. It defaults to one slice, which means the task isn’t sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards.
- * **`timeout` (Optional, string | -1 | 0)**: The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur.
- * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active.
- * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete.
- * **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
-
-
-## reindex_rethrottle [_reindex_rethrottle]
+### Arguments [_arguments_reindex]
+
+#### Request (object) [_request_reindex]
+
+- **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination you are copying to.
+- **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source you are copying from.
+- **`conflicts` (Optional, Enum("abort" | "proceed"))**: Indicates whether to continue reindexing even when there are conflicts.
+- **`max_docs` (Optional, number)**: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.
+- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when reindexing.
+- **`size` (Optional, number)**
+- **`refresh` (Optional, boolean)**: If `true`, the request refreshes affected shards to make this operation visible to search.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. By default, there is no throttle.
+- **`scroll` (Optional, string | -1 | 0)**: The period of time that a consistent view of the index should be maintained for scrolled search.
+- **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks.
Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. +- **`timeout` (Optional, string | -1 | 0)**: The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. +- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. + +## client.reindexRethrottle [_reindex_rethrottle] Throttle a reindex operation. -Change the number of requests per second for a particular reindex operation. For example: +Change the number of requests per second for a particular reindex operation. +For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 ``` -Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts. +Rethrottling that speeds up the query takes effect immediately. +Rethrottling that slows down the query will take effect after completing the current batch. +This behavior prevents scroll timeouts. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) ```ts client.reindexRethrottle({ task_id }) ``` +### Arguments [_arguments_reindex_rethrottle] +#### Request (object) [_request_reindex_rethrottle] -### Arguments [_arguments_28] - -* **Request (object):** - - * **`task_id` (string)**: The task identifier, which can be found by using the tasks API. - * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. - - - -## render_search_template [_render_search_template] +- **`task_id` (string)**: The task identifier, which can be found by using the tasks API. +- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. +## client.renderSearchTemplate [_render_search_template] Render a search template. Render a search template as a search request body. @@ -1511,361 +1570,495 @@ Render a search template as a search request body. ```ts client.renderSearchTemplate({ ... 
}) ``` +### Arguments [_arguments_render_search_template] +#### Request (object) [_request_render_search_template] -### Arguments [_arguments_29] - -* **Request (object):** - - * **`id` (Optional, string)**: ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. - * **`file` (Optional, string)** - * **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. - * **`source` (Optional, string)**: An inline search template. Supports the same parameters as the search API’s request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. - - - -## scripts_painless_execute [_scripts_painless_execute] +- **`id` (Optional, string)**: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. +- **`file` (Optional, string)** +- **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. +- **`source` (Optional, string)**: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. +## client.scriptsPainlessExecute [_scripts_painless_execute] Run a script. -Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don’t have permissions to write documents on a cluster. +Runs a script and returns a result. +Use this API to build and test scripts, such as when defining a script for a runtime field. +This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. -The API uses several *contexts*, which control how scripts are run, what variables are available at runtime, and what the return type is. +The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. -Each context requires a script, but additional parameters depend on the context you’re using for that script. +Each context requires a script, but additional parameters depend on the context you're using for that script. -[Endpoint documentation](elasticsearch://reference/scripting-languages/painless/painless-api-examples.md) +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html) ```ts client.scriptsPainlessExecute({ ... }) ``` +### Arguments [_arguments_scripts_painless_execute] +#### Request (object) [_request_scripts_painless_execute] -### Arguments [_arguments_30] - -* **Request (object):** - - * **`context` (Optional, Enum("painless_test" | "filter" | "score" | "boolean_field" | "date_field" | "double_field" | "geo_point_field" | "ip_field" | "keyword_field" | "long_field" | "composite_field"))**: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. - * **`context_setup` (Optional, { document, index, query })**: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. 
- * **`script` (Optional, { source, id, params, lang, options })**: The Painless script to run.
-
-
-## scroll [_scroll]
+- **`context` (Optional, Enum("painless_test" | "filter" | "score" | "boolean_field" | "date_field" | "double_field" | "geo_point_field" | "ip_field" | "keyword_field" | "long_field" | "composite_field"))**: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed.
+- **`context_setup` (Optional, { document, index, query })**: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`.
+- **`script` (Optional, { source, id, params, lang, options })**: The Painless script to run.

+## client.scroll [_scroll]
Run a scrolling search.

IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).

The scroll API gets large sets of results from a single scrolling search request.
+To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.
+The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.
+The search response returns a scroll ID in the `_scroll_id` response body parameter.
+You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.
+If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.

You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.

IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
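+
+A minimal sketch of the scroll loop with this client (the index name is hypothetical):
+
+```ts
+// Open a search context that is kept alive for one minute between batches.
+let response = await client.search({
+  index: 'my-index-000001',
+  scroll: '1m',
+  query: { match_all: {} }
+})
+
+while (response.hits.hits.length > 0) {
+  // ...process the current batch of hits here...
+  response = await client.scroll({ scroll_id: response._scroll_id!, scroll: '1m' })
+}
+
+// Free the search context as soon as you are done with it.
+await client.clearScroll({ scroll_id: response._scroll_id! })
+```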
-[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll)

```ts
client.scroll({ scroll_id })
```

+### Arguments [_arguments_scroll]
+
+#### Request (object) [_request_scroll]

-### Arguments [_arguments_31]
-
-* **Request (object):**
-
-  * **`scroll_id` (string)**: Scroll ID of the search.
-  * **`scroll` (Optional, string | -1 | 0)**: Period to retain the search context for scrolling.
-  * **`rest_total_hits_as_int` (Optional, boolean)**: If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object.
-
-
-## search [_search]
+- **`scroll_id` (string)**: The scroll ID of the search.
+- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling.
+- **`rest_total_hits_as_int` (Optional, boolean)**: If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object.

+## client.search [_search]
Run a search.

Get search hits that match the query defined in the request.
+You can provide search queries using the `q` query string parameter or the request body.
+If both are specified, only the query parameter is used.

If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.
+To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.

**Search slicing**

When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.
+By default the splitting is done first on the shards, then locally on each shard.
+The local splitting partitions the shard into contiguous ranges based on Lucene document IDs.

For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.

-::::{important}
-The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
-::::
-
+IMPORTANT: The same point-in-time ID should be used for all slices.
+If different PIT IDs are used, slices can overlap and miss documents.
+This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
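+
+Following the note above, a sketch that consumes two slices of the same point in time in parallel (the index name is hypothetical):
+
+```ts
+// Both searches must reference the same PIT ID so the slices do not overlap.
+const pit = await client.openPointInTime({ index: 'my-index-000001', keep_alive: '1m' })
+
+const [slice0, slice1] = await Promise.all([
+  client.search({
+    pit: { id: pit.id, keep_alive: '1m' },
+    slice: { id: 0, max: 2 },
+    query: { match_all: {} }
+  }),
+  client.search({
+    pit: { id: pit.id, keep_alive: '1m' },
+    slice: { id: 1, max: 2 },
+    query: { match_all: {} }
+  })
+])
+
+await client.closePointInTime({ id: pit.id })
+```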
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search)

```ts
client.search({ ... })
```

+### Arguments [_arguments_search]
+
+#### Request (object) [_request_search]
+
+- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`aggregations` (Optional, Record)**: Defines the aggregations that are run as part of the search request.
+- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**: Collapses search results by the values of the specified field.
+- **`explain` (Optional, boolean)**: If `true`, the request returns detailed information about score computation as part of a hit.
+- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins.
+- **`from` (Optional, number)**: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
+- **`highlight` (Optional, { encoder, fields })**: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results.
+- **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
+- **`indices_boost` (Optional, Record[])**: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score.
+- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response.
+- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run.
+- **`rank` (Optional, { rrf })**: The Reciprocal Rank Fusion (RRF) to use.
+- **`min_score` (Optional, number)**: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results.
+- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. +- **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL. +- **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. +- **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. +- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. +- **`search_after` (Optional, number | number | string | boolean | null[])**: Used to retrieve the next page of hits using a set of sort values from the previous page. +- **`size` (Optional, number)**: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. +- **`slice` (Optional, { field, id, max })**: Split a scrolled search into multiple slices that can be consumed independently. +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: A list of : pairs. +- **`_source` (Optional, boolean | { excludes, includes })**: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. 
If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. +- **`fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. +- **`suggest` (Optional, { text })**: Defines a suggester that provides similar looking terms based on a provided text. +- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. +- **`timeout` (Optional, string)**: The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. +- **`track_scores` (Optional, boolean)**: If `true`, calculate and return document scores, even if the scores are not used for sorting. +- **`version` (Optional, boolean)**: If `true`, the request returns the document version as part of a hit. +- **`seq_no_primary_term` (Optional, boolean)**: If `true`, the request returns sequence number and primary term of the last modification of each hit. +- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. +- **`pit` (Optional, { id, keep_alive })**: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. +- **`runtime_mappings` (Optional, Record)**: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. +- **`stats` (Optional, string[])**: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`allow_partial_search_results` (Optional, boolean)**: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. +- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. 
This parameter can be used only when the `q` query string parameter is specified. +- **`batched_reduce_size` (Optional, number)**: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. +- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`. +- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices will be ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_named_queries_score` (Optional, boolean)**: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. +- **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. 
`<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order. +- **`pre_filter_shard_size` (Optional, number)**: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard cannot match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only indices. * The primary sort of the query targets an indexed field. +- **`request_cache` (Optional, boolean)**: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings. +- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. +- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting. +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates how distributed term frequencies are calculated for relevance scoring. +- **`suggest_field` (Optional, string)**: The field to use for suggestions. +- **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. +- **`suggest_size` (Optional, number)**: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. +- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. +- **`typed_keys` (Optional, boolean)**: If `true`, aggregation and suggester names are prefixed by their respective types in the response. +- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`q` (Optional, string)**: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned.
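+As a hedged usage sketch (the index name, field names, and query values below are hypothetical, not part of the generated reference), a request exercising a few of the parameters above could look like this:
+
+```ts
+// Minimal search sketch: a match query with sorting and source filtering.
+// Assumes `client` is a connected instance of Client from '@elastic/elasticsearch'.
+const result = await client.search({
+  index: 'my-index',
+  query: { match: { message: 'hello world' } },
+  size: 20,
+  sort: [{ '@timestamp': 'desc' }],
+  _source_includes: ['message', '@timestamp'],
+})
+console.log(result.hits.hits)
+```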
+- **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index. + +## client.searchMvt [_search_mvt] +Search a vector tile. + +Search a vector tile for geospatial values. +Before using this API, you should be familiar with the Mapbox vector tile specification. +The API returns results as a binary Mapbox vector tile. +Internally, Elasticsearch translates a vector tile search API request into a search containing: -### Arguments [_arguments_32] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. - * **`aggregations` (Optional, Record)**: Defines the aggregations that are run as part of the search request. - * **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**: Collapses search results the values of the specified field. - * **`explain` (Optional, boolean)**: If `true`, the request returns detailed information about score computation as part of a hit. - * **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. - * **`from` (Optional, number)**: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - * **`highlight` (Optional, { encoder, fields })**: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. - * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. - * **`indices_boost` (Optional, Record[])**: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. - * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. - * **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run. - * **`rank` (Optional, { rrf })**: The Reciprocal Rank Fusion (RRF) to use. - * **`min_score` (Optional, number)**: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results.
- * **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. - * **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL. - * **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. - * **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. - * **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. - * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: Used to retrieve the next page of hits using a set of sort values from the previous page. - * **`size` (Optional, number)**: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. - * **`slice` (Optional, { field, id, max })**: Split a scrolled search into multiple slices that can be consumed independently. - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: A list of : pairs. - * **`_source` (Optional, boolean | { excludes, includes })**: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. 
If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. - * **`fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. - * **`suggest` (Optional, { text })**: Defines a suggester that provides similar looking terms based on a provided text. - * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. - * **`timeout` (Optional, string)**: The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. - * **`track_scores` (Optional, boolean)**: If `true`, calculate and return document scores, even if the scores are not used for sorting. - * **`version` (Optional, boolean)**: If `true`, the request returns the document version as part of a hit. - * **`seq_no_primary_term` (Optional, boolean)**: If `true`, the request returns sequence number and primary term of the last modification of each hit. - * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. - * **`pit` (Optional, { id, keep_alive })**: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. - * **`runtime_mappings` (Optional, Record)**: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. - * **`stats` (Optional, string[])**: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`allow_partial_search_results` (Optional, boolean)**: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. - * **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. 
This parameter can be used only when the `q` query string parameter is specified. - * **`batched_reduce_size` (Optional, number)**: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. - * **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - * **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`. - * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices will be ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`include_named_queries_score` (Optional, boolean)**: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. - * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - * **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. - * **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. 
For example: `_shards:2,3|_local`. `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. - * **`pre_filter_shard_size` (Optional, number)**: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field. - * **`request_cache` (Optional, boolean)**: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings. - * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - * **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates how distributed term frequencies are calculated for relevance scoring. - * **`suggest_field` (Optional, string)**: The field to use for suggestions. - * **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. - * **`suggest_size` (Optional, number)**: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. - * **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. - * **`typed_keys` (Optional, boolean)**: If `true`, aggregation and suggester names are be prefixed by their respective types in the response. - * **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`q` (Optional, string)**: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned. 
- * **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. - - - -## search_mvt [_search_mvt] +* A `geo_bounding_box` query on the `<field>`. The query uses the `<zoom>/<x>/<y>` tile as a bounding box. +* A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `<zoom>/<x>/<y>` tile as a bounding box. +* Optionally, a `geo_bounds` aggregation on the `<field>`. The search only includes this aggregation if the `exact_bounds` parameter is `true`. +* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. -Search a vector tile. +For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search: -Search a vector tile for geospatial values. +``` +GET my-index/_search +{ + "size": 10000, + "query": { + "geo_bounding_box": { + "my-geo-field": { + "top_left": { + "lat": -40.979898069620134, + "lon": -45 + }, + "bottom_right": { + "lat": -66.51326044311186, + "lon": 0 + } + } + } + }, + "aggregations": { + "grid": { + "geotile_grid": { + "field": "my-geo-field", + "precision": 11, + "size": 65536, + "bounds": { + "top_left": { + "lat": -40.979898069620134, + "lon": -45 + }, + "bottom_right": { + "lat": -66.51326044311186, + "lon": 0 + } + } + } + }, + "bounds": { + "geo_bounds": { + "field": "my-geo-field", + "wrap_longitude": false + } + } + } +} +``` + +The API returns results as a binary Mapbox vector tile. +Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: + +* A `hits` layer containing a feature for each `<field>` value matching the `geo_bounding_box` query. +* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. +* A meta layer containing: + * A feature containing a bounding box. By default, this is the bounding box of the tile. + * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. + * Metadata for the search. + +The API only returns features that can display at its zoom level. +For example, if a polygon feature has no area at its zoom level, the API omits it. +The API returns errors as UTF-8 encoded JSON. + +IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. +If you specify both parameters, the query parameter takes precedence. + +**Grid precision for geotile** + +For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. +`grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: `<zoom> + grid_precision`. +For example, if `<zoom>` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. +The maximum final precision is 29. +The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.
+ +For example, a value of 8 divides the tile into a grid of 256 x 256 cells. +The `aggs` layer only contains features for cells with matching data. + +**Grid precision for geohex** + +For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and `grid_precision` to calculate a final precision as follows: `<zoom> + grid_precision`. + +This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. +The following table maps the H3 resolution for each precision. +For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6. +At a precision of 6, hexagonal cells have an H3 resolution of 2. +If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7. +At a precision of 7, hexagonal cells have an H3 resolution of 3. + +| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | +| --------- | ---------------- | ------------- | ----------------| ----- | +| 1 | 4 | 0 | 122 | 30.5 | +| 2 | 16 | 0 | 122 | 7.625 | +| 3 | 64 | 1 | 842 | 13.15625 | +| 4 | 256 | 1 | 842 | 3.2890625 | +| 5 | 1024 | 2 | 5882 | 5.744140625 | +| 6 | 4096 | 2 | 5882 | 1.436035156 | +| 7 | 16384 | 3 | 41162 | 2.512329102 | +| 8 | 65536 | 3 | 41162 | 0.6280822754 | +| 9 | 262144 | 4 | 288122 | 1.099098206 | +| 10 | 1048576 | 4 | 288122 | 0.2747745514 | +| 11 | 4194304 | 5 | 2016842 | 0.4808526039 | +| 12 | 16777216 | 6 | 14117882 | 0.8414913416 | +| 13 | 67108864 | 6 | 14117882 | 0.2103728354 | +| 14 | 268435456 | 7 | 98825162 | 0.3681524172 | +| 15 | 1073741824 | 8 | 691776122 | 0.644266719 | +| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | +| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | +| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | +| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | +| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | +| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | +| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | +| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | +| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | +| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | +| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | +| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | +| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | +| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | + +Hexagonal cells don't align perfectly on a vector tile. +Some cells may intersect more than one vector tile. +To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. +Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt) ```ts client.searchMvt({ index, field, zoom, x, y }) ``` ### Arguments [_arguments_search_mvt] #### Request (object) [_request_search_mvt] +- **`index` (string | string[])**: List of data streams, indices, or aliases to search +- **`field` (string)**: Field containing geospatial data to return +- **`zoom` (number)**: Zoom level for the vector tile to search +- **`x` (number)**: X coordinate for the vector tile to search +- **`y` (number)**: Y coordinate for the vector tile to search +- **`aggs` (Optional, Record)**: Sub-aggregations for the geotile_grid.
It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. +- **`buffer` (Optional, number)**: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. +- **`exact_bounds` (Optional, boolean)**: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile. +- **`extent` (Optional, number)**: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. +- **`fields` (Optional, string | string[])**: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. +- **`grid_agg` (Optional, Enum("geotile" | "geohex"))**: The aggregation used to create a grid for the `field`. +- **`grid_precision` (Optional, number)**: Additional zoom levels available through the aggs layer. For example, if `<zoom>` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer. +- **`grid_type` (Optional, Enum("grid" | "point" | "centroid"))**: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The query DSL used to filter documents for the search. +- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. +- **`size` (Optional, number)**: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer. +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. +- **`track_total_hits` (Optional, boolean | number)**: The number of hits matching the query to count accurately.
If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. +- **`with_labels` (Optional, boolean)**: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`. + +## client.searchShards [_search_shards] +Get the search shards. +Get the indices and shards that a search request would be run against. +This information can be useful for working out issues or planning optimizations with routing and shard preferences. +When filtered aliases are used, the filter is returned as part of the `indices` section. -### Arguments [_arguments_33] - -* **Request (object):** - - * **`index` (string | string[])**: List of data streams, indices, or aliases to search - * **`field` (string)**: Field containing geospatial data to return - * **`zoom` (number)**: Zoom level for the vector tile to search - * **`x` (number)**: X coordinate for the vector tile to search - * **`y` (number)**: Y coordinate for the vector tile to search - * **`aggs` (Optional, Record)**: Sub-aggregations for the geotile_grid. Supports the following aggregation types: - avg - cardinality - max - min - sum - * **`buffer` (Optional, number)**: Size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. - * **`exact_bounds` (Optional, boolean)**: If false, the meta layer’s feature is the bounding box of the tile. If true, the meta layer’s feature is a bounding box resulting from a geo_bounds aggregation. The aggregation runs on values that intersect the // tile with wrap_longitude set to false. The resulting bounding box may be larger than the vector tile. - * **`extent` (Optional, number)**: Size, in pixels, of a side of the tile. Vector tiles are square with equal sides. - * **`fields` (Optional, string | string[])**: Fields to return in the `hits` layer. Supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. - * **`grid_agg` (Optional, Enum("geotile" | "geohex"))**: Aggregation used to create a grid for the `field`. - * **`grid_precision` (Optional, number)**: Additional zoom levels available through the aggs layer. For example, if is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don’t include the aggs layer. - * **`grid_type` (Optional, Enum("grid" | "point" | "centroid"))**: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a geotile_grid cell. If *grid* each feature is a Polygon of the cells bounding box. If *point* each feature is a Point that is the centroid of the cell. 
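+Tying the `client.searchMvt` parameters above together, here is a minimal sketch; the index and field names are hypothetical, and the response body is a binary Mapbox vector tile rather than JSON:
+
+```ts
+// Fetch the zoom-7 tile at x=38, y=48 with a geotile grid overlay.
+const tile = await client.searchMvt({
+  index: 'my-geo-index',   // hypothetical index with a geospatial field
+  field: 'my-geo-field',   // hypothetical geo_point or geo_shape field
+  zoom: 7,
+  x: 38,
+  y: 48,
+  grid_agg: 'geotile',
+  grid_precision: 8,
+})
+// `tile` holds the PBF-encoded bytes, ready to hand to a map renderer.
+```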
- * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query DSL used to filter documents for the search. - * **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. - * **`size` (Optional, number)**: Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don’t include the hits layer. - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: Sorts features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box’s diagonal length, from longest to shortest. - * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. - * **`with_labels` (Optional, boolean)**: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. +If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. - - -## search_shards [_search_shards] - -Get the search shards. - -Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards) ```ts client.searchShards({ ... }) ``` +### Arguments [_arguments_search_shards] +#### Request (object) [_request_search_shards] -### Arguments [_arguments_34] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: Returns the indices and shards that a search request would be executed against. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. 
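+For `client.searchShards`, a minimal sketch (the index name and routing value are hypothetical):
+
+```ts
+// Ask which nodes and shards a search against 'my-index' would run on,
+// using the same routing value the real search would use.
+const shardInfo = await client.searchShards({
+  index: 'my-index',
+  routing: 'user-123',
+})
+console.log(shardInfo.shards) // one entry per shard group, listing its copies
+```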
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - - - -## search_template [_search_template] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +## client.searchTemplate [_search_template] Run a search with a search template. -[Endpoint documentation](docs-content://solutions/search/search-templates.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template) ```ts client.searchTemplate({ ... }) ``` - - -### Arguments [_arguments_35] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. Supports wildcards (*). - * **`explain` (Optional, boolean)**: If `true`, returns detailed information about score calculation as part of each hit. - * **`id` (Optional, string)**: ID of the search template to use. If no source is specified, this parameter is required. - * **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value.
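+As an illustration of the template parameters documented in this section, here is a hedged sketch with an inline Mustache template; the index name, template source, and parameter values are hypothetical:
+
+```ts
+// Render and run an inline search template; `params` fills the Mustache variables.
+const result = await client.searchTemplate({
+  index: 'my-index',
+  source: '{"query": {"match": {"message": "{{query_string}}"}}, "size": {{size}}}',
+  params: { query_string: 'hello world', size: 10 },
+})
+```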
- * **`profile` (Optional, boolean)**: If `true`, the query execution is profiled. - * **`source` (Optional, string)**: An inline search template. Supports the same parameters as the search API’s request body. Also supports Mustache variables. If no id is specified, this parameter is required. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_throttled` (Optional, boolean)**: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - * **`scroll` (Optional, string | -1 | 0)**: Specifies how long a consistent view of the index should be maintained for scrolled search. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. - * **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are rendered as an integer in the response. - * **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types. - - - -## terms_enum [_terms_enum] - +### Arguments [_arguments_search_template] + +#### Request (object) [_request_search_template] + +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). +- **`explain` (Optional, boolean)**: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. +- **`id` (Optional, string)**: The ID of the search template to use. If no `source` is specified, this parameter is required. +- **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. +- **`profile` (Optional, boolean)**: If `true`, the query execution is profiled. +- **`source` (Optional, string)**: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. 
This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_throttled` (Optional, boolean)**: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`scroll` (Optional, string | -1 | 0)**: Specifies how long a consistent view of the index should be maintained for scrolled search. +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. +- **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object. +- **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types. + +## client.termsEnum [_terms_enum] Get terms in an index. -Discover terms that match a partial string in an index. This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios. - -If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate. This can occur due to a few reasons, such as a request timeout or a node error. - -::::{note} -The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. -:::: +Discover terms that match a partial string in an index. +This API is designed for low-latency look-ups used in auto-complete scenarios. +> info +> The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum) ```ts client.termsEnum({ index, field }) ``` +### Arguments [_arguments_terms_enum] +#### Request (object) [_request_terms_enum] -### Arguments [_arguments_36] - -* **Request (object):** - - * **`index` (string)**: List of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported. - * **`field` (string)**: The string to match at the start of indexed terms. If not provided, all terms in the field are considered. 
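+A minimal auto-complete sketch for `client.termsEnum` (the index and field names are hypothetical):
+
+```ts
+// Look up terms in the 'tags' field that start with "kib".
+const response = await client.termsEnum({
+  index: 'my-index',
+  field: 'tags',
+  string: 'kib',
+  size: 5,
+  case_insensitive: true,
+})
+// If `response.complete` is false, the returned terms may be partial.
+console.log(response.terms)
+```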
- * **`size` (Optional, number)**: How many matching terms to return. - * **`timeout` (Optional, string | -1 | 0)**: The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded the complete flag set to false in the response and the results may be partial or empty. - * **`case_insensitive` (Optional, boolean)**: When true the provided search string is matched against index terms without case sensitivity. - * **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Allows to filter an index shard if the provided query rewrites to match_none. - * **`string` (Optional, string)**: The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request. - * **`search_after` (Optional, string)** - - - -## termvectors [_termvectors] +- **`index` (string)**: A list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`field` (string)**: The field to search for terms in. +- **`size` (Optional, number)**: The number of matching terms to return. +- **`timeout` (Optional, string | -1 | 0)**: The maximum length of time to spend collecting results. If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty. +- **`case_insensitive` (Optional, boolean)**: When `true`, the provided search string is matched against index terms without case sensitivity. +- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter an index shard if the provided query rewrites to `match_none`. +- **`string` (Optional, string)**: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766.
+- **`search_after` (Optional, string)**: The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. +## client.termvectors [_termvectors] Get term vector information. Get information and statistics about terms in the fields of a particular document. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors) +You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. +You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. +For example: -```ts -client.termvectors({ index }) +``` +GET /my-index-000001/_termvectors/1?fields=message ``` +Fields can be specified using wildcards, similar to the multi match query. -### Arguments [_arguments_37] +Term vectors are real-time by default, not near real-time. +This can be changed by setting the `realtime` parameter to `false`. -* **Request (object):** +You can request three types of values: _term information_, _term statistics_, and _field statistics_. +By default, all term information and field statistics are returned for all fields but term statistics are excluded. - * **`index` (string)**: Name of the index that contains the document. - * **`id` (Optional, string)**: Unique identifier of the document. - * **`doc` (Optional, object)**: An artificial document (a document not present in the index) for which you want to retrieve term vectors. - * **`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })**: Filter terms based on their tf-idf scores. - * **`per_field_analyzer` (Optional, Record)**: Overrides the default per-field analyzer. - * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. - * **`field_statistics` (Optional, boolean)**: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. - * **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets. - * **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads. - * **`positions` (Optional, boolean)**: If `true`, the response includes term positions. - * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - * **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - * **`term_statistics` (Optional, boolean)**: If `true`, the response includes term frequency and document frequency. - * **`version` (Optional, number)**: If `true`, returns the document version as part of a hit. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: Specific version type. **Term information** +* term frequency in the field (always returned) +* term positions (`positions: true`) +* start and end offsets (`offsets: true`) +* term payloads (`payloads: true`), as base64 encoded bytes +If the requested information wasn't stored in the index, it will be computed on the fly if possible.
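+As a sketch of requesting these term values with the client (the index name, document ID, and field are hypothetical):
+
+```ts
+// Fetch positions, offsets, payloads, and term statistics for one document's field.
+const tv = await client.termvectors({
+  index: 'my-index-000001',
+  id: '1',
+  fields: ['message'],
+  positions: true,
+  offsets: true,
+  payloads: true,
+  term_statistics: true,
+})
+console.log(tv.term_vectors)
+```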
+Term vectors can also be computed for artificial documents, that is, for documents not present in the index but provided by the user. -## update [_update] + +> warn +> Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. +**Behaviour** + +The term and field statistics are not accurate. +Deleted documents are not taken into account. +The information is only retrieved for the shard the requested document resides in. +The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. +By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. +Use `routing` only to hit a particular shard. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors) + +```ts +client.termvectors({ index }) +``` +### Arguments [_arguments_termvectors] + +#### Request (object) [_request_termvectors] + +- **`index` (string)**: The name of the index that contains the document. +- **`id` (Optional, string)**: A unique identifier for the document. +- **`doc` (Optional, object)**: An artificial document (a document not present in the index) for which you want to retrieve term vectors. +- **`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })**: Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query. +- **`per_field_analyzer` (Optional, Record)**: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. +- **`fields` (Optional, string | string[])**: A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +- **`field_statistics` (Optional, boolean)**: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). +- **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets. +- **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads. +- **`positions` (Optional, boolean)**: If `true`, the response includes term positions. +- **`term_statistics` (Optional, boolean)**: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact. +- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. +- **`version` (Optional, number)**: If `true`, returns the document version as part of a hit.
+- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time. + +## client.update [_update] Update a document. Update a document by running a script or passing a partial document. If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. -The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation: +The script can update, delete, or skip modifying the document. +The API also supports passing a partial document, which is merged into the existing document. +To fully replace an existing document, use the index API. +This operation: * Gets the document (collocated with the shard) from the index. * Runs the specified script. @@ -1873,45 +2066,44 @@ The script can update, delete, or skip modifying the document. The API also supp The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. -The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). +The `_source` field must be enabled to use this API. +In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update) ```ts client.update({ id, index }) ``` - - -### Arguments [_arguments_38] - -* **Request (object):** - - * **`id` (string)**: A unique identifier for the document to be updated. - * **`index` (string)**: The name of the target index. By default, the index is created automatically if it doesn’t exist. - * **`detect_noop` (Optional, boolean)**: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. - * **`doc` (Optional, object)**: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored. - * **`doc_as_upsert` (Optional, boolean)**: If `true`, use the contents of *doc* as the value of *upsert*. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. - * **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document. - * **`scripted_upsert` (Optional, boolean)**: If `true`, run the script whether or not the document exists. - * **`_source` (Optional, boolean | { excludes, includes })**: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve. - * **`upsert` (Optional, object)**: If the document does not already exist, the contents of *upsert* are inserted as a new document. If the document exists, the *script* is run. - * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. - * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. 
- * **`lang` (Optional, string)**: The script language.
- * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, it does nothing with refreshes.
- * **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
- * **`retry_on_conflict` (Optional, number)**: The number of times the operation should be retried when a conflict occurs.
- * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
- * **`timeout` (Optional, string | -1 | 0)**: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur.
- * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of copies of each shard that must be active before proceeding with the operation. Set to *all* or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active.
- * **`_source_excludes` (Optional, string | string[])**: The source fields you want to exclude.
- * **`_source_includes` (Optional, string | string[])**: The source fields you want to retrieve.
-
-
-
-## update_by_query [_update_by_query]
-
-Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
+### Arguments [_arguments_update]
+
+#### Request (object) [_request_update]
+
+- **`id` (string)**: A unique identifier for the document to be updated.
+- **`index` (string)**: The name of the target index. By default, the index is created automatically if it doesn't exist.
+- **`detect_noop` (Optional, boolean)**: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document.
+- **`doc` (Optional, object)**: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored.
+- **`doc_as_upsert` (Optional, boolean)**: If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported.
+- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document.
+- **`scripted_upsert` (Optional, boolean)**: If `true`, run the script whether or not the document exists.
+- **`_source` (Optional, boolean | { excludes, includes })**: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve.
+- **`upsert` (Optional, object)**: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run.
+- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
+- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
+- **`include_source_on_error` (Optional, boolean)**: If `true`, the document source is included in the error message in case of parsing errors.
+- **`lang` (Optional, string)**: The script language.
+- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes.
+- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+- **`retry_on_conflict` (Optional, number)**: The number of times the operation should be retried when a conflict occurs.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`timeout` (Optional, string | -1 | 0)**: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur.
+- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active.
+- **`_source_excludes` (Optional, string | string[])**: The source fields you want to exclude.
+- **`_source_includes` (Optional, string | string[])**: The source fields you want to retrieve.
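+
+As a sketch of a common pattern, the following call merges a partial document into a stored one, or indexes it as a new document when the ID does not exist yet (the index name, ID, and field are illustrative):
+
+```ts
+// Partial-document update with upsert semantics: `doc` is merged into
+// the stored document, or indexed as-is when the document is missing.
+const result = await client.update({
+  index: 'my-index-000001',
+  id: '1',
+  doc: { views: 42 },
+  doc_as_upsert: true
+})
+console.log(result.result) // 'updated', 'created', or 'noop'
+```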
+
+## client.updateByQuery [_update_by_query]
+Update documents.
+Updates documents that match the specified query.
+If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:
@@ -1920,33 +2112,45 @@ If the Elasticsearch security features are enabled, you must have the following
* `read`
* `index` or `write`
You can specify the query criteria in the request URI or the request body using the same syntax as the search API.
-When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.
-
-::::{note}
-Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.
-::::
+When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.
+When the versions match, the document is updated and the version number is incremented.
+If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.
+You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.
+Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.
+NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.
-While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick, they are not rolled back.
+While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.
+A bulk update request is performed for each batch of matching documents.
+Any query or update failures cause the update by query request to fail and the failures are shown in the response.
+Any update requests that completed successfully still stick; they are not rolled back.
**Throttling update requests**
-To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling.
+To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.
+This pads each batch with a wait time to throttle the rate.
+Set `requests_per_second` to `-1` to turn off throttling.
-Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is 1000, so if `requests_per_second` is set to `500`:
+Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.
+The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
+By default the batch size is 1000, so if `requests_per_second` is set to `500`:
```
target_time = 1000 / 500 per second = 2 seconds
wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
```
-Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth".
+Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.
+This is "bursty" instead of "smooth".
**Slicing**
-Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts.
+Update by query supports sliced scroll to parallelize the update process.
+This can improve efficiency and provide a convenient way to break the request down into smaller parts.
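+
+As an illustration, a sliced, throttled update by query might look like the following sketch (the index name and script are made up for this example):
+
+```ts
+// Parallelize with one slice per shard, tolerate version conflicts,
+// and throttle the whole request to 500 sub-requests per second.
+const response = await client.updateByQuery({
+  index: 'my-index-000001',
+  slices: 'auto',
+  conflicts: 'proceed',
+  requests_per_second: 500,
+  script: {
+    lang: 'painless',
+    source: 'ctx._source.counter = ctx._source.counter + 1'
+  }
+})
+console.log(response.updated, response.version_conflicts)
+```
+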
-Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+Setting `slices` to `auto` chooses a reasonable number for most data streams and indices.
+This setting will use one slice per shard, up to a certain limit.
+If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:
@@ -1955,11 +2159,11 @@ Adding `slices` to `_update_by_query` just automates the manual process of creat
* These sub-requests are individually addressable for things like cancellation and rethrottling.
* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.
* Canceling the request with slices will cancel each sub-request.
-* Due to the nature of slices each sub-request won’t get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
+* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.
* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.
-If you’re slicing manually or otherwise tuning automatic slicing, keep in mind that:
+If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:
* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
* Update performance scales linearly across available resources with the number of slices.
Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.
**Update the document source**
-Update by query supports scripts to update the document source. As with the update API, you can set `ctx.op` to change the operation that is performed.
+Update by query supports scripts to update the document source.
+As with the update API, you can set `ctx.op` to change the operation that is performed.
-Set `ctx.op = "noop"` if your script decides that it doesn’t have to make any changes. The update by query operation skips updating the document and increments the `noop` counter.
+Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes.
+The update by query operation skips updating the document and increments the `noop` counter.
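+
+For instance, a script along these lines skips documents that already have the desired value, so they are counted as `noop` instead of being rewritten (the index name and `status` field are hypothetical):
+
+```ts
+// Documents already marked 'published' are skipped and counted under
+// `noops` in the response; everything else is updated in place.
+const response = await client.updateByQuery({
+  index: 'my-index-000001',
+  script: {
+    lang: 'painless',
+    source: `
+      if (ctx._source.status == 'published') {
+        ctx.op = 'noop';
+      } else {
+        ctx._source.status = 'published';
+      }
+    `
+  }
+})
+console.log(response.noops)
+```
+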
-Set `ctx.op = "delete"` if your script decides that the document should be deleted. The update by query operation deletes the document and increments the `deleted` counter. +Set `ctx.op = "delete"` if your script decides that the document should be deleted. +The update by query operation deletes the document and increments the `deleted` counter. -Update by query supports only `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error. This API enables you to only modify the source of matching documents; you cannot move them. +Update by query supports only `index`, `noop`, and `delete`. +Setting `ctx.op` to anything else is an error. +Setting any other field in `ctx` is an error. +This API enables you to only modify the source of matching documents; you cannot move them. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query) ```ts client.updateByQuery({ index }) ``` - - -### Arguments [_arguments_39] - -* **Request (object):** - - * **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. - * **`max_docs` (Optional, number)**: The maximum number of documents to update. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to update using the Query DSL. - * **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when updating. - * **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. - * **`conflicts` (Optional, Enum("abort" | "proceed"))**: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. 
- * **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`from` (Optional, number)**: Starting offset (default: 0) - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - * **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. - * **`q` (Optional, string)**: A query in the Lucene query string syntax. - * **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API’s `refresh` parameter, which causes just the shard that received the request to be refreshed. - * **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. It defaults to the index-level setting. - * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. - * **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation. - * **`search_timeout` (Optional, string | -1 | 0)**: An explicit timeout for each search request. By default, there is no timeout. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. - * **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. - * **`sort` (Optional, string[])**: A list of : pairs. - * **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes. - * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. 
Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. - * **`timeout` (Optional, string | -1 | 0)**: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. - * **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit. - * **`version_type` (Optional, boolean)**: Should the document increment the version number (internal) on hit or not (reindex) - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. - * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. - - - -## update_by_query_rethrottle [_update_by_query_rethrottle] - +### Arguments [_arguments_update_by_query] + +#### Request (object) [_request_update_by_query] + +- **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`max_docs` (Optional, number)**: The maximum number of documents to update. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to update using the Query DSL. +- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when updating. +- **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. +- **`conflicts` (Optional, Enum("abort" | "proceed"))**: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. 
+- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
+- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified.
+- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+- **`from` (Optional, number)**: Starting offset (default: 0)
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
+- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`q` (Optional, string)**: A query in the Lucene query string syntax.
+- **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed.
+- **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. It defaults to the index-level setting.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling.
+- **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation.
+- **`search_timeout` (Optional, string | -1 | 0)**: An explicit timeout for each search request. By default, there is no timeout.
+- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
+- **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into.
+- **`sort` (Optional, string[])**: A list of `<field>:<direction>` pairs.
+- **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes.
+- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
+- **`timeout` (Optional, string | -1 | 0)**: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur.
+- **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit.
+- **`version_type` (Optional, boolean)**: Should the document increment the version number (internal) on hit or not (reindex).
+- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.
+
+## client.updateByQueryRethrottle [_update_by_query_rethrottle]
Throttle an update by query operation.
-Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
+Change the number of requests per second for a particular update by query operation.
+Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle)
```ts
client.updateByQueryRethrottle({ task_id })
```
### Arguments [_arguments_update_by_query_rethrottle]
#### Request (object) [_request_update_by_query_rethrottle]
- **`task_id` (string)**: The ID for the task.
- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`.
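+
+For example, slowing a running update by query down might look like this sketch (the task ID is a placeholder for one returned by an earlier call made with `wait_for_completion: false`):
+
+```ts
+// Lower the throttle of an in-flight update by query task.
+await client.updateByQueryRethrottle({
+  task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619', // placeholder task ID
+  requests_per_second: 100
+})
+```
+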
+## client.asyncSearch.delete [_async_search.delete]
Delete an async search.
-If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege.
+If the asynchronous search is still running, it is cancelled.
+Otherwise, the saved search results are deleted.
+If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to the authenticated user that submitted the original search request and to users that have the `cancel_task` cluster privilege.
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit)
```ts
client.asyncSearch.delete({ id })
```
+### Arguments [_arguments_async_search.delete]
-### Arguments [_arguments_41]
-
-* **Request (object):**
-
- * **`id` (string)**: A unique identifier for the async search.
+
+#### Request (object) [_request_async_search.delete]
+
+- **`id` (string)**: A unique identifier for the async search.
+
+## client.asyncSearch.get [_async_search.get]
Get async search results.
Retrieve the results of a previously submitted asynchronous search request.
If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit)
```ts
client.asyncSearch.get({ id })
```
### Arguments [_arguments_async_search.get]
#### Request (object) [_request_async_search.get]
- **`id` (string)**: A unique identifier for the async search.
+- **`keep_alive` (Optional, string | -1 | 0)**: The length of time that the async search should be available in the cluster.
+When not specified, the `keep_alive` set with the corresponding submit async request will be used.
+Otherwise, it is possible to override the value and extend the validity of the request.
+When this period expires, the search, if still running, is cancelled.
+If the search is completed, its saved results are deleted.
+- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
+- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Specifies to wait for the search to be completed up until the provided timeout.
+Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires.
+By default no timeout is set meaning that the currently available results will be returned without any additional wait.
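+
+A typical polling pattern looks like the following sketch (the search ID is a placeholder for one returned by `client.asyncSearch.submit`):
+
+```ts
+// Poll a previously submitted async search, extending its retention
+// period and waiting up to two seconds for it to complete.
+const searchId = '...' // ID returned by an earlier asyncSearch.submit call
+const result = await client.asyncSearch.get({
+  id: searchId,
+  keep_alive: '5m',
+  wait_for_completion_timeout: '2s'
+})
+if (!result.is_running) {
+  console.log(result.response.hits.hits)
+}
+```
+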
+## client.asyncSearch.status [_async_search.status]
Get the async search status.
Get the status of a previously submitted async search request given its identifier, without retrieving search results.
+If the Elasticsearch security features are enabled, access to the status of a specific async search is restricted to:
+
+* The user or API key that submitted the original async search request.
+* Users that have the `monitor` cluster privilege or greater privileges.
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit)
```ts
client.asyncSearch.status({ id })
```
### Arguments [_arguments_async_search.status]
#### Request (object) [_request_async_search.status]
- **`id` (string)**: A unique identifier for the async search.
- **`keep_alive` (Optional, string | -1 | 0)**: The length of time that the async search needs to be available.
Ongoing async searches and any saved search results are deleted after this period.
+
+## client.asyncSearch.submit [_async_search.submit]
Run an async search.
When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field.
Partial results become available following the sort criteria that was requested.
Warning: Asynchronous search does not support scroll or search requests that include only the suggest section.
By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error.
+The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit) @@ -2133,87 +2331,102 @@ By default, Elasticsearch does not allow you to store an async search response l client.asyncSearch.submit({ ... }) ``` - -### Arguments [_arguments_44] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of index names to search; use `_all` or empty string to perform the operation on all indices - * **`aggregations` (Optional, Record)** - * **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** - * **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. - * **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. - * **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. - * **`highlight` (Optional, { encoder, fields })** - * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. - * **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. - * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. - * **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. - * **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. 
- * **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** - * **`profile` (Optional, boolean)** - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. - * **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** - * **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. - * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])** - * **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. - * **`slice` (Optional, { field, id, max })** - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** - * **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. - * **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. - * **`suggest` (Optional, { text })** - * **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. - * **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. - * **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. - * **`version` (Optional, boolean)**: If true, returns document version as part of a hit. 
- * **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. - * **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. - * **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. - * **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. - * **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. - * **`keep_alive` (Optional, string | -1 | 0)**: Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. - * **`keep_on_completion` (Optional, boolean)**: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. - * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - * **`allow_partial_search_results` (Optional, boolean)**: Indicate if an error should be returned if there is a partial search failure or timeout - * **`analyzer` (Optional, string)**: The analyzer to use for the query string - * **`analyze_wildcard` (Optional, boolean)**: Specify whether wildcard and prefix queries should be analyzed (default: false) - * **`batched_reduce_size` (Optional, number)**: Affects how often partial results become available, which happens whenever shard results are reduced. A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). - * **`ccs_minimize_roundtrips` (Optional, boolean)**: The default value is the only supported value. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query (AND or OR) - * **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
- * **`ignore_throttled` (Optional, boolean)**: Whether specified concrete, expanded or aliased indices should be ignored when throttled - * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - * **`lenient` (Optional, boolean)**: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - * **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests - * **`preference` (Optional, string)**: Specify the node or shard the operation should be performed on (default: random) - * **`request_cache` (Optional, boolean)**: Specify if request cache should be used for this request or not, defaults to true - * **`routing` (Optional, string)**: A list of specific routing values - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Search operation type - * **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions. - * **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: Specify suggest mode - * **`suggest_size` (Optional, number)**: How many suggestions to return in response - * **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. - * **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response - * **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response - * **`_source_excludes` (Optional, string | string[])**: A list of fields to exclude from the returned _source field - * **`_source_includes` (Optional, string | string[])**: A list of fields to extract and return from the _source field - * **`q` (Optional, string)**: Query in the Lucene query string syntax - - - -## autoscaling [_autoscaling] - - -### delete_autoscaling_policy [_delete_autoscaling_policy] - +### Arguments [_arguments_async_search.submit] + +#### Request (object) [_request_async_search.submit] +- **`index` (Optional, string | string[])**: A list of index names to search; use `_all` or empty string to perform the operation on all indices +- **`aggregations` (Optional, Record)** +- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** +- **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. +- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. +- **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. +- **`highlight` (Optional, { encoder, fields })** +- **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact +number of hits is returned at the cost of some performance. If false, the +response does not include the total number of hits matching the query. +Defaults to 10,000 hits. +- **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. 
+- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field +names matching these patterns in the hits.fields property of the response. +- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. +- **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are +not included in the search results. +- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** +- **`profile` (Optional, boolean)** +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. +- **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** +- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. +- **`search_after` (Optional, number | number | string | boolean | null[])** +- **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +- **`slice` (Optional, { field, id, max })** +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** +- **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +- **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +- **`suggest` (Optional, { text })** +- **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. 
If a query reaches this
+limit, Elasticsearch terminates the query early. Elasticsearch collects documents
+before sorting. Defaults to 0, which does not terminate query execution early.
+- **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response
+is received before the timeout expires, the request fails and returns an error.
+Defaults to no timeout.
+- **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting.
+- **`version` (Optional, boolean)**: If true, returns document version as part of a hit.
+- **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification
+of each hit. See Optimistic concurrency control.
+- **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified,
+no stored fields are included in the response. If this field is specified, the _source
+parameter defaults to false. You can pass _source: true to return both source fields
+and stored fields in the search response.
+- **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an index in the request path.
+- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+- **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics
+aggregation for its associated searches. You can retrieve these stats using
+the indices stats API.
+- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Blocks and waits until the search is completed up to a certain timeout.
+When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster.
+- **`keep_alive` (Optional, string | -1 | 0)**: Specifies how long the async search needs to be available.
+Ongoing async searches and any saved search results are deleted after this period.
+- **`keep_on_completion` (Optional, boolean)**: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`.
+- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+- **`allow_partial_search_results` (Optional, boolean)**: Indicate if an error should be returned if there is a partial search failure or timeout
+- **`analyzer` (Optional, string)**: The analyzer to use for the query string
+- **`analyze_wildcard` (Optional, boolean)**: Specify whether wildcard and prefix queries should be analyzed (default: false)
+- **`batched_reduce_size` (Optional, number)**: Affects how often partial results become available, which happens whenever shard results are reduced.
+A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default).
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: The default value is the only supported value.
+- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query (AND or OR) +- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`ignore_throttled` (Optional, boolean)**: Whether specified concrete, expanded or aliased indices should be ignored when throttled +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) +- **`lenient` (Optional, boolean)**: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored +- **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests +- **`preference` (Optional, string)**: Specify the node or shard the operation should be performed on (default: random) +- **`request_cache` (Optional, boolean)**: Specify if request cache should be used for this request or not, defaults to true +- **`routing` (Optional, string)**: A list of specific routing values +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Search operation type +- **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions. +- **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: Specify suggest mode +- **`suggest_size` (Optional, number)**: How many suggestions to return in response +- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. +- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response +- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response +- **`_source_excludes` (Optional, string | string[])**: A list of fields to exclude from the returned _source field +- **`_source_includes` (Optional, string | string[])**: A list of fields to extract and return from the _source field +- **`q` (Optional, string)**: Query in the Lucene query string syntax + +## client.autoscaling.deleteAutoscalingPolicy [_autoscaling.delete_autoscaling_policy] Delete an autoscaling policy. -::::{note} -This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: - +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. 
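As a minimal sketch, assuming an already-configured `client` instance (the policy name `my_autoscaling_policy` is hypothetical):

```
// Sketch only: the policy name below is made up for illustration.
const response = await client.autoscaling.deleteAutoscalingPolicy({
  name: 'my_autoscaling_policy'
})
// A successful call is acknowledged, e.g. { acknowledged: true }
console.log(response)
```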
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy) @@ -2221,33 +2434,29 @@ This feature is designed for indirect use by Elasticsearch Service, Elastic Clou client.autoscaling.deleteAutoscalingPolicy({ name }) ``` +### Arguments [_arguments_autoscaling.delete_autoscaling_policy] -### Arguments [_arguments_45] - -* **Request (object):** - - * **`name` (string)**: the name of the autoscaling policy - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_autoscaling_capacity [_get_autoscaling_capacity] +#### Request (object) [_request_autoscaling.delete_autoscaling_policy] +- **`name` (string)**: the name of the autoscaling policy +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.autoscaling.getAutoscalingCapacity [_autoscaling.get_autoscaling_capacity] Get the autoscaling capacity. -::::{note} -This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: - +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -This API gets the current autoscaling capacity based on the configured autoscaling policy. It will return information to size the cluster appropriately to the current workload. +This API gets the current autoscaling capacity based on the configured autoscaling policy. +It will return information to size the cluster appropriately to the current workload. The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. -The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. +The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. +This information is provided for diagnosis only. +Do not use this information to make autoscaling decisions. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity) @@ -2255,23 +2464,16 @@ The response contains decider-specific information you can use to diagnose how a client.autoscaling.getAutoscalingCapacity({ ... 
}) ``` +### Arguments [_arguments_autoscaling.get_autoscaling_capacity] -### Arguments [_arguments_46] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_autoscaling_policy [_get_autoscaling_policy] +#### Request (object) [_request_autoscaling.get_autoscaling_capacity] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +## client.autoscaling.getAutoscalingPolicy [_autoscaling.get_autoscaling_policy] Get an autoscaling policy. -::::{note} -This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: - +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity) @@ -2279,24 +2481,17 @@ This feature is designed for indirect use by Elasticsearch Service, Elastic Clou client.autoscaling.getAutoscalingPolicy({ name }) ``` +### Arguments [_arguments_autoscaling.get_autoscaling_policy] -### Arguments [_arguments_47] - -* **Request (object):** - - * **`name` (string)**: the name of the autoscaling policy - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### put_autoscaling_policy [_put_autoscaling_policy] +#### Request (object) [_request_autoscaling.get_autoscaling_policy] +- **`name` (string)**: the name of the autoscaling policy +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +## client.autoscaling.putAutoscalingPolicy [_autoscaling.put_autoscaling_policy] Create or update an autoscaling policy. -::::{note} -This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: - +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy) @@ -2304,31 +2499,22 @@ This feature is designed for indirect use by Elasticsearch Service, Elastic Clou client.autoscaling.putAutoscalingPolicy({ name }) ``` +### Arguments [_arguments_autoscaling.put_autoscaling_policy] -### Arguments [_arguments_48] - -* **Request (object):** - - * **`name` (string)**: the name of the autoscaling policy - * **`policy` (Optional, { roles, deciders })** - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
- - - -## cat [_cat] - - -### aliases [_aliases] +#### Request (object) [_request_autoscaling.put_autoscaling_policy] +- **`name` (string)**: the name of the autoscaling policy +- **`policy` (Optional, { roles, deciders })** +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.cat.aliases [_cat.aliases] Get aliases. -Get the cluster’s index aliases, including filter and routing information. This API does not return data stream aliases. - -::::{important} -CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. -:::: +Get the cluster's index aliases, including filter and routing information. +This API does not return data stream aliases. +IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases) @@ -2336,27 +2522,27 @@ CAT APIs are only intended for human consumption using the command line or the K client.cat.aliases({ ... }) ``` +### Arguments [_arguments_cat.aliases] -### Arguments [_arguments_49] - -* **Request (object):** - - * **`name` (Optional, string | string[])**: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicated that the request should never timeout, you can set it to `-1`. - - - -### allocation [_allocation] +#### Request (object) [_request_cat.aliases] +- **`name` (Optional, string | string[])**: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +It supports a list of values, such as `open,hidden`. 
+- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, you can set it to `-1`.
+
+## client.cat.allocation [_cat.allocation]
Get shard allocation information.

Get a snapshot of the number of shards allocated to each data node and their disk space.

-::::{important}
-CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
-::::
-
+IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation)

```
client.cat.allocation({ ... })
```

+### Arguments [_arguments_cat.allocation]

-### Arguments [_arguments_50]
-
-* **Request (object):**
-
-  * **`node_id` (Optional, string | string[])**: A list of node identifiers or names used to limit the returned information.
-  * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
-  * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node.
-  * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
-
-
-### component_templates [_component_templates]
+#### Request (object) [_request_cat.allocation]
+- **`node_id` (Optional, string | string[])**: A list of node identifiers or names used to limit the returned information.
+- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
+- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false` the list of selected nodes are computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+
+## client.cat.componentTemplates [_cat.component_templates]
Get component templates.

-Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
-
-::::{important}
-CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API.
-::::
+Get information about component templates in a cluster.
+Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the get component template API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates) @@ -2393,27 +2580,30 @@ CAT APIs are only intended for human consumption using the command line or Kiban client.cat.componentTemplates({ ... }) ``` +### Arguments [_arguments_cat.component_templates] -### Arguments [_arguments_51] - -* **Request (object):** - - * **`name` (Optional, string)**: The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. - - - -### count [_count_2] +#### Request (object) [_request_cat.component_templates] +- **`name` (Optional, string)**: The name of the component template. +It accepts wildcard expressions. +If it is omitted, all component templates are returned. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +## client.cat.count [_cat.count] Get a document count. -Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. - -::::{important} -CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. -:::: +Get quick access to a document count for a data stream, an index, or an entire cluster. +The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the count API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count) @@ -2421,25 +2611,24 @@ CAT APIs are only intended for human consumption using the command line or Kiban client.cat.count({ ... 
}) ``` +### Arguments [_arguments_cat.count] -### Arguments [_arguments_52] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - - - -### fielddata [_fielddata] +#### Request (object) [_request_cat.count] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. +It supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +## client.cat.fielddata [_cat.fielddata] Get field data cache information. Get the amount of heap memory currently used by the field data cache on every data node in the cluster. -::::{important} -cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. -:::: - +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the nodes stats API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata) @@ -2447,24 +2636,29 @@ cat APIs are only intended for human consumption using the command line or Kiban client.cat.fielddata({ ... }) ``` +### Arguments [_arguments_cat.fielddata] -### Arguments [_arguments_53] - -* **Request (object):** - - * **`fields` (Optional, string | string[])**: List of fields used to limit returned information. To retrieve all fields, omit this parameter. - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - - - -### health [_health] +#### Request (object) [_request_cat.fielddata] +- **`fields` (Optional, string | string[])**: List of fields used to limit returned information. +To retrieve all fields, omit this parameter. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +## client.cat.health [_cat.health] Get the cluster health status. -::::{important} -CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. 
You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. -:::: - +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the cluster health API. +This API is often used to check malfunctioning clusters. +To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: +`HH:MM:SS`, which is human-readable but includes no date information; +`Unix epoch time`, which is machine-sortable and includes date information. +The latter format is useful for cluster recoveries that take multiple days. +You can use the cat health API to verify cluster health across multiple nodes. +You also can use the API to track the recovery of a large cluster over a longer period of time. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health) @@ -2472,18 +2666,17 @@ CAT APIs are only intended for human consumption using the command line or Kiban client.cat.health({ ... }) ``` +### Arguments [_arguments_cat.health] -### Arguments [_arguments_54] - -* **Request (object):** - - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. - * **`ts` (Optional, boolean)**: If true, returns `HH:MM:SS` and Unix epoch timestamps. - - - -### help [_help] +#### Request (object) [_request_cat.health] +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`ts` (Optional, boolean)**: If true, returns `HH:MM:SS` and Unix epoch timestamps. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +## client.cat.help [_cat.help] Get CAT help. Get help for the CAT APIs. @@ -2495,17 +2688,23 @@ client.cat.help() ``` -### indices [_indices] - +## client.cat.indices [_cat.indices] Get index information. Get high-level information about indices in a cluster, including backing indices for data streams. -Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas +Use this request to get the following information for each index in a cluster: +- shard count +- document count +- deleted document count +- primary store size +- total store size of all shards, including shard replicas -These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. +These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. +To get an accurate count of Elasticsearch documents, use the cat count or count APIs. -CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use an index endpoint. +CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use an index endpoint. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices) @@ -2513,32 +2712,29 @@ CAT APIs are only intended for human consumption using the command line or Kiban client.cat.indices({ ... }) ``` - -### Arguments [_arguments_55] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. - * **`health` (Optional, Enum("green" | "yellow" | "red"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. - * **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. - * **`pri` (Optional, boolean)**: If true, the response only includes information from primary shards. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### master [_master] - +### Arguments [_arguments_cat.indices] + +#### Request (object) [_request_cat.indices] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. +- **`health` (Optional, Enum("green" | "yellow" | "red"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. +- **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. +- **`pri` (Optional, boolean)**: If true, the response only includes information from primary shards. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. + +## client.cat.master [_cat.master] Get master node information. Get information about the master node, including the ID, bound IP address, and name. 
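As a quick sketch, assuming a configured `client`, you can restrict the output to specific columns with the `h` parameter documented below (`id`, `ip`, and `node` are standard cat.master columns):

```
// Sketch only: returns the elected master's ID, IP address, and node name.
const master = await client.cat.master({
  h: ['id', 'ip', 'node']
})
console.log(master)
```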
-::::{important} -cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. -:::: - +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master) @@ -2546,26 +2742,27 @@ cat APIs are only intended for human consumption using the command line or Kiban client.cat.master({ ... }) ``` +### Arguments [_arguments_cat.master] -### Arguments [_arguments_56] - -* **Request (object):** - - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### ml_data_frame_analytics [_ml_data_frame_analytics] +#### Request (object) [_request_cat.master] +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cat.mlDataFrameAnalytics [_cat.ml_data_frame_analytics] Get data frame analytics jobs. Get configuration and usage information about data frame analytics jobs. -::::{important} -CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. -:::: - +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get data frame analytics jobs statistics API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics) @@ -2573,30 +2770,28 @@ CAT APIs are only intended for human consumption using the Kibana console or com client.cat.mlDataFrameAnalytics({ ... }) ``` +### Arguments [_arguments_cat.ml_data_frame_analytics] -### Arguments [_arguments_57] - -* **Request (object):** - - * **`id` (Optional, string)**: The ID of the data frame analytics to fetch - * **`allow_no_match` (Optional, boolean)**: Whether to ignore if a wildcard expression matches no configs. 
(This includes `_all` string or when no configs have been specified) - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit in which to display byte values - * **`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names to display. - * **`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names or column aliases used to sort the response. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### ml_datafeeds [_ml_datafeeds] +#### Request (object) [_request_cat.ml_data_frame_analytics] +- **`id` (Optional, string)**: The ID of the data frame analytics to fetch +- **`allow_no_match` (Optional, boolean)**: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit in which to display byte values +- **`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names to display. +- **`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names or column aliases used to sort the +response. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.mlDatafeeds [_cat.ml_datafeeds] Get datafeeds. -Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. 
- -::::{important} -CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. -:::: +Get configuration and usage information about datafeeds. +This API returns a maximum of 10,000 datafeeds. +If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` +cluster privileges to use this API. +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get datafeed statistics API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds) @@ -2604,31 +2799,34 @@ CAT APIs are only intended for human consumption using the Kibana console or com client.cat.mlDatafeeds({ ... }) ``` +### Arguments [_arguments_cat.ml_datafeeds] -### Arguments [_arguments_58] - -* **Request (object):** - - * **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +#### Request (object) [_request_cat.ml_datafeeds] +- **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: * Contains wildcard expressions and there are no datafeeds that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. -If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. ** *`h` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names to display. *** *`s` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names or column aliases used to sort the response. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. - - -### ml_jobs [_ml_jobs] +If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when +there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only +partial matches. +- **`h` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names to display. +- **`s` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names or column aliases used to sort the response. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +## client.cat.mlJobs [_cat.ml_jobs] Get anomaly detection jobs. 
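Before the parameter details that follow, here is a hedged sketch of a typical call, assuming a configured `client` (the column names are drawn from the `h` enum documented below):

```
// Sketch only: list jobs with a few columns, sorted by job ID.
const jobs = await client.cat.mlJobs({
  h: ['id', 'state', 'data.processed_records'],
  s: ['id'],
  time: 's',
  allow_no_match: true
})
console.log(jobs)
```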
-Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. - -::::{important} -CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. -:::: +Get configuration and usage information for anomaly detection jobs. +This API returns a maximum of 10,000 jobs. +If the Elasticsearch security features are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get anomaly detection job statistics API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs) @@ -2636,34 +2834,32 @@ CAT APIs are only intended for human consumption using the Kibana console or com client.cat.mlJobs({ ... }) ``` +### Arguments [_arguments_cat.ml_jobs] -### Arguments [_arguments_59] - -* **Request (object):** - - * **`job_id` (Optional, string)**: Identifier for the anomaly detection job. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +#### Request (object) [_request_cat.ml_jobs] +- **`job_id` (Optional, string)**: Identifier for the anomaly detection job. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: * Contains wildcard expressions and there are no jobs that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. -If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. - -```json -`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb")): The unit used to display byte values. 
`h` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[]): List of column names to display. 
`s` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[]): List of column names or column aliases used to sort the response. `time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d")): The unit used to display time values. -``` - -### ml_trained_models [_ml_trained_models] +If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there +are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial +matches. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. 
+- **`h` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])**: List of column names to display. 
+- **`s` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])**: List of column names or column aliases used to sort the response. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +## client.cat.mlTrainedModels [_cat.ml_trained_models] Get trained models. Get configuration and usage information about inference trained models. -::::{important} -CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. -:::: - +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. 
They are not intended for use by applications. For +application consumption, use the get trained models statistics API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models) @@ -2671,27 +2867,25 @@ CAT APIs are only intended for human consumption using the Kibana console or com client.cat.mlTrainedModels({ ... }) ``` +### Arguments [_arguments_cat.ml_trained_models] -### Arguments [_arguments_60] - -* **Request (object):** - - * **`model_id` (Optional, string)**: A unique identifier for the trained model. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`h` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names to display. - * **`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names or aliases used to sort the response. - * **`from` (Optional, number)**: Skips the specified number of transforms. - * **`size` (Optional, number)**: The maximum number of transforms to display. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### nodeattrs [_nodeattrs] +#### Request (object) [_request_cat.ml_trained_models] +- **`model_id` (Optional, string)**: A unique identifier for the trained model. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. +If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. +If `false`, the API returns a 404 status code when there are no matches or only partial matches. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. 
+- **`h` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names to display.
+- **`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names or aliases used to sort the response.
+- **`from` (Optional, number)**: Skips the specified number of trained models.
+- **`size` (Optional, number)**: The maximum number of trained models to display.
+- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values.
+
+## client.cat.nodeattrs [_cat.nodeattrs]
 Get node attribute information.
 
-Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
+Get information about custom node attributes.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
 
 [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs)
 
@@ -2699,21 +2893,24 @@
 client.cat.nodeattrs({ ... })
 ```
 
+### Arguments [_arguments_cat.nodeattrs]
 
-### Arguments [_arguments_61]
-
-* **Request (object):**
-
- * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
-
-
-
-### nodes [_nodes]
+#### Request (object) [_request_cat.nodeattrs]
+- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false` the list of selected nodes are computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
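+
+A minimal usage sketch (the column names are illustrative examples of valid `h` and `s` values):
+
+```ts
+// List each node's custom attributes, showing only the node name,
+// attribute name, and attribute value, sorted by node name.
+const attrs = await client.cat.nodeattrs({
+  h: ['node', 'attr', 'value'],
+  s: ['node:asc']
+})
+```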
+## client.cat.nodes [_cat.nodes] Get node information. -Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. +Get information about the nodes in a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes) @@ -2721,24 +2918,24 @@ Get information about the nodes in a cluster. IMPORTANT: cat APIs are only inten client.cat.nodes({ ... }) ``` +### Arguments [_arguments_cat.nodes] -### Arguments [_arguments_62] - -* **Request (object):** - - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`full_id` (Optional, boolean | string)**: If `true`, return the full node ID. If `false`, return the shortened node ID. - * **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### pending_tasks [_pending_tasks] +#### Request (object) [_request_cat.nodes] +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. +- **`full_id` (Optional, boolean | string)**: If `true`, return the full node ID. If `false`, return the shortened node ID. +- **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.pendingTasks [_cat.pending_tasks] Get pending task information. -Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. +Get information about cluster-level changes that have not yet taken effect. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks) @@ -2746,22 +2943,25 @@ Get information about cluster-level changes that have not yet taken effect. IMPO client.cat.pendingTasks({ ... 
}) ``` +### Arguments [_arguments_cat.pending_tasks] -### Arguments [_arguments_63] - -* **Request (object):** - - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### plugins [_plugins] +#### Request (object) [_request_cat.pending_tasks] +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.plugins [_cat.plugins] Get plugin information. -Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. +Get a list of plugins running on each node of a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins) @@ -2769,22 +2969,27 @@ Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are client.cat.plugins({ ... }) ``` +### Arguments [_arguments_cat.plugins] -### Arguments [_arguments_64] - -* **Request (object):** - - * **`include_bootstrap` (Optional, boolean)**: Include bootstrap plugins in the response - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### recovery [_recovery] +#### Request (object) [_request_cat.plugins] +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. 
+- **`include_bootstrap` (Optional, boolean)**: Include bootstrap plugins in the response +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cat.recovery [_cat.recovery] Get shard recovery information. -Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. +Get information about ongoing and completed shard recoveries. +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. +For data streams, the API returns information about the stream’s backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery) @@ -2792,24 +2997,25 @@ Get information about ongoing and completed shard recoveries. Shard recovery is client.cat.recovery({ ... }) ``` +### Arguments [_arguments_cat.recovery] -### Arguments [_arguments_65] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### repositories [_repositories] +#### Request (object) [_request_cat.recovery] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. +- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. 
+- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.repositories [_cat.repositories] Get snapshot repository information. -Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. +Get a list of snapshot repositories for a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories) @@ -2817,21 +3023,25 @@ Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only client.cat.repositories({ ... }) ``` +### Arguments [_arguments_cat.repositories] -### Arguments [_arguments_66] - -* **Request (object):** - - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### segments [_segments] +#### Request (object) [_request_cat.repositories] +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cat.segments [_cat.segments] Get segment information. -Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. +Get low-level information about the Lucene segments in index shards. +For data streams, the API returns information about the backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. 
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments) @@ -2839,23 +3049,29 @@ Get low-level information about the Lucene segments in index shards. For data st client.cat.segments({ ... }) ``` - -### Arguments [_arguments_67] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### shards [_shards] - +### Arguments [_arguments_cat.segments] + +#### Request (object) [_request_cat.segments] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + +## client.cat.shards [_cat.shards] Get shard information. -Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. +Get information about the shards in a cluster. +For data streams, the API returns information about the backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards) @@ -2863,23 +3079,26 @@ Get information about the shards in a cluster. For data streams, the API returns client.cat.shards({ ... }) ``` +### Arguments [_arguments_cat.shards] -### Arguments [_arguments_68] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. 
- * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### snapshots [_snapshots] +#### Request (object) [_request_cat.shards] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.snapshots [_cat.snapshots] Get snapshot information. -Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. +Get information about the snapshots stored in one or more repositories. +A snapshot is a backup of an index or running Elasticsearch cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots) @@ -2887,50 +3106,55 @@ Get information about the snapshots stored in one or more repositories. A snapsh client.cat.snapshots({ ... }) ``` +### Arguments [_arguments_cat.snapshots] -### Arguments [_arguments_69] - -* **Request (object):** - - * **`repository` (Optional, string | string[])**: A list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### tasks [_tasks] +#### Request (object) [_request_cat.snapshots] +- **`repository` (Optional, string | string[])**: A list of snapshot repositories used to limit the request. +Accepts wildcard expressions. +`_all` returns all repositories. +If any repository fails during the request, Elasticsearch returns an error. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots. 
+- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values.
+
+## client.cat.tasks [_cat.tasks]
 Get task information.
 
-Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.
+Get information about tasks currently running in the cluster.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.
 
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks)
 
 ```ts
 client.cat.tasks({ ... })
 ```
 
+### Arguments [_arguments_cat.tasks]
 
-### Arguments [_arguments_70]
-
-* **Request (object):**
-
- * **`actions` (Optional, string[])**: The task action names, which are used to limit the response.
- * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries.
- * **`nodes` (Optional, string[])**: Unique node identifiers, which are used to limit the response.
- * **`parent_task_id` (Optional, string)**: The parent task identifier, which is used to limit the response.
- * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values.
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
- * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed.
-
-
-
-### templates [_templates]
+#### Request (object) [_request_cat.tasks]
+- **`actions` (Optional, string[])**: The task action names, which are used to limit the response.
+- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about the running tasks.
+- **`nodes` (Optional, string[])**: Unique node identifiers, which are used to limit the response.
+- **`parent_task_id` (Optional, string)**: The parent task identifier, which is used to limit the response.
+- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed.
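+
+A minimal usage sketch (the action filter is illustrative):
+
+```ts
+// Show currently running search-related tasks with per-task detail,
+// displaying time values in milliseconds.
+const tasks = await client.cat.tasks({
+  actions: ['*search*'],
+  detailed: true,
+  time: 'ms'
+})
+```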
+## client.cat.templates [_cat.templates] Get index template information. -Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. +Get information about the index templates in a cluster. +You can use index templates to apply index settings and field mappings to new indices at creation. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates) @@ -2938,22 +3162,27 @@ Get information about the index templates in a cluster. You can use index templa client.cat.templates({ ... }) ``` +### Arguments [_arguments_cat.templates] -### Arguments [_arguments_71] - -* **Request (object):** - - * **`name` (Optional, string)**: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### thread_pool [_thread_pool] +#### Request (object) [_request_cat.templates] +- **`name` (Optional, string)**: The name of the template to return. +Accepts wildcard expressions. If omitted, all templates are returned. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cat.threadPool [_cat.thread_pool] Get thread pool statistics. -Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. +Get thread pool statistics for each node in a cluster. +Returned information includes all built-in thread pools and custom thread pools. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool) @@ -2961,25 +3190,30 @@ Get thread pool statistics for each node in a cluster. Returned information incl client.cat.threadPool({ ... }) ``` +### Arguments [_arguments_cat.thread_pool] -### Arguments [_arguments_72] - -* **Request (object):** - - * **`thread_pool_patterns` (Optional, string | string[])**: A list of thread pool names used to limit the request. Accepts wildcard expressions. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### transforms [_transforms] +#### Request (object) [_request_cat.thread_pool] +- **`thread_pool_patterns` (Optional, string | string[])**: A list of thread pool names used to limit the request. +Accepts wildcard expressions. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cat.transforms [_cat.transforms] Get transform information. Get configuration and usage information about transforms. -CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. +CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get transform statistics API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms) @@ -2987,27 +3221,24 @@ CAT APIs are only intended for human consumption using the Kibana console or com client.cat.transforms({ ... }) ``` +### Arguments [_arguments_cat.transforms] -### Arguments [_arguments_73] - -* **Request (object):** +#### Request (object) [_request_cat.transforms] +- **`transform_id` (Optional, string)**: A transform identifier or a wildcard expression. +If you do not specify one of these options, the API returns information for all transforms. 
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. +If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. +If `false`, the request returns a 404 status code when there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of transforms. +- **`h` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names to display. +- **`s` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names or column aliases used to sort the response. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`size` (Optional, number)**: The maximum number of transforms to obtain. 
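+
+A minimal usage sketch (the transform ID pattern and column choices are illustrative):
+
+```ts
+// Summarize transforms whose IDs start with "ecommerce-", tolerating
+// the case where nothing matches the wildcard expression.
+const transforms = await client.cat.transforms({
+  transform_id: 'ecommerce-*',
+  allow_no_match: true,
+  h: ['id', 'state', 'documents_processed'],
+  size: 100
+})
+```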
- * **`transform_id` (Optional, string)**: A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all transforms. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. - * **`from` (Optional, number)**: Skips the specified number of transforms. - * **`h` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names to display. - * **`s` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names or column aliases used to sort the response. 
- * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. - * **`size` (Optional, number)**: The maximum number of transforms to obtain. +## client.ccr.deleteAutoFollowPattern [_ccr.delete_auto_follow_pattern] +Delete auto-follow patterns. - - -## ccr [_ccr] - - -### delete_auto_follow_pattern [_delete_auto_follow_pattern] - -Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns. +Delete a collection of cross-cluster replication auto-follow patterns. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern) @@ -3015,19 +3246,18 @@ Delete auto-follow patterns. Delete a collection of cross-cluster replication au client.ccr.deleteAutoFollowPattern({ name }) ``` +### Arguments [_arguments_ccr.delete_auto_follow_pattern] -### Arguments [_arguments_74] - -* **Request (object):** - - * **`name` (string)**: The name of the auto follow pattern. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - +#### Request (object) [_request_ccr.delete_auto_follow_pattern] +- **`name` (string)**: The auto-follow pattern collection to delete. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. - -### follow [_follow] - -Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. +## client.ccr.follow [_ccr.follow] +Create a follower. +Create a cross-cluster replication follower index that follows a specific leader index. +When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow) @@ -3035,81 +3265,87 @@ Create a follower. Create a cross-cluster replication follower index that follow client.ccr.follow({ index, leader_index, remote_cluster }) ``` +### Arguments [_arguments_ccr.follow] + +#### Request (object) [_request_ccr.follow] +- **`index` (string)**: The name of the follower index. +- **`leader_index` (string)**: The name of the index in the leader cluster to follow. +- **`remote_cluster` (string)**: The remote cluster containing the leader index. +- **`data_stream_name` (Optional, string)**: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. +- **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding reads requests from the remote cluster. +- **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower. +- **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster. +- **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. 
+- **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when +retrying. +- **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be +deferred until the number of queued operations goes below the limit. +- **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will +be deferred until the total bytes of queued operations goes below the limit. +- **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower. +- **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower. +- **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. +When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. +Then the follower will immediately attempt to read from the leader again. +- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Settings to override from the leader index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be +active. +A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the +remote Lucene segment files to the follower index. + +## client.ccr.followInfo [_ccr.follow_info] +Get follower information. + +Get information about all cross-cluster replication follower indices. +For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. -### Arguments [_arguments_75] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info) -* **Request (object):** +```ts +client.ccr.followInfo({ index }) +``` - * **`index` (string)**: The name of the follower index. - * **`leader_index` (string)**: The name of the index in the leader cluster to follow. 
- * **`remote_cluster` (string)**: The remote cluster containing the leader index. - * **`data_stream_name` (Optional, string)**: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. - * **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding reads requests from the remote cluster. - * **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower. - * **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster. - * **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. - * **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. - * **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. - * **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. - * **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower. - * **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower. - * **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. - * **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Settings to override from the leader index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index before being active. 
Restoring a follower shard requires transferring all the remote Lucene segment files to the follower index. +### Arguments [_arguments_ccr.follow_info] +#### Request (object) [_request_ccr.follow_info] +- **`index` (string | string[])**: A comma-delimited list of follower index patterns. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +## client.ccr.followStats [_ccr.follow_stats] +Get follower stats. -### follow_info [_follow_info] +Get cross-cluster replication follower stats. +The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. -Get follower information. Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info) - -```ts -client.ccr.followInfo({ index }) -``` - - -### Arguments [_arguments_76] - -* **Request (object):** - - * **`index` (string | string[])**: A list of index patterns; use `_all` to perform the operation on all indices - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### follow_stats [_follow_stats] - -Get follower stats. Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats) ```ts client.ccr.followStats({ index }) ``` +### Arguments [_arguments_ccr.follow_stats] -### Arguments [_arguments_77] - -* **Request (object):** +#### Request (object) [_request_ccr.follow_stats] +- **`index` (string | string[])**: A comma-delimited list of index patterns. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. - * **`index` (string | string[])**: A list of index patterns; use `_all` to perform the operation on all indices - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.ccr.forgetFollower [_ccr.forget_follower] +Forget a follower. +Remove the cross-cluster replication follower retention leases from the leader. +A following index takes out retention leases on its leader index. +These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. +When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. +However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. 
+While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. +This API exists to enable manually removing the leases when the unfollow API is unable to do so. - -### forget_follower [_forget_follower] - -Forget a follower. Remove the cross-cluster replication follower retention leases from the leader. - -A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so. - -::::{note} -This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. -:::: - +NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. +The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower) @@ -3117,23 +3353,20 @@ This API does not stop replication by a following index. If you use this API wit client.ccr.forgetFollower({ index }) ``` +### Arguments [_arguments_ccr.forget_follower] -### Arguments [_arguments_78] - -* **Request (object):** - - * **`index` (string)**: the name of the leader index for which specified follower retention leases should be removed - * **`follower_cluster` (Optional, string)** - * **`follower_index` (Optional, string)** - * **`follower_index_uuid` (Optional, string)** - * **`leader_remote_cluster` (Optional, string)** - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - +#### Request (object) [_request_ccr.forget_follower] +- **`index` (string)**: the name of the leader index for which specified follower retention leases should be removed +- **`follower_cluster` (Optional, string)** +- **`follower_index` (Optional, string)** +- **`follower_index_uuid` (Optional, string)** +- **`leader_remote_cluster` (Optional, string)** +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.ccr.getAutoFollowPattern [_ccr.get_auto_follow_pattern] +Get auto-follow patterns. 
-### get_auto_follow_pattern [_get_auto_follow_pattern] - -Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. +Get cross-cluster replication auto-follow patterns. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1) @@ -3141,21 +3374,25 @@ Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. client.ccr.getAutoFollowPattern({ ... }) ``` +### Arguments [_arguments_ccr.get_auto_follow_pattern] -### Arguments [_arguments_79] - -* **Request (object):** - - * **`name` (Optional, string)**: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - +#### Request (object) [_request_ccr.get_auto_follow_pattern] +- **`name` (Optional, string)**: The auto-follow pattern collection that you want to retrieve. +If you do not specify a name, the API returns information for all collections. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +## client.ccr.pauseAutoFollowPattern [_ccr.pause_auto_follow_pattern] +Pause an auto-follow pattern. -### pause_auto_follow_pattern [_pause_auto_follow_pattern] +Pause a cross-cluster replication auto-follow pattern. +When the API returns, the auto-follow pattern is inactive. +New indices that are created on the remote cluster and match the auto-follow patterns are ignored. -Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored. - -You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. +You can resume auto-following with the resume auto-follow pattern API. +When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. +Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern) @@ -3163,19 +3400,21 @@ You can resume auto-following with the resume auto-follow pattern API. When it r client.ccr.pauseAutoFollowPattern({ name }) ``` +### Arguments [_arguments_ccr.pause_auto_follow_pattern] -### Arguments [_arguments_80] - -* **Request (object):** - - * **`name` (string)**: The name of the auto follow pattern that should pause discovering new indices to follow. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
- +#### Request (object) [_request_ccr.pause_auto_follow_pattern] +- **`name` (string)**: The name of the auto-follow pattern to pause. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +## client.ccr.pauseFollow [_ccr.pause_follow] +Pause a follower. -### pause_follow [_pause_follow] - -Pause a follower. Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. +Pause a cross-cluster replication follower index. +The follower index will not fetch any additional operations from the leader index. +You can resume following with the resume follower API. +You can pause and resume a follower index to change the configuration of the following task. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow) @@ -3183,21 +3422,22 @@ Pause a follower. Pause a cross-cluster replication follower index. The follower client.ccr.pauseFollow({ index }) ``` +### Arguments [_arguments_ccr.pause_follow] -### Arguments [_arguments_81] - -* **Request (object):** - - * **`index` (string)**: The name of the follower index that should pause following its leader index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - +#### Request (object) [_request_ccr.pause_follow] +- **`index` (string)**: The name of the follower index. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +## client.ccr.putAutoFollowPattern [_ccr.put_auto_follow_pattern] +Create or update auto-follow patterns. +Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. +Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. +Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. -### put_auto_follow_pattern [_put_auto_follow_pattern] - -Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. - -This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. +This API can also be used to update auto-follow patterns. +NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. 
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern) @@ -3205,34 +3445,33 @@ This API can also be used to update auto-follow patterns. NOTE: Follower indices client.ccr.putAutoFollowPattern({ name, remote_cluster }) ``` - -### Arguments [_arguments_82] - -* **Request (object):** - - * **`name` (string)**: The name of the collection of auto-follow patterns. - * **`remote_cluster` (string)**: The remote cluster containing the leader indices to match against. - * **`follow_index_pattern` (Optional, string)**: The name of follower index. The template `{{leader_index}}` can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use `{{leader_index}}`; CCR does not support changes to the names of a follower data stream’s backing indices. - * **`leader_index_patterns` (Optional, string[])**: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. - * **`leader_index_exclusion_patterns` (Optional, string[])**: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names are matching one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed. - * **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding reads requests from the remote cluster. - * **`settings` (Optional, Record)**: Settings to override from the leader index. Note that certain settings can not be overrode (e.g., index.number_of_shards). - * **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding reads requests from the remote cluster. - * **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. - * **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster. - * **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. - * **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. - * **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. - * **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. - * **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower. - * **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower. 
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
-
-
-### resume_auto_follow_pattern [_resume_auto_follow_pattern]
-
-Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.
+### Arguments [_arguments_ccr.put_auto_follow_pattern]
+
+#### Request (object) [_request_ccr.put_auto_follow_pattern]
+- **`name` (string)**: The name of the collection of auto-follow patterns.
+- **`remote_cluster` (string)**: The remote cluster containing the leader indices to match against.
+- **`follow_index_pattern` (Optional, string)**: The name of the follower index. The template `{{leader_index}}` can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use `{{leader_index}}`; CCR does not support changes to the names of a follower data stream’s backing indices.
+- **`leader_index_patterns` (Optional, string[])**: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field.
+- **`leader_index_exclusion_patterns` (Optional, string[])**: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed.
+- **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding read requests from the remote cluster.
+- **`settings` (Optional, Record)**: Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards).
+- **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower.
+- **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
+- **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster.
+- **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
+- **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying.
+- **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.
+- **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit.
+- **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower.
+- **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
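+
+As a quick sketch, an auto-follow pattern could be created like this (every name and pattern below is an illustrative placeholder, not a default):
+
+```ts
+// Follow every new "leader-*" index on the configured remote cluster.
+await client.ccr.putAutoFollowPattern({
+  name: 'my-auto-follow-pattern',
+  remote_cluster: 'remote-cluster',
+  leader_index_patterns: ['leader-*'],
+  follow_index_pattern: '{{leader_index}}-follower'
+})
+```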
+
+## client.ccr.resumeAutoFollowPattern [_ccr.resume_auto_follow_pattern]
+Resume an auto-follow pattern.
+
+Resume a cross-cluster replication auto-follow pattern that was paused.
+The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster.
+Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern)

```ts
client.ccr.resumeAutoFollowPattern({ name })
```

+### Arguments [_arguments_ccr.resume_auto_follow_pattern]

-### Arguments [_arguments_83]
-
-* **Request (object):**
-
- * **`name` (string)**: The name of the auto follow pattern to resume discovering new indices to follow.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
-

+#### Request (object) [_request_ccr.resume_auto_follow_pattern]
+- **`name` (string)**: The name of the auto-follow pattern to resume.
+- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.

-
-### resume_follow [_resume_follow]
-
-Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index.
+## client.ccr.resumeFollow [_ccr.resume_follow]
+Resume a follower.
+Resume a cross-cluster replication follower index that was paused.
+The follower index could have been paused with the pause follower API.
+Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks.
+When this API returns, the follower index will resume fetching operations from the leader index.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow)

```ts
client.ccr.resumeFollow({ index })
```

+### Arguments [_arguments_ccr.resume_follow]

-### Arguments [_arguments_84]
-
-* **Request (object):**
-
- * **`index` (string)**: The name of the follow index to resume following.
- * **`max_outstanding_read_requests` (Optional, number)** - * **`max_outstanding_write_requests` (Optional, number)** - * **`max_read_request_operation_count` (Optional, number)** - * **`max_read_request_size` (Optional, string)** - * **`max_retry_delay` (Optional, string | -1 | 0)** - * **`max_write_buffer_count` (Optional, number)** - * **`max_write_buffer_size` (Optional, string)** - * **`max_write_request_operation_count` (Optional, number)** - * **`max_write_request_size` (Optional, string)** - * **`read_poll_timeout` (Optional, string | -1 | 0)** - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +#### Request (object) [_request_ccr.resume_follow] +- **`index` (string)**: The name of the follow index to resume following. +- **`max_outstanding_read_requests` (Optional, number)** +- **`max_outstanding_write_requests` (Optional, number)** +- **`max_read_request_operation_count` (Optional, number)** +- **`max_read_request_size` (Optional, string)** +- **`max_retry_delay` (Optional, string | -1 | 0)** +- **`max_write_buffer_count` (Optional, number)** +- **`max_write_buffer_size` (Optional, string)** +- **`max_write_request_operation_count` (Optional, number)** +- **`max_write_request_size` (Optional, string)** +- **`read_poll_timeout` (Optional, string | -1 | 0)** +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.ccr.stats [_ccr.stats] +Get cross-cluster replication stats. - -### stats [_stats] - -Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. +This API returns stats about auto-following and the same shard-level stats as the get follower stats API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats) @@ -3290,24 +3527,23 @@ Get cross-cluster replication stats. This API returns stats about auto-following client.ccr.stats({ ... }) ``` +### Arguments [_arguments_ccr.stats] -### Arguments [_arguments_85] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +#### Request (object) [_request_ccr.stats] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.ccr.unfollow [_ccr.unfollow] +Unfollow an index. +Convert a cross-cluster replication follower index to a regular index. +The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. +The follower index must be paused and closed before you call the unfollow API. -### unfollow [_unfollow] - -Unfollow an index. Convert a cross-cluster replication follower index to a regular index. 
The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. - -::::{note} -Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. -:::: - +> info +> Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow) @@ -3315,22 +3551,20 @@ Currently cross-cluster replication does not support converting an existing regu client.ccr.unfollow({ index }) ``` +### Arguments [_arguments_ccr.unfollow] -### Arguments [_arguments_86] - -* **Request (object):** - - * **`index` (string)**: The name of the follower index that should be turned into a regular index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -## cluster [_cluster] +#### Request (object) [_request_ccr.unfollow] +- **`index` (string)**: The name of the follower index. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. - -### allocation_explain [_allocation_explain] - -Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. +## client.cluster.allocationExplain [_cluster.allocation_explain] +Explain the shard allocations. +Get explanations for shard allocations in the cluster. +For unassigned shards, it provides an explanation for why the shard is unassigned. +For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. +This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain) @@ -3338,24 +3572,20 @@ Explain the shard allocations. Get explanations for shard allocations in the clu client.cluster.allocationExplain({ ... }) ``` +### Arguments [_arguments_cluster.allocation_explain] -### Arguments [_arguments_87] - -* **Request (object):** - - * **`current_node` (Optional, string)**: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. - * **`index` (Optional, string)**: Specifies the name of the index that you would like an explanation for. - * **`primary` (Optional, boolean)**: If true, returns explanation for the primary shard for the given shard ID. 
- * **`shard` (Optional, number)**: Specifies the ID of the shard that you would like an explanation for. - * **`include_disk_info` (Optional, boolean)**: If true, returns information about disk usage and shard sizes. - * **`include_yes_decisions` (Optional, boolean)**: If true, returns YES decisions in explanation. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +#### Request (object) [_request_cluster.allocation_explain] +- **`current_node` (Optional, string)**: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. +- **`index` (Optional, string)**: Specifies the name of the index that you would like an explanation for. +- **`primary` (Optional, boolean)**: If true, returns explanation for the primary shard for the given shard ID. +- **`shard` (Optional, number)**: Specifies the ID of the shard that you would like an explanation for. +- **`include_disk_info` (Optional, boolean)**: If true, returns information about disk usage and shard sizes. +- **`include_yes_decisions` (Optional, boolean)**: If true, returns YES decisions in explanation. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - -### delete_component_template [_delete_component_template] - -Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. +## client.cluster.deleteComponentTemplate [_cluster.delete_component_template] +Delete component templates. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) @@ -3363,20 +3593,18 @@ Delete component templates. Component templates are building blocks for construc client.cluster.deleteComponentTemplate({ name }) ``` +### Arguments [_arguments_cluster.delete_component_template] -### Arguments [_arguments_88] - -* **Request (object):** +#### Request (object) [_request_cluster.delete_component_template] +- **`name` (string | string[])**: List or wildcard expression of component template names used to limit the request. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. - * **`name` (string | string[])**: List or wildcard expression of component template names used to limit the request. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### delete_voting_config_exclusions [_delete_voting_config_exclusions] - -Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list. +## client.cluster.deleteVotingConfigExclusions [_cluster.delete_voting_config_exclusions] +Clear cluster voting config exclusions. 
+Remove master-eligible nodes from the voting configuration exclusion list. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions) @@ -3384,19 +3612,20 @@ Clear cluster voting config exclusions. Remove master-eligible nodes from the vo client.cluster.deleteVotingConfigExclusions({ ... }) ``` +### Arguments [_arguments_cluster.delete_voting_config_exclusions] -### Arguments [_arguments_89] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`wait_for_removal` (Optional, boolean)**: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from the cluster before this API takes any action. If set to false then the voting configuration exclusions list is cleared even if some excluded nodes are still in the cluster. - - +#### Request (object) [_request_cluster.delete_voting_config_exclusions] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`wait_for_removal` (Optional, boolean)**: Specifies whether to wait for all excluded nodes to be removed from the +cluster before clearing the voting configuration exclusions list. +Defaults to true, meaning that all excluded nodes must be removed from +the cluster before this API takes any action. If set to false then the +voting configuration exclusions list is cleared even if some excluded +nodes are still in the cluster. -### exists_component_template [_exists_component_template] - -Check component templates. Returns information about whether a particular component template exists. +## client.cluster.existsComponentTemplate [_cluster.exists_component_template] +Check component templates. +Returns information about whether a particular component template exists. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) @@ -3404,20 +3633,20 @@ Check component templates. Returns information about whether a particular compon client.cluster.existsComponentTemplate({ name }) ``` +### Arguments [_arguments_cluster.exists_component_template] -### Arguments [_arguments_90] - -* **Request (object):** - - * **`name` (string | string[])**: List of component template names used to limit the request. Wildcard (*) expressions are supported. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. - +#### Request (object) [_request_cluster.exists_component_template] +- **`name` (string | string[])**: List of component template names used to limit the request. +Wildcard (*) expressions are supported. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. +Defaults to false, which means information is retrieved from the master node. 
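+
+A minimal usage sketch (the template name is a hypothetical placeholder):
+
+```ts
+// The response indicates whether the named component template exists.
+const exists = await client.cluster.existsComponentTemplate({ name: 'template_1' })
+```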
-
-### get_component_template [_get_component_template]
-
-Get component templates. Get information about component templates.
+## client.cluster.getComponentTemplate [_cluster.get_component_template]
+Get component templates.
+Get information about component templates.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template)

```ts
client.cluster.getComponentTemplate({ ... })
```

+### Arguments [_arguments_cluster.get_component_template]

-### Arguments [_arguments_91]
-
-* **Request (object):**
-
- * **`name` (Optional, string)**: List of component template names used to limit the request. Wildcard (`*`) expressions are supported.
- * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
- * **`include_defaults` (Optional, boolean)**: Return all default configurations for the component template (default: false)
- * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.

+#### Request (object) [_request_cluster.get_component_template]
+- **`name` (Optional, string)**: List of component template names used to limit the request.
+Wildcard (`*`) expressions are supported.
+- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+- **`include_defaults` (Optional, boolean)**: Return all default configurations for the component template (default: false)
+- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
+If `false`, information is retrieved from the master node.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
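+
+For example, a short sketch using a wildcard (the name pattern is illustrative):
+
+```ts
+// Fetch every component template whose name starts with "logs-".
+const response = await client.cluster.getComponentTemplate({ name: 'logs-*' })
+```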
-
-
-### get_settings [_get_settings]
-
-Get cluster-wide settings. By default, it returns only settings that have been explicitly defined.
+## client.cluster.getSettings [_cluster.get_settings]
+Get cluster-wide settings.
+By default, it returns only settings that have been explicitly defined.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings)

```ts
client.cluster.getSettings({ ... })
```

+### Arguments [_arguments_cluster.get_settings]

-### Arguments [_arguments_92]
-
-* **Request (object):**
-
- * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
- * **`include_defaults` (Optional, boolean)**: If `true`, returns default cluster settings from the local node.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-

+#### Request (object) [_request_cluster.get_settings]
+- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+- **`include_defaults` (Optional, boolean)**: If `true`, returns default cluster settings from the local node.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
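+
+A minimal sketch combining the options documented above:
+
+```ts
+// Return explicit and default settings in flat format.
+const settings = await client.cluster.getSettings({
+  flat_settings: true,
+  include_defaults: true
+})
+```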
-
-### health [_health_2]
-
-Get the cluster health status. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices.
-
-The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status.
-
-One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status.
+## client.cluster.health [_cluster.health]
+Get the cluster health status.
+
+You can also use the API to get the health status of only specified data streams and indices.
+For data streams, the API retrieves the health status of the stream’s backing indices.
+
+The cluster health status is: green, yellow or red.
+On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated.
+The index level status is controlled by the worst shard status.
+
+One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level.
+The cluster status is controlled by the worst index status.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health)

```ts
client.cluster.health({ ... })
```

+### Arguments [_arguments_cluster.health]

-### Arguments [_arguments_93]
-
-* **Request (object):**
-
- * **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`.
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
- * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Can be one of cluster, indices or shards. Controls the details level of the health information returned.
- * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
- * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: A number controlling to how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait.
- * **`wait_for_events` (Optional, Enum("immediate" | "urgent" | "high" | "normal" | "low" | "languid"))**: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
- * **`wait_for_nodes` (Optional, string | number)**: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N.
- * **`wait_for_status` (Optional, Enum("green" | "yellow" | "red"))**: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status.
-

+#### Request (object) [_request_cluster.health]
+- **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Can be one of cluster, indices or shards. Controls the details level of the health information returned.
+- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: A number controlling to how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait.
+- **`wait_for_events` (Optional, Enum("immediate" | "urgent" | "high" | "normal" | "low" | "languid"))**: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
+- **`wait_for_nodes` (Optional, string | number)**: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N.
+- **`wait_for_status` (Optional, Enum("green" | "yellow" | "red"))**: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status.
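+
+As a sketch, a common use is waiting for the cluster to reach at least yellow status (the timeout value here is arbitrary):
+
+```ts
+const health = await client.cluster.health({
+  wait_for_status: 'yellow',
+  timeout: '30s'
+})
+```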
-
-### info [_info_2]
-
-Get cluster info. Returns basic information about the cluster.
+## client.cluster.info [_cluster.info]
+Get cluster info.
+Returns basic information about the cluster.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info)

```ts
client.cluster.info({ target })
```

+### Arguments [_arguments_cluster.info]

-### Arguments [_arguments_94]
-
-* **Request (object):**
-
- * **`target` (Enum("_all" | "http" | "ingest" | "thread_pool" | "script") | Enum("_all" | "http" | "ingest" | "thread_pool" | "script")[])**: Limits the information returned to the specific target. Supports a list, such as http,ingest.

+#### Request (object) [_request_cluster.info]
+- **`target` (Enum("_all" | "http" | "ingest" | "thread_pool" | "script") | Enum("_all" | "http" | "ingest" | "thread_pool" | "script")[])**: Limits the information returned to the specific target. Supports a list, such as http,ingest.

-
-### pending_tasks [_pending_tasks_2]
-
-Get the pending cluster tasks. Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.
-
-::::{note}
-This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.
-::::
+## client.cluster.pendingTasks [_cluster.pending_tasks]
+Get the pending cluster tasks.
+Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.
+
+NOTE: This API returns a list of any pending updates to the cluster state.
+These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests.
+However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both the task API and the pending cluster tasks API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks)

```ts
client.cluster.pendingTasks({ ... })
```

+### Arguments [_arguments_cluster.pending_tasks]

-### Arguments [_arguments_95]
-
-* **Request (object):**
-
- * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.

+#### Request (object) [_request_cluster.pending_tasks]
+- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
+If `false`, information is retrieved from the master node.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.

-### post_voting_config_exclusions [_post_voting_config_exclusions]
+## client.cluster.postVotingConfigExclusions [_cluster.post_voting_config_exclusions]
+Update voting configuration exclusions.
+Update the cluster voting config exclusions by node IDs or node names.
+By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks.
+If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually.
+The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. +It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. -Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. +Clusters should have no voting configuration exclusions in normal operation. +Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. +This API waits for the nodes to be fully removed from the cluster before it returns. +If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. -Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. - -A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call. - -::::{note} -Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. -:::: +A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. +If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. +In that case, you may safely retry the call. 
+NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. +They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions) @@ -3557,33 +3784,40 @@ Voting exclusions are required only when you remove at least half of the master- client.cluster.postVotingConfigExclusions({ ... }) ``` +### Arguments [_arguments_cluster.post_voting_config_exclusions] -### Arguments [_arguments_96] - -* **Request (object):** - - * **`node_names` (Optional, string | string[])**: A list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify node_ids. - * **`node_ids` (Optional, string | string[])**: A list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`timeout` (Optional, string | -1 | 0)**: When adding a voting configuration exclusion, the API waits for the specified nodes to be excluded from the voting configuration before returning. If the timeout expires before the appropriate condition is satisfied, the request fails and returns an error. +#### Request (object) [_request_cluster.post_voting_config_exclusions] +- **`node_names` (Optional, string | string[])**: A list of the names of the nodes to exclude from the +voting configuration. If specified, you may not also specify node_ids. +- **`node_ids` (Optional, string | string[])**: A list of the persistent ids of the nodes to exclude +from the voting configuration. If specified, you may not also specify node_names. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`timeout` (Optional, string | -1 | 0)**: When adding a voting configuration exclusion, the API waits for the +specified nodes to be excluded from the voting configuration before +returning. If the timeout expires before the appropriate condition +is satisfied, the request fails and returns an error. +## client.cluster.putComponentTemplate [_cluster.put_component_template] +Create or update a component template. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - -### put_component_template [_put_component_template] - -Create or update a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - -An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. +An index template can be composed of multiple component templates. +To use a component template, specify it in an index template’s `composed_of` list. +Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. -Component templates are only used during index creation. 
For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. +Component templates are only used during index creation. +For data streams, this includes data stream creation and the creation of a stream’s backing indices. +Changes to component templates do not affect existing indices, including a stream’s backing indices. -You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. +You can use C-style `/* *\/` block comments in component templates. +You can include comments anywhere in the request body except before the opening curly bracket. **Applying component templates** -You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template’s `composed_of` list. +You cannot directly apply a component template to a data stream or index. +To be applied, a component template must be included in an index template's `composed_of` list. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) @@ -3591,38 +3825,48 @@ You cannot directly apply a component template to a data stream or index. To be client.cluster.putComponentTemplate({ name, template }) ``` - -### Arguments [_arguments_97] - -* **Request (object):** - - * **`name` (string)**: Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. - * **`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })**: The template to be applied which includes mappings, settings, or aliases configuration. - * **`version` (Optional, number)**: Version number used to manage component templates externally. This number isn’t automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version. - * **`_meta` (Optional, Record)**: Optional user metadata about the component template. It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. - * **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. - * **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing component templates. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### put_settings [_put_settings] - -Update the cluster settings. 
Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. - -Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value. - -If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. - -::::{tip} -In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. -:::: - - -::::{warning} -Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. -:::: - +### Arguments [_arguments_cluster.put_component_template] + +#### Request (object) [_request_cluster.put_component_template] +- **`name` (string)**: Name of the component template to create. +Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. +Elastic Agent uses these templates to configure backing indices for its data streams. +If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. +If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. +- **`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })**: The template to be applied which includes mappings, settings, or aliases configuration. +- **`version` (Optional, number)**: Version number used to manage component templates externally. +This number isn't automatically generated or incremented by Elasticsearch. +To unset a version, replace the template without specifying a version. +- **`_meta` (Optional, Record)**: Optional user metadata about the component template. +It may have any contents. This map is not automatically generated by Elasticsearch. +This information is stored in the cluster state, so keeping it short is preferable. +To unset `_meta`, replace the template without specifying this information. +- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template +that uses deprecated components, Elasticsearch will emit a deprecation warning. +- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing component templates. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
+If no response is received before the timeout expires, the request fails and returns an error. + +## client.cluster.putSettings [_cluster.put_settings] +Update the cluster settings. + +Configure and update dynamic settings on a running cluster. +You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. + +Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. +You can also reset transient or persistent settings by assigning them a null value. + +If you configure the same setting using multiple methods, Elasticsearch applies the settings in the following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. +For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. +However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. + +TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. +If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. +Only use `elasticsearch.yml` for static cluster settings and node settings. +The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. + +WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. +If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) @@ -3630,22 +3874,26 @@ client.cluster.putSettings({ ... }) ``` +### Arguments [_arguments_cluster.put_settings] -### Arguments [_arguments_98] +#### Request (object) [_request_cluster.put_settings] +- **`persistent` (Optional, Record)** +- **`transient` (Optional, Record)** +- **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) +- **`master_timeout` (Optional, string | -1 | 0)**: Explicit operation timeout for connection to master +- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout -* **Request (object):** +## client.cluster.remoteInfo [_cluster.remote_info] +Get remote cluster information. - * **`persistent` (Optional, Record)** +Get information about configured remote clusters. +The API returns connection and endpoint information keyed by the configured remote cluster alias. - * **`transient` (Optional, Record)** - * **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) - * **`master_timeout` (Optional, string | -1 | 0)**: Explicit operation timeout for connection to master - * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout - - -### remote_info [_remote_info] - -Get remote cluster information. Get all of the configured remote cluster information. This API returns connection and endpoint information keyed by the configured remote cluster alias. + +> info +> This API returns information that reflects current state on the local cluster.
+> The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. +> Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. +> To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info) @@ -3654,15 +3902,19 @@ client.cluster.remoteInfo() ``` -### reroute [_reroute] - -Reroute the cluster. Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. +## client.cluster.reroute [_cluster.reroute] +Reroute the cluster. +Manually change the allocation of individual shards in the cluster. +For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. -It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. +It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. +For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. -The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. +The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. +If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. -The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. +The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. +This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. 
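+
+For example, a minimal sketch of that retry (assuming only the `retry_failed` request option described in the arguments below; no explicit commands are required for a plain retry round):
+
+```ts
+// Retry allocation of shards that are blocked after hitting the
+// `index.allocation.max_retries` limit, without issuing explicit commands.
+const resp = await client.cluster.reroute({ retry_failed: true })
+console.log(resp.acknowledged)
+```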
@@ -3672,37 +3924,38 @@ Once the problem has been corrected, allocation can be manually retried by calli client.cluster.reroute({ ... }) ``` +### Arguments [_arguments_cluster.reroute] -### Arguments [_arguments_99] +#### Request (object) [_request_cluster.reroute] +- **`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])**: Defines the commands to perform. +- **`dry_run` (Optional, boolean)**: If true, then the request simulates the operation. +It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. +- **`explain` (Optional, boolean)**: If true, then the response contains an explanation of why the commands can or cannot run. +- **`metric` (Optional, string | string[])**: Limits the information returned to the specified metrics. +- **`retry_failed` (Optional, boolean)**: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** - - * **`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])**: Defines the commands to perform. - * **`dry_run` (Optional, boolean)**: If true, then the request simulates the operation. It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. - * **`explain` (Optional, boolean)**: If true, then the response contains an explanation of why the commands can or cannot run. - * **`metric` (Optional, string | string[])**: Limits the information returned to the specified metrics. - * **`retry_failed` (Optional, boolean)**: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### state [_state] - -Get the cluster state. Get comprehensive information about the state of the cluster. +## client.cluster.state [_cluster.state] +Get the cluster state. +Get comprehensive information about the state of the cluster. The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. -The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. 
You may need to consult the Elasticsearch source code to determine the precise meaning of the response. - -By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. +The elected master node ensures that every node in the cluster has a copy of the same cluster state. +This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. +You may need to consult the Elasticsearch source code to determine the precise meaning of the response. -Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable. +By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. +You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. -::::{warning} -The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs. -:::: +Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. +If you use this API repeatedly, your cluster may become unstable. +WARNING: The response is a representation of an internal data structure. +Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. +Do not query this API using external monitoring tools. +Instead, obtain the information you require using other more stable cluster APIs. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state) @@ -3710,27 +3963,23 @@ The response is a representation of an internal data structure. Its format is no client.cluster.state({ ... }) ``` +### Arguments [_arguments_cluster.state] -### Arguments [_arguments_100] - -* **Request (object):** - - * **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics - * **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices - * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
- * **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) - * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - * **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node (default: false) - * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master - * **`wait_for_metadata_version` (Optional, number)**: Wait for the metadata version to be equal or greater than the specified metadata version - * **`wait_for_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for wait_for_metadata_version before timing out +#### Request (object) [_request_cluster.state] +- **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics +- **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) +- **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node (default: false) +- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master +- **`wait_for_metadata_version` (Optional, number)**: Wait for the metadata version to be equal or greater than the specified metadata version +- **`wait_for_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for wait_for_metadata_version before timing out - - -### stats [_stats_2] - -Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). +## client.cluster.stats [_cluster.stats] +Get cluster statistics. +Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats) @@ -3738,22 +3987,16 @@ Get cluster statistics. Get basic index metrics (shard numbers, store size, memo client.cluster.stats({ ... }) ``` +### Arguments [_arguments_cluster.stats] -### Arguments [_arguments_101] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: List of node filters used to limit returned information. Defaults to all nodes in the cluster. - * **`include_remotes` (Optional, boolean)**: Include remote cluster data into the response - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. 
Defaults to no timeout. - - - -## connector [_connector] - - -### check_in [_check_in] +#### Request (object) [_request_cluster.stats] +- **`node_id` (Optional, string | string[])**: List of node filters used to limit returned information. Defaults to all nodes in the cluster. +- **`include_remotes` (Optional, boolean)**: Include remote cluster data into the response +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for each node to respond. +If a node does not respond before its timeout expires, the response does not include its stats. +However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. +## client.connector.checkIn [_connector.check_in] Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp. @@ -3764,20 +4007,18 @@ Update the `last_seen` field in the connector and set it to the current timestam client.connector.checkIn({ connector_id }) ``` +### Arguments [_arguments_connector.check_in] -### Arguments [_arguments_102] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be checked in - - - -### delete [_delete_3] +#### Request (object) [_request_connector.check_in] +- **`connector_id` (string)**: The unique identifier of the connector to be checked in +## client.connector.delete [_connector.delete] Delete a connector. -Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually. +Removes a connector and associated sync jobs. +This is a destructive action that is not recoverable. +NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. +These need to be removed manually. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete) @@ -3785,19 +4026,14 @@ Removes a connector and associated sync jobs. This is a destructive action that client.connector.delete({ connector_id }) ``` +### Arguments [_arguments_connector.delete] -### Arguments [_arguments_103] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be deleted - * **`delete_sync_jobs` (Optional, boolean)**: A flag indicating if associated sync jobs should be also removed. Defaults to false. - * **`hard` (Optional, boolean)**: A flag indicating if the connector should be hard deleted. - - - -### get [_get_3] +#### Request (object) [_request_connector.delete] +- **`connector_id` (string)**: The unique identifier of the connector to be deleted +- **`delete_sync_jobs` (Optional, boolean)**: A flag indicating if associated sync jobs should be also removed. Defaults to false. +- **`hard` (Optional, boolean)**: A flag indicating if the connector should be hard deleted. +## client.connector.get [_connector.get] Get a connector. Get the details about a connector. @@ -3808,18 +4044,13 @@ Get the details about a connector. client.connector.get({ connector_id }) ``` +### Arguments [_arguments_connector.get] -### Arguments [_arguments_104] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector - * **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. 
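+
+As an illustrative sketch (the `my-connector` ID is hypothetical), fetching a single connector looks like this:
+
+```ts
+// Fetch one connector document, returning it even if it was soft-deleted.
+const connector = await client.connector.get({
+  connector_id: 'my-connector',
+  include_deleted: true
+})
+console.log(connector.status)
+```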
- - - -### list [_list] +#### Request (object) [_request_connector.get] +- **`connector_id` (string)**: The unique identifier of the connector +- **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. +## client.connector.list [_connector.list] Get all connectors. Get information about all connectors. @@ -3830,26 +4061,23 @@ Get information about all connectors. client.connector.list({ ... }) ``` +### Arguments [_arguments_connector.list] -### Arguments [_arguments_105] - -* **Request (object):** - - * **`from` (Optional, number)**: Starting offset (default: 0) - * **`size` (Optional, number)**: Specifies a max number of results to get - * **`index_name` (Optional, string | string[])**: A list of connector index names to fetch connector documents for - * **`connector_name` (Optional, string | string[])**: A list of connector names to fetch connector documents for - * **`service_type` (Optional, string | string[])**: A list of connector service types to fetch connector documents for - * **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. - * **`query` (Optional, string)**: A wildcard query string that filters connectors with matching name, description or index name - - - -### post [_post] +#### Request (object) [_request_connector.list] +- **`from` (Optional, number)**: Starting offset (default: 0) +- **`size` (Optional, number)**: Specifies a max number of results to get +- **`index_name` (Optional, string | string[])**: A list of connector index names to fetch connector documents for +- **`connector_name` (Optional, string | string[])**: A list of connector names to fetch connector documents for +- **`service_type` (Optional, string | string[])**: A list of connector service types to fetch connector documents for +- **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. +- **`query` (Optional, string)**: A wildcard query string that filters connectors with matching name, description or index name +## client.connector.post [_connector.post] Create a connector. -Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure. +Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. +Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. +Self-managed connectors (Connector clients) are self-managed on your infrastructure. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put) @@ -3857,22 +4085,17 @@ Connectors are Elasticsearch integrations that bring content from third-party da client.connector.post({ ... 
}) ``` +### Arguments [_arguments_connector.post] -### Arguments [_arguments_106] - -* **Request (object):** - - * **`description` (Optional, string)** - * **`index_name` (Optional, string)** - * **`is_native` (Optional, boolean)** - * **`language` (Optional, string)** - * **`name` (Optional, string)** - * **`service_type` (Optional, string)** - - - -### put [_put] +#### Request (object) [_request_connector.post] +- **`description` (Optional, string)** +- **`index_name` (Optional, string)** +- **`is_native` (Optional, boolean)** +- **`language` (Optional, string)** +- **`name` (Optional, string)** +- **`service_type` (Optional, string)** +## client.connector.put [_connector.put] Create or update a connector. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put) @@ -3881,26 +4104,22 @@ Create or update a connector. client.connector.put({ ... }) ``` +### Arguments [_arguments_connector.put] -### Arguments [_arguments_107] - -* **Request (object):** - - * **`connector_id` (Optional, string)**: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. - * **`description` (Optional, string)** - * **`index_name` (Optional, string)** - * **`is_native` (Optional, boolean)** - * **`language` (Optional, string)** - * **`name` (Optional, string)** - * **`service_type` (Optional, string)** - - - -### sync_job_cancel [_sync_job_cancel] +#### Request (object) [_request_connector.put] +- **`connector_id` (Optional, string)**: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. +- **`description` (Optional, string)** +- **`index_name` (Optional, string)** +- **`is_native` (Optional, boolean)** +- **`language` (Optional, string)** +- **`name` (Optional, string)** +- **`service_type` (Optional, string)** +## client.connector.syncJobCancel [_connector.sync_job_cancel] Cancel a connector sync job. -Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled. +Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. +The connector service is then responsible for setting the status of connector sync jobs to cancelled. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel) @@ -3908,20 +4127,17 @@ Cancel a connector sync job, which sets the status to cancelling and updates `ca client.connector.syncJobCancel({ connector_sync_job_id }) ``` +### Arguments [_arguments_connector.sync_job_cancel] -### Arguments [_arguments_108] +#### Request (object) [_request_connector.sync_job_cancel] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job -* **Request (object):** +## client.connector.syncJobCheckIn [_connector.sync_job_check_in] +Check in a connector sync job. +Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job - - - -### sync_job_check_in [_sync_job_check_in] - -Check in a connector sync job. Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. 
- -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in) @@ -3929,43 +4145,41 @@ To sync data using self-managed connectors, you need to deploy the Elastic conne client.connector.syncJobCheckIn({ connector_sync_job_id }) ``` +### Arguments [_arguments_connector.sync_job_check_in] -### Arguments [_arguments_109] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be checked in. - +#### Request (object) [_request_connector.sync_job_check_in] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be checked in. +## client.connector.syncJobClaim [_connector.sync_job_claim] +Claim a connector sync job. +This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. +Additionally, it can set the `sync_cursor` property for the sync job. -### sync_job_claim [_sync_job_claim] +This API is not intended for direct connector management by users. +It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. -Claim a connector sync job. This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, it can set the `sync_cursor` property for the sync job. +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. -This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. - -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim) ```ts client.connector.syncJobClaim({ connector_sync_job_id, worker_hostname }) ``` +### Arguments [_arguments_connector.sync_job_claim] -### Arguments [_arguments_110] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. - * **`worker_hostname` (string)**: The host name of the current system that will run the job. - * **`sync_cursor` (Optional, User-defined value)**: The cursor object from the last incremental sync job. This should reference the `sync_cursor` field in the connector state for which the job runs. - - - -### sync_job_delete [_sync_job_delete] +#### Request (object) [_request_connector.sync_job_claim] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. +- **`worker_hostname` (string)**: The host name of the current system that will run the job. +- **`sync_cursor` (Optional, User-defined value)**: The cursor object from the last incremental sync job. 
+This should reference the `sync_cursor` field in the connector state for which the job runs. +## client.connector.syncJobDelete [_connector.sync_job_delete] Delete a connector sync job. -Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. +Remove a connector sync job and its associated data. +This is a destructive action that is not recoverable. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete) @@ -3973,20 +4187,17 @@ Remove a connector sync job and its associated data. This is a destructive actio client.connector.syncJobDelete({ connector_sync_job_id }) ``` +### Arguments [_arguments_connector.sync_job_delete] -### Arguments [_arguments_111] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be deleted +#### Request (object) [_request_connector.sync_job_delete] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be deleted +## client.connector.syncJobError [_connector.sync_job_error] +Set a connector sync job error. +Set the `error` field for a connector sync job and set its `status` to `error`. - -### sync_job_error [_sync_job_error] - -Set a connector sync job error. Set the `error` field for a connector sync job and set its `status` to `error`. - -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error) @@ -3994,18 +4205,13 @@ To sync data using self-managed connectors, you need to deploy the Elastic conne client.connector.syncJobError({ connector_sync_job_id, error }) ``` +### Arguments [_arguments_connector.sync_job_error] -### Arguments [_arguments_112] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier for the connector sync job. - * **`error` (string)**: The error for the connector sync job error field. - - - -### sync_job_get [_sync_job_get] +#### Request (object) [_request_connector.sync_job_error] +- **`connector_sync_job_id` (string)**: The unique identifier for the connector sync job. +- **`error` (string)**: The error for the connector sync job error field. +## client.connector.syncJobGet [_connector.sync_job_get] Get a connector sync job. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get) @@ -4014,17 +4220,12 @@ Get a connector sync job. client.connector.syncJobGet({ connector_sync_job_id }) ``` +### Arguments [_arguments_connector.sync_job_get] -### Arguments [_arguments_113] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job - - - -### sync_job_list [_sync_job_list] +#### Request (object) [_request_connector.sync_job_get] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job +## client.connector.syncJobList [_connector.sync_job_list] Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order. 
@@ -4035,21 +4236,16 @@ Get information about all stored connector sync jobs listed by their creation da client.connector.syncJobList({ ... }) ``` +### Arguments [_arguments_connector.sync_job_list] -### Arguments [_arguments_114] - -* **Request (object):** - - * **`from` (Optional, number)**: Starting offset (default: 0) - * **`size` (Optional, number)**: Specifies a max number of results to get - * **`status` (Optional, Enum("canceling" | "canceled" | "completed" | "error" | "in_progress" | "pending" | "suspended"))**: A sync job status to fetch connector sync jobs for - * **`connector_id` (Optional, string)**: A connector id to fetch connector sync jobs for - * **`job_type` (Optional, Enum("full" | "incremental" | "access_control") | Enum("full" | "incremental" | "access_control")[])**: A list of job types to fetch the sync jobs for - - - -### sync_job_post [_sync_job_post] +#### Request (object) [_request_connector.sync_job_list] +- **`from` (Optional, number)**: Starting offset (default: 0) +- **`size` (Optional, number)**: Specifies a max number of results to get +- **`status` (Optional, Enum("canceling" | "canceled" | "completed" | "error" | "in_progress" | "pending" | "suspended"))**: A sync job status to fetch connector sync jobs for +- **`connector_id` (Optional, string)**: A connector id to fetch connector sync jobs for +- **`job_type` (Optional, Enum("full" | "incremental" | "access_control") | Enum("full" | "incremental" | "access_control")[])**: A list of job types to fetch the sync jobs for +## client.connector.syncJobPost [_connector.sync_job_post] Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. @@ -4060,22 +4256,21 @@ Create a connector sync job document in the internal index and initialize its co client.connector.syncJobPost({ id }) ``` +### Arguments [_arguments_connector.sync_job_post] -### Arguments [_arguments_115] - -* **Request (object):** - - * **`id` (string)**: The id of the associated connector - * **`job_type` (Optional, Enum("full" | "incremental" | "access_control"))** - * **`trigger_method` (Optional, Enum("on_demand" | "scheduled"))** +#### Request (object) [_request_connector.sync_job_post] +- **`id` (string)**: The id of the associated connector +- **`job_type` (Optional, Enum("full" | "incremental" | "access_control"))** +- **`trigger_method` (Optional, Enum("on_demand" | "scheduled"))** +## client.connector.syncJobUpdateStats [_connector.sync_job_update_stats] +Set the connector sync job stats. +Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. +You can also update `last_seen`. +This API is mainly used by the connector service for updating sync job information. - -### sync_job_update_stats [_sync_job_update_stats] - -Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. This API is mainly used by the connector service for updating sync job information. - -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. 
+This service runs automatically on Elastic Cloud for Elastic managed connectors. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats) @@ -4083,23 +4278,18 @@ To sync data using self-managed connectors, you need to deploy the Elastic conne client.connector.syncJobUpdateStats({ connector_sync_job_id, deleted_document_count, indexed_document_count, indexed_document_volume }) ``` +### Arguments [_arguments_connector.sync_job_update_stats] -### Arguments [_arguments_116] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. - * **`deleted_document_count` (number)**: The number of documents the sync job deleted. - * **`indexed_document_count` (number)**: The number of documents the sync job indexed. - * **`indexed_document_volume` (number)**: The total size of the data (in MiB) the sync job indexed. - * **`last_seen` (Optional, string | -1 | 0)**: The timestamp to use in the `last_seen` property for the connector sync job. - * **`metadata` (Optional, Record)**: The connector-specific metadata. - * **`total_document_count` (Optional, number)**: The total number of documents in the target index after the sync job finished. - - - -### update_active_filtering [_update_active_filtering] +#### Request (object) [_request_connector.sync_job_update_stats] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. +- **`deleted_document_count` (number)**: The number of documents the sync job deleted. +- **`indexed_document_count` (number)**: The number of documents the sync job indexed. +- **`indexed_document_volume` (number)**: The total size of the data (in MiB) the sync job indexed. +- **`last_seen` (Optional, string | -1 | 0)**: The timestamp to use in the `last_seen` property for the connector sync job. +- **`metadata` (Optional, Record)**: The connector-specific metadata. +- **`total_document_count` (Optional, number)**: The total number of documents in the target index after the sync job finished. +## client.connector.updateActiveFiltering [_connector.update_active_filtering] Activate the connector draft filter. Activates the valid draft filtering for a connector. @@ -4110,20 +4300,18 @@ Activates the valid draft filtering for a connector. client.connector.updateActiveFiltering({ connector_id }) ``` +### Arguments [_arguments_connector.update_active_filtering] -### Arguments [_arguments_117] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - - - -### update_api_key_id [_update_api_key_id] +#### Request (object) [_request_connector.update_active_filtering] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +## client.connector.updateApiKeyId [_connector.update_api_key_id] Update the connector API key ID. -Update the `api_key_id` and `api_key_secret_id` fields of a connector. You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. +Update the `api_key_id` and `api_key_secret_id` fields of a connector. +You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. +The connector secret ID is required only for Elastic managed (native) connectors. 
+Self-managed connectors (connector clients) do not use this field. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id) @@ -4131,19 +4319,14 @@ Update the `api_key_id` and `api_key_secret_id` fields of a connector. You can s client.connector.updateApiKeyId({ connector_id }) ``` +### Arguments [_arguments_connector.update_api_key_id] -### Arguments [_arguments_118] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`api_key_id` (Optional, string)** - * **`api_key_secret_id` (Optional, string)** - - - -### update_configuration [_update_configuration] +#### Request (object) [_request_connector.update_api_key_id] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`api_key_id` (Optional, string)** +- **`api_key_secret_id` (Optional, string)** +## client.connector.updateConfiguration [_connector.update_configuration] Update the connector configuration. Update the configuration field in the connector document. @@ -4154,22 +4337,19 @@ Update the configuration field in the connector document. client.connector.updateConfiguration({ connector_id }) ``` +### Arguments [_arguments_connector.update_configuration] -### Arguments [_arguments_119] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`configuration` (Optional, Record)** - * **`values` (Optional, Record)** - - - -### update_error [_update_error] +#### Request (object) [_request_connector.update_configuration] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`configuration` (Optional, Record)** +- **`values` (Optional, Record)** +## client.connector.updateError [_connector.update_error] Update the connector error field. -Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected. +Set the error field for the connector. +If the error provided in the request body is non-null, the connector’s status is updated to error. +Otherwise, if the error is reset to null, the connector status is updated to connected. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error) @@ -4177,28 +4357,27 @@ Set the error field for the connector. If the error provided in the request body client.connector.updateError({ connector_id, error }) ``` +### Arguments [_arguments_connector.update_error] -### Arguments [_arguments_120] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`error` (T | null)** - - - -### update_features [_update_features] +#### Request (object) [_request_connector.update_error] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`error` (T | null)** -Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector: +## client.connector.updateFeatures [_connector.update_features] +Update the connector features. +Update the connector features in the connector document. 
+This API can be used to control the following aspects of a connector: * document-level security * incremental syncs * advanced sync rules * basic sync rules -Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior. +Normally, the running connector service automatically manages these features. +However, you can use this API to override the default behavior. -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features) @@ -4206,21 +4385,18 @@ client.connector.updateFeatures({ connector_id, features }) ``` +### Arguments [_arguments_connector.update_features] -### Arguments [_arguments_121] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated. - * **`features` ({ document_level_security, incremental_sync, native_connector_api_keys, sync_rules })** - - - -### update_filtering [_update_filtering] +#### Request (object) [_request_connector.update_features] +- **`connector_id` (string)**: The unique identifier of the connector to be updated. +- **`features` ({ document_level_security, incremental_sync, native_connector_api_keys, sync_rules })** +## client.connector.updateFiltering [_connector.update_filtering] Update the connector filtering. -Update the draft filtering configuration of a connector and marks the draft validation state as edited. The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. +Update the draft filtering configuration of a connector and mark the draft validation state as edited. +The filtering draft is activated once validated by the running Elastic connector service. +The filtering property is used to configure sync rules (both basic and advanced) for a connector.
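+
+As a minimal sketch of a basic sync rule update (the connector ID, rule ID, and field values are hypothetical; the rule shape follows the `rules` argument listed below):
+
+```ts
+// Draft one basic sync rule that excludes documents whose `status` field
+// equals 'archived'. The draft only activates after the connector service
+// validates it.
+const now = new Date().toISOString()
+await client.connector.updateFiltering({
+  connector_id: 'my-connector',
+  rules: [{
+    id: 'exclude-archived',
+    field: 'status',
+    policy: 'exclude',
+    rule: 'equals',
+    value: 'archived',
+    order: 0,
+    created_at: now,
+    updated_at: now
+  }]
+})
+```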
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering) @@ -4228,40 +4404,32 @@ Update the draft filtering configuration of a connector and marks the draft vali client.connector.updateFiltering({ connector_id }) ``` +### Arguments [_arguments_connector.update_filtering] -### Arguments [_arguments_122] - -* **Request (object):** +#### Request (object) [_request_connector.update_filtering] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`filtering` (Optional, { active, domain, draft }[])** +- **`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])** +- **`advanced_snippet` (Optional, { created_at, updated_at, value })** - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`filtering` (Optional, { active, domain, draft }[])** - * **`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])** - * **`advanced_snippet` (Optional, { created_at, updated_at, value })** +## client.connector.updateFilteringValidation [_connector.update_filtering_validation] +Update the connector draft filtering validation. +Update the draft filtering validation info for a connector. - -### update_filtering_validation [_update_filtering_validation] - -Update the connector draft filtering validation. - -Update the draft filtering validation info for a connector. +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-validation-api.html) ```ts client.connector.updateFilteringValidation({ connector_id, validation }) ``` +### Arguments [_arguments_connector.update_filtering_validation] -### Arguments [_arguments_123] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`validation` ({ errors, state })** - - - -### update_index_name [_update_index_name] +#### Request (object) [_request_connector.update_filtering_validation] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`validation` ({ errors, state })** +## client.connector.updateIndexName [_connector.update_index_name] Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. @@ -4272,18 +4440,13 @@ Update the `index_name` field of a connector, specifying the index where the dat client.connector.updateIndexName({ connector_id, index_name }) ``` +### Arguments [_arguments_connector.update_index_name] -### Arguments [_arguments_124] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`index_name` (T | null)** - - - -### update_name [_update_name] +#### Request (object) [_request_connector.update_index_name] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`index_name` (T | null)** +## client.connector.updateName [_connector.update_name] Update the connector name and description. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name) @@ -4292,37 +4455,29 @@ Update the connector name and description. 
client.connector.updateName({ connector_id }) ``` +### Arguments [_arguments_connector.update_name] -### Arguments [_arguments_125] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`name` (Optional, string)** - * **`description` (Optional, string)** - - - -### update_native [_update_native] +#### Request (object) [_request_connector.update_name] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`name` (Optional, string)** +- **`description` (Optional, string)** +## client.connector.updateNative [_connector.update_native] Update the connector is_native flag. +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-native-api.html) + ```ts client.connector.updateNative({ connector_id, is_native }) ``` +### Arguments [_arguments_connector.update_native] -### Arguments [_arguments_126] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`is_native` (boolean)** - - - -### update_pipeline [_update_pipeline] +#### Request (object) [_request_connector.update_native] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`is_native` (boolean)** +## client.connector.updatePipeline [_connector.update_pipeline] Update the connector pipeline. When you create a new connector, the configuration of an ingest pipeline is populated with default settings. @@ -4333,18 +4488,13 @@ When you create a new connector, the configuration of an ingest pipeline is popu client.connector.updatePipeline({ connector_id, pipeline }) ``` +### Arguments [_arguments_connector.update_pipeline] -### Arguments [_arguments_127] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })** - - - -### update_scheduling [_update_scheduling] +#### Request (object) [_request_connector.update_pipeline] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })** +## client.connector.updateScheduling [_connector.update_scheduling] Update the connector scheduling. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling) @@ -4353,18 +4503,13 @@ Update the connector scheduling. client.connector.updateScheduling({ connector_id, scheduling }) ``` +### Arguments [_arguments_connector.update_scheduling] -### Arguments [_arguments_128] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`scheduling` ({ access_control, full, incremental })** - - - -### update_service_type [_update_service_type] +#### Request (object) [_request_connector.update_scheduling] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`scheduling` ({ access_control, full, incremental })** +## client.connector.updateServiceType [_connector.update_service_type] Update the connector service type. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type) @@ -4373,18 +4518,13 @@ Update the connector service type. 
client.connector.updateServiceType({ connector_id, service_type })
```

+### Arguments [_arguments_connector.update_service_type]

-### Arguments [_arguments_129]
-
-* **Request (object):**
-
-    * **`connector_id` (string)**: The unique identifier of the connector to be updated
-    * **`service_type` (string)**
-
-
-
-### update_status [_update_status]
+#### Request (object) [_request_connector.update_service_type]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`service_type` (string)**

+## client.connector.updateStatus [_connector.update_status]
Update the connector status.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status)

@@ -4393,22 +4533,16 @@ Update the connector status.
client.connector.updateStatus({ connector_id, status })
```

+### Arguments [_arguments_connector.update_status]

-### Arguments [_arguments_130]
-
-* **Request (object):**
-
-    * **`connector_id` (string)**: The unique identifier of the connector to be updated
-    * **`status` (Enum("created" | "needs_configuration" | "configured" | "connected" | "error"))**
-
-
-
-## dangling_indices [_dangling_indices]
-
-
-### delete_dangling_index [_delete_dangling_index]
+#### Request (object) [_request_connector.update_status]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`status` (Enum("created" | "needs_configuration" | "configured" | "connected" | "error"))**

-Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
+## client.danglingIndices.deleteDanglingIndex [_dangling_indices.delete_dangling_index]
+Delete a dangling index.
+If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
+For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index)

```ts
client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss })
```

+### Arguments [_arguments_dangling_indices.delete_dangling_index]

-### Arguments [_arguments_131]
-
-* **Request (object):**
-
-    * **`index_uuid` (string)**: The UUID of the index to delete. Use the get dangling indices API to find the UUID.
-    * **`accept_data_loss` (boolean)**: This parameter must be set to true to acknowledge that it will no longer be possible to recove data from the dangling index.
-    * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
-    * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout
-
-
-
-### import_dangling_index [_import_dangling_index]
+#### Request (object) [_request_dangling_indices.delete_dangling_index]
+- **`index_uuid` (string)**: The UUID of the index to delete. Use the get dangling indices API to find the UUID.
+- **`accept_data_loss` (boolean)**: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index.
+- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master +- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout +## client.danglingIndices.importDanglingIndex [_dangling_indices.import_dangling_index] Import a dangling index. -If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. +If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. +For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index) @@ -4440,23 +4570,20 @@ If Elasticsearch encounters index data that is absent from the current cluster s client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss }) ``` +### Arguments [_arguments_dangling_indices.import_dangling_index] -### Arguments [_arguments_132] - -* **Request (object):** - - * **`index_uuid` (string)**: The UUID of the index to import. Use the get dangling indices API to locate the UUID. - * **`accept_data_loss` (boolean)**: This parameter must be set to true to import a dangling index. Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. - * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master - * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout - - - -### list_dangling_indices [_list_dangling_indices] +#### Request (object) [_request_dangling_indices.import_dangling_index] +- **`index_uuid` (string)**: The UUID of the index to import. Use the get dangling indices API to locate the UUID. +- **`accept_data_loss` (boolean)**: This parameter must be set to true to import a dangling index. +Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. +- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master +- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout +## client.danglingIndices.listDanglingIndices [_dangling_indices.list_dangling_indices] Get the dangling indices. -If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. +If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. +For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. 
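+
+As a quick illustrative sketch (assuming an already-instantiated `client`, and that the response exposes a `dangling_indices` array), you could list the dangling indices and then import one of them:
+
+```ts
+// List every dangling index the cluster currently knows about.
+const { dangling_indices } = await client.danglingIndices.listDanglingIndices()
+
+for (const index of dangling_indices) {
+  console.log(index.index_name, index.index_uuid)
+}
+
+// Import the first dangling index; accept_data_loss must be explicitly true.
+if (dangling_indices.length > 0) {
+  await client.danglingIndices.importDanglingIndex({
+    index_uuid: dangling_indices[0].index_uuid,
+    accept_data_loss: true
+  })
+}
+```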
@@ -4467,12 +4594,9 @@ client.danglingIndices.listDanglingIndices() ``` -## enrich [_enrich] - - -### delete_policy [_delete_policy] - -Delete an enrich policy. Deletes an existing enrich policy and its enrich index. +## client.enrich.deletePolicy [_enrich.delete_policy] +Delete an enrich policy. +Deletes an existing enrich policy and its enrich index. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy) @@ -4480,19 +4604,15 @@ Delete an enrich policy. Deletes an existing enrich policy and its enrich index. client.enrich.deletePolicy({ name }) ``` +### Arguments [_arguments_enrich.delete_policy] -### Arguments [_arguments_133] - -* **Request (object):** - - * **`name` (string)**: Enrich policy to delete. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - +#### Request (object) [_request_enrich.delete_policy] +- **`name` (string)**: Enrich policy to delete. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - -### execute_policy [_execute_policy] - -Run an enrich policy. Create the enrich index for an existing enrich policy. +## client.enrich.executePolicy [_enrich.execute_policy] +Run an enrich policy. +Create the enrich index for an existing enrich policy. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy) @@ -4500,20 +4620,16 @@ Run an enrich policy. Create the enrich index for an existing enrich policy. client.enrich.executePolicy({ name }) ``` +### Arguments [_arguments_enrich.execute_policy] -### Arguments [_arguments_134] - -* **Request (object):** - - * **`name` (string)**: Enrich policy to execute. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks other enrich policy execution requests until complete. +#### Request (object) [_request_enrich.execute_policy] +- **`name` (string)**: Enrich policy to execute. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks other enrich policy execution requests until complete. - - -### get_policy [_get_policy] - -Get an enrich policy. Returns information about an enrich policy. +## client.enrich.getPolicy [_enrich.get_policy] +Get an enrich policy. +Returns information about an enrich policy. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy) @@ -4521,19 +4637,16 @@ Get an enrich policy. Returns information about an enrich policy. client.enrich.getPolicy({ ... }) ``` +### Arguments [_arguments_enrich.get_policy] -### Arguments [_arguments_135] - -* **Request (object):** +#### Request (object) [_request_enrich.get_policy] +- **`name` (Optional, string | string[])**: List of enrich policy names used to limit the request. +To return information for all enrich policies, omit this parameter. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`name` (Optional, string | string[])**: List of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
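+
+For example, a short sketch (assuming an existing `client` instance, and that the response exposes a `policies` array) that prints the configuration of every enrich policy:
+
+```ts
+// Omitting `name` returns every enrich policy in the cluster.
+const { policies } = await client.enrich.getPolicy()
+
+for (const policy of policies) {
+  // Each entry nests its definition under the match type (match, geo_match, or range).
+  console.log(JSON.stringify(policy.config))
+}
+```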
- - - -### put_policy [_put_policy] - -Create an enrich policy. Creates an enrich policy. +## client.enrich.putPolicy [_enrich.put_policy] +Create an enrich policy. +Creates an enrich policy. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy) @@ -4541,22 +4654,18 @@ Create an enrich policy. Creates an enrich policy. client.enrich.putPolicy({ name }) ``` +### Arguments [_arguments_enrich.put_policy] -### Arguments [_arguments_136] - -* **Request (object):** - - * **`name` (string)**: Name of the enrich policy to create or update. - * **`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `geo_shape` query. - * **`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `term` query. - * **`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - +#### Request (object) [_request_enrich.put_policy] +- **`name` (string)**: Name of the enrich policy to create or update. +- **`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `geo_shape` query. +- **`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `term` query. +- **`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -### stats [_stats_3] - -Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. +## client.enrich.stats [_enrich.stats] +Get enrich stats. +Returns enrich coordinator statistics and information about enrich policies that are currently executing. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats) @@ -4564,40 +4673,32 @@ Get enrich stats. Returns enrich coordinator statistics and information about en client.enrich.stats({ ... }) ``` +### Arguments [_arguments_enrich.stats] -### Arguments [_arguments_137] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -## eql [_eql] - - -### delete [_delete_4] +#### Request (object) [_request_enrich.stats] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. +## client.eql.delete [_eql.delete] +Delete an async EQL search. +Delete an async EQL search or a stored synchronous EQL search. +The API also deletes results for the search. 
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete) ```ts client.eql.delete({ id }) ``` +### Arguments [_arguments_eql.delete] -### Arguments [_arguments_138] - -* **Request (object):** - - * **`id` (string)**: Identifier for the search to delete. A search ID is provided in the EQL search API’s response for an async search. A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. - +#### Request (object) [_request_eql.delete] +- **`id` (string)**: Identifier for the search to delete. +A search ID is provided in the EQL search API's response for an async search. +A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. - -### get [_get_4] - -Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search. +## client.eql.get [_eql.get] +Get async EQL search results. +Get the current status and available results for an async EQL search or a stored synchronous EQL search. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get) @@ -4605,20 +4706,18 @@ Get async EQL search results. Get the current status and available results for a client.eql.get({ id }) ``` +### Arguments [_arguments_eql.get] -### Arguments [_arguments_139] - -* **Request (object):** - - * **`id` (string)**: Identifier for the search. - * **`keep_alive` (Optional, string | -1 | 0)**: Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request. - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. +#### Request (object) [_request_eql.get] +- **`id` (string)**: Identifier for the search. +- **`keep_alive` (Optional, string | -1 | 0)**: Period for which the search and its results are stored on the cluster. +Defaults to the keep_alive value set by the search’s EQL search API request. +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Timeout duration to wait for the request to finish. +Defaults to no timeout, meaning the request waits for complete search results. - - -### get_status [_get_status] - -Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results. +## client.eql.getStatus [_eql.get_status] +Get the async EQL status. +Get the current status for an async EQL search or a stored synchronous EQL search without returning results. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status) @@ -4626,18 +4725,15 @@ Get the async EQL status. Get the current status for an async EQL search or a st client.eql.getStatus({ id }) ``` +### Arguments [_arguments_eql.get_status] -### Arguments [_arguments_140] - -* **Request (object):** - - * **`id` (string)**: Identifier for the search. - - - -### search [_search_2] +#### Request (object) [_request_eql.get_status] +- **`id` (string)**: Identifier for the search. -Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event. +## client.eql.search [_eql.search] +Get EQL search results. 
+Returns search results for an Event Query Language (EQL) query. +EQL assumes each document in a data stream or index corresponds to an event. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search) @@ -4645,41 +4741,40 @@ Get EQL search results. Returns search results for an Event Query Language (EQL) client.eql.search({ index, query }) ``` - -### Arguments [_arguments_141] - -* **Request (object):** - - * **`index` (string | string[])**: The name of the index to scope the operation - * **`query` (string)**: EQL query you wish to run. - * **`case_sensitive` (Optional, boolean)** - * **`event_category_field` (Optional, string)**: Field containing the event classification, such as process, file, or network. - * **`tiebreaker_field` (Optional, string)**: Field used to sort hits with the same timestamp in ascending order - * **`timestamp_field` (Optional, string)**: Field containing event timestamp. Default "@timestamp" - * **`fetch_size` (Optional, number)**: Maximum number of events to search at a time for sequence queries. - * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query, written in Query DSL, used to filter the events on which the EQL query runs. - * **`keep_alive` (Optional, string | -1 | 0)** - * **`keep_on_completion` (Optional, boolean)** - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)** - * **`allow_partial_search_results` (Optional, boolean)** - * **`allow_partial_sequence_results` (Optional, boolean)** - * **`size` (Optional, number)**: For basic queries, the maximum number of matching events to return. Defaults to 10 - * **`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. - * **`result_position` (Optional, Enum("tail" | "head"))** - * **`runtime_mappings` (Optional, Record)** - * **`max_samples_per_key` (Optional, number)**: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. 
Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. - * **`allow_no_indices` (Optional, boolean)** - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])** - * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. - - - -## esql [_esql] - - -### async_query [_async_query] - -Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. +### Arguments [_arguments_eql.search] + +#### Request (object) [_request_eql.search] +- **`index` (string | string[])**: The name of the index to scope the operation +- **`query` (string)**: EQL query you wish to run. +- **`case_sensitive` (Optional, boolean)** +- **`event_category_field` (Optional, string)**: Field containing the event classification, such as process, file, or network. +- **`tiebreaker_field` (Optional, string)**: Field used to sort hits with the same timestamp in ascending order +- **`timestamp_field` (Optional, string)**: Field containing event timestamp. Default "@timestamp" +- **`fetch_size` (Optional, number)**: Maximum number of events to search at a time for sequence queries. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query, written in Query DSL, used to filter the events on which the EQL query runs. +- **`keep_alive` (Optional, string | -1 | 0)** +- **`keep_on_completion` (Optional, boolean)** +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)** +- **`allow_partial_search_results` (Optional, boolean)**: Allow query execution also in case of shard failures. +If true, the query will keep running and will return results based on the available shards. 
+For sequences, the behavior can be further refined using allow_partial_sequence_results.
+- **`allow_partial_sequence_results` (Optional, boolean)**: This flag applies only to sequences and has effect only if allow_partial_search_results=true.
+If true, the sequence query will return results based on the available shards, ignoring the others.
+If false, the sequence query will return successfully, but will always have empty results.
+- **`size` (Optional, number)**: For basic queries, the maximum number of matching events to return. Defaults to 10
+- **`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit.
+- **`result_position` (Optional, Enum("tail" | "head"))**
+- **`runtime_mappings` (Optional, Record)**
+- **`max_samples_per_key` (Optional, number)**: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size`
+parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the
+`max_samples_per_key` parameter. Pipes are not supported for sample queries.
+- **`allow_no_indices` (Optional, boolean)**
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**
+- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response.

+## client.esql.asyncQuery [_esql.async_query]
+Run an async ES|QL query.
+Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.

The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query)

@@ -4689,30 +4784,44 @@
client.esql.asyncQuery({ query })
```

-### Arguments [_arguments_142]
-
-* **Request (object):**
-
-    * **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.
-    * **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results.
-    * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
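+
+A minimal sketch (the index name and query are illustrative, and it assumes the response carries `id` and `is_running` fields) that submits an async query and retrieves the results later:
+
+```ts
+const submitted = await client.esql.asyncQuery({
+  query: 'FROM my-index | LIMIT 10',
+  wait_for_completion_timeout: '2s',
+  keep_on_completion: true
+})
+
+// If the query did not finish within two seconds, poll for the results by ID.
+if (submitted.is_running && submitted.id != null) {
+  const results = await client.esql.asyncQueryGet({ id: submitted.id })
+  console.log(results.values)
+}
+```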
- * **`locale` (Optional, string)** - * **`params` (Optional, number | number | string | boolean | null | User-defined value[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. - * **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. - * **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. - * **`delimiter` (Optional, string)**: The character to use between values within a CSV row. It is valid only for the CSV format. - * **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. - * **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, for example `json` or `yaml`. - * **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster. The default period is five days. When this period expires, the query and its results are deleted, even if the query is still ongoing. If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. - * **`keep_on_completion` (Optional, boolean)**: Indicates whether the query and its results are stored in the cluster. If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish. By default, the request waits for 1 second for the query results. If the query completes during this period, results are returned Otherwise, a query ID is returned that can later be used to retrieve the results. - - - -### async_query_delete [_async_query_delete] - -Delete an async ES|QL query. If the query is still running, it is cancelled. Otherwise, the stored results are deleted. +### Arguments [_arguments_esql.async_query] + +#### Request (object) [_request_esql.async_query] +- **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. +- **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. 
+- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
+- **`locale` (Optional, string)**
+- **`params` (Optional, number | number | string | boolean | null[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.
+- **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object
+with information on how the query was executed. This information is for human debugging
+and its format can change at any time but it can give some insight into the performance
+of each part of the query.
+- **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table
+name and the next level key is the column name.
+- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters`
+object with information about the clusters that participated in the search along with info such as shards
+count.
+- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish.
+By default, the request waits for 1 second for the query results.
+If the query completes during this period, results are returned.
+Otherwise, a query ID is returned that can later be used to retrieve the results.
+- **`allow_partial_results` (Optional, boolean)**: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
+- **`delimiter` (Optional, string)**: The character to use between values within a CSV row.
+It is valid only for the CSV format.
+- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
+If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
+- **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, for example `json` or `yaml`.
+- **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster.
+The default period is five days.
+When this period expires, the query and its results are deleted, even if the query is still ongoing.
+If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value.
+- **`keep_on_completion` (Optional, boolean)**: Indicates whether the query and its results are stored in the cluster. +If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. + +## client.esql.asyncQueryDelete [_esql.async_query_delete] +Delete an async ES|QL query. +If the query is still running, it is cancelled. +Otherwise, the stored results are deleted. If the Elasticsearch security features are enabled, only the following users can use this API to delete a query: @@ -4725,18 +4834,17 @@ If the Elasticsearch security features are enabled, only the following users can client.esql.asyncQueryDelete({ id }) ``` +### Arguments [_arguments_esql.async_query_delete] -### Arguments [_arguments_143] +#### Request (object) [_request_esql.async_query_delete] +- **`id` (string)**: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. -* **Request (object):** - - * **`id` (string)**: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. - - - -### async_query_get [_async_query_get] - -Get async ES|QL query results. Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. +## client.esql.asyncQueryGet [_esql.async_query_get] +Get async ES|QL query results. +Get the current status and available results or stored results for an ES|QL asynchronous query. +If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get) @@ -4744,56 +4852,86 @@ Get async ES|QL query results. Get the current status and available results or s client.esql.asyncQueryGet({ id }) ``` +### Arguments [_arguments_esql.async_query_get] -### Arguments [_arguments_144] - -* **Request (object):** - - * **`id` (string)**: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. - * **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. - * **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish. By default, the request waits for complete query results. 
If the request completes during the period specified in this parameter, complete query results are returned. Otherwise, the response returns an `is_running` value of `true` and no results. - +#### Request (object) [_request_esql.async_query_get] +- **`id` (string)**: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +- **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster. +When this period expires, the query and its results are deleted, even if the query is still ongoing. +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish. +By default, the request waits for complete query results. +If the request completes during the period specified in this parameter, complete query results are returned. +Otherwise, the response returns an `is_running` value of `true` and no results. +## client.esql.asyncQueryStop [_esql.async_query_stop] +Stop async ES|QL query. -### query [_query] +This API interrupts the query execution and returns the results so far. +If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. -Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query. - -[Endpoint documentation](docs-content://explore-analyze/query-filter/languages/esql-rest.md) +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-stop-api.html) ```ts -client.esql.query({ query }) +client.esql.asyncQueryStop({ id }) ``` +### Arguments [_arguments_esql.async_query_stop] -### Arguments [_arguments_145] - -* **Request (object):** - - * **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. - * **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. - * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. 
- * **`locale` (Optional, string)** - * **`params` (Optional, number | number | string | boolean | null | User-defined value[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. - * **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. - * **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. - * **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, e.g. json, yaml. - * **`delimiter` (Optional, string)**: The character to use between values within a CSV row. Only valid for the CSV format. - * **`drop_null_columns` (Optional, boolean)**: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. - - - -## features [_features_17] - +#### Request (object) [_request_esql.async_query_stop] +- **`id` (string)**: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. -### get_features [_get_features] +## client.esql.query [_esql.query] +Run an ES|QL query. +Get search results for an ES|QL (Elasticsearch query language) query. -Get the features. Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html) -A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. +```ts +client.esql.query({ query }) +``` -The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. +### Arguments [_arguments_esql.query] + +#### Request (object) [_request_esql.query] +- **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. +- **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. 
For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. +- **`locale` (Optional, string)** +- **`params` (Optional, number | number | string | boolean | null[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +- **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object +with information on how the query was executed. This information is for human debugging +and its format can change at any time but it can give some insight into the performance +of each part of the query. +- **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table +name and the next level key is the column name. +- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` +object with information about the clusters that participated in the search along with info such as shards +count. +- **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, e.g. json, yaml. +- **`delimiter` (Optional, string)**: The character to use between values within a CSV row. Only valid for the CSV format. +- **`drop_null_columns` (Optional, boolean)**: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? +Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. +- **`allow_partial_results` (Optional, boolean)**: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. + +## client.features.getFeatures [_features.get_features] +Get the features. +Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. +You can use this API to determine which feature states to include when taking a snapshot. +By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. + +A feature state includes one or more system indices necessary for a given feature to function. +In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. 
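+
+As a brief sketch (the repository and snapshot names are illustrative), you could combine this API with the create snapshot API to snapshot only the reported feature states:
+
+```ts
+const { features } = await client.features.getFeatures()
+
+await client.snapshot.create({
+  repository: 'my_repository',
+  snapshot: 'snapshot_1',
+  include_global_state: false,
+  // Restrict the snapshot to the feature states discovered above.
+  feature_states: features.map(feature => feature.name)
+})
+```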
+ +The features listed by this API are a combination of built-in features and features defined by plugins. +In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features) @@ -4801,58 +4939,46 @@ The features listed by this API are a combination of built-in features and featu client.features.getFeatures({ ... }) ``` +### Arguments [_arguments_features.get_features] -### Arguments [_arguments_146] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### reset_features [_reset_features] +#### Request (object) [_request_features.get_features] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. +## client.features.resetFeatures [_features.reset_features] +Reset the features. +Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. -::::{warning} -Intended for development and testing use only. Do not reset features on a production cluster. -:::: +WARNING: Intended for development and testing use only. Do not reset features on a production cluster. +Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. +This deletes all state information stored in system indices. -Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices. +The response code is HTTP 200 if the state is successfully reset for all features. +It is HTTP 500 if the reset operation failed for any feature. -The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature. - -Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins. +Note that select features might provide a way to reset particular system indices. +Using this API resets all features, both those that are built-in and implemented as plugins. To list the features that will be affected, use the get features API. -::::{important} -The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. -:::: - +IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. -[Endpoint documentation](docs-content://deploy-manage/tools/snapshot-and-restore.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features) ```ts client.features.resetFeatures({ ... 
})
```

+### Arguments [_arguments_features.reset_features]

-### Arguments [_arguments_147]
-
-* **Request (object):**
-
-    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
-
-
-
-## fleet [_fleet]
-
-
-### global_checkpoints [_global_checkpoints]
+#### Request (object) [_request_features.reset_features]
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.

+## client.fleet.globalCheckpoints [_fleet.global_checkpoints]
+Get global checkpoints.

-Get global checkpoints. Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project.
+Get the current global checkpoints for an index.
+This API is designed for internal use by the Fleet server project.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet)

```ts
client.fleet.globalCheckpoints({ index })
```

+### Arguments [_arguments_fleet.global_checkpoints]

-### Arguments [_arguments_148]
-
-* **Request (object):**
-
-    * **`index` (string | string)**: A single index or index alias that resolves to a single index.
-    * **`wait_for_advance` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the global checkpoints to advance past the provided `checkpoints`.
-    * **`wait_for_index` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the target index to exist and all primary shards be active. Can only be true when `wait_for_advance` is true.
-    * **`checkpoints` (Optional, number[])**: A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list will cause Elasticsearch to immediately return the current global checkpoints.
-    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a global checkpoints to advance past `checkpoints`.
-
-
-
-### msearch [_msearch_2]
+#### Request (object) [_request_fleet.global_checkpoints]
+- **`index` (string | string)**: A single index or index alias that resolves to a single index.
+- **`wait_for_advance` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the global checkpoints
+to advance past the provided `checkpoints`.
+- **`wait_for_index` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the target index to exist
+and all primary shards be active. Can only be true when `wait_for_advance` is true.
+- **`checkpoints` (Optional, number[])**: A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`,
+the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list
+will cause Elasticsearch to immediately return the current global checkpoints.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for global checkpoints to advance past `checkpoints`.

+## client.fleet.msearch [_fleet.msearch]
+Run multiple Fleet searches.
+Run several Fleet searches with a single API request.
+The API follows the same structure as the multi search API.
+However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.

-Run multiple Fleet searches.
Run several Fleet searches with a single API request. The API follows the same structure as the multi search API. However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch) ```ts client.fleet.msearch({ ... }) ``` - -### Arguments [_arguments_149] - -* **Request (object):** - - * **`index` (Optional, string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index. - * **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - * **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. - * **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. - * **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. - * **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. - * **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. - * **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. 
- * **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. - * **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](docs-content://deploy-manage/distributed-architecture/reading-and-writing-documents.md#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. - - - -### search [_search_3] - -Run a Fleet search. The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. +### Arguments [_arguments_fleet.msearch] + +#### Request (object) [_request_fleet.msearch] +- **`index` (Optional, string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index. +- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. +- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. +- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. +- **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
This filter roundtrip can limit the number of shards significantly if for instance a shard cannot match any documents based on its rewrite method, i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint.
+- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents.
+- **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object.
+- **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response.
+- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
+after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
+Elasticsearch to immediately execute the search.
+- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns
+an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`
+which is true by default.

+## client.fleet.search [_fleet.search]
+Run a Fleet search.
+The purpose of the Fleet search API is to provide an API where the search will be run only
+after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search)

```ts
client.fleet.search({ index })
```

-
-### Arguments [_arguments_150]
-
-* **Request (object):**
-
- * **`index` (string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index.
- * **`aggregations` (Optional, Record)**
- * **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**
- * **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit.
- * **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins.
- * **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter.
- * **`highlight` (Optional, { encoder, fields })**
- * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits.
- * **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices.
- * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response.
- * **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are not included in the search results.
- * **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** - * **`profile` (Optional, boolean)** - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. - * **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** - * **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. - * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])** - * **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. - * **`slice` (Optional, { field, id, max })** - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** - * **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. - * **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. - * **`suggest` (Optional, { text })** - * **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. - * **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. - * **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. - * **`version` (Optional, boolean)**: If true, returns document version as part of a hit. 
- * **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. - * **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. - * **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. - * **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. - * **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. - * **`allow_no_indices` (Optional, boolean)** - * **`analyzer` (Optional, string)** - * **`analyze_wildcard` (Optional, boolean)** - * **`batched_reduce_size` (Optional, number)** - * **`ccs_minimize_roundtrips` (Optional, boolean)** - * **`default_operator` (Optional, Enum("and" | "or"))** - * **`df` (Optional, string)** - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])** - * **`ignore_throttled` (Optional, boolean)** - * **`ignore_unavailable` (Optional, boolean)** - * **`lenient` (Optional, boolean)** - * **`max_concurrent_shard_requests` (Optional, number)** - * **`preference` (Optional, string)** - * **`pre_filter_shard_size` (Optional, number)** - * **`request_cache` (Optional, boolean)** - * **`routing` (Optional, string)** - * **`scroll` (Optional, string | -1 | 0)** - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))** - * **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions. - * **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))** - * **`suggest_size` (Optional, number)** - * **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. - * **`typed_keys` (Optional, boolean)** - * **`rest_total_hits_as_int` (Optional, boolean)** - * **`_source_excludes` (Optional, string | string[])** - * **`_source_includes` (Optional, string | string[])** - * **`q` (Optional, string)** - * **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. - * **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](docs-content://deploy-manage/distributed-architecture/reading-and-writing-documents.md#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. - - - -## graph [_graph] - - -### explore [_explore] - -Explore graph analytics. Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. 
The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. Subsequent requests enable you to spider out from one more vertices of interest. You can exclude vertices that have already been returned. +### Arguments [_arguments_fleet.search] + +#### Request (object) [_request_fleet.search] +- **`index` (string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index. +- **`aggregations` (Optional, Record)** +- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** +- **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. +- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. +- **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. +- **`highlight` (Optional, { encoder, fields })** +- **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact +number of hits is returned at the cost of some performance. If false, the +response does not include the total number of hits matching the query. +Defaults to 10,000 hits. +- **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. +- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field +names matching these patterns in the hits.fields property of the response. +- **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are +not included in the search results. 
+- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** +- **`profile` (Optional, boolean)** +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. +- **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** +- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. +- **`search_after` (Optional, number | number | string | boolean | null[])** +- **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +- **`slice` (Optional, { field, id, max })** +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** +- **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +- **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +- **`suggest` (Optional, { text })** +- **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this +limit, Elasticsearch terminates the query early. Elasticsearch collects documents +before sorting. Defaults to 0, which does not terminate query execution early. +- **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response +is received before the timeout expires, the request fails and returns an error. +Defaults to no timeout. +- **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. +- **`version` (Optional, boolean)**: If true, returns document version as part of a hit. 
+- **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification
+of each hit. See Optimistic concurrency control.
+- **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified,
+no stored fields are included in the response. If this field is specified, the _source
+parameter defaults to false. You can pass _source: true to return both source fields
+and stored fields in the search response.
+- **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an `<index>` in the request path.
+- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+- **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics
+aggregation for its associated searches. You can retrieve these stats using
+the indices stats API.
+- **`allow_no_indices` (Optional, boolean)**
+- **`analyzer` (Optional, string)**
+- **`analyze_wildcard` (Optional, boolean)**
+- **`batched_reduce_size` (Optional, number)**
+- **`ccs_minimize_roundtrips` (Optional, boolean)**
+- **`default_operator` (Optional, Enum("and" | "or"))**
+- **`df` (Optional, string)**
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**
+- **`ignore_throttled` (Optional, boolean)**
+- **`ignore_unavailable` (Optional, boolean)**
+- **`lenient` (Optional, boolean)**
+- **`max_concurrent_shard_requests` (Optional, number)**
+- **`preference` (Optional, string)**
+- **`pre_filter_shard_size` (Optional, number)**
+- **`request_cache` (Optional, boolean)**
+- **`routing` (Optional, string)**
+- **`scroll` (Optional, string | -1 | 0)**
+- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**
+- **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions.
+- **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**
+- **`suggest_size` (Optional, number)**
+- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned.
+- **`typed_keys` (Optional, boolean)**
+- **`rest_total_hits_as_int` (Optional, boolean)**
+- **`_source_excludes` (Optional, string | string[])**
+- **`_source_includes` (Optional, string | string[])**
+- **`q` (Optional, string)**
+- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
+after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
+Elasticsearch to immediately execute the search.
+- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns
+an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`
+which is true by default.
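
+For example, a minimal sketch (the index name, checkpoint value, and query below are hypothetical) of a Fleet search that runs only once checkpoint 77 is visible for search on the target shard:
+
+```ts
+const response = await client.fleet.search({
+  index: 'my-fleet-index',     // hypothetical target; must resolve to a single index
+  wait_for_checkpoints: [77],  // execute only after this checkpoint is visible for search
+  query: { match_all: {} },
+})
+console.log(response.hits.hits)
+```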

+## client.graph.explore [_graph.explore]
+Explore graph analytics.
+Extract and summarize information about the documents and terms in an Elasticsearch data stream or index.
+The easiest way to understand the behavior of this API is to use the Graph UI to explore connections.
+An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph.
+Subsequent requests enable you to spider out from one or more vertices of interest.
+You can exclude vertices that have already been returned.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph)

@@ -4994,27 +5151,22 @@ Explore graph analytics. Extract and summarize information about the documents a
client.graph.explore({ index })
```

+### Arguments [_arguments_graph.explore]

-### Arguments [_arguments_151]
-
-* **Request (object):**
-
- * **`index` (string | string[])**: Name of the index.
- * **`connections` (Optional, { connections, query, vertices })**: Specifies or more fields from which you want to extract terms that are associated with the specified vertices.
- * **`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })**: Direct the Graph API how to build the graph.
- * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
- * **`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])**: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
- * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
- * **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout.
-
-

+#### Request (object) [_request_graph.explore]
+- **`index` (string | string[])**: Name of the index.
+- **`connections` (Optional, { connections, query, vertices })**: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
+- **`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })**: Direct the Graph API how to build the graph.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
+- **`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])**: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
+- **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
+- **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for a response from each shard.
+If no response is received before the timeout expires, the request fails and returns an error.
+Defaults to no timeout.
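
+As an illustrative sketch (the index, fields, and values are hypothetical), a seed query plus vertex and connection definitions might look like this:
+
+```ts
+const response = await client.graph.explore({
+  index: 'clicklogs',                                  // hypothetical index of search click logs
+  query: { match: { 'query.raw': 'midi' } },           // seed query identifying the documents of interest
+  vertices: [{ field: 'product' }],                    // terms to include in the graph as vertices
+  connections: { vertices: [{ field: 'query.raw' }] }, // fields to spider out to from the seed vertices
+})
+console.log(response.vertices, response.connections)
+```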

-
-## ilm [_ilm]
-
-### delete_lifecycle [_delete_lifecycle]
-
-Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
+## client.ilm.deleteLifecycle [_ilm.delete_lifecycle]
+Delete a lifecycle policy.
+You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle)

```ts
client.ilm.deleteLifecycle({ policy })
```

+### Arguments [_arguments_ilm.delete_lifecycle]

-### Arguments [_arguments_152]
-
-* **Request (object):**
-
- * **`policy` (string)**: Identifier for the policy.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

+#### Request (object) [_request_ilm.delete_lifecycle]
+- **`policy` (string)**: Identifier for the policy.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

-### explain_lifecycle [_explain_lifecycle]
-
-Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream’s backing indices.
+## client.ilm.explainLifecycle [_ilm.explain_lifecycle]
+Explain the lifecycle state.
+Get the current lifecycle status for one or more indices.
+For data streams, the API retrieves the current lifecycle status for the stream's backing indices.

The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle)

```ts
client.ilm.explainLifecycle({ index })
```

+### Arguments [_arguments_ilm.explain_lifecycle]

-### Arguments [_arguments_153]
-
-* **Request (object):**
-
- * **`index` (string)**: List of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` or `_all`.
- * **`only_errors` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to an encountering an error while executing the policy, or attempting to use a policy that does not exist.
- * **`only_managed` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.

+#### Request (object) [_request_ilm.explain_lifecycle]
+- **`index` (string)**: List of data streams, indices, and aliases to target. Supports wildcards (`*`).
+To target all data streams and indices, use `*` or `_all`.
+- **`only_errors` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist.
+- **`only_managed` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.

+## client.ilm.getLifecycle [_ilm.get_lifecycle]
Get lifecycle policies.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle)

```ts
client.ilm.getLifecycle({ ... })
```

+### Arguments [_arguments_ilm.get_lifecycle]

-### Arguments [_arguments_154]
-
-* **Request (object):**
-
- * **`policy` (Optional, string)**: Identifier for the policy.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

+#### Request (object) [_request_ilm.get_lifecycle]
+- **`policy` (Optional, string)**: Identifier for the policy.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

+## client.ilm.getStatus [_ilm.get_status]
+Get the ILM status.

-### get_status [_get_status_2]
-
-Get the ILM status. Get the current index lifecycle management status.
+Get the current index lifecycle management status. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status) @@ -5089,17 +5231,21 @@ client.ilm.getStatus() ``` -### migrate_to_data_tiers [_migrate_to_data_tiers] - -Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. +## client.ilm.migrateToDataTiers [_ilm.migrate_to_data_tiers] +Migrate to data tiers routing. +Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. +Optionally, delete one legacy index template. +Using node roles enables ILM to automatically move the indices between data tiers. -Migrating away from custom node attributes routing can be manually performed. This API provides an automated way of performing three out of the four manual steps listed in the migration guide: +Migrating away from custom node attributes routing can be manually performed. +This API provides an automated way of performing three out of the four manual steps listed in the migration guide: 1. Stop setting the custom hot attribute on new indices. -2. Remove custom allocation settings from existing ILM policies. -3. Replace custom allocation settings from existing indices with the corresponding tier preference. +1. Remove custom allocation settings from existing ILM policies. +1. Replace custom allocation settings from existing indices with the corresponding tier preference. -ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. +ILM must be stopped before performing the migration. +Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers) @@ -5107,29 +5253,32 @@ ILM must be stopped before performing the migration. Use the stop ILM and get IL client.ilm.migrateToDataTiers({ ... }) ``` +### Arguments [_arguments_ilm.migrate_to_data_tiers] -### Arguments [_arguments_155] - -* **Request (object):** - - * **`legacy_template_to_delete` (Optional, string)** - * **`node_attribute` (Optional, string)** - * **`dry_run` (Optional, boolean)**: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated. - - - -### move_to_step [_move_to_step] - -Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step. +#### Request (object) [_request_ilm.migrate_to_data_tiers] +- **`legacy_template_to_delete` (Optional, string)** +- **`node_attribute` (Optional, string)** +- **`dry_run` (Optional, boolean)**: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. +This provides a way to retrieve the indices and ILM policies that need to be migrated. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. 
+If no response is received before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.

-Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step.
+## client.ilm.moveToStep [_ilm.move_to_step]
+Move to a lifecycle step.
+Manually move an index into a specific step in the lifecycle policy and run that step.

+WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API.

-You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index This is to prevent the index from being moved from an unexpected step into the next step.
+You must specify both the current step and the step to be executed in the body of the request.
+The request will fail if the current step does not match the step currently running for the index.
+This is to prevent the index from being moved from an unexpected step into the next step.

-When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy.
+When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional.
+If only the phase is specified, the index will move to the first step of the first action in the target phase.
+If the phase and action are specified, the index will move to the first step of the specified action in the specified phase.
+Only actions specified in the ILM policy are considered valid.
+An index cannot move to a step that is not part of its policy.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step)

@@ -5137,25 +5286,18 @@ When specifying the target (`next_step`) to which the index will be moved, eithe
client.ilm.moveToStep({ index, current_step, next_step })
```

+### Arguments [_arguments_ilm.move_to_step]

-### Arguments [_arguments_156]
-
-* **Request (object):**
-
- * **`index` (string)**: The name of the index whose lifecycle step is to change
- * **`current_step` ({ action, name, phase })**: The step that the index is expected to be in.
- * **`next_step` ({ action, name, phase })**: The step that you want to run.

+#### Request (object) [_request_ilm.move_to_step]
+- **`index` (string)**: The name of the index whose lifecycle step is to change
+- **`current_step` ({ action, name, phase })**: The step that the index is expected to be in.
+- **`next_step` ({ action, name, phase })**: The step that you want to run.
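
+For example, a minimal sketch (the index name and step values are hypothetical; both steps must exist in your policy) that moves an index from the `new` phase into a `warm` phase action:
+
+```ts
+await client.ilm.moveToStep({
+  index: 'my-index', // hypothetical index
+  // the step ILM is expected to be running for this index right now
+  current_step: { phase: 'new', action: 'complete', name: 'complete' },
+  // the step to run next; must be part of the index's lifecycle policy
+  next_step: { phase: 'warm', action: 'forcemerge', name: 'forcemerge' },
+})
+```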

-### put_lifecycle [_put_lifecycle]
-
-Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented.
-
-::::{note}
-Only the latest version of the policy is stored, you cannot revert to previous versions.
-::::
+## client.ilm.putLifecycle [_ilm.put_lifecycle]
+Create or update a lifecycle policy.
+If the specified policy exists, it is replaced and the policy version is incremented.

+NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle)

```ts
client.ilm.putLifecycle({ policy })
```

+### Arguments [_arguments_ilm.put_lifecycle]

-### Arguments [_arguments_157]
-
-* **Request (object):**
-
- * **`policy` (string)**: Identifier for the policy.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

+#### Request (object) [_request_ilm.put_lifecycle]
+- **`policy` (string)**: Identifier for the policy.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

-
-### remove_policy [_remove_policy]
-
-Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream’s backing indices. It also stops managing the indices.
+## client.ilm.removePolicy [_ilm.remove_policy]
+Remove policies from an index.
+Remove the assigned lifecycle policies from an index or a data stream's backing indices.
+It also stops managing the indices.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy)

```ts
client.ilm.removePolicy({ index })
```

+### Arguments [_arguments_ilm.remove_policy]

-### Arguments [_arguments_158]
-
-* **Request (object):**
-
- * **`index` (string)**: The name of the index to remove policy on
-
-
-
-### retry [_retry]
-
-Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step.
+#### Request (object) [_request_ilm.remove_policy]
+- **`index` (string)**: The name of the index to remove policy on

+## client.ilm.retry [_ilm.retry]
+Retry a policy.
+Retry running the lifecycle policy for an index that is in the ERROR step.
+The API sets the policy back to the step where the error occurred and runs the step.
+Use the explain lifecycle state API to determine whether an index is in the ERROR step.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry)

@@ -5203,18 +5340,16 @@ Retry a policy.
Retry running the lifecycle policy for an index that is in the E
client.ilm.retry({ index })
```

+### Arguments [_arguments_ilm.retry]

-### Arguments [_arguments_159]
-
-* **Request (object):**
-
- * **`index` (string)**: The name of the indices (comma-separated) whose failed lifecycle step is to be retry
-
-
-
-### start [_start]
-
-Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API.
+#### Request (object) [_request_ilm.retry]
+- **`index` (string)**: The name of the indices (comma-separated) whose failed lifecycle step is to be retried

+## client.ilm.start [_ilm.start]
+Start the ILM plugin.
+Start the index lifecycle management plugin if it is currently stopped.
+ILM is started automatically when the cluster is formed.
+Restarting ILM is necessary only when it has been stopped using the stop ILM API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start)

```ts
client.ilm.start({ ... })
```

+### Arguments [_arguments_ilm.start]

-### Arguments [_arguments_160]
-
-* **Request (object):**
-
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

+#### Request (object) [_request_ilm.start]
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

+## client.ilm.stop [_ilm.stop]
+Stop the ILM plugin.
+Halt all lifecycle management operations and stop the index lifecycle management plugin.
+This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.

-### stop [_stop]
-
-Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.
-
-The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running.
+The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped.
+Use the get ILM status API to check whether ILM is running.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop)

```ts
client.ilm.stop({ ...
}) ``` +### Arguments [_arguments_ilm.stop] -### Arguments [_arguments_161] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -## indices [_indices_2] - +#### Request (object) [_request_ilm.stop] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -### add_block [_add_block] +## client.indices.addBlock [_indices.add_block] +Add an index block. -Add an index block. Limits the operations allowed on an index by blocking specific operation types. +Add an index block to an index. +Index blocks limit the operations allowed on an index by blocking specific operation types. -[Index block settings](elasticsearch://reference/elasticsearch/index-settings/index-block.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block) ```ts client.indices.addBlock({ index, block }) ``` - -### Arguments [_arguments_162] - -* **Request (object):** - - * **`index` (string)**: A comma separated list of indices to add a block to - * **`block` (Enum("metadata" | "read" | "read_only" | "write"))**: The block to add (one of read, write, read_only or metadata) - * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master - * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout - - - -### analyze [_analyze] - -Get tokens from text analysis. The analyze API performs analysis on a text string and returns the resulting tokens. - -Generating excessive amount of tokens may cause a node to run out of memory. The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. If more than this limit of tokens gets generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit. +### Arguments [_arguments_indices.add_block] + +#### Request (object) [_request_indices.add_block] +- **`index` (string)**: A list or wildcard expression of index names used to limit the request. +By default, you must explicitly name the indices you are adding blocks to. +To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. 
+You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API.
+- **`block` (Enum("metadata" | "read" | "read_only" | "write"))**: The block type to add to the index.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+It supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.
+- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+It can also be set to `-1` to indicate that the request should never timeout.

+## client.indices.analyze [_indices.analyze]
+Get tokens from text analysis.
+The analyze API performs analysis on a text string and returns the resulting tokens.

+Generating an excessive amount of tokens may cause a node to run out of memory.
+The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.
+If more than this limit of tokens gets generated, an error occurs.
+The `_analyze` endpoint without a specified index will always use `10000` as its limit.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze)

```ts
client.indices.analyze({ ... })
```

-
-### Arguments [_arguments_163]
-
-* **Request (object):**
-
- * **`index` (Optional, string)**: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer.
- * **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index.
- * **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter.
- * **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer.
- * **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details. - * **`field` (Optional, string)**: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. - * **`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])**: Array of token filters used to apply after the tokenizer. - * **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token. - * **`text` (Optional, string | string[])**: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. 
- * **`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })**: Tokenizer to use to convert text into tokens. - - - -### cancel_migrate_reindex [_cancel_migrate_reindex] - +### Arguments [_arguments_indices.analyze] + +#### Request (object) [_request_indices.analyze] +- **`index` (Optional, string)**: Index used to derive the analyzer. +If specified, the `analyzer` or field parameter overrides this value. +If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. +- **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`. +This could be a built-in analyzer, or an analyzer that’s been configured in the index. +- **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter. +- **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer. +- **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details. +- **`field` (Optional, string)**: Field used to derive the analyzer. +To use this parameter, you must specify an index. +If specified, the `analyzer` parameter overrides this value. 
+- **`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])**: Array of token filters used to apply after the tokenizer. +- **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token. +- **`text` (Optional, string | string[])**: Text to analyze. +If an array of strings is provided, it is analyzed as a multi-value field. +- **`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })**: Tokenizer to use to convert text into tokens. 
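A minimal sketch of how these parameters combine (the inline analysis chain and sample text below are arbitrary illustrations, and no index settings are touched):

```ts
// Sketch: test an ad hoc analysis chain with the analyze API.
// Setting `explain: true` would also return token attributes.
const result = await client.indices.analyze({
  tokenizer: 'standard',
  filter: ['lowercase', { type: 'edge_ngram', min_gram: 2, max_gram: 5 }],
  text: 'Elasticsearch Node.js client'
})
console.log(result.tokens?.map(t => t.token))
```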
+ +## client.indices.cancelMigrateReindex [_indices.cancel_migrate_reindex] Cancel a migration reindex operation. Cancel a migration reindex attempt for a data stream or index. +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html) + ```ts client.indices.cancelMigrateReindex({ index }) ``` +### Arguments [_arguments_indices.cancel_migrate_reindex] -### Arguments [_arguments_164] +#### Request (object) [_request_indices.cancel_migrate_reindex] +- **`index` (string | string[])**: The index or data stream name -* **Request (object):** +## client.indices.clearCache [_indices.clear_cache] +Clear the cache. +Clear the cache of one or more indices. +For data streams, the API clears the caches of the stream's backing indices. - * **`index` (string | string[])**: The index or data stream name - - - -### clear_cache [_clear_cache] - -Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream’s backing indices. - -By default, the clear cache API clears all caches. To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. +By default, the clear cache API clears all caches. +To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. +To clear the cache only of specific fields, use the `fields` parameter. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache) @@ -5343,32 +5483,37 @@ By default, the clear cache API clears all caches. To clear only specific caches client.indices.clearCache({ ... }) ``` - -### Arguments [_arguments_165] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`fielddata` (Optional, boolean)**: If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. - * **`fields` (Optional, string | string[])**: List of field names used to limit the `fielddata` parameter. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`query` (Optional, boolean)**: If `true`, clears the query cache. - * **`request` (Optional, boolean)**: If `true`, clears the request cache. - - - -### clone [_clone] - -Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. - -::::{important} -Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. 
Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. -:::: - - -The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. +### Arguments [_arguments_indices.clear_cache] + +#### Request (object) [_request_indices.clear_cache] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`fielddata` (Optional, boolean)**: If `true`, clears the fields cache. +Use the `fields` parameter to clear the cache of specific fields only. +- **`fields` (Optional, string | string[])**: List of field names used to limit the `fielddata` parameter. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`query` (Optional, boolean)**: If `true`, clears the query cache. +- **`request` (Optional, boolean)**: If `true`, clears the request cache. + +## client.indices.clone [_indices.clone] +Clone an index. +Clone an existing index into a new index. +Each original primary shard is cloned into a new primary shard in the new index. + +IMPORTANT: Elasticsearch does not apply index templates to the resulting index. +The API also does not copy index metadata from the original index. +Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. +For example, if you clone a CCR follower index, the resulting clone will not be a follower index. + +The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. +To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: @@ -5376,30 +5521,29 @@ Cloning works as follows: * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. 
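The cloning flow above can be exercised end to end with the client; a minimal sketch, assuming hypothetical index names and that the source index may be made read-only first:

```ts
// Sketch: block writes on the source (a clone requires a read-only source),
// clone it, then wait until the clone's primary shards are allocated.
await client.indices.addBlock({ index: 'my-source-index', block: 'write' })
await client.indices.clone({
  index: 'my-source-index',
  target: 'my-target-index',
  settings: { 'index.number_of_replicas': 1 }
})
await client.cluster.health({ index: 'my-target-index', wait_for_status: 'yellow' })
```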
-::::{important} -Indices can only be cloned if they meet the following requirements: -:::: - +IMPORTANT: Indices can only be cloned if they meet the following requirements: * The index must be marked as read-only and have a cluster health status of green. * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. -The current write index on a data stream cannot be cloned. In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. - -::::{note} -Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. -:::: +The current write index on a data stream cannot be cloned. +In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. +NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. **Monitor the cloning process** The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. -The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. At this point, all shards are in the state unassigned. If, for any reason, the target index can’t be allocated, its primary shard will remain unassigned until it can be allocated on that node. +The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. +At this point, all shards are in the state unassigned. +If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. -Once the primary shard is allocated, it moves to state initializing, and the clone process begins. When the clone operation completes, the shard will become active. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. +Once the primary shard is allocated, it moves to state initializing, and the clone process begins. +When the clone operation completes, the shard will become active. +At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. **Wait for active shards** @@ -5411,32 +5555,39 @@ Because the clone operation creates a new index to clone the shards to, the wait client.indices.clone({ index, target }) ``` +### Arguments [_arguments_indices.clone] -### Arguments [_arguments_166] - -* **Request (object):** - - * **`index` (string)**: Name of the source index to clone. - * **`target` (string)**: Name of the target index to create. - * **`aliases` (Optional, Record)**: Aliases for the resulting index. - * **`settings` (Optional, Record)**: Configuration options for the target index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - +#### Request (object) [_request_indices.clone] +- **`index` (string)**: Name of the source index to clone. +- **`target` (string)**: Name of the target index to create. +- **`aliases` (Optional, Record)**: Aliases for the resulting index. +- **`settings` (Optional, Record)**: Configuration options for the target index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +## client.indices.close [_indices.close] +Close an index. +A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. +It is not possible to index documents or to search for documents in a closed index. +Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. -### close [_close] +When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. +The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. -Close an index. A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. +You can open and close multiple indices. +An error is thrown if the request explicitly refers to a missing index. +This behaviour can be turned off using the `ignore_unavailable=true` parameter. -When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. +By default, you must explicitly name the indices you are opening or closing. +To open or close indices with `_all`, `*`, or other wildcard expressions, change the` action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. -You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behaviour can be turned off using the `ignore_unavailable=true` parameter. 
- -By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the` action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. - -Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. +Closed indices consume a significant amount of disk-space which can cause problems in managed environments. +Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close) @@ -5444,24 +5595,28 @@ Closed indices consume a significant amount of disk-space which can cause proble client.indices.close({ index }) ``` - -### Arguments [_arguments_167] - -* **Request (object):** - - * **`index` (string | string[])**: List or wildcard expression of index names used to limit the request. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - - - -### create [_create_2] - -Create an index. You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following: +### Arguments [_arguments_indices.close] + +#### Request (object) [_request_indices.close] +- **`index` (string | string[])**: List or wildcard expression of index names used to limit the request. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. 
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +## client.indices.create [_indices.create] +Create an index. +You can use the create index API to add a new index to an Elasticsearch cluster. +When creating an index, you can specify the following: * Settings for the index. * Mappings for fields in the index. @@ -5469,9 +5624,16 @@ Create an index. You can use the create index API to add a new index to an Elast **Wait for active shards** -By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. The index creation response will indicate what happened. For example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. These values simply indicate whether the operation completed before the timeout. If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). +By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. +The index creation response will indicate what happened. +For example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. +Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. +These values simply indicate whether the operation completed before the timeout. +If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. 
+If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). -You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. +You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. +Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create) @@ -5479,93 +5641,92 @@ You can change the default of only waiting for the primary shards to start throu client.indices.create({ index }) ``` +### Arguments [_arguments_indices.create] -### Arguments [_arguments_168] - -* **Request (object):** - - * **`index` (string)**: Name of the index you wish to create. - * **`aliases` (Optional, Record)**: Aliases for the index. - * **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. If specified, this mapping can include: - - * Field names - * Field data types - * Mapping parameters +#### Request (object) [_request_indices.create] +- **`index` (string)**: Name of the index you wish to create. +- **`aliases` (Optional, Record)**: Aliases for the index. +- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. If specified, this mapping can include: +- Field names +- Field data types +- Mapping parameters +- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. 
+- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - * **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +## client.indices.createDataStream [_indices.create_data_stream] +Create a data stream. +You must have a matching index template with data stream enabled. - -### create_data_stream [_create_data_stream] - -Create a data stream. Creates a data stream. You must have a matching index template with data stream enabled. - -[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream) ```ts client.indices.createDataStream({ name }) ``` +### Arguments [_arguments_indices.create_data_stream] -### Arguments [_arguments_169] - -* **Request (object):** - - * **`name` (string)**: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
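A minimal sketch of the full flow, using hypothetical template and stream names: a matching index template with `data_stream` enabled must exist before the stream is created.

```ts
// Sketch: create a matching index template first, then the data stream.
// All names below are placeholders.
await client.indices.putIndexTemplate({
  name: 'my-logs-template',
  index_patterns: ['my-logs-*'],
  data_stream: {},
  priority: 200
})
await client.indices.createDataStream({ name: 'my-logs-web' })
```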
- - - -### create_from [_create_from] +#### Request (object) [_request_indices.create_data_stream] +- **`name` (string)**: Name of the data stream, which must meet the following criteria: +Lowercase only; +Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; +Cannot start with `-`, `_`, `+`, or `.ds-`; +Cannot be `.` or `..`; +Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.indices.createFrom [_indices.create_from] Create an index from a source index. Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html) + ```ts client.indices.createFrom({ source, dest }) ``` +### Arguments [_arguments_indices.create_from] -### Arguments [_arguments_170] - -* **Request (object):** - - * **`source` (string)**: The source index or data stream name - * **`dest` (string)**: The destination index or data stream name - * **`create_from` (Optional, { mappings_override, settings_override, remove_index_blocks })** - +#### Request (object) [_request_indices.create_from] +- **`source` (string)**: The source index or data stream name +- **`dest` (string)**: The destination index or data stream name +- **`create_from` (Optional, { mappings_override, settings_override, remove_index_blocks })** +## client.indices.dataStreamsStats [_indices.data_streams_stats] +Get data stream stats. -### data_streams_stats [_data_streams_stats] +Get statistics for one or more data streams. -Get data stream stats. Retrieves statistics for one or more data streams. - -[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1) ```ts client.indices.dataStreamsStats({ ... }) ``` +### Arguments [_arguments_indices.data_streams_stats] -### Arguments [_arguments_171] - -* **Request (object):** - - * **`name` (Optional, string)**: List of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a cluster, omit this parameter or use `*`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. - +#### Request (object) [_request_indices.data_streams_stats] +- **`name` (Optional, string)**: List of data streams used to limit the request. +Wildcard expressions (`*`) are supported. +To target all data streams in a cluster, omit this parameter or use `*`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +## client.indices.delete [_indices.delete] +Delete indices. 
+Deleting an index deletes its documents, shards, and metadata. +It does not delete related Kibana components, such as data views, visualizations, or dashboards. -### delete [_delete_5] - -Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards. - -You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index. +You cannot delete the current write index of a data stream. +To delete the index, you must roll over the data stream so a new write index is created. +You can then use the delete index API to delete the previous write index. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete) @@ -5573,23 +5734,28 @@ You cannot delete the current write index of a data stream. To delete the index, client.indices.delete({ index }) ``` - -### Arguments [_arguments_172] - -* **Request (object):** - - * **`index` (string | string[])**: List of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### delete_alias [_delete_alias] - -Delete an alias. Removes a data stream or index from an alias. +### Arguments [_arguments_indices.delete] + +#### Request (object) [_request_indices.delete] +- **`index` (string | string[])**: List of indices to delete. +You cannot specify index aliases. +By default, this parameter does not support wildcards (`*`) or `_all`. +To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. 
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.deleteAlias [_indices.delete_alias] +Delete an alias. +Removes a data stream or index from an alias. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias) @@ -5597,21 +5763,21 @@ Delete an alias. Removes a data stream or index from an alias. client.indices.deleteAlias({ index, name }) ``` +### Arguments [_arguments_indices.delete_alias] -### Arguments [_arguments_173] - -* **Request (object):** - - * **`index` (string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). - * **`name` (string | string[])**: List of aliases to remove. Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - +#### Request (object) [_request_indices.delete_alias] +- **`index` (string | string[])**: List of data streams or indices used to limit the request. +Supports wildcards (`*`). +- **`name` (string | string[])**: List of aliases to remove. +Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. - -### delete_data_lifecycle [_delete_data_lifecycle] - -Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. +## client.indices.deleteDataLifecycle [_indices.delete_data_lifecycle] +Delete data stream lifecycles. +Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle) @@ -5619,42 +5785,36 @@ Delete data stream lifecycles. 
Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle)

```ts
client.indices.deleteDataLifecycle({ name })
```

+### Arguments [_arguments_indices.delete_data_lifecycle]

-### Arguments [_arguments_174]
-
-* **Request (object):**
-
- * **`name` (string | string[])**: A list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open)
- * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
- * **`timeout` (Optional, string | -1 | 0)**: Explicit timestamp for the document
+#### Request (object) [_request_indices.delete_data_lifecycle]
+- **`name` (string | string[])**: A list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
+- **`timeout` (Optional, string | -1 | 0)**: Explicit timestamp for the document

+## client.indices.deleteDataStream [_indices.delete_data_stream]
+Delete data streams.
+Deletes one or more data streams and their backing indices.

-
-### delete_data_stream [_delete_data_stream]
-
-Delete data streams. Deletes one or more data streams and their backing indices.
-
-[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream)

```ts
client.indices.deleteDataStream({ name })
```

+### Arguments [_arguments_indices.delete_data_stream]

-### Arguments [_arguments_175]
-
-* **Request (object):**
-
- * **`name` (string | string[])**: List of data streams to delete. Wildcard (`*`) expressions are supported.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values,such as `open,hidden`.
-
-
-### delete_index_template [_delete_index_template]
+#### Request (object) [_request_indices.delete_data_stream]
+- **`name` (string | string[])**: List of data streams to delete. Wildcard (`*`) expressions are supported.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.

-Delete an index template. The provided `<index-template>` may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates.
+## client.indices.deleteIndexTemplate [_indices.delete_index_template]
+Delete an index template.
+The provided `<index-template>` may contain multiple template names separated by a comma. If multiple template
+names are specified then there is no wildcard support and the provided names should match completely with
+existing templates.
+## client.indices.deleteIndexTemplate [_indices.delete_index_template] +Delete an index template. +The provided may contain multiple template names separated by a comma. If multiple template +names are specified then there is no wildcard support and the provided names should match completely with +existing templates. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template) @@ -5662,19 +5822,14 @@ Delete an index template. The provided may contain multiple tem client.indices.deleteIndexTemplate({ name }) ``` +### Arguments [_arguments_indices.delete_index_template] -### Arguments [_arguments_176] - -* **Request (object):** - - * **`name` (string | string[])**: List of index template names used to limit the request. Wildcard (*) expressions are supported. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### delete_template [_delete_template] +#### Request (object) [_request_indices.delete_index_template] +- **`name` (string | string[])**: List of index template names used to limit the request. Wildcard (*) expressions are supported. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.indices.deleteTemplate [_indices.delete_template] Delete a legacy index template. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template) @@ -5683,25 +5838,25 @@ Delete a legacy index template. client.indices.deleteTemplate({ name }) ``` +### Arguments [_arguments_indices.delete_template] -### Arguments [_arguments_177] - -* **Request (object):** - - * **`name` (string)**: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - +#### Request (object) [_request_indices.delete_template] +- **`name` (string)**: The name of the legacy index template to delete. +Wildcard (`*`) expressions are supported. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +## client.indices.diskUsage [_indices.disk_usage] +Analyze the index disk usage. +Analyze the disk usage of each field of an index or data stream. +This API might not support indices created in previous Elasticsearch versions. 
+The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. -### disk_usage [_disk_usage] - -Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. - -::::{note} -The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. -:::: - +NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. +Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. +The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage) @@ -5709,28 +5864,32 @@ The total size of fields of the analyzed shards of the index in the response is client.indices.diskUsage({ index }) ``` - -### Arguments [_arguments_178] - -* **Request (object):** - - * **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - * **`flush` (Optional, boolean)**: If `true`, the API performs a flush before analysis. If `false`, the response may not include uncommitted data. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. - * **`run_expensive_tasks` (Optional, boolean)**: Analyzing field disk usage is resource-intensive. To use the API, this parameter must be set to `true`. - - - -### downsample [_downsample] - -Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. 
All documents within an hour interval are summarized and stored as a single document in the downsample index. - -::::{note} -Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). -:::: - +### Arguments [_arguments_indices.disk_usage] + +#### Request (object) [_request_indices.disk_usage] +- **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request. +It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`flush` (Optional, boolean)**: If `true`, the API performs a flush before analysis. +If `false`, the response may not include uncommitted data. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. +- **`run_expensive_tasks` (Optional, boolean)**: Analyzing field disk usage is resource-intensive. +To use the API, this parameter must be set to `true`. + +## client.indices.downsample [_indices.downsample] +Downsample an index. +Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. +For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. +All documents within an hour interval are summarized and stored as a single document in the downsample index. + +NOTE: Only indices in a time series data stream are supported. +Neither field nor document level security can be defined on the source index. +The source index must be read only (`index.blocks.write: true`). [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample) @@ -5738,20 +5897,16 @@ Only indices in a time series data stream are supported. Neither field nor docum client.indices.downsample({ index, target_index }) ``` +### Arguments [_arguments_indices.downsample] -### Arguments [_arguments_179] - -* **Request (object):** - - * **`index` (string)**: Name of the time series index to downsample. - * **`target_index` (string)**: Name of the index to create. - * **`config` (Optional, { fixed_interval })** - - +#### Request (object) [_request_indices.downsample] +- **`index` (string)**: Name of the time series index to downsample. +- **`target_index` (string)**: Name of the index to create. +- **`config` (Optional, { fixed_interval })** -### exists [_exists_2] - -Check indices. Check if one or more indices, index aliases, or data streams exist. +## client.indices.exists [_indices.exists] +Check indices. 
+Check if one or more indices, index aliases, or data streams exist. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists) @@ -5759,73 +5914,73 @@ Check indices. Check if one or more indices, index aliases, or data streams exis client.indices.exists({ index }) ``` +### Arguments [_arguments_indices.exists] -### Arguments [_arguments_180] - -* **Request (object):** - - * **`index` (string | string[])**: List of data streams, indices, and aliases. Supports wildcards (`*`). - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. - +#### Request (object) [_request_indices.exists] +- **`index` (string | string[])**: List of data streams, indices, and aliases. Supports wildcards (`*`). +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +## client.indices.existsAlias [_indices.exists_alias] +Check aliases. -### exists_alias [_exists_alias] +Check if one or more data stream or index aliases exist. -Check aliases. Checks if one or more data stream or index aliases exist. 
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias) ```ts client.indices.existsAlias({ name }) ``` +### Arguments [_arguments_indices.exists_alias] -### Arguments [_arguments_181] - -* **Request (object):** - - * **`name` (string | string[])**: List of aliases to check. Supports wildcards (`*`). - * **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +#### Request (object) [_request_indices.exists_alias] +- **`name` (string | string[])**: List of aliases to check. Supports wildcards (`*`). +- **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +## client.indices.existsIndexTemplate [_indices.exists_index_template] +Check index templates. +Check whether index templates exist. -### exists_index_template [_exists_index_template] - -Check index templates. Check whether index templates exist. 
- -[Endpoint documentation](docs-content://manage-data/data-store/templates.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template) ```ts client.indices.existsIndexTemplate({ name }) ``` +### Arguments [_arguments_indices.exists_index_template] -### Arguments [_arguments_182] - -* **Request (object):** - - * **`name` (string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### exists_template [_exists_template] - -Check existence of index templates. Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +#### Request (object) [_request_indices.exists_index_template] +- **`name` (string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -::::{important} -This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. -:::: +## client.indices.existsTemplate [_indices.exists_template] +Check existence of index templates. +Get information about whether index templates exist. +Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template) @@ -5833,21 +5988,20 @@ This documentation is about legacy index templates, which are deprecated and wil client.indices.existsTemplate({ name }) ``` +### Arguments [_arguments_indices.exists_template] -### Arguments [_arguments_183] +#### Request (object) [_request_indices.exists_template] +- **`name` (string | string[])**: A list of index template names used to limit the request. +Wildcard (`*`) expressions are supported. +- **`flat_settings` (Optional, boolean)**: Indicates whether to use a flat format for the response. +- **`local` (Optional, boolean)**: Indicates whether to get information from the local node only. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. -* **Request (object):** - - * **`name` (string | string[])**: A list of index template names used to limit the request. Wildcard (`*`) expressions are supported. - * **`flat_settings` (Optional, boolean)**: Indicates whether to use a flat format for the response. 
- * **`local` (Optional, boolean)**: Indicates whether to get information from the local node only. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - - - -### explain_data_lifecycle [_explain_data_lifecycle] - -Get the status for a data stream lifecycle. Get information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. +## client.indices.explainDataLifecycle [_indices.explain_data_lifecycle] +Get the status for a data stream lifecycle. +Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle) @@ -5855,22 +6009,21 @@ Get the status for a data stream lifecycle. Get information about an index or da client.indices.explainDataLifecycle({ index }) ``` +### Arguments [_arguments_indices.explain_data_lifecycle] -### Arguments [_arguments_184] - -* **Request (object):** - - * **`index` (string | string[])**: The name of the index to explain - * **`include_defaults` (Optional, boolean)**: indicates if the API should return the default values the system uses for the index’s lifecycle - * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master - +#### Request (object) [_request_indices.explain_data_lifecycle] +- **`index` (string | string[])**: The name of the index to explain +- **`include_defaults` (Optional, boolean)**: indicates if the API should return the default values the system uses for the index's lifecycle +- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master +## client.indices.fieldUsageStats [_indices.field_usage_stats] +Get field usage stats. +Get field usage information for each shard and field of an index. +Field usage statistics are automatically captured when queries are running on a cluster. +A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. -### field_usage_stats [_field_usage_stats] - -Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. - -The response body reports the per-shard usage count of the data structures that back the fields in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. +The response body reports the per-shard usage count of the data structures that back the fields in the index. +A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. 
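As a quick illustration, a minimal sketch with the JS client might look like this (a configured `Client` instance is assumed; `my-index` and the two field names are hypothetical):

```ts
// Minimal sketch: fetch per-shard field usage counts for one index,
// limited to two hypothetical fields, and print the raw response.
const response = await client.indices.fieldUsageStats({
  index: 'my-index',
  fields: ['@timestamp', 'message']
})
console.log(JSON.stringify(response, null, 2))
```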
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats) @@ -5878,27 +6031,31 @@ The response body reports the per-shard usage count of the data structures that client.indices.fieldUsageStats({ index }) ``` +### Arguments [_arguments_indices.field_usage_stats] -### Arguments [_arguments_185] - -* **Request (object):** - - * **`index` (string | string[])**: List or wildcard expression of index names used to limit the request. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. - * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +#### Request (object) [_request_indices.field_usage_stats] +- **`index` (string | string[])**: List or wildcard expression of index names used to limit the request. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. +- **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. +## client.indices.flush [_indices.flush] +Flush data streams or indices. +Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. +When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. +Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. 
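If you do need to trigger a flush yourself, a minimal sketch looks like this (a configured `Client` instance is assumed; `my-index` is a hypothetical index name):

```ts
// Minimal sketch: manually flush a single index, blocking until any
// flush that is already running on it has finished.
await client.indices.flush({
  index: 'my-index',
  wait_if_ongoing: true
})
```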
+After each operation has been flushed it is permanently stored in the Lucene index. +This may mean that there is no need to maintain an additional copy of it in the transaction log. +The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. -### flush [_flush] - -Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. - -After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. - -It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. +It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. +If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush) @@ -5906,38 +6063,50 @@ It is also possible to trigger a flush on one or more indices using the flush AP client.indices.flush({ ... }) ``` - -### Arguments [_arguments_186] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`force` (Optional, boolean)**: If `true`, the request forces a flush even if there are no changes to commit to the index. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. 
- * **`wait_if_ongoing` (Optional, boolean)**: If `true`, the flush operation blocks until execution when another flush operation is running. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. - - - -### forcemerge [_forcemerge] - -Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream’s backing indices. - -Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. - -::::{warning} -We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can’t be backed up incrementally. -:::: - +### Arguments [_arguments_indices.flush] + +#### Request (object) [_request_indices.flush] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to flush. +Supports wildcards (`*`). +To flush all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`force` (Optional, boolean)**: If `true`, the request forces a flush even if there are no changes to commit to the index. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`wait_if_ongoing` (Optional, boolean)**: If `true`, the flush operation blocks until execution when another flush operation is running. +If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. + +## client.indices.forcemerge [_indices.forcemerge] +Force a merge. +Perform the force merge operation on the shards of one or more indices. +For data streams, the API forces a merge on the shards of the stream's backing indices. + +Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. +Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. + +WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). 
+When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone".
+These soft-deleted documents are automatically cleaned up during regular segment merges.
+But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.
+So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.
+If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.

**Blocks during a force merge**

Calls to this API block until the merge is complete (unless the request contains `wait_for_completion=false`).
If the client connection is lost before completion then the force merge process will continue in the background.
Any new requests to force merge the same indices will also block until the ongoing force merge is complete.

**Running force merge asynchronously**

If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.
However, you cannot cancel this task as the force merge task is not cancelable.
Elasticsearch creates a record of this task as a document at `_tasks/`.
When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.

**Force merging multiple indices**

You can force merge multiple indices with a single request by targeting:

* One or more indices
* One or more aliases
* All data streams and indices in a cluster

Each targeted shard is force-merged separately using the `force_merge` threadpool.
By default each node only has a single `force_merge` thread, which means that the shards on that node are force-merged one at a time.
If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel.

Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one.

**Data streams and time-based indices**

Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.
In these cases, each index only receives indexing traffic for a certain period of time.
Once an index receives no more writes, its shards can be force-merged to a single segment.
This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.
For example:

```
POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
```

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge)

```ts
client.indices.forcemerge({ ... })
```

### Arguments [_arguments_indices.forcemerge]

#### Request (object) [_request_indices.forcemerge]
- **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices
- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+- **`flush` (Optional, boolean)**: Specify whether the index should be flushed after performing the operation (default: true) +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) +- **`max_num_segments` (Optional, number)**: The number of segments the index should be merged into (default: dynamic) +- **`only_expunge_deletes` (Optional, boolean)**: Specify whether the operation should only expunge deleted documents +- **`wait_for_completion` (Optional, boolean)**: Should the request wait until the force merge is completed. - - -### get [_get_5] - -Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices. +## client.indices.get [_indices.get] +Get index information. +Get information about one or more indices. For data streams, the API returns information about the +stream’s backing indices. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get) @@ -5992,26 +6164,27 @@ Get index information. Get information about one or more indices. For data strea client.indices.get({ index }) ``` - -### Arguments [_arguments_188] - -* **Request (object):** - - * **`index` (string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as open,hidden. - * **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. - * **`ignore_unavailable` (Optional, boolean)**: If false, requests that target a missing index return an error. - * **`include_defaults` (Optional, boolean)**: If true, return all default settings in the response. - * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`features` (Optional, { name, description } | { name, description }[])**: Return only information on specified index features - - - -### get_alias [_get_alias] - -Get aliases. Retrieves information for one or more data stream or index aliases. +### Arguments [_arguments_indices.get] + +#### Request (object) [_request_indices.get] +- **`index` (string | string[])**: List of data streams, indices, and index aliases used to limit the request. +Wildcard expressions (*) are supported. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only +missing or closed indices. 
This behavior applies even if the request targets other open indices. For example, +a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument +determines whether wildcard expressions match hidden data streams. Supports a list of values, +such as open,hidden. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`ignore_unavailable` (Optional, boolean)**: If false, requests that target a missing index return an error. +- **`include_defaults` (Optional, boolean)**: If true, return all default settings in the response. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`features` (Optional, { name, description } | { name, description }[])**: Return only information on specified index features + +## client.indices.getAlias [_indices.get_alias] +Get aliases. +Retrieves information for one or more data stream or index aliases. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias) @@ -6019,23 +6192,29 @@ Get aliases. Retrieves information for one or more data stream or index aliases. client.indices.getAlias({ ... }) ``` +### Arguments [_arguments_indices.get_alias] -### Arguments [_arguments_189] +#### Request (object) [_request_indices.get_alias] +- **`name` (Optional, string | string[])**: List of aliases to retrieve. +Supports wildcards (`*`). +To retrieve all aliases, omit this parameter or use `*` or `_all`. +- **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.indices.getDataLifecycle [_indices.get_data_lifecycle] +Get data stream lifecycles. - * **`name` (Optional, string | string[])**: List of aliases to retrieve. Supports wildcards (`*`). 
To retrieve all aliases, omit this parameter or use `*` or `_all`. - * **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_data_lifecycle [_get_data_lifecycle] - -Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams. +Get the data stream lifecycle configuration of one or more data streams. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle) @@ -6043,21 +6222,21 @@ Get data stream lifecycles. Retrieves the data stream lifecycle configuration of client.indices.getDataLifecycle({ name }) ``` +### Arguments [_arguments_indices.get_data_lifecycle] -### Arguments [_arguments_190] - -* **Request (object):** - - * **`name` (string | string[])**: List of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - +#### Request (object) [_request_indices.get_data_lifecycle] +- **`name` (string | string[])**: List of data streams to limit the request. +Supports wildcards (`*`). +To target all data streams, omit this parameter or use `*` or `_all`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. -### get_data_lifecycle_stats [_get_data_lifecycle_stats] - -Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle. +## client.indices.getDataLifecycleStats [_indices.get_data_lifecycle_stats] +Get data stream lifecycle stats. +Get statistics about the data streams that are managed by a data stream lifecycle. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats) @@ -6066,34 +6245,34 @@ client.indices.getDataLifecycleStats() ``` -### get_data_stream [_get_data_stream] +## client.indices.getDataStream [_indices.get_data_stream] +Get data streams. -Get data streams. Retrieves information about one or more data streams. +Get information about one or more data streams. -[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream) ```ts client.indices.getDataStream({ ... }) ``` +### Arguments [_arguments_indices.get_data_stream] -### Arguments [_arguments_191] - -* **Request (object):** - - * **`name` (Optional, string | string[])**: List of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. - * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned. - +#### Request (object) [_request_indices.get_data_stream] +- **`name` (Optional, string | string[])**: List of data stream names used to limit the request. +Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned. +## client.indices.getFieldMapping [_indices.get_field_mapping] +Get mapping definitions. +Retrieves mapping definitions for one or more fields. +For data streams, the API retrieves field mappings for the stream’s backing indices. -### get_field_mapping [_get_field_mapping] - -Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. 
- -This API is useful if you don’t need a complete mapping or if an index mapping contains a large number of fields. +This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) @@ -6101,24 +6280,26 @@ This API is useful if you don’t need a complete mapping or if an index mapping client.indices.getFieldMapping({ fields }) ``` +### Arguments [_arguments_indices.get_field_mapping] -### Arguments [_arguments_192] - -* **Request (object):** - - * **`fields` (string | string[])**: List or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +#### Request (object) [_request_indices.get_field_mapping] +- **`fields` (string | string[])**: List or wildcard expression of fields used to limit returned information. +Supports wildcards (`*`). +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. - - -### get_index_template [_get_index_template] - -Get index templates. Get information about one or more index templates. 
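For example, a minimal sketch of listing matching templates (a configured `Client` instance is assumed; the `logs-*` name pattern is hypothetical):

```ts
// Minimal sketch: fetch index templates whose names match a wildcard
// pattern and print each template's name.
const { index_templates } = await client.indices.getIndexTemplate({
  name: 'logs-*'
})
for (const template of index_templates) {
  console.log(template.name)
}
```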
+## client.indices.getIndexTemplate [_indices.get_index_template] +Get index templates. +Get information about one or more index templates. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template) @@ -6126,22 +6307,18 @@ Get index templates. Get information about one or more index templates. client.indices.getIndexTemplate({ ... }) ``` +### Arguments [_arguments_indices.get_index_template] -### Arguments [_arguments_193] - -* **Request (object):** - - * **`name` (Optional, string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. - * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. - * **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. - - - -### get_mapping [_get_mapping] +#### Request (object) [_request_indices.get_index_template] +- **`name` (Optional, string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. -Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices. +## client.indices.getMapping [_indices.get_mapping] +Get mapping definitions. +For data streams, the API retrieves mappings for the stream’s backing indices. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) @@ -6149,42 +6326,43 @@ Get mapping definitions. For data streams, the API retrieves mappings for the st client.indices.getMapping({ ... }) ``` - -### Arguments [_arguments_194] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. 
Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_migrate_reindex_status [_get_migrate_reindex_status] - +### Arguments [_arguments_indices.get_mapping] + +#### Request (object) [_request_indices.get_mapping] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.getMigrateReindexStatus [_indices.get_migrate_reindex_status] Get the migration reindexing status. Get the status of a migration reindex attempt for a data stream or index. +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html) + ```ts client.indices.getMigrateReindexStatus({ index }) ``` +### Arguments [_arguments_indices.get_migrate_reindex_status] -### Arguments [_arguments_195] +#### Request (object) [_request_indices.get_migrate_reindex_status] +- **`index` (string | string[])**: The index or data stream name. -* **Request (object):** - - * **`index` (string | string[])**: The index or data stream name. - - - -### get_settings [_get_settings_2] - -Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream’s backing indices. +## client.indices.getSettings [_indices.get_settings] +Get index settings. +Get setting information for one or more indices. +For data streams, it returns setting information for the stream's backing indices. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings) @@ -6192,31 +6370,35 @@ Get index settings. Get setting information for one or more indices. For data st client.indices.getSettings({ ... 
}) ``` - -### Arguments [_arguments_196] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`name` (Optional, string | string[])**: List or wildcard expression of settings to retrieve. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with `bar`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_template [_get_template] - -Get index templates. Get information about one or more index templates. - -::::{important} -This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. -:::: - +### Arguments [_arguments_indices.get_settings] + +#### Request (object) [_request_indices.get_settings] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit +the request. Supports wildcards (`*`). To target all data streams and +indices, omit this parameter or use `*` or `_all`. +- **`name` (Optional, string | string[])**: List or wildcard expression of settings to retrieve. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index +alias, or `_all` value targets only missing or closed indices. This +behavior applies even if the request targets other open indices. For +example, a request targeting `foo*,bar*` returns an error if an index +starts with foo but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. 
+- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If +`false`, information is retrieved from the master node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. + +## client.indices.getTemplate [_indices.get_template] +Get index templates. +Get information about one or more index templates. + +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template) @@ -6224,90 +6406,98 @@ This documentation is about legacy index templates, which are deprecated and wil client.indices.getTemplate({ ... }) ``` +### Arguments [_arguments_indices.get_template] -### Arguments [_arguments_197] - -* **Request (object):** - - * **`name` (Optional, string | string[])**: List of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, omit this parameter or use a value of `_all` or `*`. - * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### migrate_reindex [_migrate_reindex] +#### Request (object) [_request_indices.get_template] +- **`name` (Optional, string | string[])**: List of index template names used to limit the request. +Wildcard (`*`) expressions are supported. +To return all index templates, omit this parameter or use a value of `_all` or `*`. +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +## client.indices.migrateReindex [_indices.migrate_reindex] Reindex legacy backing indices. -Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. The persistent task ID is returned immediately and the reindexing work is completed in that task. +Reindex all legacy backing indices for a data stream. +This operation occurs in a persistent task. +The persistent task ID is returned immediately and the reindexing work is completed in that task. + +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html) ```ts client.indices.migrateReindex({ ... 
}) ``` +### Arguments [_arguments_indices.migrate_reindex] -### Arguments [_arguments_198] - -* **Request (object):** - - * **`reindex` (Optional, { mode, source })** - - +#### Request (object) [_request_indices.migrate_reindex] +- **`reindex` (Optional, { mode, source })** -### migrate_to_data_stream [_migrate_to_data_stream] +## client.indices.migrateToDataStream [_indices.migrate_to_data_stream] +Convert an index alias to a data stream. +Converts an index alias to a data stream. +You must have a matching index template that is data stream enabled. +The alias must meet the following criteria: +The alias must have a write index; +All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; +The alias must not have any filters; +The alias must not use custom routing. +If successful, the request removes the alias and creates a data stream with the same name. +The indices for the alias become hidden backing indices for the stream. +The write index for the alias becomes the write index for the stream. -Convert an index alias to a data stream. Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; The alias must not have any filters; The alias must not use custom routing. If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. - -[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream) ```ts client.indices.migrateToDataStream({ name }) ``` +### Arguments [_arguments_indices.migrate_to_data_stream] -### Arguments [_arguments_199] - -* **Request (object):** - - * **`name` (string)**: Name of the index alias to convert to a data stream. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - +#### Request (object) [_request_indices.migrate_to_data_stream] +- **`name` (string)**: Name of the index alias to convert to a data stream. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.indices.modifyDataStream [_indices.modify_data_stream] +Update data streams. +Performs one or more data stream modification actions in a single atomic operation. -### modify_data_stream [_modify_data_stream] - -Update data streams. Performs one or more data stream modification actions in a single atomic operation. 
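+For example, an illustrative sketch of swapping a backing index out of a stream
+(the `my-data-stream` and `.ds-my-data-stream-000001` names are placeholders,
+not part of the generated reference):
+
+```ts
+// Atomically remove one backing index from a data stream.
+await client.indices.modifyDataStream({
+  actions: [
+    {
+      remove_backing_index: {
+        data_stream: 'my-data-stream',
+        index: '.ds-my-data-stream-000001'
+      }
+    }
+  ]
+})
+```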
- -[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream) ```ts client.indices.modifyDataStream({ actions }) ``` +### Arguments [_arguments_indices.modify_data_stream] -### Arguments [_arguments_200] - -* **Request (object):** - - * **`actions` ({ add_backing_index, remove_backing_index }[])**: Actions to perform. +#### Request (object) [_request_indices.modify_data_stream] +- **`actions` ({ add_backing_index, remove_backing_index }[])**: Actions to perform. +## client.indices.open [_indices.open] +Open a closed index. +For data streams, the API opens any closed backing indices. +A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. +It is not possible to index documents or to search for documents in a closed index. +This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. -### open [_open] +When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. +The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. -Open a closed index. For data streams, the API opens any closed backing indices. +You can open and close multiple indices. +An error is thrown if the request explicitly refers to a missing index. +This behavior can be turned off by using the `ignore_unavailable=true` parameter. -A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. +By default, you must explicitly name the indices you are opening or closing. +To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. +This setting can also be changed with the cluster update settings API. -When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. - -You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off by using the `ignore_unavailable=true` parameter. - -By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. - -Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. 
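+For example, an illustrative sketch of changing that setting through the cluster
+update settings API (shown only to make the preceding sentence concrete):
+
+```ts
+// Allow wildcard expressions such as `_all` in open and close requests.
+await client.cluster.putSettings({
+  persistent: { 'action.destructive_requires_name': false }
+})
+```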
+Closed indices consume a significant amount of disk-space which can cause problems in managed environments. +Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well. @@ -6317,78 +6507,93 @@ Because opening or closing an index allocates its shards, the `wait_for_active_s client.indices.open({ index }) ``` - -### Arguments [_arguments_201] - -* **Request (object):** - - * **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly name the indices you using to limit the request. To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - - - -### promote_data_stream [_promote_data_stream] - -Promote a data stream. Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. - -With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can’t be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. - -::::{note} -When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. 
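+A minimal usage sketch (the index name `my-index-000001` is a placeholder):
+
+```ts
+// Reopen a closed index and wait for one active shard copy per shard.
+const response = await client.indices.open({
+  index: 'my-index-000001',
+  wait_for_active_shards: 1
+})
+console.log(response.acknowledged, response.shards_acknowledged)
+```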
-:::: - - -[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) +### Arguments [_arguments_indices.open] + +#### Request (object) [_request_indices.open] +- **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +By default, you must explicitly name the indices you using to limit the request. +To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. +You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +## client.indices.promoteDataStream [_indices.promote_data_stream] +Promote a data stream. +Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. + +With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. +These data streams can't be rolled over in the local cluster. +These replicated data streams roll over only if the upstream data stream rolls over. +In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. + +NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. +If this is missing, the data stream will not be able to roll over until a matching index template is created. +This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. 
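+As an illustrative sketch of the call documented below (the stream name
+`my-data-stream` is a placeholder):
+
+```ts
+// Promote a replicated data stream to a regular, locally managed data stream.
+await client.indices.promoteDataStream({ name: 'my-data-stream' })
+```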
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream) ```ts client.indices.promoteDataStream({ name }) ``` +### Arguments [_arguments_indices.promote_data_stream] -### Arguments [_arguments_202] - -* **Request (object):** - - * **`name` (string)**: The name of the data stream - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +#### Request (object) [_request_indices.promote_data_stream] +- **`name` (string)**: The name of the data stream +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +## client.indices.putAlias [_indices.put_alias] +Create or update an alias. +Adds a data stream or index to an alias. - -### put_alias [_put_alias] - -Create or update an alias. Adds a data stream or index to an alias. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias) ```ts client.indices.putAlias({ index, name }) ``` - -### Arguments [_arguments_203] - -* **Request (object):** - - * **`index` (string | string[])**: List of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error. - * **`name` (string)**: Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math. - * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query used to limit documents the alias can access. - * **`index_routing` (Optional, string)**: Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter. - * **`is_write_index` (Optional, boolean)**: If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. - * **`routing` (Optional, string)**: Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter. - * **`search_routing` (Optional, string)**: Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. 
Data stream aliases don’t support this parameter. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### put_data_lifecycle [_put_data_lifecycle] - -Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. +### Arguments [_arguments_indices.put_alias] + +#### Request (object) [_request_indices.put_alias] +- **`index` (string | string[])**: List of data streams or indices to add. +Supports wildcards (`*`). +Wildcard patterns that match both data streams and indices return an error. +- **`name` (string)**: Alias to update. +If the alias doesn’t exist, the request creates it. +Index alias names support date math. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query used to limit documents the alias can access. +- **`index_routing` (Optional, string)**: Value used to route indexing operations to a specific shard. +If specified, this overwrites the `routing` value for indexing operations. +Data stream aliases don’t support this parameter. +- **`is_write_index` (Optional, boolean)**: If `true`, sets the write index or data stream for the alias. +If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. +If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. +Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. +- **`routing` (Optional, string)**: Value used to route indexing and search operations to a specific shard. +Data stream aliases don’t support this parameter. +- **`search_routing` (Optional, string)**: Value used to route search operations to a specific shard. +If specified, this overwrites the `routing` value for search operations. +Data stream aliases don’t support this parameter. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.putDataLifecycle [_indices.put_data_lifecycle] +Update data stream lifecycles. +Update the data stream lifecycle of the specified data streams. 
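+As an illustrative sketch of the call documented below (the stream name and the
+`7d` retention value are placeholders):
+
+```ts
+// Retain documents in the stream for at least seven days.
+await client.indices.putDataLifecycle({
+  name: 'my-data-stream',
+  data_retention: '7d'
+})
+```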
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle) @@ -6396,26 +6601,39 @@ Update data stream lifecycles. Update the data stream lifecycle of the specified client.indices.putDataLifecycle({ name }) ``` - -### Arguments [_arguments_204] - -* **Request (object):** - - * **`name` (string | string[])**: List of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. - * **`lifecycle` (Optional, { data_retention, downsampling, enabled })** - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### put_index_template [_put_index_template] - -Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. - -Elasticsearch applies templates to new indices based on an wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream’s backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. - -You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. +### Arguments [_arguments_indices.put_data_lifecycle] + +#### Request (object) [_request_indices.put_data_lifecycle] +- **`name` (string | string[])**: List of data streams used to limit the request. +Supports wildcards (`*`). +To target all data streams use `*` or `_all`. +- **`data_retention` (Optional, string | -1 | 0)**: If defined, every document added to this data stream will be stored at least for this time frame. +Any time after this duration the document could be deleted. +When empty, every document in this data stream will be stored indefinitely. +- **`downsampling` (Optional, { rounds })**: The downsampling configuration to execute for the managed backing index after rollover. +- **`enabled` (Optional, boolean)**: If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle +that's disabled (enabled: `false`) will have no effect on the data stream. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `hidden`, `open`, `closed`, `none`. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
If no response is
+received before the timeout expires, the request fails and returns an
+error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.putIndexTemplate [_indices.put_index_template]
+Create or update an index template.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+
+Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name.
+Index templates are applied during data stream or index creation.
+For data streams, these settings and mappings are applied when the stream's backing indices are created.
+Settings and mappings specified in a create index API request override any settings or mappings specified in an index template.
+Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.
+
+You can use C-style `/* *\/` block comments in index templates.
+You can include comments anywhere in the request body, except before the opening curly bracket.

**Multiple matching templates**

@@ -6425,7 +6643,14 @@ Multiple templates with overlapping index patterns at the same priority are not

**Composing aliases, mappings, and settings**

-When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition.
+When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.
+Any mappings, settings, or aliases from the parent index template are merged in next.
+Finally, any configuration on the index request itself is merged.
+Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.
+If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.
+This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.
+If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.
+If an entry already exists with the same key, then it is overwritten by the new definition.
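+As an illustrative sketch of the call documented below (the template name,
+pattern, and settings are placeholders):
+
+```ts
+// A data stream enabled template matching all `logs-*` names.
+await client.indices.putIndexTemplate({
+  name: 'my-logs-template',
+  index_patterns: ['logs-*'],
+  data_stream: {},
+  priority: 200,
+  template: { settings: { number_of_shards: 1 } }
+})
+```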
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template) @@ -6433,49 +6658,73 @@ When multiple component templates are specified in the `composed_of` field for a client.indices.putIndexTemplate({ name }) ``` - -### Arguments [_arguments_205] - -* **Request (object):** - - * **`name` (string)**: Index or template name - * **`index_patterns` (Optional, string | string[])**: Name of the index template to create. - * **`composed_of` (Optional, string[])**: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. - * **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. - * **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. - * **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. - * **`version` (Optional, number)**: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. External systems can use these version numbers to simplify template management. To unset a version, replace the template without specifying one. - * **`_meta` (Optional, Record)**: Optional user metadata about the index template. It may have any contents. It is not automatically generated or used by Elasticsearch. This user-defined object is stored in the cluster state, so keeping it short is preferable To unset the metadata, replace the template without specifying it. - * **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. - * **`ignore_missing_component_templates` (Optional, string[])**: The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist - * **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. - * **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing index templates. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`cause` (Optional, string)**: User defined reason for creating/updating the index template - - - -### put_mapping [_put_mapping] - -Update field mappings. Add new fields to an existing data stream or index. 
You can also use this API to change the search settings of existing fields and add new properties to existing object fields. For data streams, these changes are applied to all backing indices by default.
+### Arguments [_arguments_indices.put_index_template]
+
+#### Request (object) [_request_indices.put_index_template]
+- **`name` (string)**: Index or template name.
+- **`index_patterns` (Optional, string | string[])**: Array of wildcard expressions used to match the names of indices during creation.
+- **`composed_of` (Optional, string[])**: An ordered list of component template names.
+Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
+- **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied.
+It may optionally include an `aliases`, `mappings`, or `settings` configuration.
+- **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices.
+Supports an empty object.
+Data streams require a matching index template with a `data_stream` object.
+- **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created.
+The index template with the highest priority is chosen.
+If no priority is specified the template is treated as though it is of priority 0 (lowest priority).
+This number is not automatically generated by Elasticsearch.
+- **`version` (Optional, number)**: Version number used to manage index templates externally.
+This number is not automatically generated by Elasticsearch.
+External systems can use these version numbers to simplify template management.
+To unset a version, replace the template without specifying one.
+- **`_meta` (Optional, Record)**: Optional user metadata about the index template.
+It may have any contents.
+It is not automatically generated or used by Elasticsearch.
+This user-defined object is stored in the cluster state, so keeping it short is preferable.
+To unset the metadata, replace the template without specifying it.
+- **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting.
+If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`.
+If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created.
+- **`ignore_missing_component_templates` (Optional, string[])**: The configuration option `ignore_missing_component_templates` can be used when an index template
+references a component template that might not exist.
+- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template
+that uses deprecated components, Elasticsearch will emit a deprecation warning.
+- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing index templates.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`cause` (Optional, string)**: User-defined reason for creating/updating the index template.
+
+## client.indices.putMapping [_indices.put_mapping]
+Update field mappings.
+Add new fields to an existing data stream or index.
+You can also use this API to change the search settings of existing fields and add new properties to existing object fields. +For data streams, these changes are applied to all backing indices by default. **Add multi-fields to an existing field** -Multi-fields let you index the same field in different ways. You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. You can populate the new multi-field with the update by query API. +Multi-fields let you index the same field in different ways. +You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. +WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. +You can populate the new multi-field with the update by query API. **Change supported mapping parameters for an existing field** -The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. For example, you can use the update mapping API to update the `ignore_above` parameter. +The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. +For example, you can use the update mapping API to update the `ignore_above` parameter. **Change the mapping of an existing field** -Except for supported mapping parameters, you can’t change the mapping or field type of an existing field. Changing an existing field could invalidate data that’s already indexed. +Except for supported mapping parameters, you can't change the mapping or field type of an existing field. +Changing an existing field could invalidate data that's already indexed. -If you need to change the mapping of a field in a data stream’s backing indices, refer to documentation about modifying data streams. If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. +If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. +If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. **Rename a field** -Renaming a field would invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name. +Renaming a field would invalidate data already indexed under the old field name. +Instead, add an alias field to create an alternate field name. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) @@ -6483,120 +6732,156 @@ Renaming a field would invalidate data already indexed under the old field name. client.indices.putMapping({ index }) ``` +### Arguments [_arguments_indices.put_mapping] + +#### Request (object) [_request_indices.put_mapping] +- **`index` (string | string[])**: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. +- **`date_detection` (Optional, boolean)**: Controls whether dynamic date detection is enabled. +- **`dynamic` (Optional, Enum("strict" | "runtime" | true | false))**: Controls whether new fields are added dynamically. 
+- **`dynamic_date_formats` (Optional, string[])**: If date detection is enabled then new string fields are checked +against 'dynamic_date_formats' and if the value matches then +a new date field is added instead of string. +- **`dynamic_templates` (Optional, Record[])**: Specify dynamic templates for the mapping. +- **`_field_names` (Optional, { enabled })**: Control whether field names are enabled for the index. +- **`_meta` (Optional, Record)**: A mapping type can have custom meta data associated with it. These are +not used at all by Elasticsearch, but can be used to store +application-specific metadata. +- **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. +- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: + +- Field name +- Field data type +- Mapping parameters +- **`_routing` (Optional, { required })**: Enable making a routing value required on indexed documents. +- **`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })**: Control whether the _source field is enabled on the index. +- **`runtime` (Optional, Record)**: Mapping of runtime fields for the index. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`write_index_only` (Optional, boolean)**: If `true`, the mappings are applied only to the current write index for the target. + +## client.indices.putSettings [_indices.put_settings] +Update index settings. +Changes dynamic index settings in real time. +For data streams, index setting changes are applied to all backing indices by default. + +To revert a setting to the default value, use a null value. +The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. +To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. + +NOTE: You can only define new analyzers on closed indices. +To add an analyzer, you must close the index, define the analyzer, and reopen the index. +You cannot close the write index of a data stream. +To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. +Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. 
+This affects searches and any new data added to the stream after the rollover. +However, it does not affect the data stream's backing indices or their existing data. +To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. -### Arguments [_arguments_206] - -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings) - * **`index` (string | string[])**: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. - * **`date_detection` (Optional, boolean)**: Controls whether dynamic date detection is enabled. - * **`dynamic` (Optional, Enum("strict" | "runtime" | true | false))**: Controls whether new fields are added dynamically. - * **`dynamic_date_formats` (Optional, string[])**: If date detection is enabled then new string fields are checked against *dynamic_date_formats* and if the value matches then a new date field is added instead of string. - * **`dynamic_templates` (Optional, Record | Record[])**: Specify dynamic templates for the mapping. - * **`_field_names` (Optional, { enabled })**: Control whether field names are enabled for the index. - * **`_meta` (Optional, Record)**: A mapping type can have custom meta data associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata. - * **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. - * **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: +```ts +client.indices.putSettings({ ... }) +``` - * Field name - * Field data type - * Mapping parameters +### Arguments [_arguments_indices.put_settings] + +#### Request (object) [_request_indices.put_settings] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit +the request. Supports wildcards (`*`). To target all data streams and +indices, omit this parameter or use `*` or `_all`. +- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })** +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index +alias, or `_all` value targets only missing or closed indices. This +behavior applies even if the request targets other open indices. For +example, a request targeting `foo*,bar*` returns an error if an index +starts with `foo` but no index starts with `bar`. 
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target
+data streams, this argument determines whether wildcard expressions match
+hidden data streams. Supports a list of values, such as
+`open,hidden`.
+- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+- **`preserve_existing` (Optional, boolean)**: If `true`, existing index settings remain unchanged.
+- **`reopen` (Optional, boolean)**: Whether to close and reopen the index to apply non-dynamic settings.
+If set to `true` the indices to which the settings are being applied
+will be closed temporarily and then reopened in order to apply the changes.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the
+ timeout expires, the request fails and returns an error.
+
+## client.indices.putTemplate [_indices.put_template]
+Create or update an index template.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
+
+IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
+
+Composable templates always take precedence over legacy templates.
+If no composable template matches a new index, matching legacy templates are applied according to their order.
+
+Index templates are only applied during index creation.
+Changes to index templates do not affect existing indices.
+Settings and mappings specified in create index API requests override any settings or mappings specified in an index template.
+
+You can use C-style `/* *\/` block comments in index templates.
+You can include comments anywhere in the request body, except before the opening curly bracket.
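+As an illustrative sketch of the legacy call documented below (the template name,
+pattern, and `order` value are placeholders):
+
+```ts
+// Legacy template; lower `order` values are merged first.
+await client.indices.putTemplate({
+  name: 'template_1',
+  index_patterns: ['te*'],
+  order: 0,
+  settings: { number_of_shards: 1 }
+})
+```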
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`write_index_only` (Optional, boolean)**: If `true`, the mappings are applied only to the current write index for the target. +**Indices matching multiple templates** +Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. +The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. +NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template) -### put_settings [_put_settings_2] +```ts +client.indices.putTemplate({ name }) +``` -Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. +### Arguments [_arguments_indices.put_template] + +#### Request (object) [_request_indices.put_template] +- **`name` (string)**: The name of the template +- **`aliases` (Optional, Record)**: Aliases for the index. +- **`index_patterns` (Optional, string | string[])**: Array of wildcard expressions used to match the names +of indices during creation. +- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. +- **`order` (Optional, number)**: Order in which Elasticsearch applies this template if index +matches multiple templates. + +Templates with lower 'order' values are merged first. Templates with higher +'order' values are merged later, overriding templates with lower values. +- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index. +- **`version` (Optional, number)**: Version number used to manage index templates externally. This number +is not automatically generated by Elasticsearch. +To unset a version, replace the template without specifying one. +- **`create` (Optional, boolean)**: If true, this request cannot replace or update existing index templates. 
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an error. +- **`cause` (Optional, string)**: User defined reason for creating/updating the index template + +## client.indices.recovery [_indices.recovery] +Get index recovery information. +Get information about ongoing and completed shard recoveries for one or more indices. +For data streams, the API returns information for the stream's backing indices. -To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. +All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. -::::{note} -You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream’s write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream’s write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream’s backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. -:::: +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. +When a shard recovery completes, the recovered shard is available for search and indexing. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings) - -```ts -client.indices.putSettings({ ... }) -``` - - -### Arguments [_arguments_207] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })** - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. 
For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`preserve_existing` (Optional, boolean)**: If `true`, existing index settings remain unchanged. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### put_template [_put_template] - -Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. - -::::{important} -This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. -:::: - - -Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. - -Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. - -You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. - -**Indices matching multiple templates** - -Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template) - -```ts -client.indices.putTemplate({ name }) -``` - - -### Arguments [_arguments_208] - -* **Request (object):** - - * **`name` (string)**: The name of the template - * **`aliases` (Optional, Record)**: Aliases for the index. - * **`index_patterns` (Optional, string | string[])**: Array of wildcard expressions used to match the names of indices during creation. - * **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. 
- * **`order` (Optional, number)**: Order in which Elasticsearch applies this template if index matches multiple templates. - - -Templates with lower *order* values are merged first. Templates with higher *order* values are merged later, overriding templates with lower values. ** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index. *** *`version` (Optional, number)**: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. To unset a version, replace the template without specifying one. *** *`create` (Optional, boolean)**: If true, this request cannot replace or update existing index templates. *** *`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`cause` (Optional, string)** - - -### recovery [_recovery_2] - -Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream’s backing indices. - -All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. - -Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. - -Recovery automatically occurs during the following processes: +Recovery automatically occurs during the following processes: * When creating an index for the first time. * When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. @@ -6607,7 +6892,9 @@ Recovery automatically occurs during the following processes: You can determine the cause of a shard recovery using the recovery or cat recovery APIs. -The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. +The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. 
+It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. +This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery) @@ -6615,28 +6902,30 @@ The index recovery API reports information about completed recoveries only for s client.indices.recovery({ ... }) ``` +### Arguments [_arguments_indices.recovery] -### Arguments [_arguments_209] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. - * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. - +#### Request (object) [_request_indices.recovery] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. +- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. +## client.indices.refresh [_indices.refresh] +Refresh an index. +A refresh makes recent operations performed on one or more indices available for search. +For data streams, the API runs the refresh operation on the stream’s backing indices. -### refresh [_refresh] - -Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. - -By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. +By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. +You can change this default interval with the `index.refresh_interval` setting. Refresh requests are synchronous and do not return a response until the refresh operation completes. -Refreshes are resource-intensive. To ensure good cluster performance, it’s recommended to wait for Elasticsearch’s periodic refresh rather than performing an explicit refresh when possible. +Refreshes are resource-intensive. +To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. -If your application workflow indexes documents and then runs a search to retrieve the indexed document, it’s recommended to use the index API’s `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. 
+If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. +This option ensures the indexing operation waits for a periodic refresh before running the search. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh) @@ -6644,33 +6933,35 @@ If your application workflow indexes documents and then runs a search to retriev client.indices.refresh({ ... }) ``` +### Arguments [_arguments_indices.refresh] -### Arguments [_arguments_210] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +#### Request (object) [_request_indices.refresh] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +## client.indices.reloadSearchAnalyzers [_indices.reload_search_analyzers] +Reload search analyzers. +Reload an index's search analyzers and their resources. +For data streams, the API reloads search analyzers and resources for the stream's backing indices. +IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. -### reload_search_analyzers [_reload_search_analyzers] - -Reload search analyzers. Reload an index’s search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream’s backing indices. 
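+
+As an illustration, a minimal sketch (assuming an already-configured `client` instance; the index name is hypothetical) that reloads the analyzers and then clears the request cache, as the note above recommends:
+
+```ts
+// Reload the updateable synonym analyzers for one index
+await client.indices.reloadSearchAnalyzers({ index: 'my-index-000001' })
+// Clear the request cache so no responses from the old analyzers are served
+await client.indices.clearCache({ index: 'my-index-000001', request: true })
+```
+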
- -::::{important} -After reloading the search analyzers you should clear the request cache to make sure it doesn’t contain responses derived from the previous versions of the analyzer. -:::: - - -You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. - -::::{note} -This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster—​including nodes that don’t contain a shard replica—​before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. -:::: +You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. +To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. +NOTE: This API does not perform a reload for each shard of an index. +Instead, it performs a reload for each node containing index shards. +As a result, the total shard count returned by the API can differ from the number of index shards. +Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. +This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers) @@ -6678,37 +6969,45 @@ This API does not perform a reload for each shard of an index. Instead, it perfo client.indices.reloadSearchAnalyzers({ index }) ``` +### Arguments [_arguments_indices.reload_search_analyzers] -### Arguments [_arguments_211] - -* **Request (object):** - - * **`index` (string | string[])**: A list of index names to reload analyzers for - * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - +#### Request (object) [_request_indices.reload_search_analyzers] +- **`index` (string | string[])**: A list of index names to reload analyzers for +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+- **`resource` (Optional, string)**: Changed resource to reload analyzers from if applicable
+
+## client.indices.resolveCluster [_indices.resolve_cluster]
+Resolve the cluster.

-### resolve_cluster [_resolve_cluster]
-
-Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported.
+Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included.
+If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.

This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.

-You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint.
+You use the same index expression with this endpoint as you would for cross-cluster search.
+Index and cluster exclusions are also supported with this endpoint.

For each cluster in the index expression, information is returned about:

-* Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope.
+* Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint.
* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.
* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
* Cluster version information, including the Elasticsearch server version.

-For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.
+For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.
+Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.
+
+## Note on backwards compatibility
+The ability to query without an index expression was added in version 8.18, so when
+querying remote clusters older than that, the local cluster will send the index
+expression `dummy*` to those remote clusters. Thus, if any errors occur, you may see a reference
+to that index expression even though you didn't request it. If it causes a problem, you can
+instead include an index expression like `*:*` to bypass the issue.
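+
+For example, a minimal sketch (assuming an already-configured `client` instance) that checks the connection status of every configured remote cluster, using no index expression:
+
+```ts
+// With no index expression, information about all configured remote clusters is returned
+const clusters = await client.indices.resolveCluster()
+for (const [alias, info] of Object.entries(clusters)) {
+  // `connected` reports whether the querying cluster could reach the remote cluster
+  console.log(alias, info.connected)
+}
+```
+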
-**Advantages of using this endpoint before a cross-cluster search** +## Advantages of using this endpoint before a cross-cluster search You may want to exclude a cluster or index from a search when: @@ -6717,28 +7016,60 @@ You may want to exclude a cluster or index from a search when: * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster) - -```ts -client.indices.resolveCluster({ name }) -``` - - -### Arguments [_arguments_212] +## Test availability of remote clusters -* **Request (object):** +The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. +The remote cluster may be available, while the local cluster is not currently connected to it. - * **`name` (string | string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. Defaults to false. - * **`ignore_unavailable` (Optional, boolean)**: If false, the request returns an error if it targets a missing or closed index. Defaults to false. +You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters. +For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. +The `connected` field in the response will indicate whether it was successful. +If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster) - -### resolve_index [_resolve_index] - -Resolve indices. Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. +```ts +client.indices.resolveCluster({ ... }) +``` + +### Arguments [_arguments_indices.resolve_cluster] + +#### Request (object) [_request_indices.resolve_cluster] +- **`name` (Optional, string | string[])**: A list of names or index patterns for the indices, aliases, and data streams to resolve. 
+Resources on remote clusters can be specified using the ``:`` syntax.
+Index and cluster exclusions (e.g., `-cluster1:*`) are also supported.
+If no index expression is specified, information about all remote clusters configured on the local cluster
+is returned without doing any index matching.
+- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing
+or closed indices. This behavior applies even if the request targets other open indices. For example, a request
+targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`ignore_unavailable` (Optional, boolean)**: If false, the request returns an error if it targets a missing or closed index.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`timeout` (Optional, string | -1 | 0)**: The maximum time to wait for remote clusters to respond.
+If a remote cluster does not respond within this timeout period, the API response
+will show the cluster as not connected and include an error message that the
+request timed out.
+
+The default timeout is unset and the query can take
+as long as the networking layer is configured to wait for remote clusters that are
+not responding (typically 30 seconds).
+
+## client.indices.resolveIndex [_indices.resolve_index]
+Resolve indices.
+Resolve the names and/or index patterns for indices, aliases, and data streams.
+Multiple patterns and remote clusters are supported.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index)

```ts
client.indices.resolveIndex({ name })
```

+### Arguments [_arguments_indices.resolve_index]

-### Arguments [_arguments_213]
-
-* **Request (object):**
-
- * **`name` (string | string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax.
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - - +#### Request (object) [_request_indices.resolve_index] +- **`name` (string | string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. +Resources on remote clusters can be specified using the ``:`` syntax. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -### rollover [_rollover] +## client.indices.rollover [_indices.rollover] +Roll over to a new index. +TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. -Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. - -The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target. +The rollover API creates a new index for a data stream or index alias. +The API behavior depends on the rollover target. **Roll over a data stream** -If you roll over a data stream, the API creates a new write index for the stream. The stream’s previous write index becomes a regular backing index. A rollover also increments the data stream’s generation. +If you roll over a data stream, the API creates a new write index for the stream. +The stream's previous write index becomes a regular backing index. +A rollover also increments the data stream's generation. **Roll over an index alias with a write index** -::::{tip} -Prior to Elasticsearch 7.9, you’d typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. -:::: - +TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. 
+Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers.

-If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with `is_write_index` set to `true`. The API also `sets is_write_index` to `false` for the previous write index.
+If an index alias points to multiple indices, one of the indices must be a write index.
+The rollover API creates a new write index for the alias with `is_write_index` set to `true`.
+The API also sets `is_write_index` to `false` for the previous write index.

**Roll over an index alias with one index**

If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.

-::::{note}
-A rollover creates a new index and is subject to the `wait_for_active_shards` setting.
-::::
-
+NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.

**Increment index names for an alias**

-When you roll over an index alias, you can specify a name for the new index. If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. This number is always six characters and zero-padded, regardless of the previous index's name.
+When you roll over an index alias, you can specify a name for the new index.
+If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.
+For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.
+This number is always six characters and zero-padded, regardless of the previous index's name.

-If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named ``. If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.
+If you use an index alias for time series data, you can use date math in the index name to track the rollover date.
+For example, you can create an alias that points to an index named ``.
+If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.
+If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover)

```ts
client.indices.rollover({ alias })
```

-
-### Arguments [_arguments_214]
-
-* **Request (object):**
-
- * **`alias` (string)**: Name of the data stream or index alias to roll over.
- * **`new_index` (Optional, string)**: Name of the index to create. Supports date math. Data streams do not support this parameter.
- * **`aliases` (Optional, Record)**: Aliases for the target index. Data streams do not support this parameter.
- * **`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })**: Conditions for the rollover. If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. If this parameter is not specified, Elasticsearch performs the rollover unconditionally. If conditions are specified, at least one of them must be a `max_*` condition. The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. - * **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping paramaters. - * **`settings` (Optional, Record)**: Configuration options for the index. Data streams do not support this parameter. - * **`dry_run` (Optional, boolean)**: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - - - -### segments [_segments_2] - -Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream’s backing indices. +### Arguments [_arguments_indices.rollover] + +#### Request (object) [_request_indices.rollover] +- **`alias` (string)**: Name of the data stream or index alias to roll over. +- **`new_index` (Optional, string)**: Name of the index to create. +Supports date math. +Data streams do not support this parameter. +- **`aliases` (Optional, Record)**: Aliases for the target index. +Data streams do not support this parameter. +- **`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })**: Conditions for the rollover. +If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. +If this parameter is not specified, Elasticsearch performs the rollover unconditionally. +If conditions are specified, at least one of them must be a `max_*` condition. +The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. 
+- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index.
+If specified, this mapping can include field names, field data types, and mapping parameters.
+- **`settings` (Optional, Record)**: Configuration options for the index.
+Data streams do not support this parameter.
+- **`dry_run` (Optional, boolean)**: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+- **`lazy` (Optional, boolean)**: If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write.
+Only allowed on data streams.
+
+## client.indices.segments [_indices.segments]
+Get index segments.
+Get low-level information about the Lucene segments in index shards.
+For data streams, the API returns information about the stream's backing indices.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments)

```ts
client.indices.segments({ ... })
```

+### Arguments [_arguments_indices.segments]

+#### Request (object) [_request_indices.segments]
+- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - - -### shard_stores [_shard_stores] - -Get index shard stores. Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream’s backing indices. +## client.indices.shardStores [_indices.shard_stores] +Get index shard stores. +Get store information about replica shards in one or more indices. +For data streams, the API retrieves store information for the stream's backing indices. The index shard stores API returns the following information: @@ -6857,22 +7210,21 @@ By default, the API returns store information only for primary shards that are u client.indices.shardStores({ ... }) ``` +### Arguments [_arguments_indices.shard_stores] -### Arguments [_arguments_216] - -* **Request (object):** +#### Request (object) [_request_indices.shard_stores] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all +value targets only missing or closed indices. This behavior applies even if the request +targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, +this argument determines whether wildcard expressions match hidden data streams. +- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. +- **`status` (Optional, Enum("green" | "yellow" | "red" | "all") | Enum("green" | "yellow" | "red" | "all")[])**: List of shard health statuses used to limit the request. - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. 
- * **`status` (Optional, Enum("green" | "yellow" | "red" | "all") | Enum("green" | "yellow" | "red" | "all")[])**: List of shard health statuses used to limit the request. - - - -### shrink [_shrink] - -Shrink an index. Shrink an index into a new index with fewer primary shards. +## client.indices.shrink [_indices.shrink] +Shrink an index. +Shrink an index into a new index with fewer primary shards. Before you can shrink an index: @@ -6880,9 +7232,13 @@ Before you can shrink an index: * A copy of every shard in the index must reside on the same node. * The index must have a green health status. -To make shard allocation easier, we recommend you also remove the index’s replica shards. You can later re-add replica shards as part of the shrink operation. +To make shard allocation easier, we recommend you also remove the index's replica shards. +You can later re-add replica shards as part of the shrink operation. -The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. +The requested number of primary shards in the target index must be a factor of the number of shards in the source index. +For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. +If the number of shards in the index is a prime number it can only be shrunk into a single primary shard + Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. @@ -6892,10 +7248,7 @@ A shrink operation: * Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. * Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. -::::{important} -Indices can only be shrunk if they satisfy the following requirements: -:::: - +IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: * The target index must not exist. * The source index must have more primary shards than the target index. @@ -6909,24 +7262,24 @@ Indices can only be shrunk if they satisfy the following requirements: client.indices.shrink({ index, target }) ``` +### Arguments [_arguments_indices.shrink] -### Arguments [_arguments_217] - -* **Request (object):** - - * **`index` (string)**: Name of the source index to shrink. - * **`target` (string)**: Name of the target index to create. - * **`aliases` (Optional, Record)**: The key is the alias name. Index alias names support date math. 
- * **`settings` (Optional, Record)**: Configuration options for the target index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - +#### Request (object) [_request_indices.shrink] +- **`index` (string)**: Name of the source index to shrink. +- **`target` (string)**: Name of the target index to create. +- **`aliases` (Optional, Record)**: The key is the alias name. +Index alias names support date math. +- **`settings` (Optional, Record)**: Configuration options for the target index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -### simulate_index_template [_simulate_index_template] - -Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template. +## client.indices.simulateIndexTemplate [_indices.simulate_index_template] +Simulate an index. +Get the index configuration that would be applied to the specified index from an existing index template. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template) @@ -6934,20 +7287,18 @@ Simulate an index. Get the index configuration that would be applied to the spec client.indices.simulateIndexTemplate({ name }) ``` +### Arguments [_arguments_indices.simulate_index_template] -### Arguments [_arguments_218] - -* **Request (object):** - - * **`name` (string)**: Name of the index to simulate - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. +#### Request (object) [_request_indices.simulate_index_template] +- **`name` (string)**: Name of the index to simulate +- **`create` (Optional, boolean)**: Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one +- **`cause` (Optional, string)**: User defined reason for dry-run creating the new template for simulation purposes +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
+- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. - - -### simulate_template [_simulate_template] - -Simulate an index template. Get the index configuration that would be applied by a particular index template. +## client.indices.simulateTemplate [_indices.simulate_template] +Simulate an index template. +Get the index configuration that would be applied by a particular index template. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template) @@ -6955,31 +7306,44 @@ Simulate an index template. Get the index configuration that would be applied by client.indices.simulateTemplate({ ... }) ``` - -### Arguments [_arguments_219] - -* **Request (object):** - - * **`name` (Optional, string)**: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template configuration in the request body. - * **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. - * **`index_patterns` (Optional, string | string[])**: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. - * **`composed_of` (Optional, string[])**: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. - * **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. - * **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. - * **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. - * **`version` (Optional, number)**: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. - * **`_meta` (Optional, Record)**: Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch. - * **`ignore_missing_component_templates` (Optional, string[])**: The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist - * **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. 
- * **`create` (Optional, boolean)**: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. - - - -### split [_split] - -Split an index. Split an index into a new index with more primary shards. * Before you can split an index: +### Arguments [_arguments_indices.simulate_template] + +#### Request (object) [_request_indices.simulate_template] +- **`name` (Optional, string)**: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit +this parameter and specify the template configuration in the request body. +- **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting. +If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. +If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. +- **`index_patterns` (Optional, string | string[])**: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. +- **`composed_of` (Optional, string[])**: An ordered list of component template names. +Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. +- **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied. +It may optionally include an `aliases`, `mappings`, or `settings` configuration. +- **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices. +Supports an empty object. +Data streams require a matching index template with a `data_stream` object. +- **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created. +The index template with the highest priority is chosen. +If no priority is specified the template is treated as though it is of priority 0 (lowest priority). +This number is not automatically generated by Elasticsearch. +- **`version` (Optional, number)**: Version number used to manage index templates externally. +This number is not automatically generated by Elasticsearch. +- **`_meta` (Optional, Record)**: Optional user metadata about the index template. +May have any contents. +This map is not automatically generated by Elasticsearch. +- **`ignore_missing_component_templates` (Optional, string[])**: The configuration option ignore_missing_component_templates can be used when an index template +references a component template that might not exist +- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template +that uses deprecated components, Elasticsearch will emit a deprecation warning. 
+- **`create` (Optional, boolean)**: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation.
+- **`cause` (Optional, string)**: User defined reason for dry-run creating the new template for simulation purposes
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template.

-### split [_split]
-
-Split an index. Split an index into a new index with more primary shards. * Before you can split an index:
+## client.indices.split [_indices.split]
+Split an index.
+Split an index into a new index with more primary shards.
+Before you can split an index:

* The index must be read-only.
* The cluster health status must be green.

You can make an index read-only with the following request using the add index block API:

```
PUT /my_source_index/_block/write
```

-The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.
+The current write index on a data stream cannot be split.
+In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.

-The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.
+The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.
+The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.
+For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.

A split operation:

* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.
* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.
* Recovers the target index as though it were a closed index which had just been re-opened.

-::::{important}
-Indices can only be split if they satisfy the following requirements:
-::::
-
+IMPORTANT: Indices can only be split if they satisfy the following requirements:

* The target index must not exist.
* The source index must have fewer primary shards than the target index.
@@ -7017,33 +7381,32 @@ Indices can only be split if they satisfy the following requirements: client.indices.split({ index, target }) ``` +### Arguments [_arguments_indices.split] -### Arguments [_arguments_220] - -* **Request (object):** +#### Request (object) [_request_indices.split] +- **`index` (string)**: Name of the source index to split. +- **`target` (string)**: Name of the target index to create. +- **`aliases` (Optional, Record)**: Aliases for the resulting index. +- **`settings` (Optional, Record)**: Configuration options for the target index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - * **`index` (string)**: Name of the source index to split. - * **`target` (string)**: Name of the target index to create. - * **`aliases` (Optional, Record)**: Aliases for the resulting index. - * **`settings` (Optional, Record)**: Configuration options for the target index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +## client.indices.stats [_indices.stats] +Get index statistics. +For data streams, the API retrieves statistics for the stream's backing indices. - - -### stats [_stats_4] - -Get index statistics. For data streams, the API retrieves statistics for the stream’s backing indices. - -By default, the returned statistics are index-level with `primaries` and `total` aggregations. `primaries` are the values for only the primary shards. `total` are the accumulated values for both primary and replica shards. +By default, the returned statistics are index-level with `primaries` and `total` aggregations. +`primaries` are the values for only the primary shards. +`total` are the accumulated values for both primary and replica shards. To get shard-level statistics, set the `level` parameter to `shards`. -::::{note} -When moving to another node, the shard-level statistics for a shard are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. -:::: - +NOTE: When moving to another node, the shard-level statistics for a shard are cleared. +Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. 
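+
+For example, a shard-level statistics request might look like the following sketch (the index name and metric selection are illustrative, and `client` is assumed to be an already-instantiated `Client`):
+
+```ts
+// Hypothetical index name; `level: 'shards'` returns per-shard statistics.
+const stats = await client.indices.stats({
+  index: 'my-index',
+  metric: ['docs', 'store'],
+  level: 'shards'
+})
+console.log(stats._all)
+```
+
+The formal signature and the full list of arguments follow below.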
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats)

@@ -7051,28 +7414,26 @@
```ts
client.indices.stats({ ... })
```

+### Arguments [_arguments_indices.stats]

-### Arguments [_arguments_221]
-
-* **Request (object):**
-
- * **`metric` (Optional, string | string[])**: Limit the information returned the specific metrics.
- * **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices
- * **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics.
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
- * **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics.
- * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics.
- * **`forbid_closed_indices` (Optional, boolean)**: If true, statistics are not collected from closed indices.
- * **`groups` (Optional, string | string[])**: List of search groups to include in the search statistics.
- * **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested).
- * **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory.
- * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level.
-
+#### Request (object) [_request_indices.stats]
+- **`metric` (Optional, string | string[])**: Limit the information returned to the specific metrics.
+- **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices
+- **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument
+determines whether wildcard expressions match hidden data streams. Supports a list of values,
+such as `open,hidden`.
+- **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics.
+- **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics.
+- **`forbid_closed_indices` (Optional, boolean)**: If true, statistics are not collected from closed indices.
+- **`groups` (Optional, string | string[])**: List of search groups to include in the search statistics.
+- **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested).
+- **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. +- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. - -### update_aliases [_update_aliases] - -Create or update an alias. Adds a data stream or index to an alias. +## client.indices.updateAliases [_indices.update_aliases] +Create or update an alias. +Adds a data stream or index to an alias. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases) @@ -7080,54 +7441,85 @@ Create or update an alias. Adds a data stream or index to an alias. client.indices.updateAliases({ ... }) ``` +### Arguments [_arguments_indices.update_aliases] -### Arguments [_arguments_222] +#### Request (object) [_request_indices.update_aliases] +- **`actions` (Optional, { add_backing_index, remove_backing_index }[])**: Actions to perform. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.indices.validateQuery [_indices.validate_query] +Validate a query. +Validates a query without running it. - * **`actions` (Optional, { add_backing_index, remove_backing_index }[])**: Actions to perform. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query) +```ts +client.indices.validateQuery({ ... }) +``` +### Arguments [_arguments_indices.validate_query] -### validate_query [_validate_query] +#### Request (object) [_request_indices.validate_query] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. +Supports wildcards (`*`). +To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query in the Lucene query string syntax. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. 
+This behavior applies even if the request targets other open indices. +- **`all_shards` (Optional, boolean)**: If `true`, the validation is executed on all shards instead of one random shard per index. +- **`analyzer` (Optional, string)**: Analyzer to use for the query string. +This parameter can only be used when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. +- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. +- **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string. +This parameter can only be used when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`explain` (Optional, boolean)**: If `true`, the response returns detailed information if an error has occurred. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. +- **`rewrite` (Optional, boolean)**: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. +- **`q` (Optional, string)**: Query in the Lucene query string syntax. -Validate a query. Validates a query without running it. +## client.inference.chatCompletionUnified [_inference.chat_completion_unified] +Perform chat completion inference -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference) ```ts -client.indices.validateQuery({ ... }) +client.inference.chatCompletionUnified({ inference_id }) ``` +### Arguments [_arguments_inference.chat_completion_unified] -### Arguments [_arguments_223] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query in the Lucene query string syntax. 
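+
+As a minimal sketch, a completion request could look like this (the endpoint ID is hypothetical and must refer to an inference endpoint created beforehand; `client` is assumed to be an already-instantiated `Client`):
+
+```ts
+const response = await client.inference.completion({
+  inference_id: 'my-completion-endpoint', // hypothetical endpoint ID
+  input: 'What is Elasticsearch?',
+  timeout: '30s'
+})
+console.log(response)
+```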
- * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`all_shards` (Optional, boolean)**: If `true`, the validation is executed on all shards instead of one random shard per index. - * **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. - * **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`explain` (Optional, boolean)**: If `true`, the response returns detailed information if an error has occurred. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. - * **`rewrite` (Optional, boolean)**: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. - * **`q` (Optional, string)**: Query in the Lucene query string syntax. +#### Request (object) [_request_inference.chat_completion_unified] +- **`inference_id` (string)**: The inference Id +- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. +## client.inference.completion [_inference.completion] +Perform completion inference on the service +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference) -## inference [_inference] +```ts +client.inference.completion({ inference_id, input }) +``` +### Arguments [_arguments_inference.completion] -### delete [_delete_6] +#### Request (object) [_request_inference.completion] +- **`inference_id` (string)**: The inference Id +- **`input` (string | string[])**: Inference input. +Either a string or an array of strings. +- **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. 
+## client.inference.delete [_inference.delete] Delete an inference endpoint [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete) @@ -7136,20 +7528,15 @@ Delete an inference endpoint client.inference.delete({ inference_id }) ``` +### Arguments [_arguments_inference.delete] -### Arguments [_arguments_224] - -* **Request (object):** - - * **`inference_id` (string)**: The inference Id - * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type - * **`dry_run` (Optional, boolean)**: When true, the endpoint is not deleted, and a list of ingest processors which reference this endpoint is returned - * **`force` (Optional, boolean)**: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields - - - -### get [_get_6] +#### Request (object) [_request_inference.delete] +- **`inference_id` (string)**: The inference identifier. +- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type +- **`dry_run` (Optional, boolean)**: When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. +- **`force` (Optional, boolean)**: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. +## client.inference.get [_inference.get] Get an inference endpoint [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get) @@ -7158,179 +7545,290 @@ Get an inference endpoint client.inference.get({ ... }) ``` +### Arguments [_arguments_inference.get] + +#### Request (object) [_request_inference.get] +- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type +- **`inference_id` (Optional, string)**: The inference Id + +## client.inference.postEisChatCompletion [_inference.post_eis_chat_completion] +Perform a chat completion task through the Elastic Inference Service (EIS). -### Arguments [_arguments_225] +Perform a chat completion inference task with the `elastic` service. -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion) - * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type - * **`inference_id` (Optional, string)**: The inference Id +```ts +client.inference.postEisChatCompletion({ eis_inference_id }) +``` +### Arguments [_arguments_inference.post_eis_chat_completion] +#### Request (object) [_request_inference.post_eis_chat_completion] +- **`eis_inference_id` (string)**: The unique identifier of the inference endpoint. -### inference [_inference_2] +## client.inference.put [_inference.put] +Create an inference endpoint. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. 
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -Perform inference on the service +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put) ```ts -client.inference.inference({ inference_id, input }) +client.inference.put({ inference_id }) ``` +### Arguments [_arguments_inference.put] -### Arguments [_arguments_226] - -* **Request (object):** +#### Request (object) [_request_inference.put] +- **`inference_id` (string)**: The inference Id +- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type +- **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** - * **`inference_id` (string)**: The inference Id - * **`input` (string | string[])**: Inference input. Either a string or an array of strings. - * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type - * **`query` (Optional, string)**: Query input, required for rerank task. Not required for other tasks. - * **`task_settings` (Optional, User-defined value)**: Optional task settings - * **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. +## client.inference.putEis [_inference.put_eis] +Create an Elastic Inference Service (EIS) inference endpoint. +Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS). +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-eis.html) -### put [_put_2] +```ts +client.inference.putEis({ task_type, eis_inference_id, service, service_settings }) +``` -Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. +### Arguments [_arguments_inference.put_eis] -::::{important} -The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. 
However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
-::::

#### Request (object) [_request_inference.put_eis]
- **`task_type` (Enum("chat_completion"))**: The type of the inference task that the model will perform.
NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
- **`eis_inference_id` (string)**: The unique identifier of the inference endpoint.
- **`service` (Enum("elastic"))**: The type of service supported for the specified task type. In this case, `elastic`.
- **`service_settings` ({ model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `elastic` service.

## client.inference.putMistral [_inference.put_mistral]
Configure a Mistral inference endpoint

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-mistral.html)

```ts
client.inference.putMistral()
```

## client.inference.putOpenai [_inference.put_openai]
Create an OpenAI inference endpoint.

Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs.

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-openai.html)

```ts
client.inference.putOpenai({ task_type, openai_inference_id, service, service_settings })
```

### Arguments [_arguments_inference.put_openai]

#### Request (object) [_request_inference.put_openai]
- **`task_type` (Enum("chat_completion" | "completion" | "text_embedding"))**: The type of the inference task that the model will perform.
NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
- **`openai_inference_id` (string)**: The unique identifier of the inference endpoint.
- **`service` (Enum("openai"))**: The type of service supported for the specified task type. In this case, `openai`.
- **`service_settings` ({ api_key, dimensions, model_id, organization_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `openai` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { user })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.

## client.inference.putVoyageai [_inference.put_voyageai]
Create a VoyageAI inference endpoint.

Create an inference endpoint to perform an inference task with the `voyageai` service.

Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-voyageai)

```ts
client.inference.putVoyageai({ task_type, voyageai_inference_id, service, service_settings })
```

### Arguments [_arguments_inference.put_voyageai]

#### Request (object) [_request_inference.put_voyageai]
- **`task_type` (Enum("text_embedding" | "rerank"))**: The type of the inference task that the model will perform.
- **`voyageai_inference_id` (string)**: The unique identifier of the inference endpoint.
- **`service` (Enum("voyageai"))**: The type of service supported for the specified task type. In this case, `voyageai`.
- **`service_settings` ({ dimensions, model_id, rate_limit, embedding_type })**: Settings used to install the inference model. These settings are specific to the `voyageai` service.
- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
- **`task_settings` (Optional, { input_type, return_documents, top_k, truncation })**: Settings to configure the inference task.
These settings are specific to the task type you specified.

## client.inference.putWatsonx [_inference.put_watsonx]
Create a Watsonx inference endpoint.

Create an inference endpoint to perform an inference task with the `watsonxai` service.
You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.
You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx)

```ts
client.inference.putWatsonx({ task_type, watsonx_inference_id, service, service_settings })
```

### Arguments [_arguments_inference.put_watsonx]

#### Request (object) [_request_inference.put_watsonx]
- **`task_type` (Enum("text_embedding"))**: The task type.
The only valid task type for the model to perform is `text_embedding`.
- **`watsonx_inference_id` (string)**: The unique identifier of the inference endpoint.
- **`service` (Enum("watsonxai"))**: The type of service supported for the specified task type. In this case, `watsonxai`.
- **`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service.

## client.inference.rerank [_inference.rerank]
Perform reranking inference on the service

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference)

```ts
client.inference.rerank({ inference_id, query, input })
```

### Arguments [_arguments_inference.rerank]

#### Request (object) [_request_inference.rerank]
- **`inference_id` (string)**: The unique identifier for the inference endpoint.
- **`query` (string)**: Query input.
- **`input` (string | string[])**: The text on which you want to perform the inference task.
It can be a single string or an array.

> info
> Inference endpoints for the `completion` task type currently only support a single string as input.
- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request.
These settings are specific to the task type you specified and override the task settings specified when initializing the service.
- **`timeout` (Optional, string | -1 | 0)**: The amount of time to wait for the inference request to complete.
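+
+To make the request shape concrete, here is a hedged sketch of a rerank call (the endpoint ID and documents are illustrative; `client` is assumed to be an already-instantiated `Client`):
+
+```ts
+const ranked = await client.inference.rerank({
+  inference_id: 'my-rerank-endpoint', // hypothetical endpoint ID
+  query: 'What is Elasticsearch?',
+  input: [
+    'Elasticsearch is a distributed search and analytics engine.',
+    'Kibana is a data visualization and management tool.'
+  ]
+})
+console.log(ranked)
+```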
- * **`temperature` (Optional, float)**: The sampling temperature to use. - * **`tool_choice` (Optional, string | { type, function })**: Controls which tool is called by the model. - * **`tools` (Optional, { type, function }[])**: A list of tools that the model can call. - * **`top_p` (Optional, float)**: Nucleus sampling, an alternative to sampling with temperature. - * **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. +## client.inference.sparseEmbedding [_inference.sparse_embedding] +Perform sparse embedding inference on the service +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference) +```ts +client.inference.sparseEmbedding({ inference_id, input }) +``` -### update [_update_2] +### Arguments [_arguments_inference.sparse_embedding] -Update an inference endpoint. +#### Request (object) [_request_inference.sparse_embedding] +- **`inference_id` (string)**: The inference Id +- **`input` (string | string[])**: Inference input. +Either a string or an array of strings. +- **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. -Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. +## client.inference.streamCompletion [_inference.stream_completion] +Perform streaming inference. +Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. +This API works only with the completion task type. -::::{important} -The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. -:::: +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. +This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. 
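+
+A minimal sketch of consuming the stream (the endpoint ID is hypothetical; `asStream: true` is the client's transport option for receiving the raw response body as a stream rather than a parsed object):
+
+```ts
+// Assumes `client` is an already-instantiated Client.
+const body = await client.inference.streamCompletion({
+  inference_id: 'my-completion-endpoint', // hypothetical endpoint ID
+  input: 'Tell me about Elasticsearch.'
+}, { asStream: true })
+// The body is a stream of server-sent events; print chunks as they arrive.
+for await (const chunk of body) {
+  process.stdout.write(chunk.toString())
+}
+```
+
+The formal signature and arguments follow below.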
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference) ```ts -client.inference.update({ inference_id }) +client.inference.streamCompletion({ inference_id, input }) ``` +### Arguments [_arguments_inference.stream_completion] + +#### Request (object) [_request_inference.stream_completion] +- **`inference_id` (string)**: The unique identifier for the inference endpoint. +- **`input` (string | string[])**: The text on which you want to perform the inference task. +It can be a single string or an array. -### Arguments [_arguments_230] +NOTE: Inference endpoints for the completion task type currently only support a single string as input. +- **`task_settings` (Optional, User-defined value)**: Optional task settings -* **Request (object):** +## client.inference.textEmbedding [_inference.text_embedding] +Perform text embedding inference on the service - * **`inference_id` (string)**: The unique identifier of the inference endpoint. - * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The type of inference task that the model performs. - * **`inference_config` (Optional, { service, service_settings, task_settings })** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference) +```ts +client.inference.textEmbedding({ inference_id, input }) +``` +### Arguments [_arguments_inference.text_embedding] -## ingest [_ingest] +#### Request (object) [_request_inference.text_embedding] +- **`inference_id` (string)**: The inference Id +- **`input` (string | string[])**: Inference input. +Either a string or an array of strings. +- **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. +## client.inference.update [_inference.update] +Update an inference endpoint. -### delete_geoip_database [_delete_geoip_database] +Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. -Delete GeoIP database configurations. Delete one or more IP geolocation database configurations. +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update) ```ts -client.ingest.deleteGeoipDatabase({ id }) +client.inference.update({ inference_id }) ``` +### Arguments [_arguments_inference.update] + +#### Request (object) [_request_inference.update] +- **`inference_id` (string)**: The unique identifier of the inference endpoint. 
+- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The type of inference task that the model performs. +- **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** -### Arguments [_arguments_231] +## client.ingest.deleteGeoipDatabase [_ingest.delete_geoip_database] +Delete GeoIP database configurations. -* **Request (object):** +Delete one or more IP geolocation database configurations. - * **`id` (string | string[])**: A list of geoip database configurations to delete - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database) +```ts +client.ingest.deleteGeoipDatabase({ id }) +``` +### Arguments [_arguments_ingest.delete_geoip_database] -### delete_ip_location_database [_delete_ip_location_database] +#### Request (object) [_request_ingest.delete_geoip_database] +- **`id` (string | string[])**: A list of geoip database configurations to delete +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.ingest.deleteIpLocationDatabase [_ingest.delete_ip_location_database] Delete IP geolocation database configurations. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database) @@ -7339,20 +7837,20 @@ Delete IP geolocation database configurations. client.ingest.deleteIpLocationDatabase({ id }) ``` +### Arguments [_arguments_ingest.delete_ip_location_database] -### Arguments [_arguments_232] - -* **Request (object):** - - * **`id` (string | string[])**: A list of IP location database configurations. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. - +#### Request (object) [_request_ingest.delete_ip_location_database] +- **`id` (string | string[])**: A list of IP location database configurations. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. 
+A value of `-1` indicates that the request should never time out. - -### delete_pipeline [_delete_pipeline] - -Delete pipelines. Delete one or more ingest pipelines. +## client.ingest.deletePipeline [_ingest.delete_pipeline] +Delete pipelines. +Delete one or more ingest pipelines. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline) @@ -7360,49 +7858,46 @@ Delete pipelines. Delete one or more ingest pipelines. client.ingest.deletePipeline({ id }) ``` +### Arguments [_arguments_ingest.delete_pipeline] -### Arguments [_arguments_233] - -* **Request (object):** - - * **`id` (string)**: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +#### Request (object) [_request_ingest.delete_pipeline] +- **`id` (string)**: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. +To delete all ingest pipelines in a cluster, use a value of `*`. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +## client.ingest.geoIpStats [_ingest.geo_ip_stats] +Get GeoIP statistics. +Get download statistics for GeoIP2 databases that are used with the GeoIP processor. - -### geo_ip_stats [_geo_ip_stats] - -Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor. - -[Endpoint documentation](elasticsearch://reference/ingestion-tools/enrich-processor/geoip-processor.md) +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html) ```ts client.ingest.geoIpStats() ``` -### get_geoip_database [_get_geoip_database] +## client.ingest.getGeoipDatabase [_ingest.get_geoip_database] +Get GeoIP database configurations. -Get GeoIP database configurations. Get information about one or more IP geolocation database configurations. +Get information about one or more IP geolocation database configurations. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database) ```ts client.ingest.getGeoipDatabase({ ... }) ``` +### Arguments [_arguments_ingest.get_geoip_database] -### Arguments [_arguments_234] - -* **Request (object):** - - * **`id` (Optional, string | string[])**: List of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. - - - -### get_ip_location_database [_get_ip_location_database] +#### Request (object) [_request_ingest.get_geoip_database] +- **`id` (Optional, string | string[])**: A list of database configuration IDs to retrieve. +Wildcard (`*`) expressions are supported. 
+To get all database configurations, omit this parameter or use `*`. +## client.ingest.getIpLocationDatabase [_ingest.get_ip_location_database] Get IP geolocation database configurations. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database) @@ -7411,19 +7906,21 @@ Get IP geolocation database configurations. client.ingest.getIpLocationDatabase({ ... }) ``` +### Arguments [_arguments_ingest.get_ip_location_database] -### Arguments [_arguments_235] - -* **Request (object):** - - * **`id` (Optional, string | string[])**: List of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. - +#### Request (object) [_request_ingest.get_ip_location_database] +- **`id` (Optional, string | string[])**: List of database configuration IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all database configurations, omit this parameter or use `*`. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +## client.ingest.getPipeline [_ingest.get_pipeline] +Get pipelines. -### get_pipeline [_get_pipeline] - -Get pipelines. Get information about one or more ingest pipelines. This API returns a local reference of the pipeline. +Get information about one or more ingest pipelines. +This API returns a local reference of the pipeline. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline) @@ -7431,53 +7928,52 @@ Get pipelines. Get information about one or more ingest pipelines. This API retu client.ingest.getPipeline({ ... }) ``` +### Arguments [_arguments_ingest.get_pipeline] -### Arguments [_arguments_236] - -* **Request (object):** - - * **`id` (Optional, string)**: List of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`summary` (Optional, boolean)**: Return pipelines without their definitions (default: false) - +#### Request (object) [_request_ingest.get_pipeline] +- **`id` (Optional, string)**: List of pipeline IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all ingest pipelines, omit this parameter or use `*`. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`summary` (Optional, boolean)**: Return pipelines without their definitions (default: false) +## client.ingest.processorGrok [_ingest.processor_grok] +Run a grok processor. +Extract structured fields out of a single text field within a document. +You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. 
+A grok pattern is like a regular expression that supports aliased expressions that can be reused. -### processor_grok [_processor_grok] - -Run a grok processor. Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. - -[Endpoint documentation](elasticsearch://reference/ingestion-tools/enrich-processor/grok-processor.md) +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/grok-processor.html) ```ts client.ingest.processorGrok() ``` -### put_geoip_database [_put_geoip_database] +## client.ingest.putGeoipDatabase [_ingest.put_geoip_database] +Create or update a GeoIP database configuration. -Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API. +Refer to the create or update IP geolocation database configuration API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database) ```ts client.ingest.putGeoipDatabase({ id, name, maxmind }) ``` +### Arguments [_arguments_ingest.put_geoip_database] -### Arguments [_arguments_237] - -* **Request (object):** - - * **`id` (string)**: ID of the database configuration to create or update. - * **`name` (string)**: The provider-assigned name of the IP geolocation database to download. - * **`maxmind` ({ account_id })**: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### put_ip_location_database [_put_ip_location_database] +#### Request (object) [_request_ingest.put_geoip_database] +- **`id` (string)**: ID of the database configuration to create or update. +- **`name` (string)**: The provider-assigned name of the IP geolocation database to download. +- **`maxmind` ({ account_id })**: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. +At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.ingest.putIpLocationDatabase [_ingest.put_ip_location_database] Create or update an IP geolocation database configuration. 
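+
+As an illustrative sketch (the configuration ID, database name, and MaxMind account ID are all hypothetical; `client` is assumed to be an already-instantiated `Client`):
+
+```ts
+await client.ingest.putIpLocationDatabase({
+  id: 'my-ip-location-db', // hypothetical configuration ID
+  configuration: {
+    name: 'GeoIP2-City',
+    maxmind: { account_id: '123456' } // hypothetical MaxMind account ID
+  }
+})
+```
+
+The formal signature and arguments follow below.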
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database) @@ -7486,49 +7982,48 @@ Create or update an IP geolocation database configuration. client.ingest.putIpLocationDatabase({ id }) ``` +### Arguments [_arguments_ingest.put_ip_location_database] -### Arguments [_arguments_238] - -* **Request (object):** - - * **`id` (string)**: The database configuration identifier. - * **`configuration` (Optional, { name, maxmind, ipinfo })** - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. A value of `-1` indicates that the request should never time out. - - +#### Request (object) [_request_ingest.put_ip_location_database] +- **`id` (string)**: The database configuration identifier. +- **`configuration` (Optional, { name, maxmind, ipinfo })** +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. +A value of `-1` indicates that the request should never time out. -### put_pipeline [_put_pipeline] +## client.ingest.putPipeline [_ingest.put_pipeline] +Create or update a pipeline. +Changes made using this API take effect immediately. -Create or update a pipeline. Changes made using this API take effect immediately. - -[Endpoint documentation](docs-content://manage-data/ingest/transform-enrich/ingest-pipelines.md) +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html) ```ts client.ingest.putPipeline({ id }) ``` +### Arguments [_arguments_ingest.put_pipeline] -### Arguments [_arguments_239] - -* **Request (object):** - - * **`id` (string)**: ID of the ingest pipeline to create or update. - * **`_meta` (Optional, Record)**: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. - * **`description` (Optional, string)**: Description of the ingest pipeline. - * **`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. 
If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline’s remaining processors. - * **`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. - * **`version` (Optional, number)**: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. - * **`deprecated` (Optional, boolean)**: Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`if_version` (Optional, number)**: Required version for optimistic concurrency control for pipeline updates - +#### Request (object) [_request_ingest.put_pipeline] +- **`id` (string)**: ID of the ingest pipeline to create or update. +- **`_meta` (Optional, Record)**: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. +- **`description` (Optional, string)**: Description of the ingest pipeline. +- **`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. 
+- **`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. +- **`version` (Optional, number)**: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. +- **`deprecated` (Optional, boolean)**: Marks this ingest pipeline as deprecated. +When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`if_version` (Optional, number)**: Required version for optimistic concurrency control for pipeline updates +## client.ingest.simulate [_ingest.simulate] +Simulate a pipeline. -### simulate [_simulate] - -Simulate a pipeline. Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. +Run an ingest pipeline against a set of provided documents. +You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate) @@ -7536,24 +8031,21 @@ Simulate a pipeline. Run an ingest pipeline against a set of provided documents. client.ingest.simulate({ docs }) ``` +### Arguments [_arguments_ingest.simulate] -### Arguments [_arguments_240] - -* **Request (object):** - - * **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline. - * **`id` (Optional, string)**: Pipeline to test. If you don’t specify a `pipeline` in the request body, this parameter is required. - * **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })**: Pipeline to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. - * **`verbose` (Optional, boolean)**: If `true`, the response includes output data for each processor in the executed pipeline. - +#### Request (object) [_request_ingest.simulate] +- **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline. +- **`id` (Optional, string)**: The pipeline to test. +If you don't specify a `pipeline` in the request body, this parameter is required. +- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })**: The pipeline to test. 
+If you don't specify the `pipeline` request path parameter, this parameter is required. +If you specify both this and the request path parameter, the API only uses the request path parameter. +- **`verbose` (Optional, boolean)**: If `true`, the response includes output data for each processor in the executed pipeline. +## client.license.delete [_license.delete] +Delete the license. -## license [_license] - - -### delete [_delete_7] - -Delete the license. When the license expires, your subscription level reverts to Basic. +When the license expires, your subscription level reverts to Basic. If the operator privileges feature is enabled, only operator users can use this API. @@ -7563,24 +8055,20 @@ If the operator privileges feature is enabled, only operator users can use this client.license.delete({ ... }) ``` +### Arguments [_arguments_license.delete] -### Arguments [_arguments_241] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get [_get_7] +#### Request (object) [_request_license.delete] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires. +## client.license.get [_license.get] +Get license information. -::::{note} -If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. -:::: +Get information about your Elastic license including its type, its status, when it was issued, and when it expires. +>info +> If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. +> If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get) @@ -7588,18 +8076,14 @@ If the master node is generating a new cluster state, the get license API may re client.license.get({ ... }) ``` +### Arguments [_arguments_license.get] -### Arguments [_arguments_242] - -* **Request (object):** - - * **`accept_enterprise` (Optional, boolean)**: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. This parameter is deprecated and will always be set to true in 8.x. - * **`local` (Optional, boolean)**: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. - - - -### get_basic_status [_get_basic_status] +#### Request (object) [_request_license.get] +- **`accept_enterprise` (Optional, boolean)**: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. 
This behavior is maintained for backwards compatibility. +This parameter is deprecated and will always be set to true in 8.x. +- **`local` (Optional, boolean)**: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. +## client.license.getBasicStatus [_license.get_basic_status] Get the basic license status. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status) @@ -7609,8 +8093,7 @@ client.license.getBasicStatus() ``` -### get_trial_status [_get_trial_status] - +## client.license.getTrialStatus [_license.get_trial_status] Get the trial status. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status) @@ -7620,14 +8103,16 @@ client.license.getTrialStatus() ``` -### post [_post_2] +## client.license.post [_license.post] +Update the license. -Update the license. You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. - -::::{note} -If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API. -:::: +You can update your license at runtime without shutting down your nodes. +License updates take effect immediately. +If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. +You must then re-submit the API request with the acknowledge parameter set to true. +NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. +If the operator privileges feature is enabled, only operator users can use this API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post) @@ -7635,29 +8120,24 @@ If Elasticsearch security features are enabled and you are installing a gold or client.license.post({ ... }) ``` +### Arguments [_arguments_license.post] -### Arguments [_arguments_243] +#### Request (object) [_request_license.post] +- **`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })** +- **`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])**: A sequence of one or more JSON documents containing the license information. +- **`acknowledge` (Optional, boolean)**: Specifies whether you acknowledge the license changes. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.license.postStartBasic [_license.post_start_basic] +Start a basic license. 
- * **`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })** - * **`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])**: A sequence of one or more JSON documents containing the license information. - * **`acknowledge` (Optional, boolean)**: Specifies whether you acknowledge the license changes. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +Start an indefinite basic license, which gives access to all the basic features. +NOTE: In order to start a basic license, you must not currently have a basic license. - -### post_start_basic [_post_start_basic] - -Start a basic license. Start an indefinite basic license, which gives access to all the basic features. - -::::{note} -In order to start a basic license, you must not currently have a basic license. -:::: - - -If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the `acknowledge` parameter set to `true`. +If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. +You must then re-submit the API request with the `acknowledge` parameter set to `true`. To check the status of your basic license, use the get basic license API. @@ -7667,25 +8147,19 @@ To check the status of your basic license, use the get basic license API. client.license.postStartBasic({ ... }) ``` +### Arguments [_arguments_license.post_start_basic] -### Arguments [_arguments_244] - -* **Request (object):** - - * **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - +#### Request (object) [_request_license.post_start_basic] +- **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.license.postStartTrial [_license.post_start_trial] +Start a trial. +Start a 30-day trial, which gives access to all subscription features. -### post_start_trial [_post_start_trial] - -Start a trial. Start a 30-day trial, which gives access to all subscription features. - -::::{note} -You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at [https://www.elastic.co/trialextension](https://www.elastic.co/trialextension). 
-:::: - +NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. +For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. To check the status of your trial, use the get trial status API. @@ -7695,63 +8169,49 @@ To check the status of your trial, use the get trial status API. client.license.postStartTrial({ ... }) ``` +### Arguments [_arguments_license.post_start_trial] -### Arguments [_arguments_245] +#### Request (object) [_request_license.post_start_trial] +- **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) +- **`type_query_string` (Optional, string)** +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -* **Request (object):** +## client.logstash.deletePipeline [_logstash.delete_pipeline] +Delete a Logstash pipeline. +Delete a pipeline that is used for Logstash Central Management. +If the request succeeds, you receive an empty response with an appropriate status code. - * **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) - * **`type_query_string` (Optional, string)** - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline) +```ts +client.logstash.deletePipeline({ id }) +``` +### Arguments [_arguments_logstash.delete_pipeline] -## logstash [_logstash] +#### Request (object) [_request_logstash.delete_pipeline] +- **`id` (string)**: An identifier for the pipeline. +## client.logstash.getPipeline [_logstash.get_pipeline] +Get Logstash pipelines. +Get pipelines that are used for Logstash Central Management. -### delete_pipeline [_delete_pipeline_2] - -Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline) - -```ts -client.logstash.deletePipeline({ id }) -``` - - -### Arguments [_arguments_246] - -* **Request (object):** - - * **`id` (string)**: An identifier for the pipeline. - - - -### get_pipeline [_get_pipeline_2] - -Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline) ```ts client.logstash.getPipeline({ ... }) ``` +### Arguments [_arguments_logstash.get_pipeline] -### Arguments [_arguments_247] - -* **Request (object):** - - * **`id` (Optional, string | string[])**: A list of pipeline identifiers. - - - -### put_pipeline [_put_pipeline_2] +#### Request (object) [_request_logstash.get_pipeline] +- **`id` (Optional, string | string[])**: A list of pipeline identifiers. +## client.logstash.putPipeline [_logstash.put_pipeline] Create or update a Logstash pipeline. -Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced. 
+Create a pipeline that is used for Logstash Central Management.
+If the specified pipeline exists, it is replaced.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline)

```ts
client.logstash.putPipeline({ id })
```

+### Arguments [_arguments_logstash.put_pipeline]

-### Arguments [_arguments_248]
-
-* **Request (object):**
-
-    * **`id` (string)**: An identifier for the pipeline.
-    * **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })**

+#### Request (object) [_request_logstash.put_pipeline]
+- **`id` (string)**: An identifier for the pipeline.
+- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })**

+## client.migration.deprecations [_migration.deprecations]
+Get deprecation information.
+Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.

-
-
-## migration [_migration]
-
-
-### deprecations [_deprecations]
-
-Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
-
-::::{tip}
-This APIs is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant.
-::::
-

+TIP: This API is designed for indirect use by the Upgrade Assistant.
+You are strongly recommended to use the Upgrade Assistant.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations)

```ts
client.migration.deprecations({ ... })
```

+### Arguments [_arguments_migration.deprecations]

-### Arguments [_arguments_249]
-
-* **Request (object):**
-
-    * **`index` (Optional, string)**: Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported.

+#### Request (object) [_request_migration.deprecations]
+- **`index` (Optional, string)**: Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported.

+## client.migration.getFeatureUpgradeStatus [_migration.get_feature_upgrade_status]
+Get feature migration information.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+Check which features need to be migrated and the status of any migrations that are in progress.

-### get_feature_upgrade_status [_get_feature_upgrade_status]
-
-Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress.
-
-::::{tip}
-This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant.
-::::
-

+TIP: This API is designed for indirect use by the Upgrade Assistant.
+You are strongly recommended to use the Upgrade Assistant.
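As an illustrative sketch (the `migration_status` and `features` response fields are assumptions based on the current `GET /_migration/system_features` response, not part of this change):

```ts
// Check which features still need their system indices migrated.
const res = await client.migration.getFeatureUpgradeStatus()
console.log(res.migration_status) // e.g. 'NO_MIGRATION_NEEDED'
for (const feature of res.features) {
  console.log(feature.feature_name, feature.migration_status)
}
```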
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status) @@ -7812,16 +8258,14 @@ client.migration.getFeatureUpgradeStatus() ``` -### post_feature_upgrade [_post_feature_upgrade] - -Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process. +## client.migration.postFeatureUpgrade [_migration.post_feature_upgrade] +Start the feature migration. +Version upgrades sometimes require changes to how features store configuration information and data in system indices. +This API starts the automatic migration process. Some functionality might be temporarily unavailable during the migration process. -::::{tip} -The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. -:::: - +TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status) @@ -7830,12 +8274,13 @@ client.migration.postFeatureUpgrade() ``` -## ml [_ml] - - -### clear_trained_model_deployment_cache [_clear_trained_model_deployment_cache] +## client.ml.clearTrainedModelDeploymentCache [_ml.clear_trained_model_deployment_cache] +Clear trained model deployment cache. -Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. +Cache will be cleared on all nodes where the trained model is assigned. +A trained model deployment may have an inference cache enabled. +As requests are handled by each allocated node, their responses may be cached on that individual node. +Calling this API clears the caches without restarting the deployment. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache) @@ -7843,18 +8288,18 @@ Clear trained model deployment cache. Cache will be cleared on all nodes where t client.ml.clearTrainedModelDeploymentCache({ model_id }) ``` +### Arguments [_arguments_ml.clear_trained_model_deployment_cache] -### Arguments [_arguments_250] +#### Request (object) [_request_ml.clear_trained_model_deployment_cache] +- **`model_id` (string)**: The unique identifier of the trained model. -* **Request (object):** +## client.ml.closeJob [_ml.close_job] +Close anomaly detection jobs. - * **`model_id` (string)**: The unique identifier of the trained model. - - - -### close_job [_close_job] - -Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. 
Therefore it is a best practice to close jobs that are no longer required to process data.
If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.
When a datafeed that has a specified end date stops, it automatically closes its associated job.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job)

```ts
client.ml.closeJob({ job_id })
```

### Arguments [_arguments_ml.close_job]

#### Request (object) [_request_ml.close_job]
- **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier.
- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter.
- **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter.
- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter.

## client.ml.deleteCalendar [_ml.delete_calendar]
Delete a calendar.

Remove all scheduled events from a calendar, then delete it.
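For example, a minimal sketch (the calendar id is a placeholder):

```ts
// Delete the calendar and every scheduled event it contains.
await client.ml.deleteCalendar({ calendar_id: 'planned-outages' })
```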
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar) @@ -7884,17 +8326,12 @@ Delete a calendar. Removes all scheduled events from a calendar, then deletes it client.ml.deleteCalendar({ calendar_id }) ``` +### Arguments [_arguments_ml.delete_calendar] -### Arguments [_arguments_252] - -* **Request (object):** - - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. - - - -### delete_calendar_event [_delete_calendar_event] +#### Request (object) [_request_ml.delete_calendar] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. +## client.ml.deleteCalendarEvent [_ml.delete_calendar_event] Delete events from a calendar. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event) @@ -7903,18 +8340,14 @@ Delete events from a calendar. client.ml.deleteCalendarEvent({ calendar_id, event_id }) ``` +### Arguments [_arguments_ml.delete_calendar_event] -### Arguments [_arguments_253] - -* **Request (object):** - - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. - * **`event_id` (string)**: Identifier for the scheduled event. You can obtain this identifier by using the get calendar events API. - - - -### delete_calendar_job [_delete_calendar_job] +#### Request (object) [_request_ml.delete_calendar_event] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. +- **`event_id` (string)**: Identifier for the scheduled event. +You can obtain this identifier by using the get calendar events API. +## client.ml.deleteCalendarJob [_ml.delete_calendar_job] Delete anomaly jobs from a calendar. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job) @@ -7923,18 +8356,14 @@ Delete anomaly jobs from a calendar. client.ml.deleteCalendarJob({ calendar_id, job_id }) ``` +### Arguments [_arguments_ml.delete_calendar_job] -### Arguments [_arguments_254] - -* **Request (object):** - - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. - * **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups. - - - -### delete_data_frame_analytics [_delete_data_frame_analytics] +#### Request (object) [_request_ml.delete_calendar_job] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. +- **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a +list of jobs or groups. +## client.ml.deleteDataFrameAnalytics [_ml.delete_data_frame_analytics] Delete a data frame analytics job. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics) @@ -7943,19 +8372,14 @@ Delete a data frame analytics job. client.ml.deleteDataFrameAnalytics({ id }) ``` +### Arguments [_arguments_ml.delete_data_frame_analytics] -### Arguments [_arguments_255] - -* **Request (object):** - - * **`id` (string)**: Identifier for the data frame analytics job. - * **`force` (Optional, boolean)**: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. - * **`timeout` (Optional, string | -1 | 0)**: The time to wait for the job to be deleted. 
- - - -### delete_datafeed [_delete_datafeed] +#### Request (object) [_request_ml.delete_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. +- **`force` (Optional, boolean)**: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. +- **`timeout` (Optional, string | -1 | 0)**: The time to wait for the job to be deleted. +## client.ml.deleteDatafeed [_ml.delete_datafeed] Delete a datafeed. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed) @@ -7964,19 +8388,27 @@ Delete a datafeed. client.ml.deleteDatafeed({ datafeed_id }) ``` +### Arguments [_arguments_ml.delete_datafeed] -### Arguments [_arguments_256] - -* **Request (object):** - - * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`force` (Optional, boolean)**: Use to forcefully delete a started datafeed; this method is quicker than stopping and deleting the datafeed. - - +#### Request (object) [_request_ml.delete_datafeed] +- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This +identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It must start and end with alphanumeric +characters. +- **`force` (Optional, boolean)**: Use to forcefully delete a started datafeed; this method is quicker than +stopping and deleting the datafeed. -### delete_expired_data [_delete_expired_data] +## client.ml.deleteExpiredData [_ml.delete_expired_data] +Delete expired ML data. -Delete expired ML data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the , or by omitting the . +Delete all job results, model snapshots and forecast data that have exceeded +their retention days period. Machine learning state documents that are not +associated with any job are also deleted. +You can limit the request to a single or set of anomaly detection jobs by +using a job identifier, a group name, a list of jobs, or a +wildcard expression. You can delete expired data for all anomaly detection +jobs by using `_all`, by specifying `*` as the ``, or by omitting the +``. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data) @@ -7984,20 +8416,20 @@ Delete expired ML data. Deletes all job results, model snapshots and forecast da client.ml.deleteExpiredData({ ... }) ``` +### Arguments [_arguments_ml.delete_expired_data] -### Arguments [_arguments_257] +#### Request (object) [_request_ml.delete_expired_data] +- **`job_id` (Optional, string)**: Identifier for an anomaly detection job. It can be a job identifier, a +group name, or a wildcard expression. +- **`requests_per_second` (Optional, float)**: The desired requests per second for the deletion processes. The default +behavior is no throttling. 
+- **`timeout` (Optional, string | -1 | 0)**: How long can the underlying delete processes run until they are canceled. -* **Request (object):** +## client.ml.deleteFilter [_ml.delete_filter] +Delete a filter. - * **`job_id` (Optional, string)**: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. - * **`requests_per_second` (Optional, float)**: The desired requests per second for the deletion processes. The default behavior is no throttling. - * **`timeout` (Optional, string | -1 | 0)**: How long can the underlying delete processes run until they are canceled. - - - -### delete_filter [_delete_filter] - -Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. +If an anomaly detection job references the filter, you cannot delete the +filter. You must update or delete the job before you can delete the filter. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter) @@ -8005,18 +8437,18 @@ Delete a filter. If an anomaly detection job references the filter, you cannot d client.ml.deleteFilter({ filter_id }) ``` +### Arguments [_arguments_ml.delete_filter] -### Arguments [_arguments_258] - -* **Request (object):** - - * **`filter_id` (string)**: A string that uniquely identifies a filter. - - +#### Request (object) [_request_ml.delete_filter] +- **`filter_id` (string)**: A string that uniquely identifies a filter. -### delete_forecast [_delete_forecast] +## client.ml.deleteForecast [_ml.delete_forecast] +Delete forecasts from a job. -Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. +By default, forecasts are retained for 14 days. You can specify a +different retention period with the `expires_in` parameter in the forecast +jobs API. The delete forecast API enables you to delete one or more +forecasts before they expire. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast) @@ -8024,21 +8456,30 @@ Delete forecasts from a job. By default, forecasts are retained for 14 days. You client.ml.deleteForecast({ job_id }) ``` +### Arguments [_arguments_ml.delete_forecast] -### Arguments [_arguments_259] +#### Request (object) [_request_ml.delete_forecast] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`forecast_id` (Optional, string)**: A list of forecast identifiers. If you do not specify +this optional parameter or if you specify `_all` or `*` the API deletes +all forecasts from the job. +- **`allow_no_forecasts` (Optional, boolean)**: Specifies whether an error occurs when there are no forecasts. In +particular, if this parameter is set to `false` and there are no +forecasts associated with the job, attempts to delete all forecasts +return an error. +- **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for the completion of the delete +operation. When this period of time elapses, the API fails and returns an +error. -* **Request (object):** +## client.ml.deleteJob [_ml.delete_job] +Delete an anomaly detection job. - * **`job_id` (string)**: Identifier for the anomaly detection job. 
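A minimal sketch (the job id is a placeholder; per the parameters described below, `_all` targets every forecast for the job):

```ts
// Delete all forecasts for a job before their scheduled expiry.
await client.ml.deleteForecast({
  job_id: 'total-requests',
  forecast_id: '_all'
})
```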
- * **`forecast_id` (Optional, string)**: A list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all` or `*` the API deletes all forecasts from the job. - * **`allow_no_forecasts` (Optional, boolean)**: Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error. - * **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error. - - - -### delete_job [_delete_job] - -Delete an anomaly detection job. All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request. +All job configuration, model state and results are deleted. +It is not currently possible to delete multiple jobs using wildcards or a +comma separated list. If you delete a job that has a datafeed, the request +first tries to delete the datafeed. This behavior is equivalent to calling +the delete datafeed API with the same timeout and force parameters as the +delete job request. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job) @@ -8046,21 +8487,24 @@ Delete an anomaly detection job. All job configuration, model state and results client.ml.deleteJob({ job_id }) ``` +### Arguments [_arguments_ml.delete_job] -### Arguments [_arguments_260] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`force` (Optional, boolean)**: Use to forcefully delete an opened job; this method is quicker than closing and deleting the job. - * **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. - * **`wait_for_completion` (Optional, boolean)**: Specifies whether the request should return immediately or wait until the job deletion completes. - - +#### Request (object) [_request_ml.delete_job] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`force` (Optional, boolean)**: Use to forcefully delete an opened job; this method is quicker than +closing and deleting the job. +- **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the +user should be deleted along with any auto-generated annotations when the job is +reset. +- **`wait_for_completion` (Optional, boolean)**: Specifies whether the request should return immediately or wait until the +job deletion completes. -### delete_model_snapshot [_delete_model_snapshot] +## client.ml.deleteModelSnapshot [_ml.delete_model_snapshot] +Delete a model snapshot. -Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. +You cannot delete the active model snapshot. To delete that snapshot, first +revert to a different one. 
To identify the active model snapshot, refer to +the `model_snapshot_id` in the results from the get jobs API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot) @@ -8068,19 +8512,16 @@ Delete a model snapshot. You cannot delete the active model snapshot. To delete client.ml.deleteModelSnapshot({ job_id, snapshot_id }) ``` +### Arguments [_arguments_ml.delete_model_snapshot] -### Arguments [_arguments_261] +#### Request (object) [_request_ml.delete_model_snapshot] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: Identifier for the model snapshot. -* **Request (object):** +## client.ml.deleteTrainedModel [_ml.delete_trained_model] +Delete an unreferenced trained model. - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`snapshot_id` (string)**: Identifier for the model snapshot. - - - -### delete_trained_model [_delete_trained_model] - -Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline. +The request deletes a trained inference model that is not referenced by an ingest pipeline. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model) @@ -8088,20 +8529,19 @@ Delete an unreferenced trained model. The request deletes a trained inference mo client.ml.deleteTrainedModel({ model_id }) ``` +### Arguments [_arguments_ml.delete_trained_model] -### Arguments [_arguments_262] - -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. - * **`force` (Optional, boolean)**: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - +#### Request (object) [_request_ml.delete_trained_model] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`force` (Optional, boolean)**: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -### delete_trained_model_alias [_delete_trained_model_alias] +## client.ml.deleteTrainedModelAlias [_ml.delete_trained_model_alias] +Delete a trained model alias. -Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. +This API deletes an existing model alias that refers to a trained model. If +the model alias is missing or refers to a model other than the one identified +by the `model_id`, this API returns an error. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias) @@ -8109,40 +8549,50 @@ Delete a trained model alias. This API deletes an existing model alias that refe client.ml.deleteTrainedModelAlias({ model_alias, model_id }) ``` +### Arguments [_arguments_ml.delete_trained_model_alias] -### Arguments [_arguments_263] +#### Request (object) [_request_ml.delete_trained_model_alias] +- **`model_alias` (string)**: The model alias to delete. 
+- **`model_id` (string)**: The trained model ID to which the model alias refers. -* **Request (object):** +## client.ml.estimateModelMemory [_ml.estimate_model_memory] +Estimate job model memory usage. - * **`model_alias` (string)**: The model alias to delete. - * **`model_id` (string)**: The trained model ID to which the model alias refers. +Make an estimation of the memory usage for an anomaly detection job model. +The estimate is based on analysis configuration details for the job and cardinality +estimates for the fields it references. - - -### estimate_model_memory [_estimate_model_memory] - -Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory) ```ts client.ml.estimateModelMemory({ ... }) ``` - -### Arguments [_arguments_264] - -* **Request (object):** - - * **`analysis_config` (Optional, { bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: For a list of the properties that you can specify in the `analysis_config` component of the body of this API. - * **`max_bucket_cardinality` (Optional, Record)**: Estimates of the highest cardinality in a single bucket that is observed for influencer fields over the time period that the job analyzes data. To produce a good answer, values must be provided for all influencer fields. Providing values for fields that are not listed as `influencers` has no effect on the estimation. - * **`overall_cardinality` (Optional, Record)**: Estimates of the cardinality that is observed for fields over the whole time period that the job analyzes data. To produce a good answer, values must be provided for fields referenced in the `by_field_name`, `over_field_name` and `partition_field_name` of any detectors. Providing values for other fields has no effect on the estimation. It can be omitted from the request if no detectors have a `by_field_name`, `over_field_name` or `partition_field_name`. - - - -### evaluate_data_frame [_evaluate_data_frame] - -Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. +### Arguments [_arguments_ml.estimate_model_memory] + +#### Request (object) [_request_ml.estimate_model_memory] +- **`analysis_config` (Optional, { bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: For a list of the properties that you can specify in the +`analysis_config` component of the body of this API. +- **`max_bucket_cardinality` (Optional, Record)**: Estimates of the highest cardinality in a single bucket that is observed +for influencer fields over the time period that the job analyzes data. +To produce a good answer, values must be provided for all influencer +fields. 
Providing values for fields that are not listed as `influencers` +has no effect on the estimation. +- **`overall_cardinality` (Optional, Record)**: Estimates of the cardinality that is observed for fields over the whole +time period that the job analyzes data. To produce a good answer, values +must be provided for fields referenced in the `by_field_name`, +`over_field_name` and `partition_field_name` of any detectors. Providing +values for other fields has no effect on the estimation. It can be +omitted from the request if no detectors have a `by_field_name`, +`over_field_name` or `partition_field_name`. + +## client.ml.evaluateDataFrame [_ml.evaluate_data_frame] +Evaluate data frame analytics. + +The API packages together commonly used evaluation metrics for various types +of machine learning features. This has been designed for use on indexes +created by data frame analytics. Evaluation requires both a ground truth +field and an analytics result field to be present. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame) @@ -8150,20 +8600,22 @@ Evaluate data frame analytics. The API packages together commonly used evaluatio client.ml.evaluateDataFrame({ evaluation, index }) ``` +### Arguments [_arguments_ml.evaluate_data_frame] -### Arguments [_arguments_265] - -* **Request (object):** +#### Request (object) [_request_ml.evaluate_data_frame] +- **`evaluation` ({ classification, outlier_detection, regression })**: Defines the type of evaluation you want to perform. +- **`index` (string)**: Defines the `index` in which the evaluation will be performed. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query clause that retrieves a subset of data from the source index. - * **`evaluation` ({ classification, outlier_detection, regression })**: Defines the type of evaluation you want to perform. - * **`index` (string)**: Defines the `index` in which the evaluation will be performed. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query clause that retrieves a subset of data from the source index. 
+## client.ml.explainDataFrameAnalytics [_ml.explain_data_frame_analytics] +Explain data frame analytics config. - - -### explain_data_frame_analytics [_explain_data_frame_analytics] - -Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. +This API provides explanations for a data frame analytics config that either +exists already or one that has not been created yet. The following +explanations are provided: +* which fields are included or not in the analysis and why, +* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. +If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics) @@ -8171,26 +8623,47 @@ Explain data frame analytics config. This API provides explanations for a data f client.ml.explainDataFrameAnalytics({ ... }) ``` - -### Arguments [_arguments_266] - -* **Request (object):** - - * **`id` (Optional, string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`source` (Optional, { index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified. - * **`dest` (Optional, { index, results_field })**: The destination configuration, consisting of index and optionally results_field (ml by default). - * **`analysis` (Optional, { classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. - * **`description` (Optional, string)**: A description of the job. - * **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. - * **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. - * **`analyzed_fields` (Optional, { includes, excludes })**: Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis. 
- * **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node.
-
-
-### flush_job [_flush_job]
-
-Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.
+### Arguments [_arguments_ml.explain_data_frame_analytics]
+
+#### Request (object) [_request_ml.explain_data_frame_analytics]
+- **`id` (Optional, string)**: Identifier for the data frame analytics job. This identifier can contain
+lowercase alphanumeric characters (a-z and 0-9), hyphens, and
+underscores. It must start and end with alphanumeric characters.
+- **`source` (Optional, { index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data. It requires an
+index. Optionally, query and _source may be specified.
+- **`dest` (Optional, { index, results_field })**: The destination configuration, consisting of index and optionally
+results_field (ml by default).
+- **`analysis` (Optional, { classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to
+perform one of the following types of analysis: classification, outlier
+detection, or regression.
+- **`description` (Optional, string)**: A description of the job.
+- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for
+analytical processing. If your `elasticsearch.yml` file contains an
+`xpack.ml.max_model_memory_limit` setting, an error occurs when you try to
+create data frame analytics jobs that have `model_memory_limit` values
+greater than that setting.
+- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more
+threads may decrease the time necessary to complete the analysis at the
+cost of using more CPU. Note that the process may use additional threads
+for operational functionality other than the analysis itself.
+- **`analyzed_fields` (Optional, { includes, excludes })**: Specify includes and/or excludes patterns to select which fields will be
+included in the analysis. The patterns specified in excludes are applied
+last, therefore excludes takes precedence. In other words, if the same
+field is specified in both includes and excludes, then the field will not
+be included in the analysis.
+- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine
+learning node capacity for it to be immediately assigned to a node.
+
+## client.ml.flushJob [_ml.flush_job]
+Force buffered data to be processed.
+The flush jobs API is only applicable when sending data for analysis using
+the post data API. Depending on the content of the buffer, it might
+additionally calculate new results. Both flush and close operations are
+similar; however, the flush is more efficient if you are expecting to send
+more data for analysis.
When flushing, the job remains open and is available
+to continue analyzing data. A close operation additionally prunes and
+persists the model state to disk and the job must be opened again before
+analyzing further data.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job)

@@ -8198,25 +8671,23 @@ Force buffered data to be processed. The flush jobs API is only applicable when
client.ml.flushJob({ job_id })
```

+### Arguments [_arguments_ml.flush_job]

-### Arguments [_arguments_267]
-
-* **Request (object):**
-
- * **`job_id` (string)**: Identifier for the anomaly detection job.
- * **`advance_time` (Optional, string | Unit)**: Refer to the description for the `advance_time` query parameter.
- * **`calc_interim` (Optional, boolean)**: Refer to the description for the `calc_interim` query parameter.
- * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter.
- * **`skip_time` (Optional, string | Unit)**: Refer to the description for the `skip_time` query parameter.
- * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter.
-
-
-
-### forecast [_forecast]

+#### Request (object) [_request_ml.flush_job]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`advance_time` (Optional, string | Unit)**: Refer to the description for the `advance_time` query parameter.
+- **`calc_interim` (Optional, boolean)**: Refer to the description for the `calc_interim` query parameter.
+- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter.
+- **`skip_time` (Optional, string | Unit)**: Refer to the description for the `skip_time` query parameter.
+- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter.

+## client.ml.forecast [_ml.forecast]
Predict future behavior of a time series.

-Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forcasts predict future behavior based on historical data.
+Forecasts are not supported for jobs that perform population analysis; an
+error occurs if you try to create a forecast for a job that has an
+`over_field_name` in its configuration. Forecasts predict future behavior
+based on historical data.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast)

@@ -8224,21 +8695,18 @@ Forecasts are not supported for jobs that perform population analysis; an error
client.ml.forecast({ job_id })
```

+### Arguments [_arguments_ml.forecast]

-### Arguments [_arguments_268]
-
-* **Request (object):**
-
- * **`job_id` (string)**: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs.
- * **`duration` (Optional, string | -1 | 0)**: Refer to the description for the `duration` query parameter.
- * **`expires_in` (Optional, string | -1 | 0)**: Refer to the description for the `expires_in` query parameter.
- * **`max_model_memory` (Optional, string)**: Refer to the description for the `max_model_memory` query parameter.
-
-
-
-### get_buckets [_get_buckets]

+#### Request (object) [_request_ml.forecast]
+- **`job_id` (string)**: Identifier for the anomaly detection job. The job must be open when you
+create a forecast; otherwise, an error occurs.
+
- **`duration` (Optional, string | -1 | 0)**: Refer to the description for the `duration` query parameter.
+- **`expires_in` (Optional, string | -1 | 0)**: Refer to the description for the `expires_in` query parameter.
+- **`max_model_memory` (Optional, string)**: Refer to the description for the `max_model_memory` query parameter.

-Get anomaly detection job results for buckets.
+## client.ml.getBuckets [_ml.get_buckets]
+Get anomaly detection job results for buckets.
+The API presents a chronological view of the records, grouped by bucket.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets)

@@ -8246,28 +8714,24 @@ Get anomaly detection job results for buckets. The API presents a chronological
client.ml.getBuckets({ job_id })
```

-
-### Arguments [_arguments_269]
-
-* **Request (object):**
-
- * **`job_id` (string)**: Identifier for the anomaly detection job.
- * **`timestamp` (Optional, string | Unit)**: The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets.
- * **`anomaly_score` (Optional, number)**: Refer to the description for the `anomaly_score` query parameter.
- * **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter.
- * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter.
- * **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
- * **`expand` (Optional, boolean)**: Refer to the description for the `expand` query parameter.
- * **`page` (Optional, { from, size })**
- * **`sort` (Optional, string)**: Refer to the desription for the `sort` query parameter.
- * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter.
- * **`from` (Optional, number)**: Skips the specified number of buckets.
- * **`size` (Optional, number)**: Specifies the maximum number of buckets to obtain.
-
-
-### get_calendar_events [_get_calendar_events]
-
+### Arguments [_arguments_ml.get_buckets]
+
+#### Request (object) [_request_ml.get_buckets]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`timestamp` (Optional, string | Unit)**: The timestamp of a single bucket result. If you do not specify this
+parameter, the API returns information about all buckets.
+- **`anomaly_score` (Optional, number)**: Refer to the description for the `anomaly_score` query parameter.
+- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter.
+- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter.
+- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
+- **`expand` (Optional, boolean)**: Refer to the description for the `expand` query parameter.
+- **`page` (Optional, { from, size })**
+- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
+- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter.
+- **`from` (Optional, number)**: Skips the specified number of buckets.
+- **`size` (Optional, number)**: Specifies the maximum number of buckets to obtain.
+
+## client.ml.getCalendarEvents [_ml.get_calendar_events]
Get info about events in calendars.
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events) @@ -8276,22 +8740,17 @@ Get info about events in calendars. client.ml.getCalendarEvents({ calendar_id }) ``` +### Arguments [_arguments_ml.get_calendar_events] -### Arguments [_arguments_270] - -* **Request (object):** - - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. - * **`end` (Optional, string | Unit)**: Specifies to get events with timestamps earlier than this time. - * **`from` (Optional, number)**: Skips the specified number of events. - * **`job_id` (Optional, string)**: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`. - * **`size` (Optional, number)**: Specifies the maximum number of events to obtain. - * **`start` (Optional, string | Unit)**: Specifies to get events with timestamps after this time. - - - -### get_calendars [_get_calendars] +#### Request (object) [_request_ml.get_calendar_events] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. +- **`end` (Optional, string | Unit)**: Specifies to get events with timestamps earlier than this time. +- **`from` (Optional, number)**: Skips the specified number of events. +- **`job_id` (Optional, string)**: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`. +- **`size` (Optional, number)**: Specifies the maximum number of events to obtain. +- **`start` (Optional, string | Unit)**: Specifies to get events with timestamps after this time. +## client.ml.getCalendars [_ml.get_calendars] Get calendar configuration info. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars) @@ -8300,20 +8759,15 @@ Get calendar configuration info. client.ml.getCalendars({ ... }) ``` +### Arguments [_arguments_ml.get_calendars] -### Arguments [_arguments_271] - -* **Request (object):** - - * **`calendar_id` (Optional, string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. - * **`page` (Optional, { from, size })**: This object is supported only when you omit the calendar identifier. - * **`from` (Optional, number)**: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. - * **`size` (Optional, number)**: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. - - - -### get_categories [_get_categories] +#### Request (object) [_request_ml.get_calendars] +- **`calendar_id` (Optional, string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. 
You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. +- **`page` (Optional, { from, size })**: This object is supported only when you omit the calendar identifier. +- **`from` (Optional, number)**: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. +- **`size` (Optional, number)**: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. +## client.ml.getCategories [_ml.get_categories] Get anomaly detection job results for categories. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories) @@ -8322,23 +8776,26 @@ Get anomaly detection job results for categories. client.ml.getCategories({ job_id }) ``` +### Arguments [_arguments_ml.get_categories] -### Arguments [_arguments_272] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`category_id` (Optional, string)**: Identifier for the category, which is unique in the job. If you specify neither the category ID nor the partition_field_value, the API returns information about all categories. If you specify only the partition_field_value, it returns information about all categories for the specified partition. - * **`page` (Optional, { from, size })**: Configures pagination. This parameter has the `from` and `size` properties. - * **`from` (Optional, number)**: Skips the specified number of categories. - * **`partition_field_value` (Optional, string)**: Only return categories for the specified partition. - * **`size` (Optional, number)**: Specifies the maximum number of categories to obtain. - +#### Request (object) [_request_ml.get_categories] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`category_id` (Optional, string)**: Identifier for the category, which is unique in the job. If you specify +neither the category ID nor the partition_field_value, the API returns +information about all categories. If you specify only the +partition_field_value, it returns information about all categories for +the specified partition. +- **`page` (Optional, { from, size })**: Configures pagination. +This parameter has the `from` and `size` properties. +- **`from` (Optional, number)**: Skips the specified number of categories. +- **`partition_field_value` (Optional, string)**: Only return categories for the specified partition. +- **`size` (Optional, number)**: Specifies the maximum number of categories to obtain. - -### get_data_frame_analytics [_get_data_frame_analytics] - -Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a list of data frame analytics jobs or a wildcard expression. +## client.ml.getDataFrameAnalytics [_ml.get_data_frame_analytics] +Get data frame analytics job configuration info. +You can get information for multiple data frame analytics jobs in a single +API request by using a list of data frame analytics jobs or a +wildcard expression. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics) @@ -8346,24 +8803,30 @@ Get data frame analytics job configuration info. You can get information for mul client.ml.getDataFrameAnalytics({ ... 
}) ``` +### Arguments [_arguments_ml.get_data_frame_analytics] -### Arguments [_arguments_273] - -* **Request (object):** - - * **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +#### Request (object) [_request_ml.get_data_frame_analytics] +- **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this +option, the API returns information for the first hundred data frame +analytics jobs. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - 1. Contains wildcard expressions and there are no data frame analytics jobs that match. - 2. Contains the `_all` string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. +The default value returns an empty data_frame_analytics array when there +are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a 404 status code when +there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of data frame analytics jobs. +- **`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain. +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. -The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. ** *`from` (Optional, number)**: Skips the specified number of data frame analytics jobs. *** *`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain. ** *`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. - - -### get_data_frame_analytics_stats [_get_data_frame_analytics_stats] - +## client.ml.getDataFrameAnalyticsStats [_ml.get_data_frame_analytics_stats] Get data frame analytics jobs usage info. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats) @@ -8372,25 +8835,35 @@ Get data frame analytics jobs usage info. client.ml.getDataFrameAnalyticsStats({ ... }) ``` - -### Arguments [_arguments_274] - -* **Request (object):** - - * **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - 1. Contains wildcard expressions and there are no data frame analytics jobs that match. - 2. 
Contains the `_all` string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. - - -The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. ** *`from` (Optional, number)**: Skips the specified number of data frame analytics jobs. *** *`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain. ** *`verbose` (Optional, boolean)**: Defines whether the stats response should be verbose. - - -### get_datafeed_stats [_get_datafeed_stats] - -Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. +### Arguments [_arguments_ml.get_data_frame_analytics_stats] + +#### Request (object) [_request_ml.get_data_frame_analytics_stats] +- **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this +option, the API returns information for the first hundred data frame +analytics jobs. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value returns an empty data_frame_analytics array when there +are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a 404 status code when +there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of data frame analytics jobs. +- **`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain. +- **`verbose` (Optional, boolean)**: Defines whether the stats response should be verbose. + +## client.ml.getDatafeedStats [_ml.get_datafeed_stats] +Get datafeeds usage info. +You can get statistics for multiple datafeeds in a single API request by +using a list of datafeeds or a wildcard expression. You can +get statistics for all datafeeds by using `_all`, by specifying `*` as the +``, or by omitting the ``. If the datafeed is stopped, the +only information you receive is the `datafeed_id` and the `state`. +This API returns a maximum of 10,000 datafeeds. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats) @@ -8398,25 +8871,30 @@ Get datafeeds usage info. You can get statistics for multiple datafeeds in a sin client.ml.getDatafeedStats({ ... }) ``` +### Arguments [_arguments_ml.get_datafeed_stats] -### Arguments [_arguments_275] - -* **Request (object):** - - * **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - 1. 
Contains wildcard expressions and there are no datafeeds that match. - 2. Contains the `_all` string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. - +#### Request (object) [_request_ml.get_datafeed_stats] +- **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed. It can be a datafeed identifier or a +wildcard expression. If you do not specify one of these options, the API +returns information about all datafeeds. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: -The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. +1. Contains wildcard expressions and there are no datafeeds that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. +The default value is `true`, which returns an empty `datafeeds` array +when there are no matches and the subset of results when there are +partial matches. If this parameter is `false`, the request returns a +`404` status code when there are no matches or only partial matches. -### get_datafeeds [_get_datafeeds] - -Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. This API returns a maximum of 10,000 datafeeds. +## client.ml.getDatafeeds [_ml.get_datafeeds] +Get datafeeds configuration info. +You can get information for multiple datafeeds in a single API request by +using a list of datafeeds or a wildcard expression. You can +get information for all datafeeds by using `_all`, by specifying `*` as the +``, or by omitting the ``. +This API returns a maximum of 10,000 datafeeds. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds) @@ -8424,25 +8902,29 @@ Get datafeeds configuration info. You can get information for multiple datafeeds client.ml.getDatafeeds({ ... }) ``` +### Arguments [_arguments_ml.get_datafeeds] -### Arguments [_arguments_276] - -* **Request (object):** - - * **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - 1. Contains wildcard expressions and there are no datafeeds that match. - 2. Contains the `_all` string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. +#### Request (object) [_request_ml.get_datafeeds] +- **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed. It can be a datafeed identifier or a +wildcard expression. If you do not specify one of these options, the API +returns information about all datafeeds. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +1. Contains wildcard expressions and there are no datafeeds that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. 
Contains wildcard expressions and there are only partial matches. -The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. *** *`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. +The default value is `true`, which returns an empty `datafeeds` array +when there are no matches and the subset of results when there are +partial matches. If this parameter is `false`, the request returns a +`404` status code when there are no matches or only partial matches. +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. - -### get_filters [_get_filters] - -Get filters. You can get a single filter or all filters. +## client.ml.getFilters [_ml.get_filters] +Get filters. +You can get a single filter or all filters. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters) @@ -8450,20 +8932,18 @@ Get filters. You can get a single filter or all filters. client.ml.getFilters({ ... }) ``` +### Arguments [_arguments_ml.get_filters] -### Arguments [_arguments_277] - -* **Request (object):** - - * **`filter_id` (Optional, string | string[])**: A string that uniquely identifies a filter. - * **`from` (Optional, number)**: Skips the specified number of filters. - * **`size` (Optional, number)**: Specifies the maximum number of filters to obtain. +#### Request (object) [_request_ml.get_filters] +- **`filter_id` (Optional, string | string[])**: A string that uniquely identifies a filter. +- **`from` (Optional, number)**: Skips the specified number of filters. +- **`size` (Optional, number)**: Specifies the maximum number of filters to obtain. - - -### get_influencers [_get_influencers] - -Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. +## client.ml.getInfluencers [_ml.get_influencers] +Get anomaly detection job results for influencers. +Influencers are the entities that have contributed to, or are to blame for, +the anomalies. Influencer results are available only if an +`influencer_field_name` is specified in the job configuration. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers) @@ -8471,26 +8951,28 @@ Get anomaly detection job results for influencers. Influencers are the entities client.ml.getInfluencers({ job_id }) ``` - -### Arguments [_arguments_278] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`page` (Optional, { from, size })**: Configures pagination. This parameter has the `from` and `size` properties. - * **`desc` (Optional, boolean)**: If true, the results are sorted in descending order. - * **`end` (Optional, string | Unit)**: Returns influencers with timestamps earlier than this time. 
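+
+For example, the following sketch requests only high-scoring influencers
+(the job ID and score threshold are illustrative):
+
+```
+const response = await client.ml.getInfluencers({
+  job_id: 'my-anomaly-job',  // hypothetical anomaly detection job
+  influencer_score: 75,      // keep only strong influencers
+  sort: 'influencer_score',
+  desc: true,
+  size: 10
+})
+console.log(response.influencers)
+```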
The default value means it is unset and results are not limited to specific timestamps. - * **`exclude_interim` (Optional, boolean)**: If true, the output excludes interim results. By default, interim results are included. - * **`influencer_score` (Optional, number)**: Returns influencers with anomaly scores greater than or equal to this value. - * **`from` (Optional, number)**: Skips the specified number of influencers. - * **`size` (Optional, number)**: Specifies the maximum number of influencers to obtain. - * **`sort` (Optional, string)**: Specifies the sort field for the requested influencers. By default, the influencers are sorted by the `influencer_score` value. - * **`start` (Optional, string | Unit)**: Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps. - - - -### get_job_stats [_get_job_stats] - +### Arguments [_arguments_ml.get_influencers] + +#### Request (object) [_request_ml.get_influencers] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`page` (Optional, { from, size })**: Configures pagination. +This parameter has the `from` and `size` properties. +- **`desc` (Optional, boolean)**: If true, the results are sorted in descending order. +- **`end` (Optional, string | Unit)**: Returns influencers with timestamps earlier than this time. +The default value means it is unset and results are not limited to +specific timestamps. +- **`exclude_interim` (Optional, boolean)**: If true, the output excludes interim results. By default, interim results +are included. +- **`influencer_score` (Optional, number)**: Returns influencers with anomaly scores greater than or equal to this +value. +- **`from` (Optional, number)**: Skips the specified number of influencers. +- **`size` (Optional, number)**: Specifies the maximum number of influencers to obtain. +- **`sort` (Optional, string)**: Specifies the sort field for the requested influencers. By default, the +influencers are sorted by the `influencer_score` value. +- **`start` (Optional, string | Unit)**: Returns influencers with timestamps after this time. The default value +means it is unset and results are not limited to specific timestamps. + +## client.ml.getJobStats [_ml.get_job_stats] Get anomaly detection jobs usage info. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats) @@ -8499,25 +8981,30 @@ Get anomaly detection jobs usage info. client.ml.getJobStats({ ... }) ``` +### Arguments [_arguments_ml.get_job_stats] -### Arguments [_arguments_279] - -* **Request (object):** - - * **`job_id` (Optional, string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, a list of jobs, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +#### Request (object) [_request_ml.get_job_stats] +- **`job_id` (Optional, string)**: Identifier for the anomaly detection job. It can be a job identifier, a +group name, a list of jobs, or a wildcard expression. If +you do not specify one of these options, the API returns information for +all anomaly detection jobs. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - 1. Contains wildcard expressions and there are no jobs that match. - 2. Contains the _all string or no identifiers and there are no matches. - 3. 
Contains wildcard expressions and there are only partial matches. +1. Contains wildcard expressions and there are no jobs that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. +If `true`, the API returns an empty `jobs` array when +there are no matches and the subset of results when there are partial +matches. If `false`, the API returns a `404` status +code when there are no matches or only partial matches. -If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a `404` status code when there are no matches or only partial matches. - - -### get_jobs [_get_jobs] - -Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. +## client.ml.getJobs [_ml.get_jobs] +Get anomaly detection jobs configuration info. +You can get information for multiple anomaly detection jobs in a single API +request by using a group name, a list of jobs, or a wildcard +expression. You can get information for all anomaly detection jobs by using +`_all`, by specifying `*` as the ``, or by omitting the ``. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs) @@ -8525,25 +9012,30 @@ Get anomaly detection jobs configuration info. You can get information for multi client.ml.getJobs({ ... }) ``` +### Arguments [_arguments_ml.get_jobs] -### Arguments [_arguments_280] - -* **Request (object):** +#### Request (object) [_request_ml.get_jobs] +- **`job_id` (Optional, string | string[])**: Identifier for the anomaly detection job. It can be a job identifier, a +group name, or a wildcard expression. If you do not specify one of these +options, the API returns information for all anomaly detection jobs. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - * **`job_id` (Optional, string | string[])**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +1. Contains wildcard expressions and there are no jobs that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. - 1. Contains wildcard expressions and there are no jobs that match. - 2. Contains the _all string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. +The default value is `true`, which returns an empty `jobs` array when +there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status +code when there are no matches or only partial matches. +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. 
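+
+For example, a minimal sketch that fetches every job in a hypothetical
+group without failing when nothing matches:
+
+```
+const response = await client.ml.getJobs({
+  job_id: 'my-jobs-*',      // illustrative wildcard expression
+  allow_no_match: true,
+  exclude_generated: true
+})
+console.log(response.count, response.jobs)
+```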
- -The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. *** *`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. - - -### get_memory_stats [_get_memory_stats] - -Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM. +## client.ml.getMemoryStats [_ml.get_memory_stats] +Get machine learning memory usage info. +Get information about how machine learning jobs and trained models are using memory, +on each node, both within the JVM heap, and natively, outside of the JVM. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats) @@ -8551,19 +9043,17 @@ Get machine learning memory usage info. Get information about how machine learni client.ml.getMemoryStats({ ... }) ``` +### Arguments [_arguments_ml.get_memory_stats] -### Arguments [_arguments_281] - -* **Request (object):** - - * **`node_id` (Optional, string)**: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_model_snapshot_upgrade_stats [_get_model_snapshot_upgrade_stats] +#### Request (object) [_request_ml.get_memory_stats] +- **`node_id` (Optional, string)**: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or +`ml:true` +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout +expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request +fails and returns an error. +## client.ml.getModelSnapshotUpgradeStats [_ml.get_model_snapshot_upgrade_stats] Get anomaly detection job model snapshot upgrade usage info. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats) @@ -8572,25 +9062,24 @@ Get anomaly detection job model snapshot upgrade usage info. client.ml.getModelSnapshotUpgradeStats({ job_id, snapshot_id }) ``` +### Arguments [_arguments_ml.get_model_snapshot_upgrade_stats] -### Arguments [_arguments_282] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. 
- * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - * Contains wildcard expressions and there are no jobs that match. - * Contains the _all string or no identifiers and there are no matches. - * Contains wildcard expressions and there are only partial matches. +#### Request (object) [_request_ml.get_model_snapshot_upgrade_stats] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple +snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, +by specifying `*` as the snapshot ID, or by omitting the snapshot ID. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + - Contains wildcard expressions and there are no jobs that match. + - Contains the _all string or no identifiers and there are no matches. + - Contains wildcard expressions and there are only partial matches. -The default value is true, which returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. - - -### get_model_snapshots [_get_model_snapshots] +The default value is true, which returns an empty jobs array when there are no matches and the subset of results +when there are partial matches. If this parameter is false, the request returns a 404 status code when there are +no matches or only partial matches. +## client.ml.getModelSnapshots [_ml.get_model_snapshots] Get model snapshots info. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots) @@ -8599,30 +9088,41 @@ Get model snapshots info. client.ml.getModelSnapshots({ job_id }) ``` +### Arguments [_arguments_ml.get_model_snapshots] -### Arguments [_arguments_283] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`snapshot_id` (Optional, string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. - * **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. - * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. - * **`page` (Optional, { from, size })** - * **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter. - * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. - * **`from` (Optional, number)**: Skips the specified number of snapshots. - * **`size` (Optional, number)**: Specifies the maximum number of snapshots to obtain. - - - -### get_overall_buckets [_get_overall_buckets] +#### Request (object) [_request_ml.get_model_snapshots] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (Optional, string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple +snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, +by specifying `*` as the snapshot ID, or by omitting the snapshot ID. 
+
- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter.
+- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter.
+- **`page` (Optional, { from, size })**
+- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
+- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter.
+- **`from` (Optional, number)**: Skips the specified number of snapshots.
+- **`size` (Optional, number)**: Specifies the maximum number of snapshots to obtain.

## client.ml.getOverallBuckets [_ml.get_overall_buckets]
Get overall bucket results.

-Retrievs overall bucket results that summarize the bucket results of multiple anomaly detection jobs.
-
-The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span.
+Retrieves overall bucket results that summarize the bucket results of
+multiple anomaly detection jobs.
+
+The `overall_score` is calculated by combining the scores of all the
+buckets within the overall bucket span. First, the maximum
+`anomaly_score` per anomaly detection job in the overall bucket is
+calculated. Then the `top_n` of those scores are averaged to result in
+the `overall_score`. This means that you can fine-tune the
+`overall_score` so that it is more or less sensitive to the number of
+jobs that detect an anomaly at the same time. For example, if you set
+`top_n` to `1`, the `overall_score` is the maximum bucket score in the
+overall bucket. Alternatively, if you set `top_n` to the number of jobs,
+the `overall_score` is high only when all jobs detect anomalies in that
+overall bucket. If you set the `bucket_span` parameter (to a value
+greater than its default), the `overall_score` is the maximum
+`overall_score` of the overall buckets that have a span equal to the
+jobs' largest bucket span.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets)

@@ -8630,20 +9130,35 @@ The `overall_score` is calculated by combining the scores of all the buckets wit
client.ml.getOverallBuckets({ job_id })
```

-
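+
+As an illustration of `top_n`: with three jobs and `top_n: 2`, each overall
+bucket is scored with the mean of the two highest per-job maximum anomaly
+scores. A sketch with hypothetical job IDs:
+
+```
+const response = await client.ml.getOverallBuckets({
+  job_id: 'job-1,job-2,job-3',  // hypothetical job IDs
+  top_n: 2,                     // average the two highest per-job scores
+  overall_score: 50,            // return buckets scoring at least 50
+  exclude_interim: true
+})
+console.log(response.overall_buckets)
+```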
*** *`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. *** *`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. *** *`overall_score` (Optional, number | string)**: Refer to the description for the `overall_score` query parameter. *** *`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. ** *`top_n` (Optional, number)**: Refer to the description for the `top_n` query parameter. - - -### get_records [_get_records] - -Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors. +### Arguments [_arguments_ml.get_overall_buckets] + +#### Request (object) [_request_ml.get_overall_buckets] +- **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a +group name, a list of jobs or groups, or a wildcard +expression. + +You can summarize the bucket results for all anomaly detection jobs by +using `_all` or by specifying `*` as the ``. +- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. +- **`bucket_span` (Optional, string | -1 | 0)**: Refer to the description for the `bucket_span` query parameter. +- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. +- **`overall_score` (Optional, number | string)**: Refer to the description for the `overall_score` query parameter. +- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`top_n` (Optional, number)**: Refer to the description for the `top_n` query parameter. + +## client.ml.getRecords [_ml.get_records] +Get anomaly records for an anomaly detection job. +Records contain the detailed analytical results. They describe the anomalous +activity that has been identified in the input data based on the detector +configuration. +There can be many anomaly records depending on the characteristics and size +of the input data. In practice, there are often too many to be able to +manually process them. The machine learning features therefore perform a +sophisticated aggregation of the anomaly records into buckets. +The number of record results depends on the number of anomalies found in each +bucket, which relates to the number of time series being modeled and the +number of detectors. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records) @@ -8651,26 +9166,21 @@ Get anomaly records for an anomaly detection job. Records contain the detailed a client.ml.getRecords({ job_id }) ``` +### Arguments [_arguments_ml.get_records] -### Arguments [_arguments_285] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. 
- * **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. - * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. - * **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. - * **`page` (Optional, { from, size })** - * **`record_score` (Optional, number)**: Refer to the description for the `record_score` query parameter. - * **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter. - * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. - * **`from` (Optional, number)**: Skips the specified number of records. - * **`size` (Optional, number)**: Specifies the maximum number of records to obtain. - - - -### get_trained_models [_get_trained_models] +#### Request (object) [_request_ml.get_records] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. +- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. +- **`page` (Optional, { from, size })** +- **`record_score` (Optional, number)**: Refer to the description for the `record_score` query parameter. +- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter. +- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`from` (Optional, number)**: Skips the specified number of records. +- **`size` (Optional, number)**: Specifies the maximum number of records to obtain. +## client.ml.getTrainedModels [_ml.get_trained_models] Get trained model configuration info. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models) @@ -8679,26 +9189,39 @@ Get trained model configuration info. client.ml.getTrainedModels({ ... }) ``` - -### Arguments [_arguments_286] - -* **Request (object):** - - * **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. - - -You can get information for multiple trained models in a single API request by using a list of model IDs or a wildcard expression. *** *`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - -* Contains wildcard expressions and there are no models that match. -* Contains the _all string or no identifiers and there are no matches. -* Contains wildcard expressions and there are only partial matches. - -If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. ** *`decompress_definition` (Optional, boolean)**: Specifies whether the included model definition should be returned as a JSON map (true) or in a custom compressed format (false). *** *`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. *** *`from` (Optional, number)**: Skips the specified number of models. *** *`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))**: A comma delimited string of optional fields to include in the response body. 
*** *`include_model_definition` (Optional, boolean)**: parameter is deprecated! Use [include=definition] instead *** *`size` (Optional, number)**: Specifies the maximum number of models to obtain. ** *`tags` (Optional, string | string[])**: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned. - - -### get_trained_models_stats [_get_trained_models_stats] - -Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a list of model IDs or a wildcard expression. +### Arguments [_arguments_ml.get_trained_models] + +#### Request (object) [_request_ml.get_trained_models] +- **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. + +You can get information for multiple trained models in a single API +request by using a list of model IDs or a wildcard +expression. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +- Contains wildcard expressions and there are no models that match. +- Contains the _all string or no identifiers and there are no matches. +- Contains wildcard expressions and there are only partial matches. + +If true, it returns an empty array when there are no matches and the +subset of results when there are partial matches. +- **`decompress_definition` (Optional, boolean)**: Specifies whether the included model definition should be returned as a +JSON map (true) or in a custom compressed format (false). +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. +- **`from` (Optional, number)**: Skips the specified number of models. +- **`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))**: A comma delimited string of optional fields to include in the response +body. +- **`size` (Optional, number)**: Specifies the maximum number of models to obtain. +- **`tags` (Optional, string | string[])**: A comma delimited string of tags. A trained model can have many tags, or +none. When supplied, only trained models that contain all the supplied +tags are returned. + +## client.ml.getTrainedModelsStats [_ml.get_trained_models_stats] +Get trained models usage info. +You can get usage information for multiple trained +models in a single API request by using a list of model IDs or a wildcard expression. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats) @@ -8706,24 +9229,23 @@ Get trained models usage info. You can get usage information for multiple traine client.ml.getTrainedModelsStats({ ... }) ``` +### Arguments [_arguments_ml.get_trained_models_stats] -### Arguments [_arguments_287] - -* **Request (object):** - - * **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. It can be a list or a wildcard expression. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - * Contains wildcard expressions and there are no models that match. - * Contains the _all string or no identifiers and there are no matches. - * Contains wildcard expressions and there are only partial matches. 
-
+
+#### Request (object) [_request_ml.get_trained_models_stats]
+- **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. It can be a
+list or a wildcard expression.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

-If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. ** *`from` (Optional, number)**: Skips the specified number of models. ** *`size` (Optional, number)**: Specifies the maximum number of models to obtain.

+- Contains wildcard expressions and there are no models that match.
+- Contains the _all string or no identifiers and there are no matches.
+- Contains wildcard expressions and there are only partial matches.
+
+If true, it returns an empty array when there are no matches and the
+subset of results when there are partial matches.
+- **`from` (Optional, number)**: Skips the specified number of models.
+- **`size` (Optional, number)**: Specifies the maximum number of models to obtain.

-### infer_trained_model [_infer_trained_model]
-
+## client.ml.inferTrainedModel [_ml.infer_trained_model]
Evaluate a trained model.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model)

@@ -8732,21 +9254,25 @@
client.ml.inferTrainedModel({ model_id, docs })
```

+### Arguments [_arguments_ml.infer_trained_model]

-### Arguments [_arguments_288]
-
-* **Request (object):**
-
-    * **`model_id` (string)**: The unique identifier of the trained model.
-    * **`docs` (Record[])**: An array of objects to pass to the model for inference. The objects should contain a fields matching your configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed.
-    * **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The inference configuration updates to apply on the API call
-    * **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait for inference results.
-
+#### Request (object) [_request_ml.infer_trained_model]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`docs` (Record[])**: An array of objects to pass to the model for inference. The objects should contain the fields matching your
+configured trained model input. Typically, for NLP models, the field name is `text_field`.
+Currently, for NLP models, only a single value is allowed.
+- **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The inference configuration updates to apply on the API call.
+- **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait for inference results.

-### info [_info_3]
-
-Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration.
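To tie the `client.ml.inferTrainedModel` parameters above together, a minimal sketch (the model ID and input text are hypothetical; `text_field` follows the default NLP input field name described above):

```ts
// Evaluate a single document against a deployed NLP model.
const response = await client.ml.inferTrainedModel({
  model_id: 'my-nlp-model',  // hypothetical trained model ID
  docs: [
    { text_field: 'Elastic is headquartered in Mountain View, California.' }
  ],
  timeout: '30s'             // how long to wait for inference results
})
```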
+## client.ml.info [_ml.info]
+Get machine learning information.
+Get defaults and limits used by machine learning.
+This endpoint is designed to be used by a user interface that needs to fully
+understand machine learning configurations where some options are not
+specified, meaning that the defaults should be used. This endpoint may be
+used to find out what those defaults are. It also provides information about
+the maximum size of machine learning jobs that could run in the current
+cluster configuration.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info)

@@ -8755,9 +9281,15 @@
client.ml.info()
```

-### open_job [_open_job]
+## client.ml.openJob [_ml.open_job]
+Open anomaly detection jobs.

-Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received.
+An anomaly detection job must be opened to be ready to receive and analyze
+data. It can be opened and closed multiple times throughout its lifecycle.
+When you open a new job, it starts with an empty model.
+When you open an existing job, the most recent model state is automatically
+loaded. The job is ready to resume its analysis from where it left off, once
+new data is received.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job)

@@ -8765,18 +9297,13 @@
client.ml.openJob({ job_id })
```

+### Arguments [_arguments_ml.open_job]

-### Arguments [_arguments_289]
-
-* **Request (object):**
-
-    * **`job_id` (string)**: Identifier for the anomaly detection job.
-    * **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter.
-
-
-### post_calendar_events [_post_calendar_events]
+#### Request (object) [_request_ml.open_job]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter.

+## client.ml.postCalendarEvents [_ml.post_calendar_events]
Add scheduled events to the calendar.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events)

@@ -8785,46 +9312,35 @@ Add scheduled events to the calendar.
client.ml.postCalendarEvents({ calendar_id, events })
```

+### Arguments [_arguments_ml.post_calendar_events]

-### Arguments [_arguments_290]
+#### Request (object) [_request_ml.post_calendar_events]

-* **Request (object):**
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])**: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.

-    * **`calendar_id` (string)**: A string that uniquely identifies a calendar.
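A minimal sketch of the `client.ml.postCalendarEvents` call documented above (the calendar ID and timestamps are hypothetical):

```ts
// Schedule a maintenance window on a calendar. Start and end times may be
// epoch milliseconds, as here, or ISO 8601 strings.
await client.ml.postCalendarEvents({
  calendar_id: 'planned-outages',   // hypothetical calendar
  events: [
    {
      description: 'Quarterly maintenance window',
      start_time: 1635638400000,    // epoch ms
      end_time: 1635724800000
    }
  ]
})
```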
- * **`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])**: A list of one of more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. +IMPORTANT: For each job, data can be accepted from only a single connection at a time. +It is not currently possible to post data to multiple jobs using wildcards or a list. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data) +```ts +client.ml.postData({ job_id }) +``` -### post_data [_post_data] +### Arguments [_arguments_ml.post_data] -Send data to an anomaly detection job for analysis. - -::::{important} -For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a list. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data) - -```ts -client.ml.postData({ job_id }) -``` - - -### Arguments [_arguments_291] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. - * **`data` (Optional, TData[])** - * **`reset_end` (Optional, string | Unit)**: Specifies the end of the bucket resetting range. - * **`reset_start` (Optional, string | Unit)**: Specifies the start of the bucket resetting range. - - - -### preview_data_frame_analytics [_preview_data_frame_analytics] +#### Request (object) [_request_ml.post_data] +- **`job_id` (string)**: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. +- **`data` (Optional, TData[])** +- **`reset_end` (Optional, string | Unit)**: Specifies the end of the bucket resetting range. +- **`reset_start` (Optional, string | Unit)**: Specifies the start of the bucket resetting range. -Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. +## client.ml.previewDataFrameAnalytics [_ml.preview_data_frame_analytics] +Preview features used by data frame analytics. +Preview the extracted features used by a data frame analytics config. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics) @@ -8832,19 +9348,24 @@ Preview features used by data frame analytics. Previews the extracted features u client.ml.previewDataFrameAnalytics({ ... }) ``` +### Arguments [_arguments_ml.preview_data_frame_analytics] -### Arguments [_arguments_292] +#### Request (object) [_request_ml.preview_data_frame_analytics] +- **`id` (Optional, string)**: Identifier for the data frame analytics job. +- **`config` (Optional, { source, analysis, model_memory_limit, max_num_threads, analyzed_fields })**: A data frame analytics config as described in create data frame analytics +jobs. Note that `id` and `dest` don’t need to be provided in the context of +this API. -* **Request (object):** - - * **`id` (Optional, string)**: Identifier for the data frame analytics job. - * **`config` (Optional, { source, analysis, model_memory_limit, max_num_threads, analyzed_fields })**: A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API. - - - -### preview_datafeed [_preview_datafeed] - -Preview a datafeed. 
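A minimal sketch of the `client.ml.postData` call documented above (the job ID and documents are hypothetical; the job must already be open, and remember that only one connection at a time may post to a given job):

```ts
// Send a small batch of JSON documents to an open anomaly detection job.
await client.ml.postData({
  job_id: 'my-job',   // hypothetical job, must be in the open state
  data: [
    { timestamp: 1635638400000, bytes: 512 },
    { timestamp: 1635638460000, bytes: 2048 }
  ]
})
```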
This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. +## client.ml.previewDatafeed [_ml.preview_datafeed] +Preview a datafeed. +This API returns the first "page" of search results from a datafeed. +You can preview an existing datafeed or provide configuration details for a datafeed +and anomaly detection job in the API. The preview shows the structure of the data +that will be passed to the anomaly detection engine. +IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that +called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the +datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. +You can also use secondary authorization headers to supply the credentials. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed) @@ -8852,21 +9373,22 @@ Preview a datafeed. This API returns the first "page" of search results from a d client.ml.previewDatafeed({ ... }) ``` +### Arguments [_arguments_ml.preview_datafeed] -### Arguments [_arguments_293] - -* **Request (object):** - - * **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body. - * **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: The datafeed definition to preview. - * **`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })**: The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. 
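A minimal sketch of the `client.ml.previewDatafeed` call documented in this section (the datafeed ID and time window are hypothetical; pass `datafeed_config` and `job_config` in the body instead if the datafeed does not exist yet):

```ts
// Preview the first "page" of data an existing datafeed would pass
// to the anomaly detection engine.
const preview = await client.ml.previewDatafeed({
  datafeed_id: 'datafeed-my-job',   // hypothetical datafeed
  start: '2021-04-01T00:00:00Z',    // optional preview window start
  end: '2021-04-02T00:00:00Z'       // optional preview window end
})
```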
- * **`start` (Optional, string | Unit)**: The start time from where the datafeed preview should begin - * **`end` (Optional, string | Unit)**: The end time when the datafeed preview should stop - - - -### put_calendar [_put_calendar] +#### Request (object) [_request_ml.preview_datafeed] +- **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric +characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job +configuration details in the request body. +- **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: The datafeed definition to preview. +- **`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })**: The configuration details for the anomaly detection job that is associated with the datafeed. If the +`datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must +supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is +used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. +- **`start` (Optional, string | Unit)**: The start time from where the datafeed preview should begin +- **`end` (Optional, string | Unit)**: The end time when the datafeed preview should stop +## client.ml.putCalendar [_ml.put_calendar] Create a calendar. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar) @@ -8875,19 +9397,14 @@ Create a calendar. client.ml.putCalendar({ calendar_id }) ``` +### Arguments [_arguments_ml.put_calendar] -### Arguments [_arguments_294] - -* **Request (object):** - - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. - * **`job_ids` (Optional, string[])**: An array of anomaly detection job identifiers. - * **`description` (Optional, string)**: A description of the calendar. - - - -### put_calendar_job [_put_calendar_job] +#### Request (object) [_request_ml.put_calendar] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. +- **`job_ids` (Optional, string[])**: An array of anomaly detection job identifiers. +- **`description` (Optional, string)**: A description of the calendar. +## client.ml.putCalendarJob [_ml.put_calendar_job] Add anomaly detection job to calendar. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job) @@ -8896,19 +9413,17 @@ Add anomaly detection job to calendar. client.ml.putCalendarJob({ calendar_id, job_id }) ``` +### Arguments [_arguments_ml.put_calendar_job] -### Arguments [_arguments_295] - -* **Request (object):** +#### Request (object) [_request_ml.put_calendar_job] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. +- **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. 
It can be a job identifier, a group name, or a list of jobs or groups. - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. - * **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups. - - - -### put_data_frame_analytics [_put_data_frame_analytics] - -Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. By default, the query used in the source configuration is `{"match_all": {}}`. +## client.ml.putDataFrameAnalytics [_ml.put_data_frame_analytics] +Create a data frame analytics job. +This API creates a data frame analytics job that performs an analysis on the +source indices and stores the outcome in a destination index. +By default, the query used in the source configuration is `{"match_all": {}}`. If the destination index does not exist, it is created automatically when you start the job. @@ -8920,31 +9435,80 @@ If you supply only a subset of the regression or classification parameters, hype client.ml.putDataFrameAnalytics({ id, analysis, dest, source }) ``` - -### Arguments [_arguments_296] - -* **Request (object):** - - * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`analysis` ({ classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. - * **`dest` ({ index, results_field })**: The destination configuration. - * **`source` ({ index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data. - * **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If set to `false` and a machine learning node with capacity to run the job cannot be immediately found, the API returns an error. If set to `true`, the API does not return an error; the job waits in the `starting` state until sufficient machine learning node capacity is available. This behavior is also affected by the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. - * **`analyzed_fields` (Optional, { includes, excludes })**: Specifies `includes` and/or `excludes` patterns to select which fields will be included in the analysis. The patterns specified in `excludes` are applied last, therefore `excludes` takes precedence. In other words, if the same field is specified in both `includes` and `excludes`, then the field will not be included in the analysis. If `analyzed_fields` is not set, only the relevant fields will be included. For example, all the numeric fields for outlier detection. The supported fields vary for each type of analysis. Outlier detection requires numeric or `boolean` data to analyze. The algorithms don’t support missing values therefore fields that have data types other than numeric or boolean are ignored. Documents where included fields contain missing values, null values, or an array are also ignored. Therefore the `dest` index may contain documents that don’t have an outlier score. Regression supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. 
It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the regression analysis. Classification supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the classification analysis. Classification analysis can be improved by mapping ordinal variable values to a single number. For example, in case of age ranges, you can model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. - * **`description` (Optional, string)**: A description of the job. - * **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. - * **`_meta` (Optional, Record)** - * **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. - * **`headers` (Optional, Record)** - * **`version` (Optional, string)** - - - -### put_datafeed [_put_datafeed] - -Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. - -When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. +### Arguments [_arguments_ml.put_data_frame_analytics] + +#### Request (object) [_request_ml.put_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`analysis` ({ classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to +perform one of the following types of analysis: classification, outlier +detection, or regression. +- **`dest` ({ index, results_field })**: The destination configuration. +- **`source` ({ index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data. 
+- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine +learning node capacity for it to be immediately assigned to a node. If +set to `false` and a machine learning node with capacity to run the job +cannot be immediately found, the API returns an error. If set to `true`, +the API does not return an error; the job waits in the `starting` state +until sufficient machine learning node capacity is available. This +behavior is also affected by the cluster-wide +`xpack.ml.max_lazy_ml_nodes` setting. +- **`analyzed_fields` (Optional, { includes, excludes })**: Specifies `includes` and/or `excludes` patterns to select which fields +will be included in the analysis. The patterns specified in `excludes` +are applied last, therefore `excludes` takes precedence. In other words, +if the same field is specified in both `includes` and `excludes`, then +the field will not be included in the analysis. If `analyzed_fields` is +not set, only the relevant fields will be included. For example, all the +numeric fields for outlier detection. +The supported fields vary for each type of analysis. Outlier detection +requires numeric or `boolean` data to analyze. The algorithms don’t +support missing values therefore fields that have data types other than +numeric or boolean are ignored. Documents where included fields contain +missing values, null values, or an array are also ignored. Therefore the +`dest` index may contain documents that don’t have an outlier score. +Regression supports fields that are numeric, `boolean`, `text`, +`keyword`, and `ip` data types. It is also tolerant of missing values. +Fields that are supported are included in the analysis, other fields are +ignored. Documents where included fields contain an array with two or +more values are also ignored. Documents in the `dest` index that don’t +contain a results field are not included in the regression analysis. +Classification supports fields that are numeric, `boolean`, `text`, +`keyword`, and `ip` data types. It is also tolerant of missing values. +Fields that are supported are included in the analysis, other fields are +ignored. Documents where included fields contain an array with two or +more values are also ignored. Documents in the `dest` index that don’t +contain a results field are not included in the classification analysis. +Classification analysis can be improved by mapping ordinal variable +values to a single number. For example, in case of age ranges, you can +model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. +- **`description` (Optional, string)**: A description of the job. +- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. +- **`_meta` (Optional, Record)** +- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try +to create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. +- **`headers` (Optional, Record)** +- **`version` (Optional, string)** + +## client.ml.putDatafeed [_ml.put_datafeed] +Create a datafeed. 
+Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.
+You can associate only one datafeed with each anomaly detection job.
+The datafeed contains a query that runs at a defined interval (`frequency`).
+If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval.
+By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`.
+
+When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had
+at the time of creation and runs the query using those same roles. If you provide secondary authorization headers,
+those credentials are used instead.
+You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed
+directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed)

@@ -8952,36 +9516,60 @@
```ts
client.ml.putDatafeed({ datafeed_id })
```

-
-### Arguments [_arguments_297]
-
-* **Request (object):**
-
-    * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
-    * **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data.
-    * **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option.
-    * **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds.
-    * **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation.
-    * **`indices` (Optional, string | string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role.
- * **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search - * **`job_id` (Optional, string)**: Identifier for the anomaly detection job. - * **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. - * **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. - * **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search. - * **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. - * **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. - * **`headers` (Optional, Record)** - * **`allow_no_indices` (Optional, boolean)**: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values. - * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If true, unavailable indices (missing or closed) are ignored. 
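Before the argument reference below, a minimal sketch of the `client.ml.putDatafeed` call (the datafeed ID, job ID, and index pattern are hypothetical):

```ts
// Create a datafeed that feeds an existing anomaly detection job.
await client.ml.putDatafeed({
  datafeed_id: 'datafeed-my-job',     // hypothetical datafeed ID
  job_id: 'my-job',                   // existing anomaly detection job
  indices: ['server-metrics-*'],      // hypothetical source indices
  query: { match_all: { boost: 1 } }, // the documented default query
  scroll_size: 1000                   // search page size for non-aggregating feeds
})
```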
- - - -### put_filter [_put_filter] - -Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. +### Arguments [_arguments_ml.put_datafeed] + +#### Request (object) [_request_ml.put_datafeed] +- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. +This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. +It must start and end with alphanumeric characters. +- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. +Support for aggregations is limited and should be used only with low cardinality data. +- **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might be required to search over long time periods, for several months or years. +This search is split into time chunks in order to ensure the load on Elasticsearch is managed. +Chunking configuration controls how the size of these time chunks are calculated; +it is an advanced configuration option. +- **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. +The datafeed can optionally search over indices that have already been read in an effort to determine whether +any data has subsequently been added to the index. If missing data is found, it is a good indication that the +`query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. +This check runs only on real-time datafeeds. +- **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. +The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible +fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last +(partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses +aggregations, this value must be divisible by the interval of the date histogram aggregation. +- **`indices` (Optional, string | string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master +nodes and the machine learning nodes must have the `remote_cluster_client` role. +- **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search +- **`job_id` (Optional, string)**: Identifier for the anomaly detection job. +- **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically +stops and closes the associated job after this many real-time searches return no documents. In other words, +it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no +end time that sees no data remains started until it is explicitly stopped. By default, it is not set. 
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an +Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this +object is passed verbatim to Elasticsearch. +- **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might +not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default +value is randomly selected between `60s` and `120s`. This randomness improves the query performance +when there are multiple jobs running on the same node. +- **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search. +- **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. +The detector configuration objects in a job can contain functions that use these script fields. +- **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. +The maximum value is the value of `index.max_result_window`, which is 10,000 by default. +- **`headers` (Optional, Record)** +- **`allow_no_indices` (Optional, boolean)**: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` +string or when no indices are specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. +- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If true, unavailable indices (missing or closed) are ignored. + +## client.ml.putFilter [_ml.put_filter] +Create a filter. +A filter contains a list of strings. It can be used by one or more anomaly detection jobs. +Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter) @@ -8989,20 +9577,19 @@ Create a filter. A filter contains a list of strings. It can be used by one or m client.ml.putFilter({ filter_id }) ``` +### Arguments [_arguments_ml.put_filter] -### Arguments [_arguments_298] - -* **Request (object):** +#### Request (object) [_request_ml.put_filter] +- **`filter_id` (string)**: A string that uniquely identifies a filter. 
+- **`description` (Optional, string)**: A description of the filter. +- **`items` (Optional, string[])**: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. +Up to 10000 items are allowed in each filter. - * **`filter_id` (string)**: A string that uniquely identifies a filter. - * **`description` (Optional, string)**: A description of the filter. - * **`items` (Optional, string[])**: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. Up to 10000 items are allowed in each filter. +## client.ml.putJob [_ml.put_job] +Create an anomaly detection job. - - -### put_job [_put_job] - -Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. +If you include a `datafeed_config`, you must have read index privileges on the source index. +If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job) @@ -9010,44 +9597,41 @@ Create an anomaly detection job. If you include a `datafeed_config`, you must ha client.ml.putJob({ job_id, analysis_config, data_description }) ``` - -### Arguments [_arguments_299] - -* **Request (object):** - - * **`job_id` (string)**: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`analysis_config` ({ bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. - * **`data_description` ({ format, time_field, time_format, field_delimiter })**: Defines the format of the input data when you send data to the job by using the post data API. Note that when configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. - * **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. - * **`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })**: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. - * **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. 
The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. - * **`custom_settings` (Optional, User-defined value)**: Advanced configuration option. Contains custom meta data about the job. - * **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. - * **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. - * **`description` (Optional, string)**: A description of the job. - * **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. - * **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })**: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. - * **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. - * **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. - * **`results_index_name` (Optional, string)**: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. - * **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. 
If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever.
-    * **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified.
-    * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are:
+### Arguments [_arguments_ml.put_job]
+
+#### Request (object) [_request_ml.put_job]
+- **`job_id` (string)**: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
+- **`analysis_config` ({ bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational.
+- **`data_description` ({ format, time_field, time_format, field_delimiter })**: Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained.
+- **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available.
+- **`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })**: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes.
+- **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low.
+- **`custom_settings` (Optional, User-defined value)**: Advanced configuration option. Contains custom meta data about the job. +- **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. +- **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. +- **`description` (Optional, string)**: A description of the job. +- **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. +- **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })**: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. +- **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. +- **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. +- **`results_index_name` (Optional, string)**: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. +- **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. 
+- **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the
+`_all` string or when no indices are specified.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
+whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are:

* `all`: Match any data stream or index, including hidden ones.
* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.
* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.
* `none`: Wildcard patterns are not accepted.
* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.
+- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored.

-    * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen.
-    * **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored.
-
-
-### put_trained_model [_put_trained_model]
-
-Create a trained model. Enable you to supply a trained model that is not created by data frame analytics.
+## client.ml.putTrainedModel [_ml.put_trained_model]
+Create a trained model.
+Enables you to supply a trained model that is not created by data frame analytics.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model)

@@ -9055,31 +9639,59 @@
```ts
client.ml.putTrainedModel({ model_id })
```

+### Arguments [_arguments_ml.put_trained_model]

+#### Request (object) [_request_ml.put_trained_model]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`compressed_definition` (Optional, string)**: The compressed (GZipped and Base64 encoded) inference definition of the
+model. If compressed_definition is specified, then definition cannot be
+specified.
+- **`definition` (Optional, { preprocessors, trained_model })**: The inference definition for the model. If definition is specified, then
+compressed_definition cannot be specified.
+- **`description` (Optional, string)**: A human-readable description of the inference trained model.
+- **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The default configuration for inference. This can be either a regression
+or classification configuration. It must match the underlying
+definition.trained_model's target_type. For pre-packaged models such as
+ELSER the config is not required.
+- **`input` (Optional, { field_names })**: The input field names for the model definition.
+- **`metadata` (Optional, User-defined value)**: An object map that contains metadata about the model.
This property is supported only if defer_definition_decompression is true or the model definition is not supplied. - * **`platform_architecture` (Optional, string)**: The platform architecture (if applicable) of the trained mode. If the model only works on one platform, because it is heavily optimized for a particular processor architecture and OS combination, then this field specifies which. The format of the string must match the platform identifiers used by Elasticsearch, so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. For portable models (those that work independent of processor architecture or OS features), leave this field unset. - * **`tags` (Optional, string[])**: An array of tags to organize the model. - * **`prefix_strings` (Optional, { ingest, search })**: Optional prefix strings applied at inference - * **`defer_definition_decompression` (Optional, boolean)**: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. - * **`wait_for_completion` (Optional, boolean)**: Whether to wait for all child operations (e.g. model download) to complete. - - - -### put_trained_model_alias [_put_trained_model_alias] - -Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning. +### Arguments [_arguments_ml.put_trained_model] + +#### Request (object) [_request_ml.put_trained_model] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`compressed_definition` (Optional, string)**: The compressed (GZipped and Base64 encoded) inference definition of the +model. If compressed_definition is specified, then definition cannot be +specified. +- **`definition` (Optional, { preprocessors, trained_model })**: The inference definition for the model. If definition is specified, then +compressed_definition cannot be specified. +- **`description` (Optional, string)**: A human-readable description of the inference trained model. +- **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The default configuration for inference. This can be either a regression +or classification configuration. It must match the underlying +definition.trained_model's target_type. For pre-packaged models such as +ELSER the config is not required. +- **`input` (Optional, { field_names })**: The input field names for the model definition. +- **`metadata` (Optional, User-defined value)**: An object map that contains metadata about the model. 
+- **`model_type` (Optional, Enum("tree_ensemble" | "lang_ident" | "pytorch"))**: The model type.
+- **`model_size_bytes` (Optional, number)**: The estimated memory usage in bytes to keep the trained model in memory.
+This property is supported only if defer_definition_decompression is true
+or the model definition is not supplied.
+- **`platform_architecture` (Optional, string)**: The platform architecture (if applicable) of the trained model. If the model
+only works on one platform, because it is heavily optimized for a particular
+processor architecture and OS combination, then this field specifies which.
+The format of the string must match the platform identifiers used by Elasticsearch,
+so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`,
+or `windows-x86_64`. For portable models (those that work independent of processor
+architecture or OS features), leave this field unset.
+- **`tags` (Optional, string[])**: An array of tags to organize the model.
+- **`prefix_strings` (Optional, { ingest, search })**: Optional prefix strings applied at inference
+- **`defer_definition_decompression` (Optional, boolean)**: If set to `true` and a `compressed_definition` is provided,
+the request defers definition decompression and skips relevant
+validations.
+- **`wait_for_completion` (Optional, boolean)**: Whether to wait for all child operations (e.g. model download)
+to complete.
+
+## client.ml.putTrainedModelAlias [_ml.put_trained_model_alias]
+Create or update a trained model alias.
+A trained model alias is a logical name used to reference a single trained
+model.
+You can use aliases instead of trained model identifiers to make it easier to
+reference your models. For example, you can use aliases in inference
+aggregations and processors.
+An alias must be unique and refer to only a single trained model. However,
+you can have multiple aliases for each trained model.
+If you use this API to update an alias such that it references a different
+trained model ID and the model uses a different type of data frame analytics,
+an error occurs. For example, this situation occurs if you have a trained
+model for regression analysis and a trained model for classification
+analysis; you cannot reassign an alias from one type of trained model to
+another.
+If you use this API to update an alias and there are very few input fields in
+common between the old and new trained models for the model alias, the API
+returns a warning.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias)

@@ -9087,19 +9699,16 @@ Create or update a trained model alias. A trained model alias is a logical name
 client.ml.putTrainedModelAlias({ model_alias, model_id })
 ```
+### Arguments [_arguments_ml.put_trained_model_alias]

-
-### Arguments [_arguments_301]
-
-* **Request (object):**
-
-  * **`model_alias` (string)**: The alias to create or update. This value cannot end in numbers.
-  * **`model_id` (string)**: The identifier for the trained model that the alias refers to.
-  * **`reassign` (Optional, boolean)**: Specifies whether the alias gets reassigned to the specified trained model if it is already assigned to a different model. If the alias is already assigned and this parameter is false, the API returns an error.
-
-
-
-### put_trained_model_definition_part [_put_trained_model_definition_part]
+#### Request (object) [_request_ml.put_trained_model_alias]
+- **`model_alias` (string)**: The alias to create or update. This value cannot end in numbers.
+- **`model_id` (string)**: The identifier for the trained model that the alias refers to.
+- **`reassign` (Optional, boolean)**: Specifies whether the alias gets reassigned to the specified trained
+model if it is already assigned to a different model. If the alias is
+already assigned and this parameter is false, the API returns an error.
+
+## client.ml.putTrainedModelDefinitionPart [_ml.put_trained_model_definition_part]
+Create part of a trained model definition.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part)

@@ -9108,22 +9717,20 @@ Create part of a trained model definition.
 client.ml.putTrainedModelDefinitionPart({ model_id, part, definition, total_definition_length, total_parts })
 ```
+### Arguments [_arguments_ml.put_trained_model_definition_part]

-
-### Arguments [_arguments_302]
-
-* **Request (object):**
-
-  * **`model_id` (string)**: The unique identifier of the trained model.
-  * **`part` (number)**: The definition part number. When the definition is loaded for inference the definition parts are streamed in the order of their part number. The first part must be `0` and the final part must be `total_parts - 1`.
-  * **`definition` (string)**: The definition part for the model. Must be a base64 encoded string.
-  * **`total_definition_length` (number)**: The total uncompressed definition length in bytes. Not base64 encoded.
-  * **`total_parts` (number)**: The total number of parts that will be uploaded. Must be greater than 0.
+#### Request (object) [_request_ml.put_trained_model_definition_part]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`part` (number)**: The definition part number. When the definition is loaded for inference the definition parts are streamed in the
+order of their part number. The first part must be `0` and the final part must be `total_parts - 1`.
+- **`definition` (string)**: The definition part for the model. Must be a base64 encoded string.
+- **`total_definition_length` (number)**: The total uncompressed definition length in bytes. Not base64 encoded.
+- **`total_parts` (number)**: The total number of parts that will be uploaded. Must be greater than 0.

-
-### put_trained_model_vocabulary [_put_trained_model_vocabulary]
-
-Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.
+## client.ml.putTrainedModelVocabulary [_ml.put_trained_model_vocabulary]
+Create a trained model vocabulary.
+This API is supported only for natural language processing (NLP) models.
+The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary)

@@ -9131,21 +9738,20 @@ Create a trained model vocabulary. This API is supported only for natural langua
 client.ml.putTrainedModelVocabulary({ model_id, vocabulary })
 ```
+### Arguments [_arguments_ml.put_trained_model_vocabulary]

-
-### Arguments [_arguments_303]
-
-* **Request (object):**
+#### Request (object) [_request_ml.put_trained_model_vocabulary]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`vocabulary` (string[])**: The model vocabulary, which must not be empty.
+- **`merges` (Optional, string[])**: The optional model merges if required by the tokenizer. +- **`scores` (Optional, number[])**: The optional vocabulary value scores if required by the tokenizer. - * **`model_id` (string)**: The unique identifier of the trained model. - * **`vocabulary` (string[])**: The model vocabulary, which must not be empty. - * **`merges` (Optional, string[])**: The optional model merges if required by the tokenizer. - * **`scores` (Optional, number[])**: The optional vocabulary value scores if required by the tokenizer. - - - -### reset_job [_reset_job] - -Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. +## client.ml.resetJob [_ml.reset_job] +Reset an anomaly detection job. +All model state and results are deleted. The job is ready to start over as if +it had just been created. +It is not currently possible to reset multiple jobs using wildcards or a +comma separated list. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job) @@ -9153,20 +9759,25 @@ Reset an anomaly detection job. All model state and results are deleted. The job client.ml.resetJob({ job_id }) ``` +### Arguments [_arguments_ml.reset_job] -### Arguments [_arguments_304] - -* **Request (object):** - - * **`job_id` (string)**: The ID of the job to reset. - * **`wait_for_completion` (Optional, boolean)**: Should this request wait until the operation has completed before returning. - * **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. - - +#### Request (object) [_request_ml.reset_job] +- **`job_id` (string)**: The ID of the job to reset. +- **`wait_for_completion` (Optional, boolean)**: Should this request wait until the operation has completed before +returning. +- **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the +user should be deleted along with any auto-generated annotations when the job is +reset. -### revert_model_snapshot [_revert_model_snapshot] - -Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. +## client.ml.revertModelSnapshot [_ml.revert_model_snapshot] +Revert to a snapshot. +The machine learning features react quickly to anomalous input, learning new +behaviors in data. Highly anomalous input increases the variance in the +models whilst the system learns whether this is a new step-change in behavior +or a one-off event. In the case where this anomalous input is known to be a +one-off, then it might be appropriate to reset the model state to a time +before this event. For example, you might consider reverting to a saved +snapshot after Black Friday or a critical system failure. 
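+
+As a quick sketch (the job and snapshot IDs below are hypothetical, and `client` is assumed to be an already-configured client instance), a revert might look like this:
+
+```js
+// A snapshot can only be reverted while the job is closed.
+await client.ml.closeJob({ job_id: 'my-anomaly-job' })
+// Revert to the hypothetical snapshot and delete the results
+// recorded between that snapshot and the present.
+await client.ml.revertModelSnapshot({
+  job_id: 'my-anomaly-job',
+  snapshot_id: '1575402237_1',
+  delete_intervening_results: true
+})
+```
+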
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot)

@@ -9174,20 +9785,29 @@ Revert to a snapshot. The machine learning features react quickly to anomalous i
 client.ml.revertModelSnapshot({ job_id, snapshot_id })
 ```

-
-### Arguments [_arguments_305]
-
-* **Request (object):**
-
-  * **`job_id` (string)**: Identifier for the anomaly detection job.
-  * **`snapshot_id` (string)**: You can specify `empty` as the . Reverting to the empty snapshot means the anomaly detection job starts learning a new model from scratch when it is started.
-  * **`delete_intervening_results` (Optional, boolean)**: Refer to the description for the `delete_intervening_results` query parameter.
-
-
-
-### set_upgrade_mode [_set_upgrade_mode]
-
-Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API.
+### Arguments [_arguments_ml.revert_model_snapshot]
+
+#### Request (object) [_request_ml.revert_model_snapshot]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`snapshot_id` (string)**: You can specify `empty` as the snapshot ID. Reverting to the empty
+snapshot means the anomaly detection job starts learning a new model from
+scratch when it is started.
+- **`delete_intervening_results` (Optional, boolean)**: Refer to the description for the `delete_intervening_results` query parameter.
+
+## client.ml.setUpgradeMode [_ml.set_upgrade_mode]
+Set upgrade_mode for ML indices.
+Sets a cluster wide upgrade_mode setting that prepares machine learning
+indices for an upgrade.
+When upgrading your cluster, in some circumstances you must restart your
+nodes and reindex your machine learning indices. In those circumstances,
+there must be no machine learning jobs running. You can close the machine
+learning jobs, do the upgrade, then open all the jobs again. Alternatively,
+you can use this API to temporarily halt tasks associated with the jobs and
+datafeeds and prevent new jobs from opening. You can also use this API
+during upgrades that do not require you to reindex your machine learning
+indices, though stopping jobs is not a requirement in that case.
+You can see the current value for the upgrade_mode setting by using the get
+machine learning info API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode)

@@ -9195,19 +9815,27 @@ Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that p
 client.ml.setUpgradeMode({ ... })
 ```

-
-### Arguments [_arguments_306]
-
-* **Request (object):**
-
-  * **`enabled` (Optional, boolean)**: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from starting.
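+
+As a sketch of the intended workflow (assuming `client` is an already-configured client instance):
+
+```js
+// Temporarily halt all ML job and datafeed tasks before the upgrade.
+await client.ml.setUpgradeMode({ enabled: true, timeout: '10m' })
+// ...upgrade or reindex, then return the cluster to normal operation.
+await client.ml.setUpgradeMode({ enabled: false })
+```
+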
- * **`timeout` (Optional, string | -1 | 0)**: The time to wait for the request to be completed. - - - -### start_data_frame_analytics [_start_data_frame_analytics] - -Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. +### Arguments [_arguments_ml.set_upgrade_mode] + +#### Request (object) [_request_ml.set_upgrade_mode] +- **`enabled` (Optional, boolean)**: When `true`, it enables `upgrade_mode` which temporarily halts all job +and datafeed tasks and prohibits new job and datafeed tasks from +starting. +- **`timeout` (Optional, string | -1 | 0)**: The time to wait for the request to be completed. + +## client.ml.startDataFrameAnalytics [_ml.start_data_frame_analytics] +Start a data frame analytics job. +A data frame analytics job can be started and stopped multiple times +throughout its lifecycle. +If the destination index does not exist, it is created automatically the +first time you start the data frame analytics job. The +`index.number_of_shards` and `index.number_of_replicas` settings for the +destination index are copied from the source index. If there are multiple +source indices, the destination index copies the highest setting values. The +mappings for the destination index are also copied from the source indices. +If there are any mapping conflicts, the job fails to start. +If the destination index exists, it is used as is. You can therefore set up +the destination index in advance with custom settings and mappings. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics) @@ -9215,27 +9843,29 @@ Start a data frame analytics job. A data frame analytics job can be started and client.ml.startDataFrameAnalytics({ id }) ``` +### Arguments [_arguments_ml.start_data_frame_analytics] -### Arguments [_arguments_307] - -* **Request (object):** - - * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job starts. - - - -### start_datafeed [_start_datafeed] +#### Request (object) [_request_ml.start_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job +starts. +## client.ml.startDatafeed [_ml.start_datafeed] Start datafeeds. -A datafeed must be started in order to retrieve data from Elasticsearch. 
A datafeed can be started and stopped multiple times throughout its lifecycle. +A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped +multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. -If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. +If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. +If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. -When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. +When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or +update it had at the time of creation or update and runs the query using those same roles. If you provided secondary +authorization headers when you created or updated the datafeed, those credentials are used instead. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed) @@ -9243,21 +9873,19 @@ When Elasticsearch security features are enabled, your datafeed remembers which client.ml.startDatafeed({ datafeed_id }) ``` +### Arguments [_arguments_ml.start_datafeed] -### Arguments [_arguments_308] - -* **Request (object):** - - * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. - * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. - * **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. - - +#### Request (object) [_request_ml.start_datafeed] +- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric +characters. +- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. -### start_trained_model_deployment [_start_trained_model_deployment] - -Start a trained model deployment. It allocates the model to every machine learning node. +## client.ml.startTrainedModelDeployment [_ml.start_trained_model_deployment] +Start a trained model deployment. +It allocates the model to every machine learning node. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment) @@ -9265,26 +9893,39 @@ Start a trained model deployment. 
It allocates the model to every machine learni client.ml.startTrainedModelDeployment({ model_id }) ``` - -### Arguments [_arguments_309] - -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. - * **`cache_size` (Optional, number | string)**: The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. - * **`deployment_id` (Optional, string)**: A unique identifier for the deployment of the model. - * **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. - * **`priority` (Optional, Enum("normal" | "low"))**: The deployment priority. - * **`queue_capacity` (Optional, number)**: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests are rejected with a 429 error. - * **`threads_per_allocation` (Optional, number)**: Sets the number of threads used by each model allocation during inference. This generally increases the inference speed. The inference process is a compute-bound process; any number greater than the number of available hardware threads on the machine does not increase the inference speed. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. - * **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the model to deploy. - * **`wait_for` (Optional, Enum("started" | "starting" | "fully_allocated"))**: Specifies the allocation status to wait for before returning. - - - -### stop_data_frame_analytics [_stop_data_frame_analytics] - -Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. +### Arguments [_arguments_ml.start_trained_model_deployment] + +#### Request (object) [_request_ml.start_trained_model_deployment] +- **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. +- **`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })**: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. +- **`cache_size` (Optional, number | string)**: The inference cache size (in memory outside the JVM heap) per node for the model. +The default value is the same size as the `model_size_bytes`. To disable the cache, +`0b` can be provided. +- **`deployment_id` (Optional, string)**: A unique identifier for the deployment of the model. +- **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. +All allocations on a node share the same copy of the model in memory but use +a separate set of threads to evaluate the model. +Increasing this value generally increases the throughput. 
+If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +If adaptive_allocations is enabled, do not set this value, because it’s automatically set. +- **`priority` (Optional, Enum("normal" | "low"))**: The deployment priority. +- **`queue_capacity` (Optional, number)**: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds +this value, new requests are rejected with a 429 error. +- **`threads_per_allocation` (Optional, number)**: Sets the number of threads used by each model allocation during inference. This generally increases +the inference speed. The inference process is a compute-bound process; any number +greater than the number of available hardware threads on the machine does not increase the +inference speed. If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the model to deploy. +- **`wait_for` (Optional, Enum("started" | "starting" | "fully_allocated"))**: Specifies the allocation status to wait for before returning. + +## client.ml.stopDataFrameAnalytics [_ml.stop_data_frame_analytics] +Stop data frame analytics jobs. +A data frame analytics job can be started and stopped multiple times +throughout its lifecycle. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics) @@ -9292,25 +9933,31 @@ Stop data frame analytics jobs. A data frame analytics job can be started and st client.ml.stopDataFrameAnalytics({ id }) ``` +### Arguments [_arguments_ml.stop_data_frame_analytics] -### Arguments [_arguments_310] +#### Request (object) [_request_ml.stop_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: -* **Request (object):** +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. - * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +The default value is true, which returns an empty data_frame_analytics +array when there are no matches and the subset of results when there are +partial matches. If this parameter is false, the request returns a 404 +status code when there are no matches or only partial matches. +- **`force` (Optional, boolean)**: If true, the data frame analytics job is stopped forcefully. +- **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job +stops. Defaults to 20 seconds. - 1. Contains wildcard expressions and there are no data frame analytics jobs that match. - 2. Contains the _all string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. 
- - -The default value is true, which returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. ** *`force` (Optional, boolean)**: If true, the data frame analytics job is stopped forcefully. ** *`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job stops. Defaults to 20 seconds. - - -### stop_datafeed [_stop_datafeed] - -Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. +## client.ml.stopDatafeed [_ml.stop_datafeed] +Stop datafeeds. +A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped +multiple times throughout its lifecycle. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed) @@ -9318,20 +9965,17 @@ Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elastics client.ml.stopDatafeed({ datafeed_id }) ``` +### Arguments [_arguments_ml.stop_datafeed] -### Arguments [_arguments_311] - -* **Request (object):** - - * **`datafeed_id` (string)**: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as the identifier. - * **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. - * **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter. - * **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. - - - -### stop_trained_model_deployment [_stop_trained_model_deployment] +#### Request (object) [_request_ml.stop_datafeed] +- **`datafeed_id` (string)**: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated +list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as +the identifier. +- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. +- **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter. +- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. +## client.ml.stopTrainedModelDeployment [_ml.stop_trained_model_deployment] Stop a trained model deployment. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment) @@ -9340,19 +9984,18 @@ Stop a trained model deployment. client.ml.stopTrainedModelDeployment({ model_id }) ``` +### Arguments [_arguments_ml.stop_trained_model_deployment] -### Arguments [_arguments_312] - -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. 
By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. - * **`force` (Optional, boolean)**: Forcefully stops the deployment, even if it is used by ingest pipelines. You can’t use these pipelines until you restart the model deployment. - - - -### update_data_frame_analytics [_update_data_frame_analytics] +#### Request (object) [_request_ml.stop_trained_model_deployment] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; +contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and +there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. +If `false`, the request returns a 404 status code when there are no matches or only partial matches. +- **`force` (Optional, boolean)**: Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you +restart the model deployment. +## client.ml.updateDataFrameAnalytics [_ml.update_data_frame_analytics] Update a data frame analytics job. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics) @@ -9361,22 +10004,31 @@ Update a data frame analytics job. client.ml.updateDataFrameAnalytics({ id }) ``` - -### Arguments [_arguments_313] - -* **Request (object):** - - * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`description` (Optional, string)**: A description of the job. - * **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. - * **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. - * **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. - - - -### update_datafeed [_update_datafeed] - -Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. +### Arguments [_arguments_ml.update_data_frame_analytics] + +#### Request (object) [_request_ml.update_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. 
This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`description` (Optional, string)**: A description of the job. +- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try +to create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. +- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. +- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine +learning node capacity for it to be immediately assigned to a node. + +## client.ml.updateDatafeed [_ml.update_datafeed] +Update a datafeed. +You must stop and start the datafeed for the changes to be applied. +When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at +the time of the update and runs the query using those same roles. If you provide secondary authorization headers, +those credentials are used instead. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed) @@ -9384,42 +10036,67 @@ Update a datafeed. You must stop and start the datafeed for the changes to be ap client.ml.updateDatafeed({ datafeed_id }) ``` - -### Arguments [_arguments_314] - -* **Request (object):** - - * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. - * **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. - * **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. - * **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. 
When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. - * **`indices` (Optional, string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. - * **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search. - * **`job_id` (Optional, string)** - * **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also changed. Therefore, the time required to learn might be long and the understandability of the results is unpredictable. If you want to make significant changes to the source data, it is recommended that you clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one when you are satisfied with the results of the job. - * **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. - * **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search. - * **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. - * **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`. 
- * **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: +### Arguments [_arguments_ml.update_datafeed] + +#### Request (object) [_request_ml.update_datafeed] +- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. +This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. +It must start and end with alphanumeric characters. +- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only +with low cardinality data. +- **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might search over long time periods, for several months or years. This search is split into time +chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of +these time chunks are calculated; it is an advanced configuration option. +- **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally +search over indices that have already been read in an effort to determine whether any data has subsequently been +added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and +the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time +datafeeds. +- **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is +either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket +span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are +written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value +must be divisible by the interval of the date histogram aggregation. +- **`indices` (Optional, string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine +learning nodes must have the `remote_cluster_client` role. +- **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search. +- **`job_id` (Optional, string)** +- **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically +stops and closes the associated job after this many real-time searches return no documents. In other words, +it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no +end time that sees no data remains started until it is explicitly stopped. By default, it is not set. 
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an +Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this +object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also +changed. Therefore, the time required to learn might be long and the understandability of the results is +unpredictable. If you want to make significant changes to the source data, it is recommended that you +clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one +when you are satisfied with the results of the job. +- **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might +not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default +value is randomly selected between `60s` and `120s`. This randomness improves the query performance +when there are multiple jobs running on the same node. +- **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search. +- **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. +The detector configuration objects in a job can contain functions that use these script fields. +- **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. +The maximum value is the value of `index.max_result_window`. +- **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the +`_all` string or when no indices are specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: * `all`: Match any data stream or index, including hidden ones. * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen. 
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored. - * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored. - - - -### update_filter [_update_filter] - -Update a filter. Updates the description of a filter, adds items, or removes items from the list. +## client.ml.updateFilter [_ml.update_filter] +Update a filter. +Updates the description of a filter, adds items, or removes items from the list. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter) @@ -9427,21 +10104,17 @@ Update a filter. Updates the description of a filter, adds items, or removes ite client.ml.updateFilter({ filter_id }) ``` +### Arguments [_arguments_ml.update_filter] -### Arguments [_arguments_315] - -* **Request (object):** - - * **`filter_id` (string)**: A string that uniquely identifies a filter. - * **`add_items` (Optional, string[])**: The items to add to the filter. - * **`description` (Optional, string)**: A description for the filter. - * **`remove_items` (Optional, string[])**: The items to remove from the filter. - +#### Request (object) [_request_ml.update_filter] +- **`filter_id` (string)**: A string that uniquely identifies a filter. +- **`add_items` (Optional, string[])**: The items to add to the filter. +- **`description` (Optional, string)**: A description for the filter. +- **`remove_items` (Optional, string[])**: The items to remove from the filter. - -### update_job [_update_job] - -Update an anomaly detection job. Updates certain properties of an anomaly detection job. +## client.ml.updateJob [_ml.update_job] +Update an anomaly detection job. +Updates certain properties of an anomaly detection job. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job) @@ -9449,33 +10122,63 @@ Update an anomaly detection job. Updates certain properties of an anomaly detect client.ml.updateJob({ job_id }) ``` - -### Arguments [_arguments_316] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the job. - * **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If `false` and a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to `true`, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. - * **`analysis_limits` (Optional, { model_memory_limit })** - * **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the value too low. 
If the job is open when you make the update, you must stop the datafeed, close the job, then reopen the job and restart the datafeed for the changes to take effect. - * **`custom_settings` (Optional, Record)**: Advanced configuration option. Contains custom meta data about the job. For example, it can contain custom URL information as shown in Adding custom URLs to machine learning results. - * **`categorization_filters` (Optional, string[])** - * **`description` (Optional, string)**: A description of the job. - * **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })** - * **`model_prune_window` (Optional, string | -1 | 0)** - * **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. For jobs created before version 7.8.0, the default value matches `model_snapshot_retention_days`. - * **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. - * **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. - * **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. - * **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. - * **`detectors` (Optional, { detector_index, description, custom_rules }[])**: An array of detector update objects. - * **`per_partition_categorization` (Optional, { enabled, stop_on_warn })**: Settings related to how categorization interacts with partition fields. - - - -### update_model_snapshot [_update_model_snapshot] - -Update a snapshot. Updates certain properties of a snapshot. +### Arguments [_arguments_ml.update_job] + +#### Request (object) [_request_ml.update_job] +- **`job_id` (string)**: Identifier for the job. +- **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when +there is insufficient machine learning node capacity for it to be +immediately assigned to a node. If `false` and a machine learning node +with capacity to run the job cannot immediately be found, the open +anomaly detection jobs API returns an error. However, this is also +subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this +option is set to `true`, the open anomaly detection jobs API does not +return an error and the job waits in the opening state until sufficient +machine learning node capacity is available. 
+- **`analysis_limits` (Optional, { model_memory_limit })** +- **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. The time between each periodic persistence +of the model. +The default value is a randomized value between 3 to 4 hours, which +avoids all jobs persisting at exactly the same time. The smallest allowed +value is 1 hour. +For very large models (several GB), persistence could take 10-20 minutes, +so do not set the value too low. +If the job is open when you make the update, you must stop the datafeed, +close the job, then reopen the job and restart the datafeed for the +changes to take effect. +- **`custom_settings` (Optional, Record)**: Advanced configuration option. Contains custom meta data about the job. +For example, it can contain custom URL information as shown in Adding +custom URLs to machine learning results. +- **`categorization_filters` (Optional, string[])** +- **`description` (Optional, string)**: A description of the job. +- **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })** +- **`model_prune_window` (Optional, string | -1 | 0)** +- **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old +model snapshots for this job. It specifies a period of time (in days) +after which only the first snapshot per day is retained. This period is +relative to the timestamp of the most recent snapshot for this job. Valid +values range from 0 to `model_snapshot_retention_days`. For jobs created +before version 7.8.0, the default value matches +`model_snapshot_retention_days`. +- **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old +model snapshots for this job. It specifies the maximum period of time (in +days) that snapshots are retained. This period is relative to the +timestamp of the most recent snapshot for this job. +- **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the +score are applied, as new data is seen. +- **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results +are retained. Age is calculated relative to the timestamp of the latest +bucket result. If this property has a non-null value, once per day at +00:30 (server time), results that are the specified number of days older +than the latest bucket result are deleted from Elasticsearch. The default +value is null, which means all results are retained. +- **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. +- **`detectors` (Optional, { detector_index, description, custom_rules }[])**: An array of detector update objects. +- **`per_partition_categorization` (Optional, { enabled, stop_on_warn })**: Settings related to how categorization interacts with partition fields. + +## client.ml.updateModelSnapshot [_ml.update_model_snapshot] +Update a snapshot. +Updates certain properties of a snapshot. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot) @@ -9483,20 +10186,17 @@ Update a snapshot. Updates certain properties of a snapshot. 
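+// `job_id` and `snapshot_id` below are required path parameters; the
+// optional body fields (`description`, `retain`) are documented under
+// the arguments that follow.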
client.ml.updateModelSnapshot({ job_id, snapshot_id }) ``` +### Arguments [_arguments_ml.update_model_snapshot] -### Arguments [_arguments_317] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`snapshot_id` (string)**: Identifier for the model snapshot. - * **`description` (Optional, string)**: A description of the model snapshot. - * **`retain` (Optional, boolean)**: If `true`, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. However, this snapshot will be deleted when the job is deleted. - - - -### update_trained_model_deployment [_update_trained_model_deployment] +#### Request (object) [_request_ml.update_model_snapshot] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: Identifier for the model snapshot. +- **`description` (Optional, string)**: A description of the model snapshot. +- **`retain` (Optional, boolean)**: If `true`, this snapshot will not be deleted during automatic cleanup of +snapshots older than `model_snapshot_retention_days`. However, this +snapshot will be deleted when the job is deleted. +## client.ml.updateTrainedModelDeployment [_ml.update_trained_model_deployment] Update a trained model deployment. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment) @@ -9505,19 +10205,32 @@ Update a trained model deployment. client.ml.updateTrainedModelDeployment({ model_id }) ``` - -### Arguments [_arguments_318] - -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. - * **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. - - - -### upgrade_job_snapshot [_upgrade_job_snapshot] - -Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. +### Arguments [_arguments_ml.update_trained_model_deployment] + +#### Request (object) [_request_ml.update_trained_model_deployment] +- **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. +- **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. +All allocations on a node share the same copy of the model in memory but use +a separate set of threads to evaluate the model. +Increasing this value generally increases the throughput. +If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. 
+If adaptive_allocations is enabled, do not set this value, because it’s automatically set. +- **`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })**: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. + +## client.ml.upgradeJobSnapshot [_ml.upgrade_job_snapshot] +Upgrade a snapshot. +Upgrade an anomaly detection model snapshot to the latest major version. +Over time, older snapshot formats are deprecated and removed. Anomaly +detection jobs support only snapshots that are from the current or previous +major version. +This API provides a means to upgrade a snapshot to the current major version. +This aids in preparing the cluster for an upgrade to the next major version. +Only one snapshot per anomaly detection job can be upgraded at a time and the +upgraded snapshot cannot be the current snapshot of the anomaly detection +job. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot) @@ -9525,24 +10238,18 @@ Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest m client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) ``` +### Arguments [_arguments_ml.upgrade_job_snapshot] -### Arguments [_arguments_319] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. - * **`wait_for_completion` (Optional, boolean)**: When true, the API won’t respond until the upgrade is complete. Otherwise, it responds as soon as the upgrade task is assigned to a node. - * **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the request to complete. - +#### Request (object) [_request_ml.upgrade_job_snapshot] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. +- **`wait_for_completion` (Optional, boolean)**: When true, the API won’t respond until the upgrade is complete. +Otherwise, it responds as soon as the upgrade task is assigned to a node. +- **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the request to complete. - -## nodes [_nodes_2] - - -### clear_repositories_metering_archive [_clear_repositories_metering_archive] - -Clear the archived repositories metering. Clear the archived repositories metering information in the cluster. +## client.nodes.clearRepositoriesMeteringArchive [_nodes.clear_repositories_metering_archive] +Clear the archived repositories metering. +Clear the archived repositories metering information in the cluster. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive) @@ -9550,19 +10257,17 @@ Clear the archived repositories metering. Clear the archived repositories meteri client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) ``` +### Arguments [_arguments_nodes.clear_repositories_metering_archive] -### Arguments [_arguments_320] - -* **Request (object):** - - * **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. - * **`max_archive_version` (number)**: Specifies the maximum `archive_version` to be cleared from the archive. 
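+
+For example, a minimal sketch of a call, assuming a configured `client`; both parameters are required and described below, and the values shown are hypothetical:
+
+```ts
+// Clear archived repositories metering information up to and
+// including archive version 1 on all nodes.
+await client.nodes.clearRepositoriesMeteringArchive({
+  node_id: '_all',
+  max_archive_version: 1
+})
+```
+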
- - +#### Request (object) [_request_nodes.clear_repositories_metering_archive] +- **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. +- **`max_archive_version` (number)**: Specifies the maximum `archive_version` to be cleared from the archive. -### get_repositories_metering_info [_get_repositories_metering_info] - -Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. +## client.nodes.getRepositoriesMeteringInfo [_nodes.get_repositories_metering_info] +Get cluster repositories metering. +Get repositories metering information for a cluster. +This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. +Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info) @@ -9570,18 +10275,16 @@ Get cluster repositories metering. Get repositories metering information for a c client.nodes.getRepositoriesMeteringInfo({ node_id }) ``` +### Arguments [_arguments_nodes.get_repositories_metering_info] -### Arguments [_arguments_321] - -* **Request (object):** - - * **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster). - +#### Request (object) [_request_nodes.get_repositories_metering_info] +- **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. +All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). - -### hot_threads [_hot_threads] - -Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node. +## client.nodes.hotThreads [_nodes.hot_threads] +Get the hot threads for nodes. +Get a breakdown of the hot threads on each selected node in the cluster. +The output is plain text with a breakdown of the top hot threads for each node. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads) @@ -9589,25 +10292,24 @@ Get the hot threads for nodes. Get a breakdown of the hot threads on each select client.nodes.hotThreads({ ... }) ``` +### Arguments [_arguments_nodes.hot_threads] -### Arguments [_arguments_322] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. - * **`ignore_idle_threads` (Optional, boolean)**: If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out. - * **`interval` (Optional, string | -1 | 0)**: The interval to do the second sampling of threads. - * **`snapshots` (Optional, number)**: Number of samples of thread stacktrace. 
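+
+For example, a minimal sketch of a call; every parameter is optional and described below, and the values shown are illustrative:
+
+```ts
+// Report the three hottest threads per node, sampled by CPU usage.
+const hotThreads = await client.nodes.hotThreads({ threads: 3, type: 'cpu' })
+```
+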
- * **`threads` (Optional, number)**: Specifies the number of hot threads to provide information for. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`type` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The type to sample. - * **`sort` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The sort order for *cpu* type (default: total) +#### Request (object) [_request_nodes.hot_threads] +- **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. +- **`ignore_idle_threads` (Optional, boolean)**: If true, known idle threads (e.g. waiting in a socket select, or to get +a task from an empty queue) are filtered out. +- **`interval` (Optional, string | -1 | 0)**: The interval to do the second sampling of threads. +- **`snapshots` (Optional, number)**: Number of samples of thread stacktrace. +- **`threads` (Optional, number)**: Specifies the number of hot threads to provide information for. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received +before the timeout expires, the request fails and returns an error. +- **`type` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The type to sample. +- **`sort` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The sort order for 'cpu' type (default: total) +## client.nodes.info [_nodes.info] +Get node information. - -### info [_info_4] - -Get node information. By default, the API returns all attributes and core settings for cluster nodes. +By default, the API returns all attributes and core settings for cluster nodes. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info) @@ -9615,46 +10317,43 @@ Get node information. By default, the API returns all attributes and core settin client.nodes.info({ ... }) ``` +### Arguments [_arguments_nodes.info] -### Arguments [_arguments_323] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. - * **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. - * **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### reload_secure_settings [_reload_secure_settings] +#### Request (object) [_request_nodes.info] +- **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. +- **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.nodes.reloadSecureSettings [_nodes.reload_secure_settings] Reload the keystore on nodes in the cluster. -Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. 
When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. +Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. +That is, you can change them on disk and reload them without restarting any nodes in the cluster. +When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. -When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. +When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. +Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. +Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. -[Endpoint documentation](docs-content://deploy-manage/security/secure-settings.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings) ```ts client.nodes.reloadSecureSettings({ ... }) ``` +### Arguments [_arguments_nodes.reload_secure_settings] -### Arguments [_arguments_324] +#### Request (object) [_request_nodes.reload_secure_settings] +- **`node_id` (Optional, string | string[])**: The names of particular nodes in the cluster to target. +- **`secure_settings_password` (Optional, string)**: The password for the Elasticsearch keystore. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: The names of particular nodes in the cluster to target. - * **`secure_settings_password` (Optional, string)**: The password for the Elasticsearch keystore. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### stats [_stats_5] - -Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics. +## client.nodes.stats [_nodes.stats] +Get node statistics. +Get statistics for nodes in a cluster. +By default, all stats are returned. You can limit the returned information by using metrics. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats) @@ -9662,28 +10361,23 @@ Get node statistics. Get statistics for nodes in a cluster. By default, all stat client.nodes.stats({ ... }) ``` +### Arguments [_arguments_nodes.stats] -### Arguments [_arguments_325] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. 
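+
+For example, a hedged sketch that limits the response to two metric groups; the parameters are described below, and the metric names are illustrative:
+
+```ts
+// Fetch only the `jvm` and `os` metric groups from all nodes.
+const stats = await client.nodes.stats({ metric: ['jvm', 'os'] })
+```
+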
- * **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics - * **`index_metric` (Optional, string | string[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. - * **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. - * **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics. - * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. - * **`groups` (Optional, boolean)**: List of search groups to include in the search statistics. - * **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). - * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`types` (Optional, string[])**: A list of document types for the indexing index metric. - * **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory. - - - -### usage [_usage] +#### Request (object) [_request_nodes.stats] +- **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. +- **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics +- **`index_metric` (Optional, string | string[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. +- **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. +- **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics. +- **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. +- **`groups` (Optional, boolean)**: List of search groups to include in the search statistics. +- **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). +- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`types` (Optional, string[])**: A list of document types for the indexing index metric. +- **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory. +## client.nodes.usage [_nodes.usage] Get feature usage information. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage) @@ -9692,23 +10386,19 @@ Get feature usage information. client.nodes.usage({ ... 
}) ``` +### Arguments [_arguments_nodes.usage] -### Arguments [_arguments_326] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you’re connecting to, leave empty to get information from all nodes - * **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. A list of the following options: `_all`, `rest_actions`. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -## query_rules [_query_rules] - +#### Request (object) [_request_nodes.usage] +- **`node_id` (Optional, string | string[])**: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes +- **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. +A list of the following options: `_all`, `rest_actions`. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -### delete_rule [_delete_rule] - -Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. +## client.queryRules.deleteRule [_query_rules.delete_rule] +Delete a query rule. +Delete a query rule within a query ruleset. +This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule) @@ -9716,19 +10406,16 @@ Delete a query rule. Delete a query rule within a query ruleset. This is a destr client.queryRules.deleteRule({ ruleset_id, rule_id }) ``` +### Arguments [_arguments_query_rules.delete_rule] -### Arguments [_arguments_327] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to delete - * **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to delete - +#### Request (object) [_request_query_rules.delete_rule] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to delete +- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to delete - -### delete_ruleset [_delete_ruleset] - -Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable. +## client.queryRules.deleteRuleset [_query_rules.delete_ruleset] +Delete a query ruleset. +Remove a query ruleset and its associated data. +This is a destructive action that is not recoverable. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset) @@ -9736,18 +10423,14 @@ Delete a query ruleset. Remove a query ruleset and its associated data. 
This is client.queryRules.deleteRuleset({ ruleset_id }) ``` +### Arguments [_arguments_query_rules.delete_ruleset] -### Arguments [_arguments_328] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset to delete +#### Request (object) [_request_query_rules.delete_ruleset] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset to delete - - -### get_rule [_get_rule] - -Get a query rule. Get details about a query rule within a query ruleset. +## client.queryRules.getRule [_query_rules.get_rule] +Get a query rule. +Get details about a query rule within a query ruleset. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule) @@ -9755,19 +10438,15 @@ Get a query rule. Get details about a query rule within a query ruleset. client.queryRules.getRule({ ruleset_id, rule_id }) ``` +### Arguments [_arguments_query_rules.get_rule] -### Arguments [_arguments_329] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to retrieve - * **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to retrieve - - - -### get_ruleset [_get_ruleset] +#### Request (object) [_request_query_rules.get_rule] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to retrieve +- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to retrieve -Get a query ruleset. Get details about a query ruleset. +## client.queryRules.getRuleset [_query_rules.get_ruleset] +Get a query ruleset. +Get details about a query ruleset. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset) @@ -9775,18 +10454,14 @@ Get a query ruleset. Get details about a query ruleset. client.queryRules.getRuleset({ ruleset_id }) ``` +### Arguments [_arguments_query_rules.get_ruleset] -### Arguments [_arguments_330] +#### Request (object) [_request_query_rules.get_ruleset] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset - - - -### list_rulesets [_list_rulesets] - -Get all query rulesets. Get summarized information about the query rulesets. +## client.queryRules.listRulesets [_query_rules.list_rulesets] +Get all query rulesets. +Get summarized information about the query rulesets. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets) @@ -9794,24 +10469,20 @@ Get all query rulesets. Get summarized information about the query rulesets. client.queryRules.listRulesets({ ... }) ``` +### Arguments [_arguments_query_rules.list_rulesets] -### Arguments [_arguments_331] - -* **Request (object):** - - * **`from` (Optional, number)**: The offset from the first result to fetch. - * **`size` (Optional, number)**: The maximum number of results to retrieve. - +#### Request (object) [_request_query_rules.list_rulesets] +- **`from` (Optional, number)**: The offset from the first result to fetch. +- **`size` (Optional, number)**: The maximum number of results to retrieve. +## client.queryRules.putRule [_query_rules.put_rule] +Create or update a query rule. +Create or update a query rule within a query ruleset. -### put_rule [_put_rule] - -Create or update a query rule. 
Create or update a query rule within a query ruleset. - -::::{important} -Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. -:::: - +IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule. +It is advised to use one or the other in query rulesets, to avoid errors. +Additionally, pinned queries have a maximum limit of 100 pinned hits. +If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule) @@ -9819,28 +10490,27 @@ Due to limitations within pinned queries, you can only pin documents using ids o client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) ``` +### Arguments [_arguments_query_rules.put_rule] -### Arguments [_arguments_332] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to be created or updated. - * **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to be created or updated. - * **`type` (Enum("pinned" | "exclude"))**: The type of rule. - * **`criteria` ({ type, metadata, values } | { type, metadata, values }[])**: The criteria that must be met for the rule to be applied. If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. - * **`actions` ({ ids, docs })**: The actions to take when the rule is matched. The format of this action depends on the rule type. - * **`priority` (Optional, number)** - - - -### put_ruleset [_put_ruleset] +#### Request (object) [_request_query_rules.put_rule] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to be created or updated. +- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to be created or updated. +- **`type` (Enum("pinned" | "exclude"))**: The type of rule. +- **`criteria` ({ type, metadata, values } | { type, metadata, values }[])**: The criteria that must be met for the rule to be applied. +If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. +- **`actions` ({ ids, docs })**: The actions to take when the rule is matched. +The format of this action depends on the rule type. +- **`priority` (Optional, number)** -Create or update a query ruleset. There is a limit of 100 rules per ruleset. This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. - -::::{important} -Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. 
-:::: +## client.queryRules.putRuleset [_query_rules.put_ruleset] +Create or update a query ruleset. +There is a limit of 100 rules per ruleset. +This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. +IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule. +It is advised to use one or the other in query rulesets, to avoid errors. +Additionally, pinned queries have a maximum limit of 100 pinned hits. +If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset) @@ -9848,19 +10518,15 @@ Due to limitations within pinned queries, you can only select documents using `i client.queryRules.putRuleset({ ruleset_id, rules }) ``` +### Arguments [_arguments_query_rules.put_ruleset] -### Arguments [_arguments_333] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated. - * **`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])** - +#### Request (object) [_request_query_rules.put_ruleset] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated. +- **`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])** - -### test [_test] - -Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. +## client.queryRules.test [_query_rules.test] +Test a query ruleset. +Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test) @@ -9868,29 +10534,27 @@ Test a query ruleset. Evaluate match criteria against a query ruleset to identif client.queryRules.test({ ruleset_id, match_criteria }) ``` +### Arguments [_arguments_query_rules.test] -### Arguments [_arguments_334] +#### Request (object) [_request_query_rules.test] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated +- **`match_criteria` (Record)**: The match criteria to apply to rules in the given query ruleset. +Match criteria should match the keys defined in the `criteria.metadata` field of the rule. -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated - * **`match_criteria` (Record)**: The match criteria to apply to rules in the given query ruleset. Match criteria should match the keys defined in the `criteria.metadata` field of the rule. +## client.rollup.deleteJob [_rollup.delete_job] +Delete a rollup job. +A job must be stopped before it can be deleted. +If you attempt to delete a started job, an error occurs. +Similarly, if you attempt to delete a nonexistent job, an exception occurs. - -## rollup [_rollup] - - -### delete_job [_delete_job_2] - -Delete a rollup job. - -A job must be stopped before it can be deleted. If you attempt to delete a started job, an error occurs. Similarly, if you attempt to delete a nonexistent job, an exception occurs. - -::::{important} -When you delete a job, you remove only the process that is actively monitoring and rolling up data. 
The API does not delete any previously rolled up data. This is by design; a user may wish to roll up a static data set. Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job’s identifier in the rollup index. For example: -:::: - +IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. +The API does not delete any previously rolled up data. +This is by design; a user may wish to roll up a static data set. +Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). +Thus the job can be deleted, leaving behind the rolled up data for analysis. +If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. +If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: ``` POST my_rollup_index/_delete_by_query @@ -9909,23 +10573,18 @@ POST my_rollup_index/_delete_by_query client.rollup.deleteJob({ id }) ``` +### Arguments [_arguments_rollup.delete_job] -### Arguments [_arguments_335] +#### Request (object) [_request_rollup.delete_job] +- **`id` (string)**: Identifier for the job. -* **Request (object):** - - * **`id` (string)**: Identifier for the job. - - - -### get_jobs [_get_jobs_2] - -Get rollup job information. Get the configuration, stats, and status of rollup jobs. - -::::{note} -This API returns only active (both `STARTED` and `STOPPED`) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful. -:::: +## client.rollup.getJobs [_rollup.get_jobs] +Get rollup job information. +Get the configuration, stats, and status of rollup jobs. +NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. +If a job was created, ran for a while, then was deleted, the API does not return any details about it. +For details about a historical rollup job, the rollup capabilities API may be more useful. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs) @@ -9933,20 +10592,19 @@ This API returns only active (both `STARTED` and `STOPPED`) jobs. If a job was c client.rollup.getJobs({ ... }) ``` +### Arguments [_arguments_rollup.get_jobs] -### Arguments [_arguments_336] - -* **Request (object):** - - * **`id` (Optional, string)**: Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. +#### Request (object) [_request_rollup.get_jobs] +- **`id` (Optional, string)**: Identifier for the rollup job. +If it is `_all` or omitted, the API returns all rollup jobs. +## client.rollup.getRollupCaps [_rollup.get_rollup_caps] +Get the rollup job capabilities. +Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. 
- -### get_rollup_caps [_get_rollup_caps] - -Get the rollup job capabilities. Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. - -This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine: +This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. +Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. +This API enables you to inspect an index and determine: 1. Does this index have associated rollup data somewhere in the cluster? 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? @@ -9957,18 +10615,16 @@ This API is useful because a rollup job is often configured to rollup only a sub client.rollup.getRollupCaps({ ... }) ``` +### Arguments [_arguments_rollup.get_rollup_caps] -### Arguments [_arguments_337] - -* **Request (object):** - - * **`id` (Optional, string)**: Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. - - - -### get_rollup_index_caps [_get_rollup_index_caps] +#### Request (object) [_request_rollup.get_rollup_caps] +- **`id` (Optional, string)**: Index, indices or index-pattern to return rollup capabilities for. +`_all` may be used to fetch rollup capabilities from all jobs. -Get the rollup index capabilities. Get the rollup capabilities of all jobs inside of a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: +## client.rollup.getRollupIndexCaps [_rollup.get_rollup_index_caps] +Get the rollup index capabilities. +Get the rollup capabilities of all jobs inside of a rollup index. +A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: * What jobs are stored in an index (or indices specified via a pattern)? * What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? @@ -9979,23 +10635,16 @@ Get the rollup index capabilities. Get the rollup capabilities of all jobs insid client.rollup.getRollupIndexCaps({ index }) ``` +### Arguments [_arguments_rollup.get_rollup_index_caps] -### Arguments [_arguments_338] - -* **Request (object):** - - * **`index` (string | string[])**: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. - - - -### put_job [_put_job_2] +#### Request (object) [_request_rollup.get_rollup_index_caps] +- **`index` (string | string[])**: Data stream or index to check for rollup capabilities. +Wildcard (`*`) expressions are supported. +## client.rollup.putJob [_rollup.put_job] Create a rollup job. -::::{warning} -From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. 
-:::: - +WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. @@ -10009,34 +10658,51 @@ Jobs are created in a `STOPPED` state. You can start them with the start rollup client.rollup.putJob({ id, cron, groups, index_pattern, page_size, rollup_index }) ``` - -### Arguments [_arguments_339] - -* **Request (object):** - - * **`id` (string)**: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. The ID is persistent; it is stored with the rolled up data. If you create a job, let it run for a while, then delete the job, the data that the job rolled up is still be associated with this job ID. You cannot create a new job with the same ID since that could lead to problems with mismatched job configurations. - * **`cron` (string)**: A cron string which defines the intervals when the rollup job should be executed. When the interval triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated to the time interval of the data being rolled up. For example, you may wish to create hourly rollups of your document but to only run the indexer on a daily basis at midnight, as defined by the cron. The cron pattern is defined just like a Watcher cron schedule. - * **`groups` ({ date_histogram, histogram, terms })**: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of the groups configuration as defining a set of tools that can later be used in aggregations to partition the data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. - * **`index_pattern` (string)**: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to rollup the entire index or index-pattern. - * **`page_size` (number)**: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends to execute faster, but requires more memory during processing. This value has no effect on how the data is rolled up; it is merely used for tweaking the speed or memory cost of the indexer. - * **`rollup_index` (string)**: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. - * **`metrics` (Optional, { field, metrics }[])**: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined on a per-field basis and for each field you configure which metric should be collected. - * **`timeout` (Optional, string | -1 | 0)**: Time to wait for the request to complete. - * **`headers` (Optional, Record)** - - - -### rollup_search [_rollup_search] - -Search rolled-up data. 
The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. - -The request body supports a subset of features from the regular search API. The following functionality is not available: - -`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. +### Arguments [_arguments_rollup.put_job] + +#### Request (object) [_request_rollup.put_job] +- **`id` (string)**: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the +data that is associated with the rollup job. The ID is persistent; it is stored with the rolled +up data. If you create a job, let it run for a while, then delete the job, the data that the job +rolled up is still be associated with this job ID. You cannot create a new job with the same ID +since that could lead to problems with mismatched job configurations. +- **`cron` (string)**: A cron string which defines the intervals when the rollup job should be executed. When the interval +triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated +to the time interval of the data being rolled up. For example, you may wish to create hourly rollups +of your document but to only run the indexer on a daily basis at midnight, as defined by the cron. The +cron pattern is defined just like a Watcher cron schedule. +- **`groups` ({ date_histogram, histogram, terms })**: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be +available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of +the groups configuration as defining a set of tools that can later be used in aggregations to partition the +data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide +enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. +- **`index_pattern` (string)**: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to +rollup the entire index or index-pattern. +- **`page_size` (number)**: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends +to execute faster, but requires more memory during processing. This value has no effect on how the data is +rolled up; it is merely used for tweaking the speed or memory cost of the indexer. +- **`rollup_index` (string)**: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. +- **`metrics` (Optional, { field, metrics }[])**: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each +group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined +on a per-field basis and for each field you configure which metric should be collected. +- **`timeout` (Optional, string | -1 | 0)**: Time to wait for the request to complete. 
+- **`headers` (Optional, Record)** + +## client.rollup.rollupSearch [_rollup.rollup_search] +Search rolled-up data. +The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. +It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. + +The request body supports a subset of features from the regular search API. +The following functionality is not available: + +`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. +`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. **Searching both historical rollup and non-rollup data** -The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. For example: +The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. +This is done by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search @@ -10057,7 +10723,8 @@ The rollup search endpoint does two things when the search runs: * The original request is sent to the non-rollup index unaltered. * A rewritten version of the original request is sent to the rollup index. -When the two responses are received, the endpoint rewrites the rollup response and merges the two together. During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. +When the two responses are received, the endpoint rewrites the rollup response and merges the two together. +During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search) @@ -10065,29 +10732,26 @@ When the two responses are received, the endpoint rewrites the rollup response a client.rollup.rollupSearch({ index }) ``` +### Arguments [_arguments_rollup.rollup_search] -### Arguments [_arguments_340] - -* **Request (object):** +#### Request (object) [_request_rollup.rollup_search] +- **`index` (string | string[])**: A list of data streams and indices used to limit the request. +This parameter has the following rules: - * **`index` (string | string[])**: A list of data streams and indices used to limit the request. This parameter has the following rules: - -* At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream’s backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. +* At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. * Multiple non-rollup indices may be specified. * Only one rollup index may be specified. If more than one are supplied, an exception occurs. * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. 
However, you can use an expression to match multiple non-rollup indices or data streams. +- **`aggregations` (Optional, Record)**: Specifies aggregations. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specifies a DSL query that is subject to some limitations. +- **`size` (Optional, number)**: Must be zero if set, as rollups work on pre-aggregated data. +- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response +- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response - * **`aggregations` (Optional, Record)**: Specifies aggregations. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specifies a DSL query that is subject to some limitations. - * **`size` (Optional, number)**: Must be zero if set, as rollups work on pre-aggregated data. - * **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response - * **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response - - - -### start_job [_start_job] - -Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens. +## client.rollup.startJob [_rollup.start_job] +Start rollup jobs. +If you try to start a job that does not exist, an exception occurs. +If you try to start a job that is already started, nothing happens. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job) @@ -10095,26 +10759,24 @@ Start rollup jobs. If you try to start a job that does not exist, an exception o client.rollup.startJob({ id }) ``` +### Arguments [_arguments_rollup.start_job] -### Arguments [_arguments_341] - -* **Request (object):** - - * **`id` (string)**: Identifier for the rollup job. - +#### Request (object) [_request_rollup.start_job] +- **`id` (string)**: Identifier for the rollup job. 
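+
+For example, a minimal sketch that starts a previously created job; the identifier `sensor` is hypothetical:
+
+```ts
+// Starting a job that is already started does nothing,
+// so this call is safe to repeat.
+await client.rollup.startJob({ id: 'sensor' })
+```
+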
+## client.rollup.stopJob [_rollup.stop_job] +Stop rollup jobs. +If you try to stop a job that does not exist, an exception occurs. +If you try to stop a job that is already stopped, nothing happens. -### stop_job [_stop_job] - -Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. - -Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: +Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. +This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: ``` POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s ``` - -The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. +The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. +If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job) @@ -10122,23 +10784,21 @@ The parameter blocks the API call from returning until either the job has moved client.rollup.stopJob({ id }) ``` +### Arguments [_arguments_rollup.stop_job] -### Arguments [_arguments_342] - -* **Request (object):** - - * **`id` (string)**: Identifier for the rollup job. - * **`timeout` (Optional, string | -1 | 0)**: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. If more than `timeout` time has passed, the API throws a timeout exception. NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. The timeout simply means the API call itself timed out while waiting for the status change. - * **`wait_for_completion` (Optional, boolean)**: If set to `true`, causes the API to block until the indexer state completely stops. If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. - - - -## search_application [_search_application] +#### Request (object) [_request_rollup.stop_job] +- **`id` (string)**: Identifier for the rollup job. +- **`timeout` (Optional, string | -1 | 0)**: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. +If more than `timeout` time has passed, the API throws a timeout exception. +NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. +The timeout simply means the API call itself timed out while waiting for the status change. +- **`wait_for_completion` (Optional, boolean)**: If set to `true`, causes the API to block until the indexer state completely stops. +If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. +## client.searchApplication.delete [_search_application.delete] +Delete a search application. -### delete [_delete_8] - -Delete a search application. Remove a search application and its associated alias. 
Indices attached to the search application are not removed. +Remove a search application and its associated alias. Indices attached to the search application are not removed. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete) @@ -10146,18 +10806,14 @@ Delete a search application. Remove a search application and its associated alia client.searchApplication.delete({ name }) ``` +### Arguments [_arguments_search_application.delete] -### Arguments [_arguments_343] - -* **Request (object):** - - * **`name` (string)**: The name of the search application to delete - +#### Request (object) [_request_search_application.delete] +- **`name` (string)**: The name of the search application to delete. - -### delete_behavioral_analytics [_delete_behavioral_analytics] - -Delete a behavioral analytics collection. The associated data stream is also deleted. +## client.searchApplication.deleteBehavioralAnalytics [_search_application.delete_behavioral_analytics] +Delete a behavioral analytics collection. +The associated data stream is also deleted. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics) @@ -10165,17 +10821,12 @@ Delete a behavioral analytics collection. The associated data stream is also del client.searchApplication.deleteBehavioralAnalytics({ name }) ``` +### Arguments [_arguments_search_application.delete_behavioral_analytics] -### Arguments [_arguments_344] - -* **Request (object):** - - * **`name` (string)**: The name of the analytics collection to be deleted - - - -### get [_get_8] +#### Request (object) [_request_search_application.delete_behavioral_analytics] +- **`name` (string)**: The name of the analytics collection to be deleted +## client.searchApplication.get [_search_application.get] Get search application details. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get) @@ -10184,17 +10835,12 @@ Get search application details. client.searchApplication.get({ name }) ``` +### Arguments [_arguments_search_application.get] -### Arguments [_arguments_345] - -* **Request (object):** - - * **`name` (string)**: The name of the search application - - - -### get_behavioral_analytics [_get_behavioral_analytics] +#### Request (object) [_request_search_application.get] +- **`name` (string)**: The name of the search application +## client.searchApplication.getBehavioralAnalytics [_search_application.get_behavioral_analytics] Get behavioral analytics collections. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics) @@ -10203,38 +10849,29 @@ Get behavioral analytics collections. client.searchApplication.getBehavioralAnalytics({ ... }) ``` +### Arguments [_arguments_search_application.get_behavioral_analytics] -### Arguments [_arguments_346] +#### Request (object) [_request_search_application.get_behavioral_analytics] +- **`name` (Optional, string[])**: A list of analytics collections to limit the returned information -* **Request (object):** +## client.searchApplication.list [_search_application.list] +Get search applications. +Get information about search applications. - * **`name` (Optional, string[])**: A list of analytics collections to limit the returned information - - - -### list [_list_2] - -Get search applications. Get information about search applications. 
-
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-list)

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-list)

```ts
client.searchApplication.list({ ... })
```

+### Arguments [_arguments_search_application.list]

-### Arguments [_arguments_347]
-
-* **Request (object):**
-
-  * **`q` (Optional, string)**: Query in the Lucene query string syntax.
-  * **`from` (Optional, number)**: Starting offset.
-  * **`size` (Optional, number)**: Specifies a max number of results to get.
-
-
-
-### post_behavioral_analytics_event [_post_behavioral_analytics_event]

+#### Request (object) [_request_search_application.list]
+- **`q` (Optional, string)**: Query in the Lucene query string syntax.
+- **`from` (Optional, number)**: Starting offset.
+- **`size` (Optional, number)**: Specifies a max number of results to get.

+## client.searchApplication.postBehavioralAnalyticsEvent [_search_application.post_behavioral_analytics_event]
Create a behavioral analytics collection event.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event)

```ts
client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_type })
```

+### Arguments [_arguments_search_application.post_behavioral_analytics_event]

-### Arguments [_arguments_348]
-
-* **Request (object):**
-
-  * **`collection_name` (string)**: The name of the behavioral analytics collection.
-  * **`event_type` (Enum("page_view" | "search" | "search_click"))**: The analytics event type.
-  * **`payload` (Optional, User-defined value)**
-  * **`debug` (Optional, boolean)**: Whether the response type has to include more details
-
-
-
-### put [_put_3]

+#### Request (object) [_request_search_application.post_behavioral_analytics_event]
+- **`collection_name` (string)**: The name of the behavioral analytics collection.
+- **`event_type` (Enum("page_view" | "search" | "search_click"))**: The analytics event type.
+- **`payload` (Optional, User-defined value)**
+- **`debug` (Optional, boolean)**: Whether the response type has to include more details

+## client.searchApplication.put [_search_application.put]
Create or update a search application.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put)

```ts
client.searchApplication.put({ name })
```

+### Arguments [_arguments_search_application.put]

-### Arguments [_arguments_349]
-
-* **Request (object):**
-
-  * **`name` (string)**: The name of the search application to be created or updated.
-  * **`search_application` (Optional, { indices, analytics_collection_name, template })**
-  * **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing Search Applications.
-
-
-
-### put_behavioral_analytics [_put_behavioral_analytics]

+#### Request (object) [_request_search_application.put]
+- **`name` (string)**: The name of the search application to be created or updated.
+- **`search_application` (Optional, { indices, analytics_collection_name, template })**
+- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing Search Applications.
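As a usage sketch for the put API above (not part of the generated reference), the following creates a search application backed by two hypothetical indices with a minimal search template; the application name, index names, and template body are illustrative assumptions.

```ts
import { Client } from '@elastic/elasticsearch'

// Node URL, application name, indices, and template are hypothetical.
const client = new Client({ node: 'http://localhost:9200/' })

await client.searchApplication.put({
  name: 'website-search',
  create: true, // fail instead of overwriting an existing application
  search_application: {
    indices: ['website-products', 'website-blogs'],
    template: {
      script: {
        source: {
          query: {
            query_string: { query: '{{query_string}}' }
          }
        },
        params: { query_string: '*' }
      }
    }
  }
})
```

Once the application exists, it can be queried through the `search` method documented below, for example `client.searchApplication.search({ name: 'website-search', params: { query_string: 'shoes' } })`.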
+## client.searchApplication.putBehavioralAnalytics [_search_application.put_behavioral_analytics]
Create a behavioral analytics collection.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics)

```ts
client.searchApplication.putBehavioralAnalytics({ name })
```

+### Arguments [_arguments_search_application.put_behavioral_analytics]

-### Arguments [_arguments_350]
-
-* **Request (object):**
-
-  * **`name` (string)**: The name of the analytics collection to be created or updated.
-

+#### Request (object) [_request_search_application.put_behavioral_analytics]
+- **`name` (string)**: The name of the analytics collection to be created or updated.

-
-### render_query [_render_query]
-
-Render a search application query. Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.

+## client.searchApplication.renderQuery [_search_application.render_query]
+Render a search application query.
+Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified.
+If a parameter used in the search template is not specified in `params`, the parameter's default value will be used.
+The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.

You must have `read` privileges on the backing alias of the search application.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query)

```ts
client.searchApplication.renderQuery({ name })
```

+### Arguments [_arguments_search_application.render_query]

-### Arguments [_arguments_351]
-
-* **Request (object):**
-
-  * **`name` (string)**: The name of the search application to render teh query for.
-  * **`params` (Optional, Record)**

+#### Request (object) [_request_search_application.render_query]
+- **`name` (string)**: The name of the search application to render the query for.
+- **`params` (Optional, Record)**

-
-### search [_search_4]
-
-Run a search application search. Generate and run an Elasticsearch query that uses the specified query parameteter and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable.

+## client.searchApplication.search [_search_application.search]
+Run a search application search.
+Generate and run an Elasticsearch query that uses the specified query parameters and the search template associated with the search application or default template.
+Unspecified template parameters are assigned their default values if applicable.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search)

@@ -10327,23 +10949,16 @@ Run a search application search. 
Generate and run an Elasticsearch query that us client.searchApplication.search({ name }) ``` +### Arguments [_arguments_search_application.search] -### Arguments [_arguments_352] - -* **Request (object):** +#### Request (object) [_request_search_application.search] +- **`name` (string)**: The name of the search application to be searched. +- **`params` (Optional, Record)**: Query parameters specific to this request, which will override any defaults specified in the template. +- **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response. - * **`name` (string)**: The name of the search application to be searched. - * **`params` (Optional, Record)**: Query parameters specific to this request, which will override any defaults specified in the template. - * **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response. - - - -## searchable_snapshots [_searchable_snapshots] - - -### cache_stats [_cache_stats] - -Get cache statistics. Get statistics about the shared cache for partially mounted indices. +## client.searchableSnapshots.cacheStats [_searchable_snapshots.cache_stats] +Get cache statistics. +Get statistics about the shared cache for partially mounted indices. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats) @@ -10351,19 +10966,15 @@ Get cache statistics. Get statistics about the shared cache for partially mounte client.searchableSnapshots.cacheStats({ ... }) ``` +### Arguments [_arguments_searchable_snapshots.cache_stats] -### Arguments [_arguments_353] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: The names of the nodes in the cluster to target. - * **`master_timeout` (Optional, string | -1 | 0)** +#### Request (object) [_request_searchable_snapshots.cache_stats] +- **`node_id` (Optional, string | string[])**: The names of the nodes in the cluster to target. +- **`master_timeout` (Optional, string | -1 | 0)** - - -### clear_cache [_clear_cache_2] - -Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. +## client.searchableSnapshots.clearCache [_searchable_snapshots.clear_cache] +Clear the cache. +Clear indices and data streams from the shared cache for partially mounted indices. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache) @@ -10371,21 +10982,20 @@ Clear the cache. Clear indices and data streams from the shared cache for partia client.searchableSnapshots.clearCache({ ... }) ``` +### Arguments [_arguments_searchable_snapshots.clear_cache] -### Arguments [_arguments_354] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) - * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - - - -### mount [_mount] +#### Request (object) [_request_searchable_snapshots.clear_cache] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to clear from the cache. +It supports wildcards (`*`). +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) -Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes. +## client.searchableSnapshots.mount [_searchable_snapshots.mount] +Mount a snapshot. +Mount a snapshot as a searchable snapshot index. +Do not use this API for snapshots managed by index lifecycle management (ILM). +Manually mounting ILM-managed snapshots can interfere with ILM processes. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount) @@ -10393,25 +11003,23 @@ Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use th client.searchableSnapshots.mount({ repository, snapshot, index }) ``` +### Arguments [_arguments_searchable_snapshots.mount] -### Arguments [_arguments_355] - -* **Request (object):** - - * **`repository` (string)**: The name of the repository containing the snapshot of the index to mount. - * **`snapshot` (string)**: The name of the snapshot of the index to mount. - * **`index` (string)**: The name of the index contained in the snapshot whose data is to be mounted. If no `renamed_index` is specified, this name will also be used to create the new index. - * **`renamed_index` (Optional, string)**: The name of the index that will be created. - * **`index_settings` (Optional, Record)**: The settings that should be added to the index when it is mounted. - * **`ignore_index_settings` (Optional, string[])**: The names of settings that should be removed from the index when it is mounted. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until the operation is complete. - * **`storage` (Optional, string)**: The mount option for the searchable snapshot index. - - - -### stats [_stats_6] +#### Request (object) [_request_searchable_snapshots.mount] +- **`repository` (string)**: The name of the repository containing the snapshot of the index to mount. +- **`snapshot` (string)**: The name of the snapshot of the index to mount. +- **`index` (string)**: The name of the index contained in the snapshot whose data is to be mounted. 
+If no `renamed_index` is specified, this name will also be used to create the new index. +- **`renamed_index` (Optional, string)**: The name of the index that will be created. +- **`index_settings` (Optional, Record)**: The settings that should be added to the index when it is mounted. +- **`ignore_index_settings` (Optional, string[])**: The names of settings that should be removed from the index when it is mounted. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until the operation is complete. +- **`storage` (Optional, string)**: The mount option for the searchable snapshot index. +## client.searchableSnapshots.stats [_searchable_snapshots.stats] Get searchable snapshot statistics. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats) @@ -10420,33 +11028,27 @@ Get searchable snapshot statistics. client.searchableSnapshots.stats({ ... }) ``` +### Arguments [_arguments_searchable_snapshots.stats] -### Arguments [_arguments_356] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams and indices to retrieve statistics for. - * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Return stats aggregated at cluster, index or shard level - - - -## security [_security] - - -### activate_user_profile [_activate_user_profile] +#### Request (object) [_request_searchable_snapshots.stats] +- **`index` (Optional, string | string[])**: A list of data streams and indices to retrieve statistics for. +- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Return stats aggregated at cluster, index or shard level +## client.security.activateUserProfile [_security.activate_user_profile] Activate a user profile. Create or update a user profile on behalf of another user. -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. Elastic reserves the right to change or remove this feature in future releases without prior notice. -:::: +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. +Elastic reserves the right to change or remove this feature in future releases without prior notice. +This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name,` `roles`, and the authentication realm. +For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. 
-This API creates or updates a profile document for end users with information that is extracted from the user’s authentication object including `username`, `full_name,` `roles`, and the authentication realm. For example, in the JWT `access_token` case, the profile user’s `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. - -When updating a profile document, the API enables the document if it was disabled. Any updates do not change existing content for either the `labels` or `data` fields. +When updating a profile document, the API enables the document if it was disabled. +Any updates do not change existing content for either the `labels` or `data` fields. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile) @@ -10454,23 +11056,28 @@ When updating a profile document, the API enables the document if it was disable client.security.activateUserProfile({ grant_type }) ``` +### Arguments [_arguments_security.activate_user_profile] -### Arguments [_arguments_357] - -* **Request (object):** - - * **`grant_type` (Enum("password" | "access_token"))**: The type of grant. - * **`access_token` (Optional, string)**: The user’s Elasticsearch access token or JWT. Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. - * **`password` (Optional, string)**: The user’s password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. - * **`username` (Optional, string)**: The username that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. - - - -### authenticate [_authenticate] +#### Request (object) [_request_security.activate_user_profile] +- **`grant_type` (Enum("password" | "access_token"))**: The type of grant. +- **`access_token` (Optional, string)**: The user's Elasticsearch access token or JWT. +Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. +If you specify the `access_token` grant type, this parameter is required. +It is not valid with other grant types. +- **`password` (Optional, string)**: The user's password. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +- **`username` (Optional, string)**: The username that identifies the user. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +## client.security.authenticate [_security.authenticate] Authenticate a user. -Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. +Authenticates a user and returns information about the authenticated user. +Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). 
+A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. +If the user cannot be authenticated, this API returns a 401 status code. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate) @@ -10479,11 +11086,11 @@ client.security.authenticate() ``` -### bulk_delete_role [_bulk_delete_role] - +## client.security.bulkDeleteRole [_security.bulk_delete_role] Bulk delete roles. -The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The bulk delete roles API cannot delete roles that are defined in roles files. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role) @@ -10491,21 +11098,17 @@ The role management APIs are generally the preferred way to manage roles, rather client.security.bulkDeleteRole({ names }) ``` +### Arguments [_arguments_security.bulk_delete_role] -### Arguments [_arguments_358] - -* **Request (object):** - - * **`names` (string[])**: An array of role names to delete - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### bulk_put_role [_bulk_put_role] +#### Request (object) [_request_security.bulk_delete_role] +- **`names` (string[])**: An array of role names to delete +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.bulkPutRole [_security.bulk_put_role] Bulk create or update roles. -The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The bulk create or update roles API cannot update roles that are defined in roles files. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role) @@ -10513,35 +11116,27 @@ The role management APIs are generally the preferred way to manage roles, rather client.security.bulkPutRole({ roles }) ``` +### Arguments [_arguments_security.bulk_put_role] -### Arguments [_arguments_359] +#### Request (object) [_request_security.bulk_put_role] +- **`roles` (Record)**: A dictionary of role name to RoleDescriptor objects to add or update +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
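A minimal sketch of the two bulk role APIs above; the role names, index patterns, and privileges are illustrative only and assume a cluster with security enabled.

```ts
import { Client } from '@elastic/elasticsearch'

// Node URL, role names, and privileges are hypothetical examples.
const client = new Client({ node: 'http://localhost:9200/' })

// Create or update two roles in a single call.
await client.security.bulkPutRole({
  roles: {
    'logs-reader': {
      cluster: ['monitor'],
      indices: [{ names: ['logs-*'], privileges: ['read'] }]
    },
    'logs-writer': {
      cluster: ['monitor'],
      indices: [{ names: ['logs-*'], privileges: ['create_doc', 'create_index'] }]
    }
  },
  refresh: 'wait_for' // make the change visible to search before returning
})

// Remove both roles in one call. Roles defined in roles files cannot
// be deleted this way.
await client.security.bulkDeleteRole({
  names: ['logs-reader', 'logs-writer']
})
```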
-* **Request (object):** - - * **`roles` (Record)**: A dictionary of role name to RoleDescriptor objects to add or update - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### bulk_update_api_keys [_bulk_update_api_keys] - -Bulk update API keys. Update the attributes for multiple API keys. - -::::{important} -It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user’s credentials are required. -:::: +## client.security.bulkUpdateApiKeys [_security.bulk_update_api_keys] +Bulk update API keys. +Update the attributes for multiple API keys. +IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. It is not possible to update expired or invalidated API keys. -This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call. - -::::{important} -If you don’t specify `role_descriptors` in the request, a call to this API might still change an API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified. -:::: +This API supports updates to API key access scope, metadata and expiration. +The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. +IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. @@ -10551,20 +11146,25 @@ A successful request returns a JSON structure that contains the IDs of all updat client.security.bulkUpdateApiKeys({ ids }) ``` - -### Arguments [_arguments_360] - -* **Request (object):** - - * **`ids` (string | string[])**: The API key identifiers. - * **`expiration` (Optional, string | -1 | 0)**: Expiration time for the API keys. By default, API keys never expire. This property can be omitted to leave the value unchanged. - * **`metadata` (Optional, Record)**: Arbitrary nested metadata to associate with the API keys. Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. Any information specified with this parameter fully replaces metadata previously associated with the API key. - * **`role_descriptors` (Optional, Record)**: The role descriptors to assign to the API keys. 
An API key’s effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user’s full permissions. The snapshot of the owner’s permissions is always updated, whether you supply the `role_descriptors` parameter. The structure of a role descriptor is the same as the request for the create API keys API. - - - -### change_password [_change_password] - +### Arguments [_arguments_security.bulk_update_api_keys] + +#### Request (object) [_request_security.bulk_update_api_keys] +- **`ids` (string | string[])**: The API key identifiers. +- **`expiration` (Optional, string | -1 | 0)**: Expiration time for the API keys. +By default, API keys never expire. +This property can be omitted to leave the value unchanged. +- **`metadata` (Optional, Record)**: Arbitrary nested metadata to associate with the API keys. +Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. +Any information specified with this parameter fully replaces metadata previously associated with the API key. +- **`role_descriptors` (Optional, Record)**: The role descriptors to assign to the API keys. +An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter. +The structure of a role descriptor is the same as the request for the create API keys API. + +## client.security.changePassword [_security.change_password] Change passwords. Change the passwords of users in the native realm and built-in users. @@ -10575,23 +11175,23 @@ Change the passwords of users in the native realm and built-in users. client.security.changePassword({ ... }) ``` +### Arguments [_arguments_security.change_password] -### Arguments [_arguments_361] - -* **Request (object):** - - * **`username` (Optional, string)**: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. - * **`password` (Optional, string)**: The new password value. Passwords must be at least 6 characters long. - * **`password_hash` (Optional, string)**: A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### clear_api_key_cache [_clear_api_key_cache] +#### Request (object) [_request_security.change_password] +- **`username` (Optional, string)**: The user whose password you want to change. 
If you do not specify this
+parameter, the password is changed for the current user.
- **`password` (Optional, string)**: The new password value. Passwords must be at least 6 characters long.
- **`password_hash` (Optional, string)**: A hash of the new password value. This must be produced using the same
+hashing algorithm as has been configured for password storage. For more details,
+see the explanation of the `xpack.security.authc.password_hashing.algorithm`
+setting.
- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

## client.security.clearApiKeyCache [_security.clear_api_key_cache]
Clear the API key cache.

Evict a subset of all entries from the API key cache.
+The cache is also automatically cleared on state changes of the security index.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache)

```ts
client.security.clearApiKeyCache({ ids })
```

### Arguments [_arguments_security.clear_api_key_cache]

#### Request (object) [_request_security.clear_api_key_cache]
- **`ids` (string | string[])**: List of API key IDs to evict from the API key cache.
+To evict all API keys, use `*`.
+Does not support other wildcard patterns.

## client.security.clearCachedPrivileges [_security.clear_cached_privileges]
Clear the privileges cache.

Evict privileges from the native application privilege cache.
+The cache is also automatically cleared for applications that have their privileges updated.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges)

```ts
client.security.clearCachedPrivileges({ application })
```

### Arguments [_arguments_security.clear_cached_privileges]

#### Request (object) [_request_security.clear_cached_privileges]
- **`application` (string)**: A list of applications.
+To clear all applications, use an asterisk (`*`).
+It does not support other wildcard patterns.

## client.security.clearCachedRealms [_security.clear_cached_realms]
Clear the user cache.

Evict users from the user cache.
+You can completely clear the cache or evict specific users. 
-User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. There are realm settings that you can use to configure the user cache. For more information, refer to the documentation about controlling the user cache. +User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. +There are realm settings that you can use to configure the user cache. +For more information, refer to the documentation about controlling the user cache. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms) @@ -10643,18 +11241,16 @@ User credentials are cached in memory on each node to avoid connecting to a remo client.security.clearCachedRealms({ realms }) ``` +### Arguments [_arguments_security.clear_cached_realms] -### Arguments [_arguments_364] - -* **Request (object):** - - * **`realms` (string | string[])**: A list of realms. To clear all realms, use an asterisk (`*`). It does not support other wildcard patterns. - * **`usernames` (Optional, string[])**: A list of the users to clear from the cache. If you do not specify this parameter, the API evicts all users from the user cache. - - - -### clear_cached_roles [_clear_cached_roles] +#### Request (object) [_request_security.clear_cached_realms] +- **`realms` (string | string[])**: A list of realms. +To clear all realms, use an asterisk (`*`). +It does not support other wildcard patterns. +- **`usernames` (Optional, string[])**: A list of the users to clear from the cache. +If you do not specify this parameter, the API evicts all users from the user cache. +## client.security.clearCachedRoles [_security.clear_cached_roles] Clear the roles cache. Evict roles from the native role cache. @@ -10665,22 +11261,22 @@ Evict roles from the native role cache. client.security.clearCachedRoles({ name }) ``` +### Arguments [_arguments_security.clear_cached_roles] -### Arguments [_arguments_365] - -* **Request (object):** - - * **`name` (string | string[])**: A list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard patterns. - - - -### clear_cached_service_tokens [_clear_cached_service_tokens] +#### Request (object) [_request_security.clear_cached_roles] +- **`name` (string | string[])**: A list of roles to evict from the role cache. +To evict all roles, use an asterisk (`*`). +It does not support other wildcard patterns. +## client.security.clearCachedServiceTokens [_security.clear_cached_service_tokens] Clear service account token caches. -Evict a subset of all entries from the service account token caches. Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. This API clears matching entries from both caches. +Evict a subset of all entries from the service account token caches. +Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. +This API clears matching entries from both caches. -The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. 
+The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. +The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens) @@ -10688,36 +11284,30 @@ The cache for service account tokens backed by the `.security` index is cleared client.security.clearCachedServiceTokens({ namespace, service, name }) ``` +### Arguments [_arguments_security.clear_cached_service_tokens] -### Arguments [_arguments_366] - -* **Request (object):** - - * **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. - * **`service` (string)**: The name of the service, which must be unique within its namespace. - * **`name` (string | string[])**: A list of token names to evict from the service account token caches. Use a wildcard (`*`) to evict all tokens that belong to a service account. It does not support other wildcard patterns. - - - -### create_api_key [_create_api_key] +#### Request (object) [_request_security.clear_cached_service_tokens] +- **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. +- **`service` (string)**: The name of the service, which must be unique within its namespace. +- **`name` (string | string[])**: A list of token names to evict from the service account token caches. +Use a wildcard (`*`) to evict all tokens that belong to a service account. +It does not support other wildcard patterns. +## client.security.createApiKey [_security.create_api_key] Create an API key. Create an API key for access without requiring basic authentication. -::::{important} -If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error. -:::: +IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. +If you specify privileges, the API returns an error. +A successful request returns a JSON structure that contains the API key, its unique id, and its name. +If applicable, it also returns expiration information for the API key in milliseconds. -A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. - -::::{note} -By default, API keys never expire. You can specify expiration information when you create the API keys. -:::: +NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. - -The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation. +The API keys are created by the Elasticsearch API key service, which is automatically enabled. +To configure or turn off the API key service, refer to API key service setting documentation. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) @@ -10725,45 +11315,42 @@ The API keys are created by the Elasticsearch API key service, which is automati client.security.createApiKey({ ... 
}) ``` +### Arguments [_arguments_security.create_api_key] -### Arguments [_arguments_367] - -* **Request (object):** +#### Request (object) [_request_security.create_api_key] +- **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. +By default, API keys never expire. +- **`name` (Optional, string)**: A name for the API key. +- **`role_descriptors` (Optional, Record)**: An array of role descriptors for this API key. +When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. +If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. +The structure of role descriptor is the same as the request for the create role API. +For more details, refer to the create or update roles API. - * **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. By default, API keys never expire. - * **`name` (Optional, string)**: A name for the API key. - * **`role_descriptors` (Optional, Record)**: An array of role descriptors for this API key. When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for the create role API. For more details, refer to the create or update roles API. - - -::::{note} -Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. In this case, you must explicitly specify a role descriptor with no privileges. The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. ** *`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. ** *`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. -:::: - - - -### create_cross_cluster_api_key [_create_cross_cluster_api_key] +NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. +In this case, you must explicitly specify a role descriptor with no privileges. +The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. 
+- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.createCrossClusterApiKey [_security.create_cross_cluster_api_key] Create a cross-cluster API key. -Create an API key of the `cross_cluster` type for the API key based remote cluster access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. - -::::{important} -To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. -:::: +Create an API key of the `cross_cluster` type for the API key based remote cluster access. +A `cross_cluster` API key cannot be used to authenticate through the REST interface. +IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. -::::{note} -Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. -:::: - +NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. -Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. +Cross-cluster API keys can only be updated with the update cross-cluster API key API. +Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key) @@ -10771,30 +11358,29 @@ Cross-cluster API keys can only be updated with the update cross-cluster API key client.security.createCrossClusterApiKey({ access, name }) ``` +### Arguments [_arguments_security.create_cross_cluster_api_key] -### Arguments [_arguments_368] - -* **Request (object):** +#### Request (object) [_request_security.create_cross_cluster_api_key] +- **`access` ({ replication, search })**: The access to be granted to this API key. +The access is composed of permissions for cross-cluster search and cross-cluster replication. +At least one of them must be specified. - * **`access` ({ replication, search })**: The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At least one of them must be specified. - - -::::{note} -No explicit privileges should be specified for either search or replication access. The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. 
** *`name` (string)**: Specifies the name for this API key. *** *`expiration` (Optional, string | -1 | 0)**: Expiration time for the API key. By default, API keys never expire. ** *`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. -:::: - - - -### create_service_token [_create_service_token] +NOTE: No explicit privileges should be specified for either search or replication access. +The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. +- **`name` (string)**: Specifies the name for this API key. +- **`expiration` (Optional, string | -1 | 0)**: Expiration time for the API key. +By default, API keys never expire. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. +It supports nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. +## client.security.createServiceToken [_security.create_service_token] Create a service account token. Create a service accounts token for access without requiring basic authentication. -::::{note} -Service account tokens never expire. You must actively delete them if they are no longer needed. -:::: - +NOTE: Service account tokens never expire. +You must actively delete them if they are no longer needed. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token) @@ -10802,36 +11388,33 @@ Service account tokens never expire. You must actively delete them if they are n client.security.createServiceToken({ namespace, service }) ``` +### Arguments [_arguments_security.create_service_token] -### Arguments [_arguments_369] - -* **Request (object):** - - * **`namespace` (string)**: The name of the namespace, which is a top-level grouping of service accounts. - * **`service` (string)**: The name of the service. - * **`name` (Optional, string)**: The name for the service account token. If omitted, a random name will be generated. +#### Request (object) [_request_security.create_service_token] +- **`namespace` (string)**: The name of the namespace, which is a top-level grouping of service accounts. +- **`service` (string)**: The name of the service. +- **`name` (Optional, string)**: The name for the service account token. +If omitted, a random name will be generated. +Token names must be at least one and no more than 256 characters. +They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. -Token names must be at least one and no more than 256 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. - -::::{note} -Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. *** *`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
-
-::::
-
-
-### delegate_pki [_delegate_pki]

+NOTE: Token names must be unique in the context of the associated service account.
+They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`.
+- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

+## client.security.delegatePki [_security.delegate_pki]
Delegate PKI authentication.

-This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to thw `username_pattern` of the respective realm.
+This API implements the exchange of an X509Certificate chain for an Elasticsearch access token.
+The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`.
+A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm.

-This API is called by smart and trusted proxies, such as Kibana, which terminate the user’s TLS session but still want to authenticate the user by using a PKI realm—as if the user connected directly to Elasticsearch.
-
-::::{important}
-The association between the subject public key in the target certificate and the corresponding private key is not validated. This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.
-::::
+This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch.

+IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated.
+This is part of the TLS authentication process and it is delegated to the proxy that calls this API.
+The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki)

```
client.security.delegatePki({ x509_certificate_chain })
```

+### Arguments [_arguments_security.delegate_pki]

-### Arguments [_arguments_370]
-
-* **Request (object):**
-
-  * **`x509_certificate_chain` (string[])**: The X509Certificate chain, which is represented as an ordered string array. Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) of the certificate’s DER encoding.
-

+#### Request (object) [_request_security.delegate_pki]
+- **`x509_certificate_chain` (string[])**: The X509Certificate chain, which is represented as an ordered string array.
+Each string in the array is the base64-encoded form (Section 4 of RFC 4648, not base64url-encoded) of the certificate's DER encoding.

-The first element is the target certificate that contains the subject distinguished name that is requesting access. This may be followed by additional certificates; each subsequent certificate is used to certify the previous one.
-
-
-### delete_privileges [_delete_privileges]
+The first element is the target certificate that contains the subject distinguished name that is requesting access.
+This may be followed by additional certificates; each subsequent certificate is used to certify the previous one.

+## client.security.deletePrivileges [_security.delete_privileges]
Delete application privileges.

To use this API, you must have one of the following privileges:

```
client.security.deletePrivileges({ application, name })
```

+### Arguments [_arguments_security.delete_privileges]

-### Arguments [_arguments_371]
-
-* **Request (object):**
-
-  * **`application` (string)**: The name of the application. Application privileges are always associated with exactly one application.
-  * **`name` (string | string[])**: The name of the privilege.
-  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

+#### Request (object) [_request_security.delete_privileges]
+- **`application` (string)**: The name of the application.
+Application privileges are always associated with exactly one application.
+- **`name` (string | string[])**: The name of the privilege.
+- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

+## client.security.deleteRole [_security.delete_role]
Delete roles.

-Delete roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The delete roles API cannot remove roles that are defined in roles files.
+Delete roles in the native realm.
+The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
+The delete roles API cannot remove roles that are defined in roles files.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role)

```
client.security.deleteRole({ name })
```

+### Arguments [_arguments_security.delete_role]

-### Arguments [_arguments_372]
-
-* **Request (object):**
-
-  * **`name` (string)**: The name of the role.
-  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
- - - -### delete_role_mapping [_delete_role_mapping] +#### Request (object) [_request_security.delete_role] +- **`name` (string)**: The name of the role. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.deleteRoleMapping [_security.delete_role_mapping] Delete role mappings. -Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The delete role mappings API cannot remove role mappings that are defined in role mapping files. +Role mappings define which roles are assigned to each user. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. +The delete role mappings API cannot remove role mappings that are defined in role mapping files. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping) @@ -10910,18 +11485,14 @@ Role mappings define which roles are assigned to each user. The role mapping API client.security.deleteRoleMapping({ name }) ``` +### Arguments [_arguments_security.delete_role_mapping] -### Arguments [_arguments_373] - -* **Request (object):** - - * **`name` (string)**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### delete_service_token [_delete_service_token] +#### Request (object) [_request_security.delete_role_mapping] +- **`name` (string)**: The distinct name that identifies the role mapping. +The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.deleteServiceToken [_security.delete_service_token] Delete service account tokens. Delete service account tokens for a service in a specified namespace. @@ -10932,20 +11503,15 @@ Delete service account tokens for a service in a specified namespace. client.security.deleteServiceToken({ namespace, service, name }) ``` +### Arguments [_arguments_security.delete_service_token] -### Arguments [_arguments_374] - -* **Request (object):** - - * **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. - * **`service` (string)**: The service name. - * **`name` (string)**: The name of the service account token. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
- - - -### delete_user [_delete_user] +#### Request (object) [_request_security.delete_service_token] +- **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. +- **`service` (string)**: The service name. +- **`name` (string)**: The name of the service account token. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.deleteUser [_security.delete_user] Delete users. Delete users from the native realm. @@ -10956,21 +11522,18 @@ Delete users from the native realm. client.security.deleteUser({ username }) ``` +### Arguments [_arguments_security.delete_user] -### Arguments [_arguments_375] - -* **Request (object):** - - * **`username` (string)**: An identifier for the user. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### disable_user [_disable_user] +#### Request (object) [_request_security.delete_user] +- **`username` (string)**: An identifier for the user. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.disableUser [_security.disable_user] Disable users. -Disable users in the native realm. By default, when you create users, they are enabled. You can use this API to revoke a user’s access to Elasticsearch. +Disable users in the native realm. +By default, when you create users, they are enabled. +You can use this API to revoke a user's access to Elasticsearch. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user) @@ -10978,28 +11541,23 @@ Disable users in the native realm. By default, when you create users, they are e client.security.disableUser({ username }) ``` +### Arguments [_arguments_security.disable_user] -### Arguments [_arguments_376] - -* **Request (object):** - - * **`username` (string)**: An identifier for the user. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### disable_user_profile [_disable_user_profile] +#### Request (object) [_request_security.disable_user] +- **`username` (string)**: An identifier for the user. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.disableUserProfile [_security.disable_user_profile] Disable a user profile. Disable user profiles so that they are not visible in user profile searches. 
-
-::::{note}
-The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
-::::
+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.

-When you activate a user profile, its automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches. To re-enable a disabled user profile, use the enable user profile API .
+When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it's not visible in these searches.
+To re-enable a disabled user profile, use the enable user profile API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile)

```
client.security.disableUserProfile({ uid })
```

+### Arguments [_arguments_security.disable_user_profile]

-### Arguments [_arguments_377]
-
-* **Request (object):**
-
-  * **`uid` (string)**: Unique identifier for the user profile.
-  * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, it does nothing with refreshes.
-
-
-### enable_user [_enable_user]
+#### Request (object) [_request_security.disable_user_profile]
+- **`uid` (string)**: Unique identifier for the user profile.
+- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search.
+If 'wait_for', it waits for a refresh to make this operation visible to search.
+If 'false', it does nothing with refreshes.

+## client.security.enableUser [_security.enable_user]
Enable users.

-Enable users in the native realm. By default, when you create users, they are enabled.
+Enable users in the native realm.
+By default, when you create users, they are enabled.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user)

```
client.security.enableUser({ username })
```

+### Arguments [_arguments_security.enable_user]

-### Arguments [_arguments_378]
+#### Request (object) [_request_security.enable_user]
+- **`username` (string)**: An identifier for the user.
+- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.

-* **Request (object):**
+## client.security.enableUserProfile [_security.enable_user_profile]
+Enable a user profile.
- * **`username` (string)**: An identifier for the user. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +Enable user profiles to make them visible in user profile searches. +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. - -### enable_user_profile [_enable_user_profile] - -Enable a user profile. - -Enable user profiles to make them visible in user profile searches. - -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. -:::: - - -When you activate a user profile, it’s automatically enabled and visible in user profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. +When you activate a user profile, it's automatically enabled and visible in user profile searches. +If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile) @@ -11058,26 +11609,22 @@ When you activate a user profile, it’s automatically enabled and visible in us client.security.enableUserProfile({ uid }) ``` +### Arguments [_arguments_security.enable_user_profile] -### Arguments [_arguments_379] - -* **Request (object):** - - * **`uid` (string)**: A unique identifier for the user profile. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, nothing is done with refreshes. - - - -### enroll_kibana [_enroll_kibana] +#### Request (object) [_request_security.enable_user_profile] +- **`uid` (string)**: A unique identifier for the user profile. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation +visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', nothing is done with refreshes. +## client.security.enrollKibana [_security.enroll_kibana] Enroll Kibana. Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. -::::{note} -This API is currently intended for internal use only by Kibana. Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. -:::: - +NOTE: This API is currently intended for internal use only by Kibana. +Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. 
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana) @@ -11086,13 +11633,13 @@ client.security.enrollKibana() ``` -### enroll_node [_enroll_node] - +## client.security.enrollNode [_security.enroll_node] Enroll a node. Enroll a new node to allow it to join an existing cluster with security features enabled. -The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. +The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. +The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node) @@ -11101,11 +11648,12 @@ client.security.enrollNode() ``` -### get_api_key [_get_api_key] - +## client.security.getApiKey [_security.get_api_key] Get API key information. -Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. +Retrieves information for one or more API keys. +NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. +If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key) @@ -11113,24 +11661,28 @@ Retrieves information for one or more API keys. NOTE: If you have only the `mana client.security.getApiKey({ ... }) ``` - -### Arguments [_arguments_380] - -* **Request (object):** - - * **`id` (Optional, string)**: An API key id. This parameter cannot be used with any of `name`, `realm_name` or `username`. - * **`name` (Optional, string)**: An API key name. This parameter cannot be used with any of `id`, `realm_name` or `username`. It supports prefix search with wildcard. - * **`owner` (Optional, boolean)**: A boolean flag that can be used to query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. - * **`realm_name` (Optional, string)**: The name of an authentication realm. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. - * **`username` (Optional, string)**: The username of a user. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. - * **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user’s role descriptors associated with the API key. An API key’s actual permission is the intersection of its assigned role descriptors and the owner user’s role descriptors. 
- * **`active_only` (Optional, boolean)**: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. - * **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. - - - -### get_builtin_privileges [_get_builtin_privileges] - +### Arguments [_arguments_security.get_api_key] + +#### Request (object) [_request_security.get_api_key] +- **`id` (Optional, string)**: An API key id. +This parameter cannot be used with any of `name`, `realm_name` or `username`. +- **`name` (Optional, string)**: An API key name. +This parameter cannot be used with any of `id`, `realm_name` or `username`. +It supports prefix search with wildcard. +- **`owner` (Optional, boolean)**: A boolean flag that can be used to query API keys owned by the currently authenticated user. +The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. +- **`realm_name` (Optional, string)**: The name of an authentication realm. +This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. +- **`username` (Optional, string)**: The username of a user. +This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. +- **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user's role descriptors +associated with the API key. An API key's actual +permission is the intersection of its assigned role +descriptors and the owner user's role descriptors. +- **`active_only` (Optional, boolean)**: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. +- **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. + +## client.security.getBuiltinPrivileges [_security.get_builtin_privileges] Get builtin privileges. Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. @@ -11142,8 +11694,7 @@ client.security.getBuiltinPrivileges() ``` -### get_privileges [_get_privileges] - +## client.security.getPrivileges [_security.get_privileges] Get application privileges. To use this API, you must have one of the following privileges: @@ -11157,21 +11708,21 @@ To use this API, you must have one of the following privileges: client.security.getPrivileges({ ... }) ``` +### Arguments [_arguments_security.get_privileges] -### Arguments [_arguments_381] - -* **Request (object):** - - * **`application` (Optional, string)**: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. - * **`name` (Optional, string | string[])**: The name of the privilege. 
If you do not specify this parameter, the API returns information about all privileges for the requested application. - - - -### get_role [_get_role] +#### Request (object) [_request_security.get_privileges] +- **`application` (Optional, string)**: The name of the application. +Application privileges are always associated with exactly one application. +If you do not specify this parameter, the API returns information about all privileges for all applications. +- **`name` (Optional, string | string[])**: The name of the privilege. +If you do not specify this parameter, the API returns information about all privileges for the requested application. +## client.security.getRole [_security.get_role] Get roles. -Get roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files. +Get roles in the native realm. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The get roles API cannot retrieve roles that are defined in roles files. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role) @@ -11179,20 +11730,19 @@ Get roles in the native realm. The role management APIs are generally the prefer client.security.getRole({ ... }) ``` +### Arguments [_arguments_security.get_role] -### Arguments [_arguments_382] - -* **Request (object):** - - * **`name` (Optional, string | string[])**: The name of the role. You can specify multiple roles as a list. If you do not specify this parameter, the API returns information about all roles. - - - -### get_role_mapping [_get_role_mapping] +#### Request (object) [_request_security.get_role] +- **`name` (Optional, string | string[])**: The name of the role. +You can specify multiple roles as a list. +If you do not specify this parameter, the API returns information about all roles. +## client.security.getRoleMapping [_security.get_role_mapping] Get role mappings. -Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files. +Role mappings define which roles are assigned to each user. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. +The get role mappings API cannot retrieve role mappings that are defined in role mapping files. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping) @@ -11200,25 +11750,17 @@ Role mappings define which roles are assigned to each user. The role mapping API client.security.getRoleMapping({ ... }) ``` +### Arguments [_arguments_security.get_role_mapping] -### Arguments [_arguments_383] - -* **Request (object):** - - * **`name` (Optional, string | string[])**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings. 
- - - -### get_service_accounts [_get_service_accounts] +#### Request (object) [_request_security.get_role_mapping] +- **`name` (Optional, string | string[])**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings. +## client.security.getServiceAccounts [_security.get_service_accounts] Get service accounts. Get a list of service accounts that match the provided path parameters. -::::{note} -Currently, only the `elastic/fleet-server` service account is available. -:::: - +NOTE: Currently, only the `elastic/fleet-server` service account is available. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts) @@ -11226,28 +11768,24 @@ Currently, only the `elastic/fleet-server` service account is available. client.security.getServiceAccounts({ ... }) ``` +### Arguments [_arguments_security.get_service_accounts] -### Arguments [_arguments_384] - -* **Request (object):** - - * **`namespace` (Optional, string)**: The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter. - * **`service` (Optional, string)**: The service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. - - - -### get_service_credentials [_get_service_credentials] +#### Request (object) [_request_security.get_service_accounts] +- **`namespace` (Optional, string)**: The name of the namespace. +Omit this parameter to retrieve information about all service accounts. +If you omit this parameter, you must also omit the `service` parameter. +- **`service` (Optional, string)**: The service name. +Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. +## client.security.getServiceCredentials [_security.get_service_credentials] Get service account credentials. To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`). The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster. -::::{note} -For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens. -:::: - +NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. +Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens. 
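+
+For example, the following sketch lists the credentials of the `elastic/fleet-server` service account, currently the only available service account (error handling is omitted):
+
+```
+const response = await client.security.getServiceCredentials({
+  namespace: 'elastic',
+  service: 'fleet-server'
+})
+// `tokens` holds the index-backed tokens; tokens collected from the
+// nodes' `service_tokens` files are reported separately in the response.
+console.log(response.count, response.tokens)
+```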
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials)

```
client.security.getServiceCredentials({ namespace, service })
```

+### Arguments [_arguments_security.get_service_credentials]

-### Arguments [_arguments_385]
-
-* **Request (object):**
-
-  * **`namespace` (string)**: The name of the namespace.
-  * **`service` (string)**: The service name.
-
-
-### get_settings [_get_settings_3]
+#### Request (object) [_request_security.get_service_credentials]
+- **`namespace` (string)**: The name of the namespace.
+- **`service` (string)**: The service name.

+## client.security.getSettings [_security.get_settings]
Get security index settings.

-Get the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of the index settings — those that are user-configurable—will be shown. This includes:
+Get the user-configurable settings for the security internal index (`.security` and associated indices).
+Only a subset of the index settings (those that are user-configurable) will be shown.
+This includes:

* `index.auto_expand_replicas`
* `index.number_of_replicas`

```
client.security.getSettings({ ... })
```

+### Arguments [_arguments_security.get_settings]

-### Arguments [_arguments_386]
-
-* **Request (object):**
-
-  * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.

+#### Request (object) [_request_security.get_settings]
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.

+## client.security.getToken [_security.get_token]
Get a token.

-Create a bearer token for access without requiring basic authentication. The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting. When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.
+Create a bearer token for access without requiring basic authentication.
+The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface.
+Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting.
+When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.

The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.

A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available.

-The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used.
That time period is defined by the `xpack.security.authc.token.timeout` setting. If you want to invalidate a token immediately, you can do so by using the invalidate token API. +The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. +That time period is defined by the `xpack.security.authc.token.timeout` setting. +If you want to invalidate a token immediately, you can do so by using the invalidate token API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token) @@ -11307,22 +11843,27 @@ The tokens returned by the get token API have a finite period of time for which client.security.getToken({ ... }) ``` - -### Arguments [_arguments_387] - -* **Request (object):** - - * **`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))**: The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. - * **`scope` (Optional, string)**: The scope of the token. Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. - * **`password` (Optional, string)**: The user’s password. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. - * **`kerberos_ticket` (Optional, string)**: The base64 encoded kerberos ticket. If you specify the `_kerberos` grant type, this parameter is required. This parameter is not valid with any other supported grant type. - * **`refresh_token` (Optional, string)**: The string that was returned when you created the token, which enables you to extend its life. If you specify the `refresh_token` grant type, this parameter is required. This parameter is not valid with any other supported grant type. - * **`username` (Optional, string)**: The username that identifies the user. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. - - - -### get_user [_get_user] - +### Arguments [_arguments_security.get_token] + +#### Request (object) [_request_security.get_token] +- **`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))**: The type of grant. +Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. +- **`scope` (Optional, string)**: The scope of the token. +Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. +- **`password` (Optional, string)**: The user's password. +If you specify the `password` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +- **`kerberos_ticket` (Optional, string)**: The base64 encoded kerberos ticket. +If you specify the `_kerberos` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +- **`refresh_token` (Optional, string)**: The string that was returned when you created the token, which enables you to extend its life. +If you specify the `refresh_token` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +- **`username` (Optional, string)**: The username that identifies the user. +If you specify the `password` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. 
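+
+For example, a password-grant token request might look like the following sketch (the credentials here are placeholders, and error handling is omitted):
+
+```
+const response = await client.security.getToken({
+  grant_type: 'password',
+  username: 'test_admin',
+  password: 'x-pack-test-password'
+})
+// The returned token can be sent on subsequent requests as an
+// `Authorization: Bearer <access_token>` header.
+console.log(response.access_token, response.expires_in)
+```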
+ +## client.security.getUser [_security.get_user] Get users. Get information about users in the native realm and built-in users. @@ -11333,21 +11874,19 @@ Get information about users in the native realm and built-in users. client.security.getUser({ ... }) ``` +### Arguments [_arguments_security.get_user] -### Arguments [_arguments_388] - -* **Request (object):** - - * **`username` (Optional, string | string[])**: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users. - * **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users. - - - -### get_user_privileges [_get_user_privileges] +#### Request (object) [_request_security.get_user] +- **`username` (Optional, string | string[])**: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users. +- **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users. +## client.security.getUserPrivileges [_security.get_user_privileges] Get user privileges. -Get the security privileges for the logged in user. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. To check whether a user has a specific list of privileges, use the has privileges API. +Get the security privileges for the logged in user. +All users can use this API, but only to determine their own privileges. +To check the privileges of other users, you must use the run as feature. +To check whether a user has a specific list of privileges, use the has privileges API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges) @@ -11355,27 +11894,21 @@ Get the security privileges for the logged in user. All users can use this API, client.security.getUserPrivileges({ ... }) ``` +### Arguments [_arguments_security.get_user_privileges] -### Arguments [_arguments_389] - -* **Request (object):** - - * **`application` (Optional, string)**: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. - * **`priviledge` (Optional, string)**: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. - * **`username` (Optional, string | null)** - - - -### get_user_profile [_get_user_profile] +#### Request (object) [_request_security.get_user_privileges] +- **`application` (Optional, string)**: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. +- **`priviledge` (Optional, string)**: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. +- **`username` (Optional, string | null)** +## client.security.getUserProfile [_security.get_user_profile] Get a user profile. -Get a user’s profile using the unique profile ID. - -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. 
Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
-::::
+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile)

```
client.security.getUserProfile({ uid })
```

+### Arguments [_arguments_security.get_user_profile]

-### Arguments [_arguments_390]
-
-* **Request (object):**
-
-  * **`uid` (string | string[])**: A unique identifier for the user profile.
-  * **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=` to retrieve content nested under the specified ``. By default returns no `data` content.

+#### Request (object) [_request_security.get_user_profile]
+- **`uid` (string | string[])**: A unique identifier for the user profile.
+- **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document.
+To return all content use `data=*`.
+To return a subset of content use `data=` to retrieve content nested under the specified ``.
+By default returns no `data` content.

+## client.security.grantApiKey [_security.grant_api_key]
Grant an API key.

-Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user’s credentials. The supported user authentication credential types are:
+Create an API key on behalf of another user.
+This API is similar to the create API keys API; however, it creates the API key for a user that is different from the user that runs the API.
+The caller must have authentication credentials for the user on whose behalf the API key will be created.
+It is not possible to use this API to create an API key without that user's credentials.
+The supported user authentication credential types are:

* username and password
* Elasticsearch access tokens
* JWTs

-The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user.
+The user for whom the authentication credentials are provided can optionally "run as" (impersonate) another user.
+In this case, the API key will be created on behalf of the impersonated user.

-This API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. The API keys are created by the Elasticsearch API key service, which is automatically enabled.
+This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf.
+The API keys are created by the Elasticsearch API key service, which is automatically enabled.

-A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds.
+A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name.
+If applicable, it also returns expiration information for the API key in milliseconds.

By default, API keys never expire. You can specify expiration information when you create the API keys.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key)

```
client.security.grantApiKey({ api_key, grant_type })
```

+### Arguments [_arguments_security.grant_api_key]

-### Arguments [_arguments_391]
-
-* **Request (object):**
-
-  * **`api_key` ({ name, expiration, role_descriptors, metadata })**: The API key.
-  * **`grant_type` (Enum("access_token" | "password"))**: The type of grant. Supported grant types are: `access_token`, `password`.
-  * **`access_token` (Optional, string)**: The user’s access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types.
-  * **`username` (Optional, string)**: The user name that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types.
-  * **`password` (Optional, string)**: The user’s password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types.
-  * **`run_as` (Optional, string)**: The name of the user to be impersonated.

+#### Request (object) [_request_security.grant_api_key]
+- **`api_key` ({ name, expiration, role_descriptors, metadata })**: The API key.
+- **`grant_type` (Enum("access_token" | "password"))**: The type of grant. Supported grant types are: `access_token`, `password`.
+- **`access_token` (Optional, string)**: The user's access token.
+If you specify the `access_token` grant type, this parameter is required.
+It is not valid with other grant types.
+- **`username` (Optional, string)**: The user name that identifies the user.
+If you specify the `password` grant type, this parameter is required.
+It is not valid with other grant types.
+- **`password` (Optional, string)**: The user's password.
+If you specify the `password` grant type, this parameter is required.
+It is not valid with other grant types.
+- **`run_as` (Optional, string)**: The name of the user to be impersonated.

+## client.security.hasPrivileges [_security.has_privileges]
Check user privileges.

-Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature.
+Determine whether the specified user has a specified list of privileges.
+All users can use this API, but only to determine their own privileges.
+To check the privileges of other users, you must use the run as feature.
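+
+For example, the following sketch checks a mix of cluster and index privileges for the calling user (`my-index` is a placeholder index name):
+
+```
+const response = await client.security.hasPrivileges({
+  cluster: ['monitor', 'manage_own_api_key'],
+  index: [{ names: ['my-index'], privileges: ['read', 'write'] }]
+})
+// `has_all_requested` is true only when every requested privilege is held.
+console.log(response.has_all_requested)
+```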
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges) @@ -11443,28 +11984,21 @@ Determine whether the specified user has a specified list of privileges. All use client.security.hasPrivileges({ ... }) ``` +### Arguments [_arguments_security.has_privileges] -### Arguments [_arguments_392] - -* **Request (object):** - - * **`user` (Optional, string)**: Username - * **`application` (Optional, { application, privileges, resources }[])** - * **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check. - * **`index` (Optional, { names, privileges, allow_restricted_indices }[])** - - - -### has_privileges_user_profile [_has_privileges_user_profile] +#### Request (object) [_request_security.has_privileges] +- **`user` (Optional, string)**: Username +- **`application` (Optional, { application, privileges, resources }[])** +- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check. 
+- **`index` (Optional, { names, privileges, allow_restricted_indices }[])** +## client.security.hasPrivilegesUserProfile [_security.has_privileges_user_profile] Check user profile privileges. Determine whether the users associated with the specified user profile IDs have all the requested privileges. -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. -:::: - +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile) @@ -11472,27 +12006,27 @@ The user profile feature is designed only for use by Kibana and Elastic’s Obse client.security.hasPrivilegesUserProfile({ uids, privileges }) ``` +### Arguments [_arguments_security.has_privileges_user_profile] -### Arguments [_arguments_393] - -* **Request (object):** - - * **`uids` (string[])**: A list of profile IDs. The privileges are checked for associated users of the profiles. - * **`privileges` ({ application, cluster, index })**: An object containing all the privileges to be checked. - - - -### invalidate_api_key [_invalidate_api_key] +#### Request (object) [_request_security.has_privileges_user_profile] +- **`uids` (string[])**: A list of profile IDs. The privileges are checked for associated users of the profiles. +- **`privileges` ({ application, cluster, index })**: An object containing all the privileges to be checked. +## client.security.invalidateApiKey [_security.invalidate_api_key] Invalidate API keys. -This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. +This API invalidates API keys created by the create API key or grant API key APIs. +Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. -To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: +To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. +The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. +The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. 
+The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. +In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: -* Set the parameter `owner=true`. -* Or, set both `username` and `realm_name` to match the user’s identity. -* Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. +- Set the parameter `owner=true`. +- Or, set both `username` and `realm_name` to match the user's identity. +- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key) @@ -11500,35 +12034,37 @@ To use this API, you must have at least the `manage_security`, `manage_api_key`, client.security.invalidateApiKey({ ... }) ``` +### Arguments [_arguments_security.invalidate_api_key] -### Arguments [_arguments_394] - -* **Request (object):** - - * **`id` (Optional, string)** - * **`ids` (Optional, string[])**: A list of API key ids. This parameter cannot be used with any of `name`, `realm_name`, or `username`. - * **`name` (Optional, string)**: An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`. - * **`owner` (Optional, boolean)**: Query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. +#### Request (object) [_request_security.invalidate_api_key] +- **`id` (Optional, string)** +- **`ids` (Optional, string[])**: A list of API key ids. +This parameter cannot be used with any of `name`, `realm_name`, or `username`. +- **`name` (Optional, string)**: An API key name. +This parameter cannot be used with any of `ids`, `realm_name` or `username`. +- **`owner` (Optional, boolean)**: Query API keys owned by the currently authenticated user. +The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. +NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. +- **`realm_name` (Optional, string)**: The name of an authentication realm. +This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. +- **`username` (Optional, string)**: The username of a user. +This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. -::::{note} -At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. ** *`realm_name` (Optional, string)**: The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. ** *`username` (Optional, string)**: The username of a user. This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. -:::: - - - -### invalidate_token [_invalidate_token] - +## client.security.invalidateToken [_security.invalidate_token] Invalidate a token. -The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. 
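As a quick sketch of this invalidation call (the parameters are detailed in the arguments list below; the token value is a hypothetical placeholder and `client` is assumed to be an already-configured instance, as in the usage snippets throughout this reference):

```ts
// Invalidate a previously issued access token (hypothetical value).
const res = await client.security.invalidateToken({
  token: 'dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcw=='
})
// Number of tokens that were invalidated by this request.
console.log(res.invalidated_tokens)
```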
+The access tokens returned by the get token API have a finite period of time for which they are valid. +After that time period, they can no longer be used. +The time period is defined by the `xpack.security.authc.token.timeout` setting. -The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. - -::::{note} -While all parameters are optional, at least one of them is required. More specifically, either one of `token` or `refresh_token` parameters is required. If none of these two are specified, then `realm_name` and/or `username` need to be specified. -:::: +The refresh tokens returned by the get token API are only valid for 24 hours. +They can also be used exactly once. +If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. +NOTE: While all parameters are optional, at least one of them is required. +More specifically, either one of `token` or `refresh_token` parameters is required. +If none of these two are specified, then `realm_name` and/or `username` need to be specified. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token) @@ -11536,25 +12072,25 @@ While all parameters are optional, at least one of them is required. More specif client.security.invalidateToken({ ... }) ``` +### Arguments [_arguments_security.invalidate_token] -### Arguments [_arguments_395] - -* **Request (object):** - - * **`token` (Optional, string)**: An access token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. - * **`refresh_token` (Optional, string)**: A refresh token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. - * **`realm_name` (Optional, string)**: The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`. - * **`username` (Optional, string)**: The username of a user. This parameter cannot be used with either `refresh_token` or `token`. - - - -### oidc_authenticate [_oidc_authenticate] +#### Request (object) [_request_security.invalidate_token] +- **`token` (Optional, string)**: An access token. +This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. +- **`refresh_token` (Optional, string)**: A refresh token. +This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. +- **`realm_name` (Optional, string)**: The name of an authentication realm. +This parameter cannot be used with either `refresh_token` or `token`. +- **`username` (Optional, string)**: The username of a user. +This parameter cannot be used with either `refresh_token` or `token`. +## client.security.oidcAuthenticate [_security.oidc_authenticate] Authenticate OpenID Connect. Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. -Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. 
+These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate) @@ -11562,53 +12098,49 @@ Elasticsearch exposes all the necessary OpenID Connect related functionality wit client.security.oidcAuthenticate({ nonce, redirect_uri, state }) ``` +### Arguments [_arguments_security.oidc_authenticate] -### Arguments [_arguments_396] - -* **Request (object):** - - * **`nonce` (string)**: Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. - * **`redirect_uri` (string)**: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. - * **`state` (string)**: Maintain state between the authentication request and the response. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. - * **`realm` (Optional, string)**: The name of the OpenID Connect realm. This property is useful in cases where multiple realms are defined. - - - -### oidc_logout [_oidc_logout] +#### Request (object) [_request_security.oidc_authenticate] +- **`nonce` (string)**: Associate a client session with an ID token and mitigate replay attacks. +This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. +- **`redirect_uri` (string)**: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. +This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. +- **`state` (string)**: Maintain state between the authentication request and the response. +This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. +- **`realm` (Optional, string)**: The name of the OpenID Connect realm. +This property is useful in cases where multiple realms are defined. +## client.security.oidcLogout [_security.oidc_logout] Logout of OpenID Connect. Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. -Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
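A minimal sketch of this logout call (its parameters are listed in the arguments section that follows; both token values are hypothetical placeholders and `client` is assumed as in the other snippets in this reference):

```ts
// Invalidate the tokens issued by /_security/oidc/authenticate.
const res = await client.security.oidcLogout({
  token: 'dGhpcyBpcyBub3QgYSByZWFsIHRva2Vu',
  refresh_token: 'vLBPvmAB6KvwvJZr27cS'
})
// If the realm is configured for single logout, a redirect URI
// to the OpenID Connect Provider's end session endpoint is returned.
console.log(res.redirect)
```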
+Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
+These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout)

```ts
-client.security.oidcLogout({ access_token })
+client.security.oidcLogout({ token })
```

+### Arguments [_arguments_security.oidc_logout]

-### Arguments [_arguments_397]
-
-* **Request (object):**
-
-  * **`access_token` (string)**: The access token to be invalidated.
-  * **`refresh_token` (Optional, string)**: The refresh token to be invalidated.
-
-
-
-### oidc_prepare_authentication [_oidc_prepare_authentication]
-
+#### Request (object) [_request_security.oidc_logout]
+- **`token` (string)**: The access token to be invalidated.
+- **`refresh_token` (Optional, string)**: The refresh token to be invalidated.

+## client.security.oidcPrepareAuthentication [_security.oidc_prepare_authentication]
Prepare OpenID Connect authentication.

Create an OAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch.

The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process.

-Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
+Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
+These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication)

```ts
client.security.oidcPrepareAuthentication({ ... })
```

+### Arguments [_arguments_security.oidc_prepare_authentication]

-### Arguments [_arguments_398]
-
-* **Request (object):**
-
-  * **`iss` (Optional, string)**: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. It cannot be specified when **realm** is specified. One of **realm** or **iss** is required.
-  * **`login_hint` (Optional, string)**: In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the **login_hint** parameter. This parameter is not valid when **realm** is specified.
-  * **`nonce` (Optional, string)**: The value used to associate a client session with an ID token and to mitigate replay attacks. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response.
-  * **`realm` (Optional, string)**: The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. It cannot be specified when **iss** is specified.
One of **realm** or **iss** is required. - * **`state` (Optional, string)**: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. - - - -### put_privileges [_put_privileges] +#### Request (object) [_request_security.oidc_prepare_authentication] +- **`iss` (Optional, string)**: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. +It cannot be specified when *realm* is specified. +One of *realm* or *iss* is required. +- **`login_hint` (Optional, string)**: In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. +This parameter is not valid when *realm* is specified. +- **`nonce` (Optional, string)**: The value used to associate a client session with an ID token and to mitigate replay attacks. +If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. +- **`realm` (Optional, string)**: The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. +It cannot be specified when *iss* is specified. +One of *realm* or *iss* is required. +- **`state` (Optional, string)**: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. +If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. +## client.security.putPrivileges [_security.put_privileges] Create or update application privileges. To use this API, you must have one of the following privileges: @@ -11657,21 +12191,18 @@ Action names can contain any number of printable ASCII characters and must conta client.security.putPrivileges({ ... }) ``` +### Arguments [_arguments_security.put_privileges] -### Arguments [_arguments_399] - -* **Request (object):** - - * **`privileges` (Optional, Record>)** - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### put_role [_put_role] +#### Request (object) [_request_security.put_privileges] +- **`privileges` (Optional, Record>)** +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.putRole [_security.put_role] Create or update roles. -The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless. +The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. 
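Stepping back to the create-or-update application privileges call documented above, here is a minimal sketch. The application name, privilege names, and actions are hypothetical, and `client` is assumed as in the other snippets in this reference.

```ts
// Register two privileges under a hypothetical `myapp` application.
await client.security.putPrivileges({
  privileges: {
    myapp: {
      read: { actions: ['data:read/*', 'action:login'] },
      write: { actions: ['data:write/*'] }
    }
  },
  // Make the change immediately visible to privilege checks.
  refresh: 'wait_for'
})
```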
+The create or update roles API cannot update roles that are defined in roles files. +File-based role management is not available in Elastic Serverless. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role) @@ -11679,50 +12210,52 @@ The role management APIs are generally the preferred way to manage roles in the client.security.putRole({ name }) ``` +### Arguments [_arguments_security.put_role] -### Arguments [_arguments_400] - -* **Request (object):** - - * **`name` (string)**: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters *_*, *-*, and *.*. Each role must have a unique name, as this will serve as the identifier for that role. - * **`applications` (Optional, { application, privileges, resources }[])**: A list of application privilege entries. - * **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. - * **`global` (Optional, Record)**: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. - * **`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])**: A list of indices permissions entries. - * **`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])**: A list of remote indices permissions entries. - - -::::{note} -Remote indices are effective for remote clusters configured with the API key based model. They have no effect for remote clusters configured with the certificate based model. ** *`remote_cluster` (Optional, { clusters, privileges }[])**: A list of remote cluster permissions entries. *** *`metadata` (Optional, Record)**: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. *** *`run_as` (Optional, string[])**: A list of users that the owners of this role can impersonate. **Note**: in Serverless, the run-as feature is disabled. 
For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. *** *`description` (Optional, string)**: Optional description of the role descriptor *** *`transient_metadata` (Optional, Record)**: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. ** *`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. -:::: - - +#### Request (object) [_request_security.put_role] +- **`name` (string)**: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. +- **`applications` (Optional, { application, privileges, resources }[])**: A list of application privilege entries. +- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. +- **`global` (Optional, Record)**: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. +- **`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])**: A list of indices permissions entries. +- **`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])**: A list of remote indices permissions entries. -### put_role_mapping [_put_role_mapping] +NOTE: Remote indices are effective for remote clusters configured with the API key based model. 
+They have no effect for remote clusters configured with the certificate based model. +- **`remote_cluster` (Optional, { clusters, privileges }[])**: A list of remote cluster permissions entries. +- **`metadata` (Optional, Record)**: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. +- **`run_as` (Optional, string[])**: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. +- **`description` (Optional, string)**: Optional description of the role descriptor +- **`transient_metadata` (Optional, Record)**: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.putRoleMapping [_security.put_role_mapping] Create or update role mappings. -Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. - -::::{note} -This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files. -:::: +Role mappings define which roles are assigned to each user. +Each mapping has rules that identify users and a list of roles that are granted to those users. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. +NOTE: This API does not create roles. Rather, it maps users to existing roles. +Roles can be created by using the create or update roles API or roles files. **Role templates** -The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. The `roles` field is used for this purpose. - -For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. The `role_templates` field is used for this purpose. +The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. +For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. +The `roles` field is used for this purpose. -::::{note} -To use role templates successfully, the relevant scripting feature must be enabled. 
Otherwise, all attempts to create a role mapping with role templates fail. -:::: +For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. +The `role_templates` field is used for this purpose. +NOTE: To use role templates successfully, the relevant scripting feature must be enabled. +Otherwise, all attempts to create a role mapping with role templates fail. -All of the user fields that are available in the role mapping rules are also available in the role templates. Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated. +All of the user fields that are available in the role mapping rules are also available in the role templates. +Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated. -By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names. +By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. +If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping) @@ -11730,27 +12263,29 @@ By default a template is evaluated to produce a single string that is the name o client.security.putRoleMapping({ name }) ``` - -### Arguments [_arguments_401] - -* **Request (object):** - - * **`name` (string)**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. - * **`enabled` (Optional, boolean)**: Mappings that have `enabled` set to `false` are ignored when role mapping is performed. - * **`metadata` (Optional, Record)**: Additional metadata that helps define which roles are assigned to each user. Within the metadata object, keys beginning with `_` are reserved for system usage. - * **`roles` (Optional, string[])**: A list of role names that are granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. - * **`role_templates` (Optional, { format, template }[])**: A list of Mustache templates that will be evaluated to determine the roles names that should granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. - * **`rules` (Optional, { any, all, field, except })**: The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL. - * **`run_as` (Optional, string[])** - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
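To make the role-template mechanism described above concrete, here is a minimal sketch. The mapping name, realm name, and template are hypothetical, and `client` is assumed as in the other snippets in this reference.

```ts
// Grant each user from a hypothetical `ldap1` realm a role derived
// from their username, via a Mustache role template.
await client.security.putRoleMapping({
  name: 'mapping-by-username',
  enabled: true,
  role_templates: [
    { template: { source: 'user_{{username}}' }, format: 'string' }
  ],
  rules: { field: { 'realm.name': 'ldap1' } },
  refresh: 'wait_for'
})
```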
- - - -### put_user [_put_user] - +### Arguments [_arguments_security.put_role_mapping] + +#### Request (object) [_request_security.put_role_mapping] +- **`name` (string)**: The distinct name that identifies the role mapping. +The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. +- **`enabled` (Optional, boolean)**: Mappings that have `enabled` set to `false` are ignored when role mapping is performed. +- **`metadata` (Optional, Record)**: Additional metadata that helps define which roles are assigned to each user. +Within the metadata object, keys beginning with `_` are reserved for system usage. +- **`roles` (Optional, string[])**: A list of role names that are granted to the users that match the role mapping rules. +Exactly one of `roles` or `role_templates` must be specified. +- **`role_templates` (Optional, { format, template }[])**: A list of Mustache templates that will be evaluated to determine the roles names that should granted to the users that match the role mapping rules. +Exactly one of `roles` or `role_templates` must be specified. +- **`rules` (Optional, { any, all, field, except })**: The rules that determine which users should be matched by the mapping. +A rule is a logical condition that is expressed by using a JSON DSL. +- **`run_as` (Optional, string[])** +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.putUser [_security.put_user] Create or update users. -Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. To change a user’s password without updating any other fields, use the change password API. +Add and update users in the native realm. +A password is required for adding a new user but is optional when updating an existing user. +To change a user's password without updating any other fields, use the change password API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user) @@ -11758,27 +12293,42 @@ Add and update users in the native realm. A password is required for adding a ne client.security.putUser({ username }) ``` - -### Arguments [_arguments_402] - -* **Request (object):** - - * **`username` (string)**: An identifier for the user. - - -::::{note} -Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed. ** *`email` (Optional, string | null)**: The email of the user. *** *`full_name` (Optional, string | null)**: The full name of the user. *** *`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the user. *** *`password` (Optional, string)**: The user’s password. Passwords must be at least 6 characters long. When adding a user, one of `password` or `password_hash` is required. When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user’s password *** *`password_hash` (Optional, string)**: A hash of the user’s password. 
This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. The `password` parameter and the `password_hash` parameter cannot be used in the same request. *** *`roles` (Optional, string[])**: A set of roles the user has. The roles determine the user’s access permissions. To create a user without any roles, specify an empty list (`[]`). *** *`enabled` (Optional, boolean)**: Specifies whether the user is enabled. ** *`refresh` (Optional, Enum(true | false | "wait_for"))**: Valid values are `true`, `false`, and `wait_for`. These values have the same meaning as in the index API, but the default value for this API is true. -:::: - - - -### query_api_keys [_query_api_keys] - +### Arguments [_arguments_security.put_user] + +#### Request (object) [_request_security.put_user] +- **`username` (string)**: An identifier for the user. + +NOTE: Usernames must be at least 1 and no more than 507 characters. +They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. +Leading or trailing whitespace is not allowed. +- **`email` (Optional, string | null)**: The email of the user. +- **`full_name` (Optional, string | null)**: The full name of the user. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the user. +- **`password` (Optional, string)**: The user's password. +Passwords must be at least 6 characters long. +When adding a user, one of `password` or `password_hash` is required. +When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password +- **`password_hash` (Optional, string)**: A hash of the user's password. +This must be produced using the same hashing algorithm as has been configured for password storage. +For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. +Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. +The `password` parameter and the `password_hash` parameter cannot be used in the same request. +- **`roles` (Optional, string[])**: A set of roles the user has. +The roles determine the user's access permissions. +To create a user without any roles, specify an empty list (`[]`). +- **`enabled` (Optional, boolean)**: Specifies whether the user is enabled. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: Valid values are `true`, `false`, and `wait_for`. +These values have the same meaning as in the index API, but the default value for this API is true. + +## client.security.queryApiKeys [_security.query_api_keys] Find API keys with a query. -Get a paginated list of API keys and their information. You can optionally filter the results with a query. +Get a paginated list of API keys and their information. +You can optionally filter the results with a query. -To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. 
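As a sketch of such a paginated key search (the full parameter list follows below; the name prefix and sort field are hypothetical choices, and `client` is assumed as in the other snippets in this reference):

```ts
// Find API keys whose name starts with a hypothetical prefix,
// newest first; only keys visible to the caller are returned.
const res = await client.security.queryApiKeys({
  query: { prefix: { name: 'ingest-' } },
  sort: [{ creation: { order: 'desc' } }],
  size: 25
})
console.log(res.total, res.api_keys.length)
```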
If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. +To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. +If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. +If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys) @@ -11786,26 +12336,52 @@ To use this API, you must have at least the `manage_own_api_key` or the `read_se client.security.queryApiKeys({ ... }) ``` - -### Arguments [_arguments_403] - -* **Request (object):** - - * **`aggregations` (Optional, Record)**: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. - * **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. - - -::::{note} -The queryable string values associated with API keys are internally mapped as keywords. Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. Such a match query is hence equivalent to a `term` query. ** *`from` (Optional, number)**: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. *** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. Other than `id`, all public fields of an API key are eligible for sorting. In addition, sort can also be applied to the `_doc` field to sort by index order. *** *`size` (Optional, number)**: The number of hits to return. It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. *** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: The search after definition. *** *`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user’s role descriptors associated with the API key. 
An API key’s actual permission is the intersection of its assigned role descriptors and the owner user’s role descriptors (effectively limited by it). An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. *** *`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile UID for the API key owner principal. If it exists, the profile UID is returned under the `profile_uid` response field for each API key. ** *`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response. -:::: - - - -### query_role [_query_role] - +### Arguments [_arguments_security.query_api_keys] + +#### Request (object) [_request_security.query_api_keys] +- **`aggregations` (Optional, Record)**: Any aggregations to run over the corpus of returned API keys. +Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. +This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, +`cardinality`, `value_count`, `composite`, `filter`, and `filters`. +Additionally, aggregations only run over the same subset of fields that query works with. +- **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which API keys to return. +If the query parameter is missing, it is equivalent to a `match_all` query. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +You can query the following public information associated with an API key: `id`, `type`, `name`, +`creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. + +NOTE: The queryable string values associated with API keys are internally mapped as keywords. +Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. +Such a match query is hence equivalent to a `term` query. +- **`from` (Optional, number)**: The starting document offset. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. +Other than `id`, all public fields of an API key are eligible for sorting. +In addition, sort can also be applied to the `_doc` field to sort by index order. +- **`size` (Optional, number)**: The number of hits to return. +It must not be negative. +The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`search_after` (Optional, number | number | string | boolean | null[])**: The search after definition. +- **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user's role descriptors associated with the API key. +An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). 
+An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. +- **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile UID for the API key owner principal. +If it exists, the profile UID is returned under the `profile_uid` response field for each API key. +- **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response. + +## client.security.queryRole [_security.query_role] Find roles with a query. -Get roles in a paginated manner. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. You can optionally filter the results with a query. Also, the results can be paginated and sorted. +Get roles in a paginated manner. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. +You can optionally filter the results with a query. +Also, the results can be paginated and sorted. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role) @@ -11813,29 +12389,36 @@ Get roles in a paginated manner. The role management APIs are generally the pref client.security.queryRole({ ... }) ``` - -### Arguments [_arguments_404] - -* **Request (object):** - - * **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`. - * **`from` (Optional, number)**: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. You can sort on `username`, `roles`, or `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. - * **`size` (Optional, number)**: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: The search after definition. - - - -### query_user [_query_user] - +### Arguments [_arguments_security.query_role] + +#### Request (object) [_request_security.query_role] +- **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which roles to return. +If the query parameter is missing, it is equivalent to a `match_all` query. 
+The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +You can query the following information associated with roles: `name`, `description`, `metadata`, +`applications.application`, `applications.privileges`, and `applications.resources`. +- **`from` (Optional, number)**: The starting document offset. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. +You can sort on `username`, `roles`, or `enabled`. +In addition, sort can also be applied to the `_doc` field to sort by index order. +- **`size` (Optional, number)**: The number of hits to return. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`search_after` (Optional, number | number | string | boolean | null[])**: The search after definition. + +## client.security.queryUser [_security.query_user] Find users with a query. -Get information for users in a paginated manner. You can optionally filter the results with a query. - -::::{note} -As opposed to the get user API, built-in users are excluded from the result. This API is only for native users. -:::: +Get information for users in a paginated manner. +You can optionally filter the results with a query. +NOTE: As opposed to the get user API, built-in users are excluded from the result. +This API is only for native users. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user) @@ -11843,30 +12426,35 @@ As opposed to the get user API, built-in users are excluded from the result. Thi client.security.queryUser({ ... }) ``` - -### Arguments [_arguments_405] - -* **Request (object):** - - * **`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. - * **`from` (Optional, number)**: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. Fields eligible for sorting are: `username`, `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. - * **`size` (Optional, number)**: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. 
  * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: The search after definition
  * **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users.



-### saml_authenticate [_saml_authenticate]

+### Arguments [_arguments_security.query_user]
+
+#### Request (object) [_request_security.query_user]
+- **`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which users to return.
+If the query parameter is missing, it is equivalent to a `match_all` query.
+The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`,
+`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`.
+You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`.
+- **`from` (Optional, number)**: The starting document offset.
+It must not be negative.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+To page through more hits, use the `search_after` parameter.
+- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition.
+Fields eligible for sorting are: `username`, `roles`, `enabled`.
+In addition, sort can also be applied to the `_doc` field to sort by index order.
+- **`size` (Optional, number)**: The number of hits to return.
+It must not be negative.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+To page through more hits, use the `search_after` parameter.
+- **`search_after` (Optional, number | number | string | boolean | null[])**: The search after definition
+- **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users.

+## client.security.samlAuthenticate [_security.saml_authenticate]
Authenticate SAML.

Submit a SAML response message to Elasticsearch for consumption.

-::::{note}
-This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
-::::
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

The SAML message that is submitted can be:

* A response to a SAML authentication request that was previously created using the SAML prepare authentication API.
* An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow.

In either case, the SAML message needs to be a base64 encoded XML document with a root element of `<Response>`.

After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.
This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.
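Before the formal parameter list below, here is a sketch of that exchange. The base64 content, request ID, and realm name are hypothetical placeholders, and `client` is assumed as elsewhere in this reference.

```ts
// Submit a base64-encoded SAML Response for consumption.
const saml = await client.security.samlAuthenticate({
  content: 'PHNhbWxwOlJlc3BvbnNlPi4uLjwvc2FtbHA6UmVzcG9uc2U+',
  ids: ['4fee3b046395c4e751011e97f8900b5273d56685'],
  realm: 'saml1'
})
// The returned tokens authenticate subsequent requests.
console.log(saml.access_token, saml.refresh_token)
```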
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate) @@ -11883,29 +12472,26 @@ After successful validation, Elasticsearch responds with an Elasticsearch intern client.security.samlAuthenticate({ content, ids }) ``` +### Arguments [_arguments_security.saml_authenticate] -### Arguments [_arguments_406] - -* **Request (object):** - - * **`content` (string)**: The SAML response as it was sent by the user’s browser, usually a Base64 encoded XML document. - * **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. - * **`realm` (Optional, string)**: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. - - - -### saml_complete_logout [_saml_complete_logout] +#### Request (object) [_request_security.saml_authenticate] +- **`content` (string)**: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. +- **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. +- **`realm` (Optional, string)**: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. +## client.security.samlCompleteLogout [_security.saml_complete_logout] Logout of SAML completely. Verifies the logout response sent from the SAML IdP. -::::{note} -This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. -:::: +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. - -The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. This API verifies the response by ensuring the content is relevant and validating its signature. An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller of this API must prepare the request accordingly so that this API can handle either of them. +The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. +This API verifies the response by ensuring the content is relevant and validating its signature. +An empty response is returned if the verification process is successful. +The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. +The caller of this API must prepare the request accordingly so that this API can handle either of them. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout) @@ -11913,30 +12499,26 @@ The SAML IdP may send a logout response back to the SP after handling the SP-ini client.security.samlCompleteLogout({ realm, ids }) ``` +### Arguments [_arguments_security.saml_complete_logout] -### Arguments [_arguments_407] - -* **Request (object):** - - * **`realm` (string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. - * **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. 
- * **`query_string` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. - * **`content` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. - - - -### saml_invalidate [_saml_invalidate] +#### Request (object) [_request_security.saml_complete_logout] +- **`realm` (string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. +- **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. +- **`query_string` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. +- **`content` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. +## client.security.samlInvalidate [_security.saml_invalidate] Invalidate SAML. Submit a SAML LogoutRequest message to Elasticsearch for consumption. -::::{note} -This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. -:::: +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. - -The logout request comes from the SAML IdP during an IdP initiated Single Logout. The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP. +The logout request comes from the SAML IdP during an IdP initiated Single Logout. +The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. +After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. +Thus the user can be redirected back to their IdP. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate) @@ -11944,29 +12526,27 @@ The logout request comes from the SAML IdP during an IdP initiated Single Logout client.security.samlInvalidate({ query_string }) ``` +### Arguments [_arguments_security.saml_invalidate] -### Arguments [_arguments_408] - -* **Request (object):** - - * **`query_string` (string)**: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. 
In order for Elasticsearch to be able to verify the IdP’s signature, the value of the `query_string` field must be an exact match to the string provided by the browser. The client application must not attempt to parse or process the string in any way.
- * **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter.
- * **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch the configuration. You must specify either this parameter or the `acs` parameter.
-
-
-
-### saml_logout [_saml_logout]
+#### Request (object) [_request_security.saml_invalidate]
+- **`query_string` (string)**: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout.
+This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded.
+If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself.
+In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser.
+The client application must not attempt to parse or process the string in any way.
+- **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter.
+- **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch whose configuration is used. You must specify either this parameter or the `acs` parameter.

+## client.security.samlLogout [_security.saml_logout]
Logout of SAML.

Submits a request to invalidate an access token and refresh token.

-::::{note}
-This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
-::::
-
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

-This API invalidates the tokens that were generated for a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).
+This API invalidates the tokens that were generated for a user by the SAML authenticate API.
+If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout)

```
client.security.samlLogout({ token })
```

+### Arguments [_arguments_security.saml_logout]

-### Arguments [_arguments_409]
-
-* **Request (object):**
-
- * **`token` (string)**: The access token that was returned as a response to calling the SAML authenticate API. 
Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`.
- * **`refresh_token` (Optional, string)**: The refresh token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent refresh token that was received after refreshing the original access token.
-
-
-
-### saml_prepare_authentication [_saml_prepare_authentication]
+#### Request (object) [_request_security.saml_logout]
+- **`token` (string)**: The access token that was returned as a response to calling the SAML authenticate API.
+Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`.
+- **`refresh_token` (Optional, string)**: The refresh token that was returned as a response to calling the SAML authenticate API.
+Alternatively, the most recent refresh token that was received after refreshing the original access token.

+## client.security.samlPrepareAuthentication [_security.saml_prepare_authentication]
Prepare SAML authentication.

Create a SAML authentication request (`<AuthnRequest>`) as a URL string based on the configuration of the respective SAML realm in Elasticsearch.

-::::{note}
-This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
-::::

+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

-This API returns a URL pointing to the SAML Identity Provider. You can use the URL to redirect the browser of the user in order to continue the authentication process. The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded. If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`. These parameters contain the algorithm used for the signature and the signature value itself. It also returns a random string that uniquely identifies this SAML Authentication request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.
+This API returns a URL pointing to the SAML Identity Provider.
+You can use the URL to redirect the browser of the user in order to continue the authentication process.
+The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded.
+If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`.
+These parameters contain the algorithm used for the signature and the signature value itself.
+It also returns a random string that uniquely identifies this SAML Authentication request.
+The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication)

```
client.security.samlPrepareAuthentication({ ...
}) ``` +### Arguments [_arguments_security.saml_prepare_authentication] -### Arguments [_arguments_410] - -* **Request (object):** - - * **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. - * **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. You must specify either this parameter or the `acs` parameter. - * **`relay_state` (Optional, string)**: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. If the Authentication Request is signed, this value is used as part of the signature computation. - - - -### saml_service_provider_metadata [_saml_service_provider_metadata] +#### Request (object) [_request_security.saml_prepare_authentication] +- **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. +The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. +- **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. +You must specify either this parameter or the `acs` parameter. +- **`relay_state` (Optional, string)**: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. +If the Authentication Request is signed, this value is used as part of the signature computation. +## client.security.samlServiceProviderMetadata [_security.saml_service_provider_metadata] Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 Service Provider. -The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. +The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. +This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata) @@ -12028,25 +12608,19 @@ The SAML 2.0 specification provides a mechanism for Service Providers to describ client.security.samlServiceProviderMetadata({ realm_name }) ``` +### Arguments [_arguments_security.saml_service_provider_metadata] -### Arguments [_arguments_411] - -* **Request (object):** - - * **`realm_name` (string)**: The name of the SAML realm in Elasticsearch. - - - -### suggest_user_profiles [_suggest_user_profiles] +#### Request (object) [_request_security.saml_service_provider_metadata] +- **`realm_name` (string)**: The name of the SAML realm in Elasticsearch. +## client.security.suggestUserProfiles [_security.suggest_user_profiles] Suggest a user profile. Get suggestions for user profiles that match specified search criteria. -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. 
Elastic reserves the right to change or remove this feature in future releases without prior notice.
-::::
-
+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles)

```
client.security.suggestUserProfiles({ ... })
```

+### Arguments [_arguments_security.suggest_user_profiles]

-### Arguments [_arguments_412]
-
-* **Request (object):**
-
- * **`name` (Optional, string)**: A query string used to match name-related fields in user profile documents. Name-related fields are the user’s `username`, `full_name`, and `email`.
- * **`size` (Optional, number)**: The number of profiles to return.
- * **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, use `data=` to retrieve content nested under the specified ``. By default, the API returns no `data` content. It is an error to specify `data` as both the query parameter and the request body field.
- * **`hint` (Optional, { uids, labels })**: Extra search criteria to improve relevance of the suggestion result. Profiles matching the spcified hint are ranked higher in the response. Profiles not matching the hint aren’t excluded from the response as long as the profile matches the `name` field query.
-
-
-
-### update_api_key [_update_api_key]
+#### Request (object) [_request_security.suggest_user_profiles]
+- **`name` (Optional, string)**: A query string used to match name-related fields in user profile documents.
+Name-related fields are the user's `username`, `full_name`, and `email`.
+- **`size` (Optional, number)**: The number of profiles to return.
+- **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document.
+To return all content use `data=*`.
+To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`.
+By default, the API returns no `data` content.
+It is an error to specify `data` as both the query parameter and the request body field.
+- **`hint` (Optional, { uids, labels })**: Extra search criteria to improve relevance of the suggestion result.
+Profiles matching the specified hint are ranked higher in the response.
+Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query.

+## client.security.updateApiKey [_security.update_api_key]
Update an API key.

-Update attributes of an existing API key. This API supports updates to an API key’s access scope, expiration, and metadata.
-
-To use this API, you must have at least the `manage_own_api_key` cluster privilege. Users can only update API keys that they created or that were granted to them. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.
-
-::::{important}
-It’s not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required.
-::::
-
+Update attributes of an existing API key. 
+This API supports updates to an API key's access scope, expiration, and metadata. +To use this API, you must have at least the `manage_own_api_key` cluster privilege. +Users can only update API keys that they created or that were granted to them. +To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. -Use this API to update API keys created by the create API key or grant API Key APIs. If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. It’s not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. +IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required. -The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call. +Use this API to update API keys created by the create API key or grant API Key APIs. +If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. +It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. -::::{important} -If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified. -:::: +The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. +IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope. +This change can occur if the owner user's permissions have changed since the API key was created or last modified. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key) @@ -12094,39 +12671,43 @@ If you don’t specify `role_descriptors` in the request, a call to this API mig client.security.updateApiKey({ id }) ``` - -### Arguments [_arguments_413] - -* **Request (object):** - - * **`id` (string)**: The ID of the API key to update. - * **`role_descriptors` (Optional, Record)**: The role descriptors to assign to this API key. The API key’s effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user’s full permissions. The snapshot of the owner’s permissions is always updated, whether you supply the `role_descriptors` parameter or not. The structure of a role descriptor is the same as the request for the create API keys API. - * **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. 
When specified, this value fully replaces the metadata previously associated with the API key. - * **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the expiration unchanged. - - - -### update_cross_cluster_api_key [_update_cross_cluster_api_key] - +### Arguments [_arguments_security.update_api_key] + +#### Request (object) [_request_security.update_api_key] +- **`id` (string)**: The ID of the API key to update. +- **`role_descriptors` (Optional, Record)**: The role descriptors to assign to this API key. +The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. +The structure of a role descriptor is the same as the request for the create API keys API. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. +It supports a nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. +When specified, this value fully replaces the metadata previously associated with the API key. +- **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. +By default, API keys never expire. +This property can be omitted to leave the expiration unchanged. + +## client.security.updateCrossClusterApiKey [_security.update_cross_cluster_api_key] Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. -To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. +To use this API, you must have at least the `manage_security` cluster privilege. +Users can only update API keys that they created. +To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. -::::{important} -It’s not possible to use an API key as the authentication credential for this API. To update an API key, the owner user’s credentials are required. -:::: +IMPORTANT: It's not possible to use an API key as the authentication credential for this API. +To update an API key, the owner user's credentials are required. +It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. -It’s not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. - -This API supports updates to an API key’s access scope, metadata, and expiration. The owner user’s information, such as the `username` and `realm`, is also updated automatically on every call. - -::::{note} -This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. -:::: +This API supports updates to an API key's access scope, metadata, and expiration. 
+The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. +NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key) @@ -12134,30 +12715,30 @@ This API cannot update REST API keys, which should be updated by either the upda client.security.updateCrossClusterApiKey({ id, access }) ``` +### Arguments [_arguments_security.update_cross_cluster_api_key] -### Arguments [_arguments_414] - -* **Request (object):** - - * **`id` (string)**: The ID of the cross-cluster API key to update. - * **`access` ({ replication, search })**: The access to be granted to this API key. The access is composed of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access. - * **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the value unchanged. - * **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information fully replaces metadata previously associated with the API key. - - - -### update_settings [_update_settings] +#### Request (object) [_request_security.update_cross_cluster_api_key] +- **`id` (string)**: The ID of the cross-cluster API key to update. +- **`access` ({ replication, search })**: The access to be granted to this API key. +The access is composed of permissions for cross cluster search and cross cluster replication. +At least one of them must be specified. +When specified, the new access assignment fully replaces the previously assigned access. +- **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. +By default, API keys never expire. This property can be omitted to leave the value unchanged. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. +It supports nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. +When specified, this information fully replaces metadata previously associated with the API key. +## client.security.updateSettings [_security.update_settings] Update security index settings. Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. -::::{note} -If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. -:::: +NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. - -If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use. +If a specific index is not in use on the system and settings are provided for it, the request will be rejected. +This API does not yet support configuring the settings for indices before they are in use. 
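As a sketch, assuming an authenticated `client` with the `manage_security` cluster privilege; the replica settings shown are illustrative values, not recommendations:

```
// Only indices that are in use on the system may be configured here;
// settings supplied for unused indices cause the request to be rejected.
await client.security.updateSettings({
  security: { index: { auto_expand_replicas: '0-all' } },
  'security-tokens': { index: { auto_expand_replicas: '0-all' } },
})
```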
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings) @@ -12165,38 +12746,36 @@ If a specific index is not in use on the system and settings are provided for it client.security.updateSettings({ ... }) ``` +### Arguments [_arguments_security.update_settings] -### Arguments [_arguments_415] - -* **Request (object):** - - * **`security` (Optional, { index })**: Settings for the index used for most security configuration, including native realm users and roles configured with the API. - * **`security-profile` (Optional, { index })**: Settings for the index used to store profile information. - * **`security-tokens` (Optional, { index })**: Settings for the index used to store tokens. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### update_user_profile_data [_update_user_profile_data] +#### Request (object) [_request_security.update_settings] +- **`security` (Optional, { index })**: Settings for the index used for most security configuration, including native realm users and roles configured with the API. +- **`security-profile` (Optional, { index })**: Settings for the index used to store profile information. +- **`security-tokens` (Optional, { index })**: Settings for the index used to store tokens. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +## client.security.updateUserProfileData [_security.update_user_profile_data] Update user profile data. Update specific data for the user profile that is associated with a unique ID. -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. -:::: - +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. To use this API, you must have one of the following privileges: * The `manage_user_profile` cluster privilege. * The `update_profile_data` global privilege for the namespaces that are referenced in the request. -This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. New keys and their values are added to the profile document and conflicting keys are replaced by data that’s included in the request. +This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. +New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. 
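For example, a brief sketch that updates both fields for a hypothetical profile UID (the namespacing rules are described after this example):

```
// Placeholder UID. `labels` content is searchable, `data` content is not.
// Top-level keys act as namespaces and must not start with `_` or contain `.`.
await client.security.updateUserProfileData({
  uid: 'u_P_0BMHgaOK3p7k-PFWUCbw9dvMDyLGagFsVoTPM_au0_0',
  labels: { direction: { team: 'east' } },
  data: { app1: { theme: 'default' } },
})
```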
-For both labels and data, content is namespaced by the top-level fields. The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. +For both labels and data, content is namespaced by the top-level fields. +The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data) @@ -12204,31 +12783,32 @@ For both labels and data, content is namespaced by the top-level fields. The `up client.security.updateUserProfileData({ uid }) ``` - -### Arguments [_arguments_416] - -* **Request (object):** - - * **`uid` (string)**: A unique identifier for the user profile. - * **`labels` (Optional, Record)**: Searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). - * **`data` (Optional, Record)**: Non-searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). The data object is not searchable, but can be retrieved with the get user profile API. - * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. - * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, nothing is done with refreshes. - - - -## shutdown [_shutdown] - - -### delete_node [_delete_node] - -Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch. - -::::{note} -This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: - +### Arguments [_arguments_security.update_user_profile_data] + +#### Request (object) [_request_security.update_user_profile_data] +- **`uid` (string)**: A unique identifier for the user profile. +- **`labels` (Optional, Record)**: Searchable data that you want to associate with the user profile. +This field supports a nested data structure. +Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). +- **`data` (Optional, Record)**: Non-searchable data that you want to associate with the user profile. +This field supports a nested data structure. +Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). +The data object is not searchable, but can be retrieved with the get user profile API. +- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. +- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. 
+- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation
+visible to search.
+If 'wait_for', it waits for a refresh to make this operation visible to search.
+If 'false', nothing is done with refreshes.
+
+## client.shutdown.deleteNode [_shutdown.delete_node]
+Cancel node shutdown preparations.
+Remove a node from the shutdown list so it can resume normal operations.
+You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster.
+Shutdown requests are never removed automatically by Elasticsearch.
+
+NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes.
+Direct use is not supported.

If the operator privileges feature is enabled, you must be an operator to use this API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node)

```
client.shutdown.deleteNode({ node_id })
```

+### Arguments [_arguments_shutdown.delete_node]

-### Arguments [_arguments_417]
-
-* **Request (object):**
-
- * **`node_id` (string)**: The node id of node to be removed from the shutdown state
- * **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-
-
-### get_node [_get_node]
+#### Request (object) [_request_shutdown.delete_node]
+- **`node_id` (string)**: The node ID of the node to be removed from the shutdown state.
+- **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

+## client.shutdown.getNode [_shutdown.get_node]
Get the shutdown status.

-Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process.
-
-::::{note}
-This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
-::::
-
+Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled.
+The API returns status information for each part of the shut down process.
+
+NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

If the operator privileges feature is enabled, you must be an operator to use this API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node)

```
client.shutdown.getNode({ ...
})
```

+### Arguments [_arguments_shutdown.get_node]

-### Arguments [_arguments_418]
-
-* **Request (object):**
-
- * **`node_id` (Optional, string | string[])**: Which node for which to retrieve the shutdown status
- * **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-
-
-
-### put_node [_put_node]
+#### Request (object) [_request_shutdown.get_node]
+- **`node_id` (Optional, string | string[])**: The node for which to retrieve the shutdown status.
+- **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.

+## client.shutdown.putNode [_shutdown.put_node]
Prepare a node to be shut down.

-::::{note}
-This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
-::::
-
+NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster.

If the operator privileges feature is enabled, you must be an operator to use this API.

-The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.
+The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster.
+This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.

-You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type.
+You must specify the type of shutdown: `restart`, `remove`, or `replace`.
+If a node is already being prepared for shutdown, you can use this API to change the shutdown type.

-::::{important}
-This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.
-::::
+IMPORTANT: This API does NOT terminate the Elasticsearch process.
+Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node)

```
client.shutdown.putNode({ node_id, type, reason })
```

-
-### Arguments [_arguments_419]
-
-* **Request (object):**
-
- * **`node_id` (string)**: The node identifier. This parameter is not validated against the cluster’s active nodes. This enables you to register a node for shut down while it is offline. No error is thrown if you specify an invalid node ID.
- * **`type` (Enum("restart" | "remove" | "replace"))**: Valid values are restart, remove, or replace. Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. Because the node is expected to rejoin the cluster, data is not migrated off of the node. 
Use remove when you need to permanently remove a node from the cluster. The node is not marked ready for shutdown until data is migrated off of the node Use replace to do a 1:1 replacement of a node with another node. Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete.
- * **`reason` (string)**: A human-readable reason that the node is being shut down. This field provides information for other cluster operators; it does not affect the shut down process.
- * **`allocation_delay` (Optional, string)**: Only valid if type is restart. Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used.
- * **`target_node_name` (Optional, string)**: Only valid if type is replace. Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules.
- * **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-
-
-## simulate [_simulate_2]
-
-
-### ingest [_ingest_2]
-
-Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.
+### Arguments [_arguments_shutdown.put_node]
+
+#### Request (object) [_request_shutdown.put_node]
+- **`node_id` (string)**: The node identifier.
+This parameter is not validated against the cluster's active nodes.
+This enables you to register a node for shut down while it is offline.
+No error is thrown if you specify an invalid node ID.
+- **`type` (Enum("restart" | "remove" | "replace"))**: Valid values are restart, remove, or replace.
+Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance.
+Because the node is expected to rejoin the cluster, data is not migrated off of the node.
+Use remove when you need to permanently remove a node from the cluster.
+The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node.
+Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node.
+During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete.
+- **`reason` (string)**: A human-readable reason that the node is being shut down. 
+This field provides information for other cluster operators; it does not affect the shut down process. +- **`allocation_delay` (Optional, string)**: Only valid if type is restart. +Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. +This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. +If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. +- **`target_node_name` (Optional, string)**: Only valid if type is replace. +Specifies the name of the node that is replacing the node being shut down. +Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. +During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. +- **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.simulate.ingest [_simulate.ingest] +Simulate data ingestion. +Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. -The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index’s pipelines as well the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. +The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. +If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would. +No data is indexed into Elasticsearch. +Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. +The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. -This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. 
+This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. +The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. -By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. +By default, the pipeline definitions that are currently in the system are used. +However, you can supply substitute pipeline definitions in the body of the request. +These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest) @@ -12342,27 +12925,26 @@ By default, the pipeline definitions that are currently in the system are used. client.simulate.ingest({ docs }) ``` +### Arguments [_arguments_simulate.ingest] -### Arguments [_arguments_420] - -* **Request (object):** +#### Request (object) [_request_simulate.ingest] +- **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline. +- **`index` (Optional, string)**: The index to simulate ingesting into. +This value can be overridden by specifying an index on each document. +If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. +- **`component_template_substitutions` (Optional, Record)**: A map of component template names to substitute component template definition objects. +- **`index_template_substitutions` (Optional, Record)**: A map of index template names to substitute index template definition objects. +- **`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })** +- **`pipeline_substitutions` (Optional, Record)**: Pipelines to test. +If you don’t specify the `pipeline` request path parameter, this parameter is required. +If you specify both this and the request path parameter, the API only uses the request path parameter. +- **`pipeline` (Optional, string)**: The pipeline to use as the default pipeline. +This value can be used to override the default pipeline of the index. - * **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline. - * **`index` (Optional, string)**: The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. - * **`component_template_substitutions` (Optional, Record)**: A map of component template names to substitute component template definition objects. - * **`index_template_subtitutions` (Optional, Record)**: A map of index template names to substitute index template definition objects. 
- * **`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })** - * **`pipeline_substitutions` (Optional, Record)**: Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. - * **`pipeline` (Optional, string)**: The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index. - - - -## slm [_slm] - - -### delete_lifecycle [_delete_lifecycle_2] - -Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. +## client.slm.deleteLifecycle [_slm.delete_lifecycle] +Delete a policy. +Delete a snapshot lifecycle policy definition. +This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle) @@ -12370,20 +12952,19 @@ Delete a policy. Delete a snapshot lifecycle policy definition. This operation p client.slm.deleteLifecycle({ policy_id }) ``` +### Arguments [_arguments_slm.delete_lifecycle] -### Arguments [_arguments_421] - -* **Request (object):** +#### Request (object) [_request_slm.delete_lifecycle] +- **`policy_id` (string)**: The id of the snapshot lifecycle policy to remove +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. - * **`policy_id` (string)**: The id of the snapshot lifecycle policy to remove - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### execute_lifecycle [_execute_lifecycle] - -Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. +## client.slm.executeLifecycle [_slm.execute_lifecycle] +Run a policy. +Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. +The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle) @@ -12391,20 +12972,19 @@ Run a policy. 
Immediately create a snapshot according to the snapshot lifecycle client.slm.executeLifecycle({ policy_id }) ``` +### Arguments [_arguments_slm.execute_lifecycle] -### Arguments [_arguments_422] - -* **Request (object):** - - * **`policy_id` (string)**: The id of the snapshot lifecycle policy to be executed - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - +#### Request (object) [_request_slm.execute_lifecycle] +- **`policy_id` (string)**: The id of the snapshot lifecycle policy to be executed +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. - -### execute_retention [_execute_retention] - -Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule. +## client.slm.executeRetention [_slm.execute_retention] +Run a retention policy. +Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. +The retention policy is normally applied according to its schedule. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention) @@ -12412,19 +12992,17 @@ Run a retention policy. Manually apply the retention policy to force immediate r client.slm.executeRetention({ ... }) ``` +### Arguments [_arguments_slm.execute_retention] -### Arguments [_arguments_423] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_lifecycle [_get_lifecycle_2] +#### Request (object) [_request_slm.execute_retention] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. +## client.slm.getLifecycle [_slm.get_lifecycle] +Get policy information. +Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle) @@ -12432,20 +13010,18 @@ Get policy information. 
Get snapshot lifecycle policy definitions and informatio client.slm.getLifecycle({ ... }) ``` +### Arguments [_arguments_slm.get_lifecycle] -### Arguments [_arguments_424] +#### Request (object) [_request_slm.get_lifecycle] +- **`policy_id` (Optional, string | string[])**: List of snapshot lifecycle policies to retrieve +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** - - * **`policy_id` (Optional, string | string[])**: List of snapshot lifecycle policies to retrieve - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_stats [_get_stats] - -Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. +## client.slm.getStats [_slm.get_stats] +Get snapshot lifecycle management statistics. +Get global and policy-level statistics about actions taken by snapshot lifecycle management. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats) @@ -12453,18 +13029,13 @@ Get snapshot lifecycle management statistics. Get global and policy-level statis client.slm.getStats({ ... }) ``` +### Arguments [_arguments_slm.get_stats] -### Arguments [_arguments_425] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_status [_get_status_3] +#### Request (object) [_request_slm.get_stats] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.slm.getStatus [_slm.get_status] Get the snapshot lifecycle management status. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status) @@ -12473,19 +13044,21 @@ Get the snapshot lifecycle management status. client.slm.getStatus({ ... }) ``` +### Arguments [_arguments_slm.get_status] -### Arguments [_arguments_426] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - - - -### put_lifecycle [_put_lifecycle_2] +#### Request (object) [_request_slm.get_status] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. -Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored. +## client.slm.putLifecycle [_slm.put_lifecycle] +Create or update a policy. +Create or update a snapshot lifecycle policy. +If the policy already exists, this request increments the policy version. +Only the latest version of a policy is stored. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle) @@ -12493,25 +13066,26 @@ Create or update a policy. Create or update a snapshot lifecycle policy. If the client.slm.putLifecycle({ policy_id }) ``` +### Arguments [_arguments_slm.put_lifecycle] -### Arguments [_arguments_427] +#### Request (object) [_request_slm.put_lifecycle] +- **`policy_id` (string)**: The identifier for the snapshot lifecycle policy you want to create or update. +- **`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })**: Configuration for each snapshot created by the policy. +- **`name` (Optional, string)**: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. +- **`repository` (Optional, string)**: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. +- **`retention` (Optional, { expire_after, max_count, min_count })**: Retention rules used to retain and delete snapshots created by the policy. +- **`schedule` (Optional, string)**: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. -* **Request (object):** - - * **`policy_id` (string)**: The identifier for the snapshot lifecycle policy you want to create or update. - * **`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })**: Configuration for each snapshot created by the policy. 
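+
+For example, a minimal policy registration might look like this (the policy id, schedule, and repository name are illustrative, and the repository must already be registered):
+
+```
+// Sketch: create or update an SLM policy for daily snapshots.
+const response = await client.slm.putLifecycle({
+  policy_id: 'daily-snapshots',
+  schedule: '0 30 1 * * ?', // cron expression; runs every day at 01:30
+  name: '<daily-snap-{now/d}>', // date math is supported; a UUID is appended automatically
+  repository: 'my_repository', // must exist prior to the policy's creation
+  config: { indices: ['data-*'], ignore_unavailable: false },
+  retention: { expire_after: '30d', min_count: 5, max_count: 50 }
+})
+console.log(response)
+```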
- * **`name` (Optional, string)**: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. - * **`repository` (Optional, string)**: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. - * **`retention` (Optional, { expire_after, max_count, min_count })**: Retention rules used to retain and delete snapshots created by the policy. - * **`schedule` (Optional, string)**: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - - - -### start [_start_2] - -Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API. +## client.slm.start [_slm.start] +Start snapshot lifecycle management. +Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. +Manually starting SLM is necessary only if it has been stopped using the stop SLM API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start) @@ -12519,21 +13093,25 @@ Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts client.slm.start({ ... }) ``` +### Arguments [_arguments_slm.start] -### Arguments [_arguments_428] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - +#### Request (object) [_request_slm.start] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +## client.slm.stop [_slm.stop] +Stop snapshot lifecycle management. +Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. +Stopping SLM does not stop any snapshots that are in progress. 
+You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. -### stop [_stop_2] - -Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. - -The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running. +The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. +Use the get snapshot lifecycle management status API to see if SLM is running. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop) @@ -12541,22 +13119,19 @@ The API returns a response as soon as the request is acknowledged, but the plugi client.slm.stop({ ... }) ``` +### Arguments [_arguments_slm.stop] -### Arguments [_arguments_429] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - - - -## snapshot [_snapshot] +#### Request (object) [_request_slm.stop] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. - -### cleanup_repository [_cleanup_repository] - -Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. +## client.snapshot.cleanupRepository [_snapshot.cleanup_repository] +Clean up the snapshot repository. +Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository) @@ -12564,20 +13139,20 @@ Clean up the snapshot repository. Trigger the review of the contents of a snapsh client.snapshot.cleanupRepository({ repository }) ``` +### Arguments [_arguments_snapshot.cleanup_repository] -### Arguments [_arguments_430] - -* **Request (object):** - - * **`repository` (string)**: The name of the snapshot repository to clean up. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. 
If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`
- * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`.

+#### Request (object) [_request_snapshot.cleanup_repository]
+- **`repository` (string)**: The name of the snapshot repository to clean up.
+- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`
+- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+To indicate that the request should never timeout, set it to `-1`.

-
-### clone [_clone_2]
-
-Clone a snapshot. Clone part of all of a snapshot into another snapshot in the same repository.
+## client.snapshot.clone [_snapshot.clone]
+Clone a snapshot.
+Clone part or all of a snapshot into another snapshot in the same repository.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone)

```
client.snapshot.clone({ repository, snapshot, target_snapshot, indices })
```

+### Arguments [_arguments_snapshot.clone]

-### Arguments [_arguments_431]
-
-* **Request (object):**

+#### Request (object) [_request_snapshot.clone]
+- **`repository` (string)**: The name of the snapshot repository that both source and target snapshot belong to.
+- **`snapshot` (string)**: The source snapshot name.
+- **`target_snapshot` (string)**: The target snapshot name.
+- **`indices` (string)**: A list of indices to include in the snapshot.
+Multi-target syntax is supported.
+- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+- **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.

- * **`repository` (string)**: The name of the snapshot repository that both source and target snapshot belong to.
- * **`snapshot` (string)**: The source snapshot name.
- * **`target_snapshot` (string)**: The target snapshot name.
- * **`indices` (string)**: A list of indices to include in the snapshot. Multi-target syntax is supported.
- * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
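+
+For example, to clone two indices out of an existing snapshot (all names here are hypothetical):
+
+```
+// Sketch: clone part of an existing snapshot within the same repository.
+await client.snapshot.clone({
+  repository: 'my_repository',
+  snapshot: 'snapshot_1', // the source snapshot
+  target_snapshot: 'snapshot_1_clone',
+  indices: 'index_a,index_b' // multi-target syntax is supported
+})
+```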
- * **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### create [_create_3] - -Create a snapshot. Take a snapshot of a cluster or of data streams and indices. +## client.snapshot.create [_snapshot.create] +Create a snapshot. +Take a snapshot of a cluster or of data streams and indices. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create) @@ -12609,31 +13184,57 @@ Create a snapshot. Take a snapshot of a cluster or of data streams and indices. client.snapshot.create({ repository, snapshot }) ``` - -### Arguments [_arguments_432] - -* **Request (object):** - - * **`repository` (string)**: The name of the repository for the snapshot. - * **`snapshot` (string)**: The name of the snapshot. It supportes date math. It must be unique in the repository. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Determines how wildcard patterns in the `indices` parameter match data streams and indices. It supports a list of values such as `open,hidden`. - * **`feature_states` (Optional, string[])**: The feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. - - -If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default. - -Note that specifying an empty array will result in the default behavior. To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). ** *`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed. *** *`include_global_state` (Optional, boolean)**: If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). ** *`indices` (Optional, string | string[])**: A list of data streams and indices to include in the snapshot. It supports a multi-target syntax. The default is an empty array (`[]`), which includes all regular data streams and regular indices. To exclude all data streams and indices, use `-*`. - -You can’t use this parameter to include or exclude system indices or system data streams from a snapshot. Use `feature_states` instead. ** *`metadata` (Optional, Record)**: Arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. It can have any contents but it must be less than 1024 bytes. This information is not automatically generated by Elasticsearch. ** *`partial` (Optional, boolean)**: If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. 
-
-If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. ** *`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes.

-
-### create_repository [_create_repository]
-
-Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster's global metadata must be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `clsuter.blocks.read_only_allow_delete` settings) that prevent write access.
-
-Several options for this API can be specified using a query parameter or a request body parameter. If both parameters are specified, only the query parameter is used.

+### Arguments [_arguments_snapshot.create]

+#### Request (object) [_request_snapshot.create]
+- **`repository` (string)**: The name of the repository for the snapshot.
+- **`snapshot` (string)**: The name of the snapshot.
+It supports date math.
+It must be unique in the repository.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Determines how wildcard patterns in the `indices` parameter match data streams and indices.
+It supports a list of values such as `open,hidden`.
+- **`feature_states` (Optional, string[])**: The feature states to include in the snapshot.
+Each feature state includes one or more system indices containing related data.
+You can view a list of eligible features using the get features API.
+
+If `include_global_state` is `true`, all current feature states are included by default.
+If `include_global_state` is `false`, no feature states are included by default.
+
+Note that specifying an empty array will result in the default behavior.
+To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`).
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores data streams and indices in `indices` that are missing or closed.
+If `false`, the request returns an error for any data stream or index that is missing or closed.
+- **`include_global_state` (Optional, boolean)**: If `true`, the current cluster state is included in the snapshot.
+The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies.
+It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`).
+- **`indices` (Optional, string | string[])**: A list of data streams and indices to include in the snapshot.
+It supports a multi-target syntax.
+The default is an empty array (`[]`), which includes all regular data streams and regular indices.
+To exclude all data streams and indices, use `-*`.
+
+You can't use this parameter to include or exclude system indices or system data streams from a snapshot.
+Use `feature_states` instead.
+- **`metadata` (Optional, Record)**: Arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data.
+It can have any contents but it must be less than 1024 bytes.
+This information is not automatically generated by Elasticsearch.
+- **`partial` (Optional, boolean)**: If `true`, it enables you to restore a partial snapshot of indices with unavailable shards.
+Only shards that were successfully included in the snapshot will be restored.
+All missing shards will be recreated as empty.
+
+If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.
+- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the snapshot is complete.
+If `false`, the request returns a response when the snapshot initializes.
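+
+A minimal sketch of taking a snapshot and waiting for it to finish (the repository and snapshot names are illustrative):
+
+```
+// Sketch: snapshot selected indices and block until the snapshot completes.
+await client.snapshot.create({
+  repository: 'my_repository',
+  snapshot: 'snapshot_1', // must be unique in the repository; date math is supported
+  indices: ['my-index-*'],
+  ignore_unavailable: true,
+  include_global_state: false,
+  wait_for_completion: true
+})
+```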
+## client.snapshot.createRepository [_snapshot.create_repository]
+Create or update a snapshot repository.
+IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
+To register a snapshot repository, the cluster's global metadata must be writeable.
+Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.
+
+Several options for this API can be specified using a query parameter or a request body parameter.
+If both parameters are specified, only the query parameter is used.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository)

```
client.snapshot.createRepository({ repository })
```

+### Arguments [_arguments_snapshot.create_repository]

+#### Request (object) [_request_snapshot.create_repository]
+- **`repository` (string)**: The name of the snapshot repository to register or update.
+- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. +- **`verify` (Optional, boolean)**: If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. +If `false`, this verification is skipped. +You can also perform this verification with the verify snapshot repository API. +## client.snapshot.delete [_snapshot.delete] Delete snapshots. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete) @@ -12663,20 +13265,20 @@ Delete snapshots. client.snapshot.delete({ repository, snapshot }) ``` +### Arguments [_arguments_snapshot.delete] -### Arguments [_arguments_434] +#### Request (object) [_request_snapshot.delete] +- **`repository` (string)**: The name of the repository to delete a snapshot from. +- **`snapshot` (string)**: A list of snapshot names to delete. +It also accepts wildcards (`*`). +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. -* **Request (object):** - - * **`repository` (string)**: The name of the repository to delete a snapshot from. - * **`snapshot` (string)**: A list of snapshot names to delete. It also accepts wildcards (`*`). - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - - - -### delete_repository [_delete_repository] - -Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. +## client.snapshot.deleteRepository [_snapshot.delete_repository] +Delete snapshot repositories. +When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. +The snapshots themselves are left untouched and in place. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository) @@ -12684,25 +13286,24 @@ Delete snapshot repositories. When a repository is unregistered, Elasticsearch r client.snapshot.deleteRepository({ repository }) ``` +### Arguments [_arguments_snapshot.delete_repository] -### Arguments [_arguments_435] - -* **Request (object):** - - * **`repository` (string | string[])**: The ame of the snapshot repositories to unregister. Wildcard (`*`) patterns are supported. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. 
- * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`.

+#### Request (object) [_request_snapshot.delete_repository]
+- **`repository` (string | string[])**: The name of the snapshot repositories to unregister.
+Wildcard (`*`) patterns are supported.
+- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+To indicate that the request should never timeout, set it to `-1`.

-
-### get [_get_9]
-
-Get snapshot information.
+## client.snapshot.get [_snapshot.get]
Get snapshot information.

-::::{note}
-The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. Snapshots concurrently created may be seen during an iteration.
-::::

+NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots.
+It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration.
+Snapshots concurrently created may be seen during an iteration.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get)

```
client.snapshot.get({ repository, snapshot })
```

+### Arguments [_arguments_snapshot.get]

-### Arguments [_arguments_436]
-
-* **Request (object):**

+#### Request (object) [_request_snapshot.get]
+- **`repository` (string)**: A list of snapshot repository names used to limit the request.
+Wildcard (`*`) expressions are supported.
+- **`snapshot` (string | string[])**: A list of snapshot names to retrieve.
+Wildcards (`*`) are supported.

* To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`.
* To get information about any snapshots that are currently running, use `_current`.

- * **`repository` (string)**: A list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported.
- * **`snapshot` (string | string[])**: A list of snapshot names to retrieve Wildcards (`*`) are supported.
- * **`after` (Optional, string)**: An offset identifier to start pagination from as returned by the next field in the response body.
- * **`from_sort_value` (Optional, string)**: The value of the current sort column at which to start retrieval.
It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. It can be a millisecond time value or a number when sorting by `index-` or shard count. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable. - * **`index_details` (Optional, boolean)**: If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. The default is `false`, meaning that this information is omitted. - * **`index_names` (Optional, boolean)**: If `true`, the response includes the name of each index in each snapshot. - * **`include_repository` (Optional, boolean)**: If `true`, the response includes the repository name in each snapshot. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`order` (Optional, Enum("asc" | "desc"))**: The sort order. Valid values are `asc` for ascending and `desc` for descending order. The default behavior is ascending order. - * **`offset` (Optional, number)**: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. - * **`size` (Optional, number)**: The maximum number of snapshots to return. The default is 0, which means to return all that match the request without limit. - * **`slm_policy_filter` (Optional, string)**: Filter snapshots by a list of snapshot lifecycle management (SLM) policy names that snapshots belong to. - - -You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. ** *`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))**: The sort order for the result. The default behavior is sorting by snapshot start time stamp. ** *`verbose` (Optional, boolean)**: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. - -::::{note} -The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. -:::: - - - -### get_repository [_get_repository] - +- **`after` (Optional, string)**: An offset identifier to start pagination from as returned by the next field in the response body. +- **`from_sort_value` (Optional, string)**: The value of the current sort column at which to start retrieval. +It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. 
+It can be a millisecond time value or a number when sorting by `index-` or shard count. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable. +- **`index_details` (Optional, boolean)**: If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. +The default is `false`, meaning that this information is omitted. +- **`index_names` (Optional, boolean)**: If `true`, the response includes the name of each index in each snapshot. +- **`include_repository` (Optional, boolean)**: If `true`, the response includes the repository name in each snapshot. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`order` (Optional, Enum("asc" | "desc"))**: The sort order. +Valid values are `asc` for ascending and `desc` for descending order. +The default behavior is ascending order. +- **`offset` (Optional, number)**: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. +- **`size` (Optional, number)**: The maximum number of snapshots to return. +The default is 0, which means to return all that match the request without limit. +- **`slm_policy_filter` (Optional, string)**: Filter snapshots by a list of snapshot lifecycle management (SLM) policy names that snapshots belong to. + +You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. +For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. +Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. +To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. +- **`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))**: The sort order for the result. +The default behavior is sorting by snapshot start time stamp. +- **`verbose` (Optional, boolean)**: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. + +NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. + +## client.snapshot.getRepository [_snapshot.get_repository] Get snapshot repository information. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository) @@ -12752,79 +13359,113 @@ Get snapshot repository information. client.snapshot.getRepository({ ... }) ``` - -### Arguments [_arguments_437] - -* **Request (object):** - - * **`repository` (Optional, string | string[])**: A list of snapshot repository names used to limit the request. 
Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. - - -To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. ** *`local` (Optional, boolean)**: If `true`, the request gets information from the local node only. If `false`, the request gets information from the master node. ** *`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - - -### repository_analyze [_repository_analyze] - -Analyze a snapshot repository. Analyze the performance characteristics and any incorrect behaviour found in a repository. - -The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions. - -There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. - -The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. - -If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. - -If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. 
- -If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. - -If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following: +### Arguments [_arguments_snapshot.get_repository] + +#### Request (object) [_request_snapshot.get_repository] +- **`repository` (Optional, string | string[])**: A list of snapshot repository names used to limit the request. +Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. + +To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. +- **`local` (Optional, boolean)**: If `true`, the request gets information from the local node only. +If `false`, the request gets information from the master node. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +## client.snapshot.repositoryAnalyze [_snapshot.repository_analyze] +Analyze a snapshot repository. +Analyze the performance characteristics and any incorrect behaviour found in a repository. + +The response exposes implementation details of the analysis which may change from version to version. +The response body format is therefore not considered stable and may be different in newer versions. + +There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. +Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. + +The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. +Run your first analysis with the default parameter values to check for simple problems. +If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. +Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. 
+Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. + +If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. +This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. +If so, this storage system is not suitable for use as a snapshot repository. +You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. + +If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. +You can use this information to determine the performance of your storage system. +If any operation fails or returns an incorrect result, the API returns an error. +If the API returns an error, it may not have removed all the data it wrote to the repository. +The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. +You should verify that this location has been cleaned up correctly. +If there is still leftover data at the specified location, you should manually remove it. + +If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. +Some clients are configured to close their connection if no response is received within a certain timeout. +An analysis takes a long time to complete so you might need to relax any such client-side timeouts. +On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. +The path to the leftover data is recorded in the Elasticsearch logs. +You should verify that this location has been cleaned up correctly. +If there is still leftover data at the specified location, you should manually remove it. + +If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. +The analysis attempts to detect common bugs but it does not offer 100% coverage. +Additionally, it does not test the following: * Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. * Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. * Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. -::::{important} -An analysis writes a substantial amount of data to your repository and then reads it back again. This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. -:::: - - -::::{note} -This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. 
-:::: +IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. +This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. +You must ensure this load does not affect other users of these systems. +Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. +NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. -::::{note} -Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. This indicates it behaves incorrectly in ways that the former version did not detect. You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. -:::: +NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. +A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. +This indicates it behaves incorrectly in ways that the former version did not detect. +You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. +NOTE: This API may not work correctly in a mixed-version cluster. -::::{note} -This API may not work correctly in a mixed-version cluster. -:::: +*Implementation details* +NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. -**Implementation details** +The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. +These tasks are distributed over the data and master-eligible nodes in the cluster for execution. -::::{note} -This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. -:::: +For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. +The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. +If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. +For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. 
+These reads are permitted to fail, but must not return partial data. +If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. -The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution. +For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. +In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. +If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. -For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. +The executing node will use a variety of different methods to write the blob. +For instance, where applicable, it will use both single-part and multi-part uploads. +Similarly, the reading nodes will use a variety of different methods to read the data back again. +For instance they may read the entire blob from start to end or may read only a subset of the data. -For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. +For some blob-level tasks, the executing node will cancel the write before it is complete. +In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. -For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. - -The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data. - -For some blob-level tasks, the executing node will cancel the write before it is complete. In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. 
-
-Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes.
+Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation.
+This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time.
+The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type.
+Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed.
+Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results.
+If an operation fails due to contention, Elasticsearch retries the operation until it succeeds.
+Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob.
+Some operations also verify the behavior on small blobs with sizes other than 8 bytes.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze)

@@ -12832,34 +13473,41 @@
client.snapshot.repositoryAnalyze({ repository })
```
-
-### Arguments [_arguments_438]
-
-* **Request (object):**
-
- * **`repository` (string)**: The name of the repository.
- * **`blob_count` (Optional, number)**: The total number of blobs to write to the repository during the test. For realistic experiments, you should set it to at least `2000`.
- * **`concurrency` (Optional, number)**: The number of operations to run concurrently during the test.
- * **`detailed` (Optional, boolean)**: Indicates whether to return detailed results, including timing information for every operation performed during the analysis. If false, it returns only a summary of the analysis.
- * **`early_read_node_count` (Optional, number)**: The number of nodes on which to perform an early read operation while writing each blob. Early read operations are only rarely performed.
- * **`max_blob_size` (Optional, number | string)**: The maximum size of a blob to be written during the test. For realistic experiments, you should set it to at least `2gb`.
- * **`max_total_data_size` (Optional, number | string)**: An upper limit on the total size of all the blobs written during the test. For realistic experiments, you should set it to at least `1tb`.
- * **`rare_action_probability` (Optional, number)**: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. - * **`rarely_abort_writes` (Optional, boolean)**: Indicates whether to rarely cancel writes before they complete. - * **`read_node_count` (Optional, number)**: The number of nodes on which to read a blob after writing. - * **`register_operation_count` (Optional, number)**: The minimum number of linearizable register operations to perform in total. For realistic experiments, you should set it to at least `100`. - * **`seed` (Optional, number)**: The seed for the pseudo-random number generator used to generate the list of operations performed during the test. To repeat the same set of operations in multiple experiments, use the same seed in each experiment. Note that the operations are performed concurrently so might not always happen in the same order on each run. - * **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for the test to complete. If no response is received before the timeout expires, the test is cancelled and returns an error. - - - -### restore [_restore] - -Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. - -You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible. - -To restore a snapshot, the cluster’s global metadata must be writable. Ensure there are’t any cluster blocks that prevent writes. The restore operation ignores index blocks. +### Arguments [_arguments_snapshot.repository_analyze] + +#### Request (object) [_request_snapshot.repository_analyze] +- **`repository` (string)**: The name of the repository. +- **`blob_count` (Optional, number)**: The total number of blobs to write to the repository during the test. +For realistic experiments, you should set it to at least `2000`. +- **`concurrency` (Optional, number)**: The number of operations to run concurrently during the test. +- **`detailed` (Optional, boolean)**: Indicates whether to return detailed results, including timing information for every operation performed during the analysis. +If false, it returns only a summary of the analysis. +- **`early_read_node_count` (Optional, number)**: The number of nodes on which to perform an early read operation while writing each blob. +Early read operations are only rarely performed. +- **`max_blob_size` (Optional, number | string)**: The maximum size of a blob to be written during the test. +For realistic experiments, you should set it to at least `2gb`. +- **`max_total_data_size` (Optional, number | string)**: An upper limit on the total size of all the blobs written during the test. +For realistic experiments, you should set it to at least `1tb`. +- **`rare_action_probability` (Optional, number)**: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. +- **`rarely_abort_writes` (Optional, boolean)**: Indicates whether to rarely cancel writes before they complete. +- **`read_node_count` (Optional, number)**: The number of nodes on which to read a blob after writing. +- **`register_operation_count` (Optional, number)**: The minimum number of linearizable register operations to perform in total. +For realistic experiments, you should set it to at least `100`. 
+- **`seed` (Optional, number)**: The seed for the pseudo-random number generator used to generate the list of operations performed during the test.
+To repeat the same set of operations in multiple experiments, use the same seed in each experiment.
+Note that the operations are performed concurrently so might not always happen in the same order on each run.
+- **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for the test to complete.
+If no response is received before the timeout expires, the test is cancelled and returns an error.
+
+## client.snapshot.restore [_snapshot.restore]
+Restore a snapshot.
+Restore a snapshot of a cluster or data streams and indices.
+
+You can restore a snapshot only to a running cluster with an elected master node.
+The snapshot repository must be registered and available to the cluster.
+The snapshot and cluster versions must be compatible.
+
+To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.

Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:

@@ -12867,7 +13515,7 @@ Before you restore a data stream, ensure the cluster contains a matching index t
GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
```

-If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can’t roll over or create backing indices.
+If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.

If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore)

@@ -12877,18 +13525,26 @@ If your snapshot contains data from App Search or Workplace Search, you must res
client.snapshot.restore({ repository, snapshot })
```

+### Arguments [_arguments_snapshot.restore]
-
-### Arguments [_arguments_439]
-
-* **Request (object):**
-
- * **`repository` (string)**: The name of the repository to restore a snapshot from.
- * **`snapshot` (string)**: The name of the snapshot to restore.
- * **`feature_states` (Optional, string[])**: The feature states to restore. If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. If `include_global_state` is `false`, the request restores no feature states by default. Note that specifying an empty array will result in the default behavior. To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`).
- * **`ignore_index_settings` (Optional, string[])**: The index settings to not restore from the snapshot. You can’t use this option to ignore `index.number_of_shards`.
+#### Request (object) [_request_snapshot.restore]
+- **`repository` (string)**: The name of the repository to restore a snapshot from.
+- **`snapshot` (string)**: The name of the snapshot to restore.
+- **`feature_states` (Optional, string[])**: The feature states to restore.
+If `include_global_state` is `true`, the request restores all feature states in the snapshot by default.
+If `include_global_state` is `false`, the request restores no feature states by default. +Note that specifying an empty array will result in the default behavior. +To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). +- **`ignore_index_settings` (Optional, string[])**: The index settings to not restore from the snapshot. +You can't use this option to ignore `index.number_of_shards`. - -For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream’s matching index template. ** *`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores any index or data stream in indices that’s missing from the snapshot. If `false`, the request returns an error for any missing index or data stream. *** *`include_aliases` (Optional, boolean)**: If `true`, the request restores aliases for any restored data streams and indices. If `false`, the request doesn’t restore aliases. ** *`include_global_state` (Optional, boolean)**: If `true`, restore the cluster state. The cluster state includes: +For data streams, this option applies only to restored backing indices. +New backing indices are configured using the data stream's matching index template. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. +If `false`, the request returns an error for any missing index or data stream. +- **`include_aliases` (Optional, boolean)**: If `true`, the request restores aliases for any restored data streams and indices. +If `false`, the request doesn’t restore aliases. +- **`include_global_state` (Optional, boolean)**: If `true`, restore the cluster state. The cluster state includes: * Persistent cluster settings * Index templates @@ -12898,37 +13554,59 @@ For data streams, this option applies only to restored backing indices. New back * Stored scripts * For snapshots taken after 7.12.0, feature states -If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. +If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. +It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. Use the `feature_states` parameter to configure how feature states are restored. -If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. 
*** *`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Index settings to add or change in restored indices, including backing indices. You can’t use this option to change `index.number_of_shards`. - -For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream’s matching index template. *** *`indices` (Optional, string | string[])**: A list of indices and data streams to restore. It supports a multi-target syntax. The default behavior is all regular indices and regular data streams in the snapshot. - -You can’t use this parameter to restore system indices or system data streams. Use `feature_states` instead. *** *`partial` (Optional, boolean)**: If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. - -If true, it allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. *** *`rename_pattern` (Optional, string)**: A rename pattern to apply to restored data streams and indices. Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. - -The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. ** *`rename_replacement` (Optional, string)**: The rename replacement string that is used with the `rename_pattern`. *** *`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. ** *`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the restore operation completes. The operation is complete when it finishes all attempts to recover primary shards for restored indices. This applies even if one or more of the recovery attempts fail. +If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. 
+- **`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Index settings to add or change in restored indices, including backing indices. +You can't use this option to change `index.number_of_shards`. + +For data streams, this option applies only to restored backing indices. +New backing indices are configured using the data stream's matching index template. +- **`indices` (Optional, string | string[])**: A list of indices and data streams to restore. +It supports a multi-target syntax. +The default behavior is all regular indices and regular data streams in the snapshot. + +You can't use this parameter to restore system indices or system data streams. +Use `feature_states` instead. +- **`partial` (Optional, boolean)**: If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. + +If true, it allows restoring a partial snapshot of indices with unavailable shards. +Only shards that were successfully included in the snapshot will be restored. +All missing shards will be recreated as empty. +- **`rename_pattern` (Optional, string)**: A rename pattern to apply to restored data streams and indices. +Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. + +The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. +- **`rename_replacement` (Optional, string)**: The rename replacement string that is used with the `rename_pattern`. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the restore operation completes. +The operation is complete when it finishes all attempts to recover primary shards for restored indices. +This applies even if one or more of the recovery attempts fail. If `false`, the request returns a response when the restore operation initializes. +## client.snapshot.status [_snapshot.status] +Get the snapshot status. +Get a detailed description of the current state for each shard participating in the snapshot. -### status [_status_2] - -Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. - -Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. 
If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.

-If you omit the `` request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify `` and `` to retrieve information for specific snapshots, even if they’re not currently running.
+If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots.
+This usage is preferred.
+If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running.
-
-::::{warning}
-Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).
-::::
+WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
+The API requires a read from the repository for each shard in each snapshot.
+For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).
-
-Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
+Depending on the latency of your storage, such requests can take an extremely long time to return results.
+These requests can also tax machine resources and, when using cloud storage, incur high processing costs.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status)

```ts
client.snapshot.status({ ... })
```

+### Arguments [_arguments_snapshot.status]
-
-### Arguments [_arguments_440]
-
-* **Request (object):**
-
- * **`repository` (Optional, string)**: The snapshot repository name used to limit the request. It supports wildcards (`*`) if `` isn’t specified.
- * **`snapshot` (Optional, string | string[])**: A list of snapshots to retrieve status for. The default is currently running snapshots. Wildcards (`*`) are not supported.
- * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable. If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned.
- * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
-
+#### Request (object) [_request_snapshot.status]
+- **`repository` (Optional, string)**: The snapshot repository name used to limit the request.
+It supports wildcards (`*`) if `<snapshot>` isn't specified.
+- **`snapshot` (Optional, string | string[])**: A list of snapshots to retrieve status for. +The default is currently running snapshots. +Wildcards (`*`) are not supported. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable. +If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. - -### verify_repository [_verify_repository] - -Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. +## client.snapshot.verifyRepository [_snapshot.verify_repository] +Verify a snapshot repository. +Check for common misconfigurations in a snapshot repository. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository) @@ -12958,22 +13638,18 @@ Verify a snapshot repository. Check for common misconfigurations in a snapshot r client.snapshot.verifyRepository({ repository }) ``` +### Arguments [_arguments_snapshot.verify_repository] -### Arguments [_arguments_441] - -* **Request (object):** - - * **`repository` (string)**: The name of the snapshot repository to verify. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. - - - -## sql [_sql] - - -### clear_cursor [_clear_cursor] +#### Request (object) [_request_snapshot.verify_repository] +- **`repository` (string)**: The name of the snapshot repository to verify. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. +## client.sql.clearCursor [_sql.clear_cursor] Clear an SQL search cursor. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor) @@ -12982,18 +13658,15 @@ Clear an SQL search cursor. client.sql.clearCursor({ cursor }) ``` +### Arguments [_arguments_sql.clear_cursor] -### Arguments [_arguments_442] - -* **Request (object):** - - * **`cursor` (string)**: Cursor to clear. +#### Request (object) [_request_sql.clear_cursor] +- **`cursor` (string)**: Cursor to clear. 
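
To illustrate, here is a minimal sketch of how a cursor is typically obtained and then cleared: `client.sql.query` (described later in this section) returns a `cursor` when more pages are available, and `clearCursor` releases it once no further pages are needed. The node URL and index name are placeholders; adapt them to your cluster.

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details.
const client = new Client({ node: 'https://localhost:9200/' })

async function firstPageOnly () {
  // Fetch a single page of at most 100 rows.
  const page = await client.sql.query({
    query: 'SELECT * FROM "my-index" ORDER BY "@timestamp" DESC',
    fetch_size: 100
  })
  console.log(page.rows)

  // When more pages exist, the response carries a cursor.
  // Clearing it releases the server-side resources immediately,
  // instead of waiting for the cursor to time out.
  if (page.cursor != null) {
    await client.sql.clearCursor({ cursor: page.cursor })
  }
}

firstPageOnly().catch(console.log)
```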
- - -### delete_async [_delete_async] - -Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. +## client.sql.deleteAsync [_sql.delete_async] +Delete an async SQL search. +Delete an async SQL search or a stored synchronous SQL search. +If the search is still running, the API cancels it. If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: @@ -13006,18 +13679,14 @@ If the Elasticsearch security features are enabled, only the following users can client.sql.deleteAsync({ id }) ``` +### Arguments [_arguments_sql.delete_async] -### Arguments [_arguments_443] - -* **Request (object):** - - * **`id` (string)**: The identifier for the search. - - +#### Request (object) [_request_sql.delete_async] +- **`id` (string)**: The identifier for the search. -### get_async [_get_async] - -Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. +## client.sql.getAsync [_sql.get_async] +Get async SQL search results. +Get the current status and available results for an async SQL search or stored synchronous SQL search. If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. @@ -13027,118 +13696,118 @@ If the Elasticsearch security features are enabled, only the user who first subm client.sql.getAsync({ id }) ``` +### Arguments [_arguments_sql.get_async] -### Arguments [_arguments_444] - -* **Request (object):** - - * **`id` (string)**: The identifier for the search. - * **`delimiter` (Optional, string)**: The separator for CSV results. The API supports this parameter only for CSV responses. - * **`format` (Optional, string)**: The format for the response. You must specify a format using this parameter or the `Accept` HTTP header. If you specify both, the API uses this parameter. - * **`keep_alive` (Optional, string | -1 | 0)**: The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. - +#### Request (object) [_request_sql.get_async] +- **`id` (string)**: The identifier for the search. +- **`delimiter` (Optional, string)**: The separator for CSV results. +The API supports this parameter only for CSV responses. +- **`format` (Optional, string)**: The format for the response. +You must specify a format using this parameter or the `Accept` HTTP header. +If you specify both, the API uses this parameter. +- **`keep_alive` (Optional, string | -1 | 0)**: The retention period for the search and its results. +It defaults to the `keep_alive` period for the original SQL search. +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. +It defaults to no timeout, meaning the request waits for complete search results. - -### get_async_status [_get_async_status] - -Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search. +## client.sql.getAsyncStatus [_sql.get_async_status] +Get the async SQL search status. +Get the current status of an async SQL search or a stored synchronous SQL search. 
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status) ```ts -client.sql.getAsyncStatus({ id }) -``` - - -### Arguments [_arguments_445] - -* **Request (object):** - - * **`id` (string)**: The identifier for the search. - - - -### query [_query_2] - -Get SQL search results. Run an SQL request. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query) - -```ts -client.sql.query({ ... }) -``` - - -### Arguments [_arguments_446] - -* **Request (object):** - - * **`allow_partial_search_results` (Optional, boolean)**: If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the API returns an error with no partial results. - * **`catalog` (Optional, string)**: The default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. - * **`columnar` (Optional, boolean)**: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. - * **`cursor` (Optional, string)**: The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters. - * **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response. - * **`field_multi_value_leniency` (Optional, boolean)**: If `false`, the API returns an exception when encountering multiple values for a field. If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. - * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering. - * **`index_using_frozen` (Optional, boolean)**: If `true`, the search can run on frozen indices. - * **`keep_alive` (Optional, string | -1 | 0)**: The retention period for an async or saved synchronous search. - * **`keep_on_completion` (Optional, boolean)**: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. If `false`, Elasticsearch only stores async searches that don’t finish before the `wait_for_completion_timeout`. - * **`page_timeout` (Optional, string | -1 | 0)**: The minimum retention period for the scroll cursor. After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. - * **`params` (Optional, Record)**: The values for parameters in the query. 
- * **`query` (Optional, string)**: The SQL query to run. - * **`request_timeout` (Optional, string | -1 | 0)**: The timeout before the request fails. - * **`runtime_mappings` (Optional, Record)**: One or more runtime fields for the search request. These fields take precedence over mapped fields with the same name. - * **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search. - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. If the search doesn’t finish within this period, the search becomes async. - - -To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. *** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))**: The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. - - -### translate [_translate] - -Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding `cursor`. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate) - -```ts -client.sql.translate({ query }) +client.sql.getAsyncStatus({ id }) ``` +### Arguments [_arguments_sql.get_async_status] + +#### Request (object) [_request_sql.get_async_status] +- **`id` (string)**: The identifier for the search. -### Arguments [_arguments_447] +## client.sql.query [_sql.query] +Get SQL search results. +Run an SQL request. -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query) - * **`query` (string)**: The SQL query to run. - * **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response. - * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering. - * **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search. +```ts +client.sql.query({ ... }) +``` +### Arguments [_arguments_sql.query] + +#### Request (object) [_request_sql.query] +- **`allow_partial_search_results` (Optional, boolean)**: If `true`, the response has partial results when there are shard request timeouts or shard failures. +If `false`, the API returns an error with no partial results. +- **`catalog` (Optional, string)**: The default catalog (cluster) for queries. +If unspecified, the queries execute on the data in the local cluster only. 
+- **`columnar` (Optional, boolean)**: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. +The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. +- **`cursor` (Optional, string)**: The cursor used to retrieve a set of paginated results. +If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. +It ignores other request body parameters. +- **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response. +- **`field_multi_value_leniency` (Optional, boolean)**: If `false`, the API returns an exception when encountering multiple values for a field. +If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering. +- **`index_using_frozen` (Optional, boolean)**: If `true`, the search can run on frozen indices. +- **`keep_alive` (Optional, string | -1 | 0)**: The retention period for an async or saved synchronous search. +- **`keep_on_completion` (Optional, boolean)**: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. +If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. +- **`page_timeout` (Optional, string | -1 | 0)**: The minimum retention period for the scroll cursor. +After this time period, a pagination request might fail because the scroll cursor is no longer available. +Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. +- **`params` (Optional, Record)**: The values for parameters in the query. +- **`query` (Optional, string)**: The SQL query to run. +- **`request_timeout` (Optional, string | -1 | 0)**: The timeout before the request fails. +- **`runtime_mappings` (Optional, Record)**: One or more runtime fields for the search request. +These fields take precedence over mapped fields with the same name. +- **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search. +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. +It defaults to no timeout, meaning the request waits for complete search results. +If the search doesn't finish within this period, the search becomes async. + +To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. +- **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))**: The format for the response. +You can also specify a format using the `Accept` HTTP header. 
+If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence.

## client.sql.translate [_sql.translate]
Translate SQL into Elasticsearch queries.
Translate an SQL search into a search API request containing Query DSL.
It accepts the same request body parameters as the SQL search API, excluding `cursor`.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate)

```ts
client.sql.translate({ query })
```

### Arguments [_arguments_sql.translate]

#### Request (object) [_request_sql.translate]
- **`query` (string)**: The SQL query to run.
- **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response.
- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering.
- **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search.

## client.ssl.certificates [_ssl.certificates]
Get SSL certificates.

Get information about the X.509 certificates that are used to encrypt communications in the cluster.
The API returns a list that includes certificates from all TLS contexts including:

- Settings for transport and HTTP interfaces
- TLS settings that are used within authentication realms
- TLS settings for remote monitoring exporters

The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings.
It also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings.

The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.
-::::{note} -When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration. -:::: - +NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration. If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. @@ -13149,22 +13818,25 @@ client.ssl.certificates() ``` -## synonyms [_synonyms] - - -### delete_synonym [_delete_synonym] - +## client.synonyms.deleteSynonym [_synonyms.delete_synonym] Delete a synonym set. You can only delete a synonyms set that is not in use by any index analyzer. -Synonyms sets can be used in synonym graph token filters and synonym token filters. These synonym filters can be used as part of search analyzers. +Synonyms sets can be used in synonym graph token filters and synonym token filters. +These synonym filters can be used as part of search analyzers. -Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase. +Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). +Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase. -If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. To prevent that, synonyms sets that are used in analyzers can’t be deleted. A delete request in this case will return a 400 response code. +If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. +To prevent that, synonyms sets that are used in analyzers can't be deleted. +A delete request in this case will return a 400 response code. -To remove a synonyms set, you must first remove all indices that contain analyzers using it. You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. Once finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it. +To remove a synonyms set, you must first remove all indices that contain analyzers using it. +You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. +Once finished, you can delete the index. +When the synonyms set is not used in analyzers, you will be able to delete it. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym) @@ -13172,18 +13844,14 @@ To remove a synonyms set, you must first remove all indices that contain analyze client.synonyms.deleteSynonym({ id }) ``` +### Arguments [_arguments_synonyms.delete_synonym] -### Arguments [_arguments_448] - -* **Request (object):** - - * **`id` (string)**: The synonyms set identifier to delete. 
+#### Request (object) [_request_synonyms.delete_synonym]
+- **`id` (string)**: The synonyms set identifier to delete.
+
+## client.synonyms.deleteSynonymRule [_synonyms.delete_synonym_rule]
+Delete a synonym rule.
+Delete a synonym rule from a synonym set.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule)

```ts
client.synonyms.deleteSynonymRule({ set_id, rule_id })
```

+### Arguments [_arguments_synonyms.delete_synonym_rule]
-
-### Arguments [_arguments_449]
-
-* **Request (object):**
-
- * **`set_id` (string)**: The ID of the synonym set to update.
- * **`rule_id` (string)**: The ID of the synonym rule to delete.
-
-
-### get_synonym [_get_synonym]
+#### Request (object) [_request_synonyms.delete_synonym_rule]
+- **`set_id` (string)**: The ID of the synonym set to update.
+- **`rule_id` (string)**: The ID of the synonym rule to delete.
+
+## client.synonyms.getSynonym [_synonyms.get_synonym]
Get a synonym set.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym)

```ts
client.synonyms.getSynonym({ id })
```

+### Arguments [_arguments_synonyms.get_synonym]
-
-### Arguments [_arguments_450]
-
-* **Request (object):**
-
- * **`id` (string)**: The synonyms set identifier to retrieve.
- * **`from` (Optional, number)**: The starting offset for query rules to retrieve.
- * **`size` (Optional, number)**: The max number of query rules to retrieve.
-
-
-### get_synonym_rule [_get_synonym_rule]
+#### Request (object) [_request_synonyms.get_synonym]
+- **`id` (string)**: The synonyms set identifier to retrieve.
+- **`from` (Optional, number)**: The starting offset for synonym rules to retrieve.
+- **`size` (Optional, number)**: The max number of synonym rules to retrieve.
+
+## client.synonyms.getSynonymRule [_synonyms.get_synonym_rule]
+Get a synonym rule.
+Get a synonym rule from a synonym set.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule)

```ts
client.synonyms.getSynonymRule({ set_id, rule_id })
```

+### Arguments [_arguments_synonyms.get_synonym_rule]
-
-### Arguments [_arguments_451]
-
-* **Request (object):**
-
- * **`set_id` (string)**: The ID of the synonym set to retrieve the synonym rule from.
- * **`rule_id` (string)**: The ID of the synonym rule to retrieve.
-
-
-### get_synonyms_sets [_get_synonyms_sets]
+#### Request (object) [_request_synonyms.get_synonym_rule]
+- **`set_id` (string)**: The ID of the synonym set to retrieve the synonym rule from.
+- **`rule_id` (string)**: The ID of the synonym rule to retrieve.
+
+## client.synonyms.getSynonymsSets [_synonyms.get_synonyms_sets]
+Get all synonym sets.
+Get a summary of all defined synonym sets.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym)

```ts
client.synonyms.getSynonymsSets({ ...
}) ``` +### Arguments [_arguments_synonyms.get_synonyms_sets] -### Arguments [_arguments_452] - -* **Request (object):** - - * **`from` (Optional, number)**: The starting offset for synonyms sets to retrieve. - * **`size` (Optional, number)**: The maximum number of synonyms sets to retrieve. +#### Request (object) [_request_synonyms.get_synonyms_sets] +- **`from` (Optional, number)**: The starting offset for synonyms sets to retrieve. +- **`size` (Optional, number)**: The maximum number of synonyms sets to retrieve. +## client.synonyms.putSynonym [_synonyms.put_synonym] +Create or update a synonym set. +Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +If you need to manage more synonym rules, you can create multiple synonym sets. - -### put_synonym [_put_synonym] - -Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. - -When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. +When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. +This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym) @@ -13274,19 +13927,15 @@ When an existing synonyms set is updated, the search analyzers that use the syno client.synonyms.putSynonym({ id, synonyms_set }) ``` +### Arguments [_arguments_synonyms.put_synonym] -### Arguments [_arguments_453] - -* **Request (object):** +#### Request (object) [_request_synonyms.put_synonym] +- **`id` (string)**: The ID of the synonyms set to be created or updated. +- **`synonyms_set` ({ id, synonyms } | { id, synonyms }[])**: The synonym rules definitions for the synonyms set. - * **`id` (string)**: The ID of the synonyms set to be created or updated. - * **`synonyms_set` ({ id, synonyms } | { id, synonyms }[])**: The synonym rules definitions for the synonyms set. - - - -### put_synonym_rule [_put_synonym_rule] - -Create or update a synonym rule. Create or update a synonym rule in a synonym set. +## client.synonyms.putSynonymRule [_synonyms.put_synonym_rule] +Create or update a synonym rule. +Create or update a synonym rule in a synonym set. If any of the synonym rules included is invalid, the API returns an error. @@ -13298,32 +13947,26 @@ When you update a synonym rule, all analyzers using the synonyms set will be rel client.synonyms.putSynonymRule({ set_id, rule_id, synonyms }) ``` +### Arguments [_arguments_synonyms.put_synonym_rule] -### Arguments [_arguments_454] - -* **Request (object):** - - * **`set_id` (string)**: The ID of the synonym set. - * **`rule_id` (string)**: The ID of the synonym rule to be updated or created. - * **`synonyms` (string)**: The synonym rule information definition, which must be in Solr format. - - - -## tasks [_tasks_2] - - -### cancel [_cancel] +#### Request (object) [_request_synonyms.put_synonym_rule] +- **`set_id` (string)**: The ID of the synonym set. +- **`rule_id` (string)**: The ID of the synonym rule to be updated or created. +- **`synonyms` (string)**: The synonym rule information definition, which must be in Solr format. 
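
As a sketch of how the synonyms APIs fit together, the following example creates a synonyms set and then updates a single rule in place. The set ID, rule IDs, and connection details are hypothetical placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details.
const client = new Client({ node: 'https://localhost:9200/' })

async function manageSynonyms () {
  // Create or fully replace a synonyms set
  // (limited to 10,000 rules per set).
  await client.synonyms.putSynonym({
    id: 'my-synonyms-set',
    synonyms_set: [
      { id: 'rule-1', synonyms: 'hello, hi' },
      { id: 'rule-2', synonyms: 'bye, goodbye' }
    ]
  })

  // Update one rule in place using Solr format. Search analyzers
  // that use the set are reloaded automatically.
  await client.synonyms.putSynonymRule({
    set_id: 'my-synonyms-set',
    rule_id: 'rule-1',
    synonyms: 'hello, hi, howdy'
  })
}

manageSynonyms().catch(console.log)
```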
+## client.tasks.cancel [_tasks.cancel] Cancel a task. -::::{warning} -The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. -:::: - +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. -A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. +A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. +It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. +The get task information API will continue to list these cancelled tasks until they complete. +The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. -To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. +To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. +You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) @@ -13331,27 +13974,21 @@ To troubleshoot why a cancelled task does not complete promptly, use the get tas client.tasks.cancel({ ... }) ``` +### Arguments [_arguments_tasks.cancel] -### Arguments [_arguments_455] - -* **Request (object):** - - * **`task_id` (Optional, string | number)**: The task identifier. - * **`actions` (Optional, string | string[])**: A list or wildcard expression of actions that is used to limit the request. - * **`nodes` (Optional, string[])**: A list of node IDs or names that is used to limit the request. - * **`parent_task_id` (Optional, string)**: A parent task ID that is used to limit the tasks. - * **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until all found tasks are complete. - - +#### Request (object) [_request_tasks.cancel] +- **`task_id` (Optional, string | number)**: The task identifier. +- **`actions` (Optional, string | string[])**: A list or wildcard expression of actions that is used to limit the request. +- **`nodes` (Optional, string[])**: A list of node IDs or names that is used to limit the request. +- **`parent_task_id` (Optional, string)**: A parent task ID that is used to limit the tasks. +- **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until all found tasks are complete. -### get [_get_10] - -Get task information. Get information about a task currently running in the cluster. 
- -::::{warning} -The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. -:::: +## client.tasks.get [_tasks.get] +Get task information. +Get information about a task currently running in the cluster. +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. If the task identifier is not found, a 404 response code indicates that there are no resources that match the request. @@ -13361,29 +13998,26 @@ If the task identifier is not found, a 404 response code indicates that there ar client.tasks.get({ task_id }) ``` +### Arguments [_arguments_tasks.get] -### Arguments [_arguments_456] - -* **Request (object):** - - * **`task_id` (string)**: The task identifier. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed. - - - -### list [_list_3] +#### Request (object) [_request_tasks.get] +- **`task_id` (string)**: The task identifier. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed. -Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster. - -::::{warning} -The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. -:::: +## client.tasks.list [_tasks.list] +Get all tasks. +Get information about the tasks currently running on one or more nodes in the cluster. +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. **Identifying running tasks** -The `X-Opaque-Id header`, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the headers field for in the task information. This enables you to track certain calls or associate certain tasks with the client that started them. For example: +The `X-Opaque-Id header`, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the headers field for in the task information. +This enables you to track certain calls or associate certain tasks with the client that started them. +For example: ``` curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" @@ -13429,8 +14063,9 @@ content-length: 831 } } ``` - -In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request. +In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header. +The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. +The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request. 
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)

```ts
client.tasks.list({ ... })
```
+### Arguments [_arguments_tasks.list]

-### Arguments [_arguments_457]
-
-* **Request (object):**
-
- * **`actions` (Optional, string | string[])**: A list or wildcard expression of actions used to limit the request. For example, you can use `cluser:*` to retrieve all cluster-related tasks.
- * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about the running tasks. This information is useful to distinguish tasks from each other but is more costly to run.
- * **`group_by` (Optional, Enum("nodes" | "parents" | "none"))**: A key that is used to group tasks in the response. The task lists can be grouped either by nodes or by parent tasks.
- * **`nodes` (Optional, string | string[])**: A list of node IDs or names that is used to limit the returned information.
- * **`parent_task_id` (Optional, string)**: A parent task identifier that is used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. If the parent task is not found, the API does not return a 404 response code.
- * **`timeout` (Optional, string | -1 | 0)**: The period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its information. However, timed out nodes are included in the `node_failures` property.
- * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete.
-
-
-
-## text_structure [_text_structure]
+#### Request (object) [_request_tasks.list]
+- **`actions` (Optional, string | string[])**: A list or wildcard expression of actions used to limit the request.
+For example, you can use `cluster:*` to retrieve all cluster-related tasks.
+- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about the running tasks.
+This information is useful to distinguish tasks from each other but is more costly to run.
+- **`group_by` (Optional, Enum("nodes" | "parents" | "none"))**: A key that is used to group tasks in the response.
+The task lists can be grouped either by nodes or by parent tasks.
+- **`nodes` (Optional, string | string[])**: A list of node IDs or names that is used to limit the returned information.
+- **`parent_task_id` (Optional, string)**: A parent task identifier that is used to limit returned information.
+To return all tasks, omit this parameter or use a value of `-1`.
+If the parent task is not found, the API does not return a 404 response code.
+- **`timeout` (Optional, string | -1 | 0)**: The period to wait for each node to respond.
+If a node does not respond before its timeout expires, the response does not include its information.
+However, timed out nodes are included in the `node_failures` property.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete.

+## client.textStructure.findFieldStructure [_text_structure.find_field_structure]
+Find the structure of a text field.
+Find the structure of a text field in an Elasticsearch index.

-### find_field_structure [_find_field_structure]
-
-Find the structure of a text field. Find the structure of a text field in an Elasticsearch index.
- -This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. +This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. +For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. The response from the API contains: @@ -13469,9 +14105,11 @@ The response from the API contains: * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. -All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. -If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. +If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. +It helps determine why the returned structure was chosen. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure) @@ -13479,31 +14117,56 @@ If the structure finder produces unexpected results, specify the `explain` query client.textStructure.findFieldStructure({ field, index }) ``` - -### Arguments [_arguments_458] - -* **Request (object):** - - * **`field` (string)**: The field that should be analyzed. - * **`index` (string)**: The name of the index that contains the analyzed field. - * **`column_names` (Optional, string)**: If `format` is set to `delimited`, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. - * **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. - * **`documents_to_sample` (Optional, number)**: The number of documents to include in the structural analysis. The minimum value is 2. - * **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. 
This setting primarily has an impact when a whole message Grok pattern such as `%{{CATALINALOG}}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. The intention in that situation is that a user who knows the meanings will rename the fields before using them. - * **`explain` (Optional, boolean)**: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. - * **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. - * **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. - * **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. - * **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. - * **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. - * **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. - - -If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. +### Arguments [_arguments_text_structure.find_field_structure] + +#### Request (object) [_request_text_structure.find_field_structure] +- **`field` (string)**: The field that should be analyzed. +- **`index` (string)**: The name of the index that contains the analyzed field. +- **`column_names` (Optional, string)**: If `format` is set to `delimited`, you can specify the column names in a list. +If this parameter is not specified, the structure finder uses the column names from the header row of the text. 
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example. +- **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. +Only a single character is supported; the delimiter cannot have multiple characters. +By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the delimited format to be detected. +If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. +- **`documents_to_sample` (Optional, number)**: The number of documents to include in the structural analysis. +The minimum value is 2. +- **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. +If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. +The intention in that situation is that a user who knows the meanings will rename the fields before using them. +- **`explain` (Optional, boolean)**: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. +- **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +- **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. +The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +- **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +- **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. +Otherwise, the default value is `false`. +- **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. 
+If the analysis is still running when the timeout expires, it will be stopped.
+- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text.
+In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.

+If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.

For structured text, if you specify this parameter, the field must exist within the text.

+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
+For structured text, it is not compulsory to have a timestamp in the text.
+- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text.
+Only a subset of Java time format letter groups are supported:

* `a`
* `d`
* `dd`
* `EEE`
* `EEEE`
* `H`
* `HH`
* `h`
* `M`
* `MM`
* `MMM`
* `MMMM`
* `mm`
* `ss`
* `XX`
* `XXX`
* `yy`
* `yyyy`
* `zzz`

+Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`).
+Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. +When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. +## client.textStructure.findMessageStructure [_text_structure.find_message_structure] +Find the structure of text messages. +Find the structure of a list of text messages. +The messages must contain data that is suitable to be ingested into Elasticsearch. -### find_message_structure [_find_message_structure] - -Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch. - -This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. +This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. +Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. -* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. +* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. +Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. -All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. -If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. +If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. +It helps determine why the returned structure was chosen. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure) @@ -13556,29 +14227,52 @@ If the structure finder produces unexpected results, specify the `explain` query client.textStructure.findMessageStructure({ messages }) ``` - -### Arguments [_arguments_459] - -* **Request (object):** - - * **`messages` (string[])**: The list of messages you want to analyze. - * **`column_names` (Optional, string)**: If the format is `delimited`, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. 
If the text does not have a header role, columns are named "column1", "column2", "column3", for example. - * **`delimiter` (Optional, string)**: If you the format is `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. - * **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{{CATALINALOG}}` matches the input. If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. - * **`explain` (Optional, boolean)**: If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. - * **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. - * **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. - * **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. - * **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. - * **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. - * **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. 
In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
-
-If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.

For structured text, if you specify this parameter, the field must exist within the text.

-If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. *** *`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported:

* `a`
* `d`
* `dd`
* `EEE`
* `EEEE`
* `H`
* `HH`
* `h`
* `M`
* `MM`
* `MMM`
* `MMMM`
* `mm`
* `ss`
* `XX`
* `XXX`
* `yy`
* `yyyy`
* `zzz`

Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.

One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default.

If this parameter is not specified, the structure finder chooses the best format from a built-in set.

If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.

+### Arguments [_arguments_text_structure.find_message_structure]
+
+#### Request (object) [_request_text_structure.find_message_structure]
+- **`messages` (string[])**: The list of messages you want to analyze.
+- **`column_names` (Optional, string)**: If the format is `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+- **`delimiter` (Optional, string)**: If the format is `delimited`, you can specify the character used to delimit the values in each row.
+Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+- **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it.
+- **`explain` (Optional, boolean)**: If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
+- **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text.
+By default, the API chooses the format.
+In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
+If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+- **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
+The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter.
+If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp".
+If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+- **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character.
+Only a single character is supported.
+If this parameter is not specified, the default value is a double quote (`"`).
+If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
+- **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them.
+If this parameter is not specified and the delimiter is pipe (`|`), the default value is true.
+Otherwise, the default value is `false`.
+- **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take.
+If the analysis is still running when the timeout expires, it will be stopped.
+- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text.
+In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.

+If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.

For structured text, if you specify this parameter, the field must exist within the text.

+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
+For structured text, it is not compulsory to have a timestamp in the text.
+- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text.
+Only a subset of Java time format letter groups are supported:

* `a`
* `d`
* `dd`
* `EEE`
* `EEEE`
* `H`
* `HH`
* `h`
* `M`
* `MM`
* `MMM`
* `MMMM`
* `mm`
* `ss`
* `XX`
* `XXX`
* `yy`
* `yyyy`
* `zzz`

+Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`).
+Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. +Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. -If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. - +If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. +When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. -### find_structure [_find_structure] +## client.textStructure.findStructure [_text_structure.find_structure] +Find the structure of a text file. +The text file must contain data that is suitable to be ingested into Elasticsearch. -Find the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. - -This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary text formats are not currently supported. The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb. +This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. +Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. +It must, however, be text; binary text formats are not currently supported. +The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb. The response from the API contains: @@ -13622,7 +14322,8 @@ The response from the API contains: * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. -All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure) @@ -13630,34 +14331,69 @@ All this information can be calculated by the structure finder with no guidance. client.textStructure.findStructure({ ... }) ``` - -### Arguments [_arguments_460] - -* **Request (object):** - - * **`text_files` (Optional, TJsonDocument[])** - * **`charset` (Optional, string)**: The text’s character set. It must be a character set that is supported by the JVM that Elasticsearch uses. 
For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set. - * **`column_names` (Optional, string)**: If you have set format to `delimited`, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. - * **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. - * **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. This setting primarily has an impact when a whole message Grok pattern such as `%{{CATALINALOG}}` matches the input. If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. - * **`explain` (Optional, boolean)**: If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. - * **`format` (Optional, string)**: The high level structure of the text. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. - * **`grok_pattern` (Optional, string)**: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. - * **`has_header_row` (Optional, boolean)**: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. 
- * **`line_merge_size_limit` (Optional, number)**: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected.
- * **`lines_to_sample` (Optional, number)**: The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2. If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines.
-
-
-::::{note}
-The number of lines and the variation of the lines affects the speed of the analysis. For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. ** *`quote` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. *** *`should_trim_fields` (Optional, boolean)**: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. Otherwise, the default value is `false`. *** *`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will be stopped. ** *`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field.
-::::
-
-
-If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.

For structured text, if you specify this parameter, the field must exist within the text.

-If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. *** *`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text.

+### Arguments [_arguments_text_structure.find_structure]
+
+#### Request (object) [_request_text_structure.find_structure]
+- **`text_files` (Optional, TJsonDocument[])**
+- **`charset` (Optional, string)**: The text's character set.
+It must be a character set that is supported by the JVM that Elasticsearch uses.
+For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`.
+If this parameter is not specified, the structure finder chooses an appropriate character set.
+- **`column_names` (Optional, string)**: If you have set format to `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+- **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. +Only a single character is supported; the delimiter cannot have multiple characters. +By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the delimited format to be detected. +If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. +- **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +Valid values are `disabled` and `v1`. +This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. +If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. +- **`explain` (Optional, boolean)**: If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. +If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. +- **`format` (Optional, string)**: The high level structure of the text. +Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +- **`grok_pattern` (Optional, string)**: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. +The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +- **`has_header_row` (Optional, boolean)**: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. +If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. +- **`line_merge_size_limit` (Optional, number)**: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. +If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. +- **`lines_to_sample` (Optional, number)**: The number of lines to include in the structural analysis, starting from the beginning of the text. +The minimum is 2. 
+If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. + +NOTE: The number of lines and the variation of the lines affects the speed of the analysis. +For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. +If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. +- **`quote` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +- **`should_trim_fields` (Optional, boolean)**: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. +Otherwise, the default value is `false`. +- **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. +If the analysis is still running when the timeout expires then it will be stopped. +- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. +In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. + +If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. +Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. -If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. *** *`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text. +If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. +For structured text, it is not compulsory to have a timestamp in the text. +- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: @@ -13681,18 +14417,22 @@ Only a subset of Java time format letter groups are supported: * `yyyy` * `zzz` -Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. 
+Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. +Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. +For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. -One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. +One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. +Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. -If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. - - -### test_grok_pattern [_test_grok_pattern] +If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. +When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. -Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. +## client.textStructure.testGrokPattern [_text_structure.test_grok_pattern] +Test a Grok pattern. +Test a Grok pattern on one or more lines of text. +The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern) @@ -13700,23 +14440,17 @@ Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API i client.textStructure.testGrokPattern({ grok_pattern, text }) ``` +### Arguments [_arguments_text_structure.test_grok_pattern] -### Arguments [_arguments_461] - -* **Request (object):** - - * **`grok_pattern` (string)**: The Grok pattern to run on the text. - * **`text` (string[])**: The lines of text to run the Grok pattern on. - * **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. - - +#### Request (object) [_request_text_structure.test_grok_pattern] +- **`grok_pattern` (string)**: The Grok pattern to run on the text. +- **`text` (string[])**: The lines of text to run the Grok pattern on. +- **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +Valid values are `disabled` and `v1`. 
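+
+For example, a minimal usage sketch (the pattern and sample line are illustrative):
+
+```ts
+// Test a Grok pattern against one sample log line; the response reports
+// whether the line matched and the offsets of the captured substrings.
+const response = await client.textStructure.testGrokPattern({
+  grok_pattern: '%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{GREEDYDATA:message}',
+  text: ['2024-01-01T12:00:00,123 INFO service started']
+})
+console.log(response)
+```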
-## transform [_transform]
-
-
-### delete_transform [_delete_transform]
-
-Delete a transform. Deletes a transform.
+## client.transform.deleteTransform [_transform.delete_transform]
+Delete a transform.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform)

```ts
client.transform.deleteTransform({ transform_id })
```
+### Arguments [_arguments_transform.delete_transform]

-### Arguments [_arguments_462]
-
-* **Request (object):**
-
- * **`transform_id` (string)**: Identifier for the transform.
- * **`force` (Optional, boolean)**: If this value is false, the transform must be stopped before it can be deleted. If true, the transform is deleted regardless of its current state.
- * **`delete_dest_index` (Optional, boolean)**: If this value is true, the destination index is deleted together with the transform. If false, the destination index will not be deleted
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-
-
-### get_node_stats [_get_node_stats]
+#### Request (object) [_request_transform.delete_transform]
+- **`transform_id` (string)**: Identifier for the transform.
+- **`force` (Optional, boolean)**: If this value is false, the transform must be stopped before it can be deleted. If true, the transform is
+deleted regardless of its current state.
+- **`delete_dest_index` (Optional, boolean)**: If this value is true, the destination index is deleted together with the transform. If false, the destination
+index will not be deleted.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

+## client.transform.getNodeStats [_transform.get_node_stats]
Retrieves transform usage information for transform nodes.

+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html)
+
```ts
client.transform.getNodeStats()
```

-### get_transform [_get_transform]
-
-Get transforms. Retrieves configuration information for transforms.
+## client.transform.getTransform [_transform.get_transform]
+Get transforms.
+Get configuration information for transforms.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform)

```ts
client.transform.getTransform({ ... })
```
+### Arguments [_arguments_transform.get_transform]

-### Arguments [_arguments_463]
-
-* **Request (object):**
-
- * **`transform_id` (Optional, string | string[])**: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``.
- * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:

 - 1. 
Contains wildcard expressions and there are no transforms that match. - 2. Contains the _all string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. +1. Contains wildcard expressions and there are no transforms that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. +If this parameter is false, the request returns a 404 status code when +there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of transforms. +- **`size` (Optional, number)**: Specifies the maximum number of transforms to obtain. +- **`exclude_generated` (Optional, boolean)**: Excludes fields that were automatically added when creating the +transform. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. -If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. ** *`from` (Optional, number)**: Skips the specified number of transforms. *** *`size` (Optional, number)**: Specifies the maximum number of transforms to obtain. ** *`exclude_generated` (Optional, boolean)**: Excludes fields that were automatically added when creating the transform. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. +## client.transform.getTransformStats [_transform.get_transform_stats] +Get transform stats. - -### get_transform_stats [_get_transform_stats] - -Get transform stats. Retrieves usage information for transforms. +Get usage information for transforms. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats) @@ -13781,27 +14520,32 @@ Get transform stats. Retrieves usage information for transforms. client.transform.getTransformStats({ transform_id }) ``` +### Arguments [_arguments_transform.get_transform_stats] -### Arguments [_arguments_464] - -* **Request (object):** - - * **`transform_id` (string | string[])**: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +#### Request (object) [_request_transform.get_transform_stats] +- **`transform_id` (string | string[])**: Identifier for the transform. It can be a transform identifier or a +wildcard expression. You can get information for all transforms by using +`_all`, by specifying `*` as the ``, or by omitting the +``. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - 1. Contains wildcard expressions and there are no transforms that match. - 2. Contains the _all string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. +1. Contains wildcard expressions and there are no transforms that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. +If this parameter is false, the request returns a 404 status code when +there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of transforms. +- **`size` (Optional, number)**: Specifies the maximum number of transforms to obtain. 
+- **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the stats -If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. ** *`from` (Optional, number)**: Skips the specified number of transforms. *** *`size` (Optional, number)**: Specifies the maximum number of transforms to obtain. ** *`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the stats +## client.transform.previewTransform [_transform.preview_transform] +Preview a transform. +Generates a preview of the results that you will get when you create a transform with the same configuration. - -### preview_transform [_preview_transform] - -Preview a transform. Generates a preview of the results that you will get when you create a transform with the same configuration. - -It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations. +It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also +generates a list of mappings and settings for the destination index. These values are determined based on the field +types of the source index and the transform aggregations. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform) @@ -13809,39 +14553,53 @@ It returns a maximum of 100 results. The calculations are based on all the curre client.transform.previewTransform({ ... }) ``` - -### Arguments [_arguments_465] - -* **Request (object):** - - * **`transform_id` (Optional, string)**: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in the request body. - * **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform. - * **`description` (Optional, string)**: Free text description of the transform. - * **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. - * **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. - * **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. - * **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. - * **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. - * **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. - * **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. - - - -### put_transform [_put_transform] - -Create a transform. Creates a transform. - -A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. - -You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. - -You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. - -::::{note} -You must use Kibana or this API to create a transform. Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. -:::: - +### Arguments [_arguments_transform.preview_transform] + +#### Request (object) [_request_transform.preview_transform] +- **`transform_id` (Optional, string)**: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform +configuration details in the request body. +- **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform. +- **`description` (Optional, string)**: Free text description of the transform. +- **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the +transform is running continuously. Also determines the retry interval in +the event of transient failures while the transform is searching or +indexing. The minimum value is 1s and the maximum is 1h. +- **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. +These objects define the group by fields and the aggregation to reduce +the data. +- **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. +- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. +- **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined +criteria is deleted from the destination index. 
+- **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for +each unique key. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. + +## client.transform.putTransform [_transform.put_transform] +Create a transform. +Creates a transform. + +A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as +a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a +unique row per entity. + +You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If +you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in +the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values +in the latest object. + +You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and +`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the +transform remembers which roles the user that created it had at the time of creation and uses those same roles. If +those roles do not have the required privileges on the source and destination indices, the transform fails when it +attempts unauthorized operations. + +NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any +`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do +not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not +give users any privileges on `.data-frame-internal*` indices. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform) @@ -13849,30 +14607,37 @@ You must use Kibana or this API to create a transform. Do not add a transform di client.transform.putTransform({ transform_id, dest, source }) ``` - -### Arguments [_arguments_466] - -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. - * **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination for the transform. - * **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. - * **`description` (Optional, string)**: Free text description of the transform. - * **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is `1s` and the maximum is `1h`. - * **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key. - * **`_meta` (Optional, Record)**: Defines optional transform metadata. 
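+
+For example, a hedged sketch of a pivot transform (the index and field names below are placeholders, not a prescribed template):
+
+```ts
+const response = await client.transform.putTransform({
+  transform_id: 'ecommerce-customer-transform',
+  source: { index: 'kibana_sample_data_ecommerce' },
+  dest: { index: 'ecommerce-customers' },
+  pivot: {
+    // One row per customer...
+    group_by: { customer_id: { terms: { field: 'customer_id' } } },
+    // ...with an aggregated total per row.
+    aggregations: { total_spent: { sum: { field: 'taxful_total_price' } } }
+  },
+  // Run continuously, allowing 60s for late-arriving documents.
+  sync: { time: { field: 'order_date', delay: '60s' } },
+  frequency: '5m'
+})
+```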
- * **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. - * **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. - * **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. - * **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. - * **`defer_validation` (Optional, boolean)**: When the transform is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source index pattern. You can use this parameter to skip the checks, for example when the source index does not exist until after the transform is created. The validations are always run when you start the transform, however, with the exception of privilege checks. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### reset_transform [_reset_transform] - -Reset a transform. Resets a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. +### Arguments [_arguments_transform.put_transform] + +#### Request (object) [_request_transform.put_transform] +- **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. +- **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination for the transform. +- **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. +- **`description` (Optional, string)**: Free text description of the transform. +- **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also +determines the retry interval in the event of transient failures while the transform is searching or indexing. +The minimum value is `1s` and the maximum is `1h`. +- **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key. +- **`_meta` (Optional, Record)**: Defines optional transform metadata. +- **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields +and the aggregation to reduce the data. +- **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the +destination index. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. +- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. 
+- **`defer_validation` (Optional, boolean)**: When the transform is created, a series of validations occur to ensure its success. For example, there is a +check for the existence of the source indices and a check that the destination index is not part of the source +index pattern. You can use this parameter to skip the checks, for example when the source index does not exist +until after the transform is created. The validations are always run when you start the transform, however, with +the exception of privilege checks. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.transform.resetTransform [_transform.reset_transform] +Reset a transform. + +Before you can reset it, you must stop it; alternatively, use the `force` query parameter. +If the destination index was created by the transform, it is deleted. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform) @@ -13880,22 +14645,23 @@ Reset a transform. Resets a transform. Before you can reset it, you must stop it client.transform.resetTransform({ transform_id }) ``` +### Arguments [_arguments_transform.reset_transform] -### Arguments [_arguments_467] - -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. - * **`force` (Optional, boolean)**: If this value is `true`, the transform is reset regardless of its current state. If it’s `false`, the transform must be stopped before it can be reset. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +#### Request (object) [_request_transform.reset_transform] +- **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. +- **`force` (Optional, boolean)**: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform +must be stopped before it can be reset. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.transform.scheduleNowTransform [_transform.schedule_now_transform] +Schedule a transform to start now. - -### schedule_now_transform [_schedule_now_transform] - -Schedule a transform to start now. Instantly runs a transform to process data. - -If you _schedule_now a transform, it will process the new data instantly, without waiting for the configured frequency interval. After _schedule_now API is called, the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime. +Instantly run a transform to process data. +If you run this API, the transform will process the new data instantly, +without waiting for the configured frequency interval. After the API is called, +the transform will be processed again at `now + frequency` unless the API +is called again in the meantime. 
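+
+For example, a minimal sketch (the transform identifier is a placeholder):
+
+```ts
+// Process new data right away; the next regular run then happens at
+// now + frequency, as described above.
+const response = await client.transform.scheduleNowTransform({
+  transform_id: 'ecommerce-customer-transform',
+  timeout: '30s'
+})
+```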
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform) @@ -13903,23 +14669,29 @@ If you _schedule_now a transform, it will process the new data instantly, withou client.transform.scheduleNowTransform({ transform_id }) ``` +### Arguments [_arguments_transform.schedule_now_transform] -### Arguments [_arguments_468] - -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. - * **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the scheduling to take place - - - -### start_transform [_start_transform] +#### Request (object) [_request_transform.schedule_now_transform] +- **`transform_id` (string)**: Identifier for the transform. +- **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the scheduling to take place -Start a transform. Starts a transform. +## client.transform.startTransform [_transform.start_transform] +Start a transform. -When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. +When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is +set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping +definitions for the destination index from the source indices and the transform aggregations. If fields in the +destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), +the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce +mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you +start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings +in a pivot transform. -When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—​with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. +When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you +created the transform, they occur when you start the transform—​with the exception of privilege checks. 
When +Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the +time of creation and uses those same roles. If those roles do not have the required privileges on the source and +destination indices, the transform fails when it attempts unauthorized operations. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform) @@ -13927,20 +14699,16 @@ When the transform starts, a series of validations occur to ensure its success. client.transform.startTransform({ transform_id }) ``` +### Arguments [_arguments_transform.start_transform] -### Arguments [_arguments_469] +#### Request (object) [_request_transform.start_transform] +- **`transform_id` (string)**: Identifier for the transform. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`from` (Optional, string)**: Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`from` (Optional, string)**: Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. - - - -### stop_transform [_stop_transform] - -Stop transforms. Stops one or more transforms. +## client.transform.stopTransform [_transform.stop_transform] +Stop transforms. +Stops one or more transforms. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform) @@ -13948,25 +14716,37 @@ Stop transforms. Stops one or more transforms. client.transform.stopTransform({ transform_id }) ``` +### Arguments [_arguments_transform.stop_transform] -### Arguments [_arguments_470] - -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. To stop multiple transforms, use a list or a wildcard expression. To stop all transforms, use `_all` or `*` as the identifier. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. - +#### Request (object) [_request_transform.stop_transform] +- **`transform_id` (string)**: Identifier for the transform. To stop multiple transforms, use a list or a wildcard expression. +To stop all transforms, use `_all` or `*` as the identifier. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; +contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there +are only partial matches. -If it is true, the API returns a successful acknowledgement message when there are no matches. When there are only partial matches, the API stops the appropriate transforms. +If it is true, the API returns a successful acknowledgement message when there are no matches. 
When there are +only partial matches, the API stops the appropriate transforms. -If it is false, the request returns a 404 status code when there are no matches or only partial matches. ** *`force` (Optional, boolean)**: If it is true, the API forcefully stops the transforms. *** *`timeout` (Optional, string | -1 | 0)**: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the timeout expires, the request returns a timeout exception. However, the request continues processing and eventually moves the transform to a STOPPED state. *** *`wait_for_checkpoint` (Optional, boolean)**: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, the transform stops as soon as possible. ** *`wait_for_completion` (Optional, boolean)**: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns immediately and the indexer is stopped asynchronously in the background. +If it is false, the request returns a 404 status code when there are no matches or only partial matches. +- **`force` (Optional, boolean)**: If it is true, the API forcefully stops the transforms. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the +timeout expires, the request returns a timeout exception. However, the request continues processing and +eventually moves the transform to a STOPPED state. +- **`wait_for_checkpoint` (Optional, boolean)**: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, +the transform stops as soon as possible. +- **`wait_for_completion` (Optional, boolean)**: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns +immediately and the indexer is stopped asynchronously in the background. +## client.transform.updateTransform [_transform.update_transform] +Update a transform. +Updates certain properties of a transform. -### update_transform [_update_transform] - -Update a transform. Updates certain properties of a transform. - -All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges. +All updated properties except `description` do not take effect until after the transform starts the next checkpoint, +thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` +privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When +Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the +time of update and runs with those privileges. 
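+
+For example, a hedged sketch that changes only the check interval and description of an existing transform (the identifier is a placeholder):
+
+```ts
+// Only the provided properties are updated; all of them except
+// description take effect at the next checkpoint.
+const response = await client.transform.updateTransform({
+  transform_id: 'ecommerce-customer-transform',
+  frequency: '2m',
+  description: 'Check for new orders every two minutes'
+})
+```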
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform) @@ -13974,32 +14754,44 @@ All updated properties except `description` do not take effect until after the t client.transform.updateTransform({ transform_id }) ``` - -### Arguments [_arguments_471] - -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. - * **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform. - * **`description` (Optional, string)**: Free text description of the transform. - * **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. - * **`_meta` (Optional, Record)**: Defines optional transform metadata. - * **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. - * **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. - * **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. - * **`retention_policy` (Optional, { time } | null)**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. - * **`defer_validation` (Optional, boolean)**: When true, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the transform is created. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### upgrade_transforms [_upgrade_transforms] - -Upgrade all transforms. Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. - -If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. Resolve the issue then re-run the process again. A summary is returned when the upgrade is finished. - -To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade. +### Arguments [_arguments_transform.update_transform] + +#### Request (object) [_request_transform.update_transform] +- **`transform_id` (string)**: Identifier for the transform. +- **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform. +- **`description` (Optional, string)**: Free text description of the transform. 
+- **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the +transform is running continuously. Also determines the retry interval in +the event of transient failures while the transform is searching or +indexing. The minimum value is 1s and the maximum is 1h. +- **`_meta` (Optional, Record)**: Defines optional transform metadata. +- **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. +- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. +- **`retention_policy` (Optional, { time } | null)**: Defines a retention policy for the transform. Data that meets the defined +criteria is deleted from the destination index. +- **`defer_validation` (Optional, boolean)**: When true, deferrable validations are not run. This behavior may be +desired if the source index does not exist until after the transform is +created. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. + +## client.transform.upgradeTransforms [_transform.upgrade_transforms] +Upgrade all transforms. + +Transforms are compatible across minor versions and between supported major versions. +However, over time, the format of transform configuration information may change. +This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. +It also cleans up the internal data structures that store the transform state and checkpoints. +The upgrade does not affect the source and destination indices. +The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. + +If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. +Resolve the issue then re-run the process again. +A summary is returned when the upgrade is finished. + +To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. +You may want to perform a recent cluster backup prior to the upgrade. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms) @@ -14007,31 +14799,24 @@ To ensure continuous transforms remain running during a major version upgrade of client.transform.upgradeTransforms({ ... }) ``` +### Arguments [_arguments_transform.upgrade_transforms] -### Arguments [_arguments_472] - -* **Request (object):** - - * **`dry_run` (Optional, boolean)**: When true, the request checks for updates but does not run them. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -## watcher [_watcher] +#### Request (object) [_request_transform.upgrade_transforms] +- **`dry_run` (Optional, boolean)**: When true, the request checks for updates but does not run them. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and
+returns an error.

-## watcher [_watcher]

-
-### ack_watch [_ack_watch]
-
-Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions.
+## client.watcher.ackWatch [_watcher.ack_watch]
+Acknowledge a watch.
+Acknowledging a watch enables you to manually throttle the execution of the watch's actions.

The acknowledgement state of an action is stored in the `status.actions..ack.state` structure.

-::::{important}
-If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution.
-::::
-
-Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false).
+IMPORTANT: If the specified watch is currently being executed, this API will return an error.
+The reason for this behavior is to prevent overwriting the watch status from a watch execution.

+Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.
+This happens when the condition of the watch is not met (the condition evaluates to false).

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch)

```ts
client.watcher.ackWatch({ watch_id })
```

+### Arguments [_arguments_watcher.ack_watch]

-### Arguments [_arguments_473]
-
-* **Request (object):**
-
-    * **`watch_id` (string)**: The watch identifier.
-    * **`action_id` (Optional, string | string[])**: A list of the action identifiers to acknowledge. If you omit this parameter, all of the actions of the watch are acknowledged.
-
-
-
-### activate_watch [_activate_watch]
+#### Request (object) [_request_watcher.ack_watch]
+- **`watch_id` (string)**: The watch identifier.
+- **`action_id` (Optional, string | string[])**: A list of the action identifiers to acknowledge.
+If you omit this parameter, all of the actions of the watch are acknowledged.

-Activate a watch. A watch can be either active or inactive.
+## client.watcher.activateWatch [_watcher.activate_watch]
+Activate a watch.
+A watch can be either active or inactive.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch)

```ts
client.watcher.activateWatch({ watch_id })
```

+### Arguments [_arguments_watcher.activate_watch]

-### Arguments [_arguments_474]
-
-* **Request (object):**
-
-    * **`watch_id` (string)**: The watch identifier.
-
-
-### deactivate_watch [_deactivate_watch]
-
-Deactivate a watch. A watch can be either active or inactive.
+## client.watcher.deactivateWatch [_watcher.deactivate_watch]
+Deactivate a watch.
+A watch can be either active or inactive.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch)

```ts
client.watcher.deactivateWatch({ watch_id })
```

+### Arguments [_arguments_watcher.deactivate_watch]

-### Arguments [_arguments_475]
-
-* **Request (object):**
-
-    * **`watch_id` (string)**: The watch identifier.
-
+#### Request (object) [_request_watcher.deactivate_watch]
+- **`watch_id` (string)**: The watch identifier.

-### delete_watch [_delete_watch]
-
-Delete a watch. When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again.
+## client.watcher.deleteWatch [_watcher.delete_watch]
+Delete a watch.
+When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again.

Deleting a watch does not delete any watch execution records related to this watch from the watch history.

-::::{important}
-Deleting a watch must be done by using only this API. Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.
-::::
+IMPORTANT: Deleting a watch must be done by using only this API.
+Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API.
+When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch)

@@ -14104,24 +14877,24 @@
client.watcher.deleteWatch({ id })
```

+### Arguments [_arguments_watcher.delete_watch]

-### Arguments [_arguments_476]
-
-* **Request (object):**
-
-    * **`id` (string)**: The watch identifier.
-
-
-### execute_watch [_execute_watch]
+#### Request (object) [_request_watcher.delete_watch]
+- **`id` (string)**: The watch identifier.

-Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.
+## client.watcher.executeWatch [_watcher.execute_watch]
+Run a watch.
+This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.

-For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.
+For testing and debugging purposes, you also have fine-grained control on how the watch runs.
+You can run the watch without running all of its actions or alternatively by simulating them.
+You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.

-You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as great tool for testing and debugging your watches prior to adding them to Watcher.
+You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.
+This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.
-When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. +When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. +If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch. @@ -14131,26 +14904,26 @@ When using the run watch API, the authorization data of the user that called the client.watcher.executeWatch({ ... }) ``` +### Arguments [_arguments_watcher.execute_watch] -### Arguments [_arguments_477] +#### Request (object) [_request_watcher.execute_watch] +- **`id` (Optional, string)**: The watch identifier. +- **`action_modes` (Optional, Record)**: Determines how to handle the watch actions as part of the watch execution. +- **`alternative_input` (Optional, Record)**: When present, the watch uses this object as a payload instead of executing its own input. +- **`ignore_condition` (Optional, boolean)**: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. +- **`record_execution` (Optional, boolean)**: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. +In addition, the status of the watch is updated, possibly throttling subsequent runs. +This can also be specified as an HTTP parameter. +- **`simulated_actions` (Optional, { actions, all, use_all })** +- **`trigger_data` (Optional, { scheduled_time, triggered_time })**: This structure is parsed as the data of the trigger event that will be used during the watch execution. +- **`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })**: When present, this watch is used instead of the one specified in the request. +This watch is not persisted to the index and `record_execution` cannot be set. +- **`debug` (Optional, boolean)**: Defines whether the watch runs in debug mode. -* **Request (object):** - - * **`id` (Optional, string)**: The watch identifier. - * **`action_modes` (Optional, Record)**: Determines how to handle the watch actions as part of the watch execution. - * **`alternative_input` (Optional, Record)**: When present, the watch uses this object as a payload instead of executing its own input. - * **`ignore_condition` (Optional, boolean)**: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. - * **`record_execution` (Optional, boolean)**: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter. - * **`simulated_actions` (Optional, { actions, all, use_all })** - * **`trigger_data` (Optional, { scheduled_time, triggered_time })**: This structure is parsed as the data of the trigger event that will be used during the watch execution. 
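+
+For example, a hedged sketch that runs an inline, unregistered watch (the watch body is illustrative only):
+
+```ts
+// Simulate a watch without persisting it. Note that `record_execution`
+// cannot be combined with an inline watch definition.
+const response = await client.watcher.executeWatch({
+  watch: {
+    trigger: { schedule: { interval: '10s' } },
+    input: { simple: { payload: 'test' } },
+    condition: { always: {} },
+    actions: {
+      log_payload: { logging: { text: 'watch payload: {{ctx.payload.payload}}' } }
+    }
+  }
+})
+```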
- * **`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })**: When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set. - * **`debug` (Optional, boolean)**: Defines whether the watch runs in debug mode. - - - -### get_settings [_get_settings_4] - -Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. +## client.watcher.getSettings [_watcher.get_settings] +Get Watcher index settings. +Get settings for the Watcher internal index (`.watches`). +Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings) @@ -14158,17 +14931,13 @@ Get Watcher index settings. Get settings for the Watcher internal index (`.watch client.watcher.getSettings({ ... }) ``` +### Arguments [_arguments_watcher.get_settings] -### Arguments [_arguments_478] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_watch [_get_watch] +#### Request (object) [_request_watcher.get_settings] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +## client.watcher.getWatch [_watcher.get_watch] Get a watch. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch) @@ -14177,27 +14946,24 @@ Get a watch. client.watcher.getWatch({ id }) ``` +### Arguments [_arguments_watcher.get_watch] -### Arguments [_arguments_479] - -* **Request (object):** - - * **`id` (string)**: The watch identifier. - - - -### put_watch [_put_watch] - -Create or update a watch. When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. +#### Request (object) [_request_watcher.get_watch] +- **`id` (string)**: The watch identifier. -::::{important} -You must use Kibana or this API to create a watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. -:::: +## client.watcher.putWatch [_watcher.put_watch] +Create or update a watch. +When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. +Typically for the `schedule` trigger, the scheduler is the trigger engine. +IMPORTANT: You must use Kibana or this API to create a watch. +Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. +If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. -When you add a watch you can also define its initial active state by setting the **active** parameter. 
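+
+For example, a hedged sketch that registers a simple watch in an inactive state (the identifier and watch body are illustrative only):
+
+```ts
+const response = await client.watcher.putWatch({
+  id: 'my-watch',
+  active: false, // register the watch without scheduling it
+  trigger: { schedule: { interval: '10m' } },
+  input: { simple: { note: 'test' } },
+  condition: { always: {} },
+  actions: {
+    log_note: { logging: { text: 'watch ran: {{ctx.payload.note}}' } }
+  }
+})
+```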
+When you add a watch you can also define its initial active state by setting the *active* parameter. -When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. +When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. +If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch) @@ -14205,30 +14971,30 @@ When Elasticsearch security features are enabled, your watch can index or search client.watcher.putWatch({ id }) ``` - -### Arguments [_arguments_480] - -* **Request (object):** - - * **`id` (string)**: The identifier for the watch. - * **`actions` (Optional, Record)**: The list of actions that will be run if the condition matches. - * **`condition` (Optional, { always, array_compare, compare, never, script })**: The condition that defines if the actions should be run. - * **`input` (Optional, { chain, http, search, simple })**: The input that defines the input that loads the data for the watch. - * **`metadata` (Optional, Record)**: Metadata JSON that will be copied into the history entries. - * **`throttle_period` (Optional, string | -1 | 0)**: The minimum time between actions being run. The default is 5 seconds. This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. - * **`throttle_period_in_millis` (Optional, Unit)**: Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. - * **`transform` (Optional, { chain, script, search })**: The transform that processes the watch payload to prepare it for the watch actions. - * **`trigger` (Optional, { schedule })**: The trigger that defines when the watch should run. - * **`active` (Optional, boolean)**: The initial state of the watch. The default value is `true`, which means the watch is active by default. - * **`if_primary_term` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified primary term - * **`if_seq_no` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified sequence number - * **`version` (Optional, number)**: Explicit version number for concurrency control - - - -### query_watches [_query_watches] - -Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. +### Arguments [_arguments_watcher.put_watch] + +#### Request (object) [_request_watcher.put_watch] +- **`id` (string)**: The identifier for the watch. +- **`actions` (Optional, Record)**: The list of actions that will be run if the condition matches. +- **`condition` (Optional, { always, array_compare, compare, never, script })**: The condition that defines if the actions should be run. +- **`input` (Optional, { chain, http, search, simple })**: The input that defines the input that loads the data for the watch. 
+- **`metadata` (Optional, Record)**: Metadata JSON that will be copied into the history entries. +- **`throttle_period` (Optional, string | -1 | 0)**: The minimum time between actions being run. +The default is 5 seconds. +This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. +If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. +- **`throttle_period_in_millis` (Optional, Unit)**: Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. +- **`transform` (Optional, { chain, script, search })**: The transform that processes the watch payload to prepare it for the watch actions. +- **`trigger` (Optional, { schedule })**: The trigger that defines when the watch should run. +- **`active` (Optional, boolean)**: The initial state of the watch. +The default value is `true`, which means the watch is active by default. +- **`if_primary_term` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified primary term +- **`if_seq_no` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified sequence number +- **`version` (Optional, number)**: Explicit version number for concurrency control + +## client.watcher.queryWatches [_watcher.query_watches] +Query watches. +Get all registered watches in a paginated manner and optionally filter watches by a query. Note that only the `_id` and `metadata.*` fields are queryable or sortable. @@ -14238,22 +15004,20 @@ Note that only the `_id` and `metadata.*` fields are queryable or sortable. client.watcher.queryWatches({ ... }) ``` +### Arguments [_arguments_watcher.query_watches] -### Arguments [_arguments_481] - -* **Request (object):** +#### Request (object) [_request_watcher.query_watches] +- **`from` (Optional, number)**: The offset from the first result to fetch. +It must be non-negative. +- **`size` (Optional, number)**: The number of hits to return. +It must be non-negative. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query that filters the watches to be returned. +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: One or more fields used to sort the search results. +- **`search_after` (Optional, number | number | string | boolean | null[])**: Retrieve the next page of hits using a set of sort values from the previous page. - * **`from` (Optional, number)**: The offset from the first result to fetch. It must be non-negative. - * **`size` (Optional, number)**: The number of hits to return. It must be non-negative. 
- * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query that filters the watches to be returned. - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: One or more fields used to sort the search results. - * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: Retrieve the next page of hits using a set of sort values from the previous page. - - - -### start [_start_3] - -Start the watch service. Start the Watcher service if it is not already running. +## client.watcher.start [_watcher.start] +Start the watch service. +Start the Watcher service if it is not already running. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start) @@ -14261,18 +15025,15 @@ Start the watch service. Start the Watcher service if it is not already running. client.watcher.start({ ... }) ``` +### Arguments [_arguments_watcher.start] -### Arguments [_arguments_482] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - +#### Request (object) [_request_watcher.start] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -### stats [_stats_7] - -Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter. +## client.watcher.stats [_watcher.stats] +Get Watcher statistics. +This API always returns basic metrics. +You retrieve more metrics by using the metric parameter. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats) @@ -14280,19 +15041,15 @@ Get Watcher statistics. This API always returns basic metrics. You retrieve more client.watcher.stats({ ... }) ``` +### Arguments [_arguments_watcher.stats] -### Arguments [_arguments_483] - -* **Request (object):** - - * **`metric` (Optional, Enum("_all" | "queued_watches" | "current_watches" | "pending_watches") | Enum("_all" | "queued_watches" | "current_watches" | "pending_watches")[])**: Defines which additional metrics are included in the response. - * **`emit_stacktraces` (Optional, boolean)**: Defines whether stack traces are generated for each watch that is running. - +#### Request (object) [_request_watcher.stats] +- **`metric` (Optional, Enum("_all" | "queued_watches" | "current_watches" | "pending_watches") | Enum("_all" | "queued_watches" | "current_watches" | "pending_watches")[])**: Defines which additional metrics are included in the response. +- **`emit_stacktraces` (Optional, boolean)**: Defines whether stack traces are generated for each watch that is running. - -### stop [_stop_3] - -Stop the watch service. Stop the Watcher service if it is running. 
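For example, a minimal sketch of reading Watcher statistics with one of the additional metrics listed above (the pre-configured `client` instance and the logged field are assumptions for illustration):

```
const stats = await client.watcher.stats({
  metric: 'current_watches',
  emit_stacktraces: false
})
// The response carries per-node Watcher statistics.
console.log(stats.stats)
```
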
+## client.watcher.stop [_watcher.stop] +Stop the watch service. +Stop the Watcher service if it is running. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop) @@ -14300,18 +15057,18 @@ Stop the watch service. Stop the Watcher service if it is running. client.watcher.stop({ ... }) ``` +### Arguments [_arguments_watcher.stop] -### Arguments [_arguments_484] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. +#### Request (object) [_request_watcher.stop] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. - - -### update_settings [_update_settings_2] - -Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. +## client.watcher.updateSettings [_watcher.update_settings] +Update Watcher index settings. +Update settings for the Watcher internal index (`.watches`). +Only a subset of settings can be modified. +This includes `index.auto_expand_replicas` and `index.number_of_replicas`. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings) @@ -14319,24 +15076,19 @@ Update Watcher index settings. Update settings for the Watcher internal index (` client.watcher.updateSettings({ ... }) ``` +### Arguments [_arguments_watcher.update_settings] -### Arguments [_arguments_485] - -* **Request (object):** - - * **`index.auto_expand_replicas` (Optional, string)** - * **`index.number_of_replicas` (Optional, number)** - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -## xpack [_xpack] +#### Request (object) [_request_watcher.update_settings] +- **`index.auto_expand_replicas` (Optional, string)** +- **`index.number_of_replicas` (Optional, number)** +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. - -### info [_info_5] - -Get information. The information provided by the API includes: +## client.xpack.info [_xpack.info] +Get information. +The information provided by the API includes: * Build information including the build number and timestamp. * License information about the currently installed license. @@ -14348,20 +15100,19 @@ Get information. The information provided by the API includes: client.xpack.info({ ... 
}) ``` +### Arguments [_arguments_xpack.info] -### Arguments [_arguments_486] - -* **Request (object):** - - * **`categories` (Optional, Enum("build" | "features" | "license")[])**: A list of the information categories to include in the response. For example, `build,license,features`. - * **`accept_enterprise` (Optional, boolean)**: If this param is used it must be set to true - * **`human` (Optional, boolean)**: Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. +#### Request (object) [_request_xpack.info] +- **`categories` (Optional, Enum("build" | "features" | "license")[])**: A list of the information categories to include in the response. +For example, `build,license,features`. +- **`accept_enterprise` (Optional, boolean)**: If this param is used it must be set to true +- **`human` (Optional, boolean)**: Defines whether additional human-readable information is included in the response. +In particular, it adds descriptions and a tag line. - - -### usage [_usage_2] - -Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics. +## client.xpack.usage [_xpack.usage] +Get usage information. +Get information about the features that are currently enabled and available under the current license. +The API also provides some usage statistics. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack) @@ -14369,9 +15120,10 @@ Get usage information. Get information about the features that are currently ena client.xpack.usage({ ... }) ``` +### Arguments [_arguments_xpack.usage] -### Arguments [_arguments_487] - -* **Request (object):** +#### Request (object) [_request_xpack.usage] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index b3dd631c1..c20284ef0 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,133 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class AsyncSearch { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'async_search.delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'async_search.get': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive', + 'typed_keys', + 'wait_for_completion_timeout' + ] + }, + 'async_search.status': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive' + ] + }, + 'async_search.submit': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion', + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'request_cache', + 'routing', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort' + ] + } + } } /** @@ -51,7 +158,10 @@ export default class AsyncSearch { async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['async_search.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +203,10 @@ export default class AsyncSearch { async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['async_search.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -135,7 +248,10 @@ export default class AsyncSearch { async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['async_search.status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -177,8 +293,12 @@ export default class AsyncSearch { async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['async_search.submit'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -205,8 +325,14 @@ export default class AsyncSearch { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index 7f123c5a2..a7f728dba 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,59 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Autoscaling { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'autoscaling.delete_autoscaling_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'autoscaling.get_autoscaling_capacity': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'autoscaling.get_autoscaling_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'autoscaling.put_autoscaling_policy': { + path: [ + 'name' + ], + body: [ + 'policy' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** @@ -51,7 +84,10 @@ export default class Autoscaling { async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.delete_autoscaling_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +129,10 @@ export default class Autoscaling { async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.get_autoscaling_capacity'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -133,7 +172,10 @@ export default class Autoscaling { async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.get_autoscaling_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -175,8 +217,12 @@ export default class Autoscaling { async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['policy'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['autoscaling.put_autoscaling_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -188,8 +234,14 @@ export default class Autoscaling { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index ccdedfcb2..c2e31ffdd 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,37 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + bulk: { + path: [ + 'index' + ], + body: [ + 'operations' + ], + query: [ + 'include_source_on_error', + 'list_executed_pipelines', + 'pipeline', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'timeout', + 'wait_for_active_shards', + 'require_alias', + 'require_data_stream' + ] + } +} /** * Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. 
* To use the `index` action, you must have the `create`, `index`, or `write` index privilege. * To use the `delete` action, you must have the `delete` or `write` index privilege. * To use the `update` action, you must have the `index` or `write` index privilege. * To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. * To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\n optional_source\n action_and_meta_data\n optional_source\n .... action_and_meta_data\n optional_source\n ``` The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. A `create` action fails if a document with the same ID already exists in the target. An `index` action adds or replaces a document as necessary. NOTE: Data streams support only the `create` action. To update or delete a document in a data stream, you must target the backing index containing the document. An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. NOTE: The final line of data must end with a newline character (`\n`). Each newline character may be preceded by a carriage return (`\r`). When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible. There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default, so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.
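**A JavaScript client sketch** As a minimal illustration (assuming an already-configured `client` instance from this library), the alternating NDJSON action/source lines shown above map one-to-one onto the `operations` array accepted by `client.bulk`:

```
// Index one document and delete another in a single bulk request.
const response = await client.bulk({
  operations: [
    { index: { _index: 'test', _id: '1' } },
    { field1: 'value1' },
    { delete: { _index: 'test', _id: '2' } }
  ],
  refresh: 'wait_for' // make the changes visible to search before returning
})
// `errors` is true if any individual action failed; inspect `items` for details.
if (response.errors) console.log(response.items)
```
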
**Client support for bulk requests** Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: * Go: Check out `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` * PHP: Check out bulk indexing. **Submitting bulk requests with cURL** If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. The latter doesn't preserve newlines. For example: ``` $ cat requests { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} ``` **Optimistic concurrency control** Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. **Versioning** Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index or delete operation based on the `_version` mapping. It also supports the `version_type`. **Routing** Each bulk item can include the routing value using the `routing` field. It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Wait for active shards** When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. **Refresh** Control when the changes made by this request are visible to search. NOTE: Only the shards that receive the bulk request will be affected by refresh. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all. @@ -45,8 +61,12 @@ export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['operations'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.bulk + const userQuery = params?.querystring const querystring: Record = userQuery != null ?
{ ...userQuery } : {} @@ -58,8 +78,14 @@ export default async function BulkApi = { + capabilities: { + path: [], + body: [], + query: [] + } +} /** * Checks if the specified combination of method, API, parameters, and arbitrary capabilities are supported @@ -45,7 +42,10 @@ export default async function CapabilitiesApi (this: That, params?: T.TODO, opti export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = acceptedParams.capabilities + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index bc397b310..069613851 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,336 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Cat { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'cat.aliases': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'expand_wildcards', + 'master_timeout' + ] + }, + 'cat.allocation': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.component_templates': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.count': { + path: [ + 'index' + ], + body: [], + query: [ + 'h', + 's' + ] + }, + 'cat.fielddata': { + path: [ + 'fields' + ], + body: [], + query: [ + 'bytes', + 'fields', + 'h', + 's' + ] + }, + 'cat.health': { + path: [], + body: [], + query: [ + 'time', + 'ts', + 'h', + 's' + ] + }, + 'cat.help': { + path: [], + body: [], + query: [] + }, + 'cat.indices': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'expand_wildcards', + 'health', + 'include_unloaded_segments', + 'pri', + 'time', + 'master_timeout', + 'h', + 's' + ] + }, + 'cat.master': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.ml_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_datafeeds': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_jobs': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_trained_models': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'from', + 'size', + 'time' + ] + }, + 'cat.nodeattrs': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.nodes': { + path: [], + body: [], + query: [ + 'bytes', + 'full_id', + 'include_unloaded_segments', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.pending_tasks': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout', + 'time' + ] + }, + 'cat.plugins': { + path: [], + body: [], + query: [ + 'h', + 's', + 'include_bootstrap', + 'local', + 'master_timeout' + ] + }, + 'cat.recovery': { + path: [ + 'index' + ], + body: [], + query: [ + 'active_only', + 'bytes', + 'detailed', + 'index', + 'h', + 's', + 'time' + ] + }, + 'cat.repositories': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.segments': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.shards': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.snapshots': { + path: [ + 'repository' + ], + body: [], + query: [ + 'ignore_unavailable', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.tasks': { + path: [], + body: [], + query: [ + 'actions', + 'detailed', + 'nodes', + 'parent_task_id', + 'h', + 's', + 'time', + 'timeout', + 'wait_for_completion' + ] + }, + 'cat.templates': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 
's', + 'local', + 'master_timeout' + ] + }, + 'cat.thread_pool': { + path: [ + 'thread_pool_patterns' + ], + body: [], + query: [ + 'h', + 's', + 'time', + 'local', + 'master_timeout' + ] + }, + 'cat.transforms': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'h', + 's', + 'time', + 'size' + ] + } + } } /** @@ -51,7 +361,10 @@ export default class Cat { async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cat.aliases'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -101,7 +414,10 @@ export default class Cat { async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise> async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.allocation'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -151,7 +467,10 @@ export default class Cat { async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cat.component_templates'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -201,7 +520,10 @@ export default class Cat { async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise> async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.count'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -251,7 +573,10 @@ export default class Cat { async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise> async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['fields'] + const { + path: acceptedPath + } = this.acceptedParams['cat.fielddata'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -301,7 +626,10 @@ export default class Cat { async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.health'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -341,7 +669,10 @@ export default class Cat { async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise> async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.help'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -381,7 +712,10 @@ export default class Cat { async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.indices'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -431,7 +765,10 @@ export default class Cat { async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise> async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.master'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -471,7 +808,10 @@ export default class Cat { async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -521,7 +861,10 @@ export default class Cat { async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_datafeeds'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -571,7 +914,10 @@ export default class Cat { async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_jobs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -621,7 +967,10 @@ export default class Cat { async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_trained_models'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -671,7 +1020,10 @@ export default class Cat { async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise> async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.nodeattrs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -711,7 +1063,10 @@ export default class Cat { async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise> async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.nodes'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -751,7 +1106,10 @@ export default class Cat { async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.pending_tasks'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -791,7 +1149,10 @@ export default class Cat { async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise> async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.plugins'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -831,7 +1192,10 @@ export default class Cat { async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.recovery'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -881,7 +1245,10 @@ export default class Cat { async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.repositories'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -921,7 +1288,10 @@ export default class Cat { async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.segments'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -971,7 +1341,10 @@ export default class Cat { async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.shards'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1021,7 +1394,10 @@ export default class Cat { async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository'] + const { + path: acceptedPath + } = this.acceptedParams['cat.snapshots'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1071,7 +1447,10 @@ export default class Cat { async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.tasks'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1111,7 +1490,10 @@ export default class Cat { async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cat.templates'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1161,7 +1543,10 @@ export default class Cat { async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise> async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['thread_pool_patterns'] + const { + path: acceptedPath + } = this.acceptedParams['cat.thread_pool'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1211,7 +1596,10 @@ export default class Cat { async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.transforms'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index 29455527c..66849dbb9 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,185 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ccr { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ccr.delete_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.follow': { + path: [ + 'index' + ], + body: [ + 'data_stream_name', + 'leader_index', + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'read_poll_timeout', + 'remote_cluster', + 'settings' + ], + query: [ + 'master_timeout', + 'wait_for_active_shards' + ] + }, + 'ccr.follow_info': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.follow_stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'ccr.forget_follower': { + path: [ + 'index' + ], + body: [ + 'follower_cluster', + 'follower_index', + 'follower_index_uuid', + 'leader_remote_cluster' + ], + query: [ + 'timeout' + ] + }, + 'ccr.get_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.pause_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.pause_follow': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.put_auto_follow_pattern': { + path: [ + 
'name' + ], + body: [ + 'remote_cluster', + 'follow_index_pattern', + 'leader_index_patterns', + 'leader_index_exclusion_patterns', + 'max_outstanding_read_requests', + 'settings', + 'max_outstanding_write_requests', + 'read_poll_timeout', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size' + ], + query: [ + 'master_timeout' + ] + }, + 'ccr.resume_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.resume_follow': { + path: [ + 'index' + ], + body: [ + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'read_poll_timeout' + ], + query: [ + 'master_timeout' + ] + }, + 'ccr.stats': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ccr.unfollow': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** @@ -51,7 +210,10 @@ export default class Ccr { async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.delete_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,8 +255,12 @@ export default class Ccr { async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['data_stream_name', 'leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.follow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -116,8 +282,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -140,7 +312,10 @@ export default class Ccr { async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.follow_info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -182,7 +357,10 @@ export default class Ccr { async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.follow_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -224,8 +402,12 @@ export default class Ccr { async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise> async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['follower_cluster', 'follower_index', 'follower_index_uuid', 'leader_remote_cluster'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.forget_follower'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -247,8 +429,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -271,7 +459,10 @@ export default class Ccr { async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.get_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
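The behavioural core of this refactor is the new `else` branch in each serialization loop: a key that is neither a declared path, body, nor query property now defaults to the request body, unless it is one of the four `commonQueryParams` (`error_trace`, `filter_path`, `human`, `pretty`), which always travel in the querystring. A self-contained sketch of that routing, assuming the `{ path, body, query }` entry shape registered above and a trimmed `ccr.follow`-style spec:

```ts
interface EndpointSpec { path: string[], body: string[], query: string[] }

const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

function splitParams (spec: EndpointSpec, params: Record<string, unknown>): {
  querystring: Record<string, unknown>
  body: Record<string, unknown> | undefined
} {
  const querystring: Record<string, unknown> = {}
  let body: Record<string, unknown> | undefined
  for (const key in params) {
    if (spec.body.includes(key)) {
      body = body ?? {}
      body[key] = params[key] // declared body property
    } else if (spec.path.includes(key)) {
      continue // path parameters are interpolated into the URL elsewhere
    } else if (key !== 'body' && key !== 'querystring') {
      if (spec.query.includes(key) || commonQueryParams.includes(key)) {
        querystring[key] = params[key] // declared or common query parameter
      } else {
        body = body ?? {}
        body[key] = params[key] // unknown keys now default to the body
      }
    }
  }
  return { querystring, body }
}

// With a trimmed ccr.follow-style entry: remote_cluster is a body property,
// wait_for_active_shards a query parameter, index a path parameter.
console.log(splitParams(
  { path: ['index'], body: ['remote_cluster'], query: ['wait_for_active_shards'] },
  { index: 'follower-index', remote_cluster: 'east', wait_for_active_shards: '1' }
))
```

Previously the fall-through put every undeclared key in the querystring; routing it to the body instead matches how most request properties are actually defined in the Elasticsearch specification.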
{ ...userQuery } : {} @@ -321,7 +512,10 @@ export default class Ccr { async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.pause_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -363,7 +557,10 @@ export default class Ccr { async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.pause_follow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -405,8 +602,12 @@ export default class Ccr { async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['remote_cluster', 'follow_index_pattern', 'leader_index_patterns', 'leader_index_exclusion_patterns', 'max_outstanding_read_requests', 'settings', 'max_outstanding_write_requests', 'read_poll_timeout', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.put_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -428,8 +629,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -452,7 +659,10 @@ export default class Ccr { async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.resume_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -494,8 +704,12 @@ export default class Ccr { async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.resume_follow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -517,8 +731,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -541,7 +761,10 @@ export default class Ccr { async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ccr.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -581,7 +804,10 @@ export default class Ccr { async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.unfollow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts index 7b7258503..f611b3bf7 100644 --- a/src/api/api/clear_scroll.ts +++ b/src/api/api/clear_scroll.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. 
under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,22 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + clear_scroll: { + path: [], + body: [ + 'scroll_id' + ], + query: [] + } +} /** * Clear a scrolling search. Clear the search context and results for a scrolling search. @@ -45,8 +46,12 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['scroll_id'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.clear_scroll + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +74,14 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts index 26d5b0e26..027c18182 100644 --- a/src/api/api/close_point_in_time.ts +++ b/src/api/api/close_point_in_time.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
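For standalone API functions such as `clear_scroll`, the lookup table is a module-level `acceptedParams` constant rather than a property on `this`, since there is no namespace class to carry it. A usage sketch consistent with that entry, assuming a current `@elastic/elasticsearch` client; the node URL and scroll id are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder endpoint

// `scroll_id` is listed under `body` in the clear_scroll entry above, so it
// is serialized as the JSON body of DELETE /_search/scroll.
const response = await client.clearScroll({ scroll_id: 'FGluY2x1ZGVfY29udGV4dF91dWlk' }) // placeholder id
console.log(response.succeeded, response.num_freed)
```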
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,22 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + close_point_in_time: { + path: [], + body: [ + 'id' + ], + query: [] + } +} /** * Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. @@ -45,8 +46,12 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.close_point_in_time + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +73,14 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 730c942d2..f1e879d6b 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
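The `close_point_in_time` entry above declares `id` as its only body property, and the doc comment spells out the PIT lifecycle: open explicitly, reuse while `keep_alive` lasts, close as soon as possible. A usage sketch of that lifecycle; the index name and node URL are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder endpoint

// Open a point in time; keep_alive tells Elasticsearch how long to keep it.
const { id } = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
try {
  const result = await client.search({ pit: { id, keep_alive: '1m' }, size: 100 })
  console.log(result.hits.hits.length)
} finally {
  // Keeping points in time has a cost: close them as soon as they are no
  // longer needed. `id` is the single body property declared above.
  await client.closePointInTime({ id })
}
```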
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,202 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Cluster { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'cluster.allocation_explain': { + path: [], + body: [ + 'current_node', + 'index', + 'primary', + 'shard' + ], + query: [ + 'include_disk_info', + 'include_yes_decisions', + 'master_timeout' + ] + }, + 'cluster.delete_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'cluster.delete_voting_config_exclusions': { + path: [], + body: [], + query: [ + 'master_timeout', + 'wait_for_removal' + ] + }, + 'cluster.exists_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'local' + ] + }, + 'cluster.get_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'include_defaults', + 'local', + 'master_timeout' + ] + }, + 'cluster.get_settings': { + path: [], + body: [], + query: [ + 'flat_settings', + 'include_defaults', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.health': { + path: [ + 'index' + ], + body: [], + query: [ + 'expand_wildcards', + 'level', + 'local', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'wait_for_events', + 'wait_for_nodes', + 'wait_for_no_initializing_shards', + 'wait_for_no_relocating_shards', + 'wait_for_status' + ] + }, + 'cluster.info': { + path: [ + 'target' + ], + body: [], + query: [] + }, + 'cluster.pending_tasks': { + path: [], + body: [], + query: [ + 'local', + 'master_timeout' + ] + }, + 'cluster.post_voting_config_exclusions': { + path: [], + body: [], + query: [ + 'node_names', + 'node_ids', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.put_component_template': { + path: [ + 'name' + ], + body: [ + 'template', + 'version', + '_meta', + 'deprecated' + ], + query: [ + 'create', + 'master_timeout' + ] + }, + 'cluster.put_settings': { + path: [], + body: [ + 'persistent', + 'transient' + ], + query: [ + 'flat_settings', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.remote_info': { + path: [], + body: [], + query: [] + }, + 'cluster.reroute': { + path: [], + body: [ + 'commands' + ], + query: [ + 'dry_run', + 'explain', + 'metric', + 'retry_failed', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.state': { + path: [ + 'metric', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'wait_for_metadata_version', + 'wait_for_timeout' + ] + }, + 'cluster.stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'include_remotes', + 'timeout' + ] + } + } } /** @@ -51,8 +227,12 @@ export default class Cluster { async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise> async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise async allocationExplain (this: That, 
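`cluster.health` above is the opposite extreme of `ccr.follow`: its `body` list is empty, so apart from the `index` path parameter every request property is a declared query parameter and the request is sent without a body. For example (placeholder node URL and index name):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder endpoint

// Everything below ends up in the URL:
// GET /_cluster/health/my-index?wait_for_status=yellow&timeout=30s
const health = await client.cluster.health({
  index: 'my-index',          // path parameter
  wait_for_status: 'yellow',  // query parameter
  timeout: '30s'              // query parameter
})
console.log(health.status)
```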
params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['current_node', 'index', 'primary', 'shard'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.allocation_explain'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -75,8 +255,14 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -96,7 +282,10 @@ export default class Cluster { async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.delete_component_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -138,7 +327,10 @@ export default class Cluster { async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.delete_voting_config_exclusions'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -178,7 +370,10 @@ export default class Cluster { async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.exists_component_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
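`cluster.allocation_explain` shows the mixed case: `current_node`, `index`, `primary` and `shard` are declared body properties while `include_disk_info` and `include_yes_decisions` are query parameters, and the generated loop above separates them so the caller never has to. A usage sketch with placeholder values:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder endpoint

const explanation = await client.cluster.allocationExplain({
  index: 'my-index',            // body property
  shard: 0,                     // body property
  primary: true,                // body property
  include_yes_decisions: true   // query parameter
})
console.log(explanation.can_allocate)
```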
{ ...userQuery } : {} @@ -220,7 +415,10 @@ export default class Cluster { async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.get_component_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -270,7 +468,10 @@ export default class Cluster { async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -310,7 +511,10 @@ export default class Cluster { async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.health'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -360,7 +564,10 @@ export default class Cluster { async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['target'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -402,7 +609,10 @@ export default class Cluster { async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.pending_tasks'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -442,7 +652,10 @@ export default class Cluster { async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.post_voting_config_exclusions'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -482,8 +695,12 @@ export default class Cluster { async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['template', 'version', '_meta', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.put_component_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -505,8 +722,14 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -529,8 +752,12 @@ export default class Cluster { async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['persistent', 'transient'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.put_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -553,8 +780,14 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
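Reading the `cluster.put_component_template` entry above: `name` is the path parameter, `create` and `master_timeout` are query parameters, and `template`/`version`/`_meta`/`deprecated` are body properties. A usage sketch with placeholder values:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder endpoint

await client.cluster.putComponentTemplate({
  name: 'my-settings',              // PUT /_component_template/my-settings
  create: true,                     // ?create=true
  template: {                       // JSON body
    settings: { number_of_shards: 1 }
  }
})
```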
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -574,7 +807,10 @@ export default class Cluster { async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.remote_info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -614,8 +850,12 @@ export default class Cluster { async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise> async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['commands'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.reroute'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -638,8 +878,14 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -659,7 +905,10 @@ export default class Cluster { async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise> async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['metric', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.state'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -713,7 +962,10 @@ export default class Cluster { async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index 141aa8002..57e5f5cd1 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
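`cluster.reroute` declares `commands` as its only body property, with `dry_run`, `explain` and the rest in the querystring. A usage sketch; the index and node names are hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder endpoint

const result = await client.cluster.reroute({
  dry_run: true,   // query parameter
  explain: true,   // query parameter
  commands: [      // body property
    { move: { index: 'my-index', shard: 0, from_node: 'node-1', to_node: 'node-2' } }
  ]
})
console.log(result.explanations)
```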
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,342 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Connector { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'connector.check_in': { + path: [ + 'connector_id' + ], + body: [], + query: [] + }, + 'connector.delete': { + path: [ + 'connector_id' + ], + body: [], + query: [ + 'delete_sync_jobs', + 'hard' + ] + }, + 'connector.get': { + path: [ + 'connector_id' + ], + body: [], + query: [ + 'include_deleted' + ] + }, + 'connector.last_sync': { + path: [ + 'connector_id' + ], + body: [ + 'last_access_control_sync_error', + 'last_access_control_sync_scheduled_at', + 'last_access_control_sync_status', + 'last_deleted_document_count', + 'last_incremental_sync_scheduled_at', + 'last_indexed_document_count', + 'last_seen', + 'last_sync_error', + 'last_sync_scheduled_at', + 'last_sync_status', + 'last_synced', + 'sync_cursor' + ], + query: [] + }, + 'connector.list': { + path: [], + body: [], + query: [ + 'from', + 'size', + 'index_name', + 'connector_name', + 'service_type', + 'include_deleted', + 'query' + ] + }, + 'connector.post': { + path: [], + body: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ], + query: [] + }, + 'connector.put': { + path: [ + 'connector_id' + ], + body: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ], + query: [] + }, + 'connector.secret_delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.secret_get': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.secret_post': { + path: [], + body: [], + query: [] + }, + 'connector.secret_put': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.sync_job_cancel': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_check_in': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_claim': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'sync_cursor', + 'worker_hostname' + ], + query: [] + }, + 'connector.sync_job_delete': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_error': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'error' + ], + query: [] + }, + 'connector.sync_job_get': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_list': { + path: [], + body: [], + query: [ + 'from', + 'size', + 'status', + 'connector_id', + 'job_type' + ] + }, + 'connector.sync_job_post': { + path: [], + body: [ + 'id', + 'job_type', + 'trigger_method' + ], + query: [] + }, + 'connector.sync_job_update_stats': { + path: [ + 
'connector_sync_job_id' + ], + body: [ + 'deleted_document_count', + 'indexed_document_count', + 'indexed_document_volume', + 'last_seen', + 'metadata', + 'total_document_count' + ], + query: [] + }, + 'connector.update_active_filtering': { + path: [ + 'connector_id' + ], + body: [], + query: [] + }, + 'connector.update_api_key_id': { + path: [ + 'connector_id' + ], + body: [ + 'api_key_id', + 'api_key_secret_id' + ], + query: [] + }, + 'connector.update_configuration': { + path: [ + 'connector_id' + ], + body: [ + 'configuration', + 'values' + ], + query: [] + }, + 'connector.update_error': { + path: [ + 'connector_id' + ], + body: [ + 'error' + ], + query: [] + }, + 'connector.update_features': { + path: [ + 'connector_id' + ], + body: [ + 'features' + ], + query: [] + }, + 'connector.update_filtering': { + path: [ + 'connector_id' + ], + body: [ + 'filtering', + 'rules', + 'advanced_snippet' + ], + query: [] + }, + 'connector.update_filtering_validation': { + path: [ + 'connector_id' + ], + body: [ + 'validation' + ], + query: [] + }, + 'connector.update_index_name': { + path: [ + 'connector_id' + ], + body: [ + 'index_name' + ], + query: [] + }, + 'connector.update_name': { + path: [ + 'connector_id' + ], + body: [ + 'name', + 'description' + ], + query: [] + }, + 'connector.update_native': { + path: [ + 'connector_id' + ], + body: [ + 'is_native' + ], + query: [] + }, + 'connector.update_pipeline': { + path: [ + 'connector_id' + ], + body: [ + 'pipeline' + ], + query: [] + }, + 'connector.update_scheduling': { + path: [ + 'connector_id' + ], + body: [ + 'scheduling' + ], + query: [] + }, + 'connector.update_service_type': { + path: [ + 'connector_id' + ], + body: [ + 'service_type' + ], + query: [] + }, + 'connector.update_status': { + path: [ + 'connector_id' + ], + body: [ + 'status' + ], + query: [] + } + } } /** @@ -51,7 +367,10 @@ export default class Connector { async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.check_in'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +412,10 @@ export default class Connector { async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -135,7 +457,10 @@ export default class Connector { async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -177,8 +502,12 @@ export default class Connector { async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['last_access_control_sync_error', 'last_access_control_sync_scheduled_at', 'last_access_control_sync_status', 'last_deleted_document_count', 'last_incremental_sync_scheduled_at', 'last_indexed_document_count', 'last_seen', 'last_sync_error', 'last_sync_scheduled_at', 'last_sync_status', 'last_synced', 'sync_cursor'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.last_sync'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -200,8 +529,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -224,7 +559,10 @@ export default class Connector { async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise> async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['connector.list'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -264,8 +602,12 @@ export default class Connector { async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise> async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.post'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
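`connector.list` is another body-less endpoint: `from`, `size`, `index_name`, `connector_name`, `service_type`, `include_deleted` and `query` are all serialized into the querystring of `GET /_connector`. For example (placeholder node URL and index name):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder endpoint

const { count, results } = await client.connector.list({
  from: 0,
  size: 10,
  index_name: 'search-my-data' // hypothetical attached index
})
console.log(count, results.map(connector => connector.name))
```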
{ ...userQuery } : {} @@ -288,8 +630,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -309,8 +657,12 @@ export default class Connector { async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise> async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.put'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -333,8 +685,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -363,7 +721,10 @@ export default class Connector { async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -404,7 +765,10 @@ export default class Connector { async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -445,7 +809,10 @@ export default class Connector { async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_post'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -483,7 +850,10 @@ export default class Connector { async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_put'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -525,7 +895,10 @@ export default class Connector { async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_cancel'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -567,7 +940,10 @@ export default class Connector { async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_check_in'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -609,8 +985,12 @@ export default class Connector { async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const acceptedBody: string[] = ['sync_cursor', 'worker_hostname'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_claim'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -632,8 +1012,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -656,7 +1042,10 @@ export default class Connector { async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -698,8 +1087,12 @@ export default class Connector { async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const acceptedBody: string[] = ['error'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_error'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -721,8 +1114,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -745,7 +1144,10 @@ export default class Connector { async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -787,7 +1189,10 @@ export default class Connector { async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_list'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -827,8 +1232,12 @@ export default class Connector { async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id', 'job_type', 'trigger_method'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_post'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -850,8 +1259,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -871,8 +1286,12 @@ export default class Connector { async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const acceptedBody: string[] = ['deleted_document_count', 'indexed_document_count', 'indexed_document_volume', 'last_seen', 'metadata', 'total_document_count'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_update_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -894,8 +1313,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -918,7 +1343,10 @@ export default class Connector { async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.update_active_filtering'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
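One subtlety worth calling out in `connector.sync_job_post`: `id` is a declared *body* property (the id of the connector to sync), not a path parameter, exactly as the registry entry records it. A usage sketch with a hypothetical connector id:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder endpoint

const job = await client.connector.syncJobPost({
  id: 'my-connector-id',      // body property: the connector to sync
  job_type: 'full',
  trigger_method: 'on_demand'
})
console.log(job.id) // id of the newly created sync job
```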
{ ...userQuery } : {} @@ -960,8 +1388,12 @@ export default class Connector { async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['api_key_id', 'api_key_secret_id'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_api_key_id'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -983,8 +1415,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1007,8 +1445,12 @@ export default class Connector { async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['configuration', 'values'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_configuration'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1030,8 +1472,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1054,8 +1502,12 @@ export default class Connector { async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['error'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_error'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1077,8 +1529,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1101,8 +1559,12 @@ export default class Connector { async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['features'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_features'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1124,8 +1586,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1148,8 +1616,12 @@ export default class Connector { async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['filtering', 'rules', 'advanced_snippet'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_filtering'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1171,8 +1643,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1195,8 +1673,12 @@ export default class Connector { async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['validation'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_filtering_validation'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1218,8 +1700,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1242,8 +1730,12 @@ export default class Connector { async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['index_name'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_index_name'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1265,8 +1757,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1289,8 +1787,12 @@ export default class Connector { async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['name', 'description'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_name'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1312,8 +1814,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1336,8 +1844,12 @@ export default class Connector { async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['is_native'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_native'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1359,8 +1871,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1383,8 +1901,12 @@ export default class Connector { async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['pipeline'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1406,8 +1928,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1430,8 +1958,12 @@ export default class Connector { async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['scheduling'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_scheduling'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1453,8 +1985,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1477,8 +2015,12 @@ export default class Connector { async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['service_type'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_service_type'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1500,8 +2042,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1524,8 +2072,12 @@ export default class Connector { async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['status'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1547,8 +2099,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/count.ts b/src/api/api/count.ts index 6e060b369..820cd85dc 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,39 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + count: { + path: [ + 'index' + ], + body: [ + 'query' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'min_score', + 'preference', + 'routing', + 'terminate_after', + 'q' + ] + } +} /** * Count search results. Get the number of documents matching a query. The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. @@ -45,8 +63,12 @@ export default async function CountApi (this: That, params?: T.CountRequest, opt export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['query'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.count + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
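Every connector method above follows the same two-hunk pattern: the inline `acceptedPath`/`acceptedBody` arrays become a lookup into the class-level `acceptedParams` table, and keys that are neither path parts nor declared query parameters are now routed into the request body instead of the query string. A minimal standalone sketch of that dispatch, under the assumption that this mirrors the generated loop (the `splitParams` helper name and the sample inputs are hypothetical; `commonQueryParams` matches the generated code):

```ts
// Sketch of the body/querystring dispatch used by the generated methods.
const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

function splitParams (
  params: Record<string, unknown>,
  accepted: { path: string[], body: string[], query: string[] }
): { querystring: Record<string, unknown>, body: Record<string, unknown> } {
  const querystring: Record<string, unknown> = {}
  const body: Record<string, unknown> = {}
  for (const key in params) {
    if (accepted.body.includes(key)) {
      body[key] = params[key] // declared body property
    } else if (accepted.path.includes(key)) {
      continue // path parts never travel in the body or query string
    } else if (accepted.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key] // declared (or common) query parameter
    } else {
      body[key] = params[key] // unknown keys now default to the body
    }
  }
  return { querystring, body }
}

// Hypothetical input: 'error' is a declared body property, 'foo' is unknown.
console.log(splitParams(
  { connector_id: 'c1', error: 'boom', pretty: true, foo: 1 },
  { path: ['connector_id'], body: ['error'], query: [] }
))
// -> querystring: { pretty: true }, body: { error: 'boom', foo: 1 }
```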
diff --git a/src/api/api/count.ts b/src/api/api/count.ts
index 6e060b369..820cd85dc 100644
--- a/src/api/api/count.ts
+++ b/src/api/api/count.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,39 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  count: {
+    path: [
+      'index'
+    ],
+    body: [
+      'query'
+    ],
+    query: [
+      'allow_no_indices',
+      'analyzer',
+      'analyze_wildcard',
+      'default_operator',
+      'df',
+      'expand_wildcards',
+      'ignore_throttled',
+      'ignore_unavailable',
+      'lenient',
+      'min_score',
+      'preference',
+      'routing',
+      'terminate_after',
+      'q'
+    ]
+  }
+}
 
 /**
  * Count search results. Get the number of documents matching a query. The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count.
@@ -45,8 +63,12 @@ export default async function CountApi (this: That, params?: T.CountRequest, opt
 export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CountResponse, unknown>>
 export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise<T.CountResponse>
 export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['query']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.count
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -69,8 +91,14 @@ export default async function CountApi (this: That, params?: T.CountRequest, opt
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
 
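For reference, `query` is the only declared body property of `count`, so a top-level `query` is serialized into the request body while declared keys such as `ignore_throttled` stay on the query string. A hedged usage sketch, assuming an already-configured `client` instance and a hypothetical index name:

```ts
// Count documents matching a Query DSL query (hypothetical index and field).
const result = await client.count({
  index: 'my-index',                     // path parameter
  query: { term: { 'user.id': 'kimchy' } } // body property
})
console.log(result.count)
```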
diff --git a/src/api/api/create.ts b/src/api/api/create.ts
index c8c663fa3..0e904a233 100644
--- a/src/api/api/create.ts
+++ b/src/api/api/create.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,39 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  create: {
+    path: [
+      'id',
+      'index'
+    ],
+    body: [
+      'document'
+    ],
+    query: [
+      'if_primary_term',
+      'if_seq_no',
+      'include_source_on_error',
+      'op_type',
+      'pipeline',
+      'refresh',
+      'require_alias',
+      'require_data_stream',
+      'routing',
+      'timeout',
+      'version',
+      'version_type',
+      'wait_for_active_shards'
+    ]
+  }
+}
 
 /**
  * Create a new document in the index. You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs Using `_create` guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with a same ID already exists in the index. To update an existing document, you must use the `/<target>/_doc/` API. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter. Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.
@@ -45,8 +63,12 @@ export default async function CreateApi (this: That, params
 export default async function CreateApi<TDocument = unknown> (this: That, params: T.CreateRequest<TDocument>, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CreateResponse, unknown>>
 export default async function CreateApi<TDocument = unknown> (this: That, params: T.CreateRequest<TDocument>, options?: TransportRequestOptions): Promise<T.CreateResponse>
 export default async function CreateApi<TDocument = unknown> (this: That, params: T.CreateRequest<TDocument>, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['id', 'index']
-  const acceptedBody: string[] = ['document']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.create
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -58,8 +80,14 @@ export default async function CreateApi (this: That, params
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
   }
 
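A hedged usage sketch of the `_create` semantics described in the comment above, assuming a configured `client`; the index name and ID are hypothetical:

```ts
// `_create` indexes the document only if the ID does not already exist.
await client.create({
  index: 'my-index',
  id: '1',
  document: { title: 'hello world' } // declared body property
})
// A second call with the same ID returns a 409: _create never overwrites.
```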
diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts
index e8dc5399d..92cab870b 100644
--- a/src/api/api/dangling_indices.ts
+++ b/src/api/api/dangling_indices.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,12 +21,46 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
 
 export default class DanglingIndices {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'dangling_indices.delete_dangling_index': {
+        path: [
+          'index_uuid'
+        ],
+        body: [],
+        query: [
+          'accept_data_loss',
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'dangling_indices.import_dangling_index': {
+        path: [
+          'index_uuid'
+        ],
+        body: [],
+        query: [
+          'accept_data_loss',
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'dangling_indices.list_dangling_indices': {
+        path: [],
+        body: [],
+        query: []
+      }
+    }
   }
 
   /**
@@ -51,7 +71,10 @@ export default class DanglingIndices {
   async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesDeleteDanglingIndexResponse, unknown>>
   async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesDeleteDanglingIndexResponse>
   async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index_uuid']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['dangling_indices.delete_dangling_index']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -93,7 +116,10 @@ export default class DanglingIndices {
   async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesImportDanglingIndexResponse, unknown>>
   async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesImportDanglingIndexResponse>
   async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index_uuid']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['dangling_indices.import_dangling_index']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -135,7 +161,10 @@ export default class DanglingIndices {
   async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DanglingIndicesListDanglingIndicesResponse, unknown>>
   async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise<T.DanglingIndicesListDanglingIndicesResponse>
   async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['dangling_indices.list_dangling_indices']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts
index 63b4cf22b..2e0a2c61d 100644
--- a/src/api/api/delete.ts
+++ b/src/api/api/delete.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,30 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  delete: {
+    path: [
+      'id',
+      'index'
+    ],
+    body: [],
+    query: [
+      'if_primary_term',
+      'if_seq_no',
+      'refresh',
+      'routing',
+      'timeout',
+      'version',
+      'version_type',
+      'wait_for_active_shards'
+    ]
+  }
+}
 
 /**
  * Delete a document. Remove a JSON document from the specified index. NOTE: You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document. **Optimistic concurrency control** Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Versioning** Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. **Routing** If routing is used during indexing, the routing value also needs to be specified to delete a document. If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 ``` This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified. **Distributed** The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.
@@ -45,7 +54,10 @@ export default async function DeleteApi (this: That, params: T.DeleteRequest, op
 export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteResponse, unknown>>
 export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise<T.DeleteResponse>
 export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['id', 'index']
+  const {
+    path: acceptedPath
+  } = acceptedParams.delete
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
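A hedged sketch of the optimistic concurrency control described in the comment above, assuming a configured `client`; the index, ID, and sequence values are hypothetical:

```ts
// Delete only if the document has not changed since it was last read.
await client.delete({
  index: 'my-index',
  id: '1',
  if_seq_no: 3,      // sequence number from a previous read or write
  if_primary_term: 1 // a mismatch raises a 409 VersionConflictException
})
```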
diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts
index f99e09670..c52be429e 100644
--- a/src/api/api/delete_by_query.ts
+++ b/src/api/api/delete_by_query.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,56 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  delete_by_query: {
+    path: [
+      'index'
+    ],
+    body: [
+      'max_docs',
+      'query',
+      'slice'
+    ],
+    query: [
+      'allow_no_indices',
+      'analyzer',
+      'analyze_wildcard',
+      'conflicts',
+      'default_operator',
+      'df',
+      'expand_wildcards',
+      'from',
+      'ignore_unavailable',
+      'lenient',
+      'max_docs',
+      'preference',
+      'refresh',
+      'request_cache',
+      'requests_per_second',
+      'routing',
+      'q',
+      'scroll',
+      'scroll_size',
+      'search_timeout',
+      'search_type',
+      'slices',
+      'sort',
+      'stats',
+      'terminate_after',
+      'timeout',
+      'version',
+      'wait_for_active_shards',
+      'wait_for_completion'
+    ]
+  }
+}
 
 /**
  * Delete documents. Deletes documents that match the specified query. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `delete` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick, they are not rolled back. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query. **Throttling delete requests** To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to disable throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with slices only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. * Delete performance scales linearly across available resources with the number of slices. Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. **Cancel a delete by query operation** Any delete by query can be canceled using the task cancel API. For example: ``` POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel ``` The task ID can be found by using the get tasks API. Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.
@@ -45,8 +80,12 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteByQueryResponse, unknown>>
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise<T.DeleteByQueryResponse>
 export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['max_docs', 'query', 'slice']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.delete_by_query
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -68,8 +107,14 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
 
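A hedged sketch of the throttling and slicing knobs described in the comment above, assuming a configured `client`; the index and query are hypothetical:

```ts
await client.deleteByQuery({
  index: 'my-index',
  conflicts: 'proceed',     // count version conflicts instead of failing
  slices: 'auto',           // parallelize via sliced scroll, one slice per shard
  requests_per_second: 500, // pad each batch to throttle the bulk deletes
  query: { match: { status: 'stale' } }
})
```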
diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts
index 4da430635..edd325fa6 100644
--- a/src/api/api/delete_by_query_rethrottle.ts
+++ b/src/api/api/delete_by_query_rethrottle.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,22 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  delete_by_query_rethrottle: {
+    path: [
+      'task_id'
+    ],
+    body: [],
+    query: [
+      'requests_per_second'
+    ]
+  }
+}
 
 /**
  * Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
@@ -45,7 +46,10 @@ export default async function DeleteByQueryRethrottleApi (this: That, params: T.
 export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteByQueryRethrottleResponse, unknown>>
 export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<T.DeleteByQueryRethrottleResponse>
 export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['task_id']
+  const {
+    path: acceptedPath
+  } = acceptedParams.delete_by_query_rethrottle
+
  const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts
index e6519dffd..32f909b8b 100644
--- a/src/api/api/delete_script.ts
+++ b/src/api/api/delete_script.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,23 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  delete_script: {
+    path: [
+      'id'
+    ],
+    body: [],
+    query: [
+      'master_timeout',
+      'timeout'
+    ]
+  }
+}
 
 /**
  * Delete a script or search template. Deletes a stored script or search template.
@@ -45,7 +47,10 @@ export default async function DeleteScriptApi (this: That, params: T.DeleteScrip
 export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteScriptResponse, unknown>>
 export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise<T.DeleteScriptResponse>
 export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['id']
+  const {
+    path: acceptedPath
+  } = acceptedParams.delete_script
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
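Hedged usage sketches for the two path-only APIs above, assuming a configured `client`; the script ID is hypothetical and the task ID reuses the example from the delete-by-query comment:

```ts
await client.deleteScript({ id: 'my-stored-script' })
await client.deleteByQueryRethrottle({
  task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619',
  requests_per_second: -1 // -1 disables throttling
})
```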
diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts
index ea301cac5..1dd3eb590 100644
--- a/src/api/api/enrich.ts
+++ b/src/api/api/enrich.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,12 +21,69 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class Enrich {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'enrich.delete_policy': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'enrich.execute_policy': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout',
+          'wait_for_completion'
+        ]
+      },
+      'enrich.get_policy': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'enrich.put_policy': {
+        path: [
+          'name'
+        ],
+        body: [
+          'geo_match',
+          'match',
+          'range'
+        ],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'enrich.stats': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      }
+    }
  }
 
   /**
@@ -51,7 +94,10 @@ export default class Enrich {
   async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichDeletePolicyResponse, unknown>>
   async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichDeletePolicyResponse>
   async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['enrich.delete_policy']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -93,7 +139,10 @@ export default class Enrich {
   async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichExecutePolicyResponse, unknown>>
   async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichExecutePolicyResponse>
   async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['enrich.execute_policy']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -135,7 +184,10 @@ export default class Enrich {
   async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichGetPolicyResponse, unknown>>
   async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichGetPolicyResponse>
   async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['enrich.get_policy']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -185,8 +237,12 @@ export default class Enrich {
   async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichPutPolicyResponse, unknown>>
   async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichPutPolicyResponse>
   async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const acceptedBody: string[] = ['geo_match', 'match', 'range']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['enrich.put_policy']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -208,8 +264,14 @@ export default class Enrich {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
@@ -232,7 +294,10 @@ export default class Enrich {
   async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EnrichStatsResponse, unknown>>
   async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise<T.EnrichStatsResponse>
   async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['enrich.stats']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
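A hedged sketch of `putPolicy`, whose declared body properties are `geo_match`, `match`, and `range` per the table above; the policy, index, and field names are hypothetical:

```ts
await client.enrich.putPolicy({
  name: 'users-policy',
  match: { // one of the three declared body properties
    indices: 'users',
    match_field: 'email',
    enrich_fields: ['first_name', 'last_name']
  }
})
await client.enrich.executePolicy({ name: 'users-policy' })
```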
diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts
index 9f490aca9..ca8e1c185 100644
--- a/src/api/api/eql.ts
+++ b/src/api/api/eql.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,12 +21,79 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class Eql {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'eql.delete': {
+        path: [
+          'id'
+        ],
+        body: [],
+        query: []
+      },
+      'eql.get': {
+        path: [
+          'id'
+        ],
+        body: [],
+        query: [
+          'keep_alive',
+          'wait_for_completion_timeout'
+        ]
+      },
+      'eql.get_status': {
+        path: [
+          'id'
+        ],
+        body: [],
+        query: []
+      },
+      'eql.search': {
+        path: [
+          'index'
+        ],
+        body: [
+          'query',
+          'case_sensitive',
+          'event_category_field',
+          'tiebreaker_field',
+          'timestamp_field',
+          'fetch_size',
+          'filter',
+          'keep_alive',
+          'keep_on_completion',
+          'wait_for_completion_timeout',
+          'allow_partial_search_results',
+          'allow_partial_sequence_results',
+          'size',
+          'fields',
+          'result_position',
+          'runtime_mappings',
+          'max_samples_per_key'
+        ],
+        query: [
+          'allow_no_indices',
+          'allow_partial_search_results',
+          'allow_partial_sequence_results',
+          'expand_wildcards',
+          'ignore_unavailable',
+          'keep_alive',
+          'keep_on_completion',
+          'wait_for_completion_timeout'
+        ]
+      }
+    }
   }
 
   /**
@@ -51,7 +104,10 @@ export default class Eql {
   async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlDeleteResponse, unknown>>
   async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise<T.EqlDeleteResponse>
   async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['eql.delete']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -93,7 +149,10 @@ export default class Eql {
   async get<TEvent = unknown> (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlGetResponse<TEvent>, unknown>>
   async get<TEvent = unknown> (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise<T.EqlGetResponse<TEvent>>
   async get<TEvent = unknown> (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['eql.get']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -135,7 +194,10 @@ export default class Eql {
   async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlGetStatusResponse, unknown>>
   async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<T.EqlGetStatusResponse>
   async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['eql.get_status']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -177,8 +239,12 @@ export default class Eql {
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EqlSearchResponse<TEvent>, unknown>>
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise<T.EqlSearchResponse<TEvent>>
   async search<TEvent = unknown> (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'allow_partial_search_results', 'allow_partial_sequence_results', 'size', 'fields', 'result_position', 'runtime_mappings', 'max_samples_per_key']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['eql.search']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
 
@@ -200,8 +266,14 @@ export default class Eql {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
 
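A hedged sketch of `eql.search`, whose declared body properties include `query` and `fetch_size` per the table above; the data stream name and EQL query are hypothetical:

```ts
const response = await client.eql.search({
  index: 'my-data-stream',
  query: 'process where process.name == "regsvr32.exe"', // body property
  fetch_size: 10                                          // body property
})
console.log(response.hits)
```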
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,90 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Esql { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'esql.async_query': { + path: [], + body: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata', + 'wait_for_completion_timeout' + ], + query: [ + 'allow_partial_results', + 'delimiter', + 'drop_null_columns', + 'format', + 'keep_alive', + 'keep_on_completion', + 'wait_for_completion_timeout' + ] + }, + 'esql.async_query_delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'esql.async_query_get': { + path: [ + 'id' + ], + body: [], + query: [ + 'drop_null_columns', + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'esql.async_query_stop': { + path: [ + 'id' + ], + body: [], + query: [ + 'drop_null_columns' + ] + }, + 'esql.query': { + path: [], + body: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata' + ], + query: [ + 'format', + 'delimiter', + 'drop_null_columns', + 'allow_partial_results' + ] + } + } } /** @@ -51,8 +115,12 @@ export default class Esql { async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['esql.async_query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +142,14 @@ export default class Esql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -95,7 +169,10 @@ export default class Esql { async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -137,7 +214,10 @@ export default class Esql { async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -179,7 +259,10 @@ export default class Esql { async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_stop'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -221,8 +304,12 @@ export default class Esql { async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['esql.query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -244,8 +331,14 @@ export default class Esql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts index 0c5f99bde..3c4fad80b 100644 --- a/src/api/api/exists.ts +++ b/src/api/api/exists.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,32 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + exists: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} /** * Check a document. Verify that a document exists. For example, check to see if a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the document exists, the API returns a status code of `200 - OK`. If the document doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You can use the `version` parameter to check the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. @@ -45,7 +56,10 @@ export default async function ExistsApi (this: That, params: T.ExistsRequest, op export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] + const { + path: acceptedPath + } = acceptedParams.exists + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts index 750302a6f..908e12489 100644 --- a/src/api/api/exists_source.ts +++ b/src/api/api/exists_source.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */

 /* eslint-disable import/export */
@@ -35,7 +21,31 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'

-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record = {
+  exists_source: {
+    path: [
+      'id',
+      'index'
+    ],
+    body: [],
+    query: [
+      'preference',
+      'realtime',
+      'refresh',
+      'routing',
+      '_source',
+      '_source_excludes',
+      '_source_includes',
+      'version',
+      'version_type'
+    ]
+  }
+}

 /**
  * Check for a document source. Check whether a document source exists in an index. For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not available if it is disabled in the mapping.
@@ -45,7 +55,10 @@ export default async function ExistsSourceApi (this: That, params: T.ExistsSourc
 export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise>
 export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise
 export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise {
-  const acceptedPath: string[] = ['id', 'index']
+  const {
+    path: acceptedPath
+  } = acceptedParams.exists_source
+
   const userQuery = params?.querystring
   const querystring: Record = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts
index 16150530b..dbf9ae6ce 100644
--- a/src/api/api/explain.ts
+++ b/src/api/api/explain.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */

 /* eslint-disable import/export */
@@ -35,7 +21,38 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'

-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record = {
+  explain: {
+    path: [
+      'id',
+      'index'
+    ],
+    body: [
+      'query'
+    ],
+    query: [
+      'analyzer',
+      'analyze_wildcard',
+      'default_operator',
+      'df',
+      'lenient',
+      'preference',
+      'routing',
+      '_source',
+      '_source_excludes',
+      '_source_includes',
+      'stored_fields',
+      'q'
+    ]
+  }
+}

 /**
  * Explain a document match result. Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document.
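As an aside for readers following the refactor: the `explain` table above is what the shared request-preparation loop consults at call time. `id` and `index` are resolved into the URL path, the single `body` entry (`query`) is serialized into the JSON payload, and everything in the `query` list lands on the querystring. A minimal usage sketch, assuming a reachable cluster at http://localhost:9200 and an existing index named my-index-000001 (both illustrative):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `index` and `id` become path parameters; `query` is a declared body
// parameter, so the loop serializes it into the request body.
const response = await client.explain({
  index: 'my-index-000001',
  id: '0',
  query: { match: { message: 'elasticsearch' } }
})
console.log(response.matched)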
@@ -45,8 +62,12 @@ export default async function ExplainApi (this: That, param export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise> export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['query'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.explain + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +89,14 @@ export default async function ExplainApi (this: That, param } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/features.ts b/src/api/api/features.ts index 670d84cda..2c24d0490 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,33 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Features { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'features.get_features': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'features.reset_features': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** @@ -51,7 +58,10 @@ export default class Features { async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['features.get_features'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {}
@@ -91,7 +101,10 @@ async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise>
 async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise
 async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['features.reset_features']
+
     const userQuery = params?.querystring
     const querystring: Record = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts
index de9d61a0e..c2cd39b07 100644
--- a/src/api/api/field_caps.ts
+++ b/src/api/api/field_caps.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */

 /* eslint-disable import/export */
@@ -35,7 +21,35 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'

-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record = {
+  field_caps: {
+    path: [
+      'index'
+    ],
+    body: [
+      'fields',
+      'index_filter',
+      'runtime_mappings'
+    ],
+    query: [
+      'allow_no_indices',
+      'expand_wildcards',
+      'fields',
+      'ignore_unavailable',
+      'include_unmapped',
+      'filters',
+      'types',
+      'include_empty_fields'
+    ]
+  }
+}

 /**
  * Get the field capabilities. Get information about the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.
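Worth noting in the table above: `fields` is declared under both `body` and `query`. The routing loop tests the `body` list first, so a top-level `fields` argument travels in the JSON body rather than the querystring. A minimal sketch, assuming a running cluster and indices matching my-index-* (illustrative names):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `index` is a path parameter; `fields` matches the body list first,
// so it is sent in the request body.
const response = await client.fieldCaps({
  index: 'my-index-*',
  fields: ['rating', 'title']
})
console.log(Object.keys(response.fields))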
@@ -45,8 +59,12 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['fields', 'index_filter', 'runtime_mappings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.field_caps + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +87,14 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index 042fcbfd1..030c72e9a 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,159 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Fleet { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'fleet.delete_secret': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'fleet.get_secret': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'fleet.global_checkpoints': { + path: [ + 'index' + ], + body: [], + query: [ + 'wait_for_advance', + 'wait_for_index', + 'checkpoints', + 'timeout' + ] + }, + 'fleet.msearch': { + path: [ + 'index' + ], + body: [ + 'searches' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + }, + 'fleet.post_secret': { + path: [], + body: [], + query: [] + }, + 'fleet.search': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'pre_filter_shard_size', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + } + } } /** @@ -50,7 +183,10 @@ export default class Fleet { async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['fleet.delete_secret'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -91,7 +227,10 @@ export default class Fleet { async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['fleet.get_secret'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -133,7 +272,10 @@ export default class Fleet { async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise> async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['fleet.global_checkpoints'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -175,8 +317,12 @@ export default class Fleet { async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise> async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['searches'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['fleet.msearch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -188,8 +334,14 @@ export default class Fleet { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -218,7 +370,10 @@ export default class Fleet { async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['fleet.post_secret'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -257,8 +412,12 @@ export default class Fleet { async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise> async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['fleet.search'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -280,8 +439,14 @@ export default class Fleet { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/get.ts b/src/api/api/get.ts index 3cb82914a..6b8c79f55 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,33 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} /** * Get a document by its ID. Get a document and its source or stored fields from an index. By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). 
In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the `realtime` parameter to false. **Source filtering** By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. You can turn off `_source` retrieval by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false ``` If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead. Both parameters take a comma separated list of fields or wildcard expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities ``` If you only want to specify includes, you can use a shorter notation: ``` GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified. **Distributed** The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be. **Versioning support** You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.
@@ -45,7 +57,10 @@ export default async function GetApi (this: That, params: T
 export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>>
 export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise>
 export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise {
-  const acceptedPath: string[] = ['id', 'index']
+  const {
+    path: acceptedPath
+  } = acceptedParams.get
+
   const userQuery = params?.querystring
   const querystring: Record = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts
index d079ba650..82891ae01 100644
--- a/src/api/api/get_script.ts
+++ b/src/api/api/get_script.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */

 /* eslint-disable import/export */
@@ -35,7 +21,22 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'

-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record = {
+  get_script: {
+    path: [
+      'id'
+    ],
+    body: [],
+    query: [
+      'master_timeout'
+    ]
+  }
+}

 /**
  * Get a script or search template. Retrieves a stored script or search template.
@@ -45,7 +46,10 @@ export default async function GetScriptApi (this: That, params: T.GetScriptReque
 export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise>
 export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise
 export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise {
-  const acceptedPath: string[] = ['id']
+  const {
+    path: acceptedPath
+  } = acceptedParams.get_script
+
   const userQuery = params?.querystring
   const querystring: Record = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts
index b263ed089..7cd4ea26e 100644
--- a/src/api/api/get_script_context.ts
+++ b/src/api/api/get_script_context.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */

 /* eslint-disable import/export */
@@ -35,7 +21,18 @@ import { TransportResult } from '@elastic/transport'
 import * as T from '../types'

-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record = {
+  get_script_context: {
+    path: [],
+    body: [],
+    query: []
+  }
+}

 /**
  * Get script contexts. Get a list of supported script contexts and their methods.
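Since the same shape repeats for every endpoint in this patch, the routing rule can be summarized once. The sketch below is a condensed paraphrase of the loop used by endpoints that accept a body; it is not part of the patch, and `splitParams` is an illustrative name:

type AcceptedParams = { path: string[], body: string[], query: string[] }

const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

function splitParams (accepted: AcceptedParams, params: Record<string, any>): { body: Record<string, any>, querystring: Record<string, any> } {
  const body: Record<string, any> = {}
  const querystring: Record<string, any> = {}
  for (const key of Object.keys(params)) {
    if (key === 'body' || key === 'querystring') continue // handled separately by the caller
    if (accepted.body.includes(key)) {
      body[key] = params[key] // declared body parameter
    } else if (accepted.path.includes(key)) {
      continue // path parameters are consumed by the URL builder
    } else if (accepted.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key] // declared endpoint or common query parameter
    } else {
      body[key] = params[key] // unknown keys now default into the body
    }
  }
  return { body, querystring }
}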
@@ -45,7 +42,10 @@ export default async function GetScriptContextApi (this: That, params?: T.GetScr export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = acceptedParams.get_script_context + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts index 7b52735c4..748b7550e 100644 --- a/src/api/api/get_script_languages.ts +++ b/src/api/api/get_script_languages.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,18 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_script_languages: { + path: [], + body: [], + query: [] + } +} /** * Get script languages. Get a list of available script types, languages, and contexts. @@ -45,7 +42,10 @@ export default async function GetScriptLanguagesApi (this: That, params?: T.GetS export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = acceptedParams.get_script_languages + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index a4eef8c97..ada7b0d1b 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,32 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_source: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} /** * Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 ``` You can use the source filtering parameters to control which parts of the `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities ``` @@ -45,7 +56,10 @@ export default async function GetSourceApi (this: That, par export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise> export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] + const { + path: acceptedPath + } = acceptedParams.get_source + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index 33534fe4a..7f74a9763 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,36 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Graph { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'graph.explore': { + path: [ + 'index' + ], + body: [ + 'connections', + 'controls', + 'query', + 'vertices' + ], + query: [ + 'routing', + 'timeout' + ] + } + } } /** @@ -51,8 +61,12 @@ export default class Graph { async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise> async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['connections', 'controls', 'query', 'vertices'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['graph.explore'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +88,14 @@ export default class Graph { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts index 51a48a265..9620c003a 100644 --- a/src/api/api/health_report.ts +++ b/src/api/api/health_report.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,24 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + health_report: { + path: [ + 'feature' + ], + body: [], + query: [ + 'timeout', + 'verbose', + 'size' + ] + } +} /** * Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. 
Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. The cluster’s status is controlled by the worst indicator status. In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. @@ -45,7 +48,10 @@ export default async function HealthReportApi (this: That, params?: T.HealthRepo export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['feature'] + const { + path: acceptedPath + } = acceptedParams.health_report + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index 1c097071c..a6c11c643 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,120 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ilm { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ilm.delete_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.explain_lifecycle': { + path: [ + 'index' + ], + body: [], + query: [ + 'only_errors', + 'only_managed', + 'master_timeout' + ] + }, + 'ilm.get_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.get_status': { + path: [], + body: [], + query: [] + }, + 'ilm.migrate_to_data_tiers': { + path: [], + body: [ + 'legacy_template_to_delete', + 'node_attribute' + ], + query: [ + 'dry_run', + 'master_timeout' + ] + }, + 'ilm.move_to_step': { + path: [ + 'index' + ], + body: [ + 'current_step', + 'next_step' + ], + query: [] + }, + 'ilm.put_lifecycle': { + path: [ + 'name' + ], + body: [ + 'policy' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.remove_policy': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'ilm.retry': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'ilm.start': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.stop': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** @@ -51,7 +145,10 @@ export default class Ilm { async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.delete_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +190,10 @@ export default class Ilm { async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.explain_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -135,7 +235,10 @@ export default class Ilm { async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.get_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -185,7 +288,10 @@ export default class Ilm { async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ilm.get_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -225,8 +331,12 @@ export default class Ilm { async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['legacy_template_to_delete', 'node_attribute'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.migrate_to_data_tiers'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -249,8 +359,14 @@ export default class Ilm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -270,8 +386,12 @@ export default class Ilm { async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise> async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['current_step', 'next_step'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.move_to_step'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -293,8 +413,14 @@ export default class Ilm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -317,8 +443,12 @@ export default class Ilm { async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['policy'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.put_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -340,8 +470,14 @@ export default class Ilm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -364,7 +500,10 @@ export default class Ilm { async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.remove_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -406,7 +545,10 @@ export default class Ilm { async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise> async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.retry'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -448,7 +590,10 @@ export default class Ilm { async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ilm.start'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -488,7 +633,10 @@ export default class Ilm { async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ilm.stop'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {}

diff --git a/src/api/api/index.ts b/src/api/api/index.ts
index bcd3842eb..3293c5e75 100644
--- a/src/api/api/index.ts
+++ b/src/api/api/index.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */

 /* eslint-disable import/export */

@@ -35,7 +21,38 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'

-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  index: {
+    path: [
+      'id',
+      'index'
+    ],
+    body: [
+      'document'
+    ],
+    query: [
+      'if_primary_term',
+      'if_seq_no',
+      'include_source_on_error',
+      'op_type',
+      'pipeline',
+      'refresh',
+      'routing',
+      'timeout',
+      'version',
+      'version_type',
+      'wait_for_active_shards',
+      'require_alias'
+    ]
+  }
+}

 /**
 * Create or update a document in an index. Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. NOTE: You cannot use this API to send update requests for existing documents in a data stream. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.
* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.
* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.

Automatic data stream creation requires a matching index template with data stream enabled. NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.

**Automatically create data streams and indices**

If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping.
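As a minimal TypeScript sketch of the flow just described (assuming the `@elastic/elasticsearch` client and a local cluster at `http://localhost:9200`; the index name and document are illustrative):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local cluster

// `my-index-000001` does not exist yet: if `action.auto_create_index`
// allows it, the index is created on the fly and the mapping for
// `user.id` is inferred dynamically from the document itself.
const response = await client.index({
  index: 'my-index-000001',
  id: '1',
  op_type: 'create', // fail with a 409 instead of overwriting an existing doc
  document: { user: { id: 'elkbee' } }
})
console.log(response.result) // 'created'
```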
By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns, or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow, or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams.

**Optimistic concurrency control**

Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.

**Routing**

By default, shard placement, or routing, is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.

**Distributed**

The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.

**Active shards**

To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say, `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter. Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas` + 1). Specifying a negative value or a number greater than the number of shard copies will throw an error.
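In client terms, this is the `wait_for_active_shards` request parameter. A minimal sketch, under the same local-cluster assumption as the previous example (the value `3` is illustrative):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local cluster

// Require 3 active copies of each shard (for example, the primary plus
// two replicas) before the write proceeds; 'all' would require every
// configured copy to be active.
await client.index({
  index: 'my-index-000001',
  id: '2',
  wait_for_active_shards: 3,
  document: { user: { id: 'elkbee' } }
})
```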
For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed, as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.

It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.

**No operation (noop) updates**

When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable, use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source. There isn't a definitive rule for when noop updates aren't acceptable. It's a combination of lots of factors, like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.

**Versioning**

Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks. When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example:

```
PUT my-index-000001/_doc/1?version=2&version_type=external
{
  "user": {
    "id": "elkbee"
  }
}
```

In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
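The same request expressed with this client, again as a sketch under the local-cluster assumption used above:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local cluster

// Equivalent of `PUT my-index-000001/_doc/1?version=2&version_type=external`:
// succeeds while the supplied version is greater than the stored one,
// otherwise Elasticsearch answers with a 409 version conflict.
await client.index({
  index: 'my-index-000001',
  id: '1',
  version: 2,
  version_type: 'external',
  document: { user: { id: 'elkbee' } }
})
```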
A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. @@ -45,8 +62,12 @@ export default async function IndexApi (this: That, params: export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptions): Promise export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['document'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.index + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -58,8 +79,14 @@ export default async function IndexApi (this: That, params: } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 8af3fb23d..b037cfa72 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,844 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Indices { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'indices.add_block': { + path: [ + 'index', + 'block' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + }, + 'indices.analyze': { + path: [ + 'index' + ], + body: [ + 'analyzer', + 'attributes', + 'char_filter', + 'explain', + 'field', + 'filter', + 'normalizer', + 'text', + 'tokenizer' + ], + query: [ + 'index' + ] + }, + 'indices.cancel_migrate_reindex': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.clear_cache': { + path: [ + 'index' + ], + body: [], + query: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'fielddata', + 'fields', + 'ignore_unavailable', + 'query', + 'request' + ] + }, + 'indices.clone': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.close': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.create': { + path: [ + 'index' + ], + body: [ + 'aliases', + 'mappings', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.create_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.create_from': { + path: [ + 'source', + 'dest' + ], + body: [ + 'create_from' + ], + query: [] + }, + 'indices.data_streams_stats': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards' + ] + }, + 'indices.delete': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_alias': { + path: [ + 'index', + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_data_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'expand_wildcards' + ] + }, + 'indices.delete_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.disk_usage': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'run_expensive_tasks' + ] + }, + 'indices.downsample': { + path: [ + 'index', + 'target_index' + ], + body: [ + 'config' + ], + query: [] + }, + 'indices.exists': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local' + ] + }, + 'indices.exists_alias': { + path: [ + 'name', 
+ 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'indices.exists_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'flat_settings', + 'master_timeout' + ] + }, + 'indices.exists_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'local', + 'master_timeout' + ] + }, + 'indices.explain_data_lifecycle': { + path: [ + 'index' + ], + body: [], + query: [ + 'include_defaults', + 'master_timeout' + ] + }, + 'indices.field_usage_stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'fields' + ] + }, + 'indices.flush': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'force', + 'ignore_unavailable', + 'wait_if_ongoing' + ] + }, + 'indices.forcemerge': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'max_num_segments', + 'only_expunge_deletes', + 'wait_for_completion' + ] + }, + 'indices.get': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout', + 'features' + ] + }, + 'indices.get_alias': { + path: [ + 'name', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'indices.get_data_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'include_defaults', + 'master_timeout' + ] + }, + 'indices.get_data_lifecycle_stats': { + path: [], + body: [], + query: [] + }, + 'indices.get_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'include_defaults', + 'master_timeout', + 'verbose' + ] + }, + 'indices.get_field_mapping': { + path: [ + 'fields', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'include_defaults' + ] + }, + 'indices.get_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'flat_settings', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.get_mapping': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout' + ] + }, + 'indices.get_migrate_reindex_status': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.get_settings': { + path: [ + 'index', + 'name' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout' + ] + }, + 'indices.get_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'local', + 'master_timeout' + ] + }, + 'indices.migrate_reindex': { + path: [], + body: [ + 'reindex' + ], + query: [] + }, + 'indices.migrate_to_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.modify_data_stream': { + path: [], + body: [ + 'actions' + ], + query: [] + }, + 'indices.open': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.promote_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 
'indices.put_alias': { + path: [ + 'index', + 'name' + ], + body: [ + 'filter', + 'index_routing', + 'is_write_index', + 'routing', + 'search_routing' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_data_lifecycle': { + path: [ + 'name' + ], + body: [ + 'data_retention', + 'downsampling', + 'enabled' + ], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_index_template': { + path: [ + 'name' + ], + body: [ + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'allow_auto_create', + 'ignore_missing_component_templates', + 'deprecated' + ], + query: [ + 'create', + 'master_timeout', + 'cause' + ] + }, + 'indices.put_mapping': { + path: [ + 'index' + ], + body: [ + 'date_detection', + 'dynamic', + 'dynamic_date_formats', + 'dynamic_templates', + '_field_names', + '_meta', + 'numeric_detection', + 'properties', + '_routing', + '_source', + 'runtime' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'write_index_only' + ] + }, + 'indices.put_settings': { + path: [ + 'index' + ], + body: [ + 'settings' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'master_timeout', + 'preserve_existing', + 'reopen', + 'timeout' + ] + }, + 'indices.put_template': { + path: [ + 'name' + ], + body: [ + 'aliases', + 'index_patterns', + 'mappings', + 'order', + 'settings', + 'version' + ], + query: [ + 'create', + 'master_timeout', + 'order', + 'cause' + ] + }, + 'indices.recovery': { + path: [ + 'index' + ], + body: [], + query: [ + 'active_only', + 'detailed' + ] + }, + 'indices.refresh': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.reload_search_analyzers': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'resource' + ] + }, + 'indices.resolve_cluster': { + path: [ + 'name' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'timeout' + ] + }, + 'indices.resolve_index': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'ignore_unavailable', + 'allow_no_indices' + ] + }, + 'indices.rollover': { + path: [ + 'alias', + 'new_index' + ], + body: [ + 'aliases', + 'conditions', + 'mappings', + 'settings' + ], + query: [ + 'dry_run', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'lazy' + ] + }, + 'indices.segments': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.shard_stores': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'status' + ] + }, + 'indices.shrink': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.simulate_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'create', + 'cause', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.simulate_template': { + path: [ + 'name' + ], + body: [ + 'allow_auto_create', + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'ignore_missing_component_templates', + 'deprecated' + ], + query: [ + 'create', + 
'cause', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.split': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.stats': { + path: [ + 'metric', + 'index' + ], + body: [], + query: [ + 'completion_fields', + 'expand_wildcards', + 'fielddata_fields', + 'fields', + 'forbid_closed_indices', + 'groups', + 'include_segment_file_sizes', + 'include_unloaded_segments', + 'level' + ] + }, + 'indices.update_aliases': { + path: [], + body: [ + 'actions' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.validate_query': { + path: [ + 'index' + ], + body: [ + 'query' + ], + query: [ + 'allow_no_indices', + 'all_shards', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'explain', + 'ignore_unavailable', + 'lenient', + 'rewrite', + 'q' + ] + } + } } /** @@ -51,7 +869,10 @@ export default class Indices { async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise> async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'block'] + const { + path: acceptedPath + } = this.acceptedParams['indices.add_block'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -94,8 +915,12 @@ export default class Indices { async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['analyzer', 'attributes', 'char_filter', 'explain', 'field', 'filter', 'normalizer', 'text', 'tokenizer'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.analyze'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -118,8 +943,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -149,7 +980,10 @@ export default class Indices { async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.cancel_migrate_reindex'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -191,7 +1025,10 @@ export default class Indices { async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.clear_cache'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -241,8 +1078,12 @@ export default class Indices { async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target'] - const acceptedBody: string[] = ['aliases', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.clone'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -264,8 +1105,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -289,7 +1136,10 @@ export default class Indices { async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise> async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.close'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -331,8 +1181,12 @@ export default class Indices { async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aliases', 'mappings', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.create'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -354,8 +1208,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -378,7 +1238,10 @@ export default class Indices { async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.create_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -420,8 +1283,12 @@ export default class Indices { async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithMeta): Promise> async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['source', 'dest'] - const acceptedBody: string[] = ['create_from'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.create_from'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -433,8 +1300,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -458,7 +1331,10 @@ export default class Indices { async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.data_streams_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -508,7 +1384,10 @@ export default class Indices { async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -550,7 +1429,10 @@ export default class Indices { async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -600,7 +1482,10 @@ export default class Indices { async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -642,7 +1527,10 @@ export default class Indices { async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -684,7 +1572,10 @@ export default class Indices { async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -726,7 +1617,10 @@ export default class Indices { async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -768,7 +1662,10 @@ export default class Indices { async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.disk_usage'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -810,8 +1707,12 @@ export default class Indices { async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithMeta): Promise> async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target_index'] - const acceptedBody: string[] = ['config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.downsample'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -823,8 +1724,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -848,7 +1755,10 @@ export default class Indices { async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -890,7 +1800,10 @@ export default class Indices { async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -940,7 +1853,10 @@ export default class Indices { async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -982,7 +1898,10 @@ export default class Indices { async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1024,7 +1943,10 @@ export default class Indices { async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.explain_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1066,7 +1988,10 @@ export default class Indices { async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.field_usage_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1108,7 +2033,10 @@ export default class Indices { async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise> async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.flush'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1158,7 +2086,10 @@ export default class Indices { async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise> async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.forcemerge'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1208,7 +2139,10 @@ export default class Indices { async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1250,7 +2184,10 @@ export default class Indices { async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1307,7 +2244,10 @@ export default class Indices { async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1349,7 +2289,10 @@ export default class Indices { async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_lifecycle_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1389,7 +2332,10 @@ export default class Indices { async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1439,7 +2385,10 @@ export default class Indices { async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['fields', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_field_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1489,7 +2438,10 @@ export default class Indices { async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1539,7 +2491,10 @@ export default class Indices { async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1589,7 +2544,10 @@ export default class Indices { async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_migrate_reindex_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1631,7 +2589,10 @@ export default class Indices { async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1688,7 +2649,10 @@ export default class Indices { async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1738,8 +2702,12 @@ export default class Indices { async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['reindex'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.migrate_reindex'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1751,8 +2719,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1766,13 +2740,16 @@ export default class Indices { /** * Convert an index alias to a data stream. Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; The alias must not have any filters; The alias must not use custom routing. If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream | Elasticsearch API documentation} */ async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.migrate_to_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1808,14 +2785,18 @@ export default class Indices { /** * Update data streams. Performs one or more data stream modification actions in a single atomic operation. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream | Elasticsearch API documentation} */ async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['actions'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.modify_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1837,8 +2818,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1858,7 +2845,10 @@ export default class Indices { async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.open'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1894,13 +2884,16 @@ export default class Indices { /** * Promote a data stream. 
Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can't be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream | Elasticsearch API documentation} */ async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.promote_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1936,14 +2929,18 @@ export default class Indices { /** * Create or update an alias. Adds a data stream or index to an alias. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias | Elasticsearch API documentation} */ async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] - const acceptedBody: string[] = ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
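// Usage sketch for `putAlias` (editor's addition; the index and alias names are invented
// placeholders, and `client` is assumed to be a connected Client as in the sketch above).
// The body key shown comes from the accepted body list for 'indices.put_alias':
//
//   await client.indices.putAlias({ index: 'logs-2025.05', name: 'logs-current', is_write_index: true })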
{ ...userQuery } : {} @@ -1965,8 +2962,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1991,14 +2994,18 @@ export default class Indices { /** * Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-lifecycle.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle | Elasticsearch API documentation} */ async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['data_retention', 'downsampling', 'enabled'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2020,8 +3027,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2038,14 +3051,18 @@ export default class Indices { /** * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Multiple matching templates** If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.
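* For example (editor's sketch; the template name, pattern, and settings are placeholders), a template created with a higher `priority` takes precedence over any lower-priority template whose `index_patterns` overlap:
*
*   await client.indices.putIndexTemplate({
*     name: 'logs-template',
*     index_patterns: ['logs-*'],
*     priority: 200, // must not collide with another template matching 'logs-*'
*     template: { settings: { number_of_shards: 1 } }
*   })
*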
**Composing aliases, mappings, and settings** When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-template.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template | Elasticsearch API documentation} */ async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'allow_auto_create', 'ignore_missing_component_templates', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2067,8 +3084,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2091,8 +3114,12 @@ export default class Indices { async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['date_detection', 'dynamic', 'dynamic_date_formats', 'dynamic_templates', '_field_names', '_meta', 'numeric_detection', 'properties', '_routing', '_source', 'runtime'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2114,8 +3141,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2138,8 +3171,12 @@ export default class Indices { async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2151,8 +3188,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2182,8 +3225,12 @@ export default class Indices { async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'order', 'settings', 'version'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2205,8 +3252,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2229,7 +3282,10 @@ export default class Indices { async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.recovery'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2279,7 +3335,10 @@ export default class Indices { async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise> async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.refresh'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2329,7 +3388,10 @@ export default class Indices { async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise> async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.reload_search_analyzers'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2371,7 +3433,10 @@ export default class Indices { async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise> async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.resolve_cluster'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2421,7 +3486,10 @@ export default class Indices { async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.resolve_index'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2463,8 +3531,12 @@ export default class Indices { async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise> async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['alias', 'new_index'] - const acceptedBody: string[] = ['aliases', 'conditions', 'mappings', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.rollover'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2486,8 +3558,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2518,7 +3596,10 @@ export default class Indices { async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.segments'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2568,7 +3649,10 @@ export default class Indices { async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise> async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.shard_stores'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2618,8 +3702,12 @@ export default class Indices { async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise> async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target'] - const acceptedBody: string[] = ['aliases', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.shrink'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2641,8 +3729,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2666,7 +3760,10 @@ export default class Indices { async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.simulate_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2708,8 +3805,12 @@ export default class Indices { async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['allow_auto_create', 'index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'ignore_missing_component_templates', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.simulate_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2732,8 +3833,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2763,8 +3870,12 @@ export default class Indices { async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise> async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target'] - const acceptedBody: string[] = ['aliases', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.split'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2786,8 +3897,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2811,7 +3928,10 @@ export default class Indices { async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['metric', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2862,14 +3982,18 @@ export default class Indices { /** * Create or update an alias. Adds a data stream or index to an alias. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases | Elasticsearch API documentation} */ async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['actions'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.update_aliases'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2892,8 +4016,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2907,14 +4037,18 @@ export default class Indices { /** * Validate a query. 
Validates a query without running it. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query | Elasticsearch API documentation} */ async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['query'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.validate_query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2937,8 +4071,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index b7c9fb55a..09429c394 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,293 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Inference { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'inference.chat_completion_unified': { + path: [ + 'inference_id' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'inference.completion': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.delete': { + path: [ + 'task_type', + 'inference_id' + ], + body: [], + query: [ + 'dry_run', + 'force' + ] + }, + 'inference.get': { + path: [ + 'task_type', + 'inference_id' + ], + body: [], + query: [] + }, + 'inference.post_eis_chat_completion': { + path: [ + 'eis_inference_id' + ], + body: [], + query: [] + }, + 'inference.put': { + path: [ + 'task_type', + 'inference_id' + ], + body: [ + 'inference_config' + ], + query: [] + }, + 'inference.put_eis': { + path: [ + 'task_type', + 'eis_inference_id' + ], + body: [ + 'service', + 'service_settings' + ], + query: [] + }, + 'inference.put_mistral': { + path: [ + 'task_type', + 'mistral_inference_id' + ], + body: [], + query: [] + }, + 'inference.put_openai': { + path: [ + 'task_type', + 'openai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_voyageai': { + path: [ + 'task_type', + 'voyageai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_watsonx': { + path: [ + 'task_type', + 'watsonx_inference_id' + ], + body: [ + 'service', + 'service_settings' + ], + query: [] + }, + 'inference.rerank': { + path: [ + 'inference_id' + ], + body: [ + 'query', + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.sparse_embedding': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.stream_completion': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [] + }, + 'inference.text_embedding': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.update': { + path: [ + 'inference_id', + 'task_type' + ], + body: [ + 'inference_config' + ], + query: [] + } + } + } + + /** + * Perform chat completion inference + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference | Elasticsearch API documentation} + */ + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = 
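// Usage sketch for `chatCompletionUnified` (editor's addition; the endpoint id and message are
// placeholders, and `client` is assumed to be a connected Client as in the earlier sketches).
// Per the acceptedParams entry above, this operation declares no named body keys, so the chat
// payload rides on the raw `body` passthrough while `timeout` is routed to the query string:
//
//   const events = await client.inference.chatCompletionUnified({
//     inference_id: 'my-chat-endpoint',
//     timeout: '30s',
//     body: { messages: [{ role: 'user', content: 'Say hello' }] }
//   })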
this.acceptedParams['inference.chat_completion_unified'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_inference/chat_completion/${encodeURIComponent(params.inference_id.toString())}/_stream` + const meta: TransportRequestMetadata = { + name: 'inference.chat_completion_unified', + pathParts: { + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform completion inference on the service + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptions): Promise + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.completion'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/completion/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.completion', + pathParts: { + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** @@ -51,7 +318,10 @@ export default class Inference { async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] + const { + path: acceptedPath + } = this.acceptedParams['inference.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
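// Usage sketch for `completion` above (editor's addition; the endpoint id and prompt are
// placeholders, `client` assumed connected). `input` and `task_settings` are the declared body
// keys and `timeout` the declared query key for 'inference.completion':
//
//   const resp = await client.inference.completion({
//     inference_id: 'my-completion-endpoint',
//     input: 'Write a one-line haiku about the sea',
//     timeout: '30s'
//   })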
{ ...userQuery } : {} @@ -101,7 +371,10 @@ export default class Inference { async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] + const { + path: acceptedPath + } = this.acceptedParams['inference.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -148,15 +421,17 @@ export default class Inference { } /** - * Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + * Perform a chat completion task through the Elastic Inference Service (EIS). Perform a chat completion inference task with the `elastic` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion | Elasticsearch API documentation} */ - async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> - async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise - async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['query', 'input', 'task_settings'] + async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise + async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['inference.post_eis_chat_completion'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -171,11 +446,7 @@ export default class Inference { } for (const key in params) { - if (acceptedBody.includes(key)) { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error @@ -183,20 +454,12 @@ export default class Inference { } } - let method = '' - let path = '' - if (params.task_type != null && params.inference_id != null) { - method = 'POST' - path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}` - } else { - method = 'POST' - path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` - } + const method = 'POST' + const path = `/_inference/chat_completion/${encodeURIComponent(params.eis_inference_id.toString())}/_stream` const meta: TransportRequestMetadata = { - name: 'inference.inference', + name: 'inference.post_eis_chat_completion', pathParts: { - task_type: params.task_type, - inference_id: params.inference_id + eis_inference_id: params.eis_inference_id } } return await this.transport.request({ path, method, querystring, body, meta }, options) @@ -210,8 +473,12 @@ export default class Inference { async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise> async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['inference_config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -223,8 +490,14 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -248,15 +521,19 @@ export default class Inference { } /** - * Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference | Elasticsearch API documentation} + * Create an Elastic Inference Service (EIS) inference endpoint. Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS). 
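+ * @example
+ * A rough sketch (editor's addition): the endpoint and model ids are placeholders; `service` and
+ * `service_settings` are the body keys declared for 'inference.put_eis', and `elastic` is the
+ * service name mentioned for EIS chat completion above.
+ *
+ *   await client.inference.putEis({
+ *     task_type: 'chat_completion',
+ *     eis_inference_id: 'my-eis-endpoint',
+ *     service: 'elastic',
+ *     service_settings: { model_id: 'my-model' } // placeholder model id
+ *   })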
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-service-eis.html | Elasticsearch API documentation} */ - async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> - async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise - async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id', 'task_type'] - const acceptedBody: string[] = ['input'] + async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise + async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_eis'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -278,40 +555,89 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } - let method = '' - let path = '' - if (params.task_type != null && params.inference_id != null) { - method = 'POST' - path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_stream` - } else { - method = 'POST' - path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_stream` + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.eis_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_eis', + pathParts: { + task_type: params.task_type, + eis_inference_id: params.eis_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Configure a Mistral inference endpoint + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-service-mistral.html | Elasticsearch API documentation} + */ + async putMistral (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putMistral (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putMistral (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async putMistral (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['inference.put_mistral'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.mistral_inference_id.toString())}` const meta: TransportRequestMetadata = { - name: 'inference.stream_inference', + name: 'inference.put_mistral', pathParts: { - inference_id: params.inference_id, - task_type: params.task_type + task_type: params.task_type, + mistral_inference_id: params.mistral_inference_id } } return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Perform inference on the service using the Unified Schema - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/unified-inference-api.html | Elasticsearch API documentation} + * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-service-openai.html | Elasticsearch API documentation} */ - async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> - async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise - async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['messages', 'model', 'max_completion_tokens', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'] + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_openai'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
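// Usage sketch for `putOpenai` (editor's addition; the endpoint id and every `service_settings`
// field below are assumptions, since this patch does not spell the settings out; `client` assumed
// connected). `service`, `service_settings`, `task_settings`, and `chunking_settings` are the
// declared body keys for 'inference.put_openai':
//
//   await client.inference.putOpenai({
//     task_type: 'text_embedding',
//     openai_inference_id: 'my-openai-endpoint',
//     service: 'openai',
//     service_settings: { api_key: process.env.OPENAI_API_KEY, model_id: 'text-embedding-3-small' }
//   })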
{ ...userQuery } : {} @@ -333,24 +659,367 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.openai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_openai', + pathParts: { + task_type: params.task_type, + openai_inference_id: params.openai_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a VoyageAI inference endpoint. Create an inference endpoint to perform an inference task with the `voyageai` service. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-voyageai | Elasticsearch API documentation} + */ + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_voyageai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } - let method = '' - let path = '' - if (params.task_type != null && params.inference_id != null) { - method = 'POST' - path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_unified` - } else { - method = 'POST' - path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_unified` + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.voyageai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_voyageai', + pathParts: { + task_type: params.task_type, + voyageai_inference_id: params.voyageai_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Watsonx inference endpoint. 
Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx | Elasticsearch API documentation} + */ + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_watsonx'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
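// Usage sketch for `putWatsonx` (editor's addition; every value is a placeholder and the exact
// `service_settings` fields are an assumption not spelled out in this patch; `client` assumed
// connected). `service` and `service_settings` are the declared body keys for 'inference.put_watsonx':
//
//   await client.inference.putWatsonx({
//     task_type: 'text_embedding',
//     watsonx_inference_id: 'my-watsonx-endpoint',
//     service: 'watsonxai', // service name taken from the description above
//     service_settings: { api_key: '<ibm-cloud-api-key>', url: '<deployment-url>', model_id: '<model-id>', project_id: '<project-id>' }
//   })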
{} + // @ts-expect-error + body[key] = params[key] + } + } } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.watsonx_inference_id.toString())}` const meta: TransportRequestMetadata = { - name: 'inference.put_watsonx', + pathParts: { task_type: params.task_type, + watsonx_inference_id: params.watsonx_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform reranking inference on the service + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptionsWithMeta): Promise> + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptions): Promise + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.rerank'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/rerank/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.rerank', + pathParts: { + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform sparse embedding inference on the service + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.sparse_embedding'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
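// Usage sketch for `rerank` above (editor's addition; the endpoint id, query, and documents are
// placeholders, `client` assumed connected). `query`, `input`, and `task_settings` are the
// declared body keys for 'inference.rerank':
//
//   const resp = await client.inference.rerank({
//     inference_id: 'my-rerank-endpoint',
//     query: 'best pizza in town',
//     input: ['A glowing pizzeria review', 'A hardware store flyer']
//   })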
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/sparse_embedding/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.sparse_embedding', + pathParts: { + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference | Elasticsearch API documentation} + */ + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.stream_completion'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
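The `streamCompletion` doc comment above notes that you must use a client that supports streaming. A sketch of consuming the stream via the transport's `asStream` request option, with a hypothetical endpoint id:

  const events = await client.inference.streamCompletion({
    inference_id: 'my-completion-endpoint', // hypothetical id
    input: 'Write a haiku about Elasticsearch.'
  }, { asStream: true })

  // The response arrives incrementally; print chunks as they land.
  for await (const chunk of events) {
    process.stdout.write(chunk.toString())
  }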
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/completion/${encodeURIComponent(params.inference_id.toString())}/_stream` + const meta: TransportRequestMetadata = { + name: 'inference.stream_completion', + pathParts: { + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform text embedding inference on the service + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.text_embedding'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/text_embedding/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.text_embedding', + pathParts: { inference_id: params.inference_id } } @@ -365,8 +1034,12 @@ export default class Inference { async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithMeta): Promise> async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id', 'task_type'] - const acceptedBody: string[] = ['inference_config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.update'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -378,18 +1051,24 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
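A one-call sketch for `textEmbedding` (hypothetical endpoint id):

  const embeddings = await client.inference.textEmbedding({
    inference_id: 'my-embedding-endpoint', // hypothetical id
    input: ['The quick brown fox jumps over the lazy dog']
  })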
{} + // @ts-expect-error + body[key] = params[key] + } } } let method = '' let path = '' if (params.task_type != null && params.inference_id != null) { - method = 'POST' + method = 'PUT' path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_update` } else { - method = 'POST' + method = 'PUT' path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_update` } const meta: TransportRequestMetadata = { diff --git a/src/api/api/info.ts b/src/api/api/info.ts index 1681fe6f3..536fabb6c 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,18 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + info: { + path: [], + body: [], + query: [] + } +} /** * Get cluster info. Get basic build, version, and cluster information. @@ -45,7 +42,10 @@ export default async function InfoApi (this: That, params?: T.InfoRequest, optio export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = acceptedParams.info + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index 51ad39aff..840a4d376 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
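The recurring change in this patch replaces per-method `acceptedPath`/`acceptedBody` arrays with a per-endpoint `acceptedParams` table, and keys that match neither the path nor the query lists now default into the request body rather than the query string. A simplified, self-contained sketch of that routing logic (not the client's exact code):

  const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

  interface AcceptedParams { path: string[], body: string[], query: string[] }

  function route (accepted: AcceptedParams, params: Record<string, unknown>): {
    body: Record<string, unknown>, query: Record<string, unknown>
  } {
    const body: Record<string, unknown> = {}
    const query: Record<string, unknown> = {}
    for (const key of Object.keys(params)) {
      if (accepted.path.includes(key)) continue // path params are interpolated into the URL
      if (accepted.body.includes(key)) {
        body[key] = params[key]
      } else if (accepted.query.includes(key) || commonQueryParams.includes(key)) {
        query[key] = params[key]
      } else {
        body[key] = params[key] // unknown keys now land in the body
      }
    }
    return { body, query }
  }

  // Example using the 'ingest.put_pipeline' table defined later in this patch:
  route(
    { path: ['id'], body: ['description', 'processors'], query: ['master_timeout', 'timeout', 'if_version'] },
    { id: 'p1', description: 'demo', master_timeout: '30s', pretty: true }
  )
  // -> body:  { description: 'demo' }
  //    query: { master_timeout: '30s', pretty: true }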
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,142 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ingest { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ingest.delete_geoip_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.delete_ip_location_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.delete_pipeline': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.geo_ip_stats': { + path: [], + body: [], + query: [] + }, + 'ingest.get_geoip_database': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'ingest.get_ip_location_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ingest.get_pipeline': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'summary' + ] + }, + 'ingest.processor_grok': { + path: [], + body: [], + query: [] + }, + 'ingest.put_geoip_database': { + path: [ + 'id' + ], + body: [ + 'name', + 'maxmind' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.put_ip_location_database': { + path: [ + 'id' + ], + body: [ + 'configuration' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.put_pipeline': { + path: [ + 'id' + ], + body: [ + '_meta', + 'description', + 'on_failure', + 'processors', + 'version', + 'deprecated' + ], + query: [ + 'master_timeout', + 'timeout', + 'if_version' + ] + }, + 'ingest.simulate': { + path: [ + 'id' + ], + body: [ + 'docs', + 'pipeline' + ], + query: [ + 'verbose' + ] + } + } } /** @@ -51,7 +167,10 @@ export default class Ingest { async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_geoip_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +212,10 @@ export default class Ingest { async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_ip_location_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -135,7 +257,10 @@ export default class Ingest { async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -177,7 +302,10 @@ export default class Ingest { async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ingest.geo_ip_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -217,7 +345,10 @@ export default class Ingest { async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_geoip_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -267,7 +398,10 @@ export default class Ingest { async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_ip_location_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -317,7 +451,10 @@ export default class Ingest { async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -367,7 +504,10 @@ export default class Ingest { async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise> async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ingest.processor_grok'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -407,8 +547,12 @@ export default class Ingest { async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['name', 'maxmind'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_geoip_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -430,8 +574,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -454,8 +604,12 @@ export default class Ingest { async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['configuration'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_ip_location_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -467,8 +621,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -491,8 +651,12 @@ export default class Ingest { async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -514,8 +678,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -538,8 +708,12 @@ export default class Ingest { async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['docs', 'pipeline'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.simulate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -561,8 +735,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index d1a319461..650a1d6a7 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
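Given the `ingest.put_pipeline` table above (body keys `_meta`, `description`, `on_failure`, `processors`, `version`, `deprecated`; query keys `master_timeout`, `timeout`, `if_version`), a typical call looks like this sketch with a hypothetical pipeline id:

  await client.ingest.putPipeline({
    id: 'my-pipeline', // hypothetical id, becomes part of the URL
    description: 'Lowercase the message field',
    processors: [
      { lowercase: { field: 'message' } }
    ],
    version: 1,
    master_timeout: '30s' // routed to the query string
  })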
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,31 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + knn_search: { + path: [ + 'index' + ], + body: [ + '_source', + 'docvalue_fields', + 'stored_fields', + 'fields', + 'filter', + 'knn' + ], + query: [ + 'routing' + ] + } +} /** * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. A kNN search response has the exact same structure as a search API response. However, certain sections have a meaning specific to kNN search: * The document `_score` is determined by the similarity between the query and document vector. * The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. @@ -45,8 +55,12 @@ export default async function KnnSearchApi (this: That, par export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise> export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['_source', 'docvalue_fields', 'stored_fields', 'fields', 'filter', 'knn'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.knn_search + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +82,14 @@ export default async function KnnSearchApi (this: That, par } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/license.ts b/src/api/api/license.ts index b80733dd9..8a1fb00d1 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
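The `knn_search` doc comment above explains the approximate HNSW semantics and the optional filter. A hedged usage sketch against a hypothetical index with a `dense_vector` field (the API is deprecated in favor of the `knn` option of the search API):

  const result = await client.knnSearch({
    index: 'my-index', // hypothetical index
    knn: {
      field: 'vector',
      query_vector: [0.12, 0.45, 0.91],
      k: 10,
      num_candidates: 100 // hits.total reports num_candidates * num_shards
    },
    filter: { term: { category: 'books' } },
    _source: ['title']
  })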
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,77 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class License { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'license.delete': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'license.get': { + path: [], + body: [], + query: [ + 'accept_enterprise', + 'local' + ] + }, + 'license.get_basic_status': { + path: [], + body: [], + query: [] + }, + 'license.get_trial_status': { + path: [], + body: [], + query: [] + }, + 'license.post': { + path: [], + body: [ + 'license', + 'licenses' + ], + query: [ + 'acknowledge', + 'master_timeout', + 'timeout' + ] + }, + 'license.post_start_basic': { + path: [], + body: [], + query: [ + 'acknowledge', + 'master_timeout', + 'timeout' + ] + }, + 'license.post_start_trial': { + path: [], + body: [], + query: [ + 'acknowledge', + 'type_query_string', + 'master_timeout' + ] + } + } } /** @@ -51,7 +102,10 @@ export default class License { async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -91,7 +145,10 @@ export default class License { async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -131,7 +188,10 @@ export default class License { async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.get_basic_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -171,7 +231,10 @@ export default class License { async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.get_trial_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -211,8 +274,12 @@ export default class License { async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['license', 'licenses'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['license.post'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -235,8 +302,14 @@ export default class License { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -256,7 +329,10 @@ export default class License { async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise> async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.post_start_basic'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
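Per the `license.post` table above, `license`/`licenses` travel in the body while `acknowledge` is a query parameter. A sketch with placeholder license values:

  // Start a trial (acknowledge is routed to the query string)...
  await client.license.postStartTrial({ acknowledge: true })

  // ...or install a license explicitly.
  await client.license.post({
    acknowledge: true,
    license: {
      uid: '893361dc-9749-4997-93cb-xxxxxxxxxxxx', // placeholder values
      type: 'basic',
      issue_date_in_millis: 1411948800000,
      expiry_date_in_millis: 1914278399999,
      issued_to: 'issuedTo',
      issuer: 'issuer',
      signature: 'xx',
      max_nodes: 1
    }
  })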
{ ...userQuery } : {} @@ -296,7 +372,10 @@ export default class License { async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise> async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.post_start_trial'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index df33e03ac..f3909331b 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,44 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Logstash { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'logstash.delete_pipeline': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'logstash.get_pipeline': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'logstash.put_pipeline': { + path: [ + 'id' + ], + body: [ + 'pipeline' + ], + query: [] + } + } } /** @@ -51,7 +69,10 @@ export default class Logstash { async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['logstash.delete_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -93,7 +114,10 @@ export default class Logstash { async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['logstash.get_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -143,8 +167,12 @@ export default class Logstash { async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['pipeline'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['logstash.put_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -156,8 +184,14 @@ export default class Logstash { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts index c254d5fd8..c09cdecaf 100644 --- a/src/api/api/mget.ts +++ b/src/api/api/mget.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,35 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + mget: { + path: [ + 'index' + ], + body: [ + 'docs', + 'ids' + ], + query: [ + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields' + ] + } +} /** * Get multiple documents. Get multiple JSON documents by ID from one or more indices. 
If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. **Filter source fields** By default, the `_source` field is returned for every document (if stored). Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. **Get stored fields** Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. Any requested fields that are not stored are ignored. You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. @@ -45,8 +59,12 @@ export default async function MgetApi (this: That, params?: export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise> export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['docs', 'ids'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.mget + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +87,14 @@ export default async function MgetApi (this: That, params?: } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts index 5ddf19b7d..6c1cbb7bf 100644 --- a/src/api/api/migration.ts +++ b/src/api/api/migration.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
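The `mget` doc comment above describes the two request shapes and per-document source filtering. A short sketch with a hypothetical index:

  // Simple form: one index, ids in the body.
  const byIds = await client.mget({
    index: 'my-index',
    ids: ['1', '2']
  })

  // Per-document form with source filtering, as described above.
  const byDocs = await client.mget({
    docs: [
      { _index: 'my-index', _id: '1', _source: false },
      { _index: 'my-index', _id: '2', _source: ['user.id'] }
    ]
  })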
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,36 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Migration { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'migration.deprecations': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'migration.get_feature_upgrade_status': { + path: [], + body: [], + query: [] + }, + 'migration.post_feature_upgrade': { + path: [], + body: [], + query: [] + } + } } /** @@ -51,7 +61,10 @@ export default class Migration { async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['migration.deprecations'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -101,7 +114,10 @@ export default class Migration { async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['migration.get_feature_upgrade_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -141,7 +157,10 @@ export default class Migration { async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise> async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['migration.post_feature_upgrade'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 282fc38a5..c25f8763e 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,957 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ml { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ml.clear_trained_model_deployment_cache': { + path: [ + 'model_id' + ], + body: [], + query: [] + }, + 'ml.close_job': { + path: [ + 'job_id' + ], + body: [ + 'allow_no_match', + 'force', + 'timeout' + ], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.delete_calendar': { + path: [ + 'calendar_id' + ], + body: [], + query: [] + }, + 'ml.delete_calendar_event': { + path: [ + 'calendar_id', + 'event_id' + ], + body: [], + query: [] + }, + 'ml.delete_calendar_job': { + path: [ + 'calendar_id', + 'job_id' + ], + body: [], + query: [] + }, + 'ml.delete_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'ml.delete_datafeed': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'force' + ] + }, + 'ml.delete_expired_data': { + path: [ + 'job_id' + ], + body: [ + 'requests_per_second', + 'timeout' + ], + query: [ + 'requests_per_second', + 'timeout' + ] + }, + 'ml.delete_filter': { + path: [ + 'filter_id' + ], + body: [], + query: [] + }, + 'ml.delete_forecast': { + path: [ + 'job_id', + 'forecast_id' + ], + body: [], + query: [ + 'allow_no_forecasts', + 'timeout' + ] + }, + 'ml.delete_job': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'force', + 'delete_user_annotations', + 'wait_for_completion' + ] + }, + 'ml.delete_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [] + }, + 'ml.delete_trained_model': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'ml.delete_trained_model_alias': { + path: [ + 'model_alias', + 'model_id' + ], + body: [], + query: [] + }, + 'ml.estimate_model_memory': { + path: [], + body: [ + 'analysis_config', + 'max_bucket_cardinality', + 'overall_cardinality' + ], + query: [] + }, + 'ml.evaluate_data_frame': { + path: [], + body: [ + 'evaluation', + 'index', + 'query' + ], + query: [] + }, + 'ml.explain_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'source', + 'dest', + 'analysis', + 'description', + 'model_memory_limit', + 'max_num_threads', + 'analyzed_fields', + 'allow_lazy_start' + ], + query: [] + }, + 'ml.flush_job': { + path: [ + 'job_id' + ], + body: [ + 'advance_time', + 'calc_interim', + 'end', + 'skip_time', + 'start' + ], + query: [ + 'advance_time', + 'calc_interim', + 'end', + 'skip_time', + 'start' + ] + }, + 'ml.forecast': { + path: [ + 'job_id' + ], + body: [ + 'duration', + 'expires_in', + 'max_model_memory' + ], + query: [ + 'duration', + 'expires_in', + 'max_model_memory' + ] + }, + 
'ml.get_buckets': { + path: [ + 'job_id', + 'timestamp' + ], + body: [ + 'anomaly_score', + 'desc', + 'end', + 'exclude_interim', + 'expand', + 'page', + 'sort', + 'start' + ], + query: [ + 'anomaly_score', + 'desc', + 'end', + 'exclude_interim', + 'expand', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_calendar_events': { + path: [ + 'calendar_id' + ], + body: [], + query: [ + 'end', + 'from', + 'job_id', + 'size', + 'start' + ] + }, + 'ml.get_calendars': { + path: [ + 'calendar_id' + ], + body: [ + 'page' + ], + query: [ + 'from', + 'size' + ] + }, + 'ml.get_categories': { + path: [ + 'job_id', + 'category_id' + ], + body: [ + 'page' + ], + query: [ + 'from', + 'partition_field_value', + 'size' + ] + }, + 'ml.get_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'exclude_generated' + ] + }, + 'ml.get_data_frame_analytics_stats': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'verbose' + ] + }, + 'ml.get_datafeed_stats': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_datafeeds': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match', + 'exclude_generated' + ] + }, + 'ml.get_filters': { + path: [ + 'filter_id' + ], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'ml.get_influencers': { + path: [ + 'job_id' + ], + body: [ + 'page' + ], + query: [ + 'desc', + 'end', + 'exclude_interim', + 'influencer_score', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_job_stats': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_jobs': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match', + 'exclude_generated' + ] + }, + 'ml.get_memory_stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ml.get_model_snapshot_upgrade_stats': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_model_snapshots': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'desc', + 'end', + 'page', + 'sort', + 'start' + ], + query: [ + 'desc', + 'end', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_overall_buckets': { + path: [ + 'job_id' + ], + body: [ + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ], + query: [ + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ] + }, + 'ml.get_records': { + path: [ + 'job_id' + ], + body: [ + 'desc', + 'end', + 'exclude_interim', + 'page', + 'record_score', + 'sort', + 'start' + ], + query: [ + 'desc', + 'end', + 'exclude_interim', + 'from', + 'record_score', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_trained_models': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'decompress_definition', + 'exclude_generated', + 'from', + 'include', + 'size', + 'tags' + ] + }, + 'ml.get_trained_models_stats': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size' + ] + }, + 'ml.infer_trained_model': { + path: [ + 'model_id' + ], + body: [ + 'docs', + 'inference_config' + ], + query: [ + 'timeout' + ] + }, + 'ml.info': { + path: [], + body: [], + query: [] + }, + 'ml.open_job': { + path: [ + 'job_id' + ], + body: [ + 'timeout' + ], + query: [ + 'timeout' + ] + }, + 'ml.post_calendar_events': { + path: [ + 'calendar_id' + ], + body: [ + 'events' + ], + 
query: [] + }, + 'ml.post_data': { + path: [ + 'job_id' + ], + body: [ + 'data' + ], + query: [ + 'reset_end', + 'reset_start' + ] + }, + 'ml.preview_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'config' + ], + query: [] + }, + 'ml.preview_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'datafeed_config', + 'job_config' + ], + query: [ + 'start', + 'end' + ] + }, + 'ml.put_calendar': { + path: [ + 'calendar_id' + ], + body: [ + 'job_ids', + 'description' + ], + query: [] + }, + 'ml.put_calendar_job': { + path: [ + 'calendar_id', + 'job_id' + ], + body: [], + query: [] + }, + 'ml.put_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'allow_lazy_start', + 'analysis', + 'analyzed_fields', + 'description', + 'dest', + 'max_num_threads', + '_meta', + 'model_memory_limit', + 'source', + 'headers', + 'version' + ], + query: [] + }, + 'ml.put_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'aggregations', + 'aggs', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size', + 'headers' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.put_filter': { + path: [ + 'filter_id' + ], + body: [ + 'description', + 'items' + ], + query: [] + }, + 'ml.put_job': { + path: [], + body: [ + 'allow_lazy_open', + 'analysis_config', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'daily_model_snapshot_retention_after_days', + 'data_description', + 'datafeed_config', + 'description', + 'job_id', + 'groups', + 'model_plot_config', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_index_name', + 'results_retention_days' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.put_trained_model': { + path: [ + 'model_id' + ], + body: [ + 'compressed_definition', + 'definition', + 'description', + 'inference_config', + 'input', + 'metadata', + 'model_type', + 'model_size_bytes', + 'platform_architecture', + 'tags', + 'prefix_strings' + ], + query: [ + 'defer_definition_decompression', + 'wait_for_completion' + ] + }, + 'ml.put_trained_model_alias': { + path: [ + 'model_alias', + 'model_id' + ], + body: [], + query: [ + 'reassign' + ] + }, + 'ml.put_trained_model_definition_part': { + path: [ + 'model_id', + 'part' + ], + body: [ + 'definition', + 'total_definition_length', + 'total_parts' + ], + query: [] + }, + 'ml.put_trained_model_vocabulary': { + path: [ + 'model_id' + ], + body: [ + 'vocabulary', + 'merges', + 'scores' + ], + query: [] + }, + 'ml.reset_job': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'wait_for_completion', + 'delete_user_annotations' + ] + }, + 'ml.revert_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'delete_intervening_results' + ], + query: [ + 'delete_intervening_results' + ] + }, + 'ml.set_upgrade_mode': { + path: [], + body: [], + query: [ + 'enabled', + 'timeout' + ] + }, + 'ml.start_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'ml.start_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'end', + 'start', + 'timeout' + ], + query: [ + 'end', + 'start', + 'timeout' + ] + }, + 'ml.start_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [ + 'adaptive_allocations' + ], + query: [ + 
'cache_size', + 'deployment_id', + 'number_of_allocations', + 'priority', + 'queue_capacity', + 'threads_per_allocation', + 'timeout', + 'wait_for' + ] + }, + 'ml.stop_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.stop_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'allow_no_match', + 'force', + 'timeout' + ], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.stop_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'force' + ] + }, + 'ml.update_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'description', + 'model_memory_limit', + 'max_num_threads', + 'allow_lazy_start' + ], + query: [] + }, + 'ml.update_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'aggregations', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.update_filter': { + path: [ + 'filter_id' + ], + body: [ + 'add_items', + 'description', + 'remove_items' + ], + query: [] + }, + 'ml.update_job': { + path: [ + 'job_id' + ], + body: [ + 'allow_lazy_open', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'categorization_filters', + 'description', + 'model_plot_config', + 'model_prune_window', + 'daily_model_snapshot_retention_after_days', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_retention_days', + 'groups', + 'detectors', + 'per_partition_categorization' + ], + query: [] + }, + 'ml.update_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'description', + 'retain' + ], + query: [] + }, + 'ml.update_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [ + 'number_of_allocations', + 'adaptive_allocations' + ], + query: [ + 'number_of_allocations' + ] + }, + 'ml.upgrade_job_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [ + 'wait_for_completion', + 'timeout' + ] + }, + 'ml.validate': { + path: [], + body: [ + 'job_id', + 'analysis_config', + 'analysis_limits', + 'data_description', + 'description', + 'model_plot', + 'model_snapshot_id', + 'model_snapshot_retention_days', + 'results_index_name' + ], + query: [] + }, + 'ml.validate_detector': { + path: [], + body: [ + 'detector' + ], + query: [] + } + } } /** @@ -51,7 +982,10 @@ export default class Ml { async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.clear_trained_model_deployment_cache'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
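The registry that closes above gives every ML endpoint the same three-list shape, telling the shared request-building code where each flat request key belongs. A minimal standalone sketch of that shape; `EndpointParams` and `registry` are illustrative names, not the client's internal identifiers:

interface EndpointParams {
  path: string[]   // keys interpolated into the URL
  body: string[]   // keys serialized into the JSON body
  query: string[]  // keys sent as query-string parameters
}

// A hypothetical one-entry registry mirroring the shape above; note that
// 'ml.open_job' legitimately lists `timeout` in both body and query.
const registry: Record<string, EndpointParams> = {
  'ml.open_job': { path: ['job_id'], body: ['timeout'], query: ['timeout'] }
}

const { path, body, query } = registry['ml.open_job']
console.log(path, body, query) // ['job_id'] ['timeout'] ['timeout']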
{ ...userQuery } : {}
@@ -93,8 +1027,12 @@ export default class Ml {
   async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlCloseJobResponse, unknown>>
   async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise<T.MlCloseJobResponse>
   async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id']
-    const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.close_job']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -116,8 +1054,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -140,7 +1084,10 @@ export default class Ml {
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarResponse, unknown>>
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarResponse>
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['calendar_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_calendar']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -182,7 +1129,10 @@ export default class Ml {
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarEventResponse, unknown>>
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarEventResponse>
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['calendar_id', 'event_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_calendar_event']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -225,7 +1175,10 @@ export default class Ml {
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteCalendarJobResponse, unknown>>
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise<T.MlDeleteCalendarJobResponse>
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['calendar_id', 'job_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.delete_calendar_job']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ?
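The replaced `else` branch above is the behavioral core of this patch: the old code copied every leftover key into the query string unconditionally, while the new code sends only keys declared in `acceptedQuery` (or in the shared `commonQueryParams` list) to the query string, and everything else is serialized into the body. A standalone sketch of that decision, assuming `commonQueryParams` holds Elasticsearch's usual shared parameters; `routeParam` is an illustrative name, not a function in the client:

// Assumed contents; the real list is defined elsewhere in the generated code.
const commonQueryParams: string[] = ['error_trace', 'filter_path', 'human', 'pretty']

function routeParam (
  key: string,
  value: unknown,
  acceptedQuery: string[],
  querystring: Record<string, any>,
  body: Record<string, any>
): void {
  if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
    querystring[key] = value // a declared (or shared) query parameter
  } else {
    body[key] = value // unrecognized keys now default to the body
  }
}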
{ ...userQuery } : {} @@ -268,7 +1221,10 @@ export default class Ml { async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -310,7 +1266,10 @@ export default class Ml { async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -352,8 +1311,12 @@ export default class Ml { async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['requests_per_second', 'timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.delete_expired_data'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -376,8 +1339,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -407,7 +1376,10 @@ export default class Ml { async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_filter'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
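Each method also seeds its `querystring` from the optional `params.querystring` object (the `const userQuery = params?.querystring` line) before any other keys are routed, which leaves callers an escape hatch for parameters the registry does not yet know about. A hedged usage sketch; the flag name is made up purely for illustration, and the request typings may require a cast for unknown keys:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// The object is spread into the query string verbatim; keys routed later
// from `params` can still overwrite individual entries.
await client.ml.deleteDatafeed({
  datafeed_id: 'my-datafeed',
  querystring: { some_future_flag: true } // hypothetical parameter
})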
{ ...userQuery } : {} @@ -449,7 +1421,10 @@ export default class Ml { async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'forecast_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_forecast'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -499,7 +1474,10 @@ export default class Ml { async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -541,7 +1519,10 @@ export default class Ml { async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_model_snapshot'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -584,7 +1565,10 @@ export default class Ml { async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_trained_model'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -626,7 +1610,10 @@ export default class Ml { async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_alias', 'model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_trained_model_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
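Body-less endpoints such as the delete family above destructure only `path` from their registry entry, and their key loops are untouched by this patch: with no `acceptedBody` to consult, every non-path key still goes straight to the query string. A reduced sketch of that simpler loop, with the surrounding types elided:

const acceptedPath: string[] = ['job_id'] // e.g. ml.delete_job
const params: Record<string, any> = { job_id: 'j1', force: true }
const querystring: Record<string, any> = {}

for (const key in params) {
  if (acceptedPath.includes(key)) continue // path parts go into the URL
  querystring[key] = params[key]           // everything else becomes ?force=true
}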
{ ...userQuery } : {} @@ -669,8 +1656,12 @@ export default class Ml { async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.estimate_model_memory'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -693,8 +1684,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -714,8 +1711,12 @@ export default class Ml { async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise> async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['evaluation', 'index', 'query'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.evaluate_data_frame'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -737,8 +1738,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -758,8 +1765,12 @@ export default class Ml { async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.explain_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
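For an endpoint like ml.evaluate_data_frame, whose former inline list (`'evaluation', 'index', 'query'`) is visible in the removed line above, all three keys are body keys, so a flat call serializes into a single JSON payload. A usage sketch; the index name and field names are illustrative:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const evaluation = await client.ml.evaluateDataFrame({
  index: 'my-predictions', // a body key here, not a URL part
  evaluation: {
    outlier_detection: {
      actual_field: 'is_outlier',
      predicted_probability_field: 'ml.outlier_score'
    }
  },
  query: { term: { dataset: 'validation' } } // ES query DSL, also a body key
})
console.log(evaluation)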
{ ...userQuery } : {} @@ -782,8 +1793,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -813,8 +1830,12 @@ export default class Ml { async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'skip_time', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.flush_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -836,8 +1857,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -860,8 +1887,12 @@ export default class Ml { async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['duration', 'expires_in', 'max_model_memory'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.forecast'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -883,8 +1914,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -907,8 +1944,12 @@ export default class Ml { async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'timestamp'] - const acceptedBody: string[] = ['anomaly_score', 'desc', 'end', 'exclude_interim', 'expand', 'page', 'sort', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_buckets'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
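These hunks show only the tail of each dispatch loop; the first branch, which routes keys found in `acceptedBody`, is unchanged and therefore sits outside the diff context. Reconstructing the whole loop makes the precedence explicit. The `acceptedBody` branch below is an assumption inferred from that unchanged context, not text from this patch: declared body keys win first, path keys are skipped, the reserved `body` and `querystring` keys are never routed, and only then does the new query-versus-body fallback apply.

const acceptedPath = ['job_id']
const acceptedBody = ['end']
const acceptedQuery = ['force']
const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
const params: Record<string, any> = { job_id: 'j1', end: 'now', force: true, pretty: true }
const querystring: Record<string, any> = {}
let body: Record<string, any> | undefined

for (const key in params) {
  if (acceptedBody.includes(key)) {
    body = body ?? {}
    body[key] = params[key] // assumed unchanged first branch (outside the hunks)
  } else if (acceptedPath.includes(key)) {
    continue // path parts are interpolated into the URL, never serialized here
  } else if (key !== 'body' && key !== 'querystring') {
    if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key]
    } else {
      body = body ?? {}
      body[key] = params[key]
    }
  }
}
// querystring -> { force: true, pretty: true }; body -> { end: 'now' }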
{ ...userQuery } : {} @@ -930,8 +1971,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -962,7 +2009,10 @@ export default class Ml { async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_calendar_events'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1004,8 +2054,12 @@ export default class Ml { async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const acceptedBody: string[] = ['page'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_calendars'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1028,8 +2082,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1059,8 +2119,12 @@ export default class Ml { async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'category_id'] - const acceptedBody: string[] = ['page'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_categories'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1082,8 +2146,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1114,7 +2184,10 @@ export default class Ml { async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1164,7 +2237,10 @@ export default class Ml { async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_data_frame_analytics_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1214,7 +2290,10 @@ export default class Ml { async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_datafeed_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1264,7 +2343,10 @@ export default class Ml { async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_datafeeds'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1314,7 +2396,10 @@ export default class Ml { async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_filters'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
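The recurring `body = body ?? {}` keeps `body` undefined until the first body-bound key actually appears, so endpoints called with only path and query parameters still send no payload at all. A minimal illustration of the lazy initialization:

let body: Record<string, any> | undefined

function putInBody (key: string, value: unknown): void {
  body = body ?? {} // created lazily, only once a body key shows up
  body[key] = value
}

console.log(body) // undefined: no body keys routed, so no payload is sent
putInBody('page', { from: 0, size: 5 })
console.log(body) // { page: { from: 0, size: 5 } }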
{ ...userQuery } : {} @@ -1364,8 +2449,12 @@ export default class Ml { async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise> async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['page'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_influencers'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1387,8 +2476,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1411,7 +2506,10 @@ export default class Ml { async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_job_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1461,7 +2559,10 @@ export default class Ml { async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_jobs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1511,7 +2612,10 @@ export default class Ml { async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_memory_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
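Applied to ml.get_influencers, whose registry entry declares `page` as its only body key, a flat call splits cleanly: `job_id` lands in the path, `page` in the body, and the rest in the query string. The values below are illustrative:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const influencers = await client.ml.getInfluencers({
  job_id: 'my-anomaly-job',   // path part
  influencer_score: 75,       // query-only key
  sort: 'influencer_score',   // query-only key
  page: { from: 0, size: 10 } // body-only key
})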
{ ...userQuery } : {} @@ -1561,7 +2665,10 @@ export default class Ml { async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_model_snapshot_upgrade_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1604,8 +2711,12 @@ export default class Ml { async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedBody: string[] = ['desc', 'end', 'page', 'sort', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_model_snapshots'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1627,8 +2738,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1659,8 +2776,12 @@ export default class Ml { async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_no_match', 'bucket_span', 'end', 'exclude_interim', 'overall_score', 'start', 'top_n'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_overall_buckets'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1682,8 +2803,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1706,8 +2833,12 @@ export default class Ml { async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['desc', 'end', 'exclude_interim', 'page', 'record_score', 'sort', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_records'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1729,8 +2860,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1753,7 +2890,10 @@ export default class Ml { async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_trained_models'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1803,7 +2943,10 @@ export default class Ml { async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_trained_models_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1853,8 +2996,12 @@ export default class Ml { async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['docs', 'inference_config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.infer_trained_model'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
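ml.get_overall_buckets (see its registry entry above) is the extreme case: its body and query lists are identical, mirroring a server API that accepts every parameter in either location, and the loop simply picks one deterministic place per key. An illustrative call:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `job_id` may be a wildcard; every other key appears in both lists.
const overall = await client.ml.getOverallBuckets({
  job_id: 'sensor-*',
  top_n: 3,
  bucket_span: '1h',
  overall_score: 80
})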
{ ...userQuery } : {} @@ -1876,8 +3023,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1900,7 +3053,10 @@ export default class Ml { async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ml.info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1940,8 +3096,12 @@ export default class Ml { async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.open_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1963,8 +3123,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1987,8 +3153,12 @@ export default class Ml { async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const acceptedBody: string[] = ['events'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.post_calendar_events'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2010,8 +3180,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2034,8 +3210,12 @@ export default class Ml { async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['data'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.post_data'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2047,8 +3227,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2071,8 +3257,12 @@ export default class Ml { async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.preview_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2095,8 +3285,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2126,8 +3322,12 @@ export default class Ml { async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['datafeed_config', 'job_config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.preview_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2150,8 +3350,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2181,8 +3387,12 @@ export default class Ml { async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const acceptedBody: string[] = ['job_ids', 'description'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_calendar'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2204,8 +3414,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2228,7 +3444,10 @@ export default class Ml { async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id', 'job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.put_calendar_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2271,8 +3490,12 @@ export default class Ml { async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', '_meta', 'model_memory_limit', 'source', 'headers', 'version'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2294,8 +3517,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2318,8 +3547,12 @@ export default class Ml { async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2341,8 +3574,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2365,8 +3604,12 @@ export default class Ml { async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] - const acceptedBody: string[] = ['description', 'items'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_filter'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2388,8 +3631,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2412,8 +3661,12 @@ export default class Ml { async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'job_id', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
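ml.put_datafeed shows why the registry matters: the request has a body property literally named `query` (the datafeed's Elasticsearch query DSL) alongside real HTTP query parameters such as `ignore_unavailable`. Because `query` sits in the entry's body list, the router cannot confuse the two. An illustrative call:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.ml.putDatafeed({
  datafeed_id: 'datafeed-sensor',
  job_id: 'my-anomaly-job',   // body key
  indices: ['sensor-data-*'], // body key
  query: { match_all: {} },   // body key that happens to be named "query"
  ignore_unavailable: true    // HTTP query parameter
})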
{ ...userQuery } : {} @@ -2435,8 +3688,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2459,8 +3718,12 @@ export default class Ml { async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'platform_architecture', 'tags', 'prefix_strings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_trained_model'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2482,8 +3745,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2506,7 +3775,10 @@ export default class Ml { async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_alias', 'model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.put_trained_model_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2549,8 +3821,12 @@ export default class Ml { async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id', 'part'] - const acceptedBody: string[] = ['definition', 'total_definition_length', 'total_parts'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_trained_model_definition_part'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
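The ml.put_trained_model_definition_part entry (path: `model_id`, `part`; body: `definition`, `total_definition_length`, `total_parts`) supports uploading a model definition in numbered chunks. A hedged sketch of such an upload loop; the chunk size and base64 handling are illustrative choices, not prescribed by the client:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function uploadDefinition (modelId: string, definition: Buffer): Promise<void> {
  const chunkSize = 4 * 1024 * 1024 // illustrative 4 MiB parts
  const totalParts = Math.ceil(definition.length / chunkSize)
  for (let part = 0; part < totalParts; part++) {
    const chunk = definition.subarray(part * chunkSize, (part + 1) * chunkSize)
    await client.ml.putTrainedModelDefinitionPart({
      model_id: modelId,                    // path
      part,                                 // path
      definition: chunk.toString('base64'), // body
      total_definition_length: definition.length,
      total_parts: totalParts
    })
  }
}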
{ ...userQuery } : {} @@ -2572,8 +3848,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2597,8 +3879,12 @@ export default class Ml { async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['vocabulary', 'merges', 'scores'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_trained_model_vocabulary'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2620,8 +3906,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2644,7 +3936,10 @@ export default class Ml { async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.reset_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2686,8 +3981,12 @@ export default class Ml { async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedBody: string[] = ['delete_intervening_results'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.revert_model_snapshot'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2709,8 +4008,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2734,7 +4039,10 @@ export default class Ml { async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise> async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ml.set_upgrade_mode'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2774,7 +4082,10 @@ export default class Ml { async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.start_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2816,8 +4127,12 @@ export default class Ml { async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['end', 'start', 'timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.start_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2839,8 +4154,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2863,8 +4184,12 @@ export default class Ml { async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['adaptive_allocations'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.start_trained_model_deployment'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
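ml.start_datafeed declares `start`, `end`, and `timeout` in both its body and query lists (see the registry above), so a time-bounded replay works the same whichever transport location the server ends up reading. An illustrative call:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.ml.startDatafeed({
  datafeed_id: 'datafeed-sensor',
  start: 'now-14d', // accepted as body or query; routed once, deterministically
  end: 'now'
})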
{ ...userQuery } : {} @@ -2886,8 +4211,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2910,7 +4241,10 @@ export default class Ml { async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.stop_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2952,8 +4286,12 @@ export default class Ml { async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.stop_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2975,8 +4313,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2999,7 +4343,10 @@ export default class Ml { async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.stop_trained_model_deployment'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3041,8 +4388,12 @@ export default class Ml { async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3064,8 +4415,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3088,8 +4445,12 @@ export default class Ml { async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3111,8 +4472,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3135,8 +4502,12 @@ export default class Ml { async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] - const acceptedBody: string[] = ['add_items', 'description', 'remove_items'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_filter'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3158,8 +4529,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3182,8 +4559,12 @@ export default class Ml { async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'model_prune_window', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3205,8 +4586,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3229,8 +4616,12 @@ export default class Ml { async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedBody: string[] = ['description', 'retain'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_model_snapshot'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3252,8 +4643,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3277,8 +4674,12 @@ export default class Ml { async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['number_of_allocations', 'adaptive_allocations'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_trained_model_deployment'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3300,8 +4701,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3324,7 +4731,10 @@ export default class Ml { async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.upgrade_job_snapshot'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3367,8 +4777,12 @@ export default class Ml { async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise> async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_id', 'model_snapshot_retention_days', 'results_index_name'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.validate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3391,8 +4805,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3412,8 +4832,12 @@ export default class Ml { async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise> async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['detector'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.validate_detector'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3425,8 +4849,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index 053fea53a..8974e0c87 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
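Every hunk in this series applies the same mechanical change: instead of sending every unrecognized request key to the query string, each method now consults the endpoint's `query` allow-list and routes anything else into the request body. A minimal standalone sketch of that routing rule, mirroring the branch added above (names follow the diff; `acceptedQuery` is the per-endpoint list from `acceptedParams`):

    const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

    // Mirror of the routing branch added in each hunk: known query keys go to
    // the query string, everything else is folded into the request body.
    function routeParam (
      key: string,
      value: unknown,
      acceptedQuery: string[],
      querystring: Record<string, unknown>,
      body: Record<string, unknown>
    ): void {
      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
        querystring[key] = value
      } else {
        body[key] = value
      }
    }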
diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts
index 053fea53a..8974e0c87 100644
--- a/src/api/api/monitoring.ts
+++ b/src/api/api/monitoring.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,12 +21,34 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class Monitoring {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
 
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'monitoring.bulk': {
+        path: [
+          'type'
+        ],
+        body: [
+          'operations'
+        ],
+        query: [
+          'system_id',
+          'system_api_version',
+          'interval'
+        ]
+      }
+    }
   }
 
   /**
@@ -51,8 +59,12 @@ export default class Monitoring {
   async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MonitoringBulkResponse, unknown>>
   async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise<T.MonitoringBulkResponse>
   async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['type']
-    const acceptedBody: string[] = ['operations']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['monitoring.bulk']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -64,8 +76,14 @@ export default class Monitoring {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts
index 573c4f385..59a71420b 100644
--- a/src/api/api/msearch.ts
+++ b/src/api/api/msearch.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,38 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  msearch: {
+    path: [
+      'index'
+    ],
+    body: [
+      'searches'
+    ],
+    query: [
+      'allow_no_indices',
+      'ccs_minimize_roundtrips',
+      'expand_wildcards',
+      'ignore_throttled',
+      'ignore_unavailable',
+      'include_named_queries_score',
+      'max_concurrent_searches',
+      'max_concurrent_shard_requests',
+      'pre_filter_shard_size',
+      'rest_total_hits_as_int',
+      'routing',
+      'search_type',
+      'typed_keys'
+    ]
+  }
+}
 
 /**
  * Run multiple searches. The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: ``` header\n body\n header\n body\n ``` This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. IMPORTANT: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.
@@ -45,8 +62,12 @@ export default async function MsearchApi
 export default async function MsearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MsearchResponse<TDocument, TAggregations>, unknown>>
 export default async function MsearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise<T.MsearchResponse<TDocument, TAggregations>>
 export default async function MsearchApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['searches']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.msearch
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -58,8 +79,14 @@ export default async function MsearchApi
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
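For illustration only, a usage sketch of the `msearch` surface defined above (assumes an instantiated `client`; index names and queries are invented):

    // Two searches in one round trip; header/body pairs follow the NDJSON layout.
    const result = await client.msearch({
      searches: [
        { index: 'my-index' },
        { query: { match: { title: 'hello world' } } },
        { index: 'my-other-index' },
        { query: { match_all: {} } }
      ]
    })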
diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts
--- a/src/api/api/msearch_template.ts
+++ b/src/api/api/msearch_template.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,30 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  msearch_template: {
+    path: [
+      'index'
+    ],
+    body: [
+      'search_templates'
+    ],
+    query: [
+      'ccs_minimize_roundtrips',
+      'max_concurrent_searches',
+      'search_type',
+      'rest_total_hits_as_int',
+      'typed_keys'
+    ]
+  }
+}
 
 /**
  * Run multiple templated searches. Run multiple templated searches with a single request. If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. For example: ``` $ cat requests { "index": "my-index" } { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} { "index": "my-other-index" } { "id": "my-other-search-template", "params": { "query_type": "match_all" }} $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo ```
@@ -45,8 +54,12 @@ export default async function MsearchTemplateApi
 export default async function MsearchTemplateApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MsearchTemplateResponse<TDocument, TAggregations>, unknown>>
 export default async function MsearchTemplateApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise<T.MsearchTemplateResponse<TDocument, TAggregations>>
 export default async function MsearchTemplateApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['search_templates']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.msearch_template
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -58,8 +71,14 @@ export default async function MsearchTemplateApi
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
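The `curl` example in the comment above translates directly to the client; a hedged sketch using the same template IDs and params (`client` is assumed to be an instantiated Client):

    const result = await client.msearchTemplate({
      search_templates: [
        { index: 'my-index' },
        { id: 'my-search-template', params: { query_string: 'hello world', from: 0, size: 10 } },
        { index: 'my-other-index' },
        { id: 'my-other-search-template', params: { query_type: 'match_all' } }
      ]
    })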
diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts
--- a/src/api/api/mtermvectors.ts
+++ b/src/api/api/mtermvectors.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,38 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  mtermvectors: {
+    path: [
+      'index'
+    ],
+    body: [
+      'docs',
+      'ids'
+    ],
+    query: [
+      'ids',
+      'fields',
+      'field_statistics',
+      'offsets',
+      'payloads',
+      'positions',
+      'preference',
+      'realtime',
+      'routing',
+      'term_statistics',
+      'version',
+      'version_type'
+    ]
+  }
+}
 
 /**
  * Get multiple term vectors. Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. **Artificial documents** You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. The mapping used is determined by the specified `_index`.
@@ -45,8 +62,12 @@ export default async function MtermvectorsApi
 export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MtermvectorsResponse, unknown>>
 export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise<T.MtermvectorsResponse>
 export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['docs', 'ids']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.mtermvectors
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -69,8 +90,14 @@ export default async function MtermvectorsApi
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
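As a usage illustration of the artificial-documents behavior described above (index and field names invented; `client` assumed):

    const result = await client.mtermvectors({
      index: 'my-index',
      docs: [
        { _id: '1' },                            // existing document, fetched by ID
        { doc: { message: 'test test test' } }   // artificial document, analyzed on the fly
      ],
      term_statistics: true
    })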
diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts
index 1ce489ae0..dbeef1f8a 100644
--- a/src/api/api/nodes.ts
+++ b/src/api/api/nodes.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,12 +21,102 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class Nodes {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
 
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'nodes.clear_repositories_metering_archive': {
+        path: [
+          'node_id',
+          'max_archive_version'
+        ],
+        body: [],
+        query: []
+      },
+      'nodes.get_repositories_metering_info': {
+        path: [
+          'node_id'
+        ],
+        body: [],
+        query: []
+      },
+      'nodes.hot_threads': {
+        path: [
+          'node_id'
+        ],
+        body: [],
+        query: [
+          'ignore_idle_threads',
+          'interval',
+          'snapshots',
+          'threads',
+          'timeout',
+          'type',
+          'sort'
+        ]
+      },
+      'nodes.info': {
+        path: [
+          'node_id',
+          'metric'
+        ],
+        body: [],
+        query: [
+          'flat_settings',
+          'timeout'
+        ]
+      },
+      'nodes.reload_secure_settings': {
+        path: [
+          'node_id'
+        ],
+        body: [
+          'secure_settings_password'
+        ],
+        query: [
+          'timeout'
+        ]
+      },
+      'nodes.stats': {
+        path: [
+          'node_id',
+          'metric',
+          'index_metric'
+        ],
+        body: [],
+        query: [
+          'completion_fields',
+          'fielddata_fields',
+          'fields',
+          'groups',
+          'include_segment_file_sizes',
+          'level',
+          'timeout',
+          'types',
+          'include_unloaded_segments'
+        ]
+      },
+      'nodes.usage': {
+        path: [
+          'node_id',
+          'metric'
+        ],
+        body: [],
+        query: [
+          'timeout'
+        ]
+      }
+    }
   }
@@ -51,7 +127,10 @@ export default class Nodes {
   async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesClearRepositoriesMeteringArchiveResponse, unknown>>
   async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
   async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'max_archive_version']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.clear_repositories_metering_archive']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -94,7 +173,10 @@ export default class Nodes {
   async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesGetRepositoriesMeteringInfoResponse, unknown>>
   async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise<T.NodesGetRepositoriesMeteringInfoResponse>
   async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.get_repositories_metering_info']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -136,7 +218,10 @@ export default class Nodes {
   async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesHotThreadsResponse, unknown>>
   async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise<T.NodesHotThreadsResponse>
   async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.hot_threads']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -186,7 +271,10 @@ export default class Nodes {
   async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesInfoResponse, unknown>>
   async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise<T.NodesInfoResponse>
   async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'metric']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.info']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -243,8 +331,12 @@ export default class Nodes {
   async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesReloadSecureSettingsResponse, unknown>>
   async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<T.NodesReloadSecureSettingsResponse>
   async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
-    const acceptedBody: string[] = ['secure_settings_password']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['nodes.reload_secure_settings']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -267,8 +359,14 @@ export default class Nodes {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -298,7 +396,10 @@ export default class Nodes {
   async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesStatsResponse, unknown>>
   async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise<T.NodesStatsResponse>
   async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'metric', 'index_metric']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.stats']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -362,7 +463,10 @@ export default class Nodes {
   async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesUsageResponse, unknown>>
   async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise<T.NodesUsageResponse>
   async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'metric']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.usage']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
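A usage sketch for the one nodes endpoint above that now carries a body property, `reload_secure_settings` (the keystore password is a hypothetical value; `client` assumed):

    const result = await client.nodes.reloadSecureSettings({
      secure_settings_password: 'keystore-password', // hypothetical
      timeout: '30s'
    })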
diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts
index 4cd2a733e..609ab0c92 100644
--- a/src/api/api/open_point_in_time.ts
+++ b/src/api/api/open_point_in_time.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,31 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  open_point_in_time: {
+    path: [
+      'index'
+    ],
+    body: [
+      'index_filter'
+    ],
+    query: [
+      'keep_alive',
+      'ignore_unavailable',
+      'preference',
+      'routing',
+      'expand_wildcards',
+      'allow_partial_search_results'
+    ]
+  }
+}
 
 /**
  * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time. Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with `search_after`. IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. When a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a `NoShardAvailableActionException` exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. **Keeping point in time alive** The `keep_alive` parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request. Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use. TIP: Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles. Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. Note that a point-in-time doesn't prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.
@@ -45,8 +55,12 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.OpenPointInTimeResponse, unknown>>
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<T.OpenPointInTimeResponse>
 export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['index_filter']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.open_point_in_time
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -68,8 +82,14 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
    }
  }
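The lifecycle described in the comment above, as a hedged sketch (index name invented; `client` assumed):

    // Open a PIT, search against it, and always close it when done.
    const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
    try {
      const page = await client.search({
        pit: { id: pit.id, keep_alive: '1m' }, // no index/routing/preference here
        size: 100
      })
      console.log(page.hits.hits.length)
    } finally {
      await client.closePointInTime({ id: pit.id })
    }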
diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts
index 908709afd..477bca682 100644
--- a/src/api/api/ping.ts
+++ b/src/api/api/ping.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,18 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  ping: {
+    path: [],
+    body: [],
+    query: []
+  }
+}
 
 /**
  * Ping the cluster. Get information about whether the cluster is running.
@@ -45,7 +42,10 @@ export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions
 export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PingResponse, unknown>>
 export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise<T.PingResponse>
 export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = []
+  const {
+    path: acceptedPath
+  } = acceptedParams.ping
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/profiling.ts b/src/api/api/profiling.ts
index 75f2d46cc..04a1028ab 100644
--- a/src/api/api/profiling.ts
+++ b/src/api/api/profiling.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,12 +21,39 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
 
 export default class Profiling {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
 
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'profiling.flamegraph': {
+        path: [],
+        body: [],
+        query: []
+      },
+      'profiling.stacktraces': {
+        path: [],
+        body: [],
+        query: []
+      },
+      'profiling.status': {
+        path: [],
+        body: [],
+        query: []
+      },
+      'profiling.topn_functions': {
+        path: [],
+        body: [],
+        query: []
+      }
+    }
   }
@@ -51,7 +64,10 @@ export default class Profiling {
   async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.flamegraph']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -90,7 +106,10 @@ export default class Profiling {
   async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.stacktraces']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -129,7 +148,10 @@ export default class Profiling {
   async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.status']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -168,7 +190,10 @@ export default class Profiling {
   async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
   async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
   async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.topn_functions']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts
index d3350ca5b..6762be248 100644
--- a/src/api/api/put_script.ts
+++ b/src/api/api/put_script.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,7 +21,29 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  put_script: {
+    path: [
+      'id',
+      'context'
+    ],
+    body: [
+      'script'
+    ],
+    query: [
+      'context',
+      'master_timeout',
+      'timeout'
+    ]
+  }
+}
 
 /**
  * Create or update a script or search template. Creates or updates a stored script or search template.
@@ -45,8 +53,12 @@ export default async function PutScriptApi (this: That, params: T.PutScriptRequest
 export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PutScriptResponse, unknown>>
 export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise<T.PutScriptResponse>
 export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['id', 'context']
-  const acceptedBody: string[] = ['script']
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.put_script
+
   const userQuery = params?.querystring
   const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -68,8 +80,14 @@ export default async function PutScriptApi (this: That, params: T.PutScriptRequest
     } else if (acceptedPath.includes(key)) {
       continue
     } else if (key !== 'body' && key !== 'querystring') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
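A usage sketch for `putScript` as declared above, storing a Painless script under an ID (the script itself follows the standard Elasticsearch stored-script example; `client` assumed):

    await client.putScript({
      id: 'calculate-score',
      script: {
        lang: 'painless',
        source: "Math.log(_score * 2) + params['my_modifier']"
      }
    })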
diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts
index bb7a964ee..ba218714f 100644
--- a/src/api/api/query_rules.ts
+++ b/src/api/api/query_rules.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,12 +21,90 @@ import { TransportResult } from '@elastic/transport'
 
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class QueryRules {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
 
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'query_rules.delete_rule': {
+        path: [
+          'ruleset_id',
+          'rule_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.delete_ruleset': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.get_rule': {
+        path: [
+          'ruleset_id',
+          'rule_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.get_ruleset': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.list_rulesets': {
+        path: [],
+        body: [],
+        query: [
+          'from',
+          'size'
+        ]
+      },
+      'query_rules.put_rule': {
+        path: [
+          'ruleset_id',
+          'rule_id'
+        ],
+        body: [
+          'type',
+          'criteria',
+          'actions',
+          'priority'
+        ],
+        query: []
+      },
+      'query_rules.put_ruleset': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [
+          'rules'
+        ],
+        query: []
+      },
+      'query_rules.test': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [
+          'match_criteria'
+        ],
+        query: []
+      }
+    }
   }
@@ -51,7 +115,10 @@ export default class QueryRules {
   async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRuleResponse, unknown>>
   async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRuleResponse>
   async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.delete_rule']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -94,7 +161,10 @@ export default class QueryRules {
   async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRulesetResponse, unknown>>
   async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRulesetResponse>
   async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.delete_ruleset']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -136,7 +206,10 @@ export default class QueryRules {
   async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRuleResponse, unknown>>
   async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRuleResponse>
   async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.get_rule']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -179,7 +252,10 @@ export default class QueryRules {
   async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRulesetResponse, unknown>>
   async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRulesetResponse>
   async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.get_ruleset']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -221,7 +297,10 @@ export default class QueryRules {
   async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesListRulesetsResponse, unknown>>
   async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<T.QueryRulesListRulesetsResponse>
   async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.list_rulesets']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -261,8 +340,12 @@ export default class QueryRules {
   async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRuleResponse, unknown>>
   async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRuleResponse>
   async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
-    const acceptedBody: string[] = ['type', 'criteria', 'actions', 'priority']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['query_rules.put_rule']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -284,8 +367,14 @@ export default class QueryRules {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -309,8 +398,12 @@ export default class QueryRules {
   async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRulesetResponse, unknown>>
   async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRulesetResponse>
   async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id']
-    const acceptedBody: string[] = ['rules']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['query_rules.put_ruleset']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -332,8 +425,14 @@ export default class QueryRules {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -356,8 +455,12 @@ export default class QueryRules {
   async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesTestResponse, unknown>>
   async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<T.QueryRulesTestResponse>
   async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id']
-    const acceptedBody: string[] = ['match_criteria']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['query_rules.test']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -379,8 +482,14 @@ export default class QueryRules {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
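A hedged usage sketch for the `put_ruleset` body fields listed above; the `rules` entries carry the same `type`/`criteria`/`actions` shape as `put_rule`, and all IDs and values are invented:

    await client.queryRules.putRuleset({
      ruleset_id: 'my-ruleset',
      rules: [
        {
          rule_id: 'rule1',
          type: 'pinned',
          criteria: [{ type: 'exact', metadata: 'user_query', values: ['pugs'] }],
          actions: { ids: ['id1'] }
        }
      ]
    })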
@@ -45,8 +54,12 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['requests', 'metric'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.rank_eval + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +81,14 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 5c83f147b..3f1e31fd9 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,36 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + reindex: { + path: [], + body: [ + 'conflicts', + 'dest', + 'max_docs', + 'script', + 'size', + 'source' + ], + query: [ + 'refresh', + 'requests_per_second', + 'scroll', + 'slices', + 'timeout', + 'wait_for_active_shards', + 'wait_for_completion', + 'require_alias' + ] + } +} /** * Reindex documents. Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. 
Mappings, shard counts, and replicas, for example, must be configured ahead of time. If the Elasticsearch security features are enabled, you must have the following security privileges: * The `read` index privilege for the source data stream, index, or alias. * The `write` index privilege for the destination data stream, index, or index alias. * To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. * If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled. The `dest` element can be configured like the index API to control optimistic concurrency control. Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict. IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream. By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. NOTE: The reindex API makes no effort to handle ID collisions. The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. Instead, make sure that IDs are unique by using a script. **Running reindex asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `_tasks/`. **Reindex from multiple sources** If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. That way you can resume the process if there are any errors by removing the partially completed source and starting over. It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. 
For example, you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. Requests are throttled by padding each batch with a wait time. To turn off throttling, set `requests_per_second` to `-1`. The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". **Slicing** Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. You can slice a reindex request manually by providing a slice ID and total number of slices to each request. You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use. Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: * You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. * Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use the following guidelines. Query performance is most efficient when the number of slices is equal to the number of shards in the index. If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. 
Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. Indexing performance scales linearly across available resources with the number of slices. Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. **Modify documents during reindexing** Like `_update_by_query`, reindex operations support a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document's metadata. Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body. Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. Think of the possibilities! Just be careful; you are able to change: * `_id` * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. **Reindex from remote** Reindex supports reindexing from a remote Elasticsearch cluster. The `host` parameter must contain a scheme, host, port, and optional path. The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. There is a range of settings available to configure the behavior of the HTTPS connection. When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. It can be set to a comma delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: ["otherhost:9200", "another:9200", "127.0.10.*:9200", "localhost:*"] ``` The list of allowed hosts must be configured on any nodes that will coordinate the reindex. This feature should work with remote clusters of any version of Elasticsearch. This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. WARNING: Elasticsearch does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you'll need to use a smaller batch size.
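A minimal sketch of a remote reindex issued through this client, assuming the remote host is already listed in `reindex.remote.whitelist`; the host, credentials, and index names are hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Run asynchronously: wait_for_completion=false returns a task id to poll
// instead of blocking until the copy finishes.
const response = await client.reindex({
  wait_for_completion: false,
  source: {
    remote: { host: 'https://otherhost:9200/', username: 'user', password: 'pass' }, // hypothetical credentials
    index: 'source-index' // hypothetical source
  },
  dest: { index: 'dest-index' } // hypothetical destination
})
console.log(response.task) // task id, e.g. "r1A2WoRbTwKZ516z6NEs5A:36619"
```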
It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request. @@ -45,8 +60,12 @@ export default async function ReindexApi (this: That, params: T.ReindexRequest, export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.reindex + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +87,14 @@ export default async function ReindexApi (this: That, params: T.ReindexRequest, } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts index d32f80c01..9c6b73a92 100644 --- a/src/api/api/reindex_rethrottle.ts +++ b/src/api/api/reindex_rethrottle.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,22 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + reindex_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} /** * Throttle a reindex operation. Change the number of requests per second for a particular reindex operation. 
For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts. @@ -45,7 +46,10 @@ export default async function ReindexRethrottleApi (this: That, params: T.Reinde export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = acceptedParams.reindex_rethrottle + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index 57b5377c6..b08178668 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,25 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + render_search_template: { + path: [], + body: [ + 'id', + 'file', + 'params', + 'source' + ], + query: [] + } +} /** * Render a search template. Render a search template as a search request body. 
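A minimal sketch of `renderSearchTemplate`, assuming a stored template with the hypothetical id `my-search-template`:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Substitute params into the stored template without running the search.
const rendered = await client.renderSearchTemplate({
  id: 'my-search-template', // hypothetical stored template id
  params: { query_string: 'hello world', from: 0, size: 10 }
})
console.log(rendered.template_output) // the fully rendered search request body
```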
@@ -45,8 +49,12 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id', 'file', 'params', 'source'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.render_search_template + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +77,14 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index b45043728..3a27e3549 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,97 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Rollup { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'rollup.delete_job': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_jobs': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_rollup_caps': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_rollup_index_caps': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'rollup.put_job': { + path: [ + 'id' + ], + body: [ + 'cron', + 'groups', + 'index_pattern', + 'metrics', + 'page_size', + 'rollup_index', + 'timeout', + 'headers' + ], + query: [] + }, + 'rollup.rollup_search': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'query', + 'size' + ], + query: [ + 'rest_total_hits_as_int', + 'typed_keys' + ] + }, + 'rollup.start_job': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.stop_job': { + path: [ + 'id' + ], + body: [], + query: [ + 'timeout', + 'wait_for_completion' + ] + } + } } /** @@ -51,7 +122,10 @@ export default class Rollup { async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.delete_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +167,10 @@ export default class Rollup { async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_jobs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -143,7 +220,10 @@ export default class Rollup { async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_rollup_caps'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -193,7 +273,10 @@ export default class Rollup { async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_rollup_index_caps'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -235,8 +318,12 @@ export default class Rollup { async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index', 'timeout', 'headers'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['rollup.put_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -258,8 +345,14 @@ export default class Rollup { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -282,8 +375,12 @@ export default class Rollup { async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise> async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'size'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['rollup.rollup_search'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -305,8 +402,14 @@ export default class Rollup { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -329,7 +432,10 @@ export default class Rollup { async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.start_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -371,7 +477,10 @@ export default class Rollup { async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.stop_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index bbafbeff1..f9823c7fb 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,24 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + scripts_painless_execute: { + path: [], + body: [ + 'context', + 'context_setup', + 'script' + ], + query: [] + } +} /** * Run a script. Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. Each context requires a script, but additional parameters depend on the context you're using for that script. 
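A minimal sketch of `scriptsPainlessExecute` using the default painless context, which requires no index permissions:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// The default context evaluates the script and returns its result as a string.
const response = await client.scriptsPainlessExecute({
  script: {
    source: 'params.count / params.total',
    params: { count: 100.0, total: 1000.0 }
  }
})
console.log(response.result) // "0.1"
```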
@@ -45,8 +48,12 @@ export default async function ScriptsPainlessExecuteApi (this export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise> export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['context', 'context_setup', 'script'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.scripts_painless_execute + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +76,14 @@ export default async function ScriptsPainlessExecuteApi (this } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts index 5bd03110b..2b31642cb 100644 --- a/src/api/api/scroll.ts +++ b/src/api/api/scroll.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,27 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + scroll: { + path: [], + body: [ + 'scroll', + 'scroll_id' + ], + query: [ + 'scroll', + 'scroll_id', + 'rest_total_hits_as_int' + ] + } +} /** * Run a scrolling search. IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter.
You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. @@ -45,8 +51,12 @@ export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise> export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['scroll', 'scroll_id'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.scroll + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +78,14 @@ export default async function ScrollApi = { + search: { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'rank', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'retriever', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'include_named_queries_score', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'pre_filter_shard_size', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'force_synthetic_source' + ] + } +} /** * Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices. 
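A minimal sketch of a basic `search` call using the flattened body parameters declared above; the index name and document shape are hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

interface Doc { title: string } // hypothetical document shape

const response = await client.search<Doc>({
  index: 'my-index', // hypothetical index
  query: { match: { title: 'elasticsearch' } },
  size: 10
})
for (const hit of response.hits.hits) {
  console.log(hit._id, hit._source?.title)
}
```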
**Search slicing** When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. @@ -45,8 +127,12 @@ export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise> export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'rank', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'retriever', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -73,8 +159,14 @@ export default async function SearchApi +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class SearchApplication { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'search_application.delete': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.delete_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.get': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.get_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.list': { + path: [], + body: [], + query: [ + 'q', + 'from', + 'size' + ] + }, + 'search_application.post_behavioral_analytics_event': { + path: [ + 'collection_name', + 'event_type' + ], + body: [ + 'payload' + ], + query: [ + 'debug' + ] + }, + 'search_application.put': { + path: [ + 'name' + ], + body: [ + 'search_application' + ], + query: [ + 'create' + ] + }, + 'search_application.put_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.render_query': { + path: [ + 'name' + ], + body: [ + 'params' + ], + query: [] + }, + 'search_application.search': { + path: [ + 'name' + ], + body: [ + 'params' + ], + query: [ + 'typed_keys' + ] + } + } } /** @@ -51,7 +133,10 @@ export default class SearchApplication { async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +178,10 @@ export default class SearchApplication { async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.delete_behavioral_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -135,7 +223,10 @@ export default class SearchApplication { async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -177,7 +268,10 @@ export default class SearchApplication { async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.get_behavioral_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -227,7 +321,10 @@ export default class SearchApplication { async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise> async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['search_application.list'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -267,8 +364,12 @@ export default class SearchApplication { async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithMeta): Promise> async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['collection_name', 'event_type'] - const acceptedBody: string[] = ['payload'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.post_behavioral_analytics_event'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -280,8 +381,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -305,8 +412,12 @@ export default class SearchApplication { async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise> async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['search_application'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.put'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -318,8 +429,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -342,7 +459,10 @@ export default class SearchApplication { async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.put_behavioral_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -384,8 +504,12 @@ export default class SearchApplication { async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['params'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.render_query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -407,8 +531,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -431,8 +561,12 @@ export default class SearchApplication { async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise> async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['params'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.search'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -454,8 +588,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index c9384a91e..1c44a4994 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,49 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + search_mvt: { + path: [ + 'index', + 'field', + 'zoom', + 'x', + 'y' + ], + body: [ + 'aggs', + 'buffer', + 'exact_bounds', + 'extent', + 'fields', + 'grid_agg', + 'grid_precision', + 'grid_type', + 'query', + 'runtime_mappings', + 'size', + 'sort', + 'track_total_hits', + 'with_labels' + ], + query: [ + 'exact_bounds', + 'extent', + 'grid_agg', + 'grid_precision', + 'grid_type', + 'size', + 'with_labels' + ] + } +} /** * Search a vector tile. Search a vector tile for geospatial values. Before using this API, you should be familiar with the Mapbox vector tile specification. The API returns results as a binary mapbox vector tile. Internally, Elasticsearch translates a vector tile search API request into a search containing: * A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box. * A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box. * Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`. * If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. 
For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search ``` GET my-index/_search { "size": 10000, "query": { "geo_bounding_box": { "my-geo-field": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "aggregations": { "grid": { "geotile_grid": { "field": "my-geo-field", "precision": 11, "size": 65536, "bounds": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "bounds": { "geo_bounds": { "field": "my-geo-field", "wrap_longitude": false } } } } ``` The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: * A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query. * An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. * A meta layer containing: * A feature containing a bounding box. By default, this is the bounding box of the tile. * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. * Metadata for the search. The API only returns features that can display at its zoom level. For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON. IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. If you specify both parameters, the query parameter takes precedence. **Grid precision for geotile** For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. `grid_precision` represents the additional zoom levels available through these cells. The final precision is computed by as follows: ` + grid_precision`. For example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. The maximum final precision is 29. The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The `aggs` layer only contains features for cells with matching data. **Grid precision for geohex** For a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`. This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. The following table maps the H3 resolution for each precision. For example, if `` is 3 and `grid_precision` is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If `` is 3 and `grid_precision` is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3. 
| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | | --------- | ---------------- | ------------- | ----------------| ----- | | 1 | 4 | 0 | 122 | 30.5 | | 2 | 16 | 0 | 122 | 7.625 | | 3 | 64 | 1 | 842 | 13.15625 | | 4 | 256 | 1 | 842 | 3.2890625 | | 5 | 1024 | 2 | 5882 | 5.744140625 | | 6 | 4096 | 2 | 5882 | 1.436035156 | | 7 | 16384 | 3 | 41162 | 2.512329102 | | 8 | 65536 | 3 | 41162 | 0.6280822754 | | 9 | 262144 | 4 | 288122 | 1.099098206 | | 10 | 1048576 | 4 | 288122 | 0.2747745514 | | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. @@ -45,8 +73,12 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y'] - const acceptedBody: string[] = ['aggs', 'buffer', 'exact_bounds', 'extent', 'fields', 'grid_agg', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort', 'track_total_hits', 'with_labels'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search_mvt + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +100,14 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts index f2fff30a5..2e700ba0d 100644 --- a/src/api/api/search_shards.ts +++ b/src/api/api/search_shards.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,28 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + search_shards: { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'preference', + 'routing' + ] + } +} /** * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the `indices` section. If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. @@ -45,7 +52,10 @@ export default async function SearchShardsApi (this: That, params?: T.SearchShar export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = acceptedParams.search_shards + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index f63c77a45..7a1e84c9b 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,42 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + search_template: { + path: [ + 'index' + ], + body: [ + 'explain', + 'id', + 'params', + 'profile', + 'source' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'preference', + 'profile', + 'routing', + 'scroll', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } +} /** * Run a search with a search template. @@ -45,8 +66,12 @@ export default async function SearchTemplateApi (this: That export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['explain', 'id', 'params', 'profile', 'source'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search_template + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +94,14 @@ export default async function SearchTemplateApi (this: That } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 4c8af1dda..4342c20e6 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,67 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class SearchableSnapshots { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'searchable_snapshots.cache_stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'searchable_snapshots.clear_cache': { + path: [ + 'index' + ], + body: [], + query: [ + 'expand_wildcards', + 'allow_no_indices', + 'ignore_unavailable' + ] + }, + 'searchable_snapshots.mount': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'index', + 'renamed_index', + 'index_settings', + 'ignore_index_settings' + ], + query: [ + 'master_timeout', + 'wait_for_completion', + 'storage' + ] + }, + 'searchable_snapshots.stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'level' + ] + } + } } /** @@ -51,7 +92,10 @@ export default class SearchableSnapshots { async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.cache_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -101,7 +145,10 @@ export default class SearchableSnapshots { async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.clear_cache'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -151,8 +198,12 @@ export default class SearchableSnapshots { async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise> async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const acceptedBody: string[] = ['index', 'renamed_index', 'index_settings', 'ignore_index_settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['searchable_snapshots.mount'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -174,8 +225,14 @@ export default class SearchableSnapshots { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -199,7 +256,10 @@ export default class SearchableSnapshots { async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 3484f5933..adde7580d 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,648 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Security { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'security.activate_user_profile': { + path: [], + body: [ + 'access_token', + 'grant_type', + 'password', + 'username' + ], + query: [] + }, + 'security.authenticate': { + path: [], + body: [], + query: [] + }, + 'security.bulk_delete_role': { + path: [], + body: [ + 'names' + ], + query: [ + 'refresh' + ] + }, + 'security.bulk_put_role': { + path: [], + body: [ + 'roles' + ], + query: [ + 'refresh' + ] + }, + 'security.bulk_update_api_keys': { + path: [], + body: [ + 'expiration', + 'ids', + 'metadata', + 'role_descriptors' + ], + query: [] + }, + 'security.change_password': { + path: [ + 'username' + ], + body: [ + 'password', + 'password_hash' + ], + query: [ + 'refresh' + ] + }, + 'security.clear_api_key_cache': { + path: [ + 'ids' + ], + body: [], + query: [] + }, + 'security.clear_cached_privileges': { + path: [ + 'application' + ], + body: [], + query: [] + }, + 'security.clear_cached_realms': { + path: [ + 'realms' + ], + body: [], + query: [ + 'usernames' + ] + }, + 'security.clear_cached_roles': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.clear_cached_service_tokens': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [] + }, + 'security.create_api_key': { + path: [], + body: [ + 'expiration', + 'name', + 'role_descriptors', + 'metadata' + ], + query: [ + 'refresh' + ] + }, + 'security.create_cross_cluster_api_key': { + path: [], + body: [ + 'access', + 'expiration', + 'metadata', + 'name' + ], + query: [] + }, + 'security.create_service_token': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delegate_pki': { + path: [], + body: [ + 'x509_certificate_chain' + ], + query: [] + }, + 'security.delete_privileges': { + path: [ + 'application', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_role': { + path: [ + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_role_mapping': { + path: [ + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_service_token': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.disable_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.disable_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enable_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enable_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enroll_kibana': { + path: [], + body: [], + query: [] + }, + 'security.enroll_node': { + path: [], + body: [], + query: [] + }, + 'security.get_api_key': { + path: [], + body: [], + query: [ + 'id', + 'name', + 'owner', + 'realm_name', + 'username', + 'with_limited_by', + 'active_only', + 
'with_profile_uid' + ] + }, + 'security.get_builtin_privileges': { + path: [], + body: [], + query: [] + }, + 'security.get_privileges': { + path: [ + 'application', + 'name' + ], + body: [], + query: [] + }, + 'security.get_role': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.get_role_mapping': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.get_service_accounts': { + path: [ + 'namespace', + 'service' + ], + body: [], + query: [] + }, + 'security.get_service_credentials': { + path: [ + 'namespace', + 'service' + ], + body: [], + query: [] + }, + 'security.get_settings': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'security.get_token': { + path: [], + body: [ + 'grant_type', + 'scope', + 'password', + 'kerberos_ticket', + 'refresh_token', + 'username' + ], + query: [] + }, + 'security.get_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'with_profile_uid' + ] + }, + 'security.get_user_privileges': { + path: [], + body: [], + query: [ + 'application', + 'priviledge', + 'username' + ] + }, + 'security.get_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'data' + ] + }, + 'security.grant_api_key': { + path: [], + body: [ + 'api_key', + 'grant_type', + 'access_token', + 'username', + 'password', + 'run_as' + ], + query: [] + }, + 'security.has_privileges': { + path: [ + 'user' + ], + body: [ + 'application', + 'cluster', + 'index' + ], + query: [] + }, + 'security.has_privileges_user_profile': { + path: [], + body: [ + 'uids', + 'privileges' + ], + query: [] + }, + 'security.invalidate_api_key': { + path: [], + body: [ + 'id', + 'ids', + 'name', + 'owner', + 'realm_name', + 'username' + ], + query: [] + }, + 'security.invalidate_token': { + path: [], + body: [ + 'token', + 'refresh_token', + 'realm_name', + 'username' + ], + query: [] + }, + 'security.oidc_authenticate': { + path: [], + body: [ + 'nonce', + 'realm', + 'redirect_uri', + 'state' + ], + query: [] + }, + 'security.oidc_logout': { + path: [], + body: [ + 'token', + 'refresh_token' + ], + query: [] + }, + 'security.oidc_prepare_authentication': { + path: [], + body: [ + 'iss', + 'login_hint', + 'nonce', + 'realm', + 'state' + ], + query: [] + }, + 'security.put_privileges': { + path: [], + body: [ + 'privileges' + ], + query: [ + 'refresh' + ] + }, + 'security.put_role': { + path: [ + 'name' + ], + body: [ + 'applications', + 'cluster', + 'global', + 'indices', + 'remote_indices', + 'remote_cluster', + 'metadata', + 'run_as', + 'description', + 'transient_metadata' + ], + query: [ + 'refresh' + ] + }, + 'security.put_role_mapping': { + path: [ + 'name' + ], + body: [ + 'enabled', + 'metadata', + 'roles', + 'role_templates', + 'rules', + 'run_as' + ], + query: [ + 'refresh' + ] + }, + 'security.put_user': { + path: [], + body: [ + 'username', + 'email', + 'full_name', + 'metadata', + 'password', + 'password_hash', + 'roles', + 'enabled' + ], + query: [ + 'refresh' + ] + }, + 'security.query_api_keys': { + path: [], + body: [ + 'aggregations', + 'aggs', + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [ + 'with_limited_by', + 'with_profile_uid', + 'typed_keys' + ] + }, + 'security.query_role': { + path: [], + body: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [] + }, + 'security.query_user': { + path: [], + body: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [ + 'with_profile_uid' + ] + }, + 'security.saml_authenticate': { + path: [], + body: [ + 'content', + 
'ids', + 'realm' + ], + query: [] + }, + 'security.saml_complete_logout': { + path: [], + body: [ + 'realm', + 'ids', + 'query_string', + 'content' + ], + query: [] + }, + 'security.saml_invalidate': { + path: [], + body: [ + 'acs', + 'query_string', + 'realm' + ], + query: [] + }, + 'security.saml_logout': { + path: [], + body: [ + 'token', + 'refresh_token' + ], + query: [] + }, + 'security.saml_prepare_authentication': { + path: [], + body: [ + 'acs', + 'realm', + 'relay_state' + ], + query: [] + }, + 'security.saml_service_provider_metadata': { + path: [ + 'realm_name' + ], + body: [], + query: [] + }, + 'security.suggest_user_profiles': { + path: [], + body: [ + 'name', + 'size', + 'data', + 'hint' + ], + query: [ + 'data' + ] + }, + 'security.update_api_key': { + path: [ + 'id' + ], + body: [ + 'role_descriptors', + 'metadata', + 'expiration' + ], + query: [] + }, + 'security.update_cross_cluster_api_key': { + path: [ + 'id' + ], + body: [ + 'access', + 'expiration', + 'metadata' + ], + query: [] + }, + 'security.update_settings': { + path: [], + body: [ + 'security', + 'security-profile', + 'security-tokens' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'security.update_user_profile_data': { + path: [ + 'uid' + ], + body: [ + 'labels', + 'data' + ], + query: [ + 'if_seq_no', + 'if_primary_term', + 'refresh' + ] + } + } } /** @@ -51,8 +673,12 @@ export default class Security { async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['access_token', 'grant_type', 'password', 'username'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.activate_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +700,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -95,7 +727,10 @@ export default class Security { async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.authenticate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -135,8 +770,12 @@ export default class Security { async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['names'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.bulk_delete_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -158,8 +797,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -179,8 +824,12 @@ export default class Security { async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['roles'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.bulk_put_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -202,8 +851,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -223,8 +878,12 @@ export default class Security { async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['expiration', 'ids', 'metadata', 'role_descriptors'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.bulk_update_api_keys'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -246,8 +905,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -267,8 +932,12 @@ export default class Security { async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise> async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] - const acceptedBody: string[] = ['password', 'password_hash'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.change_password'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -291,8 +960,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -322,7 +997,10 @@ export default class Security { async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ids'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_api_key_cache'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -364,7 +1042,10 @@ export default class Security { async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['application'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -406,7 +1087,10 @@ export default class Security { async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['realms'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_realms'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -448,7 +1132,10 @@ export default class Security { async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_roles'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -490,7 +1177,10 @@ export default class Security { async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_service_tokens'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -534,8 +1224,12 @@ export default class Security { async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['expiration', 'name', 'role_descriptors', 'metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.create_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -558,8 +1252,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -579,8 +1279,12 @@ export default class Security { async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['access', 'expiration', 'metadata', 'name'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.create_cross_cluster_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -602,8 +1306,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -623,7 +1333,10 @@ export default class Security { async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.create_service_token'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -674,8 +1387,12 @@ export default class Security { async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithMeta): Promise> async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['x509_certificate_chain'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.delegate_pki'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -697,8 +1414,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -718,7 +1441,10 @@ export default class Security { async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['application', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -761,7 +1487,10 @@ export default class Security { async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -803,7 +1532,10 @@ export default class Security { async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_role_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -845,7 +1577,10 @@ export default class Security { async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_service_token'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -889,7 +1624,10 @@ export default class Security { async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -931,7 +1669,10 @@ export default class Security { async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] + const { + path: acceptedPath + } = this.acceptedParams['security.disable_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -973,7 +1714,10 @@ export default class Security { async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] + const { + path: acceptedPath + } = this.acceptedParams['security.disable_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1015,7 +1759,10 @@ export default class Security { async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] + const { + path: acceptedPath + } = this.acceptedParams['security.enable_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1057,7 +1804,10 @@ export default class Security { async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] + const { + path: acceptedPath + } = this.acceptedParams['security.enable_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1099,7 +1849,10 @@ export default class Security { async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise> async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.enroll_kibana'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1139,7 +1892,10 @@ export default class Security { async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.enroll_node'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1179,7 +1935,10 @@ export default class Security { async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.get_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1219,7 +1978,10 @@ export default class Security { async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.get_builtin_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1259,7 +2021,10 @@ export default class Security { async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['application', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1313,7 +2078,10 @@ export default class Security { async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1363,7 +2131,10 @@ export default class Security { async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_role_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1413,7 +2184,10 @@ export default class Security { async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_service_accounts'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1467,7 +2241,10 @@ export default class Security { async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_service_credentials'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1510,7 +2287,10 @@ export default class Security { async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1550,8 +2330,12 @@ export default class Security { async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['grant_type', 'scope', 'password', 'kerberos_ticket', 'refresh_token', 'username'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.get_token'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1574,8 +2358,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -1595,7 +2385,10 @@ export default class Security {
   async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserResponse, unknown>>
   async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise<T.SecurityGetUserResponse>
   async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['username']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.get_user']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1645,7 +2438,10 @@ export default class Security {
   async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserPrivilegesResponse, unknown>>
   async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetUserPrivilegesResponse>
   async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.get_user_privileges']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1685,7 +2481,10 @@ export default class Security {
   async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetUserProfileResponse, unknown>>
   async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityGetUserProfileResponse>
   async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['uid']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.get_user_profile']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1727,8 +2526,12 @@ export default class Security {
   async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGrantApiKeyResponse, unknown>>
   async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityGrantApiKeyResponse>
   async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password', 'run_as']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.grant_api_key']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1750,8 +2553,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -1771,8 +2580,12 @@ export default class Security {
   async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityHasPrivilegesResponse, unknown>>
   async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityHasPrivilegesResponse>
   async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['user']
-    const acceptedBody: string[] = ['application', 'cluster', 'index']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.has_privileges']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1795,8 +2608,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -1826,8 +2645,12 @@ export default class Security {
   async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityHasPrivilegesUserProfileResponse, unknown>>
   async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityHasPrivilegesUserProfileResponse>
   async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['uids', 'privileges']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.has_privileges_user_profile']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1849,8 +2672,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -1870,8 +2699,12 @@ export default class Security {
   async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityInvalidateApiKeyResponse, unknown>>
   async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityInvalidateApiKeyResponse>
   async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['id', 'ids', 'name', 'owner', 'realm_name', 'username']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.invalidate_api_key']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1894,8 +2727,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
@@ -1915,8 +2754,12 @@ export default class Security {
   async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityInvalidateTokenResponse, unknown>>
   async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityInvalidateTokenResponse>
   async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['token', 'refresh_token', 'realm_name', 'username']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.invalidate_token']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1939,8 +2782,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -1960,8 +2809,12 @@ export default class Security {
   async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityOidcAuthenticateResponse, unknown>>
   async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise<T.SecurityOidcAuthenticateResponse>
   async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['nonce', 'realm', 'redirect_uri', 'state']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.oidc_authenticate']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1983,8 +2836,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2004,8 +2863,12 @@ export default class Security {
   async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityOidcLogoutResponse, unknown>>
   async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise<T.SecurityOidcLogoutResponse>
   async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['access_token', 'refresh_token']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.oidc_logout']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2027,8 +2890,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2048,8 +2917,12 @@ export default class Security {
   async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityOidcPrepareAuthenticationResponse, unknown>>
   async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise<T.SecurityOidcPrepareAuthenticationResponse>
   async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['iss', 'login_hint', 'nonce', 'realm', 'state']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.oidc_prepare_authentication']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2072,8 +2945,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2093,8 +2972,12 @@ export default class Security {
   async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutPrivilegesResponse, unknown>>
   async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityPutPrivilegesResponse>
   async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['privileges']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.put_privileges']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2106,8 +2989,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2127,8 +3016,12 @@ export default class Security {
   async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutRoleResponse, unknown>>
   async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityPutRoleResponse>
   async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'remote_indices', 'remote_cluster', 'metadata', 'run_as', 'description', 'transient_metadata']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.put_role']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2150,8 +3043,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2174,8 +3073,12 @@ export default class Security {
   async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutRoleMappingResponse, unknown>>
   async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityPutRoleMappingResponse>
   async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'role_templates', 'rules', 'run_as']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.put_role_mapping']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2197,8 +3100,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2221,8 +3130,12 @@ export default class Security {
   async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityPutUserResponse, unknown>>
   async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise<T.SecurityPutUserResponse>
   async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['username', 'email', 'full_name', 'metadata', 'password', 'password_hash', 'roles', 'enabled']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.put_user']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2244,8 +3157,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2268,8 +3187,12 @@ export default class Security {
   async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityQueryApiKeysResponse, unknown>>
   async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise<T.SecurityQueryApiKeysResponse>
   async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'from', 'sort', 'size', 'search_after']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.query_api_keys']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2292,8 +3215,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2313,8 +3242,12 @@ export default class Security {
   async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityQueryRoleResponse, unknown>>
   async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityQueryRoleResponse>
   async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.query_role']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2337,8 +3270,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2358,8 +3297,12 @@ export default class Security {
   async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityQueryUserResponse, unknown>>
   async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise<T.SecurityQueryUserResponse>
   async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.query_user']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2382,8 +3325,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2403,8 +3352,12 @@ export default class Security {
   async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlAuthenticateResponse, unknown>>
   async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlAuthenticateResponse>
   async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['content', 'ids', 'realm']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.saml_authenticate']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2426,8 +3379,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2447,8 +3406,12 @@ export default class Security {
   async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlCompleteLogoutResponse, unknown>>
   async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlCompleteLogoutResponse>
   async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['realm', 'ids', 'query_string', 'content']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.saml_complete_logout']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2470,8 +3433,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2491,8 +3460,12 @@ export default class Security {
   async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlInvalidateResponse, unknown>>
   async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlInvalidateResponse>
   async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['acs', 'query_string', 'realm']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.saml_invalidate']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2514,8 +3487,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2535,8 +3514,12 @@ export default class Security {
   async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlLogoutResponse, unknown>>
   async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlLogoutResponse>
   async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['token', 'refresh_token']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.saml_logout']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2558,8 +3541,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2579,8 +3568,12 @@ export default class Security {
   async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlPrepareAuthenticationResponse, unknown>>
   async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlPrepareAuthenticationResponse>
   async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['acs', 'realm', 'relay_state']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.saml_prepare_authentication']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2603,8 +3596,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2624,7 +3623,10 @@ export default class Security {
   async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySamlServiceProviderMetadataResponse, unknown>>
   async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise<T.SecuritySamlServiceProviderMetadataResponse>
   async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['realm_name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.saml_service_provider_metadata']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2666,8 +3668,12 @@ export default class Security {
   async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecuritySuggestUserProfilesResponse, unknown>>
   async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise<T.SecuritySuggestUserProfilesResponse>
   async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['name', 'size', 'data', 'hint']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.suggest_user_profiles']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2690,8 +3696,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
      } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2711,8 +3723,12 @@ export default class Security {
   async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityUpdateApiKeyResponse, unknown>>
   async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityUpdateApiKeyResponse>
   async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const acceptedBody: string[] = ['role_descriptors', 'metadata', 'expiration']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.update_api_key']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2734,8 +3750,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2758,8 +3780,12 @@ export default class Security {
   async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityUpdateCrossClusterApiKeyResponse, unknown>>
   async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityUpdateCrossClusterApiKeyResponse>
   async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const acceptedBody: string[] = ['access', 'expiration', 'metadata']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.update_cross_cluster_api_key']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2781,8 +3807,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
@@ -2805,8 +3837,12 @@ export default class Security {
   async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityUpdateSettingsResponse, unknown>>
   async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise<T.SecurityUpdateSettingsResponse>
   async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['security', 'security-profile', 'security-tokens']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.update_settings']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2829,8 +3865,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2850,8 +3892,12 @@ export default class Security {
   async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityUpdateUserProfileDataResponse, unknown>>
   async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise<T.SecurityUpdateUserProfileDataResponse>
   async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['uid']
-    const acceptedBody: string[] = ['labels', 'data']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.update_user_profile_data']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2873,8 +3919,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
      } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
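Every hunk above applies the same parameter-routing rule, so a condensed standalone sketch may help when reviewing them (this is illustrative only, not part of the patch; the function name `route` and the `Accepted` interface are invented for the example):

// Sketch of the shared rule: declared body keys go to the body, path keys
// build the URL, declared or common query keys go to the querystring, and
// any remaining key now falls through to the body instead of the querystring.
const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

interface Accepted { path: string[], body: string[], query: string[] }

function route (accepted: Accepted, params: Record<string, any>): { querystring: Record<string, any>, body: Record<string, any> } {
  const querystring: Record<string, any> = {}
  const body: Record<string, any> = {}
  for (const key in params) {
    if (accepted.body.includes(key)) {
      body[key] = params[key]
    } else if (accepted.path.includes(key)) {
      continue // consumed when the request path is built
    } else if (accepted.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key]
    } else {
      body[key] = params[key] // previously this fell through to the querystring
    }
  }
  return { querystring, body }
}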
diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts
index ffa3b9c39..99c32f3c9 100644
--- a/src/api/api/shutdown.ts
+++ b/src/api/api/shutdown.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,12 +21,55 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class Shutdown {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'shutdown.delete_node': {
+        path: [
+          'node_id'
+        ],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'shutdown.get_node': {
+        path: [
+          'node_id'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'shutdown.put_node': {
+        path: [
+          'node_id'
+        ],
+        body: [
+          'type',
+          'reason',
+          'allocation_delay',
+          'target_node_name'
+        ],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      }
+    }
   }
@@ -51,7 +80,10 @@ export default class Shutdown {
   async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownDeleteNodeResponse, unknown>>
   async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownDeleteNodeResponse>
   async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['shutdown.delete_node']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -93,7 +125,10 @@ export default class Shutdown {
   async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownGetNodeResponse, unknown>>
   async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownGetNodeResponse>
   async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['shutdown.get_node']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -143,8 +178,12 @@ export default class Shutdown {
   async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ShutdownPutNodeResponse, unknown>>
   async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise<T.ShutdownPutNodeResponse>
   async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
-    const acceptedBody: string[] = ['type', 'reason', 'allocation_delay', 'target_node_name']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['shutdown.put_node']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -166,8 +205,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts
index ba1689505..4393e380c 100644
--- a/src/api/api/simulate.ts
+++ b/src/api/api/simulate.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,12 +21,36 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class Simulate {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'simulate.ingest': {
+        path: [
+          'index'
+        ],
+        body: [
+          'docs',
+          'component_template_substitutions',
+          'index_template_substitutions',
+          'mapping_addition',
+          'pipeline_substitutions'
+        ],
+        query: [
+          'pipeline'
+        ]
+      }
+    }
   }
@@ -51,8 +61,12 @@ export default class Simulate {
   async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SimulateIngestResponse, unknown>>
   async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise<T.SimulateIngestResponse>
   async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const acceptedBody: string[] = ['docs', 'component_template_substitutions', 'index_template_subtitutions', 'mapping_addition', 'pipeline_substitutions']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['simulate.ingest']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -74,8 +88,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
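Under the 'simulate.ingest' table above, `pipeline` is declared as a query parameter and `docs` as a body parameter, so the serializer splits a single flat params object across the wire format. A hypothetical call (the client setup and node URL are assumptions, not part of the patch):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed endpoint

// Serializes as POST /_ingest/_simulate?pipeline=my-pipeline
// with body { "docs": [...] } — the caller never touches the wire layout.
await client.simulate.ingest({
  pipeline: 'my-pipeline',                 // listed under query
  docs: [{ _source: { message: 'hello' } }] // listed under body
})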
diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts
index 9e6a856f9..f1f5f5e31 100644
--- a/src/api/api/slm.ts
+++ b/src/api/api/slm.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,12 +21,107 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class Slm {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'slm.delete_lifecycle': {
+        path: [
+          'policy_id'
+        ],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'slm.execute_lifecycle': {
+        path: [
+          'policy_id'
+        ],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'slm.execute_retention': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'slm.get_lifecycle': {
+        path: [
+          'policy_id'
+        ],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'slm.get_stats': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'slm.get_status': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'slm.put_lifecycle': {
+        path: [
+          'policy_id'
+        ],
+        body: [
+          'config',
+          'name',
+          'repository',
+          'retention',
+          'schedule'
+        ],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'slm.start': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'slm.stop': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      }
+    }
   }
@@ -51,7 +132,10 @@ export default class Slm {
   async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmDeleteLifecycleResponse, unknown>>
   async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise<T.SlmDeleteLifecycleResponse>
   async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['policy_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['slm.delete_lifecycle']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -93,7 +177,10 @@ export default class Slm {
   async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmExecuteLifecycleResponse, unknown>>
   async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise<T.SlmExecuteLifecycleResponse>
   async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['policy_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['slm.execute_lifecycle']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -135,7 +222,10 @@ export default class Slm {
   async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmExecuteRetentionResponse, unknown>>
   async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise<T.SlmExecuteRetentionResponse>
   async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['slm.execute_retention']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -175,7 +265,10 @@ export default class Slm {
   async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmGetLifecycleResponse, unknown>>
   async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise<T.SlmGetLifecycleResponse>
   async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['policy_id']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['slm.get_lifecycle']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -225,7 +318,10 @@ export default class Slm {
   async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmGetStatsResponse, unknown>>
   async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise<T.SlmGetStatsResponse>
   async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['slm.get_stats']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -265,7 +361,10 @@ export default class Slm {
   async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmGetStatusResponse, unknown>>
   async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise<T.SlmGetStatusResponse>
   async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['slm.get_status']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -305,8 +404,12 @@ export default class Slm {
   async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmPutLifecycleResponse, unknown>>
   async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<T.SlmPutLifecycleResponse>
   async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['policy_id']
-    const acceptedBody: string[] = ['config', 'name', 'repository', 'retention', 'schedule']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['slm.put_lifecycle']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -328,8 +431,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -352,7 +461,10 @@ export default class Slm {
   async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmStartResponse, unknown>>
   async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise<T.SlmStartResponse>
   async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['slm.start']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -392,7 +504,10 @@ export default class Slm {
   async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SlmStopResponse, unknown>>
   async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise<T.SlmStopResponse>
   async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['slm.stop']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
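Each class now builds, in effect, a per-endpoint routing table in its constructor. Reduced to a single entry taken from the slm.ts hunk above, the shape of that table is:

// One entry of the acceptedParams registry: path segments build the URL,
// body keys go into the JSON payload, query keys into the querystring.
const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
  'slm.put_lifecycle': {
    path: ['policy_id'],
    body: ['config', 'name', 'repository', 'retention', 'schedule'],
    query: ['master_timeout', 'timeout']
  }
}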
diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts
index 3b37c9bdb..c472ac9bf 100644
--- a/src/api/api/snapshot.ts
+++ b/src/api/api/snapshot.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,12 +21,208 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class Snapshot {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'snapshot.cleanup_repository': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'snapshot.clone': {
+        path: [
+          'repository',
+          'snapshot',
+          'target_snapshot'
+        ],
+        body: [
+          'indices'
+        ],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'snapshot.create': {
+        path: [
+          'repository',
+          'snapshot'
+        ],
+        body: [
+          'expand_wildcards',
+          'feature_states',
+          'ignore_unavailable',
+          'include_global_state',
+          'indices',
+          'metadata',
+          'partial'
+        ],
+        query: [
+          'master_timeout',
+          'wait_for_completion'
+        ]
+      },
+      'snapshot.create_repository': {
+        path: [
+          'name'
+        ],
+        body: [
+          'repository'
+        ],
+        query: [
+          'master_timeout',
+          'timeout',
+          'verify'
+        ]
+      },
+      'snapshot.delete': {
+        path: [
+          'repository',
+          'snapshot'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'snapshot.delete_repository': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'snapshot.get': {
+        path: [
+          'repository',
+          'snapshot'
+        ],
+        body: [],
+        query: [
+          'after',
+          'from_sort_value',
+          'ignore_unavailable',
+          'index_details',
+          'index_names',
+          'include_repository',
+          'master_timeout',
+          'order',
+          'offset',
+          'size',
+          'slm_policy_filter',
+          'sort',
+          'verbose'
+        ]
+      },
+      'snapshot.get_repository': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'local',
+          'master_timeout'
+        ]
+      },
+      'snapshot.repository_analyze': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'blob_count',
+          'concurrency',
+          'detailed',
+          'early_read_node_count',
+          'max_blob_size',
+          'max_total_data_size',
+          'rare_action_probability',
+          'rarely_abort_writes',
+          'read_node_count',
+          'register_operation_count',
+          'seed',
+          'timeout'
+        ]
+      },
+      'snapshot.repository_verify_integrity': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'blob_thread_pool_concurrency',
+          'index_snapshot_verification_concurrency',
+          'index_verification_concurrency',
+          'max_bytes_per_sec',
+          'max_failed_shard_snapshots',
+          'meta_thread_pool_concurrency',
+          'snapshot_verification_concurrency',
+          'verify_blob_contents'
+        ]
+      },
+      'snapshot.restore': {
+        path: [
+          'repository',
+          'snapshot'
+        ],
+        body: [
+          'feature_states',
+          'ignore_index_settings',
+          'ignore_unavailable',
+          'include_aliases',
+          'include_global_state',
+          'index_settings',
+          'indices',
+          'partial',
+          'rename_pattern',
+          'rename_replacement'
+        ],
+        query: [
+          'master_timeout',
+          'wait_for_completion'
+        ]
+      },
+      'snapshot.status': {
+        path: [
+          'repository',
+          'snapshot'
+        ],
+        body: [],
+        query: [
+          'ignore_unavailable',
+          'master_timeout'
+        ]
+      },
+      'snapshot.verify_repository': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      }
+    }
   }
@@ -51,7 +233,10 @@ export default class Snapshot {
   async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCleanupRepositoryResponse, unknown>>
   async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotCleanupRepositoryResponse>
   async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['snapshot.cleanup_repository']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -93,8 +278,12 @@ export default class Snapshot {
   async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCloneResponse, unknown>>
   async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise<T.SnapshotCloneResponse>
   async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['repository', 'snapshot', 'target_snapshot']
-    const acceptedBody: string[] = ['indices']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['snapshot.clone']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -116,8 +305,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -142,8 +337,12 @@ export default class Snapshot {
   async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCreateResponse, unknown>>
   async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise<T.SnapshotCreateResponse>
   async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['repository', 'snapshot']
-    const acceptedBody: string[] = ['expand_wildcards', 'feature_states', 'ignore_unavailable', 'include_global_state', 'indices', 'metadata', 'partial']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['snapshot.create']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -165,8 +364,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -190,8 +395,12 @@ export default class Snapshot {
   async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotCreateRepositoryResponse, unknown>>
   async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotCreateRepositoryResponse>
   async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const acceptedBody: string[] = ['repository']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['snapshot.create_repository']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -203,8 +412,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -227,7 +442,10 @@ export default class Snapshot {
   async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotDeleteResponse, unknown>>
   async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise<T.SnapshotDeleteResponse>
   async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['repository', 'snapshot']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['snapshot.delete']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -270,7 +488,10 @@ export default class Snapshot {
   async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotDeleteRepositoryResponse, unknown>>
   async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotDeleteRepositoryResponse>
   async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['snapshot.delete_repository']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -312,7 +533,10 @@ export default class Snapshot {
   async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotGetResponse, unknown>>
   async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise<T.SnapshotGetResponse>
   async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['repository', 'snapshot']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['snapshot.get']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -355,7 +579,10 @@ export default class Snapshot {
   async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotGetRepositoryResponse, unknown>>
   async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotGetRepositoryResponse>
   async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['snapshot.get_repository']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -405,7 +632,10 @@ export default class Snapshot {
   async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotRepositoryAnalyzeResponse, unknown>>
   async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise<T.SnapshotRepositoryAnalyzeResponse>
   async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['snapshot.repository_analyze']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -447,7 +677,10 @@ export default class Snapshot {
   async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotRepositoryVerifyIntegrityResponse, unknown>>
   async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise<T.SnapshotRepositoryVerifyIntegrityResponse>
   async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['snapshot.repository_verify_integrity']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -489,8 +722,12 @@ export default class Snapshot {
   async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotRestoreResponse, unknown>>
   async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise<T.SnapshotRestoreResponse>
   async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['repository', 'snapshot']
-    const acceptedBody: string[] = ['feature_states', 'ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['snapshot.restore']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -512,8 +749,14 @@
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -537,7 +780,10 @@ export default class Snapshot {
   async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotStatusResponse, unknown>>
   async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise<T.SnapshotStatusResponse>
   async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['repository', 'snapshot']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['snapshot.status']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -591,7 +837,10 @@ export default class Snapshot {
   async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotVerifyRepositoryResponse, unknown>>
   async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise<T.SnapshotVerifyRepositoryResponse>
   async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['snapshot.verify_repository']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
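For 'snapshot.create' above, the table declares `wait_for_completion` as a query key and `indices`/`metadata` as body keys, so a single flat call is split across the URL and the payload. A hypothetical usage sketch (repository and snapshot names are invented; `client` is the instance from the earlier sketch):

// Serializes roughly as:
//   PUT /_snapshot/my_repo/snap_1?wait_for_completion=true
//   { "indices": "my-index-*", "metadata": { "taken_by": "backup-job" } }
await client.snapshot.create({
  repository: 'my_repo',               // path segment
  snapshot: 'snap_1',                  // path segment
  wait_for_completion: true,           // declared query parameter
  indices: 'my-index-*',               // declared body parameter
  metadata: { taken_by: 'backup-job' } // declared body parameter
})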
{ ...userQuery } : {} @@ -591,7 +837,10 @@ export default class Snapshot { async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.verify_repository'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index 871cb7139..2fa3d0b02 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,89 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Sql { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'sql.clear_cursor': { + path: [], + body: [ + 'cursor' + ], + query: [] + }, + 'sql.delete_async': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'sql.get_async': { + path: [ + 'id' + ], + body: [], + query: [ + 'delimiter', + 'format', + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'sql.get_async_status': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'sql.query': { + path: [], + body: [ + 'allow_partial_search_results', + 'catalog', + 'columnar', + 'cursor', + 'fetch_size', + 'field_multi_value_leniency', + 'filter', + 'index_using_frozen', + 'keep_alive', + 'keep_on_completion', + 'page_timeout', + 'params', + 'query', + 'request_timeout', + 'runtime_mappings', + 'time_zone', + 'wait_for_completion_timeout' + ], + query: [ + 'format' + ] + }, + 'sql.translate': { + path: [], + body: [ + 'fetch_size', + 'filter', + 'query', + 'time_zone' + ], + query: [] + } + } } /** @@ -51,8 +114,12 @@ export default class Sql { async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise { - const 
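The `sql.query` entry above makes the body/query split explicit: `format` is the lone query-string parameter, while `query`, `fetch_size`, and the rest are serialized into the body. A sketch, assuming a configured `client` and an illustrative `library` index:

const result = await client.sql.query({
  format: 'json',  // query string: ?format=json
  query: 'SELECT author, page_count FROM library ORDER BY page_count DESC',
  fetch_size: 5    // body parameter
})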
acceptedPath: string[] = [] - const acceptedBody: string[] = ['cursor'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.clear_cursor'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +141,14 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -95,7 +168,10 @@ export default class Sql { async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['sql.delete_async'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -137,7 +213,10 @@ export default class Sql { async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['sql.get_async'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -179,7 +258,10 @@ export default class Sql { async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['sql.get_async_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -221,8 +303,12 @@ export default class Sql { async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['allow_partial_search_results', 'catalog', 'columnar', 'cursor', 'fetch_size', 'field_multi_value_leniency', 'filter', 'index_using_frozen', 'keep_alive', 'keep_on_completion', 'page_timeout', 'params', 'query', 'request_timeout', 'runtime_mappings', 'time_zone', 'wait_for_completion_timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -245,8 +331,14 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -266,8 +358,12 @@ export default class Sql { async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise> async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['fetch_size', 'filter', 'query', 'time_zone'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.translate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -289,8 +385,14 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts index 29f25f090..1708b535e 100644 --- a/src/api/api/ssl.ts +++ b/src/api/api/ssl.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,24 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Ssl { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ssl.certificates': { + path: [], + body: [], + query: [] + } + } } /** @@ -51,7 +49,10 @@ export default class Ssl { async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ssl.certificates'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 379510816..fc39d46c3 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
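`ssl.certificates` is the degenerate case: all three arrays are empty, so the call takes no parameters at all (assuming a configured `client`):

const certificates = await client.ssl.certificates()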
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,81 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Synonyms { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'synonyms.delete_synonym': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'synonyms.delete_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'synonyms.get_synonym': { + path: [ + 'id' + ], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'synonyms.get_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'synonyms.get_synonyms_sets': { + path: [], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'synonyms.put_synonym': { + path: [ + 'id' + ], + body: [ + 'synonyms_set' + ], + query: [] + }, + 'synonyms.put_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [ + 'synonyms' + ], + query: [] + } + } } /** @@ -51,7 +106,10 @@ export default class Synonyms { async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.delete_synonym'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +151,10 @@ export default class Synonyms { async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.delete_synonym_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -136,7 +197,10 @@ export default class Synonyms { async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonym'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -178,7 +242,10 @@ export default class Synonyms { async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonym_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -221,7 +288,10 @@ export default class Synonyms { async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonyms_sets'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -261,8 +331,12 @@ export default class Synonyms { async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['synonyms_set'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['synonyms.put_synonym'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -284,8 +358,14 @@ export default class Synonyms { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -308,8 +388,12 @@ export default class Synonyms { async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] - const acceptedBody: string[] = ['synonyms'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['synonyms.put_synonym_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
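For `synonyms.putSynonym` the only body key is `synonyms_set`. A hedged sketch, with illustrative names for the set and rule:

await client.synonyms.putSynonym({
  id: 'my-synonym-set',  // path parameter
  synonyms_set: [        // body parameter
    { id: 'rule-1', synonyms: 'hello, hi, howdy' }
  ]
})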
{ ...userQuery } : {} @@ -331,8 +415,14 @@ export default class Synonyms { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index a8f7ccf20..c85a53d77 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,54 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Tasks { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'tasks.cancel': { + path: [ + 'task_id' + ], + body: [], + query: [ + 'actions', + 'nodes', + 'parent_task_id', + 'wait_for_completion' + ] + }, + 'tasks.get': { + path: [ + 'task_id' + ], + body: [], + query: [ + 'timeout', + 'wait_for_completion' + ] + }, + 'tasks.list': { + path: [], + body: [], + query: [ + 'actions', + 'detailed', + 'group_by', + 'nodes', + 'parent_task_id', + 'timeout', + 'wait_for_completion' + ] + } + } } /** @@ -51,7 +79,10 @@ export default class Tasks { async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = this.acceptedParams['tasks.cancel'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -101,7 +132,10 @@ export default class Tasks { async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = this.acceptedParams['tasks.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
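The `tasks.*` entries above carry everything in the query string, so a call like the following sketch (assuming a configured `client`) produces no request body at all:

const running = await client.tasks.list({
  detailed: true,      // query: include per-task descriptions
  actions: '*reindex'  // query: only reindex-related tasks
})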
{ ...userQuery } : {} @@ -143,7 +177,10 @@ export default class Tasks { async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise> async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['tasks.list'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index ad9fa1e0e..e12d731bf 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,30 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + terms_enum: { + path: [ + 'index' + ], + body: [ + 'field', + 'size', + 'timeout', + 'case_insensitive', + 'index_filter', + 'string', + 'search_after' + ], + query: [] + } +} /** * Get terms in an index. Discover terms that match a partial string in an index. This API is designed for low-latency look-ups used in auto-complete scenarios. > info > The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. @@ -45,8 +54,12 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['field', 'size', 'timeout', 'case_insensitive', 'index_filter', 'string', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.terms_enum + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
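An auto-complete style lookup matching the terms enum description above, assuming an index named `stackoverflow` with a keyword field `tags`:

const suggestions = await client.termsEnum({
  index: 'stackoverflow',  // path parameter
  field: 'tags',           // body: field to enumerate
  string: 'kiba'           // body: prefix to match, e.g. returns 'kibana'
})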
{ ...userQuery } : {} @@ -68,8 +81,14 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index c3f461487..a2f343d11 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,48 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + termvectors: { + path: [ + 'index', + 'id' + ], + body: [ + 'doc', + 'filter', + 'per_field_analyzer', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'term_statistics', + 'routing', + 'version', + 'version_type' + ], + query: [ + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'preference', + 'realtime', + 'routing', + 'term_statistics', + 'version', + 'version_type' + ] + } +} /** * Get term vector information. Get information and statistics about terms in the fields of a particular document. You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. For example: ``` GET /my-index-000001/_termvectors/1?fields=message ``` Fields can be specified using wildcards, similar to the multi match query. Term vectors are real-time by default, not near real-time. This can be changed by setting `realtime` parameter to `false`. You can request three types of values: _term information_, _term statistics_, and _field statistics_. By default, all term information and field statistics are returned for all fields but term statistics are excluded. **Term information** * term frequency in the field (always returned) * term positions (`positions: true`) * start and end offsets (`offsets: true`) * term payloads (`payloads: true`), as base64 encoded bytes If the requested information wasn't stored in the index, it will be computed on the fly if possible. 
Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. > warn > Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. **Behaviour** The term and field statistics are not accurate. Deleted documents are not taken into account. The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. @@ -45,8 +72,12 @@ export default async function TermvectorsApi (this: That, p export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'id'] - const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.termvectors + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +99,14 @@ export default async function TermvectorsApi (this: That, p } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index fd245e577..4d9997c24 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
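Mirroring the `GET /my-index-000001/_termvectors/1?fields=message` example quoted in the comment, the same request through the client looks like this sketch (assuming that index and document exist):

const vectors = await client.termvectors({
  index: 'my-index-000001',
  id: '1',
  fields: ['message'],
  positions: true,       // term positions
  offsets: true,         // start and end offsets
  term_statistics: true  // off by default, so request it explicitly
})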
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,93 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class TextStructure { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'text_structure.find_field_structure': { + path: [], + body: [], + query: [ + 'column_names', + 'delimiter', + 'documents_to_sample', + 'ecs_compatibility', + 'explain', + 'field', + 'format', + 'grok_pattern', + 'index', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.find_message_structure': { + path: [], + body: [ + 'messages' + ], + query: [ + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.find_structure': { + path: [], + body: [ + 'text_files' + ], + query: [ + 'charset', + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'has_header_row', + 'line_merge_size_limit', + 'lines_to_sample', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.test_grok_pattern': { + path: [], + body: [ + 'grok_pattern', + 'text' + ], + query: [ + 'ecs_compatibility' + ] + } + } } /** @@ -51,7 +118,10 @@ export default class TextStructure { async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['text_structure.find_field_structure'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -90,8 +160,12 @@ export default class TextStructure { async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['messages'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['text_structure.find_message_structure'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -113,8 +187,14 @@ export default class TextStructure { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -134,8 +214,12 @@ export default class TextStructure { async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['text_files'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['text_structure.find_structure'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -147,8 +231,14 @@ export default class TextStructure { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -168,8 +258,12 @@ export default class TextStructure { async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['grok_pattern', 'text'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['text_structure.test_grok_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -191,8 +285,14 @@ export default class TextStructure { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index 4872de3e1..dc47023c8 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
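`text_structure.test_grok_pattern` sends `grok_pattern` and `text` in the body, with `ecs_compatibility` as its only query parameter. A sketch with an illustrative pattern:

const match = await client.textStructure.testGrokPattern({
  grok_pattern: 'Hello %{WORD:first_name} %{WORD:last_name}',  // body
  text: ['Hello John Doe']  // body: one match result per input line
})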
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,170 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Transform { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'transform.delete_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'force', + 'delete_dest_index', + 'timeout' + ] + }, + 'transform.get_node_stats': { + path: [], + body: [], + query: [] + }, + 'transform.get_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'exclude_generated' + ] + }, + 'transform.get_transform_stats': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'timeout' + ] + }, + 'transform.preview_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + 'pivot', + 'source', + 'settings', + 'sync', + 'retention_policy', + 'latest' + ], + query: [ + 'timeout' + ] + }, + 'transform.put_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + 'latest', + '_meta', + 'pivot', + 'retention_policy', + 'settings', + 'source', + 'sync' + ], + query: [ + 'defer_validation', + 'timeout' + ] + }, + 'transform.reset_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'transform.schedule_now_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'transform.start_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'timeout', + 'from' + ] + }, + 'transform.stop_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'force', + 'timeout', + 'wait_for_checkpoint', + 'wait_for_completion' + ] + }, + 'transform.update_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + '_meta', + 'source', + 'settings', + 'sync', + 'retention_policy' + ], + query: [ + 'defer_validation', + 'timeout' + ] + }, + 'transform.upgrade_transforms': { + path: [], + body: [], + query: [ + 'dry_run', + 'timeout' + ] + } + } } /** @@ -51,7 +195,10 @@ export default class Transform { async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.delete_transform'] + const userQuery = 
params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -93,7 +240,10 @@ export default class Transform { async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['transform.get_node_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -132,7 +282,10 @@ export default class Transform { async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.get_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -182,7 +335,10 @@ export default class Transform { async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.get_transform_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -224,8 +380,12 @@ export default class Transform { async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise> async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['transform.preview_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -248,8 +408,14 @@ export default class Transform { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -279,8 +445,12 @@ export default class Transform { async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const acceptedBody: string[] = ['dest', 'description', 'frequency', 'latest', '_meta', 'pivot', 'retention_policy', 'settings', 'source', 'sync'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['transform.put_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -302,8 +472,14 @@ export default class Transform { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -326,7 +502,10 @@ export default class Transform { async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.reset_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -368,7 +547,10 @@ export default class Transform { async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.schedule_now_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -410,7 +592,10 @@ export default class Transform { async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.start_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
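`transform.put_transform` has the widest body surface in this file. A minimal pivot transform as a sketch, assuming the Kibana sample e-commerce index is available:

await client.transform.putTransform({
  transform_id: 'ecommerce-customer-spend',  // path parameter
  source: { index: 'kibana_sample_data_ecommerce' },
  dest: { index: 'ecommerce-customer-spend' },
  pivot: {
    group_by: { customer_id: { terms: { field: 'customer_id' } } },
    aggregations: { total_spend: { sum: { field: 'taxful_total_price' } } }
  }
})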
{ ...userQuery } : {} @@ -452,7 +637,10 @@ export default class Transform { async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.stop_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -494,8 +682,12 @@ export default class Transform { async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const acceptedBody: string[] = ['dest', 'description', 'frequency', '_meta', 'source', 'settings', 'sync', 'retention_policy'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['transform.update_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -517,8 +709,14 @@ export default class Transform { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -541,7 +739,10 @@ export default class Transform { async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['transform.upgrade_transforms'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/update.ts b/src/api/api/update.ts index 06d06ae63..264881ce8 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,45 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + update: { + path: [ + 'id', + 'index' + ], + body: [ + 'detect_noop', + 'doc', + 'doc_as_upsert', + 'script', + 'scripted_upsert', + '_source', + 'upsert' + ], + query: [ + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'lang', + 'refresh', + 'require_alias', + 'retry_on_conflict', + 'routing', + 'timeout', + 'wait_for_active_shards', + '_source', + '_source_excludes', + '_source_includes' + ] + } +} /** * Update a document. Update a document by running a script or passing a partial document. If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation: * Gets the document (collocated with the shard) from the index. * Runs the specified script. * Indexes the result. The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). @@ -45,8 +69,12 @@ export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise> export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', '_source', 'upsert'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.update + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +96,14 @@ export default async function UpdateApi = { + update_by_query: { + path: [ + 'index' + ], + body: [ + 'max_docs', + 'query', + 'script', + 'slice', + 'conflicts' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'conflicts', + 'default_operator', + 'df', + 'expand_wildcards', + 'from', + 'ignore_unavailable', + 'lenient', + 'max_docs', + 'pipeline', + 'preference', + 'q', + 'refresh', + 'request_cache', + 'requests_per_second', + 'routing', + 'scroll', + 'scroll_size', + 'search_timeout', + 'search_type', + 'slices', + 'sort', + 'stats', + 'terminate_after', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'wait_for_completion' + ] + } +} /** * Update documents. Updates documents that match the specified query. 
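The update flow described above (fetch the document, run the script or merge the partial document, reindex the result) maps onto calls like this sketch, with illustrative index and document names:

await client.update({
  index: 'my-index',   // path parameter
  id: '1',             // path parameter
  doc: { views: 42 },  // body: partial document merged into the existing one
  doc_as_upsert: true  // body: index `doc` as a new document if none exists
})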
If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `index` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick, they are not rolled back. **Throttling update requests** To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is 1000, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: * You can see these requests in the tasks APIs. 
These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with slices will cancel each sub-request. * Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. * Update performance scales linearly across available resources with the number of slices. Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. **Update the document source** Update by query supports scripts to update the document source. As with the update API, you can set `ctx.op` to change the operation that is performed. Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. The update by query operation skips updating the document and increments the `noop` counter. Set `ctx.op = "delete"` if your script decides that the document should be deleted. The update by query operation deletes the document and increments the `deleted` counter. Update by query supports only `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error. This API enables you to only modify the source of matching documents; you cannot move them. @@ -45,8 +84,12 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice', 'conflicts'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.update_by_query + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -68,8 +111,14 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts index eb96ad0ed..976be5d2d 100644 --- a/src/api/api/update_by_query_rethrottle.ts +++ b/src/api/api/update_by_query_rethrottle.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,7 +21,22 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + update_by_query_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} /** * Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. @@ -45,7 +46,10 @@ export default async function UpdateByQueryRethrottleApi (this: That, params: T. export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = acceptedParams.update_by_query_rethrottle + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index 7e795d62b..289cbafc3 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V.
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,148 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Watcher { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'watcher.ack_watch': { + path: [ + 'watch_id', + 'action_id' + ], + body: [], + query: [] + }, + 'watcher.activate_watch': { + path: [ + 'watch_id' + ], + body: [], + query: [] + }, + 'watcher.deactivate_watch': { + path: [ + 'watch_id' + ], + body: [], + query: [] + }, + 'watcher.delete_watch': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'watcher.execute_watch': { + path: [ + 'id' + ], + body: [ + 'action_modes', + 'alternative_input', + 'ignore_condition', + 'record_execution', + 'simulated_actions', + 'trigger_data', + 'watch' + ], + query: [ + 'debug' + ] + }, + 'watcher.get_settings': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'watcher.get_watch': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'watcher.put_watch': { + path: [ + 'id' + ], + body: [ + 'actions', + 'condition', + 'input', + 'metadata', + 'throttle_period', + 'throttle_period_in_millis', + 'transform', + 'trigger' + ], + query: [ + 'active', + 'if_primary_term', + 'if_seq_no', + 'version' + ] + }, + 'watcher.query_watches': { + path: [], + body: [ + 'from', + 'size', + 'query', + 'sort', + 'search_after' + ], + query: [] + }, + 'watcher.start': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'watcher.stats': { + path: [ + 'metric' + ], + body: [], + query: [ + 'emit_stacktraces', + 'metric' + ] + }, + 'watcher.stop': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'watcher.update_settings': { + path: [], + body: [ + 'index.auto_expand_replicas', + 'index.number_of_replicas' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** @@ -51,7 +173,10 @@ export default class Watcher { async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['watch_id', 'action_id'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.ack_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -101,7 +226,10 @@ export default class Watcher { async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['watch_id'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.activate_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -143,7 +271,10 @@ export default class Watcher { async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['watch_id'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.deactivate_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -185,7 +316,10 @@ export default class Watcher { async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.delete_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -227,8 +361,12 @@ export default class Watcher { async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['action_modes', 'alternative_input', 'ignore_condition', 'record_execution', 'simulated_actions', 'trigger_data', 'watch'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['watcher.execute_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -251,8 +389,14 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -282,7 +426,10 @@ export default class Watcher { async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['watcher.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -322,7 +469,10 @@ export default class Watcher { async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.get_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -364,8 +514,12 @@ export default class Watcher { async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'throttle_period_in_millis', 'transform', 'trigger'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['watcher.put_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -387,8 +541,14 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -411,8 +571,12 @@ export default class Watcher { async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['from', 'size', 'query', 'sort', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['watcher.query_watches'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -435,8 +599,14 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -456,7 +626,10 @@ export default class Watcher { async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise> async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['watcher.start'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -496,7 +669,10 @@ export default class Watcher { async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['metric'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -546,7 +722,10 @@ export default class Watcher { async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['watcher.stop'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -586,8 +765,12 @@ export default class Watcher { async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['index.auto_expand_replicas', 'index.number_of_replicas'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['watcher.update_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -610,8 +793,14 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts index 9e6a66f7b..858e52869 100644 --- a/src/api/api/xpack.ts +++ b/src/api/api/xpack.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,35 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Xpack { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'xpack.info': { + path: [], + body: [], + query: [ + 'categories', + 'accept_enterprise', + 'human' + ] + }, + 'xpack.usage': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** @@ -51,7 +60,10 @@ export default class Xpack { async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['xpack.info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -91,7 +103,10 @@ export default class Xpack { async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['xpack.usage'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/index.ts b/src/api/index.ts index f69eb473d..cfa328a82 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ diff --git a/src/api/types.ts b/src/api/types.ts index e242e803c..fd6f4ac2c 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable @typescript-eslint/array-type */ @@ -212,12 +198,22 @@ export interface CreateRequest extends RequestBase { id: Id /** The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index. */ index: IndexName + /** Only perform the operation if the document has this primary term. */ + if_primary_term?: long + /** Only perform the operation if the document has this sequence number. */ + if_seq_no?: SequenceNumber /** If `true`, the document source is included in the error message in case of parsing errors. */ include_source_on_error?: boolean + /** Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. */ + op_type?: OpType /** The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. */ refresh?: Refresh /** If `true`, the destination must be an index alias.
*/ + require_alias?: boolean + /** If `true`, the request's actions must target a data stream (existing or to be created). */ + require_data_stream?: boolean /** A custom value that is used to route operations to a specific shard. */ routing?: Routing /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. */ @@ -230,9 +226,9 @@ export interface CreateRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards document?: TDocument /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } } export type CreateResponse = WriteResponseBase @@ -2514,7 +2510,17 @@ export interface TermvectorsRequest extends RequestBase { index: IndexName /** A unique identifier for the document. */ id?: Id - /** A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ + /** The node or shard the operation should be performed on. It is random by default. */ + preference?: string + /** If true, the request is real-time as opposed to near-real-time. */ + realtime?: boolean + /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ + doc?: TDocument + /** Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document.
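(Illustration only, not part of the diff: a minimal sketch of a term-vectors call exercising the `doc` and `filter` fields described here, assuming an instantiated `Client` named `client` and a hypothetical index name.)
```
// Retrieve term vectors for an artificial document and keep
// just the 25 best-scoring terms.
const tv = await client.termvectors({
  index: 'my-index',                   // hypothetical index
  doc: { text: 'some sample text' },   // artificial document, not indexed
  term_statistics: true,
  filter: { max_num_terms: 25, min_term_freq: 1 }
})
```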
This feature works in a similar manner to the second phase of the More Like This Query. */ + filter?: TermvectorsFilter + /** Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. */ + per_field_analyzer?: Record + /** A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields /** If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). */ field_statistics?: boolean @@ -2524,28 +2530,18 @@ export interface TermvectorsRequest extends RequestBase { payloads?: boolean /** If `true`, the response includes term positions. */ positions?: boolean - /** The node or shard the operation should be performed on. It is random by default. */ - preference?: string - /** If true, the request is real-time as opposed to near-real-time. */ - realtime?: boolean - /** A custom value that is used to route operations to a specific shard. */ - routing?: Routing /** If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact. */ term_statistics?: boolean + /** A custom value that is used to route operations to a specific shard. */ + routing?: Routing /** If `true`, returns the document version as part of a hit. */ version?: VersionNumber /** The version type. */ version_type?: VersionType - /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ - doc?: TDocument - /** Filter terms based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query. */ - filter?: TermvectorsFilter - /** Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. */ - per_field_analyzer?: Record /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, id?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, doc?: never, filter?: never, per_field_analyzer?: never } + body?: string | { [key: string]: any } & { index?: never, id?: never, preference?: never, realtime?: never, doc?: never, filter?: never, per_field_analyzer?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, term_statistics?: never, routing?: never, version?: never, version_type?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { index?: never, id?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, doc?: never, filter?: never, per_field_analyzer?: never } + querystring?: { [key: string]: any } & { index?: never, id?: never, preference?: never, realtime?: never, doc?: never, filter?: never, per_field_analyzer?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, term_statistics?: never, routing?: never, version?: never, version_type?: never } } export interface TermvectorsResponse { @@ -2775,7 +2771,6 @@ export interface BulkIndexByScrollFailure { id: Id index: IndexName status: integer - type: string } export interface BulkStats { @@ -5045,7 +5040,7 @@ export interface AggregationsWeightedAverageValue { export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase { } -export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer +export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | 
AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer export interface AnalysisArabicAnalyzer { type: 'arabic' @@ -5481,17 +5476,6 @@ export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase { discard_compound_token?: boolean } -export type AnalysisLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Brazilian' | 'Bulgarian' | 'Catalan' | 'Chinese' | 'Cjk' | 'Czech' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'Galician' | 'German' | 'Greek' | 'Hindi' | 'Hungarian' | 'Indonesian' | 'Irish' | 'Italian' | 'Latvian' | 'Norwegian' | 'Persian' | 'Portuguese' | 'Romanian' | 'Russian' | 'Sorani' | 'Spanish' | 'Swedish' | 'Turkish' | 'Thai' - -export interface AnalysisLanguageAnalyzer { - type: 'language' - version?: VersionString - language: AnalysisLanguage - stem_exclusion: string[] - stopwords?: AnalysisStopWords - stopwords_path?: string -} - export interface AnalysisLatvianAnalyzer { type: 'latvian' stopwords?: AnalysisStopWords @@ -6016,6 +6000,8 @@ export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase { format?: string ignore_malformed?: boolean index?: boolean + script?: Script | string + on_script_error?: MappingOnScriptError null_value?: DateTime precision_step?: integer type: 'date_nanos' @@ -6027,6 +6013,8 @@ export interface MappingDateProperty extends MappingDocValuesPropertyBase { format?: string ignore_malformed?: boolean index?: boolean + script?: Script | string + on_script_error?: MappingOnScriptError null_value?: DateTime precision_step?: integer locale?: string @@ -6103,7 +6091,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { export interface MappingDynamicTemplate { mapping?: MappingProperty - runtime?: MappingProperty + runtime?: MappingRuntimeField match?: string | string[] path_match?: string | string[] unmatch?: string | string[] @@ -6167,6 +6155,7 @@ export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { coerce?: boolean ignore_malformed?: boolean ignore_z_value?: boolean + index?: boolean orientation?: MappingGeoOrientation strategy?: MappingGeoStrategy type: 'geo_shape' @@ -6298,7 +6287,7 @@ export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase export interface MappingObjectProperty extends MappingCorePropertyBase { enabled?: boolean - subobjects?: boolean + subobjects?: MappingSubobjects type?: 'object' } @@ -6369,7 +6358,7 @@ export interface MappingRuntimeFieldFetchFields { format?: string } -export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' | 'lookup' +export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'geo_shape' | 'ip' | 'keyword' | 'long' | 'lookup' export type MappingRuntimeFields = Record @@ -6395,7 +6384,8 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase export interface MappingSemanticTextProperty { type: 'semantic_text' meta?: Record - inference_id: Id + inference_id?: Id + search_inference_id?: Id } export interface MappingShapeProperty extends MappingDocValuesPropertyBase { @@ -6430,6 +6420,8 @@ export interface MappingSparseVectorProperty extends MappingPropertyBase { type: 'sparse_vector' } +export type MappingSubobjects = boolean | 'true' | 'false' | 'auto' + export interface MappingSuggestContext { name: Name path?: Field @@ -6481,7 +6473,7 @@ export interface 
MappingTypeMapping { date_detection?: boolean dynamic?: MappingDynamicMapping dynamic_date_formats?: string[] - dynamic_templates?: Record[] + dynamic_templates?: Partial>[] _field_names?: MappingFieldNamesField index_field?: MappingIndexField _meta?: Metadata @@ -6492,7 +6484,7 @@ export interface MappingTypeMapping { _source?: MappingSourceField runtime?: Record enabled?: boolean - subobjects?: boolean + subobjects?: MappingSubobjects _data_stream_timestamp?: MappingDataStreamTimestamp } @@ -6677,6 +6669,12 @@ export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys export type QueryDslGeoExecution = 'memory' | 'indexed' +export interface QueryDslGeoGridQuery extends QueryDslQueryBase { + geogrid?: GeoTile + geohash?: GeoHash + geohex?: GeoHexCell +} + export interface QueryDslGeoPolygonPoints { points: GeoLocation[] } @@ -6972,6 +6970,7 @@ export interface QueryDslQueryContainer { fuzzy?: Partial> geo_bounding_box?: QueryDslGeoBoundingBoxQuery geo_distance?: QueryDslGeoDistanceQuery + geo_grid?: Partial> geo_polygon?: QueryDslGeoPolygonQuery geo_shape?: QueryDslGeoShapeQuery has_child?: QueryDslHasChildQuery @@ -11927,7 +11926,9 @@ export type EsqlTableValuesLongDouble = double | double[] export type EsqlTableValuesLongValue = long | long[] export interface EsqlAsyncQueryRequest extends RequestBase { -/** The character to use between values within a CSV row. It is valid only for the CSV format. */ +/** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. */ + allow_partial_results?: boolean + /** The character to use between values within a CSV row. It is valid only for the CSV format. */ delimiter?: string /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean @@ -11937,8 +11938,6 @@ export interface EsqlAsyncQueryRequest extends RequestBase { keep_alive?: Duration /** Indicates whether the query and its results are stored in the cluster. If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. */ keep_on_completion?: boolean - /** The period to wait for the request to finish. By default, the request waits for 1 second for the query results. If the query completes during this period, results are returned Otherwise, a query ID is returned that can later be used to retrieve the results. */ - wait_for_completion_timeout?: Duration /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */ columnar?: boolean /** Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. */ @@ -11954,10 +11953,12 @@ export interface EsqlAsyncQueryRequest extends RequestBase { tables?: Record> /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. */ include_ccs_metadata?: boolean + /** The period to wait for the request to finish. 
By default, the request waits for 1 second for the query results. If the query completes during this period, results are returned Otherwise, a query ID is returned that can later be used to retrieve the results. */ + wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } + body?: string | { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } + querystring?: { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never } } export type EsqlAsyncQueryResponse = EsqlResult @@ -12010,6 +12011,8 @@ export interface EsqlQueryRequest extends RequestBase { delimiter?: string /** Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. */ drop_null_columns?: boolean + /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. */ + allow_partial_results?: boolean /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */ columnar?: boolean /** Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. */ @@ -12026,9 +12029,9 @@ export interface EsqlQueryRequest extends RequestBase { /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. */ include_ccs_metadata?: boolean /** All values in `body` will be added to the request body. 
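(Illustration only, not part of the diff: a hedged sketch of the new `allow_partial_results` flag on the synchronous ES|QL endpoint, assuming an instantiated `Client` named `client` and a hypothetical query.)
```
// Tolerate shard failures instead of failing the whole query.
const resp = await client.esql.query({
  query: 'FROM my-index | LIMIT 10',   // hypothetical ES|QL query
  allow_partial_results: true
})
```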
*/ - body?: string | { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } + body?: string | { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } + querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } } export type EsqlQueryResponse = EsqlResult @@ -12669,6 +12672,7 @@ export interface IndicesDataStream { system?: boolean template: Name timestamp_field: IndicesDataStreamTimestampField + index_mode?: IndicesIndexMode } export interface IndicesDataStreamIndex { @@ -12677,6 +12681,7 @@ export interface IndicesDataStreamIndex { ilm_policy?: Name managed_by?: IndicesManagedBy prefer_ilm?: boolean + index_mode?: IndicesIndexMode } export interface IndicesDataStreamLifecycle { @@ -12738,6 +12743,8 @@ export interface IndicesFielddataFrequencyFilter { export type IndicesIndexCheckOnStartup = boolean | 'true' | 'false' | 'checksum' +export type IndicesIndexMode = 'standard' | 'time_series' | 'logsdb' | 'lookup' + export interface IndicesIndexRouting { allocation?: IndicesIndexRoutingAllocation rebalance?: IndicesIndexRoutingRebalance @@ -12800,7 +12807,7 @@ export interface IndicesIndexSettingsKeys { routing_partition_size?: SpecUtilsStringified load_fixed_bitset_filters_eagerly?: boolean hidden?: boolean | string - auto_expand_replicas?: string + auto_expand_replicas?: SpecUtilsWithNullValue merge?: IndicesMerge search?: IndicesSettingsSearch refresh_interval?: Duration @@ -13097,7 +13104,7 @@ export interface IndicesSoftDeletes { retention_lease?: IndicesRetentionLease } -export type IndicesSourceMode = 'DISABLED' | 'STORED' | 'SYNTHETIC' +export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic' export interface IndicesStorage { type: IndicesStorageType @@ -13618,12 +13625,16 @@ export type IndicesExistsAliasResponse = boolean export interface IndicesExistsIndexTemplateRequest extends RequestBase { /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name: Name + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ + local?: boolean + /** If true, returns settings in flat format. */ + flat_settings?: boolean /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. 
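(Illustration only, not part of the diff: the new `local` and `flat_settings` parameters in a sketch; the template name is hypothetical and the call assumes an instantiated `Client` named `client`.)
```
// Existence checks resolve to a boolean.
const exists = await client.indices.existsIndexTemplate({
  name: 'logs-template',   // hypothetical template name
  local: true,
  flat_settings: true
})
```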
*/ - body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never } } export type IndicesExistsIndexTemplateResponse = boolean @@ -13712,12 +13723,10 @@ export interface IndicesFieldUsageStatsRequest extends RequestBase { ignore_unavailable?: boolean /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ fields?: Fields - /** The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ - wait_for_active_shards?: WaitForActiveShards /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never, wait_for_active_shards?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never, wait_for_active_shards?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never } } export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody @@ -14275,7 +14284,7 @@ export interface IndicesPutMappingRequest extends RequestBase { /** If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string. */ dynamic_date_formats?: string[] /** Specify dynamic templates for the mapping. */ - dynamic_templates?: Record | Record[] + dynamic_templates?: Partial>[] /** Control whether field names are enabled for the index. */ _field_names?: MappingFieldNamesField /** A mapping type can have custom meta data associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata. */ @@ -14313,13 +14322,15 @@ export interface IndicesPutSettingsRequest extends RequestBase { master_timeout?: Duration /** If `true`, existing index settings remain unchanged. */ preserve_existing?: boolean + /** Whether to close and reopen the index to apply non-dynamic settings. If set to `true` the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes. */ + reopen?: boolean /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration settings?: IndicesIndexSettings /** All values in `body` will be added to the request body. 
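(Illustration only, not part of the diff: a sketch of the new `reopen` flag, which lets a non-dynamic setting be applied by temporarily closing and reopening the index; the index name and setting are assumptions, and the call assumes an instantiated `Client` named `client`.)
```
// `index.codec` is a static setting, so it needs `reopen: true`.
await client.indices.putSettings({
  index: 'my-index',   // hypothetical index
  reopen: true,
  settings: { 'index.codec': 'best_compression' }
})
```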
*/ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, timeout?: never, settings?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, reopen?: never, timeout?: never, settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, timeout?: never, settings?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, reopen?: never, timeout?: never, settings?: never } } export type IndicesPutSettingsResponse = AcknowledgedResponseBase @@ -14331,6 +14342,7 @@ export interface IndicesPutTemplateRequest extends RequestBase { create?: boolean /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** User defined reason for creating/updating the index template */ cause?: string /** Aliases for the index. */ aliases?: Record @@ -14503,10 +14515,12 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { expand_wildcards?: ExpandWildcards /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** Changed resource to reload analyzers from if applicable */ + resource?: string /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, resource?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, resource?: never } } export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult @@ -14592,6 +14606,8 @@ export interface IndicesRolloverRequest extends RequestBase { timeout?: Duration /** The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards + /** If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams. */ + lazy?: boolean /** Aliases for the target index. Data streams do not support this parameter. */ aliases?: Record /** Conditions for the rollover. If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. 
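(Illustration only, not part of the diff: a sketch of conditional rollover against these semantics, assuming an instantiated `Client` named `client` and a hypothetical write alias.)
```
// Roll over when either max_* condition is met,
// provided every min_* condition also holds.
await client.indices.rollover({
  alias: 'logs-write',   // hypothetical rollover alias
  conditions: {
    max_age: '7d',
    max_docs: 10000000,
    min_docs: 1
  }
})
```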
If this parameter is not specified, Elasticsearch performs the rollover unconditionally. If conditions are specified, at least one of them must be a `max_*` condition. The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. */ @@ -14601,9 +14617,9 @@ export interface IndicesRolloverRequest extends RequestBase { /** Configuration options for the index. Data streams do not support this parameter. */ settings?: Record /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } + body?: string | { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, lazy?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } + querystring?: { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, lazy?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } } export interface IndicesRolloverResponse { @@ -14768,14 +14784,18 @@ export interface IndicesShrinkResponse { export interface IndicesSimulateIndexTemplateRequest extends RequestBase { /** Name of the index to simulate */ name: Name + /** Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one */ + create?: boolean + /** User defined reason for dry-run creating the new template for simulation purposes */ + cause?: string /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, include_defaults?: never } + body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, include_defaults?: never } + querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never } } export interface IndicesSimulateIndexTemplateResponse { @@ -14793,6 +14813,8 @@ export interface IndicesSimulateTemplateRequest extends RequestBase { name?: Name /** If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. 
*/ create?: boolean + /** User defined reason for dry-run creating the new template for simulation purposes */ + cause?: string /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** If true, returns all relevant default configurations for the index template. */ @@ -14818,9 +14840,9 @@ export interface IndicesSimulateTemplateRequest extends RequestBase { /** Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } + body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } + querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } } export interface IndicesSimulateTemplateResponse { @@ -15126,6 +15148,10 @@ export interface IndicesValidateQueryResponse { error?: string } +export interface InferenceCompletionInferenceResult { + completion: InferenceCompletionResult[] +} + export interface InferenceCompletionResult { result: string } @@ -15157,23 +15183,37 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi task_type: InferenceTaskType } -export interface InferenceInferenceResult { - text_embedding_bytes?: InferenceTextEmbeddingByteResult[] - text_embedding_bits?: InferenceTextEmbeddingByteResult[] - text_embedding?: InferenceTextEmbeddingResult[] - sparse_embedding?: InferenceSparseEmbeddingResult[] - completion?: InferenceCompletionResult[] - rerank?: InferenceRankedDocument[] -} - export interface InferenceRankedDocument { index: integer relevance_score: float text?: string } +export interface InferenceRateLimitSetting { + requests_per_minute?: integer +} + +export interface InferenceRequestChatCompletionBase extends RequestBase { + messages: InferenceChatCompletionUnifiedMessage[] + model?: string + max_completion_tokens?: long + stop?: string[] + temperature?: float + tool_choice?: InferenceChatCompletionUnifiedCompletionToolType + tools?: 
InferenceChatCompletionUnifiedCompletionTool[] + top_p?: float +} + +export interface InferenceRerankedInferenceResult { + rerank: InferenceRankedDocument[] +} + export type InferenceServiceSettings = any +export interface InferenceSparseEmbeddingInferenceResult { + sparse_embedding: InferenceSparseEmbeddingResult[] +} + export interface InferenceSparseEmbeddingResult { embedding: InferenceSparseVector } @@ -15182,16 +15222,100 @@ export type InferenceSparseVector = Record export type InferenceTaskSettings = any -export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' +export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion' export interface InferenceTextEmbeddingByteResult { embedding: InferenceDenseByteVector } +export interface InferenceTextEmbeddingInferenceResult { + text_embedding_bytes?: InferenceTextEmbeddingByteResult[] + text_embedding_bits?: InferenceTextEmbeddingByteResult[] + text_embedding?: InferenceTextEmbeddingResult[] +} + export interface InferenceTextEmbeddingResult { embedding: InferenceDenseVector } +export interface InferenceChatCompletionUnifiedCompletionTool { + type: string + function: InferenceChatCompletionUnifiedCompletionToolFunction +} + +export interface InferenceChatCompletionUnifiedCompletionToolChoice { + type: string + function: InferenceChatCompletionUnifiedCompletionToolChoiceFunction +} + +export interface InferenceChatCompletionUnifiedCompletionToolChoiceFunction { + name: string +} + +export interface InferenceChatCompletionUnifiedCompletionToolFunction { + description?: string + name: string + parameters?: any + strict?: boolean +} + +export type InferenceChatCompletionUnifiedCompletionToolType = string | InferenceChatCompletionUnifiedCompletionToolChoice + +export interface InferenceChatCompletionUnifiedContentObject { + text: string + type: string +} + +export interface InferenceChatCompletionUnifiedMessage { + content?: InferenceChatCompletionUnifiedMessageContent + role: string + tool_call_id?: Id + tool_calls?: InferenceChatCompletionUnifiedToolCall[] +} + +export type InferenceChatCompletionUnifiedMessageContent = string | InferenceChatCompletionUnifiedContentObject[] + +export interface InferenceChatCompletionUnifiedRequest extends InferenceRequestChatCompletionBase { +/** The inference Id */ + inference_id: Id + /** Specifies the amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never } +} + +export type InferenceChatCompletionUnifiedResponse = StreamResult + +export interface InferenceChatCompletionUnifiedToolCall { + id: Id + function: InferenceChatCompletionUnifiedToolCallFunction + type: string +} + +export interface InferenceChatCompletionUnifiedToolCallFunction { + arguments: string + name: string +} + +export interface InferenceCompletionRequest extends RequestBase { +/** The inference Id */ + inference_id: Id + /** Specifies the amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** Inference input. Either a string or an array of strings. 
*/ + input: string | string[] + /** Optional task settings */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } +} + +export type InferenceCompletionResponse = InferenceCompletionInferenceResult + export interface InferenceDeleteRequest extends RequestBase { /** The task type */ task_type?: InferenceTaskType @@ -15224,26 +15348,16 @@ export interface InferenceGetResponse { endpoints: InferenceInferenceEndpointInfo[] } -export interface InferenceInferenceRequest extends RequestBase { -/** The type of inference task that the model performs. */ - task_type?: InferenceTaskType - /** The unique identifier for the inference endpoint. */ - inference_id: Id - /** The amount of time to wait for the inference request to complete. */ - timeout?: Duration - /** The query input, which is required only for the `rerank` task. It is not required for other tasks. */ - query?: string - /** The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. */ - input: string | string[] - /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ - task_settings?: InferenceTaskSettings +export interface InferencePostEisChatCompletionRequest extends InferenceRequestChatCompletionBase { +/** The unique identifier of the inference endpoint. */ + eis_inference_id: Id /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } + body?: string | { [key: string]: any } & { eis_inference_id?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } + querystring?: { [key: string]: any } & { eis_inference_id?: never } } -export type InferenceInferenceResponse = InferenceInferenceResult +export type InferencePostEisChatCompletionResponse = StreamResult export interface InferencePutRequest extends RequestBase { /** The task type */ @@ -15259,100 +15373,207 @@ export interface InferencePutRequest extends RequestBase { export type InferencePutResponse = InferenceInferenceEndpointInfo -export interface InferenceStreamInferenceRequest extends RequestBase { -/** The unique identifier for the inference endpoint. */ - inference_id: Id - /** The type of task that the model performs. */ - task_type?: InferenceTaskType - /** The text on which you want to perform the inference task. It can be a single string or an array. NOTE: Inference endpoints for the completion task type currently only support a single string as input. 
*/ - input: string | string[] +export interface InferencePutEisEisServiceSettings { + model_id: string + rate_limit?: InferenceRateLimitSetting +} + +export type InferencePutEisEisTaskType = 'chat_completion' + +export interface InferencePutEisRequest extends RequestBase { +/** The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ + task_type: InferencePutEisEisTaskType + /** The unique identifier of the inference endpoint. */ + eis_inference_id: Id + /** The type of service supported for the specified task type. In this case, `elastic`. */ + service: InferencePutEisServiceType + /** Settings used to install the inference model. These settings are specific to the `elastic` service. */ + service_settings: InferencePutEisEisServiceSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { inference_id?: never, task_type?: never, input?: never } + body?: string | { [key: string]: any } & { task_type?: never, eis_inference_id?: never, service?: never, service_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { inference_id?: never, task_type?: never, input?: never } + querystring?: { [key: string]: any } & { task_type?: never, eis_inference_id?: never, service?: never, service_settings?: never } } -export type InferenceStreamInferenceResponse = StreamResult +export type InferencePutEisResponse = InferenceInferenceEndpointInfo -export interface InferenceUnifiedInferenceCompletionTool { - type: string - function: InferenceUnifiedInferenceCompletionToolFunction +export type InferencePutEisServiceType = 'elastic' + +export interface InferencePutOpenaiOpenAIServiceSettings { + api_key: string + dimensions?: integer + model_id: string + organization_id?: string + rate_limit?: InferenceRateLimitSetting + url?: string } -export interface InferenceUnifiedInferenceCompletionToolChoice { - type: string - function: InferenceUnifiedInferenceCompletionToolChoiceFunction +export interface InferencePutOpenaiOpenAITaskSettings { + user?: string } -export interface InferenceUnifiedInferenceCompletionToolChoiceFunction { - name: string +export type InferencePutOpenaiOpenAITaskType = 'chat_completion' | 'completion' | 'text_embedding' + +export interface InferencePutOpenaiRequest extends RequestBase { +/** The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ + task_type: InferencePutOpenaiOpenAITaskType + /** The unique identifier of the inference endpoint. */ + openai_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `openai`. */ + service: InferencePutOpenaiServiceType + /** Settings used to install the inference model. These settings are specific to the `openai` service. */ + service_settings: InferencePutOpenaiOpenAIServiceSettings + /** Settings to configure the inference task. These settings are specific to the task type you specified. */ + task_settings?: InferencePutOpenaiOpenAITaskSettings + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export interface InferenceUnifiedInferenceCompletionToolFunction { - description?: string - name: string - parameters?: any - strict?: boolean +export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfo + +export type InferencePutOpenaiServiceType = 'openai' + +export interface InferencePutVoyageaiRequest extends RequestBase { +/** The type of the inference task that the model will perform. */ + task_type: InferencePutVoyageaiVoyageAITaskType + /** The unique identifier of the inference endpoint. */ + voyageai_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `voyageai`. */ + service: InferencePutVoyageaiServiceType + /** Settings used to install the inference model. These settings are specific to the `voyageai` service. */ + service_settings: InferencePutVoyageaiVoyageAIServiceSettings + /** Settings to configure the inference task. These settings are specific to the task type you specified. */ + task_settings?: InferencePutVoyageaiVoyageAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferenceUnifiedInferenceCompletionToolType = string | InferenceUnifiedInferenceCompletionToolChoice +export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfo -export interface InferenceUnifiedInferenceContentObject { - text: string - type: string +export type InferencePutVoyageaiServiceType = 'voyageai' + +export interface InferencePutVoyageaiVoyageAIServiceSettings { + dimensions?: integer + model_id: string + rate_limit?: InferenceRateLimitSetting + embedding_type?: float } -export interface InferenceUnifiedInferenceMessage { - content?: InferenceUnifiedInferenceMessageContent - role: string - tool_call_id?: Id - tool_calls?: InferenceUnifiedInferenceToolCall[] +export interface InferencePutVoyageaiVoyageAITaskSettings { + input_type?: string + return_documents?: boolean + top_k?: integer + truncation?: boolean } -export type InferenceUnifiedInferenceMessageContent = string | InferenceUnifiedInferenceContentObject[] +export type InferencePutVoyageaiVoyageAITaskType = 'text_embedding' | 'rerank' -export interface InferenceUnifiedInferenceRequest extends RequestBase { -/** The task type */ - task_type?: InferenceTaskType - /** The inference Id */ +export interface InferencePutWatsonxRequest extends RequestBase { +/** The task type. The only valid task type for the model to perform is `text_embedding`. 
*/ + task_type: InferencePutWatsonxWatsonxTaskType + /** The unique identifier of the inference endpoint. */ + watsonx_inference_id: Id + /** The type of service supported for the specified task type. In this case, `watsonxai`. */ + service: InferencePutWatsonxServiceType + /** Settings used to install the inference model. These settings are specific to the `watsonxai` service. */ + service_settings: InferencePutWatsonxWatsonxServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never } +} + +export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfo + +export type InferencePutWatsonxServiceType = 'watsonxai' + +export interface InferencePutWatsonxWatsonxServiceSettings { + api_key: string + api_version: string + model_id: string + project_id: string + rate_limit?: InferenceRateLimitSetting + url: string +} + +export type InferencePutWatsonxWatsonxTaskType = 'text_embedding' + +export interface InferenceRerankRequest extends RequestBase { +/** The unique identifier for the inference endpoint. */ + inference_id: Id + /** The amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** Query input. */ + query: string + /** The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. */ + input: string | string[] + /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } +} + +export type InferenceRerankResponse = InferenceRerankedInferenceResult + +export interface InferenceSparseEmbeddingRequest extends RequestBase { +/** The inference Id */ inference_id: Id /** Specifies the amount of time to wait for the inference request to complete. */ timeout?: Duration - /** A list of objects representing the conversation. */ - messages: InferenceUnifiedInferenceMessage[] - /** The ID of the model to use. */ - model?: string - /** The upper bound limit for the number of tokens that can be generated for a completion request. */ - max_completion_tokens?: long - /** A sequence of strings to control when the model should stop generating additional tokens. */ - stop?: string[] - /** The sampling temperature to use. */ - temperature?: float - /** Controls which tool is called by the model. */ - tool_choice?: InferenceUnifiedInferenceCompletionToolType - /** A list of tools that the model can call. */ - tools?: InferenceUnifiedInferenceCompletionTool[] - /** Nucleus sampling, an alternative to sampling with temperature. */ - top_p?: float + /** Inference input. 
Either a string or an array of strings. */ + input: string | string[] + /** Optional task settings */ + task_settings?: InferenceTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, messages?: never, model?: never, max_completion_tokens?: never, stop?: never, temperature?: never, tool_choice?: never, tools?: never, top_p?: never } + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, messages?: never, model?: never, max_completion_tokens?: never, stop?: never, temperature?: never, tool_choice?: never, tools?: never, top_p?: never } + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } } -export type InferenceUnifiedInferenceResponse = StreamResult +export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInferenceResult -export interface InferenceUnifiedInferenceToolCall { - id: Id - function: InferenceUnifiedInferenceToolCallFunction - type: string +export interface InferenceStreamCompletionRequest extends RequestBase { +/** The unique identifier for the inference endpoint. */ + inference_id: Id + /** The text on which you want to perform the inference task. It can be a single string or an array. NOTE: Inference endpoints for the completion task type currently only support a single string as input. */ + input: string | string[] + /** Optional task settings */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, input?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, input?: never, task_settings?: never } } -export interface InferenceUnifiedInferenceToolCallFunction { - arguments: string - name: string +export type InferenceStreamCompletionResponse = StreamResult + +export interface InferenceTextEmbeddingRequest extends RequestBase { +/** The inference Id */ + inference_id: Id + /** Specifies the amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** Inference input. Either a string or an array of strings. */ + input: string | string[] + /** Optional task settings */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } } +export type InferenceTextEmbeddingResponse = InferenceTextEmbeddingInferenceResult + export interface InferenceUpdateRequest extends RequestBase { /** The unique identifier of the inference endpoint. */ inference_id: Id @@ -18530,16 +18751,14 @@ export interface MlGetTrainedModelsRequest extends RequestBase { from?: integer /** A comma delimited string of optional fields to include in the response body. */ include?: MlInclude - /** parameter is deprecated! 
Use [include=definition] instead */ - include_model_definition?: boolean /** Specifies the maximum number of models to obtain. */ size?: integer /** A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned. */ tags?: string | string[] /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, include_model_definition?: never, size?: never, tags?: never } + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, size?: never, tags?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, include_model_definition?: never, size?: never, tags?: never } + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, size?: never, tags?: never } } export interface MlGetTrainedModelsResponse { @@ -18840,10 +19059,10 @@ export interface MlPutDatafeedRequest extends RequestBase { delayed_data_check_config?: MlDelayedDataCheckConfig /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration - /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master nodes and the machine learning nodes must have the `remote_cluster_client` role. */ indices?: Indices /** @alias indices */ - /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master nodes and the machine learning nodes must have the `remote_cluster_client` role. 
*/ indexes?: Indices /** Specifies index expansion options that are used during search */ indices_options?: IndicesOptions @@ -19887,13 +20106,11 @@ export interface NodesNodeBufferPool { used_in_bytes?: long } -export interface NodesNodeReloadError { +export interface NodesNodeReloadResult { name: Name reload_exception?: ErrorCause } -export type NodesNodeReloadResult = NodesStats | NodesNodeReloadError - export interface NodesNodesResponseBase { _nodes?: NodeStatistics } @@ -21310,12 +21527,6 @@ export interface SecurityCreatedStatus { created: boolean } -export interface SecurityFieldRule { - username?: Names - dn?: Names - groups?: Names -} - export interface SecurityFieldSecurity { except?: Fields grant?: Fields @@ -21364,6 +21575,15 @@ export interface SecurityRemoteIndicesPrivileges { allow_restricted_indices?: boolean } +export interface SecurityRemoteUserIndicesPrivileges { + field_security?: SecurityFieldSecurity[] + names: IndexName | IndexName[] + privileges: SecurityIndexPrivilege[] + query?: SecurityIndicesPrivilegesQuery[] + allow_restricted_indices: boolean + clusters: string[] +} + export interface SecurityReplicationAccess { names: IndexName | IndexName[] allow_restricted_indices?: boolean @@ -21416,7 +21636,7 @@ export interface SecurityRoleMapping { export interface SecurityRoleMappingRule { any?: SecurityRoleMappingRule[] all?: SecurityRoleMappingRule[] - field?: SecurityFieldRule + field?: Partial> except?: SecurityRoleMappingRule } @@ -22051,7 +22271,8 @@ export interface SecurityGetRoleRole { remote_indices?: SecurityRemoteIndicesPrivileges[] remote_cluster?: SecurityRemoteClusterPrivileges[] metadata: Metadata - run_as: string[] + description?: string + run_as?: string[] transient_metadata?: Record applications: SecurityApplicationPrivileges[] role_templates?: SecurityRoleTemplate[] @@ -22204,8 +22425,10 @@ export interface SecurityGetUserPrivilegesRequest extends RequestBase { export interface SecurityGetUserPrivilegesResponse { applications: SecurityApplicationPrivileges[] cluster: string[] + remote_cluster?: SecurityRemoteClusterPrivileges[] global: SecurityGlobalPrivilege[] indices: SecurityUserIndicesPrivileges[] + remote_indices?: SecurityRemoteUserIndicesPrivileges[] run_as: string[] } @@ -22403,13 +22626,13 @@ export interface SecurityOidcAuthenticateResponse { export interface SecurityOidcLogoutRequest extends RequestBase { /** The access token to be invalidated. */ - access_token: string + token: string /** The refresh token to be invalidated. */ refresh_token?: string /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { access_token?: never, refresh_token?: never } + body?: string | { [key: string]: any } & { token?: never, refresh_token?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { access_token?: never, refresh_token?: never } + querystring?: { [key: string]: any } & { token?: never, refresh_token?: never } } export interface SecurityOidcLogoutResponse { @@ -23027,14 +23250,14 @@ export interface SimulateIngestRequest extends RequestBase { /** A map of component template names to substitute component template definition objects. */ component_template_substitutions?: Record /** A map of index template names to substitute index template definition objects. */ - index_template_subtitutions?: Record + index_template_substitutions?: Record mapping_addition?: MappingTypeMapping /** Pipelines to test. 
If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline_substitutions?: Record /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_subtitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } + body?: string | { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_subtitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } + querystring?: { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } } export interface SimulateIngestResponse { From 1519963dd9d636cdcd7a6f44252df519522dc27a Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 10:04:59 -0500 Subject: [PATCH 500/647] Update actions/setup-node digest to cdca736 (#2676) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .github/workflows/nodejs.yml | 4 ++-- .github/workflows/npm-publish.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 42074cadc..da5428aea 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -41,7 +41,7 @@ jobs: persist-credentials: false - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4 + uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 with: node-version: ${{ matrix.node-version }} @@ -71,7 +71,7 @@ jobs: persist-credentials: false - name: Use Node.js - uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4 + uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 with: node-version: 22.x diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index 8994a003b..dd42454d6 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -16,7 +16,7 @@ jobs: with: persist-credentials: false ref: ${{ github.event.inputs.branch }} - - uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4 + - uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 with: node-version: "22.x" registry-url: "https://registry.npmjs.org/" From d9e9906c4ef3562c79d59cef8d498d90836aeaba Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 15:08:41 +0000 Subject: [PATCH 501/647] Update dependency @types/node to v22.13.13 (#2677) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Co-authored-by: Josh Mock --- package.json | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index ca15cf3ef..45c14efbd 100644 --- a/package.json +++ b/package.json @@ -61,7 +61,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "22.13.10", + "@types/node": "22.13.13", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 1abb4e3c9fa6bcb7cf3519ea619272ff717b3abb Mon Sep 17 00:00:00 2001 From: Colleen McGinnis Date: Thu, 27 Mar 2025 14:01:57 -0500 Subject: [PATCH 502/647] add missing mapped pages (#2684) --- docs/release-notes/index.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md index 4e39b615b..098bb38fb 100644 --- a/docs/release-notes/index.md +++ b/docs/release-notes/index.md @@ -1,22 +1,24 @@ --- navigation_title: "Elasticsearch JavaScript Client" +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/changelog-client.html --- # Elasticsearch JavaScript Client release notes [elasticsearch-javascript-client-release-notes] -Review the changes, fixes, and more in each version of Elasticsearch JavaScript Client. +Review the changes, fixes, and more in each version of Elasticsearch JavaScript Client. To check for security updates, go to [Security announcements for the Elastic stack](https://discuss.elastic.co/c/announcements/security-announcements/31). -% Release notes include only features, enhancements, and fixes. Add breaking changes, deprecations, and known issues to the applicable release notes sections. +% Release notes include only features, enhancements, and fixes. Add breaking changes, deprecations, and known issues to the applicable release notes sections. 
% ## version.next [elasticsearch-javascript-client-next-release-notes] % ### Features and enhancements [elasticsearch-javascript-client-next-features-enhancements] -% * +% * % ### Fixes [elasticsearch-javascript-client-next-fixes] -% * +% * ## 9.0.0 [elasticsearch-javascript-client-900-release-notes] From 64ef5359e7ce72b1da7f38a5e6bd28c701bf2b15 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 31 Mar 2025 11:07:04 -0500 Subject: [PATCH 503/647] Update dependency @types/node to v22.13.14 (#2686) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 45c14efbd..64adb1197 100644 --- a/package.json +++ b/package.json @@ -61,7 +61,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "22.13.13", + "@types/node": "22.13.14", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From a9a5aca736394ff91a8db440e12e282bfa1c345d Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 31 Mar 2025 16:11:46 +0000 Subject: [PATCH 504/647] Update dependency @elastic/request-converter to v9 (#2687) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Co-authored-by: Josh Mock --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 64adb1197..0cd01eac9 100644 --- a/package.json +++ b/package.json @@ -57,7 +57,7 @@ "node": ">=18" }, "devDependencies": { - "@elastic/request-converter": "8.18.0", + "@elastic/request-converter": "9.0.0", "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "0.7.34", From c5f9625463b6287336477b52d6245907449fac9e Mon Sep 17 00:00:00 2001 From: Marci W <333176+marciw@users.noreply.github.com> Date: Tue, 1 Apr 2025 11:36:18 -0400 Subject: [PATCH 505/647] replace mis-converted table (#2685) --- docs/reference/basic-config.md | 408 ++++++++++++++++++++++++++++++--- 1 file changed, 377 insertions(+), 31 deletions(-) diff --git a/docs/reference/basic-config.md b/docs/reference/basic-config.md index dd3452217..c1e3ec234 100644 --- a/docs/reference/basic-config.md +++ b/docs/reference/basic-config.md @@ -5,7 +5,7 @@ mapped_pages: # Basic configuration [basic-config] -This page shows you the possible basic configuration options that the clients offers. +This page explains the basic configuration options for the JavaScript client. ```js const { Client } = require('@elastic/elasticsearch') @@ -18,34 +18,380 @@ const client = new Client({ }) ``` -| | | -| --- | --- | -| `node` or `nodes` | The Elasticsearch endpoint to use.
    It can be a single string or an array of strings:

    ```js
    node: 'http://localhost:9200/'
    ```

    Or it can be an object (or an array of objects) that represents the node:

    ```js
    node: {
    url: new URL('http://localhost:9200/'),
    tls: 'tls options',
    agent: 'http agent options',
    id: 'custom node id',
    headers: { 'custom': 'headers' }
    roles: {
    master: true,
    data: true,
    ingest: true,
    ml: false
    }
    }
    ```
    | -| `auth` | Your authentication data. You can use both basic authentication and [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key).
    See [Authentication](/reference/connecting.md#authentication) for more details.
    *Default:* `null`

    Basic authentication:

    ```js
    auth: {
    username: 'elastic',
    password: 'changeme'
    }
    ```

    [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) authentication:

    ```js
    auth: {
    apiKey: 'base64EncodedKey'
    }
    ```

    Bearer authentication, useful for [service account tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token). Be aware that it does not handle automatic token refresh:

    ```js
    auth: {
    bearer: 'token'
    }
    ```
    | -| `maxRetries` | `number` - Max number of retries for each request.
    *Default:* `3` | -| `requestTimeout` | `number` - Max request timeout in milliseconds for each request.
    *Default:* No value | -| `pingTimeout` | `number` - Max ping request timeout in milliseconds for each request.
    *Default:* `3000` | -| `sniffInterval` | `number, boolean` - Perform a sniff operation every `n` milliseconds. Sniffing might not be the best solution for you, take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to know more.
    *Default:* `false` | -| `sniffOnStart` | `boolean` - Perform a sniff once the client is started. Sniffing might not be the best solution for you, take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to know more.
    *Default:* `false` | -| `sniffEndpoint` | `string` - Endpoint to ping during a sniff.
    *Default:* `'_nodes/_all/http'` | -| `sniffOnConnectionFault` | `boolean` - Perform a sniff on connection fault. Sniffing might not be the best solution for you, take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to know more.
    *Default:* `false` | -| `resurrectStrategy` | `string` - Configure the node resurrection strategy.
    *Options:* `'ping'`, `'optimistic'`, `'none'`
    *Default:* `'ping'` | -| `suggestCompression` | `boolean` - Adds `accept-encoding` header to every request.
    *Default:* `false` | -| `compression` | `string, boolean` - Enables gzip request body compression.
    *Options:* `'gzip'`, `false`
    *Default:* `false` | -| `tls` | `http.SecureContextOptions` - tls [configuraton](https://nodejs.org/api/tls.md).
    *Default:* `null` | -| `proxy` | `string, URL` - If you are using an http(s) proxy, you can put its url here. The client will automatically handle the connection to it.
    *Default:* `null`

    ```js
    const client = new Client({
    node: 'http://localhost:9200/',
    proxy: 'http://localhost:8080/'
    })

    const client = new Client({
    node: 'http://localhost:9200/',
    proxy: 'http://user:pwd@localhost:8080/'
    })
    ```
    | -| `agent` | `http.AgentOptions, function` - http agent [options](https://nodejs.org/api/http.md#http_new_agent_options), or a function that returns an actual http agent instance. If you want to disable the http agent use entirely (and disable the `keep-alive` feature), set the agent to `false`.
    *Default:* `null`

    ```js
    const client = new Client({
    node: 'http://localhost:9200/',
    agent: { agent: 'options' }
    })

    const client = new Client({
    node: 'http://localhost:9200/',
    // the function takes as parameter the option
    // object passed to the Connection constructor
    agent: (opts) => new CustomAgent()
    })

    const client = new Client({
    node: 'http://localhost:9200/',
    // Disable agent and keep-alive
    agent: false
    })
    ```
    | -| `nodeFilter` | `function` - Filters which node not to use for a request.
    *Default:*

    ```js
    function defaultNodeFilter (node) {
    // avoid master only nodes
    if (node.roles.master === true &&
    node.roles.data === false &&
    node.roles.ingest === false) {
    return false
    }
    return true
    }
    ```
    | -| `nodeSelector` | `function` - custom selection strategy.
    *Options:* `'round-robin'`, `'random'`, custom function
    *Default:* `'round-robin'`
    *Custom function example:*

    ```js
    function nodeSelector (connections) {
    const index = calculateIndex()
    return connections[index]
    }
    ```
    | -| `generateRequestId` | `function` - function to generate the request id for every request, it takes two parameters, the request parameters and options.
    By default it generates an incremental integer for every request.
    *Custom function example:*

    ```js
    function generateRequestId (params, options) {
    // your id generation logic
    // must be syncronous
    return 'id'
    }
    ```
    | -| `name` | `string, symbol` - The name to identify the client instance in the events.
    *Default:* `elasticsearch-js` | -| `opaqueIdPrefix` | `string` - A string that will be use to prefix any `X-Opaque-Id` header.
    See [`X-Opaque-Id` support](/reference/observability.md#_x_opaque_id_support) for more details.
    _Default:* `null` | -| `headers` | `object` - A set of custom headers to send in every request.
    *Default:* `{}` | -| `context` | `object` - A custom object that you can use for observability in your events.It will be merged with the API level context option.
    *Default:* `null` | -| `enableMetaHeader` | `boolean` - If true, adds an header named `'x-elastic-client-meta'`, containing some minimal telemetry data,such as the client and platform version.
    *Default:* `true` | -| `cloud` | `object` - Custom configuration for connecting to [Elastic Cloud](https://cloud.elastic.co). See [Authentication](/reference/connecting.md) for more details.
    *Default:* `null`
    *Cloud configuration example:*

    ```js
    const client = new Client({
    cloud: {
    id: ''
    },
    auth: {
    username: 'elastic',
    password: 'changeme'
    }
    })
    ```
    | -| `disablePrototypePoisoningProtection` | `boolean`, `'proto'`, `'constructor'` - The client can protect you against prototype poisoning attacks. Read [this article](https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08) to learn more about this security concern. If needed, you can enable prototype poisoning protection entirely (`false`) or one of the two checks (`'proto'` or `'constructor'`). For performance reasons, it is disabled by default. Read the `secure-json-parse` [documentation](https://github.com/fastify/secure-json-parse) to learn more.
    *Default:* `true` | -| `caFingerprint` | `string` - If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints.
    *Default:* `null` | -| `maxResponseSize` | `number` - When configured, it verifies that the uncompressed response size is lower than the configured number, if it’s higher it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENGTH
    *Default:* `null` | -| `maxCompressedResponseSize` | `number` - When configured, it verifies that the compressed response size is lower than the configured number, if it’s higher it will abort the request. It cannot be higher than buffer.constants.MAX_LENGTH
    *Default:* `null` | +### `node` or `nodes` +The Elasticsearch endpoint to use. It can be a single string or an array of strings: + +```js +node: 'http://localhost:9200/' +``` + +Or it can be an object (or an array of objects) that represents the node: + +```js +node: { + url: new URL('http://localhost:9200/'), + tls: 'tls options', + agent: 'http agent options', + id: 'custom node id', + headers: { 'custom': 'headers' }, + roles: { + master: true, + data: true, + ingest: true, + ml: false + } +} +``` + +--- + +### `auth` + +Default: `null` + +Your authentication data. You can use both basic authentication and [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key). + See [Authentication](/reference/connecting.md#authentication) for more details. + + +Basic authentication: + +```js +auth: { + username: 'elastic', + password: 'changeme' +} +``` + +[ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) authentication: + +```js +auth: { + apiKey: 'base64EncodedKey' +} +``` + +Bearer authentication, useful for [service account tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token). Be aware that it does not handle automatic token refresh: + +```js +auth: { + bearer: 'token' +} +``` + +--- + +### `maxRetries` + +Type: `number`
    +Default: `3` + +Max number of retries for each request. + +--- + +### `requestTimeout` + +Type: `number`
    +Default: `No value` + +Max request timeout in milliseconds for each request. + +--- + +### `pingTimeout` + +Type: `number`
+Default: `3000`

Max ping request timeout in milliseconds for each request.
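
As a sketch (the endpoint and values are illustrative, not recommendations), the three retry and timeout options above can be combined in a single client configuration:

```js
const client = new Client({
  node: 'http://localhost:9200/',
  maxRetries: 5,
  requestTimeout: 30000,
  pingTimeout: 3000
})
```

---

### `sniffInterval`

Type: `number, boolean`<br>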
    +Default: `false` + +Perform a sniff operation every `n` milliseconds. + +:::{tip} +Sniffing might not be the best solution. Before using the various `sniff` options, review this [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). +::: + +--- + +### `sniffOnStart` + +Type: `boolean`
    +Default: `false` + +Perform a sniff once the client is started. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). + +--- + +### `sniffEndpoint` + +Type: `string`
    +Default: `'_nodes/_all/http'` + +Endpoint to ping during a sniff. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). + +--- + +### `sniffOnConnectionFault` + +Type: `boolean`
+Default: `false`

Perform a sniff on connection fault. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how).
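
One possible combination of the sniffing options described above (values are illustrative; review the best practices post before enabling sniffing at all):

```js
const client = new Client({
  node: 'http://localhost:9200/',
  sniffOnStart: true,
  sniffInterval: 60000,
  sniffOnConnectionFault: true
})
```

---

### `resurrectStrategy`

Type: `string`<br>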
    +Default: `'ping'` + +Configure the node resurrection strategy.
+Options: `'ping'`, `'optimistic'`, `'none'`
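
For example, a sketch (not a recommendation for every workload) that opts into the `optimistic` strategy, which resurrects dead nodes without pinging them first:

```js
const client = new Client({
  node: 'http://localhost:9200/',
  resurrectStrategy: 'optimistic'
})
```

---

### `suggestCompression`

Type: `boolean`<br>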
    +Default: `false` + +Adds an `accept-encoding` header to every request. + +--- + +### `compression` + +Type: `string, boolean`
    +Default: `false` + +Enables gzip request body compression.
+Options: `'gzip'`, `false`
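
A sketch enabling request-body compression together with compressed responses (see `suggestCompression` above):

```js
const client = new Client({
  node: 'http://localhost:9200/',
  compression: 'gzip',
  suggestCompression: true
})
```

---

### `tls`

Type: `http.SecureContextOptions`<br>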
+Default: `null`

The [tls configuration](https://nodejs.org/api/tls.md).
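
A minimal sketch (the certificate path is a placeholder) that passes a custom CA certificate to the TLS layer:

```js
const { readFileSync } = require('node:fs')

const client = new Client({
  node: 'https://localhost:9200/',
  tls: {
    ca: readFileSync('./ca.crt'),
    rejectUnauthorized: false // not recommended in production
  }
})
```

---

### `proxy`

Type: `string, URL`<br>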
+Default: `null`

If you are using an http(s) proxy, you can put its URL here. The client will automatically handle the connection to it.

```js
const client = new Client({
  node: 'http://localhost:9200/',
  proxy: 'http://localhost:8080/'
})

const client = new Client({
  node: 'http://localhost:9200/',
  proxy: 'http://user:pwd@localhost:8080/'
})
```

---

### `agent`

Type: `http.AgentOptions, function`<br>
+Default: `null`

http agent [options](https://nodejs.org/api/http.md#http_new_agent_options), or a function that returns an actual http agent instance. If you want to disable the http agent entirely (along with the `keep-alive` feature), set `agent` to `false`.

```js
const client = new Client({
  node: 'http://localhost:9200/',
  agent: { agent: 'options' }
})

const client = new Client({
  node: 'http://localhost:9200/',
  // the function takes as parameter the options
  // object passed to the Connection constructor
  agent: (opts) => new CustomAgent()
})

const client = new Client({
  node: 'http://localhost:9200/',
  // Disable agent and keep-alive
  agent: false
})
```

---

### `nodeFilter`

Type: `function`

Filter that indicates whether a node should be used for a request. Default function definition:

```js
function defaultNodeFilter (node) {
  // avoid master-only nodes
  if (node.roles.master === true &&
      node.roles.data === false &&
      node.roles.ingest === false) {
    return false
  }
  return true
}
```

---

### `nodeSelector`

Type: `function`<br>
    +Default: `'round-robin'` + +Custom selection strategy.
    +Options: `'round-robin'`, `'random'`, custom function + +Custom function example: + +```js +function nodeSelector (connections) { + const index = calculateIndex() + return connections[index] +} +``` + +--- + +### `generateRequestId` + +Type: `function`
+

Function to generate the request ID for every request. It takes two parameters: the request parameters and options. By default, it generates an incremental integer for every request.

Custom function example:

```js
function generateRequestId (params, options) {
  // your id generation logic
  // must be synchronous
  return 'id'
}
```

---

### `name`

Type: `string, symbol`<br>
    +Default: `elasticsearch-js` + +The name to identify the client instance in the events. + +--- + +### `opaqueIdPrefix` + +Type: `string`
+Default: `null`

A string that will be used to prefix any `X-Opaque-Id` header.
See [`X-Opaque-Id` support](/reference/observability.md#_x_opaque_id_support) for more details.
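
For example (the prefix value is illustrative):

```js
const client = new Client({
  node: 'http://localhost:9200/',
  opaqueIdPrefix: 'proxy-client::'
})
```

---

### `headers`

Type: `object`<br>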
+Default: `{}`

A set of custom headers to send in every request.
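
For example (the header name and value are illustrative):

```js
const client = new Client({
  node: 'http://localhost:9200/',
  headers: { 'x-custom-header': 'foo' }
})
```

---

### `context`

Type: `object`<br>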
    +Default: `null` + +A custom object that you can use for observability in your events. It will be merged with the API level context option. + +--- + +### `enableMetaHeader` + +Type: `boolean`
    +Default: `true` + +If true, adds an header named `'x-elastic-client-meta'`, containing some minimal telemetry data, such as the client and platform version. + +--- + +### `cloud` + +Type: `object`
    +Default: `null` + +Custom configuration for connecting to [Elastic Cloud](https://cloud.elastic.co). See [Authentication](/reference/connecting.md) for more details. + +Cloud configuration example: + +```js +const client = new Client({ + cloud: { + id: '' + }, + auth: { + username: 'elastic', + password: 'changeme' + } +}) +``` + +--- + +### `disablePrototypePoisoningProtection` + +Default: `true` + +`boolean`, `'proto'`, `'constructor'` - The client can protect you against prototype poisoning attacks. For more information, refer to [Square Brackets are the Enemy](https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08). If needed, you can enable prototype poisoning protection entirely (`false`) or one of the two checks (`'proto'` or `'constructor'`). For performance reasons, it is disabled by default. To learn more, refer to the [`secure-json-parse` documentation](https://github.com/fastify/secure-json-parse). + +--- + +### `caFingerprint` + +Type: `string`
    +Default: `null` + +If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints. + +--- + +### `maxResponseSize` + +Type: `number`
    +Default: `null` + +When configured, `maxResponseSize` verifies that the uncompressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxResponseSize` cannot be higher than the value of `buffer.constants.MAX_STRING_LENGTH`. + +--- + +### `maxCompressedResponseSize` + +Type: `number`
    +Default: `null` + +When configured, `maxCompressedResponseSize` verifies that the compressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxCompressedResponseSize` cannot be higher than the value of `buffer.constants.MAX_STRING_LENGTH`. \ No newline at end of file From e8dc747c61277be150e1fa72f6c99e1275e25554 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 3 Apr 2025 14:41:58 -0500 Subject: [PATCH 506/647] Merge serverless functionality from @elastic/elasticsearch-serverless (#2695) * Expose a serverMode option to enable serverless-friendly defaults * Update basic config docs to note how the serverMode flag works * Docs cleanup * Add another note to docs about connecting to serverless --- docs/reference/basic-config.md | 38 ++++++++++++--- docs/reference/client-helpers.md | 27 ----------- docs/reference/configuration.md | 15 ++---- docs/reference/connecting.md | 44 +++++++---------- src/client.ts | 81 +++++++++++++++++++++----------- test/unit/client.test.ts | 67 +++++++++++++++++++++++++- 6 files changed, 173 insertions(+), 99 deletions(-) diff --git a/docs/reference/basic-config.md b/docs/reference/basic-config.md index c1e3ec234..7b523cbeb 100644 --- a/docs/reference/basic-config.md +++ b/docs/reference/basic-config.md @@ -20,12 +20,16 @@ const client = new Client({ ### `node` or `nodes` -The Elasticsearch endpoint to use. It can be a single string or an array of strings: +The {{es}} endpoint to use. It can be a single string or an array of strings: ```js node: 'http://localhost:9200/' ``` +```js +nodes: ['http://localhost:9200/', 'http://localhost:9201/'] +``` + Or it can be an object (or an array of objects) that represents the node: ```js @@ -52,7 +56,6 @@ Default: `null` Your authentication data. You can use both basic authentication and [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key). See [Authentication](/reference/connecting.md#authentication) for more details. - Basic authentication: @@ -113,7 +116,7 @@ Max ping request timeout in milliseconds for each request. Type: `number, boolean`
    Default: `false` -Perform a sniff operation every `n` milliseconds. +Perform a sniff operation every `n` milliseconds. :::{tip} Sniffing might not be the best solution. Before using the various `sniff` options, review this [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). @@ -182,7 +185,7 @@ Options: `'gzip'`, `false` Type: `http.SecureContextOptions`
Default: `null`

The [tls configuration](https://nodejs.org/api/tls.html).

---

### `proxy`

Type: `string, URL`
    Default: `null` If you are using an http(s) proxy, you can put its url here. The client will automatically handle the connection to it. - ```js const client = new Client({ @@ -213,7 +215,7 @@ const client = new Client({ Type: `http.AgentOptions, function`
    Default: `null` -http agent [options](https://nodejs.org/api/http.md#http_new_agent_options), or a function that returns an actual http agent instance. If you want to disable the http agent use entirely (and disable the `keep-alive` feature), set the agent to `false`. +http agent [options](https://nodejs.org/api/http.html#http_new_agent_options), or a function that returns an actual http agent instance. If you want to disable the http agent use entirely (and disable the `keep-alive` feature), set the agent to `false`. ```js const client = new Client({ @@ -394,4 +396,26 @@ When configured, `maxResponseSize` verifies that the uncompressed response size Type: `number`
    Default: `null` -When configured, `maxCompressedResponseSize` verifies that the compressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxCompressedResponseSize` cannot be higher than the value of `buffer.constants.MAX_STRING_LENGTH`. \ No newline at end of file +When configured, `maxCompressedResponseSize` verifies that the compressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxCompressedResponseSize` cannot be higher than the value of `buffer.constants.MAX_STRING_LENGTH`. + +--- + +### `redaction` + +Type: `object`
    +Default: A configuration that will replace known sources of sensitive data in `Error` metadata + +Options for how to redact potentially sensitive data from metadata attached to `Error` objects + +::::{note} +[Read about redaction](/reference/advanced-config.md#redaction) for more details +:::: + +--- + +### `serverMode` + +Type: `string`
    +Default: `"stack"` + +Setting to `"stack"` sets defaults assuming a traditional (non-serverless) {{es}} instance. Setting to `"serverless"` sets defaults to work more seamlessly with [Elastic Cloud Serverless](https://www.elastic.co/guide/en/serverless/current/intro.html), like enabling compression and disabling features that assume the possibility of multiple {{es}} nodes. diff --git a/docs/reference/client-helpers.md b/docs/reference/client-helpers.md index 38c29198e..c80562db4 100644 --- a/docs/reference/client-helpers.md +++ b/docs/reference/client-helpers.md @@ -11,15 +11,12 @@ The client comes with an handy collection of helpers to give you a more comforta The client helpers are experimental, and the API may change in the next minor releases. The helpers will not work in any Node.js version lower than 10. :::: - - ## Bulk helper [bulk-helper] Added in `v7.7.0` Running bulk requests can be complex due to the shape of the API, this helper aims to provide a nicer developer experience around the Bulk API. - ### Usage [_usage_3] ```js @@ -67,10 +64,8 @@ To create a new instance of the Bulk helper, access it as shown in the example a | `wait` | How much time to wait before retries in milliseconds.
    *Default:* 5000.

    ```js
    const b = client.helpers.bulk({
    wait: 3000
    })
    ```
    | | `refreshOnCompletion` | If `true`, at the end of the bulk operation it runs a refresh on all indices or on the specified indices.
    *Default:* false.

    ```js
    const b = client.helpers.bulk({
    refreshOnCompletion: true
    // or
    refreshOnCompletion: 'index-name'
    })
    ```
    | - ### Supported operations [_supported_operations] - #### Index [_index_2] ```js @@ -84,7 +79,6 @@ client.helpers.bulk({ }) ``` - #### Create [_create_4] ```js @@ -98,7 +92,6 @@ client.helpers.bulk({ }) ``` - #### Update [_update_3] ```js @@ -116,7 +109,6 @@ client.helpers.bulk({ }) ``` - #### Delete [_delete_10] ```js @@ -130,7 +122,6 @@ client.helpers.bulk({ }) ``` - ### Abort a bulk operation [_abort_a_bulk_operation] If needed, you can abort a bulk operation at any time. The bulk helper returns a [thenable](https://promisesaplus.com/), which has an `abort` method. @@ -139,7 +130,6 @@ If needed, you can abort a bulk operation at any time. The bulk helper returns a The abort method stops the execution of the bulk operation, but if you are using a concurrency higher than one, the operations that are already running will not be stopped. :::: - ```js const { createReadStream } = require('fs') const split = require('split2') @@ -164,7 +154,6 @@ const b = client.helpers.bulk({ console.log(await b) ``` - ### Passing custom options to the Bulk API [_passing_custom_options_to_the_bulk_api] You can pass any option supported by the link: [Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) to the helper, and the helper uses those options in conjunction with the Bulk API call. @@ -181,7 +170,6 @@ const result = await client.helpers.bulk({ }) ``` - ### Usage with an async generator [_usage_with_an_async_generator] ```js @@ -214,7 +202,6 @@ const result = await client.helpers.bulk({ console.log(result) ``` - ### Modifying a document before operation [_modifying_a_document_before_operation] Added in `v8.8.2` @@ -241,14 +228,12 @@ const result = await client.helpers.bulk({ console.log(result) ``` - ## Multi search helper [multi-search-helper] Added in `v7.8.0` If you send search request at a high rate, this helper might be useful for you. It uses the multi search API under the hood to batch the requests and improve the overall performances of your application. The `result` exposes a `documents` property as well, which allows you to access directly the hits sources. - ### Usage [_usage_4] ```js @@ -278,7 +263,6 @@ To create a new instance of the multi search (msearch) helper, you should access | `retries` | How many times an operation is retried before to resolve the request. An operation is retried only in case of a 429 error.
    *Default:* Client max retries.

    ```js
    const m = client.helpers.msearch({
    retries: 3
    })
    ```
    | | `wait` | How much time to wait before retries in milliseconds.
    *Default:* 5000.

    ```js
    const m = client.helpers.msearch({
    wait: 3000
    })
    ```
    | - ### Stopping the msearch helper [_stopping_the_msearch_helper] If needed, you can stop an msearch processor at any time. The msearch helper returns a [thenable](https://promisesaplus.com/), which has an `stop` method. @@ -291,7 +275,6 @@ The `stop` method accepts an optional error, that will be dispatched every subse The stop method stops the execution of the msearch processor, but if you are using a concurrency higher than one, the operations that are already running will not be stopped. :::: - ```js const { Client } = require('@elastic/elasticsearch') @@ -318,7 +301,6 @@ m.search( setImmediate(() => m.stop()) ``` - ## Search helper [search-helper] Added in `v7.7.0` @@ -340,7 +322,6 @@ for (const doc of documents) { } ``` - ## Scroll search helper [scroll-search-helper] Added in `v7.7.0` @@ -362,7 +343,6 @@ for await (const result of scrollSearch) { } ``` - ### Clear a scroll search [_clear_a_scroll_search] If needed, you can clear a scroll search by calling `result.clear()`: @@ -375,7 +355,6 @@ for await (const result of scrollSearch) { } ``` - ### Quickly getting the documents [_quickly_getting_the_documents] If you only need the documents from the result of a scroll search, you can access them via `result.documents`: @@ -386,7 +365,6 @@ for await (const result of scrollSearch) { } ``` - ## Scroll documents helper [scroll-documents-helper] Added in `v7.7.0` @@ -408,15 +386,12 @@ for await (const doc of scrollSearch) { } ``` - ## ES|QL helper [esql-helper] ES|QL queries can return their results in [several formats](docs-content://explore-analyze/query-filter/languages/esql-rest.md#esql-rest-format). The default JSON format returned by ES|QL queries contains arrays of values for each row, with column names and types returned separately: - ### Usage [_usage_5] - #### `toRecords` [_torecords] Added in `v8.14.0` @@ -494,7 +469,6 @@ const result = await client.helpers .toRecords() ``` - #### `toArrowReader` [_toarrowreader] Added in `v8.16.0` @@ -516,7 +490,6 @@ for (const recordBatch of reader) { } ``` - #### `toArrowTable` [_toarrowtable] Added in `v8.16.0` diff --git a/docs/reference/configuration.md b/docs/reference/configuration.md index 0367bdc12..d6519a589 100644 --- a/docs/reference/configuration.md +++ b/docs/reference/configuration.md @@ -7,13 +7,8 @@ mapped_pages: The client is designed to be easily configured for your needs. In the following section, you can see the possible options that you can use to configure it. -* [Basic configuration](/reference/basic-config.md) -* [Advanced configuration](/reference/advanced-config.md) -* [Timeout best practices](docs-content://troubleshoot/elasticsearch/elasticsearch-client-javascript-api/nodejs.md) -* [Creating a child client](/reference/child.md) -* [Testing](/reference/client-testing.md) - - - - - +- [Basic configuration](/reference/basic-config.md) +- [Advanced configuration](/reference/advanced-config.md) +- [Timeout best practices](docs-content://troubleshoot/elasticsearch/elasticsearch-client-javascript-api/nodejs.md) +- [Creating a child client](/reference/child.md) +- [Testing](/reference/client-testing.md) diff --git a/docs/reference/connecting.md b/docs/reference/connecting.md index 72eab6b5c..887dc587d 100644 --- a/docs/reference/connecting.md +++ b/docs/reference/connecting.md @@ -11,7 +11,6 @@ This page contains the information you need to connect and use the Client with { This document contains code snippets to show you how to connect to various {{es}} providers. 
- ### Elastic Cloud [auth-ec] If you are using [Elastic Cloud](https://www.elastic.co/cloud), the client offers an easy way to connect to it via the `cloud` option. You must pass the Cloud ID that you can find in the cloud console, then your username and password inside the `auth` option. @@ -20,12 +19,10 @@ If you are using [Elastic Cloud](https://www.elastic.co/cloud), the client offer When connecting to Elastic Cloud, the client will automatically enable both request and response compression by default, since it yields significant throughput improvements. Moreover, the client will also set the tls option `secureProtocol` to `TLSv1_2_method` unless specified otherwise. You can still override this option by configuring them. :::: - ::::{important} Do not enable sniffing when using Elastic Cloud, since the nodes are behind a load balancer, Elastic Cloud will take care of everything for you. Take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to know more. :::: - ```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ @@ -39,6 +36,24 @@ const client = new Client({ }) ``` +## Connecting to an Elastic Cloud Serverless instance [connect-serverless] + +The Node.js client is built to support connecting to [Elastic Cloud Serverless](https://www.elastic.co/guide/en/serverless/current/intro.html). By setting the `serverMode` option to `"serverless"`, several default options will be modified to better suit the serverless environment. + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + cloud: { + id: '' + }, + auth: { + username: 'elastic', + password: 'changeme' + }, + serverMode: 'serverless' +}) + +``` ## Connecting to a self-managed cluster [connect-self-managed-new] @@ -62,7 +77,6 @@ When you start {{es}} for the first time you’ll see a distinct block like the Depending on the circumstances there are two options for verifying the HTTPS connection, either verifying with the CA certificate itself or via the HTTP CA certificate fingerprint. - ### TLS configuration [auth-tls] The generated root CA certificate can be found in the `certs` directory in your {{es}} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you’re running {{es}} in Docker there is [additional documentation for retrieving the CA certificate](docs-content://deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md). @@ -84,7 +98,6 @@ const client = new Client({ }) ``` - ### CA fingerprint [auth-ca-fingerprint] You can configure the client to only trust certificates that are signed by a specific CA certificate (CA certificate pinning) by providing a `caFingerprint` option. This will verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied value. You must configure a SHA256 digest. @@ -125,14 +138,12 @@ The output of `openssl x509` will look something like this: SHA256 Fingerprint=A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28 ``` - ## Connecting without security enabled [connect-no-security] ::::{warning} Running {{es}} without security enabled is not recommended. 
:::: - If your cluster is configured with [security explicitly disabled](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md) then you can connect via HTTP: ```js @@ -142,12 +153,10 @@ const client = new Client({ }) ``` - ## Authentication strategies [auth-strategies] Following you can find all the supported authentication strategies. - ### ApiKey authentication [auth-apikey] You can use the [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) authentication by passing the `apiKey` parameter via the `auth` option. The `apiKey` parameter can be either a base64 encoded string or an object with the values that you can obtain from the [create api key endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key). @@ -156,7 +165,6 @@ You can use the [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/opera If you provide both basic authentication credentials and the ApiKey configuration, the ApiKey takes precedence. :::: - ```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ @@ -180,7 +188,6 @@ const client = new Client({ }) ``` - ### Bearer authentication [auth-bearer] You can provide your credentials by passing the `bearer` token parameter via the `auth` option. Useful for [service account tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token). Be aware that it does not handle automatic token refresh. @@ -195,7 +202,6 @@ const client = new Client({ }) ``` - ### Basic authentication [auth-basic] You can provide your credentials by passing the `username` and `password` parameters via the `auth` option. @@ -204,7 +210,6 @@ You can provide your credentials by passing the `username` and `password` parame If you provide both basic authentication credentials and the Api Key configuration, the Api Key will take precedence. :::: - ```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ @@ -225,7 +230,6 @@ const client = new Client({ }) ``` - ## Usage [client-usage] Using the client is straightforward, it supports all the public APIs of {{es}}, and every method exposes the same signature. @@ -278,8 +282,6 @@ In this case, the result will be: The body is a boolean value when you use `HEAD` APIs. :::: - - ### Aborting a request [_aborting_a_request] If needed, you can abort a running request by using the `AbortController` standard. @@ -288,7 +290,6 @@ If needed, you can abort a running request by using the `AbortController` standa If you abort a request, the request will fail with a `RequestAbortedError`. :::: - ```js const AbortController = require('node-abort-controller') const { Client } = require('@elastic/elasticsearch') @@ -308,7 +309,6 @@ const result = await client.search({ }, { signal: abortController.signal }) ``` - ### Request specific options [_request_specific_options] If needed you can pass request specific options in a second object: @@ -352,7 +352,6 @@ The supported request specific options are: This section illustrates the best practices for leveraging the {{es}} client in a Function-as-a-Service (FaaS) environment. The most influential optimization is to initialize the client outside of the function, the global scope. This practice does not only improve performance but also enables background functionality as – for example – [sniffing](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). 
The following examples provide a skeleton for the best practices. - ### GCP Cloud Functions [_gcp_cloud_functions] ```js @@ -369,7 +368,6 @@ exports.testFunction = async function (req, res) { } ``` - ### AWS Lambda [_aws_lambda] ```js @@ -386,7 +384,6 @@ exports.handler = async function (event, context) { } ``` - ### Azure Functions [_azure_functions] ```js @@ -410,7 +407,6 @@ Resources used to assess these recommendations: * [Azure Functions Python developer guide](https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=azurecli-linux%2Capplication-level#global-variables) * [AWS Lambda: Comparing the effect of global scope](https://docs.aws.amazon.com/lambda/latest/operatorguide/global-scope.html) - ## Connecting through a proxy [client-connect-proxy] Added in `v7.10.0` @@ -421,7 +417,6 @@ If you need to pass through an http(s) proxy for connecting to {{es}}, the clien In versions 8.0+ of the client, the default `Connection` type is set to `UndiciConnection`, which does not support proxy configurations. To use a proxy, you will need to use the `HttpConnection` class from `@elastic/transport` instead. :::: - ```js import { HttpConnection } from '@elastic/transport' @@ -455,7 +450,6 @@ const client = new Client({ }) ``` - ## Error handling [client-error-handling] The client exposes a variety of error objects that you can use to enhance your error handling. You can find all the error objects inside the `errors` key in the client. @@ -506,7 +500,6 @@ const client = new Client({ }) ``` - ## Closing a client’s connections [close-connections] If you would like to close all open connections being managed by an instance of the client, use the `close()` function: @@ -518,7 +511,6 @@ const client = new Client({ client.close(); ``` - ## Automatic product check [product-check] Since v7.14.0, the client performs a required product check before the first call. This pre-flight product check allows the client to establish the version of Elasticsearch that it is communicating with. The product check requires one additional HTTP request to be sent to the server as part of the request pipeline before the main API call is sent. In most cases, this will succeed during the very first API call that the client sends. Once the product check completes, no further product check HTTP requests are sent for subsequent API calls. diff --git a/src/client.ts b/src/client.ts index 43f78a6e5..a50670ca3 100644 --- a/src/client.ts +++ b/src/client.ts @@ -10,6 +10,7 @@ import buffer from 'node:buffer' import os from 'node:os' import { Transport, + TransportOptions, UndiciConnection, WeightedConnectionPool, CloudConnectionPool, @@ -54,6 +55,8 @@ if (transportVersion.includes('-')) { } const nodeVersion = process.versions.node +const serverlessApiVersion = '2023-10-31' + export interface NodeOptions { /** @property url Elasticsearch node's location */ url: URL @@ -180,6 +183,9 @@ export interface ClientOptions { * @remarks Read https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/advanced-config.html#redaction for more details * @defaultValue Configuration that will replace known sources of sensitive data */ redaction?: RedactionOptions + /** @property serverMode Setting to "serverless" will change some default behavior, like enabling compression and disabling features that assume the possibility of multiple Elasticsearch nodes. + * @defaultValue "stack", which sets defaults for a traditional (non-serverless) Elasticsearch instance. 
*/ + serverMode?: 'stack' | 'serverless' } export default class Client extends API { @@ -192,15 +198,18 @@ export default class Client extends API { constructor (opts: ClientOptions) { super() - // @ts-expect-error kChild symbol is for internal use only - if ((opts.cloud != null) && opts[kChild] === undefined) { - const { id } = opts.cloud - // the cloud id is `cluster-name:base64encodedurl` - // the url is a string divided by two '$', the first is the cloud url - // the second the elasticsearch instance, the third the kibana instance - const cloudUrls = Buffer.from(id.split(':')[1], 'base64').toString().split('$') - opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}` + // @ts-expect-error kChild symbol is for internal use only + if ((opts.cloud != null || opts.serverMode === 'serverless') && opts[kChild] === undefined) { + if (opts.cloud != null) { + const { id } = opts.cloud + // the cloud id is `cluster-name:base64encodedurl` + // the url is a string divided by two '$', the first is the cloud url + // the second the elasticsearch instance, the third the kibana instance + const cloudUrls = Buffer.from(id.split(':')[1], 'base64').toString().split('$') + + opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}` + } // Cloud has better performance with compression enabled // see https://github.com/elastic/elasticsearch-py/pull/704. @@ -225,11 +234,16 @@ export default class Client extends API { } } + const headers: Record = { + 'user-agent': `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${nodeVersion}; Transport ${transportVersion})` + } + if (opts.serverMode === 'serverless') headers['elastic-api-version'] = serverlessApiVersion + const options: Required = Object.assign({}, { Connection: UndiciConnection, - Transport: SniffingTransport, + Transport: opts.serverMode === 'serverless' ? Transport : SniffingTransport, Serializer, - ConnectionPool: (opts.cloud != null) ? CloudConnectionPool : WeightedConnectionPool, + ConnectionPool: (opts.cloud != null || opts.serverMode === 'serverless') ? CloudConnectionPool : WeightedConnectionPool, maxRetries: 3, pingTimeout: 3000, sniffInterval: false, @@ -241,9 +255,7 @@ export default class Client extends API { tls: null, caFingerprint: null, agent: null, - headers: { - 'user-agent': `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${nodeVersion}; Transport ${transportVersion})` - }, + headers, nodeFilter: null, generateRequestId: null, name: 'elasticsearch-js', @@ -257,7 +269,8 @@ export default class Client extends API { redaction: { type: 'replace', additionalKeys: [] - } + }, + serverMode: 'stack' }, opts) if (options.caFingerprint != null && isHttpConnection(opts.node ?? opts.nodes)) { @@ -326,7 +339,13 @@ export default class Client extends API { // ensure default connection values are inherited when creating new connections // see https://github.com/elastic/elasticsearch-js/issues/1791 - const nodes = options.node ?? options.nodes + let nodes = options.node ?? options.nodes + + // serverless only supports one node, so pick the first one + if (options.serverMode === 'serverless' && Array.isArray(nodes)) { + nodes = nodes[0] + } + let nodeOptions: Array = Array.isArray(nodes) ? 
nodes : [nodes] type ConnectionDefaults = Record nodeOptions = nodeOptions.map(opt => { @@ -354,20 +373,14 @@ export default class Client extends API { this.connectionPool.addConnection(nodeOptions) } - this.transport = new options.Transport({ + let transportOptions: TransportOptions = { diagnostic: this.diagnostic, connectionPool: this.connectionPool, serializer: this.serializer, maxRetries: options.maxRetries, requestTimeout: options.requestTimeout, - sniffInterval: options.sniffInterval, - sniffOnStart: options.sniffOnStart, - sniffOnConnectionFault: options.sniffOnConnectionFault, - sniffEndpoint: options.sniffEndpoint, compression: options.compression, headers: options.headers, - nodeFilter: options.nodeFilter, - nodeSelector: options.nodeSelector, generateRequestId: options.generateRequestId, name: options.name, opaqueIdPrefix: options.opaqueIdPrefix, @@ -375,13 +388,25 @@ export default class Client extends API { productCheck: 'Elasticsearch', maxResponseSize: options.maxResponseSize, maxCompressedResponseSize: options.maxCompressedResponseSize, - vendoredHeaders: { - jsonContentType: 'application/vnd.elasticsearch+json; compatible-with=9', - ndjsonContentType: 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', - accept: 'application/vnd.elasticsearch+json; compatible-with=9,text/plain' - }, redaction: options.redaction - }) + } + if (options.serverMode !== 'serverless') { + transportOptions = Object.assign({}, transportOptions, { + sniffInterval: options.sniffInterval, + sniffOnStart: options.sniffOnStart, + sniffOnConnectionFault: options.sniffOnConnectionFault, + sniffEndpoint: options.sniffEndpoint, + nodeFilter: options.nodeFilter, + nodeSelector: options.nodeSelector, + vendoredHeaders: { + jsonContentType: 'application/vnd.elasticsearch+json; compatible-with=9', + ndjsonContentType: 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', + accept: 'application/vnd.elasticsearch+json; compatible-with=9,text/plain' + } + }) + } + + this.transport = new options.Transport(transportOptions) this.helpers = new Helpers({ client: this, diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts index 3da9a8842..e57f4d092 100644 --- a/test/unit/client.test.ts +++ b/test/unit/client.test.ts @@ -9,7 +9,7 @@ import { setTimeout } from 'node:timers/promises' import { test } from 'tap' import FakeTimers from '@sinonjs/fake-timers' import { buildServer, connection } from '../utils' -import { Client, errors } from '../..' +import { Client, errors, SniffingTransport } from '../..' 
import * as symbols from '@elastic/transport/lib/symbols' import { BaseConnectionPool, CloudConnectionPool, WeightedConnectionPool, HttpConnection } from '@elastic/transport' @@ -558,3 +558,68 @@ test('disablePrototypePoisoningProtection is true by default', async t => { constructorAction: 'ignore' }) }) + +test('serverless defaults', t => { + t.test('uses CloudConnectionPool by default', t => { + const client = new Client({ node: 'http://localhost:9200/', serverMode: 'serverless' }) + t.ok(client.connectionPool instanceof CloudConnectionPool) + t.equal(client.connectionPool.size, 1) + t.end() + }) + + t.test('selects one node if multiple are provided', t => { + const client = new Client({ nodes: ['http://localhost:9200/', 'http://localhost:9201/'], serverMode: 'serverless' }) + t.equal(client.connectionPool.size, 1) + t.end() + }) + + t.test('uses TLSv1_2_method by default', t => { + const client = new Client({ + node: 'https://localhost:9200/', + serverMode: 'serverless', + auth: { + username: 'elastic', + password: 'changeme' + } + }) + + const connection = client.connectionPool.connections.find(c => c.id === 'https://localhost:9200/') + + t.equal(connection?.headers?.authorization, `Basic ${Buffer.from('elastic:changeme').toString('base64')}`) + t.same(connection?.tls, { secureProtocol: 'TLSv1_2_method' }) + t.equal(connection?.url.hostname, 'localhost') + t.equal(connection?.url.protocol, 'https:') + + t.end() + }) + + t.test('elastic-api-version header exists on all requests', async t => { + t.plan(1) + + const Connection = connection.buildMockConnection({ + onRequest (opts) { + t.equal(opts.headers?.['elastic-api-version'], '2023-10-31') + return { + statusCode: 200, + body: { hello: 'world' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + serverMode: 'serverless', + Connection, + }) + + await client.transport.request({ method: 'GET', path: '/' }) + }) + + t.test('sniffing transport not used', t => { + const client = new Client({ node: 'http://localhost:9200/', serverMode: 'serverless' }) + t.ok(!(client.transport instanceof SniffingTransport)) + t.end() + }) + + t.end() +}) From b2a490718d50d2e56814f29ba7a30d930a763a64 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 4 Apr 2025 12:32:12 -0500 Subject: [PATCH 507/647] Update helpers to use new multisearch types (#2697) * Update helpers to use correct multisearch types The spec combined definitions for search and multisearch bodies in https://github.com/elastic/elasticsearch-specification/pull/2960. * Stop copying project files to Dockerfile Slightly faster run times for codegen, hopefully. --- .buildkite/Dockerfile | 2 -- .buildkite/Dockerfile-make | 3 --- src/helpers.ts | 14 +++++++------- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile index 2bf3886dc..0de3234dc 100644 --- a/.buildkite/Dockerfile +++ b/.buildkite/Dockerfile @@ -12,5 +12,3 @@ WORKDIR /usr/src/app COPY package.json . RUN npm install - -COPY . . diff --git a/.buildkite/Dockerfile-make b/.buildkite/Dockerfile-make index 3805eb0a2..b171f5d03 100644 --- a/.buildkite/Dockerfile-make +++ b/.buildkite/Dockerfile-make @@ -25,6 +25,3 @@ USER ${BUILDER_UID}:${BUILDER_GID} # install dependencies COPY package.json . RUN npm install - -# copy project files -COPY . . 
diff --git a/src/helpers.ts b/src/helpers.ts index 89f804b89..46d82e28f 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -41,7 +41,7 @@ export interface MsearchHelperOptions extends T.MsearchRequest { export interface MsearchHelper extends Promise { stop: (error?: Error | null) => void - search: (header: T.MsearchMultisearchHeader, body: T.MsearchMultisearchBody) => Promise> + search: (header: T.MsearchMultisearchHeader, body: T.SearchSearchRequestBody) => Promise> } export interface MsearchHelperResponse { @@ -362,7 +362,7 @@ export default class Helpers { // TODO: support abort a single search? // NOTE: the validation checks are synchronous and the callback/promise will // be resolved in the same tick. We might want to fix this in the future. - search (header: T.MsearchMultisearchHeader, body: T.MsearchMultisearchBody): Promise> { + search (header: T.MsearchMultisearchHeader, body: T.SearchSearchRequestBody): Promise> { if (stopReading) { const error = stopError === null ? new ConfigurationError('The msearch processor has been stopped') @@ -397,7 +397,7 @@ export default class Helpers { async function iterate (): Promise { const { semaphore, finish } = buildSemaphore() - const msearchBody: Array = [] + const msearchBody: Array = [] const callbacks: any[] = [] let loadedOperations = 0 timeoutRef = setTimeout(onFlushTimeout, flushInterval) // eslint-disable-line @@ -490,7 +490,7 @@ export default class Helpers { } } - function send (msearchBody: Array, callbacks: any[]): void { + function send (msearchBody: Array, callbacks: any[]): void { /* istanbul ignore if */ if (running > concurrency) { throw new Error('Max concurrency reached') @@ -508,7 +508,7 @@ export default class Helpers { } } - function msearchOperation (msearchBody: Array, callbacks: any[], done: () => void): void { + function msearchOperation (msearchBody: Array, callbacks: any[], done: () => void): void { let retryCount = retries // Instead of going full on async-await, which would make the code easier to read, @@ -516,7 +516,7 @@ export default class Helpers { // This because every time we use async await, V8 will create multiple promises // behind the scenes, making the code slightly slower. tryMsearch(msearchBody, callbacks, retrySearch) - function retrySearch (msearchBody: Array, callbacks: any[]): void { + function retrySearch (msearchBody: Array, callbacks: any[]): void { if (msearchBody.length > 0 && retryCount > 0) { retryCount -= 1 setTimeout(tryMsearch, wait, msearchBody, callbacks, retrySearch) @@ -528,7 +528,7 @@ export default class Helpers { // This function never returns an error, if the msearch operation fails, // the error is dispatched to all search executors. 
- function tryMsearch (msearchBody: Array, callbacks: any[], done: (msearchBody: Array, callbacks: any[]) => void): void { + function tryMsearch (msearchBody: Array, callbacks: any[], done: (msearchBody: Array, callbacks: any[]) => void): void { client.msearch(Object.assign({}, msearchOptions, { body: msearchBody }), reqOptions as TransportRequestOptionsWithMeta) .then(results => { const retryBody = [] From d5bd34fc23599d8199ff0f786ceea34c1b5f0e40 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Fri, 4 Apr 2025 19:02:57 +0100 Subject: [PATCH 508/647] Auto-generated API code (#2700) --- .../141ef0ebaa3b0772892b79b9bb85efb0.asciidoc | 5 +- docs/reference/api-reference.md | 443 +- src/api/api/inference.ts | 979 +- src/api/api/open_point_in_time.ts | 3 +- src/api/api/watcher.ts | 2 +- src/api/types.ts | 14330 ++++++++++++++-- 6 files changed, 14029 insertions(+), 1733 deletions(-) diff --git a/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc b/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc index 5387dbba3..7d7aeab98 100644 --- a/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc +++ b/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc @@ -3,8 +3,9 @@ [source, js] ---- -const response = await client.inference.update({ - inference_id: "my-inference-endpoint", +const response = await client.inference.put({ + task_type: "my-inference-endpoint", + inference_id: "_update", inference_config: { service_settings: { api_key: "", diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index b175e0db9..cd6d88027 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -480,7 +480,7 @@ client.deleteByQuery({ index }) - **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. -- **`from` (Optional, number)**: Starting offset (default: 0) +- **`from` (Optional, number)**: Skips the specified number of documents. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. @@ -1131,14 +1131,14 @@ client.msearch({ ... }) #### Request (object) [_request_msearch] - **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases to search. 
-- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** +- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** - **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. - **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. - **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. - **`include_named_queries_score` (Optional, boolean)**: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. -- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. +- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`. - **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. - **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. - **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. @@ -1173,7 +1173,7 @@ client.msearchTemplate({ ... }) #### Request (object) [_request_msearch_template] - **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. -- **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** +- **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** - **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. - **`max_concurrent_searches` (Optional, number)**: The maximum number of concurrent searches the API can run. - **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. @@ -1274,6 +1274,7 @@ client.openPointInTime({ index, keep_alive }) - **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`allow_partial_search_results` (Optional, boolean)**: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. +- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. ## client.ping [_ping] Ping the cluster. @@ -1577,7 +1578,7 @@ client.renderSearchTemplate({ ... 
}) - **`id` (Optional, string)**: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. - **`file` (Optional, string)** - **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. -- **`source` (Optional, string)**: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. +- **`source` (Optional, string | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats })**: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. ## client.scriptsPainlessExecute [_scripts_painless_execute] Run a script. @@ -1675,7 +1676,7 @@ client.search({ ... }) - **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. - **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run. - **`rank` (Optional, { rrf })**: The Reciprocal Rank Fusion (RRF) to use. -- **`min_score` (Optional, number)**: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results. +- **`min_score` (Optional, number)**: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in search results and results collected by aggregations. - **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. - **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. 
- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL. @@ -1939,7 +1940,7 @@ client.searchTemplate({ ... }) - **`id` (Optional, string)**: The ID of the search template to use. If no `source` is specified, this parameter is required. - **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. - **`profile` (Optional, boolean)**: If `true`, the query execution is profiled. -- **`source` (Optional, string)**: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. +- **`source` (Optional, string | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats })**: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. - **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. @@ -2207,7 +2208,7 @@ client.updateByQuery({ index }) - **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. 
- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -- **`from` (Optional, number)**: Starting offset (default: 0) +- **`from` (Optional, number)**: Skips the specified number of documents. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. @@ -2352,7 +2353,7 @@ Defaults to 10,000 hits. names matching these patterns in the hits.fields property of the response. - **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. - **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are -not included in the search results. +not included in search results and results collected by aggregations. - **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** - **`profile` (Optional, boolean)** - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. @@ -5015,7 +5016,7 @@ client.fleet.msearch({ ... 
}) #### Request (object) [_request_fleet.msearch] - **`index` (Optional, string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index. -- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** +- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** - **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. - **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. @@ -5065,7 +5066,7 @@ Defaults to 10,000 hits. - **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. - **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are -not included in the search results. +not included in search results and results collected by aggregations. 
- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** - **`profile` (Optional, boolean)** - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. @@ -6747,7 +6748,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. - **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. -- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: +- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -7499,6 +7500,7 @@ client.inference.chatCompletionUnified({ inference_id }) #### Request (object) [_request_inference.chat_completion_unified] - **`inference_id` (string)**: The inference Id +- **`chat_completion_request` (Optional, { messages, model, max_completion_tokens, stop, temperature, tool_choice, tools, top_p })** - **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. ## client.inference.completion [_inference.completion] @@ -7566,6 +7568,7 @@ client.inference.postEisChatCompletion({ eis_inference_id }) #### Request (object) [_request_inference.post_eis_chat_completion] - **`eis_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`chat_completion_request` (Optional, { messages, model, max_completion_tokens, stop, temperature, tool_choice, tools, top_p })** ## client.inference.put [_inference.put] Create an inference endpoint. @@ -7592,12 +7595,193 @@ client.inference.put({ inference_id }) - **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type - **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** +## client.inference.putAlibabacloud [_inference.put_alibabacloud] +Create an AlibabaCloud AI Search inference endpoint. + +Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. 
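+
+For example, a minimal request to create such an endpoint could look like the sketch below. All identifiers and credentials shown are placeholders, not values taken from this changeset:
+
+```ts
+// Hypothetical example: register an AlibabaCloud AI Search text embedding endpoint.
+const resp = await client.inference.putAlibabacloud({
+  task_type: 'text_embedding',
+  alibabacloud_inference_id: 'my-alibabacloud-endpoint', // placeholder endpoint id
+  service: 'alibabacloud-ai-search',
+  service_settings: {
+    api_key: '<alibabacloud-api-key>',                // placeholder credential
+    host: '<workspace-host>.opensearch.aliyuncs.com', // placeholder host
+    service_id: 'ops-text-embedding-001',             // example embedding service id
+    workspace: 'default'
+  }
+})
+console.log(resp)
+```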
+
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud)
+
+```ts
+client.inference.putAlibabacloud({ task_type, alibabacloud_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_alibabacloud]
+
+#### Request (object) [_request_inference.put_alibabacloud]
+- **`task_type` (Enum("completion" | "rerank" | "sparse_embedding" | "text_embedding"))**: The type of the inference task that the model will perform.
+- **`alibabacloud_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("alibabacloud-ai-search"))**: The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`.
+- **`service_settings` ({ api_key, host, rate_limit, service_id, workspace })**: Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { input_type, return_token })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+
+## client.inference.putAmazonbedrock [_inference.put_amazonbedrock]
+Create an Amazon Bedrock inference endpoint.
+
+Creates an inference endpoint to perform an inference task with the `amazonbedrock` service.
+
+> info
+> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
+
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock)
+
+```ts
+client.inference.putAmazonbedrock({ task_type, amazonbedrock_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_amazonbedrock]
+
+#### Request (object) [_request_inference.put_amazonbedrock]
+- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform.
+- **`amazonbedrock_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("amazonbedrock"))**: The type of service supported for the specified task type. In this case, `amazonbedrock`.
+- **`service_settings` ({ access_key, model, provider, region, rate_limit, secret_key })**: Settings used to install the inference model. These settings are specific to the `amazonbedrock` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { max_new_tokens, temperature, top_k, top_p })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+
+## client.inference.putAnthropic [_inference.put_anthropic]
+Create an Anthropic inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `anthropic` service.
+
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic)
+
+```ts
+client.inference.putAnthropic({ task_type, anthropic_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_anthropic]
+
+#### Request (object) [_request_inference.put_anthropic]
+- **`task_type` (Enum("completion"))**: The task type.
+The only valid task type for the model to perform is `completion`.
+- **`anthropic_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("anthropic"))**: The type of service supported for the specified task type. In this case, `anthropic`.
+- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `anthropic` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { max_tokens, temperature, top_k, top_p })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+
+## client.inference.putAzureaistudio [_inference.put_azureaistudio]
+Create an Azure AI Studio inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `azureaistudio` service.
+
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
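+
+As a quick illustration, a minimal sketch of creating such an endpoint is shown below; the API key, target URL, provider, and endpoint type are placeholder assumptions, not values from this changeset:
+
+```ts
+// Hypothetical example: register an Azure AI Studio completion endpoint.
+const resp = await client.inference.putAzureaistudio({
+  task_type: 'completion',
+  azureaistudio_inference_id: 'my-azureaistudio-endpoint', // placeholder endpoint id
+  service: 'azureaistudio',
+  service_settings: {
+    api_key: '<azure-ai-studio-api-key>',                       // placeholder credential
+    target: 'https://example.eastus2.inference.ai.azure.com/',  // placeholder deployment URL
+    provider: 'openai',                                         // assumed provider value
+    endpoint_type: 'token'                                      // assumed endpoint type
+  }
+})
+```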
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio)
+
+```ts
+client.inference.putAzureaistudio({ task_type, azureaistudio_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_azureaistudio]
+
+#### Request (object) [_request_inference.put_azureaistudio]
+- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform.
+- **`azureaistudio_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("azureaistudio"))**: The type of service supported for the specified task type. In this case, `azureaistudio`.
+- **`service_settings` ({ api_key, endpoint_type, target, provider, rate_limit })**: Settings used to install the inference model. These settings are specific to the `azureaistudio` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { do_sample, max_new_tokens, temperature, top_p, user })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+
+## client.inference.putAzureopenai [_inference.put_azureopenai]
+Create an Azure OpenAI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `azureopenai` service.
+
+The list of chat completion models that you can choose from in your Azure OpenAI deployment includes:
+
+* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)
+* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)
+
+The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).
+
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai)
+
+```ts
+client.inference.putAzureopenai({ task_type, azureopenai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_azureopenai]
+
+#### Request (object) [_request_inference.put_azureopenai]
+- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform.
+NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
+- **`azureopenai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("azureopenai"))**: The type of service supported for the specified task type. In this case, `azureopenai`.
+- **`service_settings` ({ api_key, api_version, deployment_id, entra_id, rate_limit, resource_name })**: Settings used to install the inference model. These settings are specific to the `azureopenai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { user })**: Settings to configure the inference task. +These settings are specific to the task type you specified. + +## client.inference.putCohere [_inference.put_cohere] +Create a Cohere inference endpoint. + +Create an inference endpoint to perform an inference task with the `cohere` service. + +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere) + +```ts +client.inference.putCohere({ task_type, cohere_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_cohere] + +#### Request (object) [_request_inference.put_cohere] +- **`task_type` (Enum("completion" | "rerank" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`cohere_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("cohere"))**: The type of service supported for the specified task type. In this case, `cohere`. +- **`service_settings` ({ api_key, embedding_type, model_id, rate_limit, similarity })**: Settings used to install the inference model. +These settings are specific to the `cohere` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task. +These settings are specific to the task type you specified. + ## client.inference.putEis [_inference.put_eis] Create an Elastic Inference Service (EIS) inference endpoint. Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS). -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-eis.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-eis) ```ts client.inference.putEis({ task_type, eis_inference_id, service, service_settings }) @@ -7612,15 +7796,227 @@ NOTE: The `chat_completion` task type only supports streaming and only through t - **`service` (Enum("elastic"))**: The type of service supported for the specified task type. In this case, `elastic`. - **`service_settings` ({ model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `elastic` service. +## client.inference.putElasticsearch [_inference.put_elasticsearch] +Create an Elasticsearch inference endpoint. + +Create an inference endpoint to perform an inference task with the `elasticsearch` service. 
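+
+As a quick sketch, creating an ELSER-backed endpoint through this service might look like the following; the endpoint id and allocation numbers are example values, and `.elser_model_2` is the built-in ELSER model id:
+
+```ts
+// Hypothetical example: deploy the built-in ELSER model behind an inference endpoint.
+const resp = await client.inference.putElasticsearch({
+  task_type: 'sparse_embedding',
+  elasticsearch_inference_id: 'my-elser-endpoint', // placeholder endpoint id
+  service: 'elasticsearch',
+  service_settings: {
+    model_id: '.elser_model_2', // built-in ELSER model id
+    num_allocations: 1,
+    num_threads: 1
+  }
+})
+```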
+
+> info
+> Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create the endpoints using the API if you want to customize the settings.
+
+If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.
+
+> info
+> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.
+
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch)
+
+```ts
+client.inference.putElasticsearch({ task_type, elasticsearch_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_elasticsearch]
+
+#### Request (object) [_request_inference.put_elasticsearch]
+- **`task_type` (Enum("rerank" | "sparse_embedding" | "text_embedding"))**: The type of the inference task that the model will perform.
+- **`elasticsearch_inference_id` (string)**: The unique identifier of the inference endpoint.
+It must not match the `model_id`.
+- **`service` (Enum("elasticsearch"))**: The type of service supported for the specified task type. In this case, `elasticsearch`.
+- **`service_settings` ({ adaptive_allocations, deployment_id, model_id, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elasticsearch` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { return_documents })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+
+## client.inference.putElser [_inference.put_elser]
+Create an ELSER inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `elser` service.
+You can also deploy ELSER by using the Elasticsearch inference integration.
+
+> info
+> Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings.
+
+The API request will automatically download and deploy the ELSER model if it isn't already downloaded.
+
+> info
+> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.
+
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elser) + +```ts +client.inference.putElser({ task_type, elser_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_elser] + +#### Request (object) [_request_inference.put_elser] +- **`task_type` (Enum("sparse_embedding"))**: The type of the inference task that the model will perform. +- **`elser_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("elser"))**: The type of service supported for the specified task type. In this case, `elser`. +- **`service_settings` ({ adaptive_allocations, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elser` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. + +## client.inference.putGoogleaistudio [_inference.put_googleaistudio] +Create an Google AI Studio inference endpoint. + +Create an inference endpoint to perform an inference task with the `googleaistudio` service. + +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio) + +```ts +client.inference.putGoogleaistudio({ task_type, googleaistudio_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_googleaistudio] + +#### Request (object) [_request_inference.put_googleaistudio] +- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`googleaistudio_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("googleaistudio"))**: The type of service supported for the specified task type. In this case, `googleaistudio`. +- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. + +## client.inference.putGooglevertexai [_inference.put_googlevertexai] +Create a Google Vertex AI inference endpoint. + +Create an inference endpoint to perform an inference task with the `googlevertexai` service. + +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. 
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai) + +```ts +client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_googlevertexai] + +#### Request (object) [_request_inference.put_googlevertexai] +- **`task_type` (Enum("rerank" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`googlevertexai_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`. +- **`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { auto_truncate, top_n })**: Settings to configure the inference task. +These settings are specific to the task type you specified. + +## client.inference.putHuggingFace [_inference.put_hugging_face] +Create a Hugging Face inference endpoint. + +Create an inference endpoint to perform an inference task with the `hugging_face` service. + +You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. +Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. +Create the endpoint and copy the URL after the endpoint initialization has been finished. + +The following models are recommended for the Hugging Face service: + +* `all-MiniLM-L6-v2` +* `all-MiniLM-L12-v2` +* `all-mpnet-base-v2` +* `e5-base-v2` +* `e5-small-v2` +* `multilingual-e5-base` +* `multilingual-e5-small` + +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face) + +```ts +client.inference.putHuggingFace({ task_type, huggingface_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_hugging_face] + +#### Request (object) [_request_inference.put_hugging_face] +- **`task_type` (Enum("text_embedding"))**: The type of the inference task that the model will perform. +- **`huggingface_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("hugging_face"))**: The type of service supported for the specified task type. In this case, `hugging_face`. 
+- **`service_settings` ({ api_key, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `hugging_face` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+
+## client.inference.putJinaai [_inference.put_jinaai]
+Create a JinaAI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `jinaai` service.
+
+To review the available `rerank` models, refer to the JinaAI rerank models documentation.
+To review the available `text_embedding` models, refer to the JinaAI embeddings documentation.
+
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai)
+
+```ts
+client.inference.putJinaai({ task_type, jinaai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_jinaai]
+
+#### Request (object) [_request_inference.put_jinaai]
+- **`task_type` (Enum("rerank" | "text_embedding"))**: The type of the inference task that the model will perform.
+- **`jinaai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("jinaai"))**: The type of service supported for the specified task type. In this case, `jinaai`.
+- **`service_settings` ({ api_key, model_id, rate_limit, similarity })**: Settings used to install the inference model. These settings are specific to the `jinaai` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { return_documents, task, top_n })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+
 ## client.inference.putMistral [_inference.put_mistral]
-Configure a Mistral inference endpoint
+Create a Mistral inference endpoint.

-[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-mistral.html)
+Creates an inference endpoint to perform an inference task with the `mistral` service.
+
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
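+
+For instance, a minimal sketch of creating a Mistral embedding endpoint is shown below; the API key and endpoint id are placeholders, and `mistral-embed` is simply one of Mistral's embedding models:
+
+```ts
+// Hypothetical example: register a Mistral text embedding endpoint.
+const resp = await client.inference.putMistral({
+  task_type: 'text_embedding',
+  mistral_inference_id: 'my-mistral-endpoint', // placeholder endpoint id
+  service: 'mistral',
+  service_settings: {
+    api_key: '<mistral-api-key>', // placeholder credential
+    model: 'mistral-embed'
+  }
+})
+```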
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral) ```ts -client.inference.putMistral() +client.inference.putMistral({ task_type, mistral_inference_id, service, service_settings }) ``` +### Arguments [_arguments_inference.put_mistral] + +#### Request (object) [_request_inference.put_mistral] +- **`task_type` (Enum("text_embedding"))**: The task type. +The only valid task type for the model to perform is `text_embedding`. +- **`mistral_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("mistral"))**: The type of service supported for the specified task type. In this case, `mistral`. +- **`service_settings` ({ api_key, max_input_tokens, model, rate_limit })**: Settings used to install the inference model. These settings are specific to the `mistral` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. ## client.inference.putOpenai [_inference.put_openai] Create an OpenAI inference endpoint. @@ -7633,7 +8029,7 @@ To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-openai.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai) ```ts client.inference.putOpenai({ task_type, openai_inference_id, service, service_settings }) @@ -7645,7 +8041,7 @@ client.inference.putOpenai({ task_type, openai_inference_id, service, service_se - **`task_type` (Enum("chat_completion" | "completion" | "text_embedding"))**: The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. - **`openai_inference_id` (string)**: The unique identifier of the inference endpoint. -- **`service` (Enum("elastic"))**: The type of service supported for the specified task type. In this case, `openai`. +- **`service` (Enum("openai"))**: The type of service supported for the specified task type. In this case, `openai`. - **`service_settings` ({ api_key, dimensions, model_id, organization_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `openai` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { user })**: Settings to configure the inference task. @@ -7669,7 +8065,7 @@ client.inference.putVoyageai({ task_type, voyageai_inference_id, service, servic #### Request (object) [_request_inference.put_voyageai] - **`task_type` (Enum("text_embedding" | "rerank"))**: The type of the inference task that the model will perform. - **`voyageai_inference_id` (string)**: The unique identifier of the inference endpoint. -- **`service` (Enum("elastic"))**: The type of service supported for the specified task type. In this case, `voyageai`. +- **`service` (Enum("voyageai"))**: The type of service supported for the specified task type. In this case, `voyageai`. 
- **`service_settings` ({ dimensions, model_id, rate_limit, embedding_type })**: Settings used to install the inference model. These settings are specific to the `voyageai` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { input_type, return_documents, top_k, truncation })**: Settings to configure the inference task. @@ -7700,7 +8096,7 @@ client.inference.putWatsonx({ task_type, watsonx_inference_id, service, service_ - **`task_type` (Enum("text_embedding"))**: The task type. The only valid task type for the model to perform is `text_embedding`. - **`watsonx_inference_id` (string)**: The unique identifier of the inference endpoint. -- **`service` (Enum("elastic"))**: The type of service supported for the specified task type. In this case, `watsonxai`. +- **`service` (Enum("watsonxai"))**: The type of service supported for the specified task type. In this case, `watsonxai`. - **`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service. ## client.inference.rerank [_inference.rerank] @@ -9649,7 +10045,7 @@ specified. - **`definition` (Optional, { preprocessors, trained_model })**: The inference definition for the model. If definition is specified, then compressed_definition cannot be specified. - **`description` (Optional, string)**: A human-readable description of the inference trained model. -- **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The default configuration for inference. This can be either a regression +- **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, learning_to_rank, ner, pass_through, text_embedding, text_expansion, question_answering })**: The default configuration for inference. This can be either a regression or classification configuration. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. @@ -15068,7 +15464,10 @@ To indicate that the request should never timeout, set it to `-1`. Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. -This includes `index.auto_expand_replicas` and `index.number_of_replicas`. +This includes `index.auto_expand_replicas`, `index.number_of_replicas`, `index.routing.allocation.exclude.*`, +`index.routing.allocation.include.*` and `index.routing.allocation.require.*`. +Modification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the +Watcher shards must always be in the `data_content` tier. 
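+
+For example, a sketch of adjusting one of the allowed settings is shown below; the flattened request-body shape is an assumption based on the setting names listed above:
+
+```ts
+// Hypothetical example: let the Watcher internal index auto-expand its replicas.
+const resp = await client.watcher.updateSettings({
+  'index.auto_expand_replicas': '0-1' // example value
+})
+```
+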
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings) diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 09429c394..40cb657cf 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -39,7 +39,9 @@ export default class Inference { path: [ 'inference_id' ], - body: [], + body: [ + 'chat_completion_request' + ], query: [ 'timeout' ] @@ -79,7 +81,9 @@ export default class Inference { path: [ 'eis_inference_id' ], - body: [], + body: [ + 'chat_completion_request' + ], query: [] }, 'inference.put': { @@ -92,6 +96,84 @@ export default class Inference { ], query: [] }, + 'inference.put_alibabacloud': { + path: [ + 'task_type', + 'alibabacloud_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_amazonbedrock': { + path: [ + 'task_type', + 'amazonbedrock_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_anthropic': { + path: [ + 'task_type', + 'anthropic_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_azureaistudio': { + path: [ + 'task_type', + 'azureaistudio_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_azureopenai': { + path: [ + 'task_type', + 'azureopenai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_cohere': { + path: [ + 'task_type', + 'cohere_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, 'inference.put_eis': { path: [ 'task_type', @@ -103,12 +185,91 @@ export default class Inference { ], query: [] }, + 'inference.put_elasticsearch': { + path: [ + 'task_type', + 'elasticsearch_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_elser': { + path: [ + 'task_type', + 'elser_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [] + }, + 'inference.put_googleaistudio': { + path: [ + 'task_type', + 'googleaistudio_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [] + }, + 'inference.put_googlevertexai': { + path: [ + 'task_type', + 'googlevertexai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_hugging_face': { + path: [ + 'task_type', + 'huggingface_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [] + }, + 'inference.put_jinaai': { + path: [ + 'task_type', + 'jinaai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, 'inference.put_mistral': { path: [ 'task_type', 'mistral_inference_id' ], - body: [], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], query: [] }, 'inference.put_openai': { @@ -217,28 +378,30 @@ export default class Inference { async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise async chatCompletionUnified (this: That, params: 
T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise { const { - path: acceptedPath + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery } = this.acceptedParams['inference.chat_completion_unified'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} - let body: Record | string | undefined - const userBody = params?.body - if (userBody != null) { - if (typeof userBody === 'string') { - body = userBody - } else { - body = { ...userBody } - } - } - + let body: any = params.body ?? undefined for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -429,28 +592,30 @@ export default class Inference { async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise { const { - path: acceptedPath + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery } = this.acceptedParams['inference.post_eis_chat_completion'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} - let body: Record | string | undefined - const userBody = params?.body - if (userBody != null) { - if (typeof userBody === 'string') { - body = userBody - } else { - body = { ...userBody } - } - } - + let body: any = params.body ?? undefined for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -521,18 +686,18 @@ export default class Inference { } /** - * Create an Elastic Inference Service (EIS) inference endpoint. Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS). - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-service-eis.html | Elasticsearch API documentation} + * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud | Elasticsearch API documentation} */ - async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise - async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise { + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['inference.put_eis'] + } = this.acceptedParams['inference.put_alibabacloud'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -567,28 +732,30 @@ export default class Inference { } const method = 'PUT' - const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.eis_inference_id.toString())}` + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.alibabacloud_inference_id.toString())}` const meta: TransportRequestMetadata = { - name: 'inference.put_eis', + name: 'inference.put_alibabacloud', pathParts: { task_type: params.task_type, - eis_inference_id: params.eis_inference_id + alibabacloud_inference_id: params.alibabacloud_inference_id } } return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Configure a Mistral inference endpoint - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-service-mistral.html | Elasticsearch API documentation} + * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock | Elasticsearch API documentation} */ - async putMistral (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async putMistral (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async putMistral (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async putMistral (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise { const { - path: acceptedPath - } = this.acceptedParams['inference.put_mistral'] + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_amazonbedrock'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -603,12 +770,718 @@ export default class Inference { } } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.amazonbedrock_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_amazonbedrock', + pathParts: { + task_type: params.task_type, + amazonbedrock_inference_id: params.amazonbedrock_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic | Elasticsearch API documentation} + */ + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_anthropic'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.anthropic_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_anthropic', + pathParts: { + task_type: params.task_type, + anthropic_inference_id: params.anthropic_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio | Elasticsearch API documentation} + */ + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_azureaistudio'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.azureaistudio_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_azureaistudio', + pathParts: { + task_type: params.task_type, + azureaistudio_inference_id: params.azureaistudio_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai | Elasticsearch API documentation} + */ + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_azureopenai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.azureopenai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_azureopenai', + pathParts: { + task_type: params.task_type, + azureopenai_inference_id: params.azureopenai_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere | Elasticsearch API documentation} + */ + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptions): Promise + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_cohere'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.cohere_inference_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'inference.put_cohere',
+      pathParts: {
+        task_type: params.task_type,
+        cohere_inference_id: params.cohere_inference_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+   * Create an Elastic Inference Service (EIS) inference endpoint. Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-eis | Elasticsearch API documentation}
+   */
+  async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutEisResponse>
+  async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutEisResponse, unknown>>
+  async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise<T.InferencePutEisResponse>
+  async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['inference.put_eis']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.eis_inference_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'inference.put_eis',
+      pathParts: {
+        task_type: params.task_type,
+        eis_inference_id: params.eis_inference_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+   * Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the endpoints using the API if you want to customize the settings. If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch | Elasticsearch API documentation}
+   */
+  async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutElasticsearchResponse>
+  async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutElasticsearchResponse, unknown>>
+  async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptions): Promise<T.InferencePutElasticsearchResponse>
+  async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['inference.put_elasticsearch']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.elasticsearch_inference_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'inference.put_elasticsearch',
+      pathParts: {
+        task_type: params.task_type,
+        elasticsearch_inference_id: params.elasticsearch_inference_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+   * Create an ELSER inference endpoint. Create an inference endpoint to perform an inference task with the `elser` service. You can also deploy ELSER by using the Elasticsearch inference integration. > info > Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint, you only need to create the endpoint using the API if you want to customize the settings. The API request will automatically download and deploy the ELSER model if it isn't already downloaded. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elser | Elasticsearch API documentation}
+   */
+  async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutElserResponse>
+  async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutElserResponse, unknown>>
+  async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptions): Promise<T.InferencePutElserResponse>
+  async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['inference.put_elser']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.elser_inference_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'inference.put_elser',
+      pathParts: {
+        task_type: params.task_type,
+        elser_inference_id: params.elser_inference_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+   * Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
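+   * A minimal usage sketch, assuming a configured `client` instance; the endpoint ID, API key, and model choice below are hypothetical, and the `service_settings` field names follow the Google AI Studio service documentation:
+   *
+   *     const resp = await client.inference.putGoogleaistudio({
+   *       task_type: 'completion',
+   *       googleaistudio_inference_id: 'my-googleaistudio-endpoint', // hypothetical ID
+   *       service: 'googleaistudio',
+   *       service_settings: {
+   *         api_key: '<google-ai-studio-api-key>', // hypothetical credential
+   *         model_id: 'gemini-1.5-flash' // hypothetical model choice
+   *       }
+   *     })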
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio | Elasticsearch API documentation} + */ + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_googleaistudio'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.googleaistudio_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_googleaistudio', + pathParts: { + task_type: params.task_type, + googleaistudio_inference_id: params.googleaistudio_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
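+   * A minimal usage sketch, assuming a configured `client` instance; the endpoint ID, credential, project, and region below are hypothetical, and the `service_settings` field names follow the Google Vertex AI service documentation:
+   *
+   *     const resp = await client.inference.putGooglevertexai({
+   *       task_type: 'text_embedding',
+   *       googlevertexai_inference_id: 'my-googlevertexai-endpoint', // hypothetical ID
+   *       service: 'googlevertexai',
+   *       service_settings: {
+   *         service_account_json: '<service-account-json>', // hypothetical credential
+   *         model_id: 'text-embedding-004', // hypothetical model choice
+   *         location: 'us-central1', // hypothetical region
+   *         project_id: 'my-gcp-project' // hypothetical project
+   *       }
+   *     })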
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai | Elasticsearch API documentation} + */ + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_googlevertexai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.googlevertexai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_googlevertexai', + pathParts: { + task_type: params.task_type, + googlevertexai_inference_id: params.googlevertexai_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
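+   * A minimal usage sketch, assuming a configured `client` instance; the endpoint ID, access token, and endpoint URL below are hypothetical, and the `service_settings` field names follow the Hugging Face service documentation:
+   *
+   *     const resp = await client.inference.putHuggingFace({
+   *       task_type: 'text_embedding',
+   *       huggingface_inference_id: 'my-hugging-face-endpoint', // hypothetical ID
+   *       service: 'hugging_face',
+   *       service_settings: {
+   *         api_key: '<hugging-face-access-token>', // hypothetical credential
+   *         url: 'https://example.endpoints.huggingface.cloud' // hypothetical URL copied from the Hugging Face endpoint page
+   *       }
+   *     })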
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face | Elasticsearch API documentation}
+   */
+  async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutHuggingFaceResponse>
+  async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutHuggingFaceResponse, unknown>>
+  async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise<T.InferencePutHuggingFaceResponse>
+  async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['inference.put_hugging_face']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.huggingface_inference_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'inference.put_hugging_face',
+      pathParts: {
+        task_type: params.task_type,
+        huggingface_inference_id: params.huggingface_inference_id
+      }
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+   * Create a JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` and `text_embedding` models, refer to the JinaAI models documentation. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
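+   * A minimal usage sketch, assuming a configured `client` instance; the endpoint ID, API key, and model choice below are hypothetical, and the `service_settings` field names follow the JinaAI service documentation:
+   *
+   *     const resp = await client.inference.putJinaai({
+   *       task_type: 'text_embedding',
+   *       jinaai_inference_id: 'my-jinaai-endpoint', // hypothetical ID
+   *       service: 'jinaai',
+   *       service_settings: {
+   *         api_key: '<jinaai-api-key>', // hypothetical credential
+   *         model_id: 'jina-embeddings-v3' // hypothetical model choice
+   *       }
+   *     })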
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai | Elasticsearch API documentation} + */ + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_jinaai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.jinaai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_jinaai', + pathParts: { + task_type: params.task_type, + jinaai_inference_id: params.jinaai_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral | Elasticsearch API documentation} + */ + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptions): Promise + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_mistral'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -626,7 +1499,7 @@ export default class Inference { /** * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-service-openai.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai | Elasticsearch API documentation} */ async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts index 609ab0c92..a93f89d65 100644 --- a/src/api/api/open_point_in_time.ts +++ b/src/api/api/open_point_in_time.ts @@ -42,7 +42,8 @@ const acceptedParams: Record diff --git a/src/api/types.ts b/src/api/types.ts index fd6f4ac2c..232b6bd1d 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -26,8 +26,11 @@ export interface BulkIndexOperation extends BulkWriteOperation { } export interface BulkOperationBase { + /** The document ID. */ _id?: Id + /** The name of the index or index alias to perform the action on. */ _index?: IndexName + /** A custom value used to route operations to a specific shard. */ routing?: Routing if_primary_term?: long if_seq_no?: SequenceNumber @@ -36,36 +39,58 @@ export interface BulkOperationBase { } export interface BulkOperationContainer { + /** Index the specified document. + * If the document exists, it replaces the document and increments the version. + * The following line must contain the source data to be indexed. */ index?: BulkIndexOperation + /** Index the specified document if it does not already exist. + * The following line must contain the source data to be indexed. */ create?: BulkCreateOperation + /** Perform a partial document update. + * The following line must contain the partial document and update options. */ update?: BulkUpdateOperation + /** Remove the specified document from the index. 
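+   * For example, one action line in a bulk request body: `{ "delete": { "_index": "my-index", "_id": "1" } }` (the index name and document ID are illustrative).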
*/ delete?: BulkDeleteOperation } export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' export interface BulkRequest extends RequestBase { -/** The name of the data stream, index, or index alias to perform bulk actions on. */ + /** The name of the data stream, index, or index alias to perform bulk actions on. */ index?: IndexName /** True or false if to include the document source in the error message in case of parsing errors. */ include_source_on_error?: boolean /** If `true`, the response will include the ingest pipelines that were run for each index or create. */ list_executed_pipelines?: boolean - /** The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. */ + /** The pipeline identifier to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string - /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. */ + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, wait for a refresh to make this operation visible to search. + * If `false`, do nothing with refreshes. + * Valid values: `true`, `false`, `wait_for`. */ refresh?: Refresh /** A custom value that is used to route operations to a specific shard. */ routing?: Routing /** Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. */ _source?: SearchSourceConfigParam - /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields - /** The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. 
The actual wait time could be longer, particularly when multiple waits occur. */ + /** The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. + * The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default is `1`, which waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards /** If `true`, the request's actions must target an index alias. */ require_alias?: boolean @@ -79,50 +104,90 @@ export interface BulkRequest ex } export interface BulkResponse { + /** If `true`, one or more of the operations in the bulk request did not complete successfully. */ errors: boolean + /** The result of each operation in the bulk request, in the order they were submitted. */ items: Partial>[] + /** The length of time, in milliseconds, it took to process the bulk request. */ took: long ingest_took?: long } export interface BulkResponseItem { + /** The document ID associated with the operation. */ _id?: string | null + /** The name of the index associated with the operation. + * If the operation targeted a data stream, this is the backing index into which the document was written. */ _index: string + /** The HTTP status code returned for the operation. */ status: integer failure_store?: BulkFailureStoreStatus + /** Additional information about the failed operation. + * The property is returned only for failed operations. */ error?: ErrorCause + /** The primary term assigned to the document for the operation. + * This property is returned only for successful operations. */ _primary_term?: long + /** The result of the operation. + * Successful values are `created`, `deleted`, and `updated`. */ result?: string + /** The sequence number assigned to the document for the operation. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ _seq_no?: SequenceNumber + /** Shard information for the operation. */ _shards?: ShardStatistics + /** The document version associated with the operation. + * The document version is incremented each time the document is updated. + * This property is returned only for successful actions. */ _version?: VersionNumber forced_refresh?: boolean get?: InlineGet> } export interface BulkUpdateAction { + /** If true, the `result` in the response is set to 'noop' when no changes to the document occur. */ detect_noop?: boolean + /** A partial update to an existing document. */ doc?: TPartialDocument + /** Set to `true` to use the contents of `doc` as the value of `upsert`. */ doc_as_upsert?: boolean - script?: Script | string + /** The script to run to update the document. */ + script?: Script | ScriptSource + /** Set to `true` to run the script whether or not the document exists. */ scripted_upsert?: boolean + /** If `false`, source retrieval is turned off. 
+ * You can also specify a comma-separated list of the fields you want to retrieve. */ _source?: SearchSourceConfig + /** If the document does not already exist, the contents of `upsert` are inserted as a new document. + * If the document exists, the `script` is run. */ upsert?: TDocument } export interface BulkUpdateOperation extends BulkOperationBase { + /** If `true`, the request's actions must target an index alias. */ require_alias?: boolean + /** The number of times an update should be retried in the case of a version conflict. */ retry_on_conflict?: integer } export interface BulkWriteOperation extends BulkOperationBase { + /** A map from the full name of fields to the name of dynamic templates. + * It defaults to an empty map. + * If a name matches a dynamic template, that template will be applied regardless of other match predicates defined in the template. + * If a field is already defined in the mapping, then this parameter won't be used. */ dynamic_templates?: Record + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string + /** If `true`, the request's actions must target an index alias. */ require_alias?: boolean } export interface ClearScrollRequest extends RequestBase { -/** A comma-separated list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. */ + /** A comma-separated list of scroll IDs to clear. + * To clear all scroll IDs, use `_all`. + * IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. */ scroll_id?: ScrollIds /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { scroll_id?: never } @@ -131,12 +196,15 @@ export interface ClearScrollRequest extends RequestBase { } export interface ClearScrollResponse { + /** If `true`, the request succeeded. + * This does not indicate whether any scrolling search requests were cleared. */ succeeded: boolean + /** The number of scrolling search requests cleared. */ num_freed: integer } export interface ClosePointInTimeRequest extends RequestBase { -/** The ID of the point-in-time. */ + /** The ID of the point-in-time. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -145,42 +213,64 @@ export interface ClosePointInTimeRequest extends RequestBase { } export interface ClosePointInTimeResponse { + /** If `true`, all search contexts associated with the point-in-time ID were successfully closed. */ succeeded: boolean + /** The number of search contexts that were successfully closed. */ num_freed: integer } export interface CountRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*` or `_all`. 
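+   * For example, `my-index-000001` or `logs-*` (illustrative index names).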
*/ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string - /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean - /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator - /** The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The field to use as a default when no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, concrete, expanded, or aliased indices are ignored when frozen. */ ignore_throttled?: boolean /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean /** The minimum `_score` value that documents must have to be included in the result. */ min_score?: double - /** The node or shard the operation should be performed on. By default, it is random. */ + /** The node or shard the operation should be performed on. + * By default, it is random. 
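+   * For example, `_local` runs the operation on shards of the local node where possible (a standard preference value).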
*/ preference?: string /** A custom value used to route operations to a specific shard. */ routing?: Routing - /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long /** The query in Lucene query string syntax. This parameter cannot be used with a request body. */ q?: string - /** Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter. */ + /** Defines the search query using Query DSL. A request body query cannot be used + * with the `q` query string parameter. */ query?: QueryDslQueryContainer /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, routing?: never, terminate_after?: never, q?: never, query?: never } @@ -194,9 +284,12 @@ export interface CountResponse { } export interface CreateRequest extends RequestBase { -/** A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. */ + /** A unique identifier for the document. + * To automatically generate a document ID, use the `POST //_doc/` request format. */ id: Id - /** The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index. */ + /** The name of the data stream or index to target. + * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. + * If the target doesn't exist and doesn’t match a data stream template, this request creates the index. */ index: IndexName /** Only perform the operation if the document has this primary term. */ if_primary_term?: long @@ -204,11 +297,20 @@ export interface CreateRequest extends RequestBase { if_seq_no?: SequenceNumber /** True or false if to include the document source in the error message in case of parsing errors. */ include_source_on_error?: boolean - /** Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. 
The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. */
+  /** Set to `create` to only index the document if it does not already exist (put if absent).
+   * If a document with the specified `_id` already exists, the indexing operation will fail.
+   * The behavior is the same as using the `/_create` endpoint.
+   * If a document ID is specified, this parameter defaults to `index`.
+   * Otherwise, it defaults to `create`.
+   * If the request targets a data stream, an `op_type` of `create` is required. */
   op_type?: OpType
-  /** The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. */
+  /** The ID of the pipeline to use to preprocess incoming documents.
+   * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.
+   * If a final pipeline is configured, it will always run regardless of the value of this parameter. */
   pipeline?: string
-  /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. */
+  /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.
+   * If `wait_for`, it waits for a refresh to make this operation visible to search.
+   * If `false`, it does nothing with refreshes. */
   refresh?: Refresh
   /** If `true`, the destination must be an index alias. */
   require_alias?: boolean
@@ -216,13 +318,23 @@ export interface CreateRequest<TDocument = unknown> extends RequestBase {
   require_data_stream?: boolean
   /** A custom value that is used to route operations to a specific shard. */
   routing?: Routing
-  /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. */
+  /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.
+   * Elasticsearch waits for at least the specified timeout period before failing.
+   * The actual wait time could be longer, particularly when multiple waits occur.
+   *
+   * This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs.
+   * Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation.
+ * By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration - /** The explicit version number for concurrency control. It must be a non-negative long number. */ + /** The explicit version number for concurrency control. + * It must be a non-negative long number. */ version?: VersionNumber /** The version type. */ version_type?: VersionType - /** The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. */ + /** The number of shard copies that must be active before proceeding with the operation. + * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards document?: TDocument /** All values in `body` will be added to the request body. */ @@ -234,7 +346,7 @@ export interface CreateRequest extends RequestBase { export type CreateResponse = WriteResponseBase export interface DeleteRequest extends RequestBase { -/** A unique identifier for the document. */ + /** A unique identifier for the document. */ id: Id /** The name of the target index. */ index: IndexName @@ -242,17 +354,26 @@ export interface DeleteRequest extends RequestBase { if_primary_term?: long /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber - /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. */ + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, it waits for a refresh to make this operation visible to search. + * If `false`, it does nothing with refreshes. */ refresh?: Refresh /** A custom value used to route operations to a specific shard. */ routing?: Routing - /** The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. */ + /** The period to wait for active shards. + * + * This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. + * Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. + * By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. */ timeout?: Duration - /** An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. */ + /** An explicit version number for concurrency control. 
+ * It must match the current version of the document for the request to succeed. */ version?: VersionNumber /** The version type. */ version_type?: VersionType - /** The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. */ + /** The minimum number of shard copies that must be active before proceeding with the operation. + * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never } @@ -263,33 +384,48 @@ export interface DeleteRequest extends RequestBase { export type DeleteResponse = WriteResponseBase export interface DeleteByQueryRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** Analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string - /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean /** What to do if delete by query hits version conflicts: `abort` or `proceed`. */ conflicts?: Conflicts - /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator - /** The field to use as default where no field prefix is given in the query string. 
This parameter can be used only when the `q` query string parameter is specified. */ + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards - /** Starting offset (default: 0) */ + /** Skips the specified number of documents. */ from?: long /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string - /** If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`. */ + /** If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. + * This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. + * Unlike the delete API, it does not support `wait_for`. */ refresh?: boolean - /** If `true`, the request cache is used for this request. Defaults to the index-level setting. */ + /** If `true`, the request cache is used for this request. + * Defaults to the index-level setting. */ request_cache?: boolean /** The throttle for this request in sub-requests per second. */ requests_per_second?: float @@ -301,9 +437,11 @@ export interface DeleteByQueryRequest extends RequestBase { scroll?: Duration /** The size of the scroll request that powers the operation. */ scroll_size?: long - /** The explicit timeout for each search request. It defaults to no timeout. */ + /** The explicit timeout for each search request. + * It defaults to no timeout. */ search_timeout?: Duration - /** The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. */ + /** The type of the search operation. + * Available options include `query_then_fetch` and `dfs_query_then_fetch`. */ search_type?: SearchType /** The number of slices this task should be divided into. */ slices?: Slices @@ -311,15 +449,25 @@ export interface DeleteByQueryRequest extends RequestBase { sort?: string[] /** The specific `tag` of the request for logging and statistical purposes. 
*/ stats?: string[] - /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long /** The period each deletion request waits for active shards. */ timeout?: Duration /** If `true`, returns the document version as part of a hit. */ version?: boolean - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The `timeout` value controls how long each write request waits for unavailable shards to become available. */ wait_for_active_shards?: WaitForActiveShards - /** If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. */ + /** If `true`, the request blocks until the operation is complete. + * If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. */ wait_for_completion?: boolean /** The maximum number of documents to delete. */ max_docs?: long @@ -334,28 +482,48 @@ export interface DeleteByQueryRequest extends RequestBase { } export interface DeleteByQueryResponse { + /** The number of scroll responses pulled back by the delete by query. */ batches?: long + /** The number of documents that were successfully deleted. */ deleted?: long + /** An array of failures if there were any unrecoverable errors during the process. + * If this array is not empty, the request ended abnormally because of those failures. + * Delete by query is implemented using batches and any failures cause the entire process to end but all failures in the current batch are collected into the array. + * You can use the `conflicts` option to prevent reindex from ending on version conflicts. 
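   *
   * For illustration, a minimal hedged sketch of driving this API from the
   * client and inspecting the failures (the `client` instance, index name,
   * and query are assumed example values, not part of this declaration):
   *
   *     const res = await client.deleteByQuery({
   *       index: 'my-index',
   *       conflicts: 'proceed',
   *       query: { match: { status: 'stale' } }
   *     })
   *     if (res.failures?.length) console.warn('ended early:', res.failures)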
*/ failures?: BulkIndexByScrollFailure[] + /** This field is always equal to zero for delete by query. + * It exists only so that delete by query, update by query, and reindex APIs return responses with the same structure. */ noops?: long + /** The number of requests per second effectively run during the delete by query. */ requests_per_second?: float + /** The number of retries attempted by delete by query. + * `bulk` is the number of bulk actions retried. + * `search` is the number of search actions retried. */ retries?: Retries slice_id?: integer task?: TaskId throttled?: Duration + /** The number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis?: DurationValue throttled_until?: Duration + /** This field should always be equal to zero in a `_delete_by_query` response. + * It has meaning only when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`. */ throttled_until_millis?: DurationValue + /** If `true`, some requests run during the delete by query operation timed out. */ timed_out?: boolean + /** The number of milliseconds from start to end of the whole operation. */ took?: DurationValue + /** The number of documents that were successfully processed. */ total?: long + /** The number of version conflicts that the delete by query hit. */ version_conflicts?: long } export interface DeleteByQueryRethrottleRequest extends RequestBase { -/** The ID for the task. */ + /** The ID for the task. */ task_id: TaskId - /** The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. */ + /** The throttle for this request in sub-requests per second. + * To disable throttling, set it to `-1`. */ requests_per_second?: float /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } @@ -366,11 +534,15 @@ export interface DeleteByQueryRethrottleRequest extends RequestBase { export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase export interface DeleteScriptRequest extends RequestBase { -/** The identifier for the stored script or search template. */ + /** The identifier for the stored script or search template. */ id: Id - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } @@ -381,27 +553,43 @@ export interface DeleteScriptRequest extends RequestBase { export type DeleteScriptResponse = AcknowledgedResponseBase export interface ExistsRequest extends RequestBase { -/** A unique document identifier. */ + /** A unique document identifier. */ id: Id - /** A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). */ + /** A comma-separated list of data streams, indices, and aliases. + * It supports wildcards (`*`). */ index: IndexName - /** The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. */ + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. + * + * If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. + * If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. + * This can help with "jumping values" when hitting different shards in different refresh states. + * A sample value can be something like the web session ID or the user name. */ preference?: string /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean - /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean /** A custom value used to route operations to a specific shard. */ routing?: Routing /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ _source?: SearchSourceConfigParam - /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. 
+ * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields - /** A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. */ + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` parameter defaults to `false`. */ stored_fields?: Fields - /** Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. */ + /** Explicit version number for concurrency control. + * The specified version must match the current version of the document for the request to succeed. */ version?: VersionNumber /** The version type. */ version_type?: VersionType @@ -414,15 +602,18 @@ export interface ExistsRequest extends RequestBase { export type ExistsResponse = boolean export interface ExistsSourceRequest extends RequestBase { -/** A unique identifier for the document. */ + /** A unique identifier for the document. */ id: Id - /** A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). */ + /** A comma-separated list of data streams, indices, and aliases. + * It supports wildcards (`*`). */ index: IndexName - /** The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. */ + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. */ preference?: string /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean - /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean /** A custom value used to route operations to a specific shard. */ routing?: Routing @@ -432,7 +623,8 @@ export interface ExistsSourceRequest extends RequestBase { _source_excludes?: Fields /** A comma-separated list of source fields to include in the response. */ _source_includes?: Fields - /** The version number for concurrency control. It must match the current version of the document for the request to succeed. */ + /** The version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ version?: VersionNumber /** The version type. */ version_type?: VersionType @@ -457,29 +649,41 @@ export interface ExplainExplanationDetail { } export interface ExplainRequest extends RequestBase { -/** The document identifier. */ + /** The document identifier. */ id: Id - /** Index names that are used to limit the request. Only a single index name can be provided to this parameter. */ + /** Index names that are used to limit the request. + * Only a single index name can be provided to this parameter. 
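   *
   * As a hedged usage sketch (the `client` instance, index, document ID, and
   * query are illustrative assumptions):
   *
   *     const res = await client.explain({
   *       index: 'my-index',
   *       id: '1',
   *       query: { match: { message: 'elasticsearch' } }
   *     })
   *     console.log(res.matched, res.explanation)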
*/ index: IndexName - /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string - /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean - /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator - /** The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string - /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string /** A custom value used to route operations to a specific shard. */ routing?: Routing /** `True` or `false` to return the `_source` field or not or a list of fields to return. */ _source?: SearchSourceConfigParam - /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields /** A comma-separated list of stored fields to return in the response. 
 */
   stored_fields?: Fields
@@ -502,24 +706,44 @@ export interface ExplainResponse {
 }
 
 export interface FieldCapsFieldCapability {
+  /** Whether this field can be aggregated on all indices. */
   aggregatable: boolean
+  /** The list of indices where this field has the same type family, or null if all indices have the same type family for the field. */
   indices?: Indices
+  /** Merged metadata across all indices as a map of string keys to arrays of values. A value length of 1 indicates that all indices had the same value for this key, while a length of 2 or more indicates that not all indices had the same value for this key. */
   meta?: Metadata
+  /** The list of indices where this field is not aggregatable, or null if all indices have the same definition for the field. */
   non_aggregatable_indices?: Indices
+  /** The list of indices where this field is not searchable, or null if all indices have the same definition for the field. */
   non_searchable_indices?: Indices
+  /** Whether this field is indexed for search on all indices. */
   searchable: boolean
   type: string
+  /** Whether this field is registered as a metadata field. */
   metadata_field?: boolean
+  /** Whether this field is used as a time series dimension.
+   * @experimental */
   time_series_dimension?: boolean
+  /** Contains the metric type if this field is used as a time series
+   * metric; absent if the field is not used as a metric.
+   * @experimental */
   time_series_metric?: MappingTimeSeriesMetricType
+  /** If this list is present in the response, then some indices have the
+   * field marked as a dimension while the other indices, the ones in this list, do not.
+   * @experimental */
   non_dimension_indices?: IndexName[]
+  /** The list of indices where this field is present if these indices
+   * don’t have the same `time_series_metric` value for this field.
+   * @experimental */
   metric_conflicts_indices?: IndexName[]
 }
 
 export interface FieldCapsRequest extends RequestBase {
-/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. */
+  /** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. */
   index?: Indices
-  /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. */
+  /** If false, the request returns an error if any wildcard expression, index alias,
+   * or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request
+   * targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. */
   allow_no_indices?: boolean
   /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */
   expand_wildcards?: ExpandWildcards
   /** If `true`, missing or closed indices are not included in the response. */
   ignore_unavailable?: boolean
   /** If `true`, unmapped fields are included in the response. */
   include_unmapped?: boolean
   /** A comma-separated list of filters to apply to the response.
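   *
   * A hedged sketch of how these request options might be combined (the
   * `client` instance, index name, and chosen filter value are illustrative
   * assumptions):
   *
   *     const caps = await client.fieldCaps({
   *       index: 'my-index',
   *       fields: '*',
   *       types: ['keyword', 'long'],
   *       filters: '-metadata'
   *     })
   *     console.log(Object.keys(caps.fields))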
*/ filters?: string - /** A comma-separated list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. */ + /** A comma-separated list of field types to include. + * Any fields that do not match one of these types will be excluded from the results. + * It defaults to empty, meaning that all field types are returned. */ types?: string[] /** If false, empty fields are not included in the response. */ include_empty_fields?: boolean /** A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. */ fields?: Fields - /** Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. */ + /** Filter indices if the provided query rewrites to `match_none` on every shard. + * + * IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. + * For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. + * However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. */ index_filter?: QueryDslQueryContainer - /** Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. */ + /** Define ad-hoc runtime fields in the request similar to the way it is done in search requests. + * These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. */ runtime_mappings?: MappingRuntimeFields /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, fields?: never, index_filter?: never, runtime_mappings?: never } @@ -546,47 +777,78 @@ export interface FieldCapsRequest extends RequestBase { } export interface FieldCapsResponse { + /** The list of indices where this field has the same type family, or null if all indices have the same type family for the field. */ indices: Indices fields: Record> } export interface GetGetResult { + /** The name of the index the document belongs to. */ _index: IndexName + /** If the `stored_fields` parameter is set to `true` and `found` is `true`, it contains the document fields stored in the index. */ fields?: Record _ignored?: string[] + /** Indicates whether the document exists. */ found: boolean + /** The unique identifier for the document. */ _id: Id + /** The primary term assigned to the document for the indexing operation. */ _primary_term?: long + /** The explicit routing, if set. 
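   *
   * For illustration, a minimal sketch of reading these result fields (the
   * `client` instance, document shape, index, and ID are assumed values):
   *
   *     const res = await client.get<{ title: string }>({ index: 'my-index', id: '1' })
   *     if (res.found) console.log(res._source?.title, res._routing, res._version)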
 */
   _routing?: string
+  /** The sequence number assigned to the document for the indexing operation.
+   * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */
   _seq_no?: SequenceNumber
+  /** If `found` is `true`, it contains the document data formatted in JSON.
+   * If the `_source` parameter is set to `false` or the `stored_fields` parameter is set to `true`, it is excluded. */
   _source?: TDocument
+  /** The document version, which is incremented each time the document is updated. */
   _version?: VersionNumber
 }
 
 export interface GetRequest extends RequestBase {
-/** A unique document identifier. */
+  /** A unique document identifier. */
   id: Id
   /** The name of the index that contains the document. */
   index: IndexName
-  /** Indicates whether the request forces synthetic `_source`. Use this paramater to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. */
+  /** Indicates whether the request forces synthetic `_source`.
+   * Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance.
+   * Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. */
   force_synthetic_source?: boolean
-  /** The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. */
+  /** The node or shard the operation should be performed on.
+   * By default, the operation is randomized between the shard replicas.
+   *
+   * If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible.
+   * If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value.
+   * This can help with "jumping values" when hitting different shards in different refresh states.
+   * A sample value can be something like the web session ID or the user name. */
   preference?: string
   /** If `true`, the request is real-time as opposed to near-real-time. */
   realtime?: boolean
-  /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */
+  /** If `true`, the request refreshes the relevant shards before retrieving the document.
+   * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */
   refresh?: boolean
   /** A custom value used to route operations to a specific shard. */
   routing?: Routing
   /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */
   _source?: SearchSourceConfigParam
-  /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
If the `_source` parameter is `false`, this parameter is ignored. */
+  /** A comma-separated list of source fields to exclude from the response.
+   * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
+   * If the `_source` parameter is `false`, this parameter is ignored. */
   _source_excludes?: Fields
-  /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */
+  /** A comma-separated list of source fields to include in the response.
+   * If this parameter is specified, only these source fields are returned.
+   * You can exclude fields from this subset using the `_source_excludes` query parameter.
+   * If the `_source` parameter is `false`, this parameter is ignored. */
   _source_includes?: Fields
-  /** A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can't be returned;if specified, the request fails. */
+  /** A comma-separated list of stored fields to return as part of a hit.
+   * If no fields are specified, no stored fields are included in the response.
+   * If this field is specified, the `_source` parameter defaults to `false`.
+   * Only leaf fields can be retrieved with the `stored_field` option.
+   * Object fields can't be returned; if specified, the request fails. */
   stored_fields?: Fields
-  /** The version number for concurrency control. It must match the current version of the document for the request to succeed. */
+  /** The version number for concurrency control.
+   * It must match the current version of the document for the request to succeed. */
   version?: VersionNumber
   /** The version type. */
   version_type?: VersionType
@@ -599,9 +861,11 @@ export interface GetRequest extends RequestBase {
 
 export type GetResponse = GetGetResult
 
 export interface GetScriptRequest extends RequestBase {
-/** The identifier for the stored script or search template. */
+  /** The identifier for the stored script or search template. */
   id: Id
-  /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */
+  /** The period to wait for the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * It can also be set to `-1` to indicate that the request should never timeout. */
   master_timeout?: Duration
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { id?: never, master_timeout?: never }
@@ -660,15 +924,17 @@ export interface GetScriptLanguagesResponse {
 }
 
 export interface GetSourceRequest extends RequestBase {
-/** A unique document identifier. */
+  /** A unique document identifier. */
   id: Id
   /** The name of the index that contains the document. */
   index: IndexName
-  /** The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. */
+  /** The node or shard the operation should be performed on.
+ * By default, the operation is randomized between the shard replicas. */ preference?: string /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean - /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean /** A custom value used to route operations to a specific shard. */ routing?: Routing @@ -680,7 +946,8 @@ export interface GetSourceRequest extends RequestBase { _source_includes?: Fields /** A comma-separated list of stored fields to return as part of a hit. */ stored_fields?: Fields - /** The version number for concurrency control. It must match the current version of the document for the request to succeed. */ + /** The version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ version?: VersionNumber /** The version type. */ version_type?: VersionType @@ -817,7 +1084,7 @@ export interface HealthReportRepositoryIntegrityIndicatorDetails { } export interface HealthReportRequest extends RequestBase { -/** A feature of the cluster, as returned by the top-level health report API. */ + /** A feature of the cluster, as returned by the top-level health report API. */ feature?: string | string[] /** Explicit operation timeout. */ timeout?: Duration @@ -890,9 +1157,13 @@ export interface HealthReportStagnatingBackingIndices { } export interface IndexRequest extends RequestBase { -/** A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter. */ + /** A unique identifier for the document. + * To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter. */ id?: Id - /** The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API. */ + /** The name of the data stream or index to target. + * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. + * If the target doesn't exist and doesn't match a data stream template, this request creates the index. + * You can check for existing targets with the resolve index API. */ index: IndexName /** Only perform the operation if the document has this primary term. */ if_primary_term?: long @@ -900,21 +1171,38 @@ export interface IndexRequest extends RequestBase { if_seq_no?: SequenceNumber /** True or false if to include the document source in the error message in case of parsing errors. */ include_source_on_error?: boolean - /** Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. 
The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. */
+  /** Set to `create` to only index the document if it does not already exist (put if absent).
+   * If a document with the specified `_id` already exists, the indexing operation will fail.
+   * The behavior is the same as using the `/_create` endpoint.
+   * If a document ID is specified, this parameter defaults to `index`.
+   * Otherwise, it defaults to `create`.
+   * If the request targets a data stream, an `op_type` of `create` is required. */
   op_type?: OpType
-  /** The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. */
+  /** The ID of the pipeline to use to preprocess incoming documents.
+   * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.
+   * If a final pipeline is configured it will always run, regardless of the value of this parameter. */
   pipeline?: string
-  /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. */
+  /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.
+   * If `wait_for`, it waits for a refresh to make this operation visible to search.
+   * If `false`, it does nothing with refreshes. */
   refresh?: Refresh
   /** A custom value that is used to route operations to a specific shard. */
   routing?: Routing
-  /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. */
+  /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.
+   *
+   * This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs.
+   * Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation.
+   * By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error.
+   * The actual wait time could be longer, particularly when multiple waits occur. */
   timeout?: Duration
-  /** An explicit version number for concurrency control. It must be a non-negative long number. */
+  /** An explicit version number for concurrency control.
+   * It must be a non-negative long number. */
   version?: VersionNumber
   /** The version type.
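   *
   * As a hedged sketch of external version-based concurrency control (the
   * `client` instance, index, ID, and document are illustrative assumptions):
   *
   *     await client.index({
   *       index: 'my-index',
   *       id: '1',
   *       version: 5,
   *       version_type: 'external',
   *       document: { title: 'hello world' }
   *     })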
*/ version_type?: VersionType - /** The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. */ + /** The number of shard copies that must be active before proceeding with the operation. + * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards /** If `true`, the destination must be an index alias. */ require_alias?: boolean @@ -935,27 +1223,41 @@ export interface InfoRequest extends RequestBase { } export interface InfoResponse { + /** The responding cluster's name. */ cluster_name: Name cluster_uuid: Uuid + /** The responding node's name. */ name: Name tagline: string + /** The running version of Elasticsearch. */ version: ElasticsearchVersionInfo } export interface KnnSearchRequest extends RequestBase { -/** A comma-separated list of index names to search; use `_all` or to perform the operation on all indices. */ + /** A comma-separated list of index names to search; + * use `_all` or to perform the operation on all indices. */ index: Indices /** A comma-separated list of specific routing values. */ routing?: Routing - /** Indicates which source fields are returned for matching documents. These fields are returned in the `hits._source` property of the search response. */ + /** Indicates which source fields are returned for matching documents. These + * fields are returned in the `hits._source` property of the search response. */ _source?: SearchSourceConfig - /** The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. */ + /** The request returns doc values for field names matching these patterns + * in the `hits.fields` property of the response. + * It accepts wildcard (`*`) patterns. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - /** A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. */ + /** A list of stored fields to return as part of a hit. If no fields are specified, + * no stored fields are included in the response. If this field is specified, the `_source` + * parameter defaults to `false`. You can pass `_source: true` to return both source fields + * and stored fields in the search response. */ stored_fields?: Fields - /** The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. */ + /** The request returns values for field names matching these patterns + * in the `hits.fields` property of the response. + * It accepts wildcard (`*`) patterns. */ fields?: Fields - /** A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. */ + /** A query to filter the documents that can match. 
The kNN search will return the top
+   * `k` documents that also match this filter. The value can be a single query or a
+   * list of queries. If `filter` isn't provided, all documents are allowed to match. */
   filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
   /** The kNN query to run. */
   knn: KnnSearchQuery
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { index?: never, routing?: never, _source?: never, docvalue_fields?: never, stored_fields?: never, fields?: never, filter?: never, knn?: never }
   /** All values in `querystring` will be added to the request querystring. */
   querystring?: { [key: string]: any } & { index?: never, routing?: never, _source?: never, docvalue_fields?: never, stored_fields?: never, fields?: never, filter?: never, knn?: never }
 }
 
 export interface KnnSearchResponse {
+  /** The milliseconds it took Elasticsearch to run the request. */
   took: long
+  /** If true, the request timed out before completion;
+   * returned results may be partial or empty. */
   timed_out: boolean
+  /** A count of shards used for the request. */
   _shards: ShardStatistics
+  /** The returned documents and metadata. */
   hits: SearchHitsMetadata
+  /** The field values for the documents. These fields
+   * must be specified in the request using the `fields` parameter. */
   fields?: Record
+  /** The highest returned document score. This value is null for requests
+   * that do not sort by score. */
   max_score?: double
 }
 
 export interface KnnSearchQuery {
+  /** The name of the vector field to search against */
   field: Field
+  /** The query vector */
   query_vector: QueryVector
+  /** The final number of nearest neighbors to return as top hits */
   k: integer
+  /** The number of nearest neighbor candidates to consider per shard */
   num_candidates: integer
 }
 
@@ -988,19 +1303,26 @@ export interface MgetMultiGetError {
 }
 
 export interface MgetOperation {
+  /** The unique document ID. */
   _id: Id
+  /** The index that contains the document. */
   _index?: IndexName
+  /** The key for the primary shard the document resides on. Required if routing is used during indexing. */
   routing?: Routing
+  /** If `false`, excludes all _source fields. */
   _source?: SearchSourceConfig
+  /** The stored fields you want to retrieve. */
   stored_fields?: Fields
   version?: VersionNumber
   version_type?: VersionType
 }
 
 export interface MgetRequest extends RequestBase {
-/** Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. */
+  /** Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. */
   index?: IndexName
-  /** Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. */
+  /** Should this request force synthetic _source?
+   * Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance.
+   * Fetches with this enabled will be slower than enabling synthetic source natively in the index. */
   force_synthetic_source?: boolean
   /** Specifies the node or shard the operation should be performed on. Random by default. */
   preference?: string
   /** If `true`, the request is real-time as opposed to near-real-time. */
   realtime?: boolean
   /** If `true`, the request refreshes relevant shards before retrieving documents. */
   refresh?: boolean
   /** Custom value used to route operations to a specific shard. */
   routing?: Routing
   /** True or false to return the `_source` field or not, or a list of fields to return. */
   _source?: SearchSourceConfigParam
-  /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. */
+  /** A comma-separated list of source fields to exclude from the response.
+   * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
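   *
   * A minimal hedged sketch of source filtering with multi-get (the `client`
   * instance, index, IDs, and field pattern are assumed example values):
   *
   *     const res = await client.mget<{ title: string }>({
   *       index: 'my-index',
   *       ids: ['1', '2'],
   *       _source_excludes: 'internal.*'
   *     })
   *     console.log(res.docs.length)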
*/ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields /** If `true`, retrieves the document fields stored in the index rather than the document `_source`. */ stored_fields?: Fields @@ -1029,6 +1354,9 @@ export interface MgetRequest extends RequestBase { } export interface MgetResponse { + /** The response includes a docs array that contains the documents in the order specified in the request. + * The structure of the returned documents is similar to that returned by the get API. + * If there is a failure getting a particular document, the error is included in place of the document. */ docs: MgetResponseItem[] } @@ -1043,41 +1371,6 @@ export interface MsearchMultiSearchResult[] } -export interface MsearchMultisearchBody { - aggregations?: Record - aggs?: Record - collapse?: SearchFieldCollapse - query?: QueryDslQueryContainer - explain?: boolean - ext?: Record - stored_fields?: Fields - docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnSearch | KnnSearch[] - from?: integer - highlight?: SearchHighlight - indices_boost?: Record[] - min_score?: double - post_filter?: QueryDslQueryContainer - profile?: boolean - rescore?: SearchRescore | SearchRescore[] - script_fields?: Record - search_after?: SortResults - size?: integer - sort?: Sort - _source?: SearchSourceConfig - fields?: (QueryDslFieldAndFormat | Field)[] - terminate_after?: long - stats?: string[] - timeout?: string - track_scores?: boolean - track_total_hits?: SearchTrackHits - version?: boolean - runtime_mappings?: MappingRuntimeFields - seq_no_primary_term?: boolean - pit?: SearchPointInTimeReference - suggest?: SearchSuggester -} - export interface MsearchMultisearchHeader { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards @@ -1093,7 +1386,7 @@ export interface MsearchMultisearchHeader { } export interface MsearchRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and index aliases to search. */ + /** Comma-separated list of data streams, indices, and index aliases to search. */ index?: Indices /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean @@ -1105,12 +1398,18 @@ export interface MsearchRequest extends RequestBase { ignore_throttled?: boolean /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean - /** Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. 
Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. */ + /** Indicates whether hit.matched_queries should be rendered as a map that includes + * the name of the matched query associated with its score (true) + * or as an array containing the name of the matched queries (false) + * This functionality reruns each named query on every hit in a search response. + * Typically, this adds a small overhead to a request. + * However, using computationally expensive named queries on a large number of hits may add significant overhead. */ include_named_queries_score?: boolean - /** Maximum number of concurrent searches the multi search API can execute. */ - max_concurrent_searches?: long + /** Maximum number of concurrent searches the multi search API can execute. + * Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`. */ + max_concurrent_searches?: integer /** Maximum number of concurrent shard requests that each sub-search request executes per node. */ - max_concurrent_shard_requests?: long + max_concurrent_shard_requests?: integer /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */ pre_filter_shard_size?: long /** If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. */ @@ -1128,14 +1427,16 @@ export interface MsearchRequest extends RequestBase { querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never } } -export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody +export type MsearchRequestItem = MsearchMultisearchHeader | SearchSearchRequestBody export type MsearchResponse> = MsearchMultiSearchResult export type MsearchResponseItem = MsearchMultiSearchItem | ErrorResponseBase export interface MsearchTemplateRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*`. */ index?: Indices /** If `true`, network round-trips are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean @@ -1143,7 +1444,8 @@ export interface MsearchTemplateRequest extends RequestBase { max_concurrent_searches?: long /** The type of the search operation. */ search_type?: SearchType - /** If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. */ + /** If `true`, the response returns `hits.total` as an integer. 
+ * If `false`, it returns `hits.total` as an object. */ rest_total_hits_as_int?: boolean /** If `true`, the response prefixes aggregation and suggester names with their respective types. */ typed_keys?: boolean @@ -1159,33 +1461,58 @@ export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTempl export type MsearchTemplateResponse> = MsearchMultiSearchResult export interface MsearchTemplateTemplateConfig { + /** If `true`, returns detailed information about score calculation as part of each hit. */ explain?: boolean + /** The ID of the search template to use. If no `source` is specified, + * this parameter is required. */ id?: Id + /** Key-value pairs used to replace Mustache variables in the template. + * The key is the variable name. + * The value is the variable value. */ params?: Record + /** If `true`, the query execution is profiled. */ profile?: boolean - source?: string + /** An inline search template. Supports the same parameters as the search API's + * request body. It also supports Mustache variables. If no `id` is specified, this + * parameter is required. */ + source?: ScriptSource } export interface MtermvectorsOperation { + /** The ID of the document. */ _id?: Id + /** The index of the document. */ _index?: IndexName + /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ doc?: any + /** Comma-separated list or wildcard expressions of fields to include in the statistics. + * Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields + /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */ field_statistics?: boolean + /** Filter terms based on their tf-idf scores. */ filter?: TermvectorsFilter + /** If `true`, the response includes term offsets. */ offsets?: boolean + /** If `true`, the response includes term payloads. */ payloads?: boolean + /** If `true`, the response includes term positions. */ positions?: boolean + /** Custom value used to route operations to a specific shard. */ routing?: Routing + /** If true, the response includes term frequency and document frequency. */ term_statistics?: boolean + /** If `true`, returns the document version as part of a hit. */ version?: VersionNumber + /** Specific version type. */ version_type?: VersionType } export interface MtermvectorsRequest extends RequestBase { -/** The name of the index that contains the documents. */ + /** The name of the index that contains the documents. */ index?: IndexName - /** A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ + /** A comma-separated list or wildcard expressions of fields to include in the statistics. + * It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */ field_statistics?: boolean @@ -1195,7 +1522,8 @@ export interface MtermvectorsRequest extends RequestBase { payloads?: boolean /** If `true`, the response includes term positions. */ positions?: boolean - /** The node or shard the operation should be performed on. It is random by default. 
*/ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string /** If true, the request is real-time as opposed to near-real-time. */ realtime?: boolean @@ -1232,29 +1560,37 @@ export interface MtermvectorsTermVectorsResult { } export interface OpenPointInTimeRequest extends RequestBase { -/** A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices */ + /** A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices */ index: Indices /** Extend the length of time that the point in time persists. */ keep_alive: Duration /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** The node or shard the operation should be performed on. By default, it is random. */ + /** The node or shard the operation should be performed on. + * By default, it is random. */ preference?: string /** A custom value that is used to route operations to a specific shard. */ routing?: Routing - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards - /** Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. */ + /** Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. + * If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. + * If `true`, the point in time will contain all the shards that are available at the time of the request. */ allow_partial_search_results?: boolean + /** Maximum number of concurrent shard requests that each sub-search request executes per node. */ + max_concurrent_shard_requests?: integer /** Filter indices if the provided query rewrites to `match_none` on every shard. */ index_filter?: QueryDslQueryContainer /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, index_filter?: never } + body?: string | { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never } /** All values in `querystring` will be added to the request querystring. 
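// Illustrative sketch (not part of this patch): fetching term vectors for two
// documents via the mtermvectors API described by the MtermvectorsOperation
// shape above. The node URL, index, and document IDs are assumptions.
import { Client } from '@elastic/elasticsearch'
const mtvClient = new Client({ node: 'http://localhost:9200/' })
const mtvResponse = await mtvClient.mtermvectors({
  index: 'my-index',
  docs: [
    { _id: '1', fields: ['title'], term_statistics: true },
    { _id: '2', fields: ['title'] },
  ],
})
for (const doc of mtvResponse.docs) console.log(doc._id, doc.found)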
*/ - querystring?: { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, index_filter?: never } + querystring?: { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never } } export interface OpenPointInTimeResponse { + /** Shards used to create the PIT */ _shards: ShardStatistics id: Id } @@ -1269,13 +1605,19 @@ export interface PingRequest extends RequestBase { export type PingResponse = boolean export interface PutScriptRequest extends RequestBase { -/** The identifier for the stored script or search template. It must be unique within the cluster. */ + /** The identifier for the stored script or search template. + * It must be unique within the cluster. */ id: Id - /** The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. */ + /** The context in which the script or search template should run. + * To prevent errors, the API immediately compiles the script or template in this context. */ context?: Name - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration /** The script or search template, its parameters, and its language. */ script: StoredScript @@ -1288,8 +1630,11 @@ export interface PutScriptRequest extends RequestBase { export type PutScriptResponse = AcknowledgedResponseBase export interface RankEvalDocumentRating { + /** The document ID. */ _id: Id + /** The document’s index. For data streams, this should be the document’s backing index. */ _index: IndexName + /** The document’s relevance with regard to this search request. */ rating: integer } @@ -1313,21 +1658,28 @@ export interface RankEvalRankEvalMetric { } export interface RankEvalRankEvalMetricBase { + /** Sets the maximum number of documents retrieved per query. This value will act in place of the usual size parameter in the query. */ k?: integer } export interface RankEvalRankEvalMetricDetail { + /** The metric_score in the details section shows the contribution of this query to the global quality metric score */ metric_score: double + /** The unrated_docs section contains an _index and _id entry for each document in the search result for this query that didn’t have a ratings value. 
This can be used to ask the user to supply ratings for these documents */ unrated_docs: RankEvalUnratedDocument[] + /** The hits section shows a grouping of the search results with their supplied ratings */ hits: RankEvalRankEvalHitItem[] + /** The metric_details give additional information about the calculated quality metric (e.g. how many of the retrieved documents were relevant). The content varies for each metric but allows for better interpretation of the results */ metric_details: Record> } export interface RankEvalRankEvalMetricDiscountedCumulativeGain extends RankEvalRankEvalMetricBase { + /** If set to true, this metric will calculate the Normalized DCG. */ normalize?: boolean } export interface RankEvalRankEvalMetricExpectedReciprocalRank extends RankEvalRankEvalMetricBase { + /** The highest relevance grade used in the user-supplied relevance judgments. */ maximum_relevance: integer } @@ -1335,10 +1687,12 @@ export interface RankEvalRankEvalMetricMeanReciprocalRank extends RankEvalRankEv } export interface RankEvalRankEvalMetricPrecision extends RankEvalRankEvalMetricRatingTreshold { + /** Controls how unlabeled documents in the search results are counted. If set to true, unlabeled documents are ignored and neither count as relevant or irrelevant. Set to false (the default), they are treated as irrelevant. */ ignore_unlabeled?: boolean } export interface RankEvalRankEvalMetricRatingTreshold extends RankEvalRankEvalMetricBase { + /** Sets the rating threshold above which documents are considered to be "relevant". */ relevant_rating_threshold?: integer } @@ -1351,15 +1705,22 @@ export interface RankEvalRankEvalQuery { } export interface RankEvalRankEvalRequestItem { + /** The search request’s ID, used to group result details later. */ id: Id + /** The query being evaluated. */ request?: RankEvalRankEvalQuery | QueryDslQueryContainer + /** List of document ratings */ ratings: RankEvalDocumentRating[] + /** The search template Id */ template_id?: Id + /** The search template parameters. */ params?: Record } export interface RankEvalRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. */ + /** A comma-separated list of data streams, indices, and index aliases used to limit the request. + * Wildcard (`*`) expressions are supported. + * To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. */ index?: Indices /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean @@ -1380,7 +1741,9 @@ export interface RankEvalRequest extends RequestBase { } export interface RankEvalResponse { + /** The overall evaluation quality calculated by the defined metric */ metric_score: double + /** The details section contains one entry for every query in the original requests section, keyed by the search request id */ details: Record failures: Record } @@ -1391,34 +1754,66 @@ export interface RankEvalUnratedDocument { } export interface ReindexDestination { + /** The name of the data stream, index, or index alias you are copying to. 
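// Illustrative sketch (not part of this patch): a rank_eval request built from
// the RankEvalRankEvalRequestItem and precision metric types above. The node
// URL, index, document IDs, and ratings are assumptions.
import { Client } from '@elastic/elasticsearch'
const rankEvalClient = new Client({ node: 'http://localhost:9200/' })
const evaluation = await rankEvalClient.rankEval({
  index: 'my-index',
  requests: [{
    id: 'query_1',
    request: { query: { match: { title: 'elasticsearch' } } },
    ratings: [
      { _index: 'my-index', _id: '1', rating: 3 },
      { _index: 'my-index', _id: '2', rating: 0 },
    ],
  }],
  metric: { precision: { k: 10, relevant_rating_threshold: 1 } },
})
console.log(evaluation.metric_score) // overall quality for the defined metric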
*/ index: IndexName + /** If it is `create`, the operation will only index documents that do not already exist (also known as "put if absent"). + * + * IMPORTANT: To reindex to a data stream destination, this argument must be `create`. */ op_type?: OpType + /** The name of the pipeline to use. */ pipeline?: string + /** By default, a document's routing is preserved unless it's changed by the script. + * If it is `keep`, the routing on the bulk request sent for each match is set to the routing on the match. + * If it is `discard`, the routing on the bulk request sent for each match is set to `null`. + * If it is `=value`, the routing on the bulk request sent for each match is set to the value specified after the equals sign (`=`). */ routing?: Routing + /** The versioning to use for the indexing operation. */ version_type?: VersionType } export interface ReindexRemoteSource { + /** The remote connection timeout. */ connect_timeout?: Duration + /** An object containing the headers of the request. */ headers?: Record + /** The URL for the remote instance of Elasticsearch that you want to index from. + * This information is required when you're indexing from remote. */ host: Host + /** The username to use for authentication with the remote host. */ username?: Username + /** The password to use for authentication with the remote host. */ password?: Password + /** The remote socket read timeout. */ socket_timeout?: Duration } export interface ReindexRequest extends RequestBase { -/** If `true`, the request refreshes affected shards to make this operation visible to search. */ + /** If `true`, the request refreshes affected shards to make this operation visible to search. */ refresh?: boolean - /** The throttle for this request in sub-requests per second. By default, there is no throttle. */ + /** The throttle for this request in sub-requests per second. + * By default, there is no throttle. */ requests_per_second?: float /** The period of time that a consistent view of the index should be maintained for scrolled search. */ scroll?: Duration - /** The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. */ + /** The number of slices this task should be divided into. + * It defaults to one slice, which means the task isn't sliced into subtasks. + * + * Reindex supports sliced scroll to parallelize the reindexing process. + * This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. + * + * NOTE: Reindexing from remote clusters does not support manual or automatic slicing. + * + * If set to `auto`, Elasticsearch chooses the number of slices to use. + * This setting will use one slice per shard, up to a certain limit. + * If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards.
*/ slices?: Slices - /** The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur. */ + /** The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. + * By default, Elasticsearch waits for at least one minute before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active. */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value is one, which means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards /** If `true`, the request blocks until the operation is complete. */ wait_for_completion?: boolean @@ -1428,10 +1823,14 @@ export interface ReindexRequest extends RequestBase { conflicts?: Conflicts /** The destination you are copying to. */ dest: ReindexDestination - /** The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less then or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. */ + /** The maximum number of documents to reindex. + * By default, all documents are reindexed. + * If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. + * + * If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. */ max_docs?: long /** The script to run to update the document source or metadata when reindexing. */ script?: Script | ScriptSource size?: long /** The source you are copying from. */ source: ReindexSource @@ -1442,31 +1841,65 @@ } export interface ReindexResponse { + /** The number of scroll responses that were pulled back by the reindex. */ batches?: long + /** The number of documents that were successfully created. */ created?: long + /** The number of documents that were successfully deleted. */ deleted?: long + /** If there were any unrecoverable errors during the process, it is an array of those failures. + * If this array is not empty, the request ended because of those failures. + * Reindex is implemented using batches and any failure causes the entire process to end but all failures in the current batch are collected into the array. + * You can use the `conflicts` option to prevent the reindex from ending on version conflicts.
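// Illustrative sketch (not part of this patch): a reindex call exercising the
// request shape above (conflicts, dest.op_type, source.size, script). The node
// URL, index names, and the Painless script body are assumptions.
import { Client } from '@elastic/elasticsearch'
const reindexClient = new Client({ node: 'http://localhost:9200/' })
const reindexResult = await reindexClient.reindex({
  wait_for_completion: true,
  conflicts: 'proceed', // keep going on version conflicts
  source: { index: 'old-index', size: 500 },
  dest: { index: 'new-index', op_type: 'create' }, // "put if absent"
  // Hypothetical field used to skip already-migrated documents:
  script: { lang: 'painless', source: "if (ctx._source.migrated == true) { ctx.op = 'noop' }" },
})
console.log(reindexResult.created, reindexResult.noops, reindexResult.version_conflicts)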
*/ failures?: BulkIndexByScrollFailure[] + /** The number of documents that were ignored because the script used for the reindex returned a `noop` value for `ctx.op`. */ noops?: long + /** The number of retries attempted by reindex. */ retries?: Retries + /** The number of requests per second effectively run during the reindex. */ requests_per_second?: float slice_id?: integer task?: TaskId + /** The number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis?: EpochTime + /** This field should always be equal to zero in a reindex response. + * It has meaning only when using the task API, where it indicates the next time (in milliseconds since epoch) that a throttled request will be run again in order to conform to `requests_per_second`. */ throttled_until_millis?: EpochTime + /** If any of the requests that ran during the reindex timed out, it is `true`. */ timed_out?: boolean + /** The total milliseconds the entire operation took. */ took?: DurationValue + /** The number of documents that were successfully processed. */ total?: long + /** The number of documents that were successfully updated. + * That is to say, a document with the same ID already existed before the reindex updated it. */ updated?: long + /** The number of version conflicts that occurred. */ version_conflicts?: long } export interface ReindexSource { + /** The name of the data stream, index, or alias you are copying from. + * It accepts a comma-separated list to reindex from multiple sources. */ index: Indices + /** The documents to reindex, which is defined with Query DSL. */ query?: QueryDslQueryContainer + /** A remote instance of Elasticsearch that you want to index from. */ remote?: ReindexRemoteSource + /** The number of documents to index per batch. + * Use it when you are indexing from remote to ensure that the batches fit within the on-heap buffer, which defaults to a maximum size of 100 MB. */ size?: integer + /** Slice the reindex request manually using the provided slice ID and total number of slices. */ slice?: SlicedScroll + /** A comma-separated list of `:` pairs to sort by before indexing. + * Use it in conjunction with `max_docs` to control what documents are reindexed. + * + * WARNING: Sort in reindex is deprecated. + * Sorting in reindex was never guaranteed to index documents in order and prevents further development of reindex such as resilience and performance improvements. + * If used in combination with `max_docs`, consider using a query filter instead. */ sort?: Sort + /** If `true`, reindex all source fields. + * Set it to a list to reindex select fields. */ _source?: Fields runtime_mappings?: MappingRuntimeFields } @@ -1476,18 +1909,30 @@ export interface ReindexRethrottleReindexNode extends SpecUtilsBaseNode { } export interface ReindexRethrottleReindexStatus { + /** The number of scroll responses pulled back by the reindex. */ batches: long + /** The number of documents that were successfully created. */ created: long + /** The number of documents that were successfully deleted. */ deleted: long + /** The number of documents that were ignored because the script used for the reindex returned a `noop` value for `ctx.op`. */ noops: long + /** The number of requests per second effectively executed during the reindex. */ requests_per_second: float + /** The number of retries attempted by reindex. `bulk` is the number of bulk actions retried and `search` is the number of search actions retried. 
*/ retries: Retries throttled?: Duration + /** Number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis: DurationValue throttled_until?: Duration + /** This field should always be equal to zero in a `_reindex` response. + * It only has meaning when using the Task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. */ throttled_until_millis: DurationValue + /** The number of documents that were successfully processed. */ total: long + /** The number of documents that were successfully updated, for example, a document with the same ID already existed prior to reindex updating it. */ updated: long + /** The number of version conflicts that reindex hits. */ version_conflicts: long } @@ -1505,9 +1950,10 @@ export interface ReindexRethrottleRequest extends RequestBase { -/** The task identifier, which can be found by using the tasks API. */ + /** The task identifier, which can be found by using the tasks API. */ task_id: Id - /** The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. */ + /** The throttle for this request in sub-requests per second. + * It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. */ requests_per_second?: float /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } @@ -1520,13 +1966,19 @@ export interface ReindexRethrottleResponse { } export interface RenderSearchTemplateRequest extends RequestBase { -/** The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. */ + /** The ID of the search template to render. + * If no `source` is specified, this or the `id` request body parameter is required. */ id?: Id file?: string - /** Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. */ + /** Key-value pairs used to replace Mustache variables in the template. + * The key is the variable name. + * The value is the variable value. */ params?: Record - /** An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. */ - source?: string + /** An inline search template. + * It supports the same parameters as the search API's request body. + * These parameters also support Mustache variables. + * If no `id` or `` is specified, this parameter is required. */ + source?: ScriptSource /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, file?: never, params?: never, source?: never } /** All values in `querystring` will be added to the request querystring.
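// Illustrative sketch (not part of this patch): storing a mustache search
// template via PutScriptRequest and rendering it with RenderSearchTemplateRequest
// as typed above. The node URL, template id, and template source are assumptions.
import { Client } from '@elastic/elasticsearch'
const templateClient = new Client({ node: 'http://localhost:9200/' })
await templateClient.putScript({
  id: 'my-search-template',
  script: {
    lang: 'mustache',
    source: '{"query":{"match":{"title":"{{query_string}}"}}}',
  },
})
const rendered = await templateClient.renderSearchTemplate({
  id: 'my-search-template',
  params: { query_string: 'test' },
})
console.log(rendered.template_output)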
*/ @@ -1540,18 +1992,29 @@ export interface RenderSearchTemplateResponse { export type ScriptsPainlessExecutePainlessContext = 'painless_test' | 'filter' | 'score' | 'boolean_field' | 'date_field' | 'double_field' | 'geo_point_field' | 'ip_field' | 'keyword_field' | 'long_field' | 'composite_field' export interface ScriptsPainlessExecutePainlessContextSetup { + /** Document that's temporarily indexed in-memory and accessible from the script. */ document: any + /** Index containing a mapping that's compatible with the indexed document. + * You may specify a remote index by prefixing the index with the remote cluster alias. + * For example, `remote1:my_index` indicates that you want to run the painless script against the "my_index" index on the "remote1" cluster. + * This request will be forwarded to the "remote1" cluster if you have configured a connection to that remote cluster. + * + * NOTE: Wildcards are not accepted in the index expression for this endpoint. + * The expression `*:myindex` will return the error "No such remote cluster" and the expression `logs*` or `remote1:logs*` will return the error "index not found". */ index: IndexName + /** Use this parameter to specify a query for computing a score. */ query?: QueryDslQueryContainer } export interface ScriptsPainlessExecuteRequest extends RequestBase { -/** The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. */ + /** The context that the script should run in. + * NOTE: Result ordering in the field contexts is not guaranteed. */ context?: ScriptsPainlessExecutePainlessContext - /** Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. */ + /** Additional parameters for the `context`. + * NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. */ context_setup?: ScriptsPainlessExecutePainlessContextSetup /** The Painless script to run. */ - script?: Script | string + script?: Script | ScriptSource /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { context?: never, context_setup?: never, script?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -1563,7 +2026,7 @@ export interface ScriptsPainlessExecuteResponse { } export interface ScrollRequest extends RequestBase { -/** The scroll ID */ + /** The scroll ID */ scroll_id?: ScrollId /** If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. */ rest_total_hits_as_int?: boolean @@ -1578,72 +2041,124 @@ export interface ScrollRequest extends RequestBase { export type ScrollResponse> = SearchResponseBody export interface SearchRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. 
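// Illustrative sketch (not part of this patch): running an inline script in the
// default `painless_test` context via the scriptsPainlessExecute API typed
// above. The node URL and parameter values are arbitrary assumptions.
import { Client } from '@elastic/elasticsearch'
const painlessClient = new Client({ node: 'http://localhost:9200/' })
const executed = await painlessClient.scriptsPainlessExecute({
  script: {
    source: 'params.count / params.total',
    params: { count: 100.0, total: 1000.0 },
  },
})
console.log(executed.result) // "0.1" for these parameters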
This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. */ + /** If `true` and there are shard request timeouts or shard failures, the request returns partial results. + * If `false`, it returns an error with no partial results. + * + * To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. */ allow_partial_search_results?: boolean - /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string - /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean - /** The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. */ + /** The number of shard results that should be reduced at once on the coordinating node. + * If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. */ batched_reduce_size?: long /** If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. */ ccs_minimize_roundtrips?: boolean - /** The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ + /** The default operator for the query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator - /** The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The field to use as a default when no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values such as `open,hidden`. */ + /** The type of index that wildcard patterns can match. 
+ * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, concrete, expanded or aliased indices will be ignored when frozen. */ ignore_throttled?: boolean /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. */ + /** If `true`, the response includes the score contribution from any named queries. + * + * This functionality reruns each named query on every hit in a search response. + * Typically, this adds a small overhead to a request. + * However, using computationally expensive named queries on a large number of hits may add significant overhead. */ include_named_queries_score?: boolean - /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean - /** The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. */ - max_concurrent_shard_requests?: long - /** The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. */ + /** The number of concurrent shard requests per node that the search runs concurrently. + * This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. */ + max_concurrent_shard_requests?: integer + /** The nodes and shards used for the search. + * By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. 
+ * Valid values are: + * + * * `_only_local` to run the search only on shards on the local node. + * * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. + * * `_only_nodes:,` to run the search on only the specified node IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. + * * `_prefer_nodes:,` to run the search, if possible, on the specified node IDs. If not, select shards using the default method. + * * `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. + * * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. */ preference?: string - /** A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field. */ + /** A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. + * This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). + * When unspecified, the pre-filter phase is executed if any of these conditions is met: + * + * * The request targets more than 128 shards. + * * The request targets one or more read-only indices. + * * The primary sort of the query targets an indexed field. */ pre_filter_shard_size?: long - /** If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings. */ + /** If `true`, the caching of search results is enabled for requests where `size` is `0`. + * It defaults to index level settings. */ request_cache?: boolean /** A custom value that is used to route operations to a specific shard. */ routing?: Routing - /** The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting. */ + /** The period to retain the search context for scrolling. + * By default, this value cannot exceed `1d` (24 hours). + * You can change this limit by using the `search.max_keep_alive` cluster-level setting. */ scroll?: Duration /** Indicates how distributed term frequencies are calculated for relevance scoring. */ search_type?: SearchType /** The field to use for suggestions. */ suggest_field?: Field /** The suggest mode.
+ * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_mode?: SuggestMode - /** The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ + /** The number of suggestions to return. + * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_size?: long - /** The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ + /** The source text for which the suggestions should be returned. + * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_text?: string /** If `true`, aggregation and suggester names are prefixed by their respective types in the response. */ typed_keys?: boolean /** Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. */ rest_total_hits_as_int?: boolean - /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in the `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields - /** A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned. */ + /** A query in the Lucene query string syntax. + * Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. + * + * IMPORTANT: This parameter overrides the query parameter in the request body. + * If both parameters are specified, documents matching the query request body parameter are not returned. */ q?: string - /** Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. */ + /** Should this request force synthetic _source? + * Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance.
* Fetches with this enabled will be slower than enabling synthetic source natively in the index. */ force_synthetic_source?: boolean /** Defines the aggregations that are run as part of the search request. */ aggregations?: Record - /** @alias aggregations */ - /** Defines the aggregations that are run as part of the search request. */ + /** Defines the aggregations that are run as part of the search request. + * @alias aggregations */ aggs?: Record /** Collapses search results by the values of the specified field. */ collapse?: SearchFieldCollapse @@ -1651,51 +2166,82 @@ export interface SearchRequest extends RequestBase { explain?: boolean /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record - /** The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The starting document offset, which must be non-negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. */ highlight?: SearchHighlight - /** Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. */ + /** Number of hits matching the query to count accurately. + * If `true`, the exact number of hits is returned at the cost of some performance. + * If `false`, the response does not include the total number of hits matching the query. */ track_total_hits?: SearchTrackHits - /** Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. */ - indices_boost?: Record[] - /** An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */ + /** Boost the `_score` of documents from specified indices. + * The boost value is the factor by which scores are multiplied. + * A boost value greater than `1.0` increases the score. + * A boost value between `0` and `1.0` decreases the score. */ + indices_boost?: Partial>[] + /** An array of wildcard (`*`) field patterns. + * The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] /** The approximate kNN search to run. */ knn?: KnnSearch | KnnSearch[] - /** The Reciprocal Rank Fusion (RRF) to use. */ + /** The Reciprocal Rank Fusion (RRF) to use. + * @remarks This property is not supported on Elastic Cloud Serverless. */ rank?: RankContainer - /** The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results. */ + /** The minimum `_score` for matching documents. + * Documents with a lower `_score` are not included in search results and results collected by aggregations. */ min_score?: double - /** Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated.
A post filter has no impact on the aggregation results. */ + /** Use the `post_filter` parameter to filter search results. + * The search hits are filtered after the aggregations are calculated. + * A post filter has no impact on the aggregation results. */ post_filter?: QueryDslQueryContainer - /** Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. */ + /** Set to `true` to return detailed timing information about the execution of individual components in a search request. + * NOTE: This is a debugging tool and adds significant overhead to search execution. */ profile?: boolean /** The search definition using the Query DSL. */ query?: QueryDslQueryContainer /** Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. */ rescore?: SearchRescore | SearchRescore[] - /** A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */ + /** A retriever is a specification to describe top documents returned from a search. + * A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */ retriever?: RetrieverContainer /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record /** Used to retrieve the next page of hits using a set of sort values from the previous page. */ search_after?: SortResults - /** The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. */ + /** The number of hits to return, which must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` property. */ size?: integer /** Split a scrolled search into multiple slices that can be consumed independently. */ slice?: SlicedScroll /** A comma-separated list of : pairs. */ sort?: Sort - /** The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. */ + /** The source fields that are returned for matching documents. + * These fields are returned in the `hits._source` property of the search response. + * If the `stored_fields` property is specified, the `_source` property defaults to `false`. + * Otherwise, it defaults to `true`. */ _source?: SearchSourceConfig - /** An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. */ + /** An array of wildcard (`*`) field patterns. + * The request returns values for field names matching these patterns in the `hits.fields` property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] /** Defines a suggester that provides similar looking terms based on a provided text. */ suggest?: SearchSuggester - /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. 
Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. */ + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this property to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. + * + * If set to `0` (default), the query does not terminate early. */ terminate_after?: long - /** The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */ + /** The period of time to wait for a response from each shard. + * If no response is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: string /** If `true`, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean @@ -1703,13 +2249,20 @@ export interface SearchRequest extends RequestBase { version?: boolean /** If `true`, the request returns sequence number and primary term of the last modification of each hit. */ seq_no_primary_term?: boolean - /** A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. */ + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` property defaults to `false`. + * You can pass `_source: true` to return both source fields and stored fields in the search response. */ stored_fields?: Fields - /** Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. */ + /** Limit the search to a point in time (PIT). + * If you provide a PIT, you cannot specify an `` in the request path. */ pit?: SearchPointInTimeReference - /** One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */ + /** One or more runtime fields in the search request. + * These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields - /** The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. */ + /** The stats groups to associate with the search. + * Each group maintains a statistics aggregation for its associated searches. + * You can retrieve these stats using the indices stats API. */ stats?: string[] /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } @@ -1720,9 +2273,25 @@ export interface SearchRequest extends RequestBase { export type SearchResponse> = SearchResponseBody export interface SearchResponseBody> { + /** The number of milliseconds it took Elasticsearch to run the request. + * This value is calculated by measuring the time elapsed between receipt of a request on the coordinating node and the time at which the coordinating node is ready to send the response. + * It includes: + * + * * Communication time between the coordinating node and data nodes + * * Time the request spends in the search thread pool, queued for execution + * * Actual run time + * + * It does not include: + * + * * Time needed to send the request to Elasticsearch + * * Time needed to serialize the JSON response + * * Time needed to send the response to a client */ took: long + /** If `true`, the request timed out before completion; returned results may be partial or empty. */ timed_out: boolean + /** A count of shards used for the request. */ _shards: ShardStatistics + /** The returned documents and metadata. */ hits: SearchHitsMetadata aggregations?: TAggregations _clusters?: ClusterStatistics @@ -1731,6 +2300,9 @@ export interface SearchResponseBody[]> terminated_early?: boolean @@ -1812,10 +2384,20 @@ export interface SearchCollector { } export interface SearchCompletionContext { + /** The factor by which the score of the suggestion should be boosted. + * The score is computed by multiplying the boost with the suggestion weight. */ boost?: double + /** The value of the category to filter/boost on. */ context: SearchContext + /** An array of precision values at which neighboring geohashes should be taken into account. + * Precision value can be a distance value (`5m`, `10km`, etc.) or a raw geohash precision (`1`..`12`). + * Defaults to generating neighbors for index time precision level. */ neighbours?: GeoHashPrecision[] + /** The precision of the geohash to encode the query geo point. + * Can be specified as a distance value (`5m`, `10km`, etc.), or as a raw geohash precision (`1`..`12`). 
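// Illustrative sketch (not part of this patch): a typed search touching several
// of the SearchRequest properties above (query, the `aggs` alias,
// track_total_hits, size) and the typed response. The node URL, index name, and
// Product shape are assumptions.
import { Client } from '@elastic/elasticsearch'
interface Product { name: string; price: number }
const searchClient = new Client({ node: 'http://localhost:9200/' })
const searchResponse = await searchClient.search<Product>({
  index: 'products',
  query: { range: { price: { gte: 10 } } },
  aggs: { avg_price: { avg: { field: 'price' } } },
  track_total_hits: true,
  size: 10,
})
for (const hit of searchResponse.hits.hits) {
  console.log(hit._id, hit._source?.name) // _source is typed as Product
}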
+ * Defaults to index time precision level. */ precision?: GeoHashPrecision + /** Whether the category value should be treated as a prefix or not. */ prefix?: boolean } @@ -1837,9 +2419,13 @@ export interface SearchCompletionSuggestOption { } export interface SearchCompletionSuggester extends SearchSuggesterBase { + /** A value, geo point object, or a geo hash string to filter or boost the suggestion on. */ contexts?: Record + /** Enables fuzziness, meaning you can have a typo in your search and still get results back. */ fuzzy?: SearchSuggestFuzziness + /** A regex query that expresses a prefix as a regular expression. */ regex?: SearchRegexOptions + /** Whether duplicate suggestions should be filtered out. */ skip_duplicates?: boolean } @@ -1879,16 +2465,38 @@ export interface SearchDfsStatisticsProfile { } export interface SearchDirectGenerator { + /** The field to fetch the candidate suggestions from. + * Needs to be set globally or per suggestion. */ field: Field + /** The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. + * Can only be `1` or `2`. */ max_edits?: integer + /** A factor that is used to multiply with the shard_size in order to inspect more candidate spelling corrections on the shard level. + * Can improve accuracy at the cost of performance. */ max_inspections?: float + /** The maximum threshold in number of documents in which a suggest text token can exist in order to be included. + * This can be used to exclude high frequency terms—which are usually spelled correctly—from being spellchecked. + * Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies. + * If a value higher than 1 is specified, then fractional can not be specified. */ max_term_freq?: float + /** The minimal threshold in number of documents a suggestion should appear in. + * This can improve quality by only suggesting high frequency terms. + * Can be specified as an absolute number or as a relative percentage of number of documents. + * If a value higher than 1 is specified, the number cannot be fractional. */ min_doc_freq?: float + /** The minimum length a suggest text term must have in order to be included. */ min_word_length?: integer + /** A filter (analyzer) that is applied to each of the generated tokens before they are passed to the actual phrase scorer. */ post_filter?: string + /** A filter (analyzer) that is applied to each of the tokens passed to this candidate generator. + * This filter is applied to the original token before candidates are generated. */ pre_filter?: string + /** The number of minimal prefix characters that must match in order to be a candidate suggestion. + * Increasing this number improves spellcheck performance. */ prefix_length?: integer + /** The maximum corrections to be returned per suggest text token. */ size?: integer + /** Controls what suggestions are included in the suggestions generated on each shard. */ suggest_mode?: SuggestMode } @@ -1918,18 +2526,28 @@ export interface SearchFetchProfileDebug { } export interface SearchFieldCollapse { + /** The field to collapse the result set on */ field: Field + /** The number of inner hits and their sort order */ inner_hits?: SearchInnerHits | SearchInnerHits[] + /** The number of concurrent requests allowed to retrieve the inner_hits per group */ max_concurrent_group_searches?: integer collapse?: SearchFieldCollapse } export interface SearchFieldSuggester { + /** Provides auto-complete/search-as-you-type functionality.
*/ completion?: SearchCompletionSuggester + /** Provides access to word alternatives on a per token basis within a certain string distance. */ phrase?: SearchPhraseSuggester + /** Suggests terms based on edit distance. */ term?: SearchTermSuggester + /** Prefix used to search for suggestions. */ prefix?: string + /** A prefix expressed as a regular expression. */ regex?: string + /** The text to use as input for the suggester. + * Needs to be set globally or per suggestion. */ text?: string } @@ -1940,25 +2558,61 @@ export interface SearchHighlight extends SearchHighlightBase { export interface SearchHighlightBase { type?: SearchHighlighterType + /** A string that contains each boundary character. */ boundary_chars?: string + /** How far to scan for boundary characters. */ boundary_max_scan?: integer + /** Specifies how to break the highlighted fragments: chars, sentence, or word. + * Only valid for the unified and fvh highlighters. + * Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for the `fvh` highlighter. */ boundary_scanner?: SearchBoundaryScanner + /** Controls which locale is used to search for sentence and word boundaries. + * This parameter takes a form of a language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. */ boundary_scanner_locale?: string force_source?: boolean + /** Specifies how text should be broken up in highlight snippets: `simple` or `span`. + * Only valid for the `plain` highlighter. */ fragmenter?: SearchHighlighterFragmenter + /** The size of the highlighted fragment in characters. */ fragment_size?: integer highlight_filter?: boolean + /** Highlight matches for a query other than the search query. + * This is especially useful if you use a rescore query because those are not taken into account by highlighting by default. */ highlight_query?: QueryDslQueryContainer max_fragment_length?: integer + /** If set to a non-negative value, highlighting stops at this defined maximum limit. + * The rest of the text is not processed, thus not highlighted and no error is returned + * The `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it’s set to lower value than the query setting. */ max_analyzed_offset?: integer + /** The amount of text you want to return from the beginning of the field if there are no matching fragments to highlight. */ no_match_size?: integer + /** The maximum number of fragments to return. + * If the number of fragments is set to `0`, no fragments are returned. + * Instead, the entire field contents are highlighted and returned. + * This can be handy when you need to highlight short texts such as a title or address, but fragmentation is not required. + * If `number_of_fragments` is `0`, `fragment_size` is ignored. */ number_of_fragments?: integer options?: Record + /** Sorts highlighted fragments by score when set to `score`. + * By default, fragments will be output in the order they appear in the field (order: `none`). + * Setting this option to `score` will output the most relevant fragments first. + * Each highlighter applies its own logic to compute relevancy scores. */ order?: SearchHighlighterOrder + /** Controls the number of matching phrases in a document that are considered. + * Prevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory. + * When using `matched_fields`, `phrase_limit` phrases per matched field are considered. Raising the limit increases query time and consumes more memory. 
+ * Only supported by the `fvh` highlighter. */ phrase_limit?: integer + /** Use in conjunction with `pre_tags` to define the HTML tags to use for the highlighted text. + * By default, highlighted text is wrapped in `` and `` tags. */ post_tags?: string[] + /** Use in conjunction with `post_tags` to define the HTML tags to use for the highlighted text. + * By default, highlighted text is wrapped in `` and `` tags. */ pre_tags?: string[] + /** By default, only fields that contains a query match are highlighted. + * Set to `false` to highlight all fields. */ require_field_match?: boolean + /** Set to `styled` to use the built-in tag schema. */ tags_schema?: SearchHighlighterTagsSchema } @@ -2001,14 +2655,19 @@ export interface SearchHit { } export interface SearchHitsMetadata { + /** Total hit count information, present only if `track_total_hits` wasn't `false` in the search request. */ total?: SearchTotalHits | long hits: SearchHit[] max_score?: double | null } export interface SearchInnerHits { + /** The name for the particular inner hit definition in the response. + * Useful when a search request contains multiple inner hits. */ name?: Name + /** The maximum number of hits to return per `inner_hits`. */ size?: integer + /** Inner hit starting document offset. */ from?: integer collapse?: SearchFieldCollapse docvalue_fields?: (QueryDslFieldAndFormat | Field)[] @@ -2018,6 +2677,8 @@ export interface SearchInnerHits { script_fields?: Record seq_no_primary_term?: boolean fields?: Fields + /** How the inner hits should be sorted per `inner_hits`. + * By default, inner hits are sorted by score. */ sort?: Sort _source?: SearchSourceConfig stored_fields?: Fields @@ -2071,11 +2732,14 @@ export interface SearchKnnQueryProfileResult { } export interface SearchLaplaceSmoothingModel { + /** A constant that is added to all counts to balance weights. */ alpha: double } export interface SearchLearningToRank { + /** The unique identifier of the trained model uploaded to Elasticsearch */ model_id: string + /** Named parameters to be passed to the query templates used for feature */ params?: Record } @@ -2096,18 +2760,25 @@ export interface SearchPhraseSuggest extends SearchSuggestBase { } export interface SearchPhraseSuggestCollate { + /** Parameters to use if the query is templated. */ params?: Record + /** Returns all suggestions with an extra `collate_match` option indicating whether the generated phrase matched any document. */ prune?: boolean + /** A collate query that is run once for every suggestion. */ query: SearchPhraseSuggestCollateQuery } export interface SearchPhraseSuggestCollateQuery { + /** The search template ID. */ id?: Id - source?: string + /** The query source. */ + source?: ScriptSource } export interface SearchPhraseSuggestHighlight { + /** Use in conjunction with `pre_tag` to define the HTML tags to use for the highlighted text. */ post_tag: string + /** Use in conjunction with `post_tag` to define the HTML tags to use for the highlighted text. */ pre_tag: string } @@ -2119,17 +2790,35 @@ export interface SearchPhraseSuggestOption { } export interface SearchPhraseSuggester extends SearchSuggesterBase { + /** Checks each suggestion against the specified query to prune suggestions for which no matching docs exist in the index. */ collate?: SearchPhraseSuggestCollate + /** Defines a factor applied to the input phrases score, which is used as a threshold for other suggest candidates. + * Only candidates that score higher than the threshold will be included in the result. 
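+ *
+ * For instance (a sketch only; the index and the `title.trigram` subfield are assumptions):
+ * @example
+ * const res = await client.search({
+ *   index: 'articles',
+ *   suggest: {
+ *     did_you_mean: {
+ *       text: 'noble prize',
+ *       phrase: { field: 'title.trigram', confidence: 1.0, size: 1 }
+ *     }
+ *   }
+ * })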
*/ confidence?: double + /** A list of candidate generators that produce a list of possible terms per term in the given text. */ direct_generator?: SearchDirectGenerator[] force_unigrams?: boolean + /** Sets max size of the n-grams (shingles) in the field. + * If the field doesn’t contain n-grams (shingles), this should be omitted or set to `1`. + * If the field uses a shingle filter, the `gram_size` is set to the `max_shingle_size` if not explicitly set. */ gram_size?: integer + /** Sets up suggestion highlighting. + * If not provided, no highlighted field is returned. */ highlight?: SearchPhraseSuggestHighlight + /** The maximum percentage of the terms considered to be misspellings in order to form a correction. + * This method accepts a float value in the range `[0..1)` as a fraction of the actual query terms or a number `>=1` as an absolute number of query terms. */ max_errors?: double + /** The likelihood of a term being misspelled even if the term exists in the dictionary. */ real_word_error_likelihood?: double + /** The separator that is used to separate terms in the bigram field. + * If not set, the whitespace character is used as a separator. */ separator?: string + /** Sets the maximum number of suggested terms to be retrieved from each individual shard. */ shard_size?: integer + /** The smoothing model used to balance weight between infrequent grams (grams (shingles) are not existing in the index) and frequent grams (appear at least once in the index). + * The default model is Stupid Backoff. */ smoothing?: SearchSmoothingModelContainer + /** The text/query to provide suggestions for. */ text?: string token_limit?: integer } @@ -2175,7 +2864,9 @@ export interface SearchQueryProfile { } export interface SearchRegexOptions { + /** Optional operators for the regular expression. */ flags?: integer | string + /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer } @@ -2186,9 +2877,14 @@ export interface SearchRescore { } export interface SearchRescoreQuery { + /** The query to use for rescoring. + * This query is only run on the Top-K results returned by the `query` and `post_filter` phases. */ rescore_query: QueryDslQueryContainer + /** Relative importance of the original query versus the rescore query. */ query_weight?: double + /** Relative importance of the rescore query versus the original query. */ rescore_query_weight?: double + /** Determines how scores are combined. */ score_mode?: SearchScoreMode } @@ -2200,6 +2896,118 @@ export interface SearchSearchProfile { rewrite_time: long } +export interface SearchSearchRequestBody { + /** Defines the aggregations that are run as part of the search request. */ + aggregations?: Record + /** Defines the aggregations that are run as part of the search request. + * @alias aggregations */ + aggs?: Record + /** Collapses search results the values of the specified field. */ + collapse?: SearchFieldCollapse + /** If `true`, the request returns detailed information about score computation as part of a hit. */ + explain?: boolean + /** Configuration of search extensions defined by Elasticsearch plugins. */ + ext?: Record + /** The starting document offset, which must be non-negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ + from?: integer + /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. 
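+ *
+ * A hedged sketch (hypothetical `comments` index with a `body` text field):
+ * @example
+ * const res = await client.search({
+ *   index: 'comments',
+ *   query: { match: { body: 'deprecated' } },
+ *   highlight: { fields: { body: { pre_tags: ['<em>'], post_tags: ['</em>'] } } }
+ * })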
*/ + highlight?: SearchHighlight + /** Number of hits matching the query to count accurately. + * If `true`, the exact number of hits is returned at the cost of some performance. + * If `false`, the response does not include the total number of hits matching the query. */ + track_total_hits?: SearchTrackHits + /** Boost the `_score` of documents from specified indices. + * The boost value is the factor by which scores are multiplied. + * A boost value greater than `1.0` increases the score. + * A boost value between `0` and `1.0` decreases the score. */ + indices_boost?: Partial>[] + /** An array of wildcard (`*`) field patterns. + * The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */ + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** The approximate kNN search to run. */ + knn?: KnnSearch | KnnSearch[] + /** The Reciprocal Rank Fusion (RRF) to use. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + rank?: RankContainer + /** The minimum `_score` for matching documents. + * Documents with a lower `_score` are not included in search results or results collected by aggregations. */ + min_score?: double + /** Use the `post_filter` parameter to filter search results. + * The search hits are filtered after the aggregations are calculated. + * A post filter has no impact on the aggregation results. */ + post_filter?: QueryDslQueryContainer + /** Set to `true` to return detailed timing information about the execution of individual components in a search request. + * NOTE: This is a debugging tool and adds significant overhead to search execution. */ + profile?: boolean + /** The search definition using the Query DSL. */ + query?: QueryDslQueryContainer + /** Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. */ + rescore?: SearchRescore | SearchRescore[] + /** A retriever is a specification to describe top documents returned from a search. + * A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */ + retriever?: RetrieverContainer + /** Retrieve a script evaluation (based on different fields) for each hit. */ + script_fields?: Record + /** Used to retrieve the next page of hits using a set of sort values from the previous page. */ + search_after?: SortResults + /** The number of hits to return, which must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` property. */ + size?: integer + /** Split a scrolled search into multiple slices that can be consumed independently. */ + slice?: SlicedScroll + /** A comma-separated list of : pairs. */ + sort?: Sort + /** The source fields that are returned for matching documents. + * These fields are returned in the `hits._source` property of the search response. + * If the `stored_fields` property is specified, the `_source` property defaults to `false`. + * Otherwise, it defaults to `true`. */ + _source?: SearchSourceConfig + /** An array of wildcard (`*`) field patterns. + * The request returns values for field names matching these patterns in the `hits.fields` property of the response. */ + fields?: (QueryDslFieldAndFormat | Field)[] + /** Defines a suggester that provides similar looking terms based on a provided text. 
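+ *
+ * For example (illustrative only; names are made up), one input text can be shared
+ * across several suggesters via the global `text` key:
+ * @example
+ * const res = await client.search({
+ *   index: 'posts',
+ *   suggest: {
+ *     text: 'elastcsearch',
+ *     by_title: { term: { field: 'title' } },
+ *     by_body: { term: { field: 'body' } }
+ *   }
+ * })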
*/ + suggest?: SearchSuggester + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this property to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. + * + * If set to `0` (default), the query does not terminate early. */ + terminate_after?: long + /** The period of time to wait for a response from each shard. + * If no response is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ + timeout?: string + /** If `true`, calculate and return document scores, even if the scores are not used for sorting. */ + track_scores?: boolean + /** If `true`, the request returns the document version as part of a hit. */ + version?: boolean + /** If `true`, the request returns sequence number and primary term of the last modification of each hit. */ + seq_no_primary_term?: boolean + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` property defaults to `false`. + * You can pass `_source: true` to return both source fields and stored fields in the search response. */ + stored_fields?: Fields + /** Limit the search to a point in time (PIT). + * If you provide a PIT, you cannot specify an `` in the request path. */ + pit?: SearchPointInTimeReference + /** One or more runtime fields in the search request. + * These fields take precedence over mapped fields with the same name. */ + runtime_mappings?: MappingRuntimeFields + /** The stats groups to associate with the search. + * Each group maintains a statistics aggregation for its associated searches. + * You can retrieve these stats using the indices stats API. */ + stats?: string[] +} + export interface SearchShardProfile { aggregations: SearchAggregationProfile[] cluster: string @@ -2213,8 +3021,11 @@ export interface SearchShardProfile { } export interface SearchSmoothingModelContainer { + /** A smoothing model that uses an additive smoothing where a constant (typically `1.0` or smaller) is added to all counts to balance weights. */ laplace?: SearchLaplaceSmoothingModel + /** A smoothing model that takes the weighted mean of the unigrams, bigrams, and trigrams based on user supplied weights (lambdas). */ linear_interpolation?: SearchLinearInterpolationSmoothingModel + /** A simple backoff model that backs off to lower order n-gram models if the higher order count is `0` and discounts the lower order n-gram model by a constant factor. */ stupid_backoff?: SearchStupidBackoffSmoothingModel } @@ -2224,14 +3035,17 @@ export type SearchSourceConfigParam = boolean | Fields export interface SearchSourceFilter { excludes?: Fields + /** @alias excludes */ exclude?: Fields includes?: Fields + /** @alias includes */ include?: Fields } export type SearchStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram' export interface SearchStupidBackoffSmoothingModel { + /** A constant factor that the lower order n-gram model is discounted by. 
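+ *
+ * Shown here inside a phrase suggester, as a sketch (index and field are assumptions):
+ * @example
+ * const res = await client.search({
+ *   index: 'articles',
+ *   suggest: {
+ *     fixed: {
+ *       text: 'noble prize',
+ *       phrase: {
+ *         field: 'title.trigram',
+ *         smoothing: { stupid_backoff: { discount: 0.4 } }
+ *       }
+ *     }
+ *   }
+ * })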
*/ discount: double } @@ -2244,24 +3058,36 @@ export interface SearchSuggestBase { } export interface SearchSuggestFuzziness { + /** The fuzziness factor. */ fuzziness?: Fuzziness + /** Minimum length of the input before fuzzy suggestions are returned. */ min_length?: integer + /** Minimum length of the input, which is not checked for fuzzy alternatives. */ prefix_length?: integer + /** If set to `true`, transpositions are counted as one change instead of two. */ transpositions?: boolean + /** If `true`, all measurements (like fuzzy edit distance, transpositions, and lengths) are measured in Unicode code points instead of in bytes. + * This is slightly slower than raw bytes. */ unicode_aware?: boolean } export type SearchSuggestSort = 'score' | 'frequency' export interface SearchSuggesterKeys { + /** Global suggest text, to avoid repetition when the same text is used in several suggesters. */ text?: string } export type SearchSuggester = SearchSuggesterKeys & { [property: string]: SearchFieldSuggester | string } export interface SearchSuggesterBase { + /** The field to fetch the candidate suggestions from. + * Needs to be set globally or per suggestion. */ field: Field + /** The analyzer to analyze the suggest text with. + * Defaults to the search analyzer of the suggest field. */ analyzer?: string + /** The maximum corrections to be returned per suggest text token. */ size?: integer } @@ -2279,16 +3105,36 @@ export interface SearchTermSuggestOption { export interface SearchTermSuggester extends SearchSuggesterBase { lowercase_terms?: boolean + /** The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. + * Can only be `1` or `2`. */ max_edits?: integer + /** A factor that is used to multiply with the shard_size in order to inspect more candidate spelling corrections on the shard level. + * Can improve accuracy at the cost of performance. */ max_inspections?: integer + /** The maximum threshold in number of documents in which a suggest text token can exist in order to be included. + * Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies. + * If a value higher than 1 is specified, then fractional cannot be specified. */ max_term_freq?: float + /** The minimal threshold in number of documents a suggestion should appear in. + * This can improve quality by only suggesting high frequency terms. + * Can be specified as an absolute number or as a relative percentage of number of documents. + * If a value higher than 1 is specified, then the number cannot be fractional. */ min_doc_freq?: float + /** The minimum length a suggest text term must have in order to be included. */ min_word_length?: integer + /** The number of minimal prefix characters that must match in order to be a candidate for suggestions. + * Increasing this number improves spellcheck performance. */ prefix_length?: integer + /** Sets the maximum number of suggestions to be retrieved from each individual shard. */ shard_size?: integer + /** Defines how suggestions should be sorted per suggest text term. */ sort?: SearchSuggestSort + /** The string distance implementation to use for comparing how similar suggested terms are. */ string_distance?: SearchStringDistance + /** Controls what suggestions are included, or for what suggest text terms suggestions should be suggested. */ suggest_mode?: SuggestMode + /** The suggest text. + * Needs to be set globally or per suggestion.
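+ *
+ * A brief sketch (index and field names assumed):
+ * @example
+ * const res = await client.search({
+ *   index: 'logs',
+ *   suggest: {
+ *     spelling: {
+ *       text: 'kernal panic',
+ *       term: { field: 'message', suggest_mode: 'popular', sort: 'frequency' }
+ *     }
+ *   }
+ * })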
*/ text?: string } @@ -2302,7 +3148,7 @@ export type SearchTotalHitsRelation = 'eq' | 'gte' export type SearchTrackHits = boolean | integer export interface SearchMvtRequest extends RequestBase { -/** Comma-separated list of data streams, indices, or aliases to search */ + /** Comma-separated list of data streams, indices, or aliases to search */ index: Indices /** Field containing geospatial data to return */ field: Field @@ -2312,33 +3158,78 @@ export interface SearchMvtRequest extends RequestBase { x: SearchMvtCoordinate /** Y coordinate for the vector tile to search */ y: SearchMvtCoordinate - /** Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. */ + /** Sub-aggregations for the geotile_grid. + * + * It supports the following aggregation types: + * + * - `avg` + * - `boxplot` + * - `cardinality` + * - `extended stats` + * - `max` + * - `median absolute deviation` + * - `min` + * - `percentile` + * - `percentile-rank` + * - `stats` + * - `sum` + * - `value count` + * + * The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. */ aggs?: Record - /** The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. */ + /** The size, in pixels, of a clipping buffer outside the tile. This allows renderers + * to avoid outline artifacts from geometries that extend past the extent of the tile. */ buffer?: integer - /** If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `//` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile. */ + /** If `false`, the meta layer's feature is the bounding box of the tile. + * If `true`, the meta layer's feature is a bounding box resulting from a + * `geo_bounds` aggregation. The aggregation runs on values that intersect + * the `//` tile with `wrap_longitude` set to `false`. The resulting + * bounding box may be larger than the vector tile. */ exact_bounds?: boolean /** The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. */ extent?: integer - /** The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. */ + /** The fields to return in the `hits` layer. + * It supports wildcards (`*`). + * This parameter does not support fields with array values. Fields with array + * values may return inconsistent results. */ fields?: Fields /** The aggregation used to create a grid for the `field`. */ grid_agg?: SearchMvtGridAggregationType - /** Additional zoom levels available through the aggs layer. For example, if `` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer. */ + /** Additional zoom levels available through the aggs layer. For example, if `` is `7` + * and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. 
If 0, results + * don't include the aggs layer. */ grid_precision?: integer - /** Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid, each feature is a polygon of the cells bounding box. If `point`, each feature is a Point that is the centroid of the cell. */ + /** Determines the geometry type for features in the aggs layer. In the aggs layer, + * each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon + * of the cell's bounding box. If `point`, each feature is a Point that is the centroid + * of the cell. */ grid_type?: SearchMvtGridType /** The query DSL used to filter documents for the search. */ query?: QueryDslQueryContainer - /** Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */ + /** Defines one or more runtime fields in the search request. These fields take + * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields - /** The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer. */ + /** The maximum number of features to return in the hits layer. Accepts 0-10000. + * If 0, results don't include the hits layer. */ size?: integer - /** Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. */ + /** Sort the features in the hits layer. By default, the API calculates a bounding + * box for each feature. It sorts features based on this box's diagonal length, + * from longest to shortest. */ sort?: Sort - /** The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. */ + /** The number of hits matching the query to count accurately. If `true`, the exact number + * of hits is returned at the cost of some performance. If `false`, the response does + * not include the total number of hits matching the query. */ track_total_hits?: SearchTrackHits - /** If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`. */ + /** If `true`, the hits and aggs layers will contain additional point features representing + * suggested label positions for the original features. + * + * * `Point` and `MultiPoint` features will have one of the points selected. + * * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree.
+ * * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. + * * The aggregation results will provide one central point for each aggregation bucket. + * + * All attributes from the original features will also be copied to the new label features. + * In addition, the new features will be distinguishable using the tag `_mvt_label_position`. */ with_labels?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never } @@ -2357,19 +3248,29 @@ export type SearchMvtGridType = 'grid' | 'point' | 'centroid' export type SearchMvtZoomLevel = integer export interface SearchShardsRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean /** If `true`, the request retrieves information from the local node only. */ local?: boolean - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. IT can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout.
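+ *
+ * For reference, a hedged sketch of calling this API from the client (the index name
+ * and routing value are hypothetical):
+ * @example
+ * const res = await client.searchShards({ index: 'my-index', routing: 'user-123' })
+ * console.log(res.shards.length)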
*/ master_timeout?: Duration - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string /** A custom value used to route operations to a specific shard. */ routing?: Routing @@ -2386,10 +3287,14 @@ export interface SearchShardsResponse { } export interface SearchShardsSearchShardsNodeAttributes { + /** The human-readable identifier of the node. */ name: NodeName + /** The ephemeral ID of the node. */ ephemeral_id: Id + /** The host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress external_id: string + /** Lists node attributes. */ attributes: Record roles: NodeRoles version: VersionString @@ -2403,40 +3308,55 @@ export interface SearchShardsShardStoreIndex { } export interface SearchTemplateRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean /** If `true`, network round-trips are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. */ ignore_throttled?: boolean /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string /** A custom value used to route operations to a specific shard. */ routing?: Routing - /** Specifies how long a consistent view of the index should be maintained for scrolled search. */ + /** Specifies how long a consistent view of the index + * should be maintained for scrolled search. */ scroll?: Duration /** The type of the search operation. 
*/ search_type?: SearchType - /** If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object. */ + /** If `true`, `hits.total` is rendered as an integer in the response. + * If `false`, it is rendered as an object. */ rest_total_hits_as_int?: boolean /** If `true`, the response prefixes aggregation and suggester names with their respective types. */ typed_keys?: boolean - /** If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. */ + /** If `true`, returns detailed information about score calculation as part of each hit. + * If you specify both this and the `explain` query parameter, the API uses only the query parameter. */ explain?: boolean - /** The ID of the search template to use. If no `source` is specified, this parameter is required. */ + /** The ID of the search template to use. If no `source` is specified, + * this parameter is required. */ id?: Id - /** Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. */ + /** Key-value pairs used to replace Mustache variables in the template. + * The key is the variable name. + * The value is the variable value. */ params?: Record /** If `true`, the query execution is profiled. */ profile?: boolean - /** An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. */ - source?: string + /** An inline search template. Supports the same parameters as the search API's + * request body. It also supports Mustache variables. If no `id` is specified, this + * parameter is required. */ + source?: ScriptSource /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -2461,21 +3381,29 @@ export interface SearchTemplateResponse { } export interface TermsEnumRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and index aliases to search. + * Wildcard (`*`) expressions are supported. + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: IndexName /** The string to match at the start of indexed terms. If not provided, all terms in the field are considered. */ field: Field /** The number of matching terms to return. */ size?: integer - /** The maximum length of time to spend collecting results. If the timeout is exceeded the `complete` flag set to `false` in the response and the results may be partial or empty. */ + /** The maximum length of time to spend collecting results. + * If the timeout is exceeded the `complete` flag set to `false` in the response and the results may be partial or empty. 
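+ *
+ * A usage sketch (the index pattern and keyword field are assumptions):
+ * @example
+ * const res = await client.termsEnum({
+ *   index: 'logs-*',
+ *   field: 'host.name',
+ *   string: 'web-',
+ *   size: 20
+ * })
+ * if (!res.complete) console.warn('results may be partial')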
*/ timeout?: Duration /** When `true`, the provided search string is matched against index terms without case sensitivity. */ case_insensitive?: boolean /** Filter an index shard if the provided query rewrites to `match_none`. */ index_filter?: QueryDslQueryContainer - /** The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. */ + /** The string to match at the start of indexed terms. + * If it is not provided, all terms in the field are considered. + * + * > info + * > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. */ string?: string - /** The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. */ + /** The string after which terms in the index should be returned. + * It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. */ search_after?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, field?: never, size?: never, timeout?: never, case_insensitive?: never, index_filter?: never, string?: never, search_after?: never } @@ -2486,6 +3414,8 @@ export interface TermsEnumRequest extends RequestBase { export interface TermsEnumResponse { _shards: ShardStatistics terms: string[] + /** If `false`, the returned terms set may be incomplete and should be treated as approximate. + * This can occur due to a few reasons, such as a request timeout or a node error. */ complete: boolean } @@ -2496,33 +3426,53 @@ export interface TermvectorsFieldStatistics { } export interface TermvectorsFilter { + /** Ignore words which occur in more than this many docs. + * Defaults to unbounded. */ max_doc_freq?: integer + /** The maximum number of terms that must be returned per field. */ max_num_terms?: integer + /** Ignore words with more than this frequency in the source doc. + * It defaults to unbounded. */ max_term_freq?: integer + /** The maximum word length above which words will be ignored. + * Defaults to unbounded. */ max_word_length?: integer + /** Ignore terms which do not occur in at least this many docs. */ min_doc_freq?: integer + /** Ignore words with less than this frequency in the source doc. */ min_term_freq?: integer + /** The minimum word length below which words will be ignored. */ min_word_length?: integer } export interface TermvectorsRequest extends RequestBase { -/** The name of the index that contains the document. */ + /** The name of the index that contains the document. */ index: IndexName /** A unique identifier for the document. */ id?: Id - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string /** If true, the request is real-time as opposed to near-real-time. */ realtime?: boolean /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ doc?: TDocument - /** Filter terms based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. 
This feature works in a similar manner to the second phase of the More Like This Query. */ + /** Filter terms based on their tf-idf scores. + * This could be useful in order find out a good characteristic vector of a document. + * This feature works in a similar manner to the second phase of the More Like This Query. */ filter?: TermvectorsFilter - /** Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. */ + /** Override the default per-field analyzer. + * This is useful in order to generate term vectors in any fashion, especially when using artificial documents. + * When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. */ per_field_analyzer?: Record - /** A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ + /** A list of fields to include in the statistics. + * It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields - /** If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). */ + /** If `true`, the response includes: + * + * * The document count (how many documents contain this field). + * * The sum of document frequencies (the sum of document frequencies for all terms in this field). + * * The sum of total term frequencies (the sum of total term frequencies of each term in this field). */ field_statistics?: boolean /** If `true`, the response includes term offsets. */ offsets?: boolean @@ -2530,7 +3480,12 @@ export interface TermvectorsRequest extends RequestBase { payloads?: boolean /** If `true`, the response includes term positions. */ positions?: boolean - /** If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact. */ + /** If `true`, the response includes: + * + * * The total term frequency (how often a term occurs in all documents). + * * The document frequency (the number of documents containing the current term). + * + * By default these values are not returned since term statistics can have a serious performance impact. */ term_statistics?: boolean /** A custom value that is used to route operations to a specific shard. */ routing?: Routing @@ -2574,9 +3529,10 @@ export interface TermvectorsToken { } export interface UpdateRequest extends RequestBase { -/** A unique identifier for the document to be updated. */ + /** A unique identifier for the document to be updated. */ id: Id - /** The name of the target index. By default, the index is created automatically if it doesn't exist. */ + /** The name of the target index. + * By default, the index is created automatically if it doesn't exist. */ index: IndexName /** Only perform the operation if the document has this primary term. 
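 *
 * Typically paired with `if_seq_no` for optimistic concurrency control.
 * A sketch (index, id, and field values are made up):
 * @example
 * const doc = await client.get({ index: 'products', id: '1' })
 * await client.update({
 *   index: 'products',
 *   id: '1',
 *   if_seq_no: doc._seq_no,
 *   if_primary_term: doc._primary_term,
 *   doc: { price: 42 }
 * })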
*/ if_primary_term?: long @@ -2586,7 +3542,9 @@ export interface UpdateRequest include_source_on_error?: boolean /** The script language. */ lang?: string - /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. */ + /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', it does nothing with refreshes. */ refresh?: Refresh /** If `true`, the destination must be an index alias. */ require_alias?: boolean @@ -2594,9 +3552,13 @@ export interface UpdateRequest retry_on_conflict?: integer /** A custom value used to route operations to a specific shard. */ routing?: Routing - /** The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. */ + /** The period to wait for the following operations: dynamic mapping updates and waiting for active shards. + * Elasticsearch waits for at least the timeout period before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration - /** The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active. */ + /** The number of copies of each shard that must be active before proceeding with the operation. + * Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). + * The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards /** The source fields you want to exclude. */ _source_excludes?: Fields @@ -2604,17 +3566,21 @@ export interface UpdateRequest _source_includes?: Fields /** If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. */ detect_noop?: boolean - /** A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored. */ + /** A partial update to an existing document. + * If both `doc` and `script` are specified, `doc` is ignored. */ doc?: TPartialDocument - /** If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. */ + /** If `true`, use the contents of 'doc' as the value of 'upsert'. + * NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. */ doc_as_upsert?: boolean /** The script to run to update the document. */ - script?: Script | string + script?: Script | ScriptSource /** If `true`, run the script whether or not the document exists. */ scripted_upsert?: boolean - /** If `false`, turn off source retrieval. You can also specify a comma-separated list of the fields you want to retrieve. */ + /** If `false`, turn off source retrieval. + * You can also specify a comma-separated list of the fields you want to retrieve. */ _source?: SearchSourceConfig - /** If the document does not already exist, the contents of 'upsert' are inserted as a new document. 
If the document exists, the 'script' is run. */ + /** If the document does not already exist, the contents of 'upsert' are inserted as a new document. + * If the document exists, the 'script' is run. */ upsert?: TDocument /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never } @@ -2629,35 +3595,52 @@ export interface UpdateUpdateWriteResponseBase extends Writ } export interface UpdateByQueryRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string - /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean - /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator - /** The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards - /** Starting offset (default: 0) */ + /** Skips the specified number of documents. */ from?: long /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean - /** The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. */ + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. + * If a final pipeline is configured it will always run, regardless of the value of this parameter. */ pipeline?: string - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string /** A query in the Lucene query string syntax. */ q?: string - /** If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. */ + /** If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. + * This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. */ refresh?: boolean - /** If `true`, the request cache is used for this request. It defaults to the index-level setting. */ + /** If `true`, the request cache is used for this request. + * It defaults to the index-level setting. */ request_cache?: boolean /** The throttle for this request in sub-requests per second. */ requests_per_second?: float @@ -2667,7 +3650,8 @@ export interface UpdateByQueryRequest extends RequestBase { scroll?: Duration /** The size of the scroll request that powers the operation. */ scroll_size?: long - /** An explicit timeout for each search request. By default, there is no timeout. */ + /** An explicit timeout for each search request. + * By default, there is no timeout. */ search_timeout?: Duration /** The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. 
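 *
 * For context, a hedged end-to-end sketch of an update-by-query call (index, query,
 * and script are illustrative):
 * @example
 * const res = await client.updateByQuery({
 *   index: 'users',
 *   query: { term: { active: false } },
 *   script: { source: "ctx._source.status = 'dormant'" },
 *   conflicts: 'proceed'
 * })
 * console.log(res.updated, res.version_conflicts)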
*/ search_type?: SearchType @@ -2677,24 +3661,39 @@ export interface UpdateByQueryRequest extends RequestBase { sort?: string[] /** The specific `tag` of the request for logging and statistical purposes. */ stats?: string[] - /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long - /** The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. */ + /** The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. + * By default, it is one minute. + * This guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration /** If `true`, returns the document version as part of a hit. */ version?: boolean /** Should the document increment the version number (internal) on hit or not (reindex) */ version_type?: boolean - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The `timeout` parameter controls how long each write request waits for unavailable shards to become available. + * Both work exactly the way they work in the bulk API. */ wait_for_active_shards?: WaitForActiveShards - /** If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. */ + /** If `true`, the request blocks until the operation is complete. + * If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. + * Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. 
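+ *
+ * A sketch of the asynchronous flow (names and task handling are illustrative):
+ * @example
+ * const { task } = await client.updateByQuery({
+ *   index: 'users',
+ *   script: { source: 'ctx._source.migrated = true' },
+ *   wait_for_completion: false
+ * })
+ * const status = await client.tasks.get({ task_id: String(task) })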
*/ wait_for_completion?: boolean /** The maximum number of documents to update. */ max_docs?: long /** The documents to update using the Query DSL. */ query?: QueryDslQueryContainer /** The script to run to update the document source or metadata when updating. */ - script?: Script | string + script?: Script | ScriptSource /** Slice the request manually using the provided slice ID and total number of slices. */ slice?: SlicedScroll /** The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. */ @@ -2706,28 +3705,49 @@ export interface UpdateByQueryRequest extends RequestBase { } export interface UpdateByQueryResponse { + /** The number of scroll responses pulled back by the update by query. */ batches?: long + /** Array of failures if there were any unrecoverable errors during the process. + * If this is non-empty then the request ended because of those failures. + * Update by query is implemented using batches. + * Any failure causes the entire process to end, but all failures in the current batch are collected into the array. + * You can use the `conflicts` option to prevent reindex from ending when version conflicts occur. */ failures?: BulkIndexByScrollFailure[] + /** The number of documents that were ignored because the script used for the update by query returned a noop value for `ctx.op`. */ noops?: long + /** The number of documents that were successfully deleted. */ deleted?: long + /** The number of requests per second effectively run during the update by query. */ requests_per_second?: float + /** The number of retries attempted by update by query. + * `bulk` is the number of bulk actions retried. + * `search` is the number of search actions retried. */ retries?: Retries task?: TaskId + /** If true, some requests timed out during the update by query. */ timed_out?: boolean + /** The number of milliseconds from start to end of the whole operation. */ took?: DurationValue + /** The number of documents that were successfully processed. */ total?: long + /** The number of documents that were successfully updated. */ updated?: long + /** The number of version conflicts that the update by query hit. */ version_conflicts?: long throttled?: Duration + /** The number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis?: DurationValue throttled_until?: Duration + /** This field should always be equal to zero in an _update_by_query response. + * It only has meaning when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`. */ throttled_until_millis?: DurationValue } export interface UpdateByQueryRethrottleRequest extends RequestBase { -/** The ID for the task. */ + /** The ID for the task. */ task_id: Id - /** The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`. */ + /** The throttle for this request in sub-requests per second. + * To turn off throttling, set it to `-1`. */ requests_per_second?: float /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } @@ -2761,6 +3781,7 @@ export type SpecUtilsStringified = T | string export type SpecUtilsWithNullValue = T | SpecUtilsNullValue export interface AcknowledgedResponseBase { + /** For a successful response, this value is always true. On failure, an exception is returned instead. 
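// Editor's note: a minimal sketch (not part of this patch) of how the
// UpdateByQuery request/response shapes documented above are exercised from
// the client. Assumes an ESM module with top-level await; the index name
// 'my-index' and the 'counter' field are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Run as a background task (wait_for_completion: false) so the response
// carries `task` instead of the full statistics fields described above.
const { task } = await client.updateByQuery({
  index: 'my-index',
  conflicts: 'proceed',
  wait_for_completion: false,
  query: { range: { counter: { lt: 10 } } },
  script: { lang: 'painless', source: 'ctx._source.counter++' }
})

// The throttle can be changed while the task runs; -1 disables it.
if (task != null) {
  await client.updateByQueryRethrottle({ task_id: String(task), requests_per_second: -1 })
}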
*/ acknowledged: boolean } @@ -2819,7 +3840,9 @@ export interface ClusterStatistics { } export interface CompletionStats { + /** Total amount, in bytes, of memory used for completion across all shards assigned to selected nodes. */ size_in_bytes: long + /** Total amount of memory used for completion across all shards assigned to selected nodes. */ size?: ByteSize fields?: Record } @@ -2854,7 +3877,12 @@ export type Distance = string export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm' export interface DocStats { + /** Total number of non-deleted documents across all primary shards assigned to selected nodes. + * This number is based on documents in Lucene segments and may include documents from nested fields. */ count: long + /** Total number of deleted documents across all primary shards assigned to selected nodes. + * This number is based on documents in Lucene segments. + * Elasticsearch reclaims the disk space of deleted Lucene documents when a segment is merged. */ deleted?: long } @@ -2865,14 +3893,25 @@ export type DurationLarge = string export type DurationValue = Unit export interface ElasticsearchVersionInfo { + /** The Elasticsearch Git commit's date. */ build_date: DateTime + /** The build flavor. For example, `default`. */ build_flavor: string + /** The Elasticsearch Git commit's SHA hash. */ build_hash: string + /** Indicates whether the Elasticsearch build was a snapshot. */ build_snapshot: boolean + /** The build type that corresponds to how Elasticsearch was installed. + * For example, `docker`, `rpm`, or `tar`. */ build_type: string + /** The version number of Elasticsearch's underlying Lucene software. */ lucene_version: VersionString + /** The minimum index version with which the responding node can read from disk. */ minimum_index_compatibility_version: VersionString + /** The minimum node version with which the responding node can communicate. + * Also the minimum version from which you can perform a rolling upgrade. */ minimum_wire_compatibility_version: VersionString + /** The Elasticsearch version number. */ number: string } @@ -2889,8 +3928,11 @@ export interface EmptyObject { export type EpochTime = Unit export interface ErrorCauseKeys { + /** The type of error */ type: string + /** A human-readable explanation of the error, in English. */ reason?: string + /** The server stack trace. Present only if the `error_trace=true` parameter was sent with the request. */ stack_trace?: string caused_by?: ErrorCause root_cause?: ErrorCause[] @@ -2980,7 +4022,9 @@ export type GeoHashPrecision = number | string export type GeoHexCell = string export interface GeoLine { + /** Always `"LineString"` */ type: string + /** Array of `[lon, lat]` coordinates */ coordinates: double[][] } @@ -3047,14 +4091,24 @@ export interface IndexingStats { index_failed: long types?: Record write_load?: double + recent_write_load?: double + peak_write_load?: double } export type Indices = IndexName | IndexName[] export interface IndicesOptions { + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only + * missing or closed indices. This behavior applies even if the request targets other open indices. For example, + * a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. 
If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** If true, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean } @@ -3076,41 +4130,68 @@ export type InlineGet = InlineGetKeys export type Ip = string export interface KnnQuery extends QueryDslQueryBase { + /** The name of the vector field to search against */ field: Field + /** The query vector */ query_vector?: QueryVector + /** The query vector builder. You must provide a query_vector_builder or query_vector, but not both. */ query_vector_builder?: QueryVectorBuilder + /** The number of nearest neighbor candidates to consider per shard */ num_candidates?: integer + /** The final number of nearest neighbors to return as top hits */ k?: integer + /** Filters for the kNN search query */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The minimum similarity for a vector to be considered a match */ similarity?: float + /** Apply oversampling and rescoring to quantized vectors */ rescore_vector?: RescoreVector } export interface KnnRetriever extends RetrieverBase { + /** The name of the vector field to search against. */ field: string + /** Query vector. Must have the same number of dimensions as the vector field you are searching against. You must provide a query_vector_builder or query_vector, but not both. */ query_vector?: QueryVector + /** Defines a model to build a query vector. */ query_vector_builder?: QueryVectorBuilder + /** Number of nearest neighbors to return as top hits. */ k: integer + /** Number of nearest neighbor candidates to consider per shard. */ num_candidates: integer + /** The minimum similarity required for a document to be considered a match. */ similarity?: float + /** Apply oversampling and rescoring to quantized vectors */ rescore_vector?: RescoreVector } export interface KnnSearch { + /** The name of the vector field to search against */ field: Field + /** The query vector */ query_vector?: QueryVector + /** The query vector builder. You must provide a query_vector_builder or query_vector, but not both. */ query_vector_builder?: QueryVectorBuilder + /** The final number of nearest neighbors to return as top hits */ k?: integer + /** The number of nearest neighbor candidates to consider per shard */ num_candidates?: integer + /** Boost value to apply to kNN scores */ boost?: float + /** Filters for the kNN search query */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The minimum similarity for a vector to be considered a match */ similarity?: float + /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits + /** Apply oversampling and rescoring to quantized vectors */ rescore_vector?: RescoreVector } export interface LatLonGeoLocation { + /** Latitude */ lat: double + /** Longitude */ lon: double } @@ -3161,10 +4242,15 @@ export interface NestedSortValue { } export interface NodeAttributes { + /** Lists node attributes. */ attributes: Record + /** The ephemeral ID of the node. */ ephemeral_id: Id + /** The unique identifier of the node. */ id?: NodeId + /** The unique identifier of the node. */ name: NodeName + /** The host and port where transport HTTP connections are accepted. 
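// Editor's note: a sketch of the KnnSearch shape documented above, passed as
// the search-level `knn` option. The 'my-vectors' index, the 'embedding'
// dense_vector field, and the query vector are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.search({
  index: 'my-vectors',
  knn: {
    field: 'embedding',
    query_vector: [0.1, 0.2, 0.3],
    k: 10,
    num_candidates: 100,
    filter: { term: { status: 'published' } }
  }
})
console.log(result.hits.hits)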
*/ transport_address: TransportAddress } @@ -3193,8 +4279,11 @@ export interface NodeShard { export interface NodeStatistics { failures?: ErrorCause[] + /** Total number of nodes selected by the request. */ total: integer + /** Number of nodes that responded successfully to the request. */ successful: integer + /** Number of nodes that rejected the request or failed to respond. If this value is not 0, a reason for the rejection or failure is included in the response. */ failed: integer } @@ -3223,13 +4312,22 @@ export interface PluginStats { export type PropertyName = string export interface QueryCacheStats { + /** Total number of entries added to the query cache across all shards assigned to selected nodes. + * This number includes current and evicted entries. */ cache_count: long + /** Total number of entries currently in the query cache across all shards assigned to selected nodes. */ cache_size: long + /** Total number of query cache evictions across all shards assigned to selected nodes. */ evictions: long + /** Total count of query cache hits across all shards assigned to selected nodes. */ hit_count: long + /** Total amount of memory used for the query cache across all shards assigned to selected nodes. */ memory_size?: ByteSize + /** Total amount, in bytes, of memory used for the query cache across all shards assigned to selected nodes. */ memory_size_in_bytes: long + /** Total count of query cache misses across all shards assigned to selected nodes. */ miss_count: long + /** Total count of hits and misses in the query cache across all shards assigned to selected nodes. */ total_count: long } @@ -3240,8 +4338,11 @@ export interface QueryVectorBuilder { } export interface RRFRetriever extends RetrieverBase { + /** A list of child retrievers to specify which sets of returned top documents will have the RRF formula applied to them. */ retrievers: RetrieverContainer[] + /** This value determines how much influence documents in individual result sets per query have over the final ranked result set. */ rank_constant?: integer + /** This value determines the size of the individual result sets per query. */ rank_window_size?: integer } @@ -3249,6 +4350,7 @@ export interface RankBase { } export interface RankContainer { + /** The reciprocal rank fusion parameters */ rrf?: RrfRank } @@ -3288,40 +4390,56 @@ export interface RequestCacheStats { } export interface RescoreVector { + /** Applies the specified oversample factor to k on the approximate kNN search */ oversample: float } export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' export interface Retries { + /** The number of bulk actions retried. */ bulk: long + /** The number of search actions retried. */ search: long } export interface RetrieverBase { + /** Query to filter the documents that can match. */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** Minimum _score for matching documents. Documents with a lower _score are not included in the top documents. */ min_score?: float } export interface RetrieverContainer { + /** A retriever that replaces the functionality of a traditional query. */ standard?: StandardRetriever + /** A retriever that replaces the functionality of a knn search. */ knn?: KnnRetriever + /** A retriever that produces top documents from reciprocal rank fusion (RRF). 
*/ rrf?: RRFRetriever + /** A retriever that reranks the top documents based on a reranking model using the InferenceAPI */ text_similarity_reranker?: TextSimilarityReranker + /** A retriever that replaces the functionality of a rule query. */ rule?: RuleRetriever } export type Routing = string export interface RrfRank { + /** How much influence documents in individual result sets per query have over the final ranked result set */ rank_constant?: long + /** Size of the individual result sets per query */ rank_window_size?: long } export interface RuleRetriever extends RetrieverBase { + /** The ruleset IDs containing the rules this retriever is evaluating against. */ ruleset_ids: Id[] + /** The match criteria that will determine if a rule in the provided rulesets should be applied. */ match_criteria: any + /** The retriever whose results rules should be applied to. */ retriever: RetrieverContainer + /** This value determines the size of the individual result set. */ rank_window_size?: integer } @@ -3332,15 +4450,20 @@ export interface ScoreSort { } export interface Script { - source?: string + /** The script source. */ + source?: ScriptSource + /** The `id` for a stored script. */ id?: Id + /** Specifies any named parameters that are passed into the script as variables. + * Use parameters instead of hard-coded values to decrease compile time. */ params?: Record + /** Specifies the language the script is written in. */ lang?: ScriptLanguage options?: Record } export interface ScriptField { - script: Script | string + script: Script | ScriptSource ignore_failure?: boolean } @@ -3348,7 +4471,7 @@ export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | s export interface ScriptSort { order?: SortOrder - script: Script | string + script: Script | ScriptSource type?: ScriptSortType mode?: SortMode nested?: NestedSortValue @@ -3356,10 +4479,12 @@ export interface ScriptSort { export type ScriptSortType = 'string' | 'number' | 'version' +export type ScriptSource = string | SearchSearchRequestBody + export interface ScriptTransform { lang?: string params?: Record - source?: string + source?: ScriptSource id?: string } @@ -3396,29 +4521,53 @@ export interface SearchTransform { export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' export interface SegmentsStats { + /** Total number of segments across all shards assigned to selected nodes. */ count: integer + /** Total amount of memory used for doc values across all shards assigned to selected nodes. */ doc_values_memory?: ByteSize + /** Total amount, in bytes, of memory used for doc values across all shards assigned to selected nodes. */ doc_values_memory_in_bytes: long + /** This object is not populated by the cluster stats API. + * To get information on segment files, use the node stats API. */ file_sizes: Record + /** Total amount of memory used by fixed bit sets across all shards assigned to selected nodes. + * Fixed bit sets are used for nested object field types and type filters for join fields. */ fixed_bit_set?: ByteSize + /** Total amount of memory, in bytes, used by fixed bit sets across all shards assigned to selected nodes. */ fixed_bit_set_memory_in_bytes: long + /** Total amount of memory used by all index writers across all shards assigned to selected nodes. */ index_writer_memory?: ByteSize index_writer_max_memory_in_bytes?: long + /** Total amount, in bytes, of memory used by all index writers across all shards assigned to selected nodes. 
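// Editor's note: a sketch combining the retriever shapes above — a `standard`
// retriever and a `knn` retriever fused through `rrf`. Index and field names
// are hypothetical; `rank_constant` and `rank_window_size` mirror the
// RrfRank/RRFRetriever fields documented above.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.search({
  index: 'my-index',
  retriever: {
    rrf: {
      rank_constant: 60,
      rank_window_size: 100,
      retrievers: [
        { standard: { query: { match: { title: 'vector database' } } } },
        { knn: { field: 'embedding', query_vector: [0.1, 0.2, 0.3], k: 50, num_candidates: 200 } }
      ]
    }
  }
})
console.log(result.hits.hits)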
 */
 index_writer_memory_in_bytes: long
+ /** Unix timestamp, in milliseconds, of the most recently retried indexing request. */
 max_unsafe_auto_id_timestamp: long
+ /** Total amount of memory used for segments across all shards assigned to selected nodes. */
 memory?: ByteSize
+ /** Total amount, in bytes, of memory used for segments across all shards assigned to selected nodes. */
 memory_in_bytes: long
+ /** Total amount of memory used for normalization factors across all shards assigned to selected nodes. */
 norms_memory?: ByteSize
+ /** Total amount, in bytes, of memory used for normalization factors across all shards assigned to selected nodes. */
 norms_memory_in_bytes: long
+ /** Total amount of memory used for points across all shards assigned to selected nodes. */
 points_memory?: ByteSize
+ /** Total amount, in bytes, of memory used for points across all shards assigned to selected nodes. */
 points_memory_in_bytes: long
 stored_memory?: ByteSize
+ /** Total amount, in bytes, of memory used for stored fields across all shards assigned to selected nodes. */
 stored_fields_memory_in_bytes: long
+ /** Total amount, in bytes, of memory used for terms across all shards assigned to selected nodes. */
 terms_memory_in_bytes: long
+ /** Total amount of memory used for terms across all shards assigned to selected nodes. */
 terms_memory?: ByteSize
+ /** Total amount of memory used for term vectors across all shards assigned to selected nodes. */
 term_vectory_memory?: ByteSize
+ /** Total amount, in bytes, of memory used for term vectors across all shards assigned to selected nodes. */
 term_vectors_memory_in_bytes: long
+ /** Total amount of memory used by all version maps across all shards assigned to selected nodes. */
 version_map_memory?: ByteSize
+ /** Total amount, in bytes, of memory used by all version maps across all shards assigned to selected nodes. */
 version_map_memory_in_bytes: long
 }

@@ -3435,8 +4584,11 @@ export interface ShardFailure {
 }

 export interface ShardStatistics {
+ /** The number of shards the operation or search attempted to run on but failed. */
 failed: uint
+ /** The number of shards the operation or search succeeded on. */
 successful: uint
+ /** The number of shards the operation or search will run on overall. */
 total: uint
 failures?: ShardFailure[]
 skipped?: uint
@@ -3476,26 +4628,43 @@ export type SortOrder = 'asc' | 'desc'

 export type SortResults = FieldValue[]

 export interface StandardRetriever extends RetrieverBase {
+ /** Defines a query to retrieve a set of top documents. */
 query?: QueryDslQueryContainer
+ /** Defines a search after object parameter used for pagination. */
 search_after?: SortResults
+ /** Maximum number of documents to collect for each shard. */
 terminate_after?: integer
+ /** A sort object that specifies the order of matching documents. */
 sort?: Sort
+ /** Collapses the top documents by a specified key into a single top document per key. */
 collapse?: SearchFieldCollapse
 }

 export interface StoreStats {
+ /** Total size of all shards assigned to selected nodes. */
 size?: ByteSize
+ /** Total size, in bytes, of all shards assigned to selected nodes. */
 size_in_bytes: long
+ /** A prediction of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */
 reserved?: ByteSize
+ /** A prediction, in bytes, of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities.
*/ reserved_in_bytes: long + /** Total data set size of all shards assigned to selected nodes. + * This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */ total_data_set_size?: ByteSize + /** Total data set size, in bytes, of all shards assigned to selected nodes. + * This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */ total_data_set_size_in_bytes?: long } export interface StoredScript { + /** The language the script is written in. + * For search templates, use `mustache`. */ lang: ScriptLanguage options?: Record - source: string + /** The script source. + * For search templates, an object containing the search template. */ + source: ScriptSource } export type StreamResult = ArrayBuffer @@ -3519,10 +4688,15 @@ export interface TextEmbedding { } export interface TextSimilarityReranker extends RetrieverBase { + /** The nested retriever which will produce the first-level results, that will later be used for reranking. */ retriever: RetrieverContainer + /** This value determines how many documents we will consider from the nested retriever. */ rank_window_size?: integer + /** Unique identifier of the inference endpoint created using the inference API. */ inference_id?: string + /** The text snippet used as the basis for similarity comparison */ inference_text?: string + /** The document field to be used for text similarity comparisons. This field should contain the text that will be evaluated against the inference_text */ field?: string } @@ -3598,12 +4772,20 @@ export interface WktGeoBounds { } export interface WriteResponseBase { + /** The unique identifier for the added document. */ _id: Id + /** The name of the index the document was added to. */ _index: IndexName + /** The primary term assigned to the document for the indexing operation. */ _primary_term?: long + /** The result of the indexing operation: `created` or `updated`. */ result: Result + /** The sequence number assigned to the document for the indexing operation. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ _seq_no?: SequenceNumber + /** Information about the replication process of the operation. */ _shards: ShardStatistics + /** The document version, which is incremented each time the document is updated. */ _version: VersionNumber forced_refresh?: boolean } @@ -3628,7 +4810,10 @@ export interface AggregationsAdjacencyMatrixAggregate extends AggregationsMultiB } export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase { + /** Filters used to create buckets. + * At least one filter is required. */ filters?: Record + /** Separator used to concatenate filter names. Defaults to &. */ separator?: string } @@ -3650,93 +4835,202 @@ export interface AggregationsAggregation { } export interface AggregationsAggregationContainer { + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. */ aggregations?: Record + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. + * @alias aggregations */ aggs?: Record meta?: Metadata + /** A bucket aggregation returning a form of adjacency matrix. + * The request provides a collection of named filter expressions, similar to the `filters` aggregation. + * Each bucket in the response represents a non-empty cell in the matrix of intersecting filters. 
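// Editor's note: a sketch of the StoredScript shape above — the script is
// stored once under an `id`, then referenced through the Script type's `id`
// variant instead of an inline `source`. Script id, index, and params are
// hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.putScript({
  id: 'increment-counter',
  script: { lang: 'painless', source: 'ctx._source.counter += params.step' }
})

await client.update({
  index: 'my-index',
  id: 'doc-1',
  script: { id: 'increment-counter', params: { step: 2 } }
})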
*/ adjacency_matrix?: AggregationsAdjacencyMatrixAggregation + /** A multi-bucket aggregation similar to the date histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */ auto_date_histogram?: AggregationsAutoDateHistogramAggregation + /** A single-value metrics aggregation that computes the average of numeric values that are extracted from the aggregated documents. */ avg?: AggregationsAverageAggregation + /** A sibling pipeline aggregation which calculates the mean value of a specified metric in a sibling aggregation. + * The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation. */ avg_bucket?: AggregationsAverageBucketAggregation + /** A metrics aggregation that computes a box plot of numeric values extracted from the aggregated documents. */ boxplot?: AggregationsBoxplotAggregation + /** A parent pipeline aggregation which runs a script which can perform per bucket computations on metrics in the parent multi-bucket aggregation. */ bucket_script?: AggregationsBucketScriptAggregation + /** A parent pipeline aggregation which runs a script to determine whether the current bucket will be retained in the parent multi-bucket aggregation. */ bucket_selector?: AggregationsBucketSelectorAggregation + /** A parent pipeline aggregation which sorts the buckets of its parent multi-bucket aggregation. */ bucket_sort?: AggregationsBucketSortAggregation + /** A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov test ("K-S test") against a provided distribution and the distribution implied by the documents counts in the configured sibling aggregation. + * @experimental */ bucket_count_ks_test?: AggregationsBucketKsAggregation + /** A sibling pipeline aggregation which runs a correlation function on the configured sibling multi-bucket aggregation. + * @experimental */ bucket_correlation?: AggregationsBucketCorrelationAggregation + /** A single-value metrics aggregation that calculates an approximate count of distinct values. */ cardinality?: AggregationsCardinalityAggregation + /** A multi-bucket aggregation that groups semi-structured text into buckets. + * @experimental */ categorize_text?: AggregationsCategorizeTextAggregation + /** A single bucket aggregation that selects child documents that have the specified type, as defined in a `join` field. */ children?: AggregationsChildrenAggregation + /** A multi-bucket aggregation that creates composite buckets from different sources. + * Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */ composite?: AggregationsCompositeAggregation + /** A parent pipeline aggregation which calculates the cumulative cardinality in a parent `histogram` or `date_histogram` aggregation. */ cumulative_cardinality?: AggregationsCumulativeCardinalityAggregation + /** A parent pipeline aggregation which calculates the cumulative sum of a specified metric in a parent `histogram` or `date_histogram` aggregation. */ cumulative_sum?: AggregationsCumulativeSumAggregation + /** A multi-bucket values source based aggregation that can be applied on date values or date range values extracted from the documents. + * It dynamically builds fixed size (interval) buckets over the values. 
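// Editor's note: a sketch of the composite aggregation described above,
// paginating through all buckets with `after`/`after_key`. Index and field
// are hypothetical; the aggregation result is typed loosely for brevity.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

let after: Record<string, any> | undefined
do {
  const result = await client.search({
    index: 'my-index',
    size: 0,
    aggregations: {
      by_category: {
        composite: {
          size: 1000,
          sources: [{ category: { terms: { field: 'category.keyword' } } }],
          ...(after ? { after } : {})
        }
      }
    }
  })
  const agg: any = result.aggregations?.by_category
  for (const bucket of agg.buckets) console.log(bucket.key, bucket.doc_count)
  after = agg.after_key
} while (after != null)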
*/ date_histogram?: AggregationsDateHistogramAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket. */ date_range?: AggregationsDateRangeAggregation + /** A parent pipeline aggregation which calculates the derivative of a specified metric in a parent `histogram` or `date_histogram` aggregation. */ derivative?: AggregationsDerivativeAggregation + /** A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. + * Similar to the `sampler` aggregation, but adds the ability to limit the number of matches that share a common value. */ diversified_sampler?: AggregationsDiversifiedSamplerAggregation + /** A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */ extended_stats?: AggregationsExtendedStatsAggregation + /** A sibling pipeline aggregation which calculates a variety of stats across all bucket of a specified metric in a sibling aggregation. */ extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation + /** A bucket aggregation which finds frequent item sets, a form of association rules mining that identifies items that often occur together. */ frequent_item_sets?: AggregationsFrequentItemSetsAggregation + /** A single bucket aggregation that narrows the set of documents to those that match a query. */ filter?: QueryDslQueryContainer + /** A multi-bucket aggregation where each bucket contains the documents that match a query. */ filters?: AggregationsFiltersAggregation + /** A metric aggregation that computes the geographic bounding box containing all values for a Geopoint or Geoshape field. */ geo_bounds?: AggregationsGeoBoundsAggregation + /** A metric aggregation that computes the weighted centroid from all coordinate values for geo fields. */ geo_centroid?: AggregationsGeoCentroidAggregation + /** A multi-bucket aggregation that works on `geo_point` fields. + * Evaluates the distance of each document value from an origin point and determines the buckets it belongs to, based on ranges defined in the request. */ geo_distance?: AggregationsGeoDistanceAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell is labeled using a geohash which is of user-definable precision. */ geohash_grid?: AggregationsGeoHashGridAggregation + /** Aggregates all `geo_point` values within a bucket into a `LineString` ordered by the chosen sort field. */ geo_line?: AggregationsGeoLineAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell corresponds to a map tile as used by many online map sites. */ geotile_grid?: AggregationsGeoTileGridAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell corresponds to a H3 cell index and is labeled using the H3Index representation. */ geohex_grid?: AggregationsGeohexGridAggregation + /** Defines a single bucket of all the documents within the search execution context. + * This context is defined by the indices and the document types you’re searching on, but is not influenced by the search query itself. */ global?: AggregationsGlobalAggregation + /** A multi-bucket values source based aggregation that can be applied on numeric values or numeric range values extracted from the documents. 
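// Editor's note: a sketch pairing the date_histogram bucket aggregation with
// the derivative pipeline aggregation, both documented above. Index and field
// names are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.search({
  index: 'my-metrics',
  size: 0,
  aggregations: {
    per_day: {
      date_histogram: { field: '@timestamp', calendar_interval: 'day' },
      aggregations: {
        total_bytes: { sum: { field: 'bytes' } },
        bytes_change: { derivative: { buckets_path: 'total_bytes' } }
      }
    }
  }
})
console.log(result.aggregations)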
 * It dynamically builds fixed size (interval) buckets over the values. */
 histogram?: AggregationsHistogramAggregation
+ /** A multi-bucket value source based aggregation that enables the user to define a set of IP ranges - each representing a bucket. */
 ip_range?: AggregationsIpRangeAggregation
+ /** A bucket aggregation that groups documents based on the network or sub-network of an IP address. */
 ip_prefix?: AggregationsIpPrefixAggregation
+ /** A parent pipeline aggregation which loads a pre-trained model and performs inference on the collated result fields from the parent bucket aggregation. */
 inference?: AggregationsInferenceAggregation
 line?: AggregationsGeoLineAggregation
+ /** A numeric aggregation that computes the following statistics over a set of document fields: `count`, `mean`, `variance`, `skewness`, `kurtosis`, `covariance`, and `correlation`. */
 matrix_stats?: AggregationsMatrixStatsAggregation
+ /** A single-value metrics aggregation that returns the maximum value among the numeric values extracted from the aggregated documents. */
 max?: AggregationsMaxAggregation
+ /** A sibling pipeline aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */
 max_bucket?: AggregationsMaxBucketAggregation
+ /** A single-value aggregation that approximates the median absolute deviation of its search results. */
 median_absolute_deviation?: AggregationsMedianAbsoluteDeviationAggregation
+ /** A single-value metrics aggregation that returns the minimum value among numeric values extracted from the aggregated documents. */
 min?: AggregationsMinAggregation
+ /** A sibling pipeline aggregation which identifies the bucket(s) with the minimum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */
 min_bucket?: AggregationsMinBucketAggregation
+ /** A field data based single bucket aggregation that creates a bucket of all documents in the current document set context that are missing a field value (effectively, missing a field or having the configured NULL value set). */
 missing?: AggregationsMissingAggregation
 moving_avg?: AggregationsMovingAverageAggregation
+ /** Given an ordered series of percentiles, "slides" a window across those percentiles and computes cumulative percentiles. */
 moving_percentiles?: AggregationsMovingPercentilesAggregation
+ /** Given an ordered series of data, "slides" a window across the data and runs a custom script on each window of data.
+ * For convenience, a number of common functions are predefined such as `min`, `max`, and moving averages. */
 moving_fn?: AggregationsMovingFunctionAggregation
+ /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique set of values. */
 multi_terms?: AggregationsMultiTermsAggregation
+ /** A special single bucket aggregation that enables aggregating nested documents. */
 nested?: AggregationsNestedAggregation
+ /** A parent pipeline aggregation which calculates the specific normalized/rescaled value for a specific bucket value. */
 normalize?: AggregationsNormalizeAggregation
+ /** A special single bucket aggregation that selects parent documents that have the specified type, as defined in a `join` field. */
 parent?: AggregationsParentAggregation
+ /** A multi-value metrics aggregation that calculates one or more percentile ranks over numeric values extracted from the aggregated documents.
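// Editor's note: a sketch of the percentiles and percentile_ranks metric
// aggregations noted above; the index and the 'latency_ms' field are
// hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.search({
  index: 'my-metrics',
  size: 0,
  aggregations: {
    latency_percentiles: { percentiles: { field: 'latency_ms', percents: [50, 95, 99] } },
    within_sla: { percentile_ranks: { field: 'latency_ms', values: [200] } }
  }
})
console.log(result.aggregations)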
*/ percentile_ranks?: AggregationsPercentileRanksAggregation + /** A multi-value metrics aggregation that calculates one or more percentiles over numeric values extracted from the aggregated documents. */ percentiles?: AggregationsPercentilesAggregation + /** A sibling pipeline aggregation which calculates percentiles across all bucket of a specified metric in a sibling aggregation. */ percentiles_bucket?: AggregationsPercentilesBucketAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */ range?: AggregationsRangeAggregation + /** A multi-bucket value source based aggregation which finds "rare" terms—terms that are at the long-tail of the distribution and are not frequent. */ rare_terms?: AggregationsRareTermsAggregation + /** Calculates a rate of documents or a field in each bucket. + * Can only be used inside a `date_histogram` or `composite` aggregation. */ rate?: AggregationsRateAggregation + /** A special single bucket aggregation that enables aggregating on parent documents from nested documents. + * Should only be defined inside a `nested` aggregation. */ reverse_nested?: AggregationsReverseNestedAggregation + /** A single bucket aggregation that randomly includes documents in the aggregated results. + * Sampling provides significant speed improvement at the cost of accuracy. + * @remarks This property is not supported on Elastic Cloud Serverless. + * @experimental */ random_sampler?: AggregationsRandomSamplerAggregation + /** A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. */ sampler?: AggregationsSamplerAggregation + /** A metric aggregation that uses scripts to provide a metric output. */ scripted_metric?: AggregationsScriptedMetricAggregation + /** An aggregation that subtracts values in a time series from themselves at different time lags or periods. */ serial_diff?: AggregationsSerialDifferencingAggregation + /** Returns interesting or unusual occurrences of terms in a set. */ significant_terms?: AggregationsSignificantTermsAggregation + /** Returns interesting or unusual occurrences of free-text terms in a set. */ significant_text?: AggregationsSignificantTextAggregation + /** A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */ stats?: AggregationsStatsAggregation + /** A sibling pipeline aggregation which calculates a variety of stats across all bucket of a specified metric in a sibling aggregation. */ stats_bucket?: AggregationsStatsBucketAggregation + /** A multi-value metrics aggregation that computes statistics over string values extracted from the aggregated documents. */ string_stats?: AggregationsStringStatsAggregation + /** A single-value metrics aggregation that sums numeric values that are extracted from the aggregated documents. */ sum?: AggregationsSumAggregation + /** A sibling pipeline aggregation which calculates the sum of a specified metric across all buckets in a sibling aggregation. */ sum_bucket?: AggregationsSumBucketAggregation + /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. */ terms?: AggregationsTermsAggregation + /** The time series aggregation queries data created using a time series index. + * This is typically data such as metrics or other data streams with a time component, and requires creating an index using the time series mode. 
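// Editor's note: a sketch of the sampler and significant_terms aggregations
// documented above — the sampler limits analysis to top-scoring documents
// before significant_terms runs. Index, query, and field are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.search({
  index: 'my-logs',
  size: 0,
  query: { match: { message: 'error' } },
  aggregations: {
    sample: {
      sampler: { shard_size: 500 },
      aggregations: {
        notable_tags: { significant_terms: { field: 'tags' } }
      }
    }
  }
})
console.log(result.aggregations)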
 * @experimental */
 time_series?: AggregationsTimeSeriesAggregation
+ /** A metric aggregation that returns the top matching documents per bucket. */
 top_hits?: AggregationsTopHitsAggregation
+ /** A metrics aggregation that performs a statistical hypothesis test in which the test statistic follows a Student’s t-distribution under the null hypothesis on numeric values extracted from the aggregated documents. */
 t_test?: AggregationsTTestAggregation
+ /** A metric aggregation that selects metrics from the document with the largest or smallest sort value. */
 top_metrics?: AggregationsTopMetricsAggregation
+ /** A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. */
 value_count?: AggregationsValueCountAggregation
+ /** A single-value metrics aggregation that computes the weighted average of numeric values that are extracted from the aggregated documents. */
 weighted_avg?: AggregationsWeightedAverageAggregation
+ /** A multi-bucket aggregation similar to the histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */
 variable_width_histogram?: AggregationsVariableWidthHistogramAggregation
 }

-export interface AggregationsAggregationRange {
- from?: double | null
+export type AggregationsAggregationRange = AggregationsUntypedAggregationRange | AggregationsDateAggregationRange | AggregationsNumberAggregationRange | AggregationsTermAggregationRange
+
+export interface AggregationsAggregationRangeBase {
+ /** Start of the range (inclusive). */
+ from?: T
+ /** Custom key to return the range with. */
 key?: string
- to?: double | null
+ /** End of the range (exclusive). */
+ to?: T
 }

 export interface AggregationsArrayPercentilesItem {
@@ -3750,14 +5044,24 @@ export interface AggregationsAutoDateHistogramAggregate extends AggregationsMult
 }

 export interface AggregationsAutoDateHistogramAggregation extends AggregationsBucketAggregationBase {
+ /** The target number of buckets. */
 buckets?: integer
+ /** The field on which to run the aggregation. */
 field?: Field
+ /** The date format used to format `key_as_string` in the response.
+ * If no `format` is specified, the first date format specified in the field mapping is used. */
 format?: string
+ /** The minimum rounding interval.
+ * This can make the collection process more efficient, as the aggregation will not attempt to round at any interval lower than `minimum_interval`. */
 minimum_interval?: AggregationsMinimumInterval
+ /** The value to apply to documents that do not have a value.
+ * By default, documents without a value are ignored. */
 missing?: DateTime
+ /** Time zone specified as an ISO 8601 UTC offset. */
 offset?: string
 params?: Record
- script?: Script | string
+ script?: Script | ScriptSource
+ /** Time zone ID. */
 time_zone?: TimeZone
 }

@@ -3788,6 +5092,7 @@ export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase
 }

 export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase {
+ /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */
 compression?: double
 }

 export interface AggregationsBucketAggregationBase {
 }

 export interface AggregationsBucketCorrelationAggregation extends AggregationsBucketPathAggregation {
+ /** The correlation function to execute. */
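// Editor's note: a sketch of the auto_date_histogram options documented above:
// a target bucket count plus a minimum_interval floor. Index and field are
// hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.search({
  index: 'my-metrics',
  size: 0,
  aggregations: {
    over_time: {
      auto_date_histogram: { field: '@timestamp', buckets: 20, minimum_interval: 'minute' }
    }
  }
})
console.log(result.aggregations)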
*/ function: AggregationsBucketCorrelationFunction } export interface AggregationsBucketCorrelationFunction { + /** The configuration to calculate a count correlation. This function is designed for determining the correlation of a term value and a given metric. */ count_correlation: AggregationsBucketCorrelationFunctionCountCorrelation } export interface AggregationsBucketCorrelationFunctionCountCorrelation { + /** The indicator with which to correlate the configured `bucket_path` values. */ indicator: AggregationsBucketCorrelationFunctionCountCorrelationIndicator } export interface AggregationsBucketCorrelationFunctionCountCorrelationIndicator { + /** The total number of documents that initially created the expectations. It’s required to be greater + * than or equal to the sum of all values in the buckets_path as this is the originating superset of data + * to which the term values are correlated. */ doc_count: integer + /** An array of numbers with which to correlate the configured `bucket_path` values. + * The length of this value must always equal the number of buckets returned by the `bucket_path`. */ expectations: double[] + /** An array of fractions to use when averaging and calculating variance. This should be used if + * the pre-calculated data and the buckets_path have known gaps. The length of fractions, if provided, + * must equal expectations. */ fractions?: double[] } export interface AggregationsBucketKsAggregation extends AggregationsBucketPathAggregation { + /** A list of string values indicating which K-S test alternative to calculate. The valid values + * are: "greater", "less", "two_sided". This parameter is key for determining the K-S statistic used + * when calculating the K-S test. Default value is all possible alternative hypotheses. */ alternative?: string[] + /** A list of doubles indicating the distribution of the samples with which to compare to the `buckets_path` results. + * In typical usage this is the overall proportion of documents in each bucket, which is compared with the actual + * document proportions in each bucket from the sibling aggregation counts. The default is to assume that overall + * documents are uniformly distributed on these buckets, which they would be if one used equal percentiles of a + * metric to define the bucket end points. */ fractions?: double[] + /** Indicates the sampling methodology when calculating the K-S test. Note, this is sampling of the returned values. + * This determines the cumulative distribution function (CDF) points used comparing the two samples. Default is + * `upper_tail`, which emphasizes the upper end of the CDF points. Valid options are: `upper_tail`, `uniform`, + * and `lower_tail`. */ sampling_method?: string } @@ -3823,21 +5151,29 @@ export interface AggregationsBucketMetricValueAggregate extends AggregationsSing } export interface AggregationsBucketPathAggregation { + /** Path to the buckets that contain one set of values to correlate. */ buckets_path?: AggregationsBucketsPath } export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { - script?: Script | string + /** The script to run for this aggregation. */ + script?: Script | ScriptSource } export interface AggregationsBucketSelectorAggregation extends AggregationsPipelineAggregationBase { - script?: Script | string + /** The script to run for this aggregation. */ + script?: Script | ScriptSource } export interface AggregationsBucketSortAggregation { + /** Buckets in positions prior to `from` will be truncated. 
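// Editor's note: a sketch of the bucket_selector pipeline aggregation
// documented above, dropping daily buckets whose sum stays under a threshold.
// Index, fields, and the threshold are hypothetical; the script is passed as
// a plain ScriptSource string.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.search({
  index: 'my-sales',
  size: 0,
  aggregations: {
    per_day: {
      date_histogram: { field: '@timestamp', calendar_interval: 'day' },
      aggregations: {
        total: { sum: { field: 'amount' } },
        keep_big_days: {
          bucket_selector: {
            buckets_path: { total: 'total' },
            script: 'params.total > 1000'
          }
        }
      }
    }
  }
})
console.log(result.aggregations)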
*/ from?: integer + /** The policy to apply when gaps are found in the data. */ gap_policy?: AggregationsGapPolicy + /** The number of buckets to return. + * Defaults to all buckets of the parent aggregation. */ size?: integer + /** The list of fields to sort on. */ sort?: Sort } @@ -3852,30 +5188,58 @@ export interface AggregationsCardinalityAggregate extends AggregationsAggregateB } export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { + /** A unique count below which counts are expected to be close to accurate. + * This allows to trade memory for accuracy. */ precision_threshold?: integer rehash?: boolean + /** Mechanism by which cardinality aggregations is run. */ execution_hint?: AggregationsCardinalityExecutionMode } export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic' export interface AggregationsCategorizeTextAggregation { + /** The semi-structured text field to categorize. */ field: Field + /** The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1. + * Smaller values use less memory and create fewer categories. Larger values will use more memory and + * create narrower categories. Max allowed value is 100. */ max_unique_tokens?: integer + /** The maximum number of token positions to match on before attempting to merge categories. Larger + * values will use more memory and create narrower categories. Max allowed value is 100. */ max_matched_tokens?: integer + /** The minimum percentage of tokens that must match for text to be added to the category bucket. Must + * be between 1 and 100. The larger the value the narrower the categories. Larger values will increase memory + * usage and create narrower categories. */ similarity_threshold?: integer + /** This property expects an array of regular expressions. The expressions are used to filter out matching + * sequences from the categorization field values. You can use this functionality to fine tune the categorization + * by excluding sequences from consideration when categories are defined. For example, you can exclude SQL + * statements that appear in your log files. This property cannot be used at the same time as categorization_analyzer. + * If you only want to define simple regular expression filters that are applied prior to tokenization, setting + * this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, + * use the categorization_analyzer property instead and include the filters as pattern_replace character filters. */ categorization_filters?: string[] + /** The categorization analyzer specifies how the text is analyzed and tokenized before being categorized. + * The syntax is very similar to that used to define the analyzer in the [Analyze endpoint](https://www.elastic.co/guide/en/elasticsearch/reference/8.0/indices-analyze.html). This property + * cannot be used at the same time as categorization_filters. */ categorization_analyzer?: AggregationsCategorizeTextAnalyzer + /** The number of categorization buckets to return from each shard before merging all the results. */ shard_size?: integer + /** The number of buckets to return. */ size?: integer + /** The minimum number of documents in a bucket to be returned to the results. */ min_doc_count?: integer + /** The minimum number of documents in a bucket to be returned from the shard before merging. 
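// Editor's note: a sketch of the categorize_text aggregation options
// documented above; the aggregation is marked experimental, and the index,
// field, and thresholds here are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.search({
  index: 'my-logs',
  size: 0,
  aggregations: {
    message_categories: {
      categorize_text: { field: 'message', similarity_threshold: 70, size: 10 }
    }
  }
})
console.log(result.aggregations)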
*/ shard_min_doc_count?: integer } export type AggregationsCategorizeTextAnalyzer = string | AggregationsCustomCategorizeTextAnalyzer export interface AggregationsChiSquareHeuristic { + /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ background_is_superset: boolean + /** Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */ include_negatives: boolean } @@ -3885,6 +5249,7 @@ export type AggregationsChildrenAggregate = AggregationsChildrenAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase { + /** The child type that should be selected. */ type?: RelationName } @@ -3895,24 +5260,34 @@ export interface AggregationsCompositeAggregate extends AggregationsMultiBucketA export type AggregationsCompositeAggregateKey = Record export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase { + /** When paginating, use the `after_key` value returned in the previous response to retrieve the next page. */ after?: AggregationsCompositeAggregateKey + /** The number of composite buckets that should be returned. */ size?: integer + /** The value sources used to build composite buckets. + * Keys are returned in the order of the `sources` definition. */ sources?: Record[] } export interface AggregationsCompositeAggregationBase { + /** Either `field` or `script` must be present */ field?: Field missing_bucket?: boolean missing_order?: AggregationsMissingOrder - script?: Script | string + /** Either `field` or `script` must be present */ + script?: Script | ScriptSource value_type?: AggregationsValueType order?: SortOrder } export interface AggregationsCompositeAggregationSource { + /** A terms aggregation. */ terms?: AggregationsCompositeTermsAggregation + /** A histogram aggregation. */ histogram?: AggregationsCompositeHistogramAggregation + /** A date histogram aggregation. */ date_histogram?: AggregationsCompositeDateHistogramAggregation + /** A geotile grid aggregation. */ geotile_grid?: AggregationsCompositeGeoTileGridAggregation } @@ -3924,7 +5299,9 @@ export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys export interface AggregationsCompositeDateHistogramAggregation extends AggregationsCompositeAggregationBase { format?: string + /** Either `calendar_interval` or `fixed_interval` must be present */ calendar_interval?: DurationLarge + /** Either `calendar_interval` or `fixed_interval` must be present */ fixed_interval?: DurationLarge offset?: Duration time_zone?: TimeZone @@ -3959,24 +5336,44 @@ export interface AggregationsCustomCategorizeTextAnalyzer { filter?: string[] } +export interface AggregationsDateAggregationRange extends AggregationsAggregationRangeBase { +} + export interface AggregationsDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase { + /** Calendar-aware interval. + * Can be specified using the unit name, such as `month`, or as a single unit quantity, such as `1M`. */ calendar_interval?: AggregationsCalendarInterval + /** Enables extending the bounds of the histogram beyond the data itself. */ extended_bounds?: AggregationsExtendedBounds + /** Limits the histogram to specified bounds. 
*/ hard_bounds?: AggregationsExtendedBounds + /** The date field whose values are use to build a histogram. */ field?: Field + /** Fixed intervals: a fixed number of SI units and never deviate, regardless of where they fall on the calendar. */ fixed_interval?: Duration + /** The date format used to format `key_as_string` in the response. + * If no `format` is specified, the first date format specified in the field mapping is used. */ format?: string interval?: Duration + /** Only returns buckets that have `min_doc_count` number of documents. + * By default, all buckets between the first bucket that matches documents and the last one are returned. */ min_doc_count?: integer + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: DateTime + /** Changes the start value of each bucket by the specified positive (`+`) or negative offset (`-`) duration. */ offset?: Duration + /** The sort order of the returned buckets. */ order?: AggregationsAggregateOrder params?: Record - script?: Script | string + script?: Script | ScriptSource + /** Time zone used for bucketing and rounding. + * Defaults to Coordinated Universal Time (UTC). */ time_zone?: TimeZone + /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ keyed?: boolean } @@ -3991,20 +5388,21 @@ export interface AggregationsDateRangeAggregate extends AggregationsRangeAggrega } export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase { + /** The date field whose values are use to build ranges. */ field?: Field + /** The date format used to format `from` and `to` in the response. */ format?: string + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: AggregationsMissing - ranges?: AggregationsDateRangeExpression[] + /** Array of date ranges. */ + ranges?: AggregationsDateAggregationRange[] + /** Time zone used to convert dates from another time zone to UTC. */ time_zone?: TimeZone + /** Set to `true` to associate a unique string key with each bucket and returns the ranges as a hash rather than an array. */ keyed?: boolean } -export interface AggregationsDateRangeExpression { - from?: AggregationsFieldDateMath - key?: string - to?: AggregationsFieldDateMath -} - export interface AggregationsDerivativeAggregate extends AggregationsSingleMetricAggregateBase { normalized_value?: double normalized_value_as_string?: string @@ -4014,10 +5412,14 @@ export interface AggregationsDerivativeAggregation extends AggregationsPipelineA } export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase { + /** The type of value used for de-duplication. */ execution_hint?: AggregationsSamplerAggregationExecutionHint + /** Limits how many documents are permitted per choice of de-duplicating value. */ max_docs_per_value?: integer - script?: Script | string + script?: Script | ScriptSource + /** Limits how many top-scoring documents are collected in the sample processed on each shard. */ shard_size?: integer + /** The field used to provide values used for de-duplication. */ field?: Field } @@ -4041,7 +5443,9 @@ export interface AggregationsEwmaMovingAverageAggregation extends AggregationsMo } export interface AggregationsExtendedBounds { + /** Maximum value for the bound. */ max?: T + /** Minimum value for the bound. 
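// Editor's note: a sketch of the date_range aggregation and the from/key/to
// range entries documented above, using date math expressions. Index and
// field are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.search({
  index: 'my-index',
  size: 0,
  aggregations: {
    recency: {
      date_range: {
        field: '@timestamp',
        format: 'yyyy-MM-dd',
        ranges: [
          { key: 'older', to: 'now-30d/d' },
          { key: 'last_30_days', from: 'now-30d/d' }
        ]
      }
    }
  }
})
console.log(result.aggregations)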
*/ min?: T } @@ -4063,6 +5467,7 @@ export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAgg } export interface AggregationsExtendedStatsAggregation extends AggregationsFormatMetricAggregationBase { + /** The number of standard deviations above/below the mean to display. */ sigma?: double } @@ -4070,6 +5475,7 @@ export interface AggregationsExtendedStatsBucketAggregate extends AggregationsEx } export interface AggregationsExtendedStatsBucketAggregation extends AggregationsPipelineAggregationBase { + /** The number of standard deviations above/below the mean to display. */ sigma?: double } @@ -4084,9 +5490,14 @@ export interface AggregationsFiltersAggregate extends AggregationsMultiBucketAgg } export interface AggregationsFiltersAggregation extends AggregationsBucketAggregationBase { + /** Collection of queries from which to build buckets. */ filters?: AggregationsBuckets + /** Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */ other_bucket?: boolean + /** The key with which the other bucket is returned. */ other_bucket_key?: string + /** By default, the named filters aggregation returns the buckets as an object. + * Set to `false` to return the buckets as an array of objects. */ keyed?: boolean } @@ -4107,10 +5518,15 @@ export interface AggregationsFrequentItemSetsAggregate extends AggregationsMulti } export interface AggregationsFrequentItemSetsAggregation { + /** Fields to analyze. */ fields: AggregationsFrequentItemSetsField[] + /** The minimum size of one item set. */ minimum_set_size?: integer + /** The minimum support of one item set. */ minimum_support?: double + /** The number of top item sets to return. */ size?: integer + /** Query that filters documents from analysis. */ filter?: QueryDslQueryContainer } @@ -4123,7 +5539,11 @@ export type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBuc export interface AggregationsFrequentItemSetsField { field: Field + /** Values to exclude. + * Can be regular expression strings or arrays of strings of exact terms. */ exclude?: AggregationsTermsExclude + /** Values to include. + * Can be regular expression strings or arrays of strings of exact terms. */ include?: AggregationsTermsInclude } @@ -4134,6 +5554,7 @@ export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBas } export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase { + /** Specifies whether the bounding box should be allowed to overlap the international date line. */ wrap_longitude?: boolean } @@ -4151,10 +5572,15 @@ export interface AggregationsGeoDistanceAggregate extends AggregationsRangeAggre } export interface AggregationsGeoDistanceAggregation extends AggregationsBucketAggregationBase { + /** The distance calculation type. */ distance_type?: GeoDistanceType + /** A field of type `geo_point` used to evaluate the distance. */ field?: Field + /** The origin used to evaluate the distance. */ origin?: GeoLocation + /** An array of ranges used to bucket documents. */ ranges?: AggregationsAggregationRange[] + /** The distance unit. */ unit?: DistanceUnit } @@ -4162,10 +5588,17 @@ export interface AggregationsGeoHashGridAggregate extends AggregationsMultiBucke } export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase { + /** The bounding box to filter the points in each bucket. */ bounds?: GeoBounds + /** Field containing indexed `geo_point` or `geo_shape` values. 
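// Editor's note: a sketch of the keyed filters aggregation with the
// other_bucket options documented above; the severity queries are
// hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.search({
  index: 'my-logs',
  size: 0,
  aggregations: {
    by_severity: {
      filters: {
        other_bucket_key: 'other_severities',
        filters: {
          errors: { match: { severity: 'error' } },
          warnings: { match: { severity: 'warn' } }
        }
      }
    }
  }
})
console.log(result.aggregations)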
 export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase {
+ /** The bounding box to filter the points in each bucket. */
  bounds?: GeoBounds
+ /** Field containing indexed `geo_point` or `geo_shape` values.
+  * If the field contains an array, `geohash_grid` aggregates all array values. */
  field?: Field
+ /** The string length of the geohashes used to define cells/buckets in the results. */
  precision?: GeoHashPrecision
+ /** Allows for more accurate counting of the top cells returned in the final result of the aggregation.
+  * Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */
  shard_size?: integer
+ /** The maximum number of geohash buckets to return. */
  size?: integer
 }
@@ -4191,18 +5624,27 @@ }
 export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase
 }
 export interface AggregationsGeoLineAggregation {
+ /** The name of the geo_point field. */
  point: AggregationsGeoLinePoint
+ /** The name of the numeric field to use as the sort key for ordering the points.
+  * When the `geo_line` aggregation is nested inside a `time_series` aggregation, this field defaults to `@timestamp`, and any other value will result in error. */
  sort: AggregationsGeoLineSort
+ /** When `true`, returns an additional array of the sort values in the feature properties. */
  include_sort?: boolean
+ /** The order in which the line is sorted (ascending or descending). */
  sort_order?: SortOrder
+ /** The maximum length of the line represented in the aggregation.
+  * Valid sizes are between 1 and 10000. */
  size?: integer
 }
 export interface AggregationsGeoLinePoint {
+ /** The name of the geo_point field. */
  field: Field
 }
 export interface AggregationsGeoLineSort {
+ /** The name of the numeric field to use as the sort key for ordering the points. */
  field: Field
 }
@@ -4210,10 +5652,18 @@ }
 export interface AggregationsGeoTileGridAggregate extends AggregationsMultiBucke
 }
 export interface AggregationsGeoTileGridAggregation extends AggregationsBucketAggregationBase {
+ /** Field containing indexed `geo_point` or `geo_shape` values.
+  * If the field contains an array, `geotile_grid` aggregates all array values. */
  field?: Field
+ /** Integer zoom of the key used to define cells/buckets in the results.
+  * Values outside of the range [0,29] will be rejected. */
  precision?: GeoTilePrecision
+ /** Allows for more accurate counting of the top cells returned in the final result of the aggregation.
+  * Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */
  shard_size?: integer
+ /** The maximum number of buckets to return. */
  size?: integer
+ /** A bounding box to filter the geo-points or geo-shapes in each bucket. */
  bounds?: GeoBounds
 }
@@ -4224,10 +5674,17 @@ export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys
 & { [property: string]: AggregationsAggregate | GeoTile | long }
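[Editor's aside: a short sketch, not part of the patch, showing the `geotile_grid` options above in a request. The `museums` index and `location` geo_point field are hypothetical; the client setup matches the earlier sketches.]

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Bucket geo-points into map tiles at zoom level 8.
const response = await client.search({
  index: 'museums',
  size: 0,
  aggs: {
    tiles: {
      geotile_grid: { field: 'location', precision: 8, shard_size: 100 }
    }
  }
})
console.log(response.aggregations)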
 export interface AggregationsGeohexGridAggregation extends AggregationsBucketAggregationBase {
+ /** Field containing indexed `geo_point` or `geo_shape` values.
+  * If the field contains an array, `geohex_grid` aggregates all array values. */
  field: Field
+ /** Integer zoom of the key used to define cells or buckets
+  * in the results. Value should be between 0-15. */
  precision?: integer
+ /** Bounding box used to filter the geo-points in each bucket. */
  bounds?: GeoBounds
+ /** Maximum number of buckets to return. */
  size?: integer
+ /** Number of buckets returned from each shard. */
  shard_size?: integer
 }
@@ -4240,10 +5697,12 @@ export interface AggregationsGlobalAggregation extends AggregationsBucketAggrega
 }
 export interface AggregationsGoogleNormalizedDistanceHeuristic {
+ /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */
  background_is_superset?: boolean
 }
 export interface AggregationsHdrMethod {
+ /** Specifies the resolution of values for the histogram in number of significant digits. */
  number_of_significant_value_digits?: integer
 }
@@ -4257,16 +5716,31 @@ }
 export interface AggregationsHistogramAggregate extends AggregationsMultiBucketA
 }
 export interface AggregationsHistogramAggregation extends AggregationsBucketAggregationBase {
+ /** Enables extending the bounds of the histogram beyond the data itself. */
  extended_bounds?: AggregationsExtendedBounds
+ /** Limits the range of buckets in the histogram.
+  * It is particularly useful in the case of open data ranges that can result in a very large number of buckets. */
  hard_bounds?: AggregationsExtendedBounds
+ /** The name of the field to aggregate on. */
  field?: Field
+ /** The interval for the buckets.
+  * Must be a positive decimal. */
  interval?: double
+ /** Only returns buckets that have `min_doc_count` number of documents.
+  * By default, the response will fill gaps in the histogram with empty buckets. */
  min_doc_count?: integer
+ /** The value to apply to documents that do not have a value.
+  * By default, documents without a value are ignored. */
  missing?: double
+ /** By default, the bucket keys start with 0 and then continue in even spaced steps of `interval`.
+  * The bucket boundaries can be shifted by using the `offset` option. */
  offset?: double
+ /** The sort order of the returned buckets.
+  * By default, the returned buckets are sorted by their key ascending. */
  order?: AggregationsAggregateOrder
- script?: Script | string
+ script?: Script | ScriptSource
  format?: string
+ /** If `true`, returns buckets as a hash instead of an array, keyed by the bucket keys. */
  keyed?: boolean
 }
@@ -4313,7 +5787,9 @@ export type AggregationsInferenceAggregate = AggregationsInferenceAggregateKeys
 & { [property: string]: any }
 export interface AggregationsInferenceAggregation extends AggregationsPipelineAggregationBase {
+ /** The ID or alias for the trained model. */
  model_id: Name
+ /** Contains the inference type and its options. */
  inference_config?: AggregationsInferenceConfigContainer
 }
 export interface AggregationsInferenceClassImportance {
 }
@@ -4323,7 +5799,9 @@ }
 export interface AggregationsInferenceConfigContainer {
+ /** Regression configuration for inference. */
  regression?: MlRegressionInferenceOptions
+ /** Classification configuration for inference. */
  classification?: MlClassificationInferenceOptions
 }
@@ -4343,11 +5821,18 @@ }
 export interface AggregationsIpPrefixAggregate extends AggregationsMultiBucketAg
 }
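[Editor's aside: a minimal sketch, not part of the patch, of the `histogram` options above. The `products` index and `price` field are hypothetical; client setup as in the earlier sketches.]

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Price buckets of width 50, shifted by 10, skipping empty buckets.
const response = await client.search({
  index: 'products',
  size: 0,
  aggs: {
    prices: {
      histogram: { field: 'price', interval: 50, offset: 10, min_doc_count: 1 }
    }
  }
})
console.log(response.aggregations)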
 export interface AggregationsIpPrefixAggregation extends AggregationsBucketAggregationBase {
+ /** The IP address field to aggregate on. The field mapping type must be `ip`. */
  field: Field
+ /** Length of the network prefix. For IPv4 addresses the accepted range is [0, 32].
+  * For IPv6 addresses the accepted range is [0, 128]. */
  prefix_length: integer
+ /** Defines whether the prefix applies to IPv6 addresses. */
  is_ipv6?: boolean
+ /** Defines whether the prefix length is appended to IP address keys in the response. */
  append_prefix_length?: boolean
+ /** Defines whether buckets are returned as a hash rather than an array in the response. */
  keyed?: boolean
+ /** Minimum number of documents in a bucket for it to be included in the response. */
  min_doc_count?: long
 }
@@ -4364,13 +5849,18 @@ }
 export interface AggregationsIpRangeAggregate extends AggregationsMultiBucketAgg
 }
 export interface AggregationsIpRangeAggregation extends AggregationsBucketAggregationBase {
+ /** The IP field whose values are used to build ranges. */
  field?: Field
+ /** Array of IP ranges. */
  ranges?: AggregationsIpRangeAggregationRange[]
 }
 export interface AggregationsIpRangeAggregationRange {
+ /** Start of the range. */
  from?: string | null
+ /** IP range defined as a CIDR mask. */
  mask?: string
+ /** End of the range. */
  to?: string | null
 }
@@ -4410,7 +5900,10 @@ export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys
 & { [property: string]: AggregationsAggregate | long | string }
 export interface AggregationsMatrixAggregation {
+ /** An array of fields for computing the statistics. */
  fields?: Fields
+ /** The value to apply to documents that do not have a value.
+  * By default, documents without a value are ignored. */
  missing?: Record
 }
@@ -4420,6 +5913,7 @@ }
 export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateB
 }
 export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation {
+ /** Array value the aggregation will use for array or multi-valued fields. */
  mode?: SortMode
 }
@@ -4447,13 +5941,17 @@ }
 export interface AggregationsMedianAbsoluteDeviationAggregate extends Aggregatio
 }
 export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase {
+ /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */
  compression?: double
 }
 export interface AggregationsMetricAggregationBase {
+ /** The field on which to run the aggregation. */
  field?: Field
+ /** The value to apply to documents that do not have a value.
+  * By default, documents without a value are ignored. */
  missing?: AggregationsMissing
- script?: Script | string
+ script?: Script | ScriptSource
 }
 export interface AggregationsMinAggregate extends AggregationsSingleMetricAggregateBase {
 }
@@ -4475,6 +5973,7 @@ export type AggregationsMissingAggregate = AggregationsMissingAggregateKeys
 & { [property: string]: AggregationsAggregate | long | Metadata }
 export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase {
+ /** The name of the field. */
  field?: Field
  missing?: AggregationsMissing
 }
@@ -4490,13 +5989,20 @@ export interface AggregationsMovingAverageAggregationBase extends AggregationsPi
 }
 export interface AggregationsMovingFunctionAggregation extends AggregationsPipelineAggregationBase {
+ /** The script that should be executed on each window of data. */
  script?: string
+ /** By default, the window consists of the last n values excluding the current bucket.
+  * Increasing `shift` by 1, moves the starting window position by 1 to the right. */
  shift?: integer
+ /** The size of window to "slide" across the histogram. */
  window?: integer
 }
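[Editor's aside: a sketch, not part of the patch, of the `moving_fn` pipeline options above, using the `MovingFunctions.unweightedAvg` helper that Painless provides for this aggregation. The `logs` index and `bytes` field are hypothetical.]

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Seven-bucket moving average over a daily sum.
const response = await client.search({
  index: 'logs',
  size: 0,
  aggs: {
    per_day: {
      date_histogram: { field: '@timestamp', calendar_interval: 'day' },
      aggs: {
        total: { sum: { field: 'bytes' } },
        smoothed: {
          moving_fn: {
            buckets_path: 'total',
            window: 7,
            shift: 0,
            script: 'MovingFunctions.unweightedAvg(values)'
          }
        }
      }
    }
  }
})
console.log(response.aggregations)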
 export interface AggregationsMovingPercentilesAggregation extends AggregationsPipelineAggregationBase {
+ /** The size of window to "slide" across the histogram. */
  window?: integer
+ /** By default, the window consists of the last n values excluding the current bucket.
+  * Increasing `shift` by 1, moves the starting window position by 1 to the right. */
  shift?: integer
  keyed?: boolean
 }
@@ -4510,7 +6016,10 @@ }
 export interface AggregationsMultiBucketBase {
 }
 export interface AggregationsMultiTermLookup {
+ /** A field from which to retrieve terms. */
  field: Field
+ /** The value to apply to documents that do not have a value.
+  * By default, documents without a value are ignored. */
  missing?: AggregationsMissing
 }
@@ -4518,13 +6027,23 @@ }
 export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggreg
 }
 export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase {
+ /** Specifies the strategy for data collection. */
  collect_mode?: AggregationsTermsAggregationCollectMode
+ /** Specifies the sort order of the buckets.
+  * Defaults to sorting by descending document count. */
  order?: AggregationsAggregateOrder
+ /** The minimum number of documents in a bucket for it to be returned. */
  min_doc_count?: long
+ /** The minimum number of documents in a bucket on each shard for it to be returned. */
  shard_min_doc_count?: long
+ /** The number of candidate terms produced by each shard.
+  * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */
  shard_size?: integer
+ /** Calculates the doc count error on per term basis. */
  show_term_doc_count_error?: boolean
+ /** The number of term buckets that should be returned out of the overall terms list. */
  size?: integer
+ /** The fields from which to generate sets of terms. */
  terms: AggregationsMultiTermLookup[]
 }
@@ -4537,7 +6056,9 @@ export type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys
 & { [property: string]: AggregationsAggregate | FieldValue[] | string | long }
 export interface AggregationsMutualInformationHeuristic {
+ /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */
  background_is_superset?: boolean
+ /** Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */
  include_negatives?: boolean
 }
@@ -4547,21 +6068,27 @@ }
 export interface AggregationsNestedAggregateKeys extends AggregationsSingleBucketAggregateBase {
 }
 export type AggregationsNestedAggregate = AggregationsNestedAggregateKeys
 & { [property: string]: AggregationsAggregate | long | Metadata }
 export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase {
+ /** The path to the field of type `nested`. */
  path?: Field
 }
 export interface AggregationsNormalizeAggregation extends AggregationsPipelineAggregationBase {
+ /** The specific method to apply. */
  method?: AggregationsNormalizeMethod
 }
 export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax'
+export interface AggregationsNumberAggregationRange extends AggregationsAggregationRangeBase {
+}
+
 export interface AggregationsParentAggregateKeys extends AggregationsSingleBucketAggregateBase {
 }
 export type AggregationsParentAggregate = AggregationsParentAggregateKeys
 & { [property: string]: AggregationsAggregate | long | Metadata }
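[Editor's aside: a sketch, not part of the patch, of the `multi_terms` options above. The `products` index and its `genre`/`product` fields are hypothetical.]

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Buckets keyed on (genre, product) pairs, most frequent first.
const response = await client.search({
  index: 'products',
  size: 0,
  aggs: {
    genres_and_products: {
      multi_terms: {
        terms: [{ field: 'genre' }, { field: 'product', missing: 'Product Z' }],
        size: 10,
        order: { _count: 'desc' }
      }
    }
  }
})
console.log(response.aggregations)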
 export interface AggregationsParentAggregation extends AggregationsBucketAggregationBase {
+ /** The child type that should be selected. */
  type?: RelationName
 }
 export interface AggregationsPercentageScoreHeuristic {
 }
@@ -4569,9 +6096,14 @@ }
 export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase {
+ /** By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array.
+  * Set to `false` to disable this behavior. */
  keyed?: boolean
+ /** An array of values for which to calculate the percentile ranks. */
  values?: double[] | null
+ /** Uses the alternative High Dynamic Range Histogram algorithm to calculate percentile ranks. */
  hdr?: AggregationsHdrMethod
+ /** Sets parameters for the default TDigest algorithm used to calculate percentile ranks. */
  tdigest?: AggregationsTDigest
 }
@@ -4582,9 +6114,14 @@ }
 export interface AggregationsPercentilesAggregateBase extends AggregationsAggreg
 }
 export interface AggregationsPercentilesAggregation extends AggregationsFormatMetricAggregationBase {
+ /** By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array.
+  * Set to `false` to disable this behavior. */
  keyed?: boolean
+ /** The percentiles to calculate. */
  percents?: double[]
+ /** Uses the alternative High Dynamic Range Histogram algorithm to calculate percentiles. */
  hdr?: AggregationsHdrMethod
+ /** Sets parameters for the default TDigest algorithm used to calculate percentiles. */
  tdigest?: AggregationsTDigest
 }
@@ -4592,17 +6129,28 @@ }
 export interface AggregationsPercentilesBucketAggregate extends AggregationsPerc
 }
 export interface AggregationsPercentilesBucketAggregation extends AggregationsPipelineAggregationBase {
+ /** The list of percentiles to calculate. */
  percents?: double[]
 }
 export interface AggregationsPipelineAggregationBase extends AggregationsBucketPathAggregation {
+ /** `DecimalFormat` pattern for the output value.
+  * If specified, the formatted value is returned in the aggregation’s `value_as_string` property. */
  format?: string
+ /** Policy to apply when gaps are found in the data. */
  gap_policy?: AggregationsGapPolicy
 }
 export interface AggregationsRandomSamplerAggregation extends AggregationsBucketAggregationBase {
+ /** The probability that a document will be included in the aggregated data.
+  * Must be greater than 0, less than 0.5, or exactly 1.
+  * The lower the probability, the fewer documents are matched. */
  probability: double
+ /** The seed to generate the random sampling of documents.
+  * When a seed is provided, the random subset of documents is the same between calls. */
  seed?: integer
+ /** When combined with seed, setting shard_seed ensures 100% consistent sampling over shards where data is exactly the same.
+  * @remarks This property is not supported on Elastic Cloud Serverless. */
  shard_seed?: integer
 }
@@ -4610,10 +6158,15 @@ }
 export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggre
 }
 export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase {
+ /** The field whose values are used to build ranges. */
  field?: Field
+ /** The value to apply to documents that do not have a value.
+  * By default, documents without a value are ignored. */
  missing?: integer
+ /** An array of ranges used to bucket documents. */
  ranges?: AggregationsAggregationRange[]
- script?: Script | string
+ script?: Script | ScriptSource
+ /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */
  keyed?: boolean
  format?: string
 }
@@ -4623,17 +6176,26 @@ export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase
  to?: double
  from_as_string?: string
  to_as_string?: string
+ /** The bucket key. Present if the aggregation is _not_ keyed */
  key?: string
 }
 export type AggregationsRangeBucket = AggregationsRangeBucketKeys
 & { [property: string]: AggregationsAggregate | double | string | long }
 export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase {
+ /** Terms that should be excluded from the aggregation. */
  exclude?: AggregationsTermsExclude
+ /** The field from which to return rare terms. */
  field?: Field
+ /** Terms that should be included in the aggregation. */
  include?: AggregationsTermsInclude
+ /** The maximum number of documents a term should appear in. */
  max_doc_count?: long
+ /** The value to apply to documents that do not have a value.
+  * By default, documents without a value are ignored. */
  missing?: AggregationsMissing
+ /** The precision of the internal CuckooFilters.
+  * Smaller precision leads to better approximation, but higher memory usage. */
  precision?: double
  value_type?: string
 }
@@ -4644,7 +6206,10 @@ export interface AggregationsRateAggregate extends AggregationsAggregateBase {
 }
 export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase {
+ /** The interval used to calculate the rate.
+  * By default, the interval of the `date_histogram` is used. */
  unit?: AggregationsCalendarInterval
+ /** How the rate is calculated. */
  mode?: AggregationsRateMode
 }
@@ -4656,6 +6221,8 @@ export type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggreg
 & { [property: string]: AggregationsAggregate | long | Metadata }
 export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase {
+ /** Defines the nested object field that should be joined back to.
+  * The default is empty, which means that it joins back to the root/main document level. */
  path?: Field
 }
@@ -4665,13 +6232,14 @@ export type AggregationsSamplerAggregate = AggregationsSamplerAggregateKeys
 & { [property: string]: AggregationsAggregate | long | Metadata }
 export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase {
+ /** Limits how many top-scoring documents are collected in the sample processed on each shard. */
  shard_size?: integer
 }
 export type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash'
 export interface AggregationsScriptedHeuristic {
- script: Script | string
+ script: Script | ScriptSource
 }
 export interface AggregationsScriptedMetricAggregate extends AggregationsAggrega
 }
@@ -4679,14 +6247,26 @@ }
 export interface AggregationsScriptedMetricAggregation extends AggregationsMetricAggregationBase {
- combine_script?: Script | string
- init_script?: Script | string
- map_script?: Script | string
+ /** Runs once on each shard after document collection is complete.
+  * Allows the aggregation to consolidate the state returned from each shard. */
+ combine_script?: Script | ScriptSource
+ /** Runs prior to any collection of documents.
+  * Allows the aggregation to set up any initial state. */
+ init_script?: Script | ScriptSource
+ /** Runs once per document collected.
+  * If no `combine_script` is specified, the resulting state needs to be stored in the `state` object. */
+ map_script?: Script | ScriptSource
+ /** A global object with script parameters for `init`, `map` and `combine` scripts.
+  * It is shared between the scripts. */
  params?: Record
- reduce_script?: Script | string
+ /** Runs once on the coordinating node after all shards have returned their results.
+  * The script is provided with access to a variable `states`, which is an array of the result of the `combine_script` on each shard. */
+ reduce_script?: Script | ScriptSource
 }
 export interface AggregationsSerialDifferencingAggregation extends AggregationsPipelineAggregationBase {
+ /** The historical bucket to subtract from the current value.
+  * Must be a positive, non-zero integer. */
  lag?: integer
 }
@@ -4715,20 +6295,37 @@ export interface AggregationsSignificantTermsAggregateBase extends
 }
 export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase {
+ /** A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */
  background_filter?: QueryDslQueryContainer
+ /** Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */
  chi_square?: AggregationsChiSquareHeuristic
+ /** Terms to exclude. */
  exclude?: AggregationsTermsExclude
+ /** Mechanism by which the aggregation should be executed: using field values directly or using global ordinals. */
  execution_hint?: AggregationsTermsAggregationExecutionHint
+ /** The field from which to return significant terms. */
  field?: Field
+ /** Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score. */
  gnd?: AggregationsGoogleNormalizedDistanceHeuristic
+ /** Terms to include. */
  include?: AggregationsTermsInclude
+ /** Use JLH score as the significance score. */
  jlh?: EmptyObject
+ /** Only return terms that are found in more than `min_doc_count` hits. */
  min_doc_count?: long
+ /** Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */
  mutual_information?: AggregationsMutualInformationHeuristic
+ /** A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */
  percentage?: AggregationsPercentageScoreHeuristic
+ /** Customized score, implemented via a script. */
  script_heuristic?: AggregationsScriptedHeuristic
+ /** Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`.
+  * Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */
  shard_min_doc_count?: long
+ /** Can be used to control the volumes of candidate terms produced by each shard.
+  * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */
  shard_size?: integer
+ /** The number of buckets returned out of the overall terms list. */
  size?: integer
 }
@@ -4738,22 +6335,41 @@ export interface AggregationsSignificantTermsBucketBase extends AggregationsMult
 }
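[Editor's aside: a sketch, not part of the patch, of the `scripted_metric` options documented above, following the classic init/map/combine/reduce profit example from the Elasticsearch docs. The `transactions` index and its `type`/`amount` fields are hypothetical.]

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const response = await client.search({
  index: 'transactions',
  size: 0,
  aggs: {
    profit: {
      scripted_metric: {
        // Set up per-shard state before collection.
        init_script: 'state.transactions = []',
        // Run once per collected document.
        map_script: "state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)",
        // Consolidate per-shard state.
        combine_script: 'double profit = 0; for (t in state.transactions) { profit += t } return profit',
        // Merge the per-shard results on the coordinating node.
        reduce_script: 'double profit = 0; for (a in states) { profit += a } return profit'
      }
    }
  }
})
console.log(response.aggregations)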
 export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase {
+ /** A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */
  background_filter?: QueryDslQueryContainer
+ /** Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */
  chi_square?: AggregationsChiSquareHeuristic
+ /** Values to exclude. */
  exclude?: AggregationsTermsExclude
+ /** Determines whether the aggregation will use field values directly or global ordinals. */
  execution_hint?: AggregationsTermsAggregationExecutionHint
+ /** The field from which to return significant text. */
  field?: Field
+ /** Whether to filter out duplicate text to deal with noisy data. */
  filter_duplicate_text?: boolean
+ /** Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score. */
  gnd?: AggregationsGoogleNormalizedDistanceHeuristic
+ /** Values to include. */
  include?: AggregationsTermsInclude
+ /** Use JLH score as the significance score. */
  jlh?: EmptyObject
+ /** Only return values that are found in more than `min_doc_count` hits. */
  min_doc_count?: long
+ /** Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */
  mutual_information?: AggregationsMutualInformationHeuristic
+ /** A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */
  percentage?: AggregationsPercentageScoreHeuristic
+ /** Customized score, implemented via a script. */
  script_heuristic?: AggregationsScriptedHeuristic
+ /** Regulates the certainty a shard has if the values should actually be added to the candidate list or not with respect to the min_doc_count.
+  * Values will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */
  shard_min_doc_count?: long
+ /** The number of candidate terms produced by each shard.
+  * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */
  shard_size?: integer
+ /** The number of buckets returned out of the overall terms list. */
  size?: integer
+ /** Overrides the JSON `_source` fields from which text will be analyzed. */
  source_fields?: Fields
 }
@@ -4770,6 +6386,8 @@ export interface AggregationsSingleBucketAggregateBase extends AggregationsAggre
 }
 export interface AggregationsSingleMetricAggregateBase extends AggregationsAggregateBase {
+ /** The metric value. A missing value generally means that there was no data to aggregate,
+  * unless specified otherwise. */
  value: double | null
  value_as_string?: string
 }
@@ -4835,6 +6453,7 @@ export interface AggregationsStringStatsAggregate extends AggregationsAggregateB
 }
 export interface AggregationsStringStatsAggregation extends AggregationsMetricAggregationBase {
+ /** Shows the probability distribution for all characters. */
  show_distribution?: boolean
 }
@@ -4857,6 +6476,7 @@ export interface AggregationsSumBucketAggregation extends AggregationsPipelineAg
 }
 export interface AggregationsTDigest {
+ /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */
  compression?: integer
 }
@@ -4872,34 +6492,59 @@ export interface AggregationsTTestAggregate extends AggregationsAggregateBase {
 }
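[Editor's aside: a sketch, not part of the patch, of the `significant_text` options above, mirroring the well-known "Bird flu" example from the Elasticsearch docs. The `news` index and `content` field are hypothetical.]

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Sample the top matches, then surface unusually frequent terms,
// de-duplicating copy-pasted text.
const response = await client.search({
  index: 'news',
  query: { match: { content: 'Bird flu' } },
  aggs: {
    my_sample: {
      sampler: { shard_size: 100 },
      aggs: {
        keywords: {
          significant_text: { field: 'content', filter_duplicate_text: true }
        }
      }
    }
  }
})
console.log(response.aggregations)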
 export interface AggregationsTTestAggregation {
+ /** Test population A. */
  a?: AggregationsTestPopulation
+ /** Test population B. */
  b?: AggregationsTestPopulation
+ /** The type of test. */
  type?: AggregationsTTestType
 }
 export type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic'
+export interface AggregationsTermAggregationRange extends AggregationsAggregationRangeBase {
+}
+
 export interface AggregationsTermsAggregateBase extends AggregationsMultiBucketAggregateBase {
  doc_count_error_upper_bound?: long
  sum_other_doc_count?: long
 }
 export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase {
+ /** Determines how child aggregations should be calculated: breadth-first or depth-first. */
  collect_mode?: AggregationsTermsAggregationCollectMode
+ /** Values to exclude.
+  * Accepts regular expressions and partitions. */
  exclude?: AggregationsTermsExclude
+ /** Determines whether the aggregation will use field values directly or global ordinals. */
  execution_hint?: AggregationsTermsAggregationExecutionHint
+ /** The field from which to return terms. */
  field?: Field
+ /** Values to include.
+  * Accepts regular expressions and partitions. */
  include?: AggregationsTermsInclude
+ /** Only return values that are found in more than `min_doc_count` hits. */
  min_doc_count?: integer
+ /** The value to apply to documents that do not have a value.
+  * By default, documents without a value are ignored. */
  missing?: AggregationsMissing
  missing_order?: AggregationsMissingOrder
  missing_bucket?: boolean
+ /** Coerces unmapped fields into the specified type. */
  value_type?: string
+ /** Specifies the sort order of the buckets.
+  * Defaults to sorting by descending document count. */
  order?: AggregationsAggregateOrder
- script?: Script | string
+ script?: Script | ScriptSource
+ /** Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`.
+  * Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */
  shard_min_doc_count?: long
+ /** The number of candidate terms produced by each shard.
+  * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */
  shard_size?: integer
+ /** Set to `true` to return the `doc_count_error_upper_bound`, which is an upper bound to the error on the `doc_count` returned by each shard. */
  show_term_doc_count_error?: boolean
+ /** The number of buckets returned out of the overall terms list. */
  size?: integer
  format?: string
 }
@@ -4917,13 +6562,17 @@ export type AggregationsTermsExclude = string | string[]
 export type AggregationsTermsInclude = string | string[] | AggregationsTermsPartition
 export interface AggregationsTermsPartition {
+ /** The number of partitions. */
  num_partitions: long
+ /** The partition number for this request. */
  partition: long
 }
 export interface AggregationsTestPopulation {
+ /** The field to aggregate. */
  field: Field
- script?: Script | string
+ script?: Script | ScriptSource
+ /** A filter used to define a set of records to run unpaired t-test on. */
  filter?: QueryDslQueryContainer
 }
@@ -4931,7 +6580,9 @@ export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucket
 }
 export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase {
+ /** The maximum number of results to return. */
  size?: integer
+ /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */
  keyed?: boolean
 }
@@ -4946,18 +6597,33 @@ export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase
 }
 export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase {
+ /** Fields for which to return doc values. */
  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
+ /** If `true`, returns detailed information about score computation as part of a hit. */
  explain?: boolean
+ /** Array of wildcard (*) patterns. The request returns values for field names
+  * matching these patterns in the hits.fields property of the response. */
  fields?: (QueryDslFieldAndFormat | Field)[]
+ /** Starting document offset. */
  from?: integer
+ /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in the search results. */
  highlight?: SearchHighlight
+ /** Returns the result of one or more script evaluations for each hit. */
  script_fields?: Record
+ /** The maximum number of top matching hits to return per bucket. */
  size?: integer
+ /** Sort order of the top matching hits.
+  * By default, the hits are sorted by the score of the main query. */
  sort?: Sort
+ /** Selects the fields of the source that are returned. */
  _source?: SearchSourceConfig
+ /** Returns values for the specified stored fields (fields that use the `store` mapping option). */
  stored_fields?: Fields
+ /** If `true`, calculates and returns document scores, even if the scores are not used for sorting. */
  track_scores?: boolean
+ /** If `true`, returns document version as part of a hit. */
  version?: boolean
+ /** If `true`, returns sequence number and primary term of the last modification of each hit. */
  seq_no_primary_term?: boolean
 }
@@ -4971,12 +6637,16 @@ export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBa
 }
 export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase {
+ /** The fields of the top document to return. */
  metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[]
+ /** The number of top documents from which to return metrics. */
  size?: integer
+ /** The sort order of the documents. */
  sort?: Sort
 }
 export interface AggregationsTopMetricsValue {
+ /** A field to return as a metric. */
  field: Field
 }
@@ -4994,6 +6664,9 @@ export interface AggregationsUnmappedSignificantTermsAggregate extends Aggregati
 }
 export interface AggregationsUnmappedTermsAggregate extends AggregationsTermsAggregateBase {
 }
+export interface AggregationsUntypedAggregationRange extends AggregationsAggregationRangeBase {
+}
+
 export interface AggregationsValueCountAggregate extends AggregationsSingleMetricAggregateBase {
 }
@@ -5006,11 +6679,17 @@ export interface AggregationsVariableWidthHistogramAggregate extends Aggregation
 }
 export interface AggregationsVariableWidthHistogramAggregation {
+ /** The name of the field. */
  field?: Field
+ /** The target number of buckets. */
  buckets?: integer
+ /** The number of buckets that the coordinating node will request from each shard.
+  * Defaults to `buckets * 50`. */
  shard_size?: integer
+ /** Specifies the number of individual documents that will be stored in memory on a shard before the initial bucketing algorithm is run.
+  * Defaults to `min(10 * shard_size, 50000)`. */
  initial_buffer?: integer
- script?: Script | string
+ script?: Script | ScriptSource
 }
 export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase {
 }
@@ -5025,16 +6704,21 @@ export type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidth
 & { [property: string]: AggregationsAggregate | double | string | long }
 export interface AggregationsWeightedAverageAggregation {
+ /** A numeric response formatter. */
  format?: string
+ /** Configuration for the field that provides the values. */
  value?: AggregationsWeightedAverageValue
  value_type?: AggregationsValueType
+ /** Configuration for the field or script that provides the weights. */
  weight?: AggregationsWeightedAverageValue
 }
 export interface AggregationsWeightedAverageValue {
+ /** The field from which to extract the values or weights. */
  field?: Field
+ /** A value or weight to use if the field is missing. */
  missing?: double
- script?: Script | string
+ script?: Script | ScriptSource
 }
 export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase {
 }
@@ -5147,7 +6831,7 @@ export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilter
 export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase {
  type: 'condition'
  filter: string[]
- script: Script | string
+ script: Script | ScriptSource
 }
 export interface AnalysisCustomAnalyzer {
 }
@@ -5238,9 +6922,16 @@ export interface AnalysisEstonianAnalyzer {
 }
 export interface AnalysisFingerprintAnalyzer {
  type: 'fingerprint'
  version?: VersionString
+ /** The maximum token size to emit. Tokens larger than this size will be discarded.
+  * Defaults to `255` */
  max_output_size?: integer
+ /** The character to use to concatenate the terms.
+  * Defaults to a space. */
  separator?: string
+ /** A pre-defined stop words list like `_english_` or an array containing a list of stop words.
+  * Defaults to `_none_`. */
  stopwords?: AnalysisStopWords
+ /** The path to a file containing stop words. */
  stopwords_path?: string
 }
@@ -5590,10 +7281,18 @@ export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase {
 }
 export interface AnalysisPatternAnalyzer {
  type: 'pattern'
  version?: VersionString
+ /** Java regular expression flags. Flags should be pipe-separated, e.g. "CASE_INSENSITIVE|COMMENTS". */
  flags?: string
+ /** Should terms be lowercased or not.
+  * Defaults to `true`. */
  lowercase?: boolean
+ /** A Java regular expression.
+  * Defaults to `\W+`. */
  pattern?: string
+ /** A pre-defined stop words list like `_english_` or an array containing a list of stop words.
+  * Defaults to `_none_`. */
  stopwords?: AnalysisStopWords
+ /** The path to a file containing stop words. */
  stopwords_path?: string
 }
@@ -5662,7 +7361,7 @@ export interface AnalysisPortugueseAnalyzer {
 }
 export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase {
  type: 'predicate_token_filter'
- script: Script | string
+ script: Script | ScriptSource
 }
 export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase {
 }
@@ -5749,8 +7448,13 @@ export interface AnalysisSpanishAnalyzer {
 }
 export interface AnalysisStandardAnalyzer {
  type: 'standard'
+ /** The maximum token length. If a token is seen that exceeds this length then it is split at `max_token_length` intervals.
+  * Defaults to `255`. */
  max_token_length?: integer
+ /** A pre-defined stop words list like `_english_` or an array containing a list of stop words.
+  * Defaults to `_none_`. */
  stopwords?: AnalysisStopWords
+ /** The path to a file containing stop words. */
  stopwords_path?: string
 }
@@ -5768,13 +7472,17 @@ export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterB
 }
 export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase {
  type: 'stemmer'
  language?: string
+ /** @alias language */
  name?: string
 }
 export interface AnalysisStopAnalyzer {
  type: 'stop'
  version?: VersionString
+ /** A pre-defined stop words list like `_english_` or an array containing a list of stop words.
+  * Defaults to `_none_`. */
  stopwords?: AnalysisStopWords
+ /** The path to a file containing stop words. */
  stopwords_path?: string
 }
@@ -5954,6 +7662,12 @@ export interface MappingBooleanProperty extends MappingDocValuesPropertyBase {
  fielddata?: IndicesNumericFielddata
  index?: boolean
  null_value?: boolean
+ ignore_malformed?: boolean
+ script?: Script | ScriptSource
+ on_script_error?: MappingOnScriptError
+ /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false.
+  * @experimental */
+ time_series_dimension?: boolean
  type: 'boolean'
 }
@@ -6000,7 +7714,7 @@ export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase {
  format?: string
  ignore_malformed?: boolean
  index?: boolean
- script?: Script | string
+ script?: Script | ScriptSource
  on_script_error?: MappingOnScriptError
  null_value?: DateTime
  precision_step?: integer
@@ -6013,7 +7727,7 @@ export interface MappingDateProperty extends MappingDocValuesPropertyBase {
  format?: string
  ignore_malformed?: boolean
  index?: boolean
- script?: Script | string
+ script?: Script | ScriptSource
  on_script_error?: MappingOnScriptError
  null_value?: DateTime
  precision_step?: integer
 }
 export interface MappingDateRangeProperty extends MappingRangePropertyBase {
 }
 export type MappingDenseVectorElementType = 'bit' | 'byte' | 'float'
@@ -6029,20 +7743,68 @@ export interface MappingDenseVectorIndexOptions {
+ /** The confidence interval to use when quantizing the vectors. Can be any value between and including `0.90` and
+  * `1.0` or exactly `0`. When the value is `0`, this indicates that dynamic quantiles should be calculated for
+  * optimized quantization. When between `0.90` and `1.0`, this value restricts the values used when calculating
+  * the quantization thresholds.
+  *
+  * For example, a value of `0.95` will only use the middle `95%` of the values when calculating the quantization
+  * thresholds (e.g. the highest and lowest `2.5%` of values will be ignored).
+  *
+  * Defaults to `1/(dims + 1)` for `int8` quantized vectors and `0` for `int4` for dynamic quantile calculation.
+  *
+  * Only applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and `int4_flat` index types. */
  confidence_interval?: float
+ /** The number of candidates to track while assembling the list of nearest neighbors for each new node.
+  *
+  * Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. */
  ef_construction?: integer
+ /** The number of neighbors each node will be connected to in the HNSW graph.
+  *
+  * Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. */
  m?: integer
+ /** The type of kNN algorithm to use. */
  type: MappingDenseVectorIndexOptionsType
+ /** The rescore vector options. This is only applicable to `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`, `bbq_flat`, `int4_flat`, and `int8_flat` index types. */
+ rescore_vector?: MappingDenseVectorIndexOptionsRescoreVector
 }
-export type MappingDenseVectorIndexOptionsType = 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw'
+export interface MappingDenseVectorIndexOptionsRescoreVector {
+ /** The oversampling factor to use when searching for the nearest neighbor. This is only applicable to the quantized formats: `bbq_*`, `int4_*`, and `int8_*`.
+  * When provided, `oversample * k` vectors will be gathered and then their scores will be re-computed with the original vectors.
+  *
+  * valid values are between `1.0` and `10.0` (inclusive), or `0` exactly to disable oversampling. */
+ oversample: float
+}
+
+export type MappingDenseVectorIndexOptionsType = 'bbq_flat' | 'bbq_hnsw' | 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw'
 export interface MappingDenseVectorProperty extends MappingPropertyBase {
  type: 'dense_vector'
+ /** Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, it will be set to the length of
+  * the first vector added to the field. */
  dims?: integer
+ /** The data type used to encode vectors. The supported data types are `float` (default), `byte`, and `bit`. */
  element_type?: MappingDenseVectorElementType
+ /** If `true`, you can search this field using the kNN search API. */
  index?: boolean
+ /** An optional section that configures the kNN indexing algorithm. The HNSW algorithm has two internal parameters
+  * that influence how the data structure is built. These can be adjusted to improve the accuracy of results, at the
+  * expense of slower indexing speed.
+  *
+  * This parameter can only be specified when `index` is `true`. */
  index_options?: MappingDenseVectorIndexOptions
+ /** The vector similarity metric to use in kNN search.
+  *
+  * Documents are ranked by their vector field's similarity to the query vector. The `_score` of each document will
+  * be derived from the similarity, in a way that ensures scores are positive and that a larger score corresponds
+  * to a higher ranking.
+  *
+  * Defaults to `l2_norm` when `element_type` is `bit` otherwise defaults to `cosine`.
+  *
+  * `bit` vectors only support `l2_norm` as their similarity metric.
+  *
+  * This parameter can only be specified when `index` is `true`. */
  similarity?: MappingDenseVectorSimilarity
 }
@@ -6069,7 +7831,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase {
  null_value?: FieldValue
  boost?: double
  coerce?: boolean
- script?: Script | string
+ script?: Script | ScriptSource
  on_script_error?: MappingOnScriptError
  ignore_malformed?: boolean
  time_series_metric?: MappingTimeSeriesMetricType
 }
@@ -6147,7 +7909,7 @@ export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase {
  null_value?: GeoLocation
  index?: boolean
  on_script_error?: MappingOnScriptError
- script?: Script | string
+ script?: Script | ScriptSource
  type: 'geo_point'
 }
@@ -6177,7 +7939,9 @@ export interface MappingIcuCollationProperty extends MappingDocValuesPropertyBas
  type: 'icu_collation_keyword'
  norms?: boolean
  index_options?: MappingIndexOptions
+ /** Should the field be searchable? */
  index?: boolean
+ /** Accepts a string value which is substituted for any explicit null values. Defaults to null, which means the field is treated as missing. */
  null_value?: string
  rules?: string
  language?: string
 }
@@ -6214,7 +7978,9 @@ export interface MappingIpProperty extends MappingDocValuesPropertyBase {
  ignore_malformed?: boolean
  null_value?: string
  on_script_error?: MappingOnScriptError
- script?: Script | string
+ script?: Script | ScriptSource
+ /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false.
+  * @experimental */
  time_series_dimension?: boolean
  type: 'ip'
 }
@@ -6234,13 +8000,15 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase {
  eager_global_ordinals?: boolean
  index?: boolean
  index_options?: MappingIndexOptions
- script?: Script | string
+ script?: Script | ScriptSource
+ on_script_error?: MappingOnScriptError
  normalizer?: string
  norms?: boolean
  null_value?: string
  similarity?: string | null
  split_queries_on_whitespace?: boolean
+ /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false.
+  * @experimental */
  time_series_dimension?: boolean
  type: 'keyword'
 }
 export interface MappingLongRangeProperty extends MappingRangePropertyBase {
 }
@@ -6256,8 +8024,13 @@ export interface MappingMatchOnlyTextProperty {
  type: 'match_only_text'
+ /** Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one
+  * field for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers. */
  fields?: Record
+ /** Metadata about the field. */
  meta?: Record
+ /** Allows you to copy the values of multiple fields into a group
+  * field, which can then be queried as a single field. */
  copy_to?: Fields
 }
@@ -6280,8 +8053,12 @@ export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase
  ignore_malformed?: boolean
  index?: boolean
  on_script_error?: MappingOnScriptError
- script?: Script | string
+ script?: Script | ScriptSource
+ /** For internal use by Elastic only. Marks the field as a time series metric. Defaults to false.
+  * @experimental */
  time_series_metric?: MappingTimeSeriesMetricType
+ /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false.
+  * @experimental */
  time_series_dimension?: boolean
 }
@@ -6314,6 +8091,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase {
 }
 export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty
 export interface MappingPropertyBase {
+ /** Metadata about the field. */
  meta?: Record
  properties?: Record
  ignore_above?: integer
 }
@@ -6343,13 +8121,21 @@ export interface MappingRoutingField {
 }
 export interface MappingRuntimeField {
+ /** For type `composite` */
  fields?: Record
+ /** For type `lookup` */
  fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[]
+ /** A custom format for `date` type runtime fields. */
  format?: string
+ /** For type `lookup` */
  input_field?: Field
+ /** For type `lookup` */
  target_field?: Field
+ /** For type `lookup` */
  target_index?: IndexName
- script?: Script | string
+ /** Painless script executed at query time. */
+ script?: Script | ScriptSource
+ /** Field type, which can be: `boolean`, `composite`, `date`, `double`, `geo_point`, `ip`,`keyword`, `long`, or `lookup`. */
  type: MappingRuntimeFieldType
 }
@@ -6384,7 +8170,13 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase
 }
 export interface MappingSemanticTextProperty {
  type: 'semantic_text'
  meta?: Record
+ /** Inference endpoint that will be used to generate embeddings for the field.
+  * This parameter cannot be updated. Use the Create inference API to create the endpoint.
+  * If `search_inference_id` is specified, the inference endpoint will only be used at index time. */
  inference_id?: Id
+ /** Inference endpoint that will be used to generate embeddings at query time.
+  * You can update this parameter by using the Update mapping API. Use the Create inference API to create the endpoint.
+  * If not specified, the inference endpoint defined by inference_id will be used at both index and query time. */
  search_inference_id?: Id
 }
 export interface MappingWildcardProperty extends MappingDocValuesPropertyBase {
 }
@@ -6503,16 +8295,26 @@ export interface QueryDslBoolQuery extends QueryDslQueryBase {
+ /** The clause (query) must appear in matching documents.
+  * However, unlike `must`, the score of the query will be ignored. */
  filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
+ /** Specifies the number or percentage of `should` clauses returned documents must match. */
  minimum_should_match?: MinimumShouldMatch
+ /** The clause (query) must appear in matching documents and will contribute to the score. */
  must?: QueryDslQueryContainer | QueryDslQueryContainer[]
+ /** The clause (query) must not appear in the matching documents.
+  * Because scoring is ignored, a score of `0` is returned for all documents. */
  must_not?: QueryDslQueryContainer | QueryDslQueryContainer[]
+ /** The clause (query) should appear in the matching document. */
  should?: QueryDslQueryContainer | QueryDslQueryContainer[]
 }
 export interface QueryDslBoostingQuery extends QueryDslQueryBase {
+ /** Floating point number between 0 and 1.0 used to decrease the relevance scores of documents matching the `negative` query. */
  negative_boost: double
+ /** Query used to decrease the relevance score of matching documents. */
  negative: QueryDslQueryContainer
+ /** Any returned documents must match this query. */
  positive: QueryDslQueryContainer
 }
 export type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min'
 export type QueryDslCombinedFieldsOperator = 'or' | 'and'
 export interface QueryDslCombinedFieldsQuery extends QueryDslQueryBase {
+ /** List of fields to search. Field wildcard patterns are allowed. Only `text` fields are supported, and they must all have the same search `analyzer`. */
  fields: Field[]
+ /** Text to search for in the provided `fields`.
+  * The `combined_fields` query analyzes the provided text before performing a search. */
  query: string
+ /** If true, match phrase queries are automatically created for multi-term synonyms. */
  auto_generate_synonyms_phrase_query?: boolean
+ /** Boolean logic used to interpret text in the query value. */
  operator?: QueryDslCombinedFieldsOperator
+ /** Minimum number of clauses that must match for a document to be returned. */
  minimum_should_match?: MinimumShouldMatch
+ /** Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */
  zero_terms_query?: QueryDslCombinedFieldsZeroTerms
 }
 export type QueryDslCombinedFieldsZeroTerms = 'none' | 'all'
 export interface QueryDslCommonTermsQuery extends QueryDslQueryBase {
 }
 export interface QueryDslConstantScoreQuery extends QueryDslQueryBase {
+ /** Filter query you wish to run. Any returned documents must match this query.
+  * Filter queries do not calculate relevance scores.
+  * To speed up performance, Elasticsearch automatically caches frequently used filter queries. */
  filter: QueryDslQueryContainer
 }
 export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatur
 }
 export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase {
+ /** Date format used to convert `date` values in the query. */
  format?: DateFormat
+ /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */
  time_zone?: TimeZone
 }
 export type QueryDslDecayFunction = QueryDslUntypedDecayFunction | QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction
 export interface QueryDslDecayFunctionBase {
+ /** Determines how the distance is calculated when a field used for computing the decay contains multiple values. */
  multi_value_mode?: QueryDslMultiValueMode
 }
 export interface QueryDslDecayPlacement {
+ /** Defines how documents are scored at the distance given at scale. */
  decay?: double
+ /** If defined, the decay function will only compute the decay function for documents with a distance greater than the defined `offset`. */
  offset?: TScale
+ /** Defines the distance from origin + offset at which the computed score will equal `decay` parameter. */
  scale?: TScale
+ /** The point of origin used for calculating distance. Must be given as a number for numeric field, date for date fields and geo point for geo fields. */
  origin?: TOrigin
 }
 export interface QueryDslDisMaxQuery extends QueryDslQueryBase {
+ /** One or more query clauses.
+  * Returned documents must match one or more of these queries.
+  * If a document matches multiple queries, Elasticsearch uses the highest relevance score. */
  queries: QueryDslQueryContainer[]
+ /** Floating point number between 0 and 1.0 used to increase the relevance scores of documents matching multiple query clauses. */
  tie_breaker?: double
 }
 export type QueryDslDistanceFeatureQuery = QueryDslUntypedDistanceFeatureQuery | QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery
 export interface QueryDslDistanceFeatureQueryBase extends QueryDslQueryBase {
+ /** Date or point of origin used to calculate distances.
+  * If the `field` value is a `date` or `date_nanos` field, the `origin` value must be a date.
+  * Date Math, such as `now-1h`, is supported.
+  * If the field value is a `geo_point` field, the `origin` value must be a geopoint. */
  origin: TOrigin
+ /** Distance from the `origin` at which relevance scores receive half of the `boost` value.
+  * If the `field` value is a `date` or `date_nanos` field, the `pivot` value must be a time unit, such as `1h` or `10d`. If the `field` value is a `geo_point` field, the `pivot` value must be a distance unit, such as `1km` or `12m`. */
  pivot: TDistance
+ /** Name of the field used to calculate distances. This field must meet the following criteria:
+  * be a `date`, `date_nanos` or `geo_point` field;
+  * have an `index` mapping parameter value of `true`, which is the default;
+  * have a `doc_values` mapping parameter value of `true`, which is the default. */
  field: Field
 }
 export interface QueryDslExistsQuery extends QueryDslQueryBase {
+ /** Name of the field you wish to search. */
  field: Field
 }
 export interface QueryDslFieldAndFormat {
+ /** A wildcard pattern. The request returns values for field names matching this pattern. */
  field: Field
+ /** The format in which the values are returned. */
  format?: string
  include_unmapped?: boolean
 }
 export interface QueryDslFieldLookup {
+ /** `id` of the document. */
  id: Id
+ /** Index from which to retrieve the document. */
  index?: IndexName
+ /** Name of the field. */
  path?: Field
+ /** Custom routing value. */
  routing?: Routing
 }
 export type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal'
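[Editor's aside: a sketch, not part of the patch, of the `dis_max` options just documented. The `articles` index and its `title`/`body` fields are hypothetical.]

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Score by the best-matching clause; tie_breaker blends in the others.
const response = await client.search({
  index: 'articles',
  query: {
    dis_max: {
      queries: [
        { term: { title: 'quick' } },
        { term: { body: 'quick' } }
      ],
      tie_breaker: 0.3
    }
  }
})
console.log(response.hits.hits)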
*/ modifier?: QueryDslFieldValueFactorModifier } export type QueryDslFunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min' export interface QueryDslFunctionScoreContainer { + /** Function that scores a document with an exponential decay, depending on the distance of a numeric field value of the document from an origin. */ exp?: QueryDslDecayFunction + /** Function that scores a document with a normal decay, depending on the distance of a numeric field value of the document from an origin. */ gauss?: QueryDslDecayFunction + /** Function that scores a document with a linear decay, depending on the distance of a numeric field value of the document from an origin. */ linear?: QueryDslDecayFunction + /** Function allows you to use a field from a document to influence the score. + * It’s similar to using the script_score function; however, it avoids the overhead of scripting. */ field_value_factor?: QueryDslFieldValueFactorScoreFunction + /** Generates scores that are uniformly distributed from 0 up to but not including 1. + * In case you want scores to be reproducible, it is possible to provide a `seed` and `field`. */ random_score?: QueryDslRandomScoreFunction + /** Enables you to wrap another query and customize the scoring of it, optionally with a computation derived from other numeric field values in the doc using a script expression. */ script_score?: QueryDslScriptScoreFunction filter?: QueryDslQueryContainer weight?: double @@ -6625,26 +8478,42 @@ export interface QueryDslFunctionScoreContainer { export type QueryDslFunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min' export interface QueryDslFunctionScoreQuery extends QueryDslQueryBase { + /** Defines how the newly computed score is combined with the score of the query. */ boost_mode?: QueryDslFunctionBoostMode + /** One or more functions that compute a new score for each document returned by the query. */ functions?: QueryDslFunctionScoreContainer[] + /** Restricts the new score to not exceed the provided limit. */ max_boost?: double + /** Excludes documents that do not meet the provided score threshold. */ min_score?: double + /** A query that determines the documents for which a new score is computed. */ query?: QueryDslQueryContainer + /** Specifies how the computed scores are combined. */ score_mode?: QueryDslFunctionScoreMode } export interface QueryDslFuzzyQuery extends QueryDslQueryBase { + /** Maximum number of variations created. */ max_expansions?: integer + /** Number of beginning characters left unchanged when creating expansions. */ prefix_length?: integer + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Indicates whether edits include transpositions of two adjacent characters (for example `ab` to `ba`). */ transpositions?: boolean + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Term you wish to find in the provided field. */ value: string | double | boolean } export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { type?: QueryDslGeoExecution + /** Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. + * Set to `COERCE` to also try to infer correct latitude or longitude. */ validation_method?: QueryDslGeoValidationMethod + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped.
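+ *
+ * For illustration, a `geo_bounding_box` query; the `pin.location` field and the corner coordinates are example values only:
+ *
+ * ```ts
+ * const inBox: QueryDslQueryContainer = {
+ *   geo_bounding_box: {
+ *     ignore_unmapped: true,
+ *     'pin.location': {
+ *       top_left: { lat: 40.73, lon: -74.1 },
+ *       bottom_right: { lat: 40.01, lon: -71.12 }
+ *     }
+ *   }
+ * }
+ * ```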
*/ ignore_unmapped?: boolean } export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys @@ -6659,9 +8528,17 @@ export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeature } export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { + /** The radius of the circle centred on the specified location. + * Points which fall into this circle are considered to be matches. */ distance: Distance + /** How to compute the distance. + * Set to `plane` for a faster calculation that's inaccurate on long distances and close to the poles. */ distance_type?: GeoDistanceType + /** Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. + * Set to `COERCE` to also try to infer correct latitude or longitude. */ validation_method?: QueryDslGeoValidationMethod + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped. */ ignore_unmapped?: boolean } export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys @@ -6688,11 +8565,15 @@ export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys export interface QueryDslGeoShapeFieldQuery { shape?: GeoShape + /** Query using an indexed shape retrieved from the specified document and path. */ indexed_shape?: QueryDslFieldLookup + /** Spatial relation operator used to search a geo field. */ relation?: GeoShapeRelation } export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase { + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped. */ ignore_unmapped?: boolean } export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys @@ -6701,106 +8582,181 @@ export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' export interface QueryDslHasChildQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits + /** Maximum number of child documents that match the query allowed for a returned parent document. + * If the parent document exceeds this limit, it is excluded from the search results. */ max_children?: integer + /** Minimum number of child documents that match the query required for a returned parent document. + * If the parent document does not meet this limit, it is excluded from the search results. */ min_children?: integer + /** Query you wish to run on child documents of the `type` field. + * If a child document matches the search, the query returns the parent document. */ query: QueryDslQueryContainer + /** Indicates how scores for matching child documents affect the root parent document’s relevance score. */ score_mode?: QueryDslChildScoreMode + /** Name of the child relationship mapped for the `join` field. */ type: RelationName } export interface QueryDslHasParentQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped `parent_type` and not return any documents instead of an error. + * You can use this parameter to query multiple indices that may not contain the `parent_type`. */ ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits.
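+ *
+ * A sketch of `has_parent` with inner hits enabled; the `question` relation and `tag` field are assumptions for this example:
+ *
+ * ```ts
+ * const withParents: QueryDslQueryContainer = {
+ *   has_parent: {
+ *     parent_type: 'question',
+ *     query: { match: { tag: 'elasticsearch' } },
+ *     inner_hits: {}  // include the matching parent document in each child hit
+ *   }
+ * }
+ * ```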
*/ inner_hits?: SearchInnerHits + /** Name of the parent relationship mapped for the `join` field. */ parent_type: RelationName + /** Query you wish to run on parent documents of the `parent_type` field. + * If a parent document matches the search, the query returns its child documents. */ query: QueryDslQueryContainer + /** Indicates whether the relevance score of a matching parent document is aggregated into its child documents. */ score?: boolean } export interface QueryDslIdsQuery extends QueryDslQueryBase { + /** An array of document IDs. */ values?: Ids } export interface QueryDslIntervalsAllOf { + /** An array of rules to combine. All rules must produce a match in a document for the overall source to match. */ intervals: QueryDslIntervalsContainer[] + /** Maximum number of positions between the matching terms. + * Intervals produced by the rules further apart than this are not considered matches. */ max_gaps?: integer + /** If `true`, intervals produced by the rules should appear in the order in which they are specified. */ ordered?: boolean + /** Rule used to filter returned intervals. */ filter?: QueryDslIntervalsFilter } export interface QueryDslIntervalsAnyOf { + /** An array of rules to match. */ intervals: QueryDslIntervalsContainer[] + /** Rule used to filter returned intervals. */ filter?: QueryDslIntervalsFilter } export interface QueryDslIntervalsContainer { + /** Returns matches that span a combination of other rules. */ all_of?: QueryDslIntervalsAllOf + /** Returns intervals produced by any of its sub-rules. */ any_of?: QueryDslIntervalsAnyOf + /** Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. */ fuzzy?: QueryDslIntervalsFuzzy + /** Matches analyzed text. */ match?: QueryDslIntervalsMatch + /** Matches terms that start with a specified set of characters. */ prefix?: QueryDslIntervalsPrefix + /** Matches terms using a wildcard pattern. */ wildcard?: QueryDslIntervalsWildcard } export interface QueryDslIntervalsFilter { + /** Query used to return intervals that follow an interval from the `filter` rule. */ after?: QueryDslIntervalsContainer + /** Query used to return intervals that occur before an interval from the `filter` rule. */ before?: QueryDslIntervalsContainer + /** Query used to return intervals contained by an interval from the `filter` rule. */ contained_by?: QueryDslIntervalsContainer + /** Query used to return intervals that contain an interval from the `filter` rule. */ containing?: QueryDslIntervalsContainer + /** Query used to return intervals that are **not** contained by an interval from the `filter` rule. */ not_contained_by?: QueryDslIntervalsContainer + /** Query used to return intervals that do **not** contain an interval from the `filter` rule. */ not_containing?: QueryDslIntervalsContainer + /** Query used to return intervals that do **not** overlap with an interval from the `filter` rule. */ not_overlapping?: QueryDslIntervalsContainer + /** Query used to return intervals that overlap with an interval from the `filter` rule. */ overlapping?: QueryDslIntervalsContainer - script?: Script | string + /** Script used to return matching documents. + * This script must return a boolean value: `true` or `false`. */ + script?: Script | ScriptSource } export interface QueryDslIntervalsFuzzy { + /** Analyzer used to normalize the term. */ analyzer?: string + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Number of beginning characters left unchanged when creating expansions. */ prefix_length?: integer + /** The term to match.
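+ *
+ * As a sketch, a `fuzzy` rule combined with a `match` rule in an ordered `all_of`; the `my_text` field and the terms are invented for this example:
+ *
+ * ```ts
+ * const intervals: QueryDslQueryContainer = {
+ *   intervals: {
+ *     my_text: {
+ *       all_of: {
+ *         ordered: true,
+ *         intervals: [
+ *           { match: { query: 'my favorite food', max_gaps: 0, ordered: true } },
+ *           { fuzzy: { term: 'cheese', fuzziness: 'AUTO' } }
+ *         ]
+ *       }
+ *     }
+ *   }
+ * }
+ * ```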
*/ term: string + /** Indicates whether edits include transpositions of two adjacent characters (for example, `ab` to `ba`). */ transpositions?: boolean + /** If specified, match intervals from this field rather than the top-level field. + * The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field } export interface QueryDslIntervalsMatch { + /** Analyzer used to analyze terms in the query. */ analyzer?: string + /** Maximum number of positions between the matching terms. + * Terms further apart than this are not considered matches. */ max_gaps?: integer + /** If `true`, matching terms must appear in their specified order. */ ordered?: boolean + /** Text you wish to find in the provided field. */ query: string + /** If specified, match intervals from this field rather than the top-level field. + * The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field + /** An optional interval filter. */ filter?: QueryDslIntervalsFilter } export interface QueryDslIntervalsPrefix { + /** Analyzer used to analyze the `prefix`. */ analyzer?: string + /** Beginning characters of terms you wish to find in the top-level field. */ prefix: string + /** If specified, match intervals from this field rather than the top-level field. + * The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field } export interface QueryDslIntervalsQuery extends QueryDslQueryBase { + /** Returns matches that span a combination of other rules. */ all_of?: QueryDslIntervalsAllOf + /** Returns intervals produced by any of its sub-rules. */ any_of?: QueryDslIntervalsAnyOf + /** Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. */ fuzzy?: QueryDslIntervalsFuzzy + /** Matches analyzed text. */ match?: QueryDslIntervalsMatch + /** Matches terms that start with a specified set of characters. */ prefix?: QueryDslIntervalsPrefix + /** Matches terms using a wildcard pattern. */ wildcard?: QueryDslIntervalsWildcard } export interface QueryDslIntervalsWildcard { + /** Analyzer used to analyze the `pattern`. + * Defaults to the top-level field's analyzer. */ analyzer?: string + /** Wildcard pattern used to find matching terms. */ pattern: string + /** If specified, match intervals from this field rather than the top-level field. + * The `pattern` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field } export type QueryDslLike = string | QueryDslLikeDocument export interface QueryDslLikeDocument { + /** A document not present in the index. */ doc?: any fields?: Field[] + /** ID of a document. */ _id?: Id + /** Index of a document. */ _index?: IndexName + /** Overrides the default analyzer. */ per_field_analyzer?: Record routing?: Routing version?: VersionNumber @@ -6811,14 +8767,31 @@ export interface QueryDslMatchAllQuery extends QueryDslQueryBase { } export interface QueryDslMatchBoolPrefixQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** Maximum edit distance allowed for matching. + * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzziness?: Fuzziness + /** Method used to rewrite the query. + * Can be applied to the term subqueries constructed for all terms but the final term. 
*/ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). + * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzzy_transpositions?: boolean + /** Maximum number of terms to which the query will expand. + * Can be applied to the term subqueries constructed for all terms but the final term. */ max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. + * Applied to the constructed bool query. */ minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. + * Applied to the constructed bool query. */ operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. + * Can be applied to the term subqueries constructed for all terms but the final term. */ prefix_length?: integer + /** Terms you wish to find in the provided field. + * The last term is used in a prefix query. */ query: string } @@ -6826,84 +8799,149 @@ export interface QueryDslMatchNoneQuery extends QueryDslQueryBase { } export interface QueryDslMatchPhrasePrefixQuery extends QueryDslQueryBase { + /** Analyzer used to convert text in the query value into tokens. */ analyzer?: string + /** Maximum number of terms to which the last provided term of the query value will expand. */ max_expansions?: integer + /** Text you wish to find in the provided field. */ query: string + /** Maximum number of positions allowed between matching tokens. */ slop?: integer + /** Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMatchPhraseQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** Query terms that are analyzed and turned into a phrase query. */ query: string + /** Maximum number of positions allowed between matching tokens. */ slop?: integer + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMatchQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean cutoff_frequency?: double + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Method used to rewrite the query. */ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ lenient?: boolean + /** Maximum number of terms to which the query will expand. */ max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. */ operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. */ prefix_length?: integer + /** Text, number, boolean value or date you wish to find in the provided field. 
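+ *
+ * For example (the `message` field is an assumption):
+ *
+ * ```ts
+ * const fullText: QueryDslQueryContainer = {
+ *   match: { message: { query: 'this is a test', operator: 'and' } }
+ * }
+ * ```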
*/ query: string | float | boolean + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase { + /** The analyzer that is used to analyze the free form text. + * Defaults to the analyzer associated with the first field in fields. */ analyzer?: string + /** Each term in the formed query could be further boosted by their tf-idf score. + * This sets the boost factor to use when using this feature. + * Defaults to deactivated (0). */ boost_terms?: double + /** Controls whether the query should fail (throw an exception) if any of the specified fields are not of the supported types (`text` or `keyword`). */ fail_on_unsupported_field?: boolean + /** A list of fields to fetch and analyze the text from. + * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ fields?: Field[] + /** Specifies whether the input documents should also be included in the search results returned. */ include?: boolean + /** Specifies free form text and/or a single or multiple documents for which you want to find similar documents. */ like: QueryDslLike | QueryDslLike[] + /** The maximum document frequency above which the terms are ignored from the input document. */ max_doc_freq?: integer + /** The maximum number of query terms that can be selected. */ max_query_terms?: integer + /** The maximum word length above which the terms are ignored. + * Defaults to unbounded (`0`). */ max_word_length?: integer + /** The minimum document frequency below which the terms are ignored from the input document. */ min_doc_freq?: integer + /** After the disjunctive query has been formed, this parameter controls the number of terms that must match. */ minimum_should_match?: MinimumShouldMatch + /** The minimum term frequency below which the terms are ignored from the input document. */ min_term_freq?: integer + /** The minimum word length below which the terms are ignored. */ min_word_length?: integer routing?: Routing + /** An array of stop words. + * Any word in this set is ignored. */ stop_words?: AnalysisStopWords + /** Used in combination with `like` to exclude documents that match a set of terms. */ unlike?: QueryDslLike | QueryDslLike[] version?: VersionNumber version_type?: VersionType } export interface QueryDslMultiMatchQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean cutoff_frequency?: double + /** The fields to be queried. + * Defaults to the `index.query.default_field` index settings, which in turn defaults to `*`. */ fields?: Fields + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Method used to rewrite the query. */ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). + * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ lenient?: boolean + /** Maximum number of terms to which the query will expand. 
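+ *
+ * A minimal `multi_match` sketch; the boosted `subject^3` and `message` fields are invented for this example:
+ *
+ * ```ts
+ * const acrossFields: QueryDslQueryContainer = {
+ *   multi_match: {
+ *     query: 'quick brown fox',
+ *     fields: ['subject^3', 'message'],  // caret syntax boosts matches on subject
+ *     type: 'best_fields'
+ *   }
+ * }
+ * ```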
*/ max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. */ operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. */ prefix_length?: integer + /** Text, number, boolean value or date you wish to find in the provided field. */ query: string + /** Maximum number of positions allowed between matching tokens. */ slop?: integer + /** Determines how scores for each per-term blended query and scores across groups are combined. */ tie_breaker?: double + /** How the `multi_match` query is executed internally. */ type?: QueryDslTextQueryType + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum' export interface QueryDslNestedQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped path and not return any documents instead of an error. */ ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits + /** Path to the nested object you wish to search. */ path: Field + /** Query you wish to run on nested objects in the path. */ query: QueryDslQueryContainer + /** How scores for matching child objects affect the root parent document’s relevance score. */ score_mode?: QueryDslChildScoreMode } @@ -6918,134 +8956,262 @@ export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR' export interface QueryDslParentIdQuery extends QueryDslQueryBase { + /** ID of the parent document. */ id?: Id + /** Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ ignore_unmapped?: boolean + /** Name of the child relationship mapped for the `join` field. */ type?: RelationName } export interface QueryDslPercolateQuery extends QueryDslQueryBase { + /** The source of the document being percolated. */ document?: any + /** An array of sources of the documents being percolated. */ documents?: any[] + /** Field that holds the indexed queries. The field must use the `percolator` mapping type. */ field: Field + /** The ID of a stored document to percolate. */ id?: Id + /** The index of a stored document to percolate. */ index?: IndexName + /** The suffix used for the `_percolator_document_slot` field when multiple `percolate` queries are specified. */ name?: string + /** Preference used to fetch document to percolate. */ preference?: string + /** Routing used to fetch document to percolate. */ routing?: Routing + /** The expected version of a stored document to percolate. */ version?: VersionNumber } export interface QueryDslPinnedDoc { + /** The unique document ID. */ _id: Id + /** The index that contains the document. */ _index?: IndexName } export interface QueryDslPinnedQuery extends QueryDslQueryBase { + /** Any choice of query used to rank documents which will be ranked below the "pinned" documents. */ organic: QueryDslQueryContainer + /** Document IDs listed in the order they are to appear in results. + * Required if `docs` is not specified. */ ids?: Id[] + /** Documents listed in the order they are to appear in results. + * Required if `ids` is not specified.
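+ *
+ * A sketch of a `pinned` query; the IDs and the organic clause are example values:
+ *
+ * ```ts
+ * const promoted: QueryDslQueryContainer = {
+ *   pinned: {
+ *     ids: ['1', '4', '100'],  // always shown first, in this order
+ *     organic: { match: { description: 'iphone' } }
+ *   }
+ * }
+ * ```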
*/ docs?: QueryDslPinnedDoc[] } export interface QueryDslPrefixQuery extends QueryDslQueryBase { + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Beginning characters of terms you wish to find in the provided field. */ value: string + /** Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. + * Default is `false` which means the case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean } export interface QueryDslQueryBase { + /** Floating point number used to decrease or increase the relevance scores of the query. + * Boost values are relative to the default value of 1.0. + * A boost value between 0 and 1.0 decreases the relevance score. + * A value greater than 1.0 increases the relevance score. */ boost?: float _name?: string } export interface QueryDslQueryContainer { + /** matches documents matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns documents matching a `positive` query while reducing the relevance score of documents that also match a `negative` query. */ boosting?: QueryDslBoostingQuery common?: Partial> + /** The `combined_fields` query supports searching multiple text fields as if their contents had been indexed into one combined field. */ combined_fields?: QueryDslCombinedFieldsQuery + /** Wraps a filter query and returns every matching document with a relevance score equal to the `boost` parameter value. */ constant_score?: QueryDslConstantScoreQuery + /** Returns documents matching one or more wrapped queries, called query clauses or clauses. + * If a returned document matches multiple query clauses, the `dis_max` query assigns the document the highest relevance score from any matching clause, plus a tie breaking increment for any additional matching subqueries. */ dis_max?: QueryDslDisMaxQuery + /** Boosts the relevance score of documents closer to a provided origin date or point. + * For example, you can use this query to give more weight to documents closer to a certain date or location. */ distance_feature?: QueryDslDistanceFeatureQuery + /** Returns documents that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** The `function_score` enables you to modify the score of documents that are retrieved by a query. */ function_score?: QueryDslFunctionScoreQuery | QueryDslFunctionScoreContainer[] + /** Returns documents that contain terms similar to the search term, as measured by a Levenshtein edit distance. */ fuzzy?: Partial> + /** Matches geo_point and geo_shape values that intersect a bounding box. */ geo_bounding_box?: QueryDslGeoBoundingBoxQuery + /** Matches `geo_point` and `geo_shape` values within a given distance of a geopoint. */ geo_distance?: QueryDslGeoDistanceQuery + /** Matches `geo_point` and `geo_shape` values that intersect a grid cell from a GeoGrid aggregation. */ geo_grid?: Partial> geo_polygon?: QueryDslGeoPolygonQuery + /** Filter documents indexed using either the `geo_shape` or the `geo_point` type. */ geo_shape?: QueryDslGeoShapeQuery + /** Returns parent documents whose joined child documents match a provided query. */ has_child?: QueryDslHasChildQuery + /** Returns child documents whose joined parent document matches a provided query. */ has_parent?: QueryDslHasParentQuery + /** Returns documents based on their IDs. + * This query uses document IDs stored in the `_id` field. 
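+ *
+ * For example:
+ *
+ * ```ts
+ * const byId: QueryDslQueryContainer = { ids: { values: ['1', '4', '100'] } }
+ * ```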
*/ ids?: QueryDslIdsQuery + /** Returns documents based on the order and proximity of matching terms. */ intervals?: Partial> + /** Finds the k nearest vectors to a query vector, as measured by a similarity + * metric. knn query finds nearest vectors through approximate search on indexed + * dense_vectors. */ knn?: KnnQuery + /** Returns documents that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all documents, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery + /** Analyzes its input and constructs a `bool` query from the terms. + * Each term except the last is used in a `term` query. + * The last term is used in a prefix query. */ match_bool_prefix?: Partial> + /** Matches no documents. */ match_none?: QueryDslMatchNoneQuery + /** Analyzes the text and creates a phrase query out of the analyzed text. */ match_phrase?: Partial> + /** Returns documents that contain the words of a provided text, in the same order as provided. + * The last term of the provided text is treated as a prefix, matching any words that begin with that term. */ match_phrase_prefix?: Partial> + /** Returns documents that are "like" a given set of documents. */ more_like_this?: QueryDslMoreLikeThisQuery + /** Enables you to search for a provided text, number, date or boolean value across multiple fields. + * The provided text is analyzed before matching. */ multi_match?: QueryDslMultiMatchQuery + /** Wraps another query to search nested fields. + * If an object matches the search, the nested query returns the root parent document. */ nested?: QueryDslNestedQuery + /** Returns child documents joined to a specific parent document. */ parent_id?: QueryDslParentIdQuery + /** Matches queries stored in an index. */ percolate?: QueryDslPercolateQuery + /** Promotes selected documents to rank higher than those matching a given query. */ pinned?: QueryDslPinnedQuery + /** Returns documents that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns documents based on a provided query string, using a parser with a strict syntax. */ query_string?: QueryDslQueryStringQuery + /** Returns documents that contain terms within a provided range. */ range?: Partial> + /** Boosts the relevance score of documents based on the numeric value of a `rank_feature` or `rank_features` field. */ rank_feature?: QueryDslRankFeatureQuery + /** Returns documents that contain terms matching a regular expression. */ regexp?: Partial> rule?: QueryDslRuleQuery + /** Filters documents based on a provided script. + * The script query is typically used in a filter context. */ script?: QueryDslScriptQuery + /** Uses a script to provide a custom score for returned documents. */ script_score?: QueryDslScriptScoreQuery + /** A semantic query to semantic_text field types */ semantic?: QueryDslSemanticQuery + /** Queries documents that contain fields indexed using the `shape` type. */ shape?: QueryDslShapeQuery + /** Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns matches which enclose another span query. */ span_containing?: QueryDslSpanContainingQuery + /** Wrapper to allow span queries to participate in composite single-field span queries by _lying_ about their search field. */ span_field_masking?: QueryDslSpanFieldMaskingQuery + /** Matches spans near the beginning of a field. 
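+ *
+ * A sketch (the `user.id` field is an assumption): match `kimchy` within the first three positions of the field:
+ *
+ * ```ts
+ * const nearStart: QueryDslQueryContainer = {
+ *   span_first: { match: { span_term: { 'user.id': { value: 'kimchy' } } }, end: 3 }
+ * }
+ * ```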
*/ span_first?: QueryDslSpanFirstQuery + /** Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query) as a `span` query, so it can be nested. */ span_multi?: QueryDslSpanMultiTermQuery + /** Matches spans which are near one another. + * You can specify `slop`, the maximum number of intervening unmatched positions, as well as whether matches are required to be in-order. */ span_near?: QueryDslSpanNearQuery + /** Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens after (controlled by the parameter `post`) another span query. */ span_not?: QueryDslSpanNotQuery + /** Matches the union of its span clauses. */ span_or?: QueryDslSpanOrQuery + /** Matches spans containing a term. */ span_term?: Partial> + /** Returns matches which are enclosed inside another span query. */ span_within?: QueryDslSpanWithinQuery + /** Using input query vectors or a natural language processing model to convert a query into a list of token-weight pairs, queries against a sparse vector field. */ sparse_vector?: QueryDslSparseVectorQuery + /** Returns documents that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns documents that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns documents that contain a minimum number of exact terms in a provided field. + * To return a document, a required number of terms must exactly match the field values, including whitespace and capitalization. */ terms_set?: Partial> + /** Uses a natural language processing model to convert the query text into a list of token-weight pairs which are then used in a query against a sparse vector or rank features field. */ text_expansion?: Partial> + /** Supports returning text_expansion query results by sending in precomputed tokens with the query. */ weighted_tokens?: Partial> + /** Returns documents that contain terms matching a wildcard pattern. */ wildcard?: Partial> + /** A query that accepts any other query as base64 encoded string. */ wrapper?: QueryDslWrapperQuery type?: QueryDslTypeQuery } export interface QueryDslQueryStringQuery extends QueryDslQueryBase { + /** If `true`, the wildcard characters `*` and `?` are allowed as the first character of the query string. */ allow_leading_wildcard?: boolean + /** Analyzer used to convert text in the query string into tokens. */ analyzer?: string + /** If `true`, the query attempts to analyze wildcard terms in the query string. */ analyze_wildcard?: boolean + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean + /** Default field to search if no field is provided in the query string. + * Supports wildcards (`*`). + * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ default_field?: Field + /** Default boolean logic used to interpret text in the query string if no operators are specified. */ default_operator?: QueryDslOperator + /** If `true`, enable position increments in queries constructed from a `query_string` search. */ enable_position_increments?: boolean escape?: boolean + /** Array of fields to search. 
Supports wildcards (`*`). */ fields?: Field[] + /** Maximum edit distance allowed for fuzzy matching. */ fuzziness?: Fuzziness + /** Maximum number of terms to which the query expands for fuzzy matching. */ fuzzy_max_expansions?: integer + /** Number of beginning characters left unchanged for fuzzy matching. */ fuzzy_prefix_length?: integer + /** Method used to rewrite the query. */ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */ lenient?: boolean + /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Maximum number of positions allowed between matching tokens for phrases. */ phrase_slop?: double + /** Query string you wish to parse and use for search. */ query: string + /** Analyzer used to convert quoted text in the query string into tokens. + * For quoted text, this parameter overrides the analyzer specified in the `analyzer` parameter. */ quote_analyzer?: string + /** Suffix appended to quoted text in the query string. + * You can use this suffix to use a different analysis method for exact matches. */ quote_field_suffix?: string + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** How to combine the queries generated from the individual search terms in the resulting `dis_max` query. */ tie_breaker?: double + /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert date values in the query string to UTC. */ time_zone?: TimeZone + /** Determines how the query matches and scores documents. */ type?: QueryDslTextQueryType } @@ -7057,10 +9223,15 @@ export interface QueryDslRandomScoreFunction { export type QueryDslRangeQuery = QueryDslUntypedRangeQuery | QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermRangeQuery export interface QueryDslRangeQueryBase extends QueryDslQueryBase { + /** Indicates how the range query matches values for `range` fields. */ relation?: QueryDslRangeRelation + /** Greater than. */ gt?: T + /** Greater than or equal to. */ gte?: T + /** Less than. */ lt?: T + /** Less than or equal to. */ lte?: T from?: T | null to?: T | null @@ -7075,31 +9246,46 @@ export interface QueryDslRankFeatureFunctionLinear { } export interface QueryDslRankFeatureFunctionLogarithm { + /** Configurable scaling factor. */ scaling_factor: float } export interface QueryDslRankFeatureFunctionSaturation { + /** Configurable pivot value so that the result will be less than 0.5. */ pivot?: float } export interface QueryDslRankFeatureFunctionSigmoid { + /** Configurable pivot value so that the result will be less than 0.5. */ pivot: float + /** Configurable Exponent. */ exponent: float } export interface QueryDslRankFeatureQuery extends QueryDslQueryBase { + /** `rank_feature` or `rank_features` field used to boost relevance scores. */ field: Field + /** Saturation function used to boost relevance scores based on the value of the rank feature `field`. */ saturation?: QueryDslRankFeatureFunctionSaturation + /** Logarithmic function used to boost relevance scores based on the value of the rank feature `field`. 
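+ *
+ * A sketch of `rank_feature` with a `log` function; the `pagerank` field is an assumed example:
+ *
+ * ```ts
+ * const boosted: QueryDslQueryContainer = {
+ *   rank_feature: { field: 'pagerank', log: { scaling_factor: 4 } }
+ * }
+ * ```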
*/ log?: QueryDslRankFeatureFunctionLogarithm + /** Linear function used to boost relevance scores based on the value of the rank feature `field`. */ linear?: QueryDslRankFeatureFunctionLinear + /** Sigmoid function used to boost relevance scores based on the value of the rank feature `field`. */ sigmoid?: QueryDslRankFeatureFunctionSigmoid } export interface QueryDslRegexpQuery extends QueryDslQueryBase { + /** Allows case insensitive matching of the regular expression value with the indexed field values when set to `true`. + * When `false`, case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean + /** Enables optional operators for the regular expression. */ flags?: string + /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Regular expression for terms you wish to find in the provided field. */ value: string } @@ -7110,31 +9296,44 @@ export interface QueryDslRuleQuery extends QueryDslQueryBase { } export interface QueryDslScriptQuery extends QueryDslQueryBase { - script: Script | string + /** Contains a script to run as a query. + * This script must return a boolean value, `true` or `false`. */ + script: Script | ScriptSource } export interface QueryDslScriptScoreFunction { - script: Script | string + /** A script that computes a score. */ + script: Script | ScriptSource } export interface QueryDslScriptScoreQuery extends QueryDslQueryBase { + /** Documents with a score lower than this floating point number are excluded from the search results. */ min_score?: float + /** Query used to return documents. */ query: QueryDslQueryContainer - script: Script | string + /** Script used to compute the score of documents returned by the query. + * Important: final relevance scores from the `script_score` query cannot be negative. */ + script: Script | ScriptSource } export interface QueryDslSemanticQuery extends QueryDslQueryBase { + /** The field to query, which must be a semantic_text field type */ field: string + /** The query text */ query: string } export interface QueryDslShapeFieldQuery { + /** Queries using a pre-indexed shape. */ indexed_shape?: QueryDslFieldLookup + /** Spatial relation between the query shape and the document shape. */ relation?: GeoShapeRelation + /** Queries using an inline shape definition in GeoJSON or Well Known Text (WKT) format. */ shape?: GeoShape } export interface QueryDslShapeQueryKeys extends QueryDslQueryBase { + /** When set to `true` the query ignores an unmapped field and will not match any documents. */ ignore_unmapped?: boolean } export type QueryDslShapeQuery = QueryDslShapeQueryKeys @@ -7145,23 +9344,43 @@ export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'NOT' | 'OR' | 'PRE export type QueryDslSimpleQueryStringFlags = SpecUtilsPipeSeparatedFlags export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase { + /** Analyzer used to convert text in the query string into tokens. */ analyzer?: string + /** If `true`, the query attempts to analyze wildcard terms in the query string. */ analyze_wildcard?: boolean + /** If `true`, the parser creates a match_phrase query for each multi-position token. */ auto_generate_synonyms_phrase_query?: boolean + /** Default boolean logic used to interpret text in the query string if no operators are specified. */ default_operator?: QueryDslOperator + /** Array of fields you wish to search. 
+ * Accepts wildcard expressions. + * You can also boost relevance scores for matches to particular fields using a caret (`^`) notation. + * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ fields?: Field[] + /** List of enabled operators for the simple query string syntax. */ flags?: QueryDslSimpleQueryStringFlags + /** Maximum number of terms to which the query expands for fuzzy matching. */ fuzzy_max_expansions?: integer + /** Number of beginning characters left unchanged for fuzzy matching. */ fuzzy_prefix_length?: integer + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */ lenient?: boolean + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Query string in the simple query string syntax you wish to parse and use for search. */ query: string + /** Suffix appended to quoted text in the query string. */ quote_field_suffix?: string } export interface QueryDslSpanContainingQuery extends QueryDslQueryBase { + /** Can be any span query. + * Matching spans from `big` that contain matches from `little` are returned. */ big: QueryDslSpanQuery + /** Can be any span query. + * Matching spans from `big` that contain matches from `little` are returned. */ little: QueryDslSpanQuery } @@ -7171,68 +9390,118 @@ export interface QueryDslSpanFieldMaskingQuery extends QueryDslQueryBase { } export interface QueryDslSpanFirstQuery extends QueryDslQueryBase { + /** Controls the maximum end position permitted in a match. */ end: integer + /** Can be any other span type query. */ match: QueryDslSpanQuery } export type QueryDslSpanGapQuery = Partial> export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase { + /** Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). */ match: QueryDslQueryContainer } export interface QueryDslSpanNearQuery extends QueryDslQueryBase { + /** Array of one or more other span type queries. */ clauses: QueryDslSpanQuery[] + /** Controls whether matches are required to be in-order. */ in_order?: boolean + /** Controls the maximum number of intervening unmatched positions permitted. */ slop?: integer } export interface QueryDslSpanNotQuery extends QueryDslQueryBase { + /** The number of tokens from within the include span that can’t have overlap with the exclude span. + * Equivalent to setting both `pre` and `post`. */ dist?: integer + /** Span query whose matches must not overlap those returned. */ exclude: QueryDslSpanQuery + /** Span query whose matches are filtered. */ include: QueryDslSpanQuery + /** The number of tokens after the include span that can’t have overlap with the exclude span. */ post?: integer + /** The number of tokens before the include span that can’t have overlap with the exclude span. */ pre?: integer } export interface QueryDslSpanOrQuery extends QueryDslQueryBase { + /** Array of one or more other span type queries. */ clauses: QueryDslSpanQuery[] } export interface QueryDslSpanQuery { + /** Accepts a list of span queries, but only returns those spans which also match a second span query. */ span_containing?: QueryDslSpanContainingQuery + /** Allows queries like `span_near` or `span_or` across different fields.
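+ *
+ * A hedged sketch of masking a span query on `text.stems` so it can combine with spans on `text` (both field names are assumptions made up for this example):
+ *
+ * ```ts
+ * const masked: QueryDslQueryContainer = {
+ *   span_near: {
+ *     clauses: [
+ *       { span_term: { text: { value: 'quick' } } },
+ *       { span_field_masking: { query: { span_term: { 'text.stems': { value: 'fox' } } }, field: 'text' } }
+ *     ],
+ *     slop: 5,
+ *     in_order: false
+ *   }
+ * }
+ * ```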
*/ span_field_masking?: QueryDslSpanFieldMaskingQuery + /** Accepts another span query whose matches must appear within the first N positions of the field. */ span_first?: QueryDslSpanFirstQuery span_gap?: QueryDslSpanGapQuery + /** Wraps a `term`, `range`, `prefix`, `wildcard`, `regexp`, or `fuzzy` query. */ span_multi?: QueryDslSpanMultiTermQuery + /** Accepts multiple span queries whose matches must be within the specified distance of each other, and possibly in the same order. */ span_near?: QueryDslSpanNearQuery + /** Wraps another span query, and excludes any documents which match that query. */ span_not?: QueryDslSpanNotQuery + /** Combines multiple span queries and returns documents which match any of the specified queries. */ span_or?: QueryDslSpanOrQuery + /** The equivalent of the `term` query but for use with other span queries. */ span_term?: Partial> + /** The result from a single span query is returned as long as its span falls within the spans returned by a list of other span queries. */ span_within?: QueryDslSpanWithinQuery } export interface QueryDslSpanTermQuery extends QueryDslQueryBase { value: FieldValue + /** @alias value */ term: FieldValue } export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { + /** Can be any span query. + * Matching spans from `little` that are enclosed within `big` are returned. */ big: QueryDslSpanQuery + /** Can be any span query. + * Matching spans from `little` that are enclosed within `big` are returned. */ little: QueryDslSpanQuery } export interface QueryDslSparseVectorQuery extends QueryDslQueryBase { + /** The name of the field that contains the token-weight pairs to be searched against. + * This field must be a mapped sparse_vector field. */ field: Field + /** Dictionary of precomputed sparse vectors and their associated weights. + * Only one of inference_id or query_vector may be supplied in a request. */ query_vector?: Record + /** The inference ID to use to convert the query text into token-weight pairs. + * It must be the same inference ID that was used to create the tokens from the input text. + * Only one of inference_id and query_vector is allowed. + * If inference_id is specified, query must also be specified. + * Only one of inference_id or query_vector may be supplied in a request. */ inference_id?: Id + /** The query text you want to use for search. + * If inference_id is specified, query must also be specified. */ query?: string + /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. + * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. + * Default: false + * @experimental */ prune?: boolean + /** Optional pruning configuration. + * If enabled, this will omit non-significant tokens from the query in order to improve query performance. + * This is only used if prune is set to true. + * If prune is set to true but pruning_config is not specified, default values will be used. + * @experimental */ pruning_config?: QueryDslTokenPruningConfig } export interface QueryDslTermQuery extends QueryDslQueryBase { + /** Term you wish to find in the provided field. */ value: FieldValue + /** Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. + * When `false`, the case sensitivity of matching depends on the underlying field’s mapping.
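+ *
+ * For example (the `user.id` field is an assumption):
+ *
+ * ```ts
+ * const exact: QueryDslQueryContainer = {
+ *   term: { 'user.id': { value: 'kimchy', case_insensitive: true } }
+ * }
+ * ```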
*/ case_insensitive?: boolean } @@ -7254,23 +9523,34 @@ export type QueryDslTermsQuery = QueryDslTermsQueryKeys export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup export interface QueryDslTermsSetQuery extends QueryDslQueryBase { + /** Specification describing number of matching terms required to return a document. */ minimum_should_match?: MinimumShouldMatch + /** Numeric field containing the number of matching terms required to return a document. */ minimum_should_match_field?: Field - minimum_should_match_script?: Script | string + /** Custom script containing the number of matching terms required to return a document. */ + minimum_should_match_script?: Script | ScriptSource + /** Array of terms you wish to find in the provided field. */ terms: FieldValue[] } export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { + /** The text expansion NLP model to use */ model_id: string + /** The query text */ model_text: string + /** Token pruning configurations + * @experimental */ pruning_config?: QueryDslTokenPruningConfig } export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' export interface QueryDslTokenPruningConfig { + /** Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */ tokens_freq_ratio_threshold?: integer + /** Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */ tokens_weight_threshold?: float + /** Whether to only score pruned tokens, vs only scoring kept tokens. */ only_score_pruned_tokens?: boolean } @@ -7287,38 +9567,53 @@ export interface QueryDslUntypedDistanceFeatureQuery extends QueryDslDistanceFea } export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase { + /** Date format used to convert `date` values in the query. */ format?: DateFormat + /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */ time_zone?: TimeZone } export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { + /** The tokens representing this query */ tokens: Record + /** Token pruning configurations */ pruning_config?: QueryDslTokenPruningConfig } export interface QueryDslWildcardQuery extends QueryDslQueryBase { + /** Allows case insensitive matching of the pattern with the indexed field values when set to true. Default is false which means the case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Wildcard pattern for terms you wish to find in the provided field. Required, when wildcard is not set. */ value?: string + /** Wildcard pattern for terms you wish to find in the provided field. Required, when value is not set. */ wildcard?: string } export interface QueryDslWrapperQuery extends QueryDslQueryBase { + /** A base64 encoded query. + * The binary data format can be any of JSON, YAML, CBOR or SMILE encodings */ query: string } export type QueryDslZeroTermsQuery = 'all' | 'none' export interface AsyncSearchAsyncSearch> { + /** Partial aggregations results, coming from the shards that have already completed running the query. */ aggregations?: TAggregations _clusters?: ClusterStatistics fields?: Record hits: SearchHitsMetadata max_score?: double + /** Indicates how many reductions of the results have been performed. 
+ * If this number increases compared to the last retrieved results for a get async search request, you can expect additional results included in the search response. */ num_reduce_phases?: long profile?: SearchProfile pit_id?: Id _scroll_id?: ScrollId + /** Indicates how many shards have run the query. + * Note that in order for shard results to be included in the search response, they need to be reduced first. */ _shards: ShardStatistics suggest?: Record[]> terminated_early?: boolean @@ -7332,18 +9627,27 @@ export interface AsyncSearchAsyncSearchDocumentResponseBase info + * > If the search failed after some shards returned their results or the node that is coordinating the async search dies, results may be partial even though `is_running` is `false`. */ is_running: boolean + /** Indicates when the async search will expire. */ expiration_time?: DateTime expiration_time_in_millis: EpochTime start_time?: DateTime start_time_in_millis: EpochTime + /** Indicates when the async search completed. + * It is present only when the search has completed. */ completion_time?: DateTime completion_time_in_millis?: EpochTime } export interface AsyncSearchDeleteRequest extends RequestBase { -/** A unique identifier for the async search. */ + /** A unique identifier for the async search. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -7354,13 +9658,19 @@ export interface AsyncSearchDeleteRequest extends RequestBase { export type AsyncSearchDeleteResponse = AcknowledgedResponseBase export interface AsyncSearchGetRequest extends RequestBase { -/** A unique identifier for the async search. */ + /** A unique identifier for the async search. */ id: Id - /** The length of time that the async search should be available in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its saved results are deleted. */ + /** The length of time that the async search should be available in the cluster. + * When not specified, the `keep_alive` set with the corresponding submit async request will be used. + * Otherwise, it is possible to override the value and extend the validity of the request. + * When this period expires, the search, if still running, is cancelled. + * If the search is completed, its saved results are deleted. */ keep_alive?: Duration /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean - /** Specifies to wait for the search to be completed up until the provided timeout. Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. By default no timeout is set meaning that the currently available results will be returned without any additional wait. */ + /** Specifies to wait for the search to be completed up until the provided timeout. + * Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. + * By default no timeout is set meaning that the currently available results will be returned without any additional wait.
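+ *
+ * A usage sketch with the JavaScript client; the `client` instance and the `id` value (returned by a previous submit call) are assumptions for this example:
+ *
+ * ```ts
+ * const response = await client.asyncSearch.get({
+ *   id: 'FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=',
+ *   wait_for_completion_timeout: '2s'  // block up to 2s for final results
+ * })
+ * ```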
*/ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, keep_alive?: never, typed_keys?: never, wait_for_completion_timeout?: never } @@ -7371,9 +9681,10 @@ export interface AsyncSearchGetRequest extends RequestBase { export type AsyncSearchGetResponse> = AsyncSearchAsyncSearchDocumentResponseBase export interface AsyncSearchStatusRequest extends RequestBase { -/** A unique identifier for the async search. */ + /** A unique identifier for the async search. */ id: Id - /** The length of time that the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. */ + /** The length of time that the async search needs to be available. + * Ongoing async searches and any saved search results are deleted after this period. */ keep_alive?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, keep_alive?: never } @@ -7384,17 +9695,25 @@ export interface AsyncSearchStatusRequest extends RequestBase { export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSearchResponseBase { + /** The number of shards that have run the query so far. */ _shards: ShardStatistics + /** Metadata about clusters involved in the cross-cluster search. + * It is not shown for local-only searches. */ _clusters?: ClusterStatistics + /** If the async search completed, this field shows the status code of the search. + * For example, `200` indicates that the async search was successfully completed. + * `503` indicates that the async search was completed with an error. */ completion_status?: integer } export interface AsyncSearchSubmitRequest extends RequestBase { -/** A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices */ + /** A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices */ index?: Indices - /** Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. */ + /** Blocks and waits until the search is completed up to a certain timeout. + * When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. */ wait_for_completion_timeout?: Duration - /** Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. */ + /** Specifies how long the async search needs to be available. + * Ongoing async searches and any saved search results are deleted after this period. */ keep_alive?: Duration /** If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. */ keep_on_completion?: boolean @@ -7406,7 +9725,8 @@ export interface AsyncSearchSubmitRequest extends RequestBase { analyzer?: string /** Specify whether wildcard and prefix queries should be analyzed (default: false) */ analyze_wildcard?: boolean - /** Affects how often partial results become available, which happens whenever shard results are reduced. 
A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). */ + /** Affects how often partial results become available, which happens whenever shard results are reduced. + * A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). */ batched_reduce_size?: long /** The default value is the only supported value. */ ccs_minimize_roundtrips?: boolean @@ -7423,7 +9743,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { /** Specify whether format-based query failures (such as providing text to a numeric field) should be ignored */ lenient?: boolean /** The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests */ - max_concurrent_shard_requests?: long + max_concurrent_shard_requests?: integer /** Specify the node or shard the operation should be performed on (default: random) */ preference?: string /** Specify if request cache should be used for this request or not, defaults to true */ @@ -7458,18 +9778,25 @@ export interface AsyncSearchSubmitRequest extends RequestBase { explain?: boolean /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record - /** Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. */ + /** Starting document offset. By default, you cannot page through more than 10,000 + * hits using the from and size parameters. To page through more hits, use the + * search_after parameter. */ from?: integer highlight?: SearchHighlight - /** Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. */ + /** Number of hits matching the query to count accurately. If true, the exact + * number of hits is returned at the cost of some performance. If false, the + * response does not include the total number of hits matching the query. + * Defaults to 10,000 hits. */ track_total_hits?: SearchTrackHits /** Boosts the _score of documents from specified indices. */ - indices_boost?: Record[] - /** Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. */ + indices_boost?: Partial>[] + /** Array of wildcard (*) patterns. The request returns doc values for field + * names matching these patterns in the hits.fields property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] /** Defines the approximate kNN search to run. */ knn?: KnnSearch | KnnSearch[] - /** Minimum _score for matching documents. Documents with a lower _score are not included in the search results. */ + /** Minimum _score for matching documents. Documents with a lower _score are + * not included in search results and results collected by aggregations. */ min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -7479,32 +9806,48 @@ export interface AsyncSearchSubmitRequest extends RequestBase { /** Retrieve a script evaluation (based on different fields) for each hit. 
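// Editor's note: a hedged sketch of `asyncSearch.submit`, using only
// parameters declared in AsyncSearchSubmitRequest; the index name and query
// are illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const submitted = await client.asyncSearch.submit({
  index: 'my-index',
  wait_for_completion_timeout: '1s', // return whatever is ready after 1s
  keep_on_completion: true,          // keep results even if it finishes early
  size: 10,
  track_total_hits: true,
  query: { match: { title: 'elasticsearch' } }
})
// When the search outlives the timeout, `id` is set and can be polled later.
console.log(submitted.id, submitted.is_running)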
 */ script_fields?: Record search_after?: SortResults - /** The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. */ + /** The number of hits to return. By default, you cannot page through more + * than 10,000 hits using the from and size parameters. To page through more + * hits, use the search_after parameter. */ size?: integer slice?: SlicedScroll sort?: Sort - /** Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. */ + /** Indicates which source fields are returned for matching documents. These + * fields are returned in the hits._source property of the search response. */ _source?: SearchSourceConfig - /** Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. */ + /** Array of wildcard (*) patterns. The request returns values for field names + * matching these patterns in the hits.fields property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] suggest?: SearchSuggester - /** Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. */ + /** Maximum number of documents to collect for each shard. If a query reaches this + * limit, Elasticsearch terminates the query early. Elasticsearch collects documents + * before sorting. Defaults to 0, which does not terminate query execution early. */ terminate_after?: long - /** Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */ + /** Specifies the period of time to wait for a response from each shard. If no response + * is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: string /** If true, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean /** If true, returns document version as part of a hit. */ version?: boolean - /** If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. */ + /** If true, returns sequence number and primary term of the last modification + * of each hit. See Optimistic concurrency control. */ seq_no_primary_term?: boolean - /** List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. */ + /** List of stored fields to return as part of a hit. If no fields are specified, + * no stored fields are included in the response. If this field is specified, the _source + * parameter defaults to false. You can pass _source: true to return both source fields + * and stored fields in the search response. */ stored_fields?: Fields - /** Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path. */ + /** Limits the search to a point in time (PIT). If you provide a PIT, you + * cannot specify an `<index>` in the request path.
*/ pit?: SearchPointInTimeReference - /** Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */ + /** Defines one or more runtime fields in the search request. These fields take + * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields - /** Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. */ + /** Stats groups to associate with the search. Each group maintains a statistics + * aggregation for its associated searches. You can retrieve these stats using + * the indices stats API. */ stats?: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } @@ -7516,13 +9859,15 @@ export type AsyncSearchSubmitResponse } export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase { -/** the name of the autoscaling policy */ + /** the name of the autoscaling policy */ name: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -7562,7 +9907,8 @@ export interface AutoscalingGetAutoscalingCapacityAutoscalingResources { } export interface AutoscalingGetAutoscalingCapacityRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. 
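// Editor's note: a sketch of the autoscaling policy lifecycle covered by the
// delete request above and the get/put requests that follow. The `policy`
// body shape (roles plus deciders) is an assumption based on the autoscaling
// policy type, whose generics were lost in this rendering.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.autoscaling.putAutoscalingPolicy({
  name: 'my_autoscaling_policy',
  policy: { roles: ['data_hot'], deciders: { fixed: {} } }
})
const policy = await client.autoscaling.getAutoscalingPolicy({ name: 'my_autoscaling_policy' })
console.log(policy)
await client.autoscaling.deleteAutoscalingPolicy({ name: 'my_autoscaling_policy' })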
*/ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -7575,9 +9921,10 @@ export interface AutoscalingGetAutoscalingCapacityResponse { } export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { -/** the name of the autoscaling policy */ + /** the name of the autoscaling policy */ name: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -7588,9 +9935,10 @@ export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { export type AutoscalingGetAutoscalingPolicyResponse = AutoscalingAutoscalingPolicy export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { -/** the name of the autoscaling policy */ + /** the name of the autoscaling policy */ name: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -7627,35 +9975,69 @@ export type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'ch export type CatCatTransformColumns = CatCatTransformColumn | CatCatTransformColumn[] export interface CatAliasesAliasesRecord { + /** alias name */ alias?: string + /** alias name + * @alias alias */ a?: string + /** index alias points to */ index?: IndexName + /** index alias points to + * @alias index */ i?: IndexName + /** index alias points to + * @alias index */ idx?: IndexName + /** filter */ filter?: string + /** filter + * @alias filter */ f?: string + /** filter + * @alias filter */ fi?: string + /** index routing */ 'routing.index'?: string + /** index routing + * @alias 'routing.index' */ ri?: string + /** index routing + * @alias 'routing.index' */ routingIndex?: string + /** search routing */ 'routing.search'?: string + /** search routing + * @alias 'routing.search' */ rs?: string + /** search routing + * @alias 'routing.search' */ routingSearch?: string + /** write index */ is_write_index?: string + /** write index + * @alias is_write_index */ w?: string + /** write index + * @alias is_write_index */ isWriteIndex?: string } export interface CatAliasesRequest extends CatCatRequestBase { -/** A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. 
+ * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never time out, you can set it to `-1`. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never time out, you can set it to `-1`. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, h?: never, s?: never, expand_wildcards?: never, master_timeout?: never } @@ -7666,51 +10048,127 @@ export interface CatAliasesRequest extends CatCatRequestBase { export type CatAliasesResponse = CatAliasesAliasesRecord[] export interface CatAllocationAllocationRecord { + /** Number of primary and replica shards assigned to the node. */ shards?: string + /** Number of primary and replica shards assigned to the node. + * @alias shards */ s?: string + /** Number of shards that are scheduled to be moved elsewhere in the cluster, or -1 if an allocator other than the desired balance allocator is used */ 'shards.undesired'?: string | null + /** Sum of index write load forecasts */ 'write_load.forecast'?: SpecUtilsStringified | null + /** Sum of index write load forecasts + * @alias 'write_load.forecast' */ wlf?: SpecUtilsStringified | null + /** Sum of index write load forecasts + * @alias 'write_load.forecast' */ writeLoadForecast?: SpecUtilsStringified | null + /** Sum of shard size forecasts */ 'disk.indices.forecast'?: ByteSize | null + /** Sum of shard size forecasts + * @alias 'disk.indices.forecast' */ dif?: ByteSize | null + /** Sum of shard size forecasts + * @alias 'disk.indices.forecast' */ diskIndicesForecast?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. */ 'disk.indices'?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. + * @alias 'disk.indices' */ di?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. + * @alias 'disk.indices' */ diskIndices?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS).
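// Editor's note: a sketch of `cat.aliases` using the CatAliasesRequest
// parameters defined above; `format: 'json'` comes from the common cat query
// parameters and makes the response parse as CatAliasesAliasesRecord[].
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const aliases = await client.cat.aliases({
  name: 'my-alias*',
  h: ['alias', 'index', 'is_write_index'],
  s: ['alias:asc'],
  expand_wildcards: 'open',
  format: 'json'
})
for (const row of aliases) console.log(row.alias, row.index)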
+ * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. + * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. */ 'disk.used'?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS). + * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. + * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. + * @alias 'disk.used' */ du?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS). + * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. + * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. + * @alias 'disk.used' */ diskUsed?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. + * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. */ 'disk.avail'?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. + * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. + * @alias 'disk.avail' */ da?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. + * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. + * @alias 'disk.avail' */ diskAvail?: ByteSize | null + /** Total disk space for the node, including in-use and available space. */ 'disk.total'?: ByteSize | null + /** Total disk space for the node, including in-use and available space. + * @alias 'disk.total' */ dt?: ByteSize | null + /** Total disk space for the node, including in-use and available space. + * @alias 'disk.total' */ diskTotal?: ByteSize | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. */ 'disk.percent'?: Percentage | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. + * @alias 'disk.percent' */ dp?: Percentage | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. + * @alias 'disk.percent' */ diskPercent?: Percentage | null + /** Network host for the node. Set using the `network.host` setting. */ host?: Host | null + /** Network host for the node. Set using the `network.host` setting. + * @alias host */ h?: Host | null + /** IP address and port for the node. */ ip?: Ip | null + /** Name for the node. Set using the `node.name` setting. */ node?: string + /** Name for the node. Set using the `node.name` setting. 
+ * @alias node */ n?: string + /** Node roles */ 'node.role'?: string | null + /** Node roles + * @alias 'node.role' */ r?: string | null + /** Node roles + * @alias 'node.role' */ role?: string | null + /** Node roles + * @alias 'node.role' */ nodeRole?: string | null } export interface CatAllocationRequest extends CatCatRequestBase { -/** A comma-separated list of node identifiers or names used to limit the returned information. */ + /** A comma-separated list of node identifiers or names used to limit the returned information. */ node_id?: NodeIds /** The unit used to display byte values. */ bytes?: Bytes /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -7733,13 +10191,20 @@ export interface CatComponentTemplatesComponentTemplate { } export interface CatComponentTemplatesRequest extends CatCatRequestBase { -/** The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. */ + /** The name of the component template. + * It accepts wildcard expressions. + * If it is omitted, all component templates are returned. */ name?: string /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** The period to wait for a connection to the master node. 
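// Editor's note: a sketch of `cat.allocation`, which reports the per-node
// shard counts and disk metrics from CatAllocationAllocationRecord above;
// the byte unit and column selection are illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const allocation = await client.cat.allocation({
  bytes: 'gb',
  h: ['node', 'shards', 'disk.used', 'disk.avail', 'disk.percent'],
  s: ['disk.percent:desc'],
  format: 'json'
})
console.log(allocation)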
*/ master_timeout?: Duration @@ -7752,25 +10217,48 @@ export interface CatComponentTemplatesRequest extends CatCatRequestBase { export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] export interface CatCountCountRecord { + /** seconds since 1970-01-01 00:00:00 */ epoch?: SpecUtilsStringified> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ t?: SpecUtilsStringified> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ time?: SpecUtilsStringified> + /** time in HH:MM:SS */ timestamp?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ ts?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hms?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hhmmss?: TimeOfDay + /** the document count */ count?: string + /** the document count + * @alias count */ dc?: string + /** the document count + * @alias count */ 'docs.count'?: string + /** the document count + * @alias count */ docsCount?: string } export interface CatCountRequest extends CatCatRequestBase { -/** A comma-separated list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * It supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, h?: never, s?: never } @@ -7781,25 +10269,40 @@ export interface CatCountRequest extends CatCatRequestBase { export type CatCountResponse = CatCountCountRecord[] export interface CatFielddataFielddataRecord { + /** node id */ id?: string + /** host name */ host?: string + /** host name + * @alias host */ h?: string + /** ip address */ ip?: string + /** node name */ node?: string + /** node name + * @alias node */ n?: string + /** field name */ field?: string + /** field name + * @alias field */ f?: string + /** field data usage */ size?: string } export interface CatFielddataRequest extends CatCatRequestBase { -/** Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter. */ + /** Comma-separated list of fields used to limit returned information. + * To retrieve all fields, omit this parameter. */ fields?: Fields /** The unit used to display byte values. */ bytes?: Bytes /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. 
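// Editor's note: quick sketches of `cat.count` and `cat.fielddata`, matching
// the request types above; the index and field names are illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const counts = await client.cat.count({ index: 'my-index', format: 'json' })
console.log(counts[0].count) // document count as a string column

const fielddata = await client.cat.fielddata({
  fields: ['my_keyword_field'],
  bytes: 'mb',
  format: 'json'
})
console.log(fielddata)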
 */ body?: string | { [key: string]: any } & { fields?: never, bytes?: never, h?: never, s?: never } @@ -7810,66 +10313,153 @@ export interface CatFielddataRequest extends CatCatRequestBase { export type CatFielddataResponse = CatFielddataFielddataRecord[] export interface CatHealthHealthRecord { + /** seconds since 1970-01-01 00:00:00 */ epoch?: SpecUtilsStringified> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ time?: SpecUtilsStringified> + /** time in HH:MM:SS */ timestamp?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ ts?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hms?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hhmmss?: TimeOfDay + /** cluster name */ cluster?: string + /** cluster name + * @alias cluster */ cl?: string + /** health status */ status?: string + /** health status + * @alias status */ st?: string + /** total number of nodes */ 'node.total'?: string + /** total number of nodes + * @alias 'node.total' */ nt?: string + /** total number of nodes + * @alias 'node.total' */ nodeTotal?: string + /** number of nodes that can store data */ 'node.data'?: string + /** number of nodes that can store data + * @alias 'node.data' */ nd?: string + /** number of nodes that can store data + * @alias 'node.data' */ nodeData?: string + /** total number of shards */ shards?: string + /** total number of shards + * @alias shards */ t?: string + /** total number of shards + * @alias shards */ sh?: string + /** total number of shards + * @alias shards */ 'shards.total'?: string + /** total number of shards + * @alias shards */ shardsTotal?: string + /** number of primary shards */ pri?: string + /** number of primary shards + * @alias pri */ p?: string + /** number of primary shards + * @alias pri */ 'shards.primary'?: string + /** number of primary shards + * @alias pri */ shardsPrimary?: string + /** number of relocating shards */ relo?: string + /** number of relocating shards + * @alias relo */ r?: string + /** number of relocating shards + * @alias relo */ 'shards.relocating'?: string + /** number of relocating shards + * @alias relo */ shardsRelocating?: string + /** number of initializing shards */ init?: string + /** number of initializing shards + * @alias init */ i?: string + /** number of initializing shards + * @alias init */ 'shards.initializing'?: string + /** number of initializing shards + * @alias init */ shardsInitializing?: string + /** number of unassigned primary shards */ 'unassign.pri'?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ up?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ 'shards.unassigned.primary'?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ shardsUnassignedPrimary?: string + /** number of unassigned shards */ unassign?: string + /** number of unassigned shards + * @alias unassign */ u?: string + /** number of unassigned shards + * @alias unassign */ 'shards.unassigned'?: string + /** number of unassigned shards + * @alias unassign */ shardsUnassigned?: string + /** number of pending tasks */ pending_tasks?: string + /** number of pending tasks + * @alias pending_tasks */ pt?: string + /** number of pending tasks + * @alias pending_tasks */ pendingTasks?: string + /** wait time of longest task pending */ max_task_wait_time?: string + /** wait time of longest task pending + * @alias max_task_wait_time */ mtwt?: string + /** wait time of longest task pending + * @alias max_task_wait_time */ maxTaskWaitTime?: string +
/** active number of shards in percent */ active_shards_percent?: string + /** active number of shards in percent + * @alias active_shards_percent */ asp?: string + /** active number of shards in percent + * @alias active_shards_percent */ activeShardsPercent?: string } export interface CatHealthRequest extends CatCatRequestBase { -/** The unit used to display time values. */ + /** The unit used to display time values. */ time?: TimeUnit /** If true, returns `HH:MM:SS` and Unix epoch timestamps. */ ts?: boolean /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { time?: never, ts?: never, h?: never, s?: never } @@ -7890,298 +10480,733 @@ export interface CatHelpResponse { } export interface CatIndicesIndicesRecord { + /** current health status */ health?: string + /** current health status + * @alias health */ h?: string + /** open/close status */ status?: string + /** open/close status + * @alias status */ s?: string + /** index name */ index?: string + /** index name + * @alias index */ i?: string + /** index name + * @alias index */ idx?: string + /** index uuid */ uuid?: string + /** index uuid + * @alias uuid */ id?: string + /** number of primary shards */ pri?: string + /** number of primary shards + * @alias pri */ p?: string + /** number of primary shards + * @alias pri */ 'shards.primary'?: string + /** number of primary shards + * @alias pri */ shardsPrimary?: string + /** number of replica shards */ rep?: string + /** number of replica shards + * @alias rep */ r?: string + /** number of replica shards + * @alias rep */ 'shards.replica'?: string + /** number of replica shards + * @alias rep */ shardsReplica?: string + /** available docs */ 'docs.count'?: string | null + /** available docs + * @alias 'docs.count' */ dc?: string | null + /** available docs + * @alias 'docs.count' */ docsCount?: string | null + /** deleted docs */ 'docs.deleted'?: string | null + /** deleted docs + * @alias 'docs.deleted' */ dd?: string | null + /** deleted docs + * @alias 'docs.deleted' */ docsDeleted?: string | null + /** index creation date (millisecond value) */ 'creation.date'?: string + /** index creation date (millisecond value) + * @alias 'creation.date' */ cd?: string + /** index creation date (as string) */ 'creation.date.string'?: string + /** index creation date (as string) + * @alias 'creation.date.string' */ cds?: string + /** store size of primaries & replicas */ 'store.size'?: string | null + /** store size of primaries & replicas + * @alias 'store.size' */ ss?: string | null + /** store size of primaries & replicas + * @alias 'store.size' */ storeSize?: string | null + /** store size of primaries */ 'pri.store.size'?: string | null + /** total size of dataset (including the cache for partially mounted indices) */ 'dataset.size'?: string | null + /** size of completion */ 'completion.size'?: string + /** size of completion + * @alias 'completion.size' */ cs?: string + /** size of completion + * @alias 'completion.size' */ completionSize?: string + /** size of completion 
*/ 'pri.completion.size'?: string + /** used fielddata cache */ 'fielddata.memory_size'?: string + /** used fielddata cache + * @alias 'fielddata.memory_size' */ fm?: string + /** used fielddata cache + * @alias 'fielddata.memory_size' */ fielddataMemory?: string + /** used fielddata cache */ 'pri.fielddata.memory_size'?: string + /** fielddata evictions */ 'fielddata.evictions'?: string + /** fielddata evictions + * @alias 'fielddata.evictions' */ fe?: string + /** fielddata evictions + * @alias 'fielddata.evictions' */ fielddataEvictions?: string + /** fielddata evictions */ 'pri.fielddata.evictions'?: string + /** used query cache */ 'query_cache.memory_size'?: string + /** used query cache + * @alias 'query_cache.memory_size' */ qcm?: string + /** used query cache + * @alias 'query_cache.memory_size' */ queryCacheMemory?: string + /** used query cache */ 'pri.query_cache.memory_size'?: string + /** query cache evictions */ 'query_cache.evictions'?: string + /** query cache evictions + * @alias 'query_cache.evictions' */ qce?: string + /** query cache evictions + * @alias 'query_cache.evictions' */ queryCacheEvictions?: string + /** query cache evictions */ 'pri.query_cache.evictions'?: string + /** used request cache */ 'request_cache.memory_size'?: string + /** used request cache + * @alias 'request_cache.memory_size' */ rcm?: string + /** used request cache + * @alias 'request_cache.memory_size' */ requestCacheMemory?: string + /** used request cache */ 'pri.request_cache.memory_size'?: string + /** request cache evictions */ 'request_cache.evictions'?: string + /** request cache evictions + * @alias 'request_cache.evictions' */ rce?: string + /** request cache evictions + * @alias 'request_cache.evictions' */ requestCacheEvictions?: string + /** request cache evictions */ 'pri.request_cache.evictions'?: string + /** request cache hit count */ 'request_cache.hit_count'?: string + /** request cache hit count + * @alias 'request_cache.hit_count' */ rchc?: string + /** request cache hit count + * @alias 'request_cache.hit_count' */ requestCacheHitCount?: string + /** request cache hit count */ 'pri.request_cache.hit_count'?: string + /** request cache miss count */ 'request_cache.miss_count'?: string + /** request cache miss count + * @alias 'request_cache.miss_count' */ rcmc?: string + /** request cache miss count + * @alias 'request_cache.miss_count' */ requestCacheMissCount?: string + /** request cache miss count */ 'pri.request_cache.miss_count'?: string + /** number of flushes */ 'flush.total'?: string + /** number of flushes + * @alias 'flush.total' */ ft?: string + /** number of flushes + * @alias 'flush.total' */ flushTotal?: string + /** number of flushes */ 'pri.flush.total'?: string + /** time spent in flush */ 'flush.total_time'?: string + /** time spent in flush + * @alias 'flush.total_time' */ ftt?: string + /** time spent in flush + * @alias 'flush.total_time' */ flushTotalTime?: string + /** time spent in flush */ 'pri.flush.total_time'?: string + /** number of current get ops */ 'get.current'?: string + /** number of current get ops + * @alias 'get.current' */ gc?: string + /** number of current get ops + * @alias 'get.current' */ getCurrent?: string + /** number of current get ops */ 'pri.get.current'?: string + /** time spent in get */ 'get.time'?: string + /** time spent in get + * @alias 'get.time' */ gti?: string + /** time spent in get + * @alias 'get.time' */ getTime?: string + /** time spent in get */ 'pri.get.time'?: string + /** number of get ops */ 
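// Editor's note: the keys of CatIndicesIndicesRecord mirror the cat column
// names and their short aliases; a sketch of selecting a few columns through
// `h` and reading them back (index pattern and columns are illustrative).
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const rows = await client.cat.indices({
  index: 'my-*',
  h: ['index', 'docs.count', 'store.size', 'health'],
  bytes: 'mb',
  format: 'json'
})
for (const row of rows) {
  console.log(row.index, row['docs.count'], row['store.size'], row.health)
}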
'get.total'?: string + /** number of get ops + * @alias 'get.total' */ gto?: string + /** number of get ops + * @alias 'get.total' */ getTotal?: string + /** number of get ops */ 'pri.get.total'?: string + /** time spent in successful gets */ 'get.exists_time'?: string + /** time spent in successful gets + * @alias 'get.exists_time' */ geti?: string + /** time spent in successful gets + * @alias 'get.exists_time' */ getExistsTime?: string + /** time spent in successful gets */ 'pri.get.exists_time'?: string + /** number of successful gets */ 'get.exists_total'?: string + /** number of successful gets + * @alias 'get.exists_total' */ geto?: string + /** number of successful gets + * @alias 'get.exists_total' */ getExistsTotal?: string + /** number of successful gets */ 'pri.get.exists_total'?: string + /** time spent in failed gets */ 'get.missing_time'?: string + /** time spent in failed gets + * @alias 'get.missing_time' */ gmti?: string + /** time spent in failed gets + * @alias 'get.missing_time' */ getMissingTime?: string + /** time spent in failed gets */ 'pri.get.missing_time'?: string + /** number of failed gets */ 'get.missing_total'?: string + /** number of failed gets + * @alias 'get.missing_total' */ gmto?: string + /** number of failed gets + * @alias 'get.missing_total' */ getMissingTotal?: string + /** number of failed gets */ 'pri.get.missing_total'?: string + /** number of current deletions */ 'indexing.delete_current'?: string + /** number of current deletions + * @alias 'indexing.delete_current' */ idc?: string + /** number of current deletions + * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string + /** number of current deletions */ 'pri.indexing.delete_current'?: string + /** time spent in deletions */ 'indexing.delete_time'?: string + /** time spent in deletions + * @alias 'indexing.delete_time' */ idti?: string + /** time spent in deletions + * @alias 'indexing.delete_time' */ indexingDeleteTime?: string + /** time spent in deletions */ 'pri.indexing.delete_time'?: string + /** number of delete ops */ 'indexing.delete_total'?: string + /** number of delete ops + * @alias 'indexing.delete_total' */ idto?: string + /** number of delete ops + * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string + /** number of delete ops */ 'pri.indexing.delete_total'?: string + /** number of current indexing ops */ 'indexing.index_current'?: string + /** number of current indexing ops + * @alias 'indexing.index_current' */ iic?: string + /** number of current indexing ops + * @alias 'indexing.index_current' */ indexingIndexCurrent?: string + /** number of current indexing ops */ 'pri.indexing.index_current'?: string + /** time spent in indexing */ 'indexing.index_time'?: string + /** time spent in indexing + * @alias 'indexing.index_time' */ iiti?: string + /** time spent in indexing + * @alias 'indexing.index_time' */ indexingIndexTime?: string + /** time spent in indexing */ 'pri.indexing.index_time'?: string + /** number of indexing ops */ 'indexing.index_total'?: string + /** number of indexing ops + * @alias 'indexing.index_total' */ iito?: string + /** number of indexing ops + * @alias 'indexing.index_total' */ indexingIndexTotal?: string + /** number of indexing ops */ 'pri.indexing.index_total'?: string + /** number of failed indexing ops */ 'indexing.index_failed'?: string + /** number of failed indexing ops + * @alias 'indexing.index_failed' */ iif?: string + /** number of failed indexing ops + * @alias 'indexing.index_failed' */ 
indexingIndexFailed?: string + /** number of failed indexing ops */ 'pri.indexing.index_failed'?: string + /** number of current merges */ 'merges.current'?: string + /** number of current merges + * @alias 'merges.current' */ mc?: string + /** number of current merges + * @alias 'merges.current' */ mergesCurrent?: string + /** number of current merges */ 'pri.merges.current'?: string + /** number of current merging docs */ 'merges.current_docs'?: string + /** number of current merging docs + * @alias 'merges.current_docs' */ mcd?: string + /** number of current merging docs + * @alias 'merges.current_docs' */ mergesCurrentDocs?: string + /** number of current merging docs */ 'pri.merges.current_docs'?: string + /** size of current merges */ 'merges.current_size'?: string + /** size of current merges + * @alias 'merges.current_size' */ mcs?: string + /** size of current merges + * @alias 'merges.current_size' */ mergesCurrentSize?: string + /** size of current merges */ 'pri.merges.current_size'?: string + /** number of completed merge ops */ 'merges.total'?: string + /** number of completed merge ops + * @alias 'merges.total' */ mt?: string + /** number of completed merge ops + * @alias 'merges.total' */ mergesTotal?: string + /** number of completed merge ops */ 'pri.merges.total'?: string + /** docs merged */ 'merges.total_docs'?: string + /** docs merged + * @alias 'merges.total_docs' */ mtd?: string + /** docs merged + * @alias 'merges.total_docs' */ mergesTotalDocs?: string + /** docs merged */ 'pri.merges.total_docs'?: string + /** size merged */ 'merges.total_size'?: string + /** size merged + * @alias 'merges.total_size' */ mts?: string + /** size merged + * @alias 'merges.total_size' */ mergesTotalSize?: string + /** size merged */ 'pri.merges.total_size'?: string + /** time spent in merges */ 'merges.total_time'?: string + /** time spent in merges + * @alias 'merges.total_time' */ mtt?: string + /** time spent in merges + * @alias 'merges.total_time' */ mergesTotalTime?: string + /** time spent in merges */ 'pri.merges.total_time'?: string + /** total refreshes */ 'refresh.total'?: string + /** total refreshes + * @alias 'refresh.total' */ rto?: string + /** total refreshes + * @alias 'refresh.total' */ refreshTotal?: string + /** total refreshes */ 'pri.refresh.total'?: string + /** time spent in refreshes */ 'refresh.time'?: string + /** time spent in refreshes + * @alias 'refresh.time' */ rti?: string + /** time spent in refreshes + * @alias 'refresh.time' */ refreshTime?: string + /** time spent in refreshes */ 'pri.refresh.time'?: string + /** total external refreshes */ 'refresh.external_total'?: string + /** total external refreshes + * @alias 'refresh.external_total' */ reto?: string + /** total external refreshes */ 'pri.refresh.external_total'?: string + /** time spent in external refreshes */ 'refresh.external_time'?: string + /** time spent in external refreshes + * @alias 'refresh.external_time' */ reti?: string + /** time spent in external refreshes */ 'pri.refresh.external_time'?: string + /** number of pending refresh listeners */ 'refresh.listeners'?: string + /** number of pending refresh listeners + * @alias 'refresh.listeners' */ rli?: string + /** number of pending refresh listeners + * @alias 'refresh.listeners' */ refreshListeners?: string + /** number of pending refresh listeners */ 'pri.refresh.listeners'?: string + /** current fetch phase ops */ 'search.fetch_current'?: string + /** current fetch phase ops + * @alias 'search.fetch_current' */ sfc?: 
string + /** current fetch phase ops + * @alias 'search.fetch_current' */ searchFetchCurrent?: string + /** current fetch phase ops */ 'pri.search.fetch_current'?: string + /** time spent in fetch phase */ 'search.fetch_time'?: string + /** time spent in fetch phase + * @alias 'search.fetch_time' */ sfti?: string + /** time spent in fetch phase + * @alias 'search.fetch_time' */ searchFetchTime?: string + /** time spent in fetch phase */ 'pri.search.fetch_time'?: string + /** total fetch ops */ 'search.fetch_total'?: string + /** total fetch ops + * @alias 'search.fetch_total' */ sfto?: string + /** total fetch ops + * @alias 'search.fetch_total' */ searchFetchTotal?: string + /** total fetch ops */ 'pri.search.fetch_total'?: string + /** open search contexts */ 'search.open_contexts'?: string + /** open search contexts + * @alias 'search.open_contexts' */ so?: string + /** open search contexts + * @alias 'search.open_contexts' */ searchOpenContexts?: string + /** open search contexts */ 'pri.search.open_contexts'?: string + /** current query phase ops */ 'search.query_current'?: string + /** current query phase ops + * @alias 'search.query_current' */ sqc?: string + /** current query phase ops + * @alias 'search.query_current' */ searchQueryCurrent?: string + /** current query phase ops */ 'pri.search.query_current'?: string + /** time spent in query phase */ 'search.query_time'?: string + /** time spent in query phase + * @alias 'search.query_time' */ sqti?: string + /** time spent in query phase + * @alias 'search.query_time' */ searchQueryTime?: string + /** time spent in query phase */ 'pri.search.query_time'?: string + /** total query phase ops */ 'search.query_total'?: string + /** total query phase ops + * @alias 'search.query_total' */ sqto?: string + /** total query phase ops + * @alias 'search.query_total' */ searchQueryTotal?: string + /** total query phase ops */ 'pri.search.query_total'?: string + /** open scroll contexts */ 'search.scroll_current'?: string + /** open scroll contexts + * @alias 'search.scroll_current' */ scc?: string + /** open scroll contexts + * @alias 'search.scroll_current' */ searchScrollCurrent?: string + /** open scroll contexts */ 'pri.search.scroll_current'?: string + /** time scroll contexts held open */ 'search.scroll_time'?: string + /** time scroll contexts held open + * @alias 'search.scroll_time' */ scti?: string + /** time scroll contexts held open + * @alias 'search.scroll_time' */ searchScrollTime?: string + /** time scroll contexts held open */ 'pri.search.scroll_time'?: string + /** completed scroll contexts */ 'search.scroll_total'?: string + /** completed scroll contexts + * @alias 'search.scroll_total' */ scto?: string + /** completed scroll contexts + * @alias 'search.scroll_total' */ searchScrollTotal?: string + /** completed scroll contexts */ 'pri.search.scroll_total'?: string + /** number of segments */ 'segments.count'?: string + /** number of segments + * @alias 'segments.count' */ sc?: string + /** number of segments + * @alias 'segments.count' */ segmentsCount?: string + /** number of segments */ 'pri.segments.count'?: string + /** memory used by segments */ 'segments.memory'?: string + /** memory used by segments + * @alias 'segments.memory' */ sm?: string + /** memory used by segments + * @alias 'segments.memory' */ segmentsMemory?: string + /** memory used by segments */ 'pri.segments.memory'?: string + /** memory used by index writer */ 'segments.index_writer_memory'?: string + /** memory used by index writer + * @alias 
'segments.index_writer_memory' */ siwm?: string + /** memory used by index writer + * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string + /** memory used by index writer */ 'pri.segments.index_writer_memory'?: string + /** memory used by version map */ 'segments.version_map_memory'?: string + /** memory used by version map + * @alias 'segments.version_map_memory' */ svmm?: string + /** memory used by version map + * @alias 'segments.version_map_memory' */ segmentsVersionMapMemory?: string + /** memory used by version map */ 'pri.segments.version_map_memory'?: string + /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields */ 'segments.fixed_bitset_memory'?: string + /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields + * @alias 'segments.fixed_bitset_memory' */ sfbm?: string + /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields + * @alias 'segments.fixed_bitset_memory' */ fixedBitsetMemory?: string + /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields */ 'pri.segments.fixed_bitset_memory'?: string + /** current warmer ops */ 'warmer.current'?: string + /** current warmer ops + * @alias 'warmer.current' */ wc?: string + /** current warmer ops + * @alias 'warmer.current' */ warmerCurrent?: string + /** current warmer ops */ 'pri.warmer.current'?: string + /** total warmer ops */ 'warmer.total'?: string + /** total warmer ops + * @alias 'warmer.total' */ wto?: string + /** total warmer ops + * @alias 'warmer.total' */ warmerTotal?: string + /** total warmer ops */ 'pri.warmer.total'?: string + /** time spent in warmers */ 'warmer.total_time'?: string + /** time spent in warmers + * @alias 'warmer.total_time' */ wtt?: string + /** time spent in warmers + * @alias 'warmer.total_time' */ warmerTotalTime?: string + /** time spent in warmers */ 'pri.warmer.total_time'?: string + /** number of current suggest ops */ 'suggest.current'?: string + /** number of current suggest ops + * @alias 'suggest.current' */ suc?: string + /** number of current suggest ops + * @alias 'suggest.current' */ suggestCurrent?: string + /** number of current suggest ops */ 'pri.suggest.current'?: string + /** time spent in suggest */ 'suggest.time'?: string + /** time spent in suggest + * @alias 'suggest.time' */ suti?: string + /** time spent in suggest + * @alias 'suggest.time' */ suggestTime?: string + /** time spent in suggest */ 'pri.suggest.time'?: string + /** number of suggest ops */ 'suggest.total'?: string + /** number of suggest ops + * @alias 'suggest.total' */ suto?: string + /** number of suggest ops + * @alias 'suggest.total' */ suggestTotal?: string + /** number of suggest ops */ 'pri.suggest.total'?: string + /** total used memory */ 'memory.total'?: string + /** total used memory + * @alias 'memory.total' */ tm?: string + /** total used memory + * @alias 'memory.total' */ memoryTotal?: string + /** total used memory */ 'pri.memory.total'?: string + /** indicates if the index is search throttled */ 'search.throttled'?: string + /** indicates if the index is search throttled + * @alias 'search.throttled' */ sth?: string + /** number of bulk shard ops */ 'bulk.total_operations'?: string + /** number of bulk shard ops + * @alias 'bulk.total_operations' */ bto?: string + /** number of bulk shard ops
+ * @alias 'bulk.total_operations' */ bulkTotalOperation?: string + /** number of bulk shard ops */ 'pri.bulk.total_operations'?: string + /** time spent in shard bulk */ 'bulk.total_time'?: string + /** time spent in shard bulk + * @alias 'bulk.total_time' */ btti?: string + /** time spent in shard bulk + * @alias 'bulk.total_time' */ bulkTotalTime?: string + /** time spent in shard bulk */ 'pri.bulk.total_time'?: string + /** total size in bytes of shard bulk */ 'bulk.total_size_in_bytes'?: string + /** total size in bytes of shard bulk + * @alias 'bulk.total_size_in_bytes' */ btsi?: string + /** total size in bytes of shard bulk + * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string + /** total size in bytes of shard bulk */ 'pri.bulk.total_size_in_bytes'?: string + /** average time spent in shard bulk */ 'bulk.avg_time'?: string + /** average time spent in shard bulk + * @alias 'bulk.avg_time' */ bati?: string + /** average time spent in shard bulk + * @alias 'bulk.avg_time' */ bulkAvgTime?: string + /** average time spent in shard bulk */ 'pri.bulk.avg_time'?: string + /** average size in bytes of shard bulk */ 'bulk.avg_size_in_bytes'?: string + /** average size in bytes of shard bulk + * @alias 'bulk.avg_size_in_bytes' */ basi?: string + /** average size in bytes of shard bulk + * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string + /** average size in bytes of shard bulk */ 'pri.bulk.avg_size_in_bytes'?: string } export interface CatIndicesRequest extends CatCatRequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** The unit used to display byte values. */ bytes?: Bytes @@ -8199,7 +11224,9 @@ export interface CatIndicesRequest extends CatCatRequestBase { master_timeout?: Duration /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never, h?: never, s?: never } @@ -8210,20 +11237,33 @@ export interface CatIndicesRequest extends CatCatRequestBase { export type CatIndicesResponse = CatIndicesIndicesRecord[] export interface CatMasterMasterRecord { + /** node id */ id?: string + /** host name */ host?: string + /** host name + * @alias host */ h?: string + /** ip address */ ip?: string + /** node name */ node?: string + /** node name + * @alias node */ n?: string } export interface CatMasterRequest extends CatCatRequestBase { -/** List of columns to appear in the response. Supports simple wildcards.
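// Editor's note: a sketch of `cat.master`; with `format: 'json'` the response
// is CatMasterMasterRecord[] carrying the elected master's id, host, ip and name.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const [master] = await client.cat.master({ format: 'json' })
console.log(master.node, master.ip)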
*/ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -8236,51 +11276,117 @@ export interface CatMasterRequest extends CatCatRequestBase { export type CatMasterResponse = CatMasterMasterRecord[] export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { + /** The identifier for the job. */ id?: Id + /** The type of analysis that the job performs. */ type?: string + /** The type of analysis that the job performs. + * @alias type */ t?: string + /** The time when the job was created. */ create_time?: string + /** The time when the job was created. + * @alias create_time */ ct?: string + /** The time when the job was created. + * @alias create_time */ createTime?: string + /** The version of Elasticsearch when the job was created. */ version?: VersionString + /** The version of Elasticsearch when the job was created. + * @alias version */ v?: VersionString + /** The name of the source index. */ source_index?: IndexName + /** The name of the source index. + * @alias source_index */ si?: IndexName + /** The name of the source index. + * @alias source_index */ sourceIndex?: IndexName + /** The name of the destination index. */ dest_index?: IndexName + /** The name of the destination index. + * @alias dest_index */ di?: IndexName + /** The name of the destination index. + * @alias dest_index */ destIndex?: IndexName + /** A description of the job. */ description?: string + /** A description of the job. + * @alias description */ d?: string + /** The approximate maximum amount of memory resources that are permitted for the job. */ model_memory_limit?: string + /** The approximate maximum amount of memory resources that are permitted for the job. + * @alias model_memory_limit */ mml?: string + /** The approximate maximum amount of memory resources that are permitted for the job. + * @alias model_memory_limit */ modelMemoryLimit?: string + /** The current status of the job. */ state?: string + /** The current status of the job. + * @alias state */ s?: string + /** Messages about the reason why the job failed. */ failure_reason?: string + /** Messages about the reason why the job failed. + * @alias failure_reason */ fr?: string + /** Messages about the reason why the job failed. + * @alias failure_reason */ failureReason?: string + /** The progress report for the job by phase. */ progress?: string + /** The progress report for the job by phase. + * @alias progress */ p?: string + /** Messages related to the selection of a node. 
export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord {
+ /** The identifier for the job. */
id?: Id
+ /** The type of analysis that the job performs. */
type?: string
+ /** The type of analysis that the job performs.
+ * @alias type */
t?: string
+ /** The time when the job was created. */
create_time?: string
+ /** The time when the job was created.
+ * @alias create_time */
ct?: string
+ /** The time when the job was created.
+ * @alias create_time */
createTime?: string
+ /** The version of Elasticsearch when the job was created. */
version?: VersionString
+ /** The version of Elasticsearch when the job was created.
+ * @alias version */
v?: VersionString
+ /** The name of the source index. */
source_index?: IndexName
+ /** The name of the source index.
+ * @alias source_index */
si?: IndexName
+ /** The name of the source index.
+ * @alias source_index */
sourceIndex?: IndexName
+ /** The name of the destination index. */
dest_index?: IndexName
+ /** The name of the destination index.
+ * @alias dest_index */
di?: IndexName
+ /** The name of the destination index.
+ * @alias dest_index */
destIndex?: IndexName
+ /** A description of the job. */
description?: string
+ /** A description of the job.
+ * @alias description */
d?: string
+ /** The approximate maximum amount of memory resources that are permitted for the job. */
model_memory_limit?: string
+ /** The approximate maximum amount of memory resources that are permitted for the job.
+ * @alias model_memory_limit */
mml?: string
+ /** The approximate maximum amount of memory resources that are permitted for the job.
+ * @alias model_memory_limit */
modelMemoryLimit?: string
+ /** The current status of the job. */
state?: string
+ /** The current status of the job.
+ * @alias state */
s?: string
+ /** Messages about the reason why the job failed. */
failure_reason?: string
+ /** Messages about the reason why the job failed.
+ * @alias failure_reason */
fr?: string
+ /** Messages about the reason why the job failed.
+ * @alias failure_reason */
failureReason?: string
+ /** The progress report for the job by phase. */
progress?: string
+ /** The progress report for the job by phase.
+ * @alias progress */
p?: string
+ /** Messages related to the selection of a node.
*/
assignment_explanation?: string
+ /** Messages related to the selection of a node.
+ * @alias assignment_explanation */
ae?: string
+ /** Messages related to the selection of a node.
+ * @alias assignment_explanation */
assignmentExplanation?: string
+ /** The unique identifier of the assigned node. */
'node.id'?: Id
+ /** The unique identifier of the assigned node.
+ * @alias 'node.id' */
ni?: Id
+ /** The unique identifier of the assigned node.
+ * @alias 'node.id' */
nodeId?: Id
+ /** The name of the assigned node. */
'node.name'?: Name
+ /** The name of the assigned node.
+ * @alias 'node.name' */
nn?: Name
+ /** The name of the assigned node.
+ * @alias 'node.name' */
nodeName?: Name
+ /** The ephemeral identifier of the assigned node. */
'node.ephemeral_id'?: Id
+ /** The ephemeral identifier of the assigned node.
+ * @alias 'node.ephemeral_id' */
ne?: Id
+ /** The ephemeral identifier of the assigned node.
+ * @alias 'node.ephemeral_id' */
nodeEphemeralId?: Id
+ /** The network address of the assigned node. */
'node.address'?: string
+ /** The network address of the assigned node.
+ * @alias 'node.address' */
na?: string
+ /** The network address of the assigned node.
+ * @alias 'node.address' */
nodeAddress?: string
}
export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase {
-/** The ID of the data frame analytics to fetch */
+ /** The ID of the data frame analytics to fetch */
id?: Id
/** Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) */
allow_no_match?: boolean
/** The unit used to display byte values. */
bytes?: Bytes
/** Comma-separated list of column names to display. */
h?: CatCatDfaColumns
- /** Comma-separated list of column names or column aliases used to sort the response. */
+ /** Comma-separated list of column names or column aliases used to sort the
+ * response. */
s?: CatCatDfaColumns
/** Unit used to display time values. */
time?: TimeUnit
@@ -8301,44 +11408,116 @@ export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase {
export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[]
export interface CatMlDatafeedsDatafeedsRecord {
+ /** The datafeed identifier. */
id?: string
+ /** The status of the datafeed. */
state?: MlDatafeedState
+ /** The status of the datafeed.
+ * @alias state */
s?: MlDatafeedState
+ /** For started datafeeds only, contains messages relating to the selection of a node. */
assignment_explanation?: string
+ /** For started datafeeds only, contains messages relating to the selection of a node.
+ * @alias assignment_explanation */
ae?: string
+ /** The number of buckets processed. */
'buckets.count'?: string
+ /** The number of buckets processed.
+ * @alias 'buckets.count' */
bc?: string
+ /** The number of buckets processed.
+ * @alias 'buckets.count' */
bucketsCount?: string
+ /** The number of searches run by the datafeed. */
'search.count'?: string
+ /** The number of searches run by the datafeed.
+ * @alias 'search.count' */
sc?: string
+ /** The number of searches run by the datafeed.
+ * @alias 'search.count' */
searchCount?: string
+ /** The total time the datafeed spent searching, in milliseconds. */
'search.time'?: string
+ /** The total time the datafeed spent searching, in milliseconds.
+ * @alias 'search.time' */
st?: string
+ /** The total time the datafeed spent searching, in milliseconds.
+ * @alias 'search.time' */ searchTime?: string + /** The average search time per bucket, in milliseconds. */ 'search.bucket_avg'?: string + /** The average search time per bucket, in milliseconds. + * @alias 'search.bucket_avg' */ sba?: string + /** The average search time per bucket, in milliseconds. + * @alias 'search.bucket_avg' */ searchBucketAvg?: string + /** The exponential average search time per hour, in milliseconds. */ 'search.exp_avg_hour'?: string + /** The exponential average search time per hour, in milliseconds. + * @alias 'search.exp_avg_hour' */ seah?: string + /** The exponential average search time per hour, in milliseconds. + * @alias 'search.exp_avg_hour' */ searchExpAvgHour?: string + /** The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.id'?: string + /** The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.id' */ ni?: string + /** The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.id' */ nodeId?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.name'?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.name' */ nn?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.name' */ nodeName?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.ephemeral_id'?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.ephemeral_id' */ ne?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.ephemeral_id' */ nodeEphemeralId?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.address'?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.address' */ na?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.address' */ nodeAddress?: string } export interface CatMlDatafeedsRequest extends CatCatRequestBase { -/** A numerical character string that uniquely identifies the datafeed. */ + /** A numerical character string that uniquely identifies the datafeed. */ datafeed_id?: Id - /** Specifies what to do when the request: * Contains wildcard expressions and there are no datafeeds that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. 
If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * * Contains wildcard expressions and there are no datafeeds that match. + * * Contains the `_all` string or no identifiers and there are no matches. + * * Contains wildcard expressions and there are only partial matches. + * + * If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when + * there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only + * partial matches. */ allow_no_match?: boolean /** Comma-separated list of column names to display. */ h?: CatCatDatafeedColumns @@ -8355,186 +11534,551 @@ export interface CatMlDatafeedsRequest extends CatCatRequestBase { export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] export interface CatMlJobsJobsRecord { + /** The anomaly detection job identifier. */ id?: Id + /** The status of the anomaly detection job. */ state?: MlJobState + /** The status of the anomaly detection job. + * @alias state */ s?: MlJobState + /** For open jobs only, the amount of time the job has been opened. */ opened_time?: string + /** For open jobs only, the amount of time the job has been opened. + * @alias opened_time */ ot?: string + /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */ assignment_explanation?: string + /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. + * @alias assignment_explanation */ ae?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. */ 'data.processed_records'?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. + * @alias 'data.processed_records' */ dpr?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. + * @alias 'data.processed_records' */ dataProcessedRecords?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. + * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. */ 'data.processed_fields'?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. 
+ * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. + * @alias 'data.processed_fields' */ dpf?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. + * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. + * @alias 'data.processed_fields' */ dataProcessedFields?: string + /** The number of bytes of input data posted to the anomaly detection job. */ 'data.input_bytes'?: ByteSize + /** The number of bytes of input data posted to the anomaly detection job. + * @alias 'data.input_bytes' */ dib?: ByteSize + /** The number of bytes of input data posted to the anomaly detection job. + * @alias 'data.input_bytes' */ dataInputBytes?: ByteSize + /** The number of input documents posted to the anomaly detection job. */ 'data.input_records'?: string + /** The number of input documents posted to the anomaly detection job. + * @alias 'data.input_records' */ dir?: string + /** The number of input documents posted to the anomaly detection job. + * @alias 'data.input_records' */ dataInputRecords?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. */ 'data.input_fields'?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. + * @alias 'data.input_fields' */ dif?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. + * @alias 'data.input_fields' */ dataInputFields?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. */ 'data.invalid_dates'?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. + * @alias 'data.invalid_dates' */ did?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. + * @alias 'data.invalid_dates' */ dataInvalidDates?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. + * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. */ 'data.missing_fields'?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. 
+ * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. + * @alias 'data.missing_fields' */ dmf?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. + * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. + * @alias 'data.missing_fields' */ dataMissingFields?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. */ 'data.out_of_order_timestamps'?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. + * @alias 'data.out_of_order_timestamps' */ doot?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. + * @alias 'data.out_of_order_timestamps' */ dataOutOfOrderTimestamps?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. */ 'data.empty_buckets'?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. + * @alias 'data.empty_buckets' */ deb?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. + * @alias 'data.empty_buckets' */ dataEmptyBuckets?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. */ 'data.sparse_buckets'?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. 
+ * @alias 'data.sparse_buckets' */ dsb?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. + * @alias 'data.sparse_buckets' */ dataSparseBuckets?: string + /** The total number of buckets processed. */ 'data.buckets'?: string + /** The total number of buckets processed. + * @alias 'data.buckets' */ db?: string + /** The total number of buckets processed. + * @alias 'data.buckets' */ dataBuckets?: string + /** The timestamp of the earliest chronologically input document. */ 'data.earliest_record'?: string + /** The timestamp of the earliest chronologically input document. + * @alias 'data.earliest_record' */ der?: string + /** The timestamp of the earliest chronologically input document. + * @alias 'data.earliest_record' */ dataEarliestRecord?: string + /** The timestamp of the latest chronologically input document. */ 'data.latest_record'?: string + /** The timestamp of the latest chronologically input document. + * @alias 'data.latest_record' */ dlr?: string + /** The timestamp of the latest chronologically input document. + * @alias 'data.latest_record' */ dataLatestRecord?: string + /** The timestamp at which data was last analyzed, according to server time. */ 'data.last'?: string + /** The timestamp at which data was last analyzed, according to server time. + * @alias 'data.last' */ dl?: string + /** The timestamp at which data was last analyzed, according to server time. + * @alias 'data.last' */ dataLast?: string + /** The timestamp of the last bucket that did not contain any data. */ 'data.last_empty_bucket'?: string + /** The timestamp of the last bucket that did not contain any data. + * @alias 'data.last_empty_bucket' */ dleb?: string + /** The timestamp of the last bucket that did not contain any data. + * @alias 'data.last_empty_bucket' */ dataLastEmptyBucket?: string + /** The timestamp of the last bucket that was considered sparse. */ 'data.last_sparse_bucket'?: string + /** The timestamp of the last bucket that was considered sparse. + * @alias 'data.last_sparse_bucket' */ dlsb?: string + /** The timestamp of the last bucket that was considered sparse. + * @alias 'data.last_sparse_bucket' */ dataLastSparseBucket?: string + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. */ 'model.bytes'?: ByteSize + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. + * @alias 'model.bytes' */ mb?: ByteSize + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. + * @alias 'model.bytes' */ modelBytes?: ByteSize + /** The status of the mathematical models. */ 'model.memory_status'?: MlMemoryStatus + /** The status of the mathematical models. + * @alias 'model.memory_status' */ mms?: MlMemoryStatus + /** The status of the mathematical models. + * @alias 'model.memory_status' */ modelMemoryStatus?: MlMemoryStatus + /** The number of bytes over the high limit for memory usage at the last allocation failure. 
*/ 'model.bytes_exceeded'?: ByteSize + /** The number of bytes over the high limit for memory usage at the last allocation failure. + * @alias 'model.bytes_exceeded' */ mbe?: ByteSize + /** The number of bytes over the high limit for memory usage at the last allocation failure. + * @alias 'model.bytes_exceeded' */ modelBytesExceeded?: ByteSize + /** The upper limit for model memory usage, checked on increasing values. */ 'model.memory_limit'?: string + /** The upper limit for model memory usage, checked on increasing values. + * @alias 'model.memory_limit' */ mml?: string + /** The upper limit for model memory usage, checked on increasing values. + * @alias 'model.memory_limit' */ modelMemoryLimit?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ 'model.by_fields'?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.by_fields' */ mbf?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.by_fields' */ modelByFields?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ 'model.over_fields'?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.over_fields' */ mof?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.over_fields' */ modelOverFields?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ 'model.partition_fields'?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.partition_fields' */ mpf?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.partition_fields' */ modelPartitionFields?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. */ 'model.bucket_allocation_failures'?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. + * @alias 'model.bucket_allocation_failures' */ mbaf?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. + * @alias 'model.bucket_allocation_failures' */ modelBucketAllocationFailures?: string + /** The status of categorization for the job. */ 'model.categorization_status'?: MlCategorizationStatus + /** The status of categorization for the job. + * @alias 'model.categorization_status' */ mcs?: MlCategorizationStatus + /** The status of categorization for the job. 
+ * @alias 'model.categorization_status' */ modelCategorizationStatus?: MlCategorizationStatus + /** The number of documents that have had a field categorized. */ 'model.categorized_doc_count'?: string + /** The number of documents that have had a field categorized. + * @alias 'model.categorized_doc_count' */ mcdc?: string + /** The number of documents that have had a field categorized. + * @alias 'model.categorized_doc_count' */ modelCategorizedDocCount?: string + /** The number of categories created by categorization. */ 'model.total_category_count'?: string + /** The number of categories created by categorization. + * @alias 'model.total_category_count' */ mtcc?: string + /** The number of categories created by categorization. + * @alias 'model.total_category_count' */ modelTotalCategoryCount?: string + /** The number of categories that match more than 1% of categorized documents. */ 'model.frequent_category_count'?: string + /** The number of categories that match more than 1% of categorized documents. + * @alias 'model.frequent_category_count' */ modelFrequentCategoryCount?: string + /** The number of categories that match just one categorized document. */ 'model.rare_category_count'?: string + /** The number of categories that match just one categorized document. + * @alias 'model.rare_category_count' */ mrcc?: string + /** The number of categories that match just one categorized document. + * @alias 'model.rare_category_count' */ modelRareCategoryCount?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. */ 'model.dead_category_count'?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. + * @alias 'model.dead_category_count' */ mdcc?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. + * @alias 'model.dead_category_count' */ modelDeadCategoryCount?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. */ 'model.failed_category_count'?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. + * @alias 'model.failed_category_count' */ mfcc?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. 
+ * @alias 'model.failed_category_count' */ modelFailedCategoryCount?: string + /** The timestamp when the model stats were gathered, according to server time. */ 'model.log_time'?: string + /** The timestamp when the model stats were gathered, according to server time. + * @alias 'model.log_time' */ mlt?: string + /** The timestamp when the model stats were gathered, according to server time. + * @alias 'model.log_time' */ modelLogTime?: string + /** The timestamp of the last record when the model stats were gathered. */ 'model.timestamp'?: string + /** The timestamp of the last record when the model stats were gathered. + * @alias 'model.timestamp' */ mt?: string + /** The timestamp of the last record when the model stats were gathered. + * @alias 'model.timestamp' */ modelTimestamp?: string + /** The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. */ 'forecasts.total'?: string + /** The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. + * @alias 'forecasts.total' */ ft?: string + /** The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. + * @alias 'forecasts.total' */ forecastsTotal?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.min'?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.min' */ fmmin?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.min' */ forecastsMemoryMin?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.max'?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.max' */ fmmax?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.max' */ forecastsMemoryMax?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.avg'?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.avg' */ fmavg?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.avg' */ forecastsMemoryAvg?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.total'?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.total' */ fmt?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.total' */ forecastsMemoryTotal?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.min'?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.min' */ frmin?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. 
+ * @alias 'forecasts.records.min' */
forecastsRecordsMin?: string
+ /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. */
'forecasts.records.max'?: string
+ /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.records.max' */
frmax?: string
+ /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.records.max' */
forecastsRecordsMax?: string
+ /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. */
'forecasts.records.avg'?: string
+ /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.records.avg' */
fravg?: string
+ /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.records.avg' */
forecastsRecordsAvg?: string
+ /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. */
'forecasts.records.total'?: string
+ /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.records.total' */
frt?: string
+ /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.records.total' */
forecastsRecordsTotal?: string
+ /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job. */
'forecasts.time.min'?: string
+ /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.min' */
ftmin?: string
+ /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.min' */
forecastsTimeMin?: string
+ /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job. */
'forecasts.time.max'?: string
+ /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.max' */
ftmax?: string
+ /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.max' */
forecastsTimeMax?: string
+ /** The average runtime in milliseconds for forecasts related to the anomaly detection job. */
'forecasts.time.avg'?: string
+ /** The average runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.avg' */
ftavg?: string
+ /** The average runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.avg' */
forecastsTimeAvg?: string
+ /** The total runtime in milliseconds for forecasts related to the anomaly detection job. */
'forecasts.time.total'?: string
+ /** The total runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.total' */
ftt?: string
+ /** The total runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.total' */
forecastsTimeTotal?: string
+ /** The unique identifier of the assigned node. */
'node.id'?: NodeId
+ /** The unique identifier of the assigned node.
+ * @alias 'node.id' */
ni?: NodeId
+ /** The unique identifier of the assigned node.
+ * @alias 'node.id' */ nodeId?: NodeId + /** The name of the assigned node. */ 'node.name'?: string + /** The name of the assigned node. + * @alias 'node.name' */ nn?: string + /** The name of the assigned node. + * @alias 'node.name' */ nodeName?: string + /** The ephemeral identifier of the assigned node. */ 'node.ephemeral_id'?: NodeId + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ ne?: NodeId + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ nodeEphemeralId?: NodeId + /** The network address of the assigned node. */ 'node.address'?: string + /** The network address of the assigned node. + * @alias 'node.address' */ na?: string + /** The network address of the assigned node. + * @alias 'node.address' */ nodeAddress?: string + /** The number of bucket results produced by the job. */ 'buckets.count'?: string + /** The number of bucket results produced by the job. + * @alias 'buckets.count' */ bc?: string + /** The number of bucket results produced by the job. + * @alias 'buckets.count' */ bucketsCount?: string + /** The sum of all bucket processing times, in milliseconds. */ 'buckets.time.total'?: string + /** The sum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.total' */ btt?: string + /** The sum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.total' */ bucketsTimeTotal?: string + /** The minimum of all bucket processing times, in milliseconds. */ 'buckets.time.min'?: string + /** The minimum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.min' */ btmin?: string + /** The minimum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.min' */ bucketsTimeMin?: string + /** The maximum of all bucket processing times, in milliseconds. */ 'buckets.time.max'?: string + /** The maximum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.max' */ btmax?: string + /** The maximum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.max' */ bucketsTimeMax?: string + /** The exponential moving average of all bucket processing times, in milliseconds. */ 'buckets.time.exp_avg'?: string + /** The exponential moving average of all bucket processing times, in milliseconds. + * @alias 'buckets.time.exp_avg' */ btea?: string + /** The exponential moving average of all bucket processing times, in milliseconds. + * @alias 'buckets.time.exp_avg' */ bucketsTimeExpAvg?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. */ 'buckets.time.exp_avg_hour'?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. + * @alias 'buckets.time.exp_avg_hour' */ bteah?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. + * @alias 'buckets.time.exp_avg_hour' */ bucketsTimeExpAvgHour?: string } export interface CatMlJobsRequest extends CatCatRequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id?: Id - /** Specifies what to do when the request: * Contains wildcard expressions and there are no jobs that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. 
If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * * Contains wildcard expressions and there are no jobs that match. + * * Contains the `_all` string or no identifiers and there are no matches. + * * Contains wildcard expressions and there are only partial matches. + * + * If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there + * are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial + * matches. */ allow_no_match?: boolean /** The unit used to display byte values. */ bytes?: Bytes @@ -8553,9 +12097,11 @@ export interface CatMlJobsRequest extends CatCatRequestBase { export type CatMlJobsResponse = CatMlJobsJobsRecord[] export interface CatMlTrainedModelsRequest extends CatCatRequestBase { -/** A unique identifier for the trained model. */ + /** A unique identifier for the trained model. */ model_id?: Id - /** Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. + * If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. + * If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean /** The unit used to display byte values. */ bytes?: Bytes @@ -8578,73 +12124,169 @@ export interface CatMlTrainedModelsRequest extends CatCatRequestBase { export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[] export interface CatMlTrainedModelsTrainedModelsRecord { + /** The model identifier. */ id?: Id + /** Information about the creator of the model. */ created_by?: string + /** Information about the creator of the model. + * @alias created_by */ c?: string + /** Information about the creator of the model. + * @alias created_by */ createdBy?: string + /** The estimated heap size to keep the model in memory. */ heap_size?: ByteSize + /** The estimated heap size to keep the model in memory. + * @alias heap_size */ hs?: ByteSize + /** The estimated heap size to keep the model in memory. + * @alias heap_size */ modelHeapSize?: ByteSize + /** The estimated number of operations to use the model. + * This number helps to measure the computational complexity of the model. */ operations?: string + /** The estimated number of operations to use the model. + * This number helps to measure the computational complexity of the model. + * @alias operations */ o?: string + /** The estimated number of operations to use the model. + * This number helps to measure the computational complexity of the model. 
+ * @alias operations */
modelOperations?: string
+ /** The license level of the model. */
license?: string
+ /** The license level of the model.
+ * @alias license */
l?: string
+ /** The time the model was created. */
create_time?: DateTime
+ /** The time the model was created.
+ * @alias create_time */
ct?: DateTime
+ /** The version of Elasticsearch when the model was created. */
version?: VersionString
+ /** The version of Elasticsearch when the model was created.
+ * @alias version */
v?: VersionString
+ /** A description of the model. */
description?: string
+ /** A description of the model.
+ * @alias description */
d?: string
+ /** The number of pipelines that are referencing the model. */
'ingest.pipelines'?: string
+ /** The number of pipelines that are referencing the model.
+ * @alias 'ingest.pipelines' */
ip?: string
+ /** The number of pipelines that are referencing the model.
+ * @alias 'ingest.pipelines' */
ingestPipelines?: string
+ /** The total number of documents that are processed by the model. */
'ingest.count'?: string
+ /** The total number of documents that are processed by the model.
+ * @alias 'ingest.count' */
ic?: string
+ /** The total number of documents that are processed by the model.
+ * @alias 'ingest.count' */
ingestCount?: string
+ /** The total time spent processing documents with the model. */
'ingest.time'?: string
+ /** The total time spent processing documents with the model.
+ * @alias 'ingest.time' */
it?: string
+ /** The total time spent processing documents with the model.
+ * @alias 'ingest.time' */
ingestTime?: string
+ /** The total number of documents that are currently being handled by the model. */
'ingest.current'?: string
+ /** The total number of documents that are currently being handled by the model.
+ * @alias 'ingest.current' */
icurr?: string
+ /** The total number of documents that are currently being handled by the model.
+ * @alias 'ingest.current' */
ingestCurrent?: string
+ /** The total number of failed ingest attempts with the model. */
'ingest.failed'?: string
+ /** The total number of failed ingest attempts with the model.
+ * @alias 'ingest.failed' */
if?: string
+ /** The total number of failed ingest attempts with the model.
+ * @alias 'ingest.failed' */
ingestFailed?: string
+ /** The identifier for the data frame analytics job that created the model.
+ * Only displayed if the job is still available. */
'data_frame.id'?: string
+ /** The identifier for the data frame analytics job that created the model.
+ * Only displayed if the job is still available.
+ * @alias 'data_frame.id' */
dfid?: string
+ /** The identifier for the data frame analytics job that created the model.
+ * Only displayed if the job is still available.
+ * @alias 'data_frame.id' */
dataFrameAnalytics?: string
+ /** The time the data frame analytics job was created. */
'data_frame.create_time'?: string
+ /** The time the data frame analytics job was created.
+ * @alias 'data_frame.create_time' */
dft?: string
+ /** The time the data frame analytics job was created.
+ * @alias 'data_frame.create_time' */
dataFrameAnalyticsTime?: string
+ /** The source index used to train in the data frame analysis. */
'data_frame.source_index'?: string
+ /** The source index used to train in the data frame analysis.
+ * @alias 'data_frame.source_index' */
dfsi?: string
+ /** The source index used to train in the data frame analysis.
+ * @alias 'data_frame.source_index' */ dataFrameAnalyticsSrcIndex?: string + /** The analysis used by the data frame to build the model. */ 'data_frame.analysis'?: string + /** The analysis used by the data frame to build the model. + * @alias 'data_frame.analysis' */ dfa?: string + /** The analysis used by the data frame to build the model. + * @alias 'data_frame.analysis' */ dataFrameAnalyticsAnalysis?: string type?: string } export interface CatNodeattrsNodeAttributesRecord { + /** The node name. */ node?: string + /** The unique node identifier. */ id?: string + /** The process identifier. */ pid?: string + /** The host name. */ host?: string + /** The host name. + * @alias host */ h?: string + /** The IP address. */ ip?: string + /** The IP address. + * @alias ip */ i?: string + /** The bound transport port. */ port?: string + /** The attribute name. */ attr?: string + /** The attribute value. */ value?: string } export interface CatNodeattrsRequest extends CatCatRequestBase { -/** List of columns to appear in the response. Supports simple wildcards. */ + /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -8657,277 +12299,721 @@ export interface CatNodeattrsRequest extends CatCatRequestBase { export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] export interface CatNodesNodesRecord { + /** The unique node identifier. */ id?: Id + /** The unique node identifier. + * @alias id */ nodeId?: Id + /** The process identifier. */ pid?: string + /** The process identifier. + * @alias pid */ p?: string + /** The IP address. */ ip?: string + /** The IP address. + * @alias ip */ i?: string + /** The bound transport port. */ port?: string + /** The bound transport port. + * @alias port */ po?: string + /** The bound HTTP address. */ http_address?: string + /** The bound HTTP address. + * @alias http_address */ http?: string + /** The Elasticsearch version. */ version?: VersionString + /** The Elasticsearch version. + * @alias version */ v?: VersionString + /** The Elasticsearch distribution flavor. */ flavor?: string + /** The Elasticsearch distribution flavor. + * @alias flavor */ f?: string + /** The Elasticsearch distribution type. */ type?: string + /** The Elasticsearch distribution type. + * @alias type */ t?: string + /** The Elasticsearch build hash. */ build?: string + /** The Elasticsearch build hash. + * @alias build */ b?: string + /** The Java version. 
*/ jdk?: string + /** The Java version. + * @alias jdk */ j?: string + /** The total disk space. */ 'disk.total'?: ByteSize + /** The total disk space. + * @alias 'disk.total' */ dt?: ByteSize + /** The total disk space. + * @alias 'disk.total' */ diskTotal?: ByteSize + /** The used disk space. */ 'disk.used'?: ByteSize + /** The used disk space. + * @alias 'disk.used' */ du?: ByteSize + /** The used disk space. + * @alias 'disk.used' */ diskUsed?: ByteSize + /** The available disk space. */ 'disk.avail'?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ d?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ da?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ disk?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ diskAvail?: ByteSize + /** The used disk space percentage. */ 'disk.used_percent'?: Percentage + /** The used disk space percentage. + * @alias 'disk.used_percent' */ dup?: Percentage + /** The used disk space percentage. + * @alias 'disk.used_percent' */ diskUsedPercent?: Percentage + /** The used heap. */ 'heap.current'?: string + /** The used heap. + * @alias 'heap.current' */ hc?: string + /** The used heap. + * @alias 'heap.current' */ heapCurrent?: string + /** The used heap ratio. */ 'heap.percent'?: Percentage + /** The used heap ratio. + * @alias 'heap.percent' */ hp?: Percentage + /** The used heap ratio. + * @alias 'heap.percent' */ heapPercent?: Percentage + /** The maximum configured heap. */ 'heap.max'?: string + /** The maximum configured heap. + * @alias 'heap.max' */ hm?: string + /** The maximum configured heap. + * @alias 'heap.max' */ heapMax?: string + /** The used machine memory. */ 'ram.current'?: string + /** The used machine memory. + * @alias 'ram.current' */ rc?: string + /** The used machine memory. + * @alias 'ram.current' */ ramCurrent?: string + /** The used machine memory ratio. */ 'ram.percent'?: Percentage + /** The used machine memory ratio. + * @alias 'ram.percent' */ rp?: Percentage + /** The used machine memory ratio. + * @alias 'ram.percent' */ ramPercent?: Percentage + /** The total machine memory. */ 'ram.max'?: string + /** The total machine memory. + * @alias 'ram.max' */ rn?: string + /** The total machine memory. + * @alias 'ram.max' */ ramMax?: string + /** The used file descriptors. */ 'file_desc.current'?: string + /** The used file descriptors. + * @alias 'file_desc.current' */ fdc?: string + /** The used file descriptors. + * @alias 'file_desc.current' */ fileDescriptorCurrent?: string + /** The used file descriptor ratio. */ 'file_desc.percent'?: Percentage + /** The used file descriptor ratio. + * @alias 'file_desc.percent' */ fdp?: Percentage + /** The used file descriptor ratio. + * @alias 'file_desc.percent' */ fileDescriptorPercent?: Percentage + /** The maximum number of file descriptors. */ 'file_desc.max'?: string + /** The maximum number of file descriptors. + * @alias 'file_desc.max' */ fdm?: string + /** The maximum number of file descriptors. + * @alias 'file_desc.max' */ fileDescriptorMax?: string + /** The recent system CPU usage as a percentage. */ cpu?: string + /** The load average for the most recent minute. */ load_1m?: string + /** The load average for the last five minutes. */ load_5m?: string + /** The load average for the last fifteen minutes. */ load_15m?: string + /** The load average for the last fifteen minutes. + * @alias load_15m */ l?: string + /** The node uptime. */ uptime?: string + /** The node uptime. 
+ * @alias uptime */
u?: string
+ /** The roles of the node.
+ * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only). */
'node.role'?: string
+ /** The roles of the node.
+ * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only).
+ * @alias 'node.role' */
r?: string
+ /** The roles of the node.
+ * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only).
+ * @alias 'node.role' */
role?: string
+ /** The roles of the node.
+ * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only).
+ * @alias 'node.role' */
nodeRole?: string
+ /** Indicates whether the node is the elected master node.
+ * Returned values include `*`(elected master) and `-`(not elected master). */
master?: string
+ /** Indicates whether the node is the elected master node.
+ * Returned values include `*`(elected master) and `-`(not elected master).
+ * @alias master */
m?: string
+ /** The node name. */
name?: Name
+ /** The node name.
+ * @alias name */
n?: Name
+ /** The size of completion. */
'completion.size'?: string
+ /** The size of completion.
+ * @alias 'completion.size' */
cs?: string
+ /** The size of completion.
+ * @alias 'completion.size' */
completionSize?: string
+ /** The used fielddata cache. */
'fielddata.memory_size'?: string
+ /** The used fielddata cache.
+ * @alias 'fielddata.memory_size' */
fm?: string
+ /** The used fielddata cache.
+ * @alias 'fielddata.memory_size' */
fielddataMemory?: string
+ /** The fielddata evictions. */
'fielddata.evictions'?: string
+ /** The fielddata evictions.
+ * @alias 'fielddata.evictions' */
fe?: string
+ /** The fielddata evictions.
+ * @alias 'fielddata.evictions' */
fielddataEvictions?: string
+ /** The used query cache. */
'query_cache.memory_size'?: string
+ /** The used query cache.
+ * @alias 'query_cache.memory_size' */
qcm?: string
+ /** The used query cache.
+ * @alias 'query_cache.memory_size' */
queryCacheMemory?: string
+ /** The query cache evictions. */
'query_cache.evictions'?: string
+ /** The query cache evictions.
+ * @alias 'query_cache.evictions' */
qce?: string
+ /** The query cache evictions.
+ * @alias 'query_cache.evictions' */
queryCacheEvictions?: string
+ /** The query cache hit counts. */
'query_cache.hit_count'?: string
+ /** The query cache hit counts.
+ * @alias 'query_cache.hit_count' */
qchc?: string
+ /** The query cache hit counts.
+ * @alias 'query_cache.hit_count' */
queryCacheHitCount?: string
+ /** The query cache miss counts. */
'query_cache.miss_count'?: string
+ /** The query cache miss counts.
+ * @alias 'query_cache.miss_count' */ qcmc?: string + /** The query cache miss counts. + * @alias 'query_cache.miss_count' */ queryCacheMissCount?: string + /** The used request cache. */ 'request_cache.memory_size'?: string + /** The used request cache. + * @alias 'request_cache.memory_size' */ rcm?: string + /** The used request cache. + * @alias 'request_cache.memory_size' */ requestCacheMemory?: string + /** The request cache evictions. */ 'request_cache.evictions'?: string + /** The request cache evictions. + * @alias 'request_cache.evictions' */ rce?: string + /** The request cache evictions. + * @alias 'request_cache.evictions' */ requestCacheEvictions?: string + /** The request cache hit counts. */ 'request_cache.hit_count'?: string + /** The request cache hit counts. + * @alias 'request_cache.hit_count' */ rchc?: string + /** The request cache hit counts. + * @alias 'request_cache.hit_count' */ requestCacheHitCount?: string + /** The request cache miss counts. */ 'request_cache.miss_count'?: string + /** The request cache miss counts. + * @alias 'request_cache.miss_count' */ rcmc?: string + /** The request cache miss counts. + * @alias 'request_cache.miss_count' */ requestCacheMissCount?: string + /** The number of flushes. */ 'flush.total'?: string + /** The number of flushes. + * @alias 'flush.total' */ ft?: string + /** The number of flushes. + * @alias 'flush.total' */ flushTotal?: string + /** The time spent in flush. */ 'flush.total_time'?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ ftt?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ flushTotalTime?: string + /** The number of current get ops. */ 'get.current'?: string + /** The number of current get ops. + * @alias 'get.current' */ gc?: string + /** The number of current get ops. + * @alias 'get.current' */ getCurrent?: string + /** The time spent in get. */ 'get.time'?: string + /** The time spent in get. + * @alias 'get.time' */ gti?: string + /** The time spent in get. + * @alias 'get.time' */ getTime?: string + /** The number of get ops. */ 'get.total'?: string + /** The number of get ops. + * @alias 'get.total' */ gto?: string + /** The number of get ops. + * @alias 'get.total' */ getTotal?: string + /** The time spent in successful gets. */ 'get.exists_time'?: string + /** The time spent in successful gets. + * @alias 'get.exists_time' */ geti?: string + /** The time spent in successful gets. + * @alias 'get.exists_time' */ getExistsTime?: string + /** The number of successful get operations. */ 'get.exists_total'?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ geto?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ getExistsTotal?: string + /** The time spent in failed gets. */ 'get.missing_time'?: string + /** The time spent in failed gets. + * @alias 'get.missing_time' */ gmti?: string + /** The time spent in failed gets. + * @alias 'get.missing_time' */ getMissingTime?: string + /** The number of failed gets. */ 'get.missing_total'?: string + /** The number of failed gets. + * @alias 'get.missing_total' */ gmto?: string + /** The number of failed gets. + * @alias 'get.missing_total' */ getMissingTotal?: string + /** The number of current deletions. */ 'indexing.delete_current'?: string + /** The number of current deletions. + * @alias 'indexing.delete_current' */ idc?: string + /** The number of current deletions. 
+ * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string + /** The time spent in deletions. */ 'indexing.delete_time'?: string + /** The time spent in deletions. + * @alias 'indexing.delete_time' */ idti?: string + /** The time spent in deletions. + * @alias 'indexing.delete_time' */ indexingDeleteTime?: string + /** The number of delete operations. */ 'indexing.delete_total'?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ idto?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string + /** The number of current indexing operations. */ 'indexing.index_current'?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ iic?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ indexingIndexCurrent?: string + /** The time spent in indexing. */ 'indexing.index_time'?: string + /** The time spent in indexing. + * @alias 'indexing.index_time' */ iiti?: string + /** The time spent in indexing. + * @alias 'indexing.index_time' */ indexingIndexTime?: string + /** The number of indexing operations. */ 'indexing.index_total'?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ iito?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ indexingIndexTotal?: string + /** The number of failed indexing operations. */ 'indexing.index_failed'?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ iif?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ indexingIndexFailed?: string + /** The number of current merges. */ 'merges.current'?: string + /** The number of current merges. + * @alias 'merges.current' */ mc?: string + /** The number of current merges. + * @alias 'merges.current' */ mergesCurrent?: string + /** The number of current merging docs. */ 'merges.current_docs'?: string + /** The number of current merging docs. + * @alias 'merges.current_docs' */ mcd?: string + /** The number of current merging docs. + * @alias 'merges.current_docs' */ mergesCurrentDocs?: string + /** The size of current merges. */ 'merges.current_size'?: string + /** The size of current merges. + * @alias 'merges.current_size' */ mcs?: string + /** The size of current merges. + * @alias 'merges.current_size' */ mergesCurrentSize?: string + /** The number of completed merge operations. */ 'merges.total'?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ mt?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ mergesTotal?: string + /** The docs merged. */ 'merges.total_docs'?: string + /** The docs merged. + * @alias 'merges.total_docs' */ mtd?: string + /** The docs merged. + * @alias 'merges.total_docs' */ mergesTotalDocs?: string + /** The size merged. */ 'merges.total_size'?: string + /** The size merged. + * @alias 'merges.total_size' */ mts?: string + /** The size merged. + * @alias 'merges.total_size' */ mergesTotalSize?: string + /** The time spent in merges. */ 'merges.total_time'?: string + /** The time spent in merges. + * @alias 'merges.total_time' */ mtt?: string + /** The time spent in merges. + * @alias 'merges.total_time' */ mergesTotalTime?: string + /** The total refreshes. */ 'refresh.total'?: string + /** The time spent in refreshes. */ 'refresh.time'?: string + /** The total external refreshes. 
*/ 'refresh.external_total'?: string + /** The total external refreshes. + * @alias 'refresh.external_total' */ rto?: string + /** The total external refreshes. + * @alias 'refresh.external_total' */ refreshTotal?: string + /** The time spent in external refreshes. */ 'refresh.external_time'?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ rti?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ refreshTime?: string + /** The number of pending refresh listeners. */ 'refresh.listeners'?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ rli?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ refreshListeners?: string + /** The total script compilations. */ 'script.compilations'?: string + /** The total script compilations. + * @alias 'script.compilations' */ scrcc?: string + /** The total script compilations. + * @alias 'script.compilations' */ scriptCompilations?: string + /** The total compiled scripts evicted from the cache. */ 'script.cache_evictions'?: string + /** The total compiled scripts evicted from the cache. + * @alias 'script.cache_evictions' */ scrce?: string + /** The total compiled scripts evicted from the cache. + * @alias 'script.cache_evictions' */ scriptCacheEvictions?: string + /** The script cache compilation limit triggered. */ 'script.compilation_limit_triggered'?: string + /** The script cache compilation limit triggered. + * @alias 'script.compilation_limit_triggered' */ scrclt?: string + /** The script cache compilation limit triggered. + * @alias 'script.compilation_limit_triggered' */ scriptCacheCompilationLimitTriggered?: string + /** The current fetch phase operations. */ 'search.fetch_current'?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ sfc?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ searchFetchCurrent?: string + /** The time spent in fetch phase. */ 'search.fetch_time'?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ sfti?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ searchFetchTime?: string + /** The total fetch operations. */ 'search.fetch_total'?: string + /** The total fetch operations. + * @alias 'search.fetch_total' */ sfto?: string + /** The total fetch operations. + * @alias 'search.fetch_total' */ searchFetchTotal?: string + /** The open search contexts. */ 'search.open_contexts'?: string + /** The open search contexts. + * @alias 'search.open_contexts' */ so?: string + /** The open search contexts. + * @alias 'search.open_contexts' */ searchOpenContexts?: string + /** The current query phase operations. */ 'search.query_current'?: string + /** The current query phase operations. + * @alias 'search.query_current' */ sqc?: string + /** The current query phase operations. + * @alias 'search.query_current' */ searchQueryCurrent?: string + /** The time spent in query phase. */ 'search.query_time'?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ sqti?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ searchQueryTime?: string + /** The total query phase operations. */ 'search.query_total'?: string + /** The total query phase operations. + * @alias 'search.query_total' */ sqto?: string + /** The total query phase operations. 
+ * @alias 'search.query_total' */ searchQueryTotal?: string
+ /** The open scroll contexts. */ 'search.scroll_current'?: string
+ /** The open scroll contexts.
+ * @alias 'search.scroll_current' */ scc?: string
+ /** The open scroll contexts.
+ * @alias 'search.scroll_current' */ searchScrollCurrent?: string
+ /** The time scroll contexts held open. */ 'search.scroll_time'?: string
+ /** The time scroll contexts held open.
+ * @alias 'search.scroll_time' */ scti?: string
+ /** The time scroll contexts held open.
+ * @alias 'search.scroll_time' */ searchScrollTime?: string
+ /** The completed scroll contexts. */ 'search.scroll_total'?: string
+ /** The completed scroll contexts.
+ * @alias 'search.scroll_total' */ scto?: string
+ /** The completed scroll contexts.
+ * @alias 'search.scroll_total' */ searchScrollTotal?: string
+ /** The number of segments. */ 'segments.count'?: string
+ /** The number of segments.
+ * @alias 'segments.count' */ sc?: string
+ /** The number of segments.
+ * @alias 'segments.count' */ segmentsCount?: string
+ /** The memory used by segments. */ 'segments.memory'?: string
+ /** The memory used by segments.
+ * @alias 'segments.memory' */ sm?: string
+ /** The memory used by segments.
+ * @alias 'segments.memory' */ segmentsMemory?: string
+ /** The memory used by the index writer. */ 'segments.index_writer_memory'?: string
+ /** The memory used by the index writer.
+ * @alias 'segments.index_writer_memory' */ siwm?: string
+ /** The memory used by the index writer.
+ * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string
+ /** The memory used by the version map. */ 'segments.version_map_memory'?: string
+ /** The memory used by the version map.
+ * @alias 'segments.version_map_memory' */ svmm?: string
+ /** The memory used by the version map.
+ * @alias 'segments.version_map_memory' */ segmentsVersionMapMemory?: string
+ /** The memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields. */ 'segments.fixed_bitset_memory'?: string
+ /** The memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields.
+ * @alias 'segments.fixed_bitset_memory' */ sfbm?: string
+ /** The memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields.
+ * @alias 'segments.fixed_bitset_memory' */ fixedBitsetMemory?: string
+ /** The number of current suggest operations. */ 'suggest.current'?: string
+ /** The number of current suggest operations.
+ * @alias 'suggest.current' */ suc?: string
+ /** The number of current suggest operations.
+ * @alias 'suggest.current' */ suggestCurrent?: string
+ /** The time spent in suggest. */ 'suggest.time'?: string
+ /** The time spent in suggest.
+ * @alias 'suggest.time' */ suti?: string
+ /** The time spent in suggest.
+ * @alias 'suggest.time' */ suggestTime?: string
+ /** The number of suggest operations. */ 'suggest.total'?: string
+ /** The number of suggest operations.
+ * @alias 'suggest.total' */ suto?: string
+ /** The number of suggest operations.
+ * @alias 'suggest.total' */ suggestTotal?: string
+ /** The number of bulk shard operations. */ 'bulk.total_operations'?: string
+ /** The number of bulk shard operations.
+ * @alias 'bulk.total_operations' */ bto?: string
+ /** The number of bulk shard operations.
+ * @alias 'bulk.total_operations' */ bulkTotalOperations?: string
+ /** The time spent in shard bulk.
 */ 'bulk.total_time'?: string
+ /** The time spent in shard bulk.
+ * @alias 'bulk.total_time' */ btti?: string
+ /** The time spent in shard bulk.
+ * @alias 'bulk.total_time' */ bulkTotalTime?: string
+ /** The total size in bytes of shard bulk. */ 'bulk.total_size_in_bytes'?: string
+ /** The total size in bytes of shard bulk.
+ * @alias 'bulk.total_size_in_bytes' */ btsi?: string
+ /** The total size in bytes of shard bulk.
+ * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string
+ /** The average time spent in shard bulk. */ 'bulk.avg_time'?: string
+ /** The average time spent in shard bulk.
+ * @alias 'bulk.avg_time' */ bati?: string
+ /** The average time spent in shard bulk.
+ * @alias 'bulk.avg_time' */ bulkAvgTime?: string
+ /** The average size in bytes of shard bulk. */ 'bulk.avg_size_in_bytes'?: string
+ /** The average size in bytes of shard bulk.
+ * @alias 'bulk.avg_size_in_bytes' */ basi?: string
+ /** The average size in bytes of shard bulk.
+ * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string }
 export interface CatNodesRequest extends CatCatRequestBase {
-/** The unit used to display byte values. */
+ /** The unit used to display byte values. */
 bytes?: Bytes
 /** If `true`, return the full node ID. If `false`, return the shortened node ID. */
 full_id?: boolean | string
@@ -8935,7 +13021,9 @@ export interface CatNodesRequest extends CatCatRequestBase {
 include_unloaded_segments?: boolean
 /** List of columns to appear in the response. Supports simple wildcards. */
 h?: Names
- /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */
+ /** List of columns that determine how the table should be sorted.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */
 s?: Names
 /** Period to wait for a connection to the master node. */
 master_timeout?: Duration
@@ -8950,22 +13038,39 @@ export interface CatNodesRequest extends CatCatRequestBase {
 export type CatNodesResponse = CatNodesNodesRecord[]
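These request/response pairs map one-to-one onto methods of the JavaScript client. A minimal usage sketch for the nodes cat API, assuming a `client` instance created elsewhere (for example `new Client({ node: ... })` from `@elastic/elasticsearch`); the columns passed to `h` and `s` are illustrative, not required:

const nodes = await client.cat.nodes({
  full_id: true,                      // return full node IDs
  h: ['name', 'master', 'node.role'], // columns to include in the response
  s: ['name:asc']                     // sort ascending by node name
})
// `nodes` is typed as CatNodesResponse, i.e. CatNodesNodesRecord[]

 export interface CatPendingTasksPendingTasksRecord {
+ /** The task insertion order. */ insertOrder?: string
+ /** The task insertion order.
+ * @alias insertOrder */ o?: string
+ /** Indicates how long the task has been in queue. */ timeInQueue?: string
+ /** Indicates how long the task has been in queue.
+ * @alias timeInQueue */ t?: string
+ /** The task priority. */ priority?: string
+ /** The task priority.
+ * @alias priority */ p?: string
+ /** The task source. */ source?: string
+ /** The task source.
+ * @alias source */ s?: string }
 export interface CatPendingTasksRequest extends CatCatRequestBase {
-/** List of columns to appear in the response. Supports simple wildcards. */
+ /** List of columns to appear in the response. Supports simple wildcards. */
 h?: Names
- /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */
+ /** List of columns that determine how the table should be sorted.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */
 s?: Names
- /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node.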
In both cases the coordinating
+ * node will send requests for further information to each selected node. */
 local?: boolean
 /** Period to wait for a connection to the master node. */
 master_timeout?: Duration
@@ -8980,27 +13085,48 @@ export interface CatPendingTasksRequest extends CatCatRequestBase {
 export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[]
 export interface CatPluginsPluginsRecord {
+ /** The unique node identifier. */ id?: NodeId
+ /** The node name. */ name?: Name
+ /** The node name.
+ * @alias name */ n?: Name
+ /** The component name. */ component?: string
+ /** The component name.
+ * @alias component */ c?: string
+ /** The component version. */ version?: VersionString
+ /** The component version.
+ * @alias version */ v?: VersionString
+ /** The plugin details. */ description?: string
+ /** The plugin details.
+ * @alias description */ d?: string
+ /** The plugin type. */ type?: string
+ /** The plugin type.
+ * @alias type */ t?: string }
 export interface CatPluginsRequest extends CatCatRequestBase {
-/** List of columns to appear in the response. Supports simple wildcards. */
+ /** List of columns to appear in the response. Supports simple wildcards. */
 h?: Names
- /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */
+ /** List of columns that determine how the table should be sorted.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */
 s?: Names
 /** Include bootstrap plugins in the response */
 include_bootstrap?: boolean
- /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */
+ /** If `true`, the request computes the list of selected nodes from the
+ * local cluster state. If `false` the list of selected nodes are computed
+ * from the cluster state of the master node. In both cases the coordinating
+ * node will send requests for further information to each selected node. */
 local?: boolean
 /** Period to wait for a connection to the master node. */
 master_timeout?: Duration
@@ -9013,65 +13139,150 @@ export interface CatPluginsRequest extends CatCatRequestBase {
 export type CatPluginsResponse = CatPluginsPluginsRecord[]
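A similar sketch for the pending-tasks and plugins cat APIs, under the same assumptions (a configured `client`; the selected columns are illustrative):

const pendingTasks = await client.cat.pendingTasks({ s: ['priority'] })
const plugins = await client.cat.plugins({ h: ['name', 'component', 'version'] })

 export interface CatRecoveryRecoveryRecord {
+ /** The index name. */ index?: IndexName
+ /** The index name.
+ * @alias index */ i?: IndexName
+ /** The index name.
+ * @alias index */ idx?: IndexName
+ /** The shard name. */ shard?: string
+ /** The shard name.
+ * @alias shard */ s?: string
+ /** The shard name.
+ * @alias shard */ sh?: string
+ /** The recovery start time. */ start_time?: DateTime
+ /** The recovery start time.
+ * @alias start_time */ start?: DateTime
+ /** The recovery start time in epoch milliseconds. */ start_time_millis?: EpochTime<UnitMillis>
+ /** The recovery start time in epoch milliseconds.
+ * @alias start_time_millis */ start_millis?: EpochTime<UnitMillis>
+ /** The recovery stop time.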
 */ stop_time?: DateTime
+ /** The recovery stop time.
+ * @alias stop_time */ stop?: DateTime
+ /** The recovery stop time in epoch milliseconds. */ stop_time_millis?: EpochTime<UnitMillis>
+ /** The recovery stop time in epoch milliseconds.
+ * @alias stop_time_millis */ stop_millis?: EpochTime<UnitMillis>
+ /** The recovery time. */ time?: Duration
+ /** The recovery time.
+ * @alias time */ t?: Duration
+ /** The recovery time.
+ * @alias time */ ti?: Duration
+ /** The recovery type. */ type?: string
+ /** The recovery type.
+ * @alias type */ ty?: string
+ /** The recovery stage. */ stage?: string
+ /** The recovery stage.
+ * @alias stage */ st?: string
+ /** The source host. */ source_host?: string
+ /** The source host.
+ * @alias source_host */ shost?: string
+ /** The source node name. */ source_node?: string
+ /** The source node name.
+ * @alias source_node */ snode?: string
+ /** The target host. */ target_host?: string
+ /** The target host.
+ * @alias target_host */ thost?: string
+ /** The target node name. */ target_node?: string
+ /** The target node name.
+ * @alias target_node */ tnode?: string
+ /** The repository name. */ repository?: string
+ /** The repository name.
+ * @alias repository */ rep?: string
+ /** The snapshot name. */ snapshot?: string
+ /** The snapshot name.
+ * @alias snapshot */ snap?: string
+ /** The number of files to recover. */ files?: string
+ /** The number of files to recover.
+ * @alias files */ f?: string
+ /** The files recovered. */ files_recovered?: string
+ /** The files recovered.
+ * @alias files_recovered */ fr?: string
+ /** The ratio of files recovered. */ files_percent?: Percentage
+ /** The ratio of files recovered.
+ * @alias files_percent */ fp?: Percentage
+ /** The total number of files. */ files_total?: string
+ /** The total number of files.
+ * @alias files_total */ tf?: string
+ /** The number of bytes to recover. */ bytes?: string
+ /** The number of bytes to recover.
+ * @alias bytes */ b?: string
+ /** The bytes recovered. */ bytes_recovered?: string
+ /** The bytes recovered.
+ * @alias bytes_recovered */ br?: string
+ /** The ratio of bytes recovered. */ bytes_percent?: Percentage
+ /** The ratio of bytes recovered.
+ * @alias bytes_percent */ bp?: Percentage
+ /** The total number of bytes. */ bytes_total?: string
+ /** The total number of bytes.
+ * @alias bytes_total */ tb?: string
+ /** The number of translog operations to recover. */ translog_ops?: string
+ /** The number of translog operations to recover.
+ * @alias translog_ops */ to?: string
+ /** The translog operations recovered. */ translog_ops_recovered?: string
+ /** The translog operations recovered.
+ * @alias translog_ops_recovered */ tor?: string
+ /** The ratio of translog operations recovered. */ translog_ops_percent?: Percentage
+ /** The ratio of translog operations recovered.
+ * @alias translog_ops_percent */ top?: Percentage }
 export interface CatRecoveryRequest extends CatCatRequestBase {
-/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */
+ /** A comma-separated list of data streams, indices, and aliases used to limit the request.
+ * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */
 index?: Indices
 /** If `true`, the response only includes ongoing shard recoveries.
 */ active_only?: boolean
@@ -9081,7 +13292,9 @@ export interface CatRecoveryRequest extends CatCatRequestBase {
 detailed?: boolean
 /** List of columns to appear in the response. Supports simple wildcards. */
 h?: Names
- /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */
+ /** List of columns that determine how the table should be sorted.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */
 s?: Names
 /** Unit used to display time values. */
 time?: TimeUnit
@@ -9094,18 +13307,29 @@ export interface CatRecoveryRequest extends CatCatRequestBase {
 export type CatRecoveryResponse = CatRecoveryRecoveryRecord[]
 export interface CatRepositoriesRepositoriesRecord {
+ /** The unique repository identifier. */ id?: string
+ /** The unique repository identifier.
+ * @alias id */ repoId?: string
+ /** The repository type. */ type?: string
+ /** The repository type.
+ * @alias type */ t?: string }
 export interface CatRepositoriesRequest extends CatCatRequestBase {
-/** List of columns to appear in the response. Supports simple wildcards. */
+ /** List of columns to appear in the response. Supports simple wildcards. */
 h?: Names
- /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */
+ /** List of columns that determine how the table should be sorted.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */
 s?: Names
- /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */
+ /** If `true`, the request computes the list of selected nodes from the
+ * local cluster state. If `false` the list of selected nodes are computed
+ * from the cluster state of the master node. In both cases the coordinating
+ * node will send requests for further information to each selected node. */
 local?: boolean
 /** Period to wait for a connection to the master node. */
 master_timeout?: Duration
@@ -9118,15 +13342,22 @@ export interface CatRepositoriesRequest extends CatCatRequestBase {
 export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[]
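A usage sketch for the recovery and repositories cat APIs just defined; `my-index` is a hypothetical index name and the `client` assumptions are as before:

const recovery = await client.cat.recovery({
  index: 'my-index', // hypothetical index name
  active_only: true, // only ongoing shard recoveries
  detailed: true
})
const repositories = await client.cat.repositories()

 export interface CatSegmentsRequest extends CatCatRequestBase {
-/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */
+ /** A comma-separated list of data streams, indices, and aliases used to limit the request.
+ * Supports wildcards (`*`).
+ * To target all data streams and indices, omit this parameter or use `*` or `_all`. */
 index?: Indices
 /** The unit used to display byte values. */
 bytes?: Bytes
 /** List of columns to appear in the response. Supports simple wildcards. */
 h?: Names
- /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */
+ /** List of columns that determine how the table should be sorted.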
+ * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -9139,55 +13370,155 @@ export interface CatSegmentsRequest extends CatCatRequestBase { export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] export interface CatSegmentsSegmentsRecord { + /** The index name. */ index?: IndexName + /** The index name. + * @alias index */ i?: IndexName + /** The index name. + * @alias index */ idx?: IndexName + /** The shard name. */ shard?: string + /** The shard name. + * @alias shard */ s?: string + /** The shard name. + * @alias shard */ sh?: string + /** The shard type: `primary` or `replica`. */ prirep?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ p?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ pr?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ primaryOrReplica?: string + /** The IP address of the node where it lives. */ ip?: string + /** The unique identifier of the node where it lives. */ id?: NodeId + /** The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard. */ segment?: string + /** The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard. + * @alias segment */ seg?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. */ generation?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. + * @alias generation */ g?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. + * @alias generation */ gen?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. */ 'docs.count'?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. + * @alias 'docs.count' */ dc?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. 
+ * @alias 'docs.count' */ docsCount?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. */ 'docs.deleted'?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. + * @alias 'docs.deleted' */ dd?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. + * @alias 'docs.deleted' */ docsDeleted?: string + /** The segment size in bytes. */ size?: ByteSize + /** The segment size in bytes. + * @alias size */ si?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. */ 'size.memory'?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. + * @alias 'size.memory' */ sm?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. + * @alias 'size.memory' */ sizeMemory?: ByteSize + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. */ committed?: string + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. + * @alias committed */ ic?: string + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. + * @alias committed */ isCommitted?: string + /** If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. */ searchable?: string + /** If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. + * @alias searchable */ is?: string + /** If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. 
+ * @alias searchable */ isSearchable?: string + /** The version of Lucene used to write the segment. */ version?: VersionString + /** The version of Lucene used to write the segment. + * @alias version */ v?: VersionString + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. */ compound?: string + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. + * @alias compound */ ico?: string + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. + * @alias compound */ isCompound?: string } export interface CatShardsRequest extends CatCatRequestBase { -/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** The unit used to display byte values. */ bytes?: Bytes /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -9202,228 +13533,630 @@ export interface CatShardsRequest extends CatCatRequestBase { export type CatShardsResponse = CatShardsShardsRecord[] export interface CatShardsShardsRecord { + /** The index name. */ index?: string + /** The index name. + * @alias index */ i?: string + /** The index name. + * @alias index */ idx?: string + /** The shard name. */ shard?: string + /** The shard name. + * @alias shard */ s?: string + /** The shard name. + * @alias shard */ sh?: string + /** The shard type: `primary` or `replica`. */ prirep?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ p?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ pr?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ primaryOrReplica?: string + /** The shard state. + * Returned values include: + * `INITIALIZING`: The shard is recovering from a peer shard or gateway. + * `RELOCATING`: The shard is relocating. + * `STARTED`: The shard has started. + * `UNASSIGNED`: The shard is not assigned to any node. */ state?: string + /** The shard state. + * Returned values include: + * `INITIALIZING`: The shard is recovering from a peer shard or gateway. + * `RELOCATING`: The shard is relocating. + * `STARTED`: The shard has started. + * `UNASSIGNED`: The shard is not assigned to any node. + * @alias state */ st?: string + /** The number of documents in the shard. */ docs?: string | null + /** The number of documents in the shard. + * @alias docs */ d?: string | null + /** The number of documents in the shard. + * @alias docs */ dc?: string | null + /** The disk space used by the shard. 
*/ store?: string | null + /** The disk space used by the shard. + * @alias store */ sto?: string | null + /** total size of dataset (including the cache for partially mounted indices) */ dataset?: string | null + /** The IP address of the node. */ ip?: string | null + /** The unique identifier for the node. */ id?: string + /** The name of node. */ node?: string | null + /** The name of node. + * @alias node */ n?: string | null + /** The sync identifier. */ sync_id?: string + /** The reason for the last change to the state of an unassigned shard. + * It does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information. + * Returned values include: + * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. + * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. + * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. + * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API. + * `INDEX_CLOSED`: Unassigned because the index was closed. + * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API. + * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. + * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. + * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API. + * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. + * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. + * `REINITIALIZED`: When a shard moves from started back to initializing. + * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. + * `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command. */ 'unassigned.reason'?: string + /** The reason for the last change to the state of an unassigned shard. + * It does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information. + * Returned values include: + * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. + * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. + * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. + * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API. + * `INDEX_CLOSED`: Unassigned because the index was closed. + * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API. + * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. + * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. 
+ * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API. + * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. + * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. + * `REINITIALIZED`: When a shard moves from started back to initializing. + * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. + * `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command. + * @alias 'unassigned.reason' */ ur?: string + /** The time at which the shard became unassigned in Coordinated Universal Time (UTC). */ 'unassigned.at'?: string + /** The time at which the shard became unassigned in Coordinated Universal Time (UTC). + * @alias 'unassigned.at' */ ua?: string + /** The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC). */ 'unassigned.for'?: string + /** The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC). + * @alias 'unassigned.for' */ uf?: string + /** Additional details as to why the shard became unassigned. + * It does not explain why the shard is not assigned; use the cluster allocation explain API for that information. */ 'unassigned.details'?: string + /** Additional details as to why the shard became unassigned. + * It does not explain why the shard is not assigned; use the cluster allocation explain API for that information. + * @alias 'unassigned.details' */ ud?: string + /** The type of recovery source. */ 'recoverysource.type'?: string + /** The type of recovery source. + * @alias 'recoverysource.type' */ rs?: string + /** The size of completion. */ 'completion.size'?: string + /** The size of completion. + * @alias 'completion.size' */ cs?: string + /** The size of completion. + * @alias 'completion.size' */ completionSize?: string + /** The used fielddata cache memory. */ 'fielddata.memory_size'?: string + /** The used fielddata cache memory. + * @alias 'fielddata.memory_size' */ fm?: string + /** The used fielddata cache memory. + * @alias 'fielddata.memory_size' */ fielddataMemory?: string + /** The fielddata cache evictions. */ 'fielddata.evictions'?: string + /** The fielddata cache evictions. + * @alias 'fielddata.evictions' */ fe?: string + /** The fielddata cache evictions. + * @alias 'fielddata.evictions' */ fielddataEvictions?: string + /** The used query cache memory. */ 'query_cache.memory_size'?: string + /** The used query cache memory. + * @alias 'query_cache.memory_size' */ qcm?: string + /** The used query cache memory. + * @alias 'query_cache.memory_size' */ queryCacheMemory?: string + /** The query cache evictions. */ 'query_cache.evictions'?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ qce?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ queryCacheEvictions?: string + /** The number of flushes. */ 'flush.total'?: string + /** The number of flushes. + * @alias 'flush.total' */ ft?: string + /** The number of flushes. + * @alias 'flush.total' */ flushTotal?: string + /** The time spent in flush. */ 'flush.total_time'?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ ftt?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ flushTotalTime?: string + /** The number of current get operations. 
*/ 'get.current'?: string + /** The number of current get operations. + * @alias 'get.current' */ gc?: string + /** The number of current get operations. + * @alias 'get.current' */ getCurrent?: string + /** The time spent in get operations. */ 'get.time'?: string + /** The time spent in get operations. + * @alias 'get.time' */ gti?: string + /** The time spent in get operations. + * @alias 'get.time' */ getTime?: string + /** The number of get operations. */ 'get.total'?: string + /** The number of get operations. + * @alias 'get.total' */ gto?: string + /** The number of get operations. + * @alias 'get.total' */ getTotal?: string + /** The time spent in successful get operations. */ 'get.exists_time'?: string + /** The time spent in successful get operations. + * @alias 'get.exists_time' */ geti?: string + /** The time spent in successful get operations. + * @alias 'get.exists_time' */ getExistsTime?: string + /** The number of successful get operations. */ 'get.exists_total'?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ geto?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ getExistsTotal?: string + /** The time spent in failed get operations. */ 'get.missing_time'?: string + /** The time spent in failed get operations. + * @alias 'get.missing_time' */ gmti?: string + /** The time spent in failed get operations. + * @alias 'get.missing_time' */ getMissingTime?: string + /** The number of failed get operations. */ 'get.missing_total'?: string + /** The number of failed get operations. + * @alias 'get.missing_total' */ gmto?: string + /** The number of failed get operations. + * @alias 'get.missing_total' */ getMissingTotal?: string + /** The number of current deletion operations. */ 'indexing.delete_current'?: string + /** The number of current deletion operations. + * @alias 'indexing.delete_current' */ idc?: string + /** The number of current deletion operations. + * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string + /** The time spent in deletion operations. */ 'indexing.delete_time'?: string + /** The time spent in deletion operations. + * @alias 'indexing.delete_time' */ idti?: string + /** The time spent in deletion operations. + * @alias 'indexing.delete_time' */ indexingDeleteTime?: string + /** The number of delete operations. */ 'indexing.delete_total'?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ idto?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string + /** The number of current indexing operations. */ 'indexing.index_current'?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ iic?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ indexingIndexCurrent?: string + /** The time spent in indexing operations. */ 'indexing.index_time'?: string + /** The time spent in indexing operations. + * @alias 'indexing.index_time' */ iiti?: string + /** The time spent in indexing operations. + * @alias 'indexing.index_time' */ indexingIndexTime?: string + /** The number of indexing operations. */ 'indexing.index_total'?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ iito?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ indexingIndexTotal?: string + /** The number of failed indexing operations. 
 */ 'indexing.index_failed'?: string
+ /** The number of failed indexing operations.
+ * @alias 'indexing.index_failed' */ iif?: string
+ /** The number of failed indexing operations.
+ * @alias 'indexing.index_failed' */ indexingIndexFailed?: string
+ /** The number of current merge operations. */ 'merges.current'?: string
+ /** The number of current merge operations.
+ * @alias 'merges.current' */ mc?: string
+ /** The number of current merge operations.
+ * @alias 'merges.current' */ mergesCurrent?: string
+ /** The number of current merging documents. */ 'merges.current_docs'?: string
+ /** The number of current merging documents.
+ * @alias 'merges.current_docs' */ mcd?: string
+ /** The number of current merging documents.
+ * @alias 'merges.current_docs' */ mergesCurrentDocs?: string
+ /** The size of current merge operations. */ 'merges.current_size'?: string
+ /** The size of current merge operations.
+ * @alias 'merges.current_size' */ mcs?: string
+ /** The size of current merge operations.
+ * @alias 'merges.current_size' */ mergesCurrentSize?: string
+ /** The number of completed merge operations. */ 'merges.total'?: string
+ /** The number of completed merge operations.
+ * @alias 'merges.total' */ mt?: string
+ /** The number of completed merge operations.
+ * @alias 'merges.total' */ mergesTotal?: string
+ /** The number of merged documents. */ 'merges.total_docs'?: string
+ /** The number of merged documents.
+ * @alias 'merges.total_docs' */ mtd?: string
+ /** The number of merged documents.
+ * @alias 'merges.total_docs' */ mergesTotalDocs?: string
+ /** The size of current merges. */ 'merges.total_size'?: string
+ /** The size of current merges.
+ * @alias 'merges.total_size' */ mts?: string
+ /** The size of current merges.
+ * @alias 'merges.total_size' */ mergesTotalSize?: string
+ /** The time spent merging documents. */ 'merges.total_time'?: string
+ /** The time spent merging documents.
+ * @alias 'merges.total_time' */ mtt?: string
+ /** The time spent merging documents.
+ * @alias 'merges.total_time' */ mergesTotalTime?: string
+ /** The total number of refreshes. */ 'refresh.total'?: string
+ /** The time spent in refreshes. */ 'refresh.time'?: string
+ /** The total number of external refreshes. */ 'refresh.external_total'?: string
+ /** The total number of external refreshes.
+ * @alias 'refresh.external_total' */ rto?: string
+ /** The total number of external refreshes.
+ * @alias 'refresh.external_total' */ refreshTotal?: string
+ /** The time spent in external refreshes. */ 'refresh.external_time'?: string
+ /** The time spent in external refreshes.
+ * @alias 'refresh.external_time' */ rti?: string
+ /** The time spent in external refreshes.
+ * @alias 'refresh.external_time' */ refreshTime?: string
+ /** The number of pending refresh listeners. */ 'refresh.listeners'?: string
+ /** The number of pending refresh listeners.
+ * @alias 'refresh.listeners' */ rli?: string
+ /** The number of pending refresh listeners.
+ * @alias 'refresh.listeners' */ refreshListeners?: string
+ /** The current fetch phase operations. */ 'search.fetch_current'?: string
+ /** The current fetch phase operations.
+ * @alias 'search.fetch_current' */ sfc?: string
+ /** The current fetch phase operations.
+ * @alias 'search.fetch_current' */ searchFetchCurrent?: string
+ /** The time spent in fetch phase. */ 'search.fetch_time'?: string
+ /** The time spent in fetch phase.
+ * @alias 'search.fetch_time' */ sfti?: string
+ /** The time spent in fetch phase.
+ * @alias 'search.fetch_time' */ searchFetchTime?: string + /** The total number of fetch operations. */ 'search.fetch_total'?: string + /** The total number of fetch operations. + * @alias 'search.fetch_total' */ sfto?: string + /** The total number of fetch operations. + * @alias 'search.fetch_total' */ searchFetchTotal?: string + /** The number of open search contexts. */ 'search.open_contexts'?: string + /** The number of open search contexts. + * @alias 'search.open_contexts' */ so?: string + /** The number of open search contexts. + * @alias 'search.open_contexts' */ searchOpenContexts?: string + /** The current query phase operations. */ 'search.query_current'?: string + /** The current query phase operations. + * @alias 'search.query_current' */ sqc?: string + /** The current query phase operations. + * @alias 'search.query_current' */ searchQueryCurrent?: string + /** The time spent in query phase. */ 'search.query_time'?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ sqti?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ searchQueryTime?: string + /** The total number of query phase operations. */ 'search.query_total'?: string + /** The total number of query phase operations. + * @alias 'search.query_total' */ sqto?: string + /** The total number of query phase operations. + * @alias 'search.query_total' */ searchQueryTotal?: string + /** The open scroll contexts. */ 'search.scroll_current'?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ scc?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ searchScrollCurrent?: string + /** The time scroll contexts were held open. */ 'search.scroll_time'?: string + /** The time scroll contexts were held open. + * @alias 'search.scroll_time' */ scti?: string + /** The time scroll contexts were held open. + * @alias 'search.scroll_time' */ searchScrollTime?: string + /** The number of completed scroll contexts. */ 'search.scroll_total'?: string + /** The number of completed scroll contexts. + * @alias 'search.scroll_total' */ scto?: string + /** The number of completed scroll contexts. + * @alias 'search.scroll_total' */ searchScrollTotal?: string + /** The number of segments. */ 'segments.count'?: string + /** The number of segments. + * @alias 'segments.count' */ sc?: string + /** The number of segments. + * @alias 'segments.count' */ segmentsCount?: string + /** The memory used by segments. */ 'segments.memory'?: string + /** The memory used by segments. + * @alias 'segments.memory' */ sm?: string + /** The memory used by segments. + * @alias 'segments.memory' */ segmentsMemory?: string + /** The memory used by the index writer. */ 'segments.index_writer_memory'?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ siwm?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string + /** The memory used by the version map. */ 'segments.version_map_memory'?: string + /** The memory used by the version map. + * @alias 'segments.version_map_memory' */ svmm?: string + /** The memory used by the version map. + * @alias 'segments.version_map_memory' */ segmentsVersionMapMemory?: string + /** The memory used by fixed bit sets for nested object field types and export type filters for types referred in `_parent` fields. 
*/ 'segments.fixed_bitset_memory'?: string + /** The memory used by fixed bit sets for nested object field types and export type filters for types referred in `_parent` fields. + * @alias 'segments.fixed_bitset_memory' */ sfbm?: string + /** The memory used by fixed bit sets for nested object field types and export type filters for types referred in `_parent` fields. + * @alias 'segments.fixed_bitset_memory' */ fixedBitsetMemory?: string + /** The maximum sequence number. */ 'seq_no.max'?: string + /** The maximum sequence number. + * @alias 'seq_no.max' */ sqm?: string + /** The maximum sequence number. + * @alias 'seq_no.max' */ maxSeqNo?: string + /** The local checkpoint. */ 'seq_no.local_checkpoint'?: string + /** The local checkpoint. + * @alias 'seq_no.local_checkpoint' */ sql?: string + /** The local checkpoint. + * @alias 'seq_no.local_checkpoint' */ localCheckpoint?: string + /** The global checkpoint. */ 'seq_no.global_checkpoint'?: string + /** The global checkpoint. + * @alias 'seq_no.global_checkpoint' */ sqg?: string + /** The global checkpoint. + * @alias 'seq_no.global_checkpoint' */ globalCheckpoint?: string + /** The number of current warmer operations. */ 'warmer.current'?: string + /** The number of current warmer operations. + * @alias 'warmer.current' */ wc?: string + /** The number of current warmer operations. + * @alias 'warmer.current' */ warmerCurrent?: string + /** The total number of warmer operations. */ 'warmer.total'?: string + /** The total number of warmer operations. + * @alias 'warmer.total' */ wto?: string + /** The total number of warmer operations. + * @alias 'warmer.total' */ warmerTotal?: string + /** The time spent in warmer operations. */ 'warmer.total_time'?: string + /** The time spent in warmer operations. + * @alias 'warmer.total_time' */ wtt?: string + /** The time spent in warmer operations. + * @alias 'warmer.total_time' */ warmerTotalTime?: string + /** The shard data path. */ 'path.data'?: string + /** The shard data path. + * @alias 'path.data' */ pd?: string + /** The shard data path. + * @alias 'path.data' */ dataPath?: string + /** The shard state path. */ 'path.state'?: string + /** The shard state path. + * @alias 'path.state' */ ps?: string + /** The shard state path. + * @alias 'path.state' */ statsPath?: string + /** The number of bulk shard operations. */ 'bulk.total_operations'?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ bto?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ bulkTotalOperations?: string + /** The time spent in shard bulk operations. */ 'bulk.total_time'?: string + /** The time spent in shard bulk operations. + * @alias 'bulk.total_time' */ btti?: string + /** The time spent in shard bulk operations. + * @alias 'bulk.total_time' */ bulkTotalTime?: string + /** The total size in bytes of shard bulk operations. */ 'bulk.total_size_in_bytes'?: string + /** The total size in bytes of shard bulk operations. + * @alias 'bulk.total_size_in_bytes' */ btsi?: string + /** The total size in bytes of shard bulk operations. + * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string + /** The average time spent in shard bulk operations. */ 'bulk.avg_time'?: string + /** The average time spent in shard bulk operations. + * @alias 'bulk.avg_time' */ bati?: string + /** The average time spent in shard bulk operations. + * @alias 'bulk.avg_time' */ bulkAvgTime?: string + /** The average size in bytes of shard bulk operations. 
 */ 'bulk.avg_size_in_bytes'?: string
+ /** The average size in bytes of shard bulk operations.
+ * @alias 'bulk.avg_size_in_bytes' */ basi?: string
+ /** The average size in bytes of shard bulk operations.
+ * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string }
 export interface CatSnapshotsRequest extends CatCatRequestBase {
-/** A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. */
+ /** A comma-separated list of snapshot repositories used to limit the request.
+ * Accepts wildcard expressions.
+ * `_all` returns all repositories.
+ * If any repository fails during the request, Elasticsearch returns an error. */
 repository?: Names
 /** If `true`, the response does not include information from unavailable snapshots. */
 ignore_unavailable?: boolean
 /** List of columns to appear in the response. Supports simple wildcards. */
 h?: Names
- /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */
+ /** List of columns that determine how the table should be sorted.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */
 s?: Names
 /** Period to wait for a connection to the master node. */
 master_timeout?: Duration
@@ -9438,41 +14171,102 @@ export interface CatSnapshotsRequest extends CatCatRequestBase {
 export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[]
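A usage sketch for the shards and snapshots cat APIs defined above; `my-index` and `my-repo` are hypothetical names, and the `client` assumptions are as before:

const shards = await client.cat.shards({
  index: 'my-index', // hypothetical index name
  bytes: 'mb'        // display byte values in megabytes
})
const snapshots = await client.cat.snapshots({
  repository: 'my-repo',   // hypothetical repository name
  ignore_unavailable: true // skip unavailable snapshots instead of failing
})

 export interface CatSnapshotsSnapshotsRecord {
+ /** The unique identifier for the snapshot. */ id?: string
+ /** The unique identifier for the snapshot.
+ * @alias id */ snapshot?: string
+ /** The repository name. */ repository?: string
+ /** The repository name.
+ * @alias repository */ re?: string
+ /** The repository name.
+ * @alias repository */ repo?: string
+ /** The state of the snapshot process.
+ * Returned values include:
+ * `FAILED`: The snapshot process failed.
+ * `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version.
+ * `IN_PROGRESS`: The snapshot process started but has not completed.
+ * `PARTIAL`: The snapshot process completed with a partial success.
+ * `SUCCESS`: The snapshot process completed with a full success. */ status?: string
+ /** The state of the snapshot process.
+ * Returned values include:
+ * `FAILED`: The snapshot process failed.
+ * `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version.
+ * `IN_PROGRESS`: The snapshot process started but has not completed.
+ * `PARTIAL`: The snapshot process completed with a partial success.
+ * `SUCCESS`: The snapshot process completed with a full success.
+ * @alias status */ s?: string
+ /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. */ start_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+ /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started.
+ * @alias start_epoch */ ste?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+ /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started.
+ * @alias start_epoch */ startEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
+ /** The time (HH:MM:SS) at which the snapshot process started. */ start_time?: WatcherScheduleTimeOfDay
+ /** The time (HH:MM:SS) at which the snapshot process started.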
+ * @alias start_time */ sti?: WatcherScheduleTimeOfDay + /** The time (HH:MM:SS) at which the snapshot process started. + * @alias start_time */ startTime?: WatcherScheduleTimeOfDay + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. */ end_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. + * @alias end_epoch */ ete?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. + * @alias end_epoch */ endEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The time (HH:MM:SS) at which the snapshot process ended. */ end_time?: TimeOfDay + /** The time (HH:MM:SS) at which the snapshot process ended. + * @alias end_time */ eti?: TimeOfDay + /** The time (HH:MM:SS) at which the snapshot process ended. + * @alias end_time */ endTime?: TimeOfDay + /** The time it took the snapshot process to complete, in time units. */ duration?: Duration + /** The time it took the snapshot process to complete, in time units. + * @alias duration */ dur?: Duration + /** The number of indices in the snapshot. */ indices?: string + /** The number of indices in the snapshot. + * @alias indices */ i?: string + /** The number of successful shards in the snapshot. */ successful_shards?: string + /** The number of successful shards in the snapshot. + * @alias successful_shards */ ss?: string + /** The number of failed shards in the snapshot. */ failed_shards?: string + /** The number of failed shards in the snapshot. + * @alias failed_shards */ fs?: string + /** The total number of shards in the snapshot. */ total_shards?: string + /** The total number of shards in the snapshot. + * @alias total_shards */ ts?: string + /** The reason for any snapshot failures. */ reason?: string + /** The reason for any snapshot failures. + * @alias reason */ r?: string } export interface CatTasksRequest extends CatCatRequestBase { -/** The task action names, which are used to limit the response. */ + /** The task action names, which are used to limit the response. */ actions?: string[] /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean @@ -9482,11 +14276,14 @@ export interface CatTasksRequest extends CatCatRequestBase { parent_task_id?: string /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** Unit used to display time values. */ time?: TimeUnit - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** If `true`, the request blocks until the task has completed. */ wait_for_completion?: boolean @@ -9499,48 +14296,102 @@ export interface CatTasksRequest extends CatCatRequestBase { export type CatTasksResponse = CatTasksTasksRecord[] export interface CatTasksTasksRecord { + /** The identifier of the task with the node. */ id?: Id + /** The task action.
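To make the flattened request/record shapes above concrete, here is a minimal usage sketch with the JavaScript client these declarations describe. The node URL and the `actions` filter are illustrative placeholders, not values taken from this patch:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// One CatTasksTasksRecord per running task; wildcards are allowed in `actions`.
const tasks = await client.cat.tasks({
  actions: ['*reindex*'], // hypothetical action-name filter
  detailed: true
})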
*/ action?: string + /** The task action. + * @alias action */ ac?: string + /** The unique task identifier. */ task_id?: Id + /** The unique task identifier. + * @alias task_id */ ti?: Id + /** The parent task identifier. */ parent_task_id?: string + /** The parent task identifier. + * @alias parent_task_id */ pti?: string + /** The task type. */ type?: string + /** The task type. + * @alias type */ ty?: string + /** The start time in milliseconds. */ start_time?: string + /** The start time in milliseconds. + * @alias start_time */ start?: string + /** The start time in `HH:MM:SS` format. */ timestamp?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ ts?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ hms?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ hhmmss?: string + /** The running time in nanoseconds. */ running_time_ns?: string + /** The running time. */ running_time?: string + /** The running time. + * @alias running_time */ time?: string + /** The unique node identifier. */ node_id?: NodeId + /** The unique node identifier. + * @alias node_id */ ni?: NodeId + /** The IP address for the node. */ ip?: string + /** The IP address for the node. + * @alias ip */ i?: string + /** The bound transport port for the node. */ port?: string + /** The bound transport port for the node. + * @alias port */ po?: string + /** The node name. */ node?: string + /** The node name. + * @alias node */ n?: string + /** The Elasticsearch version. */ version?: VersionString + /** The Elasticsearch version. + * @alias version */ v?: VersionString + /** The X-Opaque-ID header. */ x_opaque_id?: string + /** The X-Opaque-ID header. + * @alias x_opaque_id */ x?: string + /** The task action description. */ description?: string + /** The task action description. + * @alias description */ desc?: string } export interface CatTemplatesRequest extends CatCatRequestBase { -/** The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. */ + /** The name of the template to return. + * Accepts wildcard expressions. If omitted, all templates are returned. */ name?: Name /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. 
*/ master_timeout?: Duration @@ -9553,29 +14404,52 @@ export interface CatTemplatesRequest extends CatCatRequestBase { export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] export interface CatTemplatesTemplatesRecord { + /** The template name. */ name?: Name + /** The template name. + * @alias name */ n?: Name + /** The template index patterns. */ index_patterns?: string + /** The template index patterns. + * @alias index_patterns */ t?: string + /** The template application order or priority number. */ order?: string + /** The template application order or priority number. + * @alias order */ o?: string + /** The template application order or priority number. + * @alias order */ p?: string + /** The template version. */ version?: VersionString | null + /** The template version. + * @alias version */ v?: VersionString | null + /** The component templates that comprise the index template. */ composed_of?: string + /** The component templates that comprise the index template. + * @alias composed_of */ c?: string } export interface CatThreadPoolRequest extends CatCatRequestBase { -/** A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. */ + /** A comma-separated list of thread pool names used to limit the request. + * Accepts wildcard expressions. */ thread_pool_patterns?: Names /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** The unit used to display time values. */ time?: TimeUnit - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -9588,52 +14462,117 @@ export interface CatThreadPoolRequest extends CatCatRequestBase { export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] export interface CatThreadPoolThreadPoolRecord { + /** The node name. */ node_name?: string + /** The node name. + * @alias node_name */ nn?: string + /** The persistent node identifier. */ node_id?: NodeId + /** The persistent node identifier. + * @alias node_id */ id?: NodeId + /** The ephemeral node identifier. */ ephemeral_node_id?: string + /** The ephemeral node identifier. + * @alias ephemeral_node_id */ eid?: string + /** The process identifier. */ pid?: string + /** The process identifier. + * @alias pid */ p?: string + /** The host name for the current node. */ host?: string + /** The host name for the current node. + * @alias host */ h?: string + /** The IP address for the current node. */ ip?: string + /** The IP address for the current node. 
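A short sketch for the templates endpoint, reusing the `client` instance from the first sketch above; the name pattern is hypothetical, while the `h` and `s` values are column names defined by CatTemplatesTemplatesRecord:

const templates = await client.cat.templates({
  name: 'logs-*', // hypothetical wildcard pattern
  h: ['name', 'index_patterns', 'version'],
  s: 'name:asc'
})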
+ * @alias ip */ i?: string + /** The bound transport port for the current node. */ port?: string + /** The bound transport port for the current node. + * @alias port */ po?: string + /** The thread pool name. */ name?: string + /** The thread pool name. + * @alias name */ n?: string + /** The thread pool type. + * Returned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`. */ type?: string + /** The thread pool type. + * Returned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`. + * @alias type */ t?: string + /** The number of active threads in the current thread pool. */ active?: string + /** The number of active threads in the current thread pool. + * @alias active */ a?: string + /** The number of threads in the current thread pool. */ pool_size?: string + /** The number of threads in the current thread pool. + * @alias pool_size */ psz?: string + /** The number of tasks currently in queue. */ queue?: string + /** The number of tasks currently in queue. + * @alias queue */ q?: string + /** The maximum number of tasks permitted in the queue. */ queue_size?: string + /** The maximum number of tasks permitted in the queue. + * @alias queue_size */ qs?: string + /** The number of rejected tasks. */ rejected?: string + /** The number of rejected tasks. + * @alias rejected */ r?: string + /** The highest number of active threads in the current thread pool. */ largest?: string + /** The highest number of active threads in the current thread pool. + * @alias largest */ l?: string + /** The number of completed tasks. */ completed?: string + /** The number of completed tasks. + * @alias completed */ c?: string + /** The core number of active threads allowed in a scaling thread pool. */ core?: string | null + /** The core number of active threads allowed in a scaling thread pool. + * @alias core */ cr?: string | null + /** The maximum number of active threads allowed in a scaling thread pool. */ max?: string | null + /** The maximum number of active threads allowed in a scaling thread pool. + * @alias max */ mx?: string | null + /** The number of active threads allowed in a fixed thread pool. */ size?: string | null + /** The number of active threads allowed in a fixed thread pool. + * @alias size */ sz?: string | null + /** The thread keep alive time. */ keep_alive?: string | null + /** The thread keep alive time. + * @alias keep_alive */ ka?: string | null } export interface CatTransformsRequest extends CatCatRequestBase { -/** A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all transforms. */ + /** A transform identifier or a wildcard expression. + * If you do not specify one of these options, the API returns information for all transforms. */ transform_id?: Id - /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. 
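The thread-pool record above maps onto a call such as the following sketch (the pool names are examples; `h` selects columns declared in CatThreadPoolThreadPoolRecord):

const pools = await client.cat.threadPool({
  thread_pool_patterns: ['search', 'write'], // illustrative pool names
  h: ['node_name', 'name', 'active', 'queue', 'rejected'],
  s: 'node_name'
})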
*/ + /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. + * If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. + * If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean /** Skips the specified number of transforms. */ from?: integer @@ -9654,131 +14593,301 @@ export interface CatTransformsRequest extends CatCatRequestBase { export type CatTransformsResponse = CatTransformsTransformsRecord[] export interface CatTransformsTransformsRecord { + /** The transform identifier. */ id?: Id + /** The status of the transform. + * Returned values include: + * `aborting`: The transform is aborting. + * `failed`: The transform failed. For more information about the failure, check the `reason` field. + * `indexing`: The transform is actively processing data and creating new documents. + * `started`: The transform is running but not actively indexing data. + * `stopped`: The transform is stopped. + * `stopping`: The transform is stopping. */ state?: string + /** The status of the transform. + * Returned values include: + * `aborting`: The transform is aborting. + * `failed`: The transform failed. For more information about the failure, check the `reason` field. + * `indexing`: The transform is actively processing data and creating new documents. + * `started`: The transform is running but not actively indexing data. + * `stopped`: The transform is stopped. + * `stopping`: The transform is stopping. + * @alias state */ s?: string + /** The sequence number for the checkpoint. */ checkpoint?: string + /** The sequence number for the checkpoint. + * @alias checkpoint */ c?: string + /** The number of documents that have been processed from the source index of the transform. */ documents_processed?: string + /** The number of documents that have been processed from the source index of the transform. + * @alias documents_processed */ docp?: string + /** The number of documents that have been processed from the source index of the transform. + * @alias documents_processed */ documentsProcessed?: string + /** The progress of the next checkpoint that is currently in progress. */ checkpoint_progress?: string | null + /** The progress of the next checkpoint that is currently in progress. + * @alias checkpoint_progress */ cp?: string | null + /** The progress of the next checkpoint that is currently in progress. + * @alias checkpoint_progress */ checkpointProgress?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. */ last_search_time?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. + * @alias last_search_time */ lst?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. + * @alias last_search_time */ lastSearchTime?: string | null + /** The timestamp when changes were last detected in the source indices. */ changes_last_detection_time?: string | null + /** The timestamp when changes were last detected in the source indices.
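A minimal sketch for the transforms endpoint, with `allow_no_match` behaving as documented in the request interface above (again reusing the earlier `client`):

const transforms = await client.cat.transforms({
  allow_no_match: true, // empty array instead of a 404 when nothing matches
  from: 0
})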
+ * @alias changes_last_detection_time */ cldt?: string | null + /** The time the transform was created. */ create_time?: string + /** The time the transform was created. + * @alias create_time */ ct?: string + /** The time the transform was created. + * @alias create_time */ createTime?: string + /** The version of Elasticsearch that existed on the node when the transform was created. */ version?: VersionString + /** The version of Elasticsearch that existed on the node when the transform was created. + * @alias version */ v?: VersionString + /** The source indices for the transform. */ source_index?: string + /** The source indices for the transform. + * @alias source_index */ si?: string + /** The source indices for the transform. + * @alias source_index */ sourceIndex?: string + /** The destination index for the transform. */ dest_index?: string + /** The destination index for the transform. + * @alias dest_index */ di?: string + /** The destination index for the transform. + * @alias dest_index */ destIndex?: string + /** The unique identifier for the ingest pipeline. */ pipeline?: string + /** The unique identifier for the ingest pipeline. + * @alias pipeline */ p?: string + /** The description of the transform. */ description?: string + /** The description of the transform. + * @alias description */ d?: string + /** The type of transform: `batch` or `continuous`. */ transform_type?: string + /** The type of transform: `batch` or `continuous`. + * @alias transform_type */ tt?: string + /** The interval between checks for changes in the source indices when the transform is running continuously. */ frequency?: string + /** The interval between checks for changes in the source indices when the transform is running continuously. + * @alias frequency */ f?: string + /** The initial page size that is used for the composite aggregation for each checkpoint. */ max_page_search_size?: string + /** The initial page size that is used for the composite aggregation for each checkpoint. + * @alias max_page_search_size */ mpsz?: string + /** The number of input documents per second. */ docs_per_second?: string + /** The number of input documents per second. + * @alias docs_per_second */ dps?: string + /** If a transform has a `failed` state, these details describe the reason for failure. */ reason?: string + /** If a transform has a `failed` state, these details describe the reason for failure. + * @alias reason */ r?: string + /** The total number of search operations on the source index for the transform. */ search_total?: string + /** The total number of search operations on the source index for the transform. + * @alias search_total */ st?: string + /** The total number of search failures. */ search_failure?: string + /** The total number of search failures. + * @alias search_failure */ sf?: string + /** The total amount of search time, in milliseconds. */ search_time?: string + /** The total amount of search time, in milliseconds. + * @alias search_time */ stime?: string + /** The total number of index operations done by the transform. */ index_total?: string + /** The total number of index operations done by the transform. + * @alias index_total */ it?: string + /** The total number of indexing failures. */ index_failure?: string + /** The total number of indexing failures. + * @alias index_failure */ if?: string + /** The total time spent indexing documents, in milliseconds. */ index_time?: string + /** The total time spent indexing documents, in milliseconds. 
+ * @alias index_time */ itime?: string + /** The number of documents that have been indexed into the destination index for the transform. */ documents_indexed?: string + /** The number of documents that have been indexed into the destination index for the transform. + * @alias documents_indexed */ doci?: string + /** The total time spent deleting documents, in milliseconds. */ delete_time?: string + /** The total time spent deleting documents, in milliseconds. + * @alias delete_time */ dtime?: string + /** The number of documents deleted from the destination index due to the retention policy for the transform. */ documents_deleted?: string + /** The number of documents deleted from the destination index due to the retention policy for the transform. + * @alias documents_deleted */ docd?: string + /** The number of times the transform has been triggered by the scheduler. + * For example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property. */ trigger_count?: string + /** The number of times the transform has been triggered by the scheduler. + * For example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property. + * @alias trigger_count */ tc?: string + /** The number of search or bulk index operations processed. + * Documents are processed in batches instead of individually. */ pages_processed?: string + /** The number of search or bulk index operations processed. + * Documents are processed in batches instead of individually. + * @alias pages_processed */ pp?: string + /** The total time spent processing results, in milliseconds. */ processing_time?: string + /** The total time spent processing results, in milliseconds. + * @alias processing_time */ pt?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. */ checkpoint_duration_time_exp_avg?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. + * @alias checkpoint_duration_time_exp_avg */ cdtea?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. + * @alias checkpoint_duration_time_exp_avg */ checkpointTimeExpAvg?: string + /** The exponential moving average of the number of new documents that have been indexed. */ indexed_documents_exp_avg?: string + /** The exponential moving average of the number of new documents that have been indexed. + * @alias indexed_documents_exp_avg */ idea?: string + /** The exponential moving average of the number of documents that have been processed. */ processed_documents_exp_avg?: string + /** The exponential moving average of the number of documents that have been processed. + * @alias processed_documents_exp_avg */ pdea?: string } export interface CcrFollowIndexStats { + /** The name of the follower index. */ index: IndexName + /** An array of shard-level following task statistics. */ shards: CcrShardStats[] } export interface CcrReadException { + /** The exception that caused the read to fail. */ exception: ErrorCause + /** The starting sequence number of the batch requested from the leader. */ from_seq_no: SequenceNumber + /** The number of times the batch has been retried. */ retries: integer } export interface CcrShardStats { + /** The total of transferred bytes read from the leader. + * This is only an estimate and does not account for compression if enabled. 
*/ bytes_read: long + /** The number of failed reads. */ failed_read_requests: long + /** The number of failed bulk write requests on the follower. */ failed_write_requests: long fatal_exception?: ErrorCause + /** The index aliases version the follower is synced up to. */ follower_aliases_version: VersionNumber + /** The current global checkpoint on the follower. + * The difference between the `leader_global_checkpoint` and the `follower_global_checkpoint` is an indication of how much the follower is lagging the leader. */ follower_global_checkpoint: long + /** The name of the follower index. */ follower_index: string + /** The mapping version the follower is synced up to. */ follower_mapping_version: VersionNumber + /** The current maximum sequence number on the follower. */ follower_max_seq_no: SequenceNumber + /** The index settings version the follower is synced up to. */ follower_settings_version: VersionNumber + /** The starting sequence number of the last batch of operations requested from the leader. */ last_requested_seq_no: SequenceNumber + /** The current global checkpoint on the leader known to the follower task. */ leader_global_checkpoint: long + /** The name of the index in the leader cluster being followed. */ leader_index: string + /** The current maximum sequence number on the leader known to the follower task. */ leader_max_seq_no: SequenceNumber + /** The total number of operations read from the leader. */ operations_read: long + /** The number of operations written on the follower. */ operations_written: long + /** The number of active read requests from the follower. */ outstanding_read_requests: integer + /** The number of active bulk write requests on the follower. */ outstanding_write_requests: integer + /** An array of objects representing failed reads. */ read_exceptions: CcrReadException[] + /** The remote cluster containing the leader index. */ remote_cluster: string + /** The numerical shard ID, with values from 0 to one less than the number of replicas. */ shard_id: integer + /** The number of successful fetches. */ successful_read_requests: long + /** The number of bulk write requests run on the follower. */ successful_write_requests: long time_since_last_read?: Duration + /** The number of milliseconds since a read request was sent to the leader. + * When the follower is caught up to the leader, this number will increase up to the configured `read_poll_timeout` at which point another read request will be sent to the leader. */ time_since_last_read_millis: DurationValue<UnitMillis> total_read_remote_exec_time?: Duration + /** The total time reads spent running on the remote cluster. */ total_read_remote_exec_time_millis: DurationValue<UnitMillis> total_read_time?: Duration + /** The total time reads were outstanding, measured from the time a read was sent to the leader to the time a reply was returned to the follower. */ total_read_time_millis: DurationValue<UnitMillis> total_write_time?: Duration + /** The total time spent writing on the follower. */ total_write_time_millis: DurationValue<UnitMillis> + /** The number of write operations queued on the follower. */ write_buffer_operation_count: long + /** The total number of bytes of operations currently queued for writing. */ write_buffer_size_in_bytes: ByteSize } export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { -/** The auto-follow pattern collection to delete. */ + /** The auto-follow pattern collection to delete. */ name: Name - /** The period to wait for a connection to the master node.
If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -9789,11 +14898,14 @@ export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrFollowRequest extends RequestBase { -/** The name of the follower index. */ + /** The name of the follower index. */ index: IndexName /** Period to wait for a connection to the master node. */ master_timeout?: Duration - /** Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the remote Lucene segment files to the follower index. */ + /** Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be + * active. + * A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the + * remote Lucene segment files to the follower index. */ wait_for_active_shards?: WaitForActiveShards /** If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. */ data_stream_name?: string @@ -9807,17 +14919,22 @@ export interface CcrFollowRequest extends RequestBase { max_read_request_operation_count?: integer /** The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. */ max_read_request_size?: ByteSize - /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. */ + /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when + * retrying. */ max_retry_delay?: Duration - /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. */ + /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be + * deferred until the number of queued operations goes below the limit. */ max_write_buffer_count?: integer - /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. */ + /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will + * be deferred until the total bytes of queued operations goes below the limit. */ max_write_buffer_size?: ByteSize /** The maximum number of operations per bulk write request executed on the follower. 
*/ max_write_request_operation_count?: integer /** The maximum total bytes of operations per bulk write request executed on the follower. */ max_write_request_size?: ByteSize - /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. */ + /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. + * When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. + * Then the follower will immediately attempt to read from the leader again. */ read_poll_timeout?: Duration /** The remote cluster containing the leader index. */ remote_cluster: string @@ -9836,32 +14953,54 @@ export interface CcrFollowResponse { } export interface CcrFollowInfoFollowerIndex { + /** The name of the follower index. */ follower_index: IndexName + /** The name of the index in the leader cluster that is followed. */ leader_index: IndexName + /** An object that encapsulates cross-cluster replication parameters. If the follower index's status is paused, this object is omitted. */ parameters?: CcrFollowInfoFollowerIndexParameters + /** The remote cluster that contains the leader index. */ remote_cluster: Name + /** The status of the index following: `active` or `paused`. */ status: CcrFollowInfoFollowerIndexStatus } export interface CcrFollowInfoFollowerIndexParameters { + /** The maximum number of outstanding reads requests from the remote cluster. */ max_outstanding_read_requests?: long + /** The maximum number of outstanding write requests on the follower. */ max_outstanding_write_requests?: integer + /** The maximum number of operations to pull per read from the remote cluster. */ max_read_request_operation_count?: integer + /** The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. */ max_read_request_size?: ByteSize + /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when + * retrying. */ max_retry_delay?: Duration + /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be + * deferred until the number of queued operations goes below the limit. */ max_write_buffer_count?: integer + /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will + * be deferred until the total bytes of queued operations goes below the limit. */ max_write_buffer_size?: ByteSize + /** The maximum number of operations per bulk write request executed on the follower. */ max_write_request_operation_count?: integer + /** The maximum total bytes of operations per bulk write request executed on the follower. */ max_write_request_size?: ByteSize + /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. + * When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. + * Then the follower will immediately attempt to read from the leader again. 
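A hedged sketch for creating a follower index with the request type above. All names are placeholders, and `leader_index` is a required property of this same request interface that sits in the unchanged context lines the hunk does not show:

await client.ccr.follow({
  index: 'follower-logs',           // hypothetical follower index name
  leader_index: 'leader-logs',      // hypothetical; declared outside the quoted hunk
  remote_cluster: 'leader-cluster', // hypothetical remote cluster alias
  wait_for_active_shards: 1
})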
*/ read_poll_timeout?: Duration } export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused' export interface CcrFollowInfoRequest extends RequestBase { -/** A comma-delimited list of follower index patterns. */ + /** A comma-delimited list of follower index patterns. */ index: Indices - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } @@ -9874,9 +15013,10 @@ export interface CcrFollowInfoResponse { } export interface CcrFollowStatsRequest extends RequestBase { -/** A comma-delimited list of index patterns. */ + /** A comma-delimited list of index patterns. */ index: Indices - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, timeout?: never } @@ -9885,11 +15025,12 @@ export interface CcrFollowStatsRequest extends RequestBase { } export interface CcrFollowStatsResponse { + /** An array of follower index statistics. */ indices: CcrFollowIndexStats[] } export interface CcrForgetFollowerRequest extends RequestBase { -/** the name of the leader index for which specified follower retention leases should be removed */ + /** the name of the leader index for which specified follower retention leases should be removed */ index: IndexName /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -9914,17 +15055,25 @@ export interface CcrGetAutoFollowPatternAutoFollowPattern { export interface CcrGetAutoFollowPatternAutoFollowPatternSummary { active: boolean + /** The remote cluster containing the leader indices to match against. */ remote_cluster: string + /** The name of follower index. */ follow_index_pattern?: IndexPattern + /** An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. */ leader_index_patterns: IndexPatterns + /** An array of simple index patterns that can be used to exclude indices from being auto-followed. */ leader_index_exclusion_patterns: IndexPatterns + /** The maximum number of outstanding reads requests from the remote cluster. */ max_outstanding_read_requests: integer } export interface CcrGetAutoFollowPatternRequest extends RequestBase { -/** The auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. */ + /** The auto-follow pattern collection that you want to retrieve. + * If you do not specify a name, the API returns information for all collections. */ name?: Name - /** The period to wait for a connection to the master node. 
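Because the follow-stats response wraps the CcrFollowIndexStats array defined earlier, iterating it looks like this (the index pattern is illustrative):

const stats = await client.ccr.followStats({ index: 'follower-*' })
for (const followed of stats.indices) {
  console.log(followed.index, followed.shards.length)
}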
If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -9937,9 +15086,11 @@ export interface CcrGetAutoFollowPatternResponse { } export interface CcrPauseAutoFollowPatternRequest extends RequestBase { -/** The name of the auto-follow pattern to pause. */ + /** The name of the auto-follow pattern to pause. */ name: Name - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -9950,9 +15101,11 @@ export interface CcrPauseAutoFollowPatternRequest extends RequestBase { export type CcrPauseAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrPauseFollowRequest extends RequestBase { -/** The name of the follower index. */ + /** The name of the follower index. */ index: IndexName - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } @@ -9963,7 +15116,7 @@ export interface CcrPauseFollowRequest extends RequestBase { export type CcrPauseFollowResponse = AcknowledgedResponseBase export interface CcrPutAutoFollowPatternRequest extends RequestBase { -/** The name of the collection of auto-follow patterns. */ + /** The name of the collection of auto-follow patterns. */ name: Name /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -10006,9 +15159,11 @@ export interface CcrPutAutoFollowPatternRequest extends RequestBase { export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeAutoFollowPatternRequest extends RequestBase { -/** The name of the auto-follow pattern to resume. */ + /** The name of the auto-follow pattern to resume. */ name: Name - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. 
It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -10019,7 +15174,7 @@ export interface CcrResumeAutoFollowPatternRequest extends RequestBase { export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeFollowRequest extends RequestBase { -/** The name of the follow index to resume following. */ + /** The name of the follow index to resume following. */ index: IndexName /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -10043,9 +15198,14 @@ export type CcrResumeFollowResponse = AcknowledgedResponseBase export interface CcrStatsAutoFollowStats { auto_followed_clusters: CcrStatsAutoFollowedCluster[] + /** The number of indices that the auto-follow coordinator failed to automatically follow. + * The causes of recent failures are captured in the logs of the elected master node and in the `auto_follow_stats.recent_auto_follow_errors` field. */ number_of_failed_follow_indices: long + /** The number of times that the auto-follow coordinator failed to retrieve the cluster state from a remote cluster registered in a collection of auto-follow patterns. */ number_of_failed_remote_cluster_state_requests: long + /** The number of indices that the auto-follow coordinator successfully followed. */ number_of_successful_follow_indices: long + /** An array of objects representing failures by the auto-follow coordinator. */ recent_auto_follow_errors: ErrorCause[] } @@ -10060,7 +15220,9 @@ export interface CcrStatsFollowStats { } export interface CcrStatsRequest extends RequestBase { -/** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -10071,14 +15233,18 @@ export interface CcrStatsRequest extends RequestBase { } export interface CcrStatsResponse { + /** Statistics for the auto-follow coordinator. */ auto_follow_stats: CcrStatsAutoFollowStats + /** Shard-level statistics for follower indices. */ follow_stats: CcrStatsFollowStats } export interface CcrUnfollowRequest extends RequestBase { -/** The name of the follower index. */ + /** The name of the follower index. */ index: IndexName - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. 
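A sketch for registering an auto-follow pattern; all names are placeholders, and the body properties mirror the auto-follow pattern summary fields defined above:

await client.ccr.putAutoFollowPattern({
  name: 'logs-pattern',             // hypothetical pattern collection name
  remote_cluster: 'leader-cluster',
  leader_index_patterns: ['logs-*'],
  follow_index_pattern: '{{leader_index}}-follower'
})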
+ * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } @@ -10173,7 +15339,7 @@ export interface ClusterAllocationExplainNodeDiskUsage { } export interface ClusterAllocationExplainRequest extends RequestBase { -/** If true, returns information about disk usage and shard sizes. */ + /** If true, returns information about disk usage and shard sizes. */ include_disk_info?: boolean /** If true, returns YES decisions in explanation. */ include_yes_decisions?: boolean @@ -10241,11 +15407,13 @@ export interface ClusterAllocationExplainUnassignedInformation { export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION' export interface ClusterDeleteComponentTemplateRequest extends RequestBase { -/** Comma-separated list or wildcard expression of component template names used to limit the request. */ + /** Comma-separated list or wildcard expression of component template names used to limit the request. */ name: Names - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } @@ -10256,9 +15424,14 @@ export interface ClusterDeleteComponentTemplateRequest extends RequestBase { export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { -/** Period to wait for a connection to the master node. */ + /** Period to wait for a connection to the master node. */ master_timeout?: Duration - /** Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from the cluster before this API takes any action. If set to false then the voting configuration exclusions list is cleared even if some excluded nodes are still in the cluster. */ + /** Specifies whether to wait for all excluded nodes to be removed from the + * cluster before clearing the voting configuration exclusions list. + * Defaults to true, meaning that all excluded nodes must be removed from + * the cluster before this API takes any action. 
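A hedged sketch of the allocation-explain call; `index`, `shard`, and `primary` identify the shard to explain and belong to the same request interface, outside the quoted hunk lines:

const explanation = await client.cluster.allocationExplain({
  index: 'my-index', // hypothetical; assumed request properties not shown above
  shard: 0,
  primary: true,
  include_disk_info: true
})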
If set to false then the + * voting configuration exclusions list is cleared even if some excluded + * nodes are still in the cluster. */ wait_for_removal?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never, wait_for_removal?: never } @@ -10269,11 +15442,15 @@ export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase export type ClusterDeleteVotingConfigExclusionsResponse = boolean export interface ClusterExistsComponentTemplateRequest extends RequestBase { -/** Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported. */ + /** Comma-separated list of component template names used to limit the request. + * Wildcard (*) expressions are supported. */ name: Names - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration - /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ + /** If true, the request retrieves information from the local node only. + * Defaults to false, which means information is retrieved from the master node. */ local?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, local?: never } @@ -10284,15 +15461,18 @@ export interface ClusterExistsComponentTemplateRequest extends RequestBase { export type ClusterExistsComponentTemplateResponse = boolean export interface ClusterGetComponentTemplateRequest extends RequestBase { -/** Comma-separated list of component template names used to limit the request. Wildcard (`*`) expressions are supported. */ + /** Comma-separated list of component template names used to limit the request. + * Wildcard (`*`) expressions are supported. */ name?: Name /** If `true`, returns settings in flat format. */ flat_settings?: boolean /** Return all default configurations for the component template (default: false) */ include_defaults?: boolean - /** If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. */ + /** If `true`, the request retrieves information from the local node only. + * If `false`, information is retrieved from the master node. */ local?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, include_defaults?: never, local?: never, master_timeout?: never } @@ -10305,13 +15485,15 @@ export interface ClusterGetComponentTemplateResponse { } export interface ClusterGetSettingsRequest extends RequestBase { -/** If `true`, returns settings in flat format. */ + /** If `true`, returns settings in flat format. 
*/ flat_settings?: boolean /** If `true`, returns default cluster settings from the local node. */ include_defaults?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { flat_settings?: never, include_defaults?: never, master_timeout?: never, timeout?: never } @@ -10326,23 +15508,39 @@ export interface ClusterGetSettingsResponse { } export interface ClusterHealthHealthResponseBody { + /** The number of active primary shards. */ active_primary_shards: integer + /** The total number of active primary and replica shards. */ active_shards: integer + /** The ratio of active shards in the cluster expressed as a percentage. */ active_shards_percent_as_number: Percentage + /** The name of the cluster. */ cluster_name: Name + /** The number of shards whose allocation has been delayed by the timeout settings. */ delayed_unassigned_shards: integer indices?: Record<IndexName, ClusterHealthIndexHealthStats> + /** The number of shards that are under initialization. */ initializing_shards: integer + /** The number of nodes that are dedicated data nodes. */ number_of_data_nodes: integer + /** The number of unfinished fetches. */ number_of_in_flight_fetch: integer + /** The number of nodes within the cluster. */ number_of_nodes: integer + /** The number of cluster-level changes that have not yet been executed. */ number_of_pending_tasks: integer + /** The number of shards that are under relocation. */ relocating_shards: integer status: HealthStatus + /** The time since the earliest initiated task is waiting for being performed. */ task_max_waiting_in_queue?: Duration + /** The time expressed in milliseconds since the earliest initiated task is waiting for being performed. */ task_max_waiting_in_queue_millis: DurationValue<UnitMillis> + /** If false the response returned within the period of time that is specified by the timeout parameter (30s by default) */ timed_out: boolean + /** The number of primary shards that are not allocated. */ unassigned_primary_shards: integer + /** The number of shards that are not allocated. */ unassigned_shards: integer } @@ -10360,7 +15558,7 @@ export interface ClusterHealthIndexHealthStats { } export interface ClusterHealthRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. */ index?: Indices /** Whether to expand wildcard expression to concrete indices that are open, closed or both.
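A minimal sketch for reading cluster settings with the flags defined above; the response separates persistent and transient settings, plus defaults when requested:

const settings = await client.cluster.getSettings({
  include_defaults: true,
  flat_settings: true
})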
*/ expand_wildcards?: ExpandWildcards @@ -10403,7 +15601,7 @@ export interface ClusterHealthShardHealthStats { } export interface ClusterInfoRequest extends RequestBase { -/** Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. */ + /** Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. */ target: ClusterInfoTargets /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { target?: never } @@ -10420,18 +15618,27 @@ export interface ClusterInfoResponse { } export interface ClusterPendingTasksPendingTask { + /** Indicates whether the pending tasks are currently executing or not. */ executing: boolean + /** The number that represents when the task has been inserted into the task queue. */ insert_order: integer + /** The priority of the pending task. + * The valid priorities in descending priority order are: `IMMEDIATE` > `URGENT` > `HIGH` > `NORMAL` > `LOW` > `LANGUID`. */ priority: string + /** A general description of the cluster task that may include a reason and origin. */ source: string + /** The time since the task is waiting for being performed. */ time_in_queue?: Duration + /** The time expressed in milliseconds since the task is waiting for being performed. */ time_in_queue_millis: DurationValue<UnitMillis> } export interface ClusterPendingTasksRequest extends RequestBase { -/** If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. */ + /** If `true`, the request retrieves information from the local node only. + * If `false`, information is retrieved from the master node. */ local?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { local?: never, master_timeout?: never } @@ -10444,13 +15651,18 @@ export interface ClusterPendingTasksResponse { } export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase { -/** A comma-separated list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify node_ids. */ + /** A comma-separated list of the names of the nodes to exclude from the + * voting configuration. If specified, you may not also specify node_ids. */ node_names?: Names - /** A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names. */ + /** A comma-separated list of the persistent ids of the nodes to exclude + * from the voting configuration. If specified, you may not also specify node_names. */ node_ids?: Ids /** Period to wait for a connection to the master node. */ master_timeout?: Duration - /** When adding a voting configuration exclusion, the API waits for the specified nodes to be excluded from the voting configuration before returning. If the timeout expires before the appropriate condition is satisfied, the request fails and returns an error.
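The health response body defined above supports checks such as this one (the target index is illustrative):

const health = await client.cluster.health({
  index: 'my-index', // hypothetical target
  expand_wildcards: 'open'
})
if (health.status !== 'green') {
  console.log(`${health.unassigned_shards} shards unassigned`)
}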
*/ + /** When adding a voting configuration exclusion, the API waits for the + * specified nodes to be excluded from the voting configuration before + * returning. If the timeout expires before the appropriate condition + * is satisfied, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_names?: never, node_ids?: never, master_timeout?: never, timeout?: never } @@ -10461,19 +15673,30 @@ export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase { export type ClusterPostVotingConfigExclusionsResponse = boolean export interface ClusterPutComponentTemplateRequest extends RequestBase { -/** Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mappings`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. */ + /** Name of the component template to create. + * Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mappings`; `synthetics-settings`. + * Elastic Agent uses these templates to configure backing indices for its data streams. + * If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. + * If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. */ name: Name /** If `true`, this request cannot replace or update existing component templates. */ create?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** The template to be applied which includes mappings, settings, or aliases configuration. */ template: IndicesIndexState - /** Version number used to manage component templates externally. This number isn't automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version. */ + /** Version number used to manage component templates externally. + * This number isn't automatically generated or incremented by Elasticsearch. + * To unset a version, replace the template without specifying a version. */ version?: VersionNumber - /** Optional user metadata about the component template. It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. */ + /** Optional user metadata about the component template. + * It may have any contents. This map is not automatically generated by Elasticsearch.
+ * This information is stored in the cluster state, so keeping it short is preferable. + * To unset `_meta`, replace the template without specifying this information. */ _meta?: Metadata - /** Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. */ + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never } @@ -10484,7 +15707,7 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterPutSettingsRequest extends RequestBase { -/** Return settings in flat format (default: false) */ + /** Return settings in flat format (default: false) */ flat_settings?: boolean /** Explicit operation timeout for connection to master node */ master_timeout?: Duration @@ -10507,24 +15730,43 @@ export interface ClusterPutSettingsResponse { export type ClusterRemoteInfoClusterRemoteInfo = ClusterRemoteInfoClusterRemoteSniffInfo | ClusterRemoteInfoClusterRemoteProxyInfo export interface ClusterRemoteInfoClusterRemoteProxyInfo { + /** The connection mode for the remote cluster. */ mode: 'proxy' + /** If it is `true`, there is at least one open connection to the remote cluster. + * If it is `false`, it means that the cluster no longer has an open connection to the remote cluster. + * It does not necessarily mean that the remote cluster is down or unavailable, just that at some point a connection was lost. */ connected: boolean + /** The initial connect timeout for remote cluster connections. */ initial_connect_timeout: Duration + /** If `true`, cross-cluster search skips the remote cluster when its nodes are unavailable during the search and ignores errors returned by the remote cluster. */ skip_unavailable: boolean + /** The address for remote connections when proxy mode is configured. */ proxy_address: string server_name: string + /** The number of open socket connections to the remote cluster when proxy mode is configured. */ num_proxy_sockets_connected: integer + /** The maximum number of socket connections to the remote cluster when proxy mode is configured. */ max_proxy_socket_connections: integer + /** This field is present and has a value of `::es_redacted::` only when the remote cluster is configured with the API key based model. Otherwise, the field is not present. */ cluster_credentials?: string } export interface ClusterRemoteInfoClusterRemoteSniffInfo { + /** The connection mode for the remote cluster. */ mode: 'sniff' + /** If it is `true`, there is at least one open connection to the remote cluster. + * If it is `false`, it means that the cluster no longer has an open connection to the remote cluster. + * It does not necessarily mean that the remote cluster is down or unavailable, just that at some point a connection was lost. */ connected: boolean + /** The maximum number of connections maintained for the remote cluster when sniff mode is configured. */ max_connections_per_cluster: integer + /** The number of connected nodes in the remote cluster when sniff mode is configured. 
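+ * As an illustration only (not part of the generated typings), a minimal sketch of reading these
+ * values, assuming a connected `Client` instance named `client` from `@elastic/elasticsearch`:
+ * @example
+ * const remotes = await client.cluster.remoteInfo()
+ * for (const [name, info] of Object.entries(remotes)) {
+ *   if (info.mode === 'sniff') console.log(name, info.num_nodes_connected)
+ * }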
*/ num_nodes_connected: long + /** The initial connect timeout for remote cluster connections. */ initial_connect_timeout: Duration + /** If `true`, cross-cluster search skips the remote cluster when its nodes are unavailable during the search and ignores errors returned by the remote cluster. */ skip_unavailable: boolean + /** The initial seed transport addresses of the remote cluster when sniff mode is configured. */ seeds: string[] } @@ -10538,10 +15780,15 @@ export interface ClusterRemoteInfoRequest extends RequestBase { export type ClusterRemoteInfoResponse = Record export interface ClusterRerouteCommand { + /** Cancel allocation of a shard (or recovery). Accepts index and shard for index name and shard number, and node for the node to cancel the shard allocation on. This can be used to force resynchronization of existing replicas from the primary shard by cancelling them and allowing them to be reinitialized through the standard recovery process. By default only replica shard allocations can be cancelled. If it is necessary to cancel the allocation of a primary shard then the allow_primary flag must also be included in the request. */ cancel?: ClusterRerouteCommandCancelAction + /** Move a started shard from one node to another node. Accepts index and shard for index name and shard number, from_node for the node to move the shard from, and to_node for the node to move the shard to. */ move?: ClusterRerouteCommandMoveAction + /** Allocate an unassigned replica shard to a node. Accepts index and shard for index name and shard number, and node to allocate the shard to. Takes allocation deciders into account. */ allocate_replica?: ClusterRerouteCommandAllocateReplicaAction + /** Allocate a primary shard to a node that holds a stale copy. Accepts the index and shard for index name and shard number, and node to allocate the shard to. Using this command may lead to data loss for the provided shard id. If a node which has the good copy of the data rejoins the cluster later on, that data will be deleted or overwritten with the data of the stale copy that was forcefully allocated with this command. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true. */ allocate_stale_primary?: ClusterRerouteCommandAllocatePrimaryAction + /** Allocate an empty primary shard to a node. Accepts the index and shard for index name and shard number, and node to allocate the shard to. Using this command leads to a complete loss of all data that was indexed into this shard, if it was previously started. If a node which has a copy of the data rejoins the cluster later on, that data will be deleted. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true. */ allocate_empty_primary?: ClusterRerouteCommandAllocatePrimaryAction } @@ -10549,6 +15796,7 @@ export interface ClusterRerouteCommandAllocatePrimaryAction { index: IndexName shard: integer node: string + /** If a node which has a copy of the data rejoins the cluster later on, that data will be deleted. 
To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true */ accept_data_loss: boolean } @@ -10568,12 +15816,15 @@ export interface ClusterRerouteCommandCancelAction { export interface ClusterRerouteCommandMoveAction { index: IndexName shard: integer + /** The node to move the shard from */ from_node: string + /** The node to move the shard to */ to_node: string } export interface ClusterRerouteRequest extends RequestBase { -/** If true, then the request simulates the operation. It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. */ + /** If true, then the request simulates the operation. + * It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. */ dry_run?: boolean /** If true, then the response contains an explanation of why the commands can or cannot run. */ explain?: boolean @@ -10617,11 +15868,14 @@ export interface ClusterRerouteRerouteParameters { export interface ClusterRerouteResponse { acknowledged: boolean explanations?: ClusterRerouteRerouteExplanation[] + /** There aren't any guarantees on the output/structure of the raw cluster state. + * Here you will find the internal representation of the cluster, which can + * differ from the external representation. */ state?: any } export interface ClusterStateRequest extends RequestBase { -/** Limit the information returned to the specified metrics */ + /** Limit the information returned to the specified metrics */ metric?: Metrics /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices @@ -10650,46 +15904,77 @@ export interface ClusterStateRequest extends RequestBase { export type ClusterStateResponse = any export interface ClusterStatsCharFilterTypes { + /** Contains statistics about analyzer types used in selected nodes. */ analyzer_types: ClusterStatsFieldTypes[] + /** Contains statistics about built-in analyzers used in selected nodes. */ built_in_analyzers: ClusterStatsFieldTypes[] + /** Contains statistics about built-in character filters used in selected nodes. */ built_in_char_filters: ClusterStatsFieldTypes[] + /** Contains statistics about built-in token filters used in selected nodes. */ built_in_filters: ClusterStatsFieldTypes[] + /** Contains statistics about built-in tokenizers used in selected nodes. */ built_in_tokenizers: ClusterStatsFieldTypes[] + /** Contains statistics about character filter types used in selected nodes. */ char_filter_types: ClusterStatsFieldTypes[] + /** Contains statistics about token filter types used in selected nodes. */ filter_types: ClusterStatsFieldTypes[] + /** Contains statistics about tokenizer types used in selected nodes. */ tokenizer_types: ClusterStatsFieldTypes[] } export interface ClusterStatsClusterFileSystem { + /** Total number of bytes available to JVM in file stores across all selected nodes. + * Depending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_bytes`. + * This is the actual amount of free disk space the selected Elasticsearch nodes can use.
*/ available_in_bytes: long + /** Total number of unallocated bytes in file stores across all selected nodes. */ free_in_bytes: long + /** Total size, in bytes, of all file stores across all selected nodes. */ total_in_bytes: long } export interface ClusterStatsClusterIndices { + /** Contains statistics about analyzers and analyzer components used in selected nodes. */ analysis: ClusterStatsCharFilterTypes + /** Contains statistics about memory used for completion in selected nodes. */ completion: CompletionStats + /** Total number of indices with shards assigned to selected nodes. */ count: long + /** Contains counts for documents in selected nodes. */ docs: DocStats + /** Contains statistics about the field data cache of selected nodes. */ fielddata: FielddataStats + /** Contains statistics about the query cache of selected nodes. */ query_cache: QueryCacheStats + /** Contains statistics about segments in selected nodes. */ segments: SegmentsStats + /** Contains statistics about indices with shards assigned to selected nodes. */ shards: ClusterStatsClusterIndicesShards + /** Contains statistics about the size of shards assigned to selected nodes. */ store: StoreStats + /** Contains statistics about field mappings in selected nodes. */ mappings: ClusterStatsFieldTypesMappings + /** Contains statistics about analyzers and analyzer components used in selected nodes. */ versions?: ClusterStatsIndicesVersions[] } export interface ClusterStatsClusterIndicesShards { + /** Contains statistics about shards assigned to selected nodes. */ index?: ClusterStatsClusterIndicesShardsIndex + /** Number of primary shards assigned to selected nodes. */ primaries?: double + /** Ratio of replica shards to primary shards across all selected nodes. */ replication?: double + /** Total number of shards assigned to selected nodes. */ total?: double } export interface ClusterStatsClusterIndicesShardsIndex { + /** Contains statistics about the number of primary shards assigned to selected nodes. */ primaries: ClusterStatsClusterShardMetrics + /** Contains statistics about the number of replication shards assigned to selected nodes. */ replication: ClusterStatsClusterShardMetrics + /** Contains statistics about the number of shards assigned to selected nodes. */ shards: ClusterStatsClusterShardMetrics } @@ -10699,29 +15984,45 @@ export interface ClusterStatsClusterIngest { } export interface ClusterStatsClusterJvm { + /** Uptime duration, in milliseconds, since JVM last started. */ max_uptime_in_millis: DurationValue + /** Contains statistics about memory used by selected nodes. */ mem: ClusterStatsClusterJvmMemory + /** Number of active threads in use by JVM across all selected nodes. */ threads: long + /** Contains statistics about the JVM versions used by selected nodes. */ versions: ClusterStatsClusterJvmVersion[] } export interface ClusterStatsClusterJvmMemory { + /** Maximum amount of memory, in bytes, available for use by the heap across all selected nodes. */ heap_max_in_bytes: long + /** Memory, in bytes, currently in use by the heap across all selected nodes. */ heap_used_in_bytes: long } export interface ClusterStatsClusterJvmVersion { + /** Always `true`. All distributions come with a bundled Java Development Kit (JDK). */ bundled_jdk: boolean + /** Total number of selected nodes using JVM. */ count: integer + /** If `true`, a bundled JDK is in use by JVM. */ using_bundled_jdk: boolean + /** Version of JVM used by one or more selected nodes. */ version: VersionString + /** Name of the JVM. 
*/ vm_name: string + /** Vendor of the JVM. */ vm_vendor: string + /** Full version number of JVM. + * The full version number includes a plus sign (+) followed by the build number. */ vm_version: VersionString } export interface ClusterStatsClusterNetworkTypes { + /** Contains statistics about the HTTP network types used by selected nodes. */ http_types: Record + /** Contains statistics about the transport network types used by selected nodes. */ transport_types: Record } @@ -10743,56 +16044,91 @@ export interface ClusterStatsClusterNodeCount { } export interface ClusterStatsClusterNodes { + /** Contains counts for nodes selected by the request’s node filters. */ count: ClusterStatsClusterNodeCount + /** Contains statistics about the discovery types used by selected nodes. */ discovery_types: Record + /** Contains statistics about file stores by selected nodes. */ fs: ClusterStatsClusterFileSystem indexing_pressure: ClusterStatsIndexingPressure ingest: ClusterStatsClusterIngest + /** Contains statistics about the Java Virtual Machines (JVMs) used by selected nodes. */ jvm: ClusterStatsClusterJvm + /** Contains statistics about the transport and HTTP networks used by selected nodes. */ network_types: ClusterStatsClusterNetworkTypes + /** Contains statistics about the operating systems used by selected nodes. */ os: ClusterStatsClusterOperatingSystem + /** Contains statistics about Elasticsearch distributions installed on selected nodes. */ packaging_types: ClusterStatsNodePackagingType[] + /** Contains statistics about installed plugins and modules by selected nodes. + * If no plugins or modules are installed, this array is empty. */ plugins: PluginStats[] + /** Contains statistics about processes used by selected nodes. */ process: ClusterStatsClusterProcess + /** Array of Elasticsearch versions used on selected nodes. */ versions: VersionString[] } export interface ClusterStatsClusterOperatingSystem { + /** Number of processors used to calculate thread pool size across all selected nodes. + * This number can be set with the processors setting of a node and defaults to the number of processors reported by the operating system. + * In both cases, this number will never be larger than 32. */ allocated_processors: integer + /** Contains statistics about processor architectures (for example, x86_64 or aarch64) used by selected nodes. */ architectures?: ClusterStatsClusterOperatingSystemArchitecture[] + /** Number of processors available to JVM across all selected nodes. */ available_processors: integer + /** Contains statistics about memory used by selected nodes. */ mem: ClusterStatsOperatingSystemMemoryInfo + /** Contains statistics about operating systems used by selected nodes. */ names: ClusterStatsClusterOperatingSystemName[] + /** Contains statistics about operating systems used by selected nodes. */ pretty_names: ClusterStatsClusterOperatingSystemPrettyName[] } export interface ClusterStatsClusterOperatingSystemArchitecture { + /** Name of an architecture used by one or more selected nodes. */ arch: string + /** Number of selected nodes using the architecture. */ count: integer } export interface ClusterStatsClusterOperatingSystemName { + /** Number of selected nodes using the operating system. */ count: integer + /** Name of an operating system used by one or more selected nodes. */ name: Name } export interface ClusterStatsClusterOperatingSystemPrettyName { + /** Number of selected nodes using the operating system. 
*/ count: integer + /** Human-readable name of an operating system used by one or more selected nodes. */ pretty_name: Name } export interface ClusterStatsClusterProcess { + /** Contains statistics about CPU used by selected nodes. */ cpu: ClusterStatsClusterProcessCpu + /** Contains statistics about open file descriptors in selected nodes. */ open_file_descriptors: ClusterStatsClusterProcessOpenFileDescriptors } export interface ClusterStatsClusterProcessCpu { + /** Percentage of CPU used across all selected nodes. + * Returns `-1` if not supported. */ percent: integer } export interface ClusterStatsClusterProcessOpenFileDescriptors { + /** Average number of concurrently open file descriptors. + * Returns `-1` if not supported. */ avg: long + /** Maximum number of concurrently open file descriptors allowed across all selected nodes. + * Returns `-1` if not supported. */ max: long + /** Minimum number of concurrently open file descriptors across all selected nodes. + * Returns -1 if not supported. */ min: long } @@ -10805,27 +16141,43 @@ export interface ClusterStatsClusterProcessor { } export interface ClusterStatsClusterShardMetrics { + /** Mean number of shards in an index, counting only shards assigned to selected nodes. */ avg: double + /** Maximum number of shards in an index, counting only shards assigned to selected nodes. */ max: double + /** Minimum number of shards in an index, counting only shards assigned to selected nodes. */ min: double } export interface ClusterStatsFieldTypes { + /** The name for the field type in selected nodes. */ name: Name + /** The number of occurrences of the field type in selected nodes. */ count: integer + /** The number of indices containing the field type in selected nodes. */ index_count: integer + /** For dense_vector field types, number of indexed vector types in selected nodes. */ indexed_vector_count?: long + /** For dense_vector field types, the maximum dimension of all indexed vector types in selected nodes. */ indexed_vector_dim_max?: long + /** For dense_vector field types, the minimum dimension of all indexed vector types in selected nodes. */ indexed_vector_dim_min?: long + /** The number of fields that declare a script. */ script_count?: integer } export interface ClusterStatsFieldTypesMappings { + /** Contains statistics about field data types used in selected nodes. */ field_types: ClusterStatsFieldTypes[] + /** Contains statistics about runtime field data types used in selected nodes. */ runtime_field_types?: ClusterStatsRuntimeFieldTypes[] + /** Total number of fields in all non-system indices. */ total_field_count?: integer + /** Total number of fields in all non-system indices, accounting for mapping deduplication. */ total_deduplicated_field_count?: integer + /** Total size of all mappings after deduplication and compression. */ total_deduplicated_mapping_size?: ByteSize + /** Total size of all mappings, in bytes, after deduplication and compression. */ total_deduplicated_mapping_size_in_bytes?: long } @@ -10858,26 +16210,37 @@ export interface ClusterStatsIndicesVersions { } export interface ClusterStatsNodePackagingType { + /** Number of selected nodes using the distribution flavor and file type. */ count: integer + /** Type of Elasticsearch distribution. This is always `default`. */ flavor: string + /** File type (such as `tar` or `zip`) used for the distribution package. 
*/ type: string } export interface ClusterStatsOperatingSystemMemoryInfo { + /** Total amount, in bytes, of memory across all selected nodes, but using the value specified using the `es.total_memory_bytes` system property instead of measured total memory for those nodes where that system property was set. */ adjusted_total_in_bytes?: long + /** Amount, in bytes, of free physical memory across all selected nodes. */ free_in_bytes: long + /** Percentage of free physical memory across all selected nodes. */ free_percent: integer + /** Total amount, in bytes, of physical memory across all selected nodes. */ total_in_bytes: long + /** Amount, in bytes, of physical memory in use across all selected nodes. */ used_in_bytes: long + /** Percentage of physical memory in use across all selected nodes. */ used_percent: integer } export interface ClusterStatsRequest extends RequestBase { -/** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */ + /** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */ node_id?: NodeIds /** Include remote cluster data into the response */ include_remotes?: boolean - /** Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. */ + /** Period to wait for each node to respond. + * If a node does not respond before its timeout expires, the response does not include its stats. + * However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, include_remotes?: never, timeout?: never } @@ -10888,28 +16251,48 @@ export interface ClusterStatsRequest extends RequestBase { export type ClusterStatsResponse = ClusterStatsStatsResponseBase export interface ClusterStatsRuntimeFieldTypes { + /** Maximum number of characters for a single runtime field script. */ chars_max: integer + /** Total number of characters for the scripts that define the current runtime field data type. */ chars_total: integer + /** Number of runtime fields mapped to the field data type in selected nodes. */ count: integer + /** Maximum number of accesses to doc_values for a single runtime field script */ doc_max: integer + /** Total number of accesses to doc_values for the scripts that define the current runtime field data type. */ doc_total: integer + /** Number of indices containing a mapping of the runtime field data type in selected nodes. */ index_count: integer + /** Script languages used for the runtime fields scripts. */ lang: string[] + /** Maximum number of lines for a single runtime field script. */ lines_max: integer + /** Total number of lines for the scripts that define the current runtime field data type. */ lines_total: integer + /** Field data type used in selected nodes. */ name: Name + /** Number of runtime fields that don’t declare a script. */ scriptless_count: integer + /** Number of runtime fields that shadow an indexed field. */ shadowed_count: integer + /** Maximum number of accesses to _source for a single runtime field script. */ source_max: integer + /** Total number of accesses to _source for the scripts that define the current runtime field data type. 
*/ source_total: integer } export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { + /** Name of the cluster, based on the cluster name setting. */ cluster_name: Name + /** Unique identifier for the cluster. */ cluster_uuid: Uuid + /** Contains statistics about indices with shards assigned to selected nodes. */ indices: ClusterStatsClusterIndices + /** Contains statistics about nodes selected by the request’s node filters. */ nodes: ClusterStatsClusterNodes + /** Health status of the cluster, based on the state of its primary and replica shards. */ status: HealthStatus + /** Unix timestamp, in milliseconds, for the last time the cluster statistics were refreshed. */ timestamp: long } @@ -10970,8 +16353,11 @@ export type ConnectorConnectorConfiguration = Record export interface ConnectorConnectorFeatures { + /** Indicates whether document-level security is enabled. */ document_level_security?: ConnectorFeatureEnabled + /** Indicates whether incremental syncs are enabled. */ incremental_sync?: ConnectorFeatureEnabled + /** Indicates whether managed connector API keys are enabled. */ native_connector_api_keys?: ConnectorFeatureEnabled sync_rules?: ConnectorSyncRulesFeature } @@ -10980,6 +16366,7 @@ export type ConnectorConnectorFieldType = 'str' | 'int' | 'list' | 'bool' export interface ConnectorConnectorScheduling { enabled: boolean + /** The interval is expressed using the crontab syntax */ interval: string } @@ -11137,7 +16524,9 @@ export type ConnectorSyncJobTriggerMethod = 'on_demand' | 'scheduled' export type ConnectorSyncJobType = 'full' | 'incremental' | 'access_control' export interface ConnectorSyncRulesFeature { + /** Indicates whether advanced sync rules are enabled. */ advanced?: ConnectorFeatureEnabled + /** Indicates whether basic sync rules are enabled. */ basic?: ConnectorFeatureEnabled } @@ -11146,7 +16535,7 @@ export type ConnectorSyncStatus = 'canceling' | 'canceled' | 'completed' | 'erro export type ConnectorValidation = ConnectorLessThanValidation | ConnectorGreaterThanValidation | ConnectorListTypeValidation | ConnectorIncludedInValidation | ConnectorRegexValidation export interface ConnectorCheckInRequest extends RequestBase { -/** The unique identifier of the connector to be checked in */ + /** The unique identifier of the connector to be checked in */ connector_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_id?: never } @@ -11159,7 +16548,7 @@ export interface ConnectorCheckInResponse { } export interface ConnectorDeleteRequest extends RequestBase { -/** The unique identifier of the connector to be deleted */ + /** The unique identifier of the connector to be deleted */ connector_id: Id /** A flag indicating if associated sync jobs should also be removed. Defaults to false. */ delete_sync_jobs?: boolean @@ -11174,7 +16563,7 @@ export interface ConnectorDeleteRequest extends RequestBase { export type ConnectorDeleteResponse = AcknowledgedResponseBase export interface ConnectorGetRequest extends RequestBase { -/** The unique identifier of the connector */ + /** The unique identifier of the connector */ connector_id: Id /** A flag to indicate if the desired connector should be fetched, even if it was soft-deleted.
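 * As an illustration only (not part of the generated typings), a minimal usage sketch for this
 * request, assuming a connected `Client` instance named `client` from `@elastic/elasticsearch`
 * and a placeholder connector ID:
 * @example
 * const connector = await client.connector.get({ connector_id: 'my-connector', include_deleted: false })
 * console.log(connector)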
*/ include_deleted?: boolean @@ -11187,7 +16576,7 @@ export interface ConnectorGetRequest extends RequestBase { export type ConnectorGetResponse = ConnectorConnector export interface ConnectorLastSyncRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id last_access_control_sync_error?: string last_access_control_sync_scheduled_at?: DateTime @@ -11212,7 +16601,7 @@ export interface ConnectorLastSyncResponse { } export interface ConnectorListRequest extends RequestBase { -/** Starting offset (default: 0) */ + /** Starting offset (default: 0) */ from?: integer /** Specifies a max number of results to get */ size?: integer @@ -11256,7 +16645,7 @@ export interface ConnectorPostResponse { } export interface ConnectorPutRequest extends RequestBase { -/** The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. */ + /** The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. */ connector_id?: Id description?: string index_name?: IndexName @@ -11276,7 +16665,7 @@ export interface ConnectorPutResponse { } export interface ConnectorSyncJobCancelRequest extends RequestBase { -/** The unique identifier of the connector sync job */ + /** The unique identifier of the connector sync job */ connector_sync_job_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_sync_job_id?: never } @@ -11289,7 +16678,7 @@ export interface ConnectorSyncJobCancelResponse { } export interface ConnectorSyncJobCheckInRequest extends RequestBase { -/** The unique identifier of the connector sync job to be checked in. */ + /** The unique identifier of the connector sync job to be checked in. */ connector_sync_job_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_sync_job_id?: never } @@ -11301,9 +16690,10 @@ export interface ConnectorSyncJobCheckInResponse { } export interface ConnectorSyncJobClaimRequest extends RequestBase { -/** The unique identifier of the connector sync job. */ + /** The unique identifier of the connector sync job. */ connector_sync_job_id: Id - /** The cursor object from the last incremental sync job. This should reference the `sync_cursor` field in the connector state for which the job runs. */ + /** The cursor object from the last incremental sync job. + * This should reference the `sync_cursor` field in the connector state for which the job runs. */ sync_cursor?: any /** The host name of the current system that will run the job. */ worker_hostname: string @@ -11317,7 +16707,7 @@ export interface ConnectorSyncJobClaimResponse { } export interface ConnectorSyncJobDeleteRequest extends RequestBase { -/** The unique identifier of the connector sync job to be deleted */ + /** The unique identifier of the connector sync job to be deleted */ connector_sync_job_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_sync_job_id?: never } @@ -11328,7 +16718,7 @@ export interface ConnectorSyncJobDeleteRequest extends RequestBase { export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase export interface ConnectorSyncJobErrorRequest extends RequestBase { -/** The unique identifier for the connector sync job. */ + /** The unique identifier for the connector sync job. 
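 * As an illustration only (not part of the generated typings), a minimal usage sketch, assuming a
 * connected `Client` instance named `client` and that the `connector.syncJobError` helper maps to
 * this request (the sync job ID and error text below are placeholders):
 * @example
 * await client.connector.syncJobError({ connector_sync_job_id: 'my-sync-job-id', error: 'Source connection reset' })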
*/ connector_sync_job_id: Id /** The error for the connector sync job error field. */ error: string @@ -11342,7 +16732,7 @@ export interface ConnectorSyncJobErrorResponse { } export interface ConnectorSyncJobGetRequest extends RequestBase { -/** The unique identifier of the connector sync job */ + /** The unique identifier of the connector sync job */ connector_sync_job_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_sync_job_id?: never } @@ -11353,7 +16743,7 @@ export interface ConnectorSyncJobGetRequest extends RequestBase { export type ConnectorSyncJobGetResponse = ConnectorConnectorSyncJob export interface ConnectorSyncJobListRequest extends RequestBase { -/** Starting offset (default: 0) */ + /** Starting offset (default: 0) */ from?: integer /** Specifies a max number of results to get */ size?: integer @@ -11375,7 +16765,7 @@ export interface ConnectorSyncJobListResponse { } export interface ConnectorSyncJobPostRequest extends RequestBase { -/** The id of the associated connector */ + /** The id of the associated connector */ id: Id job_type?: ConnectorSyncJobType trigger_method?: ConnectorSyncJobTriggerMethod @@ -11390,7 +16780,7 @@ export interface ConnectorSyncJobPostResponse { } export interface ConnectorSyncJobUpdateStatsRequest extends RequestBase { -/** The unique identifier of the connector sync job. */ + /** The unique identifier of the connector sync job. */ connector_sync_job_id: Id /** The number of documents the sync job deleted. */ deleted_document_count: long @@ -11414,7 +16804,7 @@ export interface ConnectorSyncJobUpdateStatsResponse { } export interface ConnectorUpdateActiveFilteringRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_id?: never } @@ -11427,7 +16817,7 @@ export interface ConnectorUpdateActiveFilteringResponse { } export interface ConnectorUpdateApiKeyIdRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id api_key_id?: string api_key_secret_id?: string @@ -11442,7 +16832,7 @@ export interface ConnectorUpdateApiKeyIdResponse { } export interface ConnectorUpdateConfigurationRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id configuration?: ConnectorConnectorConfiguration values?: Record @@ -11457,7 +16847,7 @@ export interface ConnectorUpdateConfigurationResponse { } export interface ConnectorUpdateErrorRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id error: SpecUtilsWithNullValue /** All values in `body` will be added to the request body. */ @@ -11471,7 +16861,7 @@ export interface ConnectorUpdateErrorResponse { } export interface ConnectorUpdateFeaturesRequest extends RequestBase { -/** The unique identifier of the connector to be updated. */ + /** The unique identifier of the connector to be updated. */ connector_id: Id features: ConnectorConnectorFeatures /** All values in `body` will be added to the request body. 
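 * As an illustration only (not part of the generated typings), a minimal usage sketch, assuming a
 * connected `Client` instance named `client` and that `ConnectorFeatureEnabled` has the
 * `{ enabled: boolean }` shape suggested by its usage in these typings:
 * @example
 * await client.connector.updateFeatures({
 *   connector_id: 'my-connector',
 *   features: { document_level_security: { enabled: true }, incremental_sync: { enabled: false } }
 * })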
*/ @@ -11485,7 +16875,7 @@ export interface ConnectorUpdateFeaturesResponse { } export interface ConnectorUpdateFilteringRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id filtering?: ConnectorFilteringConfig[] rules?: ConnectorFilteringRule[] @@ -11501,7 +16891,7 @@ export interface ConnectorUpdateFilteringResponse { } export interface ConnectorUpdateFilteringValidationRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id validation: ConnectorFilteringRulesValidation /** All values in `body` will be added to the request body. */ @@ -11515,7 +16905,7 @@ export interface ConnectorUpdateFilteringValidationResponse { } export interface ConnectorUpdateIndexNameRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id index_name: SpecUtilsWithNullValue /** All values in `body` will be added to the request body. */ @@ -11529,7 +16919,7 @@ export interface ConnectorUpdateIndexNameResponse { } export interface ConnectorUpdateNameRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id name?: string description?: string @@ -11544,7 +16934,7 @@ export interface ConnectorUpdateNameResponse { } export interface ConnectorUpdateNativeRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id is_native: boolean /** All values in `body` will be added to the request body. */ @@ -11558,7 +16948,7 @@ export interface ConnectorUpdateNativeResponse { } export interface ConnectorUpdatePipelineRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id pipeline: ConnectorIngestPipelineParams /** All values in `body` will be added to the request body. */ @@ -11572,7 +16962,7 @@ export interface ConnectorUpdatePipelineResponse { } export interface ConnectorUpdateSchedulingRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id scheduling: ConnectorSchedulingConfiguration /** All values in `body` will be added to the request body. */ @@ -11586,7 +16976,7 @@ export interface ConnectorUpdateSchedulingResponse { } export interface ConnectorUpdateServiceTypeRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id service_type: string /** All values in `body` will be added to the request body. */ @@ -11600,7 +16990,7 @@ export interface ConnectorUpdateServiceTypeResponse { } export interface ConnectorUpdateStatusRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id status: ConnectorConnectorStatus /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { connector_id?: never, status?: never } @@ -11614,7 +17004,7 @@ export interface ConnectorUpdateStatusResponse { } export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { -/** The UUID of the index to delete. Use the get dangling indices API to find the UUID. */ + /** The UUID of the index to delete. Use the get dangling indices API to find the UUID. */ index_uuid: Uuid /** This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index. */ accept_data_loss: boolean @@ -11631,9 +17021,10 @@ export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { -/** The UUID of the index to import. Use the get dangling indices API to locate the UUID. */ + /** The UUID of the index to import. Use the get dangling indices API to locate the UUID. */ index_uuid: Uuid - /** This parameter must be set to true to import a dangling index. Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. */ + /** This parameter must be set to true to import a dangling index. + * Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. */ accept_data_loss: boolean /** Specify timeout for connection to master */ master_timeout?: Duration @@ -11681,7 +17072,7 @@ export interface EnrichSummary { } export interface EnrichDeletePolicyRequest extends RequestBase { -/** Enrich policy to delete. */ + /** Enrich policy to delete. */ name: Name /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -11693,14 +17084,15 @@ export interface EnrichDeletePolicyRequest extends RequestBase { export type EnrichDeletePolicyResponse = AcknowledgedResponseBase -export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' +export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' | 'CANCELLED' export interface EnrichExecutePolicyExecuteEnrichPolicyStatus { phase: EnrichExecutePolicyEnrichPolicyPhase + step?: string } export interface EnrichExecutePolicyRequest extends RequestBase { -/** Enrich policy to execute. */ + /** Enrich policy to execute. */ name: Name /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** If `true`, the request blocks other enrich policy execution requests until complete. */ wait_for_completion?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, wait_for_completion?: never } @@ -11714,11 +17106,12 @@ export interface EnrichExecutePolicyRequest extends RequestBase { export interface EnrichExecutePolicyResponse { status?: EnrichExecutePolicyExecuteEnrichPolicyStatus - task_id?: TaskId + task?: TaskId } export interface EnrichGetPolicyRequest extends RequestBase { -/** Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. */ + /** Comma-separated list of enrich policy names used to limit the request. + * To return information for all enrich policies, omit this parameter. */ name?: Names /** Period to wait for a connection to the master node.
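 * As an illustration only (not part of the generated typings), a minimal usage sketch for this
 * request, assuming a connected `Client` instance named `client` from `@elastic/elasticsearch`
 * and an existing enrich policy with a placeholder name:
 * @example
 * const resp = await client.enrich.getPolicy({ name: 'users-policy' })
 * console.log(resp)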
*/ master_timeout?: Duration @@ -11733,7 +17126,7 @@ export interface EnrichGetPolicyResponse { } export interface EnrichPutPolicyRequest extends RequestBase { -/** Name of the enrich policy to create or update. */ + /** Name of the enrich policy to create or update. */ name: Name /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -11776,7 +17169,7 @@ export interface EnrichStatsExecutingPolicy { } export interface EnrichStatsRequest extends RequestBase { -/** Period to wait for a connection to the master node. */ + /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -11785,42 +17178,63 @@ export interface EnrichStatsRequest extends RequestBase { } export interface EnrichStatsResponse { + /** Objects containing information about each coordinating ingest node for configured enrich processors. */ coordinator_stats: EnrichStatsCoordinatorStats[] + /** Objects containing information about each enrich policy that is currently executing. */ executing_policies: EnrichStatsExecutingPolicy[] + /** Objects containing information about the enrich cache stats on each ingest node. */ cache_stats?: EnrichStatsCacheStats[] } export interface EqlEqlHits { + /** Metadata about the number of matching events or sequences. */ total?: SearchTotalHits + /** Contains events matching the query. Each object represents a matching event. */ events?: EqlHitsEvent[] + /** Contains event sequences matching the query. Each object represents a matching sequence. This parameter is only returned for EQL queries containing a sequence. */ sequences?: EqlHitsSequence[] } export interface EqlEqlSearchResponseBase { + /** Identifier for the search. */ id?: Id + /** If true, the response does not contain complete search results. */ is_partial?: boolean + /** If true, the search request is still executing. */ is_running?: boolean + /** Milliseconds it took Elasticsearch to execute the request. */ took?: DurationValue + /** If true, the request timed out before completion. */ timed_out?: boolean + /** Contains matching events and sequences. Also contains related metadata. */ hits: EqlEqlHits + /** Contains information about shard failures (if any), in case allow_partial_search_results=true */ shard_failures?: ShardFailure[] } export interface EqlHitsEvent { + /** Name of the index containing the event. */ _index: IndexName + /** Unique identifier for the event. This ID is only unique within the index. */ _id: Id + /** Original JSON body passed for the event at index time. */ _source: TEvent + /** Set to `true` for events in a timespan-constrained sequence that do not meet a given condition. */ missing?: boolean fields?: Record } export interface EqlHitsSequence { + /** Contains events matching the query. Each object represents a matching event. */ events: EqlHitsEvent[] + /** Shared field values used to constrain matches in the sequence. These are defined using the by keyword in the EQL query syntax. */ join_keys?: any[] } export interface EqlDeleteRequest extends RequestBase { -/** Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. */ + /** Identifier for the search to delete. + * A search ID is provided in the EQL search API's response for an async search. 
+ * A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -11831,11 +17245,13 @@ export interface EqlDeleteRequest extends RequestBase { export type EqlDeleteResponse = AcknowledgedResponseBase export interface EqlGetRequest extends RequestBase { -/** Identifier for the search. */ + /** Identifier for the search. */ id: Id - /** Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request. */ + /** Period for which the search and its results are stored on the cluster. + * Defaults to the keep_alive value set by the search’s EQL search API request. */ keep_alive?: Duration - /** Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. */ + /** Timeout duration to wait for the request to finish. + * Defaults to no timeout, meaning the request waits for complete search results. */ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, keep_alive?: never, wait_for_completion_timeout?: never } @@ -11846,7 +17262,7 @@ export interface EqlGetRequest extends RequestBase { export type EqlGetResponse = EqlEqlSearchResponseBase export interface EqlGetStatusRequest extends RequestBase { -/** Identifier for the search. */ + /** Identifier for the search. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -11855,16 +17271,22 @@ export interface EqlGetStatusRequest extends RequestBase { } export interface EqlGetStatusResponse { + /** Identifier for the search. */ id: Id + /** If true, the response does not contain complete search results. This could be because either the search is still running (is_running status is true), or because it is already completed (is_running status is false) and results are partial due to failures or timeouts. */ is_partial: boolean + /** If true, the search request is still executing. If false, the search is completed. */ is_running: boolean + /** For a running search, shows a timestamp of when the eql search started, in milliseconds since the Unix epoch. */ start_time_in_millis?: EpochTime + /** Shows a timestamp of when the eql search will expire, in milliseconds since the Unix epoch. When this time is reached, the search and its results are deleted, even if the search is still ongoing. */ expiration_time_in_millis?: EpochTime + /** For a completed search, shows the HTTP status code of the completed search. */ completion_status?: integer } export interface EqlSearchRequest extends RequestBase { -/** The name of the index to scope the operation */ + /** The name of the index to scope the operation */ index: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_unavailable?: boolean query?: string case_sensitive?: boolean event_category_field?: Field tiebreaker_field?: Field timestamp_field?: Field fetch_size?: uint filter?: QueryDslQueryContainer | QueryDslQueryContainer[] keep_alive?: Duration keep_on_completion?: boolean wait_for_completion_timeout?: Duration - /** Allow query execution also in case of shard failures.
+ * If true, the query will keep running and will return results based on the available shards. + * For sequences, the behavior can be further refined using allow_partial_sequence_results */ allow_partial_search_results?: boolean - /** This flag applies only to sequences and has effect only if allow_partial_search_results=true. If true, the sequence query will return results based on the available shards, ignoring the others. If false, the sequence query will return successfully, but will always have empty results. */ + /** This flag applies only to sequences and has effect only if allow_partial_search_results=true. + * If true, the sequence query will return results based on the available shards, ignoring the others. + * If false, the sequence query will return successfully, but will always have empty results. */ allow_partial_sequence_results?: boolean /** For basic queries, the maximum number of matching events to return. Defaults to 10 */ size?: uint @@ -11896,7 +17322,9 @@ export interface EqlSearchRequest extends RequestBase { fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[] result_position?: EqlSearchResultPosition runtime_mappings?: MappingRuntimeFields - /** By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. */ + /** By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` + * parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the + * `max_samples_per_key` parameter. Pipes are not supported for sample queries. */ max_samples_per_key?: integer /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } @@ -11926,17 +17354,23 @@ export type EsqlTableValuesLongDouble = double | double[] export type EsqlTableValuesLongValue = long | long[] export interface EsqlAsyncQueryRequest extends RequestBase { -/** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. */ + /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. */ allow_partial_results?: boolean - /** The character to use between values within a CSV row. It is valid only for the CSV format. */ + /** The character to use between values within a CSV row. + * It is valid only for the CSV format. */ delimiter?: string - /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. 
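+ * As an illustration only (not part of the generated typings), a minimal usage sketch for this
+ * request, assuming a connected `Client` instance named `client` from `@elastic/elasticsearch`
+ * with the `esql.asyncQuery` helper available, and a placeholder index name:
+ * @example
+ * const resp = await client.esql.asyncQuery({
+ *   query: 'FROM my-index | STATS count = COUNT(*)',
+ *   wait_for_completion_timeout: '2s',
+ *   keep_on_completion: true
+ * })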
*/ + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean /** A short version of the Accept header, for example `json` or `yaml`. */ format?: EsqlEsqlFormat - /** The period for which the query and its results are stored in the cluster. The default period is five days. When this period expires, the query and its results are deleted, even if the query is still ongoing. If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. */ + /** The period for which the query and its results are stored in the cluster. + * The default period is five days. + * When this period expires, the query and its results are deleted, even if the query is still ongoing. + * If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. */ keep_alive?: Duration - /** Indicates whether the query and its results are stored in the cluster. If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. */ + /** Indicates whether the query and its results are stored in the cluster. + * If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. */ keep_on_completion?: boolean /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */ columnar?: boolean @@ -11945,15 +17379,24 @@ export interface EsqlAsyncQueryRequest extends RequestBase { locale?: string /** To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */ params?: FieldValue[] - /** If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. */ + /** If provided and `true` the response will include an extra `profile` object + * with information on how the query was executed. This information is for human debugging + * and its format can change at any time but it can give some insight into the performance + * of each part of the query. */ profile?: boolean /** The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. */ query: string - /** Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. */ + /** Tables to use with the LOOKUP operation. The top level key is the table + * name and the next level key is the column name. 
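+ * As an illustration only (not part of the generated typings) of the nesting described above,
+ * with placeholder table and column names, where the innermost key names the column value type:
+ * @example
+ * tables: { my_table: { name: { keyword: ['alice', 'bob'] }, age: { long: [30, 40] } } }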
 */
  tables?: Record<string, Record<string, EsqlTableValuesContainer>>
-  /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. */
+  /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters`
+   * object with information about the clusters that participated in the search along with info such as shards
+   * count. */
  include_ccs_metadata?: boolean
-  /** The period to wait for the request to finish. By default, the request waits for 1 second for the query results. If the query completes during this period, results are returned Otherwise, a query ID is returned that can later be used to retrieve the results. */
+  /** The period to wait for the request to finish.
+   * By default, the request waits for 1 second for the query results.
+   * If the query completes during this period, results are returned.
+   * Otherwise, a query ID is returned that can later be used to retrieve the results. */
  wait_for_completion_timeout?: Duration
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never }
@@ -11964,7 +17407,9 @@ export interface EsqlAsyncQueryRequest extends RequestBase {
 export type EsqlAsyncQueryResponse = EsqlResult
 
 export interface EsqlAsyncQueryDeleteRequest extends RequestBase {
-/** The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */
+  /** The unique identifier of the query.
+   * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time.
+   * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */
   id: Id
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { id?: never }
@@ -11975,13 +17420,20 @@ export interface EsqlAsyncQueryDeleteRequest extends RequestBase {
 export type EsqlAsyncQueryDeleteResponse = AcknowledgedResponseBase
 
 export interface EsqlAsyncQueryGetRequest extends RequestBase {
-/** The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */
+  /** The unique identifier of the query.
+   * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time.
+   * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */
   id: Id
-  /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. 
*/ + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean - /** The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. */ + /** The period for which the query and its results are stored in the cluster. + * When this period expires, the query and its results are deleted, even if the query is still ongoing. */ keep_alive?: Duration - /** The period to wait for the request to finish. By default, the request waits for complete query results. If the request completes during the period specified in this parameter, complete query results are returned. Otherwise, the response returns an `is_running` value of `true` and no results. */ + /** The period to wait for the request to finish. + * By default, the request waits for complete query results. + * If the request completes during the period specified in this parameter, complete query results are returned. + * Otherwise, the response returns an `is_running` value of `true` and no results. */ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never } @@ -11992,9 +17444,12 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase { export type EsqlAsyncQueryGetResponse = EsqlResult export interface EsqlAsyncQueryStopRequest extends RequestBase { -/** The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ + /** The unique identifier of the query. + * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. + * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ id: Id - /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never } @@ -12005,11 +17460,12 @@ export interface EsqlAsyncQueryStopRequest extends RequestBase { export type EsqlAsyncQueryStopResponse = EsqlResult export interface EsqlQueryRequest extends RequestBase { -/** A short version of the Accept header, e.g. json, yaml. */ + /** A short version of the Accept header, e.g. json, yaml. */ format?: EsqlEsqlFormat /** The character to use between values within a CSV row. Only valid for the CSV format. 
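+   *
+   * @example
+   * An illustrative sketch (assumes an already-constructed `Client` instance
+   * named `client`; the index pattern is a placeholder): request CSV output
+   * with a semicolon delimiter.
+   * ```ts
+   * const csv = await client.esql.query({
+   *   format: 'csv',
+   *   delimiter: ';',
+   *   query: 'FROM logs-* | LIMIT 10'
+   * })
+   * ```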
 */
   delimiter?: string
-  /** Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. */
+  /** Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results?
+   * Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. */
   drop_null_columns?: boolean
   /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. */
   allow_partial_results?: boolean
@@ -12020,13 +17476,19 @@ export interface EsqlQueryRequest extends RequestBase {
   locale?: string
   /** To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */
   params?: FieldValue[]
-  /** If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. */
+  /** If provided and `true` the response will include an extra `profile` object
+   * with information on how the query was executed. This information is for human debugging
+   * and its format can change at any time but it can give some insight into the performance
+   * of each part of the query. */
   profile?: boolean
   /** The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. */
   query: string
-  /** Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. */
+  /** Tables to use with the LOOKUP operation. The top level key is the table
+   * name and the next level key is the column name. */
   tables?: Record<string, Record<string, EsqlTableValuesContainer>>
-  /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. */
+  /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters`
+   * object with information about the clusters that participated in the search along with info such as shards
+   * count. */
   include_ccs_metadata?: boolean
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never }
@@ -12042,7 +17504,7 @@ export interface EsqlQueryRequest extends RequestBase {
 export interface FeaturesFeature {
 }
 
 export interface FeaturesGetFeaturesRequest extends RequestBase {
-/** Period to wait for a connection to the master node. */
+  /** Period to wait for a connection to the master node. */
   master_timeout?: Duration
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { master_timeout?: never }
@@ -12055,7 +17517,7 @@ export interface FeaturesGetFeaturesRequest extends RequestBase {
 export interface FeaturesGetFeaturesResponse {
 }
 
 export interface FeaturesResetFeaturesRequest extends RequestBase {
-/** Period to wait for a connection to the master node. 
*/ + /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -12070,13 +17532,17 @@ export interface FeaturesResetFeaturesResponse { export type FleetCheckpoint = long export interface FleetGlobalCheckpointsRequest extends RequestBase { -/** A single index or index alias that resolves to a single index. */ + /** A single index or index alias that resolves to a single index. */ index: IndexName | IndexAlias - /** A boolean value which controls whether to wait (until the timeout) for the global checkpoints to advance past the provided `checkpoints`. */ + /** A boolean value which controls whether to wait (until the timeout) for the global checkpoints + * to advance past the provided `checkpoints`. */ wait_for_advance?: boolean - /** A boolean value which controls whether to wait (until the timeout) for the target index to exist and all primary shards be active. Can only be true when `wait_for_advance` is true. */ + /** A boolean value which controls whether to wait (until the timeout) for the target index to exist + * and all primary shards be active. Can only be true when `wait_for_advance` is true. */ wait_for_index?: boolean - /** A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list will cause Elasticsearch to immediately return the current global checkpoints. */ + /** A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, + * the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list + * will cause Elasticsearch to immediately return the current global checkpoints. */ checkpoints?: FleetCheckpoint[] /** Period to wait for a global checkpoints to advance past `checkpoints`. */ timeout?: Duration @@ -12092,7 +17558,7 @@ export interface FleetGlobalCheckpointsResponse { } export interface FleetMsearchRequest extends RequestBase { -/** A single target to search. If the target is an index alias, it must resolve to a single index. */ + /** A single target to search. If the target is an index alias, it must resolve to a single index. */ index?: IndexName | IndexAlias /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean @@ -12105,9 +17571,9 @@ export interface FleetMsearchRequest extends RequestBase { /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean /** Maximum number of concurrent searches the multi search API can execute. */ - max_concurrent_searches?: long + max_concurrent_searches?: integer /** Maximum number of concurrent shard requests that each sub-search request executes per node. */ - max_concurrent_shard_requests?: long + max_concurrent_shard_requests?: integer /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */ pre_filter_shard_size?: long /** Indicates whether global term and document frequencies should be used when scoring returned documents. */ @@ -12116,9 +17582,13 @@ export interface FleetMsearchRequest extends RequestBase { rest_total_hits_as_int?: boolean /** Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. */ typed_keys?: boolean - /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. */ + /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard + * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause + * Elasticsearch to immediately execute the search. */ wait_for_checkpoints?: FleetCheckpoint[] - /** If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. */ + /** If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns + * an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` + * which is true by default. */ allow_partial_search_results?: boolean searches?: MsearchRequestItem[] /** All values in `body` will be added to the request body. */ @@ -12132,7 +17602,7 @@ export interface FleetMsearchResponse { } export interface FleetSearchRequest extends RequestBase { -/** A single target to search. If the target is an index alias, it must resolve to a single index. */ + /** A single target to search. If the target is an index alias, it must resolve to a single index. */ index: IndexName | IndexAlias allow_no_indices?: boolean analyzer?: string @@ -12145,7 +17615,7 @@ export interface FleetSearchRequest extends RequestBase { ignore_throttled?: boolean ignore_unavailable?: boolean lenient?: boolean - max_concurrent_shard_requests?: long + max_concurrent_shard_requests?: integer preference?: string pre_filter_shard_size?: long request_cache?: boolean @@ -12163,9 +17633,13 @@ export interface FleetSearchRequest extends RequestBase { _source_excludes?: Fields _source_includes?: Fields q?: string - /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. */ + /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard + * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause + * Elasticsearch to immediately execute the search. 
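+   *
+   * @example
+   * A minimal sketch (the index name and checkpoint value are assumptions):
+   * only run the search once the shard has advanced past checkpoint 42.
+   * ```ts
+   * const response = await client.fleet.search({
+   *   index: '.fleet-agents',
+   *   wait_for_checkpoints: [42],
+   *   query: { match_all: {} }
+   * })
+   * ```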
*/ wait_for_checkpoints?: FleetCheckpoint[] - /** If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. */ + /** If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns + * an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` + * which is true by default. */ allow_partial_search_results?: boolean aggregations?: Record /** @alias aggregations */ @@ -12175,16 +17649,23 @@ export interface FleetSearchRequest extends RequestBase { explain?: boolean /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record - /** Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. */ + /** Starting document offset. By default, you cannot page through more than 10,000 + * hits using the from and size parameters. To page through more hits, use the + * search_after parameter. */ from?: integer highlight?: SearchHighlight - /** Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. */ + /** Number of hits matching the query to count accurately. If true, the exact + * number of hits is returned at the cost of some performance. If false, the + * response does not include the total number of hits matching the query. + * Defaults to 10,000 hits. */ track_total_hits?: SearchTrackHits /** Boosts the _score of documents from specified indices. */ - indices_boost?: Record[] - /** Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. */ + indices_boost?: Partial>[] + /** Array of wildcard (*) patterns. The request returns doc values for field + * names matching these patterns in the hits.fields property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - /** Minimum _score for matching documents. Documents with a lower _score are not included in the search results. */ + /** Minimum _score for matching documents. Documents with a lower _score are + * not included in search results and results collected by aggregations. */ min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -12194,32 +17675,48 @@ export interface FleetSearchRequest extends RequestBase { /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record search_after?: SortResults - /** The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. */ + /** The number of hits to return. By default, you cannot page through more + * than 10,000 hits using the from and size parameters. To page through more + * hits, use the search_after parameter. 
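+   *
+   * @example
+   * A sketch of paging past 10,000 hits with `search_after` rather than growing
+   * `from`/`size` (the index name and sort field are assumptions):
+   * ```ts
+   * const first = await client.fleet.search({ index: '.fleet-agents', size: 100, sort: ['@timestamp'] })
+   * const next = await client.fleet.search({
+   *   index: '.fleet-agents',
+   *   size: 100,
+   *   sort: ['@timestamp'],
+   *   search_after: first.hits.hits.at(-1)?.sort
+   * })
+   * ```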
 */
   size?: integer
   slice?: SlicedScroll
   sort?: Sort
-  /** Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. */
+  /** Indicates which source fields are returned for matching documents. These
+   * fields are returned in the hits._source property of the search response. */
   _source?: SearchSourceConfig
-  /** Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. */
+  /** Array of wildcard (*) patterns. The request returns values for field names
+   * matching these patterns in the hits.fields property of the response. */
   fields?: (QueryDslFieldAndFormat | Field)[]
   suggest?: SearchSuggester
-  /** Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. */
+  /** Maximum number of documents to collect for each shard. If a query reaches this
+   * limit, Elasticsearch terminates the query early. Elasticsearch collects documents
+   * before sorting. Defaults to 0, which does not terminate query execution early. */
   terminate_after?: long
-  /** Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */
+  /** Specifies the period of time to wait for a response from each shard. If no response
+   * is received before the timeout expires, the request fails and returns an error.
+   * Defaults to no timeout. */
   timeout?: string
   /** If true, calculate and return document scores, even if the scores are not used for sorting. */
   track_scores?: boolean
   /** If true, returns document version as part of a hit. */
   version?: boolean
-  /** If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. */
+  /** If true, returns sequence number and primary term of the last modification
+   * of each hit. See Optimistic concurrency control. */
   seq_no_primary_term?: boolean
-  /** List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. */
+  /** List of stored fields to return as part of a hit. If no fields are specified,
+   * no stored fields are included in the response. If this field is specified, the _source
+   * parameter defaults to false. You can pass _source: true to return both source fields
+   * and stored fields in the search response. */
   stored_fields?: Fields
-  /** Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an <index> in the request path. */
+  /** Limits the search to a point in time (PIT). If you provide a PIT, you
+   * cannot specify an <index> in the request path. */
   pit?: SearchPointInTimeReference
-  /** Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */
+  /** Defines one or more runtime fields in the search request. These fields take
+   * precedence over mapped fields with the same name. */
   runtime_mappings?: MappingRuntimeFields
-  /** Stats groups to associate with the search. 
Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. */ + /** Stats groups to associate with the search. Each group maintains a statistics + * aggregation for its associated searches. You can retrieve these stats using + * the indices stats API. */ stats?: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } @@ -12252,15 +17749,28 @@ export interface GraphConnection { } export interface GraphExploreControls { + /** To avoid the top-matching documents sample being dominated by a single source of results, it is sometimes necessary to request diversity in the sample. + * You can do this by selecting a single-value field and setting a maximum number of documents per value for that field. */ sample_diversity?: GraphSampleDiversity + /** Each hop considers a sample of the best-matching documents on each shard. + * Using samples improves the speed of execution and keeps exploration focused on meaningfully-connected terms. + * Very small values (less than 50) might not provide sufficient weight-of-evidence to identify significant connections between terms. + * Very large sample sizes can dilute the quality of the results and increase execution times. */ sample_size?: integer + /** The length of time in milliseconds after which exploration will be halted and the results gathered so far are returned. + * This timeout is honored on a best-effort basis. + * Execution might overrun this timeout if, for example, a long pause is encountered while FieldData is loaded for a field. */ timeout?: Duration + /** Filters associated terms so only those that are significantly associated with your query are included. */ use_significance: boolean } export interface GraphHop { + /** Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. */ connections?: GraphHop + /** An optional guiding query that constrains the Graph API as it explores connected terms. */ query?: QueryDslQueryContainer + /** Contains the fields you are interested in. 
*/ vertices: GraphVertexDefinition[] } @@ -12277,11 +17787,18 @@ export interface GraphVertex { } export interface GraphVertexDefinition { + /** Prevents the specified terms from being included in the results. */ exclude?: string[] + /** Identifies a field in the documents of interest. */ field: Field + /** Identifies the terms of interest that form the starting points from which you want to spider out. */ include?: (GraphVertexInclude | string)[] + /** Specifies how many documents must contain a pair of terms before it is considered to be a useful connection. + * This setting acts as a certainty threshold. */ min_doc_count?: long + /** Controls how many documents on a particular shard have to contain a pair of terms before the connection is returned for global consideration. */ shard_min_doc_count?: long + /** Specifies the maximum number of vertex terms returned for each field. */ size?: integer } @@ -12291,11 +17808,13 @@ export interface GraphVertexInclude { } export interface GraphExploreRequest extends RequestBase { -/** Name of the index. */ + /** Name of the index. */ index: Indices /** Custom value used to route operations to a specific shard. */ routing?: Routing - /** Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */ + /** Specifies the period of time to wait for a response from each shard. + * If no response is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: Duration /** Specifies or more fields from which you want to extract terms that are associated with the specified vertices. */ connections?: GraphHop @@ -12320,18 +17839,31 @@ export interface GraphExploreResponse { } export interface IlmActions { + /** Phases allowed: warm, cold. */ allocate?: IlmAllocateAction + /** Phases allowed: delete. */ delete?: IlmDeleteAction + /** Phases allowed: hot, warm, cold. */ downsample?: IlmDownsampleAction + /** The freeze action is a noop in 8.x */ freeze?: EmptyObject + /** Phases allowed: hot, warm. */ forcemerge?: IlmForceMergeAction + /** Phases allowed: warm, cold. */ migrate?: IlmMigrateAction + /** Phases allowed: hot, warm, cold. */ readonly?: EmptyObject + /** Phases allowed: hot. */ rollover?: IlmRolloverAction + /** Phases allowed: hot, warm, cold. */ set_priority?: IlmSetPriorityAction + /** Phases allowed: hot, cold, frozen. */ searchable_snapshot?: IlmSearchableSnapshotAction + /** Phases allowed: hot, warm. */ shrink?: IlmShrinkAction + /** Phases allowed: hot, warm, cold, frozen. */ unfollow?: EmptyObject + /** Phases allowed: delete. */ wait_for_snapshot?: IlmWaitForSnapshotAction } @@ -12376,6 +17908,7 @@ export interface IlmPhases { export interface IlmPolicy { phases: IlmPhases + /** Arbitrary metadata that is not automatically generated or used by Elasticsearch. */ _meta?: Metadata } @@ -12412,7 +17945,7 @@ export interface IlmWaitForSnapshotAction { } export interface IlmDeleteLifecycleRequest extends RequestBase { -/** Identifier for the policy. */ + /** Identifier for the policy. */ name: Name /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
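+   *
+   * @example
+   * A minimal usage sketch (the policy name is a placeholder):
+   * ```ts
+   * await client.ilm.deleteLifecycle({ name: 'logs-policy', master_timeout: '30s' })
+   * ```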
*/ master_timeout?: Duration @@ -12471,7 +18004,8 @@ export interface IlmExplainLifecycleLifecycleExplainUnmanaged { } export interface IlmExplainLifecycleRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). + * To target all data streams and indices, use `*` or `_all`. */ index: IndexName /** Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to an encountering an error while executing the policy, or attempting to use a policy that does not exist. */ only_errors?: boolean @@ -12496,7 +18030,7 @@ export interface IlmGetLifecycleLifecycle { } export interface IlmGetLifecycleRequest extends RequestBase { -/** Identifier for the policy. */ + /** Identifier for the policy. */ name?: Name /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -12522,9 +18056,12 @@ export interface IlmGetStatusResponse { } export interface IlmMigrateToDataTiersRequest extends RequestBase { -/** If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated. */ + /** If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. + * This provides a way to retrieve the indices and ILM policies that need to be migrated. */ dry_run?: boolean - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration legacy_template_to_delete?: string node_attribute?: string @@ -12536,16 +18073,23 @@ export interface IlmMigrateToDataTiersRequest extends RequestBase { export interface IlmMigrateToDataTiersResponse { dry_run: boolean + /** The name of the legacy index template that was deleted. + * This information is missing if no legacy index templates were deleted. */ removed_legacy_template: string + /** The ILM policies that were updated. */ migrated_ilm_policies: string[] + /** The indices that were migrated to tier preference routing. */ migrated_indices: Indices + /** The legacy index templates that were updated to not contain custom routing settings for the provided data attribute. */ migrated_legacy_templates: string[] + /** The composable index templates that were updated to not contain custom routing settings for the provided data attribute. */ migrated_composable_templates: string[] + /** The component templates that were updated to not contain custom routing settings for the provided data attribute. 
*/ migrated_component_templates: string[] } export interface IlmMoveToStepRequest extends RequestBase { -/** The name of the index whose lifecycle step is to change */ + /** The name of the index whose lifecycle step is to change */ index: IndexName /** The step that the index is expected to be in. */ current_step: IlmMoveToStepStepKey @@ -12560,13 +18104,15 @@ export interface IlmMoveToStepRequest extends RequestBase { export type IlmMoveToStepResponse = AcknowledgedResponseBase export interface IlmMoveToStepStepKey { + /** The optional action to which the index will be moved. */ action?: string + /** The optional step name to which the index will be moved. */ name?: string phase: string } export interface IlmPutLifecycleRequest extends RequestBase { -/** Identifier for the policy. */ + /** Identifier for the policy. */ name: Name /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -12582,7 +18128,7 @@ export interface IlmPutLifecycleRequest extends RequestBase { export type IlmPutLifecycleResponse = AcknowledgedResponseBase export interface IlmRemovePolicyRequest extends RequestBase { -/** The name of the index to remove policy on */ + /** The name of the index to remove policy on */ index: IndexName /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never } @@ -12596,7 +18142,7 @@ export interface IlmRemovePolicyResponse { } export interface IlmRetryRequest extends RequestBase { -/** The name of the indices (comma-separated) whose failed lifecycle step is to be retry */ + /** The name of the indices (comma-separated) whose failed lifecycle step is to be retry */ index: IndexName /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never } @@ -12607,7 +18153,7 @@ export interface IlmRetryRequest extends RequestBase { export type IlmRetryResponse = AcknowledgedResponseBase export interface IlmStartRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -12620,7 +18166,7 @@ export interface IlmStartRequest extends RequestBase { export type IlmStartResponse = AcknowledgedResponseBase export interface IlmStopRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -12633,20 +18179,38 @@ export interface IlmStopRequest extends RequestBase { export type IlmStopResponse = AcknowledgedResponseBase export interface IndicesAlias { + /** Query used to limit documents the alias can access. 
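+   *
+   * @example
+   * An illustrative sketch (the index, alias, and field names are assumptions):
+   * add a filtered alias so searches through it only match one team's documents.
+   * ```ts
+   * await client.indices.updateAliases({
+   *   actions: [
+   *     { add: { index: 'logs-2024', alias: 'team-a-logs', filter: { term: { 'team.id': 'a' } } } }
+   *   ]
+   * })
+   * ```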
*/ filter?: QueryDslQueryContainer + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. */ index_routing?: Routing + /** If `true`, the alias is hidden. + * All indices for the alias must have the same `is_hidden` value. */ is_hidden?: boolean + /** If `true`, the index is the write index for the alias. */ is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. */ routing?: Routing + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. */ search_routing?: Routing } export interface IndicesAliasDefinition { + /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. */ index_routing?: string + /** If `true`, the index is the write index for the alias. */ is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. */ routing?: string + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. */ search_routing?: string + /** If `true`, the alias is hidden. + * All indices for the alias must have the same `is_hidden` value. */ is_hidden?: boolean } @@ -12655,42 +18219,80 @@ export interface IndicesCacheQueries { } export interface IndicesDataStream { + /** Custom metadata for the stream, copied from the `_meta` object of the stream’s matching index template. + * If empty, the response omits this property. */ _meta?: Metadata + /** If `true`, the data stream allows custom routing on write request. */ allow_custom_routing?: boolean + /** Information about failure store backing indices */ failure_store?: IndicesFailureStore + /** Current generation for the data stream. This number acts as a cumulative count of the stream’s rollovers, starting at 1. */ generation: integer + /** If `true`, the data stream is hidden. */ hidden: boolean + /** Name of the current ILM lifecycle policy in the stream’s matching index template. + * This lifecycle policy is set in the `index.lifecycle.name` setting. + * If the template does not include a lifecycle policy, this property is not included in the response. + * NOTE: A data stream’s backing indices may be assigned different lifecycle policies. To retrieve the lifecycle policy for individual backing indices, use the get index settings API. */ ilm_policy?: Name + /** Name of the lifecycle system that'll manage the next generation of the data stream. */ next_generation_managed_by: IndicesManagedBy + /** Indicates if ILM should take precedence over DSL in case both are configured to managed this data stream. */ prefer_ilm: boolean + /** Array of objects containing information about the data stream’s backing indices. + * The last item in this array contains information about the stream’s current write index. */ indices: IndicesDataStreamIndex[] + /** Contains the configuration for the data stream lifecycle of this data stream. */ lifecycle?: IndicesDataStreamLifecycleWithRollover + /** Name of the data stream. */ name: DataStreamName + /** If `true`, the data stream is created and managed by cross-cluster replication and the local cluster can not write into this data stream or change its mappings. 
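+   *
+   * @example
+   * A small sketch (the stream pattern is a placeholder): list data streams and
+   * report which ones are managed by cross-cluster replication.
+   * ```ts
+   * const { data_streams } = await client.indices.getDataStream({ name: 'logs-*' })
+   * for (const ds of data_streams) console.log(ds.name, ds.replicated ?? false)
+   * ```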
*/ replicated?: boolean + /** If `true`, the next write to this data stream will trigger a rollover first and the document will be indexed in the new backing index. If the rollover fails the indexing request will fail too. */ rollover_on_write: boolean + /** Health status of the data stream. + * This health status is based on the state of the primary and replica shards of the stream’s backing indices. */ status: HealthStatus + /** If `true`, the data stream is created and managed by an Elastic stack component and cannot be modified through normal user interaction. */ system?: boolean + /** Name of the index template used to create the data stream’s backing indices. + * The template’s index pattern must match the name of this data stream. */ template: Name + /** Information about the `@timestamp` field in the data stream. */ timestamp_field: IndicesDataStreamTimestampField + /** The index mode for the data stream that will be used for newly created backing indices. */ index_mode?: IndicesIndexMode } export interface IndicesDataStreamIndex { + /** Name of the backing index. */ index_name: IndexName + /** Universally unique identifier (UUID) for the index. */ index_uuid: Uuid + /** Name of the current ILM lifecycle policy configured for this backing index. */ ilm_policy?: Name + /** Name of the lifecycle system that's currently managing this backing index. */ managed_by?: IndicesManagedBy + /** Indicates if ILM should take precedence over DSL in case both are configured to manage this index. */ prefer_ilm?: boolean + /** The index mode of this backing index of the data stream. */ index_mode?: IndicesIndexMode } export interface IndicesDataStreamLifecycle { + /** If defined, every document added to this data stream will be stored at least for this time frame. + * Any time after this duration the document could be deleted. + * When empty, every document in this data stream will be stored indefinitely. */ data_retention?: Duration + /** The downsampling configuration to execute for the managed backing index after rollover. */ downsampling?: IndicesDataStreamLifecycleDownsampling + /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle + * that's disabled (enabled: `false`) will have no effect on the data stream. */ enabled?: boolean } export interface IndicesDataStreamLifecycleDownsampling { + /** The list of downsampling rounds to execute as part of this downsampling configuration */ rounds: IndicesDownsamplingRound[] } @@ -12708,10 +18310,14 @@ export interface IndicesDataStreamLifecycleRolloverConditions { } export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStreamLifecycle { + /** The conditions which will trigger the rollover of a backing index as configured by the cluster setting `cluster.lifecycle.default.rollover`. + * This property is an implementation detail and it will only be retrieved when the query param `include_defaults` is set to true. + * The contents of this field are subject to change. */ rollover?: IndicesDataStreamLifecycleRolloverConditions } export interface IndicesDataStreamTimestampField { + /** Name of the timestamp field for the data stream, which must be `@timestamp`. The `@timestamp` field must be included in every document indexed to the data stream. */ name: Field } @@ -12721,11 +18327,14 @@ export interface IndicesDataStreamVisibility { } export interface IndicesDownsampleConfig { + /** The interval at which to aggregate the original time series index. 
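+   *
+   * @example
+   * A sketch under stated assumptions (the index names are placeholders, and
+   * the flattened `config` body key follows the client's request style):
+   * aggregate a time series index into one document per hour.
+   * ```ts
+   * await client.indices.downsample({
+   *   index: 'my-tsds-2024.05.01',
+   *   target_index: 'my-tsds-2024.05.01-downsampled',
+   *   config: { fixed_interval: '1h' }
+   * })
+   * ```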
*/ fixed_interval: DurationLarge } export interface IndicesDownsamplingRound { + /** The duration since rollover when this downsampling round should execute */ after: Duration + /** The downsample configuration to execute. */ config: IndicesDownsampleConfig } @@ -12799,7 +18408,9 @@ export interface IndicesIndexSettingsKeys { routing_path?: string | string[] soft_deletes?: IndicesSoftDeletes sort?: IndicesIndexSegmentSort + /** @remarks This property is not supported on Elastic Cloud Serverless. */ number_of_shards?: integer | string + /** @remarks This property is not supported on Elastic Cloud Serverless. */ number_of_replicas?: integer | string number_of_routing_shards?: integer check_on_startup?: IndicesIndexCheckOnStartup @@ -12820,6 +18431,7 @@ export interface IndicesIndexSettingsKeys { max_shingle_diff?: integer blocks?: IndicesIndexSettingBlocks max_refresh_listeners?: integer + /** Settings to define analyzers, tokenizers, token filters and character filters. */ analyze?: IndicesSettingsAnalyze highlight?: IndicesSettingsHighlight max_terms_count?: integer @@ -12845,10 +18457,14 @@ export interface IndicesIndexSettingsKeys { settings?: IndicesIndexSettings time_series?: IndicesIndexSettingsTimeSeries queries?: IndicesQueries + /** Configure custom similarity settings to customize how search results are scored. */ similarity?: Record + /** Enable or disable dynamic mapping for an index. */ mapping?: IndicesMappingLimitSettings 'indexing.slowlog'?: IndicesIndexingSlowlogSettings + /** Configure indexing back pressure limits. */ indexing_pressure?: IndicesIndexingPressure + /** The store module allows you to control how index data is stored and accessed on disk. */ store?: IndicesStorage } export type IndicesIndexSettings = IndicesIndexSettingsKeys @@ -12863,16 +18479,33 @@ export interface IndicesIndexSettingsAnalysis { } export interface IndicesIndexSettingsLifecycle { + /** The name of the policy to use to manage the index. For information about how Elasticsearch applies policy changes, see Policy updates. */ name?: Name + /** Indicates whether or not the index has been rolled over. Automatically set to true when ILM completes the rollover action. + * You can explicitly set it to skip rollover. */ indexing_complete?: SpecUtilsStringified + /** If specified, this is the timestamp used to calculate the index age for its phase transitions. Use this setting + * if you create a new index that contains old data and want to use the original creation date to calculate the index + * age. Specified as a Unix epoch value in milliseconds. */ origination_date?: long + /** Set to true to parse the origination date from the index name. This origination date is used to calculate the index age + * for its phase transitions. The index name must match the pattern ^.*-{date_format}-\\d+, where the date_format is + * yyyy.MM.dd and the trailing digits are optional. An index that was rolled over would normally match the full format, + * for example logs-2016.10.31-000002). If the index name doesn’t match the pattern, index creation fails. */ parse_origination_date?: boolean step?: IndicesIndexSettingsLifecycleStep + /** The index alias to update when the index rolls over. Specify when using a policy that contains a rollover action. + * When the index rolls over, the alias is updated to reflect that the index is no longer the write index. For more + * information about rolling indices, see Rollover. 
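+   *
+   * @example
+   * An illustrative sketch (policy, alias, and index names are placeholders):
+   * bootstrap the first managed index and mark it as the write index for the
+   * rollover alias named in this setting.
+   * ```ts
+   * await client.indices.create({
+   *   index: 'logs-000001',
+   *   aliases: { logs: { is_write_index: true } },
+   *   settings: {
+   *     'index.lifecycle.name': 'logs-policy',
+   *     'index.lifecycle.rollover_alias': 'logs'
+   *   }
+   * })
+   * ```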
*/ rollover_alias?: string + /** Preference for the system that manages a data stream backing index (preferring ILM when both ILM and DLM are + * applicable for an index). */ prefer_ilm?: boolean | string } export interface IndicesIndexSettingsLifecycleStep { + /** Time to wait for the cluster to resolve allocation issues during an ILM shrink action. Must be greater than 1h (1 hour). + * See Shard allocation for shrink. */ wait_time_threshold?: Duration } @@ -12885,32 +18518,63 @@ export interface IndicesIndexState { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings + /** Default settings, included when the request's `include_default` is `true`. */ defaults?: IndicesIndexSettings data_stream?: DataStreamName + /** Data stream lifecycle applicable if this is a data stream. */ lifecycle?: IndicesDataStreamLifecycle } export interface IndicesIndexTemplate { + /** Name of the index template. */ index_patterns: Names + /** An ordered list of component template names. + * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of: Name[] + /** Template to be applied. + * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ template?: IndicesIndexTemplateSummary + /** Version number used to manage index templates externally. + * This number is not automatically generated by Elasticsearch. */ version?: VersionNumber + /** Priority to determine index template precedence when a new data stream or index is created. + * The index template with the highest priority is chosen. + * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). + * This number is not automatically generated by Elasticsearch. */ priority?: long + /** Optional user metadata about the index template. May have any contents. + * This map is not automatically generated by Elasticsearch. */ _meta?: Metadata allow_auto_create?: boolean + /** If this object is included, the template is used to create data streams and their backing indices. + * Supports an empty object. + * Data streams require a matching index template with a `data_stream` object. */ data_stream?: IndicesIndexTemplateDataStreamConfiguration + /** Marks this index template as deprecated. + * When creating or updating a non-deprecated index template that uses deprecated components, + * Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** A list of component template names that are allowed to be absent. */ ignore_missing_component_templates?: Names } export interface IndicesIndexTemplateDataStreamConfiguration { + /** If true, the data stream is hidden. */ hidden?: boolean + /** If true, the data stream supports custom routing. */ allow_custom_routing?: boolean } export interface IndicesIndexTemplateSummary { + /** Aliases to add. + * If the index template includes a `data_stream` object, these are data stream aliases. + * Otherwise, these are index aliases. + * Data stream aliases ignore the `index_routing`, `routing`, and `search_routing` options. */ aliases?: Record + /** Mapping for fields in the index. + * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping + /** Configuration options for the index. 
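+   *
+   * @example
+   * An illustrative sketch (the template name and index pattern are
+   * assumptions): register a composable template whose `template` block carries
+   * these settings and mappings.
+   * ```ts
+   * await client.indices.putIndexTemplate({
+   *   name: 'logs-template',
+   *   index_patterns: ['logs-*'],
+   *   template: {
+   *     settings: { number_of_shards: '1' },
+   *     mappings: { properties: { '@timestamp': { type: 'date' } } }
+   *   }
+   * })
+   * ```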
*/ settings?: IndicesIndexSettings lifecycle?: IndicesDataStreamLifecycleWithRollover } @@ -12925,6 +18589,9 @@ export interface IndicesIndexingPressure { } export interface IndicesIndexingPressureMemory { + /** Number of outstanding bytes that may be consumed by indexing requests. When this limit is reached or exceeded, + * the node will reject new coordinating and primary operations. When replica operations consume 1.5x this limit, + * the node will reject new replica operations. Defaults to 10% of the heap. */ limit?: integer } @@ -12936,6 +18603,8 @@ export interface IndicesIndexingSlowlogSettings { } export interface IndicesIndexingSlowlogTresholds { + /** The indexing slow log, similar in functionality to the search slow log. The log file name ends with `_index_indexing_slowlog.json`. + * Log and the thresholds are configured in the same way as the search slowlog. */ index?: IndicesSlowlogTresholdLevels } @@ -12954,22 +18623,34 @@ export interface IndicesMappingLimitSettings { } export interface IndicesMappingLimitSettingsDepth { + /** The maximum depth for a field, which is measured as the number of inner objects. For instance, if all fields are defined + * at the root object level, then the depth is 1. If there is one object mapping, then the depth is 2, etc. */ limit?: long } export interface IndicesMappingLimitSettingsDimensionFields { + /** [preview] This functionality is in technical preview and may be changed or removed in a future release. + * Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. */ limit?: long } export interface IndicesMappingLimitSettingsFieldNameLength { + /** Setting for the maximum length of a field name. This setting isn’t really something that addresses mappings explosion but + * might still be useful if you want to limit the field length. It usually shouldn’t be necessary to set this setting. The + * default is okay unless a user starts to add a huge number of fields with really long names. Default is `Long.MAX_VALUE` (no limit). */ limit?: long } export interface IndicesMappingLimitSettingsNestedFields { + /** The maximum number of distinct nested mappings in an index. The nested type should only be used in special cases, when + * arrays of objects need to be queried independently of each other. To safeguard against poorly designed mappings, this + * setting limits the number of unique nested types per index. */ limit?: long } export interface IndicesMappingLimitSettingsNestedObjects { + /** The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps + * to prevent out of memory errors when a document contains too many nested objects. */ limit?: long } @@ -12978,7 +18659,15 @@ export interface IndicesMappingLimitSettingsSourceFields { } export interface IndicesMappingLimitSettingsTotalFields { + /** The maximum number of fields in an index. Field and object mappings, as well as field aliases count towards this limit. + * The limit is in place to prevent mappings and searches from becoming too large. Higher values can lead to performance + * degradations and memory issues, especially in clusters with a high load or few resources. */ limit?: long | string + /** This setting determines what happens when a dynamically mapped field would exceed the total fields limit. 
When set + * to false (the default), the index request of the document that tries to add a dynamic field to the mapping will fail + * with the message Limit of total fields [X] has been exceeded. When set to true, the index request will not fail. + * Instead, fields that would exceed the limit are not added to the mapping, similar to dynamic: false. + * The fields that were not added to the mapping will be added to the _ignored field. */ ignore_dynamic_beyond_limit?: boolean | string } @@ -13076,8 +18765,8 @@ export interface IndicesSettingsSimilarityLmj { export interface IndicesSettingsSimilarityScripted { type: 'scripted' - script: Script | string - weight_script?: Script | string + script: Script | ScriptSource + weight_script?: Script | ScriptSource } export interface IndicesSlowlogSettings { @@ -13100,7 +18789,12 @@ export interface IndicesSlowlogTresholds { } export interface IndicesSoftDeletes { + /** Indicates whether soft deletes are enabled on the index. */ enabled?: boolean + /** The maximum period to retain a shard history retention lease before it is considered expired. + * Shard history retention leases ensure that soft deletes are retained during merges on the Lucene + * index. If a soft delete is merged away before it can be replicated to a follower the following + * process will fail due to incomplete history on the leader. */ retention_lease?: IndicesRetentionLease } @@ -13108,6 +18802,10 @@ export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic' export interface IndicesStorage { type: IndicesStorageType + /** You can restrict the use of the mmapfs and the related hybridfs store type via the setting node.store.allow_mmap. + * This is a boolean setting indicating whether or not memory-mapping is allowed. The default is to allow it. This + * setting is useful, for example, if you are in an environment where you can not control the ability to create a lot + * of memory maps so you need disable the ability to use memory-mapping. */ allow_mmap?: boolean } @@ -13123,8 +18821,16 @@ export interface IndicesTemplateMapping { } export interface IndicesTranslog { + /** How often the translog is fsynced to disk and committed, regardless of write operations. + * Values less than 100ms are not allowed. */ sync_interval?: Duration + /** Whether or not to `fsync` and commit the translog after every index, delete, update, or bulk request. */ durability?: IndicesTranslogDurability + /** The translog stores all operations that are not yet safely persisted in Lucene (i.e., are not + * part of a Lucene commit point). Although these operations are available for reads, they will need + * to be replayed if the shard was stopped and had to be recovered. This setting controls the + * maximum total size of these operations, to prevent recoveries from taking too long. Once the + * maximum size has been reached a flush will happen, generating a new Lucene commit point. */ flush_threshold_size?: ByteSize retention?: IndicesTranslogRetention } @@ -13132,7 +18838,17 @@ export interface IndicesTranslog { export type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC' export interface IndicesTranslogRetention { + /** This controls the total size of translog files to keep for each shard. Keeping more translog files increases + * the chance of performing an operation based sync when recovering a replica. If the translog files are not + * sufficient, replica recovery will fall back to a file based sync. 
This setting is ignored, and should not be + * set, if soft deletes are enabled. Soft deletes are enabled by default in indices created in Elasticsearch + * versions 7.0.0 and later. */ size?: ByteSize + /** This controls the maximum duration for which translog files are kept by each shard. Keeping more + * translog files increases the chance of performing an operation based sync when recovering replicas. If + * the translog files are not sufficient, replica recovery will fall back to a file based sync. This setting + * is ignored, and should not be set, if soft deletes are enabled. Soft deletes are enabled by default in + * indices created in Elasticsearch versions 7.0.0 and later. */ age?: Duration } @@ -13144,19 +18860,30 @@ export interface IndicesAddBlockIndicesBlockStatus { } export interface IndicesAddBlockRequest extends RequestBase { -/** A comma-separated list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you are adding blocks to. To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ + /** A comma-separated list or wildcard expression of index names used to limit the request. + * By default, you must explicitly name the indices you are adding blocks to. + * To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. + * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ index: IndexName /** The block type to add to the index. */ block: IndicesAddBlockIndicesBlockOptions - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for the master node. 
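A usage sketch for the add-block request defined above; the index name and timeout are illustrative:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

async function run () {
  // Block write operations; `block` takes the
  // IndicesAddBlockIndicesBlockOptions values, e.g. 'read', 'write', 'metadata'.
  const resp = await client.indices.addBlock({
    index: 'my-index',
    block: 'write',
    timeout: '30s'
  })
  console.log(resp.acknowledged)
}
run().catch(console.log)
----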
+ * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration - /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } @@ -13212,9 +18939,12 @@ export type IndicesAnalyzeExplainAnalyzeToken = IndicesAnalyzeExplainAnalyzeToke & { [property: string]: any } export interface IndicesAnalyzeRequest extends RequestBase { -/** Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. */ + /** Index used to derive the analyzer. + * If specified, the `analyzer` or field parameter overrides this value. + * If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. */ index?: IndexName - /** The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index. */ + /** The name of the analyzer that should be applied to the provided `text`. + * This could be a built-in analyzer, or an analyzer that’s been configured in the index. */ analyzer?: string /** Array of token attributes used to filter the output of the `explain` parameter. */ attributes?: string[] @@ -13222,13 +18952,16 @@ export interface IndicesAnalyzeRequest extends RequestBase { char_filter?: AnalysisCharFilter[] /** If `true`, the response includes token attributes and additional details. */ explain?: boolean - /** Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. */ + /** Field used to derive the analyzer. + * To use this parameter, you must specify an index. + * If specified, the `analyzer` parameter overrides this value. */ field?: Field /** Array of token filters used to apply after the tokenizer. */ filter?: AnalysisTokenFilter[] /** Normalizer to use to convert text into a single token. */ normalizer?: string - /** Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. */ + /** Text to analyze. + * If an array of strings is provided, it is analyzed as a multi-value field. */ text?: IndicesAnalyzeTextToAnalyze /** Tokenizer to use to convert text into tokens. 
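The analyze request above accepts either a named analyzer or an ad-hoc tokenizer/filter chain; a minimal sketch of the named-analyzer form:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

async function run () {
  const resp = await client.indices.analyze({
    analyzer: 'standard',
    text: 'The QUICK brown fox'
  })
  // Each entry is an analyze token: lowercased terms with offsets.
  console.log(resp.tokens?.map(t => t.token)) // [ 'the', 'quick', 'brown', 'fox' ]
}
run().catch(console.log)
----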
*/ tokenizer?: AnalysisTokenizer @@ -13251,7 +18984,7 @@ export interface IndicesAnalyzeTokenDetail { } export interface IndicesCancelMigrateReindexRequest extends RequestBase { -/** The index or data stream name */ + /** The index or data stream name */ index: Indices /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never } @@ -13262,13 +18995,20 @@ export interface IndicesCancelMigrateReindexRequest extends RequestBase { export type IndicesCancelMigrateReindexResponse = AcknowledgedResponseBase export interface IndicesClearCacheRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards - /** If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. */ + /** If `true`, clears the fields cache. + * Use the `fields` parameter to clear the cache of specific fields only. */ fielddata?: boolean /** Comma-separated list of field names used to limit the `fielddata` parameter. */ fields?: Fields @@ -13287,15 +19027,18 @@ export interface IndicesClearCacheRequest extends RequestBase { export type IndicesClearCacheResponse = ShardsOperationResponseBase export interface IndicesCloneRequest extends RequestBase { -/** Name of the source index to clone. */ + /** Name of the source index to clone. */ index: IndexName /** Name of the target index to create. */ target: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. 
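A sketch of the clear-cache request above, limited to the fielddata and query caches; the index and field names are illustrative:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

async function run () {
  await client.indices.clearCache({
    index: 'my-index', // illustrative name
    fielddata: true,   // drop the fielddata cache...
    fields: 'user.id', // ...but only for this field
    query: true        // also drop the query cache
  })
}
run().catch(console.log)
----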
*/ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards /** Aliases for the resulting index. */ aliases?: Record @@ -13323,19 +19066,26 @@ export interface IndicesCloseCloseShardResult { } export interface IndicesCloseRequest extends RequestBase { -/** Comma-separated list or wildcard expression of index names used to limit the request. */ + /** Comma-separated list or wildcard expression of index names used to limit the request. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards /** All values in `body` will be added to the request body. 
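Cloning requires the source index to be write-blocked first, which pairs the clone request above with the add-block API; a sketch with illustrative names:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

async function run () {
  // A write block on the source index is a prerequisite for cloning.
  await client.indices.addBlock({ index: 'my-index', block: 'write' })
  await client.indices.clone({
    index: 'my-index',
    target: 'my-index-clone',
    wait_for_active_shards: 1
  })
}
run().catch(console.log)
----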
*/ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } @@ -13350,17 +19100,23 @@ export interface IndicesCloseResponse { } export interface IndicesCreateRequest extends RequestBase { -/** Name of the index you wish to create. */ + /** Name of the index you wish to create. */ index: IndexName - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards /** Aliases for the index. */ aliases?: Record - /** Mapping for fields in the index. If specified, this mapping can include: - Field names - Field data types - Mapping parameters */ + /** Mapping for fields in the index. If specified, this mapping can include: + * - Field names + * - Field data types + * - Mapping parameters */ mappings?: MappingTypeMapping /** Configuration options for the index. */ settings?: IndicesIndexSettings @@ -13377,7 +19133,12 @@ export interface IndicesCreateResponse { } export interface IndicesCreateDataStreamRequest extends RequestBase { -/** Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. */ + /** Name of the data stream, which must meet the following criteria: + * Lowercase only; + * Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; + * Cannot start with `-`, `_`, `+`, or `.ds-`; + * Cannot be `.` or `..`; + * Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. */ name: DataStreamName /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
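A sketch combining the create request's `aliases`, `mappings`, and `settings` body fields; all names and values are illustrative:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

async function run () {
  await client.indices.create({
    index: 'my-index',
    settings: { number_of_shards: 1, number_of_replicas: 1 },
    mappings: {
      properties: {
        '@timestamp': { type: 'date' },
        message: { type: 'text' }
      }
    },
    aliases: { 'my-alias': {} }
  })
}
run().catch(console.log)
----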
*/ master_timeout?: Duration @@ -13392,13 +19153,16 @@ export interface IndicesCreateDataStreamRequest extends RequestBase { export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase export interface IndicesCreateFromCreateFrom { + /** Mappings overrides to be applied to the destination index (optional) */ mappings_override?: MappingTypeMapping + /** Settings overrides to be applied to the destination index (optional) */ settings_override?: IndicesIndexSettings + /** If index blocks should be removed when creating destination index (optional) */ remove_index_blocks?: boolean } export interface IndicesCreateFromRequest extends RequestBase { -/** The source index or data stream name */ + /** The source index or data stream name */ source: IndexName /** The destination index or data stream name */ dest: IndexName @@ -13416,17 +19180,30 @@ export interface IndicesCreateFromResponse { } export interface IndicesDataStreamsStatsDataStreamsStatsItem { + /** Current number of backing indices for the data stream. */ backing_indices: integer + /** Name of the data stream. */ data_stream: Name + /** The data stream’s highest `@timestamp` value, converted to milliseconds since the Unix epoch. + * NOTE: This timestamp is provided as a best effort. + * The data stream may contain `@timestamp` values higher than this if one or more of the following conditions are met: + * The stream contains closed backing indices; + * Backing indices with a lower generation contain higher `@timestamp` values. */ maximum_timestamp: EpochTime + /** Total size of all shards for the data stream’s backing indices. + * This parameter is only returned if the `human` query parameter is `true`. */ store_size?: ByteSize + /** Total size, in bytes, of all shards for the data stream’s backing indices. */ store_size_bytes: long } export interface IndicesDataStreamsStatsRequest extends RequestBase { -/** Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a cluster, omit this parameter or use `*`. */ + /** Comma-separated list of data streams used to limit the request. + * Wildcard expressions (`*`) are supported. + * To target all data streams in a cluster, omit this parameter or use `*`. */ name?: IndexName - /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. */ + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never } @@ -13435,26 +19212,42 @@ export interface IndicesDataStreamsStatsRequest extends RequestBase { } export interface IndicesDataStreamsStatsResponse { + /** Contains information about shards that attempted to execute the request. */ _shards: ShardStatistics + /** Total number of backing indices for the selected data streams. */ backing_indices: integer + /** Total number of selected data streams. */ data_stream_count: integer + /** Contains statistics for the selected data streams. */ data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] + /** Total size of all shards for the selected data streams. + * This property is included only if the `human` query parameter is `true` */ total_store_sizes?: ByteSize + /** Total size, in bytes, of all shards for the selected data streams. 
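A sketch that reads back the data stream stats fields documented above; the stream name is illustrative:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

async function run () {
  const stats = await client.indices.dataStreamsStats({ name: 'my-stream' })
  console.log(stats.data_stream_count, stats.backing_indices)
  for (const ds of stats.data_streams) {
    // Per-stream entries are IndicesDataStreamsStatsDataStreamsStatsItem values.
    console.log(ds.data_stream, ds.store_size_bytes, ds.maximum_timestamp)
  }
}
run().catch(console.log)
----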
*/ total_store_size_bytes: long } export interface IndicesDeleteRequest extends RequestBase { -/** Comma-separated list of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. */ + /** Comma-separated list of indices to delete. + * You cannot specify index aliases. + * By default, this parameter does not support wildcards (`*`) or `_all`. + * To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } @@ -13465,13 +19258,17 @@ export interface IndicesDeleteRequest extends RequestBase { export type IndicesDeleteResponse = IndicesResponseBase export interface IndicesDeleteAliasRequest extends RequestBase { -/** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). */ + /** Comma-separated list of data streams or indices used to limit the request. + * Supports wildcards (`*`). */ index: Indices - /** Comma-separated list of aliases to remove. Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. */ + /** Comma-separated list of aliases to remove. + * Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. */ name: Names - /** Period to wait for a connection to the master node. 
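A minimal sketch of the delete request above; note the comment's caveat that wildcard deletes additionally require `action.destructive_requires_name` to be `false`:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

async function run () {
  const resp = await client.indices.delete({
    index: 'my-index',       // a concrete name is always accepted
    ignore_unavailable: true // don't error if it is already gone
  })
  console.log(resp.acknowledged)
}
run().catch(console.log)
----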
If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never } @@ -13482,7 +19279,7 @@ export interface IndicesDeleteAliasRequest extends RequestBase { export type IndicesDeleteAliasResponse = AcknowledgedResponseBase export interface IndicesDeleteDataLifecycleRequest extends RequestBase { -/** A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams */ + /** A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams */ name: DataStreamNames /** Whether wildcard expressions should get expanded to open or closed indices (default: open) */ expand_wildcards?: ExpandWildcards @@ -13499,7 +19296,7 @@ export interface IndicesDeleteDataLifecycleRequest extends RequestBase { export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase export interface IndicesDeleteDataStreamRequest extends RequestBase { -/** Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. */ + /** Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. */ name: DataStreamNames /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -13514,7 +19311,7 @@ export interface IndicesDeleteDataStreamRequest extends RequestBase { export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase export interface IndicesDeleteIndexTemplateRequest extends RequestBase { -/** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ + /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name: Names /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -13529,11 +19326,14 @@ export interface IndicesDeleteIndexTemplateRequest extends RequestBase { export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesDeleteTemplateRequest extends RequestBase { -/** The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. */ + /** The name of the legacy index template to delete. + * Wildcard (`*`) expressions are supported. */ name: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } @@ -13544,17 +19344,24 @@ export interface IndicesDeleteTemplateRequest extends RequestBase { export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase export interface IndicesDiskUsageRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. */ index: Indices - /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards - /** If `true`, the API performs a flush before analysis. If `false`, the response may not include uncommitted data. */ + /** If `true`, the API performs a flush before analysis. + * If `false`, the response may not include uncommitted data. */ flush?: boolean /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean - /** Analyzing field disk usage is resource-intensive. To use the API, this parameter must be set to `true`. */ + /** Analyzing field disk usage is resource-intensive. + * To use the API, this parameter must be set to `true`. */ run_expensive_tasks?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, run_expensive_tasks?: never } @@ -13565,7 +19372,7 @@ export interface IndicesDiskUsageRequest extends RequestBase { export type IndicesDiskUsageResponse = any export interface IndicesDownsampleRequest extends RequestBase { -/** Name of the time series index to downsample. */ + /** Name of the time series index to downsample. 
*/ index: IndexName /** Name of the index to create. */ target_index: IndexName @@ -13579,11 +19386,15 @@ export interface IndicesDownsampleRequest extends RequestBase { export type IndicesDownsampleResponse = any export interface IndicesExistsRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). */ + /** Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `true`, returns settings in flat format. */ flat_settings?: boolean @@ -13602,17 +19413,23 @@ export interface IndicesExistsRequest extends RequestBase { export type IndicesExistsResponse = boolean export interface IndicesExistsAliasRequest extends RequestBase { -/** Comma-separated list of aliases to check. Supports wildcards (`*`). */ + /** Comma-separated list of aliases to check. Supports wildcards (`*`). */ name: Names - /** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
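As the `IndicesExistsResponse = boolean` alias above indicates, the exists call resolves to a bare boolean; a sketch with an illustrative index name:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

async function run () {
  const found = await client.indices.exists({ index: 'my-index' })
  if (!found) {
    await client.indices.create({ index: 'my-index' })
  }
}
run().catch(console.log)
----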
*/ expand_wildcards?: ExpandWildcards /** If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } @@ -13623,7 +19440,7 @@ export interface IndicesExistsAliasRequest extends RequestBase { export type IndicesExistsAliasResponse = boolean export interface IndicesExistsIndexTemplateRequest extends RequestBase { -/** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ + /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name: Name /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean @@ -13640,13 +19457,16 @@ export interface IndicesExistsIndexTemplateRequest extends RequestBase { export type IndicesExistsIndexTemplateResponse = boolean export interface IndicesExistsTemplateRequest extends RequestBase { -/** A comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. */ + /** A comma-separated list of index template names used to limit the request. + * Wildcard (`*`) expressions are supported. */ name: Names /** Indicates whether to use a flat format for the response. */ flat_settings?: boolean /** Indicates whether to get information from the local node only. */ local?: boolean - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } @@ -13669,7 +19489,7 @@ export interface IndicesExplainDataLifecycleDataStreamLifecycleExplain { } export interface IndicesExplainDataLifecycleRequest extends RequestBase { -/** The name of the index to explain */ + /** The name of the index to explain */ index: Indices /** indicates if the API should return the default values the system uses for the index's lifecycle */ include_defaults?: boolean @@ -13713,11 +19533,15 @@ export interface IndicesFieldUsageStatsInvertedIndex { } export interface IndicesFieldUsageStatsRequest extends RequestBase { -/** Comma-separated list or wildcard expression of index names used to limit the request. */ + /** Comma-separated list or wildcard expression of index names used to limit the request. 
*/ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean @@ -13748,17 +19572,24 @@ export interface IndicesFieldUsageStatsUsageStatsShards { } export interface IndicesFlushRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases to flush. + * Supports wildcards (`*`). + * To flush all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `true`, the request forces a flush even if there are no changes to commit to the index. */ force?: boolean /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `true`, the flush operation blocks until execution when another flush operation is running. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. */ + /** If `true`, the flush operation blocks until execution when another flush operation is running. 
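A usage sketch for the flush request defined above; the index name is illustrative:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

async function run () {
  // wait_if_ongoing queues this flush behind a concurrent one
  // instead of returning an error.
  await client.indices.flush({
    index: 'my-index',
    wait_if_ongoing: true
  })
}
run().catch(console.log)
----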
+   * If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. */
   wait_if_ongoing?: boolean
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, force?: never, ignore_unavailable?: never, wait_if_ongoing?: never }
@@ -13769,7 +19600,7 @@ export interface IndicesFlushRequest extends RequestBase {
 export type IndicesFlushResponse = ShardsOperationResponseBase
 export interface IndicesForcemergeRequest extends RequestBase {
-/** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */
+  /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */
   index?: Indices
   /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */
   allow_no_indices?: boolean
@@ -13794,6 +19625,8 @@ export interface IndicesForcemergeRequest extends RequestBase {
 export type IndicesForcemergeResponse = IndicesForcemergeForceMergeResponseBody
 export interface IndicesForcemergeForceMergeResponseBody extends ShardsOperationResponseBase {
+  /** task contains a task id returned when wait_for_completion=false;
+   * you can use the task_id to get the status of the task at `_tasks/<task_id>` */
   task?: string
 }
@@ -13802,11 +19635,16 @@
 export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings'
 export type IndicesGetFeatures = IndicesGetFeature | IndicesGetFeature[]
 export interface IndicesGetRequest extends RequestBase {
-/** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. */
+  /** Comma-separated list of data streams, indices, and index aliases used to limit the request.
+   * Wildcard expressions (*) are supported. */
   index: Indices
+  /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only
+   * missing or closed indices. This behavior applies even if the request targets other open indices. For example,
+   * a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */
   allow_no_indices?: boolean
+  /** Type of index that wildcard expressions can match. If the request can target data streams, this argument
+   * determines whether wildcard expressions match hidden data streams. Supports comma-separated values,
+   * such as open,hidden. */
   expand_wildcards?: ExpandWildcards
   /** If true, returns settings in flat format. */
   flat_settings?: boolean
@@ -13833,17 +19671,26 @@ export interface IndicesGetAliasIndexAliases {
 }
 export interface IndicesGetAliasRequest extends RequestBase {
-/** Comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. 
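The `task` field documented above supports fire-and-forget force merges; a sketch that requests a task and polls it once via the tasks API (index name illustrative):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

async function run () {
  const resp = await client.indices.forcemerge({
    index: 'my-index',
    max_num_segments: 1,
    wait_for_completion: false
  })
  if (resp.task != null) {
    // Follow up through the tasks API rather than blocking the merge call.
    const status = await client.tasks.get({ task_id: resp.task })
    console.log(status.completed)
  }
}
run().catch(console.log)
----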
*/ + /** Comma-separated list of aliases to retrieve. + * Supports wildcards (`*`). + * To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names - /** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams or indices used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } @@ -13859,9 +19706,13 @@ export interface IndicesGetDataLifecycleDataStreamWithLifecycle { } export interface IndicesGetDataLifecycleRequest extends RequestBase { -/** Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams to limit the request. + * Supports wildcards (`*`). + * To target all data streams, omit this parameter or use `*` or `_all`. */ name: DataStreamNames - /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `true`, return all default settings in the response. 
*/ include_defaults?: boolean @@ -13878,8 +19729,11 @@ export interface IndicesGetDataLifecycleResponse { } export interface IndicesGetDataLifecycleStatsDataStreamStats { + /** The count of the backing indices for the data stream. */ backing_indices_in_error: integer + /** The count of the backing indices for the data stream that have encountered an error. */ backing_indices_in_total: integer + /** The name of the data stream. */ name: DataStreamName } @@ -13891,16 +19745,23 @@ export interface IndicesGetDataLifecycleStatsRequest extends RequestBase { } export interface IndicesGetDataLifecycleStatsResponse { + /** The count of data streams currently being managed by the data stream lifecycle. */ data_stream_count: integer + /** Information about the data streams that are managed by the data stream lifecycle. */ data_streams: IndicesGetDataLifecycleStatsDataStreamStats[] + /** The duration of the last data stream lifecycle execution. */ last_run_duration_in_millis?: DurationValue + /** The time that passed between the start of the last two data stream lifecycle executions. + * This value should amount approximately to `data_streams.lifecycle.poll_interval`. */ time_between_starts_in_millis?: DurationValue } export interface IndicesGetDataStreamRequest extends RequestBase { -/** Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. */ + /** Comma-separated list of data stream names used to limit the request. + * Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. */ name?: DataStreamNames - /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. */ + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean @@ -13919,13 +19780,20 @@ export interface IndicesGetDataStreamResponse { } export interface IndicesGetFieldMappingRequest extends RequestBase { -/** Comma-separated list or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). */ + /** Comma-separated list or wildcard expression of fields used to limit returned information. + * Supports wildcards (`*`). */ fields: Fields - /** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -13949,7 +19817,7 @@ export interface IndicesGetIndexTemplateIndexTemplateItem { } export interface IndicesGetIndexTemplateRequest extends RequestBase { -/** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ + /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name?: Name /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean @@ -13975,17 +19843,24 @@ export interface IndicesGetMappingIndexMappingRecord { } export interface IndicesGetMappingRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean /** If `true`, the request retrieves information from the local node only. */ local?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never } @@ -13996,7 +19871,7 @@ export interface IndicesGetMappingRequest extends RequestBase { export type IndicesGetMappingResponse = Record export interface IndicesGetMigrateReindexStatusRequest extends RequestBase { -/** The index or data stream name. */ + /** The index or data stream name. */ index: Indices /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never } @@ -14029,13 +19904,21 @@ export interface IndicesGetMigrateReindexStatusStatusInProgress { } export interface IndicesGetSettingsRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit + * the request. Supports wildcards (`*`). To target all data streams and + * indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** Comma-separated list or wildcard expression of settings to retrieve. */ name?: Names - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index + * alias, or `_all` value targets only missing or closed indices. This + * behavior applies even if the request targets other open indices. For + * example, a request targeting `foo*,bar*` returns an error if an index + * starts with foo but no index starts with `bar`. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, returns settings in flat format. */ flat_settings?: boolean @@ -14043,9 +19926,12 @@ export interface IndicesGetSettingsRequest extends RequestBase { ignore_unavailable?: boolean /** If `true`, return all default settings in the response. */ include_defaults?: boolean - /** If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. */ + /** If `true`, the request retrieves information from the local node only. If + * `false`, information is retrieved from the master node. */ local?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. 
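A sketch of the get-settings request above; the response is keyed by concrete index name, and the index name here is illustrative:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

async function run () {
  const resp = await client.indices.getSettings({
    index: 'my-index',
    include_defaults: true, // also report cluster-applied defaults
    flat_settings: true     // 'index.number_of_shards' style keys
  })
  console.log(resp['my-index'])
}
run().catch(console.log)
----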
*/ body?: string | { [key: string]: any } & { index?: never, name?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never } @@ -14056,13 +19942,16 @@ export interface IndicesGetSettingsRequest extends RequestBase { export type IndicesGetSettingsResponse = Record export interface IndicesGetTemplateRequest extends RequestBase { -/** Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, omit this parameter or use a value of `_all` or `*`. */ + /** Comma-separated list of index template names used to limit the request. + * Wildcard (`*`) expressions are supported. + * To return all index templates, omit this parameter or use a value of `_all` or `*`. */ name?: Names /** If `true`, returns settings in flat format. */ flat_settings?: boolean /** If `true`, the request retrieves information from the local node only. */ local?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } @@ -14073,7 +19962,9 @@ export interface IndicesGetTemplateRequest extends RequestBase { export type IndicesGetTemplateResponse = Record export interface IndicesMigrateReindexMigrateReindex { + /** Reindex mode. Currently only 'upgrade' is supported. */ mode: IndicesMigrateReindexModeEnum + /** The source index or data stream (only data streams are currently supported). */ source: IndicesMigrateReindexSourceIndex } @@ -14094,7 +19985,7 @@ export interface IndicesMigrateReindexSourceIndex { } export interface IndicesMigrateToDataStreamRequest extends RequestBase { -/** Name of the index alias to convert to a data stream. */ + /** Name of the index alias to convert to a data stream. */ name: IndexName /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -14109,17 +20000,26 @@ export interface IndicesMigrateToDataStreamRequest extends RequestBase { export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase export interface IndicesModifyDataStreamAction { + /** Adds an existing index as a backing index for a data stream. + * The index is hidden as part of this operation. + * WARNING: Adding indices with the `add_backing_index` action can potentially result in improper data stream behavior. + * This should be considered an expert level API. */ add_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction + /** Removes a backing index from a data stream. + * The index is unhidden as part of this operation. + * A data stream’s write index cannot be removed. */ remove_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction } export interface IndicesModifyDataStreamIndexAndDataStreamAction { + /** Data stream targeted by the action. */ data_stream: DataStreamName + /** Index for the action. 
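A hedged usage sketch for the `IndicesGetSettingsRequest` options above (illustrative only; the index and setting names are assumptions):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

// Read a filtered subset of settings, flattened and with defaults included.
const settings = await client.indices.getSettings({
  index: 'my-index',            // hypothetical index name
  name: 'index.number_of_*',    // only return matching setting keys
  flat_settings: true,
  include_defaults: true
})
console.log(settings['my-index'])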
*/ index: IndexName } export interface IndicesModifyDataStreamRequest extends RequestBase { -/** Actions to perform. */ + /** Actions to perform. */ actions: IndicesModifyDataStreamAction[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { actions?: never } @@ -14130,19 +20030,30 @@ export type IndicesModifyDataStreamResponse = AcknowledgedResponseBase export interface IndicesOpenRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly name the indices you using to limit the request. To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * By default, you must explicitly name the indices you are using to limit the request. + * To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. + * You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
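The `IndicesModifyDataStreamAction` shapes above combine into a single atomic request; a sketch (editor's illustration; the data stream and backing-index names are made up):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

// Swap backing indices in one atomic operation.
// Note: a data stream's write index cannot be removed.
await client.indices.modifyDataStream({
  actions: [
    { add_backing_index: { data_stream: 'logs-app', index: '.ds-logs-app-000001' } },
    { remove_backing_index: { data_stream: 'logs-app', index: '.ds-logs-app-000002' } }
  ]
})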
*/ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } @@ -14156,7 +20067,7 @@ export interface IndicesOpenResponse { } export interface IndicesPromoteDataStreamRequest extends RequestBase { -/** The name of the data stream */ + /** The name of the data stream */ name: IndexName /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -14169,23 +20080,37 @@ export interface IndicesPromoteDataStreamRequest extends RequestBase { export type IndicesPromoteDataStreamResponse = any export interface IndicesPutAliasRequest extends RequestBase { -/** Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error. */ + /** Comma-separated list of data streams or indices to add. + * Supports wildcards (`*`). + * Wildcard patterns that match both data streams and indices return an error. */ index: Indices - /** Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math. */ + /** Alias to update. + * If the alias doesn’t exist, the request creates it. + * Index alias names support date math. */ name: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer - /** Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter. */ + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. + * Data stream aliases don’t support this parameter. */ index_routing?: Routing - /** If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. */ + /** If `true`, sets the write index or data stream for the alias. + * If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. 
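A minimal sketch of `IndicesOpenRequest` in use (illustrative; the index name and timeouts are assumptions):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

// Reopen a closed index and wait until every shard copy is active.
const resp = await client.indices.open({
  index: 'my-closed-index',
  wait_for_active_shards: 'all',
  timeout: '30s'
})
console.log(resp.acknowledged, resp.shards_acknowledged)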
+ * If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. + * Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. */ is_write_index?: boolean - /** Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter. */ + /** Value used to route indexing and search operations to a specific shard. + * Data stream aliases don’t support this parameter. */ routing?: Routing - /** Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter. */ + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. + * Data stream aliases don’t support this parameter. */ search_routing?: Routing /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never, filter?: never, index_routing?: never, is_write_index?: never, routing?: never, search_routing?: never } @@ -14196,19 +20121,29 @@ export interface IndicesPutAliasRequest extends RequestBase { export type IndicesPutAliasResponse = AcknowledgedResponseBase export interface IndicesPutDataLifecycleRequest extends RequestBase { -/** Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. */ + /** Comma-separated list of data streams used to limit the request. + * Supports wildcards (`*`). + * To target all data streams use `*` or `_all`. */ name: DataStreamNames - /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. */ + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `hidden`, `open`, `closed`, `none`. */ expand_wildcards?: ExpandWildcards - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** If defined, every document added to this data stream will be stored at least for this time frame. Any time after this duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely. */ + /** If defined, every document added to this data stream will be stored at least for this time frame. + * Any time after this duration the document could be deleted. + * When empty, every document in this data stream will be stored indefinitely. */ data_retention?: Duration /** The downsampling configuration to execute for the managed backing index after rollover. 
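How the `IndicesPutAliasRequest` options above might be combined (a sketch; the index, alias, and filter field are hypothetical):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

// Point a filtered write alias at one concrete index.
await client.indices.putAlias({
  index: 'my-index-000001',
  name: 'my-write-alias',
  is_write_index: true,
  filter: { term: { 'user.id': 'kimchy' } } // alias only exposes matching documents
})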
*/ downsampling?: IndicesDataStreamLifecycleDownsampling - /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream. */ + /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle + * that's disabled (enabled: `false`) will have no effect on the data stream. */ enabled?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, data_retention?: never, downsampling?: never, enabled?: never } @@ -14219,40 +20154,66 @@ export interface IndicesPutDataLifecycleRequest extends RequestBase { export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase export interface IndicesPutIndexTemplateIndexTemplateMapping { + /** Aliases to add. + * If the index template includes a `data_stream` object, these are data stream aliases. + * Otherwise, these are index aliases. + * Data stream aliases ignore the `index_routing`, `routing`, and `search_routing` options. */ aliases?: Record + /** Mapping for fields in the index. + * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping + /** Configuration options for the index. */ settings?: IndicesIndexSettings lifecycle?: IndicesDataStreamLifecycle } export interface IndicesPutIndexTemplateRequest extends RequestBase { -/** Index or template name */ + /** Index or template name */ name: Name /** If `true`, this request cannot replace or update existing index templates. */ create?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** User defined reason for creating/updating the index template */ cause?: string /** Name of the index template to create. */ index_patterns?: Indices - /** An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ + /** An ordered list of component template names. + * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of?: Name[] - /** Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ + /** Template to be applied. + * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ template?: IndicesPutIndexTemplateIndexTemplateMapping - /** If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. */ + /** If this object is included, the template is used to create data streams and their backing indices. + * Supports an empty object. + * Data streams require a matching index template with a `data_stream` object. */ data_stream?: IndicesDataStreamVisibility - /** Priority to determine index template precedence when a new data stream or index is created. 
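A sketch of `IndicesPutDataLifecycleRequest` (illustrative; the wildcard and retention period are assumptions):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

// Enable a lifecycle on all matching data streams with a minimum retention.
await client.indices.putDataLifecycle({
  name: 'logs-*',        // wildcard over data stream names
  data_retention: '7d',  // documents are kept for at least 7 days
  enabled: true
})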
The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. */ + /** Priority to determine index template precedence when a new data stream or index is created. + * The index template with the highest priority is chosen. + * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). + * This number is not automatically generated by Elasticsearch. */ priority?: long - /** Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. External systems can use these version numbers to simplify template management. To unset a version, replace the template without specifying one. */ + /** Version number used to manage index templates externally. + * This number is not automatically generated by Elasticsearch. + * External systems can use these version numbers to simplify template management. + * To unset a version, replace the template without specifying one. */ version?: VersionNumber - /** Optional user metadata about the index template. It may have any contents. It is not automatically generated or used by Elasticsearch. This user-defined object is stored in the cluster state, so keeping it short is preferable To unset the metadata, replace the template without specifying it. */ + /** Optional user metadata about the index template. + * It may have any contents. + * It is not automatically generated or used by Elasticsearch. + * This user-defined object is stored in the cluster state, so keeping it short is preferable. + * To unset the metadata, replace the template without specifying it. */ _meta?: Metadata - /** This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ + /** This setting overrides the value of the `action.auto_create_index` cluster setting. + * If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. + * If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ allow_auto_create?: boolean - /** The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist */ + /** The configuration option ignore_missing_component_templates can be used when an index template + * references a component template that might not exist. */ ignore_missing_component_templates?: string[] - /** Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. */ + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean /** All values in `body` will be added to the request body.
*/ body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, allow_auto_create?: never, ignore_missing_component_templates?: never, deprecated?: never } @@ -14263,17 +20224,23 @@ export interface IndicesPutIndexTemplateRequest extends RequestBase { export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesPutMappingRequest extends RequestBase { -/** A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. */ + /** A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** If `true`, the mappings are applied only to the current write index for the target. */ write_index_only?: boolean @@ -14281,17 +20248,25 @@ export interface IndicesPutMappingRequest extends RequestBase { date_detection?: boolean /** Controls whether new fields are added dynamically. */ dynamic?: MappingDynamicMapping - /** If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string. */ + /** If date detection is enabled then new string fields are checked + * against 'dynamic_date_formats' and if the value matches then + * a new date field is added instead of string. */ dynamic_date_formats?: string[] /** Specify dynamic templates for the mapping. 
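The `IndicesPutIndexTemplateRequest` fields above compose like this (editor's sketch; the template name, pattern, and mappings are invented):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

// A data stream template: the highest-priority matching template wins.
await client.indices.putIndexTemplate({
  name: 'logs-template',
  index_patterns: ['logs-*'],
  data_stream: {},          // matching names become data streams
  priority: 200,
  template: {
    settings: { number_of_shards: 1 },
    mappings: { properties: { '@timestamp': { type: 'date' } } }
  }
})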
*/ dynamic_templates?: Partial>[] /** Control whether field names are enabled for the index. */ _field_names?: MappingFieldNamesField - /** A mapping type can have custom meta data associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata. */ + /** A mapping type can have custom meta data associated with it. These are + * not used at all by Elasticsearch, but can be used to store + * application-specific metadata. */ _meta?: Metadata /** Automatically map strings into numeric data types for all fields. */ numeric_detection?: boolean - /** Mapping for a field. For new fields, this mapping can include: - Field name - Field data type - Mapping parameters */ + /** Mapping for a field. For new fields, this mapping can include: + * + * - Field name + * - Field data type + * - Mapping parameters */ properties?: Record /** Enable making a routing value required on indexed documents. */ _routing?: MappingRoutingField @@ -14308,23 +20283,37 @@ export type IndicesPutMappingResponse = IndicesResponseBase export interface IndicesPutSettingsRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit + * the request. Supports wildcards (`*`). To target all data streams and + * indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index + * alias, or `_all` value targets only missing or closed indices. This + * behavior applies even if the request targets other open indices. For + * example, a request targeting `foo*,bar*` returns an error if an index + * starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + /** Type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. Supports comma-separated values, such as + * `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, returns settings in flat format. */ flat_settings?: boolean /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration /** If `true`, existing index settings remain unchanged. */ preserve_existing?: boolean - /** Whether to close and reopen the index to apply non-dynamic settings.
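A sketch of `IndicesPutMappingRequest` adding fields to an existing index (illustrative; the field names are assumptions):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

// Add new fields; existing field types cannot be changed this way.
await client.indices.putMapping({
  index: 'my-index',
  properties: {
    title: { type: 'text' },
    views: { type: 'long' }
  }
})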
If set to `true` the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes. */ + /** Whether to close and reopen the index to apply non-dynamic settings. + * If set to `true` the indices to which the settings are being applied + * will be closed temporarily and then reopened in order to apply the changes. */ reopen?: boolean - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ timeout?: Duration settings?: IndicesIndexSettings /** All values in `body` will be added to the request body. */ @@ -14336,25 +20325,33 @@ export interface IndicesPutSettingsRequest extends RequestBase { export type IndicesPutSettingsResponse = AcknowledgedResponseBase export interface IndicesPutTemplateRequest extends RequestBase { -/** The name of the template */ + /** The name of the template */ name: Name /** If true, this request cannot replace or update existing index templates. */ create?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** User defined reason for creating/updating the index template */ cause?: string /** Aliases for the index. */ aliases?: Record - /** Array of wildcard expressions used to match the names of indices during creation. */ + /** Array of wildcard expressions used to match the names + * of indices during creation. */ index_patterns?: string | string[] /** Mapping for fields in the index. */ mappings?: MappingTypeMapping - /** Order in which Elasticsearch applies this template if index matches multiple templates. Templates with lower 'order' values are merged first. Templates with higher 'order' values are merged later, overriding templates with lower values. */ + /** Order in which Elasticsearch applies this template if index + * matches multiple templates. + * + * Templates with lower 'order' values are merged first. Templates with higher + * 'order' values are merged later, overriding templates with lower values. */ order?: integer /** Configuration options for the index. */ settings?: IndicesIndexSettings - /** Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. To unset a version, replace the template without specifying one. */ + /** Version number used to manage index templates externally. This number + * is not automatically generated by Elasticsearch. + * To unset a version, replace the template without specifying one. */ version?: VersionNumber /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, aliases?: never, index_patterns?: never, mappings?: never, order?: never, settings?: never, version?: never } @@ -14429,7 +20426,9 @@ export interface IndicesRecoveryRecoveryStatus { } export interface IndicesRecoveryRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). 
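How `IndicesPutSettingsRequest` might be used to change a dynamic setting across several indices (a sketch; names and values are assumptions):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

// number_of_replicas is dynamic, so no close/reopen is needed.
await client.indices.putSettings({
  index: 'my-index-*',
  settings: { number_of_replicas: 2 }
})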
To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** If `true`, the response only includes ongoing shard recoveries. */ active_only?: boolean @@ -14479,11 +20478,17 @@ export interface IndicesRecoveryVerifyIndex { } export interface IndicesRefreshRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -14507,7 +20512,7 @@ export interface IndicesReloadSearchAnalyzersReloadResult { } export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { -/** A comma-separated list of index names to reload analyzers for */ + /** A comma-separated list of index names to reload analyzers for */ index: Indices /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean @@ -14526,17 +20531,41 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult export interface IndicesResolveClusterRequest extends RequestBase { -/** A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. If no index expression is specified, information about all remote clusters configured on the local cluster is returned without doing any index matching */ + /** A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. + * Resources on remote clusters can be specified using the ``:`` syntax. 
+ * Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. + * If no index expression is specified, information about all remote clusters configured on the local cluster + * is returned without doing any index matching */ name?: Names - /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing + * or closed indices. This behavior applies even if the request targets other open indices. For example, a request + * targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ expand_wildcards?: ExpandWildcards - /** If true, concrete, expanded, or aliased indices are ignored when frozen. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ + /** If true, concrete, expanded, or aliased indices are ignored when frozen. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ ignore_throttled?: boolean - /** If false, the request returns an error if it targets a missing or closed index. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ + /** If false, the request returns an error if it targets a missing or closed index. + * NOTE: This option is only supported when specifying an index expression. 
You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ ignore_unavailable?: boolean - /** The maximum time to wait for remote clusters to respond. If a remote cluster does not respond within this timeout period, the API response will show the cluster as not connected and include an error message that the request timed out. The default timeout is unset and the query can take as long as the networking layer is configured to wait for remote clusters that are not responding (typically 30 seconds). */ + /** The maximum time to wait for remote clusters to respond. + * If a remote cluster does not respond within this timeout period, the API response + * will show the cluster as not connected and include an error message that the + * request timed out. + * + * The default timeout is unset and the query can take + * as long as the networking layer is configured to wait for remote clusters that are + * not responding (typically 30 seconds). */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, timeout?: never } @@ -14545,23 +20574,36 @@ export interface IndicesResolveClusterRequest extends RequestBase { } export interface IndicesResolveClusterResolveClusterInfo { + /** Whether the remote cluster is connected to the local (querying) cluster. */ connected: boolean + /** The `skip_unavailable` setting for a remote cluster. */ skip_unavailable: boolean + /** Whether the index expression provided in the request matches any indices, aliases or data streams + * on the cluster. */ matching_indices?: boolean + /** Provides error messages that are likely to occur if you do a search with this index expression + * on the specified cluster (for example, lack of security privileges to query an index). */ error?: string + /** Provides version information about the cluster. */ version?: ElasticsearchVersionMinInfo } export type IndicesResolveClusterResponse = Record export interface IndicesResolveIndexRequest extends RequestBase { -/** Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the `<cluster>`:`<name>` syntax. */ + /** Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. + * Resources on remote clusters can be specified using the `<cluster>`:`<name>` syntax. */ name: Names - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices.
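The `IndicesResolveClusterResolveClusterInfo` record above is keyed by cluster alias; a sketch of reading it (illustrative; the remote alias `cluster_one` is an assumption):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

// Check local and remote expressions before running a cross-cluster search.
const info = await client.indices.resolveCluster({
  name: 'my-index-*,cluster_one:logs-*'
})
for (const [cluster, details] of Object.entries(info)) {
  console.log(cluster, details.connected, details.matching_indices)
}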
For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never } @@ -14594,27 +20636,40 @@ export interface IndicesResolveIndexResponse { } export interface IndicesRolloverRequest extends RequestBase { -/** Name of the data stream or index alias to roll over. */ + /** Name of the data stream or index alias to roll over. */ alias: IndexAlias - /** Name of the index to create. Supports date math. Data streams do not support this parameter. */ + /** Name of the index to create. + * Supports date math. + * Data streams do not support this parameter. */ new_index?: IndexName /** If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. */ dry_run?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards - /** If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams. */ + /** If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. + * Only allowed on data streams. */ lazy?: boolean - /** Aliases for the target index. Data streams do not support this parameter. */ + /** Aliases for the target index. + * Data streams do not support this parameter. */ aliases?: Record - /** Conditions for the rollover. If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. If this parameter is not specified, Elasticsearch performs the rollover unconditionally. If conditions are specified, at least one of them must be a `max_*` condition. The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. */ + /** Conditions for the rollover. + * If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. 
+ * If this parameter is not specified, Elasticsearch performs the rollover unconditionally. + * If conditions are specified, at least one of them must be a `max_*` condition. + * The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. */ conditions?: IndicesRolloverRolloverConditions - /** Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping paramaters. */ + /** Mapping for fields in the index. + * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping - /** Configuration options for the index. Data streams do not support this parameter. */ + /** Configuration options for the index. + * Data streams do not support this parameter. */ settings?: Record /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, lazy?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } @@ -14655,11 +20710,17 @@ export interface IndicesSegmentsIndexSegment { } export interface IndicesSegmentsRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -14704,11 +20765,14 @@ export interface IndicesShardStoresIndicesShardStores { } export interface IndicesShardStoresRequest extends RequestBase { -/** List of data streams, indices, and aliases used to limit the request. */ + /** List of data streams, indices, and aliases used to limit the request. */ index?: Indices - /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices.
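A sketch of `IndicesRolloverRequest` with conditions (illustrative; the alias name and thresholds are assumptions):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

// Dry-run a conditional rollover: at least one max_* condition is required.
const rolled = await client.indices.rollover({
  alias: 'logs-write',
  conditions: { max_age: '7d', max_docs: 10000000 },
  dry_run: true
})
console.log(rolled.rolled_over, rolled.conditions)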
*/ + /** If false, the request returns an error if any wildcard expression, index alias, or _all + * value targets only missing or closed indices. This behavior applies even if the request + * targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. */ + /** Type of index that wildcard patterns can match. If the request can target data streams, + * this argument determines whether wildcard expressions match hidden data streams. */ expand_wildcards?: ExpandWildcards /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean @@ -14755,17 +20819,21 @@ export interface IndicesShardStoresShardStoreWrapper { } export interface IndicesShrinkRequest extends RequestBase { -/** Name of the source index to shrink. */ + /** Name of the source index to shrink. */ index: IndexName /** Name of the target index to create. */ target: IndexName - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards - /** The key is the alias name. Index alias names support date math. */ + /** The key is the alias name. + * Index alias names support date math. */ aliases?: Record /** Configuration options for the target index. */ settings?: Record @@ -14782,7 +20850,7 @@ export interface IndicesShrinkResponse { } export interface IndicesSimulateIndexTemplateRequest extends RequestBase { -/** Name of the index to simulate */ + /** Name of the index to simulate */ name: Name /** Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one */ create?: boolean @@ -14809,7 +20877,8 @@ export interface IndicesSimulateTemplateOverlapping { } export interface IndicesSimulateTemplateRequest extends RequestBase { -/** Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template configuration in the request body. */ + /** Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit + * this parameter and specify the template configuration in the request body. */ name?: Name /** If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. 
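A sketch of `IndicesShrinkRequest` (illustrative; index names and shard counts are assumptions; the split API defined further below is the symmetric operation):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

// Shrinking requires the source to be read-only with a copy of every shard on one node.
await client.indices.shrink({
  index: 'my-source-index',
  target: 'my-shrunk-index',
  settings: {
    'index.number_of_shards': 1,
    'index.number_of_replicas': 0
  }
})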
Note that the template is not permanently added or updated in either case; it is only used for the simulation. */ create?: boolean @@ -14819,25 +20888,39 @@ export interface IndicesSimulateTemplateRequest extends RequestBase { master_timeout?: Duration /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean - /** This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ + /** This setting overrides the value of the `action.auto_create_index` cluster setting. + * If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. + * If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ allow_auto_create?: boolean /** Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. */ index_patterns?: Indices - /** An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ + /** An ordered list of component template names. + * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of?: Name[] - /** Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ + /** Template to be applied. + * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ template?: IndicesPutIndexTemplateIndexTemplateMapping - /** If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. */ + /** If this object is included, the template is used to create data streams and their backing indices. + * Supports an empty object. + * Data streams require a matching index template with a `data_stream` object. */ data_stream?: IndicesDataStreamVisibility - /** Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. */ + /** Priority to determine index template precedence when a new data stream or index is created. + * The index template with the highest priority is chosen. + * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). + * This number is not automatically generated by Elasticsearch. */ priority?: long - /** Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. */ + /** Version number used to manage index templates externally. + * This number is not automatically generated by Elasticsearch. */ version?: VersionNumber - /** Optional user metadata about the index template. May have any contents. 
This map is not automatically generated by Elasticsearch. */ + /** Optional user metadata about the index template. + * May have any contents. + * This map is not automatically generated by Elasticsearch. */ _meta?: Metadata - /** The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist */ + /** The configuration option ignore_missing_component_templates can be used when an index template + * references a component template that might not exist */ ignore_missing_component_templates?: string[] - /** Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. */ + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } @@ -14857,15 +20940,18 @@ export interface IndicesSimulateTemplateTemplate { } export interface IndicesSplitRequest extends RequestBase { -/** Name of the source index to split. */ + /** Name of the source index to split. */ index: IndexName /** Name of the target index to create. */ target: IndexName - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards /** Aliases for the resulting index. */ aliases?: Record @@ -14886,22 +20972,39 @@ export interface IndicesSplitResponse { export type IndicesStatsIndexMetadataState = 'open' | 'close' export interface IndicesStatsIndexStats { + /** Contains statistics about completions across all shards assigned to the node. */ completion?: CompletionStats + /** Contains statistics about documents across all primary shards assigned to the node. */ docs?: DocStats + /** Contains statistics about the field data cache across all shards assigned to the node. */ fielddata?: FielddataStats + /** Contains statistics about flush operations for the node. */ flush?: FlushStats + /** Contains statistics about get operations for the node. 
 */
  get?: GetStats
+  /** Contains statistics about indexing operations for the node. */
  indexing?: IndexingStats
+  /** Contains statistics about indices operations for the node. */
  indices?: IndicesStatsIndicesStats
+  /** Contains statistics about merge operations for the node. */
  merges?: MergesStats
+  /** Contains statistics about the query cache across all shards assigned to the node. */
  query_cache?: QueryCacheStats
+  /** Contains statistics about recovery operations for the node. */
  recovery?: RecoveryStats
+  /** Contains statistics about refresh operations for the node. */
  refresh?: RefreshStats
+  /** Contains statistics about the request cache across all shards assigned to the node. */
  request_cache?: RequestCacheStats
+  /** Contains statistics about search operations for the node. */
  search?: SearchStats
+  /** Contains statistics about segments across all shards assigned to the node. */
  segments?: SegmentsStats
+  /** Contains statistics about the size of shards assigned to the node. */
  store?: StoreStats
+  /** Contains statistics about transaction log operations for the node. */
  translog?: TranslogStats
+  /** Contains statistics about index warming operations for the node. */
  warmer?: WarmerStats
  bulk?: BulkStats
  shard_stats?: IndicesStatsShardsTotalStats
@@ -14923,13 +21026,15 @@ export interface IndicesStatsMappingStats {
 }
 
 export interface IndicesStatsRequest extends RequestBase {
-/** Limit the information returned the specific metrics. */
+  /** Limit the information returned to the specific metrics. */
  metric?: Metrics
  /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */
  index?: Indices
  /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. */
  completion_fields?: Fields
- /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */
+ /** Type of index that wildcard patterns can match. If the request can target data streams, this argument
+  * determines whether wildcard expressions match hidden data streams. Supports comma-separated values,
+  * such as `open,hidden`. */
  expand_wildcards?: ExpandWildcards
  /** Comma-separated list or wildcard expressions of fields to include in fielddata statistics. */
  fielddata_fields?: Fields
@@ -15051,43 +21156,84 @@ export interface IndicesStatsShardsTotalStats {
 }
 
 export interface IndicesUpdateAliasesAction {
+  /** Adds a data stream or index to an alias.
+   * If the alias doesn’t exist, the `add` action creates it. */
  add?: IndicesUpdateAliasesAddAction
+  /** Removes a data stream or index from an alias. */
  remove?: IndicesUpdateAliasesRemoveAction
+  /** Deletes an index.
+   * You cannot use this action on aliases or data streams. */
  remove_index?: IndicesUpdateAliasesRemoveIndexAction
 }
 
 export interface IndicesUpdateAliasesAddAction {
+  /** Alias for the action.
+   * Index alias names support date math. */
  alias?: IndexAlias
+  /** Aliases for the action.
+   * Index alias names support date math. */
  aliases?: IndexAlias | IndexAlias[]
+  /** Query used to limit documents the alias can access. */
  filter?: QueryDslQueryContainer
+  /** Data stream or index for the action.
+   * Supports wildcards (`*`). */
  index?: IndexName
+  /** Data streams or indices for the action.
+   * Supports wildcards (`*`).
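+   *
+   * For illustration, a minimal `add` action sketch (the index pattern and alias name are placeholders, not values from this spec):
+   *
+   * ```ts
+   * const action: IndicesUpdateAliasesAction = {
+   *   add: { index: 'logs-2021-*', alias: 'logs' } // point the `logs` alias at all matching indices
+   * }
+   * ```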
*/ indices?: Indices + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. + * Data stream aliases don’t support this parameter. */ index_routing?: Routing + /** If `true`, the alias is hidden. */ is_hidden?: boolean + /** If `true`, sets the write index or data stream for the alias. */ is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. + * Data stream aliases don’t support this parameter. */ routing?: Routing + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. + * Data stream aliases don’t support this parameter. */ search_routing?: Routing + /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean } export interface IndicesUpdateAliasesRemoveAction { + /** Alias for the action. + * Index alias names support date math. */ alias?: IndexAlias + /** Aliases for the action. + * Index alias names support date math. */ aliases?: IndexAlias | IndexAlias[] + /** Data stream or index for the action. + * Supports wildcards (`*`). */ index?: IndexName + /** Data streams or indices for the action. + * Supports wildcards (`*`). */ indices?: Indices + /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean } export interface IndicesUpdateAliasesRemoveIndexAction { + /** Data stream or index for the action. + * Supports wildcards (`*`). */ index?: IndexName + /** Data streams or indices for the action. + * Supports wildcards (`*`). */ indices?: Indices + /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean } export interface IndicesUpdateAliasesRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** Actions to perform. */ actions?: IndicesUpdateAliasesAction[] @@ -15107,21 +21253,29 @@ export interface IndicesValidateQueryIndicesValidationExplanation { } export interface IndicesValidateQueryRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases to search. + * Supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. 
*/ allow_no_indices?: boolean /** If `true`, the validation is executed on all shards instead of one random shard per index. */ all_shards?: boolean - /** Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. */ + /** Analyzer to use for the query string. + * This parameter can only be used when the `q` query string parameter is specified. */ analyzer?: string /** If `true`, wildcard and prefix queries are analyzed. */ analyze_wildcard?: boolean /** The default operator for query string query: `AND` or `OR`. */ default_operator?: QueryDslOperator - /** Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified. */ + /** Field to use as default where no field prefix is given in the query string. + * This parameter can only be used when the `q` query string parameter is specified. */ df?: string - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `true`, the response returns detailed information if an error has occurred. */ explain?: boolean @@ -15148,15 +21302,366 @@ export interface IndicesValidateQueryResponse { error?: string } -export interface InferenceCompletionInferenceResult { - completion: InferenceCompletionResult[] +export interface InferenceAdaptiveAllocations { + /** Turn on `adaptive_allocations`. */ + enabled?: boolean + /** The maximum number of allocations to scale to. + * If set, it must be greater than or equal to `min_number_of_allocations`. */ + max_number_of_allocations?: integer + /** The minimum number of allocations to scale to. + * If set, it must be greater than or equal to 0. + * If not defined, the deployment scales to 0. */ + min_number_of_allocations?: integer } -export interface InferenceCompletionResult { - result: string +export interface InferenceAlibabaCloudServiceSettings { + /** A valid API key for the AlibabaCloud AI Search API. */ + api_key: string + /** The name of the host address used for the inference task. + * You can find the host address in the API keys section of the documentation. */ + host: string + /** This setting helps to minimize the number of rate limit errors returned from AlibabaCloud AI Search. + * By default, the `alibabacloud-ai-search` service sets the number of requests allowed per minute to `1000`. */ + rate_limit?: InferenceRateLimitSetting + /** The name of the model service to use for the inference task. 
+   * The following service IDs are available for the `completion` task:
+   *
+   * * `ops-qwen-turbo`
+   * * `qwen-turbo`
+   * * `qwen-plus`
+   * * `qwen-max` / `qwen-max-longcontext`
+   *
+   * The following service ID is available for the `rerank` task:
+   *
+   * * `ops-bge-reranker-larger`
+   *
+   * The following service ID is available for the `sparse_embedding` task:
+   *
+   * * `ops-text-sparse-embedding-001`
+   *
+   * The following service IDs are available for the `text_embedding` task:
+   *
+   * * `ops-text-embedding-001`
+   * * `ops-text-embedding-zh-001`
+   * * `ops-text-embedding-en-001`
+   * * `ops-text-embedding-002` */
+  service_id: string
+  /** The name of the workspace used for the inference task. */
+  workspace: string
+}
+
+export type InferenceAlibabaCloudServiceType = 'alibabacloud-ai-search'
+
+export interface InferenceAlibabaCloudTaskSettings {
+  /** For a `sparse_embedding` or `text_embedding` task, specify the type of input passed to the model.
+   * Valid values are:
+   *
+   * * `ingest` for storing document embeddings in a vector database.
+   * * `search` for storing embeddings of search queries run against a vector database to find relevant documents. */
+  input_type?: string
+  /** For a `sparse_embedding` task, it affects whether the token name will be returned in the response.
+   * It defaults to `false`, which means only the token ID will be returned in the response. */
+  return_token?: boolean
}

export type InferenceAlibabaCloudTaskType = 'completion' | 'rerank' | 'sparse_embedding' | 'text_embedding'

export interface InferenceAmazonBedrockServiceSettings {
+  /** A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests. */
+  access_key: string
+  /** The base model ID or an ARN to a custom model based on a foundational model.
+   * The base model IDs can be found in the Amazon Bedrock documentation.
+   * Note that the model ID must be available for the provider chosen and your IAM user must have access to the model. */
+  model: string
+  /** The model provider for your deployment.
+   * Note that some providers may support only certain task types.
+   * Supported providers include:
+   *
+   * * `amazontitan` - available for `text_embedding` and `completion` task types
+   * * `anthropic` - available for `completion` task type only
+   * * `ai21labs` - available for `completion` task type only
+   * * `cohere` - available for `text_embedding` and `completion` task types
+   * * `meta` - available for `completion` task type only
+   * * `mistral` - available for `completion` task type only */
+  provider?: string
+  /** The region that your model or ARN is deployed in.
+   * The list of available regions per model can be found in the Amazon Bedrock documentation. */
+  region: string
+  /** This setting helps to minimize the number of rate limit errors returned from Amazon Bedrock.
+   * By default, the `amazonbedrock` service sets the number of requests allowed per minute to 120. */
+  rate_limit?: InferenceRateLimitSetting
+  /** A valid AWS secret key that is paired with the `access_key`.
+   * For information about creating and managing access and secret keys, refer to the AWS documentation. */
+  secret_key: string
+}
+
+export type InferenceAmazonBedrockServiceType = 'amazonbedrock'
+
+export interface InferenceAmazonBedrockTaskSettings {
+  /** For a `completion` task, it sets the maximum number of output tokens to be generated.
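+   *
+   * For illustration, a minimal `task_settings` sketch for a Bedrock `completion` task (the values are placeholders, not documented defaults):
+   *
+   * ```ts
+   * const taskSettings: InferenceAmazonBedrockTaskSettings = {
+   *   max_new_tokens: 256, // cap the generated output
+   *   temperature: 0.2 // leave `top_k` and `top_p` unset when `temperature` is set
+   * }
+   * ```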
*/ + max_new_tokens?: integer + /** For a `completion` task, it is a number between 0.0 and 1.0 that controls the apparent creativity of the results. + * At temperature 0.0 the model is most deterministic, at temperature 1.0 most random. + * It should not be used if `top_p` or `top_k` is specified. */ + temperature?: float + /** For a `completion` task, it limits samples to the top-K most likely words, balancing coherence and variability. + * It is only available for anthropic, cohere, and mistral providers. + * It is an alternative to `temperature`; it should not be used if `temperature` is specified. */ + top_k?: float + /** For a `completion` task, it is a number in the range of 0.0 to 1.0, to eliminate low-probability tokens. + * Top-p uses nucleus sampling to select top tokens whose sum of likelihoods does not exceed a certain value, ensuring both variety and coherence. + * It is an alternative to `temperature`; it should not be used if `temperature` is specified. */ + top_p?: float +} + +export type InferenceAmazonBedrockTaskType = 'completion' | 'text_embedding' + +export interface InferenceAnthropicServiceSettings { + /** A valid API key for the Anthropic API. */ + api_key: string + /** The name of the model to use for the inference task. + * Refer to the Anthropic documentation for the list of supported models. */ + model_id: string + /** This setting helps to minimize the number of rate limit errors returned from Anthropic. + * By default, the `anthropic` service sets the number of requests allowed per minute to 50. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceAnthropicServiceType = 'anthropic' + +export interface InferenceAnthropicTaskSettings { + /** For a `completion` task, it is the maximum number of tokens to generate before stopping. */ + max_tokens: integer + /** For a `completion` task, it is the amount of randomness injected into the response. + * For more details about the supported range, refer to Anthropic documentation. */ + temperature?: float + /** For a `completion` task, it specifies to only sample from the top K options for each subsequent token. + * It is recommended for advanced use cases only. + * You usually only need to use `temperature`. */ + top_k?: integer + /** For a `completion` task, it specifies to use Anthropic's nucleus sampling. + * In nucleus sampling, Anthropic computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches the specified probability. + * You should either alter `temperature` or `top_p`, but not both. + * It is recommended for advanced use cases only. + * You usually only need to use `temperature`. */ + top_p?: float +} + +export type InferenceAnthropicTaskType = 'completion' + +export interface InferenceAzureAiStudioServiceSettings { + /** A valid API key of your Azure AI Studio model deployment. + * This key can be found on the overview page for your deployment in the management section of your Azure AI Studio account. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** The type of endpoint that is available for deployment through Azure AI Studio: `token` or `realtime`. 
+   * The `token` endpoint type is for "pay as you go" endpoints that are billed per token.
+   * The `realtime` endpoint type is for "real-time" endpoints that are billed per hour of usage. */
+  endpoint_type: string
+  /** The target URL of your Azure AI Studio model deployment.
+   * This can be found on the overview page for your deployment in the management section of your Azure AI Studio account. */
+  target: string
+  /** The model provider for your deployment.
+   * Note that some providers may support only certain task types.
+   * Supported providers include:
+   *
+   * * `cohere` - available for `text_embedding` and `completion` task types
+   * * `databricks` - available for `completion` task type only
+   * * `meta` - available for `completion` task type only
+   * * `microsoft_phi` - available for `completion` task type only
+   * * `mistral` - available for `completion` task type only
+   * * `openai` - available for `text_embedding` and `completion` task types */
+  provider: string
+  /** This setting helps to minimize the number of rate limit errors returned from Azure AI Studio.
+   * By default, the `azureaistudio` service sets the number of requests allowed per minute to 240. */
+  rate_limit?: InferenceRateLimitSetting
+}
+
+export type InferenceAzureAiStudioServiceType = 'azureaistudio'
+
+export interface InferenceAzureAiStudioTaskSettings {
+  /** For a `completion` task, instruct the inference process to perform sampling.
+   * It has no effect unless `temperature` or `top_p` is specified. */
+  do_sample?: float
+  /** For a `completion` task, provide a hint for the maximum number of output tokens to be generated. */
+  max_new_tokens?: integer
+  /** For a `completion` task, control the apparent creativity of generated completions with a sampling temperature.
+   * It must be a number in the range of 0.0 to 2.0.
+   * It should not be used if `top_p` is specified. */
+  temperature?: float
+  /** For a `completion` task, make the model consider the results of the tokens with nucleus sampling probability.
+   * It is an alternative value to `temperature` and must be a number in the range of 0.0 to 2.0.
+   * It should not be used if `temperature` is specified. */
+  top_p?: float
+  /** For a `text_embedding` task, specify the user issuing the request.
+   * This information can be used for abuse detection. */
+  user?: string
+}
+
+export type InferenceAzureAiStudioTaskType = 'completion' | 'text_embedding'
+
+export interface InferenceAzureOpenAIServiceSettings {
+  /** A valid API key for your Azure OpenAI account.
+   * You must specify either `api_key` or `entra_id`.
+   * If you do not provide either or you provide both, you will receive an error when you try to create your model.
+   *
+   * IMPORTANT: You need to provide the API key only once, during the inference model creation.
+   * The get inference endpoint API does not retrieve your API key.
+   * After creating the inference model, you cannot change the associated API key.
+   * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
+  api_key?: string
+  /** The Azure API version ID to use.
+   * It is recommended to use the latest supported non-preview version. */
+  api_version: string
+  /** The deployment name of your deployed models.
+   * Your Azure OpenAI deployments can be found through the Azure OpenAI Studio portal that is linked to your subscription. */
+  deployment_id: string
+  /** A valid Microsoft Entra token.
+   * You must specify either `api_key` or `entra_id`.
+ * If you do not provide either or you provide both, you will receive an error when you try to create your model. */ + entra_id?: string + /** This setting helps to minimize the number of rate limit errors returned from Azure. + * The `azureopenai` service sets a default number of requests allowed per minute depending on the task type. + * For `text_embedding`, it is set to `1440`. + * For `completion`, it is set to `120`. */ + rate_limit?: InferenceRateLimitSetting + /** The name of your Azure OpenAI resource. + * You can find this from the list of resources in the Azure Portal for your subscription. */ + resource_name: string +} + +export type InferenceAzureOpenAIServiceType = 'azureopenai' + +export interface InferenceAzureOpenAITaskSettings { + /** For a `completion` or `text_embedding` task, specify the user issuing the request. + * This information can be used for abuse detection. */ + user?: string +} + +export type InferenceAzureOpenAITaskType = 'completion' | 'text_embedding' + +export type InferenceCohereEmbeddingType = 'byte' | 'float' | 'int8' + +export type InferenceCohereInputType = 'classification' | 'clustering' | 'ingest' | 'search' + +export interface InferenceCohereServiceSettings { + /** A valid API key for your Cohere account. + * You can find or create your Cohere API keys on the Cohere API key settings page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** For a `text_embedding` task, the types of embeddings you want to get back. + * Use `byte` for signed int8 embeddings (this is a synonym of `int8`). + * Use `float` for the default float embeddings. + * Use `int8` for signed int8 embeddings. */ + embedding_type?: InferenceCohereEmbeddingType + /** For a `completion`, `rerank`, or `text_embedding` task, the name of the model to use for the inference task. + * + * * For the available `completion` models, refer to the [Cohere command docs](https://docs.cohere.com/docs/models#command). + * * For the available `rerank` models, refer to the [Cohere rerank docs](https://docs.cohere.com/reference/rerank-1). + * * For the available `text_embedding` models, refer to [Cohere embed docs](https://docs.cohere.com/reference/embed). + * + * The default value for a text embedding task is `embed-english-v2.0`. */ + model_id?: string + /** This setting helps to minimize the number of rate limit errors returned from Cohere. + * By default, the `cohere` service sets the number of requests allowed per minute to 10000. */ + rate_limit?: InferenceRateLimitSetting + /** The similarity measure. + * If the `embedding_type` is `float`, the default value is `dot_product`. + * If the `embedding_type` is `int8` or `byte`, the default value is `cosine`. */ + similarity?: InferenceCohereSimilarityType +} + +export type InferenceCohereServiceType = 'cohere' + +export type InferenceCohereSimilarityType = 'cosine' | 'dot_product' | 'l2_norm' + +export interface InferenceCohereTaskSettings { + /** For a `text_embedding` task, the type of input passed to the model. + * Valid values are: + * + * * `classification`: Use it for embeddings passed through a text classifier. + * * `clustering`: Use it for the embeddings run through a clustering algorithm. 
+   * * `ingest`: Use it for storing document embeddings in a vector database.
+   * * `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents.
+   *
+   * IMPORTANT: The `input_type` field is required when using embedding models `v3` and higher. */
+  input_type?: InferenceCohereInputType
+  /** For a `rerank` task, return doc text within the results. */
+  return_documents?: boolean
+  /** For a `rerank` task, the number of most relevant documents to return.
+   * It defaults to the number of the documents.
+   * If this inference endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query. */
+  top_n?: integer
+  /** For a `text_embedding` task, the method to handle inputs longer than the maximum token length.
+   * Valid values are:
+   *
+   * * `END`: When the input exceeds the maximum input token length, the end of the input is discarded.
+   * * `NONE`: When the input exceeds the maximum input token length, an error is returned.
+   * * `START`: When the input exceeds the maximum input token length, the start of the input is discarded. */
+  truncate?: InferenceCohereTruncateType
+}
+
+export type InferenceCohereTaskType = 'completion' | 'rerank' | 'text_embedding'
+
+export type InferenceCohereTruncateType = 'END' | 'NONE' | 'START'
+
+export interface InferenceCompletionInferenceResult {
+  completion: InferenceCompletionResult[]
+}
+
+export interface InferenceCompletionResult {
+  result: string
+}
+
+export interface InferenceCompletionTool {
+  /** The type of tool. */
+  type: string
+  /** The function definition. */
+  function: InferenceCompletionToolFunction
+}
+
+export interface InferenceCompletionToolChoice {
+  /** The type of the tool. */
+  type: string
+  /** The tool choice function. */
+  function: InferenceCompletionToolChoiceFunction
+}
+
+export interface InferenceCompletionToolChoiceFunction {
+  /** The name of the function to call. */
+  name: string
+}
+
+export interface InferenceCompletionToolFunction {
+  /** A description of what the function does.
+   * This is used by the model to choose when and how to call the function. */
+  description?: string
+  /** The name of the function. */
+  name: string
+  /** The parameters the function accepts. This should be formatted as a JSON object. */
+  parameters?: any
+  /** Whether to enable schema adherence when generating the function call. */
+  strict?: boolean
+}
+
+export type InferenceCompletionToolType = string | InferenceCompletionToolChoice
+
+export interface InferenceContentObject {
+  /** The text content. */
+  text: string
+  /** The type of content. */
+  type: string
+}
+
+export interface InferenceDeleteInferenceEndpointResult extends AcknowledgedResponseBase {
  pipelines: string[]
}

@@ -15164,25 +21669,295 @@ export type InferenceDenseByteVector = byte[]

export type InferenceDenseVector = float[]

-export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
+export interface InferenceEisServiceSettings {
+  /** The name of the model to use for the inference task. */
+  model_id: string
+  /** This setting helps to minimize the number of rate limit errors returned.
+   * By default, the `elastic` service sets the number of requests allowed per minute to `240` in case of `chat_completion`.
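+   *
+   * For illustration, a minimal `service_settings` sketch (the model name is a placeholder, not a real model):
+   *
+   * ```ts
+   * const serviceSettings: InferenceEisServiceSettings = {
+   *   model_id: '<model-id>', // placeholder
+   *   rate_limit: { requests_per_minute: 240 }
+   * }
+   * ```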
*/ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceEisServiceType = 'elastic' + +export type InferenceEisTaskType = 'chat_completion' + +export interface InferenceElasticsearchServiceSettings { + /** Adaptive allocations configuration details. + * If `enabled` is true, the number of allocations of the model is set based on the current load the process gets. + * When the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set. + * When the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set. + * If `enabled` is true, do not set the number of allocations manually. */ + adaptive_allocations?: InferenceAdaptiveAllocations + /** The deployment identifier for a trained model deployment. + * When `deployment_id` is used the `model_id` is optional. */ + deployment_id?: string + /** The name of the model to use for the inference task. + * It can be the ID of a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model that was uploaded by using the Eland client. */ + model_id: string + /** The total number of allocations that are assigned to the model across machine learning nodes. + * Increasing this value generally increases the throughput. + * If adaptive allocations are enabled, do not set this value because it's automatically set. */ + num_allocations?: integer + /** The number of threads used by each model allocation during inference. + * This setting generally increases the speed per inference request. + * The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. + * The value must be a power of 2. + * The maximum value is 32. */ + num_threads: integer +} + +export type InferenceElasticsearchServiceType = 'elasticsearch' + +export interface InferenceElasticsearchTaskSettings { + /** For a `rerank` task, return the document instead of only the index. */ + return_documents?: boolean +} + +export type InferenceElasticsearchTaskType = 'rerank' | 'sparse_embedding' | 'text_embedding' + +export interface InferenceElserServiceSettings { + /** Adaptive allocations configuration details. + * If `enabled` is true, the number of allocations of the model is set based on the current load the process gets. + * When the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set. + * When the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set. + * If `enabled` is true, do not set the number of allocations manually. */ + adaptive_allocations?: InferenceAdaptiveAllocations + /** The total number of allocations this model is assigned across machine learning nodes. + * Increasing this value generally increases the throughput. + * If adaptive allocations is enabled, do not set this value because it's automatically set. */ + num_allocations: integer + /** The number of threads used by each model allocation during inference. + * Increasing this value generally increases the speed per inference request. + * The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. + * The value must be a power of 2. + * The maximum value is 32. 
+   *
+   * > info
+   * > If you want to optimize your ELSER endpoint for ingest, set the number of threads to 1. If you want to optimize your ELSER endpoint for search, set the number of threads to greater than 1. */
+  num_threads: integer
+}
+
+export type InferenceElserServiceType = 'elser'
+
+export type InferenceElserTaskType = 'sparse_embedding'
+
+export type InferenceGoogleAiServiceType = 'googleaistudio'
+
+export interface InferenceGoogleAiStudioServiceSettings {
+  /** A valid API key of your Google Gemini account. */
+  api_key: string
+  /** The name of the model to use for the inference task.
+   * Refer to the Google documentation for the list of supported models. */
+  model_id: string
+  /** This setting helps to minimize the number of rate limit errors returned from Google AI Studio.
+   * By default, the `googleaistudio` service sets the number of requests allowed per minute to 360. */
+  rate_limit?: InferenceRateLimitSetting
+}
+
+export type InferenceGoogleAiStudioTaskType = 'completion' | 'text_embedding'
+
+export interface InferenceGoogleVertexAIServiceSettings {
+  /** The name of the location to use for the inference task.
+   * Refer to the Google documentation for the list of supported locations. */
+  location: string
+  /** The name of the model to use for the inference task.
+   * Refer to the Google documentation for the list of supported models. */
+  model_id: string
+  /** The name of the project to use for the inference task. */
+  project_id: string
+  /** This setting helps to minimize the number of rate limit errors returned from Google Vertex AI.
+   * By default, the `googlevertexai` service sets the number of requests allowed per minute to 30,000. */
+  rate_limit?: InferenceRateLimitSetting
+  /** A valid service account in JSON format for the Google Vertex AI API. */
+  service_account_json: string
+}
+
+export type InferenceGoogleVertexAIServiceType = 'googlevertexai'
+
+export interface InferenceGoogleVertexAITaskSettings {
+  /** For a `text_embedding` task, truncate inputs longer than the maximum token length automatically. */
+  auto_truncate?: boolean
+  /** For a `rerank` task, the number of the top N documents that should be returned. */
+  top_n?: integer
+}
+
+export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding'
+
+export interface InferenceHuggingFaceServiceSettings {
+  /** A valid access token for your HuggingFace account.
+   * You can create or find your access tokens on the HuggingFace settings page.
+   *
+   * IMPORTANT: You need to provide the API key only once, during the inference model creation.
+   * The get inference endpoint API does not retrieve your API key.
+   * After creating the inference model, you cannot change the associated API key.
+   * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
+  api_key: string
+  /** This setting helps to minimize the number of rate limit errors returned from Hugging Face.
+   * By default, the `hugging_face` service sets the number of requests allowed per minute to 3000. */
+  rate_limit?: InferenceRateLimitSetting
+  /** The URL endpoint to use for the requests. */
+  url: string
+}
+
+export type InferenceHuggingFaceServiceType = 'hugging_face'
+
+export type InferenceHuggingFaceTaskType = 'text_embedding'
+
+export interface InferenceInferenceChunkingSettings {
+  /** The maximum size of a chunk in words.
+   * This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy).
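+   *
+   * For illustration, a minimal sketch of a `word` strategy configuration (the values are placeholders chosen to satisfy the documented bounds):
+   *
+   * ```ts
+   * const chunking: InferenceInferenceChunkingSettings = {
+   *   strategy: 'word',
+   *   max_chunk_size: 100, // between 10 and 300 for the `word` strategy
+   *   overlap: 50 // at most half of `max_chunk_size`
+   * }
+   * ```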
 */
  max_chunk_size?: integer
+  /** The number of overlapping words for chunks.
+   * It is applicable only to a `word` chunking strategy.
+   * This value cannot be higher than half the `max_chunk_size` value. */
  overlap?: integer
+  /** The number of overlapping sentences for chunks.
+   * It is applicable only for a `sentence` chunking strategy.
+   * It can be either `1` or `0`. */
  sentence_overlap?: integer
+  /** The chunking strategy: `sentence` or `word`. */
  strategy?: string
 }
 
 export interface InferenceInferenceEndpoint {
+  /** Chunking configuration object */
  chunking_settings?: InferenceInferenceChunkingSettings
+  /** The service type */
  service: string
+  /** Settings specific to the service */
  service_settings: InferenceServiceSettings
+  /** Task settings specific to the service and task type */
  task_settings?: InferenceTaskSettings
 }
 
 export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoint {
+  /** The inference Id */
  inference_id: string
+  /** The task type */
  task_type: InferenceTaskType
 }
 
+export interface InferenceJinaAIServiceSettings {
+  /** A valid API key of your JinaAI account.
+   *
+   * IMPORTANT: You need to provide the API key only once, during the inference model creation.
+   * The get inference endpoint API does not retrieve your API key.
+   * After creating the inference model, you cannot change the associated API key.
+   * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
+  api_key: string
+  /** The name of the model to use for the inference task.
+   * For a `rerank` task, it is required.
+   * For a `text_embedding` task, it is optional. */
+  model_id?: string
+  /** This setting helps to minimize the number of rate limit errors returned from JinaAI.
+   * By default, the `jinaai` service sets the number of requests allowed per minute to 2000 for all task types. */
+  rate_limit?: InferenceRateLimitSetting
+  /** For a `text_embedding` task, the similarity measure. One of cosine, dot_product, l2_norm.
+   * The default value varies with the embedding type.
+   * For example, a float embedding type uses a `dot_product` similarity measure by default. */
+  similarity?: InferenceJinaAISimilarityType
+}
+
+export type InferenceJinaAIServiceType = 'jinaai'
+
+export type InferenceJinaAISimilarityType = 'cosine' | 'dot_product' | 'l2_norm'
+
+export interface InferenceJinaAITaskSettings {
+  /** For a `rerank` task, return the doc text within the results. */
+  return_documents?: boolean
+  /** For a `text_embedding` task, the task passed to the model.
+   * Valid values are:
+   *
+   * * `classification`: Use it for embeddings passed through a text classifier.
+   * * `clustering`: Use it for the embeddings run through a clustering algorithm.
+   * * `ingest`: Use it for storing document embeddings in a vector database.
+   * * `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents. */
+  task?: InferenceJinaAITextEmbeddingTask
+  /** For a `rerank` task, the number of most relevant documents to return.
+   * It defaults to the number of the documents.
+   * If this inference endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query.
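+   *
+   * For illustration, a minimal `task_settings` sketch for a `rerank` endpoint (the values are placeholders):
+   *
+   * ```ts
+   * const taskSettings: InferenceJinaAITaskSettings = {
+   *   return_documents: true, // include the document text in the results
+   *   top_n: 5 // return only the five most relevant documents
+   * }
+   * ```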
*/ + top_n?: integer +} + +export type InferenceJinaAITaskType = 'rerank' | 'text_embedding' + +export type InferenceJinaAITextEmbeddingTask = 'classification' | 'clustering' | 'ingest' | 'search' + +export interface InferenceMessage { + /** The content of the message. */ + content?: InferenceMessageContent + /** The role of the message author. */ + role: string + /** The tool call that this message is responding to. */ + tool_call_id?: Id + /** The tool calls generated by the model. */ + tool_calls?: InferenceToolCall[] +} + +export type InferenceMessageContent = string | InferenceContentObject[] + +export interface InferenceMistralServiceSettings { + /** A valid API key of your Mistral account. + * You can find your Mistral API keys or you can create a new one on the API Keys page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** The maximum number of tokens per input before chunking occurs. */ + max_input_tokens?: integer + /** The name of the model to use for the inference task. + * Refer to the Mistral models documentation for the list of available text embedding models. */ + model: string + /** This setting helps to minimize the number of rate limit errors returned from the Mistral API. + * By default, the `mistral` service sets the number of requests allowed per minute to 240. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceMistralServiceType = 'mistral' + +export type InferenceMistralTaskType = 'text_embedding' + +export interface InferenceOpenAIServiceSettings { + /** A valid API key of your OpenAI account. + * You can find your OpenAI API keys in your OpenAI account under the API keys section. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** The number of dimensions the resulting output embeddings should have. + * It is supported only in `text-embedding-3` and later models. + * If it is not set, the OpenAI defined default for the model is used. */ + dimensions?: integer + /** The name of the model to use for the inference task. + * Refer to the OpenAI documentation for the list of available text embedding models. */ + model_id: string + /** The unique identifier for your organization. + * You can find the Organization ID in your OpenAI account under *Settings > Organizations*. */ + organization_id?: string + /** This setting helps to minimize the number of rate limit errors returned from OpenAI. + * The `openai` service sets a default number of requests allowed per minute depending on the task type. + * For `text_embedding`, it is set to `3000`. + * For `completion`, it is set to `500`. */ + rate_limit?: InferenceRateLimitSetting + /** The URL endpoint to use for the requests. + * It can be changed for testing purposes. 
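+   *
+   * For illustration, a minimal `service_settings` sketch (the API key is a placeholder; `text-embedding-3-small` is one of the OpenAI embedding models):
+   *
+   * ```ts
+   * const serviceSettings: InferenceOpenAIServiceSettings = {
+   *   api_key: '<openai-api-key>',
+   *   model_id: 'text-embedding-3-small'
+   * }
+   * ```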
*/ + url?: string +} + +export type InferenceOpenAIServiceType = 'openai' + +export interface InferenceOpenAITaskSettings { + /** For a `completion` or `text_embedding` task, specify the user issuing the request. + * This information can be used for abuse detection. */ + user?: string +} + +export type InferenceOpenAITaskType = 'chat_completion' | 'completion' | 'text_embedding' + export interface InferenceRankedDocument { index: integer relevance_score: float @@ -15190,17 +21965,26 @@ export interface InferenceRankedDocument { } export interface InferenceRateLimitSetting { + /** The number of requests allowed per minute. */ requests_per_minute?: integer } -export interface InferenceRequestChatCompletionBase extends RequestBase { - messages: InferenceChatCompletionUnifiedMessage[] +export interface InferenceRequestChatCompletion { + /** A list of objects representing the conversation. */ + messages: InferenceMessage[] + /** The ID of the model to use. */ model?: string + /** The upper bound limit for the number of tokens that can be generated for a completion request. */ max_completion_tokens?: long + /** A sequence of strings to control when the model should stop generating additional tokens. */ stop?: string[] + /** The sampling temperature to use. */ temperature?: float - tool_choice?: InferenceChatCompletionUnifiedCompletionToolType - tools?: InferenceChatCompletionUnifiedCompletionTool[] + /** Controls which tool is called by the model. */ + tool_choice?: InferenceCompletionToolType + /** A list of tools that the model can call. */ + tools?: InferenceCompletionTool[] + /** Nucleus sampling, an alternative to sampling with temperature. */ top_p?: float } @@ -15238,73 +22022,112 @@ export interface InferenceTextEmbeddingResult { embedding: InferenceDenseVector } -export interface InferenceChatCompletionUnifiedCompletionTool { - type: string - function: InferenceChatCompletionUnifiedCompletionToolFunction -} - -export interface InferenceChatCompletionUnifiedCompletionToolChoice { +export interface InferenceToolCall { + /** The identifier of the tool call. */ + id: Id + /** The function that the model called. */ + function: InferenceToolCallFunction + /** The type of the tool call. */ type: string - function: InferenceChatCompletionUnifiedCompletionToolChoiceFunction } -export interface InferenceChatCompletionUnifiedCompletionToolChoiceFunction { +export interface InferenceToolCallFunction { + /** The arguments to call the function with in JSON format. */ + arguments: string + /** The name of the function to call. */ name: string } -export interface InferenceChatCompletionUnifiedCompletionToolFunction { - description?: string - name: string - parameters?: any - strict?: boolean +export interface InferenceVoyageAIServiceSettings { + /** The number of dimensions for resulting output embeddings. + * This setting maps to `output_dimension` in the VoyageAI documentation. + * Only for the `text_embedding` task type. */ + dimensions?: integer + /** The name of the model to use for the inference task. + * Refer to the VoyageAI documentation for the list of available text embedding and rerank models. */ + model_id: string + /** This setting helps to minimize the number of rate limit errors returned from VoyageAI. + * The `voyageai` service sets a default number of requests allowed per minute depending on the task type. + * For both `text_embedding` and `rerank`, it is set to `2000`. */ + rate_limit?: InferenceRateLimitSetting + /** The data type for the embeddings to be returned. 
+   * This setting maps to `output_dtype` in the VoyageAI documentation.
+   * Permitted values: float, int8, bit.
+   * `int8` is a synonym of `byte` in the VoyageAI documentation.
+   * `bit` is a synonym of `binary` in the VoyageAI documentation.
+   * Only for the `text_embedding` task type. */
+  embedding_type?: float
+}
+
+export type InferenceVoyageAIServiceType = 'voyageai'
+
+export interface InferenceVoyageAITaskSettings {
+  /** Type of the input text.
+   * Permitted values: `ingest` (maps to `document` in the VoyageAI documentation), `search` (maps to `query` in the VoyageAI documentation).
+   * Only for the `text_embedding` task type. */
+  input_type?: string
+  /** Whether to return the source documents in the response.
+   * Only for the `rerank` task type. */
+  return_documents?: boolean
+  /** The number of most relevant documents to return.
+   * If not specified, the reranking results of all documents will be returned.
+   * Only for the `rerank` task type. */
+  top_k?: integer
+  /** Whether to truncate the input texts to fit within the context length. */
+  truncation?: boolean
+}
+
+export type InferenceVoyageAITaskType = 'text_embedding' | 'rerank'
+
+export interface InferenceWatsonxServiceSettings {
+  /** A valid API key of your Watsonx account.
+   * You can find your Watsonx API keys or you can create a new one on the API keys page.
+   *
+   * IMPORTANT: You need to provide the API key only once, during the inference model creation.
+   * The get inference endpoint API does not retrieve your API key.
+   * After creating the inference model, you cannot change the associated API key.
+   * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
+  api_key: string
+  /** A version parameter that takes a version date in the format of `YYYY-MM-DD`.
+   * For the active version data parameters, refer to the Watsonx documentation. */
+  api_version: string
+  /** The name of the model to use for the inference task.
+   * Refer to the IBM Embedding Models section in the Watsonx documentation for the list of available text embedding models. */
+  model_id: string
+  /** The identifier of the IBM Cloud project to use for the inference task. */
+  project_id: string
+  /** This setting helps to minimize the number of rate limit errors returned from Watsonx.
+   * By default, the `watsonxai` service sets the number of requests allowed per minute to 120. */
+  rate_limit?: InferenceRateLimitSetting
+  /** The URL of the inference endpoint that you created on Watsonx. */
+  url: string
+}
+
+export type InferenceWatsonxServiceType = 'watsonxai'
+
+export type InferenceWatsonxTaskType = 'text_embedding'
+
+export interface InferenceChatCompletionUnifiedRequest extends RequestBase {
+  /** The inference Id */
 inference_id: Id
 /** Specifies the amount of time to wait for the inference request to complete.
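  *
  * For illustration, a minimal request sketch (the endpoint name and message are placeholders):
  *
  * ```ts
  * const req: InferenceChatCompletionUnifiedRequest = {
  *   inference_id: 'my-chat-endpoint',
  *   timeout: '30s',
  *   chat_completion_request: {
  *     messages: [{ role: 'user', content: 'Say hello' }]
  *   }
  * }
  * ```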
*/ timeout?: Duration + chat_completion_request?: InferenceRequestChatCompletion /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never } + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, chat_completion_request?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never } + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, chat_completion_request?: never } } export type InferenceChatCompletionUnifiedResponse = StreamResult -export interface InferenceChatCompletionUnifiedToolCall { - id: Id - function: InferenceChatCompletionUnifiedToolCallFunction - type: string -} - -export interface InferenceChatCompletionUnifiedToolCallFunction { - arguments: string - name: string -} - export interface InferenceCompletionRequest extends RequestBase { -/** The inference Id */ + /** The inference Id */ inference_id: Id /** Specifies the amount of time to wait for the inference request to complete. */ timeout?: Duration - /** Inference input. Either a string or an array of strings. */ + /** Inference input. + * Either a string or an array of strings. */ input: string | string[] /** Optional task settings */ task_settings?: InferenceTaskSettings @@ -15317,7 +22140,7 @@ export interface InferenceCompletionRequest extends RequestBase { export type InferenceCompletionResponse = InferenceCompletionInferenceResult export interface InferenceDeleteRequest extends RequestBase { -/** The task type */ + /** The task type */ task_type?: InferenceTaskType /** The inference identifier. */ inference_id: Id @@ -15334,7 +22157,7 @@ export interface InferenceDeleteRequest extends RequestBase { export type InferenceDeleteResponse = InferenceDeleteInferenceEndpointResult export interface InferenceGetRequest extends RequestBase { -/** The task type */ + /** The task type */ task_type?: InferenceTaskType /** The inference Id */ inference_id?: Id @@ -15348,19 +22171,20 @@ export interface InferenceGetResponse { endpoints: InferenceInferenceEndpointInfo[] } -export interface InferencePostEisChatCompletionRequest extends InferenceRequestChatCompletionBase { -/** The unique identifier of the inference endpoint. */ +export interface InferencePostEisChatCompletionRequest extends RequestBase { + /** The unique identifier of the inference endpoint. */ eis_inference_id: Id + chat_completion_request?: InferenceRequestChatCompletion /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { eis_inference_id?: never } + body?: string | { [key: string]: any } & { eis_inference_id?: never, chat_completion_request?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { eis_inference_id?: never } + querystring?: { [key: string]: any } & { eis_inference_id?: never, chat_completion_request?: never } } export type InferencePostEisChatCompletionResponse = StreamResult export interface InferencePutRequest extends RequestBase { -/** The task type */ + /** The task type */ task_type?: InferenceTaskType /** The inference Id */ inference_id: Id @@ -15373,22 +22197,151 @@ export interface InferencePutRequest extends RequestBase { export type InferencePutResponse = InferenceInferenceEndpointInfo -export interface InferencePutEisEisServiceSettings { - model_id: string - rate_limit?: InferenceRateLimitSetting +export interface InferencePutAlibabacloudRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceAlibabaCloudTaskType + /** The unique identifier of the inference endpoint. */ + alibabacloud_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. */ + service: InferenceAlibabaCloudServiceType + /** Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service. */ + service_settings: InferenceAlibabaCloudServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceAlibabaCloudTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutEisEisTaskType = 'chat_completion' +export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfo + +export interface InferencePutAmazonbedrockRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceAmazonBedrockTaskType + /** The unique identifier of the inference endpoint. */ + amazonbedrock_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `amazonbedrock`. */ + service: InferenceAmazonBedrockServiceType + /** Settings used to install the inference model. These settings are specific to the `amazonbedrock` service. */ + service_settings: InferenceAmazonBedrockServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceAmazonBedrockTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. 
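+   *
+   * For illustration, a minimal sketch of a complete `amazonbedrock` request (the credentials, names, and model ID below are placeholders, not recommendations):
+   *
+   * ```ts
+   * const req: InferencePutAmazonbedrockRequest = {
+   *   task_type: 'text_embedding',
+   *   amazonbedrock_inference_id: 'my-bedrock-embeddings',
+   *   service: 'amazonbedrock',
+   *   service_settings: {
+   *     access_key: '<aws-access-key>',
+   *     secret_key: '<aws-secret-key>',
+   *     region: 'us-east-1',
+   *     provider: 'amazontitan',
+   *     model: '<base-model-id>' // placeholder, e.g. an Amazon Titan embedding model ID
+   *   }
+   * }
+   * ```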
 */
+  querystring?: { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+}
+
+export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfo
+
+export interface InferencePutAnthropicRequest extends RequestBase {
+  /** The task type.
+   * The only valid task type for the model to perform is `completion`. */
+  task_type: InferenceAnthropicTaskType
+  /** The unique identifier of the inference endpoint. */
+  anthropic_inference_id: Id
+  /** The chunking configuration object. */
+  chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `anthropic`. */
+  service: InferenceAnthropicServiceType
+  /** Settings used to install the inference model. These settings are specific to the `anthropic` service. */
+  service_settings: InferenceAnthropicServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
+  task_settings?: InferenceAnthropicTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+}
+
+export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfo
+
+export interface InferencePutAzureaistudioRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
+  task_type: InferenceAzureAiStudioTaskType
+  /** The unique identifier of the inference endpoint. */
+  azureaistudio_inference_id: Id
+  /** The chunking configuration object. */
+  chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `azureaistudio`. */
+  service: InferenceAzureAiStudioServiceType
+  /** Settings used to install the inference model. These settings are specific to the `azureaistudio` service. */
+  service_settings: InferenceAzureAiStudioServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
+  task_settings?: InferenceAzureAiStudioTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+}
+
+export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfo
+
+export interface InferencePutAzureopenaiRequest extends RequestBase {
+  /** The type of the inference task that the model will perform.
+   * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */
+  task_type: InferenceAzureOpenAITaskType
+  /** The unique identifier of the inference endpoint.
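+   *
+   * For illustration, a minimal sketch of a complete request (the key, API version, and deployment and resource names are placeholders):
+   *
+   * ```ts
+   * const req: InferencePutAzureopenaiRequest = {
+   *   task_type: 'text_embedding',
+   *   azureopenai_inference_id: 'my-azure-embeddings',
+   *   service: 'azureopenai',
+   *   service_settings: {
+   *     api_key: '<azure-api-key>',
+   *     api_version: '<api-version>',
+   *     deployment_id: 'my-deployment',
+   *     resource_name: 'my-resource'
+   *   }
+   * }
+   * ```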
*/ + azureopenai_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `azureopenai`. */ + service: InferenceAzureOpenAIServiceType + /** Settings used to install the inference model. These settings are specific to the `azureopenai` service. */ + service_settings: InferenceAzureOpenAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceAzureOpenAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfo + +export interface InferencePutCohereRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceCohereTaskType + /** The unique identifier of the inference endpoint. */ + cohere_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `cohere`. */ + service: InferenceCohereServiceType + /** Settings used to install the inference model. + * These settings are specific to the `cohere` service. */ + service_settings: InferenceCohereServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceCohereTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutCohereResponse = InferenceInferenceEndpointInfo export interface InferencePutEisRequest extends RequestBase { -/** The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ - task_type: InferencePutEisEisTaskType + /** The type of the inference task that the model will perform. + * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ + task_type: InferenceEisTaskType /** The unique identifier of the inference endpoint. */ eis_inference_id: Id /** The type of service supported for the specified task type. In this case, `elastic`. */ - service: InferencePutEisServiceType + service: InferenceEisServiceType /** Settings used to install the inference model. These settings are specific to the `elastic` service. 
 */
-  service_settings: InferencePutEisEisServiceSettings
+  service_settings: InferenceEisServiceSettings
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { task_type?: never, eis_inference_id?: never, service?: never, service_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
@@ -15397,36 +22350,165 @@ export interface InferencePutEisRequest extends RequestBase {
 
 export type InferencePutEisResponse = InferenceInferenceEndpointInfo
 
-export type InferencePutEisServiceType = 'elastic'
+export interface InferencePutElasticsearchRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
+  task_type: InferenceElasticsearchTaskType
+  /** The unique identifier of the inference endpoint.
+   * It must not match the `model_id`. */
+  elasticsearch_inference_id: Id
+  /** The chunking configuration object. */
+  chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `elasticsearch`. */
+  service: InferenceElasticsearchServiceType
+  /** Settings used to install the inference model. These settings are specific to the `elasticsearch` service. */
+  service_settings: InferenceElasticsearchServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
+  task_settings?: InferenceElasticsearchTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+}
+
+export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfo
 
-export interface InferencePutOpenaiOpenAIServiceSettings {
-  api_key: string
-  dimensions?: integer
-  model_id: string
-  organization_id?: string
-  rate_limit?: InferenceRateLimitSetting
-  url?: string
+export interface InferencePutElserRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
+  task_type: InferenceElserTaskType
+  /** The unique identifier of the inference endpoint. */
+  elser_inference_id: Id
+  /** The chunking configuration object. */
+  chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `elser`. */
+  service: InferenceElserServiceType
+  /** Settings used to install the inference model. These settings are specific to the `elser` service. */
+  service_settings: InferenceElserServiceSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, elser_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { task_type?: never, elser_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } } -export interface InferencePutOpenaiOpenAITaskSettings { - user?: string +export type InferencePutElserResponse = InferenceInferenceEndpointInfo + +export interface InferencePutGoogleaistudioRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceGoogleAiStudioTaskType + /** The unique identifier of the inference endpoint. */ + googleaistudio_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `googleaistudio`. */ + service: InferenceGoogleAiServiceType + /** Settings used to install the inference model. These settings are specific to the `googleaistudio` service. */ + service_settings: InferenceGoogleAiStudioServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfo + +export interface InferencePutGooglevertexaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceGoogleVertexAITaskType + /** The unique identifier of the inference endpoint. */ + googlevertexai_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `googlevertexai`. */ + service: InferenceGoogleVertexAIServiceType + /** Settings used to install the inference model. These settings are specific to the `googlevertexai` service. */ + service_settings: InferenceGoogleVertexAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceGoogleVertexAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfo + +export interface InferencePutHuggingFaceRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceHuggingFaceTaskType + /** The unique identifier of the inference endpoint. */ + huggingface_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `hugging_face`. 
*/ + service: InferenceHuggingFaceServiceType + /** Settings used to install the inference model. These settings are specific to the `hugging_face` service. */ + service_settings: InferenceHuggingFaceServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfo + +export interface InferencePutJinaaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceJinaAITaskType + /** The unique identifier of the inference endpoint. */ + jinaai_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `jinaai`. */ + service: InferenceJinaAIServiceType + /** Settings used to install the inference model. These settings are specific to the `jinaai` service. */ + service_settings: InferenceJinaAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceJinaAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutOpenaiOpenAITaskType = 'chat_completion' | 'completion' | 'text_embedding' +export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfo + +export interface InferencePutMistralRequest extends RequestBase { + /** The task type. + * The only valid task type for the model to perform is `text_embedding`. */ + task_type: InferenceMistralTaskType + /** The unique identifier of the inference endpoint. */ + mistral_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `mistral`. */ + service: InferenceMistralServiceType + /** Settings used to install the inference model. These settings are specific to the `mistral` service. */ + service_settings: InferenceMistralServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutMistralResponse = InferenceInferenceEndpointInfo export interface InferencePutOpenaiRequest extends RequestBase { -/** The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ - task_type: InferencePutOpenaiOpenAITaskType + /** The type of the inference task that the model will perform. + * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ + task_type: InferenceOpenAITaskType /** The unique identifier of the inference endpoint. */ openai_inference_id: Id /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `openai`. */ - service: InferencePutOpenaiServiceType + service: InferenceOpenAIServiceType /** Settings used to install the inference model. These settings are specific to the `openai` service. */ - service_settings: InferencePutOpenaiOpenAIServiceSettings - /** Settings to configure the inference task. These settings are specific to the task type you specified. */ - task_settings?: InferencePutOpenaiOpenAITaskSettings + service_settings: InferenceOpenAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceOpenAITaskSettings /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -15435,21 +22517,20 @@ export interface InferencePutOpenaiRequest extends RequestBase { export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfo -export type InferencePutOpenaiServiceType = 'openai' - export interface InferencePutVoyageaiRequest extends RequestBase { -/** The type of the inference task that the model will perform. */ - task_type: InferencePutVoyageaiVoyageAITaskType + /** The type of the inference task that the model will perform. */ + task_type: InferenceVoyageAITaskType /** The unique identifier of the inference endpoint. */ voyageai_inference_id: Id /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `voyageai`. */ - service: InferencePutVoyageaiServiceType + service: InferenceVoyageAIServiceType /** Settings used to install the inference model. These settings are specific to the `voyageai` service. */ - service_settings: InferencePutVoyageaiVoyageAIServiceSettings - /** Settings to configure the inference task. These settings are specific to the task type you specified. */ - task_settings?: InferencePutVoyageaiVoyageAITaskSettings + service_settings: InferenceVoyageAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceVoyageAITaskSettings /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -15458,33 +22539,16 @@ export interface InferencePutVoyageaiRequest extends RequestBase { export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfo -export type InferencePutVoyageaiServiceType = 'voyageai' - -export interface InferencePutVoyageaiVoyageAIServiceSettings { - dimensions?: integer - model_id: string - rate_limit?: InferenceRateLimitSetting - embedding_type?: float -} - -export interface InferencePutVoyageaiVoyageAITaskSettings { - input_type?: string - return_documents?: boolean - top_k?: integer - truncation?: boolean -} - -export type InferencePutVoyageaiVoyageAITaskType = 'text_embedding' | 'rerank' - export interface InferencePutWatsonxRequest extends RequestBase { -/** The task type. The only valid task type for the model to perform is `text_embedding`. */ - task_type: InferencePutWatsonxWatsonxTaskType + /** The task type. + * The only valid task type for the model to perform is `text_embedding`. */ + task_type: InferenceWatsonxTaskType /** The unique identifier of the inference endpoint. */ watsonx_inference_id: Id /** The type of service supported for the specified task type. In this case, `watsonxai`. */ - service: InferencePutWatsonxServiceType + service: InferenceWatsonxServiceType /** Settings used to install the inference model. These settings are specific to the `watsonxai` service. */ - service_settings: InferencePutWatsonxWatsonxServiceSettings + service_settings: InferenceWatsonxServiceSettings /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -15493,29 +22557,21 @@ export interface InferencePutWatsonxRequest extends RequestBase { export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfo -export type InferencePutWatsonxServiceType = 'watsonxai' - -export interface InferencePutWatsonxWatsonxServiceSettings { - api_key: string - api_version: string - model_id: string - project_id: string - rate_limit?: InferenceRateLimitSetting - url: string -} - -export type InferencePutWatsonxWatsonxTaskType = 'text_embedding' - export interface InferenceRerankRequest extends RequestBase { -/** The unique identifier for the inference endpoint. */ + /** The unique identifier for the inference endpoint. */ inference_id: Id /** The amount of time to wait for the inference request to complete. */ timeout?: Duration /** Query input. */ query: string - /** The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. */ + /** The text on which you want to perform the inference task. + * It can be a single string or an array. + * + * > info + * > Inference endpoints for the `completion` task type currently only support a single string as input. */ input: string | string[] - /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ + /** Task settings for the individual inference request. 
+ * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ task_settings?: InferenceTaskSettings /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } @@ -15526,11 +22582,12 @@ export interface InferenceRerankRequest extends RequestBase { export type InferenceRerankResponse = InferenceRerankedInferenceResult export interface InferenceSparseEmbeddingRequest extends RequestBase { -/** The inference Id */ + /** The inference Id */ inference_id: Id /** Specifies the amount of time to wait for the inference request to complete. */ timeout?: Duration - /** Inference input. Either a string or an array of strings. */ + /** Inference input. + * Either a string or an array of strings. */ input: string | string[] /** Optional task settings */ task_settings?: InferenceTaskSettings @@ -15543,9 +22600,12 @@ export interface InferenceSparseEmbeddingRequest extends RequestBase { export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInferenceResult export interface InferenceStreamCompletionRequest extends RequestBase { -/** The unique identifier for the inference endpoint. */ + /** The unique identifier for the inference endpoint. */ inference_id: Id - /** The text on which you want to perform the inference task. It can be a single string or an array. NOTE: Inference endpoints for the completion task type currently only support a single string as input. */ + /** The text on which you want to perform the inference task. + * It can be a single string or an array. + * + * NOTE: Inference endpoints for the completion task type currently only support a single string as input. */ input: string | string[] /** Optional task settings */ task_settings?: InferenceTaskSettings @@ -15558,11 +22618,12 @@ export interface InferenceStreamCompletionRequest extends RequestBase { export type InferenceStreamCompletionResponse = StreamResult export interface InferenceTextEmbeddingRequest extends RequestBase { -/** The inference Id */ + /** The inference Id */ inference_id: Id /** Specifies the amount of time to wait for the inference request to complete. */ timeout?: Duration - /** Inference input. Either a string or an array of strings. */ + /** Inference input. + * Either a string or an array of strings. */ input: string | string[] /** Optional task settings */ task_settings?: InferenceTaskSettings @@ -15575,7 +22636,7 @@ export interface InferenceTextEmbeddingRequest extends RequestBase { export type InferenceTextEmbeddingResponse = InferenceTextEmbeddingInferenceResult export interface InferenceUpdateRequest extends RequestBase { -/** The unique identifier of the inference endpoint. */ + /** The unique identifier of the inference endpoint. */ inference_id: Id /** The type of inference task that the model performs. */ task_type?: InferenceTaskType @@ -15589,70 +22650,126 @@ export interface InferenceUpdateRequest extends RequestBase { export type InferenceUpdateResponse = InferenceInferenceEndpointInfo export interface IngestAppendProcessor extends IngestProcessorBase { + /** The field to be appended to. + * Supports template snippets. */ field: Field + /** The value to be appended. Supports template snippets. */ value: any | any[] + /** If `false`, the processor does not append values already present in the field. 
 */
   allow_duplicates?: boolean
 }
 
 export interface IngestAttachmentProcessor extends IngestProcessorBase {
+  /** The field to get the base64 encoded field from. */
   field: Field
+  /** If `true` and field does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The number of chars being used for extraction to prevent huge fields.
+   * Use `-1` for no limit. */
   indexed_chars?: long
+  /** Field name from which you can overwrite the number of chars being used for extraction. */
   indexed_chars_field?: Field
+  /** Array of properties to select to be stored.
+   * Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language`. */
   properties?: string[]
+  /** The field that will hold the attachment information. */
   target_field?: Field
+  /** If `true`, the binary field will be removed from the document. */
   remove_binary?: boolean
+  /** Field containing the name of the resource to decode.
+   * If specified, the processor passes this resource name to the underlying Tika library to enable Resource Name Based Detection. */
   resource_name?: string
 }
 
 export interface IngestBytesProcessor extends IngestProcessorBase {
+  /** The field to convert. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The field to assign the converted value to.
+   * By default, the field is updated in-place. */
   target_field?: Field
 }
 
 export interface IngestCircleProcessor extends IngestProcessorBase {
+  /** The difference between the resulting inscribed distance from center to side and the circle’s radius (measured in meters for `geo_shape`, unit-less for `shape`). */
   error_distance: double
+  /** The field to interpret as a circle. Either a string in WKT format or a map for GeoJSON. */
   field: Field
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** Which field mapping type is to be used when processing the circle: `geo_shape` or `shape`. */
   shape_type: IngestShapeType
+  /** The field to assign the polygon shape to.
+   * By default, the field is updated in-place. */
   target_field?: Field
 }
 
 export interface IngestCommunityIDProcessor extends IngestProcessorBase {
+  /** Field containing the source IP address. */
   source_ip?: Field
+  /** Field containing the source port. */
   source_port?: Field
+  /** Field containing the destination IP address. */
   destination_ip?: Field
+  /** Field containing the destination port. */
   destination_port?: Field
+  /** Field containing the IANA number. */
   iana_number?: Field
+  /** Field containing the ICMP type. */
   icmp_type?: Field
+  /** Field containing the ICMP code. */
   icmp_code?: Field
+  /** Field containing the transport protocol name or number. Used only when the
+   * iana_number field is not present. The following protocol names are currently
+   * supported: eigrp, gre, icmp, icmpv6, igmp, ipv6-icmp, ospf, pim, sctp, tcp, udp */
   transport?: Field
+  /** Output field for the community ID. */
   target_field?: Field
+  /** Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The
+   * seed can prevent hash collisions between network domains, such as a staging
+   * and production network that use the same addressing scheme. */
   seed?: integer
+  /** If true and any required fields are missing, the processor quietly exits
+   * without modifying the document. */
   ignore_missing?: boolean
 }
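
A minimal usage sketch, not part of the patch itself, of a value satisfying the `IngestCommunityIDProcessor` shape above; the ECS-style field names and the seed value are illustrative assumptions, and the snippet assumes the declarations in this diff are in scope:

// Illustrative only: flow field names follow ECS conventions by assumption.
const communityId: IngestCommunityIDProcessor = {
  source_ip: 'source.ip',                // field holding the source IP address
  destination_ip: 'destination.ip',      // field holding the destination IP address
  transport: 'network.transport',        // used only when no IANA number is present
  target_field: 'network.community_id',  // output field for the community ID
  seed: 0,                               // must be between 0 and 65535 inclusive
  ignore_missing: true                   // exit quietly if required fields are missing
}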
 
 export interface IngestConvertProcessor extends IngestProcessorBase {
+  /** The field whose value is to be converted. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The field to assign the converted value to.
+   * By default, the `field` is updated in-place. */
   target_field?: Field
+  /** The type to convert the existing value to. */
   type: IngestConvertType
 }
 
 export type IngestConvertType = 'integer' | 'long' | 'double' | 'float' | 'boolean' | 'ip' | 'string' | 'auto'
 
 export interface IngestCsvProcessor extends IngestProcessorBase {
+  /** Value used to fill empty fields.
+   * Empty fields are skipped if this is not provided.
+   * An empty field is one with no value (2 consecutive separators) or empty quotes (`""`). */
   empty_value?: any
+  /** The field to extract data from. */
   field: Field
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** Quote used in CSV; it has to be a single-character string. */
   quote?: string
+  /** Separator used in CSV; it has to be a single-character string. */
   separator?: string
+  /** The array of fields to assign extracted values to. */
   target_fields: Fields
+  /** Trim whitespace in unquoted fields. */
   trim?: boolean
 }
 
 export interface IngestDatabaseConfiguration {
+  /** The provider-assigned name of the IP geolocation database to download. */
   name: Name
   maxmind?: IngestMaxmind
   ipinfo?: IngestIpinfo
 }
 
 export interface IngestDatabaseConfigurationFull {
   web?: IngestWeb
   local?: IngestLocal
+  /** The provider-assigned name of the IP geolocation database to download. */
   name: Name
   maxmind?: IngestMaxmind
   ipinfo?: IngestIpinfo
 }
 
 export interface IngestDateIndexNameProcessor extends IngestProcessorBase {
+  /** An array of the expected date formats for parsing dates / timestamps in the document being preprocessed.
+   * Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */
   date_formats?: string[]
+  /** How to round the date when formatting the date into the index name. Valid values are:
+   * `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second).
+   * Supports template snippets. */
   date_rounding: string
+  /** The field to get the date or timestamp from. */
   field: Field
+  /** The format to be used when printing the parsed date into the index name.
+   * A valid java time pattern is expected here.
+   * Supports template snippets. */
   index_name_format?: string
+  /** A prefix of the index name to be prepended before the printed date.
+   * Supports template snippets. */
   index_name_prefix?: string
+  /** The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days. */
   locale?: string
+  /** The timezone to use when parsing the date and when date math resolves expressions into concrete index names. */
   timezone?: string
 }
 
 export interface IngestDateProcessor extends IngestProcessorBase {
+  /** The field to get the date from. */
   field: Field
+  /** An array of the expected date formats.
+   * Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */
   formats: string[]
+  /** The locale to use when parsing the date, relevant when parsing month names or week days.
+   * Supports template snippets. */
   locale?: string
+  /** The field that will hold the parsed date. */
   target_field?: Field
+  /** The timezone to use when parsing the date.
+   * Supports template snippets. */
   timezone?: string
+  /** The format to use when writing the date to target_field. Must be a valid
+   * java time pattern. */
   output_format?: string
 }
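
A minimal sketch, assuming the declarations above are in scope, of an `IngestDateProcessor` value; the source field name and format list are illustrative:

const dateProcessor: IngestDateProcessor = {
  field: 'event.created',           // source field holding the raw timestamp
  formats: ['ISO8601', 'UNIX_MS'],  // tried in order until one parses
  target_field: '@timestamp',       // where the parsed date is written
  timezone: 'UTC'
}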
 
 export interface IngestDissectProcessor extends IngestProcessorBase {
+  /** The character(s) that separate the appended fields. */
   append_separator?: string
+  /** The field to dissect. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The pattern to apply to the field. */
   pattern: string
 }
 
 export interface IngestDocument {
+  /** Unique identifier for the document.
+   * This ID must be unique within the `_index`. */
   _id?: Id
+  /** Name of the index containing the document. */
   _index?: IndexName
+  /** JSON body for the document. */
   _source: any
 }
 
 export interface IngestDocumentSimulationKeys {
+  /** Unique identifier for the document. This ID must be unique within the `_index`. */
   _id: Id
+  /** Name of the index containing the document. */
   _index: IndexName
   _ingest: IngestIngest
+  /** Value used to send the document to a specific primary shard. */
   _routing?: string
+  /** JSON body for the document. */
   _source: Record<string, any>
+  /** */
   _version?: SpecUtilsStringified<VersionNumber>
   _version_type?: VersionType
 }
 export type IngestDocumentSimulation = IngestDocumentSimulationKeys
 & { [property: string]: string | Id | IndexName | IngestIngest | Record<string, any> | SpecUtilsStringified<VersionNumber> | VersionType }
 
 export interface IngestDotExpanderProcessor extends IngestProcessorBase {
+  /** The field to expand into an object field.
+   * If set to `*`, all top-level fields will be expanded. */
   field: Field
+  /** Controls the behavior when there is already an existing nested object that conflicts with the expanded field.
+   * When `false`, the processor will merge conflicts by combining the old and the new values into an array.
+   * When `true`, the value from the expanded field will overwrite the existing value. */
   override?: boolean
+  /** The field that contains the field to expand.
+   * Only required if the field to expand is part of another object field, because the `field` option can only understand leaf fields. */
   path?: string
 }
 
 export interface IngestDropProcessor extends IngestProcessorBase {
 }
 
 export interface IngestEnrichProcessor extends IngestProcessorBase {
+  /** The field in the input document that matches the policy's match_field used to retrieve the enrichment data.
+   * Supports template snippets. */
   field: Field
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The maximum number of matched documents to include under the configured target field.
+   * The `target_field` will be turned into a JSON array if `max_matches` is higher than 1, otherwise `target_field` will become a JSON object.
+   * In order to avoid documents getting too large, the maximum allowed value is 128. */
   max_matches?: integer
+  /** If `true`, the processor will update fields with pre-existing non-null-valued fields.
+   * When set to `false`, such fields will not be touched. */
   override?: boolean
+  /** The name of the enrich policy to use. */
   policy_name: string
+  /** A spatial relation operator used to match the geoshape of incoming documents to documents in the enrich index.
+   * This option is only used for `geo_match` enrich policy types. */
   shape_relation?: GeoShapeRelation
+  /** Field added to incoming documents to contain enrich data. This field contains both the `match_field` and `enrich_fields` specified in the enrich policy.
+   * Supports template snippets. */
   target_field: Field
 }
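
A minimal sketch of an `IngestEnrichProcessor` value matching the shape above; the policy name and field names are hypothetical and assume an enrich policy already exists:

const enrichUser: IngestEnrichProcessor = {
  policy_name: 'users-policy',  // hypothetical enrich policy name
  field: 'email',               // matched against the policy's match_field
  target_field: 'user',         // enrich data is written here
  max_matches: 1                // keep target_field a single JSON object
}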
 
 export interface IngestFailProcessor extends IngestProcessorBase {
+  /** The error message thrown by the processor.
+   * Supports template snippets. */
   message: string
 }
 
 export type IngestFingerprintDigest = 'MD5' | 'SHA-1' | 'SHA-256' | 'SHA-512' | 'MurmurHash3'
 
 export interface IngestFingerprintProcessor extends IngestProcessorBase {
+  /** Array of fields to include in the fingerprint. For objects, the processor
+   * hashes both the field key and value. For other fields, the processor hashes
+   * only the field value. */
   fields: Fields
+  /** Output field for the fingerprint. */
   target_field?: Field
+  /** Salt value for the hash function. */
   salt?: string
+  /** The hash method used to compute the fingerprint. Must be one of MD5, SHA-1,
+   * SHA-256, SHA-512, or MurmurHash3. */
   method?: IngestFingerprintDigest
+  /** If true, the processor ignores any missing fields. If all fields are
+   * missing, the processor silently exits without modifying the document. */
   ignore_missing?: boolean
 }
 
 export interface IngestForeachProcessor extends IngestProcessorBase {
+  /** Field containing array or object values. */
   field: Field
+  /** If `true`, the processor silently exits without changing the document if the `field` is `null` or missing. */
   ignore_missing?: boolean
+  /** Ingest processor to run on each element. */
   processor: IngestProcessorContainer
 }
 
 export interface IngestGeoGridProcessor extends IngestProcessorBase {
+  /** The field to interpret as a geo-tile.
+   * The field format is determined by the `tile_type`. */
   field: string
+  /** Three tile formats are understood: geohash, geotile and geohex. */
   tile_type: IngestGeoGridTileType
+  /** The field to assign the polygon shape to. By default, the `field` is updated in-place. */
   target_field?: Field
+  /** If specified and a parent tile exists, save that tile address to this field. */
   parent_field?: Field
+  /** If specified and children tiles exist, save those tile addresses to this field as an array of strings. */
   children_field?: Field
+  /** If specified and intersecting non-child tiles exist, save their addresses to this field as an array of strings. */
   non_children_field?: Field
+  /** If specified, save the tile precision (zoom) as an integer to this field. */
   precision_field?: Field
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** Which format to save the generated polygon in. */
   target_format?: IngestGeoGridTargetFormat
 }
 
 export type IngestGeoGridTargetFormat = 'geojson' | 'wkt'
 
 export type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash'
 
 export interface IngestGeoIpProcessor extends IngestProcessorBase {
+  /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */
   database_file?: string
+  /** The field to get the IP address from for the geographical lookup. */
   field: Field
+  /** If `true`, only the first found geoip data will be returned, even if the field contains an array. */
   first_only?: boolean
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** Controls what properties are added to the `target_field` based on the geoip lookup. */
   properties?: string[]
+  /** The field that will hold the geographical information looked up from the MaxMind database. */
   target_field?: Field
+  /** If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created.
+   * Otherwise, the download is triggered when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */
   download_database_on_pipeline_creation?: boolean
 }
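
A minimal sketch of an `IngestGeoIpProcessor` value under the same assumptions; the property names mirror values accepted by the geoip lookup but the field choices are illustrative:

const geoip: IngestGeoIpProcessor = {
  field: 'source.ip',          // the IP address to look up
  target_field: 'source.geo',  // where the geographical information lands
  properties: ['country_iso_code', 'city_name', 'location'],
  ignore_missing: true
}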
 
 export interface IngestGrokProcessor extends IngestProcessorBase {
+  /** Must be disabled or v1. If v1, the processor uses patterns with Elastic
+   * Common Schema (ECS) field names. */
   ecs_compatibility?: string
+  /** The field to use for grok expression parsing. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor.
+   * Patterns matching existing names will override the pre-existing definition. */
   pattern_definitions?: Record<string, string>
+  /** An ordered list of grok expressions to match and extract named captures with.
+   * Returns on the first expression in the list that matches. */
   patterns: GrokPattern[]
+  /** When `true`, `_ingest._grok_match_index` will be inserted into your matched document’s metadata with the index into the pattern found in `patterns` that matched. */
   trace_match?: boolean
 }
 
 export interface IngestGsubProcessor extends IngestProcessorBase {
+  /** The field to apply the replacement to. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The pattern to be replaced. */
   pattern: string
+  /** The string to replace the matching patterns with. */
   replacement: string
+  /** The field to assign the converted value to.
+   * By default, the `field` is updated in-place. */
   target_field?: Field
 }
 
 export interface IngestHtmlStripProcessor extends IngestProcessorBase {
+  /** The string-valued field to remove HTML tags from. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The field to assign the converted value to.
+   * By default, the `field` is updated in-place. */
   target_field?: Field
 }
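
A minimal sketch of the grok processor typed above; the custom pattern name and field names are hypothetical:

const grok: IngestGrokProcessor = {
  field: 'message',                          // text field the expressions run against
  patterns: ['trace=%{TRACE_ID:trace.id}'],  // first matching expression wins
  pattern_definitions: { TRACE_ID: '[0-9a-f]{32}' },  // custom pattern used above
  ignore_missing: true
}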
 
 export interface IngestInferenceConfig {
+  /** Regression configuration for inference. */
   regression?: IngestInferenceConfigRegression
+  /** Classification configuration for inference. */
   classification?: IngestInferenceConfigClassification
 }
 
 export interface IngestInferenceConfigClassification {
+  /** Specifies the number of top class predictions to return. */
   num_top_classes?: integer
+  /** Specifies the maximum number of feature importance values per document. */
   num_top_feature_importance_values?: integer
+  /** The field that is added to incoming documents to contain the inference prediction. */
   results_field?: Field
+  /** Specifies the field to which the top classes are written. */
   top_classes_results_field?: Field
+  /** Specifies the type of the predicted field to write.
+   * Valid values are: `string`, `number`, `boolean`. */
   prediction_field_type?: string
 }
 
 export interface IngestInferenceConfigRegression {
+  /** The field that is added to incoming documents to contain the inference prediction. */
   results_field?: Field
+  /** Specifies the maximum number of feature importance values per document. */
   num_top_feature_importance_values?: integer
 }
 
 export interface IngestInferenceProcessor extends IngestProcessorBase {
+  /** The ID or alias for the trained model, or the ID of the deployment. */
   model_id: Id
+  /** Field added to incoming documents to contain results objects. */
   target_field?: Field
+  /** Maps the document field names to the known field names of the model.
+   * This mapping takes precedence over any default mappings provided in the model configuration. */
   field_map?: Record<Field, any>
+  /** Contains the inference type and its options. */
   inference_config?: IngestInferenceConfig
 }
 
@@ -15830,12 +23070,20 @@ export interface IngestIngest {
 }
 
 export interface IngestIpLocationProcessor extends IngestProcessorBase {
+  /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */
   database_file?: string
+  /** The field to get the IP address from for the geographical lookup. */
   field: Field
+  /** If `true`, only the first found IP location data will be returned, even if the field contains an array. */
   first_only?: boolean
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** Controls what properties are added to the `target_field` based on the IP location lookup. */
   properties?: string[]
+  /** The field that will hold the geographical information looked up from the MaxMind database. */
   target_field?: Field
+  /** If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created.
+   * Otherwise, the download is triggered when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */
   download_database_on_pipeline_creation?: boolean
 }
 
 export interface IngestIpinfo {
 }
 
 export interface IngestJoinProcessor extends IngestProcessorBase {
+  /** Field containing array values to join. */
   field: Field
+  /** The separator character. */
   separator: string
+  /** The field to assign the joined value to.
+   * By default, the field is updated in-place. */
   target_field?: Field
 }
 
 export interface IngestJsonProcessor extends IngestProcessorBase {
+  /** Flag that forces the parsed JSON to be added at the top level of the document.
+   * `target_field` must not be set when this option is chosen. */
   add_to_root?: boolean
+  /** When set to `replace`, root fields that conflict with fields from the parsed JSON will be overridden.
+   * When set to `merge`, conflicting fields will be merged.
+   * Only applicable if `add_to_root` is set to `true`. */
   add_to_root_conflict_strategy?: IngestJsonProcessorConflictStrategy
+  /** When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys.
+   * Instead, the last encountered value for any duplicate key wins. */
   allow_duplicate_keys?: boolean
+  /** The field to be parsed. */
   field: Field
+  /** The field that the converted structured object will be written into.
+   * Any existing content in this field will be overwritten. */
   target_field?: Field
 }
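
A minimal sketch combining `IngestInferenceProcessor` with the `IngestInferenceConfig` shapes above; the model ID and result field names are placeholders:

const inference: IngestInferenceProcessor = {
  model_id: 'my-trained-model',  // placeholder model ID or alias
  target_field: 'ml.inference',  // results objects land here
  inference_config: {
    classification: {
      num_top_classes: 2,
      results_field: 'prediction'
    }
  }
}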
 
 export type IngestJsonProcessorConflictStrategy = 'replace' | 'merge'
 
 export interface IngestKeyValueProcessor extends IngestProcessorBase {
+  /** List of keys to exclude from the document. */
   exclude_keys?: string[]
+  /** The field to be parsed.
+   * Supports template snippets. */
   field: Field
+  /** Regex pattern to use for splitting key-value pairs. */
   field_split: string
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** List of keys to filter and insert into the document.
+   * Defaults to including all keys. */
   include_keys?: string[]
+  /** Prefix to be added to extracted keys. */
   prefix?: string
+  /** If `true`, strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from extracted values. */
   strip_brackets?: boolean
+  /** The field to insert the extracted keys into.
+   * Defaults to the root of the document.
+   * Supports template snippets. */
   target_field?: Field
+  /** String of characters to trim from extracted keys. */
   trim_key?: string
+  /** String of characters to trim from extracted values. */
   trim_value?: string
+  /** Regex pattern to use for splitting the key from the value within a key-value pair. */
   value_split: string
 }
 
 export interface IngestLocal {
 }
 
 export interface IngestLowercaseProcessor extends IngestProcessorBase {
+  /** The field to make lowercase. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The field to assign the converted value to.
+   * By default, the field is updated in-place. */
   target_field?: Field
 }
 
 export interface IngestMaxmind {
 }
 
 export interface IngestNetworkDirectionProcessor extends IngestProcessorBase {
+  /** Field containing the source IP address. */
   source_ip?: Field
+  /** Field containing the destination IP address. */
   destination_ip?: Field
+  /** Output field for the network direction. */
   target_field?: Field
+  /** List of internal networks. Supports IPv4 and IPv6 addresses and ranges in
+   * CIDR notation. Also supports the named ranges listed below. These may be
+   * constructed with template snippets. Must specify only one of
+   * internal_networks or internal_networks_field. */
   internal_networks?: string[]
+  /** A field on the given document to read the internal_networks configuration
+   * from. */
   internal_networks_field?: Field
+  /** If true and any required fields are missing, the processor quietly exits
+   * without modifying the document. */
   ignore_missing?: boolean
 }
 
 export interface IngestPipeline {
+  /** Description of the ingest pipeline. */
   description?: string
+  /** Processors to run immediately after a processor failure. */
   on_failure?: IngestProcessorContainer[]
+  /** Processors used to perform transformations on documents before indexing.
+   * Processors run sequentially in the order specified. */
   processors?: IngestProcessorContainer[]
+  /** Version number used by external systems to track ingest pipelines. */
   version?: VersionNumber
+  /** Marks this ingest pipeline as deprecated.
+   * When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */
   deprecated?: boolean
+  /** Arbitrary metadata about the ingest pipeline. This map is not automatically generated by Elasticsearch.
*/ _meta?: Metadata } export interface IngestPipelineConfig { + /** Description of the ingest pipeline. */ description?: string + /** Version number used by external systems to track ingest pipelines. */ version?: VersionNumber + /** Processors used to perform transformations on documents before indexing. + * Processors run sequentially in the order specified. */ processors: IngestProcessorContainer[] } export interface IngestPipelineProcessor extends IngestProcessorBase { + /** The name of the pipeline to execute. + * Supports template snippets. */ name: Name + /** Whether to ignore missing pipelines instead of failing. */ ignore_missing_pipeline?: boolean } @@ -15926,118 +23233,275 @@ export interface IngestPipelineSimulation { } export interface IngestProcessorBase { + /** Description of the processor. + * Useful for describing the purpose of the processor or its configuration. */ description?: string - if?: Script | string + /** Conditionally execute the processor. */ + if?: Script | ScriptSource + /** Ignore failures for the processor. */ ignore_failure?: boolean + /** Handle failures for the processor. */ on_failure?: IngestProcessorContainer[] + /** Identifier for the processor. + * Useful for debugging and metrics. */ tag?: string } export interface IngestProcessorContainer { + /** Appends one or more values to an existing array if the field already exists and it is an array. + * Converts a scalar to an array and appends one or more values to it if the field exists and it is a scalar. + * Creates an array containing the provided values if the field doesn’t exist. + * Accepts a single value or an array of values. */ append?: IngestAppendProcessor + /** The attachment processor lets Elasticsearch extract file attachments in common formats (such as PPT, XLS, and PDF) by using the Apache text extraction library Tika. */ attachment?: IngestAttachmentProcessor + /** Converts a human readable byte value (for example `1kb`) to its value in bytes (for example `1024`). + * If the field is an array of strings, all members of the array will be converted. + * Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case insensitive. + * An error will occur if the field is not a supported format or resultant value exceeds 2^63. */ bytes?: IngestBytesProcessor + /** Converts circle definitions of shapes to regular polygons which approximate them. */ circle?: IngestCircleProcessor + /** Computes the Community ID for network flow data as defined in the + * Community ID Specification. You can use a community ID to correlate network + * events related to a single flow. */ community_id?: IngestCommunityIDProcessor + /** Converts a field in the currently ingested document to a different type, such as converting a string to an integer. + * If the field value is an array, all members will be converted. */ convert?: IngestConvertProcessor + /** Extracts fields from CSV line out of a single text field within a document. + * Any empty field in CSV will be skipped. */ csv?: IngestCsvProcessor + /** Parses dates from fields, and then uses the date or timestamp as the timestamp for the document. */ date?: IngestDateProcessor + /** The purpose of this processor is to point documents to the right time based index based on a date or timestamp field in a document by using the date math index name support. */ date_index_name?: IngestDateIndexNameProcessor + /** Extracts structured fields out of a single text field by matching the text field against a delimiter-based pattern. 
*/ dissect?: IngestDissectProcessor + /** Expands a field with dots into an object field. + * This processor allows fields with dots in the name to be accessible by other processors in the pipeline. + * Otherwise these fields can’t be accessed by any processor. */ dot_expander?: IngestDotExpanderProcessor + /** Drops the document without raising any errors. + * This is useful to prevent the document from getting indexed based on some condition. */ drop?: IngestDropProcessor + /** The `enrich` processor can enrich documents with data from another index. */ enrich?: IngestEnrichProcessor + /** Raises an exception. + * This is useful for when you expect a pipeline to fail and want to relay a specific message to the requester. */ fail?: IngestFailProcessor + /** Computes a hash of the document’s content. You can use this hash for + * content fingerprinting. */ fingerprint?: IngestFingerprintProcessor + /** Runs an ingest processor on each element of an array or object. */ foreach?: IngestForeachProcessor + /** Currently an undocumented alias for GeoIP Processor. */ ip_location?: IngestIpLocationProcessor + /** Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. + * This is useful if there is a need to interact with the tile shapes as spatially indexable fields. */ geo_grid?: IngestGeoGridProcessor + /** The `geoip` processor adds information about the geographical location of an IPv4 or IPv6 address. */ geoip?: IngestGeoIpProcessor + /** Extracts structured fields out of a single text field within a document. + * You choose which field to extract matched fields from, as well as the grok pattern you expect will match. + * A grok pattern is like a regular expression that supports aliased expressions that can be reused. */ grok?: IngestGrokProcessor + /** Converts a string field by applying a regular expression and a replacement. + * If the field is an array of string, all members of the array will be converted. + * If any non-string values are encountered, the processor will throw an exception. */ gsub?: IngestGsubProcessor + /** Removes HTML tags from the field. + * If the field is an array of strings, HTML tags will be removed from all members of the array. */ html_strip?: IngestHtmlStripProcessor + /** Uses a pre-trained data frame analytics model or a model deployed for natural language processing tasks to infer against the data that is being ingested in the pipeline. */ inference?: IngestInferenceProcessor + /** Joins each element of an array into a single string using a separator character between each element. + * Throws an error when the field is not an array. */ join?: IngestJoinProcessor + /** Converts a JSON string into a structured JSON object. */ json?: IngestJsonProcessor + /** This processor helps automatically parse messages (or specific event fields) which are of the `foo=bar` variety. */ kv?: IngestKeyValueProcessor + /** Converts a string to its lowercase equivalent. + * If the field is an array of strings, all members of the array will be converted. */ lowercase?: IngestLowercaseProcessor + /** Calculates the network direction given a source IP address, destination IP + * address, and a list of internal networks. */ network_direction?: IngestNetworkDirectionProcessor + /** Executes another pipeline. */ pipeline?: IngestPipelineProcessor + /** The Redact processor uses the Grok rules engine to obscure text in the input document matching the given Grok patterns. 
+   * The processor can be used to obscure Personal Identifying Information (PII) by configuring it to detect known patterns such as email or IP addresses.
+   * Text that matches a Grok pattern is replaced with a configurable string such as `<EMAIL>` where an email address is matched or simply replace all matches with the text `<REDACTED>` if preferred. */
   redact?: IngestRedactProcessor
+  /** Extracts the registered domain (also known as the effective top-level
+   * domain or eTLD), sub-domain, and top-level domain from a fully qualified
+   * domain name (FQDN). Uses the registered domains defined in the Mozilla
+   * Public Suffix List. */
   registered_domain?: IngestRegisteredDomainProcessor
+  /** Removes existing fields.
+   * If one field doesn’t exist, an exception will be thrown. */
   remove?: IngestRemoveProcessor
+  /** Renames an existing field.
+   * If the field doesn’t exist or the new name is already used, an exception will be thrown. */
   rename?: IngestRenameProcessor
+  /** Routes a document to another target index or data stream.
+   * When setting the `destination` option, the target is explicitly specified and the dataset and namespace options can’t be set.
+   * When the `destination` option is not set, this processor is in a data stream mode. Note that in this mode, the reroute processor can only be used on data streams that follow the data stream naming scheme. */
   reroute?: IngestRerouteProcessor
+  /** Runs an inline or stored script on incoming documents.
+   * The script runs in the `ingest` context. */
   script?: IngestScriptProcessor
+  /** Adds a field with the specified value.
+   * If the field already exists, its value will be replaced with the provided one. */
   set?: IngestSetProcessor
+  /** Sets user-related details (such as `username`, `roles`, `email`, `full_name`, `metadata`, `api_key`, `realm` and `authentication_type`) from the current authenticated user to the current document by pre-processing the ingest. */
   set_security_user?: IngestSetSecurityUserProcessor
+  /** Sorts the elements of an array ascending or descending.
+   * Homogeneous arrays of numbers will be sorted numerically, while arrays of strings or heterogeneous arrays of strings + numbers will be sorted lexicographically.
+   * Throws an error when the field is not an array. */
   sort?: IngestSortProcessor
+  /** Splits a field into an array using a separator character.
+   * Only works on string fields. */
   split?: IngestSplitProcessor
+  /** Terminates the current ingest pipeline, causing no further processors to be run.
+   * This will normally be executed conditionally, using the `if` option. */
   terminate?: IngestTerminateProcessor
+  /** Trims whitespace from a field.
+   * If the field is an array of strings, all members of the array will be trimmed.
+   * This only works on leading and trailing whitespace. */
   trim?: IngestTrimProcessor
+  /** Converts a string to its uppercase equivalent.
+   * If the field is an array of strings, all members of the array will be converted. */
   uppercase?: IngestUppercaseProcessor
+  /** URL-decodes a string.
+   * If the field is an array of strings, all members of the array will be decoded. */
   urldecode?: IngestUrlDecodeProcessor
+  /** Parses a Uniform Resource Identifier (URI) string and extracts its components as an object.
+   * This URI object includes properties for the URI’s domain, path, fragment, port, query, scheme, user info, username, and password. */
   uri_parts?: IngestUriPartsProcessor
+  /** The `user_agent` processor extracts details from the user agent string a browser sends with its web requests.
+   * This processor adds this information by default under the `user_agent` field. */
   user_agent?: IngestUserAgentProcessor
 }
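
A minimal sketch tying the container above to the `IngestPipeline` shape defined earlier in this diff; the processor options and field names are illustrative:

const pipeline: IngestPipeline = {
  description: 'example pipeline (illustrative)',
  processors: [
    { set: { field: 'event.ingested', value: '{{_ingest.timestamp}}' } },  // template snippet
    { lowercase: { field: 'user.name', ignore_missing: true } },
    { remove: { field: ['tmp'], ignore_failure: true } }
  ],
  version: 1
}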
+ * This processor adds this information by default under the `user_agent` field. */ user_agent?: IngestUserAgentProcessor } export interface IngestRedact { + /** indicates if document has been redacted */ _is_redacted: boolean } export interface IngestRedactProcessor extends IngestProcessorBase { + /** The field to be redacted */ field: Field + /** A list of grok expressions to match and redact named captures with */ patterns: GrokPattern[] pattern_definitions?: Record + /** Start a redacted section with this token */ prefix?: string + /** End a redacted section with this token */ suffix?: string + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** If `true` and the current license does not support running redact processors, then the processor quietly exits without modifying the document */ skip_if_unlicensed?: boolean + /** If `true` then ingest metadata `_ingest._redact._is_redacted` is set to `true` if the document has been redacted */ trace_redact?: boolean } export interface IngestRegisteredDomainProcessor extends IngestProcessorBase { + /** Field containing the source FQDN. */ field: Field + /** Object field containing extracted domain components. If an empty string, + * the processor adds components to the document’s root. */ target_field?: Field + /** If true and any required fields are missing, the processor quietly exits + * without modifying the document. */ ignore_missing?: boolean } export interface IngestRemoveProcessor extends IngestProcessorBase { + /** Fields to be removed. Supports template snippets. */ field: Fields + /** Fields to be kept. When set, all fields other than those specified are removed. */ keep?: Fields + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean } export interface IngestRenameProcessor extends IngestProcessorBase { + /** The field to be renamed. + * Supports template snippets. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The new name of the field. + * Supports template snippets. */ target_field: Field } export interface IngestRerouteProcessor extends IngestProcessorBase { + /** A static value for the target. Can’t be set when the dataset or namespace option is set. */ destination?: string + /** Field references or a static value for the dataset part of the data stream name. + * In addition to the criteria for index names, cannot contain - and must be no longer than 100 characters. + * Example values are nginx.access and nginx.error. + * + * Supports field references with a mustache-like syntax (denoted as {{double}} or {{{triple}}} curly braces). + * When resolving field references, the processor replaces invalid characters with _. Uses the part + * of the index name as a fallback if all field references resolve to a null, missing, or non-string value. + * + * default {{data_stream.dataset}} */ dataset?: string | string[] + /** Field references or a static value for the namespace part of the data stream name. See the criteria for + * index names for allowed characters. Must be no longer than 100 characters. + * + * Supports field references with a mustache-like syntax (denoted as {{double}} or {{{triple}}} curly braces). + * When resolving field references, the processor replaces invalid characters with _. 
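// --- Illustrative aside (not part of the generated definitions): a minimal sketch of how the
// --- redact processor options declared above map onto a pipeline created with the client.
// --- The pipeline id, grok patterns, and prefix/suffix tokens are assumptions for the example.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' }) // assumed local node

await client.ingest.putPipeline({
  id: 'redact-pii', // hypothetical pipeline id
  processors: [{
    redact: {
      field: 'message',
      // named captures matched by these grok patterns are replaced in the field value
      patterns: ['%{EMAILADDRESS:EMAIL}', '%{IP:IP_ADDRESS}'],
      prefix: '<', // token that opens a redacted section
      suffix: '>', // token that closes a redacted section
      skip_if_unlicensed: true // quietly no-op when the license does not cover redaction
    }
  }]
})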
Uses the part + * of the index name as a fallback if all field references resolve to a null, missing, or non-string value. + * + * default {{data_stream.namespace}} */ namespace?: string | string[] } export interface IngestScriptProcessor extends IngestProcessorBase { + /** ID of a stored script. + * If no `source` is specified, this parameter is required. */ id?: Id - lang?: string + /** Script language. */ + lang?: ScriptLanguage + /** Object containing parameters for the script. */ params?: Record - source?: string + /** Inline script. + * If no `id` is specified, this parameter is required. */ + source?: ScriptSource } export interface IngestSetProcessor extends IngestProcessorBase { + /** The origin field which will be copied to `field`, cannot set `value` simultaneously. + * Supported data types are `boolean`, `number`, `array`, `object`, `string`, `date`, etc. */ copy_from?: Field + /** The field to insert, upsert, or update. + * Supports template snippets. */ field: Field + /** If `true` and `value` is a template snippet that evaluates to `null` or the empty string, the processor quietly exits without modifying the document. */ ignore_empty_value?: boolean + /** The media type for encoding `value`. + * Applies only when value is a template snippet. + * Must be one of `application/json`, `text/plain`, or `application/x-www-form-urlencoded`. */ media_type?: string + /** If `true` processor will update fields with pre-existing non-null-valued field. + * When set to `false`, such fields will not be touched. */ override?: boolean + /** The value to be set for the field. + * Supports template snippets. + * May specify only one of `value` or `copy_from`. */ value?: any } export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { + /** The field to store the user information into. */ field: Field + /** Controls what user related properties are added to the field. */ properties?: string[] } @@ -16050,16 +23514,27 @@ export interface IngestSimulateDocumentResult { } export interface IngestSortProcessor extends IngestProcessorBase { + /** The field to be sorted. */ field: Field + /** The sort order to use. + * Accepts `"asc"` or `"desc"`. */ order?: SortOrder + /** The field to assign the sorted value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestSplitProcessor extends IngestProcessorBase { + /** The field to split. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** Preserves empty trailing fields, if any. */ preserve_trailing?: boolean + /** A regex which matches the separator, for example, `,` or `\s+`. */ separator: string + /** The field to assign the split value to. + * By default, the field is updated in-place. */ target_field?: Field } @@ -16067,37 +23542,62 @@ export interface IngestTerminateProcessor extends IngestProcessorBase { } export interface IngestTrimProcessor extends IngestProcessorBase { + /** The string-valued field to trim whitespace from. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the trimmed value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestUppercaseProcessor extends IngestProcessorBase { + /** The field to make uppercase. 
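// --- Illustrative aside: the `set` and `script` processors declared above used together.
// --- Reuses the `client` from the redact sketch; field names and the Painless one-liner are sample values.
await client.ingest.putPipeline({
  id: 'tag-docs', // hypothetical pipeline id
  processors: [
    // `override: false` leaves a pre-existing non-null `env` value untouched
    { set: { field: 'env', value: 'production', override: false } },
    // inline script in the `ingest` context; passing values via `params` avoids recompilation
    { script: { lang: 'painless', source: 'ctx.tag = params.tag', params: { tag: 'demo' } } }
  ]
})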
*/ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestUriPartsProcessor extends IngestProcessorBase { + /** Field containing the URI string. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** If `true`, the processor copies the unparsed URI to `.original`. */ keep_original?: boolean + /** If `true`, the processor removes the `field` after parsing the URI string. + * If parsing fails, the processor does not remove the `field`. */ remove_if_successful?: boolean + /** Output field for the URI object. */ target_field?: Field } export interface IngestUrlDecodeProcessor extends IngestProcessorBase { + /** The field to decode. */ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestUserAgentProcessor extends IngestProcessorBase { + /** The field containing the user agent string. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The name of the file in the `config/ingest-user-agent` directory containing the regular expressions for parsing the user agent string. Both the directory and the file have to be created before starting Elasticsearch. If not specified, ingest-user-agent will use the `regexes.yaml` from uap-core it ships with. */ regex_file?: string + /** The field that will be filled with the user agent details. */ target_field?: Field + /** Controls what properties are added to `target_field`. */ properties?: IngestUserAgentProperty[] + /** Extracts device type from the user agent string on a best-effort basis. + * @beta */ extract_device_type?: boolean } @@ -16107,9 +23607,10 @@ export interface IngestWeb { } export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { -/** A comma-separated list of geoip database configurations to delete */ + /** A comma-separated list of geoip database configurations to delete */ id: Ids - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -16122,11 +23623,15 @@ export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase { -/** A comma-separated list of IP location database configurations. */ + /** A comma-separated list of IP location database configurations. */ id: Ids - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
A value of `-1` indicates that the request should never time out. */
+ /** The period to wait for a connection to the master node.
+ * If no response is received before the timeout expires, the request fails and returns an error.
+ * A value of `-1` indicates that the request should never time out. */
    master_timeout?: Duration
- /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. */
+ /** The period to wait for a response.
+ * If no response is received before the timeout expires, the request fails and returns an error.
+ * A value of `-1` indicates that the request should never time out. */
    timeout?: Duration
    /** All values in `body` will be added to the request body. */
    body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never }
@@ -16137,11 +23642,14 @@ export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase {

export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase

export interface IngestDeletePipelineRequest extends RequestBase {
-/** Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. */
+ /** Pipeline ID or wildcard expression of pipeline IDs used to limit the request.
+ * To delete all ingest pipelines in a cluster, use a value of `*`. */
    id: Id
- /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+ /** Period to wait for a connection to the master node.
+ * If no response is received before the timeout expires, the request fails and returns an error. */
    master_timeout?: Duration
- /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+ /** Period to wait for a response.
+ * If no response is received before the timeout expires, the request fails and returns an error. */
    timeout?: Duration
    /** All values in `body` will be added to the request body. */
    body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never }
@@ -16152,20 +23660,29 @@ export interface IngestDeletePipelineRequest extends RequestBase {

export type IngestDeletePipelineResponse = AcknowledgedResponseBase

export interface IngestGeoIpStatsGeoIpDownloadStatistics {
+ /** Total number of successful database downloads. */
    successful_downloads: integer
+ /** Total number of failed database downloads. */
    failed_downloads: integer
+ /** Total milliseconds spent downloading databases. */
    total_download_time: DurationValue<UnitMillis>
+ /** Current number of databases available for use. */
    databases_count: integer
+ /** Total number of database updates skipped. */
    skipped_updates: integer
+ /** Total number of databases not updated after 30 days */
    expired_databases: integer
}

export interface IngestGeoIpStatsGeoIpNodeDatabaseName {
+ /** Name of the database. */
    name: Name
}

export interface IngestGeoIpStatsGeoIpNodeDatabases {
+ /** Downloaded databases for the node. */
    databases: IngestGeoIpStatsGeoIpNodeDatabaseName[]
+ /** Downloaded database files, including related license files. Elasticsearch stores these files in the node’s temporary directory: $ES_TMPDIR/geoip-databases/<node_id>.
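// --- Illustrative aside: IngestDeletePipelineRequest above in use (client from the earlier sketch).
await client.ingest.deletePipeline({
  id: 'tag-docs',        // a wildcard such as '*' deletes every ingest pipeline
  master_timeout: '30s', // give up if the master node does not respond in time
  timeout: '30s'
})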
*/ files_in_temp: string[] } @@ -16177,7 +23694,9 @@ export interface IngestGeoIpStatsRequest extends RequestBase { } export interface IngestGeoIpStatsResponse { + /** Download statistics for all GeoIP2 databases. */ stats: IngestGeoIpStatsGeoIpDownloadStatistics + /** Downloaded GeoIP2 databases for each node. */ nodes: Record } @@ -16189,7 +23708,9 @@ export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata { } export interface IngestGetGeoipDatabaseRequest extends RequestBase { -/** A comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. */ + /** A comma-separated list of database configuration IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all database configurations, omit this parameter or use `*`. */ id?: Ids /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -16210,9 +23731,13 @@ export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata { } export interface IngestGetIpLocationDatabaseRequest extends RequestBase { -/** Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. */ + /** Comma-separated list of database configuration IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all database configurations, omit this parameter or use `*`. */ id?: Ids - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never } @@ -16225,9 +23750,12 @@ export interface IngestGetIpLocationDatabaseResponse { } export interface IngestGetPipelineRequest extends RequestBase { -/** Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. */ + /** Comma-separated list of pipeline IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all ingest pipelines, omit this parameter or use `*`. */ id?: Id - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Return pipelines without their definitions (default: false) */ summary?: boolean @@ -16251,15 +23779,17 @@ export interface IngestProcessorGrokResponse { } export interface IngestPutGeoipDatabaseRequest extends RequestBase { -/** ID of the database configuration to create or update. */ + /** ID of the database configuration to create or update. */ id: Id - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
*/ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** The provider-assigned name of the IP geolocation database to download. */ name: Name - /** The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. */ + /** The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. + * At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. */ maxmind: IngestMaxmind /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, name?: never, maxmind?: never } @@ -16270,11 +23800,15 @@ export interface IngestPutGeoipDatabaseRequest extends RequestBase { export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase export interface IngestPutIpLocationDatabaseRequest extends RequestBase { -/** The database configuration identifier. */ + /** The database configuration identifier. */ id: Id - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration - /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. A value of `-1` indicates that the request should never time out. */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. + * A value of `-1` indicates that the request should never time out. */ timeout?: Duration configuration?: IngestDatabaseConfiguration /** All values in `body` will be added to the request body. */ @@ -16286,7 +23820,7 @@ export interface IngestPutIpLocationDatabaseRequest extends RequestBase { export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase export interface IngestPutPipelineRequest extends RequestBase { -/** ID of the ingest pipeline to create or update. */ + /** ID of the ingest pipeline to create or update. */ id: Id /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
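// --- Illustrative aside: a sketch of IngestPutGeoipDatabaseRequest above (client from the earlier sketch).
// --- The configuration id, database name, and MaxMind account id are placeholders.
await client.ingest.putGeoipDatabase({
  id: 'my-geoip-config',
  name: 'GeoIP2-City',              // provider-assigned database name
  maxmind: { account_id: '123456' } // MaxMind is the only supported provider at present
})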
*/ master_timeout?: Duration @@ -16304,7 +23838,8 @@ export interface IngestPutPipelineRequest extends RequestBase { processors?: IngestProcessorContainer[] /** Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. */ version?: VersionNumber - /** Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ + /** Marks this ingest pipeline as deprecated. + * When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never } @@ -16315,13 +23850,16 @@ export interface IngestPutPipelineRequest extends RequestBase { export type IngestPutPipelineResponse = AcknowledgedResponseBase export interface IngestSimulateRequest extends RequestBase { -/** The pipeline to test. If you don't specify a `pipeline` in the request body, this parameter is required. */ + /** The pipeline to test. + * If you don't specify a `pipeline` in the request body, this parameter is required. */ id?: Id /** If `true`, the response includes output data for each processor in the executed pipeline. */ verbose?: boolean /** Sample documents to test in the pipeline. */ docs: IngestDocument[] - /** The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. */ + /** The pipeline to test. + * If you don't specify the `pipeline` request path parameter, this parameter is required. + * If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline?: IngestPipeline /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, verbose?: never, docs?: never, pipeline?: never } @@ -16351,7 +23889,7 @@ export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' export interface LicenseDeleteRequest extends RequestBase { -/** The period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -16379,7 +23917,8 @@ export interface LicenseGetLicenseInformation { } export interface LicenseGetRequest extends RequestBase { -/** If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. This parameter is deprecated and will always be set to true in 8.x. 
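// --- Illustrative aside: IngestSimulateRequest above lets a pipeline be tested inline before it
// --- is stored. The sample document and the kv processor settings are assumptions.
const simulated = await client.ingest.simulate({
  verbose: true, // include the output of each processor in the response
  docs: [{ _source: { message: 'user=alice ip=10.0.0.1' } }],
  pipeline: {
    processors: [{ kv: { field: 'message', field_split: ' ', value_split: '=' } }]
  }
})
console.log(simulated.docs)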
*/ + /** If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. + * This parameter is deprecated and will always be set to true in 8.x. */ accept_enterprise?: boolean /** Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. */ local?: boolean @@ -16421,7 +23960,7 @@ export interface LicensePostAcknowledgement { } export interface LicensePostRequest extends RequestBase { -/** Specifies whether you acknowledge the license changes. */ + /** Specifies whether you acknowledge the license changes. */ acknowledge?: boolean /** The period to wait for a connection to the master node. */ master_timeout?: Duration @@ -16443,7 +23982,7 @@ export interface LicensePostResponse { } export interface LicensePostStartBasicRequest extends RequestBase { -/** whether the user has acknowledged acknowledge messages (default: false) */ + /** whether the user has acknowledged acknowledge messages (default: false) */ acknowledge?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -16464,7 +24003,7 @@ export interface LicensePostStartBasicResponse { } export interface LicensePostStartTrialRequest extends RequestBase { -/** whether the user has acknowledged acknowledge messages (default: false) */ + /** whether the user has acknowledged acknowledge messages (default: false) */ acknowledge?: boolean type_query_string?: string /** Period to wait for a connection to the master node. */ @@ -16483,11 +24022,21 @@ export interface LicensePostStartTrialResponse { } export interface LogstashPipeline { + /** A description of the pipeline. + * This description is not used by Elasticsearch or Logstash. */ description: string + /** The date the pipeline was last updated. + * It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. */ last_modified: DateTime + /** The configuration for the pipeline. */ pipeline: string + /** Optional metadata about the pipeline, which can have any contents. + * This metadata is not generated or used by Elasticsearch or Logstash. */ pipeline_metadata: LogstashPipelineMetadata + /** Settings for the pipeline. + * It supports only flat keys in dot notation. */ pipeline_settings: LogstashPipelineSettings + /** The user who last updated the pipeline. */ username: string } @@ -16497,17 +24046,24 @@ export interface LogstashPipelineMetadata { } export interface LogstashPipelineSettings { + /** The number of workers that will, in parallel, execute the filter and output stages of the pipeline. */ 'pipeline.workers': integer + /** The maximum number of events an individual worker thread will collect from inputs before attempting to execute its filters and outputs. */ 'pipeline.batch.size': integer + /** When creating pipeline event batches, how long in milliseconds to wait for each event before dispatching an undersized batch to pipeline workers. */ 'pipeline.batch.delay': integer + /** The internal queuing model to use for event buffering. */ 'queue.type': string + /** The total capacity of the queue (`queue.type: persisted`) in number of bytes. */ 'queue.max_bytes.number': integer + /** The total capacity of the queue (`queue.type: persisted`) in terms of units of bytes. 
 */
    'queue.max_bytes.units': string
+ /** The maximum number of written events before forcing a checkpoint when persistent queues are enabled (`queue.type: persisted`). */
    'queue.checkpoint.writes': integer
}

export interface LogstashDeletePipelineRequest extends RequestBase {
-/** An identifier for the pipeline. */
+ /** An identifier for the pipeline. */
    id: Id
    /** All values in `body` will be added to the request body. */
    body?: string | { [key: string]: any } & { id?: never }
@@ -16518,7 +24074,7 @@

export type LogstashDeletePipelineResponse = boolean

export interface LogstashGetPipelineRequest extends RequestBase {
-/** A comma-separated list of pipeline identifiers. */
+ /** A comma-separated list of pipeline identifiers. */
    id?: Ids
    /** All values in `body` will be added to the request body. */
    body?: string | { [key: string]: any } & { id?: never }
@@ -16529,7 +24085,7 @@

export type LogstashGetPipelineResponse = Record<string, LogstashPipeline>

export interface LogstashPutPipelineRequest extends RequestBase {
-/** An identifier for the pipeline. */
+ /** An identifier for the pipeline. */
    id: Id
    pipeline?: LogstashPipeline
    /** All values in `body` will be added to the request body. */
@@ -16541,9 +24097,13 @@

export type LogstashPutPipelineResponse = boolean

export interface MigrationDeprecationsDeprecation {
+ /** Optional details about the deprecation warning. */
    details?: string
+ /** The level property describes the significance of the issue. */
    level: MigrationDeprecationsDeprecationLevel
+ /** Descriptive information about the deprecation warning. */
    message: string
+ /** A link to the breaking change documentation, where you can find more information about this change. */
    url: string
    resolve_during_rolling_upgrade: boolean
    _meta?: Record<string, any>
@@ -16552,7 +24112,7 @@ export interface MigrationDeprecationsDeprecation {

export type MigrationDeprecationsDeprecationLevel = 'none' | 'info' | 'warning' | 'critical'

export interface MigrationDeprecationsRequest extends RequestBase {
-/** Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported. */
+ /** Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported. */
    index?: IndexName
    /** All values in `body` will be added to the request body. */
    body?: string | { [key: string]: any } & { index?: never }
@@ -16561,12 +24121,21 @@ export interface MigrationDeprecationsRequest extends RequestBase {
}

export interface MigrationDeprecationsResponse {
+ /** Cluster-level deprecation warnings. */
    cluster_settings: MigrationDeprecationsDeprecation[]
+ /** Index warnings are sectioned off per index and can be filtered using an index-pattern in the query.
+ * This section includes warnings for the backing indices of data streams specified in the request path. */
    index_settings: Record<string, MigrationDeprecationsDeprecation[]>
    data_streams: Record<string, MigrationDeprecationsDeprecation[]>
+ /** Node-level deprecation warnings.
+ * Since only a subset of your nodes might incorporate these settings, it is important to read the details section for more information about which nodes are affected. */
    node_settings: MigrationDeprecationsDeprecation[]
+ /** Machine learning-related deprecation warnings. */
    ml_settings: MigrationDeprecationsDeprecation[]
+ /** Template warnings are sectioned off per template and include deprecations for both component templates and
+ * index templates.
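// --- Illustrative aside: storing a Logstash pipeline whose shape matches the LogstashPipeline and
// --- LogstashPipelineSettings interfaces above. All values are sample data, not defaults.
await client.logstash.putPipeline({
  id: 'apache-logs', // hypothetical pipeline id
  pipeline: {
    description: 'ingest apache logs',
    last_modified: new Date().toISOString(),
    pipeline: 'input { beats { port => 5044 } } output { elasticsearch {} }',
    pipeline_metadata: { type: 'logstash_pipeline', version: '1' },
    pipeline_settings: {
      'pipeline.workers': 1,
      'pipeline.batch.size': 125,
      'pipeline.batch.delay': 50,
      'queue.type': 'memory',
      'queue.max_bytes.number': 1,
      'queue.max_bytes.units': 'gb',
      'queue.checkpoint.writes': 1024
    },
    username: 'elastic'
  }
})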
*/ templates: Record + /** ILM policy warnings are sectioned off per policy. */ ilm_policies: Record } @@ -16614,72 +24183,151 @@ export interface MigrationPostFeatureUpgradeResponse { } export interface MlAdaptiveAllocationsSettings { + /** If true, adaptive_allocations is enabled */ enabled: boolean + /** Specifies the minimum number of allocations to scale to. + * If set, it must be greater than or equal to 0. + * If not defined, the deployment scales to 0. */ min_number_of_allocations?: integer + /** Specifies the maximum number of allocations to scale to. + * If set, it must be greater than or equal to min_number_of_allocations. */ max_number_of_allocations?: integer } export interface MlAnalysisConfig { + /** The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. This value should be either a whole number of days or equate to a + * whole number of buckets in one day. If the anomaly detection job uses a datafeed with aggregations, this value must also be divisible by the interval of the date histogram aggregation. */ bucket_span?: Duration + /** If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. This property cannot be used at the same time as `categorization_filters`. The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. The `categorization_analyzer` field can be specified either as a string or as an object. If it is a string, it must refer to a built-in analyzer or one added by another plugin. */ categorization_analyzer?: MlCategorizationAnalyzer + /** If this property is specified, the values of the specified field will be categorized. The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ categorization_field_name?: Field + /** If `categorization_field_name` is specified, you can also define optional filters. This property expects an array of regular expressions. The expressions are used to filter out matching sequences from the categorization field values. You can use this functionality to fine tune the categorization by excluding sequences from consideration when categories are defined. For example, you can exclude SQL statements that appear in your log files. This property cannot be used at the same time as `categorization_analyzer`. If you only want to define simple regular expression filters that are applied prior to tokenization, setting this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, use the `categorization_analyzer` property instead and include the filters as pattern_replace character filters. The effect is exactly the same. */ categorization_filters?: string[] + /** Detector configuration objects specify which data fields a job analyzes. They also specify which analytical functions are used. You can specify multiple detectors for a job. If the detectors array does not contain at least one detector, no analysis can occur and an error is returned. */ detectors: MlDetector[] + /** A comma separated list of influencer field names. Typically these can be the by, over, or partition fields that are used in the detector configuration. You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. 
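// --- Illustrative aside: fetching the deprecation report whose shape (MigrationDeprecationsResponse)
// --- is declared above, using the `client` from the earlier sketch.
const report = await client.migration.deprecations()
for (const warning of report.cluster_settings) {
  // `level` is one of 'none' | 'info' | 'warning' | 'critical'
  console.log(warning.level, warning.message, warning.url)
}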
When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ influencers?: Field[] + /** The size of the window in which to expect data that is out of time order. If you specify a non-zero value, it must be greater than or equal to one second. NOTE: Latency is applicable only when you send data by using the post data API. */ latency?: Duration + /** Advanced configuration option. Affects the pruning of models that have not been updated for the given time duration. The value must be set to a multiple of the `bucket_span`. If set too low, important information may be removed from the model. For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ model_prune_window?: Duration + /** This functionality is reserved for internal use. It is not supported for use in customer environments and is not subject to the support SLA of official GA features. If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. For example, suppose CPU and memory usage on host A is usually highly correlated with the same metrics on host B. Perhaps this correlation occurs because they are running a load-balanced application. If you enable this property, anomalies will be reported when, for example, CPU usage on host A is high and the value of CPU usage on host B is low. That is to say, you’ll see an anomaly when the CPU of host A is unusual given the CPU of host B. To use the `multivariate_by_fields` property, you must also specify `by_field_name` in your detector. */ multivariate_by_fields?: boolean + /** Settings related to how categorization interacts with partition fields. */ per_partition_categorization?: MlPerPartitionCategorization + /** If this property is specified, the data that is fed to the job is expected to be pre-summarized. This property value is the name of the field that contains the count of raw data points that have been summarized. The same `summary_count_field_name` applies to all detectors in the job. NOTE: The `summary_count_field_name` property cannot be used with the `metric` function. */ summary_count_field_name?: Field } export interface MlAnalysisConfigRead { + /** The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. */ bucket_span: Duration + /** If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. + * This property cannot be used at the same time as `categorization_filters`. + * The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. */ categorization_analyzer?: MlCategorizationAnalyzer + /** If this property is specified, the values of the specified field will be categorized. + * The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ categorization_field_name?: Field + /** If `categorization_field_name` is specified, you can also define optional filters. + * This property expects an array of regular expressions. + * The expressions are used to filter out matching sequences from the categorization field values. */ categorization_filters?: string[] + /** An array of detector configuration objects. + * Detector configuration objects specify which data fields a job analyzes. 
+ * They also specify which analytical functions are used. + * You can specify multiple detectors for a job. */ detectors: MlDetectorRead[] + /** A comma separated list of influencer field names. + * Typically these can be the by, over, or partition fields that are used in the detector configuration. + * You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. + * When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ influencers: Field[] + /** Advanced configuration option. + * Affects the pruning of models that have not been updated for the given time duration. + * The value must be set to a multiple of the `bucket_span`. + * If set too low, important information may be removed from the model. + * Typically, set to `30d` or longer. + * If not set, model pruning only occurs if the model memory status reaches the soft limit or the hard limit. + * For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ model_prune_window?: Duration + /** The size of the window in which to expect data that is out of time order. + * Defaults to no latency. + * If you specify a non-zero value, it must be greater than or equal to one second. */ latency?: Duration + /** This functionality is reserved for internal use. + * It is not supported for use in customer environments and is not subject to the support SLA of official GA features. + * If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. */ multivariate_by_fields?: boolean + /** Settings related to how categorization interacts with partition fields. */ per_partition_categorization?: MlPerPartitionCategorization + /** If this property is specified, the data that is fed to the job is expected to be pre-summarized. + * This property value is the name of the field that contains the count of raw data points that have been summarized. + * The same `summary_count_field_name` applies to all detectors in the job. */ summary_count_field_name?: Field } export interface MlAnalysisLimits { + /** The maximum number of examples stored per category in memory and in the results data store. If you increase this value, more examples are available, however it requires that you have more storage available. If you set this value to 0, no examples are stored. NOTE: The `categorization_examples_limit` applies only to analysis that uses categorization. */ categorization_examples_limit?: long + /** The approximate maximum amount of memory resources that are required for analytical processing. Once this limit is approached, data pruning becomes more aggressive. Upon exceeding this limit, new entities are not modeled. If the `xpack.ml.max_model_memory_limit` setting has a value greater than 0 and less than 1024mb, that value is used instead of the default. The default value is relatively small to ensure that high resource usage is a conscious decision. If you have jobs that are expected to analyze high cardinality fields, you will likely need to use a higher value. If you specify a number instead of a string, the units are assumed to be MiB. Specifying a string is recommended for clarity. If you specify a byte size unit of `b` or `kb` and the number does not equate to a discrete number of megabytes, it is rounded down to the closest MiB. The minimum valid value is 1 MiB. 
If you specify a value less than 1 MiB, an error occurs. If you specify a value for the `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create jobs that have `model_memory_limit` values greater than that setting value. */ model_memory_limit?: ByteSize } export interface MlAnalysisMemoryLimit { + /** Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ model_memory_limit: string } export interface MlAnomaly { + /** The actual value for the bucket. */ actual?: double[] + /** Information about the factors impacting the initial anomaly score. */ anomaly_score_explanation?: MlAnomalyExplanation + /** The length of the bucket in seconds. This value matches the `bucket_span` that is specified in the job. */ bucket_span: DurationValue + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */ by_field_name?: string + /** The value of `by_field_name`. */ by_field_value?: string + /** For population analysis, an over field must be specified in the detector. This property contains an array of anomaly records that are the causes for the anomaly that has been identified for the over field. This sub-resource contains the most anomalous records for the `over_field_name`. For scalability reasons, a maximum of the 10 most significant causes of the anomaly are returned. As part of the core analytical modeling, these low-level anomaly records are aggregated for their parent over field record. The `causes` resource contains similar elements to the record resource, namely `actual`, `typical`, `geo_results.actual_point`, `geo_results.typical_point`, `*_field_name` and `*_field_value`. Probability and scores are not applicable to causes. */ causes?: MlAnomalyCause[] + /** A unique identifier for the detector. */ detector_index: integer + /** Certain functions require a field to operate on, for example, `sum()`. For those functions, this value is the name of the field to be analyzed. */ field_name?: string + /** The function in which the anomaly occurs, as specified in the detector configuration. For example, `max`. */ function?: string + /** The description of the function in which the anomaly occurs, as specified in the detector configuration. */ function_description?: string + /** If the detector function is `lat_long`, this object contains comma delimited strings for the latitude and longitude of the actual and typical values. */ geo_results?: MlGeoResults + /** If influencers were specified in the detector configuration, this array contains influencers that contributed to or were to blame for an anomaly. */ influencers?: MlInfluence[] + /** A normalized score between 0-100, which is based on the probability of the anomalousness of this record. This is the initial value that was calculated at the time the bucket was processed. */ initial_record_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** Identifier for the anomaly detection job. */ job_id: string + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. 
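// --- Illustrative aside: MlAnalysisConfig and MlAnalysisLimits above assembled into a job
// --- definition. The job id, field names, and memory limit are assumptions for the example.
await client.ml.putJob({
  job_id: 'request-rate', // hypothetical job id
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'count' }], // at least one detector is required
    influencers: ['host']
  },
  analysis_limits: { model_memory_limit: '32mb' },
  data_description: { time_field: 'timestamp', time_format: 'epoch_ms' }
})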
It is used for finding unusual values in the population of all splits. */ over_field_name?: string + /** The value of `over_field_name`. */ over_field_value?: string + /** The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: string + /** The value of `partition_field_name`. */ partition_field_value?: string + /** The probability of the individual anomaly occurring, in the range 0 to 1. For example, `0.0000772031`. This value can be held to a high precision of over 300 decimal places, so the `record_score` is provided as a human-readable and friendly interpretation of this. */ probability: double + /** A normalized score between 0-100, which is based on the probability of the anomalousness of this record. Unlike `initial_record_score`, this value will be updated by a re-normalization process as new data is analyzed. */ record_score: double + /** Internal. This is always set to `record`. */ result_type: string + /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime + /** The typical value for the bucket, according to analytical modeling. */ typical?: double[] } @@ -16702,85 +24350,148 @@ export interface MlAnomalyCause { } export interface MlAnomalyExplanation { + /** Impact from the duration and magnitude of the detected anomaly relative to the historical average. */ anomaly_characteristics_impact?: integer + /** Length of the detected anomaly in the number of buckets. */ anomaly_length?: integer + /** Type of the detected anomaly: `spike` or `dip`. */ anomaly_type?: string + /** Indicates reduction of anomaly score for the bucket with large confidence intervals. If a bucket has large confidence intervals, the score is reduced. */ high_variance_penalty?: boolean + /** If the bucket contains fewer samples than expected, the score is reduced. */ incomplete_bucket_penalty?: boolean + /** Lower bound of the 95% confidence interval. */ lower_confidence_bound?: double + /** Impact of the deviation between actual and typical values in the past 12 buckets. */ multi_bucket_impact?: integer + /** Impact of the deviation between actual and typical values in the current bucket. */ single_bucket_impact?: integer + /** Typical (expected) value for this bucket. */ typical_value?: double + /** Upper bound of the 95% confidence interval. */ upper_confidence_bound?: double } export interface MlApiKeyAuthorization { + /** The identifier for the API key. */ id: string + /** The name of the API key. */ name: string } export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time' export interface MlBucketInfluencer { + /** A normalized score between 0-100, which is calculated for each bucket influencer. This score might be updated as + * newer data is analyzed. */ anomaly_score: double + /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ bucket_span: DurationValue + /** The field name of the influencer. */ influencer_field_name: Field + /** The score between 0-100 for each bucket influencer. This score is the initial value that was calculated at the + * time the bucket was processed. */ initial_anomaly_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** Identifier for the anomaly detection job. 
*/ job_id: Id + /** The probability that the bucket has this behavior, in the range 0 to 1. This value can be held to a high precision + * of over 300 decimal places, so the `anomaly_score` is provided as a human-readable and friendly interpretation of + * this. */ probability: double + /** Internal. */ raw_anomaly_score: double + /** Internal. This value is always set to `bucket_influencer`. */ result_type: string + /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime + /** The start time of the bucket for which these results were calculated. */ timestamp_string?: DateTime } export interface MlBucketSummary { + /** The maximum anomaly score, between 0-100, for any of the bucket influencers. This is an overall, rate-limited + * score for the job. All the anomaly records in the bucket contribute to this score. This value might be updated as + * new data is analyzed. */ anomaly_score: double bucket_influencers: MlBucketInfluencer[] + /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ bucket_span: DurationValue + /** The number of input data records processed in this bucket. */ event_count: long + /** The maximum anomaly score for any of the bucket influencers. This is the initial value that was calculated at the + * time the bucket was processed. */ initial_anomaly_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** Identifier for the anomaly detection job. */ job_id: Id + /** The amount of time, in milliseconds, that it took to analyze the bucket contents and calculate results. */ processing_time_ms: DurationValue + /** Internal. This value is always set to bucket. */ result_type: string + /** The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the + * timestamp of the bucket are included in the results for the bucket. */ timestamp: EpochTime + /** The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the + * timestamp of the bucket are included in the results for the bucket. */ timestamp_string?: DateTime } export interface MlCalendarEvent { + /** A string that uniquely identifies a calendar. */ calendar_id?: Id event_id?: Id + /** A description of the scheduled event. */ description: string + /** The timestamp for the end of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ end_time: DateTime + /** The timestamp for the beginning of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ start_time: DateTime + /** When true the model will not create results for this calendar period. */ skip_result?: boolean + /** When true the model will not be updated for this calendar period. */ skip_model_update?: boolean + /** Shift time by this many seconds. For example adjust time for daylight savings changes */ force_time_shift?: integer } export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition export interface MlCategorizationAnalyzerDefinition { + /** One or more character filters. In addition to the built-in character filters, other plugins can provide more character filters. If this property is not specified, no character filters are applied prior to categorization. 
If you are customizing some other aspect of the analyzer and you need to achieve the equivalent of `categorization_filters` (which are not permitted when some other aspect of the analyzer is customized), add them here as pattern replace character filters. */ char_filter?: AnalysisCharFilter[] + /** One or more token filters. In addition to the built-in token filters, other plugins can provide more token filters. If this property is not specified, no token filters are applied prior to categorization. */ filter?: AnalysisTokenFilter[] + /** The name or definition of the tokenizer to use after character filters are applied. This property is compulsory if `categorization_analyzer` is specified as an object. Machine learning provides a tokenizer called `ml_standard` that tokenizes in a way that has been determined to produce good categorization results on a variety of log file formats for logs in English. If you want to use that tokenizer but change the character or token filters, specify "tokenizer": "ml_standard" in your `categorization_analyzer`. Additionally, the `ml_classic` tokenizer is available, which tokenizes in the same way as the non-customizable tokenizer in old versions of the product (before 6.2). `ml_classic` was the default categorization tokenizer in versions 6.2 to 7.13, so if you need categorization identical to the default for jobs created in these versions, specify "tokenizer": "ml_classic" in your `categorization_analyzer`. */ tokenizer?: AnalysisTokenizer } export type MlCategorizationStatus = 'ok' | 'warn' export interface MlCategory { + /** A unique identifier for the category. category_id is unique at the job level, even when per-partition categorization is enabled. */ category_id: ulong + /** A list of examples of actual values that matched the category. */ examples: string[] + /** [experimental] A Grok pattern that could be used in Logstash or an ingest pipeline to extract fields from messages that match the category. This field is experimental and may be changed or removed in a future release. The Grok patterns that are found are not optimal, but are often a good starting point for manual tweaking. */ grok_pattern?: GrokPattern + /** Identifier for the anomaly detection job. */ job_id: Id + /** The maximum length of the fields that matched the category. The value is increased by 10% to enable matching for similar fields that have not been analyzed. */ max_matching_length: ulong + /** If per-partition categorization is enabled, this property identifies the field used to segment the categorization. It is not present when per-partition categorization is disabled. */ partition_field_name?: string + /** If per-partition categorization is enabled, this property identifies the value of the partition_field_name for the category. It is not present when per-partition categorization is disabled. */ partition_field_value?: string + /** A regular expression that is used to search for values that match the category. */ regex: string + /** A space separated list of the common tokens that are matched in values of the category. */ terms: string + /** The number of messages that have been matched by this category. This is only guaranteed to have the latest accurate count after a job _flush or _close */ num_matches?: long + /** A list of category_id entries that this current category encompasses. Any new message that is processed by the categorizer will match against this category and not any of the categories in this list. 
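// --- Illustrative aside: an analysis config that drives categorization, per the
// --- MlCategorizationAnalyzerDefinition above. The pattern_replace regex is a sample value, and the
// --- types import path is the one the 8.x client documents for standalone type usage.
import type { MlAnalysisConfig } from '@elastic/elasticsearch/lib/api/types'

const categorizingConfig: MlAnalysisConfig = {
  bucket_span: '30m',
  categorization_field_name: 'message',
  // the `mlcategory` keyword wires the detector to the categorization results
  detectors: [{ function: 'count', by_field_name: 'mlcategory' }],
  categorization_analyzer: {
    tokenizer: 'ml_standard', // keep ml_standard but strip noisy fragments first
    char_filter: [{ type: 'pattern_replace', pattern: '\\[statement:.*\\]' }]
  }
}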
This is only guaranteed to have the latest accurate list of categories after a job _flush or _close */ preferred_to_categories?: Id[] p?: string result_type: string @@ -16788,25 +24499,40 @@ export interface MlCategory { } export interface MlChunkingConfig { + /** If the mode is `auto`, the chunk size is dynamically calculated; + * this is the recommended value when the datafeed does not use aggregations. + * If the mode is `manual`, chunking is applied according to the specified `time_span`; + * use this mode when the datafeed uses aggregations. If the mode is `off`, no chunking is applied. */ mode: MlChunkingMode + /** The time span that each search will be querying. This setting is applicable only when the `mode` is set to `manual`. */ time_span?: Duration } export type MlChunkingMode = 'auto' | 'manual' | 'off' export interface MlClassificationInferenceOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer + /** Specifies the type of the predicted field to write. Acceptable values are: string, number, boolean. When boolean is provided 1.0 is transformed to true and 0.0 to false. */ prediction_field_type?: string + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** Specifies the field to which the top classes are written. Defaults to top_classes. */ top_classes_results_field?: string } export interface MlCommonTokenizationConfig { + /** Should the tokenizer lower case the text */ do_lower_case?: boolean + /** Maximum input sequence length for the model */ max_sequence_length?: integer + /** Tokenization spanning options. Special value of -1 indicates no spanning takes place */ span?: integer + /** Should tokenization input be automatically truncated before sending to the model for inference */ truncate?: MlTokenizationTruncate + /** Is tokenization completed with special tokens */ with_special_tokens?: boolean } @@ -16837,15 +24563,20 @@ export interface MlDataCounts { } export interface MlDataDescription { + /** Only JSON format is supported at this time. */ format?: string + /** The name of the field that contains the timestamp. */ time_field?: Field + /** The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. The value `epoch` refers to UNIX or Epoch time (the number of seconds since 1 Jan 1970). The value `epoch_ms` indicates that time is measured in milliseconds since the epoch. The `epoch` and `epoch_ms` time formats accept either integer or real values. Custom patterns must conform to the Java DateTimeFormatter class. When you use date-time formatting patterns, it is recommended that you provide the full date, time and time zone. For example: `yyyy-MM-dd'T'HH:mm:ssX`. If the pattern that you specify is not sufficient to produce a complete timestamp, job creation fails. */ time_format?: string field_delimiter?: string } export interface MlDatafeed { aggregations?: Record + /** @alias aggregations */ aggs?: Record + /** The security privileges that the datafeed uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the datafeed, this property is omitted. 
*/ authorization?: MlDatafeedAuthorization chunking_config?: MlChunkingConfig datafeed_id: Id @@ -16864,226 +24595,367 @@ export interface MlDatafeed { } export interface MlDatafeedAuthorization { + /** If an API key was used for the most recent update to the datafeed, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the datafeed, its roles at the time of the update are listed in the response. */ roles?: string[] + /** If a service account was used for the most recent update to the datafeed, the account name is listed in the response. */ service_account?: string } export interface MlDatafeedConfig { + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ aggregations?: Record + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + * @alias aggregations */ aggs?: Record + /** Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks is calculated and is an advanced configuration option. */ chunking_config?: MlChunkingConfig + /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. The default value is the job identifier. */ datafeed_id?: Id + /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` option is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration + /** An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ indices?: Indices + /** An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. + * @alias indices */ indexes?: Indices + /** Specifies index expansion options that are used during search. */ indices_options?: IndicesOptions job_id?: Id + /** If a real-time datafeed has never seen any data (including during any initial training period), then it will automatically stop itself and close its associated job after this many real-time searches that return no documents.
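A hedged sketch of an `MlDatafeedConfig`-shaped object tying these properties together; the job id, index pattern, and intervals are placeholders.

[source,ts]
----
// Sketch: a datafeed configuration using the MlDatafeedConfig shape above.
// All identifiers and intervals are illustrative placeholders.
const datafeedConfig = {
  datafeed_id: 'datafeed-example',
  job_id: 'example-job',
  indices: ['server-metrics-*'],
  query: { match_all: {} },
  query_delay: '90s',                                  // data may be indexed late
  frequency: '150s',                                   // query interval while running in real time
  chunking_config: { mode: 'manual', time_span: '3h' } // manual chunking, as described above
}
----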
In other words, it will stop after `frequency` times `max_empty_searches` of real-time operation. If not set, then a datafeed with no end time that sees no data will remain started until it is explicitly stopped. */ max_empty_searches?: integer + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. */ query?: QueryDslQueryContainer + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */ query_delay?: Duration + /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields + /** Specifies scripts that evaluate custom expressions and return script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ scroll_size?: integer } export interface MlDatafeedRunningState { + /** Indicates if the datafeed is "real-time", meaning that the datafeed has no configured `end` time. */ real_time_configured: boolean + /** Indicates whether the datafeed has finished running on the available past data. + * For datafeeds without a configured `end` time, this means that the datafeed is now running on "real-time" data. */ real_time_running: boolean + /** Provides the latest time interval the datafeed has searched. */ search_interval?: MlRunningStateSearchInterval } export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { + /** For started datafeeds only, contains messages relating to the selection of a node. */ assignment_explanation?: string + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ datafeed_id: Id + /** For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: MlDiscoveryNodeCompact + /** The status of the datafeed, which can be one of the following values: `starting`, `started`, `stopping`, `stopped`. */ state: MlDatafeedState + /** An object that provides statistical information about the timing aspects of this datafeed. */ timing_stats?: MlDatafeedTimingStats + /** An object containing the running state for this datafeed. + * It is only provided if the datafeed is started. */ running_state?: MlDatafeedRunningState } export interface MlDatafeedTimingStats { + /** The number of buckets processed. */ bucket_count: long + /** The exponential average search time per hour, in milliseconds. */ exponential_average_search_time_per_hour_ms: DurationValue exponential_average_calculation_context?: MlExponentialAverageCalculationContext + /** Identifier for the anomaly detection job.
*/ job_id: Id + /** The number of searches run by the datafeed. */ search_count: long + /** The total time the datafeed spent searching, in milliseconds. */ total_search_time_ms: DurationValue + /** The average search time per bucket, in milliseconds. */ average_search_time_per_bucket_ms?: DurationValue } export interface MlDataframeAnalysis { + /** Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This parameter affects loss calculations by acting as a multiplier of the tree depth. Higher alpha values result in shallower trees and faster training times. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to zero. */ alpha?: double + /** Defines which field of the document is to be predicted. It must match one of the fields in the index being used to train. If this field is missing from a document, then that document will not be used for training, but a prediction with the trained model will be generated for it. It is also known as the continuous target variable. + * For classification analysis, the data type of the field must be numeric (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or `boolean`. There must be no more than 30 different values in this field. + * For regression analysis, the data type of the field must be numeric. */ dependent_variable: string + /** Advanced configuration option. Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. A small value results in the use of a small fraction of the data. If this value is set to be less than 1, accuracy typically improves. However, too small a value may result in poor convergence for the ensemble and so require more trees. By default, this value is calculated during hyperparameter optimization. It must be greater than zero and less than or equal to 1. */ downsample_factor?: double + /** Advanced configuration option. Specifies whether the training process should finish if it is not finding any better performing models. If disabled, the training process can take significantly longer and the chance of finding a better performing model is small. */ early_stopping_enabled?: boolean + /** Advanced configuration option. The shrinkage applied to the weights. Smaller values result in larger forests, which have a better generalization error. However, larger forests cause slower training. By default, this value is calculated during hyperparameter optimization. It must be a value between 0.001 and 1. */ eta?: double + /** Advanced configuration option. Specifies the rate at which `eta` increases for each new tree that is added to the forest. For example, a rate of 1.05 increases `eta` by 5% for each extra tree. By default, this value is calculated during hyperparameter optimization. It must be between 0.5 and 2. */ eta_growth_rate_per_tree?: double + /** Advanced configuration option. Defines the fraction of features that will be used when selecting a random bag for each candidate split. By default, this value is calculated during hyperparameter optimization. */ feature_bag_fraction?: double + /** Advanced configuration option. A collection of feature preprocessors that modify one or more included fields. The analysis uses the resulting one or more features instead of the original document field. However, these features are ephemeral; they are not stored in the destination index.
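To make the hyperparameter discussion concrete, here is a sketch of a regression analysis object; only `dependent_variable` is required, and the unset hyperparameters (`eta`, `max_trees`, and so on) would be chosen by hyperparameter optimization as the comments above describe. The field names are invented.

[source,ts]
----
// Sketch: a regression analysis; unset hyperparameters are optimized automatically.
const analysis = {
  regression: {
    dependent_variable: 'price',            // hypothetical target field
    training_percent: 80,                   // hold back 20% of eligible docs for testing
    num_top_feature_importance_values: 5,   // report feature importance per document
    randomize_seed: 42                      // reproducible training-data selection
  }
}
----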
Multiple `feature_processors` entries can refer to the same document fields. Automatic categorical feature encoding still occurs for the fields that are unprocessed by a custom processor or that have categorical values. Use this property only if you want to override the automatic feature encoding of the specified fields. */ feature_processors?: MlDataframeAnalysisFeatureProcessor[] + /** Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies a linear penalty associated with the size of individual trees in the forest. A high gamma value causes training to prefer small trees. A small gamma value results in larger individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ gamma?: double + /** Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. A high lambda value causes training to favor small leaf weights. This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. A small lambda value results in large individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ lambda?: double + /** Advanced configuration option. A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. By default, this value is calculated during hyperparameter optimization. */ max_optimization_rounds_per_hyperparameter?: integer + /** Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. */ max_trees?: integer + /** Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. + * @alias max_trees */ maximum_number_trees?: integer + /** Advanced configuration option. Specifies the maximum number of feature importance values per document to return. By default, no feature importance calculation occurs. */ num_top_feature_importance_values?: integer + /** Defines the name of the prediction field in the results. Defaults to `_prediction`. */ prediction_field_name?: Field + /** Defines the seed for the random generator that is used to pick training data. By default, it is randomly generated. Set it to a specific value to use the same training data each time you start a job (assuming other related parameters such as `source` and `analyzed_fields` are the same). */ randomize_seed?: double + /** Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0. 
*/ soft_tree_depth_limit?: integer + /** Advanced configuration option. This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0.01. */ soft_tree_depth_tolerance?: double + /** Defines what percentage of the eligible documents will be used for training. Documents that are ignored by the analysis (for example, those that contain arrays with more than one value) won’t be included in the calculation for used percentage. */ training_percent?: Percentage } export interface MlDataframeAnalysisAnalyzedFields { + /** An array of strings that defines the fields that will be included in the analysis. */ includes?: string[] + /** An array of strings that defines the fields that will be excluded from the analysis. You do not need to add fields with unsupported data types to `excludes`; these fields are excluded from the analysis automatically. */ excludes?: string[] } export interface MlDataframeAnalysisClassification extends MlDataframeAnalysis { class_assignment_objective?: string + /** Defines the number of categories for which the predicted probabilities are reported. It must be non-negative or -1. If it is -1 or greater than the total number of categories, probabilities are reported for all categories; if you have a large number of categories, there could be a significant effect on the size of your destination index. NOTE: To use the AUC ROC evaluation method, `num_top_classes` must be set to -1 or a value greater than or equal to the total number of categories. */ num_top_classes?: integer } export interface MlDataframeAnalysisContainer { + /** The configuration information necessary to perform classification. */ classification?: MlDataframeAnalysisClassification + /** The configuration information necessary to perform outlier detection. NOTE: Advanced parameters are for fine-tuning outlier detection analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ outlier_detection?: MlDataframeAnalysisOutlierDetection + /** The configuration information necessary to perform regression. NOTE: Advanced parameters are for fine-tuning regression analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ regression?: MlDataframeAnalysisRegression } export interface MlDataframeAnalysisFeatureProcessor { + /** The configuration information necessary to perform frequency encoding. */ frequency_encoding?: MlDataframeAnalysisFeatureProcessorFrequencyEncoding + /** The configuration information necessary to perform multi encoding. It allows multiple processors to be chained together. This way the output of a processor can then be passed to another as an input. */ multi_encoding?: MlDataframeAnalysisFeatureProcessorMultiEncoding + /** The configuration information necessary to perform n-gram encoding. Features created by this encoder have the following name format: `<feature_prefix>.<ngram><string position>`. For example, if the feature_prefix is f, the feature name for the second unigram in a string is f.11. */ n_gram_encoding?: MlDataframeAnalysisFeatureProcessorNGramEncoding + /** The configuration information necessary to perform one hot encoding.
*/ one_hot_encoding?: MlDataframeAnalysisFeatureProcessorOneHotEncoding + /** The configuration information necessary to perform target mean encoding. */ target_mean_encoding?: MlDataframeAnalysisFeatureProcessorTargetMeanEncoding } export interface MlDataframeAnalysisFeatureProcessorFrequencyEncoding { + /** The resulting feature name. */ feature_name: Name field: Field + /** The resulting frequency map for the field value. If the field value is missing from the frequency_map, the resulting value is 0. */ frequency_map: Record } export interface MlDataframeAnalysisFeatureProcessorMultiEncoding { + /** The ordered array of custom processors to execute. It must contain more than one processor. */ processors: integer[] } export interface MlDataframeAnalysisFeatureProcessorNGramEncoding { + /** The feature name prefix. Defaults to ngram__. */ feature_prefix?: string + /** The name of the text field to encode. */ field: Field + /** Specifies the length of the n-gram substring. Defaults to 50. Must be greater than 0. */ length?: integer + /** Specifies which n-grams to gather. It’s an array of integer values where the minimum value is 1, and a maximum value is 5. */ n_grams: integer[] + /** Specifies the zero-indexed start of the n-gram substring. Negative values are allowed for encoding n-grams of string suffixes. Defaults to 0. */ start?: integer custom?: boolean } export interface MlDataframeAnalysisFeatureProcessorOneHotEncoding { + /** The name of the field to encode. */ field: Field + /** The one hot map mapping the field value with the column name. */ hot_map: string } export interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding { + /** The default value if the field value is not found in the target_map. */ default_value: integer + /** The resulting feature name. */ feature_name: Name + /** The name of the field to encode. */ field: Field + /** The field value to target mean transition map. */ target_map: Record } export interface MlDataframeAnalysisOutlierDetection { + /** Specifies whether the feature influence calculation is enabled. */ compute_feature_influence?: boolean + /** The minimum outlier score that a document needs to have in order to calculate its feature influence score. Value range: 0-1. */ feature_influence_threshold?: double + /** The method that outlier detection uses. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is `ensemble`, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score. */ method?: string + /** Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. When the value is not set, different values are used for different ensemble members. This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ n_neighbors?: integer + /** The proportion of the data set that is assumed to be outlying prior to outlier detection. For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */ outlier_fraction?: double + /** If true, the following operation is performed on the columns before computing outlier scores: `(x_i - mean(x_i)) / sd(x_i)`. */ standardization_enabled?: boolean } export interface MlDataframeAnalysisRegression extends MlDataframeAnalysis { + /** The loss function used during regression.
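A sketch of the feature processor shapes above; note that `hot_map` is a JSON-encoded string per the `hot_map: string` declaration, and all field names are invented.

[source,ts]
----
// Sketch: two feature processors matching the shapes above (invented fields).
const featureProcessors = [
  {
    one_hot_encoding: {
      field: 'animal_class',
      // hot_map is declared as a string: a JSON-encoded value-to-column map
      hot_map: '{"cat": "class_cat", "dog": "class_dog"}'
    }
  },
  {
    n_gram_encoding: {
      field: 'description',
      feature_prefix: 'f',
      n_grams: [1, 2],   // gather unigrams and bigrams
      start: 0,
      length: 10
    }
  }
]
----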
Available options are `mse` (mean squared error), `msle` (mean squared logarithmic error), and `huber` (Pseudo-Huber loss). */ loss_function?: string + /** A positive number that is used as a parameter to the `loss_function`. */ loss_function_parameter?: double } export interface MlDataframeAnalytics { + /** An object containing information about the analysis job. */ analysis_stats?: MlDataframeAnalyticsStatsContainer + /** For running jobs only, contains messages relating to the selection of a node to run the job. */ assignment_explanation?: string + /** An object that provides counts for the quantity of documents skipped, used in training, or available for testing. */ data_counts: MlDataframeAnalyticsStatsDataCounts + /** The unique identifier of the data frame analytics job. */ id: Id + /** An object describing memory usage of the analytics. It is present only after the job is started and memory usage is reported. */ memory_usage: MlDataframeAnalyticsStatsMemoryUsage + /** Contains properties for the node that runs the job. This information is available only for running jobs. + * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: NodeAttributes + /** The progress report of the data frame analytics job by phase. */ progress: MlDataframeAnalyticsStatsProgress[] + /** The status of the data frame analytics job, which can be one of the following values: failed, started, starting, stopping, stopped. */ state: MlDataframeState } export interface MlDataframeAnalyticsAuthorization { + /** If an API key was used for the most recent update to the job, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the job, its roles at the time of the update are listed in the response. */ roles?: string[] + /** If a service account was used for the most recent update to the job, the account name is listed in the response. */ service_account?: string } export interface MlDataframeAnalyticsDestination { + /** Defines the destination index to store the results of the data frame analytics job. */ index: IndexName + /** Defines the name of the field in which to store the results of the analysis. Defaults to `ml`. */ results_field?: Field } export interface MlDataframeAnalyticsFieldSelection { + /** Whether the field is selected to be included in the analysis. */ is_included: boolean + /** Whether the field is required. */ is_required: boolean + /** The feature type of this field for the analysis. May be categorical or numerical. */ feature_type?: string + /** The mapping types of the field. */ mapping_types: string[] + /** The field name. */ name: Field + /** The reason a field is not selected to be included in the analysis. */ reason?: string } export interface MlDataframeAnalyticsMemoryEstimation { + /** Estimated memory usage under the assumption that overflowing to disk is allowed during data frame analytics. expected_memory_with_disk is usually smaller than expected_memory_without_disk as using disk makes it possible to limit the main memory needed to perform data frame analytics. */ expected_memory_with_disk: string + /** Estimated memory usage under the assumption that the whole data frame analytics should happen in memory (i.e. without overflowing to disk). */ expected_memory_without_disk: string } export interface MlDataframeAnalyticsSource { + /** Index or indices on which to perform the analysis. It can be a single index or index pattern as well as an array of indices or patterns.
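Putting source, destination, and analysis together, a data frame analytics job might be created as sketched below, assuming the 8.x client request shape; the node URL, index names, and job id are placeholders.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Sketch: create a regression job from the source/dest/analysis shapes above.
await client.ml.putDataFrameAnalytics({
  id: 'house-price-regression',                     // placeholder job id
  source: { index: 'houses', query: { match_all: {} } },
  dest: { index: 'houses-predictions', results_field: 'ml' },
  analysis: { regression: { dependent_variable: 'price' } }
})
----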
NOTE: If your source indices contain documents with the same IDs, only the document that is indexed last appears in the destination index. */ index: Indices + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. By default, this property has the following value: {"match_all": {}}. */ query?: QueryDslQueryContainer + /** Definitions of runtime fields that will become part of the mapping of the destination index. */ runtime_mappings?: MappingRuntimeFields + /** Specify `includes` and/or `excludes` patterns to select which fields will be present in the destination. Fields that are excluded cannot be included in the analysis. */ _source?: MlDataframeAnalysisAnalyzedFields | string[] } export interface MlDataframeAnalyticsStatsContainer { + /** An object containing information about the classification analysis job. */ classification_stats?: MlDataframeAnalyticsStatsHyperparameters + /** An object containing information about the outlier detection job. */ outlier_detection_stats?: MlDataframeAnalyticsStatsOutlierDetection + /** An object containing information about the regression analysis. */ regression_stats?: MlDataframeAnalyticsStatsHyperparameters } export interface MlDataframeAnalyticsStatsDataCounts { + /** The number of documents that are skipped during the analysis because they contained values that are not supported by the analysis. For example, outlier detection does not support missing fields, so it skips documents with missing fields. Likewise, all types of analysis skip documents that contain arrays with more than one element. */ skipped_docs_count: integer + /** The number of documents that are not used for training the model and can be used for testing. */ test_docs_count: integer + /** The number of documents that are used for training the model. */ training_docs_count: integer } export interface MlDataframeAnalyticsStatsHyperparameters { + /** An object containing the parameters of the classification analysis job. */ hyperparameters: MlHyperparameters + /** The number of iterations on the analysis. */ iteration: integer + /** The timestamp when the statistics were reported in milliseconds since the epoch. */ timestamp: EpochTime + /** An object containing time statistics about the data frame analytics job. */ timing_stats: MlTimingStats + /** An object containing information about validation loss. */ validation_loss: MlValidationLoss } export interface MlDataframeAnalyticsStatsMemoryUsage { + /** This value is present when the status is `hard_limit` and it is a new estimate of how much memory the job needs. */ memory_reestimate_bytes?: long + /** The number of bytes used at the highest peak of memory usage. */ peak_usage_bytes: long + /** The memory usage status. */ status: string + /** The timestamp when memory usage was calculated. */ timestamp?: EpochTime } export interface MlDataframeAnalyticsStatsOutlierDetection { + /** The list of job parameters specified by the user or determined by algorithmic heuristics. */ parameters: MlOutlierDetectionParameters + /** The timestamp when the statistics were reported in milliseconds since the epoch. */ timestamp: EpochTime + /** An object containing time statistics about the data frame analytics job.
*/ timing_stats: MlTimingStats } export interface MlDataframeAnalyticsStatsProgress { + /** Defines the phase of the data frame analytics job. */ phase: string + /** The progress that the data frame analytics job has made, expressed as a percentage. */ progress_percent: integer } @@ -17091,6 +24963,7 @@ export interface MlDataframeAnalyticsSummary { allow_lazy_start?: boolean analysis: MlDataframeAnalysisContainer analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + /** The security privileges that the job uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the job, this property is omitted. */ authorization?: MlDataframeAnalyticsAuthorization create_time?: EpochTime description?: string @@ -17104,69 +24977,100 @@ export interface MlDataframeAnalyticsSummary { } export interface MlDataframeEvaluationClassification { + /** The field of the index which contains the ground truth. The data type of this field can be boolean or integer. If the data type is integer, the value has to be either 0 (false) or 1 (true). */ actual_field: Field + /** The field in the index which contains the predicted value, in other words the results of the classification analysis. */ predicted_field?: Field + /** The field of the index which is an array of documents of the form { "class_name": XXX, "class_probability": YYY }. This field must be defined as nested in the mappings. */ top_classes_field?: Field + /** Specifies the metrics that are used for the evaluation. */ metrics?: MlDataframeEvaluationClassificationMetrics } export interface MlDataframeEvaluationClassificationMetrics extends MlDataframeEvaluationMetrics { + /** Accuracy of predictions (per-class and overall). */ accuracy?: Record + /** Multiclass confusion matrix. */ multiclass_confusion_matrix?: Record } export interface MlDataframeEvaluationClassificationMetricsAucRoc { + /** Name of the only class that is treated as positive during AUC ROC calculation. Other classes are treated as negative ("one-vs-all" strategy). All the evaluated documents must have class_name in the list of their top classes. */ class_name?: Name + /** Whether or not the curve should be returned in addition to the score. Default value is false. */ include_curve?: boolean } export interface MlDataframeEvaluationContainer { + /** Classification evaluation evaluates the results of a classification analysis which outputs a prediction that identifies to which of the classes each document belongs. */ classification?: MlDataframeEvaluationClassification + /** Outlier detection evaluates the results of an outlier detection analysis which outputs the probability that each document is an outlier. */ outlier_detection?: MlDataframeEvaluationOutlierDetection + /** Regression evaluation evaluates the results of a regression analysis which outputs a prediction of values. */ regression?: MlDataframeEvaluationRegression } export interface MlDataframeEvaluationMetrics { + /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. It is calculated for a specific class (provided as "class_name") treated as positive. */ auc_roc?: MlDataframeEvaluationClassificationMetricsAucRoc + /** Precision of predictions (per-class and average). */ precision?: Record + /** Recall of predictions (per-class and average). */ recall?: Record } export interface MlDataframeEvaluationOutlierDetection { + /** The field of the index which contains the ground truth.
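For instance, an evaluation request built from the classification shape above might look like the following sketch; the index and field names are placeholders, assuming the 8.x client API.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Sketch: evaluate a classification analysis against its ground-truth field.
const evaluation = await client.ml.evaluateDataFrame({
  index: 'animals-predictions',                       // placeholder destination index
  evaluation: {
    classification: {
      actual_field: 'animal_class',
      predicted_field: 'ml.animal_class_prediction',
      metrics: { multiclass_confusion_matrix: {} }    // request the confusion matrix
    }
  }
})
----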
The data type of this field can be boolean or integer. If the data type is integer, the value has to be either 0 (false) or 1 (true). */ actual_field: Field + /** The field of the index that defines the probability of whether the item belongs to the class in question or not. It’s the field that contains the results of the analysis. */ predicted_probability_field: Field + /** Specifies the metrics that are used for the evaluation. */ metrics?: MlDataframeEvaluationOutlierDetectionMetrics } export interface MlDataframeEvaluationOutlierDetectionMetrics extends MlDataframeEvaluationMetrics { + /** The confusion matrix of the predictions. */ confusion_matrix?: Record } export interface MlDataframeEvaluationRegression { + /** The field of the index which contains the ground truth. The data type of this field must be numerical. */ actual_field: Field + /** The field in the index that contains the predicted value, in other words the results of the regression analysis. */ predicted_field: Field + /** Specifies the metrics that are used for the evaluation. For more information on mse, msle, and huber, consult the Jupyter notebook on regression loss functions. */ metrics?: MlDataframeEvaluationRegressionMetrics } export interface MlDataframeEvaluationRegressionMetrics { + /** Average squared difference between the predicted values and the actual (ground truth) value. For more information, read this wiki article. */ mse?: Record + /** Average squared difference between the logarithm of the predicted values and the logarithm of the actual (ground truth) value. */ msle?: MlDataframeEvaluationRegressionMetricsMsle + /** Pseudo Huber loss function. */ huber?: MlDataframeEvaluationRegressionMetricsHuber + /** Proportion of the variance in the dependent variable that is predictable from the independent variables. */ r_squared?: Record } export interface MlDataframeEvaluationRegressionMetricsHuber { + /** Approximates 1/2 (prediction - actual)^2 for values much less than delta and approximates a straight line with slope delta for values much larger than delta. Defaults to 1. Delta needs to be greater than 0. */ delta?: double } export interface MlDataframeEvaluationRegressionMetricsMsle { + /** Defines the transition point at which you switch from minimizing quadratic error to minimizing quadratic log error. Defaults to 1. */ offset?: double } export type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed' export interface MlDelayedDataCheckConfig { + /** The window of time that is searched for late data. This window of time ends with the latest finalized bucket. + * It defaults to null, which causes an appropriate `check_window` to be calculated when the real-time datafeed runs. + * In particular, the default `check_window` span calculation is based on the maximum of `2h` or `8 * bucket_span`. */ check_window?: Duration + /** Specifies whether the datafeed periodically checks for delayed data. */ enabled: boolean } @@ -17175,40 +25079,82 @@ export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_alloca export type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed' export interface MlDetectionRule { + /** The set of actions to be triggered when the rule applies. If more than one action is specified, the effects of all actions are combined. */ actions?: MlRuleAction[] + /** An array of numeric conditions when the rule applies. A rule must either have a non-empty scope or at least one condition.
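The regression metrics above compose similarly; this sketch requests `mse`, `huber`, and `msle` with the delta and offset parameters just described (field names invented).

[source,ts]
----
// Sketch: a regression evaluation with explicit metric parameters.
const regressionEvaluation = {
  regression: {
    actual_field: 'price',
    predicted_field: 'ml.price_prediction',
    metrics: {
      mse: {},
      huber: { delta: 1.0 },  // transition between quadratic and linear behavior
      msle: { offset: 1.0 }   // switch point from quadratic error to quadratic log error
    }
  }
}
----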
Multiple conditions are combined together with a logical AND. */ conditions?: MlRuleCondition[] + /** A scope of series where the rule applies. A rule must either have a non-empty scope or at least one condition. By default, the scope includes all series. Scoping is allowed for any of the fields that are also specified in `by_field_name`, `over_field_name`, or `partition_field_name`. */ scope?: Record } export interface MlDetector { + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */ by_field_name?: Field + /** Custom rules enable you to customize the way detectors operate. For example, a rule may dictate conditions under which results should be skipped. Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[] + /** A description of the detector. */ detector_description?: string + /** A unique identifier for the detector. This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. If you specify a value for this property, it is ignored. */ detector_index?: integer + /** If set, frequent entities are excluded from influencing the anomaly results. Entities can be considered frequent over time or frequent in a population. If you are working with both over and by fields, you can set `exclude_frequent` to `all` for both fields, or to `by` or `over` for those specific fields. */ exclude_frequent?: MlExcludeFrequent + /** The field that the detector uses in the function. If you use an event rate function such as count or rare, do not specify this field. The `field_name` cannot contain double quotes or backslashes. */ field_name?: Field + /** The analysis function that is used. For example, `count`, `rare`, `mean`, `min`, `max`, or `sum`. */ function?: string + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */ over_field_name?: Field + /** The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: Field + /** Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ use_null?: boolean } export interface MlDetectorRead { + /** The field used to split the data. + * In particular, this property is used for analyzing the splits with respect to their own history. + * It is used for finding unusual values in the context of the split. */ by_field_name?: Field + /** An array of custom rule objects, which enable you to customize the way detectors operate. + * For example, a rule may dictate to the detector conditions under which results should be skipped. + * Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[] + /** A description of the detector. */ detector_description?: string + /** A unique identifier for the detector. + * This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */ detector_index?: integer + /** Contains one of the following values: `all`, `none`, `by`, or `over`. + * If set, frequent entities are excluded from influencing the anomaly results. + * Entities can be considered frequent over time or frequent in a population. 
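A sketch of a detector carrying a custom rule of the `MlDetectionRule` shape above; the function, fields, and threshold are illustrative.

[source,ts]
----
// Sketch: a detector whose custom rule skips results when the actual value is low.
const detector = {
  function: 'mean',
  field_name: 'responsetime',     // hypothetical metric field
  by_field_name: 'airline',       // split the analysis per airline
  custom_rules: [
    {
      actions: ['skip_result'],
      conditions: [{ applies_to: 'actual', operator: 'lt', value: 10 }]
    }
  ]
}
----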
+ * If you are working with both over and by fields, then you can set `exclude_frequent` to all for both fields, or to `by` or `over` for those specific fields. */ exclude_frequent?: MlExcludeFrequent + /** The field that the detector uses in the function. + * If you use an event rate function such as `count` or `rare`, do not specify this field. */ field_name?: Field + /** The analysis function that is used. + * For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. */ function: string + /** The field used to split the data. + * In particular, this property is used for analyzing the splits with respect to the history of all splits. + * It is used for finding unusual values in the population of all splits. */ over_field_name?: Field + /** The field used to segment the analysis. + * When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: Field + /** Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ use_null?: boolean } export interface MlDetectorUpdate { + /** A unique identifier for the detector. + * This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */ detector_index: integer + /** A description of the detector. */ description?: string + /** An array of custom rule objects, which enable you to customize the way detectors operate. + * For example, a rule may dictate to the detector conditions under which results should be skipped. + * Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[] } @@ -17242,100 +25188,228 @@ export interface MlExponentialAverageCalculationContext { previous_exponential_average_ms?: DurationValue } +export type MlFeatureExtractor = MlQueryFeatureExtractor + export interface MlFillMaskInferenceOptions { + /** The string/token which will be removed from incoming documents and replaced with the inference prediction(s). + * In a response, this field contains the mask token for the specified model/tokenizer. Each model and tokenizer + * has a predefined mask token which cannot be changed. Thus, it is recommended not to set this value in requests. + * However, if this field is present in a request, its value must match the predefined value for that model/tokenizer, + * otherwise the request will fail. */ mask_token?: string + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options to update when inferring */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string vocabulary: MlVocabulary } export interface MlFillMaskInferenceUpdateOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string } export interface MlFilter { + /** A description of the filter. */ description?: string + /** A string that uniquely identifies a filter. */ filter_id: Id + /** An array of strings which is the filter item list. */ items: string[] } export interface MlFilterRef { + /** The identifier for the filter. 
*/ filter_id: Id + /** If set to `include`, the rule applies for values in the filter. If set to `exclude`, the rule applies for values not in the filter. */ filter_type?: MlFilterType } export type MlFilterType = 'include' | 'exclude' export interface MlGeoResults { + /** The actual value for the bucket formatted as a `geo_point`. */ actual_point?: string + /** The typical value for the bucket formatted as a `geo_point`. */ typical_point?: string } export interface MlHyperparameter { + /** A positive number showing how much the parameter influences the variation of the loss function. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. */ absolute_importance?: double + /** Name of the hyperparameter. */ name: Name + /** A number between 0 and 1 showing the proportion of influence on the variation of the loss function among all tuned hyperparameters. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. */ relative_importance?: double + /** Indicates if the hyperparameter is specified by the user (true) or optimized (false). */ supplied: boolean + /** The value of the hyperparameter, either optimized or specified by the user. */ value: double } export interface MlHyperparameters { + /** Advanced configuration option. + * Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. + * This parameter affects loss calculations by acting as a multiplier of the tree depth. + * Higher alpha values result in shallower trees and faster training times. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to zero. */ alpha?: double + /** Advanced configuration option. + * Regularization parameter to prevent overfitting on the training data set. + * Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. + * A high lambda value causes training to favor small leaf weights. + * This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. + * A small lambda value results in large individual trees and slower training. + * By default, this value is calculated during hyperparameter optimization. + * It must be a nonnegative value. */ lambda?: double + /** Advanced configuration option. + * Regularization parameter to prevent overfitting on the training data set. + * Multiplies a linear penalty associated with the size of individual trees in the forest. + * A high gamma value causes training to prefer small trees. + * A small gamma value results in larger individual trees and slower training. + * By default, this value is calculated during hyperparameter optimization. + * It must be a nonnegative value. */ gamma?: double + /** Advanced configuration option. + * The shrinkage applied to the weights. + * Smaller values result in larger forests which have a better generalization error. + * However, larger forests cause slower training. + * By default, this value is calculated during hyperparameter optimization. + * It must be a value between `0.001` and `1`. */ eta?: double + /** Advanced configuration option. + * Specifies the rate at which `eta` increases for each new tree that is added to the forest. + * For example, a rate of 1.05 increases `eta` by 5% for each extra tree. 
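Filters and filter references combine as sketched here, assuming the 8.x client API; the filter id and the scoped field name are placeholders.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Sketch: create a filter, then scope a detector rule with a filter reference.
await client.ml.putFilter({
  filter_id: 'safe_domains',                   // placeholder id
  description: 'Domains we never want flagged',
  items: ['www.elastic.co', 'wikipedia.org']
})

const rule = {
  actions: ['skip_result'],
  scope: {
    // MlFilterRef: apply the rule only to values in the filter
    highest_registered_domain: { filter_id: 'safe_domains', filter_type: 'include' }
  }
}
----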
+ * By default, this value is calculated during hyperparameter optimization. + * It must be between `0.5` and `2`. */ eta_growth_rate_per_tree?: double + /** Advanced configuration option. + * Defines the fraction of features that will be used when selecting a random bag for each candidate split. + * By default, this value is calculated during hyperparameter optimization. */ feature_bag_fraction?: double + /** Advanced configuration option. + * Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. + * A small value results in the use of a small fraction of the data. + * If this value is set to be less than 1, accuracy typically improves. + * However, too small a value may result in poor convergence for the ensemble and so require more trees. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than zero and less than or equal to 1. */ downsample_factor?: double + /** If the algorithm fails to determine a non-trivial tree (more than a single leaf), this parameter determines how many such consecutive failures are tolerated. + * Once the number of attempts exceeds the threshold, the forest training stops. */ max_attempts_to_add_tree?: integer + /** Advanced configuration option. + * A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. + * The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. + * By default, this value is calculated during hyperparameter optimization. */ max_optimization_rounds_per_hyperparameter?: integer + /** Advanced configuration option. + * Defines the maximum number of decision trees in the forest. + * The maximum value is 2000. + * By default, this value is calculated during hyperparameter optimization. */ max_trees?: integer + /** The maximum number of folds for the cross-validation procedure. */ num_folds?: integer + /** Determines the maximum number of splits for every feature that can occur in a decision tree when the tree is trained. */ num_splits_per_feature?: integer + /** Advanced configuration option. + * Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. + * This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to 0. */ soft_tree_depth_limit?: integer + /** Advanced configuration option. + * This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to 0.01. */ soft_tree_depth_tolerance?: double } export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' | 'definition_status' export interface MlInferenceConfigCreateContainer { + /** Regression configuration for inference. */ regression?: MlRegressionInferenceOptions + /** Classification configuration for inference. */ classification?: MlClassificationInferenceOptions + /** Text classification configuration for inference.
*/ text_classification?: MlTextClassificationInferenceOptions + /** Zero-shot classification configuration for inference. */ zero_shot_classification?: MlZeroShotClassificationInferenceOptions + /** Fill mask configuration for inference. */ fill_mask?: MlFillMaskInferenceOptions + learning_to_rank?: MlLearningToRankConfig + /** Named entity recognition configuration for inference. */ ner?: MlNerInferenceOptions + /** Pass through configuration for inference. */ pass_through?: MlPassThroughInferenceOptions + /** Text embedding configuration for inference. */ text_embedding?: MlTextEmbeddingInferenceOptions + /** Text expansion configuration for inference. */ text_expansion?: MlTextExpansionInferenceOptions + /** Question answering configuration for inference. */ question_answering?: MlQuestionAnsweringInferenceOptions } export interface MlInferenceConfigUpdateContainer { + /** Regression configuration for inference. */ regression?: MlRegressionInferenceOptions + /** Classification configuration for inference. */ classification?: MlClassificationInferenceOptions + /** Text classification configuration for inference. */ text_classification?: MlTextClassificationInferenceUpdateOptions + /** Zero-shot classification configuration for inference. */ zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions + /** Fill mask configuration for inference. */ fill_mask?: MlFillMaskInferenceUpdateOptions + /** Named entity recognition configuration for inference. */ ner?: MlNerInferenceUpdateOptions + /** Pass through configuration for inference. */ pass_through?: MlPassThroughInferenceUpdateOptions + /** Text embedding configuration for inference. */ text_embedding?: MlTextEmbeddingInferenceUpdateOptions + /** Text expansion configuration for inference. */ text_expansion?: MlTextExpansionInferenceUpdateOptions + /** Question answering configuration for inference. */ question_answering?: MlQuestionAnsweringInferenceUpdateOptions } export interface MlInferenceResponseResult { + /** If the model is trained for named entity recognition (NER) tasks, the response contains the recognized entities. */ entities?: MlTrainedModelEntities[] + /** Indicates whether the input text was truncated to meet the model's maximum sequence length limit. This property + * is present only when it is true. */ is_truncated?: boolean + /** If the model is trained for a text classification or zero shot classification task, the response is the + * predicted class. + * For named entity recognition (NER) tasks, it contains the annotated text output. + * For fill mask tasks, it contains the top prediction for replacing the mask token. + * For text embedding tasks, it contains the raw numerical text embedding values. + * For regression models, it is a numerical value. + * For classification models, it may be an integer, double, boolean, or string, depending on the prediction type. */ predicted_value?: MlPredictedValue | MlPredictedValue[] + /** For fill mask tasks, the response contains the input text sequence with the mask token replaced by the predicted + * value. */ predicted_value_sequence?: string + /** Specifies a probability for the predicted value. */ prediction_probability?: double + /** Specifies a confidence score for the predicted value. */ prediction_score?: double + /** For fill mask, text classification, and zero shot classification tasks, the response contains a list of top + * class entries.
*/ top_classes?: MlTopClassEntry[] + /** If the request failed, the response contains the reason for the failure. */ warning?: string + /** The feature importance for the inference results. Relevant only for classification or regression models. */ feature_importance?: MlTrainedModelInferenceFeatureImportance[] } @@ -17345,42 +25419,114 @@ export interface MlInfluence { } export interface MlInfluencer { + /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ bucket_span: DurationValue + /** A normalized score between 0 and 100, which is based on the probability of the influencer in this bucket aggregated + * across detectors. Unlike `initial_influencer_score`, this value is updated by a re-normalization process as new + * data is analyzed. */ influencer_score: double + /** The field name of the influencer. */ influencer_field_name: Field + /** The entity that influenced, contributed to, or was to blame for the anomaly. */ influencer_field_value: string + /** A normalized score between 0 and 100, which is based on the probability of the influencer aggregated across detectors. + * This is the initial value that was calculated at the time the bucket was processed. */ initial_influencer_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** Identifier for the anomaly detection job. */ job_id: Id + /** The probability that the influencer has this behavior, in the range 0 to 1. This value can be held to a high + * precision of over 300 decimal places, so the `influencer_score` is provided as a human-readable and friendly + * interpretation of this value. */ probability: double + /** Internal. This value is always set to `influencer`. */ result_type: string + /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime + /** Additional influencer properties are added, depending on the fields being analyzed. For example, if it’s + * analyzing `user_name` as an influencer, a field `user_name` is added to the result document. This + * information enables you to filter the anomaly results more easily. */ foo?: string } export interface MlJob { + /** Advanced configuration option. + * Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ allow_lazy_open: boolean + /** The analysis configuration, which specifies how to analyze the data. + * After you create a job, you cannot change the analysis configuration; all the properties are informational. */ analysis_config: MlAnalysisConfig + /** Limits can be applied for the resources required to hold the mathematical models in memory. + * These limits are approximate and can be set per job. + * They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ analysis_limits?: MlAnalysisLimits + /** Advanced configuration option. + * The time between each periodic persistence of the model. + * The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. + * The smallest allowed value is 1 hour. */ background_persist_interval?: Duration blocked?: MlJobBlocked create_time?: DateTime + /** Advanced configuration option. + * Contains custom metadata about the job.
*/ custom_settings?: MlCustomSettings + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. + * It specifies a period of time (in days) after which only the first snapshot per day is retained. + * This period is relative to the timestamp of the most recent snapshot for this job. + * Valid values range from 0 to `model_snapshot_retention_days`. */ daily_model_snapshot_retention_after_days?: long + /** The data description defines the format of the input data when you send data to the job by using the post data API. + * Note that when configuring a datafeed, these properties are automatically set. + * When data is received via the post data API, it is not stored in Elasticsearch. + * Only the results for anomaly detection are retained. */ data_description: MlDataDescription + /** The datafeed, which retrieves data from Elasticsearch for analysis by the job. + * You can associate only one datafeed with each anomaly detection job. */ datafeed_config?: MlDatafeed + /** Indicates that the process of deleting the job is in progress but not yet completed. + * It is only reported when `true`. */ deleting?: boolean + /** A description of the job. */ description?: string + /** If the job closed or failed, this is the time the job finished, otherwise it is `null`. + * This property is informational; you cannot change its value. */ finished_time?: DateTime + /** A list of job groups. + * A job can belong to no groups or many. */ groups?: string[] + /** Identifier for the anomaly detection job. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ job_id: Id + /** Reserved for future use, currently set to `anomaly_detector`. */ job_type?: string + /** The machine learning configuration version number at which the job was created. */ job_version?: VersionString + /** This advanced configuration option stores model information along with the results. + * It provides a more detailed view into anomaly detection. + * Model plot provides a simplified and indicative view of the model and its bounds. */ model_plot_config?: MlModelPlotConfig model_snapshot_id?: Id + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. + * It specifies the maximum period of time (in days) that snapshots are retained. + * This period is relative to the timestamp of the most recent snapshot for this job. + * By default, snapshots ten days older than the newest snapshot are deleted. */ model_snapshot_retention_days: long + /** Advanced configuration option. + * The period over which adjustments to the score are applied, as new data is seen. + * The default value is the longer of 30 days or 100 `bucket_spans`. */ renormalization_window_days?: long + /** A text string that affects the name of the machine learning results index. + * The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */ results_index_name: IndexName + /** Advanced configuration option. + * The period of time (in days) that results are retained. + * Age is calculated relative to the timestamp of the latest bucket result. + * If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. + * The default value is null, which means all results are retained.
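As a sketch of how these job properties come together when creating a job (assuming the 8.x client API; the id, bucket span, and field names are placeholders):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Sketch: create an anomaly detection job with the properties described above.
await client.ml.putJob({
  job_id: 'example-job',                               // placeholder id
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'responsetime' }]
  },
  data_description: { time_field: 'timestamp', time_format: 'epoch_ms' },
  model_snapshot_retention_days: 10,
  results_index_name: 'shared'                         // yields .ml-anomalies-shared
})
----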
+ * Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results.
+ * Annotations added by users are retained forever. */ results_retention_days?: long }
@@ -17392,22 +25538,66 @@ export interface MlJobBlocked { }
 export type MlJobBlockedReason = 'delete' | 'reset' | 'revert'
 export interface MlJobConfig {
+ /** Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ allow_lazy_open?: boolean
+ /** The analysis configuration, which specifies how to analyze the data.
+ * After you create a job, you cannot change the analysis configuration; all the properties are informational. */ analysis_config: MlAnalysisConfig
+ /** Limits can be applied for the resources required to hold the mathematical models in memory.
+ * These limits are approximate and can be set per job.
+ * They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ analysis_limits?: MlAnalysisLimits
+ /** Advanced configuration option.
+ * The time between each periodic persistence of the model.
+ * The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time.
+ * The smallest allowed value is 1 hour. */ background_persist_interval?: Duration
+ /** Advanced configuration option.
+ * Contains custom metadata about the job. */ custom_settings?: MlCustomSettings
+ /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job.
+ * It specifies a period of time (in days) after which only the first snapshot per day is retained.
+ * This period is relative to the timestamp of the most recent snapshot for this job. */ daily_model_snapshot_retention_after_days?: long
+ /** The data description defines the format of the input data when you send data to the job by using the post data API.
+ * Note that when configuring a datafeed, these properties are automatically set. */ data_description: MlDataDescription
+ /** The datafeed, which retrieves data from Elasticsearch for analysis by the job.
+ * You can associate only one datafeed with each anomaly detection job. */ datafeed_config?: MlDatafeedConfig
+ /** A description of the job. */ description?: string
+ /** A list of job groups. A job can belong to no groups or many. */ groups?: string[]
+ /** Identifier for the anomaly detection job.
+ * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
+ * It must start and end with alphanumeric characters. */ job_id?: Id
+ /** Reserved for future use, currently set to `anomaly_detector`. */ job_type?: string
+ /** This advanced configuration option stores model information along with the results.
+ * It provides a more detailed view into anomaly detection.
+ * Model plot provides a simplified and indicative view of the model and its bounds. */ model_plot_config?: MlModelPlotConfig
+ /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job.
+ * It specifies the maximum period of time (in days) that snapshots are retained.
+ * This period is relative to the timestamp of the most recent snapshot for this job.
+ * The default value is `10`, which means snapshots ten days older than the newest snapshot are deleted. */ model_snapshot_retention_days?: long
+ /** Advanced configuration option. 
+ * The period over which adjustments to the score are applied, as new data is seen.
+ * The default value is the longer of 30 days or 100 `bucket_spans`. */ renormalization_window_days?: long
+ /** A text string that affects the name of the machine learning results index.
+ * The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */ results_index_name?: IndexName
+ /** Advanced configuration option.
+ * The period of time (in days) that results are retained.
+ * Age is calculated relative to the timestamp of the latest bucket result.
+ * If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch.
+ * The default value is null, which means all results are retained.
+ * Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results.
+ * Annotations added by users are retained forever. */ results_retention_days?: long }
@@ -17430,15 +25620,30 @@ export interface MlJobStatistics { }
 export interface MlJobStats {
+ /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */ assignment_explanation?: string
+ /** An object that describes the quantity of input to the job and any related error counts.
+ * The `data_count` values are cumulative for the lifetime of a job.
+ * If a model snapshot is reverted or old results are deleted, the job counts are not reset. */ data_counts: MlDataCounts
+ /** An object that provides statistical information about forecasts belonging to this job.
+ * Some statistics are omitted if no forecasts have been made. */ forecasts_stats: MlJobForecastStatistics
+ /** Identifier for the anomaly detection job. */ job_id: string
+ /** An object that provides information about the size and contents of the model. */ model_size_stats: MlModelSizeStats
+ /** Contains properties for the node that runs the job.
+ * This information is available only for open jobs.
+ * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: MlDiscoveryNodeCompact
+ /** For open jobs only, the elapsed time for which the job has been open. */ open_time?: DateTime
+ /** The status of the anomaly detection job, which can be one of the following values: `closed`, `closing`, `failed`, `opened`, `opening`. */ state: MlJobState
+ /** An object that provides statistical information about the timing aspects of this job. */ timing_stats: MlJobTimingStats
+ /** Indicates that the process of deleting the job is in progress but not yet completed. It is only reported when `true`. */ deleting?: boolean }
@@ -17453,6 +25658,12 @@ export interface MlJobTimingStats { minimum_bucket_processing_time_ms?: DurationValue }
+export interface MlLearningToRankConfig {
+ default_params?: Record
+ feature_extractors?: Record[]
+ num_top_feature_importance_values: integer
+}
+
 export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit'
 export interface MlModelPackageConfig {
@@ -17473,8 +25684,11 @@ export interface MlModelPackageConfig { }
 export interface MlModelPlotConfig {
+ /** If true, enables calculation and storage of the model change annotations for each entity that is being analyzed. */ annotations_enabled?: boolean
+ /** If true, enables calculation and storage of the model bounds for each entity that is being analyzed. 
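+ * For example (editor's illustrative note, not part of the upstream patch), `{ enabled: true, terms: 'region,airline' }` stores bounds only for the listed partition or by field values.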
*/ enabled?: boolean + /** Limits data collection to this comma separated list of partition or by field values. If terms are not specified or it is an empty string, no filtering is applied. Wildcards are not supported. Only the specified terms can be viewed when using the Single Metric Viewer. */ terms?: Field } @@ -17504,15 +25718,25 @@ export interface MlModelSizeStats { } export interface MlModelSnapshot { + /** An optional description of the job. */ description?: string + /** A numerical character string that uniquely identifies the job that the snapshot was created for. */ job_id: Id + /** The timestamp of the latest processed record. */ latest_record_time_stamp?: integer + /** The timestamp of the latest bucket result. */ latest_result_time_stamp?: integer + /** The minimum version required to be able to restore the model snapshot. */ min_version: VersionString + /** Summary information describing the model. */ model_size_stats?: MlModelSizeStats + /** If true, this snapshot will not be deleted during automatic cleanup of snapshots older than model_snapshot_retention_days. However, this snapshot will be deleted when the job is deleted. The default value is false. */ retain: boolean + /** For internal use only. */ snapshot_doc_count: long + /** A numerical character string that uniquely identifies the model snapshot. */ snapshot_id: Id + /** The creation timestamp for the snapshot. */ timestamp: long } @@ -17520,19 +25744,25 @@ export interface MlModelSnapshotUpgrade { job_id: Id snapshot_id: Id state: MlSnapshotUpgradeState + /** @remarks This property is not supported on Elastic Cloud Serverless. */ node: MlDiscoveryNode assignment_explanation: string } export interface MlNerInferenceOptions { + /** The tokenization options */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** The token classification labels. Must be IOB formatted tags */ classification_labels?: string[] vocabulary?: MlVocabulary } export interface MlNerInferenceUpdateOptions { + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string } @@ -17540,30 +25770,52 @@ export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig } export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig { + /** Should the tokenizer prefix input with a space character */ add_prefix_space?: boolean } export interface MlNlpTokenizationUpdateOptions { + /** Truncate options to apply */ truncate?: MlTokenizationTruncate + /** Span options to apply */ span?: integer } export interface MlOutlierDetectionParameters { + /** Specifies whether the feature influence calculation is enabled. */ compute_feature_influence?: boolean + /** The minimum outlier score that a document needs to have in order to calculate its feature influence score. + * Value range: 0-1 */ feature_influence_threshold?: double + /** The method that outlier detection uses. + * Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. + * The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score. 
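+ * For example (editor's illustrative note, not part of the upstream patch), `{ method: 'lof', n_neighbors: 20 }` selects local outlier factor with an explicit neighborhood size instead of the ensemble default.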
*/ method?: string + /** Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. + * When the value is not set, different values are used for different ensemble members. + * This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ n_neighbors?: integer + /** The proportion of the data set that is assumed to be outlying prior to outlier detection. + * For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */ outlier_fraction?: double + /** If `true`, the following operation is performed on the columns before computing outlier scores: (x_i - mean(x_i)) / sd(x_i). */ standardization_enabled?: boolean } export interface MlOverallBucket { + /** The length of the bucket in seconds. Matches the job with the longest bucket_span value. */ bucket_span: DurationValue + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** An array of objects that contain the max_anomaly_score per job_id. */ jobs: MlOverallBucketJob[] + /** The top_n average of the maximum bucket anomaly_score per job. */ overall_score: double + /** Internal. This is always set to overall_bucket. */ result_type: string + /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime + /** The start time of the bucket for which these results were calculated. */ timestamp_string?: DateTime } @@ -17573,45 +25825,70 @@ export interface MlOverallBucketJob { } export interface MlPage { + /** Skips the specified number of items. */ from?: integer + /** Specifies the maximum number of items to obtain. */ size?: integer } export interface MlPassThroughInferenceOptions { + /** The tokenization options */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string vocabulary?: MlVocabulary } export interface MlPassThroughInferenceUpdateOptions { + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string } export interface MlPerPartitionCategorization { + /** To enable this setting, you must also set the `partition_field_name` property to the same value in every detector that uses the keyword `mlcategory`. Otherwise, job creation fails. */ enabled?: boolean + /** This setting can be set to true only if per-partition categorization is enabled. If true, both categorization and subsequent anomaly detection stops for partitions where the categorization status changes to warn. This setting makes it viable to have a job where it is expected that categorization works well for some partitions but not others; you do not pay the cost of bad categorization forever in the partitions where it works badly. */ stop_on_warn?: boolean } export type MlPredictedValue = ScalarValue | ScalarValue[] +export interface MlQueryFeatureExtractor { + default_score?: float + feature_name: string + query: QueryDslQueryContainer +} + export interface MlQuestionAnsweringInferenceOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. 
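+ * For example (editor's illustrative note, not part of the upstream patch), `num_top_classes: 3` returns the three highest-probability predictions instead of only the best one.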
*/ num_top_classes?: integer
+ /** The tokenization options to update when inferring */ tokenization?: MlTokenizationConfigContainer
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string
+ /** The maximum answer length to consider */ max_answer_length?: integer }
 export interface MlQuestionAnsweringInferenceUpdateOptions {
+ /** The question to answer given the inference context */ question: string
+ /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer
+ /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string
+ /** The maximum answer length to consider for extraction */ max_answer_length?: integer }
 export interface MlRegressionInferenceOptions {
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: Field
+ /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer }
@@ -17620,67 +25897,97 @@ export type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'st
 export type MlRuleAction = 'skip_result' | 'skip_model_update'
 export interface MlRuleCondition {
+ /** Specifies the result property to which the condition applies. If your detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can only specify conditions that apply to time. */ applies_to: MlAppliesTo
+ /** Specifies the condition operator. The available options are greater than, greater than or equals, less than, and less than or equals. */ operator: MlConditionOperator
+ /** The value that is compared against the `applies_to` field using the operator. */ value: double }
 export interface MlRunningStateSearchInterval {
+ /** The end time. */ end?: Duration
+ /** The end time as an epoch in milliseconds. */ end_ms: DurationValue
+ /** The start time. */ start?: Duration
+ /** The start time as an epoch in milliseconds. */ start_ms: DurationValue }
 export type MlSnapshotUpgradeState = 'loading_old_state' | 'saving_new_state' | 'stopped' | 'failed'
 export interface MlTextClassificationInferenceOptions {
+ /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer
+ /** The tokenization options */ tokenization?: MlTokenizationConfigContainer
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string
+ /** Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels */ classification_labels?: string[]
+ vocabulary?: MlVocabulary }
 export interface MlTextClassificationInferenceUpdateOptions {
+ /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer
+ /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string
+ /** Classification labels to apply other than the stored labels. 
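+ * For example (editor's illustrative note, not part of the upstream patch), `classification_labels: ['positive', 'negative']` renames a two-class output.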
Must have the same dimensions as the default configured labels */ classification_labels?: string[] }
 export interface MlTextEmbeddingInferenceOptions {
+ /** The number of dimensions in the embedding output */ embedding_size?: integer
+ /** The tokenization options */ tokenization?: MlTokenizationConfigContainer
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string vocabulary: MlVocabulary }
 export interface MlTextEmbeddingInferenceUpdateOptions { tokenization?: MlNlpTokenizationUpdateOptions
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string }
 export interface MlTextExpansionInferenceOptions {
+ /** The tokenization options */ tokenization?: MlTokenizationConfigContainer
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string vocabulary: MlVocabulary }
 export interface MlTextExpansionInferenceUpdateOptions { tokenization?: MlNlpTokenizationUpdateOptions
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string }
 export interface MlTimingStats {
+ /** Runtime of the analysis in milliseconds. */ elapsed_time: DurationValue
+ /** Runtime of the latest iteration of the analysis in milliseconds. */ iteration_time?: DurationValue }
 export interface MlTokenizationConfigContainer {
+ /** Indicates BERT tokenization and its options */ bert?: MlNlpBertTokenizationConfig
+ /** Indicates BERT Japanese tokenization and its options */ bert_ja?: MlNlpBertTokenizationConfig
+ /** Indicates MPNET tokenization and its options */ mpnet?: MlNlpBertTokenizationConfig
+ /** Indicates RoBERTa tokenization and its options */ roberta?: MlNlpRobertaTokenizationConfig
+ xlm_roberta?: MlXlmRobertaTokenizationConfig }
 export type MlTokenizationTruncate = 'first' | 'second' | 'none'
@@ -17692,131 +25999,218 @@ export interface MlTopClassEntry { }
 export interface MlTotalFeatureImportance {
+ /** The feature for which this importance was calculated. */ feature_name: Name
+ /** A collection of feature importance statistics related to the training data set for this particular feature. */ importance: MlTotalFeatureImportanceStatistics[]
+ /** If the trained model is a classification model, feature importance statistics are gathered per target class value. */ classes: MlTotalFeatureImportanceClass[] }
 export interface MlTotalFeatureImportanceClass {
+ /** The target class value. Could be a string, boolean, or number. */ class_name: Name
+ /** A collection of feature importance statistics related to the training data set for this particular feature. */ importance: MlTotalFeatureImportanceStatistics[] }
 export interface MlTotalFeatureImportanceStatistics {
+ /** The average magnitude of this feature across all the training data. This value is the average of the absolute values of the importance for this feature. */ mean_magnitude: double
+ /** The maximum importance value across all the training data for this feature. */ max: integer
+ /** The minimum importance value across all the training data for this feature. */ min: integer }
 export interface MlTrainedModelAssignment { adaptive_allocations?: MlAdaptiveAllocationsSettings | null
+ /** The overall assignment state. 
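+ * For example (editor's illustrative note, not part of the upstream patch), a value of `started` indicates that the deployment is assigned and able to serve inference requests.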
*/ assignment_state: MlDeploymentAssignmentState max_assigned_allocations?: integer reason?: string
+ /** The allocation state for each node. */ routing_table: Record
+ /** The timestamp when the deployment started. */ start_time: DateTime task_parameters: MlTrainedModelAssignmentTaskParameters }
+export interface MlTrainedModelAssignmentRoutingStateAndReason {
+ /** The reason for the current state. It is usually populated only when the
+ * `routing_state` is `failed`. */
+ reason?: string
+ /** The current routing state. */
+ routing_state: MlRoutingState
+}
+
 export interface MlTrainedModelAssignmentRoutingTable {
+ /** The reason for the current state. It is usually populated only when the
+ * `routing_state` is `failed`. */ reason?: string
+ /** The current routing state. */ routing_state: MlRoutingState
+ /** Current number of allocations. */ current_allocations: integer
+ /** Target number of allocations. */ target_allocations: integer }
 export interface MlTrainedModelAssignmentTaskParameters {
+ /** The size of the trained model in bytes. */ model_bytes: ByteSize
+ /** The unique identifier for the trained model. */ model_id: Id
+ /** The unique identifier for the trained model deployment. */ deployment_id: Id
+ /** The size of the trained model cache. */ cache_size?: ByteSize
+ /** The total number of allocations this model is assigned across ML nodes. */ number_of_allocations: integer priority: MlTrainingPriority per_deployment_memory_bytes: ByteSize per_allocation_memory_bytes: ByteSize
+ /** The number of inference requests allowed in the queue at a time. */ queue_capacity: integer
+ /** Number of threads per allocation. */ threads_per_allocation: integer }
 export interface MlTrainedModelConfig {
+ /** Identifier for the trained model. */ model_id: Id
+ /** The model type */ model_type?: MlTrainedModelType
+ /** A comma-delimited string of tags. A trained model can have many tags, or none. */ tags: string[]
+ /** The Elasticsearch version number in which the trained model was created. */ version?: VersionString compressed_definition?: string
+ /** Information on the creator of the trained model. */ created_by?: string
+ /** The time when the trained model was created. */ create_time?: DateTime
+ /** Any field map described in the inference configuration takes precedence. */ default_field_map?: Record
+ /** The free-text description of the trained model. */ description?: string
+ /** The estimated heap usage in bytes to keep the trained model in memory. */ estimated_heap_memory_usage_bytes?: integer
+ /** The estimated number of operations to use the trained model. */ estimated_operations?: integer
+ /** True if the full model definition is present. */ fully_defined?: boolean
+ /** The default configuration for inference. This can be either a regression, classification, or one of the many NLP focused configurations. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. */ inference_config?: MlInferenceConfigCreateContainer
+ /** The input field names for the model definition. */ input: MlTrainedModelConfigInput
+ /** The license level of the trained model. */ license_level?: string
+ /** An object containing metadata about the trained model. For example, models created by data frame analytics contain analysis_config and input objects. 
*/ metadata?: MlTrainedModelConfigMetadata model_size_bytes?: ByteSize model_package?: MlModelPackageConfig location?: MlTrainedModelLocation + platform_architecture?: string prefix_strings?: MlTrainedModelPrefixStrings } export interface MlTrainedModelConfigInput { + /** An array of input field names for the model. */ field_names: Field[] } export interface MlTrainedModelConfigMetadata { model_aliases?: string[] + /** An object that contains the baseline for feature importance values. For regression analysis, it is a single value. For classification analysis, there is a value for each class. */ feature_importance_baseline?: Record + /** List of the available hyperparameters optimized during the fine_parameter_tuning phase as well as specified by the user. */ hyperparameters?: MlHyperparameter[] + /** An array of the total feature importance for each feature used from the training data set. This array of objects is returned if data frame analytics trained the model and the request includes total_feature_importance in the include request parameter. */ total_feature_importance?: MlTotalFeatureImportance[] } export interface MlTrainedModelDeploymentAllocationStatus { + /** The current number of nodes where the model is allocated. */ allocation_count: integer + /** The detailed allocation state related to the nodes. */ state: MlDeploymentAllocationState + /** The desired number of nodes for model allocation. */ target_allocation_count: integer } export interface MlTrainedModelDeploymentNodesStats { + /** The average time for each inference call to complete on this node. */ average_inference_time_ms?: DurationValue average_inference_time_ms_last_minute?: DurationValue + /** The average time for each inference call to complete on this node, excluding cache */ average_inference_time_ms_excluding_cache_hits?: DurationValue + /** The number of errors when evaluating the trained model. */ error_count?: integer + /** The total number of inference calls made against this node for this model. */ inference_count?: long inference_cache_hit_count?: long inference_cache_hit_count_last_minute?: long + /** The epoch time stamp of the last inference call for the model on this node. */ last_access?: EpochTime + /** Information pertaining to the node. + * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: MlDiscoveryNode + /** The number of allocations assigned to this node. */ number_of_allocations?: integer + /** The number of inference requests queued to be processed. */ number_of_pending_requests?: integer peak_throughput_per_minute: long - rejection_execution_count?: integer - routing_state: MlTrainedModelAssignmentRoutingTable + /** The number of inference requests that were not processed because the queue was full. */ + rejected_execution_count?: integer + /** The current routing state and reason for the current routing state for this allocation. */ + routing_state: MlTrainedModelAssignmentRoutingStateAndReason + /** The epoch timestamp when the allocation started. */ start_time?: EpochTime + /** The number of threads used by each allocation during inference. */ threads_per_allocation?: integer throughput_last_minute: integer + /** The number of inference requests that timed out before being processed. */ timeout_count?: integer } export interface MlTrainedModelDeploymentStats { adaptive_allocations?: MlAdaptiveAllocationsSettings + /** The detailed allocation status for the deployment. 
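+ * For example (editor's illustrative note, not part of the upstream patch), `{ allocation_count: 2, target_allocation_count: 2, state: 'fully_allocated' }` describes a deployment that has reached its requested allocations.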
*/ allocation_status?: MlTrainedModelDeploymentAllocationStatus cache_size?: ByteSize
+ /** The unique identifier for the trained model deployment. */ deployment_id: Id
+ /** The sum of `error_count` for all nodes in the deployment. */ error_count?: integer
+ /** The sum of `inference_count` for all nodes in the deployment. */ inference_count?: integer
+ /** The unique identifier for the trained model. */ model_id: Id
+ /** The deployment stats for each node that currently has the model allocated.
+ * In serverless, stats are reported for a single unnamed virtual node. */ nodes: MlTrainedModelDeploymentNodesStats[]
+ /** The number of allocations requested. */ number_of_allocations?: integer peak_throughput_per_minute: long priority: MlTrainingPriority
+ /** The number of inference requests that can be queued before new requests are rejected. */ queue_capacity?: integer
+ /** The sum of `rejected_execution_count` for all nodes in the deployment.
+ * Individual nodes reject an inference request if the inference queue is full.
+ * The queue size is controlled by the `queue_capacity` setting in the start
+ * trained model deployment API. */ rejected_execution_count?: integer
+ /** The reason for the current deployment state. Usually only populated when
+ * the model is not deployed to a node. */ reason?: string
+ /** The epoch timestamp when the deployment started. */ start_time: EpochTime
+ /** The overall state of the deployment. */ state?: MlDeploymentAssignmentState
+ /** The number of threads used by each allocation during inference. */ threads_per_allocation?: integer
+ /** The sum of `timeout_count` for all nodes in the deployment. */ timeout_count?: integer }
@@ -17840,10 +26234,19 @@ export interface MlTrainedModelInferenceFeatureImportance { }
 export interface MlTrainedModelInferenceStats {
+ /** The number of times the model was loaded for inference and was not retrieved from the cache.
+ * If this number is close to the `inference_count`, the cache is not being appropriately used.
+ * This can be solved by increasing the cache size or its time-to-live (TTL).
+ * Refer to general machine learning settings for the appropriate settings. */ cache_miss_count: integer
+ /** The number of failures when using the model for inference. */ failure_count: integer
+ /** The total number of times the model has been called for inference.
+ * This is across all inference contexts, including all pipelines. */ inference_count: integer
+ /** The number of inference calls where all the training features for the model were missing. */ missing_all_fields_count: integer
+ /** The time when the statistics were last updated. */ timestamp: EpochTime }
@@ -17856,21 +26259,33 @@ export interface MlTrainedModelLocationIndex { }
 export interface MlTrainedModelPrefixStrings {
+ /** String prepended to input at ingest */ ingest?: string
+ /** String prepended to input at search */ search?: string }
 export interface MlTrainedModelSizeStats {
+ /** The size of the model in bytes. */ model_size_bytes: ByteSize
+ /** The amount of memory required to load the model in bytes. */ required_native_memory_bytes: ByteSize }
 export interface MlTrainedModelStats {
+ /** A collection of deployment stats, which is present when the models are deployed. */ deployment_stats?: MlTrainedModelDeploymentStats
+ /** A collection of inference stats fields. */ inference_stats?: MlTrainedModelInferenceStats
+ /** A collection of ingest stats for the model across all nodes.
+ * The values are summations of the individual node statistics. 
+ * The format matches the ingest section in the nodes stats API. */ ingest?: Record + /** The unique identifier of the trained model. */ model_id: Id + /** A collection of model size stats. */ model_size_stats: MlTrainedModelSizeStats + /** The number of ingest pipelines that currently refer to the model. */ pipeline_count: integer } @@ -17879,13 +26294,18 @@ export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch' export type MlTrainingPriority = 'normal' | 'low' export interface MlTransformAuthorization { + /** If an API key was used for the most recent update to the transform, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the transform, its roles at the time of the update are listed in the response. */ roles?: string[] + /** If a service account was used for the most recent update to the transform, the account name is listed in the response. */ service_account?: string } export interface MlValidationLoss { + /** Validation loss values for every added decision tree during the forest growing procedure. */ fold_values: string[] + /** The type of the loss metric. For example, binomial_logistic. */ loss_type: string } @@ -17893,24 +26313,38 @@ export interface MlVocabulary { index: IndexName } +export interface MlXlmRobertaTokenizationConfig extends MlCommonTokenizationConfig { +} + export interface MlZeroShotClassificationInferenceOptions { + /** The tokenization options to update when inferring */ tokenization?: MlTokenizationConfigContainer + /** Hypothesis template used when tokenizing labels for prediction */ hypothesis_template?: string + /** The zero shot classification labels indicating entailment, neutral, and contradiction + * Must contain exactly and only entailment, neutral, and contradiction */ classification_labels: string[] + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** Indicates if more than one true label exists. */ multi_label?: boolean + /** The labels to predict. */ labels?: string[] } export interface MlZeroShotClassificationInferenceUpdateOptions { + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** Update the configured multi label option. Indicates if more than one true label exists. Defaults to the configured value. */ multi_label?: boolean + /** The labels to predict. */ labels: string[] } export interface MlClearTrainedModelDeploymentCacheRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { model_id?: never } @@ -17923,7 +26357,7 @@ export interface MlClearTrainedModelDeploymentCacheResponse { } export interface MlCloseJobRequest extends RequestBase { -/** Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. */ + /** Identifier for the anomaly detection job. 
It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. */ job_id: Id /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean @@ -17942,7 +26376,7 @@ export interface MlCloseJobResponse { } export interface MlDeleteCalendarRequest extends RequestBase { -/** A string that uniquely identifies a calendar. */ + /** A string that uniquely identifies a calendar. */ calendar_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { calendar_id?: never } @@ -17953,9 +26387,10 @@ export interface MlDeleteCalendarRequest extends RequestBase { export type MlDeleteCalendarResponse = AcknowledgedResponseBase export interface MlDeleteCalendarEventRequest extends RequestBase { -/** A string that uniquely identifies a calendar. */ + /** A string that uniquely identifies a calendar. */ calendar_id: Id - /** Identifier for the scheduled event. You can obtain this identifier by using the get calendar events API. */ + /** Identifier for the scheduled event. + * You can obtain this identifier by using the get calendar events API. */ event_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { calendar_id?: never, event_id?: never } @@ -17966,9 +26401,10 @@ export interface MlDeleteCalendarEventRequest extends RequestBase { export type MlDeleteCalendarEventResponse = AcknowledgedResponseBase export interface MlDeleteCalendarJobRequest extends RequestBase { -/** A string that uniquely identifies a calendar. */ + /** A string that uniquely identifies a calendar. */ calendar_id: Id - /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. */ + /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a + * comma-separated list of jobs or groups. */ job_id: Ids /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { calendar_id?: never, job_id?: never } @@ -17977,13 +26413,16 @@ export interface MlDeleteCalendarJobRequest extends RequestBase { } export interface MlDeleteCalendarJobResponse { + /** A string that uniquely identifies a calendar. */ calendar_id: Id + /** A description of the calendar. */ description?: string + /** A list of anomaly detection job identifiers or group names. */ job_ids: Ids } export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. */ + /** Identifier for the data frame analytics job. */ id: Id /** If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. */ force?: boolean @@ -17998,9 +26437,13 @@ export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase export interface MlDeleteDatafeedRequest extends RequestBase { -/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** A numerical character string that uniquely identifies the datafeed. 
This + * identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It must start and end with alphanumeric + * characters. */ datafeed_id: Id - /** Use to forcefully delete a started datafeed; this method is quicker than stopping and deleting the datafeed. */ + /** Use to forcefully delete a started datafeed; this method is quicker than + * stopping and deleting the datafeed. */ force?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { datafeed_id?: never, force?: never } @@ -18011,9 +26454,11 @@ export interface MlDeleteDatafeedRequest extends RequestBase { export type MlDeleteDatafeedResponse = AcknowledgedResponseBase export interface MlDeleteExpiredDataRequest extends RequestBase { -/** Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. */ + /** Identifier for an anomaly detection job. It can be a job identifier, a + * group name, or a wildcard expression. */ job_id?: Id - /** The desired requests per second for the deletion processes. The default behavior is no throttling. */ + /** The desired requests per second for the deletion processes. The default + * behavior is no throttling. */ requests_per_second?: float /** How long can the underlying delete processes run until they are canceled. */ timeout?: Duration @@ -18028,7 +26473,7 @@ export interface MlDeleteExpiredDataResponse { } export interface MlDeleteFilterRequest extends RequestBase { -/** A string that uniquely identifies a filter. */ + /** A string that uniquely identifies a filter. */ filter_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { filter_id?: never } @@ -18039,13 +26484,20 @@ export interface MlDeleteFilterRequest extends RequestBase { export type MlDeleteFilterResponse = AcknowledgedResponseBase export interface MlDeleteForecastRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** A comma-separated list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all` or `*` the API deletes all forecasts from the job. */ + /** A comma-separated list of forecast identifiers. If you do not specify + * this optional parameter or if you specify `_all` or `*` the API deletes + * all forecasts from the job. */ forecast_id?: Id - /** Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error. */ + /** Specifies whether an error occurs when there are no forecasts. In + * particular, if this parameter is set to `false` and there are no + * forecasts associated with the job, attempts to delete all forecasts + * return an error. */ allow_no_forecasts?: boolean - /** Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error. */ + /** Specifies the period of time to wait for the completion of the delete + * operation. When this period of time elapses, the API fails and returns an + * error. */ timeout?: Duration /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { job_id?: never, forecast_id?: never, allow_no_forecasts?: never, timeout?: never } @@ -18056,13 +26508,17 @@ export interface MlDeleteForecastRequest extends RequestBase { export type MlDeleteForecastResponse = AcknowledgedResponseBase export interface MlDeleteJobRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** Use to forcefully delete an opened job; this method is quicker than closing and deleting the job. */ + /** Use to forcefully delete an opened job; this method is quicker than + * closing and deleting the job. */ force?: boolean - /** Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. */ + /** Specifies whether annotations that have been added by the + * user should be deleted along with any auto-generated annotations when the job is + * reset. */ delete_user_annotations?: boolean - /** Specifies whether the request should return immediately or wait until the job deletion completes. */ + /** Specifies whether the request should return immediately or wait until the + * job deletion completes. */ wait_for_completion?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, force?: never, delete_user_annotations?: never, wait_for_completion?: never } @@ -18073,7 +26529,7 @@ export interface MlDeleteJobRequest extends RequestBase { export type MlDeleteJobResponse = AcknowledgedResponseBase export interface MlDeleteModelSnapshotRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id /** Identifier for the model snapshot. */ snapshot_id: Id @@ -18086,7 +26542,7 @@ export interface MlDeleteModelSnapshotRequest extends RequestBase { export type MlDeleteModelSnapshotResponse = AcknowledgedResponseBase export interface MlDeleteTrainedModelRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id /** Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. */ force?: boolean @@ -18101,7 +26557,7 @@ export interface MlDeleteTrainedModelRequest extends RequestBase { export type MlDeleteTrainedModelResponse = AcknowledgedResponseBase export interface MlDeleteTrainedModelAliasRequest extends RequestBase { -/** The model alias to delete. */ + /** The model alias to delete. */ model_alias: Name /** The trained model ID to which the model alias refers. */ model_id: Id @@ -18114,11 +26570,22 @@ export interface MlDeleteTrainedModelAliasRequest extends RequestBase { export type MlDeleteTrainedModelAliasResponse = AcknowledgedResponseBase export interface MlEstimateModelMemoryRequest extends RequestBase { -/** For a list of the properties that you can specify in the `analysis_config` component of the body of this API. */ + /** For a list of the properties that you can specify in the + * `analysis_config` component of the body of this API. */ analysis_config?: MlAnalysisConfig - /** Estimates of the highest cardinality in a single bucket that is observed for influencer fields over the time period that the job analyzes data. To produce a good answer, values must be provided for all influencer fields. 
Providing values for fields that are not listed as `influencers` has no effect on the estimation. */ + /** Estimates of the highest cardinality in a single bucket that is observed + * for influencer fields over the time period that the job analyzes data. + * To produce a good answer, values must be provided for all influencer + * fields. Providing values for fields that are not listed as `influencers` + * has no effect on the estimation. */ max_bucket_cardinality?: Record - /** Estimates of the cardinality that is observed for fields over the whole time period that the job analyzes data. To produce a good answer, values must be provided for fields referenced in the `by_field_name`, `over_field_name` and `partition_field_name` of any detectors. Providing values for other fields has no effect on the estimation. It can be omitted from the request if no detectors have a `by_field_name`, `over_field_name` or `partition_field_name`. */ + /** Estimates of the cardinality that is observed for fields over the whole + * time period that the job analyzes data. To produce a good answer, values + * must be provided for fields referenced in the `by_field_name`, + * `over_field_name` and `partition_field_name` of any detectors. Providing + * values for other fields has no effect on the estimation. It can be + * omitted from the request if no detectors have a `by_field_name`, + * `over_field_name` or `partition_field_name`. */ overall_cardinality?: Record /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { analysis_config?: never, max_bucket_cardinality?: never, overall_cardinality?: never } @@ -18143,17 +26610,27 @@ export interface MlEvaluateDataFrameConfusionMatrixPrediction { } export interface MlEvaluateDataFrameConfusionMatrixThreshold { + /** True Positive */ tp: integer + /** False Positive */ fp: integer + /** True Negative */ tn: integer + /** False Negative */ fn: integer } export interface MlEvaluateDataFrameDataframeClassificationSummary { + /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. + * It is calculated for a specific class (provided as "class_name") treated as positive. */ auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc + /** Accuracy of predictions (per-class and overall). */ accuracy?: MlEvaluateDataFrameDataframeClassificationSummaryAccuracy + /** Multiclass confusion matrix. */ multiclass_confusion_matrix?: MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix + /** Precision of predictions (per-class and average). */ precision?: MlEvaluateDataFrameDataframeClassificationSummaryPrecision + /** Recall of predictions (per-class and average). */ recall?: MlEvaluateDataFrameDataframeClassificationSummaryRecall } @@ -18196,21 +26673,29 @@ export interface MlEvaluateDataFrameDataframeEvaluationValue { } export interface MlEvaluateDataFrameDataframeOutlierDetectionSummary { + /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. */ auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc + /** Set the different thresholds of the outlier score at where the metric is calculated. */ precision?: Record + /** Set the different thresholds of the outlier score at where the metric is calculated. 
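+ * For example (editor's illustrative note, not part of the upstream patch), a returned value such as `{ '0.25': 0.95, '0.5': 0.79 }` reports the metric at each requested threshold.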
*/ recall?: Record + /** Set the different thresholds of the outlier score at where the metrics (`tp` - true positive, `fp` - false positive, `tn` - true negative, `fn` - false negative) are calculated. */ confusion_matrix?: Record } export interface MlEvaluateDataFrameDataframeRegressionSummary { + /** Pseudo Huber loss function. */ huber?: MlEvaluateDataFrameDataframeEvaluationValue + /** Average squared difference between the predicted values and the actual (`ground truth`) value. */ mse?: MlEvaluateDataFrameDataframeEvaluationValue + /** Average squared difference between the logarithm of the predicted values and the logarithm of the actual (`ground truth`) value. */ msle?: MlEvaluateDataFrameDataframeEvaluationValue + /** Proportion of the variance in the dependent variable that is predictable from the independent variables. */ r_squared?: MlEvaluateDataFrameDataframeEvaluationValue } export interface MlEvaluateDataFrameRequest extends RequestBase { -/** Defines the type of evaluation you want to perform. */ + /** Defines the type of evaluation you want to perform. */ evaluation: MlDataframeEvaluationContainer /** Defines the `index` in which the evaluation will be performed. */ index: IndexName @@ -18223,29 +26708,52 @@ export interface MlEvaluateDataFrameRequest extends RequestBase { } export interface MlEvaluateDataFrameResponse { + /** Evaluation results for a classification analysis. + * It outputs a prediction that identifies to which of the classes each document belongs. */ classification?: MlEvaluateDataFrameDataframeClassificationSummary + /** Evaluation results for an outlier detection analysis. + * It outputs the probability that each document is an outlier. */ outlier_detection?: MlEvaluateDataFrameDataframeOutlierDetectionSummary + /** Evaluation results for a regression analysis which outputs a prediction of values. */ regression?: MlEvaluateDataFrameDataframeRegressionSummary } export interface MlExplainDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id?: Id - /** The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified. */ + /** The configuration of how to source the analysis data. It requires an + * index. Optionally, query and _source may be specified. */ source?: MlDataframeAnalyticsSource - /** The destination configuration, consisting of index and optionally results_field (ml by default). */ + /** The destination configuration, consisting of index and optionally + * results_field (ml by default). */ dest?: MlDataframeAnalyticsDestination - /** The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. */ + /** The analysis configuration, which contains the information necessary to + * perform one of the following types of analysis: classification, outlier + * detection, or regression. */ analysis?: MlDataframeAnalysisContainer /** A description of the job. 
*/ description?: string - /** The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. */ + /** The approximate maximum amount of memory resources that are permitted for + * analytical processing. If your `elasticsearch.yml` file contains an + * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to + * create data frame analytics jobs that have `model_memory_limit` values + * greater than that setting. */ model_memory_limit?: string - /** The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. */ + /** The maximum number of threads to be used by the analysis. Using more + * threads may decrease the time necessary to complete the analysis at the + * cost of using more CPU. Note that the process may use additional threads + * for operational functionality other than the analysis itself. */ max_num_threads?: integer - /** Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis. */ + /** Specify includes and/or excludes patterns to select which fields will be + * included in the analysis. The patterns specified in excludes are applied + * last, therefore excludes takes precedence. In other words, if the same + * field is specified in both includes and excludes, then the field will not + * be included in the analysis. */ analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] - /** Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ + /** Specifies whether this job can start when there is insufficient machine + * learning node capacity for it to be immediately assigned to a node. */ allow_lazy_start?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, source?: never, dest?: never, analysis?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, analyzed_fields?: never, allow_lazy_start?: never } @@ -18254,12 +26762,14 @@ export interface MlExplainDataFrameAnalyticsRequest extends RequestBase { } export interface MlExplainDataFrameAnalyticsResponse { + /** An array of objects that explain selection for each field, sorted by the field names. */ field_selection: MlDataframeAnalyticsFieldSelection[] + /** An array of objects that explain selection for each field, sorted by the field names. */ memory_estimation: MlDataframeAnalyticsMemoryEstimation } export interface MlFlushJobRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id /** Refer to the description for the `advance_time` query parameter. 
*/ advance_time?: DateTime @@ -18279,11 +26789,14 @@ export interface MlFlushJobRequest extends RequestBase { export interface MlFlushJobResponse { flushed: boolean + /** Provides the timestamp (in milliseconds since the epoch) of the end of + * the last bucket that was processed. */ last_finalized_bucket_end?: integer } export interface MlForecastRequest extends RequestBase { -/** Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. */ + /** Identifier for the anomaly detection job. The job must be open when you + * create a forecast; otherwise, an error occurs. */ job_id: Id /** Refer to the description for the `duration` query parameter. */ duration?: Duration @@ -18303,9 +26816,10 @@ export interface MlForecastResponse { } export interface MlGetBucketsRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets. */ + /** The timestamp of a single bucket result. If you do not specify this + * parameter, the API returns information about all buckets. */ timestamp?: DateTime /** Skips the specified number of buckets. */ from?: integer @@ -18338,7 +26852,7 @@ export interface MlGetBucketsResponse { } export interface MlGetCalendarEventsRequest extends RequestBase { -/** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ + /** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ calendar_id: Id /** Specifies to get events with timestamps earlier than this time. */ end?: DateTime @@ -18362,13 +26876,16 @@ export interface MlGetCalendarEventsResponse { } export interface MlGetCalendarsCalendar { + /** A string that uniquely identifies a calendar. */ calendar_id: Id + /** A description of the calendar. */ description?: string + /** An array of anomaly detection job identifiers. */ job_ids: Id[] } export interface MlGetCalendarsRequest extends RequestBase { -/** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ + /** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ calendar_id?: Id /** Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. */ from?: integer @@ -18388,9 +26905,13 @@ export interface MlGetCalendarsResponse { } export interface MlGetCategoriesRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** Identifier for the category, which is unique in the job. 
If you specify neither the category ID nor the partition_field_value, the API returns information about all categories. If you specify only the partition_field_value, it returns information about all categories for the specified partition. */ + /** Identifier for the category, which is unique in the job. If you specify + * neither the category ID nor the partition_field_value, the API returns + * information about all categories. If you specify only the + * partition_field_value, it returns information about all categories for + * the specified partition. */ category_id?: CategoryId /** Skips the specified number of categories. */ from?: integer @@ -18398,7 +26919,8 @@ export interface MlGetCategoriesRequest extends RequestBase { partition_field_value?: string /** Specifies the maximum number of categories to obtain. */ size?: integer - /** Configures pagination. This parameter has the `from` and `size` properties. */ + /** Configures pagination. + * This parameter has the `from` and `size` properties. */ page?: MlPage /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, category_id?: never, from?: never, partition_field_value?: never, size?: never, page?: never } @@ -18412,15 +26934,29 @@ export interface MlGetCategoriesResponse { } export interface MlGetDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. */ + /** Identifier for the data frame analytics job. If you do not specify this + * option, the API returns information for the first hundred data frame + * analytics jobs. */ id?: Id - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no data frame analytics + * jobs that match. + * 2. Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value returns an empty data_frame_analytics array when there + * are no matches and the subset of results when there are partial matches. + * If this parameter is `false`, the request returns a 404 status code when + * there are no matches or only partial matches. */ allow_no_match?: boolean /** Skips the specified number of data frame analytics jobs. */ from?: integer /** Specifies the maximum number of data frame analytics jobs to obtain. */ size?: integer - /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ + /** Indicates if certain fields should be removed from the configuration on + * retrieval. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. 
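// Editorial usage sketch for MlGetCategoriesRequest above, assuming an 8.x
// client instance; the job id and page bounds are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function readCategories(client: Client): Promise<void> {
  // `page` carries the `from` and `size` pagination properties, per the docs above.
  const res = await client.ml.getCategories({ job_id: 'my-job', page: { from: 0, size: 50 } })
  console.log(res.count, 'categories matched')
}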
*/ exclude_generated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never } @@ -18430,13 +26966,26 @@ export interface MlGetDataFrameAnalyticsRequest extends RequestBase { export interface MlGetDataFrameAnalyticsResponse { count: integer + /** An array of data frame analytics job resources, which are sorted by the id value in ascending order. */ data_frame_analytics: MlDataframeAnalyticsSummary[] } export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase { -/** Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. */ + /** Identifier for the data frame analytics job. If you do not specify this + * option, the API returns information for the first hundred data frame + * analytics jobs. */ id?: Id - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no data frame analytics + * jobs that match. + * 2. Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value returns an empty data_frame_analytics array when there + * are no matches and the subset of results when there are partial matches. + * If this parameter is `false`, the request returns a 404 status code when + * there are no matches or only partial matches. */ allow_no_match?: boolean /** Skips the specified number of data frame analytics jobs. */ from?: integer @@ -18452,13 +27001,25 @@ export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase { export interface MlGetDataFrameAnalyticsStatsResponse { count: long + /** An array of objects that contain usage information for data frame analytics jobs, which are sorted by the id value in ascending order. */ data_frame_analytics: MlDataframeAnalytics[] } export interface MlGetDatafeedStatsRequest extends RequestBase { -/** Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. */ + /** Identifier for the datafeed. It can be a datafeed identifier or a + * wildcard expression. If you do not specify one of these options, the API + * returns information about all datafeeds. */ datafeed_id?: Ids - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. 
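// Editorial usage sketch for the data frame analytics "get" types above,
// assuming an 8.x client instance; 'my-dfa-job' is a hypothetical job id.
import { Client } from '@elastic/elasticsearch'

async function inspectDataFrameAnalytics(client: Client): Promise<void> {
  // exclude_generated strips generated fields so the config can be re-created on another cluster.
  const configs = await client.ml.getDataFrameAnalytics({ id: 'my-dfa-job', exclude_generated: true })
  const stats = await client.ml.getDataFrameAnalyticsStats({ id: 'my-dfa-job' })
  console.log(configs.count, 'configs;', stats.count, 'stats entries')
}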
If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no datafeeds that match. + * 2. Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is `true`, which returns an empty `datafeeds` array + * when there are no matches and the subset of results when there are + * partial matches. If this parameter is `false`, the request returns a + * `404` status code when there are no matches or only partial matches. */ allow_no_match?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never } @@ -18472,11 +27033,24 @@ export interface MlGetDatafeedStatsResponse { } export interface MlGetDatafeedsRequest extends RequestBase { -/** Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. */ + /** Identifier for the datafeed. It can be a datafeed identifier or a + * wildcard expression. If you do not specify one of these options, the API + * returns information about all datafeeds. */ datafeed_id?: Ids - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no datafeeds that match. + * 2. Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is `true`, which returns an empty `datafeeds` array + * when there are no matches and the subset of results when there are + * partial matches. If this parameter is `false`, the request returns a + * `404` status code when there are no matches or only partial matches. */ allow_no_match?: boolean - /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ + /** Indicates if certain fields should be removed from the configuration on + * retrieval. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ exclude_generated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, exclude_generated?: never } @@ -18490,7 +27064,7 @@ export interface MlGetDatafeedsResponse { } export interface MlGetFiltersRequest extends RequestBase { -/** A string that uniquely identifies a filter. */ + /** A string that uniquely identifies a filter. */ filter_id?: Ids /** Skips the specified number of filters. 
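// Editorial usage sketch for the datafeed "get" types above, assuming an 8.x
// client instance; the datafeed ids are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function inspectDatafeeds(client: Client): Promise<void> {
  // allow_no_match avoids a 404 status code when the wildcard matches nothing.
  const stats = await client.ml.getDatafeedStats({ datafeed_id: 'datafeed-*', allow_no_match: true })
  const configs = await client.ml.getDatafeeds({ datafeed_id: 'datafeed-my-job', exclude_generated: true })
  console.log(stats.count, 'stats;', configs.count, 'configs')
}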
*/ from?: integer @@ -18508,25 +27082,32 @@ export interface MlGetFiltersResponse { } export interface MlGetInfluencersRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id /** If true, the results are sorted in descending order. */ desc?: boolean - /** Returns influencers with timestamps earlier than this time. The default value means it is unset and results are not limited to specific timestamps. */ + /** Returns influencers with timestamps earlier than this time. + * The default value means it is unset and results are not limited to + * specific timestamps. */ end?: DateTime - /** If true, the output excludes interim results. By default, interim results are included. */ + /** If true, the output excludes interim results. By default, interim results + * are included. */ exclude_interim?: boolean - /** Returns influencers with anomaly scores greater than or equal to this value. */ + /** Returns influencers with anomaly scores greater than or equal to this + * value. */ influencer_score?: double /** Skips the specified number of influencers. */ from?: integer /** Specifies the maximum number of influencers to obtain. */ size?: integer - /** Specifies the sort field for the requested influencers. By default, the influencers are sorted by the `influencer_score` value. */ + /** Specifies the sort field for the requested influencers. By default, the + * influencers are sorted by the `influencer_score` value. */ sort?: Field - /** Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps. */ + /** Returns influencers with timestamps after this time. The default value + * means it is unset and results are not limited to specific timestamps. */ start?: DateTime - /** Configures pagination. This parameter has the `from` and `size` properties. */ + /** Configures pagination. + * This parameter has the `from` and `size` properties. */ page?: MlPage /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, desc?: never, end?: never, exclude_interim?: never, influencer_score?: never, from?: never, size?: never, sort?: never, start?: never, page?: never } @@ -18536,13 +27117,26 @@ export interface MlGetInfluencersRequest extends RequestBase { export interface MlGetInfluencersResponse { count: long + /** Array of influencer objects */ influencers: MlInfluencer[] } export interface MlGetJobStatsRequest extends RequestBase { -/** Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. */ + /** Identifier for the anomaly detection job. It can be a job identifier, a + * group name, a comma-separated list of jobs, or a wildcard expression. If + * you do not specify one of these options, the API returns information for + * all anomaly detection jobs. */ job_id?: Id - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. 
If `false`, the API returns a `404` status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no jobs that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * If `true`, the API returns an empty `jobs` array when + * there are no matches and the subset of results when there are partial + * matches. If `false`, the API returns a `404` status + * code when there are no matches or only partial matches. */ allow_no_match?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never } @@ -18556,11 +27150,24 @@ export interface MlGetJobStatsResponse { } export interface MlGetJobsRequest extends RequestBase { -/** Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. */ + /** Identifier for the anomaly detection job. It can be a job identifier, a + * group name, or a wildcard expression. If you do not specify one of these + * options, the API returns information for all anomaly detection jobs. */ job_id?: Ids - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no jobs that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is `true`, which returns an empty `jobs` array when + * there are no matches and the subset of results when there are partial + * matches. If this parameter is `false`, the request returns a `404` status + * code when there are no matches or only partial matches. */ allow_no_match?: boolean - /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ + /** Indicates if certain fields should be removed from the configuration on + * retrieval. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ exclude_generated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, exclude_generated?: never } @@ -18574,51 +27181,82 @@ export interface MlGetJobsResponse { } export interface MlGetMemoryStatsJvmStats { + /** Maximum amount of memory available for use by the heap. */ heap_max?: ByteSize + /** Maximum amount of memory, in bytes, available for use by the heap. */ heap_max_in_bytes: integer + /** Amount of Java heap currently being used for caching inference models. 
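// Editorial usage sketch for the influencers and job-stats types above,
// assuming an 8.x client instance; ids and thresholds are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function readInfluencersAndStats(client: Client): Promise<void> {
  const influencers = await client.ml.getInfluencers({
    job_id: 'my-job',
    influencer_score: 50,       // only influencers scoring at least 50
    page: { from: 0, size: 25 } // pagination, as documented above
  })
  console.log(influencers.count, 'influencers')
  const stats = await client.ml.getJobStats({ job_id: 'my-job', allow_no_match: true })
  console.log(stats.count, 'jobs reporting stats')
}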
*/
   java_inference?: ByteSize
+  /** Amount of Java heap, in bytes, currently being used for caching inference models. */
   java_inference_in_bytes: integer
+  /** Maximum amount of Java heap to be used for caching inference models. */
   java_inference_max?: ByteSize
+  /** Maximum amount of Java heap, in bytes, to be used for caching inference models. */
   java_inference_max_in_bytes: integer
 }

 export interface MlGetMemoryStatsMemMlStats {
+  /** Amount of native memory set aside for anomaly detection jobs. */
   anomaly_detectors?: ByteSize
+  /** Amount of native memory, in bytes, set aside for anomaly detection jobs. */
   anomaly_detectors_in_bytes: integer
+  /** Amount of native memory set aside for data frame analytics jobs. */
   data_frame_analytics?: ByteSize
+  /** Amount of native memory, in bytes, set aside for data frame analytics jobs. */
   data_frame_analytics_in_bytes: integer
+  /** Maximum amount of native memory (separate to the JVM heap) that may be used by machine learning native processes. */
   max?: ByteSize
+  /** Maximum amount of native memory (separate to the JVM heap), in bytes, that may be used by machine learning native processes. */
   max_in_bytes: integer
+  /** Amount of native memory set aside for loading machine learning native code shared libraries. */
   native_code_overhead?: ByteSize
+  /** Amount of native memory, in bytes, set aside for loading machine learning native code shared libraries. */
   native_code_overhead_in_bytes: integer
+  /** Amount of native memory set aside for trained models that have a PyTorch model_type. */
   native_inference?: ByteSize
+  /** Amount of native memory, in bytes, set aside for trained models that have a PyTorch model_type. */
   native_inference_in_bytes: integer
 }

 export interface MlGetMemoryStatsMemStats {
+  /** If the amount of physical memory has been overridden using the es.total_memory_bytes system property
+    * then this reports the overridden value. Otherwise it reports the same value as total. */
   adjusted_total?: ByteSize
+  /** If the amount of physical memory has been overridden using the `es.total_memory_bytes` system property
+    * then this reports the overridden value in bytes. Otherwise it reports the same value as `total_in_bytes`. */
   adjusted_total_in_bytes: integer
+  /** Total amount of physical memory. */
   total?: ByteSize
+  /** Total amount of physical memory in bytes. */
   total_in_bytes: integer
+  /** Contains statistics about machine learning use of native memory on the node. */
   ml: MlGetMemoryStatsMemMlStats
 }

 export interface MlGetMemoryStatsMemory {
   attributes: Record
+  /** Contains Java Virtual Machine (JVM) statistics for the node. */
   jvm: MlGetMemoryStatsJvmStats
+  /** Contains statistics about memory usage for the node. */
   mem: MlGetMemoryStatsMemStats
+  /** Human-readable identifier for the node. Based on the Node name setting. */
   name: Name
+  /** Roles assigned to the node. */
   roles: string[]
+  /** The host and port where transport HTTP connections are accepted. */
   transport_address: TransportAddress
   ephemeral_id: Id
 }

 export interface MlGetMemoryStatsRequest extends RequestBase {
-/** The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` */
+  /** The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or
+    * `ml:true` */
   node_id?: Id
-  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+  /** Period to wait for a connection to the master node.
If no response is received before the timeout + * expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. If no response is received before the timeout expires, the request + * fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } @@ -18633,11 +27271,21 @@ export interface MlGetMemoryStatsResponse { } export interface MlGetModelSnapshotUpgradeStatsRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ + /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple + * snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, + * by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ snapshot_id: Id - /** Specifies what to do when the request: - Contains wildcard expressions and there are no jobs that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. The default value is true, which returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * - Contains wildcard expressions and there are no jobs that match. + * - Contains the _all string or no identifiers and there are no matches. + * - Contains wildcard expressions and there are only partial matches. + * + * The default value is true, which returns an empty jobs array when there are no matches and the subset of results + * when there are partial matches. If this parameter is false, the request returns a 404 status code when there are + * no matches or only partial matches. */ allow_no_match?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, allow_no_match?: never } @@ -18651,9 +27299,11 @@ export interface MlGetModelSnapshotUpgradeStatsResponse { } export interface MlGetModelSnapshotsRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ + /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple + * snapshots by using a comma-separated list or a wildcard expression. 
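// Editorial usage sketch for MlGetMemoryStatsRequest above, assuming an 8.x
// client instance; the node filter and timeout values are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function readMlMemoryStats(client: Client): Promise<void> {
  // 'ml:true' targets only machine learning nodes, per the node_id docs above.
  const res = await client.ml.getMemoryStats({ node_id: 'ml:true', timeout: '10s' })
  console.log(JSON.stringify(res, null, 2))
}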
You can get all snapshots by using `_all`,
+    * by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */
   snapshot_id?: Id
   /** Skips the specified number of snapshots. */
   from?: integer
@@ -18680,7 +27330,12 @@ export interface MlGetModelSnapshotsResponse {
 }

 export interface MlGetOverallBucketsRequest extends RequestBase {
-/** Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. You can summarize the bucket results for all anomaly detection jobs by using `_all` or by specifying `*` as the `<job_id>`. */
+  /** Identifier for the anomaly detection job. It can be a job identifier, a
+    * group name, a comma-separated list of jobs or groups, or a wildcard
+    * expression.
+    *
+    * You can summarize the bucket results for all anomaly detection jobs by
+    * using `_all` or by specifying `*` as the `<job_id>`. */
   job_id: Id
   /** Refer to the description for the `allow_no_match` query parameter. */
   allow_no_match?: boolean
@@ -18704,11 +27359,12 @@ export interface MlGetOverallBucketsRequest extends RequestBase {

 export interface MlGetOverallBucketsResponse {
   count: long
+  /** Array of overall bucket objects */
   overall_buckets: MlOverallBucket[]
 }

 export interface MlGetRecordsRequest extends RequestBase {
-/** Identifier for the anomaly detection job. */
+  /** Identifier for the anomaly detection job. */
   job_id: Id
   /** Skips the specified number of records. */
   from?: integer
@@ -18739,21 +27395,38 @@ export interface MlGetRecordsResponse {
 }

 export interface MlGetTrainedModelsRequest extends RequestBase {
-/** The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. */
+  /** The unique identifier of the trained model or a model alias.
+    *
+    * You can get information for multiple trained models in a single API
+    * request by using a comma-separated list of model IDs or a wildcard
+    * expression. */
   model_id?: Ids
-  /** Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. */
+  /** Specifies what to do when the request:
+    *
+    * - Contains wildcard expressions and there are no models that match.
+    * - Contains the _all string or no identifiers and there are no matches.
+    * - Contains wildcard expressions and there are only partial matches.
+    *
+    * If true, it returns an empty array when there are no matches and the
+    * subset of results when there are partial matches. */
   allow_no_match?: boolean
-  /** Specifies whether the included model definition should be returned as a JSON map (true) or in a custom compressed format (false). */
+  /** Specifies whether the included model definition should be returned as a
+    * JSON map (true) or in a custom compressed format (false). */
   decompress_definition?: boolean
-  /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */
+  /** Indicates if certain fields should be removed from the configuration on
+    * retrieval.
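// Editorial usage sketch for the overall-buckets and records types above,
// assuming an 8.x client instance; the job id and paging values are
// hypothetical.
import { Client } from '@elastic/elasticsearch'

async function readResults(client: Client): Promise<void> {
  // '_all' summarizes bucket results across every anomaly detection job, per the docs above.
  const overall = await client.ml.getOverallBuckets({ job_id: '_all', allow_no_match: true })
  console.log(overall.count, 'overall buckets')
  const records = await client.ml.getRecords({ job_id: 'my-job', from: 0, size: 100 })
  console.log(records.count, 'anomaly records')
}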
This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ exclude_generated?: boolean /** Skips the specified number of models. */ from?: integer - /** A comma delimited string of optional fields to include in the response body. */ + /** A comma delimited string of optional fields to include in the response + * body. */ include?: MlInclude /** Specifies the maximum number of models to obtain. */ size?: integer - /** A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned. */ + /** A comma delimited string of tags. A trained model can have many tags, or + * none. When supplied, only trained models that contain all the supplied + * tags are returned. */ tags?: string | string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, size?: never, tags?: never } @@ -18763,13 +27436,22 @@ export interface MlGetTrainedModelsRequest extends RequestBase { export interface MlGetTrainedModelsResponse { count: integer + /** An array of trained model resources, which are sorted by the model_id value in ascending order. */ trained_model_configs: MlTrainedModelConfig[] } export interface MlGetTrainedModelsStatsRequest extends RequestBase { -/** The unique identifier of the trained model or a model alias. It can be a comma-separated list or a wildcard expression. */ + /** The unique identifier of the trained model or a model alias. It can be a + * comma-separated list or a wildcard expression. */ model_id?: Ids - /** Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. */ + /** Specifies what to do when the request: + * + * - Contains wildcard expressions and there are no models that match. + * - Contains the _all string or no identifiers and there are no matches. + * - Contains wildcard expressions and there are only partial matches. + * + * If true, it returns an empty array when there are no matches and the + * subset of results when there are partial matches. */ allow_no_match?: boolean /** Skips the specified number of models. */ from?: integer @@ -18782,16 +27464,20 @@ export interface MlGetTrainedModelsStatsRequest extends RequestBase { } export interface MlGetTrainedModelsStatsResponse { + /** The total number of trained model statistics that matched the requested ID patterns. Could be higher than the number of items in the trained_model_stats array as the size of the array is restricted by the supplied size parameter. */ count: integer + /** An array of trained model statistics, which are sorted by the model_id value in ascending order. */ trained_model_stats: MlTrainedModelStats[] } export interface MlInferTrainedModelRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id /** Controls the amount of time to wait for inference results. */ timeout?: Duration - /** An array of objects to pass to the model for inference. 
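// Editorial usage sketch for the trained-model "get" types above, assuming an
// 8.x client instance; the model id pattern is hypothetical.
import { Client } from '@elastic/elasticsearch'

async function listTrainedModels(client: Client): Promise<void> {
  const models = await client.ml.getTrainedModels({
    model_id: 'my-model-*',
    allow_no_match: true,   // empty result instead of an error on no matches
    exclude_generated: true // config stays portable to another cluster
  })
  console.log(models.count, 'models')
  const stats = await client.ml.getTrainedModelsStats({ model_id: 'my-model-*' })
  console.log(stats.count, 'stats entries')
}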
The objects should contain fields matching your configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed. */
+  /** An array of objects to pass to the model for inference. The objects should contain fields matching your
+    * configured trained model input. Typically, for NLP models, the field name is `text_field`.
+    * Currently, for NLP models, only a single value is allowed. */
   docs: Record[]
   /** The inference configuration updates to apply on the API call */
   inference_config?: MlInferenceConfigUpdateContainer
@@ -18850,7 +27536,7 @@ export interface MlInfoResponse {
 }

 export interface MlOpenJobRequest extends RequestBase {
-/** Identifier for the anomaly detection job. */
+  /** Identifier for the anomaly detection job. */
   job_id: Id
   /** Refer to the description for the `timeout` query parameter. */
   timeout?: Duration
@@ -18862,11 +27548,13 @@ export interface MlOpenJobRequest extends RequestBase {

 export interface MlOpenJobResponse {
   opened: boolean
+  /** The ID of the node that the job was started on. In serverless this will be "serverless".
+    * If the job is allowed to open lazily and has not yet been assigned to a node, this value is an empty string. */
   node: NodeId
 }

 export interface MlPostCalendarEventsRequest extends RequestBase {
-/** A string that uniquely identifies a calendar. */
+  /** A string that uniquely identifies a calendar. */
   calendar_id: Id
   /** A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. */
   events: MlCalendarEvent[]
@@ -18881,7 +27569,7 @@ export interface MlPostCalendarEventsResponse {
 }

 export interface MlPostDataRequest extends RequestBase {
-/** Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. */
+  /** Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. */
   job_id: Id
   /** Specifies the end of the bucket resetting range. */
   reset_end?: DateTime
@@ -18924,9 +27612,11 @@ export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig {
 }

 export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase {
-/** Identifier for the data frame analytics job. */
+  /** Identifier for the data frame analytics job. */
   id?: Id
-  /** A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API. */
+  /** A data frame analytics config as described in create data frame analytics
+    * jobs. Note that `id` and `dest` don’t need to be provided in the context of
+    * this API. */
   config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { id?: never, config?: never }
 }

 export interface MlPreviewDataFrameAnalyticsResponse {
+  /** An array of objects that contain feature name and value pairs. The features have been processed and indicate what will be sent to the model for training. */
   feature_values: Record[]
 }

 export interface MlPreviewDatafeedRequest extends RequestBase {
-/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body. */ + /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase + * alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric + * characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job + * configuration details in the request body. */ datafeed_id?: Id /** The start time from where the datafeed preview should begin */ start?: DateTime @@ -18947,7 +27641,10 @@ export interface MlPreviewDatafeedRequest extends RequestBase { end?: DateTime /** The datafeed definition to preview. */ datafeed_config?: MlDatafeedConfig - /** The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. */ + /** The configuration details for the anomaly detection job that is associated with the datafeed. If the + * `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must + * supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is + * used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. */ job_config?: MlJobConfig /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { datafeed_id?: never, start?: never, end?: never, datafeed_config?: never, job_config?: never } @@ -18958,7 +27655,7 @@ export interface MlPreviewDatafeedRequest extends RequestBase { export type MlPreviewDatafeedResponse = TDocument[] export interface MlPutCalendarRequest extends RequestBase { -/** A string that uniquely identifies a calendar. */ + /** A string that uniquely identifies a calendar. */ calendar_id: Id /** An array of anomaly detection job identifiers. */ job_ids?: Id[] @@ -18971,13 +27668,16 @@ export interface MlPutCalendarRequest extends RequestBase { } export interface MlPutCalendarResponse { + /** A string that uniquely identifies a calendar. */ calendar_id: Id + /** A description of the calendar. */ description?: string + /** A list of anomaly detection job identifiers or group names. */ job_ids: Ids } export interface MlPutCalendarJobRequest extends RequestBase { -/** A string that uniquely identifies a calendar. */ + /** A string that uniquely identifies a calendar. */ calendar_id: Id /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. */ job_id: Ids @@ -18988,28 +27688,76 @@ export interface MlPutCalendarJobRequest extends RequestBase { } export interface MlPutCalendarJobResponse { + /** A string that uniquely identifies a calendar. */ calendar_id: Id + /** A description of the calendar. */ description?: string + /** A list of anomaly detection job identifiers or group names. */ job_ids: Ids } export interface MlPutDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. 
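// Editorial usage sketch for the calendar types above, assuming an 8.x client
// instance; the calendar id, job ids, and event times are hypothetical, and the
// event object shape (description plus start/end times) follows the
// MlCalendarEvent docs above.
import { Client } from '@elastic/elasticsearch'

async function scheduleMaintenance(client: Client): Promise<void> {
  const calendar = await client.ml.putCalendar({ calendar_id: 'maintenance', job_ids: ['my-job'] })
  console.log('created calendar', calendar.calendar_id)
  // Attach another job, then add a scheduled event (ISO 8601 times, per the docs above).
  await client.ml.putCalendarJob({ calendar_id: 'maintenance', job_id: 'my-other-job' })
  await client.ml.postCalendarEvents({
    calendar_id: 'maintenance',
    events: [{ description: 'quarterly upgrade', start_time: '2024-01-01T00:00:00Z', end_time: '2024-01-01T04:00:00Z' }]
  })
}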
This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id: Id - /** Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If set to `false` and a machine learning node with capacity to run the job cannot be immediately found, the API returns an error. If set to `true`, the API does not return an error; the job waits in the `starting` state until sufficient machine learning node capacity is available. This behavior is also affected by the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. */ + /** Specifies whether this job can start when there is insufficient machine + * learning node capacity for it to be immediately assigned to a node. If + * set to `false` and a machine learning node with capacity to run the job + * cannot be immediately found, the API returns an error. If set to `true`, + * the API does not return an error; the job waits in the `starting` state + * until sufficient machine learning node capacity is available. This + * behavior is also affected by the cluster-wide + * `xpack.ml.max_lazy_ml_nodes` setting. */ allow_lazy_start?: boolean - /** The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. */ + /** The analysis configuration, which contains the information necessary to + * perform one of the following types of analysis: classification, outlier + * detection, or regression. */ analysis: MlDataframeAnalysisContainer - /** Specifies `includes` and/or `excludes` patterns to select which fields will be included in the analysis. The patterns specified in `excludes` are applied last, therefore `excludes` takes precedence. In other words, if the same field is specified in both `includes` and `excludes`, then the field will not be included in the analysis. If `analyzed_fields` is not set, only the relevant fields will be included. For example, all the numeric fields for outlier detection. The supported fields vary for each type of analysis. Outlier detection requires numeric or `boolean` data to analyze. The algorithms don’t support missing values therefore fields that have data types other than numeric or boolean are ignored. Documents where included fields contain missing values, null values, or an array are also ignored. Therefore the `dest` index may contain documents that don’t have an outlier score. Regression supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the regression analysis. Classification supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. 
Documents in the `dest` index that don’t contain a results field are not included in the classification analysis. Classification analysis can be improved by mapping ordinal variable values to a single number. For example, in case of age ranges, you can model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. */ + /** Specifies `includes` and/or `excludes` patterns to select which fields + * will be included in the analysis. The patterns specified in `excludes` + * are applied last, therefore `excludes` takes precedence. In other words, + * if the same field is specified in both `includes` and `excludes`, then + * the field will not be included in the analysis. If `analyzed_fields` is + * not set, only the relevant fields will be included. For example, all the + * numeric fields for outlier detection. + * The supported fields vary for each type of analysis. Outlier detection + * requires numeric or `boolean` data to analyze. The algorithms don’t + * support missing values therefore fields that have data types other than + * numeric or boolean are ignored. Documents where included fields contain + * missing values, null values, or an array are also ignored. Therefore the + * `dest` index may contain documents that don’t have an outlier score. + * Regression supports fields that are numeric, `boolean`, `text`, + * `keyword`, and `ip` data types. It is also tolerant of missing values. + * Fields that are supported are included in the analysis, other fields are + * ignored. Documents where included fields contain an array with two or + * more values are also ignored. Documents in the `dest` index that don’t + * contain a results field are not included in the regression analysis. + * Classification supports fields that are numeric, `boolean`, `text`, + * `keyword`, and `ip` data types. It is also tolerant of missing values. + * Fields that are supported are included in the analysis, other fields are + * ignored. Documents where included fields contain an array with two or + * more values are also ignored. Documents in the `dest` index that don’t + * contain a results field are not included in the classification analysis. + * Classification analysis can be improved by mapping ordinal variable + * values to a single number. For example, in case of age ranges, you can + * model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. */ analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] /** A description of the job. */ description?: string /** The destination configuration. */ dest: MlDataframeAnalyticsDestination - /** The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. */ + /** The maximum number of threads to be used by the analysis. Using more + * threads may decrease the time necessary to complete the analysis at the + * cost of using more CPU. Note that the process may use additional threads + * for operational functionality other than the analysis itself. */ max_num_threads?: integer _meta?: Metadata - /** The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. 
*/ + /** The approximate maximum amount of memory resources that are permitted for + * analytical processing. If your `elasticsearch.yml` file contains an + * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try + * to create data frame analytics jobs that have `model_memory_limit` values + * greater than that setting. */ model_memory_limit?: string /** The configuration of how to source the analysis data. */ source: MlDataframeAnalyticsSource @@ -19038,47 +27786,76 @@ export interface MlPutDataFrameAnalyticsResponse { } export interface MlPutDatafeedRequest extends RequestBase { -/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ datafeed_id: Id - /** If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. */ + /** If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` + * string or when no indices are specified. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. */ + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ expand_wildcards?: ExpandWildcards /** If true, concrete, expanded, or aliased indices are ignored when frozen. */ ignore_throttled?: boolean /** If true, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean - /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ + /** If set, the datafeed performs aggregation searches. + * Support for aggregations is limited and should be used only with low cardinality data. */ aggregations?: Record - /** @alias aggregations */ - /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ + /** If set, the datafeed performs aggregation searches. + * Support for aggregations is limited and should be used only with low cardinality data. + * @alias aggregations */ aggs?: Record - /** Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. */ + /** Datafeeds might be required to search over long time periods, for several months or years. + * This search is split into time chunks in order to ensure the load on Elasticsearch is managed. + * Chunking configuration controls how the size of these time chunks are calculated; + * it is an advanced configuration option. 
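// Editorial usage sketch for MlPutDataFrameAnalyticsRequest above, assuming an
// 8.x client instance; the job id, index names, and the regression analysis
// settings are hypothetical illustrations of MlDataframeAnalysisContainer.
import { Client } from '@elastic/elasticsearch'

async function createRegressionJob(client: Client): Promise<void> {
  await client.ml.putDataFrameAnalytics({
    id: 'house-price-regression',
    source: { index: 'houses' },               // where the training data lives
    dest: { index: 'houses-predictions' },     // where results are written
    analysis: { regression: { dependent_variable: 'price' } },
    model_memory_limit: '64mb'                 // see the field docs above
  })
}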
*/ chunking_config?: MlChunkingConfig - /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */ + /** Specifies whether the datafeed checks for missing data and the size of the window. + * The datafeed can optionally search over indices that have already been read in an effort to determine whether + * any data has subsequently been added to the index. If missing data is found, it is a good indication that the + * `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. + * This check runs only on real-time datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig - /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ + /** The interval at which scheduled queries are made while the datafeed runs in real time. + * The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible + * fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last + * (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses + * aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration - /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master nodes and the machine learning nodes must have the `remote_cluster_client` role. */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master + * nodes and the machine learning nodes must have the `remote_cluster_client` role. */ indices?: Indices - /** @alias indices */ - /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master nodes and the machine learning nodes must have the `remote_cluster_client` role. */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master + * nodes and the machine learning nodes must have the `remote_cluster_client` role. + * @alias indices */ indexes?: Indices /** Specifies index expansion options that are used during search */ indices_options?: IndicesOptions /** Identifier for the anomaly detection job. */ job_id?: Id - /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. 
By default, it is not set. */ + /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically + * stops and closes the associated job after this many real-time searches return no documents. In other words, + * it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no + * end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ max_empty_searches?: integer - /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. */ + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an + * Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this + * object is passed verbatim to Elasticsearch. */ query?: QueryDslQueryContainer - /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */ + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might + * not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default + * value is randomly selected between `60s` and `120s`. This randomness improves the query performance + * when there are multiple jobs running on the same node. */ query_delay?: Duration /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields - /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ + /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. + * The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record - /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. + * The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ scroll_size?: integer headers?: HttpHeaders /** All values in `body` will be added to the request body. */ @@ -19106,11 +27883,12 @@ export interface MlPutDatafeedResponse { } export interface MlPutFilterRequest extends RequestBase { -/** A string that uniquely identifies a filter. */ + /** A string that uniquely identifies a filter. */ filter_id: Id /** A description of the filter. */ description?: string - /** The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. Up to 10000 items are allowed in each filter. */ + /** The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. + * Up to 10000 items are allowed in each filter. 
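// Editorial usage sketch for MlPutDatafeedRequest above, assuming an 8.x
// client instance; the ids, index pattern, and sizes are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function createDatafeed(client: Client): Promise<void> {
  await client.ml.putDatafeed({
    datafeed_id: 'datafeed-my-job',
    job_id: 'my-job',
    indices: ['my-metrics-*'],  // wildcards are supported, per the docs above
    query: { match_all: {} },   // passed verbatim to Elasticsearch
    scroll_size: 1000
  })
}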
*/ items?: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { filter_id?: never, description?: never, items?: never } @@ -19125,11 +27903,19 @@ export interface MlPutFilterResponse { } export interface MlPutJobRequest extends RequestBase { -/** The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ job_id: Id - /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. */ + /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the + * `_all` string or when no indices are specified. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: * `all`: Match any data stream or index, including hidden ones. * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: + * + * * `all`: Match any data stream or index, including hidden ones. + * * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. + * * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. + * * `none`: Wildcard patterns are not accepted. + * * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ expand_wildcards?: ExpandWildcards /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean @@ -19202,7 +27988,9 @@ export interface MlPutTrainedModelAggregateOutput { } export interface MlPutTrainedModelDefinition { + /** Collection of preprocessors */ preprocessors?: MlPutTrainedModelPreprocessor[] + /** The definition of the trained model. */ trained_model: MlPutTrainedModelTrainedModel } @@ -19236,19 +28024,28 @@ export interface MlPutTrainedModelPreprocessor { } export interface MlPutTrainedModelRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id - /** If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. */ + /** If set to `true` and a `compressed_definition` is provided, + * the request defers definition decompression and skips relevant + * validations. */ defer_definition_decompression?: boolean - /** Whether to wait for all child operations (e.g. model download) to complete. 
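// Editorial usage sketch for MlPutFilterRequest above, assuming an 8.x client
// instance; the filter id and items are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function createFilter(client: Client): Promise<void> {
  const filter = await client.ml.putFilter({
    filter_id: 'safe-domains',
    description: 'Domains to exclude from analysis',
    items: ['*.example.com', 'internal.local'] // a leading or trailing wildcard is allowed, per the docs above
  })
  console.log('created filter', filter.filter_id)
}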
*/ + /** Whether to wait for all child operations (e.g. model download)
+ * to complete. */ wait_for_completion?: boolean
- /** The compressed (GZipped and Base64 encoded) inference definition of the model. If compressed_definition is specified, then definition cannot be specified. */ + /** The compressed (GZipped and Base64 encoded) inference definition of the
+ * model. If compressed_definition is specified, then definition cannot be
+ * specified. */ compressed_definition?: string
- /** The inference definition for the model. If definition is specified, then compressed_definition cannot be specified. */ + /** The inference definition for the model. If definition is specified, then
+ * compressed_definition cannot be specified. */ definition?: MlPutTrainedModelDefinition
/** A human-readable description of the inference trained model. */ description?: string
- /** The default configuration for inference. This can be either a regression or classification configuration. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. */ + /** The default configuration for inference. This can be either a regression
+ * or classification configuration. It must match the underlying
+ * definition.trained_model's target_type. For pre-packaged models such as
+ * ELSER the config is not required. */ inference_config?: MlInferenceConfigCreateContainer
/** The input field names for the model definition. */ input?: MlPutTrainedModelInput @@ -19256,9 +28053,17 @@ export interface MlPutTrainedModelRequest extends RequestBase { metadata?: any
/** The model type. */ model_type?: MlTrainedModelType
- /** The estimated memory usage in bytes to keep the trained model in memory. This property is supported only if defer_definition_decompression is true or the model definition is not supplied. */ + /** The estimated memory usage in bytes to keep the trained model in memory.
+ * This property is supported only if defer_definition_decompression is true
+ * or the model definition is not supplied. */ model_size_bytes?: long
- /** The platform architecture (if applicable) of the trained model. If the model only works on one platform, because it is heavily optimized for a particular processor architecture and OS combination, then this field specifies which. The format of the string must match the platform identifiers used by Elasticsearch, so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. For portable models (those that work independently of processor architecture or OS features), leave this field unset. */ + /** The platform architecture (if applicable) of the trained model. If the model
+ * only works on one platform, because it is heavily optimized for a particular
+ * processor architecture and OS combination, then this field specifies which.
+ * The format of the string must match the platform identifiers used by Elasticsearch,
+ * so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`,
+ * or `windows-x86_64`. For portable models (those that work independently of processor
+ * architecture or OS features), leave this field unset. */ platform_architecture?: string
/** An array of tags to organize the model. */ tags?: string[] @@ -19280,8 +28085,14 @@ export interface MlPutTrainedModelTargetMeanEncodingPreprocessor { } export interface MlPutTrainedModelTrainedModel { + /** The definition for a binary decision tree.
*/ tree?: MlPutTrainedModelTrainedModelTree + /** The definition of a node in a tree. + * There are two major types of nodes: leaf nodes and not-leaf nodes. + * - Leaf nodes only need node_index and leaf_value defined. + * - All other nodes need split_feature, left_child, right_child, threshold, decision_type, and default_left defined. */ tree_node?: MlPutTrainedModelTrainedModelTreeNode + /** The definition for an ensemble model */ ensemble?: MlPutTrainedModelEnsemble } @@ -19309,11 +28120,13 @@ export interface MlPutTrainedModelWeights { } export interface MlPutTrainedModelAliasRequest extends RequestBase { -/** The alias to create or update. This value cannot end in numbers. */ + /** The alias to create or update. This value cannot end in numbers. */ model_alias: Name /** The identifier for the trained model that the alias refers to. */ model_id: Id - /** Specifies whether the alias gets reassigned to the specified trained model if it is already assigned to a different model. If the alias is already assigned and this parameter is false, the API returns an error. */ + /** Specifies whether the alias gets reassigned to the specified trained + * model if it is already assigned to a different model. If the alias is + * already assigned and this parameter is false, the API returns an error. */ reassign?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { model_alias?: never, model_id?: never, reassign?: never } @@ -19324,9 +28137,10 @@ export interface MlPutTrainedModelAliasRequest extends RequestBase { export type MlPutTrainedModelAliasResponse = AcknowledgedResponseBase export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id - /** The definition part number. When the definition is loaded for inference the definition parts are streamed in the order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. */ + /** The definition part number. When the definition is loaded for inference the definition parts are streamed in the + * order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. */ part: integer /** The definition part for the model. Must be a base64 encoded string. */ definition: string @@ -19343,7 +28157,7 @@ export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { export type MlPutTrainedModelDefinitionPartResponse = AcknowledgedResponseBase export interface MlPutTrainedModelVocabularyRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id /** The model vocabulary, which must not be empty. */ vocabulary: string[] @@ -19360,11 +28174,14 @@ export interface MlPutTrainedModelVocabularyRequest extends RequestBase { export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase export interface MlResetJobRequest extends RequestBase { -/** The ID of the job to reset. */ + /** The ID of the job to reset. */ job_id: Id - /** Should this request wait until the operation has completed before returning. */ + /** Should this request wait until the operation has completed before + * returning. */ wait_for_completion?: boolean - /** Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. 
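// A hedged sketch of streaming a model definition with the
// MlPutTrainedModelDefinitionPartRequest shape above: parts must be numbered
// 0 through total_parts - 1. The `chunks` array (base64 strings) and
// `totalBytes` value are assumed to be prepared by the caller.
//
//   for (let part = 0; part < chunks.length; part++) {
//     await client.ml.putTrainedModelDefinitionPart({
//       model_id: 'my-pytorch-model',
//       part,                                 // the first part must be 0
//       definition: chunks[part],             // base64-encoded chunk
//       total_definition_length: totalBytes,  // assumed field, per the API docs (outside this hunk)
//       total_parts: chunks.length            // assumed field, per the API docs (outside this hunk)
//     })
//   }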
*/ + /** Specifies whether annotations that have been added by the
+ * user should be deleted along with any auto-generated annotations when the job is
+ * reset. */ delete_user_annotations?: boolean
/** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, wait_for_completion?: never, delete_user_annotations?: never }
/** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any } & { job_id?: never, wait_for_completion?: never, delete_user_annotations?: never } }
export type MlResetJobResponse = AcknowledgedResponseBase
export interface MlRevertModelSnapshotRequest extends RequestBase {
-/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id
- /** You can specify `empty` as the `snapshot_id`. Reverting to the empty
+ * snapshot means the anomaly detection job starts learning a new model from
+ * scratch when it is started. */ snapshot_id: Id
/** Refer to the description for the `delete_intervening_results` query parameter. */ delete_intervening_results?: boolean
/** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, delete_intervening_results?: never }
/** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, delete_intervening_results?: never } }
export interface MlRevertModelSnapshotResponse { model: MlModelSnapshot }
export interface MlSetUpgradeModeRequest extends RequestBase {
-/** When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from starting. */ + /** When `true`, it enables `upgrade_mode` which temporarily halts all job
+ * and datafeed tasks and prohibits new job and datafeed tasks from
+ * starting. */ enabled?: boolean
/** The time to wait for the request to be completed. */ timeout?: Duration
/** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { enabled?: never, timeout?: never }
/** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any } & { enabled?: never, timeout?: never } }
export type MlSetUpgradeModeResponse = AcknowledgedResponseBase
export interface MlStartDataFrameAnalyticsRequest extends RequestBase {
-/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** Identifier for the data frame analytics job. This identifier can contain
+ * lowercase alphanumeric characters (a-z and 0-9), hyphens, and
+ * underscores. It must start and end with alphanumeric characters. */ id: Id
- /** Controls the amount of time to wait until the data frame analytics job starts. */ + /** Controls the amount of time to wait until the data frame analytics job
+ * starts. */ timeout?: Duration
/** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, timeout?: never }
/** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any } & { id?: never, timeout?: never } }
export interface MlStartDataFrameAnalyticsResponse { acknowledged: boolean
+ /** The ID of the node that the job was started on. If the job is allowed to open lazily and has not yet been assigned to a node, this value is an empty string.
+ * The node ID of the node the job has been assigned to, or
+ * an empty string if it hasn't been assigned to a node. In
+ * serverless if the job has been assigned to run then the
+ * node ID will be "serverless". */ node: NodeId }
export interface MlStartDatafeedRequest extends RequestBase {
-/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
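// A hedged sketch of the MlRevertModelSnapshotRequest shape above: reverting
// to the special `empty` snapshot so the job relearns from scratch. The job
// identifier is illustrative.
//
//   await client.ml.revertModelSnapshot({
//     job_id: 'my-anomaly-job',
//     snapshot_id: 'empty',
//     delete_intervening_results: true
//   })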
It must start and end with alphanumeric characters. */ + /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase
+ * alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric
+ * characters. */ datafeed_id: Id
/** Refer to the description for the `end` query parameter. */ end?: DateTime @@ -19436,30 +28267,49 @@ export interface MlStartDatafeedRequest extends RequestBase { }
export interface MlStartDatafeedResponse {
+ /** The ID of the node that the job was started on. In serverless this will be "serverless".
+ * If the job is allowed to open lazily and has not yet been assigned to a node, this value is an empty string. */ node: NodeIds
+ /** For a successful response, this value is always `true`. On failure, an exception is returned instead. */ started: boolean }
export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
-/** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ + /** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ model_id: Id
- /** The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. */ + /** The inference cache size (in memory outside the JVM heap) per node for the model.
+ * The default value is the same size as the `model_size_bytes`. To disable the cache,
+ * `0b` can be provided. */ cache_size?: ByteSize
- /** A unique identifier for the deployment of the model. */ + /** A unique identifier for the deployment of the model.
+ * @remarks This property is not supported on Elastic Cloud Serverless. */ deployment_id?: string
- /** The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ + /** The number of model allocations on each node where the model is deployed.
+ * All allocations on a node share the same copy of the model in memory but use
+ * a separate set of threads to evaluate the model.
+ * Increasing this value generally increases the throughput.
+ * If this setting is greater than the number of hardware threads
+ * it will automatically be changed to a value less than the number of hardware threads.
+ * If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ number_of_allocations?: integer
/** The deployment priority. */ priority?: MlTrainingPriority
- /** Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests are rejected with a 429 error. */ + /** Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds
+ * this value, new requests are rejected with a 429 error. */ queue_capacity?: integer
- /** Sets the number of threads used by each model allocation during inference. This generally increases the inference speed.
The inference process is a compute-bound process; any number greater than the number of available hardware threads on the machine does not increase the inference speed. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. */ + /** Sets the number of threads used by each model allocation during inference. This generally increases + * the inference speed. The inference process is a compute-bound process; any number + * greater than the number of available hardware threads on the machine does not increase the + * inference speed. If this setting is greater than the number of hardware threads + * it will automatically be changed to a value less than the number of hardware threads. */ threads_per_allocation?: integer /** Specifies the amount of time to wait for the model to deploy. */ timeout?: Duration /** Specifies the allocation status to wait for before returning. */ wait_for?: MlDeploymentAllocationState - /** Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually. */ + /** Adaptive allocations configuration. When enabled, the number of allocations + * is set based on the current load. + * If adaptive_allocations is enabled, do not set the number of allocations manually. */ adaptive_allocations?: MlAdaptiveAllocationsSettings /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never, adaptive_allocations?: never } @@ -19472,13 +28322,26 @@ export interface MlStartTrainedModelDeploymentResponse { } export interface MlStopDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id: Id - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is true, which returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no data frame analytics + * jobs that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is true, which returns an empty data_frame_analytics + * array when there are no matches and the subset of results when there are + * partial matches. If this parameter is false, the request returns a 404 + * status code when there are no matches or only partial matches. 
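// A hedged sketch of the MlStartTrainedModelDeploymentRequest shape above.
// The counts are illustrative; as the comments note, values above the
// hardware thread count are lowered automatically.
//
//   const deployment = await client.ml.startTrainedModelDeployment({
//     model_id: 'my-pytorch-model',
//     number_of_allocations: 2,
//     threads_per_allocation: 1,
//     wait_for: 'started'
//   })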
*/ allow_no_match?: boolean /** If true, the data frame analytics job is stopped forcefully. */ force?: boolean - /** Controls the amount of time to wait until the data frame analytics job stops. Defaults to 20 seconds. */ + /** Controls the amount of time to wait until the data frame analytics job + * stops. Defaults to 20 seconds. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, force?: never, timeout?: never } @@ -19491,7 +28354,9 @@ export interface MlStopDataFrameAnalyticsResponse { } export interface MlStopDatafeedRequest extends RequestBase { -/** Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as the identifier. */ + /** Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated + * list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as + * the identifier. */ datafeed_id: Id /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean @@ -19510,11 +28375,15 @@ export interface MlStopDatafeedResponse { } export interface MlStopTrainedModelDeploymentRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id - /** Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; + * contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and + * there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. + * If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean - /** Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you restart the model deployment. */ + /** Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you + * restart the model deployment. */ force?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, force?: never } @@ -19527,15 +28396,25 @@ export interface MlStopTrainedModelDeploymentResponse { } export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** Identifier for the data frame analytics job. 
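// A hedged sketch of the MlStopDatafeedRequest shape above: a wildcard (or
// `_all`) stops several datafeeds in one call; names are illustrative.
//
//   await client.ml.stopDatafeed({
//     datafeed_id: 'datafeed-sensor-*', // or '_all'
//     allow_no_match: true,
//     timeout: '30s'
//   })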
This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id: Id /** A description of the job. */ description?: string - /** The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. */ + /** The approximate maximum amount of memory resources that are permitted for + * analytical processing. If your `elasticsearch.yml` file contains an + * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try + * to create data frame analytics jobs that have `model_memory_limit` values + * greater than that setting. */ model_memory_limit?: string - /** The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. */ + /** The maximum number of threads to be used by the analysis. Using more + * threads may decrease the time necessary to complete the analysis at the + * cost of using more CPU. Note that the process may use additional threads + * for operational functionality other than the analysis itself. */ max_num_threads?: integer - /** Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ + /** Specifies whether this job can start when there is insufficient machine + * learning node capacity for it to be immediately assigned to a node. */ allow_lazy_start?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, allow_lazy_start?: never } @@ -19559,43 +28438,80 @@ export interface MlUpdateDataFrameAnalyticsResponse { } export interface MlUpdateDatafeedRequest extends RequestBase { -/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ datafeed_id: Id - /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. */ + /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the + * `_all` string or when no indices are specified. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: * `all`: Match any data stream or index, including hidden ones. * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. * `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or both. * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: + * + * * `all`: Match any data stream or index, including hidden ones. + * * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. + * * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. + * * `none`: Wildcard patterns are not accepted. + * * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ expand_wildcards?: ExpandWildcards /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean /** If `true`, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean - /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only + * with low cardinality data. */ aggregations?: Record - /** Datafeeds might search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. */ + /** Datafeeds might search over long time periods, for several months or years. This search is split into time + * chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of + * these time chunks are calculated; it is an advanced configuration option. */ chunking_config?: MlChunkingConfig - /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */ + /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally + * search over indices that have already been read in an effort to determine whether any data has subsequently been + * added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and + * the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time + * datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig - /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. 
If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is + * either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket + * span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are + * written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value + * must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration - /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine + * learning nodes must have the `remote_cluster_client` role. */ indices?: string[] - /** @alias indices */ - /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine + * learning nodes must have the `remote_cluster_client` role. + * @alias indices */ indexes?: string[] /** Specifies index expansion options that are used during search. */ indices_options?: IndicesOptions job_id?: Id - /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ + /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically + * stops and closes the associated job after this many real-time searches return no documents. In other words, + * it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no + * end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ max_empty_searches?: integer - /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also changed. Therefore, the time required to learn might be long and the understandability of the results is unpredictable. If you want to make significant changes to the source data, it is recommended that you clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one when you are satisfied with the results of the job. */ + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an + * Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this + * object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also + * changed. 
Therefore, the time required to learn might be long and the understandability of the results is + * unpredictable. If you want to make significant changes to the source data, it is recommended that you + * clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one + * when you are satisfied with the results of the job. */ query?: QueryDslQueryContainer - /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */ + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might + * not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default + * value is randomly selected between `60s` and `120s`. This randomness improves the query performance + * when there are multiple jobs running on the same node. */ query_delay?: Duration /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields - /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ + /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. + * The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record - /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`. */ + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. + * The maximum value is the value of `index.max_result_window`. */ scroll_size?: integer /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never } @@ -19622,7 +28538,7 @@ export interface MlUpdateDatafeedResponse { } export interface MlUpdateFilterRequest extends RequestBase { -/** A string that uniquely identifies a filter. */ + /** A string that uniquely identifies a filter. */ filter_id: Id /** The items to add to the filter. */ add_items?: string[] @@ -19643,27 +28559,61 @@ export interface MlUpdateFilterResponse { } export interface MlUpdateJobRequest extends RequestBase { -/** Identifier for the job. */ + /** Identifier for the job. */ job_id: Id - /** Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If `false` and a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. 
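// A hedged sketch of the MlUpdateFilterRequest shape above; `remove_items`
// and `description` are the API's companion fields to `add_items` (assumed
// here, as they fall outside the visible hunk). Values are illustrative.
//
//   await client.ml.updateFilter({
//     filter_id: 'safe_domains',
//     add_items: ['*.elastic.co'],
//     remove_items: ['wikipedia.org'],
//     description: 'Updated list of safe domains'
//   })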
If this option is set to `true`, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. */ + /** Advanced configuration option. Specifies whether this job can open when + * there is insufficient machine learning node capacity for it to be + * immediately assigned to a node. If `false` and a machine learning node + * with capacity to run the job cannot immediately be found, the open + * anomaly detection jobs API returns an error. However, this is also + * subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this + * option is set to `true`, the open anomaly detection jobs API does not + * return an error and the job waits in the opening state until sufficient + * machine learning node capacity is available. */ allow_lazy_open?: boolean analysis_limits?: MlAnalysisMemoryLimit - /** Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the value too low. If the job is open when you make the update, you must stop the datafeed, close the job, then reopen the job and restart the datafeed for the changes to take effect. */ + /** Advanced configuration option. The time between each periodic persistence + * of the model. + * The default value is a randomized value between 3 to 4 hours, which + * avoids all jobs persisting at exactly the same time. The smallest allowed + * value is 1 hour. + * For very large models (several GB), persistence could take 10-20 minutes, + * so do not set the value too low. + * If the job is open when you make the update, you must stop the datafeed, + * close the job, then reopen the job and restart the datafeed for the + * changes to take effect. */ background_persist_interval?: Duration - /** Advanced configuration option. Contains custom meta data about the job. For example, it can contain custom URL information as shown in Adding custom URLs to machine learning results. */ + /** Advanced configuration option. Contains custom meta data about the job. + * For example, it can contain custom URL information as shown in Adding + * custom URLs to machine learning results. */ custom_settings?: Record categorization_filters?: string[] /** A description of the job. */ description?: string model_plot_config?: MlModelPlotConfig model_prune_window?: Duration - /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. For jobs created before version 7.8.0, the default value matches `model_snapshot_retention_days`. */ + /** Advanced configuration option, which affects the automatic removal of old + * model snapshots for this job. It specifies a period of time (in days) + * after which only the first snapshot per day is retained. This period is + * relative to the timestamp of the most recent snapshot for this job. Valid + * values range from 0 to `model_snapshot_retention_days`. For jobs created + * before version 7.8.0, the default value matches + * `model_snapshot_retention_days`. 
*/ daily_model_snapshot_retention_after_days?: long - /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. */ + /** Advanced configuration option, which affects the automatic removal of old + * model snapshots for this job. It specifies the maximum period of time (in + * days) that snapshots are retained. This period is relative to the + * timestamp of the most recent snapshot for this job. */ model_snapshot_retention_days?: long - /** Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. */ + /** Advanced configuration option. The period over which adjustments to the + * score are applied, as new data is seen. */ renormalization_window_days?: long - /** Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. */ + /** Advanced configuration option. The period of time (in days) that results + * are retained. Age is calculated relative to the timestamp of the latest + * bucket result. If this property has a non-null value, once per day at + * 00:30 (server time), results that are the specified number of days older + * than the latest bucket result are deleted from Elasticsearch. The default + * value is null, which means all results are retained. */ results_retention_days?: long /** A list of job groups. A job can belong to no groups or many. */ groups?: string[] @@ -19702,13 +28652,15 @@ export interface MlUpdateJobResponse { } export interface MlUpdateModelSnapshotRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id /** Identifier for the model snapshot. */ snapshot_id: Id /** A description of the model snapshot. */ description?: string - /** If `true`, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. However, this snapshot will be deleted when the job is deleted. */ + /** If `true`, this snapshot will not be deleted during automatic cleanup of + * snapshots older than `model_snapshot_retention_days`. However, this + * snapshot will be deleted when the job is deleted. */ retain?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, description?: never, retain?: never } @@ -19722,11 +28674,19 @@ export interface MlUpdateModelSnapshotResponse { } export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase { -/** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ + /** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ model_id: Id - /** The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. 
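// A hedged sketch of the MlUpdateModelSnapshotRequest shape above: setting
// `retain` pins a snapshot past `model_snapshot_retention_days`. The
// snapshot ID is illustrative.
//
//   await client.ml.updateModelSnapshot({
//     job_id: 'my-anomaly-job',
//     snapshot_id: '1575402237',
//     description: 'Snapshot taken before the holiday traffic spike',
//     retain: true
//   })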
If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ + /** The number of model allocations on each node where the model is deployed.
+ * All allocations on a node share the same copy of the model in memory but use
+ * a separate set of threads to evaluate the model.
+ * Increasing this value generally increases the throughput.
+ * If this setting is greater than the number of hardware threads
+ * it will automatically be changed to a value less than the number of hardware threads.
+ * If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ number_of_allocations?: integer
- /** Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually. */ + /** Adaptive allocations configuration. When enabled, the number of allocations
+ * is set based on the current load.
+ * If adaptive_allocations is enabled, do not set the number of allocations manually. */ adaptive_allocations?: MlAdaptiveAllocationsSettings
/** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { model_id?: never, number_of_allocations?: never, adaptive_allocations?: never }
/** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any } & { model_id?: never, number_of_allocations?: never, adaptive_allocations?: never } } @@ -19739,11 +28699,12 @@ export interface MlUpdateTrainedModelDeploymentResponse { }
export interface MlUpgradeJobSnapshotRequest extends RequestBase {
-/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id
/** A numerical character string that uniquely identifies the model snapshot. */ snapshot_id: Id
- /** When true, the API won’t respond until the upgrade is complete. Otherwise, it responds as soon as the upgrade task is assigned to a node. */ + /** When true, the API won’t respond until the upgrade is complete.
+ * Otherwise, it responds as soon as the upgrade task is assigned to a node. */ wait_for_completion?: boolean
/** Controls the time to wait for the request to complete. */ timeout?: Duration
/** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, wait_for_completion?: never, timeout?: never }
/** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, wait_for_completion?: never, timeout?: never } }
export interface MlUpgradeJobSnapshotResponse {
+ /** The ID of the node that the upgrade task was started on if it is still running. In serverless this will be "serverless". */ node: NodeId
+ /** When true, this means the task is complete. When false, it is still running. */ completed: boolean } @@ -19787,7 +28750,7 @@ export interface MlValidateDetectorRequest extends RequestBase {
export type MlValidateDetectorResponse = AcknowledgedResponseBase
export interface MonitoringBulkRequest extends RequestBase {
-/** Default document type for items which don't provide one */ + /** Default document type for items which don't provide one */ type?: string
/** Identifier of the monitored system */ system_id: string @@ -19804,66 +28767,110 @@ export interface MonitoringBulkRequest }
export interface NodesCgroupMemory {
+ /** The `memory` control group to which the Elasticsearch process belongs. */ control_group?: string
+ /** The maximum amount of user memory (including file cache) allowed for all tasks in the same cgroup as the Elasticsearch process.
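// A hedged sketch of the MlUpgradeJobSnapshotRequest shape above, blocking
// until the upgrade finishes; identifiers are illustrative.
//
//   const { node, completed } = await client.ml.upgradeJobSnapshot({
//     job_id: 'my-anomaly-job',
//     snapshot_id: '1575402237',
//     wait_for_completion: true,
//     timeout: '30m'
//   })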
+ * This value can be too big to store in a `long`, so is returned as a string so that the value returned can exactly match what the underlying operating system interface returns. + * Any value that is too large to parse into a `long` almost certainly means no limit has been set for the cgroup. */ limit_in_bytes?: string + /** The total current memory usage by processes in the cgroup, in bytes, by all tasks in the same cgroup as the Elasticsearch process. + * This value is stored as a string for consistency with `limit_in_bytes`. */ usage_in_bytes?: string } export interface NodesClient { + /** Unique ID for the HTTP client. */ id?: long + /** Reported agent for the HTTP client. + * If unavailable, this property is not included in the response. */ agent?: string + /** Local address for the HTTP connection. */ local_address?: string + /** Remote address for the HTTP connection. */ remote_address?: string + /** The URI of the client’s most recent request. */ last_uri?: string + /** Time at which the client opened the connection. */ opened_time_millis?: long + /** Time at which the client closed the connection if the connection is closed. */ closed_time_millis?: long + /** Time of the most recent request from this client. */ last_request_time_millis?: long + /** Number of requests from this client. */ request_count?: long + /** Cumulative size in bytes of all requests from this client. */ request_size_bytes?: long + /** Value from the client’s `x-opaque-id` HTTP header. + * If unavailable, this property is not included in the response. */ x_opaque_id?: string } @@ -19872,26 +28879,48 @@ export interface NodesClusterAppliedStats { } export interface NodesClusterStateQueue { + /** Total number of cluster states in queue. */ total?: long + /** Number of pending cluster states in queue. */ pending?: long + /** Number of committed cluster states in queue. */ committed?: long } export interface NodesClusterStateUpdate { + /** The number of cluster state update attempts that did not change the cluster state since the node started. */ count: long + /** The cumulative amount of time spent computing no-op cluster state updates since the node started. */ computation_time?: Duration + /** The cumulative amount of time, in milliseconds, spent computing no-op cluster state updates since the node started. */ computation_time_millis?: DurationValue + /** The cumulative amount of time spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. + * This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`. */ publication_time?: Duration + /** The cumulative amount of time, in milliseconds, spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. + * This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`. */ publication_time_millis?: DurationValue + /** The cumulative amount of time spent constructing a publication context since the node started for publications that ultimately succeeded. 
+ * This statistic includes the time spent computing the difference between the current and new cluster state and preparing a serialized representation of this difference. */ context_construction_time?: Duration
+ /** The cumulative amount of time, in milliseconds, spent constructing a publication context since the node started for publications that ultimately succeeded.
+ * This statistic includes the time spent computing the difference between the current and new cluster state and preparing a serialized representation of this difference. */ context_construction_time_millis?: DurationValue
+ /** The cumulative amount of time spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ commit_time?: Duration
+ /** The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ commit_time_millis?: DurationValue
+ /** The cumulative amount of time spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ completion_time?: Duration
+ /** The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ completion_time_millis?: DurationValue
+ /** The cumulative amount of time spent successfully applying cluster state updates on the elected master since the node started. */ master_apply_time?: Duration
+ /** The cumulative amount of time, in milliseconds, spent successfully applying cluster state updates on the elected master since the node started. */ master_apply_time_millis?: DurationValue
+ /** The cumulative amount of time spent notifying listeners of a no-op cluster state update since the node started. */ notification_time?: Duration
+ /** The cumulative amount of time, in milliseconds, spent notifying listeners of a no-op cluster state update since the node started. */ notification_time_millis?: DurationValue } @@ -19914,12 +28943,16 @@ export interface NodesCpu { }
export interface NodesCpuAcct {
+ /** The `cpuacct` control group to which the Elasticsearch process belongs. */ control_group?: string
+ /** The total CPU time, in nanoseconds, consumed by all tasks in the same cgroup as the Elasticsearch process. */ usage_nanos?: DurationValue }
export interface NodesDataPathStats {
+ /** Total amount of disk space available to this Java virtual machine on this file store. */ available?: string
+ /** Total number of bytes available to this Java virtual machine on this file store. */ available_in_bytes?: long
disk_queue?: string disk_reads?: long disk_read_size?: string disk_read_size_in_bytes?: long disk_writes?: long disk_write_size?: string disk_write_size_in_bytes?: long
+ /** Total amount of unallocated disk space in the file store. */ free?: string
+ /** Total number of unallocated bytes in the file store. */ free_in_bytes?: long
+ /** Mount point of the file store (for example: `/dev/sda2`).
*/ mount?: string + /** Path to the file store. */ path?: string + /** Total size of the file store. */ total?: string + /** Total size of the file store in bytes. */ total_in_bytes?: long + /** Type of the file store (ex: ext4). */ type?: string } export interface NodesDiscovery { + /** Contains statistics for the cluster state queue of the node. */ cluster_state_queue?: NodesClusterStateQueue + /** Contains statistics for the published cluster states of the node. */ published_cluster_states?: NodesPublishedClusterStates + /** Contains low-level statistics about how long various activities took during cluster state updates while the node was the elected master. + * Omitted if the node is not master-eligible. + * Every field whose name ends in `_time` within this object is also represented as a raw number of milliseconds in a field whose name ends in `_time_millis`. + * The human-readable fields with a `_time` suffix are only returned if requested with the `?human=true` query parameter. */ cluster_state_update?: Record serialized_cluster_states?: NodesSerializedClusterState cluster_applier_stats?: NodesClusterAppliedStats } export interface NodesExtendedMemoryStats extends NodesMemoryStats { + /** Percentage of free memory. */ free_percent?: integer + /** Percentage of used memory. */ used_percent?: integer } export interface NodesFileSystem { + /** List of all file stores. */ data?: NodesDataPathStats[] + /** Last time the file stores statistics were refreshed. + * Recorded in milliseconds since the Unix Epoch. */ timestamp?: long + /** Contains statistics for all file stores of the node. */ total?: NodesFileSystemTotal + /** Contains I/O statistics for the node. */ io_stats?: NodesIoStats } export interface NodesFileSystemTotal { + /** Total disk space available to this Java virtual machine on all file stores. + * Depending on OS or process level restrictions, this might appear less than `free`. + * This is the actual amount of free disk space the Elasticsearch node can utilise. */ available?: string + /** Total number of bytes available to this Java virtual machine on all file stores. + * Depending on OS or process level restrictions, this might appear less than `free_in_bytes`. + * This is the actual amount of free disk space the Elasticsearch node can utilise. */ available_in_bytes?: long + /** Total unallocated disk space in all file stores. */ free?: string + /** Total number of unallocated bytes in all file stores. */ free_in_bytes?: long + /** Total size of all file stores. */ total?: string + /** Total size of all file stores in bytes. */ total_in_bytes?: long } export interface NodesGarbageCollector { + /** Contains statistics about JVM garbage collectors for the node. */ collectors?: Record } export interface NodesGarbageCollectorTotal { + /** Total number of JVM garbage collectors that collect objects. */ collection_count?: long + /** Total time spent by JVM collecting objects. */ collection_time?: string + /** Total time, in milliseconds, spent by JVM collecting objects. */ collection_time_in_millis?: long } export interface NodesHttp { + /** Current number of open HTTP connections for the node. */ current_open?: integer + /** Total number of HTTP connections opened for the node. */ total_opened?: long + /** Information on current and recently-closed HTTP client connections. + * Clients that have been closed longer than the `http.client_stats.closed_channels.max_age` setting will not be represented here. 
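// A hedged sketch of reading the NodesHttp/NodesClient shapes above from the
// node stats API; node IDs and field paths mirror the interfaces.
//
//   const stats = await client.nodes.stats({ metric: 'http' })
//   for (const [nodeId, node] of Object.entries(stats.nodes)) {
//     for (const c of node.http?.clients ?? []) {
//       console.log(nodeId, c.remote_address, c.request_count)
//     }
//   }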
*/ clients?: NodesClient[] + /** Detailed HTTP stats broken down by route + * @remarks This property is not supported on Elastic Cloud Serverless. */ routes: Record } @@ -20002,81 +29075,136 @@ export interface NodesHttpRouteResponses { } export interface NodesIndexingPressure { + /** Contains statistics for memory consumption from indexing load. */ memory?: NodesIndexingPressureMemory } export interface NodesIndexingPressureMemory { + /** Configured memory limit for the indexing requests. + * Replica requests have an automatic limit that is 1.5x this value. */ limit?: ByteSize + /** Configured memory limit, in bytes, for the indexing requests. + * Replica requests have an automatic limit that is 1.5x this value. */ limit_in_bytes?: long + /** Contains statistics for current indexing load. */ current?: NodesPressureMemory + /** Contains statistics for the cumulative indexing load since the node started. */ total?: NodesPressureMemory } export interface NodesIngest { + /** Contains statistics about ingest pipelines for the node. */ pipelines?: Record + /** Contains statistics about ingest operations for the node. */ total?: NodesIngestTotal } export interface NodesIngestStats { + /** Total number of documents ingested during the lifetime of this node. */ count: long + /** Total number of documents currently being ingested. */ current: long + /** Total number of failed ingest operations during the lifetime of this node. */ failed: long + /** Total number of ingest processors. */ processors: Record[] + /** Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ time_in_millis: DurationValue + /** Total number of bytes of all documents ingested by the pipeline. + * This field is only present on pipelines which are the first to process a document. + * Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. */ ingested_as_first_pipeline_in_bytes: long + /** Total number of bytes of all documents produced by the pipeline. + * This field is only present on pipelines which are the first to process a document. + * Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. + * In situations where there are subsequent pipelines, the value represents the size of the document after all pipelines have run. */ produced_as_first_pipeline_in_bytes: long } export interface NodesIngestTotal { + /** Total number of documents ingested during the lifetime of this node. */ count: long + /** Total number of documents currently being ingested. */ current: long + /** Total number of failed ingest operations during the lifetime of this node. */ failed: long + /** Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ time_in_millis: DurationValue } export interface NodesIoStatDevice { + /** The Linux device name. */ device_name?: string + /** The total number of read and write operations for the device completed since starting Elasticsearch. */ operations?: long + /** The total number of kilobytes read for the device since starting Elasticsearch. */ read_kilobytes?: long + /** The total number of read operations for the device completed since starting Elasticsearch. */ read_operations?: long + /** The total number of kilobytes written for the device since starting Elasticsearch. 
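// A hedged sketch of reading the NodesIndexingPressureMemory figures above;
// the `indexing_pressure` metric restricts the stats returned to this section.
//
//   const stats = await client.nodes.stats({ metric: 'indexing_pressure' })
//   for (const node of Object.values(stats.nodes)) {
//     const mem = node.indexing_pressure?.memory
//     console.log(mem?.limit_in_bytes, mem?.current?.coordinating_in_bytes)
//   }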
*/ write_kilobytes?: long + /** The total number of write operations for the device completed since starting Elasticsearch. */ write_operations?: long } export interface NodesIoStats { + /** Array of disk metrics for each device that is backing an Elasticsearch data path. + * These disk metrics are probed periodically and averages between the last probe and the current probe are computed. */ devices?: NodesIoStatDevice[] + /** The sum of the disk metrics for all devices that back an Elasticsearch data path. */ total?: NodesIoStatDevice } export interface NodesJvm { + /** Contains statistics about JVM buffer pools for the node. */ buffer_pools?: Record + /** Contains statistics about classes loaded by JVM for the node. */ classes?: NodesJvmClasses + /** Contains statistics about JVM garbage collectors for the node. */ gc?: NodesGarbageCollector + /** Contains JVM memory usage statistics for the node. */ mem?: NodesJvmMemoryStats + /** Contains statistics about JVM thread usage for the node. */ threads?: NodesJvmThreads + /** Last time JVM statistics were refreshed. */ timestamp?: long + /** Human-readable JVM uptime. + * Only returned if the `human` query parameter is `true`. */ uptime?: string + /** JVM uptime in milliseconds. */ uptime_in_millis?: long } export interface NodesJvmClasses { + /** Number of classes currently loaded by JVM. */ current_loaded_count?: long + /** Total number of classes loaded since the JVM started. */ total_loaded_count?: long + /** Total number of classes unloaded since the JVM started. */ total_unloaded_count?: long } export interface NodesJvmMemoryStats { + /** Memory, in bytes, currently in use by the heap. */ heap_used_in_bytes?: long + /** Percentage of memory currently in use by the heap. */ heap_used_percent?: long + /** Amount of memory, in bytes, available for use by the heap. */ heap_committed_in_bytes?: long + /** Maximum amount of memory, in bytes, available for use by the heap. */ heap_max_in_bytes?: long + /** Non-heap memory used, in bytes. */ non_heap_used_in_bytes?: long + /** Amount of non-heap memory available, in bytes. */ non_heap_committed_in_bytes?: long + /** Contains statistics about heap memory usage for the node. */ pools?: Record } export interface NodesJvmThreads { + /** Number of active threads in use by JVM. */ count?: long + /** Highest number of threads used by JVM. */ peak_count?: long } @@ -20086,6 +29214,8 @@ export interface NodesKeyedProcessor { } export interface NodesMemoryStats { + /** If the amount of physical memory has been overridden using the `es`.`total_memory_bytes` system property then this reports the overridden value in bytes. + * Otherwise it reports the same value as `total_in_bytes`. */ adjusted_total_in_bytes?: long resident?: string resident_in_bytes?: long @@ -20093,16 +29223,24 @@ export interface NodesMemoryStats { share_in_bytes?: long total_virtual?: string total_virtual_in_bytes?: long + /** Total amount of physical memory in bytes. */ total_in_bytes?: long + /** Amount of free physical memory in bytes. */ free_in_bytes?: long + /** Amount of used physical memory in bytes. */ used_in_bytes?: long } export interface NodesNodeBufferPool { + /** Number of buffer pools. */ count?: long + /** Total capacity of buffer pools. */ total_capacity?: string + /** Total capacity of buffer pools in bytes. */ total_capacity_in_bytes?: long + /** Size of buffer pools. */ used?: string + /** Size of buffer pools in bytes. 
*/ used_in_bytes?: long } @@ -20112,6 +29250,7 @@ export interface NodesNodeReloadResult { } export interface NodesNodesResponseBase { + /** Contains statistics about the number of nodes selected by the request’s node filters. */ _nodes?: NodeStatistics } @@ -20124,46 +29263,78 @@ export interface NodesOperatingSystem { } export interface NodesPool { + /** Memory, in bytes, used by the heap. */ used_in_bytes?: long + /** Maximum amount of memory, in bytes, available for use by the heap. */ max_in_bytes?: long + /** Largest amount of memory, in bytes, historically used by the heap. */ peak_used_in_bytes?: long + /** Largest amount of memory, in bytes, historically used by the heap. */ peak_max_in_bytes?: long } export interface NodesPressureMemory { + /** Memory consumed by indexing requests in the coordinating, primary, or replica stage. */ all?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating, primary, or replica stage. */ all_in_bytes?: long + /** Memory consumed by indexing requests in the coordinating or primary stage. + * This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. */ combined_coordinating_and_primary?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating or primary stage. + * This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. */ combined_coordinating_and_primary_in_bytes?: long + /** Memory consumed by indexing requests in the coordinating stage. */ coordinating?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating stage. */ coordinating_in_bytes?: long + /** Memory consumed by indexing requests in the primary stage. */ primary?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the primary stage. */ primary_in_bytes?: long + /** Memory consumed by indexing requests in the replica stage. */ replica?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the replica stage. */ replica_in_bytes?: long + /** Number of indexing requests rejected in the coordinating stage. */ coordinating_rejections?: long + /** Number of indexing requests rejected in the primary stage. */ primary_rejections?: long + /** Number of indexing requests rejected in the replica stage. */ replica_rejections?: long } export interface NodesProcess { + /** Contains CPU statistics for the node. */ cpu?: NodesCpu + /** Contains virtual memory statistics for the node. */ mem?: NodesMemoryStats + /** Number of opened file descriptors associated with the current or `-1` if not supported. */ open_file_descriptors?: integer + /** Maximum number of file descriptors allowed on the system, or `-1` if not supported. */ max_file_descriptors?: integer + /** Last time the statistics were refreshed. + * Recorded in milliseconds since the Unix Epoch. */ timestamp?: long } export interface NodesProcessor { + /** Number of documents transformed by the processor. */ count?: long + /** Number of documents currently being transformed by the processor. */ current?: long + /** Number of failed operations for the processor. */ failed?: long + /** Time, in milliseconds, spent by the processor transforming documents. */ time_in_millis?: DurationValue } export interface NodesPublishedClusterStates { + /** Number of published cluster states. */ full_states?: long + /** Number of incompatible differences between published cluster states. 
*/ incompatible_diffs?: long + /** Number of compatible differences between published cluster states. */ compatible_diffs?: long } @@ -20176,52 +29347,89 @@ export interface NodesRecording { export interface NodesRepositoryLocation { base_path: string + /** Container name (Azure) */ container?: string + /** Bucket name (GCP, S3) */ bucket?: string } export interface NodesRepositoryMeteringInformation { + /** Repository name. */ repository_name: Name + /** Repository type. */ repository_type: string + /** Represents an unique location within the repository. */ repository_location: NodesRepositoryLocation + /** An identifier that changes every time the repository is updated. */ repository_ephemeral_id: Id + /** Time the repository was created or updated. Recorded in milliseconds since the Unix Epoch. */ repository_started_at: EpochTime + /** Time the repository was deleted or updated. Recorded in milliseconds since the Unix Epoch. */ repository_stopped_at?: EpochTime + /** A flag that tells whether or not this object has been archived. When a repository is closed or updated the + * repository metering information is archived and kept for a certain period of time. This allows retrieving the + * repository metering information of previous repository instantiations. */ archived: boolean + /** The cluster state version when this object was archived, this field can be used as a logical timestamp to delete + * all the archived metrics up to an observed version. This field is only present for archived repository metering + * information objects. The main purpose of this field is to avoid possible race conditions during repository metering + * information deletions, i.e. deleting archived repositories metering information that we haven’t observed yet. */ cluster_version?: VersionNumber + /** An object with the number of request performed against the repository grouped by request type. */ request_counts: NodesRequestCounts } export interface NodesRequestCounts { + /** Number of Get Blob Properties requests (Azure) */ GetBlobProperties?: long + /** Number of Get Blob requests (Azure) */ GetBlob?: long + /** Number of List Blobs requests (Azure) */ ListBlobs?: long + /** Number of Put Blob requests (Azure) */ PutBlob?: long + /** Number of Put Block (Azure) */ PutBlock?: long + /** Number of Put Block List requests */ PutBlockList?: long + /** Number of get object requests (GCP, S3) */ GetObject?: long + /** Number of list objects requests (GCP, S3) */ ListObjects?: long + /** Number of insert object requests, including simple, multipart and resumable uploads. Resumable uploads + * can perform multiple http requests to insert a single object but they are considered as a single request + * since they are billed as an individual operation. (GCP) */ InsertObject?: long + /** Number of PutObject requests (S3) */ PutObject?: long + /** Number of Multipart requests, including CreateMultipartUpload, UploadPart and CompleteMultipartUpload requests (S3) */ PutMultipartObject?: long } export interface NodesScriptCache { + /** Total number of times the script cache has evicted old data. */ cache_evictions?: long + /** Total number of times the script compilation circuit breaker has limited inline script compilations. */ compilation_limit_triggered?: long + /** Total number of inline script compilations performed by the node. */ compilations?: long context?: string } export interface NodesScripting { + /** Total number of times the script cache has evicted old data. 
*/ cache_evictions?: long + /** Total number of inline script compilations performed by the node. */ compilations?: long + /** Contains the recent history of script compilations. */ compilations_history?: Record + /** Total number of times the script compilation circuit breaker has limited inline script compilations. */ compilation_limit_triggered?: long contexts?: NodesContext[] } export interface NodesSerializedClusterState { + /** Number of published cluster states. */ full_states?: NodesSerializedClusterStateDetail diffs?: NodesSerializedClusterStateDetail } @@ -20241,36 +29449,63 @@ export interface NodesSizeHttpHistogram { } export interface NodesStats { + /** Statistics about adaptive replica selection. */ adaptive_selection?: Record + /** Statistics about the field data circuit breaker. */ breakers?: Record + /** File system information, data path, free disk space, read/write stats. */ fs?: NodesFileSystem + /** Network host for the node, based on the network host setting. */ host?: Host + /** HTTP connection information. */ http?: NodesHttp + /** Statistics about ingest preprocessing. */ ingest?: NodesIngest + /** IP address and port for the node. */ ip?: Ip | Ip[] + /** JVM stats, memory pool information, garbage collection, buffer pools, number of loaded/unloaded classes. */ jvm?: NodesJvm + /** Human-readable identifier for the node. + * Based on the node name setting. */ name?: Name + /** Operating system stats, load average, mem, swap. */ os?: NodesOperatingSystem + /** Process statistics, memory consumption, cpu usage, open file descriptors. */ process?: NodesProcess + /** Roles assigned to the node. */ roles?: NodeRoles + /** Contains script statistics for the node. */ script?: NodesScripting script_cache?: Record + /** Statistics about each thread pool, including current size, queue and rejected tasks. */ thread_pool?: Record timestamp?: long + /** Transport statistics about sent and received bytes in cluster communication. */ transport?: NodesTransport + /** Host and port for the transport layer, used for internal communication between nodes in a cluster. */ transport_address?: TransportAddress + /** Contains a list of attributes for the node. */ attributes?: Record + /** Contains node discovery statistics for the node. */ discovery?: NodesDiscovery + /** Contains indexing pressure statistics for the node. */ indexing_pressure?: NodesIndexingPressure + /** Indices stats about size, document count, indexing and deletion times, search times, field cache size, merges and flushes. */ indices?: IndicesStatsShardStats } export interface NodesThreadCount { + /** Number of active threads in the thread pool. */ active?: long + /** Number of tasks completed by the thread pool executor. */ completed?: long + /** Highest number of active threads in the thread pool. */ largest?: long + /** Number of tasks in queue for the thread pool. */ queue?: long + /** Number of tasks rejected by the thread pool executor. */ rejected?: long + /** Number of threads in the thread pool. */ threads?: long } @@ -20281,26 +29516,42 @@ export interface NodesTimeHttpHistogram { } export interface NodesTransport { + /** The distribution of the time spent handling each inbound message on a transport thread, represented as a histogram. */ inbound_handling_time_histogram?: NodesTransportHistogram[] + /** The distribution of the time spent sending each outbound transport message on a transport thread, represented as a histogram.
*/ outbound_handling_time_histogram?: NodesTransportHistogram[] + /** Total number of RX (receive) packets received by the node during internal cluster communication. */ rx_count?: long + /** Size of RX packets received by the node during internal cluster communication. */ rx_size?: string + /** Size, in bytes, of RX packets received by the node during internal cluster communication. */ rx_size_in_bytes?: long + /** Current number of inbound TCP connections used for internal communication between nodes. */ server_open?: integer + /** Total number of TX (transmit) packets sent by the node during internal cluster communication. */ tx_count?: long + /** Size of TX packets sent by the node during internal cluster communication. */ tx_size?: string + /** Size, in bytes, of TX packets sent by the node during internal cluster communication. */ tx_size_in_bytes?: long + /** The cumulative number of outbound transport connections that this node has opened since it started. + * Each transport connection may comprise multiple TCP connections but is only counted once in this statistic. + * Transport connections are typically long-lived so this statistic should remain constant in a stable cluster. */ total_outbound_connections?: long } export interface NodesTransportHistogram { + /** The number of times a transport thread took a period of time within the bounds of this bucket to handle an inbound message. */ count?: long + /** The exclusive upper bound of the bucket in milliseconds. + * May be omitted on the last bucket if this bucket has no upper bound. */ lt_millis?: long + /** The inclusive lower bound of the bucket in milliseconds. May be omitted on the first bucket if this bucket has no lower bound. */ ge_millis?: long } export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBase { -/** Comma-separated list of node IDs or names used to limit returned information. */ + /** Comma-separated list of node IDs or names used to limit returned information. */ node_id: NodeIds /** Specifies the maximum `archive_version` to be cleared from the archive. */ max_archive_version: long @@ -20313,12 +29564,15 @@ export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBas export type NodesClearRepositoriesMeteringArchiveResponse = NodesClearRepositoriesMeteringArchiveResponseBase export interface NodesClearRepositoriesMeteringArchiveResponseBase extends NodesNodesResponseBase { + /** Name of the cluster. Based on the `cluster.name` setting. */ cluster_name: Name + /** Contains repositories metering information for the nodes selected by the request. */ nodes: Record } export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { -/** Comma-separated list of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). */ + /** Comma-separated list of node IDs or names used to limit returned information. + * All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). */ node_id: NodeIds /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { node_id?: never } @@ -20329,14 +29583,17 @@ export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { export type NodesGetRepositoriesMeteringInfoResponse = NodesGetRepositoriesMeteringInfoResponseBase export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodesResponseBase { + /** Name of the cluster. Based on the `cluster.name` setting. */ cluster_name: Name + /** Contains repositories metering information for the nodes selected by the request. */ nodes: Record } export interface NodesHotThreadsRequest extends RequestBase { -/** List of node IDs or names used to limit returned information. */ + /** List of node IDs or names used to limit returned information. */ node_id?: NodeIds - /** If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out. */ + /** If true, known idle threads (e.g. waiting in a socket select, or to get + * a task from an empty queue) are filtered out. */ ignore_idle_threads?: boolean /** The interval to do the second sampling of threads. */ interval?: Duration @@ -20344,7 +29601,8 @@ export interface NodesHotThreadsRequest extends RequestBase { snapshots?: long /** Specifies the number of hot threads to provide information for. */ threads?: long - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. If no response is received + * before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** The type to sample. */ type?: ThreadType @@ -20366,12 +29624,16 @@ export interface NodesInfoDeprecationIndexing { export interface NodesInfoNodeInfo { attributes: Record build_flavor: string + /** Short hash of the last git commit in this release. */ build_hash: string build_type: string + /** The node’s host name. */ host: Host http?: NodesInfoNodeInfoHttp + /** The node’s IP address. */ ip: Ip jvm?: NodesInfoNodeJvmInfo + /** The node's name */ name: Name network?: NodesInfoNodeInfoNetwork os?: NodesInfoNodeOperatingSystemInfo @@ -20380,10 +29642,14 @@ export interface NodesInfoNodeInfo { roles: NodeRoles settings?: NodesInfoNodeInfoSettings thread_pool?: Record + /** Total heap allowed to be used to hold recently indexed documents before they must be written to disk. This size is a shared pool across all shards on this node, and is controlled by Indexing Buffer settings. */ total_indexing_buffer?: long + /** Same as total_indexing_buffer, but expressed in bytes. */ total_indexing_buffer_in_bytes?: ByteSize transport?: NodesInfoNodeInfoTransport + /** Host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress + /** Elasticsearch version running on this node. */ version: VersionString modules?: PluginStats[] ingest?: NodesInfoNodeInfoIngest @@ -20674,18 +29940,25 @@ export interface NodesInfoNodeJvmInfo { vm_vendor: string vm_version: VersionString using_bundled_jdk: boolean + /** @alias using_bundled_jdk */ bundled_jdk: boolean using_compressed_ordinary_object_pointers?: boolean | string input_arguments: string[] } export interface NodesInfoNodeOperatingSystemInfo { + /** Name of the JVM architecture (ex: amd64, x86) */ arch: string + /** Number of processors available to the Java virtual machine */ available_processors: integer + /** The number of processors actually used to calculate thread pool size. 
This number can be set with the node.processors setting of a node and defaults to the number of processors reported by the OS. */ allocated_processors?: integer + /** Name of the operating system (ex: Linux, Windows, Mac OS X) */ name: Name pretty_name: Name + /** Refresh interval for the OS statistics */ refresh_interval_in_millis: DurationValue + /** Version of the operating system */ version: VersionString cpu?: NodesInfoNodeInfoOSCPU mem?: NodesInfoNodeInfoMemory @@ -20693,8 +29966,11 @@ export interface NodesInfoNodeOperatingSystemInfo { } export interface NodesInfoNodeProcessInfo { + /** Process identifier (PID) */ id: long + /** Indicates if the process address space has been successfully locked in memory */ mlockall: boolean + /** Refresh interval for the process statistics */ refresh_interval_in_millis: DurationValue } @@ -20708,7 +29984,7 @@ export interface NodesInfoNodeThreadPoolInfo { } export interface NodesInfoRequest extends RequestBase { -/** Comma-separated list of node IDs or names used to limit returned information. */ + /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds /** Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest. */ metric?: Metrics @@ -20730,9 +30006,10 @@ export interface NodesInfoResponseBase extends NodesNodesResponseBase { } export interface NodesReloadSecureSettingsRequest extends RequestBase { -/** The names of particular nodes in the cluster to target. */ + /** The names of particular nodes in the cluster to target. */ node_id?: NodeIds - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** The password for the Elasticsearch keystore. */ secure_settings_password?: Password @@ -20750,7 +30027,7 @@ export interface NodesReloadSecureSettingsResponseBase extends NodesNodesRespons } export interface NodesStatsRequest extends RequestBase { -/** Comma-separated list of node IDs or names used to limit returned information. */ + /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds /** Limit the information returned to the specified metrics */ metric?: Metrics @@ -20795,11 +30072,13 @@ export interface NodesUsageNodeUsage { } export interface NodesUsageRequest extends RequestBase { -/** A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes */ + /** A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes */ node_id?: NodeIds - /** Limits the information returned to the specific metrics. A comma-separated list of the following options: `_all`, `rest_actions`. */ + /** Limits the information returned to the specific metrics. + * A comma-separated list of the following options: `_all`, `rest_actions`. */ metric?: Metrics - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. 
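+ * @example
+ * // Illustrative sketch only; the node ID, timeout, and password below are
+ * // hypothetical values, not part of the generated spec.
+ * const request: NodesReloadSecureSettingsRequest = {
+ *   node_id: ['node-1'],
+ *   timeout: '30s',
+ *   secure_settings_password: 'keystore-password'
+ * }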
*/ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, metric?: never, timeout?: never } @@ -20815,21 +30094,56 @@ export interface NodesUsageResponseBase extends NodesNodesResponseBase { } export interface QueryRulesQueryRule { + /** A unique identifier for the rule. */ rule_id: Id + /** The type of rule. + * `pinned` will identify and pin specific documents to the top of search results. + * `exclude` will exclude specific documents from search results. */ type: QueryRulesQueryRuleType + /** The criteria that must be met for the rule to be applied. + * If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] + /** The actions to take when the rule is matched. + * The format of this action depends on the rule type. */ actions: QueryRulesQueryRuleActions priority?: integer } export interface QueryRulesQueryRuleActions { + /** The unique document IDs of the documents to apply the rule to. + * Only one of `ids` or `docs` may be specified and at least one must be specified. */ ids?: Id[] + /** The documents to apply the rule to. + * Only one of `ids` or `docs` may be specified and at least one must be specified. + * There is a maximum value of 100 documents in a rule. + * You can specify the following attributes for each document: + * + * * `_index`: The index of the document to pin. + * * `_id`: The unique document ID. */ docs?: QueryDslPinnedDoc[] } export interface QueryRulesQueryRuleCriteria { + /** The type of criteria. The following criteria types are supported: + * + * * `always`: Matches all queries, regardless of input. + * * `contains`: Matches that contain this value anywhere in the field meet the criteria defined by the rule. Only applicable for string values. + * * `exact`: Only exact matches meet the criteria defined by the rule. Applicable for string or numerical values. + * * `fuzzy`: Exact matches or matches within the allowed Levenshtein Edit Distance meet the criteria defined by the rule. Only applicable for string values. + * * `gt`: Matches with a value greater than this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `gte`: Matches with a value greater than or equal to this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `lt`: Matches with a value less than this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `lte`: Matches with a value less than or equal to this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `prefix`: Matches that start with this value meet the criteria defined by the rule. Only applicable for string values. + * * `suffix`: Matches that end with this value meet the criteria defined by the rule. Only applicable for string values. */ type: QueryRulesQueryRuleCriteriaType + /** The metadata field to match against. + * This metadata will be used to match against `match_criteria` sent in the rule. + * It is required for all criteria types except `always`. */ metadata?: string + /** The values to match against the `metadata` field. + * Only one value must match for the criteria to be met. + * It is required for all criteria types except `always`. 
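+ * @example
+ * // Illustrative sketch only: a `contains` criteria that matches when the
+ * // hypothetical `query_string` metadata sent at query time contains 'elastic'.
+ * const criteria: QueryRulesQueryRuleCriteria = {
+ *   type: 'contains',
+ *   metadata: 'query_string',
+ *   values: ['elastic']
+ * }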
*/ values?: any[] } @@ -20838,12 +30152,14 @@ export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' export type QueryRulesQueryRuleType = 'pinned' | 'exclude' export interface QueryRulesQueryRuleset { + /** A unique identifier for the ruleset. */ ruleset_id: Id + /** Rules associated with the query ruleset. */ rules: QueryRulesQueryRule[] } export interface QueryRulesDeleteRuleRequest extends RequestBase { -/** The unique identifier of the query ruleset containing the rule to delete */ + /** The unique identifier of the query ruleset containing the rule to delete */ ruleset_id: Id /** The unique identifier of the query rule within the specified ruleset to delete */ rule_id: Id @@ -20856,7 +30172,7 @@ export interface QueryRulesDeleteRuleRequest extends RequestBase { export type QueryRulesDeleteRuleResponse = AcknowledgedResponseBase export interface QueryRulesDeleteRulesetRequest extends RequestBase { -/** The unique identifier of the query ruleset to delete */ + /** The unique identifier of the query ruleset to delete */ ruleset_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { ruleset_id?: never } @@ -20867,7 +30183,7 @@ export interface QueryRulesDeleteRulesetRequest extends RequestBase { export type QueryRulesDeleteRulesetResponse = AcknowledgedResponseBase export interface QueryRulesGetRuleRequest extends RequestBase { -/** The unique identifier of the query ruleset containing the rule to retrieve */ + /** The unique identifier of the query ruleset containing the rule to retrieve */ ruleset_id: Id /** The unique identifier of the query rule within the specified ruleset to retrieve */ rule_id: Id @@ -20880,7 +30196,7 @@ export interface QueryRulesGetRuleRequest extends RequestBase { export type QueryRulesGetRuleResponse = QueryRulesQueryRule export interface QueryRulesGetRulesetRequest extends RequestBase { -/** The unique identifier of the query ruleset */ + /** The unique identifier of the query ruleset */ ruleset_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { ruleset_id?: never } @@ -20891,14 +30207,20 @@ export interface QueryRulesGetRulesetRequest extends RequestBase { export type QueryRulesGetRulesetResponse = QueryRulesQueryRuleset export interface QueryRulesListRulesetsQueryRulesetListItem { + /** A unique identifier for the ruleset. */ ruleset_id: Id + /** The number of rules associated with the ruleset. */ rule_total_count: integer + /** A map of criteria type (for example, `exact`) to the number of rules of that type. + * + * NOTE: The counts in `rule_criteria_types_counts` may be larger than the value of `rule_total_count` because a rule may have multiple criteria. */ rule_criteria_types_counts: Record + /** A map of rule type (for example, `pinned`) to the number of rules of that type. */ rule_type_counts: Record } export interface QueryRulesListRulesetsRequest extends RequestBase { -/** The offset from the first result to fetch. */ + /** The offset from the first result to fetch. */ from?: integer /** The maximum number of results to retrieve. */ size?: integer @@ -20914,15 +30236,17 @@ export interface QueryRulesListRulesetsResponse { } export interface QueryRulesPutRuleRequest extends RequestBase { -/** The unique identifier of the query ruleset containing the rule to be created or updated. */ + /** The unique identifier of the query ruleset containing the rule to be created or updated. 
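+ * @example
+ * // Illustrative sketch only: a put-rule request that pins one document.
+ * // The ruleset, rule, index, and document IDs are hypothetical.
+ * const request: QueryRulesPutRuleRequest = {
+ *   ruleset_id: 'my-ruleset',
+ *   rule_id: 'my-rule',
+ *   type: 'pinned',
+ *   criteria: { type: 'exact', metadata: 'user_query', values: ['pugs'] },
+ *   actions: { docs: [{ _index: 'my-index', _id: 'id1' }] }
+ * }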
*/ ruleset_id: Id /** The unique identifier of the query rule within the specified ruleset to be created or updated. */ rule_id: Id /** The type of rule. */ type: QueryRulesQueryRuleType - /** The criteria that must be met for the rule to be applied. If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ + /** The criteria that must be met for the rule to be applied. + * If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] - /** The actions to take when the rule is matched. The format of this action depends on the rule type. */ + /** The actions to take when the rule is matched. + * The format of this action depends on the rule type. */ actions: QueryRulesQueryRuleActions priority?: integer /** All values in `body` will be added to the request body. */ @@ -20936,7 +30260,7 @@ export interface QueryRulesPutRuleResponse { } export interface QueryRulesPutRulesetRequest extends RequestBase { -/** The unique identifier of the query ruleset to be created or updated. */ + /** The unique identifier of the query ruleset to be created or updated. */ ruleset_id: Id rules: QueryRulesQueryRule | QueryRulesQueryRule[] /** All values in `body` will be added to the request body. */ @@ -20950,14 +30274,17 @@ export interface QueryRulesPutRulesetResponse { } export interface QueryRulesTestQueryRulesetMatchedRule { + /** Ruleset unique identifier */ ruleset_id: Id + /** Rule unique identifier within that ruleset */ rule_id: Id } export interface QueryRulesTestRequest extends RequestBase { -/** The unique identifier of the query ruleset to be created or updated */ + /** The unique identifier of the query ruleset to be created or updated */ ruleset_id: Id - /** The match criteria to apply to rules in the given query ruleset. Match criteria should match the keys defined in the `criteria.metadata` field of the rule. */ + /** The match criteria to apply to rules in the given query ruleset. + * Match criteria should match the keys defined in the `criteria.metadata` field of the rule. */ match_criteria: Record /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { ruleset_id?: never, match_criteria?: never } @@ -20971,39 +30298,67 @@ export interface QueryRulesTestResponse { } export interface RollupDateHistogramGrouping { + /** How long to wait before rolling up new documents. + * By default, the indexer attempts to roll up all data that is available. + * However, it is not uncommon for data to arrive out of order. + * The indexer is unable to deal with data that arrives after a time-span has been rolled up. + * You need to specify a delay that matches the longest period of time you expect out-of-order data to arrive. */ delay?: Duration + /** The date field that is to be rolled up. */ field: Field format?: string interval?: Duration + /** The interval of time buckets to be generated when rolling up. */ calendar_interval?: Duration + /** The interval of time buckets to be generated when rolling up. */ fixed_interval?: Duration + /** Defines what `time_zone` the rollup documents are stored as. + * Unlike raw data, which can shift timezones on the fly, rolled documents have to be stored with a specific timezone. + * By default, rollup documents are stored in `UTC`. */ time_zone?: TimeZone } export interface RollupFieldMetric { + /** The field to collect metrics for. 
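+ * @example
+ * // Illustrative sketch only, assuming a numeric `temperature` field:
+ * const metric: RollupFieldMetric = { field: 'temperature', metrics: ['min', 'max', 'avg'] }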
This must be a numeric of some kind. */ field: Field + /** An array of metrics to collect for the field. At least one metric must be configured. */ metrics: RollupMetric[] } export interface RollupGroupings { + /** A date histogram group aggregates a date field into time-based buckets. + * This group is mandatory; you currently cannot roll up documents without a timestamp and a `date_histogram` group. */ date_histogram?: RollupDateHistogramGrouping + /** The histogram group aggregates one or more numeric fields into numeric histogram intervals. */ histogram?: RollupHistogramGrouping + /** The terms group can be used on keyword or numeric fields to allow bucketing via the terms aggregation at a later point. + * The indexer enumerates and stores all values of a field for each time-period. + * This can be potentially costly for high-cardinality groups such as IP addresses, especially if the time-bucket is particularly sparse. */ terms?: RollupTermsGrouping } export interface RollupHistogramGrouping { + /** The set of fields that you wish to build histograms for. + * All fields specified must be some kind of numeric. + * Order does not matter. */ fields: Fields + /** The interval of histogram buckets to be generated when rolling up. + * For example, a value of `5` creates buckets that are five units wide (`0-5`, `5-10`, etc). + * Note that only one interval can be specified in the histogram group, meaning that all fields being grouped via the histogram must share the same interval. */ interval: long } export type RollupMetric = 'min' | 'max' | 'sum' | 'avg' | 'value_count' export interface RollupTermsGrouping { + /** The set of fields that you wish to collect terms for. + * This array can contain fields that are both keyword and numerics. + * Order does not matter. */ fields: Fields } export interface RollupDeleteJobRequest extends RequestBase { -/** Identifier for the job. */ + /** Identifier for the job. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -21019,7 +30374,8 @@ export interface RollupDeleteJobResponse { export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' export interface RollupGetJobsRequest extends RequestBase { -/** Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. */ + /** Identifier for the rollup job. + * If it is `_all` or omitted, the API returns all rollup jobs. */ id?: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -21032,8 +30388,13 @@ export interface RollupGetJobsResponse { } export interface RollupGetJobsRollupJob { + /** The rollup job configuration. */ config: RollupGetJobsRollupJobConfiguration + /** Transient statistics about the rollup job, such as how many documents have been processed and how many rollup summary docs have been indexed. + * These stats are not persisted. + * If a node is restarted, these stats are reset. */ stats: RollupGetJobsRollupJobStats + /** The current status of the indexer for the rollup job. */ status: RollupGetJobsRollupJobStatus } @@ -21070,7 +30431,8 @@ export interface RollupGetJobsRollupJobStatus { } export interface RollupGetRollupCapsRequest extends RequestBase { -/** Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. */ + /** Index, indices or index-pattern to return rollup capabilities for. 
+ * `_all` may be used to fetch rollup capabilities from all jobs. */ id?: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -21081,6 +30443,7 @@ export type RollupGetRollupCapsResponse = Record export interface RollupGetRollupCapsRollupCapabilities { + /** There can be multiple, independent jobs configured for a single index or index pattern. Each of these jobs may have different configurations, so the API returns a list of all the various configurations available. */ rollup_jobs: RollupGetRollupCapsRollupCapabilitySummary[] } @@ -21102,7 +30465,8 @@ export interface RollupGetRollupIndexCapsIndexCapabilities { } export interface RollupGetRollupIndexCapsRequest extends RequestBase { -/** Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. */ + /** Data stream or index to check for rollup capabilities. + * Wildcard (`*`) expressions are supported. */ index: Ids /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never } @@ -21126,17 +30490,34 @@ export interface RollupGetRollupIndexCapsRollupJobSummaryField { } export interface RollupPutJobRequest extends RequestBase { -/** Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. The ID is persistent; it is stored with the rolled up data. If you create a job, let it run for a while, then delete the job, the data that the job rolled up is still be associated with this job ID. You cannot create a new job with the same ID since that could lead to problems with mismatched job configurations. */ + /** Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the + * data that is associated with the rollup job. The ID is persistent; it is stored with the rolled + * up data. If you create a job, let it run for a while, then delete the job, the data that the job + * rolled up is still associated with this job ID. You cannot create a new job with the same ID + * since that could lead to problems with mismatched job configurations. */ id: Id - /** A cron string which defines the intervals when the rollup job should be executed. When the interval triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated to the time interval of the data being rolled up. For example, you may wish to create hourly rollups of your document but to only run the indexer on a daily basis at midnight, as defined by the cron. The cron pattern is defined just like a Watcher cron schedule. */ + /** A cron string which defines the intervals when the rollup job should be executed. When the interval + * triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated + * to the time interval of the data being rolled up. For example, you may wish to create hourly rollups + * of your document but to only run the indexer on a daily basis at midnight, as defined by the cron. The + * cron pattern is defined just like a Watcher cron schedule. */ cron: string - /** Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be available later for aggregating into buckets. These aggs and fields can be used in any combination.
Think of the groups configuration as defining a set of tools that can later be used in aggregations to partition the data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. */ + /** Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be + * available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of + * the groups configuration as defining a set of tools that can later be used in aggregations to partition the + * data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide + * enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. */ groups: RollupGroupings - /** The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to rollup the entire index or index-pattern. */ + /** The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to + * rollup the entire index or index-pattern. */ index_pattern: string - /** Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined on a per-field basis and for each field you configure which metric should be collected. */ + /** Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each + * group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined + * on a per-field basis and for each field you configure which metric should be collected. */ metrics?: RollupFieldMetric[] - /** The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends to execute faster, but requires more memory during processing. This value has no effect on how the data is rolled up; it is merely used for tweaking the speed or memory cost of the indexer. */ + /** The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends + * to execute faster, but requires more memory during processing. This value has no effect on how the data is + * rolled up; it is merely used for tweaking the speed or memory cost of the indexer. */ page_size: integer /** The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. */ rollup_index: IndexName @@ -21152,7 +30533,13 @@ export interface RollupPutJobRequest extends RequestBase { export type RollupPutJobResponse = AcknowledgedResponseBase export interface RollupRollupSearchRequest extends RequestBase { -/** A comma-separated list of data streams and indices used to limit the request. This parameter has the following rules: * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. * Multiple non-rollup indices may be specified. * Only one rollup index may be specified. If more than one are supplied, an exception occurs. 
* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. */ + /** A comma-separated list of data streams and indices used to limit the request. + * This parameter has the following rules: + * + * * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. + * * Multiple non-rollup indices may be specified. + * * Only one rollup index may be specified. If more than one are supplied, an exception occurs. + * * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. */ index: Indices /** Indicates whether hits.total should be rendered as an integer or an object in the rest search response */ rest_total_hits_as_int?: boolean @@ -21160,8 +30547,8 @@ export interface RollupRollupSearchRequest extends RequestBase { typed_keys?: boolean /** Specifies aggregations. */ aggregations?: Record - /** @alias aggregations */ - /** Specifies aggregations. */ + /** Specifies aggregations. + * @alias aggregations */ aggs?: Record /** Specifies a DSL query that is subject to some limitations. */ query?: QueryDslQueryContainer @@ -21183,7 +30570,7 @@ export interface RollupRollupSearchResponse } export interface SearchApplicationSearchApplicationParameters { + /** Indices that are part of the Search Application. */ indices: IndexName[] + /** Analytics collection associated to the Search Application. */ analytics_collection_name?: Name + /** Search template to use on search operations. */ template?: SearchApplicationSearchApplicationTemplate } export interface SearchApplicationSearchApplicationTemplate { - script: Script | string + /** The associated mustache template. */ + script: Script | ScriptSource } export interface SearchApplicationDeleteRequest extends RequestBase { -/** The name of the search application to delete. */ + /** The name of the search application to delete. */ name: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -21249,7 +30647,7 @@ export interface SearchApplicationDeleteRequest extends RequestBase { export type SearchApplicationDeleteResponse = AcknowledgedResponseBase export interface SearchApplicationDeleteBehavioralAnalyticsRequest extends RequestBase { -/** The name of the analytics collection to be deleted */ + /** The name of the analytics collection to be deleted */ name: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -21260,7 +30658,7 @@ export interface SearchApplicationDeleteBehavioralAnalyticsRequest extends Reque export type SearchApplicationDeleteBehavioralAnalyticsResponse = AcknowledgedResponseBase export interface SearchApplicationGetRequest extends RequestBase { -/** The name of the search application */ + /** The name of the search application */ name: Name /** All values in `body` will be added to the request body. 
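+ * @example
+ * // Illustrative sketch of a search application definition of the shape this
+ * // API returns; the name, index, and mustache template below are hypothetical.
+ * const app: SearchApplicationSearchApplicationParameters = {
+ *   indices: ['my-index'],
+ *   template: {
+ *     script: {
+ *       source: '{ "query": { "match": { "title": "{{query_string}}" } } }',
+ *       params: { query_string: '*' }
+ *     }
+ *   }
+ * }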
*/ body?: string | { [key: string]: any } & { name?: never } @@ -21271,7 +30669,7 @@ export type SearchApplicationGetResponse = SearchApplicationSearchApplication export interface SearchApplicationGetBehavioralAnalyticsRequest extends RequestBase { -/** A list of analytics collections to limit the returned information */ + /** A list of analytics collections to limit the returned information */ name?: Name[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -21282,7 +30680,7 @@ export type SearchApplicationGetBehavioralAnalyticsResponse = Record export interface SearchApplicationListRequest extends RequestBase { -/** Query in the Lucene query string syntax. */ + /** Query in the Lucene query string syntax. */ q?: string /** Starting offset. */ from?: integer /** Specifies a max number of results to get. */ size?: integer /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { q?: never, from?: never, size?: never } @@ -21300,7 +30698,7 @@ export interface SearchApplicationListResponse { } export interface SearchApplicationPostBehavioralAnalyticsEventRequest extends RequestBase { -/** The name of the behavioral analytics collection. */ + /** The name of the behavioral analytics collection. */ collection_name: Name /** The analytics event type. */ event_type: SearchApplicationEventType /** Whether the response type has to include more details */ debug?: boolean /** The event payload data. */ payload?: any /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { collection_name?: never, event_type?: never, debug?: never, payload?: never } @@ -21319,7 +30717,7 @@ export interface SearchApplicationPostBehavioralAnalyticsEventResponse { } export interface SearchApplicationPutRequest extends RequestBase { -/** The name of the search application to be created or updated. */ + /** The name of the search application to be created or updated. */ name: Name /** If `true`, this request cannot replace or update existing Search Applications. */ create?: boolean search_application?: SearchApplicationSearchApplicationParameters /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, create?: never, search_application?: never } @@ -21335,11 +30733,12 @@ export interface SearchApplicationPutResponse { } export interface SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase extends AcknowledgedResponseBase { + /** The name of the analytics collection created or updated */ name: Name } export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestBase { -/** The name of the analytics collection to be created or updated. */ + /** The name of the analytics collection to be created or updated. */ name: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -21350,7 +30749,7 @@ export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase export interface SearchApplicationRenderQueryRequest extends RequestBase { -/** The name of the search application to render teh query for. */ + /** The name of the search application to render the query for. */ name: Name params?: Record /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, params?: never } @@ -21363,7 +30762,7 @@ export interface SearchApplicationRenderQueryResponse { } export interface SearchApplicationSearchRequest extends RequestBase { -/** The name of the search application to be searched. */ + /** The name of the search application to be searched. */ name: Name /** Determines whether aggregation names are prefixed by their respective types in the response.
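+ * @example
+ * // Illustrative sketch only; the application name and template parameters
+ * // are hypothetical.
+ * const request: SearchApplicationSearchRequest = {
+ *   name: 'my-search-app',
+ *   typed_keys: true,
+ *   params: { query_string: 'kayaking' }
+ * }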
*/ typed_keys?: boolean @@ -21384,7 +30783,7 @@ export interface SearchableSnapshotsCacheStatsNode { } export interface SearchableSnapshotsCacheStatsRequest extends RequestBase { -/** The names of the nodes in the cluster to target. */ + /** The names of the nodes in the cluster to target. */ node_id?: NodeIds master_timeout?: Duration /** All values in `body` will be added to the request body. */ @@ -21409,7 +30808,8 @@ export interface SearchableSnapshotsCacheStatsShared { } export interface SearchableSnapshotsClearCacheRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). */ + /** A comma-separated list of data streams, indices, and aliases to clear from the cache. + * It supports wildcards (`*`). */ index?: Indices /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards @@ -21432,17 +30832,20 @@ export interface SearchableSnapshotsMountMountedSnapshot { } export interface SearchableSnapshotsMountRequest extends RequestBase { -/** The name of the repository containing the snapshot of the index to mount. */ + /** The name of the repository containing the snapshot of the index to mount. */ repository: Name /** The name of the snapshot of the index to mount. */ snapshot: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration /** If true, the request blocks until the operation is complete. */ wait_for_completion?: boolean /** The mount option for the searchable snapshot index. */ storage?: string - /** The name of the index contained in the snapshot whose data is to be mounted. If no `renamed_index` is specified, this name will also be used to create the new index. */ + /** The name of the index contained in the snapshot whose data is to be mounted. + * If no `renamed_index` is specified, this name will also be used to create the new index. */ index: IndexName /** The name of the index that will be created. */ renamed_index?: IndexName @@ -21461,7 +30864,7 @@ export interface SearchableSnapshotsMountResponse { } export interface SearchableSnapshotsStatsRequest extends RequestBase { -/** A comma-separated list of data streams and indices to retrieve statistics for. */ + /** A comma-separated list of data streams and indices to retrieve statistics for. */ index?: Indices /** Return stats aggregated at cluster, index or shard level */ level?: SearchableSnapshotsStatsLevel @@ -21477,26 +30880,51 @@ export interface SearchableSnapshotsStatsResponse { } export interface SecurityAccess { + /** A list of indices permission entries for cross-cluster replication. */ replication?: SecurityReplicationAccess[] + /** A list of indices permission entries for cross-cluster search. */ search?: SecuritySearchAccess[] } export interface SecurityApiKey { + /** Id for the API key */ id: Id + /** Name of the API key. */ name: Name + /** The type of the API key (e.g. `rest` or `cross_cluster`). */ type: SecurityApiKeyType + /** Creation time for the API key in milliseconds. 
*/ creation: EpochTime + /** Expiration time for the API key in milliseconds. */ expiration?: EpochTime + /** Invalidation status for the API key. + * If the key has been invalidated, it has a value of `true`. Otherwise, it is `false`. */ invalidated: boolean + /** If the key has been invalidated, invalidation time in milliseconds. */ invalidation?: EpochTime + /** Principal for which this API key was created */ username: Username + /** Realm name of the principal for which this API key was created. */ realm: string + /** Realm type of the principal for which this API key was created */ realm_type?: string + /** Metadata of the API key */ metadata: Metadata + /** The role descriptors assigned to this API key when it was created or last updated. + * An empty role descriptor means the API key inherits the owner user’s permissions. */ role_descriptors?: Record + /** The owner user’s permissions associated with the API key. + * It is a point-in-time snapshot captured at creation and subsequent updates. + * An API key’s effective permissions are an intersection of its assigned privileges and the owner user’s permissions. */ limited_by?: Record[] + /** The access granted to cross-cluster API keys. + * The access is composed of permissions for cross cluster search and cross cluster replication. + * At least one of them must be specified. + * When specified, the new access assignment fully replaces the previously assigned access. */ access?: SecurityAccess + /** The profile uid for the API key owner principal, if requested and if it exists */ profile_uid?: string + /** Sorting values when using the `sort` parameter with the `security.query_api_keys` API. */ _sort?: SortResults } @@ -21507,13 +30935,18 @@ export interface SecurityApplicationGlobalUserPrivileges { } export interface SecurityApplicationPrivileges { + /** The name of the application to which this entry applies. */ application: string + /** A list of strings, where each element is the name of an application privilege or action. */ privileges: string[] + /** A list of resources to which the privileges are applied. */ resources: string[] } export interface SecurityBulkError { + /** The number of errors */ count: integer + /** Details about the errors, keyed by role name */ details: Record } @@ -21541,10 +30974,16 @@ export type SecurityGrantType = 'password' | 'access_token' export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'cross_cluster_replication' | 'cross_cluster_replication_internal' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_data_stream_lifecycle' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'none' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string export interface SecurityIndicesPrivileges { + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[] + /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role.
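+ * @example
+ * // Illustrative sketch only: read access to `logs-*` restricted to documents
+ * // matching a hypothetical department filter (document level security).
+ * const entry: SecurityIndicesPrivileges = {
+ *   names: ['logs-*'],
+ *   privileges: ['read'],
+ *   query: { term: { department: 'engineering' } }
+ * }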
*/ query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ allow_restricted_indices?: boolean } @@ -21562,65 +31001,117 @@ export interface SecurityRealmInfo { export type SecurityRemoteClusterPrivilege = 'monitor_enrich' | 'monitor_stats' export interface SecurityRemoteClusterPrivileges { + /** A list of cluster aliases to which the permissions in this entry apply. */ clusters: Names + /** The cluster level privileges that owners of the role have on the remote cluster. */ privileges: SecurityRemoteClusterPrivilege[] } export interface SecurityRemoteIndicesPrivileges { + /** A list of cluster aliases to which the permissions in this entry apply. */ clusters: Names + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[] + /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ allow_restricted_indices?: boolean } export interface SecurityRemoteUserIndicesPrivileges { + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity[] + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[] + /** Search queries that define the documents the user has access to. A document within the specified indices must match these queries for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery[] + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ allow_restricted_indices: boolean clusters: string[] } export interface SecurityReplicationAccess { + /** A list of indices (or index name patterns) to which the permissions in this entry apply. 
 */
   names: IndexName | IndexName[]
+  /** This needs to be set to true if the patterns in the names field should cover system indices. */
   allow_restricted_indices?: boolean
 }
 
 export interface SecurityRestriction {
+  /** A list of workflows to which the API key is restricted.
+   * NOTE: In order to use a role restriction, an API key must be created with a single role descriptor. */
   workflows: SecurityRestrictionWorkflow[]
 }
 
 export type SecurityRestrictionWorkflow = 'search_application_query' | string
 
 export interface SecurityRoleDescriptor {
+  /** A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */
   cluster?: SecurityClusterPrivilege[]
+  /** A list of indices permissions entries. */
   indices?: SecurityIndicesPrivileges[]
+  /** A list of indices permissions entries.
+   * @alias indices */
   index?: SecurityIndicesPrivileges[]
+  /** A list of indices permissions for remote clusters.
+   * @remarks This property is not supported on Elastic Cloud Serverless. */
   remote_indices?: SecurityRemoteIndicesPrivileges[]
+  /** A list of cluster permissions for remote clusters.
+   * NOTE: This is limited to a subset of the cluster permissions.
+   * @remarks This property is not supported on Elastic Cloud Serverless. */
   remote_cluster?: SecurityRemoteClusterPrivileges[]
+  /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges.
+   * @remarks This property is not supported on Elastic Cloud Serverless. */
   global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege
+  /** A list of application privilege entries */
   applications?: SecurityApplicationPrivileges[]
+  /** Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */
   metadata?: Metadata
+  /** A list of users that the API keys can impersonate.
+   * NOTE: In Elastic Cloud Serverless, the run-as feature is disabled.
+   * For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. */
   run_as?: string[]
+  /** Optional description of the role descriptor */
   description?: string
+  /** Restriction for when the role descriptor is allowed to be effective. */
   restriction?: SecurityRestriction
   transient_metadata?: Record
 }
 
 export interface SecurityRoleDescriptorRead {
+  /** A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */
   cluster: SecurityClusterPrivilege[]
+  /** A list of indices permissions entries. */
   indices: SecurityIndicesPrivileges[]
+  /** A list of indices permissions entries.
+   * @alias indices */
   index: SecurityIndicesPrivileges[]
+  /** A list of indices permissions for remote clusters.
+   * @remarks This property is not supported on Elastic Cloud Serverless. */
   remote_indices?: SecurityRemoteIndicesPrivileges[]
+  /** A list of cluster permissions for remote clusters.
+   * NOTE: This is limited to a subset of the cluster permissions.
+   * @remarks This property is not supported on Elastic Cloud Serverless. */
   remote_cluster?: SecurityRemoteClusterPrivileges[]
+  /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges.
+   * @remarks This property is not supported on Elastic Cloud Serverless. 
*/ global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege + /** A list of application privilege entries */ applications?: SecurityApplicationPrivileges[] + /** Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */ metadata?: Metadata + /** A list of users that the API keys can impersonate. */ run_as?: string[] + /** An optional description of the role descriptor. */ description?: string + /** A restriction for when the role descriptor is allowed to be effective. */ restriction?: SecurityRestriction transient_metadata?: Record } @@ -21642,27 +31133,40 @@ export interface SecurityRoleMappingRule { export interface SecurityRoleTemplate { format?: SecurityTemplateFormat - template: Script | string + template: Script | ScriptSource } export type SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer export interface SecurityRoleTemplateQuery { + /** When you create a role, you can specify a query that defines the document level security permissions. You can optionally + * use Mustache templates in the role query to insert the username of the current authenticated user into the role. + * Like other places in Elasticsearch that support templating or scripting, you can specify inline, stored, or file-based + * templates and define custom parameters. You access the details for the current authenticated user through the _user parameter. */ template?: SecurityRoleTemplateScript | SecurityRoleTemplateInlineQuery } export interface SecurityRoleTemplateScript { source?: SecurityRoleTemplateInlineQuery + /** The `id` for a stored script. */ id?: Id + /** Specifies any named parameters that are passed into the script as variables. + * Use parameters instead of hard-coded values to decrease compile time. */ params?: Record + /** Specifies the language the script is written in. */ lang?: ScriptLanguage options?: Record } export interface SecuritySearchAccess { + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ allow_restricted_indices?: boolean } @@ -21683,10 +31187,15 @@ export interface SecurityUser { } export interface SecurityUserIndicesPrivileges { + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity[] + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[] + /** Search queries that define the documents the user has access to. 
A document within the specified indices must match these queries for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery[] + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ allow_restricted_indices: boolean } @@ -21720,13 +31229,20 @@ export interface SecurityUserProfileWithMetadata extends SecurityUserProfile { } export interface SecurityActivateUserProfileRequest extends RequestBase { -/** The user's Elasticsearch access token or JWT. Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. */ + /** The user's Elasticsearch access token or JWT. + * Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. + * If you specify the `access_token` grant type, this parameter is required. + * It is not valid with other grant types. */ access_token?: string /** The type of grant. */ grant_type: SecurityGrantType - /** The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. */ + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ password?: string - /** The username that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. */ + /** The username that identifies the user. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ username?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { access_token?: never, grant_type?: never, password?: never, username?: never } @@ -21768,7 +31284,7 @@ export interface SecurityAuthenticateToken { } export interface SecurityBulkDeleteRoleRequest extends RequestBase { -/** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
*/ refresh?: Refresh /** An array of role names to delete */ names: string[] @@ -21779,13 +31295,16 @@ export interface SecurityBulkDeleteRoleRequest extends RequestBase { } export interface SecurityBulkDeleteRoleResponse { + /** Array of deleted roles */ deleted?: string[] + /** Array of roles that could not be found */ not_found?: string[] + /** Present if any deletes resulted in errors */ errors?: SecurityBulkError } export interface SecurityBulkPutRoleRequest extends RequestBase { -/** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh /** A dictionary of role name to RoleDescriptor objects to add or update */ roles: Record @@ -21796,20 +31315,34 @@ export interface SecurityBulkPutRoleRequest extends RequestBase { } export interface SecurityBulkPutRoleResponse { + /** Array of created roles */ created?: string[] + /** Array of updated roles */ updated?: string[] + /** Array of role names without any changes */ noop?: string[] + /** Present if any updates resulted in errors */ errors?: SecurityBulkError } export interface SecurityBulkUpdateApiKeysRequest extends RequestBase { -/** Expiration time for the API keys. By default, API keys never expire. This property can be omitted to leave the value unchanged. */ + /** Expiration time for the API keys. + * By default, API keys never expire. + * This property can be omitted to leave the value unchanged. */ expiration?: Duration /** The API key identifiers. */ ids: string | string[] - /** Arbitrary nested metadata to associate with the API keys. Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. Any information specified with this parameter fully replaces metadata previously associated with the API key. */ + /** Arbitrary nested metadata to associate with the API keys. + * Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. + * Any information specified with this parameter fully replaces metadata previously associated with the API key. */ metadata?: Metadata - /** The role descriptors to assign to the API keys. An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter. The structure of a role descriptor is the same as the request for the create API keys API. */ + /** The role descriptors to assign to the API keys. + * An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. + * You can assign new privileges by specifying them in this parameter. + * To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. 
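+   *
+   * A minimal usage sketch (assuming an instantiated `client` from this package; the key ids are illustrative, not from this changeset):
+   *
+   * ```ts
+   * import { Client } from '@elastic/elasticsearch'
+   * const client = new Client({ node: 'http://localhost:9200/' })
+   * // Remove all assigned privileges; each key falls back to its owner user's permissions.
+   * await client.security.bulkUpdateApiKeys({ ids: ['key-one', 'key-two'], role_descriptors: {} })
+   * ```
+   *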
+   * If an API key has no assigned privileges, it inherits the owner user's full permissions.
+   * The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter.
+   * The structure of a role descriptor is the same as the request for the create API keys API. */
   role_descriptors?: Record
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { expiration?: never, ids?: never, metadata?: never, role_descriptors?: never }
@@ -21824,13 +31357,17 @@ export interface SecurityBulkUpdateApiKeysResponse {
 }
 
 export interface SecurityChangePasswordRequest extends RequestBase {
-/** The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. */
+  /** The user whose password you want to change. If you do not specify this
+   * parameter, the password is changed for the current user. */
   username?: Username
   /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */
   refresh?: Refresh
   /** The new password value. Passwords must be at least 6 characters long. */
   password?: Password
-  /** A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting. */
+  /** A hash of the new password value. This must be produced using the same
+   * hashing algorithm as has been configured for password storage. For more details,
+   * see the explanation of the `xpack.security.authc.password_hashing.algorithm`
+   * setting. */
   password_hash?: string
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { username?: never, refresh?: never, password?: never, password_hash?: never }
@@ -21842,7 +31379,9 @@ export interface SecurityChangePasswordResponse {
 }
 
 export interface SecurityClearApiKeyCacheRequest extends RequestBase {
-/** Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. */
+  /** Comma-separated list of API key IDs to evict from the API key cache.
+   * To evict all API keys, use `*`.
+   * Does not support other wildcard patterns. */
   ids: Ids
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { ids?: never }
@@ -21857,7 +31396,9 @@ export interface SecurityClearApiKeyCacheResponse {
 }
 
 export interface SecurityClearCachedPrivilegesRequest extends RequestBase {
-/** A comma-separated list of applications. To clear all applications, use an asterism (`*`). It does not support other wildcard patterns. */
+  /** A comma-separated list of applications.
+   * To clear all applications, use an asterisk (`*`).
+   * It does not support other wildcard patterns. */
   application: Name
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { application?: never }
@@ -21872,9 +31413,12 @@ export interface SecurityClearCachedPrivilegesResponse {
 }
 
 export interface SecurityClearCachedRealmsRequest extends RequestBase {
-/** A comma-separated list of realms. To clear all realms, use an asterisk (`*`). It does not support other wildcard patterns. */
+  /** A comma-separated list of realms. 
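+   *
+   * A minimal sketch (assuming an instantiated `client` as in the earlier example; the realm names are illustrative):
+   *
+   * ```ts
+   * // Evict cached users from two specific realms.
+   * await client.security.clearCachedRealms({ realms: 'default_file,ldap1' })
+   * ```
+   *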
+ * To clear all realms, use an asterisk (`*`). + * It does not support other wildcard patterns. */ realms: Names - /** A comma-separated list of the users to clear from the cache. If you do not specify this parameter, the API evicts all users from the user cache. */ + /** A comma-separated list of the users to clear from the cache. + * If you do not specify this parameter, the API evicts all users from the user cache. */ usernames?: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { realms?: never, usernames?: never } @@ -21889,7 +31433,9 @@ export interface SecurityClearCachedRealmsResponse { } export interface SecurityClearCachedRolesRequest extends RequestBase { -/** A comma-separated list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard patterns. */ + /** A comma-separated list of roles to evict from the role cache. + * To evict all roles, use an asterisk (`*`). + * It does not support other wildcard patterns. */ name: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -21904,11 +31450,13 @@ export interface SecurityClearCachedRolesResponse { } export interface SecurityClearCachedServiceTokensRequest extends RequestBase { -/** The namespace, which is a top-level grouping of service accounts. */ + /** The namespace, which is a top-level grouping of service accounts. */ namespace: Namespace /** The name of the service, which must be unique within its namespace. */ service: Service - /** A comma-separated list of token names to evict from the service account token caches. Use a wildcard (`*`) to evict all tokens that belong to a service account. It does not support other wildcard patterns. */ + /** A comma-separated list of token names to evict from the service account token caches. + * Use a wildcard (`*`) to evict all tokens that belong to a service account. + * It does not support other wildcard patterns. */ name: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never } @@ -21923,13 +31471,22 @@ export interface SecurityClearCachedServiceTokensResponse { } export interface SecurityCreateApiKeyRequest extends RequestBase { -/** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh - /** The expiration time for the API key. By default, API keys never expire. */ + /** The expiration time for the API key. + * By default, API keys never expire. */ expiration?: Duration /** A name for the API key. */ name?: Name - /** An array of role descriptors for this API key. When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. 
The structure of role descriptor is the same as the request for the create role API. For more details, refer to the create or update roles API. NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. In this case, you must explicitly specify a role descriptor with no privileges. The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. */ + /** An array of role descriptors for this API key. + * When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. + * If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. + * The structure of role descriptor is the same as the request for the create role API. + * For more details, refer to the create or update roles API. + * + * NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. + * In this case, you must explicitly specify a role descriptor with no privileges. + * The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. */ role_descriptors?: Record /** Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata @@ -21940,19 +31497,34 @@ export interface SecurityCreateApiKeyRequest extends RequestBase { } export interface SecurityCreateApiKeyResponse { + /** Generated API key. */ api_key: string + /** Expiration in milliseconds for the API key. */ expiration?: long + /** Unique ID for this API key. */ id: Id + /** Specifies the name for this API key. */ name: Name + /** API key credentials which is the base64-encoding of + * the UTF-8 representation of `id` and `api_key` joined + * by a colon (`:`). */ encoded: string } export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase { -/** The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At least one of them must be specified. NOTE: No explicit privileges should be specified for either search or replication access. The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. */ + /** The access to be granted to this API key. + * The access is composed of permissions for cross-cluster search and cross-cluster replication. + * At least one of them must be specified. + * + * NOTE: No explicit privileges should be specified for either search or replication access. + * The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. */ access: SecurityAccess - /** Expiration time for the API key. By default, API keys never expire. */ + /** Expiration time for the API key. + * By default, API keys never expire. */ expiration?: Duration - /** Arbitrary metadata that you want to associate with the API key. It supports nested data structure. 
Within the metadata object, keys beginning with `_` are reserved for system usage. */ + /** Arbitrary metadata that you want to associate with the API key. + * It supports nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata /** Specifies the name for this API key. */ name: Name @@ -21963,19 +31535,33 @@ export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase { } export interface SecurityCreateCrossClusterApiKeyResponse { + /** Generated API key. */ api_key: string + /** Expiration in milliseconds for the API key. */ expiration?: DurationValue + /** Unique ID for this API key. */ id: Id + /** Specifies the name for this API key. */ name: Name + /** API key credentials which is the base64-encoding of + * the UTF-8 representation of `id` and `api_key` joined + * by a colon (`:`). */ encoded: string } export interface SecurityCreateServiceTokenRequest extends RequestBase { -/** The name of the namespace, which is a top-level grouping of service accounts. */ + /** The name of the namespace, which is a top-level grouping of service accounts. */ namespace: Namespace /** The name of the service. */ service: Service - /** The name for the service account token. If omitted, a random name will be generated. Token names must be at least one and no more than 256 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. NOTE: Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. */ + /** The name for the service account token. + * If omitted, a random name will be generated. + * + * Token names must be at least one and no more than 256 characters. + * They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. + * + * NOTE: Token names must be unique in the context of the associated service account. + * They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. */ name?: Name /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -22016,7 +31602,11 @@ export interface SecurityDelegatePkiAuthenticationRealm { } export interface SecurityDelegatePkiRequest extends RequestBase { -/** The X509Certificate chain, which is represented as an ordered string array. Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding. The first element is the target certificate that contains the subject distinguished name that is requesting access. This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. */ + /** The X509Certificate chain, which is represented as an ordered string array. + * Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding. + * + * The first element is the target certificate that contains the subject distinguished name that is requesting access. 
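+   *
+   * A sketch (assuming an instantiated `client`; the certificate variables are illustrative placeholders):
+   *
+   * ```ts
+   * declare const leafDer: string, intermediateDer: string // base64-encoded DER, supplied by you
+   * // The leaf (target) certificate comes first, followed by the certificate that signed it.
+   * await client.security.delegatePki({ x509_certificate_chain: [leafDer, intermediateDer] })
+   * ```
+   *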
+ * This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. */ x509_certificate_chain: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { x509_certificate_chain?: never } @@ -22025,8 +31615,11 @@ export interface SecurityDelegatePkiRequest extends RequestBase { } export interface SecurityDelegatePkiResponse { + /** An access token associated with the subject distinguished name of the client's certificate. */ access_token: string + /** The amount of time (in seconds) before the token expires. */ expires_in: long + /** The type of token. */ type: string authentication?: SecurityDelegatePkiAuthentication } @@ -22036,7 +31629,8 @@ export interface SecurityDeletePrivilegesFoundStatus { } export interface SecurityDeletePrivilegesRequest extends RequestBase { -/** The name of the application. Application privileges are always associated with exactly one application. */ + /** The name of the application. + * Application privileges are always associated with exactly one application. */ application: Name /** The name of the privilege. */ name: Names @@ -22051,7 +31645,7 @@ export interface SecurityDeletePrivilegesRequest extends RequestBase { export type SecurityDeletePrivilegesResponse = Record> export interface SecurityDeleteRoleRequest extends RequestBase { -/** The name of the role. */ + /** The name of the role. */ name: Name /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -22062,11 +31656,14 @@ export interface SecurityDeleteRoleRequest extends RequestBase { } export interface SecurityDeleteRoleResponse { + /** If the role is successfully deleted, `found` is `true`. + * Otherwise, `found` is `false`. */ found: boolean } export interface SecurityDeleteRoleMappingRequest extends RequestBase { -/** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ + /** The distinct name that identifies the role mapping. + * The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ name: Name /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -22077,11 +31674,13 @@ export interface SecurityDeleteRoleMappingRequest extends RequestBase { } export interface SecurityDeleteRoleMappingResponse { + /** If the mapping is successfully deleted, `found` is `true`. + * Otherwise, `found` is `false`. */ found: boolean } export interface SecurityDeleteServiceTokenRequest extends RequestBase { -/** The namespace, which is a top-level grouping of service accounts. */ + /** The namespace, which is a top-level grouping of service accounts. */ namespace: Namespace /** The service name. */ service: Service @@ -22096,11 +31695,13 @@ export interface SecurityDeleteServiceTokenRequest extends RequestBase { } export interface SecurityDeleteServiceTokenResponse { + /** If the service account token is successfully deleted, the request returns `{"found": true}`. 
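+   *
+   * A usage sketch (assuming an instantiated `client`; the token coordinates are illustrative):
+   *
+   * ```ts
+   * const { found } = await client.security.deleteServiceToken({ namespace: 'elastic', service: 'fleet-server', name: 'token42' })
+   * ```
+   *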
+ * Otherwise, the response will have status code 404 and `found` is set to `false`. */ found: boolean } export interface SecurityDeleteUserRequest extends RequestBase { -/** An identifier for the user. */ + /** An identifier for the user. */ username: Username /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -22111,11 +31712,13 @@ export interface SecurityDeleteUserRequest extends RequestBase { } export interface SecurityDeleteUserResponse { + /** If the user is successfully deleted, the request returns `{"found": true}`. + * Otherwise, `found` is set to `false`. */ found: boolean } export interface SecurityDisableUserRequest extends RequestBase { -/** An identifier for the user. */ + /** An identifier for the user. */ username: Username /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -22129,9 +31732,11 @@ export interface SecurityDisableUserResponse { } export interface SecurityDisableUserProfileRequest extends RequestBase { -/** Unique identifier for the user profile. */ + /** Unique identifier for the user profile. */ uid: SecurityUserProfileId - /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. */ + /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', it does nothing with refreshes. */ refresh?: Refresh /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { uid?: never, refresh?: never } @@ -22142,7 +31747,7 @@ export interface SecurityDisableUserProfileRequest extends RequestBase { export type SecurityDisableUserProfileResponse = AcknowledgedResponseBase export interface SecurityEnableUserRequest extends RequestBase { -/** An identifier for the user. */ + /** An identifier for the user. */ username: Username /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -22156,9 +31761,12 @@ export interface SecurityEnableUserResponse { } export interface SecurityEnableUserProfileRequest extends RequestBase { -/** A unique identifier for the user profile. */ + /** A unique identifier for the user profile. */ uid: SecurityUserProfileId - /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. */ + /** If 'true', Elasticsearch refreshes the affected shards to make this operation + * visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', nothing is done with refreshes. */ refresh?: Refresh /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { uid?: never, refresh?: never } @@ -22177,11 +31785,16 @@ export interface SecurityEnrollKibanaRequest extends RequestBase { export interface SecurityEnrollKibanaResponse { token: SecurityEnrollKibanaToken + /** The CA certificate used to sign the node certificates that Elasticsearch uses for TLS on the HTTP layer. + * The certificate is returned as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ http_ca: string } export interface SecurityEnrollKibanaToken { + /** The name of the bearer token for the `elastic/kibana` service account. */ name: string + /** The value of the bearer token for the `elastic/kibana` service account. + * Use this value to authenticate the service account with Elasticsearch. */ value: string } @@ -22193,26 +31806,41 @@ export interface SecurityEnrollNodeRequest extends RequestBase { } export interface SecurityEnrollNodeResponse { + /** The CA private key that can be used by the new node in order to sign its certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER encoding of the key. */ http_ca_key: string + /** The CA certificate that can be used by the new node in order to sign its certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ http_ca_cert: string + /** The CA certificate that is used to sign the TLS certificate for the transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ transport_ca_cert: string + /** The private key that the node can use for TLS for its transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the key. */ transport_key: string + /** The certificate that the node can use for TLS for its transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ transport_cert: string + /** A list of transport addresses in the form of `host:port` for the nodes that are already members of the cluster. */ nodes_addresses: string[] } export interface SecurityGetApiKeyRequest extends RequestBase { -/** An API key id. This parameter cannot be used with any of `name`, `realm_name` or `username`. */ + /** An API key id. + * This parameter cannot be used with any of `name`, `realm_name` or `username`. */ id?: Id - /** An API key name. This parameter cannot be used with any of `id`, `realm_name` or `username`. It supports prefix search with wildcard. */ + /** An API key name. + * This parameter cannot be used with any of `id`, `realm_name` or `username`. + * It supports prefix search with wildcard. */ name?: Name - /** A boolean flag that can be used to query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. */ + /** A boolean flag that can be used to query API keys owned by the currently authenticated user. + * The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. */ owner?: boolean - /** The name of an authentication realm. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ + /** The name of an authentication realm. + * This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ realm_name?: Name - /** The username of a user. 
This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ + /** The username of a user. + * This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ username?: Username - /** Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors. */ + /** Return the snapshot of the owner user's role descriptors + * associated with the API key. An API key's actual + * permission is the intersection of its assigned role + * descriptors and the owner user's role descriptors. */ with_limited_by?: boolean /** A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. */ active_only?: boolean @@ -22236,15 +31864,22 @@ export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { } export interface SecurityGetBuiltinPrivilegesResponse { + /** The list of cluster privileges that are understood by this version of Elasticsearch. */ cluster: SecurityClusterPrivilege[] + /** The list of index privileges that are understood by this version of Elasticsearch. */ index: IndexName[] + /** The list of remote_cluster privileges that are understood by this version of Elasticsearch. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster: SecurityRemoteClusterPrivilege[] } export interface SecurityGetPrivilegesRequest extends RequestBase { -/** The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. */ + /** The name of the application. + * Application privileges are always associated with exactly one application. + * If you do not specify this parameter, the API returns information about all privileges for all applications. */ application?: Name - /** The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. */ + /** The name of the privilege. + * If you do not specify this parameter, the API returns information about all privileges for the requested application. */ name?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { application?: never, name?: never } @@ -22255,7 +31890,9 @@ export interface SecurityGetPrivilegesRequest extends RequestBase { export type SecurityGetPrivilegesResponse = Record> export interface SecurityGetRoleRequest extends RequestBase { -/** The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about all roles. */ + /** The name of the role. + * You can specify multiple roles as a comma-separated list. + * If you do not specify this parameter, the API returns information about all roles. */ name?: Names /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { name?: never } @@ -22268,7 +31905,9 @@ export type SecurityGetRoleResponse = Record export interface SecurityGetRoleRole { cluster: SecurityClusterPrivilege[] indices: SecurityIndicesPrivileges[] + /** @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[] + /** @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[] metadata: Metadata description?: string @@ -22280,7 +31919,7 @@ export interface SecurityGetRoleRole { } export interface SecurityGetRoleMappingRequest extends RequestBase { -/** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a comma-separated list. If you do not specify this parameter, the API returns information about all role mappings. */ + /** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a comma-separated list. If you do not specify this parameter, the API returns information about all role mappings. */ name?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -22291,9 +31930,12 @@ export interface SecurityGetRoleMappingRequest extends RequestBase { export type SecurityGetRoleMappingResponse = Record export interface SecurityGetServiceAccountsRequest extends RequestBase { -/** The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter. */ + /** The name of the namespace. + * Omit this parameter to retrieve information about all service accounts. + * If you omit this parameter, you must also omit the `service` parameter. */ namespace?: Namespace - /** The service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. */ + /** The service name. + * Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. */ service?: Service /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { namespace?: never, service?: never } @@ -22308,7 +31950,9 @@ export interface SecurityGetServiceAccountsRoleDescriptorWrapper { } export interface SecurityGetServiceCredentialsNodesCredentials { + /** General status showing how nodes respond to the above collection request */ _nodes: NodeStatistics + /** File-backed tokens collected from all nodes */ file_tokens: Record } @@ -22317,7 +31961,7 @@ export interface SecurityGetServiceCredentialsNodesCredentialsFileToken { } export interface SecurityGetServiceCredentialsRequest extends RequestBase { -/** The name of the namespace. */ + /** The name of the namespace. */ namespace: Namespace /** The service name. */ service: Name @@ -22331,11 +31975,13 @@ export interface SecurityGetServiceCredentialsResponse { service_account: string count: integer tokens: Record + /** Service account credentials collected from all nodes of the cluster. 
*/ nodes_credentials: SecurityGetServiceCredentialsNodesCredentials } export interface SecurityGetSettingsRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -22344,8 +31990,11 @@ export interface SecurityGetSettingsRequest extends RequestBase { } export interface SecurityGetSettingsResponse { + /** Settings for the index used for most security configuration, including native realm users and roles configured with the API. */ security: SecuritySecuritySettings + /** Settings for the index used to store profile information. */ 'security-profile': SecuritySecuritySettings + /** Settings for the index used to store tokens. */ 'security-tokens': SecuritySecuritySettings } @@ -22364,17 +32013,27 @@ export interface SecurityGetTokenAuthenticationProvider { } export interface SecurityGetTokenRequest extends RequestBase { -/** The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. */ + /** The type of grant. + * Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. */ grant_type?: SecurityGetTokenAccessTokenGrantType - /** The scope of the token. Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. */ + /** The scope of the token. + * Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. */ scope?: string - /** The user's password. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. */ + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ password?: Password - /** The base64 encoded kerberos ticket. If you specify the `_kerberos` grant type, this parameter is required. This parameter is not valid with any other supported grant type. */ + /** The base64 encoded kerberos ticket. + * If you specify the `_kerberos` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ kerberos_ticket?: string - /** The string that was returned when you created the token, which enables you to extend its life. If you specify the `refresh_token` grant type, this parameter is required. This parameter is not valid with any other supported grant type. */ + /** The string that was returned when you created the token, which enables you to extend its life. + * If you specify the `refresh_token` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ refresh_token?: string - /** The username that identifies the user. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. */ + /** The username that identifies the user. + * If you specify the `password` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. 
*/ username?: Username /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { grant_type?: never, scope?: never, password?: never, kerberos_ticket?: never, refresh_token?: never, username?: never } @@ -22398,7 +32057,7 @@ export interface SecurityGetTokenUserRealm { } export interface SecurityGetUserRequest extends RequestBase { -/** An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. */ + /** An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. */ username?: Username | Username[] /** Determines whether to retrieve the user profile UID, if it exists, for the users. */ with_profile_uid?: boolean @@ -22411,7 +32070,7 @@ export interface SecurityGetUserRequest extends RequestBase { export type SecurityGetUserResponse = Record export interface SecurityGetUserPrivilegesRequest extends RequestBase { -/** The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. */ + /** The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. */ application?: Name /** The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. */ priviledge?: Name @@ -22438,9 +32097,12 @@ export interface SecurityGetUserProfileGetUserProfileErrors { } export interface SecurityGetUserProfileRequest extends RequestBase { -/** A unique identifier for the user profile. */ + /** A unique identifier for the user profile. */ uid: SecurityUserProfileId | SecurityUserProfileId[] - /** A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=` to retrieve content nested under the specified ``. By default returns no `data` content. */ + /** A comma-separated list of filters for the `data` field of the profile document. + * To return all content use `data=*`. + * To return a subset of content use `data=` to retrieve content nested under the specified ``. + * By default returns no `data` content. */ data?: string | string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { uid?: never, data?: never } @@ -22449,6 +32111,9 @@ export interface SecurityGetUserProfileRequest extends RequestBase { } export interface SecurityGetUserProfileResponse { + /** A successful call returns the JSON representation of the user profile and its internal versioning numbers. + * The API returns an empty object if no profile document is found for the provided `uid`. + * The content of the data field is not returned by default to avoid deserializing a potential large payload. */ profiles: SecurityUserProfileWithMetadata[] errors?: SecurityGetUserProfileGetUserProfileErrors } @@ -22457,21 +32122,34 @@ export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' export interface SecurityGrantApiKeyGrantApiKey { name: Name + /** Expiration time for the API key. By default, API keys never expire. 
*/ expiration?: DurationLarge + /** The role descriptors for this API key. + * When it is not specified or is an empty array, the API key has a point in time snapshot of permissions of the specified user or access token. + * If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the permissions of the user or access token. */ role_descriptors?: Record | Record[] + /** Arbitrary metadata that you want to associate with the API key. + * It supports nested data structure. + * Within the `metadata` object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata } export interface SecurityGrantApiKeyRequest extends RequestBase { -/** The API key. */ + /** The API key. */ api_key: SecurityGrantApiKeyGrantApiKey /** The type of grant. Supported grant types are: `access_token`, `password`. */ grant_type: SecurityGrantApiKeyApiKeyGrantType - /** The user's access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. */ + /** The user's access token. + * If you specify the `access_token` grant type, this parameter is required. + * It is not valid with other grant types. */ access_token?: string - /** The user name that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. */ + /** The user name that identifies the user. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ username?: Username - /** The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. */ + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ password?: Password /** The name of the user to be impersonated. */ run_as?: Username @@ -22490,23 +32168,32 @@ export interface SecurityGrantApiKeyResponse { } export interface SecurityHasPrivilegesApplicationPrivilegesCheck { + /** The name of the application. */ application: string + /** A list of the privileges that you want to check for the specified resources. + * It may be either application privilege names or the names of actions that are granted by those privileges */ privileges: string[] + /** A list of resource names against which the privileges should be checked. */ resources: string[] } export type SecurityHasPrivilegesApplicationsPrivileges = Record export interface SecurityHasPrivilegesIndexPrivilegesCheck { + /** A list of indices. */ names: Indices + /** A list of the privileges that you want to check for the specified indices. */ privileges: SecurityIndexPrivilege[] + /** This needs to be set to `true` (default is `false`) if using wildcards or regexps for patterns that cover restricted indices. + * Implicitly, restricted indices do not match index patterns because restricted indices usually have limited privileges and including them in pattern tests would render most such tests false. + * If restricted indices are explicitly included in the names list, privileges will be checked against them regardless of the value of `allow_restricted_indices`. 
*/ allow_restricted_indices?: boolean } export type SecurityHasPrivilegesPrivileges = Record export interface SecurityHasPrivilegesRequest extends RequestBase { -/** Username */ + /** Username */ user?: Name application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] /** A list of the cluster privileges that you want to check. */ @@ -22535,12 +32222,13 @@ export interface SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors export interface SecurityHasPrivilegesUserProfilePrivilegesCheck { application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] + /** A list of the cluster privileges that you want to check. */ cluster?: SecurityClusterPrivilege[] index?: SecurityHasPrivilegesIndexPrivilegesCheck[] } export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase { -/** A list of profile IDs. The privileges are checked for associated users of the profiles. */ + /** A list of profile IDs. The privileges are checked for associated users of the profiles. */ uids: SecurityUserProfileId[] /** An object containing all the privileges to be checked. */ privileges: SecurityHasPrivilegesUserProfilePrivilegesCheck @@ -22551,21 +32239,34 @@ export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase { } export interface SecurityHasPrivilegesUserProfileResponse { + /** The subset of the requested profile IDs of the users that + * have all the requested privileges. */ has_privilege_uids: SecurityUserProfileId[] + /** The subset of the requested profile IDs for which an error + * was encountered. It does not include the missing profile IDs + * or the profile IDs of the users that do not have all the + * requested privileges. This field is absent if empty. */ errors?: SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors } export interface SecurityInvalidateApiKeyRequest extends RequestBase { id?: Id - /** A list of API key ids. This parameter cannot be used with any of `name`, `realm_name`, or `username`. */ + /** A list of API key ids. + * This parameter cannot be used with any of `name`, `realm_name`, or `username`. */ ids?: Id[] - /** An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`. */ + /** An API key name. + * This parameter cannot be used with any of `ids`, `realm_name` or `username`. */ name?: Name - /** Query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. */ + /** Query API keys owned by the currently authenticated user. + * The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. + * + * NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. */ owner?: boolean - /** The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. */ + /** The name of an authentication realm. + * This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. */ realm_name?: string - /** The username of a user. This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. */ + /** The username of a user. 
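The privilege checks defined above compose as follows; a hedged sketch reusing the `client` instance from the previous snippet, with a placeholder index name.

[source,ts]
----
// Does the calling user hold `monitor` on the cluster and `read` on an index?
const check = await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [{ names: ['my-index'], privileges: ['read'] }] // placeholder index
})
console.log(check)
----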
+ * This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. */ username?: Username /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, ids?: never, name?: never, owner?: never, realm_name?: never, username?: never } @@ -22574,20 +32275,29 @@ export interface SecurityInvalidateApiKeyRequest extends RequestBase { } export interface SecurityInvalidateApiKeyResponse { + /** The number of errors that were encountered when invalidating the API keys. */ error_count: integer + /** Details about the errors. + * This field is not present in the response when `error_count` is `0`. */ error_details?: ErrorCause[] + /** The IDs of the API keys that were invalidated as part of this request. */ invalidated_api_keys: string[] + /** The IDs of the API keys that were already invalidated. */ previously_invalidated_api_keys: string[] } export interface SecurityInvalidateTokenRequest extends RequestBase { -/** An access token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. */ + /** An access token. + * This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. */ token?: string - /** A refresh token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. */ + /** A refresh token. + * This parameter cannot be used if any of `token`, `realm_name`, or `username` are used. */ refresh_token?: string - /** The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`. */ + /** The name of an authentication realm. + * This parameter cannot be used with either `refresh_token` or `token`. */ realm_name?: Name - /** The username of a user. This parameter cannot be used with either `refresh_token` or `token`. */ + /** The username of a user. + * This parameter cannot be used with either `refresh_token` or `token`. */ username?: Username /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { token?: never, refresh_token?: never, realm_name?: never, username?: never } @@ -22596,20 +32306,29 @@ export interface SecurityInvalidateTokenRequest extends RequestBase { } export interface SecurityInvalidateTokenResponse { + /** The number of errors that were encountered when invalidating the tokens. */ error_count: long + /** Details about the errors. + * This field is not present in the response when `error_count` is `0`. */ error_details?: ErrorCause[] + /** The number of tokens that were invalidated as part of this request. */ invalidated_tokens: long + /** The number of tokens that were already invalidated. */ previously_invalidated_tokens: long } export interface SecurityOidcAuthenticateRequest extends RequestBase { -/** Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ + /** Associate a client session with an ID token and mitigate replay attacks. + * This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ nonce: string - /** The name of the OpenID Connect realm. This property is useful in cases where multiple realms are defined. 
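A small sketch of consuming the invalidation response fields documented above, under the same client assumption; the key ID is a placeholder.

[source,ts]
----
// Invalidate one API key by ID and inspect the response fields defined above.
const res = await client.security.invalidateApiKey({ ids: ['<api-key-id>'] })
if (res.error_count === 0) {
  console.log('invalidated:', res.invalidated_api_keys)
}
----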
*/ + /** The name of the OpenID Connect realm. + * This property is useful in cases where multiple realms are defined. */ realm?: string - /** The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. */ + /** The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. + * This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. */ redirect_uri: string - /** Maintain state between the authentication request and the response. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ + /** Maintain state between the authentication request and the response. + * This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ state: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { nonce?: never, realm?: never, redirect_uri?: never, state?: never } @@ -22618,14 +32337,18 @@ export interface SecurityOidcAuthenticateRequest extends RequestBase { } export interface SecurityOidcAuthenticateResponse { + /** The Elasticsearch access token. */ access_token: string + /** The duration (in seconds) of the tokens. */ expires_in: integer + /** The Elasticsearch refresh token. */ refresh_token: string + /** The type of token. */ type: string } export interface SecurityOidcLogoutRequest extends RequestBase { -/** The access token to be invalidated. */ + /** The access token to be invalidated. */ token: string /** The refresh token to be invalidated. */ refresh_token?: string @@ -22636,19 +32359,27 @@ export interface SecurityOidcLogoutRequest extends RequestBase { } export interface SecurityOidcLogoutResponse { + /** A URI that points to the end session endpoint of the OpenID Connect Provider with all the parameters of the logout request as HTTP GET parameters. */ redirect: string } export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase { -/** In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. It cannot be specified when *realm* is specified. One of *realm* or *iss* is required. */ + /** In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. + * It cannot be specified when *realm* is specified. + * One of *realm* or *iss* is required. */ iss?: string - /** In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. This parameter is not valid when *realm* is specified. */ + /** In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. + * This parameter is not valid when *realm* is specified. 
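The OIDC authenticate and logout shapes above chain together roughly as follows; this is a sketch only, and every angle-bracketed value would come from a prior `/_security/oidc/prepare` call or from the OpenID Connect Provider.

[source,ts]
----
// Exchange the values produced by the prepare step and the OP redirect
// for Elasticsearch tokens; all concrete values are placeholders.
const auth = await client.security.oidcAuthenticate({
  nonce: '<nonce-from-prepare>',
  state: '<state-from-prepare>',
  redirect_uri: '<full-callback-url-as-received>'
})

// Later, invalidate the access and refresh tokens in a single call.
await client.security.oidcLogout({
  token: auth.access_token,
  refresh_token: auth.refresh_token
})
----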
*/ login_hint?: string - /** The value used to associate a client session with an ID token and to mitigate replay attacks. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ + /** The value used to associate a client session with an ID token and to mitigate replay attacks. + * If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ nonce?: string - /** The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. It cannot be specified when *iss* is specified. One of *realm* or *iss* is required. */ + /** The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. + * It cannot be specified when *iss* is specified. + * One of *realm* or *iss* is required. */ realm?: string - /** The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ + /** The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. + * If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ state?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { iss?: never, login_hint?: never, nonce?: never, realm?: never, state?: never } @@ -22659,6 +32390,7 @@ export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase { export interface SecurityOidcPrepareAuthenticationResponse { nonce: string realm: string + /** A URI that points to the authorization endpoint of the OpenID Connect Provider with all the parameters of the authentication request as HTTP GET parameters. */ redirect: string state: string } @@ -22671,7 +32403,7 @@ export interface SecurityPutPrivilegesActions { } export interface SecurityPutPrivilegesRequest extends RequestBase { -/** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh privileges?: Record> /** All values in `body` will be added to the request body. */ @@ -22683,7 +32415,7 @@ export interface SecurityPutPrivilegesRequest extends RequestBase { export type SecurityPutPrivilegesResponse = Record> export interface SecurityPutRoleRequest extends RequestBase { -/** The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. */ + /** The name of the role that is being created or updated. 
On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. */ name: Name /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -22691,13 +32423,19 @@ export interface SecurityPutRoleRequest extends RequestBase { applications?: SecurityApplicationPrivileges[] /** A list of cluster privileges. These privileges define the cluster-level actions for users with this role. */ cluster?: SecurityClusterPrivilege[] - /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. */ + /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. + * @remarks This property is not supported on Elastic Cloud Serverless. */ global?: Record /** A list of indices permissions entries. */ indices?: SecurityIndicesPrivileges[] - /** A list of remote indices permissions entries. NOTE: Remote indices are effective for remote clusters configured with the API key based model. They have no effect for remote clusters configured with the certificate based model. */ + /** A list of remote indices permissions entries. + * + * NOTE: Remote indices are effective for remote clusters configured with the API key based model. + * They have no effect for remote clusters configured with the certificate based model. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[] - /** A list of remote cluster permissions entries. */ + /** A list of remote cluster permissions entries. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[] /** Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. */ metadata?: Metadata @@ -22714,23 +32452,29 @@ export interface SecurityPutRoleRequest extends RequestBase { } export interface SecurityPutRoleResponse { + /** When an existing role is updated, `created` is set to `false`. */ role: SecurityCreatedStatus } export interface SecurityPutRoleMappingRequest extends RequestBase { -/** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ + /** The distinct name that identifies the role mapping. + * The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ name: Name /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh /** Mappings that have `enabled` set to `false` are ignored when role mapping is performed. */ enabled?: boolean - /** Additional metadata that helps define which roles are assigned to each user. 
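A brief sketch against the put role request defined above, with illustrative role and index names only.

[source,ts]
----
// Create or update a role that can monitor the cluster and read log indices.
await client.security.putRole({
  name: 'logs_reader',
  cluster: ['monitor'],
  indices: [{ names: ['logs-*'], privileges: ['read', 'view_index_metadata'] }]
})
----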
Within the metadata object, keys beginning with `_` are reserved for system usage. */ + /** Additional metadata that helps define which roles are assigned to each user. + * Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata - /** A list of role names that are granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. */ + /** A list of role names that are granted to the users that match the role mapping rules. + * Exactly one of `roles` or `role_templates` must be specified. */ roles?: string[] - /** A list of Mustache templates that will be evaluated to determine the roles names that should granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. */ + /** A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules. + * Exactly one of `roles` or `role_templates` must be specified. */ role_templates?: SecurityRoleTemplate[] - /** The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL. */ + /** The rules that determine which users should be matched by the mapping. + * A rule is a logical condition that is expressed by using a JSON DSL. */ rules?: SecurityRoleMappingRule run_as?: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, refresh?: never, enabled?: never, metadata?: never, roles?: never, role_templates?: never, rules?: never, run_as?: never } @@ -22745,9 +32489,14 @@ export interface SecurityPutRoleMappingResponse { } export interface SecurityPutUserRequest extends RequestBase { -/** An identifier for the user. NOTE: Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed. */ + /** An identifier for the user. + * + * NOTE: Usernames must be at least 1 and no more than 507 characters. + * They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. + * Leading or trailing whitespace is not allowed. */ username: Username - /** Valid values are `true`, `false`, and `wait_for`. These values have the same meaning as in the index API, but the default value for this API is true. */ + /** Valid values are `true`, `false`, and `wait_for`. + * These values have the same meaning as in the index API, but the default value for this API is true. */ refresh?: Refresh /** The email of the user. */ email?: string | null /** The full name of the user. */ full_name?: string | null /** Arbitrary metadata that you want to associate with the user. */ metadata?: Metadata - /** The user's password. Passwords must be at least 6 characters long. When adding a user, one of `password` or `password_hash` is required. When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password */ + /** The user's password. + * Passwords must be at least 6 characters long. + * When adding a user, one of `password` or `password_hash` is required. + * When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password. */ password?: Password - /** A hash of the user's password. 
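And a companion sketch for the role mapping request just defined; the mapping name, realm, and field rule are assumptions for illustration.

[source,ts]
----
// Map every user authenticated by a hypothetical `saml1` realm to the
// `logs_reader` role from the previous sketch.
await client.security.putRoleMapping({
  name: 'saml-to-logs-reader',
  enabled: true,
  roles: ['logs_reader'],
  rules: { field: { 'realm.name': 'saml1' } }
})
----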
This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. The `password` parameter and the `password_hash` parameter cannot be used in the same request. */ + /** A hash of the user's password. + * This must be produced using the same hashing algorithm as has been configured for password storage. + * For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. + * Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. + * The `password` parameter and the `password_hash` parameter cannot be used in the same request. */ password_hash?: string - /** A set of roles the user has. The roles determine the user's access permissions. To create a user without any roles, specify an empty list (`[]`). */ + /** A set of roles the user has. + * The roles determine the user's access permissions. + * To create a user without any roles, specify an empty list (`[]`). */ roles?: string[] /** Specifies whether the user is enabled. */ enabled?: boolean @@ -22770,66 +32528,131 @@ export interface SecurityPutUserRequest extends RequestBase { } export interface SecurityPutUserResponse { + /** A successful call returns a JSON structure that shows whether the user has been created or updated. + * When an existing user is updated, `created` is set to `false`. */ created: boolean } export type SecurityQueryApiKeysApiKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate export interface SecurityQueryApiKeysApiKeyAggregationContainer { + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. */ aggregations?: Record + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. + * @alias aggregations */ aggs?: Record meta?: Metadata + /** A single-value metrics aggregation that calculates an approximate count of distinct values. */ cardinality?: AggregationsCardinalityAggregation + /** A multi-bucket aggregation that creates composite buckets from different sources. + * Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */ composite?: AggregationsCompositeAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket. */ date_range?: AggregationsDateRangeAggregation + /** A single bucket aggregation that narrows the set of documents to those that match a query. */ filter?: SecurityQueryApiKeysApiKeyQueryContainer + /** A multi-bucket aggregation where each bucket contains the documents that match a query. 
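The put user definitions above can be exercised like so; again a hedged sketch, where every field value is invented and the password merely satisfies the six-character minimum noted above.

[source,ts]
----
// Create a native-realm user with a single role.
const { created } = await client.security.putUser({
  username: 'jdoe',
  password: 'a-long-enough-password',
  roles: ['logs_reader'],
  full_name: 'Jane Doe',
  enabled: true
})
console.log(created ? 'created' : 'updated')
----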
*/ filters?: SecurityQueryApiKeysApiKeyFiltersAggregation missing?: AggregationsMissingAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */ range?: AggregationsRangeAggregation + /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. */ terms?: AggregationsTermsAggregation + /** A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. */ value_count?: AggregationsValueCountAggregation } export interface SecurityQueryApiKeysApiKeyFiltersAggregation extends AggregationsBucketAggregationBase { + /** Collection of queries from which to build buckets. */ filters?: AggregationsBuckets + /** Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */ other_bucket?: boolean + /** The key with which the other bucket is returned. */ other_bucket_key?: string + /** By default, the named filters aggregation returns the buckets as an object. + * Set to `false` to return the buckets as an array of objects. */ keyed?: boolean } export interface SecurityQueryApiKeysApiKeyQueryContainer { + /** Matches documents matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns documents that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** Returns documents based on their IDs. + * This query uses document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery + /** Returns documents that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all documents, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery + /** Returns documents that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns documents that contain terms within a provided range. */ range?: Partial> + /** Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns documents that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns documents that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns documents that contain terms matching a wildcard pattern. */ wildcard?: Partial> } export interface SecurityQueryApiKeysRequest extends RequestBase { -/** Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. */ + /** Return the snapshot of the owner user's role descriptors associated with the API key. + * An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). 
+ * An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. */ with_limited_by?: boolean - /** Determines whether to also retrieve the profile UID for the API key owner principal. If it exists, the profile UID is returned under the `profile_uid` response field for each API key. */ + /** Determines whether to also retrieve the profile UID for the API key owner principal. + * If it exists, the profile UID is returned under the `profile_uid` response field for each API key. */ with_profile_uid?: boolean /** Determines whether aggregation names are prefixed by their respective types in the response. */ typed_keys?: boolean - /** Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. */ + /** Any aggregations to run over the corpus of returned API keys. + * Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. + * This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, + * `cardinality`, `value_count`, `composite`, `filter`, and `filters`. + * Additionally, aggregations only run over the same subset of fields that query works with. */ aggregations?: Record - /** @alias aggregations */ - /** Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. */ + /** Any aggregations to run over the corpus of returned API keys. + * Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. + * This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, + * `cardinality`, `value_count`, `composite`, `filter`, and `filters`. + * Additionally, aggregations only run over the same subset of fields that query works with. + * @alias aggregations */ aggs?: Record - /** A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. NOTE: The queryable string values associated with API keys are internally mapped as keywords. Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. Such a match query is hence equivalent to a `term` query. */ + /** A query to filter which API keys to return. + * If the query parameter is missing, it is equivalent to a `match_all` query. 
+ * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + * You can query the following public information associated with an API key: `id`, `type`, `name`, + * `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. + * + * NOTE: The queryable string values associated with API keys are internally mapped as keywords. + * Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. + * Such a match query is hence equivalent to a `term` query. */ query?: SecurityQueryApiKeysApiKeyQueryContainer - /** The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The starting document offset. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer - /** The sort definition. Other than `id`, all public fields of an API key are eligible for sorting. In addition, sort can also be applied to the `_doc` field to sort by index order. */ + /** The sort definition. + * Other than `id`, all public fields of an API key are eligible for sorting. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort - /** The number of hits to return. It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The number of hits to return. + * It must not be negative. + * The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ size?: integer /** The search after definition. */ search_after?: SortResults @@ -22840,25 +32663,43 @@ export interface SecurityQueryApiKeysRequest extends RequestBase { } export interface SecurityQueryApiKeysResponse { + /** The total number of API keys found. */ total: integer + /** The number of API keys returned in the response. */ count: integer + /** A list of API key information. */ api_keys: SecurityApiKey[] + /** The aggregations result, if requested. */ aggregations?: Record } export interface SecurityQueryRoleQueryRole extends SecurityRoleDescriptor { _sort?: SortResults + /** Name of the role. */ name: string } export interface SecurityQueryRoleRequest extends RequestBase { -/** A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`. */ + /** A query to filter which roles to return. 
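A sketch of querying API keys with the options documented above; the wildcard pattern and page size are arbitrary placeholders.

[source,ts]
----
// Page through API keys whose name matches a pattern, newest first,
// using the documented query/sort/size options.
const page = await client.security.queryApiKeys({
  query: { wildcard: { name: 'app-*' } },
  sort: [{ creation: 'desc' }],
  size: 25
})
console.log(page.total, page.api_keys.length)
----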
+ * If the query parameter is missing, it is equivalent to a `match_all` query. + * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + * You can query the following information associated with roles: `name`, `description`, `metadata`, + * `applications.application`, `applications.privileges`, and `applications.resources`. */ query?: SecurityQueryRoleRoleQueryContainer - /** The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The starting document offset. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer - /** The sort definition. You can sort on `username`, `roles`, or `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. */ + /** The sort definition. + * You can sort on `username`, `roles`, or `enabled`. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort - /** The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The number of hits to return. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ size?: integer /** The search after definition. */ search_after?: SortResults @@ -22869,22 +32710,45 @@ export interface SecurityQueryRoleRequest extends RequestBase { } export interface SecurityQueryRoleResponse { + /** The total number of roles found. */ total: integer + /** The number of roles returned in the response. */ count: integer + /** A list of roles that match the query. + * The returned role format is an extension of the role definition format. + * It adds the `transient_metadata.enabled` and the `_sort` fields. + * `transient_metadata.enabled` is set to `false` in case the role is automatically disabled, for example when the role grants privileges that are not allowed by the installed license. + * `_sort` is present when the search query sorts on some field. + * It contains the array of values that have been used for sorting. */ roles: SecurityQueryRoleQueryRole[] } export interface SecurityQueryRoleRoleQueryContainer { + /** matches roles matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns roles that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** Returns roles based on their IDs. + * This query uses role document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery + /** Returns roles that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all roles, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery + /** Returns roles that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns roles that contain terms within a provided range. 
*/ range?: Partial> + /** Returns roles based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns roles that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns roles that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns roles that contain terms matching a wildcard pattern. */ wildcard?: Partial> } @@ -22893,15 +32757,27 @@ export interface SecurityQueryUserQueryUser extends SecurityUser { } export interface SecurityQueryUserRequest extends RequestBase { -/** Determines whether to retrieve the user profile UID, if it exists, for the users. */ + /** Determines whether to retrieve the user profile UID, if it exists, for the users. */ with_profile_uid?: boolean - /** A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. */ + /** A query to filter which users to return. + * If the query parameter is missing, it is equivalent to a `match_all` query. + * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + * You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. */ query?: SecurityQueryUserUserQueryContainer - /** The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The starting document offset. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer - /** The sort definition. Fields eligible for sorting are: `username`, `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. */ + /** The sort definition. + * Fields eligible for sorting are: `username`, `roles`, `enabled`. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort - /** The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The number of hits to return. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ size?: integer /** The search after definition */ search_after?: SortResults @@ -22912,27 +32788,45 @@ export interface SecurityQueryUserRequest extends RequestBase { } export interface SecurityQueryUserResponse { + /** The total number of users found. 
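The query role request and response shapes above compose the same way; the name pattern here is illustrative.

[source,ts]
----
// Find roles whose name matches a placeholder pattern.
const { total, roles } = await client.security.queryRole({
  query: { wildcard: { name: '*_reader' } },
  size: 50
})
console.log(total, roles)
----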
*/ total: integer + /** The number of users returned in the response. */ count: integer + /** A list of users that match the query. */ users: SecurityQueryUserQueryUser[] } export interface SecurityQueryUserUserQueryContainer { + /** Returns users based on their IDs. + * This query uses the user document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery + /** matches users matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns users that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** Returns users that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all users, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery + /** Returns users that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns users that contain terms within a provided range. */ range?: Partial> + /** Returns users based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns users that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns users that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns users that contain terms matching a wildcard pattern. */ wildcard?: Partial> } export interface SecuritySamlAuthenticateRequest extends RequestBase { -/** The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. */ + /** The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. */ content: string /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. */ ids: Ids @@ -22945,15 +32839,20 @@ export interface SecuritySamlAuthenticateRequest extends RequestBase { } export interface SecuritySamlAuthenticateResponse { + /** The access token that was generated by Elasticsearch. */ access_token: string + /** The authenticated user's name. */ username: string + /** The amount of time (in seconds) left until the token expires. */ expires_in: integer + /** The refresh token that was generated by Elasticsearch. */ refresh_token: string + /** The name of the realm where the user was authenticated. */ realm: string } export interface SecuritySamlCompleteLogoutRequest extends RequestBase { -/** The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. */ + /** The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. */ realm: string /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. */ ids: Ids @@ -22970,9 +32869,13 @@ export interface SecuritySamlCompleteLogoutRequest extends RequestBase { export type SecuritySamlCompleteLogoutResponse = boolean export interface SecuritySamlInvalidateRequest extends RequestBase { -/** The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. 
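Likewise, a minimal sketch for the query user API defined above.

[source,ts]
----
// List enabled users and include their profile UIDs when available.
const result = await client.security.queryUser({
  query: { term: { enabled: true } },
  with_profile_uid: true
})
console.log(result.total, result.users)
----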
*/ + /** The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. */ acs?: string - /** The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. The client application must not attempt to parse or process the string in any way. */ + /** The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. + * This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. + * If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. + * In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. + * The client application must not attempt to parse or process the string in any way. */ query_string: string /** The name of the SAML realm in Elasticsearch the configuration. You must specify either this parameter or the `acs` parameter. */ realm?: string @@ -22983,15 +32886,20 @@ export interface SecuritySamlInvalidateRequest extends RequestBase { } export interface SecuritySamlInvalidateResponse { + /** The number of tokens that were invalidated as part of this logout. */ invalidated: integer + /** The realm name of the SAML realm in Elasticsearch that authenticated the user. */ realm: string + /** A SAML logout response as a parameter so that the user can be redirected back to the SAML IdP. */ redirect: string } export interface SecuritySamlLogoutRequest extends RequestBase { -/** The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. */ + /** The access token that was returned as a response to calling the SAML authenticate API. + * Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. */ token: string - /** The refresh token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent refresh token that was received after refreshing the original access token. */ + /** The refresh token that was returned as a response to calling the SAML authenticate API. + * Alternatively, the most recent refresh token that was received after refreshing the original access token. */ refresh_token?: string /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { token?: never, refresh_token?: never } @@ -23000,15 +32908,20 @@ export interface SecuritySamlLogoutRequest extends RequestBase { } export interface SecuritySamlLogoutResponse { + /** A URL that contains a SAML logout request as a parameter. + * You can use this URL to be redirected back to the SAML IdP and to initiate Single Logout. */ redirect: string } export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase { -/** The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. */ + /** The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. + * The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. */ acs?: string - /** The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. You must specify either this parameter or the `acs` parameter. */ + /** The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. + * You must specify either this parameter or the `acs` parameter. */ realm?: string - /** A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. If the Authentication Request is signed, this value is used as part of the signature computation. */ + /** A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. + * If the Authentication Request is signed, this value is used as part of the signature computation. */ relay_state?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { acs?: never, realm?: never, relay_state?: never } @@ -23017,13 +32930,16 @@ export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase { } export interface SecuritySamlPrepareAuthenticationResponse { + /** A unique identifier for the SAML Request to be stored by the caller of the API. */ id: Id + /** The name of the Elasticsearch realm that was used to construct the authentication request. */ realm: string + /** The URL to redirect the user to. */ redirect: string } export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase { -/** The name of the SAML realm in Elasticsearch. */ + /** The name of the SAML realm in Elasticsearch. */ realm_name: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { realm_name?: never } @@ -23032,22 +32948,34 @@ export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase } export interface SecuritySamlServiceProviderMetadataResponse { + /** An XML string that contains a SAML Service Provider's metadata for the realm. */ metadata: string } export interface SecuritySuggestUserProfilesHint { + /** A list of profile UIDs to match against. */ uids?: SecurityUserProfileId[] + /** A single key-value pair to match against the labels section + * of a profile. A profile is considered matching if it matches + * at least one of the strings. */ labels?: Record } export interface SecuritySuggestUserProfilesRequest extends RequestBase { -/** A query string used to match name-related fields in user profile documents. 
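The SAML prepare and authenticate shapes above fit together roughly like this; the realm name and the Base64 SAML response are placeholders that a real IdP exchange would supply.

[source,ts]
----
// Step 1: have Elasticsearch build the SAML authentication request.
const prepared = await client.security.samlPrepareAuthentication({ realm: 'saml1' })
// Redirect the user's browser to `prepared.redirect` and retain `prepared.id`.

// Step 2: once the IdP posts back, exchange the SAML response for tokens.
const tokens = await client.security.samlAuthenticate({
  content: '<base64-encoded-saml-response>',
  ids: [prepared.id]
})
console.log(tokens.access_token, tokens.expires_in)
----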
Name-related fields are the user's `username`, `full_name`, and `email`. */ + /** A query string used to match name-related fields in user profile documents. + * Name-related fields are the user's `username`, `full_name`, and `email`. */ name?: string /** The number of profiles to return. */ size?: long - /** A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, use `data=` to retrieve content nested under the specified ``. By default, the API returns no `data` content. It is an error to specify `data` as both the query parameter and the request body field. */ + /** A comma-separated list of filters for the `data` field of the profile document. + * To return all content use `data=*`. + * To return a subset of content, use `data=` to retrieve content nested under the specified ``. + * By default, the API returns no `data` content. + * It is an error to specify `data` as both the query parameter and the request body field. */ data?: string | string[] - /** Extra search criteria to improve relevance of the suggestion result. Profiles matching the spcified hint are ranked higher in the response. Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. */ + /** Extra search criteria to improve relevance of the suggestion result. + * Profiles matching the specified hint are ranked higher in the response. + * Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. */ hint?: SecuritySuggestUserProfilesHint /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, size?: never, data?: never, hint?: never } @@ -23056,8 +32984,11 @@ export interface SecuritySuggestUserProfilesRequest extends RequestBase { } export interface SecuritySuggestUserProfilesResponse { + /** Metadata about the number of matching profiles. */ total: SecuritySuggestUserProfilesTotalUserProfiles + /** The number of milliseconds it took Elasticsearch to run the request. */ took: long + /** A list of profile documents, ordered by relevance, that match the search criteria. */ profiles: SecurityUserProfile[] } @@ -23067,13 +32998,24 @@ export interface SecuritySuggestUserProfilesTotalUserProfiles { } export interface SecurityUpdateApiKeyRequest extends RequestBase { -/** The ID of the API key to update. */ + /** The ID of the API key to update. */ id: Id - /** The role descriptors to assign to this API key. The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. */ + /** The role descriptors to assign to this API key. + * The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. + * You can assign new privileges by specifying them in this parameter. 
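A hedged sketch of the suggest user profiles request above; the name fragment and label are invented for illustration.

[source,ts]
----
// Suggest up to five profiles matching a placeholder name fragment,
// boosting profiles that carry a hypothetical label.
const suggestions = await client.security.suggestUserProfiles({
  name: 'jane',
  size: 5,
  hint: { labels: { org: ['engineering'] } }
})
console.log(suggestions.took, suggestions.profiles.length)
----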
+ * To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. + * If an API key has no assigned privileges, it inherits the owner user's full permissions. + * The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. + * The structure of a role descriptor is the same as the request for the create API keys API. */ role_descriptors?: Record - /** Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this value fully replaces the metadata previously associated with the API key. */ + /** Arbitrary metadata that you want to associate with the API key. + * It supports a nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage. + * When specified, this value fully replaces the metadata previously associated with the API key. */ metadata?: Metadata - /** The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the expiration unchanged. */ + /** The expiration time for the API key. + * By default, API keys never expire. + * This property can be omitted to leave the expiration unchanged. */ expiration?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, role_descriptors?: never, metadata?: never, expiration?: never } @@ -23082,17 +33024,26 @@ export interface SecurityUpdateApiKeyRequest extends RequestBase { } export interface SecurityUpdateApiKeyResponse { + /** If `true`, the API key was updated. + * If `false`, the API key didn't change because no change was detected. */ updated: boolean } export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase { -/** The ID of the cross-cluster API key to update. */ + /** The ID of the cross-cluster API key to update. */ id: Id - /** The access to be granted to this API key. The access is composed of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access. */ + /** The access to be granted to this API key. + * The access is composed of permissions for cross cluster search and cross cluster replication. + * At least one of them must be specified. + * When specified, the new access assignment fully replaces the previously assigned access. */ access: SecurityAccess - /** The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the value unchanged. */ + /** The expiration time for the API key. + * By default, API keys never expire. This property can be omitted to leave the value unchanged. */ expiration?: Duration - /** Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information fully replaces metadata previously associated with the API key. */ + /** Arbitrary metadata that you want to associate with the API key. + * It supports nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage. + * When specified, this information fully replaces metadata previously associated with the API key. 
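Finally, a sketch of the update API key semantics documented above, where an empty `role_descriptors` object clears the key's assigned privileges; the key ID is a placeholder.

[source,ts]
----
// Supply an empty `role_descriptors` object so the key falls back to the
// owner user's current permissions, as described above.
const { updated } = await client.security.updateApiKey({
  id: '<api-key-id>',
  role_descriptors: {}
})
console.log(updated ? 'key updated' : 'no change detected')
----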
*/ metadata?: Metadata /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never } @@ -23101,13 +33052,17 @@ export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase { } export interface SecurityUpdateCrossClusterApiKeyResponse { + /** If `true`, the API key was updated. + * If `false`, the API key didn’t change because no change was detected. */ updated: boolean } export interface SecurityUpdateSettingsRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** Settings for the index used for most security configuration, including native realm users and roles configured with the API. */ security?: SecuritySecuritySettings @@ -23126,17 +33081,25 @@ export interface SecurityUpdateSettingsResponse { } export interface SecurityUpdateUserProfileDataRequest extends RequestBase { -/** A unique identifier for the user profile. */ + /** A unique identifier for the user profile. */ uid: SecurityUserProfileId /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber /** Only perform the operation if the document has this primary term. */ if_primary_term?: long - /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. */ + /** If 'true', Elasticsearch refreshes the affected shards to make this operation + * visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', nothing is done with refreshes. */ refresh?: Refresh - /** Searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). */ + /** Searchable data that you want to associate with the user profile. + * This field supports a nested data structure. + * Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). */ labels?: Record - /** Non-searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). The data object is not searchable, but can be retrieved with the get user profile API. */ + /** Non-searchable data that you want to associate with the user profile. + * This field supports a nested data structure. + * Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). + * The data object is not searchable, but can be retrieved with the get user profile API. */ data?: Record /** All values in `body` will be added to the request body. 
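 * For example (an illustrative equivalence, not an extra API feature), `body: { labels: { direction: 'east' } }` behaves the same as passing `labels` at the top level of the request.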
*/ body?: string | { [key: string]: any } & { uid?: never, if_seq_no?: never, if_primary_term?: never, refresh?: never, labels?: never, data?: never } @@ -23149,7 +33112,7 @@ } export type SecurityUpdateUserProfileDataResponse = AcknowledgedResponseBase export type ShutdownType = 'restart' | 'remove' | 'replace' export interface ShutdownDeleteNodeRequest extends RequestBase { -/** The node id of node to be removed from the shutdown state */ + /** The node ID of the node to be removed from the shutdown state */ node_id: NodeId /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit @@ -23183,7 +33146,7 @@ export interface ShutdownGetNodePluginsStatus { } export interface ShutdownGetNodeRequest extends RequestBase { -/** Which node for which to retrieve the shutdown status */ + /** The node for which to retrieve the shutdown status */ node_id?: NodeIds /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit @@ -23206,19 +33169,37 @@ export type ShutdownGetNodeShutdownStatus = 'not_started' | 'in_progress' | 'sta export type ShutdownGetNodeShutdownType = 'remove' | 'restart' export interface ShutdownPutNodeRequest extends RequestBase { -/** The node identifier. This parameter is not validated against the cluster's active nodes. This enables you to register a node for shut down while it is offline. No error is thrown if you specify an invalid node ID. */ + /** The node identifier. + * This parameter is not validated against the cluster's active nodes. + * This enables you to register a node for shut down while it is offline. + * No error is thrown if you specify an invalid node ID. */ node_id: NodeId - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: TimeUnit - /** Valid values are restart, remove, or replace. Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. Because the node is expected to rejoin the cluster, data is not migrated off of the node. Use remove when you need to permanently remove a node from the cluster. The node is not marked ready for shutdown until data is migrated off of the node Use replace to do a 1:1 replacement of a node with another node. Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. */ + /** Valid values are restart, remove, or replace. + * Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. 
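+ * As a hypothetical sketch, a restart registration might look like `{ node_id: 'node-1', type: 'restart', reason: 'OS security patch' }`.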
+ * Because the node is expected to rejoin the cluster, data is not migrated off of the node. + * Use remove when you need to permanently remove a node from the cluster. + * The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node. + * Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. + * During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. */ type: ShutdownType - /** A human-readable reason that the node is being shut down. This field provides information for other cluster operators; it does not affect the shut down process. */ + /** A human-readable reason that the node is being shut down. + * This field provides information for other cluster operators; it does not affect the shut down process. */ reason: string - /** Only valid if type is restart. Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. */ + /** Only valid if type is restart. + * Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. + * This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. + * If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. */ allocation_delay?: string - /** Only valid if type is replace. Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. */ + /** Only valid if type is replace. + * Specifies the name of the node that is replacing the node being shut down. + * Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. + * During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. */ target_node_name?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never, type?: never, reason?: never, allocation_delay?: never, target_node_name?: never } @@ -23229,21 +33210,35 @@ } export type ShutdownPutNodeResponse = AcknowledgedResponseBase export interface SimulateIngestIngestDocumentSimulationKeys { + /** Identifier for the document. */ _id: Id + /** Name of the index that the document would be indexed into if this were not a simulation. */ _index: IndexName + /** JSON body for the document. */ _source: Record + /** */ _version: SpecUtilsStringified + /** A list of the names of the pipelines executed on this document. */ executed_pipelines: string[] + /** A list of the fields that would be ignored at the indexing step. 
For example, a field whose + * value is larger than the allowed limit would make it through all of the pipelines, but + * would not be indexed into Elasticsearch. */ ignored_fields?: Record[] + /** Any error resulting from simulating ingest on this doc. This can be an error generated by + * executing a processor, or a mapping validation error when simulating indexing the resulting + * doc. */ error?: ErrorCause } export type SimulateIngestIngestDocumentSimulation = SimulateIngestIngestDocumentSimulationKeys & { [property: string]: string | Id | IndexName | Record | SpecUtilsStringified | string[] | Record[] | ErrorCause } export interface SimulateIngestRequest extends RequestBase { -/** The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. */ + /** The index to simulate ingesting into. + * This value can be overridden by specifying an index on each document. + * If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. */ index?: IndexName - /** The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index. */ + /** The pipeline to use as the default pipeline. + * This value can be used to override the default pipeline of the index. */ pipeline?: PipelineName /** Sample documents to test in the pipeline. */ docs: IngestDocument[] @@ -23252,7 +33247,9 @@ /** A map of index template names to substitute index template definition objects. */ index_template_substitutions?: Record mapping_addition?: MappingTypeMapping - /** Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. */ + /** Pipelines to test. + * If you don’t specify the `pipeline` request path parameter, this parameter is required. + * If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline_substitutions?: Record /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } @@ -23269,11 +33266,19 @@ } export interface SimulateIngestSimulateIngestDocumentResult { } export interface SlmConfiguration { + /** If false, the snapshot fails if any data stream or index in indices is missing or closed. If true, the snapshot ignores missing or closed data streams and indices. */ ignore_unavailable?: boolean + /** A comma-separated list of data streams and indices to include in the snapshot. Multi-index syntax is supported. + * By default, a snapshot includes all data streams and indices in the cluster. If this argument is provided, the snapshot only includes the specified data streams and indices. */ indices?: Indices + /** If true, the current global state is included in the snapshot. */ include_global_state?: boolean + /** A list of feature states to be included in this snapshot. A list of features available for inclusion in the snapshot and their descriptions can be retrieved using the get features API. 
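+ * For example (illustrative only): `feature_states: ['kibana', 'security']`.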
+ * Each feature state includes one or more system indices containing data necessary for the function of that feature. Providing an empty array will include no feature states in the snapshot, regardless of the value of include_global_state. By default, all available feature states will be included in the snapshot if include_global_state is true, or no feature states if include_global_state is false. */ feature_states?: string[] + /** Attaches arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. Metadata must be less than 1024 bytes. */ metadata?: Metadata + /** If false, the entire snapshot will fail if one or more indices included in the snapshot do not have all primary shards available. */ partial?: boolean } @@ -23298,8 +33303,11 @@ export interface SlmPolicy { } export interface SlmRetention { + /** Time period after which a snapshot is considered expired and eligible for deletion. SLM deletes expired snapshots based on the slm.retention_schedule. */ expire_after: Duration + /** Maximum number of snapshots to retain, even if the snapshots have not yet expired. If the number of snapshots in the repository exceeds this limit, the policy retains the most recent snapshots and deletes older snapshots. */ max_count: integer + /** Minimum number of snapshots to retain, even if the snapshots have expired. */ min_count: integer } @@ -23307,11 +33315,15 @@ export interface SlmSnapshotLifecycle { in_progress?: SlmInProgress last_failure?: SlmInvocation last_success?: SlmInvocation + /** The last time the policy was modified. */ modified_date?: DateTime modified_date_millis: EpochTime + /** The next time the policy will run. */ next_execution?: DateTime next_execution_millis: EpochTime policy: SlmPolicy + /** The version of the snapshot policy. + * Only the latest version is stored and incremented when the policy is updated. */ version: VersionNumber stats: SlmStatistics } @@ -23324,21 +33336,27 @@ export interface SlmStatistics { retention_timed_out?: long policy?: Id total_snapshots_deleted?: long + /** @alias total_snapshots_deleted */ snapshots_deleted?: long total_snapshot_deletion_failures?: long + /** @alias total_snapshot_deletion_failures */ snapshot_deletion_failures?: long total_snapshots_failed?: long + /** @alias total_snapshots_failed */ snapshots_failed?: long total_snapshots_taken?: long + /** @alias total_snapshots_taken */ snapshots_taken?: long } export interface SlmDeleteLifecycleRequest extends RequestBase { -/** The id of the snapshot lifecycle policy to remove */ + /** The id of the snapshot lifecycle policy to remove */ policy_id: Name - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } @@ -23349,11 +33367,13 @@ export interface SlmDeleteLifecycleRequest extends RequestBase { export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase export interface SlmExecuteLifecycleRequest extends RequestBase { -/** The id of the snapshot lifecycle policy to be executed */ + /** The id of the snapshot lifecycle policy to be executed */ policy_id: Name - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } @@ -23366,9 +33386,11 @@ export interface SlmExecuteLifecycleResponse { } export interface SlmExecuteRetentionRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } @@ -23379,11 +33401,13 @@ export interface SlmExecuteRetentionRequest extends RequestBase { export type SlmExecuteRetentionResponse = AcknowledgedResponseBase export interface SlmGetLifecycleRequest extends RequestBase { -/** Comma-separated list of snapshot lifecycle policies to retrieve */ + /** Comma-separated list of snapshot lifecycle policies to retrieve */ policy_id?: Names - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } @@ -23394,7 +33418,7 @@ export interface SlmGetLifecycleRequest extends RequestBase { export type SlmGetLifecycleResponse = Record export interface SlmGetStatsRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -23418,9 +33442,13 @@ export interface SlmGetStatsResponse { } export interface SlmGetStatusRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } @@ -23433,11 +33461,15 @@ export interface SlmGetStatusResponse { } export interface SlmPutLifecycleRequest extends RequestBase { -/** The identifier for the snapshot lifecycle policy you want to create or update. */ + /** The identifier for the snapshot lifecycle policy you want to create or update. */ policy_id: Name - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** Configuration for each snapshot created by the policy. 
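 * As an illustrative sketch (all names and values hypothetical, not defaults), a nightly policy might combine
 * `config: { indices: ['data-*'], ignore_unavailable: true }` with `schedule: '0 30 1 * * ?'` and `retention: { expire_after: '30d', min_count: 5, max_count: 50 }`.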
*/ config?: SlmConfiguration @@ -23458,9 +33490,13 @@ export interface SlmPutLifecycleRequest extends RequestBase { export type SlmPutLifecycleResponse = AcknowledgedResponseBase export interface SlmStartRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } @@ -23471,9 +33507,13 @@ export interface SlmStartRequest extends RequestBase { export type SlmStartResponse = AcknowledgedResponseBase export interface SlmStopRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } @@ -23484,17 +33524,42 @@ export interface SlmStopRequest extends RequestBase { export type SlmStopResponse = AcknowledgedResponseBase export interface SnapshotAzureRepository extends SnapshotRepositoryBase { + /** The Azure repository type. */ type: 'azure' + /** The repository settings. */ settings?: SnapshotAzureRepositorySettings } export interface SnapshotAzureRepositorySettings extends SnapshotRepositorySettingsBase { + /** The path to the repository data within the container. + * It defaults to the root directory. + * + * NOTE: Don't set `base_path` when configuring a snapshot repository for Elastic Cloud Enterprise. + * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments can share the same bucket. */ base_path?: string + /** The name of the Azure repository client to use. */ client?: string + /** The Azure container. 
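+ * For example (illustrative): `container: 'elasticsearch-snapshots'`.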
*/ container?: string + /** The maximum batch size, between 1 and 256, used for `BlobBatch` requests. + * Defaults to 256 which is the maximum number supported by the Azure blob batch API. */ delete_objects_max_size?: integer + /** Either `primary_only` or `secondary_only`. + * Note that if you set it to `secondary_only`, it will force `readonly` to `true`. */ location_mode?: string + /** The maximum number of concurrent batch delete requests that will be submitted for any individual bulk delete with `BlobBatch`. + * Note that the effective number of concurrent deletes is further limited by the Azure client connection and event loop thread limits. + * Defaults to 10, minimum is 1, maximum is 100. */ max_concurrent_batch_deletes?: integer + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ readonly?: boolean } @@ -23504,15 +33569,35 @@ } export interface SnapshotGcsRepository extends SnapshotRepositoryBase { + /** The Google Cloud Storage repository type. */ type: 'gcs' + /** The repository settings. */ settings: SnapshotGcsRepositorySettings } export interface SnapshotGcsRepositorySettings extends SnapshotRepositorySettingsBase { + /** The name of the bucket to be used for snapshots. */ bucket: string + /** The name used by the client when it uses the Google Cloud Storage service. */ application_name?: string + /** The path to the repository data within the bucket. + * It defaults to the root of the bucket. + * + * NOTE: Don't set `base_path` when configuring a snapshot repository for Elastic Cloud Enterprise. + * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments can share the same bucket. */ base_path?: string + /** The name of the client to use to connect to Google Cloud Storage. */ client?: string + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ readonly?: boolean } @@ -23529,14 +33614,35 @@ } export interface SnapshotReadOnlyUrlRepository extends SnapshotRepositoryBase { + /** The read-only URL repository type. */ type: 'url' + /** The repository settings. 
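+ * For example (an illustrative sketch; the host is hypothetical): `{ url: 'https://snapshots.example.com/repo' }`.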
*/ settings: SnapshotReadOnlyUrlRepositorySettings } export interface SnapshotReadOnlyUrlRepositorySettings extends SnapshotRepositorySettingsBase { + /** The maximum number of retries for HTTP and HTTPS URLs. */ http_max_retries?: integer + /** The maximum wait time for data transfers over a connection. */ http_socket_timeout?: Duration + /** The maximum number of snapshots the repository can contain. + * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. */ max_number_of_snapshots?: integer + /** The URL location of the root of the shared filesystem repository. + * The following protocols are supported: + * + * * `file` + * * `ftp` + * * `http` + * * `https` + * * `jar` + * + * URLs using the HTTP, HTTPS, or FTP protocols must be explicitly allowed with the `repositories.url.allowed_urls` cluster setting. + * This setting supports wildcards in the place of a host, path, query, or fragment in the URL. + * + * URLs using the file protocol must point to the location of a shared filesystem accessible to all master and data nodes in the cluster. + * This location must be registered in the `path.repo` setting. + * You don't need to register URLs using the FTP, HTTP, HTTPS, or JAR protocols in the `path.repo` setting. */ url: string } @@ -23547,41 +33653,113 @@ export interface SnapshotRepositoryBase { } export interface SnapshotRepositorySettingsBase { + /** Big files can be broken down into multiple smaller blobs in the blob store during snapshotting. + * It is not recommended to change this value from its default unless there is an explicit reason for limiting the size of blobs in the repository. + * Setting a value lower than the default can result in an increased number of API calls to the blob store during snapshot create and restore operations compared to using the default value and thus make both operations slower and more costly. + * Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + * The default varies by repository type. */ chunk_size?: ByteSize + /** When set to `true`, metadata files are stored in compressed format. + * This setting doesn't affect index files that are already compressed by default. */ compress?: boolean + /** The maximum snapshot restore rate per node. + * It defaults to unlimited. + * Note that restores are also throttled through recovery settings. */ max_restore_bytes_per_sec?: ByteSize + /** The maximum snapshot creation rate per node. + * It defaults to 40mb per second. + * Note that if the recovery settings for managed services are set, then it defaults to unlimited, and the rate is additionally throttled through recovery settings. */ max_snapshot_bytes_per_sec?: ByteSize } export interface SnapshotS3Repository extends SnapshotRepositoryBase { + /** The S3 repository type. */ type: 's3' + /** The repository settings. + * + * NOTE: In addition to the specified settings, you can also use all non-secure client settings in the repository settings. + * In this case, the client settings found in the repository settings will be merged with those of the named client used by the repository. + * Conflicts between client and repository settings are resolved by the repository settings taking precedence over client settings. */ settings: SnapshotS3RepositorySettings } export interface SnapshotS3RepositorySettings extends SnapshotRepositorySettingsBase { + /** The name of the S3 bucket to use for snapshots. + * The bucket name must adhere to Amazon's S3 bucket naming rules. 
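+ * For example (hypothetical): `bucket: 'my-app-snapshots'`.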
*/ bucket: string + /** The path to the repository data within its bucket. + * It defaults to an empty string, meaning that the repository is at the root of the bucket. + * The value of this setting should not start or end with a forward slash (`/`). + * + * NOTE: Don't set `base_path` when configuring a snapshot repository for Elastic Cloud Enterprise. + * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments may share the same bucket. */ base_path?: string + /** The minimum threshold below which the chunk is uploaded using a single request. + * Beyond this threshold, the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of `buffer_size` length, and to upload each part in its own request. + * Note that setting a buffer size lower than 5mb is not allowed since it will prevent the use of the Multipart API and may result in upload errors. + * It is also not possible to set a buffer size greater than 5gb as it is the maximum upload size allowed by S3. + * Defaults to `100mb` or 5% of JVM heap, whichever is smaller. */ buffer_size?: ByteSize + /** The S3 repository supports all S3 canned ACLs: `private`, `public-read`, `public-read-write`, `authenticated-read`, `log-delivery-write`, `bucket-owner-read`, `bucket-owner-full-control`. + * You can specify a canned ACL using the `canned_acl` setting. + * When the S3 repository creates buckets and objects, it adds the canned ACL into the buckets and objects. */ canned_acl?: string + /** The name of the S3 client to use to connect to S3. */ client?: string + /** The maximum batch size, between 1 and 1000, used for `DeleteObjects` requests. + * Defaults to 1000 which is the maximum number supported by the AWS DeleteObjects API. */ delete_objects_max_size?: integer + /** The time to wait before trying again if an attempt to read a linearizable register fails. */ get_register_retry_delay?: Duration + /** The maximum number of parts that Elasticsearch will write during a multipart upload of a single object. + * Files which are larger than `buffer_size × max_multipart_parts` will be chunked into several smaller objects. + * Elasticsearch may also split a file across multiple objects to satisfy other constraints such as the `chunk_size` limit. + * Defaults to `10000` which is the maximum number of parts in a multipart upload in AWS S3. */ max_multipart_parts?: integer + /** The maximum number of possibly-dangling multipart uploads to clean up in each batch of snapshot deletions. + * Defaults to 1000 which is the maximum number supported by the AWS ListMultipartUploads API. + * If set to `0`, Elasticsearch will not attempt to clean up dangling multipart uploads. */ max_multipart_upload_cleanup_size?: integer + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. 
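+ * As an illustrative sketch (the bucket name is hypothetical), a cluster that only restores from the repository might register it with `{ type: 's3', settings: { bucket: 'my-app-snapshots', readonly: true } }`.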
*/ readonly?: boolean + /** When set to `true`, files are encrypted on server side using an AES256 algorithm. */ server_side_encryption?: boolean + /** The S3 storage class for objects written to the repository. + * Values may be `standard`, `reduced_redundancy`, `standard_ia`, `onezone_ia`, and `intelligent_tiering`. */ storage_class?: string + /** The delay before the first retry and the amount the delay is incremented by on each subsequent retry. + * The default is 50ms and the minimum is 0ms. */ 'throttled_delete_retry.delay_increment'?: Duration + /** The upper bound on how long the delays between retries will grow to. + * The default is 5s and the minimum is 0ms. */ 'throttled_delete_retry.maximum_delay'?: Duration + /** The number of times to retry a throttled snapshot deletion. + * The default is 10 and the minimum value is 0 which will disable retries altogether. + * Note that if retries are enabled in the S3 client, each of these retries comprises that many client-level retries. */ 'throttled_delete_retry.maximum_number_of_retries'?: integer } export interface SnapshotShardsStats { + /** The number of shards that initialized, started, and finalized successfully. */ done: long + /** The number of shards that failed to be included in the snapshot. */ failed: long + /** The number of shards that are finalizing but are not done. */ finalizing: long + /** The number of shards that are still initializing. */ initializing: long + /** The number of shards that have started but are not finalized. */ started: long + /** The total number of shards included in the snapshot. */ total: long } @@ -23601,13 +33779,30 @@ } export interface SnapshotSharedFileSystemRepository extends SnapshotRepositoryBase { + /** The shared file system repository type. */ type: 'fs' + /** The repository settings. */ settings: SnapshotSharedFileSystemRepositorySettings } export interface SnapshotSharedFileSystemRepositorySettings extends SnapshotRepositorySettingsBase { + /** The location of the shared filesystem used to store and retrieve snapshots. + * This location must be registered in the `path.repo` setting on all master and data nodes in the cluster. + * Unlike `path.repo`, this setting supports only a single file path. */ location: string + /** The maximum number of snapshots the repository can contain. + * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. */ max_number_of_snapshots?: integer + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. 
*/ readonly?: boolean } @@ -23658,47 +33853,97 @@ export interface SnapshotSnapshotShardsStatus { export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_count' | 'repository' | 'shard_count' | 'failed_shard_count' export interface SnapshotSnapshotStats { + /** The number and size of files that still need to be copied as part of the incremental snapshot. + * For completed snapshots, this property indicates the number and size of files that were not already in the repository and were copied as part of the incremental snapshot. */ incremental: SnapshotFileCountSnapshotStats + /** The time, in milliseconds, when the snapshot creation process started. */ start_time_in_millis: EpochTime time?: Duration + /** The total time, in milliseconds, that it took for the snapshot process to complete. */ time_in_millis: DurationValue + /** The total number and size of files that are referenced by the snapshot. */ total: SnapshotFileCountSnapshotStats } export interface SnapshotSourceOnlyRepository extends SnapshotRepositoryBase { + /** The source-only repository type. */ type: 'source' + /** The repository settings. */ settings: SnapshotSourceOnlyRepositorySettings } export interface SnapshotSourceOnlyRepositorySettings extends SnapshotRepositorySettingsBase { + /** The delegated repository type. For valid values, refer to the `type` parameter. + * Source repositories can use `settings` properties for its delegated repository type. */ delegate_type?: string + /** The maximum number of snapshots the repository can contain. + * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. */ max_number_of_snapshots?: integer + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ read_only?: boolean + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. + * @alias read_only */ readonly?: boolean } export interface SnapshotStatus { + /** Indicates whether the current cluster state is included in the snapshot. */ include_global_state: boolean indices: Record + /** The name of the repository that includes the snapshot. */ repository: string + /** Statistics for the shards in the snapshot. */ shards_stats: SnapshotShardsStats + /** The name of the snapshot. 
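+ * For example (illustrative): `'nightly-snap-2024.05.01'`.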
*/ snapshot: string + /** The current snapshot state: + * + * * `FAILED`: The snapshot finished with an error and failed to store any data. + * * `STARTED`: The snapshot is currently running. + * * `SUCCESS`: The snapshot completed. */ state: string + /** Details about the number (`file_count`) and size (`size_in_bytes`) of files included in the snapshot. */ stats: SnapshotSnapshotStats + /** The universally unique identifier (UUID) for the snapshot. */ uuid: Uuid } export interface SnapshotCleanupRepositoryCleanupRepositoryResults { + /** The number of binary large objects (blobs) removed from the snapshot repository during cleanup operations. + * A non-zero value indicates that unreferenced blobs were found and subsequently cleaned up. */ deleted_blobs: long + /** The number of bytes freed by cleanup operations. */ deleted_bytes: long } export interface SnapshotCleanupRepositoryRequest extends RequestBase { -/** The name of the snapshot repository to clean up. */ + /** The name of the snapshot repository to clean up. */ name: Name - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1` */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1` */ master_timeout?: Duration - /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } @@ -23707,21 +33952,26 @@ export interface SnapshotCleanupRepositoryRequest extends RequestBase { } export interface SnapshotCleanupRepositoryResponse { + /** Statistics for cleanup operations. */ results: SnapshotCleanupRepositoryCleanupRepositoryResults } export interface SnapshotCloneRequest extends RequestBase { -/** The name of the snapshot repository that both source and target snapshot belong to. */ + /** The name of the snapshot repository that both source and target snapshot belong to. */ repository: Name /** The source snapshot name. */ snapshot: Name /** The target snapshot name. */ target_snapshot: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. 
*/ master_timeout?: Duration - /** The period of time to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period of time to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** A comma-separated list of indices to include in the snapshot. Multi-target syntax is supported. */ + /** A comma-separated list of indices to include in the snapshot. + * Multi-target syntax is supported. */ indices: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, timeout?: never, indices?: never } @@ -23732,27 +33982,55 @@ } export type SnapshotCloneResponse = AcknowledgedResponseBase export interface SnapshotCreateRequest extends RequestBase { -/** The name of the repository for the snapshot. */ + /** The name of the repository for the snapshot. */ repository: Name - /** The name of the snapshot. It supportes date math. It must be unique in the repository. */ + /** The name of the snapshot. + * It supports date math. + * It must be unique in the repository. */ snapshot: Name - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes. */ + /** If `true`, the request returns a response when the snapshot is complete. + * If `false`, the request returns a response when the snapshot initializes. */ wait_for_completion?: boolean - /** Determines how wildcard patterns in the `indices` parameter match data streams and indices. It supports comma-separated values such as `open,hidden`. */ + /** Determines how wildcard patterns in the `indices` parameter match data streams and indices. + * It supports comma-separated values such as `open,hidden`. */ expand_wildcards?: ExpandWildcards - /** The feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default. Note that specifying an empty array will result in the default behavior. To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). */ + /** The feature states to include in the snapshot. + * Each feature state includes one or more system indices containing related data. + * You can view a list of eligible features using the get features API. + * + * If `include_global_state` is `true`, all current feature states are included by default. + * If `include_global_state` is `false`, no feature states are included by default. + * + * Note that specifying an empty array will result in the default behavior. 
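+ * For example (illustrative only), `feature_states: ['kibana']` captures only the Kibana feature state.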
+ * To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). */ feature_states?: string[] - /** If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed. */ + /** If `true`, the request ignores data streams and indices in `indices` that are missing or closed. + * If `false`, the request returns an error for any data stream or index that is missing or closed. */ ignore_unavailable?: boolean - /** If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). */ + /** If `true`, the current cluster state is included in the snapshot. + * The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. + * It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). */ include_global_state?: boolean - /** A comma-separated list of data streams and indices to include in the snapshot. It supports a multi-target syntax. The default is an empty array (`[]`), which includes all regular data streams and regular indices. To exclude all data streams and indices, use `-*`. You can't use this parameter to include or exclude system indices or system data streams from a snapshot. Use `feature_states` instead. */ + /** A comma-separated list of data streams and indices to include in the snapshot. + * It supports a multi-target syntax. + * The default is an empty array (`[]`), which includes all regular data streams and regular indices. + * To exclude all data streams and indices, use `-*`. + * + * You can't use this parameter to include or exclude system indices or system data streams from a snapshot. + * Use `feature_states` instead. */ indices?: Indices - /** Arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. It can have any contents but it must be less than 1024 bytes. This information is not automatically generated by Elasticsearch. */ + /** Arbitrary metadata to attach to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. + * It can have any contents but it must be less than 1024 bytes. + * This information is not automatically generated by Elasticsearch. */ metadata?: Metadata - /** If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. */ + /** If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. + * Only shards that were successfully included in the snapshot will be restored. + * All missing shards will be recreated as empty. + * + * If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. 
*/ partial?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, expand_wildcards?: never, feature_states?: never, ignore_unavailable?: never, include_global_state?: never, indices?: never, metadata?: never, partial?: never } @@ -23761,18 +34039,26 @@ export interface SnapshotCreateRequest extends RequestBase { } export interface SnapshotCreateResponse { + /** Equals `true` if the snapshot was accepted. Present when the request had `wait_for_completion` set to `false` */ accepted?: boolean + /** Snapshot information. Present when the request had `wait_for_completion` set to `true` */ snapshot?: SnapshotSnapshotInfo } export interface SnapshotCreateRepositoryRequest extends RequestBase { -/** The name of the snapshot repository to register or update. */ + /** The name of the snapshot repository to register or update. */ name: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration - /** If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. If `false`, this verification is skipped. You can also perform this verification with the verify snapshot repository API. */ + /** If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. + * If `false`, this verification is skipped. + * You can also perform this verification with the verify snapshot repository API. */ verify?: boolean repository?: SnapshotRepository /** All values in `body` will be added to the request body. */ @@ -23784,11 +34070,14 @@ export interface SnapshotCreateRepositoryRequest extends RequestBase { export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase export interface SnapshotDeleteRequest extends RequestBase { -/** The name of the repository to delete a snapshot from. */ + /** The name of the repository to delete a snapshot from. */ repository: Name - /** A comma-separated list of snapshot names to delete. It also accepts wildcards (`*`). */ + /** A comma-separated list of snapshot names to delete. + * It also accepts wildcards (`*`). */ snapshot: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. 
To indicate that the request should never timeout, set it to `-1`. */
+  /** The period to wait for the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * To indicate that the request should never timeout, set it to `-1`. */
   master_timeout?: Duration
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never }
@@ -23799,11 +34088,16 @@ export interface SnapshotDeleteRequest extends RequestBase {

 export type SnapshotDeleteResponse = AcknowledgedResponseBase

 export interface SnapshotDeleteRepositoryRequest extends RequestBase {
-/** The ame of the snapshot repositories to unregister. Wildcard (`*`) patterns are supported. */
+  /** The name of the snapshot repositories to unregister.
+   * Wildcard (`*`) patterns are supported. */
   name: Names
-  /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */
+  /** The period to wait for the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * To indicate that the request should never timeout, set it to `-1`. */
   master_timeout?: Duration
-  /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */
+  /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+   * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+   * To indicate that the request should never timeout, set it to `-1`. */
   timeout?: Duration
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never }
@@ -23814,35 +34108,55 @@ export interface SnapshotDeleteRepositoryRequest extends RequestBase {

 export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase

 export interface SnapshotGetRequest extends RequestBase {
-/** A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported. */
+  /** A comma-separated list of snapshot repository names used to limit the request.
+   * Wildcard (`*`) expressions are supported. */
   repository: Name
-  /** A comma-separated list of snapshot names to retrieve Wildcards (`*`) are supported. * To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. * To get information about any snapshots that are currently running, use `_current`. */
+  /** A comma-separated list of snapshot names to retrieve. Wildcards (`*`) are supported.
+   *
+   * * To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`.
+   * * To get information about any snapshots that are currently running, use `_current`. */
   snapshot: Names
   /** An offset identifier to start pagination from as returned by the next field in the response body.
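A matching sketch for the deletion types above, reusing the `client` and the hypothetical `my_repo` from the earlier sketch:

// Delete matching snapshots first (wildcards are accepted), then unregister the repository.
await client.snapshot.delete({ repository: 'my_repo', snapshot: 'snap-*' })
await client.snapshot.deleteRepository({ name: 'my_repo', timeout: '30s' })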
*/ after?: string - /** The value of the current sort column at which to start retrieval. It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. It can be a millisecond time value or a number when sorting by `index-` or shard count. */ + /** The value of the current sort column at which to start retrieval. + * It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. + * It can be a millisecond time value or a number when sorting by `index-` or shard count. */ from_sort_value?: string /** If `false`, the request returns an error for any snapshots that are unavailable. */ ignore_unavailable?: boolean - /** If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. The default is `false`, meaning that this information is omitted. */ + /** If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. + * The default is `false`, meaning that this information is omitted. */ index_details?: boolean /** If `true`, the response includes the name of each index in each snapshot. */ index_names?: boolean /** If `true`, the response includes the repository name in each snapshot. */ include_repository?: boolean - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The sort order. Valid values are `asc` for ascending and `desc` for descending order. The default behavior is ascending order. */ + /** The sort order. + * Valid values are `asc` for ascending and `desc` for descending order. + * The default behavior is ascending order. */ order?: SortOrder /** Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. */ offset?: integer - /** The maximum number of snapshots to return. The default is 0, which means to return all that match the request without limit. */ + /** The maximum number of snapshots to return. + * The default is 0, which means to return all that match the request without limit. */ size?: integer - /** Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to. You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. */ + /** Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to. 
+ * + * You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. + * For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. + * Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. + * To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. */ slm_policy_filter?: Name - /** The sort order for the result. The default behavior is sorting by snapshot start time stamp. */ + /** The sort order for the result. + * The default behavior is sorting by snapshot start time stamp. */ sort?: SnapshotSnapshotSort - /** If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. */ + /** If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. + * + * NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. */ verbose?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, after?: never, from_sort_value?: never, ignore_unavailable?: never, index_details?: never, index_names?: never, include_repository?: never, master_timeout?: never, order?: never, offset?: never, size?: never, slm_policy_filter?: never, sort?: never, verbose?: never } @@ -23851,8 +34165,12 @@ export interface SnapshotGetRequest extends RequestBase { } export interface SnapshotGetResponse { + /** The number of remaining snapshots that were not returned due to size limits and that can be fetched by additional requests using the `next` field value. */ remaining: integer + /** The total number of snapshots that match the request when ignoring the size limit or `after` query parameter. */ total: integer + /** If the request contained a size limit and there might be more results, a `next` field will be added to the response. + * It can be used as the `after` query parameter to fetch additional results. */ next?: string responses?: SnapshotGetSnapshotResponseItem[] snapshots?: SnapshotSnapshotInfo[] @@ -23865,11 +34183,17 @@ export interface SnapshotGetSnapshotResponseItem { } export interface SnapshotGetRepositoryRequest extends RequestBase { -/** A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of snapshot repository names used to limit the request. 
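The `size`, `after`, and `next` fields above combine into a pagination loop; a sketch, again against the hypothetical `my_repo` and reusing the earlier `client`:

// Page through all snapshots, 50 per request, feeding `next` back in as `after`.
let after: string | undefined
do {
  const page = await client.snapshot.get({
    repository: 'my_repo',
    snapshot: '*',
    size: 50,
    sort: 'start_time',
    after
  })
  for (const snap of page.snapshots ?? []) console.log(snap.snapshot)
  after = page.next
} while (after !== undefined)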
+   * Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`.
+   *
+   * To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. */
   name?: Names
-  /** If `true`, the request gets information from the local node only. If `false`, the request gets information from the master node. */
+  /** If `true`, the request gets information from the local node only.
+   * If `false`, the request gets information from the master node. */
   local?: boolean
-  /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */
+  /** The period to wait for the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * To indicate that the request should never timeout, set it to `-1`. */
   master_timeout?: Duration
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { name?: never, local?: never, master_timeout?: never }
@@ -23880,67 +34204,117 @@ export interface SnapshotGetRepositoryRequest extends RequestBase {

 export type SnapshotGetRepositoryResponse = Record

 export interface SnapshotRepositoryAnalyzeBlobDetails {
+  /** The name of the blob. */
   name: string
+  /** Indicates whether the blob was overwritten while the read operations were ongoing. */
   overwritten: boolean
   read_early: boolean
+  /** The position, in bytes, at which read operations completed. */
   read_end: long
+  /** The position, in bytes, at which read operations started. */
   read_start: long
+  /** A description of every read operation performed on the blob. */
   reads: SnapshotRepositoryAnalyzeReadBlobDetails
+  /** The size of the blob. */
   size: ByteSize
+  /** The size of the blob in bytes. */
   size_bytes: long
 }

 export interface SnapshotRepositoryAnalyzeDetailsInfo {
+  /** A description of the blob that was written and read. */
   blob: SnapshotRepositoryAnalyzeBlobDetails
+  /** The elapsed time spent overwriting the blob.
+   * If the blob was not overwritten, this information is omitted. */
   overwrite_elapsed?: Duration
+  /** The elapsed time spent overwriting the blob, in nanoseconds.
+   * If the blob was not overwritten, this information is omitted. */
   overwrite_elapsed_nanos?: DurationValue
+  /** The elapsed time spent writing the blob. */
   write_elapsed: Duration
+  /** The elapsed time spent writing the blob, in nanoseconds. */
   write_elapsed_nanos: DurationValue
+  /** The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or `indices.recovery.max_bytes_per_sec` if the recovery settings for managed services are set) throttle while writing the blob. */
   write_throttled: Duration
+  /** The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or `indices.recovery.max_bytes_per_sec` if the recovery settings for managed services are set) throttle while writing the blob, in nanoseconds. */
   write_throttled_nanos: DurationValue
+  /** The node which wrote the blob and coordinated the read operations. */
   writer_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo
 }

 export interface SnapshotRepositoryAnalyzeReadBlobDetails {
+  /** Indicates whether the read operation may have started before the write operation was complete. */
   before_write_complete?: boolean
+  /** The length of time spent reading the blob.
+ * If the blob was not found, this detail is omitted. */ elapsed?: Duration + /** The length of time spent reading the blob, in nanoseconds. + * If the blob was not found, this detail is omitted. */ elapsed_nanos?: DurationValue + /** The length of time waiting for the first byte of the read operation to be received. + * If the blob was not found, this detail is omitted. */ first_byte_time?: Duration + /** The length of time waiting for the first byte of the read operation to be received, in nanoseconds. + * If the blob was not found, this detail is omitted. */ first_byte_time_nanos: DurationValue + /** Indicates whether the blob was found by the read operation. + * If the read was started before the write completed or the write was ended before completion, it might be false. */ found: boolean + /** The node that performed the read operation. */ node: SnapshotRepositoryAnalyzeSnapshotNodeInfo + /** The length of time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles during the read of the blob. + * If the blob was not found, this detail is omitted. */ throttled?: Duration + /** The length of time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles during the read of the blob, in nanoseconds. + * If the blob was not found, this detail is omitted. */ throttled_nanos?: DurationValue } export interface SnapshotRepositoryAnalyzeReadSummaryInfo { + /** The number of read operations performed in the test. */ count: integer + /** The maximum time spent waiting for the first byte of any read request to be received. */ max_wait: Duration + /** The maximum time spent waiting for the first byte of any read request to be received, in nanoseconds. */ max_wait_nanos: DurationValue + /** The total elapsed time spent on reading blobs in the test. */ total_elapsed: Duration + /** The total elapsed time spent on reading blobs in the test, in nanoseconds. */ total_elapsed_nanos: DurationValue + /** The total size of all the blobs or partial blobs read in the test. */ total_size: ByteSize + /** The total size of all the blobs or partial blobs read in the test, in bytes. */ total_size_bytes: long + /** The total time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles. */ total_throttled: Duration + /** The total time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles, in nanoseconds. */ total_throttled_nanos: DurationValue + /** The total time spent waiting for the first byte of each read request to be received. */ total_wait: Duration + /** The total time spent waiting for the first byte of each read request to be received, in nanoseconds. */ total_wait_nanos: DurationValue } export interface SnapshotRepositoryAnalyzeRequest extends RequestBase { -/** The name of the repository. */ + /** The name of the repository. */ name: Name - /** The total number of blobs to write to the repository during the test. For realistic experiments, you should set it to at least `2000`. */ + /** The total number of blobs to write to the repository during the test. + * For realistic experiments, you should set it to at least `2000`. */ blob_count?: integer /** The number of operations to run concurrently during the test. */ concurrency?: integer - /** Indicates whether to return detailed results, including timing information for every operation performed during the analysis. 
If false, it returns only a summary of the analysis. */ + /** Indicates whether to return detailed results, including timing information for every operation performed during the analysis. + * If false, it returns only a summary of the analysis. */ detailed?: boolean - /** The number of nodes on which to perform an early read operation while writing each blob. Early read operations are only rarely performed. */ + /** The number of nodes on which to perform an early read operation while writing each blob. + * Early read operations are only rarely performed. */ early_read_node_count?: integer - /** The maximum size of a blob to be written during the test. For realistic experiments, you should set it to at least `2gb`. */ + /** The maximum size of a blob to be written during the test. + * For realistic experiments, you should set it to at least `2gb`. */ max_blob_size?: ByteSize - /** An upper limit on the total size of all the blobs written during the test. For realistic experiments, you should set it to at least `1tb`. */ + /** An upper limit on the total size of all the blobs written during the test. + * For realistic experiments, you should set it to at least `1tb`. */ max_total_data_size?: ByteSize /** The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. */ rare_action_probability?: double @@ -23948,11 +34322,15 @@ export interface SnapshotRepositoryAnalyzeRequest extends RequestBase { rarely_abort_writes?: boolean /** The number of nodes on which to read a blob after writing. */ read_node_count?: integer - /** The minimum number of linearizable register operations to perform in total. For realistic experiments, you should set it to at least `100`. */ + /** The minimum number of linearizable register operations to perform in total. + * For realistic experiments, you should set it to at least `100`. */ register_operation_count?: integer - /** The seed for the pseudo-random number generator used to generate the list of operations performed during the test. To repeat the same set of operations in multiple experiments, use the same seed in each experiment. Note that the operations are performed concurrently so might not always happen in the same order on each run. */ + /** The seed for the pseudo-random number generator used to generate the list of operations performed during the test. + * To repeat the same set of operations in multiple experiments, use the same seed in each experiment. + * Note that the operations are performed concurrently so might not always happen in the same order on each run. */ seed?: integer - /** The period of time to wait for the test to complete. If no response is received before the timeout expires, the test is cancelled and returns an error. */ + /** The period of time to wait for the test to complete. + * If no response is received before the timeout expires, the test is cancelled and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { name?: never, blob_count?: never, concurrency?: never, detailed?: never, early_read_node_count?: never, max_blob_size?: never, max_total_data_size?: never, rare_action_probability?: never, rarely_abort_writes?: never, read_node_count?: never, register_operation_count?: never, seed?: never, timeout?: never } @@ -23961,25 +34339,46 @@ export interface SnapshotRepositoryAnalyzeRequest extends RequestBase { } export interface SnapshotRepositoryAnalyzeResponse { + /** The number of blobs written to the repository during the test. */ blob_count: integer + /** The path in the repository under which all the blobs were written during the test. */ blob_path: string + /** The number of write operations performed concurrently during the test. */ concurrency: integer + /** The node that coordinated the analysis and performed the final cleanup. */ coordinating_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo + /** The time it took to delete all the blobs in the container. */ delete_elapsed: Duration + /** The time it took to delete all the blobs in the container, in nanoseconds. */ delete_elapsed_nanos: DurationValue + /** A description of every read and write operation performed during the test. */ details: SnapshotRepositoryAnalyzeDetailsInfo + /** The limit on the number of nodes on which early read operations were performed after writing each blob. */ early_read_node_count: integer + /** A list of correctness issues detected, which is empty if the API succeeded. + * It is included to emphasize that a successful response does not guarantee correct behaviour in future. */ issues_detected: string[] + /** The time it took to retrieve a list of all the blobs in the container. */ listing_elapsed: Duration + /** The time it took to retrieve a list of all the blobs in the container, in nanoseconds. */ listing_elapsed_nanos: DurationValue + /** The limit on the size of a blob written during the test. */ max_blob_size: ByteSize + /** The limit, in bytes, on the size of a blob written during the test. */ max_blob_size_bytes: long + /** The limit on the total size of all blob written during the test. */ max_total_data_size: ByteSize + /** The limit, in bytes, on the total size of all blob written during the test. */ max_total_data_size_bytes: long + /** The probability of performing rare actions during the test. */ rare_action_probability: double + /** The limit on the number of nodes on which read operations were performed after writing each blob. */ read_node_count: integer + /** The name of the repository that was the subject of the analysis. */ repository: string + /** The seed for the pseudo-random number generator used to generate the operations used during the test. */ seed: long + /** A collection of statistics that summarize the results of the test. */ summary: SnapshotRepositoryAnalyzeSummaryInfo } @@ -23989,38 +34388,52 @@ export interface SnapshotRepositoryAnalyzeSnapshotNodeInfo { } export interface SnapshotRepositoryAnalyzeSummaryInfo { + /** A collection of statistics that summarise the results of the read operations in the test. */ read: SnapshotRepositoryAnalyzeReadSummaryInfo + /** A collection of statistics that summarise the results of the write operations in the test. */ write: SnapshotRepositoryAnalyzeWriteSummaryInfo } export interface SnapshotRepositoryAnalyzeWriteSummaryInfo { + /** The number of write operations performed in the test. */ count: integer + /** The total elapsed time spent on writing blobs in the test. 
*/ total_elapsed: Duration + /** The total elapsed time spent on writing blobs in the test, in nanoseconds. */ total_elapsed_nanos: DurationValue + /** The total size of all the blobs written in the test. */ total_size: ByteSize + /** The total size of all the blobs written in the test, in bytes. */ total_size_bytes: long + /** The total time spent waiting due to the `max_snapshot_bytes_per_sec` throttle. */ total_throttled: Duration + /** The total time spent waiting due to the `max_snapshot_bytes_per_sec` throttle, in nanoseconds. */ total_throttled_nanos: long } export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase { -/** The name of the snapshot repository. */ + /** The name of the snapshot repository. */ name: Names /** If `verify_blob_contents` is `true`, this parameter specifies how many blobs to verify at once. */ blob_thread_pool_concurrency?: integer /** The maximum number of index snapshots to verify concurrently within each index verification. */ index_snapshot_verification_concurrency?: integer - /** The number of indices to verify concurrently. The default behavior is to use the entire `snapshot_meta` thread pool. */ + /** The number of indices to verify concurrently. + * The default behavior is to use the entire `snapshot_meta` thread pool. */ index_verification_concurrency?: integer /** If `verify_blob_contents` is `true`, this parameter specifies the maximum amount of data that Elasticsearch will read from the repository every second. */ max_bytes_per_sec?: string - /** The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage. If your repository contains more than this number of shard snapshot failures, the verification will fail. */ + /** The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage. + * If your repository contains more than this number of shard snapshot failures, the verification will fail. */ max_failed_shard_snapshots?: integer - /** The maximum number of snapshot metadata operations to run concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ + /** The maximum number of snapshot metadata operations to run concurrently. + * The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ meta_thread_pool_concurrency?: integer - /** The number of snapshots to verify concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ + /** The number of snapshots to verify concurrently. + * The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ snapshot_verification_concurrency?: integer - /** Indicates whether to verify the checksum of every data blob in the repository. If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive. */ + /** Indicates whether to verify the checksum of every data blob in the repository. + * If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive. */ verify_blob_contents?: boolean /** All values in `body` will be added to the request body. 
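A sketch of driving the analysis API whose request and response shapes appear above; `repositoryAnalyze` is the generated client method name in recent releases (an assumption worth verifying), and the small sizes here only keep the run cheap, since the doc comments recommend much larger values for realistic experiments:

// Exercise `my_repo` with a quick analysis run, reusing the earlier `client`.
const analysis = await client.snapshot.repositoryAnalyze({
  name: 'my_repo',
  blob_count: 100,       // docs above recommend at least 2000 for realistic runs
  max_blob_size: '10mb', // docs above recommend at least 2gb for realistic runs
  detailed: true,
  timeout: '120s'
})
// An empty issues_detected array still does not guarantee correct future behaviour.
console.log(analysis.summary.write.count, analysis.issues_detected)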
*/ body?: string | { [key: string]: any } & { name?: never, blob_thread_pool_concurrency?: never, index_snapshot_verification_concurrency?: never, index_verification_concurrency?: never, max_bytes_per_sec?: never, max_failed_shard_snapshots?: never, meta_thread_pool_concurrency?: never, snapshot_verification_concurrency?: never, verify_blob_contents?: never } @@ -24031,31 +34444,78 @@ export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase { export type SnapshotRepositoryVerifyIntegrityResponse = any export interface SnapshotRestoreRequest extends RequestBase { -/** The name of the repository to restore a snapshot from. */ + /** The name of the repository to restore a snapshot from. */ repository: Name /** The name of the snapshot to restore. */ snapshot: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** If `true`, the request returns a response when the restore operation completes. The operation is complete when it finishes all attempts to recover primary shards for restored indices. This applies even if one or more of the recovery attempts fail. If `false`, the request returns a response when the restore operation initializes. */ + /** If `true`, the request returns a response when the restore operation completes. + * The operation is complete when it finishes all attempts to recover primary shards for restored indices. + * This applies even if one or more of the recovery attempts fail. + * + * If `false`, the request returns a response when the restore operation initializes. */ wait_for_completion?: boolean - /** The feature states to restore. If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. If `include_global_state` is `false`, the request restores no feature states by default. Note that specifying an empty array will result in the default behavior. To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). */ + /** The feature states to restore. + * If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. + * If `include_global_state` is `false`, the request restores no feature states by default. + * Note that specifying an empty array will result in the default behavior. + * To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). */ feature_states?: string[] - /** The index settings to not restore from the snapshot. You can't use this option to ignore `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template. */ + /** The index settings to not restore from the snapshot. + * You can't use this option to ignore `index.number_of_shards`. + * + * For data streams, this option applies only to restored backing indices. + * New backing indices are configured using the data stream's matching index template. 
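And a short sketch for the integrity-verification request above, assuming the generated method is named `repositoryVerifyIntegrity`; `verify_blob_contents` is left at its default because, as documented, reading every blob can be extremely slow and expensive:

const integrity = await client.snapshot.repositoryVerifyIntegrity({
  name: 'my_repo',
  snapshot_verification_concurrency: 2
})
console.log(integrity) // the response is typed as `any` in these definitions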
*/ ignore_index_settings?: string[] - /** If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. If `false`, the request returns an error for any missing index or data stream. */ + /** If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. + * If `false`, the request returns an error for any missing index or data stream. */ ignore_unavailable?: boolean - /** If `true`, the request restores aliases for any restored data streams and indices. If `false`, the request doesn’t restore aliases. */ + /** If `true`, the request restores aliases for any restored data streams and indices. + * If `false`, the request doesn’t restore aliases. */ include_aliases?: boolean - /** If `true`, restore the cluster state. The cluster state includes: * Persistent cluster settings * Index templates * Legacy index templates * Ingest pipelines * Index lifecycle management (ILM) policies * Stored scripts * For snapshots taken after 7.12.0, feature states If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. Use the `feature_states` parameter to configure how feature states are restored. If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. */ + /** If `true`, restore the cluster state. The cluster state includes: + * + * * Persistent cluster settings + * * Index templates + * * Legacy index templates + * * Ingest pipelines + * * Index lifecycle management (ILM) policies + * * Stored scripts + * * For snapshots taken after 7.12.0, feature states + * + * If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. + * It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. + * + * Use the `feature_states` parameter to configure how feature states are restored. + * + * If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. */ include_global_state?: boolean - /** Index settings to add or change in restored indices, including backing indices. You can't use this option to change `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template. */ + /** Index settings to add or change in restored indices, including backing indices. + * You can't use this option to change `index.number_of_shards`. + * + * For data streams, this option applies only to restored backing indices. + * New backing indices are configured using the data stream's matching index template. */ index_settings?: IndicesIndexSettings - /** A comma-separated list of indices and data streams to restore. It supports a multi-target syntax. The default behavior is all regular indices and regular data streams in the snapshot. 
You can't use this parameter to restore system indices or system data streams. Use `feature_states` instead. */ + /** A comma-separated list of indices and data streams to restore. + * It supports a multi-target syntax. + * The default behavior is all regular indices and regular data streams in the snapshot. + * + * You can't use this parameter to restore system indices or system data streams. + * Use `feature_states` instead. */ indices?: Indices - /** If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. If true, it allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. */ + /** If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. + * + * If true, it allows restoring a partial snapshot of indices with unavailable shards. + * Only shards that were successfully included in the snapshot will be restored. + * All missing shards will be recreated as empty. */ partial?: boolean - /** A rename pattern to apply to restored data streams and indices. Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. */ + /** A rename pattern to apply to restored data streams and indices. + * Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. + * + * The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. */ rename_pattern?: string /** The rename replacement string that is used with the `rename_pattern`. */ rename_replacement?: string @@ -24077,13 +34537,19 @@ export interface SnapshotRestoreSnapshotRestore { } export interface SnapshotStatusRequest extends RequestBase { -/** The snapshot repository name used to limit the request. It supports wildcards (`*`) if `` isn't specified. */ + /** The snapshot repository name used to limit the request. + * It supports wildcards (`*`) if `` isn't specified. */ repository?: Name - /** A comma-separated list of snapshots to retrieve status for. The default is currently running snapshots. Wildcards (`*`) are not supported. */ + /** A comma-separated list of snapshots to retrieve status for. + * The default is currently running snapshots. + * Wildcards (`*`) are not supported. */ snapshot?: Names - /** If `false`, the request returns an error for any snapshots that are unavailable. If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. */ + /** If `false`, the request returns an error for any snapshots that are unavailable. + * If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. */ ignore_unavailable?: boolean - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. 
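The rename parameters above are easiest to read in an example; a sketch that restores indices under a `restored-` prefix so the live indices stay untouched (the pattern and prefix are illustrative, and `client` is the one set up earlier):

await client.snapshot.restore({
  repository: 'my_repo',
  snapshot: 'snap-1',
  indices: 'my-index-*',
  rename_pattern: '(.+)',            // capture each restored name...
  rename_replacement: 'restored-$1', // ...and recreate it under a new prefix
  include_aliases: false,
  wait_for_completion: true
})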
+ * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, ignore_unavailable?: never, master_timeout?: never } @@ -24096,15 +34562,22 @@ export interface SnapshotStatusResponse { } export interface SnapshotVerifyRepositoryCompactNodeInfo { + /** A human-readable name for the node. + * You can set this name using the `node.name` property in `elasticsearch.yml`. + * The default value is the machine's hostname. */ name: Name } export interface SnapshotVerifyRepositoryRequest extends RequestBase { -/** The name of the snapshot repository to verify. */ + /** The name of the snapshot repository to verify. */ name: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } @@ -24113,6 +34586,8 @@ export interface SnapshotVerifyRepositoryRequest extends RequestBase { } export interface SnapshotVerifyRepositoryResponse { + /** Information about the nodes connected to the snapshot repository. + * The key is the ID of the node. */ nodes: Record } @@ -24124,7 +34599,7 @@ export interface SqlColumn { export type SqlRow = any[] export interface SqlClearCursorRequest extends RequestBase { -/** Cursor to clear. */ + /** Cursor to clear. */ cursor: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { cursor?: never } @@ -24137,7 +34612,7 @@ export interface SqlClearCursorResponse { } export interface SqlDeleteAsyncRequest extends RequestBase { -/** The identifier for the search. */ + /** The identifier for the search. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -24148,15 +34623,20 @@ export interface SqlDeleteAsyncRequest extends RequestBase { export type SqlDeleteAsyncResponse = AcknowledgedResponseBase export interface SqlGetAsyncRequest extends RequestBase { -/** The identifier for the search. */ + /** The identifier for the search. 
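A sketch covering the status and repository-verification types above before the SQL section begins; the `snapshots` field on the status response is unchanged context elided from this hunk, so treat that property name as an assumption:

// Progress of currently running snapshots in the repository.
const status = await client.snapshot.status({ repository: 'my_repo' })
console.log(status.snapshots.length)

// Confirm the repository is functional on all master and data nodes.
const verified = await client.snapshot.verifyRepository({ name: 'my_repo' })
console.log(Object.keys(verified.nodes))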
*/ id: Id - /** The separator for CSV results. The API supports this parameter only for CSV responses. */ + /** The separator for CSV results. + * The API supports this parameter only for CSV responses. */ delimiter?: string - /** The format for the response. You must specify a format using this parameter or the `Accept` HTTP header. If you specify both, the API uses this parameter. */ + /** The format for the response. + * You must specify a format using this parameter or the `Accept` HTTP header. + * If you specify both, the API uses this parameter. */ format?: string - /** The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. */ + /** The retention period for the search and its results. + * It defaults to the `keep_alive` period for the original SQL search. */ keep_alive?: Duration - /** The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. */ + /** The period to wait for complete results. + * It defaults to no timeout, meaning the request waits for complete search results. */ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, delimiter?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } @@ -24165,16 +34645,32 @@ export interface SqlGetAsyncRequest extends RequestBase { } export interface SqlGetAsyncResponse { + /** Identifier for the search. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` HTTP header. */ id: Id + /** If `true`, the search is still running. + * If `false`, the search has finished. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_running: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_partial: boolean + /** Column headings for the search results. Each object is a column. */ columns?: SqlColumn[] + /** The cursor for the next set of paginated results. + * For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP header. */ cursor?: string + /** The values for the search results. */ rows: SqlRow[] } export interface SqlGetAsyncStatusRequest extends RequestBase { -/** The identifier for the search. */ + /** The identifier for the search. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -24183,28 +34679,47 @@ export interface SqlGetAsyncStatusRequest extends RequestBase { } export interface SqlGetAsyncStatusResponse { + /** The timestamp, in milliseconds since the Unix epoch, when Elasticsearch will delete the search and its results, even if the search is still running. */ expiration_time_in_millis: EpochTime + /** The identifier for the search. */ id: string + /** If `true`, the search is still running. + * If `false`, the search has finished. 
*/ is_running: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. */ is_partial: boolean + /** The timestamp, in milliseconds since the Unix epoch, when the search started. + * The API returns this property only for running searches. */ start_time_in_millis: EpochTime + /** The HTTP status code for the search. + * The API returns this property only for completed searches. */ completion_status?: uint } export interface SqlQueryRequest extends RequestBase { -/** The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. */ + /** The format for the response. + * You can also specify a format using the `Accept` HTTP header. + * If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. */ format?: SqlQuerySqlFormat - /** If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the API returns an error with no partial results. */ + /** If `true`, the response has partial results when there are shard request timeouts or shard failures. + * If `false`, the API returns an error with no partial results. */ allow_partial_search_results?: boolean - /** The default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. */ + /** The default catalog (cluster) for queries. + * If unspecified, the queries execute on the data in the local cluster only. */ catalog?: string - /** If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. */ + /** If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. + * The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. */ columnar?: boolean - /** The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters. */ + /** The cursor used to retrieve a set of paginated results. + * If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. + * It ignores other request body parameters. */ cursor?: string /** The maximum number of rows (or entries) to return in one response. */ fetch_size?: integer - /** If `false`, the API returns an exception when encountering multiple values for a field. If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. */ + /** If `false`, the API returns an exception when encountering multiple values for a field. + * If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. */ field_multi_value_leniency?: boolean /** The Elasticsearch query DSL for additional filtering. */ filter?: QueryDslQueryContainer @@ -24212,9 +34727,12 @@ export interface SqlQueryRequest extends RequestBase { index_using_frozen?: boolean /** The retention period for an async or saved synchronous search. 
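A sketch of the async lifecycle these types describe; the identifier is a placeholder for the `id` returned by an earlier `sql.query` call made with `keep_on_completion: true`:

const searchId = 'REPLACE_WITH_ASYNC_SEARCH_ID' // hypothetical async search id
let running = true
while (running) {
  const st = await client.sql.getAsyncStatus({ id: searchId })
  running = st.is_running
  if (running) await new Promise(resolve => setTimeout(resolve, 1000))
}
const result = await client.sql.getAsync({ id: searchId, format: 'json' })
console.log(result.columns, result.rows.length)
await client.sql.deleteAsync({ id: searchId }) // drop the stored results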
*/ keep_alive?: Duration - /** If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. */ + /** If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. + * If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. */ keep_on_completion?: boolean - /** The minimum retention period for the scroll cursor. After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. */ + /** The minimum retention period for the scroll cursor. + * After this time period, a pagination request might fail because the scroll cursor is no longer available. + * Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. */ page_timeout?: Duration /** The values for parameters in the query. */ params?: Record @@ -24222,11 +34740,16 @@ export interface SqlQueryRequest extends RequestBase { query?: string /** The timeout before the request fails. */ request_timeout?: Duration - /** One or more runtime fields for the search request. These fields take precedence over mapped fields with the same name. */ + /** One or more runtime fields for the search request. + * These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields /** The ISO-8601 time zone ID for the search. */ time_zone?: TimeZone - /** The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. If the search doesn't finish within this period, the search becomes async. To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. */ + /** The period to wait for complete results. + * It defaults to no timeout, meaning the request waits for complete search results. + * If the search doesn't finish within this period, the search becomes async. + * + * To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. */ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { format?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never } @@ -24235,18 +34758,34 @@ export interface SqlQueryRequest extends RequestBase { } export interface SqlQueryResponse { + /** Column headings for the search results. Each object is a column. */ columns?: SqlColumn[] + /** The cursor for the next set of paginated results. + * For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP header. */ cursor?: string + /** The identifier for the search. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` HTTP header. 
*/ id?: Id + /** If `true`, the search is still running. + * If `false`, the search has finished. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_running?: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_partial?: boolean + /** The values for the search results. */ rows: SqlRow[] } export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' export interface SqlTranslateRequest extends RequestBase { -/** The maximum number of rows (or entries) to return in one response. */ + /** The maximum number of rows (or entries) to return in one response. */ fetch_size?: integer /** The Elasticsearch query DSL for additional filtering. */ filter?: QueryDslQueryContainer @@ -24270,13 +34809,23 @@ export interface SqlTranslateResponse { } export interface SslCertificatesCertificateInformation { + /** If the path refers to a container file (a jks keystore, or a PKCS#12 file), it is the alias of the certificate. + * Otherwise, it is null. */ alias: string | null + /** The ISO formatted date of the certificate's expiry (not-after) date. */ expiry: DateTime + /** The format of the file. + * Valid values include `jks`, `PKCS12`, and `PEM`. */ format: string + /** Indicates whether Elasticsearch has access to the private key for this certificate. */ has_private_key: boolean + /** The Distinguished Name of the certificate's issuer. */ issuer?: string + /** The path to the certificate, as configured in the `elasticsearch.yml` file. */ path: string + /** The hexadecimal representation of the certificate's serial number. */ serial_number: string + /** The Distinguished Name of the certificate's subject. */ subject_dn: string } @@ -24290,24 +34839,32 @@ export interface SslCertificatesRequest extends RequestBase { export type SslCertificatesResponse = SslCertificatesCertificateInformation[] export interface SynonymsSynonymRule { + /** The identifier for the synonym rule. + * If you do not specify a synonym rule ID when you create a rule, an identifier is created automatically by Elasticsearch. */ id?: Id + /** The synonyms that conform the synonym rule in Solr format. */ synonyms: SynonymsSynonymString } export interface SynonymsSynonymRuleRead { + /** Synonym Rule identifier */ id: Id + /** Synonyms, in Solr format, that conform the synonym rule. See https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-graph-tokenfilter.html#_solr_synonyms_2 */ synonyms: SynonymsSynonymString } export type SynonymsSynonymString = string export interface SynonymsSynonymsUpdateResult { + /** The update operation result. */ result: Result + /** Updating synonyms in a synonym set reloads the associated analyzers. + * This information is the analyzers reloading result. */ reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult } export interface SynonymsDeleteSynonymRequest extends RequestBase { -/** The synonyms set identifier to delete. */ + /** The synonyms set identifier to delete. 
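A sketch of a synchronous, paginated SQL query plus its Query DSL translation; the `library` index and its columns are assumptions for illustration:

const page1 = await client.sql.query({
  query: 'SELECT author, page_count FROM library ORDER BY page_count DESC',
  fetch_size: 5
})
console.log(page1.columns, page1.rows)
if (page1.cursor !== undefined) {
  const page2 = await client.sql.query({ cursor: page1.cursor })
  await client.sql.clearCursor({ cursor: page2.cursor ?? page1.cursor })
}

// Inspect the Query DSL the same statement compiles to.
const dsl = await client.sql.translate({
  query: 'SELECT author FROM library WHERE page_count > 100',
  fetch_size: 10
})
console.log(dsl)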
*/ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -24318,7 +34875,7 @@ export interface SynonymsDeleteSynonymRequest extends RequestBase { export type SynonymsDeleteSynonymResponse = AcknowledgedResponseBase export interface SynonymsDeleteSynonymRuleRequest extends RequestBase { -/** The ID of the synonym set to update. */ + /** The ID of the synonym set to update. */ set_id: Id /** The ID of the synonym rule to delete. */ rule_id: Id @@ -24331,7 +34888,7 @@ export interface SynonymsDeleteSynonymRuleRequest extends RequestBase { export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult export interface SynonymsGetSynonymRequest extends RequestBase { -/** The synonyms set identifier to retrieve. */ + /** The synonyms set identifier to retrieve. */ id: Id /** The starting offset for query rules to retrieve. */ from?: integer @@ -24344,12 +34901,14 @@ export interface SynonymsGetSynonymRequest extends RequestBase { } export interface SynonymsGetSynonymResponse { + /** The total number of synonyms rules that the synonyms set contains. */ count: integer + /** Synonym rule details. */ synonyms_set: SynonymsSynonymRuleRead[] } export interface SynonymsGetSynonymRuleRequest extends RequestBase { -/** The ID of the synonym set to retrieve the synonym rule from. */ + /** The ID of the synonym set to retrieve the synonym rule from. */ set_id: Id /** The ID of the synonym rule to retrieve. */ rule_id: Id @@ -24362,7 +34921,7 @@ export interface SynonymsGetSynonymRuleRequest extends RequestBase { export type SynonymsGetSynonymRuleResponse = SynonymsSynonymRuleRead export interface SynonymsGetSynonymsSetsRequest extends RequestBase { -/** The starting offset for synonyms sets to retrieve. */ + /** The starting offset for synonyms sets to retrieve. */ from?: integer /** The maximum number of synonyms sets to retrieve. */ size?: integer @@ -24373,17 +34932,21 @@ export interface SynonymsGetSynonymsSetsRequest extends RequestBase { } export interface SynonymsGetSynonymsSetsResponse { + /** The total number of synonyms sets defined. */ count: integer + /** The identifier and total number of defined synonym rules for each synonyms set. */ results: SynonymsGetSynonymsSetsSynonymsSetItem[] } export interface SynonymsGetSynonymsSetsSynonymsSetItem { + /** Synonyms set identifier */ synonyms_set: Id + /** Number of synonym rules that the synonym set contains */ count: integer } export interface SynonymsPutSynonymRequest extends RequestBase { -/** The ID of the synonyms set to be created or updated. */ + /** The ID of the synonyms set to be created or updated. */ id: Id /** The synonym rules definitions for the synonyms set. */ synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[] @@ -24399,7 +34962,7 @@ export interface SynonymsPutSynonymResponse { } export interface SynonymsPutSynonymRuleRequest extends RequestBase { -/** The ID of the synonym set. */ + /** The ID of the synonym set. */ set_id: Id /** The ID of the synonym rule to be updated or created. */ rule_id: Id @@ -24433,6 +34996,10 @@ export interface TasksTaskInfo { action: string cancelled?: boolean cancellable: boolean + /** Human readable text that identifies the particular request that the task is performing. + * For example, it might identify the search request being performed by a search task. 
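A sketch for the synonyms management types above; the set and rule identifiers are illustrative:

// Create (or replace) a synonyms set, then add one more rule to it.
await client.synonyms.putSynonym({
  id: 'my-synonyms-set',
  synonyms_set: [{ id: 'rule-1', synonyms: 'hello, hi, howdy' }]
})
const updated = await client.synonyms.putSynonymRule({
  set_id: 'my-synonyms-set',
  rule_id: 'rule-2',
  synonyms: 'laptop, notebook'
})
// Analyzers using the set are reloaded; the result reports the details.
console.log(updated.result, updated.reload_analyzers_details)

const set = await client.synonyms.getSynonym({ id: 'my-synonyms-set', from: 0, size: 10 })
console.log(set.count, set.synonyms_set)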
+ * Other kinds of tasks have different descriptions, like `_reindex` which has the source and the destination, or `_bulk` which just has the number of requests and the destination indices. + * Many requests will have only an empty description because more detailed information about the request is not easily available or particularly helpful in identifying the request. */ description?: string headers: Record id: long @@ -24440,6 +35007,10 @@ export interface TasksTaskInfo { running_time?: Duration running_time_in_nanos: DurationValue start_time_in_millis: EpochTime + /** The internal status of the task, which varies from task to task. + * The format also varies. + * While the goal is to keep the status for a particular task consistent from version to version, this is not always possible because sometimes the implementation changes. + * Fields might be removed from the status for a particular request so any parsing you do of the status might break in minor releases. */ status?: any type: string parent_task_id?: TaskId @@ -24450,12 +35021,15 @@ export type TasksTaskInfos = TasksTaskInfo[] | Record + /** Either a flat list of tasks if `group_by` was set to `none`, or grouped by parents if + * `group_by` was set to `parents`. */ tasks?: TasksTaskInfos } export interface TasksCancelRequest extends RequestBase { -/** The task identifier. */ + /** The task identifier. */ task_id?: TaskId /** A comma-separated list or wildcard expression of actions that is used to limit the request. */ actions?: string | string[] @@ -24474,9 +35048,10 @@ export interface TasksCancelRequest extends RequestBase { export type TasksCancelResponse = TasksTaskListResponseBase export interface TasksGetRequest extends RequestBase { -/** The task identifier. */ + /** The task identifier. */ task_id: Id - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** If `true`, the request blocks until the task has completed. */ wait_for_completion?: boolean @@ -24494,17 +35069,24 @@ export interface TasksGetResponse { } export interface TasksListRequest extends RequestBase { -/** A comma-separated list or wildcard expression of actions used to limit the request. For example, you can use `cluser:*` to retrieve all cluster-related tasks. */ + /** A comma-separated list or wildcard expression of actions used to limit the request. + * For example, you can use `cluster:*` to retrieve all cluster-related tasks. */ actions?: string | string[] - /** If `true`, the response includes detailed information about the running tasks. This information is useful to distinguish tasks from each other but is more costly to run. */ + /** If `true`, the response includes detailed information about the running tasks. + * This information is useful to distinguish tasks from each other but is more costly to run. */ detailed?: boolean - /** A key that is used to group tasks in the response. The task lists can be grouped either by nodes or by parent tasks. */ + /** A key that is used to group tasks in the response. + * The task lists can be grouped either by nodes or by parent tasks. */ group_by?: TasksGroupBy /** A comma-separated list of node IDs or names that is used to limit the returned information. */ nodes?: NodeIds - /** A parent task identifier that is used to limit returned information. 
To return all tasks, omit this parameter or use a value of `-1`. If the parent task is not found, the API does not return a 404 response code. */ + /** A parent task identifier that is used to limit returned information. + * To return all tasks, omit this parameter or use a value of `-1`. + * If the parent task is not found, the API does not return a 404 response code. */ parent_task_id?: Id - /** The period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its information. However, timed out nodes are included in the `node_failures` property. */ + /** The period to wait for each node to respond. + * If a node does not respond before its timeout expires, the response does not include its information. + * However, timed out nodes are included in the `node_failures` property. */ timeout?: Duration /** If `true`, the request blocks until the operation is complete. */ wait_for_completion?: boolean @@ -24538,33 +35120,98 @@ export interface TextStructureTopHit { } export interface TextStructureFindFieldStructureRequest extends RequestBase { -/** If `format` is set to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ + /** If `format` is set to `delimited`, you can specify the column names in a comma-separated list. + * If this parameter is not specified, the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ column_names?: string - /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ + /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. + * Only a single character is supported; the delimiter cannot have multiple characters. + * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). + * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string - /** The number of documents to include in the structural analysis. The minimum value is 2. */ + /** The number of documents to include in the structural analysis. + * The minimum value is 2. */ documents_to_sample?: uint - /** The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. 
If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. The intention in that situation is that a user who knows the meanings will rename the fields before using them. */ + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + * If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. + * The intention in that situation is that a user who knows the meanings will rename the fields before using them. */ ecs_compatibility?: TextStructureEcsCompatibilityType /** If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. */ explain?: boolean /** The field that should be analyzed. */ field: Field - /** The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ + /** The high level structure of the text. + * By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. + * If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ format?: TextStructureFormatType - /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ + /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. + * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". + * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern /** The name of the index that contains the analyzed field. */ index: IndexName - /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. 
*/ + /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + * Only a single character is supported. + * If this parameter is not specified, the default value is a double quote (`"`). + * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ quote?: string - /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. */ + /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + * If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. + * Otherwise, the default value is `false`. */ should_trim_fields?: boolean - /** The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. */ + /** The maximum amount of time that the structure analysis can take. + * If the analysis is still running when the timeout expires, it will be stopped. */ timeout?: Duration - /** The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. */ + /** The name of the field that contains the primary timestamp of each record in the text. + * In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + * + * If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + * + * For structured text, if you specify this parameter, the field must exist within the text. + * + * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field - /** The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). 
Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ + /** The Java time format of the timestamp field in the text. + * Only a subset of Java time format letter groups are supported: + * + * * `a` + * * `d` + * * `dd` + * * `EEE` + * * `EEEE` + * * `H` + * * `HH` + * * `h` + * * `M` + * * `MM` + * * `MMM` + * * `MMMM` + * * `mm` + * * `ss` + * * `XX` + * * `XXX` + * * `yy` + * * `yyyy` + * * `zzz` + * + * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). + * Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. + * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + * + * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + * Another is when the timestamp format is one that the structure finder does not consider by default. + * + * If this parameter is not specified, the structure finder chooses the best format from a built-in set. + * + * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. + * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { column_names?: never, delimiter?: never, documents_to_sample?: never, ecs_compatibility?: never, explain?: never, field?: never, format?: never, grok_pattern?: never, index?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never } @@ -24591,27 +35238,90 @@ export interface TextStructureFindFieldStructureResponse { } export interface TextStructureFindMessageStructureRequest extends RequestBase { -/** If the format is `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. */ + /** If the format is `delimited`, you can specify the column names in a comma-separated list. 
+ * If this parameter is not specified, the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ column_names?: string - /** If you the format is `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ + /** If the format is `delimited`, you can specify the character used to delimit the values in each row. + * Only a single character is supported; the delimiter cannot have multiple characters. + * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). + * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string - /** The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. */ + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + * If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. */ ecs_compatibility?: TextStructureEcsCompatibilityType /** If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. */ explain?: boolean - /** The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ + /** The high level structure of the text. + * By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. + * If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. 
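+ *
+ * Editor's sketch, not part of the generated definitions: a minimal call that lets the structure finder detect the format. It assumes a configured `client` instance of this package's `Client`, `logLines` as an array of sample message strings, and that the method name follows this client's camel-case convention:
+ *
+ *   const result = await client.textStructure.findMessageStructure({ messages: logLines })
+ *   console.log(result)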
*/ format?: TextStructureFormatType - /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ + /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. + * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". + * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern - /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ + /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + * Only a single character is supported. + * If this parameter is not specified, the default value is a double quote (`"`). + * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ quote?: string - /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. */ + /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + * If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. + * Otherwise, the default value is `false`. */ should_trim_fields?: boolean - /** The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. */ + /** The maximum amount of time that the structure analysis can take. + * If the analysis is still running when the timeout expires, it will be stopped. */ timeout?: Duration - /** The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. 
*/ + /** The name of the field that contains the primary timestamp of each record in the text. + * In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + * + * If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + * + * For structured text, if you specify this parameter, the field must exist within the text. + * + * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field - /** The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ + /** The Java time format of the timestamp field in the text. + * Only a subset of Java time format letter groups are supported: + * + * * `a` + * * `d` + * * `dd` + * * `EEE` + * * `EEEE` + * * `H` + * * `HH` + * * `h` + * * `M` + * * `MM` + * * `MMM` + * * `MMMM` + * * `mm` + * * `ss` + * * `XX` + * * `XXX` + * * `yy` + * * `yyyy` + * * `zzz` + * + * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). + * Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. + * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + * + * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + * Another is when the timestamp format is one that the structure finder does not consider by default. + * + * If this parameter is not specified, the structure finder chooses the best format from a built-in set. 
+ * + * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. + * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string /** The list of messages you want to analyze. */ messages: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, messages?: never } @@ -24640,35 +35350,113 @@ export interface TextStructureFindMessageStructureResponse { } export interface TextStructureFindStructureRequest { -/** The text's character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set. */ + /** The text's character set. + * It must be a character set that is supported by the JVM that Elasticsearch uses. + * For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. + * If this parameter is not specified, the structure finder chooses an appropriate character set. */ charset?: string - /** If you have set format to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. */ + /** If you have set format to `delimited`, you can specify the column names in a comma-separated list. + * If this parameter is not specified, the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ column_names?: string - /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ + /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. + * Only a single character is supported; the delimiter cannot have multiple characters. + * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). + * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string - /** The mode of compatibility with ECS compliant Grok patterns. 
+ * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * Valid values are `disabled` and `v1`. + * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + * If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. */ ecs_compatibility?: string - /** If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. */ + /** If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. + * If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. */ explain?: boolean - /** The high level structure of the text. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ + /** The high level structure of the text. + * Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. + * By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. + * If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ format?: string - /** If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ + /** If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. + * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". + * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern - /** If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. 
*/ + /** If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. + * If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. */ has_header_row?: boolean - /** The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. */ + /** The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. + * If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. */ line_merge_size_limit?: uint - /** The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2. If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. NOTE: The number of lines and the variation of the lines affects the speed of the analysis. For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. */ + /** The number of lines to include in the structural analysis, starting from the beginning of the text. + * The minimum is 2. + * If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. + * + * NOTE: The number of lines and the variation of the lines affects the speed of the analysis. + * For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. + * If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. */ lines_to_sample?: uint - /** If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ + /** If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + * Only a single character is supported. + * If this parameter is not specified, the default value is a double quote (`"`). + * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. 
*/ quote?: string - /** If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. Otherwise, the default value is `false`. */ + /** If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + * If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. + * Otherwise, the default value is `false`. */ should_trim_fields?: boolean - /** The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will be stopped. */ + /** The maximum amount of time that the structure analysis can take. + * If the analysis is still running when the timeout expires then it will be stopped. */ timeout?: Duration - /** The name of the field that contains the primary timestamp of each record in the text. In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. */ + /** The name of the field that contains the primary timestamp of each record in the text. + * In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. + * + * If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + * + * For structured text, if you specify this parameter, the field must exist within the text. + * + * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field - /** The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. 
Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. */ + /** The Java time format of the timestamp field in the text. + * + * Only a subset of Java time format letter groups are supported: + * + * * `a` + * * `d` + * * `dd` + * * `EEE` + * * `EEEE` + * * `H` + * * `HH` + * * `h` + * * `M` + * * `MM` + * * `MMM` + * * `MMMM` + * * `mm` + * * `ss` + * * `XX` + * * `XXX` + * * `yy` + * * `yyyy` + * * `zzz` + * + * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a `.`, `,` or `:`. + * Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. + * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + * + * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + * Another is when the timestamp format is one that the structure finder does not consider by default. + * + * If this parameter is not specified, the structure finder chooses the best format from a built-in set. + * + * If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. + * When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string text_files?: TJsonDocument[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { charset?: never, column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, has_header_row?: never, line_merge_size_limit?: never, lines_to_sample?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, text_files?: never } @@ -24678,25 +35466,44 @@ export interface TextStructureFindStructureRequest { } export interface TextStructureFindStructureResponse { + /** The character encoding used to parse the text. */ charset: string has_header_row?: boolean + /** For UTF character encodings, it indicates whether the text begins with a byte order marker. */ has_byte_order_marker: boolean + /** Valid values include `ndjson`, `xml`, `delimited`, and `semi_structured_text`. */ format: string + /** The most common values of each field, plus basic numeric statistics for the numeric `page_count` field. + * This information may provide clues that the data needs to be cleaned or transformed prior to use by other Elastic Stack functionality. */ field_stats: Record + /** The first two messages in the text verbatim. + * This may help diagnose parse errors or accidental uploads of the wrong text. */ sample_start: string + /** The number of distinct messages the lines contained. + * For NDJSON, this value is the same as `num_lines_analyzed`. + * For other text formats, messages can span several lines. */ num_messages_analyzed: integer + /** Some suitable mappings for an index into which the data could be ingested. */ mappings: MappingTypeMapping quote?: string delimiter?: string + /** If a timestamp format is detected that does not include a timezone, `need_client_timezone` is `true`. + * The server that parses the text must therefore be told the correct timezone by the client. 
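+ *
+ * Editor's sketch, not part of the generated definitions (assumes a configured `client` instance of this package's `Client` and `lines`, an array of sample documents): checking this flag after analysis:
+ *
+ *   const structure = await client.textStructure.findStructure({ text_files: lines })
+ *   if (structure.need_client_timezone) {
+ *     // the ingesting side must supply an explicit timezone, for example when running the suggested ingest pipeline
+ *   }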
*/ need_client_timezone: boolean + /** The number of lines of the text that were analyzed. */ num_lines_analyzed: integer + /** If `format` is `delimited`, the `column_names` field lists the column names in the order they appear in the sample. */ column_names?: string[] explanation?: string[] grok_pattern?: GrokPattern multiline_start_pattern?: string exclude_lines_pattern?: string + /** The Java time formats recognized in the time fields. + * Elasticsearch mappings and ingest pipelines use this format. */ java_timestamp_formats?: string[] + /** Information that is used to tell Logstash how to parse timestamps. */ joda_timestamp_formats?: string[] + /** The field considered most likely to be the primary timestamp of each document. */ timestamp_field?: Field should_trim_fields?: boolean ingest_pipeline: IngestPipelineConfig @@ -24714,7 +35521,9 @@ export interface TextStructureTestGrokPatternMatchedText { } export interface TextStructureTestGrokPatternRequest extends RequestBase { -/** The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. */ + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * Valid values are `disabled` and `v1`. */ ecs_compatibility?: string /** The Grok pattern to run on the text. */ grok_pattern: GrokPattern @@ -24731,18 +35540,35 @@ export interface TextStructureTestGrokPatternResponse { } export interface TransformDestination { + /** The destination index for the transform. The mappings of the destination index are deduced based on the source + * fields when possible. If alternate mappings are required, use the create index API prior to starting the + * transform. */ index?: IndexName + /** The unique identifier for an ingest pipeline. */ pipeline?: string } export interface TransformLatest { + /** Specifies the date field that is used to identify the latest documents. */ sort: Field + /** Specifies an array of one or more fields that are used to group the data. */ unique_key: Field[] } export interface TransformPivot { + /** Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket + * script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, + * min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted + * average. */ aggregations?: Record + /** Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket + * script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, + * min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted + * average. + * @alias aggregations */ aggs?: Record + /** Defines how to group the data. More than one grouping can be defined per pivot. The following groupings are + * currently supported: date histogram, geotile grid, histogram, terms. */ group_by?: Record } @@ -24754,44 +35580,77 @@ export interface TransformPivotGroupByContainer { } export interface TransformRetentionPolicy { + /** The date field that is used to calculate the age of the document. 
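+ *
+ * Editor's sketch with illustrative values only, using the container type declared below:
+ *
+ *   const retention_policy: TransformRetentionPolicyContainer = {
+ *     time: { field: '@timestamp', max_age: '30d' }
+ *   }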
*/ field: Field + /** Specifies the maximum age of a document in the destination index. Documents that are older than the configured + * value are removed from the destination index. */ max_age: Duration } export interface TransformRetentionPolicyContainer { + /** Specifies that the transform uses a time field to set the retention policy. */ time?: TransformRetentionPolicy } export interface TransformSettings { + /** Specifies whether the transform checkpoint ranges should be optimized for performance. Such optimization can align + * checkpoint ranges with the date histogram interval when date histogram is specified as a group source in the + * transform config. As a result, fewer document updates in the destination index will be performed thus improving + * overall performance. */ align_checkpoints?: boolean + /** Defines if dates in the output should be written as ISO formatted string or as millis since epoch. epoch_millis was + * the default for transforms created before version 7.11. For compatible output set this value to `true`. */ dates_as_epoch_millis?: boolean + /** Specifies whether the transform should deduce the destination index mappings from the transform configuration. */ deduce_mappings?: boolean + /** Specifies a limit on the number of input documents per second. This setting throttles the transform by adding a + * wait time between search requests. The default value is null, which disables throttling. */ docs_per_second?: float + /** Defines the initial page size to use for the composite aggregation for each checkpoint. If circuit breaker + * exceptions occur, the page size is dynamically adjusted to a lower value. The minimum value is `10` and the + * maximum is `65,536`. */ max_page_search_size?: integer + /** If `true`, the transform runs in unattended mode. In unattended mode, the transform retries indefinitely in case + * of an error which means the transform never fails. Setting the number of retries other than infinite fails in + * validation. */ unattended?: boolean } export interface TransformSource { + /** The source indices for the transform. It can be a single index, an index pattern (for example, `"my-index-*"`), an + * array of indices (for example, `["my-index-000001", "my-index-000002"]`), or an array of index patterns (for + * example, `["my-index-*", "my-other-index-*"]`). For remote indices use the syntax `"remote_name:index_name"`. If + * any indices are in remote clusters then the master node and at least one transform node must have the `remote_cluster_client` node role. */ index: Indices + /** A query clause that retrieves a subset of data from the source index. */ query?: QueryDslQueryContainer + /** Definitions of search-time runtime fields that can be used by the transform. For search runtime fields all data + * nodes, including remote nodes, must be 7.12 or later. */ runtime_mappings?: MappingRuntimeFields } export interface TransformSyncContainer { + /** Specifies that the transform uses a time field to synchronize the source and destination indices. */ time?: TransformTimeSync } export interface TransformTimeSync { + /** The time delay between the current time and the latest input data time. */ delay?: Duration + /** The date field that is used to identify new documents in the source. In general, it’s a good idea to use a field + * that contains the ingest timestamp. If you use a different field, you might need to set the delay such that it + * accounts for data transmission delays. 
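+ *
+ * Editor's sketch with illustrative values only (`event.ingested` stands in for whatever field carries your ingest timestamp), using the container type declared above:
+ *
+ *   const sync: TransformSyncContainer = {
+ *     time: { field: 'event.ingested', delay: '60s' }
+ *   }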
*/ field: Field } export interface TransformDeleteTransformRequest extends RequestBase { -/** Identifier for the transform. */ + /** Identifier for the transform. */ transform_id: Id - /** If this value is false, the transform must be stopped before it can be deleted. If true, the transform is deleted regardless of its current state. */ + /** If this value is false, the transform must be stopped before it can be deleted. If true, the transform is + * deleted regardless of its current state. */ force?: boolean - /** If this value is true, the destination index is deleted together with the transform. If false, the destination index will not be deleted */ + /** If this value is true, the destination index is deleted together with the transform. If false, the destination + * index will not be deleted */ delete_dest_index?: boolean /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -24804,15 +35663,27 @@ export interface TransformDeleteTransformRequest extends RequestBase { export type TransformDeleteTransformResponse = AcknowledgedResponseBase export interface TransformGetTransformRequest extends RequestBase { -/** Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``. */ + /** Identifier for the transform. It can be a transform identifier or a + * wildcard expression. You can get information for all transforms by using + * `_all`, by specifying `*` as the ``, or by omitting the + * ``. */ transform_id?: Names - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no transforms that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * If this parameter is false, the request returns a 404 status code when + * there are no matches or only partial matches. */ allow_no_match?: boolean /** Skips the specified number of transforms. */ from?: integer /** Specifies the maximum number of transforms to obtain. */ size?: integer - /** Excludes fields that were automatically added when creating the transform. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ + /** Excludes fields that were automatically added when creating the + * transform. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ exclude_generated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never } @@ -24826,18 +35697,28 @@ export interface TransformGetTransformResponse { } export interface TransformGetTransformTransformSummary { + /** The security privileges that the transform uses to run its queries. 
If Elastic Stack security features were disabled at the time of the most recent update to the transform, this property is omitted. */ authorization?: MlTransformAuthorization + /** The time the transform was created. */ create_time?: EpochTime + create_time_string?: DateTime + /** Free text description of the transform. */ description?: string + /** The destination for the transform. */ dest: ReindexDestination frequency?: Duration id: Id latest?: TransformLatest + /** The pivot method transforms the data by aggregating and grouping it. */ pivot?: TransformPivot retention_policy?: TransformRetentionPolicyContainer + /** Defines optional transform settings. */ settings?: TransformSettings + /** The source of the data for the transform. */ source: TransformSource + /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer + /** The version of Elasticsearch that existed on the node when the transform was created. */ version?: VersionString _meta?: Metadata } @@ -24853,17 +35734,28 @@ export interface TransformGetTransformStatsCheckpointStats { export interface TransformGetTransformStatsCheckpointing { changes_last_detected_at?: long - changes_last_detected_at_date_time?: DateTime + changes_last_detected_at_string?: DateTime last: TransformGetTransformStatsCheckpointStats next?: TransformGetTransformStatsCheckpointStats operations_behind?: long last_search_time?: long + last_search_time_string?: DateTime } export interface TransformGetTransformStatsRequest extends RequestBase { -/** Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``. */ + /** Identifier for the transform. It can be a transform identifier or a + * wildcard expression. You can get information for all transforms by using + * `_all`, by specifying `*` as the ``, or by omitting the + * ``. */ transform_id: Names - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no transforms that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * If this parameter is false, the request returns a 404 status code when + * there are no matches or only partial matches. */ allow_no_match?: boolean /** Skips the specified number of transforms. 
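+ *
+ * Editor's sketch, not part of the generated definitions (assumes a configured `client`; the method name follows this client's camel-case convention): paging through stats ten transforms at a time:
+ *
+ *   const stats = await client.transform.getTransformStats({ transform_id: '_all', from: 0, size: 10 })
+ *   for (const t of stats.transforms) console.log(t.id, t.state)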
 */
   from?: long
@@ -24882,6 +35774,20 @@ export interface TransformGetTransformStatsResponse {
   transforms: TransformGetTransformStatsTransformStats[]
 }

+export interface TransformGetTransformStatsTransformHealthIssue {
+  /** The type of the issue */
+  type: string
+  /** A description of the issue */
+  issue: string
+  /** Details about the issue */
+  details?: string
+  /** Number of times this issue has occurred since it started */
+  count: integer
+  /** The timestamp this issue occurred for the first time */
+  first_occurrence?: EpochTime
+  first_occurence_string?: DateTime
+}
+
 export interface TransformGetTransformStatsTransformIndexerStats {
   delete_time_in_ms?: EpochTime
   documents_indexed: long
@@ -24914,6 +35820,7 @@ export interface TransformGetTransformStatsTransformStats {
   checkpointing: TransformGetTransformStatsCheckpointing
   health?: TransformGetTransformStatsTransformStatsHealth
   id: Id
+  /** @remarks This property is not supported on Elastic Cloud Serverless. */
   node?: NodeAttributes
   reason?: string
   state: string
@@ -24922,20 +35829,29 @@ export interface TransformGetTransformStatsTransformStats {

 export interface TransformGetTransformStatsTransformStatsHealth {
   status: HealthStatus
+  /** If a non-healthy status is returned, contains a list of issues of the transform. */
+  issues?: TransformGetTransformStatsTransformHealthIssue[]
 }

 export interface TransformPreviewTransformRequest extends RequestBase {
-/** Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in the request body. */
+  /** Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform
+   * configuration details in the request body. */
   transform_id?: Id
-  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+  /** Period to wait for a response. If no response is received before the
+   * timeout expires, the request fails and returns an error. */
   timeout?: Duration
   /** The destination for the transform. */
   dest?: TransformDestination
   /** Free text description of the transform. */
   description?: string
-  /** The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. */
+  /** The interval between checks for changes in the source indices when the
+   * transform is running continuously. Also determines the retry interval in
+   * the event of transient failures while the transform is searching or
+   * indexing. The minimum value is 1s and the maximum is 1h. */
  frequency?: Duration
-  /** The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. */
+  /** The pivot method transforms the data by aggregating and grouping it.
+   * These objects define the group by fields and the aggregation to reduce
+   * the data. */
  pivot?: TransformPivot
  /** The source of the data for the transform. */
  source?: TransformSource
@@ -24943,9 +35859,11 @@ export interface TransformPreviewTransformRequest extends RequestBase {
  settings?: TransformSettings
  /** Defines the properties transforms require to run continuously. */
  sync?: TransformSyncContainer
-  /** Defines a retention policy for the transform. 
Data that meets the defined criteria is deleted from the destination index. */ + /** Defines a retention policy for the transform. Data that meets the defined + * criteria is deleted from the destination index. */ retention_policy?: TransformRetentionPolicyContainer - /** The latest method transforms the data by finding the latest document for each unique key. */ + /** The latest method transforms the data by finding the latest document for + * each unique key. */ latest?: TransformLatest /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { transform_id?: never, timeout?: never, dest?: never, description?: never, frequency?: never, pivot?: never, source?: never, settings?: never, sync?: never, retention_policy?: never, latest?: never } @@ -24959,9 +35877,14 @@ export interface TransformPreviewTransformResponse { } export interface TransformPutTransformRequest extends RequestBase { -/** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ + /** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ transform_id: Id - /** When the transform is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source index pattern. You can use this parameter to skip the checks, for example when the source index does not exist until after the transform is created. The validations are always run when you start the transform, however, with the exception of privilege checks. */ + /** When the transform is created, a series of validations occur to ensure its success. For example, there is a + * check for the existence of the source indices and a check that the destination index is not part of the source + * index pattern. You can use this parameter to skip the checks, for example when the source index does not exist + * until after the transform is created. The validations are always run when you start the transform, however, with + * the exception of privilege checks. */ defer_validation?: boolean /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -24969,15 +35892,19 @@ export interface TransformPutTransformRequest extends RequestBase { dest: TransformDestination /** Free text description of the transform. */ description?: string - /** The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is `1s` and the maximum is `1h`. */ + /** The interval between checks for changes in the source indices when the transform is running continuously. Also + * determines the retry interval in the event of transient failures while the transform is searching or indexing. + * The minimum value is `1s` and the maximum is `1h`. */ frequency?: Duration /** The latest method transforms the data by finding the latest document for each unique key. */ latest?: TransformLatest /** Defines optional transform metadata. 
*/ _meta?: Metadata - /** The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. */ + /** The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields + * and the aggregation to reduce the data. */ pivot?: TransformPivot - /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. */ + /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the + * destination index. */ retention_policy?: TransformRetentionPolicyContainer /** Defines optional transform settings. */ settings?: TransformSettings @@ -24994,9 +35921,11 @@ export interface TransformPutTransformRequest extends RequestBase { export type TransformPutTransformResponse = AcknowledgedResponseBase export interface TransformResetTransformRequest extends RequestBase { -/** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ + /** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ transform_id: Id - /** If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset. */ + /** If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform + * must be stopped before it can be reset. */ force?: boolean /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -25009,7 +35938,7 @@ export interface TransformResetTransformRequest extends RequestBase { export type TransformResetTransformResponse = AcknowledgedResponseBase export interface TransformScheduleNowTransformRequest extends RequestBase { -/** Identifier for the transform. */ + /** Identifier for the transform. */ transform_id: Id /** Controls the time to wait for the scheduling to take place */ timeout?: Duration @@ -25022,7 +35951,7 @@ export interface TransformScheduleNowTransformRequest extends RequestBase { export type TransformScheduleNowTransformResponse = AcknowledgedResponseBase export interface TransformStartTransformRequest extends RequestBase { -/** Identifier for the transform. */ + /** Identifier for the transform. */ transform_id: Id /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -25037,17 +35966,29 @@ export interface TransformStartTransformRequest extends RequestBase { export type TransformStartTransformResponse = AcknowledgedResponseBase export interface TransformStopTransformRequest extends RequestBase { -/** Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. To stop all transforms, use `_all` or `*` as the identifier. */ + /** Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. + * To stop all transforms, use `_all` or `*` as the identifier. 
*/ transform_id: Name - /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If it is true, the API returns a successful acknowledgement message when there are no matches. When there are only partial matches, the API stops the appropriate transforms. If it is false, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; + * contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there + * are only partial matches. + * + * If it is true, the API returns a successful acknowledgement message when there are no matches. When there are + * only partial matches, the API stops the appropriate transforms. + * + * If it is false, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean /** If it is true, the API forcefully stops the transforms. */ force?: boolean - /** Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the timeout expires, the request returns a timeout exception. However, the request continues processing and eventually moves the transform to a STOPPED state. */ + /** Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the + * timeout expires, the request returns a timeout exception. However, the request continues processing and + * eventually moves the transform to a STOPPED state. */ timeout?: Duration - /** If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, the transform stops as soon as possible. */ + /** If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, + * the transform stops as soon as possible. */ wait_for_checkpoint?: boolean - /** If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns immediately and the indexer is stopped asynchronously in the background. */ + /** If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns + * immediately and the indexer is stopped asynchronously in the background. */ wait_for_completion?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, force?: never, timeout?: never, wait_for_checkpoint?: never, wait_for_completion?: never } @@ -25058,17 +35999,23 @@ export interface TransformStopTransformRequest extends RequestBase { export type TransformStopTransformResponse = AcknowledgedResponseBase export interface TransformUpdateTransformRequest extends RequestBase { -/** Identifier for the transform. */ + /** Identifier for the transform. */ transform_id: Id - /** When true, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the transform is created. */ + /** When true, deferrable validations are not run. This behavior may be + * desired if the source index does not exist until after the transform is + * created. */ defer_validation?: boolean - /** Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ timeout?: Duration /** The destination for the transform. */ dest?: TransformDestination /** Free text description of the transform. */ description?: string - /** The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. */ + /** The interval between checks for changes in the source indices when the + * transform is running continuously. Also determines the retry interval in + * the event of transient failures while the transform is searching or + * indexing. The minimum value is 1s and the maximum is 1h. */ frequency?: Duration /** Defines optional transform metadata. */ _meta?: Metadata @@ -25078,7 +36025,8 @@ export interface TransformUpdateTransformRequest extends RequestBase { settings?: TransformSettings /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer - /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. */ + /** Defines a retention policy for the transform. Data that meets the defined + * criteria is deleted from the destination index. */ retention_policy?: TransformRetentionPolicyContainer | null /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, _meta?: never, source?: never, settings?: never, sync?: never, retention_policy?: never } @@ -25104,9 +36052,10 @@ export interface TransformUpdateTransformResponse { } export interface TransformUpgradeTransformsRequest extends RequestBase { -/** When true, the request checks for updates but does not run them. */ + /** When true, the request checks for updates but does not run them. */ dry_run?: boolean - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and + * returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { dry_run?: never, timeout?: never } @@ -25115,8 +36064,11 @@ export interface TransformUpgradeTransformsRequest extends RequestBase { } export interface TransformUpgradeTransformsResponse { + /** The number of transforms that need to be upgraded. */ needs_update: integer + /** The number of transforms that don’t require upgrading. */ no_action: integer + /** The number of transforms that have been upgraded. */ updated: integer } @@ -25297,7 +36249,9 @@ export interface WatcherExecutionState { export type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued' export interface WatcherExecutionThreadPool { + /** The largest size of the execution thread pool, which indicates the largest number of concurrent running watches. 
*/ max_size: long + /** The number of watches that were triggered and are currently queued. */ queue_size: long } @@ -25425,6 +36379,7 @@ export interface WatcherPagerDutyEvent { client?: string client_url?: string contexts?: WatcherPagerDutyContext[] + /** @alias contexts */ context?: WatcherPagerDutyContext[] description: string event_type?: WatcherPagerDutyEventType @@ -25485,9 +36440,9 @@ export interface WatcherScheduleTriggerEvent { } export interface WatcherScriptCondition { - lang?: string + lang?: ScriptLanguage params?: Record - source?: string + source?: ScriptSource id?: string } @@ -25512,9 +36467,14 @@ export interface WatcherSearchInputRequestDefinition { export interface WatcherSearchTemplateRequestBody { explain?: boolean + /** ID of the search template to use. If no source is specified, + * this parameter is required. */ id?: Id params?: Record profile?: boolean + /** An inline search template. Supports the same parameters as the search API's + * request body. Also supports Mustache variables. If no id is specified, this + * parameter is required. */ source?: string } @@ -25637,9 +36597,10 @@ export interface WatcherWebhookResult { } export interface WatcherAckWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ watch_id: Name - /** A comma-separated list of the action identifiers to acknowledge. If you omit this parameter, all of the actions of the watch are acknowledged. */ + /** A comma-separated list of the action identifiers to acknowledge. + * If you omit this parameter, all of the actions of the watch are acknowledged. */ action_id?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { watch_id?: never, action_id?: never } @@ -25652,7 +36613,7 @@ export interface WatcherAckWatchResponse { } export interface WatcherActivateWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ watch_id: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { watch_id?: never } @@ -25665,7 +36626,7 @@ export interface WatcherActivateWatchResponse { } export interface WatcherDeactivateWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ watch_id: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { watch_id?: never } @@ -25678,7 +36639,7 @@ export interface WatcherDeactivateWatchResponse { } export interface WatcherDeleteWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ id: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -25693,7 +36654,7 @@ export interface WatcherDeleteWatchResponse { } export interface WatcherExecuteWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ id?: Id /** Defines whether the watch runs in debug mode. */ debug?: boolean @@ -25703,12 +36664,15 @@ export interface WatcherExecuteWatchRequest extends RequestBase { alternative_input?: Record /** When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. */ ignore_condition?: boolean - /** When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. 
In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter. */ + /** When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. + * In addition, the status of the watch is updated, possibly throttling subsequent runs. + * This can also be specified as an HTTP parameter. */ record_execution?: boolean simulated_actions?: WatcherSimulatedActions /** This structure is parsed as the data of the trigger event that will be used during the watch execution. */ trigger_data?: WatcherScheduleTriggerEvent - /** When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set. */ + /** When present, this watch is used instead of the one specified in the request. + * This watch is not persisted to the index and `record_execution` cannot be set. */ watch?: WatcherWatch /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, debug?: never, action_modes?: never, alternative_input?: never, ignore_condition?: never, record_execution?: never, simulated_actions?: never, trigger_data?: never, watch?: never } @@ -25717,7 +36681,9 @@ export interface WatcherExecuteWatchRequest extends RequestBase { } export interface WatcherExecuteWatchResponse { + /** The watch record identifier as it would be stored in the `.watcher-history` index. */ _id: Id + /** The watch record document as it would be stored in the `.watcher-history` index. */ watch_record: WatcherExecuteWatchWatchRecord } @@ -25736,7 +36702,8 @@ export interface WatcherExecuteWatchWatchRecord { } export interface WatcherGetSettingsRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -25749,7 +36716,7 @@ export interface WatcherGetSettingsResponse { } export interface WatcherGetWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ id: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -25768,9 +36735,10 @@ export interface WatcherGetWatchResponse { } export interface WatcherPutWatchRequest extends RequestBase { -/** The identifier for the watch. */ + /** The identifier for the watch. */ id: Id - /** The initial state of the watch. The default value is `true`, which means the watch is active by default. */ + /** The initial state of the watch. + * The default value is `true`, which means the watch is active by default. */ active?: boolean /** only update the watch if the last operation that has changed the watch has the specified primary term */ if_primary_term?: long @@ -25786,7 +36754,10 @@ export interface WatcherPutWatchRequest extends RequestBase { input?: WatcherInputContainer /** Metadata JSON that will be copied into the history entries. */ metadata?: Metadata - /** The minimum time between actions being run. The default is 5 seconds. 
This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. */ + /** The minimum time between actions being run. + * The default is 5 seconds. + * This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. + * If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. */ throttle_period?: Duration /** Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. */ throttle_period_in_millis?: DurationValue @@ -25809,9 +36780,11 @@ export interface WatcherPutWatchResponse { } export interface WatcherQueryWatchesRequest extends RequestBase { -/** The offset from the first result to fetch. It must be non-negative. */ + /** The offset from the first result to fetch. + * It must be non-negative. */ from?: integer - /** The number of hits to return. It must be non-negative. */ + /** The number of hits to return. + * It must be non-negative. */ size?: integer /** A query that filters the watches to be returned. */ query?: QueryDslQueryContainer @@ -25826,12 +36799,14 @@ export interface WatcherQueryWatchesRequest extends RequestBase { } export interface WatcherQueryWatchesResponse { + /** The total number of watches found. */ count: integer + /** A list of watches based on the `from`, `size`, or `search_after` request body parameters. */ watches: WatcherQueryWatch[] } export interface WatcherStartRequest extends RequestBase { -/** Period to wait for a connection to the master node. */ + /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -25842,7 +36817,7 @@ export interface WatcherStartRequest extends RequestBase { export type WatcherStartResponse = AcknowledgedResponseBase export interface WatcherStatsRequest extends RequestBase { -/** Defines which additional metrics are included in the response. */ + /** Defines which additional metrics are included in the response. */ metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[] /** Defines whether stack traces are generated for each watch that is running. */ emit_stacktraces?: boolean @@ -25860,24 +36835,42 @@ export interface WatcherStatsResponse { } export interface WatcherStatsWatchRecordQueuedStats { + /** The time the watch was run. + * This is just before the input is being run. */ execution_time: DateTime } export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQueuedStats { + /** The current watch execution phase. */ execution_phase: WatcherExecutionPhase + /** The time the watch was triggered by the trigger engine. */ triggered_time: DateTime executed_actions?: string[] watch_id: Id + /** The watch record identifier. */ watch_record_id: Id } export type WatcherStatsWatcherMetric = '_all' | 'all' | 'queued_watches' | 'current_watches' | 'pending_watches' export interface WatcherStatsWatcherNodeStats { + /** The current executing watches metric gives insight into the watches that are currently being executed by Watcher. + * Additional information is shared per watch that is currently executing. 
+   * This information includes the `watch_id`, the time its execution started and its current execution phase.
+   * To include this metric, the `metric` option should be set to `current_watches` or `_all`.
+   * In addition you can also specify the `emit_stacktraces=true` parameter, which adds stack traces for each watch that is being run.
+   * These stack traces can give you more insight into an execution of a watch. */
   current_watches?: WatcherStatsWatchRecordStats[]
   execution_thread_pool: WatcherExecutionThreadPool
+  /** Watcher moderates the execution of watches such that their execution won't put too much pressure on the node and its resources.
+   * If too many watches trigger concurrently and there isn't enough capacity to run them all, some of the watches are queued, waiting for the current running watches to finish.
+   * The queued watches metric gives insight into these queued watches.
+   *
+   * To include this metric, the `metric` option should include `queued_watches` or `_all`. */
   queued_watches?: WatcherStatsWatchRecordQueuedStats[]
+  /** The number of watches currently registered. */
   watch_count: long
+  /** The current state of Watcher. */
   watcher_state: WatcherStatsWatcherState
   node_id: Id
 }
@@ -25885,7 +36878,9 @@ export interface WatcherStatsWatcherNodeStats {

 export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping'

 export interface WatcherStopRequest extends RequestBase {
-/** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */
+  /** The period to wait for the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * To indicate that the request should never timeout, set it to `-1`. */
   master_timeout?: Duration
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { master_timeout?: never }
@@ -25896,9 +36891,11 @@ export interface WatcherStopRequest extends RequestBase {

 export type WatcherStopResponse = AcknowledgedResponseBase

 export interface WatcherUpdateSettingsRequest extends RequestBase {
-/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+  /** The period to wait for a connection to the master node.
+   * If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
-  /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+  /** The period to wait for a response.
+   * If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
   'index.auto_expand_replicas'?: string
   'index.number_of_replicas'?: integer
@@ -25931,8 +36928,10 @@ export interface XpackInfoFeatures {
   data_streams: XpackInfoFeature
   data_tiers: XpackInfoFeature
   enrich: XpackInfoFeature
+  /** @remarks This property is not supported on Elastic Cloud Serverless. */
   enterprise_search: XpackInfoFeature
   eql: XpackInfoFeature
+  /** @remarks This property is not supported on Elastic Cloud Serverless. 
 */
   esql: XpackInfoFeature
   graph: XpackInfoFeature
   ilm: XpackInfoFeature
   logstash: XpackInfoFeature
   ml: XpackInfoFeature
   monitoring: XpackInfoFeature
   rollup: XpackInfoFeature
   runtime_fields: XpackInfoFeature
   searchable_snapshots: XpackInfoFeature
   security: XpackInfoFeature
   slm: XpackInfoFeature
   spatial: XpackInfoFeature
   sql: XpackInfoFeature
   transform: XpackInfoFeature
+  /** @remarks This property is not supported on Elastic Cloud Serverless. */
   universal_profiling: XpackInfoFeature
   voting_only: XpackInfoFeature
   watcher: XpackInfoFeature
+  /** @remarks This property is not supported on Elastic Cloud Serverless. */
   archive: XpackInfoFeature
 }

 export interface XpackInfoMinimalLicenseInformation {
   expiry_date_in_millis: EpochTime
   mode: LicenseLicenseType
   status: LicenseLicenseStatus
   type: LicenseLicenseType
   uid: string
 }

 export interface XpackInfoNativeCodeInformation {
   build_hash: string
   version: VersionString
 }

 export interface XpackInfoRequest extends RequestBase {
-/** A comma-separated list of the information categories to include in the response. For example, `build,license,features`. */
+  /** A comma-separated list of the information categories to include in the response.
+   * For example, `build,license,features`. */
   categories?: XpackInfoXPackCategory[]
   /** If this param is used it must be set to true */
   accept_enterprise?: boolean
-  /** Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. */
+  /** Defines whether additional human-readable information is included in the response.
+   * In particular, it adds descriptions and a tag line. */
   human?: boolean
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { categories?: never, accept_enterprise?: never, human?: never }
@@ -26144,6 +37147,7 @@ export interface XpackUsageJobUsage {

 export interface XpackUsageMachineLearning extends XpackUsageBase {
   datafeeds: Record
+  /** Job usage statistics. The `_all` entry is always present and gathers statistics for all jobs. */
   jobs: Record
   node_count: integer
   data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs
@@ -26270,7 +37274,9 @@ export interface XpackUsageRealmCache {
 }

 export interface XpackUsageRequest extends RequestBase {
-/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */
+  /** The period to wait for a connection to the master node.
+   * If no response is received before the timeout expires, the request fails and returns an error.
+   * To indicate that the request should never timeout, set it to `-1`. */
   master_timeout?: Duration
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { master_timeout?: never }
@@ -26444,15 +37450,31 @@ export interface SpecUtilsAdditionalProperty {
 }

 export interface SpecUtilsCommonQueryParameters {
+  /** When set to `true` Elasticsearch will include the full stack trace of errors
+   * when they occur. */
   error_trace?: boolean
+  /** Comma-separated list of filters in dot notation which reduce the response
+   * returned by Elasticsearch. */
   filter_path?: string | string[]
+  /** When set to `true` will return statistics in a format suitable for humans.
+   * For example `"exists_time": "1h"` for humans and
+   * `"exists_time_in_millis": 3600000` for computers. When disabled the human
+   * readable values will be omitted. This makes sense for responses being consumed
+   * only by machines. */
   human?: boolean
+  /** If set to `true` the returned JSON will be "pretty-formatted". Use
+   * this option for debugging only. 
*/ pretty?: boolean } export interface SpecUtilsCommonCatQueryParameters { + /** Specifies the format to return the columnar data in, can be set to + * `text`, `json`, `cbor`, `yaml`, or `smile`. */ format?: string + /** When set to `true` will output available columns. This option + * can't be combined with any other query string option. */ help?: boolean + /** When set to `true` will enable verbose output. */ v?: boolean } From dea4db1736b50a3f5ea59f4ae92fa2b82b3ec617 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 4 Apr 2025 13:18:12 -0500 Subject: [PATCH 509/647] Bump to 9.0.0-alpha.5 (#2701) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 0cd01eac9..95768e8ee 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@elastic/elasticsearch", - "version": "9.0.0-alpha.4", + "version": "9.0.0-alpha.5", "versionCanary": "9.0.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", From 11a1297792ea6583d83f0f9b11acfbca27928993 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 11:13:25 -0500 Subject: [PATCH 510/647] Update dependency @elastic/request-converter to v9.0.1 (#2704) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 95768e8ee..b251370b8 100644 --- a/package.json +++ b/package.json @@ -57,7 +57,7 @@ "node": ">=18" }, "devDependencies": { - "@elastic/request-converter": "9.0.0", + "@elastic/request-converter": "9.0.1", "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "0.7.34", From fd0c9992b35b7e45ac21e9be9bd20720a8035a76 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 11:14:06 -0500 Subject: [PATCH 511/647] Update dependency typescript to v5.8.3 (#2705) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index b251370b8..1fa5906d4 100644 --- a/package.json +++ b/package.json @@ -84,7 +84,7 @@ "tap": "21.1.0", "ts-node": "10.9.2", "ts-standard": "12.0.2", - "typescript": "5.8.2", + "typescript": "5.8.3", "workq": "3.0.0", "xmlbuilder2": "3.1.1", "zx": "7.2.3" From 8174ba520700f78447b377bff3239d62794d7298 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 7 Apr 2025 13:53:24 -0500 Subject: [PATCH 512/647] Changelog for 9.0.0 (#2712) * Changelog for 9.0.0 * Update title for release notes page * Grammar tweak * Adjustment to formatting --- docs/release-notes/breaking-changes.md | 47 +++++++++++++++++++++----- docs/release-notes/index.md | 20 ++++++++--- 2 files changed, 55 insertions(+), 12 deletions(-) diff --git a/docs/release-notes/breaking-changes.md b/docs/release-notes/breaking-changes.md index a05c07b7c..326fafdaf 100644 --- a/docs/release-notes/breaking-changes.md +++ b/docs/release-notes/breaking-changes.md @@ -3,22 +3,53 @@ navigation_title: "Breaking changes" --- # Elasticsearch JavaScript Client breaking changes [elasticsearch-javascript-client-breaking-changes] + Breaking changes can impact your Elastic applications, potentially disrupting normal operations. 
Before you upgrade, carefully review the Elasticsearch JavaScript Client breaking changes and take the necessary steps to mitigate any issues. To learn how to upgrade, check [Upgrade](docs-content://deploy-manage/upgrade.md). % ## Next version [elasticsearch-javascript-client-versionnext-breaking-changes] -% ::::{dropdown} Title of breaking change +% ::::{dropdown} Title of breaking change % Description of the breaking change. % For more information, check [PR #](PR link). % **Impact**
    Impact of the breaking change. % **Action**
    Steps for mitigating deprecation impact. % :::: -% ## 9.0.0 [elasticsearch-javascript-client-900-breaking-changes] +## 9.0.0 [elasticsearch-javascript-client-900-breaking-changes] -% ::::{dropdown} Title of breaking change -% Description of the breaking change. -% For more information, check [PR #](PR link). -% **Impact**
    Impact of the breaking change. -% **Action**
    Steps for mitigating deprecation impact. -% :::: \ No newline at end of file +::::{dropdown} Changes to the optional body property + +In 8.x, every API function had a `body` property that would provide a place to put arbitrary values that should go in the HTTP request body, even if they were not noted in the specification or documentation. In 9.0, each API function still includes an optional `body` property, but TypeScript's type checker will disallow properties that should go in the root of the object. A `querystring` parameter has also been added that behaves the same as `body`, but inserts its values into the request querystring. + +**Impact**
    Some adjustments to API calls may be necessary for code that used a `body` property in 8.x, especially to appease the TypeScript type checker, but it should not have any impact on any code that was not using a `body` property.

**Action**
    Check existing code for use of the `body` property, and move any properties that should be in the root object according to the API function's request type definition. If using TypeScript, the TypeScript type checker will surface most of these issues for you. For example:
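A minimal sketch of the change; the node URL, index name, and query values are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// 8.x style: request body parameters nested under `body`.
// In 9.0 the TypeScript type checker rejects the nested `query` here.
await client.search({
  index: 'my-index',
  body: {
    query: { match: { title: 'foo' } }
  }
})

// 9.0 style: the same parameters move to the root of the request object.
await client.search({
  index: 'my-index',
  query: { match: { title: 'foo' } }
})
```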
::::

::::{dropdown} Changes to API parameter collation into an HTTP request

The logic for where each parameter in an API function call should be added to its HTTP request has been updated:

1. If recognized as a `body` parameter according to the Elasticsearch specification, put it in the JSON body
2. If recognized as a `path` parameter, put it in the URL path
3. If recognized as a `query` parameter or a "common" query parameter (e.g. `pretty`, `error_trace`), put it in the querystring
4. If not recognized, and this API accepts a JSON body, put it in the JSON body
5. If not recognized and this API does not accept a JSON body, put it in the querystring

The first two steps are identical in 8.x. The final three steps replace the logic from 8.x that put all unrecognized parameters in the querystring.

**Impact**<br>
    Some parameters that were sent via querystring to Elasticsearch may be sent in the JSON request body, and vice versa.

**Action**
    If Elasticsearch sends back an error response due to a request not being valid, verify with the client's TypeScript type definitions, or via the docs, that the parameters your code passes are correct. A sketch of how the collation rules apply is shown below.
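This is an illustration only; the index name and parameter values are placeholders, and `client` is assumed to be an existing client instance:

```ts
await client.search({
  index: 'my-index',          // path parameter  -> POST /my-index/_search
  expand_wildcards: 'open',   // query parameter -> ?expand_wildcards=open
  query: { match_all: {} }    // body parameter  -> JSON request body
})
```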
::::

::::{dropdown} Removal of the default 30-second timeout on all API calls

The default 30-second timeout on all HTTP requests sent to Elasticsearch has been dropped in favor of having no timeout set at all. The previous behavior still works as it did when setting the `requestTimeout` value.

See pull request [#2573](https://github.com/elastic/elasticsearch-js/pull/2573) for more information.

**Impact**<br>
    Requests to Elasticsearch that used to time out after 30 seconds will now wait for as long as it takes for Elasticsearch to respond.

**Action**
    In environments where it is not ideal to wait for an API response indefinitely, manually setting the `requestTimeout` option when instantiating the client still works as it did in 8.x. For example:
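A minimal sketch that restores the old 30-second behavior explicitly; the node URL is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

// Re-apply the former default of 30 seconds, expressed in milliseconds
const client = new Client({
  node: 'http://localhost:9200/',
  requestTimeout: 30000
})
```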
::::
diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md
index 098bb38fb..1aa261e09 100644
--- a/docs/release-notes/index.md
+++ b/docs/release-notes/index.md
@@ -1,5 +1,5 @@
 ---
-navigation_title: "Elasticsearch JavaScript Client"
+navigation_title: "JavaScript client release notes"
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/changelog-client.html
---
@@ -15,13 +15,25 @@ To check for security updates, go to [Security announcements for the Elastic sta
 % ## version.next [elasticsearch-javascript-client-next-release-notes]

 % ### Features and enhancements [elasticsearch-javascript-client-next-features-enhancements]
-% *
+% \*

 % ### Fixes [elasticsearch-javascript-client-next-fixes]
-% *
+% \*

 ## 9.0.0 [elasticsearch-javascript-client-900-release-notes]

 ### Features and enhancements [elasticsearch-javascript-client-900-features-enhancements]

+- **Compatibility with Elasticsearch 9.0:** All changes and additions to Elasticsearch APIs for its 9.0 release are reflected in this release.
+- **Serverless client merged in:** the `@elastic/elasticsearch-serverless` client is being deprecated, and its functionality has been merged back into this client. This should have zero impact on the way the client works by default, except that a new `serverMode` option has been added. When it's explicitly set to `"serverless"` by a user, a few default settings and behaviors are changed:
+
+  - turns off sniffing and ignores any sniffing-related options
+  - ignores all nodes passed in config except the first one, and ignores any node filtering and selecting options
+  - enables compression and `TLSv1_2_method` (same as when configured for Elastic Cloud)
+  - adds an `elastic-api-version` HTTP header to all requests
+  - uses `CloudConnectionPool` by default instead of `WeightedConnectionPool`
+  - turns off vendored `content-type` and `accept` headers in favor of standard MIME types
+
+  Docstrings for types that differ between stack and serverless have also been updated to indicate when that is the case. 
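+
+  For example, a client can opt into these serverless defaults explicitly. This is an illustrative configuration only; the endpoint URL and API key are placeholders:
+
+  ```ts
+  import { Client } from '@elastic/elasticsearch'
+
+  const client = new Client({
+    node: 'https://my-project.es.example.io/',
+    auth: { apiKey: 'your-api-key' },
+    serverMode: 'serverless'
+  })
+  ```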
+ +### Fixes [elasticsearch-javascript-client-900-fixes] From 42b578196787762a68ff2c117b1c8aea1a6d124e Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 7 Apr 2025 21:41:26 +0200 Subject: [PATCH 513/647] Auto-generated API code (#2714) --- .../120fcf9f55128d6a81d5e87a9c235bbd.asciidoc | 19 +-- .../13ecdf99114098c76b050397d9c3d4e6.asciidoc | 3 +- .../141ef0ebaa3b0772892b79b9bb85efb0.asciidoc | 5 +- .../45954b8aaedfed57012be8b6538b0a24.asciidoc | 59 +++++---- .../4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc | 3 +- .../7429b16221fe741fd31b0584786dd0b0.asciidoc | 3 +- .../82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc | 45 ++++--- .../b45a8c6fc746e9c90fd181e69a605fad.asciidoc | 3 +- .../f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc | 3 +- docs/reference/api-reference.md | 37 ------ src/api/api/inference.ts | 125 ------------------ src/api/types.ts | 62 +++------ 12 files changed, 88 insertions(+), 279 deletions(-) diff --git a/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc b/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc index fbfd1cfc5..7c9de2841 100644 --- a/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc +++ b/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc @@ -3,16 +3,17 @@ [source, js] ---- -const response = await client.inference.streamInference({ - task_type: "chat_completion", +const response = await client.inference.chatCompletionUnified({ inference_id: "openai-completion", - model: "gpt-4o", - messages: [ - { - role: "user", - content: "What is Elastic?", - }, - ], + chat_completion_request: { + model: "gpt-4o", + messages: [ + { + role: "user", + content: "What is Elastic?", + }, + ], + }, }); console.log(response); ---- diff --git a/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc b/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc index 01baab9cf..cb18160a8 100644 --- a/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc +++ b/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc @@ -3,8 +3,7 @@ [source, js] ---- -const response = await client.inference.inference({ - task_type: "sparse_embedding", +const response = await client.inference.sparseEmbedding({ inference_id: "my-elser-model", input: "The sky above the port was the color of television tuned to a dead channel.", diff --git a/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc b/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc index 7d7aeab98..5387dbba3 100644 --- a/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc +++ b/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc @@ -3,9 +3,8 @@ [source, js] ---- -const response = await client.inference.put({ - task_type: "my-inference-endpoint", - inference_id: "_update", +const response = await client.inference.update({ + inference_id: "my-inference-endpoint", inference_config: { service_settings: { api_key: "", diff --git a/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc b/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc index 4d6846969..12fb33095 100644 --- a/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc +++ b/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc @@ -3,42 +3,43 @@ [source, js] ---- -const response = await client.inference.streamInference({ - task_type: "chat_completion", +const response = await client.inference.chatCompletionUnified({ inference_id: "openai-completion", - messages: [ - { - role: "user", - content: [ - { - type: "text", - text: "What's the price of a scarf?", + chat_completion_request: { 
+ messages: [ + { + role: "user", + content: [ + { + type: "text", + text: "What's the price of a scarf?", + }, + ], + }, + ], + tools: [ + { + type: "function", + function: { + name: "get_current_price", + description: "Get the current price of a item", + parameters: { + type: "object", + properties: { + item: { + id: "123", + }, + }, + }, }, - ], - }, - ], - tools: [ - { + }, + ], + tool_choice: { type: "function", function: { name: "get_current_price", - description: "Get the current price of a item", - parameters: { - type: "object", - properties: { - item: { - id: "123", - }, - }, - }, }, }, - ], - tool_choice: { - type: "function", - function: { - name: "get_current_price", - }, }, }); console.log(response); diff --git a/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc index 9ae0176bc..1e6cd8582 100644 --- a/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc +++ b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc @@ -3,8 +3,7 @@ [source, js] ---- -const response = await client.inference.streamInference({ - task_type: "completion", +const response = await client.inference.streamCompletion({ inference_id: "openai-completion", input: "What is Elastic?", }); diff --git a/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc b/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc index 8f897c69c..e41a7bf42 100644 --- a/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc +++ b/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc @@ -3,8 +3,7 @@ [source, js] ---- -const response = await client.inference.inference({ - task_type: "text_embedding", +const response = await client.inference.textEmbedding({ inference_id: "my-cohere-endpoint", input: "The sky above the port was the color of television tuned to a dead channel.", diff --git a/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc b/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc index 6958737be..c693e38f1 100644 --- a/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc +++ b/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc @@ -3,30 +3,31 @@ [source, js] ---- -const response = await client.inference.streamInference({ - task_type: "chat_completion", +const response = await client.inference.chatCompletionUnified({ inference_id: "openai-completion", - messages: [ - { - role: "assistant", - content: "Let's find out what the weather is", - tool_calls: [ - { - id: "call_KcAjWtAww20AihPHphUh46Gd", - type: "function", - function: { - name: "get_current_weather", - arguments: '{"location":"Boston, MA"}', + chat_completion_request: { + messages: [ + { + role: "assistant", + content: "Let's find out what the weather is", + tool_calls: [ + { + id: "call_KcAjWtAww20AihPHphUh46Gd", + type: "function", + function: { + name: "get_current_weather", + arguments: '{"location":"Boston, MA"}', + }, }, - }, - ], - }, - { - role: "tool", - content: "The weather is cold", - tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd", - }, - ], + ], + }, + { + role: "tool", + content: "The weather is cold", + tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd", + }, + ], + }, }); console.log(response); ---- diff --git a/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc b/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc index 876b182d2..0a14b2f32 100644 --- a/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc +++ b/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc @@ -3,8 +3,7 @@ 
[source, js] ---- -const response = await client.inference.inference({ - task_type: "completion", +const response = await client.inference.completion({ inference_id: "openai_chat_completions", input: "What is Elastic?", }); diff --git a/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc b/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc index 023d009ab..31cf0b9d3 100644 --- a/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc +++ b/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc @@ -3,8 +3,7 @@ [source, js] ---- -const response = await client.inference.inference({ - task_type: "rerank", +const response = await client.inference.rerank({ inference_id: "cohere_rerank", input: ["luke", "like", "leia", "chewy", "r2d2", "star", "wars"], query: "star wars main character", diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index cd6d88027..6b2005aab 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -7553,23 +7553,6 @@ client.inference.get({ ... }) - **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type - **`inference_id` (Optional, string)**: The inference Id -## client.inference.postEisChatCompletion [_inference.post_eis_chat_completion] -Perform a chat completion task through the Elastic Inference Service (EIS). - -Perform a chat completion inference task with the `elastic` service. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion) - -```ts -client.inference.postEisChatCompletion({ eis_inference_id }) -``` - -### Arguments [_arguments_inference.post_eis_chat_completion] - -#### Request (object) [_request_inference.post_eis_chat_completion] -- **`eis_inference_id` (string)**: The unique identifier of the inference endpoint. -- **`chat_completion_request` (Optional, { messages, model, max_completion_tokens, stop, temperature, tool_choice, tools, top_p })** - ## client.inference.put [_inference.put] Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. @@ -7776,26 +7759,6 @@ These settings are specific to the `cohere` service. - **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task. These settings are specific to the task type you specified. -## client.inference.putEis [_inference.put_eis] -Create an Elastic Inference Service (EIS) inference endpoint. - -Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS). - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-eis) - -```ts -client.inference.putEis({ task_type, eis_inference_id, service, service_settings }) -``` - -### Arguments [_arguments_inference.put_eis] - -#### Request (object) [_request_inference.put_eis] -- **`task_type` (Enum("chat_completion"))**: The type of the inference task that the model will perform. -NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. -- **`eis_inference_id` (string)**: The unique identifier of the inference endpoint. -- **`service` (Enum("elastic"))**: The type of service supported for the specified task type. In this case, `elastic`. 
-- **`service_settings` ({ model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `elastic` service. - ## client.inference.putElasticsearch [_inference.put_elasticsearch] Create an Elasticsearch inference endpoint. diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 40cb657cf..5c64c2b2a 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -77,15 +77,6 @@ export default class Inference { body: [], query: [] }, - 'inference.post_eis_chat_completion': { - path: [ - 'eis_inference_id' - ], - body: [ - 'chat_completion_request' - ], - query: [] - }, 'inference.put': { path: [ 'task_type', @@ -174,17 +165,6 @@ export default class Inference { ], query: [] }, - 'inference.put_eis': { - path: [ - 'task_type', - 'eis_inference_id' - ], - body: [ - 'service', - 'service_settings' - ], - query: [] - }, 'inference.put_elasticsearch': { path: [ 'task_type', @@ -583,53 +563,6 @@ export default class Inference { return await this.transport.request({ path, method, querystring, body, meta }, options) } - /** - * Perform a chat completion task through the Elastic Inference Service (EIS). Perform a chat completion inference task with the `elastic` service. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion | Elasticsearch API documentation} - */ - async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> - async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise - async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['inference.post_eis_chat_completion'] - - const userQuery = params?.querystring - const querystring: Record = userQuery != null ? { ...userQuery } : {} - - let body: any = params.body ?? undefined - for (const key in params) { - if (acceptedBody.includes(key)) { - // @ts-expect-error - body = params[key] - } else if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } - } - } - - const method = 'POST' - const path = `/_inference/chat_completion/${encodeURIComponent(params.eis_inference_id.toString())}/_stream` - const meta: TransportRequestMetadata = { - name: 'inference.post_eis_chat_completion', - pathParts: { - eis_inference_id: params.eis_inference_id - } - } - return await this.transport.request({ path, method, querystring, body, meta }, options) - } - /** * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. 
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} @@ -1033,64 +966,6 @@ export default class Inference { return await this.transport.request({ path, method, querystring, body, meta }, options) } - /** - * Create an Elastic Inference Service (EIS) inference endpoint. Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS). - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-eis | Elasticsearch API documentation} - */ - async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise - async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise { - const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery - } = this.acceptedParams['inference.put_eis'] - - const userQuery = params?.querystring - const querystring: Record = userQuery != null ? { ...userQuery } : {} - - let body: Record | string | undefined - const userBody = params?.body - if (userBody != null) { - if (typeof userBody === 'string') { - body = userBody - } else { - body = { ...userBody } - } - } - - for (const key in params) { - if (acceptedBody.includes(key)) { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } else if (acceptedPath.includes(key)) { - continue - } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } - } - } - - const method = 'PUT' - const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.eis_inference_id.toString())}` - const meta: TransportRequestMetadata = { - name: 'inference.put_eis', - pathParts: { - task_type: params.task_type, - eis_inference_id: params.eis_inference_id - } - } - return await this.transport.request({ path, method, querystring, body, meta }, options) - } - /** * Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the endpoints using the API if you want to customize the settings.
If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch | Elasticsearch API documentation} diff --git a/src/api/types.ts b/src/api/types.ts index 232b6bd1d..c8411e20a 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -15512,8 +15512,10 @@ export interface ClusterHealthHealthResponseBody { active_primary_shards: integer /** The total number of active primary and replica shards. */ active_shards: integer + /** The ratio of active shards in the cluster expressed as a string formatted percentage. */ + active_shards_percent?: string /** The ratio of active shards in the cluster expressed as a percentage. */ - active_shards_percent_as_number: Percentage + active_shards_percent_as_number: double /** The name of the cluster. */ cluster_name: Name /** The number of shards whose allocation has been delayed by the timeout settings. */ @@ -15575,7 +15577,7 @@ export interface ClusterHealthRequest extends RequestBase { /** Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed. */ wait_for_events?: WaitForEvents /** The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and /** Contains the inference type and its options. */ inference_config?: IngestInferenceConfig + /** Input fields for inference and output (destination) fields for the inference results. + * This option is incompatible with the target_field and field_map options. */ + input_output?: IngestInputConfig | IngestInputConfig[] + /** If true and any of the input fields defined in input_ouput are missing + * then those missing fields are quietly ignored, otherwise a missing field causes a failure. + * Only applies when using input_output configurations to explicitly list the input fields. */ + ignore_missing?: boolean } export interface IngestIngest { @@ -23069,6 +23038,11 @@ export interface IngestIngest { pipeline?: Name } +export interface IngestInputConfig { + input_field: string + output_field: string +} + export interface IngestIpLocationProcessor extends IngestProcessorBase { /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. 
*/ database_file?: string From 5d8f357805ef64ee9e056bad56335f9bafc646e5 Mon Sep 17 00:00:00 2001 From: Marci W <333176+marciw@users.noreply.github.com> Date: Tue, 8 Apr 2025 11:50:39 -0400 Subject: [PATCH 514/647] Update release notes and related files (#2717) --- docs/release-notes/deprecations.md | 13 ++++--------- docs/release-notes/index.md | 4 ++-- docs/release-notes/known-issues.md | 6 +++++- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/docs/release-notes/deprecations.md b/docs/release-notes/deprecations.md index df309211a..a137fb0cf 100644 --- a/docs/release-notes/deprecations.md +++ b/docs/release-notes/deprecations.md @@ -5,18 +5,13 @@ navigation_title: "Deprecations" # Elasticsearch JavaScript Client deprecations [elasticsearch-javascript-client-deprecations] Over time, certain Elastic functionality becomes outdated and is replaced or removed. To help with the transition, Elastic deprecates functionality for a period before removal, giving you time to update your applications. -Review the deprecated functionality for Elasticsearch JavaScript Client. While deprecations have no immediate impact, we strongly encourage you update your implementation after you upgrade. To learn how to upgrade, check out [Upgrade](docs-content://deploy-manage/upgrade.md). +Review the deprecated functionality for the Elasticsearch JavaScript Client. While deprecations have no immediate impact, we strongly encourage you update your implementation after you upgrade. To learn how to upgrade, check out [Upgrade](docs-content://deploy-manage/upgrade.md). -% ## Next version +## 9.0.0 [elasticsearch-javascript-client-900-deprecations] -% ::::{dropdown} Deprecation title -% Description of the deprecation. -% For more information, check [PR #](PR link). -% **Impact**
    Impact of deprecation. -% **Action**
    Steps for mitigating deprecation impact. -% :::: +_No deprecations_ -% ## 9.0.0 [elasticsearch-javascript-client-900-deprecations] +% ## Next version % ::::{dropdown} Deprecation title % Description of the deprecation. diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md index 1aa261e09..4bd944d96 100644 --- a/docs/release-notes/index.md +++ b/docs/release-notes/index.md @@ -1,5 +1,5 @@ --- -navigation_title: "JavaScript client release notes" +navigation_title: "Elasticsearch JavaScript Client" mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/changelog-client.html --- @@ -36,4 +36,4 @@ To check for security updates, go to [Security announcements for the Elastic sta Docstrings for types that differ between stack and serverless have also been updated to indicate when that is the case. -### Fixes [elasticsearch-javascript-client-900-fixes] +% ### Fixes [elasticsearch-javascript-client-900-fixes] diff --git a/docs/release-notes/known-issues.md b/docs/release-notes/known-issues.md index 86856b104..e35bd7826 100644 --- a/docs/release-notes/known-issues.md +++ b/docs/release-notes/known-issues.md @@ -5,6 +5,10 @@ navigation_title: "Known issues" # Elasticsearch JavaScript Client known issues [elasticsearch-javascript-client-known-issues] +## 9.0.0 + +_No known issues_ + % Use the following template to add entries to this page. % :::{dropdown} Title of known issue @@ -17,4 +21,4 @@ navigation_title: "Known issues" % **Resolved** % On [Month/Day/Year], this issue was resolved. -::: \ No newline at end of file +% ::: \ No newline at end of file From d29e079a1e9e5c44756d90e59f352694f88fa343 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 8 Apr 2025 11:38:14 -0500 Subject: [PATCH 515/647] Make example generation quiet by default (#2722) --- scripts/generate-docs-examples.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/generate-docs-examples.js b/scripts/generate-docs-examples.js index 27d873ca2..3a6813f7a 100644 --- a/scripts/generate-docs-examples.js +++ b/scripts/generate-docs-examples.js @@ -77,6 +77,7 @@ ${source.trim()} } const options = minimist(process.argv.slice(2), { + boolean: ['debug'], string: ['version'], default: { version: 'master' @@ -88,7 +89,7 @@ generate(options.version) .catch(err => log.fail(err.message)) .finally(() => { const keys = Object.keys(failures) - if (keys.length > 0) { + if (keys.length > 0 && options.debug) { let message = 'Some examples failed to generate:\n\n' for (const key of keys) { message += `${key}: ${failures[key]}\n` From 868dd02ffd3a59e1e0d626c322a502bf2f144f0f Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 9 Apr 2025 19:14:16 +0200 Subject: [PATCH 516/647] Auto-generated API code (#2721) --- docs/reference/api-reference.md | 2 - src/api/api/indices.ts | 156 ++++++++++++++++++++++++++++++++ src/api/api/snapshot.ts | 3 +- src/api/types.ts | 7 +- 4 files changed, 159 insertions(+), 9 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 6b2005aab..6bc26a85a 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -13530,8 +13530,6 @@ Multi-target syntax is supported. - **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. 
-- **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. ## client.snapshot.create [_snapshot.create] Create a snapshot. diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index b037cfa72..918c6cdec 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -209,6 +209,13 @@ export default class Indices { 'expand_wildcards' ] }, + 'indices.delete_data_stream_options': { + path: [ + 'name' + ], + body: [], + query: [] + }, 'indices.delete_index_template': { path: [ 'name' @@ -408,6 +415,13 @@ export default class Indices { 'verbose' ] }, + 'indices.get_data_stream_options': { + path: [ + 'name' + ], + body: [], + query: [] + }, 'indices.get_field_mapping': { path: [ 'fields', @@ -559,6 +573,13 @@ export default class Indices { 'timeout' ] }, + 'indices.put_data_stream_options': { + path: [ + 'name' + ], + body: [], + query: [] + }, 'indices.put_index_template': { path: [ 'name' @@ -1564,6 +1585,51 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Deletes the data stream options of the selected data streams. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} + */ + async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_data_stream_options'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_options` + const meta: TransportRequestMetadata = { + name: 'indices.delete_data_stream_options', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Delete an index template. The provided may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template | Elasticsearch API documentation} @@ -2377,6 +2443,51 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Returns the data stream options of the selected data streams. 
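+   * A minimal usage sketch, with an illustrative data stream name:
+   * `await client.indices.getDataStreamOptions({ name: 'logs-my-app' })`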
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} + */ + async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_stream_options'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_options` + const meta: TransportRequestMetadata = { + name: 'indices.get_data_stream_options', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping | Elasticsearch API documentation} @@ -3049,6 +3160,51 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Updates the data stream options of the selected data streams. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} + */ + async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.put_data_stream_options'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_options` + const meta: TransportRequestMetadata = { + name: 'indices.put_data_stream_options', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Multiple matching templates** If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. **Composing aliases, mappings, and settings** When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template | Elasticsearch API documentation} diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index c472ac9bf..06a544055 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -55,8 +55,7 @@ export default class Snapshot { 'indices' ], query: [ - 'master_timeout', - 'timeout' + 'master_timeout' ] }, 'snapshot.create': { diff --git a/src/api/types.ts b/src/api/types.ts index c8411e20a..7e6a1acb7 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -33941,16 +33941,13 @@ export interface SnapshotCloneRequest extends RequestBase { * If the master node is not available before the timeout expires, the request fails and returns an error. 
* To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period of time to wait for a response. - * If no response is received before the timeout expires, the request fails and returns an error. */ - timeout?: Duration /** A comma-separated list of indices to include in the snapshot. * Multi-target syntax is supported. */ indices: string /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, timeout?: never, indices?: never } + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, indices?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, timeout?: never, indices?: never } + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, indices?: never } } export type SnapshotCloneResponse = AcknowledgedResponseBase From 3fc214d2a26ed693bcdb67beb4655fda2bb1059b Mon Sep 17 00:00:00 2001 From: Marci W <333176+marciw@users.noreply.github.com> Date: Thu, 10 Apr 2025 10:57:35 -0400 Subject: [PATCH 517/647] Restore troubleshooting content (#2727) --- docs/reference/timeout-best-practices.md | 13 +++++++++++++ docs/reference/toc.yml | 3 ++- 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 docs/reference/timeout-best-practices.md diff --git a/docs/reference/timeout-best-practices.md b/docs/reference/timeout-best-practices.md new file mode 100644 index 000000000..8bb66f961 --- /dev/null +++ b/docs/reference/timeout-best-practices.md @@ -0,0 +1,13 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/timeout-best-practices.html +--- + +# Timeout best practices [timeout-best-practices] + +Starting in 9.0.0, this client is configured to not time out any HTTP request by default. {{es}} will always eventually respond to any request, even if it takes several minutes. Reissuing a request that it has not responded to yet can cause performance side effects. See the [official {{es}} recommendations for HTTP clients](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more information. + +Prior to 9.0, this client was configured by default to operate like many HTTP client libraries do, by using a relatively short (30 second) timeout on all requests sent to {{es}}, raising a `TimeoutError` when that time period elapsed without receiving a response. + +If you need to set timeouts on Elasticsearch requests, setting the `requestTimeout` value to a millisecond value will cause this client to operate as it did prior to 9.0. 
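+
+For example, a minimal sketch that restores the pre-9.0 behavior (the node URL is a placeholder):
+
+```ts
+import { Client } from "@elastic/elasticsearch";
+
+const client = new Client({
+  node: "https://localhost:9200/",
+  requestTimeout: 30000, // milliseconds; matches the old 30-second default
+});
+```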
+ diff --git a/docs/reference/toc.yml b/docs/reference/toc.yml index 3896c1fde..9fbda6f58 100644 --- a/docs/reference/toc.yml +++ b/docs/reference/toc.yml @@ -31,4 +31,5 @@ toc: - file: update_examples.md - file: update_by_query_examples.md - file: reindex_examples.md - - file: client-helpers.md \ No newline at end of file + - file: client-helpers.md + - file: timeout-best-practices.md \ No newline at end of file From 8b4fcc8ce1e7c54573c7a7a8e3b981d9667d7281 Mon Sep 17 00:00:00 2001 From: Marci W <333176+marciw@users.noreply.github.com> Date: Fri, 11 Apr 2025 10:30:35 -0400 Subject: [PATCH 518/647] Merge branch 'main' of https://github.com/marciw/elasticsearch-js (#2729) --- docs/reference/configuration.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/configuration.md b/docs/reference/configuration.md index d6519a589..744743ef6 100644 --- a/docs/reference/configuration.md +++ b/docs/reference/configuration.md @@ -9,6 +9,6 @@ The client is designed to be easily configured for your needs. In the following - [Basic configuration](/reference/basic-config.md) - [Advanced configuration](/reference/advanced-config.md) -- [Timeout best practices](docs-content://troubleshoot/elasticsearch/elasticsearch-client-javascript-api/nodejs.md) +- [Timeout best practices](/reference/timeout-best-practices.md) - [Creating a child client](/reference/child.md) -- [Testing](/reference/client-testing.md) +- [Testing](/reference/client-testing.md) \ No newline at end of file From b9a2df54070ef835fe97f017c927797a7d4bc115 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 11:15:59 -0500 Subject: [PATCH 519/647] Update dependency @types/node to v22.14.1 (#2732) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 1fa5906d4..c3efea548 100644 --- a/package.json +++ b/package.json @@ -61,7 +61,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "0.7.34", - "@types/node": "22.13.14", + "@types/node": "22.14.1", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 1ab089022ef1d65fa1b35f436d1b73f01f159ee2 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 12:14:26 -0500 Subject: [PATCH 520/647] Update dependency @types/ms to v2 (#2733) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index c3efea548..78a46297b 100644 --- a/package.json +++ b/package.json @@ -60,7 +60,7 @@ "@elastic/request-converter": "9.0.1", "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", - "@types/ms": "0.7.34", + "@types/ms": "2.1.0", "@types/node": "22.14.1", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", From 931a80cacbf8a4dfb2fa3b004ede7d48f5826818 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 14 Apr 2025 12:44:39 -0500 Subject: [PATCH 521/647] Update ESQL helper types (#2738) --- src/helpers.ts | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/src/helpers.ts b/src/helpers.ts index 46d82e28f..39e5a3e70 100644 --- a/src/helpers.ts +++ 
b/src/helpers.ts @@ -132,15 +132,6 @@ export interface EsqlColumn { type: string } -export type EsqlValue = any[] - -export type EsqlRow = EsqlValue[] - -export interface EsqlResponse { - columns: EsqlColumn[] - values: EsqlRow[] -} - export interface EsqlHelper { toRecords: () => Promise> toArrowTable: () => Promise> @@ -963,7 +954,7 @@ export default class Helpers { esql (params: T.EsqlQueryRequest, reqOptions: TransportRequestOptions = {}): EsqlHelper { const client = this[kClient] - function toRecords (response: EsqlResponse): TDocument[] { + function toRecords (response: T.EsqlEsqlResult): TDocument[] { const { columns, values } = response return values.map(row => { const doc: Partial = {} @@ -990,8 +981,7 @@ export default class Helpers { params.format = 'json' params.columnar = false - // @ts-expect-error it's typed as ArrayBuffer but we know it will be JSON - const response: EsqlResponse = await client.esql.query(params, reqOptions) + const response = await client.esql.query(params, reqOptions) const records: TDocument[] = toRecords(response) const { columns } = response return { records, columns } @@ -1005,7 +995,8 @@ export default class Helpers { params.format = 'arrow' - const response = await client.esql.query(params, reqOptions) + // @ts-expect-error the return type will be ArrayBuffer when the format is set to 'arrow' + const response: ArrayBuffer = await client.esql.query(params, reqOptions) return tableFromIPC(response) }, @@ -1018,7 +1009,8 @@ export default class Helpers { params.format = 'arrow' - const response = await client.esql.query(params, reqOptions) + // @ts-expect-error the return type will be ArrayBuffer when the format is set to 'arrow' + const response: ArrayBuffer = await client.esql.query(params, reqOptions) return RecordBatchStreamReader.from(response) } } From d9d54b1bb8388e313d77ef798f3c4d5dbcdb6371 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 14 Apr 2025 19:52:54 +0200 Subject: [PATCH 522/647] Auto-generated API code (#2740) --- docs/reference/api-reference.md | 121 +++++++------ src/api/api/connector.ts | 4 +- src/api/api/esql.ts | 101 ++++++++++- src/api/api/indices.ts | 8 +- src/api/api/inference.ts | 79 +++++++++ src/api/api/ingest.ts | 6 +- src/api/api/knn_search.ts | 47 ++--- src/api/api/scripts_painless_execute.ts | 2 +- src/api/types.ts | 223 +++++++++++++----------- 9 files changed, 391 insertions(+), 200 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 6bc26a85a..7381b5ff2 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -1023,43 +1023,13 @@ client.info() ``` ## client.knnSearch [_knn_search] -Run a knn search. +Performs a kNN search. -NOTE: The kNN search API has been replaced by the `knn` option in the search API. - -Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. -Given a query vector, the API finds the k closest vectors and returns those documents as search hits. - -Elasticsearch uses the HNSW algorithm to support efficient kNN search. -Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. -This means the results returned are not always the true k closest neighbors. - -The kNN search API supports restricting the search using a filter. -The search will return the top k documents that also match the filter query. - -A kNN search response has the exact same structure as a search API response. 
-However, certain sections have a meaning specific to kNN search: - -* The document `_score` is determined by the similarity between the query and document vector. -* The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. - -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html) +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html) ```ts -client.knnSearch({ index, knn }) +client.knnSearch() ``` -### Arguments [_arguments_knn_search] - -#### Request (object) [_request_knn_search] - -- **`index` (string | string[])**: A list of index names to search; use `_all` or to perform the operation on all indices. -- **`knn` ({ field, query_vector, k, num_candidates })**: The kNN query to run. -- **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the `hits._source` property of the search response. -- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. -- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. -- **`fields` (Optional, string | string[])**: The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. -- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. 
The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. -- **`routing` (Optional, string)**: A list of specific routing values. ## client.mget [_mget] Get multiple documents. @@ -1591,7 +1561,7 @@ The API uses several _contexts_, which control how scripts are run, what variabl Each context requires a script, but additional parameters depend on the context you're using for that script. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html) +[Endpoint documentation](https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples) ```ts client.scriptsPainlessExecute({ ... }) @@ -4418,7 +4388,7 @@ Update the connector draft filtering validation. Update the draft filtering validation info for a connector. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-validation-api.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering-validation) ```ts client.connector.updateFilteringValidation({ connector_id, validation }) @@ -4466,7 +4436,7 @@ client.connector.updateName({ connector_id }) ## client.connector.updateNative [_connector.update_native] Update the connector is_native flag. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-native-api.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-native) ```ts client.connector.updateNative({ connector_id, is_native }) @@ -4874,7 +4844,7 @@ Stop async ES|QL query. This API interrupts the query execution and returns the results so far. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-stop-api.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-stop) ```ts client.esql.asyncQueryStop({ id }) @@ -4889,11 +4859,25 @@ A query ID is also provided when the request was submitted with the `keep_on_com - **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +## client.esql.getQuery [_esql.get_query] +Executes a get ESQL query request +```ts +client.esql.getQuery() +``` + + +## client.esql.listQueries [_esql.list_queries] +Executes a list ESQL queries request +```ts +client.esql.listQueries() +``` + + ## client.esql.query [_esql.query] Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html) +[Endpoint documentation](https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest) ```ts client.esql.query({ query }) @@ -5031,9 +5015,9 @@ client.fleet.msearch({ ... }) - **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. 
Defaults to an empty list which will cause Elasticsearch to immediately execute the search. -- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns -an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` -which is true by default. +- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or shard failures. +If false, returns an error with no partial results. +Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. ## client.fleet.search [_fleet.search] Run a Fleet search. @@ -5134,9 +5118,9 @@ the indices stats API. - **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. -- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns -an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` -which is true by default. +- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or shard failures. +If false, returns an error with no partial results. +Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. ## client.graph.explore [_graph.explore] Explore graph analytics. @@ -5458,7 +5442,7 @@ Cancel a migration reindex operation. Cancel a migration reindex attempt for a data stream or index. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-cancel-migrate-reindex) ```ts client.indices.cancelMigrateReindex({ index }) @@ -5687,7 +5671,7 @@ Create an index from a source index. Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-from) ```ts client.indices.createFrom({ source, dest }) @@ -6349,7 +6333,7 @@ Get the migration reindexing status. Get the status of a migration reindex attempt for a data stream or index. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-migration) ```ts client.indices.getMigrateReindexStatus({ index }) @@ -6425,7 +6409,7 @@ Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. The persistent task ID is returned immediately and the reindexing work is completed in that task. 
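As a sketch, starting a migration reindex might look like the following, assuming the request body takes a `reindex` object with an upgrade `mode` and a `source.index` (the data stream name is illustrative):

```ts
const response = await client.indices.migrateReindex({
  reindex: {
    mode: "upgrade",
    source: { index: "my-logs-stream" },
  },
});
```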
-[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-reindex) ```ts client.indices.migrateReindex({ ... }) @@ -7553,6 +7537,40 @@ client.inference.get({ ... }) - **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type - **`inference_id` (Optional, string)**: The inference Id +## client.inference.inference [_inference.inference] +Perform inference on the service. + +This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. +It returns a response with the results of the tasks. +The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. + +For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation. + +> info +> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference) + +```ts +client.inference.inference({ inference_id, input }) +``` + +### Arguments [_arguments_inference.inference] + +#### Request (object) [_request_inference.inference] +- **`inference_id` (string)**: The unique identifier for the inference endpoint. +- **`input` (string | string[])**: The text on which you want to perform the inference task. +It can be a single string or an array. + +> info +> Inference endpoints for the `completion` task type currently only support a single string as input. +- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The type of inference task that the model performs. +- **`query` (Optional, string)**: The query input, which is required only for the `rerank` task. +It is not required for other tasks. +- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. +These settings are specific to the task type you specified and override the task settings specified when initializing the service. +- **`timeout` (Optional, string | -1 | 0)**: The amount of time to wait for the inference request to complete. + ## client.inference.put [_inference.put] Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. @@ -8231,7 +8249,7 @@ If no response is received before the timeout expires, the request fails and ret Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor. 
-[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html) +[Endpoint documentation](https://www.elastic.co/docs/reference/enrich-processor/geoip-processor) ```ts client.ingest.geoIpStats() @@ -8303,7 +8321,7 @@ Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/grok-processor.html) +[Endpoint documentation](https://www.elastic.co/docs/reference/enrich-processor/grok-processor) ```ts client.ingest.processorGrok() @@ -8357,7 +8375,7 @@ A value of `-1` indicates that the request should never time out. Create or update a pipeline. Changes made using this API take effect immediately. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html) +[Endpoint documentation](https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines) ```ts client.ingest.putPipeline({ id }) @@ -10638,7 +10656,6 @@ client.nodes.getRepositoriesMeteringInfo({ node_id }) #### Request (object) [_request_nodes.get_repositories_metering_info] - **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. -All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). ## client.nodes.hotThreads [_nodes.hot_threads] Get the hot threads for nodes. diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index 57e5f5cd1..3198dfa5c 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -1667,7 +1667,7 @@ export default class Connector { /** * Update the connector draft filtering validation. Update the draft filtering validation info for a connector. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-validation-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering-validation | Elasticsearch API documentation} */ async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1838,7 +1838,7 @@ export default class Connector { /** * Update the connector is_native flag. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-native-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-native | Elasticsearch API documentation} */ async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index 1e98e6b65..98f952bfd 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -85,6 +85,18 @@ export default class Esql { 'drop_null_columns' ] }, + 'esql.get_query': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'esql.list_queries': { + path: [], + body: [], + query: [] + }, 'esql.query': { path: [], body: [ @@ -253,7 +265,7 @@ export default class Esql { /** * Stop async ES|QL query. This API interrupts the query execution and returns the results so far. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-stop-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-stop | Elasticsearch API documentation} */ async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -296,9 +308,94 @@ export default class Esql { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Executes a get ESQL query request + */ + async getQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['esql.get_query'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_query/queries/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'esql.get_query', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Executes a list ESQL queries request + */ + async listQueries (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async listQueries (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async listQueries (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async listQueries (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['esql.list_queries'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_query/queries' + const meta: TransportRequestMetadata = { + name: 'esql.list_queries' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-rest.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest | Elasticsearch API documentation} */ async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 918c6cdec..e613a1c46 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -995,7 +995,7 @@ export default class Indices { /** * Cancel a migration reindex operation. Cancel a migration reindex attempt for a data stream or index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-cancel-migrate-reindex | Elasticsearch API documentation} */ async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1298,7 +1298,7 @@ export default class Indices { /** * Create an index from a source index. Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-from | Elasticsearch API documentation} */ async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2649,7 +2649,7 @@ export default class Indices { /** * Get the migration reindexing status. Get the status of a migration reindex attempt for a data stream or index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-migration | Elasticsearch API documentation} */ async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2807,7 +2807,7 @@ export default class Indices { /** * Reindex legacy backing indices. Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. The persistent task ID is returned immediately and the reindexing work is completed in that task. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-reindex | Elasticsearch API documentation} */ async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 5c64c2b2a..5b65421f5 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -77,6 +77,20 @@ export default class Inference { body: [], query: [] }, + 'inference.inference': { + path: [ + 'task_type', + 'inference_id' + ], + body: [ + 'query', + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, 'inference.put': { path: [ 'task_type', @@ -563,6 +577,71 @@ export default class Inference { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. 
However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.inference'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.task_type != null && params.inference_id != null) { + method = 'POST' + path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}` + } else { + method = 'POST' + path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` + } + const meta: TransportRequestMetadata = { + name: 'inference.inference', + pathParts: { + task_type: params.task_type, + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. 
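A quick illustration of the `inference` method this hunk introduces (a hedged sketch, not part of the patch: the endpoint ID, task type, and inputs below are hypothetical, while the request and response shapes follow the `InferenceInferenceRequest` and `InferenceInferenceResult` types added to `src/api/types.ts` later in this series):

```ts
// Sketch only: 'my-rerank-endpoint' is an invented ID for an inference
// endpoint that must already exist before this call.
const result = await client.inference.inference({
  task_type: 'rerank',                      // optional; may be implied by the endpoint
  inference_id: 'my-rerank-endpoint',       // hypothetical endpoint ID
  query: 'Which document mentions pandas?', // required only for the rerank task
  input: ['Pandas are bears native to China.', 'Paris is the capital of France.'],
  timeout: '30s'
})
// For a rerank task, the ranked documents are returned under `result.rerank`.
console.log(result.rerank)
```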
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index 840a4d376..e5bc73147 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -296,7 +296,7 @@ export default class Ingest { /** * Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/geoip-processor.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/reference/enrich-processor/geoip-processor | Elasticsearch API documentation} */ async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -498,7 +498,7 @@ export default class Ingest { /** * Run a grok processor. Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/reference/enrich-processor/grok-processor | Elasticsearch API documentation} */ async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -645,7 +645,7 @@ export default class Ingest { /** * Create or update a pipeline. Changes made using this API take effect immediately. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ingest.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines | Elasticsearch API documentation} */ async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index 650a1d6a7..367a61d48 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -26,39 +26,26 @@ interface That { transport: Transport } -const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] - const acceptedParams: Record = { knn_search: { path: [ 'index' ], - body: [ - '_source', - 'docvalue_fields', - 'stored_fields', - 'fields', - 'filter', - 'knn' - ], - query: [ - 'routing' - ] + body: [], + query: [] } } /** - * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. 
The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. A kNN search response has the exact same structure as a search API response. However, certain sections have a meaning specific to kNN search: * The document `_score` is determined by the similarity between the query and document vector. * The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/knn-search-api.html | Elasticsearch API documentation} + * Performs a kNN search. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html | Elasticsearch API documentation} */ -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise> -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise { +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const { - path: acceptedPath, - body: acceptedBody, - query: acceptedQuery + path: acceptedPath } = acceptedParams.knn_search const userQuery = params?.querystring @@ -74,22 +61,12 @@ export default async function KnnSearchApi (this: That, par } } + params = params ?? {} for (const key in params) { - if (acceptedBody.includes(key)) { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { - // @ts-expect-error - querystring[key] = params[key] - } else { - body = body ?? 
{} - // @ts-expect-error - body[key] = params[key] - } + querystring[key] = params[key] } } diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index f9823c7fb..1d524bc02 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -42,7 +42,7 @@ const acceptedParams: Record (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/types.ts b/src/api/types.ts index 7e6a1acb7..19cf61017 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -1233,69 +1233,6 @@ export interface InfoResponse { version: ElasticsearchVersionInfo } -export interface KnnSearchRequest extends RequestBase { - /** A comma-separated list of index names to search; - * use `_all` or to perform the operation on all indices. */ - index: Indices - /** A comma-separated list of specific routing values. */ - routing?: Routing - /** Indicates which source fields are returned for matching documents. These - * fields are returned in the `hits._source` property of the search response. */ - _source?: SearchSourceConfig - /** The request returns doc values for field names matching these patterns - * in the `hits.fields` property of the response. - * It accepts wildcard (`*`) patterns. */ - docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - /** A list of stored fields to return as part of a hit. If no fields are specified, - * no stored fields are included in the response. If this field is specified, the `_source` - * parameter defaults to `false`. You can pass `_source: true` to return both source fields - * and stored fields in the search response. */ - stored_fields?: Fields - /** The request returns values for field names matching these patterns - * in the `hits.fields` property of the response. - * It accepts wildcard (`*`) patterns. */ - fields?: Fields - /** A query to filter the documents that can match. The kNN search will return the top - * `k` documents that also match this filter. The value can be a single query or a - * list of queries. If `filter` isn't provided, all documents are allowed to match. */ - filter?: QueryDslQueryContainer | QueryDslQueryContainer[] - /** The kNN query to run. */ - knn: KnnSearchQuery - /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, routing?: never, _source?: never, docvalue_fields?: never, stored_fields?: never, fields?: never, filter?: never, knn?: never } - /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, routing?: never, _source?: never, docvalue_fields?: never, stored_fields?: never, fields?: never, filter?: never, knn?: never } -} - -export interface KnnSearchResponse { - /** The milliseconds it took Elasticsearch to run the request. */ - took: long - /** If true, the request timed out before completion; - * returned results may be partial or empty. */ - timed_out: boolean - /** A count of shards used for the request. */ - _shards: ShardStatistics - /** The returned documents and metadata. */ - hits: SearchHitsMetadata - /** The field values for the documents. These fields - * must be specified in the request using the `fields` parameter. 
*/ - fields?: Record - /** The highest returned document score. This value is null for requests - * that do not sort by score. */ - max_score?: double -} - -export interface KnnSearchQuery { - /** The name of the vector field to search against */ - field: Field - /** The query vector */ - query_vector: QueryVector - /** The final number of nearest neighbors to return as top hits */ - k: integer - /** The number of nearest neighbor candidates to consider per shard */ - num_candidates: integer -} - export interface MgetMultiGetError { error: ErrorCause _id: Id @@ -3946,8 +3883,6 @@ export interface ErrorResponseBase { status: integer } -export type EsqlResult = ArrayBuffer - export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' export type ExpandWildcards = ExpandWildcard | ExpandWildcard[] @@ -5022,15 +4957,13 @@ export interface AggregationsAggregationContainer { variable_width_histogram?: AggregationsVariableWidthHistogramAggregation } -export type AggregationsAggregationRange = AggregationsUntypedAggregationRange | AggregationsDateAggregationRange | AggregationsNumberAggregationRange | AggregationsTermAggregationRange - -export interface AggregationsAggregationRangeBase { +export interface AggregationsAggregationRange { /** Start of the range (inclusive). */ - from?: T + from?: double | null /** Custom key to return the range with. */ key?: string /** End of the range (exclusive). */ - to?: T + to?: double | null } export interface AggregationsArrayPercentilesItem { @@ -5221,8 +5154,8 @@ export interface AggregationsCategorizeTextAggregation { * use the categorization_analyzer property instead and include the filters as pattern_replace character filters. */ categorization_filters?: string[] /** The categorization analyzer specifies how the text is analyzed and tokenized before being categorized. - * The syntax is very similar to that used to define the analyzer in the [Analyze endpoint](https://www.elastic.co/guide/en/elasticsearch/reference/8.0/indices-analyze.html). This property - * cannot be used at the same time as categorization_filters. */ + * The syntax is very similar to that used to define the analyzer in the analyze API. This property + * cannot be used at the same time as `categorization_filters`. */ categorization_analyzer?: AggregationsCategorizeTextAnalyzer /** The number of categorization buckets to return from each shard before merging all the results. */ shard_size?: integer @@ -5336,9 +5269,6 @@ export interface AggregationsCustomCategorizeTextAnalyzer { filter?: string[] } -export interface AggregationsDateAggregationRange extends AggregationsAggregationRangeBase { -} - export interface AggregationsDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { } @@ -5396,13 +5326,22 @@ export interface AggregationsDateRangeAggregation extends AggregationsBucketAggr * By default, documents without a value are ignored. */ missing?: AggregationsMissing /** Array of date ranges. */ - ranges?: AggregationsDateAggregationRange[] + ranges?: AggregationsDateRangeExpression[] /** Time zone used to convert dates from another time zone to UTC. */ time_zone?: TimeZone /** Set to `true` to associate a unique string key with each bucket and returns the ranges as a hash rather than an array. */ keyed?: boolean } +export interface AggregationsDateRangeExpression { + /** Start of the range (inclusive). */ + from?: AggregationsFieldDateMath + /** Custom key to return the range with. */ + key?: string + /** End of the range (exclusive). 
*/ + to?: AggregationsFieldDateMath +} + export interface AggregationsDerivativeAggregate extends AggregationsSingleMetricAggregateBase { normalized_value?: double normalized_value_as_string?: string @@ -6079,9 +6018,6 @@ export interface AggregationsNormalizeAggregation extends AggregationsPipelineAg export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax' -export interface AggregationsNumberAggregationRange extends AggregationsAggregationRangeBase { -} - export interface AggregationsParentAggregateKeys extends AggregationsSingleBucketAggregateBase { } export type AggregationsParentAggregate = AggregationsParentAggregateKeys @@ -6502,9 +6438,6 @@ export interface AggregationsTTestAggregation { export type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic' -export interface AggregationsTermAggregationRange extends AggregationsAggregationRangeBase { -} - export interface AggregationsTermsAggregateBase extends AggregationsMultiBucketAggregateBase { doc_count_error_upper_bound?: long sum_other_doc_count?: long @@ -6664,9 +6597,6 @@ export interface AggregationsUnmappedSignificantTermsAggregate extends Aggregati export interface AggregationsUnmappedTermsAggregate extends AggregationsTermsAggregateBase { } -export interface AggregationsUntypedAggregationRange extends AggregationsAggregationRangeBase { -} - export interface AggregationsValueCountAggregate extends AggregationsSingleMetricAggregateBase { } @@ -17340,8 +17270,66 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase + _shards?: EsqlEsqlShardInfo +} + +export interface EsqlEsqlClusterInfo { + total: integer + successful: integer + running: integer + skipped: integer + partial: integer + failed: integer + details: Record +} + +export type EsqlEsqlClusterStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed' + +export interface EsqlEsqlColumnInfo { + name: string + type: string +} + export type EsqlEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow' +export interface EsqlEsqlResult { + took?: DurationValue + is_partial?: boolean + all_columns?: EsqlEsqlColumnInfo[] + columns: EsqlEsqlColumnInfo[] + values: FieldValue[][] + /** Cross-cluster search information. Present if `include_ccs_metadata` was `true` in the request + * and a cross-cluster search was performed. */ + _clusters?: EsqlEsqlClusterInfo + /** Profiling information. Present if `profile` was `true` in the request. + * The contents of this field are currently unstable. 
*/ + profile?: any +} + +export interface EsqlEsqlShardFailure { + shard: Id + index: IndexName + node?: NodeId + reason: ErrorCause +} + +export interface EsqlEsqlShardInfo { + total: integer + successful?: integer + skipped?: integer + failed?: integer + failures?: EsqlEsqlShardFailure[] +} + export interface EsqlTableValuesContainer { integer?: EsqlTableValuesIntegerValue[] keyword?: EsqlTableValuesKeywordValue[] @@ -17408,7 +17396,7 @@ export interface EsqlAsyncQueryRequest extends RequestBase { querystring?: { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never } } -export type EsqlAsyncQueryResponse = EsqlResult +export type EsqlAsyncQueryResponse = EsqlAsyncEsqlResult export interface EsqlAsyncQueryDeleteRequest extends RequestBase { /** The unique identifier of the query. @@ -17445,7 +17433,7 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase { querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never } } -export type EsqlAsyncQueryGetResponse = EsqlResult +export type EsqlAsyncQueryGetResponse = EsqlAsyncEsqlResult export interface EsqlAsyncQueryStopRequest extends RequestBase { /** The unique identifier of the query. @@ -17461,7 +17449,7 @@ export interface EsqlAsyncQueryStopRequest extends RequestBase { querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never } } -export type EsqlAsyncQueryStopResponse = EsqlResult +export type EsqlAsyncQueryStopResponse = EsqlEsqlResult export interface EsqlQueryRequest extends RequestBase { /** A short version of the Accept header, e.g. json, yaml. */ @@ -17500,7 +17488,7 @@ export interface EsqlQueryRequest extends RequestBase { querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } } -export type EsqlQueryResponse = EsqlResult +export type EsqlQueryResponse = EsqlEsqlResult export interface FeaturesFeature { name: string @@ -17590,9 +17578,9 @@ export interface FleetMsearchRequest extends RequestBase { * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause * Elasticsearch to immediately execute the search. */ wait_for_checkpoints?: FleetCheckpoint[] - /** If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns - * an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` - * which is true by default. */ + /** If true, returns partial results if there are shard request timeouts or shard failures. + * If false, returns an error with no partial results. + * Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. */ allow_partial_search_results?: boolean searches?: MsearchRequestItem[] /** All values in `body` will be added to the request body. 
*/ @@ -17641,9 +17629,9 @@ export interface FleetSearchRequest extends RequestBase { * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause * Elasticsearch to immediately execute the search. */ wait_for_checkpoints?: FleetCheckpoint[] - /** If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns - * an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` - * which is true by default. */ + /** If true, returns partial results if there are shard request timeouts or shard failures. + * If false, returns an error with no partial results. + * Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. */ allow_partial_search_results?: boolean aggregations?: Record /** @alias aggregations */ @@ -21829,6 +21817,15 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi task_type: InferenceTaskType } +export interface InferenceInferenceResult { + text_embedding_bytes?: InferenceTextEmbeddingByteResult[] + text_embedding_bits?: InferenceTextEmbeddingByteResult[] + text_embedding?: InferenceTextEmbeddingResult[] + sparse_embedding?: InferenceSparseEmbeddingResult[] + completion?: InferenceCompletionResult[] + rerank?: InferenceRankedDocument[] +} + export interface InferenceJinaAIServiceSettings { /** A valid API key of your JinaAI account. * @@ -22163,6 +22160,33 @@ export interface InferenceGetResponse { endpoints: InferenceInferenceEndpointInfo[] } +export interface InferenceInferenceRequest extends RequestBase { + /** The type of inference task that the model performs. */ + task_type?: InferenceTaskType + /** The unique identifier for the inference endpoint. */ + inference_id: Id + /** The amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** The query input, which is required only for the `rerank` task. + * It is not required for other tasks. */ + query?: string + /** The text on which you want to perform the inference task. + * It can be a single string or an array. + * + * > info + * > Inference endpoints for the `completion` task type currently only support a single string as input. */ + input: string | string[] + /** Task settings for the individual inference request. + * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } +} + +export type InferenceInferenceResponse = InferenceInferenceResult + export interface InferencePutRequest extends RequestBase { /** The task type */ task_type?: InferenceTaskType @@ -24029,9 +24053,7 @@ export interface LogstashPipelineSettings { /** The internal queuing model to use for event buffering. */ 'queue.type': string /** The total capacity of the queue (`queue.type: persisted`) in number of bytes. 
*/ - 'queue.max_bytes.number': integer - /** The total capacity of the queue (`queue.type: persisted`) in terms of units of bytes. */ - 'queue.max_bytes.units': string + 'queue.max_bytes': string /** The maximum number of written events before forcing a checkpoint when persistent queues are enabled (`queue.type: persisted`). */ 'queue.checkpoint.writes': integer } @@ -29545,8 +29567,7 @@ export interface NodesClearRepositoriesMeteringArchiveResponseBase extends Nodes } export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { - /** Comma-separated list of node IDs or names used to limit returned information. - * All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). */ + /** Comma-separated list of node IDs or names used to limit returned information. */ node_id: NodeIds /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never } @@ -34820,7 +34841,7 @@ export interface SynonymsSynonymRule { export interface SynonymsSynonymRuleRead { /** Synonym Rule identifier */ id: Id - /** Synonyms, in Solr format, that conform the synonym rule. See https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-graph-tokenfilter.html#_solr_synonyms_2 */ + /** Synonyms, in Solr format, that conform the synonym rule. */ synonyms: SynonymsSynonymString } From 9d719ce874af1c471692f3ad45989d91eefd6562 Mon Sep 17 00:00:00 2001 From: Siddharth Khengare <67581382+Siddhu545@users.noreply.github.com> Date: Tue, 15 Apr 2025 18:44:00 +0100 Subject: [PATCH 523/647] Bug #2694 (#2731) Co-authored-by: Siddhu545 Co-authored-by: Josh Mock --- src/client.ts | 24 +++++++++++++++++++++++- test/unit/client.test.ts | 16 ++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/src/client.ts b/src/client.ts index a50670ca3..f4cedef16 100644 --- a/src/client.ts +++ b/src/client.ts @@ -203,10 +203,32 @@ export default class Client extends API { if ((opts.cloud != null || opts.serverMode === 'serverless') && opts[kChild] === undefined) { if (opts.cloud != null) { const { id } = opts.cloud + if (typeof id !== 'string') { + throw new errors.ConfigurationError('Cloud ID must be a string.') + } + + const parts = id.split(':') + if (parts.length !== 2 || parts[1] === '') { + throw new errors.ConfigurationError( + 'Cloud ID must be in the format "name:base64string".' + ) + } + // the cloud id is `cluster-name:base64encodedurl` // the url is a string divided by two '$', the first is the cloud url // the second the elasticsearch instance, the third the kibana instance - const cloudUrls = Buffer.from(id.split(':')[1], 'base64').toString().split('$') + + let cloudUrls + try { + cloudUrls = Buffer.from(parts[1], 'base64').toString().split('$') + } catch (err) { + throw new errors.ConfigurationError('Cloud ID base64 decoding failed.') + } + if (cloudUrls.length < 2 || cloudUrls[0] === '' || cloudUrls[1] === '') { + throw new errors.ConfigurationError( + 'Cloud ID base64 must contain at least two "$" separated parts: "$[$]".' 
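+        // Illustrative note, not part of the original patch: after decoding, the
+        // base64 payload is expected to look like "cloud-host$es-id[$kibana-id]".
+        // For a hypothetical ID 'my-deployment:' + base64('cloud.example.com$abc123$def456'),
+        // the assignment below resolves the node URL to https://abc123.cloud.example.com.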
+ ) + } opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}` } diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts index e57f4d092..7c4aa3339 100644 --- a/test/unit/client.test.ts +++ b/test/unit/client.test.ts @@ -287,9 +287,25 @@ test('Elastic Cloud config', t => { t.equal(connection?.url.hostname, 'abcd.localhost') t.equal(connection?.url.protocol, 'https:') + t.test('Invalid Cloud ID will throw ConfigurationError', t => { + t.throws(() => new Client({ + cloud : { + id : 'invalidCloudIdThatIsNotBase64' + }, + auth : { + username: 'elastic', + password: 'changeme' + } + + }), errors.ConfigurationError) + t.end() + }) + t.end() }) + + test('Override default Elastic Cloud options', t => { const client = new Client({ cloud: { From 2a93c062e4cb27adca29da2a1b58df7fe96aa59e Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 15 Apr 2025 13:41:24 -0500 Subject: [PATCH 524/647] Update changelog to include fix for #2694 (#2745) --- docs/release-notes/index.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md index 4bd944d96..0705942e3 100644 --- a/docs/release-notes/index.md +++ b/docs/release-notes/index.md @@ -36,4 +36,6 @@ To check for security updates, go to [Security announcements for the Elastic sta Docstrings for types that differ between stack and serverless have also been updated to indicate when that is the case. +- **Improved Cloud ID parsing:** when using a Cloud ID as the `cloud` parameter to instantiate the client, that ID was assumed to be in the correct format. New assertions have been added to verify that format and throw a `ConfigurationError` if it is invalid. See [#2694](https://github.com/elastic/elasticsearch-js/issues/2694). + % ### Fixes [elasticsearch-javascript-client-900-fixes] From 46b08caa4f76077d6ecea7d23075608604b0d63f Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 15 Apr 2025 14:41:04 -0500 Subject: [PATCH 525/647] Bump version to 9.0.0 (#2749) --- package.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 78a46297b..4754d0d18 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@elastic/elasticsearch", - "version": "9.0.0-alpha.5", + "version": "9.0.0", "versionCanary": "9.0.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", @@ -90,7 +90,7 @@ "zx": "7.2.3" }, "dependencies": { - "@elastic/transport": "9.0.0-alpha.1", + "@elastic/transport": "^9.0.0", "apache-arrow": "^18.0.0", "tslib": "^2.4.0" }, From 1650e3d2649cf8cdbb0cdf0a2098d743ce6b69e2 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 16 Apr 2025 09:50:25 -0500 Subject: [PATCH 526/647] Reinstate running integration tests on PRs (#2752) --- catalog-info.yaml | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/catalog-info.yaml b/catalog-info.yaml index 4d1e41757..de212b172 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -37,8 +37,9 @@ spec: everyone: access_level: READ_ONLY provider_settings: - build_pull_requests: false + build_pull_requests: true build_branches: false + separate_pull_request_statuses: true cancel_intermediate_builds: true cancel_intermediate_builds_branch_filter: "!main" schedules: @@ -48,9 +49,3 @@ spec: 8_x: branch: "8.x" cronline: "@daily" - 8_17: - branch: "8.17" - cronline: "@daily" - 8_18: - branch: "8.18" - cronline: "@daily" From d726942ad1bd1645b0a5bc5d257532d63e93e123 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 16 Apr 2025 11:02:28 -0500 
Subject: [PATCH 527/647] Ensure npm publish succeeds when publishing non-latest versions (#2754) --- .github/workflows/npm-publish.yml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index dd42454d6..730b4d87a 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -27,9 +27,20 @@ jobs: run: | version=$(jq -r .version package.json) tag_meta=$(echo "$version" | cut -s -d '-' -f2) + # if no meta info on the version (e.g. a '-alpha.1' prefix), publish as a stable release if [[ -z "$tag_meta" ]]; then - npm publish --provenance --access public + # get latest version on npm + latest=$(npm view @elastic/elasticsearch --json | jq -r '.["dist-tags"].latest') + + # if $version is higher than the most recently published version, publish as-is + if [[ $(yes | npx semver "$version" "$latest" | tail -n1) == "$version" ]]; then + npm publish --provenance --access public + else + # otherwise, publish with "previous" tag + npm publish --provenance --access public --tag "previous" + fi else + # publish as a non-stable release using the meta name (e.g. 'alpha') as the tag tag=$(echo "$tag_meta" | cut -d '.' -f1) npm publish --provenance --access public --tag "$tag" fi From c3f987caaf2a0e9fa62f569e418d8f3dec9fc1db Mon Sep 17 00:00:00 2001 From: Colleen McGinnis Date: Wed, 16 Apr 2025 12:17:36 -0500 Subject: [PATCH 528/647] fix image paths for docs-assembler (#2753) Co-authored-by: Josh Mock --- docs/reference/getting-started.md | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/reference/getting-started.md b/docs/reference/getting-started.md index 1420d6c4c..61f2dabfb 100644 --- a/docs/reference/getting-started.md +++ b/docs/reference/getting-started.md @@ -45,15 +45,11 @@ const client = new Client({ Your Elasticsearch endpoint can be found on the **My deployment** page of your deployment: -:::{image} images/es-endpoint.jpg -:alt: Finding Elasticsearch endpoint -::: +![Finding Elasticsearch endpoint](images/es-endpoint.jpg) You can generate an API key on the **Management** page under Security. -:::{image} images/create-api-key.png -:alt: Create API key -::: +![Create API key](images/create-api-key.png) For other connection options, refer to the [*Connecting*](/reference/connecting.md) section. From 98b38028aa7b93cb14aa52e67c8e0bf2bbaf45af Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 16 Apr 2025 13:28:32 -0500 Subject: [PATCH 529/647] Add test to verify default node filter function (#2756) --- docs/reference/basic-config.md | 15 +++++++++------ package.json | 2 +- src/client.ts | 2 +- test/unit/client.test.ts | 25 +++++++++++++++++++++++++ 4 files changed, 36 insertions(+), 8 deletions(-) diff --git a/docs/reference/basic-config.md b/docs/reference/basic-config.md index 7b523cbeb..82a537260 100644 --- a/docs/reference/basic-config.md +++ b/docs/reference/basic-config.md @@ -246,12 +246,15 @@ Type: `function` Filter that indicates whether a node should be used for a request. 
Default function definition: ```js -function defaultNodeFilter (node) { - // avoid master only nodes - if (node.roles.master === true && - node.roles.data === false && - node.roles.ingest === false) { - return false +function defaultNodeFilter (conn) { + if (conn.roles != null) { + if ( + // avoid master-only nodes + conn.roles.master && + !conn.roles.data && + !conn.roles.ingest && + !conn.roles.ml + ) return false } return true } diff --git a/package.json b/package.json index 4754d0d18..1c52966c3 100644 --- a/package.json +++ b/package.json @@ -90,7 +90,7 @@ "zx": "7.2.3" }, "dependencies": { - "@elastic/transport": "^9.0.0", + "@elastic/transport": "^9.0.1", "apache-arrow": "^18.0.0", "tslib": "^2.4.0" }, diff --git a/src/client.ts b/src/client.ts index f4cedef16..f758553e2 100644 --- a/src/client.ts +++ b/src/client.ts @@ -132,7 +132,7 @@ export interface ClientOptions { * @defaultValue null */ agent?: HttpAgentOptions | UndiciAgentOptions | agentFn | false /** @property nodeFilter A custom function used by the connection pool to determine which nodes are qualified to receive a request - * @defaultValue () => true */ + * @defaultValue A function that uses the Connection `roles` property to avoid master-only nodes */ nodeFilter?: nodeFilterFn /** @property nodeSelector A custom function used by the connection pool to determine which node should receive the next request * @defaultValue A "round robin" function that loops sequentially through each node in the pool. */ diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts index 7c4aa3339..fc4016a22 100644 --- a/test/unit/client.test.ts +++ b/test/unit/client.test.ts @@ -64,6 +64,31 @@ test('Missing node(s)', t => { t.end() }) +test('multi nodes with roles, using default node filter', async t => { + const client = new Client({ + nodes: [ + { + url: new URL('http://node1:9200/'), + roles: { master: true, data: false, ingest: false, ml: false } + }, + { + url: new URL('http://node2:9200/'), + roles: { master: true, data: true, ingest: false, ml: false } + }, + ] + }) + const conn = client.connectionPool.getConnection({ + now: Date.now() + 1000 * 60 * 3, + requestId: 1, + name: 'elasticsearch-js', + context: null + }) + + t.equal(conn?.url.hostname, 'node2') + + t.end() +}) + test('Custom headers', t => { const client = new Client({ node: 'http://localhost:9200/', From 25933c003bb629013bbc2d794352b12369745a98 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 17 Apr 2025 15:09:48 -0500 Subject: [PATCH 530/647] Improve docs about observability event emitter (#2765) --- docs/reference/observability.md | 87 ++++++++++++++++++++++++++------- 1 file changed, 69 insertions(+), 18 deletions(-) diff --git a/docs/reference/observability.md b/docs/reference/observability.md index 38f8c332d..d3b5f59f3 100644 --- a/docs/reference/observability.md +++ b/docs/reference/observability.md @@ -15,7 +15,6 @@ Correlating events can be hard, especially if your applications have a large cod All of these observability features are documented below. - ## OpenTelemetry [_opentelemetry] The client supports OpenTelemetry’s [zero-code instrumentation](https://opentelemetry.io/docs/zero-code/js/) to enable tracking each client request as an [OpenTelemetry span](https://opentelemetry.io/docs/concepts/signals/traces/#spans). These spans follow all of the [semantic OpenTelemetry conventions for Elasticsearch](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/) except for `db.query.text`. 
@@ -36,7 +35,6 @@ To start sending Elasticsearch trace data to your OpenTelemetry endpoint, follow node --require '@opentelemetry/auto-instrumentations-node/register' index.js ``` - ## Events [_events] The client is an event emitter. This means that you can listen for its events to add additional logic to your code, without needing to change the client’s internals or how you use the client. You can find the events' names by accessing the `events` key of the client: @@ -65,16 +63,75 @@ client.diagnostic.on('response', (err, result) => { }) ``` +### Event types + The client emits the following events: -| | | -| --- | --- | -| `serialization` | Emitted before starting serialization and compression. If you want to measure this phase duration, you should measure the time elapsed between this event and `request`.
    ```js
    client.diagnostic.on('serialization', (err, result) => {
    console.log(err, result)
    })
    ```
    |
-| `request` | Emitted before sending the actual request to {{es}} *(emitted multiple times in case of retries)*.
    ```js
    client.diagnostic.on('request', (err, result) => {
    console.log(err, result)
    })
    ```
    |
-| `deserialization` | Emitted before starting deserialization and decompression. If you want to measure this phase duration, you should measure the time elapsed between this event and `response`. *(This event might not be emitted in certain situations)*.
    ```js
    client.diagnostic.on('deserialization', (err, result) => {
    console.log(err, result)
    })
    ```
    |
-| `response` | Emitted once {{es}} response has been received and parsed.
    ```js
    client.diagnostic.on('response', (err, result) => {
    console.log(err, result)
    })
    ```
    |
-| `sniff` | Emitted when the client ends a sniffing request.
    ```js
    client.diagnostic.on('sniff', (err, result) => {
    console.log(err, result)
    })
    ```
    |
-| `resurrect` | Emitted if the client is able to resurrect a dead node.
    ```js
    client.diagnostic.on('resurrect', (err, result) => {
    console.log(err, result)
    })
    ```
    | +#### `serialization` + +Emitted before starting serialization and compression. If you want to measure this phase duration, you should measure the time elapsed between this event and `request`. + +```js +client.diagnostic.on("serialization", (err, result) => { + console.log(err, result) +}) +``` + +#### `request` + +Emitted before sending the actual request to {{es}} _(emitted multiple times in case of retries)_. + +```js +client.diagnostic.on("request", (err, result) => { + console.log(err, result) +}) +``` + +#### `deserialization` + +Emitted before starting deserialization and decompression. If you want to measure this phase duration, you should measure the time elapsed between this event and `response`. + +This event might not be emitted in certain situations: + +* When `asStream` is set to true, the response is returned in its raw stream form before deserialization occurs +* When a response is terminated early due to content length being too large +* When a response is terminated early by an `AbortController` + +```js +client.diagnostic.on("deserialization", (err, result) => { + console.log(err, result) +}) +``` + +#### `response` + +Emitted once {{es}} response has been received and parsed. + +```js +client.diagnostic.on("response", (err, result) => { + console.log(err, result) +}) +``` + +#### `sniff` + +Emitted when the client ends a sniffing request. + +```js +client.diagnostic.on("sniff", (err, result) => { + console.log(err, result) +}) +``` + +#### `resurrect` + +Emitted if the client is able to resurrect a dead node. + +```js +client.diagnostic.on("resurrect", (err, result) => { + console.log(err, result) +}) +``` The values of `result` in `serialization`, `request`, `deserialization`, `response` and `sniff` are: @@ -113,7 +170,6 @@ request: { }; ``` - ### Events order [_events_order] The event order is described in the following graph, in some edge cases, the order is not guaranteed. You can find in [`test/acceptance/events-order.test.js`](https://github.com/elastic/elasticsearch-js/blob/main/test/acceptance/events-order.test.js) how the order changes based on the situation. @@ -134,7 +190,6 @@ serialization └─▶ response ``` - ## Correlation ID [_correlation_id] Correlating events can be hard, especially if there are many events at the same time. The client offers you an automatic (and configurable) system to help you handle this problem. @@ -176,7 +231,7 @@ const client = new Client({ // it takes two parameters, the request parameters and options generateRequestId: function (params, options) { // your id generation logic - // must be syncronous + // must be synchronous return 'id' } }) @@ -193,7 +248,6 @@ client.search({ }).then(console.log, console.log) ``` - ## Context object [_context_object] Sometimes, you might need to make some custom data available in your events, you can do that via the `context` option of a request: @@ -263,10 +317,9 @@ client.search({ }).then(console.log, console.log) ``` - ## Client name [_client_name] -If you are using multiple instances of the client or if you are using multiple child clients *(which is the recommended way to have multiple instances of the client)*, you might need to recognize which client you are using. The `name` options help you in this regard. +If you are using multiple instances of the client or if you are using multiple child clients _(which is the recommended way to have multiple instances of the client)_, you might need to recognize which client you are using. The `name` options help you in this regard. 
```js const { Client } = require('@elastic/elasticsearch') @@ -309,7 +362,6 @@ child.search({ }).then(console.log, console.log) ``` - ## X-Opaque-Id support [_x_opaque_id_support] To improve observability, the client offers an easy way to configure the `X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this allows you to discover this identifier in the [deprecation logs](docs-content://deploy-manage/monitor/logging-configuration/update-elasticsearch-logging-levels.md#deprecation-logging), helps you with [identifying search slow log origin](elasticsearch://reference/elasticsearch/index-settings/slow-log.md) as well as [identifying running tasks](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks). @@ -348,4 +400,3 @@ client.search({ opaqueId: 'my-search' }).then(console.log, console.log) ``` - From c7d9b00fe338d3b6a53c749f7b903dfd86b7d7d1 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 18 Apr 2025 10:23:47 -0500 Subject: [PATCH 531/647] CODEOWNERS for supply chain attack prevention (#2768) --- .github/CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..67ba321cc --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,3 @@ +package.json @joshmock +renovate.json @joshmock +catalog-info.yaml @joshmock From 40860afe0e8b1df56107b498a099c2011f8d1170 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 21 Apr 2025 12:28:28 -0500 Subject: [PATCH 532/647] Update actions/setup-node digest to 49933ea (#2771) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .github/workflows/nodejs.yml | 4 ++-- .github/workflows/npm-publish.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index da5428aea..fa3090287 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -41,7 +41,7 @@ jobs: persist-credentials: false - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: node-version: ${{ matrix.node-version }} @@ -71,7 +71,7 @@ jobs: persist-credentials: false - name: Use Node.js - uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: node-version: 22.x diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index 730b4d87a..3e12d208e 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -16,7 +16,7 @@ jobs: with: persist-credentials: false ref: ${{ github.event.inputs.branch }} - - uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: node-version: "22.x" registry-url: "https://registry.npmjs.org/" From bfb41964397174c327c3c07af237b2da18302cb3 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 21 Apr 2025 12:37:56 -0500 Subject: [PATCH 533/647] Automerge Renovate Docker updates (#2777) --- renovate.json | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/renovate.json b/renovate.json index 287cfa2b9..420ba26d4 100644 --- a/renovate.json +++ b/renovate.json @@ -20,13 +20,15 @@ 
"matchManagers": [ "dockerfile" ], - "pinDigests": false + "pinDigests": false, + "automerge": true }, { "matchDatasources": [ "docker" ], - "pinDigests": false + "pinDigests": false, + "automerge": true } ] } From 9657180af62fd192b6a7c42adff95661d9bcf160 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 21 Apr 2025 19:48:20 +0200 Subject: [PATCH 534/647] Auto-generated API code (#2773) --- docs/reference/api-reference.md | 18 ++++++--- src/api/api/esql.ts | 23 ++++++------ src/api/types.ts | 66 +++++++++++++++++++++++++++++++-- 3 files changed, 86 insertions(+), 21 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 7381b5ff2..c446c3b4a 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -4860,14 +4860,20 @@ A query ID is also provided when the request was submitted with the `keep_on_com If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. ## client.esql.getQuery [_esql.get_query] -Executes a get ESQL query request +Get a specific running ES|QL query information. +Returns an object extended information about a running ES|QL query. ```ts -client.esql.getQuery() +client.esql.getQuery({ id }) ``` +### Arguments [_arguments_esql.get_query] + +#### Request (object) [_request_esql.get_query] +- **`id` (string)**: The query ID ## client.esql.listQueries [_esql.list_queries] -Executes a list ESQL queries request +Get running ES|QL queries information. +Returns an object containing IDs and other information about the running ES|QL queries. ```ts client.esql.listQueries() ``` @@ -6732,7 +6738,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. - **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. -- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: +- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -12365,7 +12371,7 @@ client.security.hasPrivileges({ ... 
}) #### Request (object) [_request_security.has_privileges] - **`user` (Optional, string)**: Username - **`application` (Optional, { application, privileges, resources }[])** -- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check. +- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_esql" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_esql" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check. - **`index` (Optional, { names, privileges, allow_restricted_indices }[])** ## client.security.hasPrivilegesUserProfile [_security.has_privileges_user_profile] @@ -12591,7 +12597,7 @@ client.security.putRole({ name }) #### Request (object) [_request_security.put_role] - **`name` (string)**: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. 
- **`applications` (Optional, { application, privileges, resources }[])**: A list of application privilege entries. -- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. +- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_esql" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_esql" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. - **`global` (Optional, Record)**: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. - **`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])**: A list of indices permissions entries. - **`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])**: A list of remote indices permissions entries. 
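To make the `put_role` parameters above concrete, here is a hedged sketch of a call that uses only fields documented in this reference (the role name, index pattern, and privilege choices are invented for illustration):

```ts
// Illustrative only: creates or updates a role limited to read access on logs-* indices.
const response = await client.security.putRole({
  name: 'logs_reader',  // hypothetical role name
  cluster: ['monitor'], // one of the documented cluster privileges
  indices: [
    {
      names: ['logs-*'],
      privileges: ['read', 'view_index_metadata']
    }
  ]
})
console.log(response)
```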
diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts
index 98f952bfd..50711262b 100644
--- a/src/api/api/esql.ts
+++ b/src/api/api/esql.ts
@@ -309,12 +309,12 @@ export default class Esql {
   }

   /**
-   * Executes a get ESQL query request
+   * Get information about a specific running ES|QL query. Returns extended information about the query.
    */
-  async getQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async getQuery (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async getQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async getQuery (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
+  async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlGetQueryResponse>
+  async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlGetQueryResponse, unknown>>
+  async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptions): Promise<T.EsqlGetQueryResponse>
+  async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
     } = this.acceptedParams['esql.get_query']
@@ -332,11 +332,11 @@ export default class Esql {
       }
     }

-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -353,12 +353,12 @@ export default class Esql {
   }

   /**
-   * Executes a list ESQL queries request
+   * Get information about running ES|QL queries. Returns the IDs and other information for each running query.
    */
-  async listQueries (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async listQueries (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async listQueries (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async listQueries (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
+  async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlListQueriesResponse>
+  async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlListQueriesResponse, unknown>>
+  async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptions): Promise<T.EsqlListQueriesResponse>
+  async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
     } = this.acceptedParams['esql.list_queries']
@@ -381,6 +381,7 @@ export default class Esql {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
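The typed `getQuery` and `listQueries` signatures above pair with the `Esql*` request and response shapes added to `src/api/types.ts` in the next hunk. A usage sketch, assuming an already configured `client` instance and a hypothetical query ID:

```js
// List running ES|QL queries; `queries` maps query IDs to details.
const list = await client.esql.listQueries()
for (const [id, info] of Object.entries(list.queries)) {
  console.log(id, info.query, info.running_time_nanos)
}

// Fetch extended details for one query; the ID below is a placeholder.
const details = await client.esql.getQuery({ id: 'some-query-id' })
console.log(details.coordinating_node, details.data_nodes)
```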
diff --git a/src/api/types.ts b/src/api/types.ts
index 19cf61017..202f1b299 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -7606,6 +7606,22 @@ export interface MappingByteNumberProperty extends MappingNumberPropertyBase {
   null_value?: byte
 }

+export interface MappingChunkingSettings {
+  /** The chunking strategy: `sentence` or `word`. */
+  strategy: string
+  /** The maximum size of a chunk in words.
+   * This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). */
+  max_chunk_size: integer
+  /** The number of overlapping words for chunks.
+   * It is applicable only to a `word` chunking strategy.
+   * This value cannot be higher than half the `max_chunk_size` value. */
+  overlap?: integer
+  /** The number of overlapping sentences for chunks.
+   * It is applicable only for a `sentence` chunking strategy.
+   * It can be either `1` or `0`. */
+  sentence_overlap?: integer
+}
+
 export interface MappingCompletionProperty extends MappingDocValuesPropertyBase {
   analyzer?: string
   contexts?: MappingSuggestContext[]
@@ -8108,6 +8124,10 @@ export interface MappingSemanticTextProperty {
    * You can update this parameter by using the Update mapping API. Use the Create inference API to create the endpoint.
    * If not specified, the inference endpoint defined by inference_id will be used at both index and query time. */
   search_inference_id?: Id
+  /** Settings for chunking text into smaller passages. If specified, these will override the
+   * chunking settings sent in the inference endpoint associated with inference_id. If chunking settings are updated,
+   * they will not be applied to existing documents until they are reindexed. */
+  chunking_settings?: MappingChunkingSettings
 }

 export interface MappingShapeProperty extends MappingDocValuesPropertyBase {
@@ -17451,6 +17471,44 @@ export interface EsqlAsyncQueryStopRequest extends RequestBase {

 export type EsqlAsyncQueryStopResponse = EsqlEsqlResult

+export interface EsqlGetQueryRequest extends RequestBase {
+  /** The query ID */
+  id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never }
+}
+
+export interface EsqlGetQueryResponse {
+  id: long
+  node: NodeId
+  start_time_millis: long
+  running_time_nanos: long
+  query: string
+  coordinating_node: NodeId
+  data_nodes: NodeId[]
+}
+
+export interface EsqlListQueriesBody {
+  id: long
+  node: NodeId
+  start_time_millis: long
+  running_time_nanos: long
+  query: string
+}
+
+export interface EsqlListQueriesRequest extends RequestBase {
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any }
+}
+
+export interface EsqlListQueriesResponse {
+  queries: Record<string, EsqlListQueriesBody>
+}
+
 export interface EsqlQueryRequest extends RequestBase {
   /** A short version of the Accept header, e.g. json, yaml.
*/ format?: EsqlEsqlFormat @@ -30949,7 +31007,7 @@ export interface SecurityClusterNode { name: Name } -export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_stats' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_esql' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_esql' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_stats' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string export interface SecurityCreatedStatus { created: boolean @@ -37459,6 +37517,9 @@ export interface SpecUtilsCommonQueryParameters { pretty?: boolean } +export interface SpecUtilsOverloadOf { +} + export interface SpecUtilsCommonCatQueryParameters { /** Specifies the format to return the columnar data in, can be set to * `text`, `json`, `cbor`, `yaml`, or `smile`. */ @@ -37469,6 +37530,3 @@ export interface SpecUtilsCommonCatQueryParameters { /** When set to `true` will enable verbose output. 
*/ v?: boolean } - -export interface SpecUtilsOverloadOf { -} From 27774c9d3c5105a838673d6bded2c4af033f91d7 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 21 Apr 2025 14:53:52 -0500 Subject: [PATCH 535/647] Migrate integration tests to built JS files (#2750) --- .buildkite/pipeline.yml | 22 +- .buildkite/run-client.sh | 39 +- .dockerignore | 3 + .gitignore | 4 + .npmignore | 3 + package.json | 5 +- scripts/download-artifacts.js | 170 ++--- scripts/generate-docs-examples.js | 2 +- test/integration/index.js | 463 ++----------- test/integration/reporter.js | 115 ---- test/integration/test-builder.js | 482 +++++++++++++ test/integration/test-runner.js | 1072 ----------------------------- 12 files changed, 630 insertions(+), 1750 deletions(-) delete mode 100644 test/integration/reporter.js create mode 100644 test/integration/test-builder.js delete mode 100644 test/integration/test-runner.js diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index c5146fc68..8a7e176b1 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,17 +1,20 @@ --- +agents: + provider: "gcp" + image: family/core-ubuntu-2204 + memory: "8G" + cpu: "2" + steps: - - label: ":elasticsearch: :javascript: ES JavaScript ({{ matrix.nodejs }}) Test Suite: {{ matrix.suite }}" - agents: - provider: "gcp" + - label: ":elasticsearch: :javascript: ES JavaScript ({{ matrix.nodejs }})" env: NODE_VERSION: "{{ matrix.nodejs }}" - TEST_SUITE: "{{ matrix.suite }}" - STACK_VERSION: 8.16.0 + TEST_SUITE: "platinum" + STACK_VERSION: 9.0.0 + GITHUB_TOKEN_PATH: "secret/ci/elastic-elasticsearch-js/github-token" + TEST_ES_STACK: "1" matrix: setup: - suite: - - "free" - - "platinum" nodejs: - "18" - "20" @@ -21,9 +24,6 @@ steps: - wait: ~ continue_on_failure: true - label: ":junit: Test results" - agents: - provider: "gcp" - image: family/core-ubuntu-2204 plugins: - junit-annotate#v2.6.0: artifacts: "junit-output/junit-*.xml" diff --git a/.buildkite/run-client.sh b/.buildkite/run-client.sh index 59ed168e7..872d57812 100755 --- a/.buildkite/run-client.sh +++ b/.buildkite/run-client.sh @@ -10,22 +10,29 @@ export NODE_VERSION=${NODE_VERSION:-18} echo "--- :javascript: Building Docker image" docker build \ - --file "$script_path/Dockerfile" \ - --tag elastic/elasticsearch-js \ - --build-arg NODE_VERSION="$NODE_VERSION" \ - . + --file "$script_path/Dockerfile" \ + --tag elastic/elasticsearch-js \ + --build-arg NODE_VERSION="$NODE_VERSION" \ + . 
-echo "--- :javascript: Running $TEST_SUITE tests" +GITHUB_TOKEN=$(vault read -field=token "$GITHUB_TOKEN_PATH") +export GITHUB_TOKEN + +echo "--- :javascript: Running tests" mkdir -p "$repo/junit-output" docker run \ - --network="${network_name}" \ - --env "TEST_ES_SERVER=${elasticsearch_url}" \ - --env "ELASTIC_PASSWORD=${elastic_password}" \ - --env "TEST_SUITE=${TEST_SUITE}" \ - --env "ELASTIC_USER=elastic" \ - --env "BUILDKITE=true" \ - --volume "$repo/junit-output:/junit-output" \ - --name elasticsearch-js \ - --rm \ - elastic/elasticsearch-js \ - bash -c "npm run test:integration; [ -f ./$TEST_SUITE-report-junit.xml ] && mv ./$TEST_SUITE-report-junit.xml /junit-output/junit-$BUILDKITE_JOB_ID.xml || echo 'No JUnit artifact found'" + --network="${network_name}" \ + --env TEST_ES_STACK \ + --env STACK_VERSION \ + --env GITHUB_TOKEN \ + --env "TEST_ES_SERVER=${elasticsearch_url}" \ + --env "ELASTIC_PASSWORD=${elastic_password}" \ + --env "ELASTIC_USER=elastic" \ + --env "BUILDKITE=true" \ + --volume "/usr/src/app/node_modules" \ + --volume "$repo:/usr/src/app" \ + --volume "$repo/junit-output:/junit-output" \ + --name elasticsearch-js \ + --rm \ + elastic/elasticsearch-js \ + bash -c "npm run test:integration; [ -f ./report-junit.xml ] && mv ./report-junit.xml /junit-output/junit-$BUILDKITE_JOB_ID.xml || echo 'No JUnit artifact found'" diff --git a/.dockerignore b/.dockerignore index a448fae9c..c2031b20f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -6,3 +6,6 @@ elasticsearch lib junit-output .tap +rest-api-spec +yaml-rest-tests +generated-tests diff --git a/.gitignore b/.gitignore index adec49623..07e49ff7b 100644 --- a/.gitignore +++ b/.gitignore @@ -68,3 +68,7 @@ bun.lockb test-results processinfo .tap +rest-api-spec +yaml-rest-tests +generated-tests +schema diff --git a/.npmignore b/.npmignore index 8a921bbd6..3f909d8c7 100644 --- a/.npmignore +++ b/.npmignore @@ -74,3 +74,6 @@ CONTRIBUTING.md src bun.lockb .tap +rest-api-spec +yaml-rest-tests +generated-tests diff --git a/package.json b/package.json index 1c52966c3..28946e5be 100644 --- a/package.json +++ b/package.json @@ -18,7 +18,8 @@ "test:coverage-100": "npm run build && tap --coverage --100", "test:coverage-report": "npm run build && tap --coverage && nyc report --reporter=text-lcov > coverage.lcov", "test:coverage-ui": "npm run build && tap --coverage --coverage-report=html", - "test:integration": "tsc && node test/integration/index.js", + "test:integration-build": "npm run build && node test/integration/index.js", + "test:integration": "npm run test:integration-build && env tap run --jobs=1 --reporter=junit --reporter-file=report-junit.xml generated-tests/", "lint": "ts-standard src", "lint:fix": "ts-standard --fix src", "license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause;0BSD'", @@ -77,7 +78,7 @@ "node-fetch": "2.7.0", "ora": "5.4.1", "proxy": "1.0.2", - "rimraf": "3.0.2", + "rimraf": "5.0.10", "semver": "7.7.1", "split2": "4.2.0", "stoppable": "1.1.0", diff --git a/scripts/download-artifacts.js b/scripts/download-artifacts.js index d8d5e189e..c15ed4ae1 100644 --- a/scripts/download-artifacts.js +++ b/scripts/download-artifacts.js @@ -3,162 +3,102 @@ * SPDX-License-Identifier: Apache-2.0 */ -'use strict' - const { join } = require('path') -const minimist = require('minimist') const stream = require('stream') const { promisify } = require('util') const { createWriteStream, promises } = require('fs') -const rimraf = require('rimraf') +const { rimraf } = 
require('rimraf') const fetch = require('node-fetch') const crossZip = require('cross-zip') const ora = require('ora') -const { mkdir, writeFile } = promises +const { mkdir, cp } = promises const pipeline = promisify(stream.pipeline) const unzip = promisify(crossZip.unzip) -const rm = promisify(rimraf) - -const esFolder = join(__dirname, '..', 'elasticsearch') -const zipFolder = join(esFolder, 'artifacts.zip') -const specFolder = join(esFolder, 'rest-api-spec', 'api') -const freeTestFolder = join(esFolder, 'rest-api-spec', 'test', 'free') -const xPackTestFolder = join(esFolder, 'rest-api-spec', 'test', 'platinum') -const artifactInfo = join(esFolder, 'info.json') - -async function downloadArtifacts (opts) { - if (typeof opts.version !== 'string') { - throw new Error('Missing version') - } - const log = ora('Checking out spec and test').start() +const testYamlFolder = join(__dirname, '..', 'yaml-rest-tests') +const zipFile = join(__dirname, '..', 'elasticsearch-clients-tests.zip') - log.text = 'Resolving versions' - let resolved - try { - resolved = await resolve(opts.version, opts.hash) - } catch (err) { - log.fail(err.message) - process.exit(1) - } +const schemaFolder = join(__dirname, '..', 'schema') +const schemaJson = join(schemaFolder, 'schema.json') - opts.id = opts.id || resolved.id - opts.hash = opts.hash || resolved.hash - opts.version = resolved.version +async function downloadArtifacts (localTests, version = 'main') { + const log = ora('Checking out spec and test').start() - const info = loadInfo() + const { GITHUB_TOKEN } = process.env - if (info && info.version === opts.version) { - if (info.hash === opts.hash && info.id === opts.id) { - log.succeed('The artifact copy present locally is already up to date') - return - } + if (version !== 'main') { + version = version.split('.').slice(0, 2).join('.') } - log.text = 'Cleanup checkouts/elasticsearch' - await rm(esFolder) - await mkdir(esFolder, { recursive: true }) + log.text = 'Clean tests folder' + await rimraf(testYamlFolder) + await mkdir(testYamlFolder, { recursive: true }) - log.text = 'Downloading artifacts' - const response = await fetch(resolved.url) - if (!response.ok) { - log.fail(`unexpected response ${response.statusText}`) - process.exit(1) - } - await pipeline(response.body, createWriteStream(zipFolder)) + log.text = `Fetch test YAML files for version ${version}` - log.text = 'Unzipping' - await unzip(zipFolder, esFolder) + if (localTests) { + log.text = `Copying local tests from ${localTests}` + await cp(localTests, testYamlFolder, { recursive: true }) + } else { + if (!GITHUB_TOKEN) { + log.fail("Missing required environment variable 'GITHUB_TOKEN'") + process.exit(1) + } - log.text = 'Cleanup' - await rm(zipFolder) + const response = await fetch(`https://api.github.com/repos/elastic/elasticsearch-clients-tests/zipball/${version}`, { + headers: { + Authorization: `Bearer ${GITHUB_TOKEN}`, + Accept: 'application/vnd.github+json' + } + }) - log.text = 'Update info' - await writeFile(artifactInfo, JSON.stringify(opts), 'utf8') + if (!response.ok) { + log.fail(`unexpected response ${response.statusText}`) + process.exit(1) + } - log.succeed('Done') -} + log.text = 'Downloading tests zipball' + await pipeline(response.body, createWriteStream(zipFile)) -function loadInfo () { - try { - return require(artifactInfo) - } catch (err) { - return null - } -} + log.text = 'Unzipping tests' + await unzip(zipFile, testYamlFolder) -async function resolve (version, hash) { - const response = await 
fetch(`https://artifacts-api.elastic.co/v1/versions/${version}`) - if (!response.ok) { - throw new Error(`unexpected response ${response.statusText}`) + log.text = 'Cleanup' + await rimraf(zipFile) } - const data = await response.json() - const esBuilds = data.version.builds - .filter(build => build.projects.elasticsearch != null) - .map(build => { - return { - projects: build.projects.elasticsearch, - buildId: build.build_id, - date: build.start_time, - version: build.version - } - }) - .sort((a, b) => { - const dA = new Date(a.date) - const dB = new Date(b.date) - if (dA > dB) return -1 - if (dA < dB) return 1 - return 0 - }) + log.text = 'Fetching Elasticsearch specification' + await rimraf(schemaFolder) + await mkdir(schemaFolder, { recursive: true }) - if (hash != null) { - const build = esBuilds.find(build => build.projects.commit_hash === hash) - if (!build) { - throw new Error(`Can't find any build with hash '${hash}'`) - } - const zipKey = Object.keys(build.projects.packages).find(key => key.startsWith('rest-resources-zip-') && key.endsWith('.zip')) - return { - url: build.projects.packages[zipKey].url, - id: build.buildId, - hash: build.projects.commit_hash, - version: build.version - } + const response = await fetch(`https://raw.githubusercontent.com/elastic/elasticsearch-specification/${version}/output/schema/schema.json`) + if (!response.ok) { + log.fail(`unexpected response ${response.statusText}`) + process.exit(1) } - const lastBuild = esBuilds[0] - const zipKey = Object.keys(lastBuild.projects.packages).find(key => key.startsWith('rest-resources-zip-') && key.endsWith('.zip')) - return { - url: lastBuild.projects.packages[zipKey].url, - id: lastBuild.buildId, - hash: lastBuild.projects.commit_hash, - version: lastBuild.version - } + log.text = 'Downloading schema.json' + await pipeline(response.body, createWriteStream(schemaJson)) + + log.succeed('Done') } -async function main (options) { - delete options._ - await downloadArtifacts(options) +async function main () { + await downloadArtifacts() } + if (require.main === module) { process.on('unhandledRejection', function (err) { console.error(err) process.exit(1) }) - const options = minimist(process.argv.slice(2), { - string: ['id', 'version', 'hash'] - }) - main(options).catch(t => { + main().catch(t => { console.log(t) process.exit(2) }) } module.exports = downloadArtifacts -module.exports.locations = { - specFolder, - freeTestFolder, - xPackTestFolder -} +module.exports.locations = { testYamlFolder, zipFile, schemaJson } diff --git a/scripts/generate-docs-examples.js b/scripts/generate-docs-examples.js index 3a6813f7a..8026547c3 100644 --- a/scripts/generate-docs-examples.js +++ b/scripts/generate-docs-examples.js @@ -6,7 +6,7 @@ const { join } = require('path') const { writeFile } = require('fs/promises') const fetch = require('node-fetch') -const rimraf = require('rimraf') +const { rimraf } = require('rimraf') const ora = require('ora') const { convertRequests } = require('@elastic/request-converter') const minimist = require('minimist') diff --git a/test/integration/index.js b/test/integration/index.js index f226ee893..a4d51ea4e 100644 --- a/test/integration/index.js +++ b/test/integration/index.js @@ -10,436 +10,63 @@ process.on('unhandledRejection', function (err) { process.exit(1) }) -const { writeFileSync, readFileSync, readdirSync, statSync } = require('fs') -const { join, sep } = require('path') -const yaml = require('js-yaml') -const minimist = require('minimist') -const ms = require('ms') -const { Client } = 
require('../../index') -const build = require('./test-runner') -const { sleep } = require('./helper') -const createJunitReporter = require('./reporter') +const assert = require('node:assert') +const url = require('node:url') +const fs = require('node:fs') +const path = require('node:path') +const globby = require('globby') +const semver = require('semver') const downloadArtifacts = require('../../scripts/download-artifacts') -const yamlFolder = downloadArtifacts.locations.freeTestFolder -const xPackYamlFolder = downloadArtifacts.locations.xPackTestFolder +const buildTests = require('./test-builder') -const MAX_API_TIME = 1000 * 90 -const MAX_FILE_TIME = 1000 * 30 -const MAX_TEST_TIME = 1000 * 3 +const yamlFolder = downloadArtifacts.locations.testYamlFolder -const options = minimist(process.argv.slice(2), { - boolean: ['bail'], - string: ['suite', 'test'] -}) - -const freeSkips = { - // working on fixes for these - '/free/aggregations/bucket_selector.yml': ['bad script'], - '/free/aggregations/bucket_script.yml': ['bad script'], - - // either the YAML test definition is wrong, or this fails because JSON.stringify is coercing "1.0" to "1" - '/free/aggregations/percentiles_bucket.yml': ['*'], - - // not supported yet - '/free/cluster.desired_nodes/10_basic.yml': ['*'], - - // Cannot find methods on `Internal` object - '/free/cluster.desired_balance/10_basic.yml': ['*'], - '/free/cluster.desired_nodes/20_dry_run.yml': ['*'], - '/free/cluster.prevalidate_node_removal/10_basic.yml': ['*'], - - // the v8 client never sends the scroll_id in querystring, - // the way the test is structured causes a security exception - 'free/scroll/10_basic.yml': ['Body params override query string'], - 'free/scroll/11_clear.yml': [ - 'Body params with array param override query string', - 'Body params with string param scroll id override query string' - ], - 'free/cat.allocation/10_basic.yml': ['*'], - 'free/cat.snapshots/10_basic.yml': ['Test cat snapshots output'], - - 'indices.stats/50_disk_usage.yml': ['Disk usage stats'], - 'indices.stats/60_field_usage.yml': ['Field usage stats'], - - // skipping because we are booting ES with `discovery.type=single-node` - // and this test will fail because of this configuration - 'nodes.stats/30_discovery.yml': ['*'], - - // the expected error is returning a 503, - // which triggers a retry and the node to be marked as dead - 'search.aggregation/240_max_buckets.yml': ['*'], - - // long values and json do not play nicely together - 'search.aggregation/40_range.yml': ['Min and max long range bounds'], - - // the yaml runner assumes that null means "does not exists", - // while null is a valid json value, so the check will fail - 'search/320_disallow_queries.yml': ['Test disallow expensive queries'], - 'free/tsdb/90_unsupported_operations.yml': ['noop update'] -} - -const platinumDenyList = { - 'api_key/10_basic.yml': ['Test get api key'], - 'api_key/20_query.yml': ['*'], - 'api_key/11_invalidation.yml': ['Test invalidate api key by realm name'], - 'analytics/histogram.yml': ['Histogram requires values in increasing order'], - - // object keys must me strings, and `0.0.toString()` is `0` - 'ml/evaluate_data_frame.yml': [ - 'Test binary_soft_classifition precision', - 'Test binary_soft_classifition recall', - 'Test binary_soft_classifition confusion_matrix' - ], - - // The cleanup fails with a index not found when retrieving the jobs - 'ml/get_datafeed_stats.yml': ['Test get datafeed stats when total_search_time_ms mapping is missing'], - 'ml/bucket_correlation_agg.yml': 
['Test correlation bucket agg simple'], - - // start should be a string - 'ml/jobs_get_result_overall_buckets.yml': ['Test overall buckets given epoch start and end params'], - - // this can't happen with the client - 'ml/start_data_frame_analytics.yml': ['Test start with inconsistent body/param ids'], - 'ml/stop_data_frame_analytics.yml': ['Test stop with inconsistent body/param ids'], - 'ml/preview_datafeed.yml': ['*'], - - // Investigate why is failing - 'ml/inference_crud.yml': ['*'], - 'ml/categorization_agg.yml': ['Test categorization aggregation with poor settings'], - 'ml/filter_crud.yml': ['*'], - - // investigate why this is failing - 'monitoring/bulk/10_basic.yml': ['*'], - 'monitoring/bulk/20_privileges.yml': ['*'], - 'license/20_put_license.yml': ['*'], - 'snapshot/10_basic.yml': ['*'], - 'snapshot/20_operator_privileges_disabled.yml': ['*'], - - // the body is correct, but the regex is failing - 'sql/sql.yml': ['Getting textual representation'], - 'searchable_snapshots/10_usage.yml': ['*'], - 'service_accounts/10_basic.yml': ['*'], - - // we are setting two certificates in the docker config - 'ssl/10_basic.yml': ['*'], - 'token/10_basic.yml': ['*'], - 'token/11_invalidation.yml': ['*'], - - // very likely, the index template has not been loaded yet. - // we should run a indices.existsTemplate, but the name of the - // template may vary during time. - 'transforms_crud.yml': [ - 'Test basic transform crud', - 'Test transform with query and array of indices in source', - 'Test PUT continuous transform', - 'Test PUT continuous transform without delay set' - ], - 'transforms_force_delete.yml': [ - 'Test force deleting a running transform' - ], - 'transforms_cat_apis.yml': ['*'], - 'transforms_start_stop.yml': ['*'], - 'transforms_stats.yml': ['*'], - 'transforms_stats_continuous.yml': ['*'], - 'transforms_update.yml': ['*'], - - // js does not support ulongs - 'unsigned_long/10_basic.yml': ['*'], - 'unsigned_long/20_null_value.yml': ['*'], - 'unsigned_long/30_multi_fields.yml': ['*'], - 'unsigned_long/40_different_numeric.yml': ['*'], - 'unsigned_long/50_script_values.yml': ['*'], - - // the v8 client flattens the body into the parent object - 'platinum/users/10_basic.yml': ['Test put user with different username in body'], - - // docker issue? 
- 'watcher/execute_watch/60_http_input.yml': ['*'], - - // the checks are correct, but for some reason the test is failing on js side - // I bet is because the backslashes in the rg - 'watcher/execute_watch/70_invalid.yml': ['*'], - 'watcher/put_watch/10_basic.yml': ['*'], - 'xpack/15_basic.yml': ['*'], - - // test that are failing that needs to be investigated - // the error cause can either be in the yaml test or in the specification - - // start should be a string in the yaml test - 'platinum/ml/delete_job_force.yml': ['Test force delete an open job that is referred by a started datafeed'], - 'platinum/ml/evaluate_data_frame.yml': ['*'], - 'platinum/ml/get_datafeed_stats.yml': ['*'], - - // start should be a string in the yaml test - 'platinum/ml/start_stop_datafeed.yml': ['*'] -} - -function runner (opts = {}) { - const options = { node: opts.node } - if (opts.isXPack) { - options.tls = { - ca: readFileSync(join(__dirname, '..', '..', '.buildkite', 'certs', 'ca.crt'), 'utf8'), - rejectUnauthorized: false +const getAllFiles = async dir => { + const files = await globby(dir, { + expandDirectories: { + extensions: ['yml', 'yaml'] } - } - const client = new Client(options) - log('Loading yaml suite') - start({ client, isXPack: opts.isXPack }) - .catch(err => { - if (err.name === 'ResponseError') { - console.error(err) - console.log(JSON.stringify(err.meta, null, 2)) - } else { - console.error(err) - } - process.exit(1) - }) + }) + return files.sort() } -async function waitCluster (client, times = 0) { - try { - await client.cluster.health({ wait_for_status: 'green', timeout: '50s' }) - } catch (err) { - if (++times < 10) { - await sleep(5000) - return waitCluster(client, times) - } - console.error(err) - process.exit(1) - } -} - -async function start ({ client, isXPack }) { - log('Waiting for Elasticsearch') - await waitCluster(client) - - const body = await client.info() - const { number: version, build_hash: hash } = body.version - - log(`Downloading artifacts for hash ${hash}...`) - await downloadArtifacts({ hash, version }) - - log(`Testing ${isXPack ? 'Platinum' : 'Free'} api...`) - const junit = createJunitReporter() - const junitTestSuites = junit.testsuites(`Integration test for ${isXPack ? 'Platinum' : 'Free'} api`) - - const stats = { - total: 0, - skip: 0, - pass: 0, - assertions: 0 - } - const folders = getAllFiles(isXPack ? 
xPackYamlFolder : yamlFolder) - .filter(t => !/(README|TODO)/g.test(t)) - // we cluster the array based on the folder names, - // to provide a better test log output - .reduce((arr, file) => { - const path = file.slice(file.indexOf('/rest-api-spec/test'), file.lastIndexOf('/')) - let inserted = false - for (let i = 0; i < arr.length; i++) { - if (arr[i][0].includes(path)) { - inserted = true - arr[i].push(file) - break - } - } - if (!inserted) arr.push([file]) - return arr - }, []) - - const totalTime = now() - for (const folder of folders) { - // pretty name - const apiName = folder[0].slice( - folder[0].indexOf(`${sep}rest-api-spec${sep}test`) + 19, - folder[0].lastIndexOf(sep) - ) - - log('Testing ' + apiName.slice(1)) - const apiTime = now() - - for (const file of folder) { - const testRunner = build({ - client, - version, - isXPack: file.includes('platinum') - }) - const fileTime = now() - const data = readFileSync(file, 'utf8') - // get the test yaml (as object), some file has multiple yaml documents inside, - // every document is separated by '---', so we split on the separator - // and then we remove the empty strings, finally we parse them - const tests = data - .split('\n---\n') - .map(s => s.trim()) - // empty strings - .filter(Boolean) - .map(parse) - // null values - .filter(Boolean) - - // get setup and teardown if present - let setupTest = null - let teardownTest = null - for (const test of tests) { - if (test.setup) setupTest = test.setup - if (test.teardown) teardownTest = test.teardown - } - - const cleanPath = file.slice(file.lastIndexOf(apiName)) - - // skip if --suite CLI arg doesn't match - if (options.suite && !cleanPath.endsWith(options.suite)) continue - - log(' ' + cleanPath) - const junitTestSuite = junitTestSuites.testsuite(apiName.slice(1) + ' - ' + cleanPath) - - for (const test of tests) { - const testTime = now() - const name = Object.keys(test)[0] - - // skip setups, teardowns and anything that doesn't match --test flag when present - if (name === 'setup' || name === 'teardown') continue - if (options.test && !name.endsWith(options.test)) continue - - const junitTestCase = junitTestSuite.testcase(name, `node_${process.version}: ${cleanPath}`) - - stats.total += 1 - if (shouldSkip(isXPack, file, name)) { - stats.skip += 1 - junitTestCase.skip('This test is in the skip list of the client') - junitTestCase.end() - continue - } - log(' - ' + name) - try { - await testRunner.run(setupTest, test[name], teardownTest, stats, junitTestCase) - stats.pass += 1 - } catch (err) { - junitTestCase.failure(err) - junitTestCase.end() - junitTestSuite.end() - junitTestSuites.end() - generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free') - err.meta = JSON.stringify(err.meta ?? {}, null, 2) - console.error(err) - - if (options.bail) { - process.exit(1) - } else { - continue - } - } - const totalTestTime = now() - testTime - junitTestCase.end() - if (totalTestTime > MAX_TEST_TIME) { - log(' took too long: ' + ms(totalTestTime)) - } else { - log(' took: ' + ms(totalTestTime)) - } - } - junitTestSuite.end() - const totalFileTime = now() - fileTime - if (totalFileTime > MAX_FILE_TIME) { - log(` ${cleanPath} took too long: ` + ms(totalFileTime)) - } else { - log(` ${cleanPath} took: ` + ms(totalFileTime)) - } - } - const totalApiTime = now() - apiTime - if (totalApiTime > MAX_API_TIME) { - log(`${apiName} took too long: ` + ms(totalApiTime)) - } else { - log(`${apiName} took: ` + ms(totalApiTime)) - } - } - junitTestSuites.end() - generateJunitXmlReport(junit, isXPack ? 
'platinum' : 'free') - log(`Total testing time: ${ms(now() - totalTime)}`) - log(`Test stats: - - Total: ${stats.total} - - Skip: ${stats.skip} - - Pass: ${stats.pass} - - Fail: ${stats.total - (stats.pass + stats.skip)} - - Assertions: ${stats.assertions} - `) -} - -function log (text) { - process.stdout.write(text + '\n') -} - -function now () { - const ts = process.hrtime() - return (ts[0] * 1e3) + (ts[1] / 1e6) -} - -function parse (data) { - let doc - try { - doc = yaml.load(data, { schema: yaml.CORE_SCHEMA }) - } catch (err) { - console.error(err) - return - } - return doc -} - -function generateJunitXmlReport (junit, suite) { - writeFileSync( - join(__dirname, '..', '..', `${suite}-report-junit.xml`), - junit.prettyPrint() - ) +async function doTestBuilder (version, clientOptions) { + await downloadArtifacts(undefined, version) + const files = await getAllFiles(yamlFolder) + await buildTests(files, clientOptions) } if (require.main === module) { - const scheme = process.env.TEST_SUITE === 'platinum' ? 'https' : 'http' - const node = process.env.TEST_ES_SERVER || `${scheme}://elastic:changeme@localhost:9200` - const opts = { - node, - isXPack: process.env.TEST_SUITE !== 'free' - } - runner(opts) -} - -const shouldSkip = (isXPack, file, name) => { - if (options.suite || options.test) return false - - let list = Object.keys(freeSkips) - for (let i = 0; i < list.length; i++) { - const freeTest = freeSkips[list[i]] - for (let j = 0; j < freeTest.length; j++) { - if (file.endsWith(list[i]) && (name === freeTest[j] || freeTest[j] === '*')) { - const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name - log(`Skipping test ${testName} because it is denylisted in the free test suite`) - return true - } - } + const node = process.env.TEST_ES_SERVER + const apiKey = process.env.ES_API_SECRET_KEY + const password = process.env.ELASTIC_PASSWORD + let version = process.env.STACK_VERSION + + assert(node != null, 'Environment variable missing: TEST_ES_SERVER') + assert(apiKey != null || password != null, 'Environment variable missing: ES_API_SECRET_KEY or ELASTIC_PASSWORD') + assert(version != null, 'Environment variable missing: STACK_VERSION') + + version = semver.clean(version.includes('SNAPSHOT') ? version.split('-')[0] : version) + + const clientOptions = { node } + if (apiKey != null) { + clientOptions.auth = { apiKey } + } else { + clientOptions.auth = { username: 'elastic', password } } - - if (file.includes('x-pack') || isXPack) { - list = Object.keys(platinumDenyList) - for (let i = 0; i < list.length; i++) { - const platTest = platinumDenyList[list[i]] - for (let j = 0; j < platTest.length; j++) { - if (file.endsWith(list[i]) && (name === platTest[j] || platTest[j] === '*')) { - const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name - log(`Skipping test ${testName} because it is denylisted in the platinum test suite`) - return true - } - } + const nodeUrl = new url.URL(node) + if (nodeUrl.protocol === 'https:') { + clientOptions.tls = { + ca: fs.readFileSync(path.join(__dirname, '..', '..', '.buildkite', 'certs', 'ca.crt'), 'utf8'), + rejectUnauthorized: false } } - return false + doTestBuilder(version, clientOptions) + .then(() => process.exit(0)) + .catch(err => { + console.error(err) + process.exit(1) + }) } - -const getAllFiles = dir => - readdirSync(dir).reduce((files, file) => { - const name = join(dir, file) - const isDirectory = statSync(name).isDirectory() - return isDirectory ? 
[...files, ...getAllFiles(name)] : [...files, name] - }, []) - -module.exports = runner diff --git a/test/integration/reporter.js b/test/integration/reporter.js deleted file mode 100644 index 165478c50..000000000 --- a/test/integration/reporter.js +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict' - -const assert = require('node:assert') -const { create } = require('xmlbuilder2') - -function createJunitReporter () { - const report = {} - - return { testsuites, prettyPrint } - - function prettyPrint () { - return create(report).end({ prettyPrint: true }) - } - - function testsuites (name) { - assert(name, 'The testsuites name is required') - assert(report.testsuites === undefined, 'Cannot set more than one testsuites block') - const startTime = Date.now() - - report.testsuites = { - '@id': new Date().toISOString(), - '@name': name - } - - const testsuiteList = [] - - return { - testsuite: createTestSuite(testsuiteList), - end () { - report.testsuites['@time'] = Math.round((Date.now() - startTime) / 1000) - report.testsuites['@tests'] = testsuiteList.reduce((acc, val) => { - acc += val['@tests'] - return acc - }, 0) - report.testsuites['@failures'] = testsuiteList.reduce((acc, val) => { - acc += val['@failures'] - return acc - }, 0) - report.testsuites['@skipped'] = testsuiteList.reduce((acc, val) => { - acc += val['@skipped'] - return acc - }, 0) - if (testsuiteList.length) { - report.testsuites.testsuite = testsuiteList - } - } - } - } - - function createTestSuite (testsuiteList) { - return function testsuite (name) { - assert(name, 'The testsuite name is required') - const startTime = Date.now() - const suite = { - '@id': new Date().toISOString(), - '@name': name - } - const testcaseList = [] - testsuiteList.push(suite) - return { - testcase: createTestCase(testcaseList), - end () { - suite['@time'] = Math.round((Date.now() - startTime) / 1000) - suite['@tests'] = testcaseList.length - suite['@failures'] = testcaseList.filter(t => t.failure).length - suite['@skipped'] = testcaseList.filter(t => t.skipped).length - if (testcaseList.length) { - suite.testcase = testcaseList - } - } - } - } - } - - function createTestCase (testcaseList) { - return function testcase (name, file) { - assert(name, 'The testcase name is required') - const startTime = Date.now() - const tcase = { - '@id': new Date().toISOString(), - '@name': name - } - if (file) tcase['@file'] = file - testcaseList.push(tcase) - return { - failure (error) { - assert(error, 'The failure error object is required') - tcase.failure = { - '#': error.stack, - '@message': error.message, - '@type': error.code - } - }, - skip (reason) { - if (typeof reason !== 'string') { - reason = JSON.stringify(reason, null, 2) - } - tcase.skipped = { - '#': reason - } - }, - end () { - tcase['@time'] = Math.round((Date.now() - startTime) / 1000) - } - } - } - } -} - -module.exports = createJunitReporter diff --git a/test/integration/test-builder.js b/test/integration/test-builder.js new file mode 100644 index 000000000..64ce97dd2 --- /dev/null +++ b/test/integration/test-builder.js @@ -0,0 +1,482 @@ +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const { join, sep } = require('node:path') +const { readFileSync, writeFileSync, promises } = require('node:fs') +const yaml = require('js-yaml') +const { rimraf } = require('rimraf') +const { mkdir } = promises + +const generatedTestsPath = join(__dirname, '..', '..', 'generated-tests') + +const stackSkips = [ + // test definition bug: response is empty string + 'cat/fielddata.yml', + // test definition bug: response is empty string + 'cluster/delete_voting_config_exclusions.yml', + // test definition bug: response is empty string + 'cluster/voting_config_exclusions.yml', + // client bug: ILM request takes a "body" param, but "body" is a special keyword in the JS client + 'ilm/10_basic.yml', + // health report is... not healthy + 'health_report.yml', + // TODO: `contains` action only supports checking for primitives inside arrays or strings inside strings, not referenced values like objects inside arrays + 'entsearch/10_basic.yml', + // test definition bug: error message does not match + 'entsearch/30_sync_jobs_stack.yml', + // no handler found for uri [/knn_test/_knn_search] + 'knn_search.yml', + // TODO: fix license on ES startup - "Operation failed: Current license is basic." + 'license/10_stack.yml', + // response.body should be truthy. found: "" + 'logstash/10_basic.yml', + // test definition bug? security_exception: unable to authenticate user [x_pack_rest_user] for REST request [/_ml/trained_models/test_model/definition/0] + 'machine_learning/clear_tm_deployment_cache.yml', + // client bug: 0.99995 does not equal 0.5 + 'machine_learning/data_frame_evaluate.yml', + // test definition bug? regex has whitespace, maybe needs to be removed + 'machine_learning/explain_data_frame_analytics.yml', + // client bug: 4 != 227 + 'machine_learning/preview_datafeed.yml', + // test definition bug: error message does not match + 'machine_learning/revert_model_snapshot.yml', + // test definition bug: error message does not match + 'machine_learning/update_model_snapshot.yml', + // version_conflict_engine_exception + 'machine_learning/jobs_crud.yml', + // test definition bug: error message does not match + 'machine_learning/model_snapshots.yml', + // test definition bug: error message does not match + 'query_rules/30_test.yml', + // client bug: 0 != 0.1 + 'script/10_basic.yml', + // client bug: request takes a "body" param, but "body" is a special keyword in the JS client + 'searchable_snapshots/10_basic.yml', + // test builder bug: does `match` action need to support "array contains value"? 
+ 'security/10_api_key_basic.yml', + // test definition bug: error message does not match + 'security/140_user.yml', + // test definition bug: error message does not match + 'security/30_privileges_stack.yml', + // unknown issue: $profile.enabled path doesn't exist in response + 'security/130_user_profile.yml', + // test definition bug: error message does not match + 'security/change_password.yml', + // test builder bug: media_type_header_exception + 'simulate/ingest.yml', + // client bug: request takes a "body" param, but "body" is a special keyword in the JS client + 'snapshot/10_basic.yml', + // test definition bug: illegal_argument_exception + 'sql/10_basic.yml', + // test definition bug: illegal_argument_exception + 'text_structure/10_basic.yml', + // test definition bug: illegal_argument_exception + 'transform/10_basic.yml', +] + +const serverlessSkips = [ + // TODO: sql.getAsync does not set a content-type header but ES expects one + // transport only sets a content-type if the body is not empty + 'sql/10_basic.yml', + // TODO: bulk call in setup fails due to "malformed action/metadata line" + // bulk body is being sent as a Buffer, unsure if related. + 'transform/10_basic.yml', + // TODO: scripts_painless_execute expects {"result":"0.1"}, gets {"result":"0"} + // body sent as Buffer, unsure if related + 'script/10_basic.yml', + // TODO: expects {"outlier_detection.auc_roc.value":0.99995}, gets {"outlier_detection.auc_roc.value":0.5} + // remove if/when https://github.com/elastic/elasticsearch-clients-tests/issues/37 is resolved + 'machine_learning/data_frame_evaluate.yml', + // TODO: Cannot perform requested action because job [job-crud-test-apis] is not open + 'machine_learning/jobs_crud.yml', + // TODO: test runner needs to support ignoring 410 errors + 'enrich/10_basic.yml', + // TODO: parameter `enabled` is not allowed in source + // Same underlying problem as https://github.com/elastic/elasticsearch-clients-tests/issues/55 + 'cluster/component_templates.yml', + // TODO: expecting `ct_field` field mapping to be returned, but instead only finds `field` + 'indices/simulate_template.yml', + 'indices/simulate_index_template.yml', + // TODO: test currently times out + 'inference/10_basic.yml', + // TODO: Fix: "Trained model deployment [test_model] is not allocated to any nodes" + 'machine_learning/20_trained_model_serverless.yml', + // TODO: query_rules api not available yet + 'query_rules/10_query_rules.yml', + 'query_rules/20_rulesets.yml', + 'query_rules/30_test.yml', + // TODO: security.putRole API not available + 'security/50_roles_serverless.yml', + // TODO: expected undefined to equal 'some_table' + 'entsearch/50_connector_updates.yml', + // TODO: resource_not_found_exception + 'tasks_serverless.yml', +] + +function parse (data) { + let doc + try { + doc = yaml.load(data, { schema: yaml.CORE_SCHEMA }) + } catch (err) { + console.error(err) + return + } + return doc +} + +async function build (yamlFiles, clientOptions) { + await rimraf(generatedTestsPath) + await mkdir(generatedTestsPath, { recursive: true }) + + for (const file of yamlFiles) { + const apiName = file.split(`${sep}tests${sep}`)[1] + const data = readFileSync(file, 'utf8') + + const tests = data + .split('\n---\n') + .map(s => s.trim()) + // empty strings + .filter(Boolean) + .map(parse) + // null values + .filter(Boolean) + + let code = "import { test } from 'tap'\n" + code += "import { Client } from '@elastic/elasticsearch'\n\n" + + const requires = tests.find(test => test.requires != null) + let skip = new 
Set() + if (requires != null) { + const { serverless = true, stack = true } = requires.requires + if (!serverless) skip.add('process.env.TEST_ES_SERVERLESS === "1"') + if (!stack) skip.add('process.env.TEST_ES_STACK === "1"') + } + + if (stackSkips.includes(apiName)) skip.add('process.env.TEST_ES_STACK === "1"') + if (serverlessSkips.includes(apiName)) skip.add('process.env.TEST_ES_SERVERLESS === "1"') + + if (skip.size > 0) { + code += `test('${apiName}', { skip: ${Array.from(skip).join(' || ')} }, t => {\n` + } else { + code += `test('${apiName}', t => {\n` + } + + for (const test of tests) { + if (test.setup != null) { + code += ' t.before(async () => {\n' + code += indent(buildActions(test.setup), 4) + code += ' })\n\n' + } + + if (test.teardown != null) { + code += ' t.after(async () => {\n' + code += indent(buildActions(test.teardown), 4) + code += ' })\n\n' + } + + for (const key of Object.keys(test).filter(k => !['setup', 'teardown', 'requires'].includes(k))) { + if (test[key].find(action => Object.keys(action)[0] === 'skip') != null) { + code += ` t.test('${key}', { skip: true }, async t => {\n` + } else { + code += ` t.test('${key}', async t => {\n` + } + code += indent(buildActions(test[key]), 4) + code += '\n t.end()\n' + code += ' })\n' + } + // if (test.requires != null) requires = test.requires + } + + code += '\n t.end()\n' + code += '})\n' + + const testDir = join(generatedTestsPath, apiName.split(sep).slice(0, -1).join(sep)) + const testFile = join(testDir, apiName.split(sep).pop().replace(/\.ya?ml$/, '.mjs')) + await mkdir(testDir, { recursive: true }) + writeFileSync(testFile, code, 'utf8') + } + + function buildActions (actions) { + let code = `const client = new Client(${JSON.stringify(clientOptions, null, 2)})\n` + code += 'let response\n\n' + + const vars = new Set() + + for (const action of actions) { + const key = Object.keys(action)[0] + switch (key) { + case 'do': + code += buildDo(action.do) + break + case 'set': + const setResult = buildSet(action.set, vars) + vars.add(setResult.varName) + code += setResult.code + break + case 'transform_and_set': + code += buildTransformAndSet(action.transform_and_set) + break + case 'match': + code += buildMatch(action.match) + break + case 'lt': + code += buildLt(action.lt) + break + case 'lte': + code += buildLte(action.lte) + break + case 'gt': + code += buildGt(action.gt) + break + case 'gte': + code += buildGte(action.gte) + break + case 'length': + code += buildLength(action.length) + break + case 'is_true': + code += buildIsTrue(action.is_true) + break + case 'is_false': + code += buildIsFalse(action.is_false) + break + case 'contains': + code += buildContains(action.contains) + break + case 'exists': + code += buildExists(action.exists) + break + case 'skip': + break + default: + console.warn(`Action not supported: ${key}`) + break + } + } + return code + } +} + +function buildDo (action) { + let code = '' + const keys = Object.keys(action) + if (keys.includes('catch')) { + code += 'try {\n' + code += indent(buildRequest(action), 2) + code += '} catch (err) {\n' + code += ` t.match(err.toString(), ${buildValLiteral(action.catch)})\n` + code += '}\n' + } else { + code += buildRequest(action) + } + return code +} + +function buildRequest(action) { + let code = '' + + const options = { meta: true } + + for (const key of Object.keys(action)) { + if (key === 'catch') continue + + if (key === 'headers') { + options.headers = action.headers + continue + } + + const params = action[key] + if (params.ignore != null) { + if 
(Array.isArray(params.ignore)) { + options.ignore = params.ignore + } else { + options.ignore = [params.ignore] + } + } + + code += `response = await client.${toCamelCase(key)}(${buildApiParams(action[key])}, ${JSON.stringify(options)})\n` + } + return code +} + +function buildSet (action, vars) { + const key = Object.keys(action)[0] + const varName = action[key] + const lookup = buildLookup(key) + + let code = '' + if (vars.has(varName)) { + code = `${varName} = ${lookup}\n` + } else { + code =`let ${varName} = ${lookup}\n` + } + return { code, varName } +} + +function buildTransformAndSet (action) { + return `// TODO buildTransformAndSet: ${JSON.stringify(action)}\n` +} + +function buildMatch (action) { + const key = Object.keys(action)[0] + let lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.match(${lookup}, ${val})\n` +} + +function buildLt (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.ok(${lookup} < ${val})\n` +} + +function buildLte (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.ok(${lookup} <= ${val})\n` +} + +function buildGt (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.ok(${lookup} > ${val})\n` +} + +function buildGte (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.ok(${lookup} >= ${val})\n` +} + +function buildLength (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + + let code = '' + code += `if (typeof ${lookup} === 'object' && !Array.isArray(${lookup})) {\n` + code += ` t.equal(Object.keys(${lookup}).length, ${val})\n` + code += `} else {\n` + code += ` t.equal(${lookup}.length, ${val})\n` + code += `}\n` + return code +} + +function buildIsTrue (action) { + let lookup = `${buildLookup(action)}` + let errMessage = `\`${action} should be truthy. found: '\$\{JSON.stringify(${lookup})\}'\`` + if (lookup.includes('JSON.stringify')) errMessage = `\`${action} should be truthy. found: '\$\{${lookup}\}'\`` + return `t.ok(${lookup} === "true" || (Boolean(${lookup}) && ${lookup} !== "false"), ${errMessage})\n` +} + +function buildIsFalse (action) { + let lookup = `${buildLookup(action)}` + let errMessage = `\`${action} should be falsy. found: '\$\{JSON.stringify(${lookup})\}'\`` + if (lookup.includes('JSON.stringify')) errMessage = `\`${action} should be falsy. 
found: '\$\{${lookup}\}'\`` + return `t.ok(${lookup} === "false" || !Boolean(${lookup}), ${errMessage})\n` +} + +function buildContains (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.ok(${lookup}.includes(${val}), '${JSON.stringify(val)} not found in ${key}')\n` +} + +function buildExists (keyName) { + const lookup = buildLookup(keyName) + return `t.ok(${lookup} != null, \`Key "${keyName}" not found in response body: \$\{JSON.stringify(response.body, null, 2)\}\`)\n` +} + +function buildApiParams (params) { + if (Object.keys(params).length === 0) { + return 'undefined' + } else { + const out = {} + Object.keys(params).filter(k => k !== 'ignore' && k !== 'headers').forEach(k => out[k] = params[k]) + return buildValLiteral(out) + } +} + +function toCamelCase (name) { + return name.replace(/_([a-z])/g, g => g[1].toUpperCase()) +} + +function indent (str, spaces) { + const tabs = ' '.repeat(spaces) + return str.replace(/\s+$/, '').split('\n').map(l => `${tabs}${l}`).join('\n') + '\n' +} + +function buildLookup (path) { + if (path === '$body') return '(typeof response.body === "string" ? response.body : JSON.stringify(response.body))' + + const outPath = path.split('.').map(step => { + if (parseInt(step, 10).toString() === step) { + return `[${step}]` + } else if (step.match(/^\$[a-zA-Z0-9_]+$/)) { + const lookup = step.replace(/^\$/, '') + if (lookup === 'body') return '' + return `[${lookup}]` + } else if (step === '') { + return '' + } else { + return `['${step}']` + } + }).join('') + return `response.body${outPath}` +} + +function buildValLiteral (val) { + if (typeof val === 'string') val = val.trim() + if (isRegExp(val)) { + return JSON.stringify(val).replace(/^"/, '').replace(/"$/, '').replaceAll('\\\\', '\\') + } else if (isVariable(val)) { + if (val === '$body') return 'JSON.stringify(response.body)' + return val.replace(/^\$/, '') + } else if (isPlainObject(val)) { + return JSON.stringify(cleanObject(val), null, 2).replace(/"\$([a-zA-Z0-9_]+)"/g, '$1') + } else { + return JSON.stringify(val) + } +} + +function isRegExp (str) { + return typeof str === 'string' && str.startsWith('/') && str.endsWith('/') +} + +function isVariable (str) { + return typeof str === 'string' && str.match(/^\$[a-zA-Z0-9_]+$/) != null +} + +function cleanObject (obj) { + Object.keys(obj).forEach(key => { + let val = obj[key] + if (typeof val === 'string' && val.trim().startsWith('{') && val.trim().endsWith('}')) { + // attempt to parse as object + try { + val = JSON.parse(val) + } catch { + } + } else if (isPlainObject(val)) { + val = cleanObject(val) + } else if (Array.isArray(val)) { + val = val.map(item => isPlainObject(item) ? cleanObject(item) : item) + } + obj[key] = val + }) + return obj +} + +function isPlainObject(obj) { + return typeof obj === 'object' && !Array.isArray(obj) && obj != null +} + +module.exports = build diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js deleted file mode 100644 index 856b23567..000000000 --- a/test/integration/test-runner.js +++ /dev/null @@ -1,1072 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict' - -/* eslint camelcase: 0 */ - -const chai = require('chai') -const semver = require('semver') -const helper = require('./helper') -const { join } = require('path') -const { locations } = require('../../scripts/download-artifacts') -const packageJson = require('../../package.json') - -chai.config.showDiff = true -chai.config.truncateThreshold = 0 -const { assert } = chai - -const { delve, to, isXPackTemplate, sleep, updateParams } = helper - -const supportedFeatures = [ - 'gtelte', - 'regex', - 'benchmark', - 'stash_in_path', - 'groovy_scripting', - 'headers', - 'transform_and_set', - 'catch_unauthorized', - 'arbitrary_key' -] - -function build (opts = {}) { - const client = opts.client - const esVersion = opts.version - const isXPack = opts.isXPack - const stash = new Map() - let response = null - - /** - * Runs a cleanup, removes all indices, aliases, templates, and snapshots - * @returns {Promise} - */ - async function cleanup (isXPack) { - response = null - stash.clear() - - await client.cluster.health({ - wait_for_no_initializing_shards: true, - timeout: '70s', - level: 'shards' - }) - - if (isXPack) { - // wipe rollup jobs - const jobsList = await client.rollup.getJobs({ id: '_all' }) - const jobsIds = jobsList.jobs.map(j => j.config.id) - await helper.runInParallel( - client, 'rollup.stopJob', - jobsIds.map(j => ({ id: j, wait_for_completion: true })) - ) - await helper.runInParallel( - client, 'rollup.deleteJob', - jobsIds.map(j => ({ id: j })) - ) - - // delete slm policies - const policies = await client.slm.getLifecycle() - await helper.runInParallel( - client, 'slm.deleteLifecycle', - Object.keys(policies).map(p => ({ policy_id: p })) - ) - - // remove 'x_pack_rest_user', used in some xpack test - try { - await client.security.deleteUser({ username: 'x_pack_rest_user' }, { ignore: [404] }) - } catch { - // do nothing - } - - const searchableSnapshotIndices = await client.cluster.state({ - metric: 'metadata', - filter_path: 'metadata.indices.*.settings.index.store.snapshot' - }) - if (searchableSnapshotIndices.metadata != null && searchableSnapshotIndices.metadata.indices != null) { - await helper.runInParallel( - client, 'indices.delete', - Object.keys(searchableSnapshotIndices.metadata.indices).map(i => ({ index: i })), - { ignore: [404] } - ) - } - } - - // clean snapshots - const repositories = await client.snapshot.getRepository() - for (const repository of Object.keys(repositories)) { - await client.snapshot.delete({ repository, snapshot: '*' }, { ignore: [404] }) - await client.snapshot.deleteRepository({ name: repository }, { ignore: [404] }) - } - - if (isXPack) { - // clean data streams - await client.indices.deleteDataStream({ name: '*', expand_wildcards: 'all' }) - } - - // clean all indices - await client.indices.delete({ - index: [ - '*', - '-.ds-ilm-history-*' - ], - expand_wildcards: 'open,closed,hidden' - }, { - ignore: [404] - }) - - // delete templates - const templates = await client.cat.templates({ h: 'name' }) - for (const template of templates.split('\n').filter(Boolean)) { - if (isXPackTemplate(template)) continue - const body = await client.indices.deleteTemplate({ name: template }, { ignore: [404] }) - if (JSON.stringify(body).includes(`index_template [${template}] missing`)) { - await client.indices.deleteIndexTemplate({ name: template }, { ignore: [404] }) - } - } - - // delete component template - const body = await client.cluster.getComponentTemplate() - const components = 
body.component_templates.filter(c => !isXPackTemplate(c.name)).map(c => c.name) - if (components.length > 0) { - try { - await client.cluster.deleteComponentTemplate({ name: components.join(',') }, { ignore: [404] }) - } catch { - // do nothing - } - } - - // Remove any cluster setting - const settings = await client.cluster.getSettings() - const newSettings = {} - for (const setting in settings) { - if (Object.keys(settings[setting]).length === 0) continue - newSettings[setting] = {} - for (const key in settings[setting]) { - newSettings[setting][`${key}.*`] = null - } - } - if (Object.keys(newSettings).length > 0) { - await client.cluster.putSettings(newSettings) - } - - if (isXPack) { - // delete ilm policies - const preserveIlmPolicies = [ - 'ilm-history-ilm-policy', - 'slm-history-ilm-policy', - 'watch-history-ilm-policy', - 'watch-history-ilm-policy-16', - 'ml-size-based-ilm-policy', - 'logs', - 'metrics', - 'synthetics', - '7-days-default', - '30-days-default', - '90-days-default', - '180-days-default', - '365-days-default', - '.fleet-actions-results-ilm-policy', - '.fleet-file-data-ilm-policy', - '.fleet-files-ilm-policy', - '.deprecation-indexing-ilm-policy', - '.monitoring-8-ilm-policy', - 'behavioral_analytics-events-default_policy' - ] - const policies = await client.ilm.getLifecycle() - for (const policy in policies) { - if (preserveIlmPolicies.includes(policy)) continue - await client.ilm.deleteLifecycle({ name: policy }) - } - - // delete autofollow patterns - const patterns = await client.ccr.getAutoFollowPattern() - for (const { name } of patterns.patterns) { - await client.ccr.deleteAutoFollowPattern({ name }) - } - - // delete all tasks - const nodesTask = await client.tasks.list() - const tasks = Object.keys(nodesTask.nodes) - .reduce((acc, node) => { - const { tasks } = nodesTask.nodes[node] - Object.keys(tasks).forEach(id => { - if (tasks[id].cancellable) acc.push(id) - }) - return acc - }, []) - - await helper.runInParallel( - client, 'tasks.cancel', - tasks.map(id => ({ task_id: id })) - ) - - // cleanup ml - const jobsList = await client.ml.getJobs() - const jobsIds = jobsList.jobs.map(j => j.job_id) - await helper.runInParallel( - client, 'ml.deleteJob', - jobsIds.map(j => ({ job_id: j, force: true })) - ) - - const dataFrame = await client.ml.getDataFrameAnalytics() - const dataFrameIds = dataFrame.data_frame_analytics.map(d => d.id) - await helper.runInParallel( - client, 'ml.deleteDataFrameAnalytics', - dataFrameIds.map(d => ({ id: d, force: true })) - ) - - const calendars = await client.ml.getCalendars() - const calendarsId = calendars.calendars.map(c => c.calendar_id) - await helper.runInParallel( - client, 'ml.deleteCalendar', - calendarsId.map(c => ({ calendar_id: c })) - ) - - const training = await client.ml.getTrainedModels() - const trainingId = training.trained_model_configs - .filter(t => t.created_by !== '_xpack') - .map(t => t.model_id) - await helper.runInParallel( - client, 'ml.deleteTrainedModel', - trainingId.map(t => ({ model_id: t, force: true })) - ) - - // cleanup transforms - const transforms = await client.transform.getTransform() - const transformsId = transforms.transforms.map(t => t.id) - await helper.runInParallel( - client, 'transform.deleteTransform', - transformsId.map(t => ({ transform_id: t, force: true })) - ) - } - - const shutdownNodes = await client.shutdown.getNode() - if (shutdownNodes._nodes == null && shutdownNodes.cluster_name == null) { - for (const node of shutdownNodes.nodes) { - await client.shutdown.deleteNode({ 
node_id: node.node_id }) - } - } - - // wait for pending task before resolving the promise - await sleep(100) - while (true) { - const body = await client.cluster.pendingTasks() - if (body.tasks.length === 0) break - await sleep(500) - } - } - - /** - * Runs the given test. - * It runs the test components in the following order: - * - skip check - * - xpack user - * - setup - * - the actual test - * - teardown - * - xpack cleanup - * - cleanup - * @param {object} setup (null if not needed) - * @param {object} test - * @param {object} teardown (null if not needed) - * @returns {Promise} - */ - async function run (setup, test, teardown, stats, junit) { - // if we should skip a feature in the setup/teardown section - // we should skip the entire test file - const skip = getSkip(setup) || getSkip(teardown) - if (skip && shouldSkip(esVersion, skip)) { - junit.skip(skip) - logSkip(skip) - return - } - - if (isXPack) { - // Some xpack test requires this user - // tap.comment('Creating x-pack user') - try { - await client.security.putUser({ - username: 'x_pack_rest_user', - password: 'x-pack-test-password', - roles: ['superuser'] - }) - } catch (err) { - assert.ifError(err, 'should not error: security.putUser') - } - } - - if (setup) await exec('Setup', setup, stats, junit) - - await exec('Test', test, stats, junit) - - if (teardown) await exec('Teardown', teardown, stats, junit) - - await cleanup(isXPack) - } - - /** - * Fill the stashed values of a command - * let's say the we have stashed the `master` value, - * is_true: nodes.$master.transport.profiles - * becomes - * is_true: nodes.new_value.transport.profiles - * @param {object|string} the action to update - * @returns {object|string} the updated action - */ - function fillStashedValues (obj) { - if (typeof obj === 'string') { - return getStashedValues(obj) - } - // iterate every key of the object - for (const key in obj) { - const val = obj[key] - // if the key value is a string, and the string includes '${' - // that we must update the content of '${...}'. - // eg: 'Basic ${auth}' we search the stahed value 'auth' - // and the resulting value will be 'Basic valueOfAuth' - if (typeof val === 'string' && val.includes('${')) { - while (obj[key].includes('${')) { - const val = obj[key] - const start = val.indexOf('${') - const end = val.indexOf('}', val.indexOf('${')) - const stashedKey = val.slice(start + 2, end) - const stashed = stash.get(stashedKey) - obj[key] = val.slice(0, start) + stashed + val.slice(end + 1) - } - continue - } - // handle json strings, eg: '{"hello":"$world"}' - if (typeof val === 'string' && val.includes('"$')) { - while (obj[key].includes('"$')) { - const val = obj[key] - const start = val.indexOf('"$') - const end = val.indexOf('"', start + 1) - const stashedKey = val.slice(start + 2, end) - const stashed = '"' + stash.get(stashedKey) + '"' - obj[key] = val.slice(0, start) + stashed + val.slice(end + 1) - } - continue - } - // if the key value is a string, and the string includes '$' - // we run the "update value" code - if (typeof val === 'string' && val.includes('$')) { - // update the key value - obj[key] = getStashedValues(val) - continue - } - - // go deep in the object - if (val !== null && typeof val === 'object') { - fillStashedValues(val) - } - } - - return obj - - function getStashedValues (str) { - const arr = str - // we split the string on the dots - // handle the key with a dot inside that is not a part of the path - .split(/(? 
{ - if (part[0] === '$') { - const stashed = stash.get(part.slice(1)) - if (stashed == null) { - throw new Error(`Cannot find stashed value '${part}' for '${JSON.stringify(obj)}'`) - } - return stashed - } - return part - }) - - // recreate the string value only if the array length is higher than one - // otherwise return the first element which in some test this could be a number, - // and call `.join` will coerce it to a string. - return arr.length > 1 ? arr.join('.') : arr[0] - } - } - - /** - * Stashes a value - * @param {string} the key to search in the previous response - * @param {string} the name to identify the stashed value - * @returns {TestRunner} - */ - function set (key, name) { - if (key.includes('_arbitrary_key_')) { - let currentVisit = null - for (const path of key.split('.')) { - if (path === '_arbitrary_key_') { - const keys = Object.keys(currentVisit) - const arbitraryKey = keys[getRandomInt(0, keys.length)] - stash.set(name, arbitraryKey) - } else { - currentVisit = delve(response, path) - } - } - } else { - stash.set(name, delve(response, key)) - } - } - - /** - * Applies a given transformation and stashes the result. - * @param {string} the name to identify the stashed value - * @param {string} the transformation function as string - * @returns {TestRunner} - */ - function transform_and_set (name, transform) { - if (/base64EncodeCredentials/.test(transform)) { - const [user, password] = transform - .slice(transform.indexOf('(') + 1, -1) - .replace(/ /g, '') - .split(',') - const userAndPassword = `${delve(response, user)}:${delve(response, password)}` - stash.set(name, Buffer.from(userAndPassword).toString('base64')) - } else { - throw new Error(`Unknown transform: '${transform}'`) - } - } - - /** - * Runs a client command - * @param {object} the action to perform - * @returns {Promise} - */ - async function doAction (action, stats) { - const cmd = await updateParams(parseDo(action)) - let api - try { - api = delve(client, cmd.method).bind(client) - } catch (err) { - console.error(`\nError: Cannot find the method '${cmd.method}' in the client.\n`) - process.exit(1) - } - - if (action.headers) { - switch (action.headers['Content-Type'] || action.headers['content-type']) { - case 'application/json': - delete action.headers['Content-Type'] - delete action.headers['content-type'] - action.headers['Content-Type'] = `application/vnd.elasticsearch+json; compatible-with=${packageJson.version.split('.')[0]}` - break - case 'application/x-ndjson': - delete action.headers['Content-Type'] - delete action.headers['content-type'] - action.headers['Content-Type'] = `application/vnd.elasticsearch+x-ndjson; compatible-with=${packageJson.version.split('.')[0]}` - break - } - } - - const options = { ignore: cmd.params.ignore, headers: action.headers, meta: true } - if (!Array.isArray(options.ignore)) options.ignore = [options.ignore] - if (cmd.params.ignore) delete cmd.params.ignore - - // ndjson apis should always send the body as an array - if (isNDJson(cmd.api) && !Array.isArray(cmd.params.body)) { - cmd.params.body = [cmd.params.body] - } - - if (typeof cmd.params.body === 'string' && !isNDJson(cmd.api)) { - cmd.params.body = JSON.parse(cmd.params.body) - } - - let err, result - try { - [err, result] = await to(api(cmd.params, options)) - } catch (exc) { - if (JSON.stringify(exc).includes('resource_already_exists_exception')) { - console.warn(`Resource already exists: ${JSON.stringify(cmd.params)}`) - // setup task was already done because cleanup didn't catch it? 
do nothing - } else { - throw exc - } - } - let warnings = result ? result.warnings : null - const body = result ? result.body : null - - if (action.warnings && warnings === null) { - assert.fail('We should get a warning header', action.warnings) - } else if (!action.warnings && warnings !== null) { - // if there is only the 'default shard will change' - // warning we skip the check, because the yaml - // spec may not be updated - let hasDefaultShardsWarning = false - warnings.forEach(h => { - if (/default\snumber\sof\sshards/g.test(h)) { - hasDefaultShardsWarning = true - } - }) - - if (hasDefaultShardsWarning === true && warnings.length > 1) { - assert.fail('We are not expecting warnings', warnings) - } - } else if (action.warnings && warnings !== null) { - // if the yaml warnings do not contain the - // 'default shard will change' warning - // we do not check it presence in the warnings array - // because the yaml spec may not be updated - let hasDefaultShardsWarning = false - action.warnings.forEach(h => { - if (/default\snumber\sof\sshards/g.test(h)) { - hasDefaultShardsWarning = true - } - }) - - if (hasDefaultShardsWarning === false) { - warnings = warnings.filter(h => !h.test(/default\snumber\sof\sshards/g)) - } - - stats.assertions += 1 - assert.deepEqual(warnings, action.warnings) - } - - if (action.catch) { - stats.assertions += 1 - assert.ok(err, `Expecting an error, but instead got ${JSON.stringify(err)}, the response was ${JSON.stringify(result)}`) - assert.ok( - parseDoError(err, action.catch), - `the error should match: ${action.catch}, found ${JSON.stringify(err.body)}` - ) - try { - response = JSON.parse(err.body) - } catch (e) { - response = err.body - } - } else { - stats.assertions += 1 - assert.ifError(err, `should not error: ${cmd.method}`, action) - response = body - } - } - - /** - * Runs an actual test - * @param {string} the name of the test - * @param {object} the actions to perform - * @returns {Promise} - */ - async function exec (name, actions, stats, junit) { - // tap.comment(name) - for (const action of actions) { - if (action.skip) { - if (shouldSkip(esVersion, action.skip)) { - junit.skip(fillStashedValues(action.skip)) - logSkip(fillStashedValues(action.skip)) - break - } - } - - if (action.do) { - await doAction(fillStashedValues(action.do), stats) - } - - if (action.set) { - const key = Object.keys(action.set)[0] - set(fillStashedValues(key), action.set[key]) - } - - if (action.transform_and_set) { - const key = Object.keys(action.transform_and_set)[0] - transform_and_set(key, action.transform_and_set[key]) - } - - if (action.match) { - stats.assertions += 1 - const key = Object.keys(action.match)[0] - match( - // in some cases, the yaml refers to the body with an empty string - key.split('.')[0] === '$body' || key === '' - ? response - : delve(response, fillStashedValues(key)), - key.split('.')[0] === '$body' - ? 
action.match[key] - : fillStashedValues(action.match)[key], - action.match, - response - ) - } - - if (action.lt) { - stats.assertions += 1 - const key = Object.keys(action.lt)[0] - lt( - delve(response, fillStashedValues(key)), - fillStashedValues(action.lt)[key], - response - ) - } - - if (action.gt) { - stats.assertions += 1 - const key = Object.keys(action.gt)[0] - gt( - delve(response, fillStashedValues(key)), - fillStashedValues(action.gt)[key], - response - ) - } - - if (action.lte) { - stats.assertions += 1 - const key = Object.keys(action.lte)[0] - lte( - delve(response, fillStashedValues(key)), - fillStashedValues(action.lte)[key], - response - ) - } - - if (action.gte) { - stats.assertions += 1 - const key = Object.keys(action.gte)[0] - gte( - delve(response, fillStashedValues(key)), - fillStashedValues(action.gte)[key], - response - ) - } - - if (action.length) { - stats.assertions += 1 - const key = Object.keys(action.length)[0] - length( - key === '$body' || key === '' - ? response - : delve(response, fillStashedValues(key)), - key === '$body' - ? action.length[key] - : fillStashedValues(action.length)[key], - response - ) - } - - if (action.is_true) { - stats.assertions += 1 - const isTrue = fillStashedValues(action.is_true) - is_true( - delve(response, isTrue), - isTrue, - response - ) - } - - if (action.is_false) { - stats.assertions += 1 - const isFalse = fillStashedValues(action.is_false) - is_false( - delve(response, isFalse), - isFalse, - response - ) - } - } - } - - return { run } -} - -/** - * Asserts that the given value is truthy - * @param {any} the value to check - * @param {string} an optional message - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function is_true (val, msg, response) { - try { - assert.ok((typeof val === 'string' && val.toLowerCase() === 'true') || val, `expect truthy value: ${msg} - value: ${JSON.stringify(val)}`) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the given value is falsey - * @param {any} the value to check - * @param {string} an optional message - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function is_false (val, msg, response) { - try { - assert.ok((typeof val === 'string' && val.toLowerCase() === 'false') || !val, `expect falsey value: ${msg} - value: ${JSON.stringify(val)}`) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that two values are the same - * @param {any} the first value - * @param {any} the second value - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function match (val1, val2, action, response) { - try { - // both values are objects - if (typeof val1 === 'object' && typeof val2 === 'object') { - assert.deepEqual(val1, val2, typeof action === 'object' ? 
JSON.stringify(action) : action) - // the first value is the body as string and the second a pattern string - } else if ( - typeof val1 === 'string' && typeof val2 === 'string' && - val2.startsWith('/') && (val2.endsWith('/\n') || val2.endsWith('/')) - ) { - const regStr = val2 - .replace(/(^|[^\\])#.*/g, '$1') - .replace(/(^|[^\\])\s+/g, '$1') - .slice(1, -1) - // 'm' adds the support for multiline regex - assert.match(val1, new RegExp(regStr, 'm'), `should match pattern provided: ${val2}, but got: ${val1}: ${JSON.stringify(action)}`) - } else if (typeof val1 === 'string' && typeof val2 === 'string') { - // string comparison - assert.include(val1, val2, `should include pattern provided: ${val2}, but got: ${val1}: ${JSON.stringify(action)}`) - } else { - // everything else - assert.equal(val1, val2, `should be equal: ${val1} - ${val2}, action: ${JSON.stringify(action)}`) - } - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the first value is less than the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function lt (val1, val2, response) { - try { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 < val2) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the first value is greater than the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function gt (val1, val2, response) { - try { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 > val2) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the first value is less than or equal the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function lte (val1, val2, response) { - try { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 <= val2) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the first value is greater than or equal the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} -*/ -function gte (val1, val2, response) { - try { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 >= val2) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the given value has the specified length - * @param {string|object|array} the object to check - * @param {number} the expected length - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function length (val, len, response) { - try { - if (typeof val === 'string' || Array.isArray(val)) { - assert.equal(val.length, len) - } else if (typeof val === 'object' && val !== null) { - assert.equal(Object.keys(val).length, len) - } else { - assert.fail(`length: the given value is invalid: ${val}`) - } - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** 
- * Gets a `do` action object and returns a structured object, - * where the action is the key and the parameter is the value. - * Eg: - * { - * 'indices.create': { - * 'index': 'test' - * }, - * 'warnings': [ - * '[index] is deprecated' - * ] - * } - * becomes - * { - * method: 'indices.create', - * params: { - * index: 'test' - * }, - * warnings: [ - * '[index] is deprecated' - * ] - * } - * @param {object} - * @returns {object} - */ -function parseDo (action) { - action = JSON.parse(JSON.stringify(action)) - - if (typeof action === 'string') action = { [action]: {} } - if (Array.isArray(action)) action = action[0] - - return Object.keys(action).reduce((acc, val) => { - switch (val) { - case 'catch': - acc.catch = action.catch - break - case 'warnings': - acc.warnings = action.warnings - break - case 'node_selector': - acc.node_selector = action.node_selector - break - default: - // converts underscore to camelCase - // eg: put_mapping => putMapping - acc.method = val.replace(/_([a-z])/g, g => g[1].toUpperCase()) - acc.api = val - acc.params = action[val] // camelify(action[val]) - if (typeof acc.params.body === 'string') { - try { - acc.params.body = JSON.parse(acc.params.body) - } catch (err) {} - } - } - return acc - }, {}) - - // function camelify (obj) { - // const newObj = {} - - // // TODO: add camelCase support for this fields - // const doNotCamelify = ['copy_settings'] - - // for (const key in obj) { - // const val = obj[key] - // let newKey = key - // if (!~doNotCamelify.indexOf(key)) { - // // if the key starts with `_` we should not camelify the first occurence - // // eg: _source_include => _sourceInclude - // newKey = key[0] === '_' - // ? '_' + key.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - // : key.replace(/_([a-z])/g, k => k[1].toUpperCase()) - // } - - // if ( - // val !== null && - // typeof val === 'object' && - // !Array.isArray(val) && - // key !== 'body' - // ) { - // newObj[newKey] = camelify(val) - // } else { - // newObj[newKey] = val - // } - // } - - // return newObj - // } -} - -function parseDoError (err, spec) { - const httpErrors = { - bad_request: 400, - unauthorized: 401, - forbidden: 403, - missing: 404, - request_timeout: 408, - conflict: 409, - unavailable: 503 - } - - if (httpErrors[spec]) { - return err.statusCode === httpErrors[spec] - } - - if (spec === 'request') { - return err.statusCode >= 400 && err.statusCode < 600 - } - - if (spec.startsWith('/') && spec.endsWith('/')) { - return new RegExp(spec.slice(1, -1), 'g').test(JSON.stringify(err.body)) - } - - if (spec === 'param') { - // the new client do not perform runtime checks, - // but it relies on typescript informing the user - return true - // return err instanceof ConfigurationError - } - - return false -} - -function getSkip (arr) { - if (!Array.isArray(arr)) return null - for (let i = 0; i < arr.length; i++) { - if (arr[i].skip) return arr[i].skip - } - return null -} - -// Gets two *maybe* numbers and returns two valida numbers -// it throws if one or both are not a valid number -// the returned value is an array with the new values -function getNumbers (val1, val2) { - const val1Numeric = Number(val1) - if (isNaN(val1Numeric)) { - throw new TypeError(`val1 is not a valid number: ${val1}`) - } - const val2Numeric = Number(val2) - if (isNaN(val2Numeric)) { - throw new TypeError(`val2 is not a valid number: ${val2}`) - } - return [val1Numeric, val2Numeric] -} - -function getRandomInt (min, max) { - return Math.floor(Math.random() * (max - min)) + min -} - -/** - * Logs a 
skip - * @param {object} the actions - * @returns {TestRunner} - */ -function logSkip (action) { - if (action.reason && action.version) { - console.log(`Skip: ${action.reason} (${action.version})`) - } else if (action.features) { - console.log(`Skip: ${JSON.stringify(action.features)})`) - } else { - console.log('Skipped') - } -} - -/** - * Decides if a test should be skipped - * @param {object} the actions - * @returns {boolean} - */ -function shouldSkip (esVersion, action) { - let shouldSkip = false - // skip based on the version - if (action.version) { - if (action.version.trim() === 'all') return true - const versions = action.version.split(',').filter(Boolean) - for (const version of versions) { - const [min, max] = version.split('-').map(v => v.trim()) - // if both `min` and `max` are specified - if (min && max) { - shouldSkip = semver.satisfies(esVersion, action.version) - // if only `min` is specified - } else if (min) { - shouldSkip = semver.gte(esVersion, min) - // if only `max` is specified - } else if (max) { - shouldSkip = semver.lte(esVersion, max) - // something went wrong! - } else { - throw new Error(`skip: Bad version range: ${action.version}`) - } - } - } - - if (shouldSkip) return true - - if (action.features) { - if (!Array.isArray(action.features)) action.features = [action.features] - // returns true if one of the features is not present in the supportedFeatures - shouldSkip = !!action.features.filter(f => !~supportedFeatures.indexOf(f)).length - } - - if (shouldSkip) return true - - return false -} - -function isNDJson (api) { - const spec = require(join(locations.specFolder, `${api}.json`)) - const { content_type } = spec[Object.keys(spec)[0]].headers - return Boolean(content_type && content_type.includes('application/x-ndjson')) -} - -/** - * Updates the array syntax of keys and values - * eg: 'hits.hits.1.stuff' to 'hits.hits[1].stuff' - * @param {object} the action to update - * @returns {obj} the updated action - */ -// function updateArraySyntax (obj) { -// const newObj = {} - -// for (const key in obj) { -// const newKey = key.replace(/\.\d{1,}\./g, v => `[${v.slice(1, -1)}].`) -// const val = obj[key] - -// if (typeof val === 'string') { -// newObj[newKey] = val.replace(/\.\d{1,}\./g, v => `[${v.slice(1, -1)}].`) -// } else if (val !== null && typeof val === 'object') { -// newObj[newKey] = updateArraySyntax(val) -// } else { -// newObj[newKey] = val -// } -// } - -// return newObj -// } - -module.exports = build From 821e77e7ad17e141eb9fca9330c74b57b6b2b1d7 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 22 Apr 2025 10:11:17 -0500 Subject: [PATCH 536/647] Support Apache Arrow 19 (#2782) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 28946e5be..6212c9f81 100644 --- a/package.json +++ b/package.json @@ -92,7 +92,7 @@ }, "dependencies": { "@elastic/transport": "^9.0.1", - "apache-arrow": "^18.0.0", + "apache-arrow": "18.x - 19.x", "tslib": "^2.4.0" }, "tap": { From be0b96b5f5f421c5002e903e6daad333e7ad9cad Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 22 Apr 2025 11:48:53 -0500 Subject: [PATCH 537/647] Update Arrow helper tests to test iteration over results (#2786) --- test/unit/helpers/esql.test.ts | 67 +++++++++++++++++++++++++++++----- 1 file changed, 58 insertions(+), 9 deletions(-) diff --git a/test/unit/helpers/esql.test.ts b/test/unit/helpers/esql.test.ts index 3a66ee7d4..af09c18ba 100644 --- a/test/unit/helpers/esql.test.ts +++ b/test/unit/helpers/esql.test.ts @@ -121,11 
+121,35 @@ test('ES|QL helper', t => { const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowTable() t.ok(result instanceof arrow.Table) + const testRecords = [ + [ + ['amount', 4.900000095367432], + ['date', 1729532586965] + ], + [ + ['amount', 8.199999809265137], + ['date', 1729446186965], + ], + [ + ['amount', 15.5], + ['date', 1729359786965], + ], + [ + ['amount', 9.899999618530273], + ['date', 1729273386965], + ], + [ + ['amount', 13.899999618530273], + ['date', 1729186986965], + ] + ] + + let count = 0 const table = [...result] - t.same(table[0], [ - ["amount", 4.900000095367432], - ["date", 1729532586965], - ]) + for (const record of table) { + t.same(record, testRecords[count]) + count++ + } t.end() }) @@ -182,11 +206,36 @@ test('ES|QL helper', t => { const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader() t.ok(result.isStream()) - const recordBatch = result.next().value - t.same(recordBatch.get(0)?.toJSON(), { - amount: 4.900000095367432, - date: 1729532586965, - }) + const testRecords = [ + { + amount: 4.900000095367432, + date: 1729532586965, + }, + { + amount: 8.199999809265137, + date: 1729446186965, + }, + { + amount: 15.5, + date: 1729359786965, + }, + { + amount: 9.899999618530273, + date: 1729273386965, + }, + { + amount: 13.899999618530273, + date: 1729186986965, + }, + ] + let count = 0 + for (const recordBatch of result) { + for (const record of recordBatch) { + t.same(record.toJSON(), testRecords[count]) + count++ + } + } + t.end() }) From 926b468c6d62be7c9733f21c6a7ff8855a04d66a Mon Sep 17 00:00:00 2001 From: Colleen McGinnis Date: Thu, 24 Apr 2025 11:51:45 -0500 Subject: [PATCH 538/647] [docs] Fix various syntax and rendering errors (#2776) --- docs/reference/api-reference.md | 1 + docs/reference/basic-config.md | 56 ---------- docs/reference/client-helpers.md | 182 +++++++++++++++++++++++++++---- docs/reference/connecting.md | 12 +- 4 files changed, 170 insertions(+), 81 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index c446c3b4a..b91c22313 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -980,6 +980,7 @@ PUT my-index-000001/_doc/1?version=2&version_type=external "id": "elkbee" } } +``` In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). diff --git a/docs/reference/basic-config.md b/docs/reference/basic-config.md index 82a537260..6c7d75505 100644 --- a/docs/reference/basic-config.md +++ b/docs/reference/basic-config.md @@ -82,8 +82,6 @@ auth: { } ``` ---- - ### `maxRetries` Type: `number`
    @@ -91,8 +89,6 @@ Default: `3` Max number of retries for each request. ---- - ### `requestTimeout` Type: `number`
    @@ -100,8 +96,6 @@ Default: `No value` Max request timeout in milliseconds for each request. ---- - ### `pingTimeout` Type: `number`
    @@ -109,8 +103,6 @@ Default: `3000` Max ping request timeout in milliseconds for each request. ---- - ### `sniffInterval` Type: `number, boolean`
    @@ -122,8 +114,6 @@ Perform a sniff operation every `n` milliseconds. Sniffing might not be the best solution. Before using the various `sniff` options, review this [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). ::: ---- - ### `sniffOnStart` Type: `boolean`
    @@ -131,8 +121,6 @@ Default: `false` Perform a sniff once the client is started. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). ---- - ### `sniffEndpoint` Type: `string`
    @@ -140,8 +128,6 @@ Default: `'_nodes/_all/http'` Endpoint to ping during a sniff. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). ---- - ### `sniffOnConnectionFault` Type: `boolean`
    @@ -149,8 +135,6 @@ Default: `false` Perform a sniff on connection fault. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). ---- - ### `resurrectStrategy` Type: `string`
    @@ -159,8 +143,6 @@ Default: `'ping'` Configure the node resurrection strategy.
    Options: `'ping'`, `'optimistic'`, `'none'` ---- - ### `suggestCompression` Type: `boolean`
    @@ -168,8 +150,6 @@ Default: `false` Adds an `accept-encoding` header to every request. ---- - ### `compression` Type: `string, boolean`
    @@ -178,8 +158,6 @@ Default: `false` Enables gzip request body compression.
    Options: `'gzip'`, `false` ---- - ### `tls` Type: `http.SecureContextOptions`
@@ -187,8 +165,6 @@ Default: `null`
 
 The [tls configuration](https://nodejs.org/api/tls.html).
 
----
-
 ### `proxy`
 
 Type: `string, URL`
    @@ -208,8 +184,6 @@ const client = new Client({ }) ``` ---- - ### `agent` Type: `http.AgentOptions, function`
    @@ -237,8 +211,6 @@ const client = new Client({ }) ``` ---- - ### `nodeFilter` Type: `function` @@ -260,8 +232,6 @@ function defaultNodeFilter (conn) { } ``` ---- - ### `nodeSelector` Type: `function`
    @@ -279,8 +249,6 @@ function nodeSelector (connections) { } ``` ---- - ### `generateRequestId` Type: `function`
    @@ -297,8 +265,6 @@ function generateRequestId (params, options) { } ``` ---- - ### `name` Type: `string, symbol`
    @@ -306,8 +272,6 @@ Default: `elasticsearch-js` The name to identify the client instance in the events. ---- - ### `opaqueIdPrefix` Type: `string`
    @@ -316,8 +280,6 @@ Default: `null` A string that will be use to prefix any `X-Opaque-Id` header. See [`X-Opaque-Id` support](/reference/observability.md#_x_opaque_id_support) for more details. ---- - ### `headers` Type: `object`
    @@ -325,8 +287,6 @@ Default: `{}` A set of custom headers to send in every request. ---- - ### `context` Type: `object`
    @@ -334,8 +294,6 @@ Default: `null` A custom object that you can use for observability in your events. It will be merged with the API level context option. ---- - ### `enableMetaHeader` Type: `boolean`
@@ -343,8 +301,6 @@ Default: `true`
 
 If true, adds a header named `'x-elastic-client-meta'`, containing some minimal telemetry data, such as the client and platform version.
 
----
-
 ### `cloud`
 
 Type: `object`
    @@ -366,16 +322,12 @@ const client = new Client({ }) ``` ---- - ### `disablePrototypePoisoningProtection` Default: `true` `boolean`, `'proto'`, `'constructor'` - The client can protect you against prototype poisoning attacks. For more information, refer to [Square Brackets are the Enemy](https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08). If needed, you can enable prototype poisoning protection entirely (`false`) or one of the two checks (`'proto'` or `'constructor'`). For performance reasons, it is disabled by default. To learn more, refer to the [`secure-json-parse` documentation](https://github.com/fastify/secure-json-parse). ---- - ### `caFingerprint` Type: `string`
    @@ -383,8 +335,6 @@ Default: `null` If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints. ---- - ### `maxResponseSize` Type: `number`
    @@ -392,8 +342,6 @@ Default: `null` When configured, `maxResponseSize` verifies that the uncompressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxResponseSize` cannot be higher than the value of `buffer.constants.MAX_STRING_LENGTH`. ---- - ### `maxCompressedResponseSize` Type: `number`
    @@ -401,8 +349,6 @@ Default: `null` When configured, `maxCompressedResponseSize` verifies that the compressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxCompressedResponseSize` cannot be higher than the value of `buffer.constants.MAX_STRING_LENGTH`. ---- - ### `redaction` Type: `object`
    @@ -414,8 +360,6 @@ Options for how to redact potentially sensitive data from metadata attached to ` [Read about redaction](/reference/advanced-config.md#redaction) for more details :::: ---- - ### `serverMode` Type: `string`
    diff --git a/docs/reference/client-helpers.md b/docs/reference/client-helpers.md index c80562db4..731df31cc 100644 --- a/docs/reference/client-helpers.md +++ b/docs/reference/client-helpers.md @@ -51,18 +51,120 @@ console.log(result) To create a new instance of the Bulk helper, access it as shown in the example above, the configuration options are: -| | | -| --- | --- | -| `datasource` | An array, async generator or a readable stream with the data you need to index/create/update/delete. It can be an array of strings or objects, but also a stream of json strings or JavaScript objects.
    If it is a stream, we recommend using the [`split2`](https://www.npmjs.com/package/split2) package, which splits the stream on newline delimiters.
    This parameter is mandatory.

    ```js
    const { createReadStream } = require('fs')
    const split = require('split2')
    const b = client.helpers.bulk({
    // if you just use split(), the data will be used as an array of strings
    datasource: createReadStream('./dataset.ndjson').pipe(split())
    // if you need to manipulate the data, you can pass JSON.parse to split
    datasource: createReadStream('./dataset.ndjson').pipe(split(JSON.parse))
    })
    ```
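
    A minimal sketch of the async generator case; the node URL, index name, and the `fetchPage` paging helper are illustrative stand-ins:

    ```js
    const { Client } = require('@elastic/elasticsearch')

    const client = new Client({ node: 'http://localhost:9200/' })

    // stand-in for any paginated source: a database cursor, a REST API, etc.
    async function fetchPage (page) {
      return page < 3 ? [{ page, msg: `document from page ${page}` }] : []
    }

    async function * generator () {
      let page = 0
      let docs = await fetchPage(page)
      while (docs.length > 0) {
        // yield one document at a time; the helper takes care of batching
        for (const doc of docs) yield doc
        docs = await fetchPage(++page)
      }
    }

    const b = client.helpers.bulk({
      datasource: generator(),
      onDocument (doc) {
        return { index: { _index: 'my-index' } }
      }
    })
    ```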
    | -| `onDocument` | A function that is called for each document of the datasource. Inside this function you can manipulate the document and you must return the operation you want to execute with the document. Look at the [Bulk API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) to see the supported operations.
    This parameter is mandatory.

    ```js
    const b = client.helpers.bulk({
    onDocument (doc) {
    return {
    index: { _index: 'my-index' }
    }
    }
    })
    ```
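
    Because the returned value selects the operation, `onDocument` can also mix operations per document. A sketch reusing the `client` from the examples above; the `deleted` flag and index name are illustrative:

    ```js
    const b = client.helpers.bulk({
      datasource: [
        { id: '1', deleted: true },
        { id: '2', deleted: false, title: 'hello world' }
      ],
      onDocument (doc) {
        if (doc.deleted) {
          // delete operations take no document payload
          return { delete: { _index: 'my-index', _id: doc.id } }
        }
        // update operations return a tuple: the action and the update options
        return [
          { update: { _index: 'my-index', _id: doc.id } },
          { doc_as_upsert: true }
        ]
      }
    })
    ```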
    |
-| `onDrop` | A function that is called every time a document can’t be indexed and has reached the maximum number of retries.

    ```js
    const b = client.helpers.bulk({
    onDrop (doc) {
    console.log(doc)
    }
    })
    ```
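
    A common pattern is to collect everything that was dropped so it can be logged or retried once the run completes; a minimal sketch:

    ```js
    const droppedDocs = []

    const b = client.helpers.bulk({
      datasource: [{ msg: 'hello' }, { msg: 'world' }],
      onDocument (doc) {
        return { index: { _index: 'my-index' } }
      },
      onDrop (doc) {
        // each entry carries the failed document and the error Elasticsearch reported
        droppedDocs.push(doc)
      }
    })
    ```

    Once the promise returned by the helper resolves, `droppedDocs` holds every document that exhausted its retries.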
    | -| `onSuccess` | A function that is called for each successful operation in the bulk request, which includes the result from Elasticsearch along with the original document that was sent, or `null` for delete operations.

    ```js
    const b = client.helpers.bulk({
    onSuccess ({ result, document }) {
    console.log(`SUCCESS: Document ${result.index._id} indexed to ${result.index._index}`)
    }
    })
    ```
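
    Since delete operations carry no source, `document` is `null` for them. A minimal sketch that branches on that; the `result.delete` key is inferred from the operation-keyed result shape shown above:

    ```js
    const b = client.helpers.bulk({
      onSuccess ({ result, document }) {
        if (document == null) {
          // delete operations have no original document
          console.log('DELETED:', result.delete._id)
        } else {
          console.log('WROTE:', document)
        }
      }
    })
    ```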
    |
-| `flushBytes` | The size of the bulk body in bytes to reach before it is sent. Defaults to 5MB.
    *Default:* `5000000`

    ```js
    const b = client.helpers.bulk({
    flushBytes: 1000000
    })
    ```
    | -| `flushInterval` | How much time (in milliseconds) the helper waits before flushing the body from the last document read.
    *Default:* `30000`

    ```js
    const b = client.helpers.bulk({
    flushInterval: 30000
    })
    ```
    |
-| `concurrency` | How many requests are executed at the same time.
    *Default:* `5`

    ```js
    const b = client.helpers.bulk({
    concurrency: 10
    })
    ```
    |
-| `retries` | How many times a document is retried before the `onDrop` callback is called.
    *Default:* Client max retries.

    ```js
    const b = client.helpers.bulk({
    retries: 3
    })
    ```
    | -| `wait` | How much time to wait before retries in milliseconds.
    *Default:* 5000.

    ```js
    const b = client.helpers.bulk({
    wait: 3000
    })
    ```
    | -| `refreshOnCompletion` | If `true`, at the end of the bulk operation it runs a refresh on all indices or on the specified indices.
    *Default:* false.

    ```js
    const b = client.helpers.bulk({
    refreshOnCompletion: true
    // or
    refreshOnCompletion: 'index-name'
    })
    ```
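
    For orientation, the options above can be combined in a single call; all values here are purely illustrative:

    ```js
    const result = await client.helpers.bulk({
      datasource: [{ msg: 'hello' }, { msg: 'world' }],
      onDocument (doc) {
        return { index: { _index: 'my-index' } }
      },
      flushBytes: 1000000,
      flushInterval: 30000,
      concurrency: 10,
      retries: 3,
      wait: 3000,
      refreshOnCompletion: true,
      onDrop (doc) {
        console.log('dropped:', doc)
      }
    })

    // resolves with the run statistics (totals, failures, timing, ...)
    console.log(result)
    ```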
    | + +`datasource` +: An array, async generator or a readable stream with the data you need to index/create/update/delete. It can be an array of strings or objects, but also a stream of json strings or JavaScript objects. + If it is a stream, we recommend to use the [`split2`](https://www.npmjs.com/package/split2) package, that splits the stream on new lines delimiters. + This parameter is mandatory. + + ```js + const { createReadStream } = require('fs') + const split = require('split2') + const b = client.helpers.bulk({ + // if you just use split(), the data will be used as array of strings + datasource: createReadStream('./dataset.ndjson').pipe(split()) + // if you need to manipulate the data, you can pass JSON.parse to split + datasource: createReadStream('./dataset.ndjson').pipe(split(JSON.parse)) + }) + ``` + +`onDocument` +: A function that is called for each document of the datasource. Inside this function you can manipulate the document and you must return the operation you want to execute with the document. Look at the [Bulk API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) to see the supported operations. + This parameter is mandatory. + + ```js + const b = client.helpers.bulk({ + onDocument (doc) { + return { + index: { _index: 'my-index' } + } + } + }) + ``` + +`onDrop` +: A function that is called for everytime a document can’t be indexed and it has reached the maximum amount of retries. + + ```js + const b = client.helpers.bulk({ + onDrop (doc) { + console.log(doc) + } + }) + ``` + +`onSuccess` +: A function that is called for each successful operation in the bulk request, which includes the result from Elasticsearch along with the original document that was sent, or `null` for delete operations. + + ```js + const b = client.helpers.bulk({ + onSuccess ({ result, document }) { + console.log(`SUCCESS: Document ${result.index._id} indexed to ${result.index._index}`) + } + }) + ``` + +`flushBytes` +: The size of the bulk body in bytes to reach before to send it. Default of 5MB. + *Default:* `5000000` + + ```js + const b = client.helpers.bulk({ + flushBytes: 1000000 + }) + ``` + +`flushInterval` +: How much time (in milliseconds) the helper waits before flushing the body from the last document read. + *Default:* `30000` + + ```js + const b = client.helpers.bulk({ + flushInterval: 30000 + }) + ``` + +`concurrency` +: How many request is executed at the same time. + *Default:* `5` + + ```js + const b = client.helpers.bulk({ + concurrency: 10 + }) + ``` + +`retries` +: How many times a document is retried before to call the `onDrop` callback. + *Default:* Client max retries. + + ```js + const b = client.helpers.bulk({ + retries: 3 + }) + ``` + +`wait` +: How much time to wait before retries in milliseconds. + *Default:* 5000. + + ```js + const b = client.helpers.bulk({ + wait: 3000 + }) + ``` + +`refreshOnCompletion` +: If `true`, at the end of the bulk operation it runs a refresh on all indices or on the specified indices. + *Default:* false. + + ```js + const b = client.helpers.bulk({ + refreshOnCompletion: true + // or + refreshOnCompletion: 'index-name' + }) + ``` ### Supported operations [_supported_operations] @@ -255,13 +357,55 @@ m.search( To create a new instance of the multi search (msearch) helper, you should access it as shown in the example above, the configuration options are: -| | | -| --- | --- | -| `operations` | How many search operations should be sent in a single msearch request.
    *Default:* `5`

    ```js
    const m = client.helpers.msearch({
    operations: 10
    })
    ```
    | -| `flushInterval` | How much time (in milliseconds) the helper waits before flushing the operations from the last operation read.
    *Default:* `500`

    ```js
    const m = client.helpers.msearch({
    flushInterval: 500
    })
    ```
    |
-| `concurrency` | How many requests are executed at the same time.
    *Default:* `5`

    ```js
    const m = client.helpers.msearch({
    concurrency: 10
    })
    ```
    |
-| `retries` | How many times an operation is retried before the request is resolved. An operation is retried only in case of a 429 error.
    *Default:* Client max retries.

    ```js
    const m = client.helpers.msearch({
    retries: 3
    })
    ```
    | -| `wait` | How much time to wait before retries in milliseconds.
    *Default:* 5000.

    ```js
    const m = client.helpers.msearch({
    wait: 3000
    })
    ```
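
    As with the bulk helper, these options are set once when the msearch instance is created, and individual searches are then funneled through it; a sketch with illustrative values:

    ```js
    const m = client.helpers.msearch({
      operations: 10,
      flushInterval: 500,
      concurrency: 10,
      retries: 3,
      wait: 3000
    })

    m.search(
      { index: 'my-index' },
      { query: { match: { title: 'javascript' } } }
    )
      .then(result => console.log(result))
      .catch(err => console.error(err))
    ```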
    | +`operations` +: How many search operations should be sent in a single msearch request. + *Default:* `5` + + ```js + const m = client.helpers.msearch({ + operations: 10 + }) + ``` + +`flushInterval` +: How much time (in milliseconds) the helper waits before flushing the operations from the last operation read. + *Default:* `500` + + ```js + const m = client.helpers.msearch({ + flushInterval: 500 + }) + ``` + +`concurrency` +: How many request is executed at the same time. + *Default:* `5` + + ```js + const m = client.helpers.msearch({ + concurrency: 10 + }) + ``` + +`retries` +: How many times an operation is retried before to resolve the request. An operation is retried only in case of a 429 error. + *Default:* Client max retries. + + ```js + const m = client.helpers.msearch({ + retries: 3 + }) + ``` + +`wait` +: How much time to wait before retries in milliseconds. + *Default:* 5000. + + ```js + const m = client.helpers.msearch({ + wait: 3000 + }) + ``` ### Stopping the msearch helper [_stopping_the_msearch_helper] diff --git a/docs/reference/connecting.md b/docs/reference/connecting.md index 887dc587d..82a121584 100644 --- a/docs/reference/connecting.md +++ b/docs/reference/connecting.md @@ -332,21 +332,22 @@ The supported request specific options are: | Option | Description | | --- | ----------- | | `ignore` | `number[]` -  HTTP status codes which should not be considered errors for this request.
    *Default:* `null` | -| `requestTimeout` | `number` or `string` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See [Elasticsearch best practices for HTML clients](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more info.
    _Default:* No timeout | +| `requestTimeout` | `number` or `string` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See [Elasticsearch best practices for HTML clients](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more info.
    _Default:_ No timeout | | `retryOnTimeout` | `boolean` - Retry requests that have timed out.*Default:* `false` | | `maxRetries` | `number` - Max number of retries for the request, it overrides the client default.
    *Default:* `3` | | `compression` | `string` or `boolean` - Enables body compression for the request.
    *Options:* `false`, `'gzip'`
    *Default:* `false` | | `asStream` | `boolean` - Instead of getting the parsed body back, you get the raw Node.js stream of data.
    *Default:* `false` | | `headers` | `object` - Custom headers for the request.
    *Default:* `null` | -|`querystring` | `object` - Custom querystring for the request.
    *Default:* `null` | +| `querystring` | `object` - Custom querystring for the request.
    *Default:* `null` | | `id` | `any` - Custom request ID. *(overrides the top level request id generator)*
    *Default:* `null` | | `context` | `any` - Custom object per request. *(you can use it to pass data to the clients events)*
    *Default:* `null` | -| `opaqueId` | `string` - Set the `X-Opaque-Id` HTTP header. See [X-Opaque-Id HTTP header](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#x-opaque-id) *Default:* `null` | +| `opaqueId` | `string` - Set the `X-Opaque-Id` HTTP header. See [X-Opaque-Id HTTP header](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#x-opaque-id)
    *Default:* `null` |
| `maxResponseSize` | `number` - When configured, it verifies that the uncompressed response size is lower than the configured number; if it’s higher, it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENGTH
    *Default:* `null` |
| `maxCompressedResponseSize` | `number` - When configured, it verifies that the compressed response size is lower than the configured number; if it’s higher, it will abort the request. It cannot be higher than buffer.constants.MAX_LENGTH
    *Default:* `null` | | `signal` | `AbortSignal` - The AbortSignal instance to allow request abortion.
    *Default:* `null` | | `meta` | `boolean` - Rather than returning the body, return an object containing `body`, `statusCode`, `headers` and `meta` keys
    *Default*: `false` | -| `redaction` | `object` - Options for redacting potentially sensitive data from error metadata. See [Redaction of potentially sensitive data](/reference/advanced-config.md#redaction). | `retryBackoff` | +| `redaction` | `object` - Options for redacting potentially sensitive data from error metadata. See [Redaction of potentially sensitive data](/reference/advanced-config.md#redaction). | +| `retryBackoff` | `(min: number, max: number, attempt: number) => number;` - A function that calculates how long to sleep, in seconds, before the next request retry
    _Default:_ A built-in function that uses exponential backoff with jitter. | ## Using the Client in a Function-as-a-Service Environment [client-faas-env] @@ -461,9 +462,8 @@ console.log(errors) You can find the errors exported by the client in the table below. -| | | | -| --- | --- | --- | | **Error** | **Description** | **Properties** | +| --- | --- | --- | | `ElasticsearchClientError` | Every error inherits from this class, it is the basic error generated by the client. | * `name` - `string`
    * `message` - `string`
    | | `TimeoutError` | Generated when a request exceeds the `requestTimeout` option. | * `name` - `string`
    * `message` - `string`
    * `meta` - `object`, contains all the information about the request
    | | `ConnectionError` | Generated when an error occurs during the request, it can be a connection error or a malformed stream of data. | * `name` - `string`
    * `message` - `string`
    * `meta` - `object`, contains all the information about the request
    | From 710b937bff82c13fd326ed8f59c1839e57baa103 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 24 Apr 2025 14:11:33 -0500 Subject: [PATCH 539/647] Use async reader for parsing Apache Arrow responses (#2788) --- docs/reference/client-helpers.md | 4 +-- src/helpers.ts | 12 +++---- test/unit/helpers/esql.test.ts | 58 +++++++++++++------------------- 3 files changed, 32 insertions(+), 42 deletions(-) diff --git a/docs/reference/client-helpers.md b/docs/reference/client-helpers.md index 731df31cc..ad170d5bf 100644 --- a/docs/reference/client-helpers.md +++ b/docs/reference/client-helpers.md @@ -619,7 +619,7 @@ Added in `v8.16.0` ES|QL can return results in multiple binary formats, including [Apache Arrow](https://arrow.apache.org/)'s streaming format. Because it is a very efficient format to read, it can be valuable for performing high-performance in-memory analytics. And, because the response is streamed as batches of records, it can be used to produce aggregations and other calculations on larger-than-memory data sets. -`toArrowReader` returns a [`RecordBatchStreamReader`](https://arrow.apache.org/docs/js/classes/Arrow_dom.RecordBatchReader.md). +`toArrowReader` returns an [`AsyncRecordBatchStreamReader`](https://github.com/apache/arrow/blob/520ae44272d491bbb52eb3c9b84864ed7088f11a/js/src/ipc/reader.ts#L216). ```ts const reader = await client.helpers @@ -627,7 +627,7 @@ const reader = await client.helpers .toArrowReader() // print each record as JSON -for (const recordBatch of reader) { +for await (const recordBatch of reader) { for (const record of recordBatch) { console.log(record.toJSON()) } diff --git a/src/helpers.ts b/src/helpers.ts index 39e5a3e70..e8a64545a 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -11,7 +11,7 @@ import assert from 'node:assert' import * as timersPromises from 'node:timers/promises' import { Readable } from 'node:stream' import { errors, TransportResult, TransportRequestOptions, TransportRequestOptionsWithMeta } from '@elastic/transport' -import { Table, TypeMap, tableFromIPC, RecordBatchStreamReader } from 'apache-arrow/Arrow.node' +import { Table, TypeMap, tableFromIPC, AsyncRecordBatchStreamReader } from 'apache-arrow/Arrow.node' import Client from './client' import * as T from './api/types' import { Id } from './api/types' @@ -135,7 +135,7 @@ export interface EsqlColumn { export interface EsqlHelper { toRecords: () => Promise> toArrowTable: () => Promise> - toArrowReader: () => Promise + toArrowReader: () => Promise } export interface EsqlToRecords { @@ -1000,7 +1000,7 @@ export default class Helpers { return tableFromIPC(response) }, - async toArrowReader (): Promise { + async toArrowReader (): Promise { if (metaHeader !== null) { reqOptions.headers = reqOptions.headers ?? 
{} reqOptions.headers['x-elastic-client-meta'] = `${metaHeader as string},h=qa` @@ -1009,9 +1009,9 @@ export default class Helpers { params.format = 'arrow' - // @ts-expect-error the return type will be ArrayBuffer when the format is set to 'arrow' - const response: ArrayBuffer = await client.esql.query(params, reqOptions) - return RecordBatchStreamReader.from(response) + // @ts-expect-error response is a Readable when asStream is true + const response: Readable = await client.esql.query(params, reqOptions) + return await AsyncRecordBatchStreamReader.from(Readable.from(response)) } } diff --git a/test/unit/helpers/esql.test.ts b/test/unit/helpers/esql.test.ts index af09c18ba..dace000c6 100644 --- a/test/unit/helpers/esql.test.ts +++ b/test/unit/helpers/esql.test.ts @@ -182,17 +182,28 @@ test('ES|QL helper', t => { t.end() }) - test('toArrowReader', t => { - t.test('Parses a binary response into an Arrow stream reader', async t => { - const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA=' + test('toArrowReader', async t => { + const testRecords = [ + { amount: 4.900000095367432, }, + { amount: 8.199999809265137, }, + { amount: 15.5, }, + { amount: 9.899999618530273, }, + { amount: 13.899999618530273, }, + ] + + // build reusable Arrow table + const table = arrow.tableFromJSON(testRecords) + const rawData = await arrow.RecordBatchStreamWriter.writeAll(table).toUint8Array() + t.test('Parses a binary response into an Arrow stream reader', async t => { const MockConnection = connection.buildMockConnection({ onRequest (_params) { return { - body: Buffer.from(binaryContent, 'base64'), + body: Buffer.from(rawData), statusCode: 200, headers: { - 'content-type': 'application/vnd.elasticsearch+arrow+stream' + 'content-type': 'application/vnd.elasticsearch+arrow+stream', + 'transfer-encoding': 'chunked' } } } @@ -206,30 +217,8 @@ test('ES|QL helper', t => { const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader() t.ok(result.isStream()) - const testRecords = [ - { - amount: 4.900000095367432, - date: 1729532586965, - }, - { - amount: 8.199999809265137, - date: 1729446186965, - }, - { - amount: 15.5, - date: 1729359786965, - }, - { - amount: 9.899999618530273, - date: 1729273386965, - }, - { - amount: 13.899999618530273, - date: 1729186986965, - }, - ] let count = 0 - for (const recordBatch of result) { + for await (const recordBatch of result) { for (const record of recordBatch) { t.same(record.toJSON(), testRecords[count]) count++ @@ -240,17 +229,16 @@ test('ES|QL helper', t => { }) t.test('ESQL helper uses correct x-elastic-client-meta helper value', async t => { - const binaryContent = 
'/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA=' - const MockConnection = connection.buildMockConnection({ onRequest (params) { const header = params.headers?.['x-elastic-client-meta'] ?? '' t.ok(header.includes('h=qa'), `Client meta header does not include ESQL helper value: ${header}`) return { - body: Buffer.from(binaryContent, 'base64'), + body: Buffer.from(rawData), statusCode: 200, headers: { - 'content-type': 'application/vnd.elasticsearch+arrow+stream' + 'content-type': 'application/vnd.elasticsearch+arrow+stream', + 'transfer-encoding': 'chunked' } } } @@ -289,10 +277,12 @@ test('ES|QL helper', t => { new arrow.RecordBatch(schema, batch3.data), ]) + const rawData = await arrow.RecordBatchStreamWriter.writeAll(table).toUint8Array() + const MockConnection = connection.buildMockConnection({ onRequest (_params) { return { - body: Buffer.from(arrow.tableToIPC(table, "stream")), + body: Buffer.from(rawData), statusCode: 200, headers: { 'content-type': 'application/vnd.elasticsearch+arrow+stream' @@ -310,7 +300,7 @@ test('ES|QL helper', t => { t.ok(result.isStream()) let counter = 0 - for (const batch of result) { + for await (const batch of result) { for (const row of batch) { counter++ const { id, val } = row.toJSON() From 41a2159f63bb6428d2331e8bffc5ebe0c13a0080 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 24 Apr 2025 14:20:38 -0500 Subject: [PATCH 540/647] Update docs to clarify 9.x compatibility (#2789) --- README.md | 48 +++++++++++--------------------- docs/reference/client-helpers.md | 1 - docs/reference/index.md | 4 --- docs/reference/installation.md | 32 ++++++++++----------- 4 files changed, 31 insertions(+), 54 deletions(-) diff --git a/README.md b/README.md index fb0d45a32..cd0338f95 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ # Elasticsearch Node.js client -[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://badge.buildkite.com/15e4246eb268ea78f6e10aa90bce38c1abb0a4489e79f5a0ac.svg)](https://buildkite.com/elastic/elasticsearch-javascript-client-integration-tests/builds?branch=main) [![Node CI](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml/badge.svg)](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) +[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build 
Status](https://badge.buildkite.com/15e4246eb268ea78f6e10aa90bce38c1abb0a4489e79f5a0ac.svg)](https://buildkite.com/elastic/elasticsearch-javascript-client-integration-tests/builds?branch=main) [![Node CI](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml/badge.svg)](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) **[Download the latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch)** or @@ -34,11 +34,12 @@ the new features of the 8.13 version of Elasticsearch, the 8.13 client version is required for that. Elasticsearch language clients are only backwards compatible with default distributions and without guarantees made. -| Elasticsearch Version | Elasticsearch-JS Branch | Supported | -| --------------------- | ------------------------ | --------- | -| main | main | | -| 8.x | 8.x | 8.x | -| 7.x | 7.x | 7.17 | +| Elasticsearch Version | Elasticsearch-JS Branch | +| --------------------- | ----------------------- | +| main | main | +| 9.x | 9.x | +| 8.x | 8.x | +| 7.x | 7.x | ## Usage @@ -65,36 +66,20 @@ to support that version for at least another minor release. If you are using the with a version of Node.js that will be unsupported soon, you will see a warning in your logs (the client will start logging the warning with two minors in advance). -Unless you are **always** using a supported version of Node.js, +Unless you are **always** using a supported version of Node.js, we recommend defining the client dependency in your `package.json` with the `~` instead of `^`. In this way, you will lock the dependency on the minor release and not the major. (for example, `~7.10.0` instead of `^7.10.0`). -| Node.js Version | Node.js EOL date | End of support | -| --------------- |------------------| ---------------------- | -| `8.x` | `December 2019` | `7.11` (early 2021) | -| `10.x` | `April 2021` | `7.12` (mid 2021) | -| `12.x` | `April 2022` | `8.2` (early 2022) | -| `14.x` | `April 2023` | `8.8` (early 2023) | -| `16.x` | `September 2023` | `8.11` (late 2023) | - -### Compatibility - -Language clients are forward compatible; meaning that clients support communicating with greater or equal minor versions of Elasticsearch. -Elasticsearch language clients are only backwards compatible with default distributions and without guarantees made. 
-
-| Elasticsearch Version | Client Version |
-| --------------------- |----------------|
-| `8.x` | `8.x` |
-| `7.x` | `7.x` |
-| `6.x` | `6.x` |
-| `5.x` | `5.x` |
-
-To install a specific major of the client, run the following command:
-```
-npm install @elastic/elasticsearch@
-```
+| Node.js Version | Node.js EOL date | End of support |
+| --------------- | ---------------- | ------------------- |
+| `8.x` | `December 2019` | `7.11` (early 2021) |
+| `10.x` | `April 2021` | `7.12` (mid 2021) |
+| `12.x` | `April 2022` | `8.2` (early 2022) |
+| `14.x` | `April 2023` | `8.8` (early 2023) |
+| `16.x` | `September 2023` | `8.11` (late 2023) |
+| `18.x` | `April 2025` | `9.2` (late 2025) |

#### Browser

@@ -117,6 +102,7 @@ We recommend that you write a lightweight proxy that uses this client instead, y
* [Examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html)

## Install multiple versions
+
If you are using multiple versions of Elasticsearch, you need to use multiple versions of the client. In the past, installing multiple versions of the same package was not possible, but with `npm v6.9`, you can do that via aliasing.

The command you must run to install a different version of the client is:
diff --git a/docs/reference/client-helpers.md b/docs/reference/client-helpers.md
index ad170d5bf..076a9200b 100644
--- a/docs/reference/client-helpers.md
+++ b/docs/reference/client-helpers.md
@@ -51,7 +51,6 @@ console.log(result)
To create a new instance of the Bulk helper, access it as shown in the example above. The configuration options are:
-
- `datasource` : An array, async generator or a readable stream with the data you need to index/create/update/delete. It can be an array of strings or objects, but also a stream of JSON strings or JavaScript objects. If it is a stream, we recommend using the [`split2`](https://www.npmjs.com/package/split2) package, which splits the stream on newline delimiters.
diff --git a/docs/reference/index.md b/docs/reference/index.md
index 22b745b70..cb62bde97 100644
--- a/docs/reference/index.md
+++ b/docs/reference/index.md
@@ -8,7 +8,6 @@ mapped_pages:

This is the official Node.js client for {{es}}. This page gives a quick overview about the features of the client.

-
## Features [_features]

* One-to-one mapping with REST API.
@@ -19,7 +18,6 @@ This is the official Node.js client for {{es}}.
* Child client support.
* TypeScript support out of the box.

-
### Install multiple versions [_install_multiple_versions]

If you are using multiple versions of {{es}}, you need to use multiple versions of the client as well. In the past, installing multiple versions of the same package was not possible, but with `npm v6.9`, you can do it via aliasing.
@@ -74,5 +72,3 @@ npm install esmain@github:elastic/elasticsearch-js
::::{warning}
This command installs the main branch of the client which is not considered stable.
::::
-
-
diff --git a/docs/reference/installation.md b/docs/reference/installation.md
index 2f29fd57e..07387d1b1 100644
--- a/docs/reference/installation.md
+++ b/docs/reference/installation.md
@@ -21,45 +21,41 @@ npm install @elastic/elasticsearch@
To learn more about the supported major versions, please refer to the [Compatibility matrix](#js-compatibility-matrix).

-
## Node.js support [nodejs-support]

::::{note}
The minimum supported version of Node.js is `v18`.
::::

-
The client versioning follows the {{stack}} versioning; this means that major, minor, and patch releases are done following a precise schedule that often does not coincide with the [Node.js release](https://nodejs.org/en/about/releases/) times. To avoid supporting insecure and unsupported versions of Node.js, the client **will drop the support of EOL versions of Node.js between minor releases**. Typically, as soon as a Node.js version goes into EOL, the client will continue to support that version for at least another minor release. If you are using the client with a version of Node.js that will be unsupported soon, you will see a warning in your logs (the client will start logging the warning with two minors in advance).

Unless you are **always** using a supported version of Node.js, we recommend defining the client dependency in your `package.json` with the `~` instead of `^`. In this way, you will lock the dependency on the minor release and not the major (for example, `~7.10.0` instead of `^7.10.0`).

-| Node.js Version | Node.js EOL date | End of support |
-| --- | --- | --- |
-| `8.x` | December 2019 | `7.11` (early 2021) |
-| `10.x` | April 2021 | `7.12` (mid 2021) |
-| `12.x` | April 2022 | `8.2` (early 2022) |
-| `14.x` | April 2023 | `8.8` (early 2023) |
-| `16.x` | September 2023 | `8.11` (late 2023) |
-
+| Node.js Version | Node.js EOL date | End of support |
+| --------------- | ---------------- | ------------------- |
+| `8.x` | December 2019 | `7.11` (early 2021) |
+| `10.x` | April 2021 | `7.12` (mid 2021) |
+| `12.x` | April 2022 | `8.2` (early 2022) |
+| `14.x` | April 2023 | `8.8` (early 2023) |
+| `16.x` | September 2023 | `8.11` (late 2023) |
+| `18.x` | April 2025 | `9.2` (late 2025) |

## Compatibility matrix [js-compatibility-matrix]

Language clients are forward compatible; meaning that clients support communicating with greater or equal minor versions of {{es}} without breaking. It does not mean that the client automatically supports new features of newer {{es}} versions; it is only possible after a release of a new client version. For example, an 8.12 client version won’t automatically support the new features of the 8.13 version of {{es}}; the 8.13 client version is required for that. {{es}} language clients are only backwards compatible with default distributions and without guarantees made.

| {{es}} Version | Client Version | Supported |
-| --- | --- | --- |
-| `8.x` | `8.x` | `8.x` |
-| `7.x` | `7.x` | `7.17` |
-| `6.x` | `6.x` | |
-| `5.x` | `5.x` | |
-
+| -------------- | -------------- | --------- |
+| `9.x` | `9.x` | `9.x` |
+| `8.x` | `8.x` | `8.x` |
+| `7.x` | `7.x` | `7.17` |
+| `6.x` | `6.x` | |
+| `5.x` | `5.x` | |

### Browser [_browser]

::::{warning}
There is no official support for the browser environment. It exposes your {{es}} instance to everyone, which could lead to security issues. We recommend that you write a lightweight proxy that uses this client instead; you can see a proxy example [here](https://github.com/elastic/elasticsearch-js/tree/master/docs/examples/proxy).
:::: - - From f400e68ad1429b9a5b753423a3d9bbec279732de Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 25 Apr 2025 10:42:27 -0500 Subject: [PATCH 541/647] Release notes for 9.0.1 (#2764) --- docs/release-notes/index.md | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md index 0705942e3..75c68cb49 100644 --- a/docs/release-notes/index.md +++ b/docs/release-notes/index.md @@ -20,9 +20,17 @@ To check for security updates, go to [Security announcements for the Elastic sta % ### Fixes [elasticsearch-javascript-client-next-fixes] % \* -## 9.0.0 [elasticsearch-javascript-client-900-release-notes] +## 9.0.1 -### Features and enhancements [elasticsearch-javascript-client-900-features-enhancements] +### Fixes [elasticsearch-javascript-client-9.0.1-fixes] + +**Reinstate `nodeFilter` and node `roles` feature:** The docs note a `nodeFilter` option on the client that will, by default, filter the nodes based on any `roles` values that are set at instantiation. At some point, this functionality was partially disabled. This brings the feature back, ensuring that it matches what the documentation has said it does all along. + +**Ensure Apache Arrow ES|QL helper uses async iterator:** the [`esql.toArrowReader()` helper function](/reference/client-helpers.md#_toarrowreader) was trying to return `RecordBatchStreamReader`—a synchronous iterator—despite the fact that the `apache-arrow` package was, in most cases, automatically coercing it to `AsyncRecordBatchStreamReader`, its asynchronous counterpart. It now is always returned as an async iterator. + +## 9.0.0 [elasticsearch-javascript-client-9.0.0-release-notes] + +### Features and enhancements [elasticsearch-javascript-client-9.0.0-features-enhancements] - **Compatibility with Elasticsearch 9.0:** All changes and additions to Elasticsearch APIs for its 9.0 release are reflected in this release. - **Serverless client merged in:** the `@elastic/elasticsearch-serverless` client is being deprecated, and its functionality has been merged back into this client. This should have zero impact on the way the client works by default, except that a new `serverMode` option has been added. When it's explicitly set to `"serverless"` by a user, a few default settings and behaviors are changed: @@ -38,4 +46,4 @@ To check for security updates, go to [Security announcements for the Elastic sta - **Improved Cloud ID parsing:** when using a Cloud ID as the `cloud` parameter to instantiate the client, that ID was assumed to be in the correct format. New assertions have been added to verify that format and throw a `ConfigurationError` if it is invalid. See [#2694](https://github.com/elastic/elasticsearch-js/issues/2694). 
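As a quick, hedged orientation for these notes, the sketch below shows what the `serverMode` option and the async Arrow reader behavior look like in client code. It is illustrative only: the node URL, API key, and `sample_data` index name are placeholders, not values taken from this changelog.

```ts
import { Client } from '@elastic/elasticsearch'

async function main (): Promise<void> {
  // `serverMode: 'serverless'` opts in to the serverless defaults described above.
  const client = new Client({
    node: 'https://my-project.es.example.io/', // placeholder endpoint
    auth: { apiKey: 'my-api-key' }, // placeholder credential
    serverMode: 'serverless'
  })

  // Since the 9.0.1 fix above, toArrowReader() resolves to an async iterator,
  // so record batches are consumed with `for await`.
  const reader = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader()
  for await (const batch of reader) {
    for (const row of batch) {
      console.log(row.toJSON())
    }
  }
}

main().catch(console.error)
```

Because record batches are parsed as the cluster streams them, memory use can stay proportional to a single batch rather than to the full result set.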
-% ### Fixes [elasticsearch-javascript-client-900-fixes] +% ### Fixes [elasticsearch-javascript-client-9.0.0-fixes] From ae7853798c0dfaaab562ca74e98be6ef58082103 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 25 Apr 2025 12:41:31 -0500 Subject: [PATCH 542/647] Bump to 9.0.1 (#2797) --- package.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 6212c9f81..8e2350299 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "9.0.0", - "versionCanary": "9.0.0-canary.0", + "version": "9.0.1", + "versionCanary": "9.0.1-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", "types": "index.d.ts", From d6cb0dd5b7fb73169c784a74d990657cfa5c9df2 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 28 Apr 2025 09:48:24 -0500 Subject: [PATCH 543/647] Update dependency @types/node to v22.15.2 (#2799) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 8e2350299..3eecd1480 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.14.1", + "@types/node": "22.15.2", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 56225051df67ee9ce33ee9caed3f36b6f68a45e5 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 28 Apr 2025 17:12:24 +0200 Subject: [PATCH 544/647] Auto-generated API code (#2805) --- docs/reference/api-reference.md | 13 +++++++++---- src/api/api/ml.ts | 6 +++--- src/api/api/synonyms.ts | 12 +++++++++--- src/api/types.ts | 33 ++++++++++++++++++++++++--------- 4 files changed, 45 insertions(+), 19 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index b91c22313..d3a6db5b8 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -980,7 +980,6 @@ PUT my-index-000001/_doc/1?version=2&version_type=external "id": "elkbee" } } -``` In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). @@ -9211,7 +9210,7 @@ retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. ## client.ml.getDataFrameAnalyticsStats [_ml.get_data_frame_analytics_stats] -Get data frame analytics jobs usage info. +Get data frame analytics job stats. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats) @@ -9241,7 +9240,7 @@ there are no matches or only partial matches. - **`verbose` (Optional, boolean)**: Defines whether the stats response should be verbose. ## client.ml.getDatafeedStats [_ml.get_datafeed_stats] -Get datafeeds usage info. +Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the @@ -9357,7 +9356,7 @@ influencers are sorted by the `influencer_score` value. 
means it is unset and results are not limited to specific timestamps. ## client.ml.getJobStats [_ml.get_job_stats] -Get anomaly detection jobs usage info. +Get anomaly detection job stats. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats) @@ -14245,6 +14244,8 @@ client.synonyms.deleteSynonymRule({ set_id, rule_id }) #### Request (object) [_request_synonyms.delete_synonym_rule] - **`set_id` (string)**: The ID of the synonym set to update. - **`rule_id` (string)**: The ID of the synonym rule to delete. +- **`refresh` (Optional, boolean)**: If `true`, the request will refresh the analyzers with the deleted synonym rule and wait for the new synonyms to be available before returning. +If `false`, analyzers will not be reloaded with the deleted synonym rule ## client.synonyms.getSynonym [_synonyms.get_synonym] Get a synonym set. @@ -14313,6 +14314,8 @@ client.synonyms.putSynonym({ id, synonyms_set }) #### Request (object) [_request_synonyms.put_synonym] - **`id` (string)**: The ID of the synonyms set to be created or updated. - **`synonyms_set` ({ id, synonyms } | { id, synonyms }[])**: The synonym rules definitions for the synonyms set. +- **`refresh` (Optional, boolean)**: If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning. +If `false`, analyzers will not be reloaded with the new synonym set ## client.synonyms.putSynonymRule [_synonyms.put_synonym_rule] Create or update a synonym rule. @@ -14334,6 +14337,8 @@ client.synonyms.putSynonymRule({ set_id, rule_id, synonyms }) - **`set_id` (string)**: The ID of the synonym set. - **`rule_id` (string)**: The ID of the synonym rule to be updated or created. - **`synonyms` (string)**: The synonym rule information definition, which must be in Solr format. +- **`refresh` (Optional, boolean)**: If `true`, the request will refresh the analyzers with the new synonym rule and wait for the new synonyms to be available before returning. +If `false`, analyzers will not be reloaded with the new synonym rule ## client.tasks.cancel [_tasks.cancel] Cancel a task. diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index c25f8763e..55cec2dd8 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -2230,7 +2230,7 @@ export default class Ml { } /** - * Get data frame analytics jobs usage info. + * Get data frame analytics job stats. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats | Elasticsearch API documentation} */ async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2283,7 +2283,7 @@ export default class Ml { } /** - * Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. + * Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. 
If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats | Elasticsearch API documentation} */ async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2499,7 +2499,7 @@ export default class Ml { } /** - * Get anomaly detection jobs usage info. + * Get anomaly detection job stats. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats | Elasticsearch API documentation} */ async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index fc39d46c3..79888212b 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -48,7 +48,9 @@ export default class Synonyms { 'rule_id' ], body: [], - query: [] + query: [ + 'refresh' + ] }, 'synonyms.get_synonym': { path: [ @@ -83,7 +85,9 @@ export default class Synonyms { body: [ 'synonyms_set' ], - query: [] + query: [ + 'refresh' + ] }, 'synonyms.put_synonym_rule': { path: [ @@ -93,7 +97,9 @@ export default class Synonyms { body: [ 'synonyms' ], - query: [] + query: [ + 'refresh' + ] } } } diff --git a/src/api/types.ts b/src/api/types.ts index 202f1b299..a3e5271b1 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -34908,9 +34908,9 @@ export type SynonymsSynonymString = string export interface SynonymsSynonymsUpdateResult { /** The update operation result. */ result: Result - /** Updating synonyms in a synonym set reloads the associated analyzers. + /** Updating synonyms in a synonym set can reload the associated analyzers in case refresh is set to true. * This information is the analyzers reloading result. */ - reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult + reload_analyzers_details?: IndicesReloadSearchAnalyzersReloadResult } export interface SynonymsDeleteSynonymRequest extends RequestBase { @@ -34929,10 +34929,14 @@ export interface SynonymsDeleteSynonymRuleRequest extends RequestBase { set_id: Id /** The ID of the synonym rule to delete. */ rule_id: Id + /** If `true`, the request will refresh the analyzers with the deleted synonym rule and wait for the new synonyms to be available before returning. + * If `false`, analyzers will not be reloaded with the deleted synonym rule + * @remarks This property is not supported on Elastic Cloud Serverless. */ + refresh?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never } + body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never, refresh?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never } + querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never, refresh?: never } } export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult @@ -34998,17 +35002,24 @@ export interface SynonymsGetSynonymsSetsSynonymsSetItem { export interface SynonymsPutSynonymRequest extends RequestBase { /** The ID of the synonyms set to be created or updated. */ id: Id + /** If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning. 
+ * If `false`, analyzers will not be reloaded with the new synonym set + * @remarks This property is not supported on Elastic Cloud Serverless. */ + refresh?: boolean /** The synonym rules definitions for the synonyms set. */ synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[] /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { id?: never, synonyms_set?: never } + body?: string | { [key: string]: any } & { id?: never, refresh?: never, synonyms_set?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { id?: never, synonyms_set?: never } + querystring?: { [key: string]: any } & { id?: never, refresh?: never, synonyms_set?: never } } export interface SynonymsPutSynonymResponse { + /** The update operation result. */ result: Result - reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult + /** Updating a synonyms set can reload the associated analyzers in case refresh is set to true. + * This information is the analyzers reloading result. */ + reload_analyzers_details?: IndicesReloadSearchAnalyzersReloadResult } export interface SynonymsPutSynonymRuleRequest extends RequestBase { @@ -35016,12 +35027,16 @@ export interface SynonymsPutSynonymRuleRequest extends RequestBase { set_id: Id /** The ID of the synonym rule to be updated or created. */ rule_id: Id + /** If `true`, the request will refresh the analyzers with the new synonym rule and wait for the new synonyms to be available before returning. + * If `false`, analyzers will not be reloaded with the new synonym rule + * @remarks This property is not supported on Elastic Cloud Serverless. */ + refresh?: boolean /** The synonym rule information definition, which must be in Solr format. */ synonyms: SynonymsSynonymString /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never, synonyms?: never } + body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never, refresh?: never, synonyms?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never, synonyms?: never } + querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never, refresh?: never, synonyms?: never } } export type SynonymsPutSynonymRuleResponse = SynonymsSynonymsUpdateResult From 3da4572d1bc753479c02b016268599e6d177b91d Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 28 Apr 2025 16:22:21 +0000 Subject: [PATCH 545/647] Update dependency proxy to v2 (#2801) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 3eecd1480..8774d8cd9 100644 --- a/package.json +++ b/package.json @@ -77,7 +77,7 @@ "node-abort-controller": "3.1.1", "node-fetch": "2.7.0", "ora": "5.4.1", - "proxy": "1.0.2", + "proxy": "2.2.0", "rimraf": "5.0.10", "semver": "7.7.1", "split2": "4.2.0", From 8ba13d31d8af05d26098734a7d1e79c6265fdf26 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Tue, 29 Apr 2025 10:32:37 -0500 Subject: [PATCH 546/647] Update dependency rimraf to v6 (#2802) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 8774d8cd9..6b4c82f30 100644 --- a/package.json +++ b/package.json @@ -78,7 +78,7 @@ "node-fetch": "2.7.0", "ora": "5.4.1", "proxy": "2.2.0", - "rimraf": "5.0.10", + "rimraf": "6.0.1", "semver": "7.7.1", "split2": "4.2.0", "stoppable": "1.1.0", From 3bc89758bfe262bb21fe27c265afd4409ce26a5b Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 1 May 2025 10:13:22 -0500 Subject: [PATCH 547/647] Stop running scheduled integration tests (#2812) We're running them on PRs again now, so this is now more noisy than useful. 
--- catalog-info.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/catalog-info.yaml b/catalog-info.yaml index de212b172..c0ccdc314 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -41,11 +41,3 @@ spec: build_branches: false separate_pull_request_statuses: true cancel_intermediate_builds: true - cancel_intermediate_builds_branch_filter: "!main" - schedules: - main: - branch: "main" - cronline: "@daily" - 8_x: - branch: "8.x" - cronline: "@daily" From 489e5c5809b326ff52e7120acc9bf943ebf23f22 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 5 May 2025 10:58:07 -0500 Subject: [PATCH 548/647] Update oven-sh/setup-bun digest to 735343b (#2814) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .github/workflows/nodejs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index fa3090287..d45bb7215 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -104,7 +104,7 @@ jobs: persist-credentials: false - name: Use Bun - uses: oven-sh/setup-bun@4bc047ad259df6fc24a6c9b0f9a0cb08cf17fbe5 # v2 + uses: oven-sh/setup-bun@735343b667d3e6f658f44d0eca948eb6282f2b76 # v2 - name: Install run: | From b38bed5bfa9a45448a2b2732b9d8716d985edaf3 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 5 May 2025 16:19:21 +0000 Subject: [PATCH 549/647] Update dependency @types/node to v22.15.3 (#2815) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 6b4c82f30..1f0018d3d 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.15.2", + "@types/node": "22.15.3", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 591bf56cba59997455a5079d626d64b270a66bb2 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 5 May 2025 18:20:41 +0200 Subject: [PATCH 550/647] Auto-generated API code (#2819) --- docs/reference/api-reference.md | 19 +- src/api/types.ts | 400 ++++++++++++++++++++++++++++---- 2 files changed, 373 insertions(+), 46 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index d3a6db5b8..b785ce7ba 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -1683,7 +1683,7 @@ client.search({ ... }) - **`include_named_queries_score` (Optional, boolean)**: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. - **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node that the search runs concurrently. 
This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. -- **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. +- **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. * `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. - **`pre_filter_shard_size` (Optional, number)**: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field. - **`request_cache` (Optional, boolean)**: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings. - **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. @@ -4777,6 +4777,9 @@ By default, the request waits for 1 second for the query results. 
If the query completes during this period, results are returned.
Otherwise, a query ID is returned that can later be used to retrieve the results.
- **`allow_partial_results` (Optional, boolean)**: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
+If `false`, the query will fail if there are any failures.
+
+To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`.
- **`delimiter` (Optional, string)**: The character to use between values within a CSV row.
It is valid only for the CSV format.
- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
@@ -4911,6 +4914,9 @@ count.
- **`drop_null_columns` (Optional, boolean)**: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true`, then the response will include an extra section under the name `all_columns` which has the name of all columns.
- **`allow_partial_results` (Optional, boolean)**: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
+If `false`, the query will fail if there are any failures.
+
+To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`.

## client.features.getFeatures [_features.get_features]
Get the features.
@@ -5437,7 +5443,7 @@ This could be a built-in analyzer, or an analyzer that’s been configured in th
- **`field` (Optional, string)**: Field used to derive the analyzer.
To use this parameter, you must specify an index.
If specified, the `analyzer` parameter overrides this value.
-- **`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])**: Array of token filters used to apply after the tokenizer. 
+- **`filter` (Optional, string | { type } | { type } | { type, preserve_original } | { type, ignored_scripts, output_unigrams } | { type } | { type } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type } | { type } | { type } | { type, dedup, dictionary, locale, longest_only } | { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, bucket_count, hash_count, hash_set_size, with_rotation } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, pattern, replacement } | { type } | { type } | { type, script } | { type } | { type } | { type } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type } | { type } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, ignore_keywords } | { type } | { type, stopwords } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])**: Array of token filters used to apply after the tokenizer.
- **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token.
- **`text` (Optional, string | string[])**: Text to analyze.
If an array of strings is provided, it is analyzed as a multi-value field.

@@ -5636,6 +5642,15 @@ client.indices.create({ index })

#### Request (object) [_request_indices.create]
- **`index` (string)**: Name of the index you wish to create.
+Index names must meet the following criteria:
+
+* Lowercase only
+* Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#`
+* Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions
+* Cannot start with `-`, `_`, or `+`
+* Cannot be `.` or `..`
+* Cannot be longer than 255 bytes (note that it is bytes, so multi-byte characters will reach the limit faster)
+* Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins
- **`aliases` (Optional, Record)**: Aliases for the index.
- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index.
If specified, this mapping can include: - Field names diff --git a/src/api/types.ts b/src/api/types.ts index a3e5271b1..aed848fb1 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2036,8 +2036,8 @@ export interface SearchRequest extends RequestBase { * * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. - * `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. - * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. */ + * * `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. + * * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. */ preference?: string /** A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. * This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). @@ -2613,7 +2613,7 @@ export interface SearchInnerHits { ignore_unmapped?: boolean script_fields?: Record seq_no_primary_term?: boolean - fields?: Fields + fields?: Field[] /** How the inner hits should be sorted per `inner_hits`. * By default, inner hits are sorted by score. 
*/ sort?: Sort @@ -6656,6 +6656,10 @@ export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetr export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer +export interface AnalysisApostropheTokenFilter extends AnalysisTokenFilterBase { + type: 'apostrophe' +} + export interface AnalysisArabicAnalyzer { type: 'arabic' stopwords?: AnalysisStopWords @@ -6663,6 +6667,10 @@ export interface AnalysisArabicAnalyzer { stem_exclusion?: string[] } +export interface AnalysisArabicNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'arabic_normalization' +} + export interface AnalysisArmenianAnalyzer { type: 'armenian' stopwords?: AnalysisStopWords @@ -6672,6 +6680,7 @@ export interface AnalysisArmenianAnalyzer { export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'asciifolding' + /** If `true`, emit both original tokens and folded tokens. Defaults to `false`. */ preserve_original?: SpecUtilsStringified } @@ -6735,6 +6744,24 @@ export interface AnalysisCjkAnalyzer { stopwords_path?: string } +export type AnalysisCjkBigramIgnoredScript = 'han' | 'hangul' | 'hiragana' | 'katakana' + +export interface AnalysisCjkBigramTokenFilter extends AnalysisTokenFilterBase { + type: 'cjk_bigram' + /** Array of character scripts for which to disable bigrams. */ + ignored_scripts?: AnalysisCjkBigramIgnoredScript[] + /** If `true`, emit tokens in both bigram and unigram form. If `false`, a CJK character is output in unigram form when it has no adjacent characters. Defaults to `false`. */ + output_unigrams?: boolean +} + +export interface AnalysisCjkWidthTokenFilter extends AnalysisTokenFilterBase { + type: 'cjk_width' +} + +export interface AnalysisClassicTokenFilter extends AnalysisTokenFilterBase { + type: 'classic' +} + export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase { type: 'classic' max_token_length?: integer @@ -6742,25 +6769,45 @@ export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase { export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase { type: 'common_grams' + /** A list of tokens. The filter generates bigrams for these tokens. + * Either this or the `common_words_path` parameter is required. 
*/ common_words?: string[] + /** Path to a file containing a list of tokens. The filter generates bigrams for these tokens. + * This path must be absolute or relative to the `config` location. The file must be UTF-8 encoded. Each token in the file must be separated by a line break. + * Either this or the `common_words` parameter is required. */ common_words_path?: string + /** If `true`, matches for common words matching are case-insensitive. Defaults to `false`. */ ignore_case?: boolean + /** If `true`, the filter excludes the following tokens from the output: + * - Unigrams for common words + * - Unigrams for terms followed by common words + * Defaults to `false`. We recommend enabling this parameter for search analyzers. */ query_mode?: boolean } export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase { - hyphenation_patterns_path?: string + /** Maximum subword character length. Longer subword tokens are excluded from the output. Defaults to `15`. */ max_subword_size?: integer + /** Minimum subword character length. Shorter subword tokens are excluded from the output. Defaults to `2`. */ min_subword_size?: integer + /** Minimum word character length. Shorter word tokens are excluded from the output. Defaults to `5`. */ min_word_size?: integer + /** If `true`, only include the longest matching subword. Defaults to `false`. */ only_longest_match?: boolean + /** A list of subwords to look for in the token stream. If found, the subword is included in the token output. + * Either this parameter or `word_list_path` must be specified. */ word_list?: string[] + /** Path to a file that contains a list of subwords to find in the token stream. If found, the subword is included in the token output. + * This path must be absolute or relative to the config location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break. + * Either this parameter or `word_list` must be specified. */ word_list_path?: string } export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { type: 'condition' + /** Array of token filters. If a token matches the predicate script in the `script` parameter, these filters are applied to the token in the order provided. */ filter: string[] + /** Predicate script used to apply token filters. If a token matches this script, the filters in the `filter` parameter are applied to the token. */ script: Script | ScriptSource } @@ -6792,11 +6839,17 @@ export interface AnalysisDanishAnalyzer { stopwords_path?: string } +export interface AnalysisDecimalDigitTokenFilter extends AnalysisTokenFilterBase { + type: 'decimal_digit' +} + export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity' export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase { type: 'delimited_payload' + /** Character used to separate tokens from payloads. Defaults to `|`. */ delimiter?: string + /** Data type for the stored payload. */ encoding?: AnalysisDelimitedPayloadEncoding } @@ -6815,9 +6868,13 @@ export type AnalysisEdgeNGramSide = 'front' | 'back' export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { type: 'edge_ngram' + /** Maximum character length of a gram. For custom token filters, defaults to `2`. For the built-in edge_ngram filter, defaults to `1`. */ max_gram?: integer + /** Minimum character length of a gram. Defaults to `1`. */ min_gram?: integer + /** Indicates whether to truncate tokens from the `front` or `back`. Defaults to `front`. 
*/ side?: AnalysisEdgeNGramSide + /** Emits original token when set to `true`. Defaults to `false`. */ preserve_original?: SpecUtilsStringified } @@ -6831,8 +6888,16 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { type: 'elision' + /** List of elisions to remove. + * To be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed. + * For custom `elision` filters, either this parameter or `articles_path` must be specified. */ articles?: string[] + /** Path to a file that contains a list of elisions to remove. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each elision in the file must be separated by a line break. + * To be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed. + * For custom `elision` filters, either this parameter or `articles` must be specified. */ articles_path?: string + /** If `true`, elision matching is case insensitive. If `false`, elision matching is case sensitive. Defaults to `false`. */ articles_case?: SpecUtilsStringified } @@ -6867,7 +6932,9 @@ export interface AnalysisFingerprintAnalyzer { export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase { type: 'fingerprint' + /** Maximum character length, including whitespace, of the output token. Defaults to `255`. Concatenated tokens longer than this will result in no token output. */ max_output_size?: integer + /** Character to use to concatenate the token stream input. Defaults to a space. */ separator?: string } @@ -6878,6 +6945,10 @@ export interface AnalysisFinnishAnalyzer { stem_exclusion?: string[] } +export interface AnalysisFlattenGraphTokenFilter extends AnalysisTokenFilterBase { + type: 'flatten_graph' +} + export interface AnalysisFrenchAnalyzer { type: 'french' stopwords?: AnalysisStopWords @@ -6899,6 +6970,10 @@ export interface AnalysisGermanAnalyzer { stem_exclusion?: string[] } +export interface AnalysisGermanNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'german_normalization' +} + export interface AnalysisGreekAnalyzer { type: 'greek' stopwords?: AnalysisStopWords @@ -6912,6 +6987,10 @@ export interface AnalysisHindiAnalyzer { stem_exclusion?: string[] } +export interface AnalysisHindiNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'hindi_normalization' +} + export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { type: 'html_strip' escaped_tags?: string[] @@ -6926,14 +7005,32 @@ export interface AnalysisHungarianAnalyzer { export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { type: 'hunspell' + /** If `true`, duplicate tokens are removed from the filter’s output. Defaults to `true`. */ dedup?: boolean + /** One or more `.dic` files (e.g, `en_US.dic`, my_custom.dic) to use for the Hunspell dictionary. + * By default, the `hunspell` filter uses all `.dic` files in the `<$ES_PATH_CONF>/hunspell/` directory specified using the `lang`, `language`, or `locale` parameter. */ dictionary?: string + /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. */ locale: string + /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. 
+ * @alias locale */ + lang: string + /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. + * @alias locale */ + language: string + /** If `true`, only the longest stemmed version of each token is included in the output. If `false`, all stemmed versions of the token are included. Defaults to `false`. */ longest_only?: boolean } export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase { type: 'hyphenation_decompounder' + /** Path to an Apache FOP (Formatting Objects Processor) XML hyphenation pattern file. + * This path must be absolute or relative to the `config` location. Only FOP v1.2 compatible files are supported. */ + hyphenation_patterns_path: string + /** If `true`, do not match sub tokens in tokens that are in the word list. Defaults to `false`. */ + no_sub_matches?: boolean + /** If `true`, do not allow overlapping tokens. Defaults to `false`. */ + no_overlapping_matches?: boolean } export interface AnalysisIcuAnalyzer { @@ -6999,6 +7096,10 @@ export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase id: string } +export interface AnalysisIndicNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'indic_normalization' +} + export interface AnalysisIndonesianAnalyzer { type: 'indonesian' stopwords?: AnalysisStopWords @@ -7020,6 +7121,11 @@ export interface AnalysisItalianAnalyzer { stem_exclusion?: string[] } +export interface AnalysisJaStopTokenFilter extends AnalysisTokenFilterBase { + type: 'ja_stop' + stopwords?: AnalysisStopWords +} + export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase { type: 'kstem' } @@ -7028,14 +7134,22 @@ export type AnalysisKeepTypesMode = 'include' | 'exclude' export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase { type: 'keep_types' + /** Indicates whether to keep or remove the specified token types. */ mode?: AnalysisKeepTypesMode - types?: string[] + /** List of token types to keep or remove. */ + types: string[] } export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase { type: 'keep' + /** List of words to keep. Only tokens that match words in this list are included in the output. + * Either this parameter or `keep_words_path` must be specified. */ keep_words?: string[] + /** If `true`, lowercase all keep words. Defaults to `false`. */ keep_words_case?: boolean + /** Path to a file that contains a list of words to keep. Only tokens that match words in this list are included in the output. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break. + * Either this parameter or `keep_words` must be specified. */ keep_words_path?: string } @@ -7046,12 +7160,24 @@ export interface AnalysisKeywordAnalyzer { export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase { type: 'keyword_marker' + /** If `true`, matching for the `keywords` and `keywords_path` parameters ignores letter case. Defaults to `false`. */ ignore_case?: boolean + /** Array of keywords. Tokens that match these keywords are not stemmed. + * This parameter, `keywords_path`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`. */ keywords?: string | string[] + /** Path to a file that contains a list of keywords. Tokens that match these keywords are not stemmed. 
+   * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break.
+   * This parameter, `keywords`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`. */
   keywords_path?: string
+  /** Java regular expression used to match tokens. Tokens that match this expression are marked as keywords and not stemmed.
+   * This parameter, `keywords`, or `keywords_path` must be specified. You cannot specify this parameter and `keywords` or `keywords_path`. */
   keywords_pattern?: string
 }

+export interface AnalysisKeywordRepeatTokenFilter extends AnalysisTokenFilterBase {
+  type: 'keyword_repeat'
+}
+
 export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase {
   type: 'keyword'
   buffer_size?: integer
@@ -7106,7 +7232,9 @@ export interface AnalysisLatvianAnalyzer {

 export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase {
   type: 'length'
+  /** Maximum character length of a token. Longer tokens are excluded from the output. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`. */
   max?: integer
+  /** Minimum character length of a token. Shorter tokens are excluded from the output. Defaults to `0`. */
   min?: integer
 }

@@ -7116,7 +7244,9 @@ export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase {

 export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase {
   type: 'limit'
+  /** If `true`, the limit filter exhausts the token stream, even if the `max_token_count` has already been reached. Defaults to `false`. */
   consume_all_tokens?: boolean
+  /** Maximum number of tokens to keep. Once this limit is reached, any remaining tokens are excluded from the output. Defaults to `1`. */
   max_token_count?: SpecUtilsStringified<integer>
 }

@@ -7133,9 +7263,12 @@ export interface AnalysisLowercaseNormalizer {

 export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase {
   type: 'lowercase'
-  language?: string
+  /** Language-specific lowercase token filter to use. */
+  language?: AnalysisLowercaseTokenFilterLanguages
 }

+export type AnalysisLowercaseTokenFilterLanguages = 'greek' | 'irish' | 'turkish'
+
 export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase {
   type: 'lowercase'
 }
@@ -7146,16 +7279,34 @@ export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase {
   mappings_path?: string
 }

+export interface AnalysisMinHashTokenFilter extends AnalysisTokenFilterBase {
+  type: 'min_hash'
+  /** Number of buckets to which hashes are assigned. Defaults to `512`. */
+  bucket_count?: integer
+  /** Number of ways to hash each token in the stream. Defaults to `1`. */
+  hash_count?: integer
+  /** Number of hashes to keep from each bucket. Defaults to `1`.
+   * Hashes are retained by ascending size, starting with the bucket’s smallest hash first. */
+  hash_set_size?: integer
+  /** If `true`, the filter fills empty buckets with the value of the first non-empty bucket to its circular right if the `hash_set_size` is `1`. If the `bucket_count` argument is greater than 1, this parameter defaults to `true`. Otherwise, this parameter defaults to `false`. */
+  with_rotation?: boolean
+}
+
 export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase {
   type: 'multiplexer'
+  /** A list of token filters to apply to incoming tokens. */
   filters: string[]
+  /** If `true` (the default) then emit the original token in addition to the filtered tokens. 
*/
  preserve_original?: SpecUtilsStringified<boolean>
}

 export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase {
   type: 'ngram'
+  /** Maximum length of characters in a gram. Defaults to `2`. */
   max_gram?: integer
+  /** Minimum length of characters in a gram. Defaults to `1`. */
   min_gram?: integer
+  /** Emits original token when set to `true`. Defaults to `false`. */
   preserve_original?: SpecUtilsStringified<boolean>
 }

@@ -7179,6 +7330,7 @@ export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed'

 export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase {
   type: 'nori_part_of_speech'
+  /** An array of part-of-speech tags that should be removed. */
   stoptags?: string[]
 }

@@ -7228,7 +7380,9 @@ export interface AnalysisPatternAnalyzer {

 export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase {
   type: 'pattern_capture'
+  /** A list of regular expressions to match. */
   patterns: string[]
+  /** If set to `true` (the default) it will emit the original token. */
   preserve_original?: SpecUtilsStringified<boolean>
 }

@@ -7241,9 +7395,11 @@ export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase

 export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase {
   type: 'pattern_replace'
+  /** If `true`, all substrings matching the pattern parameter’s regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. Defaults to `true`. */
   all?: boolean
-  flags?: string
+  /** Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter. */
   pattern: string
+  /** Replacement substring. Defaults to an empty substring (`""`). */
   replacement?: string
 }

@@ -7260,6 +7416,10 @@ export interface AnalysisPersianAnalyzer {
   stopwords_path?: string
 }

+export interface AnalysisPersianNormalizationTokenFilter extends AnalysisTokenFilterBase {
+  type: 'persian_normalization'
+}
+
 export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff'

 export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish'

@@ -7291,6 +7451,7 @@ export interface AnalysisPortugueseAnalyzer {

 export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase {
   type: 'predicate_token_filter'
+  /** Script containing a condition used to filter incoming tokens. Only tokens that match this script are included in the output. 
*/
  script: Script | ScriptSource
}

@@ -7316,6 +7477,14 @@ export interface AnalysisRussianAnalyzer {
   stem_exclusion?: string[]
 }

+export interface AnalysisScandinavianFoldingTokenFilter extends AnalysisTokenFilterBase {
+  type: 'scandinavian_folding'
+}
+
+export interface AnalysisScandinavianNormalizationTokenFilter extends AnalysisTokenFilterBase {
+  type: 'scandinavian_normalization'
+}
+
 export interface AnalysisSerbianAnalyzer {
   type: 'serbian'
   stopwords?: AnalysisStopWords
@@ -7323,13 +7492,23 @@ export interface AnalysisSerbianAnalyzer {
   stem_exclusion?: string[]
 }

+export interface AnalysisSerbianNormalizationTokenFilter extends AnalysisTokenFilterBase {
+  type: 'serbian_normalization'
+}
+
 export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase {
   type: 'shingle'
+  /** String used in shingles as a replacement for empty positions that do not contain a token. This filler token is only used in shingles, not original unigrams. Defaults to an underscore (`_`). */
   filler_token?: string
-  max_shingle_size?: integer | string
-  min_shingle_size?: integer | string
+  /** Maximum number of tokens to concatenate when creating shingles. Defaults to `2`. */
+  max_shingle_size?: SpecUtilsStringified<integer>
+  /** Minimum number of tokens to concatenate when creating shingles. Defaults to `2`. */
+  min_shingle_size?: SpecUtilsStringified<integer>
+  /** If `true`, the output includes the original input tokens. If `false`, the output only includes shingles; the original input tokens are removed. Defaults to `true`. */
   output_unigrams?: boolean
+  /** If `true`, the output includes the original input tokens only if no shingles are produced; if shingles are produced, the output only includes shingles. Defaults to `false`. */
   output_unigrams_if_no_shingles?: boolean
+  /** Separator used to concatenate adjacent tokens to form a shingle. Defaults to a space (`" "`). */
   token_separator?: string
 }

@@ -7355,10 +7534,11 @@ export interface AnalysisSnowballAnalyzer {
   stopwords?: AnalysisStopWords
 }

-export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish'
+export type AnalysisSnowballLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Irish' | 'Kp' | 'Lithuanian' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Serbian' | 'Spanish' | 'Swedish' | 'Turkish'

 export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase {
   type: 'snowball'
+  /** Controls the language used by the stemmer. */
   language?: AnalysisSnowballLanguage
 }

@@ -7369,6 +7549,10 @@ export interface AnalysisSoraniAnalyzer {
   stem_exclusion?: string[]
 }

+export interface AnalysisSoraniNormalizationTokenFilter extends AnalysisTokenFilterBase {
+  type: 'sorani_normalization'
+}
+
 export interface AnalysisSpanishAnalyzer {
   type: 'spanish'
   stopwords?: AnalysisStopWords
@@ -7395,7 +7579,9 @@ export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase {

 export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterBase {
   type: 'stemmer_override'
+  /** A list of mapping rules to use. */
   rules?: string[]
+  /** A path (either relative to `config` location, or absolute) to a list of mappings. 
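As a minimal sketch (an editor's aside, not part of this patch), the `shingle` options documented above can be exercised directly with the analyze API; the node URL and sample text here are assumptions:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  // Build 2- and 3-token shingles from a whitespace-tokenized stream.
  const result = await client.indices.analyze({
    tokenizer: 'whitespace',
    filter: [
      'lowercase',
      { type: 'shingle', min_shingle_size: 2, max_shingle_size: 3, output_unigrams: false }
    ],
    text: 'Quick brown fox'
  })
  // Expected shape: [ 'quick brown', 'quick brown fox', 'brown fox' ]
  console.log(result.tokens?.map(t => t.token))
}

run().catch(console.log)
```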
*/
  rules_path?: string
}

@@ -7418,13 +7604,20 @@ export interface AnalysisStopAnalyzer {

 export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase {
   type: 'stop'
+  /** If `true`, stop word matching is case insensitive. For example, if `true`, a stop word of `the` matches and removes `The`, `THE`, or `the`. Defaults to `false`. */
   ignore_case?: boolean
+  /** If `true`, the last token of a stream is removed if it’s a stop word. Defaults to `true`. */
   remove_trailing?: boolean
+  /** Language value, such as `_arabic_` or `_thai_`. Defaults to `_english_`. */
   stopwords?: AnalysisStopWords
+  /** Path to a file that contains a list of stop words to remove.
+   * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each stop word in the file must be separated by a line break. */
   stopwords_path?: string
 }

-export type AnalysisStopWords = string | string[]
+export type AnalysisStopWordLanguage = '_arabic_' | '_armenian_' | '_basque_' | '_bengali_' | '_brazilian_' | '_bulgarian_' | '_catalan_' | '_cjk_' | '_czech_' | '_danish_' | '_dutch_' | '_english_' | '_estonian_' | '_finnish_' | '_french_' | '_galician_' | '_german_' | '_greek_' | '_hindi_' | '_hungarian_' | '_indonesian_' | '_irish_' | '_italian_' | '_latvian_' | '_lithuanian_' | '_norwegian_' | '_persian_' | '_portuguese_' | '_romanian_' | '_russian_' | '_serbian_' | '_sorani_' | '_spanish_' | '_swedish_' | '_thai_' | '_turkish_' | '_none_'
+
+export type AnalysisStopWords = AnalysisStopWordLanguage | string[]

 export interface AnalysisSwedishAnalyzer {
   type: 'swedish'
@@ -7435,27 +7628,30 @@ export interface AnalysisSwedishAnalyzer {

 export type AnalysisSynonymFormat = 'solr' | 'wordnet'

-export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase {
+export interface AnalysisSynonymGraphTokenFilter extends AnalysisSynonymTokenFilterBase {
   type: 'synonym_graph'
-  expand?: boolean
-  format?: AnalysisSynonymFormat
-  lenient?: boolean
-  synonyms?: string[]
-  synonyms_path?: string
-  synonyms_set?: string
-  tokenizer?: string
-  updateable?: boolean
 }

-export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
+export interface AnalysisSynonymTokenFilter extends AnalysisSynonymTokenFilterBase {
   type: 'synonym'
+}
+
+export interface AnalysisSynonymTokenFilterBase extends AnalysisTokenFilterBase {
+  /** Expands definitions for equivalent synonym rules. Defaults to `true`. */
   expand?: boolean
+  /** Sets the synonym rules format. */
   format?: AnalysisSynonymFormat
+  /** If `true` ignores errors while parsing the synonym rules. It is important to note that only those synonym rules which cannot get parsed are ignored. Defaults to the value of the `updateable` setting. */
   lenient?: boolean
+  /** Used to define inline synonyms. */
   synonyms?: string[]
+  /** Used to provide a synonym file. This path must be absolute or relative to the `config` location. */
   synonyms_path?: string
+  /** Provide a synonym set created via Synonyms Management APIs. */
   synonyms_set?: string
+  /** Controls the tokenizers that will be used to tokenize the synonym, this parameter is for backwards compatibility for indices that were created before 6.0. */
   tokenizer?: string
+  /** If `true` allows reloading search analyzers to pick up changes to synonym files. Only to be used for search analyzers. Defaults to `false`. 
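For context, a hedged sketch of how these `synonym_graph` settings fit together when creating an index — the index name, field name, and synonym rules are illustrative assumptions, and `updateable` filters may only be referenced from search-time analyzers:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  await client.indices.create({
    index: 'my-index',
    settings: {
      analysis: {
        filter: {
          my_synonyms: {
            type: 'synonym_graph',
            synonyms: ['ipod, i-pod, i pod', 'universe, cosmos'],
            // Reloadable synonyms: usable only in search analyzers.
            updateable: true
          }
        },
        analyzer: {
          my_search_analyzer: {
            type: 'custom',
            tokenizer: 'standard',
            filter: ['lowercase', 'my_synonyms']
          }
        }
      }
    },
    mappings: {
      properties: {
        title: {
          type: 'text',
          analyzer: 'standard',
          search_analyzer: 'my_search_analyzer'
        }
      }
    }
  })
}

run().catch(console.log)
```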
*/ updateable?: boolean } @@ -7477,7 +7673,7 @@ export interface AnalysisTokenFilterBase { version?: VersionString } -export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter +export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | 
AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter

 export type AnalysisTokenizer = string | AnalysisTokenizerDefinition

@@ -7493,6 +7689,7 @@ export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {

 export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase {
   type: 'truncate'
+  /** Character limit for each token. Tokens exceeding this limit are truncated. Defaults to `10`. */
   length?: integer
 }

@@ -7510,6 +7707,7 @@ export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase {

 export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase {
   type: 'unique'
+  /** If `true`, only remove duplicate tokens in the same position. Defaults to `false`. */
   only_on_same_position?: boolean
 }

@@ -7527,39 +7725,45 @@ export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase {
   max_token_length?: integer
 }

-export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilterBase {
+export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisWordDelimiterTokenFilterBase {
   type: 'word_delimiter_graph'
+  /** If `true`, the filter adjusts the offsets of split or catenated tokens to better reflect their actual position in the token stream. Defaults to `true`. */
   adjust_offsets?: boolean
-  catenate_all?: boolean
-  catenate_numbers?: boolean
-  catenate_words?: boolean
-  generate_number_parts?: boolean
-  generate_word_parts?: boolean
+  /** If `true`, the filter skips tokens with a keyword attribute of true. Defaults to `false`. */
   ignore_keywords?: boolean
-  preserve_original?: SpecUtilsStringified<boolean>
-  protected_words?: string[]
-  protected_words_path?: string
-  split_on_case_change?: boolean
-  split_on_numerics?: boolean
-  stem_english_possessive?: boolean
-  type_table?: string[]
-  type_table_path?: string
 }

-export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBase {
+export interface AnalysisWordDelimiterTokenFilter extends AnalysisWordDelimiterTokenFilterBase {
   type: 'word_delimiter'
+}
+
+export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilterBase {
+  /** If `true`, the filter produces catenated tokens for chains of alphanumeric characters separated by non-alphabetic delimiters. Defaults to `false`. */
   catenate_all?: boolean
+  /** If `true`, the filter produces catenated tokens for chains of numeric characters separated by non-alphabetic delimiters. Defaults to `false`. */
   catenate_numbers?: boolean
+  /** If `true`, the filter produces catenated tokens for chains of alphabetical characters separated by non-alphabetic delimiters. Defaults to `false`. */
   catenate_words?: boolean
+  /** If `true`, the filter includes tokens consisting of only numeric characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`. */
   generate_number_parts?: boolean
+  /** If `true`, the filter includes tokens consisting of only alphabetical characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`. 
*/
  generate_word_parts?: boolean
+  /** If `true`, the filter includes the original version of any split tokens in the output. This original version includes non-alphanumeric delimiters. Defaults to `false`. */
   preserve_original?: SpecUtilsStringified<boolean>
+  /** Array of tokens the filter won’t split. */
   protected_words?: string[]
+  /** Path to a file that contains a list of tokens the filter won’t split.
+   * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break. */
   protected_words_path?: string
+  /** If `true`, the filter splits tokens at letter case transitions. For example: camelCase -> [ camel, Case ]. Defaults to `true`. */
   split_on_case_change?: boolean
+  /** If `true`, the filter splits tokens at letter-number transitions. For example: j2se -> [ j, 2, se ]. Defaults to `true`. */
   split_on_numerics?: boolean
+  /** If `true`, the filter removes the English possessive (`'s`) from the end of each token. For example: O'Neil's -> [ O, Neil ]. Defaults to `true`. */
   stem_english_possessive?: boolean
+  /** Array of custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters. */
   type_table?: string[]
+  /** Path to a file that contains custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters. */
   type_table_path?: string
 }

@@ -15857,6 +16061,62 @@ export interface ClusterStateRequest extends RequestBase {

 export type ClusterStateResponse = any

+export interface ClusterStatsCCSStats {
+  /** Contains remote cluster settings and metrics collected from them.
+   * The keys are cluster names, and the values are per-cluster data.
+   * Only present if `include_remotes` option is set to true. */
+  clusters?: Record<string, ClusterStatsRemoteClusterInfo>
+  /** Information about cross-cluster search usage. */
+  _search: ClusterStatsCCSUsageStats
+  /** Information about ES|QL cross-cluster query usage. */
+  _esql?: ClusterStatsCCSUsageStats
+}
+
+export interface ClusterStatsCCSUsageClusterStats {
+  /** The total number of successful (not skipped) cross-cluster search requests that were executed against this cluster. This may include requests where partial results were returned, but not requests in which the cluster has been skipped entirely. */
+  total: integer
+  /** The total number of cross-cluster search requests for which this cluster was skipped. */
+  skipped: integer
+  /** Statistics about the time taken to execute requests against this cluster. */
+  took: ClusterStatsCCSUsageTimeValue
+}
+
+export interface ClusterStatsCCSUsageStats {
+  /** The total number of cross-cluster search requests that have been executed by the cluster. */
+  total: integer
+  /** The total number of cross-cluster search requests that have been successfully executed by the cluster. */
+  success: integer
+  /** The total number of cross-cluster search requests (successful or failed) that had at least one remote cluster skipped. */
+  skipped: integer
+  /** Statistics about the time taken to execute cross-cluster search requests. */
+  took: ClusterStatsCCSUsageTimeValue
+  /** Statistics about the time taken to execute cross-cluster search requests for which the `ccs_minimize_roundtrips` setting was set to `true`. 
*/
+  took_mrt_true?: ClusterStatsCCSUsageTimeValue
+  /** Statistics about the time taken to execute cross-cluster search requests for which the `ccs_minimize_roundtrips` setting was set to `false`. */
+  took_mrt_false?: ClusterStatsCCSUsageTimeValue
+  /** The maximum number of remote clusters that were queried in a single cross-cluster search request. */
+  remotes_per_search_max: integer
+  /** The average number of remote clusters that were queried in a single cross-cluster search request. */
+  remotes_per_search_avg: double
+  /** Statistics about the reasons for cross-cluster search request failures. The keys are the failure reason names and the values are the number of requests that failed for that reason. */
+  failure_reasons: Record<string, integer>
+  /** The keys are the names of the search feature, and the values are the number of requests that used that feature. Single request can use more than one feature (e.g. both `async` and `wildcard`). */
+  features: Record<string, integer>
+  /** Statistics about the clients that executed cross-cluster search requests. The keys are the names of the clients, and the values are the number of requests that were executed by that client. Only known clients (such as `kibana` or `elasticsearch`) are counted. */
+  clients: Record<string, integer>
+  /** Statistics about the clusters that were queried in cross-cluster search requests. The keys are cluster names, and the values are per-cluster telemetry data. This also includes the local cluster itself, which uses the name `(local)`. */
+  clusters: Record<string, ClusterStatsCCSUsageClusterStats>
+}
+
+export interface ClusterStatsCCSUsageTimeValue {
+  /** The maximum time taken to execute a request, in milliseconds. */
+  max: DurationValue<UnitMillis>
+  /** The average time taken to execute a request, in milliseconds. */
+  avg: DurationValue<UnitMillis>
+  /** The 90th percentile of the time taken to execute requests, in milliseconds. */
+  p90: DurationValue<UnitMillis>
+}
+
 export interface ClusterStatsCharFilterTypes {
   /** Contains statistics about analyzer types used in selected nodes. */
   analyzer_types: ClusterStatsFieldTypes[]
@@ -16187,6 +16447,39 @@ export interface ClusterStatsOperatingSystemMemoryInfo {
   used_percent: integer
 }

+export interface ClusterStatsRemoteClusterInfo {
+  /** The UUID of the remote cluster. */
+  cluster_uuid: string
+  /** The connection mode used to communicate with the remote cluster. */
+  mode: string
+  /** The `skip_unavailable` setting used for this remote cluster. */
+  skip_unavailable: boolean
+  /** Transport compression setting used for this remote cluster. */
+  transport_compress: string
+  /** Health status of the cluster, based on the state of its primary and replica shards. */
+  status: HealthStatus
+  /** The list of Elasticsearch versions used by the nodes on the remote cluster. */
+  version: VersionString[]
+  /** The total count of nodes in the remote cluster. */
+  nodes_count: integer
+  /** The total number of shards in the remote cluster. */
+  shards_count: integer
+  /** The total number of indices in the remote cluster. */
+  indices_count: integer
+  /** Total data set size, in bytes, of all shards assigned to selected nodes. */
+  indices_total_size_in_bytes: long
+  /** Total data set size of all shards assigned to selected nodes, as a human-readable string. */
+  indices_total_size?: string
+  /** Maximum amount of memory, in bytes, available for use by the heap across the nodes of the remote cluster. */
+  max_heap_in_bytes: long
+  /** Maximum amount of memory available for use by the heap across the nodes of the remote cluster, as a human-readable string. 
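A speculative usage sketch for the telemetry types above (the `include_remotes` flag is taken from the `clusters` doc comment; treat the exact request flag and response shape as version-dependent assumptions):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  const stats = await client.cluster.stats({ include_remotes: true })
  // Cross-cluster search usage, per the ClusterStatsCCSStats shape above.
  console.log('CCS searches total:', stats.ccs._search.total)
  console.log('p90 took (ms):', stats.ccs._search.took.p90)
  for (const [name, remote] of Object.entries(stats.ccs.clusters ?? {})) {
    console.log(`${name}: ${remote.status}, ${remote.nodes_count} nodes`)
  }
}

run().catch(console.log)
```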
*/
+  max_heap?: string
+  /** Total amount, in bytes, of physical memory across the nodes of the remote cluster. */
+  mem_total_in_bytes: long
+  /** Total amount of physical memory across the nodes of the remote cluster, as a human-readable string. */
+  mem_total?: string
+}
+
 export interface ClusterStatsRequest extends RequestBase {
   /** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */
   node_id?: NodeIds
@@ -16248,6 +16541,8 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase {
   status: HealthStatus
   /** Unix timestamp, in milliseconds, for the last time the cluster statistics were refreshed. */
   timestamp: long
+  /** Cross-cluster stats */
+  ccs: ClusterStatsCCSStats
 }

 export interface ConnectorConnector {
@@ -17366,7 +17661,10 @@ export type EsqlTableValuesLongDouble = double | double[]

 export type EsqlTableValuesLongValue = long | long[]

 export interface EsqlAsyncQueryRequest extends RequestBase {
-  /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. */
+  /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
+   * If `false`, the query will fail if there are any failures.
+   *
+   * To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. */
   allow_partial_results?: boolean
   /** The character to use between values within a CSV row.
   * It is valid only for the CSV format. */
@@ -17517,7 +17815,10 @@ export interface EsqlQueryRequest extends RequestBase {
   /** Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results?
   * Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. */
   drop_null_columns?: boolean
-  /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. */
+  /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
+   * If `false`, the query will fail if there are any failures.
+   *
+   * To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. */
   allow_partial_results?: boolean
   /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */
   columnar?: boolean
@@ -18025,7 +18326,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged {
   lifecycle_date?: DateTime
   lifecycle_date_millis?: EpochTime<UnitMillis>
   managed: true
-  phase: Name
+  phase?: Name
   phase_time?: DateTime
   phase_time_millis?: EpochTime<UnitMillis>
   policy?: Name
@@ -19150,7 +19451,16 @@ export interface IndicesCloseResponse {
 }

 export interface IndicesCreateRequest extends RequestBase {
-  /** Name of the index you wish to create. */
+  /** Name of the index you wish to create. 
+   * Index names must meet the following criteria:
+   *
+   * * Lowercase only
+   * * Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#`
+   * * Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions
+   * * Cannot start with `-`, `_`, or `+`
+   * * Cannot be `.` or `..`
+   * * Cannot be longer than 255 bytes (note that it is bytes, so multi-byte characters will reach the limit faster)
+   * * Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins */
   index: IndexName
   /** Period to wait for a connection to the master node.
   * If no response is received before the timeout expires, the request fails and returns an error. */
@@ -23278,16 +23588,18 @@ export interface IngestPipelineProcessor extends IngestProcessorBase {
   ignore_missing_pipeline?: boolean
 }

-export interface IngestPipelineSimulation {
+export interface IngestPipelineProcessorResult {
   doc?: IngestDocumentSimulation
   tag?: string
   processor_type?: string
-  status?: WatcherActionStatusOptions
+  status?: IngestPipelineSimulationStatusOptions
   description?: string
   ignored_error?: ErrorCause
   error?: ErrorCause
 }

+export type IngestPipelineSimulationStatusOptions = 'success' | 'error' | 'error_ignored' | 'skipped' | 'dropped'
+
 export interface IngestProcessorBase {
   /** Description of the processor.
   * Useful for describing the purpose of the processor or its configuration. */
@@ -23566,7 +23878,7 @@ export type IngestShapeType = 'geo_shape' | 'shape'

 export interface IngestSimulateDocumentResult {
   doc?: IngestDocumentSimulation
   error?: ErrorCause
-  processor_results?: IngestPipelineSimulation[]
+  processor_results?: IngestPipelineProcessorResult[]
 }

 export interface IngestSortProcessor extends IngestProcessorBase {

From b030084f244f85a0363aa4cb19ad9418e184286e Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Mon, 5 May 2025 11:54:58 -0500
Subject: [PATCH 551/647] Export helper types (#2822)

---
 index.d.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/index.d.ts b/index.d.ts
index 12d5eb23e..36a68135d 100644
--- a/index.d.ts
+++ b/index.d.ts
@@ -11,3 +11,4 @@ export * as estypes from './lib/api/types'
 export * as estypesWithBody from './lib/api/typesWithBodyKey'
 export { Client, SniffingTransport }
 export type { ClientOptions, NodeOptions } from './lib/client'
+export * as helpers from './lib/helpers'

From a86319b14d85aeba88e3a457b5767065250802a3 Mon Sep 17 00:00:00 2001
From: Josh Mock
Date: Mon, 5 May 2025 12:39:53 -0500
Subject: [PATCH 552/647] Remove dangling references to typesWithBodyKey
 (#2821)

This was removed for 9.0.
---
 docs/reference/typescript.md | 10 ----------
 index.d.ts                   |  1 -
 2 files changed, 11 deletions(-)

diff --git a/docs/reference/typescript.md b/docs/reference/typescript.md
index 29bc9eb06..880fc3e3b 100644
--- a/docs/reference/typescript.md
+++ b/docs/reference/typescript.md
@@ -13,8 +13,6 @@ The types are not 100% complete yet. Some APIs are missing (the newest ones, e.g

 The client is developed against the [latest](https://www.npmjs.com/package/typescript?activeTab=versions) version of TypeScript. Furthermore, unless you have set `skipLibCheck` to `true`, you should configure `esModuleInterop` to `true`. 
:::: - - ## Example [_example] ```ts @@ -74,7 +72,6 @@ async function run () { run().catch(console.log) ``` - ## Request & Response types [_request_response_types] You can import the full TypeScript requests & responses definitions as it follows: @@ -82,10 +79,3 @@ You can import the full TypeScript requests & responses definitions as it follow ```ts import { estypes } from '@elastic/elasticsearch' ``` - -If you need the legacy definitions with the body, you can do the following: - -```ts -import { estypesWithBody } from '@elastic/elasticsearch' -``` - diff --git a/index.d.ts b/index.d.ts index 36a68135d..8d48439a1 100644 --- a/index.d.ts +++ b/index.d.ts @@ -8,7 +8,6 @@ import SniffingTransport from './lib/sniffingTransport' export * from '@elastic/transport' export * as estypes from './lib/api/types' -export * as estypesWithBody from './lib/api/typesWithBodyKey' export { Client, SniffingTransport } export type { ClientOptions, NodeOptions } from './lib/client' export * as helpers from './lib/helpers' From a6e23fd3a89adc55a2a2af805317eece8503a954 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 8 May 2025 11:41:44 -0500 Subject: [PATCH 553/647] Drop support for Node 18 (#2827) --- .buildkite/Dockerfile | 2 +- .buildkite/Dockerfile-make | 2 +- .buildkite/pipeline.yml | 2 +- .buildkite/run-client.sh | 2 +- .github/ISSUE_TEMPLATE/bug.yaml | 2 +- .github/workflows/nodejs.yml | 2 +- README.md | 44 ++++++++++++++++----------------- 7 files changed, 28 insertions(+), 28 deletions(-) diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile index 0de3234dc..cc4eaae1a 100644 --- a/.buildkite/Dockerfile +++ b/.buildkite/Dockerfile @@ -1,4 +1,4 @@ -ARG NODE_VERSION=${NODE_VERSION:-18} +ARG NODE_VERSION=${NODE_VERSION:-20} FROM node:$NODE_VERSION # Install required tools diff --git a/.buildkite/Dockerfile-make b/.buildkite/Dockerfile-make index b171f5d03..0db4d2028 100644 --- a/.buildkite/Dockerfile-make +++ b/.buildkite/Dockerfile-make @@ -1,4 +1,4 @@ -ARG NODE_JS_VERSION=${NODE_JS_VERSION:-18} +ARG NODE_JS_VERSION=${NODE_JS_VERSION:-20} FROM node:${NODE_JS_VERSION} ARG BUILDER_UID=1000 diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 8a7e176b1..307b2f340 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -16,9 +16,9 @@ steps: matrix: setup: nodejs: - - "18" - "20" - "22" + - "23" command: ./.buildkite/run-tests.sh artifact_paths: "./junit-output/junit-*.xml" - wait: ~ diff --git a/.buildkite/run-client.sh b/.buildkite/run-client.sh index 872d57812..f210219a2 100755 --- a/.buildkite/run-client.sh +++ b/.buildkite/run-client.sh @@ -6,7 +6,7 @@ script_path=$(dirname "$(realpath -s "$0")") set -euo pipefail repo=$(pwd) -export NODE_VERSION=${NODE_VERSION:-18} +export NODE_VERSION=${NODE_VERSION:-20} echo "--- :javascript: Building Docker image" docker build \ diff --git a/.github/ISSUE_TEMPLATE/bug.yaml b/.github/ISSUE_TEMPLATE/bug.yaml index d4e41efbf..46bda9336 100644 --- a/.github/ISSUE_TEMPLATE/bug.yaml +++ b/.github/ISSUE_TEMPLATE/bug.yaml @@ -40,7 +40,7 @@ body: id: node-js-version attributes: label: Node.js version - placeholder: 18.x, 20.x, etc. + placeholder: 20.x, 22.x, etc. 
validations: required: true diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index d45bb7215..9ee524308 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -32,7 +32,7 @@ jobs: strategy: fail-fast: false matrix: - node-version: [18.x, 20.x, 22.x, 23.x] + node-version: [20.x, 22.x, 23.x] os: [ubuntu-latest, windows-latest, macOS-latest] steps: diff --git a/README.md b/README.md index cd0338f95..1c80f9cfc 100644 --- a/README.md +++ b/README.md @@ -43,17 +43,17 @@ compatible with default distributions and without guarantees made. ## Usage -* [Creating an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_creating_an_index) -* [Indexing a document](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_indexing_documents) -* [Getting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_getting_documents) -* [Searching documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_searching_documents) -* [Updating documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_updating_documents) -* [Deleting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_documents) -* [Deleting an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_an_index) +- [Creating an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_creating_an_index) +- [Indexing a document](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_indexing_documents) +- [Getting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_getting_documents) +- [Searching documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_searching_documents) +- [Updating documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_updating_documents) +- [Deleting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_documents) +- [Deleting an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_an_index) ### Node.js support -NOTE: The minimum supported version of Node.js is `v18`. +NOTE: The minimum supported version of Node.js is `v20`. The client versioning follows the Elastic Stack versioning, this means that major, minor, and patch releases are done following a precise schedule that @@ -79,27 +79,27 @@ of `^7.10.0`). | `12.x` | `April 2022` | `8.2` (early 2022) | | `14.x` | `April 2023` | `8.8` (early 2023) | | `16.x` | `September 2023` | `8.11` (late 2023) | -| `18.x` | `April 2025` | `9.2` (late 2025) | +| `18.x` | `April 2025` | `9.1` (mid 2025) | #### Browser > [!WARNING] > There is no official support for the browser environment. It exposes your Elasticsearch instance to everyone, which could lead to security issues. -We recommend that you write a lightweight proxy that uses this client instead, you can see a proxy example [here](./docs/examples/proxy). 
+> We recommend that you write a lightweight proxy that uses this client instead, you can see a proxy example [here](./docs/examples/proxy). ## Documentation -* [Introduction](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/introduction.html) -* [Usage](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#client-usage) -* [Client configuration](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-configuration.html) -* [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html) -* [Authentication](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#authentication) -* [Observability](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html) -* [Creating a child client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html) -* [Client helpers](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html) -* [Typescript support](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/typescript.html) -* [Testing](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-testing.html) -* [Examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html) +- [Introduction](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/introduction.html) +- [Usage](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#client-usage) +- [Client configuration](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-configuration.html) +- [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html) +- [Authentication](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#authentication) +- [Observability](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html) +- [Creating a child client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html) +- [Client helpers](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html) +- [Typescript support](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/typescript.html) +- [Testing](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-testing.html) +- [Examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html) ## Install multiple versions @@ -147,7 +147,7 @@ client7.info().then(console.log, console.log) ``` Finally, if you want to install the client for the next version of Elasticsearch -*(the one that lives in Elasticsearch’s main branch)*, you can use the following +_(the one that lives in Elasticsearch’s main branch)_, you can use the following command: ```sh From 4d4ffca1bad92c8ebbdd30af4d2f8ad81a14c3ed Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 19 May 2025 18:23:30 +0000 Subject: [PATCH 554/647] Update dependency zx to v8 (#2829) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/package.json b/package.json index 1f0018d3d..0f77e9fce 100644 --- a/package.json +++ b/package.json @@ -88,7 +88,7 @@ "typescript": "5.8.3", "workq": "3.0.0", "xmlbuilder2": "3.1.1", - "zx": "7.2.3" + "zx": "8.5.4" }, "dependencies": { "@elastic/transport": "^9.0.1", From 96463f1f44bc7fbd12f2aedf855f0293cb0980d0 Mon Sep 17 00:00:00 2001 From: Colleen McGinnis Date: Mon, 19 May 2025 14:01:04 -0500 Subject: [PATCH 555/647] add products to docset.yml (#2836) --- docs/docset.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/docset.yml b/docs/docset.yml index cea34c4d5..6034dc549 100644 --- a/docs/docset.yml +++ b/docs/docset.yml @@ -1,4 +1,6 @@ project: 'Node.js client' +products: + - id: elasticsearch-client exclude: - examples/proxy/README.md cross_links: From 67a52dbc3749b8fd35609d8ad6a7366e697cce8f Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 19 May 2025 19:30:42 +0000 Subject: [PATCH 556/647] Update dependency apache-arrow to v20 (#2838) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 0f77e9fce..a8172db3a 100644 --- a/package.json +++ b/package.json @@ -92,7 +92,7 @@ }, "dependencies": { "@elastic/transport": "^9.0.1", - "apache-arrow": "18.x - 19.x", + "apache-arrow": "18.x - 20.x", "tslib": "^2.4.0" }, "tap": { From d2581804eb48ef3c94ba10dca41825c39825b925 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 19 May 2025 19:32:23 +0000 Subject: [PATCH 557/647] Mention typesWithBody in breaking changes (#2839) --- docs/release-notes/breaking-changes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/release-notes/breaking-changes.md b/docs/release-notes/breaking-changes.md index 326fafdaf..1829923b8 100644 --- a/docs/release-notes/breaking-changes.md +++ b/docs/release-notes/breaking-changes.md @@ -21,9 +21,9 @@ Breaking changes can impact your Elastic applications, potentially disrupting no In 8.x, every API function had a `body` property that would provide a place to put arbitrary values that should go in the HTTP request body, even if they were not noted in the specification or documentation. In 9.0, each API function still includes an optional `body` property, but TypeScript's type checker will disallow properties that should go in the root of the object. A `querystring` parameter has also been added that behaves the same as `body`, but inserts its values into the request querystring. -**Impact**
    Some adjustments to API calls may be necessary for code that used a `body` property 8.x, especially to appease the TypeScript type checker, but it should not have any impact on any code that was not using a `body` property. +**Impact**
    Some adjustments to API calls may be necessary for code that used a `body` property in 8.x, especially to appease the TypeScript type checker, but it should not have any impact on any code that was not using a `body` property. The `estypesWithBody` export and `typesWithBodyKey` module are no longer available.

-**Action**
    Check existing code for use of the `body` property, and move any properties that should be in the root object according to the API function's request type definition. If using TypeScript, the TypeScript type checker will surface most of these issues for you. +**Action**
    Check existing code for use of the `body` property, and move any properties that should be in the root object according to the API function's request type definition. If using TypeScript, the TypeScript type checker will surface most of these issues for you. Also look for any imports of `estypesWithBody` or `typesWithBodyKey` and update them to `estypes` and `types`, respectively. :::: ::::{dropdown} Changes to API parameter collation into an HTTP request From 62c8c576b9002284359f2fea14cd4705c1c65dd7 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 19 May 2025 19:33:34 +0000 Subject: [PATCH 558/647] Update dependency semver to v7.7.2 (#2837) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index a8172db3a..c3c51dc84 100644 --- a/package.json +++ b/package.json @@ -79,7 +79,7 @@ "ora": "5.4.1", "proxy": "2.2.0", "rimraf": "6.0.1", - "semver": "7.7.1", + "semver": "7.7.2", "split2": "4.2.0", "stoppable": "1.1.0", "tap": "21.1.0", From 965e51b6305900ca8d96d0d06af70ada8ee9cee4 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 19 May 2025 19:43:27 +0000 Subject: [PATCH 559/647] Update dependency @types/node to v22.15.19 (#2828) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index c3c51dc84..14f25c972 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.15.3", + "@types/node": "22.15.19", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 0a14ecca4e10f18ec25e358a38bdb23c44b0d374 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 19 May 2025 20:50:38 +0100 Subject: [PATCH 560/647] Auto-generated API code (#2833) --- docs/reference/api-reference.md | 135 +++++++++++--------------------- src/api/api/inference.ts | 30 +++---- src/api/types.ts | 23 ++++-- 3 files changed, 78 insertions(+), 110 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index b785ce7ba..b86f8a5af 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -2895,12 +2895,13 @@ client.cat.nodes({ ... }) - **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - **`full_id` (Optional, boolean | string)**: If `true`, return the full node ID. If `false`, return the shortened node ID. - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. 
+- **`h` (Optional, Enum("build" | "completion.size" | "cpu" | "disk.avail" | "disk.total" | "disk.used" | "disk.used_percent" | "fielddata.evictions" | "fielddata.memory_size" | "file_desc.current" | "file_desc.max" | "file_desc.percent" | "flush.total" | "flush.total_time" | "get.current" | "get.exists_time" | "get.exists_total" | "get.missing_time" | "get.missing_total" | "get.time" | "get.total" | "heap.current" | "heap.max" | "heap.percent" | "http_address" | "id" | "indexing.delete_current" | "indexing.delete_time" | "indexing.delete_total" | "indexing.index_current" | "indexing.index_failed" | "indexing.index_failed_due_to_version_conflict" | "indexing.index_time" | "indexing.index_total" | "ip" | "jdk" | "load_1m" | "load_5m" | "load_15m" | "mappings.total_count" | "mappings.total_estimated_overhead_in_bytes" | "master" | "merges.current" | "merges.current_docs" | "merges.current_size" | "merges.total" | "merges.total_docs" | "merges.total_size" | "merges.total_time" | "name" | "node.role" | "pid" | "port" | "query_cache.memory_size" | "query_cache.evictions" | "query_cache.hit_count" | "query_cache.miss_count" | "ram.current" | "ram.max" | "ram.percent" | "refresh.total" | "refresh.time" | "request_cache.memory_size" | "request_cache.evictions" | "request_cache.hit_count" | "request_cache.miss_count" | "script.compilations" | "script.cache_evictions" | "search.fetch_current" | "search.fetch_time" | "search.fetch_total" | "search.open_contexts" | "search.query_current" | "search.query_time" | "search.query_total" | "search.scroll_current" | "search.scroll_time" | "search.scroll_total" | "segments.count" | "segments.fixed_bitset_memory" | "segments.index_writer_memory" | "segments.memory" | "segments.version_map_memory" | "shard_stats.total_count" | "suggest.current" | "suggest.time" | "suggest.total" | "uptime" | "version") | Enum("build" | "completion.size" | "cpu" | "disk.avail" | "disk.total" | "disk.used" | "disk.used_percent" | "fielddata.evictions" | "fielddata.memory_size" | "file_desc.current" | "file_desc.max" | "file_desc.percent" | "flush.total" | "flush.total_time" | "get.current" | "get.exists_time" | "get.exists_total" | "get.missing_time" | "get.missing_total" | "get.time" | "get.total" | "heap.current" | "heap.max" | "heap.percent" | "http_address" | "id" | "indexing.delete_current" | "indexing.delete_time" | "indexing.delete_total" | "indexing.index_current" | "indexing.index_failed" | "indexing.index_failed_due_to_version_conflict" | "indexing.index_time" | "indexing.index_total" | "ip" | "jdk" | "load_1m" | "load_5m" | "load_15m" | "mappings.total_count" | "mappings.total_estimated_overhead_in_bytes" | "master" | "merges.current" | "merges.current_docs" | "merges.current_size" | "merges.total" | "merges.total_docs" | "merges.total_size" | "merges.total_time" | "name" | "node.role" | "pid" | "port" | "query_cache.memory_size" | "query_cache.evictions" | "query_cache.hit_count" | "query_cache.miss_count" | "ram.current" | "ram.max" | "ram.percent" | "refresh.total" | "refresh.time" | "request_cache.memory_size" | "request_cache.evictions" | "request_cache.hit_count" | "request_cache.miss_count" | "script.compilations" | "script.cache_evictions" | "search.fetch_current" | "search.fetch_time" | "search.fetch_total" | "search.open_contexts" | "search.query_current" | "search.query_time" | "search.query_total" | "search.scroll_current" | "search.scroll_time" | "search.scroll_total" | "segments.count" | "segments.fixed_bitset_memory" | "segments.index_writer_memory" | 
"segments.memory" | "segments.version_map_memory" | "shard_stats.total_count" | "suggest.current" | "suggest.time" | "suggest.total" | "uptime" | "version")[])**: A list of columns names to display. +It supports simple wildcards. +- **`s` (Optional, string | string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. ## client.cat.pendingTasks [_cat.pending_tasks] Get pending task information. @@ -5438,7 +5439,7 @@ If no index is specified or the index does not have a default analyzer, the anal - **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index. - **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter. -- **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer. +- **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name, unicode_set_filter } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer. - **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details. - **`field` (Optional, string)**: Field used to derive the analyzer. To use this parameter, you must specify an index. @@ -5816,6 +5817,16 @@ client.indices.deleteDataStream({ name }) - **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values,such as `open,hidden`. +## client.indices.deleteDataStreamOptions [_indices.delete_data_stream_options] +Deletes the data stream options of the selected data streams. + +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) + +```ts +client.indices.deleteDataStreamOptions() +``` + + ## client.indices.deleteIndexTemplate [_indices.delete_index_template] Delete an index template. The provided may contain multiple template names separated by a comma. If multiple template @@ -6273,6 +6284,16 @@ Supports a list of values, such as `open,hidden`. - **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
- **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned. +## client.indices.getDataStreamOptions [_indices.get_data_stream_options] +Returns the data stream options of the selected data streams. + +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) + +```ts +client.indices.getDataStreamOptions() +``` + + ## client.indices.getFieldMapping [_indices.get_field_mapping] Get mapping definitions. Retrieves mapping definitions for one or more fields. @@ -6628,6 +6649,16 @@ error. - **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.indices.putDataStreamOptions [_indices.put_data_stream_options] +Updates the data stream options of the selected data streams. + +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) + +```ts +client.indices.putDataStreamOptions() +``` + + ## client.indices.putIndexTemplate [_indices.put_index_template] Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. @@ -7495,6 +7526,17 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ## client.inference.chatCompletionUnified [_inference.chat_completion_unified] Perform chat completion inference +The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. +It only works with the `chat_completion` task type for `openai` and `elastic` inference services. + +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + +NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. +The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. +The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. +If you use the `openai` service or the `elastic` service, use the Chat completion inference API. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference) ```ts @@ -7594,11 +7636,6 @@ These settings are specific to the task type you specified and override the task ## client.inference.put [_inference.put] Create an inference endpoint. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. 
-Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. @@ -7622,12 +7659,6 @@ Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud) ```ts @@ -7653,12 +7684,6 @@ Creates an inference endpoint to perform an inference task with the `amazonbedro >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock) ```ts @@ -7681,12 +7706,6 @@ Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic) ```ts @@ -7710,12 +7729,6 @@ Create an Azure AI studio inference endpoint. 
Create an inference endpoint to perform an inference task with the `azureaistudio` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio) ```ts @@ -7745,12 +7758,6 @@ The list of chat completion models that you can choose from in your Azure OpenAI The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai) ```ts @@ -7774,12 +7781,6 @@ Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere) ```ts @@ -7873,12 +7874,6 @@ Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
- [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio) ```ts @@ -7899,12 +7894,6 @@ Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai) ```ts @@ -7941,12 +7930,6 @@ The following models are recommended for the Hugging Face service: * `multilingual-e5-base` * `multilingual-e5-small` -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face) ```ts @@ -7970,12 +7953,6 @@ Create an inference endpoint to perform an inference task with the `jinaai` serv To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai) ```ts @@ -7998,12 +7975,6 @@ Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
- [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral) ```ts @@ -8025,12 +7996,6 @@ Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai) ```ts @@ -8080,12 +8045,6 @@ Create an inference endpoint to perform an inference task with the `watsonxai` s You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx) ```ts diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 5b65421f5..394446967 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -364,7 +364,7 @@ export default class Inference { } /** - * Perform chat completion inference + * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API. 
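+   *
+   * A minimal usage sketch (illustrative only, not part of this change; the endpoint ID and message below are assumed values):
+   *
+   * @example
+   * // Assumes an existing `chat_completion` inference endpoint named 'my-chat-endpoint'
+   * const response = await client.inference.chatCompletionUnified({
+   *   inference_id: 'my-chat-endpoint',
+   *   chat_completion_request: { messages: [{ role: 'user', content: 'Say hello' }] }
+   * })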
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference | Elasticsearch API documentation} */ async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -643,7 +643,7 @@ export default class Inference { } /** - * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} */ async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -698,7 +698,7 @@ export default class Inference { } /** - * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. 
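+   *
+   * A hedged usage sketch (the endpoint ID and all service settings below are placeholders, not values from this change):
+   *
+   * @example
+   * await client.inference.putAlibabacloud({
+   *   task_type: 'text_embedding',
+   *   alibabacloud_inference_id: 'my-alibabacloud-endpoint',
+   *   service: 'alibabacloud-ai-search',
+   *   service_settings: { api_key: '<api-key>', host: '<host>', service_id: '<service-id>', workspace: '<workspace>' }
+   * })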
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud | Elasticsearch API documentation} */ async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -756,7 +756,7 @@ export default class Inference { } /** - * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock | Elasticsearch API documentation} */ async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -814,7 +814,7 @@ export default class Inference { } /** - * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic | Elasticsearch API documentation} */ async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -872,7 +872,7 @@ export default class Inference { } /** - * Create an Azure AI studio inference endpoint. 
Create an inference endpoint to perform an inference task with the `azureaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio | Elasticsearch API documentation} */ async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -930,7 +930,7 @@ export default class Inference { } /** - * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). 
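+   *
+   * A usage sketch under assumed placeholder values (the deployment, resource, and key below are not from this change):
+   *
+   * @example
+   * await client.inference.putAzureopenai({
+   *   task_type: 'completion',
+   *   azureopenai_inference_id: 'my-azure-openai-endpoint',
+   *   service: 'azureopenai',
+   *   service_settings: { api_key: '<api-key>', resource_name: '<resource>', deployment_id: '<deployment>', api_version: '2024-02-01' }
+   * })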
 * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai | Elasticsearch API documentation}
   */
  async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -988,7 +988,7 @@ export default class Inference {
   }
 
   /**
-   * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+   * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service.
    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere | Elasticsearch API documentation}
    */
  async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1162,7 +1162,7 @@ export default class Inference {
   }
 
   /**
-   * Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+   * Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service.
    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio | Elasticsearch API documentation}
    */
  async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1220,7 +1220,7 @@ export default class Inference {
   }
 
   /**
-   * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+   * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service.
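+   *
+   * A usage sketch with assumed placeholder values (none of the IDs or settings below come from this change):
+   *
+   * @example
+   * await client.inference.putGooglevertexai({
+   *   task_type: 'text_embedding',
+   *   googlevertexai_inference_id: 'my-google-vertex-ai-endpoint',
+   *   service: 'googlevertexai',
+   *   service_settings: { service_account_json: '<service-account-json>', model_id: 'text-embedding-004', location: 'us-central1', project_id: '<project>' }
+   * })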
 * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai | Elasticsearch API documentation}
   */
  async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1278,7 +1278,7 @@ export default class Inference {
   }
 
   /**
-   * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+   * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small`
   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face | Elasticsearch API documentation}
   */
  async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1336,7 +1336,7 @@ export default class Inference {
   }
 
   /**
-   * Create a JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+   * Create a JinaAI inference endpoint. 
Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai | Elasticsearch API documentation} */ async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1394,7 +1394,7 @@ export default class Inference { } /** - * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral | Elasticsearch API documentation} */ async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1452,7 +1452,7 @@ export default class Inference { } /** - * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai | Elasticsearch API documentation} */ async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1568,7 +1568,7 @@ export default class Inference { } /** - * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. 
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx | Elasticsearch API documentation} */ async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index aed848fb1..5cc78eb73 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -7072,6 +7072,7 @@ export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBa type: 'icu_normalizer' mode?: AnalysisIcuNormalizationMode name?: AnalysisIcuNormalizationType + unicode_set_filter?: string } export type AnalysisIcuNormalizationMode = 'decompose' | 'compose' @@ -10117,6 +10118,10 @@ export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] +export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 
'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string + +export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[] + export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } @@ -13173,15 +13178,16 @@ export interface CatNodesRequest extends CatCatRequestBase { full_id?: boolean | string /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names - /** List of columns that determine how the table should be sorted. + /** A comma-separated list of columns names to display. + * It supports simple wildcards. */ + h?: CatCatNodeColumns + /** A comma-separated list of column names or aliases that determines the sort order. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** Unit used to display time values. */ + /** The unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never, time?: never } @@ -22327,7 +22333,9 @@ export interface InferenceRateLimitSetting { } export interface InferenceRequestChatCompletion { - /** A list of objects representing the conversation. */ + /** A list of objects representing the conversation. + * Requests should generally only add new messages from the user (role `user`). + * The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation. */ messages: InferenceMessage[] /** The ID of the model to use. */ model?: string @@ -24545,7 +24553,8 @@ export interface MigrationPostFeatureUpgradeRequest extends RequestBase { export interface MigrationPostFeatureUpgradeResponse { accepted: boolean - features: MigrationPostFeatureUpgradeMigrationFeature[] + features?: MigrationPostFeatureUpgradeMigrationFeature[] + reason?: string } export interface MlAdaptiveAllocationsSettings { From 29bf43e96ef836c8e8cc83ad2fd2e2220df0e04a Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Tue, 20 May 2025 12:00:45 +0400 Subject: [PATCH 561/647] Revert "Auto-generated API code (#2833)" (#2842) This reverts commit 0a14ecca4e10f18ec25e358a38bdb23c44b0d374 which was breaking the docs build because of a large table. --- docs/reference/api-reference.md | 135 +++++++++++++++++++++----------- src/api/api/inference.ts | 30 +++---- src/api/types.ts | 23 ++---- 3 files changed, 110 insertions(+), 78 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index b86f8a5af..b785ce7ba 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -2895,13 +2895,12 @@ client.cat.nodes({ ... }) - **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - **`full_id` (Optional, boolean | string)**: If `true`, return the full node ID. If `false`, return the shortened node ID. - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. 
-- **`h` (Optional, Enum("build" | "completion.size" | "cpu" | "disk.avail" | "disk.total" | "disk.used" | "disk.used_percent" | "fielddata.evictions" | "fielddata.memory_size" | "file_desc.current" | "file_desc.max" | "file_desc.percent" | "flush.total" | "flush.total_time" | "get.current" | "get.exists_time" | "get.exists_total" | "get.missing_time" | "get.missing_total" | "get.time" | "get.total" | "heap.current" | "heap.max" | "heap.percent" | "http_address" | "id" | "indexing.delete_current" | "indexing.delete_time" | "indexing.delete_total" | "indexing.index_current" | "indexing.index_failed" | "indexing.index_failed_due_to_version_conflict" | "indexing.index_time" | "indexing.index_total" | "ip" | "jdk" | "load_1m" | "load_5m" | "load_15m" | "mappings.total_count" | "mappings.total_estimated_overhead_in_bytes" | "master" | "merges.current" | "merges.current_docs" | "merges.current_size" | "merges.total" | "merges.total_docs" | "merges.total_size" | "merges.total_time" | "name" | "node.role" | "pid" | "port" | "query_cache.memory_size" | "query_cache.evictions" | "query_cache.hit_count" | "query_cache.miss_count" | "ram.current" | "ram.max" | "ram.percent" | "refresh.total" | "refresh.time" | "request_cache.memory_size" | "request_cache.evictions" | "request_cache.hit_count" | "request_cache.miss_count" | "script.compilations" | "script.cache_evictions" | "search.fetch_current" | "search.fetch_time" | "search.fetch_total" | "search.open_contexts" | "search.query_current" | "search.query_time" | "search.query_total" | "search.scroll_current" | "search.scroll_time" | "search.scroll_total" | "segments.count" | "segments.fixed_bitset_memory" | "segments.index_writer_memory" | "segments.memory" | "segments.version_map_memory" | "shard_stats.total_count" | "suggest.current" | "suggest.time" | "suggest.total" | "uptime" | "version") | Enum("build" | "completion.size" | "cpu" | "disk.avail" | "disk.total" | "disk.used" | "disk.used_percent" | "fielddata.evictions" | "fielddata.memory_size" | "file_desc.current" | "file_desc.max" | "file_desc.percent" | "flush.total" | "flush.total_time" | "get.current" | "get.exists_time" | "get.exists_total" | "get.missing_time" | "get.missing_total" | "get.time" | "get.total" | "heap.current" | "heap.max" | "heap.percent" | "http_address" | "id" | "indexing.delete_current" | "indexing.delete_time" | "indexing.delete_total" | "indexing.index_current" | "indexing.index_failed" | "indexing.index_failed_due_to_version_conflict" | "indexing.index_time" | "indexing.index_total" | "ip" | "jdk" | "load_1m" | "load_5m" | "load_15m" | "mappings.total_count" | "mappings.total_estimated_overhead_in_bytes" | "master" | "merges.current" | "merges.current_docs" | "merges.current_size" | "merges.total" | "merges.total_docs" | "merges.total_size" | "merges.total_time" | "name" | "node.role" | "pid" | "port" | "query_cache.memory_size" | "query_cache.evictions" | "query_cache.hit_count" | "query_cache.miss_count" | "ram.current" | "ram.max" | "ram.percent" | "refresh.total" | "refresh.time" | "request_cache.memory_size" | "request_cache.evictions" | "request_cache.hit_count" | "request_cache.miss_count" | "script.compilations" | "script.cache_evictions" | "search.fetch_current" | "search.fetch_time" | "search.fetch_total" | "search.open_contexts" | "search.query_current" | "search.query_time" | "search.query_total" | "search.scroll_current" | "search.scroll_time" | "search.scroll_total" | "segments.count" | "segments.fixed_bitset_memory" | "segments.index_writer_memory" | 
"segments.memory" | "segments.version_map_memory" | "shard_stats.total_count" | "suggest.current" | "suggest.time" | "suggest.total" | "uptime" | "version")[])**: A list of columns names to display. -It supports simple wildcards. -- **`s` (Optional, string | string[])**: A list of column names or aliases that determines the sort order. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. ## client.cat.pendingTasks [_cat.pending_tasks] Get pending task information. @@ -5439,7 +5438,7 @@ If no index is specified or the index does not have a default analyzer, the anal - **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index. - **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter. -- **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name, unicode_set_filter } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer. +- **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer. - **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details. - **`field` (Optional, string)**: Field used to derive the analyzer. To use this parameter, you must specify an index. @@ -5817,16 +5816,6 @@ client.indices.deleteDataStream({ name }) - **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values,such as `open,hidden`. -## client.indices.deleteDataStreamOptions [_indices.delete_data_stream_options] -Deletes the data stream options of the selected data streams. - -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) - -```ts -client.indices.deleteDataStreamOptions() -``` - - ## client.indices.deleteIndexTemplate [_indices.delete_index_template] Delete an index template. The provided may contain multiple template names separated by a comma. If multiple template @@ -6284,16 +6273,6 @@ Supports a list of values, such as `open,hidden`. 
- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned. -## client.indices.getDataStreamOptions [_indices.get_data_stream_options] -Returns the data stream options of the selected data streams. - -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) - -```ts -client.indices.getDataStreamOptions() -``` - - ## client.indices.getFieldMapping [_indices.get_field_mapping] Get mapping definitions. Retrieves mapping definitions for one or more fields. @@ -6649,16 +6628,6 @@ error. - **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -## client.indices.putDataStreamOptions [_indices.put_data_stream_options] -Updates the data stream options of the selected data streams. - -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) - -```ts -client.indices.putDataStreamOptions() -``` - - ## client.indices.putIndexTemplate [_indices.put_index_template] Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. @@ -7526,17 +7495,6 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ## client.inference.chatCompletionUnified [_inference.chat_completion_unified] Perform chat completion inference -The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. -It only works with the `chat_completion` task type for `openai` and `elastic` inference services. - -IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. -For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - -NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. -The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. -The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. -If you use the `openai` service or the `elastic` service, use the Chat completion inference API. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference) ```ts @@ -7636,6 +7594,11 @@ These settings are specific to the task type you specified and override the task ## client.inference.put [_inference.put] Create an inference endpoint. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. 
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. @@ -7659,6 +7622,12 @@ Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud) ```ts @@ -7684,6 +7653,12 @@ Creates an inference endpoint to perform an inference task with the `amazonbedro >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock) ```ts @@ -7706,6 +7681,12 @@ Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
+ [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic) ```ts @@ -7729,6 +7710,12 @@ Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio) ```ts @@ -7758,6 +7745,12 @@ The list of chat completion models that you can choose from in your Azure OpenAI The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai) ```ts @@ -7781,6 +7774,12 @@ Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere) ```ts @@ -7874,6 +7873,12 @@ Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. 
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio) ```ts @@ -7894,6 +7899,12 @@ Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai) ```ts @@ -7930,6 +7941,12 @@ The following models are recommended for the Hugging Face service: * `multilingual-e5-base` * `multilingual-e5-small` +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face) ```ts @@ -7953,6 +7970,12 @@ Create an inference endpoint to perform an inference task with the `jinaai` serv To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai) ```ts @@ -7975,6 +7998,12 @@ Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
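For illustration only, a sketch of creating a Mistral text embedding endpoint follows; the endpoint ID, API key, and model name are placeholder values, not settings taken from this changeset.

```ts
// Sketch: create a `text_embedding` endpoint backed by the `mistral` service.
// All identifiers and credentials below are placeholders.
await client.inference.putMistral({
  task_type: 'text_embedding',
  mistral_inference_id: 'my-mistral-embeddings',
  service: 'mistral',
  service_settings: {
    api_key: 'your-mistral-api-key',
    model: 'mistral-embed'
  }
})
```

An equivalent endpoint can also be created through the generic `client.inference.put` API.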
+ [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral) ```ts @@ -7996,6 +8025,12 @@ Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai) ```ts @@ -8045,6 +8080,12 @@ Create an inference endpoint to perform an inference task with the `watsonxai` s You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx) ```ts diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 394446967..5b65421f5 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -364,7 +364,7 @@ export default class Inference { } /** - * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API. 
+ * Perform chat completion inference * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference | Elasticsearch API documentation} */ async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -643,7 +643,7 @@ export default class Inference { } /** - * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} */ async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -698,7 +698,7 @@ export default class Inference { } /** - * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. + * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud | Elasticsearch API documentation} */ async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -756,7 +756,7 @@ export default class Inference { } /** - * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. + * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock | Elasticsearch API documentation} */ async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -814,7 +814,7 @@ export default class Inference { } /** - * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. + * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic | Elasticsearch API documentation} */ async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -872,7 +872,7 @@ export default class Inference { } /** - * Create an Azure AI studio inference endpoint. 
Create an inference endpoint to perform an inference task with the `azureaistudio` service. + * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio | Elasticsearch API documentation} */ async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -930,7 +930,7 @@ export default class Inference { } /** - * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). + * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai | Elasticsearch API documentation} */ async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -988,7 +988,7 @@ export default class Inference { } /** - * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. + * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere | Elasticsearch API documentation} */ async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1162,7 +1162,7 @@ export default class Inference { } /** - * Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. + * Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio | Elasticsearch API documentation} */ async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1220,7 +1220,7 @@ export default class Inference { } /** - * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. + * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai | Elasticsearch API documentation} */ async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1278,7 +1278,7 @@ export default class Inference { } /** - * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` + * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face | Elasticsearch API documentation} */ async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1336,7 +1336,7 @@ export default class Inference { } /** - * Create an JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . + * Create an JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. 
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai | Elasticsearch API documentation} */ async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1394,7 +1394,7 @@ export default class Inference { } /** - * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. + * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral | Elasticsearch API documentation} */ async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1452,7 +1452,7 @@ export default class Inference { } /** - * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. + * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai | Elasticsearch API documentation} */ async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1568,7 +1568,7 @@ export default class Inference { } /** - * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. + * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. 
You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx | Elasticsearch API documentation} */ async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index 5cc78eb73..aed848fb1 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -7072,7 +7072,6 @@ export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBa type: 'icu_normalizer' mode?: AnalysisIcuNormalizationMode name?: AnalysisIcuNormalizationType - unicode_set_filter?: string } export type AnalysisIcuNormalizationMode = 'decompose' | 'compose' @@ -10118,10 +10117,6 @@ export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] -export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 
'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string - -export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[] - export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } @@ -13178,16 +13173,15 @@ export interface CatNodesRequest extends CatCatRequestBase { full_id?: boolean | string /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean - /** A comma-separated list of columns names to display. - * It supports simple wildcards. */ - h?: CatCatNodeColumns - /** A comma-separated list of column names or aliases that determines the sort order. + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names - /** The period to wait for a connection to the master node. */ + /** Period to wait for a connection to the master node. */ master_timeout?: Duration - /** The unit used to display time values. */ + /** Unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never, time?: never } @@ -22333,9 +22327,7 @@ export interface InferenceRateLimitSetting { } export interface InferenceRequestChatCompletion { - /** A list of objects representing the conversation. - * Requests should generally only add new messages from the user (role `user`). - * The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation. */ + /** A list of objects representing the conversation. */ messages: InferenceMessage[] /** The ID of the model to use. */ model?: string @@ -24553,8 +24545,7 @@ export interface MigrationPostFeatureUpgradeRequest extends RequestBase { export interface MigrationPostFeatureUpgradeResponse { accepted: boolean - features?: MigrationPostFeatureUpgradeMigrationFeature[] - reason?: string + features: MigrationPostFeatureUpgradeMigrationFeature[] } export interface MlAdaptiveAllocationsSettings { From ad7e1b922bd95786bc115602bccf2afd1950cdc9 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 20 May 2025 18:08:27 +0000 Subject: [PATCH 562/647] Bump to 9.0.2 (#2843) --- docs/release-notes/index.md | 6 ++++++ package.json | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md index 75c68cb49..8582db6cb 100644 --- a/docs/release-notes/index.md +++ b/docs/release-notes/index.md @@ -20,6 +20,12 @@ To check for security updates, go to [Security announcements for the Elastic sta % ### Fixes [elasticsearch-javascript-client-next-fixes] % \* +## 9.0.2 + +### Fixes [elasticsearch-javascript-client-9.0.2-fixes] + +**Remove dangling references to `typesWithBodyKey`:** the `typesWithBodyKey.ts` file and `estypesWithBody` export were removed in 9.0.0 but were still being referenced in the `index.d.ts` file that declares TypeScript types. This reference has been removed. + ## 9.0.1 ### Fixes [elasticsearch-javascript-client-9.0.1-fixes] diff --git a/package.json b/package.json index 14f25c972..264f2cecb 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "9.0.1", - "versionCanary": "9.0.1-canary.0", + "version": "9.0.2", + "versionCanary": "9.0.2-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", "types": "index.d.ts", From a1dc6f55ee04134384e23b937f200c792b64811e Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 20 May 2025 20:17:04 +0100 Subject: [PATCH 563/647] Auto-generated API code (#2845) --- docs/reference/api-reference.md | 1929 +++++++++++++++---------------- src/api/api/inference.ts | 30 +- src/api/types.ts | 23 +- 3 files changed, 975 insertions(+), 1007 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index b785ce7ba..a658dbb00 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -131,17 +131,17 @@ client.bulk({ ... }) #### Request (object) [_request_bulk] - **`index` (Optional, string)**: The name of the data stream, index, or index alias to perform bulk actions on. 
-- **`operations` (Optional, { index, create, update, delete } \| { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } \| object[])**
- **`include_source_on_error` (Optional, boolean)**: If `true`, the document source is included in the error message when a parsing error occurs.
- **`list_executed_pipelines` (Optional, boolean)**: If `true`, the response will include the ingest pipelines that were run for each index or create.
- **`pipeline` (Optional, string)**: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`.
- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return.
- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response.
You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`timeout` (Optional, string \| -1 \| 0)**: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. - **`require_alias` (Optional, boolean)**: If `true`, the request's actions must target an index alias. - **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created). @@ -158,7 +158,7 @@ client.clearScroll({ ... }) #### Request (object) [_request_clear_scroll] -- **`scroll_id` (Optional, string | string[])**: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. +- **`scroll_id` (Optional, string \| string[])**: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. ## client.closePointInTime [_close_point_in_time] Close a point in time. @@ -200,14 +200,14 @@ client.count({ ... }) #### Request (object) [_request_count] -- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter. 
- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. -- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded, or aliased indices are ignored when frozen. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. @@ -306,16 +306,16 @@ client.create({ id, index }) - **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. - **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. - **`include_source_on_error` (Optional, boolean)**: True or false if to include the document source in the error message in case of parsing errors. -- **`op_type` (Optional, Enum("index" | "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. 
+- **`op_type` (Optional, Enum("index" \| "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. - **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. - **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. - **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created). - **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. -- **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. +- **`timeout` (Optional, string \| -1 \| 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. - **`version` (Optional, number)**: The explicit version number for concurrency control. It must be a non-negative long number. 
-- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. ## client.delete [_delete] Delete a document. @@ -371,12 +371,12 @@ client.delete({ id, index }) - **`index` (string)**: The name of the target index. - **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. - **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. - **`version` (Optional, number)**: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. -- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The minimum number of shard copies that must be active before proceeding with the operation. 
You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. ## client.deleteByQuery [_delete_by_query] Delete documents. @@ -469,17 +469,17 @@ client.deleteByQuery({ index }) #### Request (object) [_request_delete_by_query] -- **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`index` (string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. - **`max_docs` (Optional, number)**: The maximum number of documents to delete. - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to delete specified with Query DSL. - **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. -- **`conflicts` (Optional, Enum("abort" | "proceed"))**: What to do if delete by query hits version conflicts: `abort` or `proceed`. -- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`conflicts` (Optional, Enum("abort" \| "proceed"))**: What to do if delete by query hits version conflicts: `abort` or `proceed`. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. 
This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - **`from` (Optional, number)**: Skips the specified number of documents. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. @@ -489,17 +489,17 @@ client.deleteByQuery({ index }) - **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - **`q` (Optional, string)**: A query in the Lucene query string syntax. -- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. +- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling. - **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation. -- **`search_timeout` (Optional, string | -1 | 0)**: The explicit timeout for each search request. It defaults to no timeout. -- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. -- **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. +- **`search_timeout` (Optional, string \| -1 \| 0)**: The explicit timeout for each search request. It defaults to no timeout. +- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. +- **`slices` (Optional, number \| Enum("auto"))**: The number of slices this task should be divided into. - **`sort` (Optional, string[])**: A list of `:` pairs. - **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes. - **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. 
Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. -- **`timeout` (Optional, string | -1 | 0)**: The period each deletion request waits for active shards. +- **`timeout` (Optional, string \| -1 \| 0)**: The period each deletion request waits for active shards. - **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit. -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. - **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. ## client.deleteByQueryRethrottle [_delete_by_query_rethrottle] @@ -517,7 +517,7 @@ client.deleteByQueryRethrottle({ task_id }) #### Request (object) [_request_delete_by_query_rethrottle] -- **`task_id` (string | number)**: The ID for the task. +- **`task_id` (string \| number)**: The ID for the task. - **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. ## client.deleteScript [_delete_script] @@ -534,8 +534,8 @@ client.deleteScript({ id }) #### Request (object) [_request_delete_script] - **`id` (string)**: The identifier for the stored script or search template. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. ## client.exists [_exists] Check a document. 
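As a hedged illustration (inside an async function, with an already-configured `Client` instance named `client`; the index and document ID are hypothetical):

```js
// Sketch only: "my-index" and id "1" are hypothetical.
const found = await client.exists({ index: 'my-index', id: '1' })
console.log(found) // true if the document exists, false otherwise
```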
@@ -573,12 +573,12 @@ client.exists({ id, index }) - **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. -- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. -- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. +- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. - **`version` (Optional, number)**: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. -- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. ## client.existsSource [_exists_source] Check for a document source. @@ -607,11 +607,11 @@ client.existsSource({ id, index }) - **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). 
- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. -- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. -- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. -- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. +- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude in the response. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. - **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. -- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. ## client.explain [_explain] Explain a document match result. @@ -632,15 +632,15 @@ client.explain({ id, index }) - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. - **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. -- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. 
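For context on how these parameters combine, a minimal `client.explain` sketch (inside an async function, with a configured `client`; the index, ID, and query are hypothetical):

```js
// Sketch only: explains why document "1" does or does not match the query.
const response = await client.explain({
  index: 'my-index',
  id: '1',
  query: { match: { title: 'test' } }
})
console.log(response.matched, response.explanation)
```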
-- **`_source` (Optional, boolean | string | string[])**: `True` or `false` to return the `_source` field or not or a list of fields to return. -- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return in the response. +- **`_source` (Optional, boolean \| string \| string[])**: `True` or `false` to return the `_source` field or not or a list of fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return in the response. - **`q` (Optional, string)**: The query in the Lucene query string syntax. ## client.fieldCaps [_field_caps] @@ -661,12 +661,12 @@ client.fieldCaps({ ... }) #### Request (object) [_request_field_caps] -- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. -- **`fields` (Optional, string | string[])**: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. +- **`fields` (Optional, string \| string[])**: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. - **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. 
IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. - **`runtime_mappings` (Optional, Record)**: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. - **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. - **`include_unmapped` (Optional, boolean)**: If true, unmapped fields are included in the response. - **`filters` (Optional, string)**: A list of filters to apply to the response. @@ -749,12 +749,12 @@ client.get({ id, index }) - **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. -- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. -- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. 
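A minimal sketch of `client.get` with source filtering (inside an async function, with a configured `client`; the index, ID, and field names are hypothetical):

```js
// Sketch only: fetch document "1" but return just two source fields.
const doc = await client.get({
  index: 'my-index',
  id: '1',
  _source_includes: ['title', 'created_at']
})
console.log(doc.found, doc._source)
```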
-- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can't be returned;if specified, the request fails. +- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can't be returned;if specified, the request fails. - **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. -- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. ## client.getScript [_get_script] Get a script or search template. @@ -770,7 +770,7 @@ client.getScript({ id }) #### Request (object) [_request_get_script] - **`id` (string)**: The identifier for the stored script or search template. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. ## client.getScriptContext [_get_script_context] Get script contexts. @@ -825,12 +825,12 @@ client.getSource({ id, index }) - **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. -- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. 
-- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. -- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. -- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. +- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude in the response. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. - **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. -- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. ## client.healthReport [_health_report] Get the cluster health. @@ -861,8 +861,8 @@ client.healthReport({ ... }) #### Request (object) [_request_health_report] -- **`feature` (Optional, string | string[])**: A feature of the cluster, as returned by the top-level health report API. -- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout. +- **`feature` (Optional, string \| string[])**: A feature of the cluster, as returned by the top-level health report API. +- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout. - **`verbose` (Optional, boolean)**: Opt-in for more information about the health of the system. - **`size` (Optional, number)**: Limit the number of affected resources the health report API returns. @@ -1002,14 +1002,14 @@ client.index({ index }) - **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. - **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. - **`include_source_on_error` (Optional, boolean)**: True or false if to include the document source in the error message in case of parsing errors. -- **`op_type` (Optional, Enum("index" | "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. +- **`op_type` (Optional, Enum("index" \| "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. - **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. 
If a final pipeline is configured it will always run, regardless of the value of this parameter. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. - **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. -- **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. +- **`timeout` (Optional, string \| -1 \| 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. - **`version` (Optional, number)**: An explicit version number for concurrency control. It must be a non-negative long number. -- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. - **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. ## client.info [_info] @@ -1061,16 +1061,16 @@ client.mget({ ... }) - **`index` (Optional, string)**: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. 
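A minimal `client.mget` sketch using the `ids` shortcut (inside an async function, with a configured `client`; the index and IDs are hypothetical):

```js
// Sketch only: retrieve several documents from one index in a single request.
const response = await client.mget({
  index: 'my-index',
  ids: ['1', '2', '3']
})
for (const doc of response.docs) {
  console.log(doc._id, doc.found)
}
```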
- **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: The documents you want to retrieve. Required if no index is specified in the request URI. -- **`ids` (Optional, string | string[])**: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. +- **`ids` (Optional, string \| string[])**: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. - **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. - **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - **`refresh` (Optional, boolean)**: If `true`, the request refreshes relevant shards before retrieving documents. - **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. -- **`_source` (Optional, boolean | string | string[])**: True or false to return the `_source` field or not, or a list of fields to return. -- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. -- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -- **`stored_fields` (Optional, string | string[])**: If `true`, retrieves the document fields stored in the index rather than the document `_source`. +- **`_source` (Optional, boolean \| string \| string[])**: True or false to return the `_source` field or not, or a list of fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string \| string[])**: If `true`, retrieves the document fields stored in the index rather than the document `_source`. ## client.msearch [_msearch] Run multiple searches. @@ -1100,11 +1100,11 @@ client.msearch({ ... }) #### Request (object) [_request_msearch] -- **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases to search. 
-- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** +- **`index` (Optional, string \| string[])**: List of data streams, indices, and index aliases to search. +- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** - **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. - **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. - **`include_named_queries_score` (Optional, boolean)**: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. @@ -1113,7 +1113,7 @@ client.msearch({ ... }) - **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. - **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. - **`routing` (Optional, string)**: Custom routing value used to route search operations to a specific shard. -- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. +- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. - **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. ## client.msearchTemplate [_msearch_template] @@ -1142,11 +1142,11 @@ client.msearchTemplate({ ... }) #### Request (object) [_request_msearch_template] -- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. -- **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. +- **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** - **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. - **`max_concurrent_searches` (Optional, number)**: The maximum number of concurrent searches the API can run. -- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. +- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation. - **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. 
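A minimal `client.msearchTemplate` sketch with one inline template (inside an async function, with a configured `client`; the index, template source, and `q` parameter are hypothetical):

```js
// Sketch only: the header entry selects the index, the body entry
// carries the Mustache template and its parameters.
const response = await client.msearchTemplate({
  search_templates: [
    { index: 'my-index' },
    { source: '{"query":{"match":{"title":"{{q}}"}}}', params: { q: 'test' } }
  ]
})
console.log(response.responses[0])
```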
- **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types. @@ -1176,7 +1176,7 @@ client.mtermvectors({ ... }) - **`index` (Optional, string)**: The name of the index that contains the documents. - **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: An array of existing or artificial documents. - **`ids` (Optional, string[])**: A simplified syntax to specify documents by their ID if they're in the same index. -- **`fields` (Optional, string | string[])**: A list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +- **`fields` (Optional, string \| string[])**: A list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. - **`field_statistics` (Optional, boolean)**: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. - **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets. - **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads. @@ -1186,7 +1186,7 @@ client.mtermvectors({ ... }) - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - **`term_statistics` (Optional, boolean)**: If true, the response includes term frequency and document frequency. - **`version` (Optional, number)**: If `true`, returns the document version as part of a hit. -- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. ## client.openPointInTime [_open_point_in_time] Open a point in time. @@ -1236,13 +1236,13 @@ client.openPointInTime({ index, keep_alive }) #### Request (object) [_request_open_point_in_time] -- **`index` (string | string[])**: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices -- **`keep_alive` (string | -1 | 0)**: Extend the length of time that the point in time persists. +- **`index` (string \| string[])**: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices +- **`keep_alive` (string \| -1 \| 0)**: Extend the length of time that the point in time persists. - **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. 
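A minimal point-in-time sketch (inside an async function, with a configured `client`; the index name is hypothetical, and the PIT is released explicitly when done):

```js
// Sketch only: open a PIT, search against it, then close it.
const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
try {
  const response = await client.search({
    pit: { id: pit.id, keep_alive: '1m' }, // no index: the PIT pins the target
    query: { match_all: {} }
  })
  console.log(response.hits.hits.length)
} finally {
  await client.closePointInTime({ id: pit.id })
}
```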
- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. - **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`allow_partial_search_results` (Optional, boolean)**: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. - **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. @@ -1272,8 +1272,8 @@ client.putScript({ id, script }) - **`id` (string)**: The identifier for the stored script or search template. It must be unique within the cluster. - **`script` ({ lang, options, source })**: The script or search template, its parameters, and its language. - **`context` (Optional, string)**: The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. ## client.rankEval [_rank_eval] Evaluate ranked search results. 
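As a hedged illustration (inside an async function, with a configured `client`; the index, query, and rated documents are hypothetical):

```js
// Sketch only: score one query against a single human-rated document.
const response = await client.rankEval({
  index: 'my-index',
  requests: [{
    id: 'query_1',
    request: { query: { match: { title: 'test' } } },
    ratings: [{ _index: 'my-index', _id: '1', rating: 1 }]
  }],
  metric: { precision: { k: 10, relevant_rating_threshold: 1 } }
})
console.log(response.metric_score)
```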
@@ -1290,10 +1290,10 @@ client.rankEval({ requests }) #### Request (object) [_request_rank_eval] - **`requests` ({ id, request, ratings, template_id, params }[])**: A set of typical search requests, together with their provided ratings. -- **`index` (Optional, string | string[])**: A list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. - **`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })**: Definition of the evaluation metric to calculate. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. - **`search_type` (Optional, string)**: Search operation type. @@ -1492,16 +1492,16 @@ client.reindex({ dest, source }) - **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination you are copying to. - **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source you are copying from. -- **`conflicts` (Optional, Enum("abort" | "proceed"))**: Indicates whether to continue reindexing even when there are conflicts. +- **`conflicts` (Optional, Enum("abort" \| "proceed"))**: Indicates whether to continue reindexing even when there are conflicts. - **`max_docs` (Optional, number)**: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. - **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when reindexing. - **`size` (Optional, number)** - **`refresh` (Optional, boolean)**: If `true`, the request refreshes affected shards to make this operation visible to search. - **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. By default, there is no throttle.
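A minimal `client.reindex` sketch (inside an async function, with a configured `client`; the source and destination index names are hypothetical):

```js
// Sketch only: copy documents and keep going past version conflicts.
const response = await client.reindex({
  source: { index: 'old-index' },
  dest: { index: 'new-index' },
  conflicts: 'proceed',
  wait_for_completion: false // returns a task ID to poll instead of blocking
})
console.log(response.task)
```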
-- **`scroll` (Optional, string | -1 | 0)**: The period of time that a consistent view of the index should be maintained for scrolled search.
-- **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards.
-- **`timeout` (Optional, string | -1 | 0)**: The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur.
-- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period of time that a consistent view of the index should be maintained for scrolled search.
+- **`slices` (Optional, number \| Enum("auto"))**: The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period each indexing operation waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active.
- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete.
- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.

@@ -1548,7 +1548,7 @@ client.renderSearchTemplate({ ... })

- **`id` (Optional, string)**: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required.
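A minimal sketch of rendering a stored template without executing it, to verify how Mustache variables are substituted. The template ID and parameters are hypothetical; the remaining request properties are listed below.

```js
// A sketch: render a stored search template and inspect the result.
const rendered = await client.renderSearchTemplate({
  id: 'my-search-template', // hypothetical stored template ID
  params: { query_string: 'hello world', from: 0, size: 10 }
})
console.log(rendered.template_output)
```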
- **`file` (Optional, string)** - **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. -- **`source` (Optional, string | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats })**: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. +- **`source` (Optional, string \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats })**: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. ## client.scriptsPainlessExecute [_scripts_painless_execute] Run a script. @@ -1570,7 +1570,7 @@ client.scriptsPainlessExecute({ ... }) #### Request (object) [_request_scripts_painless_execute] -- **`context` (Optional, Enum("painless_test" | "filter" | "score" | "boolean_field" | "date_field" | "double_field" | "geo_point_field" | "ip_field" | "keyword_field" | "long_field" | "composite_field"))**: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. +- **`context` (Optional, Enum("painless_test" \| "filter" \| "score" \| "boolean_field" \| "date_field" \| "double_field" \| "geo_point_field" \| "ip_field" \| "keyword_field" \| "long_field" \| "composite_field"))**: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. - **`context_setup` (Optional, { document, index, query })**: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. - **`script` (Optional, { source, id, params, lang, options })**: The Painless script to run. @@ -1600,7 +1600,7 @@ client.scroll({ scroll_id }) #### Request (object) [_request_scroll] - **`scroll_id` (string)**: The scroll ID of the search. -- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. +- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling. - **`rest_total_hits_as_int` (Optional, boolean)**: If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. ## client.search [_search] @@ -1634,30 +1634,30 @@ client.search({ ... }) #### Request (object) [_request_search] -- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. 
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
- **`aggregations` (Optional, Record)**: Defines the aggregations that are run as part of the search request.
- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**: Collapses search results based on the values of the specified field.
- **`explain` (Optional, boolean)**: If `true`, the request returns detailed information about score computation as part of a hit.
- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins.
- **`from` (Optional, number)**: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
- **`highlight` (Optional, { encoder, fields })**: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results.
-- **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
+- **`track_total_hits` (Optional, boolean \| number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
- **`indices_boost` (Optional, Record[])**: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score.
- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response.
-- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run.
+- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } \| { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run.
- **`rank` (Optional, { rrf })**: The Reciprocal Rank Fusion (RRF) to use.
- **`min_score` (Optional, number)**: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in search results and results collected by aggregations.
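The search body parameters continue below. A minimal sketch combining a few of those covered so far, against a hypothetical index and field names:

```js
// A sketch: a query with an aggregation and highlighting.
const result = await client.search({
  index: 'my-index', // hypothetical
  query: { match: { message: 'error' } },
  aggregations: {
    per_day: { date_histogram: { field: '@timestamp', calendar_interval: 'day' } }
  },
  highlight: { fields: { message: {} } },
  from: 0,
  size: 20
})
console.log(result.hits.hits.length, result.aggregations)
```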
- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. - **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL. -- **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. +- **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. - **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. - **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. -- **`search_after` (Optional, number | number | string | boolean | null[])**: Used to retrieve the next page of hits using a set of sort values from the previous page. +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: Used to retrieve the next page of hits using a set of sort values from the previous page. - **`size` (Optional, number)**: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. 
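A sketch of how `search_after` is typically threaded between requests. The index and field names are hypothetical; the second sort key is assumed to be a unique tiebreaker field.

```js
// A sketch of deep pagination with search_after: sort deterministically,
// then feed the last hit's sort values into the next request.
const first = await client.search({
  index: 'my-index',
  size: 100,
  sort: [{ '@timestamp': 'asc' }, { 'event.id': 'asc' }]
})
const lastHit = first.hits.hits[first.hits.hits.length - 1]
const next = await client.search({
  index: 'my-index',
  size: 100,
  sort: [{ '@timestamp': 'asc' }, { 'event.id': 'asc' }],
  search_after: lastHit.sort // sort values from the previous page
})
```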
- **`slice` (Optional, { field, id, max })**: Split a scrolled search into multiple slices that can be consumed independently.
-- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: A list of : pairs.
-- **`_source` (Optional, boolean | { excludes, includes })**: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`.
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: A list of `<field>:<direction>` pairs.
+- **`_source` (Optional, boolean \| { excludes, includes })**: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`.
- **`fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response.
- **`suggest` (Optional, { text })**: Defines a suggester that provides similar looking terms based on a provided text.
- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early.
@@ -1665,7 +1665,7 @@ client.search({ ... })
- **`track_scores` (Optional, boolean)**: If `true`, calculate and return document scores, even if the scores are not used for sorting.
- **`version` (Optional, boolean)**: If `true`, the request returns the document version as part of a hit.
- **`seq_no_primary_term` (Optional, boolean)**: If `true`, the request returns sequence number and primary term of the last modification of each hit.
-- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response.
+- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response.
- **`pit` (Optional, { id, keep_alive })**: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path.
- **`runtime_mappings` (Optional, Record)**: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
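A minimal sketch of the `runtime_mappings` property just described, assuming a numeric `price` field exists in the hypothetical index:

```js
// A sketch: a search-time runtime field computed by a Painless script,
// retrieved via `fields` and used for sorting.
const res = await client.search({
  index: 'my-index', // hypothetical
  runtime_mappings: {
    price_with_tax: {
      type: 'double',
      script: { source: "emit(doc['price'].value * 1.2)" } // assumes `price` exists
    }
  },
  fields: [{ field: 'price_with_tax' }],
  sort: [{ price_with_tax: 'desc' }],
  size: 10
})
```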
- **`stats` (Optional, string[])**: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. @@ -1675,9 +1675,9 @@ client.search({ ... }) - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. - **`batched_reduce_size` (Optional, number)**: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. - **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. -- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`. - **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices will be ignored when frozen. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`include_named_queries_score` (Optional, boolean)**: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. @@ -1687,16 +1687,16 @@ client.search({ ... }) - **`pre_filter_shard_size` (Optional, number)**: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). 
When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only indices. * The primary sort of the query targets an indexed field.
- **`request_cache` (Optional, boolean)**: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings.
- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
-- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting.
-- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates how distributed term frequencies are calculated for relevance scoring.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Indicates how distributed term frequencies are calculated for relevance scoring.
- **`suggest_field` (Optional, string)**: The field to use for suggestions.
-- **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+- **`suggest_mode` (Optional, Enum("missing" \| "popular" \| "always"))**: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
- **`suggest_size` (Optional, number)**: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
- **`typed_keys` (Optional, boolean)**: If `true`, aggregation and suggester names are prefixed by their respective types in the response.
- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response.
-- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
-- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response.
If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
- **`q` (Optional, string)**: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned.
- **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.

@@ -1846,7 +1846,7 @@ client.searchMvt({ index, field, zoom, x, y })

#### Request (object) [_request_search_mvt]
-- **`index` (string | string[])**: List of data streams, indices, or aliases to search
+- **`index` (string \| string[])**: List of data streams, indices, or aliases to search
- **`field` (string)**: Field containing geospatial data to return
- **`zoom` (number)**: Zoom level for the vector tile to search
- **`x` (number)**: X coordinate for the vector tile to search
- **`y` (number)**: Y coordinate for the vector tile to search
- **`buffer` (Optional, number)**: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile.
- **`exact_bounds` (Optional, boolean)**: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile.
- **`extent` (Optional, number)**: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides.
-- **`fields` (Optional, string | string[])**: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results.
-- **`grid_agg` (Optional, Enum("geotile" | "geohex"))**: The aggregation used to create a grid for the `field`.
+- **`fields` (Optional, string \| string[])**: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results.
+- **`grid_agg` (Optional, Enum("geotile" \| "geohex"))**: The aggregation used to create a grid for the `field`.
- **`grid_precision` (Optional, number)**: Additional zoom levels available through the aggs layer. For example, if `<zoom>` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer.
-- **`grid_type` (Optional, Enum("grid" | "point" | "centroid"))**: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid, each feature is a polygon of the cells bounding box. If `point`, each feature is a Point that is the centroid of the cell.
+- **`grid_type` (Optional, Enum("grid" \| "point" \| "centroid"))**: Determines the geometry type for features in the aggs layer.
In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell.
- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The query DSL used to filter documents for the search.
- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
- **`size` (Optional, number)**: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer.
-- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest.
-- **`track_total_hits` (Optional, boolean | number)**: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest.
+- **`track_total_hits` (Optional, boolean \| number)**: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
- **`with_labels` (Optional, boolean)**: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`.

## client.searchShards [_search_shards]
Get the search shards.

Get the indices and shards that a search request would be run against.

@@ -1884,12 +1884,12 @@ client.searchShards({ ... 
})

#### Request (object) [_request_search_shards]
-- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. IT can also be set to `-1` to indicate that the request should never timeout.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.

@@ -1905,21 +1905,21 @@ client.searchTemplate({ ... })

#### Request (object) [_request_search_template]
-- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`).
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`).
- **`explain` (Optional, boolean)**: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter.
- **`id` (Optional, string)**: The ID of the search template to use. If no `source` is specified, this parameter is required.
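The remaining search template parameters are listed below. A minimal sketch of executing a stored template (the template ID and index are hypothetical):

```js
// A sketch: run a stored search template against an index.
const result = await client.searchTemplate({
  index: 'my-index',
  id: 'my-search-template', // hypothetical stored template ID
  params: { query_string: 'hello world', from: 0, size: 10 }
})
console.log(result.hits.hits)
```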
- **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. - **`profile` (Optional, boolean)**: If `true`, the query execution is profiled. -- **`source` (Optional, string | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats })**: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. +- **`source` (Optional, string \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats })**: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_throttled` (Optional, boolean)**: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. -- **`scroll` (Optional, string | -1 | 0)**: Specifies how long a consistent view of the index should be maintained for scrolled search. 
-- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation.
+- **`scroll` (Optional, string \| -1 \| 0)**: Specifies how long a consistent view of the index should be maintained for scrolled search.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation.
- **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object.
- **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types.

@@ -1944,7 +1944,7 @@ client.termsEnum({ index, field })

- **`index` (string)**: A list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`.
- **`field` (string)**: The field to search.
- **`size` (Optional, number)**: The number of matching terms to return.
-- **`timeout` (Optional, string | -1 | 0)**: The maximum length of time to spend collecting results. If the timeout is exceeded the `complete` flag set to `false` in the response and the results may be partial or empty.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum length of time to spend collecting results. If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty.
- **`case_insensitive` (Optional, boolean)**: When `true`, the provided search string is matched against index terms without case sensitivity.
- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter an index shard if the provided query rewrites to `match_none`.
- **`string` (Optional, string)**: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766.

@@ -2007,7 +2007,7 @@ client.termvectors({ index })

- **`doc` (Optional, object)**: An artificial document (a document not present in the index) for which you want to retrieve term vectors.
- **`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })**: Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query.
- **`per_field_analyzer` (Optional, Record)**: Override the default per-field analyzer.
This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated.
-- **`fields` (Optional, string | string[])**: A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.
+- **`fields` (Optional, string \| string[])**: A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.
- **`field_statistics` (Optional, boolean)**: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field).
- **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets.
- **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads.

@@ -2015,7 +2015,7 @@ client.termvectors({ index })

- **`term_statistics` (Optional, boolean)**: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact.
- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
- **`version` (Optional, number)**: If `true`, returns the document version as part of a hit.
-- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type.
+- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type.
- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
- **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time.

@@ -2056,20 +2056,20 @@ client.update({ id, index })

- **`doc_as_upsert` (Optional, boolean)**: If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported.
- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document.
- **`scripted_upsert` (Optional, boolean)**: If `true`, run the script whether or not the document exists.
-- **`_source` (Optional, boolean | { excludes, includes })**: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve.
+- **`_source` (Optional, boolean \| { excludes, includes })**: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve.
- **`upsert` (Optional, object)**: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run.
- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
- **`include_source_on_error` (Optional, boolean)**: If `true`, the document source is included in the error message in case of parsing errors.
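The remaining update parameters continue below. A minimal sketch of a partial update with an upsert fallback (the index, ID, and fields are hypothetical):

```js
// A sketch: partial-document update with optimistic retries on conflicts.
const res = await client.update({
  index: 'my-index',
  id: '1',
  doc: { views: 1 },
  upsert: { views: 0 }, // inserted only if the document does not exist yet
  retry_on_conflict: 3
})
console.log(res.result) // 'updated', 'created', or 'noop'
```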
- **`lang` (Optional, string)**: The script language. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. - **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. - **`retry_on_conflict` (Optional, number)**: The number of times the operation should be retried when a conflict occurs. - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active. -- **`_source_excludes` (Optional, string | string[])**: The source fields you want to exclude. -- **`_source_includes` (Optional, string | string[])**: The source fields you want to retrieve. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active. +- **`_source_excludes` (Optional, string \| string[])**: The source fields you want to exclude. +- **`_source_includes` (Optional, string \| string[])**: The source fields you want to retrieve. ## client.updateByQuery [_update_by_query] Update documents. @@ -2166,18 +2166,18 @@ client.updateByQuery({ index }) #### Request (object) [_request_update_by_query] -- **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`index` (string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. - **`max_docs` (Optional, number)**: The maximum number of documents to update. 
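A minimal sketch of an update by query; the full parameter list follows. The index, field, and script are hypothetical.

```js
// A sketch: increment a counter on every matching document,
// proceeding past version conflicts instead of aborting.
const res = await client.updateByQuery({
  index: 'my-index',
  query: { term: { 'user.id': 'kimchy' } },
  script: { source: 'ctx._source.count++', lang: 'painless' },
  conflicts: 'proceed'
})
console.log(res.updated, res.version_conflicts)
```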
- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to update using the Query DSL. - **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when updating. - **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. -- **`conflicts` (Optional, Enum("abort" | "proceed"))**: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. +- **`conflicts` (Optional, Enum("abort" \| "proceed"))**: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. -- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
- **`from` (Optional, number)**: Skips the specified number of documents.
- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.

@@ -2188,18 +2188,18 @@ client.updateByQuery({ index })

- **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. It defaults to the index-level setting.
- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second.
- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
-- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling.
- **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation.
-- **`search_timeout` (Optional, string | -1 | 0)**: An explicit timeout for each search request. By default, there is no timeout.
-- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
-- **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into.
+- **`search_timeout` (Optional, string \| -1 \| 0)**: An explicit timeout for each search request. By default, there is no timeout.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
+- **`slices` (Optional, number \| Enum("auto"))**: The number of slices this task should be divided into.
- **`sort` (Optional, string[])**: A list of `<field>:<direction>` pairs.
- **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes.
- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
-- **`timeout` (Optional, string | -1 | 0)**: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur.
- **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit.
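A sketch of running the operation as a background task and then removing its throttle, using the `wait_for_completion` parameter described below. The index and script are hypothetical.

```js
// A sketch: launch the update as a task, then lift its throttle.
const { task } = await client.updateByQuery({
  index: 'my-index',
  query: { match_all: {} },
  script: { source: 'ctx._source.touched = true' },
  requests_per_second: 100,
  wait_for_completion: false // returns a task ID immediately
})
await client.updateByQueryRethrottle({
  task_id: task,
  requests_per_second: -1 // -1 disables throttling
})
```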
- **`version_type` (Optional, boolean)**: Should the document increment the version number (internal) on hit or not (reindex) -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. - **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. ## client.updateByQueryRethrottle [_update_by_query_rethrottle] @@ -2254,13 +2254,13 @@ client.asyncSearch.get({ id }) #### Request (object) [_request_async_search.get] - **`id` (string)**: A unique identifier for the async search. -- **`keep_alive` (Optional, string | -1 | 0)**: The length of time that the async search should be available in the cluster. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The length of time that the async search should be available in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its saved results are deleted. - **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response -- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Specifies to wait for the search to be completed up until the provided timeout. +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: Specifies to wait for the search to be completed up until the provided timeout. Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. By default no timeout is set meaning that the currently available results will be returned without any additional wait. @@ -2283,7 +2283,7 @@ client.asyncSearch.status({ id }) #### Request (object) [_request_async_search.status] - **`id` (string)**: A unique identifier for the async search. -- **`keep_alive` (Optional, string | -1 | 0)**: The length of time that the async search needs to be available. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The length of time that the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. ## client.asyncSearch.submit [_async_search.submit] @@ -2305,7 +2305,7 @@ client.asyncSearch.submit({ ... 
}) ### Arguments [_arguments_async_search.submit] #### Request (object) [_request_async_search.submit] -- **`index` (Optional, string | string[])**: A list of index names to search; use `_all` or empty string to perform the operation on all indices +- **`index` (Optional, string \| string[])**: A list of index names to search; use `_all` or empty string to perform the operation on all indices - **`aggregations` (Optional, Record)** - **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** - **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. @@ -2314,28 +2314,28 @@ client.asyncSearch.submit({ ... }) hits using the from and size parameters. To page through more hits, use the search_after parameter. - **`highlight` (Optional, { encoder, fields })** -- **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact +- **`track_total_hits` (Optional, boolean \| number)**: Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. - **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. - **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. -- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. +- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } \| { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. - **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations. 
- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** - **`profile` (Optional, boolean)** - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. -- **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** +- **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])** - **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. -- **`search_after` (Optional, number | number | string | boolean | null[])** +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])** - **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. - **`slice` (Optional, { field, id, max })** -- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** -- **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])** +- **`_source` (Optional, boolean \| { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. - **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. @@ -2350,7 +2350,7 @@ Defaults to no timeout. - **`version` (Optional, boolean)**: If true, returns document version as part of a hit. - **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. 
-- **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified, +- **`stored_fields` (Optional, string \| string[])**: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. @@ -2361,9 +2361,9 @@ precedence over mapped fields with the same name. - **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. -- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Blocks and waits until the search is completed up to a certain timeout. +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. -- **`keep_alive` (Optional, string | -1 | 0)**: Specifies how long the async search needs to be available. +- **`keep_alive` (Optional, string \| -1 \| 0)**: Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. - **`keep_on_completion` (Optional, boolean)**: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. - **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) @@ -2373,9 +2373,9 @@ Ongoing async searches and any saved search results are deleted after this perio - **`batched_reduce_size` (Optional, number)**: Affects how often partial results become available, which happens whenever shard results are reduced. A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). - **`ccs_minimize_roundtrips` (Optional, boolean)**: The default value is the only supported value. -- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query (AND or OR) +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query (AND or OR) - **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
- **`ignore_throttled` (Optional, boolean)**: Whether specified concrete, expanded or aliased indices should be ignored when throttled - **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - **`lenient` (Optional, boolean)**: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored @@ -2383,15 +2383,15 @@ A partial reduction is performed every time the coordinating node has received a - **`preference` (Optional, string)**: Specify the node or shard the operation should be performed on (default: random) - **`request_cache` (Optional, boolean)**: Specify if request cache should be used for this request or not, defaults to true - **`routing` (Optional, string)**: A list of specific routing values -- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Search operation type +- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Search operation type - **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions. -- **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: Specify suggest mode +- **`suggest_mode` (Optional, Enum("missing" \| "popular" \| "always"))**: Specify suggest mode - **`suggest_size` (Optional, number)**: How many suggestions to return in response - **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. - **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response - **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response -- **`_source_excludes` (Optional, string | string[])**: A list of fields to exclude from the returned _source field -- **`_source_includes` (Optional, string | string[])**: A list of fields to extract and return from the _source field +- **`_source_excludes` (Optional, string \| string[])**: A list of fields to exclude from the returned _source field +- **`_source_includes` (Optional, string \| string[])**: A list of fields to extract and return from the _source field - **`q` (Optional, string)**: Query in the Lucene query string syntax ## client.autoscaling.deleteAutoscalingPolicy [_autoscaling.delete_autoscaling_policy] @@ -2409,9 +2409,9 @@ client.autoscaling.deleteAutoscalingPolicy({ name }) #### Request (object) [_request_autoscaling.delete_autoscaling_policy] - **`name` (string)**: the name of the autoscaling policy -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.autoscaling.getAutoscalingCapacity [_autoscaling.get_autoscaling_capacity] Get the autoscaling capacity. @@ -2438,7 +2438,7 @@ client.autoscaling.getAutoscalingCapacity({ ... 
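  // The only request option is optional; an illustrative value:
  // master_timeout: '30s'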
}) ### Arguments [_arguments_autoscaling.get_autoscaling_capacity] #### Request (object) [_request_autoscaling.get_autoscaling_capacity] -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.autoscaling.getAutoscalingPolicy [_autoscaling.get_autoscaling_policy] @@ -2456,7 +2456,7 @@ client.autoscaling.getAutoscalingPolicy({ name }) #### Request (object) [_request_autoscaling.get_autoscaling_policy] - **`name` (string)**: the name of the autoscaling policy -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.autoscaling.putAutoscalingPolicy [_autoscaling.put_autoscaling_policy] @@ -2475,9 +2475,9 @@ client.autoscaling.putAutoscalingPolicy({ name }) #### Request (object) [_request_autoscaling.put_autoscaling_policy] - **`name` (string)**: the name of the autoscaling policy - **`policy` (Optional, { roles, deciders })** -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.cat.aliases [_cat.aliases] Get aliases. @@ -2496,15 +2496,15 @@ client.cat.aliases({ ... }) ### Arguments [_arguments_cat.aliases] #### Request (object) [_request_cat.aliases] -- **`name` (Optional, string | string[])**: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +- **`name` (Optional, string \| string[])**: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
It supports a list of values, such as `open,hidden`.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
To indicate that the request should never time out, you can set it to `-1`.
@@ -2524,17 +2524,17 @@ 

## client.cat.allocation [_cat.allocation]
Get shard allocation information.

Get a snapshot of the number of shards allocated to each data node and their disk space.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

client.cat.allocation({ ... })

### Arguments [_arguments_cat.allocation]

#### Request (object) [_request_cat.allocation]
-- **`node_id` (Optional, string | string[])**: A list of node identifiers or names used to limit the returned information.
-- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
-- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+- **`node_id` (Optional, string \| string[])**: A list of node identifiers or names used to limit the returned information.
+- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node.
In both cases the coordinating node will send requests for further information to each selected node.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.

## client.cat.componentTemplates [_cat.component_templates]
Get component templates.

Get information about component templates in a cluster.
Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.
They are not intended for use by applications. For application consumption, use the get component template API.

client.cat.componentTemplates({ ... })

### Arguments [_arguments_cat.component_templates]

#### Request (object) [_request_cat.component_templates]
- **`name` (Optional, string)**: The name of the component template.
It accepts wildcard expressions.
If it is omitted, all component templates are returned.
-- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node.
In both cases the coordinating node will send requests for further information to each selected node.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. ## client.cat.count [_cat.count] Get a document count. @@ -2585,11 +2585,11 @@ client.cat.count({ ... }) ### Arguments [_arguments_cat.count] #### Request (object) [_request_cat.count] -- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2610,11 +2610,11 @@ client.cat.fielddata({ ... }) ### Arguments [_arguments_cat.fielddata] #### Request (object) [_request_cat.fielddata] -- **`fields` (Optional, string | string[])**: List of fields used to limit returned information. +- **`fields` (Optional, string \| string[])**: List of fields used to limit returned information. To retrieve all fields, omit this parameter. -- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2640,10 +2640,10 @@ client.cat.health({ ... }) ### Arguments [_arguments_cat.health] #### Request (object) [_request_cat.health] -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. - **`ts` (Optional, boolean)**: If true, returns `HH:MM:SS` and Unix epoch timestamps. -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2686,17 +2686,17 @@ client.cat.indices({ ... 
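  // Illustrative values only (all parameters are optional and documented
  // below; `my-index-*` is a hypothetical index pattern):
  // index: 'my-index-*',
  // health: 'yellow',
  // bytes: 'mb'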
}) ### Arguments [_arguments_cat.indices] #### Request (object) [_request_cat.indices] -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. -- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. -- **`health` (Optional, Enum("green" | "yellow" | "red"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. +- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. +- **`health` (Optional, Enum("green" \| "yellow" \| "red"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. - **`pri` (Optional, boolean)**: If true, the response only includes information from primary shards. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2716,15 +2716,15 @@ client.cat.master({ ... }) ### Arguments [_arguments_cat.master] #### Request (object) [_request_cat.master] -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. 
If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.cat.mlDataFrameAnalytics [_cat.ml_data_frame_analytics] Get data frame analytics jobs. @@ -2746,11 +2746,11 @@ client.cat.mlDataFrameAnalytics({ ... }) #### Request (object) [_request_cat.ml_data_frame_analytics] - **`id` (Optional, string)**: The ID of the data frame analytics to fetch - **`allow_no_match` (Optional, boolean)**: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) -- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit in which to display byte values -- **`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names to display. -- **`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names or column aliases used to sort the +- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit in which to display byte values +- **`h` (Optional, Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version") \| Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version")[])**: List of column names to display. +- **`s` (Optional, Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version") \| Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version")[])**: List of column names or column aliases used to sort the response. 
-- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. ## client.cat.mlDatafeeds [_cat.ml_datafeeds] Get datafeeds. @@ -2783,9 +2783,9 @@ client.cat.mlDatafeeds({ ... }) If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. -- **`h` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names to display. -- **`s` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names or column aliases used to sort the response. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`h` (Optional, Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s") \| Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s")[])**: List of column names to display. +- **`s` (Optional, Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s") \| Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s")[])**: List of column names or column aliases used to sort the response. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.mlJobs [_cat.ml_jobs] Get anomaly detection jobs. @@ -2818,10 +2818,10 @@ client.cat.mlJobs({ ... }) If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. -- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. 
-- **`h` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])**: List of column names to display. 
-- **`s` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])**: List of column names or column aliases used to sort the response. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. 
+- **`h` (Optional, Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state") \| Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state")[])**: List of column names to display. 
+- **`s` (Optional, Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state") \| Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state")[])**: List of column names or column aliases used to sort the response. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.mlTrainedModels [_cat.ml_trained_models] Get trained models. @@ -2845,12 +2845,12 @@ client.cat.mlTrainedModels({ ... 
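  // Illustrative values only (both parameters are optional; see below):
  // allow_no_match: true,
  // size: 100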
}) - **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. -- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. -- **`h` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names to display. -- **`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names or aliases used to sort the response. +- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. +- **`h` (Optional, Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version") \| Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version")[])**: A list of column names to display. +- **`s` (Optional, Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version") \| Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version")[])**: A list of column names or aliases used to sort the response. - **`from` (Optional, number)**: Skips the specified number of transforms. - **`size` (Optional, number)**: The maximum number of transforms to display. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. ## client.cat.nodeattrs [_cat.nodeattrs] Get node attribute information. @@ -2867,15 +2867,15 @@ client.cat.nodeattrs({ ... 
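  // Illustrative values only, assuming the default nodeattrs columns:
  // h: 'node,attr,value',
  // s: 'attr'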
}) ### Arguments [_arguments_cat.nodeattrs] #### Request (object) [_request_cat.nodeattrs] -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.cat.nodes [_cat.nodes] Get node information. @@ -2892,15 +2892,16 @@ client.cat.nodes({ ... }) ### Arguments [_arguments_cat.nodes] #### Request (object) [_request_cat.nodes] -- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. -- **`full_id` (Optional, boolean | string)**: If `true`, return the full node ID. If `false`, return the shortened node ID. +- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. +- **`full_id` (Optional, boolean \| string)**: If `true`, return the full node ID. If `false`, return the shortened node ID. - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. 
+- **`h` (Optional, Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version") \| Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| 
"search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version")[])**: A list of columns names to display. +It supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.pendingTasks [_cat.pending_tasks] Get pending task information. @@ -2917,16 +2918,16 @@ client.cat.pendingTasks({ ... }) ### Arguments [_arguments_cat.pending_tasks] #### Request (object) [_request_cat.pending_tasks] -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. ## client.cat.plugins [_cat.plugins] Get plugin information. @@ -2943,8 +2944,8 @@ client.cat.plugins({ ... }) ### Arguments [_arguments_cat.plugins] #### Request (object) [_request_cat.plugins] -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`include_bootstrap` (Optional, boolean)**: Include bootstrap plugins in the response @@ -2952,7 +2953,7 @@ or `:desc` as a suffix to the column name. 
local cluster state. If `false`, the list of selected nodes is computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.

## client.cat.recovery [_cat.recovery]
Get shard recovery information.

@@ -2971,16 +2972,16 @@ client.cat.recovery({ ... })

### Arguments [_arguments_cat.recovery]

#### Request (object) [_request_cat.recovery]
-- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request.
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request.
Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
- **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries.
-- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries.
-- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
-- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values.
+- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values.

## client.cat.repositories [_cat.repositories]
Get snapshot repository information.

@@ -2997,15 +2998,15 @@ client.cat.repositories({ ... })

### Arguments [_arguments_cat.repositories]

#### Request (object) [_request_cat.repositories]
-- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false`, the list of selected nodes is computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
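The `h` and `s` column parameters behave the same way across the cat APIs documented above. As an illustrative sketch only (the index pattern and column names are hypothetical, and `client` is assumed to be an already-instantiated client):

```js
// Hypothetical usage sketch for a cat API with column selection and sorting.
const recoveries = await client.cat.recovery({
  index: 'my-index-*',                    // hypothetical wildcard pattern
  active_only: true,                      // only ongoing shard recoveries
  bytes: 'mb',                            // display byte values in megabytes
  h: ['index', 'shard', 'stage', 'time'], // columns to include
  s: ['time:desc']                        // sort by recovery time, descending
})
```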
## client.cat.segments [_cat.segments]
Get segment information.

@@ -3023,19 +3024,19 @@ client.cat.segments({ ... })

### Arguments [_arguments_cat.segments]

#### Request (object) [_request_cat.segments]
-- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request.
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request.
Supports wildcards (`*`).
To target all data streams and indices, omit this parameter or use `*` or `_all`.
-- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
-- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false`, the list of selected nodes is computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.

## client.cat.shards [_cat.shards]
Get shard information.

@@ -3053,16 +3054,16 @@ client.cat.shards({ ... })

### Arguments [_arguments_cat.shards]

#### Request (object) [_request_cat.shards]
-- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request.
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request.
Supports wildcards (`*`).
To target all data streams and indices, omit this parameter or use `*` or `_all`.
-- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
-- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
-- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. ## client.cat.snapshots [_cat.snapshots] Get snapshot information. @@ -3080,17 +3081,17 @@ client.cat.snapshots({ ... }) ### Arguments [_arguments_cat.snapshots] #### Request (object) [_request_cat.snapshots] -- **`repository` (Optional, string | string[])**: A list of snapshot repositories used to limit the request. +- **`repository` (Optional, string \| string[])**: A list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. - **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots. -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. ## client.cat.tasks [_cat.tasks] Get task information. @@ -3111,12 +3112,12 @@ client.cat.tasks({ ... }) - **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. - **`nodes` (Optional, string[])**: Unique node identifiers, which are used to limit the response. - **`parent_task_id` (Optional, string)**: The parent task identifier, which is used to limit the response. -- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed. @@ -3138,15 +3139,15 @@ client.cat.templates({ ... 
})

#### Request (object) [_request_cat.templates]
- **`name` (Optional, string)**: The name of the template to return.
Accepts wildcard expressions. If omitted, all templates are returned.
-- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false`, the list of selected nodes is computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.

## client.cat.threadPool [_cat.thread_pool]
Get thread pool statistics.

@@ -3164,18 +3165,18 @@ client.cat.threadPool({ ... })

### Arguments [_arguments_cat.thread_pool]

#### Request (object) [_request_cat.thread_pool]
-- **`thread_pool_patterns` (Optional, string | string[])**: A list of thread pool names used to limit the request.
+- **`thread_pool_patterns` (Optional, string \| string[])**: A list of thread pool names used to limit the request.
Accepts wildcard expressions.
-- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
-- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values.
+- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values.
- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false`, the list of selected nodes is computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.

## client.cat.transforms [_cat.transforms]
Get transform information.

@@ -3201,9 +3202,9 @@ If you do not specify one of these options, the API returns information for all
If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches.
If `false`, the request returns a 404 status code when there are no matches or only partial matches.
- **`from` (Optional, number)**: Skips the specified number of transforms.
-- **`h` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names to display. -- **`s` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names or column aliases used to sort the response. -- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. 
+- **`h` (Optional, Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version") \| Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version")[])**: List of column names to display. +- **`s` (Optional, Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version") \| Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version")[])**: List of column names or column aliases used to sort the response. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. - **`size` (Optional, number)**: The maximum number of transforms to obtain. ## client.ccr.deleteAutoFollowPattern [_ccr.delete_auto_follow_pattern] @@ -3221,7 +3222,7 @@ client.ccr.deleteAutoFollowPattern({ name }) #### Request (object) [_request_ccr.delete_auto_follow_pattern] - **`name` (string)**: The auto-follow pattern collection to delete. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. 
If the master node is not available before the timeout expires, the request fails and returns an error.
It can also be set to `-1` to indicate that the request should never timeout.

@@ -3246,21 +3247,21 @@ client.ccr.follow({ index, leader_index, remote_cluster })
- **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding read requests from the remote cluster.
- **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower.
- **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster.
-- **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
-- **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when
+- **`max_read_request_size` (Optional, number \| string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
+- **`max_retry_delay` (Optional, string \| -1 \| 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when
retrying.
- **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be
deferred until the number of queued operations goes below the limit.
-- **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will
+- **`max_write_buffer_size` (Optional, number \| string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will
be deferred until the total bytes of queued operations goes below the limit.
- **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower.
-- **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower.
-- **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index.
+- **`max_write_request_size` (Optional, number \| string)**: The maximum total bytes of operations per bulk write request executed on the follower.
+- **`read_poll_timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index.
When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics.
Then the follower will immediately attempt to read from the leader again.
- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Settings to override from the leader index. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the remote Lucene segment files to the follower index. @@ -3280,8 +3281,8 @@ client.ccr.followInfo({ index }) ### Arguments [_arguments_ccr.follow_info] #### Request (object) [_request_ccr.follow_info] -- **`index` (string | string[])**: A comma-delimited list of follower index patterns. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`index` (string \| string[])**: A comma-delimited list of follower index patterns. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. @@ -3300,8 +3301,8 @@ client.ccr.followStats({ index }) ### Arguments [_arguments_ccr.follow_stats] #### Request (object) [_request_ccr.follow_stats] -- **`index` (string | string[])**: A comma-delimited list of index patterns. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +- **`index` (string \| string[])**: A comma-delimited list of index patterns. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.ccr.forgetFollower [_ccr.forget_follower] @@ -3332,7 +3333,7 @@ client.ccr.forgetFollower({ index }) - **`follower_index` (Optional, string)** - **`follower_index_uuid` (Optional, string)** - **`leader_remote_cluster` (Optional, string)** -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error.

## client.ccr.getAutoFollowPattern [_ccr.get_auto_follow_pattern]
Get auto-follow patterns.

client.ccr.getAutoFollowPattern({ ... })

#### Request (object) [_request_ccr.get_auto_follow_pattern]
- **`name` (Optional, string)**: The auto-follow pattern collection that you want to retrieve.
If you do not specify a name, the API returns information for all collections.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
It can also be set to `-1` to indicate that the request should never timeout.

@@ -3375,7 +3376,7 @@ client.ccr.pauseAutoFollowPattern({ name })

#### Request (object) [_request_ccr.pause_auto_follow_pattern]
- **`name` (string)**: The name of the auto-follow pattern to pause.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
It can also be set to `-1` to indicate that the request should never timeout.

@@ -3397,7 +3398,7 @@ client.ccr.pauseFollow({ index })

#### Request (object) [_request_ccr.pause_follow]
- **`index` (string)**: The name of the follower index.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
It can also be set to `-1` to indicate that the request should never timeout.

@@ -3427,15 +3428,15 @@ client.ccr.putAutoFollowPattern({ name, remote_cluster })
- **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding read requests from the remote cluster.
- **`settings` (Optional, Record)**: Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards).
- **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower.
-- **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
+- **`read_poll_timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
- **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster.
-- **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
-- **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying.
+- **`max_read_request_size` (Optional, number \| string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
+- **`max_retry_delay` (Optional, string \| -1 \| 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying.
- **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.
-- **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit.
+- **`max_write_buffer_size` (Optional, number \| string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit.
- **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower.
-- **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`max_write_request_size` (Optional, number \| string)**: The maximum total bytes of operations per bulk write request executed on the follower.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.

## client.ccr.resumeAutoFollowPattern [_ccr.resume_auto_follow_pattern]
Resume an auto-follow pattern.

@@ -3454,7 +3455,7 @@ client.ccr.resumeAutoFollowPattern({ name })

#### Request (object) [_request_ccr.resume_auto_follow_pattern]
- **`name` (string)**: The name of the auto-follow pattern to resume.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
It can also be set to `-1` to indicate that the request should never timeout.
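Taken together, the auto-follow pattern APIs above form a simple create/pause/resume lifecycle. A minimal sketch, assuming a configured remote cluster alias named `leader` (the pattern name and index patterns are hypothetical; `leader_index_patterns` and `follow_index_pattern` are standard create-pattern fields not shown in the excerpt above):

```js
// Hypothetical lifecycle sketch for an auto-follow pattern.
await client.ccr.putAutoFollowPattern({
  name: 'logs',                                     // hypothetical pattern name
  remote_cluster: 'leader',                         // assumed remote cluster alias
  leader_index_patterns: ['logs-*'],
  follow_index_pattern: '{{leader_index}}-follower'
})

// Pausing keeps the pattern configuration but stops new indices from being followed.
await client.ccr.pauseAutoFollowPattern({ name: 'logs' })

// Resuming applies the pattern again to newly created leader indices.
await client.ccr.resumeAutoFollowPattern({ name: 'logs' })
```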
@@ -3479,13 +3480,13 @@ client.ccr.resumeFollow({ index }) - **`max_outstanding_write_requests` (Optional, number)** - **`max_read_request_operation_count` (Optional, number)** - **`max_read_request_size` (Optional, string)** -- **`max_retry_delay` (Optional, string | -1 | 0)** +- **`max_retry_delay` (Optional, string \| -1 \| 0)** - **`max_write_buffer_count` (Optional, number)** - **`max_write_buffer_size` (Optional, string)** - **`max_write_request_operation_count` (Optional, number)** - **`max_write_request_size` (Optional, string)** -- **`read_poll_timeout` (Optional, string | -1 | 0)** -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`read_poll_timeout` (Optional, string \| -1 \| 0)** +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.ccr.stats [_ccr.stats] Get cross-cluster replication stats. @@ -3501,10 +3502,10 @@ client.ccr.stats({ ... }) ### Arguments [_arguments_ccr.stats] #### Request (object) [_request_ccr.stats] -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.ccr.unfollow [_ccr.unfollow] Unfollow an index. @@ -3526,7 +3527,7 @@ client.ccr.unfollow({ index }) #### Request (object) [_request_ccr.unfollow] - **`index` (string)**: The name of the follower index. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. @@ -3552,7 +3553,7 @@ client.cluster.allocationExplain({ ... }) - **`shard` (Optional, number)**: Specifies the ID of the shard that you would like an explanation for. - **`include_disk_info` (Optional, boolean)**: If true, returns information about disk usage and shard sizes. - **`include_yes_decisions` (Optional, boolean)**: If true, returns YES decisions in explanation. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.cluster.deleteComponentTemplate [_cluster.delete_component_template] Delete component templates. @@ -3567,10 +3568,10 @@ client.cluster.deleteComponentTemplate({ name }) ### Arguments [_arguments_cluster.delete_component_template] #### Request (object) [_request_cluster.delete_component_template] -- **`name` (string | string[])**: List or wildcard expression of component template names used to limit the request. 
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`name` (string \| string[])**: List or wildcard expression of component template names used to limit the request. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.cluster.deleteVotingConfigExclusions [_cluster.delete_voting_config_exclusions] @@ -3586,7 +3587,7 @@ client.cluster.deleteVotingConfigExclusions({ ... }) ### Arguments [_arguments_cluster.delete_voting_config_exclusions] #### Request (object) [_request_cluster.delete_voting_config_exclusions] -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. - **`wait_for_removal` (Optional, boolean)**: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from @@ -3607,9 +3608,9 @@ client.cluster.existsComponentTemplate({ name }) ### Arguments [_arguments_cluster.exists_component_template] #### Request (object) [_request_cluster.exists_component_template] -- **`name` (string | string[])**: List of component template names used to limit the request. +- **`name` (string \| string[])**: List of component template names used to limit the request. Wildcard (*) expressions are supported. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. @@ -3634,7 +3635,7 @@ Wildcard (`*`) expressions are supported. - **`include_defaults` (Optional, boolean)**: Return all default configurations for the component template (default: false) - **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.cluster.getSettings [_cluster.get_settings] @@ -3652,9 +3653,9 @@ client.cluster.getSettings({ ... }) #### Request (object) [_request_cluster.get_settings] - **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - **`include_defaults` (Optional, boolean)**: If `true`, returns default cluster settings from the local node. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error.
-- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error.

## client.cluster.health [_cluster.health]
Get the cluster health status.

@@ -3679,18 +3680,18 @@ client.cluster.health({ ... })

### Arguments [_arguments_cluster.health]

#### Request (object) [_request_cluster.health]
-- **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`.
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Can be one of cluster, indices or shards. Controls the details level of the health information returned.
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Can be one of cluster, indices or shards. Controls the details level of the health information returned.
- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: A number controlling how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait.
-- **`wait_for_events` (Optional, Enum("immediate" | "urgent" | "high" | "normal" | "low" | "languid"))**: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
-- **`wait_for_nodes` (Optional, string | number)**: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N.
-- **`wait_for_status` (Optional, Enum("green" | "yellow" | "red"))**: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: A number controlling how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait.
+- **`wait_for_events` (Optional, Enum("immediate" \| "urgent" \| "high" \| "normal" \| "low" \| "languid"))**: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
+- **`wait_for_nodes` (Optional, string \| number)**: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N.
+- **`wait_for_status` (Optional, Enum("green" \| "yellow" \| "red"))**: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status.

## client.cluster.info [_cluster.info]
Get cluster info.
@@ -3705,7 +3706,7 @@ client.cluster.info({ target }) ### Arguments [_arguments_cluster.info] #### Request (object) [_request_cluster.info] -- **`target` (Enum("_all" | "http" | "ingest" | "thread_pool" | "script") | Enum("_all" | "http" | "ingest" | "thread_pool" | "script")[])**: Limits the information returned to the specific target. Supports a list, such as http,ingest. +- **`target` (Enum("_all" \| "http" \| "ingest" \| "thread_pool" \| "script") \| Enum("_all" \| "http" \| "ingest" \| "thread_pool" \| "script")[])**: Limits the information returned to the specific target. Supports a list, such as http,ingest. ## client.cluster.pendingTasks [_cluster.pending_tasks] Get the pending cluster tasks. @@ -3726,7 +3727,7 @@ client.cluster.pendingTasks({ ... }) #### Request (object) [_request_cluster.pending_tasks] - **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.cluster.postVotingConfigExclusions [_cluster.post_voting_config_exclusions] @@ -3758,12 +3759,12 @@ client.cluster.postVotingConfigExclusions({ ... }) ### Arguments [_arguments_cluster.post_voting_config_exclusions] #### Request (object) [_request_cluster.post_voting_config_exclusions] -- **`node_names` (Optional, string | string[])**: A list of the names of the nodes to exclude from the +- **`node_names` (Optional, string \| string[])**: A list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify node_ids. -- **`node_ids` (Optional, string | string[])**: A list of the persistent ids of the nodes to exclude +- **`node_ids` (Optional, string \| string[])**: A list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -- **`timeout` (Optional, string | -1 | 0)**: When adding a voting configuration exclusion, the API waits for the +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`timeout` (Optional, string \| -1 \| 0)**: When adding a voting configuration exclusion, the API waits for the specified nodes to be excluded from the voting configuration before returning. If the timeout expires before the appropriate condition is satisfied, the request fails and returns an error. @@ -3815,7 +3816,7 @@ To unset `_meta`, replace the template without specifying this information. - **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. - **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing component templates. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
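As a minimal sketch of the request shape for creating a component template (the template name, settings, and metadata below are hypothetical; the `template` body field is part of the API but elided from the excerpt above):

```js
// Hypothetical sketch: a component template carrying only index settings.
await client.cluster.putComponentTemplate({
  name: 'my-shard-settings',       // hypothetical template name
  template: {
    settings: {
      number_of_shards: 1,
      number_of_replicas: 1
    }
  },
  _meta: { description: 'baseline shard layout' }, // optional metadata
  create: false                    // false permits replacing an existing template
})
```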
## client.cluster.putSettings [_cluster.put_settings] @@ -3851,8 +3852,8 @@ client.cluster.putSettings({ ... }) - **`persistent` (Optional, Record)** - **`transient` (Optional, Record)** - **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) -- **`master_timeout` (Optional, string | -1 | 0)**: Explicit operation timeout for connection to master node -- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout +- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node +- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout ## client.cluster.remoteInfo [_cluster.remote_info] Get remote cluster information. @@ -3902,10 +3903,10 @@ client.cluster.reroute({ ... }) - **`dry_run` (Optional, boolean)**: If true, then the request simulates the operation. It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. - **`explain` (Optional, boolean)**: If true, then the response contains an explanation of why the commands can or cannot run. -- **`metric` (Optional, string | string[])**: Limits the information returned to the specified metrics. +- **`metric` (Optional, string \| string[])**: Limits the information returned to the specified metrics. - **`retry_failed` (Optional, boolean)**: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.cluster.state [_cluster.state] Get the cluster state. @@ -3937,16 +3938,16 @@ client.cluster.state({ ... }) ### Arguments [_arguments_cluster.state] #### Request (object) [_request_cluster.state] -- **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics -- **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices +- **`metric` (Optional, string \| string[])**: Limit the information returned to the specified metrics +- **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices - **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) - **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node (default: false) -- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master +- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master - **`wait_for_metadata_version` (Optional, number)**: Wait for the metadata version to be equal or greater than the specified metadata version -- **`wait_for_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for wait_for_metadata_version before timing out +- **`wait_for_timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for wait_for_metadata_version before timing out ## client.cluster.stats [_cluster.stats] Get cluster statistics. @@ -3961,9 +3962,9 @@ client.cluster.stats({ ... }) ### Arguments [_arguments_cluster.stats] #### Request (object) [_request_cluster.stats] -- **`node_id` (Optional, string | string[])**: List of node filters used to limit returned information. Defaults to all nodes in the cluster. +- **`node_id` (Optional, string \| string[])**: List of node filters used to limit returned information. Defaults to all nodes in the cluster. - **`include_remotes` (Optional, boolean)**: Include remote cluster data into the response -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for each node to respond. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. @@ -4037,9 +4038,9 @@ client.connector.list({ ... }) #### Request (object) [_request_connector.list] - **`from` (Optional, number)**: Starting offset (default: 0) - **`size` (Optional, number)**: Specifies a max number of results to get -- **`index_name` (Optional, string | string[])**: A list of connector index names to fetch connector documents for -- **`connector_name` (Optional, string | string[])**: A list of connector names to fetch connector documents for -- **`service_type` (Optional, string | string[])**: A list of connector service types to fetch connector documents for +- **`index_name` (Optional, string \| string[])**: A list of connector index names to fetch connector documents for +- **`connector_name` (Optional, string \| string[])**: A list of connector names to fetch connector documents for +- **`service_type` (Optional, string \| string[])**: A list of connector service types to fetch connector documents for - **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. - **`query` (Optional, string)**: A wildcard query string that filters connectors with matching name, description or index name @@ -4212,9 +4213,9 @@ client.connector.syncJobList({ ... 
}) #### Request (object) [_request_connector.sync_job_list] - **`from` (Optional, number)**: Starting offset (default: 0) - **`size` (Optional, number)**: Specifies a max number of results to get -- **`status` (Optional, Enum("canceling" | "canceled" | "completed" | "error" | "in_progress" | "pending" | "suspended"))**: A sync job status to fetch connector sync jobs for +- **`status` (Optional, Enum("canceling" \| "canceled" \| "completed" \| "error" \| "in_progress" \| "pending" \| "suspended"))**: A sync job status to fetch connector sync jobs for - **`connector_id` (Optional, string)**: A connector id to fetch connector sync jobs for -- **`job_type` (Optional, Enum("full" | "incremental" | "access_control") | Enum("full" | "incremental" | "access_control")[])**: A list of job types to fetch the sync jobs for +- **`job_type` (Optional, Enum("full" \| "incremental" \| "access_control") \| Enum("full" \| "incremental" \| "access_control")[])**: A list of job types to fetch the sync jobs for ## client.connector.syncJobPost [_connector.sync_job_post] Create a connector sync job. @@ -4231,8 +4232,8 @@ client.connector.syncJobPost({ id }) #### Request (object) [_request_connector.sync_job_post] - **`id` (string)**: The id of the associated connector -- **`job_type` (Optional, Enum("full" | "incremental" | "access_control"))** -- **`trigger_method` (Optional, Enum("on_demand" | "scheduled"))** +- **`job_type` (Optional, Enum("full" \| "incremental" \| "access_control"))** +- **`trigger_method` (Optional, Enum("on_demand" \| "scheduled"))** ## client.connector.syncJobUpdateStats [_connector.sync_job_update_stats] Set the connector sync job stats. @@ -4256,7 +4257,7 @@ client.connector.syncJobUpdateStats({ connector_sync_job_id, deleted_document_co - **`deleted_document_count` (number)**: The number of documents the sync job deleted. - **`indexed_document_count` (number)**: The number of documents the sync job indexed. - **`indexed_document_volume` (number)**: The total size of the data (in MiB) the sync job indexed. -- **`last_seen` (Optional, string | -1 | 0)**: The timestamp to use in the `last_seen` property for the connector sync job. +- **`last_seen` (Optional, string \| -1 \| 0)**: The timestamp to use in the `last_seen` property for the connector sync job. - **`metadata` (Optional, Record)**: The connector-specific metadata. - **`total_document_count` (Optional, number)**: The total number of documents in the target index after the sync job finished. @@ -4332,7 +4333,7 @@ client.connector.updateError({ connector_id, error }) #### Request (object) [_request_connector.update_error] - **`connector_id` (string)**: The unique identifier of the connector to be updated -- **`error` (T | null)** +- **`error` (T \| null)** ## client.connector.updateFeatures [_connector.update_features] Update the connector features. @@ -4415,7 +4416,7 @@ client.connector.updateIndexName({ connector_id, index_name }) #### Request (object) [_request_connector.update_index_name] - **`connector_id` (string)**: The unique identifier of the connector to be updated -- **`index_name` (T | null)** +- **`index_name` (T \| null)** ## client.connector.updateName [_connector.update_name] Update the connector name and description. 
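A minimal sketch of this call (the full argument listing is elided from this excerpt; the connector id and values below are hypothetical):

```js
// Hypothetical sketch: rename a connector and refresh its description.
await client.connector.updateName({
  connector_id: 'my-connector',
  name: 'Internal docs connector',
  description: 'Syncs the internal documentation index'
})
```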
@@ -4508,7 +4509,7 @@ client.connector.updateStatus({ connector_id, status })

#### Request (object) [_request_connector.update_status]
- **`connector_id` (string)**: The unique identifier of the connector to be updated
-- **`status` (Enum("created" | "needs_configuration" | "configured" | "connected" | "error"))**
+- **`status` (Enum("created" \| "needs_configuration" \| "configured" \| "connected" \| "error"))**

## client.danglingIndices.deleteDanglingIndex [_dangling_indices.delete_dangling_index]
Delete a dangling index.
@@ -4526,8 +4527,8 @@ client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss })

#### Request (object) [_request_dangling_indices.delete_dangling_index]
- **`index_uuid` (string)**: The UUID of the index to delete. Use the get dangling indices API to find the UUID.
- **`accept_data_loss` (boolean)**: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index.
-- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
-- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master
+- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout

## client.danglingIndices.importDanglingIndex [_dangling_indices.import_dangling_index]
Import a dangling index.
@@ -4547,8 +4548,8 @@ client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss })
- **`index_uuid` (string)**: The UUID of the index to import. Use the get dangling indices API to locate the UUID.
- **`accept_data_loss` (boolean)**: This parameter must be set to true to import a dangling index.
Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster.
-- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
-- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master
+- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout

## client.danglingIndices.listDanglingIndices [_dangling_indices.list_dangling_indices]
Get the dangling indices.
@@ -4579,7 +4580,7 @@ client.enrich.deletePolicy({ name })

#### Request (object) [_request_enrich.delete_policy]
- **`name` (string)**: Enrich policy to delete.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.

## client.enrich.executePolicy [_enrich.execute_policy]
Run an enrich policy.
@@ -4595,7 +4596,7 @@ client.enrich.executePolicy({ name })

#### Request (object) [_request_enrich.execute_policy]
- **`name` (string)**: Enrich policy to execute.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks other enrich policy execution requests until complete.

## client.enrich.getPolicy [_enrich.get_policy]
Get an enrich policy.
@@ -4611,9 +4612,9 @@ client.enrich.getPolicy({ ...
}) ### Arguments [_arguments_enrich.get_policy] #### Request (object) [_request_enrich.get_policy] -- **`name` (Optional, string | string[])**: List of enrich policy names used to limit the request. +- **`name` (Optional, string \| string[])**: List of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.enrich.putPolicy [_enrich.put_policy] Create an enrich policy. @@ -4632,7 +4633,7 @@ client.enrich.putPolicy({ name }) - **`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `geo_shape` query. - **`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `term` query. - **`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.enrich.stats [_enrich.stats] Get enrich stats. @@ -4647,7 +4648,7 @@ client.enrich.stats({ ... }) ### Arguments [_arguments_enrich.stats] #### Request (object) [_request_enrich.stats] -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.eql.delete [_eql.delete] Delete an async EQL search. @@ -4681,9 +4682,9 @@ client.eql.get({ id }) #### Request (object) [_request_eql.get] - **`id` (string)**: Identifier for the search. -- **`keep_alive` (Optional, string | -1 | 0)**: Period for which the search and its results are stored on the cluster. +- **`keep_alive` (Optional, string \| -1 \| 0)**: Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request. -- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Timeout duration to wait for the request to finish. +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. ## client.eql.getStatus [_eql.get_status] @@ -4715,17 +4716,17 @@ client.eql.search({ index, query }) ### Arguments [_arguments_eql.search] #### Request (object) [_request_eql.search] -- **`index` (string | string[])**: The name of the index to scope the operation +- **`index` (string \| string[])**: The name of the index to scope the operation - **`query` (string)**: EQL query you wish to run. - **`case_sensitive` (Optional, boolean)** - **`event_category_field` (Optional, string)**: Field containing the event classification, such as process, file, or network. - **`tiebreaker_field` (Optional, string)**: Field used to sort hits with the same timestamp in ascending order - **`timestamp_field` (Optional, string)**: Field containing event timestamp. 
Default "@timestamp" - **`fetch_size` (Optional, number)**: Maximum number of events to search at a time for sequence queries. -- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query, written in Query DSL, used to filter the events on which the EQL query runs. -- **`keep_alive` (Optional, string | -1 | 0)** +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } \| { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query, written in Query DSL, used to filter the events on which the EQL query runs. 
+- **`keep_alive` (Optional, string \| -1 \| 0)** - **`keep_on_completion` (Optional, boolean)** -- **`wait_for_completion_timeout` (Optional, string | -1 | 0)** +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)** - **`allow_partial_search_results` (Optional, boolean)**: Allow query execution also in case of shard failures. If true, the query will keep running and will return results based on the available shards. For sequences, the behavior can be further refined using allow_partial_sequence_results @@ -4733,14 +4734,14 @@ For sequences, the behavior can be further refined using allow_partial_sequence_ If true, the sequence query will return results based on the available shards, ignoring the others. If false, the sequence query will return successfully, but will always have empty results. - **`size` (Optional, number)**: For basic queries, the maximum number of matching events to return. Defaults to 10 -- **`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. -- **`result_position` (Optional, Enum("tail" | "head"))** +- **`fields` (Optional, { field, format, include_unmapped } \| { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. +- **`result_position` (Optional, Enum("tail" \| "head"))** - **`runtime_mappings` (Optional, Record)** - **`max_samples_per_key` (Optional, number)**: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. - **`allow_no_indices` (Optional, boolean)** -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])** +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])** - **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. ## client.esql.asyncQuery [_esql.async_query] @@ -4762,7 +4763,7 @@ client.esql.asyncQuery({ query }) - **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. 
- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
- **`locale` (Optional, string)**
-- **`params` (Optional, number | number | string | boolean | null[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.
+- **`params` (Optional, number \| number \| string \| boolean \| null[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.
- **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object
with information on how the query was executed. This information is for human debugging
and its format can change at any time but it can give some insight into the performance
@@ -4772,7 +4773,7 @@ name and the next level key is the column name.
- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster query, the response
will include an extra `_clusters` object with information about the clusters that participated
in the search along with info such as shards count.
-- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish.
+- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the request to finish.
By default, the request waits for 1 second for the query results.
If the query completes during this period, results are returned.
Otherwise, a query ID is returned that can later be used to retrieve the results.
@@ -4784,8 +4785,8 @@ To override the default behavior, you can set the `esql.query.allow_partial_resu
It is valid only for the CSV format.
- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
-- **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, for example `json` or `yaml`.
-- **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster.
+- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, for example `json` or `yaml`.
+- **`keep_alive` (Optional, string \| -1 \| 0)**: The period for which the query and its results are stored in the cluster.
The default period is five days.
When this period expires, the query and its results are deleted, even if the query is still ongoing. If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. @@ -4834,9 +4835,9 @@ A query ID is provided in the ES|QL async query API response for a query that do A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. - **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. -- **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. -- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish. +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the request to finish. By default, the request waits for complete query results. If the request completes during the period specified in this parameter, complete query results are returned. Otherwise, the response returns an `is_running` value of `true` and no results. @@ -4899,7 +4900,7 @@ client.esql.query({ query }) - **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. - **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. - **`locale` (Optional, string)** -- **`params` (Optional, number | number | string | boolean | null[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +- **`params` (Optional, number \| number \| string \| boolean \| null[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. 
- **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance @@ -4909,7 +4910,7 @@ name and the next level key is the column name. - **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. -- **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, e.g. json, yaml. +- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, e.g. json, yaml. - **`delimiter` (Optional, string)**: The character to use between values within a CSV row. Only valid for the CSV format. - **`drop_null_columns` (Optional, boolean)**: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. @@ -4939,7 +4940,7 @@ client.features.getFeatures({ ... }) ### Arguments [_arguments_features.get_features] #### Request (object) [_request_features.get_features] -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.features.resetFeatures [_features.reset_features] Reset the features. @@ -4969,7 +4970,7 @@ client.features.resetFeatures({ ... }) ### Arguments [_arguments_features.reset_features] #### Request (object) [_request_features.reset_features] -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.fleet.globalCheckpoints [_fleet.global_checkpoints] Get global checkpoints. @@ -4986,7 +4987,7 @@ client.fleet.globalCheckpoints({ index }) ### Arguments [_arguments_fleet.global_checkpoints] #### Request (object) [_request_fleet.global_checkpoints] -- **`index` (string | string)**: A single index or index alias that resolves to a single index. +- **`index` (string \| string)**: A single index or index alias that resolves to a single index. - **`wait_for_advance` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the global checkpoints to advance past the provided `checkpoints`. - **`wait_for_index` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the target index to exist @@ -4994,7 +4995,7 @@ and all primary shards be active. Can only be true when `wait_for_advance` is tr - **`checkpoints` (Optional, number[])**: A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list will cause Elasticsearch to immediately return the current global checkpoints. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a global checkpoints to advance past `checkpoints`. 
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for the global checkpoints to advance past `checkpoints`.

## client.fleet.msearch [_fleet.msearch]
Run multiple Fleet searches.
@@ -5011,17 +5012,17 @@ client.fleet.msearch({ ... })

### Arguments [_arguments_fleet.msearch]

#### Request (object) [_request_fleet.msearch]
-- **`index` (Optional, string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index.
-- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])**
+- **`index` (Optional, string \| string)**: A single target to search. If the target is an index alias, it must resolve to a single index.
+- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])**
- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar.
- **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests.
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen.
- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response.
- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute.
- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node.
- **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method, i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint.
-- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents.
- **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object.
- **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response.
- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
@@ -5045,7 +5046,7 @@ client.fleet.search({ index })

### Arguments [_arguments_fleet.search]

#### Request (object) [_request_fleet.search]
-- **`index` (string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index.
+- **`index` (string \| string)**: A single target to search. If the target is an index alias, it must resolve to a single index.
- **`aggregations` (Optional, Record)**
- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**
- **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit.
@@ -5054,7 +5055,7 @@ client.fleet.search({ index })
hits using the from and size parameters. To page through more hits, use the search_after parameter.
- **`highlight` (Optional, { encoder, fields })**
-- **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact
+- **`track_total_hits` (Optional, boolean \| number)**: Number of hits matching the query to count accurately. If true, the exact
number of hits is returned at the cost of some performance. If false, the
response does not include the total number of hits matching the query.
Defaults to 10,000 hits.
@@ -5066,15 +5067,15 @@ not included in search results and results collected by aggregations.
- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** - **`profile` (Optional, boolean)** - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. -- **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** +- **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])** - **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. -- **`search_after` (Optional, number | number | string | boolean | null[])** +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])** - **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. - **`slice` (Optional, { field, id, max })** -- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** -- **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])** +- **`_source` (Optional, boolean \| { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. - **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. @@ -5089,7 +5090,7 @@ Defaults to no timeout. - **`version` (Optional, boolean)**: If true, returns document version as part of a hit. - **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. 
-- **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified,
+- **`stored_fields` (Optional, string \| string[])**: List of stored fields to return as part of a hit. If no fields are specified,
no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass
_source: true to return both source fields and stored fields in the search response.
@@ -5105,9 +5106,9 @@ the indices stats API.
- **`analyze_wildcard` (Optional, boolean)**
- **`batched_reduce_size` (Optional, number)**
- **`ccs_minimize_roundtrips` (Optional, boolean)**
-- **`default_operator` (Optional, Enum("and" | "or"))**
+- **`default_operator` (Optional, Enum("and" \| "or"))**
- **`df` (Optional, string)**
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**
- **`ignore_throttled` (Optional, boolean)**
- **`ignore_unavailable` (Optional, boolean)**
- **`lenient` (Optional, boolean)**
@@ -5116,16 +5117,16 @@ the indices stats API.
- **`pre_filter_shard_size` (Optional, number)**
- **`request_cache` (Optional, boolean)**
- **`routing` (Optional, string)**
-- **`scroll` (Optional, string | -1 | 0)**
-- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**
+- **`scroll` (Optional, string \| -1 \| 0)**
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**
- **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions.
-- **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**
+- **`suggest_mode` (Optional, Enum("missing" \| "popular" \| "always"))**
- **`suggest_size` (Optional, number)**
- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned.
- **`typed_keys` (Optional, boolean)**
- **`rest_total_hits_as_int` (Optional, boolean)**
-- **`_source_excludes` (Optional, string | string[])**
-- **`_source_includes` (Optional, string | string[])**
+- **`_source_excludes` (Optional, string \| string[])**
+- **`_source_includes` (Optional, string \| string[])**
- **`q` (Optional, string)**
- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
@@ -5151,13 +5152,13 @@ client.graph.explore({ index })

### Arguments [_arguments_graph.explore]

#### Request (object) [_request_graph.explore]
-- **`index` (string | string[])**: Name of the index.
+- **`index` (string \| string[])**: Name of the index.
- **`connections` (Optional, { connections, query, vertices })**: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
- **`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })**: Direct the Graph API how to build the graph.
- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
- **`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])**: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
- **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
-- **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for a response from each shard.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the period of time to wait for a response from each shard.
If no response is received before the timeout expires, the request fails and returns an error.
Defaults to no timeout.
@@ -5175,8 +5176,8 @@ client.ilm.deleteLifecycle({ policy })

#### Request (object) [_request_ilm.delete_lifecycle]
- **`policy` (string)**: Identifier for the policy.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

## client.ilm.explainLifecycle [_ilm.explain_lifecycle]
Explain the lifecycle state.
@@ -5198,7 +5199,7 @@ client.ilm.explainLifecycle({ index })
To target all data streams and indices, use `*` or `_all`.
- **`only_errors` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist.
- **`only_managed` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.

## client.ilm.getLifecycle [_ilm.get_lifecycle]
Get lifecycle policies.
@@ -5213,8 +5214,8 @@ client.ilm.getLifecycle({ ...
}) #### Request (object) [_request_ilm.get_lifecycle] - **`policy` (Optional, string)**: Identifier for the policy. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.ilm.getStatus [_ilm.get_status] Get the ILM status. @@ -5257,7 +5258,7 @@ client.ilm.migrateToDataTiers({ ... }) - **`node_attribute` (Optional, string)** - **`dry_run` (Optional, boolean)**: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. @@ -5306,8 +5307,8 @@ client.ilm.putLifecycle({ policy }) #### Request (object) [_request_ilm.put_lifecycle] - **`policy` (string)**: Identifier for the policy. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.ilm.removePolicy [_ilm.remove_policy] Remove policies from an index. @@ -5357,8 +5358,8 @@ client.ilm.start({ ... }) ### Arguments [_arguments_ilm.start] #### Request (object) [_request_ilm.start] -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
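
A minimal usage sketch, assuming ILM was previously stopped; the timeout values are illustrative:

```ts
// Restart the ILM plugin, waiting up to 30 seconds for the master node
// and for the response to be acknowledged.
const response = await client.ilm.start({
  master_timeout: '30s',
  timeout: '30s'
})
console.log(response.acknowledged)
```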
## client.ilm.stop [_ilm.stop] Stop the ILM plugin. @@ -5377,8 +5378,8 @@ client.ilm.stop({ ... }) ### Arguments [_arguments_ilm.stop] #### Request (object) [_request_ilm.stop] -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.addBlock [_indices.add_block] Add an index block. @@ -5399,18 +5400,18 @@ client.indices.addBlock({ index, block }) By default, you must explicitly name the indices you are adding blocks to. To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. -- **`block` (Enum("metadata" | "read" | "read_only" | "write"))**: The block type to add to the index. +- **`block` (Enum("metadata" \| "read" \| "read_only" \| "write"))**: The block type to add to the index. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. 
It can also be set to `-1` to indicate that the request should never timeout. @@ -5438,16 +5439,16 @@ If no index is specified or the index does not have a default analyzer, the anal - **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index. - **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter. -- **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer. +- **`char_filter` (Optional, string \| { type, escaped_tags } \| { type, mappings, mappings_path } \| { type, flags, pattern, replacement } \| { type, mode, name, unicode_set_filter } \| { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer. - **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details. - **`field` (Optional, string)**: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. -- **`filter` (Optional, string | { type } | { type } | { type, preserve_original } | { type, ignored_scripts, output_unigrams } | { type } | { type } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type } | { type } | { type } | { type, dedup, dictionary, locale, longest_only } | { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, bucket_count, hash_count, hash_set_size, with_rotation } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, pattern, replacement } | { type } | { type } | { type, script } | { type } | { type } | { type } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type } | { type } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, ignore_keywords } | { type } | { type, stopwords } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])**: Array of token filters used to apply after the tokenizer. 
+- **`filter` (Optional, string \| { type } \| { type } \| { type, preserve_original } \| { type, ignored_scripts, output_unigrams } \| { type } \| { type } \| { type, common_words, common_words_path, ignore_case, query_mode } \| { type, filter, script } \| { type } \| { type, delimiter, encoding } \| { type, max_gram, min_gram, side, preserve_original } \| { type, articles, articles_path, articles_case } \| { type, max_output_size, separator } \| { type } \| { type } \| { type } \| { type, dedup, dictionary, locale, longest_only } \| { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } \| { type } \| { type, mode, types } \| { type, keep_words, keep_words_case, keep_words_path } \| { type, ignore_case, keywords, keywords_path, keywords_pattern } \| { type } \| { type } \| { type, max, min } \| { type, consume_all_tokens, max_token_count } \| { type, language } \| { type, bucket_count, hash_count, hash_set_size, with_rotation } \| { type, filters, preserve_original } \| { type, max_gram, min_gram, preserve_original } \| { type, stoptags } \| { type, patterns, preserve_original } \| { type, all, pattern, replacement } \| { type } \| { type } \| { type, script } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } \| { type, language } \| { type } \| { type, rules, rules_path } \| { type, language } \| { type, ignore_case, remove_trailing, stopwords, stopwords_path } \| { type } \| { type } \| { type } \| { type, length } \| { type, only_on_same_position } \| { type } \| { type, adjust_offsets, ignore_keywords } \| { type } \| { type, stopwords } \| { type, minimum_length } \| { type, use_romaji } \| { type, stoptags } \| { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } \| { type, unicode_set_filter } \| { type, name } \| { type, dir, id } \| { type, encoder, languageset, max_code_len, name_type, replace, rule_type } \| { type }[])**: Array of token filters used to apply after the tokenizer. - **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token. -- **`text` (Optional, string | string[])**: Text to analyze. +- **`text` (Optional, string \| string[])**: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. -- **`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })**: Tokenizer to use to convert text into tokens. 
+- **`tokenizer` (Optional, string \| { type, tokenize_on_chars, max_token_length } \| { type, max_token_length } \| { type, custom_token_chars, max_gram, min_gram, token_chars } \| { type, buffer_size } \| { type } \| { type } \| { type, custom_token_chars, max_gram, min_gram, token_chars } \| { type, buffer_size, delimiter, replacement, reverse, skip } \| { type, flags, group, pattern } \| { type, pattern } \| { type, pattern } \| { type, max_token_length } \| { type } \| { type, max_token_length } \| { type, max_token_length } \| { type, rule_files } \| { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } \| { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })**: Tokenizer to use to convert text into tokens. ## client.indices.cancelMigrateReindex [_indices.cancel_migrate_reindex] Cancel a migration reindex operation. @@ -5463,7 +5464,7 @@ client.indices.cancelMigrateReindex({ index }) ### Arguments [_arguments_indices.cancel_migrate_reindex] #### Request (object) [_request_indices.cancel_migrate_reindex] -- **`index` (string | string[])**: The index or data stream name +- **`index` (string \| string[])**: The index or data stream name ## client.indices.clearCache [_indices.clear_cache] Clear the cache. @@ -5483,18 +5484,18 @@ client.indices.clearCache({ ... }) ### Arguments [_arguments_indices.clear_cache] #### Request (object) [_request_indices.clear_cache] -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`fielddata` (Optional, boolean)**: If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. -- **`fields` (Optional, string | string[])**: List of field names used to limit the `fielddata` parameter. +- **`fields` (Optional, string \| string[])**: List of field names used to limit the `fielddata` parameter. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`query` (Optional, boolean)**: If `true`, clears the query cache. - **`request` (Optional, boolean)**: If `true`, clears the request cache. @@ -5559,11 +5560,11 @@ client.indices.clone({ index, target }) - **`target` (string)**: Name of the target index to create. - **`aliases` (Optional, Record)**: Aliases for the resulting index. 
- **`settings` (Optional, Record)**: Configuration options for the target index. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). ## client.indices.close [_indices.close] @@ -5595,19 +5596,19 @@ client.indices.close({ index }) ### Arguments [_arguments_indices.close] #### Request (object) [_request_indices.close] -- **`index` (string | string[])**: List or wildcard expression of index names used to limit the request. +- **`index` (string \| string[])**: List or wildcard expression of index names used to limit the request. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). 
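For orientation, a minimal usage sketch of the close API follows. It is not part of the generated reference: `client` is assumed to be an already-configured `Client` instance and `my-index` is a hypothetical index name.

```ts
// Close a hypothetical index, waiting until all shard copies are active
// before the operation proceeds.
const response = await client.indices.close({
  index: 'my-index',             // hypothetical index name
  wait_for_active_shards: 'all', // documented above
  timeout: '30s'
})
console.log(response.acknowledged) // true once the close is acknowledged
```
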
## client.indices.create [_indices.create] @@ -5657,11 +5658,11 @@ Index names must meet the following criteria: - Field data types - Mapping parameters - **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). ## client.indices.createDataStream [_indices.create_data_stream] @@ -5684,8 +5685,8 @@ Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.createFrom [_indices.create_from] Create an index from a source index. @@ -5722,7 +5723,7 @@ client.indices.dataStreamsStats({ ... }) - **`name` (Optional, string)**: List of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a cluster, omit this parameter or use `*`. 
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. ## client.indices.delete [_indices.delete] @@ -5743,20 +5744,20 @@ client.indices.delete({ index }) ### Arguments [_arguments_indices.delete] #### Request (object) [_request_indices.delete] -- **`index` (string | string[])**: List of indices to delete. +- **`index` (string \| string[])**: List of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.deleteAlias [_indices.delete_alias] @@ -5772,13 +5773,13 @@ client.indices.deleteAlias({ index, name }) ### Arguments [_arguments_indices.delete_alias] #### Request (object) [_request_indices.delete_alias] -- **`index` (string | string[])**: List of data streams or indices used to limit the request. +- **`index` (string \| string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). -- **`name` (string | string[])**: List of aliases to remove. +- **`name` (string \| string[])**: List of aliases to remove. Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
-- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
 If no response is received before the timeout expires, the request fails and returns an error.

## client.indices.deleteDataLifecycle [_indices.delete_data_lifecycle]
Delete data stream lifecycles.
Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle)

```ts
client.indices.deleteDataLifecycle({ name })
```

### Arguments [_arguments_indices.delete_data_lifecycle]

#### Request (object) [_request_indices.delete_data_lifecycle]
-- **`name` (string | string[])**: A list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open)
-- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
-- **`timeout` (Optional, string | -1 | 0)**: Explicit timestamp for the document
+- **`name` (string \| string[])**: A list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

## client.indices.deleteDataStream [_indices.delete_data_stream]
Delete data streams.
Deletes one or more data streams and their backing indices.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream)

```ts
client.indices.deleteDataStream({ name })
```

### Arguments [_arguments_indices.delete_data_stream]

#### Request (object) [_request_indices.delete_data_stream]
-- **`name` (string | string[])**: List of data streams to delete. Wildcard (`*`) expressions are supported.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values,such as `open,hidden`.
+- **`name` (string \| string[])**: List of data streams to delete. Wildcard (`*`) expressions are supported.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.
+
+## client.indices.deleteDataStreamOptions [_indices.delete_data_stream_options]
+Deletes the data stream options of the selected data streams.
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)
+
+```ts
+client.indices.deleteDataStreamOptions()
+```
+

## client.indices.deleteIndexTemplate [_indices.delete_index_template]
Delete an index template.
@@ -5831,9 +5842,9 @@ client.indices.deleteIndexTemplate({ name }) ### Arguments [_arguments_indices.delete_index_template] #### Request (object) [_request_indices.delete_index_template] -- **`name` (string | string[])**: List of index template names used to limit the request. Wildcard (*) expressions are supported. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`name` (string \| string[])**: List of index template names used to limit the request. Wildcard (*) expressions are supported. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.deleteTemplate [_indices.delete_template] Delete a legacy index template. @@ -5849,9 +5860,9 @@ client.indices.deleteTemplate({ name }) #### Request (object) [_request_indices.delete_template] - **`name` (string)**: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.diskUsage [_indices.disk_usage] @@ -5873,12 +5884,12 @@ client.indices.diskUsage({ index }) ### Arguments [_arguments_indices.disk_usage] #### Request (object) [_request_indices.disk_usage] -- **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request. +- **`index` (string \| string[])**: List of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. - **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
Supports a list of values, such as `open,hidden`. - **`flush` (Optional, boolean)**: If `true`, the API performs a flush before analysis. @@ -5923,10 +5934,10 @@ client.indices.exists({ index }) ### Arguments [_arguments_indices.exists] #### Request (object) [_request_indices.exists] -- **`index` (string | string[])**: List of data streams, indices, and aliases. Supports wildcards (`*`). +- **`index` (string \| string[])**: List of data streams, indices, and aliases. Supports wildcards (`*`). - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. @@ -5949,17 +5960,17 @@ client.indices.existsAlias({ name }) ### Arguments [_arguments_indices.exists_alias] #### Request (object) [_request_indices.exists_alias] -- **`name` (string | string[])**: List of aliases to check. Supports wildcards (`*`). -- **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). +- **`name` (string \| string[])**: List of aliases to check. Supports wildcards (`*`). +- **`index` (Optional, string \| string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
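Both existence checks above resolve to a boolean body in this client, so they compose naturally with the create and alias APIs documented in this section. A small sketch, using hypothetical index and alias names and an already-configured `client`:

```ts
// Create a hypothetical index and alias only if they are missing.
const indexExists = await client.indices.exists({ index: 'my-index' })
if (!indexExists) {
  await client.indices.create({ index: 'my-index' })
}

const aliasExists = await client.indices.existsAlias({
  name: 'my-alias',
  index: 'my-index'
})
if (!aliasExists) {
  await client.indices.putAlias({ index: 'my-index', name: 'my-alias' })
}
```
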
## client.indices.existsIndexTemplate [_indices.exists_index_template] @@ -5979,7 +5990,7 @@ client.indices.existsIndexTemplate({ name }) - **`name` (string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. - **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. - **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.existsTemplate [_indices.exists_template] Check existence of index templates. @@ -5997,11 +6008,11 @@ client.indices.existsTemplate({ name }) ### Arguments [_arguments_indices.exists_template] #### Request (object) [_request_indices.exists_template] -- **`name` (string | string[])**: A list of index template names used to limit the request. +- **`name` (string \| string[])**: A list of index template names used to limit the request. Wildcard (`*`) expressions are supported. - **`flat_settings` (Optional, boolean)**: Indicates whether to use a flat format for the response. - **`local` (Optional, boolean)**: Indicates whether to get information from the local node only. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. @@ -6018,9 +6029,9 @@ client.indices.explainDataLifecycle({ index }) ### Arguments [_arguments_indices.explain_data_lifecycle] #### Request (object) [_request_indices.explain_data_lifecycle] -- **`index` (string | string[])**: The name of the index to explain +- **`index` (string \| string[])**: The name of the index to explain - **`include_defaults` (Optional, boolean)**: indicates if the API should return the default values the system uses for the index's lifecycle -- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master +- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master ## client.indices.fieldUsageStats [_indices.field_usage_stats] Get field usage stats. @@ -6040,15 +6051,15 @@ client.indices.fieldUsageStats({ index }) ### Arguments [_arguments_indices.field_usage_stats] #### Request (object) [_request_indices.field_usage_stats] -- **`index` (string | string[])**: List or wildcard expression of index names used to limit the request. +- **`index` (string \| string[])**: List or wildcard expression of index names used to limit the request. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. 
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. -- **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. +- **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics. ## client.indices.flush [_indices.flush] Flush data streams or indices. @@ -6072,12 +6083,12 @@ client.indices.flush({ ... }) ### Arguments [_arguments_indices.flush] #### Request (object) [_request_indices.flush] -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to flush. +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this parameter or use `*` or `_all`. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. @@ -6150,9 +6161,9 @@ client.indices.forcemerge({ ... }) ### Arguments [_arguments_indices.forcemerge] #### Request (object) [_request_indices.forcemerge] -- **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices +- **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices - **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
- **`flush` (Optional, boolean)**: Specify whether the index should be flushed after performing the operation (default: true) - **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - **`max_num_segments` (Optional, number)**: The number of segments the index should be merged into (default: dynamic) @@ -6173,20 +6184,20 @@ client.indices.get({ index }) ### Arguments [_arguments_indices.get] #### Request (object) [_request_indices.get] -- **`index` (string | string[])**: List of data streams, indices, and index aliases used to limit the request. +- **`index` (string \| string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. - **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as open,hidden. - **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. - **`ignore_unavailable` (Optional, boolean)**: If false, requests that target a missing index return an error. - **`include_defaults` (Optional, boolean)**: If true, return all default settings in the response. - **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`features` (Optional, { name, description } | { name, description }[])**: Return only information on specified index features +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`features` (Optional, { name, description } \| { name, description }[])**: Return only information on specified index features ## client.indices.getAlias [_indices.get_alias] Get aliases. @@ -6201,20 +6212,20 @@ client.indices.getAlias({ ... }) ### Arguments [_arguments_indices.get_alias] #### Request (object) [_request_indices.get_alias] -- **`name` (Optional, string | string[])**: List of aliases to retrieve. +- **`name` (Optional, string \| string[])**: List of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. -- **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. 
+- **`index` (Optional, string \| string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.getDataLifecycle [_indices.get_data_lifecycle] @@ -6231,14 +6242,14 @@ client.indices.getDataLifecycle({ name }) ### Arguments [_arguments_indices.get_data_lifecycle] #### Request (object) [_request_indices.get_data_lifecycle] -- **`name` (string | string[])**: List of data streams to limit the request. +- **`name` (string \| string[])**: List of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.getDataLifecycleStats [_indices.get_data_lifecycle_stats] Get data stream lifecycle stats. @@ -6265,14 +6276,24 @@ client.indices.getDataStream({ ... }) ### Arguments [_arguments_indices.get_data_stream] #### Request (object) [_request_indices.get_data_stream] -- **`name` (Optional, string | string[])**: List of data stream names used to limit the request. +- **`name` (Optional, string \| string[])**: List of data stream names used to limit the request. 
Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. - **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned. +## client.indices.getDataStreamOptions [_indices.get_data_stream_options] +Returns the data stream options of the selected data streams. + +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) + +```ts +client.indices.getDataStreamOptions() +``` + + ## client.indices.getFieldMapping [_indices.get_field_mapping] Get mapping definitions. Retrieves mapping definitions for one or more fields. @@ -6289,14 +6310,14 @@ client.indices.getFieldMapping({ fields }) ### Arguments [_arguments_indices.get_field_mapping] #### Request (object) [_request_indices.get_field_mapping] -- **`fields` (string | string[])**: List or wildcard expression of fields used to limit returned information. +- **`fields` (string \| string[])**: List or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. @@ -6319,7 +6340,7 @@ client.indices.getIndexTemplate({ ... }) - **`name` (Optional, string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. 
- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. - **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. ## client.indices.getMapping [_indices.get_mapping] @@ -6335,18 +6356,18 @@ client.indices.getMapping({ ... }) ### Arguments [_arguments_indices.get_mapping] #### Request (object) [_request_indices.get_mapping] -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.getMigrateReindexStatus [_indices.get_migrate_reindex_status] @@ -6363,7 +6384,7 @@ client.indices.getMigrateReindexStatus({ index }) ### Arguments [_arguments_indices.get_migrate_reindex_status] #### Request (object) [_request_indices.get_migrate_reindex_status] -- **`index` (string | string[])**: The index or data stream name. +- **`index` (string \| string[])**: The index or data stream name. ## client.indices.getSettings [_indices.get_settings] Get index settings. @@ -6379,16 +6400,16 @@ client.indices.getSettings({ ... 
}) ### Arguments [_arguments_indices.get_settings] #### Request (object) [_request_indices.get_settings] -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. -- **`name` (Optional, string | string[])**: List or wildcard expression of settings to retrieve. +- **`name` (Optional, string \| string[])**: List or wildcard expression of settings to retrieve. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with `bar`. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. @@ -6396,7 +6417,7 @@ Supports a list of values, such as `open,hidden`. - **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. - **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -6415,12 +6436,12 @@ client.indices.getTemplate({ ... }) ### Arguments [_arguments_indices.get_template] #### Request (object) [_request_indices.get_template] -- **`name` (Optional, string | string[])**: List of index template names used to limit the request. +- **`name` (Optional, string \| string[])**: List of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, omit this parameter or use a value of `_all` or `*`. - **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
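As a rough sketch of how the getters above combine in practice (the wildcard pattern and template name are hypothetical, and `client` is assumed to be configured already):

```ts
// Inspect the effective settings of indices matching a hypothetical pattern,
// flattened and including cluster defaults.
const settings = await client.indices.getSettings({
  index: 'my-index-*',
  flat_settings: true,
  include_defaults: true
})

// Fetch a legacy index template by name; the response body is keyed by
// template name.
const templates = await client.indices.getTemplate({ name: 'template_1' })
console.log(Object.keys(templates))
```
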
## client.indices.migrateReindex [_indices.migrate_reindex]

@@ -6464,8 +6485,8 @@ client.indices.migrateToDataStream({ name })

#### Request (object) [_request_indices.migrate_to_data_stream]
- **`name` (string)**: Name of the index alias to convert to a data stream.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

## client.indices.modifyDataStream [_indices.modify_data_stream]
Update data streams.
Performs one or more data stream modification actions in a single atomic operation.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream)

```ts
client.indices.modifyDataStream({ actions })
```

### Arguments [_arguments_indices.modify_data_stream]

#### Request (object) [_request_indices.modify_data_stream]
- **`actions` ({ add_backing_index, remove_backing_index }[])**: Actions to take.

## client.indices.open [_indices.open]
Open a closed index.
For data streams, the API opens any closed backing indices.

@@ -6516,23 +6537,23 @@ client.indices.open({ index })

### Arguments [_arguments_indices.open]

#### Request (object) [_request_indices.open]
-- **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request.
+- **`index` (string \| string[])**: List of data streams, indices, and aliases used to limit the request.
Supports wildcards (`*`).
By default, you must explicitly name the indices you are using to limit the request.
To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false.
You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API.
- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
This behavior applies even if the request targets other open indices.
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
-- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error.
-- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). ## client.indices.promoteDataStream [_indices.promote_data_stream] @@ -6558,7 +6579,7 @@ client.indices.promoteDataStream({ name }) #### Request (object) [_request_indices.promote_data_stream] - **`name` (string)**: The name of the data stream -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.putAlias [_indices.put_alias] Create or update an alias. @@ -6573,7 +6594,7 @@ client.indices.putAlias({ index, name }) ### Arguments [_arguments_indices.put_alias] #### Request (object) [_request_indices.put_alias] -- **`index` (string | string[])**: List of data streams or indices to add. +- **`index` (string \| string[])**: List of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error. - **`name` (string)**: Alias to update. @@ -6592,9 +6613,9 @@ Data stream aliases don’t support this parameter. - **`search_routing` (Optional, string)**: Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.putDataLifecycle [_indices.put_data_lifecycle] @@ -6610,24 +6631,34 @@ client.indices.putDataLifecycle({ name }) ### Arguments [_arguments_indices.put_data_lifecycle] #### Request (object) [_request_indices.put_data_lifecycle] -- **`name` (string | string[])**: List of data streams used to limit the request. +- **`name` (string \| string[])**: List of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. -- **`data_retention` (Optional, string | -1 | 0)**: If defined, every document added to this data stream will be stored at least for this time frame. +- **`data_retention` (Optional, string \| -1 \| 0)**: If defined, every document added to this data stream will be stored at least for this time frame. Any time after this duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely. - **`downsampling` (Optional, { rounds })**: The downsampling configuration to execute for the managed backing index after rollover. - **`enabled` (Optional, boolean)**: If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. 
A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream.
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `hidden`, `open`, `closed`, `none`.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is
received before the timeout expires, the request fails and returns an error.
-- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error.

+## client.indices.putDataStreamOptions [_indices.put_data_stream_options]
+Updates the data stream options of the selected data streams.
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)
+
+```ts
+client.indices.putDataStreamOptions()
+```
+
+
## client.indices.putIndexTemplate [_indices.put_index_template]
Create or update an index template.
Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
@@ -6668,7 +6699,7 @@ client.indices.putIndexTemplate({ name })

#### Request (object) [_request_indices.put_index_template]
- **`name` (string)**: Index or template name
-- **`index_patterns` (Optional, string | string[])**: Name of the index template to create.
+- **`index_patterns` (Optional, string \| string[])**: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation.
- **`composed_of` (Optional, string[])**: An ordered list of component template names.
Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
- **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied.
@@ -6697,7 +6728,7 @@ references a component template that might not exist
- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template
that uses deprecated components, Elasticsearch will emit a deprecation warning.
- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing index templates.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
- **`cause` (Optional, string)**: User defined reason for creating/updating the index template

@@ -6741,9 +6772,9 @@ client.indices.putMapping({ index })

### Arguments [_arguments_indices.put_mapping]

#### Request (object) [_request_indices.put_mapping]
-- **`index` (string | string[])**: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices.
+- **`index` (string \| string[])**: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. - **`date_detection` (Optional, boolean)**: Controls whether dynamic date detection is enabled. -- **`dynamic` (Optional, Enum("strict" | "runtime" | true | false))**: Controls whether new fields are added dynamically. +- **`dynamic` (Optional, Enum("strict" \| "runtime" \| true \| false))**: Controls whether new fields are added dynamically. - **`dynamic_date_formats` (Optional, string[])**: If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string. @@ -6753,7 +6784,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. - **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. -- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: +- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -6763,14 +6794,14 @@ application-specific metadata. - **`runtime` (Optional, Record)**: Mapping of runtime fields for the index. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - **`write_index_only` (Optional, boolean)**: If `true`, the mappings are applied only to the current write index for the target. @@ -6801,7 +6832,7 @@ client.indices.putSettings({ ... }) ### Arguments [_arguments_indices.put_settings] #### Request (object) [_request_indices.put_settings] -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). 
To target all data streams and indices, omit this parameter or use `*` or `_all`.
- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**
@@ -6810,20 +6841,20 @@ alias, or `_all` value targets only missing or closed indices. This
behavior applies even if the request targets other open indices. For example,
a request targeting `foo*,bar*` returns an error if an index starts with `foo`
but no index starts with `bar`.
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target
data streams, this argument determines whether wildcard expressions match
hidden data streams. Supports a list of values, such as `open,hidden`.
- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is
received before the timeout expires, the request fails and returns an error.
- **`preserve_existing` (Optional, boolean)**: If `true`, existing index settings remain unchanged.
- **`reopen` (Optional, boolean)**: Whether to close and reopen the index to apply non-dynamic settings.
If set to `true` the indices to which the settings are being applied
will be closed temporarily and then reopened in order to apply the changes.
-- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the
timeout expires, the request fails and returns an error.

## client.indices.putTemplate [_indices.put_template]
Create or update an index template.
Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
@@ -6860,7 +6891,7 @@ client.indices.putTemplate({ name })
#### Request (object) [_request_indices.put_template]
- **`name` (string)**: The name of the template
- **`aliases` (Optional, Record)**: Aliases for the index.
-- **`index_patterns` (Optional, string | string[])**: Array of wildcard expressions used to match the names
+- **`index_patterns` (Optional, string \| string[])**: Array of wildcard expressions used to match the names
of indices during creation.
- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. - **`order` (Optional, number)**: Order in which Elasticsearch applies this template if index @@ -6873,7 +6904,7 @@ Templates with lower 'order' values are merged first. Templates with higher is not automatically generated by Elasticsearch. To unset a version, replace the template without specifying one. - **`create` (Optional, boolean)**: If true, this request cannot replace or update existing index templates. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`cause` (Optional, string)**: User defined reason for creating/updating the index template @@ -6911,7 +6942,7 @@ client.indices.recovery({ ... }) ### Arguments [_arguments_indices.recovery] #### Request (object) [_request_indices.recovery] -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. @@ -6942,12 +6973,12 @@ client.indices.refresh({ ... }) ### Arguments [_arguments_indices.refresh] #### Request (object) [_request_indices.refresh] -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
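
As a concrete illustration of the `indices.refresh` parameters documented above, here is a minimal sketch; the index pattern and option values are placeholder assumptions, not part of the generated reference:

```ts
// Minimal sketch: refresh every index matching a wildcard pattern, including
// hidden targets, using only the parameters documented for indices.refresh.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node URL

async function refreshAll () {
  const response = await client.indices.refresh({
    index: 'my-index-*',                  // wildcard expression (placeholder name)
    allow_no_indices: true,               // do not error if the pattern matches nothing
    expand_wildcards: ['open', 'hidden']  // match open and hidden targets
  })
  console.log(response)
}

refreshAll().catch(console.error)
```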
@@ -6978,9 +7009,9 @@ client.indices.reloadSearchAnalyzers({ index }) ### Arguments [_arguments_indices.reload_search_analyzers] #### Request (object) [_request_indices.reload_search_analyzers] -- **`index` (string | string[])**: A list of index names to reload analyzers for +- **`index` (string \| string[])**: A list of index names to reload analyzers for - **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - **`resource` (Optional, string)**: Changed resource to reload analyzers from if applicable @@ -7041,7 +7072,7 @@ client.indices.resolveCluster({ ... }) ### Arguments [_arguments_indices.resolve_cluster] #### Request (object) [_request_indices.resolve_cluster] -- **`name` (Optional, string | string[])**: A list of names or index patterns for the indices, aliases, and data streams to resolve. +- **`name` (Optional, string \| string[])**: A list of names or index patterns for the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. If no index expression is specified, information about all remote clusters configured on the local cluster @@ -7051,7 +7082,7 @@ or closed indices. This behavior applies even if the request targets other open targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. @@ -7063,7 +7094,7 @@ options to the `_resolve/cluster` API endpoint that takes no index expression. - **`ignore_unavailable` (Optional, boolean)**: If false, the request returns an error if it targets a missing or closed index. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. -- **`timeout` (Optional, string | -1 | 0)**: The maximum time to wait for remote clusters to respond. 
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for remote clusters to respond. If a remote cluster does not respond within this timeout period, the API response will show the cluster as not connected and include an error message that the request timed out. @@ -7086,9 +7117,9 @@ client.indices.resolveIndex({ name }) ### Arguments [_arguments_indices.resolve_index] #### Request (object) [_request_indices.resolve_index] -- **`name` (string | string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. +- **`name` (string \| string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. @@ -7162,11 +7193,11 @@ If specified, this mapping can include field names, field data types, and mappin - **`settings` (Optional, Record)**: Configuration options for the index. Data streams do not support this parameter. - **`dry_run` (Optional, boolean)**: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - **`lazy` (Optional, boolean)**: If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams. @@ -7185,12 +7216,12 @@ client.indices.segments({ ... }) ### Arguments [_arguments_indices.segments] #### Request (object) [_request_indices.segments] -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. 
- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. @@ -7219,14 +7250,14 @@ client.indices.shardStores({ ... }) ### Arguments [_arguments_indices.shard_stores] #### Request (object) [_request_indices.shard_stores] -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. - **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. -- **`status` (Optional, Enum("green" | "yellow" | "red" | "all") | Enum("green" | "yellow" | "red" | "all")[])**: List of shard health statuses used to limit the request. +- **`status` (Optional, Enum("green" \| "yellow" \| "red" \| "all") \| Enum("green" \| "yellow" \| "red" \| "all")[])**: List of shard health statuses used to limit the request. ## client.indices.shrink [_indices.shrink] Shrink an index. @@ -7276,11 +7307,11 @@ client.indices.shrink({ index, target }) - **`aliases` (Optional, Record)**: The key is the alias name. Index alias names support date math. - **`settings` (Optional, Record)**: Configuration options for the target index. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
-- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). ## client.indices.simulateIndexTemplate [_indices.simulate_index_template] @@ -7299,7 +7330,7 @@ client.indices.simulateIndexTemplate({ name }) - **`name` (string)**: Name of the index to simulate - **`create` (Optional, boolean)**: Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one - **`cause` (Optional, string)**: User defined reason for dry-run creating the new template for simulation purposes -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. ## client.indices.simulateTemplate [_indices.simulate_template] @@ -7320,7 +7351,7 @@ this parameter and specify the template configuration in the request body. - **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. -- **`index_patterns` (Optional, string | string[])**: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. +- **`index_patterns` (Optional, string \| string[])**: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. - **`composed_of` (Optional, string[])**: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. - **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied. @@ -7343,7 +7374,7 @@ references a component template that might not exist that uses deprecated components, Elasticsearch will emit a deprecation warning. - **`create` (Optional, boolean)**: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. - **`cause` (Optional, string)**: User defined reason for dry-run creating the new template for simulation purposes -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. ## client.indices.split [_indices.split] @@ -7394,11 +7425,11 @@ client.indices.split({ index, target }) - **`target` (string)**: Name of the target index to create. - **`aliases` (Optional, Record)**: Aliases for the resulting index. - **`settings` (Optional, Record)**: Configuration options for the target index. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). ## client.indices.stats [_indices.stats] @@ -7423,19 +7454,19 @@ client.indices.stats({ ... }) ### Arguments [_arguments_indices.stats] #### Request (object) [_request_indices.stats] -- **`metric` (Optional, string | string[])**: Limit the information returned the specific metrics. -- **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices -- **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument +- **`metric` (Optional, string \| string[])**: Limit the information returned the specific metrics. +- **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices +- **`completion_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -- **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics. -- **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. 
+- **`fielddata_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata statistics. +- **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics. - **`forbid_closed_indices` (Optional, boolean)**: If true, statistics are not collected from closed indices. -- **`groups` (Optional, string | string[])**: List of search groups to include in the search statistics. +- **`groups` (Optional, string \| string[])**: List of search groups to include in the search statistics. - **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. -- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. +- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. ## client.indices.updateAliases [_indices.update_aliases] Create or update an alias. @@ -7451,9 +7482,9 @@ client.indices.updateAliases({ ... }) #### Request (object) [_request_indices.update_aliases] - **`actions` (Optional, { add_backing_index, remove_backing_index }[])**: Actions to perform. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.validateQuery [_indices.validate_query] @@ -7469,7 +7500,7 @@ client.indices.validateQuery({ ... }) ### Arguments [_arguments_indices.validate_query] #### Request (object) [_request_indices.validate_query] -- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query in the Lucene query string syntax. @@ -7479,10 +7510,10 @@ This behavior applies even if the request targets other open indices. - **`analyzer` (Optional, string)**: Analyzer to use for the query string. 
This parameter can only be used when the `q` query string parameter is specified. - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. -- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. - **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. @@ -7495,6 +7526,17 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ## client.inference.chatCompletionUnified [_inference.chat_completion_unified] Perform chat completion inference +The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. +It only works with the `chat_completion` task type for `openai` and `elastic` inference services. + +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + +NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. +The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. +The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. +If you use the `openai` service or the `elastic` service, use the Chat completion inference API. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference) ```ts @@ -7506,7 +7548,7 @@ client.inference.chatCompletionUnified({ inference_id }) #### Request (object) [_request_inference.chat_completion_unified] - **`inference_id` (string)**: The inference Id - **`chat_completion_request` (Optional, { messages, model, max_completion_tokens, stop, temperature, tool_choice, tools, top_p })** -- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete. 
## client.inference.completion [_inference.completion] Perform completion inference on the service @@ -7521,10 +7563,10 @@ client.inference.completion({ inference_id, input }) #### Request (object) [_request_inference.completion] - **`inference_id` (string)**: The inference Id -- **`input` (string | string[])**: Inference input. +- **`input` (string \| string[])**: Inference input. Either a string or an array of strings. - **`task_settings` (Optional, User-defined value)**: Optional task settings -- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete. ## client.inference.delete [_inference.delete] Delete an inference endpoint @@ -7539,7 +7581,7 @@ client.inference.delete({ inference_id }) #### Request (object) [_request_inference.delete] - **`inference_id` (string)**: The inference identifier. -- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type +- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type - **`dry_run` (Optional, boolean)**: When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. - **`force` (Optional, boolean)**: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. @@ -7555,7 +7597,7 @@ client.inference.get({ ... }) ### Arguments [_arguments_inference.get] #### Request (object) [_request_inference.get] -- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type +- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type - **`inference_id` (Optional, string)**: The inference Id ## client.inference.inference [_inference.inference] @@ -7580,25 +7622,20 @@ client.inference.inference({ inference_id, input }) #### Request (object) [_request_inference.inference] - **`inference_id` (string)**: The unique identifier for the inference endpoint. -- **`input` (string | string[])**: The text on which you want to perform the inference task. +- **`input` (string \| string[])**: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. -- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The type of inference task that the model performs. +- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The type of inference task that the model performs. - **`query` (Optional, string)**: The query input, which is required only for the `rerank` task. It is not required for other tasks. - **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. -- **`timeout` (Optional, string | -1 | 0)**: The amount of time to wait for the inference request to complete. 
+- **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete. ## client.inference.put [_inference.put] Create an inference endpoint. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. @@ -7614,7 +7651,7 @@ client.inference.put({ inference_id }) #### Request (object) [_request_inference.put] - **`inference_id` (string)**: The inference Id -- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type +- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type - **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** ## client.inference.putAlibabacloud [_inference.put_alibabacloud] @@ -7622,12 +7659,6 @@ Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud) ```ts @@ -7637,7 +7668,7 @@ client.inference.putAlibabacloud({ task_type, alibabacloud_inference_id, service ### Arguments [_arguments_inference.put_alibabacloud] #### Request (object) [_request_inference.put_alibabacloud] -- **`task_type` (Enum("completion" | "rerank" | "space_embedding" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("completion" \| "rerank" \| "space_embedding" \| "text_embedding"))**: The type of the inference task that the model will perform. - **`alibabacloud_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("alibabacloud-ai-search"))**: The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. - **`service_settings` ({ api_key, host, rate_limit, service_id, workspace })**: Settings used to install the inference model. 
These settings are specific to the `alibabacloud-ai-search` service. @@ -7653,12 +7684,6 @@ Creates an inference endpoint to perform an inference task with the `amazonbedro >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock) ```ts @@ -7668,7 +7693,7 @@ client.inference.putAmazonbedrock({ task_type, amazonbedrock_inference_id, servi ### Arguments [_arguments_inference.put_amazonbedrock] #### Request (object) [_request_inference.put_amazonbedrock] -- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("completion" \| "text_embedding"))**: The type of the inference task that the model will perform. - **`amazonbedrock_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("amazonbedrock"))**: The type of service supported for the specified task type. In this case, `amazonbedrock`. - **`service_settings` ({ access_key, model, provider, region, rate_limit, secret_key })**: Settings used to install the inference model. These settings are specific to the `amazonbedrock` service. @@ -7681,12 +7706,6 @@ Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic) ```ts @@ -7710,12 +7729,6 @@ Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. 
-Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio) ```ts @@ -7725,7 +7738,7 @@ client.inference.putAzureaistudio({ task_type, azureaistudio_inference_id, servi ### Arguments [_arguments_inference.put_azureaistudio] #### Request (object) [_request_inference.put_azureaistudio] -- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("completion" \| "text_embedding"))**: The type of the inference task that the model will perform. - **`azureaistudio_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("azureaistudio"))**: The type of service supported for the specified task type. In this case, `azureaistudio`. - **`service_settings` ({ api_key, endpoint_type, target, provider, rate_limit })**: Settings used to install the inference model. These settings are specific to the `openai` service. @@ -7745,12 +7758,6 @@ The list of chat completion models that you can choose from in your Azure OpenAI The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai) ```ts @@ -7760,7 +7767,7 @@ client.inference.putAzureopenai({ task_type, azureopenai_inference_id, service, ### Arguments [_arguments_inference.put_azureopenai] #### Request (object) [_request_inference.put_azureopenai] -- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("completion" \| "text_embedding"))**: The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. - **`azureopenai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("azureopenai"))**: The type of service supported for the specified task type. In this case, `azureopenai`. @@ -7774,12 +7781,6 @@ Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. 
-Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere) ```ts @@ -7789,7 +7790,7 @@ client.inference.putCohere({ task_type, cohere_inference_id, service, service_se ### Arguments [_arguments_inference.put_cohere] #### Request (object) [_request_inference.put_cohere] -- **`task_type` (Enum("completion" | "rerank" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("completion" \| "rerank" \| "text_embedding"))**: The type of the inference task that the model will perform. - **`cohere_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("cohere"))**: The type of service supported for the specified task type. In this case, `cohere`. - **`service_settings` ({ api_key, embedding_type, model_id, rate_limit, similarity })**: Settings used to install the inference model. @@ -7825,7 +7826,7 @@ client.inference.putElasticsearch({ task_type, elasticsearch_inference_id, servi ### Arguments [_arguments_inference.put_elasticsearch] #### Request (object) [_request_inference.put_elasticsearch] -- **`task_type` (Enum("rerank" | "sparse_embedding" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("rerank" \| "sparse_embedding" \| "text_embedding"))**: The type of the inference task that the model will perform. - **`elasticsearch_inference_id` (string)**: The unique identifier of the inference endpoint. The must not match the `model_id`. - **`service` (Enum("elasticsearch"))**: The type of service supported for the specified task type. In this case, `elasticsearch`. @@ -7873,12 +7874,6 @@ Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio) ```ts @@ -7888,7 +7883,7 @@ client.inference.putGoogleaistudio({ task_type, googleaistudio_inference_id, ser ### Arguments [_arguments_inference.put_googleaistudio] #### Request (object) [_request_inference.put_googleaistudio] -- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("completion" \| "text_embedding"))**: The type of the inference task that the model will perform. - **`googleaistudio_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("googleaistudio"))**: The type of service supported for the specified task type. In this case, `googleaistudio`. 
- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. @@ -7899,12 +7894,6 @@ Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai) ```ts @@ -7914,7 +7903,7 @@ client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, ser ### Arguments [_arguments_inference.put_googlevertexai] #### Request (object) [_request_inference.put_googlevertexai] -- **`task_type` (Enum("rerank" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("rerank" \| "text_embedding"))**: The type of the inference task that the model will perform. - **`googlevertexai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`. - **`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. @@ -7941,12 +7930,6 @@ The following models are recommended for the Hugging Face service: * `multilingual-e5-base` * `multilingual-e5-small` -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face) ```ts @@ -7970,12 +7953,6 @@ Create an inference endpoint to perform an inference task with the `jinaai` serv To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
- [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai) ```ts @@ -7985,7 +7962,7 @@ client.inference.putJinaai({ task_type, jinaai_inference_id, service, service_se ### Arguments [_arguments_inference.put_jinaai] #### Request (object) [_request_inference.put_jinaai] -- **`task_type` (Enum("rerank" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("rerank" \| "text_embedding"))**: The type of the inference task that the model will perform. - **`jinaai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("jinaai"))**: The type of service supported for the specified task type. In this case, `jinaai`. - **`service_settings` ({ api_key, model_id, rate_limit, similarity })**: Settings used to install the inference model. These settings are specific to the `jinaai` service. @@ -7998,12 +7975,6 @@ Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral) ```ts @@ -8025,12 +7996,6 @@ Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai) ```ts @@ -8040,7 +8005,7 @@ client.inference.putOpenai({ task_type, openai_inference_id, service, service_se ### Arguments [_arguments_inference.put_openai] #### Request (object) [_request_inference.put_openai] -- **`task_type` (Enum("chat_completion" | "completion" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("chat_completion" \| "completion" \| "text_embedding"))**: The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. - **`openai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("openai"))**: The type of service supported for the specified task type. In this case, `openai`. 
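
To make the `openai` service configuration concrete, here is a minimal sketch that creates a text embedding endpoint; the `service_settings` fields shown (`api_key`, `model_id`) and all values are assumptions for illustration, not taken from the generated reference:

```ts
// Minimal sketch: create an OpenAI text_embedding inference endpoint.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node URL

async function createEmbeddingEndpoint () {
  const endpoint = await client.inference.putOpenai({
    task_type: 'text_embedding',
    openai_inference_id: 'my-openai-embeddings', // placeholder endpoint id
    service: 'openai',
    service_settings: {
      api_key: process.env.OPENAI_API_KEY ?? '', // assumed to be supplied via the environment
      model_id: 'text-embedding-3-small'         // assumed OpenAI embedding model
    }
  })
  console.log(endpoint)
}

createEmbeddingEndpoint().catch(console.error)
```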
@@ -8065,7 +8030,7 @@ client.inference.putVoyageai({ task_type, voyageai_inference_id, service, servic ### Arguments [_arguments_inference.put_voyageai] #### Request (object) [_request_inference.put_voyageai] -- **`task_type` (Enum("text_embedding" | "rerank"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("text_embedding" \| "rerank"))**: The type of the inference task that the model will perform. - **`voyageai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("voyageai"))**: The type of service supported for the specified task type. In this case, `voyageai`. - **`service_settings` ({ dimensions, model_id, rate_limit, embedding_type })**: Settings used to install the inference model. These settings are specific to the `voyageai` service. @@ -8080,12 +8045,6 @@ Create an inference endpoint to perform an inference task with the `watsonxai` s You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. -When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. -After creating the endpoint, wait for the model deployment to complete before using it. -To verify the deployment status, use the get trained model statistics API. -Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. -Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx) ```ts @@ -8115,14 +8074,14 @@ client.inference.rerank({ inference_id, query, input }) #### Request (object) [_request_inference.rerank] - **`inference_id` (string)**: The unique identifier for the inference endpoint. - **`query` (string)**: Query input. -- **`input` (string | string[])**: The text on which you want to perform the inference task. +- **`input` (string \| string[])**: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. - **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. -- **`timeout` (Optional, string | -1 | 0)**: The amount of time to wait for the inference request to complete. +- **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete. ## client.inference.sparseEmbedding [_inference.sparse_embedding] Perform sparse embedding inference on the service @@ -8137,10 +8096,10 @@ client.inference.sparseEmbedding({ inference_id, input }) #### Request (object) [_request_inference.sparse_embedding] - **`inference_id` (string)**: The inference Id -- **`input` (string | string[])**: Inference input. +- **`input` (string \| string[])**: Inference input. Either a string or an array of strings. - **`task_settings` (Optional, User-defined value)**: Optional task settings -- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. 
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete. ## client.inference.streamCompletion [_inference.stream_completion] Perform streaming inference. @@ -8161,7 +8120,7 @@ client.inference.streamCompletion({ inference_id, input }) #### Request (object) [_request_inference.stream_completion] - **`inference_id` (string)**: The unique identifier for the inference endpoint. -- **`input` (string | string[])**: The text on which you want to perform the inference task. +- **`input` (string \| string[])**: The text on which you want to perform the inference task. It can be a single string or an array. NOTE: Inference endpoints for the completion task type currently only support a single string as input. @@ -8180,10 +8139,10 @@ client.inference.textEmbedding({ inference_id, input }) #### Request (object) [_request_inference.text_embedding] - **`inference_id` (string)**: The inference Id -- **`input` (string | string[])**: Inference input. +- **`input` (string \| string[])**: Inference input. Either a string or an array of strings. - **`task_settings` (Optional, User-defined value)**: Optional task settings -- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete. ## client.inference.update [_inference.update] Update an inference endpoint. @@ -8204,7 +8163,7 @@ client.inference.update({ inference_id }) #### Request (object) [_request_inference.update] - **`inference_id` (string)**: The unique identifier of the inference endpoint. -- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The type of inference task that the model performs. +- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The type of inference task that the model performs. - **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** ## client.ingest.deleteGeoipDatabase [_ingest.delete_geoip_database] @@ -8221,10 +8180,10 @@ client.ingest.deleteGeoipDatabase({ id }) ### Arguments [_arguments_ingest.delete_geoip_database] #### Request (object) [_request_ingest.delete_geoip_database] -- **`id` (string | string[])**: A list of geoip database configurations to delete -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`id` (string \| string[])**: A list of geoip database configurations to delete +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.ingest.deleteIpLocationDatabase [_ingest.delete_ip_location_database] Delete IP geolocation database configurations. 
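Calling one of the inference endpoints documented in the hunks above is symmetric to creating it; a hypothetical `textEmbedding` request against the endpoint sketched earlier (the id, inputs, and timeout value are illustrative):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

// Embed two passages with an existing inference endpoint.
const result = await client.inference.textEmbedding({
  inference_id: 'my-openai-embeddings', // illustrative endpoint id from the earlier sketch
  input: ['first passage to embed', 'second passage to embed'],
  timeout: '30s' // optional: fail the request if inference takes longer than 30 seconds
})
console.log(result)
```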
@@ -8238,11 +8197,11 @@ client.ingest.deleteIpLocationDatabase({ id }) ### Arguments [_arguments_ingest.delete_ip_location_database] #### Request (object) [_request_ingest.delete_ip_location_database] -- **`id` (string | string[])**: A list of IP location database configurations. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`id` (string \| string[])**: A list of IP location database configurations. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. @@ -8261,9 +8220,9 @@ client.ingest.deletePipeline({ id }) #### Request (object) [_request_ingest.delete_pipeline] - **`id` (string)**: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.ingest.geoIpStats [_ingest.geo_ip_stats] @@ -8291,7 +8250,7 @@ client.ingest.getGeoipDatabase({ ... }) ### Arguments [_arguments_ingest.get_geoip_database] #### Request (object) [_request_ingest.get_geoip_database] -- **`id` (Optional, string | string[])**: A list of database configuration IDs to retrieve. +- **`id` (Optional, string \| string[])**: A list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. @@ -8307,10 +8266,10 @@ client.ingest.getIpLocationDatabase({ ... }) ### Arguments [_arguments_ingest.get_ip_location_database] #### Request (object) [_request_ingest.get_ip_location_database] -- **`id` (Optional, string | string[])**: List of database configuration IDs to retrieve. +- **`id` (Optional, string \| string[])**: List of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. @@ -8332,7 +8291,7 @@ client.ingest.getPipeline({ ... }) - **`id` (Optional, string)**: List of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. 
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`summary` (Optional, boolean)**: Return pipelines without their definitions (default: false) @@ -8367,9 +8326,9 @@ client.ingest.putGeoipDatabase({ id, name, maxmind }) - **`name` (string)**: The provider-assigned name of the IP geolocation database to download. - **`maxmind` ({ account_id })**: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.ingest.putIpLocationDatabase [_ingest.put_ip_location_database] Create or update an IP geolocation database configuration. @@ -8385,10 +8344,10 @@ client.ingest.putIpLocationDatabase({ id }) #### Request (object) [_request_ingest.put_ip_location_database] - **`id` (string)**: The database configuration identifier. - **`configuration` (Optional, { name, maxmind, ipinfo })** -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. A value of `-1` indicates that the request should never time out. @@ -8413,8 +8372,8 @@ client.ingest.putPipeline({ id }) - **`version` (Optional, number)**: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. - **`deprecated` (Optional, boolean)**: Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - **`if_version` (Optional, number)**: Required version for optimistic concurrency control for pipeline updates ## client.ingest.simulate [_ingest.simulate] @@ -8456,8 +8415,8 @@ client.license.delete({ ... }) ### Arguments [_arguments_license.delete] #### Request (object) [_request_license.delete] -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.license.get [_license.get] Get license information. @@ -8524,8 +8483,8 @@ client.license.post({ ... }) - **`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })** - **`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])**: A sequence of one or more JSON documents containing the license information. - **`acknowledge` (Optional, boolean)**: Specifies whether you acknowledge the license changes. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.license.postStartBasic [_license.post_start_basic] Start a basic license. @@ -8549,8 +8508,8 @@ client.license.postStartBasic({ ... }) #### Request (object) [_request_license.post_start_basic] - **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error.

## client.license.postStartTrial [_license.post_start_trial]
Start a trial.
@@ -8572,7 +8531,7 @@ client.license.postStartTrial({ ... })
#### Request (object) [_request_license.post_start_trial]
- **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false)
- **`type_query_string` (Optional, string)**
-- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.

## client.logstash.deletePipeline [_logstash.delete_pipeline]
Delete a Logstash pipeline.
@@ -8603,7 +8562,7 @@ client.logstash.getPipeline({ ... })
### Arguments [_arguments_logstash.get_pipeline]

#### Request (object) [_request_logstash.get_pipeline]
-- **`id` (Optional, string | string[])**: A list of pipeline identifiers.
+- **`id` (Optional, string \| string[])**: A list of pipeline identifiers.

## client.logstash.putPipeline [_logstash.put_pipeline]
Create or update a Logstash pipeline.
@@ -8711,7 +8670,7 @@ client.ml.closeJob({ job_id })
- **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier.
- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter.
- **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter.
-- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter.
+- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter.

## client.ml.deleteCalendar [_ml.delete_calendar]
Delete a calendar.
@@ -8758,7 +8717,7 @@ client.ml.deleteCalendarJob({ calendar_id, job_id })

#### Request (object) [_request_ml.delete_calendar_job]
- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
-- **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a
+- **`job_id` (string \| string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a
list of jobs or groups.

## client.ml.deleteDataFrameAnalytics [_ml.delete_data_frame_analytics]
Delete a data frame analytics job.
@@ -8775,7 +8734,7 @@ client.ml.deleteDataFrameAnalytics({ id })
#### Request (object) [_request_ml.delete_data_frame_analytics]
- **`id` (string)**: Identifier for the data frame analytics job.
- **`force` (Optional, boolean)**: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job.
-- **`timeout` (Optional, string | -1 | 0)**: The time to wait for the job to be deleted.
+- **`timeout` (Optional, string \| -1 \| 0)**: The time to wait for the job to be deleted.

## client.ml.deleteDatafeed [_ml.delete_datafeed]
Delete a datafeed.
@@ -8821,7 +8780,7 @@ client.ml.deleteExpiredData({ ... })
group name, or a wildcard expression.
- **`requests_per_second` (Optional, float)**: The desired requests per second for the deletion processes. The
default behavior is no throttling.
-- **`timeout` (Optional, string | -1 | 0)**: How long can the underlying delete processes run until they are canceled. +- **`timeout` (Optional, string \| -1 \| 0)**: How long can the underlying delete processes run until they are canceled. ## client.ml.deleteFilter [_ml.delete_filter] Delete a filter. @@ -8865,7 +8824,7 @@ all forecasts from the job. particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error. -- **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for the completion of the delete +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error. @@ -8932,7 +8891,7 @@ client.ml.deleteTrainedModel({ model_id }) #### Request (object) [_request_ml.delete_trained_model] - **`model_id` (string)**: The unique identifier of the trained model. - **`force` (Optional, boolean)**: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.ml.deleteTrainedModelAlias [_ml.delete_trained_model_alias] Delete a trained model alias. @@ -9073,11 +9032,11 @@ client.ml.flushJob({ job_id }) #### Request (object) [_request_ml.flush_job] - **`job_id` (string)**: Identifier for the anomaly detection job. -- **`advance_time` (Optional, string | Unit)**: Refer to the description for the `advance_time` query parameter. +- **`advance_time` (Optional, string \| Unit)**: Refer to the description for the `advance_time` query parameter. - **`calc_interim` (Optional, boolean)**: Refer to the description for the `calc_interim` query parameter. -- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. -- **`skip_time` (Optional, string | Unit)**: Refer to the description for the `skip_time` query parameter. -- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter. +- **`skip_time` (Optional, string \| Unit)**: Refer to the description for the `skip_time` query parameter. +- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter. ## client.ml.forecast [_ml.forecast] Predict future behavior of a time series. @@ -9098,8 +9057,8 @@ client.ml.forecast({ job_id }) #### Request (object) [_request_ml.forecast] - **`job_id` (string)**: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. -- **`duration` (Optional, string | -1 | 0)**: Refer to the description for the `duration` query parameter. -- **`expires_in` (Optional, string | -1 | 0)**: Refer to the description for the `expires_in` query parameter. +- **`duration` (Optional, string \| -1 \| 0)**: Refer to the description for the `duration` query parameter. +- **`expires_in` (Optional, string \| -1 \| 0)**: Refer to the description for the `expires_in` query parameter. 
- **`max_model_memory` (Optional, string)**: Refer to the description for the `max_model_memory` query parameter.

## client.ml.getBuckets [_ml.get_buckets]
Get anomaly detection job results for buckets.
@@ -9116,16 +9075,16 @@ client.ml.getBuckets({ job_id })

#### Request (object) [_request_ml.get_buckets]
- **`job_id` (string)**: Identifier for the anomaly detection job.
-- **`timestamp` (Optional, string | Unit)**: The timestamp of a single bucket result. If you do not specify this
+- **`timestamp` (Optional, string \| Unit)**: The timestamp of a single bucket result. If you do not specify this
parameter, the API returns information about all buckets.
- **`anomaly_score` (Optional, number)**: Refer to the description for the `anomaly_score` query parameter.
- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter.
-- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter.
+- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter.
- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
- **`expand` (Optional, boolean)**: Refer to the description for the `expand` query parameter.
- **`page` (Optional, { from, size })**
- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
-- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter.
+- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter.
- **`from` (Optional, number)**: Skips the specified number of buckets.
- **`size` (Optional, number)**: Specifies the maximum number of buckets to obtain.

@@ -9142,11 +9101,11 @@ client.ml.getCalendarEvents({ calendar_id })

#### Request (object) [_request_ml.get_calendar_events]
- **`calendar_id` (string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
-- **`end` (Optional, string | Unit)**: Specifies to get events with timestamps earlier than this time.
+- **`end` (Optional, string \| Unit)**: Specifies to get events with timestamps earlier than this time.
- **`from` (Optional, number)**: Skips the specified number of events.
- **`job_id` (Optional, string)**: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`.
- **`size` (Optional, number)**: Specifies the maximum number of events to obtain.
-- **`start` (Optional, string | Unit)**: Specifies to get events with timestamps after this time.
+- **`start` (Optional, string \| Unit)**: Specifies to get events with timestamps after this time.

## client.ml.getCalendars [_ml.get_calendars]
Get calendar configuration info.
@@ -9272,7 +9231,7 @@ client.ml.getDatafeedStats({ ... })
### Arguments [_arguments_ml.get_datafeed_stats]

#### Request (object) [_request_ml.get_datafeed_stats]
-- **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed. It can be a datafeed identifier or a
+- **`datafeed_id` (Optional, string \| string[])**: Identifier for the datafeed. It can be a datafeed identifier or a
wildcard expression. If you do not specify one of these options, the API
returns information about all datafeeds.
- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: @@ -9303,7 +9262,7 @@ client.ml.getDatafeeds({ ... }) ### Arguments [_arguments_ml.get_datafeeds] #### Request (object) [_request_ml.get_datafeeds] -- **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed. It can be a datafeed identifier or a +- **`datafeed_id` (Optional, string \| string[])**: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. - **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: @@ -9333,7 +9292,7 @@ client.ml.getFilters({ ... }) ### Arguments [_arguments_ml.get_filters] #### Request (object) [_request_ml.get_filters] -- **`filter_id` (Optional, string | string[])**: A string that uniquely identifies a filter. +- **`filter_id` (Optional, string \| string[])**: A string that uniquely identifies a filter. - **`from` (Optional, number)**: Skips the specified number of filters. - **`size` (Optional, number)**: Specifies the maximum number of filters to obtain. @@ -9356,7 +9315,7 @@ client.ml.getInfluencers({ job_id }) - **`page` (Optional, { from, size })**: Configures pagination. This parameter has the `from` and `size` properties. - **`desc` (Optional, boolean)**: If true, the results are sorted in descending order. -- **`end` (Optional, string | Unit)**: Returns influencers with timestamps earlier than this time. +- **`end` (Optional, string \| Unit)**: Returns influencers with timestamps earlier than this time. The default value means it is unset and results are not limited to specific timestamps. - **`exclude_interim` (Optional, boolean)**: If true, the output excludes interim results. By default, interim results @@ -9367,7 +9326,7 @@ value. - **`size` (Optional, number)**: Specifies the maximum number of influencers to obtain. - **`sort` (Optional, string)**: Specifies the sort field for the requested influencers. By default, the influencers are sorted by the `influencer_score` value. -- **`start` (Optional, string | Unit)**: Returns influencers with timestamps after this time. The default value +- **`start` (Optional, string \| Unit)**: Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps. ## client.ml.getJobStats [_ml.get_job_stats] @@ -9413,7 +9372,7 @@ client.ml.getJobs({ ... }) ### Arguments [_arguments_ml.get_jobs] #### Request (object) [_request_ml.get_jobs] -- **`job_id` (Optional, string | string[])**: Identifier for the anomaly detection job. It can be a job identifier, a +- **`job_id` (Optional, string \| string[])**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. - **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: @@ -9446,9 +9405,9 @@ client.ml.getMemoryStats({ ... }) #### Request (object) [_request_ml.get_memory_stats] - **`node_id` (Optional, string)**: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.ml.getModelSnapshotUpgradeStats [_ml.get_model_snapshot_upgrade_stats] @@ -9494,10 +9453,10 @@ client.ml.getModelSnapshots({ job_id }) snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. - **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. -- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter. - **`page` (Optional, { from, size })** - **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter. -- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter. - **`from` (Optional, number)**: Skips the specified number of snapshots. - **`size` (Optional, number)**: Specifies the maximum number of snapshots to obtain. @@ -9538,11 +9497,11 @@ expression. You can summarize the bucket results for all anomaly detection jobs by using `_all` or by specifying `*` as the ``. - **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. -- **`bucket_span` (Optional, string | -1 | 0)**: Refer to the description for the `bucket_span` query parameter. -- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`bucket_span` (Optional, string \| -1 \| 0)**: Refer to the description for the `bucket_span` query parameter. +- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter. - **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. -- **`overall_score` (Optional, number | string)**: Refer to the description for the `overall_score` query parameter. -- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`overall_score` (Optional, number \| string)**: Refer to the description for the `overall_score` query parameter. +- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter. - **`top_n` (Optional, number)**: Refer to the description for the `top_n` query parameter. ## client.ml.getRecords [_ml.get_records] @@ -9569,12 +9528,12 @@ client.ml.getRecords({ job_id }) #### Request (object) [_request_ml.get_records] - **`job_id` (string)**: Identifier for the anomaly detection job. - **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. -- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter. - **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. 
- **`page` (Optional, { from, size })** - **`record_score` (Optional, number)**: Refer to the description for the `record_score` query parameter. - **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter. -- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter. - **`from` (Optional, number)**: Skips the specified number of records. - **`size` (Optional, number)**: Specifies the maximum number of records to obtain. @@ -9590,7 +9549,7 @@ client.ml.getTrainedModels({ ... }) ### Arguments [_arguments_ml.get_trained_models] #### Request (object) [_request_ml.get_trained_models] -- **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. +- **`model_id` (Optional, string \| string[])**: The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request by using a list of model IDs or a wildcard @@ -9609,10 +9568,10 @@ JSON map (true) or in a custom compressed format (false). retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. - **`from` (Optional, number)**: Skips the specified number of models. -- **`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))**: A comma delimited string of optional fields to include in the response +- **`include` (Optional, Enum("definition" \| "feature_importance_baseline" \| "hyperparameters" \| "total_feature_importance" \| "definition_status"))**: A comma delimited string of optional fields to include in the response body. - **`size` (Optional, number)**: Specifies the maximum number of models to obtain. -- **`tags` (Optional, string | string[])**: A comma delimited string of tags. A trained model can have many tags, or +- **`tags` (Optional, string \| string[])**: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned. @@ -9630,7 +9589,7 @@ client.ml.getTrainedModelsStats({ ... }) ### Arguments [_arguments_ml.get_trained_models_stats] #### Request (object) [_request_ml.get_trained_models_stats] -- **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. It can be a +- **`model_id` (Optional, string \| string[])**: The unique identifier of the trained model or a model alias. It can be a list or a wildcard expression. - **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: @@ -9660,7 +9619,7 @@ client.ml.inferTrainedModel({ model_id, docs }) configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed. - **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The inference configuration updates to apply on the API call -- **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait for inference results. +- **`timeout` (Optional, string \| -1 \| 0)**: Controls the amount of time to wait for inference results. ## client.ml.info [_ml.info] Get machine learning information. 
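A hypothetical `inferTrainedModel` call matching the hunk above (the model id and input text are invented; NLP models typically expect their input under `text_field`, as the documentation notes):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // illustrative node URL

// Run inference against an already-deployed trained model.
const response = await client.ml.inferTrainedModel({
  model_id: 'my-nlp-model', // illustrative trained model id
  docs: [{ text_field: 'Elasticsearch is a distributed search engine.' }],
  timeout: '30s' // optional: how long to wait for inference results
})
console.log(response.inference_results)
```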
@@ -9699,7 +9658,7 @@ client.ml.openJob({ job_id })

#### Request (object) [_request_ml.open_job]
- **`job_id` (string)**: Identifier for the anomaly detection job.
-- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter.
+- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter.

## client.ml.postCalendarEvents [_ml.post_calendar_events]
Add scheduled events to the calendar.
@@ -9733,8 +9692,8 @@ client.ml.postData({ job_id })

#### Request (object) [_request_ml.post_data]
- **`job_id` (string)**: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data.
- **`data` (Optional, TData[])**
-- **`reset_end` (Optional, string | Unit)**: Specifies the end of the bucket resetting range.
-- **`reset_start` (Optional, string | Unit)**: Specifies the start of the bucket resetting range.
+- **`reset_end` (Optional, string \| Unit)**: Specifies the end of the bucket resetting range.
+- **`reset_start` (Optional, string \| Unit)**: Specifies the start of the bucket resetting range.

## client.ml.previewDataFrameAnalytics [_ml.preview_data_frame_analytics]
Preview features used by data frame analytics.
@@ -9783,8 +9742,8 @@ configuration details in the request body.
`datafeed_config` object does not include a `job_id` that references an existing
anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object.
-- **`start` (Optional, string | Unit)**: The start time from where the datafeed preview should begin
-- **`end` (Optional, string | Unit)**: The end time when the datafeed preview should stop
+- **`start` (Optional, string \| Unit)**: The start time from where the datafeed preview should begin
+- **`end` (Optional, string \| Unit)**: The end time when the datafeed preview should stop

## client.ml.putCalendar [_ml.put_calendar]
Create a calendar.
@@ -9815,7 +9774,7 @@ client.ml.putCalendarJob({ calendar_id, job_id })

#### Request (object) [_request_ml.put_calendar_job]
- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
-- **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups.
+- **`job_id` (string \| string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups.

## client.ml.putDataFrameAnalytics [_ml.put_data_frame_analytics]
Create a data frame analytics job.
@@ -9891,7 +9850,7 @@ analytical processing. If your `elasticsearch.yml` file contains an
`xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting.
-- **`headers` (Optional, Record<string, string | string[]>)**
+- **`headers` (Optional, Record<string, string \| string[]>)**
- **`version` (Optional, string)**

## client.ml.putDatafeed [_ml.put_datafeed]
Create a datafeed.
@@ -9931,12 +9890,12 @@ The datafeed can optionally search over indices that have already been read in
an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the
`query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time.
This check runs only on real-time datafeeds.
-- **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. +- **`frequency` (Optional, string \| -1 \| 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. -- **`indices` (Optional, string | string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master +- **`indices` (Optional, string \| string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master nodes and the machine learning nodes must have the `remote_cluster_client` role. - **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search - **`job_id` (Optional, string)**: Identifier for the anomaly detection job. @@ -9947,7 +9906,7 @@ end time that sees no data remains started until it is explicitly stopped. By de - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. -- **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might +- **`query_delay` (Optional, string \| -1 \| 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. @@ -9956,10 +9915,10 @@ when there are multiple jobs running on the same node. The detector configuration objects in a job can contain functions that use these script fields. - **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. 
-- **`headers` (Optional, Record<string, string | string[]>)**
+- **`headers` (Optional, Record<string, string \| string[]>)**
- **`allow_no_indices` (Optional, boolean)**: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified.
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
whether wildcard expressions match hidden data streams. Supports a list of values.
- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen.
- **`ignore_unavailable` (Optional, boolean)**: If true, unavailable indices (missing or closed) are ignored.
@@ -10003,7 +9962,7 @@ client.ml.putJob({ job_id, analysis_config, data_description })

- **`data_description` ({ format, time_field, time_format, field_delimiter })**: Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained.
- **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available.
- **`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })**: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes.
-- **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low.
+- **`background_persist_interval` (Optional, string \| -1 \| 0)**: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low.
- **`custom_settings` (Optional, User-defined value)**: Advanced configuration option. Contains custom meta data about the job. - **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. - **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. @@ -10016,7 +9975,7 @@ client.ml.putJob({ job_id, analysis_config, data_description }) - **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. - **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: * `all`: Match any data stream or index, including hidden ones. @@ -10053,7 +10012,7 @@ definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. - **`input` (Optional, { field_names })**: The input field names for the model definition. - **`metadata` (Optional, User-defined value)**: An object map that contains metadata about the model. -- **`model_type` (Optional, Enum("tree_ensemble" | "lang_ident" | "pytorch"))**: The model type. +- **`model_type` (Optional, Enum("tree_ensemble" \| "lang_ident" \| "pytorch"))**: The model type. - **`model_size_bytes` (Optional, number)**: The estimated memory usage in bytes to keep the trained model in memory. This property is supported only if defer_definition_decompression is true or the model definition is not supplied. @@ -10219,7 +10178,7 @@ client.ml.setUpgradeMode({ ... 
}) - **`enabled` (Optional, boolean)**: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from starting. -- **`timeout` (Optional, string | -1 | 0)**: The time to wait for the request to be completed. +- **`timeout` (Optional, string \| -1 \| 0)**: The time to wait for the request to be completed. ## client.ml.startDataFrameAnalytics [_ml.start_data_frame_analytics] Start a data frame analytics job. @@ -10247,7 +10206,7 @@ client.ml.startDataFrameAnalytics({ id }) - **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -- **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job +- **`timeout` (Optional, string \| -1 \| 0)**: Controls the amount of time to wait until the data frame analytics job starts. ## client.ml.startDatafeed [_ml.start_datafeed] @@ -10277,9 +10236,9 @@ client.ml.startDatafeed({ datafeed_id }) - **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. -- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. -- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. +- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter. +- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter. +- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter. ## client.ml.startTrainedModelDeployment [_ml.start_trained_model_deployment] Start a trained model deployment. @@ -10298,7 +10257,7 @@ client.ml.startTrainedModelDeployment({ model_id }) - **`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })**: Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually. -- **`cache_size` (Optional, number | string)**: The inference cache size (in memory outside the JVM heap) per node for the model. +- **`cache_size` (Optional, number \| string)**: The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. - **`deployment_id` (Optional, string)**: A unique identifier for the deployment of the model. @@ -10309,7 +10268,7 @@ Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. -- **`priority` (Optional, Enum("normal" | "low"))**: The deployment priority. +- **`priority` (Optional, Enum("normal" \| "low"))**: The deployment priority. - **`queue_capacity` (Optional, number)**: Specifies the number of inference requests that are allowed in the queue. 
After the number of requests exceeds this value, new requests are rejected with a 429 error. - **`threads_per_allocation` (Optional, number)**: Sets the number of threads used by each model allocation during inference. This generally increases @@ -10317,8 +10276,8 @@ the inference speed. The inference process is a compute-bound process; any numbe greater than the number of available hardware threads on the machine does not increase the inference speed. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. -- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the model to deploy. -- **`wait_for` (Optional, Enum("started" | "starting" | "fully_allocated"))**: Specifies the allocation status to wait for before returning. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the model to deploy. +- **`wait_for` (Optional, Enum("started" \| "starting" \| "fully_allocated"))**: Specifies the allocation status to wait for before returning. ## client.ml.stopDataFrameAnalytics [_ml.stop_data_frame_analytics] Stop data frame analytics jobs. @@ -10349,7 +10308,7 @@ array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. - **`force` (Optional, boolean)**: If true, the data frame analytics job is stopped forcefully. -- **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job +- **`timeout` (Optional, string \| -1 \| 0)**: Controls the amount of time to wait until the data frame analytics job stops. Defaults to 20 seconds. ## client.ml.stopDatafeed [_ml.stop_datafeed] @@ -10371,7 +10330,7 @@ list of datafeeds or a wildcard expression. You can close all datafeeds by using the identifier. - **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. - **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter. -- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. +- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter. ## client.ml.stopTrainedModelDeployment [_ml.stop_trained_model_deployment] Stop a trained model deployment. @@ -10450,7 +10409,7 @@ search over indices that have already been read in an effort to determine whethe added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. -- **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is +- **`frequency` (Optional, string \| -1 \| 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value @@ -10470,7 +10429,7 @@ changed. 
Therefore, the time required to learn might be long and the understandability of the results unpredictable. If you want to make significant changes to the source data, it is recommended that you clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one when you are satisfied with the results of the job.
-- **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might
+- **`query_delay` (Optional, string \| -1 \| 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might
not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default
value is randomly selected between `60s` and `120s`. This randomness improves the query performance
when there are multiple jobs running on the same node.
- **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and returns runtime fields to the anomaly detection job.
The detector configuration objects in a job can contain functions that use these script fields.
- **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.
The maximum value is the value of `index.max_result_window`.
- **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified.
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are:

* `all`: Match any data stream or index, including hidden ones.
@@ -10534,7 +10493,7 @@ option is set to `true`, the open anomaly detection jobs API does not return an
error and the job waits in the opening state until sufficient machine learning
node capacity is available.
- **`analysis_limits` (Optional, { model_memory_limit })**
-- **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. The time between each periodic persistence
+- **`background_persist_interval` (Optional, string \| -1 \| 0)**: Advanced configuration option. The time between each periodic persistence
of the model. The default value is a randomized value between 3 to 4 hours,
which avoids all jobs persisting at exactly the same time. The smallest allowed
@@ -10550,7 +10509,7 @@ custom URLs to machine learning results.
- **`categorization_filters` (Optional, string[])**
- **`description` (Optional, string)**: A description of the job.
- **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })**
-- **`model_prune_window` (Optional, string | -1 | 0)**
+- **`model_prune_window` (Optional, string \| -1 \| 0)**
- **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic
removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is
@@ -10643,7 +10602,7 @@ client.ml.upgradeJobSnapshot({ job_id, snapshot_id })

- **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot.
- **`wait_for_completion` (Optional, boolean)**: When true, the API won’t respond until the upgrade is complete. Otherwise, it responds as soon as the upgrade task is assigned to a node. -- **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the request to complete. +- **`timeout` (Optional, string \| -1 \| 0)**: Controls the time to wait for the request to complete. ## client.nodes.clearRepositoriesMeteringArchive [_nodes.clear_repositories_metering_archive] Clear the archived repositories metering. @@ -10658,7 +10617,7 @@ client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) ### Arguments [_arguments_nodes.clear_repositories_metering_archive] #### Request (object) [_request_nodes.clear_repositories_metering_archive] -- **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. +- **`node_id` (string \| string[])**: List of node IDs or names used to limit returned information. - **`max_archive_version` (number)**: Specifies the maximum `archive_version` to be cleared from the archive. ## client.nodes.getRepositoriesMeteringInfo [_nodes.get_repositories_metering_info] @@ -10676,7 +10635,7 @@ client.nodes.getRepositoriesMeteringInfo({ node_id }) ### Arguments [_arguments_nodes.get_repositories_metering_info] #### Request (object) [_request_nodes.get_repositories_metering_info] -- **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. +- **`node_id` (string \| string[])**: List of node IDs or names used to limit returned information. ## client.nodes.hotThreads [_nodes.hot_threads] Get the hot threads for nodes. @@ -10692,16 +10651,16 @@ client.nodes.hotThreads({ ... }) ### Arguments [_arguments_nodes.hot_threads] #### Request (object) [_request_nodes.hot_threads] -- **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. +- **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information. - **`ignore_idle_threads` (Optional, boolean)**: If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out. -- **`interval` (Optional, string | -1 | 0)**: The interval to do the second sampling of threads. +- **`interval` (Optional, string \| -1 \| 0)**: The interval to do the second sampling of threads. - **`snapshots` (Optional, number)**: Number of samples of thread stacktrace. - **`threads` (Optional, number)**: Specifies the number of hot threads to provide information for. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -- **`type` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The type to sample. -- **`sort` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The sort order for 'cpu' type (default: total) +- **`type` (Optional, Enum("cpu" \| "wait" \| "block" \| "gpu" \| "mem"))**: The type to sample. +- **`sort` (Optional, Enum("cpu" \| "wait" \| "block" \| "gpu" \| "mem"))**: The sort order for 'cpu' type (default: total) ## client.nodes.info [_nodes.info] Get node information. @@ -10717,10 +10676,10 @@ client.nodes.info({ ... 
}) ### Arguments [_arguments_nodes.info] #### Request (object) [_request_nodes.info] -- **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. -- **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. +- **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information. +- **`metric` (Optional, string \| string[])**: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. - **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.nodes.reloadSecureSettings [_nodes.reload_secure_settings] Reload the keystore on nodes in the cluster. @@ -10742,9 +10701,9 @@ client.nodes.reloadSecureSettings({ ... }) ### Arguments [_arguments_nodes.reload_secure_settings] #### Request (object) [_request_nodes.reload_secure_settings] -- **`node_id` (Optional, string | string[])**: The names of particular nodes in the cluster to target. +- **`node_id` (Optional, string \| string[])**: The names of particular nodes in the cluster to target. - **`secure_settings_password` (Optional, string)**: The password for the Elasticsearch keystore. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.nodes.stats [_nodes.stats] @@ -10761,16 +10720,16 @@ client.nodes.stats({ ... }) ### Arguments [_arguments_nodes.stats] #### Request (object) [_request_nodes.stats] -- **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. -- **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics -- **`index_metric` (Optional, string | string[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. -- **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. -- **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics. -- **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. +- **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information. +- **`metric` (Optional, string \| string[])**: Limit the information returned to the specified metrics +- **`index_metric` (Optional, string \| string[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. +- **`completion_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. +- **`fielddata_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata statistics. 
+- **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics. - **`groups` (Optional, boolean)**: List of search groups to include in the search statistics. - **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). -- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - **`types` (Optional, string[])**: A list of document types for the indexing index metric. - **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory. @@ -10786,10 +10745,10 @@ client.nodes.usage({ ... }) ### Arguments [_arguments_nodes.usage] #### Request (object) [_request_nodes.usage] -- **`node_id` (Optional, string | string[])**: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes -- **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. +- **`node_id` (Optional, string \| string[])**: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes +- **`metric` (Optional, string \| string[])**: Limits the information returned to the specific metrics. A list of the following options: `_all`, `rest_actions`. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.queryRules.deleteRule [_query_rules.delete_rule] @@ -10892,8 +10851,8 @@ client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) #### Request (object) [_request_query_rules.put_rule] - **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to be created or updated. - **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to be created or updated. -- **`type` (Enum("pinned" | "exclude"))**: The type of rule. -- **`criteria` ({ type, metadata, values } | { type, metadata, values }[])**: The criteria that must be met for the rule to be applied. +- **`type` (Enum("pinned" \| "exclude"))**: The type of rule. +- **`criteria` ({ type, metadata, values } \| { type, metadata, values }[])**: The criteria that must be met for the rule to be applied. If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. - **`actions` ({ ids, docs })**: The actions to take when the rule is matched. The format of this action depends on the rule type. 
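For orientation only (not part of the generated reference), a minimal sketch of a `putRule` call follows; the ruleset ID, rule ID, criteria metadata key, query value, and pinned document IDs are all hypothetical placeholders:

```ts
// Hypothetical sketch: pin two documents whenever the user's query contains "pugs".
// 'my-ruleset', 'my-rule', 'user_query', 'id1', and 'id2' are placeholder values.
const response = await client.queryRules.putRule({
  ruleset_id: 'my-ruleset',
  rule_id: 'my-rule',
  type: 'pinned',
  criteria: [{ type: 'contains', metadata: 'user_query', values: ['pugs'] }],
  actions: { ids: ['id1', 'id2'] }
})
console.log(response.result) // e.g. 'created' the first time, 'updated' afterwards
```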
@@ -10919,7 +10878,7 @@ client.queryRules.putRuleset({ ruleset_id, rules }) #### Request (object) [_request_query_rules.put_ruleset] - **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated. -- **`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])** +- **`rules` ({ rule_id, type, criteria, actions, priority } \| { rule_id, type, criteria, actions, priority }[])** ## client.queryRules.test [_query_rules.test] Test a query ruleset. @@ -11035,7 +10994,7 @@ client.rollup.getRollupIndexCaps({ index }) ### Arguments [_arguments_rollup.get_rollup_index_caps] #### Request (object) [_request_rollup.get_rollup_index_caps] -- **`index` (string | string[])**: Data stream or index to check for rollup capabilities. +- **`index` (string \| string[])**: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. ## client.rollup.putJob [_rollup.put_job] @@ -11082,8 +11041,8 @@ rolled up; it is merely used for tweaking the speed or memory cost of the indexe - **`metrics` (Optional, { field, metrics }[])**: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined on a per-field basis and for each field you configure which metric should be collected. -- **`timeout` (Optional, string | -1 | 0)**: Time to wait for the request to complete. -- **`headers` (Optional, Record)** +- **`timeout` (Optional, string \| -1 \| 0)**: Time to wait for the request to complete. +- **`headers` (Optional, Record)** ## client.rollup.rollupSearch [_rollup.rollup_search] Search rolled-up data. @@ -11132,7 +11091,7 @@ client.rollup.rollupSearch({ index }) ### Arguments [_arguments_rollup.rollup_search] #### Request (object) [_request_rollup.rollup_search] -- **`index` (string | string[])**: A list of data streams and indices used to limit the request. +- **`index` (string \| string[])**: A list of data streams and indices used to limit the request. This parameter has the following rules: * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. @@ -11185,7 +11144,7 @@ client.rollup.stopJob({ id }) #### Request (object) [_request_rollup.stop_job] - **`id` (string)**: Identifier for the rollup job. -- **`timeout` (Optional, string | -1 | 0)**: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. +- **`timeout` (Optional, string \| -1 \| 0)**: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. If more than `timeout` time has passed, the API throws a timeout exception. NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. The timeout simply means the API call itself timed out while waiting for the status change. @@ -11281,7 +11240,7 @@ client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_t #### Request (object) [_request_search_application.post_behavioral_analytics_event] - **`collection_name` (string)**: The name of the behavioral analytics collection. 
-- **`event_type` (Enum("page_view" | "search" | "search_click"))**: The analytics event type. +- **`event_type` (Enum("page_view" \| "search" \| "search_click"))**: The analytics event type. - **`payload` (Optional, User-defined value)** - **`debug` (Optional, boolean)**: Whether the response type has to include more details @@ -11366,8 +11325,8 @@ client.searchableSnapshots.cacheStats({ ... }) ### Arguments [_arguments_searchable_snapshots.cache_stats] #### Request (object) [_request_searchable_snapshots.cache_stats] -- **`node_id` (Optional, string | string[])**: The names of the nodes in the cluster to target. -- **`master_timeout` (Optional, string | -1 | 0)** +- **`node_id` (Optional, string \| string[])**: The names of the nodes in the cluster to target. +- **`master_timeout` (Optional, string \| -1 \| 0)** ## client.searchableSnapshots.clearCache [_searchable_snapshots.clear_cache] Clear the cache. @@ -11382,9 +11341,9 @@ client.searchableSnapshots.clearCache({ ... }) ### Arguments [_arguments_searchable_snapshots.clear_cache] #### Request (object) [_request_searchable_snapshots.clear_cache] -- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to clear from the cache. +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). -- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) @@ -11410,7 +11369,7 @@ If no `renamed_index` is specified, this name will also be used to create the ne - **`renamed_index` (Optional, string)**: The name of the index that will be created. - **`index_settings` (Optional, Record)**: The settings that should be added to the index when it is mounted. - **`ignore_index_settings` (Optional, string[])**: The names of settings that should be removed from the index when it is mounted. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until the operation is complete. @@ -11428,8 +11387,8 @@ client.searchableSnapshots.stats({ ... }) ### Arguments [_arguments_searchable_snapshots.stats] #### Request (object) [_request_searchable_snapshots.stats] -- **`index` (Optional, string | string[])**: A list of data streams and indices to retrieve statistics for. 
-- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Return stats aggregated at cluster, index or shard level +- **`index` (Optional, string \| string[])**: A list of data streams and indices to retrieve statistics for. +- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Return stats aggregated at cluster, index or shard level ## client.security.activateUserProfile [_security.activate_user_profile] Activate a user profile. @@ -11456,7 +11415,7 @@ client.security.activateUserProfile({ grant_type }) ### Arguments [_arguments_security.activate_user_profile] #### Request (object) [_request_security.activate_user_profile] -- **`grant_type` (Enum("password" | "access_token"))**: The type of grant. +- **`grant_type` (Enum("password" \| "access_token"))**: The type of grant. - **`access_token` (Optional, string)**: The user's Elasticsearch access token or JWT. Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. If you specify the `access_token` grant type, this parameter is required. @@ -11499,7 +11458,7 @@ client.security.bulkDeleteRole({ names }) #### Request (object) [_request_security.bulk_delete_role] - **`names` (string[])**: An array of role names to delete -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.bulkPutRole [_security.bulk_put_role] Bulk create or update roles. @@ -11517,7 +11476,7 @@ client.security.bulkPutRole({ roles }) #### Request (object) [_request_security.bulk_put_role] - **`roles` (Record)**: A dictionary of role name to RoleDescriptor objects to add or update -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.bulkUpdateApiKeys [_security.bulk_update_api_keys] Bulk update API keys. @@ -11546,8 +11505,8 @@ client.security.bulkUpdateApiKeys({ ids }) ### Arguments [_arguments_security.bulk_update_api_keys] #### Request (object) [_request_security.bulk_update_api_keys] -- **`ids` (string | string[])**: The API key identifiers. -- **`expiration` (Optional, string | -1 | 0)**: Expiration time for the API keys. +- **`ids` (string \| string[])**: The API key identifiers. +- **`expiration` (Optional, string \| -1 \| 0)**: Expiration time for the API keys. By default, API keys never expire. This property can be omitted to leave the value unchanged. - **`metadata` (Optional, Record)**: Arbitrary nested metadata to associate with the API keys. @@ -11582,7 +11541,7 @@ parameter, the password is changed for the current user. 
hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.clearApiKeyCache [_security.clear_api_key_cache] Clear the API key cache. @@ -11599,7 +11558,7 @@ client.security.clearApiKeyCache({ ids }) ### Arguments [_arguments_security.clear_api_key_cache] #### Request (object) [_request_security.clear_api_key_cache] -- **`ids` (string | string[])**: List of API key IDs to evict from the API key cache. +- **`ids` (string \| string[])**: List of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. @@ -11641,7 +11600,7 @@ client.security.clearCachedRealms({ realms }) ### Arguments [_arguments_security.clear_cached_realms] #### Request (object) [_request_security.clear_cached_realms] -- **`realms` (string | string[])**: A list of realms. +- **`realms` (string \| string[])**: A list of realms. To clear all realms, use an asterisk (`*`). It does not support other wildcard patterns. - **`usernames` (Optional, string[])**: A list of the users to clear from the cache. @@ -11661,7 +11620,7 @@ client.security.clearCachedRoles({ name }) ### Arguments [_arguments_security.clear_cached_roles] #### Request (object) [_request_security.clear_cached_roles] -- **`name` (string | string[])**: A list of roles to evict from the role cache. +- **`name` (string \| string[])**: A list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard patterns. @@ -11686,7 +11645,7 @@ client.security.clearCachedServiceTokens({ namespace, service, name }) #### Request (object) [_request_security.clear_cached_service_tokens] - **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. - **`service` (string)**: The name of the service, which must be unique within its namespace. -- **`name` (string | string[])**: A list of token names to evict from the service account token caches. +- **`name` (string \| string[])**: A list of token names to evict from the service account token caches. Use a wildcard (`*`) to evict all tokens that belong to a service account. It does not support other wildcard patterns. @@ -11715,7 +11674,7 @@ client.security.createApiKey({ ... }) ### Arguments [_arguments_security.create_api_key] #### Request (object) [_request_security.create_api_key] -- **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. +- **`expiration` (Optional, string \| -1 \| 0)**: The expiration time for the API key. By default, API keys never expire. - **`name` (Optional, string)**: A name for the API key. - **`role_descriptors` (Optional, Record)**: An array of role descriptors for this API key. 
@@ -11728,7 +11687,7 @@ NOTE: Due to the way in which this permission intersection is calculated, it is In this case, you must explicitly specify a role descriptor with no privileges. The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. - **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.createCrossClusterApiKey [_security.create_cross_cluster_api_key] Create a cross-cluster API key. @@ -11765,7 +11724,7 @@ At least one of them must be specified. NOTE: No explicit privileges should be specified for either search or replication access. The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. - **`name` (string)**: Specifies the name for this API key. -- **`expiration` (Optional, string | -1 | 0)**: Expiration time for the API key. +- **`expiration` (Optional, string \| -1 \| 0)**: Expiration time for the API key. By default, API keys never expire. - **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. @@ -11798,7 +11757,7 @@ They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and unde NOTE: Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.delegatePki [_security.delegate_pki] Delegate PKI authentication. @@ -11847,8 +11806,8 @@ client.security.deletePrivileges({ application, name }) #### Request (object) [_request_security.delete_privileges] - **`application` (string)**: The name of the application. Application privileges are always associated with exactly one application. -- **`name` (string | string[])**: The name of the privilege. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
+- **`name` (string \| string[])**: The name of the privilege. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.deleteRole [_security.delete_role] Delete roles. @@ -11867,7 +11826,7 @@ client.security.deleteRole({ name }) #### Request (object) [_request_security.delete_role] - **`name` (string)**: The name of the role. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.deleteRoleMapping [_security.delete_role_mapping] Delete role mappings. @@ -11887,7 +11846,7 @@ client.security.deleteRoleMapping({ name }) #### Request (object) [_request_security.delete_role_mapping] - **`name` (string)**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.deleteServiceToken [_security.delete_service_token] Delete service account tokens. @@ -11906,7 +11865,7 @@ client.security.deleteServiceToken({ namespace, service, name }) - **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. - **`service` (string)**: The service name. - **`name` (string)**: The name of the service account token. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.deleteUser [_security.delete_user] Delete users. @@ -11923,7 +11882,7 @@ client.security.deleteUser({ username }) #### Request (object) [_request_security.delete_user] - **`username` (string)**: An identifier for the user. 
-- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.disableUser [_security.disable_user] Disable users. @@ -11942,7 +11901,7 @@ client.security.disableUser({ username }) #### Request (object) [_request_security.disable_user] - **`username` (string)**: An identifier for the user. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.disableUserProfile [_security.disable_user_profile] Disable a user profile. @@ -11966,7 +11925,7 @@ client.security.disableUserProfile({ uid }) #### Request (object) [_request_security.disable_user_profile] - **`uid` (string)**: Unique identifier for the user profile. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. @@ -11986,7 +11945,7 @@ client.security.enableUser({ username }) #### Request (object) [_request_security.enable_user] - **`username` (string)**: An identifier for the user. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.enableUserProfile [_security.enable_user_profile] Enable a user profile. @@ -12010,7 +11969,7 @@ client.security.enableUserProfile({ uid }) #### Request (object) [_request_security.enable_user_profile] - **`uid` (string)**: A unique identifier for the user profile. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. 
If 'false', nothing is done with refreshes. @@ -12111,7 +12070,7 @@ client.security.getPrivileges({ ... }) - **`application` (Optional, string)**: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. -- **`name` (Optional, string | string[])**: The name of the privilege. +- **`name` (Optional, string \| string[])**: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. ## client.security.getRole [_security.get_role] @@ -12130,7 +12089,7 @@ client.security.getRole({ ... }) ### Arguments [_arguments_security.get_role] #### Request (object) [_request_security.get_role] -- **`name` (Optional, string | string[])**: The name of the role. +- **`name` (Optional, string \| string[])**: The name of the role. You can specify multiple roles as a list. If you do not specify this parameter, the API returns information about all roles. @@ -12150,7 +12109,7 @@ client.security.getRoleMapping({ ... }) ### Arguments [_arguments_security.get_role_mapping] #### Request (object) [_request_security.get_role_mapping] -- **`name` (Optional, string | string[])**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings. +- **`name` (Optional, string \| string[])**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings. ## client.security.getServiceAccounts [_security.get_service_accounts] Get service accounts. @@ -12215,7 +12174,7 @@ client.security.getSettings({ ... }) ### Arguments [_arguments_security.get_settings] #### Request (object) [_request_security.get_settings] -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.security.getToken [_security.get_token] @@ -12243,7 +12202,7 @@ client.security.getToken({ ... }) ### Arguments [_arguments_security.get_token] #### Request (object) [_request_security.get_token] -- **`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))**: The type of grant. +- **`grant_type` (Optional, Enum("password" \| "client_credentials" \| "_kerberos" \| "refresh_token"))**: The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. - **`scope` (Optional, string)**: The scope of the token. Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. @@ -12274,7 +12233,7 @@ client.security.getUser({ ... }) ### Arguments [_arguments_security.get_user] #### Request (object) [_request_security.get_user] -- **`username` (Optional, string | string[])**: An identifier for the user. 
You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users. +- **`username` (Optional, string \| string[])**: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users. - **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users. ## client.security.getUserPrivileges [_security.get_user_privileges] @@ -12296,7 +12255,7 @@ client.security.getUserPrivileges({ ... }) #### Request (object) [_request_security.get_user_privileges] - **`application` (Optional, string)**: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. - **`priviledge` (Optional, string)**: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. -- **`username` (Optional, string | null)** +- **`username` (Optional, string \| null)** ## client.security.getUserProfile [_security.get_user_profile] Get a user profile. @@ -12316,8 +12275,8 @@ client.security.getUserProfile({ uid }) ### Arguments [_arguments_security.get_user_profile] #### Request (object) [_request_security.get_user_profile] -- **`uid` (string | string[])**: A unique identifier for the user profile. -- **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document. +- **`uid` (string \| string[])**: A unique identifier for the user profile. +- **`data` (Optional, string \| string[])**: A list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=` to retrieve content nested under the specified ``. By default returns no `data` content. @@ -12356,7 +12315,7 @@ client.security.grantApiKey({ api_key, grant_type }) #### Request (object) [_request_security.grant_api_key] - **`api_key` ({ name, expiration, role_descriptors, metadata })**: The API key. -- **`grant_type` (Enum("access_token" | "password"))**: The type of grant. Supported grant types are: `access_token`, `password`. +- **`grant_type` (Enum("access_token" \| "password"))**: The type of grant. Supported grant types are: `access_token`, `password`. - **`access_token` (Optional, string)**: The user's access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. @@ -12386,7 +12345,7 @@ client.security.hasPrivileges({ ... 
}) #### Request (object) [_request_security.has_privileges] - **`user` (Optional, string)**: Username - **`application` (Optional, { application, privileges, resources }[])** -- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_esql" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_esql" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check. +- **`cluster` (Optional, Enum("all" \| "cancel_task" \| "create_snapshot" \| "cross_cluster_replication" \| "cross_cluster_search" \| "delegate_pki" \| "grant_api_key" \| "manage" \| "manage_api_key" \| "manage_autoscaling" \| "manage_behavioral_analytics" \| "manage_ccr" \| "manage_data_frame_transforms" \| "manage_data_stream_global_retention" \| "manage_enrich" \| "manage_esql" \| "manage_ilm" \| "manage_index_templates" \| "manage_inference" \| "manage_ingest_pipelines" \| "manage_logstash_pipelines" \| "manage_ml" \| "manage_oidc" \| "manage_own_api_key" \| "manage_pipeline" \| "manage_rollup" \| "manage_saml" \| "manage_search_application" \| "manage_search_query_rules" \| "manage_search_synonyms" \| "manage_security" \| "manage_service_account" \| "manage_slm" \| "manage_token" \| "manage_transform" \| "manage_user_profile" \| "manage_watcher" \| "monitor" \| "monitor_data_frame_transforms" \| "monitor_data_stream_global_retention" \| "monitor_enrich" \| "monitor_esql" \| "monitor_inference" \| "monitor_ml" \| "monitor_rollup" \| "monitor_snapshot" \| "monitor_stats" \| "monitor_text_structure" \| "monitor_transform" \| "monitor_watcher" \| "none" \| "post_behavioral_analytics_event" \| "read_ccr" \| "read_fleet_secrets" \| "read_ilm" \| "read_pipeline" \| "read_security" \| "read_slm" \| "transport_client" \| "write_connector_secrets" \| "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check. - **`index` (Optional, { names, privileges, allow_restricted_indices }[])** ## client.security.hasPrivilegesUserProfile [_security.has_privileges_user_profile] @@ -12592,7 +12551,7 @@ client.security.putPrivileges({ ... 
}) #### Request (object) [_request_security.put_privileges] - **`privileges` (Optional, Record>)** -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.putRole [_security.put_role] Create or update roles. @@ -12612,7 +12571,7 @@ client.security.putRole({ name }) #### Request (object) [_request_security.put_role] - **`name` (string)**: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. - **`applications` (Optional, { application, privileges, resources }[])**: A list of application privilege entries. -- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_esql" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_esql" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. 
+- **`cluster` (Optional, Enum("all" \| "cancel_task" \| "create_snapshot" \| "cross_cluster_replication" \| "cross_cluster_search" \| "delegate_pki" \| "grant_api_key" \| "manage" \| "manage_api_key" \| "manage_autoscaling" \| "manage_behavioral_analytics" \| "manage_ccr" \| "manage_data_frame_transforms" \| "manage_data_stream_global_retention" \| "manage_enrich" \| "manage_esql" \| "manage_ilm" \| "manage_index_templates" \| "manage_inference" \| "manage_ingest_pipelines" \| "manage_logstash_pipelines" \| "manage_ml" \| "manage_oidc" \| "manage_own_api_key" \| "manage_pipeline" \| "manage_rollup" \| "manage_saml" \| "manage_search_application" \| "manage_search_query_rules" \| "manage_search_synonyms" \| "manage_security" \| "manage_service_account" \| "manage_slm" \| "manage_token" \| "manage_transform" \| "manage_user_profile" \| "manage_watcher" \| "monitor" \| "monitor_data_frame_transforms" \| "monitor_data_stream_global_retention" \| "monitor_enrich" \| "monitor_esql" \| "monitor_inference" \| "monitor_ml" \| "monitor_rollup" \| "monitor_snapshot" \| "monitor_stats" \| "monitor_text_structure" \| "monitor_transform" \| "monitor_watcher" \| "none" \| "post_behavioral_analytics_event" \| "read_ccr" \| "read_fleet_secrets" \| "read_ilm" \| "read_pipeline" \| "read_security" \| "read_slm" \| "transport_client" \| "write_connector_secrets" \| "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. - **`global` (Optional, Record)**: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. - **`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])**: A list of indices permissions entries. - **`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])**: A list of remote indices permissions entries. @@ -12624,7 +12583,7 @@ They have no effect for remote clusters configured with the certificate based mo - **`run_as` (Optional, string[])**: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. - **`description` (Optional, string)**: Optional description of the role descriptor - **`transient_metadata` (Optional, Record)**: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
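As a non-normative illustration of the parameters documented above, a minimal sketch of creating a role; the role name, index names, and granted privileges are hypothetical:

```ts
// Hypothetical sketch: a role that can monitor the cluster and read two indices.
// 'my_monitor_role', 'index1', and 'index2' are placeholder values.
const response = await client.security.putRole({
  name: 'my_monitor_role',
  cluster: ['monitor'],
  indices: [{ names: ['index1', 'index2'], privileges: ['read'] }],
  refresh: 'wait_for'
})
console.log(response.role) // e.g. { created: true } when the role did not exist yet
```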
## client.security.putRoleMapping [_security.put_role_mapping] Create or update role mappings. @@ -12675,7 +12634,7 @@ Exactly one of `roles` or `role_templates` must be specified. - **`rules` (Optional, { any, all, field, except })**: The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL. - **`run_as` (Optional, string[])** -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.putUser [_security.put_user] Create or update users. @@ -12698,8 +12657,8 @@ client.security.putUser({ username }) NOTE: Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed. -- **`email` (Optional, string | null)**: The email of the user. -- **`full_name` (Optional, string | null)**: The full name of the user. +- **`email` (Optional, string \| null)**: The email of the user. +- **`full_name` (Optional, string \| null)**: The full name of the user. - **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the user. - **`password` (Optional, string)**: The user's password. Passwords must be at least 6 characters long. @@ -12714,7 +12673,7 @@ The `password` parameter and the `password_hash` parameter cannot be used in the The roles determine the user's access permissions. To create a user without any roles, specify an empty list (`[]`). - **`enabled` (Optional, boolean)**: Specifies whether the user is enabled. -- **`refresh` (Optional, Enum(true | false | "wait_for"))**: Valid values are `true`, `false`, and `wait_for`. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: Valid values are `true`, `false`, and `wait_for`. These values have the same meaning as in the index API, but the default value for this API is true. ## client.security.queryApiKeys [_security.query_api_keys] @@ -12755,7 +12714,7 @@ Such a match query is hence equivalent to a `term` query. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. -- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: The sort definition. Other than `id`, all public fields of an API key are eligible for sorting. In addition, sort can also be applied to the `_doc` field to sort by index order. - **`size` (Optional, number)**: The number of hits to return. @@ -12763,7 +12722,7 @@ It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. 
By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. -- **`search_after` (Optional, number | number | string | boolean | null[])**: The search after definition. +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: The search after definition. - **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. @@ -12799,14 +12758,14 @@ You can query the following information associated with roles: `name`, `descript It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. -- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: The sort definition. You can sort on `username`, `roles`, or `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. - **`size` (Optional, number)**: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. -- **`search_after` (Optional, number | number | string | boolean | null[])**: The search after definition. +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: The search after definition. ## client.security.queryUser [_security.query_user] Find users with a query. @@ -12835,14 +12794,14 @@ You can query the following information associated with user: `username`, `roles It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. -- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: The sort definition. Fields eligible for sorting are: `username`, `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. - **`size` (Optional, number)**: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. -- **`search_after` (Optional, number | number | string | boolean | null[])**: The search after definition +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: The search after definition - **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users. 
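For illustration only, a minimal sketch of a paginated user query using the options above; the query, sort, and page size are hypothetical, and the response is assumed to carry the matched users:

```ts
// Hypothetical sketch: find enabled users, sorted by username, and include the
// profile UID for each user that has one.
const response = await client.security.queryUser({
  query: { term: { enabled: true } },
  sort: ['username'],
  size: 10,
  with_profile_uid: true
})
console.log(response.total, response.users)
```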
## client.security.samlAuthenticate [_security.saml_authenticate]

@@ -12873,7 +12832,7 @@ client.security.samlAuthenticate({ content, ids })

#### Request (object) [_request_security.saml_authenticate]

- **`content` (string)**: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document.
-- **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
+- **`ids` (string \| string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
- **`realm` (Optional, string)**: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined.

## client.security.samlCompleteLogout [_security.saml_complete_logout]
@@ -12900,7 +12859,7 @@ client.security.samlCompleteLogout({ realm, ids })

#### Request (object) [_request_security.saml_complete_logout]

- **`realm` (string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response.
-- **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
+- **`ids` (string \| string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
- **`query_string` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI.
- **`content` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response.
@@ -13031,7 +12990,7 @@ client.security.suggestUserProfiles({ ... })

- **`name` (Optional, string)**: A query string used to match name-related fields in user profile documents.
Name-related fields are the user's `username`, `full_name`, and `email`.
- **`size` (Optional, number)**: The number of profiles to return.
-- **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document.
+- **`data` (Optional, string \| string[])**: A list of filters for the `data` field of the profile document.
To return all content use `data=*`.
To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`.
By default, the API returns no `data` content.
@@ -13083,7 +13042,7 @@ The structure of a role descriptor is the same as the request for the create API
It supports a nested data structure.
Within the metadata object, keys beginning with `_` are reserved for system usage.
When specified, this value fully replaces the metadata previously associated with the API key.
-- **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key.
+- **`expiration` (Optional, string \| -1 \| 0)**: The expiration time for the API key.
By default, API keys never expire.
This property can be omitted to leave the expiration unchanged.

@@ -13120,7 +13079,7 @@ client.security.updateCrossClusterApiKey({ id, access })
The access is composed of permissions for cross cluster search and cross cluster replication.
At least one of them must be specified.
When specified, the new access assignment fully replaces the previously assigned access.
-- **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key.
+- **`expiration` (Optional, string \| -1 \| 0)**: The expiration time for the API key.
By default, API keys never expire.
This property can be omitted to leave the value unchanged.
- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key.
It supports a nested data structure.
Within the metadata object, keys beginning with `_` are reserved for system usage.
@@ -13149,9 +13108,9 @@ client.security.updateSettings({ ... })

- **`security` (Optional, { index })**: Settings for the index used for most security configuration, including native realm users and roles configured with the API.
- **`security-profile` (Optional, { index })**: Settings for the index used to store profile information.
- **`security-tokens` (Optional, { index })**: Settings for the index used to store tokens.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
-- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error.

## client.security.updateUserProfileData [_security.update_user_profile_data]
@@ -13193,7 +13152,7 @@ Within the `data` object, top-level keys cannot begin with an underscore (`_`) o
The data object is not searchable, but can be retrieved with the get user profile API.
- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
-- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation
visible to search.
If 'wait_for', it waits for a refresh to make this operation visible to search.
If 'false', nothing is done with refreshes.

## client.shutdown.deleteNode [_shutdown.delete_node]
@@ -13219,8 +13178,8 @@ client.shutdown.deleteNode({ node_id })

#### Request (object) [_request_shutdown.delete_node]

- **`node_id` (string)**: The node id of the node to be removed from the shutdown state
-- **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-- **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`master_timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

## client.shutdown.getNode [_shutdown.get_node]
Get the shutdown status.

@@ -13241,8 +13200,8 @@ client.shutdown.getNode({ ...
})

### Arguments [_arguments_shutdown.get_node]

#### Request (object) [_request_shutdown.get_node]

-- **`node_id` (Optional, string | string[])**: Which node for which to retrieve the shutdown status
-- **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`node_id` (Optional, string \| string[])**: The node for which to retrieve the shutdown status
+- **`master_timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.

## client.shutdown.putNode [_shutdown.put_node]
Prepare a node to be shut down.

@@ -13275,7 +13234,7 @@ client.shutdown.putNode({ node_id, type, reason })

- **`node_id` (string)**: The node identifier.
This parameter is not validated against the cluster's active nodes.
This enables you to register a node for shut down while it is offline.
No error is thrown if you specify an invalid node ID.
-- **`type` (Enum("restart" | "remove" | "replace"))**: Valid values are restart, remove, or replace.
+- **`type` (Enum("restart" \| "remove" \| "replace"))**: Valid values are restart, remove, or replace.
Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance.
Because the node is expected to rejoin the cluster, data is not migrated off of the node.
Use remove when you need to permanently remove a node from the cluster.
@@ -13292,9 +13251,9 @@ If you specify both a restart allocation delay and an index-level allocation del
Specifies the name of the node that is replacing the node being shut down.
Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node.
During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules.
-- **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a connection to the master node.
+- **`master_timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
-- **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a response.
+- **`timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error.

## client.simulate.ingest [_simulate.ingest]
@@ -13353,9 +13312,9 @@ client.slm.deleteLifecycle({ policy_id })

#### Request (object) [_request_slm.delete_lifecycle]

- **`policy_id` (string)**: The id of the snapshot lifecycle policy to remove
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
-- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error. ## client.slm.executeLifecycle [_slm.execute_lifecycle] @@ -13373,9 +13332,9 @@ client.slm.executeLifecycle({ policy_id }) #### Request (object) [_request_slm.execute_lifecycle] - **`policy_id` (string)**: The id of the snapshot lifecycle policy to be executed -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.slm.executeRetention [_slm.execute_retention] @@ -13392,9 +13351,9 @@ client.slm.executeRetention({ ... }) ### Arguments [_arguments_slm.execute_retention] #### Request (object) [_request_slm.execute_retention] -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.slm.getLifecycle [_slm.get_lifecycle] @@ -13410,10 +13369,10 @@ client.slm.getLifecycle({ ... }) ### Arguments [_arguments_slm.get_lifecycle] #### Request (object) [_request_slm.get_lifecycle] -- **`policy_id` (Optional, string | string[])**: List of snapshot lifecycle policies to retrieve -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`policy_id` (Optional, string \| string[])**: List of snapshot lifecycle policies to retrieve +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.slm.getStats [_slm.get_stats] @@ -13429,8 +13388,8 @@ client.slm.getStats({ ... }) ### Arguments [_arguments_slm.get_stats] #### Request (object) [_request_slm.get_stats] -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. ## client.slm.getStatus [_slm.get_status] Get the snapshot lifecycle management status. @@ -13444,10 +13403,10 @@ client.slm.getStatus({ ... }) ### Arguments [_arguments_slm.get_status] #### Request (object) [_request_slm.get_status] -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. @@ -13472,10 +13431,10 @@ client.slm.putLifecycle({ policy_id }) - **`repository` (Optional, string)**: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. - **`retention` (Optional, { expire_after, max_count, min_count })**: Retention rules used to retain and delete snapshots created by the policy. - **`schedule` (Optional, string)**: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. @@ -13493,10 +13452,10 @@ client.slm.start({ ... }) ### Arguments [_arguments_slm.start] #### Request (object) [_request_slm.start] -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. @@ -13519,10 +13478,10 @@ client.slm.stop({ ... }) ### Arguments [_arguments_slm.stop] #### Request (object) [_request_slm.stop] -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error.
To indicate that the request should never timeout, set it to `-1`.
-- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error.
To indicate that the request should never timeout, set it to `-1`.

@@ -13540,10 +13499,10 @@ client.snapshot.cleanupRepository({ repository })

#### Request (object) [_request_snapshot.cleanup_repository]

- **`repository` (string)**: The name of the snapshot repository to clean up.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
To indicate that the request should never timeout, set it to `-1`.
-- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
To indicate that the request should never timeout, set it to `-1`.

@@ -13565,7 +13524,7 @@ client.snapshot.clone({ repository, snapshot, target_snapshot, indices })

- **`target_snapshot` (string)**: The target snapshot name.
- **`indices` (string)**: A list of indices to include in the snapshot.
Multi-target syntax is supported.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
To indicate that the request should never timeout, set it to `-1`.

@@ -13586,7 +13545,7 @@ client.snapshot.create({ repository, snapshot })

- **`snapshot` (string)**: The name of the snapshot.
It supports date math.
It must be unique in the repository.
-- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Determines how wildcard patterns in the `indices` parameter match data streams and indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Determines how wildcard patterns in the `indices` parameter match data streams and indices.
It supports a list of values such as `open,hidden`.
- **`feature_states` (Optional, string[])**: The feature states to include in the snapshot.
Each feature state includes one or more system indices containing related data.
@@ -13602,7 +13561,7 @@ If `false`, the request returns an error for any data stream or index that is mi
- **`include_global_state` (Optional, boolean)**: If `true`, the current cluster state is included in the snapshot.
The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies.
It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). -- **`indices` (Optional, string | string[])**: A list of data streams and indices to include in the snapshot. +- **`indices` (Optional, string \| string[])**: A list of data streams and indices to include in the snapshot. It supports a multi-target syntax. The default is an empty array (`[]`), which includes all regular data streams and regular indices. To exclude all data streams and indices, use `-*`. @@ -13617,7 +13576,7 @@ Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes. @@ -13641,10 +13600,10 @@ client.snapshot.createRepository({ repository }) #### Request (object) [_request_snapshot.create_repository] - **`repository` (string)**: The name of the snapshot repository to register or update. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. - **`verify` (Optional, boolean)**: If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. @@ -13666,7 +13625,7 @@ client.snapshot.delete({ repository, snapshot }) - **`repository` (string)**: The name of the repository to delete a snapshot from. - **`snapshot` (string)**: A list of snapshot names to delete. It also accepts wildcards (`*`). -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. @@ -13684,12 +13643,12 @@ client.snapshot.deleteRepository({ repository }) ### Arguments [_arguments_snapshot.delete_repository] #### Request (object) [_request_snapshot.delete_repository] -- **`repository` (string | string[])**: The ame of the snapshot repositories to unregister. 
+- **`repository` (string \| string[])**: The name of the snapshot repositories to unregister.
Wildcard (`*`) patterns are supported.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
To indicate that the request should never timeout, set it to `-1`.
-- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
To indicate that the request should never timeout, set it to `-1`.

@@ -13711,7 +13670,7 @@ client.snapshot.get({ repository, snapshot })

#### Request (object) [_request_snapshot.get]

- **`repository` (string)**: A list of snapshot repository names used to limit the request.
Wildcard (`*`) expressions are supported.
-- **`snapshot` (string | string[])**: A list of snapshot names to retrieve
+- **`snapshot` (string \| string[])**: A list of snapshot names to retrieve.
Wildcards (`*`) are supported.

* To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`.
* To get information about any snapshots that are currently running, use `_current`.

@@ -13725,9 +13684,9 @@ It can be a millisecond time value or a number when sorting by `index-` or shard
The default is `false`, meaning that this information is omitted.
- **`index_names` (Optional, boolean)**: If `true`, the response includes the name of each index in each snapshot.
- **`include_repository` (Optional, boolean)**: If `true`, the response includes the repository name in each snapshot.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
-- **`order` (Optional, Enum("asc" | "desc"))**: The sort order.
+- **`order` (Optional, Enum("asc" \| "desc"))**: The sort order.
Valid values are `asc` for ascending and `desc` for descending order.
The default behavior is ascending order.
- **`offset` (Optional, number)**: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0.
@@ -13739,7 +13698,7 @@ You can use wildcards (`*`) and combinations of wildcards followed by exclude pa
For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`.
Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy.
To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy.
-- **`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))**: The sort order for the result.
+- **`sort` (Optional, Enum("start_time" \| "duration" \| "name" \| "index_count" \| "repository" \| "shard_count" \| "failed_shard_count"))**: The sort order for the result. The default behavior is sorting by snapshot start time stamp. - **`verbose` (Optional, boolean)**: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. @@ -13757,13 +13716,13 @@ client.snapshot.getRepository({ ... }) ### Arguments [_arguments_snapshot.get_repository] #### Request (object) [_request_snapshot.get_repository] -- **`repository` (Optional, string | string[])**: A list of snapshot repository names used to limit the request. +- **`repository` (Optional, string \| string[])**: A list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. - **`local` (Optional, boolean)**: If `true`, the request gets information from the local node only. If `false`, the request gets information from the master node. -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. @@ -13879,9 +13838,9 @@ For realistic experiments, you should set it to at least `2000`. If false, it returns only a summary of the analysis. - **`early_read_node_count` (Optional, number)**: The number of nodes on which to perform an early read operation while writing each blob. Early read operations are only rarely performed. -- **`max_blob_size` (Optional, number | string)**: The maximum size of a blob to be written during the test. +- **`max_blob_size` (Optional, number \| string)**: The maximum size of a blob to be written during the test. For realistic experiments, you should set it to at least `2gb`. -- **`max_total_data_size` (Optional, number | string)**: An upper limit on the total size of all the blobs written during the test. +- **`max_total_data_size` (Optional, number \| string)**: An upper limit on the total size of all the blobs written during the test. For realistic experiments, you should set it to at least `1tb`. - **`rare_action_probability` (Optional, number)**: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. - **`rarely_abort_writes` (Optional, boolean)**: Indicates whether to rarely cancel writes before they complete. @@ -13891,7 +13850,7 @@ For realistic experiments, you should set it to at least `100`. - **`seed` (Optional, number)**: The seed for the pseudo-random number generator used to generate the list of operations performed during the test. To repeat the same set of operations in multiple experiments, use the same seed in each experiment. Note that the operations are performed concurrently so might not always happen in the same order on each run. -- **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for the test to complete. +- **`timeout` (Optional, string \| -1 \| 0)**: The period of time to wait for the test to complete. 
If no response is received before the timeout expires, the test is cancelled and returns an error.

## client.snapshot.restore [_snapshot.restore]
@@ -13960,7 +13919,7 @@ You can't use this option to change `index.number_of_shards`.

For data streams, this option applies only to restored backing indices.
New backing indices are configured using the data stream's matching index template.
-- **`indices` (Optional, string | string[])**: A list of indices and data streams to restore.
+- **`indices` (Optional, string \| string[])**: A list of indices and data streams to restore.
It supports a multi-target syntax.
The default behavior is all regular indices and regular data streams in the snapshot.

@@ -13976,7 +13935,7 @@ Data streams and indices matching the rename pattern will be renamed according t
The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic.
- **`rename_replacement` (Optional, string)**: The rename replacement string that is used with the `rename_pattern`.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
To indicate that the request should never timeout, set it to `-1`.
- **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the restore operation completes.
@@ -14014,12 +13973,12 @@ client.snapshot.status({ ... })

#### Request (object) [_request_snapshot.status]

- **`repository` (Optional, string)**: The snapshot repository name used to limit the request.
It supports wildcards (`*`) if `<snapshot>` isn't specified.
-- **`snapshot` (Optional, string | string[])**: A list of snapshots to retrieve status for.
+- **`snapshot` (Optional, string \| string[])**: A list of snapshots to retrieve status for.
The default is currently running snapshots.
Wildcards (`*`) are not supported.
- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable.
If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
To indicate that the request should never timeout, set it to `-1`.

## client.snapshot.verifyRepository [_snapshot.verify_repository]
@@ -14037,10 +13996,10 @@ client.snapshot.verifyRepository({ repository })

#### Request (object) [_request_snapshot.verify_repository]

- **`repository` (string)**: The name of the snapshot repository to verify.
-- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
To indicate that the request should never timeout, set it to `-1`.
-- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. @@ -14100,9 +14059,9 @@ The API supports this parameter only for CSV responses. - **`format` (Optional, string)**: The format for the response. You must specify a format using this parameter or the `Accept` HTTP header. If you specify both, the API uses this parameter. -- **`keep_alive` (Optional, string | -1 | 0)**: The retention period for the search and its results. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. -- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. ## client.sql.getAsyncStatus [_sql.get_async_status] @@ -14147,24 +14106,24 @@ It ignores other request body parameters. If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. - **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering. - **`index_using_frozen` (Optional, boolean)**: If `true`, the search can run on frozen indices. -- **`keep_alive` (Optional, string | -1 | 0)**: The retention period for an async or saved synchronous search. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The retention period for an async or saved synchronous search. - **`keep_on_completion` (Optional, boolean)**: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. -- **`page_timeout` (Optional, string | -1 | 0)**: The minimum retention period for the scroll cursor. +- **`page_timeout` (Optional, string \| -1 \| 0)**: The minimum retention period for the scroll cursor. After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. - **`params` (Optional, Record)**: The values for parameters in the query. - **`query` (Optional, string)**: The SQL query to run. -- **`request_timeout` (Optional, string | -1 | 0)**: The timeout before the request fails. 
+- **`request_timeout` (Optional, string \| -1 \| 0)**: The timeout before the request fails. - **`runtime_mappings` (Optional, Record)**: One or more runtime fields for the search request. These fields take precedence over mapped fields with the same name. - **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search. -- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. If the search doesn't finish within this period, the search becomes async. To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. -- **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))**: The format for the response. +- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile"))**: The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. @@ -14328,7 +14287,7 @@ client.synonyms.putSynonym({ id, synonyms_set }) #### Request (object) [_request_synonyms.put_synonym] - **`id` (string)**: The ID of the synonyms set to be created or updated. -- **`synonyms_set` ({ id, synonyms } | { id, synonyms }[])**: The synonym rules definitions for the synonyms set. +- **`synonyms_set` ({ id, synonyms } \| { id, synonyms }[])**: The synonym rules definitions for the synonyms set. - **`refresh` (Optional, boolean)**: If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning. If `false`, analyzers will not be reloaded with the new synonym set @@ -14378,8 +14337,8 @@ client.tasks.cancel({ ... }) ### Arguments [_arguments_tasks.cancel] #### Request (object) [_request_tasks.cancel] -- **`task_id` (Optional, string | number)**: The task identifier. -- **`actions` (Optional, string | string[])**: A list or wildcard expression of actions that is used to limit the request. +- **`task_id` (Optional, string \| number)**: The task identifier. +- **`actions` (Optional, string \| string[])**: A list or wildcard expression of actions that is used to limit the request. - **`nodes` (Optional, string[])**: A list of node IDs or names that is used to limit the request. - **`parent_task_id` (Optional, string)**: A parent task ID that is used to limit the tasks. - **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until all found tasks are complete. @@ -14403,7 +14362,7 @@ client.tasks.get({ task_id }) #### Request (object) [_request_tasks.get] - **`task_id` (string)**: The task identifier. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed. @@ -14477,17 +14436,17 @@ client.tasks.list({ ... }) ### Arguments [_arguments_tasks.list] #### Request (object) [_request_tasks.list] -- **`actions` (Optional, string | string[])**: A list or wildcard expression of actions used to limit the request. 
+- **`actions` (Optional, string \| string[])**: A list or wildcard expression of actions used to limit the request.
For example, you can use `cluster:*` to retrieve all cluster-related tasks.
- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about the running tasks.
This information is useful to distinguish tasks from each other but is more costly to run.
-- **`group_by` (Optional, Enum("nodes" | "parents" | "none"))**: A key that is used to group tasks in the response.
+- **`group_by` (Optional, Enum("nodes" \| "parents" \| "none"))**: A key that is used to group tasks in the response.
The task lists can be grouped either by nodes or by parent tasks.
-- **`nodes` (Optional, string | string[])**: A list of node IDs or names that is used to limit the returned information.
+- **`nodes` (Optional, string \| string[])**: A list of node IDs or names that is used to limit the returned information.
- **`parent_task_id` (Optional, string)**: A parent task identifier that is used to limit returned information.
To return all tasks, omit this parameter or use a value of `-1`.
If the parent task is not found, the API does not return a 404 response code.
-- **`timeout` (Optional, string | -1 | 0)**: The period to wait for each node to respond.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for each node to respond.
If a node does not respond before its timeout expires, the response does not include its information.
However, timed out nodes are included in the `node_failures` property.
- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete.

## client.textStructure.findFieldStructure [_text_structure.find_field_structure]
@@ -14533,13 +14492,13 @@ In this default scenario, all rows must have the same number of fields for the d
If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
- **`documents_to_sample` (Optional, number)**: The number of documents to include in the structural analysis.
The minimum value is 2.
-- **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns.
+- **`ecs_compatibility` (Optional, Enum("disabled" \| "v1"))**: The mode of compatibility with ECS compliant Grok patterns.
Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output.
The intention in that situation is that a user who knows the meanings will rename the fields before using them.
- **`explain` (Optional, boolean)**: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
-- **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text.
+- **`format` (Optional, Enum("delimited" \| "ndjson" \| "semi_structured_text" \| "xml"))**: The high level structure of the text.
By default, the API chooses the format.
In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. @@ -14554,7 +14513,7 @@ If your delimited text format does not use quoting, a workaround is to set this - **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. -- **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. +- **`timeout` (Optional, string \| -1 \| 0)**: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. - **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. @@ -14640,12 +14599,12 @@ Only a single character is supported; the delimiter cannot have multiple charact By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. -- **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns. +- **`ecs_compatibility` (Optional, Enum("disabled" \| "v1"))**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. - **`explain` (Optional, boolean)**: If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. -- **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text. +- **`format` (Optional, Enum("delimited" \| "ndjson" \| "semi_structured_text" \| "xml"))**: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. @@ -14660,7 +14619,7 @@ If your delimited text format does not use quoting, a workaround is to set this - **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. 
-- **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum amount of time that the structure analysis can take.
If the analysis is still running when the timeout expires, it will be stopped.
- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text.
In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
@@ -14782,7 +14741,7 @@ If your delimited text format does not use quoting, a workaround is to set this
- **`should_trim_fields` (Optional, boolean)**: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them.
If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`.
Otherwise, the default value is `false`.
-- **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum amount of time that the structure analysis can take.
If the analysis is still running when the timeout expires then it will be stopped.
- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text.
In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field.
@@ -14867,7 +14826,7 @@ client.transform.deleteTransform({ transform_id })

deleted regardless of its current state.
- **`delete_dest_index` (Optional, boolean)**: If this value is true, the destination index is deleted together with the transform. If
false, the destination index will not be deleted
-- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

## client.transform.getNodeStats [_transform.get_node_stats]
Retrieves transform usage information for transform nodes.

## client.transform.getTransform [_transform.get_transform]
@@ -14892,7 +14851,7 @@ client.transform.getTransform({ ... })

### Arguments [_arguments_transform.get_transform]

#### Request (object) [_request_transform.get_transform]

-- **`transform_id` (Optional, string | string[])**: Identifier for the transform. It can be a transform identifier or a
+- **`transform_id` (Optional, string \| string[])**: Identifier for the transform. It can be a transform identifier or a
wildcard expression. You can get information for all transforms by using
`_all`, by specifying `*` as the `<transform_id>`, or by omitting the `<transform_id>`.
@@ -14924,7 +14883,7 @@ client.transform.getTransformStats({ transform_id })

### Arguments [_arguments_transform.get_transform_stats]

#### Request (object) [_request_transform.get_transform_stats]

-- **`transform_id` (string | string[])**: Identifier for the transform. It can be a transform identifier or a
+- **`transform_id` (string \| string[])**: Identifier for the transform. It can be a transform identifier or a
wildcard expression. You can get information for all transforms by using
`_all`, by specifying `*` as the `<transform_id>`, or by omitting the `<transform_id>`.
@@ -14938,7 +14897,7 @@ If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. - **`from` (Optional, number)**: Skips the specified number of transforms. - **`size` (Optional, number)**: Specifies the maximum number of transforms to obtain. -- **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the stats +- **`timeout` (Optional, string \| -1 \| 0)**: Controls the time to wait for the stats ## client.transform.previewTransform [_transform.preview_transform] Preview a transform. @@ -14961,7 +14920,7 @@ client.transform.previewTransform({ ... }) configuration details in the request body. - **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform. - **`description` (Optional, string)**: Free text description of the transform. -- **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the +- **`frequency` (Optional, string \| -1 \| 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. @@ -14975,7 +14934,7 @@ the data. criteria is deleted from the destination index. - **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.transform.putTransform [_transform.put_transform] @@ -15016,7 +14975,7 @@ hyphens, and underscores. It has a 64 character limit and must start and end wit - **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination for the transform. - **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. - **`description` (Optional, string)**: Free text description of the transform. -- **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also +- **`frequency` (Optional, string \| -1 \| 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is `1s` and the maximum is `1h`. - **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key. @@ -15032,7 +14991,7 @@ check for the existence of the source indices and a check that the destination i index pattern. You can use this parameter to skip the checks, for example when the source index does not exist until after the transform is created. The validations are always run when you start the transform, however, with the exception of privilege checks. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. ## client.transform.resetTransform [_transform.reset_transform] Reset a transform. @@ -15053,7 +15012,7 @@ client.transform.resetTransform({ transform_id }) hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. - **`force` (Optional, boolean)**: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.transform.scheduleNowTransform [_transform.schedule_now_transform] Schedule a transform to start now. @@ -15074,7 +15033,7 @@ client.transform.scheduleNowTransform({ transform_id }) #### Request (object) [_request_transform.schedule_now_transform] - **`transform_id` (string)**: Identifier for the transform. -- **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the scheduling to take place +- **`timeout` (Optional, string \| -1 \| 0)**: Controls the time to wait for the scheduling to take place ## client.transform.startTransform [_transform.start_transform] Start a transform. @@ -15104,7 +15063,7 @@ client.transform.startTransform({ transform_id }) #### Request (object) [_request_transform.start_transform] - **`transform_id` (string)**: Identifier for the transform. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - **`from` (Optional, string)**: Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. ## client.transform.stopTransform [_transform.stop_transform] @@ -15131,7 +15090,7 @@ only partial matches, the API stops the appropriate transforms. If it is false, the request returns a 404 status code when there are no matches or only partial matches. - **`force` (Optional, boolean)**: If it is true, the API forcefully stops the transforms. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the timeout expires, the request returns a timeout exception. However, the request continues processing and eventually moves the transform to a STOPPED state. - **`wait_for_checkpoint` (Optional, boolean)**: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, @@ -15161,7 +15120,7 @@ client.transform.updateTransform({ transform_id }) - **`transform_id` (string)**: Identifier for the transform. - **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform. - **`description` (Optional, string)**: Free text description of the transform. 
-- **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the +- **`frequency` (Optional, string \| -1 \| 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. @@ -15169,12 +15128,12 @@ indexing. The minimum value is 1s and the maximum is 1h. - **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. - **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. - **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. -- **`retention_policy` (Optional, { time } | null)**: Defines a retention policy for the transform. Data that meets the defined +- **`retention_policy` (Optional, { time } \| null)**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. - **`defer_validation` (Optional, boolean)**: When true, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the transform is created. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.transform.upgradeTransforms [_transform.upgrade_transforms] @@ -15204,7 +15163,7 @@ client.transform.upgradeTransforms({ ... }) #### Request (object) [_request_transform.upgrade_transforms] - **`dry_run` (Optional, boolean)**: When true, the request checks for updates but does not run them. -- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.watcher.ackWatch [_watcher.ack_watch] @@ -15229,7 +15188,7 @@ client.watcher.ackWatch({ watch_id }) #### Request (object) [_request_watcher.ack_watch] - **`watch_id` (string)**: The watch identifier. -- **`action_id` (Optional, string | string[])**: A list of the action identifiers to acknowledge. +- **`action_id` (Optional, string \| string[])**: A list of the action identifiers to acknowledge. If you omit this parameter, all of the actions of the watch are acknowledged. ## client.watcher.activateWatch [_watcher.activate_watch] @@ -15309,7 +15268,7 @@ client.watcher.executeWatch({ ... }) #### Request (object) [_request_watcher.execute_watch] - **`id` (Optional, string)**: The watch identifier. -- **`action_modes` (Optional, Record)**: Determines how to handle the watch actions as part of the watch execution. +- **`action_modes` (Optional, Record)**: Determines how to handle the watch actions as part of the watch execution. - **`alternative_input` (Optional, Record)**: When present, the watch uses this object as a payload instead of executing its own input. 
- **`ignore_condition` (Optional, boolean)**: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. - **`record_execution` (Optional, boolean)**: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. @@ -15335,7 +15294,7 @@ client.watcher.getSettings({ ... }) ### Arguments [_arguments_watcher.get_settings] #### Request (object) [_request_watcher.get_settings] -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.watcher.getWatch [_watcher.get_watch] @@ -15380,7 +15339,7 @@ client.watcher.putWatch({ id }) - **`condition` (Optional, { always, array_compare, compare, never, script })**: The condition that defines if the actions should be run. - **`input` (Optional, { chain, http, search, simple })**: The input that defines the input that loads the data for the watch. - **`metadata` (Optional, Record)**: Metadata JSON that will be copied into the history entries. -- **`throttle_period` (Optional, string | -1 | 0)**: The minimum time between actions being run. +- **`throttle_period` (Optional, string \| -1 \| 0)**: The minimum time between actions being run. The default is 5 seconds. This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. @@ -15413,8 +15372,8 @@ It must be non-negative. - **`size` (Optional, number)**: The number of hits to return. It must be non-negative. - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query that filters the watches to be returned. -- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: One or more fields used to sort the search results. -- **`search_after` (Optional, number | number | string | boolean | null[])**: Retrieve the next page of hits using a set of sort values from the previous page. +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: One or more fields used to sort the search results. +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: Retrieve the next page of hits using a set of sort values from the previous page. ## client.watcher.start [_watcher.start] Start the watch service. @@ -15429,7 +15388,7 @@ client.watcher.start({ ... 
}) ### Arguments [_arguments_watcher.start] #### Request (object) [_request_watcher.start] -- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.watcher.stats [_watcher.stats] Get Watcher statistics. @@ -15445,7 +15404,7 @@ client.watcher.stats({ ... }) ### Arguments [_arguments_watcher.stats] #### Request (object) [_request_watcher.stats] -- **`metric` (Optional, Enum("_all" | "queued_watches" | "current_watches" | "pending_watches") | Enum("_all" | "queued_watches" | "current_watches" | "pending_watches")[])**: Defines which additional metrics are included in the response. +- **`metric` (Optional, Enum("_all" \| "queued_watches" \| "current_watches" \| "pending_watches") \| Enum("_all" \| "queued_watches" \| "current_watches" \| "pending_watches")[])**: Defines which additional metrics are included in the response. - **`emit_stacktraces` (Optional, boolean)**: Defines whether stack traces are generated for each watch that is running. ## client.watcher.stop [_watcher.stop] @@ -15461,7 +15420,7 @@ client.watcher.stop({ ... }) ### Arguments [_arguments_watcher.stop] #### Request (object) [_request_watcher.stop] -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. @@ -15485,9 +15444,9 @@ client.watcher.updateSettings({ ... }) #### Request (object) [_request_watcher.update_settings] - **`index.auto_expand_replicas` (Optional, string)** - **`index.number_of_replicas` (Optional, number)** -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.xpack.info [_xpack.info] @@ -15507,7 +15466,7 @@ client.xpack.info({ ... }) ### Arguments [_arguments_xpack.info] #### Request (object) [_request_xpack.info] -- **`categories` (Optional, Enum("build" | "features" | "license")[])**: A list of the information categories to include in the response. +- **`categories` (Optional, Enum("build" \| "features" \| "license")[])**: A list of the information categories to include in the response. For example, `build,license,features`. - **`accept_enterprise` (Optional, boolean)**: If this param is used it must be set to true - **`human` (Optional, boolean)**: Defines whether additional human-readable information is included in the response. @@ -15527,7 +15486,7 @@ client.xpack.usage({ ... }) ### Arguments [_arguments_xpack.usage] #### Request (object) [_request_xpack.usage] -- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 5b65421f5..394446967 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -364,7 +364,7 @@ export default class Inference { } /** - * Perform chat completion inference + * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference | Elasticsearch API documentation} */ async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -643,7 +643,7 @@ export default class Inference { } /** - * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. 
For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} */ async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -698,7 +698,7 @@ export default class Inference { } /** - * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud | Elasticsearch API documentation} */ async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -756,7 +756,7 @@ export default class Inference { } /** - * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock | Elasticsearch API documentation} */ async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -814,7 +814,7 @@ export default class Inference { } /** - * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic | Elasticsearch API documentation} */ async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -872,7 +872,7 @@ export default class Inference { } /** - * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio | Elasticsearch API documentation} */ async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -930,7 +930,7 @@ export default class Inference { } /** - * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). 
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai | Elasticsearch API documentation} */ async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -988,7 +988,7 @@ export default class Inference { } /** - * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere | Elasticsearch API documentation} */ async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1162,7 +1162,7 @@ export default class Inference { } /** - * Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
+ * Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio | Elasticsearch API documentation} */ async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1220,7 +1220,7 @@ export default class Inference { } /** - * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai | Elasticsearch API documentation} */ async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1278,7 +1278,7 @@ export default class Inference { } /** - * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished.
The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face | Elasticsearch API documentation} */ async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1336,7 +1336,7 @@ export default class Inference { } /** - * Create an JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create a JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai | Elasticsearch API documentation} */ async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1394,7 +1394,7 @@ export default class Inference { } /** - * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral | Elasticsearch API documentation} */ async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1452,7 +1452,7 @@ export default class Inference { } /** - * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. 
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai | Elasticsearch API documentation} */ async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1568,7 +1568,7 @@ export default class Inference { } /** - * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx | Elasticsearch API documentation} */ async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index aed848fb1..5cc78eb73 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -7072,6 +7072,7 @@ export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBa type: 'icu_normalizer' mode?: AnalysisIcuNormalizationMode name?: AnalysisIcuNormalizationType + unicode_set_filter?: string } export type AnalysisIcuNormalizationMode = 'decompose' | 'compose' @@ -10117,6 +10118,10 @@ export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] +export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 
'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string + +export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[] + export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } @@ -13173,15 +13178,16 @@ export interface CatNodesRequest extends CatCatRequestBase { full_id?: boolean | string /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names - /** List of columns that determine how the table should be sorted. + /** A comma-separated list of columns names to display. + * It supports simple wildcards. */ + h?: CatCatNodeColumns + /** A comma-separated list of column names or aliases that determines the sort order. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** Unit used to display time values. */ + /** The unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never, time?: never } @@ -22327,7 +22333,9 @@ export interface InferenceRateLimitSetting { } export interface InferenceRequestChatCompletion { - /** A list of objects representing the conversation. */ + /** A list of objects representing the conversation. + * Requests should generally only add new messages from the user (role `user`). + * The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation. */ messages: InferenceMessage[] /** The ID of the model to use. 
*/ model?: string @@ -24545,7 +24553,8 @@ export interface MigrationPostFeatureUpgradeRequest extends RequestBase { export interface MigrationPostFeatureUpgradeResponse { accepted: boolean - features: MigrationPostFeatureUpgradeMigrationFeature[] + features?: MigrationPostFeatureUpgradeMigrationFeature[] + reason?: string } export interface MlAdaptiveAllocationsSettings { From 739cb1f11c88d6ef89d28569ba467d01a43b05dd Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 27 May 2025 18:14:41 +0300 Subject: [PATCH 564/647] Auto-generated API code (#2847) --- docs/reference/api-reference.md | 67 ++++++++++++++++++- src/api/api/indices.ts | 110 +++++++++++++++++++++++++++++++- src/api/types.ts | 17 +++-- 3 files changed, 184 insertions(+), 10 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index a658dbb00..78dea18bb 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -5848,6 +5848,7 @@ client.indices.deleteIndexTemplate({ name }) ## client.indices.deleteTemplate [_indices.delete_template] Delete a legacy index template. +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template) @@ -6422,7 +6423,7 @@ received before the timeout expires, the request fails and returns an error. ## client.indices.getTemplate [_indices.get_template] -Get index templates. +Get legacy index templates. Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. @@ -6858,7 +6859,7 @@ will be closed temporarily and then reopened in order to apply the changes. timeout expires, the request fails and returns an error. ## client.indices.putTemplate [_indices.put_template] -Create or update an index template. +Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. @@ -13853,6 +13854,68 @@ Note that the operations are performed concurrently so might not always happen i - **`timeout` (Optional, string \| -1 \| 0)**: The period of time to wait for the test to complete. If no response is received before the timeout expires, the test is cancelled and returns an error. +## client.snapshot.repositoryVerifyIntegrity [_snapshot.repository_verify_integrity] +Verify the repository integrity. +Verify the integrity of the contents of a snapshot repository. + +This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail. + +If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity. +Until you do so: + +* It may not be possible to restore some snapshots from this repository. +* Searchable snapshots may report errors when searched or may have unassigned shards. 
+* Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored. +* Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk. +* Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents. + +If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. +The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. +You must also identify what caused the damage and take action to prevent it from happening again. + +If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. +In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository. + +Avoid all operations which write to the repository while the verify repository integrity API is running. +If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. +It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting. + +NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. + +NOTE: This API may not work correctly in a mixed-version cluster. + +The default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster. +For instance, by default it will only use at most half of the `snapshot_meta` threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool. +If you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster. +For large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API. + +The response exposes implementation details of the analysis which may change from version to version. +The response body format is therefore not considered stable and may be different in newer versions. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity) + +```ts +client.snapshot.repositoryVerifyIntegrity({ repository }) +``` + +### Arguments [_arguments_snapshot.repository_verify_integrity] + +#### Request (object) [_request_snapshot.repository_verify_integrity] +- **`repository` (string \| string[])**: The name of the snapshot repository. +- **`blob_thread_pool_concurrency` (Optional, number)**: If `verify_blob_contents` is `true`, this parameter specifies how many blobs to verify at once. +- **`index_snapshot_verification_concurrency` (Optional, number)**: The maximum number of index snapshots to verify concurrently within each index verification. 
+- **`index_verification_concurrency` (Optional, number)**: The number of indices to verify concurrently. +The default behavior is to use the entire `snapshot_meta` thread pool. +- **`max_bytes_per_sec` (Optional, string)**: If `verify_blob_contents` is `true`, this parameter specifies the maximum amount of data that Elasticsearch will read from the repository every second. +- **`max_failed_shard_snapshots` (Optional, number)**: The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage. +If your repository contains more than this number of shard snapshot failures, the verification will fail. +- **`meta_thread_pool_concurrency` (Optional, number)**: The maximum number of snapshot metadata operations to run concurrently. +The default behavior is to use at most half of the `snapshot_meta` thread pool at once. +- **`snapshot_verification_concurrency` (Optional, number)**: The number of snapshots to verify concurrently. +The default behavior is to use at most half of the `snapshot_meta` thread pool at once. +- **`verify_blob_contents` (Optional, boolean)**: Indicates whether to verify the checksum of every data blob in the repository. +If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive. + ## client.snapshot.restore [_snapshot.restore] Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index e613a1c46..d52921b1d 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -422,6 +422,13 @@ export default class Indices { body: [], query: [] }, + 'indices.get_data_stream_settings': { + path: [ + 'name' + ], + body: [], + query: [] + }, 'indices.get_field_mapping': { path: [ 'fields', @@ -580,6 +587,13 @@ export default class Indices { body: [], query: [] }, + 'indices.put_data_stream_settings': { + path: [ + 'name' + ], + body: [], + query: [] + }, 'indices.put_index_template': { path: [ 'name' @@ -1676,7 +1690,7 @@ export default class Indices { } /** - * Delete a legacy index template. + * Delete a legacy index template. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. 
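For illustration, a minimal invocation of this endpoint from the JavaScript client might look like the following sketch; the repository name `my_repository` and the option values are placeholder assumptions, not recommendations:

```ts
// Minimal sketch: verify the integrity of a repository named "my_repository",
// additionally checking every blob's checksum at a throttled read rate.
// The repository name and option values here are illustrative placeholders.
const result = await client.snapshot.repositoryVerifyIntegrity({
  repository: 'my_repository',
  verify_blob_contents: true,
  max_bytes_per_sec: '10mb'
})
console.log(result)
```

Because the verification can be slow and resource-intensive on large repositories, such a call would normally be run against an otherwise idle repository, as the surrounding documentation advises.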
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template | Elasticsearch API documentation} */ async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2488,6 +2502,51 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Gets a data stream's settings + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + */ + async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_stream_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_settings` + const meta: TransportRequestMetadata = { + name: 'indices.get_data_stream_settings', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping | Elasticsearch API documentation} @@ -2753,7 +2812,7 @@ export default class Indices { } /** - * Get index templates. Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. + * Get legacy index templates. Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template | Elasticsearch API documentation} */ async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -3205,6 +3264,51 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Updates a data stream's settings + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + */ + async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.put_data_stream_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_settings` + const meta: TransportRequestMetadata = { + name: 'indices.put_data_stream_settings', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Multiple matching templates** If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. **Composing aliases, mappings, and settings** When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. 
Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template | Elasticsearch API documentation} @@ -3374,7 +3478,7 @@ export default class Indices { } /** - * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Indices matching multiple templates** Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. + * Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Indices matching multiple templates** Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. 
The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template | Elasticsearch API documentation} */ async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index 5cc78eb73..947717dd2 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2954,7 +2954,7 @@ export interface SearchShardProfile { index: IndexName node_id: NodeId searches: SearchSearchProfile[] - shard_id: long + shard_id: integer } export interface SearchSmoothingModelContainer { @@ -9388,8 +9388,6 @@ export interface QueryDslRangeQueryBase extends QueryDslQueryBase { lt?: T /** Less than or equal to. */ lte?: T - from?: T | null - to?: T | null } export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' @@ -22191,6 +22189,13 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi task_type: InferenceTaskType } +export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeJinaAi +} + export interface InferenceInferenceResult { text_embedding_bytes?: InferenceTextEmbeddingByteResult[] text_embedding_bits?: InferenceTextEmbeddingByteResult[] @@ -22373,6 +22378,8 @@ export type InferenceTaskSettings = any export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion' +export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank' + export interface InferenceTextEmbeddingByteResult { embedding: InferenceDenseByteVector } @@ -22834,7 +22841,7 @@ export interface InferencePutJinaaiRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfo +export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi export interface InferencePutMistralRequest extends RequestBase { /** The task type. 
@@ -34214,7 +34221,7 @@ export interface SnapshotSnapshotShardFailure { index: IndexName node_id?: Id reason: string - shard_id: Id + shard_id: integer index_uuid: Id status: string } From ba6ed49727399c075dc716aaccb6a6d828f0c007 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Tue, 27 May 2025 14:01:43 -0500 Subject: [PATCH 565/647] Update dependency @types/node to v22.15.21 (#2849) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 264f2cecb..b040dc7aa 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.15.19", + "@types/node": "22.15.21", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 1c4610e055f46172ddb1dfc02aa02c6c37a4e18a Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 2 Jun 2025 20:10:33 +0300 Subject: [PATCH 566/647] Auto-generated API code (#2859) --- docs/reference/api-reference.md | 25 ++++++++++++++++++------- src/api/api/snapshot.ts | 2 +- src/api/types.ts | 15 +++++++++++++-- 3 files changed, 32 insertions(+), 10 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 78dea18bb..1cc994aed 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -13729,23 +13729,31 @@ To indicate that the request should never timeout, set it to `-1`. ## client.snapshot.repositoryAnalyze [_snapshot.repository_analyze] Analyze a snapshot repository. -Analyze the performance characteristics and any incorrect behaviour found in a repository. -The response exposes implementation details of the analysis which may change from version to version. -The response body format is therefore not considered stable and may be different in newer versions. +Performs operations on a snapshot repository in order to check for incorrect behaviour. There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. -Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. +Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. +This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. 
-If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. +Some repositories may behave correctly when lightly loaded but incorrectly under production-like workloads. +If the first analysis is successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. +Some repositories may behave correctly when accessed by a small number of Elasticsearch nodes but incorrectly when accessed concurrently by a production-scale cluster. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. +Repository analysis triggers conditions that occur only rarely when taking snapshots in a production system. +Snapshotting to unsuitable storage may appear to work correctly most of the time despite repository analysis failures. +However, your snapshot data is at risk if you store it in a snapshot repository that does not reliably pass repository analysis. +You can demonstrate that the analysis failure is due to an incompatible storage implementation by verifying that Elasticsearch does not detect the same problem when analysing the reference implementation of the storage protocol you are using. +For instance, if you are using storage that offers an API which the supplier claims to be compatible with AWS S3, verify that repositories in AWS S3 do not fail repository analysis. +This allows you to demonstrate to your storage supplier that a repository analysis failure must only be caused by an incompatibility with AWS S3 and cannot be attributed to a problem in Elasticsearch. +Please do not report Elasticsearch issues involving third-party storage systems unless you can demonstrate that the same issue exists when analysing a repository that uses the reference implementation of the same storage protocol. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. @@ -13777,7 +13785,9 @@ This consumes bandwidth on the network between the cluster and the repository, a You must ensure this load does not affect other users of these systems. Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. -NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.
+NOTE: This API is intended for exploratory use by humans. +You should expect the request parameters and the response format to vary in future versions. +The response exposes implementation details of the analysis which may change from version to version. NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. @@ -13788,7 +13798,8 @@ NOTE: This API may not work correctly in a mixed-version cluster. *Implementation details* -NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. +NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. +The request parameters and response format depend on details of the implementation so may also be different in newer versions. The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution. diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index 06a544055..72adcede7 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -624,7 +624,7 @@ export default class Snapshot { } /** - * Analyze a snapshot repository. Analyze the performance characteristics and any incorrect behaviour found in a repository. The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions. There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once.
If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following: * Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. * Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. * Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. 
This indicates it behaves incorrectly in ways that the former version did not detect. You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. NOTE: This API may not work correctly in a mixed-version cluster. *Implementation details* NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution. For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data. For some blob-level tasks, the executing node will cancel the write before it is complete. In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. 
Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes. + * Analyze a snapshot repository. Performs operations on a snapshot repository in order to check for incorrect behaviour. There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. Some repositories may behave correctly when lightly loaded but incorrectly under production-like workloads. If the first analysis is successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. Some repositories may behave correctly when accessed by a small number of Elasticsearch nodes but incorrectly when accessed concurrently by a production-scale cluster. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. Repository analysis triggers conditions that occur only rarely when taking snapshots in a production system. Snapshotting to unsuitable storage may appear to work correctly most of the time despite repository analysis failures. However, your snapshot data is at risk if you store it in a snapshot repository that does not reliably pass repository analysis. You can demonstrate that the analysis failure is due to an incompatible storage implementation by verifying that Elasticsearch does not detect the same problem when analysing the reference implementation of the storage protocol you are using. For instance, if you are using storage that offers an API which the supplier claims to be compatible with AWS S3, verify that repositories in AWS S3 do not fail repository analysis. This allows you to demonstrate to your storage supplier that a repository analysis failure must only be caused by an incompatibility with AWS S3 and cannot be attributed to a problem in Elasticsearch. Please do not report Elasticsearch issues involving third-party storage systems unless you can demonstrate that the same issue exists when analysing a repository that uses the reference implementation of the same storage protocol.
You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following: * Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. * Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. * Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. The response exposes implementation details of the analysis which may change from version to version. NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. This indicates it behaves incorrectly in ways that the former version did not detect.
You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. NOTE: This API may not work correctly in a mixed-version cluster. *Implementation details* NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution. For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data. For some blob-level tasks, the executing node will cancel the write before it is complete. In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds.
Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze | Elasticsearch API documentation} */ async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index 947717dd2..6dcec292a 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -21908,7 +21908,7 @@ export interface InferenceAzureOpenAITaskSettings { export type InferenceAzureOpenAITaskType = 'completion' | 'text_embedding' -export type InferenceCohereEmbeddingType = 'byte' | 'float' | 'int8' +export type InferenceCohereEmbeddingType = 'binary' | 'bit' | 'byte' | 'float' | 'int8' export type InferenceCohereInputType = 'classification' | 'clustering' | 'ingest' | 'search' @@ -21922,6 +21922,8 @@ export interface InferenceCohereServiceSettings { * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string /** For a `text_embedding` task, the types of embeddings you want to get back. + * Use `binary` for binary embeddings, which are encoded as bytes with signed int8 precision. + * Use `bit` for binary embeddings, which are encoded as bytes with signed int8 precision (this is a synonym of `binary`). * Use `byte` for signed int8 embeddings (this is a synonym of `int8`). * Use `float` for the default float embeddings. * Use `int8` for signed int8 embeddings. */ @@ -22189,6 +22191,13 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi task_type: InferenceTaskType } +export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAlibabaCloudAI +} + export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string @@ -22378,6 +22387,8 @@ export type InferenceTaskSettings = any export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion' +export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding' + export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank' export interface InferenceTextEmbeddingByteResult { @@ -22604,7 +22615,7 @@ export interface InferencePutAlibabacloudRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfo +export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfoAlibabaCloudAI export interface InferencePutAmazonbedrockRequest extends RequestBase { /** The type of the inference task that the model will perform. 
*/ From b185cfe15518fd691876329f9e03e158d254a4c6 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 2 Jun 2025 12:15:13 -0500 Subject: [PATCH 567/647] Update buildkite plugin junit-annotate to v2.7.0 (#2856) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .buildkite/pipeline.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 307b2f340..d63fc6edf 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -25,7 +25,7 @@ steps: continue_on_failure: true - label: ":junit: Test results" plugins: - - junit-annotate#v2.6.0: + - junit-annotate#v2.7.0: artifacts: "junit-output/junit-*.xml" job-uuid-file-pattern: "junit-(.*).xml" fail-build-on-error: true From c485567c51de2d4b68af59aeaacbe574ed85204b Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 2 Jun 2025 17:19:35 +0000 Subject: [PATCH 568/647] Update dependency @types/node to v22.15.29 (#2855) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index b040dc7aa..b8fae2ec8 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.15.21", + "@types/node": "22.15.29", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From e5c10d80e2587039776e1264e8c26212f92a3c82 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 9 Jun 2025 17:29:01 +0200 Subject: [PATCH 569/647] Auto-generated API code (#2865) --- docs/reference/api-reference.md | 142 +++++++++- src/api/api/indices.ts | 147 ++++++---- src/api/api/inference.ts | 2 +- src/api/types.ts | 457 ++++++++++++++++++++++++++++++-- 4 files changed, 656 insertions(+), 92 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 1cc994aed..616865f62 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -1651,7 +1651,7 @@ client.search({ ... }) - **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL. 
- **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. -- **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. +- **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule, rescorer, linear, pinned })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. - **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. - **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: Used to retrieve the next page of hits using a set of sort values from the previous page. - **`size` (Optional, number)**: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. @@ -5818,14 +5818,22 @@ client.indices.deleteDataStream({ name }) - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. ## client.indices.deleteDataStreamOptions [_indices.delete_data_stream_options] -Deletes the data stream options of the selected data streams. +Delete data stream options. +Removes the data stream options from a data stream. [Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) ```ts -client.indices.deleteDataStreamOptions() +client.indices.deleteDataStreamOptions({ name }) ``` +### Arguments [_arguments_indices.delete_data_stream_options] + +#### Request (object) [_request_indices.delete_data_stream_options] +- **`name` (string \| string[])**: A list of data streams of which the data stream options will be deleted; use `*` to get all data streams +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open) +- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master +- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout ## client.indices.deleteIndexTemplate [_indices.delete_index_template] Delete an index template. @@ -6286,14 +6294,45 @@ Supports a list of values, such as `open,hidden`. - **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned. ## client.indices.getDataStreamOptions [_indices.get_data_stream_options] -Returns the data stream options of the selected data streams. +Get data stream options. + +Get the data stream options configuration of one or more data streams.
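For orientation, here is a minimal usage sketch of the data stream options calls documented above, assuming a client wired to a local cluster and a data stream named `my-data-stream` (both placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // assumed local endpoint

// Read the current options configuration of a single data stream;
// wildcard patterns such as '*' are also accepted for `name`.
const options = await client.indices.getDataStreamOptions({ name: 'my-data-stream' })
console.log(options)

// Remove the options from the same data stream again.
await client.indices.deleteDataStreamOptions({ name: 'my-data-stream' })
```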
[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) ```ts -client.indices.getDataStreamOptions() +client.indices.getDataStreamOptions({ name }) +``` + +### Arguments [_arguments_indices.get_data_stream_options] + +#### Request (object) [_request_indices.get_data_stream_options] +- **`name` (string \| string[])**: List of data streams to limit the request. +Supports wildcards (`*`). +To target all data streams, omit this parameter or use `*` or `_all`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.getDataStreamSettings [_indices.get_data_stream_settings] +Get data stream settings. + +Get setting information for one or more data streams. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-settings) + +```ts +client.indices.getDataStreamSettings({ name }) ``` +### Arguments [_arguments_indices.get_data_stream_settings] + +#### Request (object) [_request_indices.get_data_stream_settings] +- **`name` (string \| string[])**: A list of data streams or data stream patterns. Supports wildcards (`*`). +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. ## client.indices.getFieldMapping [_indices.get_field_mapping] Get mapping definitions. @@ -6651,14 +6690,58 @@ error. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.putDataStreamOptions [_indices.put_data_stream_options] -Updates the data stream options of the selected data streams. +Update data stream options. +Update the data stream options of the specified data streams. [Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) ```ts -client.indices.putDataStreamOptions() +client.indices.putDataStreamOptions({ name }) ``` +### Arguments [_arguments_indices.put_data_stream_options] + +#### Request (object) [_request_indices.put_data_stream_options] +- **`name` (string \| string[])**: List of data streams used to limit the request. +Supports wildcards (`*`). +To target all data streams use `*` or `_all`. +- **`failure_store` (Optional, { enabled, lifecycle })**: If defined, it will update the failure store configuration of every data stream resolved by the name expression. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `hidden`, `open`, `closed`, `none`. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. 
+If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.putDataStreamSettings [_indices.put_data_stream_settings] +Update data stream settings. + +This API can be used to override settings on specific data streams. These overrides will take precedence over what +is specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state, +only certain settings are allowed. If possible, the setting change is applied to all +backing indices. Otherwise, it will be applied when the data stream is next rolled over. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-settings) + +```ts +client.indices.putDataStreamSettings({ name }) +``` + +### Arguments [_arguments_indices.put_data_stream_settings] + +#### Request (object) [_request_indices.put_data_stream_settings] +- **`name` (string \| string[])**: A list of data streams or data stream patterns. +- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })** +- **`dry_run` (Optional, boolean)**: If `true`, the request does not actually change the settings on any data streams or indices. Instead, it +simulates changing the settings and reports back to the user what would have happened had these settings +actually been applied. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the + timeout expires, the request fails and returns an error. ## client.indices.putIndexTemplate [_indices.put_index_template] Create or update an index template. @@ -6812,9 +6895,45 @@ Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. -The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. +The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. + There are multiple valid ways to represent index settings in the request body. 
You can specify only the setting, for example: + +``` +{ + "number_of_replicas": 1 +} +``` + +Or you can use an `index` setting object: +``` +{ + "index": { + "number_of_replicas": 1 + } +} +``` + +Or you can use dot notation: +``` +{ + "index.number_of_replicas": 1 +} +``` + +Or you can embed any of the aforementioned options in a `settings` object. For example: + +``` +{ + "settings": { + "index": { + "number_of_replicas": 1 + } + } +} +``` + NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. @@ -7527,12 +7646,9 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ## client.inference.chatCompletionUnified [_inference.chat_completion_unified] Perform chat completion inference -The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. +The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. -IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. -For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. @@ -7904,7 +8020,7 @@ client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, ser ### Arguments [_arguments_inference.put_googlevertexai] #### Request (object) [_request_inference.put_googlevertexai] -- **`task_type` (Enum("rerank" \| "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("rerank" \| "text_embedding" \| "completion" \| "chat_completion"))**: The type of the inference task that the model will perform. - **`googlevertexai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`. - **`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service.
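As a sketch, the equivalent `indices.putSettings` request bodies shown above map onto this client as follows; the index name is a placeholder and `client` is assumed to be an already constructed `Client` instance:

```ts
// Specify only the setting...
await client.indices.putSettings({
  index: 'my-index',
  settings: { number_of_replicas: 1 }
})

// ...or use an explicit `index` settings object...
await client.indices.putSettings({
  index: 'my-index',
  settings: { index: { number_of_replicas: 1 } }
})

// ...or use dot notation.
await client.indices.putSettings({
  index: 'my-index',
  settings: { 'index.number_of_replicas': 1 }
})
```

All three calls apply the same dynamic setting change to the target index.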
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index d52921b1d..f5cec4c51 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -214,7 +214,11 @@ export default class Indices { 'name' ], body: [], - query: [] + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] }, 'indices.delete_index_template': { path: [ @@ -420,14 +424,19 @@ export default class Indices { 'name' ], body: [], - query: [] + query: [ + 'expand_wildcards', + 'master_timeout' + ] }, 'indices.get_data_stream_settings': { path: [ 'name' ], body: [], - query: [] + query: [ + 'master_timeout' + ] }, 'indices.get_field_mapping': { path: [ @@ -584,15 +593,27 @@ export default class Indices { path: [ 'name' ], - body: [], - query: [] + body: [ + 'failure_store' + ], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] }, 'indices.put_data_stream_settings': { path: [ 'name' ], - body: [], - query: [] + body: [ + 'settings' + ], + query: [ + 'dry_run', + 'master_timeout', + 'timeout' + ] }, 'indices.put_index_template': { path: [ @@ -1600,13 +1621,13 @@ export default class Indices { } /** - * Deletes the data stream options of the selected data streams. + * Delete data stream options. Removes the data stream options from a data stream. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} */ - async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptions): Promise + async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath } = this.acceptedParams['indices.delete_data_stream_options'] @@ -1624,11 +1645,11 @@ export default class Indices { } } - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -2458,13 +2479,13 @@ export default class Indices { } /** - * Returns the data stream options of the selected data streams. + * Get data stream options. Get the data stream options configuration of one or more data streams. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} */ - async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptions): Promise + async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath } = this.acceptedParams['indices.get_data_stream_options'] @@ -2482,11 +2503,11 @@ export default class Indices { } } - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -2503,13 +2524,13 @@ export default class Indices { } /** - * Gets a data stream's settings - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + * Get data stream settings. Get setting information for one or more data streams. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-settings | Elasticsearch API documentation} */ - async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptions): Promise + async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath } = this.acceptedParams['indices.get_data_stream_settings'] @@ -2527,11 +2548,11 @@ export default class Indices { } } - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -3220,15 +3241,17 @@ export default class Indices { } /** - * Updates the data stream options of the selected data streams. + * Update data stream options. Update the data stream options of the specified data streams. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} */ - async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptions): Promise + async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptions): Promise { const { - path: acceptedPath + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery } = this.acceptedParams['indices.put_data_stream_options'] const userQuery = params?.querystring @@ -3244,12 +3267,22 @@ export default class Indices { } } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3265,36 +3298,38 @@ export default class Indices { } /** - * Updates a data stream's settings - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + * Update data stream settings. This API can be used to override settings on specific data streams. These overrides will take precedence over what is specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state, only certain settings are allowed. If possible, the setting change is applied to all backing indices. Otherwise, it will be applied when the data stream is next rolled over. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-settings | Elasticsearch API documentation} */ - async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptions): Promise + async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptions): Promise { const { - path: acceptedPath + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery } = this.acceptedParams['indices.put_data_stream_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} - let body: Record | string | undefined - const userBody = params?.body - if (userBody != null) { - if (typeof userBody === 'string') { - body = userBody - } else { - body = { ...userBody } - } - } - - params = params ?? {} + let body: any = params.body ?? undefined for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3424,7 +3459,7 @@ export default class Indices { } /** - * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. + * Update index settings. Changes dynamic index settings in real time. 
For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example: ``` { "number_of_replicas": 1 } ``` Or you can use an `index` setting object: ``` { "index": { "number_of_replicas": 1 } } ``` Or you can use dot annotation: ``` { "index.number_of_replicas": 1 } ``` Or you can embed any of the aforementioned options in a `settings` object. For example: ``` { "settings": { "index": { "number_of_replicas": 1 } } } ``` NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings | Elasticsearch API documentation} */ async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 394446967..4cbdd53dc 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -364,7 +364,7 @@ export default class Inference { } /** - * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API. 
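// A minimal usage sketch for the streaming chat completion helper documented above,
// assuming a `chat_completion` inference endpoint named 'my-chat-endpoint' already
// exists (the endpoint name and message text are illustrative assumptions, not part
// of the patch):
//
//   const events = await client.inference.chatCompletionUnified({
//     inference_id: 'my-chat-endpoint',
//     chat_completion_request: {
//       messages: [{ role: 'user', content: 'Say hello' }]
//     }
//   }, { asStream: true })
//
// With `asStream: true`, the returned value is a readable stream of server-sent
// events rather than a parsed response body.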
+ * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference | Elasticsearch API documentation} */ async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index 6dcec292a..270b9b3df 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2490,7 +2490,7 @@ export interface SearchFieldSuggester { export interface SearchHighlight extends SearchHighlightBase { encoder?: SearchHighlighterEncoder - fields: Record + fields: Partial> | Partial>[] } export interface SearchHighlightBase { @@ -3868,7 +3868,7 @@ export interface ErrorCauseKeys { /** The type of error */ type: string /** A human-readable explanation of the error, in English. */ - reason?: string + reason?: string | null /** The server stack trace. Present only if the `error_trace=true` parameter was sent with the request. */ stack_trace?: string caused_by?: ErrorCause @@ -4062,6 +4062,12 @@ export interface InlineGetKeys { export type InlineGet = InlineGetKeys & { [property: string]: any } +export interface InnerRetriever { + retriever: RetrieverContainer + weight: float + normalizer: ScoreNormalizer +} + export type Ip = string export interface KnnQuery extends QueryDslQueryBase { @@ -4134,6 +4140,12 @@ export type Level = 'cluster' | 'indices' | 'shards' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' +export interface LinearRetriever extends RetrieverBase { + /** Inner retrievers. */ + retrievers?: InnerRetriever[] + rank_window_size: integer +} + export type MapboxVectorTiles = ArrayBuffer export interface MergesStats { @@ -4230,6 +4242,14 @@ export type Password = string export type Percentage = string | float +export interface PinnedRetriever extends RetrieverBase { + /** Inner retriever. */ + retriever: RetrieverContainer + ids?: string[] + docs?: SpecifiedDocument[] + rank_window_size: integer +} + export type PipelineName = string export interface PluginStats { @@ -4329,6 +4349,12 @@ export interface RescoreVector { oversample: float } +export interface RescorerRetriever extends RetrieverBase { + /** Inner retriever. */ + retriever: RetrieverContainer + rescore: SearchRescore | SearchRescore[] +} + export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' export interface Retries { @@ -4343,6 +4369,8 @@ export interface RetrieverBase { filter?: QueryDslQueryContainer | QueryDslQueryContainer[] /** Minimum _score for matching documents. Documents with a lower _score are not included in the top documents. */ min_score?: float + /** Retriever name. 
*/ + _name?: string } export interface RetrieverContainer { @@ -4356,6 +4384,13 @@ export interface RetrieverContainer { text_similarity_reranker?: TextSimilarityReranker /** A retriever that replaces the functionality of a rule query. */ rule?: RuleRetriever + /** A retriever that re-scores only the results produced by its child retriever. */ + rescorer?: RescorerRetriever + /** A retriever that supports the combination of different retrievers through a weighted linear combination. */ + linear?: LinearRetriever + /** A pinned retriever applies pinned documents to the underlying retriever. + * This retriever will rewrite to a PinnedQueryBuilder. */ + pinned?: PinnedRetriever } export type Routing = string @@ -4369,7 +4404,7 @@ export interface RrfRank { export interface RuleRetriever extends RetrieverBase { /** The ruleset IDs containing the rules this retriever is evaluating against. */ - ruleset_ids: Id[] + ruleset_ids: Id | Id[] /** The match criteria that will determine if a rule in the provided rulesets should be applied. */ match_criteria: any /** The retriever whose results rules should be applied to. */ @@ -4380,6 +4415,8 @@ export interface RuleRetriever extends RetrieverBase { export type ScalarValue = long | double | string | boolean | null +export type ScoreNormalizer = 'none' | 'minmax' | 'l2_norm' + export interface ScoreSort { order?: SortOrder } @@ -4562,6 +4599,11 @@ export type SortOrder = 'asc' | 'desc' export type SortResults = FieldValue[] +export interface SpecifiedDocument { + index?: IndexName + id: Id +} + export interface StandardRetriever extends RetrieverBase { /** Defines a query to retrieve a set of top documents. */ query?: QueryDslQueryContainer @@ -8702,7 +8744,7 @@ export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys export type QueryDslGeoExecution = 'memory' | 'indexed' export interface QueryDslGeoGridQuery extends QueryDslQueryBase { - geogrid?: GeoTile + geotile?: GeoTile geohash?: GeoHash geohex?: GeoHexCell } @@ -8806,6 +8848,8 @@ export interface QueryDslIntervalsContainer { match?: QueryDslIntervalsMatch /** Matches terms that start with a specified set of characters. */ prefix?: QueryDslIntervalsPrefix + range?: QueryDslIntervalsRange + regexp?: QueryDslIntervalsRegexp /** Matches terms using a wildcard pattern. */ wildcard?: QueryDslIntervalsWildcard } @@ -8886,10 +8930,38 @@ export interface QueryDslIntervalsQuery extends QueryDslQueryBase { match?: QueryDslIntervalsMatch /** Matches terms that start with a specified set of characters. */ prefix?: QueryDslIntervalsPrefix + range?: QueryDslIntervalsRange + regexp?: QueryDslIntervalsRegexp /** Matches terms using a wildcard pattern. */ wildcard?: QueryDslIntervalsWildcard } +export interface QueryDslIntervalsRange { + /** Analyzer used to analyze the `prefix`. */ + analyzer?: string + /** Lower term, either gte or gt must be provided. */ + gte?: string + /** Lower term, either gte or gt must be provided. */ + gt?: string + /** Upper term, either lte or lt must be provided. */ + lte?: string + /** Upper term, either lte or lt must be provided. */ + lt?: string + /** If specified, match intervals from this field rather than the top-level field. + * The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: Field +} + +export interface QueryDslIntervalsRegexp { + /** Analyzer used to analyze the `prefix`. */ + analyzer?: string + /** Regex pattern. 
*/ + pattern: string + /** If specified, match intervals from this field rather than the top-level field. + * The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: Field +} + export interface QueryDslIntervalsWildcard { /** Analyzer used to analyze the `pattern`. * Defaults to the top-level field's analyzer. */ @@ -9444,7 +9516,8 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase { export interface QueryDslRuleQuery extends QueryDslQueryBase { organic: QueryDslQueryContainer - ruleset_ids: Id[] + ruleset_ids?: Id | Id[] + ruleset_id?: string match_criteria: any } @@ -9728,7 +9801,7 @@ export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase { export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { /** The tokens representing this query */ - tokens: Record + tokens: Partial>[] /** Token pruning configurations */ pruning_config?: QueryDslTokenPruningConfig } @@ -15431,6 +15504,7 @@ export interface ClusterComponentTemplateSummary { mappings?: MappingTypeMapping aliases?: Record lifecycle?: IndicesDataStreamLifecycleWithRollover + data_stream_options?: IndicesDataStreamOptionsTemplate | null } export interface ClusterAllocationExplainAllocationDecision { @@ -18605,6 +18679,9 @@ export interface IndicesDataStream { replicated?: boolean /** If `true`, the next write to this data stream will trigger a rollover first and the document will be indexed in the new backing index. If the rollover fails the indexing request will fail too. */ rollover_on_write: boolean + /** The settings specific to this data stream that will take precedence over the settings in the matching index + * template. */ + settings: IndicesIndexSettings /** Health status of the data stream. * This health status is based on the state of the primary and replica shards of the stream’s backing indices. */ status: HealthStatus @@ -18619,6 +18696,24 @@ export interface IndicesDataStream { index_mode?: IndicesIndexMode } +export interface IndicesDataStreamFailureStore { + /** If defined, it turns the failure store on/off (`true`/`false`) for this data stream. A data stream failure store + * that's disabled (enabled: `false`) will redirect no new failed indices to the failure store; however, it will + * not remove any existing data from the failure store. */ + enabled?: boolean + /** If defined, it specifies the lifecycle configuration for the failure store of this data stream. */ + lifecycle?: IndicesFailureStoreLifecycle +} + +export interface IndicesDataStreamFailureStoreTemplate { + /** If defined, it turns the failure store on/off (`true`/`false`) for this data stream. A data stream failure store + * that's disabled (enabled: `false`) will redirect no new failed indices to the failure store; however, it will + * not remove any existing data from the failure store. */ + enabled?: boolean | null + /** If defined, it specifies the lifecycle configuration for the failure store of this data stream. */ + lifecycle?: IndicesFailureStoreLifecycleTemplate | null +} + export interface IndicesDataStreamIndex { /** Name of the backing index. */ index_name: IndexName @@ -18671,6 +18766,15 @@ export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStrea rollover?: IndicesDataStreamLifecycleRolloverConditions } +export interface IndicesDataStreamOptions { + /** If defined, it specifies configuration for the failure store of this data stream. 
*/ + failure_store?: IndicesDataStreamFailureStore +} + +export interface IndicesDataStreamOptionsTemplate { + failure_store?: IndicesDataStreamFailureStoreTemplate | null +} + export interface IndicesDataStreamTimestampField { /** Name of the timestamp field for the data stream, which must be `@timestamp`. The `@timestamp` field must be included in every document indexed to the data stream. */ name: Field @@ -18699,6 +18803,26 @@ export interface IndicesFailureStore { rollover_on_write: boolean } +export interface IndicesFailureStoreLifecycle { + /** If defined, every document added to this data stream will be stored at least for this time frame. + * Any time after this duration the document could be deleted. + * When empty, every document in this data stream will be stored indefinitely. */ + data_retention?: Duration + /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle + * that's disabled (enabled: `false`) will have no effect on the data stream. */ + enabled?: boolean +} + +export interface IndicesFailureStoreLifecycleTemplate { + /** If defined, every document added to this data stream will be stored at least for this time frame. + * Any time after this duration the document could be deleted. + * When empty, every document in this data stream will be stored indefinitely. */ + data_retention?: Duration | null + /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle + * that's disabled (enabled: `false`) will have no effect on the data stream. */ + enabled?: boolean +} + export interface IndicesFielddataFrequencyFilter { max: double min: double @@ -18932,6 +19056,7 @@ export interface IndicesIndexTemplateSummary { /** Configuration options for the index. */ settings?: IndicesIndexSettings lifecycle?: IndicesDataStreamLifecycleWithRollover + data_stream_options?: IndicesDataStreamOptionsTemplate | null } export interface IndicesIndexVersioning { @@ -19674,6 +19799,23 @@ export interface IndicesDeleteDataStreamRequest extends RequestBase { export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase +export interface IndicesDeleteDataStreamOptionsRequest extends RequestBase { + /** A comma-separated list of data streams of which the data stream options will be deleted; use `*` to get all data streams */ + name: DataStreamNames + /** Whether wildcard expressions should get expanded to open or closed indices (default: open) */ + expand_wildcards?: ExpandWildcards + /** Specify timeout for connection to master */ + master_timeout?: Duration + /** Explicit timestamp for the document */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never } +} + +export type IndicesDeleteDataStreamOptionsResponse = AcknowledgedResponseBase + export interface IndicesDeleteIndexTemplateRequest extends RequestBase { /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. 
*/ name: Names @@ -20143,6 +20285,59 @@ export interface IndicesGetDataStreamResponse { data_streams: IndicesDataStream[] } +export interface IndicesGetDataStreamOptionsDataStreamWithOptions { + name: DataStreamName + options?: IndicesDataStreamOptions +} + +export interface IndicesGetDataStreamOptionsRequest extends RequestBase { + /** Comma-separated list of data streams to limit the request. + * Supports wildcards (`*`). + * To target all data streams, omit this parameter or use `*` or `_all`. */ + name: DataStreamNames + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + expand_wildcards?: ExpandWildcards + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never } +} + +export interface IndicesGetDataStreamOptionsResponse { + data_streams: IndicesGetDataStreamOptionsDataStreamWithOptions[] +} + +export interface IndicesGetDataStreamSettingsDataStreamSettings { + /** The name of the data stream. */ + name: string + /** The settings specific to this data stream */ + settings: IndicesIndexSettings + /** The settings specific to this data stream merged with the settings from its template. These `effective_settings` + * are the settings that will be used when a new index is created for this data stream. */ + effective_settings: IndicesIndexSettings +} + +export interface IndicesGetDataStreamSettingsRequest extends RequestBase { + /** A comma-separated list of data streams or data stream patterns. Supports wildcards (`*`). */ + name: Indices + /** The period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } +} + +export interface IndicesGetDataStreamSettingsResponse { + data_streams: IndicesGetDataStreamSettingsDataStreamSettings[] +} + export interface IndicesGetFieldMappingRequest extends RequestBase { /** Comma-separated list or wildcard expression of fields used to limit returned information. * Supports wildcards (`*`). */ @@ -20517,6 +20712,90 @@ export interface IndicesPutDataLifecycleRequest extends RequestBase { export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase +export interface IndicesPutDataStreamOptionsRequest extends RequestBase { + /** Comma-separated list of data streams used to limit the request. + * Supports wildcards (`*`). + * To target all data streams use `*` or `_all`. */ + name: DataStreamNames + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `hidden`, `open`, `closed`, `none`. 
*/ + expand_wildcards?: ExpandWildcards + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** If defined, it will update the failure store configuration of every data stream resolved by the name expression. */ + failure_store?: IndicesDataStreamFailureStore + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, failure_store?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, failure_store?: never } +} + +export type IndicesPutDataStreamOptionsResponse = AcknowledgedResponseBase + +export interface IndicesPutDataStreamSettingsDataStreamSettingsError { + index: IndexName + /** A message explaining why the settings could not be applied to specific indices. */ + error: string +} + +export interface IndicesPutDataStreamSettingsIndexSettingResults { + /** The list of settings that were applied to the data stream but not to backing indices. These will be applied to + * the write index the next time the data stream is rolled over. */ + applied_to_data_stream_only: string[] + /** The list of settings that were applied to the data stream and to all of its backing indices. These settings will + * also be applied to the write index the next time the data stream is rolled over. */ + applied_to_data_stream_and_backing_indices: string[] + errors?: IndicesPutDataStreamSettingsDataStreamSettingsError[] +} + +export interface IndicesPutDataStreamSettingsRequest extends RequestBase { + /** A comma-separated list of data streams or data stream patterns. */ + name: Indices + /** If `true`, the request does not actually change the settings on any data streams or indices. Instead, it + * simulates changing the settings and reports back to the user what would have happened had these settings + * actually been applied. */ + dry_run?: boolean + /** The period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ + timeout?: Duration + settings?: IndicesIndexSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, dry_run?: never, master_timeout?: never, timeout?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, dry_run?: never, master_timeout?: never, timeout?: never, settings?: never } +} + +export interface IndicesPutDataStreamSettingsResponse { + data_streams: IndicesPutDataStreamSettingsUpdatedDataStreamSettings[] +} + +export interface IndicesPutDataStreamSettingsUpdatedDataStreamSettings { + /** The data stream name. 
*/ + name: IndexName + /** If the settings were successfully applied to the data stream (or would have been, if running in `dry_run` + * mode), it is `true`. If an error occurred, it is `false`. */ + applied_to_data_stream: boolean + /** A message explaining why the settings could not be applied to the data stream. */ + error?: string + /** The settings that are specfic to this data stream that will override any settings from the matching index template. */ + settings: IndicesIndexSettings + /** The settings that are effective on this data stream, taking into account the settings from the matching index + * template and the settings specific to this data stream. */ + effective_settings: IndicesIndexSettings + /** Information about whether and where each setting was applied. */ + index_settings_results: IndicesPutDataStreamSettingsIndexSettingResults +} + export interface IndicesPutIndexTemplateIndexTemplateMapping { /** Aliases to add. * If the index template includes a `data_stream` object, these are data stream aliases. @@ -22135,7 +22414,7 @@ export interface InferenceGoogleVertexAITaskSettings { top_n?: integer } -export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' +export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' | 'completion' | 'chat_completion' export interface InferenceHuggingFaceServiceSettings { /** A valid access token for your HuggingFace account. @@ -22198,6 +22477,76 @@ export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceI task_type: InferenceTaskTypeAlibabaCloudAI } +export interface InferenceInferenceEndpointInfoAmazonBedrock extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAmazonBedrock +} + +export interface InferenceInferenceEndpointInfoAnthropic extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAnthropic +} + +export interface InferenceInferenceEndpointInfoAzureAIStudio extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAzureAIStudio +} + +export interface InferenceInferenceEndpointInfoAzureOpenAI extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAzureOpenAI +} + +export interface InferenceInferenceEndpointInfoCohere extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeCohere +} + +export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeELSER +} + +export interface InferenceInferenceEndpointInfoElasticsearch extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeElasticsearch +} + +export interface InferenceInferenceEndpointInfoGoogleAIStudio extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeGoogleAIStudio +} + +export interface InferenceInferenceEndpointInfoGoogleVertexAI extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeGoogleVertexAI +} + +export interface 
InferenceInferenceEndpointInfoHuggingFace extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeHuggingFace +} + export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string @@ -22205,6 +22554,34 @@ export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInference task_type: InferenceTaskTypeJinaAi } +export interface InferenceInferenceEndpointInfoMistral extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeMistral +} + +export interface InferenceInferenceEndpointInfoOpenAI extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeOpenAI +} + +export interface InferenceInferenceEndpointInfoVoyageAI extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeVoyageAI +} + +export interface InferenceInferenceEndpointInfoWatsonx extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeWatsonx +} + export interface InferenceInferenceResult { text_embedding_bytes?: InferenceTextEmbeddingByteResult[] text_embedding_bits?: InferenceTextEmbeddingByteResult[] @@ -22389,8 +22766,36 @@ export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding' +export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion' + +export type InferenceTaskTypeAnthropic = 'completion' + +export type InferenceTaskTypeAzureAIStudio = 'text_embedding' | 'completion' + +export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion' + +export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion' + +export type InferenceTaskTypeELSER = 'sparse_embedding' + +export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embedding' | 'rerank' + +export type InferenceTaskTypeGoogleAIStudio = 'text_embedding' | 'completion' + +export type InferenceTaskTypeGoogleVertexAI = 'text_embedding' | 'rerank' + +export type InferenceTaskTypeHuggingFace = 'text_embedding' + export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank' +export type InferenceTaskTypeMistral = 'text_embedding' + +export type InferenceTaskTypeOpenAI = 'text_embedding' | 'chat_completion' | 'completion' + +export type InferenceTaskTypeVoyageAI = 'text_embedding' | 'rerank' + +export type InferenceTaskTypeWatsonx = 'text_embedding' + export interface InferenceTextEmbeddingByteResult { embedding: InferenceDenseByteVector } @@ -22637,7 +23042,7 @@ export interface InferencePutAmazonbedrockRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfo +export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock export interface InferencePutAnthropicRequest extends RequestBase { /** The task type. 
@@ -22660,7 +23065,7 @@ export interface InferencePutAnthropicRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfo +export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic export interface InferencePutAzureaistudioRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22682,7 +23087,7 @@ export interface InferencePutAzureaistudioRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfo +export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAzureAIStudio export interface InferencePutAzureopenaiRequest extends RequestBase { /** The type of the inference task that the model will perform. @@ -22705,7 +23110,7 @@ export interface InferencePutAzureopenaiRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfo +export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzureOpenAI export interface InferencePutCohereRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22728,7 +23133,7 @@ export interface InferencePutCohereRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutCohereResponse = InferenceInferenceEndpointInfo +export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere export interface InferencePutElasticsearchRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22751,7 +23156,7 @@ export interface InferencePutElasticsearchRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfo +export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoElasticsearch export interface InferencePutElserRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22770,7 +23175,7 @@ export interface InferencePutElserRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, elser_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } } -export type InferencePutElserResponse = InferenceInferenceEndpointInfo +export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER export interface InferencePutGoogleaistudioRequest extends RequestBase { /** The type of the inference task that the model will perform. 
*/ @@ -22789,7 +23194,7 @@ export interface InferencePutGoogleaistudioRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } } -export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfo +export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoGoogleAIStudio export interface InferencePutGooglevertexaiRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22811,7 +23216,7 @@ export interface InferencePutGooglevertexaiRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfo +export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoGoogleVertexAI export interface InferencePutHuggingFaceRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22830,7 +23235,7 @@ export interface InferencePutHuggingFaceRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } } -export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfo +export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace export interface InferencePutJinaaiRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22872,7 +23277,7 @@ export interface InferencePutMistralRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } } -export type InferencePutMistralResponse = InferenceInferenceEndpointInfo +export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral export interface InferencePutOpenaiRequest extends RequestBase { /** The type of the inference task that the model will perform. @@ -22895,7 +23300,7 @@ export interface InferencePutOpenaiRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfo +export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI export interface InferencePutVoyageaiRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22917,7 +23322,7 @@ export interface InferencePutVoyageaiRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfo +export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI export interface InferencePutWatsonxRequest extends RequestBase { /** The task type. 
@@ -22935,7 +23340,7 @@ export interface InferencePutWatsonxRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never } } -export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfo +export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx export interface InferenceRerankRequest extends RequestBase { /** The unique identifier for the inference endpoint. */ @@ -33720,6 +34125,14 @@ export interface SlmSnapshotLifecycle { stats: SlmStatistics } +export interface SlmSnapshotPolicyStats { + policy: string + snapshots_taken: long + snapshots_failed: long + snapshots_deleted: long + snapshot_deletion_failures: long +} + export interface SlmStatistics { retention_deletion_time?: Duration retention_deletion_time_millis?: DurationValue @@ -33830,7 +34243,7 @@ export interface SlmGetStatsResponse { total_snapshot_deletion_failures: long total_snapshots_failed: long total_snapshots_taken: long - policy_stats: string[] + policy_stats: SlmSnapshotPolicyStats[] } export interface SlmGetStatusRequest extends RequestBase { From 4022e4b5b3a4daf1fb92f9aeb7d45426d531afd8 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 10:54:00 -0500 Subject: [PATCH 570/647] Update dependency @types/node to v22.15.30 (#2863) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index b8fae2ec8..58e009f76 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.15.29", + "@types/node": "22.15.30", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 2b3716f87525dd645ca0e840796ac7cf60059fe6 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 10:54:57 -0500 Subject: [PATCH 571/647] Update dependency zx to v8.5.5 (#2864) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 58e009f76..5f5a0357f 100644 --- a/package.json +++ b/package.json @@ -88,7 +88,7 @@ "typescript": "5.8.3", "workq": "3.0.0", "xmlbuilder2": "3.1.1", - "zx": "8.5.4" + "zx": "8.5.5" }, "dependencies": { "@elastic/transport": "^9.0.1", From 59d8c52914e7dad1c86be1857cbb8fa734caae99 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 11 Jun 2025 10:57:53 -0500 Subject: [PATCH 572/647] Trigger auto-publish of unstable builds (#2869) --- .github/workflows/npm-publish-unstable.yml | 86 ++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 .github/workflows/npm-publish-unstable.yml diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml new file mode 100644 index 000000000..848fe70f5 --- /dev/null +++ b/.github/workflows/npm-publish-unstable.yml @@ -0,0 +1,86 @@ +--- +name: Publish unstable builds to npm +on: + push: + branches: + - main + +# kill in-progress action if another one is triggered +concurrency: + group: publish-unstable + cancel-in-progress: true + +jobs: + # don't publish if source 
code has not changed + paths-filter: + name: Detect files changed + runs-on: ubuntu-latest + outputs: + src-only: "${{ steps.changes.outputs.src-only }}" + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + persist-credentials: false + - uses: dorny/paths-filter/@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 + id: changes + with: + filters: | + src: + - 'src/**' + - 'package.json' + - 'tsconfig.json' + - 'index.d.ts' + - 'index.js' + + # pause for 30 minutes to avoid publishing more than 2x per hour + debounce: + name: Publish max 2x per hour + if: steps.changes.outputs.src == 'true' + runs-on: ubuntu-latest + steps: + - name: Debounce 30 minutes + uses: zachary95/github-actions-debounce + with: + wait: 1800 + + # run tests prior to publish to ensure some stability + test: + name: Run tests + needs: debounce + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + persist-credentials: false + ref: main + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + with: + node-version: "22.x" + registry-url: "https://registry.npmjs.org/" + - run: npm install -g npm + - run: npm install + - run: npm test + + # if tests pass, publish unstable + publish: + name: Publish unstable + needs: test + runs-on: ubuntu-latest + steps: + - name: npm publish + run: | + # set unstable version value + unstable_tag=$(echo "unstable.$(date --utc +%Y%m%d%H%M%S)") + latest=$(npm view @elastic/elasticsearch --json | jq -r '.["dist-tags"].latest') + next=$(yes | npx semver -i minor "$latest") + unstable_version=$(echo "$next-$unstable_tag") + + # overwrite package.json with unstable version value + mv package.json package.json.bak + jq --arg v "$unstable_version" ".version = $v" package.json.bak > package.json + rm package.json.bak + + # publish to npm + npm publish --provenance --access public --tag "unstable" + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} From a78c3b1ca2d8b1c3f30e814a4c56c5408a614c25 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 11 Jun 2025 14:53:36 -0500 Subject: [PATCH 573/647] Fix debounce step of unstable publish action (#2870) --- .github/workflows/npm-publish-unstable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index 848fe70f5..048eebec3 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -39,7 +39,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Debounce 30 minutes - uses: zachary95/github-actions-debounce + uses: zachary95/github-actions-debounce@v0.1.0 with: wait: 1800 From 38e77afb295139e9f47098346b272e792f3d0fab Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 11 Jun 2025 14:59:05 -0500 Subject: [PATCH 574/647] Fix invalid conditional in workflow (#2871) --- .github/workflows/npm-publish-unstable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index 048eebec3..5819466d8 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -16,7 +16,7 @@ jobs: name: Detect files changed runs-on: ubuntu-latest outputs: - src-only: "${{ steps.changes.outputs.src-only }}" + src: "${{ steps.changes.outputs.src }}" steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: From 
7672e4c2ec21d8944683b1894a0c0f96f8abaa8b Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 11 Jun 2025 15:09:09 -0500 Subject: [PATCH 575/647] Fix broken path filter condition (#2872) --- .github/workflows/npm-publish-unstable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index 5819466d8..dfc08e4c1 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -35,7 +35,7 @@ jobs: # pause for 30 minutes to avoid publishing more than 2x per hour debounce: name: Publish max 2x per hour - if: steps.changes.outputs.src == 'true' + if: needs.paths-filter.outputs.src == 'true' runs-on: ubuntu-latest steps: - name: Debounce 30 minutes From 5860538a358fd28088829930009ee2bd4578aa6c Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 23 Jun 2025 18:23:44 +0100 Subject: [PATCH 576/647] Auto-generated API code (#2877) --- docs/reference/api-reference.md | 333 ++++++++++---------------------- src/api/api/bulk.ts | 2 +- src/api/api/cluster.ts | 2 +- src/api/api/esql.ts | 9 +- src/api/api/indices.ts | 4 +- src/api/api/inference.ts | 13 +- src/api/api/reindex.ts | 2 +- src/api/api/snapshot.ts | 1 + src/api/api/termvectors.ts | 2 +- src/api/api/watcher.ts | 2 +- src/api/types.ts | 320 +++++++++++++++++++----------- 11 files changed, 329 insertions(+), 361 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 616865f62..ada1c8d88 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -121,6 +121,9 @@ Imagine a `_bulk?refresh=wait_for` request with three documents in it that happe The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all. +You might want to disable the refresh interval temporarily to improve indexing throughput for large bulk requests. +Refer to the linked documentation for step-by-step instructions using the index settings API. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) ```ts @@ -517,7 +520,7 @@ client.deleteByQueryRethrottle({ task_id }) #### Request (object) [_request_delete_by_query_rethrottle] -- **`task_id` (string \| number)**: The ID for the task. +- **`task_id` (string)**: The ID for the task. - **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. ## client.deleteScript [_delete_script] @@ -744,7 +747,7 @@ client.get({ id, index }) - **`id` (string)**: A unique document identifier. - **`index` (string)**: The name of the index that contains the document. -- **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this paramater to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. +- **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. 
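As a quick sketch of how a few of these read options combine in one call (the index name, document ID, and field list here are hypothetical):

```ts
const doc = await client.get({
  index: 'my-index',
  id: '1',
  realtime: true,
  _source_includes: ['title', 'author']
})
```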
- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. - **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). @@ -752,7 +755,7 @@ client.get({ id, index }) - **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. - **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can't be returned;if specified, the request fails. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_fields` option. Object fields can't be returned; if specified, the request fails. - **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. - **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. @@ -1242,7 +1245,7 @@ client.openPointInTime({ index, keep_alive }) - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. - **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. -- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - **`allow_partial_search_results` (Optional, boolean)**: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. - **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. @@ -1339,147 +1342,7 @@ In this case, the response includes a count of the version conflicts that were e Note that the handling of other error types is unaffected by the `conflicts` property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. -NOTE: The reindex API makes no effort to handle ID collisions. -The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. -Instead, make sure that IDs are unique by using a script. - -**Running reindex asynchronously** - -If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. -Elasticsearch creates a record of this task as a document at `_tasks/`. - -**Reindex from multiple sources** - -If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. -That way you can resume the process if there are any errors by removing the partially completed source and starting over. -It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. - -For example, you can use a bash script like this: - -``` -for index in i1 i2 i3 i4 i5; do - curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ - "source": { - "index": "'$index'" - }, - "dest": { - "index": "'$index'-reindexed" - } - }' -done -``` - -**Throttling** - -Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. -Requests are throttled by padding each batch with a wait time. -To turn off throttling, set `requests_per_second` to `-1`. - -The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. -The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. 
-By default the batch size is `1000`, so if `requests_per_second` is set to `500`: - -``` -target_time = 1000 / 500 per second = 2 seconds -wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds -``` - -Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. -This is "bursty" instead of "smooth". - -**Slicing** - -Reindex supports sliced scroll to parallelize the reindexing process. -This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. - -NOTE: Reindexing from remote clusters does not support manual or automatic slicing. - -You can slice a reindex request manually by providing a slice ID and total number of slices to each request. -You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. -The `slices` parameter specifies the number of slices to use. - -Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: - -* You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. -* Fetching the status of the task for the request with `slices` only contains the status of completed slices. -* These sub-requests are individually addressable for things like cancellation and rethrottling. -* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. -* Canceling the request with `slices` will cancel each sub-request. -* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. -* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. -* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. - -If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. -If slicing manually or otherwise tuning automatic slicing, use the following guidelines. - -Query performance is most efficient when the number of slices is equal to the number of shards in the index. -If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. -Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. - -Indexing performance scales linearly across available resources with the number of slices. - -Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. - -**Modify documents during reindexing** - -Like `_update_by_query`, reindex operations support a script that modifies the document. -Unlike `_update_by_query`, the script is allowed to modify the document's metadata. - -Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. -For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. 
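As a concrete illustration of the scripted behavior described above, a sliced reindex that skips flagged documents might be sketched like this with the JavaScript client. The index names and the `skip` field are hypothetical, and `slices: 'auto'` opts into the automatic slicing discussed earlier.

```ts
// A sketch of a scripted, sliced reindex. 'source-index', 'dest-index',
// and the 'skip' flag are hypothetical names, not part of the API.
const response = await client.reindex({
  slices: 'auto',
  source: { index: 'source-index' },
  dest: { index: 'dest-index' },
  script: {
    lang: 'painless',
    // Mark flagged documents as 'noop' so they are counted but not copied.
    source: "if (ctx._source.skip == true) { ctx.op = 'noop' }"
  }
})
```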
This "no operation" will be reported in the `noop` counter in the response body. -Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. -The deletion will be reported in the `deleted` counter in the response body. -Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. - -Think of the possibilities! Just be careful; you are able to change: - -* `_id` -* `_index` -* `_version` -* `_routing` - -Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. -It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. - -**Reindex from remote** - -Reindex supports reindexing from a remote Elasticsearch cluster. -The `host` parameter must contain a scheme, host, port, and optional path. -The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. -Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. -There are a range of settings available to configure the behavior of the HTTPS connection. - -When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. -Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. -It can be set to a comma delimited list of allowed remote host and port combinations. -Scheme is ignored; only the host and port are used. -For example: - -``` -reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"] -``` - -The list of allowed hosts must be configured on any nodes that will coordinate the reindex. -This feature should work with remote clusters of any version of Elasticsearch. -This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. - -WARNING: Elasticsearch does not support forward compatibility across major versions. -For example, you cannot reindex from a 7.x cluster into a 6.x cluster. - -To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. - -NOTE: Reindexing from remote clusters does not support manual or automatic slicing. - -Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. -If the remote index includes very large documents you'll need to use a smaller batch size. -It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. -Both default to 30 seconds. - -**Configuring SSL parameters** - -Reindex from remote supports configurable SSL settings. -These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. -It is not possible to configure SSL in the body of the reindex request. +Refer to the linked documentation for examples of how to reindex documents. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) @@ -1657,7 +1520,7 @@ client.search({ ... }) - **`size` (Optional, number)**: The number of hits to return, which must not be negative. 
By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. - **`slice` (Optional, { field, id, max })**: Split a scrolled search into multiple slices that can be consumed independently. - **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: A list of : pairs. -- **`_source` (Optional, boolean \| { excludes, includes })**: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. +- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. - **`fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. - **`suggest` (Optional, { text })**: Defines a suggester that provides similar looking terms based on a provided text. - **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. @@ -1886,7 +1749,7 @@ client.searchShards({ ... }) - **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. 
- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. - **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out. @@ -1913,7 +1776,7 @@ client.searchTemplate({ ... }) - **`source` (Optional, string \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats })**: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. -- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - **`ignore_throttled` (Optional, boolean)**: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. @@ -1992,6 +1855,7 @@ The information is only retrieved for the shard the requested document resides i The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. +Refer to the linked documentation for detailed examples of how to use this API.
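For instance, requesting term vectors with term statistics for a single stored document might be sketched as follows; the index, document ID, and field name are hypothetical.

```ts
// A minimal term vectors request for one stored document.
const tv = await client.termvectors({
  index: 'my-index',
  id: '1',
  fields: ['text'],
  term_statistics: true // also return collection-wide term statistics
})
```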
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors) @@ -2056,7 +1920,7 @@ client.update({ id, index }) - **`doc_as_upsert` (Optional, boolean)**: If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. - **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document. - **`scripted_upsert` (Optional, boolean)**: If `true`, run the script whether or not the document exists. -- **`_source` (Optional, boolean \| { excludes, includes })**: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve. +- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve. - **`upsert` (Optional, object)**: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run. - **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. - **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. @@ -2177,7 +2041,7 @@ client.updateByQuery({ index }) - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. - **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. -- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - **`from` (Optional, number)**: Skips the specified number of documents. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. @@ -2335,7 +2199,7 @@ than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. 
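A minimal sketch of that `search_after` flow, assuming a hypothetical `my-index` with a sortable `timestamp` field:

```ts
// Fetch one page, then use the sort values of its last hit as the cursor.
const page = await client.search({
  index: 'my-index',
  size: 1000,
  sort: ['timestamp'],
  query: { match_all: {} }
})
const last = page.hits.hits[page.hits.hits.length - 1]
if (last?.sort != null) {
  const nextPage = await client.search({
    index: 'my-index',
    size: 1000,
    sort: ['timestamp'],
    query: { match_all: {} },
    search_after: last.sort // continue after the previous page
  })
  console.log(nextPage.hits.hits.length)
}
```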
- **`slice` (Optional, { field, id, max })** - **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])** -- **`_source` (Optional, boolean \| { excludes, includes })**: Indicates which source fields are returned for matching documents. These +- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. - **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. @@ -3537,6 +3401,7 @@ Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. +Refer to the linked documentation for examples of how to troubleshoot allocation issues using this API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain) @@ -3640,6 +3505,7 @@ If no response is received before the timeout expires, the request fails and ret ## client.cluster.getSettings [_cluster.get_settings] Get cluster-wide settings. + By default, it returns only settings that have been explicitly defined. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings) @@ -3849,8 +3715,8 @@ client.cluster.putSettings({ ... }) ### Arguments [_arguments_cluster.put_settings] #### Request (object) [_request_cluster.put_settings] -- **`persistent` (Optional, Record)** -- **`transient` (Optional, Record)** +- **`persistent` (Optional, Record)**: The settings that persist after the cluster restarts. +- **`transient` (Optional, Record)**: The settings that do not persist after the cluster restarts. - **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) - **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node - **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout @@ -4777,6 +4643,12 @@ count. By default, the request waits for 1 second for the query results. If the query completes during this period, results are returned. Otherwise, a query ID is returned that can later be used to retrieve the results. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The period for which the query and its results are stored in the cluster. +The default period is five days. +When this period expires, the query and its results are deleted, even if the query is still ongoing. +If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. +- **`keep_on_completion` (Optional, boolean)**: Indicates whether the query and its results are stored in the cluster. +If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter.
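Putting the two parameters above together, an async ES|QL query might be sketched like this; the query text and index name are hypothetical.

```ts
// Start an async ES|QL query and keep its results for one day.
const response = await client.esql.asyncQuery({
  query: 'FROM my-index | STATS count = COUNT(*)',
  wait_for_completion_timeout: '2s',
  keep_on_completion: true,
  keep_alive: '1d'
})
// If the query did not finish within two seconds, response.id can be
// polled later with client.esql.asyncQueryGet({ id: response.id }).
```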
- **`allow_partial_results` (Optional, boolean)**: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. If `false`, the query will fail if there are any failures. @@ -4786,12 +4658,6 @@ It is valid only for the CSV format. - **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. - **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, for example `json` or `yaml`. -- **`keep_alive` (Optional, string \| -1 \| 0)**: The period for which the query and its results are stored in the cluster. -The default period is five days. -When this period expires, the query and its results are deleted, even if the query is still ongoing. -If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. -- **`keep_on_completion` (Optional, boolean)**: Indicates whether the query and its results are stored in the cluster. -If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. ## client.esql.asyncQueryDelete [_esql.async_query_delete] Delete an async ES|QL query. @@ -5075,7 +4941,7 @@ than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. - **`slice` (Optional, { field, id, max })** - **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])** -- **`_source` (Optional, boolean \| { excludes, includes })**: Indicates which source fields are returned for matching documents. These +- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. - **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. @@ -5492,7 +5358,6 @@ This behavior applies even if the request targets other open indices. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`fielddata` (Optional, boolean)**: If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. - **`fields` (Optional, string \| string[])**: List of field names used to limit the `fielddata` parameter. @@ -5602,7 +5467,6 @@ This behavior applies even if the request targets other open indices. 
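For example, clearing only the fielddata cache for a single field might be sketched as follows (the index and field names are hypothetical):

```ts
// Clear fielddata for one field only, leaving other caches untouched.
await client.indices.clearCache({
  index: 'my-index',
  fielddata: true,
  fields: 'my_field'
})
```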
- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -5753,7 +5617,6 @@ This behavior applies even if the request targets other open indices. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -5949,7 +5812,6 @@ This behavior applies even if the request targets other open indices. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. @@ -5977,7 +5839,6 @@ This behavior applies even if the request targets other open indices. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -6100,7 +5961,6 @@ This behavior applies even if the request targets other open indices. 
- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`force` (Optional, boolean)**: If `true`, the request forces a flush even if there are no changes to commit to the index. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`wait_if_ongoing` (Optional, boolean)**: If `true`, the flush operation blocks until execution when another flush operation is running. @@ -6232,7 +6092,6 @@ This behavior applies even if the request targets other open indices. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -6256,7 +6115,6 @@ Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -6312,7 +6170,6 @@ Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.getDataStreamSettings [_indices.get_data_stream_settings] @@ -6360,7 +6217,6 @@ This behavior applies even if the request targets other open indices. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. @@ -6404,7 +6260,6 @@ This behavior applies even if the request targets other open indices. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. @@ -6587,7 +6442,6 @@ This behavior applies even if the request targets other open indices. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -6682,7 +6536,6 @@ When empty, every document in this data stream will be stored indefinitely. that's disabled (enabled: `false`) will have no effect on the data stream. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `hidden`, `open`, `closed`, `none`. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -6708,7 +6561,6 @@ To target all data streams use `*` or `_all`. - **`failure_store` (Optional, { enabled, lifecycle })**: If defined, it will update the failure store configuration of every data stream resolved by the name expression. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `hidden`, `open`, `closed`, `none`. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. @@ -6819,33 +6671,17 @@ If no response is received before the timeout expires, the request fails and ret ## client.indices.putMapping [_indices.put_mapping] Update field mappings. Add new fields to an existing data stream or index. -You can also use this API to change the search settings of existing fields and add new properties to existing object fields. -For data streams, these changes are applied to all backing indices by default. - -**Add multi-fields to an existing field** - -Multi-fields let you index the same field in different ways. -You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. -WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. -You can populate the new multi-field with the update by query API. - -**Change supported mapping parameters for an existing field** - -The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. -For example, you can use the update mapping API to update the `ignore_above` parameter. - -**Change the mapping of an existing field** - -Except for supported mapping parameters, you can't change the mapping or field type of an existing field. -Changing an existing field could invalidate data that's already indexed. - -If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. -If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. +You can use the update mapping API to: -**Rename a field** +- Add a new field to an existing index +- Update mappings for multiple indices in a single request +- Add new properties to an object field +- Enable multi-fields for an existing field +- Update supported mapping parameters +- Change a field's mapping using reindexing +- Rename a field using a field alias -Renaming a field would invalidate data already indexed under the old field name. -Instead, add an alias field to create an alternate field name. +Learn how to use the update mapping API with practical examples in the [Update mapping API examples](https://www.elastic.co/docs/manage-data/data-store/mapping/update-mappings-examples) guide. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) @@ -6881,7 +6717,6 @@ This behavior applies even if the request targets other open indices. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -6898,7 +6733,9 @@ To revert a setting to the default value, use a null value.
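Both behaviors can be sketched together, assuming a hypothetical `my-index`: disable the refresh interval during a heavy bulk load, then pass a null value to revert the setting to its default.

```ts
// Disable refreshes while bulk indexing.
await client.indices.putSettings({
  index: 'my-index',
  settings: { 'index.refresh_interval': '-1' }
})
// ... run the bulk load ...
// Revert the setting to its default with a null value.
await client.indices.putSettings({
  index: 'my-index',
  settings: { 'index.refresh_interval': null }
})
```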
The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. - There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example: +For performance optimization during bulk indexing, you can disable the refresh interval. +Refer to [disable refresh interval](https://www.elastic.co/docs/deploy-manage/production-guidance/optimize-performance/indexing-speed#disable-refresh-interval) for an example. +There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example: ``` { @@ -6942,6 +6779,7 @@ Then roll over the data stream to apply the new analyzer to the stream's write i This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. +Refer to [updating analyzers on existing indices](https://www.elastic.co/docs/manage-data/data-store/text-analysis/specify-an-analyzer#update-analyzers-on-existing-indices) for step-by-step examples. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings) @@ -7101,7 +6939,6 @@ This behavior applies even if the request targets other open indices. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. ## client.indices.reloadSearchAnalyzers [_indices.reload_search_analyzers] @@ -7205,7 +7042,6 @@ options to the `_resolve/cluster` API endpoint that takes no index expression. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. - **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen. @@ -7242,7 +7078,6 @@ Resources on remote clusters can be specified using the ``:`` syn - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. 
-Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. @@ -7344,7 +7179,6 @@ This behavior applies even if the request targets other open indices. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. ## client.indices.shardStores [_indices.shard_stores] @@ -7636,7 +7470,6 @@ This parameter can only be used when the `q` query string parameter is specified - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - **`explain` (Optional, boolean)**: If `true`, the response returns detailed information if an error has occurred. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. @@ -7652,7 +7485,7 @@ It only works with the `chat_completion` task type for `openai` and `elastic` in NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. -If you use the `openai` service or the `elastic` service, use the Chat completion inference API. +If you use the `openai`, `hugging_face` or the `elastic` service, use the Chat completion inference API. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference) @@ -7758,6 +7591,24 @@ IMPORTANT: The inference APIs enable you to use certain services, such as built- For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. +The following integrations are available through the inference API. 
You can find the available task types next to the integration name: +* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) +* Amazon Bedrock (`completion`, `text_embedding`) +* Anthropic (`completion`) +* Azure AI Studio (`completion`, `text_embedding`) +* Azure OpenAI (`completion`, `text_embedding`) +* Cohere (`completion`, `rerank`, `text_embedding`) +* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) +* ELSER (`sparse_embedding`) +* Google AI Studio (`completion`, `text_embedding`) +* Google Vertex AI (`rerank`, `text_embedding`) +* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) +* Mistral (`chat_completion`, `completion`, `text_embedding`) +* OpenAI (`chat_completion`, `completion`, `text_embedding`) +* VoyageAI (`text_embedding`, `rerank`) +* Watsonx inference integration (`text_embedding`) +* JinaAI (`text_embedding`, `rerank`) + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put) ```ts @@ -7768,7 +7619,7 @@ client.inference.put({ inference_id }) #### Request (object) [_request_inference.put] - **`inference_id` (string)**: The inference Id -- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type +- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type. Refer to the integration list in the API description for the available task types. - **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** ## client.inference.putAlibabacloud [_inference.put_alibabacloud] @@ -7796,7 +7647,7 @@ These settings are specific to the task type you specified. ## client.inference.putAmazonbedrock [_inference.put_amazonbedrock] Create an Amazon Bedrock inference endpoint. -Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. +Create an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. @@ -8032,12 +7883,15 @@ These settings are specific to the task type you specified. Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. +Supported tasks include: `text_embedding`, `completion`, and `chat_completion`. -You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. -Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. -Create the endpoint and copy the URL after the endpoint initialization has been finished. +To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint. +Select a model that supports the task you intend to use. 
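Once the endpoint URL has been obtained (the task-specific notes that follow describe how), the client call might be sketched as below; the endpoint ID, token, and URL are hypothetical placeholders.

```ts
await client.inference.putHuggingFace({
  task_type: 'text_embedding',
  huggingface_inference_id: 'my-hf-embeddings', // hypothetical endpoint ID
  service: 'hugging_face',
  service_settings: {
    api_key: 'HF_TOKEN', // hypothetical access token
    url: 'https://my-endpoint.endpoints.huggingface.cloud/' // hypothetical endpoint URL
  }
})
```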
-The following models are recommended for the Hugging Face service: +For Elastic's `text_embedding` task: +The selected model must support the `Sentence Embeddings` task. On the new endpoint creation page, select the `Sentence Embeddings` task under the `Advanced Configuration` section. +After the endpoint has initialized, copy the generated endpoint URL. +Recommended models for `text_embedding` task: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` @@ -8047,6 +7901,24 @@ The following models are recommended for the Hugging Face service: * `multilingual-e5-base` * `multilingual-e5-small` +For Elastic's `chat_completion` and `completion` tasks: +The selected model must support the `Text Generation` task and expose OpenAI API. HuggingFace supports both serverless and dedicated endpoints for `Text Generation`. When creating dedicated endpoint select the `Text Generation` task. +After the endpoint is initialized (for dedicated) or ready (for serverless), ensure it supports the OpenAI API and includes `/v1/chat/completions` part in URL. Then, copy the full endpoint URL for use. +Recommended models for `chat_completion` and `completion` tasks: + +* `Mistral-7B-Instruct-v0.2` +* `QwQ-32B` +* `Phi-3-mini-128k-instruct` + +For Elastic's `rerank` task: +The selected model must support the `sentence-ranking` task and expose OpenAI API. +HuggingFace supports only dedicated (not serverless) endpoints for `Rerank` so far. +After the endpoint is initialized, copy the full endpoint URL for use. +Tested models for `rerank` task: + +* `bge-reranker-base` +* `jina-reranker-v1-turbo-en-GGUF` + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face) ```ts @@ -8056,11 +7928,13 @@ client.inference.putHuggingFace({ task_type, huggingface_inference_id, service, ### Arguments [_arguments_inference.put_hugging_face] #### Request (object) [_request_inference.put_hugging_face] -- **`task_type` (Enum("text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("chat_completion" \| "completion" \| "rerank" \| "text_embedding"))**: The type of the inference task that the model will perform. - **`huggingface_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("hugging_face"))**: The type of service supported for the specified task type. In this case, `hugging_face`. -- **`service_settings` ({ api_key, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `hugging_face` service. +- **`service_settings` ({ api_key, rate_limit, url, model_id })**: Settings used to install the inference model. These settings are specific to the `hugging_face` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { return_documents, top_n })**: Settings to configure the inference task. +These settings are specific to the task type you specified. ## client.inference.putJinaai [_inference.put_jinaai] Create an JinaAI inference endpoint. @@ -8090,7 +7964,7 @@ These settings are specific to the task type you specified. ## client.inference.putMistral [_inference.put_mistral] Create a Mistral inference endpoint. -Creates an inference endpoint to perform an inference task with the `mistral` service. +Create an inference endpoint to perform an inference task with the `mistral` service. 
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral) @@ -8101,8 +7975,7 @@ client.inference.putMistral({ task_type, mistral_inference_id, service, service_ ### Arguments [_arguments_inference.put_mistral] #### Request (object) [_request_inference.put_mistral] -- **`task_type` (Enum("text_embedding"))**: The task type. -The only valid task type for the model to perform is `text_embedding`. +- **`task_type` (Enum("text_embedding" \| "completion" \| "chat_completion"))**: The type of the inference task that the model will perform. - **`mistral_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("mistral"))**: The type of service supported for the specified task type. In this case, `mistral`. - **`service_settings` ({ api_key, max_input_tokens, model, rate_limit })**: Settings used to install the inference model. These settings are specific to the `mistral` service. @@ -8171,8 +8044,7 @@ client.inference.putWatsonx({ task_type, watsonx_inference_id, service, service_ ### Arguments [_arguments_inference.put_watsonx] #### Request (object) [_request_inference.put_watsonx] -- **`task_type` (Enum("text_embedding"))**: The task type. -The only valid task type for the model to perform is `text_embedding`. +- **`task_type` (Enum("text_embedding" \| "chat_completion" \| "completion"))**: The type of the inference task that the model will perform. - **`watsonx_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("watsonxai"))**: The type of service supported for the specified task type. In this case, `watsonxai`. - **`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service. @@ -10093,13 +9965,7 @@ client.ml.putJob({ job_id, analysis_config, data_description }) - **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines -whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: - -* `all`: Match any data stream or index, including hidden ones. -* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. -* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. -* `none`: Wildcard patterns are not accepted. -* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +whether wildcard expressions match hidden data streams. Supports a list of values. - **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen. - **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored. @@ -10558,13 +10424,7 @@ The maximum value is the value of `index.max_result_window`. - **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. 
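To ground the datafeed-related options above, a bare-bones anomaly detection job might be sketched like this; the job ID, bucket span, and field names are hypothetical.

```ts
// A minimal anomaly detection job definition.
await client.ml.putJob({
  job_id: 'my-anomaly-job',
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'responsetime' }]
  },
  data_description: { time_field: 'timestamp' }
})
```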
This includes the `_all` string or when no indices are specified. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines -whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: - -* `all`: Match any data stream or index, including hidden ones. -* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. -* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. -* `none`: Wildcard patterns are not accepted. -* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +whether wildcard expressions match hidden data streams. Supports a list of values. - **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen. - **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored. @@ -13817,6 +13677,8 @@ Note that the wildcard pattern `*` matches all snapshots created by an SLM polic To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. - **`sort` (Optional, Enum("start_time" \| "duration" \| "name" \| "index_count" \| "repository" \| "shard_count" \| "failed_shard_count"))**: The sort order for the result. The default behavior is sorting by snapshot start time stamp. +- **`state` (Optional, Enum("IN_PROGRESS" \| "SUCCESS" \| "FAILED" \| "PARTIAL" \| "INCOMPATIBLE") \| Enum("IN_PROGRESS" \| "SUCCESS" \| "FAILED" \| "PARTIAL" \| "INCOMPATIBLE")[])**: Only return snapshots with a state found in the given list of snapshot states. +The default is all snapshot states. - **`verbose` (Optional, boolean)**: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. @@ -14527,7 +14389,7 @@ client.tasks.cancel({ ... }) ### Arguments [_arguments_tasks.cancel] #### Request (object) [_request_tasks.cancel] -- **`task_id` (Optional, string \| number)**: The task identifier. +- **`task_id` (Optional, string)**: The task identifier. - **`actions` (Optional, string \| string[])**: A list or wildcard expression of actions that is used to limit the request. - **`nodes` (Optional, string[])**: A list of node IDs or names that is used to limit the request. - **`parent_task_id` (Optional, string)**: A parent task ID that is used to limit the tasks. @@ -15367,6 +15229,7 @@ The reason for this behavior is to prevent overwriting the watch status from a w Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false). +To demonstrate how throttling works in practice and how it can be configured for individual actions within a watch, refer to External documentation. 
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch) diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index c2e31ffdd..eba771724 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -54,7 +54,7 @@ const acceptedParams: Record (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index f1e879d6b..06f615279 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -220,7 +220,7 @@ export default class Cluster { } /** - * Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. + * Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. Refer to the linked documentation for examples of how to troubleshoot allocation issues using this API. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain | Elasticsearch API documentation} */ async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index 50711262b..29ef532e8 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -46,16 +46,15 @@ export default class Esql { 'query', 'tables', 'include_ccs_metadata', - 'wait_for_completion_timeout' + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion' ], query: [ 'allow_partial_results', 'delimiter', 'drop_null_columns', - 'format', - 'keep_alive', - 'keep_on_completion', - 'wait_for_completion_timeout' + 'format' ] }, 'esql.async_query_delete': { diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index f5cec4c51..2f06377d5 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -3402,7 +3402,7 @@ export default class Indices { } /** - * Update field mappings. Add new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields and add new properties to existing object fields. For data streams, these changes are applied to all backing indices by default. **Add multi-fields to an existing field** Multi-fields let you index the same field in different ways. You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. You can populate the new multi-field with the update by query API. 
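The `esql.async_query` change above moves `keep_alive` and `keep_on_completion` from the query string into the request body. The call shape from the client is unchanged, so existing code keeps working; a sketch, with `logs-*` as a placeholder index pattern:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const pending = await client.esql.asyncQuery({
  query: 'FROM logs-* | STATS count = COUNT(*) BY host.name | LIMIT 10',
  wait_for_completion_timeout: '2s',
  keep_on_completion: true, // now serialized into the request body
  keep_alive: '1d'          // likewise moved from the query string to the body
})

if (pending.is_running) {
  // Poll for the stored result using the returned async query ID.
  const result = await client.esql.asyncQueryGet({ id: pending.id! })
  console.log(result.values)
}
```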
**Change supported mapping parameters for an existing field** The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. For example, you can use the update mapping API to update the `ignore_above` parameter. **Change the mapping of an existing field** Except for supported mapping parameters, you can't change the mapping or field type of an existing field. Changing an existing field could invalidate data that's already indexed. If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. **Rename a field** Renaming a field would invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name. + * Update field mappings. Add new fields to an existing data stream or index. You can use the update mapping API to: - Add a new field to an existing index - Update mappings for multiple indices in a single request - Add new properties to an object field - Enable multi-fields for an existing field - Update supported mapping parameters - Change a field's mapping using reindexing - Rename a field using a field alias Learn how to use the update mapping API with practical examples in the [Update mapping API examples](https://www.elastic.co/docs//manage-data/data-store/mapping/update-mappings-examples) guide. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping | Elasticsearch API documentation} */ async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -3459,7 +3459,7 @@ export default class Indices { } /** - * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example: ``` { "number_of_replicas": 1 } ``` Or you can use an `index` setting object: ``` { "index": { "number_of_replicas": 1 } } ``` Or you can use dot annotation: ``` { "index.number_of_replicas": 1 } ``` Or you can embed any of the aforementioned options in a `settings` object. For example: ``` { "settings": { "index": { "number_of_replicas": 1 } } } ``` NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. 
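As a sketch of two of the update mapping use cases listed above, adding a brand-new field and enabling a multi-field on an existing one; the index and field names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.indices.putMapping({
  index: 'my-index',
  properties: {
    user_id: { type: 'keyword' }, // new field
    city: {
      type: 'text',               // repeat the existing type of the field
      fields: {
        // New multi-field: existing documents will not have values for it
        // until they are reindexed or touched by an update-by-query.
        raw: { type: 'keyword' }
      }
    }
  }
})
```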
+ * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. For performance optimization during bulk indexing, you can disable the refresh interval. Refer to [disable refresh interval](https://www.elastic.co/docs/deploy-manage/production-guidance/optimize-performance/indexing-speed#disable-refresh-interval) for an example. There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example: ``` { "number_of_replicas": 1 } ``` Or you can use an `index` setting object: ``` { "index": { "number_of_replicas": 1 } } ``` Or you can use dot annotation: ``` { "index.number_of_replicas": 1 } ``` Or you can embed any of the aforementioned options in a `settings` object. For example: ``` { "settings": { "index": { "number_of_replicas": 1 } } } ``` NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. Refer to [updating analyzers on existing indices](https://www.elastic.co/docs/manage-data/data-store/text-analysis/specify-an-analyzer#update-analyzers-on-existing-indices) for step-by-step examples. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings | Elasticsearch API documentation} */ async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 4cbdd53dc..f50074f85 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -237,7 +237,8 @@ export default class Inference { body: [ 'chunking_settings', 'service', - 'service_settings' + 'service_settings', + 'task_settings' ], query: [] }, @@ -364,7 +365,7 @@ export default class Inference { } /** - * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API. 
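The updated `putSettings` summary above calls out disabling the refresh interval during bulk indexing. A sketch of that pattern, with `my-index` as a placeholder name:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Turn refreshes off while loading data.
await client.indices.putSettings({
  index: 'my-index',
  settings: { 'index.refresh_interval': '-1' }
})

// ... run the bulk load here ...

// A null value reverts the setting to its default.
await client.indices.putSettings({
  index: 'my-index',
  settings: { 'index.refresh_interval': null }
})
```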
+ * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai`, `hugging_face` or the `elastic` service, use the Chat completion inference API. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference | Elasticsearch API documentation} */ async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -643,7 +644,7 @@ export default class Inference { } /** - * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. 
You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`) * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} */ async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -756,7 +757,7 @@ export default class Inference { } /** - * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. + * Create an Amazon Bedrock inference endpoint. Create an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock | Elasticsearch API documentation} */ async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1278,7 +1279,7 @@ export default class Inference { } /** - * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` + * Create a Hugging Face inference endpoint. 
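All of the integrations listed above are created through the same generic `inference.put` call, passing the endpoint definition in the request. A sketch, assuming the body is passed as `inference_config` as in recent client versions and using the built-in `elasticsearch` service; the endpoint ID and allocation numbers are illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.inference.put({
  task_type: 'text_embedding',
  inference_id: 'my-e5-endpoint', // placeholder ID
  inference_config: {
    service: 'elasticsearch',     // serves built-in models such as E5
    service_settings: {
      model_id: '.multilingual-e5-small',
      num_allocations: 1,
      num_threads: 1
    }
  }
})
```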
Create an inference endpoint to perform an inference task with the `hugging_face` service. Supported tasks include: `text_embedding`, `completion`, and `chat_completion`. To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint. Select a model that supports the task you intend to use. For Elastic's `text_embedding` task: The selected model must support the `Sentence Embeddings` task. On the new endpoint creation page, select the `Sentence Embeddings` task under the `Advanced Configuration` section. After the endpoint has initialized, copy the generated endpoint URL. Recommended models for `text_embedding` task: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` For Elastic's `chat_completion` and `completion` tasks: The selected model must support the `Text Generation` task and expose OpenAI API. HuggingFace supports both serverless and dedicated endpoints for `Text Generation`. When creating dedicated endpoint select the `Text Generation` task. After the endpoint is initialized (for dedicated) or ready (for serverless), ensure it supports the OpenAI API and includes `/v1/chat/completions` part in URL. Then, copy the full endpoint URL for use. Recommended models for `chat_completion` and `completion` tasks: * `Mistral-7B-Instruct-v0.2` * `QwQ-32B` * `Phi-3-mini-128k-instruct` For Elastic's `rerank` task: The selected model must support the `sentence-ranking` task and expose OpenAI API. HuggingFace supports only dedicated (not serverless) endpoints for `Rerank` so far. After the endpoint is initialized, copy the full endpoint URL for use. Tested models for `rerank` task: * `bge-reranker-base` * `jina-reranker-v1-turbo-en-GGUF` * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face | Elasticsearch API documentation} */ async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1394,7 +1395,7 @@ export default class Inference { } /** - * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. + * Create a Mistral inference endpoint. Create an inference endpoint to perform an inference task with the `mistral` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral | Elasticsearch API documentation} */ async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 3f1e31fd9..c5af7c2bb 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -53,7 +53,7 @@ const acceptedParams: Record`. **Reindex from multiple sources** If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. That way you can resume the process if there are any errors by removing the partially completed source and starting over. It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. 
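Following the expanded Hugging Face description above, a sketch of a `chat_completion` endpoint; the endpoint ID, URL, and API key are placeholders, and per the docs the URL must include the OpenAI-compatible `/v1/chat/completions` path:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.inference.putHuggingFace({
  task_type: 'chat_completion', // now supported alongside text_embedding and completion
  huggingface_inference_id: 'my-hf-chat',
  service: 'hugging_face',
  service_settings: {
    api_key: process.env.HF_API_KEY!, // placeholder; read from the environment
    // Placeholder URL; the deployed model must expose the OpenAI chat schema.
    url: 'https://my-endpoint.endpoints.huggingface.cloud/v1/chat/completions'
  }
})
```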
For example, you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. Requests are throttled by padding each batch with a wait time. To turn off throttling, set `requests_per_second` to `-1`. The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". **Slicing** Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. You can slice a reindex request manually by providing a slice ID and total number of slices to each request. You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use. Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: * You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. * Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use the following guidelines. Query performance is most efficient when the number of slices is equal to the number of shards in the index. If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. 
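To make the throttling arithmetic above concrete: with `requests_per_second: 500` and the default batch size of 1000, each batch targets two seconds, so a half-second write is padded with roughly 1.5 seconds of wait. A sketch combining throttling with automatic slicing, using placeholder index names:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Run as a background task rather than blocking the request.
const task = await client.reindex({
  source: { index: 'my-index' },
  dest: { index: 'my-index-reindexed' },
  requests_per_second: 500, // batch of 1000 -> ~1.5s wait between batches
  slices: 'auto',           // roughly one slice per shard, within limits
  wait_for_completion: false
})
console.log('reindex task:', task.task)
```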
Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. Indexing performance scales linearly across available resources with the number of slices. Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. **Modify documents during reindexing** Like `_update_by_query`, reindex operations support a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document's metadata. Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body. Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. Think of the possibilities! Just be careful; you are able to change: * `_id` * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. **Reindex from remote** Reindex supports reindexing from a remote Elasticsearch cluster. The `host` parameter must contain a scheme, host, port, and optional path. The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. There is a range of settings available to configure the behavior of the HTTPS connection. When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. It can be set to a comma-delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*] ``` The list of allowed hosts must be configured on any nodes that will coordinate the reindex. This feature should work with remote clusters of any version of Elasticsearch. This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. WARNING: Elasticsearch does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you'll need to use a smaller batch size.
It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request. + * Reindex documents. Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time. If the Elasticsearch security features are enabled, you must have the following security privileges: * The `read` index privilege for the source data stream, index, or alias. * The `write` index privilege for the destination data stream, index, or index alias. * To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. * If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled. The `dest` element can be configured like the index API to control optimistic concurrency control. Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict. IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream. By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. 
Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. Refer to the linked documentation for examples of how to reindex documents. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex | Elasticsearch API documentation} */ export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index 72adcede7..f94b58e86 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -129,6 +129,7 @@ export default class Snapshot { 'size', 'slm_policy_filter', 'sort', + 'state', 'verbose' ] }, diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index a2f343d11..7d1378924 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -65,7 +65,7 @@ const acceptedParams: Record warn > Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. **Behaviour** The term and field statistics are not accurate. Deleted documents are not taken into account. The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. + * Get term vector information. Get information and statistics about terms in the fields of a particular document. You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. For example: ``` GET /my-index-000001/_termvectors/1?fields=message ``` Fields can be specified using wildcards, similar to the multi match query. Term vectors are real-time by default, not near real-time. This can be changed by setting `realtime` parameter to `false`. You can request three types of values: _term information_, _term statistics_, and _field statistics_. By default, all term information and field statistics are returned for all fields but term statistics are excluded. **Term information** * term frequency in the field (always returned) * term positions (`positions: true`) * start and end offsets (`offsets: true`) * term payloads (`payloads: true`), as base64 encoded bytes If the requested information wasn't stored in the index, it will be computed on the fly if possible. Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. > warn > Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. **Behaviour** The term and field statistics are not accurate. Deleted documents are not taken into account. 
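A sketch of the append-only rules above: reindexing into a data stream with `op_type: 'create'`, and `conflicts: 'proceed'` so version conflicts are counted rather than aborting the run. The source index and data stream names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.reindex({
  conflicts: 'proceed', // report conflicts in the response instead of failing
  source: { index: 'old-logs' },
  // Data streams are append-only, so only 'create' operations are allowed.
  dest: { index: 'logs-app-default', op_type: 'create' }
})
console.log('created:', result.created, 'version conflicts:', result.version_conflicts)
```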
The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. Refer to the linked documentation for detailed examples of how to use this API. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors | Elasticsearch API documentation} */ export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index 5aa5712f4..848ab5af5 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -166,7 +166,7 @@ export default class Watcher { } /** - * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions..ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false). + * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions..ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false). To demonstrate how throttling works in practice and how it can be configured for individual actions within a watch, refer to External documentation. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch | Elasticsearch API documentation} */ async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index 270b9b3df..f1a7b5337 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -812,7 +812,7 @@ export interface GetRequest extends RequestBase { /** The name of the index that contains the document. */ index: IndexName /** Indicates whether the request forces synthetic `_source`. - * Use this paramater to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. + * Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. * Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. */ force_synthetic_source?: boolean /** The node or shard the operation should be performed on. @@ -844,8 +844,8 @@ export interface GetRequest extends RequestBase { /** A comma-separated list of stored fields to return as part of a hit. 
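A sketch of the term vector options described above, requesting positions, offsets, and term statistics for one stored document; the index, document ID, and field name are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const tv = await client.termvectors({
  index: 'my-index-000001',
  id: '1',
  fields: ['message'],
  positions: true,
  offsets: true,         // offsets assume UTF-16, per the warning above
  term_statistics: true  // excluded by default
})
console.log(tv.term_vectors?.message?.terms)
```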
* If no fields are specified, no stored fields are included in the response. * If this field is specified, the `_source` parameter defaults to `false`. - * Only leaf fields can be retrieved with the `stored_field` option. - * Object fields can't be returned;if specified, the request fails. */ + * Only leaf fields can be retrieved with the `stored_fields` option. + * Object fields can't be returned; if specified, the request fails. */ stored_fields?: Fields /** The version number for concurrency control. * It must match the current version of the document for the request to succeed. */ @@ -1510,7 +1510,7 @@ export interface OpenPointInTimeRequest extends RequestBase { routing?: Routing /** The type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. * If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. @@ -2971,11 +2971,20 @@ export type SearchSourceConfig = boolean | SearchSourceFilter | Fields export type SearchSourceConfigParam = boolean | Fields export interface SearchSourceFilter { + /** If `true`, vector fields are excluded from the returned source. + * + * This option takes precedence over `includes`: any vector field will + * remain excluded even if it matches an `includes` rule. */ + exclude_vectors?: boolean + /** A list of fields to exclude from the returned source. */ excludes?: Fields - /** @alias excludes */ + /** A list of fields to exclude from the returned source. + * @alias excludes */ exclude?: Fields + /** A list of fields to include in the returned source. */ includes?: Fields - /** @alias includes */ + /** A list of fields to include in the returned source. + * @alias includes */ include?: Fields } @@ -3195,8 +3204,7 @@ export interface SearchShardsRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -3256,8 +3264,7 @@ export interface SearchTemplateRequest extends RequestBase { ccs_minimize_roundtrips?: boolean /** The type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. 
*/ ignore_throttled?: boolean @@ -3554,8 +3561,7 @@ export interface UpdateByQueryRequest extends RequestBase { df?: string /** The type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * It supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** Skips the specified number of documents. */ from?: long @@ -4657,7 +4663,7 @@ export interface TaskFailure { reason: ErrorCause } -export type TaskId = string | integer +export type TaskId = string export interface TextEmbedding { model_id: string @@ -5069,6 +5075,9 @@ export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase { /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ compression?: double + /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). + * To use an implementation optimized for accuracy, set this parameter to high_accuracy instead. */ + execution_hint?: AggregationsTDigestExecutionHint } export interface AggregationsBucketAggregationBase { @@ -5924,6 +5933,9 @@ export interface AggregationsMedianAbsoluteDeviationAggregate extends Aggregatio export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase { /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ compression?: double + /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). + * To use an implementation optimized for accuracy, set this parameter to high_accuracy instead. */ + execution_hint?: AggregationsTDigestExecutionHint } export interface AggregationsMetricAggregationBase { @@ -6456,8 +6468,13 @@ export interface AggregationsSumBucketAggregation extends AggregationsPipelineAg export interface AggregationsTDigest { /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ compression?: integer + /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). + * To use an implementation optimized for accuracy, set this parameter to high_accuracy instead. 
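The new `exclude_vectors` source filter above strips vector fields from hits without listing each one, and it takes precedence over `includes`. A sketch with a placeholder index name:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const res = await client.search({
  index: 'my-index',
  query: { match_all: {} },
  _source: {
    exclude_vectors: true // vector fields stay excluded even if 'includes' matches them
  }
})
console.log(res.hits.hits[0]?._source)
```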
*/ + execution_hint?: AggregationsTDigestExecutionHint } +export type AggregationsTDigestExecutionHint = 'default' | 'high_accuracy' + export interface AggregationsTDigestPercentileRanksAggregate extends AggregationsPercentilesAggregateBase { } @@ -9801,7 +9818,7 @@ export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase { export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { /** The tokens representing this query */ - tokens: Partial<Record<string, float>>[] + tokens: Record<string, float> | Record<string, float>[] /** Token pruning configurations */ pruning_config?: QueryDslTokenPruningConfig } @@ -15734,8 +15751,11 @@ export interface ClusterGetSettingsRequest extends RequestBase { } export interface ClusterGetSettingsResponse { + /** The settings that persist after the cluster restarts. */ persistent: Record<string, any> + /** The settings that do not persist after the cluster restarts. */ transient: Record<string, any> + /** The default setting values. */ defaults?: Record<string, any> } @@ -15949,7 +15969,9 @@ export interface ClusterPutSettingsRequest extends RequestBase { master_timeout?: Duration /** Explicit operation timeout */ timeout?: Duration + /** The settings that persist after the cluster restarts. */ persistent?: Record<string, any> + /** The settings that do not persist after the cluster restarts. */ transient?: Record<string, any> /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { flat_settings?: never, master_timeout?: never, timeout?: never, persistent?: never, transient?: never } @@ -17673,6 +17695,7 @@ export interface EsqlEsqlClusterDetails { indices: string took?: DurationValue<UnitMillis> _shards?: EsqlEsqlShardInfo + failures?: EsqlEsqlShardFailure[] } export interface EsqlEsqlClusterInfo {
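The `execution_hint` added to the TDigest settings above trades speed for accuracy. A sketch of a percentiles aggregation opting into the high-accuracy implementation; the index and field names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const res = await client.search({
  index: 'latency-*',
  size: 0,
  aggs: {
    latency_percentiles: {
      percentiles: {
        field: 'took_ms',
        tdigest: {
          execution_hint: 'high_accuracy' // 'default' is the speed-optimized implementation
        }
      }
    }
  }
})
console.log(res.aggregations)
```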
*/ @@ -17786,10 +17800,18 @@ export interface EsqlAsyncQueryRequest extends RequestBase { * If the query completes during this period, results are returned * Otherwise, a query ID is returned that can later be used to retrieve the results. */ wait_for_completion_timeout?: Duration + /** The period for which the query and its results are stored in the cluster. + * The default period is five days. + * When this period expires, the query and its results are deleted, even if the query is still ongoing. + * If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. */ + keep_alive?: Duration + /** Indicates whether the query and its results are stored in the cluster. + * If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. */ + keep_on_completion?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never } + body?: string | { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never } + querystring?: { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never } } export type EsqlAsyncQueryResponse = EsqlAsyncEsqlResult @@ -18418,6 +18440,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged { step_time_millis?: EpochTime phase_execution?: IlmExplainLifecycleLifecycleExplainPhaseExecution time_since_index_creation?: Duration + skip: boolean } export interface IlmExplainLifecycleLifecycleExplainPhaseExecution { @@ -18910,7 +18933,8 @@ export interface IndicesIndexSettingsKeys { max_shingle_diff?: integer blocks?: IndicesIndexSettingBlocks max_refresh_listeners?: integer - /** Settings to define analyzers, tokenizers, token filters and character filters. */ + /** Settings to define analyzers, tokenizers, token filters and character filters. + * Refer to the linked documentation for step-by-step examples of updating analyzers on existing indices. 
*/ analyze?: IndicesSettingsAnalyze highlight?: IndicesSettingsHighlight max_terms_count?: integer @@ -19484,8 +19508,7 @@ export interface IndicesClearCacheRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, clears the fields cache. * Use the `fields` parameter to clear the cache of specific fields only. */ @@ -19553,8 +19576,7 @@ export interface IndicesCloseRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -19727,8 +19749,7 @@ export interface IndicesDeleteRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -19899,8 +19920,7 @@ export interface IndicesExistsRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, returns settings in flat format. */ flat_settings?: boolean @@ -19929,8 +19949,7 @@ export interface IndicesExistsAliasRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. */ ignore_unavailable?: boolean @@ -20087,8 +20106,7 @@ export interface IndicesFlushRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
- * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, the request forces a flush even if there are no changes to commit to the index. */ force?: boolean @@ -20190,8 +20208,7 @@ export interface IndicesGetAliasRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -20217,8 +20234,7 @@ export interface IndicesGetDataLifecycleRequest extends RequestBase { * To target all data streams, omit this parameter or use `*` or `_all`. */ name: DataStreamNames /** Type of data stream that wildcard patterns can match. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, return all default settings in the response. */ include_defaults?: boolean @@ -20296,8 +20312,7 @@ export interface IndicesGetDataStreamOptionsRequest extends RequestBase { * To target all data streams, omit this parameter or use `*` or `_all`. */ name: DataStreamNames /** Type of data stream that wildcard patterns can match. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -20351,8 +20366,7 @@ export interface IndicesGetFieldMappingRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -20411,8 +20425,7 @@ export interface IndicesGetMappingRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. 
*/ ignore_unavailable?: boolean @@ -20600,8 +20613,7 @@ export interface IndicesOpenRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -20685,8 +20697,7 @@ export interface IndicesPutDataLifecycleRequest extends RequestBase { * To target all data streams use `*` or `_all`. */ name: DataStreamNames /** Type of data stream that wildcard patterns can match. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `hidden`, `open`, `closed`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** Period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an @@ -20718,8 +20729,7 @@ export interface IndicesPutDataStreamOptionsRequest extends RequestBase { * To target all data streams use `*` or `_all`. */ name: DataStreamNames /** Type of data stream that wildcard patterns can match. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `hidden`, `open`, `closed`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** Period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an @@ -20874,8 +20884,7 @@ export interface IndicesPutMappingRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -21130,8 +21139,7 @@ export interface IndicesRefreshRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -21189,7 +21197,6 @@ export interface IndicesResolveClusterRequest extends RequestBase { /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
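 * (A hedged usage sketch, index expression assumed: `client.indices.resolveCluster({ name: 'cluster*:my-index-*', expand_wildcards: 'open' })`.)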
* NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index * options to the `_resolve/cluster` API endpoint that takes no index expression. */ expand_wildcards?: ExpandWildcards @@ -21239,8 +21246,7 @@ export interface IndicesResolveIndexRequest extends RequestBase { name: Names /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -21362,8 +21368,7 @@ export interface IndicesSegmentsRequest extends RequestBase { allow_no_indices?: boolean /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -21917,8 +21922,7 @@ export interface IndicesValidateQueryRequest extends RequestBase { df?: string /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. - * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, the response returns detailed information if an error has occurred. */ explain?: boolean @@ -22426,15 +22430,31 @@ export interface InferenceHuggingFaceServiceSettings { * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string /** This setting helps to minimize the number of rate limit errors returned from Hugging Face. - * By default, the `hugging_face` service sets the number of requests allowed per minute to 3000. */ + * By default, the `hugging_face` service sets the number of requests allowed per minute to 3000 for all supported tasks. + * Hugging Face does not publish a universal rate limit — actual limits may vary. + * It is recommended to adjust this value based on the capacity and limits of your specific deployment environment. */ rate_limit?: InferenceRateLimitSetting - /** The URL endpoint to use for the requests. */ + /** The URL endpoint to use for the requests. + * For `completion` and `chat_completion` tasks, the deployed model must be compatible with the Hugging Face Chat Completion interface (see the linked external documentation for details). The endpoint URL for the request must include `/v1/chat/completions`. + * If the model supports the OpenAI Chat Completion schema, a toggle should appear in the interface. Enabling this toggle doesn't change any model behavior, it reveals the full endpoint URL needed (which should include `/v1/chat/completions`) when configuring the inference endpoint in Elasticsearch. 
If the model doesn't support this schema, the toggle may not be shown. */
url: string
+ /** The name of the HuggingFace model to use for the inference task.
+ * For `completion` and `chat_completion` tasks, this field is optional but may be required for certain models — particularly when using serverless inference endpoints.
+ * For the `text_embedding` task, this field should not be included. Otherwise, the request will fail. */
+ model_id?: string
}

export type InferenceHuggingFaceServiceType = 'hugging_face'

-export type InferenceHuggingFaceTaskType = 'text_embedding'
+export interface InferenceHuggingFaceTaskSettings {
+ /** For a `rerank` task, return doc text within the results. */
+ return_documents?: boolean
+ /** For a `rerank` task, the number of most relevant documents to return.
+ * It defaults to the number of documents. */
+ top_n?: integer
+}
+
+export type InferenceHuggingFaceTaskType = 'chat_completion' | 'completion' | 'rerank' | 'text_embedding'

export interface InferenceInferenceChunkingSettings {
/** The maximum size of a chunk in words.
@@ -22638,13 +22658,47 @@ export type InferenceJinaAITaskType = 'rerank' | 'text_embedding'

export type InferenceJinaAITextEmbeddingTask = 'classification' | 'clustering' | 'ingest' | 'search'

export interface InferenceMessage {
- /** The content of the message. */
+ /** The content of the message.
+ *
+ * String example:
+ * ```
+ * {
+ * "content": "Some string"
+ * }
+ * ```
+ *
+ * Object example:
+ * ```
+ * {
+ * "content": [
+ * {
+ * "text": "Some text",
+ * "type": "text"
+ * }
+ * ]
+ * }
+ * ``` */
content?: InferenceMessageContent
- /** The role of the message author. */
+ /** The role of the message author. Valid values are `user`, `assistant`, `system`, and `tool`. */
role: string
- /** The tool call that this message is responding to. */
+ /** Only for `tool` role messages. The tool call that this message is responding to. */
tool_call_id?: Id
- /** The tool calls generated by the model. */
+ /** Only for `assistant` role messages. The tool calls generated by the model. If it's specified, the `content` field is optional.
+ * Example:
+ * ```
+ * {
+ * "tool_calls": [
+ * {
+ * "id": "call_KcAjWtAww20AihPHphUh46Gd",
+ * "type": "function",
+ * "function": {
+ * "name": "get_current_weather",
+ * "arguments": "{\"location\":\"Boston, MA\"}"
+ * }
+ * }
+ * ]
+ * }
+ * ``` */
tool_calls?: InferenceToolCall[]
}
@@ -22662,7 +22716,7 @@ export interface InferenceMistralServiceSettings {
/** The maximum number of tokens per input before chunking occurs. */
max_input_tokens?: integer
/** The name of the model to use for the inference task.
- * Refer to the Mistral models documentation for the list of available text embedding models. */
+ * Refer to the Mistral models documentation for the list of available models. */
model: string
/** This setting helps to minimize the number of rate limit errors returned from the Mistral API.
* By default, the `mistral` service sets the number of requests allowed per minute to 240. */
rate_limit?: InferenceRateLimitSetting
}

export type InferenceMistralServiceType = 'mistral'

-export type InferenceMistralTaskType = 'text_embedding'
+export type InferenceMistralTaskType = 'text_embedding' | 'completion' | 'chat_completion'

export interface InferenceOpenAIServiceSettings {
/** A valid API key of your OpenAI account.
@@ -22719,7 +22773,25 @@ export interface InferenceRankedDocument {
}

export interface InferenceRateLimitSetting {
- /** The number of requests allowed per minute. */
+ /** The number of requests allowed per minute.
+ * By default, the number of requests allowed per minute is set by each service as follows:
+ *
+ * * `alibabacloud-ai-search` service: `1000`
+ * * `anthropic` service: `50`
+ * * `azureaistudio` service: `240`
+ * * `azureopenai` service and task type `text_embedding`: `1440`
+ * * `azureopenai` service and task type `completion`: `120`
+ * * `cohere` service: `10000`
+ * * `elastic` service and task type `chat_completion`: `240`
+ * * `googleaistudio` service: `360`
+ * * `googlevertexai` service: `30000`
+ * * `hugging_face` service: `3000`
+ * * `jinaai` service: `2000`
+ * * `mistral` service: `240`
+ * * `openai` service and task type `text_embedding`: `3000`
+ * * `openai` service and task type `completion`: `500`
+ * * `voyageai` service: `2000`
+ * * `watsonxai` service: `120` */
requests_per_minute?: integer
}
@@ -22736,9 +22808,46 @@ export interface InferenceRequestChatCompletion {
stop?: string[]
/** The sampling temperature to use. */
temperature?: float
- /** Controls which tool is called by the model. */
+ /** Controls which tool is called by the model.
+ * String representation: One of `auto`, `none`, or `required`. `auto` allows the model to choose between calling tools and generating a message. `none` causes the model to not call any tools. `required` forces the model to call one or more tools.
+ * Example (object representation):
+ * ```
+ * {
+ * "tool_choice": {
+ * "type": "function",
+ * "function": {
+ * "name": "get_current_weather"
+ * }
+ * }
+ * }
+ * ``` */
tool_choice?: InferenceCompletionToolType
- /** A list of tools that the model can call. */
+ /** A list of tools that the model can call.
+ * Example:
+ * ```
+ * {
+ * "tools": [
+ * {
+ * "type": "function",
+ * "function": {
+ * "name": "get_price_of_item",
+ * "description": "Get the current price of an item",
+ * "parameters": {
+ * "type": "object",
+ * "properties": {
+ * "item": {
+ * "id": "12345"
+ * },
+ * "unit": {
+ * "type": "currency"
+ * }
+ * }
+ * }
+ * }
+ * }
+ * ]
+ * }
+ * ``` */
tools?: InferenceCompletionTool[]
/** Nucleus sampling, an alternative to sampling with temperature. */
top_p?: float
@@ -22784,17 +22893,17 @@ export type InferenceTaskTypeGoogleAIStudio = 'text_embedding' | 'completion'

export type InferenceTaskTypeGoogleVertexAI = 'text_embedding' | 'rerank'

-export type InferenceTaskTypeHuggingFace = 'text_embedding'
+export type InferenceTaskTypeHuggingFace = 'chat_completion' | 'completion' | 'rerank' | 'text_embedding'

export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank'

-export type InferenceTaskTypeMistral = 'text_embedding'
+export type InferenceTaskTypeMistral = 'text_embedding' | 'chat_completion' | 'completion'

export type InferenceTaskTypeOpenAI = 'text_embedding' | 'chat_completion' | 'completion'

export type InferenceTaskTypeVoyageAI = 'text_embedding' | 'rerank'

-export type InferenceTaskTypeWatsonx = 'text_embedding'
+export type InferenceTaskTypeWatsonx = 'text_embedding' | 'chat_completion' | 'completion'

export interface InferenceTextEmbeddingByteResult {
embedding: InferenceDenseByteVector
@@ -22880,7 +22989,8 @@ export interface InferenceWatsonxServiceSettings {
* For the active version data parameters, refer to the Watsonx documentation. */
api_version: string
/** The name of the model to use for the inference task.
- * Refer to the IBM Embedding Models section in the Watsonx documentation for the list of available text embedding models. */ + * Refer to the IBM Embedding Models section in the Watsonx documentation for the list of available text embedding models. + * Refer to the IBM library - Foundation models in Watsonx.ai. */ model_id: string /** The identifier of the IBM Cloud project to use for the inference task. */ project_id: string @@ -22893,7 +23003,7 @@ export interface InferenceWatsonxServiceSettings { export type InferenceWatsonxServiceType = 'watsonxai' -export type InferenceWatsonxTaskType = 'text_embedding' +export type InferenceWatsonxTaskType = 'text_embedding' | 'chat_completion' | 'completion' export interface InferenceChatCompletionUnifiedRequest extends RequestBase { /** The inference Id */ @@ -22987,7 +23097,7 @@ export interface InferenceInferenceRequest extends RequestBase { export type InferenceInferenceResponse = InferenceInferenceResult export interface InferencePutRequest extends RequestBase { - /** The task type */ + /** The task type. Refer to the integration list in the API description for the available task types. */ task_type?: InferenceTaskType /** The inference Id */ inference_id: Id @@ -23229,10 +23339,13 @@ export interface InferencePutHuggingFaceRequest extends RequestBase { service: InferenceHuggingFaceServiceType /** Settings used to install the inference model. These settings are specific to the `hugging_face` service. */ service_settings: InferenceHuggingFaceServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceHuggingFaceTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace @@ -23260,8 +23373,7 @@ export interface InferencePutJinaaiRequest extends RequestBase { export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi export interface InferencePutMistralRequest extends RequestBase { - /** The task type. - * The only valid task type for the model to perform is `text_embedding`. */ + /** The type of the inference task that the model will perform. */ task_type: InferenceMistralTaskType /** The unique identifier of the inference endpoint. */ mistral_inference_id: Id @@ -23325,8 +23437,7 @@ export interface InferencePutVoyageaiRequest extends RequestBase { export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI export interface InferencePutWatsonxRequest extends RequestBase { - /** The task type. - * The only valid task type for the model to perform is `text_embedding`. 
*/ + /** The type of the inference task that the model will perform. */ task_type: InferenceWatsonxTaskType /** The unique identifier of the inference endpoint. */ watsonx_inference_id: Id @@ -28707,13 +28818,7 @@ export interface MlPutJobRequest extends RequestBase { * `_all` string or when no indices are specified. */ allow_no_indices?: boolean /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines - * whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: - * - * * `all`: Match any data stream or index, including hidden ones. - * * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. - * * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. - * * `none`: Wildcard patterns are not accepted. - * * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ + * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ expand_wildcards?: ExpandWildcards /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean @@ -29244,13 +29349,7 @@ export interface MlUpdateDatafeedRequest extends RequestBase { * `_all` string or when no indices are specified. */ allow_no_indices?: boolean /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines - * whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: - * - * * `all`: Match any data stream or index, including hidden ones. - * * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. - * * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. - * * `none`: Wildcard patterns are not accepted. - * * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ + * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ expand_wildcards?: ExpandWildcards /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean @@ -34657,6 +34756,8 @@ export interface SnapshotSnapshotShardsStatus { export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_count' | 'repository' | 'shard_count' | 'failed_shard_count' +export type SnapshotSnapshotState = 'IN_PROGRESS' | 'SUCCESS' | 'FAILED' | 'PARTIAL' | 'INCOMPATIBLE' + export interface SnapshotSnapshotStats { /** The number and size of files that still need to be copied as part of the incremental snapshot. * For completed snapshots, this property indicates the number and size of files that were not already in the repository and were copied as part of the incremental snapshot. */ @@ -34956,14 +35057,17 @@ export interface SnapshotGetRequest extends RequestBase { /** The sort order for the result. * The default behavior is sorting by snapshot start time stamp. */ sort?: SnapshotSnapshotSort + /** Only return snapshots with a state found in the given comma-separated list of snapshot states. + * The default is all snapshot states. 
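+ * A hedged client-side sketch (repository name assumed, not part of this patch): `client.snapshot.get({ repository: 'my_repository', snapshot: '*', state: ['SUCCESS', 'PARTIAL'] })` would list only fully or partially successful snapshots.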
*/ + state?: SnapshotSnapshotState | SnapshotSnapshotState[] /** If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. * * NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. */ verbose?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, after?: never, from_sort_value?: never, ignore_unavailable?: never, index_details?: never, index_names?: never, include_repository?: never, master_timeout?: never, order?: never, offset?: never, size?: never, slm_policy_filter?: never, sort?: never, verbose?: never } + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, after?: never, from_sort_value?: never, ignore_unavailable?: never, index_details?: never, index_names?: never, include_repository?: never, master_timeout?: never, order?: never, offset?: never, size?: never, slm_policy_filter?: never, sort?: never, state?: never, verbose?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, after?: never, from_sort_value?: never, ignore_unavailable?: never, index_details?: never, index_names?: never, include_repository?: never, master_timeout?: never, order?: never, offset?: never, size?: never, slm_policy_filter?: never, sort?: never, verbose?: never } + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, after?: never, from_sort_value?: never, ignore_unavailable?: never, index_details?: never, index_names?: never, include_repository?: never, master_timeout?: never, order?: never, offset?: never, size?: never, slm_policy_filter?: never, sort?: never, state?: never, verbose?: never } } export interface SnapshotGetResponse { @@ -38275,7 +38379,7 @@ export interface SpecUtilsCommonQueryParameters { filter_path?: string | string[] /** When set to `true` will return statistics in a format suitable for humans. * For example `"exists_time": "1h"` for humans and - * `"eixsts_time_in_millis": 3600000` for computers. When disabled the human + * `"exists_time_in_millis": 3600000` for computers. When disabled the human * readable values will be omitted. This makes sense for responses being consumed * only by machines. 
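 * (Illustrative and not part of this change: any request can opt in, for example `client.cluster.stats({ human: true })`.)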
*/ human?: boolean From 8fba18de47e6db8ce17d30a7f1e9e97a22883b91 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 23 Jun 2025 12:27:25 -0500 Subject: [PATCH 577/647] Update dependency @elastic/request-converter to v9.1.1 (#2882) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 5f5a0357f..5b4f2bacd 100644 --- a/package.json +++ b/package.json @@ -58,7 +58,7 @@ "node": ">=18" }, "devDependencies": { - "@elastic/request-converter": "9.0.1", + "@elastic/request-converter": "9.1.1", "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", From 1f8029fb84a52adba7ae3245a96aba8befcfb0c3 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 23 Jun 2025 12:32:58 -0500 Subject: [PATCH 578/647] Update dependency @types/node to v22.15.32 (#2874) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 5b4f2bacd..f0b706ac1 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.15.30", + "@types/node": "22.15.32", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From af71722020c27a9f8fa7b1637e165a6861943bd5 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 23 Jun 2025 12:33:34 -0500 Subject: [PATCH 579/647] Pin zachary95/github-actions-debounce action to ab73634 (#2873) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .github/workflows/npm-publish-unstable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index dfc08e4c1..8c301e26a 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -39,7 +39,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Debounce 30 minutes - uses: zachary95/github-actions-debounce@v0.1.0 + uses: zachary95/github-actions-debounce@ab7363483e2837992b8aa6be891763da00ac14f9 # v0.1.0 with: wait: 1800 From fd9533f18d61b8b15052c42e7b51aa8ac8f207ae Mon Sep 17 00:00:00 2001 From: Jan Calanog Date: Mon, 23 Jun 2025 19:37:54 +0200 Subject: [PATCH 580/647] docs-builder: add `pull-requests: write` permission to docs-build workflow (#2881) --- .github/workflows/docs-build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docs-build.yml b/.github/workflows/docs-build.yml index bb466166d..adf95da5d 100644 --- a/.github/workflows/docs-build.yml +++ b/.github/workflows/docs-build.yml @@ -16,4 +16,4 @@ jobs: deployments: write id-token: write contents: read - pull-requests: read + pull-requests: write From d6a4aebb879fef294394eaf2e89cf93ce297b55f Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 23 Jun 2025 13:56:57 -0400 Subject: [PATCH 581/647] Combine some jobs in unstable publish workflow (#2883) Workflow is not being triggered as expected --- 
.github/workflows/npm-publish-unstable.yml | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index 8c301e26a..17a593c2f 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -32,23 +32,16 @@ jobs: - 'index.d.ts' - 'index.js' - # pause for 30 minutes to avoid publishing more than 2x per hour - debounce: - name: Publish max 2x per hour + test: + name: Run tests and publish unstable if: needs.paths-filter.outputs.src == 'true' runs-on: ubuntu-latest steps: + # pause for 30 minutes to avoid publishing more than 2x per hour - name: Debounce 30 minutes uses: zachary95/github-actions-debounce@ab7363483e2837992b8aa6be891763da00ac14f9 # v0.1.0 with: wait: 1800 - - # run tests prior to publish to ensure some stability - test: - name: Run tests - needs: debounce - runs-on: ubuntu-latest - steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: persist-credentials: false @@ -57,9 +50,12 @@ jobs: with: node-version: "22.x" registry-url: "https://registry.npmjs.org/" - - run: npm install -g npm - - run: npm install - - run: npm test + - name: Install dependencies + run: | + npm install -g npm + npm install + - name: Run tests + run: npm test # if tests pass, publish unstable publish: From 384898ac5d6ef5952de4d1c67c7e7fed0bf058e4 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 30 Jun 2025 18:46:05 +0200 Subject: [PATCH 582/647] Auto-generated API code (#2885) --- docs/reference/api-reference.md | 73 +++++-------- src/api/api/esql.ts | 1 + src/api/api/indices.ts | 54 ++++++++++ src/api/api/search_mvt.ts | 2 +- src/api/api/security.ts | 4 +- src/api/api/snapshot.ts | 2 +- src/api/api/streams.ts | 179 ++++++++++++++++++++++++++++++++ src/api/api/watcher.ts | 2 +- src/api/index.ts | 8 ++ src/api/types.ts | 42 +++++--- 10 files changed, 299 insertions(+), 68 deletions(-) create mode 100644 src/api/api/streams.ts diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index ada1c8d88..681b7b0d0 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -1577,54 +1577,6 @@ Internally, Elasticsearch translates a vector tile search API request into a sea * Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`. * If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. 
-For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search - -``` -GET my-index/_search -{ - "size": 10000, - "query": { - "geo_bounding_box": { - "my-geo-field": { - "top_left": { - "lat": -40.979898069620134, - "lon": -45 - }, - "bottom_right": { - "lat": -66.51326044311186, - "lon": 0 - } - } - } - }, - "aggregations": { - "grid": { - "geotile_grid": { - "field": "my-geo-field", - "precision": 11, - "size": 65536, - "bounds": { - "top_left": { - "lat": -40.979898069620134, - "lon": -45 - }, - "bottom_right": { - "lat": -66.51326044311186, - "lon": 0 - } - } - } - }, - "bounds": { - "geo_bounds": { - "field": "my-geo-field", - "wrap_longitude": false - } - } - } -} -``` - The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: @@ -1700,6 +1652,8 @@ Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. +Learn how to use the vector tile search API with practical examples in the [Vector tile search examples](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/vector-tile-search) guide. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt) ```ts @@ -4701,6 +4655,7 @@ A query ID is provided in the ES|QL async query API response for a query that do A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. - **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, for example `json` or `yaml`. - **`keep_alive` (Optional, string \| -1 \| 0)**: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. - **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the request to finish. @@ -6704,7 +6659,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. - **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. -- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: +- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -6972,6 +6927,16 @@ client.indices.reloadSearchAnalyzers({ index }) - **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - **`resource` (Optional, string)**: Changed resource to reload analyzers from if applicable +## client.indices.removeBlock [_indices.remove_block] +Removes a block from an index. 
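+A block that was applied earlier (for example with `client.indices.addBlock`) can be released this way. A hedged sketch, with the index and block names assumed:
+
+```ts
+await client.indices.removeBlock({ index: 'my-index', block: 'write' })
+```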
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html)
+
+```ts
+client.indices.removeBlock()
+```
+
+
## client.indices.resolveCluster [_indices.resolve_cluster]
Resolve the cluster.
@@ -12662,6 +12627,7 @@ You can optionally filter the results with a query.
To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.
If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.
If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.
+Refer to the linked documentation for examples of how to find API keys:

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys)

@@ -13042,6 +13008,8 @@ The owner user's information, such as the `username` and `realm`, is also update

NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.

+To learn more about how to use this API, refer to the [Update cross cluster API key API examples page](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-cc-api-key-examples).
+
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key)

```ts
client.security.updateCrossClusterApiKey({ id, access })
```

@@ -14007,6 +13975,12 @@ If you omit the `<snapshot>` request path parameter, the request retrieves infor

This usage is preferred.
If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running.

+Note that the stats will not be available for any shard snapshots in an ongoing snapshot completed by a node that (even momentarily) left the cluster.
+Loading the stats from the repository is an expensive operation (see the WARNING below).
+Therefore the stats values for such shards will be -1 even though the "stage" value will be "DONE", in order to minimize latency.
+A "description" field will be present for a shard snapshot completed by a departed node explaining why the shard snapshot's stats results are invalid.
+Consequently, the total stats for the index will be less than expected due to the missing values from these shards.
+
WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
The API requires a read from the repository for each shard in each snapshot.
For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).

@@ -15310,6 +15284,7 @@ When Elasticsearch security features are enabled on your cluster, watches are ru
If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.

When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.
+Refer to the external documentation for examples of watch execution requests, including existing, customized, and inline watches.
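+As a hedged sketch (the watch id is assumed, not taken from this patch), executing a stored watch and inspecting the resulting watch record might look like:
+
+```ts
+const resp = await client.watcher.executeWatch({ id: 'my_watch' })
+console.log(resp.watch_record)
+```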
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch) diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index 29ef532e8..c56bd6838 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -71,6 +71,7 @@ export default class Esql { body: [], query: [ 'drop_null_columns', + 'format', 'keep_alive', 'wait_for_completion_timeout' ] diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 2f06377d5..8a9c806e1 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -733,6 +733,14 @@ export default class Indices { 'resource' ] }, + 'indices.remove_block': { + path: [ + 'index', + 'block' + ], + body: [], + query: [] + }, 'indices.resolve_cluster': { path: [ 'name' @@ -3720,6 +3728,52 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Removes a block from an index. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html | Elasticsearch API documentation} + */ + async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.remove_block'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/${encodeURIComponent(params.index.toString())}/_block/${encodeURIComponent(params.block.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.remove_block', + pathParts: { + index: params.index, + block: params.block + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. 
* Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. ## Note on backwards compatibility The ability to query without an index expression was added in version 8.18, so when querying remote clusters older than that, the local cluster will send the index expression `dummy*` to those remote clusters. Thus, if an errors occur, you may see a reference to that index expression even though you didn't request it. If it causes a problem, you can instead include an index expression like `*:*` to bypass the issue. ## Advantages of using this endpoint before a cross-cluster search You may want to exclude a cluster or index from a search when: * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search. * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. ## Test availability of remote clusters The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. The remote cluster may be available, while the local cluster is not currently connected to it. You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters. For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. The `connected` field in the response will indicate whether it was successful. If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster | Elasticsearch API documentation} diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index 1c44a4994..c36f1df0a 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -66,7 +66,7 @@ const acceptedParams: Record`. The query uses the `//` tile as a bounding box. * A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box. 
* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`. * If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search ``` GET my-index/_search { "size": 10000, "query": { "geo_bounding_box": { "my-geo-field": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "aggregations": { "grid": { "geotile_grid": { "field": "my-geo-field", "precision": 11, "size": 65536, "bounds": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "bounds": { "geo_bounds": { "field": "my-geo-field", "wrap_longitude": false } } } } ``` The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: * A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query. * An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. * A meta layer containing: * A feature containing a bounding box. By default, this is the bounding box of the tile. * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. * Metadata for the search. The API only returns features that can display at its zoom level. For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON. IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. If you specify both parameters, the query parameter takes precedence. **Grid precision for geotile** For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. `grid_precision` represents the additional zoom levels available through these cells. The final precision is computed by as follows: ` + grid_precision`. For example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. The maximum final precision is 29. The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The `aggs` layer only contains features for cells with matching data. **Grid precision for geohex** For a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`. This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. The following table maps the H3 resolution for each precision. For example, if `` is 3 and `grid_precision` is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If `` is 3 and `grid_precision` is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3. 
| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | | --------- | ---------------- | ------------- | ----------------| ----- | | 1 | 4 | 0 | 122 | 30.5 | | 2 | 16 | 0 | 122 | 7.625 | | 3 | 64 | 1 | 842 | 13.15625 | | 4 | 256 | 1 | 842 | 3.2890625 | | 5 | 1024 | 2 | 5882 | 5.744140625 | | 6 | 4096 | 2 | 5882 | 1.436035156 | | 7 | 16384 | 3 | 41162 | 2.512329102 | | 8 | 65536 | 3 | 41162 | 0.6280822754 | | 9 | 262144 | 4 | 288122 | 1.099098206 | | 10 | 1048576 | 4 | 288122 | 0.2747745514 | | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. + * Search a vector tile. Search a vector tile for geospatial values. Before using this API, you should be familiar with the Mapbox vector tile specification. The API returns results as a binary mapbox vector tile. Internally, Elasticsearch translates a vector tile search API request into a search containing: * A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box. * A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box. * Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`. * If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: * A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query. * An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. * A meta layer containing: * A feature containing a bounding box. By default, this is the bounding box of the tile. 
* Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. * Metadata for the search. The API only returns features that can display at its zoom level. For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON. IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. If you specify both parameters, the query parameter takes precedence. **Grid precision for geotile** For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. `grid_precision` represents the additional zoom levels available through these cells. The final precision is computed by as follows: ` + grid_precision`. For example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. The maximum final precision is 29. The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The `aggs` layer only contains features for cells with matching data. **Grid precision for geohex** For a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`. This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. The following table maps the H3 resolution for each precision. For example, if `` is 3 and `grid_precision` is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If `` is 3 and `grid_precision` is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3. | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | | --------- | ---------------- | ------------- | ----------------| ----- | | 1 | 4 | 0 | 122 | 30.5 | | 2 | 16 | 0 | 122 | 7.625 | | 3 | 64 | 1 | 842 | 13.15625 | | 4 | 256 | 1 | 842 | 3.2890625 | | 5 | 1024 | 2 | 5882 | 5.744140625 | | 6 | 4096 | 2 | 5882 | 1.436035156 | | 7 | 16384 | 3 | 41162 | 2.512329102 | | 8 | 65536 | 3 | 41162 | 0.6280822754 | | 9 | 262144 | 4 | 288122 | 1.099098206 | | 10 | 1048576 | 4 | 288122 | 0.2747745514 | | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. 
To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. Learn how to use the vector tile search API with practical examples in the [Vector tile search examples](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/vector-tile-search) guide. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt | Elasticsearch API documentation} */ export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/security.ts b/src/api/api/security.ts index adde7580d..d255fc159 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -3180,7 +3180,7 @@ export default class Security { } /** - * Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. + * Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. Refer to the linked documentation for examples of how to find API keys: * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys | Elasticsearch API documentation} */ async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -3773,7 +3773,7 @@ export default class Security { } /** - * Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. To update an API key, the owner user's credentials are required. It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. This API supports updates to an API key's access scope, metadata, and expiration. The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. + * Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. 
To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. To update an API key, the owner user's credentials are required. It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. This API supports updates to an API key's access scope, metadata, and expiration. The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. To learn more about how to use this API, refer to the [Update cross cluster API key API examples page](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-cc-api-key-examples). * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key | Elasticsearch API documentation} */ async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityUpdateCrossClusterApiKeyResponse> diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index f94b58e86..b819b1bbe 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -773,7 +773,7 @@ export default class Snapshot { } /** - * Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs. + * Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running. Note that the stats will not be available for any shard snapshots in an ongoing snapshot completed by a node that (even momentarily) left the cluster.
Loading the stats from the repository is an expensive operation (see the WARNING below). Therefore the stats values for such shards will be -1 even though the "stage" value will be "DONE", in order to minimize latency. A "description" field will be present for a shard snapshot completed by a departed node explaining why the shard snapshot's stats results are invalid. Consequently, the total stats for the index will be less than expected due to the missing values from these shards. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status | Elasticsearch API documentation} */ async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotStatusResponse> diff --git a/src/api/api/streams.ts b/src/api/api/streams.ts new file mode 100644 index 000000000..e040401cd --- /dev/null +++ b/src/api/api/streams.ts @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport + acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> +} + +export default class Streams { + transport: Transport + acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> + constructor (transport: Transport) { + this.transport = transport + this.acceptedParams = { + 'streams.logs_disable': { + path: [], + body: [], + query: [] + }, + 'streams.logs_enable': { + path: [], + body: [], + query: [] + }, + 'streams.status': { + path: [], + body: [], + query: [] + } + } + } + + /** + * Disable the Logs Streams feature for this cluster + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-logs-disable.html | Elasticsearch API documentation} + */ + async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO> + async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>> + async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO> + async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this.acceptedParams['streams.logs_disable'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_streams/logs/_disable' + const meta: TransportRequestMetadata = { + name: 'streams.logs_disable' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Enable the Logs Streams feature for this cluster + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-logs-enable.html | Elasticsearch API documentation} + */ + async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO> + async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>> + async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO> + async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this.acceptedParams['streams.logs_enable'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_streams/logs/_enable' + const meta: TransportRequestMetadata = { + name: 'streams.logs_enable' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Return the current status of the streams feature for each streams type + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-status.html | Elasticsearch API documentation} + */ + async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO> + async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>> + async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO> + async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this.acceptedParams['streams.status'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ??
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_streams/status' + const meta: TransportRequestMetadata = { + name: 'streams.status' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index 848ab5af5..36fb36fcb 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -354,7 +354,7 @@ export default class Watcher { } /** - * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher. When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch. + * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher. When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch. Refer to the external documentation for examples of watch execution requests, including existing, customized, and inline watches.
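As an illustrative aside (not part of the patch), a minimal sketch of calling this method from the client might look like the following; the watch ID and option values are hypothetical examples, and `client` is assumed to be an already configured `Client` instance:

```ts
// Sketch: force-run a registered watch for debugging, without writing a
// watch record to the history. 'my_watch' is an example ID and is assumed
// to exist already.
const result = await client.watcher.executeWatch({
  id: 'my_watch',
  ignore_condition: true,  // force execution even if the condition is not met
  record_execution: false  // don't persist a watch record for this debug run
})
console.log(result.watch_record.state)
```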
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch | Elasticsearch API documentation} */ async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherExecuteWatchResponse> diff --git a/src/api/index.ts b/src/api/index.ts index cfa328a82..90a71f688 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -87,6 +87,7 @@ import SlmApi from './api/slm' import SnapshotApi from './api/snapshot' import SqlApi from './api/sql' import SslApi from './api/ssl' +import StreamsApi from './api/streams' import SynonymsApi from './api/synonyms' import TasksApi from './api/tasks' import termsEnumApi from './api/terms_enum' @@ -176,6 +177,7 @@ export default interface API { snapshot: SnapshotApi sql: SqlApi ssl: SslApi + streams: StreamsApi synonyms: SynonymsApi tasks: TasksApi termsEnum: typeof termsEnumApi @@ -224,6 +226,7 @@ const kSlm = Symbol('Slm') const kSnapshot = Symbol('Snapshot') const kSql = Symbol('Sql') const kSsl = Symbol('Ssl') +const kStreams = Symbol('Streams') const kSynonyms = Symbol('Synonyms') const kTasks = Symbol('Tasks') const kTextStructure = Symbol('TextStructure') @@ -267,6 +270,7 @@ export default class API { [kSnapshot]: symbol | null [kSql]: symbol | null [kSsl]: symbol | null + [kStreams]: symbol | null [kSynonyms]: symbol | null [kTasks]: symbol | null [kTextStructure]: symbol | null @@ -309,6 +313,7 @@ export default class API { this[kSnapshot] = null this[kSql] = null this[kSsl] = null + this[kStreams] = null this[kSynonyms] = null this[kTasks] = null this[kTextStructure] = null @@ -470,6 +475,9 @@ Object.defineProperties(API.prototype, { ssl: { get () { return this[kSsl] === null ? (this[kSsl] = new SslApi(this.transport)) : this[kSsl] } }, + streams: { + get () { return this[kStreams] === null ? (this[kStreams] = new StreamsApi(this.transport)) : this[kStreams] } + }, synonyms: { get () { return this[kSynonyms] === null ? (this[kSynonyms] = new SynonymsApi(this.transport)) : this[kSynonyms] } }, diff --git a/src/api/types.ts b/src/api/types.ts index f1a7b5337..c03fd02cd 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -4149,7 +4149,7 @@ export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' export interface LinearRetriever extends RetrieverBase { /** Inner retrievers. */ retrievers?: InnerRetriever[] - rank_window_size: integer + rank_window_size?: integer } export type MapboxVectorTiles = ArrayBuffer @@ -4253,7 +4253,7 @@ export interface PinnedRetriever extends RetrieverBase { retriever: RetrieverContainer ids?: string[] docs?: SpecifiedDocument[] - rank_window_size: integer + rank_window_size?: integer } export type PipelineName = string @@ -4678,9 +4678,9 @@ export interface TextSimilarityReranker extends RetrieverBase { /** Unique identifier of the inference endpoint created using the inference API. */ inference_id?: string /** The text snippet used as the basis for similarity comparison */ - inference_text?: string + inference_text: string /** The document field to be used for text similarity comparisons.
This field should contain the text that will be evaluated against the inference_text */ - field?: string + field: string } export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' @@ -8377,6 +8377,10 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase type: 'search_as_you_type' } +export interface MappingSemanticTextIndexOptions { + dense_vector?: MappingDenseVectorIndexOptions +} + export interface MappingSemanticTextProperty { type: 'semantic_text' meta?: Record<string, string> @@ -8388,6 +8392,9 @@ export interface MappingSemanticTextProperty { * You can update this parameter by using the Update mapping API. Use the Create inference API to create the endpoint. * If not specified, the inference endpoint defined by inference_id will be used at both index and query time. */ search_inference_id?: Id + /** Settings for index_options that override any defaults used by semantic_text, for example + * specific quantization settings. */ + index_options?: MappingSemanticTextIndexOptions /** Settings for chunking text into smaller passages. If specified, these will override the * chunking settings sent in the inference endpoint associated with inference_id. If chunking settings are updated, * they will not be applied to existing documents until they are reindexed. */ @@ -9729,14 +9736,12 @@ export interface QueryDslSparseVectorQuery extends QueryDslQueryBase { query?: string /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. - * Default: false - * @experimental */ + * Default: false */ prune?: boolean /** Optional pruning configuration. * If enabled, this will omit non-significant tokens from the query in order to improve query performance. * This is only used if prune is set to true. - * If prune is set to true but pruning_config is not specified, default values will be used. - * @experimental */ + * If prune is set to true but pruning_config is not specified, default values will be used. */ pruning_config?: QueryDslTokenPruningConfig } @@ -17837,6 +17842,8 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase { /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean + /** A short version of the Accept header, for example `json` or `yaml`. */ + format?: EsqlEsqlFormat /** The period for which the query and its results are stored in the cluster. * When this period expires, the query and its results are deleted, even if the query is still ongoing. */ keep_alive?: Duration @@ -17846,9 +17853,9 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase { * Otherwise, the response returns an `is_running` value of `true` and no results. */ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never } + body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } /** All values in `querystring` will be added to the request querystring.
 */ - querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never } + querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } } export type EsqlAsyncQueryGetResponse = EsqlAsyncEsqlResult @@ -20190,10 +20197,6 @@ export interface IndicesGetRequest extends RequestBase { export type IndicesGetResponse = Record<IndexName, IndicesIndexState> -export interface IndicesGetAliasIndexAliases { - aliases: Record<string, IndicesAliasDefinition> -} - export interface IndicesGetAliasRequest extends RequestBase { /** Comma-separated list of aliases to retrieve. * Supports wildcards (`*`). @@ -20223,6 +20226,17 @@ export interface IndicesGetAliasRequest extends RequestBase { export type IndicesGetAliasResponse = Record<IndexName, IndicesGetAliasIndexAliases> +export interface IndicesGetAliasIndexAliases { + aliases: Record<string, IndicesAliasDefinition> +} + +export interface IndicesGetAliasNotFoundAliasesKeys { + error: string + status: number +} +export type IndicesGetAliasNotFoundAliases = IndicesGetAliasNotFoundAliasesKeys +& { [property: string]: IndicesGetAliasIndexAliases | string | number } + export interface IndicesGetDataLifecycleDataStreamWithLifecycle { name: DataStreamName lifecycle?: IndicesDataStreamLifecycleWithRollover From ac20b78476cbb7c0b926e40b8923ce95699d9818 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 1 Jul 2025 15:58:20 -0500 Subject: [PATCH 583/647] Fix run condition on unstable builds workflow (#2887) --- .github/workflows/npm-publish-unstable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index 17a593c2f..2240cd7d2 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -34,7 +34,7 @@ jobs: test: name: Run tests and publish unstable - if: needs.paths-filter.outputs.src == 'true' + if: ${{ needs.paths-filter.outputs.src == 'true' }} runs-on: ubuntu-latest steps: # pause for 30 minutes to avoid publishing more than 2x per hour From 7be0f90526c3c4de51c7880bb8bed0edc3625889 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 1 Jul 2025 16:16:06 -0500 Subject: [PATCH 584/647] Bump to 9.0.3 (#2888) --- docs/release-notes/index.md | 6 ++++++ package.json | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md index 8582db6cb..be7e11e09 100644 --- a/docs/release-notes/index.md +++ b/docs/release-notes/index.md @@ -20,6 +20,12 @@ To check for security updates, go to [Security announcements for the Elastic sta % ### Fixes [elasticsearch-javascript-client-next-fixes] % \* +## 9.0.3 + +### Fixes [elasticsearch-javascript-client-9.0.3-fixes] + +**Improved compatibility with Elasticsearch 9.0:** Several fixes and improvements have been made to APIs and TypeScript type definitions to better reflect the Elasticsearch 9.0 specification.
+ ## 9.0.2 ### Fixes [elasticsearch-javascript-client-9.0.2-fixes] diff --git a/package.json b/package.json index f0b706ac1..f1db03ffe 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@elastic/elasticsearch", - "version": "9.0.2", + "version": "9.0.3", "versionCanary": "9.0.2-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", From 1e6cf8b58eda7ac804dfd2e5498dcb7a2a5c5a22 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 7 Jul 2025 11:31:46 -0500 Subject: [PATCH 585/647] Deep merge options on client instantiation (#2890) --- src/client.ts | 13 +++++-------- test/unit/client.test.ts | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/src/client.ts b/src/client.ts index f758553e2..7e303bd52 100644 --- a/src/client.ts +++ b/src/client.ts @@ -256,11 +256,13 @@ export default class Client extends API { } } - const headers: Record<string, any> = { + const headers: Record<string, any> = Object.assign({}, { 'user-agent': `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${nodeVersion}; Transport ${transportVersion})` - } + }, opts.headers ?? {}) if (opts.serverMode === 'serverless') headers['elastic-api-version'] = serverlessApiVersion + const redaction = Object.assign({}, { type: 'replace', additionalKeys: [] }, opts.redaction ?? {}) + const options: Required<ClientOptions> = Object.assign({}, { Connection: UndiciConnection, Transport: opts.serverMode === 'serverless' ? Transport : SniffingTransport, @@ -277,7 +279,6 @@ export default class Client extends API { tls: null, caFingerprint: null, agent: null, - headers, nodeFilter: null, generateRequestId: null, name: 'elasticsearch-js', @@ -288,12 +289,8 @@ export default class Client extends API { enableMetaHeader: true, maxResponseSize: null, maxCompressedResponseSize: null, - redaction: { - type: 'replace', - additionalKeys: [] - }, serverMode: 'stack' - }, opts) + }, opts, { headers, redaction }) if (options.caFingerprint != null && isHttpConnection(opts.node ??
opts.nodes)) { throw new errors.ConfigurationError('You can\'t configure the caFingerprint with a http connection') diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts index fc4016a22..feffc373e 100644 --- a/test/unit/client.test.ts +++ b/test/unit/client.test.ts @@ -98,6 +98,28 @@ test('Custom headers', t => { t.end() }) +test('Custom headers should merge, not overwrite', t => { + const client = new Client({ + node: 'http://localhost:9200', + headers: { foo: 'bar' } + }) + t.ok(client.transport[symbols.kHeaders]['user-agent']?.startsWith('elasticsearch-js/')) + t.end() +}) + +test('Redaction options should merge, not overwrite', t => { + const client = new Client({ + node: 'http://localhost:9200', + // @ts-expect-error + redaction: { + additionalKeys: ['foo'], + } + }) + t.equal(client.transport[symbols.kRedaction].type, 'replace') + t.match(client.transport[symbols.kRedaction].additionalKeys, ['foo']) + t.end() +}) + test('Basic auth', async t => { t.plan(1) From 94b06bc2a8e85942af9d502200c381b323759a37 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 7 Jul 2025 13:44:10 -0500 Subject: [PATCH 586/647] Update dependency zx to v8.6.1 (#2893) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index f1db03ffe..1a701b650 100644 --- a/package.json +++ b/package.json @@ -88,7 +88,7 @@ "typescript": "5.8.3", "workq": "3.0.0", "xmlbuilder2": "3.1.1", - "zx": "8.5.5" + "zx": "8.6.1" }, "dependencies": { "@elastic/transport": "^9.0.1", From f5611e723ffc316ece6a7f62e5c04c716008a06e Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 7 Jul 2025 13:47:18 -0500 Subject: [PATCH 587/647] Update dependency @types/node to v22.16.0 (#2892) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 1a701b650..4262d2e32 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.15.32", + "@types/node": "22.16.0", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 1ff79eb2b05f9738e0049cee308a3cb423af2bf5 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 7 Jul 2025 14:51:38 -0400 Subject: [PATCH 588/647] Auto-generated API code (#2896) Co-authored-by: Josh Mock --- docs/reference/api-reference.md | 166 +++++++++++------- src/api/api/indices.ts | 126 +++++++++++++- src/api/api/inference.ts | 126 +++++++++++++- src/api/api/rollup.ts | 2 +- src/api/api/synonyms.ts | 2 +- src/api/api/update.ts | 2 +- src/api/api/update_by_query.ts | 2 +- src/api/types.ts | 292 ++++++++++++++++++++++++++------ 8 files changed, 589 insertions(+), 129 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 681b7b0d0..323a5a73b 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -1857,6 +1857,7 @@ The document must still be reindexed, but using this API removes some network ro The `_source` field must be enabled to use this API.
In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). +For usage examples such as partial updates, upserts, and scripted updates, see the External documentation. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update) @@ -1914,6 +1915,30 @@ A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick, they are not rolled back. +**Refreshing shards** + +Specifying the `refresh` parameter refreshes all shards once the request completes. +This is different to the update API's `refresh` parameter, which causes only the shard +that received the request to be refreshed. Unlike the update API, it does not support +`wait_for`. + +**Running update by query asynchronously** + +If the request contains `wait_for_completion=false`, Elasticsearch +performs some preflight checks, launches the request, and returns a +[task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) you can use to cancel or get the status of the task. +Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. + +**Waiting for active shards** + +`wait_for_active_shards` controls how many copies of a shard must be active +before proceeding with the request. See [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards) +for details. `timeout` controls how long each write request waits for unavailable +shards to become available. Both work exactly the way they work in the +[Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk). Update by query uses scrolled searches, so you can also +specify the `scroll` parameter to control how long it keeps the search context +alive, for example `?scroll=10m`. The default is 5 minutes. + **Throttling update requests** To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. @@ -1958,22 +1983,7 @@ If you're slicing manually or otherwise tuning automatic slicing, keep in mind t * Update performance scales linearly across available resources with the number of slices. Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. - -**Update the document source** - -Update by query supports scripts to update the document source. -As with the update API, you can set `ctx.op` to change the operation that is performed. - -Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. -The update by query operation skips updating the document and increments the `noop` counter. - -Set `ctx.op = "delete"` if your script decides that the document should be deleted. -The update by query operation deletes the document and increments the `deleted` counter. - -Update by query supports only `index`, `noop`, and `delete`. -Setting `ctx.op` to anything else is an error. -Setting any other field in `ctx` is an error. -This API enables you to only modify the source of matching documents; you cannot move them. 
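As an illustrative aside (not part of the patch or the generated reference), a minimal sketch of an update-by-query call that combines the throttling, slicing, and scripted-update options described above might look like this; the index name, query, and Painless script are hypothetical example values, and `client` is assumed to be an already configured `Client` instance:

```ts
// Sketch: scripted update by query with automatic slicing and throttling.
const response = await client.updateByQuery({
  index: 'my-index',          // example index name
  conflicts: 'proceed',       // don't abort on version conflicts
  slices: 'auto',             // let Elasticsearch pick the slice count
  requests_per_second: 100,   // throttle the batches of update operations
  query: { term: { 'user.id': 'kimchy' } },
  script: { source: 'ctx._source.count++', lang: 'painless' }
})
console.log(response.updated, response.version_conflicts)
```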
+Refer to the linked documentation for examples of how to update documents using the `_update_by_query` API: [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query) @@ -2795,11 +2805,12 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para - **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. - **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, Enum("index" \| "shard" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis") \| Enum("index" \| "shard" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis")[])**: A list of column names to display. +It supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.repositories [_cat.repositories] Get snapshot repository information. @@ -2846,8 +2857,9 @@ client.cat.segments({ ... }) Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id") \| Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id")[])**: A list of column names to display. +It supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order.
Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the @@ -2876,12 +2888,12 @@ client.cat.shards({ ... }) Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason") \| Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| 
"seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason")[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.snapshots [_cat.snapshots] Get snapshot information. @@ -2904,7 +2916,8 @@ Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. - **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason") \| Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| 
"segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version")[])**: A list of columns names to display. +It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2985,8 +2998,8 @@ client.cat.threadPool({ ... }) #### Request (object) [_request_cat.thread_pool] - **`thread_pool_patterns` (Optional, string \| string[])**: A list of thread pool names used to limit the request. Accepts wildcard expressions. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, Enum("active" \| "completed" \| "core" \| "ephemeral_id" \| "host" \| "ip" \| "keep_alive" \| "largest" \| "max" \| "name" \| "node_id" \| "node_name" \| "pid" \| "pool_size" \| "port" \| "queue" \| "queue_size" \| "rejected" \| "size" \| "type") \| Enum("active" \| "completed" \| "core" \| "ephemeral_id" \| "host" \| "ip" \| "keep_alive" \| "largest" \| "max" \| "name" \| "node_id" \| "node_name" \| "pid" \| "pool_size" \| "port" \| "queue" \| "queue_size" \| "rejected" \| "size" \| "type")[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. @@ -2994,7 +3007,7 @@ or `:desc` as a suffix to the column name. local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. -- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. ## client.cat.transforms [_cat.transforms] Get transform information. @@ -5265,7 +5278,7 @@ This could be a built-in analyzer, or an analyzer that’s been configured in th - **`field` (Optional, string)**: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. 
-- **`filter` (Optional, string \| { type } \| { type } \| { type, preserve_original } \| { type, ignored_scripts, output_unigrams } \| { type } \| { type } \| { type, common_words, common_words_path, ignore_case, query_mode } \| { type, filter, script } \| { type } \| { type, delimiter, encoding } \| { type, max_gram, min_gram, side, preserve_original } \| { type, articles, articles_path, articles_case } \| { type, max_output_size, separator } \| { type } \| { type } \| { type } \| { type, dedup, dictionary, locale, longest_only } \| { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } \| { type } \| { type, mode, types } \| { type, keep_words, keep_words_case, keep_words_path } \| { type, ignore_case, keywords, keywords_path, keywords_pattern } \| { type } \| { type } \| { type, max, min } \| { type, consume_all_tokens, max_token_count } \| { type, language } \| { type, bucket_count, hash_count, hash_set_size, with_rotation } \| { type, filters, preserve_original } \| { type, max_gram, min_gram, preserve_original } \| { type, stoptags } \| { type, patterns, preserve_original } \| { type, all, pattern, replacement } \| { type } \| { type } \| { type, script } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } \| { type, language } \| { type } \| { type, rules, rules_path } \| { type, language } \| { type, ignore_case, remove_trailing, stopwords, stopwords_path } \| { type } \| { type } \| { type } \| { type, length } \| { type, only_on_same_position } \| { type } \| { type, adjust_offsets, ignore_keywords } \| { type } \| { type, stopwords } \| { type, minimum_length } \| { type, use_romaji } \| { type, stoptags } \| { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } \| { type, unicode_set_filter } \| { type, name } \| { type, dir, id } \| { type, encoder, languageset, max_code_len, name_type, replace, rule_type } \| { type }[])**: Array of token filters used to apply after the tokenizer. 
+- **`filter` (Optional, string \| { type } \| { type } \| { type } \| { type, preserve_original } \| { type } \| { type } \| { type, ignored_scripts, output_unigrams } \| { type } \| { type } \| { type, common_words, common_words_path, ignore_case, query_mode } \| { type, filter, script } \| { type } \| { type } \| { type, delimiter, encoding } \| { type } \| { type, max_gram, min_gram, side, preserve_original } \| { type, articles, articles_path, articles_case } \| { type, max_output_size, separator } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, dedup, dictionary, locale, longest_only } \| { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } \| { type } \| { type, mode, types } \| { type, keep_words, keep_words_case, keep_words_path } \| { type, ignore_case, keywords, keywords_path, keywords_pattern } \| { type } \| { type } \| { type, max, min } \| { type, consume_all_tokens, max_token_count } \| { type, language } \| { type, bucket_count, hash_count, hash_set_size, with_rotation } \| { type, filters, preserve_original } \| { type, max_gram, min_gram, preserve_original } \| { type, stoptags } \| { type, patterns, preserve_original } \| { type, all, flags, pattern, replacement } \| { type } \| { type } \| { type } \| { type, script } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } \| { type, language } \| { type } \| { type, rules, rules_path } \| { type, language } \| { type, ignore_case, remove_trailing, stopwords, stopwords_path } \| { type } \| { type } \| { type } \| { type, length } \| { type, only_on_same_position } \| { type } \| { type, adjust_offsets, ignore_keywords } \| { type } \| { type, stopwords } \| { type, minimum_length } \| { type, use_romaji } \| { type, stoptags } \| { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } \| { type, unicode_set_filter } \| { type, name } \| { type, dir, id } \| { type, encoder, languageset, max_code_len, name_type, replace, rule_type } \| { type }[])**: Array of token filters used to apply after the tokenizer. - **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token. - **`text` (Optional, string \| string[])**: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. @@ -6659,7 +6672,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. - **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. -- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: +- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -6928,14 +6941,38 @@ client.indices.reloadSearchAnalyzers({ index }) - **`resource` (Optional, string)**: Changed resource to reload analyzers from if applicable ## client.indices.removeBlock [_indices.remove_block] -Removes a block from an index. +Remove an index block. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html) +Remove an index block from an index. +Index blocks limit the operations allowed on an index by blocking specific operation types. 
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-remove-block) ```ts -client.indices.removeBlock() +client.indices.removeBlock({ index, block }) ``` +### Arguments [_arguments_indices.remove_block] + +#### Request (object) [_request_indices.remove_block] +- **`index` (string)**: A list or wildcard expression of index names used to limit the request. +By default, you must explicitly name the indices you are removing blocks from. +To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. +You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. +- **`block` (Enum("metadata" \| "read" \| "read_only" \| "write"))**: The block type to remove from the index. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +It supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +It can also be set to `-1` to indicate that the request should never timeout. ## client.indices.resolveCluster [_indices.resolve_cluster] Resolve the cluster. @@ -7563,6 +7600,7 @@ The following integrations are available through the inference API. You can find * Azure AI Studio (`completion`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) +* DeepSeek (`completion`, `chat_completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) @@ -7634,6 +7672,16 @@ client.inference.putAmazonbedrock({ task_type, amazonbedrock_inference_id, servi - **`task_settings` (Optional, { max_new_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. These settings are specific to the task type you specified. 
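Returning to the `removeBlock` API documented above, here is an illustrative aside (not part of the generated reference): a minimal sketch of lifting a write block, where the index name and timeout values are hypothetical examples and `client` is assumed to be an already configured `Client` instance:

```ts
// Sketch: remove a previously applied write block from an index.
const resp = await client.indices.removeBlock({
  index: 'my-index',     // example index assumed to have a write block applied
  block: 'write',        // one of: 'metadata' | 'read' | 'read_only' | 'write'
  timeout: '30s',        // wait for cluster-metadata acknowledgement
  master_timeout: '30s'  // wait for the master node
})
console.log(resp)
```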
+## client.inference.putAmazonsagemaker [_inference.put_amazonsagemaker] +Configure an Amazon SageMaker inference endpoint + +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-amazon-sagemaker.html) + +```ts +client.inference.putAmazonsagemaker() +``` + + ## client.inference.putAnthropic [_inference.put_anthropic] Create an Anthropic inference endpoint. @@ -7732,6 +7780,27 @@ These settings are specific to the `cohere` service. - **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task. These settings are specific to the task type you specified. +## client.inference.putDeepseek [_inference.put_deepseek] +Create a DeepSeek inference endpoint. + +Create an inference endpoint to perform an inference task with the `deepseek` service. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-deepseek) + +```ts +client.inference.putDeepseek({ task_type, deepseek_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_deepseek] + +#### Request (object) [_request_inference.put_deepseek] +- **`task_type` (Enum("completion" \| "chat_completion"))**: The type of the inference task that the model will perform. +- **`deepseek_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("deepseek"))**: The type of service supported for the specified task type. In this case, `deepseek`. +- **`service_settings` ({ api_key, model_id, url })**: Settings used to install the inference model. +These settings are specific to the `deepseek` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. + ## client.inference.putElasticsearch [_inference.put_elasticsearch] Create an Elasticsearch inference endpoint. @@ -10997,32 +11066,7 @@ The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. -**Searching both historical rollup and non-rollup data** - -The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. -This is done by simply adding the live indices to the URI. For example: - -``` -GET sensor-1,sensor_rollup/_rollup_search -{ - "size": 0, - "aggregations": { - "max_temperature": { - "max": { - "field": "temperature" - } - } - } -} -``` - -The rollup search endpoint does two things when the search runs: - -* The original request is sent to the non-rollup index unaltered. -* A rewritten version of the original request is sent to the rollup index. - -When the two responses are received, the endpoint rewrites the rollup response and merges the two together. -During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. +For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search) @@ -14303,6 +14347,8 @@ If you need to manage more synonym rules, you can create multiple synonym sets.
When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. +For practical examples of how to create or update a synonyms set, refer to the External documentation. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym) ```ts diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 8a9c806e1..296d93e83 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -419,6 +419,13 @@ export default class Indices { 'verbose' ] }, + 'indices.get_data_stream_mappings': { + path: [ + 'name' + ], + body: [], + query: [] + }, 'indices.get_data_stream_options': { path: [ 'name' @@ -589,6 +596,13 @@ export default class Indices { 'timeout' ] }, + 'indices.put_data_stream_mappings': { + path: [ + 'name' + ], + body: [], + query: [] + }, 'indices.put_data_stream_options': { path: [ 'name' @@ -739,7 +753,13 @@ export default class Indices { 'block' ], body: [], - query: [] + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] }, 'indices.resolve_cluster': { path: [ @@ -2486,6 +2506,51 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Gets a data stream's mappings + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + */ + async getDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_stream_mappings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_mappings` + const meta: TransportRequestMetadata = { + name: 'indices.get_data_stream_mappings', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Get data stream options. Get the data stream options configuration of one or more data streams. 
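+   *
+   * A minimal usage sketch; the data stream name is illustrative and the exact request shape is an assumption, not part of this change:
+   *
+   * ```ts
+   * const options = await client.indices.getDataStreamOptions({ name: 'my-data-stream' })
+   * ```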
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} @@ -3248,6 +3313,51 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Updates a data stream's mappings + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + */ + async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.put_data_stream_mappings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_mappings` + const meta: TransportRequestMetadata = { + name: 'indices.put_data_stream_mappings', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Update data stream options. Update the data stream options of the specified data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} @@ -3729,13 +3839,13 @@ export default class Indices { } /** - * Removes a block from an index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html | Elasticsearch API documentation} + * Remove an index block. Remove an index block from an index. Index blocks limit the operations allowed on an index by blocking specific operation types. 
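+   *
+   * A minimal usage sketch; the index name is illustrative. Per the types added in this change, `block` accepts `metadata`, `read`, `read_only`, or `write`:
+   *
+   * ```ts
+   * await client.indices.removeBlock({ index: 'my-index', block: 'write' })
+   * ```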
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-remove-block | Elasticsearch API documentation} */ - async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptionsWithMeta): Promise> + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptions): Promise + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath } = this.acceptedParams['indices.remove_block'] @@ -3753,11 +3863,11 @@ export default class Indices { } } - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index f50074f85..8f0287319 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -127,6 +127,14 @@ export default class Inference { ], query: [] }, + 'inference.put_amazonsagemaker': { + path: [ + 'task_type', + 'amazonsagemaker_inference_id' + ], + body: [], + query: [] + }, 'inference.put_anthropic': { path: [ 'task_type', @@ -179,6 +187,18 @@ export default class Inference { ], query: [] }, + 'inference.put_deepseek': { + path: [ + 'task_type', + 'deepseek_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [] + }, 'inference.put_elasticsearch': { path: [ 'task_type', @@ -644,7 +664,7 @@ export default class Inference { } /** - * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. 
You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`) + * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`completion`, `chat_completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`) * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} */ async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -814,6 +834,52 @@ export default class Inference { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Configure a Amazon SageMaker inference endpoint + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-service-amazon-sagemaker.html | Elasticsearch API documentation} + */ + async putAmazonsagemaker (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putAmazonsagemaker (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putAmazonsagemaker (this: That, params?: T.TODO, options?: 
TransportRequestOptions): Promise + async putAmazonsagemaker (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['inference.put_amazonsagemaker'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.amazonsagemaker_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_amazonsagemaker', + pathParts: { + task_type: params.task_type, + amazonsagemaker_inference_id: params.amazonsagemaker_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic | Elasticsearch API documentation} @@ -1046,6 +1112,64 @@ export default class Inference { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Create a DeepSeek inference endpoint. Create an inference endpoint to perform an inference task with the `deepseek` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-deepseek | Elasticsearch API documentation} + */ + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptions): Promise + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_deepseek'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.deepseek_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_deepseek', + pathParts: { + task_type: params.task_type, + deepseek_inference_id: params.deepseek_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings. If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch | Elasticsearch API documentation} diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index 3a27e3549..bc6df9264 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -368,7 +368,7 @@ export default class Rollup { } /** - * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. **Searching both historical rollup and non-rollup data** The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" } } } } ``` The rollup search endpoint does two things when the search runs: * The original request is sent to the non-rollup index unaltered. * A rewritten version of the original request is sent to the rollup index. When the two responses are received, the endpoint rewrites the rollup response and merges the two together. 
During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. + * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search | Elasticsearch API documentation} */ async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 79888212b..d2b3511a5 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -330,7 +330,7 @@ export default class Synonyms { } /** - * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. + * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. For practical examples of how to create or update a synonyms set, refer to the External documentation. 
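+   *
+   * A minimal sketch of creating or replacing a set; the set id and rule are illustrative, and the rule shape is an assumption, not part of this change:
+   *
+   * ```ts
+   * await client.synonyms.putSynonym({
+   *   id: 'my-synonyms-set',
+   *   synonyms_set: [{ synonyms: 'hello, hi, howdy' }]
+   * })
+   * ```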
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym | Elasticsearch API documentation} */ async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/update.ts b/src/api/api/update.ts index 264881ce8..982b35272 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -62,7 +62,7 @@ const acceptedParams: Record (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts index 487bc580a..97097ea65 100644 --- a/src/api/api/update_by_query.ts +++ b/src/api/api/update_by_query.ts @@ -77,7 +77,7 @@ const acceptedParams: Record diff --git a/src/api/types.ts b/src/api/types.ts index c03fd02cd..c5a7bb6b7 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -4488,6 +4488,7 @@ export interface SearchStats { suggest_time?: Duration suggest_time_in_millis: DurationValue suggest_total: long + recent_search_load?: double groups?: Record } @@ -4691,6 +4692,15 @@ export type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd' export type TimeZone = string +export interface TokenPruningConfig { + /** Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */ + tokens_freq_ratio_threshold?: integer + /** Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */ + tokens_weight_threshold?: float + /** Whether to only score pruned tokens, vs only scoring kept tokens. */ + only_score_pruned_tokens?: boolean +} + export interface TopLeftBottomRightGeoBounds { top_left: GeoLocation bottom_right: GeoLocation @@ -6730,6 +6740,10 @@ export interface AnalysisArabicNormalizationTokenFilter extends AnalysisTokenFil type: 'arabic_normalization' } +export interface AnalysisArabicStemTokenFilter extends AnalysisTokenFilterBase { + type: 'arabic_stem' +} + export interface AnalysisArmenianAnalyzer { type: 'armenian' stopwords?: AnalysisStopWords @@ -6757,12 +6771,20 @@ export interface AnalysisBengaliAnalyzer { stem_exclusion?: string[] } +export interface AnalysisBengaliNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'bengali_normalization' +} + export interface AnalysisBrazilianAnalyzer { type: 'brazilian' stopwords?: AnalysisStopWords stopwords_path?: string } +export interface AnalysisBrazilianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'brazilian_stem' +} + export interface AnalysisBulgarianAnalyzer { type: 'bulgarian' stopwords?: AnalysisStopWords @@ -6892,6 +6914,10 @@ export interface AnalysisCzechAnalyzer { stem_exclusion?: string[] } +export interface AnalysisCzechStemTokenFilter extends AnalysisTokenFilterBase { + type: 'czech_stem' +} + export interface AnalysisDanishAnalyzer { type: 'danish' stopwords?: AnalysisStopWords @@ -6923,6 +6949,10 @@ export interface AnalysisDutchAnalyzer { stem_exclusion?: string[] } +export interface AnalysisDutchStemTokenFilter extends AnalysisTokenFilterBase { + type: 'dutch_stem' +} + export type AnalysisEdgeNGramSide = 'front' | 'back' export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { @@ -6942,7 +6972,7 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { custom_token_chars?: string max_gram?: integer min_gram?: integer - token_chars?: AnalysisTokenChar[] + token_chars?: string | 
AnalysisTokenChar[] } export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { @@ -7015,6 +7045,10 @@ export interface AnalysisFrenchAnalyzer { stem_exclusion?: string[] } +export interface AnalysisFrenchStemTokenFilter extends AnalysisTokenFilterBase { + type: 'french_stem' +} + export interface AnalysisGalicianAnalyzer { type: 'galician' stopwords?: AnalysisStopWords @@ -7033,6 +7067,10 @@ export interface AnalysisGermanNormalizationTokenFilter extends AnalysisTokenFil type: 'german_normalization' } +export interface AnalysisGermanStemTokenFilter extends AnalysisTokenFilterBase { + type: 'german_stem' +} + export interface AnalysisGreekAnalyzer { type: 'greek' stopwords?: AnalysisStopWords @@ -7109,16 +7147,16 @@ export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase { type: 'icu_collation' alternate?: AnalysisIcuCollationAlternate - case_first?: AnalysisIcuCollationCaseFirst - case_level?: boolean + caseFirst?: AnalysisIcuCollationCaseFirst + caseLevel?: boolean country?: string decomposition?: AnalysisIcuCollationDecomposition - hiragana_quaternary_mode?: boolean + hiraganaQuaternaryMode?: boolean language?: string numeric?: boolean rules?: string strength?: AnalysisIcuCollationStrength - variable_top?: string + variableTop?: string variant?: string } @@ -7457,6 +7495,7 @@ export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBa type: 'pattern_replace' /** If `true`, all substrings matching the pattern parameter’s regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. Defaults to `true`. */ all?: boolean + flags?: string /** Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter. */ pattern: string /** Replacement substring. Defaults to an empty substring (`""`). 
*/ @@ -7480,6 +7519,10 @@ export interface AnalysisPersianNormalizationTokenFilter extends AnalysisTokenFi type: 'persian_normalization' } +export interface AnalysisPersianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'persian_stem' +} + export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish' @@ -7537,6 +7580,10 @@ export interface AnalysisRussianAnalyzer { stem_exclusion?: string[] } +export interface AnalysisRussianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'russian_stem' +} + export interface AnalysisScandinavianFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'scandinavian_folding' } @@ -7733,7 +7780,7 @@ export interface AnalysisTokenFilterBase { version?: VersionString } -export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter +export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicStemTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | 
AnalysisBengaliNormalizationTokenFilter | AnalysisBrazilianStemTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisCzechStemTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisDutchStemTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisFrenchStemTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisGermanStemTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPersianStemTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisRussianStemTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter export type AnalysisTokenizer = string | AnalysisTokenizerDefinition @@ -7830,6 +7877,7 @@ export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilte export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase { type: 'aggregate_metric_double' default_metric: string + ignore_malformed?: boolean metrics: string[] time_series_metric?: MappingTimeSeriesMetricType } @@ -8099,6 +8147,7 @@ export interface MappingFlattenedProperty extends MappingPropertyBase { null_value?: string similarity?: string split_queries_on_whitespace?: boolean + time_series_dimensions?: string[] type: 'flattened' } @@ -8113,6 +8162,8 @@ export interface MappingFloatRangeProperty extends MappingRangePropertyBase { export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw' +export type MappingGeoPointMetricType = 'gauge' | 'counter' | 'position' + export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean ignore_z_value?: boolean @@ -8121,6 +8172,7 @@ export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { 
on_script_error?: MappingOnScriptError script?: Script | ScriptSource type: 'geo_point' + time_series_metric?: MappingGeoPointMetricType } export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { @@ -8298,7 +8350,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { type: 'point' } -export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty +export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingRankVectorProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty export interface MappingPropertyBase { /** Metadata about the field. 
*/ @@ -8326,6 +8378,14 @@ export interface MappingRankFeaturesProperty extends MappingPropertyBase { type: 'rank_features' } +export type MappingRankVectorElementType = 'byte' | 'float' | 'bit' + +export interface MappingRankVectorProperty extends MappingPropertyBase { + type: 'rank_vectors' + element_type?: MappingRankVectorElementType + dims?: integer +} + export interface MappingRoutingField { required: boolean } @@ -8429,8 +8489,24 @@ export interface MappingSourceField { export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic' +export interface MappingSparseVectorIndexOptions { + /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. + * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. + * Default: false */ + prune?: boolean + /** Optional pruning configuration. + * If enabled, this will omit non-significant tokens from the query in order to improve query performance. + * This is only used if prune is set to true. + * If prune is set to true but pruning_config is not specified, default values will be used. */ + pruning_config?: TokenPruningConfig +} + export interface MappingSparseVectorProperty extends MappingPropertyBase { + store?: boolean type: 'sparse_vector' + /** Additional index options for the sparse vector field that controls the + * token pruning behavior of the sparse vector field. */ + index_options?: MappingSparseVectorIndexOptions } export type MappingSubobjects = boolean | 'true' | 'false' | 'auto' @@ -9742,7 +9818,7 @@ export interface QueryDslSparseVectorQuery extends QueryDslQueryBase { * If enabled, this will omit non-significant tokens from the query in order to improve query performance. * This is only used if prune is set to true. * If prune is set to true but pruning_config is not specified, default values will be used. */ - pruning_config?: QueryDslTokenPruningConfig + pruning_config?: TokenPruningConfig } export interface QueryDslTermQuery extends QueryDslQueryBase { @@ -9788,20 +9864,11 @@ export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { model_text: string /** Token pruning configurations * @experimental */ - pruning_config?: QueryDslTokenPruningConfig + pruning_config?: TokenPruningConfig } export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' -export interface QueryDslTokenPruningConfig { - /** Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */ - tokens_freq_ratio_threshold?: integer - /** Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */ - tokens_weight_threshold?: float - /** Whether to only score pruned tokens, vs only scoring kept tokens. 
*/ - only_score_pruned_tokens?: boolean -} - export interface QueryDslTypeQuery extends QueryDslQueryBase { value: string } @@ -9825,7 +9892,7 @@ export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { /** The tokens representing this query */ tokens: Record | Record[] /** Token pruning configurations */ - pruning_config?: QueryDslTokenPruningConfig + pruning_config?: TokenPruningConfig } export interface QueryDslWildcardQuery extends QueryDslQueryBase { @@ -10215,9 +10282,29 @@ export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'compl export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[] +export type CatCatRecoveryColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'time' | 't' | 'ti' | 'primaryOrReplica' | 'type' | 'stage' | 'st' | 'source_host' | 'shost' | 'source_node' | 'snode' | 'target_host' | 'thost' | 'target_node' | 'tnode' | 'repository' | 'tnode' | 'snapshot' | 'snap' | 'files' | 'f' | 'files_recovered' | 'fr' | 'files_percent' | 'fp' | 'files_total' | 'tf' | 'bytes' | 'b' | 'bytes_recovered' | 'br' | 'bytes_percent' | 'bp' | 'bytes_total' | 'tb' | 'translog_ops' | 'to' | 'translog_ops_recovered' | 'tor' | 'translog_ops_percent' | 'top' | 'start_time' | 'start' | 'start_time_millis' | 'start_millis' | 'stop_time' | 'stop' | 'stop_time_millis' | 'stop_millis' | string + +export type CatCatRecoveryColumns = CatCatRecoveryColumn | CatCatRecoveryColumn[] + export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } +export type CatCatSegmentsColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 'ip' | 'segment' | 'generation' | 'docs.count' | 'docs.deleted' | 'size' | 'size.memory' | 'committed' | 'searchable' | 'version' | 'compound' | 'id' | string + +export type CatCatSegmentsColumns = CatCatSegmentsColumn | CatCatSegmentsColumn[] + +export type CatCatShardColumn = 'completion.size' | 'cs' | 'completionSize' | 'dataset.size' | 'dense_vector.value_count' | 'dvc' | 'denseVectorCount' | 'docs' | 'd' | 'dc' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'id' | 'index' | 'i' | 'idx' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'node' | 'n' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 
'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'recoverysource.type' | 'rs' | 'refresh.time' | 'rti' | 'refreshTime' | 'refresh.total' | 'rto' | 'refreshTotal' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'seq_no.global_checkpoint' | 'sqg' | 'globalCheckpoint' | 'seq_no.local_checkpoint' | 'sql' | 'localCheckpoint' | 'seq_no.max' | 'sqm' | 'maxSeqNo' | 'shard' | 's' | 'sh' | 'dsparse_vector.value_count' | 'svc' | 'sparseVectorCount' | 'state' | 'st' | 'store' | 'sto' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'sync_id' | 'unassigned.at' | 'ua' | 'unassigned.details' | 'ud' | 'unassigned.for' | 'uf' | 'unassigned.reason' | 'ur' | string + +export type CatCatShardColumns = CatCatShardColumn | CatCatShardColumn[] + +export type CatCatSnapshotsColumn = 'id' | 'snapshot' | 'repository' | 're' | 'repo' | 'status' | 's' | 'start_epoch' | 'ste' | 'startEpoch' | 'start_time' | 'sti' | 'startTime' | 'end_epoch' | 'ete' | 'endEpoch' | 'end_time' | 'eti' | 'endTime' | 'duration' | 'dur' | 'indices' | 'i' | 'successful_shards' | 'ss' | 'failed_shards' | 'fs' | 'total_shards' | 'ts' | 'reason' | 'r' | string + +export type CatCatSnapshotsColumns = CatCatSnapshotsColumn | CatCatNodeColumn[] + +export type CatCatThreadPoolColumn = 'active' | 'a' | 'completed' | 'c' | 'core' | 'cr' | 'ephemeral_id' | 'eid' | 'host' | 'h' | 'ip' | 'i' | 'keep_alive' | 'k' | 'largest' | 'l' | 'max' | 'mx' | 'name' | 'node_id' | 'id' | 'node_name' | 'pid' | 'p' | 'pool_size' | 'psz' | 'port' | 'po' | 'queue' | 'q' | 'queue_size' | 'qs' | 'rejected' | 'r' | 'size' | 'sz' | 'type' | 't' | string + +export type CatCatThreadPoolColumns = CatCatThreadPoolColumn | CatCatThreadPoolColumn[] + export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v' export type CatCatTrainedModelsColumns = CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[] @@ -13543,13 +13630,14 @@ export interface CatRecoveryRequest extends CatCatRequestBase { bytes?: Bytes /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean - /** List of columns to appear in the response. Supports simple wildcards. 
*/ - h?: Names - /** List of columns that determine how the table should be sorted. + /** A comma-separated list of columns names to display. + * It supports simple wildcards. */ + h?: CatCatRecoveryColumns + /** A comma-separated list of column names or aliases that determines the sort order. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names - /** Unit used to display time values. */ + /** The unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, h?: never, s?: never, time?: never } @@ -13601,9 +13689,10 @@ export interface CatSegmentsRequest extends CatCatRequestBase { index?: Indices /** The unit used to display byte values. */ bytes?: Bytes - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names - /** List of columns that determine how the table should be sorted. + /** A comma-separated list of columns names to display. + * It supports simple wildcards. */ + h?: CatCatSegmentsColumns + /** A comma-separated list of column names or aliases that determines the sort order. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names @@ -13768,14 +13857,14 @@ export interface CatShardsRequest extends CatCatRequestBase { /** The unit used to display byte values. */ bytes?: Bytes /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names - /** List of columns that determine how the table should be sorted. + h?: CatCatShardColumns + /** A comma-separated list of column names or aliases that determines the sort order. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** Unit used to display time values. */ + /** The unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, master_timeout?: never, time?: never } @@ -14405,8 +14494,9 @@ export interface CatSnapshotsRequest extends CatCatRequestBase { repository?: Names /** If `true`, the response does not include information from unavailable snapshots. */ ignore_unavailable?: boolean - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. + * It supports simple wildcards. */ + h?: CatCatSnapshotsColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -14692,8 +14782,8 @@ export interface CatThreadPoolRequest extends CatCatRequestBase { * Accepts wildcard expressions. */ thread_pool_patterns?: Names /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names - /** List of columns that determine how the table should be sorted. + h?: CatCatThreadPoolColumns + /** A comma-separated list of column names or aliases that determines the sort order. 
* Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names @@ -14704,7 +14794,7 @@ export interface CatThreadPoolRequest extends CatCatRequestBase { * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, time?: never, local?: never, master_timeout?: never } @@ -19119,6 +19209,8 @@ export interface IndicesIndexingSlowlogTresholds { index?: IndicesSlowlogTresholdLevels } +export type IndicesIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' + export type IndicesManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged' export interface IndicesMappingLimitSettings { @@ -19318,6 +19410,8 @@ export interface IndicesStorage { * setting is useful, for example, if you are in an environment where you can not control the ability to create a lot * of memory maps so you need disable the ability to use memory-mapping. */ allow_mmap?: boolean + /** How often store statistics are refreshed */ + stats_refresh_interval?: Duration } export type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs' | string @@ -19363,9 +19457,7 @@ export interface IndicesTranslogRetention { age?: Duration } -export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' - -export interface IndicesAddBlockIndicesBlockStatus { +export interface IndicesAddBlockAddIndicesBlockStatus { name: IndexName blocked: boolean } @@ -19377,7 +19469,7 @@ export interface IndicesAddBlockRequest extends RequestBase { * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ index: IndexName /** The block type to add to the index. */ - block: IndicesAddBlockIndicesBlockOptions + block: IndicesIndicesBlockOptions /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ @@ -19405,7 +19497,7 @@ export interface IndicesAddBlockRequest extends RequestBase { export interface IndicesAddBlockResponse { acknowledged: boolean shards_acknowledged: boolean - indices: IndicesAddBlockIndicesBlockStatus[] + indices: IndicesAddBlockAddIndicesBlockStatus[] } export interface IndicesAnalyzeAnalyzeDetail { @@ -21195,6 +21287,49 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult +export interface IndicesRemoveBlockRemoveIndicesBlockStatus { + name: IndexName + unblocked?: boolean + exception?: ErrorCause +} + +export interface IndicesRemoveBlockRequest extends RequestBase { + /** A comma-separated list or wildcard expression of index names used to limit the request. + * By default, you must explicitly name the indices you are removing blocks from. 
+ * To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. + * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ + index: IndexName + /** The block type to remove from the index. */ + block: IndicesIndicesBlockOptions + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * It can also be set to `-1` to indicate that the request should never timeout. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } +} + +export interface IndicesRemoveBlockResponse { + acknowledged: boolean + indices: IndicesRemoveBlockRemoveIndicesBlockStatus[] +} + export interface IndicesResolveClusterRequest extends RequestBase { /** A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. * Resources on remote clusters can be specified using the ``:`` syntax. @@ -22324,6 +22459,25 @@ export interface InferenceContentObject { type: string } +export interface InferenceDeepSeekServiceSettings { + /** A valid API key for your DeepSeek account. + * You can find or create your DeepSeek API keys on the DeepSeek API key page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** For a `completion` or `chat_completion` task, the name of the model to use for the inference task. 
+ * + * For the available `completion` and `chat_completion` models, refer to the [DeepSeek Models & Pricing docs](https://api-docs.deepseek.com/quick_start/pricing). */ + model_id: string + /** The URL endpoint to use for the requests. Defaults to `https://api.deepseek.com/chat/completions`. */ + url?: string +} + +export type InferenceDeepSeekServiceType = 'deepseek' + export interface InferenceDeleteInferenceEndpointResult extends AcknowledgedResponseBase { pipelines: string[] } @@ -22546,6 +22700,13 @@ export interface InferenceInferenceEndpointInfoCohere extends InferenceInference task_type: InferenceTaskTypeCohere } +export interface InferenceInferenceEndpointInfoDeepSeek extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeDeepSeek +} + export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string @@ -22899,6 +23060,8 @@ export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion' export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion' +export type InferenceTaskTypeDeepSeek = 'completion' | 'chat_completion' + export type InferenceTaskTypeELSER = 'sparse_embedding' export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embedding' | 'rerank' @@ -23259,6 +23422,26 @@ export interface InferencePutCohereRequest extends RequestBase { export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere +export interface InferencePutDeepseekRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceTaskTypeDeepSeek + /** The unique identifier of the inference endpoint. */ + deepseek_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `deepseek`. */ + service: InferenceDeepSeekServiceType + /** Settings used to install the inference model. + * These settings are specific to the `deepseek` service. */ + service_settings: InferenceDeepSeekServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutDeepseekResponse = InferenceInferenceEndpointInfoDeepSeek + export interface InferencePutElasticsearchRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceElasticsearchTaskType @@ -30537,15 +30720,16 @@ export interface NodesInfoNodeInfo { /** Short hash of the last git commit in this release. */ build_hash: string build_type: string + component_versions: Record /** The node’s host name. */ host: Host http?: NodesInfoNodeInfoHttp + index_version: VersionNumber /** The node’s IP address. 
*/ ip: Ip jvm?: NodesInfoNodeJvmInfo /** The node's name */ name: Name - network?: NodesInfoNodeInfoNetwork os?: NodesInfoNodeOperatingSystemInfo plugins?: PluginStats[] process?: NodesInfoNodeProcessInfo @@ -30559,11 +30743,13 @@ export interface NodesInfoNodeInfo { transport?: NodesInfoNodeInfoTransport /** Host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress + transport_version: VersionNumber /** Elasticsearch version running on this node. */ version: VersionString modules?: PluginStats[] ingest?: NodesInfoNodeInfoIngest aggregations?: Record + remote_cluster_server?: NodesInfoRemoveClusterServer } export interface NodesInfoNodeInfoAction { @@ -30583,7 +30769,7 @@ export interface NodesInfoNodeInfoClient { } export interface NodesInfoNodeInfoDiscoverKeys { - seed_hosts?: string[] + seed_hosts?: string[] | string type?: string seed_providers?: string[] } @@ -30631,17 +30817,6 @@ export interface NodesInfoNodeInfoMemory { total_in_bytes: long } -export interface NodesInfoNodeInfoNetwork { - primary_interface: NodesInfoNodeInfoNetworkInterface - refresh_interval: integer -} - -export interface NodesInfoNodeInfoNetworkInterface { - address: string - mac_address: string - name: Name -} - export interface NodesInfoNodeInfoOSCPU { cache_size: string cache_size_in_bytes: integer @@ -30703,7 +30878,7 @@ export interface NodesInfoNodeInfoSettingsCluster { name: Name routing?: IndicesIndexRouting election: NodesInfoNodeInfoSettingsClusterElection - initial_master_nodes?: string[] + initial_master_nodes?: string[] | string deprecation_indexing?: NodesInfoDeprecationIndexing } @@ -30773,6 +30948,8 @@ export interface NodesInfoNodeInfoSettingsTransport { type: NodesInfoNodeInfoSettingsTransportType | string 'type.default'?: string features?: NodesInfoNodeInfoSettingsTransportFeatures + /** Only used in unit tests */ + ignore_deserialization_errors?: SpecUtilsStringified } export interface NodesInfoNodeInfoSettingsTransportFeatures { @@ -30850,8 +31027,6 @@ export interface NodesInfoNodeJvmInfo { vm_vendor: string vm_version: VersionString using_bundled_jdk: boolean - /** @alias using_bundled_jdk */ - bundled_jdk: boolean using_compressed_ordinary_object_pointers?: boolean | string input_arguments: string[] } @@ -30893,6 +31068,11 @@ export interface NodesInfoNodeThreadPoolInfo { type: string } +export interface NodesInfoRemoveClusterServer { + bound_address: TransportAddress[] + publish_address: TransportAddress +} + export interface NodesInfoRequest extends RequestBase { /** Comma-separated list of node IDs or names used to limit returned information. 
*/ node_id?: NodeIds From eec75274ee3d7e67e7a91de850ee11ecaec87811 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 9 Jul 2025 11:26:18 -0500 Subject: [PATCH 589/647] Add rules to ensure unstable publishes work (#2901) --- .github/workflows/npm-publish-unstable.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index 2240cd7d2..f7d57d7f9 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -34,6 +34,7 @@ jobs: test: name: Run tests and publish unstable + needs: paths-filter if: ${{ needs.paths-filter.outputs.src == 'true' }} runs-on: ubuntu-latest steps: @@ -62,6 +63,9 @@ jobs: name: Publish unstable needs: test runs-on: ubuntu-latest + permissions: + contents: write + id-token: write steps: - name: npm publish run: | From 65b14a70048477488c9e2da20400c25c139aeb6b Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 14 Jul 2025 15:32:09 -0400 Subject: [PATCH 590/647] Auto-generated API code (#2908) --- docs/reference/api-reference.md | 85 ++++++++++++++--- src/api/api/cluster.ts | 2 + src/api/api/get.ts | 1 + src/api/api/get_source.ts | 1 - src/api/api/index.ts | 3 +- src/api/api/indices.ts | 70 ++++++++------ src/api/api/inference.ts | 3 +- src/api/api/ingest.ts | 4 +- src/api/api/license.ts | 2 +- src/api/api/reindex.ts | 1 + src/api/api/search.ts | 1 + src/api/api/search_mvt.ts | 1 + src/api/api/security.ts | 10 +- src/api/api/snapshot.ts | 3 +- src/api/types.ts | 163 ++++++++++++++++++++++++-------- 15 files changed, 255 insertions(+), 95 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 323a5a73b..cdcd3e8c9 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -754,6 +754,7 @@ client.get({ id, index }) - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. - **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_exclude_vectors` (Optional, boolean)**: Whether vectors should be excluded from _source - **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_fields` option. Object fields can't be returned; if specified, the request fails. - **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. 
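For illustration, a minimal usage sketch of the `_source_exclude_vectors` flag added to `client.get` above; the index name and document ID are placeholder values, and the snippet assumes an already-configured `client` instance as in the other examples.

```ts
// Hypothetical sketch: fetch a document while keeping vector fields
// out of the returned _source ('my-index' and '1' are placeholders).
const result = await client.get({
  index: 'my-index',
  id: '1',
  _source_exclude_vectors: true
})
console.log(result._source)
```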
@@ -831,7 +832,6 @@ client.getSource({ id, index }) - **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. - **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude in the response. - **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. -- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. - **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. - **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. @@ -1014,6 +1014,7 @@ client.index({ index }) - **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. - **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. - **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. +- **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created). ## client.info [_info] Get cluster info. @@ -1559,6 +1560,7 @@ client.search({ ... }) - **`typed_keys` (Optional, boolean)**: If `true`, aggregation and suggester names are be prefixed by their respective types in the response. - **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. - **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_exclude_vectors` (Optional, boolean)**: Whether vectors should be excluded from _source - **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - **`q` (Optional, string)**: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned. - **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. @@ -3464,6 +3466,7 @@ client.cluster.getComponentTemplate({ ... }) - **`name` (Optional, string)**: List of component template names used to limit the request. Wildcard (`*`) expressions are supported. 
- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+- **`settings_filter` (Optional, string \| string[])**: Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys
- **`include_defaults` (Optional, boolean)**: Return all default configurations for the component template (default: false)
- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
If `false`, information is retrieved from the master node.

@@ -3649,6 +3652,7 @@
To unset `_meta`, replace the template without specifying this information.
- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template
that uses deprecated components, Elasticsearch will emit a deprecation warning.

- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing component templates.
+- **`cause` (Optional, string)**: User-defined reason for creating the component template.
- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.

@@ -3778,7 +3782,7 @@ client.cluster.state({ ... })
- **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false)
- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed)
- **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node (default: false)
-- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Timeout for waiting for new cluster state in case it is blocked
- **`wait_for_metadata_version` (Optional, number)**: Wait for the metadata version to be equal or greater than the specified metadata version
- **`wait_for_timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for wait_for_metadata_version before timing out

@@ -6119,6 +6123,25 @@
Supports a list of values, such as `open,hidden`.
- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned.

## client.indices.getDataStreamMappings [_indices.get_data_stream_mappings]
Get data stream mappings.

Get mapping information for one or more data streams.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-mappings)

```ts
client.indices.getDataStreamMappings({ name })
```

### Arguments [_arguments_indices.get_data_stream_mappings]

#### Request (object) [_request_indices.get_data_stream_mappings]
- **`name` (string \| string[])**: A list of data streams or data stream patterns. Supports wildcards (`*`).
- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is
received before the timeout expires, the request fails and returns an
error.

## client.indices.getDataStreamOptions [_indices.get_data_stream_options]
Get data stream options.

@@ -6510,6 +6533,33 @@ error. 
- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

## client.indices.putDataStreamMappings [_indices.put_data_stream_mappings]
Update data stream mappings.

This API can be used to override mappings on specific data streams. These overrides will take precedence over what
is specified in the template that the data stream matches. The mapping change is only applied to new write indices
that are created during rollover after this API is called. No indices are changed by this API.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-mappings)

```ts
client.indices.putDataStreamMappings({ name })
```

### Arguments [_arguments_indices.put_data_stream_mappings]

#### Request (object) [_request_indices.put_data_stream_mappings]
- **`name` (string \| string[])**: A list of data streams or data stream patterns.
- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**
- **`dry_run` (Optional, boolean)**: If `true`, the request does not actually change the mappings on any data streams. Instead, it
simulates changing the mappings and reports back to the user what would have happened had these mappings
actually been applied.
- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is
received before the timeout expires, the request fails and returns an
error.
- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the
 timeout expires, the request fails and returns an error.

## client.indices.putDataStreamOptions [_indices.put_data_stream_options]
Update data stream options.
Update the data stream options of the specified data streams.

@@ -7582,6 +7632,16 @@
It can be a single string or an array.

- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The type of inference task that the model performs.
- **`query` (Optional, string)**: The query input, which is required only for the `rerank` task.
It is not required for other tasks.
- **`input_type` (Optional, string)**: Specifies the input data type for the text embedding model. The `input_type` parameter only applies to Inference Endpoints with the `text_embedding` task type. Possible values include:
* `SEARCH`
* `INGEST`
* `CLASSIFICATION`
* `CLUSTERING`
Not all services support all values. Unsupported values will trigger a validation exception.
Accepted values depend on the configured inference service, refer to the relevant service-specific documentation for more info.

> info
> The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`.
- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.
- **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete.
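A minimal sketch of passing the new root-level `input_type` described above; the endpoint ID `my-embedding-endpoint` is a placeholder and the snippet assumes a `text_embedding` inference endpoint already exists.

```ts
// Hypothetical sketch: request embeddings optimized for search-time input.
// The root-level input_type overrides any input_type set in task_settings.
const response = await client.inference.inference({
  inference_id: 'my-embedding-endpoint', // placeholder endpoint ID
  input: ['Elasticsearch is a distributed search and analytics engine'],
  input_type: 'SEARCH'
})
```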
@@ -8084,7 +8144,7 @@ client.inference.putWatsonx({ task_type, watsonx_inference_id, service, service_ - **`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service. ## client.inference.rerank [_inference.rerank] -Perform rereanking inference on the service +Perform reranking inference on the service [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference) @@ -8292,9 +8352,6 @@ client.ingest.getIpLocationDatabase({ ... }) - **`id` (Optional, string \| string[])**: List of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. -- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -A value of `-1` indicates that the request should never time out. ## client.ingest.getPipeline [_ingest.get_pipeline] Get pipelines. @@ -8553,7 +8610,7 @@ client.license.postStartTrial({ ... }) #### Request (object) [_request_license.post_start_trial] - **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) -- **`type_query_string` (Optional, string)** +- **`type` (Optional, string)**: The type of trial license to generate (default: "trial") - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.logstash.deletePipeline [_logstash.delete_pipeline] @@ -12233,15 +12290,9 @@ To check whether a user has a specific list of privileges, use the has privilege [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges) ```ts -client.security.getUserPrivileges({ ... }) +client.security.getUserPrivileges() ``` -### Arguments [_arguments_security.get_user_privileges] - -#### Request (object) [_request_security.get_user_privileges] -- **`application` (Optional, string)**: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. -- **`priviledge` (Optional, string)**: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. -- **`username` (Optional, string \| null)** ## client.security.getUserProfile [_security.get_user_profile] Get a user profile. @@ -12312,6 +12363,10 @@ It is not valid with other grant types. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. - **`run_as` (Optional, string)**: The name of the user to be impersonated. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation +visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', nothing is done with refreshes. ## client.security.hasPrivileges [_security.has_privileges] Check user privileges. @@ -13617,6 +13672,8 @@ It also accepts wildcards (`*`). - **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. 
If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the matching snapshots are all deleted. +If `false`, the request returns a response as soon as the deletes are scheduled. ## client.snapshot.deleteRepository [_snapshot.delete_repository] Delete snapshot repositories. diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 06f615279..c7ca51911 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -84,6 +84,7 @@ export default class Cluster { body: [], query: [ 'flat_settings', + 'settings_filter', 'include_defaults', 'local', 'master_timeout' @@ -155,6 +156,7 @@ export default class Cluster { ], query: [ 'create', + 'cause', 'master_timeout' ] }, diff --git a/src/api/api/get.ts b/src/api/api/get.ts index 6b8c79f55..5150eef02 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -41,6 +41,7 @@ const acceptedParams: Record - async getDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async getDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async getDataStreamMappings (this: That, params: T.IndicesGetDataStreamMappingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamMappings (this: That, params: T.IndicesGetDataStreamMappingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamMappings (this: That, params: T.IndicesGetDataStreamMappingsRequest, options?: TransportRequestOptions): Promise + async getDataStreamMappings (this: That, params: T.IndicesGetDataStreamMappingsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath } = this.acceptedParams['indices.get_data_stream_mappings'] @@ -2531,11 +2539,11 @@ export default class Indices { } } - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -3314,36 +3322,38 @@ export default class Indices { } /** - * Updates a data stream's mappings - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + * Update data stream mappings. This API can be used to override mappings on specific data streams. These overrides will take precedence over what is specified in the template that the data stream matches. The mapping change is only applied to new write indices that are created during rollover after this API is called. No indices are changed by this API. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-mappings | Elasticsearch API documentation} */ - async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async putDataStreamMappings (this: That, params: T.IndicesPutDataStreamMappingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataStreamMappings (this: That, params: T.IndicesPutDataStreamMappingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDataStreamMappings (this: That, params: T.IndicesPutDataStreamMappingsRequest, options?: TransportRequestOptions): Promise + async putDataStreamMappings (this: That, params: T.IndicesPutDataStreamMappingsRequest, options?: TransportRequestOptions): Promise { const { - path: acceptedPath + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery } = this.acceptedParams['indices.put_data_stream_mappings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} - let body: Record | string | undefined - const userBody = params?.body - if (userBody != null) { - if (typeof userBody === 'string') { - body = userBody - } else { - body = { ...userBody } - } - } - - params = params ?? {} + let body: any = params.body ?? undefined for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 8f0287319..f8f0df4ff 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -85,6 +85,7 @@ export default class Inference { body: [ 'query', 'input', + 'input_type', 'task_settings' ], query: [ @@ -1751,7 +1752,7 @@ export default class Inference { } /** - * Perform rereanking inference on the service + * Perform reranking inference on the service * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} */ async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index e5bc73147..732135791 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -82,9 +82,7 @@ export default class Ingest { 'id' ], body: [], - query: [ - 'master_timeout' - ] + query: [] }, 'ingest.get_pipeline': { path: [ diff --git a/src/api/api/license.ts b/src/api/api/license.ts index 8a1fb00d1..b9072b867 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -87,7 +87,7 @@ export default class License { body: [], query: [ 'acknowledge', - 'type_query_string', + 'type', 'master_timeout' ] } diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index c5af7c2bb..2a6fedd23 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -44,6 +44,7 @@ const acceptedParams: Record = GetGetResult @@ -944,17 +946,15 @@ export interface GetSourceRequest extends RequestBase { _source_excludes?: Fields /** A comma-separated list of source fields to include in the response. */ _source_includes?: Fields - /** A comma-separated list of stored fields to return as part of a hit. */ - stored_fields?: Fields /** The version number for concurrency control. * It must match the current version of the document for the request to succeed. */ version?: VersionNumber /** The version type. */ version_type?: VersionType /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } + body?: string | { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } + querystring?: { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never } } export type GetSourceResponse = TDocument @@ -1206,11 +1206,13 @@ export interface IndexRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards /** If `true`, the destination must be an index alias. 
*/ require_alias?: boolean + /** If `true`, the request's actions must target a data stream (existing or to be created). */ + require_data_stream?: boolean document?: TDocument /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, document?: never } + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, document?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, document?: never } + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, document?: never } } export type IndexResponse = WriteResponseBase @@ -2077,6 +2079,8 @@ export interface SearchRequest extends RequestBase { * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields + /** Whether vectors should be excluded from _source */ + _source_exclude_vectors?: boolean /** A comma-separated list of source fields to include in the response. * If this parameter is specified, only these source fields are returned. * You can exclude fields from this subset using the `_source_excludes` query parameter. @@ -2202,9 +2206,9 @@ export interface SearchRequest extends RequestBase { * You can retrieve these stats using the indices stats API. */ stats?: string[] /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_exclude_vectors?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_exclude_vectors?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } } export type SearchResponse> = SearchResponseBody @@ -15810,6 +15814,8 @@ export interface ClusterGetComponentTemplateRequest extends RequestBase { name?: Name /** If `true`, returns settings in flat format. */ flat_settings?: boolean + /** Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys */ + settings_filter?: string | string[] /** Return all default configurations for the component template (default: false) */ include_defaults?: boolean /** If `true`, the request retrieves information from the local node only. 
@@ -15819,9 +15825,9 @@ export interface ClusterGetComponentTemplateRequest extends RequestBase { * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, include_defaults?: never, local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, settings_filter?: never, include_defaults?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, flat_settings?: never, include_defaults?: never, local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { name?: never, flat_settings?: never, settings_filter?: never, include_defaults?: never, local?: never, master_timeout?: never } } export interface ClusterGetComponentTemplateResponse { @@ -16032,6 +16038,8 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { name: Name /** If `true`, this request cannot replace or update existing component templates. */ create?: boolean + /** User defined reason for create the component template. */ + cause?: string /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -16050,9 +16058,9 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never } + body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never } + querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never } } export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase @@ -16242,7 +16250,7 @@ export interface ClusterStateRequest extends RequestBase { ignore_unavailable?: boolean /** Return local information, do not retrieve the state from master node (default: false) */ local?: boolean - /** Specify timeout for connection to master */ + /** Timeout for waiting for new cluster state in case it is blocked */ master_timeout?: Duration /** Wait for the metadata version to be equal or greater than the specified metadata version */ wait_for_metadata_version?: VersionNumber @@ -18802,6 +18810,9 @@ export interface IndicesDataStream { /** The settings specific to this data stream that will take precedence over the settings in the matching index * template. */ settings: IndicesIndexSettings + /** The mappings specific to this data stream that will take precedence over the mappings in the matching index + * template. 
*/
+  mappings?: MappingTypeMapping
   /** Health status of the data stream.
    * This health status is based on the state of the primary and replica shards of the stream’s backing indices. */
   status: HealthStatus
@@ -20407,6 +20418,33 @@ export interface IndicesGetDataStreamResponse {
   data_streams: IndicesDataStream[]
 }
 
+export interface IndicesGetDataStreamMappingsDataStreamMappings {
+  /** The name of the data stream. */
+  name: string
+  /** The mappings specific to this data stream */
+  mappings: MappingTypeMapping
+  /** The mappings specific to this data stream merged with the mappings from its template. These `effective_mappings`
+   * are the mappings that will be used when a new index is created for this data stream. */
+  effective_mappings: MappingTypeMapping
+}
+
+export interface IndicesGetDataStreamMappingsRequest extends RequestBase {
+  /** A comma-separated list of data streams or data stream patterns. Supports wildcards (`*`). */
+  name: Indices
+  /** The period to wait for a connection to the master node. If no response is
+   * received before the timeout expires, the request fails and returns an
+   * error. */
+  master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never }
+}
+
+export interface IndicesGetDataStreamMappingsResponse {
+  data_streams: IndicesGetDataStreamMappingsDataStreamMappings[]
+}
+
 export interface IndicesGetDataStreamOptionsDataStreamWithOptions {
   name: DataStreamName
   options?: IndicesDataStreamOptions
@@ -20829,6 +20867,46 @@ export interface IndicesPutDataLifecycleRequest extends RequestBase {
 
 export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase
 
+export interface IndicesPutDataStreamMappingsRequest extends RequestBase {
+  /** A comma-separated list of data streams or data stream patterns. */
+  name: Indices
+  /** If `true`, the request does not actually change the mappings on any data streams. Instead, it
+   * simulates changing the mappings and reports back to the user what would have happened had these mappings
+   * actually been applied. */
+  dry_run?: boolean
+  /** The period to wait for a connection to the master node. If no response is
+   * received before the timeout expires, the request fails and returns an
+   * error. */
+  master_timeout?: Duration
+  /** The period to wait for a response. If no response is received before the
+   * timeout expires, the request fails and returns an error. */
+  timeout?: Duration
+  mappings?: MappingTypeMapping
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, dry_run?: never, master_timeout?: never, timeout?: never, mappings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, dry_run?: never, master_timeout?: never, timeout?: never, mappings?: never }
+}
+
+export interface IndicesPutDataStreamMappingsResponse {
+  data_streams: IndicesPutDataStreamMappingsUpdatedDataStreamMappings[]
+}
+
+export interface IndicesPutDataStreamMappingsUpdatedDataStreamMappings {
+  /** The data stream name. */
+  name: IndexName
+  /** If the mappings were successfully applied to the data stream (or would have been, if running in `dry_run`
+   * mode), it is `true`. 
If an error occurred, it is `false`. */
+  applied_to_data_stream: boolean
+  /** A message explaining why the mappings could not be applied to the data stream. */
+  error?: string
+  /** The mappings that are specific to this data stream that will override any mappings from the matching index template. */
+  mappings?: MappingTypeMapping
+  /** The mappings that are effective on this data stream, taking into account the mappings from the matching index
+   * template and the mappings specific to this data stream. */
+  effective_mappings?: MappingTypeMapping
+}
+
 export interface IndicesPutDataStreamOptionsRequest extends RequestBase {
   /** Comma-separated list of data streams used to limit the request.
    * Supports wildcards (`*`).
@@ -23262,13 +23340,24 @@ export interface InferenceInferenceRequest extends RequestBase {
    * > info
    * > Inference endpoints for the `completion` task type currently only support a single string as input. */
   input: string | string[]
+  /** Specifies the input data type for the text embedding model. The `input_type` parameter only applies to Inference Endpoints with the `text_embedding` task type. Possible values include:
+   * * `SEARCH`
+   * * `INGEST`
+   * * `CLASSIFICATION`
+   * * `CLUSTERING`
+   * Not all services support all values. Unsupported values will trigger a validation exception.
+   * Accepted values depend on the configured inference service, refer to the relevant service-specific documentation for more info.
+   *
+   * > info
+   * > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`. */
+  input_type?: string
   /** Task settings for the individual inference request.
   * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */
  task_settings?: InferenceTaskSettings
  /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, input_type?: never, task_settings?: never }
  /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, input_type?: never, task_settings?: never }
 }
 
 export type InferenceInferenceResponse = InferenceInferenceResult
@@ -24842,14 +24931,10 @@ export interface IngestGetIpLocationDatabaseRequest extends RequestBase {
    * Wildcard (`*`) expressions are supported.
    * To get all database configurations, omit this parameter or use `*`. */
   id?: Ids
-  /** The period to wait for a connection to the master node.
-   * If no response is received before the timeout expires, the request fails and returns an error.
-   * A value of `-1` indicates that the request should never time out. */
-  master_timeout?: Duration
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { id?: never, master_timeout?: never }
+  body?: string | { [key: string]: any } & { id?: never }
   /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { id?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { id?: never } } export interface IngestGetIpLocationDatabaseResponse { @@ -25112,13 +25197,14 @@ export interface LicensePostStartBasicResponse { export interface LicensePostStartTrialRequest extends RequestBase { /** whether the user has acknowledged acknowledge messages (default: false) */ acknowledge?: boolean - type_query_string?: string + /** The type of trial license to generate (default: "trial") */ + type?: string /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { acknowledge?: never, type_query_string?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { acknowledge?: never, type?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { acknowledge?: never, type_query_string?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { acknowledge?: never, type?: never, master_timeout?: never } } export interface LicensePostStartTrialResponse { @@ -33160,15 +33246,10 @@ export interface SecurityGetUserRequest extends RequestBase { export type SecurityGetUserResponse = Record export interface SecurityGetUserPrivilegesRequest extends RequestBase { - /** The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. */ - application?: Name - /** The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. */ - priviledge?: Name - username?: Name | null /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { application?: never, priviledge?: never, username?: never } + body?: string | { [key: string]: any } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { application?: never, priviledge?: never, username?: never } + querystring?: { [key: string]: any } } export interface SecurityGetUserPrivilegesResponse { @@ -33225,6 +33306,11 @@ export interface SecurityGrantApiKeyGrantApiKey { } export interface SecurityGrantApiKeyRequest extends RequestBase { + /** If 'true', Elasticsearch refreshes the affected shards to make this operation + * visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', nothing is done with refreshes. */ + refresh?: Refresh /** The API key. */ api_key: SecurityGrantApiKeyGrantApiKey /** The type of grant. Supported grant types are: `access_token`, `password`. */ @@ -33244,9 +33330,9 @@ export interface SecurityGrantApiKeyRequest extends RequestBase { /** The name of the user to be impersonated. */ run_as?: Username /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { api_key?: never, grant_type?: never, access_token?: never, username?: never, password?: never, run_as?: never } + body?: string | { [key: string]: any } & { refresh?: never, api_key?: never, grant_type?: never, access_token?: never, username?: never, password?: never, run_as?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { api_key?: never, grant_type?: never, access_token?: never, username?: never, password?: never, run_as?: never } + querystring?: { [key: string]: any } & { refresh?: never, api_key?: never, grant_type?: never, access_token?: never, username?: never, password?: never, run_as?: never } } export interface SecurityGrantApiKeyResponse { @@ -35176,10 +35262,13 @@ export interface SnapshotDeleteRequest extends RequestBase { * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** If `true`, the request returns a response when the matching snapshots are all deleted. + * If `false`, the request returns a response as soon as the deletes are scheduled. */ + wait_for_completion?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never } } export type SnapshotDeleteResponse = AcknowledgedResponseBase From 48b77e08672099d35ad0f1220d9ff4462ab8703e Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 14 Jul 2025 14:40:59 -0500 Subject: [PATCH 591/647] Update dependency @types/node to v22.16.3 (#2904) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 4262d2e32..646ee5599 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.16.0", + "@types/node": "22.16.3", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 3ddbf7549e0dcdd2f7f255b3a7b3cd1530e4cd76 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 14 Jul 2025 16:43:54 -0500 Subject: [PATCH 592/647] Update dependency @elastic/request-converter to v9.1.2 (#2903) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 646ee5599..c065fcc17 100644 --- a/package.json +++ b/package.json @@ -58,7 +58,7 @@ "node": ">=18" }, "devDependencies": { - "@elastic/request-converter": "9.1.1", + "@elastic/request-converter": "9.1.2", "@sinonjs/fake-timers": 
"14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", From 72e58c6f47fa2b03453025348e34994f4f16eb92 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 16 Jul 2025 13:34:29 -0500 Subject: [PATCH 593/647] Pin semver to latest minor release (#2910) --- .github/workflows/npm-publish-unstable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index f7d57d7f9..6eecfdcb9 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -72,7 +72,7 @@ jobs: # set unstable version value unstable_tag=$(echo "unstable.$(date --utc +%Y%m%d%H%M%S)") latest=$(npm view @elastic/elasticsearch --json | jq -r '.["dist-tags"].latest') - next=$(yes | npx semver -i minor "$latest") + next=$(npx -y 'semver@^7.7.0' -i minor "$latest") unstable_version=$(echo "$next-$unstable_tag") # overwrite package.json with unstable version value From e31816c654a266b91f0a3cc62f0237bc16ccf907 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 16 Jul 2025 13:38:08 -0500 Subject: [PATCH 594/647] Script redundancy cleanup (#2911) --- .github/workflows/npm-publish-unstable.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index 6eecfdcb9..84cd3baf8 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -70,10 +70,10 @@ jobs: - name: npm publish run: | # set unstable version value - unstable_tag=$(echo "unstable.$(date --utc +%Y%m%d%H%M%S)") + unstable_tag="unstable.$(date --utc +%Y%m%d%H%M%S)" latest=$(npm view @elastic/elasticsearch --json | jq -r '.["dist-tags"].latest') next=$(npx -y 'semver@^7.7.0' -i minor "$latest") - unstable_version=$(echo "$next-$unstable_tag") + unstable_version="$next-$unstable_tag" # overwrite package.json with unstable version value mv package.json package.json.bak From 69c857f5cb57e0107d8ecfd86c5b9743f28e8934 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 21 Jul 2025 18:57:41 +0200 Subject: [PATCH 595/647] Auto-generated API code (#2915) --- docs/reference/api-reference.md | 48 +++- src/api/api/cluster.ts | 10 +- src/api/api/create.ts | 3 - src/api/api/eql.ts | 1 + src/api/api/indices.ts | 5 +- src/api/api/inference.ts | 76 ++++-- src/api/types.ts | 428 +++++++++++++++++++++++--------- 7 files changed, 414 insertions(+), 157 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index cdcd3e8c9..3af05ce88 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -306,10 +306,7 @@ client.create({ id, index }) - **`id` (string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. - **`index` (string)**: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index. - **`document` (Optional, object)**: A document. -- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. -- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. 
- **`include_source_on_error` (Optional, boolean)**: True or false if to include the document source in the error message in case of parsing errors. -- **`op_type` (Optional, Enum("index" \| "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. - **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. - **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. - **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. @@ -2520,7 +2517,7 @@ client.cat.indices({ ... }) Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. -- **`health` (Optional, Enum("green" \| "yellow" \| "red"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. +- **`health` (Optional, Enum("green" \| "yellow" \| "red" \| "unknown" \| "unavailable"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. - **`pri` (Optional, boolean)**: If true, the response only includes information from primary shards. - **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. @@ -2918,7 +2915,7 @@ Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. - **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots. 
-- **`h` (Optional, Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason") \| Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version")[])**: A list of columns names to display. +- **`h` (Optional, Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason") \| Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` @@ -3367,6 +3364,7 @@ It can also be set to `-1` to indicate that the request should never timeout. ## client.cluster.allocationExplain [_cluster.allocation_explain] Explain the shard allocations. Get explanations for shard allocations in the cluster. +This API accepts the current_node, index, primary and shard parameters in the request body or in query parameters, but not in both at the same time. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. 
This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. @@ -3381,10 +3379,10 @@ client.cluster.allocationExplain({ ... }) ### Arguments [_arguments_cluster.allocation_explain] #### Request (object) [_request_cluster.allocation_explain] -- **`current_node` (Optional, string)**: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. -- **`index` (Optional, string)**: Specifies the name of the index that you would like an explanation for. -- **`primary` (Optional, boolean)**: If true, returns explanation for the primary shard for the given shard ID. -- **`shard` (Optional, number)**: Specifies the ID of the shard that you would like an explanation for. +- **`index` (Optional, string)**: The name of the index that you would like an explanation for. +- **`shard` (Optional, number)**: An identifier for the shard that you would like an explanation for. +- **`primary` (Optional, boolean)**: If true, returns an explanation for the primary shard for the specified shard ID. +- **`current_node` (Optional, string)**: Explain a shard only if it is currently located on the specified node name or node ID. - **`include_disk_info` (Optional, boolean)**: If true, returns information about disk usage and shard sizes. - **`include_yes_decisions` (Optional, boolean)**: If true, returns YES decisions in explanation. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. @@ -3527,7 +3525,7 @@ client.cluster.health({ ... }) - **`wait_for_nodes` (Optional, string \| number)**: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. Alternatively, it is possible to use ge(N), le(N), gt(N) and lt(N) notation. -- **`wait_for_status` (Optional, Enum("green" \| "yellow" \| "red"))**: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status. +- **`wait_for_status` (Optional, Enum("green" \| "yellow" \| "red" \| "unknown" \| "unavailable"))**: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status. ## client.cluster.info [_cluster.info] Get cluster info. @@ -4577,8 +4575,9 @@ If false, the sequence query will return successfully, but will always have empt - **`max_samples_per_key` (Optional, number)**: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. -- **`allow_no_indices` (Optional, boolean)** -- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])** +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
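As a quick illustration of the reordered `allocation_explain` parameters above, a hedged usage sketch follows; the index name is a placeholder and the `client` instance comes from the earlier sketch:

```ts
async function explainShard (): Promise<void> {
  // index, shard, primary and current_node can be sent in the request body
  // or as query parameters, but not in both at the same time.
  const explanation = await client.cluster.allocationExplain({
    index: 'my-index',
    shard: 0,
    primary: true,
    include_disk_info: true // also report disk usage and shard sizes
  })
  console.log(explanation)
}

explainShard().catch(console.error)
```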
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution - **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. ## client.esql.asyncQuery [_esql.async_query] @@ -6923,6 +6922,12 @@ Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. - **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. ## client.indices.refresh [_indices.refresh] Refresh an index. @@ -7684,6 +7689,7 @@ client.inference.put({ inference_id }) - **`inference_id` (string)**: The inference Id - **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type. Refer to the integration list in the API description for the available task types. - **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putAlibabacloud [_inference.put_alibabacloud] Create an AlibabaCloud AI Search inference endpoint. @@ -7706,6 +7712,7 @@ client.inference.putAlibabacloud({ task_type, alibabacloud_inference_id, service - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { input_type, return_token })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putAmazonbedrock [_inference.put_amazonbedrock] Create an Amazon Bedrock inference endpoint. @@ -7731,6 +7738,7 @@ client.inference.putAmazonbedrock({ task_type, amazonbedrock_inference_id, servi - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { max_new_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. 
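The `timeout` parameter added across the `inference.put*` variants bounds how long the client waits for endpoint creation. A sketch of the generic `put` call, reusing the `client` from the sketches above; the endpoint ID is made up, and the `service_settings` shape is an assumption based on the `elasticsearch` service documentation:

```ts
await client.inference.put({
  task_type: 'text_embedding',
  inference_id: 'my-e5-endpoint', // hypothetical endpoint ID
  timeout: '30s', // wait up to 30 seconds for the endpoint to be created
  inference_config: {
    service: 'elasticsearch',
    service_settings: {
      model_id: '.multilingual-e5-small', // built-in multilingual embedding model
      num_allocations: 1,
      num_threads: 1
    }
  }
})
```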
## client.inference.putAmazonsagemaker [_inference.put_amazonsagemaker] Configure an Amazon SageMaker inference endpoint @@ -7764,6 +7772,7 @@ The only valid task type for the model to perform is `completion`. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { max_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putAzureaistudio [_inference.put_azureaistudio] Create an Azure AI studio inference endpoint. @@ -7786,6 +7795,7 @@ client.inference.putAzureaistudio({ task_type, azureaistudio_inference_id, servi - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { do_sample, max_new_tokens, temperature, top_p, user })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putAzureopenai [_inference.put_azureopenai] Create an Azure OpenAI inference endpoint. @@ -7816,6 +7826,7 @@ NOTE: The `chat_completion` task type only supports streaming and only through t - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { user })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putCohere [_inference.put_cohere] Create a Cohere inference endpoint. @@ -7839,6 +7850,7 @@ These settings are specific to the `cohere` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putDeepseek [_inference.put_deepseek] Create a DeepSeek inference endpoint. @@ -7860,6 +7872,7 @@ client.inference.putDeepseek({ task_type, deepseek_inference_id, service, servic - **`service_settings` ({ api_key, model_id, url })**: Settings used to install the inference model. These settings are specific to the `deepseek` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putElasticsearch [_inference.put_elasticsearch] Create an Elasticsearch inference endpoint. @@ -7896,6 +7909,7 @@ It must not match the `model_id`. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { return_documents })**: Settings to configure the inference task.
These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putElser [_inference.put_elser] Create an ELSER inference endpoint. @@ -7930,6 +7944,7 @@ client.inference.putElser({ task_type, elser_inference_id, service, service_sett - **`service` (Enum("elser"))**: The type of service supported for the specified task type. In this case, `elser`. - **`service_settings` ({ adaptive_allocations, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elser` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putGoogleaistudio [_inference.put_googleaistudio] Create a Google AI Studio inference endpoint. @@ -7950,6 +7965,7 @@ client.inference.putGoogleaistudio({ task_type, googleaistudio_inference_id, ser - **`service` (Enum("googleaistudio"))**: The type of service supported for the specified task type. In this case, `googleaistudio`. - **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putGooglevertexai [_inference.put_googlevertexai] Create a Google Vertex AI inference endpoint. @@ -7972,6 +7988,7 @@ client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, ser - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { auto_truncate, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putHuggingFace [_inference.put_hugging_face] Create a Hugging Face inference endpoint. @@ -8029,6 +8046,7 @@ client.inference.putHuggingFace({ task_type, huggingface_inference_id, service, - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { return_documents, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putJinaai [_inference.put_jinaai] Create a JinaAI inference endpoint. @@ -8054,6 +8072,7 @@ client.inference.putJinaai({ task_type, jinaai_inference_id, service, service_se - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { return_documents, task, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putMistral [_inference.put_mistral] Create a Mistral inference endpoint. @@ -8074,6 +8093,7 @@ client.inference.putMistral({ task_type, mistral_inference_id, service, service_ - **`service` (Enum("mistral"))**: The type of service supported for the specified task type. In this case, `mistral`. - **`service_settings` ({ api_key, max_input_tokens, model, rate_limit })**: Settings used to install the inference model. These settings are specific to the `mistral` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putOpenai [_inference.put_openai] Create an OpenAI inference endpoint. @@ -8097,6 +8117,7 @@ NOTE: The `chat_completion` task type only supports streaming and only through t - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { user })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putVoyageai [_inference.put_voyageai] Create a VoyageAI inference endpoint. @@ -8121,6 +8142,7 @@ client.inference.putVoyageai({ task_type, voyageai_inference_id, service, servic - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { input_type, return_documents, top_k, truncation })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putWatsonx [_inference.put_watsonx] Create a Watsonx inference endpoint. @@ -8142,6 +8164,7 @@ client.inference.putWatsonx({ task_type, watsonx_inference_id, service, service_ - **`watsonx_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("watsonxai"))**: The type of service supported for the specified task type. In this case, `watsonxai`. - **`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.rerank [_inference.rerank] Perform reranking inference on the service @@ -8208,6 +8231,7 @@ It can be a single string or an array. NOTE: Inference endpoints for the completion task type currently only support a single string as input. - **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete. 
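By contrast, the `timeout` on the inference request methods above bounds the inference call itself rather than endpoint creation. A sketch, assuming the same parameter is accepted by `inference.completion` and that a completion endpoint named `my-chat-endpoint` exists (both assumptions, reusing the `client` from earlier):

```ts
// Completion endpoints currently accept a single string as input.
const completion = await client.inference.completion({
  inference_id: 'my-chat-endpoint', // hypothetical completion endpoint
  input: 'Summarize what the allocation explain API does.',
  timeout: '30s' // how long to wait for the inference request to complete
})
console.log(completion)
```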
## client.inference.textEmbedding [_inference.text_embedding] Perform text embedding inference on the service diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index c7ca51911..9ecac681d 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -38,12 +38,16 @@ export default class Cluster { 'cluster.allocation_explain': { path: [], body: [ - 'current_node', 'index', + 'shard', 'primary', - 'shard' + 'current_node' ], query: [ + 'index', + 'shard', + 'primary', + 'current_node', 'include_disk_info', 'include_yes_decisions', 'master_timeout' @@ -222,7 +226,7 @@ export default class Cluster { } /** - * Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. Refer to the linked documentation for examples of how to troubleshoot allocation issues using this API. + * Explain the shard allocations. Get explanations for shard allocations in the cluster. This API accepts the current_node, index, primary and shard parameters in the request body or in query parameters, but not in both at the same time. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. Refer to the linked documentation for examples of how to troubleshoot allocation issues using this API. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain | Elasticsearch API documentation} */ async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/create.ts b/src/api/api/create.ts index 0e904a233..87a8c3701 100644 --- a/src/api/api/create.ts +++ b/src/api/api/create.ts @@ -38,10 +38,7 @@ const acceptedParams: Record extends RequestBase { * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. * If the target doesn't exist and doesn’t match a data stream template, this request creates the index. */ index: IndexName - /** Only perform the operation if the document has this primary term. */ - if_primary_term?: long - /** Only perform the operation if the document has this sequence number. */ - if_seq_no?: SequenceNumber /** True or false if to include the document source in the error message in case of parsing errors. */ include_source_on_error?: boolean - /** Set to `create` to only index the document if it does not already exist (put if absent). - * If a document with the specified `_id` already exists, the indexing operation will fail. - * The behavior is the same as using the `/_create` endpoint. - * If a document ID is specified, this paramater defaults to `index`. - * Otherwise, it defaults to `create`. - * If the request targets a data stream, an `op_type` of `create` is required. 
*/ - op_type?: OpType /** The ID of the pipeline to use to preprocess incoming documents. * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ @@ -338,9 +327,9 @@ export interface CreateRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards document?: TDocument /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } + body?: string | { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } + querystring?: { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } } export type CreateResponse = WriteResponseBase @@ -1032,7 +1021,7 @@ export interface HealthReportImpact { export type HealthReportImpactArea = 'search' | 'ingest' | 'backup' | 'deployment_management' -export type HealthReportIndicatorHealthStatus = 'green' | 'yellow' | 'red' | 'unknown' +export type HealthReportIndicatorHealthStatus = 'green' | 'yellow' | 'red' | 'unknown' | 'unavailable' export interface HealthReportIndicatorNode { name: string | null @@ -3831,6 +3820,11 @@ export interface DocStats { * This number is based on documents in Lucene segments. * Elasticsearch reclaims the disk space of deleted Lucene documents when a segment is merged. */ deleted?: long + /** Returns the total size in bytes of all documents in this stats. + * This value may be more reliable than store_stats.size_in_bytes in estimating the index size. 
*/ + total_size_in_bytes: long + /** Human readable total_size_in_bytes */ + total_size?: ByteSize } export type Duration = string | -1 | 0 @@ -3928,6 +3922,7 @@ export interface FielddataStats { memory_size?: ByteSize memory_size_in_bytes: long fields?: Record + global_ordinals: GlobalOrdinalsStats } export type Fields = Field | Field[] @@ -3996,9 +3991,21 @@ export interface GetStats { total: long } +export interface GlobalOrdinalFieldStats { + build_time_in_millis: UnitMillis + build_time?: string + shard_max_value_count: long +} + +export interface GlobalOrdinalsStats { + build_time_in_millis: UnitMillis + build_time?: string + fields?: Record +} + export type GrokPattern = string -export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED' +export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED' | 'unknown' | 'unavailable' export type Host = string @@ -4154,6 +4161,9 @@ export interface LinearRetriever extends RetrieverBase { /** Inner retrievers. */ retrievers?: InnerRetriever[] rank_window_size?: integer + query?: string + fields?: string[] + normalizer?: ScoreNormalizer } export type MapboxVectorTiles = ArrayBuffer @@ -4309,6 +4319,8 @@ export interface RRFRetriever extends RetrieverBase { rank_constant?: integer /** This value determines the size of the individual result sets per query. */ rank_window_size?: integer + query?: string + fields?: string[] } export interface RankBase { @@ -4520,7 +4532,6 @@ export interface SegmentsStats { fixed_bit_set_memory_in_bytes: long /** Total amount of memory used by all index writers across all shards assigned to selected nodes. */ index_writer_memory?: ByteSize - index_writer_max_memory_in_bytes?: long /** Total amount, in bytes, of memory used by all index writers across all shards assigned to selected nodes. */ index_writer_memory_in_bytes: long /** Unix timestamp, in milliseconds, of the most recently retried indexing request. */ @@ -4537,15 +4548,16 @@ export interface SegmentsStats { points_memory?: ByteSize /** Total amount, in bytes, of memory used for points across all shards assigned to selected nodes. */ points_memory_in_bytes: long - stored_memory?: ByteSize /** Total amount, in bytes, of memory used for stored fields across all shards assigned to selected nodes. */ stored_fields_memory_in_bytes: long + /** Total amount of memory used for stored fields across all shards assigned to selected nodes. */ + stored_fields_memory?: ByteSize /** Total amount, in bytes, of memory used for terms across all shards assigned to selected nodes. */ terms_memory_in_bytes: long /** Total amount of memory used for terms across all shards assigned to selected nodes. */ terms_memory?: ByteSize /** Total amount of memory used for term vectors across all shards assigned to selected nodes. */ - term_vectory_memory?: ByteSize + term_vectors_memory?: ByteSize /** Total amount, in bytes, of memory used for term vectors across all shards assigned to selected nodes. */ term_vectors_memory_in_bytes: long /** Total amount of memory used by all version maps across all shards assigned to selected nodes. 
*/ @@ -10303,7 +10315,7 @@ export type CatCatShardColumns = CatCatShardColumn | CatCatShardColumn[] export type CatCatSnapshotsColumn = 'id' | 'snapshot' | 'repository' | 're' | 'repo' | 'status' | 's' | 'start_epoch' | 'ste' | 'startEpoch' | 'start_time' | 'sti' | 'startTime' | 'end_epoch' | 'ete' | 'endEpoch' | 'end_time' | 'eti' | 'endTime' | 'duration' | 'dur' | 'indices' | 'i' | 'successful_shards' | 'ss' | 'failed_shards' | 'fs' | 'total_shards' | 'ts' | 'reason' | 'r' | string -export type CatCatSnapshotsColumns = CatCatSnapshotsColumn | CatCatNodeColumn[] +export type CatCatSnapshotsColumns = CatCatSnapshotsColumn | CatCatSnapshotsColumn[] export type CatCatThreadPoolColumn = 'active' | 'a' | 'completed' | 'c' | 'core' | 'cr' | 'ephemeral_id' | 'eid' | 'host' | 'h' | 'ip' | 'i' | 'keep_alive' | 'k' | 'largest' | 'l' | 'max' | 'mx' | 'name' | 'node_id' | 'id' | 'node_name' | 'pid' | 'p' | 'pool_size' | 'psz' | 'port' | 'po' | 'queue' | 'q' | 'queue_size' | 'qs' | 'rejected' | 'r' | 'size' | 'sz' | 'type' | 't' | string @@ -15693,18 +15705,18 @@ export interface ClusterAllocationExplainRequest extends RequestBase { include_yes_decisions?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration - /** Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. */ - current_node?: string - /** Specifies the name of the index that you would like an explanation for. */ + /** The name of the index that you would like an explanation for. */ index?: IndexName - /** If true, returns explanation for the primary shard for the given shard ID. */ - primary?: boolean - /** Specifies the ID of the shard that you would like an explanation for. */ + /** An identifier for the shard that you would like an explanation for. */ shard?: integer + /** If true, returns an explanation for the primary shard for the specified shard ID. */ + primary?: boolean + /** Explain a shard only if it is currently located on the specified node name or node ID. */ + current_node?: NodeId /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { include_disk_info?: never, include_yes_decisions?: never, master_timeout?: never, current_node?: never, index?: never, primary?: never, shard?: never } + body?: string | { [key: string]: any } & { include_disk_info?: never, include_yes_decisions?: never, master_timeout?: never, index?: never, shard?: never, primary?: never, current_node?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { include_disk_info?: never, include_yes_decisions?: never, master_timeout?: never, current_node?: never, index?: never, primary?: never, shard?: never } + querystring?: { [key: string]: any } & { include_disk_info?: never, include_yes_decisions?: never, master_timeout?: never, index?: never, shard?: never, primary?: never, current_node?: never } } export interface ClusterAllocationExplainReservedSize { @@ -16337,22 +16349,43 @@ export interface ClusterStatsCharFilterTypes { filter_types: ClusterStatsFieldTypes[] /** Contains statistics about tokenizer types used in selected nodes. */ tokenizer_types: ClusterStatsFieldTypes[] + /** Contains statistics about synonyms types used in selected nodes. 
*/ + synonyms: Record } export interface ClusterStatsClusterFileSystem { + path?: string + mount?: string + type?: string /** Total number of bytes available to JVM in file stores across all selected nodes. * Depending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_byes`. * This is the actual amount of free disk space the selected Elasticsearch nodes can use. */ - available_in_bytes: long + available_in_bytes?: long + /** Total number of bytes available to JVM in file stores across all selected nodes. + * Depending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_byes`. + * This is the actual amount of free disk space the selected Elasticsearch nodes can use. */ + available?: ByteSize + /** Total number, in bytes, of unallocated bytes in file stores across all selected nodes. */ + free_in_bytes?: long /** Total number of unallocated bytes in file stores across all selected nodes. */ - free_in_bytes: long + free?: ByteSize /** Total size, in bytes, of all file stores across all selected nodes. */ - total_in_bytes: long + total_in_bytes?: long + /** Total size of all file stores across all selected nodes. */ + total?: ByteSize + low_watermark_free_space?: ByteSize + low_watermark_free_space_in_bytes?: long + high_watermark_free_space?: ByteSize + high_watermark_free_space_in_bytes?: long + flood_stage_free_space?: ByteSize + flood_stage_free_space_in_bytes?: long + frozen_flood_stage_free_space?: ByteSize + frozen_flood_stage_free_space_in_bytes?: long } export interface ClusterStatsClusterIndices { /** Contains statistics about analyzers and analyzer components used in selected nodes. */ - analysis: ClusterStatsCharFilterTypes + analysis?: ClusterStatsCharFilterTypes /** Contains statistics about memory used for completion in selected nodes. */ completion: CompletionStats /** Total number of indices with shards assigned to selected nodes. */ @@ -16363,6 +16396,10 @@ export interface ClusterStatsClusterIndices { fielddata: FielddataStats /** Contains statistics about the query cache of selected nodes. */ query_cache: QueryCacheStats + /** Holds a snapshot of the search usage statistics. + * Used to hold the stats for a single node that's part of a ClusterStatsNodeResponse, as well as to + * accumulate stats for the entire cluster and return them as part of the ClusterStatsResponse. */ + search: ClusterStatsSearchUsageStats /** Contains statistics about segments in selected nodes. */ segments: SegmentsStats /** Contains statistics about indices with shards assigned to selected nodes. */ @@ -16370,9 +16407,13 @@ export interface ClusterStatsClusterIndices { /** Contains statistics about the size of shards assigned to selected nodes. */ store: StoreStats /** Contains statistics about field mappings in selected nodes. */ - mappings: ClusterStatsFieldTypesMappings + mappings?: ClusterStatsFieldTypesMappings /** Contains statistics about analyzers and analyzer components used in selected nodes. */ versions?: ClusterStatsIndicesVersions[] + /** Contains statistics about indexed dense vector */ + dense_vector: ClusterStatsDenseVectorStats + /** Contains statistics about indexed sparse vector */ + sparse_vector: ClusterStatsSparseVectorStats } export interface ClusterStatsClusterIndicesShards { @@ -16403,6 +16444,8 @@ export interface ClusterStatsClusterIngest { export interface ClusterStatsClusterJvm { /** Uptime duration, in milliseconds, since JVM last started. 
*/ max_uptime_in_millis: DurationValue + /** Uptime duration since JVM last started. */ + max_uptime?: Duration /** Contains statistics about memory used by selected nodes. */ mem: ClusterStatsClusterJvmMemory /** Number of active threads in use by JVM across all selected nodes. */ @@ -16414,8 +16457,12 @@ export interface ClusterStatsClusterJvm { export interface ClusterStatsClusterJvmMemory { /** Maximum amount of memory, in bytes, available for use by the heap across all selected nodes. */ heap_max_in_bytes: long + /** Maximum amount of memory available for use by the heap across all selected nodes. */ + heap_max?: ByteSize /** Memory, in bytes, currently in use by the heap across all selected nodes. */ heap_used_in_bytes: long + /** Memory currently in use by the heap across all selected nodes. */ + heap_used?: ByteSize } export interface ClusterStatsClusterJvmVersion { @@ -16444,20 +16491,22 @@ export interface ClusterStatsClusterNetworkTypes { } export interface ClusterStatsClusterNodeCount { - coordinating_only: integer - data: integer - data_cold: integer - data_content: integer - data_frozen?: integer - data_hot: integer - data_warm: integer - ingest: integer - master: integer - ml: integer - remote_cluster_client: integer total: integer - transform: integer - voting_only: integer + coordinating_only?: integer + data?: integer + data_cold?: integer + data_content?: integer + data_frozen?: integer + data_hot?: integer + data_warm?: integer + index?: integer + ingest?: integer + master?: integer + ml?: integer + remote_cluster_client?: integer + search?: integer + transform?: integer + voting_only?: integer } export interface ClusterStatsClusterNodes { @@ -16566,6 +16615,30 @@ export interface ClusterStatsClusterShardMetrics { min: double } +export interface ClusterStatsClusterSnapshotStats { + current_counts: ClusterStatsSnapshotCurrentCounts + repositories: Record +} + +export interface ClusterStatsDenseVectorOffHeapStats { + total_size_bytes: long + total_size?: ByteSize + total_veb_size_bytes: long + total_veb_size?: ByteSize + total_vec_size_bytes: long + total_vec_size?: ByteSize + total_veq_size_bytes: long + total_veq_size?: ByteSize + total_vex_size_bytes: long + total_vex_size?: ByteSize + fielddata?: Record> +} + +export interface ClusterStatsDenseVectorStats { + value_count: long + off_heap?: ClusterStatsDenseVectorOffHeapStats +} + export interface ClusterStatsFieldTypes { /** The name for the field type in selected nodes. */ name: Name @@ -16574,55 +16647,47 @@ export interface ClusterStatsFieldTypes { /** The number of indices containing the field type in selected nodes. */ index_count: integer /** For dense_vector field types, number of indexed vector types in selected nodes. */ - indexed_vector_count?: long + indexed_vector_count?: integer /** For dense_vector field types, the maximum dimension of all indexed vector types in selected nodes. */ - indexed_vector_dim_max?: long + indexed_vector_dim_max?: integer /** For dense_vector field types, the minimum dimension of all indexed vector types in selected nodes. */ - indexed_vector_dim_min?: long + indexed_vector_dim_min?: integer /** The number of fields that declare a script. 
*/ script_count?: integer + /** For dense_vector field types, count of mappings by index type */ + vector_index_type_count?: Record + /** For dense_vector field types, count of mappings by similarity */ + vector_similarity_type_count?: Record + /** For dense_vector field types, count of mappings by element type */ + vector_element_type_count?: Record } export interface ClusterStatsFieldTypesMappings { /** Contains statistics about field data types used in selected nodes. */ field_types: ClusterStatsFieldTypes[] /** Contains statistics about runtime field data types used in selected nodes. */ - runtime_field_types?: ClusterStatsRuntimeFieldTypes[] + runtime_field_types: ClusterStatsRuntimeFieldTypes[] /** Total number of fields in all non-system indices. */ - total_field_count?: integer + total_field_count?: long /** Total number of fields in all non-system indices, accounting for mapping deduplication. */ - total_deduplicated_field_count?: integer + total_deduplicated_field_count?: long /** Total size of all mappings after deduplication and compression. */ total_deduplicated_mapping_size?: ByteSize /** Total size of all mappings, in bytes, after deduplication and compression. */ total_deduplicated_mapping_size_in_bytes?: long + /** Source mode usage count. */ + source_modes: Record } export interface ClusterStatsIndexingPressure { - memory: ClusterStatsIndexingPressureMemory -} - -export interface ClusterStatsIndexingPressureMemory { - current: ClusterStatsIndexingPressureMemorySummary - limit_in_bytes: long - total: ClusterStatsIndexingPressureMemorySummary -} - -export interface ClusterStatsIndexingPressureMemorySummary { - all_in_bytes: long - combined_coordinating_and_primary_in_bytes: long - coordinating_in_bytes: long - coordinating_rejections?: long - primary_in_bytes: long - primary_rejections?: long - replica_in_bytes: long - replica_rejections?: long + memory: NodesIndexingPressureMemory } export interface ClusterStatsIndicesVersions { index_count: integer primary_shard_count: integer total_primary_bytes: long + total_primary_size?: ByteSize version: VersionString } @@ -16638,18 +16703,33 @@ export interface ClusterStatsNodePackagingType { export interface ClusterStatsOperatingSystemMemoryInfo { /** Total amount, in bytes, of memory across all selected nodes, but using the value specified using the `es.total_memory_bytes` system property instead of measured total memory for those nodes where that system property was set. */ adjusted_total_in_bytes?: long + /** Total amount of memory across all selected nodes, but using the value specified using the `es.total_memory_bytes` system property instead of measured total memory for those nodes where that system property was set. */ + adjusted_total?: ByteSize /** Amount, in bytes, of free physical memory across all selected nodes. */ free_in_bytes: long + /** Amount of free physical memory across all selected nodes. */ + free?: ByteSize /** Percentage of free physical memory across all selected nodes. */ free_percent: integer /** Total amount, in bytes, of physical memory across all selected nodes. */ total_in_bytes: long + /** Total amount of physical memory across all selected nodes. */ + total?: ByteSize /** Amount, in bytes, of physical memory in use across all selected nodes. */ used_in_bytes: long + /** Amount of physical memory in use across all selected nodes. */ + used?: ByteSize /** Percentage of physical memory in use across all selected nodes. 
*/ used_percent: integer } +export interface ClusterStatsPerRepositoryStats { + type: string + oldest_start_time_millis: UnitMillis + oldest_start_time?: DateFormat + current_counts: ClusterStatsRepositoryStatsCurrentCounts +} + export interface ClusterStatsRemoteClusterInfo { /** The UUID of the remote cluster. */ cluster_uuid: string @@ -16658,7 +16738,7 @@ export interface ClusterStatsRemoteClusterInfo { /** The `skip_unavailable` setting used for this remote cluster. */ skip_unavailable: boolean /** Transport compression setting used for this remote cluster. */ - transport_compress: string + 'transport.compress': string /** Health status of the cluster, based on the state of its primary and replica shards. */ status: HealthStatus /** The list of Elasticsearch versions used by the nodes on the remote cluster. */ @@ -16683,6 +16763,23 @@ export interface ClusterStatsRemoteClusterInfo { mem_total?: string } +export interface ClusterStatsRepositoryStatsCurrentCounts { + snapshots: integer + clones: integer + finalizations: integer + deletions: integer + snapshot_deletions: integer + active_deletions: integer + shards: ClusterStatsRepositoryStatsShards +} + +export interface ClusterStatsRepositoryStatsShards { + total: integer + complete: integer + incomplete: integer + states: Record +} + export interface ClusterStatsRequest extends RequestBase { /** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */ node_id?: NodeIds @@ -16731,6 +16828,33 @@ export interface ClusterStatsRuntimeFieldTypes { source_total: integer } +export interface ClusterStatsSearchUsageStats { + total: long + queries: Record + rescorers: Record + sections: Record + retrievers: Record +} + +export type ClusterStatsShardState = 'INIT' | 'SUCCESS' | 'FAILED' | 'ABORTED' | 'MISSING' | 'WAITING' | 'QUEUED' | 'PAUSED_FOR_NODE_REMOVAL' + +export interface ClusterStatsSnapshotCurrentCounts { + /** Snapshots currently in progress */ + snapshots: integer + /** Incomplete shard snapshots */ + shard_snapshots: integer + /** Snapshots deletions in progress */ + snapshot_deletions: integer + /** Sum of snapshots and snapshot_deletions */ + concurrent_operations: integer + /** Cleanups in progress, not counted in concurrent_operations as they are not concurrent */ + cleanups: integer +} + +export interface ClusterStatsSparseVectorStats { + value_count: long +} + export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { /** Name of the cluster, based on the cluster name setting. */ cluster_name: Name @@ -16740,14 +16864,23 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { indices: ClusterStatsClusterIndices /** Contains statistics about nodes selected by the request’s node filters. */ nodes: ClusterStatsClusterNodes + /** Contains stats on repository feature usage exposed in cluster stats for telemetry. */ + repositories: Record> + /** Contains stats cluster snapshots. */ + snapshots: ClusterStatsClusterSnapshotStats /** Health status of the cluster, based on the state of its primary and replica shards. */ - status: HealthStatus + status?: HealthStatus /** Unix timestamp, in milliseconds, for the last time the cluster statistics were refreshed. 
*/ timestamp: long /** Cross-cluster stats */ ccs: ClusterStatsCCSStats } +export interface ClusterStatsSynonymsStats { + count: integer + index_count: integer +} + export interface ConnectorConnector { api_key_id?: string api_key_secret_id?: string @@ -17740,8 +17873,12 @@ export interface EqlGetStatusResponse { export interface EqlSearchRequest extends RequestBase { /** The name of the index to scope the operation */ index: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution */ + ccs_minimize_roundtrips?: boolean /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean /** EQL query you wish to run. */ @@ -17779,9 +17916,9 @@ export interface EqlSearchRequest extends RequestBase { * `max_samples_per_key` parameter. Pipes are not supported for sample queries. */ max_samples_per_key?: integer /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } } export type EqlSearchResponse = EqlEqlSearchResponseBase @@ -19877,6 +20014,10 @@ export interface IndicesDeleteRequest extends RequestBase { export type IndicesDeleteResponse = IndicesResponseBase +export interface IndicesDeleteAliasIndicesAliasesResponseBody extends AcknowledgedResponseBase { + errors?: boolean +} + export interface IndicesDeleteAliasRequest extends RequestBase { /** Comma-separated list of data streams or indices used to limit the request. * Supports wildcards (`*`). */ @@ -19896,7 +20037,7 @@ export interface IndicesDeleteAliasRequest extends RequestBase { querystring?: { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never } } -export type IndicesDeleteAliasResponse = AcknowledgedResponseBase +export type IndicesDeleteAliasResponse = IndicesDeleteAliasIndicesAliasesResponseBody export interface IndicesDeleteDataLifecycleRequest extends RequestBase { /** A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams */ @@ -21270,10 +21411,19 @@ export interface IndicesRecoveryRequest extends RequestBase { active_only?: boolean /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, active_only?: never, detailed?: never } + body?: string | { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { index?: never, active_only?: never, detailed?: never } + querystring?: { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } } export type IndicesRecoveryResponse = Record @@ -22442,10 +22592,8 @@ export interface InferenceCohereServiceSettings { * * * For the available `completion` models, refer to the [Cohere command docs](https://docs.cohere.com/docs/models#command). * * For the available `rerank` models, refer to the [Cohere rerank docs](https://docs.cohere.com/reference/rerank-1). - * * For the available `text_embedding` models, refer to [Cohere embed docs](https://docs.cohere.com/reference/embed). - * - * The default value for a text embedding task is `embed-english-v2.0`. */ - model_id?: string + * * For the available `text_embedding` models, refer to [Cohere embed docs](https://docs.cohere.com/reference/embed). */ + model_id: string /** This setting helps to minimize the number of rate limit errors returned from Cohere. * By default, the `cohere` service sets the number of requests allowed per minute to 10000. */ rate_limit?: InferenceRateLimitSetting @@ -22469,7 +22617,7 @@ export interface InferenceCohereTaskSettings { * * `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents. * * IMPORTANT: The `input_type` field is required when using embedding models `v3` and higher. */ - input_type?: InferenceCohereInputType + input_type: InferenceCohereInputType /** For a `rerank` task, return doc text within the results. */ return_documents?: boolean /** For a `rerank` task, the number of most relevant documents to return. @@ -23367,11 +23515,13 @@ export interface InferencePutRequest extends RequestBase { task_type?: InferenceTaskType /** The inference Id */ inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration inference_config?: InferenceInferenceEndpoint /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, inference_config?: never } + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, inference_config?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, inference_config?: never } + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, inference_config?: never } } export type InferencePutResponse = InferenceInferenceEndpointInfo @@ -23381,6 +23531,8 @@ export interface InferencePutAlibabacloudRequest extends RequestBase { task_type: InferenceAlibabaCloudTaskType /** The unique identifier of the inference endpoint. */ alibabacloud_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. */ @@ -23391,9 +23543,9 @@ export interface InferencePutAlibabacloudRequest extends RequestBase { * These settings are specific to the task type you specified. 
*/ task_settings?: InferenceAlibabaCloudTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfoAlibabaCloudAI @@ -23403,6 +23555,8 @@ export interface InferencePutAmazonbedrockRequest extends RequestBase { task_type: InferenceAmazonBedrockTaskType /** The unique identifier of the inference endpoint. */ amazonbedrock_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `amazonbedrock`. */ @@ -23413,9 +23567,9 @@ export interface InferencePutAmazonbedrockRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceAmazonBedrockTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock @@ -23426,6 +23580,8 @@ export interface InferencePutAnthropicRequest extends RequestBase { task_type: InferenceAnthropicTaskType /** The unique identifier of the inference endpoint. */ anthropic_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `anthropic`. */ @@ -23436,9 +23592,9 @@ export interface InferencePutAnthropicRequest extends RequestBase { * These settings are specific to the task type you specified. 
*/ task_settings?: InferenceAnthropicTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic @@ -23448,6 +23604,8 @@ export interface InferencePutAzureaistudioRequest extends RequestBase { task_type: InferenceAzureAiStudioTaskType /** The unique identifier of the inference endpoint. */ azureaistudio_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `azureaistudio`. */ @@ -23458,9 +23616,9 @@ export interface InferencePutAzureaistudioRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceAzureAiStudioTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAzureAIStudio @@ -23471,6 +23629,8 @@ export interface InferencePutAzureopenaiRequest extends RequestBase { task_type: InferenceAzureOpenAITaskType /** The unique identifier of the inference endpoint. */ azureopenai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `azureopenai`. */ @@ -23481,9 +23641,9 @@ export interface InferencePutAzureopenaiRequest extends RequestBase { * These settings are specific to the task type you specified. 
*/ task_settings?: InferenceAzureOpenAITaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzureOpenAI @@ -23493,6 +23653,8 @@ export interface InferencePutCohereRequest extends RequestBase { task_type: InferenceCohereTaskType /** The unique identifier of the inference endpoint. */ cohere_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `cohere`. */ @@ -23504,9 +23666,9 @@ export interface InferencePutCohereRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceCohereTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere @@ -23516,6 +23678,8 @@ export interface InferencePutDeepseekRequest extends RequestBase { task_type: InferenceTaskTypeDeepSeek /** The unique identifier of the inference endpoint. */ deepseek_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `deepseek`. */ @@ -23524,9 +23688,9 @@ export interface InferencePutDeepseekRequest extends RequestBase { * These settings are specific to the `deepseek` service. */ service_settings: InferenceDeepSeekServiceSettings /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } } export type InferencePutDeepseekResponse = InferenceInferenceEndpointInfoDeepSeek @@ -23537,6 +23701,8 @@ export interface InferencePutElasticsearchRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceElasticsearchTaskType /** The unique identifier of the inference endpoint. * It must not match the `model_id`. */ elasticsearch_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `elasticsearch`. */ service: InferenceElasticsearchServiceType /** Settings used to install the inference model. These settings are specific to the `elasticsearch` service. */ service_settings: InferenceElasticsearchServiceSettings /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceElasticsearchTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoElasticsearch @@ -23559,6 +23725,8 @@ export interface InferencePutElserRequest extends RequestBase { task_type: InferenceElserTaskType /** The unique identifier of the inference endpoint. */ elser_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `elser`. */ service: InferenceElserServiceType /** Settings used to install the inference model. These settings are specific to the `elser` service. */ service_settings: InferenceElserServiceSettings /** All values in `body` will be added to the request body.
*/ - body?: string | { [key: string]: any } & { task_type?: never, elser_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, elser_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, elser_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, elser_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } } export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER @@ -23578,6 +23746,8 @@ export interface InferencePutGoogleaistudioRequest extends RequestBase { task_type: InferenceGoogleAiStudioTaskType /** The unique identifier of the inference endpoint. */ googleaistudio_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `googleaistudio`. */ @@ -23585,9 +23755,9 @@ export interface InferencePutGoogleaistudioRequest extends RequestBase { /** Settings used to install the inference model. These settings are specific to the `googleaistudio` service. */ service_settings: InferenceGoogleAiStudioServiceSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } } export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoGoogleAIStudio @@ -23597,6 +23767,8 @@ export interface InferencePutGooglevertexaiRequest extends RequestBase { task_type: InferenceGoogleVertexAITaskType /** The unique identifier of the inference endpoint. */ googlevertexai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `googlevertexai`. */ @@ -23607,9 +23779,9 @@ export interface InferencePutGooglevertexaiRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceGoogleVertexAITaskSettings /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoGoogleVertexAI @@ -23619,6 +23791,8 @@ export interface InferencePutHuggingFaceRequest extends RequestBase { task_type: InferenceHuggingFaceTaskType /** The unique identifier of the inference endpoint. */ huggingface_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `hugging_face`. */ @@ -23629,9 +23803,9 @@ export interface InferencePutHuggingFaceRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceHuggingFaceTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace @@ -23641,6 +23815,8 @@ export interface InferencePutJinaaiRequest extends RequestBase { task_type: InferenceJinaAITaskType /** The unique identifier of the inference endpoint. */ jinaai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `jinaai`. */ @@ -23651,9 +23827,9 @@ export interface InferencePutJinaaiRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceJinaAITaskSettings /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi @@ -23663,6 +23839,8 @@ export interface InferencePutMistralRequest extends RequestBase { task_type: InferenceMistralTaskType /** The unique identifier of the inference endpoint. */ mistral_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `mistral`. */ @@ -23670,9 +23848,9 @@ export interface InferencePutMistralRequest extends RequestBase { /** Settings used to install the inference model. These settings are specific to the `mistral` service. */ service_settings: InferenceMistralServiceSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } } export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral @@ -23683,6 +23861,8 @@ export interface InferencePutOpenaiRequest extends RequestBase { task_type: InferenceOpenAITaskType /** The unique identifier of the inference endpoint. */ openai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `openai`. */ @@ -23693,9 +23873,9 @@ export interface InferencePutOpenaiRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceOpenAITaskSettings /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, openai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, openai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI @@ -23705,6 +23885,8 @@ export interface InferencePutVoyageaiRequest extends RequestBase { task_type: InferenceVoyageAITaskType /** The unique identifier of the inference endpoint. */ voyageai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `voyageai`. */ @@ -23715,9 +23897,9 @@ export interface InferencePutVoyageaiRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceVoyageAITaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI @@ -23727,14 +23909,16 @@ export interface InferencePutWatsonxRequest extends RequestBase { task_type: InferenceWatsonxTaskType /** The unique identifier of the inference endpoint. */ watsonx_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The type of service supported for the specified task type. In this case, `watsonxai`. */ service: InferenceWatsonxServiceType /** Settings used to install the inference model. These settings are specific to the `watsonxai` service. */ service_settings: InferenceWatsonxServiceSettings /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, service?: never, service_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, service?: never, service_settings?: never } } export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx @@ -23784,6 +23968,8 @@ export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInference export interface InferenceStreamCompletionRequest extends RequestBase { /** The unique identifier for the inference endpoint. */ inference_id: Id + /** The amount of time to wait for the inference request to complete. */ + timeout?: Duration /** The text on which you want to perform the inference task. * It can be a single string or an array. * @@ -23792,9 +23978,9 @@ export interface InferenceStreamCompletionRequest extends RequestBase { /** Optional task settings */ task_settings?: InferenceTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { inference_id?: never, input?: never, task_settings?: never } + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { inference_id?: never, input?: never, task_settings?: never } + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } } export type InferenceStreamCompletionResponse = StreamResult @@ -30373,6 +30559,8 @@ export interface NodesJvmMemoryStats { heap_committed_in_bytes?: long /** Maximum amount of memory, in bytes, available for use by the heap. */ heap_max_in_bytes?: long + /** Maximum amount of memory, available for use by the heap. */ + heap_max?: ByteSize /** Non-heap memory used, in bytes. */ non_heap_used_in_bytes?: long /** Amount of non-heap memory available, in bytes. */ @@ -30482,6 +30670,8 @@ export interface NodesPressureMemory { primary_rejections?: long /** Number of indexing requests rejected in the replica stage. 
*/ replica_rejections?: long + primary_document_rejections?: long + large_operation_rejections?: long } export interface NodesProcess { From f644487c3033fe6998be1d3befd1295d8cd1eb71 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 12:03:40 -0500 Subject: [PATCH 596/647] Update dependency @types/node to v22.16.5 (#2912) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index c065fcc17..0a30594df 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.16.3", + "@types/node": "22.16.5", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 839dfc33edb83a4ff5d2db358c41a5542f26d793 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 12:04:20 -0500 Subject: [PATCH 597/647] Update dependency chai to v5.2.1 (#2913) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 0a30594df..252cc229f 100644 --- a/package.json +++ b/package.json @@ -66,7 +66,7 @@ "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", - "chai": "5.2.0", + "chai": "5.2.1", "cross-zip": "4.0.1", "desm": "1.3.1", "into-stream": "8.0.1", From fe20f0aca308b4d41c9b1b799671c9cd5ddfbffb Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 21 Jul 2025 13:20:36 -0500 Subject: [PATCH 598/647] Checkout before npm publish unstable (#2919) --- .github/workflows/npm-publish-unstable.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index 84cd3baf8..e7d77c449 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -67,6 +67,10 @@ jobs: contents: write id-token: write steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + persist-credentials: false + ref: main - name: npm publish run: | # set unstable version value From 3d0a4516b2ac1a5e051bc60ecbc976ef1fb06e42 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 24 Jul 2025 12:23:32 -0500 Subject: [PATCH 599/647] Bump to 9.1.0 (#2925) --- package.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 252cc229f..e25bfed15 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "9.0.3", - "versionCanary": "9.0.2-canary.0", + "version": "9.1.0", + "versionCanary": "9.1.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", "types": "index.d.ts", From de7c5229ab3118623d3e7516f3b7473ca2611a57 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 24 Jul 2025 20:06:59 +0200 Subject: [PATCH 600/647] Auto-generated API code (#2923) --- docs/reference/api-reference.md | 101 ++++++++-- src/api/api/inference.ts | 73 +++++++- src/api/api/info.ts | 2 +- src/api/api/msearch.ts | 1 + src/api/types.ts | 322 +++++++++++++++++++++++++++++++- 5 files changed, 471 
insertions(+), 28 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 3af05ce88..c507df2c6 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -1016,6 +1016,7 @@ client.index({ index }) ## client.info [_info] Get cluster info. Get basic build, version, and cluster information. +::: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info) @@ -7662,7 +7663,7 @@ The following integrations are available through the inference API. You can find * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Anthropic (`completion`) -* Azure AI Studio (`completion`, `text_embedding`) +* Azure AI Studio (`completion`, `rerank`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`completion`, `chat_completion`) @@ -7709,7 +7710,7 @@ client.inference.putAlibabacloud({ task_type, alibabacloud_inference_id, service - **`alibabacloud_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("alibabacloud-ai-search"))**: The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. - **`service_settings` ({ api_key, host, rate_limit, service_id, workspace })**: Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { input_type, return_token })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7735,7 +7736,7 @@ client.inference.putAmazonbedrock({ task_type, amazonbedrock_inference_id, servi - **`amazonbedrock_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("amazonbedrock"))**: The type of service supported for the specified task type. In this case, `amazonbedrock`. - **`service_settings` ({ access_key, model, provider, region, rate_limit, secret_key })**: Settings used to install the inference model. These settings are specific to the `amazonbedrock` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { max_new_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7769,7 +7770,7 @@ The only valid task type for the model to perform is `completion`. - **`anthropic_inference_id` (string)**: The unique identifier of the inference endpoint.
- **`service` (Enum("anthropic"))**: The type of service supported for the specified task type. In this case, `anthropic`. - **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { max_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7788,12 +7789,12 @@ client.inference.putAzureaistudio({ task_type, azureaistudio_inference_id, servi ### Arguments [_arguments_inference.put_azureaistudio] #### Request (object) [_request_inference.put_azureaistudio] -- **`task_type` (Enum("completion" \| "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("completion" \| "rerank" \| "text_embedding"))**: The type of the inference task that the model will perform. - **`azureaistudio_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("azureaistudio"))**: The type of service supported for the specified task type. In this case, `azureaistudio`. - **`service_settings` ({ api_key, endpoint_type, target, provider, rate_limit })**: Settings used to install the inference model. These settings are specific to the `openai` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. -- **`task_settings` (Optional, { do_sample, max_new_tokens, temperature, top_p, user })**: Settings to configure the inference task. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { do_sample, max_new_tokens, temperature, top_p, user, return_documents, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7823,7 +7824,7 @@ NOTE: The `chat_completion` task type only supports streaming and only through t - **`azureopenai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("azureopenai"))**: The type of service supported for the specified task type. In this case, `azureopenai`. - **`service_settings` ({ api_key, api_version, deployment_id, entra_id, rate_limit, resource_name })**: Settings used to install the inference model. These settings are specific to the `azureopenai` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { user })**: Settings to configure the inference task. These settings are specific to the task type you specified. 
- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7847,11 +7848,73 @@ client.inference.putCohere({ task_type, cohere_inference_id, service, service_se - **`service` (Enum("cohere"))**: The type of service supported for the specified task type. In this case, `cohere`. - **`service_settings` ({ api_key, embedding_type, model_id, rate_limit, similarity })**: Settings used to install the inference model. These settings are specific to the `cohere` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. +## client.inference.putCustom [_inference.put_custom] +Create a custom inference endpoint. + +The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations. +The custom service gives you the ability to define the headers, url, query parameters, request body, and secrets. +The custom service supports the template replacement functionality, which enables you to define a template that can be replaced with the value associated with that key. +Templates are portions of a string that start with `${` and end with `}`. +The parameters `secret_parameters` and `task_settings` are checked for keys for template replacement. Template replacement is supported in the `request`, `headers`, `url`, and `query_parameters`. +If the definition (key) is not found for a template, an error message is returned. +In case of an endpoint definition like the following: +``` +PUT _inference/text_embedding/test-text-embedding +{ + "service": "custom", + "service_settings": { + "secret_parameters": { + "api_key": "" + }, + "url": "...endpoints.huggingface.cloud/v1/embeddings", + "headers": { + "Authorization": "Bearer ${api_key}", + "Content-Type": "application/json" + }, + "request": "{\"input\": ${input}}", + "response": { + "json_parser": { + "text_embeddings":"$.data[*].embedding[*]" + } + } + } +} +``` +To replace `${api_key}` the `secret_parameters` and `task_settings` are checked for a key named `api_key`. + +> info +> Templates should not be surrounded by quotes. + +Pre-defined templates: +* `${input}` refers to the array of input strings that comes from the `input` field of the subsequent inference requests. +* `${input_type}` refers to the input type translation values. +* `${query}` refers to the query field used specifically for reranking tasks. +* `${top_n}` refers to the `top_n` field available when performing rerank requests. +* `${return_documents}` refers to the `return_documents` field available when performing rerank requests. 
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom) + +```ts +client.inference.putCustom({ task_type, custom_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_custom] + +#### Request (object) [_request_inference.put_custom] +- **`task_type` (Enum("text_embedding" \| "sparse_embedding" \| "rerank" \| "completion"))**: The type of the inference task that the model will perform. +- **`custom_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("custom"))**: The type of service supported for the specified task type. In this case, `custom`. +- **`service_settings` ({ headers, input_type, query_parameters, request, response, secret_parameters, url })**: Settings used to install the inference model. +These settings are specific to the `custom` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { parameters })**: Settings to configure the inference task. +These settings are specific to the task type you specified. + ## client.inference.putDeepseek [_inference.put_deepseek] Create a DeepSeek inference endpoint. @@ -7871,7 +7934,7 @@ client.inference.putDeepseek({ task_type, deepseek_inference_id, service, servic - **`service` (Enum("deepseek"))**: The type of service supported for the specified task type. In this case, `deepseek`. - **`service_settings` ({ api_key, model_id, url })**: Settings used to install the inference model. These settings are specific to the `deepseek` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putElasticsearch [_inference.put_elasticsearch] @@ -7906,7 +7969,7 @@ client.inference.putElasticsearch({ task_type, elasticsearch_inference_id, servi The must not match the `model_id`. - **`service` (Enum("elasticsearch"))**: The type of service supported for the specified task type. In this case, `elasticsearch`. - **`service_settings` ({ adaptive_allocations, deployment_id, model_id, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elasticsearch` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { return_documents })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7943,7 +8006,7 @@ client.inference.putElser({ task_type, elser_inference_id, service, service_sett - **`elser_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("elser"))**: The type of service supported for the specified task type. In this case, `elser`. 
- **`service_settings` ({ adaptive_allocations, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elser` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putGoogleaistudio [_inference.put_googleaistudio] @@ -7964,7 +8027,7 @@ client.inference.putGoogleaistudio({ task_type, googleaistudio_inference_id, ser - **`googleaistudio_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("googleaistudio"))**: The type of service supported for the specified task type. In this case, `googleaistudio`. - **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putGooglevertexai [_inference.put_googlevertexai] @@ -7985,7 +8048,7 @@ client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, ser - **`googlevertexai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`. - **`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { auto_truncate, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -8043,7 +8106,7 @@ client.inference.putHuggingFace({ task_type, huggingface_inference_id, service, - **`huggingface_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("hugging_face"))**: The type of service supported for the specified task type. In this case, `hugging_face`. - **`service_settings` ({ api_key, rate_limit, url, model_id })**: Settings used to install the inference model. These settings are specific to the `hugging_face` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. 
- **`task_settings` (Optional, { return_documents, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -8069,7 +8132,7 @@ client.inference.putJinaai({ task_type, jinaai_inference_id, service, service_se - **`jinaai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("jinaai"))**: The type of service supported for the specified task type. In this case, `jinaai`. - **`service_settings` ({ api_key, model_id, rate_limit, similarity })**: Settings used to install the inference model. These settings are specific to the `jinaai` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { return_documents, task, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -8092,7 +8155,7 @@ client.inference.putMistral({ task_type, mistral_inference_id, service, service_ - **`mistral_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("mistral"))**: The type of service supported for the specified task type. In this case, `mistral`. - **`service_settings` ({ api_key, max_input_tokens, model, rate_limit })**: Settings used to install the inference model. These settings are specific to the `mistral` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putOpenai [_inference.put_openai] @@ -8114,7 +8177,7 @@ NOTE: The `chat_completion` task type only supports streaming and only through t - **`openai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("openai"))**: The type of service supported for the specified task type. In this case, `openai`. - **`service_settings` ({ api_key, dimensions, model_id, organization_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `openai` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { user })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. 
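As a quick illustration of the `putOpenai` signature documented above, a minimal sketch assuming an existing `client` instance; the inference ID, model name, and API key below are placeholder assumptions, not values from this patch:

```ts
// Hypothetical usage sketch for the OpenAI integration described above.
// 'my-openai-embeddings' and 'text-embedding-3-small' are illustrative only.
const endpoint = await client.inference.putOpenai({
  task_type: 'text_embedding',
  openai_inference_id: 'my-openai-embeddings',
  service: 'openai',
  service_settings: {
    api_key: process.env.OPENAI_API_KEY ?? '<api-key>', // placeholder secret
    model_id: 'text-embedding-3-small' // assumed model name
  },
  timeout: '30s' // optional: how long to wait for endpoint creation
})
console.log(endpoint.inference_id)
```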
@@ -8139,7 +8202,7 @@ client.inference.putVoyageai({ task_type, voyageai_inference_id, service, servic - **`voyageai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("voyageai"))**: The type of service supported for the specified task type. In this case, `voyageai`. - **`service_settings` ({ dimensions, model_id, rate_limit, embedding_type })**: Settings used to install the inference model. These settings are specific to the `voyageai` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { input_type, return_documents, top_k, truncation })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 360364cc2..53f59195f 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -202,6 +202,19 @@ export default class Inference { 'timeout' ] }, + 'inference.put_custom': { + path: [ + 'task_type', + 'custom_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, 'inference.put_deepseek': { path: [ 'task_type', @@ -703,7 +716,7 @@ export default class Inference { } /** - * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`completion`, `chat_completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`) + * Create an inference endpoint. 
IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `rerank`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`completion`, `chat_completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`)
 * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation}
 */
 async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -1151,6 +1164,64 @@ export default class Inference {
 return await this.transport.request({ path, method, querystring, body, meta }, options)
 }

+ /**
+ * Create a custom inference endpoint. The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations. The custom service gives you the ability to define the headers, url, query parameters, request body, and secrets. The custom service supports the template replacement functionality, which enables you to define a template that can be replaced with the value associated with that key. Templates are portions of a string that start with `${` and end with `}`. The parameters `secret_parameters` and `task_settings` are checked for keys for template replacement. Template replacement is supported in the `request`, `headers`, `url`, and `query_parameters`. If the definition (key) is not found for a template, an error message is returned. In case of an endpoint definition like the following: ``` PUT _inference/text_embedding/test-text-embedding { "service": "custom", "service_settings": { "secret_parameters": { "api_key": "" }, "url": "...endpoints.huggingface.cloud/v1/embeddings", "headers": { "Authorization": "Bearer ${api_key}", "Content-Type": "application/json" }, "request": "{\"input\": ${input}}", "response": { "json_parser": { "text_embeddings":"$.data[*].embedding[*]" } } } } ``` To replace `${api_key}` the `secret_parameters` and `task_settings` are checked for a key named `api_key`. > info > Templates should not be surrounded by quotes.
Pre-defined templates: * `${input}` refers to the array of input strings that comes from the `input` field of the subsequent inference requests. * `${input_type}` refers to the input type translation values. * `${query}` refers to the query field used specifically for reranking tasks. * `${top_n}` refers to the `top_n` field available when performing rerank requests. * `${return_documents}` refers to the `return_documents` field available when performing rerank requests. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom | Elasticsearch API documentation} + */ + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptions): Promise + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_custom'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.custom_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_custom', + pathParts: { + task_type: params.task_type, + custom_inference_id: params.custom_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Create a DeepSeek inference endpoint. Create an inference endpoint to perform an inference task with the `deepseek` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-deepseek | Elasticsearch API documentation} diff --git a/src/api/api/info.ts b/src/api/api/info.ts index 536fabb6c..d490a4fac 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -35,7 +35,7 @@ const acceptedParams: Record diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts index 59a71420b..6a0d5b078 100644 --- a/src/api/api/msearch.ts +++ b/src/api/api/msearch.ts @@ -43,6 +43,7 @@ const acceptedParams: Record info + * > The content string needs to be a single line except when using the Kibana console. */ + content: string +} + +export interface InferenceCustomResponseParams { + /** Specifies the JSON parser that is used to parse the response from the custom service. + * Different task types require different json_parser parameters. 
+ * For example:
+ * ```
+ * # text_embedding
+ * # For a response like this:
+ *
+ * {
+ * "object": "list",
+ * "data": [
+ * {
+ * "object": "embedding",
+ * "index": 0,
+ * "embedding": [
+ * 0.014539449,
+ * -0.015288644
+ * ]
+ * }
+ * ],
+ * "model": "text-embedding-ada-002-v2",
+ * "usage": {
+ * "prompt_tokens": 8,
+ * "total_tokens": 8
+ * }
+ * }
+ *
+ * # the json_parser definition should look like this:
+ *
+ * "response":{
+ * "json_parser":{
+ * "text_embeddings":"$.data[*].embedding[*]"
+ * }
+ * }
+ *
+ * # sparse_embedding
+ * # For a response like this:
+ *
+ * {
+ * "request_id": "75C50B5B-E79E-4930-****-F48DBB392231",
+ * "latency": 22,
+ * "usage": {
+ * "token_count": 11
+ * },
+ * "result": {
+ * "sparse_embeddings": [
+ * {
+ * "index": 0,
+ * "embedding": [
+ * {
+ * "token_id": 6,
+ * "weight": 0.101
+ * },
+ * {
+ * "token_id": 163040,
+ * "weight": 0.28417
+ * }
+ * ]
+ * }
+ * ]
+ * }
+ * }
+ *
+ * # the json_parser definition should look like this:
+ *
+ * "response":{
+ * "json_parser":{
+ * "token_path":"$.result.sparse_embeddings[*].embedding[*].token_id",
+ * "weight_path":"$.result.sparse_embeddings[*].embedding[*].weight"
+ * }
+ * }
+ *
+ * # rerank
+ * # For a response like this:
+ *
+ * {
+ * "results": [
+ * {
+ * "index": 3,
+ * "relevance_score": 0.999071,
+ * "document": "abc"
+ * },
+ * {
+ * "index": 4,
+ * "relevance_score": 0.7867867,
+ * "document": "123"
+ * },
+ * {
+ * "index": 0,
+ * "relevance_score": 0.32713068,
+ * "document": "super"
+ * }
+ * ],
+ * }
+ *
+ * # the json_parser definition should look like this:
+ *
+ * "response":{
+ * "json_parser":{
+ * "reranked_index":"$.result.scores[*].index", // optional
+ * "relevance_score":"$.result.scores[*].score",
+ * "document_text":"xxx" // optional
+ * }
+ * }
+ *
+ * # completion
+ * # For a response like this:
+ *
+ * {
+ * "id": "chatcmpl-B9MBs8CjcvOU2jLn4n570S5qMJKcT",
+ * "object": "chat.completion",
+ * "created": 1741569952,
+ * "model": "gpt-4.1-2025-04-14",
+ * "choices": [
+ * {
+ * "index": 0,
+ * "message": {
+ * "role": "assistant",
+ * "content": "Hello! How can I assist you today?",
+ * "refusal": null,
+ * "annotations": []
+ * },
+ * "logprobs": null,
+ * "finish_reason": "stop"
+ * }
+ * ]
+ * }
+ *
+ * # the json_parser definition should look like this:
+ *
+ * "response":{
+ * "json_parser":{
+ * "completion_result":"$.choices[*].message.content"
+ * }
+ * } */
+ json_parser: any
+}
+
+export interface InferenceCustomServiceSettings {
+ /** Specifies the HTTPS header parameters – such as `Authentication` or `Content-Type` – that are required to access the custom service.
+ * For example:
+ * ```
+ * "headers":{
+ * "Authorization": "Bearer ${api_key}",
+ * "Content-Type": "application/json;charset=utf-8"
+ * }
+ * ``` */
+ headers?: any
+ /** Specifies the input type translation values that are used to replace the `${input_type}` template in the request body.
+ * For example:
+ * ```
+ * "input_type": {
+ * "translation": {
+ * "ingest": "do_ingest",
+ * "search": "do_search"
+ * },
+ * "default": "a_default"
+ * },
+ * ```
+ * If the subsequent inference requests come from a search context, the `search` key will be used and the template will be replaced with `do_search`.
+ * If it comes from the ingest context `do_ingest` is used. If it's a different context that is not specified, the default value will be used. If no default is specified an empty string is used.
+ * `translation` can be:
+ * * `classification`
+ * * `clustering`
+ * * `ingest`
+ * * `search` */
+ input_type?: any
+ /** Specifies the query parameters as a list of tuples. The arrays inside the `query_parameters` must have two items, a key and a value.
+ * For example:
+ * ```
+ * "query_parameters":[
+ * ["param_key", "some_value"],
+ * ["param_key", "another_value"],
+ * ["other_key", "other_value"]
+ * ]
+ * ```
+ * If the base url is `https://www.elastic.co` it results in: `https://www.elastic.co?param_key=some_value&param_key=another_value&other_key=other_value`. */
+ query_parameters?: any
+ /** The request configuration object. */
+ request: InferenceCustomRequestParams
+ /** The response configuration object. */
+ response: InferenceCustomResponseParams
+ /** Specifies secret parameters, like `api_key` or `api_token`, that are required to access the custom service.
+ * For example:
+ * ```
+ * "secret_parameters":{
+ * "api_key":""
+ * }
+ * ``` */
+ secret_parameters: any
+ /** The URL endpoint to use for the requests. */
+ url?: string
+}
+
+export type InferenceCustomServiceType = 'custom'
+
+export interface InferenceCustomTaskSettings {
+ /** Specifies parameters that are required to run the custom service. The parameters depend on the model your custom service uses.
+ * For example:
+ * ```
+ * "task_settings":{
+ * "parameters":{
+ * "input_type":"query",
+ * "return_token":true
+ * }
+ * }
+ * ``` */
+ parameters?: any
+}
+
+export type InferenceCustomTaskType = 'text_embedding' | 'sparse_embedding' | 'rerank' | 'completion'
+
 export interface InferenceDeepSeekServiceSettings {
 /** A valid API key for your DeepSeek account.
 * You can find or create your DeepSeek API keys on the DeepSeek API key page.
@@ -22862,7 +23116,29 @@ export interface InferenceInferenceChunkingSettings {
 * It is applicable only for a `sentence` chunking strategy.
 * It can be either `1` or `0`. */
 sentence_overlap?: integer
- /** The chunking strategy: `sentence` or `word`. */
+ /** This parameter is only applicable when using the `recursive` chunking strategy.
+ *
+ * Sets a predefined list of separators in the saved chunking settings based on the selected text type.
+ * Values can be `markdown` or `plaintext`.
+ *
+ * Using this parameter is an alternative to manually specifying a custom `separators` list. */
+ separator_group: string
+ /** A list of strings used as possible split points when chunking text with the `recursive` strategy.
+ *
+ * Each string can be a plain string or a regular expression (regex) pattern.
+ * The system tries each separator in order to split the text, starting from the first item in the list.
+ *
+ * After splitting, it attempts to recombine smaller pieces into larger chunks that stay within
+ * the `max_chunk_size` limit, to reduce the total number of chunks generated. */
+ separators: string[]
+ /** The chunking strategy: `sentence`, `word`, `none` or `recursive`.
+ *
+ * * If `strategy` is set to `recursive`, you must also specify:
+ *
+ * - `max_chunk_size`
+ * - either `separators` or `separator_group`
+ *
+ * Learn more about different chunking strategies in the linked documentation.
*/ strategy?: string } @@ -22926,6 +23202,13 @@ export interface InferenceInferenceEndpointInfoCohere extends InferenceInference task_type: InferenceTaskTypeCohere } +export interface InferenceInferenceEndpointInfoCustom extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeCustom +} + export interface InferenceInferenceEndpointInfoDeepSeek extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string @@ -23280,12 +23563,14 @@ export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion' export type InferenceTaskTypeAnthropic = 'completion' -export type InferenceTaskTypeAzureAIStudio = 'text_embedding' | 'completion' +export type InferenceTaskTypeAzureAIStudio = 'text_embedding' | 'completion' | 'rerank' export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion' export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion' +export type InferenceTaskTypeCustom = 'text_embedding' | 'sparse_embedding' | 'rerank' | 'completion' + export type InferenceTaskTypeDeepSeek = 'completion' | 'chat_completion' export type InferenceTaskTypeELSER = 'sparse_embedding' @@ -23673,6 +23958,29 @@ export interface InferencePutCohereRequest extends RequestBase { export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere +export interface InferencePutCustomRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceCustomTaskType + /** The unique identifier of the inference endpoint. */ + custom_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `custom`. */ + service: InferenceCustomServiceType + /** Settings used to install the inference model. + * These settings are specific to the `custom` service. */ + service_settings: InferenceCustomServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceCustomTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, custom_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, custom_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutCustomResponse = InferenceInferenceEndpointInfoCustom + export interface InferencePutDeepseekRequest extends RequestBase { /** The type of the inference task that the model will perform. 
*/ task_type: InferenceTaskTypeDeepSeek From 56c25e60f76d5ee4beb3168c22d9e8783c61ef9c Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 12:10:10 -0500 Subject: [PATCH 601/647] Update dependency apache-arrow to v21 (#2929) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index e25bfed15..57bca093a 100644 --- a/package.json +++ b/package.json @@ -92,7 +92,7 @@ }, "dependencies": { "@elastic/transport": "^9.0.1", - "apache-arrow": "18.x - 20.x", + "apache-arrow": "18.x - 21.x", "tslib": "^2.4.0" }, "tap": { From c2272b1c9ab2e12e77ca8bd389020d1dbf21404a Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 12:10:42 -0500 Subject: [PATCH 602/647] Update dependency zx to v8.7.1 (#2928) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 57bca093a..434986d95 100644 --- a/package.json +++ b/package.json @@ -88,7 +88,7 @@ "typescript": "5.8.3", "workq": "3.0.0", "xmlbuilder2": "3.1.1", - "zx": "8.6.1" + "zx": "8.7.1" }, "dependencies": { "@elastic/transport": "^9.0.1", From feeaf155558a682ef2b5122940fb1a7f8bb607a0 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 28 Jul 2025 14:36:19 -0500 Subject: [PATCH 603/647] Docs on how to disable OpenTelemetry (#2934) --- docs/reference/observability.md | 35 +++++++ docs/reference/transport.md | 6 +- package.json | 1 + test/unit/client.test.ts | 161 +++++++++++++++++++++++++++----- 4 files changed, 175 insertions(+), 28 deletions(-) diff --git a/docs/reference/observability.md b/docs/reference/observability.md index d3b5f59f3..b307c5571 100644 --- a/docs/reference/observability.md +++ b/docs/reference/observability.md @@ -35,6 +35,41 @@ To start sending Elasticsearch trace data to your OpenTelemetry endpoint, follow node --require '@opentelemetry/auto-instrumentations-node/register' index.js ``` +### Disabling OpenTelemetry collection [disable-otel] + +As of `@elastic/transport` version 9.1.0—or 8.10.0 when using `@elastic/elasticsearch` 8.x—OpenTelemetry tracing can be disabled in multiple ways. + +To entirely disable OpenTelemetry collection, you can provide a custom `Transport` at client instantiation time that sets `openTelemetry.enabled` to `false`: + +```typescript +import { Transport } from '@elastic/transport' + +class MyTransport extends Transport { + async request(params, options = {}): Promise { + options.openTelemetry = { enabled: false } + return super.request(params, options) + } +} + +const client = new Client({ + node: '...', + auth: { ... }, + Transport: MyTransport +}) +``` + +Alternatively, you can also export an environment variable `OTEL_ELASTICSEARCH_ENABLED=false` to achieve the same effect. + +If you would not like OpenTelemetry to be disabled entirely, but would like the client to suppress tracing, you can use the option `openTelemetry.suppressInternalInstrumentation = true` instead. 
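
A minimal sketch of that option, following the same custom `Transport` pattern shown above; here the `suppressInternalInstrumentation` flag is set for every request:

```typescript
import { Transport } from '@elastic/transport'

// A minimal sketch: OpenTelemetry collection stays enabled overall,
// but the client suppresses its own internal spans on each request.
class MySuppressingTransport extends Transport {
  async request(params, options = {}): Promise<any> {
    options.openTelemetry = { suppressInternalInstrumentation: true }
    return super.request(params, options)
  }
}
```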
+ +If you would like to keep either option enabled by default, but want to disable them for a single API call, you can pass `Transport` options as a second argument to any API function call: + +```typescript +const response = await client.search({ ... }, { + openTelemetry: { enabled: false } +}) +``` + ## Events [_events] The client is an event emitter. This means that you can listen for its events to add additional logic to your code, without needing to change the client’s internals or how you use the client. You can find the events' names by accessing the `events` key of the client: diff --git a/docs/reference/transport.md b/docs/reference/transport.md index 382574bb6..4977a2890 100644 --- a/docs/reference/transport.md +++ b/docs/reference/transport.md @@ -12,7 +12,7 @@ const { Client } = require('@elastic/elasticsearch') const { Transport } = require('@elastic/transport') class MyTransport extends Transport { - request (params, options, callback) { + request (params, options) { // your code } } @@ -26,9 +26,9 @@ Sometimes you need to inject a small snippet of your code and then continue to u ```js class MyTransport extends Transport { - request (params, options, callback) { + request (params, options) { // your code - return super.request(params, options, callback) + return super.request(params, options) } } ``` diff --git a/package.json b/package.json index 434986d95..55d8cdc8f 100644 --- a/package.json +++ b/package.json @@ -59,6 +59,7 @@ }, "devDependencies": { "@elastic/request-converter": "9.1.2", + "@opentelemetry/sdk-trace-base": "1.30.1", "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts index feffc373e..ac8203f55 100644 --- a/test/unit/client.test.ts +++ b/test/unit/client.test.ts @@ -8,10 +8,12 @@ import { URL } from 'node:url' import { setTimeout } from 'node:timers/promises' import { test } from 'tap' import FakeTimers from '@sinonjs/fake-timers' +import { Transport } from '@elastic/transport' import { buildServer, connection } from '../utils' import { Client, errors, SniffingTransport } from '../..' 
import * as symbols from '@elastic/transport/lib/symbols' import { BaseConnectionPool, CloudConnectionPool, WeightedConnectionPool, HttpConnection } from '@elastic/transport' +import { BasicTracerProvider, InMemorySpanExporter, SimpleSpanProcessor } from '@opentelemetry/sdk-trace-base' let clientVersion: string = require('../../package.json').version // eslint-disable-line if (clientVersion.includes('-')) { @@ -124,7 +126,7 @@ test('Basic auth', async t => { t.plan(1) const Connection = connection.buildMockConnection({ - onRequest (opts) { + onRequest(opts) { t.match(opts.headers, { authorization: 'Basic aGVsbG86d29ybGQ=' }) return { statusCode: 200, @@ -149,7 +151,7 @@ test('Basic auth via url', async t => { t.plan(1) const Connection = connection.buildMockConnection({ - onRequest (opts) { + onRequest(opts) { t.match(opts.headers, { authorization: 'Basic aGVsbG86d29ybGQ=' }) return { statusCode: 200, @@ -170,7 +172,7 @@ test('ApiKey as string', async t => { t.plan(1) const Connection = connection.buildMockConnection({ - onRequest (opts) { + onRequest(opts) { t.match(opts.headers, { authorization: 'ApiKey foobar' }) return { statusCode: 200, @@ -194,7 +196,7 @@ test('ApiKey as object', async t => { t.plan(1) const Connection = connection.buildMockConnection({ - onRequest (opts) { + onRequest(opts) { t.match(opts.headers, { authorization: 'ApiKey Zm9vOmJhcg==' }) return { statusCode: 200, @@ -221,7 +223,7 @@ test('Bearer auth', async t => { t.plan(1) const Connection = connection.buildMockConnection({ - onRequest (opts) { + onRequest(opts) { t.match(opts.headers, { authorization: 'Bearer token' }) return { statusCode: 200, @@ -245,7 +247,7 @@ test('Override authentication per request', async t => { t.plan(1) const Connection = connection.buildMockConnection({ - onRequest (opts) { + onRequest(opts) { t.match(opts.headers, { authorization: 'Basic foobar' }) return { statusCode: 200, @@ -273,7 +275,7 @@ test('Custom headers per request', async t => { t.plan(1) const Connection = connection.buildMockConnection({ - onRequest (opts) { + onRequest(opts) { t.match(opts.headers, { foo: 'bar', faz: 'bar' @@ -301,7 +303,7 @@ test('Close the client', async t => { t.plan(1) class MyConnectionPool extends BaseConnectionPool { - async empty (): Promise { + async empty(): Promise { t.pass('called') } } @@ -336,10 +338,10 @@ test('Elastic Cloud config', t => { t.test('Invalid Cloud ID will throw ConfigurationError', t => { t.throws(() => new Client({ - cloud : { - id : 'invalidCloudIdThatIsNotBase64' + cloud: { + id: 'invalidCloudIdThatIsNotBase64' }, - auth : { + auth: { username: 'elastic', password: 'changeme' } @@ -414,7 +416,7 @@ test('Meta header enabled by default', async t => { t.plan(1) const Connection = connection.buildMockConnection({ - onRequest (opts) { + onRequest(opts) { t.match(opts.headers, { 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}` }) return { statusCode: 200, @@ -435,7 +437,7 @@ test('Meta header disabled', async t => { t.plan(1) const Connection = connection.buildMockConnection({ - onRequest (opts) { + onRequest(opts) { t.notOk(opts.headers?.['x-elastic-client-meta']) return { statusCode: 200, @@ -456,12 +458,13 @@ test('Meta header disabled', async t => { test('Meta header indicates when UndiciConnection is used', async t => { t.plan(1) - function handler (req: http.IncomingMessage, res: http.ServerResponse) { + function handler(req: http.IncomingMessage, res: http.ServerResponse) { 
t.equal(req.headers['x-elastic-client-meta'], `es=${clientVersion},js=${nodeVersion},t=${transportVersion},un=${nodeVersion}`) res.end('ok') } const [{ port }, server] = await buildServer(handler) + t.after(() => server.stop()) const client = new Client({ node: `http://localhost:${port}`, @@ -469,18 +472,18 @@ test('Meta header indicates when UndiciConnection is used', async t => { }) await client.transport.request({ method: 'GET', path: '/' }) - server.stop() }) test('Meta header indicates when HttpConnection is used', async t => { t.plan(1) - function handler (req: http.IncomingMessage, res: http.ServerResponse) { + function handler(req: http.IncomingMessage, res: http.ServerResponse) { t.equal(req.headers['x-elastic-client-meta'], `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}`) res.end('ok') } const [{ port }, server] = await buildServer(handler) + t.after(() => server.stop()) const client = new Client({ node: `http://localhost:${port}`, @@ -488,7 +491,6 @@ test('Meta header indicates when HttpConnection is used', async t => { }) await client.transport.request({ method: 'GET', path: '/' }) - server.stop() }) test('caFingerprint', t => { @@ -503,9 +505,9 @@ test('caFingerprint', t => { test('caFingerprint can\'t be configured over http / 1', t => { t.throws(() => new Client({ - node: 'http://localhost:9200/', - caFingerprint: 'FO:OB:AR' - }), + node: 'http://localhost:9200/', + caFingerprint: 'FO:OB:AR' + }), errors.ConfigurationError ) t.end() @@ -513,9 +515,9 @@ test('caFingerprint can\'t be configured over http / 1', t => { test('caFingerprint can\'t be configured over http / 2', t => { t.throws(() => new Client({ - nodes: ['http://localhost:9200/'], - caFingerprint: 'FO:OB:AR' - }), + nodes: ['http://localhost:9200/'], + caFingerprint: 'FO:OB:AR' + }), errors.ConfigurationError ) t.end() @@ -551,7 +553,7 @@ test('Ensure new client does not time out if requestTimeout is not set', async t const clock = FakeTimers.install({ toFake: ['setTimeout'] }) t.teardown(() => clock.uninstall()) - function handler (_req: http.IncomingMessage, res: http.ServerResponse) { + function handler(_req: http.IncomingMessage, res: http.ServerResponse) { setTimeout(1000 * 60 * 60).then(() => { t.ok('timeout ended') res.setHeader('content-type', 'application/json') @@ -660,7 +662,7 @@ test('serverless defaults', t => { t.plan(1) const Connection = connection.buildMockConnection({ - onRequest (opts) { + onRequest(opts) { t.equal(opts.headers?.['elastic-api-version'], '2023-10-31') return { statusCode: 200, @@ -686,3 +688,112 @@ test('serverless defaults', t => { t.end() }) + +test('custom transport: class', async t => { + t.plan(3) + + class MyTransport extends Transport { + async request(params, options): Promise { + t.ok(true, 'custom Transport request function should be called') + return super.request(params, options) + } + } + + function handler(_req: http.IncomingMessage, res: http.ServerResponse) { + t.ok(true, 'handler should be called') + res.end('ok') + } + + const [{ port }, server] = await buildServer(handler) + t.after(() => server.stop()) + + const client = new Client({ + node: `http://localhost:${port}`, + Transport: MyTransport + }) + + t.ok(client.transport instanceof MyTransport, 'Custom transport should be used') + + client.transport.request({ method: 'GET', path: '/' }) +}) + +test('custom transport: disable otel via options', async t => { + const exporter = new InMemorySpanExporter() + const processor = new 
SimpleSpanProcessor(exporter) + const provider = new BasicTracerProvider({ + spanProcessors: [processor] + }) + provider.register() + + t.after(async () => { + await provider.forceFlush() + exporter.reset() + await provider.shutdown() + }) + + class MyTransport extends Transport { + async request(params, options = {}): Promise { + // @ts-expect-error + options.openTelemetry = { enabled: false } + return super.request(params, options) + } + } + + function handler(_req: http.IncomingMessage, res: http.ServerResponse) { + res.end('ok') + } + + const [{ port }, server] = await buildServer(handler) + t.after(() => server.stop()) + + const client = new Client({ + node: `http://localhost:${port}`, + Transport: MyTransport + }) + + await client.transport.request({ + path: '/hello', + method: 'GET', + meta: { name: 'hello' }, + }) + + t.equal(exporter.getFinishedSpans().length, 0) + t.end() +}) + +test('custom transport: disable otel via env var', async t => { + const exporter = new InMemorySpanExporter() + const processor = new SimpleSpanProcessor(exporter) + const provider = new BasicTracerProvider({ + spanProcessors: [processor] + }) + provider.register() + + t.after(async () => { + await provider.forceFlush() + exporter.reset() + await provider.shutdown() + }) + + function handler(_req: http.IncomingMessage, res: http.ServerResponse) { + res.end('ok') + } + + const [{ port }, server] = await buildServer(handler) + t.after(() => server.stop()) + + const client = new Client({ + node: `http://localhost:${port}`, + }) + + process.env.OTEL_ELASTICSEARCH_ENABLED = 'false' + + await client.transport.request({ + path: '/hello', + method: 'GET', + meta: { name: 'hello' }, + }) + + t.equal(exporter.getFinishedSpans().length, 0) + t.end() +}) From e43d1b1ddc95303b23ca9e8d3c99f0c45dc940dc Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 30 Jul 2025 11:09:41 -0500 Subject: [PATCH 604/647] Merge test and publish jobs for simplicity (#2935) --- .github/workflows/npm-publish-unstable.yml | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index e7d77c449..7e830f2c0 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -37,6 +37,9 @@ jobs: needs: paths-filter if: ${{ needs.paths-filter.outputs.src == 'true' }} runs-on: ubuntu-latest + permissions: + contents: write + id-token: write steps: # pause for 30 minutes to avoid publishing more than 2x per hour - name: Debounce 30 minutes @@ -57,21 +60,8 @@ jobs: npm install - name: Run tests run: npm test - - # if tests pass, publish unstable - publish: - name: Publish unstable - needs: test - runs-on: ubuntu-latest - permissions: - contents: write - id-token: write - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - with: - persist-credentials: false - ref: main - - name: npm publish + # if tests pass, publish unstable + - name: publish unstable build run: | # set unstable version value unstable_tag="unstable.$(date --utc +%Y%m%d%H%M%S)" From eea02406e14cd3c80bec4561ac68a6c80de093c2 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 30 Jul 2025 12:36:05 -0500 Subject: [PATCH 605/647] 9.1.0 changelog (#2940) --- docs/release-notes/index.md | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md index be7e11e09..6c1ea53a5 100644 --- a/docs/release-notes/index.md +++ 
b/docs/release-notes/index.md @@ -20,25 +20,35 @@ To check for security updates, go to [Security announcements for the Elastic sta % ### Fixes [elasticsearch-javascript-client-next-fixes] % \* +## 9.1.0 [elasticsearch-javascript-client-9.1.0-release-notes] + +### Features and enhancements [elasticsearch-javascript-client-9.1.0-features-enhancements] + +- **Compatibility with Elasticsearch 9.1:** All changes and additions to Elasticsearch APIs for its 9.1 release are reflected in this release. + +### Fixes [elasticsearch-javascript-client-9.1.0-fixes] + +- **Deep merge nested options on client instantiation:** If custom values for `redaction` and `headers` options were set by the user during `Client` instantiation, nested default values would be dropped rather than deep-merged. This has been fixed. + ## 9.0.3 ### Fixes [elasticsearch-javascript-client-9.0.3-fixes] -**Improved compatibility with Elasticsearch 9.0:** Several fixes and improvements have been made to APIs and TypeScript type definitions to better reflect the Elasticsearch 9.0 specification. +- **Improved compatibility with Elasticsearch 9.0:** Several fixes and improvements have been made to APIs and TypeScript type definitions to better reflect the Elasticsearch 9.0 specification. ## 9.0.2 ### Fixes [elasticsearch-javascript-client-9.0.2-fixes] -**Remove dangling references to `typesWithBodyKey`:** the `typesWithBodyKey.ts` file and `estypesWithBody` export were removed in 9.0.0 but were still being referenced in the `index.d.ts` file that declares TypeScript types. This reference has been removed. +- **Remove dangling references to `typesWithBodyKey`:** the `typesWithBodyKey.ts` file and `estypesWithBody` export were removed in 9.0.0 but were still being referenced in the `index.d.ts` file that declares TypeScript types. This reference has been removed. ## 9.0.1 ### Fixes [elasticsearch-javascript-client-9.0.1-fixes] -**Reinstate `nodeFilter` and node `roles` feature:** The docs note a `nodeFilter` option on the client that will, by default, filter the nodes based on any `roles` values that are set at instantiation. At some point, this functionality was partially disabled. This brings the feature back, ensuring that it matches what the documentation has said it does all along. +- **Reinstate `nodeFilter` and node `roles` feature:** The docs note a `nodeFilter` option on the client that will, by default, filter the nodes based on any `roles` values that are set at instantiation. At some point, this functionality was partially disabled. This brings the feature back, ensuring that it matches what the documentation has said it does all along. -**Ensure Apache Arrow ES|QL helper uses async iterator:** the [`esql.toArrowReader()` helper function](/reference/client-helpers.md#_toarrowreader) was trying to return `RecordBatchStreamReader`—a synchronous iterator—despite the fact that the `apache-arrow` package was, in most cases, automatically coercing it to `AsyncRecordBatchStreamReader`, its asynchronous counterpart. It now is always returned as an async iterator. +- **Ensure Apache Arrow ES|QL helper uses async iterator:** the [`esql.toArrowReader()` helper function](/reference/client-helpers.md#_toarrowreader) was trying to return `RecordBatchStreamReader`—a synchronous iterator—despite the fact that the `apache-arrow` package was, in most cases, automatically coercing it to `AsyncRecordBatchStreamReader`, its asynchronous counterpart. It now is always returned as an async iterator. 
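
  A minimal sketch of consuming the reader asynchronously; the index name and query are placeholders:

```typescript
// A minimal sketch; the index name and query are placeholders.
// The reader must now be consumed with `for await`, since
// `toArrowReader()` yields an asynchronous iterator.
const reader = await client.helpers
  .esql({ query: 'FROM my-index | LIMIT 100' })
  .toArrowReader()

for await (const recordBatch of reader) {
  console.log(recordBatch.numRows)
}
```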
## 9.0.0 [elasticsearch-javascript-client-9.0.0-release-notes] From b604e6db69adf4d5405717d35b05b43c8679cee2 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 1 Aug 2025 12:46:10 -0500 Subject: [PATCH 606/647] Pass enableClientMeta option to transport (#2942) --- src/client.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client.ts b/src/client.ts index 7e303bd52..e4623e77f 100644 --- a/src/client.ts +++ b/src/client.ts @@ -407,7 +407,9 @@ export default class Client extends API { productCheck: 'Elasticsearch', maxResponseSize: options.maxResponseSize, maxCompressedResponseSize: options.maxCompressedResponseSize, - redaction: options.redaction + redaction: options.redaction, + // @ts-expect-error new option being added to transport in next minor + enableClientMeta: options.enableMetaHeader } if (options.serverMode !== 'serverless') { transportOptions = Object.assign({}, transportOptions, { From a98721b78cf8807599808bcfe0ee44a51d4211fc Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 1 Aug 2025 12:57:53 -0500 Subject: [PATCH 607/647] Fix egregious option typo (#2943) --- src/client.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client.ts b/src/client.ts index e4623e77f..2aeaa8011 100644 --- a/src/client.ts +++ b/src/client.ts @@ -409,7 +409,7 @@ export default class Client extends API { maxCompressedResponseSize: options.maxCompressedResponseSize, redaction: options.redaction, // @ts-expect-error new option being added to transport in next minor - enableClientMeta: options.enableMetaHeader + enableMetaHeader: options.enableMetaHeader } if (options.serverMode !== 'serverless') { transportOptions = Object.assign({}, transportOptions, { From 9015127a1bd9bf0f598d6e7e77f930c3e3ca17f2 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 1 Aug 2025 13:02:40 -0500 Subject: [PATCH 608/647] Drop @ts-expect-error (#2944) --- src/client.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client.ts b/src/client.ts index 2aeaa8011..9c784ab0e 100644 --- a/src/client.ts +++ b/src/client.ts @@ -408,7 +408,6 @@ export default class Client extends API { maxResponseSize: options.maxResponseSize, maxCompressedResponseSize: options.maxCompressedResponseSize, redaction: options.redaction, - // @ts-expect-error new option being added to transport in next minor enableMetaHeader: options.enableMetaHeader } if (options.serverMode !== 'serverless') { From 845ac27fe8dae57a2f723dea128e5efa9ea53efd Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 1 Aug 2025 13:53:43 -0500 Subject: [PATCH 609/647] Use ts-ignore to fix chicken/egg issue (#2947) --- src/client.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client.ts b/src/client.ts index 9c784ab0e..c159428e3 100644 --- a/src/client.ts +++ b/src/client.ts @@ -408,6 +408,8 @@ export default class Client extends API { maxResponseSize: options.maxResponseSize, maxCompressedResponseSize: options.maxCompressedResponseSize, redaction: options.redaction, + /* eslint-disable-next-line @typescript-eslint/prefer-ts-expect-error */ + // @ts-ignore enableMetaHeader will be available in transport v9.1.1 enableMetaHeader: options.enableMetaHeader } if (options.serverMode !== 'serverless') { From e1cb32edba3aaeb1959438e821825ae2581cc8ae Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 1 Aug 2025 14:31:55 -0500 Subject: [PATCH 610/647] Run tests against Node.js 24 (#2948) --- .buildkite/pipeline.yml | 4 ++-- .github/workflows/nodejs.yml | 4 ++-- .github/workflows/npm-publish-unstable.yml | 2 +- 
.github/workflows/npm-publish.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index d63fc6edf..e664f03d9 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -10,7 +10,7 @@ steps: env: NODE_VERSION: "{{ matrix.nodejs }}" TEST_SUITE: "platinum" - STACK_VERSION: 9.0.0 + STACK_VERSION: 9.1.0 GITHUB_TOKEN_PATH: "secret/ci/elastic-elasticsearch-js/github-token" TEST_ES_STACK: "1" matrix: @@ -18,7 +18,7 @@ steps: nodejs: - "20" - "22" - - "23" + - "24" command: ./.buildkite/run-tests.sh artifact_paths: "./junit-output/junit-*.xml" - wait: ~ diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 9ee524308..5d3896a7c 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -32,7 +32,7 @@ jobs: strategy: fail-fast: false matrix: - node-version: [20.x, 22.x, 23.x] + node-version: [20.x, 22.x, 24.x] os: [ubuntu-latest, windows-latest, macOS-latest] steps: @@ -73,7 +73,7 @@ jobs: - name: Use Node.js uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: - node-version: 22.x + node-version: 24.x - name: Install run: | diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index 7e830f2c0..0b6a4f666 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -52,7 +52,7 @@ jobs: ref: main - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: - node-version: "22.x" + node-version: "24.x" registry-url: "https://registry.npmjs.org/" - name: Install dependencies run: | diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index 3e12d208e..ac838a8de 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -18,7 +18,7 @@ jobs: ref: ${{ github.event.inputs.branch }} - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: - node-version: "22.x" + node-version: "24.x" registry-url: "https://registry.npmjs.org/" - run: npm install -g npm - run: npm install From ce09e68a6719fc668dceb7776d34bfbef3fb4f81 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 4 Aug 2025 17:31:34 +0200 Subject: [PATCH 611/647] Auto-generated API code (#2952) --- docs/reference/api-reference.md | 32 ++++++++-- src/api/api/inference.ts | 45 ++++++++++---- src/api/types.ts | 102 +++++++++++++++++++++++++++++++- 3 files changed, 158 insertions(+), 21 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index c507df2c6..db2266779 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -4628,7 +4628,12 @@ To override the default behavior, you can set the `esql.query.allow_partial_resu It is valid only for the CSV format. - **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. -- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, for example `json` or `yaml`. +- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, e.g. json, yaml. 
+
+`csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response.
+
+For async requests, nothing will be returned if the async query doesn't finish within the timeout.
+The query ID and running status are available in the `X-Elasticsearch-Async-Id` and `X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively.

 ## client.esql.asyncQueryDelete [_esql.async_query_delete]
 Delete an async ES|QL query.
@@ -4749,6 +4754,8 @@ name and the next level key is the column name.
 object with information about the clusters that
 participated in the search along with info such as shards
 count.
- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, e.g. json, yaml.
+
+`csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response.
- **`delimiter` (Optional, string)**: The character to use between values within a CSV row. Only valid for the CSV format.
- **`drop_null_columns` (Optional, boolean)**: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns.
@@ -7662,6 +7669,7 @@ However, if you do not plan to use the inference APIs to use these models or if
The following integrations are available through the inference API. You can find the available task types next to the integration name:
* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)
* Amazon Bedrock (`completion`, `text_embedding`)
+* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)
* Anthropic (`completion`)
* Azure AI Studio (`completion`, `rerank`, `text_embedding`)
* Azure OpenAI (`completion`, `text_embedding`)
@@ -7742,14 +7750,28 @@ These settings are specific to the task type you specified.
- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.

## client.inference.putAmazonsagemaker [_inference.put_amazonsagemaker]
-Configure a Amazon SageMaker inference endpoint
+Create an Amazon SageMaker inference endpoint.

-[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-amazon-sagemaker.html)
+Create an inference endpoint to perform an inference task with the `amazon_sagemaker` service.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker)

```ts
-client.inference.putAmazonsagemaker()
+client.inference.putAmazonsagemaker({ task_type, amazonsagemaker_inference_id, service, service_settings })
```

+### Arguments [_arguments_inference.put_amazonsagemaker]
+
+#### Request (object) [_request_inference.put_amazonsagemaker]
+- **`task_type` (Enum("text_embedding" \| "completion" \| "chat_completion" \| "sparse_embedding" \| "rerank"))**: The type of the inference task that the model will perform.
+- **`amazonsagemaker_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("amazon_sagemaker"))**: The type of service supported for the specified task type. In this case, `amazon_sagemaker`.
+- **`service_settings` ({ access_key, endpoint_name, api, region, secret_key, target_model, target_container_hostname, inference_component_name, batch_size, dimensions })**: Settings used to install the inference model. +These settings are specific to the `amazon_sagemaker` service and `service_settings.api` you specified. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { custom_attributes, enable_explanations, inference_id, session_id, target_variant })**: Settings to configure the inference task. +These settings are specific to the task type and `service_settings.api` you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putAnthropic [_inference.put_anthropic] Create an Anthropic inference endpoint. @@ -14326,7 +14348,7 @@ If `false`, Elasticsearch only stores async searches that don't finish before th - **`page_timeout` (Optional, string \| -1 \| 0)**: The minimum retention period for the scroll cursor. After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. -- **`params` (Optional, Record)**: The values for parameters in the query. +- **`params` (Optional, User-defined value[])**: The values for parameters in the query. - **`query` (Optional, string)**: The SQL query to run. - **`request_timeout` (Optional, string \| -1 \| 0)**: The timeout before the request fails. - **`runtime_mappings` (Optional, Record)**: One or more runtime fields for the search request. diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 53f59195f..0d18c34af 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -139,8 +139,15 @@ export default class Inference { 'task_type', 'amazonsagemaker_inference_id' ], - body: [], - query: [] + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] }, 'inference.put_anthropic': { path: [ @@ -716,7 +723,7 @@ export default class Inference { } /** - * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. 
You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `rerank`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`completion`, `chat_completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`)
+ * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `rerank`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`completion`, `chat_completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`)
 * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation}
 */
 async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -887,15 +894,17 @@
 }

 /**
- * Configure a Amazon SageMaker inference endpoint
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-service-amazon-sagemaker.html | Elasticsearch API documentation}
+ * Create an Amazon SageMaker inference endpoint. Create an inference endpoint to perform an inference task with the `amazon_sagemaker` service.
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker | Elasticsearch API documentation} */ - async putAmazonsagemaker (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async putAmazonsagemaker (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async putAmazonsagemaker (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async putAmazonsagemaker (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptions): Promise + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptions): Promise { const { - path: acceptedPath + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery } = this.acceptedParams['inference.put_amazonsagemaker'] const userQuery = params?.querystring @@ -911,12 +920,22 @@ export default class Inference { } } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/types.ts b/src/api/types.ts index 417d51a39..31e9ebf97 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -17952,7 +17952,13 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase + params?: any[] /** The SQL query to run. */ query?: string /** The timeout before the request fails. 
*/ From 3a03363d59dc9784c16657c0a624b2c4cdfd9fdc Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 4 Aug 2025 12:02:17 -0500 Subject: [PATCH 612/647] Update dependency @types/node to v22.17.0 (#2951) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 55d8cdc8f..842cd33f6 100644 --- a/package.json +++ b/package.json @@ -63,7 +63,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.16.5", + "@types/node": "22.17.0", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 2f6200eb397df0e54d23848d769a93614ee1fb45 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 4 Aug 2025 12:08:17 -0500 Subject: [PATCH 613/647] Update dependency zx to v8.7.2 (#2950) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 842cd33f6..10b58a0df 100644 --- a/package.json +++ b/package.json @@ -89,7 +89,7 @@ "typescript": "5.8.3", "workq": "3.0.0", "xmlbuilder2": "3.1.1", - "zx": "8.7.1" + "zx": "8.7.2" }, "dependencies": { "@elastic/transport": "^9.0.1", From 85867ae58ca963ba6261d577ff13bb4516710598 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 7 Aug 2025 13:02:15 -0500 Subject: [PATCH 614/647] Fix unstable publish workflow to stop bash quotes from mangling things (#2958) --- .github/workflows/npm-publish-unstable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index 0b6a4f666..da136a535 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -71,7 +71,7 @@ jobs: # overwrite package.json with unstable version value mv package.json package.json.bak - jq --arg v "$unstable_version" ".version = $v" package.json.bak > package.json + jq --arg v "$unstable_version" '.version = $v' package.json.bak > package.json rm package.json.bak # publish to npm From 5f2633c4aed2497a4d2da3382b6f7290cbdefb55 Mon Sep 17 00:00:00 2001 From: Kaarina Tungseth Date: Thu, 7 Aug 2025 13:16:51 -0500 Subject: [PATCH 615/647] Fixes Update mapping API examples link (#2959) --- docs/reference/api-reference.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index db2266779..f6f5ae2c0 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -6706,7 +6706,7 @@ You can use the update mapping API to: - Change a field's mapping using reindexing - Rename a field using a field alias -Learn how to use the update mapping API with practical examples in the [Update mapping API examples](https://www.elastic.co/docs//manage-data/data-store/mapping/update-mappings-examples) guide. +Learn how to use the update mapping API with practical examples in the [Update mapping API examples](docs-content://manage-data/data-store/mapping/update-mappings-examples.md) guide. 
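To make the linked guidance concrete, a minimal update-mapping call through this client might look like the sketch below. The index name and the `tags` field are hypothetical; only the `indices.putMapping` shape comes from the API documented here.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Sketch: add a new keyword field to an existing index.
// The index and field names are hypothetical.
await client.indices.putMapping({
  index: 'my-index',
  properties: {
    tags: { type: 'keyword' }
  }
})
```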
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) From 09492ff2aa37f91392bb20a52ade7f08a2937f11 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 8 Aug 2025 11:22:57 -0500 Subject: [PATCH 616/647] Skip integration test for watcher API (#2960) --- test/integration/test-builder.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/integration/test-builder.js b/test/integration/test-builder.js index 64ce97dd2..88c8b39b8 100644 --- a/test/integration/test-builder.js +++ b/test/integration/test-builder.js @@ -76,6 +76,8 @@ const stackSkips = [ 'text_structure/10_basic.yml', // test definition bug: illegal_argument_exception 'transform/10_basic.yml', + // attempts to retrieve index.routing.allocation.include, which does not exist + 'watcher/10_basic.yml' ] const serverlessSkips = [ From 640bbd50f43c24dd11a624f70b96e1e9cf51d059 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Fri, 8 Aug 2025 11:47:17 -0500 Subject: [PATCH 617/647] Add changelog for 9.0.4 and 9.1.1 (#2965) --- docs/release-notes/index.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md index 6c1ea53a5..484c0afd9 100644 --- a/docs/release-notes/index.md +++ b/docs/release-notes/index.md @@ -20,6 +20,12 @@ To check for security updates, go to [Security announcements for the Elastic sta % ### Fixes [elasticsearch-javascript-client-next-fixes] % \* +## 9.1.1 + +### Fixes [elasticsearch-javascript-client-9.1.1-fixes] + +- **Propagate telemetry disabling option to transport:** an upcoming version of `@elastic/transport` will include the `x-elastic-client-meta` HTTP header that is used to capture some basic client telemetry. This change ensures the client's `enableMetaHeader` setting, which disables collecting this telemetry, is propagated to the transport. + ## 9.1.0 [elasticsearch-javascript-client-9.1.0-release-notes] ### Features and enhancements [elasticsearch-javascript-client-9.1.0-features-enhancements] @@ -30,6 +36,12 @@ To check for security updates, go to [Security announcements for the Elastic sta - **Deep merge nested options on client instantiation:** If custom values for `redaction` and `headers` options were set by the user during `Client` instantiation, nested default values would be dropped rather than deep-merged. This has been fixed. +## 9.0.4 + +### Fixes [elasticsearch-javascript-client-9.0.4-fixes] + +- **Propagate telemetry disabling option to transport:** an upcoming version of `@elastic/transport` will include the `x-elastic-client-meta` HTTP header that is used to capture some basic client telemetry. This change ensures the client's `enableMetaHeader` setting, which disables collecting this telemetry, is propagated to the transport. 
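For context, the `enableMetaHeader` option mentioned above is set at client construction time. A minimal sketch (the node URL is a placeholder):

```ts
import { Client } from '@elastic/elasticsearch'

// Sketch: opt out of the x-elastic-client-meta telemetry header.
const client = new Client({
  node: 'http://localhost:9200/', // placeholder node
  enableMetaHeader: false
})
```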
+ ## 9.0.3 ### Fixes [elasticsearch-javascript-client-9.0.3-fixes] From 7b21bc59054a78837274f9af6c91ed7d5e2a7b12 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 11 Aug 2025 19:03:18 +0200 Subject: [PATCH 618/647] Auto-generated API code (#2969) --- docs/reference/api-reference.md | 63 ++++++++++++-- src/api/api/bulk.ts | 2 +- src/api/api/indices.ts | 2 +- src/api/api/inference.ts | 145 +++++++++++++++++++++++++++++++- src/api/types.ts | 118 +++++++++++++++++++++++++- 5 files changed, 318 insertions(+), 12 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index f6f5ae2c0..e7eb54b67 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -76,6 +76,7 @@ Some of the officially supported clients provide helpers to assist with bulk req * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` * PHP: Check out bulk indexing. +* Ruby: Check out `Elasticsearch::Helpers::BulkHelper` **Submitting bulk requests with cURL** @@ -1825,7 +1826,7 @@ client.termvectors({ index }) - **`doc` (Optional, object)**: An artificial document (a document not present in the index) for which you want to retrieve term vectors. - **`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })**: Filter terms based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query. - **`per_field_analyzer` (Optional, Record)**: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. -- **`fields` (Optional, string \| string[])**: A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +- **`fields` (Optional, string[])**: A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. - **`field_statistics` (Optional, boolean)**: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). - **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets. - **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads. @@ -3487,7 +3488,12 @@ client.cluster.getSettings({ ... }) #### Request (object) [_request_cluster.get_settings] - **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. -- **`include_defaults` (Optional, boolean)**: If `true`, returns default cluster settings from the local node. +- **`include_defaults` (Optional, boolean)**: If `true`, also returns default values for all other cluster settings, reflecting the values +in the `elasticsearch.yml` file of one of the nodes in the cluster. 
If the nodes in your +cluster do not all have the same values in their `elasticsearch.yml` config files then the +values returned by this API may vary from invocation to invocation and may not reflect the +values that Elasticsearch uses in all situations. Use the `GET _nodes/settings` API to +fetch the settings for each individual node in your cluster. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. @@ -6706,7 +6712,7 @@ You can use the update mapping API to: - Change a field's mapping using reindexing - Rename a field using a field alias -Learn how to use the update mapping API with practical examples in the [Update mapping API examples](docs-content://manage-data/data-store/mapping/update-mappings-examples.md) guide. +Learn how to use the update mapping API with practical examples in the [Update mapping API examples](https://www.elastic.co/docs/manage-data/data-store/mapping/update-mappings-examples) guide. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) @@ -7667,6 +7673,7 @@ For built-in models and models uploaded through Eland, the inference APIs offer However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. You can find the available task types next to the integration name: +* AI21 (`chat_completion`, `completion`) * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`) @@ -7674,17 +7681,18 @@ The following integrations are available through the inference API. You can find * Azure AI Studio (`completion`, 'rerank', `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) -* DeepSeek (`completion`, `chat_completion`) +* DeepSeek (`chat_completion`, `completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) -* Google Vertex AI (`rerank`, `text_embedding`) +* Google Vertex AI (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) +* JinaAI (`rerank`, `text_embedding`) +* Llama (`chat_completion`, `completion`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) -* VoyageAI (`text_embedding`, `rerank`) +* VoyageAI (`rerank`, `text_embedding`) * Watsonx inference integration (`text_embedding`) -* JinaAI (`text_embedding`, `rerank`) [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put) @@ -7700,6 +7708,26 @@ client.inference.put({ inference_id }) - **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. 
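As a rough sketch of calling this endpoint, the example below creates a `text_embedding` endpoint backed by the built-in E5 model via the `elasticsearch` service. The `inference_id` and the `service_settings` values are illustrative, not prescribed by this API.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Sketch: create an inference endpoint; all names below are illustrative.
const endpoint = await client.inference.put({
  task_type: 'text_embedding',
  inference_id: 'my-e5-endpoint',
  inference_config: {
    service: 'elasticsearch',
    service_settings: {
      model_id: '.multilingual-e5-small',
      num_allocations: 1,
      num_threads: 1
    }
  }
})
console.log(endpoint.inference_id)
```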
+
## client.inference.putAi21 [_inference.put_ai21]
Create an AI21 inference endpoint.

Create an inference endpoint to perform an inference task with the `ai21` service.

[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-ai21)

```ts
client.inference.putAi21({ task_type, ai21_inference_id, service, service_settings })
```

### Arguments [_arguments_inference.put_ai21]

#### Request (object) [_request_inference.put_ai21]
- **`task_type` (Enum("completion" \| "chat_completion"))**: The type of the inference task that the model will perform.
- **`ai21_inference_id` (string)**: The unique identifier of the inference endpoint.
- **`service` (Enum("ai21"))**: The type of service supported for the specified task type. In this case, `ai21`.
- **`service_settings` ({ model_id, api_key, rate_limit })**: Settings used to install the inference model. These settings are specific to the `ai21` service.
- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.

 ## client.inference.putAlibabacloud [_inference.put_alibabacloud]
 Create an AlibabaCloud AI Search inference endpoint.
@@ -8159,6 +8187,27 @@ client.inference.putJinaai({ task_type, jinaai_inference_id, service, service_se
These settings are specific to the task type you specified.
- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
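Since these `put*` endpoints all share the same shape, one hedged sketch can stand in for the family; here it is for `putJinaai`, matching the signature above. Every identifier, model name, and credential below is a placeholder.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Sketch: create a JinaAI text_embedding endpoint.
// The endpoint id, model id, and API key are placeholders.
await client.inference.putJinaai({
  task_type: 'text_embedding',
  jinaai_inference_id: 'my-jinaai-endpoint',
  service: 'jinaai',
  service_settings: {
    model_id: 'jina-embeddings-v3',
    api_key: 'jinaai-api-key'
  }
})
```

The new `putLlama` endpoint documented next takes the same shape, with `llama_inference_id` and `service: 'llama'` in place of the JinaAI-specific fields.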
You can use the update mapping API to: - Add a new field to an existing index - Update mappings for multiple indices in a single request - Add new properties to an object field - Enable multi-fields for an existing field - Update supported mapping parameters - Change a field's mapping using reindexing - Rename a field using a field alias Learn how to use the update mapping API with practical examples in the [Update mapping API examples](https://www.elastic.co/docs//manage-data/data-store/mapping/update-mappings-examples) guide. + * Update field mappings. Add new fields to an existing data stream or index. You can use the update mapping API to: - Add a new field to an existing index - Update mappings for multiple indices in a single request - Add new properties to an object field - Enable multi-fields for an existing field - Update supported mapping parameters - Change a field's mapping using reindexing - Rename a field using a field alias Learn how to use the update mapping API with practical examples in the [Update mapping API examples](https://www.elastic.co/docs/manage-data/data-store/mapping/update-mappings-examples) guide. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping | Elasticsearch API documentation} */ async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 0d18c34af..3f45aa6b7 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -104,6 +104,19 @@ export default class Inference { 'timeout' ] }, + 'inference.put_ai21': { + path: [ + 'task_type', + 'ai21_inference_id' + ], + body: [ + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, 'inference.put_alibabacloud': { path: [ 'task_type', @@ -324,6 +337,20 @@ export default class Inference { 'timeout' ] }, + 'inference.put_llama': { + path: [ + 'task_type', + 'llama_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, 'inference.put_mistral': { path: [ 'task_type', @@ -723,7 +750,7 @@ export default class Inference { } /** - * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. 
You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, 'rerank', `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`completion`, `chat_completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`) + * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. You can find the available task types next to the integration name: * AI21 (`chat_completion`, `completion`) * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, 'rerank', `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`chat_completion`, `completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * JinaAI (`rerank`, `text_embedding`) * Llama (`chat_completion`, `completion`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`rerank`, `text_embedding`) * Watsonx inference integration (`text_embedding`) * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} */ async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -777,6 +804,64 @@ export default class Inference { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Create a AI21 inference endpoint. 
Create an inference endpoint to perform an inference task with the `ai21` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-ai21 | Elasticsearch API documentation} + */ + async putAi21 (this: That, params: T.InferencePutAi21Request, options?: TransportRequestOptionsWithOutMeta): Promise + async putAi21 (this: That, params: T.InferencePutAi21Request, options?: TransportRequestOptionsWithMeta): Promise> + async putAi21 (this: That, params: T.InferencePutAi21Request, options?: TransportRequestOptions): Promise + async putAi21 (this: That, params: T.InferencePutAi21Request, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_ai21'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.ai21_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_ai21', + pathParts: { + task_type: params.task_type, + ai21_inference_id: params.ai21_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud | Elasticsearch API documentation} @@ -1647,6 +1732,64 @@ export default class Inference { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Create a Llama inference endpoint. Create an inference endpoint to perform an inference task with the `llama` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-llama | Elasticsearch API documentation} + */ + async putLlama (this: That, params: T.InferencePutLlamaRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putLlama (this: That, params: T.InferencePutLlamaRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putLlama (this: That, params: T.InferencePutLlamaRequest, options?: TransportRequestOptions): Promise + async putLlama (this: That, params: T.InferencePutLlamaRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_llama'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.llama_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_llama', + pathParts: { + task_type: params.task_type, + llama_inference_id: params.llama_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Create a Mistral inference endpoint. Create an inference endpoint to perform an inference task with the `mistral` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral | Elasticsearch API documentation} diff --git a/src/api/types.ts b/src/api/types.ts index 31e9ebf97..d054380f5 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -3404,7 +3404,7 @@ export interface TermvectorsRequest extends RequestBase { per_field_analyzer?: Record /** A list of fields to include in the statistics. * It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ - fields?: Fields + fields?: Field[] /** If `true`, the response includes: * * * The document count (how many documents contain this field). @@ -15875,7 +15875,12 @@ export interface ClusterGetComponentTemplateResponse { export interface ClusterGetSettingsRequest extends RequestBase { /** If `true`, returns settings in flat format. */ flat_settings?: boolean - /** If `true`, returns default cluster settings from the local node. */ + /** If `true`, also returns default values for all other cluster settings, reflecting the values + * in the `elasticsearch.yml` file of one of the nodes in the cluster. If the nodes in your + * cluster do not all have the same values in their `elasticsearch.yml` config files then the + * values returned by this API may vary from invocation to invocation and may not reflect the + * values that Elasticsearch uses in all situations. Use the `GET _nodes/settings` API to + * fetch the settings for each individual node in your cluster. */ include_defaults?: boolean /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ @@ -22377,6 +22382,29 @@ export interface InferenceAdaptiveAllocations { min_number_of_allocations?: integer } +export interface InferenceAi21ServiceSettings { + /** The name of the model to use for the inference task. + * Refer to the AI21 models documentation for the list of supported models and versions. + * Service has been tested and confirmed to be working for `completion` and `chat_completion` tasks with the following models: + * * `jamba-mini` + * * `jamba-large` */ + model_id: string + /** A valid API key for accessing AI21 API. 
+ * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key?: string + /** This setting helps to minimize the number of rate limit errors returned from the AI21 API. + * By default, the `ai21` service sets the number of requests allowed per minute to 200. Please refer to AI21 documentation for more details. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceAi21ServiceType = 'ai21' + +export type InferenceAi21TaskType = 'completion' | 'chat_completion' + export interface InferenceAlibabaCloudServiceSettings { /** A valid API key for the AlibabaCloud AI Search API. */ api_key: string @@ -23222,6 +23250,13 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi task_type: InferenceTaskType } +export interface InferenceInferenceEndpointInfoAi21 extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAi21 +} + export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string @@ -23327,6 +23362,13 @@ export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInference task_type: InferenceTaskTypeJinaAi } +export interface InferenceInferenceEndpointInfoLlama extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeLlama +} + export interface InferenceInferenceEndpointInfoMistral extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string @@ -23410,6 +23452,33 @@ export type InferenceJinaAITaskType = 'rerank' | 'text_embedding' export type InferenceJinaAITextEmbeddingTask = 'classification' | 'clustering' | 'ingest' | 'search' +export interface InferenceLlamaServiceSettings { + /** The URL endpoint of the Llama stack endpoint. + * URL must contain: + * * For `text_embedding` task - `/v1/inference/embeddings`. + * * For `completion` and `chat_completion` tasks - `/v1/openai/v1/chat/completions`. */ + url: string + /** The name of the model to use for the inference task. + * Refer to the Llama downloading models documentation for different ways of getting a list of available models and downloading them. + * Service has been tested and confirmed to be working with the following models: + * * For `text_embedding` task - `all-MiniLM-L6-v2`. + * * For `completion` and `chat_completion` tasks - `llama3.2:3b`. */ + model_id: string + /** For a `text_embedding` task, the maximum number of tokens per input before chunking occurs. */ + max_input_tokens?: integer + /** For a `text_embedding` task, the similarity measure. One of cosine, dot_product, l2_norm. */ + similarity?: InferenceLlamaSimilarityType + /** This setting helps to minimize the number of rate limit errors returned from the Llama API. + * By default, the `llama` service sets the number of requests allowed per minute to 3000. 
*/ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceLlamaServiceType = 'llama' + +export type InferenceLlamaSimilarityType = 'cosine' | 'dot_product' | 'l2_norm' + +export type InferenceLlamaTaskType = 'text_embedding' | 'completion' | 'chat_completion' + export interface InferenceMessage { /** The content of the message. * @@ -23540,6 +23609,7 @@ export interface InferenceRateLimitSetting { * * `googlevertexai` service: `30000` * * `hugging_face` service: `3000` * * `jinaai` service: `2000` + * * `llama` service: `3000` * * `mistral` service: `240` * * `openai` service and task type `text_embedding`: `3000` * * `openai` service and task type `completion`: `500` @@ -23626,6 +23696,8 @@ export type InferenceTaskSettings = any export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion' +export type InferenceTaskTypeAi21 = 'completion' | 'chat_completion' + export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding' export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion' @@ -23656,6 +23728,8 @@ export type InferenceTaskTypeHuggingFace = 'chat_completion' | 'completion' | 'r export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank' +export type InferenceTaskTypeLlama = 'text_embedding' | 'chat_completion' | 'completion' + export type InferenceTaskTypeMistral = 'text_embedding' | 'chat_completion' | 'completion' export type InferenceTaskTypeOpenAI = 'text_embedding' | 'chat_completion' | 'completion' @@ -23882,6 +23956,25 @@ export interface InferencePutRequest extends RequestBase { export type InferencePutResponse = InferenceInferenceEndpointInfo +export interface InferencePutAi21Request extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceAi21TaskType + /** The unique identifier of the inference endpoint. */ + ai21_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The type of service supported for the specified task type. In this case, `ai21`. */ + service: InferenceAi21ServiceType + /** Settings used to install the inference model. These settings are specific to the `ai21` service. */ + service_settings: InferenceAi21ServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, ai21_inference_id?: never, timeout?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, ai21_inference_id?: never, timeout?: never, service?: never, service_settings?: never } +} + +export type InferencePutAi21Response = InferenceInferenceEndpointInfoAi21 + export interface InferencePutAlibabacloudRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceAlibabaCloudTaskType @@ -24238,6 +24331,27 @@ export interface InferencePutJinaaiRequest extends RequestBase { export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi +export interface InferencePutLlamaRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceLlamaTaskType + /** The unique identifier of the inference endpoint. 
*/ + llama_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `llama`. */ + service: InferenceLlamaServiceType + /** Settings used to install the inference model. These settings are specific to the `llama` service. */ + service_settings: InferenceLlamaServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, llama_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, llama_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutLlamaResponse = InferenceInferenceEndpointInfoLlama + export interface InferencePutMistralRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceMistralTaskType From 2e255b9fa21859773851a27e0013a26f88022316 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 11 Aug 2025 12:50:28 -0500 Subject: [PATCH 619/647] Adjust Renovate rules (#2973) --- renovate.json | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/renovate.json b/renovate.json index 420ba26d4..a684d271d 100644 --- a/renovate.json +++ b/renovate.json @@ -1,34 +1,30 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ - "local>elastic/renovate-config" + "local>elastic/renovate-config", + "schedule:automergeMonthly", + "npm:unpublishSafe", + "mergeConfidence:all-badges" ], - "schedule": [ - "* * * * 0" + "prConcurrentLimit": 10, + "baseBranchPatterns": [ + "main", + "8.19", + "/^9\\./" ], "packageRules": [ - { - "matchDepTypes": [ - "devDependencies" - ], - "automerge": true, - "labels": [ - "backport 8.x" - ] - }, { "matchManagers": [ - "dockerfile" + "dockerfile", + "docker" ], - "pinDigests": false, - "automerge": true + "pinDigests": false }, { - "matchDatasources": [ - "docker" + "matchDepNames": [ + "@types/**" ], - "pinDigests": false, - "automerge": true + "bumpVersion": "minor" } ] } From 1a969b3703f3f7da2018fc912f90407b9b5fd736 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 12:52:38 -0500 Subject: [PATCH 620/647] Update dependency @types/node to v22.17.1 (#2967) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 10b58a0df..d7467ac61 100644 --- a/package.json +++ b/package.json @@ -63,7 +63,7 @@ "@sinonjs/fake-timers": "14.0.0", "@types/debug": "4.1.12", "@types/ms": "2.1.0", - "@types/node": "22.17.0", + "@types/node": "22.17.1", "@types/sinonjs__fake-timers": "8.1.5", "@types/split2": "4.2.3", "@types/stoppable": "1.1.3", From 39e5c3e277029a8d1d666dc4102a089dfb024806 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 11 Aug 2025 13:01:02 -0500 Subject: [PATCH 621/647] Renovate: drop base branch pattern config (#2975) --- renovate.json | 5 ----- 1 file changed, 5 
deletions(-) diff --git a/renovate.json b/renovate.json index a684d271d..26535f433 100644 --- a/renovate.json +++ b/renovate.json @@ -7,11 +7,6 @@ "mergeConfidence:all-badges" ], "prConcurrentLimit": 10, - "baseBranchPatterns": [ - "main", - "8.19", - "/^9\\./" - ], "packageRules": [ { "matchManagers": [ From 89db3eb4517d111952ecda3a9133180635dba139 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 13:04:35 -0500 Subject: [PATCH 622/647] Update dependency typescript to v5.9.2 (#2968) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index d7467ac61..5bf6307b5 100644 --- a/package.json +++ b/package.json @@ -86,7 +86,7 @@ "tap": "21.1.0", "ts-node": "10.9.2", "ts-standard": "12.0.2", - "typescript": "5.8.3", + "typescript": "5.9.2", "workq": "3.0.0", "xmlbuilder2": "3.1.1", "zx": "8.7.2" From 900e167c26a0ab827b07a6a51d0af5a4d7eb9ee5 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Mon, 11 Aug 2025 13:10:47 -0500 Subject: [PATCH 623/647] Adjust Renovate rules (#2976) --- renovate.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/renovate.json b/renovate.json index 26535f433..1d02f0382 100644 --- a/renovate.json +++ b/renovate.json @@ -10,8 +10,7 @@ "packageRules": [ { "matchManagers": [ - "dockerfile", - "docker" + "dockerfile" ], "pinDigests": false }, From f1969bd3154069df51794844ffcd0263ca3ef2b0 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 13:12:43 -0500 Subject: [PATCH 624/647] Pin Node.js to dd5c5e4 (#2977) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .buildkite/Dockerfile | 2 +- .buildkite/Dockerfile-make | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile index cc4eaae1a..ccc7b4133 100644 --- a/.buildkite/Dockerfile +++ b/.buildkite/Dockerfile @@ -1,5 +1,5 @@ ARG NODE_VERSION=${NODE_VERSION:-20} -FROM node:$NODE_VERSION +FROM node:latest@sha256:dd5c5e4d0a67471a683116483409d1e46605a79521b000c668cff29df06efd51:$NODE_VERSION # Install required tools RUN apt-get clean -y && \ diff --git a/.buildkite/Dockerfile-make b/.buildkite/Dockerfile-make index 0db4d2028..18cc2cac7 100644 --- a/.buildkite/Dockerfile-make +++ b/.buildkite/Dockerfile-make @@ -1,5 +1,5 @@ ARG NODE_JS_VERSION=${NODE_JS_VERSION:-20} -FROM node:${NODE_JS_VERSION} +FROM node:latest@sha256:dd5c5e4d0a67471a683116483409d1e46605a79521b000c668cff29df06efd51:${NODE_JS_VERSION} ARG BUILDER_UID=1000 ARG BUILDER_GID=1000 From 4d350b6411970362d79ee274a534318f39eabca8 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 13:13:20 -0500 Subject: [PATCH 625/647] Update actions/checkout digest to 08eba0b (#2978) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .github/workflows/nodejs.yml | 8 ++++---- .github/workflows/npm-publish-unstable.yml | 4 ++-- .github/workflows/npm-publish.yml | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 5d3896a7c..0a6781200 100644 --- 
a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -11,7 +11,7 @@ jobs: outputs: src-only: "${{ steps.changes.outputs.src-only }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: persist-credentials: false - uses: dorny/paths-filter/@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 @@ -36,7 +36,7 @@ jobs: os: [ubuntu-latest, windows-latest, macOS-latest] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: persist-credentials: false @@ -66,7 +66,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: persist-credentials: false @@ -99,7 +99,7 @@ jobs: os: [ubuntu-latest, windows-latest, macOS-latest] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: persist-credentials: false diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml index da136a535..662a394a3 100644 --- a/.github/workflows/npm-publish-unstable.yml +++ b/.github/workflows/npm-publish-unstable.yml @@ -18,7 +18,7 @@ jobs: outputs: src: "${{ steps.changes.outputs.src }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: persist-credentials: false - uses: dorny/paths-filter/@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 @@ -46,7 +46,7 @@ jobs: uses: zachary95/github-actions-debounce@ab7363483e2837992b8aa6be891763da00ac14f9 # v0.1.0 with: wait: 1800 - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: persist-credentials: false ref: main diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index ac838a8de..398552980 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -12,7 +12,7 @@ jobs: contents: write id-token: write steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: persist-credentials: false ref: ${{ github.event.inputs.branch }} From 69eefdb53090a06e074157796d349ffe6cfe9d4c Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 13 Aug 2025 10:22:45 -0500 Subject: [PATCH 626/647] Ensure Renovate only runs once a month (#2981) --- renovate.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/renovate.json b/renovate.json index 1d02f0382..a944e4fa1 100644 --- a/renovate.json +++ b/renovate.json @@ -6,6 +6,9 @@ "npm:unpublishSafe", "mergeConfidence:all-badges" ], + "schedule": [ + "* 0-3 1 * *" + ], "prConcurrentLimit": 10, "packageRules": [ { From 2ae83219e1f7390394aea00772572b9f6111ab06 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Wed, 13 Aug 2025 10:23:47 -0500 Subject: [PATCH 627/647] Update Node.js to dc4ac80 (#2979) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- .buildkite/Dockerfile | 2 +- .buildkite/Dockerfile-make | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff 
--git a/.buildkite/Dockerfile b/.buildkite/Dockerfile index ccc7b4133..6e0c6fafd 100644 --- a/.buildkite/Dockerfile +++ b/.buildkite/Dockerfile @@ -1,5 +1,5 @@ ARG NODE_VERSION=${NODE_VERSION:-20} -FROM node:latest@sha256:dd5c5e4d0a67471a683116483409d1e46605a79521b000c668cff29df06efd51:$NODE_VERSION +FROM node:latest@sha256:dc4ac80350904c2797058e477a30b6285e9e025f23f139ea8b277c9efe55dd9a:$NODE_VERSION # Install required tools RUN apt-get clean -y && \ diff --git a/.buildkite/Dockerfile-make b/.buildkite/Dockerfile-make index 18cc2cac7..bf7d1ca65 100644 --- a/.buildkite/Dockerfile-make +++ b/.buildkite/Dockerfile-make @@ -1,5 +1,5 @@ ARG NODE_JS_VERSION=${NODE_JS_VERSION:-20} -FROM node:latest@sha256:dd5c5e4d0a67471a683116483409d1e46605a79521b000c668cff29df06efd51:${NODE_JS_VERSION} +FROM node:latest@sha256:dc4ac80350904c2797058e477a30b6285e9e025f23f139ea8b277c9efe55dd9a:${NODE_JS_VERSION} ARG BUILDER_UID=1000 ARG BUILDER_GID=1000 From be7e9daaca3fe6d107c64f81b1489fa0c8d60e78 Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Wed, 13 Aug 2025 12:28:44 -0500 Subject: [PATCH 628/647] Update dependency zx to v8.8.0 (#2980) Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 5bf6307b5..7364b86aa 100644 --- a/package.json +++ b/package.json @@ -89,7 +89,7 @@ "typescript": "5.9.2", "workq": "3.0.0", "xmlbuilder2": "3.1.1", - "zx": "8.7.2" + "zx": "8.8.0" }, "dependencies": { "@elastic/transport": "^9.0.1", From 610e9da3c06a1ec0e99ef3ada8e2df0ac9d45ea7 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 13 Aug 2025 12:34:02 -0500 Subject: [PATCH 629/647] Promote start-local in README (#2982) --- README.md | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 1c80f9cfc..41948d68b 100644 --- a/README.md +++ b/README.md @@ -4,13 +4,24 @@ [![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://badge.buildkite.com/15e4246eb268ea78f6e10aa90bce38c1abb0a4489e79f5a0ac.svg)](https://buildkite.com/elastic/elasticsearch-javascript-client-integration-tests/builds?branch=main) [![Node CI](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml/badge.svg)](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) -**[Download the latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch)** -or -**[sign-up](https://cloud.elastic.co/registration?elektra=en-ess-sign-up-page)** -**for a free trial of Elastic Cloud**. - The official Node.js client for Elasticsearch. +## Try Elasticsearch and Kibana locally + +If you want to try Elasticsearch and Kibana locally, you can run the following command: + +```bash +curl -fsSL https://elastic.co/start-local | sh + +``` + +This will run Elasticsearch at http://localhost:9200 and Kibana at http://localhost:5601. + +More information is available [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html). 
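To make the README's promise concrete, connecting this client to a start-local instance might look like the following sketch. start-local prints the actual credentials during setup; the API key value below is a placeholder.

```ts
import { Client } from '@elastic/elasticsearch'

// Sketch: connect to the locally running Elasticsearch started by start-local.
const client = new Client({
  node: 'http://localhost:9200/',
  auth: { apiKey: 'api-key-printed-by-start-local' } // placeholder
})

console.log(await client.info())
```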
+ +Alternatively, you can **[download the latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch)** manually, or +**[sign-up](https://cloud.elastic.co/registration?elektra=en-ess-sign-up-page) for a free trial of Elastic Cloud**. + ## Installation Refer to the [Installation section](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_installation) From b9ab211cbf85ce7f5eb3c7b1f1f15dcfbeb6d607 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Aug 2025 10:50:32 -0500 Subject: [PATCH 630/647] Renovate: stop trying to inject shasums into Dockerfiles (#2994) --- .buildkite/Dockerfile | 4 ++-- .buildkite/Dockerfile-make | 4 ++-- .github/make.sh | 4 ++-- renovate.json | 3 ++- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile index 6e0c6fafd..b497446fc 100644 --- a/.buildkite/Dockerfile +++ b/.buildkite/Dockerfile @@ -1,5 +1,5 @@ -ARG NODE_VERSION=${NODE_VERSION:-20} -FROM node:latest@sha256:dc4ac80350904c2797058e477a30b6285e9e025f23f139ea8b277c9efe55dd9a:$NODE_VERSION +ARG NODE_VERSION=${NODE_VERSION:-22} +FROM node:$NODE_VERSION # Install required tools RUN apt-get clean -y && \ diff --git a/.buildkite/Dockerfile-make b/.buildkite/Dockerfile-make index bf7d1ca65..4d0712c4d 100644 --- a/.buildkite/Dockerfile-make +++ b/.buildkite/Dockerfile-make @@ -1,5 +1,5 @@ -ARG NODE_JS_VERSION=${NODE_JS_VERSION:-20} -FROM node:latest@sha256:dc4ac80350904c2797058e477a30b6285e9e025f23f139ea8b277c9efe55dd9a:${NODE_JS_VERSION} +ARG NODE_VERSION=${NODE_VERSION:-22} +FROM node:$NODE_VERSION ARG BUILDER_UID=1000 ARG BUILDER_GID=1000 diff --git a/.github/make.sh b/.github/make.sh index d8d9cc391..30cb63e70 100755 --- a/.github/make.sh +++ b/.github/make.sh @@ -37,7 +37,7 @@ product="elastic/elasticsearch-js" output_folder=".buildkite/output" codegen_folder=".buildkite/output" OUTPUT_DIR="$repo/${output_folder}" -NODE_JS_VERSION=22 +NODE_VERSION=22 WORKFLOW=${WORKFLOW-staging} mkdir -p "$OUTPUT_DIR" @@ -133,7 +133,7 @@ echo -e "\033[34;1mINFO: building $product container\033[0m" docker build \ --file .buildkite/Dockerfile-make \ --tag "$product" \ - --build-arg NODE_JS_VERSION="$NODE_JS_VERSION" \ + --build-arg NODE_VERSION="$NODE_VERSION" \ --build-arg "BUILDER_UID=$(id -u)" \ --build-arg "BUILDER_GID=$(id -g)" \ . 
diff --git a/renovate.json b/renovate.json index a944e4fa1..1dfad0928 100644 --- a/renovate.json +++ b/renovate.json @@ -13,7 +13,8 @@ "packageRules": [ { "matchManagers": [ - "dockerfile" + "dockerfile", + "docker" ], "pinDigests": false }, From 1d5f662256c63a8432ac613def277a255ccbe2c4 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Aug 2025 11:04:49 -0500 Subject: [PATCH 631/647] Fix Renovate Docker config (#2995) --- renovate.json | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/renovate.json b/renovate.json index 1dfad0928..1ae05807b 100644 --- a/renovate.json +++ b/renovate.json @@ -13,7 +13,12 @@ "packageRules": [ { "matchManagers": [ - "dockerfile", + "dockerfile" + ], + "pinDigests": false + }, + { + "matchDatasources": [ "docker" ], "pinDigests": false From 80044746f0f8cf7ee1c06eb1ff882ae3c5bc76b5 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 26 Aug 2025 13:01:11 -0500 Subject: [PATCH 632/647] Update NOTICE (#2992) --- .npmignore | 25 +++++++++++++++++-------- NOTICE.txt | 2 +- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/.npmignore b/.npmignore index 3f909d8c7..c890fc1da 100644 --- a/.npmignore +++ b/.npmignore @@ -54,11 +54,18 @@ elasticsearch* # because we should copy them in the main .d.ts file api/generated.d.ts -# Ignore doc folder +# Ignore docs docs +CODE_OF_CONDUCT.md +CONTRIBUTING.md -# Ignore test folder +# Ignore test-related files +codecov.yml test +.tap +rest-api-spec +yaml-rest-tests +generated-tests # Ignore scripts folder scripts @@ -68,12 +75,14 @@ scripts .buildkite certs .github -CODE_OF_CONDUCT.md -CONTRIBUTING.md +.dockerignore +# ignore unbuilt source src + +# Bun artifact bun.lockb -.tap -rest-api-spec -yaml-rest-tests -generated-tests + +# Elastic org artifacts +renovate.json +catalog-info.yaml diff --git a/NOTICE.txt b/NOTICE.txt index 72d057cab..0a7714782 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,2 +1,2 @@ Elasticsearch JavaScript Client -Copyright 2022 Elasticsearch B.V. +Copyright 2022-2025 Elasticsearch B.V. From e543e69d12f675235fa147608588613f1a474ffe Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 2 Sep 2025 12:37:56 -0400 Subject: [PATCH 633/647] Auto-generated API code (#3002) --- docs/reference/api-reference.md | 92 +++++++++++----- src/api/api/connector.ts | 8 +- src/api/api/delete_by_query.ts | 3 +- src/api/api/esql.ts | 2 + src/api/api/fleet.ts | 6 +- src/api/api/indices.ts | 8 +- src/api/api/knn_search.ts | 3 +- src/api/api/profiling.ts | 8 +- src/api/api/reindex.ts | 2 +- src/api/api/simulate.ts | 3 +- src/api/api/transform.ts | 53 ++++++++- src/api/types.ts | 185 ++++++++++++++++++++++++-------- 12 files changed, 284 insertions(+), 89 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index e7eb54b67..4f0374953 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -474,6 +474,7 @@ client.deleteByQuery({ index }) - **`max_docs` (Optional, number)**: The maximum number of documents to delete. 
- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to delete specified with Query DSL. - **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: A sort object that specifies the order of deleted documents. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. @@ -495,7 +496,6 @@ client.deleteByQuery({ index }) - **`search_timeout` (Optional, string \| -1 \| 0)**: The explicit timeout for each search request. It defaults to no timeout. - **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. - **`slices` (Optional, number \| Enum("auto"))**: The number of slices this task should be divided into. -- **`sort` (Optional, string[])**: A list of `:` pairs. - **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes. - **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. - **`timeout` (Optional, string \| -1 \| 0)**: The period each deletion request waits for active shards. @@ -1026,10 +1026,7 @@ client.info() ``` ## client.knnSearch [_knn_search] -Performs a kNN search. - -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html) - +Performs a kNN search ```ts client.knnSearch() ``` @@ -1342,6 +1339,12 @@ In this case, the response includes a count of the version conflicts that were e Note that the handling of other error types is unaffected by the `conflicts` property. 
Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. +It's recommended to reindex on indices with a green status. Reindexing can fail when a node shuts down or crashes. +* When requested with `wait_for_completion=true` (default), the request fails if the node shuts down. +* When requested with `wait_for_completion=false`, a task id is returned, for use with the task management APIs. The task may disappear or fail if the node shuts down. +When retrying a failed reindex operation, it might be necessary to set `conflicts=proceed` or to first delete the partial destination index. +Additionally, dry runs, checking disk space, and fetching index recovery information can help address the root cause. + Refer to the linked documentation for examples of how to reindex documents. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) @@ -2326,7 +2329,7 @@ client.cat.aliases({ ... }) #### Request (object) [_request_cat.aliases] - **`name` (Optional, string \| string[])**: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("alias" \| "index" \| "filter" \| "routing.index" \| "routing.search" \| "is_write_index") \| Enum("alias" \| "index" \| "filter" \| "routing.index" \| "routing.search" \| "is_write_index")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2355,7 +2358,7 @@ client.cat.allocation({ ... }) #### Request (object) [_request_cat.allocation] - **`node_id` (Optional, string \| string[])**: A list of node identifiers or names used to limit the returned information. - **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("shards" \| "shards.undesired" \| "write_load.forecast" \| "disk.indices.forecast" \| "disk.indices" \| "disk.used" \| "disk.avail" \| "disk.total" \| "disk.percent" \| "host" \| "ip" \| "node" \| "node.role") \| Enum("shards" \| "shards.undesired" \| "write_load.forecast" \| "disk.indices.forecast" \| "disk.indices" \| "disk.used" \| "disk.avail" \| "disk.total" \| "disk.percent" \| "host" \| "ip" \| "node" \| "node.role")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2386,7 +2389,7 @@ client.cat.componentTemplates({ ... }) - **`name` (Optional, string)**: The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. 
+- **`h` (Optional, Enum("name" \| "version" \| "alias_count" \| "mapping_count" \| "settings_count" \| "metadata_count" \| "included_in") \| Enum("name" \| "version" \| "alias_count" \| "mapping_count" \| "settings_count" \| "metadata_count" \| "included_in")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2417,7 +2420,7 @@ client.cat.count({ ... }) - **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("epoch" \| "timestamp" \| "count") \| Enum("epoch" \| "timestamp" \| "count")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2442,7 +2445,7 @@ client.cat.fielddata({ ... }) - **`fields` (Optional, string \| string[])**: List of fields used to limit returned information. To retrieve all fields, omit this parameter. - **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("id" \| "host" \| "ip" \| "node" \| "field" \| "size") \| Enum("id" \| "host" \| "ip" \| "node" \| "field" \| "size")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2471,7 +2474,7 @@ client.cat.health({ ... }) #### Request (object) [_request_cat.health] - **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. - **`ts` (Optional, boolean)**: If true, returns `HH:MM:SS` and Unix epoch timestamps. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("epoch" \| "timestamp" \| "cluster" \| "status" \| "node.total" \| "node.data" \| "shards" \| "pri" \| "relo" \| "init" \| "unassign" \| "unassign.pri" \| "pending_tasks" \| "max_task_wait_time" \| "active_shards_percent") \| Enum("epoch" \| "timestamp" \| "cluster" \| "status" \| "node.total" \| "node.data" \| "shards" \| "pri" \| "relo" \| "init" \| "unassign" \| "unassign.pri" \| "pending_tasks" \| "max_task_wait_time" \| "active_shards_percent")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2524,7 +2527,7 @@ Supports wildcards (`*`). 
To target all data streams and indices, omit this para - **`pri` (Optional, boolean)**: If true, the response only includes information from primary shards. - **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("health" \| "status" \| "index" \| "uuid" \| "pri" \| "rep" \| "docs.count" \| "docs.deleted" \| "creation.date" \| "creation.date.string" \| "store.size" \| "pri.store.size" \| "dataset.size" \| "completion.size" \| "pri.completion.size" \| "fielddata.memory_size" \| "pri.fielddata.memory_size" \| "fielddata.evictions" \| "pri.fielddata.evictions" \| "query_cache.memory_size" \| "pri.query_cache.memory_size" \| "query_cache.evictions" \| "pri.query_cache.evictions" \| "request_cache.memory_size" \| "pri.request_cache.memory_size" \| "request_cache.evictions" \| "pri.request_cache.evictions" \| "request_cache.hit_count" \| "pri.request_cache.hit_count" \| "request_cache.miss_count" \| "pri.request_cache.miss_count" \| "flush.total" \| "pri.flush.total" \| "flush.total_time" \| "pri.flush.total_time" \| "get.current" \| "pri.get.current" \| "get.time" \| "pri.get.time" \| "get.total" \| "pri.get.total" \| "get.exists_time" \| "pri.get.exists_time" \| "get.exists_total" \| "pri.get.exists_total" \| "get.missing_time" \| "pri.get.missing_time" \| "get.missing_total" \| "pri.get.missing_total" \| "indexing.delete_current" \| "pri.indexing.delete_current" \| "indexing.delete_time" \| "pri.indexing.delete_time" \| "indexing.delete_total" \| "pri.indexing.delete_total" \| "indexing.index_current" \| "pri.indexing.index_current" \| "indexing.index_time" \| "pri.indexing.index_time" \| "indexing.index_total" \| "pri.indexing.index_total" \| "indexing.index_failed" \| "pri.indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "pri.indexing.index_failed_due_to_version_conflict" \| "merges.current" \| "pri.merges.current" \| "merges.current_docs" \| "pri.merges.current_docs" \| "merges.current_size" \| "pri.merges.current_size" \| "merges.total" \| "pri.merges.total" \| "merges.total_docs" \| "pri.merges.total_docs" \| "merges.total_size" \| "pri.merges.total_size" \| "merges.total_time" \| "pri.merges.total_time" \| "refresh.total" \| "pri.refresh.total" \| "refresh.time" \| "pri.refresh.time" \| "refresh.external_total" \| "pri.refresh.external_total" \| "refresh.external_time" \| "pri.refresh.external_time" \| "refresh.listeners" \| "pri.refresh.listeners" \| "search.fetch_current" \| "pri.search.fetch_current" \| "search.fetch_time" \| "pri.search.fetch_time" \| "search.fetch_total" \| "pri.search.fetch_total" \| "search.open_contexts" \| "pri.search.open_contexts" \| "search.query_current" \| "pri.search.query_current" \| "search.query_time" \| "pri.search.query_time" \| "search.query_total" \| "pri.search.query_total" \| "search.scroll_current" \| "pri.search.scroll_current" \| "search.scroll_time" \| "pri.search.scroll_time" \| "search.scroll_total" \| "pri.search.scroll_total" \| "segments.count" \| "pri.segments.count" \| "segments.memory" \| "pri.segments.memory" \| "segments.index_writer_memory" \| "pri.segments.index_writer_memory" \| "segments.version_map_memory" \| "pri.segments.version_map_memory" \| "segments.fixed_bitset_memory" \| 
"pri.segments.fixed_bitset_memory" \| "warmer.current" \| "pri.warmer.current" \| "warmer.total" \| "pri.warmer.total" \| "warmer.total_time" \| "pri.warmer.total_time" \| "suggest.current" \| "pri.suggest.current" \| "suggest.time" \| "pri.suggest.time" \| "suggest.total" \| "pri.suggest.total" \| "memory.total" \| "pri.memory.total" \| "bulk.total_operations" \| "pri.bulk.total_operations" \| "bulk.total_time" \| "pri.bulk.total_time" \| "bulk.total_size_in_bytes" \| "pri.bulk.total_size_in_bytes" \| "bulk.avg_time" \| "pri.bulk.avg_time" \| "bulk.avg_size_in_bytes" \| "pri.bulk.avg_size_in_bytes" \| "dense_vector.value_count" \| "pri.dense_vector.value_count" \| "sparse_vector.value_count" \| "pri.sparse_vector.value_count") \| Enum("health" \| "status" \| "index" \| "uuid" \| "pri" \| "rep" \| "docs.count" \| "docs.deleted" \| "creation.date" \| "creation.date.string" \| "store.size" \| "pri.store.size" \| "dataset.size" \| "completion.size" \| "pri.completion.size" \| "fielddata.memory_size" \| "pri.fielddata.memory_size" \| "fielddata.evictions" \| "pri.fielddata.evictions" \| "query_cache.memory_size" \| "pri.query_cache.memory_size" \| "query_cache.evictions" \| "pri.query_cache.evictions" \| "request_cache.memory_size" \| "pri.request_cache.memory_size" \| "request_cache.evictions" \| "pri.request_cache.evictions" \| "request_cache.hit_count" \| "pri.request_cache.hit_count" \| "request_cache.miss_count" \| "pri.request_cache.miss_count" \| "flush.total" \| "pri.flush.total" \| "flush.total_time" \| "pri.flush.total_time" \| "get.current" \| "pri.get.current" \| "get.time" \| "pri.get.time" \| "get.total" \| "pri.get.total" \| "get.exists_time" \| "pri.get.exists_time" \| "get.exists_total" \| "pri.get.exists_total" \| "get.missing_time" \| "pri.get.missing_time" \| "get.missing_total" \| "pri.get.missing_total" \| "indexing.delete_current" \| "pri.indexing.delete_current" \| "indexing.delete_time" \| "pri.indexing.delete_time" \| "indexing.delete_total" \| "pri.indexing.delete_total" \| "indexing.index_current" \| "pri.indexing.index_current" \| "indexing.index_time" \| "pri.indexing.index_time" \| "indexing.index_total" \| "pri.indexing.index_total" \| "indexing.index_failed" \| "pri.indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "pri.indexing.index_failed_due_to_version_conflict" \| "merges.current" \| "pri.merges.current" \| "merges.current_docs" \| "pri.merges.current_docs" \| "merges.current_size" \| "pri.merges.current_size" \| "merges.total" \| "pri.merges.total" \| "merges.total_docs" \| "pri.merges.total_docs" \| "merges.total_size" \| "pri.merges.total_size" \| "merges.total_time" \| "pri.merges.total_time" \| "refresh.total" \| "pri.refresh.total" \| "refresh.time" \| "pri.refresh.time" \| "refresh.external_total" \| "pri.refresh.external_total" \| "refresh.external_time" \| "pri.refresh.external_time" \| "refresh.listeners" \| "pri.refresh.listeners" \| "search.fetch_current" \| "pri.search.fetch_current" \| "search.fetch_time" \| "pri.search.fetch_time" \| "search.fetch_total" \| "pri.search.fetch_total" \| "search.open_contexts" \| "pri.search.open_contexts" \| "search.query_current" \| "pri.search.query_current" \| "search.query_time" \| "pri.search.query_time" \| "search.query_total" \| "pri.search.query_total" \| "search.scroll_current" \| "pri.search.scroll_current" \| "search.scroll_time" \| "pri.search.scroll_time" \| "search.scroll_total" \| "pri.search.scroll_total" \| "segments.count" \| "pri.segments.count" \| 
"segments.memory" \| "pri.segments.memory" \| "segments.index_writer_memory" \| "pri.segments.index_writer_memory" \| "segments.version_map_memory" \| "pri.segments.version_map_memory" \| "segments.fixed_bitset_memory" \| "pri.segments.fixed_bitset_memory" \| "warmer.current" \| "pri.warmer.current" \| "warmer.total" \| "pri.warmer.total" \| "warmer.total_time" \| "pri.warmer.total_time" \| "suggest.current" \| "pri.suggest.current" \| "suggest.time" \| "pri.suggest.time" \| "suggest.total" \| "pri.suggest.total" \| "memory.total" \| "pri.memory.total" \| "bulk.total_operations" \| "pri.bulk.total_operations" \| "bulk.total_time" \| "pri.bulk.total_time" \| "bulk.total_size_in_bytes" \| "pri.bulk.total_size_in_bytes" \| "bulk.avg_time" \| "pri.bulk.avg_time" \| "bulk.avg_size_in_bytes" \| "pri.bulk.avg_size_in_bytes" \| "dense_vector.value_count" \| "pri.dense_vector.value_count" \| "sparse_vector.value_count" \| "pri.sparse_vector.value_count")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2545,7 +2548,7 @@ client.cat.master({ ... }) ### Arguments [_arguments_cat.master] #### Request (object) [_request_cat.master] -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("id" \| "host" \| "ip" \| "node") \| Enum("id" \| "host" \| "ip" \| "node")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2696,7 +2699,7 @@ client.cat.nodeattrs({ ... }) ### Arguments [_arguments_cat.nodeattrs] #### Request (object) [_request_cat.nodeattrs] -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("node" \| "id" \| "pid" \| "host" \| "ip" \| "port" \| "attr" \| "value") \| Enum("node" \| "id" \| "pid" \| "host" \| "ip" \| "port" \| "attr" \| "value")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2747,7 +2750,7 @@ client.cat.pendingTasks({ ... }) ### Arguments [_arguments_cat.pending_tasks] #### Request (object) [_request_cat.pending_tasks] -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("insertOrder" \| "timeInQueue" \| "priority" \| "source") \| Enum("insertOrder" \| "timeInQueue" \| "priority" \| "source")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -4715,6 +4718,9 @@ If `true`, the response will include an extra section under the name `all_column ## client.esql.getQuery [_esql.get_query] Get a specific running ES|QL query information. 
Returns an object extended information about a running ES|QL query. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-get-query) + ```ts client.esql.getQuery({ id }) ``` @@ -4727,6 +4733,9 @@ client.esql.getQuery({ id }) ## client.esql.listQueries [_esql.list_queries] Get running ES|QL queries information. Returns an object containing IDs and other information about the running ES|QL queries. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-list-queries) + ```ts client.esql.listQueries() ``` @@ -5669,7 +5678,7 @@ client.indices.deleteDataStream({ name }) Delete data stream options. Removes the data stream options from a data stream. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream-options) ```ts client.indices.deleteDataStreamOptions({ name }) @@ -6160,7 +6169,7 @@ Get data stream options. Get the data stream options configuration of one or more data streams. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-options) ```ts client.indices.getDataStreamOptions({ name }) @@ -6577,7 +6586,7 @@ error. Update data stream options. Update the data stream options of the specified data streams. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-options) ```ts client.indices.putDataStreamOptions({ name }) @@ -7156,7 +7165,7 @@ For example, a request targeting `foo*,bar*` returns an error if an index starts ## client.indices.rollover [_indices.rollover] Roll over to a new index. -TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. +TIP: We recommend using the index lifecycle rollover action to automate rollovers. However, Serverless does not support Index Lifecycle Management (ILM), so don't use this approach in the Serverless context. The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target. @@ -8632,7 +8641,7 @@ client.ingest.simulate({ docs }) - **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline. - **`id` (Optional, string)**: The pipeline to test. If you don't specify a `pipeline` in the request body, this parameter is required. -- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })**: The pipeline to test. +- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta, created_date, created_date_millis, modified_date, modified_date_millis })**: The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. - **`verbose` (Optional, boolean)**: If `true`, the response includes output data for each processor in the executed pipeline. @@ -8818,7 +8827,7 @@ client.logstash.putPipeline({ id }) #### Request (object) [_request_logstash.put_pipeline] - **`id` (string)**: An identifier for the pipeline. 
-- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })** +- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta, created_date, created_date_millis, modified_date, modified_date_millis })** ## client.migration.deprecations [_migration.deprecations] Get deprecation information. @@ -13490,14 +13499,18 @@ client.simulate.ingest({ docs }) - **`index` (Optional, string)**: The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. -- **`component_template_substitutions` (Optional, Record)**: A map of component template names to substitute component template definition objects. -- **`index_template_substitutions` (Optional, Record)**: A map of index template names to substitute index template definition objects. +- **`component_template_substitutions` (Optional, Record)**: A map of component template names to substitute component template definition objects. +- **`index_template_substitutions` (Optional, Record)**: A map of index template names to substitute index template definition objects. - **`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })** -- **`pipeline_substitutions` (Optional, Record)**: Pipelines to test. +- **`pipeline_substitutions` (Optional, Record)**: Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. - **`pipeline` (Optional, string)**: The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index. +- **`merge_type` (Optional, Enum("index" \| "template"))**: The mapping merge type if mapping overrides are being provided in mapping_addition. +The allowed values are one of index or template. +The index option merges mappings the way they would be merged into an existing index. +The template option merges mappings the way they would be merged into a template. ## client.slm.deleteLifecycle [_slm.delete_lifecycle] Delete a policy. @@ -15116,7 +15129,7 @@ index will not be deleted - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. ## client.transform.getNodeStats [_transform.get_node_stats] -Retrieves transform usage information for transform nodes. +Retrieves transform usage information for transform nodes [Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html) @@ -15322,6 +15335,35 @@ client.transform.scheduleNowTransform({ transform_id }) - **`transform_id` (string)**: Identifier for the transform. - **`timeout` (Optional, string \| -1 \| 0)**: Controls the time to wait for the scheduling to take place +## client.transform.setUpgradeMode [_transform.set_upgrade_mode] +Set upgrade_mode for transform indices. +Sets a cluster wide upgrade_mode setting that prepares transform +indices for an upgrade. +When upgrading your cluster, in some circumstances you must restart your +nodes and reindex your transform indices. 
In those circumstances, +there must be no transforms running. You can close the transforms, +do the upgrade, then open all the transforms again. Alternatively, +you can use this API to temporarily halt tasks associated with the transforms +and prevent new transforms from opening. You can also use this API +during upgrades that do not require you to reindex your transform +indices, though stopping transforms is not a requirement in that case. +You can see the current value for the upgrade_mode setting by using the get +transform info API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-set-upgrade-mode) + +```ts +client.transform.setUpgradeMode({ ... }) +``` + +### Arguments [_arguments_transform.set_upgrade_mode] + +#### Request (object) [_request_transform.set_upgrade_mode] +- **`enabled` (Optional, boolean)**: When `true`, it enables `upgrade_mode` which temporarily halts all +transform tasks and prohibits new transform tasks from +starting. +- **`timeout` (Optional, string \| -1 \| 0)**: The time to wait for the request to be completed. + ## client.transform.startTransform [_transform.start_transform] Start a transform. diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index 3198dfa5c..be52c9af3 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -715,7 +715,7 @@ export default class Connector { } /** - * Deletes a connector secret. + * Deletes a connector secret */ async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> @@ -759,7 +759,7 @@ export default class Connector { } /** - * Retrieves a secret stored by Connectors. + * Retrieves a secret stored by Connectors */ async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> @@ -803,7 +803,7 @@ export default class Connector { } /** - * Creates a secret for a Connector. + * Creates a secret for a Connector */ async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> @@ -844,7 +844,7 @@ export default class Connector { } /** - * Creates or updates a secret for a Connector. + * Creates or updates a secret for a Connector */ async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index c52be429e..d13a0ff6a 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -36,7 +36,8 @@ const acceptedParams: Record async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -354,6 +355,7 @@ export default class Esql { /** * Get running ES|QL queries information. Returns an object containing IDs and other information about the running ES|QL queries. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-list-queries | Elasticsearch API documentation} */ async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index 030c72e9a..665e0a5d5 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -177,7 +177,7 @@ export default class Fleet { } /** - * Deletes a secret stored by Fleet. + * Deletes a secret stored by Fleet */ async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> @@ -221,7 +221,7 @@ export default class Fleet { } /** - * Retrieves a secret stored by Fleet. + * Retrieves a secret stored by Fleet */ async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> @@ -364,7 +364,7 @@ export default class Fleet { } /** - * Creates a secret stored by Fleet. + * Creates a secret stored by Fleet */ async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index fdc62f215..78f45bd89 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -1661,7 +1661,7 @@ export default class Indices { /** * Delete data stream options. Removes the data stream options from a data stream. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream-options | Elasticsearch API documentation} */ async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2564,7 +2564,7 @@ export default class Indices { /** * Get data stream options. Get the data stream options configuration of one or more data streams. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-options | Elasticsearch API documentation} */ async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -3373,7 +3373,7 @@ export default class Indices { /** * Update data stream options. Update the data stream options of the specified data streams. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-options | Elasticsearch API documentation} */ async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -3996,7 +3996,7 @@ export default class Indices { } /** - * Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target. **Roll over a data stream** If you roll over a data stream, the API creates a new write index for the stream. The stream's previous write index becomes a regular backing index. A rollover also increments the data stream's generation. **Roll over an index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with `is_write_index` set to `true`. The API also `sets is_write_index` to `false` for the previous write index. **Roll over an index alias with one index** If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. **Increment index names for an alias** When you roll over an index alias, you can specify a name for the new index. If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. This number is always six characters and zero-padded, regardless of the previous index's name. If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named ``. If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. + * Roll over to a new index. TIP: We recommend using the index lifecycle rollover action to automate rollovers. However, Serverless does not support Index Lifecycle Management (ILM), so don't use this approach in the Serverless context. The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target. **Roll over a data stream** If you roll over a data stream, the API creates a new write index for the stream. The stream's previous write index becomes a regular backing index. A rollover also increments the data stream's generation. 
**Roll over an index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with `is_write_index` set to `true`. The API also `sets is_write_index` to `false` for the previous write index. **Roll over an index alias with one index** If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. **Increment index names for an alias** When you roll over an index alias, you can specify a name for the new index. If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. This number is always six characters and zero-padded, regardless of the previous index's name. If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named ``. If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover | Elasticsearch API documentation} */ async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index 367a61d48..2fd7b462b 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -37,8 +37,7 @@ const acceptedParams: Record export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/profiling.ts b/src/api/api/profiling.ts index 04a1028ab..4cd29fa72 100644 --- a/src/api/api/profiling.ts +++ b/src/api/api/profiling.ts @@ -57,7 +57,7 @@ export default class Profiling { } /** - * Extracts a UI-optimized structure to render flamegraphs from Universal Profiling. + * Extracts a UI-optimized structure to render flamegraphs from Universal Profiling * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} */ async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise @@ -99,7 +99,7 @@ export default class Profiling { } /** - * Extracts raw stacktrace information from Universal Profiling. + * Extracts raw stacktrace information from Universal Profiling * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} */ async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise @@ -141,7 +141,7 @@ export default class Profiling { } /** - * Returns basic information about the status of Universal Profiling. 
+ * Returns basic information about the status of Universal Profiling * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} */ async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise @@ -183,7 +183,7 @@ export default class Profiling { } /** - * Extracts a list of topN functions from Universal Profiling. + * Extracts a list of topN functions from Universal Profiling * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} */ async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 2a6fedd23..66f6aec87 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -54,7 +54,7 @@ const acceptedParams: Record diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts index 4393e380c..407c66002 100644 --- a/src/api/api/simulate.ts +++ b/src/api/api/simulate.ts @@ -47,7 +47,8 @@ export default class Simulate { 'pipeline_substitutions' ], query: [ - 'pipeline' + 'pipeline', + 'merge_type' ] } } diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index dc47023c8..91f4477b1 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -134,6 +134,14 @@ export default class Transform { 'timeout' ] }, + 'transform.set_upgrade_mode': { + path: [], + body: [], + query: [ + 'enabled', + 'timeout' + ] + }, 'transform.start_transform': { path: [ 'transform_id' @@ -233,7 +241,7 @@ export default class Transform { } /** - * Retrieves transform usage information for transform nodes. + * Retrieves transform usage information for transform nodes * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform-node-stats.html | Elasticsearch API documentation} */ async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise @@ -584,6 +592,49 @@ export default class Transform { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Set upgrade_mode for transform indices. Sets a cluster wide upgrade_mode setting that prepares transform indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your transform indices. In those circumstances, there must be no transforms running. You can close the transforms, do the upgrade, then open all the transforms again. Alternatively, you can use this API to temporarily halt tasks associated with the transforms and prevent new transforms from opening. You can also use this API during upgrades that do not require you to reindex your transform indices, though stopping transforms is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get transform info API. 
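+ * A hypothetical usage sketch (illustration only, not part of this patch):
+ *
+ *   // halt transform tasks before the upgrade, then re-enable them afterwards
+ *   await client.transform.setUpgradeMode({ enabled: true, timeout: '1m' })
+ *   // ... perform the upgrade ...
+ *   await client.transform.setUpgradeMode({ enabled: false })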
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-set-upgrade-mode | Elasticsearch API documentation} + */ + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptions): Promise + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['transform.set_upgrade_mode'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_transform/set_upgrade_mode' + const meta: TransportRequestMetadata = { + name: 'transform.set_upgrade_mode' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Start a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform | Elasticsearch API documentation} diff --git a/src/api/types.ts b/src/api/types.ts index d054380f5..7dce15eee 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -434,8 +434,6 @@ export interface DeleteByQueryRequest extends RequestBase { search_type?: SearchType /** The number of slices this task should be divided into. */ slices?: Slices - /** A comma-separated list of `:` pairs. */ - sort?: string[] /** The specific `tag` of the request for logging and statistical purposes. 
*/ stats?: string[] /** The maximum number of documents to collect for each shard. @@ -464,10 +462,12 @@ export interface DeleteByQueryRequest extends RequestBase { query?: QueryDslQueryContainer /** Slice the request manually using the provided slice ID and total number of slices. */ slice?: SlicedScroll + /** A sort object that specifies the order of deleted documents. */ + sort?: Sort /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, conflicts?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, preference?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, q?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, sort?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, slice?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, conflicts?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, preference?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, q?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, slice?: never, sort?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, conflicts?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, preference?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, q?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, sort?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, slice?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, conflicts?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, preference?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, q?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, slice?: never, sort?: never } } export interface DeleteByQueryResponse { @@ -2564,7 +2564,7 @@ export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string export interface SearchHit { _index: IndexName _id?: Id - _score?: double | null + _score: double | null _explanation?: ExplainExplanation fields?: Record highlight?: Record @@ -2588,7 +2588,7 @@ export interface SearchHitsMetadata { /** Total hit count information, present only if `track_total_hits` wasn't `false` in the search request. */ total?: SearchTotalHits | long hits: SearchHit[] - max_score?: double | null + max_score: double | null } export interface SearchInnerHits { @@ -4575,10 +4575,17 @@ export type Service = string export interface ShardFailure { index?: IndexName + /** @alias index */ + _index?: IndexName node?: string + /** @alias node */ + _node?: string reason: ErrorCause - shard: integer + shard?: integer + /** @alias shard */ + _shard?: integer status?: string + primary?: boolean } export interface ShardStatistics { @@ -5044,7 +5051,7 @@ export interface AggregationsAggregationRange { } export interface AggregationsArrayPercentilesItem { - key: string + key: double value: double | null value_as_string?: string } @@ -5892,7 +5899,7 @@ export interface AggregationsIpRangeBucketKeys extends AggregationsMultiBucketBa export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys & { [property: string]: AggregationsAggregate | string | long } -export type AggregationsKeyedPercentiles = Record +export type AggregationsKeyedPercentiles = Record export interface AggregationsLinearMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { model: 'linear' @@ -7948,21 +7955,23 @@ export interface MappingChunkingSettings { * * Learn more about different chunking strategies in the linked documentation. */ strategy: string - /** This parameter is only applicable when using the `recursive` chunking strategy. + /** Only applicable to the `recursive` strategy and required when using it. * * Sets a predefined list of separators in the saved chunking settings based on the selected text type. * Values can be `markdown` or `plaintext`. 
* * Using this parameter is an alternative to manually specifying a custom `separators` list. */ - separator_group: string - /** A list of strings used as possible split points when chunking text with the `recursive` strategy. + separator_group?: string + /** Only applicable to the `recursive` strategy and required when using it. + * + * A list of strings used as possible split points when chunking text. * * Each string can be a plain string or a regular expression (regex) pattern. * The system tries each separator in order to split the text, starting from the first item in the list. * * After splitting, it attempts to recombine smaller pieces into larger chunks that stay within * the `max_chunk_size` limit, to reduce the total number of chunks generated. */ - separators: string[] + separators?: string[] /** The maximum size of a chunk in words. * This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). */ max_chunk_size: integer @@ -10308,9 +10317,25 @@ export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { export type AutoscalingPutAutoscalingPolicyResponse = AcknowledgedResponseBase +export type CatCatAliasesColumn = 'alias' | 'a' | 'index' | 'i' | 'idx' | 'filter' | 'f' | 'fi' | 'routing.index' | 'ri' | 'routingIndex' | 'routing.search' | 'rs' | 'routingSearch' | 'is_write_index' | 'w' | 'isWriteIndex' | string + +export type CatCatAliasesColumns = CatCatAliasesColumn | CatCatAliasesColumn[] + +export type CatCatAllocationColumn = 'shards' | 's' | 'shards.undesired' | 'write_load.forecast' | 'wlf' | 'writeLoadForecast' | 'disk.indices.forecast' | 'dif' | 'diskIndicesForecast' | 'disk.indices' | 'di' | 'diskIndices' | 'disk.used' | 'du' | 'diskUsed' | 'disk.avail' | 'da' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.percent' | 'dp' | 'diskPercent' | 'host' | 'h' | 'ip' | 'node' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | string + +export type CatCatAllocationColumns = CatCatAllocationColumn | CatCatAllocationColumn[] + export type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 
'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's' -export type CatCatAnonalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[] +export type CatCatAnomalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[] + +export type CatCatComponentColumn = 'name' | 'n' | 'version' | 'v' | 'alias_count' | 'a' | 'mapping_count' | 'm' | 'settings_count' | 's' | 'metadata_count' | 'me' | 'included_in' | 'i' | string + +export type CatCatComponentColumns = CatCatComponentColumn | CatCatComponentColumn[] + +export type CatCatCountColumn = 'epoch' | 't' | 'time' | 'timestamp' | 'ts' | 'hms' | 'hhmmss' | 'count' | 'dc' | 'docs.count' | 'docsCount' | string + +export type CatCatCountColumns = CatCatCountColumn | CatCatCountColumn[] export type CatCatDatafeedColumn = 'ae' | 'assignment_explanation' | 'bc' | 'buckets.count' | 'bucketsCount' | 'id' | 'na' | 'node.address' | 'nodeAddress' | 'ne' | 'node.ephemeral_id' | 'nodeEphemeralId' | 'ni' | 'node.id' | 'nodeId' | 'nn' | 'node.name' | 'nodeName' | 'sba' | 'search.bucket_avg' | 'searchBucketAvg' | 'sc' | 'search.count' | 'searchCount' | 'seah' | 'search.exp_avg_hour' | 'searchExpAvgHour' | 'st' | 'search.time' | 'searchTime' | 's' | 'state' @@ -10320,10 +10345,34 @@ export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] +export type CatCatFieldDataColumn = 'id' | 'host' | 'h' | 'ip' | 'node' | 'n' | 'field' | 'f' | 'size' | 's' | string + +export type CatCatFieldDataColumns = CatCatFieldDataColumn | CatCatFieldDataColumn[] + +export type CatCatHealthColumn = 'epoch' | 't' | 'time' | 'timestamp' | 'ts' | 'hms' | 'hhmmss' | 'cluster' | 'cl' | 'status' | 'st' | 'node.total' | 'nt' | 'nodeTotal' | 'node.data' | 'nd' | 'nodeData' | 'shards' | 't' | 'sh' | 'shards.total' | 'shardsTotal' | 'pri' 
| 'p' | 'shards.primary' | 'shardsPrimary' | 'relo' | 'r' | 'shards.relocating' | 'shardsRelocating' | 'init' | 'i' | 'shards.initializing' | 'shardsInitializing' | 'unassign' | 'u' | 'shards.unassigned' | 'shardsUnassigned' | 'unassign.pri' | 'up' | 'shards.unassigned.primary' | 'shardsUnassignedPrimary' | 'pending_tasks' | 'pt' | 'pendingTasks' | 'max_task_wait_time' | 'mtwt' | 'maxTaskWaitTime' | 'active_shards_percent' | 'asp' | 'activeShardsPercent' | string + +export type CatCatHealthColumns = CatCatHealthColumn | CatCatHealthColumn[] + +export type CatCatIndicesColumn = 'health' | 'h' | 'status' | 's' | 'index' | 'i' | 'idx' | 'uuid' | 'id' | 'uuid' | 'pri' | 'p' | 'shards.primary' | 'shardsPrimary' | 'rep' | 'r' | 'shards.replica' | 'shardsReplica' | 'docs.count' | 'dc' | 'docsCount' | 'docs.deleted' | 'dd' | 'docsDeleted' | 'creation.date' | 'cd' | 'creation.date.string' | 'cds' | 'store.size' | 'ss' | 'storeSize' | 'pri.store.size' | 'dataset.size' | 'completion.size' | 'cs' | 'completionSize' | 'pri.completion.size' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'pri.fielddata.memory_size' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'pri.fielddata.evictions' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'pri.query_cache.memory_size' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'pri.query_cache.evictions' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'pri.request_cache.memory_size' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'pri.request_cache.evictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'pri.request_cache.hit_count' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'pri.request_cache.miss_count' | 'flush.total' | 'ft' | 'flushTotal' | 'pri.flush.total' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'pri.flush.total_time' | 'get.current' | 'gc' | 'getCurrent' | 'pri.get.current' | 'get.time' | 'gti' | 'getTime' | 'pri.get.time' | 'get.total' | 'gto' | 'getTotal' | 'pri.get.total' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'pri.get.exists_time' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'pri.get.exists_total' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'pri.get.missing_time' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'pri.get.missing_total' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'pri.indexing.delete_current' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'pri.indexing.delete_time' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'pri.indexing.delete_total' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'pri.indexing.index_current' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'pri.indexing.index_time' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'pri.indexing.index_total' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'pri.indexing.index_failed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'pri.indexing.index_failed_due_to_version_conflict' | 'merges.current' | 'mc' | 'mergesCurrent' | 'pri.merges.current' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'pri.merges.current_docs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'pri.merges.current_size' | 'merges.total' | 'mt' | 'mergesTotal' | 'pri.merges.total' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'pri.merges.total_docs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 
'pri.merges.total_size' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'pri.merges.total_time' | 'refresh.total' | 'rto' | 'refreshTotal' | 'pri.refresh.total' | 'refresh.time' | 'rti' | 'refreshTime' | 'pri.refresh.time' | 'refresh.external_total' | 'rto' | 'refreshTotal' | 'pri.refresh.external_total' | 'refresh.external_time' | 'rti' | 'refreshTime' | 'pri.refresh.external_time' | 'refresh.listeners' | 'rli' | 'refreshListeners' | 'pri.refresh.listeners' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'pri.search.fetch_current' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'pri.search.fetch_time' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'pri.search.fetch_total' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'pri.search.open_contexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'pri.search.query_current' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'pri.search.query_time' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'pri.search.query_total' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'pri.search.scroll_current' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'pri.search.scroll_time' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'pri.search.scroll_total' | 'segments.count' | 'sc' | 'segmentsCount' | 'pri.segments.count' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'pri.segments.memory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'pri.segments.index_writer_memory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'pri.segments.version_map_memory' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'pri.segments.fixed_bitset_memory' | 'warmer.current' | 'wc' | 'warmerCurrent' | 'pri.warmer.current' | 'warmer.total' | 'wto' | 'warmerTotal' | 'pri.warmer.total' | 'warmer.total_time' | 'wtt' | 'warmerTotalTime' | 'pri.warmer.total_time' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'pri.suggest.current' | 'suggest.time' | 'suti' | 'suggestTime' | 'pri.suggest.time' | 'suggest.total' | 'suto' | 'suggestTotal' | 'pri.suggest.total' | 'memory.total' | 'tm' | 'memoryTotal' | 'pri.memory.total' | 'bulk.total_operations' | 'bto' | 'bulkTotalOperation' | 'pri.bulk.total_operations' | 'bulk.total_time' | 'btti' | 'bulkTotalTime' | 'pri.bulk.total_time' | 'bulk.total_size_in_bytes' | 'btsi' | 'bulkTotalSizeInBytes' | 'pri.bulk.total_size_in_bytes' | 'bulk.avg_time' | 'bati' | 'bulkAvgTime' | 'pri.bulk.avg_time' | 'bulk.avg_size_in_bytes' | 'basi' | 'bulkAvgSizeInBytes' | 'pri.bulk.avg_size_in_bytes' | 'dense_vector.value_count' | 'dvc' | 'denseVectorCount' | 'pri.dense_vector.value_count' | 'sparse_vector.value_count' | 'svc' | 'sparseVectorCount' | 'pri.sparse_vector.value_count' | string + +export type CatCatIndicesColumns = CatCatIndicesColumn | CatCatIndicesColumn[] + +export type CatCatMasterColumn = 'id' | 'host' | 'h' | 'ip' | 'node' | 'n' | string + +export type CatCatMasterColumns = CatCatMasterColumn | CatCatMasterColumn[] + export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' 
| 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 
'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[] +export type CatCatNodeattrsColumn = 'node' | 'id' | 'id' | 'nodeId' | 'pid' | 'p' | 'host' | 'h' | 'ip' | 'i' | 'port' | 'po' | 'attr' | 'attr.name' | 'value' | 'attr.value' | string + +export type CatCatNodeattrsColumns = CatCatNodeattrsColumn | CatCatNodeattrsColumn[] + +export type CatCatPendingTasksColumn = 'insertOrder' | 'o' | 'timeInQueue' | 't' | 'priority' | 'p' | 'source' | 's' | string + +export type CatCatPendingTasksColumns = CatCatPendingTasksColumn | CatCatPendingTasksColumn[] + export type CatCatRecoveryColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'time' | 't' | 'ti' | 'primaryOrReplica' | 'type' | 'stage' | 'st' | 'source_host' | 'shost' | 'source_node' | 'snode' | 'target_host' | 'thost' | 'target_node' | 'tnode' | 'repository' | 'tnode' | 'snapshot' | 'snap' | 'files' | 'f' | 'files_recovered' | 'fr' | 'files_percent' | 'fp' | 'files_total' | 'tf' | 'bytes' | 'b' | 'bytes_recovered' | 'br' | 'bytes_percent' | 'bp' | 'bytes_total' | 'tb' | 'translog_ops' | 'to' | 'translog_ops_recovered' | 'tor' | 'translog_ops_percent' | 'top' | 'start_time' | 'start' | 'start_time_millis' | 'start_millis' | 'stop_time' | 'stop' | 'stop_time_millis' | 'stop_millis' | string export type CatCatRecoveryColumns = CatCatRecoveryColumn | CatCatRecoveryColumn[] @@ -10406,8 +10455,8 @@ export interface CatAliasesAliasesRecord { export interface CatAliasesRequest extends CatCatRequestBase { /** A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatAliasesColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -10540,8 +10589,8 @@ export interface CatAllocationRequest extends CatCatRequestBase { node_id?: NodeIds /** The unit used to display byte values. */ bytes?: Bytes - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatAllocationColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -10576,8 +10625,8 @@ export interface CatComponentTemplatesRequest extends CatCatRequestBase { * It accepts wildcard expressions. * If it is omitted, all component templates are returned. */ name?: string - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatComponentColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -10635,8 +10684,8 @@ export interface CatCountRequest extends CatCatRequestBase { * It supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. 
*/ index?: Indices - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatCountColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -10679,8 +10728,8 @@ export interface CatFielddataRequest extends CatCatRequestBase { fields?: Fields /** The unit used to display byte values. */ bytes?: Bytes - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatFieldDataColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -10836,8 +10885,8 @@ export interface CatHealthRequest extends CatCatRequestBase { time?: TimeUnit /** If true, returns `HH:MM:SS` and Unix epoch timestamps. */ ts?: boolean - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatHealthColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -11603,8 +11652,8 @@ export interface CatIndicesRequest extends CatCatRequestBase { time?: TimeUnit /** Period to wait for a connection to the master node. */ master_timeout?: Duration - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatIndicesColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -11635,8 +11684,8 @@ export interface CatMasterMasterRecord { } export interface CatMasterRequest extends CatCatRequestBase { - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatMasterColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -12464,9 +12513,9 @@ export interface CatMlJobsRequest extends CatCatRequestBase { /** The unit used to display byte values. */ bytes?: Bytes /** Comma-separated list of column names to display. */ - h?: CatCatAnonalyDetectorColumns + h?: CatCatAnomalyDetectorColumns /** Comma-separated list of column names or column aliases used to sort the response. */ - s?: CatCatAnonalyDetectorColumns + s?: CatCatAnomalyDetectorColumns /** The unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. */ @@ -12658,8 +12707,8 @@ export interface CatNodeattrsNodeAttributesRecord { } export interface CatNodeattrsRequest extends CatCatRequestBase { - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. 
*/ + h?: CatCatNodeattrsColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -13443,8 +13492,8 @@ export interface CatPendingTasksPendingTasksRecord { } export interface CatPendingTasksRequest extends CatCatRequestBase { - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatPendingTasksColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -15649,6 +15698,14 @@ export interface ClusterComponentTemplateNode { version?: VersionNumber _meta?: Metadata deprecated?: boolean + /** Date and time when the component template was created. Only returned if the `human` query parameter is `true`. */ + created_date?: DateTime + /** Date and time when the component template was created, in milliseconds since the epoch. */ + created_date_millis?: EpochTime + /** Date and time when the component template was last modified. Only returned if the `human` query parameter is `true`. */ + modified_date?: DateTime + /** Date and time when the component template was last modified, in milliseconds since the epoch. */ + modified_date_millis?: EpochTime } export interface ClusterComponentTemplateSummary { @@ -19348,6 +19405,14 @@ export interface IndicesIndexTemplate { deprecated?: boolean /** A list of component template names that are allowed to be absent. */ ignore_missing_component_templates?: Names + /** Date and time when the index template was created. Only returned if the `human` query parameter is `true`. */ + created_date?: DateTime + /** Date and time when the index template was created, in milliseconds since the epoch. */ + created_date_millis?: EpochTime + /** Date and time when the index template was last modified. Only returned if the `human` query parameter is `true`. */ + modified_date?: DateTime + /** Date and time when the index template was last modified, in milliseconds since the epoch. */ + modified_date_millis?: EpochTime } export interface IndicesIndexTemplateDataStreamConfiguration { @@ -21697,6 +21762,7 @@ export interface IndicesResolveIndexResolveIndexItem { aliases?: string[] attributes: string[] data_stream?: DataStreamName + mode?: IndicesIndexMode } export interface IndicesResolveIndexResponse { @@ -23206,21 +23272,23 @@ export interface InferenceInferenceChunkingSettings { * It is applicable only for a `sentence` chunking strategy. * It can be either `1` or `0`. */ sentence_overlap?: integer - /** This parameter is only applicable when using the `recursive` chunking strategy. + /** Only applicable to the `recursive` strategy and required when using it. * * Sets a predefined list of separators in the saved chunking settings based on the selected text type. * Values can be `markdown` or `plaintext`. * * Using this parameter is an alternative to manually specifying a custom `separators` list. */ - separator_group: string - /** A list of strings used as possible split points when chunking text with the `recursive` strategy. + separator_group?: string + /** Only applicable to the `recursive` strategy and required when using it. + * + * A list of strings used as possible split points when chunking text. 
* * Each string can be a plain string or a regular expression (regex) pattern. * The system tries each separator in order to split the text, starting from the first item in the list. * * After splitting, it attempts to recombine smaller pieces into larger chunks that stay within * the `max_chunk_size` limit, to reduce the total number of chunks generated. */ - separators: string[] + separators?: string[] /** The chunking strategy: `sentence`, `word`, `none` or `recursive`. * * * If `strategy` is set to `recursive`, you must also specify: @@ -25100,6 +25168,14 @@ export interface IngestPipeline { deprecated?: boolean /** Arbitrary metadata about the ingest pipeline. This map is not automatically generated by Elasticsearch. */ _meta?: Metadata + /** Date and time when the pipeline was created. Only returned if the `human` query parameter is `true`. */ + created_date?: DateTime + /** Date and time when the pipeline was created, in milliseconds since the epoch. */ + created_date_millis?: EpochTime + /** Date and time when the pipeline was last modified. Only returned if the `human` query parameter is `true`. */ + modified_date?: DateTime + /** Date and time when the pipeline was last modified, in milliseconds since the epoch. */ + modified_date_millis?: EpochTime } export interface IngestPipelineConfig { @@ -35112,9 +35188,12 @@ export interface SimulateIngestIngestDocumentSimulationKeys { * executing a processor, or a mapping validation error when simulating indexing the resulting * doc. */ error?: ErrorCause + effective_mapping?: MappingTypeMapping } export type SimulateIngestIngestDocumentSimulation = SimulateIngestIngestDocumentSimulationKeys -& { [property: string]: string | Id | IndexName | Record | SpecUtilsStringified | string[] | Record[] | ErrorCause } +& { [property: string]: string | Id | IndexName | Record | SpecUtilsStringified | string[] | Record[] | ErrorCause | MappingTypeMapping } + +export type SimulateIngestMergeType = 'index' | 'template' export interface SimulateIngestRequest extends RequestBase { /** The index to simulate ingesting into. @@ -35124,6 +35203,11 @@ export interface SimulateIngestRequest extends RequestBase { /** The pipeline to use as the default pipeline. * This value can be used to override the default pipeline of the index. */ pipeline?: PipelineName + /** The mapping merge type if mapping overrides are being provided in mapping_addition. + * The allowed values are one of index or template. + * The index option merges mappings the way they would be merged into an existing index. + * The template option merges mappings the way they would be merged into a template. */ + merge_type?: SimulateIngestMergeType /** Sample documents to test in the pipeline. */ docs: IngestDocument[] /** A map of component template names to substitute component template definition objects. */ @@ -35136,9 +35220,9 @@ export interface SimulateIngestRequest extends RequestBase { * If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline_substitutions?: Record /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } + body?: string | { [key: string]: any } & { index?: never, pipeline?: never, merge_type?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } + querystring?: { [key: string]: any } & { index?: never, pipeline?: never, merge_type?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } } export interface SimulateIngestResponse { @@ -37862,6 +37946,21 @@ export interface TransformScheduleNowTransformRequest extends RequestBase { export type TransformScheduleNowTransformResponse = AcknowledgedResponseBase +export interface TransformSetUpgradeModeRequest extends RequestBase { + /** When `true`, it enables `upgrade_mode` which temporarily halts all + * transform tasks and prohibits new transform tasks from + * starting. */ + enabled?: boolean + /** The time to wait for the request to be completed. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { enabled?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { enabled?: never, timeout?: never } +} + +export type TransformSetUpgradeModeResponse = AcknowledgedResponseBase + export interface TransformStartTransformRequest extends RequestBase { /** Identifier for the transform. */ transform_id: Id From 49d7dacc97c78952add488108c31dda6cf274d81 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 8 Sep 2025 12:32:31 -0400 Subject: [PATCH 634/647] Auto-generated API code (#3007) --- docs/reference/api-reference.md | 27 +++++++---- src/api/api/indices.ts | 3 +- src/api/types.ts | 82 +++++++++++++++++++++++---------- 3 files changed, 77 insertions(+), 35 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 4f0374953..4ca10a5e6 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -2776,7 +2776,7 @@ client.cat.plugins({ ... }) ### Arguments [_arguments_cat.plugins] #### Request (object) [_request_cat.plugins] -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("id" \| "name" \| "component" \| "version" \| "description") \| Enum("id" \| "name" \| "component" \| "version" \| "description")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2809,7 +2809,7 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para - **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. 
- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. -- **`h` (Optional, Enum("index" \| "shard" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis") \| Enum("index" \| "shard" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis")[])**: A list of columns names to display. +- **`h` (Optional, Enum("index" \| "shard" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent") \| Enum("index" \| "shard" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` @@ -2947,7 +2947,7 @@ client.cat.tasks({ ... }) - **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. - **`nodes` (Optional, string[])**: Unique node identifiers, which are used to limit the response. - **`parent_task_id` (Optional, string)**: The parent task identifier, which is used to limit the response. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("id" \| "action" \| "task_id" \| "parent_task_id" \| "type" \| "start_time" \| "timestamp" \| "running_time_ns" \| "running_time" \| "node_id" \| "ip" \| "port" \| "node" \| "version" \| "x_opaque_id") \| Enum("id" \| "action" \| "task_id" \| "parent_task_id" \| "type" \| "start_time" \| "timestamp" \| "running_time_ns" \| "running_time" \| "node_id" \| "ip" \| "port" \| "node" \| "version" \| "x_opaque_id")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. 
@@ -2974,7 +2974,7 @@ client.cat.templates({ ... }) #### Request (object) [_request_cat.templates] - **`name` (Optional, string)**: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`h` (Optional, Enum("name" \| "index_patterns" \| "order" \| "version" \| "composed_of") \| Enum("name" \| "index_patterns" \| "order" \| "version" \| "composed_of")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -7162,6 +7162,7 @@ Supports a list of values, such as `open,hidden`. - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`mode` (Optional, Enum("standard" \| "time_series" \| "logsdb" \| "lookup") \| Enum("standard" \| "time_series" \| "logsdb" \| "lookup")[])**: Filter indices by index mode - standard, lookup, time_series, etc. List of IndexMode. Empty means no filter. ## client.indices.rollover [_indices.rollover] Roll over to a new index. @@ -8066,6 +8067,7 @@ client.inference.putElser({ task_type, elser_inference_id, service, service_sett - **`service` (Enum("elser"))**: The type of service supported for the specified task type. In this case, `elser`. - **`service_settings` ({ adaptive_allocations, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elser` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Note that for ELSER endpoints, the max_chunk_size may not exceed `300`. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putGoogleaistudio [_inference.put_googleaistudio] @@ -8108,7 +8110,7 @@ client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, ser - **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`. - **`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. -- **`task_settings` (Optional, { auto_truncate, top_n })**: Settings to configure the inference task. +- **`task_settings` (Optional, { auto_truncate, top_n, thinking_config })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. 
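To make the new `thinking_config` task setting concrete, a hypothetical endpoint creation might look like the following sketch; the inference id, region, model, project, and budget values are all placeholders, not working settings:

```ts
// Illustrative sketch only: create a Google Vertex AI chat_completion
// endpoint with a thinking budget. Credentials and ids are placeholders.
await client.inference.putGooglevertexai({
  task_type: 'chat_completion',
  googlevertexai_inference_id: 'my-vertexai-endpoint', // placeholder id
  service: 'googlevertexai',
  service_settings: {
    location: 'us-central1',          // placeholder region
    model_id: 'gemini-2.5-flash',     // placeholder model
    project_id: 'my-gcp-project',     // placeholder project
    service_account_json: '<service-account-json>' // redacted
  },
  task_settings: {
    thinking_config: { thinking_budget: 1024 } // tokens reserved for "thinking"
  }
})
```

Refer to the Google documentation for which models accept a thinking configuration and which budget values they allow.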
@@ -14062,26 +14064,33 @@ client.snapshot.repositoryAnalyze({ repository }) #### Request (object) [_request_snapshot.repository_analyze] - **`repository` (string)**: The name of the repository. - **`blob_count` (Optional, number)**: The total number of blobs to write to the repository during the test. -For realistic experiments, you should set it to at least `2000`. +For realistic experiments, set this parameter to at least `2000`. - **`concurrency` (Optional, number)**: The number of operations to run concurrently during the test. +For realistic experiments, leave this parameter unset. - **`detailed` (Optional, boolean)**: Indicates whether to return detailed results, including timing information for every operation performed during the analysis. If false, it returns only a summary of the analysis. - **`early_read_node_count` (Optional, number)**: The number of nodes on which to perform an early read operation while writing each blob. Early read operations are only rarely performed. +For realistic experiments, leave this parameter unset. - **`max_blob_size` (Optional, number \| string)**: The maximum size of a blob to be written during the test. -For realistic experiments, you should set it to at least `2gb`. +For realistic experiments, set this parameter to at least `2gb`. - **`max_total_data_size` (Optional, number \| string)**: An upper limit on the total size of all the blobs written during the test. -For realistic experiments, you should set it to at least `1tb`. +For realistic experiments, set this parameter to at least `1tb`. - **`rare_action_probability` (Optional, number)**: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. +For realistic experiments, leave this parameter unset. - **`rarely_abort_writes` (Optional, boolean)**: Indicates whether to rarely cancel writes before they complete. +For realistic experiments, leave this parameter unset. - **`read_node_count` (Optional, number)**: The number of nodes on which to read a blob after writing. +For realistic experiments, leave this parameter unset. - **`register_operation_count` (Optional, number)**: The minimum number of linearizable register operations to perform in total. -For realistic experiments, you should set it to at least `100`. +For realistic experiments, set this parameter to at least `100`. - **`seed` (Optional, number)**: The seed for the pseudo-random number generator used to generate the list of operations performed during the test. To repeat the same set of operations in multiple experiments, use the same seed in each experiment. Note that the operations are performed concurrently so might not always happen in the same order on each run. +For realistic experiments, leave this parameter unset. - **`timeout` (Optional, string \| -1 \| 0)**: The period of time to wait for the test to complete. If no response is received before the timeout expires, the test is cancelled and returns an error. +For realistic experiments, set this parameter sufficiently long to allow the test to complete. ## client.snapshot.repositoryVerifyIntegrity [_snapshot.repository_verify_integrity] Verify the repository integrity. 
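Before moving on to the type changes, here is a rough sketch of the "realistic experiment" guidance for `repositoryAnalyze` above; the repository name and timeout are placeholders, while the sizing values mirror the documented minimums:

```ts
// Sketch only: a repository analysis sized per the documented minimums.
// All other tuning parameters are left unset, as recommended for
// realistic experiments.
const report = await client.snapshot.repositoryAnalyze({
  repository: 'my-repository',  // placeholder repository name
  blob_count: 2000,             // documented minimum for realistic runs
  max_blob_size: '2gb',
  max_total_data_size: '1tb',
  register_operation_count: 100,
  timeout: '2h'                 // long enough for the test to complete
})
console.log(report)
```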
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 78f45bd89..d68766c43 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -793,7 +793,8 @@ export default class Indices { query: [ 'expand_wildcards', 'ignore_unavailable', - 'allow_no_indices' + 'allow_no_indices', + 'mode' ] }, 'indices.rollover': { diff --git a/src/api/types.ts b/src/api/types.ts index 7dce15eee..5ccd7f6f8 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -2564,7 +2564,7 @@ export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string export interface SearchHit { _index: IndexName _id?: Id - _score: double | null + _score?: double | null _explanation?: ExplainExplanation fields?: Record highlight?: Record @@ -2588,7 +2588,7 @@ export interface SearchHitsMetadata { /** Total hit count information, present only if `track_total_hits` wasn't `false` in the search request. */ total?: SearchTotalHits | long hits: SearchHit[] - max_score: double | null + max_score?: double | null } export interface SearchInnerHits { @@ -7973,7 +7973,8 @@ export interface MappingChunkingSettings { * the `max_chunk_size` limit, to reduce the total number of chunks generated. */ separators?: string[] /** The maximum size of a chunk in words. - * This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). */ + * This value cannot be lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). + * This value should not exceed the window size for the associated model. */ max_chunk_size: integer /** The number of overlapping words for chunks. * It is applicable only to a `word` chunking strategy. @@ -10373,7 +10374,11 @@ export type CatCatPendingTasksColumn = 'insertOrder' | 'o' | 'timeInQueue' | 't' export type CatCatPendingTasksColumns = CatCatPendingTasksColumn | CatCatPendingTasksColumn[] -export type CatCatRecoveryColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'time' | 't' | 'ti' | 'primaryOrReplica' | 'type' | 'stage' | 'st' | 'source_host' | 'shost' | 'source_node' | 'snode' | 'target_host' | 'thost' | 'target_node' | 'tnode' | 'repository' | 'tnode' | 'snapshot' | 'snap' | 'files' | 'f' | 'files_recovered' | 'fr' | 'files_percent' | 'fp' | 'files_total' | 'tf' | 'bytes' | 'b' | 'bytes_recovered' | 'br' | 'bytes_percent' | 'bp' | 'bytes_total' | 'tb' | 'translog_ops' | 'to' | 'translog_ops_recovered' | 'tor' | 'translog_ops_percent' | 'top' | 'start_time' | 'start' | 'start_time_millis' | 'start_millis' | 'stop_time' | 'stop' | 'stop_time_millis' | 'stop_millis' | string +export type CatCatPluginsColumn = 'id' | 'name' | 'n' | 'component' | 'c' | 'version' | 'v' | 'description' | 'd' | string + +export type CatCatPluginsColumns = CatCatPluginsColumn | CatCatPluginsColumn[] + +export type CatCatRecoveryColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'start_time' | 'start' | 'start_time_millis' | 'start_millis' | 'stop_time' | 'stop' | 'stop_time_millis' | 'stop_millis' | 'time' | 't' | 'ti' | 'type' | 'ty' | 'stage' | 'st' | 'source_host' | 'shost' | 'source_node' | 'snode' | 'target_host' | 'thost' | 'target_node' | 'tnode' | 'repository' | 'rep' | 'snapshot' | 'snap' | 'files' | 'f' | 'files_recovered' | 'fr' | 'files_percent' | 'fp' | 'files_total' | 'tf' | 'bytes' | 'b' | 'bytes_recovered' | 'br' | 'bytes_percent' | 'bp' | 'bytes_total' | 'tb' | 'translog_ops' | 'to' | 'translog_ops_recovered' | 'tor' | 'translog_ops_percent' | 'top' | string export type CatCatRecoveryColumns = CatCatRecoveryColumn | 
CatCatRecoveryColumn[] @@ -10392,6 +10397,14 @@ export type CatCatSnapshotsColumn = 'id' | 'snapshot' | 'repository' | 're' | 'r export type CatCatSnapshotsColumns = CatCatSnapshotsColumn | CatCatSnapshotsColumn[] +export type CatCatTasksColumn = 'id' | 'action' | 'ac' | 'task_id' | 'ti' | 'parent_task_id' | 'pti' | 'type' | 'ty' | 'start_time' | 'start' | 'timestamp' | 'ts' | 'hms' | 'hhmmss' | 'running_time_ns' | 'time' | 'running_time' | 'time' | 'node_id' | 'ni' | 'ip' | 'i' | 'port' | 'po' | 'node' | 'n' | 'version' | 'v' | 'x_opaque_id' | 'x' | string + +export type CatCatTasksColumns = CatCatTasksColumn | CatCatTasksColumn[] + +export type CatCatTemplatesColumn = 'name' | 'n' | 'index_patterns' | 't' | 'order' | 'o' | 'p' | 'version' | 'v' | 'composed_of' | 'c' | string + +export type CatCatTemplatesColumns = CatCatTemplatesColumn | CatCatTemplatesColumn[] + export type CatCatThreadPoolColumn = 'active' | 'a' | 'completed' | 'c' | 'core' | 'cr' | 'ephemeral_id' | 'eid' | 'host' | 'h' | 'ip' | 'i' | 'keep_alive' | 'k' | 'largest' | 'l' | 'max' | 'mx' | 'name' | 'node_id' | 'id' | 'node_name' | 'pid' | 'p' | 'pool_size' | 'psz' | 'port' | 'po' | 'queue' | 'q' | 'queue_size' | 'qs' | 'rejected' | 'r' | 'size' | 'sz' | 'type' | 't' | string export type CatCatThreadPoolColumns = CatCatThreadPoolColumn | CatCatThreadPoolColumn[] @@ -13546,8 +13559,8 @@ export interface CatPluginsPluginsRecord { } export interface CatPluginsRequest extends CatCatRequestBase { - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatPluginsColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -14708,8 +14721,8 @@ export interface CatTasksRequest extends CatCatRequestBase { nodes?: string[] /** The parent task identifier, which is used to limit the response. */ parent_task_id?: string - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatTasksColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -14816,8 +14829,8 @@ export interface CatTemplatesRequest extends CatCatRequestBase { /** The name of the template to return. * Accepts wildcard expressions. If omitted, all templates are returned. */ name?: Name - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatTemplatesColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ @@ -21740,10 +21753,12 @@ export interface IndicesResolveIndexRequest extends RequestBase { * This behavior applies even if the request targets other open indices. * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Filter indices by index mode - standard, lookup, time_series, etc. Comma-separated list of IndexMode. Empty means no filter. 
*/ + mode?: IndicesIndexMode | IndicesIndexMode[] /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never } + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never, mode?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never } + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never, mode?: never } } export interface IndicesResolveIndexResolveIndexAliasItem { @@ -23220,6 +23235,9 @@ export interface InferenceGoogleVertexAITaskSettings { auto_truncate?: boolean /** For a `rerank` task, the number of the top N documents that should be returned. */ top_n?: integer + /** For a `completion` or `chat_completion` task, allows configuration of the thinking features for the model. + * Refer to the Google documentation for the allowable configurations for each model type. */ + thinking_config?: InferenceThinkingConfig } export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' | 'completion' | 'chat_completion' @@ -23262,7 +23280,8 @@ export type InferenceHuggingFaceTaskType = 'chat_completion' | 'completion' | 'r export interface InferenceInferenceChunkingSettings { /** The maximum size of a chunk in words. - * This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). */ + * This value cannot be lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). + * This value should not exceed the window size for the associated model. */ max_chunk_size?: integer /** The number of overlapping words for chunks. * It is applicable only to a `word` chunking strategy. @@ -23820,6 +23839,11 @@ export interface InferenceTextEmbeddingResult { embedding: InferenceDenseVector } +export interface InferenceThinkingConfig { + /** Indicates the desired thinking budget in tokens. */ + thinking_budget?: integer +} + export interface InferenceToolCall { /** The identifier of the tool call. */ id: Id @@ -24292,7 +24316,8 @@ export interface InferencePutElserRequest extends RequestBase { elser_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Note that for ELSER endpoints, the max_chunk_size may not exceed `300`. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `elser`. */ service: InferenceElserServiceType @@ -36281,37 +36306,44 @@ export interface SnapshotRepositoryAnalyzeRequest extends RequestBase { /** The name of the repository. */ name: Name /** The total number of blobs to write to the repository during the test. - * For realistic experiments, you should set it to at least `2000`. */ + * For realistic experiments, set this parameter to at least `2000`. */ blob_count?: integer - /** The number of operations to run concurrently during the test. */ + /** The number of operations to run concurrently during the test. + * For realistic experiments, leave this parameter unset. 
*/ concurrency?: integer /** Indicates whether to return detailed results, including timing information for every operation performed during the analysis. * If false, it returns only a summary of the analysis. */ detailed?: boolean /** The number of nodes on which to perform an early read operation while writing each blob. - * Early read operations are only rarely performed. */ + * Early read operations are only rarely performed. + * For realistic experiments, leave this parameter unset. */ early_read_node_count?: integer /** The maximum size of a blob to be written during the test. - * For realistic experiments, you should set it to at least `2gb`. */ + * For realistic experiments, set this parameter to at least `2gb`. */ max_blob_size?: ByteSize /** An upper limit on the total size of all the blobs written during the test. - * For realistic experiments, you should set it to at least `1tb`. */ + * For realistic experiments, set this parameter to at least `1tb`. */ max_total_data_size?: ByteSize - /** The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. */ + /** The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. + * For realistic experiments, leave this parameter unset. */ rare_action_probability?: double - /** Indicates whether to rarely cancel writes before they complete. */ + /** Indicates whether to rarely cancel writes before they complete. + * For realistic experiments, leave this parameter unset. */ rarely_abort_writes?: boolean - /** The number of nodes on which to read a blob after writing. */ + /** The number of nodes on which to read a blob after writing. + * For realistic experiments, leave this parameter unset. */ read_node_count?: integer /** The minimum number of linearizable register operations to perform in total. - * For realistic experiments, you should set it to at least `100`. */ + * For realistic experiments, set this parameter to at least `100`. */ register_operation_count?: integer /** The seed for the pseudo-random number generator used to generate the list of operations performed during the test. * To repeat the same set of operations in multiple experiments, use the same seed in each experiment. - * Note that the operations are performed concurrently so might not always happen in the same order on each run. */ + * Note that the operations are performed concurrently so might not always happen in the same order on each run. + * For realistic experiments, leave this parameter unset. */ seed?: integer /** The period of time to wait for the test to complete. - * If no response is received before the timeout expires, the test is cancelled and returns an error. */ + * If no response is received before the timeout expires, the test is cancelled and returns an error. + * For realistic experiments, set this parameter sufficiently long to allow the test to complete. */ timeout?: Duration /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { name?: never, blob_count?: never, concurrency?: never, detailed?: never, early_read_node_count?: never, max_blob_size?: never, max_total_data_size?: never, rare_action_probability?: never, rarely_abort_writes?: never, read_node_count?: never, register_operation_count?: never, seed?: never, timeout?: never } From 4e2e1715f1413f7e243ecc305943c793d6efa0b2 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 15 Sep 2025 12:38:44 -0400 Subject: [PATCH 635/647] Auto-generated API code (#3009) --- docs/reference/api-reference.md | 203 +++----------------------------- src/api/api/indices.ts | 36 +++--- src/api/api/inference.ts | 1 + src/api/types.ts | 40 +++++-- 4 files changed, 70 insertions(+), 210 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 4ca10a5e6..064ab34e0 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -2226,91 +2226,6 @@ A partial reduction is performed every time the coordinating node has received a - **`_source_includes` (Optional, string \| string[])**: A list of fields to extract and return from the _source field - **`q` (Optional, string)**: Query in the Lucene query string syntax -## client.autoscaling.deleteAutoscalingPolicy [_autoscaling.delete_autoscaling_policy] -Delete an autoscaling policy. - -NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy) - -```ts -client.autoscaling.deleteAutoscalingPolicy({ name }) -``` - -### Arguments [_arguments_autoscaling.delete_autoscaling_policy] - -#### Request (object) [_request_autoscaling.delete_autoscaling_policy] -- **`name` (string)**: the name of the autoscaling policy -- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -## client.autoscaling.getAutoscalingCapacity [_autoscaling.get_autoscaling_capacity] -Get the autoscaling capacity. - -NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -This API gets the current autoscaling capacity based on the configured autoscaling policy. -It will return information to size the cluster appropriately to the current workload. - -The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. - -The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. - -The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. -This information is provided for diagnosis only. -Do not use this information to make autoscaling decisions. 
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity) - -```ts -client.autoscaling.getAutoscalingCapacity({ ... }) -``` - -### Arguments [_arguments_autoscaling.get_autoscaling_capacity] - -#### Request (object) [_request_autoscaling.get_autoscaling_capacity] -- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -## client.autoscaling.getAutoscalingPolicy [_autoscaling.get_autoscaling_policy] -Get an autoscaling policy. - -NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity) - -```ts -client.autoscaling.getAutoscalingPolicy({ name }) -``` - -### Arguments [_arguments_autoscaling.get_autoscaling_policy] - -#### Request (object) [_request_autoscaling.get_autoscaling_policy] -- **`name` (string)**: the name of the autoscaling policy -- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -## client.autoscaling.putAutoscalingPolicy [_autoscaling.put_autoscaling_policy] -Create or update an autoscaling policy. - -NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy) - -```ts -client.autoscaling.putAutoscalingPolicy({ name }) -``` - -### Arguments [_arguments_autoscaling.put_autoscaling_policy] - -#### Request (object) [_request_autoscaling.put_autoscaling_policy] -- **`name` (string)**: the name of the autoscaling policy -- **`policy` (Optional, { roles, deciders })** -- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - ## client.cat.aliases [_cat.aliases] Get aliases. @@ -5741,6 +5656,8 @@ NOTE: The total size of fields of the analyzed shards of the index in the respon Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. +For usage examples see the External documentation or refer to [Analyze the index disk usage example](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/index-disk-usage) for an example. 
+ [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage) ```ts @@ -7363,6 +7280,7 @@ client.indices.simulateIndexTemplate({ name }) #### Request (object) [_request_indices.simulate_index_template] - **`name` (string)**: Name of the index to simulate +- **`index_template` (Optional, { index_patterns, composed_of, template, version, priority, _meta, allow_auto_create, data_stream, deprecated, ignore_missing_component_templates, created_date, created_date_millis, modified_date, modified_date_millis })** - **`create` (Optional, boolean)**: Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one - **`cause` (Optional, string)**: User defined reason for dry-run creating the new template for simulation purposes - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -8393,6 +8311,16 @@ client.inference.textEmbedding({ inference_id, input }) - **`inference_id` (string)**: The inference Id - **`input` (string \| string[])**: Inference input. Either a string or an array of strings. +- **`input_type` (Optional, string)**: The input data type for the text embedding model. Possible values include: +* `SEARCH` +* `INGEST` +* `CLASSIFICATION` +* `CLUSTERING` +Not all services support all values. Unsupported values will trigger a validation exception. +Accepted values depend on the configured inference service, refer to the relevant service-specific documentation for more info. + +> info +> The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`. - **`task_settings` (Optional, User-defined value)**: Optional task settings - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete. @@ -8687,7 +8615,9 @@ client.license.get({ ... }) #### Request (object) [_request_license.get] - **`accept_enterprise` (Optional, boolean)**: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. This parameter is deprecated and will always be set to true in 8.x. -- **`local` (Optional, boolean)**: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. +- **`local` (Optional, boolean)**: Specifies whether to retrieve local information. +From 9.2 onwards the default value is `true`, which means the information is retrieved from the responding node. +In earlier versions the default is `false`, which means the information is retrieved from the elected master node. ## client.license.getBasicStatus [_license.get_basic_status] Get the basic license status. @@ -10965,7 +10895,7 @@ client.nodes.stats({ ... }) - **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics. - **`groups` (Optional, boolean)**: List of search groups to include in the search statistics. - **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). 
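For illustration, a minimal sketch of the `input_type` parameter documented above for `client.inference.textEmbedding`. It assumes a configured `client` instance; `my-embedding-endpoint` is a hypothetical inference endpoint id, and whether a value such as `INGEST` is accepted depends on the configured inference service:

```ts
// A hedged sketch: request embeddings and pin the input type for ingestion.
// `my-embedding-endpoint` is a hypothetical inference endpoint id.
const embeddings = await client.inference.textEmbedding({
  inference_id: 'my-embedding-endpoint',
  input: ['first passage to embed', 'second passage to embed'],
  input_type: 'INGEST' // takes precedence over task_settings.input_type
})
```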
-- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level.
+- **`level` (Optional, Enum("node" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the node, index, or shard level.
- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
- **`types` (Optional, string[])**: A list of document types for the indexing index metric.
- **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory.
@@ -13370,105 +13300,6 @@ visible to search. If 'wait_for', it waits for a refresh to make this operation
visible to search. If 'false', nothing is done with refreshes.
-## client.shutdown.deleteNode [_shutdown.delete_node]
-Cancel node shutdown preparations.
-Remove a node from the shutdown list so it can resume normal operations.
-You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster.
-Shutdown requests are never removed automatically by Elasticsearch.
-
-NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes.
-Direct use is not supported.
-
-If the operator privileges feature is enabled, you must be an operator to use this API.
-
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node)
-
-```ts
-client.shutdown.deleteNode({ node_id })
-```
-
-### Arguments [_arguments_shutdown.delete_node]
-
-#### Request (object) [_request_shutdown.delete_node]
-- **`node_id` (string)**: The node id of node to be removed from the shutdown state
-- **`master_timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-- **`timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-## client.shutdown.getNode [_shutdown.get_node]
-Get the shutdown status.
-
-Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled.
-The API returns status information for each part of the shut down process.
-
-NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
-
-If the operator privileges feature is enabled, you must be an operator to use this API.
-
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node)
-
-```ts
-client.shutdown.getNode({ ... })
-```
-
-### Arguments [_arguments_shutdown.get_node]
-
-#### Request (object) [_request_shutdown.get_node]
-- **`node_id` (Optional, string \| string[])**: Which node for which to retrieve the shutdown status
-- **`master_timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
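Returning to the `nodes.stats` `level` change above: the enum's top tier is now `node` rather than `cluster`. A minimal sketch, assuming a configured `client`:

```ts
// A hedged sketch: index statistics aggregated per node.
// With the updated enum, 'node' replaces the former 'cluster' value.
const stats = await client.nodes.stats({
  metric: 'indices',
  level: 'node'
})
```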
- -## client.shutdown.putNode [_shutdown.put_node] -Prepare a node to be shut down. - -NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster. - -If the operator privileges feature is enabled, you must be an operator to use this API. - -The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. -This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. - -You must specify the type of shutdown: `restart`, `remove`, or `replace`. -If a node is already being prepared for shutdown, you can use this API to change the shutdown type. - -IMPORTANT: This API does NOT terminate the Elasticsearch process. -Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node) - -```ts -client.shutdown.putNode({ node_id, type, reason }) -``` - -### Arguments [_arguments_shutdown.put_node] - -#### Request (object) [_request_shutdown.put_node] -- **`node_id` (string)**: The node identifier. -This parameter is not validated against the cluster's active nodes. -This enables you to register a node for shut down while it is offline. -No error is thrown if you specify an invalid node ID. -- **`type` (Enum("restart" \| "remove" \| "replace"))**: Valid values are restart, remove, or replace. -Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. -Because the node is expected to rejoin the cluster, data is not migrated off of the node. -Use remove when you need to permanently remove a node from the cluster. -The node is not marked ready for shutdown until data is migrated off of the node Use replace to do a 1:1 replacement of a node with another node. -Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. -During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. -- **`reason` (string)**: A human-readable reason that the node is being shut down. -This field provides information for other cluster operators; it does not affect the shut down process. -- **`allocation_delay` (Optional, string)**: Only valid if type is restart. -Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. -This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. -If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. -- **`target_node_name` (Optional, string)**: Only valid if type is replace. -Specifies the name of the node that is replacing the node being shut down. -Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. -During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. 
-- **`master_timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -- **`timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - ## client.simulate.ingest [_simulate.ingest] Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index d68766c43..8af4331e5 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -858,7 +858,9 @@ export default class Indices { path: [ 'name' ], - body: [], + body: [ + 'index_template' + ], query: [ 'create', 'cause', @@ -1796,7 +1798,7 @@ export default class Indices { } /** - * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. + * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. For usage examples see the External documentation or refer to [Analyze the index disk usage example](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/index-disk-usage) for an example. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage | Elasticsearch API documentation}
   */
  async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDiskUsageResponse>
@@ -4234,28 +4236,30 @@ async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesSimulateIndexTemplateResponse>
   async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
     const {
-      path: acceptedPath
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
     } = this.acceptedParams['indices.simulate_index_template']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

-    let body: Record<string, any> | string | undefined
-    const userBody = params?.body
-    if (userBody != null) {
-      if (typeof userBody === 'string') {
-        body = userBody
-      } else {
-        body = { ...userBody }
-      }
-    }
-
+    let body: any = params.body ?? undefined
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        // @ts-expect-error
+        body = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts
index 3f45aa6b7..05d3fab06 100644
--- a/src/api/api/inference.ts
+++ b/src/api/api/inference.ts
@@ -451,6 +451,7 @@ export default class Inference {
       ],
       body: [
         'input',
+        'input_type',
         'task_settings'
       ],
       query: [
diff --git a/src/api/types.ts b/src/api/types.ts
index 5ccd7f6f8..306cd712d 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -4257,6 +4257,8 @@ export interface NodeStatistics {
   failed: integer
 }

+export type NodeStatsLevel = 'node' | 'indices' | 'shards'
+
 export type Normalization = 'no' | 'h1' | 'h2' | 'h3' | 'z'

 export type OpType = 'index' | 'create'
@@ -22010,10 +22012,11 @@ export interface IndicesSimulateIndexTemplateRequest extends RequestBase {
   master_timeout?: Duration
   /** If true, returns all relevant default configurations for the index template. */
   include_defaults?: boolean
+  index_template?: IndicesIndexTemplate
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never }
+  body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, index_template?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never }
+  querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, index_template?: never }
 }

 export interface IndicesSimulateIndexTemplateResponse {
@@ -24604,12 +24607,23 @@ export interface InferenceTextEmbeddingRequest extends RequestBase {
   /** Inference input.
    * Either a string or an array of strings.
   */
  input: string | string[]
+  /** The input data type for the text embedding model. Possible values include:
+   * * `SEARCH`
+   * * `INGEST`
+   * * `CLASSIFICATION`
+   * * `CLUSTERING`
+   * Not all services support all values. Unsupported values will trigger a validation exception.
+   * Accepted values depend on the configured inference service, refer to the relevant service-specific documentation for more info.
+   *
+   * > info
+   * > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`. */
+  input_type?: string
  /** Optional task settings */
  task_settings?: InferenceTaskSettings
  /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
+  body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, input_type?: never, task_settings?: never }
  /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
+  querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, input_type?: never, task_settings?: never }
 }

 export type InferenceTextEmbeddingResponse = InferenceTextEmbeddingInferenceResult

@@ -24632,8 +24646,10 @@ export interface IngestAppendProcessor extends IngestProcessorBase {
   /** The field to be appended to.
    * Supports template snippets. */
   field: Field
-  /** The value to be appended. Supports template snippets. */
-  value: any | any[]
+  /** The value to be appended. Supports template snippets. May specify only one of `value` or `copy_from`. */
+  value?: any | any[]
+  /** The origin field whose value will be appended to `field`. It cannot be set together with `value`. */
+  copy_from?: Field
   /** If `false`, the processor does not append values already present in the field. */
   allow_duplicates?: boolean
 }

@@ -25917,7 +25933,9 @@ export interface LicenseGetRequest extends RequestBase {
   /** If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility.
    * This parameter is deprecated and will always be set to true in 8.x. */
   accept_enterprise?: boolean
-  /** Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. */
+  /** Specifies whether to retrieve local information.
+   * From 9.2 onwards the default value is `true`, which means the information is retrieved from the responding node.
+   * In earlier versions the default is `false`, which means the information is retrieved from the elected master node. */
   local?: boolean
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { accept_enterprise?: never, local?: never }
@@ -26577,6 +26595,7 @@ export interface MlDatafeed {
   authorization?: MlDatafeedAuthorization
   chunking_config?: MlChunkingConfig
   datafeed_id: Id
+  /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`.
When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration indices: string[] indexes?: string[] @@ -30510,6 +30529,7 @@ export interface MlUpdateDatafeedResponse { chunking_config: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig datafeed_id: Id + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration indices: string[] indices_options?: IndicesOptions @@ -32029,7 +32049,7 @@ export interface NodesStatsRequest extends RequestBase { /** If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). */ include_segment_file_sizes?: boolean /** Indicates whether statistics are aggregated at the cluster, index, or shard level. */ - level?: Level + level?: NodeStatsLevel /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** A comma-separated list of document types for the indexing index metric. */ @@ -32913,6 +32933,8 @@ export interface SecurityApiKey { _sort?: SortResults } +export type SecurityApiKeyManagedBy = 'cloud' | 'elasticsearch' + export type SecurityApiKeyType = 'rest' | 'cross_cluster' export interface SecurityApplicationGlobalUserPrivileges { @@ -33240,6 +33262,8 @@ export type SecurityActivateUserProfileResponse = SecurityUserProfileWithMetadat export interface SecurityAuthenticateAuthenticateApiKey { id: Id name?: Name + managed_by: SecurityApiKeyManagedBy + internal: boolean } export interface SecurityAuthenticateRequest extends RequestBase { From 14dbbdc9ced8018cc60e2980b1e2d4809e1f258b Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Tue, 30 Sep 2025 11:41:11 -0500 Subject: [PATCH 636/647] Bump transport to 9.2.0 (#3019) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 7364b86aa..424490f27 100644 --- a/package.json +++ b/package.json @@ -92,7 +92,7 @@ "zx": "8.8.0" }, "dependencies": { - "@elastic/transport": "^9.0.1", + "@elastic/transport": "^9.2.0", "apache-arrow": "18.x - 21.x", "tslib": "^2.4.0" }, From 80418dffb18520bd7037958335bd1fb2e484c347 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 1 Oct 2025 11:35:24 -0500 Subject: [PATCH 637/647] Add support for accepted parameters metadata on all requests (#3013) --- src/client.ts | 2 + test/unit/api.test.ts | 96 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+) diff --git a/src/client.ts b/src/client.ts index c159428e3..1166d21da 100644 --- a/src/client.ts +++ b/src/client.ts @@ -41,6 +41,8 @@ import transportPackageJson from '@elastic/transport/package.json' const kChild = Symbol('elasticsearchjs-child') const kInitialOptions = Symbol('elasticsearchjs-initial-options') +export const kAcceptedParams = 
Symbol('elasticsearchjs-accepted-params') + let clientVersion: string = packageJson.version /* istanbul ignore next */ if (clientVersion.includes('-')) { diff --git a/test/unit/api.test.ts b/test/unit/api.test.ts index 452f53805..213f87da5 100644 --- a/test/unit/api.test.ts +++ b/test/unit/api.test.ts @@ -6,6 +6,7 @@ import { test } from 'tap' import { connection } from '../utils' import { Client } from '../..' +import { Transport } from '@elastic/transport' import * as T from '../../lib/api/types' test('Api with top level body', async t => { @@ -167,3 +168,98 @@ test('With generic document and aggregation', async t => { t.ok(Array.isArray(response.aggregations?.unique.buckets)) }) +test('Api request metadata', t => { + t.test('name', async t => { + class TestTransport extends Transport { + // @ts-expect-error + async request(params, options) { + t.equal(params.meta.name, 'synonyms.put_synonym_rule') + return super.request(params, options) + } + } + + const Connection = connection.buildMockConnection({ + onRequest () { + return { + statusCode: 200, + body: { took: 42 } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + // @ts-expect-error + Transport: TestTransport, + Connection + }) + // @ts-expect-error + await client.synonyms.putSynonymRule({ set_id: "foo", rule_id: "bar" }) + }) + + t.test('pathParts', async t => { + class TestTransport extends Transport { + // @ts-expect-error + async request(params, options) { + t.strictSame(params.meta.pathParts, { + set_id: 'foo', + rule_id: 'bar' + }) + return super.request(params, options) + } + } + + const Connection = connection.buildMockConnection({ + onRequest () { + return { + statusCode: 200, + body: { took: 42 } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + // @ts-expect-error + Transport: TestTransport, + Connection + }) + // @ts-expect-error + await client.synonyms.putSynonymRule({ set_id: "foo", rule_id: "bar" }) + }) + + t.test('acceptedParams', async t => { + class TestTransport extends Transport { + // @ts-expect-error + async request(params, options) { + t.strictSame(params.meta.acceptedParams, [ + 'set_id', + 'rule_id', + 'synonyms', + 'refresh', + ]) + return super.request(params, options) + } + } + + const Connection = connection.buildMockConnection({ + onRequest () { + return { + statusCode: 200, + body: { took: 42 } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + // @ts-expect-error + Transport: TestTransport, + Connection + }) + // @ts-expect-error + await client.synonyms.putSynonymRule({ set_id: "foo", rule_id: "bar" }) + }) + + t.end() +}) From fe4464b132e4fcaa1600dbd017dd9eb49b3813f2 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 1 Oct 2025 18:47:47 +0200 Subject: [PATCH 638/647] Auto-generated API code (#3026) --- docs/reference/api-reference.md | 125 +++- src/api/api/async_search.ts | 117 ++- src/api/api/autoscaling.ts | 41 +- src/api/api/bulk.ts | 18 +- src/api/api/capabilities.ts | 4 +- src/api/api/cat.ts | 328 +++++--- src/api/api/ccr.ts | 155 +++- src/api/api/clear_scroll.ts | 6 +- src/api/api/close_point_in_time.ts | 5 +- src/api/api/cluster.ts | 184 ++++- src/api/api/connector.ts | 309 ++++++-- src/api/api/count.ts | 22 +- src/api/api/create.ts | 17 +- src/api/api/dangling_indices.ts | 33 +- src/api/api/delete.ts | 14 +- src/api/api/delete_by_query.ts | 38 +- src/api/api/delete_by_query_rethrottle.ts | 6 +- src/api/api/delete_script.ts | 7 +- src/api/api/enrich.ts | 50 +- 
src/api/api/eql.ts | 65 +- src/api/api/esql.ts | 91 ++- src/api/api/exists.ts | 16 +- src/api/api/exists_source.ts | 15 +- src/api/api/explain.ts | 19 +- src/api/api/features.ts | 21 +- src/api/api/field_caps.ts | 20 +- src/api/api/fleet.ts | 141 +++- src/api/api/get.ts | 18 +- src/api/api/get_script.ts | 6 +- src/api/api/get_script_context.ts | 4 +- src/api/api/get_script_languages.ts | 4 +- src/api/api/get_source.ts | 15 +- src/api/api/graph.ts | 20 +- src/api/api/health_report.ts | 8 +- src/api/api/ilm.ts | 100 ++- src/api/api/index.ts | 20 +- src/api/api/indices.ts | 851 +++++++++++++++++---- src/api/api/inference.ts | 381 ++++++++-- src/api/api/info.ts | 4 +- src/api/api/ingest.ts | 120 ++- src/api/api/knn_search.ts | 5 +- src/api/api/license.ts | 64 +- src/api/api/logstash.ts | 29 +- src/api/api/mget.ts | 16 +- src/api/api/migration.ts | 26 +- src/api/api/ml.ts | 863 ++++++++++++++++++---- src/api/api/monitoring.ts | 18 +- src/api/api/msearch.ts | 22 +- src/api/api/msearch_template.ts | 13 +- src/api/api/mtermvectors.ts | 19 +- src/api/api/nodes.ts | 82 +- src/api/api/open_point_in_time.ts | 15 +- src/api/api/ping.ts | 4 +- src/api/api/profiling.ts | 31 +- src/api/api/project.ts | 88 +++ src/api/api/put_script.ts | 10 +- src/api/api/query_rules.ts | 73 +- src/api/api/rank_eval.ts | 11 +- src/api/api/reindex.ts | 19 +- src/api/api/reindex_rethrottle.ts | 6 +- src/api/api/render_search_template.ts | 9 +- src/api/api/rollup.ts | 79 +- src/api/api/scripts_painless_execute.ts | 7 +- src/api/api/scroll.ts | 10 +- src/api/api/search.ts | 87 ++- src/api/api/search_application.ts | 87 ++- src/api/api/search_mvt.ts | 33 +- src/api/api/search_shards.ts | 12 +- src/api/api/search_template.ts | 25 +- src/api/api/searchable_snapshots.ts | 48 +- src/api/api/security.ts | 653 ++++++++++++---- src/api/api/shutdown.ts | 37 +- src/api/api/simulate.ts | 21 +- src/api/api/slm.ts | 88 ++- src/api/api/snapshot.ts | 178 ++++- src/api/api/sql.ts | 77 +- src/api/api/ssl.ts | 13 +- src/api/api/streams.ts | 83 ++- src/api/api/synonyms.ts | 67 +- src/api/api/tasks.ts | 40 +- src/api/api/terms_enum.ts | 12 +- src/api/api/termvectors.ts | 29 +- src/api/api/text_structure.ts | 76 +- src/api/api/transform.ts | 152 +++- src/api/api/update.ts | 26 +- src/api/api/update_by_query.ts | 41 +- src/api/api/update_by_query_rethrottle.ts | 6 +- src/api/api/watcher.ts | 128 +++- src/api/api/xpack.ts | 23 +- src/api/index.ts | 8 + src/api/types.ts | 520 +++++++++---- 91 files changed, 6062 insertions(+), 1445 deletions(-) create mode 100644 src/api/api/project.ts diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 064ab34e0..ed819a820 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -217,6 +217,7 @@ client.count({ ... }) - **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - **`min_score` (Optional, number)**: The minimum `_score` value that documents must have to be included in the result. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). 
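As a hedged illustration of the `project_routing` bullet above, assuming a configured `client` on a serverless project:

```ts
// A minimal sketch (serverless only): count documents, restricting the
// request to the origin project via its `_alias` metadata tag.
const { count } = await client.count({
  index: 'logs-*', // hypothetical index pattern
  project_routing: '_alias:_origin'
})
```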
Examples: _alias:my-project _alias:_origin _alias:*pr* Supported in serverless only. - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. - **`q` (Optional, string)**: The query in Lucene query string syntax. This parameter cannot be used with a request body. @@ -673,6 +674,7 @@ client.fieldCaps({ ... }) - **`filters` (Optional, string)**: A list of filters to apply to the response. - **`types` (Optional, string[])**: A list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. - **`include_empty_fields` (Optional, boolean)**: If false, empty fields are not included in the response. +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the field-caps query using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). Examples: _alias:my-project _alias:_origin _alias:*pr* Supported in serverless only. ## client.get [_get] Get a document by its ID. @@ -1101,7 +1103,7 @@ client.msearch({ ... }) #### Request (object) [_request_msearch] - **`index` (Optional, string \| string[])**: List of data streams, indices, and index aliases to search. -- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** +- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, project_routing, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** - **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. 
- **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests.
- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
@@ -1111,6 +1113,7 @@ client.msearch({ ... })
- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`.
- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node.
- **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for a search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). Examples: _alias:my-project _alias:_origin _alias:*pr* Supported in serverless only.
- **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object.
- **`routing` (Optional, string)**: Custom routing value used to route search operations to a specific shard.
- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents.
@@ -1143,9 +1146,10 @@ client.msearchTemplate({ ... })

#### Request (object) [_request_msearch_template]
- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`.
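The `project_routing` option added to `msearch` above combines with the usual header/body pairs in `searches`. A minimal sketch, assuming a configured `client` on a serverless project:

```ts
// A hedged sketch (serverless only): both sub-searches are scoped to the
// hypothetical project alias 'my-project'.
const responses = await client.msearch({
  index: 'my-index', // hypothetical index
  project_routing: '_alias:my-project',
  searches: [
    {}, { query: { match_all: {} } },
    {}, { query: { term: { status: 'open' } } }
  ]
})
```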
-- **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** +- **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, project_routing, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** - **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. - **`max_concurrent_searches` (Optional, number)**: The maximum number of concurrent searches the API can run. +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). Examples: _alias:my-project _alias:_origin _alias:*pr* Supported in serverless only. - **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation. - **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. - **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types. @@ -1241,6 +1245,7 @@ client.openPointInTime({ index, keep_alive }) - **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the PIT request using project metadata tags in a subset of Lucene query syntax. 
Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). Examples: _alias:my-project _alias:_origin _alias:*pr* Supported in serverless only. - **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - **`allow_partial_search_results` (Optional, boolean)**: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. @@ -1516,7 +1521,7 @@ client.search({ ... }) - **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. - **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL. -- **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. 
+- **`rescore` (Optional, { window_size, query, learning_to_rank, script } \| { window_size, query, learning_to_rank, script }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. - **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule, rescorer, linear, pinned })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. - **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. - **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: Used to retrieve the next page of hits using a set of sort values from the previous page. @@ -1551,6 +1556,7 @@ client.search({ ... }) - **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. - **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. * `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. - **`pre_filter_shard_size` (Optional, number)**: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field. +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). Examples: _alias:my-project _alias:_origin _alias:*pr* Supported in serverless only. - **`request_cache` (Optional, boolean)**: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings. 
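The same `_alias` tag syntax drives the `project_routing` parameter documented above for `search`. A minimal sketch, assuming a configured `client` on a serverless project:

```ts
// A hedged sketch (serverless only): a wildcard selects any project whose
// alias contains 'pr'; the index name is hypothetical.
const response = await client.search({
  index: 'my-index',
  project_routing: '_alias:*pr*',
  query: { match: { message: 'hello world' } }
})
```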
- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting. @@ -1686,6 +1692,7 @@ client.searchMvt({ index, field, zoom, x, y }) - **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. - **`track_total_hits` (Optional, boolean \| number)**: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. - **`with_labels` (Optional, boolean)**: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`. +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). Examples: _alias:my-project _alias:_origin _alias:*pr* Supported in serverless only. ## client.searchShards [_search_shards] Get the search shards. @@ -1738,6 +1745,7 @@ client.searchTemplate({ ... }) - **`ignore_throttled` (Optional, boolean)**: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. - **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). Examples: _alias:my-project _alias:_origin _alias:*pr* Supported in serverless only. - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - **`scroll` (Optional, string \| -1 \| 0)**: Specifies how long a consistent view of the index should be maintained for scrolled search. - **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation. @@ -2159,7 +2167,7 @@ not included in search results and results collected by aggregations. 
- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** - **`profile` (Optional, boolean)** - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. -- **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])** +- **`rescore` (Optional, { window_size, query, learning_to_rank, script } \| { window_size, query, learning_to_rank, script }[])** - **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. - **`search_after` (Optional, number \| number \| string \| boolean \| null[])** - **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more @@ -2213,6 +2221,14 @@ A partial reduction is performed every time the coordinating node has received a - **`lenient` (Optional, boolean)**: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests - **`preference` (Optional, string)**: Specify the node or shard the operation should be performed on (default: random) +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project +metadata tags in a subset of Lucene query syntax. +Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). +Examples: + _alias:my-project + _alias:_origin + _alias:*pr* +Supported in serverless only. - **`request_cache` (Optional, boolean)**: Specify if request cache should be used for this request or not, defaults to true - **`routing` (Optional, string)**: A list of specific routing values - **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Search operation type @@ -2272,7 +2288,6 @@ client.cat.allocation({ ... 
}) #### Request (object) [_request_cat.allocation] - **`node_id` (Optional, string \| string[])**: A list of node identifiers or names used to limit the returned information. -- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`h` (Optional, Enum("shards" \| "shards.undesired" \| "write_load.forecast" \| "disk.indices.forecast" \| "disk.indices" \| "disk.used" \| "disk.avail" \| "disk.total" \| "disk.percent" \| "host" \| "ip" \| "node" \| "node.role") \| Enum("shards" \| "shards.undesired" \| "write_load.forecast" \| "disk.indices.forecast" \| "disk.indices" \| "disk.used" \| "disk.avail" \| "disk.total" \| "disk.percent" \| "host" \| "ip" \| "node" \| "node.role")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` @@ -2336,6 +2351,14 @@ client.cat.count({ ... }) It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`h` (Optional, Enum("epoch" \| "timestamp" \| "count") \| Enum("epoch" \| "timestamp" \| "count")[])**: A list of columns names to display. It supports simple wildcards. +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project +metadata tags in a subset of Lucene query syntax. +Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). +Examples: + _alias:my-project + _alias:_origin + _alias:*pr* +Supported in serverless only. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2359,7 +2382,6 @@ client.cat.fielddata({ ... }) #### Request (object) [_request_cat.fielddata] - **`fields` (Optional, string \| string[])**: List of fields used to limit returned information. To retrieve all fields, omit this parameter. -- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`h` (Optional, Enum("id" \| "host" \| "ip" \| "node" \| "field" \| "size") \| Enum("id" \| "host" \| "ip" \| "node" \| "field" \| "size")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` @@ -2387,7 +2409,6 @@ client.cat.health({ ... }) ### Arguments [_arguments_cat.health] #### Request (object) [_request_cat.health] -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. - **`ts` (Optional, boolean)**: If true, returns `HH:MM:SS` and Unix epoch timestamps. - **`h` (Optional, Enum("epoch" \| "timestamp" \| "cluster" \| "status" \| "node.total" \| "node.data" \| "shards" \| "pri" \| "relo" \| "init" \| "unassign" \| "unassign.pri" \| "pending_tasks" \| "max_task_wait_time" \| "active_shards_percent") \| Enum("epoch" \| "timestamp" \| "cluster" \| "status" \| "node.total" \| "node.data" \| "shards" \| "pri" \| "relo" \| "init" \| "unassign" \| "unassign.pri" \| "pending_tasks" \| "max_task_wait_time" \| "active_shards_percent")[])**: A list of columns names to display. It supports simple wildcards. 
- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. @@ -2435,12 +2456,10 @@ client.cat.indices({ ... }) #### Request (object) [_request_cat.indices] - **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. -- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. - **`health` (Optional, Enum("green" \| "yellow" \| "red" \| "unknown" \| "unavailable"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. - **`pri` (Optional, boolean)**: If true, the response only includes information from primary shards. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. - **`h` (Optional, Enum("health" \| "status" \| "index" \| "uuid" \| "pri" \| "rep" \| "docs.count" \| "docs.deleted" \| "creation.date" \| "creation.date.string" \| "store.size" \| "pri.store.size" \| "dataset.size" \| "completion.size" \| "pri.completion.size" \| "fielddata.memory_size" \| "pri.fielddata.memory_size" \| "fielddata.evictions" \| "pri.fielddata.evictions" \| "query_cache.memory_size" \| "pri.query_cache.memory_size" \| "query_cache.evictions" \| "pri.query_cache.evictions" \| "request_cache.memory_size" \| "pri.request_cache.memory_size" \| "request_cache.evictions" \| "pri.request_cache.evictions" \| "request_cache.hit_count" \| "pri.request_cache.hit_count" \| "request_cache.miss_count" \| "pri.request_cache.miss_count" \| "flush.total" \| "pri.flush.total" \| "flush.total_time" \| "pri.flush.total_time" \| "get.current" \| "pri.get.current" \| "get.time" \| "pri.get.time" \| "get.total" \| "pri.get.total" \| "get.exists_time" \| "pri.get.exists_time" \| "get.exists_total" \| "pri.get.exists_total" \| "get.missing_time" \| "pri.get.missing_time" \| "get.missing_total" \| "pri.get.missing_total" \| "indexing.delete_current" \| "pri.indexing.delete_current" \| "indexing.delete_time" \| "pri.indexing.delete_time" \| "indexing.delete_total" \| "pri.indexing.delete_total" \| "indexing.index_current" \| "pri.indexing.index_current" \| "indexing.index_time" \| "pri.indexing.index_time" \| "indexing.index_total" \| "pri.indexing.index_total" \| "indexing.index_failed" \| "pri.indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "pri.indexing.index_failed_due_to_version_conflict" \| "merges.current" \| "pri.merges.current" \| "merges.current_docs" \| "pri.merges.current_docs" \| "merges.current_size" \| "pri.merges.current_size" \| "merges.total" \| "pri.merges.total" \| "merges.total_docs" \| "pri.merges.total_docs" \| "merges.total_size" \| "pri.merges.total_size" \| "merges.total_time" \| "pri.merges.total_time" \| "refresh.total" \| "pri.refresh.total" \| "refresh.time" \| "pri.refresh.time" \| "refresh.external_total" \| 
"pri.refresh.external_total" \| "refresh.external_time" \| "pri.refresh.external_time" \| "refresh.listeners" \| "pri.refresh.listeners" \| "search.fetch_current" \| "pri.search.fetch_current" \| "search.fetch_time" \| "pri.search.fetch_time" \| "search.fetch_total" \| "pri.search.fetch_total" \| "search.open_contexts" \| "pri.search.open_contexts" \| "search.query_current" \| "pri.search.query_current" \| "search.query_time" \| "pri.search.query_time" \| "search.query_total" \| "pri.search.query_total" \| "search.scroll_current" \| "pri.search.scroll_current" \| "search.scroll_time" \| "pri.search.scroll_time" \| "search.scroll_total" \| "pri.search.scroll_total" \| "segments.count" \| "pri.segments.count" \| "segments.memory" \| "pri.segments.memory" \| "segments.index_writer_memory" \| "pri.segments.index_writer_memory" \| "segments.version_map_memory" \| "pri.segments.version_map_memory" \| "segments.fixed_bitset_memory" \| "pri.segments.fixed_bitset_memory" \| "warmer.current" \| "pri.warmer.current" \| "warmer.total" \| "pri.warmer.total" \| "warmer.total_time" \| "pri.warmer.total_time" \| "suggest.current" \| "pri.suggest.current" \| "suggest.time" \| "pri.suggest.time" \| "suggest.total" \| "pri.suggest.total" \| "memory.total" \| "pri.memory.total" \| "bulk.total_operations" \| "pri.bulk.total_operations" \| "bulk.total_time" \| "pri.bulk.total_time" \| "bulk.total_size_in_bytes" \| "pri.bulk.total_size_in_bytes" \| "bulk.avg_time" \| "pri.bulk.avg_time" \| "bulk.avg_size_in_bytes" \| "pri.bulk.avg_size_in_bytes" \| "dense_vector.value_count" \| "pri.dense_vector.value_count" \| "sparse_vector.value_count" \| "pri.sparse_vector.value_count") \| Enum("health" \| "status" \| "index" \| "uuid" \| "pri" \| "rep" \| "docs.count" \| "docs.deleted" \| "creation.date" \| "creation.date.string" \| "store.size" \| "pri.store.size" \| "dataset.size" \| "completion.size" \| "pri.completion.size" \| "fielddata.memory_size" \| "pri.fielddata.memory_size" \| "fielddata.evictions" \| "pri.fielddata.evictions" \| "query_cache.memory_size" \| "pri.query_cache.memory_size" \| "query_cache.evictions" \| "pri.query_cache.evictions" \| "request_cache.memory_size" \| "pri.request_cache.memory_size" \| "request_cache.evictions" \| "pri.request_cache.evictions" \| "request_cache.hit_count" \| "pri.request_cache.hit_count" \| "request_cache.miss_count" \| "pri.request_cache.miss_count" \| "flush.total" \| "pri.flush.total" \| "flush.total_time" \| "pri.flush.total_time" \| "get.current" \| "pri.get.current" \| "get.time" \| "pri.get.time" \| "get.total" \| "pri.get.total" \| "get.exists_time" \| "pri.get.exists_time" \| "get.exists_total" \| "pri.get.exists_total" \| "get.missing_time" \| "pri.get.missing_time" \| "get.missing_total" \| "pri.get.missing_total" \| "indexing.delete_current" \| "pri.indexing.delete_current" \| "indexing.delete_time" \| "pri.indexing.delete_time" \| "indexing.delete_total" \| "pri.indexing.delete_total" \| "indexing.index_current" \| "pri.indexing.index_current" \| "indexing.index_time" \| "pri.indexing.index_time" \| "indexing.index_total" \| "pri.indexing.index_total" \| "indexing.index_failed" \| "pri.indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "pri.indexing.index_failed_due_to_version_conflict" \| "merges.current" \| "pri.merges.current" \| "merges.current_docs" \| "pri.merges.current_docs" \| "merges.current_size" \| "pri.merges.current_size" \| "merges.total" \| "pri.merges.total" \| "merges.total_docs" \| "pri.merges.total_docs" \| 
"merges.total_size" \| "pri.merges.total_size" \| "merges.total_time" \| "pri.merges.total_time" \| "refresh.total" \| "pri.refresh.total" \| "refresh.time" \| "pri.refresh.time" \| "refresh.external_total" \| "pri.refresh.external_total" \| "refresh.external_time" \| "pri.refresh.external_time" \| "refresh.listeners" \| "pri.refresh.listeners" \| "search.fetch_current" \| "pri.search.fetch_current" \| "search.fetch_time" \| "pri.search.fetch_time" \| "search.fetch_total" \| "pri.search.fetch_total" \| "search.open_contexts" \| "pri.search.open_contexts" \| "search.query_current" \| "pri.search.query_current" \| "search.query_time" \| "pri.search.query_time" \| "search.query_total" \| "pri.search.query_total" \| "search.scroll_current" \| "pri.search.scroll_current" \| "search.scroll_time" \| "pri.search.scroll_time" \| "search.scroll_total" \| "pri.search.scroll_total" \| "segments.count" \| "pri.segments.count" \| "segments.memory" \| "pri.segments.memory" \| "segments.index_writer_memory" \| "pri.segments.index_writer_memory" \| "segments.version_map_memory" \| "pri.segments.version_map_memory" \| "segments.fixed_bitset_memory" \| "pri.segments.fixed_bitset_memory" \| "warmer.current" \| "pri.warmer.current" \| "warmer.total" \| "pri.warmer.total" \| "warmer.total_time" \| "pri.warmer.total_time" \| "suggest.current" \| "pri.suggest.current" \| "suggest.time" \| "pri.suggest.time" \| "suggest.total" \| "pri.suggest.total" \| "memory.total" \| "pri.memory.total" \| "bulk.total_operations" \| "pri.bulk.total_operations" \| "bulk.total_time" \| "pri.bulk.total_time" \| "bulk.total_size_in_bytes" \| "pri.bulk.total_size_in_bytes" \| "bulk.avg_time" \| "pri.bulk.avg_time" \| "bulk.avg_size_in_bytes" \| "pri.bulk.avg_size_in_bytes" \| "dense_vector.value_count" \| "pri.dense_vector.value_count" \| "sparse_vector.value_count" \| "pri.sparse_vector.value_count")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. @@ -2493,11 +2512,9 @@ client.cat.mlDataFrameAnalytics({ ... }) #### Request (object) [_request_cat.ml_data_frame_analytics] - **`id` (Optional, string)**: The ID of the data frame analytics to fetch - **`allow_no_match` (Optional, boolean)**: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) -- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit in which to display byte values - **`h` (Optional, Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version") \| Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version")[])**: List of column names to display. 
- **`s` (Optional, Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version") \| Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version")[])**: List of column names or column aliases used to sort the response. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. ## client.cat.mlDatafeeds [_cat.ml_datafeeds] Get datafeeds. @@ -2532,7 +2549,6 @@ there are partial matches. If `false`, the API returns a 404 status code when th partial matches. - **`h` (Optional, Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s") \| Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s")[])**: List of column names to display. - **`s` (Optional, Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s") \| Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s")[])**: List of column names or column aliases used to sort the response. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.mlJobs [_cat.ml_jobs] Get anomaly detection jobs. @@ -2565,10 +2581,8 @@ client.cat.mlJobs({ ... }) If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. -- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. 
- **`h` (Optional, Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state") \| Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state")[])**: List of column names to display. 
- **`s` (Optional, Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state") \| Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state")[])**: List of column names or column aliases used to sort the response. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.mlTrainedModels [_cat.ml_trained_models] Get trained models. @@ -2592,12 +2606,10 @@ client.cat.mlTrainedModels({ ... 
}) - **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. -- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`h` (Optional, Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version") \| Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version")[])**: A list of column names to display. - **`s` (Optional, Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version") \| Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version")[])**: A list of column names or aliases used to sort the response. - **`from` (Optional, number)**: Skips the specified number of transforms. - **`size` (Optional, number)**: The maximum number of transforms to display. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. ## client.cat.nodeattrs [_cat.nodeattrs] Get node attribute information. @@ -2639,8 +2651,7 @@ client.cat.nodes({ ... }) ### Arguments [_arguments_cat.nodes] #### Request (object) [_request_cat.nodes] -- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. -- **`full_id` (Optional, boolean \| string)**: If `true`, return the full node ID. If `false`, return the shortened node ID. +- **`full_id` (Optional, boolean)**: If `true`, return the full node ID. If `false`, return the shortened node ID. - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. 
- **`h` (Optional, Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version") \| Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" 
\| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version")[])**: A list of columns names to display. It supports simple wildcards. @@ -2648,7 +2659,6 @@ It supports simple wildcards. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.pendingTasks [_cat.pending_tasks] Get pending task information. @@ -2674,7 +2684,6 @@ local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. ## client.cat.plugins [_cat.plugins] Get plugin information. @@ -2722,14 +2731,12 @@ client.cat.recovery({ ... }) - **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. -- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. - **`h` (Optional, Enum("index" \| "shard" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent") \| Enum("index" \| "shard" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.repositories [_cat.repositories] Get snapshot repository information. @@ -2775,7 +2782,6 @@ client.cat.segments({ ... 
}) - **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. -- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`h` (Optional, Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id") \| Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. @@ -2806,13 +2812,11 @@ client.cat.shards({ ... }) - **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. -- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`h` (Optional, Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason") \| Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| 
"merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason")[])**: List of columns to appear in the response. Supports simple wildcards. - **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.snapshots [_cat.snapshots] Get snapshot information. @@ -2841,7 +2845,6 @@ It supports simple wildcards. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. ## client.cat.tasks [_cat.tasks] Get task information. @@ -2866,7 +2869,6 @@ client.cat.tasks({ ... }) - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed. @@ -2921,7 +2923,6 @@ Accepts wildcard expressions. - **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. - **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -2954,7 +2955,6 @@ If `false`, the request returns a 404 status code when there are no matches or o - **`from` (Optional, number)**: Skips the specified number of transforms. 
- **`h` (Optional, Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version") \| Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version")[])**: List of column names to display. - **`s` (Optional, Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version") \| Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version")[])**: List of column names or column aliases used to sort the response. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. - **`size` (Optional, number)**: The maximum number of transforms to obtain. ## client.ccr.deleteAutoFollowPattern [_ccr.delete_auto_follow_pattern] @@ -4504,6 +4504,14 @@ parameter to get a smaller or larger set of samples. To retrieve more than one s - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
- **`ccs_minimize_roundtrips` (Optional, boolean)**: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution - **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project +metadata tags in a subset of Lucene query syntax. +Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). +Examples: + _alias:my-project + _alias:_origin + _alias:*pr* +Supported in serverless only. ## client.esql.asyncQuery [_esql.async_query] Run an async ES|QL query. @@ -4531,7 +4539,7 @@ and its format can change at any time but it can give some insight into the perf of each part of the query. - **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. -- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` +- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. - **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the request to finish. @@ -4673,14 +4681,14 @@ client.esql.query({ query }) - **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. - **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. - **`locale` (Optional, string)** -- **`params` (Optional, number \| number \| string \| boolean \| null[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +- **`params` (Optional, number \| number \| string \| boolean \| null \| number \| number \| string \| boolean \| null[][])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. - **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. 
This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. - **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. -- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` +- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. - **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, e.g. json, yaml. @@ -4788,7 +4796,7 @@ client.fleet.msearch({ ... }) #### Request (object) [_request_fleet.msearch] - **`index` (Optional, string \| string)**: A single target to search. If the target is an index alias, it must resolve to a single index. -- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** +- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, project_routing, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** - **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. @@ -4842,7 +4850,7 @@ not included in search results and results collected by aggregations. 
- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** - **`profile` (Optional, boolean)** - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. -- **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])** +- **`rescore` (Optional, { window_size, query, learning_to_rank, script } \| { window_size, query, learning_to_rank, script }[])** - **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. - **`search_after` (Optional, number \| number \| string \| boolean \| null[])** - **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more @@ -6661,7 +6669,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. - **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. -- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: +- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -6877,6 +6885,8 @@ For data streams, the API runs the refresh operation on the stream’s backing i By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. +In Elastic Cloud Serverless, the default refresh interval is 5 seconds across all indices. + Refresh requests are synchronous and do not return a response until the refresh operation completes. Refreshes are resource-intensive. @@ -7080,6 +7090,14 @@ Supports a list of values, such as `open,hidden`. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. 
- **`mode` (Optional, Enum("standard" \| "time_series" \| "logsdb" \| "lookup") \| Enum("standard" \| "time_series" \| "logsdb" \| "lookup")[])**: Filter indices by index mode - standard, lookup, time_series, etc. List of IndexMode. Empty means no filter. +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target using project +metadata tags in a subset of Lucene query syntax. +Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). +Examples: + _alias:my-project + _alias:_origin + _alias:*pr* +Supported in serverless only. ## client.indices.rollover [_indices.rollover] Roll over to a new index. @@ -8026,7 +8044,7 @@ client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, ser - **`task_type` (Enum("rerank" \| "text_embedding" \| "completion" \| "chat_completion"))**: The type of the inference task that the model will perform. - **`googlevertexai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`. -- **`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. +- **`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json, dimensions })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { auto_truncate, top_n, thinking_config })**: Settings to configure the inference task. These settings are specific to the task type you specified. @@ -8549,6 +8567,7 @@ client.ingest.putPipeline({ id }) - **`version` (Optional, number)**: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. - **`deprecated` (Optional, boolean)**: Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. +- **`field_access_pattern` (Optional, Enum("classic" \| "flexible"))**: Controls how processors in this pipeline should read and write data on a document's source. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - **`if_version` (Optional, number)**: Required version for optimistic concurrency control for pipeline updates @@ -8571,7 +8590,7 @@ client.ingest.simulate({ docs }) - **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline. - **`id` (Optional, string)**: The pipeline to test. If you don't specify a `pipeline` in the request body, this parameter is required. 
-- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta, created_date, created_date_millis, modified_date, modified_date_millis })**: The pipeline to test.
+- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta, created_date, created_date_millis, modified_date, modified_date_millis, field_access_pattern })**: The pipeline to test.
If you don't specify the `pipeline` request path parameter, this parameter is required.
If you specify both this and the request path parameter, the API only uses the request path parameter.
- **`verbose` (Optional, boolean)**: If `true`, the response includes output data for each processor in the executed pipeline.
@@ -8759,7 +8778,7 @@ client.logstash.putPipeline({ id })
#### Request (object) [_request_logstash.put_pipeline]
- **`id` (string)**: An identifier for the pipeline.
+Pipeline IDs must begin with a letter or underscore and contain only letters, underscores, hyphens, and numbers.
-- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta, created_date, created_date_millis, modified_date, modified_date_millis })**
+- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta, created_date, created_date_millis, modified_date, modified_date_millis, field_access_pattern })**
## client.migration.deprecations [_migration.deprecations]
Get deprecation information.
@@ -9679,7 +9699,7 @@ using `_all` or by specifying `*` as the `<job_id>`.
- **`bucket_span` (Optional, string \| -1 \| 0)**: Refer to the description for the `bucket_span` query parameter.
- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter.
- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
-- **`overall_score` (Optional, number \| string)**: Refer to the description for the `overall_score` query parameter.
+- **`overall_score` (Optional, number)**: Refer to the description for the `overall_score` query parameter.
- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter.
- **`top_n` (Optional, number)**: Refer to the description for the `top_n` query parameter.
@@ -10918,6 +10938,13 @@ A list of the following options: `_all`, `rest_actions`.
- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error.
+## client.project.tags [_project.tags]
+Return the tags defined for the project.
+```ts
+client.project.tags()
+```
+
+
## client.queryRules.deleteRule [_query_rules.delete_rule]
Delete a query rule.
Delete a query rule within a query ruleset.
@@ -12319,6 +12346,16 @@ client.security.getSettings({ ... })
- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
+## client.security.getStats [_security.get_stats]
+Get security statistics for all nodes.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-stats)
+
+```ts
+client.security.getStats()
+```
+
+
## client.security.getToken [_security.get_token]
Get a token.
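For orientation, a minimal sketch of requesting a token; the `grant_type`, `username`, and `password` values below are illustrative assumptions, not taken from this changeset:

```ts
// Hypothetical credentials; grant types other than `password` exist as well.
const token = await client.security.getToken({
  grant_type: 'password',
  username: 'elastic',
  password: 'changeme'
})
```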
@@ -13335,7 +13372,7 @@ If you specify this parameter in the request path, it is used for any documents - **`component_template_substitutions` (Optional, Record)**: A map of component template names to substitute component template definition objects. - **`index_template_substitutions` (Optional, Record)**: A map of index template names to substitute index template definition objects. - **`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })** -- **`pipeline_substitutions` (Optional, Record)**: Pipelines to test. +- **`pipeline_substitutions` (Optional, Record)**: Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. - **`pipeline` (Optional, string)**: The pipeline to use as the default pipeline. @@ -14264,6 +14301,14 @@ To save a synchronous search, you must specify this parameter and the `keep_on_c - **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile"))**: The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. +- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project +metadata tags in a subset of Lucene query syntax. +Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). +Examples: + _alias:my-project + _alias:_origin + _alias:*pr* +Supported in serverless only. ## client.sql.translate [_sql.translate] Translate SQL into Elasticsearch queries. diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index c20284ef0..622a16fb8 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class AsyncSearch { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'async_search.delete': { path: [ 'id' @@ -121,6 +122,7 @@ export default class AsyncSearch { 'lenient', 'max_concurrent_shard_requests', 'preference', + 'project_routing', 'request_cache', 'routing', 'search_type', @@ -160,7 +162,7 @@ export default class AsyncSearch { async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['async_search.delete'] + } = this[kAcceptedParams]['async_search.delete'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
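    // Shallow-copy the user-supplied querystring (if any) so later additions
    // never mutate the caller's object.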
{ ...userQuery } : {} @@ -190,7 +192,10 @@ export default class AsyncSearch { name: 'async_search.delete', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -205,7 +210,7 @@ export default class AsyncSearch { async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['async_search.get'] + } = this[kAcceptedParams]['async_search.get'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -235,7 +240,13 @@ export default class AsyncSearch { name: 'async_search.get', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'keep_alive', + 'typed_keys', + 'wait_for_completion_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -250,7 +261,7 @@ export default class AsyncSearch { async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['async_search.status'] + } = this[kAcceptedParams]['async_search.status'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -280,7 +291,11 @@ export default class AsyncSearch { name: 'async_search.status', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'keep_alive' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -297,7 +312,7 @@ export default class AsyncSearch { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['async_search.submit'] + } = this[kAcceptedParams]['async_search.submit'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -349,7 +364,87 @@ export default class AsyncSearch { name: 'async_search.submit', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats', + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion', + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'project_routing', + 'request_cache', + 'routing', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index a7f728dba..dfbe9df2c 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Autoscaling { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'autoscaling.delete_autoscaling_policy': { path: [ 'name' @@ -86,7 +87,7 @@ export default class Autoscaling { async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['autoscaling.delete_autoscaling_policy'] + } = this[kAcceptedParams]['autoscaling.delete_autoscaling_policy'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
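// Aside on the refactor applied throughout these files: `kAcceptedParams` is
// imported from '../../client' and used as a computed property key, moving the
// per-endpoint parameter tables off the public string-keyed `acceptedParams`
// property. Its declaration is not shown in this patch; presumably it is a
// symbol, e.g. `export const kAcceptedParams = Symbol('acceptedParams')`,
// which keeps the table out of Object.keys() and JSON serialization.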
{ ...userQuery } : {} @@ -116,7 +117,12 @@ export default class Autoscaling { name: 'autoscaling.delete_autoscaling_policy', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -131,7 +137,7 @@ export default class Autoscaling { async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['autoscaling.get_autoscaling_capacity'] + } = this[kAcceptedParams]['autoscaling.get_autoscaling_capacity'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -159,7 +165,10 @@ export default class Autoscaling { const method = 'GET' const path = '/_autoscaling/capacity' const meta: TransportRequestMetadata = { - name: 'autoscaling.get_autoscaling_capacity' + name: 'autoscaling.get_autoscaling_capacity', + acceptedParams: [ + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -174,7 +183,7 @@ export default class Autoscaling { async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['autoscaling.get_autoscaling_policy'] + } = this[kAcceptedParams]['autoscaling.get_autoscaling_policy'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -204,7 +213,11 @@ export default class Autoscaling { name: 'autoscaling.get_autoscaling_policy', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -221,7 +234,7 @@ export default class Autoscaling { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['autoscaling.put_autoscaling_policy'] + } = this[kAcceptedParams]['autoscaling.put_autoscaling_policy'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -251,7 +264,13 @@ export default class Autoscaling { name: 'autoscaling.put_autoscaling_policy', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'policy', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index fa2b1e66a..06cab1229 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -102,7 +102,23 @@ export default async function BulkApi + [kAcceptedParams]: Record } export default class Cat { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'cat.aliases': { path: [ 'name' @@ -51,7 +52,6 @@ export default class Cat { ], body: [], query: [ - 'bytes', 'h', 's', 'local', @@ -77,6 +77,7 @@ export default class Cat { body: [], query: [ 'h', + 'project_routing', 's' ] }, @@ -86,7 +87,6 @@ export default class Cat { ], body: [], query: [ - 'bytes', 'fields', 'h', 's' @@ -96,7 +96,6 @@ export default class Cat { path: [], body: [], query: [ - 'time', 'ts', 'h', 's' @@ -113,12 +112,10 @@ export default class Cat { ], body: [], query: [ - 'bytes', 'expand_wildcards', 'health', 'include_unloaded_segments', 'pri', - 'time', 'master_timeout', 'h', 's' @@ -141,10 +138,8 @@ export default class Cat { body: [], query: [ 'allow_no_match', - 'bytes', 'h', - 's', - 'time' + 's' ] }, 'cat.ml_datafeeds': { @@ -155,8 +150,7 @@ export default class Cat { query: [ 'allow_no_match', 'h', - 's', - 'time' + 's' ] }, 'cat.ml_jobs': { @@ -166,10 +160,8 @@ export default class Cat { body: [], query: [ 'allow_no_match', - 'bytes', 'h', - 's', - 'time' + 's' ] }, 'cat.ml_trained_models': { @@ -179,12 +171,10 @@ export default class Cat { body: [], query: [ 'allow_no_match', - 'bytes', 'h', 's', 'from', - 'size', - 'time' + 'size' ] }, 'cat.nodeattrs': { @@ -201,13 +191,11 @@ export default class Cat { path: [], body: [], query: [ - 'bytes', 'full_id', 'include_unloaded_segments', 'h', 's', - 'master_timeout', - 'time' + 'master_timeout' ] }, 'cat.pending_tasks': { @@ -217,8 +205,7 @@ export default class Cat { 'h', 's', 'local', - 'master_timeout', - 'time' + 'master_timeout' ] }, 'cat.plugins': { @@ -239,12 +226,10 @@ export default class Cat { body: [], query: [ 'active_only', - 'bytes', 'detailed', 'index', 'h', - 's', - 'time' + 's' ] }, 'cat.repositories': { @@ -263,7 +248,6 @@ export default class Cat { ], body: [], query: [ - 'bytes', 'h', 's', 'local', @@ -276,11 +260,9 @@ export default class Cat { ], body: [], query: [ - 'bytes', 'h', 's', - 'master_timeout', - 'time' + 'master_timeout' ] }, 'cat.snapshots': { @@ -292,8 +274,7 @@ export default class Cat { 'ignore_unavailable', 'h', 's', - 'master_timeout', - 'time' + 'master_timeout' ] }, 'cat.tasks': { @@ -306,7 +287,6 @@ export default class Cat { 'parent_task_id', 'h', 's', - 'time', 'timeout', 'wait_for_completion' ] @@ -331,7 +311,6 @@ export default class Cat { query: [ 'h', 's', - 'time', 'local', 'master_timeout' ] @@ -346,7 +325,6 @@ export default class Cat { 'from', 'h', 's', - 'time', 'size' ] } @@ -363,7 +341,7 @@ export default class Cat { async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.aliases'] + } = this[kAcceptedParams]['cat.aliases'] const userQuery = params?.querystring const querystring: Record = 
userQuery != null ? { ...userQuery } : {} @@ -401,7 +379,14 @@ export default class Cat { name: 'cat.aliases', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'h', + 's', + 'expand_wildcards', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -416,7 +401,7 @@ export default class Cat { async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.allocation'] + } = this[kAcceptedParams]['cat.allocation'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -454,7 +439,14 @@ export default class Cat { name: 'cat.allocation', pathParts: { node_id: params.node_id - } + }, + acceptedParams: [ + 'node_id', + 'h', + 's', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -469,7 +461,7 @@ export default class Cat { async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.component_templates'] + } = this[kAcceptedParams]['cat.component_templates'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -507,7 +499,14 @@ export default class Cat { name: 'cat.component_templates', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'h', + 's', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -522,7 +521,7 @@ export default class Cat { async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.count'] + } = this[kAcceptedParams]['cat.count'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -560,7 +559,13 @@ export default class Cat { name: 'cat.count', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'h', + 'project_routing', + 's' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -575,7 +580,7 @@ export default class Cat { async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.fielddata'] + } = this[kAcceptedParams]['cat.fielddata'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -613,7 +618,13 @@ export default class Cat { name: 'cat.fielddata', pathParts: { fields: params.fields - } + }, + acceptedParams: [ + 'fields', + 'fields', + 'h', + 's' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -628,7 +639,7 @@ export default class Cat { async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.health'] + } = this[kAcceptedParams]['cat.health'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -656,7 +667,12 @@ export default class Cat { const method = 'GET' const path = '/_cat/health' const meta: TransportRequestMetadata = { - name: 'cat.health' + name: 'cat.health', + acceptedParams: [ + 'ts', + 'h', + 's' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -671,7 +687,7 @@ export default class Cat { async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.help'] + } = this[kAcceptedParams]['cat.help'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -699,7 +715,9 @@ export default class Cat { const method = 'GET' const path = '/_cat' const meta: TransportRequestMetadata = { - name: 'cat.help' + name: 'cat.help', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -714,7 +732,7 @@ export default class Cat { async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.indices'] + } = this[kAcceptedParams]['cat.indices'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -752,7 +770,17 @@ export default class Cat { name: 'cat.indices', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'expand_wildcards', + 'health', + 'include_unloaded_segments', + 'pri', + 'master_timeout', + 'h', + 's' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -767,7 +795,7 @@ export default class Cat { async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.master'] + } = this[kAcceptedParams]['cat.master'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -795,7 +823,13 @@ export default class Cat { const method = 'GET' const path = '/_cat/master' const meta: TransportRequestMetadata = { - name: 'cat.master' + name: 'cat.master', + acceptedParams: [ + 'h', + 's', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -810,7 +844,7 @@ export default class Cat { async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.ml_data_frame_analytics'] + } = this[kAcceptedParams]['cat.ml_data_frame_analytics'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -848,7 +882,13 @@ export default class Cat { name: 'cat.ml_data_frame_analytics', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'allow_no_match', + 'h', + 's' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -863,7 +903,7 @@ export default class Cat { async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.ml_datafeeds'] + } = this[kAcceptedParams]['cat.ml_datafeeds'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -901,7 +941,13 @@ export default class Cat { name: 'cat.ml_datafeeds', pathParts: { datafeed_id: params.datafeed_id - } + }, + acceptedParams: [ + 'datafeed_id', + 'allow_no_match', + 'h', + 's' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -916,7 +962,7 @@ export default class Cat { async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.ml_jobs'] + } = this[kAcceptedParams]['cat.ml_jobs'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -954,7 +1000,13 @@ export default class Cat { name: 'cat.ml_jobs', pathParts: { job_id: params.job_id - } + }, + acceptedParams: [ + 'job_id', + 'allow_no_match', + 'h', + 's' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -969,7 +1021,7 @@ export default class Cat { async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.ml_trained_models'] + } = this[kAcceptedParams]['cat.ml_trained_models'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1007,7 +1059,15 @@ export default class Cat { name: 'cat.ml_trained_models', pathParts: { model_id: params.model_id - } + }, + acceptedParams: [ + 'model_id', + 'allow_no_match', + 'h', + 's', + 'from', + 'size' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1022,7 +1082,7 @@ export default class Cat { async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.nodeattrs'] + } = this[kAcceptedParams]['cat.nodeattrs'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1050,7 +1110,13 @@ export default class Cat { const method = 'GET' const path = '/_cat/nodeattrs' const meta: TransportRequestMetadata = { - name: 'cat.nodeattrs' + name: 'cat.nodeattrs', + acceptedParams: [ + 'h', + 's', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1065,7 +1131,7 @@ export default class Cat { async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.nodes'] + } = this[kAcceptedParams]['cat.nodes'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1093,7 +1159,14 @@ export default class Cat { const method = 'GET' const path = '/_cat/nodes' const meta: TransportRequestMetadata = { - name: 'cat.nodes' + name: 'cat.nodes', + acceptedParams: [ + 'full_id', + 'include_unloaded_segments', + 'h', + 's', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1108,7 +1181,7 @@ export default class Cat { async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.pending_tasks'] + } = this[kAcceptedParams]['cat.pending_tasks'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
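// Recurring change in this patch: each endpoint's TransportRequestMetadata now
// carries an `acceptedParams` array naming the parameters the endpoint accepts,
// alongside `name` and `pathParts`. How the transport consumes it is not shown
// here; presumably it feeds diagnostics or telemetry rather than validation.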
{ ...userQuery } : {} @@ -1136,7 +1209,13 @@ export default class Cat { const method = 'GET' const path = '/_cat/pending_tasks' const meta: TransportRequestMetadata = { - name: 'cat.pending_tasks' + name: 'cat.pending_tasks', + acceptedParams: [ + 'h', + 's', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1151,7 +1230,7 @@ export default class Cat { async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.plugins'] + } = this[kAcceptedParams]['cat.plugins'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1179,7 +1258,14 @@ export default class Cat { const method = 'GET' const path = '/_cat/plugins' const meta: TransportRequestMetadata = { - name: 'cat.plugins' + name: 'cat.plugins', + acceptedParams: [ + 'h', + 's', + 'include_bootstrap', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1194,7 +1280,7 @@ export default class Cat { async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.recovery'] + } = this[kAcceptedParams]['cat.recovery'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1232,7 +1318,15 @@ export default class Cat { name: 'cat.recovery', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'active_only', + 'detailed', + 'index', + 'h', + 's' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1247,7 +1341,7 @@ export default class Cat { async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.repositories'] + } = this[kAcceptedParams]['cat.repositories'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1275,7 +1369,13 @@ export default class Cat { const method = 'GET' const path = '/_cat/repositories' const meta: TransportRequestMetadata = { - name: 'cat.repositories' + name: 'cat.repositories', + acceptedParams: [ + 'h', + 's', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1290,7 +1390,7 @@ export default class Cat { async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.segments'] + } = this[kAcceptedParams]['cat.segments'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1328,7 +1428,14 @@ export default class Cat { name: 'cat.segments', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'h', + 's', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1343,7 +1450,7 @@ export default class Cat { async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.shards'] + } = this[kAcceptedParams]['cat.shards'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1381,7 +1488,13 @@ export default class Cat { name: 'cat.shards', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'h', + 's', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1396,7 +1509,7 @@ export default class Cat { async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.snapshots'] + } = this[kAcceptedParams]['cat.snapshots'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1434,7 +1547,14 @@ export default class Cat { name: 'cat.snapshots', pathParts: { repository: params.repository - } + }, + acceptedParams: [ + 'repository', + 'ignore_unavailable', + 'h', + 's', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1449,7 +1569,7 @@ export default class Cat { async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.tasks'] + } = this[kAcceptedParams]['cat.tasks'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1477,7 +1597,17 @@ export default class Cat { const method = 'GET' const path = '/_cat/tasks' const meta: TransportRequestMetadata = { - name: 'cat.tasks' + name: 'cat.tasks', + acceptedParams: [ + 'actions', + 'detailed', + 'nodes', + 'parent_task_id', + 'h', + 's', + 'timeout', + 'wait_for_completion' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1492,7 +1622,7 @@ export default class Cat { async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.templates'] + } = this[kAcceptedParams]['cat.templates'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1530,7 +1660,14 @@ export default class Cat { name: 'cat.templates', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'h', + 's', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1545,7 +1682,7 @@ export default class Cat { async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.thread_pool'] + } = this[kAcceptedParams]['cat.thread_pool'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1583,7 +1720,14 @@ export default class Cat { name: 'cat.thread_pool', pathParts: { thread_pool_patterns: params.thread_pool_patterns - } + }, + acceptedParams: [ + 'thread_pool_patterns', + 'h', + 's', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1598,7 +1742,7 @@ export default class Cat { async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cat.transforms'] + } = this[kAcceptedParams]['cat.transforms'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1636,7 +1780,15 @@ export default class Cat { name: 'cat.transforms', pathParts: { transform_id: params.transform_id - } + }, + acceptedParams: [ + 'transform_id', + 'allow_no_match', + 'from', + 'h', + 's', + 'size' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index 66849dbb9..728f0077e 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ccr { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'ccr.delete_auto_follow_pattern': { path: [ 'name' @@ -212,7 +213,7 @@ export default class Ccr { async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ccr.delete_auto_follow_pattern'] + } = this[kAcceptedParams]['ccr.delete_auto_follow_pattern'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -242,7 +243,11 @@ export default class Ccr { name: 'ccr.delete_auto_follow_pattern', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -259,7 +264,7 @@ export default class Ccr { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['ccr.follow'] + } = this[kAcceptedParams]['ccr.follow'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -299,7 +304,26 @@ export default class Ccr { name: 'ccr.follow', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'data_stream_name', + 'leader_index', + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'read_poll_timeout', + 'remote_cluster', + 'settings', + 'master_timeout', + 'wait_for_active_shards' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -314,7 +338,7 @@ export default class Ccr { async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ccr.follow_info'] + } = this[kAcceptedParams]['ccr.follow_info'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -344,7 +368,11 @@ export default class Ccr { name: 'ccr.follow_info', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -359,7 +387,7 @@ export default class Ccr { async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ccr.follow_stats'] + } = this[kAcceptedParams]['ccr.follow_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -389,7 +417,11 @@ export default class Ccr { name: 'ccr.follow_stats', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -406,7 +438,7 @@ export default class Ccr { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['ccr.forget_follower'] + } = this[kAcceptedParams]['ccr.forget_follower'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -446,7 +478,15 @@ export default class Ccr { name: 'ccr.forget_follower', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'follower_cluster', + 'follower_index', + 'follower_index_uuid', + 'leader_remote_cluster', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -461,7 +501,7 @@ export default class Ccr { async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ccr.get_auto_follow_pattern'] + } = this[kAcceptedParams]['ccr.get_auto_follow_pattern'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -499,7 +539,11 @@ export default class Ccr { name: 'ccr.get_auto_follow_pattern', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -514,7 +558,7 @@ export default class Ccr { async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ccr.pause_auto_follow_pattern'] + } = this[kAcceptedParams]['ccr.pause_auto_follow_pattern'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -544,7 +588,11 @@ export default class Ccr { name: 'ccr.pause_auto_follow_pattern', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -559,7 +607,7 @@ export default class Ccr { async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ccr.pause_follow'] + } = this[kAcceptedParams]['ccr.pause_follow'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -589,7 +637,11 @@ export default class Ccr { name: 'ccr.pause_follow', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -606,7 +658,7 @@ export default class Ccr { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['ccr.put_auto_follow_pattern'] + } = this[kAcceptedParams]['ccr.put_auto_follow_pattern'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -646,7 +698,26 @@ export default class Ccr { name: 'ccr.put_auto_follow_pattern', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'remote_cluster', + 'follow_index_pattern', + 'leader_index_patterns', + 'leader_index_exclusion_patterns', + 'max_outstanding_read_requests', + 'settings', + 'max_outstanding_write_requests', + 'read_poll_timeout', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -661,7 +732,7 @@ export default class Ccr { async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ccr.resume_auto_follow_pattern'] + } = this[kAcceptedParams]['ccr.resume_auto_follow_pattern'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -691,7 +762,11 @@ export default class Ccr { name: 'ccr.resume_auto_follow_pattern', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -708,7 +783,7 @@ export default class Ccr { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['ccr.resume_follow'] + } = this[kAcceptedParams]['ccr.resume_follow'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -748,7 +823,21 @@ export default class Ccr { name: 'ccr.resume_follow', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'read_poll_timeout', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -763,7 +852,7 @@ export default class Ccr { async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ccr.stats'] + } = this[kAcceptedParams]['ccr.stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -791,7 +880,11 @@ export default class Ccr { const method = 'GET' const path = '/_ccr/stats' const meta: TransportRequestMetadata = { - name: 'ccr.stats' + name: 'ccr.stats', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -806,7 +899,7 @@ export default class Ccr { async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ccr.unfollow'] + } = this[kAcceptedParams]['ccr.unfollow'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -836,7 +929,11 @@ export default class Ccr { name: 'ccr.unfollow', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts index f611b3bf7..509053aa9 100644 --- a/src/api/api/clear_scroll.ts +++ b/src/api/api/clear_scroll.ts @@ -91,7 +91,11 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll name: 'clear_scroll', pathParts: { scroll_id: params.scroll_id - } + }, + acceptedParams: [ + 'scroll_id', + 'scroll_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts index 027c18182..9af0ffe94 100644 --- a/src/api/api/close_point_in_time.ts +++ b/src/api/api/close_point_in_time.ts @@ -87,7 +87,10 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo const method = 'DELETE' const path = '/_pit' const meta: TransportRequestMetadata = { - name: 'close_point_in_time' + name: 'close_point_in_time', + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 9ecac681d..27e032432 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Cluster { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'cluster.allocation_explain': { path: [], body: [ @@ -237,7 +238,7 @@ export default class Cluster { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['cluster.allocation_explain'] + } = this[kAcceptedParams]['cluster.allocation_explain'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -275,7 +276,20 @@ export default class Cluster { const method = body != null ? 
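// Note on the generated `acceptedParams` arrays (e.g. clear_scroll's
// ['scroll_id', 'scroll_id'] above): duplicate names are expected wherever the
// same parameter is accepted both in the URL path and in the body or query
// string, since the list appears to concatenate those name sets verbatim.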
'POST' : 'GET' const path = '/_cluster/allocation/explain' const meta: TransportRequestMetadata = { - name: 'cluster.allocation_explain' + name: 'cluster.allocation_explain', + acceptedParams: [ + 'index', + 'shard', + 'primary', + 'current_node', + 'index', + 'shard', + 'primary', + 'current_node', + 'include_disk_info', + 'include_yes_decisions', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -290,7 +304,7 @@ export default class Cluster { async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.delete_component_template'] + } = this[kAcceptedParams]['cluster.delete_component_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -320,7 +334,12 @@ export default class Cluster { name: 'cluster.delete_component_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -335,7 +354,7 @@ export default class Cluster { async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.delete_voting_config_exclusions'] + } = this[kAcceptedParams]['cluster.delete_voting_config_exclusions'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -363,7 +382,11 @@ export default class Cluster { const method = 'DELETE' const path = '/_cluster/voting_config_exclusions' const meta: TransportRequestMetadata = { - name: 'cluster.delete_voting_config_exclusions' + name: 'cluster.delete_voting_config_exclusions', + acceptedParams: [ + 'master_timeout', + 'wait_for_removal' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -378,7 +401,7 @@ export default class Cluster { async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.exists_component_template'] + } = this[kAcceptedParams]['cluster.exists_component_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -408,7 +431,12 @@ export default class Cluster { name: 'cluster.exists_component_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'local' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -423,7 +451,7 @@ export default class Cluster { async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.get_component_template'] + } = this[kAcceptedParams]['cluster.get_component_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -461,7 +489,15 @@ export default class Cluster { name: 'cluster.get_component_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'flat_settings', + 'settings_filter', + 'include_defaults', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -476,7 +512,7 @@ export default class Cluster { async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.get_settings'] + } = this[kAcceptedParams]['cluster.get_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -504,7 +540,13 @@ export default class Cluster { const method = 'GET' const path = '/_cluster/settings' const meta: TransportRequestMetadata = { - name: 'cluster.get_settings' + name: 'cluster.get_settings', + acceptedParams: [ + 'flat_settings', + 'include_defaults', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -519,7 +561,7 @@ export default class Cluster { async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.health'] + } = this[kAcceptedParams]['cluster.health'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -557,7 +599,21 @@ export default class Cluster { name: 'cluster.health', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'expand_wildcards', + 'level', + 'local', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'wait_for_events', + 'wait_for_nodes', + 'wait_for_no_initializing_shards', + 'wait_for_no_relocating_shards', + 'wait_for_status' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -572,7 +628,7 @@ export default class Cluster { async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.info'] + } = this[kAcceptedParams]['cluster.info'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -602,7 +658,10 @@ export default class Cluster { name: 'cluster.info', pathParts: { target: params.target - } + }, + acceptedParams: [ + 'target' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -617,7 +676,7 @@ export default class Cluster { async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.pending_tasks'] + } = this[kAcceptedParams]['cluster.pending_tasks'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -645,7 +704,11 @@ export default class Cluster { const method = 'GET' const path = '/_cluster/pending_tasks' const meta: TransportRequestMetadata = { - name: 'cluster.pending_tasks' + name: 'cluster.pending_tasks', + acceptedParams: [ + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -660,7 +723,7 @@ export default class Cluster { async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.post_voting_config_exclusions'] + } = this[kAcceptedParams]['cluster.post_voting_config_exclusions'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -688,7 +751,13 @@ export default class Cluster { const method = 'POST' const path = '/_cluster/voting_config_exclusions' const meta: TransportRequestMetadata = { - name: 'cluster.post_voting_config_exclusions' + name: 'cluster.post_voting_config_exclusions', + acceptedParams: [ + 'node_names', + 'node_ids', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -705,7 +774,7 @@ export default class Cluster { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['cluster.put_component_template'] + } = this[kAcceptedParams]['cluster.put_component_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -745,7 +814,17 @@ export default class Cluster { name: 'cluster.put_component_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'template', + 'version', + '_meta', + 'deprecated', + 'create', + 'cause', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -762,7 +841,7 @@ export default class Cluster { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['cluster.put_settings'] + } = this[kAcceptedParams]['cluster.put_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -800,7 +879,14 @@ export default class Cluster { const method = 'PUT' const path = '/_cluster/settings' const meta: TransportRequestMetadata = { - name: 'cluster.put_settings' + name: 'cluster.put_settings', + acceptedParams: [ + 'persistent', + 'transient', + 'flat_settings', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -815,7 +901,7 @@ export default class Cluster { async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.remote_info'] + } = this[kAcceptedParams]['cluster.remote_info'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -843,7 +929,9 @@ export default class Cluster { const method = 'GET' const path = '/_remote/info' const meta: TransportRequestMetadata = { - name: 'cluster.remote_info' + name: 'cluster.remote_info', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -860,7 +948,7 @@ export default class Cluster { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['cluster.reroute'] + } = this[kAcceptedParams]['cluster.reroute'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -898,7 +986,16 @@ export default class Cluster { const method = 'POST' const path = '/_cluster/reroute' const meta: TransportRequestMetadata = { - name: 'cluster.reroute' + name: 'cluster.reroute', + acceptedParams: [ + 'commands', + 'dry_run', + 'explain', + 'metric', + 'retry_failed', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -913,7 +1010,7 @@ export default class Cluster { async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.state'] + } = this[kAcceptedParams]['cluster.state'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -955,7 +1052,19 @@ export default class Cluster { pathParts: { metric: params.metric, index: params.index - } + }, + acceptedParams: [ + 'metric', + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'wait_for_metadata_version', + 'wait_for_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -970,7 +1079,7 @@ export default class Cluster { async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['cluster.stats'] + } = this[kAcceptedParams]['cluster.stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1008,7 +1117,12 @@ export default class Cluster { name: 'cluster.stats', pathParts: { node_id: params.node_id - } + }, + acceptedParams: [ + 'node_id', + 'include_remotes', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index be52c9af3..eb3745556 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Connector { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'connector.check_in': { path: [ 'connector_id' @@ -369,7 +370,7 @@ export default class Connector { async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.check_in'] + } = this[kAcceptedParams]['connector.check_in'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -399,7 +400,10 @@ export default class Connector { name: 'connector.check_in', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -414,7 +418,7 @@ export default class Connector { async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.delete'] + } = this[kAcceptedParams]['connector.delete'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -444,7 +448,12 @@ export default class Connector { name: 'connector.delete', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'delete_sync_jobs', + 'hard' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -459,7 +468,7 @@ export default class Connector { async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.get'] + } = this[kAcceptedParams]['connector.get'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -489,7 +498,11 @@ export default class Connector { name: 'connector.get', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'include_deleted' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -506,7 +519,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.last_sync'] + } = this[kAcceptedParams]['connector.last_sync'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -546,7 +559,22 @@ export default class Connector { name: 'connector.last_sync', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'last_access_control_sync_error', + 'last_access_control_sync_scheduled_at', + 'last_access_control_sync_status', + 'last_deleted_document_count', + 'last_incremental_sync_scheduled_at', + 'last_indexed_document_count', + 'last_seen', + 'last_sync_error', + 'last_sync_scheduled_at', + 'last_sync_status', + 'last_synced', + 'sync_cursor' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -561,7 +589,7 @@ export default class Connector { async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.list'] + } = this[kAcceptedParams]['connector.list'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -589,7 +617,16 @@ export default class Connector { const method = 'GET' const path = '/_connector' const meta: TransportRequestMetadata = { - name: 'connector.list' + name: 'connector.list', + acceptedParams: [ + 'from', + 'size', + 'index_name', + 'connector_name', + 'service_type', + 'include_deleted', + 'query' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -606,7 +643,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.post'] + } = this[kAcceptedParams]['connector.post'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -644,7 +681,15 @@ export default class Connector { const method = 'POST' const path = '/_connector' const meta: TransportRequestMetadata = { - name: 'connector.post' + name: 'connector.post', + acceptedParams: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -661,7 +706,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.put'] + } = this[kAcceptedParams]['connector.put'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -709,7 +754,16 @@ export default class Connector { name: 'connector.put', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -723,7 +777,7 @@ export default class Connector { async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.secret_delete'] + } = this[kAcceptedParams]['connector.secret_delete'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -753,7 +807,10 @@ export default class Connector { name: 'connector.secret_delete', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -767,7 +824,7 @@ export default class Connector { async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.secret_get'] + } = this[kAcceptedParams]['connector.secret_get'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -797,7 +854,10 @@ export default class Connector { name: 'connector.secret_get', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -811,7 +871,7 @@ export default class Connector { async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.secret_post'] + } = this[kAcceptedParams]['connector.secret_post'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -838,7 +898,9 @@ export default class Connector { const method = 'POST' const path = '/_connector/_secret' const meta: TransportRequestMetadata = { - name: 'connector.secret_post' + name: 'connector.secret_post', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -852,7 +914,7 @@ export default class Connector { async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.secret_put'] + } = this[kAcceptedParams]['connector.secret_put'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -882,7 +944,10 @@ export default class Connector { name: 'connector.secret_put', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -897,7 +962,7 @@ export default class Connector { async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.sync_job_cancel'] + } = this[kAcceptedParams]['connector.sync_job_cancel'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -927,7 +992,10 @@ export default class Connector { name: 'connector.sync_job_cancel', pathParts: { connector_sync_job_id: params.connector_sync_job_id - } + }, + acceptedParams: [ + 'connector_sync_job_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -942,7 +1010,7 @@ export default class Connector { async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.sync_job_check_in'] + } = this[kAcceptedParams]['connector.sync_job_check_in'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -972,7 +1040,10 @@ export default class Connector { name: 'connector.sync_job_check_in', pathParts: { connector_sync_job_id: params.connector_sync_job_id - } + }, + acceptedParams: [ + 'connector_sync_job_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -989,7 +1060,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.sync_job_claim'] + } = this[kAcceptedParams]['connector.sync_job_claim'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1029,7 +1100,12 @@ export default class Connector { name: 'connector.sync_job_claim', pathParts: { connector_sync_job_id: params.connector_sync_job_id - } + }, + acceptedParams: [ + 'connector_sync_job_id', + 'sync_cursor', + 'worker_hostname' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1044,7 +1120,7 @@ export default class Connector { async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.sync_job_delete'] + } = this[kAcceptedParams]['connector.sync_job_delete'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1074,7 +1150,10 @@ export default class Connector { name: 'connector.sync_job_delete', pathParts: { connector_sync_job_id: params.connector_sync_job_id - } + }, + acceptedParams: [ + 'connector_sync_job_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1091,7 +1170,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.sync_job_error'] + } = this[kAcceptedParams]['connector.sync_job_error'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1131,7 +1210,11 @@ export default class Connector { name: 'connector.sync_job_error', pathParts: { connector_sync_job_id: params.connector_sync_job_id - } + }, + acceptedParams: [ + 'connector_sync_job_id', + 'error' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1146,7 +1229,7 @@ export default class Connector { async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.sync_job_get'] + } = this[kAcceptedParams]['connector.sync_job_get'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1176,7 +1259,10 @@ export default class Connector { name: 'connector.sync_job_get', pathParts: { connector_sync_job_id: params.connector_sync_job_id - } + }, + acceptedParams: [ + 'connector_sync_job_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1191,7 +1277,7 @@ export default class Connector { async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.sync_job_list'] + } = this[kAcceptedParams]['connector.sync_job_list'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1219,7 +1305,14 @@ export default class Connector { const method = 'GET' const path = '/_connector/_sync_job' const meta: TransportRequestMetadata = { - name: 'connector.sync_job_list' + name: 'connector.sync_job_list', + acceptedParams: [ + 'from', + 'size', + 'status', + 'connector_id', + 'job_type' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1236,7 +1329,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.sync_job_post'] + } = this[kAcceptedParams]['connector.sync_job_post'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1273,7 +1366,12 @@ export default class Connector { const method = 'POST' const path = '/_connector/_sync_job' const meta: TransportRequestMetadata = { - name: 'connector.sync_job_post' + name: 'connector.sync_job_post', + acceptedParams: [ + 'id', + 'job_type', + 'trigger_method' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1290,7 +1388,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.sync_job_update_stats'] + } = this[kAcceptedParams]['connector.sync_job_update_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1330,7 +1428,16 @@ export default class Connector { name: 'connector.sync_job_update_stats', pathParts: { connector_sync_job_id: params.connector_sync_job_id - } + }, + acceptedParams: [ + 'connector_sync_job_id', + 'deleted_document_count', + 'indexed_document_count', + 'indexed_document_volume', + 'last_seen', + 'metadata', + 'total_document_count' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1345,7 +1452,7 @@ export default class Connector { async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['connector.update_active_filtering'] + } = this[kAcceptedParams]['connector.update_active_filtering'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1375,7 +1482,10 @@ export default class Connector { name: 'connector.update_active_filtering', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1392,7 +1502,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_api_key_id'] + } = this[kAcceptedParams]['connector.update_api_key_id'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1432,7 +1542,12 @@ export default class Connector { name: 'connector.update_api_key_id', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'api_key_id', + 'api_key_secret_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1449,7 +1564,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_configuration'] + } = this[kAcceptedParams]['connector.update_configuration'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1489,7 +1604,12 @@ export default class Connector { name: 'connector.update_configuration', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'configuration', + 'values' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1506,7 +1626,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_error'] + } = this[kAcceptedParams]['connector.update_error'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1546,7 +1666,11 @@ export default class Connector { name: 'connector.update_error', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'error' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1563,7 +1687,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_features'] + } = this[kAcceptedParams]['connector.update_features'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1603,7 +1727,11 @@ export default class Connector { name: 'connector.update_features', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'features' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1620,7 +1748,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_filtering'] + } = this[kAcceptedParams]['connector.update_filtering'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1660,7 +1788,13 @@ export default class Connector { name: 'connector.update_filtering', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'filtering', + 'rules', + 'advanced_snippet' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1677,7 +1811,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_filtering_validation'] + } = this[kAcceptedParams]['connector.update_filtering_validation'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1717,7 +1851,11 @@ export default class Connector { name: 'connector.update_filtering_validation', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'validation' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1734,7 +1872,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_index_name'] + } = this[kAcceptedParams]['connector.update_index_name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1774,7 +1912,11 @@ export default class Connector { name: 'connector.update_index_name', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'index_name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1791,7 +1933,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_name'] + } = this[kAcceptedParams]['connector.update_name'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1831,7 +1973,12 @@ export default class Connector { name: 'connector.update_name', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'name', + 'description' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1848,7 +1995,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_native'] + } = this[kAcceptedParams]['connector.update_native'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1888,7 +2035,11 @@ export default class Connector { name: 'connector.update_native', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'is_native' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1905,7 +2056,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_pipeline'] + } = this[kAcceptedParams]['connector.update_pipeline'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1945,7 +2096,11 @@ export default class Connector { name: 'connector.update_pipeline', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'pipeline' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1962,7 +2117,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_scheduling'] + } = this[kAcceptedParams]['connector.update_scheduling'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2002,7 +2157,11 @@ export default class Connector { name: 'connector.update_scheduling', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'scheduling' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2019,7 +2178,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_service_type'] + } = this[kAcceptedParams]['connector.update_service_type'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2059,7 +2218,11 @@ export default class Connector { name: 'connector.update_service_type', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'service_type' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2076,7 +2239,7 @@ export default class Connector { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['connector.update_status'] + } = this[kAcceptedParams]['connector.update_status'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2116,7 +2279,11 @@ export default class Connector { name: 'connector.update_status', pathParts: { connector_id: params.connector_id - } + }, + acceptedParams: [ + 'connector_id', + 'status' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/count.ts b/src/api/api/count.ts index 820cd85dc..e71b731aa 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -48,6 +48,7 @@ const acceptedParams: Record (this: That, params pathParts: { id: params.id, index: params.index - } + }, + acceptedParams: [ + 'id', + 'index', + 'document', + 'include_source_on_error', + 'pipeline', + 'refresh', + 'require_alias', + 'require_data_stream', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index 92cab870b..6c914f9be 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -21,18 +21,19 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } export default class DanglingIndices { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'dangling_indices.delete_dangling_index': { path: [ 'index_uuid' @@ -73,7 +74,7 @@ export default class DanglingIndices { async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['dangling_indices.delete_dangling_index'] + } = this[kAcceptedParams]['dangling_indices.delete_dangling_index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
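// The second half of the change, visible in the count.ts and create.ts hunks above:
// each endpoint's TransportRequestMetadata now carries the flat list of parameter
// names it accepts next to `name` and `pathParts`. This diff does not show what the
// transport does with the list; one plausible, assumed consumer is diagnostics,
// such as flagging request options the endpoint would not accept:

function unknownParams (params: Record<string, unknown>, accepted: string[]): string[] {
  return Object.keys(params).filter(key => !accepted.includes(key))
}

// usage sketch: unknownParams(userParams, meta.acceptedParams ?? [])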
{ ...userQuery } : {} @@ -103,7 +104,13 @@ export default class DanglingIndices { name: 'dangling_indices.delete_dangling_index', pathParts: { index_uuid: params.index_uuid - } + }, + acceptedParams: [ + 'index_uuid', + 'accept_data_loss', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -118,7 +125,7 @@ export default class DanglingIndices { async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['dangling_indices.import_dangling_index'] + } = this[kAcceptedParams]['dangling_indices.import_dangling_index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -148,7 +155,13 @@ export default class DanglingIndices { name: 'dangling_indices.import_dangling_index', pathParts: { index_uuid: params.index_uuid - } + }, + acceptedParams: [ + 'index_uuid', + 'accept_data_loss', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -163,7 +176,7 @@ export default class DanglingIndices { async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['dangling_indices.list_dangling_indices'] + } = this[kAcceptedParams]['dangling_indices.list_dangling_indices'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -191,7 +204,9 @@ export default class DanglingIndices { const method = 'GET' const path = '/_dangling' const meta: TransportRequestMetadata = { - name: 'dangling_indices.list_dangling_indices' + name: 'dangling_indices.list_dangling_indices', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts index 2e0a2c61d..a8ea798b4 100644 --- a/src/api/api/delete.ts +++ b/src/api/api/delete.ts @@ -87,7 +87,19 @@ export default async function DeleteApi (this: That, params: T.DeleteRequest, op pathParts: { id: params.id, index: params.index - } + }, + acceptedParams: [ + 'id', + 'index', + 'if_primary_term', + 'if_seq_no', + 'refresh', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index d13a0ff6a..126746671 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -125,7 +125,43 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu name: 'delete_by_query', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'max_docs', + 'query', + 'slice', + 'sort', + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'conflicts', + 'default_operator', + 'df', + 'expand_wildcards', + 'from', + 'ignore_unavailable', + 'lenient', + 'max_docs', + 'preference', + 'refresh', + 'request_cache', + 'requests_per_second', + 'routing', + 'q', + 'scroll', + 'scroll_size', + 'search_timeout', + 'search_type', + 'slices', + 'sort', + 'stats', + 'terminate_after', + 'timeout', + 'version', + 'wait_for_active_shards', + 'wait_for_completion' + ] } return await this.transport.request({ path, 
method, querystring, body, meta }, options) } diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts index edd325fa6..fa5183cdc 100644 --- a/src/api/api/delete_by_query_rethrottle.ts +++ b/src/api/api/delete_by_query_rethrottle.ts @@ -78,7 +78,11 @@ export default async function DeleteByQueryRethrottleApi (this: That, params: T. name: 'delete_by_query_rethrottle', pathParts: { task_id: params.task_id - } + }, + acceptedParams: [ + 'task_id', + 'requests_per_second' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts index 32f909b8b..85c17c0c1 100644 --- a/src/api/api/delete_script.ts +++ b/src/api/api/delete_script.ts @@ -79,7 +79,12 @@ export default async function DeleteScriptApi (this: That, params: T.DeleteScrip name: 'delete_script', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index 1dd3eb590..eb2246366 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Enrich { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'enrich.delete_policy': { path: [ 'name' @@ -96,7 +97,7 @@ export default class Enrich { async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['enrich.delete_policy'] + } = this[kAcceptedParams]['enrich.delete_policy'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -126,7 +127,11 @@ export default class Enrich { name: 'enrich.delete_policy', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -141,7 +146,7 @@ export default class Enrich { async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['enrich.execute_policy'] + } = this[kAcceptedParams]['enrich.execute_policy'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -171,7 +176,12 @@ export default class Enrich { name: 'enrich.execute_policy', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'wait_for_completion' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -186,7 +196,7 @@ export default class Enrich { async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['enrich.get_policy'] + } = this[kAcceptedParams]['enrich.get_policy'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -224,7 +234,11 @@ export default class Enrich { name: 'enrich.get_policy', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -241,7 +255,7 @@ export default class Enrich { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['enrich.put_policy'] + } = this[kAcceptedParams]['enrich.put_policy'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -281,7 +295,14 @@ export default class Enrich { name: 'enrich.put_policy', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'geo_match', + 'match', + 'range', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -296,7 +317,7 @@ export default class Enrich { async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['enrich.stats'] + } = this[kAcceptedParams]['enrich.stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -324,7 +345,10 @@ export default class Enrich { const method = 'GET' const path = '/_enrich/_stats' const meta: TransportRequestMetadata = { - name: 'enrich.stats' + name: 'enrich.stats', + acceptedParams: [ + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index 7e04c2c7d..4a0f403ff 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Eql { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'eql.delete': { path: [ 'id' @@ -91,6 +92,7 @@ export default class Eql { 'ignore_unavailable', 'keep_alive', 'keep_on_completion', + 'project_routing', 'wait_for_completion_timeout' ] } @@ -107,7 +109,7 @@ export default class Eql { async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['eql.delete'] + } = this[kAcceptedParams]['eql.delete'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
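// One functional addition rides along in the eql.search table above: its query
// parameter list gains `project_routing` (the same name is added to an indices
// table later in this patch). A usage sketch, assuming an existing client
// instance; the accepted value format is not shown in this diff:

await client.eql.search({
  index: 'my-index',
  query: 'process where true',
  project_routing: '...'
})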
{ ...userQuery } : {} @@ -137,7 +139,10 @@ export default class Eql { name: 'eql.delete', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -152,7 +157,7 @@ export default class Eql { async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['eql.get'] + } = this[kAcceptedParams]['eql.get'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -182,7 +187,12 @@ export default class Eql { name: 'eql.get', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'keep_alive', + 'wait_for_completion_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -197,7 +207,7 @@ export default class Eql { async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['eql.get_status'] + } = this[kAcceptedParams]['eql.get_status'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -227,7 +237,10 @@ export default class Eql { name: 'eql.get_status', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -244,7 +257,7 @@ export default class Eql { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['eql.search'] + } = this[kAcceptedParams]['eql.search'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -284,7 +297,37 @@ export default class Eql { name: 'eql.search', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'query', + 'case_sensitive', + 'event_category_field', + 'tiebreaker_field', + 'timestamp_field', + 'fetch_size', + 'filter', + 'keep_alive', + 'keep_on_completion', + 'wait_for_completion_timeout', + 'allow_partial_search_results', + 'allow_partial_sequence_results', + 'size', + 'fields', + 'result_position', + 'runtime_mappings', + 'max_samples_per_key', + 'allow_no_indices', + 'allow_partial_search_results', + 'allow_partial_sequence_results', + 'expand_wildcards', + 'ccs_minimize_roundtrips', + 'ignore_unavailable', + 'keep_alive', + 'keep_on_completion', + 'project_routing', + 'wait_for_completion_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index 8a7341f11..c6e87d7d1 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Esql { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'esql.async_query': { path: [], body: [ @@ -46,6 +47,7 @@ export default class Esql { 'query', 'tables', 'include_ccs_metadata', + 'include_execution_metadata', 'wait_for_completion_timeout', 'keep_alive', 'keep_on_completion' @@ 
-107,7 +109,8 @@ export default class Esql { 'profile', 'query', 'tables', - 'include_ccs_metadata' + 'include_ccs_metadata', + 'include_execution_metadata' ], query: [ 'format', @@ -131,7 +134,7 @@ export default class Esql { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['esql.async_query'] + } = this[kAcceptedParams]['esql.async_query'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -168,7 +171,25 @@ export default class Esql { const method = 'POST' const path = '/_query/async' const meta: TransportRequestMetadata = { - name: 'esql.async_query' + name: 'esql.async_query', + acceptedParams: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata', + 'include_execution_metadata', + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion', + 'allow_partial_results', + 'delimiter', + 'drop_null_columns', + 'format' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -183,7 +204,7 @@ export default class Esql { async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['esql.async_query_delete'] + } = this[kAcceptedParams]['esql.async_query_delete'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -213,7 +234,10 @@ export default class Esql { name: 'esql.async_query_delete', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -228,7 +252,7 @@ export default class Esql { async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['esql.async_query_get'] + } = this[kAcceptedParams]['esql.async_query_get'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -258,7 +282,14 @@ export default class Esql { name: 'esql.async_query_get', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'drop_null_columns', + 'format', + 'keep_alive', + 'wait_for_completion_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -273,7 +304,7 @@ export default class Esql { async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['esql.async_query_stop'] + } = this[kAcceptedParams]['esql.async_query_stop'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -303,7 +334,11 @@ export default class Esql { name: 'esql.async_query_stop', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'drop_null_columns' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -318,7 +353,7 @@ export default class Esql { async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['esql.get_query'] + } = this[kAcceptedParams]['esql.get_query'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
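// The esql.ts hunks above also thread one new body flag through both entry points:
// `include_execution_metadata` now sits alongside `include_ccs_metadata` for
// esql.async_query and esql.query. A minimal usage sketch; the client instance and
// the shape of any returned execution metadata are assumptions, since the diff only
// shows the parameter being accepted:

const resp = await client.esql.query({
  query: 'FROM my-index | LIMIT 10',
  include_execution_metadata: true
})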
{ ...userQuery } : {} @@ -348,7 +383,10 @@ export default class Esql { name: 'esql.get_query', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -363,7 +401,7 @@ export default class Esql { async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['esql.list_queries'] + } = this[kAcceptedParams]['esql.list_queries'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -391,7 +429,9 @@ export default class Esql { const method = 'GET' const path = '/_query/queries' const meta: TransportRequestMetadata = { - name: 'esql.list_queries' + name: 'esql.list_queries', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -408,7 +448,7 @@ export default class Esql { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['esql.query'] + } = this[kAcceptedParams]['esql.query'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -445,7 +485,22 @@ export default class Esql { const method = 'POST' const path = '/_query' const meta: TransportRequestMetadata = { - name: 'esql.query' + name: 'esql.query', + acceptedParams: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata', + 'include_execution_metadata', + 'format', + 'delimiter', + 'drop_null_columns', + 'allow_partial_results' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts index 3c4fad80b..931870b88 100644 --- a/src/api/api/exists.ts +++ b/src/api/api/exists.ts @@ -89,7 +89,21 @@ export default async function ExistsApi (this: That, params: T.ExistsRequest, op pathParts: { id: params.id, index: params.index - } + }, + acceptedParams: [ + 'id', + 'index', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts index 908e12489..5f03302ea 100644 --- a/src/api/api/exists_source.ts +++ b/src/api/api/exists_source.ts @@ -88,7 +88,20 @@ export default async function ExistsSourceApi (this: That, params: T.ExistsSourc pathParts: { id: params.id, index: params.index - } + }, + acceptedParams: [ + 'id', + 'index', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'version', + 'version_type' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index dbf9ae6ce..e8edd8afa 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -107,7 +107,24 @@ export default async function ExplainApi (this: That, param pathParts: { id: params.id, index: params.index - } + }, + acceptedParams: [ + 'id', + 'index', + 'query', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'lenient', + 'preference', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'q' + ] } return await 
this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/features.ts b/src/api/api/features.ts index 2c24d0490..6e6d0dac8 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -21,18 +21,19 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } export default class Features { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'features.get_features': { path: [], body: [], @@ -60,7 +61,7 @@ export default class Features { async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['features.get_features'] + } = this[kAcceptedParams]['features.get_features'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -88,7 +89,10 @@ export default class Features { const method = 'GET' const path = '/_features' const meta: TransportRequestMetadata = { - name: 'features.get_features' + name: 'features.get_features', + acceptedParams: [ + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -103,7 +107,7 @@ export default class Features { async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['features.reset_features'] + } = this[kAcceptedParams]['features.reset_features'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -131,7 +135,10 @@ export default class Features { const method = 'POST' const path = '/_features/_reset' const meta: TransportRequestMetadata = { - name: 'features.reset_features' + name: 'features.reset_features', + acceptedParams: [ + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index c2cd39b07..000a51899 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -46,7 +46,8 @@ const acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Fleet { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'fleet.delete_secret': { path: [ 'id' @@ -185,7 +186,7 @@ export default class Fleet { async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['fleet.delete_secret'] + } = this[kAcceptedParams]['fleet.delete_secret'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -215,7 +216,10 @@ export default class Fleet { name: 'fleet.delete_secret', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -229,7 +233,7 @@ export default class Fleet { async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['fleet.get_secret'] + } = this[kAcceptedParams]['fleet.get_secret'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -259,7 +263,10 @@ export default class Fleet { name: 'fleet.get_secret', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -274,7 +281,7 @@ export default class Fleet { async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['fleet.global_checkpoints'] + } = this[kAcceptedParams]['fleet.global_checkpoints'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -304,7 +311,14 @@ export default class Fleet { name: 'fleet.global_checkpoints', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'wait_for_advance', + 'wait_for_index', + 'checkpoints', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -321,7 +335,7 @@ export default class Fleet { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['fleet.msearch'] + } = this[kAcceptedParams]['fleet.msearch'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -358,7 +372,24 @@ export default class Fleet { name: 'fleet.msearch', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'searches', + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] } return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) } @@ -372,7 +403,7 @@ export default class Fleet { async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['fleet.post_secret'] + } = this[kAcceptedParams]['fleet.post_secret'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -399,7 +430,9 @@ export default class Fleet { const method = 'POST' const path = '/_fleet/secret' const meta: TransportRequestMetadata = { - name: 'fleet.post_secret' + name: 'fleet.post_secret', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -416,7 +449,7 @@ export default class Fleet { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['fleet.search'] + } = this[kAcceptedParams]['fleet.search'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -456,7 +489,85 @@ export default class Fleet { name: 'fleet.search', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats', + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'pre_filter_shard_size', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/get.ts b/src/api/api/get.ts index 5150eef02..80f660e0d 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -91,7 +91,23 @@ export default async function GetApi (this: That, params: T pathParts: { id: params.id, index: params.index - } + }, + acceptedParams: [ + 'id', + 'index', + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_exclude_vectors', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts index 82891ae01..863fc3260 100644 --- a/src/api/api/get_script.ts +++ b/src/api/api/get_script.ts @@ -78,7 +78,11 @@ export default async function GetScriptApi (this: That, params: T.GetScriptReque name: 'get_script', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts index 7cd4ea26e..e80b010bd 100644 --- a/src/api/api/get_script_context.ts +++ b/src/api/api/get_script_context.ts @@ -72,7 +72,9 @@ export default async function GetScriptContextApi (this: That, params?: T.GetScr const method = 'GET' const path = '/_script_context' const meta: TransportRequestMetadata = { - name: 'get_script_context' + name: 'get_script_context', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts index 748b7550e..3b3827019 100644 --- a/src/api/api/get_script_languages.ts +++ b/src/api/api/get_script_languages.ts @@ -72,7 +72,9 @@ export default async function GetScriptLanguagesApi (this: That, params?: T.GetS const method = 'GET' const path = 
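// The repeated names in fleet.search's list above ('explain', 'sort', 'stats',
// 'docvalue_fields', and so on) are expected: the generated metadata concatenates
// the endpoint's path, body, and query tables, and body and query legitimately
// share parameter names (compare the separate body/query arrays under eql.search
// earlier in this patch). A consumer needing the distinct set can dedupe; `meta`
// here stands for any TransportRequestMetadata value, an assumption:

const distinct = [...new Set(meta.acceptedParams ?? [])]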
'/_script_language' const meta: TransportRequestMetadata = { - name: 'get_script_languages' + name: 'get_script_languages', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index 889d459d3..3f30b7030 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -88,7 +88,20 @@ export default async function GetSourceApi (this: That, par pathParts: { id: params.id, index: params.index - } + }, + acceptedParams: [ + 'id', + 'index', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'version', + 'version_type' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index 7f74a9763..580a73e4a 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Graph { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'graph.explore': { path: [ 'index' @@ -65,7 +66,7 @@ export default class Graph { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['graph.explore'] + } = this[kAcceptedParams]['graph.explore'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -105,7 +106,16 @@ export default class Graph { name: 'graph.explore', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'connections', + 'controls', + 'query', + 'vertices', + 'routing', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts index 9620c003a..b9de60dd1 100644 --- a/src/api/api/health_report.ts +++ b/src/api/api/health_report.ts @@ -88,7 +88,13 @@ export default async function HealthReportApi (this: That, params?: T.HealthRepo name: 'health_report', pathParts: { feature: params.feature - } + }, + acceptedParams: [ + 'feature', + 'timeout', + 'verbose', + 'size' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index a6c11c643..70c52ab89 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ilm { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'ilm.delete_lifecycle': { path: [ 'name' @@ -147,7 +148,7 @@ export default class Ilm { async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ilm.delete_lifecycle'] + } = this[kAcceptedParams]['ilm.delete_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -177,7 +178,12 @@ export default class Ilm { name: 'ilm.delete_lifecycle', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'policy', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -192,7 +198,7 @@ export default class Ilm { async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ilm.explain_lifecycle'] + } = this[kAcceptedParams]['ilm.explain_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -222,7 +228,13 @@ export default class Ilm { name: 'ilm.explain_lifecycle', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'only_errors', + 'only_managed', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -237,7 +249,7 @@ export default class Ilm { async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ilm.get_lifecycle'] + } = this[kAcceptedParams]['ilm.get_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -275,7 +287,12 @@ export default class Ilm { name: 'ilm.get_lifecycle', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'policy', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -290,7 +307,7 @@ export default class Ilm { async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ilm.get_status'] + } = this[kAcceptedParams]['ilm.get_status'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -318,7 +335,9 @@ export default class Ilm { const method = 'GET' const path = '/_ilm/status' const meta: TransportRequestMetadata = { - name: 'ilm.get_status' + name: 'ilm.get_status', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -335,7 +354,7 @@ export default class Ilm { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['ilm.migrate_to_data_tiers'] + } = this[kAcceptedParams]['ilm.migrate_to_data_tiers'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -373,7 +392,13 @@ export default class Ilm { const method = 'POST' const path = '/_ilm/migrate_to_data_tiers' const meta: TransportRequestMetadata = { - name: 'ilm.migrate_to_data_tiers' + name: 'ilm.migrate_to_data_tiers', + acceptedParams: [ + 'legacy_template_to_delete', + 'node_attribute', + 'dry_run', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -390,7 +415,7 @@ export default class Ilm { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['ilm.move_to_step'] + } = this[kAcceptedParams]['ilm.move_to_step'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -430,7 +455,12 @@ export default class Ilm { name: 'ilm.move_to_step', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'current_step', + 'next_step' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -447,7 +477,7 @@ export default class Ilm { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['ilm.put_lifecycle'] + } = this[kAcceptedParams]['ilm.put_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -487,7 +517,13 @@ export default class Ilm { name: 'ilm.put_lifecycle', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'policy', + 'policy', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -502,7 +538,7 @@ export default class Ilm { async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ilm.remove_policy'] + } = this[kAcceptedParams]['ilm.remove_policy'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -532,7 +568,10 @@ export default class Ilm { name: 'ilm.remove_policy', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -547,7 +586,7 @@ export default class Ilm { async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ilm.retry'] + } = this[kAcceptedParams]['ilm.retry'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -577,7 +616,10 @@ export default class Ilm { name: 'ilm.retry', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -592,7 +634,7 @@ export default class Ilm { async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ilm.start'] + } = this[kAcceptedParams]['ilm.start'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -620,7 +662,11 @@ export default class Ilm { const method = 'POST' const path = '/_ilm/start' const meta: TransportRequestMetadata = { - name: 'ilm.start' + name: 'ilm.start', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -635,7 +681,7 @@ export default class Ilm { async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ilm.stop'] + } = this[kAcceptedParams]['ilm.stop'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
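// The hunks above attach an `acceptedParams` list to every request's metadata.
// A hedged sketch of what that enables downstream; the interface shape mirrors
// the generated code, while the real type lives in @elastic/transport:
interface RequestMetadataSketch {
  name: string
  pathParts?: Record<string, unknown>
  acceptedParams?: string[]
}

const exampleMeta: RequestMetadataSketch = {
  name: 'ilm.retry',
  pathParts: { index: 'my-index' },
  acceptedParams: ['index']
}

// e.g. a telemetry or validation layer could flag options the endpoint ignores:
function unknownOptions (params: Record<string, unknown>, accepted: string[]): string[] {
  return Object.keys(params).filter(key => !accepted.includes(key))
}
console.log(unknownOptions({ index: 'my-index', typo_param: true }, exampleMeta.acceptedParams ?? []))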
{ ...userQuery } : {} @@ -663,7 +709,11 @@ export default class Ilm { const method = 'POST' const path = '/_ilm/stop' const meta: TransportRequestMetadata = { - name: 'ilm.stop' + name: 'ilm.stop', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/index.ts b/src/api/api/index.ts index 49c2e7c9a..e86110ed8 100644 --- a/src/api/api/index.ts +++ b/src/api/api/index.ts @@ -105,7 +105,25 @@ export default async function IndexApi (this: That, params: pathParts: { id: params.id, index: params.index - } + }, + acceptedParams: [ + 'id', + 'index', + 'document', + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'op_type', + 'pipeline', + 'refresh', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'require_alias', + 'require_data_stream' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 8af4331e5..849d9b4a1 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Indices { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'indices.add_block': { path: [ 'index', @@ -794,7 +795,8 @@ export default class Indices { 'expand_wildcards', 'ignore_unavailable', 'allow_no_indices', - 'mode' + 'mode', + 'project_routing' ] }, 'indices.rollover': { @@ -969,7 +971,7 @@ export default class Indices { async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.add_block'] + } = this[kAcceptedParams]['indices.add_block'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1000,7 +1002,16 @@ export default class Indices { pathParts: { index: params.index, block: params.block - } + }, + acceptedParams: [ + 'index', + 'block', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1017,7 +1028,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.analyze'] + } = this[kAcceptedParams]['indices.analyze'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
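// The index API metadata above now advertises its optimistic-concurrency
// controls (`if_seq_no`, `if_primary_term`, `op_type`, ...). A hedged sketch,
// with illustrative node URL and sequence numbers:
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

await client.index({
  index: 'my-index',
  id: '1',
  document: { title: 'hello', views: 1 },
  if_seq_no: 10,     // only apply if the doc is still at seq_no 10...
  if_primary_term: 2 // ...and primary term 2; otherwise Elasticsearch returns 409
})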
{ ...userQuery } : {} @@ -1065,7 +1076,20 @@ export default class Indices { name: 'indices.analyze', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'analyzer', + 'attributes', + 'char_filter', + 'explain', + 'field', + 'filter', + 'normalizer', + 'text', + 'tokenizer', + 'index' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1080,7 +1104,7 @@ export default class Indices { async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.cancel_migrate_reindex'] + } = this[kAcceptedParams]['indices.cancel_migrate_reindex'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1110,7 +1134,10 @@ export default class Indices { name: 'indices.cancel_migrate_reindex', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1125,7 +1152,7 @@ export default class Indices { async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.clear_cache'] + } = this[kAcceptedParams]['indices.clear_cache'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1163,7 +1190,18 @@ export default class Indices { name: 'indices.clear_cache', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'fielddata', + 'fields', + 'ignore_unavailable', + 'query', + 'request' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1180,7 +1218,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.clone'] + } = this[kAcceptedParams]['indices.clone'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1221,7 +1259,16 @@ export default class Indices { pathParts: { index: params.index, target: params.target - } + }, + acceptedParams: [ + 'index', + 'target', + 'aliases', + 'settings', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1236,7 +1283,7 @@ export default class Indices { async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.close'] + } = this[kAcceptedParams]['indices.close'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
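// A hedged sketch for `indices.analyze`, whose accepted body and query
// parameters are enumerated above:
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

const analyzed = await client.indices.analyze({
  analyzer: 'standard',
  text: 'The Quick Brown Foxes'
})
console.log(analyzed.tokens?.map(t => t.token)) // ['the', 'quick', 'brown', 'foxes']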
{ ...userQuery } : {} @@ -1266,7 +1313,16 @@ export default class Indices { name: 'indices.close', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1283,7 +1339,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.create'] + } = this[kAcceptedParams]['indices.create'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1323,7 +1379,16 @@ export default class Indices { name: 'indices.create', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'aliases', + 'mappings', + 'settings', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1338,7 +1403,7 @@ export default class Indices { async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.create_data_stream'] + } = this[kAcceptedParams]['indices.create_data_stream'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1368,7 +1433,12 @@ export default class Indices { name: 'indices.create_data_stream', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1385,7 +1455,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.create_from'] + } = this[kAcceptedParams]['indices.create_from'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1416,7 +1486,12 @@ export default class Indices { pathParts: { source: params.source, dest: params.dest - } + }, + acceptedParams: [ + 'source', + 'dest', + 'create_from' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1431,7 +1506,7 @@ export default class Indices { async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.data_streams_stats'] + } = this[kAcceptedParams]['indices.data_streams_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1469,7 +1544,11 @@ export default class Indices { name: 'indices.data_streams_stats', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'expand_wildcards' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1484,7 +1563,7 @@ export default class Indices { async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.delete'] + } = this[kAcceptedParams]['indices.delete'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
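// A hedged sketch for `indices.create` using the `mappings` and `settings`
// body keys listed above; index name and mapping are illustrative:
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

await client.indices.create({
  index: 'my-index',
  settings: { number_of_shards: 1 },
  mappings: { properties: { title: { type: 'text' } } }
})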
{ ...userQuery } : {} @@ -1514,7 +1593,15 @@ export default class Indices { name: 'indices.delete', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1529,7 +1616,7 @@ export default class Indices { async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.delete_alias'] + } = this[kAcceptedParams]['indices.delete_alias'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1567,7 +1654,13 @@ export default class Indices { pathParts: { index: params.index, name: params.name - } + }, + acceptedParams: [ + 'index', + 'name', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1582,7 +1675,7 @@ export default class Indices { async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.delete_data_lifecycle'] + } = this[kAcceptedParams]['indices.delete_data_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1612,7 +1705,13 @@ export default class Indices { name: 'indices.delete_data_lifecycle', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1627,7 +1726,7 @@ export default class Indices { async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.delete_data_stream'] + } = this[kAcceptedParams]['indices.delete_data_stream'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1657,7 +1756,12 @@ export default class Indices { name: 'indices.delete_data_stream', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'expand_wildcards' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1672,7 +1776,7 @@ export default class Indices { async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.delete_data_stream_options'] + } = this[kAcceptedParams]['indices.delete_data_stream_options'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1702,7 +1806,13 @@ export default class Indices { name: 'indices.delete_data_stream_options', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1717,7 +1827,7 @@ export default class Indices { async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.delete_index_template'] + } = this[kAcceptedParams]['indices.delete_index_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1747,7 +1857,12 @@ export default class Indices { name: 'indices.delete_index_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1762,7 +1877,7 @@ export default class Indices { async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.delete_template'] + } = this[kAcceptedParams]['indices.delete_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1792,7 +1907,12 @@ export default class Indices { name: 'indices.delete_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1807,7 +1927,7 @@ export default class Indices { async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.disk_usage'] + } = this[kAcceptedParams]['indices.disk_usage'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1837,7 +1957,15 @@ export default class Indices { name: 'indices.disk_usage', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'run_expensive_tasks' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1854,7 +1982,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.downsample'] + } = this[kAcceptedParams]['indices.downsample'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1885,7 +2013,12 @@ export default class Indices { pathParts: { index: params.index, target_index: params.target_index - } + }, + acceptedParams: [ + 'index', + 'target_index', + 'config' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1900,7 +2033,7 @@ export default class Indices { async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.exists'] + } = this[kAcceptedParams]['indices.exists'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1930,7 +2063,16 @@ export default class Indices { name: 'indices.exists', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1945,7 +2087,7 @@ export default class Indices { async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.exists_alias'] + } = this[kAcceptedParams]['indices.exists_alias'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1983,7 +2125,15 @@ export default class Indices { pathParts: { name: params.name, index: params.index - } + }, + acceptedParams: [ + 'name', + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1998,7 +2148,7 @@ export default class Indices { async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.exists_index_template'] + } = this[kAcceptedParams]['indices.exists_index_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2028,7 +2178,13 @@ export default class Indices { name: 'indices.exists_index_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'local', + 'flat_settings', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2043,7 +2199,7 @@ export default class Indices { async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.exists_template'] + } = this[kAcceptedParams]['indices.exists_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2073,7 +2229,13 @@ export default class Indices { name: 'indices.exists_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'flat_settings', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2088,7 +2250,7 @@ export default class Indices { async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.explain_data_lifecycle'] + } = this[kAcceptedParams]['indices.explain_data_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
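// A hedged sketch for the HEAD-based `indices.exists` check above; the client
// resolves the HEAD request to a boolean instead of throwing on 404:
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

const present: boolean = await client.indices.exists({ index: 'my-index' })
if (!present) {
  await client.indices.create({ index: 'my-index' })
}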
{ ...userQuery } : {} @@ -2118,7 +2280,12 @@ export default class Indices { name: 'indices.explain_data_lifecycle', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'include_defaults', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2133,7 +2300,7 @@ export default class Indices { async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.field_usage_stats'] + } = this[kAcceptedParams]['indices.field_usage_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2163,7 +2330,14 @@ export default class Indices { name: 'indices.field_usage_stats', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'fields' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2178,7 +2352,7 @@ export default class Indices { async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.flush'] + } = this[kAcceptedParams]['indices.flush'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2216,7 +2390,15 @@ export default class Indices { name: 'indices.flush', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'force', + 'ignore_unavailable', + 'wait_if_ongoing' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2231,7 +2413,7 @@ export default class Indices { async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.forcemerge'] + } = this[kAcceptedParams]['indices.forcemerge'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2269,7 +2451,17 @@ export default class Indices { name: 'indices.forcemerge', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'max_num_segments', + 'only_expunge_deletes', + 'wait_for_completion' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2284,7 +2476,7 @@ export default class Indices { async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get'] + } = this[kAcceptedParams]['indices.get'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2314,7 +2506,18 @@ export default class Indices { name: 'indices.get', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout', + 'features' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2329,7 +2532,7 @@ export default class Indices { async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_alias'] + } = this[kAcceptedParams]['indices.get_alias'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2374,7 +2577,15 @@ export default class Indices { pathParts: { name: params.name, index: params.index - } + }, + acceptedParams: [ + 'name', + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2389,7 +2600,7 @@ export default class Indices { async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_data_lifecycle'] + } = this[kAcceptedParams]['indices.get_data_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2419,7 +2630,13 @@ export default class Indices { name: 'indices.get_data_lifecycle', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'include_defaults', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2434,7 +2651,7 @@ export default class Indices { async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_data_lifecycle_stats'] + } = this[kAcceptedParams]['indices.get_data_lifecycle_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2462,7 +2679,9 @@ export default class Indices { const method = 'GET' const path = '/_lifecycle/stats' const meta: TransportRequestMetadata = { - name: 'indices.get_data_lifecycle_stats' + name: 'indices.get_data_lifecycle_stats', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2477,7 +2696,7 @@ export default class Indices { async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_data_stream'] + } = this[kAcceptedParams]['indices.get_data_stream'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2515,7 +2734,14 @@ export default class Indices { name: 'indices.get_data_stream', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'include_defaults', + 'master_timeout', + 'verbose' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2530,7 +2756,7 @@ export default class Indices { async getDataStreamMappings (this: That, params: T.IndicesGetDataStreamMappingsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_data_stream_mappings'] + } = this[kAcceptedParams]['indices.get_data_stream_mappings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2560,7 +2786,11 @@ export default class Indices { name: 'indices.get_data_stream_mappings', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2575,7 +2805,7 @@ export default class Indices { async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_data_stream_options'] + } = this[kAcceptedParams]['indices.get_data_stream_options'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2605,7 +2835,12 @@ export default class Indices { name: 'indices.get_data_stream_options', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2620,7 +2855,7 @@ export default class Indices { async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_data_stream_settings'] + } = this[kAcceptedParams]['indices.get_data_stream_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2650,7 +2885,11 @@ export default class Indices { name: 'indices.get_data_stream_settings', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2665,7 +2904,7 @@ export default class Indices { async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_field_mapping'] + } = this[kAcceptedParams]['indices.get_field_mapping'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2703,7 +2942,15 @@ export default class Indices { pathParts: { fields: params.fields, index: params.index - } + }, + acceptedParams: [ + 'fields', + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'include_defaults' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2718,7 +2965,7 @@ export default class Indices { async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_index_template'] + } = this[kAcceptedParams]['indices.get_index_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2756,7 +3003,14 @@ export default class Indices { name: 'indices.get_index_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'local', + 'flat_settings', + 'master_timeout', + 'include_defaults' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2771,7 +3025,7 @@ export default class Indices { async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_mapping'] + } = this[kAcceptedParams]['indices.get_mapping'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2809,7 +3063,15 @@ export default class Indices { name: 'indices.get_mapping', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2824,7 +3086,7 @@ export default class Indices { async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_migrate_reindex_status'] + } = this[kAcceptedParams]['indices.get_migrate_reindex_status'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2854,7 +3116,10 @@ export default class Indices { name: 'indices.get_migrate_reindex_status', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2869,7 +3134,7 @@ export default class Indices { async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_settings'] + } = this[kAcceptedParams]['indices.get_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
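// A hedged sketch for `indices.get_mapping`; the response is keyed by the
// concrete index name:
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

const mappingResp = await client.indices.getMapping({ index: 'my-index' })
console.log(mappingResp['my-index'].mappings.properties)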
{ ...userQuery } : {} @@ -2914,7 +3179,18 @@ export default class Indices { pathParts: { index: params.index, name: params.name - } + }, + acceptedParams: [ + 'index', + 'name', + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2929,7 +3205,7 @@ export default class Indices { async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.get_template'] + } = this[kAcceptedParams]['indices.get_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2967,7 +3243,13 @@ export default class Indices { name: 'indices.get_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'flat_settings', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2984,7 +3266,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.migrate_reindex'] + } = this[kAcceptedParams]['indices.migrate_reindex'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3011,7 +3293,10 @@ export default class Indices { const method = 'POST' const path = '/_migration/reindex' const meta: TransportRequestMetadata = { - name: 'indices.migrate_reindex' + name: 'indices.migrate_reindex', + acceptedParams: [ + 'reindex' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3026,7 +3311,7 @@ export default class Indices { async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.migrate_to_data_stream'] + } = this[kAcceptedParams]['indices.migrate_to_data_stream'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3056,7 +3341,12 @@ export default class Indices { name: 'indices.migrate_to_data_stream', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3073,7 +3363,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.modify_data_stream'] + } = this[kAcceptedParams]['indices.modify_data_stream'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3110,7 +3400,10 @@ export default class Indices { const method = 'POST' const path = '/_data_stream/_modify' const meta: TransportRequestMetadata = { - name: 'indices.modify_data_stream' + name: 'indices.modify_data_stream', + acceptedParams: [ + 'actions' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3125,7 +3418,7 @@ export default class Indices { async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.open'] + } = this[kAcceptedParams]['indices.open'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3155,7 +3448,16 @@ export default class Indices { name: 'indices.open', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3170,7 +3472,7 @@ export default class Indices { async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.promote_data_stream'] + } = this[kAcceptedParams]['indices.promote_data_stream'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3200,7 +3502,11 @@ export default class Indices { name: 'indices.promote_data_stream', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3217,7 +3523,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.put_alias'] + } = this[kAcceptedParams]['indices.put_alias'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3265,7 +3571,18 @@ export default class Indices { pathParts: { index: params.index, name: params.name - } + }, + acceptedParams: [ + 'index', + 'name', + 'filter', + 'index_routing', + 'is_write_index', + 'routing', + 'search_routing', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3282,7 +3599,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.put_data_lifecycle'] + } = this[kAcceptedParams]['indices.put_data_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3322,7 +3639,16 @@ export default class Indices { name: 'indices.put_data_lifecycle', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'data_retention', + 'downsampling', + 'enabled', + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3339,7 +3665,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.put_data_stream_mappings'] + } = this[kAcceptedParams]['indices.put_data_stream_mappings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
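// A hedged sketch for `indices.put_data_lifecycle` using the `data_retention`
// body key listed above; the data stream pattern and duration are illustrative:
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

await client.indices.putDataLifecycle({
  name: 'logs-*',       // data stream name or pattern
  data_retention: '30d' // delete backing data older than 30 days
})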
{ ...userQuery } : {} @@ -3369,7 +3695,14 @@ export default class Indices { name: 'indices.put_data_stream_mappings', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'mappings', + 'dry_run', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3386,7 +3719,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.put_data_stream_options'] + } = this[kAcceptedParams]['indices.put_data_stream_options'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3426,7 +3759,14 @@ export default class Indices { name: 'indices.put_data_stream_options', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'failure_store', + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3443,7 +3783,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.put_data_stream_settings'] + } = this[kAcceptedParams]['indices.put_data_stream_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3473,7 +3813,14 @@ export default class Indices { name: 'indices.put_data_stream_settings', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'settings', + 'dry_run', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3490,7 +3837,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.put_index_template'] + } = this[kAcceptedParams]['indices.put_index_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3530,7 +3877,23 @@ export default class Indices { name: 'indices.put_index_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'allow_auto_create', + 'ignore_missing_component_templates', + 'deprecated', + 'create', + 'master_timeout', + 'cause' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3547,7 +3910,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.put_mapping'] + } = this[kAcceptedParams]['indices.put_mapping'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
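// A hedged sketch for `indices.put_index_template` with the body keys
// enumerated above (`index_patterns`, `priority`, `template`, ...):
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

await client.indices.putIndexTemplate({
  name: 'logs-template',
  index_patterns: ['logs-*'],
  priority: 100,
  template: {
    settings: { number_of_shards: 1 },
    mappings: { properties: { '@timestamp': { type: 'date' } } }
  }
})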
{ ...userQuery } : {} @@ -3587,7 +3950,27 @@ export default class Indices { name: 'indices.put_mapping', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'date_detection', + 'dynamic', + 'dynamic_date_formats', + 'dynamic_templates', + '_field_names', + '_meta', + 'numeric_detection', + 'properties', + '_routing', + '_source', + 'runtime', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'write_index_only' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3604,7 +3987,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.put_settings'] + } = this[kAcceptedParams]['indices.put_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3641,7 +4024,19 @@ export default class Indices { name: 'indices.put_settings', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'settings', + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'master_timeout', + 'preserve_existing', + 'reopen', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3658,7 +4053,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.put_template'] + } = this[kAcceptedParams]['indices.put_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3698,7 +4093,20 @@ export default class Indices { name: 'indices.put_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'aliases', + 'index_patterns', + 'mappings', + 'order', + 'settings', + 'version', + 'create', + 'master_timeout', + 'order', + 'cause' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3713,7 +4121,7 @@ export default class Indices { async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.recovery'] + } = this[kAcceptedParams]['indices.recovery'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3751,13 +4159,21 @@ export default class Indices { name: 'indices.recovery', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'active_only', + 'detailed', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. Refresh requests are synchronous and do not return a response until the refresh operation completes. Refreshes are resource-intensive. To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. 
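// A hedged sketch for `indices.put_mapping`; `properties` is one of the body
// keys enumerated above:
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

await client.indices.putMapping({
  index: 'my-index',
  properties: { views: { type: 'long' } } // add a field to the existing mapping
})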
If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. + * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. In Elastic Cloud Serverless, the default refresh interval is 5 seconds across all indices. Refresh requests are synchronous and do not return a response until the refresh operation completes. Refreshes are resource-intensive. To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh | Elasticsearch API documentation} */ async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -3766,7 +4182,7 @@ export default class Indices { async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.refresh'] + } = this[kAcceptedParams]['indices.refresh'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3804,7 +4220,13 @@ export default class Indices { name: 'indices.refresh', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3819,7 +4241,7 @@ export default class Indices { async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.reload_search_analyzers'] + } = this[kAcceptedParams]['indices.reload_search_analyzers'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3849,7 +4271,14 @@ export default class Indices { name: 'indices.reload_search_analyzers', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'resource' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3864,7 +4293,7 @@ export default class Indices { async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.remove_block'] + } = this[kAcceptedParams]['indices.remove_block'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3895,7 +4324,16 @@ export default class Indices { pathParts: { index: params.index, block: params.block - } + }, + acceptedParams: [ + 'index', + 'block', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3910,7 +4348,7 @@ export default class Indices { async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.resolve_cluster'] + } = this[kAcceptedParams]['indices.resolve_cluster'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3948,7 +4386,15 @@ export default class Indices { name: 'indices.resolve_cluster', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3963,7 +4409,7 @@ export default class Indices { async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.resolve_index'] + } = this[kAcceptedParams]['indices.resolve_index'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3993,7 +4439,15 @@ export default class Indices { name: 'indices.resolve_index', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'ignore_unavailable', + 'allow_no_indices', + 'mode', + 'project_routing' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -4010,7 +4464,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.rollover'] + } = this[kAcceptedParams]['indices.rollover'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4058,7 +4512,20 @@ export default class Indices { pathParts: { alias: params.alias, new_index: params.new_index - } + }, + acceptedParams: [ + 'alias', + 'new_index', + 'aliases', + 'conditions', + 'mappings', + 'settings', + 'dry_run', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'lazy' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -4073,7 +4540,7 @@ export default class Indices { async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.segments'] + } = this[kAcceptedParams]['indices.segments'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
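// A hedged sketch for `indices.rollover` using the `conditions` body key
// listed above; alias name and thresholds are illustrative:
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

await client.indices.rollover({
  alias: 'logs-write',
  conditions: { max_age: '7d', max_docs: 10_000_000 } // roll over when either is met
})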
{ ...userQuery } : {} @@ -4111,7 +4578,13 @@ export default class Indices { name: 'indices.segments', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -4126,7 +4599,7 @@ export default class Indices { async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.shard_stores'] + } = this[kAcceptedParams]['indices.shard_stores'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4164,7 +4637,14 @@ export default class Indices { name: 'indices.shard_stores', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'status' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -4181,7 +4661,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.shrink'] + } = this[kAcceptedParams]['indices.shrink'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4222,7 +4702,16 @@ export default class Indices { pathParts: { index: params.index, target: params.target - } + }, + acceptedParams: [ + 'index', + 'target', + 'aliases', + 'settings', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -4239,7 +4728,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.simulate_index_template'] + } = this[kAcceptedParams]['indices.simulate_index_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4269,7 +4758,15 @@ export default class Indices { name: 'indices.simulate_index_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'index_template', + 'create', + 'cause', + 'master_timeout', + 'include_defaults' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -4286,7 +4783,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.simulate_template'] + } = this[kAcceptedParams]['indices.simulate_template'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4334,7 +4831,24 @@ export default class Indices { name: 'indices.simulate_template', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'allow_auto_create', + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'ignore_missing_component_templates', + 'deprecated', + 'create', + 'cause', + 'master_timeout', + 'include_defaults' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -4351,7 +4865,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.split'] + } = this[kAcceptedParams]['indices.split'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -4392,7 +4906,16 @@ export default class Indices { pathParts: { index: params.index, target: params.target - } + }, + acceptedParams: [ + 'index', + 'target', + 'aliases', + 'settings', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -4407,7 +4930,7 @@ export default class Indices { async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['indices.stats'] + } = this[kAcceptedParams]['indices.stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4452,7 +4975,20 @@ export default class Indices { pathParts: { metric: params.metric, index: params.index - } + }, + acceptedParams: [ + 'metric', + 'index', + 'completion_fields', + 'expand_wildcards', + 'fielddata_fields', + 'fields', + 'forbid_closed_indices', + 'groups', + 'include_segment_file_sizes', + 'include_unloaded_segments', + 'level' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -4469,7 +5005,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.update_aliases'] + } = this[kAcceptedParams]['indices.update_aliases'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -4507,7 +5043,12 @@ export default class Indices { const method = 'POST' const path = '/_aliases' const meta: TransportRequestMetadata = { - name: 'indices.update_aliases' + name: 'indices.update_aliases', + acceptedParams: [ + 'actions', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -4524,7 +5065,7 @@ export default class Indices { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['indices.validate_query'] + } = this[kAcceptedParams]['indices.validate_query'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
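// A hedged sketch for `indices.update_aliases` (`actions` body key above),
// swapping an alias between two indices atomically:
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

await client.indices.updateAliases({
  actions: [
    { remove: { index: 'logs-2024', alias: 'logs-current' } },
    { add: { index: 'logs-2025', alias: 'logs-current' } }
  ]
})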
{ ...userQuery } : {} @@ -4572,7 +5113,23 @@ export default class Indices { name: 'indices.validate_query', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'query', + 'allow_no_indices', + 'all_shards', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'explain', + 'ignore_unavailable', + 'lenient', + 'rewrite', + 'q' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 05d3fab06..a15aa20e4 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Inference { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'inference.chat_completion_unified': { path: [ 'inference_id' @@ -483,7 +484,7 @@ export default class Inference { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['inference.chat_completion_unified'] + } = this[kAcceptedParams]['inference.chat_completion_unified'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -513,7 +514,12 @@ export default class Inference { name: 'inference.chat_completion_unified', pathParts: { inference_id: params.inference_id - } + }, + acceptedParams: [ + 'inference_id', + 'chat_completion_request', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -530,7 +536,7 @@ export default class Inference { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['inference.completion'] + } = this[kAcceptedParams]['inference.completion'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -570,7 +576,13 @@ export default class Inference { name: 'inference.completion', pathParts: { inference_id: params.inference_id - } + }, + acceptedParams: [ + 'inference_id', + 'input', + 'task_settings', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -585,7 +597,7 @@ export default class Inference { async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['inference.delete'] + } = this[kAcceptedParams]['inference.delete'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
@@ -623,7 +635,13 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         inference_id: params.inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'inference_id',
+        'dry_run',
+        'force'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -638,7 +656,7 @@ export default class Inference {
   async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['inference.get']
+    } = this[kAcceptedParams]['inference.get']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -680,7 +698,11 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         inference_id: params.inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'inference_id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -697,7 +719,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.inference']
+    } = this[kAcceptedParams]['inference.inference']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -745,7 +767,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         inference_id: params.inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'inference_id',
+        'query',
+        'input',
+        'input_type',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -762,7 +793,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put']
+    } = this[kAcceptedParams]['inference.put']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -800,7 +831,13 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         inference_id: params.inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'inference_id',
+        'inference_config',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -817,7 +854,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_ai21']
+    } = this[kAcceptedParams]['inference.put_ai21']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -858,7 +895,14 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         ai21_inference_id: params.ai21_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'ai21_inference_id',
+        'service',
+        'service_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -875,7 +919,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_alibabacloud']
+    } = this[kAcceptedParams]['inference.put_alibabacloud']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -916,7 +960,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         alibabacloud_inference_id: params.alibabacloud_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'alibabacloud_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -933,7 +986,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_amazonbedrock']
+    } = this[kAcceptedParams]['inference.put_amazonbedrock']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -974,7 +1027,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         amazonbedrock_inference_id: params.amazonbedrock_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'amazonbedrock_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -991,7 +1053,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_amazonsagemaker']
+    } = this[kAcceptedParams]['inference.put_amazonsagemaker']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1032,7 +1094,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         amazonsagemaker_inference_id: params.amazonsagemaker_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'amazonsagemaker_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1049,7 +1120,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_anthropic']
+    } = this[kAcceptedParams]['inference.put_anthropic']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1090,7 +1161,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         anthropic_inference_id: params.anthropic_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'anthropic_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1107,7 +1187,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_azureaistudio']
+    } = this[kAcceptedParams]['inference.put_azureaistudio']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1148,7 +1228,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         azureaistudio_inference_id: params.azureaistudio_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'azureaistudio_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1165,7 +1254,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_azureopenai']
+    } = this[kAcceptedParams]['inference.put_azureopenai']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1206,7 +1295,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         azureopenai_inference_id: params.azureopenai_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'azureopenai_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1223,7 +1321,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_cohere']
+    } = this[kAcceptedParams]['inference.put_cohere']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1264,7 +1362,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         cohere_inference_id: params.cohere_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'cohere_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1281,7 +1388,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_custom']
+    } = this[kAcceptedParams]['inference.put_custom']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1322,7 +1429,15 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         custom_inference_id: params.custom_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'custom_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1339,7 +1454,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_deepseek']
+    } = this[kAcceptedParams]['inference.put_deepseek']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1380,7 +1495,15 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         deepseek_inference_id: params.deepseek_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'deepseek_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1397,7 +1520,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_elasticsearch']
+    } = this[kAcceptedParams]['inference.put_elasticsearch']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1438,7 +1561,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         elasticsearch_inference_id: params.elasticsearch_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'elasticsearch_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1455,7 +1587,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_elser']
+    } = this[kAcceptedParams]['inference.put_elser']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1496,7 +1628,15 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         elser_inference_id: params.elser_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'elser_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1513,7 +1653,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_googleaistudio']
+    } = this[kAcceptedParams]['inference.put_googleaistudio']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1554,7 +1694,15 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         googleaistudio_inference_id: params.googleaistudio_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'googleaistudio_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1571,7 +1719,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_googlevertexai']
+    } = this[kAcceptedParams]['inference.put_googlevertexai']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1612,7 +1760,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         googlevertexai_inference_id: params.googlevertexai_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'googlevertexai_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1629,7 +1786,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_hugging_face']
+    } = this[kAcceptedParams]['inference.put_hugging_face']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1670,7 +1827,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         huggingface_inference_id: params.huggingface_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'huggingface_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1687,7 +1853,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_jinaai']
+    } = this[kAcceptedParams]['inference.put_jinaai']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1728,7 +1894,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         jinaai_inference_id: params.jinaai_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'jinaai_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1745,7 +1920,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_llama']
+    } = this[kAcceptedParams]['inference.put_llama']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1786,7 +1961,15 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         llama_inference_id: params.llama_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'llama_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1803,7 +1986,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_mistral']
+    } = this[kAcceptedParams]['inference.put_mistral']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1844,7 +2027,15 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         mistral_inference_id: params.mistral_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'mistral_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1861,7 +2052,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_openai']
+    } = this[kAcceptedParams]['inference.put_openai']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1902,7 +2093,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         openai_inference_id: params.openai_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'openai_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1919,7 +2119,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_voyageai']
+    } = this[kAcceptedParams]['inference.put_voyageai']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1960,7 +2160,16 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         voyageai_inference_id: params.voyageai_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'voyageai_inference_id',
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1977,7 +2186,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.put_watsonx']
+    } = this[kAcceptedParams]['inference.put_watsonx']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2018,7 +2227,14 @@ export default class Inference {
       pathParts: {
         task_type: params.task_type,
         watsonx_inference_id: params.watsonx_inference_id
-      }
+      },
+      acceptedParams: [
+        'task_type',
+        'watsonx_inference_id',
+        'service',
+        'service_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2035,7 +2251,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.rerank']
+    } = this[kAcceptedParams]['inference.rerank']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2075,7 +2291,14 @@ export default class Inference {
       name: 'inference.rerank',
       pathParts: {
         inference_id: params.inference_id
-      }
+      },
+      acceptedParams: [
+        'inference_id',
+        'query',
+        'input',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2092,7 +2315,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.sparse_embedding']
+    } = this[kAcceptedParams]['inference.sparse_embedding']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2132,7 +2355,13 @@ export default class Inference {
       name: 'inference.sparse_embedding',
       pathParts: {
         inference_id: params.inference_id
-      }
+      },
+      acceptedParams: [
+        'inference_id',
+        'input',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2149,7 +2378,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.stream_completion']
+    } = this[kAcceptedParams]['inference.stream_completion']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2189,7 +2418,13 @@ export default class Inference {
       name: 'inference.stream_completion',
       pathParts: {
         inference_id: params.inference_id
-      }
+      },
+      acceptedParams: [
+        'inference_id',
+        'input',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2206,7 +2441,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.text_embedding']
+    } = this[kAcceptedParams]['inference.text_embedding']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2246,7 +2481,14 @@ export default class Inference {
       name: 'inference.text_embedding',
       pathParts: {
         inference_id: params.inference_id
-      }
+      },
+      acceptedParams: [
+        'inference_id',
+        'input',
+        'input_type',
+        'task_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2263,7 +2505,7 @@ export default class Inference {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['inference.update']
+    } = this[kAcceptedParams]['inference.update']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2301,7 +2543,12 @@ export default class Inference {
       pathParts: {
         inference_id: params.inference_id,
         task_type: params.task_type
-      }
+      },
+      acceptedParams: [
+        'inference_id',
+        'task_type',
+        'inference_config'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
diff --git a/src/api/api/info.ts b/src/api/api/info.ts
index d490a4fac..225d118f6 100644
--- a/src/api/api/info.ts
+++ b/src/api/api/info.ts
@@ -72,7 +72,9 @@ export default async function InfoApi (this: That, params?: T.InfoRequest, optio
     const method = 'GET'
     const path = '/'
     const meta: TransportRequestMetadata = {
-      name: 'info'
+      name: 'info',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts
index 732135791..2214ab286 100644
--- a/src/api/api/ingest.ts
+++ b/src/api/api/ingest.ts
@@ -21,20 +21,21 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
+import { kAcceptedParams } from '../../client'

 interface That {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>
 }

 const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

 export default class Ingest {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>

   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
+    this[kAcceptedParams] = {
       'ingest.delete_geoip_database': {
         path: [
           'id'
@@ -134,7 +135,8 @@ export default class Ingest {
           'on_failure',
           'processors',
           'version',
-          'deprecated'
+          'deprecated',
+          'field_access_pattern'
         ],
         query: [
           'master_timeout',
@@ -167,7 +169,7 @@ export default class Ingest {
   async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ingest.delete_geoip_database']
+    } = this[kAcceptedParams]['ingest.delete_geoip_database']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -197,7 +199,12 @@ export default class Ingest {
       name: 'ingest.delete_geoip_database',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'master_timeout',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -212,7 +219,7 @@ export default class Ingest {
   async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ingest.delete_ip_location_database']
+    } = this[kAcceptedParams]['ingest.delete_ip_location_database']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -242,7 +249,12 @@ export default class Ingest {
       name: 'ingest.delete_ip_location_database',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'master_timeout',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -257,7 +269,7 @@ export default class Ingest {
   async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ingest.delete_pipeline']
+    } = this[kAcceptedParams]['ingest.delete_pipeline']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -287,7 +299,12 @@ export default class Ingest {
       name: 'ingest.delete_pipeline',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'master_timeout',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -302,7 +319,7 @@ export default class Ingest {
   async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ingest.geo_ip_stats']
+    } = this[kAcceptedParams]['ingest.geo_ip_stats']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -330,7 +347,9 @@ export default class Ingest {
     const method = 'GET'
     const path = '/_ingest/geoip/stats'
     const meta: TransportRequestMetadata = {
-      name: 'ingest.geo_ip_stats'
+      name: 'ingest.geo_ip_stats',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -345,7 +364,7 @@ export default class Ingest {
   async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ingest.get_geoip_database']
+    } = this[kAcceptedParams]['ingest.get_geoip_database']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -383,7 +402,10 @@ export default class Ingest {
       name: 'ingest.get_geoip_database',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -398,7 +420,7 @@ export default class Ingest {
   async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ingest.get_ip_location_database']
+    } = this[kAcceptedParams]['ingest.get_ip_location_database']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -436,7 +458,10 @@ export default class Ingest {
       name: 'ingest.get_ip_location_database',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -451,7 +476,7 @@ export default class Ingest {
   async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ingest.get_pipeline']
+    } = this[kAcceptedParams]['ingest.get_pipeline']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -489,7 +514,12 @@ export default class Ingest {
       name: 'ingest.get_pipeline',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'master_timeout',
+        'summary'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -504,7 +534,7 @@ export default class Ingest {
   async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ingest.processor_grok']
+    } = this[kAcceptedParams]['ingest.processor_grok']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -532,7 +562,9 @@ export default class Ingest {
     const method = 'GET'
     const path = '/_ingest/processor/grok'
     const meta: TransportRequestMetadata = {
-      name: 'ingest.processor_grok'
+      name: 'ingest.processor_grok',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -549,7 +581,7 @@ export default class Ingest {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ingest.put_geoip_database']
+    } = this[kAcceptedParams]['ingest.put_geoip_database']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -589,7 +621,14 @@ export default class Ingest {
       name: 'ingest.put_geoip_database',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'name',
+        'maxmind',
+        'master_timeout',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -606,7 +645,7 @@ export default class Ingest {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ingest.put_ip_location_database']
+    } = this[kAcceptedParams]['ingest.put_ip_location_database']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -636,7 +675,13 @@ export default class Ingest {
       name: 'ingest.put_ip_location_database',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'configuration',
+        'master_timeout',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -653,7 +698,7 @@ export default class Ingest {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ingest.put_pipeline']
+    } = this[kAcceptedParams]['ingest.put_pipeline']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -693,7 +738,20 @@ export default class Ingest {
       name: 'ingest.put_pipeline',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        '_meta',
+        'description',
+        'on_failure',
+        'processors',
+        'version',
+        'deprecated',
+        'field_access_pattern',
+        'master_timeout',
+        'timeout',
+        'if_version'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -710,7 +768,7 @@ export default class Ingest {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ingest.simulate']
+    } = this[kAcceptedParams]['ingest.simulate']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -757,7 +815,13 @@ export default class Ingest {
       name: 'ingest.simulate',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'docs',
+        'pipeline',
+        'verbose'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts
index 2fd7b462b..737050353 100644
--- a/src/api/api/knn_search.ts
+++ b/src/api/api/knn_search.ts
@@ -75,7 +75,10 @@ export default async function KnnSearchApi (this: That, params?: T.TODO, options
       name: 'knn_search',
       pathParts: {
         index: params.index
-      }
+      },
+      acceptedParams: [
+        'index'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
diff --git a/src/api/api/license.ts b/src/api/api/license.ts
index b9072b867..6b501374b 100644
--- a/src/api/api/license.ts
+++ b/src/api/api/license.ts
@@ -21,20 +21,21 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
+import { kAcceptedParams } from '../../client'

 interface That {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>
 }

 const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

 export default class License {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>

   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
+    this[kAcceptedParams] = {
       'license.delete': {
         path: [],
         body: [],
@@ -104,7 +105,7 @@ export default class License {
   async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['license.delete']
+    } = this[kAcceptedParams]['license.delete']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -132,7 +133,11 @@ export default class License {
     const method = 'DELETE'
     const path = '/_license'
     const meta: TransportRequestMetadata = {
-      name: 'license.delete'
+      name: 'license.delete',
+      acceptedParams: [
+        'master_timeout',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -147,7 +152,7 @@ export default class License {
   async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['license.get']
+    } = this[kAcceptedParams]['license.get']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -175,7 +180,11 @@ export default class License {
     const method = 'GET'
     const path = '/_license'
     const meta: TransportRequestMetadata = {
-      name: 'license.get'
+      name: 'license.get',
+      acceptedParams: [
+        'accept_enterprise',
+        'local'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -190,7 +199,7 @@ export default class License {
   async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['license.get_basic_status']
+    } = this[kAcceptedParams]['license.get_basic_status']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -218,7 +227,9 @@ export default class License {
     const method = 'GET'
     const path = '/_license/basic_status'
     const meta: TransportRequestMetadata = {
-      name: 'license.get_basic_status'
+      name: 'license.get_basic_status',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -233,7 +244,7 @@ export default class License {
   async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['license.get_trial_status']
+    } = this[kAcceptedParams]['license.get_trial_status']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -261,7 +272,9 @@ export default class License {
     const method = 'GET'
     const path = '/_license/trial_status'
     const meta: TransportRequestMetadata = {
-      name: 'license.get_trial_status'
+      name: 'license.get_trial_status',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -278,7 +291,7 @@ export default class License {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['license.post']
+    } = this[kAcceptedParams]['license.post']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -316,7 +329,14 @@ export default class License {
     const method = 'PUT'
     const path = '/_license'
     const meta: TransportRequestMetadata = {
-      name: 'license.post'
+      name: 'license.post',
+      acceptedParams: [
+        'license',
+        'licenses',
+        'acknowledge',
+        'master_timeout',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -331,7 +351,7 @@ export default class License {
   async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['license.post_start_basic']
+    } = this[kAcceptedParams]['license.post_start_basic']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -359,7 +379,12 @@ export default class License {
     const method = 'POST'
     const path = '/_license/start_basic'
     const meta: TransportRequestMetadata = {
-      name: 'license.post_start_basic'
+      name: 'license.post_start_basic',
+      acceptedParams: [
+        'acknowledge',
+        'master_timeout',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -374,7 +399,7 @@ export default class License {
   async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['license.post_start_trial']
+    } = this[kAcceptedParams]['license.post_start_trial']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -402,7 +427,12 @@ export default class License {
     const method = 'POST'
     const path = '/_license/start_trial'
     const meta: TransportRequestMetadata = {
-      name: 'license.post_start_trial'
+      name: 'license.post_start_trial',
+      acceptedParams: [
+        'acknowledge',
+        'type',
+        'master_timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts
index f3909331b..4832165c8 100644
--- a/src/api/api/logstash.ts
+++ b/src/api/api/logstash.ts
@@ -21,20 +21,21 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
+import { kAcceptedParams } from '../../client'

 interface That {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>
 }

 const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

 export default class Logstash {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>

   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
+    this[kAcceptedParams] = {
       'logstash.delete_pipeline': {
         path: [
           'id'
@@ -71,7 +72,7 @@ export default class Logstash {
   async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['logstash.delete_pipeline']
+    } = this[kAcceptedParams]['logstash.delete_pipeline']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -101,7 +102,10 @@ export default class Logstash {
       name: 'logstash.delete_pipeline',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -116,7 +120,7 @@ export default class Logstash {
   async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['logstash.get_pipeline']
+    } = this[kAcceptedParams]['logstash.get_pipeline']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -154,7 +158,10 @@ export default class Logstash {
       name: 'logstash.get_pipeline',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -171,7 +178,7 @@ export default class Logstash {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['logstash.put_pipeline']
+    } = this[kAcceptedParams]['logstash.put_pipeline']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -201,7 +208,11 @@ export default class Logstash {
       name: 'logstash.put_pipeline',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'pipeline'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts
index c09cdecaf..3857df0f9 100644
--- a/src/api/api/mget.ts
+++ b/src/api/api/mget.ts
@@ -111,7 +111,21 @@ export default async function MgetApi (this: That, params?
       name: 'mget',
       pathParts: {
         index: params.index
-      }
+      },
+      acceptedParams: [
+        'index',
+        'docs',
+        'ids',
+        'force_synthetic_source',
+        'preference',
+        'realtime',
+        'refresh',
+        'routing',
+        '_source',
+        '_source_excludes',
+        '_source_includes',
+        'stored_fields'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts
index 6c1cbb7bf..dd61c399d 100644
--- a/src/api/api/migration.ts
+++ b/src/api/api/migration.ts
@@ -21,18 +21,19 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
+import { kAcceptedParams } from '../../client'

 interface That {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>
 }

 export default class Migration {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>

   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
+    this[kAcceptedParams] = {
       'migration.deprecations': {
         path: [
           'index'
@@ -63,7 +64,7 @@ export default class Migration {
   async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['migration.deprecations']
+    } = this[kAcceptedParams]['migration.deprecations']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -101,7 +102,10 @@ export default class Migration {
       name: 'migration.deprecations',
       pathParts: {
         index: params.index
-      }
+      },
+      acceptedParams: [
+        'index'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -116,7 +120,7 @@ export default class Migration {
   async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['migration.get_feature_upgrade_status']
+    } = this[kAcceptedParams]['migration.get_feature_upgrade_status']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -144,7 +148,9 @@ export default class Migration {
     const method = 'GET'
     const path = '/_migration/system_features'
     const meta: TransportRequestMetadata = {
-      name: 'migration.get_feature_upgrade_status'
+      name: 'migration.get_feature_upgrade_status',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -159,7 +165,7 @@ export default class Migration {
   async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['migration.post_feature_upgrade']
+    } = this[kAcceptedParams]['migration.post_feature_upgrade']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -187,7 +193,9 @@ export default class Migration {
     const method = 'POST'
     const path = '/_migration/system_features'
     const meta: TransportRequestMetadata = {
-      name: 'migration.post_feature_upgrade'
+      name: 'migration.post_feature_upgrade',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts
index 55cec2dd8..a7c86168c 100644
--- a/src/api/api/ml.ts
+++ b/src/api/api/ml.ts
@@ -21,20 +21,21 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
+import { kAcceptedParams } from '../../client'

 interface That {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>
 }

 const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

 export default class Ml {
   transport: Transport
-  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>

   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
+    this[kAcceptedParams] = {
       'ml.clear_trained_model_deployment_cache': {
         path: [
           'model_id'
@@ -984,7 +985,7 @@ export default class Ml {
   async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.clear_trained_model_deployment_cache']
+    } = this[kAcceptedParams]['ml.clear_trained_model_deployment_cache']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1014,7 +1015,10 @@ export default class Ml {
       name: 'ml.clear_trained_model_deployment_cache',
       pathParts: {
         model_id: params.model_id
-      }
+      },
+      acceptedParams: [
+        'model_id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1031,7 +1035,7 @@ export default class Ml {
       path: acceptedPath,
      body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.close_job']
+    } = this[kAcceptedParams]['ml.close_job']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1071,7 +1075,16 @@ export default class Ml {
       name: 'ml.close_job',
       pathParts: {
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'allow_no_match',
+        'force',
+        'timeout',
+        'allow_no_match',
+        'force',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1086,7 +1099,7 @@ export default class Ml {
   async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.delete_calendar']
+    } = this[kAcceptedParams]['ml.delete_calendar']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1116,7 +1129,10 @@ export default class Ml {
       name: 'ml.delete_calendar',
       pathParts: {
         calendar_id: params.calendar_id
-      }
+      },
+      acceptedParams: [
+        'calendar_id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1131,7 +1147,7 @@ export default class Ml {
   async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.delete_calendar_event']
+    } = this[kAcceptedParams]['ml.delete_calendar_event']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1162,7 +1178,11 @@ export default class Ml {
       pathParts: {
         calendar_id: params.calendar_id,
         event_id: params.event_id
-      }
+      },
+      acceptedParams: [
+        'calendar_id',
+        'event_id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1177,7 +1197,7 @@ export default class Ml {
   async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.delete_calendar_job']
+    } = this[kAcceptedParams]['ml.delete_calendar_job']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1208,7 +1228,11 @@ export default class Ml {
       pathParts: {
         calendar_id: params.calendar_id,
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'calendar_id',
+        'job_id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1223,7 +1247,7 @@ export default class Ml {
   async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.delete_data_frame_analytics']
+    } = this[kAcceptedParams]['ml.delete_data_frame_analytics']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1253,7 +1277,12 @@ export default class Ml {
       name: 'ml.delete_data_frame_analytics',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'force',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1268,7 +1297,7 @@ export default class Ml {
   async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.delete_datafeed']
+    } = this[kAcceptedParams]['ml.delete_datafeed']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1298,7 +1327,11 @@ export default class Ml {
       name: 'ml.delete_datafeed',
       pathParts: {
         datafeed_id: params.datafeed_id
-      }
+      },
+      acceptedParams: [
+        'datafeed_id',
+        'force'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1315,7 +1348,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.delete_expired_data']
+    } = this[kAcceptedParams]['ml.delete_expired_data']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1363,7 +1396,14 @@ export default class Ml {
       name: 'ml.delete_expired_data',
       pathParts: {
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'requests_per_second',
+        'timeout',
+        'requests_per_second',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1378,7 +1418,7 @@ export default class Ml {
   async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.delete_filter']
+    } = this[kAcceptedParams]['ml.delete_filter']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1408,7 +1448,10 @@ export default class Ml {
       name: 'ml.delete_filter',
       pathParts: {
         filter_id: params.filter_id
-      }
+      },
+      acceptedParams: [
+        'filter_id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1423,7 +1466,7 @@ export default class Ml {
   async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.delete_forecast']
+    } = this[kAcceptedParams]['ml.delete_forecast']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1461,7 +1504,13 @@ export default class Ml {
       pathParts: {
         job_id: params.job_id,
         forecast_id: params.forecast_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'forecast_id',
+        'allow_no_forecasts',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1476,7 +1525,7 @@ export default class Ml {
   async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.delete_job']
+    } = this[kAcceptedParams]['ml.delete_job']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1506,7 +1555,13 @@ export default class Ml {
       name: 'ml.delete_job',
       pathParts: {
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'force',
+        'delete_user_annotations',
+        'wait_for_completion'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1521,7 +1576,7 @@ export default class Ml {
   async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.delete_model_snapshot']
+    } = this[kAcceptedParams]['ml.delete_model_snapshot']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1552,7 +1607,11 @@ export default class Ml {
       pathParts: {
         job_id: params.job_id,
         snapshot_id: params.snapshot_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'snapshot_id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1567,7 +1626,7 @@ export default class Ml {
   async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.delete_trained_model']
+    } = this[kAcceptedParams]['ml.delete_trained_model']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1597,7 +1656,12 @@ export default class Ml {
       name: 'ml.delete_trained_model',
       pathParts: {
         model_id: params.model_id
-      }
+      },
+      acceptedParams: [
+        'model_id',
+        'force',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1612,7 +1676,7 @@ export default class Ml {
   async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.delete_trained_model_alias']
+    } = this[kAcceptedParams]['ml.delete_trained_model_alias']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1643,7 +1707,11 @@ export default class Ml {
       pathParts: {
         model_alias: params.model_alias,
         model_id: params.model_id
-      }
+      },
+      acceptedParams: [
+        'model_alias',
+        'model_id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1660,7 +1728,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.estimate_model_memory']
+    } = this[kAcceptedParams]['ml.estimate_model_memory']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1698,7 +1766,12 @@ export default class Ml {
     const method = 'POST'
     const path = '/_ml/anomaly_detectors/_estimate_model_memory'
     const meta: TransportRequestMetadata = {
-      name: 'ml.estimate_model_memory'
+      name: 'ml.estimate_model_memory',
+      acceptedParams: [
+        'analysis_config',
+        'max_bucket_cardinality',
+        'overall_cardinality'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1715,7 +1788,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.evaluate_data_frame']
+    } = this[kAcceptedParams]['ml.evaluate_data_frame']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1752,7 +1825,12 @@ export default class Ml {
     const method = 'POST'
     const path = '/_ml/data_frame/_evaluate'
     const meta: TransportRequestMetadata = {
-      name: 'ml.evaluate_data_frame'
+      name: 'ml.evaluate_data_frame',
+      acceptedParams: [
+        'evaluation',
+        'index',
+        'query'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1769,7 +1847,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.explain_data_frame_analytics']
+    } = this[kAcceptedParams]['ml.explain_data_frame_analytics']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1817,7 +1895,18 @@ export default class Ml {
       name: 'ml.explain_data_frame_analytics',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'source',
+        'dest',
+        'analysis',
+        'description',
+        'model_memory_limit',
+        'max_num_threads',
+        'analyzed_fields',
+        'allow_lazy_start'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1834,7 +1923,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.flush_job']
+    } = this[kAcceptedParams]['ml.flush_job']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1874,7 +1963,20 @@ export default class Ml {
       name: 'ml.flush_job',
       pathParts: {
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'advance_time',
+        'calc_interim',
+        'end',
+        'skip_time',
+        'start',
+        'advance_time',
+        'calc_interim',
+        'end',
+        'skip_time',
+        'start'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1891,7 +1993,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.forecast']
+    } = this[kAcceptedParams]['ml.forecast']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1931,7 +2033,16 @@ export default class Ml {
       name: 'ml.forecast',
       pathParts: {
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'duration',
+        'expires_in',
+        'max_model_memory',
+        'duration',
+        'expires_in',
+        'max_model_memory'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -1948,7 +2059,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.get_buckets']
+    } = this[kAcceptedParams]['ml.get_buckets']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -1996,7 +2107,28 @@ export default class Ml {
       pathParts: {
         job_id: params.job_id,
         timestamp: params.timestamp
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'timestamp',
+        'anomaly_score',
+        'desc',
+        'end',
+        'exclude_interim',
+        'expand',
+        'page',
+        'sort',
+        'start',
+        'anomaly_score',
+        'desc',
+        'end',
+        'exclude_interim',
+        'expand',
+        'from',
+        'size',
+        'sort',
+        'start'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2011,7 +2143,7 @@ export default class Ml {
   async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.get_calendar_events']
+    } = this[kAcceptedParams]['ml.get_calendar_events']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2041,7 +2173,15 @@ export default class Ml {
       name: 'ml.get_calendar_events',
       pathParts: {
         calendar_id: params.calendar_id
-      }
+      },
+      acceptedParams: [
+        'calendar_id',
+        'end',
+        'from',
+        'job_id',
+        'size',
+        'start'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2058,7 +2198,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.get_calendars']
+    } = this[kAcceptedParams]['ml.get_calendars']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2106,7 +2246,13 @@ export default class Ml {
       name: 'ml.get_calendars',
       pathParts: {
         calendar_id: params.calendar_id
-      }
+      },
+      acceptedParams: [
+        'calendar_id',
+        'page',
+        'from',
+        'size'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2123,7 +2269,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.get_categories']
+    } = this[kAcceptedParams]['ml.get_categories']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2171,7 +2317,15 @@ export default class Ml {
       pathParts: {
         job_id: params.job_id,
         category_id: params.category_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'category_id',
+        'page',
+        'from',
+        'partition_field_value',
+        'size'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2186,7 +2340,7 @@ export default class Ml {
   async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.get_data_frame_analytics']
+    } = this[kAcceptedParams]['ml.get_data_frame_analytics']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2224,7 +2378,14 @@ export default class Ml {
       name: 'ml.get_data_frame_analytics',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'allow_no_match',
+        'from',
+        'size',
+        'exclude_generated'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2239,7 +2400,7 @@ export default class Ml {
   async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.get_data_frame_analytics_stats']
+    } = this[kAcceptedParams]['ml.get_data_frame_analytics_stats']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2277,7 +2438,14 @@ export default class Ml {
       name: 'ml.get_data_frame_analytics_stats',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'allow_no_match',
+        'from',
+        'size',
+        'verbose'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2292,7 +2460,7 @@ export default class Ml {
   async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.get_datafeed_stats']
+    } = this[kAcceptedParams]['ml.get_datafeed_stats']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2330,7 +2498,11 @@ export default class Ml {
       name: 'ml.get_datafeed_stats',
       pathParts: {
         datafeed_id: params.datafeed_id
-      }
+      },
+      acceptedParams: [
+        'datafeed_id',
+        'allow_no_match'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2345,7 +2517,7 @@ export default class Ml {
   async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.get_datafeeds']
+    } = this[kAcceptedParams]['ml.get_datafeeds']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2383,7 +2555,12 @@ export default class Ml {
       name: 'ml.get_datafeeds',
       pathParts: {
         datafeed_id: params.datafeed_id
-      }
+      },
+      acceptedParams: [
+        'datafeed_id',
+        'allow_no_match',
+        'exclude_generated'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2398,7 +2575,7 @@ export default class Ml {
   async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.get_filters']
+    } = this[kAcceptedParams]['ml.get_filters']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -2436,7 +2613,12 @@ export default class Ml {
       name: 'ml.get_filters',
       pathParts: {
         filter_id: params.filter_id
-      }
+      },
+      acceptedParams: [
+        'filter_id',
+        'from',
+        'size'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -2453,7 +2635,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.get_influencers']
+    } = this[kAcceptedParams]['ml.get_influencers']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} @@ -2493,7 +2675,19 @@ export default class Ml { name: 'ml.get_influencers', pathParts: { job_id: params.job_id - } + }, + acceptedParams: [ + 'job_id', + 'page', + 'desc', + 'end', + 'exclude_interim', + 'influencer_score', + 'from', + 'size', + 'sort', + 'start' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2508,7 +2702,7 @@ export default class Ml { async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ml.get_job_stats'] + } = this[kAcceptedParams]['ml.get_job_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2546,7 +2740,11 @@ export default class Ml { name: 'ml.get_job_stats', pathParts: { job_id: params.job_id - } + }, + acceptedParams: [ + 'job_id', + 'allow_no_match' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2561,7 +2759,7 @@ export default class Ml { async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ml.get_jobs'] + } = this[kAcceptedParams]['ml.get_jobs'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2599,7 +2797,12 @@ export default class Ml { name: 'ml.get_jobs', pathParts: { job_id: params.job_id - } + }, + acceptedParams: [ + 'job_id', + 'allow_no_match', + 'exclude_generated' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2614,7 +2817,7 @@ export default class Ml { async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ml.get_memory_stats'] + } = this[kAcceptedParams]['ml.get_memory_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2652,7 +2855,12 @@ export default class Ml { name: 'ml.get_memory_stats', pathParts: { node_id: params.node_id - } + }, + acceptedParams: [ + 'node_id', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2667,7 +2875,7 @@ export default class Ml { async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ml.get_model_snapshot_upgrade_stats'] + } = this[kAcceptedParams]['ml.get_model_snapshot_upgrade_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2698,7 +2906,12 @@ export default class Ml { pathParts: { job_id: params.job_id, snapshot_id: params.snapshot_id - } + }, + acceptedParams: [ + 'job_id', + 'snapshot_id', + 'allow_no_match' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2715,7 +2928,7 @@ export default class Ml { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['ml.get_model_snapshots'] + } = this[kAcceptedParams]['ml.get_model_snapshots'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2763,7 +2976,22 @@ export default class Ml { pathParts: { job_id: params.job_id, snapshot_id: params.snapshot_id - } + }, + acceptedParams: [ + 'job_id', + 'snapshot_id', + 'desc', + 'end', + 'page', + 'sort', + 'start', + 'desc', + 'end', + 'from', + 'size', + 'sort', + 'start' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2780,7 +3008,7 @@ export default class Ml { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['ml.get_overall_buckets'] + } = this[kAcceptedParams]['ml.get_overall_buckets'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2820,7 +3048,24 @@ export default class Ml { name: 'ml.get_overall_buckets', pathParts: { job_id: params.job_id - } + }, + acceptedParams: [ + 'job_id', + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n', + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2837,7 +3082,7 @@ export default class Ml { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['ml.get_records'] + } = this[kAcceptedParams]['ml.get_records'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2877,7 +3122,25 @@ export default class Ml { name: 'ml.get_records', pathParts: { job_id: params.job_id - } + }, + acceptedParams: [ + 'job_id', + 'desc', + 'end', + 'exclude_interim', + 'page', + 'record_score', + 'sort', + 'start', + 'desc', + 'end', + 'exclude_interim', + 'from', + 'record_score', + 'size', + 'sort', + 'start' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2892,7 +3155,7 @@ export default class Ml { async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ml.get_trained_models'] + } = this[kAcceptedParams]['ml.get_trained_models'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2930,7 +3193,17 @@ export default class Ml { name: 'ml.get_trained_models', pathParts: { model_id: params.model_id - } + }, + acceptedParams: [ + 'model_id', + 'allow_no_match', + 'decompress_definition', + 'exclude_generated', + 'from', + 'include', + 'size', + 'tags' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2945,7 +3218,7 @@ export default class Ml { async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ml.get_trained_models_stats'] + } = this[kAcceptedParams]['ml.get_trained_models_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
@@ -2983,7 +3256,13 @@ export default class Ml {
       name: 'ml.get_trained_models_stats',
       pathParts: {
         model_id: params.model_id
-      }
+      },
+      acceptedParams: [
+        'model_id',
+        'allow_no_match',
+        'from',
+        'size'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3000,7 +3279,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.infer_trained_model']
+    } = this[kAcceptedParams]['ml.infer_trained_model']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3040,7 +3319,13 @@ export default class Ml {
       name: 'ml.infer_trained_model',
       pathParts: {
         model_id: params.model_id
-      }
+      },
+      acceptedParams: [
+        'model_id',
+        'docs',
+        'inference_config',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3055,7 +3340,7 @@ export default class Ml {
   async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.info']
+    } = this[kAcceptedParams]['ml.info']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3083,7 +3368,9 @@ export default class Ml {
     const method = 'GET'
     const path = '/_ml/info'
     const meta: TransportRequestMetadata = {
-      name: 'ml.info'
+      name: 'ml.info',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3100,7 +3387,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.open_job']
+    } = this[kAcceptedParams]['ml.open_job']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3140,7 +3427,12 @@ export default class Ml {
       name: 'ml.open_job',
       pathParts: {
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'timeout',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3157,7 +3449,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.post_calendar_events']
+    } = this[kAcceptedParams]['ml.post_calendar_events']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3197,7 +3489,11 @@ export default class Ml {
       name: 'ml.post_calendar_events',
       pathParts: {
         calendar_id: params.calendar_id
-      }
+      },
+      acceptedParams: [
+        'calendar_id',
+        'events'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3214,7 +3510,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.post_data']
+    } = this[kAcceptedParams]['ml.post_data']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3244,7 +3540,13 @@ export default class Ml {
       name: 'ml.post_data',
       pathParts: {
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'data',
+        'reset_end',
+        'reset_start'
+      ]
     }
     return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options)
   }
@@ -3261,7 +3563,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.preview_data_frame_analytics']
+    } = this[kAcceptedParams]['ml.preview_data_frame_analytics']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3309,7 +3611,11 @@ export default class Ml {
       name: 'ml.preview_data_frame_analytics',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'config'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3326,7 +3632,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.preview_datafeed']
+    } = this[kAcceptedParams]['ml.preview_datafeed']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3374,7 +3680,14 @@ export default class Ml {
       name: 'ml.preview_datafeed',
       pathParts: {
         datafeed_id: params.datafeed_id
-      }
+      },
+      acceptedParams: [
+        'datafeed_id',
+        'datafeed_config',
+        'job_config',
+        'start',
+        'end'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3391,7 +3704,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.put_calendar']
+    } = this[kAcceptedParams]['ml.put_calendar']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3431,7 +3744,12 @@ export default class Ml {
       name: 'ml.put_calendar',
       pathParts: {
         calendar_id: params.calendar_id
-      }
+      },
+      acceptedParams: [
+        'calendar_id',
+        'job_ids',
+        'description'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3446,7 +3764,7 @@ export default class Ml {
   async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.put_calendar_job']
+    } = this[kAcceptedParams]['ml.put_calendar_job']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3477,7 +3795,11 @@ export default class Ml {
       pathParts: {
         calendar_id: params.calendar_id,
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'calendar_id',
+        'job_id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3494,7 +3816,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.put_data_frame_analytics']
+    } = this[kAcceptedParams]['ml.put_data_frame_analytics']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3534,7 +3856,21 @@ export default class Ml {
       name: 'ml.put_data_frame_analytics',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'allow_lazy_start',
+        'analysis',
+        'analyzed_fields',
+        'description',
+        'dest',
+        'max_num_threads',
+        '_meta',
+        'model_memory_limit',
+        'source',
+        'headers',
+        'version'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3551,7 +3887,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.put_datafeed']
+    } = this[kAcceptedParams]['ml.put_datafeed']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3591,7 +3927,30 @@ export default class Ml {
       name: 'ml.put_datafeed',
       pathParts: {
         datafeed_id: params.datafeed_id
-      }
+      },
+      acceptedParams: [
+        'datafeed_id',
+        'aggregations',
+        'aggs',
+        'chunking_config',
+        'delayed_data_check_config',
+        'frequency',
+        'indices',
+        'indexes',
+        'indices_options',
+        'job_id',
+        'max_empty_searches',
+        'query',
+        'query_delay',
+        'runtime_mappings',
+        'script_fields',
+        'scroll_size',
+        'headers',
+        'allow_no_indices',
+        'expand_wildcards',
+        'ignore_throttled',
+        'ignore_unavailable'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3608,7 +3967,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.put_filter']
+    } = this[kAcceptedParams]['ml.put_filter']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3648,7 +4007,12 @@ export default class Ml {
       name: 'ml.put_filter',
       pathParts: {
         filter_id: params.filter_id
-      }
+      },
+      acceptedParams: [
+        'filter_id',
+        'description',
+        'items'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3665,7 +4029,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.put_job']
+    } = this[kAcceptedParams]['ml.put_job']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3705,7 +4069,30 @@ export default class Ml {
       name: 'ml.put_job',
       pathParts: {
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'allow_lazy_open',
+        'analysis_config',
+        'analysis_limits',
+        'background_persist_interval',
+        'custom_settings',
+        'daily_model_snapshot_retention_after_days',
+        'data_description',
+        'datafeed_config',
+        'description',
+        'job_id',
+        'groups',
+        'model_plot_config',
+        'model_snapshot_retention_days',
+        'renormalization_window_days',
+        'results_index_name',
+        'results_retention_days',
+        'allow_no_indices',
+        'expand_wildcards',
+        'ignore_throttled',
+        'ignore_unavailable'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3722,7 +4109,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.put_trained_model']
+    } = this[kAcceptedParams]['ml.put_trained_model']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3762,7 +4149,23 @@ export default class Ml {
       name: 'ml.put_trained_model',
       pathParts: {
         model_id: params.model_id
-      }
+      },
+      acceptedParams: [
+        'model_id',
+        'compressed_definition',
+        'definition',
+        'description',
+        'inference_config',
+        'input',
+        'metadata',
+        'model_type',
+        'model_size_bytes',
+        'platform_architecture',
+        'tags',
+        'prefix_strings',
+        'defer_definition_decompression',
+        'wait_for_completion'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3777,7 +4180,7 @@ export default class Ml {
   async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.put_trained_model_alias']
+    } = this[kAcceptedParams]['ml.put_trained_model_alias']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3808,7 +4211,12 @@ export default class Ml {
       pathParts: {
         model_alias: params.model_alias,
         model_id: params.model_id
-      }
+      },
+      acceptedParams: [
+        'model_alias',
+        'model_id',
+        'reassign'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3825,7 +4233,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.put_trained_model_definition_part']
+    } = this[kAcceptedParams]['ml.put_trained_model_definition_part']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3866,7 +4274,14 @@ export default class Ml {
       pathParts: {
         model_id: params.model_id,
         part: params.part
-      }
+      },
+      acceptedParams: [
+        'model_id',
+        'part',
+        'definition',
+        'total_definition_length',
+        'total_parts'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3883,7 +4298,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.put_trained_model_vocabulary']
+    } = this[kAcceptedParams]['ml.put_trained_model_vocabulary']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3923,7 +4338,13 @@ export default class Ml {
       name: 'ml.put_trained_model_vocabulary',
       pathParts: {
         model_id: params.model_id
-      }
+      },
+      acceptedParams: [
+        'model_id',
+        'vocabulary',
+        'merges',
+        'scores'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3938,7 +4359,7 @@ export default class Ml {
   async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.reset_job']
+    } = this[kAcceptedParams]['ml.reset_job']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -3968,7 +4389,12 @@ export default class Ml {
       name: 'ml.reset_job',
       pathParts: {
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'wait_for_completion',
+        'delete_user_annotations'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -3985,7 +4411,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.revert_model_snapshot']
+    } = this[kAcceptedParams]['ml.revert_model_snapshot']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4026,7 +4452,13 @@ export default class Ml {
       pathParts: {
         job_id: params.job_id,
         snapshot_id: params.snapshot_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'snapshot_id',
+        'delete_intervening_results',
+        'delete_intervening_results'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4041,7 +4473,7 @@ export default class Ml {
   async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.set_upgrade_mode']
+    } = this[kAcceptedParams]['ml.set_upgrade_mode']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4069,7 +4501,11 @@ export default class Ml {
     const method = 'POST'
     const path = '/_ml/set_upgrade_mode'
     const meta: TransportRequestMetadata = {
-      name: 'ml.set_upgrade_mode'
+      name: 'ml.set_upgrade_mode',
+      acceptedParams: [
+        'enabled',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4084,7 +4520,7 @@ export default class Ml {
   async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.start_data_frame_analytics']
+    } = this[kAcceptedParams]['ml.start_data_frame_analytics']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4114,7 +4550,11 @@ export default class Ml {
       name: 'ml.start_data_frame_analytics',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4131,7 +4571,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.start_datafeed']
+    } = this[kAcceptedParams]['ml.start_datafeed']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4171,7 +4611,16 @@ export default class Ml {
       name: 'ml.start_datafeed',
       pathParts: {
         datafeed_id: params.datafeed_id
-      }
+      },
+      acceptedParams: [
+        'datafeed_id',
+        'end',
+        'start',
+        'timeout',
+        'end',
+        'start',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4188,7 +4637,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.start_trained_model_deployment']
+    } = this[kAcceptedParams]['ml.start_trained_model_deployment']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4228,7 +4677,19 @@ export default class Ml {
       name: 'ml.start_trained_model_deployment',
       pathParts: {
         model_id: params.model_id
-      }
+      },
+      acceptedParams: [
+        'model_id',
+        'adaptive_allocations',
+        'cache_size',
+        'deployment_id',
+        'number_of_allocations',
+        'priority',
+        'queue_capacity',
+        'threads_per_allocation',
+        'timeout',
+        'wait_for'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4243,7 +4704,7 @@ export default class Ml {
   async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.stop_data_frame_analytics']
+    } = this[kAcceptedParams]['ml.stop_data_frame_analytics']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4273,7 +4734,13 @@ export default class Ml {
       name: 'ml.stop_data_frame_analytics',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'allow_no_match',
+        'force',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4290,7 +4757,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.stop_datafeed']
+    } = this[kAcceptedParams]['ml.stop_datafeed']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4330,7 +4797,16 @@ export default class Ml {
       name: 'ml.stop_datafeed',
       pathParts: {
         datafeed_id: params.datafeed_id
-      }
+      },
+      acceptedParams: [
+        'datafeed_id',
+        'allow_no_match',
+        'force',
+        'timeout',
+        'allow_no_match',
+        'force',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4345,7 +4821,7 @@ export default class Ml {
   async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.stop_trained_model_deployment']
+    } = this[kAcceptedParams]['ml.stop_trained_model_deployment']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4375,7 +4851,12 @@ export default class Ml {
       name: 'ml.stop_trained_model_deployment',
       pathParts: {
         model_id: params.model_id
-      }
+      },
+      acceptedParams: [
+        'model_id',
+        'allow_no_match',
+        'force'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4392,7 +4873,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.update_data_frame_analytics']
+    } = this[kAcceptedParams]['ml.update_data_frame_analytics']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4432,7 +4913,14 @@ export default class Ml {
       name: 'ml.update_data_frame_analytics',
       pathParts: {
         id: params.id
-      }
+      },
+      acceptedParams: [
+        'id',
+        'description',
+        'model_memory_limit',
+        'max_num_threads',
+        'allow_lazy_start'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4449,7 +4937,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.update_datafeed']
+    } = this[kAcceptedParams]['ml.update_datafeed']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4489,7 +4977,28 @@ export default class Ml {
       name: 'ml.update_datafeed',
       pathParts: {
         datafeed_id: params.datafeed_id
-      }
+      },
+      acceptedParams: [
+        'datafeed_id',
+        'aggregations',
+        'chunking_config',
+        'delayed_data_check_config',
+        'frequency',
+        'indices',
+        'indexes',
+        'indices_options',
+        'job_id',
+        'max_empty_searches',
+        'query',
+        'query_delay',
+        'runtime_mappings',
+        'script_fields',
+        'scroll_size',
+        'allow_no_indices',
+        'expand_wildcards',
+        'ignore_throttled',
+        'ignore_unavailable'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4506,7 +5015,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.update_filter']
+    } = this[kAcceptedParams]['ml.update_filter']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4546,7 +5055,13 @@ export default class Ml {
       name: 'ml.update_filter',
       pathParts: {
         filter_id: params.filter_id
-      }
+      },
+      acceptedParams: [
+        'filter_id',
+        'add_items',
+        'description',
+        'remove_items'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4563,7 +5078,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.update_job']
+    } = this[kAcceptedParams]['ml.update_job']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4603,7 +5118,25 @@ export default class Ml {
       name: 'ml.update_job',
       pathParts: {
         job_id: params.job_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'allow_lazy_open',
+        'analysis_limits',
+        'background_persist_interval',
+        'custom_settings',
+        'categorization_filters',
+        'description',
+        'model_plot_config',
+        'model_prune_window',
+        'daily_model_snapshot_retention_after_days',
+        'model_snapshot_retention_days',
+        'renormalization_window_days',
+        'results_retention_days',
+        'groups',
+        'detectors',
+        'per_partition_categorization'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4620,7 +5153,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.update_model_snapshot']
+    } = this[kAcceptedParams]['ml.update_model_snapshot']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4661,7 +5194,13 @@ export default class Ml {
       pathParts: {
         job_id: params.job_id,
         snapshot_id: params.snapshot_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'snapshot_id',
+        'description',
+        'retain'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4678,7 +5217,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.update_trained_model_deployment']
+    } = this[kAcceptedParams]['ml.update_trained_model_deployment']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4718,7 +5257,13 @@ export default class Ml {
       name: 'ml.update_trained_model_deployment',
       pathParts: {
         model_id: params.model_id
-      }
+      },
+      acceptedParams: [
+        'model_id',
+        'number_of_allocations',
+        'adaptive_allocations',
+        'number_of_allocations'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4733,7 +5278,7 @@ export default class Ml {
   async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['ml.upgrade_job_snapshot']
+    } = this[kAcceptedParams]['ml.upgrade_job_snapshot']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4764,7 +5309,13 @@ export default class Ml {
       pathParts: {
         job_id: params.job_id,
         snapshot_id: params.snapshot_id
-      }
+      },
+      acceptedParams: [
+        'job_id',
+        'snapshot_id',
+        'wait_for_completion',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4781,7 +5332,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.validate']
+    } = this[kAcceptedParams]['ml.validate']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4819,7 +5370,18 @@ export default class Ml {
     const method = 'POST'
     const path = '/_ml/anomaly_detectors/_validate'
     const meta: TransportRequestMetadata = {
-      name: 'ml.validate'
+      name: 'ml.validate',
+      acceptedParams: [
+        'job_id',
+        'analysis_config',
+        'analysis_limits',
+        'data_description',
+        'description',
+        'model_plot',
+        'model_snapshot_id',
+        'model_snapshot_retention_days',
+        'results_index_name'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -4836,7 +5398,7 @@ export default class Ml {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['ml.validate_detector']
+    } = this[kAcceptedParams]['ml.validate_detector']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -4863,7 +5425,10 @@ export default class Ml {
     const method = 'POST'
     const path = '/_ml/anomaly_detectors/_validate/detector'
     const meta: TransportRequestMetadata = {
-      name: 'ml.validate_detector'
+      name: 'ml.validate_detector',
+      acceptedParams: [
+        'detector'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts
index 8974e0c87..f8abf74f6 100644
--- a/src/api/api/monitoring.ts
+++ b/src/api/api/monitoring.ts
@@ -21,20 +21,21 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
+import { kAcceptedParams } from '../../client'

 interface That {
   transport: Transport
-  acceptedParams: Record
+  [kAcceptedParams]: Record
 }

 const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

 export default class Monitoring {
   transport: Transport
-  acceptedParams: Record
+  [kAcceptedParams]: Record
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
+    this[kAcceptedParams] = {
       'monitoring.bulk': {
         path: [
           'type'
         ],
@@ -63,7 +64,7 @@ export default class Monitoring {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['monitoring.bulk']
+    } = this[kAcceptedParams]['monitoring.bulk']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -93,7 +94,14 @@ export default class Monitoring {
       name: 'monitoring.bulk',
       pathParts: {
         type: params.type
-      }
+      },
+      acceptedParams: [
+        'type',
+        'operations',
+        'system_id',
+        'system_api_version',
+        'interval'
+      ]
     }
     return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options)
   }
 }
diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts
index 6a0d5b078..d508b62b5 100644
--- a/src/api/api/msearch.ts
+++ b/src/api/api/msearch.ts
@@ -47,6 +47,7 @@ const acceptedParams: Record
+  [kAcceptedParams]: Record
 }

 const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

 export default class Nodes {
   transport: Transport
-  acceptedParams: Record
+  [kAcceptedParams]: Record
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
+    this[kAcceptedParams] = {
       'nodes.clear_repositories_metering_archive': {
         path: [
           'node_id',
           'max_archive_version'
@@ -129,7 +130,7 @@ export default class Nodes {
   async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['nodes.clear_repositories_metering_archive']
+    } = this[kAcceptedParams]['nodes.clear_repositories_metering_archive']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -160,7 +161,11 @@ export default class Nodes {
       pathParts: {
         node_id: params.node_id,
         max_archive_version: params.max_archive_version
-      }
+      },
+      acceptedParams: [
+        'node_id',
+        'max_archive_version'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -175,7 +180,7 @@ export default class Nodes {
   async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['nodes.get_repositories_metering_info']
+    } = this[kAcceptedParams]['nodes.get_repositories_metering_info']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -205,7 +210,10 @@ export default class Nodes {
       name: 'nodes.get_repositories_metering_info',
       pathParts: {
         node_id: params.node_id
-      }
+      },
+      acceptedParams: [
+        'node_id'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -220,7 +228,7 @@ export default class Nodes {
   async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['nodes.hot_threads']
+    } = this[kAcceptedParams]['nodes.hot_threads']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -258,7 +266,17 @@ export default class Nodes {
       name: 'nodes.hot_threads',
       pathParts: {
         node_id: params.node_id
-      }
+      },
+      acceptedParams: [
+        'node_id',
+        'ignore_idle_threads',
+        'interval',
+        'snapshots',
+        'threads',
+        'timeout',
+        'type',
+        'sort'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -273,7 +291,7 @@ export default class Nodes {
   async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['nodes.info']
+    } = this[kAcceptedParams]['nodes.info']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -318,7 +336,13 @@ export default class Nodes {
       pathParts: {
         node_id: params.node_id,
         metric: params.metric
-      }
+      },
+      acceptedParams: [
+        'node_id',
+        'metric',
+        'flat_settings',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -335,7 +359,7 @@ export default class Nodes {
       path: acceptedPath,
       body: acceptedBody,
       query: acceptedQuery
-    } = this.acceptedParams['nodes.reload_secure_settings']
+    } = this[kAcceptedParams]['nodes.reload_secure_settings']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -383,7 +407,12 @@ export default class Nodes {
       name: 'nodes.reload_secure_settings',
       pathParts: {
         node_id: params.node_id
-      }
+      },
+      acceptedParams: [
+        'node_id',
+        'secure_settings_password',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -398,7 +427,7 @@ export default class Nodes {
   async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['nodes.stats']
+    } = this[kAcceptedParams]['nodes.stats']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -450,7 +479,21 @@ export default class Nodes {
         node_id: params.node_id,
         metric: params.metric,
         index_metric: params.index_metric
-      }
+      },
+      acceptedParams: [
+        'node_id',
+        'metric',
+        'index_metric',
+        'completion_fields',
+        'fielddata_fields',
+        'fields',
+        'groups',
+        'include_segment_file_sizes',
+        'level',
+        'timeout',
+        'types',
+        'include_unloaded_segments'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -465,7 +508,7 @@ export default class Nodes {
   async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['nodes.usage']
+    } = this[kAcceptedParams]['nodes.usage']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -510,7 +553,12 @@ export default class Nodes {
       pathParts: {
         node_id: params.node_id,
         metric: params.metric
-      }
+      },
+      acceptedParams: [
+        'node_id',
+        'metric',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
 }
diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts
index a93f89d65..1a863b1e6 100644
--- a/src/api/api/open_point_in_time.ts
+++ b/src/api/api/open_point_in_time.ts
@@ -40,6 +40,7 @@ const acceptedParams: Record
+  [kAcceptedParams]: Record
 }

 export default class Profiling {
   transport: Transport
-  acceptedParams: Record
+  [kAcceptedParams]: Record
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
+    this[kAcceptedParams] = {
       'profiling.flamegraph': {
         path: [],
         body: [],
@@ -66,7 +67,7 @@ export default class Profiling {
   async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['profiling.flamegraph']
+    } = this[kAcceptedParams]['profiling.flamegraph']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -93,7 +94,9 @@ export default class Profiling {
     const method = 'POST'
     const path = '/_profiling/flamegraph'
     const meta: TransportRequestMetadata = {
-      name: 'profiling.flamegraph'
+      name: 'profiling.flamegraph',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -108,7 +111,7 @@ export default class Profiling {
   async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['profiling.stacktraces']
+    } = this[kAcceptedParams]['profiling.stacktraces']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -135,7 +138,9 @@ export default class Profiling {
     const method = 'POST'
     const path = '/_profiling/stacktraces'
     const meta: TransportRequestMetadata = {
-      name: 'profiling.stacktraces'
+      name: 'profiling.stacktraces',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -150,7 +155,7 @@ export default class Profiling {
   async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['profiling.status']
+    } = this[kAcceptedParams]['profiling.status']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -177,7 +182,9 @@ export default class Profiling {
     const method = 'GET'
     const path = '/_profiling/status'
     const meta: TransportRequestMetadata = {
-      name: 'profiling.status'
+      name: 'profiling.status',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
@@ -192,7 +199,7 @@ export default class Profiling {
   async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['profiling.topn_functions']
+    } = this[kAcceptedParams]['profiling.topn_functions']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -219,7 +226,9 @@ export default class Profiling {
     const method = 'POST'
     const path = '/_profiling/topn/functions'
     const meta: TransportRequestMetadata = {
-      name: 'profiling.topn_functions'
+      name: 'profiling.topn_functions',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
diff --git a/src/api/api/project.ts b/src/api/api/project.ts
new file mode 100644
index 000000000..e8717a7a4
--- /dev/null
+++ b/src/api/api/project.ts
@@ -0,0 +1,88 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestMetadata,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import { kAcceptedParams } from '../../client'
+
+interface That {
+  transport: Transport
+  [kAcceptedParams]: Record
+}
+
+export default class Project {
+  transport: Transport
+  [kAcceptedParams]: Record
+  constructor (transport: Transport) {
+    this.transport = transport
+    this[kAcceptedParams] = {
+      'project.tags': {
+        path: [],
+        body: [],
+        query: []
+      }
+    }
+  }
+
+  /**
+   * Return tags defined for the project
+   */
+  async tags (this: That, params?: T.ProjectTagsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ProjectTagsResponse>
+  async tags (this: That, params?: T.ProjectTagsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ProjectTagsResponse, unknown>>
+  async tags (this: That, params?: T.ProjectTagsRequest, options?: TransportRequestOptions): Promise<T.ProjectTagsResponse>
+  async tags (this: That, params?: T.ProjectTagsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['project.tags']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_project/tags'
+    const meta: TransportRequestMetadata = {
+      name: 'project.tags',
+      acceptedParams: [
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+}
diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts
index 6762be248..0b62ea3b4 100644
--- a/src/api/api/put_script.ts
+++ b/src/api/api/put_script.ts
@@ -105,7 +105,15 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque
     pathParts: {
       id: params.id,
       context: params.context
-    }
+    },
+    acceptedParams: [
+      'id',
+      'context',
+      'script',
+      'context',
+      'master_timeout',
+      'timeout'
+    ]
   }
   return await this.transport.request({ path, method, querystring, body, meta }, options)
 }
diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts
index ba218714f..cbde4cf9e 100644
--- a/src/api/api/query_rules.ts
+++ b/src/api/api/query_rules.ts
@@ -21,20 +21,21 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
+import { kAcceptedParams } from '../../client'

 interface That {
   transport: Transport
-  acceptedParams: Record
+  [kAcceptedParams]: Record
 }

 const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

 export default class QueryRules {
   transport: Transport
-  acceptedParams: Record
+  [kAcceptedParams]: Record
   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
+    this[kAcceptedParams] = {
       'query_rules.delete_rule': {
         path: [
           'ruleset_id',
@@ -117,7 +118,7 @@ export default class QueryRules {
   async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<any> {
     const {
       path: acceptedPath
-    } = this.acceptedParams['query_rules.delete_rule']
+    } = this[kAcceptedParams]['query_rules.delete_rule']

     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
{ ...userQuery } : {} @@ -239,7 +247,11 @@ export default class QueryRules { pathParts: { ruleset_id: params.ruleset_id, rule_id: params.rule_id - } + }, + acceptedParams: [ + 'ruleset_id', + 'rule_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -254,7 +266,7 @@ export default class QueryRules { async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['query_rules.get_ruleset'] + } = this[kAcceptedParams]['query_rules.get_ruleset'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -284,7 +296,10 @@ export default class QueryRules { name: 'query_rules.get_ruleset', pathParts: { ruleset_id: params.ruleset_id - } + }, + acceptedParams: [ + 'ruleset_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -299,7 +314,7 @@ export default class QueryRules { async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['query_rules.list_rulesets'] + } = this[kAcceptedParams]['query_rules.list_rulesets'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -327,7 +342,11 @@ export default class QueryRules { const method = 'GET' const path = '/_query_rules' const meta: TransportRequestMetadata = { - name: 'query_rules.list_rulesets' + name: 'query_rules.list_rulesets', + acceptedParams: [ + 'from', + 'size' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -344,7 +363,7 @@ export default class QueryRules { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['query_rules.put_rule'] + } = this[kAcceptedParams]['query_rules.put_rule'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -385,7 +404,15 @@ export default class QueryRules { pathParts: { ruleset_id: params.ruleset_id, rule_id: params.rule_id - } + }, + acceptedParams: [ + 'ruleset_id', + 'rule_id', + 'type', + 'criteria', + 'actions', + 'priority' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -402,7 +429,7 @@ export default class QueryRules { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['query_rules.put_ruleset'] + } = this[kAcceptedParams]['query_rules.put_ruleset'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -442,7 +469,11 @@ export default class QueryRules { name: 'query_rules.put_ruleset', pathParts: { ruleset_id: params.ruleset_id - } + }, + acceptedParams: [ + 'ruleset_id', + 'rules' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -459,7 +490,7 @@ export default class QueryRules { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['query_rules.test'] + } = this[kAcceptedParams]['query_rules.test'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -499,7 +530,11 @@ export default class QueryRules { name: 'query_rules.test', pathParts: { ruleset_id: params.ruleset_id - } + }, + acceptedParams: [ + 'ruleset_id', + 'match_criteria' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts index cd7773c31..1ddb4082a 100644 --- a/src/api/api/rank_eval.ts +++ b/src/api/api/rank_eval.ts @@ -105,7 +105,16 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest name: 'rank_eval', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'requests', + 'metric', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'search_type' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 66f6aec87..c6f7188ac 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -102,7 +102,24 @@ export default async function ReindexApi (this: That, params: T.ReindexRequest, const method = 'POST' const path = '/_reindex' const meta: TransportRequestMetadata = { - name: 'reindex' + name: 'reindex', + acceptedParams: [ + 'conflicts', + 'dest', + 'max_docs', + 'script', + 'size', + 'source', + 'refresh', + 'requests_per_second', + 'scroll', + 'slices', + 'max_docs', + 'timeout', + 'wait_for_active_shards', + 'wait_for_completion', + 'require_alias' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts index 9c6b73a92..8ef9381e2 100644 --- a/src/api/api/reindex_rethrottle.ts +++ b/src/api/api/reindex_rethrottle.ts @@ -78,7 +78,11 @@ export default async function ReindexRethrottleApi (this: That, params: T.Reinde name: 'reindex_rethrottle', pathParts: { task_id: params.task_id - } + }, + acceptedParams: [ + 'task_id', + 'requests_per_second' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index b08178668..4b16d3248 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -101,7 +101,14 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re name: 'render_search_template', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'id', + 'file', + 'params', + 'source' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index bc6df9264..4f216dc29 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Rollup { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'rollup.delete_job': { path: [ 'id' @@ -124,7 +125,7 @@ export default class Rollup { async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise { 
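      // The per-operation path/body/query tables are read through the
      // module-scoped `kAcceptedParams` symbol imported from '../../client'
      // instead of the former plain `acceptedParams` property. Symbol-keyed
      // properties are skipped by `for...in` and `Object.keys`, which keeps
      // these tables off the enumerable property surface while the client
      // and its API namespaces can still share them. A minimal sketch of the
      // pattern, assuming the symbol is declared in the client module
      // roughly like so (the description string is an assumption):
      //
      //   export const kAcceptedParams = Symbol('acceptedParams')
      //
      //   class Example {
      //     [kAcceptedParams]: Record<string, { path: string[] }>
      //     constructor () {
      //       this[kAcceptedParams] = { 'example.get': { path: ['id'] } }
      //     }
      //   }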
const { path: acceptedPath - } = this.acceptedParams['rollup.delete_job'] + } = this[kAcceptedParams]['rollup.delete_job'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -154,7 +155,10 @@ export default class Rollup { name: 'rollup.delete_job', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -169,7 +173,7 @@ export default class Rollup { async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['rollup.get_jobs'] + } = this[kAcceptedParams]['rollup.get_jobs'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -207,7 +211,10 @@ export default class Rollup { name: 'rollup.get_jobs', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -222,7 +229,7 @@ export default class Rollup { async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['rollup.get_rollup_caps'] + } = this[kAcceptedParams]['rollup.get_rollup_caps'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -260,7 +267,10 @@ export default class Rollup { name: 'rollup.get_rollup_caps', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -275,7 +285,7 @@ export default class Rollup { async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['rollup.get_rollup_index_caps'] + } = this[kAcceptedParams]['rollup.get_rollup_index_caps'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -305,7 +315,10 @@ export default class Rollup { name: 'rollup.get_rollup_index_caps', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -322,7 +335,7 @@ export default class Rollup { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['rollup.put_job'] + } = this[kAcceptedParams]['rollup.put_job'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -362,7 +375,18 @@ export default class Rollup { name: 'rollup.put_job', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'cron', + 'groups', + 'index_pattern', + 'metrics', + 'page_size', + 'rollup_index', + 'timeout', + 'headers' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -379,7 +403,7 @@ export default class Rollup { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['rollup.rollup_search'] + } = this[kAcceptedParams]['rollup.rollup_search'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -419,7 +443,16 @@ export default class Rollup { name: 'rollup.rollup_search', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'aggregations', + 'aggs', + 'query', + 'size', + 'rest_total_hits_as_int', + 'typed_keys' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -434,7 +467,7 @@ export default class Rollup { async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['rollup.start_job'] + } = this[kAcceptedParams]['rollup.start_job'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -464,7 +497,10 @@ export default class Rollup { name: 'rollup.start_job', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -479,7 +515,7 @@ export default class Rollup { async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['rollup.stop_job'] + } = this[kAcceptedParams]['rollup.stop_job'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -509,7 +545,12 @@ export default class Rollup { name: 'rollup.stop_job', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'timeout', + 'wait_for_completion' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index 1d524bc02..c8d3f3a1c 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -90,7 +90,12 @@ export default async function ScriptsPainlessExecuteApi (this const method = body != null ? 'POST' : 'GET' const path = '/_scripts/painless/_execute' const meta: TransportRequestMetadata = { - name: 'scripts_painless_execute' + name: 'scripts_painless_execute', + acceptedParams: [ + 'context', + 'context_setup', + 'script' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts index 2b31642cb..a2f1966d6 100644 --- a/src/api/api/scroll.ts +++ b/src/api/api/scroll.ts @@ -95,7 +95,15 @@ export default async function ScrollApi + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class SearchApplication { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'search_application.delete': { path: [ 'name' @@ -135,7 +136,7 @@ export default class SearchApplication { async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['search_application.delete'] + } = this[kAcceptedParams]['search_application.delete'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -165,7 +166,10 @@ export default class SearchApplication { name: 'search_application.delete', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -180,7 +184,7 @@ export default class SearchApplication { async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['search_application.delete_behavioral_analytics'] + } = this[kAcceptedParams]['search_application.delete_behavioral_analytics'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -210,7 +214,10 @@ export default class SearchApplication { name: 'search_application.delete_behavioral_analytics', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -225,7 +232,7 @@ export default class SearchApplication { async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['search_application.get'] + } = this[kAcceptedParams]['search_application.get'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -255,7 +262,10 @@ export default class SearchApplication { name: 'search_application.get', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -270,7 +280,7 @@ export default class SearchApplication { async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['search_application.get_behavioral_analytics'] + } = this[kAcceptedParams]['search_application.get_behavioral_analytics'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -308,7 +318,10 @@ export default class SearchApplication { name: 'search_application.get_behavioral_analytics', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -323,7 +336,7 @@ export default class SearchApplication { async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['search_application.list'] + } = this[kAcceptedParams]['search_application.list'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -351,7 +364,12 @@ export default class SearchApplication { const method = 'GET' const path = '/_application/search_application' const meta: TransportRequestMetadata = { - name: 'search_application.list' + name: 'search_application.list', + acceptedParams: [ + 'q', + 'from', + 'size' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -368,7 +386,7 @@ export default class SearchApplication { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['search_application.post_behavioral_analytics_event'] + } = this[kAcceptedParams]['search_application.post_behavioral_analytics_event'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -399,7 +417,13 @@ export default class SearchApplication { pathParts: { collection_name: params.collection_name, event_type: params.event_type - } + }, + acceptedParams: [ + 'collection_name', + 'event_type', + 'payload', + 'debug' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -416,7 +440,7 @@ export default class SearchApplication { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['search_application.put'] + } = this[kAcceptedParams]['search_application.put'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -446,7 +470,12 @@ export default class SearchApplication { name: 'search_application.put', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'search_application', + 'create' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -461,7 +490,7 @@ export default class SearchApplication { async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['search_application.put_behavioral_analytics'] + } = this[kAcceptedParams]['search_application.put_behavioral_analytics'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -491,7 +520,10 @@ export default class SearchApplication { name: 'search_application.put_behavioral_analytics', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -508,7 +540,7 @@ export default class SearchApplication { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['search_application.render_query'] + } = this[kAcceptedParams]['search_application.render_query'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -548,7 +580,11 @@ export default class SearchApplication { name: 'search_application.render_query', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'params' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -565,7 +601,7 @@ export default class SearchApplication { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['search_application.search'] + } = this[kAcceptedParams]['search_application.search'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -605,7 +641,12 @@ export default class SearchApplication { name: 'search_application.search', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'params', + 'typed_keys' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index b8e5e22a2..71d524f2c 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -59,6 +59,7 @@ const acceptedParams: Record (this: That name: 'search_template', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'explain', + 'id', + 'params', + 'profile', + 'source', + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'preference', + 'profile', + 'project_routing', + 'routing', + 'scroll', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 4342c20e6..0486d7ec1 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class SearchableSnapshots { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'searchable_snapshots.cache_stats': { path: [ 'node_id' @@ -94,7 +95,7 @@ export default class SearchableSnapshots { async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['searchable_snapshots.cache_stats'] + } = this[kAcceptedParams]['searchable_snapshots.cache_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -132,7 +133,11 @@ export default class SearchableSnapshots { name: 'searchable_snapshots.cache_stats', pathParts: { node_id: params.node_id - } + }, + acceptedParams: [ + 'node_id', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -147,7 +152,7 @@ export default class SearchableSnapshots { async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['searchable_snapshots.clear_cache'] + } = this[kAcceptedParams]['searchable_snapshots.clear_cache'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -185,7 +190,13 @@ export default class SearchableSnapshots { name: 'searchable_snapshots.clear_cache', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'expand_wildcards', + 'allow_no_indices', + 'ignore_unavailable' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -202,7 +213,7 @@ export default class SearchableSnapshots { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['searchable_snapshots.mount'] + } = this[kAcceptedParams]['searchable_snapshots.mount'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -243,7 +254,18 @@ export default class SearchableSnapshots { pathParts: { repository: params.repository, snapshot: params.snapshot - } + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'index', + 'renamed_index', + 'index_settings', + 'ignore_index_settings', + 'master_timeout', + 'wait_for_completion', + 'storage' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -258,7 +280,7 @@ export default class SearchableSnapshots { async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['searchable_snapshots.stats'] + } = this[kAcceptedParams]['searchable_snapshots.stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -296,7 +318,11 @@ export default class SearchableSnapshots { name: 'searchable_snapshots.stats', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'level' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 2d2236a09..6675abbd2 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Security { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'security.activate_user_profile': { path: [], body: [ @@ -327,6 +328,11 @@ export default class Security { 'master_timeout' ] }, + 'security.get_stats': { + path: [], + body: [], + query: [] + }, 'security.get_token': { path: [], body: [ @@ -675,7 +681,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.activate_user_profile'] + } = this[kAcceptedParams]['security.activate_user_profile'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
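    // take a shallow copy of any caller-supplied querystring, so the
    // catch-all loop further down can fold unrecognized top-level params
    // into it without mutating the caller's object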
{ ...userQuery } : {} @@ -712,7 +718,13 @@ export default class Security { const method = 'POST' const path = '/_security/profile/_activate' const meta: TransportRequestMetadata = { - name: 'security.activate_user_profile' + name: 'security.activate_user_profile', + acceptedParams: [ + 'access_token', + 'grant_type', + 'password', + 'username' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -727,7 +739,7 @@ export default class Security { async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.authenticate'] + } = this[kAcceptedParams]['security.authenticate'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -755,7 +767,9 @@ export default class Security { const method = 'GET' const path = '/_security/_authenticate' const meta: TransportRequestMetadata = { - name: 'security.authenticate' + name: 'security.authenticate', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -772,7 +786,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.bulk_delete_role'] + } = this[kAcceptedParams]['security.bulk_delete_role'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -809,7 +823,11 @@ export default class Security { const method = 'DELETE' const path = '/_security/role' const meta: TransportRequestMetadata = { - name: 'security.bulk_delete_role' + name: 'security.bulk_delete_role', + acceptedParams: [ + 'names', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -826,7 +844,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.bulk_put_role'] + } = this[kAcceptedParams]['security.bulk_put_role'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -863,7 +881,11 @@ export default class Security { const method = 'POST' const path = '/_security/role' const meta: TransportRequestMetadata = { - name: 'security.bulk_put_role' + name: 'security.bulk_put_role', + acceptedParams: [ + 'roles', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -880,7 +902,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.bulk_update_api_keys'] + } = this[kAcceptedParams]['security.bulk_update_api_keys'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -917,7 +939,13 @@ export default class Security { const method = 'POST' const path = '/_security/api_key/_bulk_update' const meta: TransportRequestMetadata = { - name: 'security.bulk_update_api_keys' + name: 'security.bulk_update_api_keys', + acceptedParams: [ + 'expiration', + 'ids', + 'metadata', + 'role_descriptors' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -934,7 +962,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.change_password'] + } = this[kAcceptedParams]['security.change_password'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -982,7 +1010,13 @@ export default class Security { name: 'security.change_password', pathParts: { username: params.username - } + }, + acceptedParams: [ + 'username', + 'password', + 'password_hash', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -997,7 +1031,7 @@ export default class Security { async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.clear_api_key_cache'] + } = this[kAcceptedParams]['security.clear_api_key_cache'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1027,7 +1061,10 @@ export default class Security { name: 'security.clear_api_key_cache', pathParts: { ids: params.ids - } + }, + acceptedParams: [ + 'ids' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1042,7 +1079,7 @@ export default class Security { async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.clear_cached_privileges'] + } = this[kAcceptedParams]['security.clear_cached_privileges'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1072,7 +1109,10 @@ export default class Security { name: 'security.clear_cached_privileges', pathParts: { application: params.application - } + }, + acceptedParams: [ + 'application' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1087,7 +1127,7 @@ export default class Security { async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.clear_cached_realms'] + } = this[kAcceptedParams]['security.clear_cached_realms'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1117,7 +1157,11 @@ export default class Security { name: 'security.clear_cached_realms', pathParts: { realms: params.realms - } + }, + acceptedParams: [ + 'realms', + 'usernames' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1132,7 +1176,7 @@ export default class Security { async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.clear_cached_roles'] + } = this[kAcceptedParams]['security.clear_cached_roles'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1162,7 +1206,10 @@ export default class Security { name: 'security.clear_cached_roles', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1177,7 +1224,7 @@ export default class Security { async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.clear_cached_service_tokens'] + } = this[kAcceptedParams]['security.clear_cached_service_tokens'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1209,7 +1256,12 @@ export default class Security { namespace: params.namespace, service: params.service, name: params.name - } + }, + acceptedParams: [ + 'namespace', + 'service', + 'name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1226,7 +1278,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.create_api_key'] + } = this[kAcceptedParams]['security.create_api_key'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1264,7 +1316,14 @@ export default class Security { const method = 'PUT' const path = '/_security/api_key' const meta: TransportRequestMetadata = { - name: 'security.create_api_key' + name: 'security.create_api_key', + acceptedParams: [ + 'expiration', + 'name', + 'role_descriptors', + 'metadata', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1281,7 +1340,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.create_cross_cluster_api_key'] + } = this[kAcceptedParams]['security.create_cross_cluster_api_key'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1318,7 +1377,13 @@ export default class Security { const method = 'POST' const path = '/_security/cross_cluster/api_key' const meta: TransportRequestMetadata = { - name: 'security.create_cross_cluster_api_key' + name: 'security.create_cross_cluster_api_key', + acceptedParams: [ + 'access', + 'expiration', + 'metadata', + 'name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1333,7 +1398,7 @@ export default class Security { async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.create_service_token'] + } = this[kAcceptedParams]['security.create_service_token'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1372,7 +1437,13 @@ export default class Security { namespace: params.namespace, service: params.service, name: params.name - } + }, + acceptedParams: [ + 'namespace', + 'service', + 'name', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1389,7 +1460,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.delegate_pki'] + } = this[kAcceptedParams]['security.delegate_pki'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1426,7 +1497,10 @@ export default class Security { const method = 'POST' const path = '/_security/delegate_pki' const meta: TransportRequestMetadata = { - name: 'security.delegate_pki' + name: 'security.delegate_pki', + acceptedParams: [ + 'x509_certificate_chain' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1441,7 +1515,7 @@ export default class Security { async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.delete_privileges'] + } = this[kAcceptedParams]['security.delete_privileges'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1472,7 +1546,12 @@ export default class Security { pathParts: { application: params.application, name: params.name - } + }, + acceptedParams: [ + 'application', + 'name', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1487,7 +1566,7 @@ export default class Security { async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.delete_role'] + } = this[kAcceptedParams]['security.delete_role'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1517,7 +1596,11 @@ export default class Security { name: 'security.delete_role', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1532,7 +1615,7 @@ export default class Security { async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.delete_role_mapping'] + } = this[kAcceptedParams]['security.delete_role_mapping'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1562,7 +1645,11 @@ export default class Security { name: 'security.delete_role_mapping', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1577,7 +1664,7 @@ export default class Security { async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.delete_service_token'] + } = this[kAcceptedParams]['security.delete_service_token'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1609,7 +1696,13 @@ export default class Security { namespace: params.namespace, service: params.service, name: params.name - } + }, + acceptedParams: [ + 'namespace', + 'service', + 'name', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1624,7 +1717,7 @@ export default class Security { async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.delete_user'] + } = this[kAcceptedParams]['security.delete_user'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1654,7 +1747,11 @@ export default class Security { name: 'security.delete_user', pathParts: { username: params.username - } + }, + acceptedParams: [ + 'username', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1669,7 +1766,7 @@ export default class Security { async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.disable_user'] + } = this[kAcceptedParams]['security.disable_user'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1699,7 +1796,11 @@ export default class Security { name: 'security.disable_user', pathParts: { username: params.username - } + }, + acceptedParams: [ + 'username', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1714,7 +1815,7 @@ export default class Security { async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.disable_user_profile'] + } = this[kAcceptedParams]['security.disable_user_profile'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1744,7 +1845,11 @@ export default class Security { name: 'security.disable_user_profile', pathParts: { uid: params.uid - } + }, + acceptedParams: [ + 'uid', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1759,7 +1864,7 @@ export default class Security { async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.enable_user'] + } = this[kAcceptedParams]['security.enable_user'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1789,7 +1894,11 @@ export default class Security { name: 'security.enable_user', pathParts: { username: params.username - } + }, + acceptedParams: [ + 'username', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1804,7 +1913,7 @@ export default class Security { async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.enable_user_profile'] + } = this[kAcceptedParams]['security.enable_user_profile'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1834,7 +1943,11 @@ export default class Security { name: 'security.enable_user_profile', pathParts: { uid: params.uid - } + }, + acceptedParams: [ + 'uid', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1849,7 +1962,7 @@ export default class Security { async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.enroll_kibana'] + } = this[kAcceptedParams]['security.enroll_kibana'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1877,7 +1990,9 @@ export default class Security { const method = 'GET' const path = '/_security/enroll/kibana' const meta: TransportRequestMetadata = { - name: 'security.enroll_kibana' + name: 'security.enroll_kibana', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1892,7 +2007,7 @@ export default class Security { async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.enroll_node'] + } = this[kAcceptedParams]['security.enroll_node'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1920,7 +2035,9 @@ export default class Security { const method = 'GET' const path = '/_security/enroll/node' const meta: TransportRequestMetadata = { - name: 'security.enroll_node' + name: 'security.enroll_node', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1935,7 +2052,7 @@ export default class Security { async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.get_api_key'] + } = this[kAcceptedParams]['security.get_api_key'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1963,7 +2080,17 @@ export default class Security { const method = 'GET' const path = '/_security/api_key' const meta: TransportRequestMetadata = { - name: 'security.get_api_key' + name: 'security.get_api_key', + acceptedParams: [ + 'id', + 'name', + 'owner', + 'realm_name', + 'username', + 'with_limited_by', + 'active_only', + 'with_profile_uid' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -1978,7 +2105,7 @@ export default class Security { async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.get_builtin_privileges'] + } = this[kAcceptedParams]['security.get_builtin_privileges'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2006,7 +2133,9 @@ export default class Security { const method = 'GET' const path = '/_security/privilege/_builtin' const meta: TransportRequestMetadata = { - name: 'security.get_builtin_privileges' + name: 'security.get_builtin_privileges', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2021,7 +2150,7 @@ export default class Security { async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.get_privileges'] + } = this[kAcceptedParams]['security.get_privileges'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2063,7 +2192,11 @@ export default class Security { pathParts: { application: params.application, name: params.name - } + }, + acceptedParams: [ + 'application', + 'name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2078,7 +2211,7 @@ export default class Security { async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.get_role'] + } = this[kAcceptedParams]['security.get_role'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2116,7 +2249,10 @@ export default class Security { name: 'security.get_role', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2131,7 +2267,7 @@ export default class Security { async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.get_role_mapping'] + } = this[kAcceptedParams]['security.get_role_mapping'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2169,7 +2305,10 @@ export default class Security { name: 'security.get_role_mapping', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2184,7 +2323,7 @@ export default class Security { async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.get_service_accounts'] + } = this[kAcceptedParams]['security.get_service_accounts'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2226,7 +2365,11 @@ export default class Security { pathParts: { namespace: params.namespace, service: params.service - } + }, + acceptedParams: [ + 'namespace', + 'service' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2241,7 +2384,7 @@ export default class Security { async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.get_service_credentials'] + } = this[kAcceptedParams]['security.get_service_credentials'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2272,7 +2415,11 @@ export default class Security { pathParts: { namespace: params.namespace, service: params.service - } + }, + acceptedParams: [ + 'namespace', + 'service' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2287,7 +2434,7 @@ export default class Security { async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.get_settings'] + } = this[kAcceptedParams]['security.get_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2315,7 +2462,54 @@ export default class Security { const method = 'GET' const path = '/_security/settings' const meta: TransportRequestMetadata = { - name: 'security.get_settings' + name: 'security.get_settings', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get security statistics for all nodes + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-stats | Elasticsearch API documentation} + */ + async getStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
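    // no spec-generated types exist for this endpoint yet: request and
    // response are the catch-all `T.TODO` (presumably an alias along the
    // lines of `Record<string, any>` in the generated types), so `params`
    // may be omitted entirely and is defaulted to an empty object here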
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_security/stats' + const meta: TransportRequestMetadata = { + name: 'security.get_stats', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2332,7 +2526,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.get_token'] + } = this[kAcceptedParams]['security.get_token'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2370,7 +2564,15 @@ export default class Security { const method = 'POST' const path = '/_security/oauth2/token' const meta: TransportRequestMetadata = { - name: 'security.get_token' + name: 'security.get_token', + acceptedParams: [ + 'grant_type', + 'scope', + 'password', + 'kerberos_ticket', + 'refresh_token', + 'username' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2385,7 +2587,7 @@ export default class Security { async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.get_user'] + } = this[kAcceptedParams]['security.get_user'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2423,7 +2625,11 @@ export default class Security { name: 'security.get_user', pathParts: { username: params.username - } + }, + acceptedParams: [ + 'username', + 'with_profile_uid' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2438,7 +2644,7 @@ export default class Security { async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.get_user_privileges'] + } = this[kAcceptedParams]['security.get_user_privileges'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2466,7 +2672,9 @@ export default class Security { const method = 'GET' const path = '/_security/user/_privileges' const meta: TransportRequestMetadata = { - name: 'security.get_user_privileges' + name: 'security.get_user_privileges', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2481,7 +2689,7 @@ export default class Security { async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.get_user_profile'] + } = this[kAcceptedParams]['security.get_user_profile'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2511,7 +2719,11 @@ export default class Security { name: 'security.get_user_profile', pathParts: { uid: params.uid - } + }, + acceptedParams: [ + 'uid', + 'data' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2528,7 +2740,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.grant_api_key'] + } = this[kAcceptedParams]['security.grant_api_key'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2565,7 +2777,16 @@ export default class Security { const method = 'POST' const path = '/_security/api_key/grant' const meta: TransportRequestMetadata = { - name: 'security.grant_api_key' + name: 'security.grant_api_key', + acceptedParams: [ + 'api_key', + 'grant_type', + 'access_token', + 'username', + 'password', + 'run_as', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2582,7 +2803,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.has_privileges'] + } = this[kAcceptedParams]['security.has_privileges'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2630,7 +2851,13 @@ export default class Security { name: 'security.has_privileges', pathParts: { user: params.user - } + }, + acceptedParams: [ + 'user', + 'application', + 'cluster', + 'index' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2647,7 +2874,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.has_privileges_user_profile'] + } = this[kAcceptedParams]['security.has_privileges_user_profile'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2684,7 +2911,11 @@ export default class Security { const method = body != null ? 'POST' : 'GET' const path = '/_security/profile/_has_privileges' const meta: TransportRequestMetadata = { - name: 'security.has_privileges_user_profile' + name: 'security.has_privileges_user_profile', + acceptedParams: [ + 'uids', + 'privileges' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2701,7 +2932,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.invalidate_api_key'] + } = this[kAcceptedParams]['security.invalidate_api_key'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2739,7 +2970,15 @@ export default class Security { const method = 'DELETE' const path = '/_security/api_key' const meta: TransportRequestMetadata = { - name: 'security.invalidate_api_key' + name: 'security.invalidate_api_key', + acceptedParams: [ + 'id', + 'ids', + 'name', + 'owner', + 'realm_name', + 'username' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2756,7 +2995,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.invalidate_token'] + } = this[kAcceptedParams]['security.invalidate_token'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2794,7 +3033,13 @@ export default class Security { const method = 'DELETE' const path = '/_security/oauth2/token' const meta: TransportRequestMetadata = { - name: 'security.invalidate_token' + name: 'security.invalidate_token', + acceptedParams: [ + 'token', + 'refresh_token', + 'realm_name', + 'username' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2811,7 +3056,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.oidc_authenticate'] + } = this[kAcceptedParams]['security.oidc_authenticate'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2848,7 +3093,13 @@ export default class Security { const method = 'POST' const path = '/_security/oidc/authenticate' const meta: TransportRequestMetadata = { - name: 'security.oidc_authenticate' + name: 'security.oidc_authenticate', + acceptedParams: [ + 'nonce', + 'realm', + 'redirect_uri', + 'state' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2865,7 +3116,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.oidc_logout'] + } = this[kAcceptedParams]['security.oidc_logout'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2902,7 +3153,11 @@ export default class Security { const method = 'POST' const path = '/_security/oidc/logout' const meta: TransportRequestMetadata = { - name: 'security.oidc_logout' + name: 'security.oidc_logout', + acceptedParams: [ + 'token', + 'refresh_token' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2919,7 +3174,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.oidc_prepare_authentication'] + } = this[kAcceptedParams]['security.oidc_prepare_authentication'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2957,7 +3212,14 @@ export default class Security { const method = 'POST' const path = '/_security/oidc/prepare' const meta: TransportRequestMetadata = { - name: 'security.oidc_prepare_authentication' + name: 'security.oidc_prepare_authentication', + acceptedParams: [ + 'iss', + 'login_hint', + 'nonce', + 'realm', + 'state' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -2974,7 +3236,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.put_privileges'] + } = this[kAcceptedParams]['security.put_privileges'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3001,7 +3263,11 @@ export default class Security { const method = 'PUT' const path = '/_security/privilege' const meta: TransportRequestMetadata = { - name: 'security.put_privileges' + name: 'security.put_privileges', + acceptedParams: [ + 'privileges', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3018,7 +3284,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.put_role'] + } = this[kAcceptedParams]['security.put_role'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3058,7 +3324,21 @@ export default class Security { name: 'security.put_role', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'applications', + 'cluster', + 'global', + 'indices', + 'remote_indices', + 'remote_cluster', + 'metadata', + 'run_as', + 'description', + 'transient_metadata', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3075,7 +3355,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.put_role_mapping'] + } = this[kAcceptedParams]['security.put_role_mapping'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3115,7 +3395,17 @@ export default class Security { name: 'security.put_role_mapping', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'name', + 'enabled', + 'metadata', + 'roles', + 'role_templates', + 'rules', + 'run_as', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3132,7 +3422,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.put_user'] + } = this[kAcceptedParams]['security.put_user'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3172,7 +3462,19 @@ export default class Security { name: 'security.put_user', pathParts: { username: params.username - } + }, + acceptedParams: [ + 'username', + 'username', + 'email', + 'full_name', + 'metadata', + 'password', + 'password_hash', + 'roles', + 'enabled', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3189,7 +3491,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.query_api_keys'] + } = this[kAcceptedParams]['security.query_api_keys'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3227,7 +3529,19 @@ export default class Security { const method = body != null ? 
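// Usage sketch (illustrative, not part of the patch): security.put_role accepts the
// descriptor fields enumerated above ('cluster', 'indices', 'metadata', ...).
// Assuming a configured `client`; the role name and privileges are hypothetical.
await client.security.putRole({
  name: 'logs_reader',
  cluster: ['monitor'],
  indices: [{ names: ['logs-*'], privileges: ['read', 'view_index_metadata'] }],
  refresh: 'wait_for'
})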
'POST' : 'GET' const path = '/_security/_query/api_key' const meta: TransportRequestMetadata = { - name: 'security.query_api_keys' + name: 'security.query_api_keys', + acceptedParams: [ + 'aggregations', + 'aggs', + 'query', + 'from', + 'sort', + 'size', + 'search_after', + 'with_limited_by', + 'with_profile_uid', + 'typed_keys' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3244,7 +3558,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.query_role'] + } = this[kAcceptedParams]['security.query_role'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3282,7 +3596,14 @@ export default class Security { const method = body != null ? 'POST' : 'GET' const path = '/_security/_query/role' const meta: TransportRequestMetadata = { - name: 'security.query_role' + name: 'security.query_role', + acceptedParams: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3299,7 +3620,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.query_user'] + } = this[kAcceptedParams]['security.query_user'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3337,7 +3658,15 @@ export default class Security { const method = body != null ? 'POST' : 'GET' const path = '/_security/_query/user' const meta: TransportRequestMetadata = { - name: 'security.query_user' + name: 'security.query_user', + acceptedParams: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after', + 'with_profile_uid' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3354,7 +3683,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.saml_authenticate'] + } = this[kAcceptedParams]['security.saml_authenticate'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3391,7 +3720,12 @@ export default class Security { const method = 'POST' const path = '/_security/saml/authenticate' const meta: TransportRequestMetadata = { - name: 'security.saml_authenticate' + name: 'security.saml_authenticate', + acceptedParams: [ + 'content', + 'ids', + 'realm' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3408,7 +3742,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.saml_complete_logout'] + } = this[kAcceptedParams]['security.saml_complete_logout'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3445,7 +3779,13 @@ export default class Security { const method = 'POST' const path = '/_security/saml/complete_logout' const meta: TransportRequestMetadata = { - name: 'security.saml_complete_logout' + name: 'security.saml_complete_logout', + acceptedParams: [ + 'realm', + 'ids', + 'query_string', + 'content' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3462,7 +3802,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.saml_invalidate'] + } = this[kAcceptedParams]['security.saml_invalidate'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3499,7 +3839,12 @@ export default class Security { const method = 'POST' const path = '/_security/saml/invalidate' const meta: TransportRequestMetadata = { - name: 'security.saml_invalidate' + name: 'security.saml_invalidate', + acceptedParams: [ + 'acs', + 'query_string', + 'realm' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3516,7 +3861,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.saml_logout'] + } = this[kAcceptedParams]['security.saml_logout'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3553,7 +3898,11 @@ export default class Security { const method = 'POST' const path = '/_security/saml/logout' const meta: TransportRequestMetadata = { - name: 'security.saml_logout' + name: 'security.saml_logout', + acceptedParams: [ + 'token', + 'refresh_token' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3570,7 +3919,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.saml_prepare_authentication'] + } = this[kAcceptedParams]['security.saml_prepare_authentication'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3608,7 +3957,12 @@ export default class Security { const method = 'POST' const path = '/_security/saml/prepare' const meta: TransportRequestMetadata = { - name: 'security.saml_prepare_authentication' + name: 'security.saml_prepare_authentication', + acceptedParams: [ + 'acs', + 'realm', + 'relay_state' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3623,7 +3977,7 @@ export default class Security { async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['security.saml_service_provider_metadata'] + } = this[kAcceptedParams]['security.saml_service_provider_metadata'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3653,7 +4007,10 @@ export default class Security { name: 'security.saml_service_provider_metadata', pathParts: { realm_name: params.realm_name - } + }, + acceptedParams: [ + 'realm_name' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3670,7 +4027,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.suggest_user_profiles'] + } = this[kAcceptedParams]['security.suggest_user_profiles'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3708,7 +4065,14 @@ export default class Security { const method = body != null ? 'POST' : 'GET' const path = '/_security/profile/_suggest' const meta: TransportRequestMetadata = { - name: 'security.suggest_user_profiles' + name: 'security.suggest_user_profiles', + acceptedParams: [ + 'name', + 'size', + 'data', + 'hint', + 'data' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3725,7 +4089,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.update_api_key'] + } = this[kAcceptedParams]['security.update_api_key'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3765,7 +4129,13 @@ export default class Security { name: 'security.update_api_key', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'role_descriptors', + 'metadata', + 'expiration' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3782,7 +4152,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.update_cross_cluster_api_key'] + } = this[kAcceptedParams]['security.update_cross_cluster_api_key'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3822,7 +4192,13 @@ export default class Security { name: 'security.update_cross_cluster_api_key', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'access', + 'expiration', + 'metadata' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3839,7 +4215,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.update_settings'] + } = this[kAcceptedParams]['security.update_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3877,7 +4253,14 @@ export default class Security { const method = 'PUT' const path = '/_security/settings' const meta: TransportRequestMetadata = { - name: 'security.update_settings' + name: 'security.update_settings', + acceptedParams: [ + 'security', + 'security-profile', + 'security-tokens', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -3894,7 +4277,7 @@ export default class Security { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['security.update_user_profile_data'] + } = this[kAcceptedParams]['security.update_user_profile_data'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3934,7 +4317,15 @@ export default class Security { name: 'security.update_user_profile_data', pathParts: { uid: params.uid - } + }, + acceptedParams: [ + 'uid', + 'labels', + 'data', + 'if_seq_no', + 'if_primary_term', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index 99c32f3c9..2f1f9d5c0 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Shutdown { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'shutdown.delete_node': { path: [ 'node_id' @@ -82,7 +83,7 @@ export default class Shutdown { async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['shutdown.delete_node'] + } = this[kAcceptedParams]['shutdown.delete_node'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -112,7 +113,12 @@ export default class Shutdown { name: 'shutdown.delete_node', pathParts: { node_id: params.node_id - } + }, + acceptedParams: [ + 'node_id', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -127,7 +133,7 @@ export default class Shutdown { async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['shutdown.get_node'] + } = this[kAcceptedParams]['shutdown.get_node'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -165,7 +171,11 @@ export default class Shutdown { name: 'shutdown.get_node', pathParts: { node_id: params.node_id - } + }, + acceptedParams: [ + 'node_id', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -182,7 +192,7 @@ export default class Shutdown { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['shutdown.put_node'] + } = this[kAcceptedParams]['shutdown.put_node'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
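// Pattern sketch (illustrative): the refactor repeated across these files swaps the
// public `acceptedParams` property for a symbol-keyed one imported from the client
// module, keeping per-API metadata off the documented property surface. A
// self-contained approximation of the idea; names here are hypothetical, not the
// library's actual declarations.
const kAcceptedParams = Symbol('accepted params')

interface ApiSpec { path: string[], body: string[], query: string[] }

class ExampleNamespace {
  [kAcceptedParams]: Record<string, ApiSpec>
  constructor () {
    // metadata lives behind the symbol, invisible to normal property enumeration
    this[kAcceptedParams] = {
      'example.ping': { path: [], body: [], query: ['timeout'] }
    }
  }
}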
{ ...userQuery } : {} @@ -222,7 +232,16 @@ export default class Shutdown { name: 'shutdown.put_node', pathParts: { node_id: params.node_id - } + }, + acceptedParams: [ + 'node_id', + 'type', + 'reason', + 'allocation_delay', + 'target_node_name', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts index 407c66002..a1dff921c 100644 --- a/src/api/api/simulate.ts +++ b/src/api/api/simulate.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Simulate { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'simulate.ingest': { path: [ 'index' @@ -66,7 +67,7 @@ export default class Simulate { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['simulate.ingest'] + } = this[kAcceptedParams]['simulate.ingest'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -113,7 +114,17 @@ export default class Simulate { name: 'simulate.ingest', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'docs', + 'component_template_substitutions', + 'index_template_substitutions', + 'mapping_addition', + 'pipeline_substitutions', + 'pipeline', + 'merge_type' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index f1f5f5e31..c710ce10c 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Slm { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'slm.delete_lifecycle': { path: [ 'policy_id' @@ -134,7 +135,7 @@ export default class Slm { async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['slm.delete_lifecycle'] + } = this[kAcceptedParams]['slm.delete_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
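// Usage sketch (illustrative, not part of the patch): shutdown.put_node takes the
// 'type', 'reason', 'allocation_delay' and timeout parameters listed above.
// Assuming a configured `client`; the node id and reason are hypothetical.
await client.shutdown.putNode({
  node_id: 'node-1',
  type: 'restart',
  reason: 'routine maintenance',
  allocation_delay: '10m'
})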
{ ...userQuery } : {} @@ -164,7 +165,12 @@ export default class Slm { name: 'slm.delete_lifecycle', pathParts: { policy_id: params.policy_id - } + }, + acceptedParams: [ + 'policy_id', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -179,7 +185,7 @@ export default class Slm { async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['slm.execute_lifecycle'] + } = this[kAcceptedParams]['slm.execute_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -209,7 +215,12 @@ export default class Slm { name: 'slm.execute_lifecycle', pathParts: { policy_id: params.policy_id - } + }, + acceptedParams: [ + 'policy_id', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -224,7 +235,7 @@ export default class Slm { async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['slm.execute_retention'] + } = this[kAcceptedParams]['slm.execute_retention'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -252,7 +263,11 @@ export default class Slm { const method = 'POST' const path = '/_slm/_execute_retention' const meta: TransportRequestMetadata = { - name: 'slm.execute_retention' + name: 'slm.execute_retention', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -267,7 +282,7 @@ export default class Slm { async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['slm.get_lifecycle'] + } = this[kAcceptedParams]['slm.get_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -305,7 +320,12 @@ export default class Slm { name: 'slm.get_lifecycle', pathParts: { policy_id: params.policy_id - } + }, + acceptedParams: [ + 'policy_id', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -320,7 +340,7 @@ export default class Slm { async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['slm.get_stats'] + } = this[kAcceptedParams]['slm.get_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -348,7 +368,11 @@ export default class Slm { const method = 'GET' const path = '/_slm/stats' const meta: TransportRequestMetadata = { - name: 'slm.get_stats' + name: 'slm.get_stats', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -363,7 +387,7 @@ export default class Slm { async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['slm.get_status'] + } = this[kAcceptedParams]['slm.get_status'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -391,7 +415,11 @@ export default class Slm { const method = 'GET' const path = '/_slm/status' const meta: TransportRequestMetadata = { - name: 'slm.get_status' + name: 'slm.get_status', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -408,7 +436,7 @@ export default class Slm { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['slm.put_lifecycle'] + } = this[kAcceptedParams]['slm.put_lifecycle'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -448,7 +476,17 @@ export default class Slm { name: 'slm.put_lifecycle', pathParts: { policy_id: params.policy_id - } + }, + acceptedParams: [ + 'policy_id', + 'config', + 'name', + 'repository', + 'retention', + 'schedule', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -463,7 +501,7 @@ export default class Slm { async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['slm.start'] + } = this[kAcceptedParams]['slm.start'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -491,7 +529,11 @@ export default class Slm { const method = 'POST' const path = '/_slm/start' const meta: TransportRequestMetadata = { - name: 'slm.start' + name: 'slm.start', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -506,7 +548,7 @@ export default class Slm { async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['slm.stop'] + } = this[kAcceptedParams]['slm.stop'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -534,7 +576,11 @@ export default class Slm { const method = 'POST' const path = '/_slm/stop' const meta: TransportRequestMetadata = { - name: 'slm.stop' + name: 'slm.stop', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index c8363ad2b..556b7a8f6 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Snapshot { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'snapshot.cleanup_repository': { path: [ 'name' @@ -236,7 +237,7 @@ export default class Snapshot { async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['snapshot.cleanup_repository'] + } = this[kAcceptedParams]['snapshot.cleanup_repository'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
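// Usage sketch (illustrative, not part of the patch): slm.put_lifecycle accepts
// 'schedule', 'name', 'repository', 'config' and 'retention' as listed above.
// Assuming a configured `client` and an existing snapshot repository; all values
// are hypothetical.
await client.slm.putLifecycle({
  policy_id: 'nightly-snapshots',
  schedule: '0 30 1 * * ?',
  name: '<nightly-snap-{now/d}>',
  repository: 'my_repository',
  config: { indices: ['logs-*'], ignore_unavailable: true },
  retention: { expire_after: '30d', min_count: 5, max_count: 50 }
})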
{ ...userQuery } : {} @@ -266,7 +267,12 @@ export default class Snapshot { name: 'snapshot.cleanup_repository', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'repository', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -283,7 +289,7 @@ export default class Snapshot { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['snapshot.clone'] + } = this[kAcceptedParams]['snapshot.clone'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -325,7 +331,14 @@ export default class Snapshot { repository: params.repository, snapshot: params.snapshot, target_snapshot: params.target_snapshot - } + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'target_snapshot', + 'indices', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -342,7 +355,7 @@ export default class Snapshot { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['snapshot.create'] + } = this[kAcceptedParams]['snapshot.create'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -383,7 +396,20 @@ export default class Snapshot { pathParts: { repository: params.repository, snapshot: params.snapshot - } + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'expand_wildcards', + 'feature_states', + 'ignore_unavailable', + 'include_global_state', + 'indices', + 'metadata', + 'partial', + 'master_timeout', + 'wait_for_completion' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -400,7 +426,7 @@ export default class Snapshot { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['snapshot.create_repository'] + } = this[kAcceptedParams]['snapshot.create_repository'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -430,7 +456,14 @@ export default class Snapshot { name: 'snapshot.create_repository', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'repository', + 'repository', + 'master_timeout', + 'timeout', + 'verify' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -445,7 +478,7 @@ export default class Snapshot { async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['snapshot.delete'] + } = this[kAcceptedParams]['snapshot.delete'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -476,7 +509,13 @@ export default class Snapshot { pathParts: { repository: params.repository, snapshot: params.snapshot - } + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'master_timeout', + 'wait_for_completion' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -491,7 +530,7 @@ export default class Snapshot { async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['snapshot.delete_repository'] + } = this[kAcceptedParams]['snapshot.delete_repository'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
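// Usage sketch (illustrative, not part of the patch): snapshot.create supports the
// flags enumerated above ('indices', 'include_global_state', 'wait_for_completion',
// ...). Assuming a configured `client` and an existing repository; names are hypothetical.
await client.snapshot.create({
  repository: 'my_repository',
  snapshot: 'snapshot_2024_01_01',
  indices: 'logs-*',
  include_global_state: false,
  wait_for_completion: true
})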
{ ...userQuery } : {} @@ -521,7 +560,12 @@ export default class Snapshot { name: 'snapshot.delete_repository', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'repository', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -536,7 +580,7 @@ export default class Snapshot { async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['snapshot.get'] + } = this[kAcceptedParams]['snapshot.get'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -567,7 +611,25 @@ export default class Snapshot { pathParts: { repository: params.repository, snapshot: params.snapshot - } + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'after', + 'from_sort_value', + 'ignore_unavailable', + 'index_details', + 'index_names', + 'include_repository', + 'master_timeout', + 'order', + 'offset', + 'size', + 'slm_policy_filter', + 'sort', + 'state', + 'verbose' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -582,7 +644,7 @@ export default class Snapshot { async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['snapshot.get_repository'] + } = this[kAcceptedParams]['snapshot.get_repository'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -620,7 +682,12 @@ export default class Snapshot { name: 'snapshot.get_repository', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'repository', + 'local', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -635,7 +702,7 @@ export default class Snapshot { async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['snapshot.repository_analyze'] + } = this[kAcceptedParams]['snapshot.repository_analyze'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -665,7 +732,22 @@ export default class Snapshot { name: 'snapshot.repository_analyze', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'repository', + 'blob_count', + 'concurrency', + 'detailed', + 'early_read_node_count', + 'max_blob_size', + 'max_total_data_size', + 'rare_action_probability', + 'rarely_abort_writes', + 'read_node_count', + 'register_operation_count', + 'seed', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -680,7 +762,7 @@ export default class Snapshot { async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['snapshot.repository_verify_integrity'] + } = this[kAcceptedParams]['snapshot.repository_verify_integrity'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -710,7 +792,18 @@ export default class Snapshot { name: 'snapshot.repository_verify_integrity', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'repository', + 'blob_thread_pool_concurrency', + 'index_snapshot_verification_concurrency', + 'index_verification_concurrency', + 'max_bytes_per_sec', + 'max_failed_shard_snapshots', + 'meta_thread_pool_concurrency', + 'snapshot_verification_concurrency', + 'verify_blob_contents' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -727,7 +820,7 @@ export default class Snapshot { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['snapshot.restore'] + } = this[kAcceptedParams]['snapshot.restore'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -768,7 +861,23 @@ export default class Snapshot { pathParts: { repository: params.repository, snapshot: params.snapshot - } + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'feature_states', + 'ignore_index_settings', + 'ignore_unavailable', + 'include_aliases', + 'include_global_state', + 'index_settings', + 'indices', + 'partial', + 'rename_pattern', + 'rename_replacement', + 'master_timeout', + 'wait_for_completion' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -783,7 +892,7 @@ export default class Snapshot { async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['snapshot.status'] + } = this[kAcceptedParams]['snapshot.status'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -825,7 +934,13 @@ export default class Snapshot { pathParts: { repository: params.repository, snapshot: params.snapshot - } + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'ignore_unavailable', + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -840,7 +955,7 @@ export default class Snapshot { async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['snapshot.verify_repository'] + } = this[kAcceptedParams]['snapshot.verify_repository'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -870,7 +985,12 @@ export default class Snapshot { name: 'snapshot.verify_repository', pathParts: { name: params.name - } + }, + acceptedParams: [ + 'repository', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index 2fa3d0b02..47c411cfb 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Sql { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'sql.clear_cursor': { path: [], body: [ @@ -90,7 +91,8 @@ export default class Sql { 'wait_for_completion_timeout' ], query: [ - 'format' + 'format', + 'project_routing' ] }, 'sql.translate': { @@ -118,7 +120,7 @@ export default class Sql { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['sql.clear_cursor'] + } = this[kAcceptedParams]['sql.clear_cursor'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -155,7 +157,10 @@ export default class Sql { const method = 'POST' const path = '/_sql/close' const meta: TransportRequestMetadata = { - name: 'sql.clear_cursor' + name: 'sql.clear_cursor', + acceptedParams: [ + 'cursor' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -170,7 +175,7 @@ export default class Sql { async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['sql.delete_async'] + } = this[kAcceptedParams]['sql.delete_async'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -200,7 +205,10 @@ export default class Sql { name: 'sql.delete_async', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -215,7 +223,7 @@ export default class Sql { async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['sql.get_async'] + } = this[kAcceptedParams]['sql.get_async'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -245,7 +253,14 @@ export default class Sql { name: 'sql.get_async', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'delimiter', + 'format', + 'keep_alive', + 'wait_for_completion_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -260,7 +275,7 @@ export default class Sql { async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['sql.get_async_status'] + } = this[kAcceptedParams]['sql.get_async_status'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -290,7 +305,10 @@ export default class Sql { name: 'sql.get_async_status', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -307,7 +325,7 @@ export default class Sql { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['sql.query'] + } = this[kAcceptedParams]['sql.query'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -345,7 +363,28 @@ export default class Sql { const method = body != null ? 'POST' : 'GET' const path = '/_sql' const meta: TransportRequestMetadata = { - name: 'sql.query' + name: 'sql.query', + acceptedParams: [ + 'allow_partial_search_results', + 'catalog', + 'columnar', + 'cursor', + 'fetch_size', + 'field_multi_value_leniency', + 'filter', + 'index_using_frozen', + 'keep_alive', + 'keep_on_completion', + 'page_timeout', + 'params', + 'query', + 'request_timeout', + 'runtime_mappings', + 'time_zone', + 'wait_for_completion_timeout', + 'format', + 'project_routing' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -362,7 +401,7 @@ export default class Sql { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['sql.translate'] + } = this[kAcceptedParams]['sql.translate'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -399,7 +438,13 @@ export default class Sql { const method = body != null ? 'POST' : 'GET' const path = '/_sql/translate' const meta: TransportRequestMetadata = { - name: 'sql.translate' + name: 'sql.translate', + acceptedParams: [ + 'fetch_size', + 'filter', + 'query', + 'time_zone' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts index 1708b535e..ada1ae8db 100644 --- a/src/api/api/ssl.ts +++ b/src/api/api/ssl.ts @@ -21,18 +21,19 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } export default class Ssl { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'ssl.certificates': { path: [], body: [], @@ -51,7 +52,7 @@ export default class Ssl { async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['ssl.certificates'] + } = this[kAcceptedParams]['ssl.certificates'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
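// Usage sketch (illustrative, not part of the patch): sql.query accepts the body
// fields listed above, and this patch additionally registers 'project_routing' as
// a query parameter. Assuming a configured `client`; the SQL statement is hypothetical.
const result = await client.sql.query({
  query: 'SELECT author, COUNT(*) AS books FROM library GROUP BY author',
  fetch_size: 5,
  format: 'json'
})
console.log(result.columns, result.rows)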
{ ...userQuery } : {}
@@ -79,7 +80,9 @@
     const method = 'GET'
     const path = '/_ssl/certificates'
     const meta: TransportRequestMetadata = {
-      name: 'ssl.certificates'
+      name: 'ssl.certificates',
+      acceptedParams: [
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }
diff --git a/src/api/api/streams.ts b/src/api/api/streams.ts
index e040401cd..97ea936a6 100644
--- a/src/api/api/streams.ts
+++ b/src/api/api/streams.ts
@@ -21,47 +21,56 @@ import {
   TransportResult
 } from '@elastic/transport'
 import * as T from '../types'
+import { kAcceptedParams } from '../../client'

 interface That {
   transport: Transport
-  acceptedParams: Record
+  [kAcceptedParams]: Record
 }

 export default class Streams {
   transport: Transport
-  acceptedParams: Record
+  [kAcceptedParams]: Record

   constructor (transport: Transport) {
     this.transport = transport
-    this.acceptedParams = {
+    this[kAcceptedParams] = {
       'streams.logs_disable': {
         path: [],
         body: [],
-        query: []
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
       },
       'streams.logs_enable': {
         path: [],
         body: [],
-        query: []
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
       },
       'streams.status': {
         path: [],
         body: [],
-        query: []
+        query: [
+          'master_timeout'
+        ]
       }
     }
   }

   /**
-   * Disable the Logs Streams feature for this cluster
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-logs-disable.html | Elasticsearch API documentation}
+   * Disable logs stream. Turn off the logs stream feature for this cluster.
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch#TODO | Elasticsearch API documentation}
    */
-  async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise
-  async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise {
+  async logsDisable (this: That, params?: T.StreamsLogsDisableRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async logsDisable (this: That, params?: T.StreamsLogsDisableRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async logsDisable (this: That, params?: T.StreamsLogsDisableRequest, options?: TransportRequestOptions): Promise
+  async logsDisable (this: That, params?: T.StreamsLogsDisableRequest, options?: TransportRequestOptions): Promise {
     const {
       path: acceptedPath
-    } = this.acceptedParams['streams.logs_disable']
+    } = this[kAcceptedParams]['streams.logs_disable']

     const userQuery = params?.querystring
     const querystring: Record = userQuery != null ? { ...userQuery } : {}
@@ -81,6 +90,7 @@ export default class Streams {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
        querystring[key] = params[key]
       }
     }
@@ -88,22 +98,26 @@
     const method = 'POST'
     const path = '/_streams/logs/_disable'
     const meta: TransportRequestMetadata = {
-      name: 'streams.logs_disable'
+      name: 'streams.logs_disable',
+      acceptedParams: [
+        'master_timeout',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
-   * Enable the Logs Streams feature for this cluster
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-logs-enable.html | Elasticsearch API documentation}
+   * Enable logs stream. Turn on the logs stream feature for this cluster. NOTE: To protect existing data, this feature can be turned on only if the cluster does not have existing indices or data streams that match the pattern `logs|logs.*`. If those indices or data streams exist, a `409 - Conflict` response and error is returned.
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch#TODO | Elasticsearch API documentation}
    */
-  async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise
-  async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise {
+  async logsEnable (this: That, params?: T.StreamsLogsEnableRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async logsEnable (this: That, params?: T.StreamsLogsEnableRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async logsEnable (this: That, params?: T.StreamsLogsEnableRequest, options?: TransportRequestOptions): Promise
+  async logsEnable (this: That, params?: T.StreamsLogsEnableRequest, options?: TransportRequestOptions): Promise {
     const {
       path: acceptedPath
-    } = this.acceptedParams['streams.logs_enable']
+    } = this[kAcceptedParams]['streams.logs_enable']

     const userQuery = params?.querystring
     const querystring: Record = userQuery != null ? { ...userQuery } : {}
@@ -123,6 +137,7 @@ export default class Streams {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -130,22 +145,26 @@
     const method = 'POST'
     const path = '/_streams/logs/_enable'
     const meta: TransportRequestMetadata = {
-      name: 'streams.logs_enable'
+      name: 'streams.logs_enable',
+      acceptedParams: [
+        'master_timeout',
+        'timeout'
+      ]
     }
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

   /**
-   * Return the current status of the streams feature for each streams type
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-status.html | Elasticsearch API documentation}
+   * Get the status of streams. Get the current status for all types of streams.
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch#TODO | Elasticsearch API documentation}
    */
-  async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
-  async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise>
-  async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise
-  async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise {
+  async status (this: That, params?: T.StreamsStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise
+  async status (this: That, params?: T.StreamsStatusRequest, options?: TransportRequestOptionsWithMeta): Promise>
+  async status (this: That, params?: T.StreamsStatusRequest, options?: TransportRequestOptions): Promise
+  async status (this: That, params?: T.StreamsStatusRequest, options?: TransportRequestOptions): Promise {
     const {
       path: acceptedPath
-    } = this.acceptedParams['streams.status']
+    } = this[kAcceptedParams]['streams.status']

     const userQuery = params?.querystring
     const querystring: Record = userQuery != null ?
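// Usage sketch (illustrative, not part of the patch): per the doc comment above,
// logs_enable fails with 409 Conflict when indices or data streams matching
// `logs|logs.*` already exist. Assuming a configured `client` and
// `import { errors } from '@elastic/elasticsearch'`.
try {
  await client.streams.logsEnable()
} catch (err) {
  if (err instanceof errors.ResponseError && err.statusCode === 409) {
    console.warn('existing logs indices or data streams block enablement')
  } else {
    throw err
  }
}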
{ ...userQuery } : {} @@ -165,6 +184,7 @@ export default class Streams { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -172,7 +192,10 @@ export default class Streams { const method = 'GET' const path = '/_streams/status' const meta: TransportRequestMetadata = { - name: 'streams.status' + name: 'streams.status', + acceptedParams: [ + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index d2b3511a5..e672db93e 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Synonyms { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'synonyms.delete_synonym': { path: [ 'id' @@ -114,7 +115,7 @@ export default class Synonyms { async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['synonyms.delete_synonym'] + } = this[kAcceptedParams]['synonyms.delete_synonym'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -144,7 +145,10 @@ export default class Synonyms { name: 'synonyms.delete_synonym', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -159,7 +163,7 @@ export default class Synonyms { async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['synonyms.delete_synonym_rule'] + } = this[kAcceptedParams]['synonyms.delete_synonym_rule'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -190,7 +194,12 @@ export default class Synonyms { pathParts: { set_id: params.set_id, rule_id: params.rule_id - } + }, + acceptedParams: [ + 'set_id', + 'rule_id', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -205,7 +214,7 @@ export default class Synonyms { async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['synonyms.get_synonym'] + } = this[kAcceptedParams]['synonyms.get_synonym'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -235,7 +244,12 @@ export default class Synonyms { name: 'synonyms.get_synonym', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'from', + 'size' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -250,7 +264,7 @@ export default class Synonyms { async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['synonyms.get_synonym_rule'] + } = this[kAcceptedParams]['synonyms.get_synonym_rule'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -281,7 +295,11 @@ export default class Synonyms { pathParts: { set_id: params.set_id, rule_id: params.rule_id - } + }, + acceptedParams: [ + 'set_id', + 'rule_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -296,7 +314,7 @@ export default class Synonyms { async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['synonyms.get_synonyms_sets'] + } = this[kAcceptedParams]['synonyms.get_synonyms_sets'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -324,7 +342,11 @@ export default class Synonyms { const method = 'GET' const path = '/_synonyms' const meta: TransportRequestMetadata = { - name: 'synonyms.get_synonyms_sets' + name: 'synonyms.get_synonyms_sets', + acceptedParams: [ + 'from', + 'size' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -341,7 +363,7 @@ export default class Synonyms { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['synonyms.put_synonym'] + } = this[kAcceptedParams]['synonyms.put_synonym'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -381,7 +403,12 @@ export default class Synonyms { name: 'synonyms.put_synonym', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'synonyms_set', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -398,7 +425,7 @@ export default class Synonyms { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['synonyms.put_synonym_rule'] + } = this[kAcceptedParams]['synonyms.put_synonym_rule'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -439,7 +466,13 @@ export default class Synonyms { pathParts: { set_id: params.set_id, rule_id: params.rule_id - } + }, + acceptedParams: [ + 'set_id', + 'rule_id', + 'synonyms', + 'refresh' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index c85a53d77..48de0e23f 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -21,18 +21,19 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } export default class Tasks { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'tasks.cancel': { path: [ 'task_id' @@ -81,7 +82,7 @@ export default class Tasks { async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['tasks.cancel'] + } = this[kAcceptedParams]['tasks.cancel'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -119,7 +120,14 @@ export default class Tasks { name: 'tasks.cancel', pathParts: { task_id: params.task_id - } + }, + acceptedParams: [ + 'task_id', + 'actions', + 'nodes', + 'parent_task_id', + 'wait_for_completion' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -134,7 +142,7 @@ export default class Tasks { async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['tasks.get'] + } = this[kAcceptedParams]['tasks.get'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -164,7 +172,12 @@ export default class Tasks { name: 'tasks.get', pathParts: { task_id: params.task_id - } + }, + acceptedParams: [ + 'task_id', + 'timeout', + 'wait_for_completion' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -179,7 +192,7 @@ export default class Tasks { async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['tasks.list'] + } = this[kAcceptedParams]['tasks.list'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
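// Usage sketch (illustrative, not part of the patch): synonyms.put_synonym takes
// 'id', 'synonyms_set' and 'refresh' as listed above. Assuming a configured
// `client`; the set id and rule are hypothetical.
await client.synonyms.putSynonym({
  id: 'my-synonyms-set',
  synonyms_set: [
    { id: 'greeting', synonyms: 'hello, hi, howdy' }
  ]
})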
{ ...userQuery } : {} @@ -207,7 +220,16 @@ export default class Tasks { const method = 'GET' const path = '/_tasks' const meta: TransportRequestMetadata = { - name: 'tasks.list' + name: 'tasks.list', + acceptedParams: [ + 'actions', + 'detailed', + 'group_by', + 'nodes', + 'parent_task_id', + 'timeout', + 'wait_for_completion' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index e12d731bf..dec56df1a 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -98,7 +98,17 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque name: 'terms_enum', pathParts: { index: params.index - } + }, + acceptedParams: [ + 'index', + 'field', + 'size', + 'timeout', + 'case_insensitive', + 'index_filter', + 'string', + 'search_after' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index 7d1378924..b38dd64f2 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -124,7 +124,34 @@ export default async function TermvectorsApi (this: That, p pathParts: { index: params.index, id: params.id - } + }, + acceptedParams: [ + 'index', + 'id', + 'doc', + 'filter', + 'per_field_analyzer', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'term_statistics', + 'routing', + 'version', + 'version_type', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'preference', + 'realtime', + 'routing', + 'term_statistics', + 'version', + 'version_type' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index 4d9997c24..318cadc00 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class TextStructure { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'text_structure.find_field_structure': { path: [], body: [], @@ -120,7 +121,7 @@ export default class TextStructure { async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['text_structure.find_field_structure'] + } = this[kAcceptedParams]['text_structure.find_field_structure'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
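// Usage sketch (illustrative, not part of the patch): tasks.list supports 'actions',
// 'detailed', 'group_by', 'nodes' and the timeout flags listed above. Assuming a
// configured `client`; the action filter is hypothetical.
const running = await client.tasks.list({
  detailed: true,
  actions: '*search',
  group_by: 'parents'
})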
{ ...userQuery } : {} @@ -147,7 +148,23 @@ export default class TextStructure { const method = 'GET' const path = '/_text_structure/find_field_structure' const meta: TransportRequestMetadata = { - name: 'text_structure.find_field_structure' + name: 'text_structure.find_field_structure', + acceptedParams: [ + 'column_names', + 'delimiter', + 'documents_to_sample', + 'ecs_compatibility', + 'explain', + 'field', + 'format', + 'grok_pattern', + 'index', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -164,7 +181,7 @@ export default class TextStructure { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['text_structure.find_message_structure'] + } = this[kAcceptedParams]['text_structure.find_message_structure'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -201,7 +218,21 @@ export default class TextStructure { const method = body != null ? 'POST' : 'GET' const path = '/_text_structure/find_message_structure' const meta: TransportRequestMetadata = { - name: 'text_structure.find_message_structure' + name: 'text_structure.find_message_structure', + acceptedParams: [ + 'messages', + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -218,7 +249,7 @@ export default class TextStructure { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['text_structure.find_structure'] + } = this[kAcceptedParams]['text_structure.find_structure'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -245,7 +276,25 @@ export default class TextStructure { const method = 'POST' const path = '/_text_structure/find_structure' const meta: TransportRequestMetadata = { - name: 'text_structure.find_structure' + name: 'text_structure.find_structure', + acceptedParams: [ + 'text_files', + 'charset', + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'has_header_row', + 'line_merge_size_limit', + 'lines_to_sample', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] } return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) } @@ -262,7 +311,7 @@ export default class TextStructure { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['text_structure.test_grok_pattern'] + } = this[kAcceptedParams]['text_structure.test_grok_pattern'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -299,7 +348,12 @@ export default class TextStructure { const method = body != null ? 
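As a sketch of how the two finder endpoints above differ (the sample log lines are invented): `find_structure` streams raw lines as the request body via `text_files`, while `find_message_structure` wraps them in a JSON `messages` array:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const lines = [
  '2024-01-01T00:00:00Z INFO starting up',
  '2024-01-01T00:00:01Z WARN disk space low'
]

// Sent as a newline-delimited bulk body, matching `text_files` above.
const byFile = await client.textStructure.findStructure({ text_files: lines, lines_to_sample: 1000 })

// Sent as a JSON body, matching `messages` above.
const byMessage = await client.textStructure.findMessageStructure({ messages: lines, ecs_compatibility: 'v1' })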
'POST' : 'GET' const path = '/_text_structure/test_grok_pattern' const meta: TransportRequestMetadata = { - name: 'text_structure.test_grok_pattern' + name: 'text_structure.test_grok_pattern', + acceptedParams: [ + 'grok_pattern', + 'text', + 'ecs_compatibility' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index 91f4477b1..249a62264 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -21,20 +21,21 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Transform { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'transform.delete_transform': { path: [ 'transform_id' @@ -205,7 +206,7 @@ export default class Transform { async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['transform.delete_transform'] + } = this[kAcceptedParams]['transform.delete_transform'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -235,7 +236,13 @@ export default class Transform { name: 'transform.delete_transform', pathParts: { transform_id: params.transform_id - } + }, + acceptedParams: [ + 'transform_id', + 'force', + 'delete_dest_index', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -250,7 +257,7 @@ export default class Transform { async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['transform.get_node_stats'] + } = this[kAcceptedParams]['transform.get_node_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -277,7 +284,9 @@ export default class Transform { const method = 'GET' const path = '/_transform/_node_stats' const meta: TransportRequestMetadata = { - name: 'transform.get_node_stats' + name: 'transform.get_node_stats', + acceptedParams: [ + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -292,7 +301,7 @@ export default class Transform { async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['transform.get_transform'] + } = this[kAcceptedParams]['transform.get_transform'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
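`test_grok_pattern` rounds out the text-structure helpers; a sketch (pattern and sample line invented):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `grok_pattern` and `text` are body parameters; `ecs_compatibility` is a query parameter.
const matches = await client.textStructure.testGrokPattern({
  grok_pattern: '%{TIMESTAMP_ISO8601:ts} %{LOGLEVEL:level} %{GREEDYDATA:msg}',
  text: ['2024-01-01T00:00:00Z INFO starting up']
})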
{ ...userQuery } : {} @@ -330,7 +339,14 @@ export default class Transform { name: 'transform.get_transform', pathParts: { transform_id: params.transform_id - } + }, + acceptedParams: [ + 'transform_id', + 'allow_no_match', + 'from', + 'size', + 'exclude_generated' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -345,7 +361,7 @@ export default class Transform { async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['transform.get_transform_stats'] + } = this[kAcceptedParams]['transform.get_transform_stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -375,7 +391,14 @@ export default class Transform { name: 'transform.get_transform_stats', pathParts: { transform_id: params.transform_id - } + }, + acceptedParams: [ + 'transform_id', + 'allow_no_match', + 'from', + 'size', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -392,7 +415,7 @@ export default class Transform { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['transform.preview_transform'] + } = this[kAcceptedParams]['transform.preview_transform'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -440,7 +463,20 @@ export default class Transform { name: 'transform.preview_transform', pathParts: { transform_id: params.transform_id - } + }, + acceptedParams: [ + 'transform_id', + 'dest', + 'description', + 'frequency', + 'pivot', + 'source', + 'settings', + 'sync', + 'retention_policy', + 'latest', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -457,7 +493,7 @@ export default class Transform { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['transform.put_transform'] + } = this[kAcceptedParams]['transform.put_transform'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -497,7 +533,22 @@ export default class Transform { name: 'transform.put_transform', pathParts: { transform_id: params.transform_id - } + }, + acceptedParams: [ + 'transform_id', + 'dest', + 'description', + 'frequency', + 'latest', + '_meta', + 'pivot', + 'retention_policy', + 'settings', + 'source', + 'sync', + 'defer_validation', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -512,7 +563,7 @@ export default class Transform { async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['transform.reset_transform'] + } = this[kAcceptedParams]['transform.reset_transform'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
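The `put_transform` parameter list above corresponds to a request like this sketch (index names, the pivot and the sync settings are illustrative only):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.transform.putTransform({
  transform_id: 'ecommerce-customers',                          // path part
  source: { index: 'kibana_sample_data_ecommerce' },
  dest: { index: 'ecommerce-customers-dest' },
  pivot: {
    group_by: { customer_id: { terms: { field: 'customer_id' } } },
    aggregations: { total_spend: { sum: { field: 'taxful_total_price' } } }
  },
  sync: { time: { field: 'order_date', delay: '60s' } },
  frequency: '5m',
  defer_validation: false                                       // query parameter
})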
{ ...userQuery } : {} @@ -542,7 +593,12 @@ export default class Transform { name: 'transform.reset_transform', pathParts: { transform_id: params.transform_id - } + }, + acceptedParams: [ + 'transform_id', + 'force', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -557,7 +613,7 @@ export default class Transform { async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['transform.schedule_now_transform'] + } = this[kAcceptedParams]['transform.schedule_now_transform'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -587,7 +643,11 @@ export default class Transform { name: 'transform.schedule_now_transform', pathParts: { transform_id: params.transform_id - } + }, + acceptedParams: [ + 'transform_id', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -602,7 +662,7 @@ export default class Transform { async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['transform.set_upgrade_mode'] + } = this[kAcceptedParams]['transform.set_upgrade_mode'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -630,7 +690,11 @@ export default class Transform { const method = 'POST' const path = '/_transform/set_upgrade_mode' const meta: TransportRequestMetadata = { - name: 'transform.set_upgrade_mode' + name: 'transform.set_upgrade_mode', + acceptedParams: [ + 'enabled', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -645,7 +709,7 @@ export default class Transform { async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['transform.start_transform'] + } = this[kAcceptedParams]['transform.start_transform'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -675,7 +739,12 @@ export default class Transform { name: 'transform.start_transform', pathParts: { transform_id: params.transform_id - } + }, + acceptedParams: [ + 'transform_id', + 'timeout', + 'from' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -690,7 +759,7 @@ export default class Transform { async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['transform.stop_transform'] + } = this[kAcceptedParams]['transform.stop_transform'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
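The lifecycle endpoints above compose naturally; a sketch reusing the hypothetical transform from the previous example:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.transform.startTransform({ transform_id: 'ecommerce-customers', timeout: '30s' })
await client.transform.scheduleNowTransform({ transform_id: 'ecommerce-customers' })  // process a checkpoint immediately
await client.transform.stopTransform({ transform_id: 'ecommerce-customers', wait_for_completion: true })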
{ ...userQuery } : {} @@ -720,7 +789,15 @@ export default class Transform { name: 'transform.stop_transform', pathParts: { transform_id: params.transform_id - } + }, + acceptedParams: [ + 'transform_id', + 'allow_no_match', + 'force', + 'timeout', + 'wait_for_checkpoint', + 'wait_for_completion' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -737,7 +814,7 @@ export default class Transform { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['transform.update_transform'] + } = this[kAcceptedParams]['transform.update_transform'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -777,7 +854,20 @@ export default class Transform { name: 'transform.update_transform', pathParts: { transform_id: params.transform_id - } + }, + acceptedParams: [ + 'transform_id', + 'dest', + 'description', + 'frequency', + '_meta', + 'source', + 'settings', + 'sync', + 'retention_policy', + 'defer_validation', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -792,7 +882,7 @@ export default class Transform { async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['transform.upgrade_transforms'] + } = this[kAcceptedParams]['transform.upgrade_transforms'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -820,7 +910,11 @@ export default class Transform { const method = 'POST' const path = '/_transform/_upgrade' const meta: TransportRequestMetadata = { - name: 'transform.upgrade_transforms' + name: 'transform.upgrade_transforms', + acceptedParams: [ + 'dry_run', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/update.ts b/src/api/api/update.ts index 982b35272..5711f8f9f 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -114,7 +114,31 @@ export default async function UpdateApi + [kAcceptedParams]: Record } const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Watcher { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'watcher.ack_watch': { path: [ 'watch_id', @@ -175,7 +176,7 @@ export default class Watcher { async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['watcher.ack_watch'] + } = this[kAcceptedParams]['watcher.ack_watch'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -213,7 +214,11 @@ export default class Watcher { pathParts: { watch_id: params.watch_id, action_id: params.action_id - } + }, + acceptedParams: [ + 'watch_id', + 'action_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -228,7 +233,7 @@ export default class Watcher { async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['watcher.activate_watch'] + } = this[kAcceptedParams]['watcher.activate_watch'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -258,7 +263,10 @@ export default class Watcher { name: 'watcher.activate_watch', pathParts: { watch_id: params.watch_id - } + }, + acceptedParams: [ + 'watch_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -273,7 +281,7 @@ export default class Watcher { async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['watcher.deactivate_watch'] + } = this[kAcceptedParams]['watcher.deactivate_watch'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -303,7 +311,10 @@ export default class Watcher { name: 'watcher.deactivate_watch', pathParts: { watch_id: params.watch_id - } + }, + acceptedParams: [ + 'watch_id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -318,7 +329,7 @@ export default class Watcher { async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['watcher.delete_watch'] + } = this[kAcceptedParams]['watcher.delete_watch'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -348,7 +359,10 @@ export default class Watcher { name: 'watcher.delete_watch', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -365,7 +379,7 @@ export default class Watcher { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['watcher.execute_watch'] + } = this[kAcceptedParams]['watcher.execute_watch'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -413,7 +427,18 @@ export default class Watcher { name: 'watcher.execute_watch', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'action_modes', + 'alternative_input', + 'ignore_condition', + 'record_execution', + 'simulated_actions', + 'trigger_data', + 'watch', + 'debug' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -428,7 +453,7 @@ export default class Watcher { async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['watcher.get_settings'] + } = this[kAcceptedParams]['watcher.get_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
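`execute_watch` is the one watcher endpoint above that takes a rich body; a sketch of running an inline (unsaved) watch, with an invented definition:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `watch` and `trigger_data` are body parameters from the list above; `debug` is a query parameter.
const execution = await client.watcher.executeWatch({
  watch: {
    trigger: { schedule: { interval: '10m' } },
    input: { simple: { payload: 'ok' } },
    condition: { always: {} },
    actions: { log_it: { logging: { text: 'watch fired' } } }
  }
})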
{ ...userQuery } : {} @@ -456,7 +481,10 @@ export default class Watcher { const method = 'GET' const path = '/_watcher/settings' const meta: TransportRequestMetadata = { - name: 'watcher.get_settings' + name: 'watcher.get_settings', + acceptedParams: [ + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -471,7 +499,7 @@ export default class Watcher { async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['watcher.get_watch'] + } = this[kAcceptedParams]['watcher.get_watch'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -501,7 +529,10 @@ export default class Watcher { name: 'watcher.get_watch', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -518,7 +549,7 @@ export default class Watcher { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['watcher.put_watch'] + } = this[kAcceptedParams]['watcher.put_watch'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -558,7 +589,22 @@ export default class Watcher { name: 'watcher.put_watch', pathParts: { id: params.id - } + }, + acceptedParams: [ + 'id', + 'actions', + 'condition', + 'input', + 'metadata', + 'throttle_period', + 'throttle_period_in_millis', + 'transform', + 'trigger', + 'active', + 'if_primary_term', + 'if_seq_no', + 'version' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -575,7 +621,7 @@ export default class Watcher { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['watcher.query_watches'] + } = this[kAcceptedParams]['watcher.query_watches'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -613,7 +659,14 @@ export default class Watcher { const method = body != null ? 'POST' : 'GET' const path = '/_watcher/_query/watches' const meta: TransportRequestMetadata = { - name: 'watcher.query_watches' + name: 'watcher.query_watches', + acceptedParams: [ + 'from', + 'size', + 'query', + 'sort', + 'search_after' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -628,7 +681,7 @@ export default class Watcher { async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['watcher.start'] + } = this[kAcceptedParams]['watcher.start'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -656,7 +709,10 @@ export default class Watcher { const method = 'POST' const path = '/_watcher/_start' const meta: TransportRequestMetadata = { - name: 'watcher.start' + name: 'watcher.start', + acceptedParams: [ + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -671,7 +727,7 @@ export default class Watcher { async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['watcher.stats'] + } = this[kAcceptedParams]['watcher.stats'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
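`put_watch` and `query_watches` pair up for registration and paginated listing; a sketch (the watch ID and definition are invented):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.watcher.putWatch({
  id: 'cluster-health-watch',                                   // path part
  trigger: { schedule: { interval: '1m' } },
  input: { http: { request: { host: 'localhost', port: 9200, path: '/_cluster/health' } } },
  condition: { compare: { 'ctx.payload.status': { eq: 'red' } } },
  actions: { log_error: { logging: { text: 'cluster is red' } } },
  active: true                                                  // query parameter
})

// Paginated listing via `from`, `size`, `query`, `sort` and `search_after`.
const page = await client.watcher.queryWatches({ from: 0, size: 10 })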
{ ...userQuery } : {} @@ -709,7 +765,12 @@ export default class Watcher { name: 'watcher.stats', pathParts: { metric: params.metric - } + }, + acceptedParams: [ + 'metric', + 'emit_stacktraces', + 'metric' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -724,7 +785,7 @@ export default class Watcher { async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['watcher.stop'] + } = this[kAcceptedParams]['watcher.stop'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -752,7 +813,10 @@ export default class Watcher { const method = 'POST' const path = '/_watcher/_stop' const meta: TransportRequestMetadata = { - name: 'watcher.stop' + name: 'watcher.stop', + acceptedParams: [ + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -769,7 +833,7 @@ export default class Watcher { path: acceptedPath, body: acceptedBody, query: acceptedQuery - } = this.acceptedParams['watcher.update_settings'] + } = this[kAcceptedParams]['watcher.update_settings'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -807,7 +871,13 @@ export default class Watcher { const method = 'PUT' const path = '/_watcher/settings' const meta: TransportRequestMetadata = { - name: 'watcher.update_settings' + name: 'watcher.update_settings', + acceptedParams: [ + 'index.auto_expand_replicas', + 'index.number_of_replicas', + 'master_timeout', + 'timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts index 858e52869..1c14a1314 100644 --- a/src/api/api/xpack.ts +++ b/src/api/api/xpack.ts @@ -21,18 +21,19 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' +import { kAcceptedParams } from '../../client' interface That { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record } export default class Xpack { transport: Transport - acceptedParams: Record + [kAcceptedParams]: Record constructor (transport: Transport) { this.transport = transport - this.acceptedParams = { + this[kAcceptedParams] = { 'xpack.info': { path: [], body: [], @@ -62,7 +63,7 @@ export default class Xpack { async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['xpack.info'] + } = this[kAcceptedParams]['xpack.info'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -90,7 +91,12 @@ export default class Xpack { const method = 'GET' const path = '/_xpack' const meta: TransportRequestMetadata = { - name: 'xpack.info' + name: 'xpack.info', + acceptedParams: [ + 'categories', + 'accept_enterprise', + 'human' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } @@ -105,7 +111,7 @@ export default class Xpack { async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this.acceptedParams['xpack.usage'] + } = this[kAcceptedParams]['xpack.usage'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -133,7 +139,10 @@ export default class Xpack { const method = 'GET' const path = '/_xpack/usage' const meta: TransportRequestMetadata = { - name: 'xpack.usage' + name: 'xpack.usage', + acceptedParams: [ + 'master_timeout' + ] } return await this.transport.request({ path, method, querystring, body, meta }, options) } diff --git a/src/api/index.ts b/src/api/index.ts index 90a71f688..65ffbd45e 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -65,6 +65,7 @@ import NodesApi from './api/nodes' import openPointInTimeApi from './api/open_point_in_time' import pingApi from './api/ping' import ProfilingApi from './api/profiling' +import ProjectApi from './api/project' import putScriptApi from './api/put_script' import QueryRulesApi from './api/query_rules' import rankEvalApi from './api/rank_eval' @@ -155,6 +156,7 @@ export default interface API { openPointInTime: typeof openPointInTimeApi ping: typeof pingApi profiling: ProfilingApi + project: ProjectApi putScript: typeof putScriptApi queryRules: QueryRulesApi rankEval: typeof rankEvalApi @@ -215,6 +217,7 @@ const kMl = Symbol('Ml') const kMonitoring = Symbol('Monitoring') const kNodes = Symbol('Nodes') const kProfiling = Symbol('Profiling') +const kProject = Symbol('Project') const kQueryRules = Symbol('QueryRules') const kRollup = Symbol('Rollup') const kSearchApplication = Symbol('SearchApplication') @@ -259,6 +262,7 @@ export default class API { [kMonitoring]: symbol | null [kNodes]: symbol | null [kProfiling]: symbol | null + [kProject]: symbol | null [kQueryRules]: symbol | null [kRollup]: symbol | null [kSearchApplication]: symbol | null @@ -302,6 +306,7 @@ export default class API { this[kMonitoring] = null this[kNodes] = null this[kProfiling] = null + this[kProject] = null this[kQueryRules] = null this[kRollup] = null this[kSearchApplication] = null @@ -442,6 +447,9 @@ Object.defineProperties(API.prototype, { profiling: { get () { return this[kProfiling] === null ? (this[kProfiling] = new ProfilingApi(this.transport)) : this[kProfiling] } }, + project: { + get () { return this[kProject] === null ? (this[kProject] = new ProjectApi(this.transport)) : this[kProject] } + }, queryRules: { get () { return this[kQueryRules] === null ? (this[kQueryRules] = new QueryRulesApi(this.transport)) : this[kQueryRules] } }, diff --git a/src/api/types.ts b/src/api/types.ts index 306cd712d..1db7f01e4 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -256,6 +256,15 @@ export interface CountRequest extends RequestBase { /** The node or shard the operation should be performed on. * By default, it is random. */ preference?: string + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** A custom value used to route operations to a specific shard. */ routing?: Routing /** The maximum number of documents to collect for each shard. @@ -273,9 +282,9 @@ export interface CountRequest extends RequestBase { * with the `q` query string parameter. */ query?: QueryDslQueryContainer /** All values in `body` will be added to the request body. 
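The `project_routing` parameter introduced here accepts the small Lucene subset described in the comment; on serverless a scoped count looks like this sketch (the endpoint, API key and index are placeholders):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://example.es.io/', auth: { apiKey: 'REDACTED' } })

// Count only in projects whose alias matches `*logs*`.
const { count } = await client.count({
  index: 'my-index',
  project_routing: '_alias:*logs*',
  query: { match: { message: 'error' } }
})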
*/ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, routing?: never, terminate_after?: never, q?: never, query?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, project_routing?: never, routing?: never, terminate_after?: never, q?: never, query?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, routing?: never, terminate_after?: never, q?: never, query?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, project_routing?: never, routing?: never, terminate_after?: never, q?: never, query?: never } } export interface CountResponse { @@ -748,6 +757,15 @@ export interface FieldCapsRequest extends RequestBase { types?: string[] /** If false, empty fields are not included in the response. */ include_empty_fields?: boolean + /** Specifies a subset of projects to target for the field-caps query using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. */ fields?: Fields /** Filter indices if the provided query rewrites to `match_none` on every shard. @@ -760,9 +778,9 @@ export interface FieldCapsRequest extends RequestBase { * These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. */ runtime_mappings?: MappingRuntimeFields /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, fields?: never, index_filter?: never, runtime_mappings?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, project_routing?: never, fields?: never, index_filter?: never, runtime_mappings?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, fields?: never, index_filter?: never, runtime_mappings?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, project_routing?: never, fields?: never, index_filter?: never, runtime_mappings?: never } } export interface FieldCapsResponse { @@ -1305,6 +1323,7 @@ export interface MsearchMultisearchHeader { ignore_unavailable?: boolean index?: Indices preference?: string + project_routing?: ProjectRouting request_cache?: boolean routing?: Routing search_type?: SearchType @@ -1340,6 +1359,14 @@ export interface MsearchRequest extends RequestBase { max_concurrent_shard_requests?: integer /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */ pre_filter_shard_size?: long + /** Specifies a subset of projects to target for a search using project metadata + * tags in a subset of Lucene query syntax. Allowed Lucene queries: the _alias tag + * and a single value (possibly wildcarded). Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. */ rest_total_hits_as_int?: boolean /** Custom routing value used to route search operations to a specific shard. */ routing?: Routing /** The type of the search operation. */ search_type?: SearchType /** Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. */ typed_keys?: boolean searches?: MsearchRequestItem[] /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, project_routing?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never } /** All values in `querystring` will be added to the request querystring.
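Because `MsearchMultisearchHeader` also gains `project_routing`, each search in a multi-search can be routed independently; a sketch (endpoint, API key and index are placeholders):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://example.es.io/', auth: { apiKey: 'REDACTED' } })

const result = await client.msearch({
  searches: [
    { index: 'logs', project_routing: '_alias:_origin' },  // header: only the originating project
    { query: { match_all: {} } },                          // body
    { index: 'logs', project_routing: '_alias:*prod*' },   // header: any project aliased *prod*
    { query: { match: { level: 'error' } } }               // body
  ]
})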
*/ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, project_routing?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never } } export type MsearchRequestItem = MsearchMultisearchHeader | SearchSearchRequestBody @@ -1370,6 +1397,15 @@ export interface MsearchTemplateRequest extends RequestBase { ccs_minimize_roundtrips?: boolean /** The maximum number of concurrent searches the API can run. */ max_concurrent_searches?: long + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** The type of the search operation. */ search_type?: SearchType /** If `true`, the response returns `hits.total` as an integer. @@ -1379,9 +1415,9 @@ export interface MsearchTemplateRequest extends RequestBase { typed_keys?: boolean search_templates?: MsearchTemplateRequestItem[] /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, ccs_minimize_roundtrips?: never, max_concurrent_searches?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, search_templates?: never } + body?: string | { [key: string]: any } & { index?: never, ccs_minimize_roundtrips?: never, max_concurrent_searches?: never, project_routing?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, search_templates?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, ccs_minimize_roundtrips?: never, max_concurrent_searches?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, search_templates?: never } + querystring?: { [key: string]: any } & { index?: never, ccs_minimize_roundtrips?: never, max_concurrent_searches?: never, project_routing?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, search_templates?: never } } export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig @@ -1497,6 +1533,15 @@ export interface OpenPointInTimeRequest extends RequestBase { /** The node or shard the operation should be performed on. * By default, it is random. */ preference?: string + /** Specifies a subset of projects to target for the PIT request using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). 
+ * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** A custom value that is used to route operations to a specific shard. */ routing?: Routing /** The type of index that wildcard patterns can match. @@ -1512,9 +1557,9 @@ export interface OpenPointInTimeRequest extends RequestBase { /** Filter indices if the provided query rewrites to `match_none` on every shard. */ index_filter?: QueryDslQueryContainer /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never } + body?: string | { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, project_routing?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never } + querystring?: { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, project_routing?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never } } export interface OpenPointInTimeResponse { @@ -2038,6 +2083,15 @@ export interface SearchRequest extends RequestBase { * * The request targets one or more read-only index. * * The primary sort of the query targets an indexed field. */ pre_filter_shard_size?: long + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** If `true`, the caching of search results is enabled for requests where `size` is `0`. * It defaults to index level settings. */ request_cache?: boolean @@ -2195,9 +2249,9 @@ export interface SearchRequest extends RequestBase { * You can retrieve these stats using the indices stats API. */ stats?: string[] /** All values in `body` will be added to the request body. 
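On the plain `search` API the parameter behaves the same way; a sketch:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://example.es.io/', auth: { apiKey: 'REDACTED' } })

const hits = await client.search({
  index: 'logs',
  project_routing: '_alias:my-project',   // serverless only, sent in the querystring
  query: { match: { message: 'timeout' } }
})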
*/ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_exclude_vectors?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, project_routing?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_exclude_vectors?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_exclude_vectors?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, project_routing?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_exclude_vectors?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } } export type SearchResponse> = SearchResponseBody @@ -2804,6 +2858,7 @@ export interface SearchRescore { window_size?: integer query?: SearchRescoreQuery learning_to_rank?: SearchLearningToRank + script?: SearchScriptRescore } export interface SearchRescoreQuery { @@ -2820,6 +2875,10 @@ export interface SearchRescoreQuery { export type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total' +export interface SearchScriptRescore { + script: Script | ScriptSource +} + export interface SearchSearchProfile { collector: SearchCollector[] 
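`SearchRescore` now accepts a `script` variant next to `query` and `learning_to_rank`; a sketch of rescoring the top window with an inline script (the `popularity` field is invented, and the exact script context is whatever the server-side script rescorer defines):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const rescored = await client.search({
  index: 'products',
  query: { match: { name: 'laptop' } },
  rescore: {
    window_size: 50,                      // only the top hits per shard are rescored
    script: {
      script: { source: "Math.log1p(doc['popularity'].value)" }
    }
  }
})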
query: SearchQueryProfile[] @@ -3097,6 +3156,15 @@ export interface SearchMvtRequest extends RequestBase { x: SearchMvtCoordinate /** Y coordinate for the vector tile to search */ y: SearchMvtCoordinate + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** Sub-aggregations for the geotile_grid. * * It supports the following aggregation types: @@ -3171,9 +3239,9 @@ export interface SearchMvtRequest extends RequestBase { * In addition, the new features will be distinguishable using the tag `_mvt_label_position`. */ with_labels?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never } + body?: string | { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, project_routing?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never } + querystring?: { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, project_routing?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never } } export type SearchMvtResponse = MapboxVectorTiles @@ -3266,6 +3334,15 @@ export interface SearchTemplateRequest extends RequestBase { /** The node or shard the operation should be performed on. * It is random by default. */ preference?: string + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** A custom value used to route operations to a specific shard. */ routing?: Routing /** Specifies how long a consistent view of the index @@ -3295,9 +3372,9 @@ export interface SearchTemplateRequest extends RequestBase { * parameter is required. */ source?: ScriptSource /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, project_routing?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, project_routing?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } } export interface SearchTemplateResponse { @@ -3748,6 +3825,13 @@ export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb' export type CategoryId = string +export interface ChunkRescorer { + /** The number of chunks per document to evaluate for reranking. */ + size?: integer + /** Chunking settings to apply */ + chunking_settings?: MappingChunkRescorerChunkingSettings +} + export type ClusterAlias = string export interface ClusterDetails { @@ -4289,6 +4373,8 @@ export interface PluginStats { licensed: boolean } +export type ProjectRouting = string + export type PropertyName = string export interface QueryCacheStats { @@ -4706,10 +4792,13 @@ export interface TextSimilarityReranker extends RetrieverBase { rank_window_size?: integer /** Unique identifier of the inference endpoint created using the inference API. */ inference_id?: string - /** The text snippet used as the basis for similarity comparison */ + /** The text snippet used as the basis for similarity comparison. */ inference_text: string - /** The document field to be used for text similarity comparisons. This field should contain the text that will be evaluated against the inference_text */ + /** The document field to be used for text similarity comparisons. This field should contain the text that will be evaluated against the inference_text. */ field: string + /** Whether to rescore on only the best matching chunks. + * @beta */ + chunk_rescorer?: ChunkRescorer } export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' @@ -4808,6 +4897,8 @@ export interface WriteResponseBase { _shards: ShardStatistics /** The document version, which is incremented each time the document is updated. 
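Combining the new `chunk_rescorer` (beta, per the annotation above) with a `text_similarity_reranker` retriever; the inference endpoint, index and field names are placeholders:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const reranked = await client.search({
  index: 'articles',
  retriever: {
    text_similarity_reranker: {
      retriever: { standard: { query: { match: { body: 'vector databases' } } } },
      inference_id: 'my-rerank-endpoint',
      inference_text: 'vector databases',
      field: 'body',
      rank_window_size: 100,
      chunk_rescorer: {
        size: 3,                          // best-matching chunks per document
        chunking_settings: { strategy: 'sentence', max_chunk_size: 40, sentence_overlap: 1 }
      }
    }
  }
})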
*/ _version: VersionNumber + /** The role of the failure store in this document response */ + failure_store?: BulkFailureStoreStatus forced_refresh?: boolean } @@ -5288,7 +5379,7 @@ export interface AggregationsCompositeAggregation extends AggregationsBucketAggr size?: integer /** The value sources used to build composite buckets. * Keys are returned in the order of the `sources` definition. */ - sources?: Record<string, AggregationsCompositeAggregationSource>[] + sources?: Partial<Record<string, AggregationsCompositeAggregationSource>>[] } export interface AggregationsCompositeAggregationBase { @@ -6111,6 +6202,14 @@ export interface AggregationsNormalizeAggregation extends AggregationsPipelineAg export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax' +export interface AggregationsPValueHeuristic { + background_is_superset?: boolean + /** Whether the results should be normalized when above the given value. + * Allows for consistent significance results at various scales. + * Note: `0` is a special value which means no normalization. */ + normalize_above?: long +} + export interface AggregationsParentAggregateKeys extends AggregationsSingleBucketAggregateBase { } export type AggregationsParentAggregate = AggregationsParentAggregateKeys @@ -6348,6 +6447,14 @@ export interface AggregationsSignificantTermsAggregation extends AggregationsBuc percentage?: AggregationsPercentageScoreHeuristic /** Customized score, implemented via a script. */ script_heuristic?: AggregationsScriptedHeuristic + /** Significant terms heuristic that calculates the p-value between the term existing in foreground and background sets. + * + * The p-value is the probability of obtaining test results at least as extreme as + * the results actually observed, under the assumption that the null hypothesis is + * correct. The p-value is calculated assuming that the foreground set and the + * background set are independent Bernoulli trials (https://en.wikipedia.org/wiki/Bernoulli_trial), with the null + * hypothesis that the probabilities are the same. */ + p_value?: AggregationsPValueHeuristic /** Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. * Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ shard_min_doc_count?: long @@ -7947,6 +8054,47 @@ export interface MappingByteNumberProperty extends MappingNumberPropertyBase { null_value?: byte } +export interface MappingChunkRescorerChunkingSettings { + /** The chunking strategy: `sentence`, `word`, `none` or `recursive`. + * + * * If `strategy` is set to `recursive`, you must also specify: + * + * - `max_chunk_size` + * - either `separators` or `separator_group` + * + * Learn more about different chunking strategies in the linked documentation. */ + strategy?: string + /** Only applicable to the `recursive` strategy and required when using it. + * + * Sets a predefined list of separators in the saved chunking settings based on the selected text type. + * Values can be `markdown` or `plaintext`. + * + * Using this parameter is an alternative to manually specifying a custom `separators` list. */ + separator_group?: string + /** Only applicable to the `recursive` strategy and required when using it. + * + * A list of strings used as possible split points when chunking text. + * + * Each string can be a plain string or a regular expression (regex) pattern. + * The system tries each separator in order to split the text, starting from the first item in the list.
+ * + * After splitting, it attempts to recombine smaller pieces into larger chunks that stay within + * the `max_chunk_size` limit, to reduce the total number of chunks generated. */ + separators?: string[] + /** The maximum size of a chunk in words. + * This value cannot be lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). + * This value should not exceed the window size for the associated model. */ + max_chunk_size: integer + /** The number of overlapping words for chunks. + * It is applicable only to a `word` chunking strategy. + * This value cannot be higher than half the `max_chunk_size` value. */ + overlap?: integer + /** The number of overlapping sentences for chunks. + * It is applicable only for a `sentence` chunking strategy. + * It can be either `1` or `0`. */ + sentence_overlap?: integer +} + export interface MappingChunkingSettings { /** The chunking strategy: `sentence`, `word`, `none` or `recursive`. * @@ -8513,6 +8661,9 @@ export interface MappingSemanticTextProperty { * chunking settings sent in the inference endpoint associated with inference_id. If chunking settings are updated, * they will not be applied to existing documents until they are reindexed. */ chunking_settings?: MappingChunkingSettings + /** Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one + * field for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers. */ + fields?: Record } export interface MappingShapeProperty extends MappingDocValuesPropertyBase { @@ -10115,6 +10266,15 @@ export interface AsyncSearchSubmitRequest extends RequestBase { max_concurrent_shard_requests?: integer /** Specify the node or shard the operation should be performed on (default: random) */ preference?: string + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** Specify if request cache should be used for this request or not, defaults to true */ request_cache?: boolean /** A comma-separated list of specific routing values */ @@ -10219,9 +10379,9 @@ export interface AsyncSearchSubmitRequest extends RequestBase { * the indices stats API. */ stats?: string[] /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + body?: string | { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, project_routing?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + querystring?: { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, project_routing?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } } export type AsyncSearchSubmitResponse> = AsyncSearchAsyncSearchDocumentResponseBase @@ -10602,8 +10762,6 @@ export interface CatAllocationAllocationRecord { export interface CatAllocationRequest extends CatCatRequestBase { /** A comma-separated list of node identifiers or names used to limit the returned information. */ node_id?: NodeIds - /** The unit used to display byte values. */ - bytes?: Bytes /** A comma-separated list of columns names to display. It supports simple wildcards. */ h?: CatCatAllocationColumns /** List of columns that determine how the table should be sorted. @@ -10618,9 +10776,9 @@ export interface CatAllocationRequest extends CatCatRequestBase { /** Period to wait for a connection to the master node. 
*/ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { node_id?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { node_id?: never, h?: never, s?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { node_id?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { node_id?: never, h?: never, s?: never, local?: never, master_timeout?: never } } export type CatAllocationResponse = CatAllocationAllocationRecord[] @@ -10701,14 +10859,23 @@ export interface CatCountRequest extends CatCatRequestBase { index?: Indices /** A comma-separated list of columns names to display. It supports simple wildcards. */ h?: CatCatCountColumns + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, h?: never, s?: never } + body?: string | { [key: string]: any } & { index?: never, h?: never, project_routing?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, h?: never, s?: never } + querystring?: { [key: string]: any } & { index?: never, h?: never, project_routing?: never, s?: never } } export type CatCountResponse = CatCountCountRecord[] @@ -10741,8 +10908,6 @@ export interface CatFielddataRequest extends CatCatRequestBase { /** Comma-separated list of fields used to limit returned information. * To retrieve all fields, omit this parameter. */ fields?: Fields - /** The unit used to display byte values. */ - bytes?: Bytes /** A comma-separated list of columns names to display. It supports simple wildcards. */ h?: CatCatFieldDataColumns /** List of columns that determine how the table should be sorted. @@ -10750,9 +10915,9 @@ export interface CatFielddataRequest extends CatCatRequestBase { * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { fields?: never, bytes?: never, h?: never, s?: never } + body?: string | { [key: string]: any } & { fields?: never, h?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { fields?: never, bytes?: never, h?: never, s?: never } + querystring?: { [key: string]: any } & { fields?: never, h?: never, s?: never } } export type CatFielddataResponse = CatFielddataFielddataRecord[] @@ -10896,8 +11061,6 @@ export interface CatHealthHealthRecord { } export interface CatHealthRequest extends CatCatRequestBase { - /** The unit used to display time values. */ - time?: TimeUnit /** If true, returns `HH:MM:SS` and Unix epoch timestamps. 
*/ ts?: boolean /** A comma-separated list of columns names to display. It supports simple wildcards. */ @@ -10907,9 +11070,9 @@ export interface CatHealthRequest extends CatCatRequestBase { * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { time?: never, ts?: never, h?: never, s?: never } + body?: string | { [key: string]: any } & { ts?: never, h?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { time?: never, ts?: never, h?: never, s?: never } + querystring?: { [key: string]: any } & { ts?: never, h?: never, s?: never } } export type CatHealthResponse = CatHealthHealthRecord[] @@ -11653,8 +11816,6 @@ export interface CatIndicesRequest extends CatCatRequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** The unit used to display byte values. */ - bytes?: Bytes /** The type of index that wildcard patterns can match. */ expand_wildcards?: ExpandWildcards /** The health status used to limit returned indices. By default, the response includes indices of any health status. */ @@ -11663,8 +11824,6 @@ export interface CatIndicesRequest extends CatCatRequestBase { include_unloaded_segments?: boolean /** If true, the response only includes information from primary shards. */ pri?: boolean - /** The unit used to display time values. */ - time?: TimeUnit /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** A comma-separated list of columns names to display. It supports simple wildcards. */ @@ -11674,9 +11833,9 @@ export interface CatIndicesRequest extends CatCatRequestBase { * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never, h?: never, s?: never } + body?: string | { [key: string]: any } & { index?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, master_timeout?: never, h?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never, h?: never, s?: never } + querystring?: { [key: string]: any } & { index?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, master_timeout?: never, h?: never, s?: never } } export type CatIndicesResponse = CatIndicesIndicesRecord[] @@ -11835,19 +11994,15 @@ export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { id?: Id /** Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) */ allow_no_match?: boolean - /** The unit in which to display byte values */ - bytes?: Bytes /** Comma-separated list of column names to display. */ h?: CatCatDfaColumns /** Comma-separated list of column names or column aliases used to sort the * response. 
*/ s?: CatCatDfaColumns - /** Unit used to display time values. */ - time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never } + body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, h?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never } + querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, h?: never, s?: never } } export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] @@ -11968,12 +12123,10 @@ export interface CatMlDatafeedsRequest extends CatCatRequestBase { h?: CatCatDatafeedColumns /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatDatafeedColumns - /** The unit used to display time values. */ - time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, h?: never, s?: never, time?: never } + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, h?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, h?: never, s?: never, time?: never } + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, h?: never, s?: never } } export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] @@ -12525,18 +12678,14 @@ export interface CatMlJobsRequest extends CatCatRequestBase { * are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial * matches. */ allow_no_match?: boolean - /** The unit used to display byte values. */ - bytes?: Bytes /** Comma-separated list of column names to display. */ h?: CatCatAnomalyDetectorColumns /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatAnomalyDetectorColumns - /** The unit used to display time values. */ - time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never } + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, h?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never } + querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, h?: never, s?: never } } export type CatMlJobsResponse = CatMlJobsJobsRecord[] @@ -12548,8 +12697,6 @@ export interface CatMlTrainedModelsRequest extends CatCatRequestBase { * If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. * If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean - /** The unit used to display byte values. */ - bytes?: Bytes /** A comma-separated list of column names to display. 
*/ h?: CatCatTrainedModelsColumns /** A comma-separated list of column names or aliases used to sort the response. */ @@ -12558,12 +12705,10 @@ export interface CatMlTrainedModelsRequest extends CatCatRequestBase { from?: integer /** The maximum number of transforms to display. */ size?: integer - /** Unit used to display time values. */ - time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, from?: never, size?: never, time?: never } + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, h?: never, s?: never, from?: never, size?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, from?: never, size?: never, time?: never } + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, h?: never, s?: never, from?: never, size?: never } } export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[] @@ -13458,10 +13603,8 @@ export interface CatNodesNodesRecord { } export interface CatNodesRequest extends CatCatRequestBase { - /** The unit used to display byte values. */ - bytes?: Bytes /** If `true`, return the full node ID. If `false`, return the shortened node ID. */ - full_id?: boolean | string + full_id?: boolean /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean /** A comma-separated list of columns names to display. @@ -13473,12 +13616,10 @@ export interface CatNodesRequest extends CatCatRequestBase { s?: Names /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** The unit used to display time values. */ - time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never, time?: never } + body?: string | { [key: string]: any } & { full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never, time?: never } + querystring?: { [key: string]: any } & { full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never } } export type CatNodesResponse = CatNodesNodesRecord[] @@ -13520,12 +13661,10 @@ export interface CatPendingTasksRequest extends CatCatRequestBase { local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration - /** Unit used to display time values. */ - time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never, time?: never } + body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never, time?: never } + querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } } export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] @@ -13732,8 +13871,6 @@ export interface CatRecoveryRequest extends CatCatRequestBase { index?: Indices /** If `true`, the response only includes ongoing shard recoveries. */ active_only?: boolean - /** The unit used to display byte values. */ - bytes?: Bytes /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean /** A comma-separated list of columns names to display. @@ -13743,12 +13880,10 @@ export interface CatRecoveryRequest extends CatCatRequestBase { * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names - /** The unit used to display time values. */ - time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, h?: never, s?: never, time?: never } + body?: string | { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, h?: never, s?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, h?: never, s?: never, time?: never } + querystring?: { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, h?: never, s?: never } } export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] @@ -13793,8 +13928,6 @@ export interface CatSegmentsRequest extends CatCatRequestBase { * Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** The unit used to display byte values. */ - bytes?: Bytes /** A comma-separated list of columns names to display. * It supports simple wildcards. */ h?: CatCatSegmentsColumns @@ -13810,9 +13943,9 @@ export interface CatSegmentsRequest extends CatCatRequestBase { /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { index?: never, h?: never, s?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { index?: never, h?: never, s?: never, local?: never, master_timeout?: never } } export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] @@ -13960,8 +14093,6 @@ export interface CatShardsRequest extends CatCatRequestBase { * Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** The unit used to display byte values. */ - bytes?: Bytes /** List of columns to appear in the response. Supports simple wildcards. */ h?: CatCatShardColumns /** A comma-separated list of column names or aliases that determines the sort order. 
@@ -13970,12 +14101,10 @@ export interface CatShardsRequest extends CatCatRequestBase { s?: Names /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** The unit used to display time values. */ - time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, master_timeout?: never, time?: never } + body?: string | { [key: string]: any } & { index?: never, h?: never, s?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, master_timeout?: never, time?: never } + querystring?: { [key: string]: any } & { index?: never, h?: never, s?: never, master_timeout?: never } } export type CatShardsResponse = CatShardsShardsRecord[] @@ -14609,12 +14738,10 @@ export interface CatSnapshotsRequest extends CatCatRequestBase { s?: Names /** Period to wait for a connection to the master node. */ master_timeout?: Duration - /** Unit used to display time values. */ - time?: TimeUnit /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { repository?: never, ignore_unavailable?: never, h?: never, s?: never, master_timeout?: never, time?: never } + body?: string | { [key: string]: any } & { repository?: never, ignore_unavailable?: never, h?: never, s?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { repository?: never, ignore_unavailable?: never, h?: never, s?: never, master_timeout?: never, time?: never } + querystring?: { [key: string]: any } & { repository?: never, ignore_unavailable?: never, h?: never, s?: never, master_timeout?: never } } export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] @@ -14729,17 +14856,15 @@ export interface CatTasksRequest extends CatCatRequestBase { * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names - /** Unit used to display time values. */ - time?: TimeUnit /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** If `true`, the request blocks until the task has completed. */ wait_for_completion?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, h?: never, s?: never, time?: never, timeout?: never, wait_for_completion?: never } + body?: string | { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, h?: never, s?: never, timeout?: never, wait_for_completion?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, h?: never, s?: never, time?: never, timeout?: never, wait_for_completion?: never } + querystring?: { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, h?: never, s?: never, timeout?: never, wait_for_completion?: never } } export type CatTasksResponse = CatTasksTasksRecord[] @@ -14893,8 +15018,6 @@ export interface CatThreadPoolRequest extends CatCatRequestBase { * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names - /** The unit used to display time values. */ - time?: TimeUnit /** If `true`, the request computes the list of selected nodes from the * local cluster state. If `false` the list of selected nodes are computed * from the cluster state of the master node. In both cases the coordinating @@ -14903,9 +15026,9 @@ export interface CatThreadPoolRequest extends CatCatRequestBase { /** The period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, time?: never, local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, local?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, time?: never, local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, local?: never, master_timeout?: never } } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -15029,14 +15152,12 @@ export interface CatTransformsRequest extends CatCatRequestBase { h?: CatCatTransformColumns /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatTransformColumns - /** The unit used to display time values. */ - time?: TimeUnit /** The maximum number of transforms to obtain. */ size?: integer /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, h?: never, s?: never, time?: never, size?: never } + body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, h?: never, s?: never, size?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, h?: never, s?: never, time?: never, size?: never } + querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, h?: never, s?: never, size?: never } } export type CatTransformsResponse = CatTransformsTransformsRecord[] @@ -17984,6 +18105,15 @@ export interface EqlSearchRequest extends RequestBase { ccs_minimize_roundtrips?: boolean /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). 
+ * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** EQL query you wish to run. */ query: string case_sensitive?: boolean @@ -18019,9 +18149,9 @@ export interface EqlSearchRequest extends RequestBase { * `max_samples_per_key` parameter. Pipes are not supported for sample queries. */ max_samples_per_key?: integer /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, project_routing?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, project_routing?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } } export type EqlSearchResponse = EqlEqlSearchResponseBase @@ -18039,6 +18169,8 @@ export interface EsqlAsyncEsqlResult extends EsqlEsqlResult { is_running: boolean } +export type EsqlESQLParam = FieldValue | FieldValue[] + export interface EsqlEsqlClusterDetails { status: EsqlEsqlClusterStatus indices: string @@ -18145,10 +18277,15 @@ export interface EsqlAsyncQueryRequest extends RequestBase { /** Tables to use with the LOOKUP operation. The top level key is the table * name and the next level key is the column name. 
*/ tables?: Record> - /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` + /** When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` * object with information about the clusters that participated in the search along with info such as shards * count. */ include_ccs_metadata?: boolean + /** When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` + * object with information about the clusters that participated in the search along with info such as shards + * count. + * @alias include_ccs_metadata */ + include_execution_metadata?: boolean /** The period to wait for the request to finish. * By default, the request waits for 1 second for the query results. * If the query completes during this period, results are returned @@ -18163,9 +18300,9 @@ export interface EsqlAsyncQueryRequest extends RequestBase { * If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. */ keep_on_completion?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never } + body?: string | { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, include_execution_metadata?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never } + querystring?: { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, include_execution_metadata?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never } } export type EsqlAsyncQueryResponse = EsqlAsyncEsqlResult @@ -18284,7 +18421,7 @@ export interface EsqlQueryRequest extends RequestBase { filter?: QueryDslQueryContainer locale?: string /** To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */ - params?: FieldValue[] + params?: EsqlESQLParam[] /** If provided and `true` the response will include an extra `profile` object * with information on how the query was executed. 
This information is for human debugging * and its format can change at any time but it can give some insight into the performance @@ -18295,14 +18432,19 @@ export interface EsqlQueryRequest extends RequestBase { /** Tables to use with the LOOKUP operation. The top level key is the table * name and the next level key is the column name. */ tables?: Record> - /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` + /** When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` * object with information about the clusters that participated in the search along with info such as shards * count. */ include_ccs_metadata?: boolean + /** When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` + * object with information about the clusters that participated in the search along with info such as shards + * count. + * @alias include_ccs_metadata */ + include_execution_metadata?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } + body?: string | { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, include_execution_metadata?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } + querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, include_execution_metadata?: never } } export type EsqlQueryResponse = EsqlEsqlResult @@ -18775,6 +18917,8 @@ export interface IlmExplainLifecycleLifecycleExplainManaged { action_time?: DateTime action_time_millis?: EpochTime age?: Duration + /** @remarks This property is not supported on Elastic Cloud Serverless. */ + age_in_millis?: DurationValue failed_step?: Name failed_step_retry_count?: integer index: IndexName @@ -21757,10 +21901,19 @@ export interface IndicesResolveIndexRequest extends RequestBase { allow_no_indices?: boolean /** Filter indices by index mode - standard, lookup, time_series, etc. Comma-separated list of IndexMode. Empty means no filter. */ mode?: IndicesIndexMode | IndicesIndexMode[] + /** Specifies a subset of projects to target using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never, mode?: never } + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never, mode?: never, project_routing?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never, mode?: never } + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never, mode?: never, project_routing?: never } } export interface IndicesResolveIndexResolveIndexAliasItem { @@ -23229,6 +23382,10 @@ export interface InferenceGoogleVertexAIServiceSettings { rate_limit?: InferenceRateLimitSetting /** A valid service account in JSON format for the Google Vertex AI API. */ service_account_json: string + /** For a `text_embedding` task, the number of dimensions the resulting output embeddings should have. + * By default, the model's standard output dimension is used. + * Refer to the Google documentation for more information. */ + dimensions?: integer } export type InferenceGoogleVertexAIServiceType = 'googlevertexai' @@ -24904,6 +25061,8 @@ export interface IngestFailProcessor extends IngestProcessorBase { message: string } +export type IngestFieldAccessPattern = 'classic' | 'flexible' + export type IngestFingerprintDigest = 'MD5' | 'SHA-1' | 'SHA-256' | 'SHA-512' | 'MurmurHash3' export interface IngestFingerprintProcessor extends IngestProcessorBase { @@ -25217,6 +25376,8 @@ export interface IngestPipeline { modified_date?: DateTime /** Date and time when the pipeline was last modified, in milliseconds since the epoch. */ modified_date_millis?: EpochTime + /** Controls how processors in this pipeline should read and write data on a document's source. */ + field_access_pattern?: IngestFieldAccessPattern } export interface IngestPipelineConfig { @@ -25854,10 +26015,12 @@ export interface IngestPutPipelineRequest extends RequestBase { /** Marks this ingest pipeline as deprecated. * When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** Controls how processors in this pipeline should read and write data on a document's source. */ + field_access_pattern?: IngestFieldAccessPattern /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never } + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never, field_access_pattern?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never } + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never, field_access_pattern?: never } } export type IngestPutPipelineResponse = AcknowledgedResponseBase @@ -26099,7 +26262,8 @@ export interface LogstashGetPipelineRequest extends RequestBase { export type LogstashGetPipelineResponse = Record export interface LogstashPutPipelineRequest extends RequestBase { - /** An identifier for the pipeline. */ + /** An identifier for the pipeline. + * Pipeline IDs must begin with a letter or underscore and contain only letters, underscores, dashes, hyphens and numbers. */ id: Id pipeline?: LogstashPipeline /** All values in `body` will be added to the request body. */ @@ -27219,7 +27383,7 @@ export interface MlFillMaskInferenceOptions { tokenization?: MlTokenizationConfigContainer /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string - vocabulary: MlVocabulary + vocabulary?: MlVocabulary } export interface MlFillMaskInferenceUpdateOptions { @@ -27964,7 +28128,7 @@ export interface MlTextEmbeddingInferenceOptions { tokenization?: MlTokenizationConfigContainer /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string - vocabulary: MlVocabulary + vocabulary?: MlVocabulary } export interface MlTextEmbeddingInferenceUpdateOptions { @@ -27978,7 +28142,7 @@ export interface MlTextExpansionInferenceOptions { tokenization?: MlTokenizationConfigContainer /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string - vocabulary: MlVocabulary + vocabulary?: MlVocabulary } export interface MlTextExpansionInferenceUpdateOptions { @@ -29362,7 +29526,7 @@ export interface MlGetOverallBucketsRequest extends RequestBase { /** Refer to the description for the `exclude_interim` query parameter. */ exclude_interim?: boolean /** Refer to the description for the `overall_score` query parameter. */ - overall_score?: double | string + overall_score?: double /** Refer to the description for the `start` query parameter. */ start?: DateTime /** Refer to the description for the `top_n` query parameter. */ @@ -32098,6 +32262,29 @@ export interface NodesUsageResponseBase extends NodesNodesResponseBase { nodes: Record } +export interface ProjectTagsProjectTags { + origin: Partial> + linked_projects?: Record +} + +export interface ProjectTagsRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export type ProjectTagsResponse = ProjectTagsProjectTags + +export interface ProjectTagsTagsKeys { + _id: string + _alias: string + _type: string + _organisation: string +} +export type ProjectTagsTags = ProjectTagsTagsKeys +& { [property: string]: string } + export interface QueryRulesQueryRule { /** A unique identifier for the rule. 
*/ rule_id: Id @@ -33263,7 +33450,7 @@ export interface SecurityAuthenticateAuthenticateApiKey { id: Id name?: Name managed_by: SecurityApiKeyManagedBy - internal: boolean + internal?: boolean } export interface SecurityAuthenticateRequest extends RequestBase { @@ -35124,9 +35311,9 @@ export interface ShutdownDeleteNodeRequest extends RequestBase { /** The node id of node to be removed from the shutdown state */ node_id: NodeId /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ - master_timeout?: TimeUnit + master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ - timeout?: TimeUnit + timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -35158,7 +35345,7 @@ export interface ShutdownGetNodeRequest extends RequestBase { /** Which node for which to retrieve the shutdown status */ node_id?: NodeIds /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ - master_timeout?: TimeUnit + master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -35185,10 +35372,10 @@ export interface ShutdownPutNodeRequest extends RequestBase { node_id: NodeId /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ - master_timeout?: TimeUnit + master_timeout?: Duration /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ - timeout?: TimeUnit + timeout?: Duration /** Valid values are restart, remove, or replace. * Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. * Because the node is expected to rejoin the cluster, data is not migrated off of the node. @@ -36740,6 +36927,15 @@ export interface SqlQueryRequest extends RequestBase { * You can also specify a format using the `Accept` HTTP header. * If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. */ format?: SqlQuerySqlFormat + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting /** If `true`, the response has partial results when there are shard request timeouts or shard failures. * If `false`, the API returns an error with no partial results. */ allow_partial_search_results?: boolean @@ -36789,9 +36985,9 @@ export interface SqlQueryRequest extends RequestBase { * To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. 
*/ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { format?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never } + body?: string | { [key: string]: any } & { format?: never, project_routing?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { format?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never } + querystring?: { [key: string]: any } & { format?: never, project_routing?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never } } export interface SqlQueryResponse { @@ -36843,6 +37039,7 @@ export interface SqlTranslateResponse { fields?: (QueryDslFieldAndFormat | Field)[] query?: QueryDslQueryContainer sort?: Sort + track_total_hits?: SearchTrackHits } export interface SslCertificatesCertificateInformation { @@ -36875,6 +37072,54 @@ export interface SslCertificatesRequest extends RequestBase { export type SslCertificatesResponse = SslCertificatesCertificateInformation[] +export interface StreamsLogsDisableRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } +} + +export type StreamsLogsDisableResponse = AcknowledgedResponseBase + +export interface StreamsLogsEnableRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. 
*/ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } +} + +export type StreamsLogsEnableResponse = AcknowledgedResponseBase + +export interface StreamsStatusLogsStatus { + /** If true, the logs stream feature is enabled. */ + enabled: boolean +} + +export interface StreamsStatusRequest extends RequestBase { + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export interface StreamsStatusResponse { + logs: StreamsStatusLogsStatus +} + export interface SynonymsSynonymRule { /** The identifier for the synonym rule. * If you do not specify a synonym rule ID when you create a rule, an identifier is created automatically by Elasticsearch. */ @@ -39546,4 +39791,15 @@ export interface SpecUtilsCommonCatQueryParameters { help?: boolean /** When set to `true` will enable verbose output. */ v?: boolean + /** Sets the units for columns that contain a byte-size value. + * Note that byte-size value units work in terms of powers of 1024. For instance `1kb` means 1024 bytes, not 1000 bytes. + * If omitted, byte-size values are rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the numeric value of the column is as small as possible whilst still being at least `1.0`. + * If given, byte-size values are rendered as an integer with no suffix, representing the value of the column in the chosen unit. + * Values that are not an exact multiple of the chosen unit are rounded down. */ + bytes?: Bytes + /** Sets the units for columns that contain a time duration. + * If omitted, time duration values are rendered with a suffix such as `ms`, `s`, `m` or `h`, chosen such that the numeric value of the column is as small as possible whilst still being at least `1.0`. + * If given, time duration values are rendered as an integer with no suffix. + * Values that are not an exact multiple of the chosen unit are rounded down. */ + time?: TimeUnit } From 3cfeca7162d486ed0232a6484e07f2ea3ceeb5fd Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 6 Oct 2025 17:08:38 +0200 Subject: [PATCH 639/647] Auto-generated API code (#3033) --- docs/reference/api-reference.md | 29 ++++++++- src/api/api/inference.ts | 82 ++++++++++++++++++++++++ src/api/api/security.ts | 11 ++-- src/api/types.ts | 108 +++++++++++++++++++++++++++++++- 4 files changed, 223 insertions(+), 7 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index ed819a820..0b3b01cfc 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -7849,6 +7849,31 @@ These settings are specific to the `cohere` service. These settings are specific to the task type you specified. 
- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.

+## client.inference.putContextualai [_inference.put_contextualai]
+Create a Contextual AI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `contextualai` service.
+
+To review the available `rerank` models, refer to .
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-contextualai)
+
+```ts
+client.inference.putContextualai({ task_type, contextualai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_contextualai]
+
+#### Request (object) [_request_inference.put_contextualai]
+- **`task_type` (Enum(\"rerank\"))**: The type of the inference task that the model will perform.
+- **`contextualai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum(\"contextualai\"))**: The type of service supported for the specified task type. In this case, `contextualai`.
+- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `contextualai` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { instruction, return_documents, top_k })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
 ## client.inference.putCustom [_inference.put_custom]
 Create a custom inference endpoint.

@@ -12347,7 +12372,9 @@ client.security.getSettings({ ... })
 If no response is received before the timeout expires, the request fails and returns an error.

 ## client.security.getStats [_security.get_stats]
-Get security statistics for all nodes
+Get security stats.
+
+Gather security usage statistics from all node(s) within the cluster.

 [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-stats)

diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts
index a15aa20e4..8c0d1b7ce 100644
--- a/src/api/api/inference.ts
+++ b/src/api/api/inference.ts
@@ -223,6 +223,21 @@ export default class Inference {
         'timeout'
       ]
     },
+    'inference.put_contextualai': {
+      path: [
+        'task_type',
+        'contextualai_inference_id'
+      ],
+      body: [
+        'chunking_settings',
+        'service',
+        'service_settings',
+        'task_settings'
+      ],
+      query: [
+        'timeout'
+      ]
+    },
     'inference.put_custom': {
       path: [
         'task_type',
@@ -1376,6 +1391,73 @@ export default class Inference {
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

+  /**
+   * Create a Contextual AI inference endpoint. Create an inference endpoint to perform an inference task with the `contextualai` service. To review the available `rerank` models, refer to .
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-contextualai | Elasticsearch API documentation} + */ + async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptions): Promise + async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_contextualai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.contextualai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_contextualai', + pathParts: { + task_type: params.task_type, + contextualai_inference_id: params.contextualai_inference_id + }, + acceptedParams: [ + 'task_type', + 'contextualai_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Create a custom inference endpoint. The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations. The custom service gives you the ability to define the headers, url, query parameters, request body, and secrets. The custom service supports the template replacement functionality, which enables you to define a template that can be replaced with the value associated with that key. Templates are portions of a string that start with `${` and end with `}`. The parameters `secret_parameters` and `task_settings` are checked for keys for template replacement. Template replacement is supported in the `request`, `headers`, `url`, and `query_parameters`. If the definition (key) is not found for a template, an error message is returned. 
In case of an endpoint definition like the following: ``` PUT _inference/text_embedding/test-text-embedding { "service": "custom", "service_settings": { "secret_parameters": { "api_key": "" }, "url": "...endpoints.huggingface.cloud/v1/embeddings", "headers": { "Authorization": "Bearer ${api_key}", "Content-Type": "application/json" }, "request": "{\"input\": ${input}}", "response": { "json_parser": { "text_embeddings":"$.data[*].embedding[*]" } } } } ``` To replace `${api_key}` the `secret_parameters` and `task_settings` are checked for a key named `api_key`. > info > Templates should not be surrounded by quotes. Pre-defined templates: * `${input}` refers to the array of input strings that comes from the `input` field of the subsequent inference requests. * `${input_type}` refers to the input type translation values. * `${query}` refers to the query field used specifically for reranking tasks. * `${top_n}` refers to the `top_n` field available when performing rerank requests. * `${return_documents}` refers to the `return_documents` field available when performing rerank requests. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom | Elasticsearch API documentation} diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 6675abbd2..ba10fe760 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -2471,13 +2471,13 @@ export default class Security { } /** - * Get security statistics for all nodes + * Get security stats. Gather security usage statistics from all node(s) within the cluster. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-stats | Elasticsearch API documentation} */ - async getStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async getStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptions): Promise + async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath } = this[kAcceptedParams]['security.get_stats'] @@ -2500,6 +2500,7 @@ export default class Security { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/types.ts b/src/api/types.ts index 1db7f01e4..f0b85f962 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -4781,7 +4781,9 @@ export interface TaskFailure { export type TaskId = string export interface TextEmbedding { - model_id: string + /** Model ID is required for all dense_vector fields but + * may be inferred for semantic_text fields */ + model_id?: string model_text: string } @@ -8641,6 +8643,7 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase export interface MappingSemanticTextIndexOptions { dense_vector?: MappingDenseVectorIndexOptions + sparse_vector?: MappingSparseVectorIndexOptions } export interface 
MappingSemanticTextProperty {
@@ -18864,6 +18867,7 @@ export interface IlmPolicy {
 }

 export interface IlmRolloverAction {
+  /** The `max_size` condition has been deprecated in 9.3.0 and `max_primary_shard_size` should be used instead */
   max_size?: ByteSize
   max_primary_shard_size?: ByteSize
   max_age?: Duration
@@ -21999,6 +22003,7 @@ export interface IndicesRolloverRolloverConditions {
   max_age_millis?: DurationValue
   min_docs?: long
   max_docs?: long
+  /** The `max_size` condition has been deprecated in 9.3.0 and `max_primary_shard_size` should be used instead */
   max_size?: ByteSize
   max_size_bytes?: long
   min_size?: ByteSize
@@ -23043,6 +23048,38 @@ export interface InferenceContentObject {
   type: string
 }

+export interface InferenceContextualAIServiceSettings {
+  /** A valid API key for your Contextual AI account.
+   *
+   * IMPORTANT: You need to provide the API key only once, during the inference model creation.
+   * The get inference endpoint API does not retrieve your API key.
+   * After creating the inference model, you cannot change the associated API key.
+   * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
+  api_key: string
+  /** The name of the model to use for the inference task.
+   * Refer to the Contextual AI documentation for the list of available rerank models. */
+  model_id: string
+  /** This setting helps to minimize the number of rate limit errors returned from Contextual AI.
+   * The `contextualai` service sets a default number of requests allowed per minute depending on the task type.
+   * For `rerank`, it is set to `1000`. */
+  rate_limit?: InferenceRateLimitSetting
+}
+
+export type InferenceContextualAIServiceType = 'contextualai'
+
+export interface InferenceContextualAITaskSettings {
+  /** Instructions for the reranking model. Refer to
+   * Only for the `rerank` task type. */
+  instruction?: string
+  /** Whether to return the source documents in the response.
+   * Only for the `rerank` task type. */
+  return_documents?: boolean
+  /** The number of most relevant documents to return.
+   * If not specified, the reranking results of all documents will be returned.
+   * Only for the `rerank` task type. */
+  top_k?: integer
+}
+
 export interface InferenceCustomRequestParams {
   /** The body structure of the request. It requires passing in the string-escaped result of the JSON format HTTP request body.
* For example: @@ -23553,6 +23590,13 @@ export interface InferenceInferenceEndpointInfoCohere extends InferenceInference task_type: InferenceTaskTypeCohere } +export interface InferenceInferenceEndpointInfoContextualAi extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeContextualAI +} + export interface InferenceInferenceEndpointInfoCustom extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string @@ -23851,6 +23895,7 @@ export interface InferenceRateLimitSetting { * * `azureopenai` service and task type `text_embedding`: `1440` * * `azureopenai` service and task type `completion`: `120` * * `cohere` service: `10000` + * * `contextualai` service: `1000` * * `elastic` service and task type `chat_completion`: `240` * * `googleaistudio` service: `360` * * `googlevertexai` service: `30000` @@ -23959,6 +24004,8 @@ export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion' export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion' +export type InferenceTaskTypeContextualAI = 'rerank' + export type InferenceTaskTypeCustom = 'text_embedding' | 'sparse_embedding' | 'rerank' | 'completion' export type InferenceTaskTypeDeepSeek = 'completion' | 'chat_completion' @@ -24399,6 +24446,30 @@ export interface InferencePutCohereRequest extends RequestBase { export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere +export interface InferencePutContextualaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceTaskTypeContextualAI + /** The unique identifier of the inference endpoint. */ + contextualai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `contextualai`. */ + service: InferenceContextualAIServiceType + /** Settings used to install the inference model. These settings are specific to the `contextualai` service. */ + service_settings: InferenceContextualAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceContextualAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, contextualai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, contextualai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutContextualaiResponse = InferenceInferenceEndpointInfoContextualAi + export interface InferencePutCustomRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceCustomTaskType @@ -33187,6 +33258,11 @@ export interface SecurityManageUserPrivileges { applications: string[] } +export interface SecurityNodeSecurityStats { + /** Role statistics. 
*/ + roles: SecurityRolesStats +} + export interface SecurityRealmInfo { name: Name type: string @@ -33352,6 +33428,11 @@ export interface SecurityRoleTemplateScript { options?: Record } +export interface SecurityRolesStats { + /** Document-level security (DLS) statistics. */ + dls: XpackUsageSecurityRolesDls +} + export interface SecuritySearchAccess { /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity @@ -34194,6 +34275,18 @@ export interface SecurityGetSettingsResponse { 'security-tokens': SecuritySecuritySettings } +export interface SecurityGetStatsRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface SecurityGetStatsResponse { + /** A map of node IDs to security statistics for that node. */ + nodes: Record +} + export type SecurityGetTokenAccessTokenGrantType = 'password' | 'client_credentials' | '_kerberos' | 'refresh_token' export interface SecurityGetTokenAuthenticatedUser extends SecurityUser { @@ -39685,9 +39778,22 @@ export interface XpackUsageSecurityRolesDls { } export interface XpackUsageSecurityRolesDlsBitSetCache { + /** Number of entries in the cache. */ count: integer + /** Human-readable amount of memory taken up by the cache. */ memory?: ByteSize + /** Memory taken up by the cache in bytes. */ memory_in_bytes: ulong + /** Total number of cache hits. */ + hits: long + /** Total number of cache misses. */ + misses: long + /** Total number of cache evictions. */ + evictions: long + /** Total combined time spent in cache for hits in milliseconds. */ + hits_time_in_millis: DurationValue + /** Total combined time spent in cache for misses in milliseconds. */ + misses_time_in_millis: DurationValue } export interface XpackUsageSecurityRolesFile { From 64439384cce88fb6ea17f660b0cf6ee1eb9d1407 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 8 Oct 2025 14:18:04 -0500 Subject: [PATCH 640/647] Add docs for updated Undici Agent functionality in Transport (#2957) --- docs/reference/advanced-config.md | 10 +++-- docs/reference/basic-config.md | 19 +++++++-- docs/reference/client-helpers.md | 4 +- docs/reference/connecting.md | 54 ++++++++++++++++++++++-- docs/reference/getting-started.md | 10 ++--- docs/reference/observability.md | 6 +-- docs/reference/timeout-best-practices.md | 2 +- docs/reference/typescript.md | 2 +- src/client.ts | 6 +-- 9 files changed, 88 insertions(+), 25 deletions(-) diff --git a/docs/reference/advanced-config.md b/docs/reference/advanced-config.md index 8145af678..435a09c1d 100644 --- a/docs/reference/advanced-config.md +++ b/docs/reference/advanced-config.md @@ -19,7 +19,7 @@ For information about the `Transport` class, refer to [Transport](/reference/tra ## `ConnectionPool` [_connectionpool] -This class is responsible for keeping in memory all the {{es}} Connection that you are using. There is a single Connection for every node. The connection pool handles the resurrection strategies and the updates of the pool. +This class is responsible for keeping in memory all the {{es}} connections that you are using. There is a single `Connection` for every node. The connection pool handles the resurrection strategies and the updates of the pool. 
```js const { Client, ConnectionPool } = require('@elastic/elasticsearch') @@ -41,7 +41,7 @@ const client = new Client({ ## `Connection` [_connection] -This class represents a single node, it holds every information we have on the node, such as roles, id, URL, custom headers and so on. The actual HTTP request is performed here, this means that if you want to swap the default HTTP client (Node.js core), you should override the `request` method of this class. +This class represents a single node, it holds every information we have on the node, such as roles, id, URL, custom headers and so on. The actual HTTP request is performed here, this means that if you want to swap the default HTTP client ([Undici `Pool`](https://undici.nodejs.org/#/docs/api/Pool.md)), you should override the `request` method of this class. ```js const { Client, BaseConnection } = require('@elastic/elasticsearch') @@ -59,6 +59,10 @@ const client = new Client({ }) ``` +`@elastic/transport` provides two `Connection` implementations: + +- `UndiciConnection`: manages HTTP connections using [Undici](https://undici.nodejs.org/), Node.js's high-performance HTTP client implementation; this is the default value of `Connection` and is recommended unless you have a use case that is not yet supported by Undici or `UndiciConnection` +- `HttpConnection`: manages HTTP connections using [the `http` package](https://nodejs.org/api/http.html) from Node.js's standard library ## `Serializer` [_serializer] @@ -175,5 +179,5 @@ try { ## Migrate to v8 [_migrate_to_v8] -The Node.js client can be configured to emit an HTTP header `Accept: application/vnd.elasticsearch+json; compatible-with=7` which signals to Elasticsearch that the client is requesting `7.x` version of request and response bodies. This allows for upgrading from 7.x to 8.x version of Elasticsearch without upgrading everything at once. Elasticsearch should be upgraded first after the compatibility header is configured and clients should be upgraded second. To enable to setting, configure the environment variable `ELASTIC_CLIENT_APIVERSIONING` to `true`. +The Node.js client can be configured to emit an HTTP header `Accept: application/vnd.elasticsearch+json; compatible-with=7` which signals to {{es}} that the client is requesting `7.x` version of request and response bodies. This allows for upgrading from 7.x to 8.x version of {{es}} without upgrading everything at once. {{es}} should be upgraded first after the compatibility header is configured and clients should be upgraded second. To enable to setting, configure the environment variable `ELASTIC_CLIENT_APIVERSIONING` to `true`. diff --git a/docs/reference/basic-config.md b/docs/reference/basic-config.md index 6c7d75505..db4c5499c 100644 --- a/docs/reference/basic-config.md +++ b/docs/reference/basic-config.md @@ -184,12 +184,21 @@ const client = new Client({ }) ``` -### `agent` +### `agent` [agent-config] -Type: `http.AgentOptions, function`
    +Type: `http.AgentOptions, undici.PoolOptions, function, false`
    Default: `null` -http agent [options](https://nodejs.org/api/http.html#http_new_agent_options), or a function that returns an actual http agent instance. If you want to disable the http agent use entirely (and disable the `keep-alive` feature), set the agent to `false`. +If using the default `UndiciConnection` from `@elastic/transport`, this value can be: + +- an [Undici `PoolOptions` object](https://undici.nodejs.org/#/docs/api/Pool?id=parameter-pooloptions) +- a function that receives all connection-related options and returns an [Undici `Agent`](https://undici.nodejs.org/#/docs/api/Agent.md) instance (or any other object that follows [Undici's `Dispatch.request()`](https://undici.nodejs.org/#/docs/api/Dispatcher?id=dispatcherrequestoptions-callback) conventions) + +If using the legacy `HttpConnection` from `@elastic/transport`, this value can be: + +- [the options object passed to an `http.Agent`](https://nodejs.org/api/http.html#new-agentoptions) +- a function that returns an `http.Agent` (and thus also an [`https.Agent`](https://nodejs.org/api/https.html#class-httpsagent), or any implementation that follows the same conventions, like [`hpagent`](https://www.npmjs.com/package/hpagent)) +- `false` to disable all agent usage, including the `keep-alive` feature ```js const client = new Client({ @@ -211,6 +220,10 @@ const client = new Client({ }) ``` +::::{warning} +If you have set [the `agent` option](/reference/basic-config.md#agent-config) on your client instance to a function and are using `UndiciConnection`—the default [`Connection`](/reference/advanced-config.md#_connection) value starting in 8.0—all `caFingerprint` and `tls` options will be ignored. It is your responsibility to ensure that your custom agent will properly verify HTTPS connections. +:::: + ### `nodeFilter` Type: `function` diff --git a/docs/reference/client-helpers.md b/docs/reference/client-helpers.md index 076a9200b..27b251242 100644 --- a/docs/reference/client-helpers.md +++ b/docs/reference/client-helpers.md @@ -93,7 +93,7 @@ To create a new instance of the Bulk helper, access it as shown in the example a ``` `onSuccess` -: A function that is called for each successful operation in the bulk request, which includes the result from Elasticsearch along with the original document that was sent, or `null` for delete operations. +: A function that is called for each successful operation in the bulk request, which includes the result from {{es}} along with the original document that was sent, or `null` for delete operations. ```js const b = client.helpers.bulk({ @@ -307,7 +307,7 @@ console.log(result) Added in `v8.8.2` -If you need to modify documents in your datasource before it is sent to Elasticsearch, you can return an array in the `onDocument` function rather than an operation object. The first item in the array must be the operation object, and the second item must be the document or partial document object as you’d like it to be sent to Elasticsearch. +If you need to modify documents in your datasource before it is sent to {{es}}, you can return an array in the `onDocument` function rather than an operation object. The first item in the array must be the operation object, and the second item must be the document or partial document object as you’d like it to be sent to {{es}}. 
```js const { Client } = require('@elastic/elasticsearch') diff --git a/docs/reference/connecting.md b/docs/reference/connecting.md index 82a121584..966fe687c 100644 --- a/docs/reference/connecting.md +++ b/docs/reference/connecting.md @@ -77,6 +77,10 @@ When you start {{es}} for the first time you’ll see a distinct block like the Depending on the circumstances there are two options for verifying the HTTPS connection, either verifying with the CA certificate itself or via the HTTP CA certificate fingerprint. +::::{warning} +If you have set [the `agent` option](/reference/basic-config.md#agent-config) on your client instance to a function and are using `UndiciConnection`—the default `Connection` value starting in 8.0—all `caFingerprint` and `tls` options will be ignored. It is your responsibility to ensure that your custom agent will properly verify HTTPS connections. +:::: + ### TLS configuration [auth-tls] The generated root CA certificate can be found in the `certs` directory in your {{es}} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you’re running {{es}} in Docker there is [additional documentation for retrieving the CA certificate](docs-content://deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md). @@ -332,7 +336,7 @@ The supported request specific options are: | Option | Description | | --- | ----------- | | `ignore` | `number[]` -  HTTP status codes which should not be considered errors for this request.
    *Default:* `null` | -| `requestTimeout` | `number` or `string` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See [Elasticsearch best practices for HTML clients](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more info.
    _Default:_ No timeout |
+| `requestTimeout` | `number` or `string` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See [{{es}} best practices for HTTP clients](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more info.
    _Default:_ No timeout |
| `retryOnTimeout` | `boolean` - Retry requests that have timed out.
    *Default:* `false` |
| `maxRetries` | `number` - Max number of retries for the request, it overrides the client default.
    *Default:* `3` | | `compression` | `string` or `boolean` - Enables body compression for the request.
    *Options:* `false`, `'gzip'`
    *Default:* `false` | @@ -477,9 +481,9 @@ You can find the errors exported by the client in the table below. ## Keep-alive connections [keep-alive] -By default, the client uses persistent, keep-alive connections to reduce the overhead of creating a new HTTP connection for each Elasticsearch request. If you are using the default `UndiciConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 10 minutes. If you are using the legacy `HttpConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 1 minute. +By default, the client uses persistent, keep-alive connections to reduce the overhead of creating a new HTTP connection for each {{es}} request. If you are using the default `UndiciConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 10 minutes. If you are using the legacy `HttpConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 1 minute. -If you need to disable keep-alive connections, you can override the HTTP agent with your preferred [HTTP agent options](https://nodejs.org/api/http.md#http_new_agent_options): +If you need to disable keep-alive connections, you can override the HTTP agent with your preferred [HTTP agent options](/reference/basic-config.md#agent-config): ```js const client = new Client({ @@ -500,6 +504,48 @@ const client = new Client({ }) ``` +## Managing open connection limits [limit-open-connections] + +Starting in client 9.0, when using `@elastic/transport` 9.2.0 or later, you can provide a custom `agent` function to share a singleton [Undici `Agent`](https://undici.nodejs.org/#/docs/api/Agent.md) instance that can enforce client-wide connection limits. + +```typescript +import { Agent } from 'undici' +import { HttpConnection } from '@elastic/transport' + +// `maxOrigins * connections` (50 in this case) is the total connection limit +const maxSocketAgent = new Agent({ + keepAliveTimeout: 1000, + maxOrigins: 5, + connections: 10 +}) + +const client = new Client({ + node: '...', + auth: { ... }, + agent: () => maxSocketAgent +}) +``` + +If using the legacy `HttpConnection`, you can use an [`Agent`](https://nodejs.org/api/https.html#class-httpsagent) singleton that enforces `maxTotalSockets`: + +```typescript +import { Agent } from 'node:http' +import { HttpConnection } from '@elastic/transport' + +const maxSocketAgent = new Agent({ + keepAlive: true, + keepAliveMsecs: 1000, + maxTotalSockets: 50 +}) + +const client = new Client({ + node: '...', + auth: { ... }, + Connection: HttpConnection, + agent: () => maxSocketAgent +}) +``` + ## Closing a client’s connections [close-connections] If you would like to close all open connections being managed by an instance of the client, use the `close()` function: @@ -513,4 +559,4 @@ client.close(); ## Automatic product check [product-check] -Since v7.14.0, the client performs a required product check before the first call. This pre-flight product check allows the client to establish the version of Elasticsearch that it is communicating with. The product check requires one additional HTTP request to be sent to the server as part of the request pipeline before the main API call is sent. In most cases, this will succeed during the very first API call that the client sends. Once the product check completes, no further product check HTTP requests are sent for subsequent API calls. +Since v7.14.0, the client performs a required product check before the first call. 
This pre-flight product check allows the client to establish the version of {{es}} that it is communicating with. The product check requires one additional HTTP request to be sent to the server as part of the request pipeline before the main API call is sent. In most cases, this will succeed during the very first API call that the client sends. Once the product check completes, no further product check HTTP requests are sent for subsequent API calls. diff --git a/docs/reference/getting-started.md b/docs/reference/getting-started.md index 61f2dabfb..b0de914b8 100644 --- a/docs/reference/getting-started.md +++ b/docs/reference/getting-started.md @@ -6,7 +6,7 @@ mapped_pages: # Getting started [getting-started-js] -This page guides you through the installation process of the Node.js client, shows you how to instantiate the client, and how to perform basic Elasticsearch operations with it. +This page guides you through the installation process of the Node.js client, shows you how to instantiate the client, and how to perform basic {{es}} operations with it. ### Requirements [_requirements] @@ -28,7 +28,7 @@ Refer to the [*Installation*](/reference/installation.md) page to learn more. ### Connecting [_connecting] -You can connect to the Elastic Cloud using an API key and the Elasticsearch endpoint. +You can connect to the Elastic Cloud using an API key and the {{es}} endpoint. ```js const { Client } = require('@elastic/elasticsearch') @@ -43,9 +43,9 @@ const client = new Client({ }) ``` -Your Elasticsearch endpoint can be found on the **My deployment** page of your deployment: +Your {{es}} endpoint can be found on the **My deployment** page of your deployment: -![Finding Elasticsearch endpoint](images/es-endpoint.jpg) +![Finding {{es}} endpoint](images/es-endpoint.jpg) You can generate an API key on the **Management** page under Security. @@ -56,7 +56,7 @@ For other connection options, refer to the [*Connecting*](/reference/connecting. ### Operations [_operations] -Time to use Elasticsearch! This section walks you through the basic, and most important, operations of Elasticsearch. +Time to use {{es}}! This section walks you through the basic, and most important, operations of {{es}}. #### Creating an index [_creating_an_index] diff --git a/docs/reference/observability.md b/docs/reference/observability.md index b307c5571..6ef2999ae 100644 --- a/docs/reference/observability.md +++ b/docs/reference/observability.md @@ -5,7 +5,7 @@ mapped_pages: # Observability [observability] -To observe and measure Elasticsearch client usage, several client features are provided. +To observe and measure {{es}} client usage, several client features are provided. First, as of 8.15.0, the client provides native support for OpenTelemetry, which allows you to send client usage data to any endpoint that supports OpenTelemetry without having to make any changes to your JavaScript codebase. @@ -17,9 +17,9 @@ All of these observability features are documented below. ## OpenTelemetry [_opentelemetry] -The client supports OpenTelemetry’s [zero-code instrumentation](https://opentelemetry.io/docs/zero-code/js/) to enable tracking each client request as an [OpenTelemetry span](https://opentelemetry.io/docs/concepts/signals/traces/#spans). These spans follow all of the [semantic OpenTelemetry conventions for Elasticsearch](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/) except for `db.query.text`. 
+The client supports OpenTelemetry’s [zero-code instrumentation](https://opentelemetry.io/docs/zero-code/js/) to enable tracking each client request as an [OpenTelemetry span](https://opentelemetry.io/docs/concepts/signals/traces/#spans). These spans follow all of the [semantic OpenTelemetry conventions for {{es}}](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/) except for `db.query.text`. -To start sending Elasticsearch trace data to your OpenTelemetry endpoint, follow [OpenTelemetry’s zero-code instrumentation guide](https://opentelemetry.io/docs/zero-code/js/), or the following steps: +To start sending {{es}} trace data to your OpenTelemetry endpoint, follow [OpenTelemetry’s zero-code instrumentation guide](https://opentelemetry.io/docs/zero-code/js/), or the following steps: 1. Install `@opentelemetry/api` and `@opentelemetry/auto-instrumentations-node` as Node.js dependencies 2. Export the following environment variables with the appropriate values: diff --git a/docs/reference/timeout-best-practices.md b/docs/reference/timeout-best-practices.md index 8bb66f961..9938287c4 100644 --- a/docs/reference/timeout-best-practices.md +++ b/docs/reference/timeout-best-practices.md @@ -9,5 +9,5 @@ Starting in 9.0.0, this client is configured to not time out any HTTP request by Prior to 9.0, this client was configured by default to operate like many HTTP client libraries do, by using a relatively short (30 second) timeout on all requests sent to {{es}}, raising a `TimeoutError` when that time period elapsed without receiving a response. -If you need to set timeouts on Elasticsearch requests, setting the `requestTimeout` value to a millisecond value will cause this client to operate as it did prior to 9.0. +If you need to set timeouts on {{es}} requests, setting the `requestTimeout` value to a millisecond value will cause this client to operate as it did prior to 9.0. diff --git a/docs/reference/typescript.md b/docs/reference/typescript.md index 880fc3e3b..d7cff4ed6 100644 --- a/docs/reference/typescript.md +++ b/docs/reference/typescript.md @@ -5,7 +5,7 @@ mapped_pages: # TypeScript support [typescript] -The client offers a first-class support for TypeScript, shipping a complete set of type definitions of Elasticsearch’s API surface. +The client offers a first-class support for TypeScript, shipping a complete set of type definitions of {{es}}'s API surface. The types are not 100% complete yet. Some APIs are missing (the newest ones, e.g. EQL), and others may contain some errors, but we are continuously pushing fixes & improvements. Contribute type fixes and improvements to [elasticsearch-specification github repository](https://github.com/elastic/elasticsearch-specification). 
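For instance, a minimal sketch that reuses the custom `Transport` pattern shown above; the only assumption is that you set `suppressInternalInstrumentation` in place of `enabled`:

```typescript
import { Transport } from '@elastic/transport'

// Sketch: OpenTelemetry stays enabled overall, but the client's own
// spans are suppressed for every request sent through this transport.
class SuppressingTransport extends Transport {
  async request (params, options) {
    options = options ?? {}
    options.openTelemetry = { suppressInternalInstrumentation: true }
    return super.request(params, options)
  }
}

const client = new Client({
  node: '...',
  auth: { ... },
  Transport: SuppressingTransport
})
```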
diff --git a/src/client.ts b/src/client.ts index 1166d21da..d8739e87d 100644 --- a/src/client.ts +++ b/src/client.ts @@ -164,7 +164,7 @@ export interface ClientOptions { * @defaultValue true */ enableMetaHeader?: boolean /** @property cloud Custom configuration for connecting to Elastic Cloud, in lieu of a `node` or `nodes` configuration - * @remarks Read https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#client-usage for more details + * @remarks Read https://www.elastic.co/docs/reference/elasticsearch/clients/javascript/connecting#client-usage for more details * @defaultValue null */ cloud?: { id: string @@ -182,7 +182,7 @@ export interface ClientOptions { * @defaultValue null */ maxCompressedResponseSize?: number /** @property redaction Options for how to redact potentially sensitive data from metadata attached to `Error` objects - * @remarks Read https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/advanced-config.html#redaction for more details + * @remarks Read https://www.elastic.co/docs/reference/elasticsearch/clients/javascript/advanced-config#redaction for more details * @defaultValue Configuration that will replace known sources of sensitive data */ redaction?: RedactionOptions /** @property serverMode Setting to "serverless" will change some default behavior, like enabling compression and disabling features that assume the possibility of multiple Elasticsearch nodes. @@ -443,7 +443,7 @@ export default class Client extends API { /** * Creates a child client instance that shared its connection pool with the parent client - * @see {@link https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html} + * @see {@link https://www.elastic.co/docs/reference/elasticsearch/clients/javascript/child} */ child (opts: ClientOptions): Client { // Merge the new options with the initial ones From 91a3653f1fc27bc26c314a9eb0c8223757453f66 Mon Sep 17 00:00:00 2001 From: Fabrizio Ferri-Benedetti Date: Mon, 20 Oct 2025 18:24:01 +0200 Subject: [PATCH 641/647] Add EDOT mention to o11y docs (#3041) Co-authored-by: Marci W <333176+marciw@users.noreply.github.com> --- docs/docset.yml | 1 + docs/reference/observability.md | 38 ++++++++------------------------- 2 files changed, 10 insertions(+), 29 deletions(-) diff --git a/docs/docset.yml b/docs/docset.yml index 6034dc549..28407d2fa 100644 --- a/docs/docset.yml +++ b/docs/docset.yml @@ -6,6 +6,7 @@ exclude: cross_links: - docs-content - elasticsearch + - elastic-otel-node toc: - toc: reference - toc: release-notes diff --git a/docs/reference/observability.md b/docs/reference/observability.md index 6ef2999ae..8c3fe211f 100644 --- a/docs/reference/observability.md +++ b/docs/reference/observability.md @@ -5,41 +5,21 @@ mapped_pages: # Observability [observability] -To observe and measure {{es}} client usage, several client features are provided. +Several client features help you observe and measure {{es}} client usage. As of version 8.15.0, the JavaScript client provides native support for OpenTelemetry. You can send client usage data to OpenTelemetry endpoints without making changes to your JavaScript codebase. -First, as of 8.15.0, the client provides native support for OpenTelemetry, which allows you to send client usage data to any endpoint that supports OpenTelemetry without having to make any changes to your JavaScript codebase. 
- -Also, rather than providing a default logger, the client offers an event emitter interface to hook into internal events, such as `request` and `response`, allowing you to log the events you care about, or otherwise react to client usage however you might need. - -Correlating events can be hard, especially if your applications have a large codebase with many events happening at the same time. To help you with this, the client provides a correlation ID system, and other features. - -All of these observability features are documented below. +Rather than providing a default logger, the client offers an event emitter interface to hook into internal events like `request` and `response`. This allows you to log significant events or otherwise react to client usage. Because correlating events can be complex, the client provides a correlation ID system and other features. ## OpenTelemetry [_opentelemetry] The client supports OpenTelemetry’s [zero-code instrumentation](https://opentelemetry.io/docs/zero-code/js/) to enable tracking each client request as an [OpenTelemetry span](https://opentelemetry.io/docs/concepts/signals/traces/#spans). These spans follow all of the [semantic OpenTelemetry conventions for {{es}}](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/) except for `db.query.text`. -To start sending {{es}} trace data to your OpenTelemetry endpoint, follow [OpenTelemetry’s zero-code instrumentation guide](https://opentelemetry.io/docs/zero-code/js/), or the following steps: - -1. Install `@opentelemetry/api` and `@opentelemetry/auto-instrumentations-node` as Node.js dependencies -2. Export the following environment variables with the appropriate values: - - * `OTEL_EXPORTER_OTLP_ENDPOINT` - * `OTEL_EXPORTER_OTLP_HEADERS` - * `OTEL_RESOURCE_ATTRIBUTES` - * `OTEL_SERVICE_NAME` - -3. `require` the Node.js auto-instrumentation library at startup: - -``` -node --require '@opentelemetry/auto-instrumentations-node/register' index.js -``` +To start sending {{es}} trace data to your OpenTelemetry endpoint, instrument the client using the [Elastic Distribution of OpenTelemetry (EDOT) JavaScript](elastic-otel-node://reference/edot-node/index.md), or follow [OpenTelemetry’s zero-code instrumentation guide](https://opentelemetry.io/docs/zero-code/js/). -### Disabling OpenTelemetry collection [disable-otel] +### Turn off OpenTelemetry collection [disable-otel] -As of `@elastic/transport` version 9.1.0—or 8.10.0 when using `@elastic/elasticsearch` 8.x—OpenTelemetry tracing can be disabled in multiple ways. +As of `@elastic/transport` version 9.1.0—or 8.10.0 when using `@elastic/elasticsearch` 8.x—you can turn off OpenTelemetry tracing in several ways. -To entirely disable OpenTelemetry collection, you can provide a custom `Transport` at client instantiation time that sets `openTelemetry.enabled` to `false`: +To entirely turn off OpenTelemetry collection, you can provide a custom `Transport` at client instantiation time that sets `openTelemetry.enabled` to `false`: ```typescript import { Transport } from '@elastic/transport' @@ -58,11 +38,11 @@ const client = new Client({ }) ``` -Alternatively, you can also export an environment variable `OTEL_ELASTICSEARCH_ENABLED=false` to achieve the same effect. +Alternatively, you can export the environment variable `OTEL_ELASTICSEARCH_ENABLED=false`. -If you would not like OpenTelemetry to be disabled entirely, but would like the client to suppress tracing, you can use the option `openTelemetry.suppressInternalInstrumentation = true` instead. 
+To suppress tracing without turning off all OpenTelemetry collection, use the option `openTelemetry.suppressInternalInstrumentation = true` instead. -If you would like to keep either option enabled by default, but want to disable them for a single API call, you can pass `Transport` options as a second argument to any API function call: +If you would like to keep either option enabled by default, but want to turn them off for a single API call, pass `Transport` options as a second argument to any API function call: ```typescript const response = await client.search({ ... }, { From 211fa2af54c21a4e42556b2cda68541c44b100ec Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 20 Oct 2025 20:01:44 +0200 Subject: [PATCH 642/647] Auto-generated API code (#3039) Co-authored-by: Josh Mock --- docs/reference/api-reference.md | 130 ++++++++++++------- src/api/api/indices.ts | 165 ++++++++++++++++++++++++ src/api/api/inference.ts | 10 +- src/api/api/project.ts | 3 +- src/api/api/reindex.ts | 2 - src/api/api/security.ts | 12 +- src/api/types.ts | 215 ++++++++++++++++++++++++++------ 7 files changed, 443 insertions(+), 94 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 0b3b01cfc..7ca3ef3ad 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -209,7 +209,7 @@ client.count({ ... }) - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. -- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded, or aliased indices are ignored when frozen. @@ -316,7 +316,7 @@ client.create({ id, index }) - **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - **`timeout` (Optional, string \| -1 \| 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. 
Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. - **`version` (Optional, number)**: The explicit version number for concurrency control. It must be a non-negative long number. -- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. - **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. ## client.delete [_delete] @@ -377,7 +377,7 @@ client.delete({ id, index }) - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. - **`version` (Optional, number)**: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. -- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. - **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. ## client.deleteByQuery [_delete_by_query] @@ -480,7 +480,7 @@ client.deleteByQuery({ index }) - **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. - **`conflicts` (Optional, Enum("abort" \| "proceed"))**: What to do if delete by query hits version conflicts: `abort` or `proceed`. -- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. 
This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - **`from` (Optional, number)**: Skips the specified number of documents. @@ -580,7 +580,7 @@ client.exists({ id, index }) - **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. - **`version` (Optional, number)**: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. -- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. ## client.existsSource [_exists_source] Check for a document source. @@ -613,7 +613,7 @@ client.existsSource({ id, index }) - **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude in the response. - **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. - **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. -- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. ## client.explain [_explain] Explain a document match result. 
@@ -634,7 +634,7 @@ client.explain({ id, index }) - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. - **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. -- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. @@ -758,7 +758,7 @@ client.get({ id, index }) - **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_fields` option. Object fields can't be returned; if specified, the request fails. - **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. -- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. ## client.getScript [_get_script] Get a script or search template. @@ -833,7 +833,7 @@ client.getSource({ id, index }) - **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude in the response. 
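A minimal sketch of the source filtering these parameters describe; the index, ID, and field name are assumptions, and the remaining parameters follow below:

```ts
// Hedged sketch: fetch only the document source, dropping a heavy field.
const source = await client.getSource({
  index: 'my-index',
  id: '1',
  _source_excludes: 'embedding'
})
console.log(source)
```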
- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. - **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. -- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. ## client.healthReport [_health_report] Get the cluster health. @@ -1011,7 +1011,7 @@ client.index({ index }) - **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - **`timeout` (Optional, string \| -1 \| 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. - **`version` (Optional, number)**: An explicit version number for concurrency control. It must be a non-negative long number. -- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. - **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. - **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. - **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created). @@ -1190,7 +1190,7 @@ client.mtermvectors({ ... }) - **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - **`term_statistics` (Optional, boolean)**: If true, the response includes term frequency and document frequency. - **`version` (Optional, number)**: If `true`, returns the document version as part of a hit. -- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. ## client.openPointInTime [_open_point_in_time] Open a point in time. @@ -1366,7 +1366,7 @@ client.reindex({ dest, source }) - **`conflicts` (Optional, Enum("abort" \| "proceed"))**: Indicates whether to continue reindexing even when there are conflicts. - **`max_docs` (Optional, number)**: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation.
If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. - **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when reindexing. -- **`size` (Optional, number)** - **`refresh` (Optional, boolean)**: If `true`, the request refreshes affected shards to make this operation visible to search. - **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. By default, there is no throttle. - **`scroll` (Optional, string \| -1 \| 0)**: The period of time that a consistent view of the index should be maintained for scrolled search. @@ -1506,7 +1505,7 @@ client.search({ ... }) #### Request (object) [_request_search] - **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. -- **`aggregations` (Optional, Record)**: Defines the aggregations that are run as part of the search request. +- **`aggregations` (Optional, Record)**: Defines the aggregations that are run as part of the search request. - **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**: Collapses search results by the values of the specified field. - **`explain` (Optional, boolean)**: If `true`, the request returns detailed information about score computation as part of a hit. - **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. @@ -1546,7 +1545,7 @@ client.search({ ... }) - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. - **`batched_reduce_size` (Optional, number)**: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. - **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. -- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for the query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`.
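To ground these search options, here is a minimal sketch combining the `q` query-string parameters with a body aggregation; the index and field names are assumptions, and the parameter list continues below:

```ts
// Hedged sketch: Lucene query-string search plus a terms aggregation.
const result = await client.search({
  index: 'my-index',
  q: 'title:elasticsearch',
  default_operator: 'and',
  aggregations: {
    per_tag: { terms: { field: 'tags.keyword' } }
  }
})
console.log(result.hits.hits.length, result.aggregations)
```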
- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices will be ignored when frozen. @@ -1678,7 +1677,7 @@ client.searchMvt({ index, field, zoom, x, y }) - **`zoom` (number)**: Zoom level for the vector tile to search - **`x` (number)**: X coordinate for the vector tile to search - **`y` (number)**: Y coordinate for the vector tile to search -- **`aggs` (Optional, Record)**: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. +- **`aggs` (Optional, Record)**: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. - **`buffer` (Optional, number)**: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. - **`exact_bounds` (Optional, boolean)**: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile. - **`extent` (Optional, number)**: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. @@ -1845,7 +1844,7 @@ client.termvectors({ index }) - **`term_statistics` (Optional, boolean)**: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact. - **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - **`version` (Optional, number)**: If `true`, returns the document version as part of a hit. -- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. - **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time. @@ -2015,7 +2014,7 @@ client.updateByQuery({ index }) - **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
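For the `conflicts` and scripted-update behavior described here, a hedged sketch follows; the index, field, and script contents are illustrative, and the parameter list continues below:

```ts
// Hedged sketch: update matching documents in place, proceeding past
// version conflicts instead of aborting.
await client.updateByQuery({
  index: 'my-index',
  conflicts: 'proceed',
  query: { term: { status: 'stale' } },
  script: {
    source: 'ctx._source.status = params.next',
    lang: 'painless',
    params: { next: 'refreshed' }
  },
  refresh: true
})
```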
- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. -- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified. - **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - **`from` (Optional, number)**: Skips the specified number of documents. @@ -2146,7 +2145,7 @@ client.asyncSearch.submit({ ... }) #### Request (object) [_request_async_search.submit] - **`index` (Optional, string \| string[])**: A list of index names to search; use `_all` or empty string to perform the operation on all indices -- **`aggregations` (Optional, Record)** +- **`aggregations` (Optional, Record)** - **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** - **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. - **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. @@ -2511,7 +2510,8 @@ client.cat.mlDataFrameAnalytics({ ... }) #### Request (object) [_request_cat.ml_data_frame_analytics] - **`id` (Optional, string)**: The ID of the data frame analytics to fetch -- **`allow_no_match` (Optional, boolean)**: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) +- **`allow_no_match` (Optional, boolean)**: Whether to ignore if a wildcard expression matches no configs. +(This includes `_all` string or when no configs have been specified.) - **`h` (Optional, Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version") \| Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version")[])**: List of column names to display. 
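The `h` and `s` parameters select and order the columns of the listing. A minimal sketch, with illustrative column choices:

```ts
// Hedged sketch: a cat listing restricted to a few columns, sorted by id.
// The default response is a plain-text table intended for human consumption.
const table = await client.cat.mlDataFrameAnalytics({
  h: ['id', 'state', 'progress'],
  s: ['id']
})
console.log(table)
```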
- **`s` (Optional, Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version") \| Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version")[])**: List of column names or column aliases used to sort the response. @@ -2653,7 +2653,7 @@ client.cat.nodes({ ... }) #### Request (object) [_request_cat.nodes] - **`full_id` (Optional, boolean)**: If `true`, return the full node ID. If `false`, return the shortened node ID. - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. -- **`h` (Optional, Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version") \| Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| 
"indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version")[])**: A list of columns names to display. +- **`h` (Optional, Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "available_processors" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version") \| Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| 
"file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "available_processors" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version")[])**: A list of columns names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` @@ -3762,7 +3762,7 @@ client.connector.delete({ connector_id }) #### Request (object) [_request_connector.delete] - **`connector_id` (string)**: The unique identifier of the connector to be deleted -- **`delete_sync_jobs` (Optional, boolean)**: A flag indicating if associated sync jobs should be also removed. Defaults to false. +- **`delete_sync_jobs` (Optional, boolean)**: A flag indicating if associated sync jobs should be also removed. - **`hard` (Optional, boolean)**: A flag indicating if the connector should be hard deleted. ## client.connector.get [_connector.get] @@ -3796,7 +3796,7 @@ client.connector.list({ ... }) ### Arguments [_arguments_connector.list] #### Request (object) [_request_connector.list] -- **`from` (Optional, number)**: Starting offset (default: 0) +- **`from` (Optional, number)**: Starting offset - **`size` (Optional, number)**: Specifies a max number of results to get - **`index_name` (Optional, string \| string[])**: A list of connector index names to fetch connector documents for - **`connector_name` (Optional, string \| string[])**: A list of connector names to fetch connector documents for @@ -3971,7 +3971,7 @@ client.connector.syncJobList({ ... 
}) ### Arguments [_arguments_connector.sync_job_list] #### Request (object) [_request_connector.sync_job_list] -- **`from` (Optional, number)**: Starting offset (default: 0) +- **`from` (Optional, number)**: Starting offset - **`size` (Optional, number)**: Specifies a max number of results to get - **`status` (Optional, Enum("canceling" \| "canceled" \| "completed" \| "error" \| "in_progress" \| "pending" \| "suspended"))**: A sync job status to fetch connector sync jobs for - **`connector_id` (Optional, string)**: A connector id to fetch connector sync jobs for @@ -4830,7 +4830,7 @@ client.fleet.search({ index }) #### Request (object) [_request_fleet.search] - **`index` (string \| string)**: A single target to search. If the target is an index alias, it must resolve to a single index. -- **`aggregations` (Optional, Record)** +- **`aggregations` (Optional, Record)** - **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** - **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. - **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. @@ -6219,6 +6219,26 @@ client.indices.getMigrateReindexStatus({ index }) #### Request (object) [_request_indices.get_migrate_reindex_status] - **`index` (string \| string[])**: The index or data stream name. +## client.indices.getSample [_indices.get_sample] +Get random sample of ingested data + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample) + +```ts +client.indices.getSample() +``` + + +## client.indices.getSampleStats [_indices.get_sample_stats] +Get stats about a random sample of ingested data + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample) + +```ts +client.indices.getSampleStats() +``` + + ## client.indices.getSettings [_indices.get_settings] Get index settings. Get setting information for one or more indices. @@ -6689,6 +6709,16 @@ If no response is received before the timeout expires, the request fails and ret If no response is received before the timeout expires, the request fails and returns an error. - **`write_index_only` (Optional, boolean)**: If `true`, the mappings are applied only to the current write index for the target. +## client.indices.putSampleConfiguration [_indices.put_sample_configuration] +Configure sampling for an index or data stream + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-sample-configuration) + +```ts +client.indices.putSampleConfiguration() +``` + + ## client.indices.putSettings [_indices.put_settings] Update index settings. Changes dynamic index settings in real time. @@ -7437,7 +7467,7 @@ such as `open,hidden`. - **`groups` (Optional, string \| string[])**: List of search groups to include in the search statistics. - **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. -- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. 
+- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the cluster, indices, or shards level. ## client.indices.updateAliases [_indices.update_aliases] Create or update an alias. @@ -7481,7 +7511,7 @@ This behavior applies even if the request targets other open indices. - **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. - **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. -- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `and` or `or`. - **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. @@ -7494,7 +7524,7 @@ Supports a list of values, such as `open,hidden`. - **`q` (Optional, string)**: Query in the Lucene query string syntax. ## client.inference.chatCompletionUnified [_inference.chat_completion_unified] -Perform chat completion inference +Perform chat completion inference on the service The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. @@ -7519,6 +7549,12 @@ client.inference.chatCompletionUnified({ inference_id }) ## client.inference.completion [_inference.completion] Perform completion inference on the service +Get responses for completion tasks. +This API works only with the completion task type. + +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + +This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference) @@ -8286,11 +8322,9 @@ client.inference.rerank({ inference_id, query, input }) #### Request (object) [_request_inference.rerank] - **`inference_id` (string)**: The unique identifier for the inference endpoint. - **`query` (string)**: Query input. -- **`input` (string \| string[])**: The text on which you want to perform the inference task. -It can be a single string or an array. - -> info -> Inference endpoints for the `completion` task type currently only support a single string as input. +- **`input` (string[])**: The documents to rank. +- **`return_documents` (Optional, boolean)**: Include the document text in the response. 
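Together with `top_n` (next), this lets callers trim rerank responses. A hedged sketch of the call shape, assuming a pre-created rerank endpoint ID:

```ts
// Hedged sketch: rerank three documents, returning text for the top two.
const reranked = await client.inference.rerank({
  inference_id: 'my-rerank-endpoint', // assumed endpoint ID
  query: 'which document mentions snapshots?',
  input: ['doc one text', 'doc two text', 'doc three text'],
  return_documents: true,
  top_n: 2
})
```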
+- **`top_n` (Optional, number)**: Limit the response to the top N documents. - **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. - **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete. @@ -8314,7 +8348,7 @@ Either a string or an array of strings. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete. ## client.inference.streamCompletion [_inference.stream_completion] -Perform streaming inference. +Perform streaming completion inference on the service Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. @@ -10103,7 +10137,7 @@ client.ml.putDatafeed({ datafeed_id }) - **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. +- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. - **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. @@ -10617,7 +10651,7 @@ client.ml.updateDatafeed({ datafeed_id }) - **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only +- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. - **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of @@ -10940,7 +10974,7 @@ client.nodes.stats({ ... }) - **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics. - **`groups` (Optional, boolean)**: List of search groups to include in the search statistics. - **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). -- **`level` (Optional, Enum("node" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. +- **`level` (Optional, Enum("node" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the node, indices, or shards level. - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. - **`types` (Optional, string[])**: A list of document types for the indexing index metric. - **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory. @@ -10963,13 +10997,6 @@ A list of the following options: `_all`, `rest_actions`. - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -## client.project.tags [_project.tags] -Return tags defined for the project -```ts -client.project.tags() -``` - - ## client.queryRules.deleteRule [_query_rules.delete_rule] Delete a query rule. Delete a query rule within a query ruleset. @@ -11292,7 +11319,7 @@ This parameter has the following rules: * Multiple non-rollup indices may be specified. * Only one rollup index may be specified. If more than one are supplied, an exception occurs. * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. -- **`aggregations` (Optional, Record)**: Specifies aggregations. +- **`aggregations` (Optional, Record)**: Specifies aggregations. - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specifies a DSL query that is subject to some limitations. - **`size` (Optional, number)**: Must be zero if set, as rollups work on pre-aggregated data. - **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response @@ -11923,6 +11950,9 @@ By default, API keys never expire. - **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. +- **`certificate_identity` (Optional, string)**: The certificate identity to associate with this API key. +This field is used to restrict the API key to connections authenticated by a specific TLS certificate. +The value should match the certificate's distinguished name (DN) pattern. ## client.security.createServiceToken [_security.create_service_token] Create a service account token. @@ -13292,6 +13322,12 @@ By default, API keys never expire. This property can be omitted to leave the val It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information fully replaces metadata previously associated with the API key. +- **`certificate_identity` (Optional, string)**: The certificate identity to associate with this API key. 
+This field is used to restrict the API key to connections authenticated by a specific TLS certificate. +The value should match the certificate's distinguished name (DN) pattern. +When specified, this fully replaces any previously assigned certificate identity. +To clear an existing certificate identity, explicitly set this field to `null`. +When omitted, the existing certificate identity remains unchanged. ## client.security.updateSettings [_security.update_settings] Update security index settings. @@ -15140,7 +15176,7 @@ indexing. The minimum value is 1s and the maximum is 1h. These objects define the group by fields and the aggregation to reduce the data. - **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. -- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, use_point_in_time, unattended })**: Defines optional transform settings. - **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. - **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. @@ -15196,7 +15232,7 @@ The minimum value is `1s` and the maximum is `1h`. and the aggregation to reduce the data. - **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. -- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, use_point_in_time, unattended })**: Defines optional transform settings. - **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. - **`defer_validation` (Optional, boolean)**: When the transform is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source @@ -15367,7 +15403,7 @@ the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. - **`_meta` (Optional, Record)**: Defines optional transform metadata. - **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. -- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, use_point_in_time, unattended })**: Defines optional transform settings. - **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. - **`retention_policy` (Optional, { time } \| null)**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. 
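Before the source changes below, a hedged sketch of the `certificate_identity` lifecycle documented above; the key name and DN pattern are illustrative, and the explicit `null` follows the documented clearing behavior:

```ts
// Hedged sketch: create a TLS-restricted API key, then clear the restriction.
const created = await client.security.createApiKey({
  name: 'mtls-restricted-key',
  certificate_identity: 'CN=fleet-agent,OU=agents,O=example'
})

// Per the docs above, an explicit null clears the certificate identity.
await client.security.updateApiKey({
  id: created.id,
  certificate_identity: null
})
```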
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 849d9b4a1..dba682038 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -493,6 +493,20 @@ export default class Indices { body: [], query: [] }, + 'indices.get_sample': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.get_sample_stats': { + path: [ + 'index' + ], + body: [], + query: [] + }, 'indices.get_settings': { path: [ 'index', @@ -686,6 +700,13 @@ export default class Indices { 'write_index_only' ] }, + 'indices.put_sample_configuration': { + path: [ + 'index' + ], + body: [], + query: [] + }, 'indices.put_settings': { path: [ 'index' @@ -3124,6 +3145,102 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Get random sample of ingested data + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample | Elasticsearch API documentation} + */ + async getSample (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getSample (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getSample (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getSample (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_sample'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_sample` + const meta: TransportRequestMetadata = { + name: 'indices.get_sample', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get stats about a random sample of ingested data + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample | Elasticsearch API documentation} + */ + async getSampleStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getSampleStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getSampleStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getSampleStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_sample_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_sample/stats` + const meta: TransportRequestMetadata = { + name: 'indices.get_sample_stats', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings | Elasticsearch API documentation} @@ -3975,6 +4092,54 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Configure sampling for an index or data stream + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-sample-configuration | Elasticsearch API documentation} + */ + async putSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async putSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.put_sample_configuration'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/${encodeURIComponent(params.index.toString())}/_sample/config` + const meta: TransportRequestMetadata = { + name: 'indices.put_sample_configuration', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. For performance optimization during bulk indexing, you can disable the refresh interval. Refer to [disable refresh interval](https://www.elastic.co/docs/deploy-manage/production-guidance/optimize-performance/indexing-speed#disable-refresh-interval) for an example. There are multiple valid ways to represent index settings in the request body. 
You can specify only the setting, for example: ``` { "number_of_replicas": 1 } ``` Or you can use an `index` setting object: ``` { "index": { "number_of_replicas": 1 } } ``` Or you can use dot annotation: ``` { "index.number_of_replicas": 1 } ``` Or you can embed any of the aforementioned options in a `settings` object. For example: ``` { "settings": { "index": { "number_of_replicas": 1 } } } ``` NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. Refer to [updating analyzers on existing indices](https://www.elastic.co/docs/manage-data/data-store/text-analysis/specify-an-analyzer#update-analyzers-on-existing-indices) for step-by-step examples. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings | Elasticsearch API documentation} diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 8c0d1b7ce..d887535f6 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -431,6 +431,8 @@ export default class Inference { body: [ 'query', 'input', + 'return_documents', + 'top_n', 'task_settings' ], query: [ @@ -488,7 +490,7 @@ export default class Inference { } /** - * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai`, `hugging_face` or the `elastic` service, use the Chat completion inference API. + * Perform chat completion inference on the service The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai`, `hugging_face` or the `elastic` service, use the Chat completion inference API. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference | Elasticsearch API documentation} */ async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -540,7 +542,7 @@ export default class Inference { } /** - * Perform completion inference on the service + * Perform completion inference on the service Get responses for completion tasks. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} */ async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2378,6 +2380,8 @@ export default class Inference { 'inference_id', 'query', 'input', + 'return_documents', + 'top_n', 'task_settings', 'timeout' ] @@ -2449,7 +2453,7 @@ export default class Inference { } /** - * Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. + * Perform streaming completion inference on the service Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). 
You must use a client that supports streaming. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference | Elasticsearch API documentation} */ async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/project.ts b/src/api/api/project.ts index e8717a7a4..7818be504 100644 --- a/src/api/api/project.ts +++ b/src/api/api/project.ts @@ -43,7 +43,8 @@ export default class Project { } /** - * Return tags defined for the project + * Get tags. Get the tags that are defined for the project. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch-serverless/operation/operation-project-tags | Elasticsearch API documentation} */ async tags (this: That, params?: T.ProjectTagsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async tags (this: That, params?: T.ProjectTagsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index c6f7188ac..2d576a7fc 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -36,7 +36,6 @@ const acceptedParams: Record { } @@ -4937,7 +4949,7 @@ export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMulti export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys & { [property: string]: AggregationsAggregate | string | long } -export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | 
AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate +export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsChangePointAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsCartesianBoundsAggregate | AggregationsCartesianCentroidAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate export interface AggregationsAggregateBase { meta?: Metadata @@ -4984,9 +4996,19 @@ export interface AggregationsAggregationContainer { bucket_correlation?: AggregationsBucketCorrelationAggregation /** A single-value metrics aggregation that calculates an approximate count of distinct values. */ cardinality?: AggregationsCardinalityAggregation + /** A metric aggregation that computes the spatial bounding box containing all values for a Point or Shape field. 
*/ + cartesian_bounds?: AggregationsCartesianBoundsAggregation + /** A metric aggregation that computes the weighted centroid from all coordinate values for point and shape fields. */ + cartesian_centroid?: AggregationsCartesianCentroidAggregation /** A multi-bucket aggregation that groups semi-structured text into buckets. * @experimental */ categorize_text?: AggregationsCategorizeTextAggregation + /** A sibling pipeline that detects spikes, dips, and change points in a metric. + * Given a distribution of values provided by the sibling multi-bucket aggregation, + * this aggregation indicates the bucket of any spike or dip and/or the bucket at which + * the largest change in the distribution of values occurred, if statistically significant. + * There must be at least 22 bucketed values. Fewer than 1,000 is preferred. */ + change_point?: AggregationsChangePointAggregation /** A single bucket aggregation that selects child documents that have the specified type, as defined in a `join` field. */ children?: AggregationsChildrenAggregation /** A multi-bucket aggregation that creates composite buckets from different sources. @@ -5012,6 +5034,9 @@ extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation /** A bucket aggregation which finds frequent item sets, a form of association rules mining that identifies items that often occur together. */ frequent_item_sets?: AggregationsFrequentItemSetsAggregation + /** A bucket aggregation which finds frequent item sets, a form of association rules mining that identifies items that often occur together. + * @alias frequent_item_sets */ + frequent_items?: AggregationsFrequentItemSetsAggregation /** A single bucket aggregation that narrows the set of documents to those that match a query. */ filter?: QueryDslQueryContainer /** A multi-bucket aggregation where each bucket contains the documents that match a query. */ @@ -5313,6 +5338,21 @@ export interface AggregationsCardinalityAggregation extends AggregationsMetricAg export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic' +export interface AggregationsCartesianBoundsAggregate extends AggregationsAggregateBase { + bounds?: TopLeftBottomRightGeoBounds +} + +export interface AggregationsCartesianBoundsAggregation extends AggregationsMetricAggregationBase { +} + +export interface AggregationsCartesianCentroidAggregate extends AggregationsAggregateBase { + count: long + location?: CartesianPoint +} + +export interface AggregationsCartesianCentroidAggregation extends AggregationsMetricAggregationBase { +} + export interface AggregationsCategorizeTextAggregation { /** The semi-structured text field to categorize.
*/ field: Field @@ -5351,6 +5391,31 @@ export interface AggregationsCategorizeTextAggregation { export type AggregationsCategorizeTextAnalyzer = string | AggregationsCustomCategorizeTextAnalyzer +export interface AggregationsChangePointAggregate extends AggregationsAggregateBase { + type: AggregationsChangeType + bucket?: AggregationsChangePointBucket +} + +export interface AggregationsChangePointAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsChangePointBucketKeys extends AggregationsMultiBucketBase { + key: FieldValue +} +export type AggregationsChangePointBucket = AggregationsChangePointBucketKeys +& { [property: string]: AggregationsAggregate | FieldValue | long } + +export interface AggregationsChangeType { + dip?: AggregationsDip + distribution_change?: AggregationsDistributionChange + indeterminable?: AggregationsIndeterminable + non_stationary?: AggregationsNonStationary + spike?: AggregationsSpike + stationary?: AggregationsStationary + step_change?: AggregationsStepChange + trend_change?: AggregationsTrendChange +} + export interface AggregationsChiSquareHeuristic { /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ background_is_superset: boolean @@ -5532,6 +5597,12 @@ export interface AggregationsDerivativeAggregate extends AggregationsSingleMetri export interface AggregationsDerivativeAggregation extends AggregationsPipelineAggregationBase { } +export interface AggregationsDip extends AggregationsAbstractChangePoint { +} + +export interface AggregationsDistributionChange extends AggregationsAbstractChangePoint { +} + export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase { /** The type of value used for de-duplication. */ execution_hint?: AggregationsSamplerAggregationExecutionHint @@ -5899,6 +5970,10 @@ export interface AggregationsHoltWintersMovingAverageAggregation extends Aggrega export type AggregationsHoltWintersType = 'add' | 'mult' +export interface AggregationsIndeterminable { + reason: string +} + export interface AggregationsInferenceAggregateKeys extends AggregationsAggregateBase { value?: FieldValue feature_importance?: AggregationsInferenceFeatureImportance[] @@ -6197,6 +6272,12 @@ export interface AggregationsNestedAggregation extends AggregationsBucketAggrega path?: Field } +export interface AggregationsNonStationary { + p_value: double + r_value: double + trend: string +} + export interface AggregationsNormalizeAggregation extends AggregationsPipelineAggregationBase { /** The specific method to apply. 
*/ method?: AggregationsNormalizeMethod @@ -6530,6 +6611,9 @@ export interface AggregationsSingleMetricAggregateBase extends AggregationsAggre value_as_string?: string } +export interface AggregationsSpike extends AggregationsAbstractChangePoint { +} + export interface AggregationsStandardDeviationBounds { upper: double | null lower: double | null @@ -6548,6 +6632,9 @@ export interface AggregationsStandardDeviationBoundsAsString { lower_sampling: string } +export interface AggregationsStationary { +} + export interface AggregationsStatsAggregate extends AggregationsAggregateBase { count: long min: double | null @@ -6569,6 +6656,9 @@ export interface AggregationsStatsBucketAggregate extends AggregationsStatsAggre export interface AggregationsStatsBucketAggregation extends AggregationsPipelineAggregationBase { } +export interface AggregationsStepChange extends AggregationsAbstractChangePoint { +} + export interface AggregationsStringRareTermsAggregate extends AggregationsMultiBucketAggregateBase { } @@ -6790,6 +6880,12 @@ export interface AggregationsTopMetricsValue { field: Field } +export interface AggregationsTrendChange { + p_value: double + r_value: double + change_point: integer +} + export interface AggregationsUnmappedRareTermsAggregate extends AggregationsMultiBucketAggregateBase { } @@ -8227,8 +8323,12 @@ export interface MappingDenseVectorIndexOptions { m?: integer /** The type of kNN algorithm to use. */ type: MappingDenseVectorIndexOptionsType - /** The rescore vector options. This is only applicable to `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`, `bbq_flat`, `int4_flat`, and `int8_flat` index types. */ + /** The rescore vector options. This is only applicable to `bbq_disk`, `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`, `bbq_flat`, `int4_flat`, and `int8_flat` index types. */ rescore_vector?: MappingDenseVectorIndexOptionsRescoreVector + /** `true` if vector rescoring should be done on-disk + * + * Only applicable to `bbq_hnsw` */ + on_disk_rescore?: boolean } export interface MappingDenseVectorIndexOptionsRescoreVector { @@ -8239,7 +8339,7 @@ export interface MappingDenseVectorIndexOptionsRescoreVector { oversample: float } -export type MappingDenseVectorIndexOptionsType = 'bbq_flat' | 'bbq_hnsw' | 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw' +export type MappingDenseVectorIndexOptionsType = 'bbq_flat' | 'bbq_hnsw' | 'bbq_disk' | 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw' export interface MappingDenseVectorProperty extends MappingPropertyBase { type: 'dense_vector' @@ -8663,7 +8763,7 @@ export interface MappingSemanticTextProperty { /** Settings for chunking text into smaller passages. If specified, these will override the * chunking settings sent in the inference endpoint associated with inference_id. If chunking settings are updated, * they will not be applied to existing documents until they are reindexed. */ - chunking_settings?: MappingChunkingSettings + chunking_settings?: MappingChunkingSettings | null /** Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one * field for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers. 
*/ fields?: Record @@ -10527,7 +10627,7 @@ export type CatCatMasterColumn = 'id' | 'host' | 'h' | 'ip' | 'node' | 'n' | str export type CatCatMasterColumns = CatCatMasterColumn | CatCatMasterColumn[] -export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 
'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string +export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'available_processors' | 'ap' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' 
| 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[] @@ -11995,7 +12095,8 @@ export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { /** The ID of the data frame analytics to fetch */ id?: Id - /** Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) */ + /** Whether to ignore if a wildcard expression matches no configs. + * (This includes `_all` string or when no configs have been specified.) */ allow_no_match?: boolean /** Comma-separated list of column names to display. */ h?: CatCatDfaColumns @@ -13063,6 +13164,11 @@ export interface CatNodesNodesRecord { /** The load average for the last fifteen minutes. * @alias load_15m */ l?: string + /** The number of available processors (logical CPU cores available to the JVM). */ + available_processors?: string + /** The number of available processors (logical CPU cores available to the JVM). + * @alias available_processors */ + ap?: string /** The node uptime. */ uptime?: string /** The node uptime. @@ -16866,6 +16972,18 @@ export interface ClusterStatsDenseVectorStats { off_heap?: ClusterStatsDenseVectorOffHeapStats } +export interface ClusterStatsExtendedRetrieversSearchUsage { + text_similarity_reranker?: ClusterStatsExtendedTextSimilarityRetrieverUsage +} + +export interface ClusterStatsExtendedSearchUsage { + retrievers?: ClusterStatsExtendedRetrieversSearchUsage +} + +export interface ClusterStatsExtendedTextSimilarityRetrieverUsage { + chunk_rescorer?: long +} + export interface ClusterStatsFieldTypes { /** The name for the field type in selected nodes. 
*/ name: Name @@ -17061,6 +17179,7 @@ export interface ClusterStatsSearchUsageStats { rescorers: Record sections: Record retrievers: Record + extended: ClusterStatsExtendedSearchUsage } export type ClusterStatsShardState = 'INIT' | 'SUCCESS' | 'FAILED' | 'ABORTED' | 'MISSING' | 'WAITING' | 'QUEUED' | 'PAUSED_FOR_NODE_REMOVAL' @@ -17362,7 +17481,7 @@ export interface ConnectorCheckInResponse { export interface ConnectorDeleteRequest extends RequestBase { /** The unique identifier of the connector to be deleted */ connector_id: Id - /** A flag indicating if associated sync jobs should be also removed. Defaults to false. */ + /** A flag indicating if associated sync jobs should be also removed. */ delete_sync_jobs?: boolean /** A flag indicating if the connector should be hard deleted. */ hard?: boolean @@ -17413,7 +17532,7 @@ export interface ConnectorLastSyncResponse { } export interface ConnectorListRequest extends RequestBase { - /** Starting offset (default: 0) */ + /** Starting offset */ from?: integer /** Specifies a max number of results to get */ size?: integer @@ -17555,7 +17674,7 @@ export interface ConnectorSyncJobGetRequest extends RequestBase { export type ConnectorSyncJobGetResponse = ConnectorConnectorSyncJob export interface ConnectorSyncJobListRequest extends RequestBase { - /** Starting offset (default: 0) */ + /** Starting offset */ from?: integer /** Specifies a max number of results to get */ size?: integer @@ -22359,7 +22478,7 @@ export interface IndicesStatsRequest extends RequestBase { include_segment_file_sizes?: boolean /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean - /** Indicates whether statistics are aggregated at the cluster, index, or shard level. */ + /** Indicates whether statistics are aggregated at the cluster, indices, or shards level. */ level?: Level /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { metric?: never, index?: never, completion_fields?: never, expand_wildcards?: never, fielddata_fields?: never, fields?: never, forbid_closed_indices?: never, groups?: never, include_segment_file_sizes?: never, include_unloaded_segments?: never, level?: never } @@ -22578,7 +22697,7 @@ export interface IndicesValidateQueryRequest extends RequestBase { analyzer?: string /** If `true`, wildcard and prefix queries are analyzed. */ analyze_wildcard?: boolean - /** The default operator for query string query: `AND` or `OR`. */ + /** The default operator for query string query: `and` or `or`. */ default_operator?: QueryDslOperator /** Field to use as default where no field prefix is given in the query string. * This parameter can only be used when the `q` query string parameter is specified. */ @@ -24772,19 +24891,19 @@ export interface InferenceRerankRequest extends RequestBase { timeout?: Duration /** Query input. */ query: string - /** The text on which you want to perform the inference task. - * It can be a single string or an array. - * - * > info - * > Inference endpoints for the `completion` task type currently only support a single string as input. */ - input: string | string[] + /** The documents to rank. */ + input: string[] + /** Include the document text in the response. */ + return_documents?: boolean + /** Limit the response to the top N documents. */ + top_n?: integer /** Task settings for the individual inference request. 
* These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ task_settings?: InferenceTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, return_documents?: never, top_n?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, return_documents?: never, top_n?: never, task_settings?: never } } export type InferenceRerankResponse = InferenceRerankedInferenceResult @@ -32283,7 +32402,7 @@ export interface NodesStatsRequest extends RequestBase { groups?: boolean /** If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). */ include_segment_file_sizes?: boolean - /** Indicates whether statistics are aggregated at the cluster, index, or shard level. */ + /** Indicates whether statistics are aggregated at the node, indices, or shards level. */ level?: NodeStatsLevel /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -33185,6 +33304,11 @@ export interface SecurityApiKey { * At least one of them must be specified. * When specified, the new access assignment fully replaces the previously assigned access. */ access?: SecurityAccess + /** The certificate identity associated with a cross-cluster API key. + * Restricts the API key to connections authenticated by a specific TLS certificate. + * Only applicable to cross-cluster API keys. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + certificate_identity?: string /** The profile uid for the API key owner principal, if requested and if it exists */ profile_uid?: string /** Sorting values when using the `sort` parameter with the `security.query_api_keys` API. */ @@ -33805,10 +33929,14 @@ export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase { metadata?: Metadata /** Specifies the name for this API key. */ name: Name + /** The certificate identity to associate with this API key. + * This field is used to restrict the API key to connections authenticated by a specific TLS certificate. + * The value should match the certificate's distinguished name (DN) pattern. */ + certificate_identity?: string /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { access?: never, expiration?: never, metadata?: never, name?: never } + body?: string | { [key: string]: any } & { access?: never, expiration?: never, metadata?: never, name?: never, certificate_identity?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { access?: never, expiration?: never, metadata?: never, name?: never } + querystring?: { [key: string]: any } & { access?: never, expiration?: never, metadata?: never, name?: never, certificate_identity?: never } } export interface SecurityCreateCrossClusterApiKeyResponse { @@ -35334,10 +35462,17 @@ export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase { * Within the metadata object, keys beginning with `_` are reserved for system usage. * When specified, this information fully replaces metadata previously associated with the API key. */ metadata?: Metadata + /** The certificate identity to associate with this API key. + * This field is used to restrict the API key to connections authenticated by a specific TLS certificate. + * The value should match the certificate's distinguished name (DN) pattern. + * When specified, this fully replaces any previously assigned certificate identity. + * To clear an existing certificate identity, explicitly set this field to `null`. + * When omitted, the existing certificate identity remains unchanged. */ + certificate_identity?: string /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never } + body?: string | { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never, certificate_identity?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never } + querystring?: { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never, certificate_identity?: never } } export interface SecurityUpdateCrossClusterApiKeyResponse { @@ -37202,7 +37337,7 @@ export interface StreamsStatusLogsStatus { export interface StreamsStatusRequest extends RequestBase { /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ - master_timeout?: TimeUnit + master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -38000,6 +38135,12 @@ export interface TransformSettings { * exceptions occur, the page size is dynamically adjusted to a lower value. The minimum value is `10` and the * maximum is `65,536`. */ max_page_search_size?: integer + /** Specifies whether the transform checkpoint will use the Point In Time API while searching over the source index. + * In general, Point In Time is an optimization that will reduce pressure on the source index by reducing the amount + * of refreshes and merges, but it can be expensive if a large number of Point In Times are opened and closed for a + * given index. The benefits and impact depend on the data being searched, the ingest rate into the source index, and + * the amount of other consumers searching the same source index. */ + use_point_in_time?: boolean /** If `true`, the transform runs in unattended mode. In unattended mode, the transform retries indefinitely in case * of an error which means the transform never fails. Setting the number of retries other than infinite fails in * validation. 
*/ From eeeed8b9d8a47fc45949ce257a1f66e9f4185a68 Mon Sep 17 00:00:00 2001 From: margaretjgu <136839162+margaretjgu@users.noreply.github.com> Date: Tue, 21 Oct 2025 11:27:38 -0400 Subject: [PATCH 643/647] update workflow for md changes only (#3042) Co-authored-by: Josh Mock --- .github/workflows/nodejs.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 0a6781200..b4379fcba 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -26,8 +26,8 @@ jobs: name: Test runs-on: ${{ matrix.os }} needs: paths-filter - # only run if code relevant to unit tests was changed - if: needs.paths-filter.outputs.src-only == 'true' + env: + CODE_CHANGED: ${{ needs.paths-filter.outputs.src-only }} strategy: fail-fast: false @@ -51,15 +51,15 @@ jobs: - name: Lint run: | - npm run lint + [ "$CODE_CHANGED" = "true" ] && npm run lint || exit 0 - name: Unit test run: | - npm run test:unit + [ "$CODE_CHANGED" = "true" ] && npm run test:unit || exit 0 - name: ECMAScript module test run: | - npm run test:esm + [ "$CODE_CHANGED" = "true" ] && npm run test:esm || exit 0 license: name: License check @@ -90,8 +90,8 @@ jobs: name: Test Bun runs-on: ${{ matrix.os }} needs: paths-filter - # only run if code relevant to unit tests was changed - if: needs.paths-filter.outputs.src-only == 'true' + env: + CODE_CHANGED: ${{ needs.paths-filter.outputs.src-only }} strategy: fail-fast: false @@ -112,12 +112,12 @@ jobs: - name: Lint run: | - bun run lint + [ "$CODE_CHANGED" = "true" ] && bun run lint || exit 0 - name: Unit test run: | - bun run test:unit-bun + [ "$CODE_CHANGED" = "true" ] && bun run test:unit-bun || exit 0 - name: ECMAScript module test run: | - bun run test:esm + [ "$CODE_CHANGED" = "true" ] && bun run test:esm || exit 0 From 9ffe7be1201eac6afb0b6a264aa5d218dfdecac4 Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Wed, 22 Oct 2025 13:21:05 -0500 Subject: [PATCH 644/647] Force Bash for unit tests (#3044) --- .github/workflows/nodejs.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index b4379fcba..0efb7f3a6 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -50,14 +50,17 @@ jobs: npm install - name: Lint + shell: bash run: | [ "$CODE_CHANGED" = "true" ] && npm run lint || exit 0 - name: Unit test + shell: bash run: | [ "$CODE_CHANGED" = "true" ] && npm run test:unit || exit 0 - name: ECMAScript module test + shell: bash run: | [ "$CODE_CHANGED" = "true" ] && npm run test:esm || exit 0 @@ -111,13 +114,16 @@ jobs: bun install - name: Lint + shell: bash run: | [ "$CODE_CHANGED" = "true" ] && bun run lint || exit 0 - name: Unit test + shell: bash run: | [ "$CODE_CHANGED" = "true" ] && bun run test:unit-bun || exit 0 - name: ECMAScript module test + shell: bash run: | [ "$CODE_CHANGED" = "true" ] && bun run test:esm || exit 0 From e0353e71ca2e220f94f43b02163aa2b17e6267ba Mon Sep 17 00:00:00 2001 From: Josh Mock Date: Thu, 23 Oct 2025 11:54:40 -0500 Subject: [PATCH 645/647] Add release notes for 9.2.0 (#3043) --- docs/release-notes/index.md | 10 +++++++++- package.json | 4 ++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md index 484c0afd9..0a70b6214 100644 --- a/docs/release-notes/index.md +++ b/docs/release-notes/index.md @@ -20,7 +20,15 @@ To check for security updates, go to [Security announcements for the Elastic sta % ### 
Fixes [elasticsearch-javascript-client-next-fixes] % \* -## 9.1.1 +## 9.2.0 [elasticsearch-javascript-client-9.2.0-release-notes] + +### Features and enhancements [elasticsearch-javascript-client-9.2.0-features-enhancements] + +- **Compatibility with Elasticsearch 9.2:** All changes and additions to Elasticsearch APIs for its 9.2 release are reflected in this release. + +- **Accepted parameter names added to transport request metadata:** All requests sent through `@elastic/transport` already included some metadata about the request (API name, path parameters). An `acceptedParams` array has been added that includes the names of all parameters that an API supports. This helps support more flexible pre-flight request modifications made by custom transports. + +## 9.1.1 [elasticsearch-javascript-client-9.1.1-release-notes] ### Fixes [elasticsearch-javascript-client-9.1.1-fixes] diff --git a/package.json b/package.json index 424490f27..ec21e7362 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "9.1.0", - "versionCanary": "9.1.0-canary.0", + "version": "9.2.0", + "versionCanary": "9.2.0-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", "types": "index.d.ts", From 4fd0170f4fcd2cbebd7c2075d8c36192320753f6 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 27 Oct 2025 18:58:42 +0100 Subject: [PATCH 646/647] Auto-generated API code (#3049) --- docs/reference/api-reference.md | 44 +++++---- src/api/api/indices.ts | 138 +++++++++++++++++++++++--- src/api/types.ts | 170 +++++++++++++++++++++++++++++--- 3 files changed, 303 insertions(+), 49 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 7ca3ef3ad..754d9eb6f 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -1514,7 +1514,7 @@ client.search({ ... }) - **`track_total_hits` (Optional, boolean \| number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. - **`indices_boost` (Optional, Record[])**: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. - **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. -- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } \| { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run. +- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, visit_percentage, boost, filter, similarity, inner_hits, rescore_vector } \| { field, query_vector, query_vector_builder, k, num_candidates, visit_percentage, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run. - **`rank` (Optional, { rrf })**: The Reciprocal Rank Fusion (RRF) to use. - **`min_score` (Optional, number)**: The minimum `_score` for matching documents. 
Documents with a lower `_score` are not included in search results and results collected by aggregations. - **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. @@ -2160,7 +2160,7 @@ Defaults to 10,000 hits. - **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. - **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. -- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } \| { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. +- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, visit_percentage, boost, filter, similarity, inner_hits, rescore_vector } \| { field, query_vector, query_vector_builder, k, num_candidates, visit_percentage, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. - **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations. - **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** @@ -5634,6 +5634,16 @@ client.indices.deleteIndexTemplate({ name }) - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
+## client.indices.deleteSampleConfiguration [_indices.delete_sample_configuration] +Delete sampling configuration for an index or data stream. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-sample-configuration) + +```ts +client.indices.deleteSampleConfiguration() +``` + + ## client.indices.deleteTemplate [_indices.delete_template] Delete a legacy index template. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. @@ -6219,23 +6229,13 @@ client.indices.getMigrateReindexStatus({ index }) #### Request (object) [_request_indices.get_migrate_reindex_status] - **`index` (string \| string[])**: The index or data stream name. -## client.indices.getSample [_indices.get_sample] -Get random sample of ingested data +## client.indices.getSampleConfiguration [_indices.get_sample_configuration] +Get sampling configuration for an index or data stream. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample-configuration) ```ts -client.indices.getSample() -``` - - -## client.indices.getSampleStats [_indices.get_sample_stats] -Get stats about a random sample of ingested data - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample) - -```ts -client.indices.getSampleStats() +client.indices.getSampleConfiguration() ``` @@ -7966,7 +7966,7 @@ client.inference.putCustom({ task_type, custom_inference_id, service, service_se - **`task_type` (Enum("text_embedding" \| "sparse_embedding" \| "rerank" \| "completion"))**: The type of the inference task that the model will perform. - **`custom_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("custom"))**: The type of service supported for the specified task type. In this case, `custom`. -- **`service_settings` ({ headers, input_type, query_parameters, request, response, secret_parameters, url })**: Settings used to install the inference model. +- **`service_settings` ({ batch_size, headers, input_type, query_parameters, request, response, secret_parameters, url })**: Settings used to install the inference model. These settings are specific to the `custom` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { parameters })**: Settings to configure the inference task. @@ -8025,8 +8025,10 @@ client.inference.putElasticsearch({ task_type, elasticsearch_inference_id, servi - **`elasticsearch_inference_id` (string)**: The unique identifier of the inference endpoint. It must not match the `model_id`. - **`service` (Enum("elasticsearch"))**: The type of service supported for the specified task type. In this case, `elasticsearch`. -- **`service_settings` ({ adaptive_allocations, deployment_id, model_id, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elasticsearch` service. +- **`service_settings` ({ adaptive_allocations, deployment_id, model_id, num_allocations, num_threads, long_document_strategy, max_chunks_per_doc })**: Settings used to install the inference model.
These settings are specific to the `elasticsearch` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `sparse_embedding` and `text_embedding` task types. +Not applicable to the `rerank`, `completion`, or `chat_completion` task types. - **`task_settings` (Optional, { return_documents })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -8105,9 +8107,9 @@ client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, ser - **`task_type` (Enum("rerank" \| "text_embedding" \| "completion" \| "chat_completion"))**: The type of the inference task that the model will perform. - **`googlevertexai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`. -- **`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json, dimensions })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. +- **`service_settings` ({ provider, url, streaming_url, location, model_id, project_id, rate_limit, service_account_json, dimensions })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. -- **`task_settings` (Optional, { auto_truncate, top_n, thinking_config })**: Settings to configure the inference task. +- **`task_settings` (Optional, { auto_truncate, top_n, thinking_config, max_tokens })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -8257,7 +8259,7 @@ NOTE: The `chat_completion` task type only supports streaming and only through t - **`service` (Enum("openai"))**: The type of service supported for the specified task type. In this case, `openai`. - **`service_settings` ({ api_key, dimensions, model_id, organization_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `openai` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. -- **`task_settings` (Optional, { user })**: Settings to configure the inference task. +- **`task_settings` (Optional, { user, headers })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. 
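This commit also reworks the rerank request, as seen in the `inference.ts` accepted-parameter and `InferenceRerankRequest` changes elsewhere in this patch: `input` is now an array of documents, and the new `return_documents` and `top_n` options control the response shape. A minimal sketch, with a hypothetical endpoint ID and illustrative documents:

```ts
// Sketch: rerank two documents against a query using an existing rerank endpoint
const response = await client.inference.rerank({
  inference_id: 'my-rerank-endpoint', // hypothetical inference endpoint created beforehand
  query: 'what is a data stream?',
  input: [
    'Data streams store append-only time series data.',
    'Index templates configure settings for new indices.'
  ],
  return_documents: true, // include each document's text in the result
  top_n: 1 // return only the single best-ranked document
})
```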
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index dba682038..316448364 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -231,6 +231,13 @@ export default class Indices { 'timeout' ] }, + 'indices.delete_sample_configuration': { + path: [ + 'index' + ], + body: [], + query: [] + }, 'indices.delete_template': { path: [ 'name' @@ -500,6 +507,13 @@ export default class Indices { body: [], query: [] }, + 'indices.get_sample_configuration': { + path: [ + 'index' + ], + body: [], + query: [] + }, 'indices.get_sample_stats': { path: [ 'index' @@ -1888,6 +1902,54 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Delete sampling configuration for an index or data stream + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-sample-configuration | Elasticsearch API documentation} + */ + async deleteSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async deleteSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.delete_sample_configuration'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/${encodeURIComponent(params.index.toString())}/_sample/config` + const meta: TransportRequestMetadata = { + name: 'indices.delete_sample_configuration', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Delete a legacy index template. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template | Elasticsearch API documentation} @@ -3146,13 +3208,13 @@ export default class Indices { } /** - * Get random sample of ingested data - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample | Elasticsearch API documentation} + * Request for a random sample of raw documents ingested into the given index or data stream. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} */ - async getSample (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getSample (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getSample (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async getSample (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async getSample (this: That, params: T.IndicesGetSampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSample (this: That, params: T.IndicesGetSampleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSample (this: That, params: T.IndicesGetSampleRequest, options?: TransportRequestOptions): Promise + async getSample (this: That, params: T.IndicesGetSampleRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath } = this[kAcceptedParams]['indices.get_sample'] @@ -3170,11 +3232,11 @@ export default class Indices { } } - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -3194,16 +3256,16 @@ export default class Indices { } /** - * Get stats about a random sample of ingested data - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample | Elasticsearch API documentation} + * Get sampling configuration for an index or data stream + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample-configuration | Elasticsearch API documentation} */ - async getSampleStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getSampleStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getSampleStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async getSampleStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async getSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { const { path: acceptedPath - } = this[kAcceptedParams]['indices.get_sample_stats'] + } = this[kAcceptedParams]['indices.get_sample_configuration'] const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3227,6 +3289,54 @@ export default class Indices { } } + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_sample/config` + const meta: TransportRequestMetadata = { + name: 'indices.get_sample_configuration', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Request stats for a random sample of raw documents ingested into the given index or data stream. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} + */ + async getSampleStats (this: That, params: T.IndicesGetSampleStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSampleStats (this: That, params: T.IndicesGetSampleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSampleStats (this: That, params: T.IndicesGetSampleStatsRequest, options?: TransportRequestOptions): Promise + async getSampleStats (this: That, params: T.IndicesGetSampleStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_sample_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + const method = 'GET' const path = `/${encodeURIComponent(params.index.toString())}/_sample/stats` const meta: TransportRequestMetadata = { diff --git a/src/api/types.ts b/src/api/types.ts index 734eaeea7..a25f974c0 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -4190,6 +4190,8 @@ export interface KnnQuery extends QueryDslQueryBase { query_vector_builder?: QueryVectorBuilder /** The number of nearest neighbor candidates to consider per shard */ num_candidates?: integer + /** The percentage of vectors to explore per shard while doing knn search with bbq_disk */ + visit_percentage?: float /** The final number of nearest neighbors to return as top hits */ k?: integer /** Filters for the kNN search query */ @@ -4211,6 +4213,8 @@ export interface KnnRetriever extends RetrieverBase { k: integer /** Number of nearest neighbor candidates to consider per shard. */ num_candidates: integer + /** The percentage of vectors to explore per shard while doing knn search with bbq_disk */ + visit_percentage?: float /** The minimum similarity required for a document to be considered a match. */ similarity?: float /** Apply oversampling and rescoring to quantized vectors */ @@ -4228,6 +4232,8 @@ export interface KnnSearch { k?: integer /** The number of nearest neighbor candidates to consider per shard */ num_candidates?: integer + /** The percentage of vectors to explore per shard while doing knn search with bbq_disk */ + visit_percentage?: float /** Boost value to apply to kNN scores */ boost?: float /** Filters for the kNN search query */ @@ -7517,7 +7523,7 @@ export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase { export interface AnalysisKuromojiAnalyzer { type: 'kuromoji' - mode: AnalysisKuromojiTokenizationMode + mode?: AnalysisKuromojiTokenizationMode user_dictionary?: string } @@ -11236,12 +11242,24 @@ export interface CatIndicesIndicesRecord { /** number of replica shards * @alias rep */ shardsReplica?: string - /** available docs */ + /** The number of documents in the index, including hidden nested documents. + * For indices with `semantic_text` fields or other nested field types, + * this count includes the internal nested documents. + * To get the logical document count (excluding nested documents), use + * the `_count` API or `_cat/count` API instead. 
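+ * As a hedged sketch, `const { count } = await client.count({ index: 'my-index' })`
+ * returns that logical count ('my-index' is a hypothetical index name).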
*/ 'docs.count'?: string | null - /** available docs + /** The number of documents in the index, including hidden nested documents. + * For indices with `semantic_text` fields or other nested field types, + * this count includes the internal nested documents. + * To get the logical document count (excluding nested documents), use + * the `_count` API or `_cat/count` API instead. * @alias 'docs.count' */ dc?: string | null - /** available docs + /** The number of documents in the index, including hidden nested documents. + * For indices with `semantic_text` fields or other nested field types, + * this count includes the internal nested documents. + * To get the logical document count (excluding nested documents), use + * the `_count` API or `_cat/count` API instead. * @alias 'docs.count' */ docsCount?: string | null /** deleted docs */ @@ -21151,6 +21169,52 @@ export interface IndicesGetMigrateReindexStatusStatusInProgress { reindexed_doc_count: long } +export interface IndicesGetSampleRequest extends RequestBase { + /** Single index or data stream name. Wildcards are not supported. */ + index: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } +} + +export interface IndicesGetSampleResponse { + sample: IndicesGetSampleRawDocument[] +} + +export interface IndicesGetSampleRawDocument { + /** Name of the index for this raw document. */ + index: string + /** The original raw source. */ + source: Record +} + +export interface IndicesGetSampleStatsRequest extends RequestBase { + /** Single index or data stream name. Wildcards are not supported. */ + index: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } +} + +export interface IndicesGetSampleStatsResponse { + potential_samples: long + samples_rejected_for_max_samples_exceeded: long + samples_rejected_for_condition: long + samples_rejected_for_rate: long + samples_rejected_for_exception: long + samples_rejected_for_size: long + samples_accepted: long + time_sampling?: Duration + time_sampling_millis: DurationValue + time_evaluating_condition?: Duration + time_evaluating_condition_millis: DurationValue + time_compiling_condition?: Duration + time_compiling_condition_millis: DurationValue + last_exception?: string +} + export interface IndicesGetSettingsRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit * the request. Supports wildcards (`*`). To target all data streams and @@ -23245,6 +23309,22 @@ export interface InferenceCustomResponseParams { * } * } * + * # Elasticsearch supports the following embedding types: + * * float + * * byte + * * bit (or binary) + * + * To specify the embedding type for the response, the `embedding_type` + * field should be added in the `json_parser` object. Here's an example: + * "response":{ + * "json_parser":{ + * "text_embeddings":"$.data[*].embedding[*]", + * "embedding_type":"bit" + * } + * } + * + * If `embedding_type` is not specified, it defaults to `float`. 
+ * * # sparse_embedding * # For a response like this: * @@ -23349,7 +23429,11 @@ export interface InferenceCustomResponseParams { } export interface InferenceCustomServiceSettings { - /** Specifies the HTTPS header parameters – such as `Authentication` or `Contet-Type` – that are required to access the custom service. + /** Specifies the batch size used for the semantic_text field. If the field is not provided, the default is 10. + * The batch size is the maximum number of inputs in a single request to the upstream service. + * The chunk within the batch are controlled by the selected chunking strategy for the semantic_text field. */ + batch_size?: integer + /** Specifies the HTTP header parameters – such as `Authentication` or `Content-Type` – that are required to access the custom service. * For example: * ``` * "headers":{ @@ -23472,6 +23556,20 @@ export interface InferenceElasticsearchServiceSettings { * The value must be a power of 2. * The maximum value is 32. */ num_threads: integer + /** Available only for the `rerank` task type using the Elastic reranker model. + * Controls the strategy used for processing long documents during inference. + * + * Possible values: + * - `truncate` (default): Processes only the beginning of each document. + * - `chunk`: Splits long documents into smaller parts (chunks) before inference. + * + * When `long_document_strategy` is set to `chunk`, Elasticsearch splits each document into smaller parts but still returns a single score per document. + * That score reflects the highest relevance score among all chunks. */ + long_document_strategy?: string + /** Only for the `rerank` task type. + * Limits the number of chunks per document that are sent for inference when chunking is enabled. + * If not set, all chunks generated for the document are processed. */ + max_chunks_per_doc?: integer } export type InferenceElasticsearchServiceType = 'elasticsearch' @@ -23524,15 +23622,41 @@ export interface InferenceGoogleAiStudioServiceSettings { export type InferenceGoogleAiStudioTaskType = 'completion' | 'text_embedding' +export type InferenceGoogleModelGardenProvider = 'google' | 'anthropic' + export interface InferenceGoogleVertexAIServiceSettings { - /** The name of the location to use for the inference task. + /** The name of the Google Model Garden Provider for `completion` and `chat_completion` tasks. + * In order for a Google Model Garden endpoint to be used `provider` must be defined and be other than `google`. + * Modes: + * - Google Model Garden (third-party models): set `provider` to a supported non-`google` value and provide `url` and/or `streaming_url`. + * - Google Vertex AI: omit `provider` or set it to `google`. In this mode, do not set `url` or `streaming_url` and Elastic will construct the endpoint url from `location`, `model_id`, and `project_id` parameters. */ + provider?: InferenceGoogleModelGardenProvider + /** The URL for non-streaming `completion` requests to a Google Model Garden provider endpoint. + * If both `url` and `streaming_url` are provided, each is used for its respective mode. + * If `streaming_url` is not provided, `url` is also used for streaming `completion` and `chat_completion`. + * If `provider` is not provided or set to `google` (Google Vertex AI), do not set `url` (or `streaming_url`). + * At least one of `url` or `streaming_url` must be provided for Google Model Garden endpoint usage. */ + url?: string + /** The URL for streaming `completion` and `chat_completion` requests to a Google Model Garden provider endpoint. 
+ * If both `streaming_url` and `url` are provided, each is used for its respective mode. + * If `url` is not provided, `streaming_url` is also used for non-streaming `completion` requests. + * If `provider` is not provided or set to `google` (Google Vertex AI), do not set `streaming_url` (or `url`). + * At least one of `streaming_url` or `url` must be provided for Google Model Garden endpoint usage. */ + streaming_url?: string + /** The name of the location to use for the inference task for the Google Vertex AI inference task. + * For Google Vertex AI, when `provider` is omitted or `google` `location` is mandatory. + * For Google Model Garden's `completion` and `chat_completion` tasks, when `provider` is a supported non-`google` value - `location` is ignored. * Refer to the Google documentation for the list of supported locations. */ - location: string + location?: string /** The name of the model to use for the inference task. - * Refer to the Google documentation for the list of supported models. */ - model_id: string - /** The name of the project to use for the inference task. */ - project_id: string + * For Google Vertex AI `model_id` is mandatory. + * For Google Model Garden's `completion` and `chat_completion` tasks, when `provider` is a supported non-`google` value - `model_id` will be used for some providers that require it, otherwise - ignored. + * Refer to the Google documentation for the list of supported models for Google Vertex AI. */ + model_id?: string + /** The name of the project to use for the Google Vertex AI inference task. + * For Google Vertex AI `project_id` is mandatory. + * For Google Model Garden's `completion` and `chat_completion` tasks, when `provider` is a supported non-`google` value - `project_id` is ignored. */ + project_id?: string /** This setting helps to minimize the number of rate limit errors returned from Google Vertex AI. * By default, the `googlevertexai` service sets the number of requests allowed per minute to 30.000. */ rate_limit?: InferenceRateLimitSetting @@ -23554,6 +23678,11 @@ export interface InferenceGoogleVertexAITaskSettings { /** For a `completion` or `chat_completion` task, allows configuration of the thinking features for the model. * Refer to the Google documentation for the allowable configurations for each model type. */ thinking_config?: InferenceThinkingConfig + /** For `completion` and `chat_completion` tasks, specifies the `max_tokens` value for requests sent to the Google Model Garden `anthropic` provider. + * If `provider` is not set to `anthropic`, this field is ignored. + * If `max_tokens` is specified - it must be a positive integer. If not specified, the default value of 1024 is used. + * Anthropic models require `max_tokens` to be set for each request. Please refer to the Anthropic documentation for more information. */ + max_tokens?: integer } export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' | 'completion' | 'chat_completion' @@ -23636,7 +23765,9 @@ export interface InferenceInferenceChunkingSettings { } export interface InferenceInferenceEndpoint { - /** Chunking configuration object */ + /** The chunking configuration object. + * Applies only to the `sparse_embedding` and `text_embedding` task types. + * Not applicable to the `rerank`, `completion`, or `chat_completion` task types. 
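+ *
+ * As a hedged sketch, an illustrative configuration could look like
+ * `{ strategy: 'sentence', max_chunk_size: 250, sentence_overlap: 1 }`;
+ * the field names come from the chunking settings object, while the values are assumptions.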
*/ chunking_settings?: InferenceInferenceChunkingSettings /** The service type */ service: string @@ -23994,6 +24125,15 @@ export interface InferenceOpenAITaskSettings { /** For a `completion` or `text_embedding` task, specify the user issuing the request. * This information can be used for abuse detection. */ user?: string + /** Specifies custom HTTP header parameters. + * For example: + * ``` + * "headers":{ + * "Custom-Header": "Some-Value", + * "Another-Custom-Header": "Another-Value" + * } + * ``` */ + headers?: any } export type InferenceOpenAITaskType = 'chat_completion' | 'completion' | 'text_embedding' @@ -24135,7 +24275,7 @@ export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embeddin export type InferenceTaskTypeGoogleAIStudio = 'text_embedding' | 'completion' -export type InferenceTaskTypeGoogleVertexAI = 'text_embedding' | 'rerank' +export type InferenceTaskTypeGoogleVertexAI = 'chat_completion' | 'completion' | 'text_embedding' | 'rerank' export type InferenceTaskTypeHuggingFace = 'chat_completion' | 'completion' | 'rerank' | 'text_embedding' @@ -24642,7 +24782,9 @@ export interface InferencePutElasticsearchRequest extends RequestBase { elasticsearch_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `sparse_embedding` and `text_embedding` task types. + * Not applicable to the `rerank`, `completion`, or `chat_completion` task types. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `elasticsearch`. */ service: InferenceElasticsearchServiceType From cceb1a41e8a005653806f92cc7499f934eddfa42 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 3 Nov 2025 15:55:15 +0100 Subject: [PATCH 647/647] Auto-generated API code (#3057) --- docs/reference/api-reference.md | 60 ++++++++++++++++++++------ src/api/api/cat.ts | 76 ++++++++++++++++++++++++++++++++- src/api/api/indices.ts | 51 +++++++++++++++++++++- src/api/types.ts | 69 +++++++++++++++++++++--------- 4 files changed, 221 insertions(+), 35 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 754d9eb6f..ee75a6e1f 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -2297,6 +2297,16 @@ from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +## client.cat.circuitBreaker [_cat.circuit_breaker] +Get circuit breakers statistics + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch#TODO) + +```ts +client.cat.circuitBreaker() +``` + + ## client.cat.componentTemplates [_cat.component_templates] Get component templates. @@ -2792,6 +2802,16 @@ local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. 
If the request can target data streams, this argument +determines whether wildcard expressions match hidden data streams. Supports a list of values, +such as open,hidden. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only +missing or closed indices. This behavior applies even if the request targets other open indices. For example, +a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. +- **`allow_closed` (Optional, boolean)**: If true, allow closed indices to be returned in the response otherwise if false, keep the legacy behaviour +of throwing an exception if index pattern matches closed indices ## client.cat.shards [_cat.shards] Get shard information. @@ -5701,13 +5721,18 @@ To use the API, this parameter must be set to `true`. ## client.indices.downsample [_indices.downsample] Downsample an index. -Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. +Downsamples a time series (TSDS) index and reduces its size by keeping the last value or by pre-aggregating metrics: + +- When running in `aggregate` mode, it pre-calculates and stores statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) +for each metric field grouped by a configured time interval and their dimensions. +- When running in `last_value` mode, it keeps the last value for each metric in the configured interval and their dimensions. + For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. -The source index must be read only (`index.blocks.write: true`). +The source index must be read-only (`index.blocks.write: true`). [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample) @@ -5720,7 +5745,7 @@ client.indices.downsample({ index, target_index }) #### Request (object) [_request_indices.downsample] - **`index` (string)**: Name of the time series index to downsample. - **`target_index` (string)**: Name of the index to create. -- **`config` (Optional, { fixed_interval })** +- **`config` (Optional, { fixed_interval, sampling_method })** ## client.indices.exists [_indices.exists] Check indices. @@ -6025,6 +6050,16 @@ Supports a list of values, such as `open,hidden`. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
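A hedged sketch of the `downsample` call described above, exercising the new `sampling_method` option (the index names are hypothetical, and the source index must already be read-only):

```ts
// Sketch: roll a read-only TSDS backing index up into hourly buckets,
// keeping only the last value per metric and dimension combination.
const resp = await client.indices.downsample({
  index: '.ds-my-tsds-2025.01.01-000001', // hypothetical backing index name
  target_index: 'my-tsds-downsampled-1h',
  config: { fixed_interval: '1h', sampling_method: 'last_value' }
})
```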
+## client.indices.getAllSampleConfiguration [_indices.get_all_sample_configuration] +Get sampling configurations for all indices and data streams + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-all-sample-configuration) + +```ts +client.indices.getAllSampleConfiguration() +``` + + ## client.indices.getDataLifecycle [_indices.get_data_lifecycle] Get data stream lifecycles. @@ -6489,7 +6524,7 @@ To target all data streams use `*` or `_all`. - **`data_retention` (Optional, string \| -1 \| 0)**: If defined, every document added to this data stream will be stored at least for this time frame. Any time after this duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely. -- **`downsampling` (Optional, { rounds })**: The downsampling configuration to execute for the managed backing index after rollover. +- **`downsampling` (Optional, { after, fixed_interval }[])**: The downsampling configuration to execute for the managed backing index after rollover. - **`enabled` (Optional, boolean)**: If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. @@ -7568,7 +7603,7 @@ client.inference.completion({ inference_id, input }) - **`inference_id` (string)**: The inference Id - **`input` (string \| string[])**: Inference input. Either a string or an array of strings. -- **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the you specified and override the task settings specified when initializing the service. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete. ## client.inference.delete [_inference.delete] @@ -7585,7 +7620,7 @@ client.inference.delete({ inference_id }) #### Request (object) [_request_inference.delete] - **`inference_id` (string)**: The inference identifier. - **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type -- **`dry_run` (Optional, boolean)**: When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. +- **`dry_run` (Optional, boolean)**: When true, checks the semantic_text fields and inference processors that reference the endpoint and returns them in a list, but does not delete the endpoint. - **`force` (Optional, boolean)**: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. ## client.inference.get [_inference.get] @@ -7801,7 +7836,7 @@ client.inference.putAnthropic({ task_type, anthropic_inference_id, service, serv The only valid task type for the model to perform is `completion`. - **`anthropic_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("anthropic"))**: The type of service supported for the specified task type. In this case, `anthropic`. 
-- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service.
+- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `anthropic` service.
 - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
 - **`task_settings` (Optional, { max_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. These settings are specific to the task type you specified.
@@ -7824,7 +7859,7 @@ client.inference.putAzureaistudio({ task_type, azureaistudio_inference_id, servi
 - **`task_type` (Enum("completion" \| "rerank" \| "text_embedding"))**: The type of the inference task that the model will perform.
 - **`azureaistudio_inference_id` (string)**: The unique identifier of the inference endpoint.
 - **`service` (Enum("azureaistudio"))**: The type of service supported for the specified task type. In this case, `azureaistudio`.
-- **`service_settings` ({ api_key, endpoint_type, target, provider, rate_limit })**: Settings used to install the inference model. These settings are specific to the `openai` service.
+- **`service_settings` ({ api_key, endpoint_type, target, provider, rate_limit })**: Settings used to install the inference model. These settings are specific to the `azureaistudio` service.
 - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
 - **`task_settings` (Optional, { do_sample, max_new_tokens, temperature, top_p, user, return_documents, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified.
@@ -8346,7 +8381,7 @@ client.inference.sparseEmbedding({ inference_id, input })
 - **`inference_id` (string)**: The inference Id
 - **`input` (string \| string[])**: Inference input. Either a string or an array of strings.
-- **`task_settings` (Optional, User-defined value)**: Optional task settings
+- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.
 - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete.

 ## client.inference.streamCompletion [_inference.stream_completion]
@@ -8372,7 +8407,7 @@ client.inference.streamCompletion({ inference_id, input })
 It can be a single string or an array.

 NOTE: Inference endpoints for the completion task type currently only support a single string as input.
-- **`task_settings` (Optional, User-defined value)**: Optional task settings
+- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.
 - **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete.
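The hedged sketch below shows how a request-level `task_settings` object overrides the settings chosen when the endpoint was created; the endpoint id and the `user` setting are assumptions for illustration:

```ts
// Sketch: one-off completion with request-scoped task settings.
const resp = await client.inference.completion({
  inference_id: 'my-openai-endpoint', // hypothetical inference endpoint id
  input: 'Summarize downsampling in one sentence.',
  task_settings: { user: 'docs-example' } // overrides the endpoint-level task settings
})
```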
## client.inference.textEmbedding [_inference.text_embedding] @@ -8400,7 +8435,7 @@ Accepted values depend on the configured inference service, refer to the relevan > info > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`. -- **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the you specified and override the task settings specified when initializing the service. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete. ## client.inference.update [_inference.update] @@ -12996,7 +13031,8 @@ It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: The sort definition. -You can sort on `username`, `roles`, or `enabled`. +You can sort on `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, +and `applications.resources`. In addition, sort can also be applied to the `_doc` field to sort by index order. - **`size` (Optional, number)**: The number of hits to return. It must not be negative. diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index 213701703..eef10f6c4 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -58,6 +58,13 @@ export default class Cat { 'master_timeout' ] }, + 'cat.circuit_breaker': { + path: [ + 'circuit_breaker_patterns' + ], + body: [], + query: [] + }, 'cat.component_templates': { path: [ 'name' @@ -251,7 +258,12 @@ export default class Cat { 'h', 's', 'local', - 'master_timeout' + 'master_timeout', + 'expand_wildcards', + 'allow_no_indices', + 'ignore_throttled', + 'ignore_unavailable', + 'allow_closed' ] }, 'cat.shards': { @@ -451,6 +463,61 @@ export default class Cat { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Get circuit breakers statistics + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch#TODO | Elasticsearch API documentation} + */ + async circuitBreaker (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async circuitBreaker (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async circuitBreaker (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async circuitBreaker (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.circuit_breaker'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.circuit_breaker_patterns != null) { + method = 'GET' + path = `/_cat/circuit_breaker/${encodeURIComponent(params.circuit_breaker_patterns.toString())}` + } else { + method = 'GET' + path = '/_cat/circuit_breaker' + } + const meta: TransportRequestMetadata = { + name: 'cat.circuit_breaker', + pathParts: { + circuit_breaker_patterns: params.circuit_breaker_patterns + }, + acceptedParams: [ + 'circuit_breaker_patterns' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Get component templates. Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates | Elasticsearch API documentation} @@ -1434,7 +1501,12 @@ export default class Cat { 'h', 's', 'local', - 'master_timeout' + 'master_timeout', + 'expand_wildcards', + 'allow_no_indices', + 'ignore_throttled', + 'ignore_unavailable', + 'allow_closed' ] } return await this.transport.request({ path, method, querystring, body, meta }, options) diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 316448364..b1fc1a736 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -399,6 +399,11 @@ export default class Indices { 'master_timeout' ] }, + 'indices.get_all_sample_configuration': { + path: [], + body: [], + query: [] + }, 'indices.get_data_lifecycle': { path: [ 'name' @@ -2054,7 +2059,7 @@ export default class Indices { } /** - * Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). + * Downsample an index. Downsamples a time series (TSDS) index and reduces its size by keeping the last value or by pre-aggregating metrics: - When running in `aggregate` mode, it pre-calculates and stores statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval and their dimensions. - When running in `last_value` mode, it keeps the last value for each metric in the configured interval and their dimensions. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. 
The source index must be read-only (`index.blocks.write: true`). * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample | Elasticsearch API documentation} */ async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2673,6 +2678,50 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Get sampling configurations for all indices and data streams + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-all-sample-configuration | Elasticsearch API documentation} + */ + async getAllSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getAllSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getAllSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getAllSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_all_sample_configuration'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_sample/config' + const meta: TransportRequestMetadata = { + name: 'indices.get_all_sample_configuration', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Get data stream lifecycles. Get the data stream lifecycle configuration of one or more data streams. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle | Elasticsearch API documentation} diff --git a/src/api/types.ts b/src/api/types.ts index a25f974c0..427c55aec 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -4417,8 +4417,8 @@ export interface QueryVectorBuilder { } export interface RRFRetriever extends RetrieverBase { - /** A list of child retrievers to specify which sets of returned top documents will have the RRF formula applied to them. */ - retrievers: RetrieverContainer[] + /** A list of child retrievers to specify which sets of returned top documents will have the RRF formula applied to them. Each retriever can optionally include a weight parameter. */ + retrievers: RRFRetrieverEntry[] /** This value determines how much influence documents in individual result sets per query have over the final ranked result set. */ rank_constant?: integer /** This value determines the size of the individual result sets per query. */ @@ -4427,6 +4427,15 @@ export interface RRFRetriever extends RetrieverBase { fields?: string[] } +export interface RRFRetrieverComponent { + /** The nested retriever configuration. */ + retriever: RetrieverContainer + /** Weight multiplier for this retriever's contribution to the RRF score. Higher values increase influence. Defaults to 1.0 if not specified. Must be non-negative. 
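+ * As a hedged sketch, a component such as
+ * `{ retriever: { standard: { query: { match: { title: 'elasticsearch' } } } }, weight: 2.0 }`
+ * contributes twice as much to the combined RRF score as a sibling left at the default weight.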
*/ + weight?: float +} + +export type RRFRetrieverEntry = RetrieverContainer | RRFRetrieverComponent + export interface RankBase { } @@ -5827,7 +5836,7 @@ export interface AggregationsGeoLineAggregation { point: AggregationsGeoLinePoint /** The name of the numeric field to use as the sort key for ordering the points. * When the `geo_line` aggregation is nested inside a `time_series` aggregation, this field defaults to `@timestamp`, and any other value will result in error. */ - sort: AggregationsGeoLineSort + sort?: AggregationsGeoLineSort /** When `true`, returns an additional array of the sort values in the feature properties. */ include_sort?: boolean /** The order in which the line is sorted (ascending or descending). */ @@ -6335,7 +6344,7 @@ export interface AggregationsPercentilesAggregation extends AggregationsFormatMe * Set to `false` to disable this behavior. */ keyed?: boolean /** The percentiles to calculate. */ - percents?: double[] + percents?: double | double[] /** Uses the alternative High Dynamic Range Histogram algorithm to calculate percentiles. */ hdr?: AggregationsHdrMethod /** Sets parameters for the default TDigest algorithm used to calculate percentiles. */ @@ -14069,10 +14078,25 @@ export interface CatSegmentsRequest extends CatCatRequestBase { local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Type of index that wildcard expressions can match. If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as open,hidden. */ + expand_wildcards?: ExpandWildcards + /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only + * missing or closed indices. This behavior applies even if the request targets other open indices. For example, + * a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ + allow_no_indices?: boolean + /** If true, concrete, expanded or aliased indices are ignored when frozen. */ + ignore_throttled?: boolean + /** If true, missing or closed indices are not included in the response. */ + ignore_unavailable?: boolean + /** If true, allow closed indices to be returned in the response otherwise if false, keep the legacy behaviour + * of throwing an exception if index pattern matches closed indices */ + allow_closed?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, h?: never, s?: never, local?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { index?: never, h?: never, s?: never, local?: never, master_timeout?: never, expand_wildcards?: never, allow_no_indices?: never, ignore_throttled?: never, ignore_unavailable?: never, allow_closed?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { index?: never, h?: never, s?: never, local?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { index?: never, h?: never, s?: never, local?: never, master_timeout?: never, expand_wildcards?: never, allow_no_indices?: never, ignore_throttled?: never, ignore_unavailable?: never, allow_closed?: never } } export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] @@ -16027,7 +16051,7 @@ export interface ClusterAllocationExplainDiskUsage { } export interface ClusterAllocationExplainNodeAllocationExplanation { - deciders: ClusterAllocationExplainAllocationDecision[] + deciders?: ClusterAllocationExplainAllocationDecision[] node_attributes: Record node_decision: ClusterAllocationExplainDecision node_id: Id @@ -16035,7 +16059,7 @@ export interface ClusterAllocationExplainNodeAllocationExplanation { roles: NodeRoles store?: ClusterAllocationExplainAllocationStore transport_address: TransportAddress - weight_ranking: integer + weight_ranking?: integer } export interface ClusterAllocationExplainNodeDiskUsage { @@ -19457,13 +19481,15 @@ export interface IndicesDataStreamVisibility { export interface IndicesDownsampleConfig { /** The interval at which to aggregate the original time series index. */ fixed_interval: DurationLarge + /** The sampling method used to reduce the documents; it can be either `aggregate` or `last_value`. Defaults to `aggregate`. */ + sampling_method?: IndicesSamplingMethod } export interface IndicesDownsamplingRound { /** The duration since rollover when this downsampling round should execute */ after: Duration - /** The downsample configuration to execute. */ - config: IndicesDownsampleConfig + /** The downsample interval. */ + fixed_interval: DurationLarge } export interface IndicesFailureStore { @@ -19854,6 +19880,8 @@ export interface IndicesRetentionLease { period: Duration } +export type IndicesSamplingMethod = 'aggregate' | 'last_value' + export interface IndicesSearchIdle { after?: Duration } @@ -21451,7 +21479,7 @@ export interface IndicesPutDataLifecycleRequest extends RequestBase { * When empty, every document in this data stream will be stored indefinitely. */ data_retention?: Duration /** The downsampling configuration to execute for the managed backing index after rollover. */ - downsampling?: IndicesDataStreamLifecycleDownsampling + downsampling?: IndicesDownsamplingRound[] /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle * that's disabled (enabled: `false`) will have no effect on the data stream. */ enabled?: boolean @@ -24174,7 +24202,7 @@ export interface InferenceRequestChatCompletion { * Requests should generally only add new messages from the user (role `user`). * The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation. */ messages: InferenceMessage[] - /** The ID of the model to use. */ + /** The ID of the model to use. By default, the model ID is set to the value included when creating the inference endpoint. */ model?: string /** The upper bound limit for the number of tokens that can be generated for a completion request. */ max_completion_tokens?: long @@ -24418,7 +24446,7 @@ export interface InferenceCompletionRequest extends RequestBase { /** Inference input. * Either a string or an array of strings. 
 */
  input: string | string[]
-  /** Optional task settings */
+  /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */
  task_settings?: InferenceTaskSettings
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
@@ -24433,7 +24461,7 @@ export interface InferenceDeleteRequest extends RequestBase {
  task_type?: InferenceTaskType
  /** The inference identifier. */
  inference_id: Id
-  /** When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. */
+  /** When true, checks the semantic_text fields and inference processors that reference the endpoint and returns them in a list, but does not delete the endpoint. */
  dry_run?: boolean
  /** When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. */
  force?: boolean
@@ -24618,7 +24646,7 @@ export interface InferencePutAnthropicRequest extends RequestBase {
  chunking_settings?: InferenceInferenceChunkingSettings
  /** The type of service supported for the specified task type. In this case, `anthropic`. */
  service: InferenceAnthropicServiceType
-  /** Settings used to install the inference model. These settings are specific to the `watsonxai` service. */
+  /** Settings used to install the inference model. These settings are specific to the `anthropic` service. */
  service_settings: InferenceAnthropicServiceSettings
  /** Settings to configure the inference task.
   * These settings are specific to the task type you specified. */
@@ -24642,7 +24670,7 @@ export interface InferencePutAzureaistudioRequest extends RequestBase {
  chunking_settings?: InferenceInferenceChunkingSettings
  /** The type of service supported for the specified task type. In this case, `azureaistudio`. */
  service: InferenceAzureAiStudioServiceType
-  /** Settings used to install the inference model. These settings are specific to the `openai` service. */
+  /** Settings used to install the inference model. These settings are specific to the `azureaistudio` service. */
  service_settings: InferenceAzureAiStudioServiceSettings
  /** Settings to configure the inference task.
   * These settings are specific to the task type you specified. */
@@ -25058,7 +25086,7 @@ export interface InferenceSparseEmbeddingRequest extends RequestBase {
  /** Inference input.
   * Either a string or an array of strings. */
  input: string | string[]
-  /** Optional task settings */
+  /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */
  task_settings?: InferenceTaskSettings
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
@@ -25078,7 +25106,7 @@ export interface InferenceStreamCompletionRequest extends RequestBase {
  /** The unique identifier for the inference endpoint. */
  inference_id: Id
  /** The amount of time to wait for the inference request to complete. */
  timeout?: Duration
  /** The text on which you want to perform the inference task.
   * It can be a single string or an array.
   *
   * NOTE: Inference endpoints for the completion task type currently only support a single string as input. */
  input: string | string[]
-  /** Optional task settings */
+  /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */
  task_settings?: InferenceTaskSettings
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
@@ -25107,7 +25135,7 @@ export interface InferenceTextEmbeddingRequest extends RequestBase {
   * > info
   * > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`. */
  input_type?: string
-  /** Optional task settings */
+  /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */
  task_settings?: InferenceTaskSettings
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, input_type?: never, task_settings?: never }
@@ -35252,7 +35280,8 @@ export interface SecurityQueryRoleRequest extends RequestBase {
   * To page through more hits, use the `search_after` parameter. */
  from?: integer
  /** The sort definition.
-   * You can sort on `username`, `roles`, or `enabled`.
+   * You can sort on `name`, `description`, `metadata`, `applications.application`, `applications.privileges`,
+   * and `applications.resources`.
   * In addition, sort can also be applied to the `_doc` field to sort by index order. */
  sort?: Sort
  /** The number of hits to return.